author     Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>   2012-01-27 17:14:02 +0100
committer  Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>   2012-01-27 17:14:02 +0100
commit     6c02b7b1610f873888af20f291c07730889ff0f9 (patch)
tree       1b33e6642cc81605b8d37c0bda0abff0ba64fa2d /drivers
parent     x86: xen: size struct xen_spinlock to always fit in arch_spinlock_t (diff)
parent     Linux 3.3-rc1 (diff)
download   linux-6c02b7b1610f873888af20f291c07730889ff0f9.tar.xz
           linux-6c02b7b1610f873888af20f291c07730889ff0f9.zip
Merge commit 'v3.3-rc1' into stable/for-linus-fixes-3.3
* commit 'v3.3-rc1': (9775 commits)
  Linux 3.3-rc1
  x86, syscall: Need __ARCH_WANT_SYS_IPC for 32 bits
  qnx4: don't leak ->BitMap on late failure exits
  qnx4: reduce the insane nesting in qnx4_checkroot()
  qnx4: di_fname is an array, for crying out loud...
  KEYS: Permit key_serial() to be called with a const key pointer
  keys: fix user_defined key sparse messages
  ima: fix cred sparse warning
  uml: fix compile for x86-64
  MPILIB: Add a missing ENOMEM check
  tpm: fix (ACPI S3) suspend regression
  nvme: fix merge error due to change of 'make_request_fn' fn type
  xen: using EXPORT_SYMBOL requires including export.h
  gpio: tps65910: Use correct offset for gpio initialization
  acpi/apei/einj: Add extensions to EINJ from rev 5.0 of acpi spec
  intel_idle: Split up and provide per CPU initialization func
  ACPI processor: Remove unneeded variable passed by acpi_processor_hotadd_init V2
  tg3: Fix single-vector MSI-X code
  openvswitch: Fix multipart datapath dumps.
  ipv6: fix per device IP snmp counters
  ...
Diffstat (limited to 'drivers') — each row gives the file mode, the file path, and the number of lines changed:
-rw-r--r--drivers/Makefile2
-rw-r--r--drivers/accessibility/braille/braille_console.c2
-rw-r--r--drivers/acpi/Kconfig2
-rw-r--r--drivers/acpi/Makefile3
-rw-r--r--drivers/acpi/acpica/Makefile158
-rw-r--r--drivers/acpi/acpica/accommon.h2
-rw-r--r--drivers/acpi/acpica/acconfig.h9
-rw-r--r--drivers/acpi/acpica/acdebug.h2
-rw-r--r--drivers/acpi/acpica/acdispat.h2
-rw-r--r--drivers/acpi/acpica/acevents.h3
-rw-r--r--drivers/acpi/acpica/acglobal.h19
-rw-r--r--drivers/acpi/acpica/achware.h2
-rw-r--r--drivers/acpi/acpica/acinterp.h4
-rw-r--r--drivers/acpi/acpica/aclocal.h26
-rw-r--r--drivers/acpi/acpica/acmacros.h2
-rw-r--r--drivers/acpi/acpica/acnamesp.h2
-rw-r--r--drivers/acpi/acpica/acobject.h8
-rw-r--r--drivers/acpi/acpica/acopcode.h6
-rw-r--r--drivers/acpi/acpica/acparser.h2
-rw-r--r--drivers/acpi/acpica/acpredef.h41
-rw-r--r--drivers/acpi/acpica/acresrc.h115
-rw-r--r--drivers/acpi/acpica/acstruct.h2
-rw-r--r--drivers/acpi/acpica/actables.h2
-rw-r--r--drivers/acpi/acpica/acutils.h21
-rw-r--r--drivers/acpi/acpica/amlcode.h29
-rw-r--r--drivers/acpi/acpica/amlresrc.h138
-rw-r--r--drivers/acpi/acpica/dsargs.c18
-rw-r--r--drivers/acpi/acpica/dscontrol.c2
-rw-r--r--drivers/acpi/acpica/dsfield.c83
-rw-r--r--drivers/acpi/acpica/dsinit.c2
-rw-r--r--drivers/acpi/acpica/dsmethod.c2
-rw-r--r--drivers/acpi/acpica/dsmthdat.c2
-rw-r--r--drivers/acpi/acpica/dsobject.c2
-rw-r--r--drivers/acpi/acpica/dsopcode.c2
-rw-r--r--drivers/acpi/acpica/dsutils.c2
-rw-r--r--drivers/acpi/acpica/dswexec.c2
-rw-r--r--drivers/acpi/acpica/dswload.c2
-rw-r--r--drivers/acpi/acpica/dswload2.c2
-rw-r--r--drivers/acpi/acpica/dswscope.c2
-rw-r--r--drivers/acpi/acpica/dswstate.c2
-rw-r--r--drivers/acpi/acpica/evevent.c14
-rw-r--r--drivers/acpi/acpica/evglock.c8
-rw-r--r--drivers/acpi/acpica/evgpe.c2
-rw-r--r--drivers/acpi/acpica/evgpeblk.c2
-rw-r--r--drivers/acpi/acpica/evgpeinit.c2
-rw-r--r--drivers/acpi/acpica/evgpeutil.c2
-rw-r--r--drivers/acpi/acpica/evmisc.c2
-rw-r--r--drivers/acpi/acpica/evregion.c31
-rw-r--r--drivers/acpi/acpica/evrgnini.c2
-rw-r--r--drivers/acpi/acpica/evsci.c2
-rw-r--r--drivers/acpi/acpica/evxface.c2
-rw-r--r--drivers/acpi/acpica/evxfevnt.c2
-rw-r--r--drivers/acpi/acpica/evxfgpe.c2
-rw-r--r--drivers/acpi/acpica/evxfregn.c2
-rw-r--r--drivers/acpi/acpica/exconfig.c8
-rw-r--r--drivers/acpi/acpica/exconvrt.c2
-rw-r--r--drivers/acpi/acpica/excreate.c31
-rw-r--r--drivers/acpi/acpica/exdebug.c2
-rw-r--r--drivers/acpi/acpica/exdump.c9
-rw-r--r--drivers/acpi/acpica/exfield.c30
-rw-r--r--drivers/acpi/acpica/exfldio.c38
-rw-r--r--drivers/acpi/acpica/exmisc.c2
-rw-r--r--drivers/acpi/acpica/exmutex.c2
-rw-r--r--drivers/acpi/acpica/exnames.c2
-rw-r--r--drivers/acpi/acpica/exoparg1.c2
-rw-r--r--drivers/acpi/acpica/exoparg2.c2
-rw-r--r--drivers/acpi/acpica/exoparg3.c2
-rw-r--r--drivers/acpi/acpica/exoparg6.c2
-rw-r--r--drivers/acpi/acpica/exprep.c27
-rw-r--r--drivers/acpi/acpica/exregion.c2
-rw-r--r--drivers/acpi/acpica/exresnte.c2
-rw-r--r--drivers/acpi/acpica/exresolv.c2
-rw-r--r--drivers/acpi/acpica/exresop.c2
-rw-r--r--drivers/acpi/acpica/exstore.c2
-rw-r--r--drivers/acpi/acpica/exstoren.c2
-rw-r--r--drivers/acpi/acpica/exstorob.c2
-rw-r--r--drivers/acpi/acpica/exsystem.c2
-rw-r--r--drivers/acpi/acpica/exutils.c27
-rw-r--r--drivers/acpi/acpica/hwacpi.c2
-rw-r--r--drivers/acpi/acpica/hwgpe.c2
-rw-r--r--drivers/acpi/acpica/hwpci.c2
-rw-r--r--drivers/acpi/acpica/hwregs.c2
-rw-r--r--drivers/acpi/acpica/hwsleep.c2
-rw-r--r--drivers/acpi/acpica/hwtimer.c2
-rw-r--r--drivers/acpi/acpica/hwvalid.c4
-rw-r--r--drivers/acpi/acpica/hwxface.c4
-rw-r--r--drivers/acpi/acpica/nsaccess.c2
-rw-r--r--drivers/acpi/acpica/nsalloc.c2
-rw-r--r--drivers/acpi/acpica/nsdump.c2
-rw-r--r--drivers/acpi/acpica/nsdumpdv.c2
-rw-r--r--drivers/acpi/acpica/nseval.c2
-rw-r--r--drivers/acpi/acpica/nsinit.c2
-rw-r--r--drivers/acpi/acpica/nsload.c2
-rw-r--r--drivers/acpi/acpica/nsnames.c2
-rw-r--r--drivers/acpi/acpica/nsobject.c2
-rw-r--r--drivers/acpi/acpica/nsparse.c2
-rw-r--r--drivers/acpi/acpica/nspredef.c31
-rw-r--r--drivers/acpi/acpica/nsrepair.c3
-rw-r--r--drivers/acpi/acpica/nsrepair2.c7
-rw-r--r--drivers/acpi/acpica/nssearch.c2
-rw-r--r--drivers/acpi/acpica/nsutils.c2
-rw-r--r--drivers/acpi/acpica/nswalk.c2
-rw-r--r--drivers/acpi/acpica/nsxfeval.c2
-rw-r--r--drivers/acpi/acpica/nsxfname.c2
-rw-r--r--drivers/acpi/acpica/nsxfobj.c2
-rw-r--r--drivers/acpi/acpica/psargs.c143
-rw-r--r--drivers/acpi/acpica/psloop.c2
-rw-r--r--drivers/acpi/acpica/psopcode.c15
-rw-r--r--drivers/acpi/acpica/psparse.c2
-rw-r--r--drivers/acpi/acpica/psscope.c2
-rw-r--r--drivers/acpi/acpica/pstree.c8
-rw-r--r--drivers/acpi/acpica/psutils.c2
-rw-r--r--drivers/acpi/acpica/pswalk.c2
-rw-r--r--drivers/acpi/acpica/psxface.c2
-rw-r--r--drivers/acpi/acpica/rsaddr.c2
-rw-r--r--drivers/acpi/acpica/rscalc.c89
-rw-r--r--drivers/acpi/acpica/rscreate.c69
-rw-r--r--drivers/acpi/acpica/rsdump.c196
-rw-r--r--drivers/acpi/acpica/rsinfo.c58
-rw-r--r--drivers/acpi/acpica/rsio.c2
-rw-r--r--drivers/acpi/acpica/rsirq.c33
-rw-r--r--drivers/acpi/acpica/rslist.c77
-rw-r--r--drivers/acpi/acpica/rsmemory.c2
-rw-r--r--drivers/acpi/acpica/rsmisc.c269
-rw-r--r--drivers/acpi/acpica/rsserial.c441
-rw-r--r--drivers/acpi/acpica/rsutils.c56
-rw-r--r--drivers/acpi/acpica/rsxface.c52
-rw-r--r--drivers/acpi/acpica/tbfadt.c41
-rw-r--r--drivers/acpi/acpica/tbfind.c2
-rw-r--r--drivers/acpi/acpica/tbinstal.c2
-rw-r--r--drivers/acpi/acpica/tbutils.c9
-rw-r--r--drivers/acpi/acpica/tbxface.c2
-rw-r--r--drivers/acpi/acpica/tbxfroot.c2
-rw-r--r--drivers/acpi/acpica/utaddress.c294
-rw-r--r--drivers/acpi/acpica/utalloc.c2
-rw-r--r--drivers/acpi/acpica/utcopy.c2
-rw-r--r--drivers/acpi/acpica/utdebug.c2
-rw-r--r--drivers/acpi/acpica/utdecode.c6
-rw-r--r--drivers/acpi/acpica/utdelete.c15
-rw-r--r--drivers/acpi/acpica/uteval.c2
-rw-r--r--drivers/acpi/acpica/utglobal.c8
-rw-r--r--drivers/acpi/acpica/utids.c2
-rw-r--r--drivers/acpi/acpica/utinit.c3
-rw-r--r--drivers/acpi/acpica/utlock.c2
-rw-r--r--drivers/acpi/acpica/utmath.c2
-rw-r--r--drivers/acpi/acpica/utmisc.c2
-rw-r--r--drivers/acpi/acpica/utmutex.c11
-rw-r--r--drivers/acpi/acpica/utobject.c2
-rw-r--r--drivers/acpi/acpica/utosi.c2
-rw-r--r--drivers/acpi/acpica/utresrc.c278
-rw-r--r--drivers/acpi/acpica/utstate.c2
-rw-r--r--drivers/acpi/acpica/utxface.c40
-rw-r--r--drivers/acpi/acpica/utxferror.c2
-rw-r--r--drivers/acpi/acpica/utxfmutex.c187
-rw-r--r--drivers/acpi/apei/apei-base.c150
-rw-r--r--drivers/acpi/apei/apei-internal.h6
-rw-r--r--drivers/acpi/apei/einj.c290
-rw-r--r--drivers/acpi/apei/erst.c42
-rw-r--r--drivers/acpi/apei/ghes.c104
-rw-r--r--drivers/acpi/apei/hest.c7
-rw-r--r--drivers/acpi/atomicio.c77
-rw-r--r--drivers/acpi/battery.c2
-rw-r--r--drivers/acpi/dock.c2
-rw-r--r--drivers/acpi/ec_sys.c2
-rw-r--r--drivers/acpi/numa.c6
-rw-r--r--drivers/acpi/nvs.c53
-rw-r--r--drivers/acpi/osl.c242
-rw-r--r--drivers/acpi/pci_irq.c10
-rw-r--r--drivers/acpi/pci_root.c7
-rw-r--r--drivers/acpi/pci_slot.c2
-rw-r--r--drivers/acpi/processor_core.c26
-rw-r--r--drivers/acpi/processor_driver.c26
-rw-r--r--drivers/acpi/processor_thermal.c1
-rw-r--r--drivers/acpi/sleep.c16
-rw-r--r--drivers/acpi/video.c6
-rw-r--r--drivers/amba/bus.c140
-rw-r--r--drivers/ata/ahci.c26
-rw-r--r--drivers/ata/ahci_platform.c68
-rw-r--r--drivers/ata/ata_piix.c7
-rw-r--r--drivers/ata/libahci.c5
-rw-r--r--drivers/ata/libata-core.c190
-rw-r--r--drivers/ata/libata-scsi.c3
-rw-r--r--drivers/ata/libata-sff.c4
-rw-r--r--drivers/ata/libata-transport.c6
-rw-r--r--drivers/ata/libata.h1
-rw-r--r--drivers/ata/pata_arasan_cf.c12
-rw-r--r--drivers/ata/pata_at91.c21
-rw-r--r--drivers/ata/pata_bf54x.c187
-rw-r--r--drivers/ata/pata_cs5536.c99
-rw-r--r--drivers/ata/pata_imx.c12
-rw-r--r--drivers/ata/pata_ixp4xx_cf.c13
-rw-r--r--drivers/ata/pata_mpc52xx.c21
-rw-r--r--drivers/ata/pata_of_platform.c27
-rw-r--r--drivers/ata/pata_palmld.c13
-rw-r--r--drivers/ata/pata_platform.c12
-rw-r--r--drivers/ata/pata_pxa.c13
-rw-r--r--drivers/ata/pata_rb532_cf.c21
-rw-r--r--drivers/ata/sata_dwc_460ex.c13
-rw-r--r--drivers/ata/sata_fsl.c25
-rw-r--r--drivers/ata/sata_mv.c19
-rw-r--r--drivers/ata/sata_nv.c6
-rw-r--r--drivers/ata/sata_sil24.c2
-rw-r--r--drivers/atm/he.c6
-rw-r--r--drivers/atm/iphase.c4
-rw-r--r--drivers/base/Kconfig15
-rw-r--r--drivers/base/Makefile1
-rw-r--r--drivers/base/base.h2
-rw-r--r--drivers/base/core.c10
-rw-r--r--drivers/base/cpu.c171
-rw-r--r--drivers/base/devres.c2
-rw-r--r--drivers/base/devtmpfs.c8
-rw-r--r--drivers/base/dma-buf.c291
-rw-r--r--drivers/base/firmware_class.c18
-rw-r--r--drivers/base/memory.c177
-rw-r--r--drivers/base/node.c154
-rw-r--r--drivers/base/platform.c115
-rw-r--r--drivers/base/power/Makefile2
-rw-r--r--drivers/base/power/domain.c554
-rw-r--r--drivers/base/power/domain_governor.c170
-rw-r--r--drivers/base/power/generic_ops.c91
-rw-r--r--drivers/base/power/main.c375
-rw-r--r--drivers/base/power/qos.c49
-rw-r--r--drivers/base/power/runtime.c157
-rw-r--r--drivers/base/regmap/Kconfig3
-rw-r--r--drivers/base/regmap/Makefile4
-rw-r--r--drivers/base/regmap/internal.h6
-rw-r--r--drivers/base/regmap/regcache-indexed.c64
-rw-r--r--drivers/base/regmap/regcache-lzo.c21
-rw-r--r--drivers/base/regmap/regcache-rbtree.c61
-rw-r--r--drivers/base/regmap/regcache.c87
-rw-r--r--drivers/base/regmap/regmap-irq.c302
-rw-r--r--drivers/base/regmap/regmap.c179
-rw-r--r--drivers/base/topology.c51
-rw-r--r--drivers/bcma/bcma_private.h4
-rw-r--r--drivers/bcma/host_pci.c62
-rw-r--r--drivers/bcma/main.c40
-rw-r--r--drivers/bcma/sprom.c61
-rw-r--r--drivers/block/Kconfig13
-rw-r--r--drivers/block/Makefile2
-rw-r--r--drivers/block/amiflop.c2
-rw-r--r--drivers/block/aoe/aoechr.c2
-rw-r--r--drivers/block/brd.c9
-rw-r--r--drivers/block/cciss.c12
-rw-r--r--drivers/block/drbd/drbd_int.h4
-rw-r--r--drivers/block/drbd/drbd_main.c4
-rw-r--r--drivers/block/floppy.c1
-rw-r--r--drivers/block/loop.c5
-rw-r--r--drivers/block/mtip32xx/Kconfig9
-rw-r--r--drivers/block/mtip32xx/Makefile5
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c3651
-rw-r--r--drivers/block/mtip32xx/mtip32xx.h423
-rw-r--r--drivers/block/nvme.c1739
-rw-r--r--drivers/block/paride/bpck6.c5
-rw-r--r--drivers/block/paride/pcd.c2
-rw-r--r--drivers/block/paride/pd.c3
-rw-r--r--drivers/block/paride/pf.c4
-rw-r--r--drivers/block/paride/pg.c3
-rw-r--r--drivers/block/paride/pt.c4
-rw-r--r--drivers/block/pktcdvd.c2
-rw-r--r--drivers/block/rbd.c103
-rw-r--r--drivers/block/swim.c1
-rw-r--r--drivers/block/swim3.c362
-rw-r--r--drivers/block/sx8.c12
-rw-r--r--drivers/block/ub.c3
-rw-r--r--drivers/block/virtio_blk.c91
-rw-r--r--drivers/block/xd.c2
-rw-r--r--drivers/block/xen-blkback/blkback.c84
-rw-r--r--drivers/block/xen-blkback/common.h67
-rw-r--r--drivers/block/xen-blkback/xenbus.c23
-rw-r--r--drivers/block/xen-blkfront.c90
-rw-r--r--drivers/block/xsysace.c10
-rw-r--r--drivers/bluetooth/Kconfig6
-rw-r--r--drivers/bluetooth/ath3k.c15
-rw-r--r--drivers/bluetooth/bfusb.c4
-rw-r--r--drivers/bluetooth/bluecard_cs.c4
-rw-r--r--drivers/bluetooth/bt3c_cs.c4
-rw-r--r--drivers/bluetooth/btmrvl_main.c2
-rw-r--r--drivers/bluetooth/btmrvl_sdio.c15
-rw-r--r--drivers/bluetooth/btuart_cs.c4
-rw-r--r--drivers/bluetooth/btusb.c41
-rw-r--r--drivers/bluetooth/dtl1_cs.c4
-rw-r--r--drivers/bluetooth/hci_bcsp.c4
-rw-r--r--drivers/bluetooth/hci_ldisc.c2
-rw-r--r--drivers/bluetooth/hci_vhci.c13
-rw-r--r--drivers/cdrom/cdrom.c16
-rw-r--r--drivers/char/agp/amd64-agp.c2
-rw-r--r--drivers/char/agp/generic.c8
-rw-r--r--drivers/char/agp/sis-agp.c2
-rw-r--r--drivers/char/hw_random/atmel-rng.c12
-rw-r--r--drivers/char/hw_random/n2-drv.c13
-rw-r--r--drivers/char/hw_random/nomadik-rng.c2
-rw-r--r--drivers/char/hw_random/octeon-rng.c13
-rw-r--r--drivers/char/hw_random/pasemi-rng.c12
-rw-r--r--drivers/char/hw_random/picoxcell-rng.c12
-rw-r--r--drivers/char/hw_random/ppc4xx-rng.c12
-rw-r--r--drivers/char/hw_random/timeriomem-rng.c13
-rw-r--r--drivers/char/hw_random/virtio-rng.c2
-rw-r--r--drivers/char/i8k.c8
-rw-r--r--drivers/char/ipmi/ipmi_bt_sm.c2
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c2
-rw-r--r--drivers/char/ipmi/ipmi_watchdog.c41
-rw-r--r--drivers/char/lp.c2
-rw-r--r--drivers/char/mem.c4
-rw-r--r--drivers/char/misc.c2
-rw-r--r--drivers/char/nwflash.c2
-rw-r--r--drivers/char/pcmcia/synclink_cs.c2
-rw-r--r--drivers/char/ramoops.c24
-rw-r--r--drivers/char/random.c16
-rw-r--r--drivers/char/raw.c2
-rw-r--r--drivers/char/tile-srom.c2
-rw-r--r--drivers/char/tpm/Kconfig2
-rw-r--r--drivers/char/tpm/tpm.c146
-rw-r--r--drivers/char/tpm/tpm.h12
-rw-r--r--drivers/char/tpm/tpm_tis.c90
-rw-r--r--drivers/char/virtio_console.c140
-rw-r--r--drivers/clk/Kconfig3
-rw-r--r--drivers/clocksource/acpi_pm.c2
-rw-r--r--drivers/clocksource/clksrc-dbx500-prcmu.c16
-rw-r--r--drivers/clocksource/i8253.c6
-rw-r--r--drivers/clocksource/tcb_clksrc.c4
-rw-r--r--drivers/cpufreq/Kconfig.arm15
-rw-r--r--drivers/cpufreq/Makefile2
-rw-r--r--drivers/cpufreq/cpufreq.c82
-rw-r--r--drivers/cpufreq/cpufreq_conservative.c50
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c57
-rw-r--r--drivers/cpufreq/cpufreq_stats.c6
-rw-r--r--drivers/cpufreq/cpufreq_userspace.c8
-rw-r--r--drivers/cpufreq/exynos-cpufreq.c290
-rw-r--r--drivers/cpufreq/exynos4210-cpufreq.c643
-rw-r--r--drivers/cpufreq/omap-cpufreq.c274
-rw-r--r--drivers/cpufreq/powernow-k8.c47
-rw-r--r--drivers/cpufreq/s3c64xx-cpufreq.c35
-rw-r--r--drivers/cpuidle/cpuidle.c12
-rw-r--r--drivers/cpuidle/cpuidle.h10
-rw-r--r--drivers/cpuidle/sysfs.c74
-rw-r--r--drivers/crypto/amcc/crypto4xx_core.c13
-rw-r--r--drivers/crypto/caam/caamalg.c67
-rw-r--r--drivers/crypto/caam/compat.h1
-rw-r--r--drivers/crypto/caam/ctrl.c26
-rw-r--r--drivers/crypto/caam/desc.h2265
-rw-r--r--drivers/crypto/caam/desc_constr.h7
-rw-r--r--drivers/crypto/caam/regs.h1
-rw-r--r--drivers/crypto/mv_cesa.c12
-rw-r--r--drivers/crypto/picoxcell_crypto.c16
-rw-r--r--drivers/crypto/s5p-sss.c13
-rw-r--r--drivers/crypto/talitos.c493
-rw-r--r--drivers/crypto/talitos.h45
-rw-r--r--drivers/devfreq/Kconfig13
-rw-r--r--drivers/devfreq/Makefile3
-rw-r--r--drivers/devfreq/devfreq.c15
-rw-r--r--drivers/devfreq/exynos4_bus.c1135
-rw-r--r--drivers/dma/Kconfig27
-rw-r--r--drivers/dma/Makefile1
-rw-r--r--drivers/dma/amba-pl08x.c43
-rw-r--r--drivers/dma/at_hdmac.c103
-rw-r--r--drivers/dma/at_hdmac_regs.h1
-rw-r--r--drivers/dma/coh901318.c12
-rw-r--r--drivers/dma/coh901318_lli.c23
-rw-r--r--drivers/dma/coh901318_lli.h4
-rw-r--r--drivers/dma/dmaengine.c4
-rw-r--r--drivers/dma/dmatest.c46
-rw-r--r--drivers/dma/dw_dmac.c83
-rw-r--r--drivers/dma/dw_dmac_regs.h1
-rw-r--r--drivers/dma/ep93xx_dma.c90
-rw-r--r--drivers/dma/fsldma.c4
-rw-r--r--drivers/dma/imx-dma.c10
-rw-r--r--drivers/dma/imx-sdma.c27
-rw-r--r--drivers/dma/intel_mid_dma.c39
-rw-r--r--drivers/dma/intel_mid_dma_regs.h4
-rw-r--r--drivers/dma/iop-adma.c16
-rw-r--r--drivers/dma/ipu/ipu_idmac.c29
-rw-r--r--drivers/dma/mpc512x_dma.c12
-rw-r--r--drivers/dma/mv_xor.c11
-rw-r--r--drivers/dma/mxs-dma.c59
-rw-r--r--drivers/dma/pch_dma.c20
-rw-r--r--drivers/dma/pl330.c132
-rw-r--r--drivers/dma/shdma.c72
-rw-r--r--drivers/dma/sirf-dma.c707
-rw-r--r--drivers/dma/ste_dma40.c441
-rw-r--r--drivers/dma/ste_dma40_ll.h11
-rw-r--r--drivers/dma/timb_dma.c30
-rw-r--r--drivers/dma/txx9dmac.c12
-rw-r--r--drivers/edac/i7core_edac.c4
-rw-r--r--drivers/edac/i82975x_edac.c30
-rw-r--r--drivers/edac/mce_amd.c4
-rw-r--r--drivers/edac/ppc4xx_edac.c2
-rw-r--r--drivers/edac/r82600_edac.c2
-rw-r--r--drivers/edac/sb_edac.c8
-rw-r--r--drivers/firewire/sbp2.c2
-rw-r--r--drivers/firmware/Kconfig12
-rw-r--r--drivers/firmware/Makefile1
-rw-r--r--drivers/firmware/efivars.c20
-rw-r--r--drivers/firmware/iscsi_ibft.c54
-rw-r--r--drivers/firmware/iscsi_ibft_find.c26
-rw-r--r--drivers/firmware/sigma.c118
-rw-r--r--drivers/gpio/Kconfig32
-rw-r--r--drivers/gpio/Makefile7
-rw-r--r--drivers/gpio/devres.c90
-rw-r--r--drivers/gpio/gpio-adp5520.c12
-rw-r--r--drivers/gpio/gpio-adp5588.c5
-rw-r--r--drivers/gpio/gpio-bt8xx.c3
-rw-r--r--drivers/gpio/gpio-cs5535.c14
-rw-r--r--drivers/gpio/gpio-da9052.c33
-rw-r--r--drivers/gpio/gpio-generic.c12
-rw-r--r--drivers/gpio/gpio-janz-ttl.c15
-rw-r--r--drivers/gpio/gpio-ml-ioh.c40
-rw-r--r--drivers/gpio/gpio-mpc8xxx.c18
-rw-r--r--drivers/gpio/gpio-nomadik.c4
-rw-r--r--drivers/gpio/gpio-pcf857x.c5
-rw-r--r--drivers/gpio/gpio-pch.c11
-rw-r--r--drivers/gpio/gpio-pl061.c206
-rw-r--r--drivers/gpio/gpio-pxa.c377
-rw-r--r--drivers/gpio/gpio-rdc321x.c13
-rw-r--r--drivers/gpio/gpio-sa1100.c6
-rw-r--r--drivers/gpio/gpio-samsung.c105
-rw-r--r--drivers/gpio/gpio-sch.c13
-rw-r--r--drivers/gpio/gpio-stmpe.c25
-rw-r--r--drivers/gpio/gpio-tegra.c9
-rw-r--r--drivers/gpio/gpio-timberdale.c13
-rw-r--r--drivers/gpio/gpio-tps65910.c2
-rw-r--r--drivers/gpio/gpio-ucb1400.c13
-rw-r--r--drivers/gpio/gpio-vr41xx.c13
-rw-r--r--drivers/gpio/gpio-vx855.c12
-rw-r--r--drivers/gpio/gpio-wm8994.c79
-rw-r--r--drivers/gpio/gpio-xilinx.c1
-rw-r--r--drivers/gpio/gpiolib.c6
-rw-r--r--drivers/gpu/drm/Kconfig3
-rw-r--r--drivers/gpu/drm/Makefile3
-rw-r--r--drivers/gpu/drm/drm_context.c5
-rw-r--r--drivers/gpu/drm/drm_crtc.c608
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c77
-rw-r--r--drivers/gpu/drm/drm_drv.c14
-rw-r--r--drivers/gpu/drm/drm_edid.c103
-rw-r--r--drivers/gpu/drm/drm_edid_modes.h284
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c7
-rw-r--r--drivers/gpu/drm/drm_fops.c2
-rw-r--r--drivers/gpu/drm/drm_ioctl.c15
-rw-r--r--drivers/gpu/drm/drm_lock.c3
-rw-r--r--drivers/gpu/drm/drm_sman.c351
-rw-r--r--drivers/gpu/drm/drm_sysfs.c2
-rw-r--r--drivers/gpu/drm/exynos/Kconfig7
-rw-r--r--drivers/gpu/drm/exynos/Makefile5
-rw-r--r--drivers/gpu/drm/exynos/exynos_ddc.c58
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_buf.c65
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_buf.h20
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_connector.c78
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.c139
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.h25
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c40
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.h32
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_encoder.c168
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_encoder.h6
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.c190
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.h24
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c126
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c255
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c248
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.h75
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_hdmi.c439
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_hdmi.h73
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_plane.c163
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_plane.h14
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c1176
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.h87
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmiphy.c58
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c1070
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.h92
-rw-r--r--drivers/gpu/drm/exynos/regs-hdmi.h147
-rw-r--r--drivers/gpu/drm/exynos/regs-mixer.h141
-rw-r--r--drivers/gpu/drm/exynos/regs-vp.h91
-rw-r--r--drivers/gpu/drm/gma500/Kconfig27
-rw-r--r--drivers/gpu/drm/gma500/Makefile40
-rw-r--r--drivers/gpu/drm/gma500/accel_2d.c364
-rw-r--r--drivers/gpu/drm/gma500/backlight.c49
-rw-r--r--drivers/gpu/drm/gma500/cdv_device.c351
-rw-r--r--drivers/gpu/drm/gma500/cdv_device.h36
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_crt.c339
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_display.c1508
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_hdmi.c392
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_lvds.c732
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.c831
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.h47
-rw-r--r--drivers/gpu/drm/gma500/gem.c292
-rw-r--r--drivers/gpu/drm/gma500/gem_glue.c89
-rw-r--r--drivers/gpu/drm/gma500/gem_glue.h2
-rw-r--r--drivers/gpu/drm/gma500/gtt.c553
-rw-r--r--drivers/gpu/drm/gma500/gtt.h64
-rw-r--r--drivers/gpu/drm/gma500/intel_bios.c303
-rw-r--r--drivers/gpu/drm/gma500/intel_bios.h430
-rw-r--r--drivers/gpu/drm/gma500/intel_gmbus.c493
-rw-r--r--drivers/gpu/drm/gma500/intel_i2c.c169
-rw-r--r--drivers/gpu/drm/gma500/intel_opregion.c81
-rw-r--r--drivers/gpu/drm/gma500/mid_bios.c263
-rw-r--r--drivers/gpu/drm/gma500/mid_bios.h21
-rw-r--r--drivers/gpu/drm/gma500/mmu.c858
-rw-r--r--drivers/gpu/drm/gma500/oaktrail.h252
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_crtc.c604
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_device.c512
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_hdmi.c865
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c328
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_lvds.c449
-rw-r--r--drivers/gpu/drm/gma500/power.c316
-rw-r--r--drivers/gpu/drm/gma500/power.h67
-rw-r--r--drivers/gpu/drm/gma500/psb_device.c328
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.c703
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.h956
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_display.c1446
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_display.h28
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_drv.h289
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_lvds.c868
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_modes.c75
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_reg.h1309
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_sdvo.c2623
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_sdvo_regs.h723
-rw-r--r--drivers/gpu/drm/gma500/psb_irq.c564
-rw-r--r--drivers/gpu/drm/gma500/psb_irq.h45
-rw-r--r--drivers/gpu/drm/gma500/psb_lid.c88
-rw-r--r--drivers/gpu/drm/gma500/psb_reg.h582
-rw-r--r--drivers/gpu/drm/i810/i810_dma.c19
-rw-r--r--drivers/gpu/drm/i810/i810_drv.c24
-rw-r--r--drivers/gpu/drm/i810/i810_drv.h6
-rw-r--r--drivers/gpu/drm/i915/Makefile1
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c87
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c15
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c75
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h22
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c10
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c82
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c22
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h216
-rw-r--r--drivers/gpu/drm/i915/intel_display.c445
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c174
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h52
-rw-r--r--drivers/gpu/drm/i915/intel_fb.c19
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c8
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c8
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c16
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c30
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c36
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo_regs.h2
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c668
-rw-r--r--drivers/gpu/drm/mga/mga_drv.c29
-rw-r--r--drivers/gpu/drm/nouveau/Makefile9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c79
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c904
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.h69
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c119
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_channel.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c403
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.h36
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_crtc.h6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_debugfs.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c243
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.c14
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dp.c22
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.c84
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h141
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fb.h4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c22
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gpio.c400
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gpio.h71
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hdmi.c258
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hwsq.h115
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_i2c.c556
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_i2c.h21
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.c10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mxm.c677
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_notifier.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_object.c27
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_perf.c18
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_pm.c382
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_pm.h24
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c176
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_state.c198
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_temp.c29
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vm.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vm.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_volt.c15
-rw-r--r--drivers/gpu/drm/nouveau/nv04_crtc.c14
-rw-r--r--drivers/gpu/drm/nouveau/nv04_dac.c14
-rw-r--r--drivers/gpu/drm/nouveau/nv04_dfp.c16
-rw-r--r--drivers/gpu/drm/nouveau/nv04_display.c5
-rw-r--r--drivers/gpu/drm/nouveau/nv04_pm.c109
-rw-r--r--drivers/gpu/drm/nouveau/nv04_timer.c3
-rw-r--r--drivers/gpu/drm/nouveau/nv10_gpio.c117
-rw-r--r--drivers/gpu/drm/nouveau/nv17_tv.c20
-rw-r--r--drivers/gpu/drm/nouveau/nv40_pm.c50
-rw-r--r--drivers/gpu/drm/nouveau/nv50_crtc.c347
-rw-r--r--drivers/gpu/drm/nouveau/nv50_dac.c7
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c144
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.h4
-rw-r--r--drivers/gpu/drm/nouveau/nv50_evo.c12
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fifo.c6
-rw-r--r--drivers/gpu/drm/nouveau/nv50_gpio.c272
-rw-r--r--drivers/gpu/drm/nouveau/nv50_graph.c4
-rw-r--r--drivers/gpu/drm/nouveau/nv50_pm.c783
-rw-r--r--drivers/gpu/drm/nouveau/nv50_sor.c28
-rw-r--r--drivers/gpu/drm/nouveau/nv50_vm.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv84_bsp.c83
-rw-r--r--drivers/gpu/drm/nouveau/nv84_vp.c83
-rw-r--r--drivers/gpu/drm/nouveau/nv98_crypt.c78
-rw-r--r--drivers/gpu/drm/nouveau/nv98_ppp.c78
-rw-r--r--drivers/gpu/drm/nouveau/nva3_copy.fuc262
-rw-r--r--drivers/gpu/drm/nouveau/nva3_copy.fuc.h2
-rw-r--r--drivers/gpu/drm/nouveau/nva3_pm.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_copy.fuc.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_graph.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_graph.fuc56
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_graph.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_grctx.c127
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_grgpc.fuc217
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h80
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_grhub.fuc311
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h96
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_pm.c237
-rw-r--r--drivers/gpu/drm/nouveau/nvd0_display.c835
-rw-r--r--drivers/gpu/drm/r128/r128_drv.c30
-rw-r--r--drivers/gpu/drm/radeon/Makefile5
-rw-r--r--drivers/gpu/drm/radeon/atom.c2
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c41
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c35
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c256
-rw-r--r--drivers/gpu/drm/radeon/evergreen_blit_kms.c242
-rw-r--r--drivers/gpu/drm/radeon/evergreen_cs.c396
-rw-r--r--drivers/gpu/drm/radeon/evergreen_reg.h42
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h96
-rw-r--r--drivers/gpu/drm/radeon/ni.c395
-rw-r--r--drivers/gpu/drm/radeon/nid.h35
-rw-r--r--drivers/gpu/drm/radeon/r100.c237
-rw-r--r--drivers/gpu/drm/radeon/r200.c21
-rw-r--r--drivers/gpu/drm/radeon/r300.c160
-rw-r--r--drivers/gpu/drm/radeon/r420.c49
-rw-r--r--drivers/gpu/drm/radeon/r500_reg.h2
-rw-r--r--drivers/gpu/drm/radeon/r520.c25
-rw-r--r--drivers/gpu/drm/radeon/r600.c273
-rw-r--r--drivers/gpu/drm/radeon/r600_audio.c57
-rw-r--r--drivers/gpu/drm/radeon/r600_blit_kms.c235
-rw-r--r--drivers/gpu/drm/radeon/r600_cp.c2
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c8
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c65
-rw-r--r--drivers/gpu/drm/radeon/r600d.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon.h367
-rw-r--r--drivers/gpu/drm/radeon/radeon_acpi.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c197
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h48
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_benchmark.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c302
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c76
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c71
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c7
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c24
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c307
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c425
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c147
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c24
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c47
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_crtc.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c38
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.h32
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c34
-rw-r--r--drivers/gpu/drm/radeon/radeon_ring.c465
-rw-r--r--drivers/gpu/drm/radeon/radeon_sa.c189
-rw-r--r--drivers/gpu/drm/radeon/radeon_semaphore.c178
-rw-r--r--drivers/gpu/drm/radeon/radeon_test.c269
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c355
-rw-r--r--drivers/gpu/drm/radeon/rs400.c27
-rw-r--r--drivers/gpu/drm/radeon/rs600.c54
-rw-r--r--drivers/gpu/drm/radeon/rs690.c30
-rw-r--r--drivers/gpu/drm/radeon/rv515.c106
-rw-r--r--drivers/gpu/drm/radeon/rv770.c70
-rw-r--r--drivers/gpu/drm/savage/savage_drv.c23
-rw-r--r--drivers/gpu/drm/sis/sis_drv.c56
-rw-r--r--drivers/gpu/drm/sis/sis_drv.h7
-rw-r--r--drivers/gpu/drm/sis/sis_mm.c199
-rw-r--r--drivers/gpu/drm/tdfx/tdfx_drv.c23
-rw-r--r--drivers/gpu/drm/ttm/Makefile4
-rw-r--r--drivers/gpu/drm/ttm/ttm_agp_backend.c105
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c90
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c32
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c9
-rw-r--r--drivers/gpu/drm/ttm/ttm_memory.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc.c184
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc_dma.c1142
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c324
-rw-r--r--drivers/gpu/drm/via/via_drv.c48
-rw-r--r--drivers/gpu/drm/via/via_drv.h7
-rw-r--r--drivers/gpu/drm/via/via_map.c10
-rw-r--r--drivers/gpu/drm/via/via_mm.c135
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c71
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c30
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h5
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c20
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c427
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.h6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c26
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c58
-rw-r--r--drivers/hid/Kconfig38
-rw-r--r--drivers/hid/Makefile11
-rw-r--r--drivers/hid/hid-core.c84
-rw-r--r--drivers/hid/hid-debug.c8
-rw-r--r--drivers/hid/hid-emsff.c2
-rw-r--r--drivers/hid/hid-hyperv.c (renamed from drivers/staging/hv/hv_mouse.c)303
-rw-r--r--drivers/hid/hid-ids.h45
-rw-r--r--drivers/hid/hid-input.c228
-rw-r--r--drivers/hid/hid-lg4ff.c2
-rw-r--r--drivers/hid/hid-multitouch.c213
-rw-r--r--drivers/hid/hid-picolcd.c4
-rw-r--r--drivers/hid/hid-pl.c4
-rw-r--r--drivers/hid/hid-prodikeys.c2
-rw-r--r--drivers/hid/hid-quanta.c261
-rw-r--r--drivers/hid/hid-roccat-common.c4
-rw-r--r--drivers/hid/hid-roccat-isku.c487
-rw-r--r--drivers/hid/hid-roccat-isku.h147
-rw-r--r--drivers/hid/hid-roccat-kone.c4
-rw-r--r--drivers/hid/hid-twinhan.c2
-rw-r--r--drivers/hid/hid-wacom.c195
-rw-r--r--drivers/hid/hid-wiimote-core.c (renamed from drivers/hid/hid-wiimote.c)256
-rw-r--r--drivers/hid/hid-wiimote-debug.c227
-rw-r--r--drivers/hid/hid-wiimote-ext.c752
-rw-r--r--drivers/hid/hid-wiimote.h208
-rw-r--r--drivers/hid/usbhid/hid-core.c241
-rw-r--r--drivers/hid/usbhid/hid-quirks.c4
-rw-r--r--drivers/hid/usbhid/hiddev.c2
-rw-r--r--drivers/hid/usbhid/usbhid.h3
-rw-r--r--drivers/hid/usbhid/usbkbd.c64
-rw-r--r--drivers/hv/hv_kvp.c10
-rw-r--r--drivers/hv/vmbus_drv.c5
-rw-r--r--drivers/hwmon/Kconfig12
-rw-r--r--drivers/hwmon/abituguru.c2
-rw-r--r--drivers/hwmon/abituguru3.c4
-rw-r--r--drivers/hwmon/acpi_power_meter.c8
-rw-r--r--drivers/hwmon/ad7314.c1
-rw-r--r--drivers/hwmon/adcxx.c2
-rw-r--r--drivers/hwmon/adm1021.c2
-rw-r--r--drivers/hwmon/adm1031.c155
-rw-r--r--drivers/hwmon/adm9240.c2
-rw-r--r--drivers/hwmon/ads7828.c4
-rw-r--r--drivers/hwmon/ads7871.c1
-rw-r--r--drivers/hwmon/adt7411.c2
-rw-r--r--drivers/hwmon/adt7462.c28
-rw-r--r--drivers/hwmon/adt7470.c26
-rw-r--r--drivers/hwmon/adt7475.c20
-rw-r--r--drivers/hwmon/amc6821.c14
-rw-r--r--drivers/hwmon/applesmc.c6
-rw-r--r--drivers/hwmon/asc7621.c24
-rw-r--r--drivers/hwmon/coretemp.c33
-rw-r--r--drivers/hwmon/dme1737.c10
-rw-r--r--drivers/hwmon/ds620.c2
-rw-r--r--drivers/hwmon/emc1403.c6
-rw-r--r--drivers/hwmon/emc2103.c12
-rw-r--r--drivers/hwmon/emc6w201.c6
-rw-r--r--drivers/hwmon/exynos4_tmu.c12
-rw-r--r--drivers/hwmon/f71882fg.c32
-rw-r--r--drivers/hwmon/f75375s.c309
-rw-r--r--drivers/hwmon/g760a.c2
-rw-r--r--drivers/hwmon/gpio-fan.c19
-rw-r--r--drivers/hwmon/ibmaem.c2
-rw-r--r--drivers/hwmon/it87.c99
-rw-r--r--drivers/hwmon/jc42.c6
-rw-r--r--drivers/hwmon/jz4740-hwmon.c16
-rw-r--r--drivers/hwmon/lm63.c592
-rw-r--r--drivers/hwmon/lm73.c2
-rw-r--r--drivers/hwmon/lm75.c25
-rw-r--r--drivers/hwmon/lm75.h5
-rw-r--r--drivers/hwmon/lm80.c70
-rw-r--r--drivers/hwmon/lm90.c12
-rw-r--r--drivers/hwmon/lm93.c4
-rw-r--r--drivers/hwmon/lm95241.c8
-rw-r--r--drivers/hwmon/lm95245.c8
-rw-r--r--drivers/hwmon/ltc4261.c1
-rw-r--r--drivers/hwmon/max1111.c17
-rw-r--r--drivers/hwmon/max16065.c2
-rw-r--r--drivers/hwmon/max1668.c6
-rw-r--r--drivers/hwmon/max6639.c8
-rw-r--r--drivers/hwmon/max6642.c2
-rw-r--r--drivers/hwmon/max6650.c2
-rw-r--r--drivers/hwmon/ntc_thermistor.c14
-rw-r--r--drivers/hwmon/pc87427.c6
-rw-r--r--drivers/hwmon/pmbus/Kconfig9
-rw-r--r--drivers/hwmon/pmbus/adm1275.c71
-rw-r--r--drivers/hwmon/pmbus/pmbus.c2
-rw-r--r--drivers/hwmon/pmbus/zl6100.c43
-rw-r--r--drivers/hwmon/s3c-hwmon.c13
-rw-r--r--drivers/hwmon/sch5627.c13
-rw-r--r--drivers/hwmon/sch5636.c13
-rw-r--r--drivers/hwmon/sht15.c2
-rw-r--r--drivers/hwmon/tmp102.c2
-rw-r--r--drivers/hwmon/tmp401.c10
-rw-r--r--drivers/hwmon/tmp421.c2
-rw-r--r--drivers/hwmon/twl4030-madc-hwmon.c14
-rw-r--r--drivers/hwmon/ultra45_env.c13
-rw-r--r--drivers/hwmon/w83627ehf.c22
-rw-r--r--drivers/hwmon/w83627hf.c2
-rw-r--r--drivers/hwmon/w83781d.c4
-rw-r--r--drivers/hwmon/w83791d.c12
-rw-r--r--drivers/hwmon/w83792d.c4
-rw-r--r--drivers/hwmon/w83793.c4
-rw-r--r--drivers/hwmon/w83795.c34
-rw-r--r--drivers/hwmon/w83l786ng.c2
-rw-r--r--drivers/hwmon/wm831x-hwmon.c12
-rw-r--r--drivers/hwmon/wm8350-hwmon.c12
-rw-r--r--drivers/i2c/busses/Kconfig18
-rw-r--r--drivers/i2c/busses/i2c-ali1535.c38
-rw-r--r--drivers/i2c/busses/i2c-ali1563.c2
-rw-r--r--drivers/i2c/busses/i2c-ali15x3.c2
-rw-r--r--drivers/i2c/busses/i2c-amd756.c2
-rw-r--r--drivers/i2c/busses/i2c-amd8111.c2
-rw-r--r--drivers/i2c/busses/i2c-at91.c17
-rw-r--r--drivers/i2c/busses/i2c-au1550.c13
-rw-r--r--drivers/i2c/busses/i2c-cpm.c13
-rw-r--r--drivers/i2c/busses/i2c-designware-pcidrv.c2
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c12
-rw-r--r--drivers/i2c/busses/i2c-eg20t.c43
-rw-r--r--drivers/i2c/busses/i2c-highlander.c15
-rw-r--r--drivers/i2c/busses/i2c-hydra.c2
-rw-r--r--drivers/i2c/busses/i2c-i801.c2
-rw-r--r--drivers/i2c/busses/i2c-ibm_iic.c17
-rw-r--r--drivers/i2c/busses/i2c-intel-mid.c2
-rw-r--r--drivers/i2c/busses/i2c-iop3xx.c16
-rw-r--r--drivers/i2c/busses/i2c-isch.c13
-rw-r--r--drivers/i2c/busses/i2c-ixp2000.c13
-rw-r--r--drivers/i2c/busses/i2c-mpc.c13
-rw-r--r--drivers/i2c/busses/i2c-mv64xxx.c15
-rw-r--r--drivers/i2c/busses/i2c-nforce2.c4
-rw-r--r--drivers/i2c/busses/i2c-nuc900.c2
-rw-r--r--drivers/i2c/busses/i2c-ocores.c17
-rw-r--r--drivers/i2c/busses/i2c-octeon.c16
-rw-r--r--drivers/i2c/busses/i2c-omap.c121
-rw-r--r--drivers/i2c/busses/i2c-pasemi.c2
-rw-r--r--drivers/i2c/busses/i2c-pca-platform.c14
-rw-r--r--drivers/i2c/busses/i2c-piix4.c2
-rw-r--r--drivers/i2c/busses/i2c-pmcmsp.c17
-rw-r--r--drivers/i2c/busses/i2c-powermac.c19
-rw-r--r--drivers/i2c/busses/i2c-puv3.c16
-rw-r--r--drivers/i2c/busses/i2c-pxa-pci.c2
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c3
-rw-r--r--drivers/i2c/busses/i2c-sh7760.c13
-rw-r--r--drivers/i2c/busses/i2c-simtec.c18
-rw-r--r--drivers/i2c/busses/i2c-sis5595.c6
-rw-r--r--drivers/i2c/busses/i2c-sis630.c12
-rw-r--r--drivers/i2c/busses/i2c-sis96x.c2
-rw-r--r--drivers/i2c/busses/i2c-tegra.c10
-rw-r--r--drivers/i2c/busses/i2c-via.c2
-rw-r--r--drivers/i2c/busses/i2c-viapro.c11
-rw-r--r--drivers/i2c/busses/i2c-xiic.c20
-rw-r--r--drivers/i2c/busses/scx200_acb.c2
-rw-r--r--drivers/i2c/i2c-dev.c13
-rw-r--r--drivers/i2c/muxes/gpio-i2cmux.c13
-rw-r--r--drivers/ide/ali14xx.c2
-rw-r--r--drivers/ide/at91_ide.c2
-rw-r--r--drivers/ide/cmd640.c2
-rw-r--r--drivers/ide/dtc2278.c2
-rw-r--r--drivers/ide/gayle.c2
-rw-r--r--drivers/ide/ht6560b.c2
-rw-r--r--drivers/ide/ide-4drives.c2
-rw-r--r--drivers/ide/ide-acpi.c6
-rw-r--r--drivers/ide/ide-floppy_ioctl.c3
-rw-r--r--drivers/ide/ide-pci-generic.c2
-rw-r--r--drivers/ide/qd65xx.c2
-rw-r--r--drivers/ide/umc8672.c2
-rw-r--r--drivers/idle/intel_idle.c96
-rw-r--r--drivers/ieee802154/fakehard.c2
-rw-r--r--drivers/infiniband/Kconfig1
-rw-r--r--drivers/infiniband/Makefile1
-rw-r--r--drivers/infiniband/core/addr.c50
-rw-r--r--drivers/infiniband/core/cm.c2
-rw-r--r--drivers/infiniband/core/cm_msgs.h1
-rw-r--r--drivers/infiniband/core/cma.c20
-rw-r--r--drivers/infiniband/core/ucm.c3
-rw-r--r--drivers/infiniband/core/user_mad.c2
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c27
-rw-r--r--drivers/infiniband/core/uverbs_main.c2
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.c11
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c218
-rw-r--r--drivers/infiniband/hw/cxgb4/cq.c2
-rw-r--r--drivers/infiniband/hw/ehca/ehca_classes.h4
-rw-r--r--drivers/infiniband/hw/ehca/ehca_main.c10
-rw-r--r--drivers/infiniband/hw/ipath/ipath_fs.c6
-rw-r--r--drivers/infiniband/hw/mlx4/ah.c2
-rw-r--r--drivers/infiniband/hw/mlx4/cq.c6
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c6
-rw-r--r--drivers/infiniband/hw/mlx4/main.c20
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c4
-rw-r--r--drivers/infiniband/hw/nes/nes.c2
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.c13
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.c6
-rw-r--r--drivers/infiniband/hw/nes/nes_nic.c6
-rw-r--r--drivers/infiniband/hw/nes/nes_utils.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_7220.h2
-rw-r--r--drivers/infiniband/hw/qib/qib_driver.c3
-rw-r--r--drivers/infiniband/hw/qib/qib_file_ops.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_fs.c6
-rw-r--r--drivers/infiniband/hw/qib/qib_iba6120.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7220.c10
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c58
-rw-r--r--drivers/infiniband/hw/qib/qib_init.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_pcie.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_qsfp.c12
-rw-r--r--drivers/infiniband/hw/qib/qib_qsfp.h2
-rw-r--r--drivers/infiniband/hw/qib/qib_sd7220.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_sysfs.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.c43
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c13
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c50
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c15
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c2
-rw-r--r--drivers/infiniband/ulp/srpt/Kconfig12
-rw-r--r--drivers/infiniband/ulp/srpt/Makefile2
-rw-r--r--drivers/infiniband/ulp/srpt/ib_dm_mad.h139
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c4073
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.h444
-rw-r--r--drivers/input/evdev.c20
-rw-r--r--drivers/input/input-polldev.c8
-rw-r--r--drivers/input/input.c2
-rw-r--r--drivers/input/joystick/xpad.c6
-rw-r--r--drivers/input/keyboard/Kconfig21
-rw-r--r--drivers/input/keyboard/Makefile1
-rw-r--r--drivers/input/keyboard/adp5520-keys.c13
-rw-r--r--drivers/input/keyboard/atkbd.c40
-rw-r--r--drivers/input/keyboard/bf54x-keys.c16
-rw-r--r--drivers/input/keyboard/ep93xx_keypad.c14
-rw-r--r--drivers/input/keyboard/gpio_keys_polled.c14
-rw-r--r--drivers/input/keyboard/imx_keypad.c14
-rw-r--r--drivers/input/keyboard/jornada680_kbd.c14
-rw-r--r--drivers/input/keyboard/jornada720_kbd.c14
-rw-r--r--drivers/input/keyboard/lm8323.c11
-rw-r--r--drivers/input/keyboard/matrix_keypad.c14
-rw-r--r--drivers/input/keyboard/nomadik-ske-keypad.c2
-rw-r--r--drivers/input/keyboard/omap-keypad.c15
-rw-r--r--drivers/input/keyboard/omap4-keypad.c13
-rw-r--r--drivers/input/keyboard/opencores-kbd.c13
-rw-r--r--drivers/input/keyboard/pmic8xxx-keypad.c13
-rw-r--r--drivers/input/keyboard/pxa27x_keypad.c14
-rw-r--r--drivers/input/keyboard/pxa930_rotary.c13
-rw-r--r--drivers/input/keyboard/samsung-keypad.c278
-rw-r--r--drivers/input/keyboard/sh_keysc.c14
-rw-r--r--drivers/input/keyboard/spear-keyboard.c13
-rw-r--r--drivers/input/keyboard/stmpe-keypad.c13
-rw-r--r--drivers/input/keyboard/tc3589x-keypad.c15
-rw-r--r--drivers/input/keyboard/tca8418_keypad.c430
-rw-r--r--drivers/input/keyboard/tegra-kbc.c132
-rw-r--r--drivers/input/keyboard/tnetv107x-keypad.c14
-rw-r--r--drivers/input/keyboard/twl4030_keypad.c13
-rw-r--r--drivers/input/keyboard/w90p910_keypad.c14
-rw-r--r--drivers/input/misc/88pm860x_onkey.c13
-rw-r--r--drivers/input/misc/Kconfig25
-rw-r--r--drivers/input/misc/Makefile2
-rw-r--r--drivers/input/misc/ab8500-ponkey.c15
-rw-r--r--drivers/input/misc/adxl34x-spi.c1
-rw-r--r--drivers/input/misc/adxl34x.c16
-rw-r--r--drivers/input/misc/ati_remote2.c19
-rw-r--r--drivers/input/misc/bfin_rotary.c13
-rw-r--r--drivers/input/misc/cma3000_d0x.c4
-rw-r--r--drivers/input/misc/cobalt_btns.c14
-rw-r--r--drivers/input/misc/dm355evm_keys.c13
-rw-r--r--drivers/input/misc/gp2ap002a00f.c299
-rw-r--r--drivers/input/misc/gpio_tilt_polled.c213
-rw-r--r--drivers/input/misc/ixp4xx-beeper.c13
-rw-r--r--drivers/input/misc/max8925_onkey.c13
-rw-r--r--drivers/input/misc/mc13783-pwrbutton.c14
-rw-r--r--drivers/input/misc/mpu3050.c128
-rw-r--r--drivers/input/misc/pcap_keys.c14
-rw-r--r--drivers/input/misc/pcf50633-input.c13
-rw-r--r--drivers/input/misc/pcspkr.c14
-rw-r--r--drivers/input/misc/pm8xxx-vibrator.c13
-rw-r--r--drivers/input/misc/pmic8xxx-pwrkey.c13
-rw-r--r--drivers/input/misc/pwm-beeper.c13
-rw-r--r--drivers/input/misc/rb532_button.c14
-rw-r--r--drivers/input/misc/rotary_encoder.c14
-rw-r--r--drivers/input/misc/sgi_btns.c13
-rw-r--r--drivers/input/misc/twl4030-vibra.c14
-rw-r--r--drivers/input/misc/twl6040-vibra.c13
-rw-r--r--drivers/input/misc/wistron_btns.c2
-rw-r--r--drivers/input/misc/wm831x-on.c13
-rw-r--r--drivers/input/misc/xen-kbdfront.c7
-rw-r--r--drivers/input/mouse/alps.c1036
-rw-r--r--drivers/input/mouse/alps.h19
-rw-r--r--drivers/input/mouse/bcm5974.c3
-rw-r--r--drivers/input/mouse/elantech.c80
-rw-r--r--drivers/input/mouse/elantech.h2
-rw-r--r--drivers/input/mouse/gpio_mouse.c13
-rw-r--r--drivers/input/mouse/hgpk.c18
-rw-r--r--drivers/input/mouse/logips2pp.c9
-rw-r--r--drivers/input/mouse/psmouse-base.c231
-rw-r--r--drivers/input/mouse/psmouse.h3
-rw-r--r--drivers/input/mouse/pxa930_trkball.c14
-rw-r--r--drivers/input/mouse/sentelic.c51
-rw-r--r--drivers/input/mouse/sentelic.h3
-rw-r--r--drivers/input/mouse/synaptics.c208
-rw-r--r--drivers/input/mouse/synaptics.h5
-rw-r--r--drivers/input/mouse/synaptics_i2c.c6
-rw-r--r--drivers/input/mouse/trackpoint.c17
-rw-r--r--drivers/input/serio/altera_ps2.c13
-rw-r--r--drivers/input/serio/ambakmi.c2
-rw-r--r--drivers/input/serio/hp_sdc.c2
-rw-r--r--drivers/input/serio/i8042.c23
-rw-r--r--drivers/input/serio/rpckbd.c14
-rw-r--r--drivers/input/serio/serio_raw.c8
-rw-r--r--drivers/input/serio/xilinx_ps2.c16
-rw-r--r--drivers/input/tablet/aiptek.c34
-rw-r--r--drivers/input/tablet/wacom_sys.c101
-rw-r--r--drivers/input/tablet/wacom_wac.c191
-rw-r--r--drivers/input/tablet/wacom_wac.h5
-rw-r--r--drivers/input/touchscreen/88pm860x-ts.c13
-rw-r--r--drivers/input/touchscreen/Kconfig41
-rw-r--r--drivers/input/touchscreen/Makefile3
-rw-r--r--drivers/input/touchscreen/ad7877.c21
-rw-r--r--drivers/input/touchscreen/ad7879-i2c.c31
-rw-r--r--drivers/input/touchscreen/ad7879-spi.c27
-rw-r--r--drivers/input/touchscreen/ad7879.c23
-rw-r--r--drivers/input/touchscreen/ad7879.h4
-rw-r--r--drivers/input/touchscreen/ads7846.c9
-rw-r--r--drivers/input/touchscreen/atmel_tsadcc.c15
-rw-r--r--drivers/input/touchscreen/auo-pixcir-ts.c652
-rw-r--r--drivers/input/touchscreen/da9034-ts.c13
-rw-r--r--drivers/input/touchscreen/eeti_ts.c4
-rw-r--r--drivers/input/touchscreen/egalax_ts.c303
-rw-r--r--drivers/input/touchscreen/htcpen.c11
-rw-r--r--drivers/input/touchscreen/intel-mid-touch.c13
-rw-r--r--drivers/input/touchscreen/jornada720_ts.c14
-rw-r--r--drivers/input/touchscreen/lpc32xx_ts.c13
-rw-r--r--drivers/input/touchscreen/mainstone-wm97xx.c14
-rw-r--r--drivers/input/touchscreen/migor_ts.c117
-rw-r--r--drivers/input/touchscreen/pcap_ts.c14
-rw-r--r--drivers/input/touchscreen/pixcir_i2c_ts.c239
-rw-r--r--drivers/input/touchscreen/s3c2410_ts.c14
-rw-r--r--drivers/input/touchscreen/st1232.c13
-rw-r--r--drivers/input/touchscreen/stmpe-ts.c15
-rw-r--r--drivers/input/touchscreen/tnetv107x-ts.c14
-rw-r--r--drivers/input/touchscreen/tps6507x-ts.c13
-rw-r--r--drivers/input/touchscreen/tsc2005.c4
-rw-r--r--drivers/input/touchscreen/ucb1400_ts.c289
-rw-r--r--drivers/input/touchscreen/usbtouchscreen.c40
-rw-r--r--drivers/input/touchscreen/w90p910_ts.c14
-rw-r--r--drivers/input/touchscreen/wm831x-ts.c13
-rw-r--r--drivers/input/touchscreen/zylonite-wm97xx.c19
-rw-r--r--drivers/iommu/Kconfig13
-rw-r--r--drivers/iommu/Makefile1
-rw-r--r--drivers/iommu/amd_iommu.c883
-rw-r--r--drivers/iommu/amd_iommu_init.c133
-rw-r--r--drivers/iommu/amd_iommu_proto.h24
-rw-r--r--drivers/iommu/amd_iommu_types.h118
-rw-r--r--drivers/iommu/amd_iommu_v2.c994
-rw-r--r--drivers/iommu/intel-iommu.c87
-rw-r--r--drivers/iommu/intr_remapping.c2
-rw-r--r--drivers/iommu/iommu.c179
-rw-r--r--drivers/iommu/msm_iommu.c25
-rw-r--r--drivers/iommu/omap-iommu.c80
-rw-r--r--drivers/iommu/omap-iovmm.c48
-rw-r--r--drivers/isdn/divert/divert_procfs.c6
-rw-r--r--drivers/isdn/gigaset/i4l.c3
-rw-r--r--drivers/isdn/hardware/avm/b1dma.c2
-rw-r--r--drivers/isdn/hardware/avm/c4.c2
-rw-r--r--drivers/isdn/hisax/enternow_pci.c2
-rw-r--r--drivers/isdn/i4l/Kconfig2
-rw-r--r--drivers/isdn/i4l/isdn_net.c3
-rw-r--r--drivers/isdn/sc/init.c2
-rw-r--r--drivers/leds/Kconfig18
-rw-r--r--drivers/leds/Makefile2
-rw-r--r--drivers/leds/led-class.c1
-rw-r--r--drivers/leds/led-triggers.c1
-rw-r--r--drivers/leds/leds-88pm860x.c12
-rw-r--r--drivers/leds/leds-adp5520.c12
-rw-r--r--drivers/leds/leds-ams-delta.c13
-rw-r--r--drivers/leds/leds-asic3.c16
-rw-r--r--drivers/leds/leds-atmel-pwm.c17
-rw-r--r--drivers/leds/leds-bd2802.c15
-rw-r--r--drivers/leds/leds-clevo-mail.c2
-rw-r--r--drivers/leds/leds-cobalt-qube.c17
-rw-r--r--drivers/leds/leds-da903x.c12
-rw-r--r--drivers/leds/leds-dac124s085.c13
-rw-r--r--drivers/leds/leds-fsg.c15
-rw-r--r--drivers/leds/leds-gpio.c16
-rw-r--r--drivers/leds/leds-hp6xx.c17
-rw-r--r--drivers/leds/leds-lm3530.c13
-rw-r--r--drivers/leds/leds-lp3944.c13
-rw-r--r--drivers/leds/leds-lp5521.c20
-rw-r--r--drivers/leds/leds-lp5523.c22
-rw-r--r--drivers/leds/leds-lt3593.c16
-rw-r--r--drivers/leds/leds-max8997.c372
-rw-r--r--drivers/leds/leds-mc13783.c14
-rw-r--r--drivers/leds/leds-netxbig.c39
-rw-r--r--drivers/leds/leds-ns2.c15
-rw-r--r--drivers/leds/leds-pca9532.c14
-rw-r--r--drivers/leds/leds-pca955x.c13
-rw-r--r--drivers/leds/leds-pwm.c13
-rw-r--r--drivers/leds/leds-rb532.c16
-rw-r--r--drivers/leds/leds-regulator.c12
-rw-r--r--drivers/leds/leds-renesas-tpu.c13
-rw-r--r--drivers/leds/leds-s3c24xx.c13
-rw-r--r--drivers/leds/leds-ss4200.c2
-rw-r--r--drivers/leds/leds-tca6507.c779
-rw-r--r--drivers/leds/leds-wm831x-status.c17
-rw-r--r--drivers/leds/leds-wm8350.c19
-rw-r--r--drivers/lguest/Makefile2
-rw-r--r--drivers/lguest/lguest_device.c24
-rw-r--r--drivers/lguest/segments.c28
-rw-r--r--drivers/lguest/x86/core.c2
-rw-r--r--drivers/macintosh/ams/ams-core.c2
-rw-r--r--drivers/macintosh/ams/ams-input.c4
-rw-r--r--drivers/macintosh/rack-meter.c14
-rw-r--r--drivers/macintosh/smu.c4
-rw-r--r--drivers/macintosh/therm_adt746x.c2
-rw-r--r--drivers/md/bitmap.c21
-rw-r--r--drivers/md/dm-flakey.c11
-rw-r--r--drivers/md/dm-linear.c12
-rw-r--r--drivers/md/dm-mpath.c6
-rw-r--r--drivers/md/dm-table.c6
-rw-r--r--drivers/md/dm.c1
-rw-r--r--drivers/md/linear.c1
-rw-r--r--drivers/md/md.c145
-rw-r--r--drivers/md/md.h82
-rw-r--r--drivers/md/multipath.c7
-rw-r--r--drivers/md/raid1.c185
-rw-r--r--drivers/md/raid1.h7
-rw-r--r--drivers/md/raid10.c582
-rw-r--r--drivers/md/raid10.h61
-rw-r--r--drivers/md/raid5.c577
-rw-r--r--drivers/md/raid5.h98
-rw-r--r--drivers/media/common/tuners/Kconfig9
-rw-r--r--drivers/media/common/tuners/Makefile1
-rw-r--r--drivers/media/common/tuners/max2165.c39
-rw-r--r--drivers/media/common/tuners/mc44s803.c10
-rw-r--r--drivers/media/common/tuners/mt2060.c13
-rw-r--r--drivers/media/common/tuners/mt2060_priv.h1
-rw-r--r--drivers/media/common/tuners/mt2063.c2307
-rw-r--r--drivers/media/common/tuners/mt2063.h36
-rw-r--r--drivers/media/common/tuners/mt2131.c20
-rw-r--r--drivers/media/common/tuners/mt2131_priv.h1
-rw-r--r--drivers/media/common/tuners/mt2266.c25
-rw-r--r--drivers/media/common/tuners/mxl5005s.c69
-rw-r--r--drivers/media/common/tuners/mxl5007t.c101
-rw-r--r--drivers/media/common/tuners/qt1010.c21
-rw-r--r--drivers/media/common/tuners/qt1010_priv.h1
-rw-r--r--drivers/media/common/tuners/tda18212.c72
-rw-r--r--drivers/media/common/tuners/tda18212.h4
-rw-r--r--drivers/media/common/tuners/tda18218.c34
-rw-r--r--drivers/media/common/tuners/tda18218_priv.h2
-rw-r--r--drivers/media/common/tuners/tda18271-fe.c83
-rw-r--r--drivers/media/common/tuners/tda18271-maps.c4
-rw-r--r--drivers/media/common/tuners/tda18271-priv.h2
-rw-r--r--drivers/media/common/tuners/tda18271.h1
-rw-r--r--drivers/media/common/tuners/tda827x.c52
-rw-r--r--drivers/media/common/tuners/tuner-simple.c68
-rw-r--r--drivers/media/common/tuners/tuner-xc2028.c143
-rw-r--r--drivers/media/common/tuners/xc4000.c191
-rw-r--r--drivers/media/common/tuners/xc5000.c147
-rw-r--r--drivers/media/dvb/b2c2/flexcop.c29
-rw-r--r--drivers/media/dvb/bt8xx/dst.c72
-rw-r--r--drivers/media/dvb/bt8xx/dst_common.h2
-rw-r--r--drivers/media/dvb/bt8xx/dvb-bt8xx.c205
-rw-r--r--drivers/media/dvb/ddbridge/ddbridge-core.c4
-rw-r--r--drivers/media/dvb/dm1105/dm1105.c7
-rw-r--r--drivers/media/dvb/dvb-core/dvb_ca_en50221.c4
-rw-r--r--drivers/media/dvb/dvb-core/dvb_frontend.c922
-rw-r--r--drivers/media/dvb/dvb-core/dvb_frontend.h27
-rw-r--r--drivers/media/dvb/dvb-core/dvb_net.c4
-rw-r--r--drivers/media/dvb/dvb-core/dvbdev.c2
-rw-r--r--drivers/media/dvb/dvb-usb/Kconfig5
-rw-r--r--drivers/media/dvb/dvb-usb/af9005-fe.c105
-rw-r--r--drivers/media/dvb/dvb-usb/af9005.c25
-rw-r--r--drivers/media/dvb/dvb-usb/af9005.h2
-rw-r--r--drivers/media/dvb/dvb-usb/af9015.c492
-rw-r--r--drivers/media/dvb/dvb-usb/af9015.h6
-rw-r--r--drivers/media/dvb/dvb-usb/anysee.c403
-rw-r--r--drivers/media/dvb/dvb-usb/anysee.h6
-rw-r--r--drivers/media/dvb/dvb-usb/cinergyT2-fe.c33
-rw-r--r--drivers/media/dvb/dvb-usb/cxusb.c11
-rw-r--r--drivers/media/dvb/dvb-usb/dib0700.h2
-rw-r--r--drivers/media/dvb/dvb-usb/dib0700_core.c1
-rw-r--r--drivers/media/dvb/dvb-usb/dib0700_devices.c867
-rw-r--r--drivers/media/dvb/dvb-usb/digitv.c4
-rw-r--r--drivers/media/dvb/dvb-usb/dtt200u-fe.c33
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb-dvb.c8
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb-ids.h6
-rw-r--r--drivers/media/dvb/dvb-usb/dw2102.c93
-rw-r--r--drivers/media/dvb/dvb-usb/friio-fe.c29
-rw-r--r--drivers/media/dvb/dvb-usb/gp8psk-fe.c24
-rw-r--r--drivers/media/dvb/dvb-usb/it913x.c336
-rw-r--r--drivers/media/dvb/dvb-usb/lmedm04.c8
-rw-r--r--drivers/media/dvb/dvb-usb/mxl111sf-demod.c42
-rw-r--r--drivers/media/dvb/dvb-usb/mxl111sf-tuner.c102
-rw-r--r--drivers/media/dvb/dvb-usb/mxl111sf.c16
-rw-r--r--drivers/media/dvb/dvb-usb/ttusb2.c19
-rw-r--r--drivers/media/dvb/dvb-usb/vp702x-fe.c20
-rw-r--r--drivers/media/dvb/dvb-usb/vp7045-fe.c32
-rw-r--r--drivers/media/dvb/firewire/firedtv-avc.c98
-rw-r--r--drivers/media/dvb/firewire/firedtv-dvb.c5
-rw-r--r--drivers/media/dvb/firewire/firedtv-fe.c35
-rw-r--r--drivers/media/dvb/firewire/firedtv.h4
-rw-r--r--drivers/media/dvb/frontends/Kconfig7
-rw-r--r--drivers/media/dvb/frontends/Makefile1
-rw-r--r--drivers/media/dvb/frontends/af9013.c1727
-rw-r--r--drivers/media/dvb/frontends/af9013.h113
-rw-r--r--drivers/media/dvb/frontends/af9013_priv.h93
-rw-r--r--drivers/media/dvb/frontends/atbm8830.c27
-rw-r--r--drivers/media/dvb/frontends/au8522_dig.c58
-rw-r--r--drivers/media/dvb/frontends/bcm3510.c18
-rw-r--r--drivers/media/dvb/frontends/bsbe1.h7
-rw-r--r--drivers/media/dvb/frontends/bsru6.h9
-rw-r--r--drivers/media/dvb/frontends/cx22700.c51
-rw-r--r--drivers/media/dvb/frontends/cx22702.c69
-rw-r--r--drivers/media/dvb/frontends/cx24110.c20
-rw-r--r--drivers/media/dvb/frontends/cx24113.c10
-rw-r--r--drivers/media/dvb/frontends/cx24116.c36
-rw-r--r--drivers/media/dvb/frontends/cx24123.c56
-rw-r--r--drivers/media/dvb/frontends/cxd2820r.h13
-rw-r--r--drivers/media/dvb/frontends/cxd2820r_c.c25
-rw-r--r--drivers/media/dvb/frontends/cxd2820r_core.c641
-rw-r--r--drivers/media/dvb/frontends/cxd2820r_priv.h23
-rw-r--r--drivers/media/dvb/frontends/cxd2820r_t.c63
-rw-r--r--drivers/media/dvb/frontends/cxd2820r_t2.c70
-rw-r--r--drivers/media/dvb/frontends/dib0070.c10
-rw-r--r--drivers/media/dvb/frontends/dib0090.c165
-rw-r--r--drivers/media/dvb/frontends/dib0090.h54
-rw-r--r--drivers/media/dvb/frontends/dib3000mb.c113
-rw-r--r--drivers/media/dvb/frontends/dib3000mb_priv.h2
-rw-r--r--drivers/media/dvb/frontends/dib3000mc.c132
-rw-r--r--drivers/media/dvb/frontends/dib7000m.c136
-rw-r--r--drivers/media/dvb/frontends/dib7000p.c456
-rw-r--r--drivers/media/dvb/frontends/dib7000p.h16
-rw-r--r--drivers/media/dvb/frontends/dib8000.c1073
-rw-r--r--drivers/media/dvb/frontends/dib8000.h42
-rw-r--r--drivers/media/dvb/frontends/dib9000.c36
-rw-r--r--drivers/media/dvb/frontends/dibx000_common.h17
-rw-r--r--drivers/media/dvb/frontends/drxd.h2
-rw-r--r--drivers/media/dvb/frontends/drxd_hard.c62
-rw-r--r--drivers/media/dvb/frontends/drxk.h11
-rw-r--r--drivers/media/dvb/frontends/drxk_hard.c314
-rw-r--r--drivers/media/dvb/frontends/drxk_hard.h8
-rw-r--r--drivers/media/dvb/frontends/ds3000.c38
-rw-r--r--drivers/media/dvb/frontends/dvb-pll.c65
-rw-r--r--drivers/media/dvb/frontends/dvb_dummy_fe.c18
-rw-r--r--drivers/media/dvb/frontends/ec100.c20
-rw-r--r--drivers/media/dvb/frontends/hd29l2.c861
-rw-r--r--drivers/media/dvb/frontends/hd29l2.h66
-rw-r--r--drivers/media/dvb/frontends/hd29l2_priv.h314
-rw-r--r--drivers/media/dvb/frontends/it913x-fe-priv.h806
-rw-r--r--drivers/media/dvb/frontends/it913x-fe.c289
-rw-r--r--drivers/media/dvb/frontends/it913x-fe.h43
-rw-r--r--drivers/media/dvb/frontends/itd1000.c7
-rw-r--r--drivers/media/dvb/frontends/ix2505v.c8
-rw-r--r--drivers/media/dvb/frontends/l64781.c117
-rw-r--r--drivers/media/dvb/frontends/lgdt3305.c98
-rw-r--r--drivers/media/dvb/frontends/lgdt330x.c37
-rw-r--r--drivers/media/dvb/frontends/lgs8gl5.c29
-rw-r--r--drivers/media/dvb/frontends/lgs8gxx.c26
-rw-r--r--drivers/media/dvb/frontends/mb86a16.c8
-rw-r--r--drivers/media/dvb/frontends/mb86a20s.c546
-rw-r--r--drivers/media/dvb/frontends/mt312.c37
-rw-r--r--drivers/media/dvb/frontends/mt352.c65
-rw-r--r--drivers/media/dvb/frontends/nxt200x.c17
-rw-r--r--drivers/media/dvb/frontends/nxt6000.c32
-rw-r--r--drivers/media/dvb/frontends/or51132.c52
-rw-r--r--drivers/media/dvb/frontends/or51211.c13
-rw-r--r--drivers/media/dvb/frontends/s5h1409.c48
-rw-r--r--drivers/media/dvb/frontends/s5h1411.c48
-rw-r--r--drivers/media/dvb/frontends/s5h1420.c71
-rw-r--r--drivers/media/dvb/frontends/s5h1432.c36
-rw-r--r--drivers/media/dvb/frontends/s921.c23
-rw-r--r--drivers/media/dvb/frontends/si21xx.c20
-rw-r--r--drivers/media/dvb/frontends/sp8870.c29
-rw-r--r--drivers/media/dvb/frontends/sp887x.c50
-rw-r--r--drivers/media/dvb/frontends/stb0899_drv.c37
-rw-r--r--drivers/media/dvb/frontends/stb6000.c8
-rw-r--r--drivers/media/dvb/frontends/stb6100.c6
-rw-r--r--drivers/media/dvb/frontends/stv0288.c17
-rw-r--r--drivers/media/dvb/frontends/stv0297.c37
-rw-r--r--drivers/media/dvb/frontends/stv0299.c32
-rw-r--r--drivers/media/dvb/frontends/stv0367.c156
-rw-r--r--drivers/media/dvb/frontends/stv0900_core.c37
-rw-r--r--drivers/media/dvb/frontends/stv090x.c13
-rw-r--r--drivers/media/dvb/frontends/stv6110.c3
-rw-r--r--drivers/media/dvb/frontends/tda10021.c111
-rw-r--r--drivers/media/dvb/frontends/tda10023.c103
-rw-r--r--drivers/media/dvb/frontends/tda10048.c83
-rw-r--r--drivers/media/dvb/frontends/tda1004x.c111
-rw-r--r--drivers/media/dvb/frontends/tda10071.c8
-rw-r--r--drivers/media/dvb/frontends/tda10086.c62
-rw-r--r--drivers/media/dvb/frontends/tda18271c2dd.c53
-rw-r--r--drivers/media/dvb/frontends/tda8083.c19
-rw-r--r--drivers/media/dvb/frontends/tda826x.c7
-rw-r--r--drivers/media/dvb/frontends/tdhd1.h11
-rw-r--r--drivers/media/dvb/frontends/tua6100.c31
-rw-r--r--drivers/media/dvb/frontends/ves1820.c23
-rw-r--r--drivers/media/dvb/frontends/ves1x93.c23
-rw-r--r--drivers/media/dvb/frontends/zl10036.c10
-rw-r--r--drivers/media/dvb/frontends/zl10039.c10
-rw-r--r--drivers/media/dvb/frontends/zl10353.c116
-rw-r--r--drivers/media/dvb/mantis/mantis_vp1033.c8
-rw-r--r--drivers/media/dvb/mantis/mantis_vp2033.c9
-rw-r--r--drivers/media/dvb/mantis/mantis_vp2040.c9
-rw-r--r--drivers/media/dvb/ngene/ngene-cards.c2
-rw-r--r--drivers/media/dvb/pluto2/pluto2.c6
-rw-r--r--drivers/media/dvb/pt1/va1j5jf8007s.c6
-rw-r--r--drivers/media/dvb/pt1/va1j5jf8007t.c6
-rw-r--r--drivers/media/dvb/siano/smsdvb.c33
-rw-r--r--drivers/media/dvb/ttpci/av7110.c102
-rw-r--r--drivers/media/dvb/ttpci/av7110.h3
-rw-r--r--drivers/media/dvb/ttpci/budget-av.c50
-rw-r--r--drivers/media/dvb/ttpci/budget-ci.c51
-rw-r--r--drivers/media/dvb/ttpci/budget-patch.c27
-rw-r--r--drivers/media/dvb/ttpci/budget.c68
-rw-r--r--drivers/media/dvb/ttusb-budget/dvb-ttusb-budget.c102
-rw-r--r--drivers/media/dvb/ttusb-dec/ttusbdecfe.c14
-rw-r--r--drivers/media/media-device.c3
-rw-r--r--drivers/media/radio/Kconfig297
-rw-r--r--drivers/media/radio/radio-gemtek.c10
-rw-r--r--drivers/media/radio/radio-miropcm20.c2
-rw-r--r--drivers/media/radio/radio-si4713.c15
-rw-r--r--drivers/media/radio/radio-timb.c15
-rw-r--r--drivers/media/radio/radio-wl1273.c17
-rw-r--r--drivers/media/radio/tef6862.c8
-rw-r--r--drivers/media/radio/wl128x/Kconfig2
-rw-r--r--drivers/media/radio/wl128x/fmdrv_common.c58
-rw-r--r--drivers/media/radio/wl128x/fmdrv_common.h28
-rw-r--r--drivers/media/radio/wl128x/fmdrv_rx.c84
-rw-r--r--drivers/media/radio/wl128x/fmdrv_rx.h50
-rw-r--r--drivers/media/radio/wl128x/fmdrv_tx.c61
-rw-r--r--drivers/media/radio/wl128x/fmdrv_tx.h20
-rw-r--r--drivers/media/radio/wl128x/fmdrv_v4l2.c1
-rw-r--r--drivers/media/rc/Kconfig10
-rw-r--r--drivers/media/rc/Makefile1
-rw-r--r--drivers/media/rc/ati_remote.c111
-rw-r--r--drivers/media/rc/ene_ir.c2
-rw-r--r--drivers/media/rc/ene_ir.h2
-rw-r--r--drivers/media/rc/ir-nec-decoder.c4
-rw-r--r--drivers/media/rc/ir-raw.c1
-rw-r--r--drivers/media/rc/ir-rc6-decoder.c67
-rw-r--r--drivers/media/rc/ir-sanyo-decoder.c205
-rw-r--r--drivers/media/rc/keymaps/rc-ati-x10.c96
-rw-r--r--drivers/media/rc/keymaps/rc-hauppauge.c51
-rw-r--r--drivers/media/rc/keymaps/rc-medion-x10.c128
-rw-r--r--drivers/media/rc/keymaps/rc-snapstream-firefly.c114
-rw-r--r--drivers/media/rc/keymaps/rc-videomate-m1f.c24
-rw-r--r--drivers/media/rc/lirc_dev.c2
-rw-r--r--drivers/media/rc/mceusb.c4
-rw-r--r--drivers/media/rc/rc-core-priv.h12
-rw-r--r--drivers/media/rc/rc-main.c3
-rw-r--r--drivers/media/rc/redrat3.c52
-rw-r--r--drivers/media/rc/streamzap.c4
-rw-r--r--drivers/media/rc/winbond-cir.c6
-rw-r--r--drivers/media/video/Kconfig427
-rw-r--r--drivers/media/video/Makefile4
-rw-r--r--drivers/media/video/adv7170.c62
-rw-r--r--drivers/media/video/as3645a.c905
-rw-r--r--drivers/media/video/atmel-isi.c35
-rw-r--r--drivers/media/video/au0828/Kconfig1
-rw-r--r--drivers/media/video/au0828/au0828-cards.c7
-rw-r--r--drivers/media/video/au0828/au0828-i2c.c2
-rw-r--r--drivers/media/video/bt8xx/bt848.h5
-rw-r--r--drivers/media/video/bt8xx/bttv-cards.c58
-rw-r--r--drivers/media/video/bt8xx/bttv-driver.c1
-rw-r--r--drivers/media/video/bt8xx/bttv-i2c.c2
-rw-r--r--drivers/media/video/bt8xx/bttv.h3
-rw-r--r--drivers/media/video/c-qcam.c2
-rw-r--r--drivers/media/video/cs5345.c2
-rw-r--r--drivers/media/video/cs53l32a.c2
-rw-r--r--drivers/media/video/cx18/cx18-driver.c2
-rw-r--r--drivers/media/video/cx18/cx18-fileops.c41
-rw-r--r--drivers/media/video/cx18/cx18-i2c.c2
-rw-r--r--drivers/media/video/cx18/cx18-i2c.h2
-rw-r--r--drivers/media/video/cx231xx/Kconfig6
-rw-r--r--drivers/media/video/cx231xx/cx231xx-audio.c24
-rw-r--r--drivers/media/video/cx231xx/cx231xx-cards.c86
-rw-r--r--drivers/media/video/cx231xx/cx231xx-core.c7
-rw-r--r--drivers/media/video/cx231xx/cx231xx-dvb.c4
-rw-r--r--drivers/media/video/cx231xx/cx231xx-input.c11
-rw-r--r--drivers/media/video/cx231xx/cx231xx-vbi.c4
-rw-r--r--drivers/media/video/cx231xx/cx231xx-video.c14
-rw-r--r--drivers/media/video/cx231xx/cx231xx.h2
-rw-r--r--drivers/media/video/cx23885/cx23885-417.c141
-rw-r--r--drivers/media/video/cx23885/cx23885-cards.c79
-rw-r--r--drivers/media/video/cx23885/cx23885-core.c24
-rw-r--r--drivers/media/video/cx23885/cx23885-dvb.c113
-rw-r--r--drivers/media/video/cx23885/cx23885-i2c.c2
-rw-r--r--drivers/media/video/cx23885/cx23885-video.c176
-rw-r--r--drivers/media/video/cx23885/cx23885.h14
-rw-r--r--drivers/media/video/cx25821/cx25821-alsa.c75
-rw-r--r--drivers/media/video/cx25821/cx25821-audio-upstream.c113
-rw-r--r--drivers/media/video/cx25821/cx25821-audio.h39
-rw-r--r--drivers/media/video/cx25821/cx25821-cards.c2
-rw-r--r--drivers/media/video/cx25821/cx25821-core.c57
-rw-r--r--drivers/media/video/cx25821/cx25821-i2c.c12
-rw-r--r--drivers/media/video/cx25821/cx25821-medusa-defines.h6
-rw-r--r--drivers/media/video/cx25821/cx25821-medusa-reg.h518
-rw-r--r--drivers/media/video/cx25821/cx25821-medusa-video.c410
-rw-r--r--drivers/media/video/cx25821/cx25821-video-upstream-ch2.c138
-rw-r--r--drivers/media/video/cx25821/cx25821-video-upstream.c156
-rw-r--r--drivers/media/video/cx25821/cx25821-video.c145
-rw-r--r--drivers/media/video/cx25821/cx25821.h4
-rw-r--r--drivers/media/video/cx25840/cx25840-audio.c10
-rw-r--r--drivers/media/video/cx25840/cx25840-core.c3241
-rw-r--r--drivers/media/video/cx88/Kconfig10
-rw-r--r--drivers/media/video/cx88/cx88-alsa.c2
-rw-r--r--drivers/media/video/cx88/cx88-cards.c118
-rw-r--r--drivers/media/video/cx88/cx88-dvb.c30
-rw-r--r--drivers/media/video/cx88/cx88-i2c.c2
-rw-r--r--drivers/media/video/cx88/cx88-input.c4
-rw-r--r--drivers/media/video/cx88/cx88.h2
-rw-r--r--drivers/media/video/davinci/dm355_ccdc.c13
-rw-r--r--drivers/media/video/davinci/dm644x_ccdc.c13
-rw-r--r--drivers/media/video/davinci/isif.c13
-rw-r--r--drivers/media/video/davinci/vpbe.c76
-rw-r--r--drivers/media/video/davinci/vpbe_display.c43
-rw-r--r--drivers/media/video/davinci/vpbe_osd.c491
-rw-r--r--drivers/media/video/davinci/vpbe_venc.c223
-rw-r--r--drivers/media/video/davinci/vpfe_capture.c18
-rw-r--r--drivers/media/video/davinci/vpif.h1
-rw-r--r--drivers/media/video/davinci/vpif_capture.c14
-rw-r--r--drivers/media/video/davinci/vpif_capture.h2
-rw-r--r--drivers/media/video/davinci/vpif_display.h1
-rw-r--r--drivers/media/video/em28xx/em28xx-audio.c2
-rw-r--r--drivers/media/video/em28xx/em28xx-cards.c256
-rw-r--r--drivers/media/video/em28xx/em28xx-core.c61
-rw-r--r--drivers/media/video/em28xx/em28xx-dvb.c190
-rw-r--r--drivers/media/video/em28xx/em28xx-input.c7
-rw-r--r--drivers/media/video/em28xx/em28xx-reg.h5
-rw-r--r--drivers/media/video/em28xx/em28xx-video.c14
-rw-r--r--drivers/media/video/em28xx/em28xx.h8
-rw-r--r--drivers/media/video/et61x251/et61x251_core.c4
-rw-r--r--drivers/media/video/fsl-viu.c13
-rw-r--r--drivers/media/video/gspca/Kconfig10
-rw-r--r--drivers/media/video/gspca/Makefile2
-rw-r--r--drivers/media/video/gspca/benq.c7
-rw-r--r--drivers/media/video/gspca/gl860/gl860.c1
-rw-r--r--drivers/media/video/gspca/gspca.c79
-rw-r--r--drivers/media/video/gspca/gspca.h5
-rw-r--r--drivers/media/video/gspca/jl2005bcd.c554
-rw-r--r--drivers/media/video/gspca/konica.c3
-rw-r--r--drivers/media/video/gspca/m5602/m5602_core.c4
-rw-r--r--drivers/media/video/gspca/m5602/m5602_mt9m111.h2
-rw-r--r--drivers/media/video/gspca/m5602/m5602_ov7660.h2
-rw-r--r--drivers/media/video/gspca/m5602/m5602_ov9650.h2
-rw-r--r--drivers/media/video/gspca/m5602/m5602_po1030.h2
-rw-r--r--drivers/media/video/gspca/m5602/m5602_s5k4aa.h2
-rw-r--r--drivers/media/video/gspca/m5602/m5602_s5k83a.h2
-rw-r--r--drivers/media/video/gspca/mars.c1
-rw-r--r--drivers/media/video/gspca/nw80x.c2
-rw-r--r--drivers/media/video/gspca/ov519.c5
-rw-r--r--drivers/media/video/gspca/ov534_9.c141
-rw-r--r--drivers/media/video/gspca/pac207.c10
-rw-r--r--drivers/media/video/gspca/pac7302.c1
-rw-r--r--drivers/media/video/gspca/se401.c10
-rw-r--r--drivers/media/video/gspca/sn9c20x.c38
-rw-r--r--drivers/media/video/gspca/sonixb.c15
-rw-r--r--drivers/media/video/gspca/sonixj.c18
-rw-r--r--drivers/media/video/gspca/spca561.c2
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx.c8
-rw-r--r--drivers/media/video/gspca/t613.c25
-rw-r--r--drivers/media/video/gspca/topro.c2
-rw-r--r--drivers/media/video/gspca/vicam.c3
-rw-r--r--drivers/media/video/gspca/xirlink_cit.c6
-rw-r--r--drivers/media/video/gspca/zc3xx.c117
-rw-r--r--drivers/media/video/hdpvr/hdpvr-core.c2
-rw-r--r--drivers/media/video/ir-kbd-i2c.c25
-rw-r--r--drivers/media/video/ivtv/ivtv-driver.c5
-rw-r--r--drivers/media/video/ivtv/ivtv-driver.h3
-rw-r--r--drivers/media/video/ivtv/ivtv-fileops.c118
-rw-r--r--drivers/media/video/ivtv/ivtv-i2c.h2
-rw-r--r--drivers/media/video/ivtv/ivtv-ioctl.c22
-rw-r--r--drivers/media/video/ivtv/ivtv-irq.c4
-rw-r--r--drivers/media/video/ivtv/ivtv-streams.c2
-rw-r--r--drivers/media/video/ivtv/ivtv-yuv.c22
-rw-r--r--drivers/media/video/ivtv/ivtvfb.c2
-rw-r--r--drivers/media/video/m5mols/m5mols.h48
-rw-r--r--drivers/media/video/m5mols/m5mols_capture.c83
-rw-r--r--drivers/media/video/m5mols/m5mols_core.c310
-rw-r--r--drivers/media/video/m5mols/m5mols_reg.h247
-rw-r--r--drivers/media/video/marvell-ccic/mcam-core.c42
-rw-r--r--drivers/media/video/marvell-ccic/mmp-driver.c35
-rw-r--r--drivers/media/video/msp3400-driver.c6
-rw-r--r--drivers/media/video/msp3400-driver.h6
-rw-r--r--drivers/media/video/mt9m001.c5
-rw-r--r--drivers/media/video/mt9m111.c381
-rw-r--r--drivers/media/video/mt9p031.c5
-rw-r--r--drivers/media/video/mt9t001.c5
-rw-r--r--drivers/media/video/mt9t031.c5
-rw-r--r--drivers/media/video/mt9t112.c4
-rw-r--r--drivers/media/video/mt9v022.c5
-rw-r--r--drivers/media/video/mt9v032.c8
-rw-r--r--drivers/media/video/mx1_camera.c2
-rw-r--r--drivers/media/video/mx2_camera.c299
-rw-r--r--drivers/media/video/mx3_camera.c19
-rw-r--r--drivers/media/video/omap/omap_vout.c242
-rw-r--r--drivers/media/video/omap/omap_vout_vrfb.c2
-rw-r--r--drivers/media/video/omap/omap_voutdef.h2
-rw-r--r--drivers/media/video/omap1_camera.c17
-rw-r--r--drivers/media/video/omap24xxcam-dma.c2
-rw-r--r--drivers/media/video/omap24xxcam.c19
-rw-r--r--drivers/media/video/omap3isp/isp.c102
-rw-r--r--drivers/media/video/omap3isp/isp.h2
-rw-r--r--drivers/media/video/omap3isp/ispccdc.c30
-rw-r--r--drivers/media/video/omap3isp/ispccdc.h2
-rw-r--r--drivers/media/video/omap3isp/ispccp2.c22
-rw-r--r--drivers/media/video/omap3isp/ispccp2.h3
-rw-r--r--drivers/media/video/omap3isp/ispcsi2.c18
-rw-r--r--drivers/media/video/omap3isp/ispcsi2.h2
-rw-r--r--drivers/media/video/omap3isp/isppreview.c25
-rw-r--r--drivers/media/video/omap3isp/isppreview.h2
-rw-r--r--drivers/media/video/omap3isp/ispresizer.c7
-rw-r--r--drivers/media/video/omap3isp/ispresizer.h1
-rw-r--r--drivers/media/video/omap3isp/ispstat.c10
-rw-r--r--drivers/media/video/omap3isp/ispvideo.c32
-rw-r--r--drivers/media/video/omap3isp/ispvideo.h8
-rw-r--r--drivers/media/video/ov6650.c2
-rw-r--r--drivers/media/video/ov7670.c2
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-hdw.c5
-rw-r--r--drivers/media/video/pwc/pwc-ctrl.c935
-rw-r--r--drivers/media/video/pwc/pwc-dec1.c16
-rw-r--r--drivers/media/video/pwc/pwc-dec1.h6
-rw-r--r--drivers/media/video/pwc/pwc-dec23.c307
-rw-r--r--drivers/media/video/pwc/pwc-dec23.h14
-rw-r--r--drivers/media/video/pwc/pwc-if.c460
-rw-r--r--drivers/media/video/pwc/pwc-kiara.h2
-rw-r--r--drivers/media/video/pwc/pwc-misc.c88
-rw-r--r--drivers/media/video/pwc/pwc-timon.h2
-rw-r--r--drivers/media/video/pwc/pwc-uncompress.c46
-rw-r--r--drivers/media/video/pwc/pwc-v4l.c330
-rw-r--r--drivers/media/video/pwc/pwc.h78
-rw-r--r--drivers/media/video/pxa_camera.c17
-rw-r--r--drivers/media/video/s5p-fimc/fimc-capture.c32
-rw-r--r--drivers/media/video/s5p-fimc/fimc-core.c164
-rw-r--r--drivers/media/video/s5p-fimc/fimc-core.h32
-rw-r--r--drivers/media/video/s5p-fimc/fimc-mdevice.c44
-rw-r--r--drivers/media/video/s5p-fimc/fimc-reg.c68
-rw-r--r--drivers/media/video/s5p-fimc/mipi-csis.c22
-rw-r--r--drivers/media/video/s5p-fimc/mipi-csis.h3
-rw-r--r--drivers/media/video/s5p-fimc/regs-fimc.h5
-rw-r--r--drivers/media/video/s5p-g2d/Makefile3
-rw-r--r--drivers/media/video/s5p-g2d/g2d-hw.c104
-rw-r--r--drivers/media/video/s5p-g2d/g2d-regs.h115
-rw-r--r--drivers/media/video/s5p-g2d/g2d.c811
-rw-r--r--drivers/media/video/s5p-g2d/g2d.h83
-rw-r--r--drivers/media/video/s5p-jpeg/Makefile2
-rw-r--r--drivers/media/video/s5p-jpeg/jpeg-core.c1482
-rw-r--r--drivers/media/video/s5p-jpeg/jpeg-core.h143
-rw-r--r--drivers/media/video/s5p-jpeg/jpeg-hw.h353
-rw-r--r--drivers/media/video/s5p-jpeg/jpeg-regs.h170
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc.c25
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_dec.c2
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_enc.c2
-rw-r--r--drivers/media/video/s5p-tv/hdmi_drv.c30
-rw-r--r--drivers/media/video/s5p-tv/mixer.h14
-rw-r--r--drivers/media/video/s5p-tv/mixer_grp_layer.c157
-rw-r--r--drivers/media/video/s5p-tv/mixer_video.c343
-rw-r--r--drivers/media/video/s5p-tv/mixer_vp_layer.c108
-rw-r--r--drivers/media/video/s5p-tv/sdo_drv.c22
-rw-r--r--drivers/media/video/saa7115.c2
-rw-r--r--drivers/media/video/saa7134/saa7134-cards.c33
-rw-r--r--drivers/media/video/saa7134/saa7134-core.c1
-rw-r--r--drivers/media/video/saa7134/saa7134-dvb.c33
-rw-r--r--drivers/media/video/saa7134/saa7134-input.c23
-rw-r--r--drivers/media/video/saa7134/saa7134-tvaudio.c65
-rw-r--r--drivers/media/video/saa7134/saa7134-video.c2
-rw-r--r--drivers/media/video/saa7134/saa7134.h2
-rw-r--r--drivers/media/video/saa7164/saa7164-bus.c4
-rw-r--r--drivers/media/video/saa7164/saa7164-cards.c4
-rw-r--r--drivers/media/video/sh_mobile_ceu_camera.c45
-rw-r--r--drivers/media/video/sh_mobile_csi2.c17
-rw-r--r--drivers/media/video/sn9c102/sn9c102_core.c4
-rw-r--r--drivers/media/video/soc_camera.c7
-rw-r--r--drivers/media/video/soc_camera_platform.c13
-rw-r--r--drivers/media/video/stk-webcam.c8
-rw-r--r--drivers/media/video/timblogiw.c17
-rw-r--r--drivers/media/video/tlg2300/pd-common.h2
-rw-r--r--drivers/media/video/tlg2300/pd-dvb.c22
-rw-r--r--drivers/media/video/tlg2300/pd-main.c4
-rw-r--r--drivers/media/video/tm6000/Kconfig6
-rw-r--r--drivers/media/video/tm6000/tm6000-alsa.c23
-rw-r--r--drivers/media/video/tm6000/tm6000-cards.c35
-rw-r--r--drivers/media/video/tm6000/tm6000-core.c86
-rw-r--r--drivers/media/video/tm6000/tm6000-dvb.c21
-rw-r--r--drivers/media/video/tm6000/tm6000-i2c.c8
-rw-r--r--drivers/media/video/tm6000/tm6000-input.c407
-rw-r--r--drivers/media/video/tm6000/tm6000-regs.h14
-rw-r--r--drivers/media/video/tm6000/tm6000-stds.c89
-rw-r--r--drivers/media/video/tm6000/tm6000-video.c21
-rw-r--r--drivers/media/video/tm6000/tm6000.h3
-rw-r--r--drivers/media/video/tuner-core.c1
-rw-r--r--drivers/media/video/tvp514x.c2
-rw-r--r--drivers/media/video/tvp5150.c81
-rw-r--r--drivers/media/video/tvp7002.c2
-rw-r--r--drivers/media/video/upd64083.c2
-rw-r--r--drivers/media/video/usbvision/usbvision-i2c.c46
-rw-r--r--drivers/media/video/uvc/Kconfig1
-rw-r--r--drivers/media/video/uvc/Makefile2
-rw-r--r--drivers/media/video/uvc/uvc_ctrl.c19
-rw-r--r--drivers/media/video/uvc/uvc_debugfs.c136
-rw-r--r--drivers/media/video/uvc/uvc_driver.c30
-rw-r--r--drivers/media/video/uvc/uvc_isight.c10
-rw-r--r--drivers/media/video/uvc/uvc_queue.c564
-rw-r--r--drivers/media/video/uvc/uvc_v4l2.c29
-rw-r--r--drivers/media/video/uvc/uvc_video.c625
-rw-r--r--drivers/media/video/uvc/uvcvideo.h128
-rw-r--r--drivers/media/video/v4l2-compat-ioctl32.c2
-rw-r--r--drivers/media/video/v4l2-ctrls.c59
-rw-r--r--drivers/media/video/v4l2-dev.c14
-rw-r--r--drivers/media/video/v4l2-device.c4
-rw-r--r--drivers/media/video/v4l2-ioctl.c128
-rw-r--r--drivers/media/video/v4l2-subdev.c4
-rw-r--r--drivers/media/video/via-camera.c26
-rw-r--r--drivers/media/video/videobuf-dvb.c7
-rw-r--r--drivers/media/video/videobuf2-core.c118
-rw-r--r--drivers/media/video/videobuf2-dma-sg.c3
-rw-r--r--drivers/media/video/videobuf2-memops.c28
-rw-r--r--drivers/media/video/videobuf2-vmalloc.c90
-rw-r--r--drivers/media/video/vino.c2
-rw-r--r--drivers/media/video/zoran/zoran_device.c2
-rw-r--r--drivers/media/video/zoran/zoran_driver.c3
-rw-r--r--drivers/media/video/zoran/zr36060.c2
-rw-r--r--drivers/memstick/host/jmb38x_ms.c2
-rw-r--r--drivers/memstick/host/r592.c2
-rw-r--r--drivers/memstick/host/tifm_ms.c2
-rw-r--r--drivers/message/fusion/lsi/mpi_cnfg.h1
-rw-r--r--drivers/message/fusion/lsi/mpi_ioc.h2
-rw-r--r--drivers/message/fusion/mptbase.c7
-rw-r--r--drivers/message/fusion/mptbase.h1
-rw-r--r--drivers/message/fusion/mptsas.c2
-rw-r--r--drivers/message/i2o/i2o_proc.c2
-rw-r--r--drivers/mfd/88pm860x-i2c.c241
-rw-r--r--drivers/mfd/Kconfig64
-rw-r--r--drivers/mfd/Makefile10
-rw-r--r--drivers/mfd/aat2870-core.c25
-rw-r--r--drivers/mfd/ab5500-core.c2
-rw-r--r--drivers/mfd/ab5500-debugfs.c4
-rw-r--r--drivers/mfd/ab8500-core.c4
-rw-r--r--drivers/mfd/ab8500-debugfs.c2
-rw-r--r--drivers/mfd/ab8500-gpadc.c4
-rw-r--r--drivers/mfd/ab8500-i2c.c2
-rw-r--r--drivers/mfd/ab8500-sysctrl.c4
-rw-r--r--drivers/mfd/adp5520.c2
-rw-r--r--drivers/mfd/cs5535-mfd.c8
-rw-r--r--drivers/mfd/da903x.c3
-rw-r--r--drivers/mfd/da9052-core.c694
-rw-r--r--drivers/mfd/da9052-i2c.c140
-rw-r--r--drivers/mfd/da9052-spi.c115
-rw-r--r--drivers/mfd/db8500-prcmu.c7
-rw-r--r--drivers/mfd/dm355evm_msp.c3
-rw-r--r--drivers/mfd/intel_msic.c12
-rw-r--r--drivers/mfd/janz-cmodio.c2
-rw-r--r--drivers/mfd/jz4740-adc.c15
-rw-r--r--drivers/mfd/lpc_sch.c2
-rw-r--r--drivers/mfd/max8925-core.c15
-rw-r--r--drivers/mfd/max8925-i2c.c27
-rw-r--r--drivers/mfd/max8997.c3
-rw-r--r--drivers/mfd/max8998.c6
-rw-r--r--drivers/mfd/mc13xxx-core.c111
-rw-r--r--drivers/mfd/mcp-core.c44
-rw-r--r--drivers/mfd/mcp-sa11x0.c182
-rw-r--r--drivers/mfd/omap-usb-host.c763
-rw-r--r--drivers/mfd/pcf50633-adc.c12
-rw-r--r--drivers/mfd/s5m-core.c176
-rw-r--r--drivers/mfd/s5m-irq.c487
-rw-r--r--drivers/mfd/sm501.c2
-rw-r--r--drivers/mfd/stmpe-i2c.c109
-rw-r--r--drivers/mfd/stmpe-spi.c150
-rw-r--r--drivers/mfd/stmpe.c277
-rw-r--r--drivers/mfd/stmpe.h53
-rw-r--r--drivers/mfd/t7l66xb.c16
-rw-r--r--drivers/mfd/tc6387xb.c14
-rw-r--r--drivers/mfd/ti-ssp.c12
-rw-r--r--drivers/mfd/timberdale.c2
-rw-r--r--drivers/mfd/tps6586x.c2
-rw-r--r--drivers/mfd/tps65910-irq.c3
-rw-r--r--drivers/mfd/tps65910.c9
-rw-r--r--drivers/mfd/tps65912-spi.c1
-rw-r--r--drivers/mfd/twl-core.c67
-rw-r--r--drivers/mfd/twl4030-audio.c12
-rw-r--r--drivers/mfd/twl4030-irq.c21
-rw-r--r--drivers/mfd/twl4030-madc.c14
-rw-r--r--drivers/mfd/twl4030-power.c42
-rw-r--r--drivers/mfd/twl6030-irq.c2
-rw-r--r--drivers/mfd/twl6040-core.c20
-rw-r--r--drivers/mfd/ucb1x00-core.c48
-rw-r--r--drivers/mfd/ucb1x00-ts.c2
-rw-r--r--drivers/mfd/vx855.c2
-rw-r--r--drivers/mfd/wm831x-core.c4
-rw-r--r--drivers/mfd/wm831x-i2c.c3
-rw-r--r--drivers/mfd/wm831x-irq.c8
-rw-r--r--drivers/mfd/wm831x-spi.c4
-rw-r--r--drivers/mfd/wm8350-core.c2
-rw-r--r--drivers/mfd/wm8350-i2c.c4
-rw-r--r--drivers/mfd/wm8400-core.c7
-rw-r--r--drivers/mfd/wm8994-core.c159
-rw-r--r--drivers/mfd/wm8994-irq.c206
-rw-r--r--drivers/mfd/wm8994-regmap.c1238
-rw-r--r--drivers/mfd/wm8994.h25
-rw-r--r--drivers/misc/Kconfig8
-rw-r--r--drivers/misc/Makefile1
-rw-r--r--drivers/misc/ab8500-pwm.c2
-rw-r--r--drivers/misc/ad525x_dpot-i2c.c10
-rw-r--r--drivers/misc/ad525x_dpot-spi.c97
-rw-r--r--drivers/misc/ad525x_dpot.c24
-rw-r--r--drivers/misc/ad525x_dpot.h8
-rw-r--r--drivers/misc/bmp085.c2
-rw-r--r--drivers/misc/carma/carma-fpga-program.c2
-rw-r--r--drivers/misc/eeprom/eeprom_93cx6.c88
-rw-r--r--drivers/misc/ibmasm/command.c2
-rw-r--r--drivers/misc/ibmasm/dot_command.c2
-rw-r--r--drivers/misc/ibmasm/dot_command.h2
-rw-r--r--drivers/misc/ibmasm/event.c2
-rw-r--r--drivers/misc/ibmasm/heartbeat.c2
-rw-r--r--drivers/misc/ibmasm/i2o.h2
-rw-r--r--drivers/misc/ibmasm/ibmasm.h2
-rw-r--r--drivers/misc/ibmasm/ibmasmfs.c2
-rw-r--r--drivers/misc/ibmasm/lowlevel.c2
-rw-r--r--drivers/misc/ibmasm/lowlevel.h2
-rw-r--r--drivers/misc/ibmasm/module.c2
-rw-r--r--drivers/misc/ibmasm/r_heartbeat.c2
-rw-r--r--drivers/misc/ibmasm/remote.h2
-rw-r--r--drivers/misc/ibmasm/uart.c2
-rw-r--r--drivers/misc/isl29020.c2
-rw-r--r--drivers/misc/iwmc3200top/main.c12
-rw-r--r--drivers/misc/lis3lv02d/lis3lv02d.c2
-rw-r--r--drivers/misc/max8997-muic.c505
-rw-r--r--drivers/misc/sgi-gru/gruprocfs.c2
-rw-r--r--drivers/misc/sgi-xp/xpnet.c2
-rw-r--r--drivers/misc/ti-st/st_core.c18
-rw-r--r--drivers/misc/ti-st/st_kim.c84
-rw-r--r--drivers/mmc/Makefile3
-rw-r--r--drivers/mmc/card/block.c253
-rw-r--r--drivers/mmc/card/mmc_test.c3
-rw-r--r--drivers/mmc/card/queue.c5
-rw-r--r--drivers/mmc/core/Makefile2
-rw-r--r--drivers/mmc/core/bus.c5
-rw-r--r--drivers/mmc/core/cd-gpio.c74
-rw-r--r--drivers/mmc/core/core.c199
-rw-r--r--drivers/mmc/core/core.h5
-rw-r--r--drivers/mmc/core/debugfs.c5
-rw-r--r--drivers/mmc/core/host.c64
-rw-r--r--drivers/mmc/core/mmc.c215
-rw-r--r--drivers/mmc/core/sd.c21
-rw-r--r--drivers/mmc/core/sdio.c342
-rw-r--r--drivers/mmc/core/sdio_io.c8
-rw-r--r--drivers/mmc/core/sdio_ops.c14
-rw-r--r--drivers/mmc/host/Makefile1
-rw-r--r--drivers/mmc/host/at91_mci.c38
-rw-r--r--drivers/mmc/host/atmel-mci.c10
-rw-r--r--drivers/mmc/host/au1xmmc.c45
-rw-r--r--drivers/mmc/host/bfin_sdh.c12
-rw-r--r--drivers/mmc/host/cb710-mmc.c13
-rw-r--r--drivers/mmc/host/dw_mmc.c71
-rw-r--r--drivers/mmc/host/dw_mmc.h2
-rw-r--r--drivers/mmc/host/jz4740_mmc.c12
-rw-r--r--drivers/mmc/host/mmc_spi.c1
-rw-r--r--drivers/mmc/host/mmci.c28
-rw-r--r--drivers/mmc/host/msm_sdcc.c19
-rw-r--r--drivers/mmc/host/mvsdio.c13
-rw-r--r--drivers/mmc/host/mxcmmc.c24
-rw-r--r--drivers/mmc/host/mxs-mmc.c33
-rw-r--r--drivers/mmc/host/omap.c4
-rw-r--r--drivers/mmc/host/omap_hsmmc.c76
-rw-r--r--drivers/mmc/host/pxamci.c13
-rw-r--r--drivers/mmc/host/s3cmci.c13
-rw-r--r--drivers/mmc/host/sdhci-cns3xxx.c18
-rw-r--r--drivers/mmc/host/sdhci-dove.c17
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c17
-rw-r--r--drivers/mmc/host/sdhci-esdhc.h2
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c17
-rw-r--r--drivers/mmc/host/sdhci-of-hlwd.c17
-rw-r--r--drivers/mmc/host/sdhci-pci-data.c5
-rw-r--r--drivers/mmc/host/sdhci-pci.c207
-rw-r--r--drivers/mmc/host/sdhci-pltfm.c18
-rw-r--r--drivers/mmc/host/sdhci-pltfm.h6
-rw-r--r--drivers/mmc/host/sdhci-pxav2.c17
-rw-r--r--drivers/mmc/host/sdhci-pxav3.c17
-rw-r--r--drivers/mmc/host/sdhci-s3c.c48
-rw-r--r--drivers/mmc/host/sdhci-spear.c51
-rw-r--r--drivers/mmc/host/sdhci-tegra.c17
-rw-r--r--drivers/mmc/host/sdhci.c152
-rw-r--r--drivers/mmc/host/sdhci.h3
-rw-r--r--drivers/mmc/host/sh_mmcif.c732
-rw-r--r--drivers/mmc/host/sh_mobile_sdhi.c13
-rw-r--r--drivers/mmc/host/tifm_sd.c20
-rw-r--r--drivers/mmc/host/tmio_mmc.c14
-rw-r--r--drivers/mmc/host/tmio_mmc.h4
-rw-r--r--drivers/mmc/host/tmio_mmc_dma.c4
-rw-r--r--drivers/mmc/host/tmio_mmc_pio.c32
-rw-r--r--drivers/mmc/host/vub300.c12
-rw-r--r--drivers/mtd/Kconfig8
-rw-r--r--drivers/mtd/Makefile1
-rw-r--r--drivers/mtd/afs.c4
-rw-r--r--drivers/mtd/ar7part.c15
-rw-r--r--drivers/mtd/bcm63xxpart.c222
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0020.c13
-rw-r--r--drivers/mtd/devices/Kconfig12
-rw-r--r--drivers/mtd/devices/block2mtd.c3
-rw-r--r--drivers/mtd/devices/doc2000.c9
-rw-r--r--drivers/mtd/devices/doc2001.c8
-rw-r--r--drivers/mtd/devices/doc2001plus.c9
-rw-r--r--drivers/mtd/devices/docg3.c1441
-rw-r--r--drivers/mtd/devices/docg3.h65
-rw-r--r--drivers/mtd/devices/docprobe.c7
-rw-r--r--drivers/mtd/devices/m25p80.c1
-rw-r--r--drivers/mtd/devices/mtd_dataflash.c1
-rw-r--r--drivers/mtd/devices/sst25l.c3
-rw-r--r--drivers/mtd/ftl.c81
-rw-r--r--drivers/mtd/inftlcore.c25
-rw-r--r--drivers/mtd/inftlmount.c19
-rw-r--r--drivers/mtd/lpddr/lpddr_cmds.c7
-rw-r--r--drivers/mtd/maps/Kconfig9
-rw-r--r--drivers/mtd/maps/Makefile1
-rw-r--r--drivers/mtd/maps/bcm963xx-flash.c277
-rw-r--r--drivers/mtd/maps/bfin-async-flash.c12
-rw-r--r--drivers/mtd/maps/gpio-addr-flash.c12
-rw-r--r--drivers/mtd/maps/ixp2000.c12
-rw-r--r--drivers/mtd/maps/ixp4xx.c14
-rw-r--r--drivers/mtd/maps/lantiq-flash.c6
-rw-r--r--drivers/mtd/maps/latch-addr-flash.c12
-rw-r--r--drivers/mtd/maps/physmap.c10
-rw-r--r--drivers/mtd/maps/physmap_of.c13
-rw-r--r--drivers/mtd/maps/plat-ram.c12
-rw-r--r--drivers/mtd/maps/pxa2xx-flash.c19
-rw-r--r--drivers/mtd/maps/rbtx4939-flash.c18
-rw-r--r--drivers/mtd/maps/sa1100-flash.c17
-rw-r--r--drivers/mtd/maps/scb2_flash.c3
-rw-r--r--drivers/mtd/maps/sun_uflash.c13
-rw-r--r--drivers/mtd/mtd_blkdevs.c3
-rw-r--r--drivers/mtd/mtdblock.c21
-rw-r--r--drivers/mtd/mtdblock_ro.c4
-rw-r--r--drivers/mtd/mtdchar.c203
-rw-r--r--drivers/mtd/mtdconcat.c53
-rw-r--r--drivers/mtd/mtdcore.c126
-rw-r--r--drivers/mtd/mtdoops.c47
-rw-r--r--drivers/mtd/mtdpart.c63
-rw-r--r--drivers/mtd/mtdswap.c29
-rw-r--r--drivers/mtd/nand/Kconfig3
-rw-r--r--drivers/mtd/nand/ams-delta.c12
-rw-r--r--drivers/mtd/nand/atmel_nand.c8
-rw-r--r--drivers/mtd/nand/au1550nd.c298
-rw-r--r--drivers/mtd/nand/bcm_umi_nand.c13
-rw-r--r--drivers/mtd/nand/davinci_nand.c4
-rw-r--r--drivers/mtd/nand/diskonchip.c4
-rw-r--r--drivers/mtd/nand/fsl_elbc_nand.c75
-rw-r--r--drivers/mtd/nand/fsl_upm.c12
-rw-r--r--drivers/mtd/nand/gpio.c115
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-lib.c34
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.c2
-rw-r--r--drivers/mtd/nand/jz4740_nand.c12
-rw-r--r--drivers/mtd/nand/mpc5121_nfc.c14
-rw-r--r--drivers/mtd/nand/nand_base.c7
-rw-r--r--drivers/mtd/nand/nand_bbt.c14
-rw-r--r--drivers/mtd/nand/nand_ids.c4
-rw-r--r--drivers/mtd/nand/nandsim.c2
-rw-r--r--drivers/mtd/nand/ndfc.c15
-rw-r--r--drivers/mtd/nand/nomadik_nand.c18
-rw-r--r--drivers/mtd/nand/nuc900_nand.c13
-rw-r--r--drivers/mtd/nand/omap2.c15
-rw-r--r--drivers/mtd/nand/pasemi_nand.c12
-rw-r--r--drivers/mtd/nand/plat_nand.c13
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c18
-rw-r--r--drivers/mtd/nand/r852.c2
-rw-r--r--drivers/mtd/nand/sharpsl.c12
-rw-r--r--drivers/mtd/nand/sm_common.c2
-rw-r--r--drivers/mtd/nand/socrates_nand.c13
-rw-r--r--drivers/mtd/nand/tmio_nand.c13
-rw-r--r--drivers/mtd/nand/txx9ndfmc.c6
-rw-r--r--drivers/mtd/nftlcore.c25
-rw-r--r--drivers/mtd/nftlmount.c13
-rw-r--r--drivers/mtd/onenand/generic.c16
-rw-r--r--drivers/mtd/onenand/onenand_base.c3
-rw-r--r--drivers/mtd/onenand/samsung.c13
-rw-r--r--drivers/mtd/redboot.c12
-rw-r--r--drivers/mtd/rfd_ftl.c46
-rw-r--r--drivers/mtd/sm_ftl.c12
-rw-r--r--drivers/mtd/ssfdc.c12
-rw-r--r--drivers/mtd/tests/mtd_oobtest.c28
-rw-r--r--drivers/mtd/tests/mtd_pagetest.c57
-rw-r--r--drivers/mtd/tests/mtd_readtest.c11
-rw-r--r--drivers/mtd/tests/mtd_speedtest.c37
-rw-r--r--drivers/mtd/tests/mtd_stresstest.c22
-rw-r--r--drivers/mtd/tests/mtd_subpagetest.c32
-rw-r--r--drivers/mtd/tests/mtd_torturetest.c15
-rw-r--r--drivers/mtd/ubi/build.c2
-rw-r--r--drivers/mtd/ubi/cdev.c3
-rw-r--r--drivers/mtd/ubi/debug.c2
-rw-r--r--drivers/mtd/ubi/debug.h5
-rw-r--r--drivers/mtd/ubi/eba.c6
-rw-r--r--drivers/mtd/ubi/io.c19
-rw-r--r--drivers/mtd/ubi/kapi.c4
-rw-r--r--drivers/mtd/ubi/ubi.h2
-rw-r--r--drivers/mtd/ubi/vtbl.c6
-rw-r--r--drivers/mtd/ubi/wl.c12
-rw-r--r--drivers/net/Kconfig6
-rw-r--r--drivers/net/Makefile4
-rw-r--r--drivers/net/arcnet/Kconfig2
-rw-r--r--drivers/net/bonding/bond_alb.c112
-rw-r--r--drivers/net/bonding/bond_ipv6.c225
-rw-r--r--drivers/net/bonding/bond_main.c116
-rw-r--r--drivers/net/bonding/bond_sysfs.c1
-rw-r--r--drivers/net/caif/caif_hsi.c13
-rw-r--r--drivers/net/caif/caif_serial.c10
-rw-r--r--drivers/net/caif/caif_shmcore.c27
-rw-r--r--drivers/net/caif/caif_spi.c178
-rw-r--r--drivers/net/can/Kconfig2
-rw-r--r--drivers/net/can/Makefile1
-rw-r--r--drivers/net/can/at91_can.c13
-rw-r--r--drivers/net/can/bfin_can.c12
-rw-r--r--drivers/net/can/c_can/c_can_platform.c12
-rw-r--r--drivers/net/can/cc770/Kconfig21
-rw-r--r--drivers/net/can/cc770/Makefile9
-rw-r--r--drivers/net/can/cc770/cc770.c881
-rw-r--r--drivers/net/can/cc770/cc770.h203
-rw-r--r--drivers/net/can/cc770/cc770_isa.c367
-rw-r--r--drivers/net/can/cc770/cc770_platform.c272
-rw-r--r--drivers/net/can/dev.c2
-rw-r--r--drivers/net/can/flexcan.c25
-rw-r--r--drivers/net/can/janz-ican3.c13
-rw-r--r--drivers/net/can/mscan/mpc5xxx_can.c12
-rw-r--r--drivers/net/can/mscan/mscan.c8
-rw-r--r--drivers/net/can/sja1000/Kconfig1
-rw-r--r--drivers/net/can/sja1000/peak_pci.c1
-rw-r--r--drivers/net/can/sja1000/sja1000_isa.c118
-rw-r--r--drivers/net/can/sja1000/sja1000_of_platform.c12
-rw-r--r--drivers/net/can/sja1000/sja1000_platform.c13
-rw-r--r--drivers/net/can/slcan.c2
-rw-r--r--drivers/net/can/softing/softing_main.c16
-rw-r--r--drivers/net/can/ti_hecc.c15
-rw-r--r--drivers/net/can/vcan.c2
-rw-r--r--drivers/net/dsa/Kconfig36
-rw-r--r--drivers/net/dsa/Makefile9
-rw-r--r--drivers/net/dsa/mv88e6060.c293
-rw-r--r--drivers/net/dsa/mv88e6123_61_65.c438
-rw-r--r--drivers/net/dsa/mv88e6131.c435
-rw-r--r--drivers/net/dsa/mv88e6xxx.c549
-rw-r--r--drivers/net/dsa/mv88e6xxx.h98
-rw-r--r--drivers/net/dummy.c2
-rw-r--r--drivers/net/ethernet/3com/3c589_cs.c7
-rw-r--r--drivers/net/ethernet/3com/3c59x.c12
-rw-r--r--drivers/net/ethernet/3com/typhoon.c16
-rw-r--r--drivers/net/ethernet/8390/8390.h2
-rw-r--r--drivers/net/ethernet/8390/apne.c2
-rw-r--r--drivers/net/ethernet/8390/ax88796.c21
-rw-r--r--drivers/net/ethernet/8390/es3210.c2
-rw-r--r--drivers/net/ethernet/8390/hp-plus.c2
-rw-r--r--drivers/net/ethernet/8390/hp.c2
-rw-r--r--drivers/net/ethernet/8390/hydra.c2
-rw-r--r--drivers/net/ethernet/8390/lne390.c4
-rw-r--r--drivers/net/ethernet/8390/ne-h8300.c2
-rw-r--r--drivers/net/ethernet/8390/ne.c4
-rw-r--r--drivers/net/ethernet/8390/ne2.c2
-rw-r--r--drivers/net/ethernet/8390/ne2k-pci.c6
-rw-r--r--drivers/net/ethernet/8390/ne3210.c2
-rw-r--r--drivers/net/ethernet/8390/stnic.c2
-rw-r--r--drivers/net/ethernet/8390/zorro8390.c2
-rw-r--r--drivers/net/ethernet/Kconfig1
-rw-r--r--drivers/net/ethernet/Makefile3
-rw-r--r--drivers/net/ethernet/adaptec/starfire.c14
-rw-r--r--drivers/net/ethernet/adi/bfin_mac.c3
-rw-r--r--drivers/net/ethernet/aeroflex/greth.c13
-rw-r--r--drivers/net/ethernet/amd/amd8111e.c15
-rw-r--r--drivers/net/ethernet/amd/amd8111e.h5
-rw-r--r--drivers/net/ethernet/amd/au1000_eth.c18
-rw-r--r--drivers/net/ethernet/amd/nmclan_cs.c19
-rw-r--r--drivers/net/ethernet/amd/pcnet32.c10
-rw-r--r--drivers/net/ethernet/amd/sunlance.c15
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c1
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c13
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c10
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_main.c13
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c1
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.c23
-rw-r--r--drivers/net/ethernet/atheros/atlx/atlx.c13
-rw-r--r--drivers/net/ethernet/broadcom/b44.c2
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c196
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.h17
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h70
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c366
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h116
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c61
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c83
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h217
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c1287
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c478
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h71
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c35
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h13
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c112
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h6
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c130
-rw-r--r--drivers/net/ethernet/broadcom/cnic_defs.h1
-rw-r--r--drivers/net/ethernet/broadcom/cnic_if.h16
-rw-r--r--drivers/net/ethernet/broadcom/sb1250-mac.c19
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c734
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h44
-rw-r--r--drivers/net/ethernet/brocade/bna/Makefile2
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_cee.c35
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_cee.h4
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_defs.h98
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.c493
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.h54
-rw-r--r--drivers/net/ethernet/brocade/bna/bfi.h97
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_enet.c13
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_types.h7
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c70
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.h29
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad_debugfs.c623
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad_ethtool.c157
-rw-r--r--drivers/net/ethernet/brocade/bna/cna.h3
-rw-r--r--drivers/net/ethernet/brocade/bna/cna_fwimg.c1
-rw-r--r--drivers/net/ethernet/cadence/Kconfig16
-rw-r--r--drivers/net/ethernet/cadence/at91_ether.c26
-rw-r--r--drivers/net/ethernet/cadence/at91_ether.h4
-rw-r--r--drivers/net/ethernet/cadence/macb.c419
-rw-r--r--drivers/net/ethernet/cadence/macb.h152
-rw-r--r--drivers/net/ethernet/calxeda/Kconfig7
-rw-r--r--drivers/net/ethernet/calxeda/Makefile1
-rw-r--r--drivers/net/ethernet/calxeda/xgmac.c1928
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/cxgb2.c15
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/sge.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/sge.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c21
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c26
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/l2t.c27
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/l2t.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c56
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c19
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c7
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_dev.c14
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_dev.h4
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c10
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c7
-rw-r--r--drivers/net/ethernet/dec/tulip/de2104x.c6
-rw-r--r--drivers/net/ethernet/dec/tulip/de4x5.c13
-rw-r--r--drivers/net/ethernet/dec/tulip/dmfe.c7
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip_core.c6
-rw-r--r--drivers/net/ethernet/dec/tulip/uli526x.c7
-rw-r--r--drivers/net/ethernet/dec/tulip/winbond-840.c6
-rw-r--r--drivers/net/ethernet/dlink/de600.c2
-rw-r--r--drivers/net/ethernet/dlink/sundance.c6
-rw-r--r--drivers/net/ethernet/dnet.c22
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h39
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c237
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h84
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c120
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c588
-rw-r--r--drivers/net/ethernet/ethoc.c13
-rw-r--r--drivers/net/ethernet/fealnx.c6
-rw-r--r--drivers/net/ethernet/freescale/Kconfig5
-rw-r--r--drivers/net/ethernet/freescale/fec.c99
-rw-r--r--drivers/net/ethernet/freescale/fec.h4
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c15
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c13
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mii-fec.c13
-rw-r--r--drivers/net/ethernet/freescale/fsl_pq_mdio.c69
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c62
-rw-r--r--drivers/net/ethernet/freescale/gianfar.h10
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c12
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ptp.c16
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c2
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.h6
-rw-r--r--drivers/net/ethernet/fujitsu/fmvj18x_cs.c7
-rw-r--r--drivers/net/ethernet/i825xx/eepro.c7
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea.h4
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_ethtool.c2
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c59
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_qmr.c14
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c3
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c6
-rw-r--r--drivers/net/ethernet/ibm/iseries_veth.c2
-rw-r--r--drivers/net/ethernet/icplus/ipg.c13
-rw-r--r--drivers/net/ethernet/intel/e100.c8
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_ethtool.c12
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_hw.h5
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c61
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h1
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c18
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c465
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c5
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h5
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c37
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c359
-rw-r--r--drivers/net/ethernet/intel/igbvf/ethtool.c10
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c37
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c10
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_main.c22
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c10
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c96
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c31
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c83
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c49
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c13
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h10
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c18
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/defines.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ethtool.c16
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c40
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/mbx.h4
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/regs.h42
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.c4
-rw-r--r--drivers/net/ethernet/jme.c129
-rw-r--r--drivers/net/ethernet/jme.h19
-rw-r--r--drivers/net/ethernet/korina.c13
-rw-r--r--drivers/net/ethernet/lantiq_etop.c5
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c27
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c16
-rw-r--r--drivers/net/ethernet/marvell/skge.c15
-rw-r--r--drivers/net/ethernet/marvell/sky2.c39
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/Makefile2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/catas.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c1332
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cq.c141
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_cq.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c31
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c60
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_port.c84
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_port.h43
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_resources.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c31
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_selftest.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c96
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c430
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c415
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.h28
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/icm.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/intf.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c902
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c228
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h670
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h17
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mr.c486
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/pd.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c616
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/profile.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/qp.c238
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c3104
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/sense.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/srq.c132
-rw-r--r--drivers/net/ethernet/micrel/Kconfig2
-rw-r--r--drivers/net/ethernet/micrel/ks8842.c17
-rw-r--r--drivers/net/ethernet/micrel/ks8851.c513
-rw-r--r--drivers/net/ethernet/micrel/ks8851.h15
-rw-r--r--drivers/net/ethernet/micrel/ks8851_mll.c16
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c79
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c5
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp.h2
-rw-r--r--drivers/net/ethernet/natsemi/jazzsonic.c13
-rw-r--r--drivers/net/ethernet/natsemi/macsonic.c20
-rw-r--r--drivers/net/ethernet/natsemi/natsemi.c6
-rw-r--r--drivers/net/ethernet/natsemi/ns83820.c6
-rw-r--r--drivers/net/ethernet/natsemi/xtsonic.c13
-rw-r--r--drivers/net/ethernet/neterion/s2io.c11
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c15
-rw-r--r--drivers/net/ethernet/nuvoton/w90p910_ether.c13
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c503
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c8
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c5
-rw-r--r--drivers/net/ethernet/pasemi/Makefile3
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c12
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c6
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c9
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h5
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c13
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c9
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c12
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge.h8
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c11
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c48
-rw-r--r--drivers/net/ethernet/rdc/r6040.c42
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c20
-rw-r--r--drivers/net/ethernet/realtek/8139too.c8
-rw-r--r--drivers/net/ethernet/realtek/r8169.c89
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c22
-rw-r--r--drivers/net/ethernet/s6gmac.c2
-rw-r--r--drivers/net/ethernet/seeq/sgiseeq.c18
-rw-r--r--drivers/net/ethernet/sfc/efx.c9
-rw-r--r--drivers/net/ethernet/sfc/efx.h20
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c221
-rw-r--r--drivers/net/ethernet/sfc/falcon.c2
-rw-r--r--drivers/net/ethernet/sfc/filter.c310
-rw-r--r--drivers/net/ethernet/sfc/filter.h12
-rw-r--r--drivers/net/ethernet/sfc/mtd.c6
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h2
-rw-r--r--drivers/net/ethernet/sfc/rx.c9
-rw-r--r--drivers/net/ethernet/sfc/selftest.c4
-rw-r--r--drivers/net/ethernet/sfc/siena.c2
-rw-r--r--drivers/net/ethernet/sfc/tx.c29
-rw-r--r--drivers/net/ethernet/sgi/meth.c67
-rw-r--r--drivers/net/ethernet/sis/sis190.c15
-rw-r--r--drivers/net/ethernet/sis/sis900.c7
-rw-r--r--drivers/net/ethernet/sis/sis900.h2
-rw-r--r--drivers/net/ethernet/smsc/epic100.c6
-rw-r--r--drivers/net/ethernet/smsc/smc911x.c13
-rw-r--r--drivers/net/ethernet/smsc/smc91c92_cs.c4
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c13
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c206
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.h2
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig27
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c13
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/mmc_core.c1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h13
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c457
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c221
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c198
-rw-r--r--drivers/net/ethernet/sun/cassini.c7
-rw-r--r--drivers/net/ethernet/sun/niu.c42
-rw-r--r--drivers/net/ethernet/sun/sunbmac.c13
-rw-r--r--drivers/net/ethernet/sun/sungem.c6
-rw-r--r--drivers/net/ethernet/sun/sunhme.c18
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c6
-rw-r--r--drivers/net/ethernet/ti/cpmac.c2
-rw-r--r--drivers/net/ethernet/ti/davinci_cpdma.c2
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c8
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c3
-rw-r--r--drivers/net/ethernet/tile/tilepro.c15
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.c4
-rw-r--r--drivers/net/ethernet/tundra/tsi108_eth.c26
-rw-r--r--drivers/net/ethernet/via/via-rhine.c689
-rw-r--r--drivers/net/ethernet/via/via-velocity.c12
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c30
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c30
-rw-r--r--drivers/net/ethernet/xircom/xirc2ps_cs.c2
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c2
-rw-r--r--drivers/net/hyperv/Kconfig5
-rw-r--r--drivers/net/hyperv/Makefile3
-rw-r--r--drivers/net/hyperv/hyperv_net.h (renamed from drivers/staging/hv/hyperv_net.h)139
-rw-r--r--drivers/net/hyperv/netvsc.c (renamed from drivers/staging/hv/netvsc.c)164
-rw-r--r--drivers/net/hyperv/netvsc_drv.c (renamed from drivers/staging/hv/netvsc_drv.c)162
-rw-r--r--drivers/net/hyperv/rndis_filter.c (renamed from drivers/staging/hv/rndis_filter.c)44
-rw-r--r--drivers/net/ifb.c2
-rw-r--r--drivers/net/irda/Kconfig6
-rw-r--r--drivers/net/irda/au1000_ircc.h125
-rw-r--r--drivers/net/irda/au1k_ir.c1229
-rw-r--r--drivers/net/irda/bfin_sir.c13
-rw-r--r--drivers/net/irda/donauboe.c2
-rw-r--r--drivers/net/irda/nsc-ircc.c2
-rw-r--r--drivers/net/irda/pxaficp_ir.c13
-rw-r--r--drivers/net/irda/sh_irda.c13
-rw-r--r--drivers/net/irda/sh_sir.c13
-rw-r--r--drivers/net/irda/smsc-ircc2.c2
-rw-r--r--drivers/net/irda/stir4200.c2
-rw-r--r--drivers/net/irda/via-ircc.c4
-rw-r--r--drivers/net/irda/w83977af_ir.c2
-rw-r--r--drivers/net/loopback.c2
-rw-r--r--drivers/net/macvlan.c14
-rw-r--r--drivers/net/macvtap.c24
-rw-r--r--drivers/net/mii.c53
-rw-r--r--drivers/net/phy/Kconfig6
-rw-r--r--drivers/net/phy/Makefile1
-rw-r--r--drivers/net/phy/dp83640.c2
-rw-r--r--drivers/net/phy/fixed.c2
-rw-r--r--drivers/net/phy/mdio-bitbang.c9
-rw-r--r--drivers/net/phy/mdio-gpio.c3
-rw-r--r--drivers/net/phy/mdio-octeon.c3
-rw-r--r--drivers/net/phy/mdio_bus.c24
-rw-r--r--drivers/net/phy/phy_device.c20
-rw-r--r--drivers/net/phy/smsc.c21
-rw-r--r--drivers/net/phy/spi_ks8995.c375
-rw-r--r--drivers/net/ppp/pptp.c10
-rw-r--r--drivers/net/team/Kconfig43
-rw-r--r--drivers/net/team/Makefile7
-rw-r--r--drivers/net/team/team.c1684
-rw-r--r--drivers/net/team/team_mode_activebackup.c136
-rw-r--r--drivers/net/team/team_mode_roundrobin.c107
-rw-r--r--drivers/net/tun.c16
-rw-r--r--drivers/net/usb/asix.c29
-rw-r--r--drivers/net/usb/cdc-phonet.c10
-rw-r--r--drivers/net/usb/cdc_ncm.c8
-rw-r--r--drivers/net/usb/ipheth.c2
-rw-r--r--drivers/net/usb/pegasus.c8
-rw-r--r--drivers/net/usb/sierra_net.c2
-rw-r--r--drivers/net/usb/smsc75xx.c5
-rw-r--r--drivers/net/usb/smsc95xx.c5
-rw-r--r--drivers/net/veth.c9
-rw-r--r--drivers/net/virtio_net.c173
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c11
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethtool.c46
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h2
-rw-r--r--drivers/net/wan/sbni.c2
-rw-r--r--drivers/net/wan/sealevel.c2
-rw-r--r--drivers/net/wimax/i2400m/i2400m.h2
-rw-r--r--drivers/net/wimax/i2400m/tx.c8
-rw-r--r--drivers/net/wimax/i2400m/usb-tx.c7
-rw-r--r--drivers/net/wireless/Makefile8
-rw-r--r--drivers/net/wireless/airo.c4
-rw-r--r--drivers/net/wireless/ath/Makefile1
-rw-r--r--drivers/net/wireless/ath/ath.h14
-rw-r--r--drivers/net/wireless/ath/ath5k/ahb.c4
-rw-r--r--drivers/net/wireless/ath/ath5k/ani.c91
-rw-r--r--drivers/net/wireless/ath/ath5k/ani.h32
-rw-r--r--drivers/net/wireless/ath/ath5k/ath5k.h571
-rw-r--r--drivers/net/wireless/ath/ath5k/attach.c16
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c293
-rw-r--r--drivers/net/wireless/ath/ath5k/caps.c27
-rw-r--r--drivers/net/wireless/ath/ath5k/desc.c217
-rw-r--r--drivers/net/wireless/ath/ath5k/desc.h124
-rw-r--r--drivers/net/wireless/ath/ath5k/dma.c370
-rw-r--r--drivers/net/wireless/ath/ath5k/gpio.c81
-rw-r--r--drivers/net/wireless/ath/ath5k/initvals.c75
-rw-r--r--drivers/net/wireless/ath/ath5k/pci.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/pcu.c222
-rw-r--r--drivers/net/wireless/ath/ath5k/phy.c853
-rw-r--r--drivers/net/wireless/ath/ath5k/qcu.c143
-rw-r--r--drivers/net/wireless/ath/ath5k/reg.h27
-rw-r--r--drivers/net/wireless/ath/ath5k/reset.c234
-rw-r--r--drivers/net/wireless/ath/ath5k/rfbuffer.h59
-rw-r--r--drivers/net/wireless/ath/ath5k/rfgain.h22
-rw-r--r--drivers/net/wireless/ath/ath5k/trace.h5
-rw-r--r--drivers/net/wireless/ath/ath6kl/Makefile2
-rw-r--r--drivers/net/wireless/ath/ath6kl/bmi.c244
-rw-r--r--drivers/net/wireless/ath/ath6kl/bmi.h8
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c1695
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.h32
-rw-r--r--drivers/net/wireless/ath/ath6kl/common.h17
-rw-r--r--drivers/net/wireless/ath/ath6kl/core.h309
-rw-r--r--drivers/net/wireless/ath/ath6kl/debug.c849
-rw-r--r--drivers/net/wireless/ath/ath6kl/debug.h34
-rw-r--r--drivers/net/wireless/ath/ath6kl/hif-ops.h79
-rw-r--r--drivers/net/wireless/ath/ath6kl/hif.c (renamed from drivers/net/wireless/ath/ath6kl/htc_hif.c)150
-rw-r--r--drivers/net/wireless/ath/ath6kl/hif.h66
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc.c725
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc.h18
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_hif.h92
-rw-r--r--drivers/net/wireless/ath/ath6kl/init.c1013
-rw-r--r--drivers/net/wireless/ath/ath6kl/main.c771
-rw-r--r--drivers/net/wireless/ath/ath6kl/sdio.c660
-rw-r--r--drivers/net/wireless/ath/ath6kl/target.h19
-rw-r--r--drivers/net/wireless/ath/ath6kl/txrx.c219
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.c834
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.h386
-rw-r--r--drivers/net/wireless/ath/ath9k/Kconfig31
-rw-r--r--drivers/net/wireless/ath/ath9k/Makefile6
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.c29
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_phy.c42
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_calib.c136
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_mac.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_calib.c155
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c223
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.h10
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mac.c50
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mac.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mci.c1493
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mci.h102
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_paprd.c17
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c50
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h5
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_rtt.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h60
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h20
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c46
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.c135
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.h41
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.c21
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c18
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs.c215
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs.h43
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs_debug.c81
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs_debug.h57
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.h3
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_4k.c29
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_9287.c20
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_def.c36
-rw-r--r--drivers/net/wireless/ath/ath9k/gpio.c28
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_beacon.c51
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_gpio.c20
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c25
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c93
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c13
-rw-r--r--drivers/net/wireless/ath/ath9k/hw-ops.h9
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c282
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h243
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c66
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c58
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c346
-rw-r--r--drivers/net/wireless/ath/ath9k/mci.c668
-rw-r--r--drivers/net/wireless/ath/ath9k/mci.h134
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c23
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.c11
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c52
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h321
-rw-r--r--drivers/net/wireless/ath/ath9k/wmi.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c359
-rw-r--r--drivers/net/wireless/ath/carl9170/debug.c2
-rw-r--r--drivers/net/wireless/ath/carl9170/fw.c97
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c14
-rw-r--r--drivers/net/wireless/ath/carl9170/tx.c4
-rw-r--r--drivers/net/wireless/ath/key.c8
-rw-r--r--drivers/net/wireless/ath/regd.c77
-rw-r--r--drivers/net/wireless/b43/b43.h20
-rw-r--r--drivers/net/wireless/b43/dma.c27
-rw-r--r--drivers/net/wireless/b43/leds.c16
-rw-r--r--drivers/net/wireless/b43/lo.c8
-rw-r--r--drivers/net/wireless/b43/main.c160
-rw-r--r--drivers/net/wireless/b43/phy_common.c8
-rw-r--r--drivers/net/wireless/b43/phy_g.c34
-rw-r--r--drivers/net/wireless/b43/phy_lp.c8
-rw-r--r--drivers/net/wireless/b43/phy_n.c4112
-rw-r--r--drivers/net/wireless/b43/phy_n.h14
-rw-r--r--drivers/net/wireless/b43/pio.c22
-rw-r--r--drivers/net/wireless/b43/radio_2056.c25
-rw-r--r--drivers/net/wireless/b43/radio_2056.h1
-rw-r--r--drivers/net/wireless/b43/tables_nphy.c183
-rw-r--r--drivers/net/wireless/b43/tables_nphy.h31
-rw-r--r--drivers/net/wireless/b43/xmit.c4
-rw-r--r--drivers/net/wireless/b43legacy/b43legacy.h20
-rw-r--r--drivers/net/wireless/b43legacy/dma.c81
-rw-r--r--drivers/net/wireless/b43legacy/dma.h5
-rw-r--r--drivers/net/wireless/b43legacy/leds.c4
-rw-r--r--drivers/net/wireless/b43legacy/main.c150
-rw-r--r--drivers/net/wireless/b43legacy/radio.c20
-rw-r--r--drivers/net/wireless/brcm80211/Kconfig21
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/Makefile21
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmchip.h32
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c153
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c218
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd.h140
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h96
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c50
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c38
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h15
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c711
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h7
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c1296
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c607
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h136
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h41
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c52
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h9
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/aiutils.c1251
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/aiutils.h224
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/ampdu.c13
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/channel.c118
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/d11.h3
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/dma.c469
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/dma.h12
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c336
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h4
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/main.c1408
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/main.h29
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/nicpci.c241
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/nicpci.h11
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/otp.c76
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c241
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phy_hal.h4
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h11
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c101
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c122
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/pmu.c271
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/pmu.h5
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/pub.h44
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/rate.h5
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/srom.c508
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/srom.h7
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/types.h54
-rw-r--r--drivers/net/wireless/brcm80211/brcmutil/utils.c218
-rw-r--r--drivers/net/wireless/brcm80211/include/brcmu_utils.h30
-rw-r--r--drivers/net/wireless/brcm80211/include/chipcommon.h2
-rw-r--r--drivers/net/wireless/brcm80211/include/defs.h1
-rw-r--r--drivers/net/wireless/brcm80211/include/soc.h12
-rw-r--r--drivers/net/wireless/hostap/hostap_cs.c3
-rw-r--r--drivers/net/wireless/hostap/hostap_ioctl.c4
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.c7
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c19
-rw-r--r--drivers/net/wireless/ipw2x00/libipw.h12
-rw-r--r--drivers/net/wireless/ipw2x00/libipw_wx.c25
-rw-r--r--drivers/net/wireless/iwlegacy/3945-debug.c505
-rw-r--r--drivers/net/wireless/iwlegacy/3945-mac.c3976
-rw-r--r--drivers/net/wireless/iwlegacy/3945-rs.c986
-rw-r--r--drivers/net/wireless/iwlegacy/3945.c2743
-rw-r--r--drivers/net/wireless/iwlegacy/3945.h607
-rw-r--r--drivers/net/wireless/iwlegacy/4965-calib.c (renamed from drivers/net/wireless/iwlegacy/iwl-4965-calib.c)613
-rw-r--r--drivers/net/wireless/iwlegacy/4965-debug.c746
-rw-r--r--drivers/net/wireless/iwlegacy/4965-mac.c6515
-rw-r--r--drivers/net/wireless/iwlegacy/4965-rs.c2860
-rw-r--r--drivers/net/wireless/iwlegacy/4965.c2402
-rw-r--r--drivers/net/wireless/iwlegacy/4965.h1301
-rw-r--r--drivers/net/wireless/iwlegacy/Kconfig43
-rw-r--r--drivers/net/wireless/iwlegacy/Makefile24
-rw-r--r--drivers/net/wireless/iwlegacy/commands.h (renamed from drivers/net/wireless/iwlegacy/iwl-commands.h)1134
-rw-r--r--drivers/net/wireless/iwlegacy/common.c5867
-rw-r--r--drivers/net/wireless/iwlegacy/common.h3246
-rw-r--r--drivers/net/wireless/iwlegacy/csr.h (renamed from drivers/net/wireless/iwlegacy/iwl-csr.h)93
-rw-r--r--drivers/net/wireless/iwlegacy/debug.c1411
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-debugfs.c523
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-debugfs.h60
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-fh.h187
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-hw.h291
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-led.c63
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-led.h32
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-rs.c996
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945.c2741
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945.h308
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c774
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h59
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c154
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-hw.h811
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-led.c73
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-led.h33
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-lib.c1194
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-rs.c2871
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-rx.c215
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-sta.c721
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-tx.c1378
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-ucode.c166
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965.c2183
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965.h282
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-core.c2661
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-core.h636
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-debug.h198
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-debugfs.c1314
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-dev.h1364
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-devtrace.c42
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-devtrace.h210
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-eeprom.c553
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-eeprom.h344
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-fh.h513
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-hcmd.c271
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-helpers.h196
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-io.h545
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-led.c205
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-led.h56
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-legacy-rs.h456
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-power.c165
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-power.h55
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-rx.c282
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-scan.c550
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-spectrum.h4
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-sta.c817
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-sta.h148
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-tx.c659
-rw-r--r--drivers/net/wireless/iwlegacy/iwl3945-base.c4016
-rw-r--r--drivers/net/wireless/iwlegacy/iwl4965-base.c3281
-rw-r--r--drivers/net/wireless/iwlegacy/prph.h (renamed from drivers/net/wireless/iwlegacy/iwl-prph.h)133
-rw-r--r--drivers/net/wireless/iwlwifi/Kconfig30
-rw-r--r--drivers/net/wireless/iwlwifi/Makefile6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-1000.c24
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-2000.c62
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c49
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c58
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-calib.c84
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-calib.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-lib.c436
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.c37
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.h3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rx.c78
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rxon.c63
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-sta.c103
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-tt.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-tx.c365
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c1685
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.h64
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-bus.h16
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-cfg.h6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-commands.h111
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c403
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h94
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-csr.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.h102
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debugfs.c49
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-dev.h190
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.h52
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.c252
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.h7
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.c21
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.h5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-led.c8
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-led.h14
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-mac80211.c1601
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-pci.c30
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.c8
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-scan.c68
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-shared.h241
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-testmode.c (renamed from drivers/net/wireless/iwlwifi/iwl-sv-open.c)265
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-testmode.h66
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h53
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c140
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c188
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans-pcie.c185
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans.h94
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-ucode.c (renamed from drivers/net/wireless/iwlwifi/iwl-agn-ucode.c)418
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-wifi.h (renamed from drivers/net/wireless/iwlegacy/iwl-4965-calib.h)21
-rw-r--r--drivers/net/wireless/iwmc3200wifi/cfg80211.c15
-rw-r--r--drivers/net/wireless/iwmc3200wifi/main.c6
-rw-r--r--drivers/net/wireless/iwmc3200wifi/rx.c12
-rw-r--r--drivers/net/wireless/libertas/cfg.c35
-rw-r--r--drivers/net/wireless/libertas/debugfs.c2
-rw-r--r--drivers/net/wireless/libertas/ethtool.c7
-rw-r--r--drivers/net/wireless/libertas/if_cs.c4
-rw-r--r--drivers/net/wireless/libertas/if_spi.c1
-rw-r--r--drivers/net/wireless/libertas_tf/main.c4
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c27
-rw-r--r--drivers/net/wireless/mwifiex/11n_rxreorder.c18
-rw-r--r--drivers/net/wireless/mwifiex/Kconfig4
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.c323
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.h1
-rw-r--r--drivers/net/wireless/mwifiex/cfp.c38
-rw-r--r--drivers/net/wireless/mwifiex/cmdevt.c9
-rw-r--r--drivers/net/wireless/mwifiex/fw.h5
-rw-r--r--drivers/net/wireless/mwifiex/init.c46
-rw-r--r--drivers/net/wireless/mwifiex/ioctl.h11
-rw-r--r--drivers/net/wireless/mwifiex/join.c106
-rw-r--r--drivers/net/wireless/mwifiex/main.c13
-rw-r--r--drivers/net/wireless/mwifiex/main.h26
-rw-r--r--drivers/net/wireless/mwifiex/pcie.c27
-rw-r--r--drivers/net/wireless/mwifiex/scan.c28
-rw-r--r--drivers/net/wireless/mwifiex/sdio.c43
-rw-r--r--drivers/net/wireless/mwifiex/sdio.h1
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmd.c2
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmdresp.c2
-rw-r--r--drivers/net/wireless/mwifiex/sta_event.c23
-rw-r--r--drivers/net/wireless/mwifiex/sta_ioctl.c76
-rw-r--r--drivers/net/wireless/mwifiex/sta_rx.c12
-rw-r--r--drivers/net/wireless/mwifiex/txrx.c5
-rw-r--r--drivers/net/wireless/mwl8k.c166
-rw-r--r--drivers/net/wireless/orinoco/main.c2
-rw-r--r--drivers/net/wireless/orinoco/scan.c16
-rw-r--r--drivers/net/wireless/p54/main.c2
-rw-r--r--drivers/net/wireless/p54/p54spi.c12
-rw-r--r--drivers/net/wireless/p54/txrx.c4
-rw-r--r--drivers/net/wireless/prism54/isl_ioctl.c335
-rw-r--r--drivers/net/wireless/prism54/isl_ioctl.h2
-rw-r--r--drivers/net/wireless/prism54/islpci_dev.c5
-rw-r--r--drivers/net/wireless/ray_cs.c4
-rw-r--r--drivers/net/wireless/rayctl.h4
-rw-r--r--drivers/net/wireless/rndis_wlan.c109
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2800.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c55
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.c30
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c69
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.c16
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c2
-rw-r--r--drivers/net/wireless/rtlwifi/base.c10
-rw-r--r--drivers/net/wireless/rtlwifi/base.h2
-rw-r--r--drivers/net/wireless/rtlwifi/pci.c42
-rw-r--r--drivers/net/wireless/rtlwifi/ps.c21
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c62
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h23
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/phy.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/sw.c4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/hw.c11
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/mac.c1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/phy.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/sw.c40
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/trx.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/hw.c4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/phy.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/sw.c4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/fw.c4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/phy.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/sw.c4
-rw-r--r--drivers/net/wireless/rtlwifi/usb.c55
-rw-r--r--drivers/net/wireless/rtlwifi/wifi.h14
-rw-r--r--drivers/net/wireless/wl1251/spi.c1
-rw-r--r--drivers/net/wireless/wl12xx/Kconfig10
-rw-r--r--drivers/net/wireless/wl12xx/Makefile3
-rw-r--r--drivers/net/wireless/wl12xx/acx.c172
-rw-r--r--drivers/net/wireless/wl12xx/acx.h91
-rw-r--r--drivers/net/wireless/wl12xx/boot.c15
-rw-r--r--drivers/net/wireless/wl12xx/cmd.c371
-rw-r--r--drivers/net/wireless/wl12xx/cmd.h50
-rw-r--r--drivers/net/wireless/wl12xx/conf.h4
-rw-r--r--drivers/net/wireless/wl12xx/debug.h101
-rw-r--r--drivers/net/wireless/wl12xx/debugfs.c157
-rw-r--r--drivers/net/wireless/wl12xx/event.c216
-rw-r--r--drivers/net/wireless/wl12xx/event.h3
-rw-r--r--drivers/net/wireless/wl12xx/init.c491
-rw-r--r--drivers/net/wireless/wl12xx/init.h8
-rw-r--r--drivers/net/wireless/wl12xx/io.c12
-rw-r--r--drivers/net/wireless/wl12xx/io.h23
-rw-r--r--drivers/net/wireless/wl12xx/main.c2058
-rw-r--r--drivers/net/wireless/wl12xx/ps.c62
-rw-r--r--drivers/net/wireless/wl12xx/ps.h9
-rw-r--r--drivers/net/wireless/wl12xx/reg.h2
-rw-r--r--drivers/net/wireless/wl12xx/rx.c38
-rw-r--r--drivers/net/wireless/wl12xx/scan.c120
-rw-r--r--drivers/net/wireless/wl12xx/scan.h8
-rw-r--r--drivers/net/wireless/wl12xx/sdio.c259
-rw-r--r--drivers/net/wireless/wl12xx/sdio_test.c543
-rw-r--r--drivers/net/wireless/wl12xx/spi.c215
-rw-r--r--drivers/net/wireless/wl12xx/testmode.c77
-rw-r--r--drivers/net/wireless/wl12xx/tx.c382
-rw-r--r--drivers/net/wireless/wl12xx/tx.h11
-rw-r--r--drivers/net/wireless/wl12xx/wl12xx.h386
-rw-r--r--drivers/net/wireless/wl12xx/wl12xx_80211.h5
-rw-r--r--drivers/net/wireless/wl12xx/wl12xx_platform_data.c25
-rw-r--r--drivers/net/xen-netback/interface.c7
-rw-r--r--drivers/net/xen-netback/netback.c12
-rw-r--r--drivers/net/xen-netback/xenbus.c9
-rw-r--r--drivers/net/xen-netfront.c21
-rw-r--r--drivers/nfc/pn533.c170
-rw-r--r--drivers/of/Kconfig9
-rw-r--r--drivers/of/Makefile1
-rw-r--r--drivers/of/address.c16
-rw-r--r--drivers/of/base.c156
-rw-r--r--drivers/of/fdt.c6
-rw-r--r--drivers/of/gpio.c45
-rw-r--r--drivers/of/irq.c26
-rw-r--r--drivers/of/pdt.c2
-rw-r--r--drivers/of/platform.c2
-rw-r--r--drivers/of/selftest.c139
-rw-r--r--drivers/oprofile/nmi_timer_int.c173
-rw-r--r--drivers/oprofile/oprof.c21
-rw-r--r--drivers/oprofile/oprof.h10
-rw-r--r--drivers/oprofile/oprofile_files.c7
-rw-r--r--drivers/oprofile/oprofilefs.c11
-rw-r--r--drivers/oprofile/timer_int.c29
-rw-r--r--drivers/parisc/Kconfig7
-rw-r--r--drivers/parisc/dino.c47
-rw-r--r--drivers/parisc/lba_pci.c72
-rw-r--r--drivers/parport/parport_ax88796.c13
-rw-r--r--drivers/parport/parport_ip32.c2
-rw-r--r--drivers/parport/parport_mfc3.c2
-rw-r--r--drivers/parport/parport_pc.c4
-rw-r--r--drivers/parport/parport_sunbpp.c13
-rw-r--r--drivers/pci/Kconfig4
-rw-r--r--drivers/pci/access.c76
-rw-r--r--drivers/pci/ats.c108
-rw-r--r--drivers/pci/bus.c32
-rw-r--r--drivers/pci/hotplug/acpi_pcihp.c2
-rw-r--r--drivers/pci/hotplug/acpiphp_core.c2
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c30
-rw-r--r--drivers/pci/hotplug/acpiphp_ibm.c2
-rw-r--r--drivers/pci/hotplug/cpcihp_zt5550.c4
-rw-r--r--drivers/pci/hotplug/cpqphp_core.c4
-rw-r--r--drivers/pci/hotplug/ibmphp_core.c2
-rw-r--r--drivers/pci/hotplug/pci_hotplug_core.c2
-rw-r--r--drivers/pci/hotplug/pciehp.h7
-rw-r--r--drivers/pci/hotplug/pciehp_core.c17
-rw-r--r--drivers/pci/hotplug/pciehp_ctrl.c4
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c1
-rw-r--r--drivers/pci/hotplug/pcihp_skeleton.c2
-rw-r--r--drivers/pci/hotplug/rpaphp.h2
-rw-r--r--drivers/pci/hotplug/rpaphp_core.c2
-rw-r--r--drivers/pci/hotplug/shpchp.h4
-rw-r--r--drivers/pci/hotplug/shpchp_core.c4
-rw-r--r--drivers/pci/ioapic.c15
-rw-r--r--drivers/pci/iov.c23
-rw-r--r--drivers/pci/msi.c160
-rw-r--r--drivers/pci/pci-acpi.c13
-rw-r--r--drivers/pci/pci-driver.c3
-rw-r--r--drivers/pci/pci-label.c4
-rw-r--r--drivers/pci/pci-sysfs.c2
-rw-r--r--drivers/pci/pci.c162
-rw-r--r--drivers/pci/pci.h10
-rw-r--r--drivers/pci/pcie/Kconfig2
-rw-r--r--drivers/pci/pcie/aer/aer_inject.c2
-rw-r--r--drivers/pci/pcie/aer/aerdrv_core.c4
-rw-r--r--drivers/pci/pcie/aspm.c58
-rw-r--r--drivers/pci/probe.c68
-rw-r--r--drivers/pci/remove.c10
-rw-r--r--drivers/pci/setup-res.c6
-rw-r--r--drivers/pci/xen-pcifront.c11
-rw-r--r--drivers/pcmcia/Kconfig8
-rw-r--r--drivers/pcmcia/Makefile4
-rw-r--r--drivers/pcmcia/au1000_generic.c545
-rw-r--r--drivers/pcmcia/au1000_generic.h135
-rw-r--r--drivers/pcmcia/au1000_pb1x00.c294
-rw-r--r--drivers/pcmcia/db1xxx_ss.c26
-rw-r--r--drivers/pcmcia/pxa2xx_cm_x255.c16
-rw-r--r--drivers/pcmcia/pxa2xx_cm_x270.c9
-rw-r--r--drivers/pcmcia/pxa2xx_e740.c11
-rw-r--r--drivers/pcmcia/pxa2xx_palmld.c2
-rw-r--r--drivers/pcmcia/pxa2xx_palmtc.c2
-rw-r--r--drivers/pcmcia/pxa2xx_stargate2.c6
-rw-r--r--drivers/pcmcia/pxa2xx_trizeps4.c5
-rw-r--r--drivers/pcmcia/pxa2xx_vpac270.c4
-rw-r--r--drivers/pcmcia/yenta_socket.c6
-rw-r--r--drivers/pinctrl/Kconfig22
-rw-r--r--drivers/pinctrl/Makefile8
-rw-r--r--drivers/pinctrl/core.c143
-rw-r--r--drivers/pinctrl/core.h13
-rw-r--r--drivers/pinctrl/pinconf.c326
-rw-r--r--drivers/pinctrl/pinconf.h36
-rw-r--r--drivers/pinctrl/pinctrl-coh901.c (renamed from drivers/gpio/gpio-u300.c)21
-rw-r--r--drivers/pinctrl/pinctrl-sirf.c (renamed from drivers/pinctrl/pinmux-sirf.c)9
-rw-r--r--drivers/pinctrl/pinctrl-u300.c (renamed from drivers/pinctrl/pinmux-u300.c)47
-rw-r--r--drivers/pinctrl/pinmux.c265
-rw-r--r--drivers/platform/x86/Kconfig2
-rw-r--r--drivers/platform/x86/asus-laptop.c2
-rw-r--r--drivers/platform/x86/asus-wmi.c4
-rw-r--r--drivers/platform/x86/asus_acpi.c4
-rw-r--r--drivers/platform/x86/compal-laptop.c2
-rw-r--r--drivers/platform/x86/ideapad-laptop.c2
-rw-r--r--drivers/platform/x86/intel_menlow.c2
-rw-r--r--drivers/platform/x86/intel_oaktrail.c2
-rw-r--r--drivers/platform/x86/intel_scu_ipc.c2
-rw-r--r--drivers/platform/x86/msi-laptop.c2
-rw-r--r--drivers/platform/x86/samsung-laptop.c4
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c35
-rw-r--r--drivers/platform/x86/toshiba_acpi.c21
-rw-r--r--drivers/platform/x86/wmi.c4
-rw-r--r--drivers/pnp/quirks.c42
-rw-r--r--drivers/power/Kconfig32
-rw-r--r--drivers/power/Makefile5
-rw-r--r--drivers/power/bq27x00_battery.c210
-rw-r--r--drivers/power/charger-manager.c1072
-rw-r--r--drivers/power/collie_battery.c55
-rw-r--r--drivers/power/da9030_battery.c13
-rw-r--r--drivers/power/da9052-battery.c664
-rw-r--r--drivers/power/ds2760_battery.c21
-rw-r--r--drivers/power/ds2780_battery.c18
-rw-r--r--drivers/power/gpio-charger.c12
-rw-r--r--drivers/power/intel_mid_battery.c25
-rw-r--r--drivers/power/isp1704_charger.c14
-rw-r--r--drivers/power/jz4740-battery.c14
-rw-r--r--drivers/power/lp8727_charger.c494
-rw-r--r--drivers/power/max17042_battery.c94
-rw-r--r--drivers/power/max8903_charger.c14
-rw-r--r--drivers/power/max8925_power.c75
-rw-r--r--drivers/power/max8997_charger.c4
-rw-r--r--drivers/power/max8998_charger.c14
-rw-r--r--drivers/power/olpc_battery.c75
-rw-r--r--drivers/power/pcf50633-charger.c12
-rw-r--r--drivers/power/pda_power.c89
-rw-r--r--drivers/power/power_supply_core.c19
-rw-r--r--drivers/power/power_supply_sysfs.c16
-rw-r--r--drivers/power/s3c_adc_battery.c37
-rw-r--r--drivers/power/sbs-battery.c (renamed from drivers/power/bq20z75.c)481
-rw-r--r--drivers/power/tosa_battery.c79
-rw-r--r--drivers/power/wm831x_backup.c12
-rw-r--r--drivers/power/wm831x_power.c56
-rw-r--r--drivers/power/wm8350_power.c12
-rw-r--r--drivers/power/wm97xx_battery.c20
-rw-r--r--drivers/power/z2_battery.c4
-rw-r--r--drivers/ptp/ptp_clock.c4
-rw-r--r--drivers/rapidio/devices/tsi721.c41
-rw-r--r--drivers/rapidio/devices/tsi721.h2
-rw-r--r--drivers/regulator/88pm8607.c2
-rw-r--r--drivers/regulator/Kconfig8
-rw-r--r--drivers/regulator/Makefile2
-rw-r--r--drivers/regulator/aat2870-regulator.c6
-rw-r--r--drivers/regulator/ab3100.c2
-rw-r--r--drivers/regulator/ab8500.c4
-rw-r--r--drivers/regulator/ad5398.c2
-rw-r--r--drivers/regulator/bq24022.c2
-rw-r--r--drivers/regulator/core.c180
-rw-r--r--drivers/regulator/da903x.c2
-rw-r--r--drivers/regulator/da9052-regulator.c606
-rw-r--r--drivers/regulator/db8500-prcmu.c2
-rw-r--r--drivers/regulator/dummy.c2
-rw-r--r--drivers/regulator/fixed.c85
-rw-r--r--drivers/regulator/gpio-regulator.c2
-rw-r--r--drivers/regulator/isl6271a-regulator.c2
-rw-r--r--drivers/regulator/lp3971.c2
-rw-r--r--drivers/regulator/lp3972.c2
-rw-r--r--drivers/regulator/max1586.c2
-rw-r--r--drivers/regulator/max8649.c157
-rw-r--r--drivers/regulator/max8660.c2
-rw-r--r--drivers/regulator/max8925-regulator.c35
-rw-r--r--drivers/regulator/max8952.c2
-rw-r--r--drivers/regulator/max8997.c2
-rw-r--r--drivers/regulator/max8998.c2
-rw-r--r--drivers/regulator/mc13783-regulator.c7
-rw-r--r--drivers/regulator/mc13892-regulator.c47
-rw-r--r--drivers/regulator/mc13xxx-regulator-core.c57
-rw-r--r--drivers/regulator/mc13xxx.h26
-rw-r--r--drivers/regulator/of_regulator.c87
-rw-r--r--drivers/regulator/pcap-regulator.c2
-rw-r--r--drivers/regulator/pcf50633-regulator.c2
-rw-r--r--drivers/regulator/tps6105x-regulator.c3
-rw-r--r--drivers/regulator/tps65023-regulator.c89
-rw-r--r--drivers/regulator/tps6507x-regulator.c2
-rw-r--r--drivers/regulator/tps6524x-regulator.c2
-rw-r--r--drivers/regulator/tps6586x-regulator.c2
-rw-r--r--drivers/regulator/tps65910-regulator.c39
-rw-r--r--drivers/regulator/tps65912-regulator.c2
-rw-r--r--drivers/regulator/twl-regulator.c48
-rw-r--r--drivers/regulator/userspace-consumer.c13
-rw-r--r--drivers/regulator/virtual.c12
-rw-r--r--drivers/regulator/wm831x-dcdc.c18
-rw-r--r--drivers/regulator/wm831x-isink.c7
-rw-r--r--drivers/regulator/wm831x-ldo.c18
-rw-r--r--drivers/regulator/wm8350-regulator.c2
-rw-r--r--drivers/regulator/wm8400-regulator.c2
-rw-r--r--drivers/regulator/wm8994-regulator.c2
-rw-r--r--drivers/rtc/Kconfig8
-rw-r--r--drivers/rtc/class.c10
-rw-r--r--drivers/rtc/interface.c4
-rw-r--r--drivers/rtc/rtc-88pm860x.c12
-rw-r--r--drivers/rtc/rtc-ab8500.c138
-rw-r--r--drivers/rtc/rtc-at91rm9200.c101
-rw-r--r--drivers/rtc/rtc-bfin.c13
-rw-r--r--drivers/rtc/rtc-bq4802.c13
-rw-r--r--drivers/rtc/rtc-cmos.c2
-rw-r--r--drivers/rtc/rtc-dm355evm.c12
-rw-r--r--drivers/rtc/rtc-ds1286.c13
-rw-r--r--drivers/rtc/rtc-ds1511.c15
-rw-r--r--drivers/rtc/rtc-ds1553.c13
-rw-r--r--drivers/rtc/rtc-ds1742.c13
-rw-r--r--drivers/rtc/rtc-jz4740.c14
-rw-r--r--drivers/rtc/rtc-lpc32xx.c12
-rw-r--r--drivers/rtc/rtc-m41t80.c9
-rw-r--r--drivers/rtc/rtc-m41t93.c1
-rw-r--r--drivers/rtc/rtc-m41t94.c1
-rw-r--r--drivers/rtc/rtc-m48t35.c13
-rw-r--r--drivers/rtc/rtc-m48t59.c13
-rw-r--r--drivers/rtc/rtc-m48t86.c13
-rw-r--r--drivers/rtc/rtc-max6902.c1
-rw-r--r--drivers/rtc/rtc-max8925.c38
-rw-r--r--drivers/rtc/rtc-max8998.c12
-rw-r--r--drivers/rtc/rtc-mc13xxx.c2
-rw-r--r--drivers/rtc/rtc-mpc5121.c12
-rw-r--r--drivers/rtc/rtc-mrst.c13
-rw-r--r--drivers/rtc/rtc-mxc.c123
-rw-r--r--drivers/rtc/rtc-pcf2123.c1
-rw-r--r--drivers/rtc/rtc-pcf50633.c12
-rw-r--r--drivers/rtc/rtc-pl030.c2
-rw-r--r--drivers/rtc/rtc-pl031.c2
-rw-r--r--drivers/rtc/rtc-pm8xxx.c12
-rw-r--r--drivers/rtc/rtc-puv3.c22
-rw-r--r--drivers/rtc/rtc-rs5c348.c1
-rw-r--r--drivers/rtc/rtc-s3c.c39
-rw-r--r--drivers/rtc/rtc-sa1100.c313
-rw-r--r--drivers/rtc/rtc-spear.c12
-rw-r--r--drivers/rtc/rtc-stk17ta8.c13
-rw-r--r--drivers/rtc/rtc-stmp3xxx.c13
-rw-r--r--drivers/rtc/rtc-twl.c10
-rw-r--r--drivers/rtc/rtc-v3020.c13
-rw-r--r--drivers/rtc/rtc-vr41xx.c13
-rw-r--r--drivers/rtc/rtc-vt8500.c12
-rw-r--r--drivers/rtc/rtc-wm831x.c36
-rw-r--r--drivers/rtc/rtc-wm8350.c12
-rw-r--r--drivers/s390/block/dasd.c5
-rw-r--r--drivers/s390/block/dasd_3990_erp.c4
-rw-r--r--drivers/s390/block/dasd_alias.c10
-rw-r--r--drivers/s390/block/dasd_eckd.c411
-rw-r--r--drivers/s390/block/xpram.c2
-rw-r--r--drivers/s390/char/raw3270.c2
-rw-r--r--drivers/s390/char/sclp_config.c8
-rw-r--r--drivers/s390/char/tape_class.h1
-rw-r--r--drivers/s390/char/vmwatchdog.c4
-rw-r--r--drivers/s390/cio/chsc.c7
-rw-r--r--drivers/s390/cio/cio.h5
-rw-r--r--drivers/s390/cio/cmf.c2
-rw-r--r--drivers/s390/cio/css.c104
-rw-r--r--drivers/s390/cio/device.c4
-rw-r--r--drivers/s390/cio/device_fsm.c30
-rw-r--r--drivers/s390/cio/device_ops.c20
-rw-r--r--drivers/s390/cio/io_sch.h5
-rw-r--r--drivers/s390/cio/qdio_setup.c10
-rw-r--r--drivers/s390/crypto/ap_bus.c2
-rw-r--r--drivers/s390/crypto/zcrypt_pcixcc.c32
-rw-r--r--drivers/s390/kvm/kvm_virtio.c8
-rw-r--r--drivers/s390/net/netiucv.c217
-rw-r--r--drivers/s390/net/qeth_core_main.c47
-rw-r--r--drivers/s390/net/qeth_l2_main.c19
-rw-r--r--drivers/s390/net/qeth_l3_main.c32
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c4
-rw-r--r--drivers/sbus/char/bbc_i2c.c27
-rw-r--r--drivers/sbus/char/display7seg.c13
-rw-r--r--drivers/sbus/char/envctrl.c12
-rw-r--r--drivers/sbus/char/flash.c12
-rw-r--r--drivers/sbus/char/uctrl.c12
-rw-r--r--drivers/scsi/Kconfig5
-rw-r--r--drivers/scsi/aacraid/commctrl.c1
-rw-r--r--drivers/scsi/aha1542.c2
-rw-r--r--drivers/scsi/aic7xxx/aicasm/aicasm.c2
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.c2
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.h2
-rw-r--r--drivers/scsi/be2iscsi/be_main.c17
-rw-r--r--drivers/scsi/bfa/bfa_defs.h4
-rw-r--r--drivers/scsi/bfa/bfa_defs_svc.h444
-rw-r--r--drivers/scsi/bfa/bfa_fc.h155
-rw-r--r--drivers/scsi/bfa/bfa_fcpim.c416
-rw-r--r--drivers/scsi/bfa/bfa_fcpim.h7
-rw-r--r--drivers/scsi/bfa/bfa_ioc.c6
-rw-r--r--drivers/scsi/bfa/bfa_svc.h5
-rw-r--r--drivers/scsi/bfa/bfad.c2
-rw-r--r--drivers/scsi/bfa/bfad_attr.c2
-rw-r--r--drivers/scsi/bfa/bfad_bsg.c27
-rw-r--r--drivers/scsi/bfa/bfad_debugfs.c5
-rw-r--r--drivers/scsi/bfa/bfad_drv.h2
-rw-r--r--drivers/scsi/bfa/bfad_im.c56
-rw-r--r--drivers/scsi/bfa/bfad_im.h27
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c5
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c2
-rw-r--r--drivers/scsi/cxgbi/cxgb3i/cxgb3i.c2
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c8
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c17
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.h2
-rw-r--r--drivers/scsi/dc395x.c2
-rw-r--r--drivers/scsi/device_handler/scsi_dh.c58
-rw-r--r--drivers/scsi/device_handler/scsi_dh_alua.c5
-rw-r--r--drivers/scsi/device_handler/scsi_dh_emc.c19
-rw-r--r--drivers/scsi/device_handler/scsi_dh_hp_sw.c19
-rw-r--r--drivers/scsi/device_handler/scsi_dh_rdac.c21
-rw-r--r--drivers/scsi/fcoe/fcoe.c162
-rw-r--r--drivers/scsi/fcoe/fcoe.h4
-rw-r--r--drivers/scsi/fcoe/fcoe_ctlr.c4
-rw-r--r--drivers/scsi/gdth.h2
-rw-r--r--drivers/scsi/hpsa.c8
-rw-r--r--drivers/scsi/ipr.c67
-rw-r--r--drivers/scsi/ipr.h1
-rw-r--r--drivers/scsi/ips.c2
-rw-r--r--drivers/scsi/isci/firmware/Makefile19
-rw-r--r--drivers/scsi/isci/firmware/README36
-rw-r--r--drivers/scsi/isci/firmware/create_fw.c99
-rw-r--r--drivers/scsi/isci/firmware/create_fw.h77
-rw-r--r--drivers/scsi/isci/host.c340
-rw-r--r--drivers/scsi/isci/host.h27
-rw-r--r--drivers/scsi/isci/init.c25
-rw-r--r--drivers/scsi/isci/isci.h1
-rw-r--r--drivers/scsi/isci/phy.c172
-rw-r--r--drivers/scsi/isci/port.c104
-rw-r--r--drivers/scsi/isci/port.h10
-rw-r--r--drivers/scsi/isci/port_config.c35
-rw-r--r--drivers/scsi/isci/probe_roms.c2
-rw-r--r--drivers/scsi/isci/probe_roms.h89
-rw-r--r--drivers/scsi/isci/remote_device.c10
-rw-r--r--drivers/scsi/isci/task.c2
-rw-r--r--drivers/scsi/isci/task.h7
-rw-r--r--drivers/scsi/iscsi_boot_sysfs.c14
-rw-r--r--drivers/scsi/iscsi_tcp.c2
-rw-r--r--drivers/scsi/jazz_esp.c2
-rw-r--r--drivers/scsi/libfc/fc_disc.c6
-rw-r--r--drivers/scsi/libfc/fc_elsct.c1
-rw-r--r--drivers/scsi/libfc/fc_exch.c2
-rw-r--r--drivers/scsi/libfc/fc_fcp.c4
-rw-r--r--drivers/scsi/libfc/fc_lport.c5
-rw-r--r--drivers/scsi/libfc/fc_rport.c10
-rw-r--r--drivers/scsi/lpfc/lpfc.h14
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c436
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c432
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.h3
-rw-r--r--drivers/scsi/lpfc/lpfc_compat.h5
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h13
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c172
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c214
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c157
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h21
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h11
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c338
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c25
-rw-r--r--drivers/scsi/lpfc/lpfc_mem.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c10
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c17
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.h5
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c704
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h11
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c6
-rw-r--r--drivers/scsi/mac_esp.c3
-rw-r--r--drivers/scsi/mac_scsi.c3
-rw-r--r--drivers/scsi/megaraid.c13
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h8
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c145
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fp.c4
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2.h10
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h28
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_ioc.h49
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_raid.h67
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_tool.h9
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.c205
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.h26
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_ctl.c8
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_scsih.c170
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_transport.c9
-rw-r--r--drivers/scsi/nsp32.c4
-rw-r--r--drivers/scsi/pcmcia/nsp_cs.c2
-rw-r--r--drivers/scsi/pmcraid.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c37
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c20
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c314
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.h19
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h6
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h3
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c98
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c655
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c255
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c27
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c554
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.h4
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c449
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c5
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h2
-rw-r--r--drivers/scsi/qla4xxx/ql4_dbg.c6
-rw-r--r--drivers/scsi/qla4xxx/ql4_def.h59
-rw-r--r--drivers/scsi/qla4xxx/ql4_fw.h10
-rw-r--r--drivers/scsi/qla4xxx/ql4_glbl.h16
-rw-r--r--drivers/scsi/qla4xxx/ql4_init.c246
-rw-r--r--drivers/scsi/qla4xxx/ql4_isr.c30
-rw-r--r--drivers/scsi/qla4xxx/ql4_mbx.c18
-rw-r--r--drivers/scsi/qla4xxx/ql4_nx.c28
-rw-r--r--drivers/scsi/qla4xxx/ql4_nx.h22
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c1388
-rw-r--r--drivers/scsi/qla4xxx/ql4_version.h2
-rw-r--r--drivers/scsi/scsi_error.c5
-rw-r--r--drivers/scsi/scsi_lib.c7
-rw-r--r--drivers/scsi/scsi_netlink.c2
-rw-r--r--drivers/scsi/scsi_pm.c27
-rw-r--r--drivers/scsi/scsi_priv.h1
-rw-r--r--drivers/scsi/scsi_scan.c2
-rw-r--r--drivers/scsi/scsi_transport_fc.c3
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c29
-rw-r--r--drivers/scsi/scsi_transport_spi.c2
-rw-r--r--drivers/scsi/scsicam.c1
-rw-r--r--drivers/scsi/sd.c18
-rw-r--r--drivers/scsi/sg.c32
-rw-r--r--drivers/scsi/sni_53c710.c2
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_glue.c4
-rw-r--r--drivers/scsi/vmw_pvscsi.c2
-rw-r--r--drivers/sh/Makefile9
-rw-r--r--drivers/sh/clk/core.c9
-rw-r--r--drivers/sh/clk/cpg.c79
-rw-r--r--drivers/sh/intc/core.c37
-rw-r--r--drivers/sh/intc/internals.h7
-rw-r--r--drivers/sh/intc/userimask.c16
-rw-r--r--drivers/sh/pfc.c273
-rw-r--r--drivers/spi/Kconfig20
-rw-r--r--drivers/spi/spi-ath79.c1
-rw-r--r--drivers/spi/spi-dw-mid.c8
-rw-r--r--drivers/spi/spi-ep93xx.c9
-rw-r--r--drivers/spi/spi-gpio.c4
-rw-r--r--drivers/spi/spi-nuc900.c3
-rw-r--r--drivers/spi/spi-omap2-mcspi.c51
-rw-r--r--drivers/spi/spi-pl022.c151
-rw-r--r--drivers/spi/spi-s3c64xx.c14
-rw-r--r--drivers/spi/spi-topcliff-pch.c17
-rw-r--r--drivers/spi/spi.c2
-rw-r--r--drivers/ssb/driver_pcicore.c8
-rw-r--r--drivers/ssb/pci.c23
-rw-r--r--drivers/staging/Kconfig8
-rw-r--r--drivers/staging/Makefile4
-rw-r--r--drivers/staging/android/Kconfig110
-rw-r--r--drivers/staging/android/Makefile9
-rw-r--r--drivers/staging/android/TODO10
-rw-r--r--drivers/staging/android/android_pmem.h93
-rw-r--r--drivers/staging/android/ashmem.c752
-rw-r--r--drivers/staging/android/ashmem.h48
-rw-r--r--drivers/staging/android/binder.c3600
-rw-r--r--drivers/staging/android/binder.h330
-rw-r--r--drivers/staging/android/logger.c616
-rw-r--r--drivers/staging/android/logger.h49
-rw-r--r--drivers/staging/android/lowmemorykiller.c219
-rw-r--r--drivers/staging/android/pmem.c1345
-rw-r--r--drivers/staging/android/ram_console.c443
-rw-r--r--drivers/staging/android/ram_console.h22
-rw-r--r--drivers/staging/android/switch/Kconfig11
-rw-r--r--drivers/staging/android/switch/Makefile4
-rw-r--r--drivers/staging/android/switch/switch.h53
-rw-r--r--drivers/staging/android/switch/switch_class.c174
-rw-r--r--drivers/staging/android/switch/switch_gpio.c172
-rw-r--r--drivers/staging/android/timed_gpio.c176
-rw-r--r--drivers/staging/android/timed_gpio.h33
-rw-r--r--drivers/staging/android/timed_output.c123
-rw-r--r--drivers/staging/android/timed_output.h37
-rw-r--r--drivers/staging/asus_oled/asus_oled.c4
-rw-r--r--drivers/staging/bcm/Bcmchar.c376
-rw-r--r--drivers/staging/bcm/HandleControlPacket.c323
-rw-r--r--drivers/staging/bcm/InterfaceDld.c12
-rw-r--r--drivers/staging/bcm/InterfaceIdleMode.c33
-rw-r--r--drivers/staging/bcm/InterfaceInit.c12
-rw-r--r--drivers/staging/bcm/InterfaceMisc.c24
-rw-r--r--drivers/staging/bcm/Misc.c32
-rw-r--r--drivers/staging/bcm/hostmibs.c178
-rw-r--r--drivers/staging/bcm/led_control.c1131
-rw-r--r--drivers/staging/bcm/nvm.c95
-rw-r--r--drivers/staging/bcm/target_params.h4
-rw-r--r--drivers/staging/comedi/comedi_fops.c160
-rw-r--r--drivers/staging/comedi/comedi_fops.h3
-rw-r--r--drivers/staging/comedi/drivers/addi-data/addi_common.c137
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c2
-rw-r--r--drivers/staging/comedi/drivers/adl_pci7230.c10
-rw-r--r--drivers/staging/comedi/drivers/adl_pci7296.c6
-rw-r--r--drivers/staging/comedi/drivers/adl_pci7432.c6
-rw-r--r--drivers/staging/comedi/drivers/adl_pci8164.c6
-rw-r--r--drivers/staging/comedi/drivers/adl_pci9111.c6
-rw-r--r--drivers/staging/comedi/drivers/adv_pci1710.c32
-rw-r--r--drivers/staging/comedi/drivers/adv_pci_dio.c23
-rw-r--r--drivers/staging/comedi/drivers/amplc_dio200.c9
-rw-r--r--drivers/staging/comedi/drivers/amplc_pc236.c6
-rw-r--r--drivers/staging/comedi/drivers/amplc_pc263.c6
-rw-r--r--drivers/staging/comedi/drivers/amplc_pci224.c9
-rw-r--r--drivers/staging/comedi/drivers/amplc_pci230.c9
-rw-r--r--drivers/staging/comedi/drivers/cb_das16_cs.c20
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidas.c42
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidas64.c26
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidda.c18
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidio.c23
-rw-r--r--drivers/staging/comedi/drivers/cb_pcimdas.c50
-rw-r--r--drivers/staging/comedi/drivers/cb_pcimdda.c30
-rw-r--r--drivers/staging/comedi/drivers/contec_pci_dio.c19
-rw-r--r--drivers/staging/comedi/drivers/daqboard2000.c73
-rw-r--r--drivers/staging/comedi/drivers/das08.c6
-rw-r--r--drivers/staging/comedi/drivers/das08_cs.c8
-rw-r--r--drivers/staging/comedi/drivers/das16m1.c37
-rw-r--r--drivers/staging/comedi/drivers/das1800.c65
-rw-r--r--drivers/staging/comedi/drivers/das6402.c23
-rw-r--r--drivers/staging/comedi/drivers/das800.c43
-rw-r--r--drivers/staging/comedi/drivers/dt3000.c38
-rw-r--r--drivers/staging/comedi/drivers/jr3_pci.c81
-rw-r--r--drivers/staging/comedi/drivers/ke_counter.c6
-rw-r--r--drivers/staging/comedi/drivers/me_daq.c9
-rw-r--r--drivers/staging/comedi/drivers/ni_at_a2150.c8
-rw-r--r--drivers/staging/comedi/drivers/ni_daq_dio24.c19
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc_cs.c2
-rw-r--r--drivers/staging/comedi/drivers/ni_pcimio.c29
-rw-r--r--drivers/staging/comedi/drivers/pcl816.c4
-rw-r--r--drivers/staging/comedi/drivers/pcl818.c108
-rw-r--r--drivers/staging/comedi/drivers/pcmmio.c26
-rw-r--r--drivers/staging/comedi/drivers/pcmuio.c40
-rw-r--r--drivers/staging/comedi/drivers/serial2002.c7
-rw-r--r--drivers/staging/comedi/drivers/usbduxsigma.c23
-rw-r--r--drivers/staging/crystalhd/bc_dts_defs.h262
-rw-r--r--drivers/staging/cxt1e1/comet.h22
-rw-r--r--drivers/staging/cxt1e1/comet_tables.c24
-rw-r--r--drivers/staging/cxt1e1/comet_tables.h24
-rw-r--r--drivers/staging/cxt1e1/libsbew.h34
-rw-r--r--drivers/staging/cxt1e1/musycc.c49
-rw-r--r--drivers/staging/cxt1e1/musycc.h31
-rw-r--r--drivers/staging/cxt1e1/ossiRelease.c10
-rw-r--r--drivers/staging/cxt1e1/pmc93x6_eeprom.h21
-rw-r--r--drivers/staging/cxt1e1/pmcc4.h42
-rw-r--r--drivers/staging/cxt1e1/pmcc4_cpld.h33
-rw-r--r--drivers/staging/cxt1e1/pmcc4_defs.h14
-rw-r--r--drivers/staging/cxt1e1/pmcc4_drv.c70
-rw-r--r--drivers/staging/cxt1e1/pmcc4_ioctls.h16
-rw-r--r--drivers/staging/cxt1e1/sbe_bid.h14
-rw-r--r--drivers/staging/cxt1e1/sbe_promformat.h27
-rw-r--r--drivers/staging/cxt1e1/sbecom_inline_linux.h20
-rw-r--r--drivers/staging/cxt1e1/sbeproc.h20
-rw-r--r--drivers/staging/cxt1e1/sbew_ioc.h55
-rw-r--r--drivers/staging/et131x/et131x.c370
-rw-r--r--drivers/staging/frontier/tranzport.c2
-rw-r--r--drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c4
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_hw.c2
-rw-r--r--drivers/staging/gma500/Kconfig2
-rw-r--r--drivers/staging/gma500/accel_2d.c2
-rw-r--r--drivers/staging/gma500/cdv_intel_display.c4
-rw-r--r--drivers/staging/gma500/framebuffer.c41
-rw-r--r--drivers/staging/gma500/mdfld_intel_display.c4
-rw-r--r--drivers/staging/gma500/mrst_crtc.c4
-rw-r--r--drivers/staging/gma500/power.c2
-rw-r--r--drivers/staging/gma500/psb_drv.c23
-rw-r--r--drivers/staging/gma500/psb_intel_display.c4
-rw-r--r--drivers/staging/hv/Kconfig12
-rw-r--r--drivers/staging/hv/Makefile3
-rw-r--r--drivers/staging/hv/TODO4
-rw-r--r--drivers/staging/hv/storvsc_drv.c310
-rw-r--r--drivers/staging/iio/Documentation/generic_buffer.c8
-rw-r--r--drivers/staging/iio/Documentation/iio_utils.h26
-rw-r--r--drivers/staging/iio/Documentation/ring.txt10
-rw-r--r--drivers/staging/iio/Documentation/sysfs-bus-iio10
-rw-r--r--drivers/staging/iio/Kconfig1
-rw-r--r--drivers/staging/iio/accel/adis16201_core.c36
-rw-r--r--drivers/staging/iio/accel/adis16201_ring.c14
-rw-r--r--drivers/staging/iio/accel/adis16203_core.c24
-rw-r--r--drivers/staging/iio/accel/adis16203_ring.c14
-rw-r--r--drivers/staging/iio/accel/adis16204_core.c35
-rw-r--r--drivers/staging/iio/accel/adis16204_ring.c14
-rw-r--r--drivers/staging/iio/accel/adis16209_core.c32
-rw-r--r--drivers/staging/iio/accel/adis16209_ring.c9
-rw-r--r--drivers/staging/iio/accel/adis16220_core.c27
-rw-r--r--drivers/staging/iio/accel/adis16240_core.c32
-rw-r--r--drivers/staging/iio/accel/adis16240_ring.c9
-rw-r--r--drivers/staging/iio/accel/kxsd9.c10
-rw-r--r--drivers/staging/iio/accel/lis3l02dq.h12
-rw-r--r--drivers/staging/iio/accel/lis3l02dq_core.c35
-rw-r--r--drivers/staging/iio/accel/lis3l02dq_ring.c61
-rw-r--r--drivers/staging/iio/accel/sca3000_core.c24
-rw-r--r--drivers/staging/iio/accel/sca3000_ring.c17
-rw-r--r--drivers/staging/iio/adc/ad7192.c84
-rw-r--r--drivers/staging/iio/adc/ad7280a.c28
-rw-r--r--drivers/staging/iio/adc/ad7291.c39
-rw-r--r--drivers/staging/iio/adc/ad7298.h5
-rw-r--r--drivers/staging/iio/adc/ad7298_core.c67
-rw-r--r--drivers/staging/iio/adc/ad7298_ring.c45
-rw-r--r--drivers/staging/iio/adc/ad7476.h5
-rw-r--r--drivers/staging/iio/adc/ad7476_core.c25
-rw-r--r--drivers/staging/iio/adc/ad7476_ring.c31
-rw-r--r--drivers/staging/iio/adc/ad7606.h1
-rw-r--r--drivers/staging/iio/adc/ad7606_core.c10
-rw-r--r--drivers/staging/iio/adc/ad7606_par.c1
-rw-r--r--drivers/staging/iio/adc/ad7606_ring.c30
-rw-r--r--drivers/staging/iio/adc/ad7606_spi.c3
-rw-r--r--drivers/staging/iio/adc/ad7780.c8
-rw-r--r--drivers/staging/iio/adc/ad7793.c95
-rw-r--r--drivers/staging/iio/adc/ad7816.c2
-rw-r--r--drivers/staging/iio/adc/ad7887.h5
-rw-r--r--drivers/staging/iio/adc/ad7887_core.c13
-rw-r--r--drivers/staging/iio/adc/ad7887_ring.c46
-rw-r--r--drivers/staging/iio/adc/ad799x.h6
-rw-r--r--drivers/staging/iio/adc/ad799x_core.c9
-rw-r--r--drivers/staging/iio/adc/ad799x_ring.c47
-rw-r--r--drivers/staging/iio/adc/adt7310.c3
-rw-r--r--drivers/staging/iio/adc/adt7410.c1
-rw-r--r--drivers/staging/iio/adc/max1363.h14
-rw-r--r--drivers/staging/iio/adc/max1363_core.c78
-rw-r--r--drivers/staging/iio/adc/max1363_ring.c92
-rw-r--r--drivers/staging/iio/addac/adt7316-spi.c1
-rw-r--r--drivers/staging/iio/addac/adt7316.c1
-rw-r--r--drivers/staging/iio/buffer.h (renamed from drivers/staging/iio/buffer_generic.h)87
-rw-r--r--drivers/staging/iio/cdc/ad7150.c8
-rw-r--r--drivers/staging/iio/cdc/ad7152.c38
-rw-r--r--drivers/staging/iio/cdc/ad7746.c50
-rw-r--r--drivers/staging/iio/chrdev.h25
-rw-r--r--drivers/staging/iio/dac/Kconfig39
-rw-r--r--drivers/staging/iio/dac/Makefile3
-rw-r--r--drivers/staging/iio/dac/ad5064.c6
-rw-r--r--drivers/staging/iio/dac/ad5360.c24
-rw-r--r--drivers/staging/iio/dac/ad5380.c676
-rw-r--r--drivers/staging/iio/dac/ad5421.c555
-rw-r--r--drivers/staging/iio/dac/ad5421.h32
-rw-r--r--drivers/staging/iio/dac/ad5446.c188
-rw-r--r--drivers/staging/iio/dac/ad5446.h10
-rw-r--r--drivers/staging/iio/dac/ad5504.c132
-rw-r--r--drivers/staging/iio/dac/ad5504.h5
-rw-r--r--drivers/staging/iio/dac/ad5624r.h4
-rw-r--r--drivers/staging/iio/dac/ad5624r_spi.c127
-rw-r--r--drivers/staging/iio/dac/ad5686.c5
-rw-r--r--drivers/staging/iio/dac/ad5764.c393
-rw-r--r--drivers/staging/iio/dac/ad5791.c15
-rw-r--r--drivers/staging/iio/dds/ad5930.c1
-rw-r--r--drivers/staging/iio/dds/ad9832.c5
-rw-r--r--drivers/staging/iio/dds/ad9834.c11
-rw-r--r--drivers/staging/iio/dds/ad9850.c1
-rw-r--r--drivers/staging/iio/dds/ad9852.c1
-rw-r--r--drivers/staging/iio/dds/ad9910.c1
-rw-r--r--drivers/staging/iio/dds/ad9951.c1
-rw-r--r--drivers/staging/iio/events.h103
-rw-r--r--drivers/staging/iio/gyro/Kconfig6
-rw-r--r--drivers/staging/iio/gyro/adis16060_core.c8
-rw-r--r--drivers/staging/iio/gyro/adis16080_core.c1
-rw-r--r--drivers/staging/iio/gyro/adis16130_core.c1
-rw-r--r--drivers/staging/iio/gyro/adis16260_core.c45
-rw-r--r--drivers/staging/iio/gyro/adis16260_ring.c9
-rw-r--r--drivers/staging/iio/gyro/adxrs450.h5
-rw-r--r--drivers/staging/iio/gyro/adxrs450_core.c88
-rw-r--r--drivers/staging/iio/iio.h144
-rw-r--r--drivers/staging/iio/iio_core.h11
-rw-r--r--drivers/staging/iio/iio_core_trigger.h3
-rw-r--r--drivers/staging/iio/iio_dummy_evgen.c4
-rw-r--r--drivers/staging/iio/iio_simple_dummy.c49
-rw-r--r--drivers/staging/iio/iio_simple_dummy_buffer.c12
-rw-r--r--drivers/staging/iio/iio_simple_dummy_events.c1
-rw-r--r--drivers/staging/iio/impedance-analyzer/ad5933.c30
-rw-r--r--drivers/staging/iio/imu/adis16400.h2
-rw-r--r--drivers/staging/iio/imu/adis16400_core.c269
-rw-r--r--drivers/staging/iio/imu/adis16400_ring.c23
-rw-r--r--drivers/staging/iio/industrialio-buffer.c346
-rw-r--r--drivers/staging/iio/industrialio-core.c70
-rw-r--r--drivers/staging/iio/industrialio-trigger.c36
-rw-r--r--drivers/staging/iio/kfifo_buf.c72
-rw-r--r--drivers/staging/iio/kfifo_buf.h2
-rw-r--r--drivers/staging/iio/light/isl29018.c7
-rw-r--r--drivers/staging/iio/light/tsl2563.c11
-rw-r--r--drivers/staging/iio/light/tsl2583.c17
-rw-r--r--drivers/staging/iio/magnetometer/ak8975.c4
-rw-r--r--drivers/staging/iio/magnetometer/hmc5843.c5
-rw-r--r--drivers/staging/iio/meter/ade7753.c1
-rw-r--r--drivers/staging/iio/meter/ade7754.c1
-rw-r--r--drivers/staging/iio/meter/ade7758_core.c37
-rw-r--r--drivers/staging/iio/meter/ade7758_ring.c10
-rw-r--r--drivers/staging/iio/meter/ade7759.c1
-rw-r--r--drivers/staging/iio/meter/ade7854-spi.c1
-rw-r--r--drivers/staging/iio/resolver/ad2s1200.c1
-rw-r--r--drivers/staging/iio/resolver/ad2s1210.c1
-rw-r--r--drivers/staging/iio/resolver/ad2s90.c1
-rw-r--r--drivers/staging/iio/ring_sw.c151
-rw-r--r--drivers/staging/iio/ring_sw.h2
-rw-r--r--drivers/staging/iio/sysfs.h43
-rw-r--r--drivers/staging/iio/trigger.h2
-rw-r--r--drivers/staging/iio/trigger/iio-trig-periodic-rtc.c1
-rw-r--r--drivers/staging/iio/types.h49
-rw-r--r--drivers/staging/intel_sst/Kconfig19
-rw-r--r--drivers/staging/intel_sst/Makefile7
-rw-r--r--drivers/staging/intel_sst/TODO13
-rw-r--r--drivers/staging/intel_sst/intel_sst.c649
-rw-r--r--drivers/staging/intel_sst/intel_sst.h162
-rw-r--r--drivers/staging/intel_sst/intel_sst_app_interface.c1460
-rw-r--r--drivers/staging/intel_sst/intel_sst_common.h623
-rw-r--r--drivers/staging/intel_sst/intel_sst_drv_interface.c564
-rw-r--r--drivers/staging/intel_sst/intel_sst_dsp.c496
-rw-r--r--drivers/staging/intel_sst/intel_sst_fw_ipc.h416
-rw-r--r--drivers/staging/intel_sst/intel_sst_ioctl.h440
-rw-r--r--drivers/staging/intel_sst/intel_sst_ipc.c774
-rw-r--r--drivers/staging/intel_sst/intel_sst_pvt.c313
-rw-r--r--drivers/staging/intel_sst/intel_sst_stream.c583
-rw-r--r--drivers/staging/intel_sst/intel_sst_stream_encoded.c1273
-rw-r--r--drivers/staging/intel_sst/intelmid.c1022
-rw-r--r--drivers/staging/intel_sst/intelmid.h209
-rw-r--r--drivers/staging/intel_sst/intelmid_adc_control.h193
-rw-r--r--drivers/staging/intel_sst/intelmid_ctrl.c921
-rw-r--r--drivers/staging/intel_sst/intelmid_msic_control.c1047
-rw-r--r--drivers/staging/intel_sst/intelmid_pvt.c173
-rw-r--r--drivers/staging/intel_sst/intelmid_snd_control.h123
-rw-r--r--drivers/staging/intel_sst/intelmid_v0_control.c866
-rw-r--r--drivers/staging/intel_sst/intelmid_v1_control.c978
-rw-r--r--drivers/staging/intel_sst/intelmid_v2_control.c1156
-rw-r--r--drivers/staging/keucr/smilmain.c4
-rw-r--r--drivers/staging/line6/Makefile3
-rw-r--r--drivers/staging/line6/capture.c44
-rw-r--r--drivers/staging/line6/capture.h2
-rw-r--r--drivers/staging/line6/driver.c74
-rw-r--r--drivers/staging/line6/driver.h10
-rw-r--r--drivers/staging/line6/midi.c37
-rw-r--r--drivers/staging/line6/midi.h4
-rw-r--r--drivers/staging/line6/pcm.c115
-rw-r--r--drivers/staging/line6/pcm.h8
-rw-r--r--drivers/staging/line6/playback.c53
-rw-r--r--drivers/staging/line6/playback.h2
-rw-r--r--drivers/staging/line6/pod.c6
-rw-r--r--drivers/staging/line6/podhd.c154
-rw-r--r--drivers/staging/line6/podhd.h30
-rw-r--r--drivers/staging/line6/revision.h2
-rw-r--r--drivers/staging/line6/toneport.c6
-rw-r--r--drivers/staging/line6/usbdefs.h91
-rw-r--r--drivers/staging/line6/variax.c6
-rw-r--r--drivers/staging/media/as102/Kconfig1
-rw-r--r--drivers/staging/media/as102/Makefile2
-rw-r--r--drivers/staging/media/as102/as102_drv.c126
-rw-r--r--drivers/staging/media/as102/as102_drv.h59
-rw-r--r--drivers/staging/media/as102/as102_fe.c81
-rw-r--r--drivers/staging/media/as102/as102_fw.c44
-rw-r--r--drivers/staging/media/as102/as102_fw.h10
-rw-r--r--drivers/staging/media/as102/as102_usb_drv.c48
-rw-r--r--drivers/staging/media/as102/as102_usb_drv.h6
-rw-r--r--drivers/staging/media/as102/as10x_cmd.c143
-rw-r--r--drivers/staging/media/as102/as10x_cmd.h895
-rw-r--r--drivers/staging/media/as102/as10x_cmd_cfg.c66
-rw-r--r--drivers/staging/media/as102/as10x_cmd_stream.c56
-rw-r--r--drivers/staging/media/as102/as10x_handle.h26
-rw-r--r--drivers/staging/media/as102/as10x_types.h250
-rw-r--r--drivers/staging/media/dt3155v4l/dt3155v4l.c17
-rw-r--r--drivers/staging/media/easycap/easycap.h93
-rw-r--r--drivers/staging/media/easycap/easycap_ioctl.c60
-rw-r--r--drivers/staging/media/easycap/easycap_low.c273
-rw-r--r--drivers/staging/media/easycap/easycap_main.c379
-rw-r--r--drivers/staging/media/easycap/easycap_settings.c2
-rw-r--r--drivers/staging/media/easycap/easycap_sound.c340
-rw-r--r--drivers/staging/media/go7007/go7007-usb.c8
-rw-r--r--drivers/staging/media/go7007/snd-go7007.c2
-rw-r--r--drivers/staging/media/lirc/lirc_bt829.c2
-rw-r--r--drivers/staging/media/lirc/lirc_igorplugusb.c4
-rw-r--r--drivers/staging/media/lirc/lirc_imon.c4
-rw-r--r--drivers/staging/media/lirc/lirc_parallel.c6
-rw-r--r--drivers/staging/media/lirc/lirc_serial.c123
-rw-r--r--drivers/staging/media/lirc/lirc_sir.c2
-rw-r--r--drivers/staging/media/lirc/lirc_zilog.c4
-rw-r--r--drivers/staging/media/solo6x10/Makefile2
-rw-r--r--drivers/staging/media/solo6x10/solo6x10-jpeg.h (renamed from drivers/staging/media/solo6x10/jpeg.h)0
-rw-r--r--drivers/staging/media/solo6x10/v4l2-enc.c2
-rw-r--r--drivers/staging/mei/init.c49
-rw-r--r--drivers/staging/mei/interface.c8
-rw-r--r--drivers/staging/mei/interface.h11
-rw-r--r--drivers/staging/mei/interrupt.c322
-rw-r--r--drivers/staging/mei/iorw.c77
-rw-r--r--drivers/staging/mei/main.c576
-rw-r--r--drivers/staging/mei/mei.txt226
-rw-r--r--drivers/staging/mei/mei_dev.h13
-rw-r--r--drivers/staging/mei/wd.c32
-rw-r--r--drivers/staging/nvec/nvec.c30
-rw-r--r--drivers/staging/octeon/Makefile5
-rw-r--r--drivers/staging/octeon/cvmx-address.h274
-rw-r--r--drivers/staging/octeon/cvmx-asxx-defs.h475
-rw-r--r--drivers/staging/octeon/cvmx-cmd-queue.c306
-rw-r--r--drivers/staging/octeon/cvmx-cmd-queue.h617
-rw-r--r--drivers/staging/octeon/cvmx-config.h169
-rw-r--r--drivers/staging/octeon/cvmx-dbg-defs.h72
-rw-r--r--drivers/staging/octeon/cvmx-fau.h597
-rw-r--r--drivers/staging/octeon/cvmx-fpa-defs.h403
-rw-r--r--drivers/staging/octeon/cvmx-fpa.c183
-rw-r--r--drivers/staging/octeon/cvmx-fpa.h299
-rw-r--r--drivers/staging/octeon/cvmx-gmxx-defs.h2529
-rw-r--r--drivers/staging/octeon/cvmx-helper-board.c695
-rw-r--r--drivers/staging/octeon/cvmx-helper-board.h151
-rw-r--r--drivers/staging/octeon/cvmx-helper-fpa.c243
-rw-r--r--drivers/staging/octeon/cvmx-helper-fpa.h64
-rw-r--r--drivers/staging/octeon/cvmx-helper-loop.c85
-rw-r--r--drivers/staging/octeon/cvmx-helper-loop.h59
-rw-r--r--drivers/staging/octeon/cvmx-helper-npi.c113
-rw-r--r--drivers/staging/octeon/cvmx-helper-npi.h60
-rw-r--r--drivers/staging/octeon/cvmx-helper-rgmii.c525
-rw-r--r--drivers/staging/octeon/cvmx-helper-rgmii.h110
-rw-r--r--drivers/staging/octeon/cvmx-helper-sgmii.c550
-rw-r--r--drivers/staging/octeon/cvmx-helper-sgmii.h104
-rw-r--r--drivers/staging/octeon/cvmx-helper-spi.c195
-rw-r--r--drivers/staging/octeon/cvmx-helper-spi.h84
-rw-r--r--drivers/staging/octeon/cvmx-helper-util.c433
-rw-r--r--drivers/staging/octeon/cvmx-helper-util.h215
-rw-r--r--drivers/staging/octeon/cvmx-helper-xaui.c348
-rw-r--r--drivers/staging/octeon/cvmx-helper-xaui.h103
-rw-r--r--drivers/staging/octeon/cvmx-helper.c1058
-rw-r--r--drivers/staging/octeon/cvmx-helper.h227
-rw-r--r--drivers/staging/octeon/cvmx-interrupt-decodes.c371
-rw-r--r--drivers/staging/octeon/cvmx-interrupt-rsl.c140
-rw-r--r--drivers/staging/octeon/cvmx-ipd.h338
-rw-r--r--drivers/staging/octeon/cvmx-mdio.h506
-rw-r--r--drivers/staging/octeon/cvmx-packet.h65
-rw-r--r--drivers/staging/octeon/cvmx-pcsx-defs.h370
-rw-r--r--drivers/staging/octeon/cvmx-pcsxx-defs.h316
-rw-r--r--drivers/staging/octeon/cvmx-pip-defs.h1267
-rw-r--r--drivers/staging/octeon/cvmx-pip.h524
-rw-r--r--drivers/staging/octeon/cvmx-pko-defs.h1133
-rw-r--r--drivers/staging/octeon/cvmx-pko.c506
-rw-r--r--drivers/staging/octeon/cvmx-pko.h610
-rw-r--r--drivers/staging/octeon/cvmx-pow.h1982
-rw-r--r--drivers/staging/octeon/cvmx-scratch.h139
-rw-r--r--drivers/staging/octeon/cvmx-smix-defs.h178
-rw-r--r--drivers/staging/octeon/cvmx-spi.c667
-rw-r--r--drivers/staging/octeon/cvmx-spi.h269
-rw-r--r--drivers/staging/octeon/cvmx-spxx-defs.h347
-rw-r--r--drivers/staging/octeon/cvmx-srxx-defs.h126
-rw-r--r--drivers/staging/octeon/cvmx-stxx-defs.h292
-rw-r--r--drivers/staging/octeon/cvmx-wqe.h397
-rw-r--r--drivers/staging/octeon/ethernet-defines.h2
-rw-r--r--drivers/staging/octeon/ethernet-mdio.c4
-rw-r--r--drivers/staging/octeon/ethernet-mem.c2
-rw-r--r--drivers/staging/octeon/ethernet-rgmii.c4
-rw-r--r--drivers/staging/octeon/ethernet-rx.c16
-rw-r--r--drivers/staging/octeon/ethernet-rx.h2
-rw-r--r--drivers/staging/octeon/ethernet-sgmii.c4
-rw-r--r--drivers/staging/octeon/ethernet-spi.c6
-rw-r--r--drivers/staging/octeon/ethernet-tx.c12
-rw-r--r--drivers/staging/octeon/ethernet-xaui.c4
-rw-r--r--drivers/staging/octeon/ethernet.c16
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon.c18
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon.h2
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon_xo_1.c10
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c10
-rw-r--r--drivers/staging/omapdrm/Kconfig25
-rw-r--r--drivers/staging/omapdrm/Makefile21
-rw-r--r--drivers/staging/omapdrm/TODO38
-rw-r--r--drivers/staging/omapdrm/omap_connector.c371
-rw-r--r--drivers/staging/omapdrm/omap_crtc.c326
-rw-r--r--drivers/staging/omapdrm/omap_debugfs.c42
-rw-r--r--drivers/staging/omapdrm/omap_dmm_priv.h187
-rw-r--r--drivers/staging/omapdrm/omap_dmm_tiler.c830
-rw-r--r--drivers/staging/omapdrm/omap_dmm_tiler.h135
-rw-r--r--drivers/staging/omapdrm/omap_drm.h123
-rw-r--r--drivers/staging/omapdrm/omap_drv.c821
-rw-r--r--drivers/staging/omapdrm/omap_drv.h135
-rw-r--r--drivers/staging/omapdrm/omap_encoder.c171
-rw-r--r--drivers/staging/omapdrm/omap_fb.c243
-rw-r--r--drivers/staging/omapdrm/omap_fbdev.c372
-rw-r--r--drivers/staging/omapdrm/omap_gem.c1231
-rw-r--r--drivers/staging/omapdrm/omap_gem_helpers.c169
-rw-r--r--drivers/staging/omapdrm/omap_priv.h47
-rw-r--r--drivers/staging/omapdrm/tcm-sita.c703
-rw-r--r--drivers/staging/omapdrm/tcm-sita.h95
-rw-r--r--drivers/staging/omapdrm/tcm.h326
-rw-r--r--drivers/staging/phison/phison.c2
-rw-r--r--drivers/staging/pohmelfs/dir.c11
-rw-r--r--drivers/staging/pohmelfs/inode.c9
-rw-r--r--drivers/staging/pohmelfs/netfs.h2
-rw-r--r--drivers/staging/quatech_usb2/quatech_usb2.c2
-rw-r--r--drivers/staging/rtl8192e/Kconfig50
-rw-r--r--drivers/staging/rtl8192e/Makefile46
-rw-r--r--drivers/staging/rtl8192e/dot11d.c4
-rw-r--r--drivers/staging/rtl8192e/dot11d.h2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/Kconfig9
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/Makefile21
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8190P_def.h (renamed from drivers/staging/rtl8192e/r8190P_def.h)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c (renamed from drivers/staging/rtl8192e/r8190P_rtl8256.c)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.h (renamed from drivers/staging/rtl8192e/r8190P_rtl8256.h)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c (renamed from drivers/staging/rtl8192e/r8192E_cmdpkt.c)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.h (renamed from drivers/staging/rtl8192e/r8192E_cmdpkt.h)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c (renamed from drivers/staging/rtl8192e/r8192E_dev.c)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_dev.h (renamed from drivers/staging/rtl8192e/r8192E_dev.h)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c (renamed from drivers/staging/rtl8192e/r8192E_firmware.c)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.h (renamed from drivers/staging/rtl8192e/r8192E_firmware.h)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_hw.h (renamed from drivers/staging/rtl8192e/r8192E_hw.h)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_hwimg.c (renamed from drivers/staging/rtl8192e/r8192E_hwimg.c)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_hwimg.h (renamed from drivers/staging/rtl8192e/r8192E_hwimg.h)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c (renamed from drivers/staging/rtl8192e/r8192E_phy.c)3
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_phy.h (renamed from drivers/staging/rtl8192e/r8192E_phy.h)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_phyreg.h (renamed from drivers/staging/rtl8192e/r8192E_phyreg.h)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r819xE_phyreg.h (renamed from drivers/staging/rtl8192e/r819xE_phyreg.h)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_cam.c (renamed from drivers/staging/rtl8192e/rtl_cam.c)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_cam.h (renamed from drivers/staging/rtl8192e/rtl_cam.h)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.c (renamed from drivers/staging/rtl8192e/rtl_core.c)68
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.h (renamed from drivers/staging/rtl8192e/rtl_core.h)53
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_crypto.h (renamed from drivers/staging/rtl8192e/rtl_crypto.h)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_debug.c (renamed from drivers/staging/rtl8192e/rtl_debug.c)79
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_dm.c (renamed from drivers/staging/rtl8192e/rtl_dm.c)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_dm.h (renamed from drivers/staging/rtl8192e/rtl_dm.h)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.c (renamed from drivers/staging/rtl8192e/rtl_eeprom.c)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.h (renamed from drivers/staging/rtl8192e/rtl_eeprom.h)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_ethtool.c (renamed from drivers/staging/rtl8192e/rtl_ethtool.c)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_pci.c (renamed from drivers/staging/rtl8192e/rtl_pci.c)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_pci.h (renamed from drivers/staging/rtl8192e/rtl_pci.h)1
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_pm.c (renamed from drivers/staging/rtl8192e/rtl_pm.c)2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_pm.h (renamed from drivers/staging/rtl8192e/rtl_pm.h)4
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_ps.c (renamed from drivers/staging/rtl8192e/rtl_ps.c)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_ps.h (renamed from drivers/staging/rtl8192e/rtl_ps.h)2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_wx.c (renamed from drivers/staging/rtl8192e/rtl_wx.c)3
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_wx.h (renamed from drivers/staging/rtl8192e/rtl_wx.h)0
-rw-r--r--drivers/staging/rtl8192e/rtl819x_BAProc.c1
-rw-r--r--drivers/staging/rtl8192e/rtl819x_HTProc.c5
-rw-r--r--drivers/staging/rtl8192e/rtl819x_TSProc.c1
-rw-r--r--drivers/staging/rtl8192e/rtl_debug.h299
-rw-r--r--drivers/staging/rtl8192e/rtllib.h113
-rw-r--r--drivers/staging/rtl8192e/rtllib_crypt.c80
-rw-r--r--drivers/staging/rtl8192e/rtllib_crypt.h64
-rw-r--r--drivers/staging/rtl8192e/rtllib_crypt_ccmp.c26
-rw-r--r--drivers/staging/rtl8192e/rtllib_crypt_tkip.c38
-rw-r--r--drivers/staging/rtl8192e/rtllib_crypt_wep.c25
-rw-r--r--drivers/staging/rtl8192e/rtllib_debug.h86
-rw-r--r--drivers/staging/rtl8192e/rtllib_module.c38
-rw-r--r--drivers/staging/rtl8192e/rtllib_rx.c21
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac.c96
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac_wx.c19
-rw-r--r--drivers/staging/rtl8192e/rtllib_tx.c19
-rw-r--r--drivers/staging/rtl8192e/rtllib_wx.c109
-rw-r--r--drivers/staging/rtl8192u/ieee80211/api.c244
-rw-r--r--drivers/staging/rtl8712/rtl871x_mlme.c2
-rw-r--r--drivers/staging/rtl8712/usb_intf.c1
-rw-r--r--drivers/staging/rts5139/rts51x.h1
-rw-r--r--drivers/staging/rts5139/rts51x_transport.h1
-rw-r--r--drivers/staging/rts_pstor/rtsx.c3
-rw-r--r--drivers/staging/sep/sep_driver.c4
-rw-r--r--drivers/staging/serial/68360serial.c4
-rw-r--r--drivers/staging/serqt_usb2/serqt_usb2.c2
-rw-r--r--drivers/staging/sm7xx/smtcfb.c6
-rw-r--r--drivers/staging/speakup/kobjects.c3
-rw-r--r--drivers/staging/speakup/main.c5
-rw-r--r--drivers/staging/speakup/speakup.h2
-rw-r--r--drivers/staging/speakup/synth.c2
-rw-r--r--drivers/staging/spectra/Kconfig41
-rw-r--r--drivers/staging/spectra/Makefile11
-rw-r--r--drivers/staging/spectra/README29
-rw-r--r--drivers/staging/spectra/ffsdefs.h58
-rw-r--r--drivers/staging/spectra/ffsport.c834
-rw-r--r--drivers/staging/spectra/ffsport.h85
-rw-r--r--drivers/staging/spectra/flash.c4305
-rw-r--r--drivers/staging/spectra/flash.h198
-rw-r--r--drivers/staging/spectra/lld.c339
-rw-r--r--drivers/staging/spectra/lld.h111
-rw-r--r--drivers/staging/spectra/lld_cdma.c910
-rw-r--r--drivers/staging/spectra/lld_cdma.h123
-rw-r--r--drivers/staging/spectra/lld_emu.c776
-rw-r--r--drivers/staging/spectra/lld_emu.h51
-rw-r--r--drivers/staging/spectra/lld_mtd.c683
-rw-r--r--drivers/staging/spectra/lld_mtd.h51
-rw-r--r--drivers/staging/spectra/lld_nand.c2619
-rw-r--r--drivers/staging/spectra/lld_nand.h131
-rw-r--r--drivers/staging/spectra/nand_regs.h619
-rw-r--r--drivers/staging/spectra/spectraswconfig.h82
-rw-r--r--drivers/staging/tidspbridge/Kconfig2
-rw-r--r--drivers/staging/tidspbridge/core/dsp-clock.c15
-rw-r--r--drivers/staging/tidspbridge/rmgr/dbdcd.c2
-rw-r--r--drivers/staging/tidspbridge/rmgr/drv_interface.c4
-rw-r--r--drivers/staging/usbip/stub_dev.c5
-rw-r--r--drivers/staging/usbip/stub_rx.c2
-rw-r--r--drivers/staging/usbip/usbip_common.c61
-rw-r--r--drivers/staging/usbip/usbip_common.h17
-rw-r--r--drivers/staging/usbip/vhci_rx.c12
-rw-r--r--drivers/staging/vme/TODO67
-rw-r--r--drivers/staging/vme/bridges/vme_ca91cx42.c31
-rw-r--r--drivers/staging/vme/bridges/vme_tsi148.c38
-rw-r--r--drivers/staging/vme/devices/Kconfig13
-rw-r--r--drivers/staging/vme/devices/Makefile3
-rw-r--r--drivers/staging/vme/devices/vme_pio2.h249
-rw-r--r--drivers/staging/vme/devices/vme_pio2_cntr.c71
-rw-r--r--drivers/staging/vme/devices/vme_pio2_core.c524
-rw-r--r--drivers/staging/vme/devices/vme_pio2_gpio.c232
-rw-r--r--drivers/staging/vme/devices/vme_user.h10
-rw-r--r--drivers/staging/vme/vme.c69
-rw-r--r--drivers/staging/vme/vme.h38
-rw-r--r--drivers/staging/vme/vme_api.txt61
-rw-r--r--drivers/staging/vme/vme_bridge.h38
-rw-r--r--drivers/staging/vt6655/device_main.c6
-rw-r--r--drivers/staging/vt6655/ioctl.c8
-rw-r--r--drivers/staging/vt6655/iwctl.c12
-rw-r--r--drivers/staging/vt6655/iwctl.h5
-rw-r--r--drivers/staging/vt6656/80211mgr.c35
-rw-r--r--drivers/staging/vt6656/baseband.c61
-rw-r--r--drivers/staging/vt6656/bssdb.c100
-rw-r--r--drivers/staging/vt6656/card.c14
-rw-r--r--drivers/staging/vt6656/card.h4
-rw-r--r--drivers/staging/vt6656/int.c5
-rw-r--r--drivers/staging/vt6656/int.h2
-rw-r--r--drivers/staging/vt6656/ioctl.c8
-rw-r--r--drivers/staging/vt6656/iwctl.c12
-rw-r--r--drivers/staging/vt6656/iwctl.h5
-rw-r--r--drivers/staging/vt6656/mac.c4
-rw-r--r--drivers/staging/vt6656/mac.h2
-rw-r--r--drivers/staging/vt6656/main_usb.c12
-rw-r--r--drivers/staging/wlags49_h2/debug.h2
-rw-r--r--drivers/staging/wlags49_h2/dhfcfg.h2
-rw-r--r--drivers/staging/wlags49_h2/hcf.c6
-rw-r--r--drivers/staging/wlags49_h2/hcf.h6
-rw-r--r--drivers/staging/wlags49_h2/hcfcfg.h6
-rw-r--r--drivers/staging/wlags49_h2/hcfdef.h6
-rw-r--r--drivers/staging/wlags49_h2/mdd.h6
-rw-r--r--drivers/staging/wlags49_h2/mmd.c2
-rw-r--r--drivers/staging/wlags49_h2/mmd.h2
-rw-r--r--drivers/staging/wlags49_h2/wl_cs.h4
-rw-r--r--drivers/staging/wlags49_h2/wl_enc.c4
-rw-r--r--drivers/staging/wlags49_h2/wl_enc.h4
-rw-r--r--drivers/staging/wlags49_h2/wl_if.h4
-rw-r--r--drivers/staging/wlags49_h2/wl_internal.h4
-rw-r--r--drivers/staging/wlags49_h2/wl_main.c4
-rw-r--r--drivers/staging/wlags49_h2/wl_main.h4
-rw-r--r--drivers/staging/wlags49_h2/wl_netdev.c4
-rw-r--r--drivers/staging/wlags49_h2/wl_netdev.h4
-rw-r--r--drivers/staging/wlags49_h2/wl_pci.c17
-rw-r--r--drivers/staging/wlags49_h2/wl_pci.h4
-rw-r--r--drivers/staging/wlags49_h2/wl_priv.c4
-rw-r--r--drivers/staging/wlags49_h2/wl_priv.h4
-rw-r--r--drivers/staging/wlags49_h2/wl_profile.c4
-rw-r--r--drivers/staging/wlags49_h2/wl_profile.h4
-rw-r--r--drivers/staging/wlags49_h2/wl_util.c4
-rw-r--r--drivers/staging/wlags49_h2/wl_util.h4
-rw-r--r--drivers/staging/wlags49_h2/wl_version.h4
-rw-r--r--drivers/staging/wlags49_h2/wl_wext.c4
-rw-r--r--drivers/staging/wlags49_h2/wl_wext.h4
-rw-r--r--drivers/staging/xgifb/Makefile2
-rw-r--r--drivers/staging/xgifb/XGI_main.h13
-rw-r--r--drivers/staging/xgifb/XGI_main_26.c158
-rw-r--r--drivers/staging/xgifb/XGIfb.h3
-rw-r--r--drivers/staging/xgifb/vb_ext.c444
-rw-r--r--drivers/staging/xgifb/vb_ext.h9
-rw-r--r--drivers/staging/xgifb/vb_init.c276
-rw-r--r--drivers/staging/xgifb/vb_setmode.c1003
-rw-r--r--drivers/staging/xgifb/vb_setmode.h52
-rw-r--r--drivers/staging/xgifb/vb_struct.h4
-rw-r--r--drivers/staging/xgifb/vb_table.h27
-rw-r--r--drivers/staging/xgifb/vgatypes.h2
-rw-r--r--drivers/staging/zcache/zcache-main.c6
-rw-r--r--drivers/staging/zram/zram_drv.c3
-rw-r--r--drivers/staging/zram/zram_sysfs.c6
-rw-r--r--drivers/target/iscsi/iscsi_target.c45
-rw-r--r--drivers/target/iscsi/iscsi_target_auth.c42
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c11
-rw-r--r--drivers/target/iscsi/iscsi_target_core.h3
-rw-r--r--drivers/target/iscsi/iscsi_target_device.c3
-rw-r--r--drivers/target/iscsi/iscsi_target_erl0.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_erl1.c5
-rw-r--r--drivers/target/iscsi/iscsi_target_erl2.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c36
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.c7
-rw-r--r--drivers/target/iscsi/iscsi_target_nodeattrib.c3
-rw-r--r--drivers/target/iscsi/iscsi_target_stat.c17
-rw-r--r--drivers/target/iscsi/iscsi_target_tmr.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.c6
-rw-r--r--drivers/target/iscsi/iscsi_target_util.c8
-rw-r--r--drivers/target/loopback/tcm_loop.c74
-rw-r--r--drivers/target/loopback/tcm_loop.h11
-rw-r--r--drivers/target/target_core_alua.c34
-rw-r--r--drivers/target/target_core_cdb.c59
-rw-r--r--drivers/target/target_core_cdb.h14
-rw-r--r--drivers/target/target_core_configfs.c45
-rw-r--r--drivers/target/target_core_device.c45
-rw-r--r--drivers/target/target_core_fabric_configfs.c8
-rw-r--r--drivers/target/target_core_fabric_lib.c13
-rw-r--r--drivers/target/target_core_file.c35
-rw-r--r--drivers/target/target_core_hba.c7
-rw-r--r--drivers/target/target_core_hba.h7
-rw-r--r--drivers/target/target_core_iblock.c23
-rw-r--r--drivers/target/target_core_internal.h123
-rw-r--r--drivers/target/target_core_pr.c264
-rw-r--r--drivers/target/target_core_pr.h2
-rw-r--r--drivers/target/target_core_pscsi.c48
-rw-r--r--drivers/target/target_core_rd.c264
-rw-r--r--drivers/target/target_core_stat.c9
-rw-r--r--drivers/target/target_core_stat.h8
-rw-r--r--drivers/target/target_core_tmr.c40
-rw-r--r--drivers/target/target_core_tpg.c9
-rw-r--r--drivers/target/target_core_transport.c589
-rw-r--r--drivers/target/target_core_ua.c6
-rw-r--r--drivers/target/tcm_fc/tfc_cmd.c59
-rw-r--r--drivers/target/tcm_fc/tfc_conf.c9
-rw-r--r--drivers/target/tcm_fc/tfc_io.c5
-rw-r--r--drivers/target/tcm_fc/tfc_sess.c5
-rw-r--r--drivers/tty/n_hdlc.c6
-rw-r--r--drivers/tty/n_tty.c8
-rw-r--r--drivers/tty/pty.c26
-rw-r--r--drivers/tty/rocket.c2
-rw-r--r--drivers/tty/serial/8250.c98
-rw-r--r--drivers/tty/serial/8250.h26
-rw-r--r--drivers/tty/serial/8250_dw.c12
-rw-r--r--drivers/tty/serial/8250_fsl.c63
-rw-r--r--drivers/tty/serial/8250_pci.c47
-rw-r--r--drivers/tty/serial/Kconfig106
-rw-r--r--drivers/tty/serial/Makefile8
-rw-r--r--drivers/tty/serial/amba-pl010.c2
-rw-r--r--drivers/tty/serial/amba-pl011.c10
-rw-r--r--drivers/tty/serial/apbuart.c4
-rw-r--r--drivers/tty/serial/ar933x_uart.c688
-rw-r--r--drivers/tty/serial/atmel_serial.c12
-rw-r--r--drivers/tty/serial/bfin_sport_uart.c22
-rw-r--r--drivers/tty/serial/bfin_sport_uart.h5
-rw-r--r--drivers/tty/serial/bfin_uart.c83
-rw-r--r--drivers/tty/serial/ifx6x60.c1
-rw-r--r--drivers/tty/serial/imx.c148
-rw-r--r--drivers/tty/serial/m32r_sio.c7
-rw-r--r--drivers/tty/serial/max3100.c1
-rw-r--r--drivers/tty/serial/max3107-aava.c1
-rw-r--r--drivers/tty/serial/max3107.c1
-rw-r--r--drivers/tty/serial/mfd.c18
-rw-r--r--drivers/tty/serial/mrst_max3110.c1
-rw-r--r--drivers/tty/serial/msm_serial_hs.c23
-rw-r--r--drivers/tty/serial/mxs-auart.c13
-rw-r--r--drivers/tty/serial/omap-serial.c430
-rw-r--r--drivers/tty/serial/pch_uart.c164
-rw-r--r--drivers/tty/serial/pmac_zilog.c423
-rw-r--r--drivers/tty/serial/pmac_zilog.h19
-rw-r--r--drivers/tty/serial/s3c2410.c115
-rw-r--r--drivers/tty/serial/s3c2412.c149
-rw-r--r--drivers/tty/serial/s3c2440.c178
-rw-r--r--drivers/tty/serial/s3c6400.c149
-rw-r--r--drivers/tty/serial/s5pv210.c158
-rw-r--r--drivers/tty/serial/samsung.c639
-rw-r--r--drivers/tty/serial/samsung.h32
-rw-r--r--drivers/tty/serial/sc26xx.c14
-rw-r--r--drivers/tty/serial/serial_core.c325
-rw-r--r--drivers/tty/serial/serial_cs.c8
-rw-r--r--drivers/tty/serial/sh-sci.c187
-rw-r--r--drivers/tty/serial/sh-sci.h4
-rw-r--r--drivers/tty/serial/sirfsoc_uart.c783
-rw-r--r--drivers/tty/serial/sirfsoc_uart.h185
-rw-r--r--drivers/tty/serial/timbuart.c15
-rw-r--r--drivers/tty/serial/ucc_uart.c3
-rw-r--r--drivers/tty/serial/vr41xx_siu.c13
-rw-r--r--drivers/tty/synclink.c2
-rw-r--r--drivers/tty/synclinkmp.c2
-rw-r--r--drivers/tty/sysrq.c2
-rw-r--r--drivers/tty/tty_io.c311
-rw-r--r--drivers/tty/tty_ldisc.c22
-rw-r--r--drivers/tty/vt/consolemap.c2
-rw-r--r--drivers/uio/uio_pci_generic.c78
-rw-r--r--drivers/usb/Kconfig1
-rw-r--r--drivers/usb/Makefile3
-rw-r--r--drivers/usb/atm/speedtch.c6
-rw-r--r--drivers/usb/atm/ueagle-atm.c2
-rw-r--r--drivers/usb/c67x00/c67x00-drv.c15
-rw-r--r--drivers/usb/c67x00/c67x00-hcd.c1
-rw-r--r--drivers/usb/class/cdc-acm.c348
-rw-r--r--drivers/usb/class/cdc-acm.h1
-rw-r--r--drivers/usb/class/usblp.c2
-rw-r--r--drivers/usb/core/devio.c191
-rw-r--r--drivers/usb/core/driver.c36
-rw-r--r--drivers/usb/core/file.c2
-rw-r--r--drivers/usb/core/hcd-pci.c4
-rw-r--r--drivers/usb/core/hcd.c31
-rw-r--r--drivers/usb/core/hub.c97
-rw-r--r--drivers/usb/core/inode.c31
-rw-r--r--drivers/usb/core/quirks.c5
-rw-r--r--drivers/usb/core/sysfs.c4
-rw-r--r--drivers/usb/core/usb.c4
-rw-r--r--drivers/usb/core/usb.h14
-rw-r--r--drivers/usb/dwc3/Kconfig5
-rw-r--r--drivers/usb/dwc3/Makefile6
-rw-r--r--drivers/usb/dwc3/core.c211
-rw-r--r--drivers/usb/dwc3/core.h62
-rw-r--r--drivers/usb/dwc3/debugfs.c83
-rw-r--r--drivers/usb/dwc3/dwc3-omap.c43
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c51
-rw-r--r--drivers/usb/dwc3/ep0.c160
-rw-r--r--drivers/usb/dwc3/gadget.c440
-rw-r--r--drivers/usb/dwc3/gadget.h29
-rw-r--r--drivers/usb/dwc3/host.c102
-rw-r--r--drivers/usb/dwc3/io.h2
-rw-r--r--drivers/usb/gadget/Kconfig29
-rw-r--r--drivers/usb/gadget/Makefile2
-rw-r--r--drivers/usb/gadget/amd5536udc.c12
-rw-r--r--drivers/usb/gadget/at91_udc.c16
-rw-r--r--drivers/usb/gadget/atmel_usba_udc.c2
-rw-r--r--drivers/usb/gadget/ci13xxx_udc.c36
-rw-r--r--drivers/usb/gadget/ci13xxx_udc.h2
-rw-r--r--drivers/usb/gadget/composite.c8
-rw-r--r--drivers/usb/gadget/dbgp.c2
-rw-r--r--drivers/usb/gadget/dummy_hcd.c15
-rw-r--r--drivers/usb/gadget/epautoconf.c9
-rw-r--r--drivers/usb/gadget/ether.c4
-rw-r--r--drivers/usb/gadget/f_fs.c33
-rw-r--r--drivers/usb/gadget/f_mass_storage.c61
-rw-r--r--drivers/usb/gadget/f_phonet.c11
-rw-r--r--drivers/usb/gadget/f_serial.c4
-rw-r--r--drivers/usb/gadget/file_storage.c74
-rw-r--r--drivers/usb/gadget/fsl_mxc_udc.c3
-rw-r--r--drivers/usb/gadget/fsl_qe_udc.c20
-rw-r--r--drivers/usb/gadget/fsl_udc_core.c77
-rw-r--r--drivers/usb/gadget/fsl_usb2_udc.h10
-rw-r--r--drivers/usb/gadget/fusb300_udc.c4
-rw-r--r--drivers/usb/gadget/goku_udc.c3
-rw-r--r--drivers/usb/gadget/imx_udc.c2
-rw-r--r--drivers/usb/gadget/inode.c32
-rw-r--r--drivers/usb/gadget/langwell_udc.c2
-rw-r--r--drivers/usb/gadget/m66592-udc.c4
-rw-r--r--drivers/usb/gadget/mv_udc.h7
-rw-r--r--drivers/usb/gadget/mv_udc_core.c344
-rw-r--r--drivers/usb/gadget/net2272.c6
-rw-r--r--drivers/usb/gadget/net2280.c10
-rw-r--r--drivers/usb/gadget/omap_udc.c5
-rw-r--r--drivers/usb/gadget/pch_udc.c6
-rw-r--r--drivers/usb/gadget/printer.c6
-rw-r--r--drivers/usb/gadget/pxa25x_udc.c2
-rw-r--r--drivers/usb/gadget/pxa27x_udc.c2
-rw-r--r--drivers/usb/gadget/r8a66597-udc.c4
-rw-r--r--drivers/usb/gadget/s3c-hsotg.c19
-rw-r--r--drivers/usb/gadget/s3c-hsudc.c137
-rw-r--r--drivers/usb/gadget/s3c2410_udc.c8
-rw-r--r--drivers/usb/gadget/s3c2410_udc.h2
-rw-r--r--drivers/usb/gadget/serial.c4
-rw-r--r--drivers/usb/gadget/udc-core.c26
-rw-r--r--drivers/usb/gadget/usbstring.c73
-rw-r--r--drivers/usb/gadget/zero.c2
-rw-r--r--drivers/usb/host/Kconfig17
-rw-r--r--drivers/usb/host/alchemy-common.c277
-rw-r--r--drivers/usb/host/ehci-ath79.c4
-rw-r--r--drivers/usb/host/ehci-au1xxx.c1
-rw-r--r--drivers/usb/host/ehci-hcd.c71
-rw-r--r--drivers/usb/host/ehci-mv.c391
-rw-r--r--drivers/usb/host/ehci-octeon.c2
-rw-r--r--drivers/usb/host/ehci-omap.c19
-rw-r--r--drivers/usb/host/ehci-orion.c10
-rw-r--r--drivers/usb/host/ehci-ps3.c30
-rw-r--r--drivers/usb/host/ehci-pxa168.c2
-rw-r--r--drivers/usb/host/ehci-q.c13
-rw-r--r--drivers/usb/host/ehci-s5p.c4
-rw-r--r--drivers/usb/host/ehci-sched.c9
-rw-r--r--drivers/usb/host/ehci-tegra.c71
-rw-r--r--drivers/usb/host/ehci-vt8500.c2
-rw-r--r--drivers/usb/host/ehci-w90x900.c2
-rw-r--r--drivers/usb/host/ehci-xilinx-of.c2
-rw-r--r--drivers/usb/host/ehci-xls.c2
-rw-r--r--drivers/usb/host/fhci-hcd.c12
-rw-r--r--drivers/usb/host/fsl-mph-dr-of.c12
-rw-r--r--drivers/usb/host/hwa-hc.c3
-rw-r--r--drivers/usb/host/imx21-hcd.c15
-rw-r--r--drivers/usb/host/isp1760-hcd.c74
-rw-r--r--drivers/usb/host/isp1760-if.c27
-rw-r--r--drivers/usb/host/ohci-at91.c12
-rw-r--r--drivers/usb/host/ohci-au1xxx.c18
-rw-r--r--drivers/usb/host/ohci-dbg.c18
-rw-r--r--drivers/usb/host/ohci-ep93xx.c2
-rw-r--r--drivers/usb/host/ohci-exynos.c274
-rw-r--r--drivers/usb/host/ohci-hcd.c37
-rw-r--r--drivers/usb/host/ohci-hub.c7
-rw-r--r--drivers/usb/host/ohci-omap.c1
-rw-r--r--drivers/usb/host/ohci-omap3.c18
-rw-r--r--drivers/usb/host/ohci-pci.c5
-rw-r--r--drivers/usb/host/ohci-pxa27x.c2
-rw-r--r--drivers/usb/host/ohci-q.c8
-rw-r--r--drivers/usb/host/ohci-s3c2410.c55
-rw-r--r--drivers/usb/host/ohci-sh.c1
-rw-r--r--drivers/usb/host/ohci-sm501.c1
-rw-r--r--drivers/usb/host/ohci-spear.c1
-rw-r--r--drivers/usb/host/ohci-tmio.c3
-rw-r--r--drivers/usb/host/ohci-xls.c2
-rw-r--r--drivers/usb/host/ohci.h14
-rw-r--r--drivers/usb/host/oxu210hp-hcd.c21
-rw-r--r--drivers/usb/host/u132-hcd.c2
-rw-r--r--drivers/usb/host/uhci-hcd.c2
-rw-r--r--drivers/usb/host/uhci-q.c2
-rw-r--r--drivers/usb/host/whci/qset.c6
-rw-r--r--drivers/usb/host/xhci-hub.c18
-rw-r--r--drivers/usb/host/xhci-mem.c14
-rw-r--r--drivers/usb/host/xhci-ring.c113
-rw-r--r--drivers/usb/host/xhci.c36
-rw-r--r--drivers/usb/host/xhci.h3
-rw-r--r--drivers/usb/misc/ftdi-elan.c2
-rw-r--r--drivers/usb/misc/iowarrior.c4
-rw-r--r--drivers/usb/misc/isight_firmware.c6
-rw-r--r--drivers/usb/misc/legousbtower.c2
-rw-r--r--drivers/usb/misc/usbled.c2
-rw-r--r--drivers/usb/misc/usbtest.c1
-rw-r--r--drivers/usb/musb/Kconfig61
-rw-r--r--drivers/usb/musb/Makefile26
-rw-r--r--drivers/usb/musb/cppi_dma.c4
-rw-r--r--drivers/usb/musb/musb_core.c16
-rw-r--r--drivers/usb/musb/musb_core.h4
-rw-r--r--drivers/usb/musb/musb_debug.h4
-rw-r--r--drivers/usb/musb/musb_debugfs.c8
-rw-r--r--drivers/usb/musb/musb_gadget.c6
-rw-r--r--drivers/usb/musb/musb_gadget_ep0.c1
-rw-r--r--drivers/usb/musb/musb_host.c4
-rw-r--r--drivers/usb/musb/musb_io.h2
-rw-r--r--drivers/usb/musb/omap2430.c61
-rw-r--r--drivers/usb/musb/tusb6010.c1
-rw-r--r--drivers/usb/musb/ux500_dma.c43
-rw-r--r--drivers/usb/otg/Kconfig32
-rw-r--r--drivers/usb/otg/Makefile1
-rw-r--r--drivers/usb/otg/ab8500-usb.c2
-rw-r--r--drivers/usb/otg/fsl_otg.c15
-rw-r--r--drivers/usb/otg/mv_otg.c957
-rw-r--r--drivers/usb/otg/mv_otg.h165
-rw-r--r--drivers/usb/renesas_usbhs/common.c52
-rw-r--r--drivers/usb/renesas_usbhs/common.h9
-rw-r--r--drivers/usb/renesas_usbhs/fifo.c13
-rw-r--r--drivers/usb/renesas_usbhs/fifo.h3
-rw-r--r--drivers/usb/renesas_usbhs/mod.c6
-rw-r--r--drivers/usb/renesas_usbhs/mod_gadget.c238
-rw-r--r--drivers/usb/renesas_usbhs/mod_host.c953
-rw-r--r--drivers/usb/renesas_usbhs/pipe.c31
-rw-r--r--drivers/usb/renesas_usbhs/pipe.h1
-rw-r--r--drivers/usb/serial/ChangeLog.history730
-rw-r--r--drivers/usb/serial/aircable.c2
-rw-r--r--drivers/usb/serial/ark3116.c2
-rw-r--r--drivers/usb/serial/belkin_sa.c45
-rw-r--r--drivers/usb/serial/ch341.c5
-rw-r--r--drivers/usb/serial/cp210x.c61
-rw-r--r--drivers/usb/serial/cyberjack.c35
-rw-r--r--drivers/usb/serial/cypress_m8.c35
-rw-r--r--drivers/usb/serial/digi_acceleport.c229
-rw-r--r--drivers/usb/serial/empeg.c2
-rw-r--r--drivers/usb/serial/ftdi_sio.c7
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h7
-rw-r--r--drivers/usb/serial/funsoft.c2
-rw-r--r--drivers/usb/serial/garmin_gps.c11
-rw-r--r--drivers/usb/serial/generic.c83
-rw-r--r--drivers/usb/serial/io_edgeport.c5
-rw-r--r--drivers/usb/serial/io_ti.c32
-rw-r--r--drivers/usb/serial/ipaq.c36
-rw-r--r--drivers/usb/serial/ipw.c2
-rw-r--r--drivers/usb/serial/ir-usb.c34
-rw-r--r--drivers/usb/serial/iuu_phoenix.c9
-rw-r--r--drivers/usb/serial/keyspan.c92
-rw-r--r--drivers/usb/serial/keyspan_pda.c68
-rw-r--r--drivers/usb/serial/kl5kusb105.c2
-rw-r--r--drivers/usb/serial/kobil_sct.c25
-rw-r--r--drivers/usb/serial/mct_u232.c48
-rw-r--r--drivers/usb/serial/mos7720.c20
-rw-r--r--drivers/usb/serial/mos7840.c6
-rw-r--r--drivers/usb/serial/navman.c2
-rw-r--r--drivers/usb/serial/omninet.c53
-rw-r--r--drivers/usb/serial/opticon.c3
-rw-r--r--drivers/usb/serial/option.c16
-rw-r--r--drivers/usb/serial/oti6858.c25
-rw-r--r--drivers/usb/serial/pl2303.c19
-rw-r--r--drivers/usb/serial/qcserial.c2
-rw-r--r--drivers/usb/serial/safe_serial.c6
-rw-r--r--drivers/usb/serial/sierra.c5
-rw-r--r--drivers/usb/serial/spcp8x5.c2
-rw-r--r--drivers/usb/serial/ssu100.c2
-rw-r--r--drivers/usb/serial/symbolserial.c3
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c15
-rw-r--r--drivers/usb/serial/usb-serial.c100
-rw-r--r--drivers/usb/serial/usb_debug.c13
-rw-r--r--drivers/usb/serial/usb_wwan.c2
-rw-r--r--drivers/usb/serial/visor.c2
-rw-r--r--drivers/usb/serial/whiteheat.c60
-rw-r--r--drivers/usb/storage/alauda.c2
-rw-r--r--drivers/usb/storage/cypress_atacb.c2
-rw-r--r--drivers/usb/storage/datafab.c2
-rw-r--r--drivers/usb/storage/ene_ub6250.c12
-rw-r--r--drivers/usb/storage/freecom.c2
-rw-r--r--drivers/usb/storage/isd200.c4
-rw-r--r--drivers/usb/storage/jumpshot.c2
-rw-r--r--drivers/usb/storage/karma.c2
-rw-r--r--drivers/usb/storage/onetouch.c2
-rw-r--r--drivers/usb/storage/realtek_cr.c14
-rw-r--r--drivers/usb/storage/sddr09.c2
-rw-r--r--drivers/usb/storage/sddr55.c2
-rw-r--r--drivers/usb/storage/shuttle_usbat.c2
-rw-r--r--drivers/usb/storage/unusual_devs.h9
-rw-r--r--drivers/usb/storage/usb.c14
-rw-r--r--drivers/usb/usb-skeleton.c40
-rw-r--r--drivers/usb/wusbcore/Kconfig1
-rw-r--r--drivers/usb/wusbcore/security.c2
-rw-r--r--drivers/uwb/est.c2
-rw-r--r--drivers/uwb/i1480/dfu/usb.c2
-rw-r--r--drivers/vhost/net.c8
-rw-r--r--drivers/video/Kconfig9
-rw-r--r--drivers/video/Makefile2
-rw-r--r--drivers/video/amba-clcd.c2
-rw-r--r--drivers/video/amifb.c3732
-rw-r--r--drivers/video/atmel_lcdfb.c37
-rw-r--r--drivers/video/aty/atyfb_base.c4
-rw-r--r--drivers/video/aty/radeon_base.c18
-rw-r--r--drivers/video/au1100fb.c12
-rw-r--r--drivers/video/au1200fb.c273
-rw-r--r--drivers/video/backlight/88pm860x_bl.c12
-rw-r--r--drivers/video/backlight/Kconfig8
-rw-r--r--drivers/video/backlight/Makefile1
-rw-r--r--drivers/video/backlight/adp5520_bl.c12
-rw-r--r--drivers/video/backlight/adx_bl.c182
-rw-r--r--drivers/video/backlight/backlight.c6
-rw-r--r--drivers/video/backlight/da903x_bl.c12
-rw-r--r--drivers/video/backlight/ep93xx_bl.c13
-rw-r--r--drivers/video/backlight/generic_bl.c13
-rw-r--r--drivers/video/backlight/jornada720_bl.c13
-rw-r--r--drivers/video/backlight/jornada720_lcd.c13
-rw-r--r--drivers/video/backlight/lcd.c26
-rw-r--r--drivers/video/backlight/ld9040.c71
-rw-r--r--drivers/video/backlight/max8925_bl.c12
-rw-r--r--drivers/video/backlight/omap1_bl.c13
-rw-r--r--drivers/video/backlight/pcf50633-backlight.c12
-rw-r--r--drivers/video/backlight/platform_lcd.c22
-rw-r--r--drivers/video/backlight/pwm_bl.c33
-rw-r--r--drivers/video/backlight/wm831x_bl.c12
-rw-r--r--drivers/video/bf54x-lq043fb.c2
-rw-r--r--drivers/video/bfin-t350mcqb-fb.c2
-rw-r--r--drivers/video/cirrusfb.c270
-rw-r--r--drivers/video/console/Kconfig2
-rw-r--r--drivers/video/console/newport_con.c63
-rw-r--r--drivers/video/controlfb.c2
-rw-r--r--drivers/video/display/Kconfig24
-rw-r--r--drivers/video/display/Makefile6
-rw-r--r--drivers/video/display/display-sysfs.c219
-rw-r--r--drivers/video/fbmem.c14
-rw-r--r--drivers/video/fsl-diu-fb.c587
-rw-r--r--drivers/video/grvga.c4
-rw-r--r--drivers/video/hgafb.c2
-rw-r--r--drivers/video/i810/i810_main.c16
-rw-r--r--drivers/video/intelfb/intelfbdrv.c18
-rw-r--r--drivers/video/logo/logo.c2
-rw-r--r--drivers/video/matrox/matroxfb_base.c1
-rw-r--r--drivers/video/matrox/matroxfb_crtc2.c1
-rw-r--r--drivers/video/mbx/mbxfb.c13
-rw-r--r--drivers/video/mx3fb.c65
-rw-r--r--drivers/video/mxsfb.c21
-rw-r--r--drivers/video/neofb.c10
-rw-r--r--drivers/video/nuc900fb.c13
-rw-r--r--drivers/video/nvidia/nvidia.c6
-rw-r--r--drivers/video/offb.c71
-rw-r--r--drivers/video/omap/lcd_ams_delta.c15
-rw-r--r--drivers/video/omap/lcd_h3.c16
-rw-r--r--drivers/video/omap/lcd_htcherald.c16
-rw-r--r--drivers/video/omap/lcd_inn1510.c16
-rw-r--r--drivers/video/omap/lcd_inn1610.c16
-rw-r--r--drivers/video/omap/lcd_mipid.c1
-rw-r--r--drivers/video/omap/lcd_osk.c16
-rw-r--r--drivers/video/omap/lcd_palmte.c16
-rw-r--r--drivers/video/omap/lcd_palmtt.c15
-rw-r--r--drivers/video/omap/lcd_palmz71.c15
-rw-r--r--drivers/video/omap/omapfb_main.c4
-rw-r--r--drivers/video/omap/rfbi.c2
-rw-r--r--drivers/video/omap/sossi.c2
-rw-r--r--drivers/video/omap2/displays/Kconfig2
-rw-r--r--drivers/video/omap2/displays/panel-acx565akm.c1
-rw-r--r--drivers/video/omap2/displays/panel-generic-dpi.c66
-rw-r--r--drivers/video/omap2/displays/panel-n8x0.c1
-rw-r--r--drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c62
-rw-r--r--drivers/video/omap2/displays/panel-taal.c38
-rw-r--r--drivers/video/omap2/displays/panel-tpo-td043mtea1.c1
-rw-r--r--drivers/video/omap2/dss/Makefile3
-rw-r--r--drivers/video/omap2/dss/apply.c1324
-rw-r--r--drivers/video/omap2/dss/core.c4
-rw-r--r--drivers/video/omap2/dss/dispc.c407
-rw-r--r--drivers/video/omap2/dss/dispc.h11
-rw-r--r--drivers/video/omap2/dss/dispc_coefs.c326
-rw-r--r--drivers/video/omap2/dss/dpi.c7
-rw-r--r--drivers/video/omap2/dss/dsi.c616
-rw-r--r--drivers/video/omap2/dss/dss.h76
-rw-r--r--drivers/video/omap2/dss/dss_features.c11
-rw-r--r--drivers/video/omap2/dss/dss_features.h1
-rw-r--r--drivers/video/omap2/dss/hdmi.c59
-rw-r--r--drivers/video/omap2/dss/manager.c1221
-rw-r--r--drivers/video/omap2/dss/overlay.c435
-rw-r--r--drivers/video/omap2/dss/rfbi.c1
-rw-r--r--drivers/video/omap2/dss/sdi.c8
-rw-r--r--drivers/video/omap2/dss/ti_hdmi.h10
-rw-r--r--drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c37
-rw-r--r--drivers/video/omap2/dss/ti_hdmi_4xxx_ip.h3
-rw-r--r--drivers/video/omap2/dss/venc.c28
-rw-r--r--drivers/video/omap2/omapfb/Kconfig2
-rw-r--r--drivers/video/omap2/omapfb/omapfb-ioctl.c42
-rw-r--r--drivers/video/omap2/omapfb/omapfb-main.c22
-rw-r--r--drivers/video/omap2/omapfb/omapfb-sysfs.c4
-rw-r--r--drivers/video/omap2/omapfb/omapfb.h13
-rw-r--r--drivers/video/pm2fb.c8
-rw-r--r--drivers/video/pm3fb.c4
-rw-r--r--drivers/video/pnx4008/pnxrgbfb.c13
-rw-r--r--drivers/video/pnx4008/sdum.c13
-rw-r--r--drivers/video/pxa168fb.c12
-rw-r--r--drivers/video/pxa3xx-gcu.c15
-rw-r--r--drivers/video/riva/fbdev.c6
-rw-r--r--drivers/video/s3c-fb.c202
-rw-r--r--drivers/video/s3c2410fb.c29
-rw-r--r--drivers/video/s3fb.c30
-rw-r--r--drivers/video/sbuslib.c2
-rw-r--r--drivers/video/sh7760fb.c13
-rw-r--r--drivers/video/sh_mipi_dsi.c218
-rw-r--r--drivers/video/sh_mobile_lcdcfb.c375
-rw-r--r--drivers/video/sh_mobile_meram.c13
-rw-r--r--drivers/video/sm501fb.c13
-rw-r--r--drivers/video/smscufx.c4
-rw-r--r--drivers/video/sstfb.c6
-rw-r--r--drivers/video/tdfxfb.c2
-rw-r--r--drivers/video/udlfb.c6
-rw-r--r--drivers/video/uvesafb.c6
-rw-r--r--drivers/video/vfb.c2
-rw-r--r--drivers/video/vt8500lcdfb.c13
-rw-r--r--drivers/video/w100fb.c13
-rw-r--r--drivers/video/wm8505fb.c13
-rw-r--r--drivers/video/wmt_ge_rops.c13
-rw-r--r--drivers/video/xen-fbfront.c9
-rw-r--r--drivers/video/xilinxfb.c20
-rw-r--r--drivers/virtio/virtio_balloon.c108
-rw-r--r--drivers/virtio/virtio_mmio.c10
-rw-r--r--drivers/virtio/virtio_pci.c118
-rw-r--r--drivers/virtio/virtio_ring.c245
-rw-r--r--drivers/watchdog/Kconfig13
-rw-r--r--drivers/watchdog/Makefile1
-rw-r--r--drivers/watchdog/ar7_wdt.c17
-rw-r--r--drivers/watchdog/at91sam9_wdt.c22
-rw-r--r--drivers/watchdog/at91sam9_wdt.h6
-rw-r--r--drivers/watchdog/ath79_wdt.c6
-rw-r--r--drivers/watchdog/bcm63xx_wdt.c13
-rw-r--r--drivers/watchdog/coh901327_wdt.c6
-rw-r--r--drivers/watchdog/cpu5wdt.c3
-rw-r--r--drivers/watchdog/cpwd.c13
-rw-r--r--drivers/watchdog/davinci_wdt.c13
-rw-r--r--drivers/watchdog/dw_wdt.c12
-rw-r--r--drivers/watchdog/eurotechwdt.c4
-rw-r--r--drivers/watchdog/f71808e_wdt.c2
-rw-r--r--drivers/watchdog/hpwdt.c5
-rw-r--r--drivers/watchdog/iTCO_wdt.c6
-rw-r--r--drivers/watchdog/ibmasr.c4
-rw-r--r--drivers/watchdog/indydog.c4
-rw-r--r--drivers/watchdog/iop_wdt.c5
-rw-r--r--drivers/watchdog/ixp2000_wdt.c3
-rw-r--r--drivers/watchdog/ixp4xx_wdt.c1
-rw-r--r--drivers/watchdog/jz4740_wdt.c13
-rw-r--r--drivers/watchdog/ks8695_wdt.c3
-rw-r--r--drivers/watchdog/lantiq_wdt.c3
-rw-r--r--drivers/watchdog/max63xx_wdt.c13
-rw-r--r--drivers/watchdog/mpc8xxx_wdt.c2
-rw-r--r--drivers/watchdog/mtx-1_wdt.c13
-rw-r--r--drivers/watchdog/nuc900_wdt.c13
-rw-r--r--drivers/watchdog/of_xilinx_wdt.c13
-rw-r--r--drivers/watchdog/omap_wdt.c17
-rw-r--r--drivers/watchdog/orion_wdt.c16
-rw-r--r--drivers/watchdog/pnx4008_wdt.c13
-rw-r--r--drivers/watchdog/rc32434_wdt.c13
-rw-r--r--drivers/watchdog/rdc321x_wdt.c13
-rw-r--r--drivers/watchdog/riowd.c13
-rw-r--r--drivers/watchdog/s3c2410_wdt.c2
-rw-r--r--drivers/watchdog/sp805_wdt.c4
-rw-r--r--drivers/watchdog/stmp3xxx_wdt.c13
-rw-r--r--drivers/watchdog/ts72xx_wdt.c12
-rw-r--r--drivers/watchdog/twl4030_wdt.c12
-rw-r--r--drivers/watchdog/via_wdt.c267
-rw-r--r--drivers/watchdog/w83627hf_wdt.c4
-rw-r--r--drivers/watchdog/wm831x_wdt.c23
-rw-r--r--drivers/watchdog/wm8350_wdt.c12
-rw-r--r--drivers/xen/Kconfig7
-rw-r--r--drivers/xen/Makefile2
-rw-r--r--drivers/xen/biomerge.c2
-rw-r--r--drivers/xen/events.c77
-rw-r--r--drivers/xen/evtchn.c2
-rw-r--r--drivers/xen/gntalloc.c121
-rw-r--r--drivers/xen/gntdev.c34
-rw-r--r--drivers/xen/grant-table.c518
-rw-r--r--drivers/xen/privcmd.c (renamed from drivers/xen/xenfs/privcmd.c)41
-rw-r--r--drivers/xen/privcmd.h3
-rw-r--r--drivers/xen/swiotlb-xen.c6
-rw-r--r--drivers/xen/xen-pciback/conf_space.c2
-rw-r--r--drivers/xen/xen-pciback/pci_stub.c4
-rw-r--r--drivers/xen/xen-pciback/xenbus.c21
-rw-r--r--drivers/xen/xen-selfballoon.c1
-rw-r--r--drivers/xen/xenbus/Makefile2
-rw-r--r--drivers/xen/xenbus/xenbus_client.c193
-rw-r--r--drivers/xen/xenbus/xenbus_comms.h4
-rw-r--r--drivers/xen/xenbus/xenbus_dev_backend.c90
-rw-r--r--drivers/xen/xenbus/xenbus_dev_frontend.c (renamed from drivers/xen/xenfs/xenbus.c)40
-rw-r--r--drivers/xen/xenbus/xenbus_probe.c9
-rw-r--r--drivers/xen/xenbus/xenbus_probe.h6
-rw-r--r--drivers/xen/xenbus/xenbus_probe_backend.c8
-rw-r--r--drivers/xen/xenbus/xenbus_probe_frontend.c8
-rw-r--r--drivers/xen/xenbus/xenbus_xs.c36
-rw-r--r--drivers/xen/xenfs/Makefile2
-rw-r--r--drivers/xen/xenfs/super.c6
-rw-r--r--drivers/xen/xenfs/xenfs.h2
-rw-r--r--drivers/zorro/zorro.ids2
4276 files changed, 270569 insertions, 180371 deletions
diff --git a/drivers/Makefile b/drivers/Makefile
index 1b3142127bf5..c07be024b962 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -97,7 +97,7 @@ obj-$(CONFIG_EISA) += eisa/
obj-y += lguest/
obj-$(CONFIG_CPU_FREQ) += cpufreq/
obj-$(CONFIG_CPU_IDLE) += cpuidle/
-obj-$(CONFIG_MMC) += mmc/
+obj-y += mmc/
obj-$(CONFIG_MEMSTICK) += memstick/
obj-y += leds/
obj-$(CONFIG_INFINIBAND) += infiniband/
diff --git a/drivers/accessibility/braille/braille_console.c b/drivers/accessibility/braille/braille_console.c
index cb423f5aef24..c339a0880e6e 100644
--- a/drivers/accessibility/braille/braille_console.c
+++ b/drivers/accessibility/braille/braille_console.c
@@ -44,7 +44,7 @@ MODULE_LICENSE("GPL");
*/
/* Emit various sounds */
-static int sound;
+static bool sound;
module_param(sound, bool, 0);
MODULE_PARM_DESC(sound, "emit sounds");
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index de0e3df76776..7556913aba45 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -374,7 +374,7 @@ config ACPI_CUSTOM_METHOD
depends on DEBUG_FS
default n
help
- This debug facility allows ACPI AML methods to me inserted and/or
+ This debug facility allows ACPI AML methods to be inserted and/or
replaced without rebooting the system. For details refer to:
Documentation/acpi/method-customizing.txt.
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index ecb26b4f29a0..c07f44f05f9d 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -20,11 +20,12 @@ obj-y += acpi.o \
# All the builtin files are in the "acpi." module_param namespace.
acpi-y += osl.o utils.o reboot.o
acpi-y += atomicio.o
+acpi-y += nvs.o
# sleep related files
acpi-y += wakeup.o
acpi-y += sleep.o
-acpi-$(CONFIG_ACPI_SLEEP) += proc.o nvs.o
+acpi-$(CONFIG_ACPI_SLEEP) += proc.o
#
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
index 301bd2d388ad..0ca208b6dcf0 100644
--- a/drivers/acpi/acpica/Makefile
+++ b/drivers/acpi/acpica/Makefile
@@ -8,41 +8,151 @@ ccflags-$(CONFIG_ACPI_DEBUG) += -DACPI_DEBUG_OUTPUT
# use acpi.o to put all files here into acpi.o modparam namespace
obj-y += acpi.o
-acpi-y := dsfield.o dsmthdat.o dsopcode.o dswexec.o dswscope.o \
- dsmethod.o dsobject.o dsutils.o dswload.o dswstate.o \
- dsinit.o dsargs.o dscontrol.o dswload2.o
+acpi-y := \
+ dsargs.o \
+ dscontrol.o \
+ dsfield.o \
+ dsinit.o \
+ dsmethod.o \
+ dsmthdat.o \
+ dsobject.o \
+ dsopcode.o \
+ dsutils.o \
+ dswexec.o \
+ dswload.o \
+ dswload2.o \
+ dswscope.o \
+ dswstate.o
-acpi-y += evevent.o evregion.o evsci.o evxfevnt.o \
- evmisc.o evrgnini.o evxface.o evxfregn.o \
- evgpe.o evgpeblk.o evgpeinit.o evgpeutil.o evxfgpe.o evglock.o
+acpi-y += \
+ evevent.o \
+ evgpe.o \
+ evgpeblk.o \
+ evgpeinit.o \
+ evgpeutil.o \
+ evglock.o \
+ evmisc.o \
+ evregion.o \
+ evrgnini.o \
+ evsci.o \
+ evxface.o \
+ evxfevnt.o \
+ evxfgpe.o \
+ evxfregn.o
-acpi-y += exconfig.o exfield.o exnames.o exoparg6.o exresolv.o exstorob.o\
- exconvrt.o exfldio.o exoparg1.o exprep.o exresop.o exsystem.o\
- excreate.o exmisc.o exoparg2.o exregion.o exstore.o exutils.o \
- exdump.o exmutex.o exoparg3.o exresnte.o exstoren.o exdebug.o
+acpi-y += \
+ exconfig.o \
+ exconvrt.o \
+ excreate.o \
+ exdebug.o \
+ exdump.o \
+ exfield.o \
+ exfldio.o \
+ exmutex.o \
+ exnames.o \
+ exoparg1.o \
+ exoparg2.o \
+ exoparg3.o \
+ exoparg6.o \
+ exprep.o \
+ exmisc.o \
+ exregion.o \
+ exresnte.o \
+ exresolv.o \
+ exresop.o \
+ exstore.o \
+ exstoren.o \
+ exstorob.o \
+ exsystem.o \
+ exutils.o
-acpi-y += hwacpi.o hwgpe.o hwregs.o hwsleep.o hwxface.o hwvalid.o hwpci.o
+acpi-y += \
+ hwacpi.o \
+ hwgpe.o \
+ hwpci.o \
+ hwregs.o \
+ hwsleep.o \
+ hwvalid.o \
+ hwxface.o
acpi-$(ACPI_FUTURE_USAGE) += hwtimer.o
-acpi-y += nsaccess.o nsload.o nssearch.o nsxfeval.o \
- nsalloc.o nseval.o nsnames.o nsutils.o nsxfname.o \
- nsdump.o nsinit.o nsobject.o nswalk.o nsxfobj.o \
- nsparse.o nspredef.o nsrepair.o nsrepair2.o
+acpi-y += \
+ nsaccess.o \
+ nsalloc.o \
+ nsdump.o \
+ nseval.o \
+ nsinit.o \
+ nsload.o \
+ nsnames.o \
+ nsobject.o \
+ nsparse.o \
+ nspredef.o \
+ nsrepair.o \
+ nsrepair2.o \
+ nssearch.o \
+ nsutils.o \
+ nswalk.o \
+ nsxfeval.o \
+ nsxfname.o \
+ nsxfobj.o
acpi-$(ACPI_FUTURE_USAGE) += nsdumpdv.o
-acpi-y += psargs.o psparse.o psloop.o pstree.o pswalk.o \
- psopcode.o psscope.o psutils.o psxface.o
+acpi-y += \
+ psargs.o \
+ psloop.o \
+ psopcode.o \
+ psparse.o \
+ psscope.o \
+ pstree.o \
+ psutils.o \
+ pswalk.o \
+ psxface.o
-acpi-y += rsaddr.o rscreate.o rsinfo.o rsio.o rslist.o rsmisc.o rsxface.o \
- rscalc.o rsirq.o rsmemory.o rsutils.o
+acpi-y += \
+ rsaddr.o \
+ rscalc.o \
+ rscreate.o \
+ rsinfo.o \
+ rsio.o \
+ rsirq.o \
+ rslist.o \
+ rsmemory.o \
+ rsmisc.o \
+ rsserial.o \
+ rsutils.o \
+ rsxface.o
acpi-$(ACPI_FUTURE_USAGE) += rsdump.o
-acpi-y += tbxface.o tbinstal.o tbutils.o tbfind.o tbfadt.o tbxfroot.o
+acpi-y += \
+ tbfadt.o \
+ tbfind.o \
+ tbinstal.o \
+ tbutils.o \
+ tbxface.o \
+ tbxfroot.o
-acpi-y += utalloc.o utdebug.o uteval.o utinit.o utmisc.o utxface.o \
- utcopy.o utdelete.o utglobal.o utmath.o utobject.o \
- utstate.o utmutex.o utobject.o utresrc.o utlock.o utids.o \
- utosi.o utxferror.o utdecode.o
+acpi-y += \
+ utaddress.o \
+ utalloc.o \
+ utcopy.o \
+ utdebug.o \
+ utdecode.o \
+ utdelete.o \
+ uteval.o \
+ utglobal.o \
+ utids.o \
+ utinit.o \
+ utlock.o \
+ utmath.o \
+ utmisc.o \
+ utmutex.o \
+ utobject.o \
+ utosi.o \
+ utresrc.o \
+ utstate.o \
+ utxface.o \
+ utxferror.o \
+ utxfmutex.o
diff --git a/drivers/acpi/acpica/accommon.h b/drivers/acpi/acpica/accommon.h
index e0ba17f0a7c8..a44bd424f9f4 100644
--- a/drivers/acpi/acpica/accommon.h
+++ b/drivers/acpi/acpica/accommon.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acconfig.h b/drivers/acpi/acpica/acconfig.h
index f895a244ca7e..1f30af613e87 100644
--- a/drivers/acpi/acpica/acconfig.h
+++ b/drivers/acpi/acpica/acconfig.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -123,6 +123,10 @@
#define ACPI_MAX_SLEEP 2000 /* Two seconds */
+/* Address Range lists are per-space_id (Memory and I/O only) */
+
+#define ACPI_ADDRESS_RANGE_MAX 2
+
/******************************************************************************
*
* ACPI Specification constants (Do not change unless the specification changes)
@@ -202,9 +206,10 @@
#define ACPI_RSDP_CHECKSUM_LENGTH 20
#define ACPI_RSDP_XCHECKSUM_LENGTH 36
-/* SMBus and IPMI bidirectional buffer size */
+/* SMBus, GSBus and IPMI bidirectional buffer size */
#define ACPI_SMBUS_BUFFER_SIZE 34
+#define ACPI_GSBUS_BUFFER_SIZE 34
#define ACPI_IPMI_BUFFER_SIZE 66
/* _sx_d and _sx_w control methods */
diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h
index eb0b1f8dee6d..deaa81979561 100644
--- a/drivers/acpi/acpica/acdebug.h
+++ b/drivers/acpi/acpica/acdebug.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acdispat.h b/drivers/acpi/acpica/acdispat.h
index 2d1b7ffa377a..5935ba6707e2 100644
--- a/drivers/acpi/acpica/acdispat.h
+++ b/drivers/acpi/acpica/acdispat.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index bea3b4899183..c53caa521a30 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -162,6 +162,7 @@ acpi_status acpi_ev_initialize_op_regions(void);
acpi_status
acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
+ union acpi_operand_object *field_obj,
u32 function,
u32 region_offset, u32 bit_width, u64 *value);
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 76dc02f15574..2853f7673f3b 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -108,7 +108,7 @@ u8 ACPI_INIT_GLOBAL(acpi_gbl_use_default_register_widths, TRUE);
/*
* Optionally enable output from the AML Debug Object.
*/
-u32 ACPI_INIT_GLOBAL(acpi_gbl_enable_aml_debug_object, FALSE);
+bool ACPI_INIT_GLOBAL(acpi_gbl_enable_aml_debug_object, FALSE);
/*
* Optionally copy the entire DSDT to local memory (instead of simply
@@ -140,8 +140,19 @@ u32 acpi_gbl_trace_flags;
acpi_name acpi_gbl_trace_method_name;
u8 acpi_gbl_system_awake_and_running;
+/*
+ * ACPI 5.0 introduces the concept of a "reduced hardware platform", meaning
+ * that the ACPI hardware is no longer required. A flag in the FADT indicates
+ * a reduced HW machine, and that flag is duplicated here for convenience.
+ */
+u8 acpi_gbl_reduced_hardware;
+
#endif
+/* Do not disassemble buffers to resource descriptors */
+
+ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_no_resource_disassembly, FALSE);
+
/*****************************************************************************
*
* Debug support
@@ -207,7 +218,7 @@ ACPI_EXTERN struct acpi_rw_lock acpi_gbl_namespace_rw_lock;
/*****************************************************************************
*
- * Mutual exlusion within ACPICA subsystem
+ * Mutual exclusion within ACPICA subsystem
*
****************************************************************************/
@@ -295,6 +306,8 @@ ACPI_EXTERN u8 acpi_gbl_acpi_hardware_present;
ACPI_EXTERN u8 acpi_gbl_events_initialized;
ACPI_EXTERN u8 acpi_gbl_osi_data;
ACPI_EXTERN struct acpi_interface_info *acpi_gbl_supported_interfaces;
+ACPI_EXTERN struct acpi_address_range
+ *acpi_gbl_address_range_list[ACPI_ADDRESS_RANGE_MAX];
#ifndef DEFINE_ACPI_GLOBALS
diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
index e7213beaafc7..677793e938f5 100644
--- a/drivers/acpi/acpica/achware.h
+++ b/drivers/acpi/acpica/achware.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acinterp.h b/drivers/acpi/acpica/acinterp.h
index 3731e1c34b83..eb308635da72 100644
--- a/drivers/acpi/acpica/acinterp.h
+++ b/drivers/acpi/acpica/acinterp.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -468,6 +468,8 @@ void acpi_ex_eisa_id_to_string(char *dest, u64 compressed_id);
void acpi_ex_integer_to_string(char *dest, u64 value);
+u8 acpi_is_valid_space_id(u8 space_id);
+
/*
* exregion - default op_region handlers
*/
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 5552125d8340..3f24068837d5 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -53,7 +53,7 @@ typedef u32 acpi_mutex_handle;
/* Total number of aml opcodes defined */
-#define AML_NUM_OPCODES 0x7F
+#define AML_NUM_OPCODES 0x81
/* Forward declarations */
@@ -249,12 +249,16 @@ struct acpi_create_field_info {
struct acpi_namespace_node *field_node;
struct acpi_namespace_node *register_node;
struct acpi_namespace_node *data_register_node;
+ struct acpi_namespace_node *connection_node;
+ u8 *resource_buffer;
u32 bank_value;
u32 field_bit_position;
u32 field_bit_length;
+ u16 resource_length;
u8 field_flags;
u8 attribute;
u8 field_type;
+ u8 access_length;
};
typedef
@@ -315,7 +319,8 @@ struct acpi_name_info {
/*
* Used for ACPI_PTYPE1_FIXED, ACPI_PTYPE1_VAR, ACPI_PTYPE2,
- * ACPI_PTYPE2_MIN, ACPI_PTYPE2_PKG_COUNT, ACPI_PTYPE2_COUNT
+ * ACPI_PTYPE2_MIN, ACPI_PTYPE2_PKG_COUNT, ACPI_PTYPE2_COUNT,
+ * ACPI_PTYPE2_FIX_VAR
*/
struct acpi_package_info {
u8 type;
@@ -625,6 +630,15 @@ union acpi_generic_state {
typedef acpi_status(*ACPI_EXECUTE_OP) (struct acpi_walk_state * walk_state);
+/* Address Range info block */
+
+struct acpi_address_range {
+ struct acpi_address_range *next;
+ struct acpi_namespace_node *region_node;
+ acpi_physical_address start_address;
+ acpi_physical_address end_address;
+};
+
/*****************************************************************************
*
* Parser typedefs and structs
@@ -951,7 +965,7 @@ struct acpi_port_info {
#define ACPI_RESOURCE_NAME_END_DEPENDENT 0x38
#define ACPI_RESOURCE_NAME_IO 0x40
#define ACPI_RESOURCE_NAME_FIXED_IO 0x48
-#define ACPI_RESOURCE_NAME_RESERVED_S1 0x50
+#define ACPI_RESOURCE_NAME_FIXED_DMA 0x50
#define ACPI_RESOURCE_NAME_RESERVED_S2 0x58
#define ACPI_RESOURCE_NAME_RESERVED_S3 0x60
#define ACPI_RESOURCE_NAME_RESERVED_S4 0x68
@@ -973,7 +987,9 @@ struct acpi_port_info {
#define ACPI_RESOURCE_NAME_EXTENDED_IRQ 0x89
#define ACPI_RESOURCE_NAME_ADDRESS64 0x8A
#define ACPI_RESOURCE_NAME_EXTENDED_ADDRESS64 0x8B
-#define ACPI_RESOURCE_NAME_LARGE_MAX 0x8B
+#define ACPI_RESOURCE_NAME_GPIO 0x8C
+#define ACPI_RESOURCE_NAME_SERIAL_BUS 0x8E
+#define ACPI_RESOURCE_NAME_LARGE_MAX 0x8E
/*****************************************************************************
*
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
index b7491ee1fba6..ef338a96f5b2 100644
--- a/drivers/acpi/acpica/acmacros.h
+++ b/drivers/acpi/acpica/acmacros.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index 79a598c67fe3..2c9e0f049523 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h
index 1055769f2f01..c065078ca83b 100644
--- a/drivers/acpi/acpica/acobject.h
+++ b/drivers/acpi/acpica/acobject.h
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -254,6 +254,7 @@ ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_NOTIFY_INFO};
u32 base_byte_offset; /* Byte offset within containing object */\
u32 value; /* Value to store into the Bank or Index register */\
u8 start_field_bit_offset;/* Bit offset within first field datum (0-63) */\
+ u8 access_length; /* For serial regions/fields */
struct acpi_object_field_common { /* COMMON FIELD (for BUFFER, REGION, BANK, and INDEX fields) */
@@ -261,7 +262,9 @@ struct acpi_object_field_common { /* COMMON FIELD (for BUFFER, REGION, BANK, and
};
struct acpi_object_region_field {
- ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO union acpi_operand_object *region_obj; /* Containing op_region object */
+ ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO u16 resource_length;
+ union acpi_operand_object *region_obj; /* Containing op_region object */
+ u8 *resource_buffer; /* resource_template for serial regions/fields */
};
struct acpi_object_bank_field {
@@ -358,6 +361,7 @@ typedef enum {
*/
struct acpi_object_extra {
ACPI_OBJECT_COMMON_HEADER struct acpi_namespace_node *method_REG; /* _REG method for this region (if any) */
+ struct acpi_namespace_node *scope_node;
void *region_context; /* Region-specific data */
u8 *aml_start;
u32 aml_length;
diff --git a/drivers/acpi/acpica/acopcode.h b/drivers/acpi/acpica/acopcode.h
index bb2ccfad7376..9440d053fbb3 100644
--- a/drivers/acpi/acpica/acopcode.h
+++ b/drivers/acpi/acpica/acopcode.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -93,6 +93,7 @@
#define ARGP_CONCAT_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_TARGET)
#define ARGP_CONCAT_RES_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_TARGET)
#define ARGP_COND_REF_OF_OP ARGP_LIST2 (ARGP_SUPERNAME, ARGP_SUPERNAME)
+#define ARGP_CONNECTFIELD_OP ARGP_LIST1 (ARGP_NAMESTRING)
#define ARGP_CONTINUE_OP ARG_NONE
#define ARGP_COPY_OP ARGP_LIST2 (ARGP_TERMARG, ARGP_SIMPLENAME)
#define ARGP_CREATE_BIT_FIELD_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_NAME)
@@ -164,6 +165,7 @@
#define ARGP_RETURN_OP ARGP_LIST1 (ARGP_TERMARG)
#define ARGP_REVISION_OP ARG_NONE
#define ARGP_SCOPE_OP ARGP_LIST3 (ARGP_PKGLENGTH, ARGP_NAME, ARGP_TERMLIST)
+#define ARGP_SERIALFIELD_OP ARGP_LIST1 (ARGP_NAMESTRING)
#define ARGP_SHIFT_LEFT_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_TARGET)
#define ARGP_SHIFT_RIGHT_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_TARGET)
#define ARGP_SIGNAL_OP ARGP_LIST1 (ARGP_SUPERNAME)
@@ -223,6 +225,7 @@
#define ARGI_CONCAT_OP ARGI_LIST3 (ARGI_COMPUTEDATA,ARGI_COMPUTEDATA, ARGI_TARGETREF)
#define ARGI_CONCAT_RES_OP ARGI_LIST3 (ARGI_BUFFER, ARGI_BUFFER, ARGI_TARGETREF)
#define ARGI_COND_REF_OF_OP ARGI_LIST2 (ARGI_OBJECT_REF, ARGI_TARGETREF)
+#define ARGI_CONNECTFIELD_OP ARGI_INVALID_OPCODE
#define ARGI_CONTINUE_OP ARGI_INVALID_OPCODE
#define ARGI_COPY_OP ARGI_LIST2 (ARGI_ANYTYPE, ARGI_SIMPLE_TARGET)
#define ARGI_CREATE_BIT_FIELD_OP ARGI_LIST3 (ARGI_BUFFER, ARGI_INTEGER, ARGI_REFERENCE)
@@ -294,6 +297,7 @@
#define ARGI_RETURN_OP ARGI_INVALID_OPCODE
#define ARGI_REVISION_OP ARG_NONE
#define ARGI_SCOPE_OP ARGI_INVALID_OPCODE
+#define ARGI_SERIALFIELD_OP ARGI_INVALID_OPCODE
#define ARGI_SHIFT_LEFT_OP ARGI_LIST3 (ARGI_INTEGER, ARGI_INTEGER, ARGI_TARGETREF)
#define ARGI_SHIFT_RIGHT_OP ARGI_LIST3 (ARGI_INTEGER, ARGI_INTEGER, ARGI_TARGETREF)
#define ARGI_SIGNAL_OP ARGI_LIST1 (ARGI_EVENT)
diff --git a/drivers/acpi/acpica/acparser.h b/drivers/acpi/acpica/acparser.h
index 5ea1e06afa20..b725d780d34d 100644
--- a/drivers/acpi/acpica/acparser.h
+++ b/drivers/acpi/acpica/acparser.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h
index c445cca490ea..bbb34c9be4e8 100644
--- a/drivers/acpi/acpica/acpredef.h
+++ b/drivers/acpi/acpica/acpredef.h
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -94,6 +94,14 @@
* ACPI_PTYPE2_REV_FIXED: Revision at start, each subpackage is Fixed-length
* (Used for _ART, _FPS)
*
+ * ACPI_PTYPE2_FIX_VAR: Each subpackage consists of some fixed-length elements
+ * followed by an optional element
+ * object type
+ * count
+ * object type
+ * count = 0 (optional)
+ * (Used for _DLM)
+ *
*****************************************************************************/
enum acpi_return_package_types {
@@ -105,7 +113,8 @@ enum acpi_return_package_types {
ACPI_PTYPE2_PKG_COUNT = 6,
ACPI_PTYPE2_FIXED = 7,
ACPI_PTYPE2_MIN = 8,
- ACPI_PTYPE2_REV_FIXED = 9
+ ACPI_PTYPE2_REV_FIXED = 9,
+ ACPI_PTYPE2_FIX_VAR = 10
};
#ifdef ACPI_CREATE_PREDEFINED_TABLE
@@ -154,6 +163,7 @@ static const union acpi_predefined_info predefined_names[] =
{{"_AC8", 0, ACPI_RTYPE_INTEGER}},
{{"_AC9", 0, ACPI_RTYPE_INTEGER}},
{{"_ADR", 0, ACPI_RTYPE_INTEGER}},
+ {{"_AEI", 0, ACPI_RTYPE_BUFFER}},
{{"_AL0", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
{{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}},
@@ -229,6 +239,13 @@ static const union acpi_predefined_info predefined_names[] =
{{"_CID", 0, ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING | ACPI_RTYPE_PACKAGE}}, /* Variable-length (Ints/Strs) */
{{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING, 0,0}, 0,0}},
+ {{"_CLS", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (3 Int) */
+ {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 3, 0}, 0, 0}},
+
+ {{"_CPC", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Ints/Bufs) */
+ {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER | ACPI_RTYPE_BUFFER, 0, 0}, 0,
+ 0}},
+
{{"_CRS", 0, ACPI_RTYPE_BUFFER}},
{{"_CRT", 0, ACPI_RTYPE_INTEGER}},
{{"_CSD", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (1 Int(n), n-1 Int) */
@@ -237,12 +254,21 @@ static const union acpi_predefined_info predefined_names[] =
{{"_CST", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (1 Int(n), n Pkg (1 Buf/3 Int) */
{{{ACPI_PTYPE2_PKG_COUNT,ACPI_RTYPE_BUFFER, 1, ACPI_RTYPE_INTEGER}, 3,0}},
+ {{"_CWS", 1, ACPI_RTYPE_INTEGER}},
{{"_DCK", 1, ACPI_RTYPE_INTEGER}},
{{"_DCS", 0, ACPI_RTYPE_INTEGER}},
{{"_DDC", 1, ACPI_RTYPE_INTEGER | ACPI_RTYPE_BUFFER}},
{{"_DDN", 0, ACPI_RTYPE_STRING}},
+ {{"_DEP", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
+ {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0}, 0, 0}},
+
{{"_DGS", 0, ACPI_RTYPE_INTEGER}},
{{"_DIS", 0, 0}},
+
+ {{"_DLM", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each (1 Ref, 0/1 Optional Buf/Ref) */
+ {{{ACPI_PTYPE2_FIX_VAR, ACPI_RTYPE_REFERENCE, 1,
+ ACPI_RTYPE_REFERENCE | ACPI_RTYPE_BUFFER}, 0, 0}},
+
{{"_DMA", 0, ACPI_RTYPE_BUFFER}},
{{"_DOD", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Ints) */
{{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 0,0}, 0,0}},
@@ -262,6 +288,7 @@ static const union acpi_predefined_info predefined_names[] =
{{"_EJ3", 1, 0}},
{{"_EJ4", 1, 0}},
{{"_EJD", 0, ACPI_RTYPE_STRING}},
+ {{"_EVT", 1, 0}},
{{"_FDE", 0, ACPI_RTYPE_BUFFER}},
{{"_FDI", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (16 Int) */
{{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 16,0}, 0,0}},
@@ -281,14 +308,17 @@ static const union acpi_predefined_info predefined_names[] =
{{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 3, 0}, 0, 0}},
{{"_GAI", 0, ACPI_RTYPE_INTEGER}},
+ {{"_GCP", 0, ACPI_RTYPE_INTEGER}},
{{"_GHL", 0, ACPI_RTYPE_INTEGER}},
{{"_GLK", 0, ACPI_RTYPE_INTEGER}},
{{"_GPD", 0, ACPI_RTYPE_INTEGER}},
{{"_GPE", 0, ACPI_RTYPE_INTEGER}}, /* _GPE method, not _GPE scope */
+ {{"_GRT", 0, ACPI_RTYPE_BUFFER}},
{{"_GSB", 0, ACPI_RTYPE_INTEGER}},
{{"_GTF", 0, ACPI_RTYPE_BUFFER}},
{{"_GTM", 0, ACPI_RTYPE_BUFFER}},
{{"_GTS", 1, 0}},
+ {{"_GWS", 1, ACPI_RTYPE_INTEGER}},
{{"_HID", 0, ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING}},
{{"_HOT", 0, ACPI_RTYPE_INTEGER}},
{{"_HPP", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (4 Int) */
@@ -303,6 +333,7 @@ static const union acpi_predefined_info predefined_names[] =
{{"_HPX", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each (var Ints) */
{{{ACPI_PTYPE2_MIN, ACPI_RTYPE_INTEGER, 5,0}, 0,0}},
+ {{"_HRV", 0, ACPI_RTYPE_INTEGER}},
{{"_IFT", 0, ACPI_RTYPE_INTEGER}}, /* See IPMI spec */
{{"_INI", 0, 0}},
{{"_IRC", 0, 0}},
@@ -361,6 +392,9 @@ static const union acpi_predefined_info predefined_names[] =
{{"_PR3", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
{{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0}, 0, 0}},
+ {{"_PRE", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
+ {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0}, 0, 0}},
+
{{"_PRL", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
{{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0}, 0, 0}},
@@ -391,6 +425,7 @@ static const union acpi_predefined_info predefined_names[] =
{{"_PSD", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each (5 Int) with count */
{{{ACPI_PTYPE2_COUNT, ACPI_RTYPE_INTEGER,0,0}, 0,0}},
+ {{"_PSE", 1, 0}},
{{"_PSL", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
{{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}},
@@ -457,6 +492,7 @@ static const union acpi_predefined_info predefined_names[] =
{{"_SLI", 0, ACPI_RTYPE_BUFFER}},
{{"_SPD", 1, ACPI_RTYPE_INTEGER}},
{{"_SRS", 1, 0}},
+ {{"_SRT", 1, ACPI_RTYPE_INTEGER}},
{{"_SRV", 0, ACPI_RTYPE_INTEGER}}, /* See IPMI spec */
{{"_SST", 1, 0}},
{{"_STA", 0, ACPI_RTYPE_INTEGER}},
@@ -464,6 +500,7 @@ static const union acpi_predefined_info predefined_names[] =
{{"_STP", 2, ACPI_RTYPE_INTEGER}},
{{"_STR", 0, ACPI_RTYPE_BUFFER}},
{{"_STV", 2, ACPI_RTYPE_INTEGER}},
+ {{"_SUB", 0, ACPI_RTYPE_STRING}},
{{"_SUN", 0, ACPI_RTYPE_INTEGER}},
{{"_SWS", 0, ACPI_RTYPE_INTEGER}},
{{"_TC1", 0, ACPI_RTYPE_INTEGER}},
diff --git a/drivers/acpi/acpica/acresrc.h b/drivers/acpi/acpica/acresrc.h
index f08b55b7f3a0..0347d0993497 100644
--- a/drivers/acpi/acpica/acresrc.h
+++ b/drivers/acpi/acpica/acresrc.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -73,28 +73,40 @@ typedef const struct acpi_rsconvert_info {
/* Resource conversion opcodes */
-#define ACPI_RSC_INITGET 0
-#define ACPI_RSC_INITSET 1
-#define ACPI_RSC_FLAGINIT 2
-#define ACPI_RSC_1BITFLAG 3
-#define ACPI_RSC_2BITFLAG 4
-#define ACPI_RSC_COUNT 5
-#define ACPI_RSC_COUNT16 6
-#define ACPI_RSC_LENGTH 7
-#define ACPI_RSC_MOVE8 8
-#define ACPI_RSC_MOVE16 9
-#define ACPI_RSC_MOVE32 10
-#define ACPI_RSC_MOVE64 11
-#define ACPI_RSC_SET8 12
-#define ACPI_RSC_DATA8 13
-#define ACPI_RSC_ADDRESS 14
-#define ACPI_RSC_SOURCE 15
-#define ACPI_RSC_SOURCEX 16
-#define ACPI_RSC_BITMASK 17
-#define ACPI_RSC_BITMASK16 18
-#define ACPI_RSC_EXIT_NE 19
-#define ACPI_RSC_EXIT_LE 20
-#define ACPI_RSC_EXIT_EQ 21
+typedef enum {
+ ACPI_RSC_INITGET = 0,
+ ACPI_RSC_INITSET,
+ ACPI_RSC_FLAGINIT,
+ ACPI_RSC_1BITFLAG,
+ ACPI_RSC_2BITFLAG,
+ ACPI_RSC_3BITFLAG,
+ ACPI_RSC_ADDRESS,
+ ACPI_RSC_BITMASK,
+ ACPI_RSC_BITMASK16,
+ ACPI_RSC_COUNT,
+ ACPI_RSC_COUNT16,
+ ACPI_RSC_COUNT_GPIO_PIN,
+ ACPI_RSC_COUNT_GPIO_RES,
+ ACPI_RSC_COUNT_GPIO_VEN,
+ ACPI_RSC_COUNT_SERIAL_RES,
+ ACPI_RSC_COUNT_SERIAL_VEN,
+ ACPI_RSC_DATA8,
+ ACPI_RSC_EXIT_EQ,
+ ACPI_RSC_EXIT_LE,
+ ACPI_RSC_EXIT_NE,
+ ACPI_RSC_LENGTH,
+ ACPI_RSC_MOVE_GPIO_PIN,
+ ACPI_RSC_MOVE_GPIO_RES,
+ ACPI_RSC_MOVE_SERIAL_RES,
+ ACPI_RSC_MOVE_SERIAL_VEN,
+ ACPI_RSC_MOVE8,
+ ACPI_RSC_MOVE16,
+ ACPI_RSC_MOVE32,
+ ACPI_RSC_MOVE64,
+ ACPI_RSC_SET8,
+ ACPI_RSC_SOURCE,
+ ACPI_RSC_SOURCEX
+} ACPI_RSCONVERT_OPCODES;
/* Resource Conversion sub-opcodes */
@@ -106,6 +118,9 @@ typedef const struct acpi_rsconvert_info {
#define ACPI_RS_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_resource,f)
#define AML_OFFSET(f) (u8) ACPI_OFFSET (union aml_resource,f)
+/*
+ * Individual entry for the resource dump tables
+ */
typedef const struct acpi_rsdump_info {
u8 opcode;
u8 offset;
@@ -116,20 +131,25 @@ typedef const struct acpi_rsdump_info {
/* Values for the Opcode field above */
-#define ACPI_RSD_TITLE 0
-#define ACPI_RSD_LITERAL 1
-#define ACPI_RSD_STRING 2
-#define ACPI_RSD_UINT8 3
-#define ACPI_RSD_UINT16 4
-#define ACPI_RSD_UINT32 5
-#define ACPI_RSD_UINT64 6
-#define ACPI_RSD_1BITFLAG 7
-#define ACPI_RSD_2BITFLAG 8
-#define ACPI_RSD_SHORTLIST 9
-#define ACPI_RSD_LONGLIST 10
-#define ACPI_RSD_DWORDLIST 11
-#define ACPI_RSD_ADDRESS 12
-#define ACPI_RSD_SOURCE 13
+typedef enum {
+ ACPI_RSD_TITLE = 0,
+ ACPI_RSD_1BITFLAG,
+ ACPI_RSD_2BITFLAG,
+ ACPI_RSD_3BITFLAG,
+ ACPI_RSD_ADDRESS,
+ ACPI_RSD_DWORDLIST,
+ ACPI_RSD_LITERAL,
+ ACPI_RSD_LONGLIST,
+ ACPI_RSD_SHORTLIST,
+ ACPI_RSD_SHORTLISTX,
+ ACPI_RSD_SOURCE,
+ ACPI_RSD_STRING,
+ ACPI_RSD_UINT8,
+ ACPI_RSD_UINT16,
+ ACPI_RSD_UINT32,
+ ACPI_RSD_UINT64,
+ ACPI_RSD_WORDLIST
+} ACPI_RSDUMP_OPCODES;
/* restore default alignment */
@@ -138,13 +158,18 @@ typedef const struct acpi_rsdump_info {
/* Resource tables indexed by internal resource type */
extern const u8 acpi_gbl_aml_resource_sizes[];
+extern const u8 acpi_gbl_aml_resource_serial_bus_sizes[];
extern struct acpi_rsconvert_info *acpi_gbl_set_resource_dispatch[];
/* Resource tables indexed by raw AML resource descriptor type */
extern const u8 acpi_gbl_resource_struct_sizes[];
+extern const u8 acpi_gbl_resource_struct_serial_bus_sizes[];
extern struct acpi_rsconvert_info *acpi_gbl_get_resource_dispatch[];
+extern struct acpi_rsconvert_info
+ *acpi_gbl_convert_resource_serial_bus_dispatch[];
+
struct acpi_vendor_walk_info {
struct acpi_vendor_uuid *uuid;
struct acpi_buffer *buffer;
@@ -190,6 +215,10 @@ acpi_status
acpi_rs_set_srs_method_data(struct acpi_namespace_node *node,
struct acpi_buffer *ret_buffer);
+acpi_status
+acpi_rs_get_aei_method_data(struct acpi_namespace_node *node,
+ struct acpi_buffer *ret_buffer);
+
/*
* rscalc
*/
@@ -293,6 +322,11 @@ extern struct acpi_rsconvert_info acpi_rs_convert_address16[];
extern struct acpi_rsconvert_info acpi_rs_convert_ext_irq[];
extern struct acpi_rsconvert_info acpi_rs_convert_address64[];
extern struct acpi_rsconvert_info acpi_rs_convert_ext_address64[];
+extern struct acpi_rsconvert_info acpi_rs_convert_gpio[];
+extern struct acpi_rsconvert_info acpi_rs_convert_fixed_dma[];
+extern struct acpi_rsconvert_info acpi_rs_convert_i2c_serial_bus[];
+extern struct acpi_rsconvert_info acpi_rs_convert_spi_serial_bus[];
+extern struct acpi_rsconvert_info acpi_rs_convert_uart_serial_bus[];
/* These resources require separate get/set tables */
@@ -310,6 +344,7 @@ extern struct acpi_rsconvert_info acpi_rs_set_vendor[];
* rsinfo
*/
extern struct acpi_rsdump_info *acpi_gbl_dump_resource_dispatch[];
+extern struct acpi_rsdump_info *acpi_gbl_dump_serial_bus_dispatch[];
/*
* rsdump
@@ -331,6 +366,12 @@ extern struct acpi_rsdump_info acpi_rs_dump_address64[];
extern struct acpi_rsdump_info acpi_rs_dump_ext_address64[];
extern struct acpi_rsdump_info acpi_rs_dump_ext_irq[];
extern struct acpi_rsdump_info acpi_rs_dump_generic_reg[];
+extern struct acpi_rsdump_info acpi_rs_dump_gpio[];
+extern struct acpi_rsdump_info acpi_rs_dump_fixed_dma[];
+extern struct acpi_rsdump_info acpi_rs_dump_common_serial_bus[];
+extern struct acpi_rsdump_info acpi_rs_dump_i2c_serial_bus[];
+extern struct acpi_rsdump_info acpi_rs_dump_spi_serial_bus[];
+extern struct acpi_rsdump_info acpi_rs_dump_uart_serial_bus[];
#endif
#endif /* __ACRESRC_H__ */
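The opcode lists above drive small table programs: each struct acpi_rsconvert_info entry tells a generic walker how to move one field between the raw AML descriptor and the internal struct acpi_resource form. The following is a rough standalone sketch of that table-driven pattern; the entry layout and the walker are simplified assumptions for illustration, and only the opcode names echo the enum introduced above.

/* Sketch of a table-driven converter in the style of acpi_rsconvert_info. */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

enum rsc_opcode { RSC_INITGET, RSC_MOVE8, RSC_MOVE16, RSC_SET8, RSC_EXIT_NE };

struct rsc_info {
    enum rsc_opcode opcode;
    uint16_t dst_offset;   /* offset in the internal (struct) form */
    uint16_t src_offset;   /* offset in the raw AML form */
    uint16_t value;        /* opcode-specific operand */
};

/* Execute a tiny conversion program: copy bytes/words from 'aml' to 'res' */
static void convert(uint8_t *res, const uint8_t *aml,
                    const struct rsc_info *prog, size_t count)
{
    for (size_t i = 0; i < count; i++) {
        switch (prog[i].opcode) {
        case RSC_INITGET:
            memset(res, 0, prog[i].value);            /* zero the output */
            break;
        case RSC_MOVE8:
            res[prog[i].dst_offset] = aml[prog[i].src_offset];
            break;
        case RSC_MOVE16:
            memcpy(res + prog[i].dst_offset, aml + prog[i].src_offset, 2);
            break;
        case RSC_SET8:
            res[prog[i].dst_offset] = (uint8_t)prog[i].value;
            break;
        case RSC_EXIT_NE:
            return;                                   /* conditional exit */
        }
    }
}

int main(void)
{
    const uint8_t aml[8] = { 0x11, 0x22, 0x33, 0x44 };
    uint8_t res[8];
    const struct rsc_info prog[] = {
        { RSC_INITGET, 0, 0, sizeof(res) },
        { RSC_MOVE8,   0, 1, 0 },   /* res[0]    = aml[1]    */
        { RSC_MOVE16,  2, 2, 0 },   /* res[2..3] = aml[2..3] */
    };
    convert(res, aml, prog, 3);
    printf("%02x %02x %02x %02x\n", res[0], res[1], res[2], res[3]);
    return 0;
}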
diff --git a/drivers/acpi/acpica/acstruct.h b/drivers/acpi/acpica/acstruct.h
index 1623b245dde2..0404df605bc1 100644
--- a/drivers/acpi/acpica/acstruct.h
+++ b/drivers/acpi/acpica/acstruct.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h
index 967f08124eba..d5bec304c823 100644
--- a/drivers/acpi/acpica/actables.h
+++ b/drivers/acpi/acpica/actables.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index 99c140d8e348..925ccf22101b 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -45,6 +45,7 @@
#define _ACUTILS_H
extern const u8 acpi_gbl_resource_aml_sizes[];
+extern const u8 acpi_gbl_resource_aml_serial_bus_sizes[];
/* Strings used by the disassembler and debugger resource dump routines */
@@ -579,6 +580,24 @@ acpi_ut_create_list(char *list_name,
#endif /* ACPI_DBG_TRACK_ALLOCATIONS */
/*
+ * utaddress - address range check
+ */
+acpi_status
+acpi_ut_add_address_range(acpi_adr_space_type space_id,
+ acpi_physical_address address,
+ u32 length, struct acpi_namespace_node *region_node);
+
+void
+acpi_ut_remove_address_range(acpi_adr_space_type space_id,
+ struct acpi_namespace_node *region_node);
+
+u32
+acpi_ut_check_address_range(acpi_adr_space_type space_id,
+ acpi_physical_address address, u32 length, u8 warn);
+
+void acpi_ut_delete_address_lists(void);
+
+/*
* utxferror - various error/warning output functions
*/
void ACPI_INTERNAL_VAR_XFACE
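The new utaddress declarations above describe an add/check/remove interface for tracking operation-region address ranges. Here is a minimal sketch of such a range list, assuming a simple singly linked list and an inclusive-overlap rule; the real ACPICA implementation may differ.

/* Minimal sketch of an address-range list with the same shape of API as the
 * utaddress declarations above (add / check / delete).  The list layout and
 * overlap rule are assumptions for illustration, not the ACPICA code. */
#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

struct range {
    uint64_t start;
    uint64_t end;          /* inclusive */
    struct range *next;
};

static struct range *ranges;

static int range_add(uint64_t address, uint32_t length)
{
    struct range *r = malloc(sizeof(*r));
    if (!r)
        return -1;
    r->start = address;
    r->end = address + length - 1;
    r->next = ranges;
    ranges = r;
    return 0;
}

/* Return how many tracked ranges overlap [address, address + length) */
static uint32_t range_check(uint64_t address, uint32_t length)
{
    uint64_t end = address + length - 1;
    uint32_t overlaps = 0;

    for (struct range *r = ranges; r; r = r->next)
        if (address <= r->end && end >= r->start)
            overlaps++;
    return overlaps;
}

static void range_delete_all(void)
{
    while (ranges) {
        struct range *next = ranges->next;
        free(ranges);
        ranges = next;
    }
}

int main(void)
{
    range_add(0x1000, 0x100);                             /* e.g. an OpRegion window */
    printf("overlaps: %u\n", range_check(0x1080, 0x10));  /* prints 1 */
    printf("overlaps: %u\n", range_check(0x2000, 0x10));  /* prints 0 */
    range_delete_all();
    return 0;
}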
diff --git a/drivers/acpi/acpica/amlcode.h b/drivers/acpi/acpica/amlcode.h
index 1077f17859ed..905280fec0fa 100644
--- a/drivers/acpi/acpica/amlcode.h
+++ b/drivers/acpi/acpica/amlcode.h
@@ -7,7 +7,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -189,6 +189,14 @@
#define AML_LNOTEQUAL_OP (u16) 0x9293
/*
+ * Opcodes for "Field" operators
+ */
+#define AML_FIELD_OFFSET_OP (u8) 0x00
+#define AML_FIELD_ACCESS_OP (u8) 0x01
+#define AML_FIELD_CONNECTION_OP (u8) 0x02 /* ACPI 5.0 */
+#define AML_FIELD_EXT_ACCESS_OP (u8) 0x03 /* ACPI 5.0 */
+
+/*
* Internal opcodes
* Use only "Unknown" AML opcodes, don't attempt to use
* any valid ACPI ASCII values (A-Z, 0-9, '-')
@@ -202,6 +210,8 @@
#define AML_INT_METHODCALL_OP (u16) 0x0035
#define AML_INT_RETURN_VALUE_OP (u16) 0x0036
#define AML_INT_EVAL_SUBTREE_OP (u16) 0x0037
+#define AML_INT_CONNECTION_OP (u16) 0x0038
+#define AML_INT_EXTACCESSFIELD_OP (u16) 0x0039
#define ARG_NONE 0x0
@@ -456,13 +466,16 @@ typedef enum {
* access_as keyword
*/
typedef enum {
- AML_FIELD_ATTRIB_SMB_QUICK = 0x02,
- AML_FIELD_ATTRIB_SMB_SEND_RCV = 0x04,
- AML_FIELD_ATTRIB_SMB_BYTE = 0x06,
- AML_FIELD_ATTRIB_SMB_WORD = 0x08,
- AML_FIELD_ATTRIB_SMB_BLOCK = 0x0A,
- AML_FIELD_ATTRIB_SMB_WORD_CALL = 0x0C,
- AML_FIELD_ATTRIB_SMB_BLOCK_CALL = 0x0D
+ AML_FIELD_ATTRIB_QUICK = 0x02,
+ AML_FIELD_ATTRIB_SEND_RCV = 0x04,
+ AML_FIELD_ATTRIB_BYTE = 0x06,
+ AML_FIELD_ATTRIB_WORD = 0x08,
+ AML_FIELD_ATTRIB_BLOCK = 0x0A,
+ AML_FIELD_ATTRIB_MULTIBYTE = 0x0B,
+ AML_FIELD_ATTRIB_WORD_CALL = 0x0C,
+ AML_FIELD_ATTRIB_BLOCK_CALL = 0x0D,
+ AML_FIELD_ATTRIB_RAW_BYTES = 0x0E,
+ AML_FIELD_ATTRIB_RAW_PROCESS = 0x0F
} AML_ACCESS_ATTRIBUTE;
/* Bit fields in the AML method_flags byte */
diff --git a/drivers/acpi/acpica/amlresrc.h b/drivers/acpi/acpica/amlresrc.h
index 59122cde247c..7b2128f274e7 100644
--- a/drivers/acpi/acpica/amlresrc.h
+++ b/drivers/acpi/acpica/amlresrc.h
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -58,29 +58,48 @@
#define ACPI_RESTAG_TYPESPECIFICATTRIBUTES "_ATT"
#define ACPI_RESTAG_BASEADDRESS "_BAS"
#define ACPI_RESTAG_BUSMASTER "_BM_" /* Master(1), Slave(0) */
+#define ACPI_RESTAG_DEBOUNCETIME "_DBT"
#define ACPI_RESTAG_DECODE "_DEC"
+#define ACPI_RESTAG_DEVICEPOLARITY "_DPL"
#define ACPI_RESTAG_DMA "_DMA"
#define ACPI_RESTAG_DMATYPE "_TYP" /* Compatible(0), A(1), B(2), F(3) */
+#define ACPI_RESTAG_DRIVESTRENGTH "_DRS"
+#define ACPI_RESTAG_ENDIANNESS "_END"
+#define ACPI_RESTAG_FLOWCONTROL "_FLC"
#define ACPI_RESTAG_GRANULARITY "_GRA"
#define ACPI_RESTAG_INTERRUPT "_INT"
#define ACPI_RESTAG_INTERRUPTLEVEL "_LL_" /* active_lo(1), active_hi(0) */
#define ACPI_RESTAG_INTERRUPTSHARE "_SHR" /* Shareable(1), no_share(0) */
#define ACPI_RESTAG_INTERRUPTTYPE "_HE_" /* Edge(1), Level(0) */
+#define ACPI_RESTAG_IORESTRICTION "_IOR"
#define ACPI_RESTAG_LENGTH "_LEN"
+#define ACPI_RESTAG_LINE "_LIN"
#define ACPI_RESTAG_MEMATTRIBUTES "_MTP" /* Memory(0), Reserved(1), ACPI(2), NVS(3) */
#define ACPI_RESTAG_MEMTYPE "_MEM" /* non_cache(0), Cacheable(1) Cache+combine(2), Cache+prefetch(3) */
#define ACPI_RESTAG_MAXADDR "_MAX"
#define ACPI_RESTAG_MINADDR "_MIN"
#define ACPI_RESTAG_MAXTYPE "_MAF"
#define ACPI_RESTAG_MINTYPE "_MIF"
+#define ACPI_RESTAG_MODE "_MOD"
+#define ACPI_RESTAG_PARITY "_PAR"
+#define ACPI_RESTAG_PHASE "_PHA"
+#define ACPI_RESTAG_PIN "_PIN"
+#define ACPI_RESTAG_PINCONFIG "_PPI"
+#define ACPI_RESTAG_POLARITY "_POL"
#define ACPI_RESTAG_REGISTERBITOFFSET "_RBO"
#define ACPI_RESTAG_REGISTERBITWIDTH "_RBW"
#define ACPI_RESTAG_RANGETYPE "_RNG"
#define ACPI_RESTAG_READWRITETYPE "_RW_" /* read_only(0), Writeable (1) */
+#define ACPI_RESTAG_LENGTH_RX "_RXL"
+#define ACPI_RESTAG_LENGTH_TX "_TXL"
+#define ACPI_RESTAG_SLAVEMODE "_SLV"
+#define ACPI_RESTAG_SPEED "_SPE"
+#define ACPI_RESTAG_STOPBITS "_STB"
#define ACPI_RESTAG_TRANSLATION "_TRA"
#define ACPI_RESTAG_TRANSTYPE "_TRS" /* Sparse(1), Dense(0) */
#define ACPI_RESTAG_TYPE "_TTP" /* Translation(1), Static (0) */
#define ACPI_RESTAG_XFERTYPE "_SIZ" /* 8(0), 8_and16(1), 16(2) */
+#define ACPI_RESTAG_VENDORDATA "_VEN"
/* Default sizes for "small" resource descriptors */
@@ -90,6 +109,7 @@
#define ASL_RDESC_END_DEPEND_SIZE 0x00
#define ASL_RDESC_IO_SIZE 0x07
#define ASL_RDESC_FIXED_IO_SIZE 0x03
+#define ASL_RDESC_FIXED_DMA_SIZE 0x05
#define ASL_RDESC_END_TAG_SIZE 0x01
struct asl_resource_node {
@@ -164,6 +184,12 @@ struct aml_resource_end_tag {
AML_RESOURCE_SMALL_HEADER_COMMON u8 checksum;
};
+struct aml_resource_fixed_dma {
+ AML_RESOURCE_SMALL_HEADER_COMMON u16 request_lines;
+ u16 channels;
+ u8 width;
+};
+
/*
* LARGE descriptors
*/
@@ -263,6 +289,110 @@ struct aml_resource_generic_register {
u64 address;
};
+/* Common descriptor for gpio_int and gpio_io (ACPI 5.0) */
+
+struct aml_resource_gpio {
+ AML_RESOURCE_LARGE_HEADER_COMMON u8 revision_id;
+ u8 connection_type;
+ u16 flags;
+ u16 int_flags;
+ u8 pin_config;
+ u16 drive_strength;
+ u16 debounce_timeout;
+ u16 pin_table_offset;
+ u8 res_source_index;
+ u16 res_source_offset;
+ u16 vendor_offset;
+ u16 vendor_length;
+ /*
+ * Optional fields follow immediately:
+ * 1) PIN list (Words)
+ * 2) Resource Source String
+ * 3) Vendor Data bytes
+ */
+};
+
+#define AML_RESOURCE_GPIO_REVISION 1 /* ACPI 5.0 */
+
+/* Values for connection_type above */
+
+#define AML_RESOURCE_GPIO_TYPE_INT 0
+#define AML_RESOURCE_GPIO_TYPE_IO 1
+#define AML_RESOURCE_MAX_GPIOTYPE 1
+
+/* Common preamble for all serial descriptors (ACPI 5.0) */
+
+#define AML_RESOURCE_SERIAL_COMMON \
+ u8 revision_id; \
+ u8 res_source_index; \
+ u8 type; \
+ u8 flags; \
+ u16 type_specific_flags; \
+ u8 type_revision_id; \
+ u16 type_data_length; \
+
+/* Values for the type field above */
+
+#define AML_RESOURCE_I2C_SERIALBUSTYPE 1
+#define AML_RESOURCE_SPI_SERIALBUSTYPE 2
+#define AML_RESOURCE_UART_SERIALBUSTYPE 3
+#define AML_RESOURCE_MAX_SERIALBUSTYPE 3
+#define AML_RESOURCE_VENDOR_SERIALBUSTYPE 192 /* Vendor defined is 0xC0-0xFF (NOT SUPPORTED) */
+
+struct aml_resource_common_serialbus {
+AML_RESOURCE_LARGE_HEADER_COMMON AML_RESOURCE_SERIAL_COMMON};
+
+struct aml_resource_i2c_serialbus {
+ AML_RESOURCE_LARGE_HEADER_COMMON
+ AML_RESOURCE_SERIAL_COMMON u32 connection_speed;
+ u16 slave_address;
+ /*
+ * Optional fields follow immediately:
+ * 1) Vendor Data bytes
+ * 2) Resource Source String
+ */
+};
+
+#define AML_RESOURCE_I2C_REVISION 1 /* ACPI 5.0 */
+#define AML_RESOURCE_I2C_TYPE_REVISION 1 /* ACPI 5.0 */
+#define AML_RESOURCE_I2C_MIN_DATA_LEN 6
+
+struct aml_resource_spi_serialbus {
+ AML_RESOURCE_LARGE_HEADER_COMMON
+ AML_RESOURCE_SERIAL_COMMON u32 connection_speed;
+ u8 data_bit_length;
+ u8 clock_phase;
+ u8 clock_polarity;
+ u16 device_selection;
+ /*
+ * Optional fields follow immediately:
+ * 1) Vendor Data bytes
+ * 2) Resource Source String
+ */
+};
+
+#define AML_RESOURCE_SPI_REVISION 1 /* ACPI 5.0 */
+#define AML_RESOURCE_SPI_TYPE_REVISION 1 /* ACPI 5.0 */
+#define AML_RESOURCE_SPI_MIN_DATA_LEN 9
+
+struct aml_resource_uart_serialbus {
+ AML_RESOURCE_LARGE_HEADER_COMMON
+ AML_RESOURCE_SERIAL_COMMON u32 default_baud_rate;
+ u16 rx_fifo_size;
+ u16 tx_fifo_size;
+ u8 parity;
+ u8 lines_enabled;
+ /*
+ * Optional fields follow immediately:
+ * 1) Vendor Data bytes
+ * 2) Resource Source String
+ */
+};
+
+#define AML_RESOURCE_UART_REVISION 1 /* ACPI 5.0 */
+#define AML_RESOURCE_UART_TYPE_REVISION 1 /* ACPI 5.0 */
+#define AML_RESOURCE_UART_MIN_DATA_LEN 10
+
/* restore default alignment */
#pragma pack()
@@ -284,6 +414,7 @@ union aml_resource {
struct aml_resource_end_dependent end_dpf;
struct aml_resource_io io;
struct aml_resource_fixed_io fixed_io;
+ struct aml_resource_fixed_dma fixed_dma;
struct aml_resource_vendor_small vendor_small;
struct aml_resource_end_tag end_tag;
@@ -299,6 +430,11 @@ union aml_resource {
struct aml_resource_address64 address64;
struct aml_resource_extended_address64 ext_address64;
struct aml_resource_extended_irq extended_irq;
+ struct aml_resource_gpio gpio;
+ struct aml_resource_i2c_serialbus i2c_serial_bus;
+ struct aml_resource_spi_serialbus spi_serial_bus;
+ struct aml_resource_uart_serialbus uart_serial_bus;
+ struct aml_resource_common_serialbus common_serial_bus;
/* Utility overlays */
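The serial-bus descriptors above each define a minimum type-specific data length (6 for I2C, 9 for SPI, 10 for UART); anything beyond that minimum is vendor-defined data. Below is a small sketch of how a parser might use those minimums, with the descriptor reduced to the two fields needed for the calculation; the names are simplified stand-ins for the AML_RESOURCE_* constants above.

/* Sketch: how many vendor bytes follow the mandatory type-specific data. */
#include <stdint.h>
#include <stdio.h>

#define SERIALBUS_I2C   1
#define SERIALBUS_SPI   2
#define SERIALBUS_UART  3

#define I2C_MIN_DATA_LEN   6
#define SPI_MIN_DATA_LEN   9
#define UART_MIN_DATA_LEN 10

/* Return the number of vendor-defined bytes, or -1 for an unknown type */
static int vendor_data_length(uint8_t bus_type, uint16_t type_data_length)
{
    uint16_t min;

    switch (bus_type) {
    case SERIALBUS_I2C:  min = I2C_MIN_DATA_LEN;  break;
    case SERIALBUS_SPI:  min = SPI_MIN_DATA_LEN;  break;
    case SERIALBUS_UART: min = UART_MIN_DATA_LEN; break;
    default:             return -1;
    }
    return type_data_length >= min ? type_data_length - min : 0;
}

int main(void)
{
    /* An I2C connection descriptor carrying 2 extra vendor bytes */
    printf("I2C vendor bytes:  %d\n", vendor_data_length(SERIALBUS_I2C, 8));
    /* A UART descriptor with only the mandatory fields */
    printf("UART vendor bytes: %d\n", vendor_data_length(SERIALBUS_UART, 10));
    return 0;
}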
diff --git a/drivers/acpi/acpica/dsargs.c b/drivers/acpi/acpica/dsargs.c
index 8c7b99728aa2..80eb1900297f 100644
--- a/drivers/acpi/acpica/dsargs.c
+++ b/drivers/acpi/acpica/dsargs.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -250,6 +250,13 @@ acpi_ds_get_bank_field_arguments(union acpi_operand_object *obj_desc)
status = acpi_ds_execute_arguments(node, node->parent,
extra_desc->extra.aml_length,
extra_desc->extra.aml_start);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ status = acpi_ut_add_address_range(obj_desc->region.space_id,
+ obj_desc->region.address,
+ obj_desc->region.length, node);
return_ACPI_STATUS(status);
}
@@ -384,8 +391,15 @@ acpi_status acpi_ds_get_region_arguments(union acpi_operand_object *obj_desc)
/* Execute the argument AML */
- status = acpi_ds_execute_arguments(node, node->parent,
+ status = acpi_ds_execute_arguments(node, extra_desc->extra.scope_node,
extra_desc->extra.aml_length,
extra_desc->extra.aml_start);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ status = acpi_ut_add_address_range(obj_desc->region.space_id,
+ obj_desc->region.address,
+ obj_desc->region.length, node);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/dscontrol.c b/drivers/acpi/acpica/dscontrol.c
index 26c49fff58da..effe4ca1133f 100644
--- a/drivers/acpi/acpica/dscontrol.c
+++ b/drivers/acpi/acpica/dscontrol.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
index 34be60c0e448..cd243cf2cab2 100644
--- a/drivers/acpi/acpica/dsfield.c
+++ b/drivers/acpi/acpica/dsfield.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -221,6 +221,7 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
{
acpi_status status;
u64 position;
+ union acpi_parse_object *child;
ACPI_FUNCTION_TRACE_PTR(ds_get_field_names, info);
@@ -232,10 +233,11 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
while (arg) {
/*
- * Three types of field elements are handled:
- * 1) Offset - specifies a bit offset
- * 2) access_as - changes the access mode
- * 3) Name - Enters a new named field into the namespace
+ * Four types of field elements are handled:
+ * 1) Name - Enters a new named field into the namespace
+ * 2) Offset - specifies a bit offset
+ * 3) access_as - changes the access mode/attributes
+ * 4) Connection - Associate a resource template with the field
*/
switch (arg->common.aml_opcode) {
case AML_INT_RESERVEDFIELD_OP:
@@ -253,21 +255,70 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
break;
case AML_INT_ACCESSFIELD_OP:
-
+ case AML_INT_EXTACCESSFIELD_OP:
/*
- * Get a new access_type and access_attribute -- to be used for all
- * field units that follow, until field end or another access_as
- * keyword.
+ * Get new access_type, access_attribute, and access_length fields
+ * -- to be used for all field units that follow, until the
+ * end-of-field or another access_as keyword is encountered.
+ * NOTE. These three bytes are encoded in the integer value
+ * of the parseop for convenience.
*
* In field_flags, preserve the flag bits other than the
- * ACCESS_TYPE bits
+ * ACCESS_TYPE bits.
*/
+
+ /* access_type (byte_acc, word_acc, etc.) */
+
info->field_flags = (u8)
((info->
field_flags & ~(AML_FIELD_ACCESS_TYPE_MASK)) |
- ((u8) ((u32) arg->common.value.integer >> 8)));
+ ((u8)((u32)(arg->common.value.integer & 0x07))));
+
+ /* access_attribute (attrib_quick, attrib_byte, etc.) */
+
+ info->attribute =
+ (u8)((arg->common.value.integer >> 8) & 0xFF);
+
+ /* access_length (for serial/buffer protocols) */
+
+ info->access_length =
+ (u8)((arg->common.value.integer >> 16) & 0xFF);
+ break;
+
+ case AML_INT_CONNECTION_OP:
+ /*
+ * Clear any previous connection. New connection is used for all
+ * fields that follow, similar to access_as
+ */
+ info->resource_buffer = NULL;
+ info->connection_node = NULL;
- info->attribute = (u8) (arg->common.value.integer);
+ /*
+ * A Connection() is either an actual resource descriptor (buffer)
+ * or a named reference to a resource template
+ */
+ child = arg->common.value.arg;
+ if (child->common.aml_opcode == AML_INT_BYTELIST_OP) {
+ info->resource_buffer = child->named.data;
+ info->resource_length =
+ (u16)child->named.value.integer;
+ } else {
+ /* Lookup the Connection() namepath, it should already exist */
+
+ status = acpi_ns_lookup(walk_state->scope_info,
+ child->common.value.
+ name, ACPI_TYPE_ANY,
+ ACPI_IMODE_EXECUTE,
+ ACPI_NS_DONT_OPEN_SCOPE,
+ walk_state,
+ &info->connection_node);
+ if (ACPI_FAILURE(status)) {
+ ACPI_ERROR_NAMESPACE(child->common.
+ value.name,
+ status);
+ return_ACPI_STATUS(status);
+ }
+ }
break;
case AML_INT_NAMEDFIELD_OP:
@@ -374,6 +425,8 @@ acpi_ds_create_field(union acpi_parse_object *op,
}
}
+ ACPI_MEMSET(&info, 0, sizeof(struct acpi_create_field_info));
+
/* Second arg is the field flags */
arg = arg->common.next;
@@ -386,7 +439,6 @@ acpi_ds_create_field(union acpi_parse_object *op,
info.region_node = region_node;
status = acpi_ds_get_field_names(&info, walk_state, arg->common.next);
-
return_ACPI_STATUS(status);
}
@@ -474,8 +526,8 @@ acpi_ds_init_field_objects(union acpi_parse_object *op,
*/
while (arg) {
/*
- * Ignore OFFSET and ACCESSAS terms here; we are only interested in the
- * field names in order to enter them into the namespace.
+ * Ignore OFFSET/ACCESSAS/CONNECTION terms here; we are only interested
+ * in the field names in order to enter them into the namespace.
*/
if (arg->common.aml_opcode == AML_INT_NAMEDFIELD_OP) {
status = acpi_ns_lookup(walk_state->scope_info,
@@ -651,6 +703,5 @@ acpi_ds_create_index_field(union acpi_parse_object *op,
info.region_node = region_node;
status = acpi_ds_get_field_names(&info, walk_state, arg->common.next);
-
return_ACPI_STATUS(status);
}
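The access_as handling above relies on the parser packing three bytes (access type, access attribute, access length) into one integer on the parse op. Here is a standalone sketch of that packing and the matching unpack step, mirroring the masks used in the hunk; the pack_access()/unpack_access() helpers are illustrative, not the ACPICA parser.

/* Sketch of the three-byte packing described in the comment above. */
#include <stdint.h>
#include <stdio.h>

static uint32_t pack_access(uint8_t type, uint8_t attribute, uint8_t length)
{
    return (uint32_t)type | ((uint32_t)attribute << 8) | ((uint32_t)length << 16);
}

static void unpack_access(uint32_t value,
                          uint8_t *type, uint8_t *attribute, uint8_t *length)
{
    *type      = (uint8_t)(value & 0x07);          /* byte_acc, word_acc, ... */
    *attribute = (uint8_t)((value >> 8) & 0xFF);   /* attrib_quick, attrib_bytes, ... */
    *length    = (uint8_t)((value >> 16) & 0xFF);  /* serial/buffer protocols only */
}

int main(void)
{
    uint8_t t, a, l;

    /* e.g. an access_as with attribute 0x0E (raw bytes) and length 16 */
    unpack_access(pack_access(5, 0x0E, 16), &t, &a, &l);
    printf("type %u attribute 0x%02X length %u\n", t, a, l);
    return 0;
}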
diff --git a/drivers/acpi/acpica/dsinit.c b/drivers/acpi/acpica/dsinit.c
index a7718bf2b9a1..9e5ac7f780a7 100644
--- a/drivers/acpi/acpica/dsinit.c
+++ b/drivers/acpi/acpica/dsinit.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index 5d797751e205..00f5dab5bcc0 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsmthdat.c b/drivers/acpi/acpica/dsmthdat.c
index 905ce29a92e1..b40bd507be5d 100644
--- a/drivers/acpi/acpica/dsmthdat.c
+++ b/drivers/acpi/acpica/dsmthdat.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c
index f42e17e5c252..d7045ca3e32a 100644
--- a/drivers/acpi/acpica/dsobject.c
+++ b/drivers/acpi/acpica/dsobject.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index c627a288e027..e5eff7585102 100644
--- a/drivers/acpi/acpica/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsutils.c b/drivers/acpi/acpica/dsutils.c
index 2c477ce172fa..1abcda31037f 100644
--- a/drivers/acpi/acpica/dsutils.c
+++ b/drivers/acpi/acpica/dsutils.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c
index fe40e4c6554f..642f3c053e87 100644
--- a/drivers/acpi/acpica/dswexec.c
+++ b/drivers/acpi/acpica/dswexec.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c
index 324acec1179a..552aa3a50c84 100644
--- a/drivers/acpi/acpica/dswload.c
+++ b/drivers/acpi/acpica/dswload.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c
index 976318138c56..ae7147724763 100644
--- a/drivers/acpi/acpica/dswload2.c
+++ b/drivers/acpi/acpica/dswload2.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswscope.c b/drivers/acpi/acpica/dswscope.c
index 76a661fc1e09..9e9490a9cbf0 100644
--- a/drivers/acpi/acpica/dswscope.c
+++ b/drivers/acpi/acpica/dswscope.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswstate.c b/drivers/acpi/acpica/dswstate.c
index a6c374ef9914..c9c2ac13e7cc 100644
--- a/drivers/acpi/acpica/dswstate.c
+++ b/drivers/acpi/acpica/dswstate.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c
index d458b041e651..6729ebe2f1e6 100644
--- a/drivers/acpi/acpica/evevent.c
+++ b/drivers/acpi/acpica/evevent.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -71,6 +71,12 @@ acpi_status acpi_ev_initialize_events(void)
ACPI_FUNCTION_TRACE(ev_initialize_events);
+ /* If Hardware Reduced flag is set, there are no fixed events */
+
+ if (acpi_gbl_reduced_hardware) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
/*
* Initialize the Fixed and General Purpose Events. This is done prior to
* enabling SCIs to prevent interrupts from occurring before the handlers
@@ -111,6 +117,12 @@ acpi_status acpi_ev_install_xrupt_handlers(void)
ACPI_FUNCTION_TRACE(ev_install_xrupt_handlers);
+ /* If Hardware Reduced flag is set, there is no ACPI h/w */
+
+ if (acpi_gbl_reduced_hardware) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
/* Install the SCI handler */
status = acpi_ev_install_sci_handler();
diff --git a/drivers/acpi/acpica/evglock.c b/drivers/acpi/acpica/evglock.c
index 56a562a1e5d7..5e5683cb1f0d 100644
--- a/drivers/acpi/acpica/evglock.c
+++ b/drivers/acpi/acpica/evglock.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -70,6 +70,12 @@ acpi_status acpi_ev_init_global_lock_handler(void)
ACPI_FUNCTION_TRACE(ev_init_global_lock_handler);
+ /* If Hardware Reduced flag is set, there is no global lock */
+
+ if (acpi_gbl_reduced_hardware) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
/* Attempt installation of the global lock handler */
status = acpi_install_fixed_event_handler(ACPI_EVENT_GLOBAL,
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index 65c79add3b19..9e88cb6fb25e 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index ca2c41a53311..be75339cd5dd 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c
index ce9aa9f9a972..adf7494da9db 100644
--- a/drivers/acpi/acpica/evgpeinit.c
+++ b/drivers/acpi/acpica/evgpeinit.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evgpeutil.c b/drivers/acpi/acpica/evgpeutil.c
index 80a81d0c4a80..25073932aa10 100644
--- a/drivers/acpi/acpica/evgpeutil.c
+++ b/drivers/acpi/acpica/evgpeutil.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c
index d0b331844427..84966f416463 100644
--- a/drivers/acpi/acpica/evmisc.c
+++ b/drivers/acpi/acpica/evmisc.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index f0edf5c43c03..1b0180a1b798 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -329,6 +329,7 @@ acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function)
* FUNCTION: acpi_ev_address_space_dispatch
*
* PARAMETERS: region_obj - Internal region object
+ * field_obj - Corresponding field. Can be NULL.
* Function - Read or Write operation
* region_offset - Where in the region to read or write
* bit_width - Field width in bits (8, 16, 32, or 64)
@@ -344,6 +345,7 @@ acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function)
acpi_status
acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
+ union acpi_operand_object *field_obj,
u32 function,
u32 region_offset, u32 bit_width, u64 *value)
{
@@ -353,6 +355,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
union acpi_operand_object *handler_desc;
union acpi_operand_object *region_obj2;
void *region_context = NULL;
+ struct acpi_connection_info *context;
ACPI_FUNCTION_TRACE(ev_address_space_dispatch);
@@ -375,6 +378,8 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
return_ACPI_STATUS(AE_NOT_EXIST);
}
+ context = handler_desc->address_space.context;
+
/*
* It may be the case that the region has never been initialized.
* Some types of regions require special init code
@@ -404,8 +409,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
acpi_ex_exit_interpreter();
status = region_setup(region_obj, ACPI_REGION_ACTIVATE,
- handler_desc->address_space.context,
- &region_context);
+ context, &region_context);
/* Re-enter the interpreter */
@@ -455,6 +459,25 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
acpi_ut_get_region_name(region_obj->region.
space_id)));
+ /*
+ * Special handling for generic_serial_bus and general_purpose_io:
+ * There are three extra parameters that must be passed to the
+ * handler via the context:
+ * 1) Connection buffer, a resource template from Connection() op.
+ * 2) Length of the above buffer.
+ * 3) Actual access length from the access_as() op.
+ */
+ if (((region_obj->region.space_id == ACPI_ADR_SPACE_GSBUS) ||
+ (region_obj->region.space_id == ACPI_ADR_SPACE_GPIO)) &&
+ context && field_obj) {
+
+ /* Get the Connection (resource_template) buffer */
+
+ context->connection = field_obj->field.resource_buffer;
+ context->length = field_obj->field.resource_length;
+ context->access_length = field_obj->field.access_length;
+ }
+
if (!(handler_desc->address_space.handler_flags &
ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) {
/*
@@ -469,7 +492,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
status = handler(function,
(region_obj->region.address + region_offset),
- bit_width, value, handler_desc->address_space.context,
+ bit_width, value, context,
region_obj2->extra.region_context);
if (ACPI_FAILURE(status)) {
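For GenericSerialBus and GeneralPurposeIo regions, the dispatcher above now copies the Connection() buffer, its length, and the AccessAs length into the handler context before calling the handler. The following is a toy sketch of the handler-side view, using an assumed connection_info structure and handler signature rather than the real acpi_connection_info and address-space handler types.

/* Sketch of a space handler consuming connection data passed via context. */
#include <stdint.h>
#include <stdio.h>

struct connection_info {
    uint8_t  *connection;     /* resource template from Connection() */
    uint16_t  length;         /* length of that buffer */
    uint8_t   access_length;  /* from AccessAs(..., AttribBytes(n)) */
};

/* A toy space handler: just reports what it was given */
static int space_handler(uint32_t function, uint64_t address,
                         uint32_t bit_width, uint64_t *value,
                         struct connection_info *context)
{
    (void)function; (void)address; (void)bit_width; (void)value;
    printf("connection buffer of %u bytes, access length %u\n",
           context->length, context->access_length);
    return 0;
}

int main(void)
{
    uint8_t res_template[16] = { 0 };      /* stand-in resource template */
    struct connection_info ctx = {
        .connection = res_template,
        .length = sizeof(res_template),
        .access_length = 4,
    };
    uint64_t value = 0;

    /* What the dispatcher would do after filling in the context */
    return space_handler(/* read */ 0, 0x20, 8, &value, &ctx);
}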
diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c
index 55a5d35ef34a..819c17f5897a 100644
--- a/drivers/acpi/acpica/evrgnini.c
+++ b/drivers/acpi/acpica/evrgnini.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evsci.c b/drivers/acpi/acpica/evsci.c
index 2ebd40e1a3ef..26065c612e76 100644
--- a/drivers/acpi/acpica/evsci.c
+++ b/drivers/acpi/acpica/evsci.c
@@ -6,7 +6,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c
index f4f523bf5939..61944e89565a 100644
--- a/drivers/acpi/acpica/evxface.c
+++ b/drivers/acpi/acpica/evxface.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index 20516e599476..1768bbec1002 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index f06a3ee356ba..33388fd69df4 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c
index aee887e3ca5c..6019208cd4b6 100644
--- a/drivers/acpi/acpica/evxfregn.c
+++ b/drivers/acpi/acpica/evxfregn.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c
index 745a42b401f5..c86d44e41bc8 100644
--- a/drivers/acpi/acpica/exconfig.c
+++ b/drivers/acpi/acpica/exconfig.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -297,9 +297,9 @@ acpi_ex_region_read(union acpi_operand_object *obj_desc, u32 length, u8 *buffer)
/* Bytewise reads */
for (i = 0; i < length; i++) {
- status = acpi_ev_address_space_dispatch(obj_desc, ACPI_READ,
- region_offset, 8,
- &value);
+ status =
+ acpi_ev_address_space_dispatch(obj_desc, NULL, ACPI_READ,
+ region_offset, 8, &value);
if (ACPI_FAILURE(status)) {
return status;
}
diff --git a/drivers/acpi/acpica/exconvrt.c b/drivers/acpi/acpica/exconvrt.c
index 74162a11817d..e385436bd424 100644
--- a/drivers/acpi/acpica/exconvrt.c
+++ b/drivers/acpi/acpica/exconvrt.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c
index 110711afada8..3f5bc998c1cb 100644
--- a/drivers/acpi/acpica/excreate.c
+++ b/drivers/acpi/acpica/excreate.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -267,7 +267,7 @@ acpi_status acpi_ex_create_mutex(struct acpi_walk_state *walk_state)
*
* PARAMETERS: aml_start - Pointer to the region declaration AML
* aml_length - Max length of the declaration AML
- * region_space - space_iD for the region
+ * space_id - Address space ID for the region
* walk_state - Current state
*
* RETURN: Status
@@ -279,7 +279,7 @@ acpi_status acpi_ex_create_mutex(struct acpi_walk_state *walk_state)
acpi_status
acpi_ex_create_region(u8 * aml_start,
u32 aml_length,
- u8 region_space, struct acpi_walk_state *walk_state)
+ u8 space_id, struct acpi_walk_state *walk_state)
{
acpi_status status;
union acpi_operand_object *obj_desc;
@@ -304,16 +304,19 @@ acpi_ex_create_region(u8 * aml_start,
* Space ID must be one of the predefined IDs, or in the user-defined
* range
*/
- if ((region_space >= ACPI_NUM_PREDEFINED_REGIONS) &&
- (region_space < ACPI_USER_REGION_BEGIN) &&
- (region_space != ACPI_ADR_SPACE_DATA_TABLE)) {
- ACPI_ERROR((AE_INFO, "Invalid AddressSpace type 0x%X",
- region_space));
- return_ACPI_STATUS(AE_AML_INVALID_SPACE_ID);
+ if (!acpi_is_valid_space_id(space_id)) {
+ /*
+ * Print an error message, but continue. We don't want to abort
+ * a table load for this exception. Instead, if the region is
+ * actually used at runtime, abort the executing method.
+ */
+ ACPI_ERROR((AE_INFO,
+ "Invalid/unknown Address Space ID: 0x%2.2X",
+ space_id));
}
ACPI_DEBUG_PRINT((ACPI_DB_LOAD, "Region Type - %s (0x%X)\n",
- acpi_ut_get_region_name(region_space), region_space));
+ acpi_ut_get_region_name(space_id), space_id));
/* Create the region descriptor */
@@ -330,10 +333,16 @@ acpi_ex_create_region(u8 * aml_start,
region_obj2 = obj_desc->common.next_object;
region_obj2->extra.aml_start = aml_start;
region_obj2->extra.aml_length = aml_length;
+ if (walk_state->scope_info) {
+ region_obj2->extra.scope_node =
+ walk_state->scope_info->scope.node;
+ } else {
+ region_obj2->extra.scope_node = node;
+ }
/* Init the region from the operands */
- obj_desc->region.space_id = region_space;
+ obj_desc->region.space_id = space_id;
obj_desc->region.address = 0;
obj_desc->region.length = 0;
obj_desc->region.node = node;
diff --git a/drivers/acpi/acpica/exdebug.c b/drivers/acpi/acpica/exdebug.c
index c7a2f1edd282..e211e9c19215 100644
--- a/drivers/acpi/acpica/exdebug.c
+++ b/drivers/acpi/acpica/exdebug.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
index 61b8c0e8b74d..2a6ac0a3bc1e 100644
--- a/drivers/acpi/acpica/exdump.c
+++ b/drivers/acpi/acpica/exdump.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -192,10 +192,13 @@ static struct acpi_exdump_info acpi_ex_dump_buffer_field[3] = {
"Buffer Object"}
};
-static struct acpi_exdump_info acpi_ex_dump_region_field[3] = {
+static struct acpi_exdump_info acpi_ex_dump_region_field[5] = {
{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_region_field), NULL},
{ACPI_EXD_FIELD, 0, NULL},
- {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(field.region_obj), "Region Object"}
+ {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(field.access_length), "AccessLength"},
+ {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(field.region_obj), "Region Object"},
+ {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(field.resource_buffer),
+ "ResourceBuffer"}
};
static struct acpi_exdump_info acpi_ex_dump_bank_field[5] = {
diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c
index 0bde2230c028..dc092f5b35d6 100644
--- a/drivers/acpi/acpica/exfield.c
+++ b/drivers/acpi/acpica/exfield.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -100,18 +100,25 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
(obj_desc->field.region_obj->region.space_id ==
ACPI_ADR_SPACE_SMBUS
|| obj_desc->field.region_obj->region.space_id ==
+ ACPI_ADR_SPACE_GSBUS
+ || obj_desc->field.region_obj->region.space_id ==
ACPI_ADR_SPACE_IPMI)) {
/*
- * This is an SMBus or IPMI read. We must create a buffer to hold
+ * This is an SMBus, GSBus or IPMI read. We must create a buffer to hold
* the data and then directly access the region handler.
*
- * Note: Smbus protocol value is passed in upper 16-bits of Function
+ * Note: SMBus and GSBus protocol value is passed in upper 16-bits of Function
*/
if (obj_desc->field.region_obj->region.space_id ==
ACPI_ADR_SPACE_SMBUS) {
length = ACPI_SMBUS_BUFFER_SIZE;
function =
ACPI_READ | (obj_desc->field.attribute << 16);
+ } else if (obj_desc->field.region_obj->region.space_id ==
+ ACPI_ADR_SPACE_GSBUS) {
+ length = ACPI_GSBUS_BUFFER_SIZE;
+ function =
+ ACPI_READ | (obj_desc->field.attribute << 16);
} else { /* IPMI */
length = ACPI_IPMI_BUFFER_SIZE;
@@ -248,21 +255,23 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
(obj_desc->field.region_obj->region.space_id ==
ACPI_ADR_SPACE_SMBUS
|| obj_desc->field.region_obj->region.space_id ==
+ ACPI_ADR_SPACE_GSBUS
+ || obj_desc->field.region_obj->region.space_id ==
ACPI_ADR_SPACE_IPMI)) {
/*
- * This is an SMBus or IPMI write. We will bypass the entire field
+ * This is an SMBus, GSBus or IPMI write. We will bypass the entire field
* mechanism and handoff the buffer directly to the handler. For
* these address spaces, the buffer is bi-directional; on a write,
* return data is returned in the same buffer.
*
* Source must be a buffer of sufficient size:
- * ACPI_SMBUS_BUFFER_SIZE or ACPI_IPMI_BUFFER_SIZE.
+ * ACPI_SMBUS_BUFFER_SIZE, ACPI_GSBUS_BUFFER_SIZE, or ACPI_IPMI_BUFFER_SIZE.
*
- * Note: SMBus protocol type is passed in upper 16-bits of Function
+ * Note: SMBus and GSBus protocol type is passed in upper 16-bits of Function
*/
if (source_desc->common.type != ACPI_TYPE_BUFFER) {
ACPI_ERROR((AE_INFO,
- "SMBus or IPMI write requires Buffer, found type %s",
+ "SMBus/IPMI/GenericSerialBus write requires Buffer, found type %s",
acpi_ut_get_object_type_name(source_desc)));
return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
@@ -273,6 +282,11 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
length = ACPI_SMBUS_BUFFER_SIZE;
function =
ACPI_WRITE | (obj_desc->field.attribute << 16);
+ } else if (obj_desc->field.region_obj->region.space_id ==
+ ACPI_ADR_SPACE_GSBUS) {
+ length = ACPI_GSBUS_BUFFER_SIZE;
+ function =
+ ACPI_WRITE | (obj_desc->field.attribute << 16);
} else { /* IPMI */
length = ACPI_IPMI_BUFFER_SIZE;
@@ -281,7 +295,7 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
if (source_desc->buffer.length < length) {
ACPI_ERROR((AE_INFO,
- "SMBus or IPMI write requires Buffer of length %u, found length %u",
+ "SMBus/IPMI/GenericSerialBus write requires Buffer of length %u, found length %u",
length, source_desc->buffer.length));
return_ACPI_STATUS(AE_AML_BUFFER_LIMIT);
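Both the read and write paths above encode the field's protocol attribute in the upper 16 bits of the Function argument for SMBus, GSBus and IPMI accesses. Below is a small sketch of that encoding and its decode on the handler side; the OP_READ/OP_WRITE values and helper names are assumptions for illustration.

/* Sketch of the Function encoding used for buffer-protocol address spaces. */
#include <stdint.h>
#include <stdio.h>

#define OP_READ   0u
#define OP_WRITE  1u

static uint32_t encode_function(uint32_t op, uint8_t attribute)
{
    return op | ((uint32_t)attribute << 16);
}

static void decode_function(uint32_t function, uint32_t *op, uint8_t *attribute)
{
    *op        = function & 0xFFFF;            /* read or write */
    *attribute = (uint8_t)(function >> 16);    /* protocol, e.g. the block attribute */
}

int main(void)
{
    uint32_t op;
    uint8_t attribute;

    /* A write using the block protocol (0x0A in the attribute enum above) */
    decode_function(encode_function(OP_WRITE, 0x0A), &op, &attribute);
    printf("op %u, protocol attribute 0x%02X\n", op, attribute);
    return 0;
}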
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
index f915a7f3f921..149de45fdadd 100644
--- a/drivers/acpi/acpica/exfldio.c
+++ b/drivers/acpi/acpica/exfldio.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -86,6 +86,7 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
{
acpi_status status = AE_OK;
union acpi_operand_object *rgn_desc;
+ u8 space_id;
ACPI_FUNCTION_TRACE_U32(ex_setup_region, field_datum_byte_offset);
@@ -101,6 +102,17 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
}
+ space_id = rgn_desc->region.space_id;
+
+ /* Validate the Space ID */
+
+ if (!acpi_is_valid_space_id(space_id)) {
+ ACPI_ERROR((AE_INFO,
+ "Invalid/unknown Address Space ID: 0x%2.2X",
+ space_id));
+ return_ACPI_STATUS(AE_AML_INVALID_SPACE_ID);
+ }
+
/*
* If the Region Address and Length have not been previously evaluated,
* evaluate them now and save the results.
@@ -119,11 +131,12 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
}
/*
- * Exit now for SMBus or IPMI address space, it has a non-linear
+ * Exit now for SMBus, GSBus or IPMI address space, it has a non-linear
* address space and the request cannot be directly validated
*/
- if (rgn_desc->region.space_id == ACPI_ADR_SPACE_SMBUS ||
- rgn_desc->region.space_id == ACPI_ADR_SPACE_IPMI) {
+ if (space_id == ACPI_ADR_SPACE_SMBUS ||
+ space_id == ACPI_ADR_SPACE_GSBUS ||
+ space_id == ACPI_ADR_SPACE_IPMI) {
/* SMBus or IPMI has a non-linear address space */
@@ -271,11 +284,12 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc,
/* Invoke the appropriate address_space/op_region handler */
- status =
- acpi_ev_address_space_dispatch(rgn_desc, function, region_offset,
- ACPI_MUL_8(obj_desc->common_field.
- access_byte_width),
- value);
+ status = acpi_ev_address_space_dispatch(rgn_desc, obj_desc,
+ function, region_offset,
+ ACPI_MUL_8(obj_desc->
+ common_field.
+ access_byte_width),
+ value);
if (ACPI_FAILURE(status)) {
if (status == AE_NOT_IMPLEMENTED) {
@@ -316,6 +330,7 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc,
static u8
acpi_ex_register_overflow(union acpi_operand_object *obj_desc, u64 value)
{
+ ACPI_FUNCTION_NAME(ex_register_overflow);
if (obj_desc->common_field.bit_length >= ACPI_INTEGER_BIT_SIZE) {
/*
@@ -330,6 +345,11 @@ acpi_ex_register_overflow(union acpi_operand_object *obj_desc, u64 value)
* The Value is larger than the maximum value that can fit into
* the register.
*/
+ ACPI_ERROR((AE_INFO,
+ "Index value 0x%8.8X%8.8X overflows field width 0x%X",
+ ACPI_FORMAT_UINT64(value),
+ obj_desc->common_field.bit_length));
+
return (TRUE);
}
diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c
index 703d88ed0b3d..0a0893310348 100644
--- a/drivers/acpi/acpica/exmisc.c
+++ b/drivers/acpi/acpica/exmisc.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exmutex.c b/drivers/acpi/acpica/exmutex.c
index be1c56ead653..60933e9dc3c0 100644
--- a/drivers/acpi/acpica/exmutex.c
+++ b/drivers/acpi/acpica/exmutex.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exnames.c b/drivers/acpi/acpica/exnames.c
index 49ec049c157e..fcc75fa27d32 100644
--- a/drivers/acpi/acpica/exnames.c
+++ b/drivers/acpi/acpica/exnames.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg1.c b/drivers/acpi/acpica/exoparg1.c
index 236ead14b7f7..9ba8c73cea16 100644
--- a/drivers/acpi/acpica/exoparg1.c
+++ b/drivers/acpi/acpica/exoparg1.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg2.c b/drivers/acpi/acpica/exoparg2.c
index 2571b4a310f4..879e8a277b94 100644
--- a/drivers/acpi/acpica/exoparg2.c
+++ b/drivers/acpi/acpica/exoparg2.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg3.c b/drivers/acpi/acpica/exoparg3.c
index 1b48d9d28c9a..71fcc65c9ffa 100644
--- a/drivers/acpi/acpica/exoparg3.c
+++ b/drivers/acpi/acpica/exoparg3.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg6.c b/drivers/acpi/acpica/exoparg6.c
index f4a2787e8e92..0786b8659061 100644
--- a/drivers/acpi/acpica/exoparg6.c
+++ b/drivers/acpi/acpica/exoparg6.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c
index cc95e2000406..30157f5a12d7 100644
--- a/drivers/acpi/acpica/exprep.c
+++ b/drivers/acpi/acpica/exprep.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -47,6 +47,7 @@
#include "acinterp.h"
#include "amlcode.h"
#include "acnamesp.h"
+#include "acdispat.h"
#define _COMPONENT ACPI_EXECUTER
ACPI_MODULE_NAME("exprep")
@@ -455,6 +456,30 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
obj_desc->field.region_obj =
acpi_ns_get_attached_object(info->region_node);
+ /* Fields specific to generic_serial_bus fields */
+
+ obj_desc->field.access_length = info->access_length;
+
+ if (info->connection_node) {
+ second_desc = info->connection_node->object;
+ if (!(second_desc->common.flags & AOPOBJ_DATA_VALID)) {
+ status =
+ acpi_ds_get_buffer_arguments(second_desc);
+ if (ACPI_FAILURE(status)) {
+ acpi_ut_delete_object_desc(obj_desc);
+ return_ACPI_STATUS(status);
+ }
+ }
+
+ obj_desc->field.resource_buffer =
+ second_desc->buffer.pointer;
+ obj_desc->field.resource_length =
+ (u16)second_desc->buffer.length;
+ } else if (info->resource_buffer) {
+ obj_desc->field.resource_buffer = info->resource_buffer;
+ obj_desc->field.resource_length = info->resource_length;
+ }
+
/* Allow full data read from EC address space */
if ((obj_desc->field.region_obj->region.space_id ==
diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
index f0d5e14f1f2c..12d51df6d3bf 100644
--- a/drivers/acpi/acpica/exregion.c
+++ b/drivers/acpi/acpica/exregion.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exresnte.c b/drivers/acpi/acpica/exresnte.c
index 55997e46948b..fa50e77e64a8 100644
--- a/drivers/acpi/acpica/exresnte.c
+++ b/drivers/acpi/acpica/exresnte.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exresolv.c b/drivers/acpi/acpica/exresolv.c
index db502cd7d934..6e335dc34528 100644
--- a/drivers/acpi/acpica/exresolv.c
+++ b/drivers/acpi/acpica/exresolv.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exresop.c b/drivers/acpi/acpica/exresop.c
index e3bb00ccdff5..a67b1d925ddd 100644
--- a/drivers/acpi/acpica/exresop.c
+++ b/drivers/acpi/acpica/exresop.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exstore.c b/drivers/acpi/acpica/exstore.c
index c0c8842dd344..c6cf843cc4c9 100644
--- a/drivers/acpi/acpica/exstore.c
+++ b/drivers/acpi/acpica/exstore.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exstoren.c b/drivers/acpi/acpica/exstoren.c
index a979017d56b8..b35bed52e061 100644
--- a/drivers/acpi/acpica/exstoren.c
+++ b/drivers/acpi/acpica/exstoren.c
@@ -7,7 +7,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exstorob.c b/drivers/acpi/acpica/exstorob.c
index dc665cc554de..65a45d8335c8 100644
--- a/drivers/acpi/acpica/exstorob.c
+++ b/drivers/acpi/acpica/exstorob.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exsystem.c b/drivers/acpi/acpica/exsystem.c
index df66e7b686be..191a12945226 100644
--- a/drivers/acpi/acpica/exsystem.c
+++ b/drivers/acpi/acpica/exsystem.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exutils.c b/drivers/acpi/acpica/exutils.c
index 8ad93146dd32..eb6798ba8b59 100644
--- a/drivers/acpi/acpica/exutils.c
+++ b/drivers/acpi/acpica/exutils.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -435,4 +435,29 @@ void acpi_ex_integer_to_string(char *out_string, u64 value)
}
}
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_is_valid_space_id
+ *
+ * PARAMETERS: space_id - ID to be validated
+ *
+ * RETURN: TRUE if valid/supported ID.
+ *
+ * DESCRIPTION: Validate an operation region space_id.
+ *
+ ******************************************************************************/
+
+u8 acpi_is_valid_space_id(u8 space_id)
+{
+
+ if ((space_id >= ACPI_NUM_PREDEFINED_REGIONS) &&
+ (space_id < ACPI_USER_REGION_BEGIN) &&
+ (space_id != ACPI_ADR_SPACE_DATA_TABLE) &&
+ (space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) {
+ return (FALSE);
+ }
+
+ return (TRUE);
+}
+
#endif
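
The new acpi_is_valid_space_id() helper accepts the predefined region IDs, the user-defined range beginning at ACPI_USER_REGION_BEGIN, and the two special IDs (DataTable and FixedHardware), rejecting everything in between. Below is a minimal stand-alone sketch of the same range check; the numeric constants are assumed illustrative values, not the real ACPICA definitions.

#include <stdio.h>

/* Assumed illustrative values; the real constants live in the ACPICA headers */
#define NUM_PREDEFINED_REGIONS  0x0B
#define USER_REGION_BEGIN       0x80
#define SPACE_ID_DATA_TABLE     0x7E
#define SPACE_ID_FIXED_HW       0x7F

static unsigned char is_valid_space_id(unsigned char space_id)
{
        /* Reject IDs that are neither predefined, user-defined, nor special */
        if ((space_id >= NUM_PREDEFINED_REGIONS) &&
            (space_id < USER_REGION_BEGIN) &&
            (space_id != SPACE_ID_DATA_TABLE) &&
            (space_id != SPACE_ID_FIXED_HW)) {
                return 0;
        }
        return 1;
}

int main(void)
{
        printf("0x05: %u\n", is_valid_space_id(0x05));  /* predefined   -> 1 */
        printf("0x20: %u\n", is_valid_space_id(0x20));  /* reserved     -> 0 */
        printf("0x80: %u\n", is_valid_space_id(0x80));  /* user-defined -> 1 */
        return 0;
}
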
diff --git a/drivers/acpi/acpica/hwacpi.c b/drivers/acpi/acpica/hwacpi.c
index fc380d3d45ab..d21ec5f0b3a9 100644
--- a/drivers/acpi/acpica/hwacpi.c
+++ b/drivers/acpi/acpica/hwacpi.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
index f610d88a66be..1a6894afef79 100644
--- a/drivers/acpi/acpica/hwgpe.c
+++ b/drivers/acpi/acpica/hwgpe.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwpci.c b/drivers/acpi/acpica/hwpci.c
index 050fd227951b..1455ddcdc32c 100644
--- a/drivers/acpi/acpica/hwpci.c
+++ b/drivers/acpi/acpica/hwpci.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
index cc70f3fdcdd1..4ea4eeb51bfd 100644
--- a/drivers/acpi/acpica/hwregs.c
+++ b/drivers/acpi/acpica/hwregs.c
@@ -7,7 +7,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c
index d52da3073650..3c4a922a9fc2 100644
--- a/drivers/acpi/acpica/hwsleep.c
+++ b/drivers/acpi/acpica/hwsleep.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwtimer.c b/drivers/acpi/acpica/hwtimer.c
index 50d21c40b5c1..d4973d9da9f1 100644
--- a/drivers/acpi/acpica/hwtimer.c
+++ b/drivers/acpi/acpica/hwtimer.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c
index 5f1605874655..6e5c43a60bb7 100644
--- a/drivers/acpi/acpica/hwvalid.c
+++ b/drivers/acpi/acpica/hwvalid.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -134,6 +134,8 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
/* Supported widths are 8/16/32 */
if ((bit_width != 8) && (bit_width != 16) && (bit_width != 32)) {
+ ACPI_ERROR((AE_INFO,
+ "Bad BitWidth parameter: %8.8X", bit_width));
return AE_BAD_PARAMETER;
}
diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c
index c2793a82f120..9d38eb6c0d0b 100644
--- a/drivers/acpi/acpica/hwxface.c
+++ b/drivers/acpi/acpica/hwxface.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -356,7 +356,7 @@ ACPI_EXPORT_SYMBOL(acpi_read_bit_register)
*
* PARAMETERS: register_id - ID of ACPI Bit Register to access
* Value - Value to write to the register, in bit
- * position zero. The bit is automaticallly
+ * position zero. The bit is automatically
* shifted to the correct position.
*
* RETURN: Status
diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c
index d93172fd15a8..61623f3f6826 100644
--- a/drivers/acpi/acpica/nsaccess.c
+++ b/drivers/acpi/acpica/nsaccess.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsalloc.c b/drivers/acpi/acpica/nsalloc.c
index 1d0ef15d158f..7c3d3ceb98b3 100644
--- a/drivers/acpi/acpica/nsalloc.c
+++ b/drivers/acpi/acpica/nsalloc.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
index b683cc2ff9d3..b7f2b3be79ac 100644
--- a/drivers/acpi/acpica/nsdump.c
+++ b/drivers/acpi/acpica/nsdump.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsdumpdv.c b/drivers/acpi/acpica/nsdumpdv.c
index 2ed294b7a4db..30ea5bc53a78 100644
--- a/drivers/acpi/acpica/nsdumpdv.c
+++ b/drivers/acpi/acpica/nsdumpdv.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nseval.c b/drivers/acpi/acpica/nseval.c
index c1bd02b1a058..f375cb82e321 100644
--- a/drivers/acpi/acpica/nseval.c
+++ b/drivers/acpi/acpica/nseval.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c
index fd7c6380e294..9d84ec2f0211 100644
--- a/drivers/acpi/acpica/nsinit.c
+++ b/drivers/acpi/acpica/nsinit.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c
index 5f7dc691c183..5cbf15ffe7d8 100644
--- a/drivers/acpi/acpica/nsload.c
+++ b/drivers/acpi/acpica/nsload.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c
index d5fa520c3de5..b20e7c8c3ffb 100644
--- a/drivers/acpi/acpica/nsnames.c
+++ b/drivers/acpi/acpica/nsnames.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsobject.c b/drivers/acpi/acpica/nsobject.c
index 3bb8bf105ea2..dd77a3ce6e50 100644
--- a/drivers/acpi/acpica/nsobject.c
+++ b/drivers/acpi/acpica/nsobject.c
@@ -6,7 +6,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsparse.c b/drivers/acpi/acpica/nsparse.c
index b3234fa795b8..ec7ba2d3463c 100644
--- a/drivers/acpi/acpica/nsparse.c
+++ b/drivers/acpi/acpica/nsparse.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c
index c845c8089f39..bbe46a447d34 100644
--- a/drivers/acpi/acpica/nspredef.c
+++ b/drivers/acpi/acpica/nspredef.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -620,6 +620,7 @@ acpi_ns_check_package(struct acpi_predefined_data *data,
case ACPI_PTYPE2_FIXED:
case ACPI_PTYPE2_MIN:
case ACPI_PTYPE2_COUNT:
+ case ACPI_PTYPE2_FIX_VAR:
/*
* These types all return a single Package that consists of a
@@ -759,6 +760,34 @@ acpi_ns_check_package_list(struct acpi_predefined_data *data,
}
break;
+ case ACPI_PTYPE2_FIX_VAR:
+ /*
+ * Each subpackage has a fixed number of elements and a
+ * variable number of optional elements
+ */
+ expected_count =
+ package->ret_info.count1 + package->ret_info.count2;
+ if (sub_package->package.count < expected_count) {
+ goto package_too_small;
+ }
+
+ status =
+ acpi_ns_check_package_elements(data, sub_elements,
+ package->ret_info.
+ object_type1,
+ package->ret_info.
+ count1,
+ package->ret_info.
+ object_type2,
+ sub_package->package.
+ count -
+ package->ret_info.
+ count1, 0);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ break;
+
case ACPI_PTYPE2_FIXED:
/* Each sub-package has a fixed length */
diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c
index ac7b854b0bd7..9c35d20eb52b 100644
--- a/drivers/acpi/acpica/nsrepair.c
+++ b/drivers/acpi/acpica/nsrepair.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -634,6 +634,7 @@ acpi_ns_remove_null_elements(struct acpi_predefined_data *data,
case ACPI_PTYPE2_FIXED:
case ACPI_PTYPE2_MIN:
case ACPI_PTYPE2_REV_FIXED:
+ case ACPI_PTYPE2_FIX_VAR:
break;
default:
diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c
index 024c4f263f87..726bc8e687f7 100644
--- a/drivers/acpi/acpica/nsrepair2.c
+++ b/drivers/acpi/acpica/nsrepair2.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -467,11 +467,12 @@ acpi_ns_repair_HID(struct acpi_predefined_data *data,
}
/*
- * Copy and uppercase the string. From the ACPI specification:
+ * Copy and uppercase the string. From the ACPI 5.0 specification:
*
* A valid PNP ID must be of the form "AAA####" where A is an uppercase
* letter and # is a hex digit. A valid ACPI ID must be of the form
- * "ACPI####" where # is a hex digit.
+ * "NNNN####" where N is an uppercase letter or decimal digit, and
+ * # is a hex digit.
*/
for (dest = new_string->string.pointer; *source; dest++, source++) {
*dest = (char)ACPI_TOUPPER(*source);
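
The _HID repair itself is only a copy-and-uppercase loop over the returned string; the comment above documents the ID forms the result may take. A small stand-alone sketch of the same normalization follows (the names here are illustrative; the real code writes into a newly created ACPI string object).

#include <ctype.h>
#include <stdio.h>

/* Copy and uppercase an ID string, e.g. "pnp0c0a" -> "PNP0C0A" */
static void normalize_hid(const char *source, char *dest, unsigned int dest_size)
{
        unsigned int i;

        for (i = 0; source[i] && (i + 1) < dest_size; i++) {
                dest[i] = (char)toupper((unsigned char)source[i]);
        }
        dest[i] = '\0';
}

int main(void)
{
        char fixed[16];

        normalize_hid("pnp0c0a", fixed, sizeof(fixed));
        printf("%s\n", fixed);  /* PNP0C0A */
        return 0;
}
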
diff --git a/drivers/acpi/acpica/nssearch.c b/drivers/acpi/acpica/nssearch.c
index 28b0d7a62b99..507043d66114 100644
--- a/drivers/acpi/acpica/nssearch.c
+++ b/drivers/acpi/acpica/nssearch.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index cb1b104a69a2..a535b7afda5c 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nswalk.c b/drivers/acpi/acpica/nswalk.c
index 345f0c3c6ad2..f69895a54895 100644
--- a/drivers/acpi/acpica/nswalk.c
+++ b/drivers/acpi/acpica/nswalk.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsxfeval.c b/drivers/acpi/acpica/nsxfeval.c
index e7f016d1b226..71d15f61807b 100644
--- a/drivers/acpi/acpica/nsxfeval.c
+++ b/drivers/acpi/acpica/nsxfeval.c
@@ -6,7 +6,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c
index 83bf93024303..af401c9c4dfc 100644
--- a/drivers/acpi/acpica/nsxfname.c
+++ b/drivers/acpi/acpica/nsxfname.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsxfobj.c b/drivers/acpi/acpica/nsxfobj.c
index 57e6d825ed84..880a605cee20 100644
--- a/drivers/acpi/acpica/nsxfobj.c
+++ b/drivers/acpi/acpica/nsxfobj.c
@@ -6,7 +6,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c
index e1fad0ee0136..5ac36aba507c 100644
--- a/drivers/acpi/acpica/psargs.c
+++ b/drivers/acpi/acpica/psargs.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -484,34 +484,54 @@ acpi_ps_get_next_simple_arg(struct acpi_parse_state *parser_state,
static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state
*parser_state)
{
- u32 aml_offset = (u32)
- ACPI_PTR_DIFF(parser_state->aml,
- parser_state->aml_start);
+ u32 aml_offset;
union acpi_parse_object *field;
+ union acpi_parse_object *arg = NULL;
u16 opcode;
u32 name;
+ u8 access_type;
+ u8 access_attribute;
+ u8 access_length;
+ u32 pkg_length;
+ u8 *pkg_end;
+ u32 buffer_length;
ACPI_FUNCTION_TRACE(ps_get_next_field);
+ aml_offset =
+ (u32)ACPI_PTR_DIFF(parser_state->aml, parser_state->aml_start);
+
/* Determine field type */
switch (ACPI_GET8(parser_state->aml)) {
- default:
+ case AML_FIELD_OFFSET_OP:
- opcode = AML_INT_NAMEDFIELD_OP;
+ opcode = AML_INT_RESERVEDFIELD_OP;
+ parser_state->aml++;
break;
- case 0x00:
+ case AML_FIELD_ACCESS_OP:
- opcode = AML_INT_RESERVEDFIELD_OP;
+ opcode = AML_INT_ACCESSFIELD_OP;
parser_state->aml++;
break;
- case 0x01:
+ case AML_FIELD_CONNECTION_OP:
- opcode = AML_INT_ACCESSFIELD_OP;
+ opcode = AML_INT_CONNECTION_OP;
+ parser_state->aml++;
+ break;
+
+ case AML_FIELD_EXT_ACCESS_OP:
+
+ opcode = AML_INT_EXTACCESSFIELD_OP;
parser_state->aml++;
break;
+
+ default:
+
+ opcode = AML_INT_NAMEDFIELD_OP;
+ break;
}
/* Allocate a new field op */
@@ -549,16 +569,111 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state
break;
case AML_INT_ACCESSFIELD_OP:
+ case AML_INT_EXTACCESSFIELD_OP:
/*
* Get access_type and access_attrib and merge into the field Op
- * access_type is first operand, access_attribute is second
+ * access_type is first operand, access_attribute is second. Stuff
+ * these bytes into the node integer value for convenience.
*/
- field->common.value.integer =
- (((u32) ACPI_GET8(parser_state->aml) << 8));
+
+ /* Get the two bytes (Type/Attribute) */
+
+ access_type = ACPI_GET8(parser_state->aml);
parser_state->aml++;
- field->common.value.integer |= ACPI_GET8(parser_state->aml);
+ access_attribute = ACPI_GET8(parser_state->aml);
parser_state->aml++;
+
+ field->common.value.integer = (u8)access_type;
+ field->common.value.integer |= (u16)(access_attribute << 8);
+
+ /* This opcode has a third byte, access_length */
+
+ if (opcode == AML_INT_EXTACCESSFIELD_OP) {
+ access_length = ACPI_GET8(parser_state->aml);
+ parser_state->aml++;
+
+ field->common.value.integer |=
+ (u32)(access_length << 16);
+ }
+ break;
+
+ case AML_INT_CONNECTION_OP:
+
+ /*
+ * Argument for Connection operator can be either a Buffer
+ * (resource descriptor), or a name_string.
+ */
+ if (ACPI_GET8(parser_state->aml) == AML_BUFFER_OP) {
+ parser_state->aml++;
+
+ pkg_end = parser_state->aml;
+ pkg_length =
+ acpi_ps_get_next_package_length(parser_state);
+ pkg_end += pkg_length;
+
+ if (parser_state->aml < pkg_end) {
+
+ /* Non-empty list */
+
+ arg = acpi_ps_alloc_op(AML_INT_BYTELIST_OP);
+ if (!arg) {
+ return_PTR(NULL);
+ }
+
+ /* Get the actual buffer length argument */
+
+ opcode = ACPI_GET8(parser_state->aml);
+ parser_state->aml++;
+
+ switch (opcode) {
+ case AML_BYTE_OP: /* AML_BYTEDATA_ARG */
+ buffer_length =
+ ACPI_GET8(parser_state->aml);
+ parser_state->aml += 1;
+ break;
+
+ case AML_WORD_OP: /* AML_WORDDATA_ARG */
+ buffer_length =
+ ACPI_GET16(parser_state->aml);
+ parser_state->aml += 2;
+ break;
+
+ case AML_DWORD_OP: /* AML_DWORDDATA_ARG */
+ buffer_length =
+ ACPI_GET32(parser_state->aml);
+ parser_state->aml += 4;
+ break;
+
+ default:
+ buffer_length = 0;
+ break;
+ }
+
+ /* Fill in bytelist data */
+
+ arg->named.value.size = buffer_length;
+ arg->named.data = parser_state->aml;
+ }
+
+ /* Skip to End of byte data */
+
+ parser_state->aml = pkg_end;
+ } else {
+ arg = acpi_ps_alloc_op(AML_INT_NAMEPATH_OP);
+ if (!arg) {
+ return_PTR(NULL);
+ }
+
+ /* Get the Namestring argument */
+
+ arg->common.value.name =
+ acpi_ps_get_next_namestring(parser_state);
+ }
+
+ /* Link the buffer/namestring to parent (CONNECTION_OP) */
+
+ acpi_ps_append_arg(field, arg);
break;
default:
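
For AccessField and ExtAccessField entries the parser now stuffs the operand bytes into one integer: access_type in bits 0-7, access_attribute in bits 8-15, and (for the extended form only) access_length in bits 16-23. A stand-alone sketch of that packing, with illustrative names rather than the real parser structures:

#include <stdint.h>
#include <stdio.h>

/* Pack the AccessField bytes the way the hunk above does */
static uint32_t pack_access_field(uint8_t access_type, uint8_t access_attribute,
                                  uint8_t access_length)
{
        uint32_t value;

        value  = (uint32_t)access_type;                 /* bits  0..7  */
        value |= (uint32_t)access_attribute << 8;       /* bits  8..15 */
        value |= (uint32_t)access_length << 16;         /* bits 16..23, ExtAccessField only */
        return value;
}

int main(void)
{
        printf("0x%06X\n", pack_access_field(0x01, 0x0B, 0x04));        /* 0x040B01 */
        return 0;
}
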
diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c
index 01dd70d1de51..9547ad8a620b 100644
--- a/drivers/acpi/acpica/psloop.c
+++ b/drivers/acpi/acpica/psloop.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psopcode.c b/drivers/acpi/acpica/psopcode.c
index bed08de7528c..a0226fdcf75c 100644
--- a/drivers/acpi/acpica/psopcode.c
+++ b/drivers/acpi/acpica/psopcode.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -638,7 +638,16 @@ const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES] = {
/* 7E */ ACPI_OP("Timer", ARGP_TIMER_OP, ARGI_TIMER_OP, ACPI_TYPE_ANY,
AML_CLASS_EXECUTE, AML_TYPE_EXEC_0A_0T_1R,
- AML_FLAGS_EXEC_0A_0T_1R)
+ AML_FLAGS_EXEC_0A_0T_1R),
+
+/* ACPI 5.0 opcodes */
+
+/* 7F */ ACPI_OP("-ConnectField-", ARGP_CONNECTFIELD_OP,
+ ARGI_CONNECTFIELD_OP, ACPI_TYPE_ANY,
+ AML_CLASS_INTERNAL, AML_TYPE_BOGUS, AML_HAS_ARGS),
+/* 80 */ ACPI_OP("-ExtAccessField-", ARGP_CONNECTFIELD_OP,
+ ARGI_CONNECTFIELD_OP, ACPI_TYPE_ANY,
+ AML_CLASS_INTERNAL, AML_TYPE_BOGUS, 0)
/*! [End] no source code translation !*/
};
@@ -657,7 +666,7 @@ static const u8 acpi_gbl_short_op_index[256] = {
/* 0x20 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
/* 0x28 */ _UNK, _UNK, _UNK, _UNK, _UNK, 0x63, _PFX, _PFX,
/* 0x30 */ 0x67, 0x66, 0x68, 0x65, 0x69, 0x64, 0x6A, 0x7D,
-/* 0x38 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0x38 */ 0x7F, 0x80, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
/* 0x40 */ _UNK, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC,
/* 0x48 */ _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC,
/* 0x50 */ _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC,
diff --git a/drivers/acpi/acpica/psparse.c b/drivers/acpi/acpica/psparse.c
index 9bb0cbd37b5e..2ff9c35a1968 100644
--- a/drivers/acpi/acpica/psparse.c
+++ b/drivers/acpi/acpica/psparse.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psscope.c b/drivers/acpi/acpica/psscope.c
index a5faa1323a02..c872aa4b926e 100644
--- a/drivers/acpi/acpica/psscope.c
+++ b/drivers/acpi/acpica/psscope.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/pstree.c b/drivers/acpi/acpica/pstree.c
index f1464c03aa42..2b03cdbbe1c0 100644
--- a/drivers/acpi/acpica/pstree.c
+++ b/drivers/acpi/acpica/pstree.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -74,6 +74,12 @@ union acpi_parse_object *acpi_ps_get_arg(union acpi_parse_object *op, u32 argn)
ACPI_FUNCTION_ENTRY();
+/*
+ if (Op->Common.aml_opcode == AML_INT_CONNECTION_OP)
+ {
+ return (Op->Common.Value.Arg);
+ }
+*/
/* Get the info structure for this opcode */
op_info = acpi_ps_get_opcode_info(op->common.aml_opcode);
diff --git a/drivers/acpi/acpica/psutils.c b/drivers/acpi/acpica/psutils.c
index 7eda78503422..13bb131ae125 100644
--- a/drivers/acpi/acpica/psutils.c
+++ b/drivers/acpi/acpica/psutils.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/pswalk.c b/drivers/acpi/acpica/pswalk.c
index 3312d6368bf1..ab96cf47896d 100644
--- a/drivers/acpi/acpica/pswalk.c
+++ b/drivers/acpi/acpica/pswalk.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psxface.c b/drivers/acpi/acpica/psxface.c
index 8086805d4494..9d98c5ff66a5 100644
--- a/drivers/acpi/acpica/psxface.c
+++ b/drivers/acpi/acpica/psxface.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsaddr.c b/drivers/acpi/acpica/rsaddr.c
index 9e66f9078426..a0305652394f 100644
--- a/drivers/acpi/acpica/rsaddr.c
+++ b/drivers/acpi/acpica/rsaddr.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rscalc.c b/drivers/acpi/acpica/rscalc.c
index 3a8a89ec2ca4..3c6df4b7eb2d 100644
--- a/drivers/acpi/acpica/rscalc.c
+++ b/drivers/acpi/acpica/rscalc.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -313,6 +313,38 @@ acpi_rs_get_aml_length(struct acpi_resource * resource, acpi_size * size_needed)
resource_source));
break;
+ case ACPI_RESOURCE_TYPE_GPIO:
+
+ total_size =
+ (acpi_rs_length) (total_size +
+ (resource->data.gpio.
+ pin_table_length * 2) +
+ resource->data.gpio.
+ resource_source.string_length +
+ resource->data.gpio.
+ vendor_length);
+
+ break;
+
+ case ACPI_RESOURCE_TYPE_SERIAL_BUS:
+
+ total_size =
+ acpi_gbl_aml_resource_serial_bus_sizes[resource->
+ data.
+ common_serial_bus.
+ type];
+
+ total_size = (acpi_rs_length) (total_size +
+ resource->data.
+ i2c_serial_bus.
+ resource_source.
+ string_length +
+ resource->data.
+ i2c_serial_bus.
+ vendor_length);
+
+ break;
+
default:
break;
}
@@ -362,10 +394,11 @@ acpi_rs_get_list_length(u8 * aml_buffer,
u32 extra_struct_bytes;
u8 resource_index;
u8 minimum_aml_resource_length;
+ union aml_resource *aml_resource;
ACPI_FUNCTION_TRACE(rs_get_list_length);
- *size_needed = 0;
+ *size_needed = ACPI_RS_SIZE_MIN; /* Minimum size is one end_tag */
end_aml = aml_buffer + aml_buffer_length;
/* Walk the list of AML resource descriptors */
@@ -376,9 +409,15 @@ acpi_rs_get_list_length(u8 * aml_buffer,
status = acpi_ut_validate_resource(aml_buffer, &resource_index);
if (ACPI_FAILURE(status)) {
+ /*
+ * Exit on failure. Cannot continue because the descriptor length
+ * may be bogus also.
+ */
return_ACPI_STATUS(status);
}
+ aml_resource = (void *)aml_buffer;
+
/* Get the resource length and base (minimum) AML size */
resource_length = acpi_ut_get_resource_length(aml_buffer);
@@ -422,10 +461,8 @@ acpi_rs_get_list_length(u8 * aml_buffer,
case ACPI_RESOURCE_NAME_END_TAG:
/*
- * End Tag:
- * This is the normal exit, add size of end_tag
+ * End Tag: This is the normal exit
*/
- *size_needed += ACPI_RS_SIZE_MIN;
return_ACPI_STATUS(AE_OK);
case ACPI_RESOURCE_NAME_ADDRESS32:
@@ -457,6 +494,33 @@ acpi_rs_get_list_length(u8 * aml_buffer,
minimum_aml_resource_length);
break;
+ case ACPI_RESOURCE_NAME_GPIO:
+
+ /* Vendor data is optional */
+
+ if (aml_resource->gpio.vendor_length) {
+ extra_struct_bytes +=
+ aml_resource->gpio.vendor_offset -
+ aml_resource->gpio.pin_table_offset +
+ aml_resource->gpio.vendor_length;
+ } else {
+ extra_struct_bytes +=
+ aml_resource->large_header.resource_length +
+ sizeof(struct aml_resource_large_header) -
+ aml_resource->gpio.pin_table_offset;
+ }
+ break;
+
+ case ACPI_RESOURCE_NAME_SERIAL_BUS:
+
+ minimum_aml_resource_length =
+ acpi_gbl_resource_aml_serial_bus_sizes
+ [aml_resource->common_serial_bus.type];
+ extra_struct_bytes +=
+ aml_resource->common_serial_bus.resource_length -
+ minimum_aml_resource_length;
+ break;
+
default:
break;
}
@@ -467,9 +531,18 @@ acpi_rs_get_list_length(u8 * aml_buffer,
* Important: Round the size up for the appropriate alignment. This
* is a requirement on IA64.
*/
- buffer_size = acpi_gbl_resource_struct_sizes[resource_index] +
- extra_struct_bytes;
- buffer_size = (u32) ACPI_ROUND_UP_TO_NATIVE_WORD(buffer_size);
+ if (acpi_ut_get_resource_type(aml_buffer) ==
+ ACPI_RESOURCE_NAME_SERIAL_BUS) {
+ buffer_size =
+ acpi_gbl_resource_struct_serial_bus_sizes
+ [aml_resource->common_serial_bus.type] +
+ extra_struct_bytes;
+ } else {
+ buffer_size =
+ acpi_gbl_resource_struct_sizes[resource_index] +
+ extra_struct_bytes;
+ }
+ buffer_size = (u32)ACPI_ROUND_UP_TO_NATIVE_WORD(buffer_size);
*size_needed += buffer_size;
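
For the new GPIO descriptor, the extra structure bytes cover the pin table plus any vendor data: when vendor data is present the span runs from the pin-table offset to the end of the vendor block, otherwise to the end of the descriptor. A small worked example of that arithmetic, with assumed field values chosen purely for illustration:

#include <stdio.h>

int main(void)
{
        unsigned int resource_length = 0x2C;    /* from the large-header descriptor */
        unsigned int large_header_size = 3;     /* descriptor-type byte + 16-bit length */
        unsigned int pin_table_offset = 0x17;
        unsigned int vendor_offset = 0x1F;
        unsigned int vendor_length = 4;
        unsigned int extra;

        if (vendor_length) {
                extra = vendor_offset - pin_table_offset + vendor_length;
        } else {
                extra = resource_length + large_header_size - pin_table_offset;
        }
        printf("extra struct bytes: %u\n", extra);      /* 12 */
        return 0;
}
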
diff --git a/drivers/acpi/acpica/rscreate.c b/drivers/acpi/acpica/rscreate.c
index 4ce6e1147e80..46d6eb38ae66 100644
--- a/drivers/acpi/acpica/rscreate.c
+++ b/drivers/acpi/acpica/rscreate.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -51,6 +51,70 @@ ACPI_MODULE_NAME("rscreate")
/*******************************************************************************
*
+ * FUNCTION: acpi_buffer_to_resource
+ *
+ * PARAMETERS: aml_buffer - Pointer to the resource byte stream
+ * aml_buffer_length - Length of the aml_buffer
+ * resource_ptr - Where the converted resource is returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Convert a raw AML buffer to a resource list
+ *
+ ******************************************************************************/
+acpi_status
+acpi_buffer_to_resource(u8 *aml_buffer,
+ u16 aml_buffer_length,
+ struct acpi_resource **resource_ptr)
+{
+ acpi_status status;
+ acpi_size list_size_needed;
+ void *resource;
+ void *current_resource_ptr;
+
+ /*
+ * Note: we allow AE_AML_NO_RESOURCE_END_TAG, since an end tag
+ * is not required here.
+ */
+
+ /* Get the required length for the converted resource */
+
+ status = acpi_rs_get_list_length(aml_buffer, aml_buffer_length,
+ &list_size_needed);
+ if (status == AE_AML_NO_RESOURCE_END_TAG) {
+ status = AE_OK;
+ }
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ /* Allocate a buffer for the converted resource */
+
+ resource = ACPI_ALLOCATE_ZEROED(list_size_needed);
+ current_resource_ptr = resource;
+ if (!resource) {
+ return (AE_NO_MEMORY);
+ }
+
+ /* Perform the AML-to-Resource conversion */
+
+ status = acpi_ut_walk_aml_resources(aml_buffer, aml_buffer_length,
+ acpi_rs_convert_aml_to_resources,
+ &current_resource_ptr);
+ if (status == AE_AML_NO_RESOURCE_END_TAG) {
+ status = AE_OK;
+ }
+ if (ACPI_FAILURE(status)) {
+ ACPI_FREE(resource);
+ } else {
+ *resource_ptr = resource;
+ }
+
+ return (status);
+}
+
+/*******************************************************************************
+ *
* FUNCTION: acpi_rs_create_resource_list
*
* PARAMETERS: aml_buffer - Pointer to the resource byte stream
@@ -66,9 +130,10 @@ ACPI_MODULE_NAME("rscreate")
* of device resources.
*
******************************************************************************/
+
acpi_status
acpi_rs_create_resource_list(union acpi_operand_object *aml_buffer,
- struct acpi_buffer *output_buffer)
+ struct acpi_buffer * output_buffer)
{
acpi_status status;
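
acpi_buffer_to_resource() follows ACPICA's usual two-pass pattern: first compute the converted list size (tolerating a missing end tag), then allocate a zeroed buffer and walk the raw AML filling in the result, freeing on failure. A stand-alone sketch of that pattern with toy types and functions; none of these names are the real ACPICA API.

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-ins: pretend each input byte expands to two output bytes */
static size_t get_list_length(const unsigned char *aml, size_t len)
{
        (void)aml;
        return len * 2;
}

static int convert(const unsigned char *aml, size_t len, unsigned char *out)
{
        size_t i;

        for (i = 0; i < len; i++) {     /* trivial "conversion" */
                out[2 * i] = aml[i];
                out[2 * i + 1] = 0;
        }
        return 0;
}

static unsigned char *buffer_to_resource(const unsigned char *aml, size_t len,
                                         size_t *out_len)
{
        size_t needed = get_list_length(aml, len);      /* pass 1: measure */
        unsigned char *res = calloc(1, needed);         /* allocate zeroed */

        if (!res)
                return NULL;
        if (convert(aml, len, res)) {                   /* pass 2: convert */
                free(res);                              /* free on failure */
                return NULL;
        }
        *out_len = needed;
        return res;
}

int main(void)
{
        unsigned char aml[] = { 0x47, 0x01 };
        size_t out_len = 0;
        unsigned char *res = buffer_to_resource(aml, sizeof(aml), &out_len);

        printf("converted %zu bytes\n", res ? out_len : 0);
        free(res);
        return 0;
}
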
diff --git a/drivers/acpi/acpica/rsdump.c b/drivers/acpi/acpica/rsdump.c
index 33db7520c74b..b4c581132393 100644
--- a/drivers/acpi/acpica/rsdump.c
+++ b/drivers/acpi/acpica/rsdump.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -61,11 +61,13 @@ static void acpi_rs_out_integer64(char *title, u64 value);
static void acpi_rs_out_title(char *title);
-static void acpi_rs_dump_byte_list(u16 length, u8 * data);
+static void acpi_rs_dump_byte_list(u16 length, u8 *data);
-static void acpi_rs_dump_dword_list(u8 length, u32 * data);
+static void acpi_rs_dump_word_list(u16 length, u16 *data);
-static void acpi_rs_dump_short_byte_list(u8 length, u8 * data);
+static void acpi_rs_dump_dword_list(u8 length, u32 *data);
+
+static void acpi_rs_dump_short_byte_list(u8 length, u8 *data);
static void
acpi_rs_dump_resource_source(struct acpi_resource_source *resource_source);
@@ -309,6 +311,125 @@ struct acpi_rsdump_info acpi_rs_dump_generic_reg[6] = {
{ACPI_RSD_UINT64, ACPI_RSD_OFFSET(generic_reg.address), "Address", NULL}
};
+struct acpi_rsdump_info acpi_rs_dump_gpio[16] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_gpio), "GPIO", NULL},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(gpio.revision_id), "RevisionId", NULL},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(gpio.connection_type),
+ "ConnectionType", acpi_gbl_ct_decode},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(gpio.producer_consumer),
+ "ProducerConsumer", acpi_gbl_consume_decode},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(gpio.pin_config), "PinConfig",
+ acpi_gbl_ppc_decode},
+ {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(gpio.sharable), "Sharable",
+ acpi_gbl_shr_decode},
+ {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(gpio.io_restriction),
+ "IoRestriction", acpi_gbl_ior_decode},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(gpio.triggering), "Triggering",
+ acpi_gbl_he_decode},
+ {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(gpio.polarity), "Polarity",
+ acpi_gbl_ll_decode},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(gpio.drive_strength), "DriveStrength",
+ NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(gpio.debounce_timeout),
+ "DebounceTimeout", NULL},
+ {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(gpio.resource_source),
+ "ResourceSource", NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(gpio.pin_table_length),
+ "PinTableLength", NULL},
+ {ACPI_RSD_WORDLIST, ACPI_RSD_OFFSET(gpio.pin_table), "PinTable", NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(gpio.vendor_length), "VendorLength",
+ NULL},
+ {ACPI_RSD_SHORTLISTX, ACPI_RSD_OFFSET(gpio.vendor_data), "VendorData",
+ NULL},
+};
+
+struct acpi_rsdump_info acpi_rs_dump_fixed_dma[4] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_fixed_dma),
+ "FixedDma", NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(fixed_dma.request_lines),
+ "RequestLines", NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(fixed_dma.channels), "Channels",
+ NULL},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(fixed_dma.width), "TransferWidth",
+ acpi_gbl_dts_decode},
+};
+
+#define ACPI_RS_DUMP_COMMON_SERIAL_BUS \
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET (common_serial_bus.revision_id), "RevisionId", NULL}, \
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET (common_serial_bus.type), "Type", acpi_gbl_sbt_decode}, \
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET (common_serial_bus.producer_consumer), "ProducerConsumer", acpi_gbl_consume_decode}, \
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET (common_serial_bus.slave_mode), "SlaveMode", acpi_gbl_sm_decode}, \
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET (common_serial_bus.type_revision_id), "TypeRevisionId", NULL}, \
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET (common_serial_bus.type_data_length), "TypeDataLength", NULL}, \
+ {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET (common_serial_bus.resource_source), "ResourceSource", NULL}, \
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET (common_serial_bus.vendor_length), "VendorLength", NULL}, \
+ {ACPI_RSD_SHORTLISTX,ACPI_RSD_OFFSET (common_serial_bus.vendor_data), "VendorData", NULL},
+
+struct acpi_rsdump_info acpi_rs_dump_common_serial_bus[10] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_common_serial_bus),
+ "Common Serial Bus", NULL},
+ ACPI_RS_DUMP_COMMON_SERIAL_BUS
+};
+
+struct acpi_rsdump_info acpi_rs_dump_i2c_serial_bus[13] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_i2c_serial_bus),
+ "I2C Serial Bus", NULL},
+ ACPI_RS_DUMP_COMMON_SERIAL_BUS {ACPI_RSD_1BITFLAG,
+ ACPI_RSD_OFFSET(i2c_serial_bus.
+ access_mode),
+ "AccessMode", acpi_gbl_am_decode},
+ {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(i2c_serial_bus.connection_speed),
+ "ConnectionSpeed", NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(i2c_serial_bus.slave_address),
+ "SlaveAddress", NULL},
+};
+
+struct acpi_rsdump_info acpi_rs_dump_spi_serial_bus[17] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_spi_serial_bus),
+ "Spi Serial Bus", NULL},
+ ACPI_RS_DUMP_COMMON_SERIAL_BUS {ACPI_RSD_1BITFLAG,
+ ACPI_RSD_OFFSET(spi_serial_bus.
+ wire_mode), "WireMode",
+ acpi_gbl_wm_decode},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(spi_serial_bus.device_polarity),
+ "DevicePolarity", acpi_gbl_dp_decode},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(spi_serial_bus.data_bit_length),
+ "DataBitLength", NULL},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(spi_serial_bus.clock_phase),
+ "ClockPhase", acpi_gbl_cph_decode},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(spi_serial_bus.clock_polarity),
+ "ClockPolarity", acpi_gbl_cpo_decode},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(spi_serial_bus.device_selection),
+ "DeviceSelection", NULL},
+ {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(spi_serial_bus.connection_speed),
+ "ConnectionSpeed", NULL},
+};
+
+struct acpi_rsdump_info acpi_rs_dump_uart_serial_bus[19] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_uart_serial_bus),
+ "Uart Serial Bus", NULL},
+ ACPI_RS_DUMP_COMMON_SERIAL_BUS {ACPI_RSD_2BITFLAG,
+ ACPI_RSD_OFFSET(uart_serial_bus.
+ flow_control),
+ "FlowControl", acpi_gbl_fc_decode},
+ {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(uart_serial_bus.stop_bits),
+ "StopBits", acpi_gbl_sb_decode},
+ {ACPI_RSD_3BITFLAG, ACPI_RSD_OFFSET(uart_serial_bus.data_bits),
+ "DataBits", acpi_gbl_bpb_decode},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(uart_serial_bus.endian), "Endian",
+ acpi_gbl_ed_decode},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(uart_serial_bus.parity), "Parity",
+ acpi_gbl_pt_decode},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(uart_serial_bus.lines_enabled),
+ "LinesEnabled", NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(uart_serial_bus.rx_fifo_size),
+ "RxFifoSize", NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(uart_serial_bus.tx_fifo_size),
+ "TxFifoSize", NULL},
+ {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(uart_serial_bus.default_baud_rate),
+ "ConnectionSpeed", NULL},
+};
+
/*
* Tables used for common address descriptor flag fields
*/
@@ -413,7 +534,14 @@ acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table)
/* Data items, 8/16/32/64 bit */
case ACPI_RSD_UINT8:
- acpi_rs_out_integer8(name, ACPI_GET8(target));
+ if (table->pointer) {
+ acpi_rs_out_string(name, ACPI_CAST_PTR(char,
+ table->
+ pointer
+ [*target]));
+ } else {
+ acpi_rs_out_integer8(name, ACPI_GET8(target));
+ }
break;
case ACPI_RSD_UINT16:
@@ -444,6 +572,13 @@ acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table)
0x03]));
break;
+ case ACPI_RSD_3BITFLAG:
+ acpi_rs_out_string(name, ACPI_CAST_PTR(char,
+ table->
+ pointer[*target &
+ 0x07]));
+ break;
+
case ACPI_RSD_SHORTLIST:
/*
* Short byte list (single line output) for DMA and IRQ resources
@@ -456,6 +591,20 @@ acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table)
}
break;
+ case ACPI_RSD_SHORTLISTX:
+ /*
+ * Short byte list (single line output) for GPIO vendor data
+ * Note: The list length is obtained from the previous table entry
+ */
+ if (previous_target) {
+ acpi_rs_out_title(name);
+ acpi_rs_dump_short_byte_list(*previous_target,
+ *
+ (ACPI_CAST_INDIRECT_PTR
+ (u8, target)));
+ }
+ break;
+
case ACPI_RSD_LONGLIST:
/*
* Long byte list for Vendor resource data
@@ -480,6 +629,18 @@ acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table)
}
break;
+ case ACPI_RSD_WORDLIST:
+ /*
+ * Word list for GPIO Pin Table
+ * Note: The list length is obtained from the previous table entry
+ */
+ if (previous_target) {
+ acpi_rs_dump_word_list(*previous_target,
+ *(ACPI_CAST_INDIRECT_PTR
+ (u16, target)));
+ }
+ break;
+
case ACPI_RSD_ADDRESS:
/*
* Common flags for all Address resources
@@ -627,14 +788,20 @@ void acpi_rs_dump_resource_list(struct acpi_resource *resource_list)
/* Dump the resource descriptor */
- acpi_rs_dump_descriptor(&resource_list->data,
- acpi_gbl_dump_resource_dispatch[type]);
+ if (type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
+ acpi_rs_dump_descriptor(&resource_list->data,
+ acpi_gbl_dump_serial_bus_dispatch
+ [resource_list->data.
+ common_serial_bus.type]);
+ } else {
+ acpi_rs_dump_descriptor(&resource_list->data,
+ acpi_gbl_dump_resource_dispatch
+ [type]);
+ }
/* Point to the next resource structure */
- resource_list =
- ACPI_ADD_PTR(struct acpi_resource, resource_list,
- resource_list->length);
+ resource_list = ACPI_NEXT_RESOURCE(resource_list);
/* Exit when END_TAG descriptor is reached */
@@ -768,4 +935,13 @@ static void acpi_rs_dump_dword_list(u8 length, u32 * data)
}
}
+static void acpi_rs_dump_word_list(u16 length, u16 *data)
+{
+ u16 i;
+
+ for (i = 0; i < length; i++) {
+ acpi_os_printf("%25s%2.2X : %4.4X\n", "Word", i, data[i]);
+ }
+}
+
#endif
diff --git a/drivers/acpi/acpica/rsinfo.c b/drivers/acpi/acpica/rsinfo.c
index f9ea60872aa4..a9fa5158200b 100644
--- a/drivers/acpi/acpica/rsinfo.c
+++ b/drivers/acpi/acpica/rsinfo.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -76,7 +76,10 @@ struct acpi_rsconvert_info *acpi_gbl_set_resource_dispatch[] = {
acpi_rs_convert_address64, /* 0x0D, ACPI_RESOURCE_TYPE_ADDRESS64 */
acpi_rs_convert_ext_address64, /* 0x0E, ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64 */
acpi_rs_convert_ext_irq, /* 0x0F, ACPI_RESOURCE_TYPE_EXTENDED_IRQ */
- acpi_rs_convert_generic_reg /* 0x10, ACPI_RESOURCE_TYPE_GENERIC_REGISTER */
+ acpi_rs_convert_generic_reg, /* 0x10, ACPI_RESOURCE_TYPE_GENERIC_REGISTER */
+ acpi_rs_convert_gpio, /* 0x11, ACPI_RESOURCE_TYPE_GPIO */
+ acpi_rs_convert_fixed_dma, /* 0x12, ACPI_RESOURCE_TYPE_FIXED_DMA */
+ NULL, /* 0x13, ACPI_RESOURCE_TYPE_SERIAL_BUS - Use subtype table below */
};
/* Dispatch tables for AML-to-resource (Get Resource) conversion functions */
@@ -94,7 +97,7 @@ struct acpi_rsconvert_info *acpi_gbl_get_resource_dispatch[] = {
acpi_rs_convert_end_dpf, /* 0x07, ACPI_RESOURCE_NAME_END_DEPENDENT */
acpi_rs_convert_io, /* 0x08, ACPI_RESOURCE_NAME_IO */
acpi_rs_convert_fixed_io, /* 0x09, ACPI_RESOURCE_NAME_FIXED_IO */
- NULL, /* 0x0A, Reserved */
+ acpi_rs_convert_fixed_dma, /* 0x0A, ACPI_RESOURCE_NAME_FIXED_DMA */
NULL, /* 0x0B, Reserved */
NULL, /* 0x0C, Reserved */
NULL, /* 0x0D, Reserved */
@@ -114,7 +117,19 @@ struct acpi_rsconvert_info *acpi_gbl_get_resource_dispatch[] = {
acpi_rs_convert_address16, /* 0x08, ACPI_RESOURCE_NAME_ADDRESS16 */
acpi_rs_convert_ext_irq, /* 0x09, ACPI_RESOURCE_NAME_EXTENDED_IRQ */
acpi_rs_convert_address64, /* 0x0A, ACPI_RESOURCE_NAME_ADDRESS64 */
- acpi_rs_convert_ext_address64 /* 0x0B, ACPI_RESOURCE_NAME_EXTENDED_ADDRESS64 */
+ acpi_rs_convert_ext_address64, /* 0x0B, ACPI_RESOURCE_NAME_EXTENDED_ADDRESS64 */
+ acpi_rs_convert_gpio, /* 0x0C, ACPI_RESOURCE_NAME_GPIO */
+ NULL, /* 0x0D, Reserved */
+ NULL, /* 0x0E, ACPI_RESOURCE_NAME_SERIAL_BUS - Use subtype table below */
+};
+
+/* Subtype table for serial_bus -- I2C, SPI, and UART */
+
+struct acpi_rsconvert_info *acpi_gbl_convert_resource_serial_bus_dispatch[] = {
+ NULL,
+ acpi_rs_convert_i2c_serial_bus,
+ acpi_rs_convert_spi_serial_bus,
+ acpi_rs_convert_uart_serial_bus,
};
#ifdef ACPI_FUTURE_USAGE
@@ -140,6 +155,16 @@ struct acpi_rsdump_info *acpi_gbl_dump_resource_dispatch[] = {
acpi_rs_dump_ext_address64, /* ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64 */
acpi_rs_dump_ext_irq, /* ACPI_RESOURCE_TYPE_EXTENDED_IRQ */
acpi_rs_dump_generic_reg, /* ACPI_RESOURCE_TYPE_GENERIC_REGISTER */
+ acpi_rs_dump_gpio, /* ACPI_RESOURCE_TYPE_GPIO */
+ acpi_rs_dump_fixed_dma, /* ACPI_RESOURCE_TYPE_FIXED_DMA */
+ NULL, /* ACPI_RESOURCE_TYPE_SERIAL_BUS */
+};
+
+struct acpi_rsdump_info *acpi_gbl_dump_serial_bus_dispatch[] = {
+ NULL,
+ acpi_rs_dump_i2c_serial_bus, /* AML_RESOURCE_I2C_BUS_TYPE */
+ acpi_rs_dump_spi_serial_bus, /* AML_RESOURCE_SPI_BUS_TYPE */
+ acpi_rs_dump_uart_serial_bus, /* AML_RESOURCE_UART_BUS_TYPE */
};
#endif
@@ -166,7 +191,10 @@ const u8 acpi_gbl_aml_resource_sizes[] = {
sizeof(struct aml_resource_address64), /* ACPI_RESOURCE_TYPE_ADDRESS64 */
sizeof(struct aml_resource_extended_address64), /*ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64 */
sizeof(struct aml_resource_extended_irq), /* ACPI_RESOURCE_TYPE_EXTENDED_IRQ */
- sizeof(struct aml_resource_generic_register) /* ACPI_RESOURCE_TYPE_GENERIC_REGISTER */
+ sizeof(struct aml_resource_generic_register), /* ACPI_RESOURCE_TYPE_GENERIC_REGISTER */
+ sizeof(struct aml_resource_gpio), /* ACPI_RESOURCE_TYPE_GPIO */
+ sizeof(struct aml_resource_fixed_dma), /* ACPI_RESOURCE_TYPE_FIXED_DMA */
+ sizeof(struct aml_resource_common_serialbus), /* ACPI_RESOURCE_TYPE_SERIAL_BUS */
};
const u8 acpi_gbl_resource_struct_sizes[] = {
@@ -182,7 +210,7 @@ const u8 acpi_gbl_resource_struct_sizes[] = {
ACPI_RS_SIZE_MIN,
ACPI_RS_SIZE(struct acpi_resource_io),
ACPI_RS_SIZE(struct acpi_resource_fixed_io),
- 0,
+ ACPI_RS_SIZE(struct acpi_resource_fixed_dma),
0,
0,
0,
@@ -202,5 +230,21 @@ const u8 acpi_gbl_resource_struct_sizes[] = {
ACPI_RS_SIZE(struct acpi_resource_address16),
ACPI_RS_SIZE(struct acpi_resource_extended_irq),
ACPI_RS_SIZE(struct acpi_resource_address64),
- ACPI_RS_SIZE(struct acpi_resource_extended_address64)
+ ACPI_RS_SIZE(struct acpi_resource_extended_address64),
+ ACPI_RS_SIZE(struct acpi_resource_gpio),
+ ACPI_RS_SIZE(struct acpi_resource_common_serialbus)
+};
+
+const u8 acpi_gbl_aml_resource_serial_bus_sizes[] = {
+ 0,
+ sizeof(struct aml_resource_i2c_serialbus),
+ sizeof(struct aml_resource_spi_serialbus),
+ sizeof(struct aml_resource_uart_serialbus),
+};
+
+const u8 acpi_gbl_resource_struct_serial_bus_sizes[] = {
+ 0,
+ ACPI_RS_SIZE(struct acpi_resource_i2c_serialbus),
+ ACPI_RS_SIZE(struct acpi_resource_spi_serialbus),
+ ACPI_RS_SIZE(struct acpi_resource_uart_serialbus),
};
diff --git a/drivers/acpi/acpica/rsio.c b/drivers/acpi/acpica/rsio.c
index 0c7efef008be..f6a081057a22 100644
--- a/drivers/acpi/acpica/rsio.c
+++ b/drivers/acpi/acpica/rsio.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsirq.c b/drivers/acpi/acpica/rsirq.c
index 50b8ad211167..e23a9ec248cb 100644
--- a/drivers/acpi/acpica/rsirq.c
+++ b/drivers/acpi/acpica/rsirq.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -264,3 +264,34 @@ struct acpi_rsconvert_info acpi_rs_convert_dma[6] = {
AML_OFFSET(dma.dma_channel_mask),
ACPI_RS_OFFSET(data.dma.channel_count)}
};
+
+/*******************************************************************************
+ *
+ * acpi_rs_convert_fixed_dma
+ *
+ ******************************************************************************/
+
+struct acpi_rsconvert_info acpi_rs_convert_fixed_dma[4] = {
+ {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_FIXED_DMA,
+ ACPI_RS_SIZE(struct acpi_resource_fixed_dma),
+ ACPI_RSC_TABLE_SIZE(acpi_rs_convert_fixed_dma)},
+
+ {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_FIXED_DMA,
+ sizeof(struct aml_resource_fixed_dma),
+ 0},
+
+ /*
+ * These fields are contiguous in both the source and destination:
+ * request_lines
+ * Channels
+ */
+
+ {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.fixed_dma.request_lines),
+ AML_OFFSET(fixed_dma.request_lines),
+ 2},
+
+ {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.fixed_dma.width),
+ AML_OFFSET(fixed_dma.width),
+ 1},
+
+};
diff --git a/drivers/acpi/acpica/rslist.c b/drivers/acpi/acpica/rslist.c
index 1bfcef736c50..9be129f5d6f4 100644
--- a/drivers/acpi/acpica/rslist.c
+++ b/drivers/acpi/acpica/rslist.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -70,6 +70,8 @@ acpi_rs_convert_aml_to_resources(u8 * aml,
struct acpi_resource **resource_ptr =
ACPI_CAST_INDIRECT_PTR(struct acpi_resource, context);
struct acpi_resource *resource;
+ union aml_resource *aml_resource;
+ struct acpi_rsconvert_info *conversion_table;
acpi_status status;
ACPI_FUNCTION_TRACE(rs_convert_aml_to_resources);
@@ -84,14 +86,37 @@ acpi_rs_convert_aml_to_resources(u8 * aml,
"Misaligned resource pointer %p", resource));
}
+ /* Get the appropriate conversion info table */
+
+ aml_resource = ACPI_CAST_PTR(union aml_resource, aml);
+ if (acpi_ut_get_resource_type(aml) == ACPI_RESOURCE_NAME_SERIAL_BUS) {
+ if (aml_resource->common_serial_bus.type >
+ AML_RESOURCE_MAX_SERIALBUSTYPE) {
+ conversion_table = NULL;
+ } else {
+ /* This is an I2C, SPI, or UART serial_bus descriptor */
+
+ conversion_table =
+ acpi_gbl_convert_resource_serial_bus_dispatch
+ [aml_resource->common_serial_bus.type];
+ }
+ } else {
+ conversion_table =
+ acpi_gbl_get_resource_dispatch[resource_index];
+ }
+
+ if (!conversion_table) {
+ ACPI_ERROR((AE_INFO,
+ "Invalid/unsupported resource descriptor: Type 0x%2.2X",
+ resource_index));
+ return (AE_AML_INVALID_RESOURCE_TYPE);
+ }
+
/* Convert the AML byte stream resource to a local resource struct */
status =
- acpi_rs_convert_aml_to_resource(resource,
- ACPI_CAST_PTR(union aml_resource,
- aml),
- acpi_gbl_get_resource_dispatch
- [resource_index]);
+ acpi_rs_convert_aml_to_resource(resource, aml_resource,
+ conversion_table);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"Could not convert AML resource (Type 0x%X)",
@@ -106,7 +131,7 @@ acpi_rs_convert_aml_to_resources(u8 * aml,
/* Point to the next structure in the output buffer */
- *resource_ptr = ACPI_ADD_PTR(void, resource, resource->length);
+ *resource_ptr = ACPI_NEXT_RESOURCE(resource);
return_ACPI_STATUS(AE_OK);
}
@@ -135,6 +160,7 @@ acpi_rs_convert_resources_to_aml(struct acpi_resource *resource,
{
u8 *aml = output_buffer;
u8 *end_aml = output_buffer + aml_size_needed;
+ struct acpi_rsconvert_info *conversion_table;
acpi_status status;
ACPI_FUNCTION_TRACE(rs_convert_resources_to_aml);
@@ -154,11 +180,34 @@ acpi_rs_convert_resources_to_aml(struct acpi_resource *resource,
/* Perform the conversion */
- status = acpi_rs_convert_resource_to_aml(resource, ACPI_CAST_PTR(union
- aml_resource,
- aml),
- acpi_gbl_set_resource_dispatch
- [resource->type]);
+ if (resource->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
+ if (resource->data.common_serial_bus.type >
+ AML_RESOURCE_MAX_SERIALBUSTYPE) {
+ conversion_table = NULL;
+ } else {
+ /* This is an I2C, SPI, or UART serial_bus descriptor */
+
+ conversion_table =
+ acpi_gbl_convert_resource_serial_bus_dispatch
+ [resource->data.common_serial_bus.type];
+ }
+ } else {
+ conversion_table =
+ acpi_gbl_set_resource_dispatch[resource->type];
+ }
+
+ if (!conversion_table) {
+ ACPI_ERROR((AE_INFO,
+ "Invalid/unsupported resource descriptor: Type 0x%2.2X",
+ resource->type));
+ return (AE_AML_INVALID_RESOURCE_TYPE);
+ }
+
+ status = acpi_rs_convert_resource_to_aml(resource,
+ ACPI_CAST_PTR(union
+ aml_resource,
+ aml),
+ conversion_table);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"Could not convert resource (type 0x%X) to AML",
@@ -192,9 +241,7 @@ acpi_rs_convert_resources_to_aml(struct acpi_resource *resource,
/* Point to the next input resource descriptor */
- resource =
- ACPI_ADD_PTR(struct acpi_resource, resource,
- resource->length);
+ resource = ACPI_NEXT_RESOURCE(resource);
}
/* Completed buffer, but did not find an end_tag resource descriptor */
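
Both conversion directions now pick their handler the same way: serial-bus descriptors index a per-subtype table (bounds-checked against AML_RESOURCE_MAX_SERIALBUSTYPE), while everything else uses the existing per-type dispatch. A minimal sketch of that selection pattern; the tables and handlers below are placeholders, not the real ACPICA conversion-info tables.

#include <stdio.h>

typedef const char *(*convert_fn)(void);

static const char *convert_generic(void) { return "generic"; }
static const char *convert_i2c(void)     { return "i2c"; }
static const char *convert_spi(void)     { return "spi"; }
static const char *convert_uart(void)    { return "uart"; }

#define MAX_SERIALBUS_TYPE 3

static convert_fn main_dispatch[] = { convert_generic };
static convert_fn serial_bus_dispatch[] = {
        NULL, convert_i2c, convert_spi, convert_uart
};

static convert_fn pick_converter(int is_serial_bus, unsigned int subtype,
                                 unsigned int resource_index)
{
        if (is_serial_bus) {
                if (subtype > MAX_SERIALBUS_TYPE) {
                        return NULL;    /* invalid/unsupported descriptor */
                }
                return serial_bus_dispatch[subtype];
        }
        return main_dispatch[resource_index];
}

int main(void)
{
        convert_fn fn = pick_converter(1, 2, 0);

        printf("%s\n", fn ? fn() : "invalid");  /* spi */
        return 0;
}
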
diff --git a/drivers/acpi/acpica/rsmemory.c b/drivers/acpi/acpica/rsmemory.c
index 7cc6d8625f1e..4fd611ad02b4 100644
--- a/drivers/acpi/acpica/rsmemory.c
+++ b/drivers/acpi/acpica/rsmemory.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsmisc.c b/drivers/acpi/acpica/rsmisc.c
index 410264b22a29..8073b371cc7c 100644
--- a/drivers/acpi/acpica/rsmisc.c
+++ b/drivers/acpi/acpica/rsmisc.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -83,6 +83,10 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
ACPI_FUNCTION_TRACE(rs_convert_aml_to_resource);
+ if (!info) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
if (((acpi_size) resource) & 0x3) {
/* Each internal resource struct is expected to be 32-bit aligned */
@@ -101,7 +105,6 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
* table length (# of table entries)
*/
count = INIT_TABLE_LENGTH(info);
-
while (count) {
/*
* Source is the external AML byte stream buffer,
@@ -145,6 +148,14 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
((ACPI_GET8(source) >> info->value) & 0x03);
break;
+ case ACPI_RSC_3BITFLAG:
+ /*
+ * Mask and shift the flag bits
+ */
+ ACPI_SET8(destination) = (u8)
+ ((ACPI_GET8(source) >> info->value) & 0x07);
+ break;
+
case ACPI_RSC_COUNT:
item_count = ACPI_GET8(source);
@@ -163,6 +174,69 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
(info->value * (item_count - 1));
break;
+ case ACPI_RSC_COUNT_GPIO_PIN:
+
+ target = ACPI_ADD_PTR(void, aml, info->value);
+ item_count = ACPI_GET16(target) - ACPI_GET16(source);
+
+ resource->length = resource->length + item_count;
+ item_count = item_count / 2;
+ ACPI_SET16(destination) = item_count;
+ break;
+
+ case ACPI_RSC_COUNT_GPIO_VEN:
+
+ item_count = ACPI_GET8(source);
+ ACPI_SET8(destination) = (u8)item_count;
+
+ resource->length = resource->length +
+ (info->value * item_count);
+ break;
+
+ case ACPI_RSC_COUNT_GPIO_RES:
+
+ /*
+ * Vendor data is optional (length/offset may both be zero)
+ * Examine vendor data length field first
+ */
+ target = ACPI_ADD_PTR(void, aml, (info->value + 2));
+ if (ACPI_GET16(target)) {
+
+ /* Use vendor offset to get resource source length */
+
+ target = ACPI_ADD_PTR(void, aml, info->value);
+ item_count =
+ ACPI_GET16(target) - ACPI_GET16(source);
+ } else {
+ /* No vendor data to worry about */
+
+ item_count = aml->large_header.resource_length +
+ sizeof(struct aml_resource_large_header) -
+ ACPI_GET16(source);
+ }
+
+ resource->length = resource->length + item_count;
+ ACPI_SET16(destination) = item_count;
+ break;
+
+ case ACPI_RSC_COUNT_SERIAL_VEN:
+
+ item_count = ACPI_GET16(source) - info->value;
+
+ resource->length = resource->length + item_count;
+ ACPI_SET16(destination) = item_count;
+ break;
+
+ case ACPI_RSC_COUNT_SERIAL_RES:
+
+ item_count = (aml_resource_length +
+ sizeof(struct aml_resource_large_header))
+ - ACPI_GET16(source) - info->value;
+
+ resource->length = resource->length + item_count;
+ ACPI_SET16(destination) = item_count;
+ break;
+
case ACPI_RSC_LENGTH:
resource->length = resource->length + info->value;
@@ -183,6 +257,72 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
info->opcode);
break;
+ case ACPI_RSC_MOVE_GPIO_PIN:
+
+ /* Generate and set the PIN data pointer */
+
+ target = (char *)ACPI_ADD_PTR(void, resource,
+ (resource->length -
+ item_count * 2));
+ *(u16 **)destination = ACPI_CAST_PTR(u16, target);
+
+ /* Copy the PIN data */
+
+ source = ACPI_ADD_PTR(void, aml, ACPI_GET16(source));
+ acpi_rs_move_data(target, source, item_count,
+ info->opcode);
+ break;
+
+ case ACPI_RSC_MOVE_GPIO_RES:
+
+ /* Generate and set the resource_source string pointer */
+
+ target = (char *)ACPI_ADD_PTR(void, resource,
+ (resource->length -
+ item_count));
+ *(u8 **)destination = ACPI_CAST_PTR(u8, target);
+
+ /* Copy the resource_source string */
+
+ source = ACPI_ADD_PTR(void, aml, ACPI_GET16(source));
+ acpi_rs_move_data(target, source, item_count,
+ info->opcode);
+ break;
+
+ case ACPI_RSC_MOVE_SERIAL_VEN:
+
+ /* Generate and set the Vendor Data pointer */
+
+ target = (char *)ACPI_ADD_PTR(void, resource,
+ (resource->length -
+ item_count));
+ *(u8 **)destination = ACPI_CAST_PTR(u8, target);
+
+ /* Copy the Vendor Data */
+
+ source = ACPI_ADD_PTR(void, aml, info->value);
+ acpi_rs_move_data(target, source, item_count,
+ info->opcode);
+ break;
+
+ case ACPI_RSC_MOVE_SERIAL_RES:
+
+ /* Generate and set the resource_source string pointer */
+
+ target = (char *)ACPI_ADD_PTR(void, resource,
+ (resource->length -
+ item_count));
+ *(u8 **)destination = ACPI_CAST_PTR(u8, target);
+
+ /* Copy the resource_source string */
+
+ source =
+ ACPI_ADD_PTR(void, aml,
+ (ACPI_GET16(source) + info->value));
+ acpi_rs_move_data(target, source, item_count,
+ info->opcode);
+ break;
+
case ACPI_RSC_SET8:
ACPI_MEMSET(destination, info->aml_offset, info->value);
@@ -219,13 +359,18 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
* Optional resource_source (Index and String). This is the more
* complicated case used by the Interrupt() macro
*/
- target =
- ACPI_ADD_PTR(char, resource,
- info->aml_offset + (item_count * 4));
+ target = ACPI_ADD_PTR(char, resource,
+ info->aml_offset +
+ (item_count * 4));
resource->length +=
acpi_rs_get_resource_source(aml_resource_length,
- (acpi_rs_length) (((item_count - 1) * sizeof(u32)) + info->value), destination, aml, target);
+ (acpi_rs_length)
+ (((item_count -
+ 1) * sizeof(u32)) +
+ info->value),
+ destination, aml,
+ target);
break;
case ACPI_RSC_BITMASK:
@@ -327,6 +472,7 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
{
void *source = NULL;
void *destination;
+ char *target;
acpi_rsdesc_size aml_length = 0;
u8 count;
u16 temp16 = 0;
@@ -334,6 +480,10 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
ACPI_FUNCTION_TRACE(rs_convert_resource_to_aml);
+ if (!info) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
/*
* First table entry must be ACPI_RSC_INITxxx and must contain the
* table length (# of table entries)
@@ -383,6 +533,14 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
((ACPI_GET8(source) & 0x03) << info->value);
break;
+ case ACPI_RSC_3BITFLAG:
+ /*
+ * Mask and shift the flag bits
+ */
+ ACPI_SET8(destination) |= (u8)
+ ((ACPI_GET8(source) & 0x07) << info->value);
+ break;
+
case ACPI_RSC_COUNT:
item_count = ACPI_GET8(source);
@@ -400,6 +558,63 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
acpi_rs_set_resource_length(aml_length, aml);
break;
+ case ACPI_RSC_COUNT_GPIO_PIN:
+
+ item_count = ACPI_GET16(source);
+ ACPI_SET16(destination) = (u16)aml_length;
+
+ aml_length = (u16)(aml_length + item_count * 2);
+ target = ACPI_ADD_PTR(void, aml, info->value);
+ ACPI_SET16(target) = (u16)aml_length;
+ acpi_rs_set_resource_length(aml_length, aml);
+ break;
+
+ case ACPI_RSC_COUNT_GPIO_VEN:
+
+ item_count = ACPI_GET16(source);
+ ACPI_SET16(destination) = (u16)item_count;
+
+ aml_length =
+ (u16)(aml_length + (info->value * item_count));
+ acpi_rs_set_resource_length(aml_length, aml);
+ break;
+
+ case ACPI_RSC_COUNT_GPIO_RES:
+
+ /* Set resource source string length */
+
+ item_count = ACPI_GET16(source);
+ ACPI_SET16(destination) = (u16)aml_length;
+
+ /* Compute offset for the Vendor Data */
+
+ aml_length = (u16)(aml_length + item_count);
+ target = ACPI_ADD_PTR(void, aml, info->value);
+
+ /* Set vendor offset only if there is vendor data */
+
+ if (resource->data.gpio.vendor_length) {
+ ACPI_SET16(target) = (u16)aml_length;
+ }
+
+ acpi_rs_set_resource_length(aml_length, aml);
+ break;
+
+ case ACPI_RSC_COUNT_SERIAL_VEN:
+
+ item_count = ACPI_GET16(source);
+ ACPI_SET16(destination) = item_count + info->value;
+ aml_length = (u16)(aml_length + item_count);
+ acpi_rs_set_resource_length(aml_length, aml);
+ break;
+
+ case ACPI_RSC_COUNT_SERIAL_RES:
+
+ item_count = ACPI_GET16(source);
+ aml_length = (u16)(aml_length + item_count);
+ acpi_rs_set_resource_length(aml_length, aml);
+ break;
+
case ACPI_RSC_LENGTH:
acpi_rs_set_resource_length(info->value, aml);
@@ -417,6 +632,48 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
info->opcode);
break;
+ case ACPI_RSC_MOVE_GPIO_PIN:
+
+ destination = (char *)ACPI_ADD_PTR(void, aml,
+ ACPI_GET16
+ (destination));
+ source = *(u16 **)source;
+ acpi_rs_move_data(destination, source, item_count,
+ info->opcode);
+ break;
+
+ case ACPI_RSC_MOVE_GPIO_RES:
+
+ /* Used for both resource_source string and vendor_data */
+
+ destination = (char *)ACPI_ADD_PTR(void, aml,
+ ACPI_GET16
+ (destination));
+ source = *(u8 **)source;
+ acpi_rs_move_data(destination, source, item_count,
+ info->opcode);
+ break;
+
+ case ACPI_RSC_MOVE_SERIAL_VEN:
+
+ destination = (char *)ACPI_ADD_PTR(void, aml,
+ (aml_length -
+ item_count));
+ source = *(u8 **)source;
+ acpi_rs_move_data(destination, source, item_count,
+ info->opcode);
+ break;
+
+ case ACPI_RSC_MOVE_SERIAL_RES:
+
+ destination = (char *)ACPI_ADD_PTR(void, aml,
+ (aml_length -
+ item_count));
+ source = *(u8 **)source;
+ acpi_rs_move_data(destination, source, item_count,
+ info->opcode);
+ break;
+
case ACPI_RSC_ADDRESS:
/* Set the Resource Type, General Flags, and Type-Specific Flags */
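The new ACPI_RSC_3BITFLAG opcode added in both conversion directions above packs and unpacks a 3-bit field at the bit position given by info->value. A stand-alone illustration using the UART data_bits field (bit position 4, per the rsserial.c table added below); the 0x30 flags value is hypothetical:

        u8 aml_flags = 0x30;                       /* hypothetical type_specific_flags byte */
        u8 data_bits = (aml_flags >> 4) & 0x07;    /* AML -> resource: yields 3 (DataBitsEight) */
        u8 encoded = 0;

        encoded |= (u8)((data_bits & 0x07) << 4);  /* resource -> AML: restores 0x30 */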
diff --git a/drivers/acpi/acpica/rsserial.c b/drivers/acpi/acpica/rsserial.c
new file mode 100644
index 000000000000..9aa5e689b444
--- /dev/null
+++ b/drivers/acpi/acpica/rsserial.c
@@ -0,0 +1,441 @@
+/*******************************************************************************
+ *
+ * Module Name: rsserial - GPIO/serial_bus resource descriptors
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2012, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acresrc.h"
+
+#define _COMPONENT ACPI_RESOURCES
+ACPI_MODULE_NAME("rsserial")
+
+/*******************************************************************************
+ *
+ * acpi_rs_convert_gpio
+ *
+ ******************************************************************************/
+struct acpi_rsconvert_info acpi_rs_convert_gpio[17] = {
+ {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_GPIO,
+ ACPI_RS_SIZE(struct acpi_resource_gpio),
+ ACPI_RSC_TABLE_SIZE(acpi_rs_convert_gpio)},
+
+ {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_GPIO,
+ sizeof(struct aml_resource_gpio),
+ 0},
+
+ /*
+ * These fields are contiguous in both the source and destination:
+ * revision_id
+ * connection_type
+ */
+ {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.gpio.revision_id),
+ AML_OFFSET(gpio.revision_id),
+ 2},
+
+ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.gpio.producer_consumer),
+ AML_OFFSET(gpio.flags),
+ 0},
+
+ {ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.gpio.sharable),
+ AML_OFFSET(gpio.int_flags),
+ 3},
+
+ {ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.gpio.io_restriction),
+ AML_OFFSET(gpio.int_flags),
+ 0},
+
+ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.gpio.triggering),
+ AML_OFFSET(gpio.int_flags),
+ 0},
+
+ {ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.gpio.polarity),
+ AML_OFFSET(gpio.int_flags),
+ 1},
+
+ {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.gpio.pin_config),
+ AML_OFFSET(gpio.pin_config),
+ 1},
+
+ /*
+ * These fields are contiguous in both the source and destination:
+ * drive_strength
+ * debounce_timeout
+ */
+ {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.gpio.drive_strength),
+ AML_OFFSET(gpio.drive_strength),
+ 2},
+
+ /* Pin Table */
+
+ {ACPI_RSC_COUNT_GPIO_PIN, ACPI_RS_OFFSET(data.gpio.pin_table_length),
+ AML_OFFSET(gpio.pin_table_offset),
+ AML_OFFSET(gpio.res_source_offset)},
+
+ {ACPI_RSC_MOVE_GPIO_PIN, ACPI_RS_OFFSET(data.gpio.pin_table),
+ AML_OFFSET(gpio.pin_table_offset),
+ 0},
+
+ /* Resource Source */
+
+ {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.gpio.resource_source.index),
+ AML_OFFSET(gpio.res_source_index),
+ 1},
+
+ {ACPI_RSC_COUNT_GPIO_RES,
+ ACPI_RS_OFFSET(data.gpio.resource_source.string_length),
+ AML_OFFSET(gpio.res_source_offset),
+ AML_OFFSET(gpio.vendor_offset)},
+
+ {ACPI_RSC_MOVE_GPIO_RES,
+ ACPI_RS_OFFSET(data.gpio.resource_source.string_ptr),
+ AML_OFFSET(gpio.res_source_offset),
+ 0},
+
+ /* Vendor Data */
+
+ {ACPI_RSC_COUNT_GPIO_VEN, ACPI_RS_OFFSET(data.gpio.vendor_length),
+ AML_OFFSET(gpio.vendor_length),
+ 1},
+
+ {ACPI_RSC_MOVE_GPIO_RES, ACPI_RS_OFFSET(data.gpio.vendor_data),
+ AML_OFFSET(gpio.vendor_offset),
+ 0},
+};
+
+/*******************************************************************************
+ *
+ * acpi_rs_convert_i2c_serial_bus
+ *
+ ******************************************************************************/
+
+struct acpi_rsconvert_info acpi_rs_convert_i2c_serial_bus[16] = {
+ {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_SERIAL_BUS,
+ ACPI_RS_SIZE(struct acpi_resource_i2c_serialbus),
+ ACPI_RSC_TABLE_SIZE(acpi_rs_convert_i2c_serial_bus)},
+
+ {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_SERIAL_BUS,
+ sizeof(struct aml_resource_i2c_serialbus),
+ 0},
+
+ {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.common_serial_bus.revision_id),
+ AML_OFFSET(common_serial_bus.revision_id),
+ 1},
+
+ {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.common_serial_bus.type),
+ AML_OFFSET(common_serial_bus.type),
+ 1},
+
+ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.common_serial_bus.slave_mode),
+ AML_OFFSET(common_serial_bus.flags),
+ 0},
+
+ {ACPI_RSC_1BITFLAG,
+ ACPI_RS_OFFSET(data.common_serial_bus.producer_consumer),
+ AML_OFFSET(common_serial_bus.flags),
+ 1},
+
+ {ACPI_RSC_MOVE8,
+ ACPI_RS_OFFSET(data.common_serial_bus.type_revision_id),
+ AML_OFFSET(common_serial_bus.type_revision_id),
+ 1},
+
+ {ACPI_RSC_MOVE16,
+ ACPI_RS_OFFSET(data.common_serial_bus.type_data_length),
+ AML_OFFSET(common_serial_bus.type_data_length),
+ 1},
+
+ /* Vendor data */
+
+ {ACPI_RSC_COUNT_SERIAL_VEN,
+ ACPI_RS_OFFSET(data.common_serial_bus.vendor_length),
+ AML_OFFSET(common_serial_bus.type_data_length),
+ AML_RESOURCE_I2C_MIN_DATA_LEN},
+
+ {ACPI_RSC_MOVE_SERIAL_VEN,
+ ACPI_RS_OFFSET(data.common_serial_bus.vendor_data),
+ 0,
+ sizeof(struct aml_resource_i2c_serialbus)},
+
+ /* Resource Source */
+
+ {ACPI_RSC_MOVE8,
+ ACPI_RS_OFFSET(data.common_serial_bus.resource_source.index),
+ AML_OFFSET(common_serial_bus.res_source_index),
+ 1},
+
+ {ACPI_RSC_COUNT_SERIAL_RES,
+ ACPI_RS_OFFSET(data.common_serial_bus.resource_source.string_length),
+ AML_OFFSET(common_serial_bus.type_data_length),
+ sizeof(struct aml_resource_common_serialbus)},
+
+ {ACPI_RSC_MOVE_SERIAL_RES,
+ ACPI_RS_OFFSET(data.common_serial_bus.resource_source.string_ptr),
+ AML_OFFSET(common_serial_bus.type_data_length),
+ sizeof(struct aml_resource_common_serialbus)},
+
+ /* I2C bus type specific */
+
+ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.i2c_serial_bus.access_mode),
+ AML_OFFSET(i2c_serial_bus.type_specific_flags),
+ 0},
+
+ {ACPI_RSC_MOVE32, ACPI_RS_OFFSET(data.i2c_serial_bus.connection_speed),
+ AML_OFFSET(i2c_serial_bus.connection_speed),
+ 1},
+
+ {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.i2c_serial_bus.slave_address),
+ AML_OFFSET(i2c_serial_bus.slave_address),
+ 1},
+};
+
+/*******************************************************************************
+ *
+ * acpi_rs_convert_spi_serial_bus
+ *
+ ******************************************************************************/
+
+struct acpi_rsconvert_info acpi_rs_convert_spi_serial_bus[20] = {
+ {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_SERIAL_BUS,
+ ACPI_RS_SIZE(struct acpi_resource_spi_serialbus),
+ ACPI_RSC_TABLE_SIZE(acpi_rs_convert_spi_serial_bus)},
+
+ {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_SERIAL_BUS,
+ sizeof(struct aml_resource_spi_serialbus),
+ 0},
+
+ {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.common_serial_bus.revision_id),
+ AML_OFFSET(common_serial_bus.revision_id),
+ 1},
+
+ {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.common_serial_bus.type),
+ AML_OFFSET(common_serial_bus.type),
+ 1},
+
+ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.common_serial_bus.slave_mode),
+ AML_OFFSET(common_serial_bus.flags),
+ 0},
+
+ {ACPI_RSC_1BITFLAG,
+ ACPI_RS_OFFSET(data.common_serial_bus.producer_consumer),
+ AML_OFFSET(common_serial_bus.flags),
+ 1},
+
+ {ACPI_RSC_MOVE8,
+ ACPI_RS_OFFSET(data.common_serial_bus.type_revision_id),
+ AML_OFFSET(common_serial_bus.type_revision_id),
+ 1},
+
+ {ACPI_RSC_MOVE16,
+ ACPI_RS_OFFSET(data.common_serial_bus.type_data_length),
+ AML_OFFSET(common_serial_bus.type_data_length),
+ 1},
+
+ /* Vendor data */
+
+ {ACPI_RSC_COUNT_SERIAL_VEN,
+ ACPI_RS_OFFSET(data.common_serial_bus.vendor_length),
+ AML_OFFSET(common_serial_bus.type_data_length),
+ AML_RESOURCE_SPI_MIN_DATA_LEN},
+
+ {ACPI_RSC_MOVE_SERIAL_VEN,
+ ACPI_RS_OFFSET(data.common_serial_bus.vendor_data),
+ 0,
+ sizeof(struct aml_resource_spi_serialbus)},
+
+ /* Resource Source */
+
+ {ACPI_RSC_MOVE8,
+ ACPI_RS_OFFSET(data.common_serial_bus.resource_source.index),
+ AML_OFFSET(common_serial_bus.res_source_index),
+ 1},
+
+ {ACPI_RSC_COUNT_SERIAL_RES,
+ ACPI_RS_OFFSET(data.common_serial_bus.resource_source.string_length),
+ AML_OFFSET(common_serial_bus.type_data_length),
+ sizeof(struct aml_resource_common_serialbus)},
+
+ {ACPI_RSC_MOVE_SERIAL_RES,
+ ACPI_RS_OFFSET(data.common_serial_bus.resource_source.string_ptr),
+ AML_OFFSET(common_serial_bus.type_data_length),
+ sizeof(struct aml_resource_common_serialbus)},
+
+ /* Spi bus type specific */
+
+ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.spi_serial_bus.wire_mode),
+ AML_OFFSET(spi_serial_bus.type_specific_flags),
+ 0},
+
+ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.spi_serial_bus.device_polarity),
+ AML_OFFSET(spi_serial_bus.type_specific_flags),
+ 1},
+
+ {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.spi_serial_bus.data_bit_length),
+ AML_OFFSET(spi_serial_bus.data_bit_length),
+ 1},
+
+ {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.spi_serial_bus.clock_phase),
+ AML_OFFSET(spi_serial_bus.clock_phase),
+ 1},
+
+ {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.spi_serial_bus.clock_polarity),
+ AML_OFFSET(spi_serial_bus.clock_polarity),
+ 1},
+
+ {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.spi_serial_bus.device_selection),
+ AML_OFFSET(spi_serial_bus.device_selection),
+ 1},
+
+ {ACPI_RSC_MOVE32, ACPI_RS_OFFSET(data.spi_serial_bus.connection_speed),
+ AML_OFFSET(spi_serial_bus.connection_speed),
+ 1},
+};
+
+/*******************************************************************************
+ *
+ * acpi_rs_convert_uart_serial_bus
+ *
+ ******************************************************************************/
+
+struct acpi_rsconvert_info acpi_rs_convert_uart_serial_bus[22] = {
+ {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_SERIAL_BUS,
+ ACPI_RS_SIZE(struct acpi_resource_uart_serialbus),
+ ACPI_RSC_TABLE_SIZE(acpi_rs_convert_uart_serial_bus)},
+
+ {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_SERIAL_BUS,
+ sizeof(struct aml_resource_uart_serialbus),
+ 0},
+
+ {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.common_serial_bus.revision_id),
+ AML_OFFSET(common_serial_bus.revision_id),
+ 1},
+
+ {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.common_serial_bus.type),
+ AML_OFFSET(common_serial_bus.type),
+ 1},
+
+ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.common_serial_bus.slave_mode),
+ AML_OFFSET(common_serial_bus.flags),
+ 0},
+
+ {ACPI_RSC_1BITFLAG,
+ ACPI_RS_OFFSET(data.common_serial_bus.producer_consumer),
+ AML_OFFSET(common_serial_bus.flags),
+ 1},
+
+ {ACPI_RSC_MOVE8,
+ ACPI_RS_OFFSET(data.common_serial_bus.type_revision_id),
+ AML_OFFSET(common_serial_bus.type_revision_id),
+ 1},
+
+ {ACPI_RSC_MOVE16,
+ ACPI_RS_OFFSET(data.common_serial_bus.type_data_length),
+ AML_OFFSET(common_serial_bus.type_data_length),
+ 1},
+
+ /* Vendor data */
+
+ {ACPI_RSC_COUNT_SERIAL_VEN,
+ ACPI_RS_OFFSET(data.common_serial_bus.vendor_length),
+ AML_OFFSET(common_serial_bus.type_data_length),
+ AML_RESOURCE_UART_MIN_DATA_LEN},
+
+ {ACPI_RSC_MOVE_SERIAL_VEN,
+ ACPI_RS_OFFSET(data.common_serial_bus.vendor_data),
+ 0,
+ sizeof(struct aml_resource_uart_serialbus)},
+
+ /* Resource Source */
+
+ {ACPI_RSC_MOVE8,
+ ACPI_RS_OFFSET(data.common_serial_bus.resource_source.index),
+ AML_OFFSET(common_serial_bus.res_source_index),
+ 1},
+
+ {ACPI_RSC_COUNT_SERIAL_RES,
+ ACPI_RS_OFFSET(data.common_serial_bus.resource_source.string_length),
+ AML_OFFSET(common_serial_bus.type_data_length),
+ sizeof(struct aml_resource_common_serialbus)},
+
+ {ACPI_RSC_MOVE_SERIAL_RES,
+ ACPI_RS_OFFSET(data.common_serial_bus.resource_source.string_ptr),
+ AML_OFFSET(common_serial_bus.type_data_length),
+ sizeof(struct aml_resource_common_serialbus)},
+
+ /* Uart bus type specific */
+
+ {ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.uart_serial_bus.flow_control),
+ AML_OFFSET(uart_serial_bus.type_specific_flags),
+ 0},
+
+ {ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.uart_serial_bus.stop_bits),
+ AML_OFFSET(uart_serial_bus.type_specific_flags),
+ 2},
+
+ {ACPI_RSC_3BITFLAG, ACPI_RS_OFFSET(data.uart_serial_bus.data_bits),
+ AML_OFFSET(uart_serial_bus.type_specific_flags),
+ 4},
+
+ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.uart_serial_bus.endian),
+ AML_OFFSET(uart_serial_bus.type_specific_flags),
+ 7},
+
+ {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.uart_serial_bus.parity),
+ AML_OFFSET(uart_serial_bus.parity),
+ 1},
+
+ {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.uart_serial_bus.lines_enabled),
+ AML_OFFSET(uart_serial_bus.lines_enabled),
+ 1},
+
+ {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.uart_serial_bus.rx_fifo_size),
+ AML_OFFSET(uart_serial_bus.rx_fifo_size),
+ 1},
+
+ {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.uart_serial_bus.tx_fifo_size),
+ AML_OFFSET(uart_serial_bus.tx_fifo_size),
+ 1},
+
+ {ACPI_RSC_MOVE32,
+ ACPI_RS_OFFSET(data.uart_serial_bus.default_baud_rate),
+ AML_OFFSET(uart_serial_bus.default_baud_rate),
+ 1},
+};
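Each table entry above pairs an offset into the internal struct acpi_resource with an offset into the raw AML descriptor, plus an opcode-specific value (bit position, minimum data length, and so on). A sketch of the helpers these tables lean on; the exact definitions live in the ACPICA headers and are assumptions here, but both offset macros are essentially offsetof(), and the table-size macro is what keeps the [17]/[16]/[20]/[22] array dimensions consistent with the entry count recorded in each INITGET entry:

/* Presumed shape of the table helpers (acresrc.h / amlresrc.h) */
#define ACPI_RS_OFFSET(f)        (u8) ACPI_OFFSET(struct acpi_resource, f)
#define AML_OFFSET(f)            (u8) ACPI_OFFSET(union aml_resource, f)
#define ACPI_RSC_TABLE_SIZE(d)   (sizeof(d) / sizeof(struct acpi_rsconvert_info))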
diff --git a/drivers/acpi/acpica/rsutils.c b/drivers/acpi/acpica/rsutils.c
index 231811e56939..433a375deb93 100644
--- a/drivers/acpi/acpica/rsutils.c
+++ b/drivers/acpi/acpica/rsutils.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -144,6 +144,9 @@ acpi_rs_move_data(void *destination, void *source, u16 item_count, u8 move_type)
* since there are no alignment or endian issues
*/
case ACPI_RSC_MOVE8:
+ case ACPI_RSC_MOVE_GPIO_RES:
+ case ACPI_RSC_MOVE_SERIAL_VEN:
+ case ACPI_RSC_MOVE_SERIAL_RES:
ACPI_MEMCPY(destination, source, item_count);
return;
@@ -153,6 +156,7 @@ acpi_rs_move_data(void *destination, void *source, u16 item_count, u8 move_type)
* misaligned memory transfers
*/
case ACPI_RSC_MOVE16:
+ case ACPI_RSC_MOVE_GPIO_PIN:
ACPI_MOVE_16_TO_16(&ACPI_CAST_PTR(u16, destination)[i],
&ACPI_CAST_PTR(u16, source)[i]);
break;
@@ -590,6 +594,56 @@ acpi_rs_get_prs_method_data(struct acpi_namespace_node *node,
/*******************************************************************************
*
+ * FUNCTION: acpi_rs_get_aei_method_data
+ *
+ * PARAMETERS: Node - Device node
+ * ret_buffer - Pointer to a buffer structure for the
+ * results
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: This function is called to get the _AEI value of an object
+ * contained in an object specified by the handle passed in
+ *
+ * If the function fails, an appropriate status will be returned
+ * and the contents of the caller's buffer are undefined.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_rs_get_aei_method_data(struct acpi_namespace_node *node,
+ struct acpi_buffer *ret_buffer)
+{
+ union acpi_operand_object *obj_desc;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(rs_get_aei_method_data);
+
+ /* Parameters guaranteed valid by caller */
+
+ /* Execute the method, no parameters */
+
+ status = acpi_ut_evaluate_object(node, METHOD_NAME__AEI,
+ ACPI_BTYPE_BUFFER, &obj_desc);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /*
+ * Make the call to create a resource linked list from the
+ * byte stream buffer that comes back from the _AEI method
+ * execution.
+ */
+ status = acpi_rs_create_resource_list(obj_desc, ret_buffer);
+
+ /* On exit, we must delete the object returned by evaluate_object */
+
+ acpi_ut_remove_reference(obj_desc);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
* FUNCTION: acpi_rs_get_method_data
*
* PARAMETERS: Handle - Handle to the containing object
diff --git a/drivers/acpi/acpica/rsxface.c b/drivers/acpi/acpica/rsxface.c
index fe86b37b16ce..f58c098c7aeb 100644
--- a/drivers/acpi/acpica/rsxface.c
+++ b/drivers/acpi/acpica/rsxface.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -307,6 +307,46 @@ acpi_set_current_resources(acpi_handle device_handle,
ACPI_EXPORT_SYMBOL(acpi_set_current_resources)
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_get_event_resources
+ *
+ * PARAMETERS: device_handle - Handle to the device object for the
+ * device for which we are getting the event resources
+ * ret_buffer - Pointer to a buffer that will receive the
+ * event resources for the device
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: This function is called to get the event resources for a
+ * specific device. The caller must first acquire a handle for
+ * the desired device. The resource data is returned in the
+ * buffer pointed to by the ret_buffer parameter. Uses the
+ * _AEI method.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_get_event_resources(acpi_handle device_handle,
+ struct acpi_buffer *ret_buffer)
+{
+ acpi_status status;
+ struct acpi_namespace_node *node;
+
+ ACPI_FUNCTION_TRACE(acpi_get_event_resources);
+
+ /* Validate parameters then dispatch to internal routine */
+
+ status = acpi_rs_validate_parameters(device_handle, ret_buffer, &node);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ status = acpi_rs_get_aei_method_data(node, ret_buffer);
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_get_event_resources)
+
/******************************************************************************
*
* FUNCTION: acpi_resource_to_address64
@@ -486,8 +526,9 @@ acpi_rs_match_vendor_resource(struct acpi_resource *resource, void *context)
*
* PARAMETERS: device_handle - Handle to the device object for the
* device we are querying
- * Name - Method name of the resources we want
- * (METHOD_NAME__CRS or METHOD_NAME__PRS)
+ * Name - Method name of the resources we want.
+ * (METHOD_NAME__CRS, METHOD_NAME__PRS, or
+ * METHOD_NAME__AEI)
* user_function - Called for each resource
* Context - Passed to user_function
*
@@ -514,11 +555,12 @@ acpi_walk_resources(acpi_handle device_handle,
if (!device_handle || !user_function || !name ||
(!ACPI_COMPARE_NAME(name, METHOD_NAME__CRS) &&
- !ACPI_COMPARE_NAME(name, METHOD_NAME__PRS))) {
+ !ACPI_COMPARE_NAME(name, METHOD_NAME__PRS) &&
+ !ACPI_COMPARE_NAME(name, METHOD_NAME__AEI))) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
- /* Get the _CRS or _PRS resource list */
+ /* Get the _CRS/_PRS/_AEI resource list */
buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
status = acpi_rs_get_method_data(device_handle, name, &buffer);
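A minimal caller sketch for the new _AEI entry point exported above. dev_handle is assumed to have been obtained earlier (for example via acpi_get_handle()); the same handle can now also be passed to acpi_walk_resources() with "_AEI", since the name check above accepts it:

        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        acpi_status status;

        status = acpi_get_event_resources(dev_handle, &buffer);
        if (ACPI_SUCCESS(status)) {
                /* buffer.pointer holds the _AEI resource list (typically GpioInt
                 * descriptors); ACPICA allocated it, so free it when done.
                 */
                ACPI_FREE(buffer.pointer);
        }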
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index 6f5588e62c0a..c5d870406f41 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -63,14 +63,15 @@ static void acpi_tb_setup_fadt_registers(void);
typedef struct acpi_fadt_info {
char *name;
- u8 address64;
- u8 address32;
- u8 length;
+ u16 address64;
+ u16 address32;
+ u16 length;
u8 default_length;
u8 type;
} acpi_fadt_info;
+#define ACPI_FADT_OPTIONAL 0
#define ACPI_FADT_REQUIRED 1
#define ACPI_FADT_SEPARATE_LENGTH 2
@@ -87,7 +88,7 @@ static struct acpi_fadt_info fadt_info_table[] = {
ACPI_FADT_OFFSET(pm1b_event_block),
ACPI_FADT_OFFSET(pm1_event_length),
ACPI_PM1_REGISTER_WIDTH * 2, /* Enable + Status register */
- 0},
+ ACPI_FADT_OPTIONAL},
{"Pm1aControlBlock",
ACPI_FADT_OFFSET(xpm1a_control_block),
@@ -101,7 +102,7 @@ static struct acpi_fadt_info fadt_info_table[] = {
ACPI_FADT_OFFSET(pm1b_control_block),
ACPI_FADT_OFFSET(pm1_control_length),
ACPI_PM1_REGISTER_WIDTH,
- 0},
+ ACPI_FADT_OPTIONAL},
{"Pm2ControlBlock",
ACPI_FADT_OFFSET(xpm2_control_block),
@@ -139,7 +140,7 @@ static struct acpi_fadt_info fadt_info_table[] = {
typedef struct acpi_fadt_pm_info {
struct acpi_generic_address *target;
- u8 source;
+ u16 source;
u8 register_num;
} acpi_fadt_pm_info;
@@ -253,8 +254,13 @@ void acpi_tb_parse_fadt(u32 table_index)
acpi_tb_install_table((acpi_physical_address) acpi_gbl_FADT.Xdsdt,
ACPI_SIG_DSDT, ACPI_TABLE_INDEX_DSDT);
- acpi_tb_install_table((acpi_physical_address) acpi_gbl_FADT.Xfacs,
- ACPI_SIG_FACS, ACPI_TABLE_INDEX_FACS);
+ /* If Hardware Reduced flag is set, there is no FACS */
+
+ if (!acpi_gbl_reduced_hardware) {
+ acpi_tb_install_table((acpi_physical_address) acpi_gbl_FADT.
+ Xfacs, ACPI_SIG_FACS,
+ ACPI_TABLE_INDEX_FACS);
+ }
}
/*******************************************************************************
@@ -277,12 +283,12 @@ void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length)
{
/*
* Check if the FADT is larger than the largest table that we expect
- * (the ACPI 2.0/3.0 version). If so, truncate the table, and issue
+ * (the ACPI 5.0 version). If so, truncate the table, and issue
* a warning.
*/
if (length > sizeof(struct acpi_table_fadt)) {
ACPI_WARNING((AE_INFO,
- "FADT (revision %u) is longer than ACPI 2.0 version, "
+ "FADT (revision %u) is longer than ACPI 5.0 version, "
"truncating length %u to %u",
table->revision, length,
(u32)sizeof(struct acpi_table_fadt)));
@@ -297,6 +303,13 @@ void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length)
ACPI_MEMCPY(&acpi_gbl_FADT, table,
ACPI_MIN(length, sizeof(struct acpi_table_fadt)));
+ /* Take a copy of the Hardware Reduced flag */
+
+ acpi_gbl_reduced_hardware = FALSE;
+ if (acpi_gbl_FADT.flags & ACPI_FADT_HW_REDUCED) {
+ acpi_gbl_reduced_hardware = TRUE;
+ }
+
/* Convert the local copy of the FADT to the common internal format */
acpi_tb_convert_fadt();
@@ -502,6 +515,12 @@ static void acpi_tb_validate_fadt(void)
acpi_gbl_FADT.Xdsdt = (u64) acpi_gbl_FADT.dsdt;
}
+ /* If Hardware Reduced flag is set, we are all done */
+
+ if (acpi_gbl_reduced_hardware) {
+ return;
+ }
+
/* Examine all of the 64-bit extended address fields (X fields) */
for (i = 0; i < ACPI_FADT_INFO_ENTRIES; i++) {
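The Hardware Reduced handling above keys off a new bit in the FADT Flags field. A hedged sketch of that flag; the name matches the code above, but the bit position (20) comes from the ACPI 5.0 FADT definition rather than from this hunk:

/* ACPI 5.0 "Flags" bit for hardware-reduced platforms (assumed to be bit 20).
 * When set, the FACS and the fixed PM register blocks need not exist, which is
 * why the FACS install and the 64-bit X-field validation above are skipped.
 */
#define ACPI_FADT_HW_REDUCED        (1 << 20)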
diff --git a/drivers/acpi/acpica/tbfind.c b/drivers/acpi/acpica/tbfind.c
index a55cb2bb5abb..4903e36ea75a 100644
--- a/drivers/acpi/acpica/tbfind.c
+++ b/drivers/acpi/acpica/tbfind.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index 62365f6075dd..1aecf7baa4e0 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index 0f2d395feaba..09ca39e14337 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -135,6 +135,13 @@ acpi_status acpi_tb_initialize_facs(void)
{
acpi_status status;
+ /* If Hardware Reduced flag is set, there is no FACS */
+
+ if (acpi_gbl_reduced_hardware) {
+ acpi_gbl_FACS = NULL;
+ return (AE_OK);
+ }
+
status = acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS,
ACPI_CAST_INDIRECT_PTR(struct
acpi_table_header,
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index e7d13f5d3f2d..abcc6412c244 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c
index 7eb6c6cc1edf..4258f647ca3d 100644
--- a/drivers/acpi/acpica/tbxfroot.c
+++ b/drivers/acpi/acpica/tbxfroot.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utaddress.c b/drivers/acpi/acpica/utaddress.c
new file mode 100644
index 000000000000..67932aebe6dd
--- /dev/null
+++ b/drivers/acpi/acpica/utaddress.c
@@ -0,0 +1,294 @@
+/******************************************************************************
+ *
+ * Module Name: utaddress - op_region address range check
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2012, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acnamesp.h"
+
+#define _COMPONENT ACPI_UTILITIES
+ACPI_MODULE_NAME("utaddress")
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_add_address_range
+ *
+ * PARAMETERS: space_id - Address space ID
+ * Address - op_region start address
+ * Length - op_region length
+ * region_node - op_region namespace node
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Add the Operation Region address range to the global list.
+ * The only supported Space IDs are Memory and I/O. Called when
+ * the op_region address/length operands are fully evaluated.
+ *
+ * MUTEX: Locks the namespace
+ *
+ * NOTE: Because this interface is only called when an op_region argument
+ * list is evaluated, there cannot be any duplicate region_nodes.
+ * Duplicate Address/Length values are allowed, however, so that multiple
+ * address conflicts can be detected.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_ut_add_address_range(acpi_adr_space_type space_id,
+ acpi_physical_address address,
+ u32 length, struct acpi_namespace_node *region_node)
+{
+ struct acpi_address_range *range_info;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ut_add_address_range);
+
+ if ((space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) &&
+ (space_id != ACPI_ADR_SPACE_SYSTEM_IO)) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* Allocate/init a new info block, add it to the appropriate list */
+
+ range_info = ACPI_ALLOCATE(sizeof(struct acpi_address_range));
+ if (!range_info) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ range_info->start_address = address;
+ range_info->end_address = (address + length - 1);
+ range_info->region_node = region_node;
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ ACPI_FREE(range_info);
+ return_ACPI_STATUS(status);
+ }
+
+ range_info->next = acpi_gbl_address_range_list[space_id];
+ acpi_gbl_address_range_list[space_id] = range_info;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
+ "\nAdded [%4.4s] address range: 0x%p-0x%p\n",
+ acpi_ut_get_node_name(range_info->region_node),
+ ACPI_CAST_PTR(void, address),
+ ACPI_CAST_PTR(void, range_info->end_address)));
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_remove_address_range
+ *
+ * PARAMETERS: space_id - Address space ID
+ * region_node - op_region namespace node
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Remove the Operation Region from the global list. The only
+ * supported Space IDs are Memory and I/O. Called when an
+ * op_region is deleted.
+ *
+ * MUTEX: Assumes the namespace is locked
+ *
+ ******************************************************************************/
+
+void
+acpi_ut_remove_address_range(acpi_adr_space_type space_id,
+ struct acpi_namespace_node *region_node)
+{
+ struct acpi_address_range *range_info;
+ struct acpi_address_range *prev;
+
+ ACPI_FUNCTION_TRACE(ut_remove_address_range);
+
+ if ((space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) &&
+ (space_id != ACPI_ADR_SPACE_SYSTEM_IO)) {
+ return_VOID;
+ }
+
+ /* Get the appropriate list head and check the list */
+
+ range_info = prev = acpi_gbl_address_range_list[space_id];
+ while (range_info) {
+ if (range_info->region_node == region_node) {
+ if (range_info == prev) { /* Found at list head */
+ acpi_gbl_address_range_list[space_id] =
+ range_info->next;
+ } else {
+ prev->next = range_info->next;
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
+ "\nRemoved [%4.4s] address range: 0x%p-0x%p\n",
+ acpi_ut_get_node_name(range_info->
+ region_node),
+ ACPI_CAST_PTR(void,
+ range_info->
+ start_address),
+ ACPI_CAST_PTR(void,
+ range_info->
+ end_address)));
+
+ ACPI_FREE(range_info);
+ return_VOID;
+ }
+
+ prev = range_info;
+ range_info = range_info->next;
+ }
+
+ return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_check_address_range
+ *
+ * PARAMETERS: space_id - Address space ID
+ * Address - Start address
+ * Length - Length of address range
+ * Warn - TRUE if warning on overlap desired
+ *
+ * RETURN: Count of the number of conflicts detected. Zero is always
+ * returned for Space IDs other than Memory or I/O.
+ *
+ * DESCRIPTION: Check if the input address range overlaps any of the
+ * ASL operation region address ranges. The only supported
+ * Space IDs are Memory and I/O.
+ *
+ * MUTEX: Assumes the namespace is locked.
+ *
+ ******************************************************************************/
+
+u32
+acpi_ut_check_address_range(acpi_adr_space_type space_id,
+ acpi_physical_address address, u32 length, u8 warn)
+{
+ struct acpi_address_range *range_info;
+ acpi_physical_address end_address;
+ char *pathname;
+ u32 overlap_count = 0;
+
+ ACPI_FUNCTION_TRACE(ut_check_address_range);
+
+ if ((space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) &&
+ (space_id != ACPI_ADR_SPACE_SYSTEM_IO)) {
+ return_UINT32(0);
+ }
+
+ range_info = acpi_gbl_address_range_list[space_id];
+ end_address = address + length - 1;
+
+ /* Check entire list for all possible conflicts */
+
+ while (range_info) {
+ /*
+ * Check if the requested Address/Length overlaps this address_range.
+ * Four cases to consider:
+ *
+ * 1) Input address/length is contained completely in the address range
+ * 2) Input address/length overlaps range at the range start
+ * 3) Input address/length overlaps range at the range end
+ * 4) Input address/length completely encompasses the range
+ */
+ if ((address <= range_info->end_address) &&
+ (end_address >= range_info->start_address)) {
+
+ /* Found an address range overlap */
+
+ overlap_count++;
+ if (warn) { /* Optional warning message */
+ pathname =
+ acpi_ns_get_external_pathname(range_info->
+ region_node);
+
+ ACPI_WARNING((AE_INFO,
+ "0x%p-0x%p %s conflicts with Region %s %d",
+ ACPI_CAST_PTR(void, address),
+ ACPI_CAST_PTR(void, end_address),
+ acpi_ut_get_region_name(space_id),
+ pathname, overlap_count));
+ ACPI_FREE(pathname);
+ }
+ }
+
+ range_info = range_info->next;
+ }
+
+ return_UINT32(overlap_count);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_delete_address_lists
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Delete all global address range lists (called during
+ * subsystem shutdown).
+ *
+ ******************************************************************************/
+
+void acpi_ut_delete_address_lists(void)
+{
+ struct acpi_address_range *next;
+ struct acpi_address_range *range_info;
+ int i;
+
+ /* Delete all elements in all address range lists */
+
+ for (i = 0; i < ACPI_ADDRESS_RANGE_MAX; i++) {
+ next = acpi_gbl_address_range_list[i];
+
+ while (next) {
+ range_info = next;
+ next = range_info->next;
+ ACPI_FREE(range_info);
+ }
+
+ acpi_gbl_address_range_list[i] = NULL;
+ }
+}
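A usage sketch for the new range checker, with a hypothetical MMIO window (0xFED40000, length 0x1000). Per the function header above, the namespace must be locked around the call, and warn=TRUE only controls whether each overlap is logged:

        u32 conflicts = 0;

        if (ACPI_SUCCESS(acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE))) {
                conflicts = acpi_ut_check_address_range(ACPI_ADR_SPACE_SYSTEM_MEMORY,
                                                        0xFED40000, 0x1000, TRUE);
                (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
        }
        /* conflicts > 0 means the window overlaps at least one OperationRegion */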
diff --git a/drivers/acpi/acpica/utalloc.c b/drivers/acpi/acpica/utalloc.c
index 0a697351cf69..9982d2ea66fb 100644
--- a/drivers/acpi/acpica/utalloc.c
+++ b/drivers/acpi/acpica/utalloc.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c
index aded299a2fa8..3317c0a406ee 100644
--- a/drivers/acpi/acpica/utcopy.c
+++ b/drivers/acpi/acpica/utcopy.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c
index a1f8d7509e66..a0998a886318 100644
--- a/drivers/acpi/acpica/utdebug.c
+++ b/drivers/acpi/acpica/utdebug.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c
index 8b087e2d64f4..d42ede5260c7 100644
--- a/drivers/acpi/acpica/utdecode.c
+++ b/drivers/acpi/acpica/utdecode.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -171,7 +171,9 @@ const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS] = {
"SMBus",
"SystemCMOS",
"PCIBARTarget",
- "IPMI"
+ "IPMI",
+ "GeneralPurposeIo",
+ "GenericSerialBus"
};
char *acpi_ut_get_region_name(u8 space_id)
diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c
index 31f5a7832ef1..2a6c3e183697 100644
--- a/drivers/acpi/acpica/utdelete.c
+++ b/drivers/acpi/acpica/utdelete.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -215,11 +215,14 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
"***** Region %p\n", object));
- /* Invalidate the region address/length via the host OS */
-
- acpi_os_invalidate_address(object->region.space_id,
- object->region.address,
- (acpi_size) object->region.length);
+ /*
+ * Update address_range list. However, only permanent regions
+ * are installed in this list. (Not created within a method)
+ */
+ if (!(object->region.node->flags & ANOBJ_TEMPORARY)) {
+ acpi_ut_remove_address_range(object->region.space_id,
+ object->region.node);
+ }
second_desc = acpi_ns_get_secondary_object(object);
if (second_desc) {
diff --git a/drivers/acpi/acpica/uteval.c b/drivers/acpi/acpica/uteval.c
index 18f73c9d10bc..479f32b33415 100644
--- a/drivers/acpi/acpica/uteval.c
+++ b/drivers/acpi/acpica/uteval.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c
index ffba0a39c3e8..4153584cf526 100644
--- a/drivers/acpi/acpica/utglobal.c
+++ b/drivers/acpi/acpica/utglobal.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -264,6 +264,12 @@ acpi_status acpi_ut_init_globals(void)
return_ACPI_STATUS(status);
}
+ /* Address Range lists */
+
+ for (i = 0; i < ACPI_ADDRESS_RANGE_MAX; i++) {
+ acpi_gbl_address_range_list[i] = NULL;
+ }
+
/* Mutex locked flags */
for (i = 0; i < ACPI_NUM_MUTEX; i++) {
diff --git a/drivers/acpi/acpica/utids.c b/drivers/acpi/acpica/utids.c
index b679ea693545..c92eb1d93785 100644
--- a/drivers/acpi/acpica/utids.c
+++ b/drivers/acpi/acpica/utids.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utinit.c b/drivers/acpi/acpica/utinit.c
index 191b6828cce9..8359c0c5dc98 100644
--- a/drivers/acpi/acpica/utinit.c
+++ b/drivers/acpi/acpica/utinit.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -92,6 +92,7 @@ static void acpi_ut_terminate(void)
gpe_xrupt_info = next_gpe_xrupt_info;
}
+ acpi_ut_delete_address_lists();
return_VOID;
}
diff --git a/drivers/acpi/acpica/utlock.c b/drivers/acpi/acpica/utlock.c
index f6bb75c6faf5..155fd786d0f2 100644
--- a/drivers/acpi/acpica/utlock.c
+++ b/drivers/acpi/acpica/utlock.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utmath.c b/drivers/acpi/acpica/utmath.c
index ce481da9bb45..2491a552b0e6 100644
--- a/drivers/acpi/acpica/utmath.c
+++ b/drivers/acpi/acpica/utmath.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utmisc.c b/drivers/acpi/acpica/utmisc.c
index c33a852d4f42..86f19db74e05 100644
--- a/drivers/acpi/acpica/utmisc.c
+++ b/drivers/acpi/acpica/utmisc.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c
index 7d797e2baecd..43174df33121 100644
--- a/drivers/acpi/acpica/utmutex.c
+++ b/drivers/acpi/acpica/utmutex.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -293,14 +293,10 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id)
acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id)
{
- acpi_thread_id this_thread_id;
-
ACPI_FUNCTION_NAME(ut_release_mutex);
- this_thread_id = acpi_os_get_thread_id();
-
ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Thread %u releasing Mutex [%s]\n",
- (u32)this_thread_id,
+ (u32)acpi_os_get_thread_id(),
acpi_ut_get_mutex_name(mutex_id)));
if (mutex_id > ACPI_MAX_MUTEX) {
@@ -329,7 +325,8 @@ acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id)
* the ACPI subsystem code.
*/
for (i = mutex_id; i < ACPI_NUM_MUTEX; i++) {
- if (acpi_gbl_mutex_info[i].thread_id == this_thread_id) {
+ if (acpi_gbl_mutex_info[i].thread_id ==
+ acpi_os_get_thread_id()) {
if (i == mutex_id) {
continue;
}
diff --git a/drivers/acpi/acpica/utobject.c b/drivers/acpi/acpica/utobject.c
index 188340a017b4..b112744fc9ae 100644
--- a/drivers/acpi/acpica/utobject.c
+++ b/drivers/acpi/acpica/utobject.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c
index 1fb10cb8f11d..2360cf70c18c 100644
--- a/drivers/acpi/acpica/utosi.c
+++ b/drivers/acpi/acpica/utosi.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utresrc.c b/drivers/acpi/acpica/utresrc.c
index 6ffd3a8bdaa5..9d441ea70305 100644
--- a/drivers/acpi/acpica/utresrc.c
+++ b/drivers/acpi/acpica/utresrc.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -43,7 +43,7 @@
#include <acpi/acpi.h>
#include "accommon.h"
-#include "amlresrc.h"
+#include "acresrc.h"
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME("utresrc")
@@ -154,6 +154,138 @@ const char *acpi_gbl_typ_decode[] = {
"TypeF"
};
+const char *acpi_gbl_ppc_decode[] = {
+ "PullDefault",
+ "PullUp",
+ "PullDown",
+ "PullNone"
+};
+
+const char *acpi_gbl_ior_decode[] = {
+ "IoRestrictionNone",
+ "IoRestrictionInputOnly",
+ "IoRestrictionOutputOnly",
+ "IoRestrictionNoneAndPreserve"
+};
+
+const char *acpi_gbl_dts_decode[] = {
+ "Width8bit",
+ "Width16bit",
+ "Width32bit",
+ "Width64bit",
+ "Width128bit",
+ "Width256bit",
+};
+
+/* GPIO connection type */
+
+const char *acpi_gbl_ct_decode[] = {
+ "Interrupt",
+ "I/O"
+};
+
+/* Serial bus type */
+
+const char *acpi_gbl_sbt_decode[] = {
+ "/* UNKNOWN serial bus type */",
+ "I2C",
+ "SPI",
+ "UART"
+};
+
+/* I2C serial bus access mode */
+
+const char *acpi_gbl_am_decode[] = {
+ "AddressingMode7Bit",
+ "AddressingMode10Bit"
+};
+
+/* I2C serial bus slave mode */
+
+const char *acpi_gbl_sm_decode[] = {
+ "ControllerInitiated",
+ "DeviceInitiated"
+};
+
+/* SPI serial bus wire mode */
+
+const char *acpi_gbl_wm_decode[] = {
+ "FourWireMode",
+ "ThreeWireMode"
+};
+
+/* SPI serial clock phase */
+
+const char *acpi_gbl_cph_decode[] = {
+ "ClockPhaseFirst",
+ "ClockPhaseSecond"
+};
+
+/* SPI serial bus clock polarity */
+
+const char *acpi_gbl_cpo_decode[] = {
+ "ClockPolarityLow",
+ "ClockPolarityHigh"
+};
+
+/* SPI serial bus device polarity */
+
+const char *acpi_gbl_dp_decode[] = {
+ "PolarityLow",
+ "PolarityHigh"
+};
+
+/* UART serial bus endian */
+
+const char *acpi_gbl_ed_decode[] = {
+ "LittleEndian",
+ "BigEndian"
+};
+
+/* UART serial bus bits per byte */
+
+const char *acpi_gbl_bpb_decode[] = {
+ "DataBitsFive",
+ "DataBitsSix",
+ "DataBitsSeven",
+ "DataBitsEight",
+ "DataBitsNine",
+ "/* UNKNOWN Bits per byte */",
+ "/* UNKNOWN Bits per byte */",
+ "/* UNKNOWN Bits per byte */"
+};
+
+/* UART serial bus stop bits */
+
+const char *acpi_gbl_sb_decode[] = {
+ "StopBitsNone",
+ "StopBitsOne",
+ "StopBitsOnePlusHalf",
+ "StopBitsTwo"
+};
+
+/* UART serial bus flow control */
+
+const char *acpi_gbl_fc_decode[] = {
+ "FlowControlNone",
+ "FlowControlHardware",
+ "FlowControlXON",
+ "/* UNKNOWN flow control keyword */"
+};
+
+/* UART serial bus parity type */
+
+const char *acpi_gbl_pt_decode[] = {
+ "ParityTypeNone",
+ "ParityTypeEven",
+ "ParityTypeOdd",
+ "ParityTypeMark",
+ "ParityTypeSpace",
+ "/* UNKNOWN parity keyword */",
+ "/* UNKNOWN parity keyword */",
+ "/* UNKNOWN parity keyword */"
+};
+
#endif
/*
@@ -173,7 +305,7 @@ const u8 acpi_gbl_resource_aml_sizes[] = {
ACPI_AML_SIZE_SMALL(struct aml_resource_end_dependent),
ACPI_AML_SIZE_SMALL(struct aml_resource_io),
ACPI_AML_SIZE_SMALL(struct aml_resource_fixed_io),
- 0,
+ ACPI_AML_SIZE_SMALL(struct aml_resource_fixed_dma),
0,
0,
0,
@@ -193,7 +325,17 @@ const u8 acpi_gbl_resource_aml_sizes[] = {
ACPI_AML_SIZE_LARGE(struct aml_resource_address16),
ACPI_AML_SIZE_LARGE(struct aml_resource_extended_irq),
ACPI_AML_SIZE_LARGE(struct aml_resource_address64),
- ACPI_AML_SIZE_LARGE(struct aml_resource_extended_address64)
+ ACPI_AML_SIZE_LARGE(struct aml_resource_extended_address64),
+ ACPI_AML_SIZE_LARGE(struct aml_resource_gpio),
+ 0,
+ ACPI_AML_SIZE_LARGE(struct aml_resource_common_serialbus),
+};
+
+const u8 acpi_gbl_resource_aml_serial_bus_sizes[] = {
+ 0,
+ ACPI_AML_SIZE_LARGE(struct aml_resource_i2c_serialbus),
+ ACPI_AML_SIZE_LARGE(struct aml_resource_spi_serialbus),
+ ACPI_AML_SIZE_LARGE(struct aml_resource_uart_serialbus),
};
/*
@@ -209,35 +351,49 @@ static const u8 acpi_gbl_resource_types[] = {
0,
0,
0,
- ACPI_SMALL_VARIABLE_LENGTH,
- ACPI_FIXED_LENGTH,
- ACPI_SMALL_VARIABLE_LENGTH,
- ACPI_FIXED_LENGTH,
- ACPI_FIXED_LENGTH,
- ACPI_FIXED_LENGTH,
- 0,
+ ACPI_SMALL_VARIABLE_LENGTH, /* 04 IRQ */
+ ACPI_FIXED_LENGTH, /* 05 DMA */
+ ACPI_SMALL_VARIABLE_LENGTH, /* 06 start_dependent_functions */
+ ACPI_FIXED_LENGTH, /* 07 end_dependent_functions */
+ ACPI_FIXED_LENGTH, /* 08 IO */
+ ACPI_FIXED_LENGTH, /* 09 fixed_iO */
+ ACPI_FIXED_LENGTH, /* 0_a fixed_dMA */
0,
0,
0,
- ACPI_VARIABLE_LENGTH,
- ACPI_FIXED_LENGTH,
+ ACPI_VARIABLE_LENGTH, /* 0_e vendor_short */
+ ACPI_FIXED_LENGTH, /* 0_f end_tag */
/* Large descriptors */
0,
- ACPI_FIXED_LENGTH,
- ACPI_FIXED_LENGTH,
+ ACPI_FIXED_LENGTH, /* 01 Memory24 */
+ ACPI_FIXED_LENGTH, /* 02 generic_register */
0,
- ACPI_VARIABLE_LENGTH,
- ACPI_FIXED_LENGTH,
- ACPI_FIXED_LENGTH,
- ACPI_VARIABLE_LENGTH,
- ACPI_VARIABLE_LENGTH,
- ACPI_VARIABLE_LENGTH,
- ACPI_VARIABLE_LENGTH,
- ACPI_FIXED_LENGTH
+ ACPI_VARIABLE_LENGTH, /* 04 vendor_long */
+ ACPI_FIXED_LENGTH, /* 05 Memory32 */
+ ACPI_FIXED_LENGTH, /* 06 memory32_fixed */
+ ACPI_VARIABLE_LENGTH, /* 07 Dword* address */
+ ACPI_VARIABLE_LENGTH, /* 08 Word* address */
+ ACPI_VARIABLE_LENGTH, /* 09 extended_iRQ */
+ ACPI_VARIABLE_LENGTH, /* 0_a Qword* address */
+ ACPI_FIXED_LENGTH, /* 0_b Extended* address */
+ ACPI_VARIABLE_LENGTH, /* 0_c Gpio* */
+ 0,
+ ACPI_VARIABLE_LENGTH /* 0_e *serial_bus */
};
+/*
+ * For the i_aSL compiler/disassembler, we don't want any error messages
+ * because the disassembler uses the resource validation code to determine
+ * if Buffer objects are actually Resource Templates.
+ */
+#ifdef ACPI_ASL_COMPILER
+#define ACPI_RESOURCE_ERROR(plist)
+#else
+#define ACPI_RESOURCE_ERROR(plist) ACPI_ERROR(plist)
+#endif
+
/*******************************************************************************
*
* FUNCTION: acpi_ut_walk_aml_resources
@@ -265,6 +421,7 @@ acpi_ut_walk_aml_resources(u8 * aml,
u8 resource_index;
u32 length;
u32 offset = 0;
+ u8 end_tag[2] = { 0x79, 0x00 };
ACPI_FUNCTION_TRACE(ut_walk_aml_resources);
@@ -286,6 +443,10 @@ acpi_ut_walk_aml_resources(u8 * aml,
status = acpi_ut_validate_resource(aml, &resource_index);
if (ACPI_FAILURE(status)) {
+ /*
+ * Exit on failure. Cannot continue because the descriptor length
+ * may be bogus also.
+ */
return_ACPI_STATUS(status);
}
@@ -300,7 +461,7 @@ acpi_ut_walk_aml_resources(u8 * aml,
user_function(aml, length, offset, resource_index,
context);
if (ACPI_FAILURE(status)) {
- return (status);
+ return_ACPI_STATUS(status);
}
}
@@ -333,7 +494,19 @@ acpi_ut_walk_aml_resources(u8 * aml,
/* Did not find an end_tag descriptor */
- return (AE_AML_NO_RESOURCE_END_TAG);
+ if (user_function) {
+
+ /* Insert an end_tag anyway. acpi_rs_get_list_length always leaves room */
+
+ (void)acpi_ut_validate_resource(end_tag, &resource_index);
+ status =
+ user_function(end_tag, 2, offset, resource_index, context);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ }
+
+ return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
}
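
The synthetic terminator above is just the two-byte small EndTag descriptor: the tag byte packs the item name in bits 7:3 and the data length in bits 2:0, so 0x79 is (0x0F << 3) | 1, and the single data byte is the checksum field (zero means the checksum is treated as valid). A stand-alone sketch of that decoding, purely illustrative and not part of the patch:

#include <stdio.h>

int main(void)
{
	unsigned char end_tag[2] = { 0x79, 0x00 };
	unsigned char tag = end_tag[0];

	printf("large descriptor: %s\n", (tag & 0x80) ? "yes" : "no"); /* bit 7 clear -> small */
	printf("item name:        0x%02X\n", (tag >> 3) & 0x0F);       /* 0x0F == EndTag */
	printf("data length:      %u\n", tag & 0x07);                  /* one data byte follows */
	printf("checksum byte:    0x%02X (0 == treat as valid)\n", end_tag[1]);
	return 0;
}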
/*******************************************************************************
@@ -354,6 +527,7 @@ acpi_ut_walk_aml_resources(u8 * aml,
acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
{
+ union aml_resource *aml_resource;
u8 resource_type;
u8 resource_index;
acpi_rs_length resource_length;
@@ -375,7 +549,7 @@ acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
/* Verify the large resource type (name) against the max */
if (resource_type > ACPI_RESOURCE_NAME_LARGE_MAX) {
- return (AE_AML_INVALID_RESOURCE_TYPE);
+ goto invalid_resource;
}
/*
@@ -392,15 +566,17 @@ acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
((resource_type & ACPI_RESOURCE_NAME_SMALL_MASK) >> 3);
}
- /* Check validity of the resource type, zero indicates name is invalid */
-
+ /*
+ * Check validity of the resource type, via acpi_gbl_resource_types. Zero
+ * indicates an invalid resource.
+ */
if (!acpi_gbl_resource_types[resource_index]) {
- return (AE_AML_INVALID_RESOURCE_TYPE);
+ goto invalid_resource;
}
/*
- * 2) Validate the resource_length field. This ensures that the length
- * is at least reasonable, and guarantees that it is non-zero.
+ * Validate the resource_length field. This ensures that the length
+ * is at least reasonable, and guarantees that it is non-zero.
*/
resource_length = acpi_ut_get_resource_length(aml);
minimum_resource_length = acpi_gbl_resource_aml_sizes[resource_index];
@@ -413,7 +589,7 @@ acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
/* Fixed length resource, length must match exactly */
if (resource_length != minimum_resource_length) {
- return (AE_AML_BAD_RESOURCE_LENGTH);
+ goto bad_resource_length;
}
break;
@@ -422,7 +598,7 @@ acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
/* Variable length resource, length must be at least the minimum */
if (resource_length < minimum_resource_length) {
- return (AE_AML_BAD_RESOURCE_LENGTH);
+ goto bad_resource_length;
}
break;
@@ -432,7 +608,7 @@ acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
if ((resource_length > minimum_resource_length) ||
(resource_length < (minimum_resource_length - 1))) {
- return (AE_AML_BAD_RESOURCE_LENGTH);
+ goto bad_resource_length;
}
break;
@@ -440,7 +616,23 @@ acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
/* Shouldn't happen (because of validation earlier), but be sure */
- return (AE_AML_INVALID_RESOURCE_TYPE);
+ goto invalid_resource;
+ }
+
+ aml_resource = ACPI_CAST_PTR(union aml_resource, aml);
+ if (resource_type == ACPI_RESOURCE_NAME_SERIAL_BUS) {
+
+ /* Validate the bus_type field */
+
+ if ((aml_resource->common_serial_bus.type == 0) ||
+ (aml_resource->common_serial_bus.type >
+ AML_RESOURCE_MAX_SERIALBUSTYPE)) {
+ ACPI_RESOURCE_ERROR((AE_INFO,
+ "Invalid/unsupported SerialBus resource descriptor: BusType 0x%2.2X",
+ aml_resource->common_serial_bus.
+ type));
+ return (AE_AML_INVALID_RESOURCE_TYPE);
+ }
}
/* Optionally return the resource table index */
@@ -450,6 +642,22 @@ acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
}
return (AE_OK);
+
+ invalid_resource:
+
+ ACPI_RESOURCE_ERROR((AE_INFO,
+ "Invalid/unsupported resource descriptor: Type 0x%2.2X",
+ resource_type));
+ return (AE_AML_INVALID_RESOURCE_TYPE);
+
+ bad_resource_length:
+
+ ACPI_RESOURCE_ERROR((AE_INFO,
+ "Invalid resource descriptor length: Type "
+ "0x%2.2X, Length 0x%4.4X, MinLength 0x%4.4X",
+ resource_type, resource_length,
+ minimum_resource_length));
+ return (AE_AML_BAD_RESOURCE_LENGTH);
}
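
The added SerialBus check rejects a BusType of zero or anything above AML_RESOURCE_MAX_SERIALBUSTYPE; the three valid types line up with the new serial-bus size table earlier in the file (1 = I2C, 2 = SPI, 3 = UART). A minimal stand-alone version of the same range check, with name strings that are only for illustration:

#include <stdio.h>

#define MAX_SERIALBUS_TYPE 3	/* assumed to mirror AML_RESOURCE_MAX_SERIALBUSTYPE */

static const char *const bus_type_name[] = { "invalid", "I2C", "SPI", "UART" };

static int validate_bus_type(unsigned char type)
{
	if (type == 0 || type > MAX_SERIALBUS_TYPE)
		return -1;	/* rejected, as the patch does with AE_AML_INVALID_RESOURCE_TYPE */
	return 0;
}

int main(void)
{
	for (unsigned char t = 0; t <= 4; t++)
		printf("BusType %u -> %s\n", t,
		       validate_bus_type(t) ? "rejected" : bus_type_name[t]);
	return 0;
}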
/*******************************************************************************
diff --git a/drivers/acpi/acpica/utstate.c b/drivers/acpi/acpica/utstate.c
index 30c21e1a9360..4267477c2797 100644
--- a/drivers/acpi/acpica/utstate.c
+++ b/drivers/acpi/acpica/utstate.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c
index 420ebfe08c72..644e8c8ebc4b 100644
--- a/drivers/acpi/acpica/utxface.c
+++ b/drivers/acpi/acpica/utxface.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -48,6 +48,7 @@
#include "acnamesp.h"
#include "acdebug.h"
#include "actables.h"
+#include "acinterp.h"
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME("utxface")
@@ -640,4 +641,41 @@ acpi_status acpi_install_interface_handler(acpi_interface_handler handler)
}
ACPI_EXPORT_SYMBOL(acpi_install_interface_handler)
+
+/*****************************************************************************
+ *
+ * FUNCTION: acpi_check_address_range
+ *
+ * PARAMETERS: space_id - Address space ID
+ * Address - Start address
+ * Length - Length
+ * Warn - TRUE if warning on overlap desired
+ *
+ * RETURN: Count of the number of conflicts detected.
+ *
+ * DESCRIPTION: Check if the input address range overlaps any of the
+ * ASL operation region address ranges.
+ *
+ ****************************************************************************/
+u32
+acpi_check_address_range(acpi_adr_space_type space_id,
+ acpi_physical_address address,
+ acpi_size length, u8 warn)
+{
+ u32 overlaps;
+ acpi_status status;
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ return (0);
+ }
+
+ overlaps = acpi_ut_check_address_range(space_id, address,
+ (u32)length, warn);
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+ return (overlaps);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_check_address_range)
#endif /* !ACPI_ASL_COMPILER */
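
acpi_check_address_range() is the replacement for the driver-side operation-region list that the osl.c hunk later in this patch deletes: callers pass an address space, a start address, a length and a warn flag, and get back how many AML operation regions overlap that range. A hedged sketch of a possible caller; the window being claimed and the function name are placeholders:

/* Illustrative sketch, not from the patch: refuse to claim an MMIO window
 * that overlaps an AML OperationRegion.
 */
#include <linux/acpi.h>
#include <linux/errno.h>

static int example_claim_window(acpi_physical_address start, acpi_size len)
{
	u32 conflicts;

	conflicts = acpi_check_address_range(ACPI_ADR_SPACE_SYSTEM_MEMORY,
					     start, len, 1 /* warn */);
	if (conflicts)
		return -EBUSY;	/* AML already owns part of this range */
	return 0;
}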
diff --git a/drivers/acpi/acpica/utxferror.c b/drivers/acpi/acpica/utxferror.c
index 8d0245ec4315..52b568af1819 100644
--- a/drivers/acpi/acpica/utxferror.c
+++ b/drivers/acpi/acpica/utxferror.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utxfmutex.c b/drivers/acpi/acpica/utxfmutex.c
new file mode 100644
index 000000000000..1427d191d15a
--- /dev/null
+++ b/drivers/acpi/acpica/utxfmutex.c
@@ -0,0 +1,187 @@
+/*******************************************************************************
+ *
+ * Module Name: utxfmutex - external AML mutex access functions
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2012, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acnamesp.h"
+
+#define _COMPONENT ACPI_UTILITIES
+ACPI_MODULE_NAME("utxfmutex")
+
+/* Local prototypes */
+static acpi_status
+acpi_ut_get_mutex_object(acpi_handle handle,
+ acpi_string pathname,
+ union acpi_operand_object **ret_obj);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_get_mutex_object
+ *
+ * PARAMETERS: Handle - Mutex or prefix handle (optional)
+ * Pathname - Mutex pathname (optional)
+ * ret_obj - Where the mutex object is returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Get an AML mutex object. The mutex node is pointed to by
+ * Handle:Pathname. Either Handle or Pathname can be NULL, but
+ * not both.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ut_get_mutex_object(acpi_handle handle,
+ acpi_string pathname,
+ union acpi_operand_object **ret_obj)
+{
+ struct acpi_namespace_node *mutex_node;
+ union acpi_operand_object *mutex_obj;
+ acpi_status status;
+
+ /* Parameter validation */
+
+ if (!ret_obj || (!handle && !pathname)) {
+ return (AE_BAD_PARAMETER);
+ }
+
+ /* Get the namespace node for the mutex */
+
+ mutex_node = handle;
+ if (pathname != NULL) {
+ status = acpi_get_handle(handle, pathname,
+ ACPI_CAST_PTR(acpi_handle,
+ &mutex_node));
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ }
+
+ /* Ensure that we actually have a Mutex object */
+
+ if (!mutex_node || (mutex_node->type != ACPI_TYPE_MUTEX)) {
+ return (AE_TYPE);
+ }
+
+ /* Get the low-level mutex object */
+
+ mutex_obj = acpi_ns_get_attached_object(mutex_node);
+ if (!mutex_obj) {
+ return (AE_NULL_OBJECT);
+ }
+
+ *ret_obj = mutex_obj;
+ return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_acquire_mutex
+ *
+ * PARAMETERS: Handle - Mutex or prefix handle (optional)
+ * Pathname - Mutex pathname (optional)
+ * Timeout - Max time to wait for the lock (millisec)
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Acquire an AML mutex. This is a device driver interface to
+ * AML mutex objects, and allows for transaction locking between
+ * drivers and AML code. The mutex node is pointed to by
+ * Handle:Pathname. Either Handle or Pathname can be NULL, but
+ * not both.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_acquire_mutex(acpi_handle handle, acpi_string pathname, u16 timeout)
+{
+ acpi_status status;
+ union acpi_operand_object *mutex_obj;
+
+ /* Get the low-level mutex associated with Handle:Pathname */
+
+ status = acpi_ut_get_mutex_object(handle, pathname, &mutex_obj);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ /* Acquire the OS mutex */
+
+ status = acpi_os_acquire_mutex(mutex_obj->mutex.os_mutex, timeout);
+ return (status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_release_mutex
+ *
+ * PARAMETERS: Handle - Mutex or prefix handle (optional)
+ * Pathname - Mutex pathname (optional)
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Release an AML mutex. This is a device driver interface to
+ * AML mutex objects, and allows for transaction locking between
+ * drivers and AML code. The mutex node is pointed to by
+ * Handle:Pathname. Either Handle or Pathname can be NULL, but
+ * not both.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_release_mutex(acpi_handle handle, acpi_string pathname)
+{
+ acpi_status status;
+ union acpi_operand_object *mutex_obj;
+
+ /* Get the low-level mutex associated with Handle:Pathname */
+
+ status = acpi_ut_get_mutex_object(handle, pathname, &mutex_obj);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ /* Release the OS mutex */
+
+ acpi_os_release_mutex(mutex_obj->mutex.os_mutex);
+ return (AE_OK);
+}
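
acpi_acquire_mutex()/acpi_release_mutex() give drivers transaction-level locking against AML that takes the same Mutex object; the mutex is named by a handle, a pathname, or both. A sketch of how a driver might wrap a firmware-shared hardware access; the pathname and the 1000 ms timeout are invented for illustration:

/* Illustrative sketch, not from the patch. A real driver would use the
 * Mutex name its firmware actually defines.
 */
#include <linux/acpi.h>

static acpi_status example_locked_transaction(void)
{
	acpi_status status;

	status = acpi_acquire_mutex(NULL, "\\_SB.PCI0.MUT0", 1000);
	if (ACPI_FAILURE(status))
		return status;

	/* ... touch hardware that AML also guards with the same Mutex ... */

	return acpi_release_mutex(NULL, "\\_SB.PCI0.MUT0");
}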
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
index 61540360d5ce..e45350cb6ac8 100644
--- a/drivers/acpi/apei/apei-base.c
+++ b/drivers/acpi/apei/apei-base.c
@@ -34,13 +34,13 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/acpi.h>
+#include <linux/acpi_io.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/kref.h>
#include <linux/rculist.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>
-#include <acpi/atomicio.h>
#include "apei-internal.h"
@@ -70,7 +70,7 @@ int __apei_exec_read_register(struct acpi_whea_header *entry, u64 *val)
{
int rc;
- rc = acpi_atomic_read(val, &entry->register_region);
+ rc = apei_read(val, &entry->register_region);
if (rc)
return rc;
*val >>= entry->register_region.bit_offset;
@@ -116,13 +116,13 @@ int __apei_exec_write_register(struct acpi_whea_header *entry, u64 val)
val <<= entry->register_region.bit_offset;
if (entry->flags & APEI_EXEC_PRESERVE_REGISTER) {
u64 valr = 0;
- rc = acpi_atomic_read(&valr, &entry->register_region);
+ rc = apei_read(&valr, &entry->register_region);
if (rc)
return rc;
valr &= ~(entry->mask << entry->register_region.bit_offset);
val |= valr;
}
- rc = acpi_atomic_write(val, &entry->register_region);
+ rc = apei_write(val, &entry->register_region);
return rc;
}
@@ -243,7 +243,7 @@ static int pre_map_gar_callback(struct apei_exec_context *ctx,
u8 ins = entry->instruction;
if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
- return acpi_pre_map_gar(&entry->register_region);
+ return acpi_os_map_generic_address(&entry->register_region);
return 0;
}
@@ -276,7 +276,7 @@ static int post_unmap_gar_callback(struct apei_exec_context *ctx,
u8 ins = entry->instruction;
if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
- acpi_post_unmap_gar(&entry->register_region);
+ acpi_os_unmap_generic_address(&entry->register_region);
return 0;
}
@@ -421,6 +421,17 @@ static int apei_resources_merge(struct apei_resources *resources1,
return 0;
}
+int apei_resources_add(struct apei_resources *resources,
+ unsigned long start, unsigned long size,
+ bool iomem)
+{
+ if (iomem)
+ return apei_res_add(&resources->iomem, start, size);
+ else
+ return apei_res_add(&resources->ioport, start, size);
+}
+EXPORT_SYMBOL_GPL(apei_resources_add);
+
/*
* EINJ has two groups of GARs (EINJ table entry and trigger table
* entry), so common resources are subtracted from the trigger table
@@ -438,8 +449,19 @@ int apei_resources_sub(struct apei_resources *resources1,
}
EXPORT_SYMBOL_GPL(apei_resources_sub);
+static int apei_get_nvs_callback(__u64 start, __u64 size, void *data)
+{
+ struct apei_resources *resources = data;
+ return apei_res_add(&resources->iomem, start, size);
+}
+
+static int apei_get_nvs_resources(struct apei_resources *resources)
+{
+ return acpi_nvs_for_each_region(apei_get_nvs_callback, resources);
+}
+
/*
- * IO memory/port rersource management mechanism is used to check
+ * IO memory/port resource management mechanism is used to check
* whether memory/port area used by GARs conflicts with normal memory
* or IO memory/port of devices.
*/
@@ -448,21 +470,35 @@ int apei_resources_request(struct apei_resources *resources,
{
struct apei_res *res, *res_bak = NULL;
struct resource *r;
+ struct apei_resources nvs_resources;
int rc;
rc = apei_resources_sub(resources, &apei_resources_all);
if (rc)
return rc;
+ /*
+ * Some firmware uses ACPI NVS region, that has been marked as
+ * busy, so exclude it from APEI resources to avoid false
+ * conflict.
+ */
+ apei_resources_init(&nvs_resources);
+ rc = apei_get_nvs_resources(&nvs_resources);
+ if (rc)
+ goto res_fini;
+ rc = apei_resources_sub(resources, &nvs_resources);
+ if (rc)
+ goto res_fini;
+
rc = -EINVAL;
list_for_each_entry(res, &resources->iomem, list) {
r = request_mem_region(res->start, res->end - res->start,
desc);
if (!r) {
pr_err(APEI_PFX
- "Can not request iomem region <%016llx-%016llx> for GARs.\n",
+ "Can not request [mem %#010llx-%#010llx] for %s registers\n",
(unsigned long long)res->start,
- (unsigned long long)res->end);
+ (unsigned long long)res->end - 1, desc);
res_bak = res;
goto err_unmap_iomem;
}
@@ -472,9 +508,9 @@ int apei_resources_request(struct apei_resources *resources,
r = request_region(res->start, res->end - res->start, desc);
if (!r) {
pr_err(APEI_PFX
- "Can not request ioport region <%016llx-%016llx> for GARs.\n",
+ "Can not request [io %#06llx-%#06llx] for %s registers\n",
(unsigned long long)res->start,
- (unsigned long long)res->end);
+ (unsigned long long)res->end - 1, desc);
res_bak = res;
goto err_unmap_ioport;
}
@@ -500,6 +536,8 @@ err_unmap_iomem:
break;
release_mem_region(res->start, res->end - res->start);
}
+res_fini:
+ apei_resources_fini(&nvs_resources);
return rc;
}
EXPORT_SYMBOL_GPL(apei_resources_request);
@@ -553,6 +591,96 @@ static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr)
return 0;
}
+/* read GAR in interrupt (including NMI) or process context */
+int apei_read(u64 *val, struct acpi_generic_address *reg)
+{
+ int rc;
+ u64 address;
+ u32 tmp, width = reg->bit_width;
+ acpi_status status;
+
+ rc = apei_check_gar(reg, &address);
+ if (rc)
+ return rc;
+
+ if (width == 64)
+ width = 32; /* Break into two 32-bit transfers */
+
+ *val = 0;
+ switch(reg->space_id) {
+ case ACPI_ADR_SPACE_SYSTEM_MEMORY:
+ status = acpi_os_read_memory((acpi_physical_address)
+ address, &tmp, width);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+ *val = tmp;
+
+ if (reg->bit_width == 64) {
+ /* Read the top 32 bits */
+ status = acpi_os_read_memory((acpi_physical_address)
+ (address + 4), &tmp, 32);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+ *val |= ((u64)tmp << 32);
+ }
+ break;
+ case ACPI_ADR_SPACE_SYSTEM_IO:
+ status = acpi_os_read_port(address, (u32 *)val, reg->bit_width);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(apei_read);
+
+/* write GAR in interrupt (including NMI) or process context */
+int apei_write(u64 val, struct acpi_generic_address *reg)
+{
+ int rc;
+ u64 address;
+ u32 width = reg->bit_width;
+ acpi_status status;
+
+ rc = apei_check_gar(reg, &address);
+ if (rc)
+ return rc;
+
+ if (width == 64)
+ width = 32; /* Break into two 32-bit transfers */
+
+ switch (reg->space_id) {
+ case ACPI_ADR_SPACE_SYSTEM_MEMORY:
+ status = acpi_os_write_memory((acpi_physical_address)
+ address, ACPI_LODWORD(val),
+ width);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ if (reg->bit_width == 64) {
+ status = acpi_os_write_memory((acpi_physical_address)
+ (address + 4),
+ ACPI_HIDWORD(val), 32);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+ }
+ break;
+ case ACPI_ADR_SPACE_SYSTEM_IO:
+ status = acpi_os_write_port(address, val, reg->bit_width);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(apei_write);
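
Both helpers split a 64-bit generic address access into two 32-bit transfers: the low word at the register address, the high word at address + 4, reassembled with a shift and an OR. The arithmetic in isolation, as a stand-alone illustration:

#include <stdio.h>
#include <stdint.h>

/* Combine two 32-bit halves the way apei_read() does: low word at the base
 * address, high word at base + 4.
 */
static uint64_t combine64(uint32_t lo, uint32_t hi)
{
	return (uint64_t)lo | ((uint64_t)hi << 32);
}

int main(void)
{
	uint32_t lo = 0x89abcdef, hi = 0x01234567;

	printf("0x%016llx\n", (unsigned long long)combine64(lo, hi));
	return 0;	/* prints 0x0123456789abcdef */
}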
+
static int collect_res_callback(struct apei_exec_context *ctx,
struct acpi_whea_header *entry,
void *data)
diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
index f57050e7a5e7..cca240a33038 100644
--- a/drivers/acpi/apei/apei-internal.h
+++ b/drivers/acpi/apei/apei-internal.h
@@ -68,6 +68,9 @@ static inline int apei_exec_run_optional(struct apei_exec_context *ctx, u8 actio
/* IP has been set in instruction function */
#define APEI_EXEC_SET_IP 1
+int apei_read(u64 *val, struct acpi_generic_address *reg);
+int apei_write(u64 val, struct acpi_generic_address *reg);
+
int __apei_exec_read_register(struct acpi_whea_header *entry, u64 *val);
int __apei_exec_write_register(struct acpi_whea_header *entry, u64 val);
int apei_exec_read_register(struct apei_exec_context *ctx,
@@ -95,6 +98,9 @@ static inline void apei_resources_init(struct apei_resources *resources)
}
void apei_resources_fini(struct apei_resources *resources);
+int apei_resources_add(struct apei_resources *resources,
+ unsigned long start, unsigned long size,
+ bool iomem);
int apei_resources_sub(struct apei_resources *resources1,
struct apei_resources *resources2);
int apei_resources_request(struct apei_resources *resources,
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c
index 589b96c38704..5b898d4dda99 100644
--- a/drivers/acpi/apei/einj.c
+++ b/drivers/acpi/apei/einj.c
@@ -43,6 +43,42 @@
#define FIRMWARE_TIMEOUT (1 * NSEC_PER_MSEC)
/*
+ * ACPI version 5 provides a SET_ERROR_TYPE_WITH_ADDRESS action.
+ */
+static int acpi5;
+
+struct set_error_type_with_address {
+ u32 type;
+ u32 vendor_extension;
+ u32 flags;
+ u32 apicid;
+ u64 memory_address;
+ u64 memory_address_range;
+ u32 pcie_sbdf;
+};
+enum {
+ SETWA_FLAGS_APICID = 1,
+ SETWA_FLAGS_MEM = 2,
+ SETWA_FLAGS_PCIE_SBDF = 4,
+};
+
+/*
+ * Vendor extensions for platform specific operations
+ */
+struct vendor_error_type_extension {
+ u32 length;
+ u32 pcie_sbdf;
+ u16 vendor_id;
+ u16 device_id;
+ u8 rev_id;
+ u8 reserved[3];
+};
+
+static u32 vendor_flags;
+static struct debugfs_blob_wrapper vendor_blob;
+static char vendor_dev[64];
+
+/*
* Some BIOSes allow parameters to the SET_ERROR_TYPE entries in the
* EINJ table through an unpublished extension. Use with caution as
* most will ignore the parameter and make their own choice of address
@@ -103,7 +139,14 @@ static struct apei_exec_ins_type einj_ins_type[] = {
*/
static DEFINE_MUTEX(einj_mutex);
-static struct einj_parameter *einj_param;
+static void *einj_param;
+
+#ifndef readq
+static inline __u64 readq(volatile void __iomem *addr)
+{
+ return ((__u64)readl(addr+4) << 32) + readl(addr);
+}
+#endif
#ifndef writeq
static inline void writeq(__u64 val, volatile void __iomem *addr)
@@ -158,10 +201,31 @@ static int einj_timedout(u64 *t)
return 0;
}
-static u64 einj_get_parameter_address(void)
+static void check_vendor_extension(u64 paddr,
+ struct set_error_type_with_address *v5param)
+{
+ int offset = readl(&v5param->vendor_extension);
+ struct vendor_error_type_extension *v;
+ u32 sbdf;
+
+ if (!offset)
+ return;
+ v = ioremap(paddr + offset, sizeof(*v));
+ if (!v)
+ return;
+ sbdf = readl(&v->pcie_sbdf);
+ sprintf(vendor_dev, "%x:%x:%x.%x vendor_id=%x device_id=%x rev_id=%x\n",
+ sbdf >> 24, (sbdf >> 16) & 0xff,
+ (sbdf >> 11) & 0x1f, (sbdf >> 8) & 0x7,
+ readw(&v->vendor_id), readw(&v->device_id),
+ readb(&v->rev_id));
+ iounmap(v);
+}
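
check_vendor_extension() unpacks the PCIe segment/bus/device/function from a single 32-bit word: segment in bits 31:24, bus in 23:16, device in 15:11 and function in 10:8. The same unpacking stand-alone, with an invented value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t sbdf = 0x00011803;	/* invented value, prints 0:1:3.0 */

	printf("%x:%x:%x.%x\n",
	       sbdf >> 24, (sbdf >> 16) & 0xff,
	       (sbdf >> 11) & 0x1f, (sbdf >> 8) & 0x7);
	return 0;
}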
+
+static void *einj_get_parameter_address(void)
{
int i;
- u64 paddr = 0;
+ u64 paddrv4 = 0, paddrv5 = 0;
struct acpi_whea_header *entry;
entry = EINJ_TAB_ENTRY(einj_tab);
@@ -170,12 +234,40 @@ static u64 einj_get_parameter_address(void)
entry->instruction == ACPI_EINJ_WRITE_REGISTER &&
entry->register_region.space_id ==
ACPI_ADR_SPACE_SYSTEM_MEMORY)
- memcpy(&paddr, &entry->register_region.address,
- sizeof(paddr));
+ memcpy(&paddrv4, &entry->register_region.address,
+ sizeof(paddrv4));
+ if (entry->action == ACPI_EINJ_SET_ERROR_TYPE_WITH_ADDRESS &&
+ entry->instruction == ACPI_EINJ_WRITE_REGISTER &&
+ entry->register_region.space_id ==
+ ACPI_ADR_SPACE_SYSTEM_MEMORY)
+ memcpy(&paddrv5, &entry->register_region.address,
+ sizeof(paddrv5));
entry++;
}
+ if (paddrv5) {
+ struct set_error_type_with_address *v5param;
+
+ v5param = ioremap(paddrv5, sizeof(*v5param));
+ if (v5param) {
+ acpi5 = 1;
+ check_vendor_extension(paddrv5, v5param);
+ return v5param;
+ }
+ }
+ if (paddrv4) {
+ struct einj_parameter *v4param;
+
+ v4param = ioremap(paddrv4, sizeof(*v4param));
+ if (!v4param)
+ return 0;
+ if (readq(&v4param->reserved1) || readq(&v4param->reserved2)) {
+ iounmap(v4param);
+ return 0;
+ }
+ return v4param;
+ }
- return paddr;
+ return 0;
}
/* do sanity check to trigger table */
@@ -194,8 +286,29 @@ static int einj_check_trigger_header(struct acpi_einj_trigger *trigger_tab)
return 0;
}
+static struct acpi_generic_address *einj_get_trigger_parameter_region(
+ struct acpi_einj_trigger *trigger_tab, u64 param1, u64 param2)
+{
+ int i;
+ struct acpi_whea_header *entry;
+
+ entry = (struct acpi_whea_header *)
+ ((char *)trigger_tab + sizeof(struct acpi_einj_trigger));
+ for (i = 0; i < trigger_tab->entry_count; i++) {
+ if (entry->action == ACPI_EINJ_TRIGGER_ERROR &&
+ entry->instruction == ACPI_EINJ_WRITE_REGISTER_VALUE &&
+ entry->register_region.space_id ==
+ ACPI_ADR_SPACE_SYSTEM_MEMORY &&
+ (entry->register_region.address & param2) == (param1 & param2))
+ return &entry->register_region;
+ entry++;
+ }
+
+ return NULL;
+}
/* Execute instructions in trigger error action table */
-static int __einj_error_trigger(u64 trigger_paddr)
+static int __einj_error_trigger(u64 trigger_paddr, u32 type,
+ u64 param1, u64 param2)
{
struct acpi_einj_trigger *trigger_tab = NULL;
struct apei_exec_context trigger_ctx;
@@ -204,14 +317,16 @@ static int __einj_error_trigger(u64 trigger_paddr)
struct resource *r;
u32 table_size;
int rc = -EIO;
+ struct acpi_generic_address *trigger_param_region = NULL;
r = request_mem_region(trigger_paddr, sizeof(*trigger_tab),
"APEI EINJ Trigger Table");
if (!r) {
pr_err(EINJ_PFX
- "Can not request iomem region <%016llx-%016llx> for Trigger table.\n",
+ "Can not request [mem %#010llx-%#010llx] for Trigger table\n",
(unsigned long long)trigger_paddr,
- (unsigned long long)trigger_paddr+sizeof(*trigger_tab));
+ (unsigned long long)trigger_paddr +
+ sizeof(*trigger_tab) - 1);
goto out;
}
trigger_tab = ioremap_cache(trigger_paddr, sizeof(*trigger_tab));
@@ -232,9 +347,9 @@ static int __einj_error_trigger(u64 trigger_paddr)
"APEI EINJ Trigger Table");
if (!r) {
pr_err(EINJ_PFX
-"Can not request iomem region <%016llx-%016llx> for Trigger Table Entry.\n",
- (unsigned long long)trigger_paddr+sizeof(*trigger_tab),
- (unsigned long long)trigger_paddr + table_size);
+"Can not request [mem %#010llx-%#010llx] for Trigger Table Entry\n",
+ (unsigned long long)trigger_paddr + sizeof(*trigger_tab),
+ (unsigned long long)trigger_paddr + table_size - 1);
goto out_rel_header;
}
iounmap(trigger_tab);
@@ -255,6 +370,30 @@ static int __einj_error_trigger(u64 trigger_paddr)
rc = apei_resources_sub(&trigger_resources, &einj_resources);
if (rc)
goto out_fini;
+ /*
+ * Some firmware will access target address specified in
+ * param1 to trigger the error when injecting memory error.
+ * This will cause resource conflict with regular memory. So
+ * remove it from trigger table resources.
+ */
+ if (param_extension && (type & 0x0038) && param2) {
+ struct apei_resources addr_resources;
+ apei_resources_init(&addr_resources);
+ trigger_param_region = einj_get_trigger_parameter_region(
+ trigger_tab, param1, param2);
+ if (trigger_param_region) {
+ rc = apei_resources_add(&addr_resources,
+ trigger_param_region->address,
+ trigger_param_region->bit_width/8, true);
+ if (rc)
+ goto out_fini;
+ rc = apei_resources_sub(&trigger_resources,
+ &addr_resources);
+ }
+ apei_resources_fini(&addr_resources);
+ if (rc)
+ goto out_fini;
+ }
rc = apei_resources_request(&trigger_resources, "APEI EINJ Trigger");
if (rc)
goto out_fini;
@@ -293,12 +432,56 @@ static int __einj_error_inject(u32 type, u64 param1, u64 param2)
if (rc)
return rc;
apei_exec_ctx_set_input(&ctx, type);
- rc = apei_exec_run(&ctx, ACPI_EINJ_SET_ERROR_TYPE);
- if (rc)
- return rc;
- if (einj_param) {
- writeq(param1, &einj_param->param1);
- writeq(param2, &einj_param->param2);
+ if (acpi5) {
+ struct set_error_type_with_address *v5param = einj_param;
+
+ writel(type, &v5param->type);
+ if (type & 0x80000000) {
+ switch (vendor_flags) {
+ case SETWA_FLAGS_APICID:
+ writel(param1, &v5param->apicid);
+ break;
+ case SETWA_FLAGS_MEM:
+ writeq(param1, &v5param->memory_address);
+ writeq(param2, &v5param->memory_address_range);
+ break;
+ case SETWA_FLAGS_PCIE_SBDF:
+ writel(param1, &v5param->pcie_sbdf);
+ break;
+ }
+ writel(vendor_flags, &v5param->flags);
+ } else {
+ switch (type) {
+ case ACPI_EINJ_PROCESSOR_CORRECTABLE:
+ case ACPI_EINJ_PROCESSOR_UNCORRECTABLE:
+ case ACPI_EINJ_PROCESSOR_FATAL:
+ writel(param1, &v5param->apicid);
+ writel(SETWA_FLAGS_APICID, &v5param->flags);
+ break;
+ case ACPI_EINJ_MEMORY_CORRECTABLE:
+ case ACPI_EINJ_MEMORY_UNCORRECTABLE:
+ case ACPI_EINJ_MEMORY_FATAL:
+ writeq(param1, &v5param->memory_address);
+ writeq(param2, &v5param->memory_address_range);
+ writel(SETWA_FLAGS_MEM, &v5param->flags);
+ break;
+ case ACPI_EINJ_PCIX_CORRECTABLE:
+ case ACPI_EINJ_PCIX_UNCORRECTABLE:
+ case ACPI_EINJ_PCIX_FATAL:
+ writel(param1, &v5param->pcie_sbdf);
+ writel(SETWA_FLAGS_PCIE_SBDF, &v5param->flags);
+ break;
+ }
+ }
+ } else {
+ rc = apei_exec_run(&ctx, ACPI_EINJ_SET_ERROR_TYPE);
+ if (rc)
+ return rc;
+ if (einj_param) {
+ struct einj_parameter *v4param = einj_param;
+ writeq(param1, &v4param->param1);
+ writeq(param2, &v4param->param2);
+ }
}
rc = apei_exec_run(&ctx, ACPI_EINJ_EXECUTE_OPERATION);
if (rc)
@@ -324,7 +507,7 @@ static int __einj_error_inject(u32 type, u64 param1, u64 param2)
if (rc)
return rc;
trigger_paddr = apei_exec_ctx_get_output(&ctx);
- rc = __einj_error_trigger(trigger_paddr);
+ rc = __einj_error_trigger(trigger_paddr, type, param1, param2);
if (rc)
return rc;
rc = apei_exec_run_optional(&ctx, ACPI_EINJ_END_OPERATION);
@@ -408,15 +591,25 @@ static int error_type_set(void *data, u64 val)
{
int rc;
u32 available_error_type = 0;
+ u32 tval, vendor;
+
+ /*
+ * Vendor defined types have 0x80000000 bit set, and
+ * are not enumerated by ACPI_EINJ_GET_ERROR_TYPE
+ */
+ vendor = val & 0x80000000;
+ tval = val & 0x7fffffff;
/* Only one error type can be specified */
- if (val & (val - 1))
- return -EINVAL;
- rc = einj_get_available_error_type(&available_error_type);
- if (rc)
- return rc;
- if (!(val & available_error_type))
+ if (tval & (tval - 1))
return -EINVAL;
+ if (!vendor) {
+ rc = einj_get_available_error_type(&available_error_type);
+ if (rc)
+ return rc;
+ if (!(val & available_error_type))
+ return -EINVAL;
+ }
error_type = val;
return 0;
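
The single-type restriction is the classic power-of-two test: tval & (tval - 1) clears the lowest set bit, so the expression is zero exactly when at most one error-type bit is set (the vendor bit 0x80000000 has already been masked off). Stand-alone:

#include <stdio.h>
#include <stdint.h>

static int at_most_one_bit(uint32_t x)
{
	return (x & (x - 1)) == 0;	/* clearing the lowest set bit leaves nothing */
}

int main(void)
{
	printf("0x08 -> %d\n", at_most_one_bit(0x08));	/* 1: a single type     */
	printf("0x18 -> %d\n", at_most_one_bit(0x18));	/* 0: two types at once */
	printf("0x00 -> %d\n", at_most_one_bit(0x00));	/* 1: nothing set       */
	return 0;
}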
@@ -455,7 +648,6 @@ static int einj_check_table(struct acpi_table_einj *einj_tab)
static int __init einj_init(void)
{
int rc;
- u64 param_paddr;
acpi_status status;
struct dentry *fentry;
struct apei_exec_context ctx;
@@ -465,10 +657,9 @@ static int __init einj_init(void)
status = acpi_get_table(ACPI_SIG_EINJ, 0,
(struct acpi_table_header **)&einj_tab);
- if (status == AE_NOT_FOUND) {
- pr_info(EINJ_PFX "Table is not found!\n");
+ if (status == AE_NOT_FOUND)
return -ENODEV;
- } else if (ACPI_FAILURE(status)) {
+ else if (ACPI_FAILURE(status)) {
const char *msg = acpi_format_exception(status);
pr_err(EINJ_PFX "Failed to get table, %s\n", msg);
return -EINVAL;
@@ -509,23 +700,30 @@ static int __init einj_init(void)
rc = apei_exec_pre_map_gars(&ctx);
if (rc)
goto err_release;
- if (param_extension) {
- param_paddr = einj_get_parameter_address();
- if (param_paddr) {
- einj_param = ioremap(param_paddr, sizeof(*einj_param));
- rc = -ENOMEM;
- if (!einj_param)
- goto err_unmap;
- fentry = debugfs_create_x64("param1", S_IRUSR | S_IWUSR,
- einj_debug_dir, &error_param1);
- if (!fentry)
- goto err_unmap;
- fentry = debugfs_create_x64("param2", S_IRUSR | S_IWUSR,
- einj_debug_dir, &error_param2);
- if (!fentry)
- goto err_unmap;
- } else
- pr_warn(EINJ_PFX "Parameter extension is not supported.\n");
+
+ einj_param = einj_get_parameter_address();
+ if ((param_extension || acpi5) && einj_param) {
+ fentry = debugfs_create_x64("param1", S_IRUSR | S_IWUSR,
+ einj_debug_dir, &error_param1);
+ if (!fentry)
+ goto err_unmap;
+ fentry = debugfs_create_x64("param2", S_IRUSR | S_IWUSR,
+ einj_debug_dir, &error_param2);
+ if (!fentry)
+ goto err_unmap;
+ }
+
+ if (vendor_dev[0]) {
+ vendor_blob.data = vendor_dev;
+ vendor_blob.size = strlen(vendor_dev);
+ fentry = debugfs_create_blob("vendor", S_IRUSR,
+ einj_debug_dir, &vendor_blob);
+ if (!fentry)
+ goto err_unmap;
+ fentry = debugfs_create_x32("vendor_flags", S_IRUSR | S_IWUSR,
+ einj_debug_dir, &vendor_flags);
+ if (!fentry)
+ goto err_unmap;
}
pr_info(EINJ_PFX "Error INJection is initialized.\n");
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index 127408069ca7..eb9fab5b96e4 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -932,8 +932,10 @@ static int erst_check_table(struct acpi_table_erst *erst_tab)
static int erst_open_pstore(struct pstore_info *psi);
static int erst_close_pstore(struct pstore_info *psi);
static ssize_t erst_reader(u64 *id, enum pstore_type_id *type,
- struct timespec *time, struct pstore_info *psi);
-static int erst_writer(enum pstore_type_id type, u64 *id, unsigned int part,
+ struct timespec *time, char **buf,
+ struct pstore_info *psi);
+static int erst_writer(enum pstore_type_id type, enum kmsg_dump_reason reason,
+ u64 *id, unsigned int part,
size_t size, struct pstore_info *psi);
static int erst_clearer(enum pstore_type_id type, u64 id,
struct pstore_info *psi);
@@ -986,17 +988,23 @@ static int erst_close_pstore(struct pstore_info *psi)
}
static ssize_t erst_reader(u64 *id, enum pstore_type_id *type,
- struct timespec *time, struct pstore_info *psi)
+ struct timespec *time, char **buf,
+ struct pstore_info *psi)
{
int rc;
ssize_t len = 0;
u64 record_id;
- struct cper_pstore_record *rcd = (struct cper_pstore_record *)
- (erst_info.buf - sizeof(*rcd));
+ struct cper_pstore_record *rcd;
+ size_t rcd_len = sizeof(*rcd) + erst_info.bufsize;
if (erst_disable)
return -ENODEV;
+ rcd = kmalloc(rcd_len, GFP_KERNEL);
+ if (!rcd) {
+ rc = -ENOMEM;
+ goto out;
+ }
skip:
rc = erst_get_record_id_next(&reader_pos, &record_id);
if (rc)
@@ -1004,22 +1012,27 @@ skip:
/* no more record */
if (record_id == APEI_ERST_INVALID_RECORD_ID) {
- rc = -1;
+ rc = -EINVAL;
goto out;
}
- len = erst_read(record_id, &rcd->hdr, sizeof(*rcd) +
- erst_info.bufsize);
+ len = erst_read(record_id, &rcd->hdr, rcd_len);
/* The record may be cleared by others, try read next record */
if (len == -ENOENT)
goto skip;
- else if (len < 0) {
- rc = -1;
+ else if (len < sizeof(*rcd)) {
+ rc = -EIO;
goto out;
}
if (uuid_le_cmp(rcd->hdr.creator_id, CPER_CREATOR_PSTORE) != 0)
goto skip;
+ *buf = kmalloc(len, GFP_KERNEL);
+ if (*buf == NULL) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ memcpy(*buf, rcd->data, len - sizeof(*rcd));
*id = record_id;
if (uuid_le_cmp(rcd->sec_hdr.section_type,
CPER_SECTION_TYPE_DMESG) == 0)
@@ -1037,10 +1050,12 @@ skip:
time->tv_nsec = 0;
out:
+ kfree(rcd);
return (rc < 0) ? rc : (len - sizeof(*rcd));
}
-static int erst_writer(enum pstore_type_id type, u64 *id, unsigned int part,
+static int erst_writer(enum pstore_type_id type, enum kmsg_dump_reason reason,
+ u64 *id, unsigned int part,
size_t size, struct pstore_info *psi)
{
struct cper_pstore_record *rcd = (struct cper_pstore_record *)
@@ -1112,10 +1127,9 @@ static int __init erst_init(void)
status = acpi_get_table(ACPI_SIG_ERST, 0,
(struct acpi_table_header **)&erst_tab);
- if (status == AE_NOT_FOUND) {
- pr_info(ERST_PFX "Table is not found!\n");
+ if (status == AE_NOT_FOUND)
goto err;
- } else if (ACPI_FAILURE(status)) {
+ else if (ACPI_FAILURE(status)) {
const char *msg = acpi_format_exception(status);
pr_err(ERST_PFX "Failed to get table, %s\n", msg);
rc = -EINVAL;
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index b8e08cb67a18..9b3cac0abecc 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -33,6 +33,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/acpi.h>
+#include <linux/acpi_io.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
@@ -45,8 +46,9 @@
#include <linux/irq_work.h>
#include <linux/llist.h>
#include <linux/genalloc.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
#include <acpi/apei.h>
-#include <acpi/atomicio.h>
#include <acpi/hed.h>
#include <asm/mce.h>
#include <asm/tlbflush.h>
@@ -118,7 +120,7 @@ struct ghes_estatus_cache {
struct rcu_head rcu;
};
-int ghes_disable;
+bool ghes_disable;
module_param_named(disable, ghes_disable, bool, 0);
static int ghes_panic_timeout __read_mostly = 30;
@@ -299,7 +301,7 @@ static struct ghes *ghes_new(struct acpi_hest_generic *generic)
if (!ghes)
return ERR_PTR(-ENOMEM);
ghes->generic = generic;
- rc = acpi_pre_map_gar(&generic->error_status_address);
+ rc = acpi_os_map_generic_address(&generic->error_status_address);
if (rc)
goto err_free;
error_block_length = generic->error_block_length;
@@ -319,7 +321,7 @@ static struct ghes *ghes_new(struct acpi_hest_generic *generic)
return ghes;
err_unmap:
- acpi_post_unmap_gar(&generic->error_status_address);
+ acpi_os_unmap_generic_address(&generic->error_status_address);
err_free:
kfree(ghes);
return ERR_PTR(rc);
@@ -328,7 +330,7 @@ err_free:
static void ghes_fini(struct ghes *ghes)
{
kfree(ghes->estatus);
- acpi_post_unmap_gar(&ghes->generic->error_status_address);
+ acpi_os_unmap_generic_address(&ghes->generic->error_status_address);
}
enum {
@@ -399,7 +401,7 @@ static int ghes_read_estatus(struct ghes *ghes, int silent)
u32 len;
int rc;
- rc = acpi_atomic_read(&buf_paddr, &g->error_status_address);
+ rc = apei_read(&buf_paddr, &g->error_status_address);
if (rc) {
if (!silent && printk_ratelimit())
pr_warning(FW_WARN GHES_PFX
@@ -476,6 +478,27 @@ static void ghes_do_proc(const struct acpi_hest_generic_status *estatus)
}
#endif
}
+#ifdef CONFIG_ACPI_APEI_PCIEAER
+ else if (!uuid_le_cmp(*(uuid_le *)gdata->section_type,
+ CPER_SEC_PCIE)) {
+ struct cper_sec_pcie *pcie_err;
+ pcie_err = (struct cper_sec_pcie *)(gdata+1);
+ if (sev == GHES_SEV_RECOVERABLE &&
+ sec_sev == GHES_SEV_RECOVERABLE &&
+ pcie_err->validation_bits & CPER_PCIE_VALID_DEVICE_ID &&
+ pcie_err->validation_bits & CPER_PCIE_VALID_AER_INFO) {
+ unsigned int devfn;
+ int aer_severity;
+ devfn = PCI_DEVFN(pcie_err->device_id.device,
+ pcie_err->device_id.function);
+ aer_severity = cper_severity_to_aer(sev);
+ aer_recover_queue(pcie_err->device_id.segment,
+ pcie_err->device_id.bus,
+ devfn, aer_severity);
+ }
+
+ }
+#endif
}
}
@@ -483,16 +506,22 @@ static void __ghes_print_estatus(const char *pfx,
const struct acpi_hest_generic *generic,
const struct acpi_hest_generic_status *estatus)
{
+ static atomic_t seqno;
+ unsigned int curr_seqno;
+ char pfx_seq[64];
+
if (pfx == NULL) {
if (ghes_severity(estatus->error_severity) <=
GHES_SEV_CORRECTED)
- pfx = KERN_WARNING HW_ERR;
+ pfx = KERN_WARNING;
else
- pfx = KERN_ERR HW_ERR;
+ pfx = KERN_ERR;
}
+ curr_seqno = atomic_inc_return(&seqno);
+ snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}" HW_ERR, pfx, curr_seqno);
printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
- pfx, generic->header.source_id);
- apei_estatus_print(pfx, estatus);
+ pfx_seq, generic->header.source_id);
+ apei_estatus_print(pfx_seq, estatus);
}
static int ghes_print_estatus(const char *pfx,
@@ -711,26 +740,34 @@ static int ghes_notify_sci(struct notifier_block *this,
return ret;
}
+static struct llist_node *llist_nodes_reverse(struct llist_node *llnode)
+{
+ struct llist_node *next, *tail = NULL;
+
+ while (llnode) {
+ next = llnode->next;
+ llnode->next = tail;
+ tail = llnode;
+ llnode = next;
+ }
+
+ return tail;
+}
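
llist_del_all() hands the queued entries back newest-first, so both consumers reverse the chain in place before walking it; llist_nodes_reverse() is the standard three-pointer reversal of a singly linked list. The same loop over a plain struct, purely illustrative:

#include <stdio.h>
#include <stddef.h>

struct node {
	int val;
	struct node *next;
};

static struct node *reverse(struct node *head)
{
	struct node *next, *tail = NULL;

	while (head) {
		next = head->next;
		head->next = tail;	/* re-point the node at what came before it */
		tail = head;
		head = next;
	}
	return tail;
}

int main(void)
{
	struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };

	for (struct node *n = reverse(&a); n; n = n->next)
		printf("%d ", n->val);	/* prints: 3 2 1 */
	printf("\n");
	return 0;
}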
+
static void ghes_proc_in_irq(struct irq_work *irq_work)
{
- struct llist_node *llnode, *next, *tail = NULL;
+ struct llist_node *llnode, *next;
struct ghes_estatus_node *estatus_node;
struct acpi_hest_generic *generic;
struct acpi_hest_generic_status *estatus;
u32 len, node_len;
+ llnode = llist_del_all(&ghes_estatus_llist);
/*
* Because the time order of estatus in list is reversed,
* revert it back to proper order.
*/
- llnode = llist_del_all(&ghes_estatus_llist);
- while (llnode) {
- next = llnode->next;
- llnode->next = tail;
- tail = llnode;
- llnode = next;
- }
- llnode = tail;
+ llnode = llist_nodes_reverse(llnode);
while (llnode) {
next = llnode->next;
estatus_node = llist_entry(llnode, struct ghes_estatus_node,
@@ -750,6 +787,32 @@ static void ghes_proc_in_irq(struct irq_work *irq_work)
}
}
+static void ghes_print_queued_estatus(void)
+{
+ struct llist_node *llnode;
+ struct ghes_estatus_node *estatus_node;
+ struct acpi_hest_generic *generic;
+ struct acpi_hest_generic_status *estatus;
+ u32 len, node_len;
+
+ llnode = llist_del_all(&ghes_estatus_llist);
+ /*
+ * Because the time order of estatus in list is reversed,
+ * revert it back to proper order.
+ */
+ llnode = llist_nodes_reverse(llnode);
+ while (llnode) {
+ estatus_node = llist_entry(llnode, struct ghes_estatus_node,
+ llnode);
+ estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
+ len = apei_estatus_len(estatus);
+ node_len = GHES_ESTATUS_NODE_LEN(len);
+ generic = estatus_node->generic;
+ ghes_print_estatus(NULL, generic, estatus);
+ llnode = llnode->next;
+ }
+}
+
static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
{
struct ghes *ghes, *ghes_global = NULL;
@@ -775,7 +838,8 @@ static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
if (sev_global >= GHES_SEV_PANIC) {
oops_begin();
- __ghes_print_estatus(KERN_EMERG HW_ERR, ghes_global->generic,
+ ghes_print_queued_estatus();
+ __ghes_print_estatus(KERN_EMERG, ghes_global->generic,
ghes_global->estatus);
/* reboot to log the error! */
if (panic_timeout == 0)
diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c
index 05fee06f4d6e..7f00cf38098f 100644
--- a/drivers/acpi/apei/hest.c
+++ b/drivers/acpi/apei/hest.c
@@ -41,7 +41,7 @@
#define HEST_PFX "HEST: "
-int hest_disable;
+bool hest_disable;
EXPORT_SYMBOL_GPL(hest_disable);
/* HEST table parsing */
@@ -221,10 +221,9 @@ void __init acpi_hest_init(void)
status = acpi_get_table(ACPI_SIG_HEST, 0,
(struct acpi_table_header **)&hest_tab);
- if (status == AE_NOT_FOUND) {
- pr_info(HEST_PFX "Table not found.\n");
+ if (status == AE_NOT_FOUND)
goto err;
- } else if (ACPI_FAILURE(status)) {
+ else if (ACPI_FAILURE(status)) {
const char *msg = acpi_format_exception(status);
pr_err(HEST_PFX "Failed to get table, %s\n", msg);
rc = -EINVAL;
diff --git a/drivers/acpi/atomicio.c b/drivers/acpi/atomicio.c
index cfc0cc10af39..d4a5b3d3657b 100644
--- a/drivers/acpi/atomicio.c
+++ b/drivers/acpi/atomicio.c
@@ -32,6 +32,8 @@
#include <linux/rculist.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/highmem.h>
#include <acpi/atomicio.h>
#define ACPI_PFX "ACPI: "
@@ -97,6 +99,37 @@ static void __iomem *__acpi_try_ioremap(phys_addr_t paddr,
return NULL;
}
+#ifndef CONFIG_IA64
+#define should_use_kmap(pfn) page_is_ram(pfn)
+#else
+/* ioremap will take care of cache attributes */
+#define should_use_kmap(pfn) 0
+#endif
+
+static void __iomem *acpi_map(phys_addr_t pg_off, unsigned long pg_sz)
+{
+ unsigned long pfn;
+
+ pfn = pg_off >> PAGE_SHIFT;
+ if (should_use_kmap(pfn)) {
+ if (pg_sz > PAGE_SIZE)
+ return NULL;
+ return (void __iomem __force *)kmap(pfn_to_page(pfn));
+ } else
+ return ioremap(pg_off, pg_sz);
+}
+
+static void acpi_unmap(phys_addr_t pg_off, void __iomem *vaddr)
+{
+ unsigned long pfn;
+
+ pfn = pg_off >> PAGE_SHIFT;
+ if (page_is_ram(pfn))
+ kunmap(pfn_to_page(pfn));
+ else
+ iounmap(vaddr);
+}
+
/*
* Used to pre-map the specified IO memory area. First try to find
* whether the area is already pre-mapped, if it is, increase the
@@ -119,7 +152,7 @@ static void __iomem *acpi_pre_map(phys_addr_t paddr,
pg_off = paddr & PAGE_MASK;
pg_sz = ((paddr + size + PAGE_SIZE - 1) & PAGE_MASK) - pg_off;
- vaddr = ioremap(pg_off, pg_sz);
+ vaddr = acpi_map(pg_off, pg_sz);
if (!vaddr)
return NULL;
map = kmalloc(sizeof(*map), GFP_KERNEL);
@@ -135,7 +168,7 @@ static void __iomem *acpi_pre_map(phys_addr_t paddr,
vaddr = __acpi_try_ioremap(paddr, size);
if (vaddr) {
spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
- iounmap(map->vaddr);
+ acpi_unmap(pg_off, map->vaddr);
kfree(map);
return vaddr;
}
@@ -144,7 +177,7 @@ static void __iomem *acpi_pre_map(phys_addr_t paddr,
return map->vaddr + (paddr - map->paddr);
err_unmap:
- iounmap(vaddr);
+ acpi_unmap(pg_off, vaddr);
return NULL;
}
@@ -177,7 +210,7 @@ static void acpi_post_unmap(phys_addr_t paddr, unsigned long size)
return;
synchronize_rcu();
- iounmap(map->vaddr);
+ acpi_unmap(map->paddr, map->vaddr);
kfree(map);
}
@@ -260,6 +293,21 @@ int acpi_post_unmap_gar(struct acpi_generic_address *reg)
}
EXPORT_SYMBOL_GPL(acpi_post_unmap_gar);
+#ifdef readq
+static inline u64 read64(const volatile void __iomem *addr)
+{
+ return readq(addr);
+}
+#else
+static inline u64 read64(const volatile void __iomem *addr)
+{
+ u64 l, h;
+ l = readl(addr);
+ h = readl(addr+4);
+ return l | (h << 32);
+}
+#endif
+
/*
* Can be used in atomic (including NMI) or process context. RCU read
* lock can only be released after the IO memory area accessing.
@@ -280,11 +328,9 @@ static int acpi_atomic_read_mem(u64 paddr, u64 *val, u32 width)
case 32:
*val = readl(addr);
break;
-#ifdef readq
case 64:
- *val = readq(addr);
+ *val = read64(addr);
break;
-#endif
default:
return -EINVAL;
}
@@ -293,6 +339,19 @@ static int acpi_atomic_read_mem(u64 paddr, u64 *val, u32 width)
return 0;
}
+#ifdef writeq
+static inline void write64(u64 val, volatile void __iomem *addr)
+{
+ writeq(val, addr);
+}
+#else
+static inline void write64(u64 val, volatile void __iomem *addr)
+{
+ writel(val, addr);
+ writel(val>>32, addr+4);
+}
+#endif
+
static int acpi_atomic_write_mem(u64 paddr, u64 val, u32 width)
{
void __iomem *addr;
@@ -309,11 +368,9 @@ static int acpi_atomic_write_mem(u64 paddr, u64 val, u32 width)
case 32:
writel(val, addr);
break;
-#ifdef writeq
case 64:
- writeq(val, addr);
+ write64(val, addr);
break;
-#endif
default:
return -EINVAL;
}
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 7711d94a0409..86933ca8b472 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -873,7 +873,7 @@ DECLARE_FILE_FUNCTIONS(alarm);
static const struct battery_file {
struct file_operations ops;
- mode_t mode;
+ umode_t mode;
const char *name;
} acpi_battery_file[] = {
FILE_DESCRIPTION_RO(info),
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index 19a61136d848..88eb14304667 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -43,7 +43,7 @@ MODULE_AUTHOR("Kristen Carlson Accardi");
MODULE_DESCRIPTION(ACPI_DOCK_DRIVER_DESCRIPTION);
MODULE_LICENSE("GPL");
-static int immediate_undock = 1;
+static bool immediate_undock = 1;
module_param(immediate_undock, bool, 0644);
MODULE_PARM_DESC(immediate_undock, "1 (default) will cause the driver to "
"undock immediately when the undock button is pressed, 0 will cause"
diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
index 6c47ae9793a7..b258cab9061c 100644
--- a/drivers/acpi/ec_sys.c
+++ b/drivers/acpi/ec_sys.c
@@ -105,7 +105,7 @@ int acpi_ec_add_debugfs(struct acpi_ec *ec, unsigned int ec_device_count)
{
struct dentry *dev_dir;
char name[64];
- mode_t mode = 0400;
+ umode_t mode = 0400;
if (ec_device_count == 0) {
acpi_ec_debugfs_dir = debugfs_create_dir("ec", NULL);
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index 3b5c3189fd99..e56f3be7b07d 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -45,6 +45,8 @@ static int pxm_to_node_map[MAX_PXM_DOMAINS]
static int node_to_pxm_map[MAX_NUMNODES]
= { [0 ... MAX_NUMNODES - 1] = PXM_INVAL };
+unsigned char acpi_srat_revision __initdata;
+
int pxm_to_node(int pxm)
{
if (pxm < 0)
@@ -255,9 +257,13 @@ acpi_parse_memory_affinity(struct acpi_subtable_header * header,
static int __init acpi_parse_srat(struct acpi_table_header *table)
{
+ struct acpi_table_srat *srat;
if (!table)
return -EINVAL;
+ srat = (struct acpi_table_srat *)table;
+ acpi_srat_revision = srat->header.revision;
+
/* Real work done in acpi_table_parse_srat below. */
return 0;
diff --git a/drivers/acpi/nvs.c b/drivers/acpi/nvs.c
index 096787b43c96..7a2035fa8c71 100644
--- a/drivers/acpi/nvs.c
+++ b/drivers/acpi/nvs.c
@@ -15,6 +15,56 @@
#include <linux/acpi_io.h>
#include <acpi/acpiosxf.h>
+/* ACPI NVS regions, APEI may use them */
+
+struct nvs_region {
+ __u64 phys_start;
+ __u64 size;
+ struct list_head node;
+};
+
+static LIST_HEAD(nvs_region_list);
+
+#ifdef CONFIG_ACPI_SLEEP
+static int suspend_nvs_register(unsigned long start, unsigned long size);
+#else
+static inline int suspend_nvs_register(unsigned long a, unsigned long b)
+{
+ return 0;
+}
+#endif
+
+int acpi_nvs_register(__u64 start, __u64 size)
+{
+ struct nvs_region *region;
+
+ region = kmalloc(sizeof(*region), GFP_KERNEL);
+ if (!region)
+ return -ENOMEM;
+ region->phys_start = start;
+ region->size = size;
+ list_add_tail(&region->node, &nvs_region_list);
+
+ return suspend_nvs_register(start, size);
+}
+
+int acpi_nvs_for_each_region(int (*func)(__u64 start, __u64 size, void *data),
+ void *data)
+{
+ int rc;
+ struct nvs_region *region;
+
+ list_for_each_entry(region, &nvs_region_list, node) {
+ rc = func(region->phys_start, region->size, data);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
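
acpi_nvs_for_each_region() is the iteration hook that apei_get_nvs_callback() uses earlier in this patch; the walk stops as soon as a callback returns non-zero. A hedged sketch of another possible caller that just totals the registered NVS space; the function and variable names are invented:

/* Illustrative sketch, not from the patch. */
#include <linux/acpi.h>

static int example_sum_nvs(__u64 start, __u64 size, void *data)
{
	(void)start;		/* only the size matters here */
	*(__u64 *)data += size;
	return 0;		/* non-zero would abort the walk */
}

static __u64 example_total_nvs_bytes(void)
{
	__u64 total = 0;

	acpi_nvs_for_each_region(example_sum_nvs, &total);
	return total;
}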
+
+
+#ifdef CONFIG_ACPI_SLEEP
/*
* Platforms, like ACPI, may want us to save some memory used by them during
* suspend and to restore the contents of this memory during the subsequent
@@ -41,7 +91,7 @@ static LIST_HEAD(nvs_list);
* things so that the data from page-aligned addresses in this region will
* be copied into separate RAM pages.
*/
-int suspend_nvs_register(unsigned long start, unsigned long size)
+static int suspend_nvs_register(unsigned long start, unsigned long size)
{
struct nvs_page *entry, *next;
@@ -159,3 +209,4 @@ void suspend_nvs_restore(void)
if (entry->data)
memcpy(entry->kaddr, entry->data, entry->size);
}
+#endif
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index f31c5c5f1b7e..fcc12d842bcc 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -83,19 +83,6 @@ static struct workqueue_struct *kacpi_notify_wq;
struct workqueue_struct *kacpi_hotplug_wq;
EXPORT_SYMBOL(kacpi_hotplug_wq);
-struct acpi_res_list {
- resource_size_t start;
- resource_size_t end;
- acpi_adr_space_type resource_type; /* IO port, System memory, ...*/
- char name[5]; /* only can have a length of 4 chars, make use of this
- one instead of res->name, no need to kalloc then */
- struct list_head resource_list;
- int count;
-};
-
-static LIST_HEAD(resource_list_head);
-static DEFINE_SPINLOCK(acpi_res_lock);
-
/*
* This list of permanent mappings is for memory that may be accessed from
* interrupt context, where we can't do the ioremap().
@@ -166,17 +153,21 @@ static u32 acpi_osi_handler(acpi_string interface, u32 supported)
return supported;
}
-static void __init acpi_request_region (struct acpi_generic_address *addr,
+static void __init acpi_request_region (struct acpi_generic_address *gas,
unsigned int length, char *desc)
{
- if (!addr->address || !length)
+ u64 addr;
+
+ /* Handle possible alignment issues */
+ memcpy(&addr, &gas->address, sizeof(addr));
+ if (!addr || !length)
return;
/* Resources are never freed */
- if (addr->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
- request_region(addr->address, length, desc);
- else if (addr->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
- request_mem_region(addr->address, length, desc);
+ if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
+ request_region(addr, length, desc);
+ else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
+ request_mem_region(addr, length, desc);
}
static int __init acpi_reserve_resources(void)
@@ -427,35 +418,42 @@ void __init early_acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
__acpi_unmap_table(virt, size);
}
-static int acpi_os_map_generic_address(struct acpi_generic_address *addr)
+int acpi_os_map_generic_address(struct acpi_generic_address *gas)
{
+ u64 addr;
void __iomem *virt;
- if (addr->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
+ if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
return 0;
- if (!addr->address || !addr->bit_width)
+ /* Handle possible alignment issues */
+ memcpy(&addr, &gas->address, sizeof(addr));
+ if (!addr || !gas->bit_width)
return -EINVAL;
- virt = acpi_os_map_memory(addr->address, addr->bit_width / 8);
+ virt = acpi_os_map_memory(addr, gas->bit_width / 8);
if (!virt)
return -EIO;
return 0;
}
+EXPORT_SYMBOL(acpi_os_map_generic_address);
-static void acpi_os_unmap_generic_address(struct acpi_generic_address *addr)
+void acpi_os_unmap_generic_address(struct acpi_generic_address *gas)
{
+ u64 addr;
struct acpi_ioremap *map;
- if (addr->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
+ if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
return;
- if (!addr->address || !addr->bit_width)
+ /* Handle possible alignment issues */
+ memcpy(&addr, &gas->address, sizeof(addr));
+ if (!addr || !gas->bit_width)
return;
mutex_lock(&acpi_ioremap_lock);
- map = acpi_map_lookup(addr->address, addr->bit_width / 8);
+ map = acpi_map_lookup(addr, gas->bit_width / 8);
if (!map) {
mutex_unlock(&acpi_ioremap_lock);
return;
@@ -465,6 +463,7 @@ static void acpi_os_unmap_generic_address(struct acpi_generic_address *addr)
acpi_os_map_cleanup(map);
}
+EXPORT_SYMBOL(acpi_os_unmap_generic_address);
#ifdef ACPI_FUTURE_USAGE
acpi_status
@@ -1278,44 +1277,28 @@ __setup("acpi_enforce_resources=", acpi_enforce_resources_setup);
* drivers */
int acpi_check_resource_conflict(const struct resource *res)
{
- struct acpi_res_list *res_list_elem;
- int ioport = 0, clash = 0;
+ acpi_adr_space_type space_id;
+ acpi_size length;
+ u8 warn = 0;
+ int clash = 0;
if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
return 0;
if (!(res->flags & IORESOURCE_IO) && !(res->flags & IORESOURCE_MEM))
return 0;
- ioport = res->flags & IORESOURCE_IO;
-
- spin_lock(&acpi_res_lock);
- list_for_each_entry(res_list_elem, &resource_list_head,
- resource_list) {
- if (ioport && (res_list_elem->resource_type
- != ACPI_ADR_SPACE_SYSTEM_IO))
- continue;
- if (!ioport && (res_list_elem->resource_type
- != ACPI_ADR_SPACE_SYSTEM_MEMORY))
- continue;
+ if (res->flags & IORESOURCE_IO)
+ space_id = ACPI_ADR_SPACE_SYSTEM_IO;
+ else
+ space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY;
- if (res->end < res_list_elem->start
- || res_list_elem->end < res->start)
- continue;
- clash = 1;
- break;
- }
- spin_unlock(&acpi_res_lock);
+ length = res->end - res->start + 1;
+ if (acpi_enforce_resources != ENFORCE_RESOURCES_NO)
+ warn = 1;
+ clash = acpi_check_address_range(space_id, res->start, length, warn);
if (clash) {
if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) {
- printk(KERN_WARNING "ACPI: resource %s %pR"
- " conflicts with ACPI region %s "
- "[%s 0x%zx-0x%zx]\n",
- res->name, res, res_list_elem->name,
- (res_list_elem->resource_type ==
- ACPI_ADR_SPACE_SYSTEM_IO) ? "io" : "mem",
- (size_t) res_list_elem->start,
- (size_t) res_list_elem->end);
if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX)
printk(KERN_NOTICE "ACPI: This conflict may"
" cause random problems and system"
@@ -1467,155 +1450,6 @@ acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object)
kmem_cache_free(cache, object);
return (AE_OK);
}
-
-static inline int acpi_res_list_add(struct acpi_res_list *res)
-{
- struct acpi_res_list *res_list_elem;
-
- list_for_each_entry(res_list_elem, &resource_list_head,
- resource_list) {
-
- if (res->resource_type == res_list_elem->resource_type &&
- res->start == res_list_elem->start &&
- res->end == res_list_elem->end) {
-
- /*
- * The Region(addr,len) already exist in the list,
- * just increase the count
- */
-
- res_list_elem->count++;
- return 0;
- }
- }
-
- res->count = 1;
- list_add(&res->resource_list, &resource_list_head);
- return 1;
-}
-
-static inline void acpi_res_list_del(struct acpi_res_list *res)
-{
- struct acpi_res_list *res_list_elem;
-
- list_for_each_entry(res_list_elem, &resource_list_head,
- resource_list) {
-
- if (res->resource_type == res_list_elem->resource_type &&
- res->start == res_list_elem->start &&
- res->end == res_list_elem->end) {
-
- /*
- * If the res count is decreased to 0,
- * remove and free it
- */
-
- if (--res_list_elem->count == 0) {
- list_del(&res_list_elem->resource_list);
- kfree(res_list_elem);
- }
- return;
- }
- }
-}
-
-acpi_status
-acpi_os_invalidate_address(
- u8 space_id,
- acpi_physical_address address,
- acpi_size length)
-{
- struct acpi_res_list res;
-
- switch (space_id) {
- case ACPI_ADR_SPACE_SYSTEM_IO:
- case ACPI_ADR_SPACE_SYSTEM_MEMORY:
- /* Only interference checks against SystemIO and SystemMemory
- are needed */
- res.start = address;
- res.end = address + length - 1;
- res.resource_type = space_id;
- spin_lock(&acpi_res_lock);
- acpi_res_list_del(&res);
- spin_unlock(&acpi_res_lock);
- break;
- case ACPI_ADR_SPACE_PCI_CONFIG:
- case ACPI_ADR_SPACE_EC:
- case ACPI_ADR_SPACE_SMBUS:
- case ACPI_ADR_SPACE_CMOS:
- case ACPI_ADR_SPACE_PCI_BAR_TARGET:
- case ACPI_ADR_SPACE_DATA_TABLE:
- case ACPI_ADR_SPACE_FIXED_HARDWARE:
- break;
- }
- return AE_OK;
-}
-
-/******************************************************************************
- *
- * FUNCTION: acpi_os_validate_address
- *
- * PARAMETERS: space_id - ACPI space ID
- * address - Physical address
- * length - Address length
- *
- * RETURN: AE_OK if address/length is valid for the space_id. Otherwise,
- * should return AE_AML_ILLEGAL_ADDRESS.
- *
- * DESCRIPTION: Validate a system address via the host OS. Used to validate
- * the addresses accessed by AML operation regions.
- *
- *****************************************************************************/
-
-acpi_status
-acpi_os_validate_address (
- u8 space_id,
- acpi_physical_address address,
- acpi_size length,
- char *name)
-{
- struct acpi_res_list *res;
- int added;
- if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
- return AE_OK;
-
- switch (space_id) {
- case ACPI_ADR_SPACE_SYSTEM_IO:
- case ACPI_ADR_SPACE_SYSTEM_MEMORY:
- /* Only interference checks against SystemIO and SystemMemory
- are needed */
- res = kzalloc(sizeof(struct acpi_res_list), GFP_KERNEL);
- if (!res)
- return AE_OK;
- /* ACPI names are fixed to 4 bytes, still better use strlcpy */
- strlcpy(res->name, name, 5);
- res->start = address;
- res->end = address + length - 1;
- res->resource_type = space_id;
- spin_lock(&acpi_res_lock);
- added = acpi_res_list_add(res);
- spin_unlock(&acpi_res_lock);
- pr_debug("%s %s resource: start: 0x%llx, end: 0x%llx, "
- "name: %s\n", added ? "Added" : "Already exist",
- (space_id == ACPI_ADR_SPACE_SYSTEM_IO)
- ? "SystemIO" : "System Memory",
- (unsigned long long)res->start,
- (unsigned long long)res->end,
- res->name);
- if (!added)
- kfree(res);
- break;
- case ACPI_ADR_SPACE_PCI_CONFIG:
- case ACPI_ADR_SPACE_EC:
- case ACPI_ADR_SPACE_SMBUS:
- case ACPI_ADR_SPACE_CMOS:
- case ACPI_ADR_SPACE_PCI_BAR_TARGET:
- case ACPI_ADR_SPACE_DATA_TABLE:
- case ACPI_ADR_SPACE_FIXED_HARDWARE:
- break;
- }
- return AE_OK;
-}
#endif
acpi_status __init acpi_os_initialize(void)
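
The memcpy() into a local u64 in acpi_os_map_generic_address() and acpi_os_unmap_generic_address() above exists because struct acpi_generic_address sits inside packed ACPI tables, so its 64-bit address member may not be naturally aligned. A minimal userspace sketch of the same pattern, with a made-up fake_gas type standing in for the real structure:

/* cc -O2 -o gas_addr gas_addr.c -- illustrative only */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for struct acpi_generic_address: the 64-bit address member
 * starts at offset 2, so it is not naturally aligned. */
struct fake_gas {
	uint8_t space_id;
	uint8_t bit_width;
	uint64_t address;
} __attribute__((packed));

int main(void)
{
	struct fake_gas gas = { .space_id = 0, .bit_width = 32,
				.address = 0xfed40000ULL };
	uint64_t addr;

	/* Copy the possibly unaligned field into an aligned local instead
	 * of dereferencing gas.address directly; architectures that fault
	 * on unaligned loads need this. */
	memcpy(&addr, &gas.address, sizeof(addr));
	printf("address = 0x%llx\n", (unsigned long long)addr);
	return 0;
}
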
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index 7f9eba9a0b02..0eefa12e648c 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -487,10 +487,10 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
else
link_desc[0] = '\0';
- dev_info(&dev->dev, "PCI INT %c%s -> GSI %u (%s, %s) -> IRQ %d\n",
- pin_name(pin), link_desc, gsi,
- (triggering == ACPI_LEVEL_SENSITIVE) ? "level" : "edge",
- (polarity == ACPI_ACTIVE_LOW) ? "low" : "high", dev->irq);
+ dev_dbg(&dev->dev, "PCI INT %c%s -> GSI %u (%s, %s) -> IRQ %d\n",
+ pin_name(pin), link_desc, gsi,
+ (triggering == ACPI_LEVEL_SENSITIVE) ? "level" : "edge",
+ (polarity == ACPI_ACTIVE_LOW) ? "low" : "high", dev->irq);
return 0;
}
@@ -524,6 +524,6 @@ void acpi_pci_irq_disable(struct pci_dev *dev)
* (e.g. PCI_UNDEFINED_IRQ).
*/
- dev_info(&dev->dev, "PCI INT %c disabled\n", pin_name(pin));
+ dev_dbg(&dev->dev, "PCI INT %c disabled\n", pin_name(pin));
acpi_unregister_gsi(gsi);
}
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 2672c798272f..7aff6312ce7c 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -596,6 +596,13 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
if (ACPI_SUCCESS(status)) {
dev_info(root->bus->bridge,
"ACPI _OSC control (0x%02x) granted\n", flags);
+ if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) {
+ /*
+ * We have ASPM control, but the FADT indicates
+ * that it's unsupported. Clear it.
+ */
+ pcie_clear_aspm(root->bus);
+ }
} else {
dev_info(root->bus->bridge,
"ACPI _OSC request failed (%s), "
diff --git a/drivers/acpi/pci_slot.c b/drivers/acpi/pci_slot.c
index 07f7fea8a4e2..e50e31a518af 100644
--- a/drivers/acpi/pci_slot.c
+++ b/drivers/acpi/pci_slot.c
@@ -34,7 +34,7 @@
#include <acpi/acpi_drivers.h>
#include <linux/dmi.h>
-static int debug;
+static bool debug;
static int check_sta_before_sun;
#define DRIVER_VERSION "0.1"
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 3a0428e8435c..c850de4c9a14 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -173,8 +173,30 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
apic_id = map_mat_entry(handle, type, acpi_id);
if (apic_id == -1)
apic_id = map_madt_entry(type, acpi_id);
- if (apic_id == -1)
- return apic_id;
+ if (apic_id == -1) {
+ /*
+ * On a UP processor, there is no _MAT method or MADT table,
+ * so the apic_id above is always set to -1.
+ *
+ * The BIOS may define multiple CPU handles even for a UP processor.
+ * For example,
+ *
+ * Scope (_PR)
+ * {
+ * Processor (CPU0, 0x00, 0x00000410, 0x06) {}
+ * Processor (CPU1, 0x01, 0x00000410, 0x06) {}
+ * Processor (CPU2, 0x02, 0x00000410, 0x06) {}
+ * Processor (CPU3, 0x03, 0x00000410, 0x06) {}
+ * }
+ *
+ * Ignore apic_id and always return 0 for CPU0's handle.
+ * Return -1 for the other CPUs' handles.
+ */
+ if (acpi_id == 0)
+ return acpi_id;
+ else
+ return apic_id;
+ }
#ifdef CONFIG_SMP
for_each_possible_cpu(i) {
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index 9d7bc9f6b6cc..0034ede38710 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -82,7 +82,7 @@ MODULE_LICENSE("GPL");
static int acpi_processor_add(struct acpi_device *device);
static int acpi_processor_remove(struct acpi_device *device, int type);
static void acpi_processor_notify(struct acpi_device *device, u32 event);
-static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu);
+static acpi_status acpi_processor_hotadd_init(struct acpi_processor *pr);
static int acpi_processor_handle_eject(struct acpi_processor *pr);
@@ -324,10 +324,8 @@ static int acpi_processor_get_info(struct acpi_device *device)
* they are physically not present.
*/
if (pr->id == -1) {
- if (ACPI_FAILURE
- (acpi_processor_hotadd_init(pr->handle, &pr->id))) {
+ if (ACPI_FAILURE(acpi_processor_hotadd_init(pr)))
return -ENODEV;
- }
}
/*
* On some boxes several processors use the same processor bus id.
@@ -446,7 +444,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
{
struct acpi_processor *pr = NULL;
int result = 0;
- struct sys_device *sysdev;
+ struct device *dev;
pr = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);
if (!pr)
@@ -491,8 +489,8 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
per_cpu(processors, pr->id) = pr;
- sysdev = get_cpu_sysdev(pr->id);
- if (sysfs_create_link(&device->dev.kobj, &sysdev->kobj, "sysdev")) {
+ dev = get_cpu_device(pr->id);
+ if (sysfs_create_link(&device->dev.kobj, &dev->kobj, "sysdev")) {
result = -EFAULT;
goto err_free_cpumask;
}
@@ -539,6 +537,7 @@ err_thermal_unregister:
thermal_cooling_device_unregister(pr->cdev);
err_power_exit:
acpi_processor_power_exit(pr, device);
+ sysfs_remove_link(&device->dev.kobj, "sysdev");
err_free_cpumask:
free_cpumask_var(pr->throttling.shared_cpu_map);
@@ -720,18 +719,19 @@ processor_walk_namespace_cb(acpi_handle handle,
return (AE_OK);
}
-static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu)
+static acpi_status acpi_processor_hotadd_init(struct acpi_processor *pr)
{
+ acpi_handle handle = pr->handle;
if (!is_processor_present(handle)) {
return AE_ERROR;
}
- if (acpi_map_lsapic(handle, p_cpu))
+ if (acpi_map_lsapic(handle, &pr->id))
return AE_ERROR;
- if (arch_register_cpu(*p_cpu)) {
- acpi_unmap_lsapic(*p_cpu);
+ if (arch_register_cpu(pr->id)) {
+ acpi_unmap_lsapic(pr->id);
return AE_ERROR;
}
@@ -748,7 +748,7 @@ static int acpi_processor_handle_eject(struct acpi_processor *pr)
return (0);
}
#else
-static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu)
+static acpi_status acpi_processor_hotadd_init(struct acpi_processor *pr)
{
return AE_ERROR;
}
@@ -827,8 +827,6 @@ static void __exit acpi_processor_exit(void)
acpi_bus_unregister_driver(&acpi_processor_driver);
- cpuidle_unregister_driver(&acpi_idle_driver);
-
return;
}
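
The sysfs_remove_link() added to the error path above keeps the unwind order in acpi_processor_add() the mirror image of the setup order. A small self-contained sketch of that goto-unwind shape, with made-up step names:

/* cc -o unwind unwind.c -- names are illustrative, not driver code */
#include <stdio.h>

static int step_a(void) { puts("a: set up");  return 0; }
static int step_b(void) { puts("b: set up");  return 0; }
static int step_c(void) { puts("c: failed");  return -1; }
static void undo_b(void) { puts("b: undone"); }
static void undo_a(void) { puts("a: undone"); }

static int example_setup(void)
{
	int err;

	err = step_a();			/* e.g. alloc_cpumask_var() */
	if (err)
		return err;
	err = step_b();			/* e.g. sysfs_create_link() */
	if (err)
		goto out_a;
	err = step_c();			/* e.g. a later registration */
	if (err)
		goto out_b;
	return 0;

out_b:
	undo_b();			/* analogous to sysfs_remove_link() */
out_a:
	undo_a();			/* analogous to free_cpumask_var() */
	return err;
}

int main(void) { return example_setup() ? 1 : 0; }
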
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
index 870550d6a4bf..3b599abf2b40 100644
--- a/drivers/acpi/processor_thermal.c
+++ b/drivers/acpi/processor_thermal.c
@@ -30,7 +30,6 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
-#include <linux/sysdev.h>
#include <asm/uaccess.h>
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 6d9a3ab58db2..0a7ed69546ba 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -476,6 +476,22 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW520F"),
},
},
+ {
+ .callback = init_nvs_nosave,
+ .ident = "Asus K54C",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "K54C"),
+ },
+ },
+ {
+ .callback = init_nvs_nosave,
+ .ident = "Asus K54HR",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "K54HR"),
+ },
+ },
{},
};
#endif /* CONFIG_SUSPEND */
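
The Asus K54C/K54HR additions above follow the usual DMI quirk pattern: a callback plus vendor/product matches, with an empty entry terminating the table. A hedged sketch of the general shape (names are illustrative, not from the patch):

/* Kernel-style sketch; every identifier prefixed with example_ is made up. */
#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/kernel.h>

static int __init example_quirk_cb(const struct dmi_system_id *d)
{
	pr_info("applying quirk for %s\n", d->ident);
	return 0;
}

static const struct dmi_system_id example_quirks[] __initconst = {
	{
		.callback = example_quirk_cb,
		.ident = "Example Board",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Example Product"),
		},
	},
	{}	/* terminating entry */
};

/* Typically consumed once during init: dmi_check_system(example_quirks); */
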
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 08a44b532f7c..eaef02afc7cf 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -69,21 +69,21 @@ MODULE_AUTHOR("Bruno Ducrot");
MODULE_DESCRIPTION("ACPI Video Driver");
MODULE_LICENSE("GPL");
-static int brightness_switch_enabled = 1;
+static bool brightness_switch_enabled = 1;
module_param(brightness_switch_enabled, bool, 0644);
/*
* By default, we don't allow duplicate ACPI video bus devices
* under the same VGA controller
*/
-static int allow_duplicates;
+static bool allow_duplicates;
module_param(allow_duplicates, bool, 0644);
/*
* Some BIOSes claim they use minimum backlight at boot,
* and this may bring dimming screen after boot
*/
-static int use_bios_initial_backlight = 1;
+static bool use_bios_initial_backlight = 1;
module_param(use_bios_initial_backlight, bool, 0644);
static int register_count = 0;
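
The int-to-bool conversions here (and the matching ones in pci_slot.c and sata_nv.c elsewhere in this diff) satisfy the rule that a variable handed to module_param(..., bool, ...) must really be a bool. A minimal module sketch, not taken from the patch:

/* Kernel-style sketch with made-up names. */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

static bool enable_feature = true;	/* bool, not int */
module_param(enable_feature, bool, 0644);
MODULE_PARM_DESC(enable_feature, "Enable the example feature (default: true)");

static int __init example_init(void)
{
	pr_info("example: enable_feature=%d\n", enable_feature);
	return 0;
}
module_init(example_init);

static void __exit example_exit(void)
{
}
module_exit(example_exit);

MODULE_LICENSE("GPL");
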
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index bd230e801131..54eaf96ab217 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -52,6 +52,10 @@ static int amba_uevent(struct device *dev, struct kobj_uevent_env *env)
int retval = 0;
retval = add_uevent_var(env, "AMBA_ID=%08x", pcdev->periphid);
+ if (retval)
+ return retval;
+
+ retval = add_uevent_var(env, "MODALIAS=amba:d%08X", pcdev->periphid);
return retval;
}
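
The hunk above stops discarding add_uevent_var()'s return value before emitting the new MODALIAS variable. A hedged sketch of that check-then-continue pattern, with made-up names:

/* Kernel-style sketch; "example" identifiers are invented for illustration. */
#include <linux/device.h>
#include <linux/kobject.h>

static int example_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	int retval;

	retval = add_uevent_var(env, "EXAMPLE_ID=%08x", 0x12345678);
	if (retval)
		return retval;	/* the env buffer may be full */

	return add_uevent_var(env, "MODALIAS=example:d%08X", 0x12345678);
}
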
#else
@@ -109,31 +113,7 @@ static int amba_legacy_resume(struct device *dev)
return ret;
}
-static int amba_pm_prepare(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (drv && drv->pm && drv->pm->prepare)
- ret = drv->pm->prepare(dev);
-
- return ret;
-}
-
-static void amba_pm_complete(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
-
- if (drv && drv->pm && drv->pm->complete)
- drv->pm->complete(dev);
-}
-
-#else /* !CONFIG_PM_SLEEP */
-
-#define amba_pm_prepare NULL
-#define amba_pm_complete NULL
-
-#endif /* !CONFIG_PM_SLEEP */
+#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_SUSPEND
@@ -155,22 +135,6 @@ static int amba_pm_suspend(struct device *dev)
return ret;
}
-static int amba_pm_suspend_noirq(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->suspend_noirq)
- ret = drv->pm->suspend_noirq(dev);
- }
-
- return ret;
-}
-
static int amba_pm_resume(struct device *dev)
{
struct device_driver *drv = dev->driver;
@@ -189,28 +153,10 @@ static int amba_pm_resume(struct device *dev)
return ret;
}
-static int amba_pm_resume_noirq(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->resume_noirq)
- ret = drv->pm->resume_noirq(dev);
- }
-
- return ret;
-}
-
#else /* !CONFIG_SUSPEND */
#define amba_pm_suspend NULL
#define amba_pm_resume NULL
-#define amba_pm_suspend_noirq NULL
-#define amba_pm_resume_noirq NULL
#endif /* !CONFIG_SUSPEND */
@@ -234,22 +180,6 @@ static int amba_pm_freeze(struct device *dev)
return ret;
}
-static int amba_pm_freeze_noirq(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->freeze_noirq)
- ret = drv->pm->freeze_noirq(dev);
- }
-
- return ret;
-}
-
static int amba_pm_thaw(struct device *dev)
{
struct device_driver *drv = dev->driver;
@@ -268,22 +198,6 @@ static int amba_pm_thaw(struct device *dev)
return ret;
}
-static int amba_pm_thaw_noirq(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->thaw_noirq)
- ret = drv->pm->thaw_noirq(dev);
- }
-
- return ret;
-}
-
static int amba_pm_poweroff(struct device *dev)
{
struct device_driver *drv = dev->driver;
@@ -302,22 +216,6 @@ static int amba_pm_poweroff(struct device *dev)
return ret;
}
-static int amba_pm_poweroff_noirq(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->poweroff_noirq)
- ret = drv->pm->poweroff_noirq(dev);
- }
-
- return ret;
-}
-
static int amba_pm_restore(struct device *dev)
{
struct device_driver *drv = dev->driver;
@@ -336,32 +234,12 @@ static int amba_pm_restore(struct device *dev)
return ret;
}
-static int amba_pm_restore_noirq(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->restore_noirq)
- ret = drv->pm->restore_noirq(dev);
- }
-
- return ret;
-}
-
#else /* !CONFIG_HIBERNATE_CALLBACKS */
#define amba_pm_freeze NULL
#define amba_pm_thaw NULL
#define amba_pm_poweroff NULL
#define amba_pm_restore NULL
-#define amba_pm_freeze_noirq NULL
-#define amba_pm_thaw_noirq NULL
-#define amba_pm_poweroff_noirq NULL
-#define amba_pm_restore_noirq NULL
#endif /* !CONFIG_HIBERNATE_CALLBACKS */
@@ -402,20 +280,12 @@ static int amba_pm_runtime_resume(struct device *dev)
#ifdef CONFIG_PM
static const struct dev_pm_ops amba_pm = {
- .prepare = amba_pm_prepare,
- .complete = amba_pm_complete,
.suspend = amba_pm_suspend,
.resume = amba_pm_resume,
.freeze = amba_pm_freeze,
.thaw = amba_pm_thaw,
.poweroff = amba_pm_poweroff,
.restore = amba_pm_restore,
- .suspend_noirq = amba_pm_suspend_noirq,
- .resume_noirq = amba_pm_resume_noirq,
- .freeze_noirq = amba_pm_freeze_noirq,
- .thaw_noirq = amba_pm_thaw_noirq,
- .poweroff_noirq = amba_pm_poweroff_noirq,
- .restore_noirq = amba_pm_restore_noirq,
SET_RUNTIME_PM_OPS(
amba_pm_runtime_suspend,
amba_pm_runtime_resume,
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index cf26222a93c5..d07bf0366d99 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -52,7 +52,8 @@
#define DRV_VERSION "3.0"
enum {
- AHCI_PCI_BAR = 5,
+ AHCI_PCI_BAR_STA2X11 = 0,
+ AHCI_PCI_BAR_STANDARD = 5,
};
enum board_ids {
@@ -375,6 +376,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(SI, 0x1185), board_ahci }, /* SiS 968 */
{ PCI_VDEVICE(SI, 0x0186), board_ahci }, /* SiS 968 */
+ /* ST Microelectronics */
+ { PCI_VDEVICE(STMICRO, 0xCC06), board_ahci }, /* ST ConneXt */
+
/* Marvell */
{ PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */
{ PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv }, /* 6121 */
@@ -622,6 +626,13 @@ static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
{
int rc;
+ /*
+ * If the device fixup already set the dma_mask to some non-standard
+ * value, don't extend it here. This happens on STA2X11, for example.
+ */
+ if (pdev->dma_mask && pdev->dma_mask < DMA_BIT_MASK(32))
+ return 0;
+
if (using_dac &&
!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
@@ -1026,6 +1037,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
struct ahci_host_priv *hpriv;
struct ata_host *host;
int n_ports, i, rc;
+ int ahci_pci_bar = AHCI_PCI_BAR_STANDARD;
VPRINTK("ENTER\n");
@@ -1057,6 +1069,10 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_info(&pdev->dev,
"PDC42819 can only drive SATA devices with this driver\n");
+ /* The ConneXt uses a non-standard BAR */
+ if (pdev->vendor == PCI_VENDOR_ID_STMICRO && pdev->device == 0xCC06)
+ ahci_pci_bar = AHCI_PCI_BAR_STA2X11;
+
/* acquire resources */
rc = pcim_enable_device(pdev);
if (rc)
@@ -1065,7 +1081,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* AHCI controllers often implement SFF compatible interface.
* Grab all PCI BARs just in case.
*/
- rc = pcim_iomap_regions_request_all(pdev, 1 << AHCI_PCI_BAR, DRV_NAME);
+ rc = pcim_iomap_regions_request_all(pdev, 1 << ahci_pci_bar, DRV_NAME);
if (rc == -EBUSY)
pcim_pin_device(pdev);
if (rc)
@@ -1108,7 +1124,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if ((hpriv->flags & AHCI_HFLAG_NO_MSI) || pci_enable_msi(pdev))
pci_intx(pdev, 1);
- hpriv->mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
+ hpriv->mmio = pcim_iomap_table(pdev)[ahci_pci_bar];
/* save initial config */
ahci_pci_save_initial_config(pdev, hpriv);
@@ -1172,8 +1188,8 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
- ata_port_pbar_desc(ap, AHCI_PCI_BAR, -1, "abar");
- ata_port_pbar_desc(ap, AHCI_PCI_BAR,
+ ata_port_pbar_desc(ap, ahci_pci_bar, -1, "abar");
+ ata_port_pbar_desc(ap, ahci_pci_bar,
0x100 + ap->port_no * 0x80, "port");
/* set enclosure management message type */
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index 43b875810d1b..48be4e189163 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -202,6 +202,71 @@ static int __devexit ahci_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM
+static int ahci_suspend(struct device *dev)
+{
+ struct ahci_platform_data *pdata = dev_get_platdata(dev);
+ struct ata_host *host = dev_get_drvdata(dev);
+ struct ahci_host_priv *hpriv = host->private_data;
+ void __iomem *mmio = hpriv->mmio;
+ u32 ctl;
+ int rc;
+
+ if (hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
+ dev_err(dev, "firmware update required for suspend/resume\n");
+ return -EIO;
+ }
+
+ /*
+ * AHCI spec rev1.1 section 8.3.3:
+ * Software must disable interrupts prior to requesting a
+ * transition of the HBA to D3 state.
+ */
+ ctl = readl(mmio + HOST_CTL);
+ ctl &= ~HOST_IRQ_EN;
+ writel(ctl, mmio + HOST_CTL);
+ readl(mmio + HOST_CTL); /* flush */
+
+ rc = ata_host_suspend(host, PMSG_SUSPEND);
+ if (rc)
+ return rc;
+
+ if (pdata && pdata->suspend)
+ return pdata->suspend(dev);
+ return 0;
+}
+
+static int ahci_resume(struct device *dev)
+{
+ struct ahci_platform_data *pdata = dev_get_platdata(dev);
+ struct ata_host *host = dev_get_drvdata(dev);
+ int rc;
+
+ if (pdata && pdata->resume) {
+ rc = pdata->resume(dev);
+ if (rc)
+ return rc;
+ }
+
+ if (dev->power.power_state.event == PM_EVENT_SUSPEND) {
+ rc = ahci_reset_controller(host);
+ if (rc)
+ return rc;
+
+ ahci_init_controller(host);
+ }
+
+ ata_host_resume(host);
+
+ return 0;
+}
+
+static struct dev_pm_ops ahci_pm_ops = {
+ .suspend = &ahci_suspend,
+ .resume = &ahci_resume,
+};
+#endif
+
static const struct of_device_id ahci_of_match[] = {
{ .compatible = "calxeda,hb-ahci", },
{},
@@ -214,6 +279,9 @@ static struct platform_driver ahci_driver = {
.name = "ahci",
.owner = THIS_MODULE,
.of_match_table = ahci_of_match,
+#ifdef CONFIG_PM
+ .pm = &ahci_pm_ops,
+#endif
},
.id_table = ahci_devtype,
};
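
ahci_suspend() above masks HOST_IRQ_EN before ata_host_suspend() because AHCI 1.1 section 8.3.3 requires interrupts to be disabled before the HBA is put into D3. The open-coded dev_pm_ops could likely also be written with SIMPLE_DEV_PM_OPS; a sketch assuming the same callback names:

/* Alternative wiring sketch, not the patch's code. */
#include <linux/pm.h>

static SIMPLE_DEV_PM_OPS(ahci_pm_ops_sketch, ahci_suspend, ahci_resume);

/*
 * ...and in the platform_driver definition:
 *	.driver = {
 *		.name	= "ahci",
 *		.pm	= &ahci_pm_ops_sketch,
 *	},
 * SIMPLE_DEV_PM_OPS also fills in the hibernation callbacks from the
 * same suspend/resume pair, which may or may not be wanted here.
 */
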
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 69ac373c72ab..fdf27b9fce43 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -1117,6 +1117,13 @@ static int piix_broken_suspend(void)
},
},
{
+ .ident = "Satellite Pro A120",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Satellite Pro A120"),
+ },
+ },
+ {
.ident = "Portege M500",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 3c92dbd751e0..a72bfd0ecfee 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -746,9 +746,6 @@ static void ahci_start_port(struct ata_port *ap)
/* enable FIS reception */
ahci_start_fis_rx(ap);
- /* enable DMA */
- ahci_start_engine(ap);
-
/* turn on LEDs */
if (ap->flags & ATA_FLAG_EM) {
ata_for_each_link(link, ap, EDGE) {
@@ -2022,7 +2019,7 @@ static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
ahci_power_down(ap);
else {
ata_port_err(ap, "%s (%d)\n", emsg, rc);
- ahci_start_port(ap);
+ ata_port_freeze(ap);
}
return rc;
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index c04ad68cb602..c06e0ec11556 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -66,6 +66,7 @@
#include <asm/byteorder.h>
#include <linux/cdrom.h>
#include <linux/ratelimit.h>
+#include <linux/pm_runtime.h>
#include "libata.h"
#include "libata-transport.h"
@@ -3248,10 +3249,10 @@ int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
ata_force_xfermask(dev);
pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
- dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
if (libata_dma_mask & mode_mask)
- dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
+ dma_mask = ata_pack_xfermask(0, dev->mwdma_mask,
+ dev->udma_mask);
else
dma_mask = 0;
@@ -4124,6 +4125,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
* device and controller are SATA.
*/
{ "PIONEER DVD-RW DVRTD08", NULL, ATA_HORKAGE_NOSETXFER },
+ { "PIONEER DVD-RW DVRTD08A", NULL, ATA_HORKAGE_NOSETXFER },
+ { "PIONEER DVD-RW DVR-215", NULL, ATA_HORKAGE_NOSETXFER },
{ "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER },
{ "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
@@ -5234,73 +5237,55 @@ bool ata_link_offline(struct ata_link *link)
}
#ifdef CONFIG_PM
-static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
+static int ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
unsigned int action, unsigned int ehi_flags,
int wait)
{
+ struct ata_link *link;
unsigned long flags;
- int i, rc;
-
- for (i = 0; i < host->n_ports; i++) {
- struct ata_port *ap = host->ports[i];
- struct ata_link *link;
+ int rc;
- /* Previous resume operation might still be in
- * progress. Wait for PM_PENDING to clear.
- */
- if (ap->pflags & ATA_PFLAG_PM_PENDING) {
- ata_port_wait_eh(ap);
- WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
- }
+ /* Previous resume operation might still be in
+ * progress. Wait for PM_PENDING to clear.
+ */
+ if (ap->pflags & ATA_PFLAG_PM_PENDING) {
+ ata_port_wait_eh(ap);
+ WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
+ }
- /* request PM ops to EH */
- spin_lock_irqsave(ap->lock, flags);
+ /* request PM ops to EH */
+ spin_lock_irqsave(ap->lock, flags);
- ap->pm_mesg = mesg;
- if (wait) {
- rc = 0;
- ap->pm_result = &rc;
- }
+ ap->pm_mesg = mesg;
+ if (wait) {
+ rc = 0;
+ ap->pm_result = &rc;
+ }
- ap->pflags |= ATA_PFLAG_PM_PENDING;
- ata_for_each_link(link, ap, HOST_FIRST) {
- link->eh_info.action |= action;
- link->eh_info.flags |= ehi_flags;
- }
+ ap->pflags |= ATA_PFLAG_PM_PENDING;
+ ata_for_each_link(link, ap, HOST_FIRST) {
+ link->eh_info.action |= action;
+ link->eh_info.flags |= ehi_flags;
+ }
- ata_port_schedule_eh(ap);
+ ata_port_schedule_eh(ap);
- spin_unlock_irqrestore(ap->lock, flags);
+ spin_unlock_irqrestore(ap->lock, flags);
- /* wait and check result */
- if (wait) {
- ata_port_wait_eh(ap);
- WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
- if (rc)
- return rc;
- }
+ /* wait and check result */
+ if (wait) {
+ ata_port_wait_eh(ap);
+ WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
}
- return 0;
+ return rc;
}
-/**
- * ata_host_suspend - suspend host
- * @host: host to suspend
- * @mesg: PM message
- *
- * Suspend @host. Actual operation is performed by EH. This
- * function requests EH to perform PM operations and waits for EH
- * to finish.
- *
- * LOCKING:
- * Kernel thread context (may sleep).
- *
- * RETURNS:
- * 0 on success, -errno on failure.
- */
-int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
+#define to_ata_port(d) container_of(d, struct ata_port, tdev)
+
+static int ata_port_suspend_common(struct device *dev, pm_message_t mesg)
{
+ struct ata_port *ap = to_ata_port(dev);
unsigned int ehi_flags = ATA_EHI_QUIET;
int rc;
@@ -5315,31 +5300,108 @@ int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
if (mesg.event == PM_EVENT_SUSPEND)
ehi_flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_NO_RECOVERY;
- rc = ata_host_request_pm(host, mesg, 0, ehi_flags, 1);
- if (rc == 0)
- host->dev->power.power_state = mesg;
+ rc = ata_port_request_pm(ap, mesg, 0, ehi_flags, 1);
return rc;
}
+static int ata_port_suspend(struct device *dev)
+{
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ return ata_port_suspend_common(dev, PMSG_SUSPEND);
+}
+
+static int ata_port_do_freeze(struct device *dev)
+{
+ if (pm_runtime_suspended(dev))
+ pm_runtime_resume(dev);
+
+ return ata_port_suspend_common(dev, PMSG_FREEZE);
+}
+
+static int ata_port_poweroff(struct device *dev)
+{
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ return ata_port_suspend_common(dev, PMSG_HIBERNATE);
+}
+
+static int ata_port_resume_common(struct device *dev)
+{
+ struct ata_port *ap = to_ata_port(dev);
+ int rc;
+
+ rc = ata_port_request_pm(ap, PMSG_ON, ATA_EH_RESET,
+ ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 1);
+ return rc;
+}
+
+static int ata_port_resume(struct device *dev)
+{
+ int rc;
+
+ rc = ata_port_resume_common(dev);
+ if (!rc) {
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ }
+
+ return rc;
+}
+
+static int ata_port_runtime_idle(struct device *dev)
+{
+ return pm_runtime_suspend(dev);
+}
+
+static const struct dev_pm_ops ata_port_pm_ops = {
+ .suspend = ata_port_suspend,
+ .resume = ata_port_resume,
+ .freeze = ata_port_do_freeze,
+ .thaw = ata_port_resume,
+ .poweroff = ata_port_poweroff,
+ .restore = ata_port_resume,
+
+ .runtime_suspend = ata_port_suspend,
+ .runtime_resume = ata_port_resume_common,
+ .runtime_idle = ata_port_runtime_idle,
+};
+
+/**
+ * ata_host_suspend - suspend host
+ * @host: host to suspend
+ * @mesg: PM message
+ *
+ * Suspend @host. Actual operation is performed by port suspend.
+ */
+int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
+{
+ host->dev->power.power_state = mesg;
+ return 0;
+}
+
/**
* ata_host_resume - resume host
* @host: host to resume
*
- * Resume @host. Actual operation is performed by EH. This
- * function requests EH to perform PM operations and returns.
- * Note that all resume operations are performed parallelly.
- *
- * LOCKING:
- * Kernel thread context (may sleep).
+ * Resume @host. Actual operation is performed by port resume.
*/
void ata_host_resume(struct ata_host *host)
{
- ata_host_request_pm(host, PMSG_ON, ATA_EH_RESET,
- ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
host->dev->power.power_state = PMSG_ON;
}
#endif
+struct device_type ata_port_type = {
+ .name = "ata_port",
+#ifdef CONFIG_PM
+ .pm = &ata_port_pm_ops,
+#endif
+};
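
to_ata_port() above recovers the owning ata_port from the embedded struct device (ap->tdev) with container_of(), which is what lets the new per-port dev_pm_ops callbacks receive only a struct device *. A self-contained userspace demo of the idiom, using a simplified container_of and made-up types:

/* cc -o containerof containerof.c -- illustrative only */
#include <stddef.h>
#include <stdio.h>

/* Simplified version of the kernel macro (no type checking). */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_device { int id; };

struct fake_port {
	int port_no;
	struct fake_device tdev;	/* embedded, like ata_port.tdev */
};

static void pm_callback(struct fake_device *dev)
{
	struct fake_port *ap = container_of(dev, struct fake_port, tdev);

	printf("callback for port %d\n", ap->port_no);
}

int main(void)
{
	struct fake_port p = { .port_no = 3 };

	pm_callback(&p.tdev);	/* the PM core would pass only the device */
	return 0;
}
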
+
/**
* ata_dev_init - Initialize an ata_device structure
* @dev: Device structure to initialize
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 2a5412e7e9c1..508a60bfe5c1 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -3381,6 +3381,7 @@ int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht)
if (!shost)
goto err_alloc;
+ shost->eh_noresume = 1;
*(struct ata_port **)&shost->hostdata[0] = ap;
ap->scsi_host = shost;
@@ -3398,7 +3399,7 @@ int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht)
*/
shost->max_host_blocked = 1;
- rc = scsi_add_host(ap->scsi_host, ap->host->dev);
+ rc = scsi_add_host(ap->scsi_host, &ap->tdev);
if (rc)
goto err_add;
}
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 4cadfa28f940..9691dd0966d7 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -929,11 +929,11 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc)
bytes = (bc_hi << 8) | bc_lo;
/* shall be cleared to zero, indicating xfer of data */
- if (unlikely(ireason & (1 << 0)))
+ if (unlikely(ireason & ATAPI_COD))
goto atapi_check;
/* make sure transfer direction matches expected */
- i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
+ i_write = ((ireason & ATAPI_IO) == 0) ? 1 : 0;
if (unlikely(do_write != i_write))
goto atapi_check;
diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c
index ce9dc6207f37..74aaee30e264 100644
--- a/drivers/ata/libata-transport.c
+++ b/drivers/ata/libata-transport.c
@@ -32,6 +32,7 @@
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/uaccess.h>
+#include <linux/pm_runtime.h>
#include "libata.h"
#include "libata-transport.h"
@@ -279,6 +280,7 @@ int ata_tport_add(struct device *parent,
struct device *dev = &ap->tdev;
device_initialize(dev);
+ dev->type = &ata_port_type;
dev->parent = get_device(parent);
dev->release = ata_tport_release;
@@ -289,6 +291,10 @@ int ata_tport_add(struct device *parent,
goto tport_err;
}
+ device_enable_async_suspend(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
transport_add_device(dev);
transport_configure_device(dev);
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 773de97988a2..814486d35c44 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -58,6 +58,7 @@ extern int atapi_passthru16;
extern int libata_fua;
extern int libata_noacpi;
extern int libata_allow_tpm;
+extern struct device_type ata_port_type;
extern struct ata_link *ata_dev_phys_link(struct ata_device *dev);
extern void ata_force_cbl(struct ata_port *ap);
extern u64 ata_tf_to_lba(const struct ata_taskfile *tf);
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
index e8574bba3ee4..048589fad2ca 100644
--- a/drivers/ata/pata_arasan_cf.c
+++ b/drivers/ata/pata_arasan_cf.c
@@ -963,17 +963,7 @@ static struct platform_driver arasan_cf_driver = {
},
};
-static int __init arasan_cf_init(void)
-{
- return platform_driver_register(&arasan_cf_driver);
-}
-module_init(arasan_cf_init);
-
-static void __exit arasan_cf_exit(void)
-{
- platform_driver_unregister(&arasan_cf_driver);
-}
-module_exit(arasan_cf_exit);
+module_platform_driver(arasan_cf_driver);
MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
MODULE_DESCRIPTION("Arasan ATA Compact Flash driver");
diff --git a/drivers/ata/pata_at91.c b/drivers/ata/pata_at91.c
index a76f24a8e5db..a7d91a72ee35 100644
--- a/drivers/ata/pata_at91.c
+++ b/drivers/ata/pata_at91.c
@@ -360,7 +360,7 @@ static int __devinit pata_at91_probe(struct platform_device *pdev)
ap->flags |= ATA_FLAG_SLAVE_POSS;
ap->pio_mask = ATA_PIO4;
- if (!irq) {
+ if (!gpio_is_valid(irq)) {
ap->flags |= ATA_FLAG_PIO_POLLING;
ata_port_desc(ap, "no IRQ, using PIO polling");
}
@@ -414,8 +414,8 @@ static int __devinit pata_at91_probe(struct platform_device *pdev)
host->private_data = info;
- ret = ata_host_activate(host, irq ? gpio_to_irq(irq) : 0,
- irq ? ata_sff_interrupt : NULL,
+ return ata_host_activate(host, gpio_is_valid(irq) ? gpio_to_irq(irq) : 0,
+ gpio_is_valid(irq) ? ata_sff_interrupt : NULL,
irq_flags, &pata_at91_sht);
if (!ret)
@@ -454,20 +454,7 @@ static struct platform_driver pata_at91_driver = {
},
};
-static int __init pata_at91_init(void)
-{
- return platform_driver_register(&pata_at91_driver);
-}
-
-static void __exit pata_at91_exit(void)
-{
- platform_driver_unregister(&pata_at91_driver);
-}
-
-
-module_init(pata_at91_init);
-module_exit(pata_at91_exit);
-
+module_platform_driver(pata_at91_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Driver for CF in True IDE mode on AT91SAM9260 SoC");
diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c
index bd987bb082eb..1e65842e2ca7 100644
--- a/drivers/ata/pata_bf54x.c
+++ b/drivers/ata/pata_bf54x.c
@@ -251,6 +251,8 @@ static const u32 udma_tenvmin = 20;
static const u32 udma_tackmin = 20;
static const u32 udma_tssmin = 50;
+#define BFIN_MAX_SG_SEGMENTS 4
+
/**
*
* Function: num_clocks_min
@@ -418,14 +420,6 @@ static void bfin_set_dmamode(struct ata_port *ap, struct ata_device *adev)
(tcyc_tdvs<<8 | tdvs));
ATAPI_SET_ULTRA_TIM_2(base, (tmli<<8 | tss));
ATAPI_SET_ULTRA_TIM_3(base, (trp<<8 | tzah));
-
- /* Enable host ATAPI Untra DMA interrupts */
- ATAPI_SET_INT_MASK(base,
- ATAPI_GET_INT_MASK(base)
- | UDMAIN_DONE_MASK
- | UDMAOUT_DONE_MASK
- | UDMAIN_TERM_MASK
- | UDMAOUT_TERM_MASK);
}
}
}
@@ -470,10 +464,6 @@ static void bfin_set_dmamode(struct ata_port *ap, struct ata_device *adev)
ATAPI_SET_MULTI_TIM_0(base, (tm<<8 | td));
ATAPI_SET_MULTI_TIM_1(base, (tkr<<8 | tkw));
ATAPI_SET_MULTI_TIM_2(base, (teoc<<8 | th));
-
- /* Enable host ATAPI Multi DMA interrupts */
- ATAPI_SET_INT_MASK(base, ATAPI_GET_INT_MASK(base)
- | MULTI_DONE_MASK | MULTI_TERM_MASK);
SSYNC();
}
}
@@ -841,79 +831,61 @@ static void bfin_set_devctl(struct ata_port *ap, u8 ctl)
static void bfin_bmdma_setup(struct ata_queued_cmd *qc)
{
- unsigned short config = WDSIZE_16;
+ struct ata_port *ap = qc->ap;
+ struct dma_desc_array *dma_desc_cpu = (struct dma_desc_array *)ap->bmdma_prd;
+ void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
+ unsigned short config = DMAFLOW_ARRAY | NDSIZE_5 | RESTART | WDSIZE_16 | DMAEN;
struct scatterlist *sg;
unsigned int si;
+ unsigned int channel;
+ unsigned int dir;
+ unsigned int size = 0;
dev_dbg(qc->ap->dev, "in atapi dma setup\n");
/* Program the ATA_CTRL register with dir */
if (qc->tf.flags & ATA_TFLAG_WRITE) {
- /* fill the ATAPI DMA controller */
- set_dma_config(CH_ATAPI_TX, config);
- set_dma_x_modify(CH_ATAPI_TX, 2);
- for_each_sg(qc->sg, sg, qc->n_elem, si) {
- set_dma_start_addr(CH_ATAPI_TX, sg_dma_address(sg));
- set_dma_x_count(CH_ATAPI_TX, sg_dma_len(sg) >> 1);
- }
+ channel = CH_ATAPI_TX;
+ dir = DMA_TO_DEVICE;
} else {
+ channel = CH_ATAPI_RX;
+ dir = DMA_FROM_DEVICE;
config |= WNR;
- /* fill the ATAPI DMA controller */
- set_dma_config(CH_ATAPI_RX, config);
- set_dma_x_modify(CH_ATAPI_RX, 2);
- for_each_sg(qc->sg, sg, qc->n_elem, si) {
- set_dma_start_addr(CH_ATAPI_RX, sg_dma_address(sg));
- set_dma_x_count(CH_ATAPI_RX, sg_dma_len(sg) >> 1);
- }
}
-}
-/**
- * bfin_bmdma_start - Start an IDE DMA transaction
- * @qc: Info associated with this ATA transaction.
- *
- * Note: Original code is ata_bmdma_start().
- */
+ dma_map_sg(ap->dev, qc->sg, qc->n_elem, dir);
-static void bfin_bmdma_start(struct ata_queued_cmd *qc)
-{
- struct ata_port *ap = qc->ap;
- void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
- struct scatterlist *sg;
- unsigned int si;
+ /* fill the ATAPI DMA controller */
+ for_each_sg(qc->sg, sg, qc->n_elem, si) {
+ dma_desc_cpu[si].start_addr = sg_dma_address(sg);
+ dma_desc_cpu[si].cfg = config;
+ dma_desc_cpu[si].x_count = sg_dma_len(sg) >> 1;
+ dma_desc_cpu[si].x_modify = 2;
+ size += sg_dma_len(sg);
+ }
- dev_dbg(qc->ap->dev, "in atapi dma start\n");
- if (!(ap->udma_mask || ap->mwdma_mask))
- return;
+ /* Set the last descriptor to stop mode */
+ dma_desc_cpu[qc->n_elem - 1].cfg &= ~(DMAFLOW | NDSIZE);
- /* start ATAPI DMA controller*/
- if (qc->tf.flags & ATA_TFLAG_WRITE) {
- /*
- * On blackfin arch, uncacheable memory is not
- * allocated with flag GFP_DMA. DMA buffer from
- * common kenel code should be flushed if WB
- * data cache is enabled. Otherwise, this loop
- * is an empty loop and optimized out.
- */
- for_each_sg(qc->sg, sg, qc->n_elem, si) {
- flush_dcache_range(sg_dma_address(sg),
- sg_dma_address(sg) + sg_dma_len(sg));
- }
- enable_dma(CH_ATAPI_TX);
- dev_dbg(qc->ap->dev, "enable udma write\n");
+ flush_dcache_range((unsigned int)dma_desc_cpu,
+ (unsigned int)dma_desc_cpu +
+ qc->n_elem * sizeof(struct dma_desc_array));
+
+ /* Enable ATA DMA operation*/
+ set_dma_curr_desc_addr(channel, (unsigned long *)ap->bmdma_prd_dma);
+ set_dma_x_count(channel, 0);
+ set_dma_x_modify(channel, 0);
+ set_dma_config(channel, config);
+
+ SSYNC();
- /* Send ATA DMA write command */
- bfin_exec_command(ap, &qc->tf);
+ /* Send ATA DMA command */
+ bfin_exec_command(ap, &qc->tf);
+ if (qc->tf.flags & ATA_TFLAG_WRITE) {
/* set ATA DMA write direction */
ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base)
| XFER_DIR));
} else {
- enable_dma(CH_ATAPI_RX);
- dev_dbg(qc->ap->dev, "enable udma read\n");
-
- /* Send ATA DMA read command */
- bfin_exec_command(ap, &qc->tf);
-
/* set ATA DMA read direction */
ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base)
& ~XFER_DIR));
@@ -925,12 +897,28 @@ static void bfin_bmdma_start(struct ata_queued_cmd *qc)
/* Set ATAPI state machine control in terminate sequence */
ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) | END_ON_TERM);
- /* Set transfer length to buffer len */
- for_each_sg(qc->sg, sg, qc->n_elem, si) {
- ATAPI_SET_XFER_LEN(base, (sg_dma_len(sg) >> 1));
- }
+ /* Set transfer length to the total size of sg buffers */
+ ATAPI_SET_XFER_LEN(base, size >> 1);
+}
- /* Enable ATA DMA operation*/
+/**
+ * bfin_bmdma_start - Start an IDE DMA transaction
+ * @qc: Info associated with this ATA transaction.
+ *
+ * Note: Original code is ata_bmdma_start().
+ */
+
+static void bfin_bmdma_start(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
+
+ dev_dbg(qc->ap->dev, "in atapi dma start\n");
+
+ if (!(ap->udma_mask || ap->mwdma_mask))
+ return;
+
+ /* start ATAPI transfer */
if (ap->udma_mask)
ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base)
| ULTRA_START);
@@ -947,34 +935,23 @@ static void bfin_bmdma_start(struct ata_queued_cmd *qc)
static void bfin_bmdma_stop(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
- struct scatterlist *sg;
- unsigned int si;
+ unsigned int dir;
dev_dbg(qc->ap->dev, "in atapi dma stop\n");
+
if (!(ap->udma_mask || ap->mwdma_mask))
return;
/* stop ATAPI DMA controller*/
- if (qc->tf.flags & ATA_TFLAG_WRITE)
+ if (qc->tf.flags & ATA_TFLAG_WRITE) {
+ dir = DMA_TO_DEVICE;
disable_dma(CH_ATAPI_TX);
- else {
+ } else {
+ dir = DMA_FROM_DEVICE;
disable_dma(CH_ATAPI_RX);
- if (ap->hsm_task_state & HSM_ST_LAST) {
- /*
- * On blackfin arch, uncacheable memory is not
- * allocated with flag GFP_DMA. DMA buffer from
- * common kenel code should be invalidated if
- * data cache is enabled. Otherwise, this loop
- * is an empty loop and optimized out.
- */
- for_each_sg(qc->sg, sg, qc->n_elem, si) {
- invalidate_dcache_range(
- sg_dma_address(sg),
- sg_dma_address(sg)
- + sg_dma_len(sg));
- }
- }
}
+
+ dma_unmap_sg(ap->dev, qc->sg, qc->n_elem, dir);
}
/**
@@ -1153,15 +1130,11 @@ static unsigned char bfin_bmdma_status(struct ata_port *ap)
{
unsigned char host_stat = 0;
void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
- unsigned short int_status = ATAPI_GET_INT_STATUS(base);
- if (ATAPI_GET_STATUS(base) & (MULTI_XFER_ON|ULTRA_XFER_ON))
+ if (ATAPI_GET_STATUS(base) & (MULTI_XFER_ON | ULTRA_XFER_ON))
host_stat |= ATA_DMA_ACTIVE;
- if (int_status & (MULTI_DONE_INT|UDMAIN_DONE_INT|UDMAOUT_DONE_INT|
- ATAPI_DEV_INT))
+ if (ATAPI_GET_INT_STATUS(base) & ATAPI_DEV_INT)
host_stat |= ATA_DMA_INTR;
- if (int_status & (MULTI_TERM_INT|UDMAIN_TERM_INT|UDMAOUT_TERM_INT))
- host_stat |= ATA_DMA_ERR|ATA_DMA_INTR;
dev_dbg(ap->dev, "ATAPI: host_stat=0x%x\n", host_stat);
@@ -1276,6 +1249,11 @@ static void bfin_port_stop(struct ata_port *ap)
{
dev_dbg(ap->dev, "in atapi port stop\n");
if (ap->udma_mask != 0 || ap->mwdma_mask != 0) {
+ dma_free_coherent(ap->dev,
+ BFIN_MAX_SG_SEGMENTS * sizeof(struct dma_desc_array),
+ ap->bmdma_prd,
+ ap->bmdma_prd_dma);
+
free_dma(CH_ATAPI_RX);
free_dma(CH_ATAPI_TX);
}
@@ -1287,14 +1265,29 @@ static int bfin_port_start(struct ata_port *ap)
if (!(ap->udma_mask || ap->mwdma_mask))
return 0;
+ ap->bmdma_prd = dma_alloc_coherent(ap->dev,
+ BFIN_MAX_SG_SEGMENTS * sizeof(struct dma_desc_array),
+ &ap->bmdma_prd_dma,
+ GFP_KERNEL);
+
+ if (ap->bmdma_prd == NULL) {
+ dev_info(ap->dev, "Unable to allocate DMA descriptor array.\n");
+ goto out;
+ }
+
if (request_dma(CH_ATAPI_RX, "BFIN ATAPI RX DMA") >= 0) {
if (request_dma(CH_ATAPI_TX,
"BFIN ATAPI TX DMA") >= 0)
return 0;
free_dma(CH_ATAPI_RX);
+ dma_free_coherent(ap->dev,
+ BFIN_MAX_SG_SEGMENTS * sizeof(struct dma_desc_array),
+ ap->bmdma_prd,
+ ap->bmdma_prd_dma);
}
+out:
ap->udma_mask = 0;
ap->mwdma_mask = 0;
dev_err(ap->dev, "Unable to request ATAPI DMA!"
@@ -1416,7 +1409,7 @@ static irqreturn_t bfin_ata_interrupt(int irq, void *dev_instance)
static struct scsi_host_template bfin_sht = {
ATA_BASE_SHT(DRV_NAME),
- .sg_tablesize = SG_NONE,
+ .sg_tablesize = BFIN_MAX_SG_SEGMENTS,
.dma_boundary = ATA_DMA_BOUNDARY,
};
diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c
index 628c8fae5937..7a402c75ab90 100644
--- a/drivers/ata/pata_cs5536.c
+++ b/drivers/ata/pata_cs5536.c
@@ -1,6 +1,7 @@
/*
* pata_cs5536.c - CS5536 PATA for new ATA layer
* (C) 2007 Martin K. Petersen <mkp@mkp.net>
+ * (C) 2011 Bartlomiej Zolnierkiewicz
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -55,24 +56,16 @@ MODULE_PARM_DESC(msr, "Force using MSR to configure IDE function (Default: 0)");
#define DRV_VERSION "0.0.8"
enum {
- CFG = 0,
- DTC = 1,
- CAST = 2,
- ETC = 3,
-
- MSR_IDE_BASE = 0x51300000,
- MSR_IDE_CFG = (MSR_IDE_BASE + 0x10),
- MSR_IDE_DTC = (MSR_IDE_BASE + 0x12),
- MSR_IDE_CAST = (MSR_IDE_BASE + 0x13),
- MSR_IDE_ETC = (MSR_IDE_BASE + 0x14),
-
+ MSR_IDE_CFG = 0x51300010,
PCI_IDE_CFG = 0x40,
- PCI_IDE_DTC = 0x48,
- PCI_IDE_CAST = 0x4c,
- PCI_IDE_ETC = 0x50,
- IDE_CFG_CHANEN = 0x2,
- IDE_CFG_CABLE = 0x10000,
+ CFG = 0,
+ DTC = 2,
+ CAST = 3,
+ ETC = 4,
+
+ IDE_CFG_CHANEN = (1 << 1),
+ IDE_CFG_CABLE = (1 << 17) | (1 << 16),
IDE_D0_SHIFT = 24,
IDE_D1_SHIFT = 16,
@@ -84,45 +77,50 @@ enum {
IDE_CAST_CMD_MASK = 0xff,
IDE_CAST_CMD_SHIFT = 24,
- IDE_ETC_NODMA = 0x03,
-};
-
-static const u32 msr_reg[4] = {
- MSR_IDE_CFG, MSR_IDE_DTC, MSR_IDE_CAST, MSR_IDE_ETC,
-};
-
-static const u8 pci_reg[4] = {
- PCI_IDE_CFG, PCI_IDE_DTC, PCI_IDE_CAST, PCI_IDE_ETC,
+ IDE_ETC_UDMA_MASK = 0xc0,
};
-static inline int cs5536_read(struct pci_dev *pdev, int reg, u32 *val)
+static int cs5536_read(struct pci_dev *pdev, int reg, u32 *val)
{
if (unlikely(use_msr)) {
u32 dummy __maybe_unused;
- rdmsr(msr_reg[reg], *val, dummy);
+ rdmsr(MSR_IDE_CFG + reg, *val, dummy);
return 0;
}
- return pci_read_config_dword(pdev, pci_reg[reg], val);
+ return pci_read_config_dword(pdev, PCI_IDE_CFG + reg * 4, val);
}
-static inline int cs5536_write(struct pci_dev *pdev, int reg, int val)
+static int cs5536_write(struct pci_dev *pdev, int reg, int val)
{
if (unlikely(use_msr)) {
- wrmsr(msr_reg[reg], val, 0);
+ wrmsr(MSR_IDE_CFG + reg, val, 0);
return 0;
}
- return pci_write_config_dword(pdev, pci_reg[reg], val);
+ return pci_write_config_dword(pdev, PCI_IDE_CFG + reg * 4, val);
+}
+
+static void cs5536_program_dtc(struct ata_device *adev, u8 tim)
+{
+ struct pci_dev *pdev = to_pci_dev(adev->link->ap->host->dev);
+ int dshift = adev->devno ? IDE_D1_SHIFT : IDE_D0_SHIFT;
+ u32 dtc;
+
+ cs5536_read(pdev, DTC, &dtc);
+ dtc &= ~(IDE_DRV_MASK << dshift);
+ dtc |= tim << dshift;
+ cs5536_write(pdev, DTC, dtc);
}
/**
* cs5536_cable_detect - detect cable type
* @ap: Port to detect on
*
- * Perform cable detection for ATA66 capable cable. Return a libata
- * cable type.
+ * Perform cable detection for ATA66 capable cable.
+ *
+ * Returns a cable type.
*/
static int cs5536_cable_detect(struct ata_port *ap)
@@ -132,7 +130,7 @@ static int cs5536_cable_detect(struct ata_port *ap)
cs5536_read(pdev, CFG, &cfg);
- if (cfg & (IDE_CFG_CABLE << ap->port_no))
+ if (cfg & IDE_CFG_CABLE)
return ATA_CBL_PATA80;
else
return ATA_CBL_PATA40;
@@ -162,19 +160,15 @@ static void cs5536_set_piomode(struct ata_port *ap, struct ata_device *adev)
struct ata_device *pair = ata_dev_pair(adev);
int mode = adev->pio_mode - XFER_PIO_0;
int cmdmode = mode;
- int dshift = adev->devno ? IDE_D1_SHIFT : IDE_D0_SHIFT;
int cshift = adev->devno ? IDE_CAST_D1_SHIFT : IDE_CAST_D0_SHIFT;
- u32 dtc, cast, etc;
+ u32 cast;
if (pair)
cmdmode = min(mode, pair->pio_mode - XFER_PIO_0);
- cs5536_read(pdev, DTC, &dtc);
- cs5536_read(pdev, CAST, &cast);
- cs5536_read(pdev, ETC, &etc);
+ cs5536_program_dtc(adev, drv_timings[mode]);
- dtc &= ~(IDE_DRV_MASK << dshift);
- dtc |= drv_timings[mode] << dshift;
+ cs5536_read(pdev, CAST, &cast);
cast &= ~(IDE_CAST_DRV_MASK << cshift);
cast |= addr_timings[mode] << cshift;
@@ -182,12 +176,7 @@ static void cs5536_set_piomode(struct ata_port *ap, struct ata_device *adev)
cast &= ~(IDE_CAST_CMD_MASK << IDE_CAST_CMD_SHIFT);
cast |= cmd_timings[cmdmode] << IDE_CAST_CMD_SHIFT;
- etc &= ~(IDE_DRV_MASK << dshift);
- etc |= IDE_ETC_NODMA << dshift;
-
- cs5536_write(pdev, DTC, dtc);
cs5536_write(pdev, CAST, cast);
- cs5536_write(pdev, ETC, etc);
}
/**
@@ -208,25 +197,21 @@ static void cs5536_set_dmamode(struct ata_port *ap, struct ata_device *adev)
};
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
- u32 dtc, etc;
+ u32 etc;
int mode = adev->dma_mode;
int dshift = adev->devno ? IDE_D1_SHIFT : IDE_D0_SHIFT;
- if (mode >= XFER_UDMA_0) {
- cs5536_read(pdev, ETC, &etc);
+ cs5536_read(pdev, ETC, &etc);
+ if (mode >= XFER_UDMA_0) {
etc &= ~(IDE_DRV_MASK << dshift);
etc |= udma_timings[mode - XFER_UDMA_0] << dshift;
-
- cs5536_write(pdev, ETC, etc);
} else { /* MWDMA */
- cs5536_read(pdev, DTC, &dtc);
-
- dtc &= ~(IDE_DRV_MASK << dshift);
- dtc |= mwdma_timings[mode - XFER_MW_DMA_0] << dshift;
-
- cs5536_write(pdev, DTC, dtc);
+ etc &= ~(IDE_ETC_UDMA_MASK << dshift);
+ cs5536_program_dtc(adev, mwdma_timings[mode - XFER_MW_DMA_0]);
}
+
+ cs5536_write(pdev, ETC, etc);
}
static struct scsi_host_template cs5536_sht = {
diff --git a/drivers/ata/pata_imx.c b/drivers/ata/pata_imx.c
index ca9d9caedfa3..c5af97f5107b 100644
--- a/drivers/ata/pata_imx.c
+++ b/drivers/ata/pata_imx.c
@@ -235,17 +235,7 @@ static struct platform_driver pata_imx_driver = {
},
};
-static int __init pata_imx_init(void)
-{
- return platform_driver_register(&pata_imx_driver);
-}
-
-static void __exit pata_imx_exit(void)
-{
- platform_driver_unregister(&pata_imx_driver);
-}
-module_init(pata_imx_init);
-module_exit(pata_imx_exit);
+module_platform_driver(pata_imx_driver);
MODULE_AUTHOR("Arnaud Patard <arnaud.patard@rtp-net.org>");
MODULE_DESCRIPTION("low-level driver for iMX PATA");
diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
index 15b64311fe0a..badb1789a918 100644
--- a/drivers/ata/pata_ixp4xx_cf.c
+++ b/drivers/ata/pata_ixp4xx_cf.c
@@ -205,21 +205,10 @@ static struct platform_driver ixp4xx_pata_platform_driver = {
.remove = __devexit_p(ixp4xx_pata_remove),
};
-static int __init ixp4xx_pata_init(void)
-{
- return platform_driver_register(&ixp4xx_pata_platform_driver);
-}
-
-static void __exit ixp4xx_pata_exit(void)
-{
- platform_driver_unregister(&ixp4xx_pata_platform_driver);
-}
+module_platform_driver(ixp4xx_pata_platform_driver);
MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>");
MODULE_DESCRIPTION("low-level driver for ixp4xx Compact Flash PATA");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_NAME);
-
-module_init(ixp4xx_pata_init);
-module_exit(ixp4xx_pata_exit);
diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
index 3e1746314f22..00748ae1a016 100644
--- a/drivers/ata/pata_mpc52xx.c
+++ b/drivers/ata/pata_mpc52xx.c
@@ -897,26 +897,7 @@ static struct platform_driver mpc52xx_ata_of_platform_driver = {
},
};
-
-/* ======================================================================== */
-/* Module */
-/* ======================================================================== */
-
-static int __init
-mpc52xx_ata_init(void)
-{
- printk(KERN_INFO "ata: MPC52xx IDE/ATA libata driver\n");
- return platform_driver_register(&mpc52xx_ata_of_platform_driver);
-}
-
-static void __exit
-mpc52xx_ata_exit(void)
-{
- platform_driver_unregister(&mpc52xx_ata_of_platform_driver);
-}
-
-module_init(mpc52xx_ata_init);
-module_exit(mpc52xx_ata_exit);
+module_platform_driver(mpc52xx_ata_of_platform_driver);
MODULE_AUTHOR("Sylvain Munaut <tnt@246tNt.com>");
MODULE_DESCRIPTION("Freescale MPC52xx IDE/ATA libata driver");
diff --git a/drivers/ata/pata_of_platform.c b/drivers/ata/pata_of_platform.c
index 2a472c5bb7db..1654dc27e7f8 100644
--- a/drivers/ata/pata_of_platform.c
+++ b/drivers/ata/pata_of_platform.c
@@ -12,8 +12,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/ata_platform.h>
static int __devinit pata_of_platform_probe(struct platform_device *ofdev)
@@ -22,7 +21,7 @@ static int __devinit pata_of_platform_probe(struct platform_device *ofdev)
struct device_node *dn = ofdev->dev.of_node;
struct resource io_res;
struct resource ctl_res;
- struct resource irq_res;
+ struct resource *irq_res;
unsigned int reg_shift = 0;
int pio_mode = 0;
int pio_mask;
@@ -51,11 +50,9 @@ static int __devinit pata_of_platform_probe(struct platform_device *ofdev)
}
}
- ret = of_irq_to_resource(dn, 0, &irq_res);
- if (!ret)
- irq_res.start = irq_res.end = 0;
- else
- irq_res.flags = 0;
+ irq_res = platform_get_resource(ofdev, IORESOURCE_IRQ, 0);
+ if (irq_res)
+ irq_res->flags = 0;
prop = of_get_property(dn, "reg-shift", NULL);
if (prop)
@@ -75,7 +72,7 @@ static int __devinit pata_of_platform_probe(struct platform_device *ofdev)
pio_mask = 1 << pio_mode;
pio_mask |= (1 << pio_mode) - 1;
- return __pata_platform_probe(&ofdev->dev, &io_res, &ctl_res, &irq_res,
+ return __pata_platform_probe(&ofdev->dev, &io_res, &ctl_res, irq_res,
reg_shift, pio_mask);
}
@@ -101,17 +98,7 @@ static struct platform_driver pata_of_platform_driver = {
.remove = __devexit_p(pata_of_platform_remove),
};
-static int __init pata_of_platform_init(void)
-{
- return platform_driver_register(&pata_of_platform_driver);
-}
-module_init(pata_of_platform_init);
-
-static void __exit pata_of_platform_exit(void)
-{
- platform_driver_unregister(&pata_of_platform_driver);
-}
-module_exit(pata_of_platform_exit);
+module_platform_driver(pata_of_platform_driver);
MODULE_DESCRIPTION("OF-platform PATA driver");
MODULE_AUTHOR("Anton Vorontsov <avorontsov@ru.mvista.com>");
diff --git a/drivers/ata/pata_palmld.c b/drivers/ata/pata_palmld.c
index b86d7e22595e..5ff31b68135c 100644
--- a/drivers/ata/pata_palmld.c
+++ b/drivers/ata/pata_palmld.c
@@ -132,20 +132,9 @@ static struct platform_driver palmld_pata_platform_driver = {
.remove = __devexit_p(palmld_pata_remove),
};
-static int __init palmld_pata_init(void)
-{
- return platform_driver_register(&palmld_pata_platform_driver);
-}
-
-static void __exit palmld_pata_exit(void)
-{
- platform_driver_unregister(&palmld_pata_platform_driver);
-}
+module_platform_driver(palmld_pata_platform_driver);
MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>");
MODULE_DESCRIPTION("PalmLD PATA driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_NAME);
-
-module_init(palmld_pata_init);
-module_exit(palmld_pata_exit);
diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c
index 2067308f683f..f1848aeda783 100644
--- a/drivers/ata/pata_platform.c
+++ b/drivers/ata/pata_platform.c
@@ -256,17 +256,7 @@ static struct platform_driver pata_platform_driver = {
},
};
-static int __init pata_platform_init(void)
-{
- return platform_driver_register(&pata_platform_driver);
-}
-
-static void __exit pata_platform_exit(void)
-{
- platform_driver_unregister(&pata_platform_driver);
-}
-module_init(pata_platform_init);
-module_exit(pata_platform_exit);
+module_platform_driver(pata_platform_driver);
module_param(pio_mask, int, 0);
diff --git a/drivers/ata/pata_pxa.c b/drivers/ata/pata_pxa.c
index b4ede40f8ae1..0bb0fb7b26bc 100644
--- a/drivers/ata/pata_pxa.c
+++ b/drivers/ata/pata_pxa.c
@@ -390,18 +390,7 @@ static struct platform_driver pxa_ata_driver = {
},
};
-static int __init pxa_ata_init(void)
-{
- return platform_driver_register(&pxa_ata_driver);
-}
-
-static void __exit pxa_ata_exit(void)
-{
- platform_driver_unregister(&pxa_ata_driver);
-}
-
-module_init(pxa_ata_init);
-module_exit(pxa_ata_exit);
+module_platform_driver(pxa_ata_driver);
MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>");
MODULE_DESCRIPTION("DMA-capable driver for PATA on PXA CPU");
diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c
index 1b9d10d9c5d9..9417101bd5ca 100644
--- a/drivers/ata/pata_rb532_cf.c
+++ b/drivers/ata/pata_rb532_cf.c
@@ -188,9 +188,6 @@ static __devexit int rb532_pata_driver_remove(struct platform_device *pdev)
return 0;
}
-/* work with hotplug and coldplug */
-MODULE_ALIAS("platform:" DRV_NAME);
-
static struct platform_driver rb532_pata_platform_driver = {
.probe = rb532_pata_driver_probe,
.remove = __devexit_p(rb532_pata_driver_remove),
@@ -200,27 +197,13 @@ static struct platform_driver rb532_pata_platform_driver = {
},
};
-/* ------------------------------------------------------------------------ */
-
#define DRV_INFO DRV_DESC " version " DRV_VERSION
-static int __init rb532_pata_module_init(void)
-{
- printk(KERN_INFO DRV_INFO "\n");
-
- return platform_driver_register(&rb532_pata_platform_driver);
-}
-
-static void __exit rb532_pata_module_exit(void)
-{
- platform_driver_unregister(&rb532_pata_platform_driver);
-}
+module_platform_driver(rb532_pata_platform_driver);
MODULE_AUTHOR("Gabor Juhos <juhosg at openwrt.org>");
MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
MODULE_DESCRIPTION(DRV_DESC);
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");
-
-module_init(rb532_pata_module_init);
-module_exit(rb532_pata_module_exit);
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
index 5c4237452f50..69f7cde49c6b 100644
--- a/drivers/ata/sata_dwc_460ex.c
+++ b/drivers/ata/sata_dwc_460ex.c
@@ -1777,18 +1777,7 @@ static struct platform_driver sata_dwc_driver = {
.remove = sata_dwc_remove,
};
-static int __init sata_dwc_init(void)
-{
- return platform_driver_register(&sata_dwc_driver);
-}
-
-static void __exit sata_dwc_exit(void)
-{
- platform_driver_unregister(&sata_dwc_driver);
-}
-
-module_init(sata_dwc_init);
-module_exit(sata_dwc_exit);
+module_platform_driver(sata_dwc_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mark Miesfeld <mmiesfeld@amcc.com>");
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index 78ae7b67b09e..0120b0d1e9a5 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -140,6 +140,7 @@ enum {
*/
HCONTROL_ONLINE_PHY_RST = (1 << 31),
HCONTROL_FORCE_OFFLINE = (1 << 30),
+ HCONTROL_LEGACY = (1 << 28),
HCONTROL_PARITY_PROT_MOD = (1 << 14),
HCONTROL_DPATH_PARITY = (1 << 12),
HCONTROL_SNOOP_ENABLE = (1 << 10),
@@ -1223,6 +1224,10 @@ static int sata_fsl_init_controller(struct ata_host *host)
* part of the port_start() callback
*/
+ /* set the SATA controller to operate in enterprise mode */
+ temp = ioread32(hcr_base + HCONTROL);
+ iowrite32(temp & ~HCONTROL_LEGACY, hcr_base + HCONTROL);
+
/* ack. any pending IRQs for this controller/port */
temp = ioread32(hcr_base + HSTATUS);
if (temp & 0x3F)
@@ -1421,6 +1426,12 @@ static int sata_fsl_resume(struct platform_device *op)
/* Recovery the CHBA register in host controller cmd register set */
iowrite32(pp->cmdslot_paddr & 0xffffffff, hcr_base + CHBA);
+ iowrite32((ioread32(hcr_base + HCONTROL)
+ | HCONTROL_ONLINE_PHY_RST
+ | HCONTROL_SNOOP_ENABLE
+ | HCONTROL_PMP_ATTACHED),
+ hcr_base + HCONTROL);
+
ata_host_resume(host);
return 0;
}
@@ -1452,21 +1463,9 @@ static struct platform_driver fsl_sata_driver = {
#endif
};
-static int __init sata_fsl_init(void)
-{
- platform_driver_register(&fsl_sata_driver);
- return 0;
-}
-
-static void __exit sata_fsl_exit(void)
-{
- platform_driver_unregister(&fsl_sata_driver);
-}
+module_platform_driver(fsl_sata_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ashish Kalra, Freescale Semiconductor");
MODULE_DESCRIPTION("Freescale 3.0Gbps SATA controller low level driver");
MODULE_VERSION("1.10");
-
-module_init(sata_fsl_init);
-module_exit(sata_fsl_exit);
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 0b8b8b488ee8..38950ea8398a 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -3988,7 +3988,7 @@ static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
}
static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
- struct mbus_dram_target_info *dram)
+ const struct mbus_dram_target_info *dram)
{
int i;
@@ -3998,7 +3998,7 @@ static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
}
for (i = 0; i < dram->num_cs; i++) {
- struct mbus_dram_window *cs = dram->cs + i;
+ const struct mbus_dram_window *cs = dram->cs + i;
writel(((cs->size - 1) & 0xffff0000) |
(cs->mbus_attr << 8) |
@@ -4019,6 +4019,7 @@ static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
static int mv_platform_probe(struct platform_device *pdev)
{
const struct mv_sata_platform_data *mv_platform_data;
+ const struct mbus_dram_target_info *dram;
const struct ata_port_info *ppi[] =
{ &mv_port_info[chip_soc], NULL };
struct ata_host *host;
@@ -4072,8 +4073,9 @@ static int mv_platform_probe(struct platform_device *pdev)
/*
* (Re-)program MBUS remapping windows if we are asked to.
*/
- if (mv_platform_data->dram != NULL)
- mv_conf_mbus_windows(hpriv, mv_platform_data->dram);
+ dram = mv_mbus_dram_info();
+ if (dram)
+ mv_conf_mbus_windows(hpriv, dram);
rc = mv_create_dma_pools(hpriv, &pdev->dev);
if (rc)
@@ -4141,17 +4143,18 @@ static int mv_platform_suspend(struct platform_device *pdev, pm_message_t state)
static int mv_platform_resume(struct platform_device *pdev)
{
struct ata_host *host = platform_get_drvdata(pdev);
+ const struct mbus_dram_target_info *dram;
int ret;
if (host) {
struct mv_host_priv *hpriv = host->private_data;
- const struct mv_sata_platform_data *mv_platform_data = \
- pdev->dev.platform_data;
+
/*
* (Re-)program MBUS remapping windows if we are asked to.
*/
- if (mv_platform_data->dram != NULL)
- mv_conf_mbus_windows(hpriv, mv_platform_data->dram);
+ dram = mv_mbus_dram_info();
+ if (dram)
+ mv_conf_mbus_windows(hpriv, dram);
/* initialize adapter */
ret = mv_init_host(host);
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index e0bc9646a38e..55d6179dde58 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -599,9 +599,9 @@ MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
MODULE_VERSION(DRV_VERSION);
-static int adma_enabled;
-static int swncq_enabled = 1;
-static int msi_enabled;
+static bool adma_enabled;
+static bool swncq_enabled = 1;
+static bool msi_enabled;
static void nv_adma_register_mode(struct ata_port *ap)
{
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index 1e9140626a83..e7e610aa9a7a 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -417,7 +417,7 @@ static struct ata_port_operations sil24_ops = {
#endif
};
-static int sata_sil24_msi; /* Disable MSI */
+static bool sata_sil24_msi; /* Disable MSI */
module_param_named(msi, sata_sil24_msi, bool, S_IRUGO);
MODULE_PARM_DESC(msi, "Enable MSI (Default: false)");
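
The int-to-bool switches in sata_nv, sata_sil24 and the ATM drivers below bring the variables in line with the stricter bool handling of module_param() in this release: a parameter declared with type "bool" is expected to be backed by a bool variable. A minimal, hypothetical example of the resulting pattern (names are illustrative):

    static bool enable_feature;     /* previously "static int enable_feature;" */
    module_param(enable_feature, bool, S_IRUGO);
    MODULE_PARM_DESC(enable_feature, "Enable the optional feature (default: false)");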
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index 9a51df4f5b74..b182c2f7d777 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -112,12 +112,12 @@ static u8 read_prom_byte(struct he_dev *he_dev, int addr);
/* globals */
static struct he_dev *he_devs;
-static int disable64;
+static bool disable64;
static short nvpibits = -1;
static short nvcibits = -1;
static short rx_skb_reserve = 16;
-static int irq_coalesce = 1;
-static int sdh = 0;
+static bool irq_coalesce = 1;
+static bool sdh = 0;
/* Read from EEPROM = 0000 0011b */
static unsigned int readtab[] = {
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index 3d0c2b0fed9c..9e373ba20308 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -1320,8 +1320,8 @@ static void rx_dle_intr(struct atm_dev *dev)
if (ia_vcc == NULL)
{
atomic_inc(&vcc->stats->rx_err);
+ atm_return(vcc, skb->truesize);
dev_kfree_skb_any(skb);
- atm_return(vcc, atm_guess_pdu2truesize(len));
goto INCR_DLE;
}
// get real pkt length pwang_test
@@ -1334,8 +1334,8 @@ static void rx_dle_intr(struct atm_dev *dev)
atomic_inc(&vcc->stats->rx_err);
IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
length, skb->len);)
+ atm_return(vcc, skb->truesize);
dev_kfree_skb_any(skb);
- atm_return(vcc, atm_guess_pdu2truesize(len));
goto INCR_DLE;
}
skb_trim(skb, length);
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 21cf46f45245..7be9f79018e9 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -172,6 +172,21 @@ config SYS_HYPERVISOR
bool
default n
+config GENERIC_CPU_DEVICES
+ bool
+ default n
+
source "drivers/base/regmap/Kconfig"
+config DMA_SHARED_BUFFER
+ bool
+ default n
+ select ANON_INODES
+ depends on EXPERIMENTAL
+ help
+ This option enables the framework for buffer-sharing between
+ multiple drivers. A buffer is associated with a file using the
+ driver API extensions; the file descriptor can then be passed on
+ to other drivers.
+
endmenu
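
The new DMA_SHARED_BUFFER framework (added as drivers/base/dma-buf.c later in this diff) is driven from the exporter side; a minimal, hypothetical exporter sketch follows. Only dma_buf_export(), dma_buf_fd() and the dma_buf_ops callbacks are from the new API; every my_* name, the size handling and the stub bodies are illustrative:

    static struct sg_table *my_map(struct dma_buf_attachment *attach,
                                   enum dma_data_direction dir)
    {
            /* build and DMA-map an sg_table for attach->dev */
            return NULL;
    }

    static void my_unmap(struct dma_buf_attachment *attach, struct sg_table *sgt)
    {
            /* undo my_map() */
    }

    static void my_release(struct dma_buf *dmabuf)
    {
            /* drop the exporter's own reference to dmabuf->priv */
    }

    static struct dma_buf_ops my_dmabuf_ops = {
            .map_dma_buf    = my_map,
            .unmap_dma_buf  = my_unmap,
            .release        = my_release,
    };

    int my_export_buffer(void *priv, size_t size)
    {
            struct dma_buf *dmabuf = dma_buf_export(priv, &my_dmabuf_ops,
                                                    size, O_RDWR);

            if (IS_ERR(dmabuf))
                    return PTR_ERR(dmabuf);
            return dma_buf_fd(dmabuf);      /* fd handed to the importing driver */
    }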
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 1334d893b560..2c8272dd93c4 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_DEVTMPFS) += devtmpfs.o
obj-y += power/
obj-$(CONFIG_HAS_DMA) += dma-mapping.o
obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
+obj-$(CONFIG_DMA_SHARED_BUFFER) += dma-buf.o
obj-$(CONFIG_ISA) += isa.o
obj-$(CONFIG_FW_LOADER) += firmware_class.o
obj-$(CONFIG_NUMA) += node.o
diff --git a/drivers/base/base.h b/drivers/base/base.h
index 7a6ae4228761..b858dfd9a37c 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -94,7 +94,7 @@ extern int hypervisor_init(void);
static inline int hypervisor_init(void) { return 0; }
#endif
extern int platform_bus_init(void);
-extern int cpu_dev_init(void);
+extern void cpu_dev_init(void);
extern int bus_add_device(struct device *dev);
extern void bus_probe_device(struct device *dev);
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 2dbe801c6aef..4a67cc0c8b37 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -248,7 +248,7 @@ static int dev_uevent(struct kset *kset, struct kobject *kobj,
if (MAJOR(dev->devt)) {
const char *tmp;
const char *name;
- mode_t mode = 0;
+ umode_t mode = 0;
add_uevent_var(env, "MAJOR=%u", MAJOR(dev->devt));
add_uevent_var(env, "MINOR=%u", MINOR(dev->devt));
@@ -1235,7 +1235,7 @@ static struct device *next_device(struct klist_iter *i)
* freed by the caller.
*/
const char *device_get_devnode(struct device *dev,
- mode_t *mode, const char **tmp)
+ umode_t *mode, const char **tmp)
{
char *s;
@@ -1796,8 +1796,10 @@ void device_shutdown(void)
*/
list_del_init(&dev->kobj.entry);
spin_unlock(&devices_kset->list_lock);
- /* Disable all device's runtime power management */
- pm_runtime_disable(dev);
+
+ /* Don't allow any more runtime suspends */
+ pm_runtime_get_noresume(dev);
+ pm_runtime_barrier(dev);
if (dev->bus && dev->bus->shutdown) {
dev_dbg(dev, "shutdown\n");
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 251acea3d359..db87e78d7459 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -1,8 +1,8 @@
/*
- * drivers/base/cpu.c - basic CPU class support
+ * CPU subsystem support
*/
-#include <linux/sysdev.h>
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
@@ -11,43 +11,44 @@
#include <linux/device.h>
#include <linux/node.h>
#include <linux/gfp.h>
+#include <linux/percpu.h>
#include "base.h"
-static struct sysdev_class_attribute *cpu_sysdev_class_attrs[];
-
-struct sysdev_class cpu_sysdev_class = {
+struct bus_type cpu_subsys = {
.name = "cpu",
- .attrs = cpu_sysdev_class_attrs,
+ .dev_name = "cpu",
};
-EXPORT_SYMBOL(cpu_sysdev_class);
+EXPORT_SYMBOL_GPL(cpu_subsys);
-static DEFINE_PER_CPU(struct sys_device *, cpu_sys_devices);
+static DEFINE_PER_CPU(struct device *, cpu_sys_devices);
#ifdef CONFIG_HOTPLUG_CPU
-static ssize_t show_online(struct sys_device *dev, struct sysdev_attribute *attr,
+static ssize_t show_online(struct device *dev,
+ struct device_attribute *attr,
char *buf)
{
- struct cpu *cpu = container_of(dev, struct cpu, sysdev);
+ struct cpu *cpu = container_of(dev, struct cpu, dev);
- return sprintf(buf, "%u\n", !!cpu_online(cpu->sysdev.id));
+ return sprintf(buf, "%u\n", !!cpu_online(cpu->dev.id));
}
-static ssize_t __ref store_online(struct sys_device *dev, struct sysdev_attribute *attr,
- const char *buf, size_t count)
+static ssize_t __ref store_online(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
- struct cpu *cpu = container_of(dev, struct cpu, sysdev);
+ struct cpu *cpu = container_of(dev, struct cpu, dev);
ssize_t ret;
cpu_hotplug_driver_lock();
switch (buf[0]) {
case '0':
- ret = cpu_down(cpu->sysdev.id);
+ ret = cpu_down(cpu->dev.id);
if (!ret)
kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
break;
case '1':
- ret = cpu_up(cpu->sysdev.id);
+ ret = cpu_up(cpu->dev.id);
if (!ret)
kobject_uevent(&dev->kobj, KOBJ_ONLINE);
break;
@@ -60,44 +61,44 @@ static ssize_t __ref store_online(struct sys_device *dev, struct sysdev_attribut
ret = count;
return ret;
}
-static SYSDEV_ATTR(online, 0644, show_online, store_online);
+static DEVICE_ATTR(online, 0644, show_online, store_online);
static void __cpuinit register_cpu_control(struct cpu *cpu)
{
- sysdev_create_file(&cpu->sysdev, &attr_online);
+ device_create_file(&cpu->dev, &dev_attr_online);
}
void unregister_cpu(struct cpu *cpu)
{
- int logical_cpu = cpu->sysdev.id;
+ int logical_cpu = cpu->dev.id;
unregister_cpu_under_node(logical_cpu, cpu_to_node(logical_cpu));
- sysdev_remove_file(&cpu->sysdev, &attr_online);
+ device_remove_file(&cpu->dev, &dev_attr_online);
- sysdev_unregister(&cpu->sysdev);
+ device_unregister(&cpu->dev);
per_cpu(cpu_sys_devices, logical_cpu) = NULL;
return;
}
#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
-static ssize_t cpu_probe_store(struct sysdev_class *class,
- struct sysdev_class_attribute *attr,
+static ssize_t cpu_probe_store(struct device *dev,
+ struct device_attribute *attr,
const char *buf,
size_t count)
{
return arch_cpu_probe(buf, count);
}
-static ssize_t cpu_release_store(struct sysdev_class *class,
- struct sysdev_class_attribute *attr,
+static ssize_t cpu_release_store(struct device *dev,
+ struct device_attribute *attr,
const char *buf,
size_t count)
{
return arch_cpu_release(buf, count);
}
-static SYSDEV_CLASS_ATTR(probe, S_IWUSR, NULL, cpu_probe_store);
-static SYSDEV_CLASS_ATTR(release, S_IWUSR, NULL, cpu_release_store);
+static DEVICE_ATTR(probe, S_IWUSR, NULL, cpu_probe_store);
+static DEVICE_ATTR(release, S_IWUSR, NULL, cpu_release_store);
#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
#else /* ... !CONFIG_HOTPLUG_CPU */
@@ -109,15 +110,15 @@ static inline void register_cpu_control(struct cpu *cpu)
#ifdef CONFIG_KEXEC
#include <linux/kexec.h>
-static ssize_t show_crash_notes(struct sys_device *dev, struct sysdev_attribute *attr,
+static ssize_t show_crash_notes(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct cpu *cpu = container_of(dev, struct cpu, sysdev);
+ struct cpu *cpu = container_of(dev, struct cpu, dev);
ssize_t rc;
unsigned long long addr;
int cpunum;
- cpunum = cpu->sysdev.id;
+ cpunum = cpu->dev.id;
/*
* Might be reading other cpu's data based on which cpu read thread
@@ -129,7 +130,7 @@ static ssize_t show_crash_notes(struct sys_device *dev, struct sysdev_attribute
rc = sprintf(buf, "%Lx\n", addr);
return rc;
}
-static SYSDEV_ATTR(crash_notes, 0400, show_crash_notes, NULL);
+static DEVICE_ATTR(crash_notes, 0400, show_crash_notes, NULL);
#endif
/*
@@ -137,12 +138,12 @@ static SYSDEV_ATTR(crash_notes, 0400, show_crash_notes, NULL);
*/
struct cpu_attr {
- struct sysdev_class_attribute attr;
+ struct device_attribute attr;
const struct cpumask *const * const map;
};
-static ssize_t show_cpus_attr(struct sysdev_class *class,
- struct sysdev_class_attribute *attr,
+static ssize_t show_cpus_attr(struct device *dev,
+ struct device_attribute *attr,
char *buf)
{
struct cpu_attr *ca = container_of(attr, struct cpu_attr, attr);
@@ -153,10 +154,10 @@ static ssize_t show_cpus_attr(struct sysdev_class *class,
return n;
}
-#define _CPU_ATTR(name, map) \
- { _SYSDEV_CLASS_ATTR(name, 0444, show_cpus_attr, NULL), map }
+#define _CPU_ATTR(name, map) \
+ { __ATTR(name, 0444, show_cpus_attr, NULL), map }
-/* Keep in sync with cpu_sysdev_class_attrs */
+/* Keep in sync with cpu_subsys_attrs */
static struct cpu_attr cpu_attrs[] = {
_CPU_ATTR(online, &cpu_online_mask),
_CPU_ATTR(possible, &cpu_possible_mask),
@@ -166,19 +167,19 @@ static struct cpu_attr cpu_attrs[] = {
/*
* Print values for NR_CPUS and offlined cpus
*/
-static ssize_t print_cpus_kernel_max(struct sysdev_class *class,
- struct sysdev_class_attribute *attr, char *buf)
+static ssize_t print_cpus_kernel_max(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
int n = snprintf(buf, PAGE_SIZE-2, "%d\n", NR_CPUS - 1);
return n;
}
-static SYSDEV_CLASS_ATTR(kernel_max, 0444, print_cpus_kernel_max, NULL);
+static DEVICE_ATTR(kernel_max, 0444, print_cpus_kernel_max, NULL);
/* arch-optional setting to enable display of offline cpus >= nr_cpu_ids */
unsigned int total_cpus;
-static ssize_t print_cpus_offline(struct sysdev_class *class,
- struct sysdev_class_attribute *attr, char *buf)
+static ssize_t print_cpus_offline(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
int n = 0, len = PAGE_SIZE-2;
cpumask_var_t offline;
@@ -205,7 +206,7 @@ static ssize_t print_cpus_offline(struct sysdev_class *class,
n += snprintf(&buf[n], len - n, "\n");
return n;
}
-static SYSDEV_CLASS_ATTR(offline, 0444, print_cpus_offline, NULL);
+static DEVICE_ATTR(offline, 0444, print_cpus_offline, NULL);
/*
* register_cpu - Setup a sysfs device for a CPU.
@@ -218,57 +219,87 @@ static SYSDEV_CLASS_ATTR(offline, 0444, print_cpus_offline, NULL);
int __cpuinit register_cpu(struct cpu *cpu, int num)
{
int error;
- cpu->node_id = cpu_to_node(num);
- cpu->sysdev.id = num;
- cpu->sysdev.cls = &cpu_sysdev_class;
-
- error = sysdev_register(&cpu->sysdev);
+ cpu->node_id = cpu_to_node(num);
+ cpu->dev.id = num;
+ cpu->dev.bus = &cpu_subsys;
+ error = device_register(&cpu->dev);
if (!error && cpu->hotpluggable)
register_cpu_control(cpu);
if (!error)
- per_cpu(cpu_sys_devices, num) = &cpu->sysdev;
+ per_cpu(cpu_sys_devices, num) = &cpu->dev;
if (!error)
register_cpu_under_node(num, cpu_to_node(num));
#ifdef CONFIG_KEXEC
if (!error)
- error = sysdev_create_file(&cpu->sysdev, &attr_crash_notes);
+ error = device_create_file(&cpu->dev, &dev_attr_crash_notes);
#endif
return error;
}
-struct sys_device *get_cpu_sysdev(unsigned cpu)
+struct device *get_cpu_device(unsigned cpu)
{
if (cpu < nr_cpu_ids && cpu_possible(cpu))
return per_cpu(cpu_sys_devices, cpu);
else
return NULL;
}
-EXPORT_SYMBOL_GPL(get_cpu_sysdev);
+EXPORT_SYMBOL_GPL(get_cpu_device);
+
+static struct attribute *cpu_root_attrs[] = {
+#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
+ &dev_attr_probe.attr,
+ &dev_attr_release.attr,
+#endif
+ &cpu_attrs[0].attr.attr,
+ &cpu_attrs[1].attr.attr,
+ &cpu_attrs[2].attr.attr,
+ &dev_attr_kernel_max.attr,
+ &dev_attr_offline.attr,
+ NULL
+};
+
+static struct attribute_group cpu_root_attr_group = {
+ .attrs = cpu_root_attrs,
+};
-int __init cpu_dev_init(void)
+static const struct attribute_group *cpu_root_attr_groups[] = {
+ &cpu_root_attr_group,
+ NULL,
+};
+
+bool cpu_is_hotpluggable(unsigned cpu)
{
- int err;
+ struct device *dev = get_cpu_device(cpu);
+ return dev && container_of(dev, struct cpu, dev)->hotpluggable;
+}
+EXPORT_SYMBOL_GPL(cpu_is_hotpluggable);
- err = sysdev_class_register(&cpu_sysdev_class);
-#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
- if (!err)
- err = sched_create_sysfs_power_savings_entries(&cpu_sysdev_class);
+#ifdef CONFIG_GENERIC_CPU_DEVICES
+static DEFINE_PER_CPU(struct cpu, cpu_devices);
#endif
- return err;
+static void __init cpu_dev_register_generic(void)
+{
+#ifdef CONFIG_GENERIC_CPU_DEVICES
+ int i;
+
+ for_each_possible_cpu(i) {
+ if (register_cpu(&per_cpu(cpu_devices, i), i))
+ panic("Failed to register CPU device");
+ }
+#endif
}
-static struct sysdev_class_attribute *cpu_sysdev_class_attrs[] = {
-#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
- &attr_probe,
- &attr_release,
+void __init cpu_dev_init(void)
+{
+ if (subsys_system_register(&cpu_subsys, cpu_root_attr_groups))
+ panic("Failed to register CPU subsystem");
+
+ cpu_dev_register_generic();
+
+#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
+ sched_create_sysfs_power_savings_entries(cpu_subsys.dev_root);
#endif
- &cpu_attrs[0].attr,
- &cpu_attrs[1].attr,
- &cpu_attrs[2].attr,
- &attr_kernel_max,
- &attr_offline,
- NULL
-};
+}
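
Consumers that used to hold struct sys_device pointers can go through the two helpers introduced above instead; a small sketch of the consumer side (the reporting function is illustrative, get_cpu_device() and cpu_is_hotpluggable() are the interfaces added by this change):

    #include <linux/cpu.h>

    static void report_cpu(unsigned int cpu)
    {
            struct device *dev = get_cpu_device(cpu);      /* was get_cpu_sysdev() */

            if (!dev)
                    return;
            pr_info("cpu%u: %shotpluggable\n", dev->id,
                    cpu_is_hotpluggable(cpu) ? "" : "not ");
    }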
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index 65cd74832450..524bf96c289f 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -639,7 +639,7 @@ EXPORT_SYMBOL_GPL(devm_kzalloc);
* @dev: Device this memory belongs to
* @p: Memory to free
*
- * Free memory allocated with dev_kzalloc().
+ * Free memory allocated with devm_kzalloc().
*/
void devm_kfree(struct device *dev, void *p)
{
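
The comment fix above concerns the managed allocator pair; a minimal probe-path sketch of how the two are meant to be used (the private structure and probe function are hypothetical):

    struct mydev_priv {
            int state;
    };

    static int mydev_probe(struct platform_device *pdev)
    {
            struct mydev_priv *priv;

            priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
            if (!priv)
                    return -ENOMEM;

            /* Released automatically on unbind; devm_kfree() is only needed
             * if the memory must be given back earlier than that. */
            platform_set_drvdata(pdev, priv);
            return 0;
    }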
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index 2bb4bff3af7d..8493536ea55b 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -40,7 +40,7 @@ static struct req {
struct completion done;
int err;
const char *name;
- mode_t mode; /* 0 => delete */
+ umode_t mode; /* 0 => delete */
struct device *dev;
} *requests;
@@ -142,7 +142,7 @@ int devtmpfs_delete_node(struct device *dev)
return req.err;
}
-static int dev_mkdir(const char *name, mode_t mode)
+static int dev_mkdir(const char *name, umode_t mode)
{
struct dentry *dentry;
struct path path;
@@ -189,7 +189,7 @@ static int create_path(const char *nodepath)
return err;
}
-static int handle_create(const char *nodename, mode_t mode, struct device *dev)
+static int handle_create(const char *nodename, umode_t mode, struct device *dev)
{
struct dentry *dentry;
struct path path;
@@ -378,7 +378,7 @@ int devtmpfs_mount(const char *mntdir)
static DECLARE_COMPLETION(setup_done);
-static int handle(const char *name, mode_t mode, struct device *dev)
+static int handle(const char *name, umode_t mode, struct device *dev)
{
if (mode)
return handle_create(name, mode, dev);
diff --git a/drivers/base/dma-buf.c b/drivers/base/dma-buf.c
new file mode 100644
index 000000000000..e38ad243b4bb
--- /dev/null
+++ b/drivers/base/dma-buf.c
@@ -0,0 +1,291 @@
+/*
+ * Framework for buffer objects that can be shared across devices/subsystems.
+ *
+ * Copyright(C) 2011 Linaro Limited. All rights reserved.
+ * Author: Sumit Semwal <sumit.semwal@ti.com>
+ *
+ * Many thanks to linaro-mm-sig list, and specially
+ * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
+ * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
+ * refining of this idea.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/dma-buf.h>
+#include <linux/anon_inodes.h>
+#include <linux/export.h>
+
+static inline int is_dma_buf_file(struct file *);
+
+static int dma_buf_release(struct inode *inode, struct file *file)
+{
+ struct dma_buf *dmabuf;
+
+ if (!is_dma_buf_file(file))
+ return -EINVAL;
+
+ dmabuf = file->private_data;
+
+ dmabuf->ops->release(dmabuf);
+ kfree(dmabuf);
+ return 0;
+}
+
+static const struct file_operations dma_buf_fops = {
+ .release = dma_buf_release,
+};
+
+/*
+ * is_dma_buf_file - Check if struct file* is associated with dma_buf
+ */
+static inline int is_dma_buf_file(struct file *file)
+{
+ return file->f_op == &dma_buf_fops;
+}
+
+/**
+ * dma_buf_export - Creates a new dma_buf, and associates an anon file
+ * with this buffer, so it can be exported.
+ * Also connect the allocator specific data and ops to the buffer.
+ *
+ * @priv: [in] Attach private data of allocator to this buffer
+ * @ops: [in] Attach allocator-defined dma buf ops to the new buffer.
+ * @size: [in] Size of the buffer
+ * @flags: [in] mode flags for the file.
+ *
+ * On success, returns a newly created dma_buf object that wraps the
+ * supplied private data and dma_buf_ops operations. If mandatory ops are
+ * missing, or allocation of struct dma_buf fails, an ERR_PTR-encoded
+ * error is returned.
+ *
+ */
+struct dma_buf *dma_buf_export(void *priv, struct dma_buf_ops *ops,
+ size_t size, int flags)
+{
+ struct dma_buf *dmabuf;
+ struct file *file;
+
+ if (WARN_ON(!priv || !ops
+ || !ops->map_dma_buf
+ || !ops->unmap_dma_buf
+ || !ops->release)) {
+ return ERR_PTR(-EINVAL);
+ }
+
+ dmabuf = kzalloc(sizeof(struct dma_buf), GFP_KERNEL);
+ if (dmabuf == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ dmabuf->priv = priv;
+ dmabuf->ops = ops;
+ dmabuf->size = size;
+
+ file = anon_inode_getfile("dmabuf", &dma_buf_fops, dmabuf, flags);
+
+ dmabuf->file = file;
+
+ mutex_init(&dmabuf->lock);
+ INIT_LIST_HEAD(&dmabuf->attachments);
+
+ return dmabuf;
+}
+EXPORT_SYMBOL_GPL(dma_buf_export);
+
+
+/**
+ * dma_buf_fd - returns a file descriptor for the given dma_buf
+ * @dmabuf: [in] pointer to dma_buf for which fd is required.
+ *
+ * On success, returns an associated 'fd'. Else, returns error.
+ */
+int dma_buf_fd(struct dma_buf *dmabuf)
+{
+ int error, fd;
+
+ if (!dmabuf || !dmabuf->file)
+ return -EINVAL;
+
+ error = get_unused_fd();
+ if (error < 0)
+ return error;
+ fd = error;
+
+ fd_install(fd, dmabuf->file);
+
+ return fd;
+}
+EXPORT_SYMBOL_GPL(dma_buf_fd);
+
+/**
+ * dma_buf_get - returns the dma_buf structure related to an fd
+ * @fd: [in] fd associated with the dma_buf to be returned
+ *
+ * On success, returns the dma_buf structure associated with the fd; the
+ * file's refcount is increased via fget(). Returns an ERR_PTR otherwise.
+ */
+struct dma_buf *dma_buf_get(int fd)
+{
+ struct file *file;
+
+ file = fget(fd);
+
+ if (!file)
+ return ERR_PTR(-EBADF);
+
+ if (!is_dma_buf_file(file)) {
+ fput(file);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return file->private_data;
+}
+EXPORT_SYMBOL_GPL(dma_buf_get);
+
+/**
+ * dma_buf_put - decreases refcount of the buffer
+ * @dmabuf: [in] buffer to reduce refcount of
+ *
+ * Uses file's refcounting done implicitly by fput()
+ */
+void dma_buf_put(struct dma_buf *dmabuf)
+{
+ if (WARN_ON(!dmabuf || !dmabuf->file))
+ return;
+
+ fput(dmabuf->file);
+}
+EXPORT_SYMBOL_GPL(dma_buf_put);
+
+/**
+ * dma_buf_attach - Add the device to dma_buf's attachments list; optionally,
+ * calls attach() of dma_buf_ops to allow device-specific attach functionality
+ * @dmabuf: [in] buffer to attach device to.
+ * @dev: [in] device to be attached.
+ *
+ * Returns struct dma_buf_attachment * for this attachment; may return negative
+ * error codes.
+ *
+ */
+struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
+ struct device *dev)
+{
+ struct dma_buf_attachment *attach;
+ int ret;
+
+ if (WARN_ON(!dmabuf || !dev || !dmabuf->ops))
+ return ERR_PTR(-EINVAL);
+
+ attach = kzalloc(sizeof(struct dma_buf_attachment), GFP_KERNEL);
+ if (attach == NULL)
+ goto err_alloc;
+
+ mutex_lock(&dmabuf->lock);
+
+ attach->dev = dev;
+ attach->dmabuf = dmabuf;
+ if (dmabuf->ops->attach) {
+ ret = dmabuf->ops->attach(dmabuf, dev, attach);
+ if (ret)
+ goto err_attach;
+ }
+ list_add(&attach->node, &dmabuf->attachments);
+
+ mutex_unlock(&dmabuf->lock);
+ return attach;
+
+err_alloc:
+ return ERR_PTR(-ENOMEM);
+err_attach:
+ kfree(attach);
+ mutex_unlock(&dmabuf->lock);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(dma_buf_attach);
+
+/**
+ * dma_buf_detach - Remove the given attachment from dmabuf's attachments list;
+ * optionally calls detach() of dma_buf_ops for device-specific detach
+ * @dmabuf: [in] buffer to detach from.
+ * @attach: [in] attachment to be detached; is free'd after this call.
+ *
+ */
+void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
+{
+ if (WARN_ON(!dmabuf || !attach || !dmabuf->ops))
+ return;
+
+ mutex_lock(&dmabuf->lock);
+ list_del(&attach->node);
+ if (dmabuf->ops->detach)
+ dmabuf->ops->detach(dmabuf, attach);
+
+ mutex_unlock(&dmabuf->lock);
+ kfree(attach);
+}
+EXPORT_SYMBOL_GPL(dma_buf_detach);
+
+/**
+ * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
+ * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
+ * dma_buf_ops.
+ * @attach: [in] attachment whose scatterlist is to be returned
+ * @direction: [in] direction of DMA transfer
+ *
+ * Returns sg_table containing the scatterlist to be returned; may return NULL
+ * or ERR_PTR.
+ *
+ */
+struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
+ enum dma_data_direction direction)
+{
+ struct sg_table *sg_table = ERR_PTR(-EINVAL);
+
+ might_sleep();
+
+ if (WARN_ON(!attach || !attach->dmabuf || !attach->dmabuf->ops))
+ return ERR_PTR(-EINVAL);
+
+ mutex_lock(&attach->dmabuf->lock);
+ if (attach->dmabuf->ops->map_dma_buf)
+ sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
+ mutex_unlock(&attach->dmabuf->lock);
+
+ return sg_table;
+}
+EXPORT_SYMBOL_GPL(dma_buf_map_attachment);
+
+/**
+ * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer; might
+ * deallocate the associated scatterlist. Is a wrapper for unmap_dma_buf() of
+ * dma_buf_ops.
+ * @attach: [in] attachment to unmap buffer from
+ * @sg_table: [in] scatterlist info of the buffer to unmap
+ *
+ */
+void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
+ struct sg_table *sg_table)
+{
+ if (WARN_ON(!attach || !attach->dmabuf || !sg_table
+ || !attach->dmabuf->ops))
+ return;
+
+ mutex_lock(&attach->dmabuf->lock);
+ if (attach->dmabuf->ops->unmap_dma_buf)
+ attach->dmabuf->ops->unmap_dma_buf(attach, sg_table);
+ mutex_unlock(&attach->dmabuf->lock);
+
+}
+EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
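
A hypothetical importer-side walk through the interfaces exported above (device pointer, fd plumbing and error strategy are illustrative; the dma-buf calls follow the signatures in this file):

    static int my_import_buffer(struct device *dev, int fd)
    {
            struct dma_buf *dmabuf;
            struct dma_buf_attachment *attach;
            struct sg_table *sgt;
            int ret = 0;

            dmabuf = dma_buf_get(fd);               /* takes a file reference */
            if (IS_ERR(dmabuf))
                    return PTR_ERR(dmabuf);

            attach = dma_buf_attach(dmabuf, dev);
            if (IS_ERR(attach)) {
                    ret = PTR_ERR(attach);
                    goto out_put;
            }

            sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
            if (IS_ERR_OR_NULL(sgt)) {
                    ret = sgt ? PTR_ERR(sgt) : -ENOMEM;
                    goto out_detach;
            }

            /* ... hand sgt to the importing device ... */

            dma_buf_unmap_attachment(attach, sgt);
    out_detach:
            dma_buf_detach(dmabuf, attach);
    out_put:
            dma_buf_put(dmabuf);
            return ret;
    }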
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 06ed6b4e7df5..26ab358dac62 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -226,13 +226,13 @@ static ssize_t firmware_loading_store(struct device *dev,
int loading = simple_strtol(buf, NULL, 10);
int i;
+ mutex_lock(&fw_lock);
+
+ if (!fw_priv->fw)
+ goto out;
+
switch (loading) {
case 1:
- mutex_lock(&fw_lock);
- if (!fw_priv->fw) {
- mutex_unlock(&fw_lock);
- break;
- }
firmware_free_data(fw_priv->fw);
memset(fw_priv->fw, 0, sizeof(struct firmware));
/* If the pages are not owned by 'struct firmware' */
@@ -243,7 +243,6 @@ static ssize_t firmware_loading_store(struct device *dev,
fw_priv->page_array_size = 0;
fw_priv->nr_pages = 0;
set_bit(FW_STATUS_LOADING, &fw_priv->status);
- mutex_unlock(&fw_lock);
break;
case 0:
if (test_bit(FW_STATUS_LOADING, &fw_priv->status)) {
@@ -274,7 +273,8 @@ static ssize_t firmware_loading_store(struct device *dev,
fw_load_abort(fw_priv);
break;
}
-
+out:
+ mutex_unlock(&fw_lock);
return count;
}
@@ -534,6 +534,8 @@ static int _request_firmware(const struct firmware **firmware_p,
return 0;
}
+ read_lock_usermodehelper();
+
if (WARN_ON(usermodehelper_is_disabled())) {
dev_err(device, "firmware: %s will not be loaded\n", name);
retval = -EBUSY;
@@ -572,6 +574,8 @@ static int _request_firmware(const struct firmware **firmware_p,
fw_destroy_instance(fw_priv);
out:
+ read_unlock_usermodehelper();
+
if (retval) {
release_firmware(firmware);
*firmware_p = NULL;
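
The hunks above only restructure the loader's internal locking; the driver-facing request path is untouched. For orientation, a minimal, hypothetical consumer (the firmware name and the device pointer are illustrative):

    static int mydev_load_firmware(struct device *dev)
    {
            const struct firmware *fw;
            int ret;

            ret = request_firmware(&fw, "mydev/mydev.bin", dev);
            if (ret)
                    return ret;

            /* ... copy fw->data (fw->size bytes) into the device ... */

            release_firmware(fw);
            return 0;
    }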
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 8272d92d22c0..ed5de58c340f 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -1,5 +1,5 @@
/*
- * drivers/base/memory.c - basic Memory class support
+ * Memory subsystem support
*
* Written by Matt Tolentino <matthew.e.tolentino@intel.com>
* Dave Hansen <haveblue@us.ibm.com>
@@ -10,7 +10,6 @@
* SPARSEMEM should be contained here, or in mm/memory_hotplug.c.
*/
-#include <linux/sysdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/topology.h>
@@ -38,26 +37,9 @@ static inline int base_memory_block_id(int section_nr)
return section_nr / sections_per_block;
}
-static struct sysdev_class memory_sysdev_class = {
+static struct bus_type memory_subsys = {
.name = MEMORY_CLASS_NAME,
-};
-
-static const char *memory_uevent_name(struct kset *kset, struct kobject *kobj)
-{
- return MEMORY_CLASS_NAME;
-}
-
-static int memory_uevent(struct kset *kset, struct kobject *obj,
- struct kobj_uevent_env *env)
-{
- int retval = 0;
-
- return retval;
-}
-
-static const struct kset_uevent_ops memory_uevent_ops = {
- .name = memory_uevent_name,
- .uevent = memory_uevent,
+ .dev_name = MEMORY_CLASS_NAME,
};
static BLOCKING_NOTIFIER_HEAD(memory_chain);
@@ -96,21 +78,21 @@ int register_memory(struct memory_block *memory)
{
int error;
- memory->sysdev.cls = &memory_sysdev_class;
- memory->sysdev.id = memory->start_section_nr / sections_per_block;
+ memory->dev.bus = &memory_subsys;
+ memory->dev.id = memory->start_section_nr / sections_per_block;
- error = sysdev_register(&memory->sysdev);
+ error = device_register(&memory->dev);
return error;
}
static void
unregister_memory(struct memory_block *memory)
{
- BUG_ON(memory->sysdev.cls != &memory_sysdev_class);
+ BUG_ON(memory->dev.bus != &memory_subsys);
/* drop the ref. we got in remove_memory_block() */
- kobject_put(&memory->sysdev.kobj);
- sysdev_unregister(&memory->sysdev);
+ kobject_put(&memory->dev.kobj);
+ device_unregister(&memory->dev);
}
unsigned long __weak memory_block_size_bytes(void)
@@ -138,22 +120,22 @@ static unsigned long get_memory_block_size(void)
* uses.
*/
-static ssize_t show_mem_start_phys_index(struct sys_device *dev,
- struct sysdev_attribute *attr, char *buf)
+static ssize_t show_mem_start_phys_index(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct memory_block *mem =
- container_of(dev, struct memory_block, sysdev);
+ container_of(dev, struct memory_block, dev);
unsigned long phys_index;
phys_index = mem->start_section_nr / sections_per_block;
return sprintf(buf, "%08lx\n", phys_index);
}
-static ssize_t show_mem_end_phys_index(struct sys_device *dev,
- struct sysdev_attribute *attr, char *buf)
+static ssize_t show_mem_end_phys_index(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct memory_block *mem =
- container_of(dev, struct memory_block, sysdev);
+ container_of(dev, struct memory_block, dev);
unsigned long phys_index;
phys_index = mem->end_section_nr / sections_per_block;
@@ -163,13 +145,13 @@ static ssize_t show_mem_end_phys_index(struct sys_device *dev,
/*
* Show whether the section of memory is likely to be hot-removable
*/
-static ssize_t show_mem_removable(struct sys_device *dev,
- struct sysdev_attribute *attr, char *buf)
+static ssize_t show_mem_removable(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
unsigned long i, pfn;
int ret = 1;
struct memory_block *mem =
- container_of(dev, struct memory_block, sysdev);
+ container_of(dev, struct memory_block, dev);
for (i = 0; i < sections_per_block; i++) {
pfn = section_nr_to_pfn(mem->start_section_nr + i);
@@ -182,11 +164,11 @@ static ssize_t show_mem_removable(struct sys_device *dev,
/*
* online, offline, going offline, etc.
*/
-static ssize_t show_mem_state(struct sys_device *dev,
- struct sysdev_attribute *attr, char *buf)
+static ssize_t show_mem_state(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct memory_block *mem =
- container_of(dev, struct memory_block, sysdev);
+ container_of(dev, struct memory_block, dev);
ssize_t len = 0;
/*
@@ -313,24 +295,35 @@ static int memory_block_change_state(struct memory_block *mem,
ret = memory_block_action(mem->start_section_nr, to_state);
- if (ret)
+ if (ret) {
mem->state = from_state_req;
- else
- mem->state = to_state;
+ goto out;
+ }
+ mem->state = to_state;
+ switch (mem->state) {
+ case MEM_OFFLINE:
+ kobject_uevent(&mem->dev.kobj, KOBJ_OFFLINE);
+ break;
+ case MEM_ONLINE:
+ kobject_uevent(&mem->dev.kobj, KOBJ_ONLINE);
+ break;
+ default:
+ break;
+ }
out:
mutex_unlock(&mem->state_mutex);
return ret;
}
static ssize_t
-store_mem_state(struct sys_device *dev,
- struct sysdev_attribute *attr, const char *buf, size_t count)
+store_mem_state(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
{
struct memory_block *mem;
int ret = -EINVAL;
- mem = container_of(dev, struct memory_block, sysdev);
+ mem = container_of(dev, struct memory_block, dev);
if (!strncmp(buf, "online", min((int)count, 6)))
ret = memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE);
@@ -351,41 +344,41 @@ store_mem_state(struct sys_device *dev,
* s.t. if I offline all of these sections I can then
* remove the physical device?
*/
-static ssize_t show_phys_device(struct sys_device *dev,
- struct sysdev_attribute *attr, char *buf)
+static ssize_t show_phys_device(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct memory_block *mem =
- container_of(dev, struct memory_block, sysdev);
+ container_of(dev, struct memory_block, dev);
return sprintf(buf, "%d\n", mem->phys_device);
}
-static SYSDEV_ATTR(phys_index, 0444, show_mem_start_phys_index, NULL);
-static SYSDEV_ATTR(end_phys_index, 0444, show_mem_end_phys_index, NULL);
-static SYSDEV_ATTR(state, 0644, show_mem_state, store_mem_state);
-static SYSDEV_ATTR(phys_device, 0444, show_phys_device, NULL);
-static SYSDEV_ATTR(removable, 0444, show_mem_removable, NULL);
+static DEVICE_ATTR(phys_index, 0444, show_mem_start_phys_index, NULL);
+static DEVICE_ATTR(end_phys_index, 0444, show_mem_end_phys_index, NULL);
+static DEVICE_ATTR(state, 0644, show_mem_state, store_mem_state);
+static DEVICE_ATTR(phys_device, 0444, show_phys_device, NULL);
+static DEVICE_ATTR(removable, 0444, show_mem_removable, NULL);
#define mem_create_simple_file(mem, attr_name) \
- sysdev_create_file(&mem->sysdev, &attr_##attr_name)
+ device_create_file(&mem->dev, &dev_attr_##attr_name)
#define mem_remove_simple_file(mem, attr_name) \
- sysdev_remove_file(&mem->sysdev, &attr_##attr_name)
+ device_remove_file(&mem->dev, &dev_attr_##attr_name)
/*
* Block size attribute stuff
*/
static ssize_t
-print_block_size(struct sysdev_class *class, struct sysdev_class_attribute *attr,
+print_block_size(struct device *dev, struct device_attribute *attr,
char *buf)
{
return sprintf(buf, "%lx\n", get_memory_block_size());
}
-static SYSDEV_CLASS_ATTR(block_size_bytes, 0444, print_block_size, NULL);
+static DEVICE_ATTR(block_size_bytes, 0444, print_block_size, NULL);
static int block_size_init(void)
{
- return sysfs_create_file(&memory_sysdev_class.kset.kobj,
- &attr_block_size_bytes.attr);
+ return device_create_file(memory_subsys.dev_root,
+ &dev_attr_block_size_bytes);
}
/*
@@ -396,7 +389,7 @@ static int block_size_init(void)
*/
#ifdef CONFIG_ARCH_MEMORY_PROBE
static ssize_t
-memory_probe_store(struct class *class, struct class_attribute *attr,
+memory_probe_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
u64 phys_addr;
@@ -423,12 +416,11 @@ memory_probe_store(struct class *class, struct class_attribute *attr,
out:
return ret;
}
-static CLASS_ATTR(probe, S_IWUSR, NULL, memory_probe_store);
+static DEVICE_ATTR(probe, S_IWUSR, NULL, memory_probe_store);
static int memory_probe_init(void)
{
- return sysfs_create_file(&memory_sysdev_class.kset.kobj,
- &class_attr_probe.attr);
+ return device_create_file(memory_subsys.dev_root, &dev_attr_probe);
}
#else
static inline int memory_probe_init(void)
@@ -444,8 +436,8 @@ static inline int memory_probe_init(void)
/* Soft offline a page */
static ssize_t
-store_soft_offline_page(struct class *class,
- struct class_attribute *attr,
+store_soft_offline_page(struct device *dev,
+ struct device_attribute *attr,
const char *buf, size_t count)
{
int ret;
@@ -463,8 +455,8 @@ store_soft_offline_page(struct class *class,
/* Forcibly offline a page, including killing processes. */
static ssize_t
-store_hard_offline_page(struct class *class,
- struct class_attribute *attr,
+store_hard_offline_page(struct device *dev,
+ struct device_attribute *attr,
const char *buf, size_t count)
{
int ret;
@@ -478,18 +470,18 @@ store_hard_offline_page(struct class *class,
return ret ? ret : count;
}
-static CLASS_ATTR(soft_offline_page, 0644, NULL, store_soft_offline_page);
-static CLASS_ATTR(hard_offline_page, 0644, NULL, store_hard_offline_page);
+static DEVICE_ATTR(soft_offline_page, 0644, NULL, store_soft_offline_page);
+static DEVICE_ATTR(hard_offline_page, 0644, NULL, store_hard_offline_page);
static __init int memory_fail_init(void)
{
int err;
- err = sysfs_create_file(&memory_sysdev_class.kset.kobj,
- &class_attr_soft_offline_page.attr);
+ err = device_create_file(memory_subsys.dev_root,
+ &dev_attr_soft_offline_page);
if (!err)
- err = sysfs_create_file(&memory_sysdev_class.kset.kobj,
- &class_attr_hard_offline_page.attr);
+ err = device_create_file(memory_subsys.dev_root,
+ &dev_attr_hard_offline_page);
return err;
}
#else
@@ -509,31 +501,23 @@ int __weak arch_get_memory_phys_device(unsigned long start_pfn)
return 0;
}
+/*
+ * A reference for the returned object is held and the reference for the
+ * hinted object is released.
+ */
struct memory_block *find_memory_block_hinted(struct mem_section *section,
struct memory_block *hint)
{
- struct kobject *kobj;
- struct sys_device *sysdev;
- struct memory_block *mem;
- char name[sizeof(MEMORY_CLASS_NAME) + 9 + 1];
int block_id = base_memory_block_id(__section_nr(section));
+ struct device *hintdev = hint ? &hint->dev : NULL;
+ struct device *dev;
- kobj = hint ? &hint->sysdev.kobj : NULL;
-
- /*
- * This only works because we know that section == sysdev->id
- * slightly redundant with sysdev_register()
- */
- sprintf(&name[0], "%s%d", MEMORY_CLASS_NAME, block_id);
-
- kobj = kset_find_obj_hinted(&memory_sysdev_class.kset, name, kobj);
- if (!kobj)
+ dev = subsys_find_device_by_id(&memory_subsys, block_id, hintdev);
+ if (hint)
+ put_device(&hint->dev);
+ if (!dev)
return NULL;
-
- sysdev = container_of(kobj, struct sys_device, kobj);
- mem = container_of(sysdev, struct memory_block, sysdev);
-
- return mem;
+ return container_of(dev, struct memory_block, dev);
}
/*
@@ -542,7 +526,7 @@ struct memory_block *find_memory_block_hinted(struct mem_section *section,
* this gets to be a real problem, we can always use a radix
* tree or something here.
*
- * This could be made generic for all sysdev classes.
+ * This could be made generic for all device subsystems.
*/
struct memory_block *find_memory_block(struct mem_section *section)
{
@@ -598,7 +582,7 @@ static int add_memory_section(int nid, struct mem_section *section,
mem = find_memory_block(section);
if (mem) {
mem->section_count++;
- kobject_put(&mem->sysdev.kobj);
+ kobject_put(&mem->dev.kobj);
} else
ret = init_memory_block(&mem, section, state);
@@ -631,7 +615,7 @@ int remove_memory_block(unsigned long node_id, struct mem_section *section,
unregister_memory(mem);
kfree(mem);
} else
- kobject_put(&mem->sysdev.kobj);
+ kobject_put(&mem->dev.kobj);
mutex_unlock(&mem_sysfs_mutex);
return 0;
@@ -664,8 +648,7 @@ int __init memory_dev_init(void)
int err;
unsigned long block_sz;
- memory_sysdev_class.kset.uevent_ops = &memory_uevent_ops;
- ret = sysdev_class_register(&memory_sysdev_class);
+ ret = subsys_system_register(&memory_subsys, NULL);
if (ret)
goto out;
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 5693ecee9a40..44f427a66117 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -1,8 +1,7 @@
/*
- * drivers/base/node.c - basic Node class support
+ * Basic Node interface support
*/
-#include <linux/sysdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
@@ -19,18 +18,16 @@
#include <linux/swap.h>
#include <linux/slab.h>
-static struct sysdev_class_attribute *node_state_attrs[];
-
-static struct sysdev_class node_class = {
+static struct bus_type node_subsys = {
.name = "node",
- .attrs = node_state_attrs,
+ .dev_name = "node",
};
-static ssize_t node_read_cpumap(struct sys_device *dev, int type, char *buf)
+static ssize_t node_read_cpumap(struct device *dev, int type, char *buf)
{
struct node *node_dev = to_node(dev);
- const struct cpumask *mask = cpumask_of_node(node_dev->sysdev.id);
+ const struct cpumask *mask = cpumask_of_node(node_dev->dev.id);
int len;
/* 2008/04/07: buf currently PAGE_SIZE, need 9 chars per 32 bits. */
@@ -44,23 +41,23 @@ static ssize_t node_read_cpumap(struct sys_device *dev, int type, char *buf)
return len;
}
-static inline ssize_t node_read_cpumask(struct sys_device *dev,
- struct sysdev_attribute *attr, char *buf)
+static inline ssize_t node_read_cpumask(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
return node_read_cpumap(dev, 0, buf);
}
-static inline ssize_t node_read_cpulist(struct sys_device *dev,
- struct sysdev_attribute *attr, char *buf)
+static inline ssize_t node_read_cpulist(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
return node_read_cpumap(dev, 1, buf);
}
-static SYSDEV_ATTR(cpumap, S_IRUGO, node_read_cpumask, NULL);
-static SYSDEV_ATTR(cpulist, S_IRUGO, node_read_cpulist, NULL);
+static DEVICE_ATTR(cpumap, S_IRUGO, node_read_cpumask, NULL);
+static DEVICE_ATTR(cpulist, S_IRUGO, node_read_cpulist, NULL);
#define K(x) ((x) << (PAGE_SHIFT - 10))
-static ssize_t node_read_meminfo(struct sys_device * dev,
- struct sysdev_attribute *attr, char * buf)
+static ssize_t node_read_meminfo(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
int n;
int nid = dev->id;
@@ -157,10 +154,10 @@ static ssize_t node_read_meminfo(struct sys_device * dev,
}
#undef K
-static SYSDEV_ATTR(meminfo, S_IRUGO, node_read_meminfo, NULL);
+static DEVICE_ATTR(meminfo, S_IRUGO, node_read_meminfo, NULL);
-static ssize_t node_read_numastat(struct sys_device * dev,
- struct sysdev_attribute *attr, char * buf)
+static ssize_t node_read_numastat(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
return sprintf(buf,
"numa_hit %lu\n"
@@ -176,10 +173,10 @@ static ssize_t node_read_numastat(struct sys_device * dev,
node_page_state(dev->id, NUMA_LOCAL),
node_page_state(dev->id, NUMA_OTHER));
}
-static SYSDEV_ATTR(numastat, S_IRUGO, node_read_numastat, NULL);
+static DEVICE_ATTR(numastat, S_IRUGO, node_read_numastat, NULL);
-static ssize_t node_read_vmstat(struct sys_device *dev,
- struct sysdev_attribute *attr, char *buf)
+static ssize_t node_read_vmstat(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
int nid = dev->id;
int i;
@@ -191,10 +188,10 @@ static ssize_t node_read_vmstat(struct sys_device *dev,
return n;
}
-static SYSDEV_ATTR(vmstat, S_IRUGO, node_read_vmstat, NULL);
+static DEVICE_ATTR(vmstat, S_IRUGO, node_read_vmstat, NULL);
-static ssize_t node_read_distance(struct sys_device * dev,
- struct sysdev_attribute *attr, char * buf)
+static ssize_t node_read_distance(struct device *dev,
+ struct device_attribute *attr, char * buf)
{
int nid = dev->id;
int len = 0;
@@ -212,7 +209,7 @@ static ssize_t node_read_distance(struct sys_device * dev,
len += sprintf(buf + len, "\n");
return len;
}
-static SYSDEV_ATTR(distance, S_IRUGO, node_read_distance, NULL);
+static DEVICE_ATTR(distance, S_IRUGO, node_read_distance, NULL);
#ifdef CONFIG_HUGETLBFS
/*
@@ -230,7 +227,7 @@ static node_registration_func_t __hugetlb_unregister_node;
static inline bool hugetlb_register_node(struct node *node)
{
if (__hugetlb_register_node &&
- node_state(node->sysdev.id, N_HIGH_MEMORY)) {
+ node_state(node->dev.id, N_HIGH_MEMORY)) {
__hugetlb_register_node(node);
return true;
}
@@ -266,17 +263,17 @@ int register_node(struct node *node, int num, struct node *parent)
{
int error;
- node->sysdev.id = num;
- node->sysdev.cls = &node_class;
- error = sysdev_register(&node->sysdev);
+ node->dev.id = num;
+ node->dev.bus = &node_subsys;
+ error = device_register(&node->dev);
if (!error){
- sysdev_create_file(&node->sysdev, &attr_cpumap);
- sysdev_create_file(&node->sysdev, &attr_cpulist);
- sysdev_create_file(&node->sysdev, &attr_meminfo);
- sysdev_create_file(&node->sysdev, &attr_numastat);
- sysdev_create_file(&node->sysdev, &attr_distance);
- sysdev_create_file(&node->sysdev, &attr_vmstat);
+ device_create_file(&node->dev, &dev_attr_cpumap);
+ device_create_file(&node->dev, &dev_attr_cpulist);
+ device_create_file(&node->dev, &dev_attr_meminfo);
+ device_create_file(&node->dev, &dev_attr_numastat);
+ device_create_file(&node->dev, &dev_attr_distance);
+ device_create_file(&node->dev, &dev_attr_vmstat);
scan_unevictable_register_node(node);
@@ -296,17 +293,17 @@ int register_node(struct node *node, int num, struct node *parent)
*/
void unregister_node(struct node *node)
{
- sysdev_remove_file(&node->sysdev, &attr_cpumap);
- sysdev_remove_file(&node->sysdev, &attr_cpulist);
- sysdev_remove_file(&node->sysdev, &attr_meminfo);
- sysdev_remove_file(&node->sysdev, &attr_numastat);
- sysdev_remove_file(&node->sysdev, &attr_distance);
- sysdev_remove_file(&node->sysdev, &attr_vmstat);
+ device_remove_file(&node->dev, &dev_attr_cpumap);
+ device_remove_file(&node->dev, &dev_attr_cpulist);
+ device_remove_file(&node->dev, &dev_attr_meminfo);
+ device_remove_file(&node->dev, &dev_attr_numastat);
+ device_remove_file(&node->dev, &dev_attr_distance);
+ device_remove_file(&node->dev, &dev_attr_vmstat);
scan_unevictable_unregister_node(node);
hugetlb_unregister_node(node); /* no-op, if memoryless node */
- sysdev_unregister(&node->sysdev);
+ device_unregister(&node->dev);
}
struct node node_devices[MAX_NUMNODES];
@@ -317,41 +314,41 @@ struct node node_devices[MAX_NUMNODES];
int register_cpu_under_node(unsigned int cpu, unsigned int nid)
{
int ret;
- struct sys_device *obj;
+ struct device *obj;
if (!node_online(nid))
return 0;
- obj = get_cpu_sysdev(cpu);
+ obj = get_cpu_device(cpu);
if (!obj)
return 0;
- ret = sysfs_create_link(&node_devices[nid].sysdev.kobj,
+ ret = sysfs_create_link(&node_devices[nid].dev.kobj,
&obj->kobj,
kobject_name(&obj->kobj));
if (ret)
return ret;
return sysfs_create_link(&obj->kobj,
- &node_devices[nid].sysdev.kobj,
- kobject_name(&node_devices[nid].sysdev.kobj));
+ &node_devices[nid].dev.kobj,
+ kobject_name(&node_devices[nid].dev.kobj));
}
int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
{
- struct sys_device *obj;
+ struct device *obj;
if (!node_online(nid))
return 0;
- obj = get_cpu_sysdev(cpu);
+ obj = get_cpu_device(cpu);
if (!obj)
return 0;
- sysfs_remove_link(&node_devices[nid].sysdev.kobj,
+ sysfs_remove_link(&node_devices[nid].dev.kobj,
kobject_name(&obj->kobj));
sysfs_remove_link(&obj->kobj,
- kobject_name(&node_devices[nid].sysdev.kobj));
+ kobject_name(&node_devices[nid].dev.kobj));
return 0;
}
@@ -393,15 +390,15 @@ int register_mem_sect_under_node(struct memory_block *mem_blk, int nid)
continue;
if (page_nid != nid)
continue;
- ret = sysfs_create_link_nowarn(&node_devices[nid].sysdev.kobj,
- &mem_blk->sysdev.kobj,
- kobject_name(&mem_blk->sysdev.kobj));
+ ret = sysfs_create_link_nowarn(&node_devices[nid].dev.kobj,
+ &mem_blk->dev.kobj,
+ kobject_name(&mem_blk->dev.kobj));
if (ret)
return ret;
- return sysfs_create_link_nowarn(&mem_blk->sysdev.kobj,
- &node_devices[nid].sysdev.kobj,
- kobject_name(&node_devices[nid].sysdev.kobj));
+ return sysfs_create_link_nowarn(&mem_blk->dev.kobj,
+ &node_devices[nid].dev.kobj,
+ kobject_name(&node_devices[nid].dev.kobj));
}
/* mem section does not span the specified node */
return 0;
@@ -434,10 +431,10 @@ int unregister_mem_sect_under_nodes(struct memory_block *mem_blk,
continue;
if (node_test_and_set(nid, *unlinked_nodes))
continue;
- sysfs_remove_link(&node_devices[nid].sysdev.kobj,
- kobject_name(&mem_blk->sysdev.kobj));
- sysfs_remove_link(&mem_blk->sysdev.kobj,
- kobject_name(&node_devices[nid].sysdev.kobj));
+ sysfs_remove_link(&node_devices[nid].dev.kobj,
+ kobject_name(&mem_blk->dev.kobj));
+ sysfs_remove_link(&mem_blk->dev.kobj,
+ kobject_name(&node_devices[nid].dev.kobj));
}
NODEMASK_FREE(unlinked_nodes);
return 0;
@@ -468,7 +465,7 @@ static int link_mem_sections(int nid)
}
if (mem_blk)
- kobject_put(&mem_blk->sysdev.kobj);
+ kobject_put(&mem_blk->dev.kobj);
return err;
}
@@ -596,19 +593,19 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
}
struct node_attr {
- struct sysdev_class_attribute attr;
+ struct device_attribute attr;
enum node_states state;
};
-static ssize_t show_node_state(struct sysdev_class *class,
- struct sysdev_class_attribute *attr, char *buf)
+static ssize_t show_node_state(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct node_attr *na = container_of(attr, struct node_attr, attr);
return print_nodes_state(na->state, buf);
}
#define _NODE_ATTR(name, state) \
- { _SYSDEV_CLASS_ATTR(name, 0444, show_node_state, NULL), state }
+ { __ATTR(name, 0444, show_node_state, NULL), state }
static struct node_attr node_state_attr[] = {
_NODE_ATTR(possible, N_POSSIBLE),
@@ -620,17 +617,26 @@ static struct node_attr node_state_attr[] = {
#endif
};
-static struct sysdev_class_attribute *node_state_attrs[] = {
- &node_state_attr[0].attr,
- &node_state_attr[1].attr,
- &node_state_attr[2].attr,
- &node_state_attr[3].attr,
+static struct attribute *node_state_attrs[] = {
+ &node_state_attr[0].attr.attr,
+ &node_state_attr[1].attr.attr,
+ &node_state_attr[2].attr.attr,
+ &node_state_attr[3].attr.attr,
#ifdef CONFIG_HIGHMEM
- &node_state_attr[4].attr,
+ &node_state_attr[4].attr.attr,
#endif
NULL
};
+static struct attribute_group memory_root_attr_group = {
+ .attrs = node_state_attrs,
+};
+
+static const struct attribute_group *cpu_root_attr_groups[] = {
+ &memory_root_attr_group,
+ NULL,
+};
+
#define NODE_CALLBACK_PRI 2 /* lower than SLAB */
static int __init register_node_type(void)
{
@@ -639,7 +645,7 @@ static int __init register_node_type(void)
BUILD_BUG_ON(ARRAY_SIZE(node_state_attr) != NR_NODE_STATES);
BUILD_BUG_ON(ARRAY_SIZE(node_state_attrs)-1 != NR_NODE_STATES);
- ret = sysdev_class_register(&node_class);
+ ret = subsys_system_register(&node_subsys, cpu_root_attr_groups);
if (!ret) {
hotplug_memory_notifier(node_memory_callback,
NODE_CALLBACK_PRI);
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index a7c06374062e..f0c605e99ade 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -700,25 +700,6 @@ static int platform_legacy_resume(struct device *dev)
return ret;
}
-int platform_pm_prepare(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (drv && drv->pm && drv->pm->prepare)
- ret = drv->pm->prepare(dev);
-
- return ret;
-}
-
-void platform_pm_complete(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
-
- if (drv && drv->pm && drv->pm->complete)
- drv->pm->complete(dev);
-}
-
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_SUSPEND
@@ -741,22 +722,6 @@ int platform_pm_suspend(struct device *dev)
return ret;
}
-int platform_pm_suspend_noirq(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->suspend_noirq)
- ret = drv->pm->suspend_noirq(dev);
- }
-
- return ret;
-}
-
int platform_pm_resume(struct device *dev)
{
struct device_driver *drv = dev->driver;
@@ -775,22 +740,6 @@ int platform_pm_resume(struct device *dev)
return ret;
}
-int platform_pm_resume_noirq(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->resume_noirq)
- ret = drv->pm->resume_noirq(dev);
- }
-
- return ret;
-}
-
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
@@ -813,22 +762,6 @@ int platform_pm_freeze(struct device *dev)
return ret;
}
-int platform_pm_freeze_noirq(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->freeze_noirq)
- ret = drv->pm->freeze_noirq(dev);
- }
-
- return ret;
-}
-
int platform_pm_thaw(struct device *dev)
{
struct device_driver *drv = dev->driver;
@@ -847,22 +780,6 @@ int platform_pm_thaw(struct device *dev)
return ret;
}
-int platform_pm_thaw_noirq(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->thaw_noirq)
- ret = drv->pm->thaw_noirq(dev);
- }
-
- return ret;
-}
-
int platform_pm_poweroff(struct device *dev)
{
struct device_driver *drv = dev->driver;
@@ -881,22 +798,6 @@ int platform_pm_poweroff(struct device *dev)
return ret;
}
-int platform_pm_poweroff_noirq(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->poweroff_noirq)
- ret = drv->pm->poweroff_noirq(dev);
- }
-
- return ret;
-}
-
int platform_pm_restore(struct device *dev)
{
struct device_driver *drv = dev->driver;
@@ -915,22 +816,6 @@ int platform_pm_restore(struct device *dev)
return ret;
}
-int platform_pm_restore_noirq(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->restore_noirq)
- ret = drv->pm->restore_noirq(dev);
- }
-
- return ret;
-}
-
#endif /* CONFIG_HIBERNATE_CALLBACKS */
static const struct dev_pm_ops platform_dev_pm_ops = {
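
The wrappers deleted above only forwarded to the driver's own dev_pm_ops; drivers keep supplying those callbacks through dev_pm_ops exactly as before, roughly as in this hypothetical sketch (driver name and callbacks are illustrative):

    static int mydrv_suspend(struct device *dev)
    {
            /* quiesce the hardware */
            return 0;
    }

    static int mydrv_resume(struct device *dev)
    {
            /* restore the hardware state */
            return 0;
    }

    static SIMPLE_DEV_PM_OPS(mydrv_pm_ops, mydrv_suspend, mydrv_resume);

    static struct platform_driver mydrv_driver = {
            .driver = {
                    .name   = "mydrv",
                    .owner  = THIS_MODULE,
                    .pm     = &mydrv_pm_ops,
            },
    };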
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
index 81676dd17900..2e58ebb1f6c0 100644
--- a/drivers/base/power/Makefile
+++ b/drivers/base/power/Makefile
@@ -3,7 +3,7 @@ obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o
obj-$(CONFIG_PM_RUNTIME) += runtime.o
obj-$(CONFIG_PM_TRACE_RTC) += trace.o
obj-$(CONFIG_PM_OPP) += opp.o
-obj-$(CONFIG_PM_GENERIC_DOMAINS) += domain.o
+obj-$(CONFIG_PM_GENERIC_DOMAINS) += domain.o domain_governor.o
obj-$(CONFIG_HAVE_CLK) += clock_ops.o
ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 6790cf7eba5a..978bbf7ac6af 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -15,13 +15,44 @@
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/suspend.h>
+#include <linux/export.h>
+
+#define GENPD_DEV_CALLBACK(genpd, type, callback, dev) \
+({ \
+ type (*__routine)(struct device *__d); \
+ type __ret = (type)0; \
+ \
+ __routine = genpd->dev_ops.callback; \
+ if (__routine) { \
+ __ret = __routine(dev); \
+ } else { \
+ __routine = dev_gpd_data(dev)->ops.callback; \
+ if (__routine) \
+ __ret = __routine(dev); \
+ } \
+ __ret; \
+})
+
+#define GENPD_DEV_TIMED_CALLBACK(genpd, type, callback, dev, field, name) \
+({ \
+ ktime_t __start = ktime_get(); \
+ type __retval = GENPD_DEV_CALLBACK(genpd, type, callback, dev); \
+ s64 __elapsed = ktime_to_ns(ktime_sub(ktime_get(), __start)); \
+ struct generic_pm_domain_data *__gpd_data = dev_gpd_data(dev); \
+ if (__elapsed > __gpd_data->td.field) { \
+ __gpd_data->td.field = __elapsed; \
+ dev_warn(dev, name " latency exceeded, new value %lld ns\n", \
+ __elapsed); \
+ } \
+ __retval; \
+})
static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);
#ifdef CONFIG_PM
-static struct generic_pm_domain *dev_to_genpd(struct device *dev)
+struct generic_pm_domain *dev_to_genpd(struct device *dev)
{
if (IS_ERR_OR_NULL(dev->pm_domain))
return ERR_PTR(-EINVAL);
@@ -29,6 +60,31 @@ static struct generic_pm_domain *dev_to_genpd(struct device *dev)
return pd_to_genpd(dev->pm_domain);
}
+static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev)
+{
+ return GENPD_DEV_TIMED_CALLBACK(genpd, int, stop, dev,
+ stop_latency_ns, "stop");
+}
+
+static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
+{
+ return GENPD_DEV_TIMED_CALLBACK(genpd, int, start, dev,
+ start_latency_ns, "start");
+}
+
+static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
+{
+ return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev,
+ save_state_latency_ns, "state save");
+}
+
+static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev)
+{
+ return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev,
+ restore_state_latency_ns,
+ "state restore");
+}
+
static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
{
bool ret = false;
@@ -145,9 +201,21 @@ int __pm_genpd_poweron(struct generic_pm_domain *genpd)
}
if (genpd->power_on) {
+ ktime_t time_start = ktime_get();
+ s64 elapsed_ns;
+
ret = genpd->power_on(genpd);
if (ret)
goto err;
+
+ elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
+ if (elapsed_ns > genpd->power_on_latency_ns) {
+ genpd->power_on_latency_ns = elapsed_ns;
+ if (genpd->name)
+ pr_warning("%s: Power-on latency exceeded, "
+ "new value %lld ns\n", genpd->name,
+ elapsed_ns);
+ }
}
genpd_set_active(genpd);
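[Editor's note] The hunk above times genpd->power_on() with ktime_get() and records the worst observed latency. A standalone userspace sketch of the same "remember the worst case" bookkeeping follows; it is an assumption-level illustration using clock_gettime() instead of ktime, and timed_power_on()/fake_power_on() are made-up names.

#define _POSIX_C_SOURCE 200809L
#include <stdio.h>
#include <time.h>

static long long now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (long long)ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

static long long max_latency_ns;	/* analogous to power_on_latency_ns */

static void timed_power_on(void (*power_on)(void))
{
	long long start = now_ns(), elapsed;

	power_on();
	elapsed = now_ns() - start;
	if (elapsed > max_latency_ns) {
		max_latency_ns = elapsed;
		printf("power-on latency exceeded, new value %lld ns\n",
		       elapsed);
	}
}

static void fake_power_on(void)
{
	struct timespec ts = { 0, 1000000 };	/* pretend it takes ~1 ms */

	nanosleep(&ts, NULL);
}

int main(void)
{
	timed_power_on(fake_power_on);
	timed_power_on(fake_power_on);
	return 0;
}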
@@ -190,7 +258,6 @@ static int __pm_genpd_save_device(struct pm_domain_data *pdd,
{
struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
struct device *dev = pdd->dev;
- struct device_driver *drv = dev->driver;
int ret = 0;
if (gpd_data->need_restore)
@@ -198,15 +265,9 @@ static int __pm_genpd_save_device(struct pm_domain_data *pdd,
mutex_unlock(&genpd->lock);
- if (drv && drv->pm && drv->pm->runtime_suspend) {
- if (genpd->start_device)
- genpd->start_device(dev);
-
- ret = drv->pm->runtime_suspend(dev);
-
- if (genpd->stop_device)
- genpd->stop_device(dev);
- }
+ genpd_start_dev(genpd, dev);
+ ret = genpd_save_dev(genpd, dev);
+ genpd_stop_dev(genpd, dev);
mutex_lock(&genpd->lock);
@@ -227,22 +288,15 @@ static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
{
struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
struct device *dev = pdd->dev;
- struct device_driver *drv = dev->driver;
if (!gpd_data->need_restore)
return;
mutex_unlock(&genpd->lock);
- if (drv && drv->pm && drv->pm->runtime_resume) {
- if (genpd->start_device)
- genpd->start_device(dev);
-
- drv->pm->runtime_resume(dev);
-
- if (genpd->stop_device)
- genpd->stop_device(dev);
- }
+ genpd_start_dev(genpd, dev);
+ genpd_restore_dev(genpd, dev);
+ genpd_stop_dev(genpd, dev);
mutex_lock(&genpd->lock);
@@ -354,11 +408,16 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
}
if (genpd->power_off) {
+ ktime_t time_start;
+ s64 elapsed_ns;
+
if (atomic_read(&genpd->sd_count) > 0) {
ret = -EBUSY;
goto out;
}
+ time_start = ktime_get();
+
/*
* If sd_count > 0 at this point, one of the subdomains hasn't
* managed to call pm_genpd_poweron() for the master yet after
@@ -372,9 +431,29 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
genpd_set_active(genpd);
goto out;
}
+
+ elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
+ if (elapsed_ns > genpd->power_off_latency_ns) {
+ genpd->power_off_latency_ns = elapsed_ns;
+ if (genpd->name)
+ pr_warning("%s: Power-off latency exceeded, "
+ "new value %lld ns\n", genpd->name,
+ elapsed_ns);
+ }
}
genpd->status = GPD_STATE_POWER_OFF;
+ genpd->power_off_time = ktime_get();
+
+ /* Update PM QoS information for devices in the domain. */
+ list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) {
+ struct gpd_timing_data *td = &to_gpd_data(pdd)->td;
+
+ pm_runtime_update_max_time_suspended(pdd->dev,
+ td->start_latency_ns +
+ td->restore_state_latency_ns +
+ genpd->power_on_latency_ns);
+ }
list_for_each_entry(link, &genpd->slave_links, slave_node) {
genpd_sd_counter_dec(link->master);
@@ -413,6 +492,8 @@ static void genpd_power_off_work_fn(struct work_struct *work)
static int pm_genpd_runtime_suspend(struct device *dev)
{
struct generic_pm_domain *genpd;
+ bool (*stop_ok)(struct device *__dev);
+ int ret;
dev_dbg(dev, "%s()\n", __func__);
@@ -422,11 +503,16 @@ static int pm_genpd_runtime_suspend(struct device *dev)
might_sleep_if(!genpd->dev_irq_safe);
- if (genpd->stop_device) {
- int ret = genpd->stop_device(dev);
- if (ret)
- return ret;
- }
+ stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
+ if (stop_ok && !stop_ok(dev))
+ return -EBUSY;
+
+ ret = genpd_stop_dev(genpd, dev);
+ if (ret)
+ return ret;
+
+ pm_runtime_update_max_time_suspended(dev,
+ dev_gpd_data(dev)->td.start_latency_ns);
/*
* If power.irq_safe is set, this routine will be run with interrupts
@@ -502,8 +588,7 @@ static int pm_genpd_runtime_resume(struct device *dev)
mutex_unlock(&genpd->lock);
out:
- if (genpd->start_device)
- genpd->start_device(dev);
+ genpd_start_dev(genpd, dev);
return 0;
}
@@ -534,6 +619,52 @@ static inline void genpd_power_off_work_fn(struct work_struct *work) {}
#ifdef CONFIG_PM_SLEEP
+static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
+ struct device *dev)
+{
+ return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
+}
+
+static int genpd_suspend_dev(struct generic_pm_domain *genpd, struct device *dev)
+{
+ return GENPD_DEV_CALLBACK(genpd, int, suspend, dev);
+}
+
+static int genpd_suspend_late(struct generic_pm_domain *genpd, struct device *dev)
+{
+ return GENPD_DEV_CALLBACK(genpd, int, suspend_late, dev);
+}
+
+static int genpd_resume_early(struct generic_pm_domain *genpd, struct device *dev)
+{
+ return GENPD_DEV_CALLBACK(genpd, int, resume_early, dev);
+}
+
+static int genpd_resume_dev(struct generic_pm_domain *genpd, struct device *dev)
+{
+ return GENPD_DEV_CALLBACK(genpd, int, resume, dev);
+}
+
+static int genpd_freeze_dev(struct generic_pm_domain *genpd, struct device *dev)
+{
+ return GENPD_DEV_CALLBACK(genpd, int, freeze, dev);
+}
+
+static int genpd_freeze_late(struct generic_pm_domain *genpd, struct device *dev)
+{
+ return GENPD_DEV_CALLBACK(genpd, int, freeze_late, dev);
+}
+
+static int genpd_thaw_early(struct generic_pm_domain *genpd, struct device *dev)
+{
+ return GENPD_DEV_CALLBACK(genpd, int, thaw_early, dev);
+}
+
+static int genpd_thaw_dev(struct generic_pm_domain *genpd, struct device *dev)
+{
+ return GENPD_DEV_CALLBACK(genpd, int, thaw, dev);
+}
+
/**
* pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
* @genpd: PM domain to power off, if possible.
@@ -590,7 +721,7 @@ static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
if (!device_can_wakeup(dev))
return false;
- active_wakeup = genpd->active_wakeup && genpd->active_wakeup(dev);
+ active_wakeup = genpd_dev_active_wakeup(genpd, dev);
return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
}
@@ -646,7 +777,7 @@ static int pm_genpd_prepare(struct device *dev)
/*
* The PM domain must be in the GPD_STATE_ACTIVE state at this point,
* so pm_genpd_poweron() will return immediately, but if the device
- * is suspended (e.g. it's been stopped by .stop_device()), we need
+ * is suspended (e.g. it's been stopped by genpd_stop_dev()), we need
* to make it operational.
*/
pm_runtime_resume(dev);
@@ -685,7 +816,7 @@ static int pm_genpd_suspend(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- return genpd->suspend_power_off ? 0 : pm_generic_suspend(dev);
+ return genpd->suspend_power_off ? 0 : genpd_suspend_dev(genpd, dev);
}
/**
@@ -710,16 +841,14 @@ static int pm_genpd_suspend_noirq(struct device *dev)
if (genpd->suspend_power_off)
return 0;
- ret = pm_generic_suspend_noirq(dev);
+ ret = genpd_suspend_late(genpd, dev);
if (ret)
return ret;
- if (dev->power.wakeup_path
- && genpd->active_wakeup && genpd->active_wakeup(dev))
+ if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))
return 0;
- if (genpd->stop_device)
- genpd->stop_device(dev);
+ genpd_stop_dev(genpd, dev);
/*
* Since all of the "noirq" callbacks are executed sequentially, it is
@@ -761,10 +890,9 @@ static int pm_genpd_resume_noirq(struct device *dev)
*/
pm_genpd_poweron(genpd);
genpd->suspended_count--;
- if (genpd->start_device)
- genpd->start_device(dev);
+ genpd_start_dev(genpd, dev);
- return pm_generic_resume_noirq(dev);
+ return genpd_resume_early(genpd, dev);
}
/**
@@ -785,7 +913,7 @@ static int pm_genpd_resume(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- return genpd->suspend_power_off ? 0 : pm_generic_resume(dev);
+ return genpd->suspend_power_off ? 0 : genpd_resume_dev(genpd, dev);
}
/**
@@ -806,7 +934,7 @@ static int pm_genpd_freeze(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- return genpd->suspend_power_off ? 0 : pm_generic_freeze(dev);
+ return genpd->suspend_power_off ? 0 : genpd_freeze_dev(genpd, dev);
}
/**
@@ -832,12 +960,11 @@ static int pm_genpd_freeze_noirq(struct device *dev)
if (genpd->suspend_power_off)
return 0;
- ret = pm_generic_freeze_noirq(dev);
+ ret = genpd_freeze_late(genpd, dev);
if (ret)
return ret;
- if (genpd->stop_device)
- genpd->stop_device(dev);
+ genpd_stop_dev(genpd, dev);
return 0;
}
@@ -864,10 +991,9 @@ static int pm_genpd_thaw_noirq(struct device *dev)
if (genpd->suspend_power_off)
return 0;
- if (genpd->start_device)
- genpd->start_device(dev);
+ genpd_start_dev(genpd, dev);
- return pm_generic_thaw_noirq(dev);
+ return genpd_thaw_early(genpd, dev);
}
/**
@@ -888,72 +1014,7 @@ static int pm_genpd_thaw(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- return genpd->suspend_power_off ? 0 : pm_generic_thaw(dev);
-}
-
-/**
- * pm_genpd_dev_poweroff - Power off a device belonging to an I/O PM domain.
- * @dev: Device to suspend.
- *
- * Power off a device under the assumption that its pm_domain field points to
- * the domain member of an object of type struct generic_pm_domain representing
- * a PM domain consisting of I/O devices.
- */
-static int pm_genpd_dev_poweroff(struct device *dev)
-{
- struct generic_pm_domain *genpd;
-
- dev_dbg(dev, "%s()\n", __func__);
-
- genpd = dev_to_genpd(dev);
- if (IS_ERR(genpd))
- return -EINVAL;
-
- return genpd->suspend_power_off ? 0 : pm_generic_poweroff(dev);
-}
-
-/**
- * pm_genpd_dev_poweroff_noirq - Late power off of a device from a PM domain.
- * @dev: Device to suspend.
- *
- * Carry out a late powering off of a device under the assumption that its
- * pm_domain field points to the domain member of an object of type
- * struct generic_pm_domain representing a PM domain consisting of I/O devices.
- */
-static int pm_genpd_dev_poweroff_noirq(struct device *dev)
-{
- struct generic_pm_domain *genpd;
- int ret;
-
- dev_dbg(dev, "%s()\n", __func__);
-
- genpd = dev_to_genpd(dev);
- if (IS_ERR(genpd))
- return -EINVAL;
-
- if (genpd->suspend_power_off)
- return 0;
-
- ret = pm_generic_poweroff_noirq(dev);
- if (ret)
- return ret;
-
- if (dev->power.wakeup_path
- && genpd->active_wakeup && genpd->active_wakeup(dev))
- return 0;
-
- if (genpd->stop_device)
- genpd->stop_device(dev);
-
- /*
- * Since all of the "noirq" callbacks are executed sequentially, it is
- * guaranteed that this function will never run twice in parallel for
- * the same PM domain, so it is not necessary to use locking here.
- */
- genpd->suspended_count++;
- pm_genpd_sync_poweroff(genpd);
-
- return 0;
+ return genpd->suspend_power_off ? 0 : genpd_thaw_dev(genpd, dev);
}
/**
@@ -993,31 +1054,9 @@ static int pm_genpd_restore_noirq(struct device *dev)
pm_genpd_poweron(genpd);
genpd->suspended_count--;
- if (genpd->start_device)
- genpd->start_device(dev);
+ genpd_start_dev(genpd, dev);
- return pm_generic_restore_noirq(dev);
-}
-
-/**
- * pm_genpd_restore - Restore a device belonging to an I/O power domain.
- * @dev: Device to resume.
- *
- * Restore a device under the assumption that its pm_domain field points to the
- * domain member of an object of type struct generic_pm_domain representing
- * a power domain consisting of I/O devices.
- */
-static int pm_genpd_restore(struct device *dev)
-{
- struct generic_pm_domain *genpd;
-
- dev_dbg(dev, "%s()\n", __func__);
-
- genpd = dev_to_genpd(dev);
- if (IS_ERR(genpd))
- return -EINVAL;
-
- return genpd->suspend_power_off ? 0 : pm_generic_restore(dev);
+ return genpd_resume_early(genpd, dev);
}
/**
@@ -1067,20 +1106,19 @@ static void pm_genpd_complete(struct device *dev)
#define pm_genpd_freeze_noirq NULL
#define pm_genpd_thaw_noirq NULL
#define pm_genpd_thaw NULL
-#define pm_genpd_dev_poweroff_noirq NULL
-#define pm_genpd_dev_poweroff NULL
#define pm_genpd_restore_noirq NULL
-#define pm_genpd_restore NULL
#define pm_genpd_complete NULL
#endif /* CONFIG_PM_SLEEP */
/**
- * pm_genpd_add_device - Add a device to an I/O PM domain.
+ * __pm_genpd_add_device - Add a device to an I/O PM domain.
* @genpd: PM domain to add the device to.
* @dev: Device to be added.
+ * @td: Set of PM QoS timing parameters to attach to the device.
*/
-int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
+int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
+ struct gpd_timing_data *td)
{
struct generic_pm_domain_data *gpd_data;
struct pm_domain_data *pdd;
@@ -1123,6 +1161,8 @@ int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
gpd_data->base.dev = dev;
gpd_data->need_restore = false;
list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
+ if (td)
+ gpd_data->td = *td;
out:
genpd_release_lock(genpd);
@@ -1280,6 +1320,219 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
}
/**
+ * pm_genpd_add_callbacks - Add PM domain callbacks to a given device.
+ * @dev: Device to add the callbacks to.
+ * @ops: Set of callbacks to add.
+ * @td: Timing data to add to the device along with the callbacks (optional).
+ */
+int pm_genpd_add_callbacks(struct device *dev, struct gpd_dev_ops *ops,
+ struct gpd_timing_data *td)
+{
+ struct pm_domain_data *pdd;
+ int ret = 0;
+
+ if (!(dev && dev->power.subsys_data && ops))
+ return -EINVAL;
+
+ pm_runtime_disable(dev);
+ device_pm_lock();
+
+ pdd = dev->power.subsys_data->domain_data;
+ if (pdd) {
+ struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
+
+ gpd_data->ops = *ops;
+ if (td)
+ gpd_data->td = *td;
+ } else {
+ ret = -EINVAL;
+ }
+
+ device_pm_unlock();
+ pm_runtime_enable(dev);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pm_genpd_add_callbacks);
+
+/**
+ * __pm_genpd_remove_callbacks - Remove PM domain callbacks from a given device.
+ * @dev: Device to remove the callbacks from.
+ * @clear_td: If set, clear the device's timing data too.
+ */
+int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
+{
+ struct pm_domain_data *pdd;
+ int ret = 0;
+
+ if (!(dev && dev->power.subsys_data))
+ return -EINVAL;
+
+ pm_runtime_disable(dev);
+ device_pm_lock();
+
+ pdd = dev->power.subsys_data->domain_data;
+ if (pdd) {
+ struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
+
+ gpd_data->ops = (struct gpd_dev_ops){ 0 };
+ if (clear_td)
+ gpd_data->td = (struct gpd_timing_data){ 0 };
+ } else {
+ ret = -EINVAL;
+ }
+
+ device_pm_unlock();
+ pm_runtime_enable(dev);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks);
+
+/* Default device callbacks for generic PM domains. */
+
+/**
+ * pm_genpd_default_save_state - Default "save device state" for PM domains.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_save_state(struct device *dev)
+{
+ int (*cb)(struct device *__dev);
+ struct device_driver *drv = dev->driver;
+
+ cb = dev_gpd_data(dev)->ops.save_state;
+ if (cb)
+ return cb(dev);
+
+ if (drv && drv->pm && drv->pm->runtime_suspend)
+ return drv->pm->runtime_suspend(dev);
+
+ return 0;
+}
+
+/**
+ * pm_genpd_default_restore_state - Default "restore device state" for PM domains.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_restore_state(struct device *dev)
+{
+ int (*cb)(struct device *__dev);
+ struct device_driver *drv = dev->driver;
+
+ cb = dev_gpd_data(dev)->ops.restore_state;
+ if (cb)
+ return cb(dev);
+
+ if (drv && drv->pm && drv->pm->runtime_resume)
+ return drv->pm->runtime_resume(dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+
+/**
+ * pm_genpd_default_suspend - Default "device suspend" for PM domains.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_suspend(struct device *dev)
+{
+ int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend;
+
+ return cb ? cb(dev) : pm_generic_suspend(dev);
+}
+
+/**
+ * pm_genpd_default_suspend_late - Default "late device suspend" for PM domains.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_suspend_late(struct device *dev)
+{
+ int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend_late;
+
+ return cb ? cb(dev) : pm_generic_suspend_noirq(dev);
+}
+
+/**
+ * pm_genpd_default_resume_early - Default "early device resume" for PM domains.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_resume_early(struct device *dev)
+{
+ int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume_early;
+
+ return cb ? cb(dev) : pm_generic_resume_noirq(dev);
+}
+
+/**
+ * pm_genpd_default_resume - Default "device resume" for PM domains.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_resume(struct device *dev)
+{
+ int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume;
+
+ return cb ? cb(dev) : pm_generic_resume(dev);
+}
+
+/**
+ * pm_genpd_default_freeze - Default "device freeze" for PM domains.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_freeze(struct device *dev)
+{
+ int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze;
+
+ return cb ? cb(dev) : pm_generic_freeze(dev);
+}
+
+/**
+ * pm_genpd_default_freeze_late - Default "late device freeze" for PM domains.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_freeze_late(struct device *dev)
+{
+ int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze_late;
+
+ return cb ? cb(dev) : pm_generic_freeze_noirq(dev);
+}
+
+/**
+ * pm_genpd_default_thaw_early - Default "early device thaw" for PM domains.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_thaw_early(struct device *dev)
+{
+ int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw_early;
+
+ return cb ? cb(dev) : pm_generic_thaw_noirq(dev);
+}
+
+/**
+ * pm_genpd_default_thaw - Default "device thaw" for PM domains.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_thaw(struct device *dev)
+{
+ int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw;
+
+ return cb ? cb(dev) : pm_generic_thaw(dev);
+}
+
+#else /* !CONFIG_PM_SLEEP */
+
+#define pm_genpd_default_suspend NULL
+#define pm_genpd_default_suspend_late NULL
+#define pm_genpd_default_resume_early NULL
+#define pm_genpd_default_resume NULL
+#define pm_genpd_default_freeze NULL
+#define pm_genpd_default_freeze_late NULL
+#define pm_genpd_default_thaw_early NULL
+#define pm_genpd_default_thaw NULL
+
+#endif /* !CONFIG_PM_SLEEP */
+
+/**
* pm_genpd_init - Initialize a generic I/O PM domain object.
* @genpd: PM domain object to initialize.
* @gov: PM domain governor to associate with the domain (may be NULL).
@@ -1305,6 +1558,7 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
genpd->resume_count = 0;
genpd->device_count = 0;
genpd->suspended_count = 0;
+ genpd->max_off_time_ns = -1;
genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
genpd->domain.ops.runtime_idle = pm_generic_runtime_idle;
@@ -1317,11 +1571,21 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
genpd->domain.ops.thaw = pm_genpd_thaw;
- genpd->domain.ops.poweroff = pm_genpd_dev_poweroff;
- genpd->domain.ops.poweroff_noirq = pm_genpd_dev_poweroff_noirq;
+ genpd->domain.ops.poweroff = pm_genpd_suspend;
+ genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq;
genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
- genpd->domain.ops.restore = pm_genpd_restore;
+ genpd->domain.ops.restore = pm_genpd_resume;
genpd->domain.ops.complete = pm_genpd_complete;
+ genpd->dev_ops.save_state = pm_genpd_default_save_state;
+ genpd->dev_ops.restore_state = pm_genpd_default_restore_state;
+ genpd->dev_ops.suspend = pm_genpd_default_suspend;
+ genpd->dev_ops.suspend_late = pm_genpd_default_suspend_late;
+ genpd->dev_ops.resume_early = pm_genpd_default_resume_early;
+ genpd->dev_ops.resume = pm_genpd_default_resume;
+ genpd->dev_ops.freeze = pm_genpd_default_freeze;
+ genpd->dev_ops.freeze_late = pm_genpd_default_freeze_late;
+ genpd->dev_ops.thaw_early = pm_genpd_default_thaw_early;
+ genpd->dev_ops.thaw = pm_genpd_default_thaw;
mutex_lock(&gpd_list_lock);
list_add(&genpd->gpd_list_node, &gpd_list);
mutex_unlock(&gpd_list_lock);
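[Editor's note] The domain.c changes above introduce pm_genpd_add_callbacks() and per-device gpd_timing_data. Below is a hedged sketch of how a platform driver might use that interface; the structure and field names come from this patch, but the driver name, callback bodies and latency figures are invented, and it assumes the device has already been added to its PM domain (e.g. via __pm_genpd_add_device()).

#include <linux/platform_device.h>
#include <linux/pm_domain.h>

static int foo_save_state(struct device *dev)
{
	/* Save device registers before the domain is powered off. */
	return 0;
}

static int foo_restore_state(struct device *dev)
{
	/* Reprogram the device after the domain comes back up. */
	return 0;
}

static struct gpd_dev_ops foo_gpd_ops = {
	.save_state	= foo_save_state,
	.restore_state	= foo_restore_state,
};

/* Illustrative numbers only; real values come from measurements. */
static struct gpd_timing_data foo_gpd_td = {
	.stop_latency_ns		= 20000,
	.start_latency_ns		= 50000,
	.save_state_latency_ns		= 100000,
	.restore_state_latency_ns	= 200000,
	.break_even_ns			= 300000,
};

static int foo_probe(struct platform_device *pdev)
{
	/* ... after the device has been added to its PM domain ... */
	return pm_genpd_add_callbacks(&pdev->dev, &foo_gpd_ops, &foo_gpd_td);
}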
diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c
new file mode 100644
index 000000000000..66a265bf5867
--- /dev/null
+++ b/drivers/base/power/domain_governor.c
@@ -0,0 +1,170 @@
+/*
+ * drivers/base/power/domain_governor.c - Governors for device PM domains.
+ *
+ * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
+ *
+ * This file is released under the GPLv2.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_qos.h>
+#include <linux/hrtimer.h>
+
+#ifdef CONFIG_PM_RUNTIME
+
+/**
+ * default_stop_ok - Default PM domain governor routine for stopping devices.
+ * @dev: Device to check.
+ */
+bool default_stop_ok(struct device *dev)
+{
+ struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ if (dev->power.max_time_suspended_ns < 0 || td->break_even_ns == 0)
+ return true;
+
+ return td->stop_latency_ns + td->start_latency_ns < td->break_even_ns
+ && td->break_even_ns < dev->power.max_time_suspended_ns;
+}
+
+/**
+ * default_power_down_ok - Default generic PM domain power off governor routine.
+ * @pd: PM domain to check.
+ *
+ * This routine must be executed under the PM domain's lock.
+ */
+static bool default_power_down_ok(struct dev_pm_domain *pd)
+{
+ struct generic_pm_domain *genpd = pd_to_genpd(pd);
+ struct gpd_link *link;
+ struct pm_domain_data *pdd;
+ s64 min_dev_off_time_ns;
+ s64 off_on_time_ns;
+ ktime_t time_now = ktime_get();
+
+ off_on_time_ns = genpd->power_off_latency_ns +
+ genpd->power_on_latency_ns;
+ /*
+ * It doesn't make sense to remove power from the domain if saving
+ * the state of all devices in it and the power off/power on operations
+ * take too much time.
+ *
+ * All devices in this domain have been stopped already at this point.
+ */
+ list_for_each_entry(pdd, &genpd->dev_list, list_node) {
+ if (pdd->dev->driver)
+ off_on_time_ns +=
+ to_gpd_data(pdd)->td.save_state_latency_ns;
+ }
+
+ /*
+ * Check if subdomains can be off for enough time.
+ *
+ * All subdomains have been powered off already at this point.
+ */
+ list_for_each_entry(link, &genpd->master_links, master_node) {
+ struct generic_pm_domain *sd = link->slave;
+ s64 sd_max_off_ns = sd->max_off_time_ns;
+
+ if (sd_max_off_ns < 0)
+ continue;
+
+ sd_max_off_ns -= ktime_to_ns(ktime_sub(time_now,
+ sd->power_off_time));
+ /*
+ * Check if the subdomain is allowed to be off long enough for
+ * the current domain to turn off and on (that's how much time
+ * it will have to wait worst case).
+ */
+ if (sd_max_off_ns <= off_on_time_ns)
+ return false;
+ }
+
+ /*
+	 * Check if the devices in the domain can be off for enough time.
+ */
+ min_dev_off_time_ns = -1;
+ list_for_each_entry(pdd, &genpd->dev_list, list_node) {
+ struct gpd_timing_data *td;
+ struct device *dev = pdd->dev;
+ s64 dev_off_time_ns;
+
+ if (!dev->driver || dev->power.max_time_suspended_ns < 0)
+ continue;
+
+ td = &to_gpd_data(pdd)->td;
+ dev_off_time_ns = dev->power.max_time_suspended_ns -
+ (td->start_latency_ns + td->restore_state_latency_ns +
+ ktime_to_ns(ktime_sub(time_now,
+ dev->power.suspend_time)));
+ if (dev_off_time_ns <= off_on_time_ns)
+ return false;
+
+ if (min_dev_off_time_ns > dev_off_time_ns
+ || min_dev_off_time_ns < 0)
+ min_dev_off_time_ns = dev_off_time_ns;
+ }
+
+ if (min_dev_off_time_ns < 0) {
+ /*
+ * There are no latency constraints, so the domain can spend
+ * arbitrary time in the "off" state.
+ */
+ genpd->max_off_time_ns = -1;
+ return true;
+ }
+
+ /*
+ * The difference between the computed minimum delta and the time needed
+ * to turn the domain on is the maximum theoretical time this domain can
+ * spend in the "off" state.
+ */
+ min_dev_off_time_ns -= genpd->power_on_latency_ns;
+
+ /*
+ * If the difference between the computed minimum delta and the time
+	 * needed to turn the domain off and back on is smaller than the
+ * domain's power break even time, removing power from the domain is not
+ * worth it.
+ */
+ if (genpd->break_even_ns >
+ min_dev_off_time_ns - genpd->power_off_latency_ns)
+ return false;
+
+ genpd->max_off_time_ns = min_dev_off_time_ns;
+ return true;
+}
+
+static bool always_on_power_down_ok(struct dev_pm_domain *domain)
+{
+ return false;
+}
+
+#else /* !CONFIG_PM_RUNTIME */
+
+bool default_stop_ok(struct device *dev)
+{
+ return false;
+}
+
+#define default_power_down_ok NULL
+#define always_on_power_down_ok NULL
+
+#endif /* !CONFIG_PM_RUNTIME */
+
+struct dev_power_governor simple_qos_governor = {
+ .stop_ok = default_stop_ok,
+ .power_down_ok = default_power_down_ok,
+};
+
+/**
+ * pm_domain_always_on_gov - A governor implementing an always-on policy.
+ */
+struct dev_power_governor pm_domain_always_on_gov = {
+ .power_down_ok = always_on_power_down_ok,
+ .stop_ok = default_stop_ok,
+};
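[Editor's note] default_stop_ok() above implements a break-even test: stopping a device only pays off if the stop+start overhead stays below the device's break-even time, and the break-even time fits inside the allowed suspend window. The following self-contained userspace sketch mirrors that test with invented types and numbers; it is not kernel code.

#include <stdbool.h>
#include <stdio.h>

struct timing {
	long long stop_latency_ns;
	long long start_latency_ns;
	long long break_even_ns;
	long long max_time_suspended_ns;	/* < 0 means no constraint */
};

static bool stop_ok(const struct timing *td)
{
	/* No constraint or no break-even data: always worth stopping. */
	if (td->max_time_suspended_ns < 0 || td->break_even_ns == 0)
		return true;

	return td->stop_latency_ns + td->start_latency_ns < td->break_even_ns
		&& td->break_even_ns < td->max_time_suspended_ns;
}

int main(void)
{
	struct timing td = {
		.stop_latency_ns = 20000,
		.start_latency_ns = 50000,
		.break_even_ns = 100000,
		.max_time_suspended_ns = 1000000,
	};

	printf("stop_ok: %s\n", stop_ok(&td) ? "yes" : "no");
	return 0;
}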
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
index 265a0ee3b49e..10bdd793f0bd 100644
--- a/drivers/base/power/generic_ops.c
+++ b/drivers/base/power/generic_ops.c
@@ -97,16 +97,16 @@ int pm_generic_prepare(struct device *dev)
* @event: PM transition of the system under way.
* @bool: Whether or not this is the "noirq" stage.
*
- * If the device has not been suspended at run time, execute the
- * suspend/freeze/poweroff/thaw callback provided by its driver, if defined, and
- * return its error code. Otherwise, return zero.
+ * Execute the PM callback corresponding to @event provided by the driver of
+ * @dev, if defined, and return its error code. Return 0 if the callback is
+ * not present.
*/
static int __pm_generic_call(struct device *dev, int event, bool noirq)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
int (*callback)(struct device *);
- if (!pm || pm_runtime_suspended(dev))
+ if (!pm)
return 0;
switch (event) {
@@ -119,9 +119,15 @@ static int __pm_generic_call(struct device *dev, int event, bool noirq)
case PM_EVENT_HIBERNATE:
callback = noirq ? pm->poweroff_noirq : pm->poweroff;
break;
+ case PM_EVENT_RESUME:
+ callback = noirq ? pm->resume_noirq : pm->resume;
+ break;
case PM_EVENT_THAW:
callback = noirq ? pm->thaw_noirq : pm->thaw;
break;
+ case PM_EVENT_RESTORE:
+ callback = noirq ? pm->restore_noirq : pm->restore;
+ break;
default:
callback = NULL;
break;
@@ -211,56 +217,12 @@ int pm_generic_thaw(struct device *dev)
EXPORT_SYMBOL_GPL(pm_generic_thaw);
/**
- * __pm_generic_resume - Generic resume/restore callback for subsystems.
- * @dev: Device to handle.
- * @event: PM transition of the system under way.
- * @bool: Whether or not this is the "noirq" stage.
- *
- * Execute the resume/resotre callback provided by the @dev's driver, if
- * defined. If it returns 0, change the device's runtime PM status to 'active'.
- * Return the callback's error code.
- */
-static int __pm_generic_resume(struct device *dev, int event, bool noirq)
-{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- int (*callback)(struct device *);
- int ret;
-
- if (!pm)
- return 0;
-
- switch (event) {
- case PM_EVENT_RESUME:
- callback = noirq ? pm->resume_noirq : pm->resume;
- break;
- case PM_EVENT_RESTORE:
- callback = noirq ? pm->restore_noirq : pm->restore;
- break;
- default:
- callback = NULL;
- break;
- }
-
- if (!callback)
- return 0;
-
- ret = callback(dev);
- if (!ret && !noirq && pm_runtime_enabled(dev)) {
- pm_runtime_disable(dev);
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
- }
-
- return ret;
-}
-
-/**
* pm_generic_resume_noirq - Generic resume_noirq callback for subsystems.
* @dev: Device to resume.
*/
int pm_generic_resume_noirq(struct device *dev)
{
- return __pm_generic_resume(dev, PM_EVENT_RESUME, true);
+ return __pm_generic_call(dev, PM_EVENT_RESUME, true);
}
EXPORT_SYMBOL_GPL(pm_generic_resume_noirq);
@@ -270,7 +232,7 @@ EXPORT_SYMBOL_GPL(pm_generic_resume_noirq);
*/
int pm_generic_resume(struct device *dev)
{
- return __pm_generic_resume(dev, PM_EVENT_RESUME, false);
+ return __pm_generic_call(dev, PM_EVENT_RESUME, false);
}
EXPORT_SYMBOL_GPL(pm_generic_resume);
@@ -280,7 +242,7 @@ EXPORT_SYMBOL_GPL(pm_generic_resume);
*/
int pm_generic_restore_noirq(struct device *dev)
{
- return __pm_generic_resume(dev, PM_EVENT_RESTORE, true);
+ return __pm_generic_call(dev, PM_EVENT_RESTORE, true);
}
EXPORT_SYMBOL_GPL(pm_generic_restore_noirq);
@@ -290,7 +252,7 @@ EXPORT_SYMBOL_GPL(pm_generic_restore_noirq);
*/
int pm_generic_restore(struct device *dev)
{
- return __pm_generic_resume(dev, PM_EVENT_RESTORE, false);
+ return __pm_generic_call(dev, PM_EVENT_RESTORE, false);
}
EXPORT_SYMBOL_GPL(pm_generic_restore);
@@ -314,28 +276,3 @@ void pm_generic_complete(struct device *dev)
pm_runtime_idle(dev);
}
#endif /* CONFIG_PM_SLEEP */
-
-struct dev_pm_ops generic_subsys_pm_ops = {
-#ifdef CONFIG_PM_SLEEP
- .prepare = pm_generic_prepare,
- .suspend = pm_generic_suspend,
- .suspend_noirq = pm_generic_suspend_noirq,
- .resume = pm_generic_resume,
- .resume_noirq = pm_generic_resume_noirq,
- .freeze = pm_generic_freeze,
- .freeze_noirq = pm_generic_freeze_noirq,
- .thaw = pm_generic_thaw,
- .thaw_noirq = pm_generic_thaw_noirq,
- .poweroff = pm_generic_poweroff,
- .poweroff_noirq = pm_generic_poweroff_noirq,
- .restore = pm_generic_restore,
- .restore_noirq = pm_generic_restore_noirq,
- .complete = pm_generic_complete,
-#endif
-#ifdef CONFIG_PM_RUNTIME
- .runtime_suspend = pm_generic_runtime_suspend,
- .runtime_resume = pm_generic_runtime_resume,
- .runtime_idle = pm_generic_runtime_idle,
-#endif
-};
-EXPORT_SYMBOL_GPL(generic_subsys_pm_ops);
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index c3d2dfcf438d..e2cc3d2e0ecc 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -32,6 +32,8 @@
#include "../base.h"
#include "power.h"
+typedef int (*pm_callback_t)(struct device *);
+
/*
* The entries in the dpm_list list are in a depth first order, simply
* because children are guaranteed to be discovered after parents, and
@@ -164,8 +166,9 @@ static ktime_t initcall_debug_start(struct device *dev)
ktime_t calltime = ktime_set(0, 0);
if (initcall_debug) {
- pr_info("calling %s+ @ %i\n",
- dev_name(dev), task_pid_nr(current));
+ pr_info("calling %s+ @ %i, parent: %s\n",
+ dev_name(dev), task_pid_nr(current),
+ dev->parent ? dev_name(dev->parent) : "none");
calltime = ktime_get();
}
@@ -211,151 +214,69 @@ static void dpm_wait_for_children(struct device *dev, bool async)
}
/**
- * pm_op - Execute the PM operation appropriate for given PM event.
- * @dev: Device to handle.
+ * pm_op - Return the PM operation appropriate for given PM event.
* @ops: PM operations to choose from.
* @state: PM transition of the system being carried out.
*/
-static int pm_op(struct device *dev,
- const struct dev_pm_ops *ops,
- pm_message_t state)
+static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
- int error = 0;
- ktime_t calltime;
-
- calltime = initcall_debug_start(dev);
-
switch (state.event) {
#ifdef CONFIG_SUSPEND
case PM_EVENT_SUSPEND:
- if (ops->suspend) {
- error = ops->suspend(dev);
- suspend_report_result(ops->suspend, error);
- }
- break;
+ return ops->suspend;
case PM_EVENT_RESUME:
- if (ops->resume) {
- error = ops->resume(dev);
- suspend_report_result(ops->resume, error);
- }
- break;
+ return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
case PM_EVENT_FREEZE:
case PM_EVENT_QUIESCE:
- if (ops->freeze) {
- error = ops->freeze(dev);
- suspend_report_result(ops->freeze, error);
- }
- break;
+ return ops->freeze;
case PM_EVENT_HIBERNATE:
- if (ops->poweroff) {
- error = ops->poweroff(dev);
- suspend_report_result(ops->poweroff, error);
- }
- break;
+ return ops->poweroff;
case PM_EVENT_THAW:
case PM_EVENT_RECOVER:
- if (ops->thaw) {
- error = ops->thaw(dev);
- suspend_report_result(ops->thaw, error);
- }
+ return ops->thaw;
break;
case PM_EVENT_RESTORE:
- if (ops->restore) {
- error = ops->restore(dev);
- suspend_report_result(ops->restore, error);
- }
- break;
+ return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
- default:
- error = -EINVAL;
}
- initcall_debug_report(dev, calltime, error);
-
- return error;
+ return NULL;
}
/**
- * pm_noirq_op - Execute the PM operation appropriate for given PM event.
- * @dev: Device to handle.
+ * pm_noirq_op - Return the PM operation appropriate for given PM event.
* @ops: PM operations to choose from.
* @state: PM transition of the system being carried out.
*
* The driver of @dev will not receive interrupts while this function is being
* executed.
*/
-static int pm_noirq_op(struct device *dev,
- const struct dev_pm_ops *ops,
- pm_message_t state)
+static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
- int error = 0;
- ktime_t calltime = ktime_set(0, 0), delta, rettime;
-
- if (initcall_debug) {
- pr_info("calling %s+ @ %i, parent: %s\n",
- dev_name(dev), task_pid_nr(current),
- dev->parent ? dev_name(dev->parent) : "none");
- calltime = ktime_get();
- }
-
switch (state.event) {
#ifdef CONFIG_SUSPEND
case PM_EVENT_SUSPEND:
- if (ops->suspend_noirq) {
- error = ops->suspend_noirq(dev);
- suspend_report_result(ops->suspend_noirq, error);
- }
- break;
+ return ops->suspend_noirq;
case PM_EVENT_RESUME:
- if (ops->resume_noirq) {
- error = ops->resume_noirq(dev);
- suspend_report_result(ops->resume_noirq, error);
- }
- break;
+ return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
case PM_EVENT_FREEZE:
case PM_EVENT_QUIESCE:
- if (ops->freeze_noirq) {
- error = ops->freeze_noirq(dev);
- suspend_report_result(ops->freeze_noirq, error);
- }
- break;
+ return ops->freeze_noirq;
case PM_EVENT_HIBERNATE:
- if (ops->poweroff_noirq) {
- error = ops->poweroff_noirq(dev);
- suspend_report_result(ops->poweroff_noirq, error);
- }
- break;
+ return ops->poweroff_noirq;
case PM_EVENT_THAW:
case PM_EVENT_RECOVER:
- if (ops->thaw_noirq) {
- error = ops->thaw_noirq(dev);
- suspend_report_result(ops->thaw_noirq, error);
- }
- break;
+ return ops->thaw_noirq;
case PM_EVENT_RESTORE:
- if (ops->restore_noirq) {
- error = ops->restore_noirq(dev);
- suspend_report_result(ops->restore_noirq, error);
- }
- break;
+ return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
- default:
- error = -EINVAL;
- }
-
- if (initcall_debug) {
- rettime = ktime_get();
- delta = ktime_sub(rettime, calltime);
- printk("initcall %s_i+ returned %d after %Ld usecs\n",
- dev_name(dev), error,
- (unsigned long long)ktime_to_ns(delta) >> 10);
}
- return error;
+ return NULL;
}
static char *pm_verb(int event)
@@ -413,6 +334,26 @@ static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}
+static int dpm_run_callback(pm_callback_t cb, struct device *dev,
+ pm_message_t state, char *info)
+{
+ ktime_t calltime;
+ int error;
+
+ if (!cb)
+ return 0;
+
+ calltime = initcall_debug_start(dev);
+
+ pm_dev_dbg(dev, state, info);
+ error = cb(dev);
+ suspend_report_result(cb, error);
+
+ initcall_debug_report(dev, calltime, error);
+
+ return error;
+}
+
/*------------------------- Resume routines -------------------------*/
/**
@@ -425,25 +366,34 @@ static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
*/
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
+ pm_callback_t callback = NULL;
+ char *info = NULL;
int error = 0;
TRACE_DEVICE(dev);
TRACE_RESUME(0);
if (dev->pm_domain) {
- pm_dev_dbg(dev, state, "EARLY power domain ");
- error = pm_noirq_op(dev, &dev->pm_domain->ops, state);
+ info = "EARLY power domain ";
+ callback = pm_noirq_op(&dev->pm_domain->ops, state);
} else if (dev->type && dev->type->pm) {
- pm_dev_dbg(dev, state, "EARLY type ");
- error = pm_noirq_op(dev, dev->type->pm, state);
+ info = "EARLY type ";
+ callback = pm_noirq_op(dev->type->pm, state);
} else if (dev->class && dev->class->pm) {
- pm_dev_dbg(dev, state, "EARLY class ");
- error = pm_noirq_op(dev, dev->class->pm, state);
+ info = "EARLY class ";
+ callback = pm_noirq_op(dev->class->pm, state);
} else if (dev->bus && dev->bus->pm) {
- pm_dev_dbg(dev, state, "EARLY ");
- error = pm_noirq_op(dev, dev->bus->pm, state);
+ info = "EARLY bus ";
+ callback = pm_noirq_op(dev->bus->pm, state);
}
+ if (!callback && dev->driver && dev->driver->pm) {
+ info = "EARLY driver ";
+ callback = pm_noirq_op(dev->driver->pm, state);
+ }
+
+ error = dpm_run_callback(callback, dev, state, info);
+
TRACE_RESUME(error);
return error;
}
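[Editor's note] The main.c refactoring above changes pm_op()/pm_noirq_op() from "execute the callback" to "return a callback pointer", with dpm_run_callback() as the single place that runs and instruments whatever was selected. A tiny userspace sketch of that pattern follows; all names (pick_op, run_cb, my_resume) are invented for illustration.

#include <stdio.h>

struct dev;
typedef int (*pm_cb_t)(struct dev *);

struct ops {
	pm_cb_t suspend;
	pm_cb_t resume;
};

enum pm_event { EV_SUSPEND, EV_RESUME };

/* Only select the callback; do not run it here. */
static pm_cb_t pick_op(const struct ops *ops, enum pm_event ev)
{
	switch (ev) {
	case EV_SUSPEND:
		return ops->suspend;
	case EV_RESUME:
		return ops->resume;
	}
	return NULL;
}

/* Single place to run the callback and emit tracing/debug output. */
static int run_cb(pm_cb_t cb, struct dev *dev, const char *info)
{
	if (!cb)
		return 0;	/* nothing to do for this event */

	printf("%s\n", info);
	return cb(dev);
}

static int my_resume(struct dev *dev)
{
	(void)dev;
	return 0;
}

int main(void)
{
	struct ops ops = { .resume = my_resume };

	return run_cb(pick_op(&ops, EV_RESUME), NULL, "resume driver");
}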
@@ -486,26 +436,6 @@ void dpm_resume_noirq(pm_message_t state)
EXPORT_SYMBOL_GPL(dpm_resume_noirq);
/**
- * legacy_resume - Execute a legacy (bus or class) resume callback for device.
- * @dev: Device to resume.
- * @cb: Resume callback to execute.
- */
-static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
-{
- int error;
- ktime_t calltime;
-
- calltime = initcall_debug_start(dev);
-
- error = cb(dev);
- suspend_report_result(cb, error);
-
- initcall_debug_report(dev, calltime, error);
-
- return error;
-}
-
-/**
* device_resume - Execute "resume" callbacks for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
@@ -513,6 +443,8 @@ static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
*/
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
+ pm_callback_t callback = NULL;
+ char *info = NULL;
int error = 0;
bool put = false;
@@ -535,40 +467,48 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
put = true;
if (dev->pm_domain) {
- pm_dev_dbg(dev, state, "power domain ");
- error = pm_op(dev, &dev->pm_domain->ops, state);
- goto End;
+ info = "power domain ";
+ callback = pm_op(&dev->pm_domain->ops, state);
+ goto Driver;
}
if (dev->type && dev->type->pm) {
- pm_dev_dbg(dev, state, "type ");
- error = pm_op(dev, dev->type->pm, state);
- goto End;
+ info = "type ";
+ callback = pm_op(dev->type->pm, state);
+ goto Driver;
}
if (dev->class) {
if (dev->class->pm) {
- pm_dev_dbg(dev, state, "class ");
- error = pm_op(dev, dev->class->pm, state);
- goto End;
+ info = "class ";
+ callback = pm_op(dev->class->pm, state);
+ goto Driver;
} else if (dev->class->resume) {
- pm_dev_dbg(dev, state, "legacy class ");
- error = legacy_resume(dev, dev->class->resume);
+ info = "legacy class ";
+ callback = dev->class->resume;
goto End;
}
}
if (dev->bus) {
if (dev->bus->pm) {
- pm_dev_dbg(dev, state, "");
- error = pm_op(dev, dev->bus->pm, state);
+ info = "bus ";
+ callback = pm_op(dev->bus->pm, state);
} else if (dev->bus->resume) {
- pm_dev_dbg(dev, state, "legacy ");
- error = legacy_resume(dev, dev->bus->resume);
+ info = "legacy bus ";
+ callback = dev->bus->resume;
+ goto End;
}
}
+ Driver:
+ if (!callback && dev->driver && dev->driver->pm) {
+ info = "driver ";
+ callback = pm_op(dev->driver->pm, state);
+ }
+
End:
+ error = dpm_run_callback(callback, dev, state, info);
dev->power.is_suspended = false;
Unlock:
@@ -660,24 +600,33 @@ void dpm_resume(pm_message_t state)
*/
static void device_complete(struct device *dev, pm_message_t state)
{
+ void (*callback)(struct device *) = NULL;
+ char *info = NULL;
+
device_lock(dev);
if (dev->pm_domain) {
- pm_dev_dbg(dev, state, "completing power domain ");
- if (dev->pm_domain->ops.complete)
- dev->pm_domain->ops.complete(dev);
+ info = "completing power domain ";
+ callback = dev->pm_domain->ops.complete;
} else if (dev->type && dev->type->pm) {
- pm_dev_dbg(dev, state, "completing type ");
- if (dev->type->pm->complete)
- dev->type->pm->complete(dev);
+ info = "completing type ";
+ callback = dev->type->pm->complete;
} else if (dev->class && dev->class->pm) {
- pm_dev_dbg(dev, state, "completing class ");
- if (dev->class->pm->complete)
- dev->class->pm->complete(dev);
+ info = "completing class ";
+ callback = dev->class->pm->complete;
} else if (dev->bus && dev->bus->pm) {
- pm_dev_dbg(dev, state, "completing ");
- if (dev->bus->pm->complete)
- dev->bus->pm->complete(dev);
+ info = "completing bus ";
+ callback = dev->bus->pm->complete;
+ }
+
+ if (!callback && dev->driver && dev->driver->pm) {
+ info = "completing driver ";
+ callback = dev->driver->pm->complete;
+ }
+
+ if (callback) {
+ pm_dev_dbg(dev, state, info);
+ callback(dev);
}
device_unlock(dev);
@@ -763,31 +712,29 @@ static pm_message_t resume_event(pm_message_t sleep_state)
*/
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
- int error;
+ pm_callback_t callback = NULL;
+ char *info = NULL;
if (dev->pm_domain) {
- pm_dev_dbg(dev, state, "LATE power domain ");
- error = pm_noirq_op(dev, &dev->pm_domain->ops, state);
- if (error)
- return error;
+ info = "LATE power domain ";
+ callback = pm_noirq_op(&dev->pm_domain->ops, state);
} else if (dev->type && dev->type->pm) {
- pm_dev_dbg(dev, state, "LATE type ");
- error = pm_noirq_op(dev, dev->type->pm, state);
- if (error)
- return error;
+ info = "LATE type ";
+ callback = pm_noirq_op(dev->type->pm, state);
} else if (dev->class && dev->class->pm) {
- pm_dev_dbg(dev, state, "LATE class ");
- error = pm_noirq_op(dev, dev->class->pm, state);
- if (error)
- return error;
+ info = "LATE class ";
+ callback = pm_noirq_op(dev->class->pm, state);
} else if (dev->bus && dev->bus->pm) {
- pm_dev_dbg(dev, state, "LATE ");
- error = pm_noirq_op(dev, dev->bus->pm, state);
- if (error)
- return error;
+ info = "LATE bus ";
+ callback = pm_noirq_op(dev->bus->pm, state);
}
- return 0;
+ if (!callback && dev->driver && dev->driver->pm) {
+ info = "LATE driver ";
+ callback = pm_noirq_op(dev->driver->pm, state);
+ }
+
+ return dpm_run_callback(callback, dev, state, info);
}
/**
@@ -864,6 +811,8 @@ static int legacy_suspend(struct device *dev, pm_message_t state,
*/
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
+ pm_callback_t callback = NULL;
+ char *info = NULL;
int error = 0;
dpm_wait_for_children(dev, async);
@@ -884,22 +833,22 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
device_lock(dev);
if (dev->pm_domain) {
- pm_dev_dbg(dev, state, "power domain ");
- error = pm_op(dev, &dev->pm_domain->ops, state);
- goto End;
+ info = "power domain ";
+ callback = pm_op(&dev->pm_domain->ops, state);
+ goto Run;
}
if (dev->type && dev->type->pm) {
- pm_dev_dbg(dev, state, "type ");
- error = pm_op(dev, dev->type->pm, state);
- goto End;
+ info = "type ";
+ callback = pm_op(dev->type->pm, state);
+ goto Run;
}
if (dev->class) {
if (dev->class->pm) {
- pm_dev_dbg(dev, state, "class ");
- error = pm_op(dev, dev->class->pm, state);
- goto End;
+ info = "class ";
+ callback = pm_op(dev->class->pm, state);
+ goto Run;
} else if (dev->class->suspend) {
pm_dev_dbg(dev, state, "legacy class ");
error = legacy_suspend(dev, state, dev->class->suspend);
@@ -909,14 +858,23 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
if (dev->bus) {
if (dev->bus->pm) {
- pm_dev_dbg(dev, state, "");
- error = pm_op(dev, dev->bus->pm, state);
+ info = "bus ";
+ callback = pm_op(dev->bus->pm, state);
} else if (dev->bus->suspend) {
- pm_dev_dbg(dev, state, "legacy ");
+ pm_dev_dbg(dev, state, "legacy bus ");
error = legacy_suspend(dev, state, dev->bus->suspend);
+ goto End;
}
}
+ Run:
+ if (!callback && dev->driver && dev->driver->pm) {
+ info = "driver ";
+ callback = pm_op(dev->driver->pm, state);
+ }
+
+ error = dpm_run_callback(callback, dev, state, info);
+
End:
if (!error) {
dev->power.is_suspended = true;
@@ -1022,6 +980,8 @@ int dpm_suspend(pm_message_t state)
*/
static int device_prepare(struct device *dev, pm_message_t state)
{
+ int (*callback)(struct device *) = NULL;
+ char *info = NULL;
int error = 0;
device_lock(dev);
@@ -1029,34 +989,29 @@ static int device_prepare(struct device *dev, pm_message_t state)
dev->power.wakeup_path = device_may_wakeup(dev);
if (dev->pm_domain) {
- pm_dev_dbg(dev, state, "preparing power domain ");
- if (dev->pm_domain->ops.prepare)
- error = dev->pm_domain->ops.prepare(dev);
- suspend_report_result(dev->pm_domain->ops.prepare, error);
- if (error)
- goto End;
+ info = "preparing power domain ";
+ callback = dev->pm_domain->ops.prepare;
} else if (dev->type && dev->type->pm) {
- pm_dev_dbg(dev, state, "preparing type ");
- if (dev->type->pm->prepare)
- error = dev->type->pm->prepare(dev);
- suspend_report_result(dev->type->pm->prepare, error);
- if (error)
- goto End;
+ info = "preparing type ";
+ callback = dev->type->pm->prepare;
} else if (dev->class && dev->class->pm) {
- pm_dev_dbg(dev, state, "preparing class ");
- if (dev->class->pm->prepare)
- error = dev->class->pm->prepare(dev);
- suspend_report_result(dev->class->pm->prepare, error);
- if (error)
- goto End;
+ info = "preparing class ";
+ callback = dev->class->pm->prepare;
} else if (dev->bus && dev->bus->pm) {
- pm_dev_dbg(dev, state, "preparing ");
- if (dev->bus->pm->prepare)
- error = dev->bus->pm->prepare(dev);
- suspend_report_result(dev->bus->pm->prepare, error);
+ info = "preparing bus ";
+ callback = dev->bus->pm->prepare;
+ }
+
+ if (!callback && dev->driver && dev->driver->pm) {
+ info = "preparing driver ";
+ callback = dev->driver->pm->prepare;
+ }
+
+ if (callback) {
+ error = callback(dev);
+ suspend_report_result(callback, error);
}
- End:
device_unlock(dev);
return error;
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index 86de6c50fc41..c5d358837461 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -47,21 +47,29 @@ static DEFINE_MUTEX(dev_pm_qos_mtx);
static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers);
/**
- * dev_pm_qos_read_value - Get PM QoS constraint for a given device.
+ * __dev_pm_qos_read_value - Get PM QoS constraint for a given device.
+ * @dev: Device to get the PM QoS constraint value for.
+ *
+ * This routine must be called with dev->power.lock held.
+ */
+s32 __dev_pm_qos_read_value(struct device *dev)
+{
+ struct pm_qos_constraints *c = dev->power.constraints;
+
+ return c ? pm_qos_read_value(c) : 0;
+}
+
+/**
+ * dev_pm_qos_read_value - Get PM QoS constraint for a given device (locked).
* @dev: Device to get the PM QoS constraint value for.
*/
s32 dev_pm_qos_read_value(struct device *dev)
{
- struct pm_qos_constraints *c;
unsigned long flags;
- s32 ret = 0;
+ s32 ret;
spin_lock_irqsave(&dev->power.lock, flags);
-
- c = dev->power.constraints;
- if (c)
- ret = pm_qos_read_value(c);
-
+ ret = __dev_pm_qos_read_value(dev);
spin_unlock_irqrestore(&dev->power.lock, flags);
return ret;
@@ -412,3 +420,28 @@ int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier)
return blocking_notifier_chain_unregister(&dev_pm_notifiers, notifier);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_global_notifier);
+
+/**
+ * dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor.
+ * @dev: Device whose ancestor to add the request for.
+ * @req: Pointer to the preallocated handle.
+ * @value: Constraint latency value.
+ */
+int dev_pm_qos_add_ancestor_request(struct device *dev,
+ struct dev_pm_qos_request *req, s32 value)
+{
+ struct device *ancestor = dev->parent;
+ int error = -ENODEV;
+
+ while (ancestor && !ancestor->power.ignore_children)
+ ancestor = ancestor->parent;
+
+ if (ancestor)
+ error = dev_pm_qos_add_request(ancestor, req, value);
+
+ if (error)
+ req->dev = NULL;
+
+ return error;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
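[Editor's note] dev_pm_qos_add_ancestor_request() above walks up the parent chain until it finds the first ancestor with power.ignore_children set and attaches the request there. The userspace sketch below mirrors that walk with an invented node type; it is an illustration of the traversal, not the kernel API.

#include <stdbool.h>
#include <stdio.h>

struct node {
	const char *name;
	bool ignore_children;
	struct node *parent;
};

/* Stop at the first ancestor that manages its children itself. */
static struct node *find_qos_ancestor(struct node *dev)
{
	struct node *ancestor = dev->parent;

	while (ancestor && !ancestor->ignore_children)
		ancestor = ancestor->parent;

	return ancestor;	/* may be NULL: no suitable ancestor */
}

int main(void)
{
	struct node root = { "root", true, NULL };
	struct node bus = { "bus", false, &root };
	struct node dev = { "dev", false, &bus };
	struct node *a = find_qos_ancestor(&dev);

	printf("ancestor: %s\n", a ? a->name : "none");
	return 0;
}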
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 8c78443bca8f..541f821d4ea6 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -250,6 +250,9 @@ static int rpm_idle(struct device *dev, int rpmflags)
else
callback = NULL;
+ if (!callback && dev->driver && dev->driver->pm)
+ callback = dev->driver->pm->runtime_idle;
+
if (callback)
__rpm_callback(callback, dev);
@@ -279,6 +282,47 @@ static int rpm_callback(int (*cb)(struct device *), struct device *dev)
return retval != -EACCES ? retval : -EIO;
}
+struct rpm_qos_data {
+ ktime_t time_now;
+ s64 constraint_ns;
+};
+
+/**
+ * rpm_update_qos_constraint - Update given PM QoS constraint data.
+ * @dev: Device whose timing data to use.
+ * @data: PM QoS constraint data to update.
+ *
+ * Use the suspend timing data of @dev to update PM QoS constraint data pointed
+ * to by @data.
+ */
+static int rpm_update_qos_constraint(struct device *dev, void *data)
+{
+ struct rpm_qos_data *qos = data;
+ unsigned long flags;
+ s64 delta_ns;
+ int ret = 0;
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+
+ if (dev->power.max_time_suspended_ns < 0)
+ goto out;
+
+ delta_ns = dev->power.max_time_suspended_ns -
+ ktime_to_ns(ktime_sub(qos->time_now, dev->power.suspend_time));
+ if (delta_ns <= 0) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ if (qos->constraint_ns > delta_ns || qos->constraint_ns == 0)
+ qos->constraint_ns = delta_ns;
+
+ out:
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+
+ return ret;
+}
+
/**
* rpm_suspend - Carry out runtime suspend of given device.
* @dev: Device to suspend.
@@ -305,6 +349,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
{
int (*callback)(struct device *);
struct device *parent = NULL;
+ struct rpm_qos_data qos;
int retval;
trace_rpm_suspend(dev, rpmflags);
@@ -400,8 +445,38 @@ static int rpm_suspend(struct device *dev, int rpmflags)
goto out;
}
+ qos.constraint_ns = __dev_pm_qos_read_value(dev);
+ if (qos.constraint_ns < 0) {
+ /* Negative constraint means "never suspend". */
+ retval = -EPERM;
+ goto out;
+ }
+ qos.constraint_ns *= NSEC_PER_USEC;
+ qos.time_now = ktime_get();
+
__update_runtime_status(dev, RPM_SUSPENDING);
+ if (!dev->power.ignore_children) {
+ if (dev->power.irq_safe)
+ spin_unlock(&dev->power.lock);
+ else
+ spin_unlock_irq(&dev->power.lock);
+
+ retval = device_for_each_child(dev, &qos,
+ rpm_update_qos_constraint);
+
+ if (dev->power.irq_safe)
+ spin_lock(&dev->power.lock);
+ else
+ spin_lock_irq(&dev->power.lock);
+
+ if (retval)
+ goto fail;
+ }
+
+ dev->power.suspend_time = qos.time_now;
+ dev->power.max_time_suspended_ns = qos.constraint_ns ? : -1;
+
if (dev->pm_domain)
callback = dev->pm_domain->ops.runtime_suspend;
else if (dev->type && dev->type->pm)
@@ -413,28 +488,13 @@ static int rpm_suspend(struct device *dev, int rpmflags)
else
callback = NULL;
+ if (!callback && dev->driver && dev->driver->pm)
+ callback = dev->driver->pm->runtime_suspend;
+
retval = rpm_callback(callback, dev);
- if (retval) {
- __update_runtime_status(dev, RPM_ACTIVE);
- dev->power.deferred_resume = false;
- if (retval == -EAGAIN || retval == -EBUSY) {
- dev->power.runtime_error = 0;
+ if (retval)
+ goto fail;
- /*
- * If the callback routine failed an autosuspend, and
- * if the last_busy time has been updated so that there
- * is a new autosuspend expiration time, automatically
- * reschedule another autosuspend.
- */
- if ((rpmflags & RPM_AUTO) &&
- pm_runtime_autosuspend_expiration(dev) != 0)
- goto repeat;
- } else {
- pm_runtime_cancel_pending(dev);
- }
- wake_up_all(&dev->power.wait_queue);
- goto out;
- }
no_callback:
__update_runtime_status(dev, RPM_SUSPENDED);
pm_runtime_deactivate_timer(dev);
@@ -466,6 +526,29 @@ static int rpm_suspend(struct device *dev, int rpmflags)
trace_rpm_return_int(dev, _THIS_IP_, retval);
return retval;
+
+ fail:
+ __update_runtime_status(dev, RPM_ACTIVE);
+ dev->power.suspend_time = ktime_set(0, 0);
+ dev->power.max_time_suspended_ns = -1;
+ dev->power.deferred_resume = false;
+ if (retval == -EAGAIN || retval == -EBUSY) {
+ dev->power.runtime_error = 0;
+
+ /*
+ * If the callback routine failed an autosuspend, and
+ * if the last_busy time has been updated so that there
+ * is a new autosuspend expiration time, automatically
+ * reschedule another autosuspend.
+ */
+ if ((rpmflags & RPM_AUTO) &&
+ pm_runtime_autosuspend_expiration(dev) != 0)
+ goto repeat;
+ } else {
+ pm_runtime_cancel_pending(dev);
+ }
+ wake_up_all(&dev->power.wait_queue);
+ goto out;
}
/**
@@ -620,6 +703,9 @@ static int rpm_resume(struct device *dev, int rpmflags)
if (dev->power.no_callbacks)
goto no_callback; /* Assume success. */
+ dev->power.suspend_time = ktime_set(0, 0);
+ dev->power.max_time_suspended_ns = -1;
+
__update_runtime_status(dev, RPM_RESUMING);
if (dev->pm_domain)
@@ -633,6 +719,9 @@ static int rpm_resume(struct device *dev, int rpmflags)
else
callback = NULL;
+ if (!callback && dev->driver && dev->driver->pm)
+ callback = dev->driver->pm->runtime_resume;
+
retval = rpm_callback(callback, dev);
if (retval) {
__update_runtime_status(dev, RPM_SUSPENDED);
@@ -1279,6 +1368,9 @@ void pm_runtime_init(struct device *dev)
setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
(unsigned long)dev);
+ dev->power.suspend_time = ktime_set(0, 0);
+ dev->power.max_time_suspended_ns = -1;
+
init_waitqueue_head(&dev->power.wait_queue);
}
@@ -1296,3 +1388,28 @@ void pm_runtime_remove(struct device *dev)
if (dev->power.irq_safe && dev->parent)
pm_runtime_put_sync(dev->parent);
}
+
+/**
+ * pm_runtime_update_max_time_suspended - Update device's suspend time data.
+ * @dev: Device to handle.
+ * @delta_ns: Value to subtract from the device's max_time_suspended_ns field.
+ *
+ * Update the device's power.max_time_suspended_ns field by subtracting
+ * @delta_ns from it. The resulting value of power.max_time_suspended_ns is
+ * never negative.
+ */
+void pm_runtime_update_max_time_suspended(struct device *dev, s64 delta_ns)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+
+ if (delta_ns > 0 && dev->power.max_time_suspended_ns > 0) {
+ if (dev->power.max_time_suspended_ns > delta_ns)
+ dev->power.max_time_suspended_ns -= delta_ns;
+ else
+ dev->power.max_time_suspended_ns = 0;
+ }
+
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+}
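[Editor's note] In the runtime.c changes above, rpm_suspend() asks every child (via rpm_update_qos_constraint()) how long it may still stay suspended and keeps the tightest remaining budget; an exhausted budget aborts the suspend. The sketch below reproduces that aggregation in plain userspace C with invented data and without the per-device locking.

#include <stdio.h>

#define NCHILD 3

struct child {
	long long max_time_suspended_ns;	/* < 0 means unconstrained */
	long long already_suspended_ns;
};

/* Returns 0 on success, -1 if some child's budget is already exhausted. */
static int tighten_constraint(const struct child *c, int n,
			      long long *constraint_ns)
{
	int i;

	for (i = 0; i < n; i++) {
		long long delta;

		if (c[i].max_time_suspended_ns < 0)
			continue;	/* no constraint from this child */

		delta = c[i].max_time_suspended_ns -
			c[i].already_suspended_ns;
		if (delta <= 0)
			return -1;

		/* Keep the smallest remaining budget (0 = none yet). */
		if (*constraint_ns > delta || *constraint_ns == 0)
			*constraint_ns = delta;
	}
	return 0;
}

int main(void)
{
	struct child kids[NCHILD] = {
		{ -1, 0 }, { 5000000, 1000000 }, { 3000000, 500000 },
	};
	long long constraint_ns = 0;	/* 0: unconstrained so far */

	if (tighten_constraint(kids, NCHILD, &constraint_ns))
		printf("suspend not allowed\n");
	else
		printf("constraint: %lld ns\n", constraint_ns);
	return 0;
}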
diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig
index 2fc6a66f39a4..0f6c7fb418e8 100644
--- a/drivers/base/regmap/Kconfig
+++ b/drivers/base/regmap/Kconfig
@@ -13,3 +13,6 @@ config REGMAP_I2C
config REGMAP_SPI
tristate
+
+config REGMAP_IRQ
+ bool
diff --git a/drivers/base/regmap/Makefile b/drivers/base/regmap/Makefile
index 0573c8a9dacb..defd57963c84 100644
--- a/drivers/base/regmap/Makefile
+++ b/drivers/base/regmap/Makefile
@@ -1,4 +1,6 @@
-obj-$(CONFIG_REGMAP) += regmap.o regcache.o regcache-indexed.o regcache-rbtree.o regcache-lzo.o
+obj-$(CONFIG_REGMAP) += regmap.o regcache.o
+obj-$(CONFIG_REGMAP) += regcache-rbtree.o regcache-lzo.o
obj-$(CONFIG_DEBUG_FS) += regmap-debugfs.o
obj-$(CONFIG_REGMAP_I2C) += regmap-i2c.o
obj-$(CONFIG_REGMAP_SPI) += regmap-spi.o
+obj-$(CONFIG_REGMAP_IRQ) += regmap-irq.o
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index 348ff02eb93e..1a02b7537c8b 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -74,6 +74,7 @@ struct regmap {
struct reg_default *reg_defaults;
const void *reg_defaults_raw;
void *cache;
+ bool cache_dirty;
};
struct regcache_ops {
@@ -105,7 +106,7 @@ static inline void regmap_debugfs_exit(struct regmap *map) { }
#endif
/* regcache core declarations */
-int regcache_init(struct regmap *map);
+int regcache_init(struct regmap *map, const struct regmap_config *config);
void regcache_exit(struct regmap *map);
int regcache_read(struct regmap *map,
unsigned int reg, unsigned int *value);
@@ -118,10 +119,7 @@ unsigned int regcache_get_val(const void *base, unsigned int idx,
bool regcache_set_val(void *base, unsigned int idx,
unsigned int val, unsigned int word_size);
int regcache_lookup_reg(struct regmap *map, unsigned int reg);
-int regcache_insert_reg(struct regmap *map, unsigned int reg,
- unsigned int val);
-extern struct regcache_ops regcache_indexed_ops;
extern struct regcache_ops regcache_rbtree_ops;
extern struct regcache_ops regcache_lzo_ops;
diff --git a/drivers/base/regmap/regcache-indexed.c b/drivers/base/regmap/regcache-indexed.c
deleted file mode 100644
index 507731ad8ec1..000000000000
--- a/drivers/base/regmap/regcache-indexed.c
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Register cache access API - indexed caching support
- *
- * Copyright 2011 Wolfson Microelectronics plc
- *
- * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/slab.h>
-
-#include "internal.h"
-
-static int regcache_indexed_read(struct regmap *map, unsigned int reg,
- unsigned int *value)
-{
- int ret;
-
- ret = regcache_lookup_reg(map, reg);
- if (ret >= 0)
- *value = map->reg_defaults[ret].def;
-
- return ret;
-}
-
-static int regcache_indexed_write(struct regmap *map, unsigned int reg,
- unsigned int value)
-{
- int ret;
-
- ret = regcache_lookup_reg(map, reg);
- if (ret < 0)
- return regcache_insert_reg(map, reg, value);
- map->reg_defaults[ret].def = value;
- return 0;
-}
-
-static int regcache_indexed_sync(struct regmap *map)
-{
- unsigned int i;
- int ret;
-
- for (i = 0; i < map->num_reg_defaults; i++) {
- ret = _regmap_write(map, map->reg_defaults[i].reg,
- map->reg_defaults[i].def);
- if (ret < 0)
- return ret;
- dev_dbg(map->dev, "Synced register %#x, value %#x\n",
- map->reg_defaults[i].reg,
- map->reg_defaults[i].def);
- }
- return 0;
-}
-
-struct regcache_ops regcache_indexed_ops = {
- .type = REGCACHE_INDEXED,
- .name = "indexed",
- .read = regcache_indexed_read,
- .write = regcache_indexed_write,
- .sync = regcache_indexed_sync
-};
diff --git a/drivers/base/regmap/regcache-lzo.c b/drivers/base/regmap/regcache-lzo.c
index 066aeece3626..b7d16143edeb 100644
--- a/drivers/base/regmap/regcache-lzo.c
+++ b/drivers/base/regmap/regcache-lzo.c
@@ -15,6 +15,8 @@
#include "internal.h"
+static int regcache_lzo_exit(struct regmap *map);
+
struct regcache_lzo_ctx {
void *wmem;
void *dst;
@@ -27,7 +29,7 @@ struct regcache_lzo_ctx {
};
#define LZO_BLOCK_NUM 8
-static int regcache_lzo_block_count(void)
+static int regcache_lzo_block_count(struct regmap *map)
{
return LZO_BLOCK_NUM;
}
@@ -106,19 +108,22 @@ static inline int regcache_lzo_get_blkindex(struct regmap *map,
unsigned int reg)
{
return (reg * map->cache_word_size) /
- DIV_ROUND_UP(map->cache_size_raw, regcache_lzo_block_count());
+ DIV_ROUND_UP(map->cache_size_raw,
+ regcache_lzo_block_count(map));
}
static inline int regcache_lzo_get_blkpos(struct regmap *map,
unsigned int reg)
{
- return reg % (DIV_ROUND_UP(map->cache_size_raw, regcache_lzo_block_count()) /
+ return reg % (DIV_ROUND_UP(map->cache_size_raw,
+ regcache_lzo_block_count(map)) /
map->cache_word_size);
}
static inline int regcache_lzo_get_blksize(struct regmap *map)
{
- return DIV_ROUND_UP(map->cache_size_raw, regcache_lzo_block_count());
+ return DIV_ROUND_UP(map->cache_size_raw,
+ regcache_lzo_block_count(map));
}
static int regcache_lzo_init(struct regmap *map)
@@ -131,7 +136,7 @@ static int regcache_lzo_init(struct regmap *map)
ret = 0;
- blkcount = regcache_lzo_block_count();
+ blkcount = regcache_lzo_block_count(map);
map->cache = kzalloc(blkcount * sizeof *lzo_blocks,
GFP_KERNEL);
if (!map->cache)
@@ -190,7 +195,7 @@ static int regcache_lzo_init(struct regmap *map)
return 0;
err:
- regcache_exit(map);
+ regcache_lzo_exit(map);
return ret;
}
@@ -203,7 +208,7 @@ static int regcache_lzo_exit(struct regmap *map)
if (!lzo_blocks)
return 0;
- blkcount = regcache_lzo_block_count();
+ blkcount = regcache_lzo_block_count(map);
/*
* the pointer to the bitmap used for syncing the cache
* is shared amongst all lzo_blocks. Ensure it is freed
@@ -351,7 +356,7 @@ static int regcache_lzo_sync(struct regmap *map)
}
struct regcache_ops regcache_lzo_ops = {
- .type = REGCACHE_LZO,
+ .type = REGCACHE_COMPRESSED,
.name = "lzo",
.init = regcache_lzo_init,
.exit = regcache_lzo_exit,
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index e31498499b0f..32620c4f1683 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -11,12 +11,15 @@
*/
#include <linux/slab.h>
+#include <linux/debugfs.h>
#include <linux/rbtree.h>
+#include <linux/seq_file.h>
#include "internal.h"
static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
unsigned int value);
+static int regcache_rbtree_exit(struct regmap *map);
struct regcache_rbtree_node {
/* the actual rbtree node holding this block */
@@ -124,6 +127,60 @@ static int regcache_rbtree_insert(struct rb_root *root,
return 1;
}
+#ifdef CONFIG_DEBUG_FS
+static int rbtree_show(struct seq_file *s, void *ignored)
+{
+ struct regmap *map = s->private;
+ struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
+ struct regcache_rbtree_node *n;
+ struct rb_node *node;
+ unsigned int base, top;
+ int nodes = 0;
+ int registers = 0;
+
+ mutex_lock(&map->lock);
+
+ for (node = rb_first(&rbtree_ctx->root); node != NULL;
+ node = rb_next(node)) {
+ n = container_of(node, struct regcache_rbtree_node, node);
+
+ regcache_rbtree_get_base_top_reg(n, &base, &top);
+ seq_printf(s, "%x-%x (%d)\n", base, top, top - base + 1);
+
+ nodes++;
+ registers += top - base + 1;
+ }
+
+ seq_printf(s, "%d nodes, %d registers, average %d registers\n",
+ nodes, registers, registers / nodes);
+
+ mutex_unlock(&map->lock);
+
+ return 0;
+}
+
+static int rbtree_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, rbtree_show, inode->i_private);
+}
+
+static const struct file_operations rbtree_fops = {
+ .open = rbtree_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void rbtree_debugfs_init(struct regmap *map)
+{
+ debugfs_create_file("rbtree", 0400, map->debugfs, map, &rbtree_fops);
+}
+#else
+static void rbtree_debugfs_init(struct regmap *map)
+{
+}
+#endif
+
static int regcache_rbtree_init(struct regmap *map)
{
struct regcache_rbtree_ctx *rbtree_ctx;
@@ -146,10 +203,12 @@ static int regcache_rbtree_init(struct regmap *map)
goto err;
}
+ rbtree_debugfs_init(map);
+
return 0;
err:
- regcache_exit(map);
+ regcache_rbtree_exit(map);
return ret;
}
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index 666f6f5011dc..1ead66186b7c 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -19,7 +19,6 @@
#include "internal.h"
static const struct regcache_ops *cache_types[] = {
- &regcache_indexed_ops,
&regcache_rbtree_ops,
&regcache_lzo_ops,
};
@@ -61,8 +60,10 @@ static int regcache_hw_init(struct regmap *map)
map->reg_defaults = kmalloc(count * sizeof(struct reg_default),
GFP_KERNEL);
- if (!map->reg_defaults)
- return -ENOMEM;
+ if (!map->reg_defaults) {
+ ret = -ENOMEM;
+ goto err_free;
+ }
/* fill the reg_defaults */
map->num_reg_defaults = count;
@@ -77,9 +78,15 @@ static int regcache_hw_init(struct regmap *map)
}
return 0;
+
+err_free:
+ if (map->cache_free)
+ kfree(map->reg_defaults_raw);
+
+ return ret;
}
-int regcache_init(struct regmap *map)
+int regcache_init(struct regmap *map, const struct regmap_config *config)
{
int ret;
int i;
@@ -100,6 +107,12 @@ int regcache_init(struct regmap *map)
return -EINVAL;
}
+ map->num_reg_defaults = config->num_reg_defaults;
+ map->num_reg_defaults_raw = config->num_reg_defaults_raw;
+ map->reg_defaults_raw = config->reg_defaults_raw;
+ map->cache_word_size = DIV_ROUND_UP(config->val_bits, 8);
+ map->cache_size_raw = map->cache_word_size * config->num_reg_defaults_raw;
+
map->cache = NULL;
map->cache_ops = cache_types[i];
@@ -112,10 +125,10 @@ int regcache_init(struct regmap *map)
* won't vanish from under us. We'll need to make
* a copy of it.
*/
- if (map->reg_defaults) {
+ if (config->reg_defaults) {
if (!map->num_reg_defaults)
return -EINVAL;
- tmp_buf = kmemdup(map->reg_defaults, map->num_reg_defaults *
+ tmp_buf = kmemdup(config->reg_defaults, map->num_reg_defaults *
sizeof(struct reg_default), GFP_KERNEL);
if (!tmp_buf)
return -ENOMEM;
@@ -136,9 +149,18 @@ int regcache_init(struct regmap *map)
if (map->cache_ops->init) {
dev_dbg(map->dev, "Initializing %s cache\n",
map->cache_ops->name);
- return map->cache_ops->init(map);
+ ret = map->cache_ops->init(map);
+ if (ret)
+ goto err_free;
}
return 0;
+
+err_free:
+ kfree(map->reg_defaults);
+ if (map->cache_free)
+ kfree(map->reg_defaults_raw);
+
+ return ret;
}
void regcache_exit(struct regmap *map)
@@ -171,16 +193,21 @@ void regcache_exit(struct regmap *map)
int regcache_read(struct regmap *map,
unsigned int reg, unsigned int *value)
{
+ int ret;
+
if (map->cache_type == REGCACHE_NONE)
return -ENOSYS;
BUG_ON(!map->cache_ops);
- if (!regmap_readable(map, reg))
- return -EIO;
+ if (!regmap_volatile(map, reg)) {
+ ret = map->cache_ops->read(map, reg, value);
- if (!regmap_volatile(map, reg))
- return map->cache_ops->read(map, reg, value);
+ if (ret == 0)
+ trace_regmap_reg_read_cache(map->dev, reg, *value);
+
+ return ret;
+ }
return -EINVAL;
}
@@ -241,6 +268,8 @@ int regcache_sync(struct regmap *map)
map->cache_ops->name);
name = map->cache_ops->name;
trace_regcache_sync(map->dev, name, "start");
+ if (!map->cache_dirty)
+ goto out;
if (map->cache_ops->sync) {
ret = map->cache_ops->sync(map);
} else {
@@ -291,6 +320,23 @@ void regcache_cache_only(struct regmap *map, bool enable)
EXPORT_SYMBOL_GPL(regcache_cache_only);
/**
+ * regcache_mark_dirty: Mark the register cache as dirty
+ *
+ * @map: map to mark
+ *
+ * Mark the register cache as dirty, for example due to the device
+ * having been powered down for suspend. If the cache is not marked
+ * as dirty then the cache sync will be suppressed.
+ */
+void regcache_mark_dirty(struct regmap *map)
+{
+ mutex_lock(&map->lock);
+ map->cache_dirty = true;
+ mutex_unlock(&map->lock);
+}
+EXPORT_SYMBOL_GPL(regcache_mark_dirty);
+
+/**
* regcache_cache_bypass: Put a register map into cache bypass mode
*
* @map: map to configure
@@ -381,22 +427,3 @@ int regcache_lookup_reg(struct regmap *map, unsigned int reg)
else
return -ENOENT;
}
-
-int regcache_insert_reg(struct regmap *map, unsigned int reg,
- unsigned int val)
-{
- void *tmp;
-
- tmp = krealloc(map->reg_defaults,
- (map->num_reg_defaults + 1) * sizeof(struct reg_default),
- GFP_KERNEL);
- if (!tmp)
- return -ENOMEM;
- map->reg_defaults = tmp;
- map->num_reg_defaults++;
- map->reg_defaults[map->num_reg_defaults - 1].reg = reg;
- map->reg_defaults[map->num_reg_defaults - 1].def = val;
- sort(map->reg_defaults, map->num_reg_defaults,
- sizeof(struct reg_default), regcache_default_cmp, NULL);
- return 0;
-}
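For orientation (not part of this diff): a minimal sketch of how a driver whose device loses register state while powered down might combine the new regcache_mark_dirty() with regcache_sync(). The example_* callbacks and keeping the regmap in drvdata are assumptions.

#include <linux/device.h>
#include <linux/regmap.h>

static int example_suspend(struct device *dev)
{
	struct regmap *map = dev_get_drvdata(dev);	/* assumed storage */

	regcache_cache_only(map, true);	/* queue further writes in the cache */
	regcache_mark_dirty(map);	/* the hardware will lose its registers */
	return 0;
}

static int example_resume(struct device *dev)
{
	struct regmap *map = dev_get_drvdata(dev);

	regcache_cache_only(map, false);
	return regcache_sync(map);	/* now skipped if the cache is clean */
}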
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
new file mode 100644
index 000000000000..428836fc5835
--- /dev/null
+++ b/drivers/base/regmap/regmap-irq.c
@@ -0,0 +1,302 @@
+/*
+ * regmap based irq_chip
+ *
+ * Copyright 2011 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/export.h>
+#include <linux/regmap.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+
+#include "internal.h"
+
+struct regmap_irq_chip_data {
+ struct mutex lock;
+
+ struct regmap *map;
+ struct regmap_irq_chip *chip;
+
+ int irq_base;
+
+ void *status_reg_buf;
+ unsigned int *status_buf;
+ unsigned int *mask_buf;
+ unsigned int *mask_buf_def;
+};
+
+static inline const
+struct regmap_irq *irq_to_regmap_irq(struct regmap_irq_chip_data *data,
+ int irq)
+{
+ return &data->chip->irqs[irq - data->irq_base];
+}
+
+static void regmap_irq_lock(struct irq_data *data)
+{
+ struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
+
+ mutex_lock(&d->lock);
+}
+
+static void regmap_irq_sync_unlock(struct irq_data *data)
+{
+ struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
+ int i, ret;
+
+ /*
+ * If there's been a change in the mask write it back to the
+ * hardware. We rely on the use of the regmap core cache to
+ * suppress pointless writes.
+ */
+ for (i = 0; i < d->chip->num_regs; i++) {
+ ret = regmap_update_bits(d->map, d->chip->mask_base + i,
+ d->mask_buf_def[i], d->mask_buf[i]);
+ if (ret != 0)
+ dev_err(d->map->dev, "Failed to sync masks in %x\n",
+ d->chip->mask_base + i);
+ }
+
+ mutex_unlock(&d->lock);
+}
+
+static void regmap_irq_enable(struct irq_data *data)
+{
+ struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
+ const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->irq);
+
+ d->mask_buf[irq_data->reg_offset] &= ~irq_data->mask;
+}
+
+static void regmap_irq_disable(struct irq_data *data)
+{
+ struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
+ const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->irq);
+
+ d->mask_buf[irq_data->reg_offset] |= irq_data->mask;
+}
+
+static struct irq_chip regmap_irq_chip = {
+ .name = "regmap",
+ .irq_bus_lock = regmap_irq_lock,
+ .irq_bus_sync_unlock = regmap_irq_sync_unlock,
+ .irq_disable = regmap_irq_disable,
+ .irq_enable = regmap_irq_enable,
+};
+
+static irqreturn_t regmap_irq_thread(int irq, void *d)
+{
+ struct regmap_irq_chip_data *data = d;
+ struct regmap_irq_chip *chip = data->chip;
+ struct regmap *map = data->map;
+ int ret, i;
+ u8 *buf8 = data->status_reg_buf;
+ u16 *buf16 = data->status_reg_buf;
+ u32 *buf32 = data->status_reg_buf;
+ bool handled = false;
+
+ ret = regmap_bulk_read(map, chip->status_base, data->status_reg_buf,
+ chip->num_regs);
+ if (ret != 0) {
+ dev_err(map->dev, "Failed to read IRQ status: %d\n", ret);
+ return IRQ_NONE;
+ }
+
+ /*
+ * Ignore masked IRQs and ack if we need to; we ack early so
+ * there is no race between handling and acknowledging the
+ * interrupt. We assume that typically few of the interrupts
+ * will fire simultaneously so don't worry about overhead from
+ * doing a write per register.
+ */
+ for (i = 0; i < data->chip->num_regs; i++) {
+ switch (map->format.val_bytes) {
+ case 1:
+ data->status_buf[i] = buf8[i];
+ break;
+ case 2:
+ data->status_buf[i] = buf16[i];
+ break;
+ case 4:
+ data->status_buf[i] = buf32[i];
+ break;
+ default:
+ BUG();
+ return IRQ_NONE;
+ }
+
+ data->status_buf[i] &= ~data->mask_buf[i];
+
+ if (data->status_buf[i] && chip->ack_base) {
+ ret = regmap_write(map, chip->ack_base + i,
+ data->status_buf[i]);
+ if (ret != 0)
+ dev_err(map->dev, "Failed to ack 0x%x: %d\n",
+ chip->ack_base + i, ret);
+ }
+ }
+
+ for (i = 0; i < chip->num_irqs; i++) {
+ if (data->status_buf[chip->irqs[i].reg_offset] &
+ chip->irqs[i].mask) {
+ handle_nested_irq(data->irq_base + i);
+ handled = true;
+ }
+ }
+
+ if (handled)
+ return IRQ_HANDLED;
+ else
+ return IRQ_NONE;
+}
+
+/**
+ * regmap_add_irq_chip(): Use standard regmap IRQ controller handling
+ *
+ * @map: The regmap for the device.
+ * @irq: The IRQ the device uses to signal interrupts.
+ * @irq_flags: The IRQF_ flags to use for the primary interrupt.
+ * @irq_base: Requested first virtual IRQ number (a negative value lets
+ *	irq_alloc_descs() pick the range dynamically).
+ * @chip: Configuration for the interrupt controller.
+ * @data: Runtime data structure for the controller, allocated on success.
+ *
+ * Returns 0 on success or an errno on failure.
+ *
+ * In order for this to be efficient the chip really should use a
+ * register cache. The chip driver is responsible for restoring the
+ * register values used by the IRQ controller over suspend and resume.
+ */
+int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
+ int irq_base, struct regmap_irq_chip *chip,
+ struct regmap_irq_chip_data **data)
+{
+ struct regmap_irq_chip_data *d;
+ int cur_irq, i;
+ int ret = -ENOMEM;
+
+ irq_base = irq_alloc_descs(irq_base, 0, chip->num_irqs, 0);
+ if (irq_base < 0) {
+ dev_warn(map->dev, "Failed to allocate IRQs: %d\n",
+ irq_base);
+ return irq_base;
+ }
+
+ d = kzalloc(sizeof(*d), GFP_KERNEL);
+ if (!d)
+ return -ENOMEM;
+
+ d->status_buf = kzalloc(sizeof(unsigned int) * chip->num_regs,
+ GFP_KERNEL);
+ if (!d->status_buf)
+ goto err_alloc;
+
+ d->status_reg_buf = kzalloc(map->format.val_bytes * chip->num_regs,
+ GFP_KERNEL);
+ if (!d->status_reg_buf)
+ goto err_alloc;
+
+ d->mask_buf = kzalloc(sizeof(unsigned int) * chip->num_regs,
+ GFP_KERNEL);
+ if (!d->mask_buf)
+ goto err_alloc;
+
+ d->mask_buf_def = kzalloc(sizeof(unsigned int) * chip->num_regs,
+ GFP_KERNEL);
+ if (!d->mask_buf_def)
+ goto err_alloc;
+
+ d->map = map;
+ d->chip = chip;
+ d->irq_base = irq_base;
+ mutex_init(&d->lock);
+
+ for (i = 0; i < chip->num_irqs; i++)
+ d->mask_buf_def[chip->irqs[i].reg_offset]
+ |= chip->irqs[i].mask;
+
+ /* Mask all the interrupts by default */
+ for (i = 0; i < chip->num_regs; i++) {
+ d->mask_buf[i] = d->mask_buf_def[i];
+ ret = regmap_write(map, chip->mask_base + i, d->mask_buf[i]);
+ if (ret != 0) {
+ dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
+ chip->mask_base + i, ret);
+ goto err_alloc;
+ }
+ }
+
+ /* Register them with genirq */
+ for (cur_irq = irq_base;
+ cur_irq < chip->num_irqs + irq_base;
+ cur_irq++) {
+ irq_set_chip_data(cur_irq, d);
+ irq_set_chip_and_handler(cur_irq, &regmap_irq_chip,
+ handle_edge_irq);
+ irq_set_nested_thread(cur_irq, 1);
+
+ /* ARM needs us to explicitly flag the IRQ as valid
+ * and will set it noprobe when we do so. */
+#ifdef CONFIG_ARM
+ set_irq_flags(cur_irq, IRQF_VALID);
+#else
+ irq_set_noprobe(cur_irq);
+#endif
+ }
+
+ ret = request_threaded_irq(irq, NULL, regmap_irq_thread, irq_flags,
+ chip->name, d);
+ if (ret != 0) {
+ dev_err(map->dev, "Failed to request IRQ %d: %d\n", irq, ret);
+ goto err_alloc;
+ }
+
+ return 0;
+
+err_alloc:
+ kfree(d->mask_buf_def);
+ kfree(d->mask_buf);
+ kfree(d->status_reg_buf);
+ kfree(d->status_buf);
+ kfree(d);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_add_irq_chip);
+
+/**
+ * regmap_del_irq_chip(): Stop interrupt handling for a regmap IRQ chip
+ *
+ * @irq: Primary IRQ for the device
+ * @d: regmap_irq_chip_data allocated by regmap_add_irq_chip()
+ */
+void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
+{
+ if (!d)
+ return;
+
+ free_irq(irq, d);
+ kfree(d->mask_buf_def);
+ kfree(d->mask_buf);
+ kfree(d->status_reg_buf);
+ kfree(d->status_buf);
+ kfree(d);
+}
+EXPORT_SYMBOL_GPL(regmap_del_irq_chip);
+
+/**
+ * regmap_irq_chip_get_base(): Retrieve interrupt base for a regmap IRQ chip
+ *
+ * Useful for drivers to request their own IRQs.
+ *
+ * @data: regmap_irq controller to operate on.
+ */
+int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data)
+{
+ return data->irq_base;
+}
+EXPORT_SYMBOL_GPL(regmap_irq_chip_get_base);
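A usage sketch for the new helper (not part of this diff): the register addresses, the example_* names and the IRQF_ trigger flags are assumptions; the structure fields are the ones dereferenced by regmap_irq_thread() above.

#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/regmap.h>

/* two interrupt sources sharing one status/mask/ack register (offset 0) */
static struct regmap_irq example_irqs[] = {
	{ .reg_offset = 0, .mask = BIT(0) },
	{ .reg_offset = 0, .mask = BIT(1) },
};

static struct regmap_irq_chip example_irq_chip = {
	.name		= "example",
	.status_base	= 0x10,		/* assumed register layout */
	.mask_base	= 0x11,
	.ack_base	= 0x12,
	.num_regs	= 1,
	.irqs		= example_irqs,
	.num_irqs	= ARRAY_SIZE(example_irqs),
};

static struct regmap_irq_chip_data *example_irq_data;

static int example_setup_irqs(struct regmap *map, int irq)
{
	/* a negative irq_base lets irq_alloc_descs() pick the range */
	return regmap_add_irq_chip(map, irq,
				   IRQF_TRIGGER_LOW | IRQF_ONESHOT, -1,
				   &example_irq_chip, &example_irq_data);
}

Sub-functions of the device can then request their individual interrupts relative to regmap_irq_chip_get_base(example_irq_data), and regmap_del_irq_chip(irq, example_irq_data) tears the setup down again on removal.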
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index bf441db1ee90..be10a4ff6609 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -64,6 +64,18 @@ bool regmap_precious(struct regmap *map, unsigned int reg)
return false;
}
+static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
+ unsigned int num)
+{
+ unsigned int i;
+
+ for (i = 0; i < num; i++)
+ if (!regmap_volatile(map, reg + i))
+ return false;
+
+ return true;
+}
+
static void regmap_format_4_12_write(struct regmap *map,
unsigned int reg, unsigned int val)
{
@@ -78,6 +90,16 @@ static void regmap_format_7_9_write(struct regmap *map,
*out = cpu_to_be16((reg << 9) | val);
}
+static void regmap_format_10_14_write(struct regmap *map,
+ unsigned int reg, unsigned int val)
+{
+ u8 *out = map->work_buf;
+
+ out[2] = val;
+ out[1] = (val >> 8) | (reg << 6);
+ out[0] = reg >> 2;
+}
+
static void regmap_format_8(void *buf, unsigned int val)
{
u8 *b = buf;
@@ -127,7 +149,7 @@ struct regmap *regmap_init(struct device *dev,
int ret = -EINVAL;
if (!bus || !config)
- return NULL;
+ goto err;
map = kzalloc(sizeof(*map), GFP_KERNEL);
if (map == NULL) {
@@ -147,12 +169,6 @@ struct regmap *regmap_init(struct device *dev,
map->volatile_reg = config->volatile_reg;
map->precious_reg = config->precious_reg;
map->cache_type = config->cache_type;
- map->reg_defaults = config->reg_defaults;
- map->num_reg_defaults = config->num_reg_defaults;
- map->num_reg_defaults_raw = config->num_reg_defaults_raw;
- map->reg_defaults_raw = config->reg_defaults_raw;
- map->cache_size_raw = (config->val_bits / 8) * config->num_reg_defaults_raw;
- map->cache_word_size = config->val_bits / 8;
if (config->read_flag_mask || config->write_flag_mask) {
map->read_flag_mask = config->read_flag_mask;
@@ -182,6 +198,16 @@ struct regmap *regmap_init(struct device *dev,
}
break;
+ case 10:
+ switch (config->val_bits) {
+ case 14:
+ map->format.format_write = regmap_format_10_14_write;
+ break;
+ default:
+ goto err_map;
+ }
+ break;
+
case 8:
map->format.format_reg = regmap_format_8;
break;
@@ -215,14 +241,16 @@ struct regmap *regmap_init(struct device *dev,
goto err_map;
}
- ret = regcache_init(map);
- if (ret < 0)
- goto err_map;
-
regmap_debugfs_init(map);
+ ret = regcache_init(map, config);
+ if (ret < 0)
+ goto err_free_workbuf;
+
return map;
+err_free_workbuf:
+ kfree(map->work_buf);
err_map:
kfree(map);
err:
@@ -231,6 +259,39 @@ err:
EXPORT_SYMBOL_GPL(regmap_init);
/**
+ * regmap_reinit_cache(): Reinitialise the current register cache
+ *
+ * @map: Register map to operate on.
+ * @config: New configuration. Only the cache data will be used.
+ *
+ * Discard any existing register cache for the map and initialize a
+ * new cache. This can be used to restore the cache to defaults or to
+ * update the cache configuration to reflect runtime discovery of the
+ * hardware.
+ */
+int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
+{
+ int ret;
+
+ mutex_lock(&map->lock);
+
+ regcache_exit(map);
+
+ map->max_register = config->max_register;
+ map->writeable_reg = config->writeable_reg;
+ map->readable_reg = config->readable_reg;
+ map->volatile_reg = config->volatile_reg;
+ map->precious_reg = config->precious_reg;
+ map->cache_type = config->cache_type;
+
+ ret = regcache_init(map, config);
+
+ mutex_unlock(&map->lock);
+
+ return ret;
+}
+
+/**
* regmap_exit(): Free a previously allocated register map
*/
void regmap_exit(struct regmap *map)
@@ -306,8 +367,10 @@ int _regmap_write(struct regmap *map, unsigned int reg,
ret = regcache_write(map, reg, val);
if (ret != 0)
return ret;
- if (map->cache_only)
+ if (map->cache_only) {
+ map->cache_dirty = true;
return 0;
+ }
}
trace_regmap_reg_write(map->dev, reg, val);
@@ -375,9 +438,11 @@ EXPORT_SYMBOL_GPL(regmap_write);
int regmap_raw_write(struct regmap *map, unsigned int reg,
const void *val, size_t val_len)
{
+ size_t val_count = val_len / map->format.val_bytes;
int ret;
- WARN_ON(map->cache_type != REGCACHE_NONE);
+ WARN_ON(!regmap_volatile_range(map, reg, val_count) &&
+ map->cache_type != REGCACHE_NONE);
mutex_lock(&map->lock);
@@ -422,15 +487,15 @@ static int _regmap_read(struct regmap *map, unsigned int reg,
{
int ret;
- if (!map->format.parse_val)
- return -EINVAL;
-
if (!map->cache_bypass) {
ret = regcache_read(map, reg, val);
if (ret == 0)
return 0;
}
+ if (!map->format.parse_val)
+ return -EINVAL;
+
if (map->cache_only)
return -EBUSY;
@@ -481,15 +546,11 @@ EXPORT_SYMBOL_GPL(regmap_read);
int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
size_t val_len)
{
+ size_t val_count = val_len / map->format.val_bytes;
int ret;
- int i;
- bool vol = true;
- for (i = 0; i < val_len / map->format.val_bytes; i++)
- if (!regmap_volatile(map, reg + i))
- vol = false;
-
- WARN_ON(!vol && map->cache_type != REGCACHE_NONE);
+ WARN_ON(!regmap_volatile_range(map, reg, val_count) &&
+ map->cache_type != REGCACHE_NONE);
mutex_lock(&map->lock);
@@ -517,16 +578,11 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
{
int ret, i;
size_t val_bytes = map->format.val_bytes;
- bool vol = true;
+ bool vol = regmap_volatile_range(map, reg, val_count);
if (!map->format.parse_val)
return -EINVAL;
- /* Is this a block of volatile registers? */
- for (i = 0; i < val_count; i++)
- if (!regmap_volatile(map, reg + i))
- vol = false;
-
if (vol || map->cache_type == REGCACHE_NONE) {
ret = regmap_raw_read(map, reg, val, val_bytes * val_count);
if (ret != 0)
@@ -546,40 +602,73 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
}
EXPORT_SYMBOL_GPL(regmap_bulk_read);
-/**
- * remap_update_bits: Perform a read/modify/write cycle on the register map
- *
- * @map: Register map to update
- * @reg: Register to update
- * @mask: Bitmask to change
- * @val: New value for bitmask
- *
- * Returns zero for success, a negative number on error.
- */
-int regmap_update_bits(struct regmap *map, unsigned int reg,
- unsigned int mask, unsigned int val)
+static int _regmap_update_bits(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val,
+ bool *change)
{
int ret;
- unsigned int tmp;
+ unsigned int tmp, orig;
mutex_lock(&map->lock);
- ret = _regmap_read(map, reg, &tmp);
+ ret = _regmap_read(map, reg, &orig);
if (ret != 0)
goto out;
- tmp &= ~mask;
+ tmp = orig & ~mask;
tmp |= val & mask;
- ret = _regmap_write(map, reg, tmp);
+ if (tmp != orig) {
+ ret = _regmap_write(map, reg, tmp);
+ *change = true;
+ } else {
+ *change = false;
+ }
out:
mutex_unlock(&map->lock);
return ret;
}
+
+/**
+ * regmap_update_bits: Perform a read/modify/write cycle on the register map
+ *
+ * @map: Register map to update
+ * @reg: Register to update
+ * @mask: Bitmask to change
+ * @val: New value for bitmask
+ *
+ * Returns zero for success, a negative number on error.
+ */
+int regmap_update_bits(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ bool change;
+ return _regmap_update_bits(map, reg, mask, val, &change);
+}
EXPORT_SYMBOL_GPL(regmap_update_bits);
+/**
+ * regmap_update_bits_check: Perform a read/modify/write cycle on the
+ * register map and report if updated
+ *
+ * @map: Register map to update
+ * @reg: Register to update
+ * @mask: Bitmask to change
+ * @val: New value for bitmask
+ * @change: Boolean indicating if a write was done
+ *
+ * Returns zero for success, a negative number on error.
+ */
+int regmap_update_bits_check(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val,
+ bool *change)
+{
+ return _regmap_update_bits(map, reg, mask, val, change);
+}
+EXPORT_SYMBOL_GPL(regmap_update_bits_check);
+
static int __init regmap_initcall(void)
{
regmap_debugfs_initcall();
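A brief sketch of the new _check variant (not part of this diff); the register address, the bit and the settle delay are illustrative assumptions:

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/regmap.h>

static int example_enable_block(struct regmap *map)
{
	bool changed;
	int ret;

	/* set bit 2 of a hypothetical control register at 0x05 */
	ret = regmap_update_bits_check(map, 0x05, BIT(2), BIT(2), &changed);
	if (ret < 0)
		return ret;

	if (changed)
		msleep(5);	/* only wait if a write actually happened */

	return 0;
}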
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index f6f37a05a0c3..ae989c57cd5e 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -23,7 +23,6 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
-#include <linux/sysdev.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/cpu.h>
@@ -32,14 +31,14 @@
#include <linux/topology.h>
#define define_one_ro_named(_name, _func) \
-static SYSDEV_ATTR(_name, 0444, _func, NULL)
+ static DEVICE_ATTR(_name, 0444, _func, NULL)
#define define_one_ro(_name) \
-static SYSDEV_ATTR(_name, 0444, show_##_name, NULL)
+ static DEVICE_ATTR(_name, 0444, show_##_name, NULL)
#define define_id_show_func(name) \
-static ssize_t show_##name(struct sys_device *dev, \
- struct sysdev_attribute *attr, char *buf) \
+static ssize_t show_##name(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
{ \
unsigned int cpu = dev->id; \
return sprintf(buf, "%d\n", topology_##name(cpu)); \
@@ -65,16 +64,16 @@ static ssize_t show_cpumap(int type, const struct cpumask *mask, char *buf)
#ifdef arch_provides_topology_pointers
#define define_siblings_show_map(name) \
-static ssize_t show_##name(struct sys_device *dev, \
- struct sysdev_attribute *attr, char *buf) \
+static ssize_t show_##name(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
{ \
unsigned int cpu = dev->id; \
return show_cpumap(0, topology_##name(cpu), buf); \
}
#define define_siblings_show_list(name) \
-static ssize_t show_##name##_list(struct sys_device *dev, \
- struct sysdev_attribute *attr, \
+static ssize_t show_##name##_list(struct device *dev, \
+ struct device_attribute *attr, \
char *buf) \
{ \
unsigned int cpu = dev->id; \
@@ -83,15 +82,15 @@ static ssize_t show_##name##_list(struct sys_device *dev, \
#else
#define define_siblings_show_map(name) \
-static ssize_t show_##name(struct sys_device *dev, \
- struct sysdev_attribute *attr, char *buf) \
+static ssize_t show_##name(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
{ \
return show_cpumap(0, topology_##name(dev->id), buf); \
}
#define define_siblings_show_list(name) \
-static ssize_t show_##name##_list(struct sys_device *dev, \
- struct sysdev_attribute *attr, \
+static ssize_t show_##name##_list(struct device *dev, \
+ struct device_attribute *attr, \
char *buf) \
{ \
return show_cpumap(1, topology_##name(dev->id), buf); \
@@ -124,16 +123,16 @@ define_one_ro_named(book_siblings_list, show_book_cpumask_list);
#endif
static struct attribute *default_attrs[] = {
- &attr_physical_package_id.attr,
- &attr_core_id.attr,
- &attr_thread_siblings.attr,
- &attr_thread_siblings_list.attr,
- &attr_core_siblings.attr,
- &attr_core_siblings_list.attr,
+ &dev_attr_physical_package_id.attr,
+ &dev_attr_core_id.attr,
+ &dev_attr_thread_siblings.attr,
+ &dev_attr_thread_siblings_list.attr,
+ &dev_attr_core_siblings.attr,
+ &dev_attr_core_siblings_list.attr,
#ifdef CONFIG_SCHED_BOOK
- &attr_book_id.attr,
- &attr_book_siblings.attr,
- &attr_book_siblings_list.attr,
+ &dev_attr_book_id.attr,
+ &dev_attr_book_siblings.attr,
+ &dev_attr_book_siblings_list.attr,
#endif
NULL
};
@@ -146,16 +145,16 @@ static struct attribute_group topology_attr_group = {
/* Add/Remove cpu_topology interface for CPU device */
static int __cpuinit topology_add_dev(unsigned int cpu)
{
- struct sys_device *sys_dev = get_cpu_sysdev(cpu);
+ struct device *dev = get_cpu_device(cpu);
- return sysfs_create_group(&sys_dev->kobj, &topology_attr_group);
+ return sysfs_create_group(&dev->kobj, &topology_attr_group);
}
static void __cpuinit topology_remove_dev(unsigned int cpu)
{
- struct sys_device *sys_dev = get_cpu_sysdev(cpu);
+ struct device *dev = get_cpu_device(cpu);
- sysfs_remove_group(&sys_dev->kobj, &topology_attr_group);
+ sysfs_remove_group(&dev->kobj, &topology_attr_group);
}
static int __cpuinit topology_cpu_callback(struct notifier_block *nfb,
diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h
index 30a3085d3354..0def898a1d15 100644
--- a/drivers/bcma/bcma_private.h
+++ b/drivers/bcma/bcma_private.h
@@ -18,6 +18,10 @@ void bcma_bus_unregister(struct bcma_bus *bus);
int __init bcma_bus_early_register(struct bcma_bus *bus,
struct bcma_device *core_cc,
struct bcma_device *core_mips);
+#ifdef CONFIG_PM
+int bcma_bus_suspend(struct bcma_bus *bus);
+int bcma_bus_resume(struct bcma_bus *bus);
+#endif
/* scan.c */
int bcma_bus_scan(struct bcma_bus *bus);
diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c
index 1b51d8b7ac80..f59244e33971 100644
--- a/drivers/bcma/host_pci.c
+++ b/drivers/bcma/host_pci.c
@@ -21,48 +21,58 @@ static void bcma_host_pci_switch_core(struct bcma_device *core)
pr_debug("Switched to core: 0x%X\n", core->id.id);
}
-static u8 bcma_host_pci_read8(struct bcma_device *core, u16 offset)
+/* Provides access to the requested core. Returns the base offset that has to be
+ * used. It makes use of fixed windows when possible. */
+static u16 bcma_host_pci_provide_access_to_core(struct bcma_device *core)
{
+ switch (core->id.id) {
+ case BCMA_CORE_CHIPCOMMON:
+ return 3 * BCMA_CORE_SIZE;
+ case BCMA_CORE_PCIE:
+ return 2 * BCMA_CORE_SIZE;
+ }
+
if (core->bus->mapped_core != core)
bcma_host_pci_switch_core(core);
+ return 0;
+}
+
+static u8 bcma_host_pci_read8(struct bcma_device *core, u16 offset)
+{
+ offset += bcma_host_pci_provide_access_to_core(core);
return ioread8(core->bus->mmio + offset);
}
static u16 bcma_host_pci_read16(struct bcma_device *core, u16 offset)
{
- if (core->bus->mapped_core != core)
- bcma_host_pci_switch_core(core);
+ offset += bcma_host_pci_provide_access_to_core(core);
return ioread16(core->bus->mmio + offset);
}
static u32 bcma_host_pci_read32(struct bcma_device *core, u16 offset)
{
- if (core->bus->mapped_core != core)
- bcma_host_pci_switch_core(core);
+ offset += bcma_host_pci_provide_access_to_core(core);
return ioread32(core->bus->mmio + offset);
}
static void bcma_host_pci_write8(struct bcma_device *core, u16 offset,
u8 value)
{
- if (core->bus->mapped_core != core)
- bcma_host_pci_switch_core(core);
+ offset += bcma_host_pci_provide_access_to_core(core);
iowrite8(value, core->bus->mmio + offset);
}
static void bcma_host_pci_write16(struct bcma_device *core, u16 offset,
u16 value)
{
- if (core->bus->mapped_core != core)
- bcma_host_pci_switch_core(core);
+ offset += bcma_host_pci_provide_access_to_core(core);
iowrite16(value, core->bus->mmio + offset);
}
static void bcma_host_pci_write32(struct bcma_device *core, u16 offset,
u32 value)
{
- if (core->bus->mapped_core != core)
- bcma_host_pci_switch_core(core);
+ offset += bcma_host_pci_provide_access_to_core(core);
iowrite32(value, core->bus->mmio + offset);
}
@@ -224,6 +234,35 @@ static void bcma_host_pci_remove(struct pci_dev *dev)
pci_set_drvdata(dev, NULL);
}
+#ifdef CONFIG_PM
+static int bcma_host_pci_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct bcma_bus *bus = pci_get_drvdata(pdev);
+
+ bus->mapped_core = NULL;
+
+ return bcma_bus_suspend(bus);
+}
+
+static int bcma_host_pci_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct bcma_bus *bus = pci_get_drvdata(pdev);
+
+ return bcma_bus_resume(bus);
+}
+
+static SIMPLE_DEV_PM_OPS(bcma_pm_ops, bcma_host_pci_suspend,
+ bcma_host_pci_resume);
+#define BCMA_PM_OPS (&bcma_pm_ops)
+
+#else /* CONFIG_PM */
+
+#define BCMA_PM_OPS NULL
+
+#endif /* CONFIG_PM */
+
static DEFINE_PCI_DEVICE_TABLE(bcma_pci_bridge_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x0576) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4331) },
@@ -239,6 +278,7 @@ static struct pci_driver bcma_pci_bridge_driver = {
.id_table = bcma_pci_bridge_tbl,
.probe = bcma_host_pci_probe,
.remove = bcma_host_pci_remove,
+ .driver.pm = BCMA_PM_OPS,
};
int __init bcma_host_pci_init(void)
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index 70c84b951098..febbc0a1222a 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -240,6 +240,46 @@ int __init bcma_bus_early_register(struct bcma_bus *bus,
return 0;
}
+#ifdef CONFIG_PM
+int bcma_bus_suspend(struct bcma_bus *bus)
+{
+ struct bcma_device *core;
+
+ list_for_each_entry(core, &bus->cores, list) {
+ struct device_driver *drv = core->dev.driver;
+ if (drv) {
+ struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);
+ if (adrv->suspend)
+ adrv->suspend(core);
+ }
+ }
+ return 0;
+}
+
+int bcma_bus_resume(struct bcma_bus *bus)
+{
+ struct bcma_device *core;
+
+ /* Init CC core */
+ core = bcma_find_core(bus, BCMA_CORE_CHIPCOMMON);
+ if (core) {
+ bus->drv_cc.setup_done = false;
+ bcma_core_chipcommon_init(&bus->drv_cc);
+ }
+
+ list_for_each_entry(core, &bus->cores, list) {
+ struct device_driver *drv = core->dev.driver;
+ if (drv) {
+ struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);
+ if (adrv->resume)
+ adrv->resume(core);
+ }
+ }
+
+ return 0;
+}
+#endif
+
int __bcma_driver_register(struct bcma_driver *drv, struct module *owner)
{
drv->drv.name = drv->name;
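For orientation (not part of this diff): a bcma client driver can now provide per-core callbacks that the bus invokes from bcma_bus_suspend()/bcma_bus_resume(). The example_* names are assumptions, and the callback shape is inferred from the adrv->suspend(core)/adrv->resume(core) calls above.

#include <linux/bcma/bcma.h>

static int example_core_suspend(struct bcma_device *core)
{
	/* quiesce the core, e.g. mask its interrupt sources */
	return 0;
}

static int example_core_resume(struct bcma_device *core)
{
	/* reprogram any core registers lost while suspended */
	return 0;
}

static struct bcma_driver example_bcma_driver = {
	.name		= KBUILD_MODNAME,
	/* .id_table, .probe and .remove omitted for brevity */
	.suspend	= example_core_suspend,
	.resume		= example_core_resume,
};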
diff --git a/drivers/bcma/sprom.c b/drivers/bcma/sprom.c
index d7292390d236..6f230fb087c5 100644
--- a/drivers/bcma/sprom.c
+++ b/drivers/bcma/sprom.c
@@ -129,6 +129,9 @@ static void bcma_sprom_extract_r8(struct bcma_bus *bus, const u16 *sprom)
u16 v;
int i;
+ bus->sprom.revision = sprom[SSB_SPROMSIZE_WORDS_R4 - 1] &
+ SSB_SPROM_REVISION_REV;
+
for (i = 0; i < 3; i++) {
v = sprom[SPOFF(SSB_SPROM8_IL0MAC) + i];
*(((__be16 *)bus->sprom.il0mac) + i) = cpu_to_be16(v);
@@ -136,12 +139,70 @@ static void bcma_sprom_extract_r8(struct bcma_bus *bus, const u16 *sprom)
bus->sprom.board_rev = sprom[SPOFF(SSB_SPROM8_BOARDREV)];
+ bus->sprom.txpid2g[0] = (sprom[SPOFF(SSB_SPROM4_TXPID2G01)] &
+ SSB_SPROM4_TXPID2G0) >> SSB_SPROM4_TXPID2G0_SHIFT;
+ bus->sprom.txpid2g[1] = (sprom[SPOFF(SSB_SPROM4_TXPID2G01)] &
+ SSB_SPROM4_TXPID2G1) >> SSB_SPROM4_TXPID2G1_SHIFT;
+ bus->sprom.txpid2g[2] = (sprom[SPOFF(SSB_SPROM4_TXPID2G23)] &
+ SSB_SPROM4_TXPID2G2) >> SSB_SPROM4_TXPID2G2_SHIFT;
+ bus->sprom.txpid2g[3] = (sprom[SPOFF(SSB_SPROM4_TXPID2G23)] &
+ SSB_SPROM4_TXPID2G3) >> SSB_SPROM4_TXPID2G3_SHIFT;
+
+ bus->sprom.txpid5gl[0] = (sprom[SPOFF(SSB_SPROM4_TXPID5GL01)] &
+ SSB_SPROM4_TXPID5GL0) >> SSB_SPROM4_TXPID5GL0_SHIFT;
+ bus->sprom.txpid5gl[1] = (sprom[SPOFF(SSB_SPROM4_TXPID5GL01)] &
+ SSB_SPROM4_TXPID5GL1) >> SSB_SPROM4_TXPID5GL1_SHIFT;
+ bus->sprom.txpid5gl[2] = (sprom[SPOFF(SSB_SPROM4_TXPID5GL23)] &
+ SSB_SPROM4_TXPID5GL2) >> SSB_SPROM4_TXPID5GL2_SHIFT;
+ bus->sprom.txpid5gl[3] = (sprom[SPOFF(SSB_SPROM4_TXPID5GL23)] &
+ SSB_SPROM4_TXPID5GL3) >> SSB_SPROM4_TXPID5GL3_SHIFT;
+
+ bus->sprom.txpid5g[0] = (sprom[SPOFF(SSB_SPROM4_TXPID5G01)] &
+ SSB_SPROM4_TXPID5G0) >> SSB_SPROM4_TXPID5G0_SHIFT;
+ bus->sprom.txpid5g[1] = (sprom[SPOFF(SSB_SPROM4_TXPID5G01)] &
+ SSB_SPROM4_TXPID5G1) >> SSB_SPROM4_TXPID5G1_SHIFT;
+ bus->sprom.txpid5g[2] = (sprom[SPOFF(SSB_SPROM4_TXPID5G23)] &
+ SSB_SPROM4_TXPID5G2) >> SSB_SPROM4_TXPID5G2_SHIFT;
+ bus->sprom.txpid5g[3] = (sprom[SPOFF(SSB_SPROM4_TXPID5G23)] &
+ SSB_SPROM4_TXPID5G3) >> SSB_SPROM4_TXPID5G3_SHIFT;
+
+ bus->sprom.txpid5gh[0] = (sprom[SPOFF(SSB_SPROM4_TXPID5GH01)] &
+ SSB_SPROM4_TXPID5GH0) >> SSB_SPROM4_TXPID5GH0_SHIFT;
+ bus->sprom.txpid5gh[1] = (sprom[SPOFF(SSB_SPROM4_TXPID5GH01)] &
+ SSB_SPROM4_TXPID5GH1) >> SSB_SPROM4_TXPID5GH1_SHIFT;
+ bus->sprom.txpid5gh[2] = (sprom[SPOFF(SSB_SPROM4_TXPID5GH23)] &
+ SSB_SPROM4_TXPID5GH2) >> SSB_SPROM4_TXPID5GH2_SHIFT;
+ bus->sprom.txpid5gh[3] = (sprom[SPOFF(SSB_SPROM4_TXPID5GH23)] &
+ SSB_SPROM4_TXPID5GH3) >> SSB_SPROM4_TXPID5GH3_SHIFT;
+
bus->sprom.boardflags_lo = sprom[SPOFF(SSB_SPROM8_BFLLO)];
bus->sprom.boardflags_hi = sprom[SPOFF(SSB_SPROM8_BFLHI)];
bus->sprom.boardflags2_lo = sprom[SPOFF(SSB_SPROM8_BFL2LO)];
bus->sprom.boardflags2_hi = sprom[SPOFF(SSB_SPROM8_BFL2HI)];
bus->sprom.country_code = sprom[SPOFF(SSB_SPROM8_CCODE)];
+
+ bus->sprom.fem.ghz2.tssipos = (sprom[SPOFF(SSB_SPROM8_FEM2G)] &
+ SSB_SROM8_FEM_TSSIPOS) >> SSB_SROM8_FEM_TSSIPOS_SHIFT;
+ bus->sprom.fem.ghz2.extpa_gain = (sprom[SPOFF(SSB_SPROM8_FEM2G)] &
+ SSB_SROM8_FEM_EXTPA_GAIN) >> SSB_SROM8_FEM_EXTPA_GAIN_SHIFT;
+ bus->sprom.fem.ghz2.pdet_range = (sprom[SPOFF(SSB_SPROM8_FEM2G)] &
+ SSB_SROM8_FEM_PDET_RANGE) >> SSB_SROM8_FEM_PDET_RANGE_SHIFT;
+ bus->sprom.fem.ghz2.tr_iso = (sprom[SPOFF(SSB_SPROM8_FEM2G)] &
+ SSB_SROM8_FEM_TR_ISO) >> SSB_SROM8_FEM_TR_ISO_SHIFT;
+ bus->sprom.fem.ghz2.antswlut = (sprom[SPOFF(SSB_SPROM8_FEM2G)] &
+ SSB_SROM8_FEM_ANTSWLUT) >> SSB_SROM8_FEM_ANTSWLUT_SHIFT;
+
+ bus->sprom.fem.ghz5.tssipos = (sprom[SPOFF(SSB_SPROM8_FEM5G)] &
+ SSB_SROM8_FEM_TSSIPOS) >> SSB_SROM8_FEM_TSSIPOS_SHIFT;
+ bus->sprom.fem.ghz5.extpa_gain = (sprom[SPOFF(SSB_SPROM8_FEM5G)] &
+ SSB_SROM8_FEM_EXTPA_GAIN) >> SSB_SROM8_FEM_EXTPA_GAIN_SHIFT;
+ bus->sprom.fem.ghz5.pdet_range = (sprom[SPOFF(SSB_SPROM8_FEM5G)] &
+ SSB_SROM8_FEM_PDET_RANGE) >> SSB_SROM8_FEM_PDET_RANGE_SHIFT;
+ bus->sprom.fem.ghz5.tr_iso = (sprom[SPOFF(SSB_SPROM8_FEM5G)] &
+ SSB_SROM8_FEM_TR_ISO) >> SSB_SROM8_FEM_TR_ISO_SHIFT;
+ bus->sprom.fem.ghz5.antswlut = (sprom[SPOFF(SSB_SPROM8_FEM5G)] &
+ SSB_SROM8_FEM_ANTSWLUT) >> SSB_SROM8_FEM_ANTSWLUT_SHIFT;
}
int bcma_sprom_get(struct bcma_bus *bus)
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 6f07ec1c2f58..4e4c8a4a5fd3 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -116,6 +116,8 @@ config PARIDE
source "drivers/block/paride/Kconfig"
+source "drivers/block/mtip32xx/Kconfig"
+
config BLK_CPQ_DA
tristate "Compaq SMART2 support"
depends on PCI && VIRT_TO_BUS
@@ -315,6 +317,17 @@ config BLK_DEV_NBD
If unsure, say N.
+config BLK_DEV_NVME
+ tristate "NVM Express block device"
+ depends on PCI
+ ---help---
+ The NVM Express driver is for solid state drives directly
+ connected to the PCI or PCI Express bus. If you know you
+ don't have one of these, it is safe to answer N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called nvme.
+
config BLK_DEV_OSD
tristate "OSD object-as-blkdev support"
depends on SCSI_OSD_ULD
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index 76646e9a1c91..5b795059f8fb 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_XILINX_SYSACE) += xsysace.o
obj-$(CONFIG_CDROM_PKTCDVD) += pktcdvd.o
obj-$(CONFIG_MG_DISK) += mg_disk.o
obj-$(CONFIG_SUNVDC) += sunvdc.o
+obj-$(CONFIG_BLK_DEV_NVME) += nvme.o
obj-$(CONFIG_BLK_DEV_OSD) += osdblk.o
obj-$(CONFIG_BLK_DEV_UMEM) += umem.o
@@ -39,5 +40,6 @@ obj-$(CONFIG_XEN_BLKDEV_FRONTEND) += xen-blkfront.o
obj-$(CONFIG_XEN_BLKDEV_BACKEND) += xen-blkback/
obj-$(CONFIG_BLK_DEV_DRBD) += drbd/
obj-$(CONFIG_BLK_DEV_RBD) += rbd.o
+obj-$(CONFIG_BLK_DEV_PCIESSD_MTIP32XX) += mtip32xx/
swim_mod-y := swim.o swim_asm.o
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 8eba86bba599..386146d792d1 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -63,7 +63,7 @@
#include <linux/mutex.h>
#include <linux/amifdreg.h>
#include <linux/amifd.h>
-#include <linux/buffer_head.h>
+#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/interrupt.h>
diff --git a/drivers/block/aoe/aoechr.c b/drivers/block/aoe/aoechr.c
index 5f8e39c43ae5..e86d2062a164 100644
--- a/drivers/block/aoe/aoechr.c
+++ b/drivers/block/aoe/aoechr.c
@@ -270,7 +270,7 @@ static const struct file_operations aoe_fops = {
.llseek = noop_llseek,
};
-static char *aoe_devnode(struct device *dev, mode_t *mode)
+static char *aoe_devnode(struct device *dev, umode_t *mode)
{
return kasprintf(GFP_KERNEL, "etherd/%s", dev_name(dev));
}
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index d22119d49e53..ec246437f5a4 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -17,7 +17,7 @@
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/radix-tree.h>
-#include <linux/buffer_head.h> /* invalidate_bh_lrus() */
+#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
@@ -402,14 +402,13 @@ static int brd_ioctl(struct block_device *bdev, fmode_t mode,
error = -EBUSY;
if (bdev->bd_openers <= 1) {
/*
- * Invalidate the cache first, so it isn't written
- * back to the device.
+ * Kill the cache first, so it isn't written back to the
+ * device.
*
* Another thread might instantiate more buffercache here,
* but there is not much we can do to close that race.
*/
- invalidate_bh_lrus();
- truncate_inode_pages(bdev->bd_inode->i_mapping, 0);
+ kill_bdev(bdev);
brd_free_pages(brd);
error = 0;
}
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 8004ac30a7a8..b0f553b26d0f 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -1735,7 +1735,7 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
case CCISS_BIG_PASSTHRU:
return cciss_bigpassthru(h, argp);
- /* scsi_cmd_ioctl handles these, below, though some are not */
+ /* scsi_cmd_blk_ioctl handles these, below, though some are not */
/* very meaningful for cciss. SG_IO is the main one people want. */
case SG_GET_VERSION_NUM:
@@ -1746,9 +1746,9 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
case SG_EMULATED_HOST:
case SG_IO:
case SCSI_IOCTL_SEND_COMMAND:
- return scsi_cmd_ioctl(disk->queue, disk, mode, cmd, argp);
+ return scsi_cmd_blk_ioctl(bdev, mode, cmd, argp);
- /* scsi_cmd_ioctl would normally handle these, below, but */
+ /* scsi_cmd_blk_ioctl would normally handle these, below, but */
/* they aren't a good fit for cciss, as CD-ROMs are */
/* not supported, and we don't have any bus/target/lun */
/* which we present to the kernel. */
@@ -2601,6 +2601,8 @@ static int fill_cmd(ctlr_info_t *h, CommandList_struct *c, __u8 cmd, void *buff,
c->Request.Timeout = 0;
c->Request.CDB[0] = BMIC_WRITE;
c->Request.CDB[6] = BMIC_CACHE_FLUSH;
+ c->Request.CDB[7] = (size >> 8) & 0xFF;
+ c->Request.CDB[8] = size & 0xFF;
break;
case TEST_UNIT_READY:
c->Request.CDBLen = 6;
@@ -4880,7 +4882,7 @@ static int cciss_request_irq(ctlr_info_t *h,
{
if (h->msix_vector || h->msi_vector) {
if (!request_irq(h->intr[h->intr_mode], msixhandler,
- IRQF_DISABLED, h->devname, h))
+ 0, h->devname, h))
return 0;
dev_err(&h->pdev->dev, "Unable to get msi irq %d"
" for %s\n", h->intr[h->intr_mode],
@@ -4889,7 +4891,7 @@ static int cciss_request_irq(ctlr_info_t *h,
}
if (!request_irq(h->intr[h->intr_mode], intxhandler,
- IRQF_DISABLED, h->devname, h))
+ IRQF_SHARED, h->devname, h))
return 0;
dev_err(&h->pdev->dev, "Unable to get irq %d for %s\n",
h->intr[h->intr_mode], h->devname);
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 9cf20355ceec..8d680562ba73 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -59,8 +59,8 @@
/* module parameter, defined in drbd_main.c */
extern unsigned int minor_count;
-extern int disable_sendpage;
-extern int allow_oos;
+extern bool disable_sendpage;
+extern bool allow_oos;
extern unsigned int cn_idx;
#ifdef CONFIG_DRBD_FAULT_INJECTION
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 0358e55356c8..211fc44f84be 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -117,8 +117,8 @@ module_param(fault_devs, int, 0644);
/* module parameter, defined */
unsigned int minor_count = DRBD_MINOR_COUNT_DEF;
-int disable_sendpage;
-int allow_oos;
+bool disable_sendpage;
+bool allow_oos;
unsigned int cn_idx = CN_IDX_DRBD;
int proc_details; /* Detail level in proc drbd*/
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 9955a53733b2..510fb10ec45a 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -188,7 +188,6 @@ static int print_unex = 1;
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/mod_devicetable.h>
-#include <linux/buffer_head.h> /* for invalidate_buffers() */
#include <linux/mutex.h>
#include <linux/io.h>
#include <linux/uaccess.h>
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 68b205a9338f..f00257782fcc 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -69,7 +69,6 @@
#include <linux/freezer.h>
#include <linux/mutex.h>
#include <linux/writeback.h>
-#include <linux/buffer_head.h> /* for invalidate_bdev() */
#include <linux/completion.h>
#include <linux/highmem.h>
#include <linux/kthread.h>
@@ -422,7 +421,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
/*
* We use punch hole to reclaim the free space used by the
- * image a.k.a. discard. However we do support discard if
+ * image a.k.a. discard. However we do not support discard if
* encryption is enabled, because it may give an attacker
* useful information.
*/
@@ -797,7 +796,7 @@ static void loop_config_discard(struct loop_device *lo)
}
q->limits.discard_granularity = inode->i_sb->s_blocksize;
- q->limits.discard_alignment = inode->i_sb->s_blocksize;
+ q->limits.discard_alignment = 0;
q->limits.max_discard_sectors = UINT_MAX >> 9;
q->limits.discard_zeroes_data = 1;
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
diff --git a/drivers/block/mtip32xx/Kconfig b/drivers/block/mtip32xx/Kconfig
new file mode 100644
index 000000000000..b5dd14e072f2
--- /dev/null
+++ b/drivers/block/mtip32xx/Kconfig
@@ -0,0 +1,9 @@
+#
+# mtip32xx device driver configuration
+#
+
+config BLK_DEV_PCIESSD_MTIP32XX
+ tristate "Block Device Driver for Micron PCIe SSDs"
+ depends on HOTPLUG_PCI_PCIE
+ help
+ This enables the block driver for Micron PCIe SSDs.
diff --git a/drivers/block/mtip32xx/Makefile b/drivers/block/mtip32xx/Makefile
new file mode 100644
index 000000000000..4fbef8c8329b
--- /dev/null
+++ b/drivers/block/mtip32xx/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for Block device driver for Micron PCIe SSD
+#
+
+obj-$(CONFIG_BLK_DEV_PCIESSD_MTIP32XX) += mtip32xx.o
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
new file mode 100644
index 000000000000..b74eab70c3d0
--- /dev/null
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -0,0 +1,3651 @@
+/*
+ * Driver for the Micron P320 SSD
+ * Copyright (C) 2011 Micron Technology, Inc.
+ *
+ * Portions of this code were derived from works subjected to the
+ * following copyright:
+ * Copyright (C) 2009 Integrated Device Technology, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/ata.h>
+#include <linux/delay.h>
+#include <linux/hdreg.h>
+#include <linux/uaccess.h>
+#include <linux/random.h>
+#include <linux/smp.h>
+#include <linux/compat.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/genhd.h>
+#include <linux/blkdev.h>
+#include <linux/bio.h>
+#include <linux/dma-mapping.h>
+#include <linux/idr.h>
+#include <linux/kthread.h>
+#include <../drivers/ata/ahci.h>
+#include "mtip32xx.h"
+
+#define HW_CMD_SLOT_SZ (MTIP_MAX_COMMAND_SLOTS * 32)
+#define HW_CMD_TBL_SZ (AHCI_CMD_TBL_HDR_SZ + (MTIP_MAX_SG * 16))
+#define HW_CMD_TBL_AR_SZ (HW_CMD_TBL_SZ * MTIP_MAX_COMMAND_SLOTS)
+#define HW_PORT_PRIV_DMA_SZ \
+ (HW_CMD_SLOT_SZ + HW_CMD_TBL_AR_SZ + AHCI_RX_FIS_SZ)
+
+#define HOST_HSORG 0xFC
+#define HSORG_DISABLE_SLOTGRP_INTR (1<<24)
+#define HSORG_DISABLE_SLOTGRP_PXIS (1<<16)
+#define HSORG_HWREV 0xFF00
+#define HSORG_STYLE 0x8
+#define HSORG_SLOTGROUPS 0x7
+
+#define PORT_COMMAND_ISSUE 0x38
+#define PORT_SDBV 0x7C
+
+#define PORT_OFFSET 0x100
+#define PORT_MEM_SIZE 0x80
+
+#define PORT_IRQ_ERR \
+ (PORT_IRQ_HBUS_ERR | PORT_IRQ_IF_ERR | PORT_IRQ_CONNECT | \
+ PORT_IRQ_PHYRDY | PORT_IRQ_UNK_FIS | PORT_IRQ_BAD_PMP | \
+ PORT_IRQ_TF_ERR | PORT_IRQ_HBUS_DATA_ERR | PORT_IRQ_IF_NONFATAL | \
+ PORT_IRQ_OVERFLOW)
+#define PORT_IRQ_LEGACY \
+ (PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS)
+#define PORT_IRQ_HANDLED \
+ (PORT_IRQ_SDB_FIS | PORT_IRQ_LEGACY | \
+ PORT_IRQ_TF_ERR | PORT_IRQ_IF_ERR | \
+ PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)
+#define DEF_PORT_IRQ \
+ (PORT_IRQ_ERR | PORT_IRQ_LEGACY | PORT_IRQ_SDB_FIS)
+
+/* product numbers */
+#define MTIP_PRODUCT_UNKNOWN 0x00
+#define MTIP_PRODUCT_ASICFPGA 0x11
+
+/* Device instance number, incremented each time a device is probed. */
+static int instance;
+
+/*
+ * Global variable used to hold the major block device number
+ * allocated in mtip_init().
+ */
+static int mtip_major;
+
+static DEFINE_SPINLOCK(rssd_index_lock);
+static DEFINE_IDA(rssd_index_ida);
+
+static int mtip_block_initialize(struct driver_data *dd);
+
+#ifdef CONFIG_COMPAT
+struct mtip_compat_ide_task_request_s {
+ __u8 io_ports[8];
+ __u8 hob_ports[8];
+ ide_reg_valid_t out_flags;
+ ide_reg_valid_t in_flags;
+ int data_phase;
+ int req_cmd;
+ compat_ulong_t out_size;
+ compat_ulong_t in_size;
+};
+#endif
+
+/*
+ * This function, mtip_check_surprise_removal, is called when the card
+ * is removed from the system. It reads the vendor ID from the
+ * configuration space to check whether the device is still present.
+ *
+ * @pdev Pointer to the pci_dev structure.
+ *
+ * return value
+ * true if device removed, else false
+ */
+static bool mtip_check_surprise_removal(struct pci_dev *pdev)
+{
+ u16 vendor_id = 0;
+
+ /* Read the vendorID from the configuration space */
+ pci_read_config_word(pdev, 0x00, &vendor_id);
+ if (vendor_id == 0xFFFF)
+ return true; /* device removed */
+
+ return false; /* device present */
+}
+
+/*
+ * This function is called to clean up any pending commands in the
+ * command slots when the device is surprise-removed, returning an
+ * error to the upper layer for each outstanding command.
+ *
+ * @dd Pointer to the DRIVER_DATA structure.
+ *
+ * return value
+ * None
+ */
+static void mtip_command_cleanup(struct driver_data *dd)
+{
+ int group = 0, commandslot = 0, commandindex = 0;
+ struct mtip_cmd *command;
+ struct mtip_port *port = dd->port;
+
+ for (group = 0; group < 4; group++) {
+ for (commandslot = 0; commandslot < 32; commandslot++) {
+ if (!(port->allocated[group] & (1 << commandslot)))
+ continue;
+
+ commandindex = group << 5 | commandslot;
+ command = &port->commands[commandindex];
+
+ if (atomic_read(&command->active)
+ && (command->async_callback)) {
+ command->async_callback(command->async_data,
+ -ENODEV);
+ command->async_callback = NULL;
+ command->async_data = NULL;
+ }
+
+ dma_unmap_sg(&port->dd->pdev->dev,
+ command->sg,
+ command->scatter_ents,
+ command->direction);
+ }
+ }
+
+ up(&port->cmd_slot);
+
+ atomic_set(&dd->drv_cleanup_done, true);
+}
+
+/*
+ * Obtain an empty command slot.
+ *
+ * This function needs to be reentrant since it could be called
+ * at the same time on multiple CPUs. The allocation of the
+ * command slot must be atomic.
+ *
+ * @port Pointer to the port data structure.
+ *
+ * return value
+ * >= 0 Index of command slot obtained.
+ * -1 No command slots available.
+ */
+static int get_slot(struct mtip_port *port)
+{
+ int slot, i;
+ unsigned int num_command_slots = port->dd->slot_groups * 32;
+
+ /*
+ * That's ok, because it's still cheaper than a lock.
+ *
+ * Race: since this section is not protected by a lock, the same bit
+ * could be chosen by different process contexts running on different
+ * processors. Instead of a costly lock, we simply retry in a loop.
+ * with loop.
+ */
+ for (i = 0; i < 10; i++) {
+ slot = find_next_zero_bit(port->allocated,
+ num_command_slots, 1);
+ if ((slot < num_command_slots) &&
+ (!test_and_set_bit(slot, port->allocated)))
+ return slot;
+ }
+ dev_warn(&port->dd->pdev->dev, "Failed to get a tag.\n");
+
+ if (mtip_check_surprise_removal(port->dd->pdev)) {
+ /* Device not present, clean outstanding commands */
+ mtip_command_cleanup(port->dd);
+ }
+ return -1;
+}
+
+/*
+ * Release a command slot.
+ *
+ * @port Pointer to the port data structure.
+ * @tag Tag of command to release
+ *
+ * return value
+ * None
+ */
+static inline void release_slot(struct mtip_port *port, int tag)
+{
+ smp_mb__before_clear_bit();
+ clear_bit(tag, port->allocated);
+ smp_mb__after_clear_bit();
+}
+
+/*
+ * Reset the HBA (without sleeping)
+ *
+ * Just like hba_reset, except it does not sleep, so it can be
+ * run from interrupt/tasklet context.
+ *
+ * @dd Pointer to the driver data structure.
+ *
+ * return value
+ * 0 The reset was successful.
+ * -1 The HBA Reset bit did not clear.
+ */
+static int hba_reset_nosleep(struct driver_data *dd)
+{
+ unsigned long timeout;
+
+ /* Chip quirk: quiesce any chip function */
+ mdelay(10);
+
+ /* Set the reset bit */
+ writel(HOST_RESET, dd->mmio + HOST_CTL);
+
+ /* Flush */
+ readl(dd->mmio + HOST_CTL);
+
+ /*
+ * Wait 10ms then spin for up to 1 second
+ * waiting for reset acknowledgement
+ */
+ timeout = jiffies + msecs_to_jiffies(1000);
+ mdelay(10);
+ while ((readl(dd->mmio + HOST_CTL) & HOST_RESET)
+ && time_before(jiffies, timeout))
+ mdelay(1);
+
+ if (readl(dd->mmio + HOST_CTL) & HOST_RESET)
+ return -1;
+
+ return 0;
+}
+
+/*
+ * Issue a command to the hardware.
+ *
+ * Set the appropriate bit in the s_active and Command Issue hardware
+ * registers, causing hardware command processing to begin.
+ *
+ * @port Pointer to the port structure.
+ * @tag The tag of the command to be issued.
+ *
+ * return value
+ * None
+ */
+static inline void mtip_issue_ncq_command(struct mtip_port *port, int tag)
+{
+ unsigned long flags = 0;
+
+ atomic_set(&port->commands[tag].active, 1);
+
+ spin_lock_irqsave(&port->cmd_issue_lock, flags);
+
+ writel((1 << MTIP_TAG_BIT(tag)),
+ port->s_active[MTIP_TAG_INDEX(tag)]);
+ writel((1 << MTIP_TAG_BIT(tag)),
+ port->cmd_issue[MTIP_TAG_INDEX(tag)]);
+
+ spin_unlock_irqrestore(&port->cmd_issue_lock, flags);
+}
+
+/*
+ * Enable/disable the reception of FIS
+ *
+ * @port Pointer to the port data structure
+ * @enable 1 to enable, 0 to disable
+ *
+ * return value
+ * Previous state: 1 enabled, 0 disabled
+ */
+static int mtip_enable_fis(struct mtip_port *port, int enable)
+{
+ u32 tmp;
+
+ /* enable FIS reception */
+ tmp = readl(port->mmio + PORT_CMD);
+ if (enable)
+ writel(tmp | PORT_CMD_FIS_RX, port->mmio + PORT_CMD);
+ else
+ writel(tmp & ~PORT_CMD_FIS_RX, port->mmio + PORT_CMD);
+
+ /* Flush */
+ readl(port->mmio + PORT_CMD);
+
+ return (((tmp & PORT_CMD_FIS_RX) == PORT_CMD_FIS_RX));
+}
+
+/*
+ * Enable/disable the DMA engine
+ *
+ * @port Pointer to the port data structure
+ * @enable 1 to enable, 0 to disable
+ *
+ * return value
+ * Previous state: 1 enabled, 0 disabled.
+ */
+static int mtip_enable_engine(struct mtip_port *port, int enable)
+{
+ u32 tmp;
+
+ /* enable/disable the DMA engine */
+ tmp = readl(port->mmio + PORT_CMD);
+ if (enable)
+ writel(tmp | PORT_CMD_START, port->mmio + PORT_CMD);
+ else
+ writel(tmp & ~PORT_CMD_START, port->mmio + PORT_CMD);
+
+ readl(port->mmio + PORT_CMD);
+ return (((tmp & PORT_CMD_START) == PORT_CMD_START));
+}
+
+/*
+ * Enables the port DMA engine and FIS reception.
+ *
+ * return value
+ * None
+ */
+static inline void mtip_start_port(struct mtip_port *port)
+{
+ /* Enable FIS reception */
+ mtip_enable_fis(port, 1);
+
+ /* Enable the DMA engine */
+ mtip_enable_engine(port, 1);
+}
+
+/*
+ * Deinitialize a port by disabling port interrupts, the DMA engine,
+ * and FIS reception.
+ *
+ * @port Pointer to the port structure
+ *
+ * return value
+ * None
+ */
+static inline void mtip_deinit_port(struct mtip_port *port)
+{
+ /* Disable interrupts on this port */
+ writel(0, port->mmio + PORT_IRQ_MASK);
+
+ /* Disable the DMA engine */
+ mtip_enable_engine(port, 0);
+
+ /* Disable FIS reception */
+ mtip_enable_fis(port, 0);
+}
+
+/*
+ * Initialize a port.
+ *
+ * This function deinitializes the port by calling mtip_deinit_port() and
+ * then initializes it by setting the command header and RX FIS addresses,
+ * clearing the SError register and any pending port interrupts before
+ * re-enabling the default set of port interrupts.
+ *
+ * @port Pointer to the port structure.
+ *
+ * return value
+ * None
+ */
+static void mtip_init_port(struct mtip_port *port)
+{
+ int i;
+ mtip_deinit_port(port);
+
+ /* Program the command list base and FIS base addresses */
+ if (readl(port->dd->mmio + HOST_CAP) & HOST_CAP_64) {
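+ /*
+ * Program the upper 32 bits of each address. The two 16-bit
+ * shifts avoid an oversized-shift warning when dma_addr_t is
+ * only 32 bits wide.
+ */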
+ writel((port->command_list_dma >> 16) >> 16,
+ port->mmio + PORT_LST_ADDR_HI);
+ writel((port->rxfis_dma >> 16) >> 16,
+ port->mmio + PORT_FIS_ADDR_HI);
+ }
+
+ writel(port->command_list_dma & 0xFFFFFFFF,
+ port->mmio + PORT_LST_ADDR);
+ writel(port->rxfis_dma & 0xFFFFFFFF, port->mmio + PORT_FIS_ADDR);
+
+ /* Clear SError */
+ writel(readl(port->mmio + PORT_SCR_ERR), port->mmio + PORT_SCR_ERR);
+
+ /* reset the completed registers.*/
+ for (i = 0; i < port->dd->slot_groups; i++)
+ writel(0xFFFFFFFF, port->completed[i]);
+
+ /* Clear any pending interrupts for this port */
+ writel(readl(port->mmio + PORT_IRQ_STAT), port->mmio + PORT_IRQ_STAT);
+
+ /* Enable port interrupts */
+ writel(DEF_PORT_IRQ, port->mmio + PORT_IRQ_MASK);
+}
+
+/*
+ * Restart a port
+ *
+ * @port Pointer to the port data structure.
+ *
+ * return value
+ * None
+ */
+static void mtip_restart_port(struct mtip_port *port)
+{
+ unsigned long timeout;
+
+ /* Disable the DMA engine */
+ mtip_enable_engine(port, 0);
+
+ /* Chip quirk: wait up to 500ms for PxCMD.CR == 0 */
+ timeout = jiffies + msecs_to_jiffies(500);
+ while ((readl(port->mmio + PORT_CMD) & PORT_CMD_LIST_ON)
+ && time_before(jiffies, timeout))
+ ;
+
+ /*
+ * Chip quirk: escalate to hba reset if
+ * PxCMD.CR not clear after 500 ms
+ */
+ if (readl(port->mmio + PORT_CMD) & PORT_CMD_LIST_ON) {
+ dev_warn(&port->dd->pdev->dev,
+ "PxCMD.CR not clear, escalating reset\n");
+
+ if (hba_reset_nosleep(port->dd))
+ dev_err(&port->dd->pdev->dev,
+ "HBA reset escalation failed.\n");
+
+ /* 30 ms delay before com reset to quiesce chip */
+ mdelay(30);
+ }
+
+ dev_warn(&port->dd->pdev->dev, "Issuing COM reset\n");
+
+ /* Set PxSCTL.DET */
+ writel(readl(port->mmio + PORT_SCR_CTL) |
+ 1, port->mmio + PORT_SCR_CTL);
+ readl(port->mmio + PORT_SCR_CTL);
+
+ /* Wait 1 ms to quiesce chip function */
+ timeout = jiffies + msecs_to_jiffies(1);
+ while (time_before(jiffies, timeout))
+ ;
+
+ /* Clear PxSCTL.DET */
+ writel(readl(port->mmio + PORT_SCR_CTL) & ~1,
+ port->mmio + PORT_SCR_CTL);
+ readl(port->mmio + PORT_SCR_CTL);
+
+ /* Wait 500 ms for bit 0 of PORT_SCR_STS to be set */
+ timeout = jiffies + msecs_to_jiffies(500);
+ while (((readl(port->mmio + PORT_SCR_STAT) & 0x01) == 0)
+ && time_before(jiffies, timeout))
+ ;
+
+ if ((readl(port->mmio + PORT_SCR_STAT) & 0x01) == 0)
+ dev_warn(&port->dd->pdev->dev,
+ "COM reset failed\n");
+
+ /* Clear SError; PxSERR.DIAG.x should be set after the COM reset, so clear it */
+ writel(readl(port->mmio + PORT_SCR_ERR), port->mmio + PORT_SCR_ERR);
+
+ /* Enable the DMA engine */
+ mtip_enable_engine(port, 1);
+}
+
+/*
+ * Called periodically to see if any read/write commands are
+ * taking too long to complete.
+ *
+ * @data Pointer to the PORT data structure.
+ *
+ * return value
+ * None
+ */
+static void mtip_timeout_function(unsigned long int data)
+{
+ struct mtip_port *port = (struct mtip_port *) data;
+ struct host_to_dev_fis *fis;
+ struct mtip_cmd *command;
+ int tag, cmdto_cnt = 0;
+ unsigned int bit, group;
+ unsigned int num_command_slots;
+
+ if (unlikely(!port))
+ return;
+
+ /* Only dereference port once it is known to be valid */
+ num_command_slots = port->dd->slot_groups * 32;
+
+ if (atomic_read(&port->dd->resumeflag) == true) {
+ mod_timer(&port->cmd_timer,
+ jiffies + msecs_to_jiffies(30000));
+ return;
+ }
+
+ for (tag = 0; tag < num_command_slots; tag++) {
+ /*
+ * Skip internal command slot as it has
+ * its own timeout mechanism
+ */
+ if (tag == MTIP_TAG_INTERNAL)
+ continue;
+
+ if (atomic_read(&port->commands[tag].active) &&
+ (time_after(jiffies, port->commands[tag].comp_time))) {
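+ /* 32 command slots per group: group = tag / 32, bit = tag % 32 */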
+ group = tag >> 5;
+ bit = tag & 0x1F;
+
+ command = &port->commands[tag];
+ fis = (struct host_to_dev_fis *) command->command;
+
+ dev_warn(&port->dd->pdev->dev,
+ "Timeout for command tag %d\n", tag);
+
+ cmdto_cnt++;
+ if (cmdto_cnt == 1)
+ set_bit(MTIP_FLAG_EH_ACTIVE_BIT, &port->flags);
+
+ /*
+ * Clear the completed bit. This should prevent
+ * any interrupt handlers from trying to retire
+ * the command.
+ */
+ writel(1 << bit, port->completed[group]);
+
+ /* Call the async completion callback. */
+ if (likely(command->async_callback))
+ command->async_callback(command->async_data,
+ -EIO);
+ command->async_callback = NULL;
+ command->comp_func = NULL;
+
+ /* Unmap the DMA scatter list entries */
+ dma_unmap_sg(&port->dd->pdev->dev,
+ command->sg,
+ command->scatter_ents,
+ command->direction);
+
+ /*
+ * Clear the allocated bit and active tag for the
+ * command.
+ */
+ atomic_set(&port->commands[tag].active, 0);
+ release_slot(port, tag);
+
+ up(&port->cmd_slot);
+ }
+ }
+
+ if (cmdto_cnt) {
+ dev_warn(&port->dd->pdev->dev,
+ "%d commands timed out: restarting port",
+ cmdto_cnt);
+ mtip_restart_port(port);
+ clear_bit(MTIP_FLAG_EH_ACTIVE_BIT, &port->flags);
+ wake_up_interruptible(&port->svc_wait);
+ }
+
+ /* Restart the timer */
+ mod_timer(&port->cmd_timer,
+ jiffies + msecs_to_jiffies(MTIP_TIMEOUT_CHECK_PERIOD));
+}
+
+/*
+ * IO completion function.
+ *
+ * This completion function is called by the driver ISR when a
+ * command that was issued by the kernel completes. It first calls the
+ * asynchronous completion function which normally calls back into the block
+ * layer passing the asynchronous callback data, then unmaps the
+ * scatter list associated with the completed command, and finally
+ * clears the allocated bit associated with the completed command.
+ *
+ * @port Pointer to the port data structure.
+ * @tag Tag of the command.
+ * @data Pointer to driver_data.
+ * @status Completion status.
+ *
+ * return value
+ * None
+ */
+static void mtip_async_complete(struct mtip_port *port,
+ int tag,
+ void *data,
+ int status)
+{
+ struct mtip_cmd *command;
+ struct driver_data *dd = data;
+ int cb_status = status ? -EIO : 0;
+
+ if (unlikely(!dd) || unlikely(!port))
+ return;
+
+ command = &port->commands[tag];
+
+ if (unlikely(status == PORT_IRQ_TF_ERR)) {
+ dev_warn(&port->dd->pdev->dev,
+ "Command tag %d failed due to TFE\n", tag);
+ }
+
+ /* Upper layer callback */
+ if (likely(command->async_callback))
+ command->async_callback(command->async_data, cb_status);
+
+ command->async_callback = NULL;
+ command->comp_func = NULL;
+
+ /* Unmap the DMA scatter list entries */
+ dma_unmap_sg(&dd->pdev->dev,
+ command->sg,
+ command->scatter_ents,
+ command->direction);
+
+ /* Clear the allocated and active bits for the command */
+ atomic_set(&port->commands[tag].active, 0);
+ release_slot(port, tag);
+
+ up(&port->cmd_slot);
+}
+
+/*
+ * Internal command completion callback function.
+ *
+ * This function is normally called by the driver ISR when an internal
+ * command completed. This function signals the command completion by
+ * calling complete().
+ *
+ * @port Pointer to the port data structure.
+ * @tag Tag of the command that has completed.
+ * @data Pointer to a completion structure.
+ * @status Completion status.
+ *
+ * return value
+ * None
+ */
+static void mtip_completion(struct mtip_port *port,
+ int tag,
+ void *data,
+ int status)
+{
+ struct mtip_cmd *command = &port->commands[tag];
+ struct completion *waiting = data;
+ if (unlikely(status == PORT_IRQ_TF_ERR))
+ dev_warn(&port->dd->pdev->dev,
+ "Internal command %d completed with TFE\n", tag);
+
+ command->async_callback = NULL;
+ command->comp_func = NULL;
+
+ complete(waiting);
+}
+
+/*
+ * Helper function for tag logging
+ */
+static void print_tags(struct driver_data *dd,
+ char *msg,
+ unsigned long *tagbits)
+{
+ unsigned int tag, count = 0;
+
+ for (tag = 0; tag < (dd->slot_groups) * 32; tag++) {
+ if (test_bit(tag, tagbits))
+ count++;
+ }
+ if (count)
+ dev_info(&dd->pdev->dev, "%s [%i tags]\n", msg, count);
+}
+
+/*
+ * Handle an error.
+ *
+ * @dd Pointer to the DRIVER_DATA structure.
+ *
+ * return value
+ * None
+ */
+static void mtip_handle_tfe(struct driver_data *dd)
+{
+ int group, tag, bit, reissue;
+ struct mtip_port *port;
+ struct mtip_cmd *command;
+ u32 completed;
+ struct host_to_dev_fis *fis;
+ unsigned long tagaccum[SLOTBITS_IN_LONGS];
+
+ dev_warn(&dd->pdev->dev, "Taskfile error\n");
+
+ port = dd->port;
+
+ /* Stop the timer to prevent command timeouts. */
+ del_timer(&port->cmd_timer);
+
+ /* Set eh_active */
+ set_bit(MTIP_FLAG_EH_ACTIVE_BIT, &port->flags);
+
+ /* Loop through all the groups */
+ for (group = 0; group < dd->slot_groups; group++) {
+ completed = readl(port->completed[group]);
+
+ /* clear completed status register in the hardware.*/
+ writel(completed, port->completed[group]);
+
+ /* clear the tag accumulator */
+ memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long));
+
+ /* Process successfully completed commands */
+ for (bit = 0; bit < 32 && completed; bit++) {
+ if (!(completed & (1<<bit)))
+ continue;
+ tag = (group << 5) + bit;
+
+ /* Skip the internal command slot */
+ if (tag == MTIP_TAG_INTERNAL)
+ continue;
+
+ command = &port->commands[tag];
+ if (likely(command->comp_func)) {
+ set_bit(tag, tagaccum);
+ atomic_set(&port->commands[tag].active, 0);
+ command->comp_func(port,
+ tag,
+ command->comp_data,
+ 0);
+ } else {
+ dev_err(&port->dd->pdev->dev,
+ "Missing completion func for tag %d",
+ tag);
+ if (mtip_check_surprise_removal(dd->pdev)) {
+ mtip_command_cleanup(dd);
+ /* don't proceed further */
+ return;
+ }
+ }
+ }
+ }
+ print_tags(dd, "TFE tags completed:", tagaccum);
+
+ /* Restart the port */
+ mdelay(20);
+ mtip_restart_port(port);
+
+ /* clear the tag accumulator */
+ memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long));
+
+ /* Loop through all the groups */
+ for (group = 0; group < dd->slot_groups; group++) {
+ for (bit = 0; bit < 32; bit++) {
+ reissue = 1;
+ tag = (group << 5) + bit;
+
+ /* If the active bit is set re-issue the command */
+ if (atomic_read(&port->commands[tag].active) == 0)
+ continue;
+
+ fis = (struct host_to_dev_fis *)
+ port->commands[tag].command;
+
+ /* Should re-issue? */
+ if (tag == MTIP_TAG_INTERNAL ||
+ fis->command == ATA_CMD_SET_FEATURES)
+ reissue = 0;
+
+ /*
+ * First check if this command has
+ * exceeded its retries.
+ */
+ if (reissue &&
+ (port->commands[tag].retries-- > 0)) {
+
+ set_bit(tag, tagaccum);
+
+ /* Update the timeout value. */
+ port->commands[tag].comp_time =
+ jiffies + msecs_to_jiffies(
+ MTIP_NCQ_COMMAND_TIMEOUT_MS);
+ /* Re-issue the command. */
+ mtip_issue_ncq_command(port, tag);
+
+ continue;
+ }
+
+ /* Retire a command that will not be reissued */
+ dev_warn(&port->dd->pdev->dev,
+ "retiring tag %d\n", tag);
+ atomic_set(&port->commands[tag].active, 0);
+
+ if (port->commands[tag].comp_func)
+ port->commands[tag].comp_func(
+ port,
+ tag,
+ port->commands[tag].comp_data,
+ PORT_IRQ_TF_ERR);
+ else
+ dev_warn(&port->dd->pdev->dev,
+ "Bad completion for tag %d\n",
+ tag);
+ }
+ }
+ print_tags(dd, "TFE tags reissued:", tagaccum);
+
+ /* clear eh_active */
+ clear_bit(MTIP_FLAG_EH_ACTIVE_BIT, &port->flags);
+ wake_up_interruptible(&port->svc_wait);
+
+ mod_timer(&port->cmd_timer,
+ jiffies + msecs_to_jiffies(MTIP_TIMEOUT_CHECK_PERIOD));
+}
+
+/*
+ * Handle a set device bits interrupt
+ */
+static inline void mtip_process_sdbf(struct driver_data *dd)
+{
+ struct mtip_port *port = dd->port;
+ int group, tag, bit;
+ u32 completed;
+ struct mtip_cmd *command;
+
+ /* walk all bits in all slot groups */
+ for (group = 0; group < dd->slot_groups; group++) {
+ completed = readl(port->completed[group]);
+
+ /* clear completed status register in the hardware.*/
+ writel(completed, port->completed[group]);
+
+ /* Process completed commands. */
+ for (bit = 0;
+ (bit < 32) && completed;
+ bit++, completed >>= 1) {
+ if (completed & 0x01) {
+ tag = (group << 5) | bit;
+
+ /* skip internal command slot. */
+ if (unlikely(tag == MTIP_TAG_INTERNAL))
+ continue;
+
+ command = &port->commands[tag];
+ /* make internal callback */
+ if (likely(command->comp_func)) {
+ command->comp_func(
+ port,
+ tag,
+ command->comp_data,
+ 0);
+ } else {
+ dev_warn(&dd->pdev->dev,
+ "Null completion "
+ "for tag %d",
+ tag);
+
+ if (mtip_check_surprise_removal(
+ dd->pdev)) {
+ mtip_command_cleanup(dd);
+ return;
+ }
+ }
+ }
+ }
+ }
+}
+
+/*
+ * Process legacy PIO and D2H interrupts
+ */
+static inline void mtip_process_legacy(struct driver_data *dd, u32 port_stat)
+{
+ struct mtip_port *port = dd->port;
+ struct mtip_cmd *cmd = &port->commands[MTIP_TAG_INTERNAL];
+
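+ /* An active internal command is complete once its Command Issue bit has cleared */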
+ if (test_bit(MTIP_FLAG_IC_ACTIVE_BIT, &port->flags) &&
+ (cmd != NULL) && !(readl(port->cmd_issue[MTIP_TAG_INTERNAL])
+ & (1 << MTIP_TAG_INTERNAL))) {
+ if (cmd->comp_func) {
+ cmd->comp_func(port,
+ MTIP_TAG_INTERNAL,
+ cmd->comp_data,
+ 0);
+ return;
+ }
+ }
+
+ dev_warn(&dd->pdev->dev, "IRQ status 0x%x ignored.\n", port_stat);
+
+ return;
+}
+
+/*
+ * Demux and handle errors
+ */
+static inline void mtip_process_errors(struct driver_data *dd, u32 port_stat)
+{
+ if (likely(port_stat & (PORT_IRQ_TF_ERR | PORT_IRQ_IF_ERR)))
+ mtip_handle_tfe(dd);
+
+ if (unlikely(port_stat & PORT_IRQ_CONNECT)) {
+ dev_warn(&dd->pdev->dev,
+ "Clearing PxSERR.DIAG.x\n");
+ writel((1 << 26), dd->port->mmio + PORT_SCR_ERR);
+ }
+
+ if (unlikely(port_stat & PORT_IRQ_PHYRDY)) {
+ dev_warn(&dd->pdev->dev,
+ "Clearing PxSERR.DIAG.n\n");
+ writel((1 << 16), dd->port->mmio + PORT_SCR_ERR);
+ }
+
+ if (unlikely(port_stat & ~PORT_IRQ_HANDLED)) {
+ dev_warn(&dd->pdev->dev,
+ "Port stat errors %x unhandled\n",
+ (port_stat & ~PORT_IRQ_HANDLED));
+ }
+}
+
+static inline irqreturn_t mtip_handle_irq(struct driver_data *data)
+{
+ struct driver_data *dd = (struct driver_data *) data;
+ struct mtip_port *port = dd->port;
+ u32 hba_stat, port_stat;
+ int rv = IRQ_NONE;
+
+ hba_stat = readl(dd->mmio + HOST_IRQ_STAT);
+ if (hba_stat) {
+ rv = IRQ_HANDLED;
+
+ /* Acknowledge the interrupt status on the port.*/
+ port_stat = readl(port->mmio + PORT_IRQ_STAT);
+ writel(port_stat, port->mmio + PORT_IRQ_STAT);
+
+ /* Demux port status */
+ if (likely(port_stat & PORT_IRQ_SDB_FIS))
+ mtip_process_sdbf(dd);
+
+ if (unlikely(port_stat & PORT_IRQ_ERR)) {
+ if (unlikely(mtip_check_surprise_removal(dd->pdev))) {
+ mtip_command_cleanup(dd);
+ /* don't proceed further */
+ return IRQ_HANDLED;
+ }
+
+ mtip_process_errors(dd, port_stat & PORT_IRQ_ERR);
+ }
+
+ if (unlikely(port_stat & PORT_IRQ_LEGACY))
+ mtip_process_legacy(dd, port_stat & PORT_IRQ_LEGACY);
+ }
+
+ /* acknowledge interrupt */
+ writel(hba_stat, dd->mmio + HOST_IRQ_STAT);
+
+ return rv;
+}
+
+/*
+ * Wrapper for mtip_handle_irq
+ * (ignores return code)
+ */
+static void mtip_tasklet(unsigned long data)
+{
+ mtip_handle_irq((struct driver_data *) data);
+}
+
+/*
+ * HBA interrupt subroutine.
+ *
+ * @irq IRQ number.
+ * @instance Pointer to the driver data structure.
+ *
+ * return value
+ * IRQ_HANDLED An HBA interrupt was pending and handled.
+ * IRQ_NONE This interrupt was not for the HBA.
+ */
+static irqreturn_t mtip_irq_handler(int irq, void *instance)
+{
+ struct driver_data *dd = instance;
+ tasklet_schedule(&dd->tasklet);
+ return IRQ_HANDLED;
+}
+
+static void mtip_issue_non_ncq_command(struct mtip_port *port, int tag)
+{
+ atomic_set(&port->commands[tag].active, 1);
+ writel(1 << MTIP_TAG_BIT(tag),
+ port->cmd_issue[MTIP_TAG_INDEX(tag)]);
+}
+
+/*
+ * Wait for port to quiesce
+ *
+ * @port Pointer to port data structure
+ * @timeout Max duration to wait (ms)
+ *
+ * return value
+ * 0 Success
+ * -EBUSY Commands still active
+ */
+static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout)
+{
+ unsigned long to;
+ unsigned int n;
+ unsigned int active = 1;
+
+ to = jiffies + msecs_to_jiffies(timeout);
+ do {
+ if (test_bit(MTIP_FLAG_SVC_THD_ACTIVE_BIT, &port->flags) &&
+ test_bit(MTIP_FLAG_ISSUE_CMDS_BIT, &port->flags)) {
+ msleep(20);
+ continue; /* svc thd is actively issuing commands */
+ }
+ /*
+ * Ignore s_active bit 0 of array element 0.
+ * This bit will always be set
+ */
+ active = readl(port->s_active[0]) & 0xFFFFFFFE;
+ for (n = 1; n < port->dd->slot_groups; n++)
+ active |= readl(port->s_active[n]);
+
+ if (!active)
+ break;
+
+ msleep(20);
+ } while (time_before(jiffies, to));
+
+ return active ? -EBUSY : 0;
+}
+
+/*
+ * Execute an internal command and wait for the completion.
+ *
+ * @port Pointer to the port data structure.
+ * @fis Pointer to the FIS that describes the command.
+ * @fis_len Length in WORDS of the FIS.
+ * @buffer DMA address of the buffer holding command data.
+ * @buf_len Length, in bytes, of the data buffer.
+ * @opts Command header options, excluding the FIS length
+ * and the number of PRD entries.
+ * @timeout Time in ms to wait for the command to complete.
+ *
+ * return value
+ * 0 Command completed successfully.
+ * -EFAULT The buffer address is not correctly aligned.
+ * -EBUSY Internal command or other IO in progress.
+ * -EAGAIN Time out waiting for command to complete.
+ */
+static int mtip_exec_internal_command(struct mtip_port *port,
+ void *fis,
+ int fis_len,
+ dma_addr_t buffer,
+ int buf_len,
+ u32 opts,
+ gfp_t atomic,
+ unsigned long timeout)
+{
+ struct mtip_cmd_sg *command_sg;
+ DECLARE_COMPLETION_ONSTACK(wait);
+ int rv = 0;
+ struct mtip_cmd *int_cmd = &port->commands[MTIP_TAG_INTERNAL];
+
+ /* Make sure the buffer is 8 byte aligned. This is asic specific. */
+ if (buffer & 0x00000007) {
+ dev_err(&port->dd->pdev->dev,
+ "SG buffer is not 8 byte aligned\n");
+ return -EFAULT;
+ }
+
+ /* Only one internal command should be running at a time */
+ if (test_and_set_bit(MTIP_TAG_INTERNAL, port->allocated)) {
+ dev_warn(&port->dd->pdev->dev,
+ "Internal command already active\n");
+ return -EBUSY;
+ }
+ set_bit(MTIP_FLAG_IC_ACTIVE_BIT, &port->flags);
+
+ if (atomic == GFP_KERNEL) {
+ /* wait for io to complete if non atomic */
+ if (mtip_quiesce_io(port, 5000) < 0) {
+ dev_warn(&port->dd->pdev->dev,
+ "Failed to quiesce IO\n");
+ release_slot(port, MTIP_TAG_INTERNAL);
+ clear_bit(MTIP_FLAG_IC_ACTIVE_BIT, &port->flags);
+ wake_up_interruptible(&port->svc_wait);
+ return -EBUSY;
+ }
+
+ /* Set the completion function and data for the command. */
+ int_cmd->comp_data = &wait;
+ int_cmd->comp_func = mtip_completion;
+
+ } else {
+ /* Clear completion - we're going to poll */
+ int_cmd->comp_data = NULL;
+ int_cmd->comp_func = NULL;
+ }
+
+ /* Copy the command to the command table */
+ memcpy(int_cmd->command, fis, fis_len*4);
+
+ /* Populate the SG list */
+ int_cmd->command_header->opts =
+ __force_bit2int cpu_to_le32(opts | fis_len);
+ if (buf_len) {
+ command_sg = int_cmd->command + AHCI_CMD_TBL_HDR_SZ;
+
+ command_sg->info =
+ __force_bit2int cpu_to_le32((buf_len-1) & 0x3FFFFF);
+ command_sg->dba =
+ __force_bit2int cpu_to_le32(buffer & 0xFFFFFFFF);
+ command_sg->dba_upper =
+ __force_bit2int cpu_to_le32((buffer >> 16) >> 16);
+
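+ /* A single PRD entry: set PRDTL (command header DW0, bits 31:16) to 1 */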
+ int_cmd->command_header->opts |=
+ __force_bit2int cpu_to_le32((1 << 16));
+ }
+
+ /* Populate the command header */
+ int_cmd->command_header->byte_count = 0;
+
+ /* Issue the command to the hardware */
+ mtip_issue_non_ncq_command(port, MTIP_TAG_INTERNAL);
+
+ /* Poll if atomic, wait_for_completion otherwise */
+ if (atomic == GFP_KERNEL) {
+ /* Wait for the command to complete or timeout. */
+ if (wait_for_completion_timeout(
+ &wait,
+ msecs_to_jiffies(timeout)) == 0) {
+ dev_err(&port->dd->pdev->dev,
+ "Internal command did not complete [%d] "
+ "within timeout of %lu ms\n",
+ atomic, timeout);
+ rv = -EAGAIN;
+ }
+
+ if (readl(port->cmd_issue[MTIP_TAG_INTERNAL])
+ & (1 << MTIP_TAG_INTERNAL)) {
+ dev_warn(&port->dd->pdev->dev,
+ "Retiring internal command but CI is 1.\n");
+ }
+
+ } else {
+ /* Spin for <timeout> checking if command still outstanding */
+ timeout = jiffies + msecs_to_jiffies(timeout);
+
+ while ((readl(
+ port->cmd_issue[MTIP_TAG_INTERNAL])
+ & (1 << MTIP_TAG_INTERNAL))
+ && time_before(jiffies, timeout))
+ ;
+
+ if (readl(port->cmd_issue[MTIP_TAG_INTERNAL])
+ & (1 << MTIP_TAG_INTERNAL)) {
+ dev_err(&port->dd->pdev->dev,
+ "Internal command did not complete [%d]\n",
+ atomic);
+ rv = -EAGAIN;
+ }
+ }
+
+ /* Clear the allocated and active bits for the internal command. */
+ atomic_set(&int_cmd->active, 0);
+ release_slot(port, MTIP_TAG_INTERNAL);
+ clear_bit(MTIP_FLAG_IC_ACTIVE_BIT, &port->flags);
+ wake_up_interruptible(&port->svc_wait);
+
+ return rv;
+}
+
+/*
+ * Byte-swap ATA ID strings.
+ *
+ * ATA identify data contains strings in byte-swapped 16-bit words.
+ * They must be swapped (on all architectures) to be usable as C strings.
+ * This function swaps bytes in-place.
+ *
+ * @buf The buffer location of the string
+ * @len The number of bytes to swap
+ *
+ * return value
+ * None
+ */
+static inline void ata_swap_string(u16 *buf, unsigned int len)
+{
+ int i;
+ for (i = 0; i < (len/2); i++)
+ be16_to_cpus(&buf[i]);
+}
+
+/*
+ * Request the device identity information.
+ *
+ * If a user space buffer is not specified, i.e. is NULL, the
+ * identify information is still read from the drive and placed
+ * into the identify data buffer (@e port->identify) in the
+ * port data structure.
+ * When the identify buffer contains valid identify information @e
+ * port->identify_valid is non-zero.
+ *
+ * @port Pointer to the port structure.
+ * @user_buffer A user space buffer where the identify data should be
+ * copied.
+ *
+ * return value
+ * 0 Command completed successfully.
+ * -EFAULT An error occurred while copying data to the user buffer.
+ * -1 Command failed.
+ */
+static int mtip_get_identify(struct mtip_port *port, void __user *user_buffer)
+{
+ int rv = 0;
+ struct host_to_dev_fis fis;
+
+ /* Build the FIS. */
+ memset(&fis, 0, sizeof(struct host_to_dev_fis));
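+ /* 0x27 = Register Host-to-Device FIS; bit 7 of opts is the Command bit */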
+ fis.type = 0x27;
+ fis.opts = 1 << 7;
+ fis.command = ATA_CMD_ID_ATA;
+
+ /* Set the identify information as invalid. */
+ port->identify_valid = 0;
+
+ /* Clear the identify information. */
+ memset(port->identify, 0, sizeof(u16) * ATA_ID_WORDS);
+
+ /* Execute the command. */
+ if (mtip_exec_internal_command(port,
+ &fis,
+ 5,
+ port->identify_dma,
+ sizeof(u16) * ATA_ID_WORDS,
+ 0,
+ GFP_KERNEL,
+ MTIP_INTERNAL_COMMAND_TIMEOUT_MS)
+ < 0) {
+ rv = -1;
+ goto out;
+ }
+
+ /*
+ * Perform any necessary byte-swapping. Yes, the kernel does in fact
+ * perform field-sensitive swapping on the string fields.
+ * See the kernel use of ata_id_string() for proof of this.
+ */
+#ifdef __LITTLE_ENDIAN
+ ata_swap_string(port->identify + 27, 40); /* model string*/
+ ata_swap_string(port->identify + 23, 8); /* firmware string*/
+ ata_swap_string(port->identify + 10, 20); /* serial# string*/
+#else
+ {
+ int i;
+ for (i = 0; i < ATA_ID_WORDS; i++)
+ port->identify[i] = le16_to_cpu(port->identify[i]);
+ }
+#endif
+
+ /* Set the identify buffer as valid. */
+ port->identify_valid = 1;
+
+ if (user_buffer) {
+ if (copy_to_user(
+ user_buffer,
+ port->identify,
+ ATA_ID_WORDS * sizeof(u16))) {
+ rv = -EFAULT;
+ goto out;
+ }
+ }
+
+out:
+ return rv;
+}
+
+/*
+ * Issue a standby immediate command to the device.
+ *
+ * @port Pointer to the port structure.
+ *
+ * return value
+ * 0 Command was executed successfully.
+ * -1 An error occurred while executing the command.
+ */
+static int mtip_standby_immediate(struct mtip_port *port)
+{
+ int rv;
+ struct host_to_dev_fis fis;
+
+ /* Build the FIS. */
+ memset(&fis, 0, sizeof(struct host_to_dev_fis));
+ fis.type = 0x27;
+ fis.opts = 1 << 7;
+ fis.command = ATA_CMD_STANDBYNOW1;
+
+ /* Execute the command. Use a 15-second timeout for large drives. */
+ rv = mtip_exec_internal_command(port,
+ &fis,
+ 5,
+ 0,
+ 0,
+ 0,
+ GFP_KERNEL,
+ 15000);
+
+ return rv;
+}
+
+/*
+ * Get the drive capacity.
+ *
+ * @dd Pointer to the device data structure.
+ * @sectors Pointer to the variable that will receive the sector count.
+ *
+ * return value
+ * 1 Capacity was returned successfully.
+ * 0 The identify information is invalid.
+ */
+static bool mtip_hw_get_capacity(struct driver_data *dd, sector_t *sectors)
+{
+ struct mtip_port *port = dd->port;
+ u64 total, raw0, raw1, raw2, raw3;
+ raw0 = port->identify[100];
+ raw1 = port->identify[101];
+ raw2 = port->identify[102];
+ raw3 = port->identify[103];
+ total = raw0 | raw1<<16 | raw2<<32 | raw3<<48;
+ *sectors = total;
+ return (bool) !!port->identify_valid;
+}
+
+/*
+ * Reset the HBA.
+ *
+ * Resets the HBA by setting the HBA Reset bit in the Global
+ * HBA Control register. After setting the HBA Reset bit the
+ * function waits for 1 second before reading the HBA Reset
+ * bit to make sure it has cleared. If HBA Reset is not clear
+ * an error is returned. This function sleeps, so it cannot be
+ * used in atomic context.
+ *
+ * @dd Pointer to the driver data structure.
+ *
+ * return value
+ * 0 The reset was successful.
+ * -1 The HBA Reset bit did not clear.
+ */
+static int mtip_hba_reset(struct driver_data *dd)
+{
+ mtip_deinit_port(dd->port);
+
+ /* Set the reset bit */
+ writel(HOST_RESET, dd->mmio + HOST_CTL);
+
+ /* Flush */
+ readl(dd->mmio + HOST_CTL);
+
+ /* Wait for reset to clear */
+ ssleep(1);
+
+ /* Check the bit has cleared */
+ if (readl(dd->mmio + HOST_CTL) & HOST_RESET) {
+ dev_err(&dd->pdev->dev,
+ "Reset bit did not clear.\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+ * Display the identify command data.
+ *
+ * @port Pointer to the port data structure.
+ *
+ * return value
+ * None
+ */
+static void mtip_dump_identify(struct mtip_port *port)
+{
+ sector_t sectors;
+ unsigned short revid;
+ char cbuf[42];
+
+ if (!port->identify_valid)
+ return;
+
+ strlcpy(cbuf, (char *)(port->identify+10), 21);
+ dev_info(&port->dd->pdev->dev,
+ "Serial No.: %s\n", cbuf);
+
+ strlcpy(cbuf, (char *)(port->identify+23), 9);
+ dev_info(&port->dd->pdev->dev,
+ "Firmware Ver.: %s\n", cbuf);
+
+ strlcpy(cbuf, (char *)(port->identify+27), 41);
+ dev_info(&port->dd->pdev->dev, "Model: %s\n", cbuf);
+
+ if (mtip_hw_get_capacity(port->dd, &sectors))
+ dev_info(&port->dd->pdev->dev,
+ "Capacity: %llu sectors (%llu MB)\n",
+ (u64)sectors,
+ ((u64)sectors) * ATA_SECT_SIZE >> 20);
+
+ pci_read_config_word(port->dd->pdev, PCI_REVISION_ID, &revid);
+ switch (revid & 0xFF) {
+ case 0x1:
+ strlcpy(cbuf, "A0", 3);
+ break;
+ case 0x3:
+ strlcpy(cbuf, "A2", 3);
+ break;
+ default:
+ strlcpy(cbuf, "?", 2);
+ break;
+ }
+ dev_info(&port->dd->pdev->dev,
+ "Card Type: %s\n", cbuf);
+}
+
+/*
+ * Map the commands scatter list into the command table.
+ *
+ * @command Pointer to the command.
+ * @nents Number of scatter list entries.
+ *
+ * return value
+ * None
+ */
+static inline void fill_command_sg(struct driver_data *dd,
+ struct mtip_cmd *command,
+ int nents)
+{
+ int n;
+ unsigned int dma_len;
+ struct mtip_cmd_sg *command_sg;
+ struct scatterlist *sg = command->sg;
+
+ command_sg = command->command + AHCI_CMD_TBL_HDR_SZ;
+
+ for (n = 0; n < nents; n++) {
+ dma_len = sg_dma_len(sg);
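+ /* The PRD byte count field is 22 bits, so one entry maps at most 4 MB */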
+ if (dma_len > 0x400000)
+ dev_err(&dd->pdev->dev,
+ "DMA segment length truncated\n");
+ command_sg->info = __force_bit2int
+ cpu_to_le32((dma_len-1) & 0x3FFFFF);
+ command_sg->dba = __force_bit2int
+ cpu_to_le32(sg_dma_address(sg));
+ command_sg->dba_upper = __force_bit2int
+ cpu_to_le32((sg_dma_address(sg) >> 16) >> 16);
+ command_sg++;
+ sg++;
+ }
+}
+
+/*
+ * @brief Execute a drive command.
+ *
+ * return value 0 The command completed successfully.
+ * return value -1 An error occurred while executing the command.
+ */
+static int exec_drive_task(struct mtip_port *port, u8 *command)
+{
+ struct host_to_dev_fis fis;
+ struct host_to_dev_fis *reply = (port->rxfis + RX_FIS_D2H_REG);
+
+ /* Build the FIS. */
+ memset(&fis, 0, sizeof(struct host_to_dev_fis));
+ fis.type = 0x27;
+ fis.opts = 1 << 7;
+ fis.command = command[0];
+ fis.features = command[1];
+ fis.sect_count = command[2];
+ fis.sector = command[3];
+ fis.cyl_low = command[4];
+ fis.cyl_hi = command[5];
+ fis.device = command[6] & ~0x10; /* Clear the dev bit*/
+
+
+ dbg_printk(MTIP_DRV_NAME "%s: User Command: cmd %x, feat %x, "
+ "nsect %x, sect %x, lcyl %x, "
+ "hcyl %x, sel %x\n",
+ __func__,
+ command[0],
+ command[1],
+ command[2],
+ command[3],
+ command[4],
+ command[5],
+ command[6]);
+
+ /* Execute the command. */
+ if (mtip_exec_internal_command(port,
+ &fis,
+ 5,
+ 0,
+ 0,
+ 0,
+ GFP_KERNEL,
+ MTIP_IOCTL_COMMAND_TIMEOUT_MS) < 0) {
+ return -1;
+ }
+
+ command[0] = reply->command; /* Status*/
+ command[1] = reply->features; /* Error*/
+ command[4] = reply->cyl_low;
+ command[5] = reply->cyl_hi;
+
+ dbg_printk(MTIP_DRV_NAME "%s: Completion Status: stat %x, "
+ "err %x , cyl_lo %x cyl_hi %x\n",
+ __func__,
+ command[0],
+ command[1],
+ command[4],
+ command[5]);
+
+ return 0;
+}
+
+/*
+ * @brief Execute a drive command.
+ *
+ * @param port Pointer to the port data structure.
+ * @param command Pointer to the user specified command parameters.
+ * @param user_buffer Pointer to the user space buffer where read sector
+ * data should be copied.
+ *
+ * return value 0 The command completed successfully.
+ * return value -EFAULT An error occurred while copying the completion
+ * data to the user space buffer.
+ * return value -1 An error occurred while executing the command.
+ */
+static int exec_drive_command(struct mtip_port *port, u8 *command,
+ void __user *user_buffer)
+{
+ struct host_to_dev_fis fis;
+ struct host_to_dev_fis *reply = (port->rxfis + RX_FIS_D2H_REG);
+
+ /* Build the FIS. */
+ memset(&fis, 0, sizeof(struct host_to_dev_fis));
+ fis.type = 0x27;
+ fis.opts = 1 << 7;
+ fis.command = command[0];
+ fis.features = command[2];
+ fis.sect_count = command[3];
+ if (fis.command == ATA_CMD_SMART) {
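+ /* SMART commands require the 0x4F/0xC2 signature in the LBA mid/high registers */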
+ fis.sector = command[1];
+ fis.cyl_low = 0x4F;
+ fis.cyl_hi = 0xC2;
+ }
+
+ dbg_printk(MTIP_DRV_NAME
+ "%s: User Command: cmd %x, sect %x, "
+ "feat %x, sectcnt %x\n",
+ __func__,
+ command[0],
+ command[1],
+ command[2],
+ command[3]);
+
+ memset(port->sector_buffer, 0x00, ATA_SECT_SIZE);
+
+ /* Execute the command. */
+ if (mtip_exec_internal_command(port,
+ &fis,
+ 5,
+ port->sector_buffer_dma,
+ (command[3] != 0) ? ATA_SECT_SIZE : 0,
+ 0,
+ GFP_KERNEL,
+ MTIP_IOCTL_COMMAND_TIMEOUT_MS)
+ < 0) {
+ return -1;
+ }
+
+ /* Collect the completion status. */
+ command[0] = reply->command; /* Status*/
+ command[1] = reply->features; /* Error*/
+ command[2] = command[3];
+
+ dbg_printk(MTIP_DRV_NAME
+ "%s: Completion Status: stat %x, "
+ "err %x, cmd %x\n",
+ __func__,
+ command[0],
+ command[1],
+ command[2]);
+
+ if (user_buffer && command[3]) {
+ if (copy_to_user(user_buffer,
+ port->sector_buffer,
+ ATA_SECT_SIZE * command[3])) {
+ return -EFAULT;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Indicates whether a command has a single sector payload.
+ *
+ * @command Command opcode sent to the device.
+ * @features Features value sent with the command.
+ *
+ * return value
+ * 1 command is one that always has a single sector payload,
+ * regardless of the value in the Sector Count field.
+ * 0 otherwise
+ *
+ */
+static unsigned int implicit_sector(unsigned char command,
+ unsigned char features)
+{
+ unsigned int rv = 0;
+
+ /* list of commands that have an implicit sector count of 1 */
+ switch (command) {
+ case ATA_CMD_SEC_SET_PASS:
+ case ATA_CMD_SEC_UNLOCK:
+ case ATA_CMD_SEC_ERASE_PREP:
+ case ATA_CMD_SEC_ERASE_UNIT:
+ case ATA_CMD_SEC_FREEZE_LOCK:
+ case ATA_CMD_SEC_DISABLE_PASS:
+ case ATA_CMD_PMP_READ:
+ case ATA_CMD_PMP_WRITE:
+ rv = 1;
+ break;
+ case ATA_CMD_SET_MAX:
+ if (features == ATA_SET_MAX_UNLOCK)
+ rv = 1;
+ break;
+ case ATA_CMD_SMART:
+ if ((features == ATA_SMART_READ_VALUES) ||
+ (features == ATA_SMART_READ_THRESHOLDS))
+ rv = 1;
+ break;
+ case ATA_CMD_CONF_OVERLAY:
+ if ((features == ATA_DCO_IDENTIFY) ||
+ (features == ATA_DCO_SET))
+ rv = 1;
+ break;
+ }
+ return rv;
+}
+
+/*
+ * Executes a taskfile
+ * See ide_taskfile_ioctl() for derivation
+ */
+static int exec_drive_taskfile(struct driver_data *dd,
+ void __user *buf,
+ ide_task_request_t *req_task,
+ int outtotal)
+{
+ struct host_to_dev_fis fis;
+ struct host_to_dev_fis *reply;
+ u8 *outbuf = NULL;
+ u8 *inbuf = NULL;
+ dma_addr_t outbuf_dma = 0;
+ dma_addr_t inbuf_dma = 0;
+ dma_addr_t dma_buffer = 0;
+ int err = 0;
+ unsigned int taskin = 0;
+ unsigned int taskout = 0;
+ u8 nsect = 0;
+ unsigned int timeout = MTIP_IOCTL_COMMAND_TIMEOUT_MS;
+ unsigned int force_single_sector;
+ unsigned int transfer_size;
+ unsigned long task_file_data;
+ int intotal = outtotal + req_task->out_size;
+
+ taskout = req_task->out_size;
+ taskin = req_task->in_size;
+ /* 130560 = 512 * 0xFF*/
+ if (taskin > 130560 || taskout > 130560) {
+ err = -EINVAL;
+ goto abort;
+ }
+
+ if (taskout) {
+ outbuf = kzalloc(taskout, GFP_KERNEL);
+ if (outbuf == NULL) {
+ err = -ENOMEM;
+ goto abort;
+ }
+ if (copy_from_user(outbuf, buf + outtotal, taskout)) {
+ err = -EFAULT;
+ goto abort;
+ }
+ outbuf_dma = pci_map_single(dd->pdev,
+ outbuf,
+ taskout,
+ DMA_TO_DEVICE);
+ if (outbuf_dma == 0) {
+ err = -ENOMEM;
+ goto abort;
+ }
+ dma_buffer = outbuf_dma;
+ }
+
+ if (taskin) {
+ inbuf = kzalloc(taskin, GFP_KERNEL);
+ if (inbuf == NULL) {
+ err = -ENOMEM;
+ goto abort;
+ }
+
+ if (copy_from_user(inbuf, buf + intotal, taskin)) {
+ err = -EFAULT;
+ goto abort;
+ }
+ inbuf_dma = pci_map_single(dd->pdev,
+ inbuf,
+ taskin, DMA_FROM_DEVICE);
+ if (inbuf_dma == 0) {
+ err = -ENOMEM;
+ goto abort;
+ }
+ dma_buffer = inbuf_dma;
+ }
+
+ /* only supports PIO and non-data commands from this ioctl. */
+ switch (req_task->data_phase) {
+ case TASKFILE_OUT:
+ nsect = taskout / ATA_SECT_SIZE;
+ reply = (dd->port->rxfis + RX_FIS_PIO_SETUP);
+ break;
+ case TASKFILE_IN:
+ reply = (dd->port->rxfis + RX_FIS_PIO_SETUP);
+ break;
+ case TASKFILE_NO_DATA:
+ reply = (dd->port->rxfis + RX_FIS_D2H_REG);
+ break;
+ default:
+ err = -EINVAL;
+ goto abort;
+ }
+
+ /* Build the FIS. */
+ memset(&fis, 0, sizeof(struct host_to_dev_fis));
+
+ fis.type = 0x27;
+ fis.opts = 1 << 7;
+ fis.command = req_task->io_ports[7];
+ fis.features = req_task->io_ports[1];
+ fis.sect_count = req_task->io_ports[2];
+ fis.lba_low = req_task->io_ports[3];
+ fis.lba_mid = req_task->io_ports[4];
+ fis.lba_hi = req_task->io_ports[5];
+ /* Clear the dev bit*/
+ fis.device = req_task->io_ports[6] & ~0x10;
+
+ if ((req_task->in_flags.all == 0) && (req_task->out_flags.all & 1)) {
+ req_task->in_flags.all =
+ IDE_TASKFILE_STD_IN_FLAGS |
+ (IDE_HOB_STD_IN_FLAGS << 8);
+ fis.lba_low_ex = req_task->hob_ports[3];
+ fis.lba_mid_ex = req_task->hob_ports[4];
+ fis.lba_hi_ex = req_task->hob_ports[5];
+ fis.features_ex = req_task->hob_ports[1];
+ fis.sect_cnt_ex = req_task->hob_ports[2];
+
+ } else {
+ req_task->in_flags.all = IDE_TASKFILE_STD_IN_FLAGS;
+ }
+
+ force_single_sector = implicit_sector(fis.command, fis.features);
+
+ if ((taskin || taskout) && (!fis.sect_count)) {
+ if (nsect)
+ fis.sect_count = nsect;
+ else {
+ if (!force_single_sector) {
+ dev_warn(&dd->pdev->dev,
+ "data movement but "
+ "sect_count is 0\n");
+ err = -EINVAL;
+ goto abort;
+ }
+ }
+ }
+
+ dbg_printk(MTIP_DRV_NAME
+ "taskfile: cmd %x, feat %x, nsect %x,"
+ " sect/lbal %x, lcyl/lbam %x, hcyl/lbah %x,"
+ " head/dev %x\n",
+ fis.command,
+ fis.features,
+ fis.sect_count,
+ fis.lba_low,
+ fis.lba_mid,
+ fis.lba_hi,
+ fis.device);
+
+ switch (fis.command) {
+ case ATA_CMD_DOWNLOAD_MICRO:
+ /* Change timeout for Download Microcode to 60 seconds.*/
+ timeout = 60000;
+ break;
+ case ATA_CMD_SEC_ERASE_UNIT:
+ /* Change timeout for Security Erase Unit to 4 minutes.*/
+ timeout = 240000;
+ break;
+ case ATA_CMD_STANDBYNOW1:
+ /* Change timeout for standby immediate to 10 seconds.*/
+ timeout = 10000;
+ break;
+ case 0xF7:
+ case 0xFA:
+ /* Change timeout for vendor unique command to 10 secs */
+ timeout = 10000;
+ break;
+ case ATA_CMD_SMART:
+ /* Change timeout for vendor unique command to 10 secs */
+ timeout = 10000;
+ break;
+ default:
+ timeout = MTIP_IOCTL_COMMAND_TIMEOUT_MS;
+ break;
+ }
+
+ /* Determine the correct transfer size.*/
+ if (force_single_sector)
+ transfer_size = ATA_SECT_SIZE;
+ else
+ transfer_size = ATA_SECT_SIZE * fis.sect_count;
+
+ /* Execute the command.*/
+ if (mtip_exec_internal_command(dd->port,
+ &fis,
+ 5,
+ dma_buffer,
+ transfer_size,
+ 0,
+ GFP_KERNEL,
+ timeout) < 0) {
+ err = -EIO;
+ goto abort;
+ }
+
+ task_file_data = readl(dd->port->mmio+PORT_TFDATA);
+
+ if ((req_task->data_phase == TASKFILE_IN) && !(task_file_data & 1)) {
+ reply = dd->port->rxfis + RX_FIS_PIO_SETUP;
+ req_task->io_ports[7] = reply->control;
+ } else {
+ reply = dd->port->rxfis + RX_FIS_D2H_REG;
+ req_task->io_ports[7] = reply->command;
+ }
+
+ /* reclaim the DMA buffers.*/
+ if (inbuf_dma)
+ pci_unmap_single(dd->pdev, inbuf_dma,
+ taskin, DMA_FROM_DEVICE);
+ if (outbuf_dma)
+ pci_unmap_single(dd->pdev, outbuf_dma,
+ taskout, DMA_TO_DEVICE);
+ inbuf_dma = 0;
+ outbuf_dma = 0;
+
+ /* return the ATA registers to the caller.*/
+ req_task->io_ports[1] = reply->features;
+ req_task->io_ports[2] = reply->sect_count;
+ req_task->io_ports[3] = reply->lba_low;
+ req_task->io_ports[4] = reply->lba_mid;
+ req_task->io_ports[5] = reply->lba_hi;
+ req_task->io_ports[6] = reply->device;
+
+ if (req_task->out_flags.all & 1) {
+
+ req_task->hob_ports[3] = reply->lba_low_ex;
+ req_task->hob_ports[4] = reply->lba_mid_ex;
+ req_task->hob_ports[5] = reply->lba_hi_ex;
+ req_task->hob_ports[1] = reply->features_ex;
+ req_task->hob_ports[2] = reply->sect_cnt_ex;
+ }
+
+ /* COM reset after secure erase or low-level format */
+ if (((fis.command == ATA_CMD_SEC_ERASE_UNIT) ||
+ ((fis.command == 0xFC) &&
+ (fis.features == 0x27 || fis.features == 0x72 ||
+ fis.features == 0x62 || fis.features == 0x26))) &&
+ !(reply->command & 1)) {
+ mtip_restart_port(dd->port);
+ }
+
+ dbg_printk(MTIP_DRV_NAME
+ "%s: Completion: stat %x,"
+ "err %x, sect_cnt %x, lbalo %x,"
+ "lbamid %x, lbahi %x, dev %x\n",
+ __func__,
+ req_task->io_ports[7],
+ req_task->io_ports[1],
+ req_task->io_ports[2],
+ req_task->io_ports[3],
+ req_task->io_ports[4],
+ req_task->io_ports[5],
+ req_task->io_ports[6]);
+
+ if (taskout) {
+ if (copy_to_user(buf + outtotal, outbuf, taskout)) {
+ err = -EFAULT;
+ goto abort;
+ }
+ }
+ if (taskin) {
+ if (copy_to_user(buf + intotal, inbuf, taskin)) {
+ err = -EFAULT;
+ goto abort;
+ }
+ }
+abort:
+ if (inbuf_dma)
+ pci_unmap_single(dd->pdev, inbuf_dma,
+ taskin, DMA_FROM_DEVICE);
+ if (outbuf_dma)
+ pci_unmap_single(dd->pdev, outbuf_dma,
+ taskout, DMA_TO_DEVICE);
+ kfree(outbuf);
+ kfree(inbuf);
+
+ return err;
+}
+
+/*
+ * Handle IOCTL calls from the Block Layer.
+ *
+ * This function is called by the Block Layer when it receives an IOCTL
+ * command that it does not understand. If the IOCTL command is not supported
+ * this function returns -EINVAL.
+ *
+ * @dd Pointer to the driver data structure.
+ * @cmd IOCTL command passed from the Block Layer.
+ * @arg IOCTL argument passed from the Block Layer.
+ *
+ * return value
+ * 0 The IOCTL completed successfully.
+ * -EINVAL The specified command is not supported.
+ * -EFAULT An error occurred copying data to a user space buffer.
+ * -EIO An error occurred while executing the command.
+ */
+static int mtip_hw_ioctl(struct driver_data *dd, unsigned int cmd,
+ unsigned long arg)
+{
+ switch (cmd) {
+ case HDIO_GET_IDENTITY:
+ if (mtip_get_identify(dd->port, (void __user *) arg) < 0) {
+ dev_warn(&dd->pdev->dev,
+ "Unable to read identity\n");
+ return -EIO;
+ }
+
+ break;
+ case HDIO_DRIVE_CMD:
+ {
+ u8 drive_command[4];
+
+ /* Copy the user command info to our buffer. */
+ if (copy_from_user(drive_command,
+ (void __user *) arg,
+ sizeof(drive_command)))
+ return -EFAULT;
+
+ /* Execute the drive command. */
+ if (exec_drive_command(dd->port,
+ drive_command,
+ (void __user *) (arg+4)))
+ return -EIO;
+
+ /* Copy the status back to the user's buffer. */
+ if (copy_to_user((void __user *) arg,
+ drive_command,
+ sizeof(drive_command)))
+ return -EFAULT;
+
+ break;
+ }
+ case HDIO_DRIVE_TASK:
+ {
+ u8 drive_command[7];
+
+ /* Copy the user command info to our buffer. */
+ if (copy_from_user(drive_command,
+ (void __user *) arg,
+ sizeof(drive_command)))
+ return -EFAULT;
+
+ /* Execute the drive command. */
+ if (exec_drive_task(dd->port, drive_command))
+ return -EIO;
+
+ /* Copy the status back to the user's buffer. */
+ if (copy_to_user((void __user *) arg,
+ drive_command,
+ sizeof(drive_command)))
+ return -EFAULT;
+
+ break;
+ }
+ case HDIO_DRIVE_TASKFILE: {
+ ide_task_request_t req_task;
+ int ret, outtotal;
+
+ if (copy_from_user(&req_task, (void __user *) arg,
+ sizeof(req_task)))
+ return -EFAULT;
+
+ outtotal = sizeof(req_task);
+
+ ret = exec_drive_taskfile(dd, (void __user *) arg,
+ &req_task, outtotal);
+
+ if (copy_to_user((void __user *) arg, &req_task,
+ sizeof(req_task)))
+ return -EFAULT;
+
+ return ret;
+ }
+
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/*
+ * Submit an IO to the hw
+ *
+ * This function is called by the block layer to issue an io
+ * to the device. Upon completion, the callback function will
+ * be called with the data parameter passed as the callback data.
+ *
+ * @dd Pointer to the driver data structure.
+ * @start First sector to transfer.
+ * @nsect Number of sectors to transfer.
+ * @nents Number of entries in the scatter list for this command.
+ * @tag The tag of this command.
+ * @callback Pointer to the function that should be called
+ * when the command completes.
+ * @data Callback data passed to the callback function
+ * when the command completes.
+ * @barrier If non-zero, this command must be completed before
+ * issuing any other commands.
+ * @dir Direction (read or write)
+ *
+ * return value
+ * None
+ */
+static void mtip_hw_submit_io(struct driver_data *dd, sector_t start,
+ int nsect, int nents, int tag, void *callback,
+ void *data, int barrier, int dir)
+{
+ struct host_to_dev_fis *fis;
+ struct mtip_port *port = dd->port;
+ struct mtip_cmd *command = &port->commands[tag];
+
+ /* Map the scatter list for DMA access */
+ if (dir == READ)
+ nents = dma_map_sg(&dd->pdev->dev, command->sg,
+ nents, DMA_FROM_DEVICE);
+ else
+ nents = dma_map_sg(&dd->pdev->dev, command->sg,
+ nents, DMA_TO_DEVICE);
+
+ command->scatter_ents = nents;
+
+ /*
+ * The number of retries for this command before it is
+ * reported as a failure to the upper layers.
+ */
+ command->retries = MTIP_MAX_RETRIES;
+
+ /* Fill out fis */
+ fis = command->command;
+ fis->type = 0x27;
+ fis->opts = 1 << 7;
+ fis->command =
+ (dir == READ ? ATA_CMD_FPDMA_READ : ATA_CMD_FPDMA_WRITE);
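+ /* Split the 48-bit start LBA: low 24 bits in lba_low/mid/hi, upper 24 bits in the _ex fields */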
+ *((unsigned int *) &fis->lba_low) = (start & 0xFFFFFF);
+ *((unsigned int *) &fis->lba_low_ex) = ((start >> 24) & 0xFFFFFF);
+ fis->device = 1 << 6;
+ if (barrier)
+ fis->device |= FUA_BIT;
+ fis->features = nsect & 0xFF;
+ fis->features_ex = (nsect >> 8) & 0xFF;
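+ /*
+ * FPDMA: the sector count above travels in the features fields;
+ * the command tag is encoded in count bits 7:3 (upper tag bits in 2:0).
+ */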
+ fis->sect_count = ((tag << 3) | (tag >> 5));
+ fis->sect_cnt_ex = 0;
+ fis->control = 0;
+ fis->res2 = 0;
+ fis->res3 = 0;
+ fill_command_sg(dd, command, nents);
+
+ /* Populate the command header */
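+ /* DW0: PRDTL (bits 31:16) = nents, command FIS length (bits 4:0) = 5 DWORDs, plus prefetch */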
+ command->command_header->opts =
+ __force_bit2int cpu_to_le32(
+ (nents << 16) | 5 | AHCI_CMD_PREFETCH);
+ command->command_header->byte_count = 0;
+
+ /*
+ * Set the completion function and data for the command
+ * within this layer.
+ */
+ command->comp_data = dd;
+ command->comp_func = mtip_async_complete;
+ command->direction = (dir == READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
+
+ /*
+ * Set the completion function and data for the command passed
+ * from the upper layer.
+ */
+ command->async_data = data;
+ command->async_callback = callback;
+
+ /*
+ * Do not issue this command while an internal command is in
+ * progress or error handling is active; queue it instead.
+ */
+ if (unlikely(test_bit(MTIP_FLAG_IC_ACTIVE_BIT, &port->flags) ||
+ test_bit(MTIP_FLAG_EH_ACTIVE_BIT, &port->flags))) {
+ set_bit(tag, port->cmds_to_issue);
+ set_bit(MTIP_FLAG_ISSUE_CMDS_BIT, &port->flags);
+ return;
+ }
+
+ /* Issue the command to the hardware */
+ mtip_issue_ncq_command(port, tag);
+
+ /* Set the command's timeout value.*/
+ port->commands[tag].comp_time = jiffies + msecs_to_jiffies(
+ MTIP_NCQ_COMMAND_TIMEOUT_MS);
+}
+
+/*
+ * Release a command slot.
+ *
+ * @dd Pointer to the driver data structure.
+ * @tag Slot tag
+ *
+ * return value
+ * None
+ */
+static void mtip_hw_release_scatterlist(struct driver_data *dd, int tag)
+{
+ release_slot(dd->port, tag);
+}
+
+/*
+ * Obtain a command slot and return its associated scatter list.
+ *
+ * @dd Pointer to the driver data structure.
+ * @tag Pointer to an int that will receive the allocated command
+ * slot tag.
+ *
+ * return value
+ * Pointer to the scatter list for the allocated command slot
+ * or NULL if no command slots are available.
+ */
+static struct scatterlist *mtip_hw_get_scatterlist(struct driver_data *dd,
+ int *tag)
+{
+ /*
+ * It is possible that, even with this semaphore, a thread
+ * may think that no command slots are available. Therefore, we
+ * need to make an attempt to get_slot().
+ */
+ down(&dd->port->cmd_slot);
+ *tag = get_slot(dd->port);
+
+ if (unlikely(*tag < 0))
+ return NULL;
+
+ return dd->port->commands[*tag].sg;
+}
+
+/*
+ * Sysfs register/status dump.
+ *
+ * @dev Pointer to the device structure, passed by the kernel.
+ * @attr Pointer to the device_attribute structure passed by the kernel.
+ * @buf Pointer to the char buffer that will receive the stats info.
+ *
+ * return value
+ * The size, in bytes, of the data copied into buf.
+ */
+static ssize_t hw_show_registers(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u32 group_allocated;
+ struct driver_data *dd = dev_to_disk(dev)->private_data;
+ int size = 0;
+ int n;
+
+ size += sprintf(&buf[size], "%s:\ns_active:\n", __func__);
+
+ for (n = 0; n < dd->slot_groups; n++)
+ size += sprintf(&buf[size], "0x%08x\n",
+ readl(dd->port->s_active[n]));
+
+ size += sprintf(&buf[size], "Command Issue:\n");
+
+ for (n = 0; n < dd->slot_groups; n++)
+ size += sprintf(&buf[size], "0x%08x\n",
+ readl(dd->port->cmd_issue[n]));
+
+ size += sprintf(&buf[size], "Allocated:\n");
+
+ for (n = 0; n < dd->slot_groups; n++) {
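+ /* On 64-bit builds each unsigned long in 'allocated' packs two 32-slot groups */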
+ if (sizeof(long) > sizeof(u32))
+ group_allocated =
+ dd->port->allocated[n/2] >> (32*(n&1));
+ else
+ group_allocated = dd->port->allocated[n];
+ size += sprintf(&buf[size], "0x%08x\n",
+ group_allocated);
+ }
+
+ size += sprintf(&buf[size], "completed:\n");
+
+ for (n = 0; n < dd->slot_groups; n++)
+ size += sprintf(&buf[size], "0x%08x\n",
+ readl(dd->port->completed[n]));
+
+ size += sprintf(&buf[size], "PORT_IRQ_STAT 0x%08x\n",
+ readl(dd->port->mmio + PORT_IRQ_STAT));
+ size += sprintf(&buf[size], "HOST_IRQ_STAT 0x%08x\n",
+ readl(dd->mmio + HOST_IRQ_STAT));
+
+ return size;
+}
+static DEVICE_ATTR(registers, S_IRUGO, hw_show_registers, NULL);
+
+/*
+ * Create the sysfs related attributes.
+ *
+ * @dd Pointer to the driver data structure.
+ * @kobj Pointer to the kobj for the block device.
+ *
+ * return value
+ * 0 Operation completed successfully.
+ * -EINVAL Invalid parameter.
+ */
+static int mtip_hw_sysfs_init(struct driver_data *dd, struct kobject *kobj)
+{
+ if (!kobj || !dd)
+ return -EINVAL;
+
+ if (sysfs_create_file(kobj, &dev_attr_registers.attr))
+ dev_warn(&dd->pdev->dev,
+ "Error creating registers sysfs entry\n");
+ return 0;
+}
+
+/*
+ * Remove the sysfs related attributes.
+ *
+ * @dd Pointer to the driver data structure.
+ * @kobj Pointer to the kobj for the block device.
+ *
+ * return value
+ * 0 Operation completed successfully.
+ * -EINVAL Invalid parameter.
+ */
+static int mtip_hw_sysfs_exit(struct driver_data *dd, struct kobject *kobj)
+{
+ if (!kobj || !dd)
+ return -EINVAL;
+
+ sysfs_remove_file(kobj, &dev_attr_registers.attr);
+
+ return 0;
+}
+
+/*
+ * Perform any init/resume time hardware setup
+ *
+ * @dd Pointer to the driver data structure.
+ *
+ * return value
+ * None
+ */
+static inline void hba_setup(struct driver_data *dd)
+{
+ u32 hwdata;
+ hwdata = readl(dd->mmio + HOST_HSORG);
+
+ /* interrupt bug workaround: use only 1 IS bit.*/
+ writel(hwdata |
+ HSORG_DISABLE_SLOTGRP_INTR |
+ HSORG_DISABLE_SLOTGRP_PXIS,
+ dd->mmio + HOST_HSORG);
+}
+
+/*
+ * Detect the details of the product, and store anything needed
+ * into the driver data structure. This includes the product type,
+ * version, and number of slot groups.
+ *
+ * @dd Pointer to the driver data structure.
+ *
+ * return value
+ * None
+ */
+static void mtip_detect_product(struct driver_data *dd)
+{
+ u32 hwdata;
+ unsigned int rev, slotgroups;
+
+ /*
+ * HBA base + 0xFC [15:0] - vendor-specific hardware interface
+ * info register:
+ * [15:8] hardware/software interface rev#
+ * [ 3] asic-style interface
+ * [ 2:0] number of slot groups, minus 1 (only valid for asic-style).
+ */
+ hwdata = readl(dd->mmio + HOST_HSORG);
+
+ dd->product_type = MTIP_PRODUCT_UNKNOWN;
+ dd->slot_groups = 1;
+
+ if (hwdata & 0x8) {
+ dd->product_type = MTIP_PRODUCT_ASICFPGA;
+ rev = (hwdata & HSORG_HWREV) >> 8;
+ slotgroups = (hwdata & HSORG_SLOTGROUPS) + 1;
+ dev_info(&dd->pdev->dev,
+ "ASIC-FPGA design, HS rev 0x%x, "
+ "%i slot groups [%i slots]\n",
+ rev,
+ slotgroups,
+ slotgroups * 32);
+
+ if (slotgroups > MTIP_MAX_SLOT_GROUPS) {
+ dev_warn(&dd->pdev->dev,
+ "Warning: driver only supports "
+ "%i slot groups.\n", MTIP_MAX_SLOT_GROUPS);
+ slotgroups = MTIP_MAX_SLOT_GROUPS;
+ }
+ dd->slot_groups = slotgroups;
+ return;
+ }
+
+ dev_warn(&dd->pdev->dev, "Unrecognized product id\n");
+}
+
+/*
+ * Blocking wait for FTL rebuild to complete
+ *
+ * @dd Pointer to the DRIVER_DATA structure.
+ *
+ * return value
+ * 0 FTL rebuild completed successfully
+ * -EFAULT FTL rebuild error/timeout/interruption
+ */
+static int mtip_ftl_rebuild_poll(struct driver_data *dd)
+{
+ unsigned long timeout, cnt = 0, start;
+
+ dev_warn(&dd->pdev->dev,
+ "FTL rebuild in progress. Polling for completion.\n");
+
+ start = jiffies;
+ dd->ftlrebuildflag = 1;
+ timeout = jiffies + msecs_to_jiffies(MTIP_FTL_REBUILD_TIMEOUT_MS);
+
+ do {
+ if (mtip_check_surprise_removal(dd->pdev))
+ return -EFAULT;
+
+ if (mtip_get_identify(dd->port, NULL) < 0)
+ return -EFAULT;
+
+ if (*(dd->port->identify + MTIP_FTL_REBUILD_OFFSET) ==
+ MTIP_FTL_REBUILD_MAGIC) {
+ ssleep(1);
+ /* Print message every 3 minutes */
+ if (cnt++ >= 180) {
+ dev_warn(&dd->pdev->dev,
+ "FTL rebuild in progress (%d secs).\n",
+ jiffies_to_msecs(jiffies - start) / 1000);
+ cnt = 0;
+ }
+ } else {
+ dev_warn(&dd->pdev->dev,
+ "FTL rebuild complete (%d secs).\n",
+ jiffies_to_msecs(jiffies - start) / 1000);
+ dd->ftlrebuildflag = 0;
+ mtip_block_initialize(dd);
+ break;
+ }
+ ssleep(10);
+ } while (time_before(jiffies, timeout));
+
+ /* Check for timeout */
+ if (dd->ftlrebuildflag) {
+ dev_err(&dd->pdev->dev,
+ "Timed out waiting for FTL rebuild to complete (%d secs).\n",
+ jiffies_to_msecs(jiffies - start) / 1000);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/*
+ * service thread to issue queued commands
+ *
+ * @data Pointer to the driver data structure.
+ *
+ * return value
+ * 0
+ */
+
+static int mtip_service_thread(void *data)
+{
+ struct driver_data *dd = (struct driver_data *)data;
+ unsigned long slot, slot_start, slot_wrap;
+ unsigned int num_cmd_slots = dd->slot_groups * 32;
+ struct mtip_port *port = dd->port;
+
+ while (1) {
+ /*
+ * Wait until neither an internal command is in progress
+ * nor error handling is active.
+ */
+ wait_event_interruptible(port->svc_wait, (port->flags) &&
+ !test_bit(MTIP_FLAG_IC_ACTIVE_BIT, &port->flags) &&
+ !test_bit(MTIP_FLAG_EH_ACTIVE_BIT, &port->flags));
+
+ if (kthread_should_stop())
+ break;
+
+ set_bit(MTIP_FLAG_SVC_THD_ACTIVE_BIT, &port->flags);
+ if (test_bit(MTIP_FLAG_ISSUE_CMDS_BIT, &port->flags)) {
+ slot = 1;
+ /* used to restrict the loop to one iteration */
+ slot_start = num_cmd_slots;
+ slot_wrap = 0;
+ while (1) {
+ slot = find_next_bit(port->cmds_to_issue,
+ num_cmd_slots, slot);
+ if (slot_wrap == 1) {
+ if ((slot_start >= slot) ||
+ (slot >= num_cmd_slots))
+ break;
+ }
+ if (unlikely(slot_start == num_cmd_slots))
+ slot_start = slot;
+
+ if (unlikely(slot == num_cmd_slots)) {
+ slot = 1;
+ slot_wrap = 1;
+ continue;
+ }
+
+ /* Issue the command to the hardware */
+ mtip_issue_ncq_command(port, slot);
+
+ /* Set the command's timeout value.*/
+ port->commands[slot].comp_time = jiffies +
+ msecs_to_jiffies(MTIP_NCQ_COMMAND_TIMEOUT_MS);
+
+ clear_bit(slot, port->cmds_to_issue);
+ }
+
+ clear_bit(MTIP_FLAG_ISSUE_CMDS_BIT, &port->flags);
+ } else if (test_bit(MTIP_FLAG_REBUILD_BIT, &port->flags)) {
+ mtip_ftl_rebuild_poll(dd);
+ clear_bit(MTIP_FLAG_REBUILD_BIT, &port->flags);
+ }
+ clear_bit(MTIP_FLAG_SVC_THD_ACTIVE_BIT, &port->flags);
+
+ if (test_bit(MTIP_FLAG_SVC_THD_SHOULD_STOP_BIT, &port->flags))
+ break;
+ }
+ return 0;
+}
+
+/*
+ * Called once for each card.
+ *
+ * @dd Pointer to the driver data structure.
+ *
+ * return value
+ * 0 on success, else an error code.
+ */
+static int mtip_hw_init(struct driver_data *dd)
+{
+ int i;
+ int rv;
+ unsigned int num_command_slots;
+
+ dd->mmio = pcim_iomap_table(dd->pdev)[MTIP_ABAR];
+
+ mtip_detect_product(dd);
+ if (dd->product_type == MTIP_PRODUCT_UNKNOWN) {
+ rv = -EIO;
+ goto out1;
+ }
+ num_command_slots = dd->slot_groups * 32;
+
+ hba_setup(dd);
+
+ tasklet_init(&dd->tasklet, mtip_tasklet, (unsigned long)dd);
+
+ dd->port = kzalloc(sizeof(struct mtip_port), GFP_KERNEL);
+ if (!dd->port) {
+ dev_err(&dd->pdev->dev,
+ "Memory allocation: port structure\n");
+ return -ENOMEM;
+ }
+
+ /* Counting semaphore to track command slot usage */
+ sema_init(&dd->port->cmd_slot, num_command_slots - 1);
+
+ /* Spinlock to prevent concurrent issue */
+ spin_lock_init(&dd->port->cmd_issue_lock);
+
+ /* Set the port mmio base address. */
+ dd->port->mmio = dd->mmio + PORT_OFFSET;
+ dd->port->dd = dd;
+
+ /* Allocate memory for the command list. */
+ dd->port->command_list =
+ dmam_alloc_coherent(&dd->pdev->dev,
+ HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 2),
+ &dd->port->command_list_dma,
+ GFP_KERNEL);
+ if (!dd->port->command_list) {
+ dev_err(&dd->pdev->dev,
+ "Memory allocation: command list\n");
+ rv = -ENOMEM;
+ goto out1;
+ }
+
+ /* Clear the memory we have allocated. */
+ memset(dd->port->command_list,
+ 0,
+ HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 2));
+
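+ /*
+ * The coherent allocation above is carved up into the command list,
+ * the RX FIS area, the command tables, the identify buffer, and the
+ * sector buffer, in that order.
+ */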
+ /* Setup the address of the RX FIS. */
+ dd->port->rxfis = dd->port->command_list + HW_CMD_SLOT_SZ;
+ dd->port->rxfis_dma = dd->port->command_list_dma + HW_CMD_SLOT_SZ;
+
+ /* Setup the address of the command tables. */
+ dd->port->command_table = dd->port->rxfis + AHCI_RX_FIS_SZ;
+ dd->port->command_tbl_dma = dd->port->rxfis_dma + AHCI_RX_FIS_SZ;
+
+ /* Setup the address of the identify data. */
+ dd->port->identify = dd->port->command_table +
+ HW_CMD_TBL_AR_SZ;
+ dd->port->identify_dma = dd->port->command_tbl_dma +
+ HW_CMD_TBL_AR_SZ;
+
+ /* Setup the address of the sector buffer. */
+ dd->port->sector_buffer = (void *) dd->port->identify + ATA_SECT_SIZE;
+ dd->port->sector_buffer_dma = dd->port->identify_dma + ATA_SECT_SIZE;
+
+ /* Point the command headers at the command tables. */
+ for (i = 0; i < num_command_slots; i++) {
+ dd->port->commands[i].command_header =
+ dd->port->command_list +
+ (sizeof(struct mtip_cmd_hdr) * i);
+ dd->port->commands[i].command_header_dma =
+ dd->port->command_list_dma +
+ (sizeof(struct mtip_cmd_hdr) * i);
+
+ dd->port->commands[i].command =
+ dd->port->command_table + (HW_CMD_TBL_SZ * i);
+ dd->port->commands[i].command_dma =
+ dd->port->command_tbl_dma + (HW_CMD_TBL_SZ * i);
+
+ if (readl(dd->mmio + HOST_CAP) & HOST_CAP_64)
+ dd->port->commands[i].command_header->ctbau =
+ __force_bit2int cpu_to_le32(
+ (dd->port->commands[i].command_dma >> 16) >> 16);
+ dd->port->commands[i].command_header->ctba =
+ __force_bit2int cpu_to_le32(
+ dd->port->commands[i].command_dma & 0xFFFFFFFF);
+
+ /*
+ * If this is not done, the stock FC11 i386 kernel (which has
+ * lots of kernel debugging enabled) reports a bug.
+ */
+ sg_init_table(dd->port->commands[i].sg, MTIP_MAX_SG);
+
+ /* Mark all commands as currently inactive.*/
+ atomic_set(&dd->port->commands[i].active, 0);
+ }
+
+ /* Setup the pointers to the extended s_active and CI registers. */
+ for (i = 0; i < dd->slot_groups; i++) {
+ dd->port->s_active[i] =
+ dd->port->mmio + i*0x80 + PORT_SCR_ACT;
+ dd->port->cmd_issue[i] =
+ dd->port->mmio + i*0x80 + PORT_COMMAND_ISSUE;
+ dd->port->completed[i] =
+ dd->port->mmio + i*0x80 + PORT_SDBV;
+ }
+
+ /* Reset the HBA. */
+ if (mtip_hba_reset(dd) < 0) {
+ dev_err(&dd->pdev->dev,
+ "Card did not reset within timeout\n");
+ rv = -EIO;
+ goto out2;
+ }
+
+ mtip_init_port(dd->port);
+ mtip_start_port(dd->port);
+
+ /* Setup the ISR and enable interrupts. */
+ rv = devm_request_irq(&dd->pdev->dev,
+ dd->pdev->irq,
+ mtip_irq_handler,
+ IRQF_SHARED,
+ dev_driver_string(&dd->pdev->dev),
+ dd);
+
+ if (rv) {
+ dev_err(&dd->pdev->dev,
+ "Unable to allocate IRQ %d\n", dd->pdev->irq);
+ goto out2;
+ }
+
+ /* Enable interrupts on the HBA. */
+ writel(readl(dd->mmio + HOST_CTL) | HOST_IRQ_EN,
+ dd->mmio + HOST_CTL);
+
+ init_timer(&dd->port->cmd_timer);
+ init_waitqueue_head(&dd->port->svc_wait);
+
+ dd->port->cmd_timer.data = (unsigned long int) dd->port;
+ dd->port->cmd_timer.function = mtip_timeout_function;
+ mod_timer(&dd->port->cmd_timer,
+ jiffies + msecs_to_jiffies(MTIP_TIMEOUT_CHECK_PERIOD));
+
+ if (mtip_get_identify(dd->port, NULL) < 0) {
+ rv = -EFAULT;
+ goto out3;
+ }
+
+ if (*(dd->port->identify + MTIP_FTL_REBUILD_OFFSET) ==
+ MTIP_FTL_REBUILD_MAGIC) {
+ set_bit(MTIP_FLAG_REBUILD_BIT, &dd->port->flags);
+ return MTIP_FTL_REBUILD_MAGIC;
+ }
+ mtip_dump_identify(dd->port);
+ return rv;
+
+out3:
+ del_timer_sync(&dd->port->cmd_timer);
+
+ /* Disable interrupts on the HBA. */
+ writel(readl(dd->mmio + HOST_CTL) & ~HOST_IRQ_EN,
+ dd->mmio + HOST_CTL);
+
+ /* Release the IRQ. */
+ devm_free_irq(&dd->pdev->dev, dd->pdev->irq, dd);
+
+out2:
+ mtip_deinit_port(dd->port);
+
+ /* Free the command/command header memory. */
+ dmam_free_coherent(&dd->pdev->dev,
+ HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 2),
+ dd->port->command_list,
+ dd->port->command_list_dma);
+out1:
+ /* Free the memory allocated for the port structure. */
+ kfree(dd->port);
+
+ return rv;
+}
+
+/*
+ * Called to deinitialize an interface.
+ *
+ * @dd Pointer to the driver data structure.
+ *
+ * return value
+ * 0
+ */
+static int mtip_hw_exit(struct driver_data *dd)
+{
+ /*
+ * Send standby immediate (E0h) to the drive so that it
+ * saves its state.
+ */
+ if (atomic_read(&dd->drv_cleanup_done) != true) {
+
+ mtip_standby_immediate(dd->port);
+
+ /* de-initialize the port. */
+ mtip_deinit_port(dd->port);
+
+ /* Disable interrupts on the HBA. */
+ writel(readl(dd->mmio + HOST_CTL) & ~HOST_IRQ_EN,
+ dd->mmio + HOST_CTL);
+ }
+
+ del_timer_sync(&dd->port->cmd_timer);
+
+ /* Release the IRQ. */
+ devm_free_irq(&dd->pdev->dev, dd->pdev->irq, dd);
+
+ /* Stop the bottom half tasklet. */
+ tasklet_kill(&dd->tasklet);
+
+ /* Free the command/command header memory. */
+ dmam_free_coherent(&dd->pdev->dev,
+ HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 2),
+ dd->port->command_list,
+ dd->port->command_list_dma);
+ /* Free the memory allocated for the port structure. */
+ kfree(dd->port);
+
+ return 0;
+}
+
+/*
+ * Issue a Standby Immediate command to the device.
+ *
+ * This function is called by the Block Layer just before the
+ * system powers off during a shutdown.
+ *
+ * @dd Pointer to the driver data structure.
+ *
+ * return value
+ * 0
+ */
+static int mtip_hw_shutdown(struct driver_data *dd)
+{
+ /*
+ * Send standby immediate (E0h) to the drive so that it
+ * saves its state.
+ */
+ mtip_standby_immediate(dd->port);
+
+ return 0;
+}
+
+/*
+ * Suspend function
+ *
+ * This function is called by the Block Layer just before the
+ * system hibernates.
+ *
+ * @dd Pointer to the driver data structure.
+ *
+ * return value
+ * 0 Suspend was successful
+ * -EFAULT Suspend was not successful
+ */
+static int mtip_hw_suspend(struct driver_data *dd)
+{
+ /*
+ * Send standby immediate (E0h) to the drive
+ * so that it saves its state.
+ */
+ if (mtip_standby_immediate(dd->port) != 0) {
+ dev_err(&dd->pdev->dev,
+ "Failed standby-immediate command\n");
+ return -EFAULT;
+ }
+
+ /* Disable interrupts on the HBA.*/
+ writel(readl(dd->mmio + HOST_CTL) & ~HOST_IRQ_EN,
+ dd->mmio + HOST_CTL);
+ mtip_deinit_port(dd->port);
+
+ return 0;
+}
+
+/*
+ * Resume function
+ *
+ * This function is called by the Block Layer as the
+ * system resumes.
+ *
+ * @dd Pointer to the driver data structure.
+ *
+ * return value
+ * 0 Resume was successful
+ * -EFAULT Resume was not successful
+ */
+static int mtip_hw_resume(struct driver_data *dd)
+{
+ /* Perform any needed hardware setup steps */
+ hba_setup(dd);
+
+ /* Reset the HBA */
+ if (mtip_hba_reset(dd) != 0) {
+ dev_err(&dd->pdev->dev,
+ "Unable to reset the HBA\n");
+ return -EFAULT;
+ }
+
+ /*
+ * Enable the port, DMA engine, and FIS reception specific
+ * h/w in controller.
+ */
+ mtip_init_port(dd->port);
+ mtip_start_port(dd->port);
+
+ /* Enable interrupts on the HBA.*/
+ writel(readl(dd->mmio + HOST_CTL) | HOST_IRQ_EN,
+ dd->mmio + HOST_CTL);
+
+ return 0;
+}
+
+/*
+ * Helper function for reusing disk name
+ * upon hot insertion.
+ */
+static int rssd_disk_name_format(char *prefix,
+ int index,
+ char *buf,
+ int buflen)
+{
+ const int base = 'z' - 'a' + 1;
+ char *begin = buf + strlen(prefix);
+ char *end = buf + buflen;
+ char *p;
+ int unit;
+
+ p = end - 1;
+ *p = '\0';
+ unit = base;
+ do {
+ if (p == begin)
+ return -EINVAL;
+ *--p = 'a' + (index % unit);
+ index = (index / unit) - 1;
+ } while (index >= 0);
+
+ memmove(begin, p, end - p);
+ memcpy(buf, prefix, strlen(prefix));
+
+ return 0;
+}
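+
+/*
+ * Worked example of the naming scheme above (illustrative, not from the
+ * original source): with prefix "rssd", index 0 yields "rssda", index 25
+ * yields "rssdz" and index 26 yields "rssdaa", mirroring the sd.c scheme.
+ */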
+
+/*
+ * Block layer IOCTL handler.
+ *
+ * @dev Pointer to the block_device structure.
+ * @mode ignored
+ * @cmd IOCTL command passed from the user application.
+ * @arg Argument passed from the user application.
+ *
+ * return value
+ * 0 IOCTL completed successfully.
+ * -ENOTTY IOCTL not supported or invalid driver data
+ * structure pointer.
+ */
+static int mtip_block_ioctl(struct block_device *dev,
+ fmode_t mode,
+ unsigned cmd,
+ unsigned long arg)
+{
+ struct driver_data *dd = dev->bd_disk->private_data;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ if (!dd)
+ return -ENOTTY;
+
+ switch (cmd) {
+ case BLKFLSBUF:
+ return -ENOTTY;
+ default:
+ return mtip_hw_ioctl(dd, cmd, arg);
+ }
+}
+
+#ifdef CONFIG_COMPAT
+/*
+ * Block layer compat IOCTL handler.
+ *
+ * @dev Pointer to the block_device structure.
+ * @mode ignored
+ * @cmd IOCTL command passed from the user application.
+ * @arg Argument passed from the user application.
+ *
+ * return value
+ * 0 IOCTL completed successfully.
+ * -ENOTTY IOCTL not supported or invalid driver data
+ * structure pointer.
+ */
+static int mtip_block_compat_ioctl(struct block_device *dev,
+ fmode_t mode,
+ unsigned cmd,
+ unsigned long arg)
+{
+ struct driver_data *dd = dev->bd_disk->private_data;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ if (!dd)
+ return -ENOTTY;
+
+ switch (cmd) {
+ case BLKFLSBUF:
+ return -ENOTTY;
+ case HDIO_DRIVE_TASKFILE: {
+ struct mtip_compat_ide_task_request_s __user *compat_req_task;
+ ide_task_request_t req_task;
+ int compat_tasksize, outtotal, ret;
+
+ compat_tasksize =
+ sizeof(struct mtip_compat_ide_task_request_s);
+
+ compat_req_task =
+ (struct mtip_compat_ide_task_request_s __user *) arg;
+
+ if (copy_from_user(&req_task, (void __user *) arg,
+ compat_tasksize - (2 * sizeof(compat_long_t))))
+ return -EFAULT;
+
+ if (get_user(req_task.out_size, &compat_req_task->out_size))
+ return -EFAULT;
+
+ if (get_user(req_task.in_size, &compat_req_task->in_size))
+ return -EFAULT;
+
+ outtotal = sizeof(struct mtip_compat_ide_task_request_s);
+
+ ret = exec_drive_taskfile(dd, (void __user *) arg,
+ &req_task, outtotal);
+
+ if (copy_to_user((void __user *) arg, &req_task,
+ compat_tasksize -
+ (2 * sizeof(compat_long_t))))
+ return -EFAULT;
+
+ if (put_user(req_task.out_size, &compat_req_task->out_size))
+ return -EFAULT;
+
+ if (put_user(req_task.in_size, &compat_req_task->in_size))
+ return -EFAULT;
+
+ return ret;
+ }
+ default:
+ return mtip_hw_ioctl(dd, cmd, arg);
+ }
+}
+#endif
+
+/*
+ * Obtain the geometry of the device.
+ *
+ * You may think that this function is obsolete, but some applications,
+ * fdisk for example, still use CHS values. This function describes the
+ * device as having 224 heads and 56 sectors per cylinder. These values are
+ * chosen so that each cylinder is aligned on a 4KB boundary. Since a
+ * partition is described in terms of a start and end cylinder, this means
+ * that each partition is also 4KB aligned. Non-aligned partitions adversely
+ * affect performance.
+ *
+ * @dev Pointer to the block_device structure.
+ * @geo Pointer to a hd_geometry structure.
+ *
+ * return value
+ * 0 Operation completed successfully.
+ * -ENOTTY An error occurred while reading the drive capacity.
+ */
+static int mtip_block_getgeo(struct block_device *dev,
+ struct hd_geometry *geo)
+{
+ struct driver_data *dd = dev->bd_disk->private_data;
+ sector_t capacity;
+
+ if (!dd)
+ return -ENOTTY;
+
+ if (!(mtip_hw_get_capacity(dd, &capacity))) {
+ dev_warn(&dd->pdev->dev,
+ "Could not get drive capacity.\n");
+ return -ENOTTY;
+ }
+
+ geo->heads = 224;
+ geo->sectors = 56;
+ sector_div(capacity, (geo->heads * geo->sectors));
+ geo->cylinders = capacity;
+ return 0;
+}
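+
+/*
+ * Illustrative arithmetic for the fake geometry above (not from the
+ * original source): 224 heads * 56 sectors = 12544 sectors per cylinder,
+ * i.e. 12544 * 512 bytes = 6422528 bytes, which is a multiple of 4KB,
+ * so cylinder (and therefore partition) boundaries stay 4KB aligned.
+ */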
+
+/*
+ * Block device operation function.
+ *
+ * This structure contains pointers to the functions required by the block
+ * layer.
+ */
+static const struct block_device_operations mtip_block_ops = {
+ .ioctl = mtip_block_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = mtip_block_compat_ioctl,
+#endif
+ .getgeo = mtip_block_getgeo,
+ .owner = THIS_MODULE
+};
+
+/*
+ * Block layer make request function.
+ *
+ * This function is called by the kernel to process a BIO for
+ * the P320 device.
+ *
+ * @queue Pointer to the request queue. Unused other than to obtain
+ * the driver data structure.
+ * @bio Pointer to the BIO.
+ *
+ */
+static void mtip_make_request(struct request_queue *queue, struct bio *bio)
+{
+ struct driver_data *dd = queue->queuedata;
+ struct scatterlist *sg;
+ struct bio_vec *bvec;
+ int nents = 0;
+ int tag = 0;
+
+ if (unlikely(!bio_has_data(bio))) {
+ blk_queue_flush(queue, 0);
+ bio_endio(bio, 0);
+ return;
+ }
+
+ sg = mtip_hw_get_scatterlist(dd, &tag);
+ if (likely(sg != NULL)) {
+ blk_queue_bounce(queue, &bio);
+
+ if (unlikely((bio)->bi_vcnt > MTIP_MAX_SG)) {
+ dev_warn(&dd->pdev->dev,
+ "Maximum number of SGL entries exceeded");
+ bio_io_error(bio);
+ mtip_hw_release_scatterlist(dd, tag);
+ return;
+ }
+
+ /* Create the scatter list for this bio. */
+ bio_for_each_segment(bvec, bio, nents) {
+ sg_set_page(&sg[nents],
+ bvec->bv_page,
+ bvec->bv_len,
+ bvec->bv_offset);
+ }
+
+ /* Issue the read/write. */
+ mtip_hw_submit_io(dd,
+ bio->bi_sector,
+ bio_sectors(bio),
+ nents,
+ tag,
+ bio_endio,
+ bio,
+ bio->bi_rw & REQ_FUA,
+ bio_data_dir(bio));
+ } else
+ bio_io_error(bio);
+}
+
+/*
+ * Block layer initialization function.
+ *
+ * This function is called once by the PCI layer for each P320
+ * device that is connected to the system.
+ *
+ * @dd Pointer to the driver data structure.
+ *
+ * return value
+ * 0 on success else an error code.
+ */
+static int mtip_block_initialize(struct driver_data *dd)
+{
+ int rv = 0, wait_for_rebuild = 0;
+ sector_t capacity;
+ unsigned int index = 0;
+ struct kobject *kobj;
+ unsigned char thd_name[16];
+
+ if (dd->disk)
+ goto skip_create_disk; /* hw init done, before rebuild */
+
+ /* Initialize the protocol layer. */
+ wait_for_rebuild = mtip_hw_init(dd);
+ if (wait_for_rebuild < 0) {
+ dev_err(&dd->pdev->dev,
+ "Protocol layer initialization failed\n");
+ rv = -EINVAL;
+ goto protocol_init_error;
+ }
+
+ dd->disk = alloc_disk(MTIP_MAX_MINORS);
+ if (dd->disk == NULL) {
+ dev_err(&dd->pdev->dev,
+ "Unable to allocate gendisk structure\n");
+ rv = -EINVAL;
+ goto alloc_disk_error;
+ }
+
+ /* Generate the disk name, implemented same as in sd.c */
+ do {
+ if (!ida_pre_get(&rssd_index_ida, GFP_KERNEL))
+ goto ida_get_error;
+
+ spin_lock(&rssd_index_lock);
+ rv = ida_get_new(&rssd_index_ida, &index);
+ spin_unlock(&rssd_index_lock);
+ } while (rv == -EAGAIN);
+
+ if (rv)
+ goto ida_get_error;
+
+ rv = rssd_disk_name_format("rssd",
+ index,
+ dd->disk->disk_name,
+ DISK_NAME_LEN);
+ if (rv)
+ goto disk_index_error;
+
+ dd->disk->driverfs_dev = &dd->pdev->dev;
+ dd->disk->major = dd->major;
+ dd->disk->first_minor = dd->instance * MTIP_MAX_MINORS;
+ dd->disk->fops = &mtip_block_ops;
+ dd->disk->private_data = dd;
+ dd->index = index;
+
+ /*
+ * if rebuild pending, start the service thread, and delay the block
+ * queue creation and add_disk()
+ */
+ if (wait_for_rebuild == MTIP_FTL_REBUILD_MAGIC)
+ goto start_service_thread;
+
+skip_create_disk:
+ /* Allocate the request queue. */
+ dd->queue = blk_alloc_queue(GFP_KERNEL);
+ if (dd->queue == NULL) {
+ dev_err(&dd->pdev->dev,
+ "Unable to allocate request queue\n");
+ rv = -ENOMEM;
+ goto block_queue_alloc_init_error;
+ }
+
+ /* Attach our request function to the request queue. */
+ blk_queue_make_request(dd->queue, mtip_make_request);
+
+ dd->disk->queue = dd->queue;
+ dd->queue->queuedata = dd;
+
+ /* Set device limits. */
+ set_bit(QUEUE_FLAG_NONROT, &dd->queue->queue_flags);
+ blk_queue_max_segments(dd->queue, MTIP_MAX_SG);
+ blk_queue_physical_block_size(dd->queue, 4096);
+ blk_queue_io_min(dd->queue, 4096);
+ blk_queue_flush(dd->queue, 0);
+
+ /* Set the capacity of the device in 512 byte sectors. */
+ if (!(mtip_hw_get_capacity(dd, &capacity))) {
+ dev_warn(&dd->pdev->dev,
+ "Could not read drive capacity\n");
+ rv = -EIO;
+ goto read_capacity_error;
+ }
+ set_capacity(dd->disk, capacity);
+
+ /* Enable the block device and add it to /dev */
+ add_disk(dd->disk);
+
+ /*
+ * Now that the disk is active, initialize any sysfs attributes
+ * managed by the protocol layer.
+ */
+ kobj = kobject_get(&disk_to_dev(dd->disk)->kobj);
+ if (kobj) {
+ mtip_hw_sysfs_init(dd, kobj);
+ kobject_put(kobj);
+ }
+
+ if (dd->mtip_svc_handler)
+ return rv; /* service thread created for handling rebuild */
+
+start_service_thread:
+ sprintf(thd_name, "mtip_svc_thd_%02d", index);
+
+ dd->mtip_svc_handler = kthread_run(mtip_service_thread,
+ dd, thd_name);
+
+ if (IS_ERR(dd->mtip_svc_handler)) {
+ printk(KERN_ERR "mtip32xx: service thread failed to start\n");
+ dd->mtip_svc_handler = NULL;
+ rv = -EFAULT;
+ goto kthread_run_error;
+ }
+
+ return rv;
+
+kthread_run_error:
+ /* Delete our gendisk. This also removes the device from /dev */
+ del_gendisk(dd->disk);
+
+read_capacity_error:
+ blk_cleanup_queue(dd->queue);
+
+block_queue_alloc_init_error:
+disk_index_error:
+ spin_lock(&rssd_index_lock);
+ ida_remove(&rssd_index_ida, index);
+ spin_unlock(&rssd_index_lock);
+
+ida_get_error:
+ put_disk(dd->disk);
+
+alloc_disk_error:
+ mtip_hw_exit(dd); /* De-initialize the protocol layer. */
+
+protocol_init_error:
+ return rv;
+}
+
+/*
+ * Block layer deinitialization function.
+ *
+ * Called by the PCI layer as each P320 device is removed.
+ *
+ * @dd Pointer to the driver data structure.
+ *
+ * return value
+ * 0
+ */
+static int mtip_block_remove(struct driver_data *dd)
+{
+ struct kobject *kobj;
+
+ if (dd->mtip_svc_handler) {
+ set_bit(MTIP_FLAG_SVC_THD_SHOULD_STOP_BIT, &dd->port->flags);
+ wake_up_interruptible(&dd->port->svc_wait);
+ kthread_stop(dd->mtip_svc_handler);
+ }
+
+ /* Clean up the sysfs attributes managed by the protocol layer. */
+ kobj = kobject_get(&disk_to_dev(dd->disk)->kobj);
+ if (kobj) {
+ mtip_hw_sysfs_exit(dd, kobj);
+ kobject_put(kobj);
+ }
+
+ /*
+ * Delete our gendisk structure. This also removes the device
+ * from /dev
+ */
+ del_gendisk(dd->disk);
+ blk_cleanup_queue(dd->queue);
+ dd->disk = NULL;
+ dd->queue = NULL;
+
+ /* De-initialize the protocol layer. */
+ mtip_hw_exit(dd);
+
+ return 0;
+}
+
+/*
+ * Function called by the PCI layer just before the
+ * machine shuts down.
+ *
+ * If a protocol layer shutdown function is present it will be called
+ * by this function.
+ *
+ * @dd Pointer to the driver data structure.
+ *
+ * return value
+ * 0
+ */
+static int mtip_block_shutdown(struct driver_data *dd)
+{
+ dev_info(&dd->pdev->dev,
+ "Shutting down %s ...\n", dd->disk->disk_name);
+
+ /* Delete our gendisk structure, and cleanup the blk queue. */
+ del_gendisk(dd->disk);
+ blk_cleanup_queue(dd->queue);
+ dd->disk = NULL;
+ dd->queue = NULL;
+
+ mtip_hw_shutdown(dd);
+ return 0;
+}
+
+static int mtip_block_suspend(struct driver_data *dd)
+{
+ dev_info(&dd->pdev->dev,
+ "Suspending %s ...\n", dd->disk->disk_name);
+ mtip_hw_suspend(dd);
+ return 0;
+}
+
+static int mtip_block_resume(struct driver_data *dd)
+{
+ dev_info(&dd->pdev->dev, "Resuming %s ...\n",
+ dd->disk->disk_name);
+ mtip_hw_resume(dd);
+ return 0;
+}
+
+/*
+ * Called for each supported PCI device detected.
+ *
+ * This function allocates the private data structure, enables the
+ * PCI device and then calls the block layer initialization function.
+ *
+ * return value
+ * 0 on success else an error code.
+ */
+static int mtip_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int rv = 0;
+ struct driver_data *dd = NULL;
+
+ /* Allocate memory for this device's private data. */
+ dd = kzalloc(sizeof(struct driver_data), GFP_KERNEL);
+ if (dd == NULL) {
+ dev_err(&pdev->dev,
+ "Unable to allocate memory for driver data\n");
+ return -ENOMEM;
+ }
+
+ /* Set the atomic variable as 1 in case of SRSI */
+ atomic_set(&dd->drv_cleanup_done, true);
+
+ atomic_set(&dd->resumeflag, false);
+
+ /* Attach the private data to this PCI device. */
+ pci_set_drvdata(pdev, dd);
+
+ rv = pcim_enable_device(pdev);
+ if (rv < 0) {
+ dev_err(&pdev->dev, "Unable to enable device\n");
+ goto iomap_err;
+ }
+
+ /* Map BAR5 to memory. */
+ rv = pcim_iomap_regions(pdev, 1 << MTIP_ABAR, MTIP_DRV_NAME);
+ if (rv < 0) {
+ dev_err(&pdev->dev, "Unable to map regions\n");
+ goto iomap_err;
+ }
+
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ rv = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+
+ if (rv) {
+ rv = pci_set_consistent_dma_mask(pdev,
+ DMA_BIT_MASK(32));
+ if (rv) {
+ dev_warn(&pdev->dev,
+ "64-bit DMA enable failed\n");
+ goto setmask_err;
+ }
+ }
+ }
+
+ pci_set_master(pdev);
+
+ if (pci_enable_msi(pdev)) {
+ dev_warn(&pdev->dev,
+ "Unable to enable MSI interrupt.\n");
+ goto block_initialize_err;
+ }
+
+ /* Copy the info we may need later into the private data structure. */
+ dd->major = mtip_major;
+ dd->instance = instance;
+ dd->pdev = pdev;
+
+ /* Initialize the block layer. */
+ rv = mtip_block_initialize(dd);
+ if (rv < 0) {
+ dev_err(&pdev->dev,
+ "Unable to initialize block layer\n");
+ goto block_initialize_err;
+ }
+
+ /*
+ * Increment the instance count so that each device has a unique
+ * instance number.
+ */
+ instance++;
+
+ goto done;
+
+block_initialize_err:
+ pci_disable_msi(pdev);
+
+setmask_err:
+ pcim_iounmap_regions(pdev, 1 << MTIP_ABAR);
+
+iomap_err:
+ kfree(dd);
+ pci_set_drvdata(pdev, NULL);
+ return rv;
+done:
+ /* Set the atomic variable as 0 in case of SRSI */
+ atomic_set(&dd->drv_cleanup_done, true);
+
+ return rv;
+}
+
+/*
+ * Called for each probed device when the device is removed or the
+ * driver is unloaded.
+ *
+ * return value
+ * None
+ */
+static void mtip_pci_remove(struct pci_dev *pdev)
+{
+ struct driver_data *dd = pci_get_drvdata(pdev);
+ int counter = 0;
+
+ if (mtip_check_surprise_removal(pdev)) {
+ while (atomic_read(&dd->drv_cleanup_done) == false) {
+ counter++;
+ msleep(20);
+ if (counter == 10) {
+ /* Cleanup the outstanding commands */
+ mtip_command_cleanup(dd);
+ break;
+ }
+ }
+ }
+ /* Set the atomic variable as 1 in case of SRSI */
+ atomic_set(&dd->drv_cleanup_done, true);
+
+ /* Clean up the block layer. */
+ mtip_block_remove(dd);
+
+ pci_disable_msi(pdev);
+
+ kfree(dd);
+ pcim_iounmap_regions(pdev, 1 << MTIP_ABAR);
+}
+
+/*
+ * Called for each probed device when the device is suspended.
+ *
+ * return value
+ * 0 Success
+ * <0 Error
+ */
+static int mtip_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
+{
+ int rv = 0;
+ struct driver_data *dd = pci_get_drvdata(pdev);
+
+ if (!dd) {
+ dev_err(&pdev->dev,
+ "Driver private datastructure is NULL\n");
+ return -EFAULT;
+ }
+
+ atomic_set(&dd->resumeflag, true);
+
+ /* Disable ports & interrupts then send standby immediate */
+ rv = mtip_block_suspend(dd);
+ if (rv < 0) {
+ dev_err(&pdev->dev,
+ "Failed to suspend controller\n");
+ return rv;
+ }
+
+ /*
+ * Save the pci config space to pdev structure &
+ * disable the device
+ */
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+
+ /* Move to low power state */
+ pci_set_power_state(pdev, PCI_D3hot);
+
+ return rv;
+}
+
+/*
+ * Called for each probed device when the device is resumed.
+ *
+ * return value
+ * 0 Success
+ * <0 Error
+ */
+static int mtip_pci_resume(struct pci_dev *pdev)
+{
+ int rv = 0;
+ struct driver_data *dd;
+
+ dd = pci_get_drvdata(pdev);
+ if (!dd) {
+ dev_err(&pdev->dev,
+ "Driver private datastructure is NULL\n");
+ return -EFAULT;
+ }
+
+ /* Move the device to active State */
+ pci_set_power_state(pdev, PCI_D0);
+
+ /* Restore PCI configuration space */
+ pci_restore_state(pdev);
+
+ /* Enable the PCI device*/
+ rv = pcim_enable_device(pdev);
+ if (rv < 0) {
+ dev_err(&pdev->dev,
+ "Failed to enable card during resume\n");
+ goto err;
+ }
+ pci_set_master(pdev);
+
+ /*
+ * Calls the HBA reset, port init, and port start functions,
+ * then enables interrupts.
+ */
+ rv = mtip_block_resume(dd);
+ if (rv < 0)
+ dev_err(&pdev->dev, "Unable to resume\n");
+
+err:
+ atomic_set(&dd->resumeflag, false);
+
+ return rv;
+}
+
+/*
+ * Shutdown routine
+ *
+ * return value
+ * None
+ */
+static void mtip_pci_shutdown(struct pci_dev *pdev)
+{
+ struct driver_data *dd = pci_get_drvdata(pdev);
+ if (dd)
+ mtip_block_shutdown(dd);
+}
+
+/* Table of device ids supported by this driver. */
+static DEFINE_PCI_DEVICE_TABLE(mtip_pci_tbl) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_MICRON, P320_DEVICE_ID) },
+ { 0 }
+};
+
+/* Structure that describes the PCI driver functions. */
+static struct pci_driver mtip_pci_driver = {
+ .name = MTIP_DRV_NAME,
+ .id_table = mtip_pci_tbl,
+ .probe = mtip_pci_probe,
+ .remove = mtip_pci_remove,
+ .suspend = mtip_pci_suspend,
+ .resume = mtip_pci_resume,
+ .shutdown = mtip_pci_shutdown,
+};
+
+MODULE_DEVICE_TABLE(pci, mtip_pci_tbl);
+
+/*
+ * Module initialization function.
+ *
+ * Called once when the module is loaded. This function allocates a major
+ * block device number to the Cyclone devices and registers the PCI layer
+ * of the driver.
+ *
+ * Return value
+ * 0 on success else error code.
+ */
+static int __init mtip_init(void)
+{
+ printk(KERN_INFO MTIP_DRV_NAME " Version " MTIP_DRV_VERSION "\n");
+
+ /* Allocate a major block device number to use with this driver. */
+ mtip_major = register_blkdev(0, MTIP_DRV_NAME);
+ if (mtip_major < 0) {
+ printk(KERN_ERR "Unable to register block device (%d)\n",
+ mtip_major);
+ return -EBUSY;
+ }
+
+ /* Register our PCI operations. */
+ return pci_register_driver(&mtip_pci_driver);
+}
+
+/*
+ * Module de-initialization function.
+ *
+ * Called once when the module is unloaded. This function deallocates
+ * the major block device number allocated by mtip_init() and
+ * unregisters the PCI layer of the driver.
+ *
+ * Return value
+ * none
+ */
+static void __exit mtip_exit(void)
+{
+ /* Release the allocated major block device number. */
+ unregister_blkdev(mtip_major, MTIP_DRV_NAME);
+
+ /* Unregister the PCI driver. */
+ pci_unregister_driver(&mtip_pci_driver);
+}
+
+MODULE_AUTHOR("Micron Technology, Inc");
+MODULE_DESCRIPTION("Micron RealSSD PCIe Block Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(MTIP_DRV_VERSION);
+
+module_init(mtip_init);
+module_exit(mtip_exit);
diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
new file mode 100644
index 000000000000..723d7c4946dc
--- /dev/null
+++ b/drivers/block/mtip32xx/mtip32xx.h
@@ -0,0 +1,423 @@
+/*
+ * mtip32xx.h - Header file for the P320 SSD Block Driver
+ * Copyright (C) 2011 Micron Technology, Inc.
+ *
+ * Portions of this code were derived from works subjected to the
+ * following copyright:
+ * Copyright (C) 2009 Integrated Device Technology, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MTIP32XX_H__
+#define __MTIP32XX_H__
+
+#include <linux/spinlock.h>
+#include <linux/rwsem.h>
+#include <linux/ata.h>
+#include <linux/interrupt.h>
+#include <linux/genhd.h>
+#include <linux/version.h>
+
+/* Offset of Subsystem Device ID in PCI configuration space */
+#define PCI_SUBSYSTEM_DEVICEID 0x2E
+
+/* Offset of Device Control register in PCIe extended capabilities space */
+#define PCIE_CONFIG_EXT_DEVICE_CONTROL_OFFSET 0x48
+
+/* # of times to retry timed out IOs */
+#define MTIP_MAX_RETRIES 5
+
+/* Various timeout values in ms */
+#define MTIP_NCQ_COMMAND_TIMEOUT_MS 5000
+#define MTIP_IOCTL_COMMAND_TIMEOUT_MS 5000
+#define MTIP_INTERNAL_COMMAND_TIMEOUT_MS 5000
+
+/* check for timeouts every 500ms */
+#define MTIP_TIMEOUT_CHECK_PERIOD 500
+
+/* ftl rebuild */
+#define MTIP_FTL_REBUILD_OFFSET 142
+#define MTIP_FTL_REBUILD_MAGIC 0xED51
+#define MTIP_FTL_REBUILD_TIMEOUT_MS 2400000
+
+/* Macro to extract the tag bit number from a tag value. */
+#define MTIP_TAG_BIT(tag) (tag & 0x1F)
+
+/*
+ * Macro to extract the tag index from a tag value. The index
+ * is used to access the correct s_active/Command Issue register based
+ * on the tag value.
+ */
+#define MTIP_TAG_INDEX(tag) (tag >> 5)
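+
+/*
+ * Illustrative example (not from the original source): tag 37 has
+ * MTIP_TAG_INDEX(37) == 1 and MTIP_TAG_BIT(37) == 5, so it is tracked
+ * by bit 5 of the second s_active/Command Issue register pair.
+ */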
+
+/*
+ * Maximum number of scatter gather entries
+ * a single command may have.
+ */
+#define MTIP_MAX_SG 128
+
+/*
+ * Maximum number of slot groups (Command Issue & s_active registers)
+ * NOTE: This is the driver maximum; check dd->slot_groups for actual value.
+ */
+#define MTIP_MAX_SLOT_GROUPS 8
+
+/* Internal command tag. */
+#define MTIP_TAG_INTERNAL 0
+
+/* Micron Vendor ID & P320x SSD Device ID */
+#define PCI_VENDOR_ID_MICRON 0x1344
+#define P320_DEVICE_ID 0x5150
+
+/* Driver name and version strings */
+#define MTIP_DRV_NAME "mtip32xx"
+#define MTIP_DRV_VERSION "1.2.6os3"
+
+/* Maximum number of minor device numbers per device. */
+#define MTIP_MAX_MINORS 16
+
+/* Maximum number of supported command slots. */
+#define MTIP_MAX_COMMAND_SLOTS (MTIP_MAX_SLOT_GROUPS * 32)
+
+/*
+ * Per-tag bitfield size in longs.
+ * Linux bit manipulation functions
+ * (e.g. test_and_set_bit, find_next_zero_bit)
+ * manipulate memory in longs, so we try to make the math work:
+ * take the number of slot groups and find the number of longs, rounding up.
+ * Careful! i386 and x86_64 use different sized longs!
+ */
+#define U32_PER_LONG (sizeof(long) / sizeof(u32))
+#define SLOTBITS_IN_LONGS ((MTIP_MAX_SLOT_GROUPS + \
+ (U32_PER_LONG-1))/U32_PER_LONG)
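+
+/*
+ * Worked example (illustrative): with MTIP_MAX_SLOT_GROUPS == 8,
+ * U32_PER_LONG is 2 on 64-bit kernels, giving SLOTBITS_IN_LONGS == 4,
+ * while on 32-bit kernels U32_PER_LONG is 1 and the result is 8.
+ */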
+
+/* BAR number used to access the HBA registers. */
+#define MTIP_ABAR 5
+
+/* Forced Unit Access Bit */
+#define FUA_BIT 0x80
+
+#ifdef DEBUG
+ #define dbg_printk(format, arg...) \
+ printk(pr_fmt(format), ##arg);
+#else
+ #define dbg_printk(format, arg...)
+#endif
+
+#define __force_bit2int (unsigned int __force)
+
+/* below are bit numbers in 'flags' defined in mtip_port */
+#define MTIP_FLAG_IC_ACTIVE_BIT 0
+#define MTIP_FLAG_EH_ACTIVE_BIT 1
+#define MTIP_FLAG_SVC_THD_ACTIVE_BIT 2
+#define MTIP_FLAG_ISSUE_CMDS_BIT 4
+#define MTIP_FLAG_REBUILD_BIT 5
+#define MTIP_FLAG_SVC_THD_SHOULD_STOP_BIT 8
+
+/* Register Frame Information Structure (FIS), host to device. */
+struct host_to_dev_fis {
+ /*
+ * FIS type.
+ * - 27h Register FIS, host to device.
+ * - 34h Register FIS, device to host.
+ * - 39h DMA Activate FIS, device to host.
+ * - 41h DMA Setup FIS, bi-directional.
+ * - 46h Data FIS, bi-directional.
+ * - 58h BIST Activate FIS, bi-directional.
+ * - 5Fh PIO Setup FIS, device to host.
+ * - A1h Set Device Bits FIS, device to host.
+ */
+ unsigned char type;
+ unsigned char opts;
+ unsigned char command;
+ unsigned char features;
+
+ union {
+ unsigned char lba_low;
+ unsigned char sector;
+ };
+ union {
+ unsigned char lba_mid;
+ unsigned char cyl_low;
+ };
+ union {
+ unsigned char lba_hi;
+ unsigned char cyl_hi;
+ };
+ union {
+ unsigned char device;
+ unsigned char head;
+ };
+
+ union {
+ unsigned char lba_low_ex;
+ unsigned char sector_ex;
+ };
+ union {
+ unsigned char lba_mid_ex;
+ unsigned char cyl_low_ex;
+ };
+ union {
+ unsigned char lba_hi_ex;
+ unsigned char cyl_hi_ex;
+ };
+ unsigned char features_ex;
+
+ unsigned char sect_count;
+ unsigned char sect_cnt_ex;
+ unsigned char res2;
+ unsigned char control;
+
+ unsigned int res3;
+};
+
+/* Command header structure. */
+struct mtip_cmd_hdr {
+ /*
+ * Command options.
+ * - Bits 31:16 Number of PRD entries.
+ * - Bits 15:8 Unused in this implementation.
+ * - Bit 7 Prefetch bit, informs the drive to prefetch PRD entries.
+ * - Bit 6 Write bit, should be set when writing data to the device.
+ * - Bit 5 Unused in this implementation.
+ * - Bits 4:0 Length of the command FIS in DWords (DWord = 4 bytes).
+ */
+ unsigned int opts;
+ /* This field is unused when using NCQ. */
+ union {
+ unsigned int byte_count;
+ unsigned int status;
+ };
+ /*
+ * Lower 32 bits of the command table address associated with this
+ * header. The command table addresses must be 128 byte aligned.
+ */
+ unsigned int ctba;
+ /*
+ * If 64 bit addressing is used this field is the upper 32 bits
+ * of the command table address associated with this command.
+ */
+ unsigned int ctbau;
+ /* Reserved and unused. */
+ unsigned int res[4];
+};
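+
+/*
+ * Illustrative 'opts' encoding (not from the original source): a write
+ * built from a standard 5-DWord host-to-device register FIS with 4 PRD
+ * entries would set opts = (4 << 16) | (1 << 6) | 5.
+ */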
+
+/* Command scatter gather structure (PRD). */
+struct mtip_cmd_sg {
+ /*
+ * Low 32 bits of the data buffer address. For P320 this
+ * address must be 8 byte aligned signified by bits 2:0 being
+ * set to 0.
+ */
+ unsigned int dba;
+ /*
+ * When 64 bit addressing is used this field is the upper
+ * 32 bits of the data buffer address.
+ */
+ unsigned int dba_upper;
+ /* Unused. */
+ unsigned int reserved;
+ /*
+ * Bit 31: interrupt when this data block has been transferred.
+ * Bits 30..22: reserved
+ * Bits 21..0: byte count (minus 1). For P320 the byte count must be
+ * 8 byte aligned signified by bits 2:0 being set to 1.
+ */
+ unsigned int info;
+};
+struct mtip_port;
+
+/* Structure used to describe a command. */
+struct mtip_cmd {
+
+ struct mtip_cmd_hdr *command_header; /* ptr to command header entry */
+
+ dma_addr_t command_header_dma; /* corresponding physical address */
+
+ void *command; /* ptr to command table entry */
+
+ dma_addr_t command_dma; /* corresponding physical address */
+
+ void *comp_data; /* data passed to completion function comp_func() */
+ /*
+ * Completion function called by the ISR upon completion of
+ * a command.
+ */
+ void (*comp_func)(struct mtip_port *port,
+ int tag,
+ void *data,
+ int status);
+ /* Additional callback function that may be called by comp_func() */
+ void (*async_callback)(void *data, int status);
+
+ void *async_data; /* Addl. data passed to async_callback() */
+
+ int scatter_ents; /* Number of scatter list entries used */
+
+ struct scatterlist sg[MTIP_MAX_SG]; /* Scatter list entries */
+
+ int retries; /* The number of retries left for this command. */
+
+ int direction; /* Data transfer direction */
+
+ unsigned long comp_time; /* command completion time, in jiffies */
+
+ atomic_t active; /* indicates whether this command has been sent to the drive. */
+};
+
+/* Structure used to describe a port. */
+struct mtip_port {
+ /* Pointer back to the driver data for this port. */
+ struct driver_data *dd;
+ /*
+ * Used to determine if the data pointed to by the
+ * identify field is valid.
+ */
+ unsigned long identify_valid;
+ /* Base address of the memory mapped IO for the port. */
+ void __iomem *mmio;
+ /* Array of pointers to the memory mapped s_active registers. */
+ void __iomem *s_active[MTIP_MAX_SLOT_GROUPS];
+ /* Array of pointers to the memory mapped completed registers. */
+ void __iomem *completed[MTIP_MAX_SLOT_GROUPS];
+ /* Array of pointers to the memory mapped Command Issue registers. */
+ void __iomem *cmd_issue[MTIP_MAX_SLOT_GROUPS];
+ /*
+ * Pointer to the beginning of the command header memory as used
+ * by the driver.
+ */
+ void *command_list;
+ /*
+ * Pointer to the beginning of the command header memory as used
+ * by the DMA.
+ */
+ dma_addr_t command_list_dma;
+ /*
+ * Pointer to the beginning of the RX FIS memory as used
+ * by the driver.
+ */
+ void *rxfis;
+ /*
+ * Pointer to the beginning of the RX FIS memory as used
+ * by the DMA.
+ */
+ dma_addr_t rxfis_dma;
+ /*
+ * Pointer to the beginning of the command table memory as used
+ * by the driver.
+ */
+ void *command_table;
+ /*
+ * Pointer to the beginning of the command table memory as used
+ * by the DMA.
+ */
+ dma_addr_t command_tbl_dma;
+ /*
+ * Pointer to the beginning of the identify data memory as used
+ * by the driver.
+ */
+ u16 *identify;
+ /*
+ * Pointer to the beginning of the identify data memory as used
+ * by the DMA.
+ */
+ dma_addr_t identify_dma;
+ /*
+ * Pointer to the beginning of a sector buffer that is used
+ * by the driver when issuing internal commands.
+ */
+ u16 *sector_buffer;
+ /*
+ * Pointer to the beginning of a sector buffer that is used
+ * by the DMA when the driver issues internal commands.
+ */
+ dma_addr_t sector_buffer_dma;
+ /*
+ * Bit significant, used to determine if a command slot has
+ * been allocated. i.e. the slot is in use. Bits are cleared
+ * when the command slot and all associated data structures
+ * are no longer needed.
+ */
+ unsigned long allocated[SLOTBITS_IN_LONGS];
+ /*
+ * used to queue commands when an internal command is in progress
+ * or error handling is active
+ */
+ unsigned long cmds_to_issue[SLOTBITS_IN_LONGS];
+ /*
+ * Array of command slots. Structure includes pointers to the
+ * command header and command table, and completion function and data
+ * pointers.
+ */
+ struct mtip_cmd commands[MTIP_MAX_COMMAND_SLOTS];
+ /* Used by mtip_service_thread to wait for an event */
+ wait_queue_head_t svc_wait;
+ /*
+ * Indicates the state of the port. Also helps the service thread
+ * determine its action on wake up.
+ */
+ unsigned long flags;
+ /*
+ * Timer used to complete commands that have been active for too long.
+ */
+ struct timer_list cmd_timer;
+ /*
+ * Semaphore used to block threads if there are no
+ * command slots available.
+ */
+ struct semaphore cmd_slot;
+ /* Spinlock for working around command-issue bug. */
+ spinlock_t cmd_issue_lock;
+};
+
+/*
+ * Driver private data structure.
+ *
+ * One structure is allocated per probed device.
+ */
+struct driver_data {
+ void __iomem *mmio; /* Base address of the HBA registers. */
+
+ int major; /* Major device number. */
+
+ int instance; /* Instance number. First device probed is 0, ... */
+
+ struct gendisk *disk; /* Pointer to our gendisk structure. */
+
+ struct pci_dev *pdev; /* Pointer to the PCI device structure. */
+
+ struct request_queue *queue; /* Our request queue. */
+
+ struct mtip_port *port; /* Pointer to the port data structure. */
+
+ /* Tasklet used to process the bottom half of the ISR. */
+ struct tasklet_struct tasklet;
+
+ unsigned product_type; /* magic value declaring the product type */
+
+ unsigned slot_groups; /* number of slot groups the product supports */
+
+ atomic_t drv_cleanup_done; /* Atomic variable for SRSI */
+
+ unsigned long index; /* Index to determine the disk name */
+
+ unsigned int ftlrebuildflag; /* FTL rebuild flag */
+
+ atomic_t resumeflag; /* Atomic variable to track suspend/resume */
+
+ atomic_t eh_active; /* Flag for error handling tracking */
+
+ struct task_struct *mtip_svc_handler; /* task_struct of svc thd */
+};
+
+#endif
diff --git a/drivers/block/nvme.c b/drivers/block/nvme.c
new file mode 100644
index 000000000000..c1dc4d86c221
--- /dev/null
+++ b/drivers/block/nvme.c
@@ -0,0 +1,1739 @@
+/*
+ * NVM Express device driver
+ * Copyright (c) 2011, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/nvme.h>
+#include <linux/bio.h>
+#include <linux/bitops.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/genhd.h>
+#include <linux/idr.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kdev_t.h>
+#include <linux/kthread.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pci.h>
+#include <linux/poison.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/version.h>
+
+#define NVME_Q_DEPTH 1024
+#define SQ_SIZE(depth) (depth * sizeof(struct nvme_command))
+#define CQ_SIZE(depth) (depth * sizeof(struct nvme_completion))
+#define NVME_MINORS 64
+#define NVME_IO_TIMEOUT (5 * HZ)
+#define ADMIN_TIMEOUT (60 * HZ)
+
+static int nvme_major;
+module_param(nvme_major, int, 0);
+
+static int use_threaded_interrupts;
+module_param(use_threaded_interrupts, int, 0);
+
+static DEFINE_SPINLOCK(dev_list_lock);
+static LIST_HEAD(dev_list);
+static struct task_struct *nvme_thread;
+
+/*
+ * Represents an NVM Express device. Each nvme_dev is a PCI function.
+ */
+struct nvme_dev {
+ struct list_head node;
+ struct nvme_queue **queues;
+ u32 __iomem *dbs;
+ struct pci_dev *pci_dev;
+ struct dma_pool *prp_page_pool;
+ struct dma_pool *prp_small_pool;
+ int instance;
+ int queue_count;
+ int db_stride;
+ u32 ctrl_config;
+ struct msix_entry *entry;
+ struct nvme_bar __iomem *bar;
+ struct list_head namespaces;
+ char serial[20];
+ char model[40];
+ char firmware_rev[8];
+};
+
+/*
+ * An NVM Express namespace is equivalent to a SCSI LUN
+ */
+struct nvme_ns {
+ struct list_head list;
+
+ struct nvme_dev *dev;
+ struct request_queue *queue;
+ struct gendisk *disk;
+
+ int ns_id;
+ int lba_shift;
+};
+
+/*
+ * An NVM Express queue. Each device has at least two (one for admin
+ * commands and one for I/O commands).
+ */
+struct nvme_queue {
+ struct device *q_dmadev;
+ struct nvme_dev *dev;
+ spinlock_t q_lock;
+ struct nvme_command *sq_cmds;
+ volatile struct nvme_completion *cqes;
+ dma_addr_t sq_dma_addr;
+ dma_addr_t cq_dma_addr;
+ wait_queue_head_t sq_full;
+ wait_queue_t sq_cong_wait;
+ struct bio_list sq_cong;
+ u32 __iomem *q_db;
+ u16 q_depth;
+ u16 cq_vector;
+ u16 sq_head;
+ u16 sq_tail;
+ u16 cq_head;
+ u16 cq_phase;
+ unsigned long cmdid_data[];
+};
+
+/*
+ * Check we didn't inadvertently grow the command struct
+ */
+static inline void _nvme_check_size(void)
+{
+ BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
+ BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64);
+ BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
+ BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
+ BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
+ BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
+ BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096);
+ BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096);
+ BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
+}
+
+typedef void (*nvme_completion_fn)(struct nvme_dev *, void *,
+ struct nvme_completion *);
+
+struct nvme_cmd_info {
+ nvme_completion_fn fn;
+ void *ctx;
+ unsigned long timeout;
+};
+
+static struct nvme_cmd_info *nvme_cmd_info(struct nvme_queue *nvmeq)
+{
+ return (void *)&nvmeq->cmdid_data[BITS_TO_LONGS(nvmeq->q_depth)];
+}
+
+/**
+ * alloc_cmdid() - Allocate a Command ID
+ * @nvmeq: The queue that will be used for this command
+ * @ctx: A pointer that will be passed to the handler
+ * @handler: The function to call on completion
+ *
+ * Allocate a Command ID for a queue. The data passed in will
+ * be passed to the completion handler. This is implemented by using
+ * the bottom two bits of the ctx pointer to store the handler ID.
+ * Passing in a pointer that's not 4-byte aligned will cause a BUG.
+ * We can change this if it becomes a problem.
+ *
+ * May be called with local interrupts disabled and the q_lock held,
+ * or with interrupts enabled and no locks held.
+ */
+static int alloc_cmdid(struct nvme_queue *nvmeq, void *ctx,
+ nvme_completion_fn handler, unsigned timeout)
+{
+ int depth = nvmeq->q_depth - 1;
+ struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
+ int cmdid;
+
+ do {
+ cmdid = find_first_zero_bit(nvmeq->cmdid_data, depth);
+ if (cmdid >= depth)
+ return -EBUSY;
+ } while (test_and_set_bit(cmdid, nvmeq->cmdid_data));
+
+ info[cmdid].fn = handler;
+ info[cmdid].ctx = ctx;
+ info[cmdid].timeout = jiffies + timeout;
+ return cmdid;
+}
+
+static int alloc_cmdid_killable(struct nvme_queue *nvmeq, void *ctx,
+ nvme_completion_fn handler, unsigned timeout)
+{
+ int cmdid;
+ wait_event_killable(nvmeq->sq_full,
+ (cmdid = alloc_cmdid(nvmeq, ctx, handler, timeout)) >= 0);
+ return (cmdid < 0) ? -EINTR : cmdid;
+}
+
+/* Special values must be less than 0x1000 */
+#define CMD_CTX_BASE ((void *)POISON_POINTER_DELTA)
+#define CMD_CTX_CANCELLED (0x30C + CMD_CTX_BASE)
+#define CMD_CTX_COMPLETED (0x310 + CMD_CTX_BASE)
+#define CMD_CTX_INVALID (0x314 + CMD_CTX_BASE)
+#define CMD_CTX_FLUSH (0x318 + CMD_CTX_BASE)
+
+static void special_completion(struct nvme_dev *dev, void *ctx,
+ struct nvme_completion *cqe)
+{
+ if (ctx == CMD_CTX_CANCELLED)
+ return;
+ if (ctx == CMD_CTX_FLUSH)
+ return;
+ if (ctx == CMD_CTX_COMPLETED) {
+ dev_warn(&dev->pci_dev->dev,
+ "completed id %d twice on queue %d\n",
+ cqe->command_id, le16_to_cpup(&cqe->sq_id));
+ return;
+ }
+ if (ctx == CMD_CTX_INVALID) {
+ dev_warn(&dev->pci_dev->dev,
+ "invalid id %d completed on queue %d\n",
+ cqe->command_id, le16_to_cpup(&cqe->sq_id));
+ return;
+ }
+
+ dev_warn(&dev->pci_dev->dev, "Unknown special completion %p\n", ctx);
+}
+
+/*
+ * Called with local interrupts disabled and the q_lock held. May not sleep.
+ */
+static void *free_cmdid(struct nvme_queue *nvmeq, int cmdid,
+ nvme_completion_fn *fn)
+{
+ void *ctx;
+ struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
+
+ if (cmdid >= nvmeq->q_depth) {
+ *fn = special_completion;
+ return CMD_CTX_INVALID;
+ }
+ *fn = info[cmdid].fn;
+ ctx = info[cmdid].ctx;
+ info[cmdid].fn = special_completion;
+ info[cmdid].ctx = CMD_CTX_COMPLETED;
+ clear_bit(cmdid, nvmeq->cmdid_data);
+ wake_up(&nvmeq->sq_full);
+ return ctx;
+}
+
+static void *cancel_cmdid(struct nvme_queue *nvmeq, int cmdid,
+ nvme_completion_fn *fn)
+{
+ void *ctx;
+ struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
+ if (fn)
+ *fn = info[cmdid].fn;
+ ctx = info[cmdid].ctx;
+ info[cmdid].fn = special_completion;
+ info[cmdid].ctx = CMD_CTX_CANCELLED;
+ return ctx;
+}
+
+static struct nvme_queue *get_nvmeq(struct nvme_dev *dev)
+{
+ return dev->queues[get_cpu() + 1];
+}
+
+static void put_nvmeq(struct nvme_queue *nvmeq)
+{
+ put_cpu();
+}
+
+/**
+ * nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
+ * @nvmeq: The queue to use
+ * @cmd: The command to send
+ *
+ * Safe to use from interrupt context
+ */
+static int nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
+{
+ unsigned long flags;
+ u16 tail;
+ spin_lock_irqsave(&nvmeq->q_lock, flags);
+ tail = nvmeq->sq_tail;
+ memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
+ if (++tail == nvmeq->q_depth)
+ tail = 0;
+ writel(tail, nvmeq->q_db);
+ nvmeq->sq_tail = tail;
+ spin_unlock_irqrestore(&nvmeq->q_lock, flags);
+
+ return 0;
+}
+
+/*
+ * The nvme_iod describes the data in an I/O, including the list of PRP
+ * entries. You can't see it in this data structure because C doesn't let
+ * me express that. Use nvme_alloc_iod to ensure there's enough space
+ * allocated to store the PRP list.
+ */
+struct nvme_iod {
+ void *private; /* For the use of the submitter of the I/O */
+ int npages; /* In the PRP list. 0 means small pool in use */
+ int offset; /* Of PRP list */
+ int nents; /* Used in scatterlist */
+ int length; /* Of data, in bytes */
+ dma_addr_t first_dma;
+ struct scatterlist sg[0];
+};
+
+static __le64 **iod_list(struct nvme_iod *iod)
+{
+ return ((void *)iod) + iod->offset;
+}
+
+/*
+ * Will slightly overestimate the number of pages needed. This is OK
+ * as it only leads to a small amount of wasted memory for the lifetime of
+ * the I/O.
+ */
+static int nvme_npages(unsigned size)
+{
+ unsigned nprps = DIV_ROUND_UP(size + PAGE_SIZE, PAGE_SIZE);
+ return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
+}
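+
+/*
+ * Worked example (illustrative, assuming 4KB pages): a 16KB request
+ * gives nprps = DIV_ROUND_UP(16384 + 4096, 4096) = 5, so the PRP list
+ * needs DIV_ROUND_UP(8 * 5, 4096 - 8) = 1 page.
+ */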
+
+static struct nvme_iod *
+nvme_alloc_iod(unsigned nseg, unsigned nbytes, gfp_t gfp)
+{
+ struct nvme_iod *iod = kmalloc(sizeof(struct nvme_iod) +
+ sizeof(__le64 *) * nvme_npages(nbytes) +
+ sizeof(struct scatterlist) * nseg, gfp);
+
+ if (iod) {
+ iod->offset = offsetof(struct nvme_iod, sg[nseg]);
+ iod->npages = -1;
+ iod->length = nbytes;
+ }
+
+ return iod;
+}
+
+static void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod)
+{
+ const int last_prp = PAGE_SIZE / 8 - 1;
+ int i;
+ __le64 **list = iod_list(iod);
+ dma_addr_t prp_dma = iod->first_dma;
+
+ if (iod->npages == 0)
+ dma_pool_free(dev->prp_small_pool, list[0], prp_dma);
+ for (i = 0; i < iod->npages; i++) {
+ __le64 *prp_list = list[i];
+ dma_addr_t next_prp_dma = le64_to_cpu(prp_list[last_prp]);
+ dma_pool_free(dev->prp_page_pool, prp_list, prp_dma);
+ prp_dma = next_prp_dma;
+ }
+ kfree(iod);
+}
+
+static void requeue_bio(struct nvme_dev *dev, struct bio *bio)
+{
+ struct nvme_queue *nvmeq = get_nvmeq(dev);
+ if (bio_list_empty(&nvmeq->sq_cong))
+ add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
+ bio_list_add(&nvmeq->sq_cong, bio);
+ put_nvmeq(nvmeq);
+ wake_up_process(nvme_thread);
+}
+
+static void bio_completion(struct nvme_dev *dev, void *ctx,
+ struct nvme_completion *cqe)
+{
+ struct nvme_iod *iod = ctx;
+ struct bio *bio = iod->private;
+ u16 status = le16_to_cpup(&cqe->status) >> 1;
+
+ dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
+ bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ nvme_free_iod(dev, iod);
+ if (status) {
+ bio_endio(bio, -EIO);
+ } else if (bio->bi_vcnt > bio->bi_idx) {
+ requeue_bio(dev, bio);
+ } else {
+ bio_endio(bio, 0);
+ }
+}
+
+/* length is in bytes. gfp flags indicates whether we may sleep. */
+static int nvme_setup_prps(struct nvme_dev *dev,
+ struct nvme_common_command *cmd, struct nvme_iod *iod,
+ int total_len, gfp_t gfp)
+{
+ struct dma_pool *pool;
+ int length = total_len;
+ struct scatterlist *sg = iod->sg;
+ int dma_len = sg_dma_len(sg);
+ u64 dma_addr = sg_dma_address(sg);
+ int offset = offset_in_page(dma_addr);
+ __le64 *prp_list;
+ __le64 **list = iod_list(iod);
+ dma_addr_t prp_dma;
+ int nprps, i;
+
+ cmd->prp1 = cpu_to_le64(dma_addr);
+ length -= (PAGE_SIZE - offset);
+ if (length <= 0)
+ return total_len;
+
+ dma_len -= (PAGE_SIZE - offset);
+ if (dma_len) {
+ dma_addr += (PAGE_SIZE - offset);
+ } else {
+ sg = sg_next(sg);
+ dma_addr = sg_dma_address(sg);
+ dma_len = sg_dma_len(sg);
+ }
+
+ if (length <= PAGE_SIZE) {
+ cmd->prp2 = cpu_to_le64(dma_addr);
+ return total_len;
+ }
+
+ nprps = DIV_ROUND_UP(length, PAGE_SIZE);
+ if (nprps <= (256 / 8)) {
+ pool = dev->prp_small_pool;
+ iod->npages = 0;
+ } else {
+ pool = dev->prp_page_pool;
+ iod->npages = 1;
+ }
+
+ prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
+ if (!prp_list) {
+ cmd->prp2 = cpu_to_le64(dma_addr);
+ iod->npages = -1;
+ return (total_len - length) + PAGE_SIZE;
+ }
+ list[0] = prp_list;
+ iod->first_dma = prp_dma;
+ cmd->prp2 = cpu_to_le64(prp_dma);
+ i = 0;
+ for (;;) {
+ if (i == PAGE_SIZE / 8) {
+ __le64 *old_prp_list = prp_list;
+ prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
+ if (!prp_list)
+ return total_len - length;
+ list[iod->npages++] = prp_list;
+ prp_list[0] = old_prp_list[i - 1];
+ old_prp_list[i - 1] = cpu_to_le64(prp_dma);
+ i = 1;
+ }
+ prp_list[i++] = cpu_to_le64(dma_addr);
+ dma_len -= PAGE_SIZE;
+ dma_addr += PAGE_SIZE;
+ length -= PAGE_SIZE;
+ if (length <= 0)
+ break;
+ if (dma_len > 0)
+ continue;
+ BUG_ON(dma_len < 0);
+ sg = sg_next(sg);
+ dma_addr = sg_dma_address(sg);
+ dma_len = sg_dma_len(sg);
+ }
+
+ return total_len;
+}
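+
+/*
+ * Illustrative example of the PRP scheme above (not from the original
+ * source, assuming 4KB pages): a page-aligned two-page transfer uses
+ * prp1 for the first page and prp2 for the second; anything longer makes
+ * prp2 point to a PRP list holding the remaining page addresses.
+ */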
+
+/* NVMe scatterlists require no holes in the virtual address */
+#define BIOVEC_NOT_VIRT_MERGEABLE(vec1, vec2) ((vec2)->bv_offset || \
+ (((vec1)->bv_offset + (vec1)->bv_len) % PAGE_SIZE))
+
+static int nvme_map_bio(struct device *dev, struct nvme_iod *iod,
+ struct bio *bio, enum dma_data_direction dma_dir, int psegs)
+{
+ struct bio_vec *bvec, *bvprv = NULL;
+ struct scatterlist *sg = NULL;
+ int i, old_idx, length = 0, nsegs = 0;
+
+ sg_init_table(iod->sg, psegs);
+ old_idx = bio->bi_idx;
+ bio_for_each_segment(bvec, bio, i) {
+ if (bvprv && BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) {
+ sg->length += bvec->bv_len;
+ } else {
+ if (bvprv && BIOVEC_NOT_VIRT_MERGEABLE(bvprv, bvec))
+ break;
+ sg = sg ? sg + 1 : iod->sg;
+ sg_set_page(sg, bvec->bv_page, bvec->bv_len,
+ bvec->bv_offset);
+ nsegs++;
+ }
+ length += bvec->bv_len;
+ bvprv = bvec;
+ }
+ bio->bi_idx = i;
+ iod->nents = nsegs;
+ sg_mark_end(sg);
+ if (dma_map_sg(dev, iod->sg, iod->nents, dma_dir) == 0) {
+ bio->bi_idx = old_idx;
+ return -ENOMEM;
+ }
+ return length;
+}
+
+static int nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns,
+ int cmdid)
+{
+ struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
+
+ memset(cmnd, 0, sizeof(*cmnd));
+ cmnd->common.opcode = nvme_cmd_flush;
+ cmnd->common.command_id = cmdid;
+ cmnd->common.nsid = cpu_to_le32(ns->ns_id);
+
+ if (++nvmeq->sq_tail == nvmeq->q_depth)
+ nvmeq->sq_tail = 0;
+ writel(nvmeq->sq_tail, nvmeq->q_db);
+
+ return 0;
+}
+
+static int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns)
+{
+ int cmdid = alloc_cmdid(nvmeq, (void *)CMD_CTX_FLUSH,
+ special_completion, NVME_IO_TIMEOUT);
+ if (unlikely(cmdid < 0))
+ return cmdid;
+
+ return nvme_submit_flush(nvmeq, ns, cmdid);
+}
+
+/*
+ * Called with local interrupts disabled and the q_lock held. May not sleep.
+ */
+static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
+ struct bio *bio)
+{
+ struct nvme_command *cmnd;
+ struct nvme_iod *iod;
+ enum dma_data_direction dma_dir;
+ int cmdid, length, result = -ENOMEM;
+ u16 control;
+ u32 dsmgmt;
+ int psegs = bio_phys_segments(ns->queue, bio);
+
+ if ((bio->bi_rw & REQ_FLUSH) && psegs) {
+ result = nvme_submit_flush_data(nvmeq, ns);
+ if (result)
+ return result;
+ }
+
+ iod = nvme_alloc_iod(psegs, bio->bi_size, GFP_ATOMIC);
+ if (!iod)
+ goto nomem;
+ iod->private = bio;
+
+ result = -EBUSY;
+ cmdid = alloc_cmdid(nvmeq, iod, bio_completion, NVME_IO_TIMEOUT);
+ if (unlikely(cmdid < 0))
+ goto free_iod;
+
+ if ((bio->bi_rw & REQ_FLUSH) && !psegs)
+ return nvme_submit_flush(nvmeq, ns, cmdid);
+
+ control = 0;
+ if (bio->bi_rw & REQ_FUA)
+ control |= NVME_RW_FUA;
+ if (bio->bi_rw & (REQ_FAILFAST_DEV | REQ_RAHEAD))
+ control |= NVME_RW_LR;
+
+ dsmgmt = 0;
+ if (bio->bi_rw & REQ_RAHEAD)
+ dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
+
+ cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
+
+ memset(cmnd, 0, sizeof(*cmnd));
+ if (bio_data_dir(bio)) {
+ cmnd->rw.opcode = nvme_cmd_write;
+ dma_dir = DMA_TO_DEVICE;
+ } else {
+ cmnd->rw.opcode = nvme_cmd_read;
+ dma_dir = DMA_FROM_DEVICE;
+ }
+
+ result = nvme_map_bio(nvmeq->q_dmadev, iod, bio, dma_dir, psegs);
+ if (result < 0)
+ goto free_iod;
+ length = result;
+
+ cmnd->rw.command_id = cmdid;
+ cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
+ length = nvme_setup_prps(nvmeq->dev, &cmnd->common, iod, length,
+ GFP_ATOMIC);
+ cmnd->rw.slba = cpu_to_le64(bio->bi_sector >> (ns->lba_shift - 9));
+ cmnd->rw.length = cpu_to_le16((length >> ns->lba_shift) - 1);
+ cmnd->rw.control = cpu_to_le16(control);
+ cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
+
+ bio->bi_sector += length >> 9;
+
+ if (++nvmeq->sq_tail == nvmeq->q_depth)
+ nvmeq->sq_tail = 0;
+ writel(nvmeq->sq_tail, nvmeq->q_db);
+
+ return 0;
+
+ free_iod:
+ nvme_free_iod(nvmeq->dev, iod);
+ nomem:
+ return result;
+}
+
+static void nvme_make_request(struct request_queue *q, struct bio *bio)
+{
+ struct nvme_ns *ns = q->queuedata;
+ struct nvme_queue *nvmeq = get_nvmeq(ns->dev);
+ int result = -EBUSY;
+
+ spin_lock_irq(&nvmeq->q_lock);
+ if (bio_list_empty(&nvmeq->sq_cong))
+ result = nvme_submit_bio_queue(nvmeq, ns, bio);
+ if (unlikely(result)) {
+ if (bio_list_empty(&nvmeq->sq_cong))
+ add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
+ bio_list_add(&nvmeq->sq_cong, bio);
+ }
+
+ spin_unlock_irq(&nvmeq->q_lock);
+ put_nvmeq(nvmeq);
+}
+
+static irqreturn_t nvme_process_cq(struct nvme_queue *nvmeq)
+{
+ u16 head, phase;
+
+ head = nvmeq->cq_head;
+ phase = nvmeq->cq_phase;
+
+ for (;;) {
+ void *ctx;
+ nvme_completion_fn fn;
+ struct nvme_completion cqe = nvmeq->cqes[head];
+ if ((le16_to_cpu(cqe.status) & 1) != phase)
+ break;
+ nvmeq->sq_head = le16_to_cpu(cqe.sq_head);
+ if (++head == nvmeq->q_depth) {
+ head = 0;
+ phase = !phase;
+ }
+
+ ctx = free_cmdid(nvmeq, cqe.command_id, &fn);
+ fn(nvmeq->dev, ctx, &cqe);
+ }
+
+ /* If the controller ignores the cq head doorbell and continuously
+ * writes to the queue, it is theoretically possible to wrap around
+ * the queue twice and mistakenly return IRQ_NONE. Linux only
+ * requires that 0.1% of your interrupts are handled, so this isn't
+ * a big problem.
+ */
+ if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
+ return IRQ_NONE;
+
+ writel(head, nvmeq->q_db + (1 << nvmeq->dev->db_stride));
+ nvmeq->cq_head = head;
+ nvmeq->cq_phase = phase;
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t nvme_irq(int irq, void *data)
+{
+ irqreturn_t result;
+ struct nvme_queue *nvmeq = data;
+ spin_lock(&nvmeq->q_lock);
+ result = nvme_process_cq(nvmeq);
+ spin_unlock(&nvmeq->q_lock);
+ return result;
+}
+
+static irqreturn_t nvme_irq_check(int irq, void *data)
+{
+ struct nvme_queue *nvmeq = data;
+ struct nvme_completion cqe = nvmeq->cqes[nvmeq->cq_head];
+ if ((le16_to_cpu(cqe.status) & 1) != nvmeq->cq_phase)
+ return IRQ_NONE;
+ return IRQ_WAKE_THREAD;
+}
+
+static void nvme_abort_command(struct nvme_queue *nvmeq, int cmdid)
+{
+ spin_lock_irq(&nvmeq->q_lock);
+ cancel_cmdid(nvmeq, cmdid, NULL);
+ spin_unlock_irq(&nvmeq->q_lock);
+}
+
+struct sync_cmd_info {
+ struct task_struct *task;
+ u32 result;
+ int status;
+};
+
+static void sync_completion(struct nvme_dev *dev, void *ctx,
+ struct nvme_completion *cqe)
+{
+ struct sync_cmd_info *cmdinfo = ctx;
+ cmdinfo->result = le32_to_cpup(&cqe->result);
+ cmdinfo->status = le16_to_cpup(&cqe->status) >> 1;
+ wake_up_process(cmdinfo->task);
+}
+
+/*
+ * Returns 0 on success. If the result is negative, it's a Linux error code;
+ * if the result is positive, it's an NVM Express status code
+ */
+static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq,
+ struct nvme_command *cmd, u32 *result, unsigned timeout)
+{
+ int cmdid;
+ struct sync_cmd_info cmdinfo;
+
+ cmdinfo.task = current;
+ cmdinfo.status = -EINTR;
+
+ cmdid = alloc_cmdid_killable(nvmeq, &cmdinfo, sync_completion,
+ timeout);
+ if (cmdid < 0)
+ return cmdid;
+ cmd->common.command_id = cmdid;
+
+ set_current_state(TASK_KILLABLE);
+ nvme_submit_cmd(nvmeq, cmd);
+ schedule();
+
+ if (cmdinfo.status == -EINTR) {
+ nvme_abort_command(nvmeq, cmdid);
+ return -EINTR;
+ }
+
+ if (result)
+ *result = cmdinfo.result;
+
+ return cmdinfo.status;
+}
+
+static int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
+ u32 *result)
+{
+ return nvme_submit_sync_cmd(dev->queues[0], cmd, result, ADMIN_TIMEOUT);
+}
+
+static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
+{
+ int status;
+ struct nvme_command c;
+
+ memset(&c, 0, sizeof(c));
+ c.delete_queue.opcode = opcode;
+ c.delete_queue.qid = cpu_to_le16(id);
+
+ status = nvme_submit_admin_cmd(dev, &c, NULL);
+ if (status)
+ return -EIO;
+ return 0;
+}
+
+static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
+ struct nvme_queue *nvmeq)
+{
+ int status;
+ struct nvme_command c;
+ int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;
+
+ memset(&c, 0, sizeof(c));
+ c.create_cq.opcode = nvme_admin_create_cq;
+ c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
+ c.create_cq.cqid = cpu_to_le16(qid);
+ c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
+ c.create_cq.cq_flags = cpu_to_le16(flags);
+ c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);
+
+ status = nvme_submit_admin_cmd(dev, &c, NULL);
+ if (status)
+ return -EIO;
+ return 0;
+}
+
+static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
+ struct nvme_queue *nvmeq)
+{
+ int status;
+ struct nvme_command c;
+ int flags = NVME_QUEUE_PHYS_CONTIG | NVME_SQ_PRIO_MEDIUM;
+
+ memset(&c, 0, sizeof(c));
+ c.create_sq.opcode = nvme_admin_create_sq;
+ c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr);
+ c.create_sq.sqid = cpu_to_le16(qid);
+ c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
+ c.create_sq.sq_flags = cpu_to_le16(flags);
+ c.create_sq.cqid = cpu_to_le16(qid);
+
+ status = nvme_submit_admin_cmd(dev, &c, NULL);
+ if (status)
+ return -EIO;
+ return 0;
+}
+
+static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid)
+{
+ return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid);
+}
+
+static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
+{
+ return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
+}
+
+static int nvme_identify(struct nvme_dev *dev, unsigned nsid, unsigned cns,
+ dma_addr_t dma_addr)
+{
+ struct nvme_command c;
+
+ memset(&c, 0, sizeof(c));
+ c.identify.opcode = nvme_admin_identify;
+ c.identify.nsid = cpu_to_le32(nsid);
+ c.identify.prp1 = cpu_to_le64(dma_addr);
+ c.identify.cns = cpu_to_le32(cns);
+
+ return nvme_submit_admin_cmd(dev, &c, NULL);
+}
+
+static int nvme_get_features(struct nvme_dev *dev, unsigned fid,
+ unsigned dword11, dma_addr_t dma_addr)
+{
+ struct nvme_command c;
+
+ memset(&c, 0, sizeof(c));
+ c.features.opcode = nvme_admin_get_features;
+ c.features.prp1 = cpu_to_le64(dma_addr);
+ c.features.fid = cpu_to_le32(fid);
+ c.features.dword11 = cpu_to_le32(dword11);
+
+ return nvme_submit_admin_cmd(dev, &c, NULL);
+}
+
+static int nvme_set_features(struct nvme_dev *dev, unsigned fid,
+ unsigned dword11, dma_addr_t dma_addr, u32 *result)
+{
+ struct nvme_command c;
+
+ memset(&c, 0, sizeof(c));
+ c.features.opcode = nvme_admin_set_features;
+ c.features.prp1 = cpu_to_le64(dma_addr);
+ c.features.fid = cpu_to_le32(fid);
+ c.features.dword11 = cpu_to_le32(dword11);
+
+ return nvme_submit_admin_cmd(dev, &c, result);
+}
+
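+/*
+ * Editorial note: tearing down a queue releases its interrupt vector, asks
+ * the controller to delete the SQ/CQ pair (skipped for the admin queue),
+ * then frees the DMA-coherent rings.
+ */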
+static void nvme_free_queue(struct nvme_dev *dev, int qid)
+{
+ struct nvme_queue *nvmeq = dev->queues[qid];
+ int vector = dev->entry[nvmeq->cq_vector].vector;
+
+ irq_set_affinity_hint(vector, NULL);
+ free_irq(vector, nvmeq);
+
+ /* Don't tell the adapter to delete the admin queue */
+ if (qid) {
+ adapter_delete_sq(dev, qid);
+ adapter_delete_cq(dev, qid);
+ }
+
+ dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
+ (void *)nvmeq->cqes, nvmeq->cq_dma_addr);
+ dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
+ nvmeq->sq_cmds, nvmeq->sq_dma_addr);
+ kfree(nvmeq);
+}
+
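+/*
+ * Editorial note: the cmdid bitmap and per-command info are allocated
+ * together with struct nvme_queue (the "extra" bytes below); the CQ and SQ
+ * rings themselves live in DMA-coherent memory shared with the controller.
+ */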
+static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
+ int depth, int vector)
+{
+ struct device *dmadev = &dev->pci_dev->dev;
+ unsigned extra = (depth / 8) + (depth * sizeof(struct nvme_cmd_info));
+ struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq) + extra, GFP_KERNEL);
+ if (!nvmeq)
+ return NULL;
+
+ nvmeq->cqes = dma_alloc_coherent(dmadev, CQ_SIZE(depth),
+ &nvmeq->cq_dma_addr, GFP_KERNEL);
+ if (!nvmeq->cqes)
+ goto free_nvmeq;
+ memset((void *)nvmeq->cqes, 0, CQ_SIZE(depth));
+
+ nvmeq->sq_cmds = dma_alloc_coherent(dmadev, SQ_SIZE(depth),
+ &nvmeq->sq_dma_addr, GFP_KERNEL);
+ if (!nvmeq->sq_cmds)
+ goto free_cqdma;
+
+ nvmeq->q_dmadev = dmadev;
+ nvmeq->dev = dev;
+ spin_lock_init(&nvmeq->q_lock);
+ nvmeq->cq_head = 0;
+ nvmeq->cq_phase = 1;
+ init_waitqueue_head(&nvmeq->sq_full);
+ init_waitqueue_entry(&nvmeq->sq_cong_wait, nvme_thread);
+ bio_list_init(&nvmeq->sq_cong);
+ nvmeq->q_db = &dev->dbs[qid << (dev->db_stride + 1)];
+ nvmeq->q_depth = depth;
+ nvmeq->cq_vector = vector;
+
+ return nvmeq;
+
+ free_cqdma:
+ dma_free_coherent(dmadev, CQ_SIZE(nvmeq->q_depth), (void *)nvmeq->cqes,
+ nvmeq->cq_dma_addr);
+ free_nvmeq:
+ kfree(nvmeq);
+ return NULL;
+}
+
+static int queue_request_irq(struct nvme_dev *dev, struct nvme_queue *nvmeq,
+ const char *name)
+{
+ if (use_threaded_interrupts)
+ return request_threaded_irq(dev->entry[nvmeq->cq_vector].vector,
+ nvme_irq_check, nvme_irq,
+ IRQF_DISABLED | IRQF_SHARED,
+ name, nvmeq);
+ return request_irq(dev->entry[nvmeq->cq_vector].vector, nvme_irq,
+ IRQF_DISABLED | IRQF_SHARED, name, nvmeq);
+}
+
+static __devinit struct nvme_queue *nvme_create_queue(struct nvme_dev *dev,
+ int qid, int cq_size, int vector)
+{
+ int result;
+ struct nvme_queue *nvmeq = nvme_alloc_queue(dev, qid, cq_size, vector);
+
+ if (!nvmeq)
+ return ERR_PTR(-ENOMEM);
+
+ result = adapter_alloc_cq(dev, qid, nvmeq);
+ if (result < 0)
+ goto free_nvmeq;
+
+ result = adapter_alloc_sq(dev, qid, nvmeq);
+ if (result < 0)
+ goto release_cq;
+
+ result = queue_request_irq(dev, nvmeq, "nvme");
+ if (result < 0)
+ goto release_sq;
+
+ return nvmeq;
+
+ release_sq:
+ adapter_delete_sq(dev, qid);
+ release_cq:
+ adapter_delete_cq(dev, qid);
+ free_nvmeq:
+ dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
+ (void *)nvmeq->cqes, nvmeq->cq_dma_addr);
+ dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
+ nvmeq->sq_cmds, nvmeq->sq_dma_addr);
+ kfree(nvmeq);
+ return ERR_PTR(result);
+}
+
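+/*
+ * Editorial note: admin queue bring-up allocates the rings in host memory,
+ * programs AQA/ASQ/ACQ and CC, then polls CSTS.RDY until the controller
+ * reports ready or the CAP-derived timeout expires.
+ */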
+static int __devinit nvme_configure_admin_queue(struct nvme_dev *dev)
+{
+ int result;
+ u32 aqa;
+ u64 cap;
+ unsigned long timeout;
+ struct nvme_queue *nvmeq;
+
+ dev->dbs = ((void __iomem *)dev->bar) + 4096;
+
+ nvmeq = nvme_alloc_queue(dev, 0, 64, 0);
+ if (!nvmeq)
+ return -ENOMEM;
+
+ aqa = nvmeq->q_depth - 1;
+ aqa |= aqa << 16;
+
+ dev->ctrl_config = NVME_CC_ENABLE | NVME_CC_CSS_NVM;
+ dev->ctrl_config |= (PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
+ dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
+ dev->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
+
+ writel(0, &dev->bar->cc);
+ writel(aqa, &dev->bar->aqa);
+ writeq(nvmeq->sq_dma_addr, &dev->bar->asq);
+ writeq(nvmeq->cq_dma_addr, &dev->bar->acq);
+ writel(dev->ctrl_config, &dev->bar->cc);
+
+ cap = readq(&dev->bar->cap);
+ timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
+ dev->db_stride = NVME_CAP_STRIDE(cap);
+
+ while (!(readl(&dev->bar->csts) & NVME_CSTS_RDY)) {
+ msleep(100);
+ if (fatal_signal_pending(current))
+ return -EINTR;
+ if (time_after(jiffies, timeout)) {
+ dev_err(&dev->pci_dev->dev,
+ "Device not ready; aborting initialisation\n");
+ return -ENODEV;
+ }
+ }
+
+ result = queue_request_irq(dev, nvmeq, "nvme admin");
+ dev->queues[0] = nvmeq;
+ return result;
+}
+
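+/*
+ * Editorial note: the user buffer is pinned with get_user_pages_fast(),
+ * described by a scatterlist over the pinned pages and DMA-mapped; the
+ * buffer must be dword aligned (the addr & 3 check below).
+ */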
+static struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
+ unsigned long addr, unsigned length)
+{
+ int i, err, count, nents, offset;
+ struct scatterlist *sg;
+ struct page **pages;
+ struct nvme_iod *iod;
+
+ if (addr & 3)
+ return ERR_PTR(-EINVAL);
+ if (!length)
+ return ERR_PTR(-EINVAL);
+
+ offset = offset_in_page(addr);
+ count = DIV_ROUND_UP(offset + length, PAGE_SIZE);
+ pages = kcalloc(count, sizeof(*pages), GFP_KERNEL);
+
+ err = get_user_pages_fast(addr, count, 1, pages);
+ if (err < count) {
+ count = err;
+ err = -EFAULT;
+ goto put_pages;
+ }
+
+ iod = nvme_alloc_iod(count, length, GFP_KERNEL);
+ sg = iod->sg;
+ sg_init_table(sg, count);
+ for (i = 0; i < count; i++) {
+ sg_set_page(&sg[i], pages[i],
+ min_t(int, length, PAGE_SIZE - offset), offset);
+ length -= (PAGE_SIZE - offset);
+ offset = 0;
+ }
+ sg_mark_end(&sg[i - 1]);
+ iod->nents = count;
+
+ err = -ENOMEM;
+ nents = dma_map_sg(&dev->pci_dev->dev, sg, count,
+ write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ if (!nents)
+ goto free_iod;
+
+ kfree(pages);
+ return iod;
+
+ free_iod:
+ kfree(iod);
+ put_pages:
+ for (i = 0; i < count; i++)
+ put_page(pages[i]);
+ kfree(pages);
+ return ERR_PTR(err);
+}
+
+static void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
+ struct nvme_iod *iod)
+{
+ int i;
+
+ dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
+ write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+
+ for (i = 0; i < iod->nents; i++)
+ put_page(sg_page(&iod->sg[i]));
+}
+
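+/*
+ * Editorial note: NVME_IOCTL_SUBMIT_IO path - copy the nvme_user_io
+ * descriptor from user space, map its data buffer, fill in an I/O command
+ * and its PRP entries, and submit it synchronously on a per-CPU I/O queue.
+ */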
+static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
+{
+ struct nvme_dev *dev = ns->dev;
+ struct nvme_queue *nvmeq;
+ struct nvme_user_io io;
+ struct nvme_command c;
+ unsigned length;
+ int status;
+ struct nvme_iod *iod;
+
+ if (copy_from_user(&io, uio, sizeof(io)))
+ return -EFAULT;
+ length = (io.nblocks + 1) << ns->lba_shift;
+
+ switch (io.opcode) {
+ case nvme_cmd_write:
+ case nvme_cmd_read:
+ case nvme_cmd_compare:
+ iod = nvme_map_user_pages(dev, io.opcode & 1, io.addr, length);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (IS_ERR(iod))
+ return PTR_ERR(iod);
+
+ memset(&c, 0, sizeof(c));
+ c.rw.opcode = io.opcode;
+ c.rw.flags = io.flags;
+ c.rw.nsid = cpu_to_le32(ns->ns_id);
+ c.rw.slba = cpu_to_le64(io.slba);
+ c.rw.length = cpu_to_le16(io.nblocks);
+ c.rw.control = cpu_to_le16(io.control);
+ c.rw.dsmgmt = cpu_to_le16(io.dsmgmt);
+ c.rw.reftag = io.reftag;
+ c.rw.apptag = io.apptag;
+ c.rw.appmask = io.appmask;
+ /* XXX: metadata */
+ length = nvme_setup_prps(dev, &c.common, iod, length, GFP_KERNEL);
+
+ nvmeq = get_nvmeq(dev);
+ /*
+ * Since nvme_submit_sync_cmd sleeps, we can't keep preemption
+ * disabled. We may be preempted at any point, and be rescheduled
+ * to a different CPU. That will cause cacheline bouncing, but no
+ * additional races since q_lock already protects against other CPUs.
+ */
+ put_nvmeq(nvmeq);
+ if (length != (io.nblocks + 1) << ns->lba_shift)
+ status = -ENOMEM;
+ else
+ status = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
+
+ nvme_unmap_user_pages(dev, io.opcode & 1, iod);
+ nvme_free_iod(dev, iod);
+ return status;
+}
+
+static int nvme_user_admin_cmd(struct nvme_ns *ns,
+ struct nvme_admin_cmd __user *ucmd)
+{
+ struct nvme_dev *dev = ns->dev;
+ struct nvme_admin_cmd cmd;
+ struct nvme_command c;
+ int status, length;
+ struct nvme_iod *iod;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
+ return -EFAULT;
+
+ memset(&c, 0, sizeof(c));
+ c.common.opcode = cmd.opcode;
+ c.common.flags = cmd.flags;
+ c.common.nsid = cpu_to_le32(cmd.nsid);
+ c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
+ c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
+ c.common.cdw10[0] = cpu_to_le32(cmd.cdw10);
+ c.common.cdw10[1] = cpu_to_le32(cmd.cdw11);
+ c.common.cdw10[2] = cpu_to_le32(cmd.cdw12);
+ c.common.cdw10[3] = cpu_to_le32(cmd.cdw13);
+ c.common.cdw10[4] = cpu_to_le32(cmd.cdw14);
+ c.common.cdw10[5] = cpu_to_le32(cmd.cdw15);
+
+ length = cmd.data_len;
+ if (cmd.data_len) {
+ iod = nvme_map_user_pages(dev, cmd.opcode & 1, cmd.addr,
+ length);
+ if (IS_ERR(iod))
+ return PTR_ERR(iod);
+ length = nvme_setup_prps(dev, &c.common, iod, length,
+ GFP_KERNEL);
+ }
+
+ if (length != cmd.data_len)
+ status = -ENOMEM;
+ else
+ status = nvme_submit_admin_cmd(dev, &c, NULL);
+
+ if (cmd.data_len) {
+ nvme_unmap_user_pages(dev, cmd.opcode & 1, iod);
+ nvme_free_iod(dev, iod);
+ }
+ return status;
+}
+
+static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
+ unsigned long arg)
+{
+ struct nvme_ns *ns = bdev->bd_disk->private_data;
+
+ switch (cmd) {
+ case NVME_IOCTL_ID:
+ return ns->ns_id;
+ case NVME_IOCTL_ADMIN_CMD:
+ return nvme_user_admin_cmd(ns, (void __user *)arg);
+ case NVME_IOCTL_SUBMIT_IO:
+ return nvme_submit_io(ns, (void __user *)arg);
+ default:
+ return -ENOTTY;
+ }
+}
+
+static const struct block_device_operations nvme_fops = {
+ .owner = THIS_MODULE,
+ .ioctl = nvme_ioctl,
+ .compat_ioctl = nvme_ioctl,
+};
+
+static void nvme_timeout_ios(struct nvme_queue *nvmeq)
+{
+ int depth = nvmeq->q_depth - 1;
+ struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
+ unsigned long now = jiffies;
+ int cmdid;
+
+ for_each_set_bit(cmdid, nvmeq->cmdid_data, depth) {
+ void *ctx;
+ nvme_completion_fn fn;
+ static struct nvme_completion cqe = { .status = cpu_to_le16(NVME_SC_ABORT_REQ) << 1, };
+
+ if (!time_after(now, info[cmdid].timeout))
+ continue;
+ dev_warn(nvmeq->q_dmadev, "Timing out I/O %d\n", cmdid);
+ ctx = cancel_cmdid(nvmeq, cmdid, &fn);
+ fn(nvmeq->dev, ctx, &cqe);
+ }
+}
+
+static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
+{
+ while (bio_list_peek(&nvmeq->sq_cong)) {
+ struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
+ struct nvme_ns *ns = bio->bi_bdev->bd_disk->private_data;
+ if (nvme_submit_bio_queue(nvmeq, ns, bio)) {
+ bio_list_add_head(&nvmeq->sq_cong, bio);
+ break;
+ }
+ if (bio_list_empty(&nvmeq->sq_cong))
+ remove_wait_queue(&nvmeq->sq_full,
+ &nvmeq->sq_cong_wait);
+ }
+}
+
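+/*
+ * Editorial note: once a second the polling kthread reaps any completions
+ * the interrupt handler may have missed, times out stuck commands and
+ * resubmits bios that were queued while the submission queue was full.
+ */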
+static int nvme_kthread(void *data)
+{
+ struct nvme_dev *dev;
+
+ while (!kthread_should_stop()) {
+ __set_current_state(TASK_RUNNING);
+ spin_lock(&dev_list_lock);
+ list_for_each_entry(dev, &dev_list, node) {
+ int i;
+ for (i = 0; i < dev->queue_count; i++) {
+ struct nvme_queue *nvmeq = dev->queues[i];
+ if (!nvmeq)
+ continue;
+ spin_lock_irq(&nvmeq->q_lock);
+ if (nvme_process_cq(nvmeq))
+ printk("process_cq did something\n");
+ nvme_timeout_ios(nvmeq);
+ nvme_resubmit_bios(nvmeq);
+ spin_unlock_irq(&nvmeq->q_lock);
+ }
+ }
+ spin_unlock(&dev_list_lock);
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(HZ);
+ }
+ return 0;
+}
+
+static DEFINE_IDA(nvme_index_ida);
+
+static int nvme_get_ns_idx(void)
+{
+ int index, error;
+
+ do {
+ if (!ida_pre_get(&nvme_index_ida, GFP_KERNEL))
+ return -1;
+
+ spin_lock(&dev_list_lock);
+ error = ida_get_new(&nvme_index_ida, &index);
+ spin_unlock(&dev_list_lock);
+ } while (error == -EAGAIN);
+
+ if (error)
+ index = -1;
+ return index;
+}
+
+static void nvme_put_ns_idx(int index)
+{
+ spin_lock(&dev_list_lock);
+ ida_remove(&nvme_index_ida, index);
+ spin_unlock(&dev_list_lock);
+}
+
+static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, int nsid,
+ struct nvme_id_ns *id, struct nvme_lba_range_type *rt)
+{
+ struct nvme_ns *ns;
+ struct gendisk *disk;
+ int lbaf;
+
+ if (rt->attributes & NVME_LBART_ATTRIB_HIDE)
+ return NULL;
+
+ ns = kzalloc(sizeof(*ns), GFP_KERNEL);
+ if (!ns)
+ return NULL;
+ ns->queue = blk_alloc_queue(GFP_KERNEL);
+ if (!ns->queue)
+ goto out_free_ns;
+ ns->queue->queue_flags = QUEUE_FLAG_DEFAULT;
+ queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, ns->queue);
+ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
+/* queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue); */
+ blk_queue_make_request(ns->queue, nvme_make_request);
+ ns->dev = dev;
+ ns->queue->queuedata = ns;
+
+ disk = alloc_disk(NVME_MINORS);
+ if (!disk)
+ goto out_free_queue;
+ ns->ns_id = nsid;
+ ns->disk = disk;
+ lbaf = id->flbas & 0xf;
+ ns->lba_shift = id->lbaf[lbaf].ds;
+
+ disk->major = nvme_major;
+ disk->minors = NVME_MINORS;
+ disk->first_minor = NVME_MINORS * nvme_get_ns_idx();
+ disk->fops = &nvme_fops;
+ disk->private_data = ns;
+ disk->queue = ns->queue;
+ disk->driverfs_dev = &dev->pci_dev->dev;
+ sprintf(disk->disk_name, "nvme%dn%d", dev->instance, nsid);
+ set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
+
+ return ns;
+
+ out_free_queue:
+ blk_cleanup_queue(ns->queue);
+ out_free_ns:
+ kfree(ns);
+ return NULL;
+}
+
+static void nvme_ns_free(struct nvme_ns *ns)
+{
+ int index = ns->disk->first_minor / NVME_MINORS;
+ put_disk(ns->disk);
+ nvme_put_ns_idx(index);
+ blk_cleanup_queue(ns->queue);
+ kfree(ns);
+}
+
+static int set_queue_count(struct nvme_dev *dev, int count)
+{
+ int status;
+ u32 result;
+ u32 q_count = (count - 1) | ((count - 1) << 16);
+
+ status = nvme_set_features(dev, NVME_FEAT_NUM_QUEUES, q_count, 0,
+ &result);
+ if (status)
+ return -EIO;
+ return min(result & 0xffff, result >> 16) + 1;
+}
+
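+/*
+ * Editorial note: request one I/O queue per online CPU via Set Features
+ * (Number of Queues), enable as many MSI-X vectors as controller and
+ * platform allow, spread their affinity across CPUs, and map any remaining
+ * possible CPUs onto the queues that were created.
+ */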
+static int __devinit nvme_setup_io_queues(struct nvme_dev *dev)
+{
+ int result, cpu, i, nr_io_queues, db_bar_size;
+
+ nr_io_queues = num_online_cpus();
+ result = set_queue_count(dev, nr_io_queues);
+ if (result < 0)
+ return result;
+ if (result < nr_io_queues)
+ nr_io_queues = result;
+
+ /* Deregister the admin queue's interrupt */
+ free_irq(dev->entry[0].vector, dev->queues[0]);
+
+ db_bar_size = 4096 + ((nr_io_queues + 1) << (dev->db_stride + 3));
+ if (db_bar_size > 8192) {
+ iounmap(dev->bar);
+ dev->bar = ioremap(pci_resource_start(dev->pci_dev, 0),
+ db_bar_size);
+ dev->dbs = ((void __iomem *)dev->bar) + 4096;
+ dev->queues[0]->q_db = dev->dbs;
+ }
+
+ for (i = 0; i < nr_io_queues; i++)
+ dev->entry[i].entry = i;
+ for (;;) {
+ result = pci_enable_msix(dev->pci_dev, dev->entry,
+ nr_io_queues);
+ if (result == 0) {
+ break;
+ } else if (result > 0) {
+ nr_io_queues = result;
+ continue;
+ } else {
+ nr_io_queues = 1;
+ break;
+ }
+ }
+
+ result = queue_request_irq(dev, dev->queues[0], "nvme admin");
+ /* XXX: handle failure here */
+
+ cpu = cpumask_first(cpu_online_mask);
+ for (i = 0; i < nr_io_queues; i++) {
+ irq_set_affinity_hint(dev->entry[i].vector, get_cpu_mask(cpu));
+ cpu = cpumask_next(cpu, cpu_online_mask);
+ }
+
+ for (i = 0; i < nr_io_queues; i++) {
+ dev->queues[i + 1] = nvme_create_queue(dev, i + 1,
+ NVME_Q_DEPTH, i);
+ if (IS_ERR(dev->queues[i + 1]))
+ return PTR_ERR(dev->queues[i + 1]);
+ dev->queue_count++;
+ }
+
+ for (; i < num_possible_cpus(); i++) {
+ int target = i % rounddown_pow_of_two(dev->queue_count - 1);
+ dev->queues[i + 1] = dev->queues[target + 1];
+ }
+
+ return 0;
+}
+
+static void nvme_free_queues(struct nvme_dev *dev)
+{
+ int i;
+
+ for (i = dev->queue_count - 1; i >= 0; i--)
+ nvme_free_queue(dev, i);
+}
+
+static int __devinit nvme_dev_add(struct nvme_dev *dev)
+{
+ int res, nn, i;
+ struct nvme_ns *ns, *next;
+ struct nvme_id_ctrl *ctrl;
+ struct nvme_id_ns *id_ns;
+ void *mem;
+ dma_addr_t dma_addr;
+
+ res = nvme_setup_io_queues(dev);
+ if (res)
+ return res;
+
+ mem = dma_alloc_coherent(&dev->pci_dev->dev, 8192, &dma_addr,
+ GFP_KERNEL);
+
+ res = nvme_identify(dev, 0, 1, dma_addr);
+ if (res) {
+ res = -EIO;
+ goto out_free;
+ }
+
+ ctrl = mem;
+ nn = le32_to_cpup(&ctrl->nn);
+ memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
+ memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
+ memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
+
+ id_ns = mem;
+ for (i = 1; i <= nn; i++) {
+ res = nvme_identify(dev, i, 0, dma_addr);
+ if (res)
+ continue;
+
+ if (id_ns->ncap == 0)
+ continue;
+
+ res = nvme_get_features(dev, NVME_FEAT_LBA_RANGE, i,
+ dma_addr + 4096);
+ if (res)
+ continue;
+
+ ns = nvme_alloc_ns(dev, i, mem, mem + 4096);
+ if (ns)
+ list_add_tail(&ns->list, &dev->namespaces);
+ }
+ list_for_each_entry(ns, &dev->namespaces, list)
+ add_disk(ns->disk);
+
+ goto out;
+
+ out_free:
+ list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
+ list_del(&ns->list);
+ nvme_ns_free(ns);
+ }
+
+ out:
+ dma_free_coherent(&dev->pci_dev->dev, 8192, mem, dma_addr);
+ return res;
+}
+
+static int nvme_dev_remove(struct nvme_dev *dev)
+{
+ struct nvme_ns *ns, *next;
+
+ spin_lock(&dev_list_lock);
+ list_del(&dev->node);
+ spin_unlock(&dev_list_lock);
+
+ /* TODO: wait all I/O finished or cancel them */
+
+ list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
+ list_del(&ns->list);
+ del_gendisk(ns->disk);
+ nvme_ns_free(ns);
+ }
+
+ nvme_free_queues(dev);
+
+ return 0;
+}
+
+static int nvme_setup_prp_pools(struct nvme_dev *dev)
+{
+ struct device *dmadev = &dev->pci_dev->dev;
+ dev->prp_page_pool = dma_pool_create("prp list page", dmadev,
+ PAGE_SIZE, PAGE_SIZE, 0);
+ if (!dev->prp_page_pool)
+ return -ENOMEM;
+
+ /* Optimisation for I/Os between 4k and 128k */
+ dev->prp_small_pool = dma_pool_create("prp list 256", dmadev,
+ 256, 256, 0);
+ if (!dev->prp_small_pool) {
+ dma_pool_destroy(dev->prp_page_pool);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static void nvme_release_prp_pools(struct nvme_dev *dev)
+{
+ dma_pool_destroy(dev->prp_page_pool);
+ dma_pool_destroy(dev->prp_small_pool);
+}
+
+/* XXX: Use an ida or something to let remove / add work correctly */
+static void nvme_set_instance(struct nvme_dev *dev)
+{
+ static int instance;
+ dev->instance = instance++;
+}
+
+static void nvme_release_instance(struct nvme_dev *dev)
+{
+}
+
+static int __devinit nvme_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ int bars, result = -ENOMEM;
+ struct nvme_dev *dev;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+ dev->entry = kcalloc(num_possible_cpus(), sizeof(*dev->entry),
+ GFP_KERNEL);
+ if (!dev->entry)
+ goto free;
+ dev->queues = kcalloc(num_possible_cpus() + 1, sizeof(void *),
+ GFP_KERNEL);
+ if (!dev->queues)
+ goto free;
+
+ if (pci_enable_device_mem(pdev))
+ goto free;
+ pci_set_master(pdev);
+ bars = pci_select_bars(pdev, IORESOURCE_MEM);
+ if (pci_request_selected_regions(pdev, bars, "nvme"))
+ goto disable;
+
+ INIT_LIST_HEAD(&dev->namespaces);
+ dev->pci_dev = pdev;
+ pci_set_drvdata(pdev, dev);
+ dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+ dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+ nvme_set_instance(dev);
+ dev->entry[0].vector = pdev->irq;
+
+ result = nvme_setup_prp_pools(dev);
+ if (result)
+ goto disable_msix;
+
+ dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
+ if (!dev->bar) {
+ result = -ENOMEM;
+ goto disable_msix;
+ }
+
+ result = nvme_configure_admin_queue(dev);
+ if (result)
+ goto unmap;
+ dev->queue_count++;
+
+ spin_lock(&dev_list_lock);
+ list_add(&dev->node, &dev_list);
+ spin_unlock(&dev_list_lock);
+
+ result = nvme_dev_add(dev);
+ if (result)
+ goto delete;
+
+ return 0;
+
+ delete:
+ spin_lock(&dev_list_lock);
+ list_del(&dev->node);
+ spin_unlock(&dev_list_lock);
+
+ nvme_free_queues(dev);
+ unmap:
+ iounmap(dev->bar);
+ disable_msix:
+ pci_disable_msix(pdev);
+ nvme_release_instance(dev);
+ nvme_release_prp_pools(dev);
+ disable:
+ pci_disable_device(pdev);
+ pci_release_regions(pdev);
+ free:
+ kfree(dev->queues);
+ kfree(dev->entry);
+ kfree(dev);
+ return result;
+}
+
+static void __devexit nvme_remove(struct pci_dev *pdev)
+{
+ struct nvme_dev *dev = pci_get_drvdata(pdev);
+ nvme_dev_remove(dev);
+ pci_disable_msix(pdev);
+ iounmap(dev->bar);
+ nvme_release_instance(dev);
+ nvme_release_prp_pools(dev);
+ pci_disable_device(pdev);
+ pci_release_regions(pdev);
+ kfree(dev->queues);
+ kfree(dev->entry);
+ kfree(dev);
+}
+
+/* These functions are yet to be implemented */
+#define nvme_error_detected NULL
+#define nvme_dump_registers NULL
+#define nvme_link_reset NULL
+#define nvme_slot_reset NULL
+#define nvme_error_resume NULL
+#define nvme_suspend NULL
+#define nvme_resume NULL
+
+static struct pci_error_handlers nvme_err_handler = {
+ .error_detected = nvme_error_detected,
+ .mmio_enabled = nvme_dump_registers,
+ .link_reset = nvme_link_reset,
+ .slot_reset = nvme_slot_reset,
+ .resume = nvme_error_resume,
+};
+
+/* Move to pci_ids.h later */
+#define PCI_CLASS_STORAGE_EXPRESS 0x010802
+
+static DEFINE_PCI_DEVICE_TABLE(nvme_id_table) = {
+ { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, nvme_id_table);
+
+static struct pci_driver nvme_driver = {
+ .name = "nvme",
+ .id_table = nvme_id_table,
+ .probe = nvme_probe,
+ .remove = __devexit_p(nvme_remove),
+ .suspend = nvme_suspend,
+ .resume = nvme_resume,
+ .err_handler = &nvme_err_handler,
+};
+
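+/*
+ * Editorial note: module init starts the polling kthread, registers the
+ * block major, then registers the PCI driver, which matches on the NVM
+ * Express class code.
+ */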
+static int __init nvme_init(void)
+{
+ int result = -EBUSY;
+
+ nvme_thread = kthread_run(nvme_kthread, NULL, "nvme");
+ if (IS_ERR(nvme_thread))
+ return PTR_ERR(nvme_thread);
+
+ nvme_major = register_blkdev(nvme_major, "nvme");
+ if (nvme_major <= 0)
+ goto kill_kthread;
+
+ result = pci_register_driver(&nvme_driver);
+ if (result)
+ goto unregister_blkdev;
+ return 0;
+
+ unregister_blkdev:
+ unregister_blkdev(nvme_major, "nvme");
+ kill_kthread:
+ kthread_stop(nvme_thread);
+ return result;
+}
+
+static void __exit nvme_exit(void)
+{
+ pci_unregister_driver(&nvme_driver);
+ unregister_blkdev(nvme_major, "nvme");
+ kthread_stop(nvme_thread);
+}
+
+MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("0.8");
+module_init(nvme_init);
+module_exit(nvme_exit);
diff --git a/drivers/block/paride/bpck6.c b/drivers/block/paride/bpck6.c
index ad124525ac23..ec64e7f5d1ce 100644
--- a/drivers/block/paride/bpck6.c
+++ b/drivers/block/paride/bpck6.c
@@ -20,9 +20,6 @@
*/
-/* PARAMETERS */
-static int verbose; /* set this to 1 to see debugging messages and whatnot */
-
#define BACKPACK_VERSION "2.0.2"
#include <linux/module.h>
@@ -36,6 +33,8 @@ static int verbose; /* set this to 1 to see debugging messages and whatnot */
#include "ppc6lnx.c"
#include "paride.h"
+/* PARAMETERS */
+static bool verbose; /* set this to 1 to see debugging messages and whatnot */
#define PPCSTRUCT(pi) ((Interface *)(pi->private))
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index 46b8136c31bb..ba2b6b5e5910 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -144,7 +144,7 @@ enum {D_PRT, D_PRO, D_UNI, D_MOD, D_SLV, D_DLY};
static DEFINE_MUTEX(pcd_mutex);
static DEFINE_SPINLOCK(pcd_lock);
-module_param(verbose, bool, 0644);
+module_param(verbose, int, 0644);
module_param(major, int, 0);
module_param(name, charp, 0);
module_param(nice, int, 0);
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 869e7676d46f..831e3ac156e6 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -124,8 +124,9 @@
by default.
*/
+#include <linux/types.h>
-static int verbose = 0;
+static bool verbose = 0;
static int major = PD_MAJOR;
static char *name = PD_NAME;
static int cluster = 64;
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index f21b520ef419..ec8f9ed6326e 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -118,13 +118,15 @@
#define PF_NAME "pf"
#define PF_UNITS 4
+#include <linux/types.h>
+
/* Here are things one can override from the insmod command.
Most are autoprobed by paride unless set here. Verbose is off
by default.
*/
-static int verbose = 0;
+static bool verbose = 0;
static int major = PF_MAJOR;
static char *name = PF_NAME;
static int cluster = 64;
diff --git a/drivers/block/paride/pg.c b/drivers/block/paride/pg.c
index a79fb4f7ff62..4a27b1de5fcb 100644
--- a/drivers/block/paride/pg.c
+++ b/drivers/block/paride/pg.c
@@ -130,13 +130,14 @@
#define PI_PG 4
#endif
+#include <linux/types.h>
/* Here are things one can override from the insmod command.
Most are autoprobed by paride unless set here. Verbose is 0
by default.
*/
-static int verbose = 0;
+static bool verbose = 0;
static int major = PG_MAJOR;
static char *name = PG_NAME;
static int disable = 0;
diff --git a/drivers/block/paride/pt.c b/drivers/block/paride/pt.c
index 7179f79d7468..2596042eb987 100644
--- a/drivers/block/paride/pt.c
+++ b/drivers/block/paride/pt.c
@@ -109,13 +109,15 @@
#define PT_NAME "pt"
#define PT_UNITS 4
+#include <linux/types.h>
+
/* Here are things one can override from the insmod command.
Most are autoprobed by paride unless set here. Verbose is on
by default.
*/
-static int verbose = 0;
+static bool verbose = 0;
static int major = PT_MAJOR;
static char *name = PT_NAME;
static int disable = 0;
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index a63b0a2b7805..d59edeabd93f 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2817,7 +2817,7 @@ static const struct block_device_operations pktcdvd_ops = {
.check_events = pkt_check_events,
};
-static char *pktcdvd_devnode(struct gendisk *gd, mode_t *mode)
+static char *pktcdvd_devnode(struct gendisk *gd, umode_t *mode)
{
return kasprintf(GFP_KERNEL, "pktcdvd/%s", gd->disk_name);
}
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 65cc424359b0..3fd31dec8c9c 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -183,10 +183,6 @@ static LIST_HEAD(rbd_client_list); /* clients */
static int __rbd_init_snaps_header(struct rbd_device *rbd_dev);
static void rbd_dev_release(struct device *dev);
-static ssize_t rbd_snap_rollback(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t size);
static ssize_t rbd_snap_add(struct device *dev,
struct device_attribute *attr,
const char *buf,
@@ -461,6 +457,10 @@ static int rbd_header_from_disk(struct rbd_image_header *header,
u32 snap_count = le32_to_cpu(ondisk->snap_count);
int ret = -ENOMEM;
+ if (memcmp(ondisk, RBD_HEADER_TEXT, sizeof(RBD_HEADER_TEXT))) {
+ return -ENXIO;
+ }
+
init_rwsem(&header->snap_rwsem);
header->snap_names_len = le64_to_cpu(ondisk->snap_names_len);
header->snapc = kmalloc(sizeof(struct ceph_snap_context) +
@@ -1356,32 +1356,6 @@ fail:
}
/*
- * Request sync osd rollback
- */
-static int rbd_req_sync_rollback_obj(struct rbd_device *dev,
- u64 snapid,
- const char *obj)
-{
- struct ceph_osd_req_op *ops;
- int ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_ROLLBACK, 0);
- if (ret < 0)
- return ret;
-
- ops[0].snap.snapid = snapid;
-
- ret = rbd_req_sync_op(dev, NULL,
- CEPH_NOSNAP,
- 0,
- CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
- ops,
- 1, obj, 0, 0, NULL, NULL, NULL);
-
- rbd_destroy_ops(ops);
-
- return ret;
-}
-
-/*
* Request sync osd read
*/
static int rbd_req_sync_exec(struct rbd_device *dev,
@@ -1610,8 +1584,13 @@ static int rbd_read_header(struct rbd_device *rbd_dev,
goto out_dh;
rc = rbd_header_from_disk(header, dh, snap_count, GFP_KERNEL);
- if (rc < 0)
+ if (rc < 0) {
+ if (rc == -ENXIO) {
+ pr_warning("unrecognized header format"
+ " for image %s", rbd_dev->obj);
+ }
goto out_dh;
+ }
if (snap_count != header->total_snaps) {
snap_count = header->total_snaps;
@@ -1882,7 +1861,6 @@ static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
static DEVICE_ATTR(create_snap, S_IWUSR, NULL, rbd_snap_add);
-static DEVICE_ATTR(rollback_snap, S_IWUSR, NULL, rbd_snap_rollback);
static struct attribute *rbd_attrs[] = {
&dev_attr_size.attr,
@@ -1893,7 +1871,6 @@ static struct attribute *rbd_attrs[] = {
&dev_attr_current_snap.attr,
&dev_attr_refresh.attr,
&dev_attr_create_snap.attr,
- &dev_attr_rollback_snap.attr,
NULL
};
@@ -2207,6 +2184,8 @@ static ssize_t rbd_add(struct bus_type *bus,
INIT_LIST_HEAD(&rbd_dev->node);
INIT_LIST_HEAD(&rbd_dev->snaps);
+ init_rwsem(&rbd_dev->header.snap_rwsem);
+
/* generate unique id: find highest unique id, add one */
mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
@@ -2424,64 +2403,6 @@ err_unlock:
return ret;
}
-static ssize_t rbd_snap_rollback(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct rbd_device *rbd_dev = dev_to_rbd(dev);
- int ret;
- u64 snapid;
- u64 cur_ofs;
- char *seg_name = NULL;
- char *snap_name = kmalloc(count + 1, GFP_KERNEL);
- ret = -ENOMEM;
- if (!snap_name)
- return ret;
-
- /* parse snaps add command */
- snprintf(snap_name, count, "%s", buf);
- seg_name = kmalloc(RBD_MAX_SEG_NAME_LEN + 1, GFP_NOIO);
- if (!seg_name)
- goto done;
-
- mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
-
- ret = snap_by_name(&rbd_dev->header, snap_name, &snapid, NULL);
- if (ret < 0)
- goto done_unlock;
-
- dout("snapid=%lld\n", snapid);
-
- cur_ofs = 0;
- while (cur_ofs < rbd_dev->header.image_size) {
- cur_ofs += rbd_get_segment(&rbd_dev->header,
- rbd_dev->obj,
- cur_ofs, (u64)-1,
- seg_name, NULL);
- dout("seg_name=%s\n", seg_name);
-
- ret = rbd_req_sync_rollback_obj(rbd_dev, snapid, seg_name);
- if (ret < 0)
- pr_warning("could not roll back obj %s err=%d\n",
- seg_name, ret);
- }
-
- ret = __rbd_update_snaps(rbd_dev);
- if (ret < 0)
- goto done_unlock;
-
- ret = count;
-
-done_unlock:
- mutex_unlock(&ctl_mutex);
-done:
- kfree(seg_name);
- kfree(snap_name);
-
- return ret;
-}
-
static struct bus_attribute rbd_bus_attrs[] = {
__ATTR(add, S_IWUSR, NULL, rbd_add),
__ATTR(remove, S_IWUSR, NULL, rbd_remove),
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index fd5adcd55944..6d5a914b9619 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -26,7 +26,6 @@
#include <linux/delay.h>
#include <linux/platform_device.h>
-#include <asm/macintosh.h>
#include <asm/mac_via.h>
#define CARDNAME "swim"
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index ae3e167e17ad..89ddab127e33 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -16,6 +16,8 @@
* handle GCR disks
*/
+#undef DEBUG
+
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/sched.h>
@@ -36,13 +38,11 @@
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
-static DEFINE_MUTEX(swim3_mutex);
-static struct request_queue *swim3_queue;
-static struct gendisk *disks[2];
-static struct request *fd_req;
-
#define MAX_FLOPPIES 2
+static DEFINE_MUTEX(swim3_mutex);
+static struct gendisk *disks[MAX_FLOPPIES];
+
enum swim_state {
idle,
locating,
@@ -177,7 +177,6 @@ struct swim3 {
struct floppy_state {
enum swim_state state;
- spinlock_t lock;
struct swim3 __iomem *swim3; /* hardware registers */
struct dbdma_regs __iomem *dma; /* DMA controller registers */
int swim3_intr; /* interrupt number for SWIM3 */
@@ -204,8 +203,20 @@ struct floppy_state {
int wanted;
struct macio_dev *mdev;
char dbdma_cmd_space[5 * sizeof(struct dbdma_cmd)];
+ int index;
+ struct request *cur_req;
};
+#define swim3_err(fmt, arg...) dev_err(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
+#define swim3_warn(fmt, arg...) dev_warn(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
+#define swim3_info(fmt, arg...) dev_info(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
+
+#ifdef DEBUG
+#define swim3_dbg(fmt, arg...) dev_dbg(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
+#else
+#define swim3_dbg(fmt, arg...) do { } while(0)
+#endif
+
static struct floppy_state floppy_states[MAX_FLOPPIES];
static int floppy_count = 0;
static DEFINE_SPINLOCK(swim3_lock);
@@ -224,17 +235,8 @@ static unsigned short write_postamble[] = {
0, 0, 0, 0, 0, 0
};
-static void swim3_select(struct floppy_state *fs, int sel);
-static void swim3_action(struct floppy_state *fs, int action);
-static int swim3_readbit(struct floppy_state *fs, int bit);
-static void do_fd_request(struct request_queue * q);
-static void start_request(struct floppy_state *fs);
-static void set_timeout(struct floppy_state *fs, int nticks,
- void (*proc)(unsigned long));
-static void scan_track(struct floppy_state *fs);
static void seek_track(struct floppy_state *fs, int n);
static void init_dma(struct dbdma_cmd *cp, int cmd, void *buf, int count);
-static void setup_transfer(struct floppy_state *fs);
static void act(struct floppy_state *fs);
static void scan_timeout(unsigned long data);
static void seek_timeout(unsigned long data);
@@ -254,18 +256,21 @@ static unsigned int floppy_check_events(struct gendisk *disk,
unsigned int clearing);
static int floppy_revalidate(struct gendisk *disk);
-static bool swim3_end_request(int err, unsigned int nr_bytes)
+static bool swim3_end_request(struct floppy_state *fs, int err, unsigned int nr_bytes)
{
- if (__blk_end_request(fd_req, err, nr_bytes))
- return true;
+ struct request *req = fs->cur_req;
+ int rc;
- fd_req = NULL;
- return false;
-}
+ swim3_dbg(" end request, err=%d nr_bytes=%d, cur_req=%p\n",
+ err, nr_bytes, req);
-static bool swim3_end_request_cur(int err)
-{
- return swim3_end_request(err, blk_rq_cur_bytes(fd_req));
+ if (err)
+ nr_bytes = blk_rq_cur_bytes(req);
+ rc = __blk_end_request(req, err, nr_bytes);
+ if (rc)
+ return true;
+ fs->cur_req = NULL;
+ return false;
}
static void swim3_select(struct floppy_state *fs, int sel)
@@ -303,50 +308,53 @@ static int swim3_readbit(struct floppy_state *fs, int bit)
return (stat & DATA) == 0;
}
-static void do_fd_request(struct request_queue * q)
-{
- int i;
-
- for(i=0; i<floppy_count; i++) {
- struct floppy_state *fs = &floppy_states[i];
- if (fs->mdev->media_bay &&
- check_media_bay(fs->mdev->media_bay) != MB_FD)
- continue;
- start_request(fs);
- }
-}
-
static void start_request(struct floppy_state *fs)
{
struct request *req;
unsigned long x;
+ swim3_dbg("start request, initial state=%d\n", fs->state);
+
if (fs->state == idle && fs->wanted) {
fs->state = available;
wake_up(&fs->wait);
return;
}
while (fs->state == idle) {
- if (!fd_req) {
- fd_req = blk_fetch_request(swim3_queue);
- if (!fd_req)
+ swim3_dbg("start request, idle loop, cur_req=%p\n", fs->cur_req);
+ if (!fs->cur_req) {
+ fs->cur_req = blk_fetch_request(disks[fs->index]->queue);
+ swim3_dbg(" fetched request %p\n", fs->cur_req);
+ if (!fs->cur_req)
break;
}
- req = fd_req;
-#if 0
- printk("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%u buf=%p\n",
- req->rq_disk->disk_name, req->cmd,
- (long)blk_rq_pos(req), blk_rq_sectors(req), req->buffer);
- printk(" errors=%d current_nr_sectors=%u\n",
- req->errors, blk_rq_cur_sectors(req));
+ req = fs->cur_req;
+
+ if (fs->mdev->media_bay &&
+ check_media_bay(fs->mdev->media_bay) != MB_FD) {
+ swim3_dbg("%s", " media bay absent, dropping req\n");
+ swim3_end_request(fs, -ENODEV, 0);
+ continue;
+ }
+
+#if 0 /* This is really too verbose */
+ swim3_dbg("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%u buf=%p\n",
+ req->rq_disk->disk_name, req->cmd,
+ (long)blk_rq_pos(req), blk_rq_sectors(req),
+ req->buffer);
+ swim3_dbg(" errors=%d current_nr_sectors=%u\n",
+ req->errors, blk_rq_cur_sectors(req));
#endif
if (blk_rq_pos(req) >= fs->total_secs) {
- swim3_end_request_cur(-EIO);
+ swim3_dbg(" pos out of bounds (%ld, max is %ld)\n",
+ (long)blk_rq_pos(req), (long)fs->total_secs);
+ swim3_end_request(fs, -EIO, 0);
continue;
}
if (fs->ejected) {
- swim3_end_request_cur(-EIO);
+ swim3_dbg("%s", " disk ejected\n");
+ swim3_end_request(fs, -EIO, 0);
continue;
}
@@ -354,7 +362,8 @@ static void start_request(struct floppy_state *fs)
if (fs->write_prot < 0)
fs->write_prot = swim3_readbit(fs, WRITE_PROT);
if (fs->write_prot) {
- swim3_end_request_cur(-EIO);
+ swim3_dbg("%s", " try to write, disk write protected\n");
+ swim3_end_request(fs, -EIO, 0);
continue;
}
}
@@ -369,7 +378,6 @@ static void start_request(struct floppy_state *fs)
x = ((long)blk_rq_pos(req)) % fs->secpercyl;
fs->head = x / fs->secpertrack;
fs->req_sector = x % fs->secpertrack + 1;
- fd_req = req;
fs->state = do_transfer;
fs->retries = 0;
@@ -377,12 +385,14 @@ static void start_request(struct floppy_state *fs)
}
}
+static void do_fd_request(struct request_queue * q)
+{
+ start_request(q->queuedata);
+}
+
static void set_timeout(struct floppy_state *fs, int nticks,
void (*proc)(unsigned long))
{
- unsigned long flags;
-
- spin_lock_irqsave(&fs->lock, flags);
if (fs->timeout_pending)
del_timer(&fs->timeout);
fs->timeout.expires = jiffies + nticks;
@@ -390,7 +400,6 @@ static void set_timeout(struct floppy_state *fs, int nticks,
fs->timeout.data = (unsigned long) fs;
add_timer(&fs->timeout);
fs->timeout_pending = 1;
- spin_unlock_irqrestore(&fs->lock, flags);
}
static inline void scan_track(struct floppy_state *fs)
@@ -442,40 +451,45 @@ static inline void setup_transfer(struct floppy_state *fs)
struct swim3 __iomem *sw = fs->swim3;
struct dbdma_cmd *cp = fs->dma_cmd;
struct dbdma_regs __iomem *dr = fs->dma;
+ struct request *req = fs->cur_req;
- if (blk_rq_cur_sectors(fd_req) <= 0) {
- printk(KERN_ERR "swim3: transfer 0 sectors?\n");
+ if (blk_rq_cur_sectors(req) <= 0) {
+ swim3_warn("%s", "Transfer 0 sectors ?\n");
return;
}
- if (rq_data_dir(fd_req) == WRITE)
+ if (rq_data_dir(req) == WRITE)
n = 1;
else {
n = fs->secpertrack - fs->req_sector + 1;
- if (n > blk_rq_cur_sectors(fd_req))
- n = blk_rq_cur_sectors(fd_req);
+ if (n > blk_rq_cur_sectors(req))
+ n = blk_rq_cur_sectors(req);
}
+
+ swim3_dbg(" setup xfer at sect %d (of %d) head %d for %d\n",
+ fs->req_sector, fs->secpertrack, fs->head, n);
+
fs->scount = n;
swim3_select(fs, fs->head? READ_DATA_1: READ_DATA_0);
out_8(&sw->sector, fs->req_sector);
out_8(&sw->nsect, n);
out_8(&sw->gap3, 0);
out_le32(&dr->cmdptr, virt_to_bus(cp));
- if (rq_data_dir(fd_req) == WRITE) {
+ if (rq_data_dir(req) == WRITE) {
/* Set up 3 dma commands: write preamble, data, postamble */
init_dma(cp, OUTPUT_MORE, write_preamble, sizeof(write_preamble));
++cp;
- init_dma(cp, OUTPUT_MORE, fd_req->buffer, 512);
+ init_dma(cp, OUTPUT_MORE, req->buffer, 512);
++cp;
init_dma(cp, OUTPUT_LAST, write_postamble, sizeof(write_postamble));
} else {
- init_dma(cp, INPUT_LAST, fd_req->buffer, n * 512);
+ init_dma(cp, INPUT_LAST, req->buffer, n * 512);
}
++cp;
out_le16(&cp->command, DBDMA_STOP);
out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
in_8(&sw->error);
out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
- if (rq_data_dir(fd_req) == WRITE)
+ if (rq_data_dir(req) == WRITE)
out_8(&sw->control_bis, WRITE_SECTORS);
in_8(&sw->intr);
out_le32(&dr->control, (RUN << 16) | RUN);
@@ -488,12 +502,16 @@ static inline void setup_transfer(struct floppy_state *fs)
static void act(struct floppy_state *fs)
{
for (;;) {
+ swim3_dbg(" act loop, state=%d, req_cyl=%d, cur_cyl=%d\n",
+ fs->state, fs->req_cyl, fs->cur_cyl);
+
switch (fs->state) {
case idle:
return; /* XXX shouldn't get here */
case locating:
if (swim3_readbit(fs, TRACK_ZERO)) {
+ swim3_dbg("%s", " locate track 0\n");
fs->cur_cyl = 0;
if (fs->req_cyl == 0)
fs->state = do_transfer;
@@ -511,7 +529,7 @@ static void act(struct floppy_state *fs)
break;
}
if (fs->req_cyl == fs->cur_cyl) {
- printk("whoops, seeking 0\n");
+ swim3_warn("%s", "Whoops, seeking 0\n");
fs->state = do_transfer;
break;
}
@@ -527,7 +545,9 @@ static void act(struct floppy_state *fs)
case do_transfer:
if (fs->cur_cyl != fs->req_cyl) {
if (fs->retries > 5) {
- swim3_end_request_cur(-EIO);
+ swim3_err("Wrong cylinder in transfer, want: %d got %d\n",
+ fs->req_cyl, fs->cur_cyl);
+ swim3_end_request(fs, -EIO, 0);
fs->state = idle;
return;
}
@@ -542,7 +562,7 @@ static void act(struct floppy_state *fs)
return;
default:
- printk(KERN_ERR"swim3: unknown state %d\n", fs->state);
+ swim3_err("Unknown state %d\n", fs->state);
return;
}
}
@@ -552,59 +572,75 @@ static void scan_timeout(unsigned long data)
{
struct floppy_state *fs = (struct floppy_state *) data;
struct swim3 __iomem *sw = fs->swim3;
+ unsigned long flags;
+
+ swim3_dbg("* scan timeout, state=%d\n", fs->state);
+ spin_lock_irqsave(&swim3_lock, flags);
fs->timeout_pending = 0;
out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
out_8(&sw->select, RELAX);
out_8(&sw->intr_enable, 0);
fs->cur_cyl = -1;
if (fs->retries > 5) {
- swim3_end_request_cur(-EIO);
+ swim3_end_request(fs, -EIO, 0);
fs->state = idle;
start_request(fs);
} else {
fs->state = jogging;
act(fs);
}
+ spin_unlock_irqrestore(&swim3_lock, flags);
}
static void seek_timeout(unsigned long data)
{
struct floppy_state *fs = (struct floppy_state *) data;
struct swim3 __iomem *sw = fs->swim3;
+ unsigned long flags;
+
+ swim3_dbg("* seek timeout, state=%d\n", fs->state);
+ spin_lock_irqsave(&swim3_lock, flags);
fs->timeout_pending = 0;
out_8(&sw->control_bic, DO_SEEK);
out_8(&sw->select, RELAX);
out_8(&sw->intr_enable, 0);
- printk(KERN_ERR "swim3: seek timeout\n");
- swim3_end_request_cur(-EIO);
+ swim3_err("%s", "Seek timeout\n");
+ swim3_end_request(fs, -EIO, 0);
fs->state = idle;
start_request(fs);
+ spin_unlock_irqrestore(&swim3_lock, flags);
}
static void settle_timeout(unsigned long data)
{
struct floppy_state *fs = (struct floppy_state *) data;
struct swim3 __iomem *sw = fs->swim3;
+ unsigned long flags;
+
+ swim3_dbg("* settle timeout, state=%d\n", fs->state);
+ spin_lock_irqsave(&swim3_lock, flags);
fs->timeout_pending = 0;
if (swim3_readbit(fs, SEEK_COMPLETE)) {
out_8(&sw->select, RELAX);
fs->state = locating;
act(fs);
- return;
+ goto unlock;
}
out_8(&sw->select, RELAX);
if (fs->settle_time < 2*HZ) {
++fs->settle_time;
set_timeout(fs, 1, settle_timeout);
- return;
+ goto unlock;
}
- printk(KERN_ERR "swim3: seek settle timeout\n");
- swim3_end_request_cur(-EIO);
+ swim3_err("%s", "Seek settle timeout\n");
+ swim3_end_request(fs, -EIO, 0);
fs->state = idle;
start_request(fs);
+ unlock:
+ spin_unlock_irqrestore(&swim3_lock, flags);
}
static void xfer_timeout(unsigned long data)
@@ -612,8 +648,12 @@ static void xfer_timeout(unsigned long data)
struct floppy_state *fs = (struct floppy_state *) data;
struct swim3 __iomem *sw = fs->swim3;
struct dbdma_regs __iomem *dr = fs->dma;
+ unsigned long flags;
int n;
+ swim3_dbg("* xfer timeout, state=%d\n", fs->state);
+
+ spin_lock_irqsave(&swim3_lock, flags);
fs->timeout_pending = 0;
out_le32(&dr->control, RUN << 16);
/* We must wait a bit for dbdma to stop */
@@ -622,12 +662,13 @@ static void xfer_timeout(unsigned long data)
out_8(&sw->intr_enable, 0);
out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION);
out_8(&sw->select, RELAX);
- printk(KERN_ERR "swim3: timeout %sing sector %ld\n",
- (rq_data_dir(fd_req)==WRITE? "writ": "read"),
- (long)blk_rq_pos(fd_req));
- swim3_end_request_cur(-EIO);
+ swim3_err("Timeout %sing sector %ld\n",
+ (rq_data_dir(fs->cur_req)==WRITE? "writ": "read"),
+ (long)blk_rq_pos(fs->cur_req));
+ swim3_end_request(fs, -EIO, 0);
fs->state = idle;
start_request(fs);
+ spin_unlock_irqrestore(&swim3_lock, flags);
}
static irqreturn_t swim3_interrupt(int irq, void *dev_id)
@@ -638,12 +679,17 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
int stat, resid;
struct dbdma_regs __iomem *dr;
struct dbdma_cmd *cp;
+ unsigned long flags;
+ struct request *req = fs->cur_req;
+
+ swim3_dbg("* interrupt, state=%d\n", fs->state);
+ spin_lock_irqsave(&swim3_lock, flags);
intr = in_8(&sw->intr);
err = (intr & ERROR_INTR)? in_8(&sw->error): 0;
if ((intr & ERROR_INTR) && fs->state != do_transfer)
- printk(KERN_ERR "swim3_interrupt, state=%d, dir=%x, intr=%x, err=%x\n",
- fs->state, rq_data_dir(fd_req), intr, err);
+ swim3_err("Non-transfer error interrupt: state=%d, dir=%x, intr=%x, err=%x\n",
+ fs->state, rq_data_dir(req), intr, err);
switch (fs->state) {
case locating:
if (intr & SEEN_SECTOR) {
@@ -653,10 +699,10 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
del_timer(&fs->timeout);
fs->timeout_pending = 0;
if (sw->ctrack == 0xff) {
- printk(KERN_ERR "swim3: seen sector but cyl=ff?\n");
+ swim3_err("%s", "Seen sector but cyl=ff?\n");
fs->cur_cyl = -1;
if (fs->retries > 5) {
- swim3_end_request_cur(-EIO);
+ swim3_end_request(fs, -EIO, 0);
fs->state = idle;
start_request(fs);
} else {
@@ -668,8 +714,8 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
fs->cur_cyl = sw->ctrack;
fs->cur_sector = sw->csect;
if (fs->expect_cyl != -1 && fs->expect_cyl != fs->cur_cyl)
- printk(KERN_ERR "swim3: expected cyl %d, got %d\n",
- fs->expect_cyl, fs->cur_cyl);
+ swim3_err("Expected cyl %d, got %d\n",
+ fs->expect_cyl, fs->cur_cyl);
fs->state = do_transfer;
act(fs);
}
@@ -704,7 +750,7 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
fs->timeout_pending = 0;
dr = fs->dma;
cp = fs->dma_cmd;
- if (rq_data_dir(fd_req) == WRITE)
+ if (rq_data_dir(req) == WRITE)
++cp;
/*
* Check that the main data transfer has finished.
@@ -729,31 +775,32 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
if (intr & ERROR_INTR) {
n = fs->scount - 1 - resid / 512;
if (n > 0) {
- blk_update_request(fd_req, 0, n << 9);
+ blk_update_request(req, 0, n << 9);
fs->req_sector += n;
}
if (fs->retries < 5) {
++fs->retries;
act(fs);
} else {
- printk("swim3: error %sing block %ld (err=%x)\n",
- rq_data_dir(fd_req) == WRITE? "writ": "read",
- (long)blk_rq_pos(fd_req), err);
- swim3_end_request_cur(-EIO);
+ swim3_err("Error %sing block %ld (err=%x)\n",
+ rq_data_dir(req) == WRITE? "writ": "read",
+ (long)blk_rq_pos(req), err);
+ swim3_end_request(fs, -EIO, 0);
fs->state = idle;
}
} else {
if ((stat & ACTIVE) == 0 || resid != 0) {
/* musta been an error */
- printk(KERN_ERR "swim3: fd dma: stat=%x resid=%d\n", stat, resid);
- printk(KERN_ERR " state=%d, dir=%x, intr=%x, err=%x\n",
- fs->state, rq_data_dir(fd_req), intr, err);
- swim3_end_request_cur(-EIO);
+ swim3_err("fd dma error: stat=%x resid=%d\n", stat, resid);
+ swim3_err(" state=%d, dir=%x, intr=%x, err=%x\n",
+ fs->state, rq_data_dir(req), intr, err);
+ swim3_end_request(fs, -EIO, 0);
fs->state = idle;
start_request(fs);
break;
}
- if (swim3_end_request(0, fs->scount << 9)) {
+ fs->retries = 0;
+ if (swim3_end_request(fs, 0, fs->scount << 9)) {
fs->req_sector += fs->scount;
if (fs->req_sector > fs->secpertrack) {
fs->req_sector -= fs->secpertrack;
@@ -770,8 +817,9 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
start_request(fs);
break;
default:
- printk(KERN_ERR "swim3: don't know what to do in state %d\n", fs->state);
+ swim3_err("Don't know what to do in state %d\n", fs->state);
}
+ spin_unlock_irqrestore(&swim3_lock, flags);
return IRQ_HANDLED;
}
@@ -781,26 +829,31 @@ static void fd_dma_interrupt(int irq, void *dev_id)
}
*/
+/* Called under the mutex to grab exclusive access to a drive */
static int grab_drive(struct floppy_state *fs, enum swim_state state,
int interruptible)
{
unsigned long flags;
- spin_lock_irqsave(&fs->lock, flags);
- if (fs->state != idle) {
+ swim3_dbg("%s", "-> grab drive\n");
+
+ spin_lock_irqsave(&swim3_lock, flags);
+ if (fs->state != idle && fs->state != available) {
++fs->wanted;
while (fs->state != available) {
+ spin_unlock_irqrestore(&swim3_lock, flags);
if (interruptible && signal_pending(current)) {
--fs->wanted;
- spin_unlock_irqrestore(&fs->lock, flags);
return -EINTR;
}
interruptible_sleep_on(&fs->wait);
+ spin_lock_irqsave(&swim3_lock, flags);
}
--fs->wanted;
}
fs->state = state;
- spin_unlock_irqrestore(&fs->lock, flags);
+ spin_unlock_irqrestore(&swim3_lock, flags);
+
return 0;
}
@@ -808,10 +861,12 @@ static void release_drive(struct floppy_state *fs)
{
unsigned long flags;
- spin_lock_irqsave(&fs->lock, flags);
+ swim3_dbg("%s", "-> release drive\n");
+
+ spin_lock_irqsave(&swim3_lock, flags);
fs->state = idle;
start_request(fs);
- spin_unlock_irqrestore(&fs->lock, flags);
+ spin_unlock_irqrestore(&swim3_lock, flags);
}
static int fd_eject(struct floppy_state *fs)
@@ -966,6 +1021,7 @@ static int floppy_release(struct gendisk *disk, fmode_t mode)
{
struct floppy_state *fs = disk->private_data;
struct swim3 __iomem *sw = fs->swim3;
+
mutex_lock(&swim3_mutex);
if (fs->ref_count > 0 && --fs->ref_count == 0) {
swim3_action(fs, MOTOR_OFF);
@@ -1031,30 +1087,48 @@ static const struct block_device_operations floppy_fops = {
.revalidate_disk= floppy_revalidate,
};
+static void swim3_mb_event(struct macio_dev* mdev, int mb_state)
+{
+ struct floppy_state *fs = macio_get_drvdata(mdev);
+ struct swim3 __iomem *sw = fs->swim3;
+
+ if (!fs)
+ return;
+ if (mb_state != MB_FD)
+ return;
+
+ /* Clear state */
+ out_8(&sw->intr_enable, 0);
+ in_8(&sw->intr);
+ in_8(&sw->error);
+}
+
static int swim3_add_device(struct macio_dev *mdev, int index)
{
struct device_node *swim = mdev->ofdev.dev.of_node;
struct floppy_state *fs = &floppy_states[index];
int rc = -EBUSY;
+ /* Do this first for message macros */
+ memset(fs, 0, sizeof(*fs));
+ fs->mdev = mdev;
+ fs->index = index;
+
/* Check & Request resources */
if (macio_resource_count(mdev) < 2) {
- printk(KERN_WARNING "ifd%d: no address for %s\n",
- index, swim->full_name);
+ swim3_err("%s", "No address in device-tree\n");
return -ENXIO;
}
- if (macio_irq_count(mdev) < 2) {
- printk(KERN_WARNING "fd%d: no intrs for device %s\n",
- index, swim->full_name);
+ if (macio_irq_count(mdev) < 1) {
+ swim3_err("%s", "No interrupt in device-tree\n");
+ return -ENXIO;
}
if (macio_request_resource(mdev, 0, "swim3 (mmio)")) {
- printk(KERN_ERR "fd%d: can't request mmio resource for %s\n",
- index, swim->full_name);
+ swim3_err("%s", "Can't request mmio resource\n");
return -EBUSY;
}
if (macio_request_resource(mdev, 1, "swim3 (dma)")) {
- printk(KERN_ERR "fd%d: can't request dma resource for %s\n",
- index, swim->full_name);
+ swim3_err("%s", "Can't request dma resource\n");
macio_release_resource(mdev, 0);
return -EBUSY;
}
@@ -1063,22 +1137,18 @@ static int swim3_add_device(struct macio_dev *mdev, int index)
if (mdev->media_bay == NULL)
pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 1);
- memset(fs, 0, sizeof(*fs));
- spin_lock_init(&fs->lock);
fs->state = idle;
fs->swim3 = (struct swim3 __iomem *)
ioremap(macio_resource_start(mdev, 0), 0x200);
if (fs->swim3 == NULL) {
- printk("fd%d: couldn't map registers for %s\n",
- index, swim->full_name);
+ swim3_err("%s", "Couldn't map mmio registers\n");
rc = -ENOMEM;
goto out_release;
}
fs->dma = (struct dbdma_regs __iomem *)
ioremap(macio_resource_start(mdev, 1), 0x200);
if (fs->dma == NULL) {
- printk("fd%d: couldn't map DMA for %s\n",
- index, swim->full_name);
+ swim3_err("%s", "Couldn't map dma registers\n");
iounmap(fs->swim3);
rc = -ENOMEM;
goto out_release;
@@ -1090,31 +1160,25 @@ static int swim3_add_device(struct macio_dev *mdev, int index)
fs->secpercyl = 36;
fs->secpertrack = 18;
fs->total_secs = 2880;
- fs->mdev = mdev;
init_waitqueue_head(&fs->wait);
fs->dma_cmd = (struct dbdma_cmd *) DBDMA_ALIGN(fs->dbdma_cmd_space);
memset(fs->dma_cmd, 0, 2 * sizeof(struct dbdma_cmd));
st_le16(&fs->dma_cmd[1].command, DBDMA_STOP);
+ if (mdev->media_bay == NULL || check_media_bay(mdev->media_bay) == MB_FD)
+ swim3_mb_event(mdev, MB_FD);
+
if (request_irq(fs->swim3_intr, swim3_interrupt, 0, "SWIM3", fs)) {
- printk(KERN_ERR "fd%d: couldn't request irq %d for %s\n",
- index, fs->swim3_intr, swim->full_name);
+ swim3_err("%s", "Couldn't request interrupt\n");
pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 0);
goto out_unmap;
return -EBUSY;
}
-/*
- if (request_irq(fs->dma_intr, fd_dma_interrupt, 0, "SWIM3-dma", fs)) {
- printk(KERN_ERR "Couldn't get irq %d for SWIM3 DMA",
- fs->dma_intr);
- return -EBUSY;
- }
-*/
init_timer(&fs->timeout);
- printk(KERN_INFO "fd%d: SWIM3 floppy controller %s\n", floppy_count,
+ swim3_info("SWIM3 floppy controller %s\n",
mdev->media_bay ? "in media bay" : "");
return 0;
@@ -1132,41 +1196,42 @@ static int swim3_add_device(struct macio_dev *mdev, int index)
static int __devinit swim3_attach(struct macio_dev *mdev, const struct of_device_id *match)
{
- int i, rc;
struct gendisk *disk;
+ int index, rc;
+
+ index = floppy_count++;
+ if (index >= MAX_FLOPPIES)
+ return -ENXIO;
/* Add the drive */
- rc = swim3_add_device(mdev, floppy_count);
+ rc = swim3_add_device(mdev, index);
if (rc)
return rc;
+ /* Now register that disk. Same comment about failure handling */
+ disk = disks[index] = alloc_disk(1);
+ if (disk == NULL)
+ return -ENOMEM;
+ disk->queue = blk_init_queue(do_fd_request, &swim3_lock);
+ if (disk->queue == NULL) {
+ put_disk(disk);
+ return -ENOMEM;
+ }
+ disk->queue->queuedata = &floppy_states[index];
- /* Now create the queue if not there yet */
- if (swim3_queue == NULL) {
+ if (index == 0) {
/* If we failed, there isn't much we can do as the driver is still
* too dumb to remove the device, just bail out
*/
if (register_blkdev(FLOPPY_MAJOR, "fd"))
return 0;
- swim3_queue = blk_init_queue(do_fd_request, &swim3_lock);
- if (swim3_queue == NULL) {
- unregister_blkdev(FLOPPY_MAJOR, "fd");
- return 0;
- }
}
- /* Now register that disk. Same comment about failure handling */
- i = floppy_count++;
- disk = disks[i] = alloc_disk(1);
- if (disk == NULL)
- return 0;
-
disk->major = FLOPPY_MAJOR;
- disk->first_minor = i;
+ disk->first_minor = index;
disk->fops = &floppy_fops;
- disk->private_data = &floppy_states[i];
- disk->queue = swim3_queue;
+ disk->private_data = &floppy_states[index];
disk->flags |= GENHD_FL_REMOVABLE;
- sprintf(disk->disk_name, "fd%d", i);
+ sprintf(disk->disk_name, "fd%d", index);
set_capacity(disk, 2880);
add_disk(disk);
@@ -1194,6 +1259,9 @@ static struct macio_driver swim3_driver =
.of_match_table = swim3_match,
},
.probe = swim3_attach,
+#ifdef CONFIG_PMAC_MEDIABAY
+ .mediabay_event = swim3_mb_event,
+#endif
#if 0
.suspend = swim3_suspend,
.resume = swim3_resume,
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index b70f0fca9a42..e7472f567c9d 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -619,8 +619,10 @@ static int carm_array_info (struct carm_host *host, unsigned int array_idx)
host->state == HST_DEV_SCAN);
spin_unlock_irq(&host->lock);
- DPRINTK("blk_insert_request, tag == %u\n", idx);
- blk_insert_request(host->oob_q, crq->rq, 1, crq);
+ DPRINTK("blk_execute_rq_nowait, tag == %u\n", idx);
+ crq->rq->cmd_type = REQ_TYPE_SPECIAL;
+ crq->rq->special = crq;
+ blk_execute_rq_nowait(host->oob_q, NULL, crq->rq, true, NULL);
return 0;
@@ -658,8 +660,10 @@ static int carm_send_special (struct carm_host *host, carm_sspc_t func)
BUG_ON(rc < 0);
crq->msg_bucket = (u32) rc;
- DPRINTK("blk_insert_request, tag == %u\n", idx);
- blk_insert_request(host->oob_q, crq->rq, 1, crq);
+ DPRINTK("blk_execute_rq_nowait, tag == %u\n", idx);
+ crq->rq->cmd_type = REQ_TYPE_SPECIAL;
+ crq->rq->special = crq;
+ blk_execute_rq_nowait(host->oob_q, NULL, crq->rq, true, NULL);
return 0;
}
diff --git a/drivers/block/ub.c b/drivers/block/ub.c
index 0e376d46bdd1..7333b9e44411 100644
--- a/drivers/block/ub.c
+++ b/drivers/block/ub.c
@@ -1744,12 +1744,11 @@ static int ub_bd_release(struct gendisk *disk, fmode_t mode)
static int ub_bd_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
- struct gendisk *disk = bdev->bd_disk;
void __user *usermem = (void __user *) arg;
int ret;
mutex_lock(&ub_mutex);
- ret = scsi_cmd_ioctl(disk->queue, disk, mode, cmd, usermem);
+ ret = scsi_cmd_blk_ioctl(bdev, mode, cmd, usermem);
mutex_unlock(&ub_mutex);
return ret;
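ub (like virtio-blk and cdrom further on in this merge) switches from scsi_cmd_ioctl() to scsi_cmd_blk_ioctl(), which takes the block_device itself and applies the generic partition and permission restrictions before dispatching. A sketch of a block ioctl method built on it (my_bd_ioctl is illustrative):

#include <linux/blkdev.h>

static int my_bd_ioctl(struct block_device *bdev, fmode_t mode,
                       unsigned int cmd, unsigned long arg)
{
        /* scsi_cmd_blk_ioctl() derives queue and gendisk from bdev and
         * restricts SCSI passthrough ioctls on partitions before
         * handing off to scsi_cmd_ioctl(). */
        return scsi_cmd_blk_ioctl(bdev, mode, cmd, (void __user *)arg);
}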
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 4d0b70adf5f7..c4a60badf252 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -4,6 +4,7 @@
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/virtio.h>
#include <linux/virtio_blk.h>
#include <linux/scatterlist.h>
@@ -36,6 +37,12 @@ struct virtio_blk
/* Process context for config space updates */
struct work_struct config_work;
+ /* Lock for config space updates */
+ struct mutex config_lock;
+
+ /* enable config space updates */
+ bool config_enable;
+
/* What host tells us, plus 2 for header & tailer. */
unsigned int sg_elems;
@@ -172,7 +179,7 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
}
}
- if (virtqueue_add_buf(vblk->vq, vblk->sg, out, in, vbr) < 0) {
+ if (virtqueue_add_buf(vblk->vq, vblk->sg, out, in, vbr, GFP_ATOMIC)<0) {
mempool_free(vbr, vblk->pool);
return false;
}
@@ -243,8 +250,8 @@ static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI))
return -ENOTTY;
- return scsi_cmd_ioctl(disk->queue, disk, mode, cmd,
- (void __user *)data);
+ return scsi_cmd_blk_ioctl(bdev, mode, cmd,
+ (void __user *)data);
}
/* We provide getgeo only to please some old bootloader/partitioning tools */
@@ -318,6 +325,10 @@ static void virtblk_config_changed_work(struct work_struct *work)
char cap_str_2[10], cap_str_10[10];
u64 capacity, size;
+ mutex_lock(&vblk->config_lock);
+ if (!vblk->config_enable)
+ goto done;
+
/* Host must always specify the capacity. */
vdev->config->get(vdev, offsetof(struct virtio_blk_config, capacity),
&capacity, sizeof(capacity));
@@ -340,6 +351,8 @@ static void virtblk_config_changed_work(struct work_struct *work)
cap_str_10, cap_str_2);
set_capacity(vblk->disk, capacity);
+done:
+ mutex_unlock(&vblk->config_lock);
}
static void virtblk_config_changed(struct virtio_device *vdev)
@@ -349,6 +362,18 @@ static void virtblk_config_changed(struct virtio_device *vdev)
queue_work(virtblk_wq, &vblk->config_work);
}
+static int init_vq(struct virtio_blk *vblk)
+{
+ int err = 0;
+
+ /* We expect one virtqueue, for output. */
+ vblk->vq = virtio_find_single_vq(vblk->vdev, blk_done, "requests");
+ if (IS_ERR(vblk->vq))
+ err = PTR_ERR(vblk->vq);
+
+ return err;
+}
+
static int __devinit virtblk_probe(struct virtio_device *vdev)
{
struct virtio_blk *vblk;
@@ -388,14 +413,13 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
vblk->vdev = vdev;
vblk->sg_elems = sg_elems;
sg_init_table(vblk->sg, vblk->sg_elems);
+ mutex_init(&vblk->config_lock);
INIT_WORK(&vblk->config_work, virtblk_config_changed_work);
+ vblk->config_enable = true;
- /* We expect one virtqueue, for output. */
- vblk->vq = virtio_find_single_vq(vdev, blk_done, "requests");
- if (IS_ERR(vblk->vq)) {
- err = PTR_ERR(vblk->vq);
+ err = init_vq(vblk);
+ if (err)
goto out_free_vblk;
- }
vblk->pool = mempool_create_kmalloc_pool(1,sizeof(struct virtblk_req));
if (!vblk->pool) {
@@ -542,7 +566,10 @@ static void __devexit virtblk_remove(struct virtio_device *vdev)
struct virtio_blk *vblk = vdev->priv;
int index = vblk->index;
- flush_work(&vblk->config_work);
+ /* Prevent config work handler from accessing the device. */
+ mutex_lock(&vblk->config_lock);
+ vblk->config_enable = false;
+ mutex_unlock(&vblk->config_lock);
/* Nothing should be pending. */
BUG_ON(!list_empty(&vblk->reqs));
@@ -550,6 +577,8 @@ static void __devexit virtblk_remove(struct virtio_device *vdev)
/* Stop all the virtqueues. */
vdev->config->reset(vdev);
+ flush_work(&vblk->config_work);
+
del_gendisk(vblk->disk);
blk_cleanup_queue(vblk->disk->queue);
put_disk(vblk->disk);
@@ -559,6 +588,46 @@ static void __devexit virtblk_remove(struct virtio_device *vdev)
ida_simple_remove(&vd_index_ida, index);
}
+#ifdef CONFIG_PM
+static int virtblk_freeze(struct virtio_device *vdev)
+{
+ struct virtio_blk *vblk = vdev->priv;
+
+ /* Ensure we don't receive any more interrupts */
+ vdev->config->reset(vdev);
+
+ /* Prevent config work handler from accessing the device. */
+ mutex_lock(&vblk->config_lock);
+ vblk->config_enable = false;
+ mutex_unlock(&vblk->config_lock);
+
+ flush_work(&vblk->config_work);
+
+ spin_lock_irq(vblk->disk->queue->queue_lock);
+ blk_stop_queue(vblk->disk->queue);
+ spin_unlock_irq(vblk->disk->queue->queue_lock);
+ blk_sync_queue(vblk->disk->queue);
+
+ vdev->config->del_vqs(vdev);
+ return 0;
+}
+
+static int virtblk_restore(struct virtio_device *vdev)
+{
+ struct virtio_blk *vblk = vdev->priv;
+ int ret;
+
+ vblk->config_enable = true;
+ ret = init_vq(vdev->priv);
+ if (!ret) {
+ spin_lock_irq(vblk->disk->queue->queue_lock);
+ blk_start_queue(vblk->disk->queue);
+ spin_unlock_irq(vblk->disk->queue->queue_lock);
+ }
+ return ret;
+}
+#endif
+
static const struct virtio_device_id id_table[] = {
{ VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
{ 0 },
@@ -584,6 +653,10 @@ static struct virtio_driver __refdata virtio_blk = {
.probe = virtblk_probe,
.remove = __devexit_p(virtblk_remove),
.config_changed = virtblk_config_changed,
+#ifdef CONFIG_PM
+ .freeze = virtblk_freeze,
+ .restore = virtblk_restore,
+#endif
};
static int __init init(void)
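The virtio-blk changes above introduce a config_lock/config_enable pair so the config-change work item can be fenced off before the device is reset on remove or freeze, and factor virtqueue setup into init_vq() so restore can reuse it. A reduced sketch of the fencing pattern, with the field names following the hunk but the surrounding struct and helpers invented for illustration:

#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

struct cfg_ctx {
        struct mutex config_lock;
        bool config_enable;
        struct work_struct config_work;
};

static void config_changed_work(struct work_struct *work)
{
        struct cfg_ctx *c = container_of(work, struct cfg_ctx, config_work);

        mutex_lock(&c->config_lock);
        if (!c->config_enable)
                goto done;              /* device is being torn down */
        /* ... re-read config space and apply it ... */
done:
        mutex_unlock(&c->config_lock);
}

static void teardown(struct cfg_ctx *c)
{
        /* First stop new work instances from touching the device... */
        mutex_lock(&c->config_lock);
        c->config_enable = false;
        mutex_unlock(&c->config_lock);

        /* ...reset/quiesce the device here (vdev->config->reset(vdev))... */

        /* ...then wait out any handler that is already running. */
        flush_work(&c->config_work);
}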
diff --git a/drivers/block/xd.c b/drivers/block/xd.c
index 4abd2bcd20fb..51a972704db5 100644
--- a/drivers/block/xd.c
+++ b/drivers/block/xd.c
@@ -148,7 +148,7 @@ static volatile int xdc_busy;
static struct timer_list xd_watchdog_int;
static volatile u_char xd_error;
-static int nodma = XD_DONT_USE_DMA;
+static bool nodma = XD_DONT_USE_DMA;
static struct request_queue *xd_queue;
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 15ec4db194d1..0088bf60f368 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -39,9 +39,6 @@
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/freezer.h>
-#include <linux/loop.h>
-#include <linux/falloc.h>
-#include <linux/fs.h>
#include <xen/events.h>
#include <xen/page.h>
@@ -362,7 +359,7 @@ static int xen_blkbk_map(struct blkif_request *req,
{
struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
int i;
- int nseg = req->nr_segments;
+ int nseg = req->u.rw.nr_segments;
int ret = 0;
/*
@@ -416,30 +413,25 @@ static int xen_blkbk_map(struct blkif_request *req,
return ret;
}
-static void xen_blk_discard(struct xen_blkif *blkif, struct blkif_request *req)
+static int dispatch_discard_io(struct xen_blkif *blkif,
+ struct blkif_request *req)
{
int err = 0;
int status = BLKIF_RSP_OKAY;
struct block_device *bdev = blkif->vbd.bdev;
- if (blkif->blk_backend_type == BLKIF_BACKEND_PHY)
- /* just forward the discard request */
+ blkif->st_ds_req++;
+
+ xen_blkif_get(blkif);
+ if (blkif->blk_backend_type == BLKIF_BACKEND_PHY ||
+ blkif->blk_backend_type == BLKIF_BACKEND_FILE) {
+ unsigned long secure = (blkif->vbd.discard_secure &&
+ (req->u.discard.flag & BLKIF_DISCARD_SECURE)) ?
+ BLKDEV_DISCARD_SECURE : 0;
err = blkdev_issue_discard(bdev,
req->u.discard.sector_number,
req->u.discard.nr_sectors,
- GFP_KERNEL, 0);
- else if (blkif->blk_backend_type == BLKIF_BACKEND_FILE) {
- /* punch a hole in the backing file */
- struct loop_device *lo = bdev->bd_disk->private_data;
- struct file *file = lo->lo_backing_file;
-
- if (file->f_op->fallocate)
- err = file->f_op->fallocate(file,
- FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE,
- req->u.discard.sector_number << 9,
- req->u.discard.nr_sectors << 9);
- else
- err = -EOPNOTSUPP;
+ GFP_KERNEL, secure);
} else
err = -EOPNOTSUPP;
@@ -449,7 +441,9 @@ static void xen_blk_discard(struct xen_blkif *blkif, struct blkif_request *req)
} else if (err)
status = BLKIF_RSP_ERROR;
- make_response(blkif, req->id, req->operation, status);
+ make_response(blkif, req->u.discard.id, req->operation, status);
+ xen_blkif_put(blkif);
+ return err;
}
static void xen_blk_drain_io(struct xen_blkif *blkif)
@@ -573,8 +567,11 @@ __do_block_io_op(struct xen_blkif *blkif)
/* Apply all sanity checks to /private copy/ of request. */
barrier();
-
- if (dispatch_rw_block_io(blkif, &req, pending_req))
+ if (unlikely(req.operation == BLKIF_OP_DISCARD)) {
+ free_req(pending_req);
+ if (dispatch_discard_io(blkif, &req))
+ break;
+ } else if (dispatch_rw_block_io(blkif, &req, pending_req))
break;
/* Yield point for this unbounded loop. */
@@ -633,10 +630,6 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
blkif->st_f_req++;
operation = WRITE_FLUSH;
break;
- case BLKIF_OP_DISCARD:
- blkif->st_ds_req++;
- operation = REQ_DISCARD;
- break;
default:
operation = 0; /* make gcc happy */
goto fail_response;
@@ -644,9 +637,9 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
}
/* Check that the number of segments is sane. */
- nseg = req->nr_segments;
- if (unlikely(nseg == 0 && operation != WRITE_FLUSH &&
- operation != REQ_DISCARD) ||
+ nseg = req->u.rw.nr_segments;
+
+ if (unlikely(nseg == 0 && operation != WRITE_FLUSH) ||
unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
pr_debug(DRV_PFX "Bad number of segments in request (%d)\n",
nseg);
@@ -654,12 +647,12 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
goto fail_response;
}
- preq.dev = req->handle;
+ preq.dev = req->u.rw.handle;
preq.sector_number = req->u.rw.sector_number;
preq.nr_sects = 0;
pending_req->blkif = blkif;
- pending_req->id = req->id;
+ pending_req->id = req->u.rw.id;
pending_req->operation = req->operation;
pending_req->status = BLKIF_RSP_OKAY;
pending_req->nr_pages = nseg;
@@ -707,7 +700,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
* the hypercall to unmap the grants - that is all done in
* xen_blkbk_unmap.
*/
- if (operation != REQ_DISCARD && xen_blkbk_map(req, pending_req, seg))
+ if (xen_blkbk_map(req, pending_req, seg))
goto fail_flush;
/*
@@ -739,23 +732,16 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
/* This will be hit if the operation was a flush or discard. */
if (!bio) {
- BUG_ON(operation != WRITE_FLUSH && operation != REQ_DISCARD);
+ BUG_ON(operation != WRITE_FLUSH);
- if (operation == WRITE_FLUSH) {
- bio = bio_alloc(GFP_KERNEL, 0);
- if (unlikely(bio == NULL))
- goto fail_put_bio;
+ bio = bio_alloc(GFP_KERNEL, 0);
+ if (unlikely(bio == NULL))
+ goto fail_put_bio;
- biolist[nbio++] = bio;
- bio->bi_bdev = preq.bdev;
- bio->bi_private = pending_req;
- bio->bi_end_io = end_block_io_op;
- } else if (operation == REQ_DISCARD) {
- xen_blk_discard(blkif, req);
- xen_blkif_put(blkif);
- free_req(pending_req);
- return 0;
- }
+ biolist[nbio++] = bio;
+ bio->bi_bdev = preq.bdev;
+ bio->bi_private = pending_req;
+ bio->bi_end_io = end_block_io_op;
}
/*
@@ -784,7 +770,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
xen_blkbk_unmap(pending_req);
fail_response:
/* Haven't submitted any bio's yet. */
- make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
+ make_response(blkif, req->u.rw.id, req->operation, BLKIF_RSP_ERROR);
free_req(pending_req);
msleep(1); /* back off a bit */
return -EIO;
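With the loop-device punch-hole special case removed, blkback handles BLKIF_OP_DISCARD in the dedicated dispatch_discard_io(), which simply forwards to blkdev_issue_discard() and requests BLKDEV_DISCARD_SECURE only when the backing queue advertised secure discard and the frontend asked for it. The core of that decision, pulled out into an illustrative helper:

#include <linux/blkdev.h>

/* secure_supported mirrors vbd->discard_secure; want_secure mirrors the
 * BLKIF_DISCARD_SECURE bit carried in the frontend request. */
static int do_discard(struct block_device *bdev, sector_t start,
                      sector_t nr_sects, bool secure_supported,
                      bool want_secure)
{
        unsigned long flags = (secure_supported && want_secure) ?
                              BLKDEV_DISCARD_SECURE : 0;

        return blkdev_issue_discard(bdev, start, nr_sects, GFP_KERNEL, flags);
}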
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index dfb1b3a43a5d..d0ee7edc9be8 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -60,58 +60,66 @@ struct blkif_common_response {
char dummy;
};
-/* i386 protocol version */
-#pragma pack(push, 4)
-
struct blkif_x86_32_request_rw {
+ uint8_t nr_segments; /* number of segments */
+ blkif_vdev_t handle; /* only for read/write requests */
+ uint64_t id; /* private guest value, echoed in resp */
blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-};
+} __attribute__((__packed__));
struct blkif_x86_32_request_discard {
+ uint8_t flag; /* BLKIF_DISCARD_SECURE or zero */
+ blkif_vdev_t _pad1; /* was "handle" for read/write requests */
+ uint64_t id; /* private guest value, echoed in resp */
blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
- uint64_t nr_sectors;
-};
+ uint64_t nr_sectors;
+} __attribute__((__packed__));
struct blkif_x86_32_request {
uint8_t operation; /* BLKIF_OP_??? */
- uint8_t nr_segments; /* number of segments */
- blkif_vdev_t handle; /* only for read/write requests */
- uint64_t id; /* private guest value, echoed in resp */
union {
struct blkif_x86_32_request_rw rw;
struct blkif_x86_32_request_discard discard;
} u;
-};
+} __attribute__((__packed__));
+
+/* i386 protocol version */
+#pragma pack(push, 4)
struct blkif_x86_32_response {
uint64_t id; /* copied from request */
uint8_t operation; /* copied from request */
int16_t status; /* BLKIF_RSP_??? */
};
#pragma pack(pop)
-
/* x86_64 protocol version */
struct blkif_x86_64_request_rw {
+ uint8_t nr_segments; /* number of segments */
+ blkif_vdev_t handle; /* only for read/write requests */
+ uint32_t _pad1; /* offsetof(blkif_request..,u.rw.id)==8 */
+ uint64_t id;
blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-};
+} __attribute__((__packed__));
struct blkif_x86_64_request_discard {
+ uint8_t flag; /* BLKIF_DISCARD_SECURE or zero */
+ blkif_vdev_t _pad1; /* was "handle" for read/write requests */
+ uint32_t _pad2; /* offsetof(blkif_..,u.discard.id)==8 */
+ uint64_t id;
blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
- uint64_t nr_sectors;
-};
+ uint64_t nr_sectors;
+} __attribute__((__packed__));
struct blkif_x86_64_request {
uint8_t operation; /* BLKIF_OP_??? */
- uint8_t nr_segments; /* number of segments */
- blkif_vdev_t handle; /* only for read/write requests */
- uint64_t __attribute__((__aligned__(8))) id;
union {
struct blkif_x86_64_request_rw rw;
struct blkif_x86_64_request_discard discard;
} u;
-};
+} __attribute__((__packed__));
+
struct blkif_x86_64_response {
uint64_t __attribute__((__aligned__(8))) id;
uint8_t operation; /* copied from request */
@@ -156,6 +164,7 @@ struct xen_vbd {
/* Cached size parameter. */
sector_t size;
bool flush_support;
+ bool discard_secure;
};
struct backend_info;
@@ -237,22 +246,23 @@ static inline void blkif_get_x86_32_req(struct blkif_request *dst,
{
int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST;
dst->operation = src->operation;
- dst->nr_segments = src->nr_segments;
- dst->handle = src->handle;
- dst->id = src->id;
switch (src->operation) {
case BLKIF_OP_READ:
case BLKIF_OP_WRITE:
case BLKIF_OP_WRITE_BARRIER:
case BLKIF_OP_FLUSH_DISKCACHE:
+ dst->u.rw.nr_segments = src->u.rw.nr_segments;
+ dst->u.rw.handle = src->u.rw.handle;
+ dst->u.rw.id = src->u.rw.id;
dst->u.rw.sector_number = src->u.rw.sector_number;
barrier();
- if (n > dst->nr_segments)
- n = dst->nr_segments;
+ if (n > dst->u.rw.nr_segments)
+ n = dst->u.rw.nr_segments;
for (i = 0; i < n; i++)
dst->u.rw.seg[i] = src->u.rw.seg[i];
break;
case BLKIF_OP_DISCARD:
+ dst->u.discard.flag = src->u.discard.flag;
dst->u.discard.sector_number = src->u.discard.sector_number;
dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
break;
@@ -266,22 +276,23 @@ static inline void blkif_get_x86_64_req(struct blkif_request *dst,
{
int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST;
dst->operation = src->operation;
- dst->nr_segments = src->nr_segments;
- dst->handle = src->handle;
- dst->id = src->id;
switch (src->operation) {
case BLKIF_OP_READ:
case BLKIF_OP_WRITE:
case BLKIF_OP_WRITE_BARRIER:
case BLKIF_OP_FLUSH_DISKCACHE:
+ dst->u.rw.nr_segments = src->u.rw.nr_segments;
+ dst->u.rw.handle = src->u.rw.handle;
+ dst->u.rw.id = src->u.rw.id;
dst->u.rw.sector_number = src->u.rw.sector_number;
barrier();
- if (n > dst->nr_segments)
- n = dst->nr_segments;
+ if (n > dst->u.rw.nr_segments)
+ n = dst->u.rw.nr_segments;
for (i = 0; i < n; i++)
dst->u.rw.seg[i] = src->u.rw.seg[i];
break;
case BLKIF_OP_DISCARD:
+ dst->u.discard.flag = src->u.discard.flag;
dst->u.discard.sector_number = src->u.discard.sector_number;
dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
break;
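The header rework moves id, handle and nr_segments into each member of the request union so the i386 and x86_64 guest layouts stay byte-for-byte compatible with the native one; the _pad fields and __packed attributes exist solely to keep the id at offset 8. If one wanted to pin that invariant down at build time, the usual tool is BUILD_BUG_ON, e.g.:

#include <linux/bug.h>
#include <linux/stddef.h>

static inline void blkif_layout_checks(void)
{
        /* Matches the comments next to _pad1/_pad2 in the hunk above;
         * the check itself is illustrative, the patch does not add it. */
        BUILD_BUG_ON(offsetof(struct blkif_x86_64_request, u.rw.id) != 8);
        BUILD_BUG_ON(offsetof(struct blkif_x86_64_request, u.discard.id) != 8);
}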
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index f759ad4584c3..24a2fb57e5d0 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -338,6 +338,9 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
if (q && q->flush_flags)
vbd->flush_support = true;
+ if (q && blk_queue_secdiscard(q))
+ vbd->discard_secure = true;
+
DPRINTK("Successful creation of handle=%04x (dom=%u)\n",
handle, blkif->domid);
return 0;
@@ -420,6 +423,15 @@ int xen_blkbk_discard(struct xenbus_transaction xbt, struct backend_info *be)
state = 1;
blkif->blk_backend_type = BLKIF_BACKEND_PHY;
}
+ /* Optional. */
+ err = xenbus_printf(xbt, dev->nodename,
+ "discard-secure", "%d",
+ blkif->vbd.discard_secure);
+ if (err) {
+ xenbus_dev_fatal(dev, err,
+ "writting discard-secure");
+ goto kfree;
+ }
}
} else {
err = PTR_ERR(type);
@@ -613,7 +625,7 @@ static void frontend_changed(struct xenbus_device *dev,
case XenbusStateConnected:
/*
* Ensure we connect even when two watches fire in
- * close successsion and we miss the intermediate value
+ * close succession and we miss the intermediate value
* of frontend_state.
*/
if (dev->state == XenbusStateConnected)
@@ -787,17 +799,14 @@ static const struct xenbus_device_id xen_blkbk_ids[] = {
};
-static struct xenbus_driver xen_blkbk = {
- .name = "vbd",
- .owner = THIS_MODULE,
- .ids = xen_blkbk_ids,
+static DEFINE_XENBUS_DRIVER(xen_blkbk, ,
.probe = xen_blkbk_probe,
.remove = xen_blkbk_remove,
.otherend_changed = frontend_changed
-};
+);
int xen_blkif_xenbus_init(void)
{
- return xenbus_register_backend(&xen_blkbk);
+ return xenbus_register_backend(&xen_blkbk_driver);
}
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 7b2ec5908413..2f22874c0a37 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -98,7 +98,8 @@ struct blkfront_info
unsigned long shadow_free;
unsigned int feature_flush;
unsigned int flush_op;
- unsigned int feature_discard;
+ unsigned int feature_discard:1;
+ unsigned int feature_secdiscard:1;
unsigned int discard_granularity;
unsigned int discard_alignment;
int is_ready;
@@ -135,15 +136,15 @@ static int get_id_from_freelist(struct blkfront_info *info)
{
unsigned long free = info->shadow_free;
BUG_ON(free >= BLK_RING_SIZE);
- info->shadow_free = info->shadow[free].req.id;
- info->shadow[free].req.id = 0x0fffffee; /* debug */
+ info->shadow_free = info->shadow[free].req.u.rw.id;
+ info->shadow[free].req.u.rw.id = 0x0fffffee; /* debug */
return free;
}
static void add_id_to_freelist(struct blkfront_info *info,
unsigned long id)
{
- info->shadow[id].req.id = info->shadow_free;
+ info->shadow[id].req.u.rw.id = info->shadow_free;
info->shadow[id].request = NULL;
info->shadow_free = id;
}
@@ -156,7 +157,7 @@ static int xlbd_reserve_minors(unsigned int minor, unsigned int nr)
if (end > nr_minors) {
unsigned long *bitmap, *old;
- bitmap = kzalloc(BITS_TO_LONGS(end) * sizeof(*bitmap),
+ bitmap = kcalloc(BITS_TO_LONGS(end), sizeof(*bitmap),
GFP_KERNEL);
if (bitmap == NULL)
return -ENOMEM;
@@ -287,9 +288,9 @@ static int blkif_queue_request(struct request *req)
id = get_id_from_freelist(info);
info->shadow[id].request = req;
- ring_req->id = id;
+ ring_req->u.rw.id = id;
ring_req->u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req);
- ring_req->handle = info->handle;
+ ring_req->u.rw.handle = info->handle;
ring_req->operation = rq_data_dir(req) ?
BLKIF_OP_WRITE : BLKIF_OP_READ;
@@ -305,16 +306,21 @@ static int blkif_queue_request(struct request *req)
ring_req->operation = info->flush_op;
}
- if (unlikely(req->cmd_flags & REQ_DISCARD)) {
+ if (unlikely(req->cmd_flags & (REQ_DISCARD | REQ_SECURE))) {
/* id, sector_number and handle are set above. */
ring_req->operation = BLKIF_OP_DISCARD;
- ring_req->nr_segments = 0;
ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
+ if ((req->cmd_flags & REQ_SECURE) && info->feature_secdiscard)
+ ring_req->u.discard.flag = BLKIF_DISCARD_SECURE;
+ else
+ ring_req->u.discard.flag = 0;
} else {
- ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg);
- BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
+ ring_req->u.rw.nr_segments = blk_rq_map_sg(req->q, req,
+ info->sg);
+ BUG_ON(ring_req->u.rw.nr_segments >
+ BLKIF_MAX_SEGMENTS_PER_REQUEST);
- for_each_sg(info->sg, sg, ring_req->nr_segments, i) {
+ for_each_sg(info->sg, sg, ring_req->u.rw.nr_segments, i) {
buffer_mfn = pfn_to_mfn(page_to_pfn(sg_page(sg)));
fsect = sg->offset >> 9;
lsect = fsect + (sg->length >> 9) - 1;
@@ -424,6 +430,8 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
blk_queue_max_discard_sectors(rq, get_capacity(gd));
rq->limits.discard_granularity = info->discard_granularity;
rq->limits.discard_alignment = info->discard_alignment;
+ if (info->feature_secdiscard)
+ queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, rq);
}
/* Hard sector size and max sectors impersonate the equiv. hardware. */
@@ -705,7 +713,9 @@ static void blkif_free(struct blkfront_info *info, int suspend)
static void blkif_completion(struct blk_shadow *s)
{
int i;
- for (i = 0; i < s->req.nr_segments; i++)
+ /* Do not run this for BLKIF_OP_DISCARD: nr_segments shares its
+ * location with the discard flag in the request union. */
+ for (i = 0; i < s->req.u.rw.nr_segments; i++)
gnttab_end_foreign_access(s->req.u.rw.seg[i].gref, 0, 0UL);
}
@@ -736,7 +746,8 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
id = bret->id;
req = info->shadow[id].request;
- blkif_completion(&info->shadow[id]);
+ if (bret->operation != BLKIF_OP_DISCARD)
+ blkif_completion(&info->shadow[id]);
add_id_to_freelist(info, id);
@@ -749,7 +760,9 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
info->gd->disk_name);
error = -EOPNOTSUPP;
info->feature_discard = 0;
+ info->feature_secdiscard = 0;
queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
+ queue_flag_clear(QUEUE_FLAG_SECDISCARD, rq);
}
__blk_end_request_all(req, error);
break;
@@ -763,7 +776,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
error = -EOPNOTSUPP;
}
if (unlikely(bret->status == BLKIF_RSP_ERROR &&
- info->shadow[id].req.nr_segments == 0)) {
+ info->shadow[id].req.u.rw.nr_segments == 0)) {
printk(KERN_WARNING "blkfront: %s: empty write %s op failed\n",
info->flush_op == BLKIF_OP_WRITE_BARRIER ?
"barrier" : "flush disk cache",
@@ -984,8 +997,8 @@ static int blkfront_probe(struct xenbus_device *dev,
INIT_WORK(&info->work, blkif_restart_queue);
for (i = 0; i < BLK_RING_SIZE; i++)
- info->shadow[i].req.id = i+1;
- info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;
+ info->shadow[i].req.u.rw.id = i+1;
+ info->shadow[BLK_RING_SIZE-1].req.u.rw.id = 0x0fffffff;
/* Front end dir is a number, which is used as the id. */
info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0);
@@ -1019,9 +1032,9 @@ static int blkif_recover(struct blkfront_info *info)
/* Stage 2: Set up free list. */
memset(&info->shadow, 0, sizeof(info->shadow));
for (i = 0; i < BLK_RING_SIZE; i++)
- info->shadow[i].req.id = i+1;
+ info->shadow[i].req.u.rw.id = i+1;
info->shadow_free = info->ring.req_prod_pvt;
- info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;
+ info->shadow[BLK_RING_SIZE-1].req.u.rw.id = 0x0fffffff;
/* Stage 3: Find pending requests and requeue them. */
for (i = 0; i < BLK_RING_SIZE; i++) {
@@ -1034,17 +1047,19 @@ static int blkif_recover(struct blkfront_info *info)
*req = copy[i].req;
/* We get a new request id, and must reset the shadow state. */
- req->id = get_id_from_freelist(info);
- memcpy(&info->shadow[req->id], &copy[i], sizeof(copy[i]));
+ req->u.rw.id = get_id_from_freelist(info);
+ memcpy(&info->shadow[req->u.rw.id], &copy[i], sizeof(copy[i]));
+ if (req->operation != BLKIF_OP_DISCARD) {
/* Rewrite any grant references invalidated by susp/resume. */
- for (j = 0; j < req->nr_segments; j++)
- gnttab_grant_foreign_access_ref(
- req->u.rw.seg[j].gref,
- info->xbdev->otherend_id,
- pfn_to_mfn(info->shadow[req->id].frame[j]),
- rq_data_dir(info->shadow[req->id].request));
- info->shadow[req->id].req = *req;
+ for (j = 0; j < req->u.rw.nr_segments; j++)
+ gnttab_grant_foreign_access_ref(
+ req->u.rw.seg[j].gref,
+ info->xbdev->otherend_id,
+ pfn_to_mfn(info->shadow[req->u.rw.id].frame[j]),
+ rq_data_dir(info->shadow[req->u.rw.id].request));
+ }
+ info->shadow[req->u.rw.id].req = *req;
info->ring.req_prod_pvt++;
}
@@ -1135,11 +1150,13 @@ static void blkfront_setup_discard(struct blkfront_info *info)
char *type;
unsigned int discard_granularity;
unsigned int discard_alignment;
+ unsigned int discard_secure;
type = xenbus_read(XBT_NIL, info->xbdev->otherend, "type", NULL);
if (IS_ERR(type))
return;
+ info->feature_secdiscard = 0;
if (strncmp(type, "phy", 3) == 0) {
err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
"discard-granularity", "%u", &discard_granularity,
@@ -1150,6 +1167,12 @@ static void blkfront_setup_discard(struct blkfront_info *info)
info->discard_granularity = discard_granularity;
info->discard_alignment = discard_alignment;
}
+ err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
+ "discard-secure", "%d", &discard_secure,
+ NULL);
+ if (!err)
+ info->feature_secdiscard = discard_secure;
+
} else if (strncmp(type, "file", 4) == 0)
info->feature_discard = 1;
@@ -1437,16 +1460,13 @@ static const struct xenbus_device_id blkfront_ids[] = {
{ "" }
};
-static struct xenbus_driver blkfront = {
- .name = "vbd",
- .owner = THIS_MODULE,
- .ids = blkfront_ids,
+static DEFINE_XENBUS_DRIVER(blkfront, ,
.probe = blkfront_probe,
.remove = blkfront_remove,
.resume = blkfront_resume,
.otherend_changed = blkback_changed,
.is_ready = blkfront_is_ready,
-};
+);
static int __init xlblk_init(void)
{
@@ -1461,7 +1481,7 @@ static int __init xlblk_init(void)
return -ENODEV;
}
- ret = xenbus_register_frontend(&blkfront);
+ ret = xenbus_register_frontend(&blkfront_driver);
if (ret) {
unregister_blkdev(XENVBD_MAJOR, DEV_NAME);
return ret;
@@ -1474,7 +1494,7 @@ module_init(xlblk_init);
static void __exit xlblk_exit(void)
{
- return xenbus_unregister_driver(&blkfront);
+ return xenbus_unregister_driver(&blkfront_driver);
}
module_exit(xlblk_exit);
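On the frontend side feature_discard shrinks to a one-bit field and gains a sibling feature_secdiscard: blkfront_setup_discard() reads the optional discard-secure key from the backend, xlvbd_init_blk_queue() sets QUEUE_FLAG_SECDISCARD, and REQ_SECURE requests are translated to BLKIF_DISCARD_SECURE. Reading such an optional xenstore key can be sketched as follows (the helper name is illustrative):

#include <xen/xenbus.h>

/* Returns true if the backend advertises secure discard; the key is
 * optional, so any read failure simply means "not supported". */
static bool backend_has_secure_discard(struct xenbus_device *xbdev)
{
        unsigned int discard_secure = 0;
        int err;

        err = xenbus_gather(XBT_NIL, xbdev->otherend,
                            "discard-secure", "%d", &discard_secure,
                            NULL);
        return err == 0 && discard_secure;
}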
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index fb1975d82a73..1a17e338735e 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -456,7 +456,7 @@ static inline void ace_fsm_yieldirq(struct ace_device *ace)
{
dev_dbg(ace->dev, "ace_fsm_yieldirq()\n");
- if (ace->irq == NO_IRQ)
+ if (!ace->irq)
/* No IRQ assigned, so need to poll */
tasklet_schedule(&ace->fsm_tasklet);
ace->fsm_continue_flag = 0;
@@ -1034,12 +1034,12 @@ static int __devinit ace_setup(struct ace_device *ace)
ACE_CTRL_DATABUFRDYIRQ | ACE_CTRL_ERRORIRQ);
/* Now we can hook up the irq handler */
- if (ace->irq != NO_IRQ) {
+ if (ace->irq) {
rc = request_irq(ace->irq, ace_interrupt, 0, "systemace", ace);
if (rc) {
/* Failure - fall back to polled mode */
dev_err(ace->dev, "request_irq failed\n");
- ace->irq = NO_IRQ;
+ ace->irq = 0;
}
}
@@ -1086,7 +1086,7 @@ static void __devexit ace_teardown(struct ace_device *ace)
tasklet_kill(&ace->fsm_tasklet);
- if (ace->irq != NO_IRQ)
+ if (ace->irq)
free_irq(ace->irq, ace);
iounmap(ace->baseaddr);
@@ -1156,7 +1156,7 @@ static int __devinit ace_probe(struct platform_device *dev)
resource_size_t physaddr = 0;
int bus_width = ACE_BUS_WIDTH_16; /* FIXME: should not be hard coded */
u32 id = dev->id;
- int irq = NO_IRQ;
+ int irq = 0;
int i;
dev_dbg(&dev->dev, "ace_probe(%p)\n", dev);
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 11b41fd40c27..5ccf142ef0b8 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -188,7 +188,7 @@ config BT_MRVL
The core driver to support Marvell Bluetooth devices.
This driver is required if you want to support
- Marvell Bluetooth devices, such as 8688/8787.
+ Marvell Bluetooth devices, such as 8688/8787/8797.
Say Y here to compile Marvell Bluetooth driver
into the kernel or say M to compile it as module.
@@ -201,8 +201,8 @@ config BT_MRVL_SDIO
The driver for Marvell Bluetooth chipsets with SDIO interface.
This driver is required if you want to use Marvell Bluetooth
- devices with SDIO interface. Currently SD8688/SD8787 chipsets are
- supported.
+ devices with SDIO interface. Currently SD8688/SD8787/SD8797
+ chipsets are supported.
Say Y here to compile support for Marvell BT-over-SDIO driver
into the kernel or say M to compile it as module.
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index cf77a9a2bfa4..07f14d10ea49 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -30,6 +30,7 @@
#include <net/bluetooth/bluetooth.h>
#define VERSION "1.0"
+#define ATH3K_FIRMWARE "ath3k-1.fw"
#define ATH3K_DNLOAD 0x01
#define ATH3K_GETSTATE 0x05
@@ -400,9 +401,15 @@ static int ath3k_probe(struct usb_interface *intf,
return 0;
}
- if (request_firmware(&firmware, "ath3k-1.fw", &udev->dev) < 0) {
- BT_ERR("Error loading firmware");
- return -EIO;
+ ret = request_firmware(&firmware, ATH3K_FIRMWARE, &udev->dev);
+ if (ret < 0) {
+ if (ret == -ENOENT)
+ BT_ERR("Firmware file \"%s\" not found",
+ ATH3K_FIRMWARE);
+ else
+ BT_ERR("Firmware file \"%s\" request failed (err=%d)",
+ ATH3K_FIRMWARE, ret);
+ return ret;
}
ret = ath3k_load_firmware(udev, firmware);
@@ -429,4 +436,4 @@ MODULE_AUTHOR("Atheros Communications");
MODULE_DESCRIPTION("Atheros AR30xx firmware driver");
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
-MODULE_FIRMWARE("ath3k-1.fw");
+MODULE_FIRMWARE(ATH3K_FIRMWARE);
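ath3k now keeps the firmware name in one macro shared with MODULE_FIRMWARE() and propagates the request_firmware() error instead of flattening everything to -EIO, reporting the missing-file case (-ENOENT) separately. The general shape of that error handling, sketched with a made-up firmware name:

#include <linux/module.h>
#include <linux/device.h>
#include <linux/firmware.h>

#define MY_FW   "vendor/example.fw"     /* illustrative name, not from the patch */

static int load_fw(struct device *dev)
{
        const struct firmware *fw;
        int ret;

        ret = request_firmware(&fw, MY_FW, dev);
        if (ret < 0) {
                if (ret == -ENOENT)
                        dev_err(dev, "firmware file \"%s\" not found\n", MY_FW);
                else
                        dev_err(dev, "firmware request for \"%s\" failed (%d)\n",
                                MY_FW, ret);
                return ret;
        }

        /* ... download fw->data / fw->size to the device ... */

        release_firmware(fw);
        return 0;
}
MODULE_FIRMWARE(MY_FW);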
diff --git a/drivers/bluetooth/bfusb.c b/drivers/bluetooth/bfusb.c
index e64a290a1b98..a323baee51b0 100644
--- a/drivers/bluetooth/bfusb.c
+++ b/drivers/bluetooth/bfusb.c
@@ -751,9 +751,7 @@ static void bfusb_disconnect(struct usb_interface *intf)
bfusb_close(hdev);
- if (hci_unregister_dev(hdev) < 0)
- BT_ERR("Can't unregister HCI device %s", hdev->name);
-
+ hci_unregister_dev(hdev);
hci_free_dev(hdev);
}
diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c
index aed1904ea67b..c6a0c6103743 100644
--- a/drivers/bluetooth/bluecard_cs.c
+++ b/drivers/bluetooth/bluecard_cs.c
@@ -844,9 +844,7 @@ static int bluecard_close(bluecard_info_t *info)
/* Turn FPGA off */
outb(0x80, iobase + 0x30);
- if (hci_unregister_dev(hdev) < 0)
- BT_ERR("Can't unregister HCI device %s", hdev->name);
-
+ hci_unregister_dev(hdev);
hci_free_dev(hdev);
return 0;
diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c
index 4fc01949d399..0c97e5d514b6 100644
--- a/drivers/bluetooth/bt3c_cs.c
+++ b/drivers/bluetooth/bt3c_cs.c
@@ -636,9 +636,7 @@ static int bt3c_close(bt3c_info_t *info)
bt3c_hci_close(hdev);
- if (hci_unregister_dev(hdev) < 0)
- BT_ERR("Can't unregister HCI device %s", hdev->name);
-
+ hci_unregister_dev(hdev);
hci_free_dev(hdev);
return 0;
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index a88a78c86162..6c3defa50845 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -475,8 +475,6 @@ static int btmrvl_service_main_thread(void *data)
init_waitqueue_entry(&wait, current);
- current->flags |= PF_NOFREEZE;
-
for (;;) {
add_wait_queue(&thread->wait_q, &wait);
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index 9ef48167e2cf..27b74b0d547b 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -65,7 +65,7 @@ static const struct btmrvl_sdio_card_reg btmrvl_reg_8688 = {
.io_port_1 = 0x01,
.io_port_2 = 0x02,
};
-static const struct btmrvl_sdio_card_reg btmrvl_reg_8787 = {
+static const struct btmrvl_sdio_card_reg btmrvl_reg_87xx = {
.cfg = 0x00,
.host_int_mask = 0x02,
.host_intstatus = 0x03,
@@ -92,7 +92,14 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8688 = {
static const struct btmrvl_sdio_device btmrvl_sdio_sd8787 = {
.helper = NULL,
.firmware = "mrvl/sd8787_uapsta.bin",
- .reg = &btmrvl_reg_8787,
+ .reg = &btmrvl_reg_87xx,
+ .sd_blksz_fw_dl = 256,
+};
+
+static const struct btmrvl_sdio_device btmrvl_sdio_sd8797 = {
+ .helper = NULL,
+ .firmware = "mrvl/sd8797_uapsta.bin",
+ .reg = &btmrvl_reg_87xx,
.sd_blksz_fw_dl = 256,
};
@@ -103,6 +110,9 @@ static const struct sdio_device_id btmrvl_sdio_ids[] = {
/* Marvell SD8787 Bluetooth device */
{ SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x911A),
.driver_data = (unsigned long) &btmrvl_sdio_sd8787 },
+ /* Marvell SD8797 Bluetooth device */
+ { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x912A),
+ .driver_data = (unsigned long) &btmrvl_sdio_sd8797 },
{ } /* Terminating entry */
};
@@ -1076,3 +1086,4 @@ MODULE_LICENSE("GPL v2");
MODULE_FIRMWARE("sd8688_helper.bin");
MODULE_FIRMWARE("sd8688.bin");
MODULE_FIRMWARE("mrvl/sd8787_uapsta.bin");
+MODULE_FIRMWARE("mrvl/sd8797_uapsta.bin");
diff --git a/drivers/bluetooth/btuart_cs.c b/drivers/bluetooth/btuart_cs.c
index 526b61807d94..200b3a2877d6 100644
--- a/drivers/bluetooth/btuart_cs.c
+++ b/drivers/bluetooth/btuart_cs.c
@@ -565,9 +565,7 @@ static int btuart_close(btuart_info_t *info)
spin_unlock_irqrestore(&(info->lock), flags);
- if (hci_unregister_dev(hdev) < 0)
- BT_ERR("Can't unregister HCI device %s", hdev->name);
-
+ hci_unregister_dev(hdev);
hci_free_dev(hdev);
return 0;
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index da832353abea..f00f596c1029 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -37,13 +37,13 @@
#define VERSION "0.6"
-static int ignore_dga;
-static int ignore_csr;
-static int ignore_sniffer;
-static int disable_scofix;
-static int force_scofix;
+static bool ignore_dga;
+static bool ignore_csr;
+static bool ignore_sniffer;
+static bool disable_scofix;
+static bool force_scofix;
-static int reset = 1;
+static bool reset = 1;
static struct usb_driver btusb_driver;
@@ -101,6 +101,7 @@ static struct usb_device_id btusb_table[] = {
{ USB_DEVICE(0x0c10, 0x0000) },
/* Broadcom BCM20702A0 */
+ { USB_DEVICE(0x0a5c, 0x21e3) },
{ USB_DEVICE(0x413c, 0x8197) },
{ } /* Terminating entry */
@@ -315,7 +316,8 @@ static int btusb_submit_intr_urb(struct hci_dev *hdev, gfp_t mem_flags)
err = usb_submit_urb(urb, mem_flags);
if (err < 0) {
- BT_ERR("%s urb %p submission failed (%d)",
+ if (err != -EPERM && err != -ENODEV)
+ BT_ERR("%s urb %p submission failed (%d)",
hdev->name, urb, -err);
usb_unanchor_urb(urb);
}
@@ -400,7 +402,8 @@ static int btusb_submit_bulk_urb(struct hci_dev *hdev, gfp_t mem_flags)
err = usb_submit_urb(urb, mem_flags);
if (err < 0) {
- BT_ERR("%s urb %p submission failed (%d)",
+ if (err != -EPERM && err != -ENODEV)
+ BT_ERR("%s urb %p submission failed (%d)",
hdev->name, urb, -err);
usb_unanchor_urb(urb);
}
@@ -506,15 +509,10 @@ static int btusb_submit_isoc_urb(struct hci_dev *hdev, gfp_t mem_flags)
pipe = usb_rcvisocpipe(data->udev, data->isoc_rx_ep->bEndpointAddress);
- urb->dev = data->udev;
- urb->pipe = pipe;
- urb->context = hdev;
- urb->complete = btusb_isoc_complete;
- urb->interval = data->isoc_rx_ep->bInterval;
+ usb_fill_int_urb(urb, data->udev, pipe, buf, size, btusb_isoc_complete,
+ hdev, data->isoc_rx_ep->bInterval);
urb->transfer_flags = URB_FREE_BUFFER | URB_ISO_ASAP;
- urb->transfer_buffer = buf;
- urb->transfer_buffer_length = size;
__fill_isoc_descriptor(urb, size,
le16_to_cpu(data->isoc_rx_ep->wMaxPacketSize));
@@ -523,7 +521,8 @@ static int btusb_submit_isoc_urb(struct hci_dev *hdev, gfp_t mem_flags)
err = usb_submit_urb(urb, mem_flags);
if (err < 0) {
- BT_ERR("%s urb %p submission failed (%d)",
+ if (err != -EPERM && err != -ENODEV)
+ BT_ERR("%s urb %p submission failed (%d)",
hdev->name, urb, -err);
usb_unanchor_urb(urb);
}
@@ -727,6 +726,9 @@ static int btusb_send_frame(struct sk_buff *skb)
usb_fill_bulk_urb(urb, data->udev, pipe,
skb->data, skb->len, btusb_tx_complete, skb);
+ if (skb->priority >= HCI_PRIO_MAX - 1)
+ urb->transfer_flags = URB_ISO_ASAP;
+
hdev->stat.acl_tx++;
break;
@@ -770,16 +772,17 @@ skip_waking:
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err < 0) {
- BT_ERR("%s urb %p submission failed", hdev->name, urb);
+ if (err != -EPERM && err != -ENODEV)
+ BT_ERR("%s urb %p submission failed (%d)",
+ hdev->name, urb, -err);
kfree(urb->setup_packet);
usb_unanchor_urb(urb);
} else {
usb_mark_last_busy(data->udev);
}
- usb_free_urb(urb);
-
done:
+ usb_free_urb(urb);
return err;
}
diff --git a/drivers/bluetooth/dtl1_cs.c b/drivers/bluetooth/dtl1_cs.c
index 5e4c2de9fc3f..969bb22e493f 100644
--- a/drivers/bluetooth/dtl1_cs.c
+++ b/drivers/bluetooth/dtl1_cs.c
@@ -551,9 +551,7 @@ static int dtl1_close(dtl1_info_t *info)
spin_unlock_irqrestore(&(info->lock), flags);
- if (hci_unregister_dev(hdev) < 0)
- BT_ERR("Can't unregister HCI device %s", hdev->name);
-
+ hci_unregister_dev(hdev);
hci_free_dev(hdev);
return 0;
diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c
index 9c5b2dc38e29..a767d4de45a4 100644
--- a/drivers/bluetooth/hci_bcsp.c
+++ b/drivers/bluetooth/hci_bcsp.c
@@ -49,8 +49,8 @@
#define VERSION "0.3"
-static int txcrc = 1;
-static int hciextn = 1;
+static bool txcrc = 1;
+static bool hciextn = 1;
#define BCSP_TXWINSIZE 4
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index 48ad2a7ab080..07114489994f 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -48,7 +48,7 @@
#define VERSION "2.2"
-static int reset = 0;
+static bool reset = 0;
static struct hci_uart_proto *hup[HCI_UART_MAX_PROTO];
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index 67c180c2c1e0..2ed6ab1c6e1b 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -41,6 +41,8 @@
#define VERSION "1.3"
+static bool amp;
+
struct vhci_data {
struct hci_dev *hdev;
@@ -239,6 +241,9 @@ static int vhci_open(struct inode *inode, struct file *file)
hdev->bus = HCI_VIRTUAL;
hdev->driver_data = data;
+ if (amp)
+ hdev->dev_type = HCI_AMP;
+
hdev->open = vhci_open_dev;
hdev->close = vhci_close_dev;
hdev->flush = vhci_flush;
@@ -264,10 +269,7 @@ static int vhci_release(struct inode *inode, struct file *file)
struct vhci_data *data = file->private_data;
struct hci_dev *hdev = data->hdev;
- if (hci_unregister_dev(hdev) < 0) {
- BT_ERR("Can't unregister HCI device %s", hdev->name);
- }
-
+ hci_unregister_dev(hdev);
hci_free_dev(hdev);
file->private_data = NULL;
@@ -306,6 +308,9 @@ static void __exit vhci_exit(void)
module_init(vhci_init);
module_exit(vhci_exit);
+module_param(amp, bool, 0644);
+MODULE_PARM_DESC(amp, "Create AMP controller device");
+
MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth virtual HCI driver ver " VERSION);
MODULE_VERSION(VERSION);
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index f997c27d79e2..55eaf474d32c 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -267,7 +267,6 @@
#include <linux/module.h>
#include <linux/fs.h>
-#include <linux/buffer_head.h>
#include <linux/major.h>
#include <linux/types.h>
#include <linux/errno.h>
@@ -286,17 +285,17 @@
#include <asm/uaccess.h>
/* used to tell the module to turn on full debugging messages */
-static int debug;
+static bool debug;
/* used to keep tray locked at all times */
static int keeplocked;
/* default compatibility mode */
-static int autoclose=1;
-static int autoeject;
-static int lockdoor = 1;
+static bool autoclose=1;
+static bool autoeject;
+static bool lockdoor = 1;
/* will we ever get to use this... sigh. */
-static int check_media_type;
+static bool check_media_type;
/* automatically restart mrw format */
-static int mrw_format_restart = 1;
+static bool mrw_format_restart = 1;
module_param(debug, bool, 0);
module_param(autoclose, bool, 0);
module_param(autoeject, bool, 0);
@@ -2747,12 +2746,11 @@ int cdrom_ioctl(struct cdrom_device_info *cdi, struct block_device *bdev,
{
void __user *argp = (void __user *)arg;
int ret;
- struct gendisk *disk = bdev->bd_disk;
/*
* Try the generic SCSI command ioctl's first.
*/
- ret = scsi_cmd_ioctl(disk->queue, disk, mode, cmd, argp);
+ ret = scsi_cmd_blk_ioctl(bdev, mode, cmd, argp);
if (ret != -ENOTTY)
return ret;
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index 780498d76581..444f8b6ab411 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -33,7 +33,7 @@
#define ULI_X86_64_ENU_SCR_REG 0x54
static struct resource *aperture_resource;
-static int __initdata agp_try_unsupported = 1;
+static bool __initdata agp_try_unsupported = 1;
static int agp_bridges_found;
static void amd64_tlbflush(struct agp_memory *temp)
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index b072648dc3f6..17e05d1076b3 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -514,12 +514,12 @@ static void agp_v2_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_
switch (*bridge_agpstat & 7) {
case 4:
*bridge_agpstat |= (AGPSTAT2_2X | AGPSTAT2_1X);
- printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x4 rate"
+ printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x4 rate. "
"Fixing up support for x2 & x1\n");
break;
case 2:
*bridge_agpstat |= AGPSTAT2_1X;
- printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x2 rate"
+ printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x2 rate. "
"Fixing up support for x1\n");
break;
default:
@@ -693,7 +693,7 @@ static void agp_v3_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_
*bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
*vga_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
} else {
- printk(KERN_INFO PFX "Fell back to AGPx4 mode because");
+ printk(KERN_INFO PFX "Fell back to AGPx4 mode because ");
if (!(*bridge_agpstat & AGPSTAT3_8X)) {
printk(KERN_INFO PFX "bridge couldn't do x8. bridge_agpstat:%x (orig=%x)\n",
*bridge_agpstat, origbridge);
@@ -956,7 +956,7 @@ int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
bridge->driver->cache_flush();
#ifdef CONFIG_X86
if (set_memory_uc((unsigned long)table, 1 << page_order))
- printk(KERN_WARNING "Could not set GATT table memory to UC!");
+ printk(KERN_WARNING "Could not set GATT table memory to UC!\n");
bridge->gatt_table = (void *)table;
#else
diff --git a/drivers/char/agp/sis-agp.c b/drivers/char/agp/sis-agp.c
index 29aacd81de78..08704ae53956 100644
--- a/drivers/char/agp/sis-agp.c
+++ b/drivers/char/agp/sis-agp.c
@@ -17,7 +17,7 @@
#define PCI_DEVICE_ID_SI_662 0x0662
#define PCI_DEVICE_ID_SI_671 0x0671
-static int __devinitdata agp_sis_force_delay = 0;
+static bool __devinitdata agp_sis_force_delay = 0;
static int __devinitdata agp_sis_agp_spec = -1;
static int sis_fetch_size(void)
diff --git a/drivers/char/hw_random/atmel-rng.c b/drivers/char/hw_random/atmel-rng.c
index 241df2e76aba..f518b99f53f5 100644
--- a/drivers/char/hw_random/atmel-rng.c
+++ b/drivers/char/hw_random/atmel-rng.c
@@ -141,17 +141,7 @@ static struct platform_driver atmel_trng_driver = {
},
};
-static int __init atmel_trng_init(void)
-{
- return platform_driver_register(&atmel_trng_driver);
-}
-module_init(atmel_trng_init);
-
-static void __exit atmel_trng_exit(void)
-{
- platform_driver_unregister(&atmel_trng_driver);
-}
-module_exit(atmel_trng_exit);
+module_platform_driver(atmel_trng_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Peter Korsgaard <jacmet@sunsite.dk>");
diff --git a/drivers/char/hw_random/n2-drv.c b/drivers/char/hw_random/n2-drv.c
index c3de70de00d4..ebd48f0135da 100644
--- a/drivers/char/hw_random/n2-drv.c
+++ b/drivers/char/hw_random/n2-drv.c
@@ -770,15 +770,4 @@ static struct platform_driver n2rng_driver = {
.remove = __devexit_p(n2rng_remove),
};
-static int __init n2rng_init(void)
-{
- return platform_driver_register(&n2rng_driver);
-}
-
-static void __exit n2rng_exit(void)
-{
- platform_driver_unregister(&n2rng_driver);
-}
-
-module_init(n2rng_init);
-module_exit(n2rng_exit);
+module_platform_driver(n2rng_driver);
diff --git a/drivers/char/hw_random/nomadik-rng.c b/drivers/char/hw_random/nomadik-rng.c
index 52e08ca3ccd7..3d3c1e6703b4 100644
--- a/drivers/char/hw_random/nomadik-rng.c
+++ b/drivers/char/hw_random/nomadik-rng.c
@@ -95,6 +95,8 @@ static struct amba_id nmk_rng_ids[] = {
{0, 0},
};
+MODULE_DEVICE_TABLE(amba, nmk_rng_ids);
+
static struct amba_driver nmk_rng_driver = {
.drv = {
.owner = THIS_MODULE,
diff --git a/drivers/char/hw_random/octeon-rng.c b/drivers/char/hw_random/octeon-rng.c
index 9cd0feca318c..0943edc782a1 100644
--- a/drivers/char/hw_random/octeon-rng.c
+++ b/drivers/char/hw_random/octeon-rng.c
@@ -131,18 +131,7 @@ static struct platform_driver octeon_rng_driver = {
.remove = __exit_p(octeon_rng_remove),
};
-static int __init octeon_rng_mod_init(void)
-{
- return platform_driver_register(&octeon_rng_driver);
-}
-
-static void __exit octeon_rng_mod_exit(void)
-{
- platform_driver_unregister(&octeon_rng_driver);
-}
-
-module_init(octeon_rng_mod_init);
-module_exit(octeon_rng_mod_exit);
+module_platform_driver(octeon_rng_driver);
MODULE_AUTHOR("David Daney");
MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/pasemi-rng.c b/drivers/char/hw_random/pasemi-rng.c
index 1d504815e6db..3a632673aed5 100644
--- a/drivers/char/hw_random/pasemi-rng.c
+++ b/drivers/char/hw_random/pasemi-rng.c
@@ -148,17 +148,7 @@ static struct platform_driver rng_driver = {
.remove = rng_remove,
};
-static int __init rng_init(void)
-{
- return platform_driver_register(&rng_driver);
-}
-module_init(rng_init);
-
-static void __exit rng_exit(void)
-{
- platform_driver_unregister(&rng_driver);
-}
-module_exit(rng_exit);
+module_platform_driver(rng_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Egor Martovetsky <egor@pasemi.com>");
diff --git a/drivers/char/hw_random/picoxcell-rng.c b/drivers/char/hw_random/picoxcell-rng.c
index 990d55a5e3e8..97bd891422c7 100644
--- a/drivers/char/hw_random/picoxcell-rng.c
+++ b/drivers/char/hw_random/picoxcell-rng.c
@@ -191,17 +191,7 @@ static struct platform_driver picoxcell_trng_driver = {
},
};
-static int __init picoxcell_trng_init(void)
-{
- return platform_driver_register(&picoxcell_trng_driver);
-}
-module_init(picoxcell_trng_init);
-
-static void __exit picoxcell_trng_exit(void)
-{
- platform_driver_unregister(&picoxcell_trng_driver);
-}
-module_exit(picoxcell_trng_exit);
+module_platform_driver(picoxcell_trng_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jamie Iles");
diff --git a/drivers/char/hw_random/ppc4xx-rng.c b/drivers/char/hw_random/ppc4xx-rng.c
index b8afa6a4ff67..c51762c13031 100644
--- a/drivers/char/hw_random/ppc4xx-rng.c
+++ b/drivers/char/hw_random/ppc4xx-rng.c
@@ -139,17 +139,7 @@ static struct platform_driver ppc4xx_rng_driver = {
.remove = ppc4xx_rng_remove,
};
-static int __init ppc4xx_rng_init(void)
-{
- return platform_driver_register(&ppc4xx_rng_driver);
-}
-module_init(ppc4xx_rng_init);
-
-static void __exit ppc4xx_rng_exit(void)
-{
- platform_driver_unregister(&ppc4xx_rng_driver);
-}
-module_exit(ppc4xx_rng_exit);
+module_platform_driver(ppc4xx_rng_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Josh Boyer <jwboyer@linux.vnet.ibm.com>");
diff --git a/drivers/char/hw_random/timeriomem-rng.c b/drivers/char/hw_random/timeriomem-rng.c
index a8428e6f64a9..f1a1618db1fb 100644
--- a/drivers/char/hw_random/timeriomem-rng.c
+++ b/drivers/char/hw_random/timeriomem-rng.c
@@ -149,18 +149,7 @@ static struct platform_driver timeriomem_rng_driver = {
.remove = __devexit_p(timeriomem_rng_remove),
};
-static int __init timeriomem_rng_init(void)
-{
- return platform_driver_register(&timeriomem_rng_driver);
-}
-
-static void __exit timeriomem_rng_exit(void)
-{
- platform_driver_unregister(&timeriomem_rng_driver);
-}
-
-module_init(timeriomem_rng_init);
-module_exit(timeriomem_rng_exit);
+module_platform_driver(timeriomem_rng_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alexander Clouter <alex@digriz.org.uk>");
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
index fd699ccecf5b..723725bbb96b 100644
--- a/drivers/char/hw_random/virtio-rng.c
+++ b/drivers/char/hw_random/virtio-rng.c
@@ -47,7 +47,7 @@ static void register_buffer(u8 *buf, size_t size)
sg_init_one(&sg, buf, size);
/* There should always be room for one buffer. */
- if (virtqueue_add_buf(vq, &sg, 0, 1, buf) < 0)
+ if (virtqueue_add_buf(vq, &sg, 0, 1, buf, GFP_KERNEL) < 0)
BUG();
virtqueue_kick(vq);
diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c
index 6e40072fbf67..40cc0cf2ded6 100644
--- a/drivers/char/i8k.c
+++ b/drivers/char/i8k.c
@@ -69,19 +69,19 @@ MODULE_AUTHOR("Massimo Dal Zotto (dz@debian.org)");
MODULE_DESCRIPTION("Driver for accessing SMM BIOS on Dell laptops");
MODULE_LICENSE("GPL");
-static int force;
+static bool force;
module_param(force, bool, 0);
MODULE_PARM_DESC(force, "Force loading without checking for supported models");
-static int ignore_dmi;
+static bool ignore_dmi;
module_param(ignore_dmi, bool, 0);
MODULE_PARM_DESC(ignore_dmi, "Continue probing hardware even if DMI data does not match");
-static int restricted;
+static bool restricted;
module_param(restricted, bool, 0);
MODULE_PARM_DESC(restricted, "Allow fan control if SYS_ADMIN capability set");
-static int power_status;
+static bool power_status;
module_param(power_status, bool, 0600);
MODULE_PARM_DESC(power_status, "Report power status in /proc/i8k");
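The recurring "static int foo;" to "static bool foo;" churn throughout this merge (btusb, cdrom, i8k, ipmi_si, lp, random and friends) follows from module_param(..., bool, ...) now insisting that the backing variable really be bool. The idiom after the conversion, taken from the i8k hunk above:

#include <linux/module.h>
#include <linux/moduleparam.h>

/* A flag-style parameter must now be declared bool to pass module_param(). */
static bool force;
module_param(force, bool, 0);
MODULE_PARM_DESC(force, "Force loading without checking for supported models");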
diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c
index 3ed20e8abc0d..cdd4c09fda96 100644
--- a/drivers/char/ipmi/ipmi_bt_sm.c
+++ b/drivers/char/ipmi/ipmi_bt_sm.c
@@ -560,7 +560,7 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
BT_CONTROL(BT_H_BUSY); /* set */
/*
- * Uncached, ordered writes should just proceeed serially but
+ * Uncached, ordered writes should just proceed serially but
* some BMCs don't clear B2H_ATN with one hit. Fast-path a
* workaround without too much penalty to the general case.
*/
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 9397ab49b72e..50fcf9c04569 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -1227,7 +1227,7 @@ static int smi_num; /* Used to sequence the SMIs */
#define DEFAULT_REGSPACING 1
#define DEFAULT_REGSIZE 1
-static int si_trydefaults = 1;
+static bool si_trydefaults = 1;
static char *si_type[SI_MAX_PARMS];
#define MAX_SI_TYPE_STR 30
static char si_type_str[MAX_SI_TYPE_STR];
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index c2917ffad2c2..34767a6d7f42 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -139,6 +139,8 @@
#define IPMI_WDOG_SET_TIMER 0x24
#define IPMI_WDOG_GET_TIMER 0x25
+#define IPMI_WDOG_TIMER_NOT_INIT_RESP 0x80
+
/* These are here until the real ones get into the watchdog.h interface. */
#ifndef WDIOC_GETTIMEOUT
#define WDIOC_GETTIMEOUT _IOW(WATCHDOG_IOCTL_BASE, 20, int)
@@ -596,6 +598,7 @@ static int ipmi_heartbeat(void)
struct kernel_ipmi_msg msg;
int rv;
struct ipmi_system_interface_addr addr;
+ int timeout_retries = 0;
if (ipmi_ignore_heartbeat)
return 0;
@@ -616,6 +619,7 @@ static int ipmi_heartbeat(void)
mutex_lock(&heartbeat_lock);
+restart:
atomic_set(&heartbeat_tofree, 2);
/*
@@ -653,7 +657,33 @@ static int ipmi_heartbeat(void)
/* Wait for the heartbeat to be sent. */
wait_for_completion(&heartbeat_wait);
- if (heartbeat_recv_msg.msg.data[0] != 0) {
+ if (heartbeat_recv_msg.msg.data[0] == IPMI_WDOG_TIMER_NOT_INIT_RESP) {
+ timeout_retries++;
+ if (timeout_retries > 3) {
+ printk(KERN_ERR PFX ": Unable to restore the IPMI"
+ " watchdog's settings, giving up.\n");
+ rv = -EIO;
+ goto out_unlock;
+ }
+
+ /*
+ * The timer was not initialized, that means the BMC was
+ * probably reset and lost the watchdog information. Attempt
+ * to restore the timer's info. Note that we still hold
+ * the heartbeat lock, to keep a heartbeat from happening
+ * in this process, so must say no heartbeat to avoid a
+ * deadlock on this mutex.
+ */
+ rv = ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
+ if (rv) {
+ printk(KERN_ERR PFX ": Unable to send the command to"
+ " set the watchdog's settings, giving up.\n");
+ goto out_unlock;
+ }
+
+ /* We might need a new heartbeat, so do it now */
+ goto restart;
+ } else if (heartbeat_recv_msg.msg.data[0] != 0) {
/*
* Got an error in the heartbeat response. It was already
* reported in ipmi_wdog_msg_handler, but we should return
@@ -662,6 +692,7 @@ static int ipmi_heartbeat(void)
rv = -EINVAL;
}
+out_unlock:
mutex_unlock(&heartbeat_lock);
return rv;
@@ -922,11 +953,15 @@ static struct miscdevice ipmi_wdog_miscdev = {
static void ipmi_wdog_msg_handler(struct ipmi_recv_msg *msg,
void *handler_data)
{
- if (msg->msg.data[0] != 0) {
+ if (msg->msg.cmd == IPMI_WDOG_RESET_TIMER &&
+ msg->msg.data[0] == IPMI_WDOG_TIMER_NOT_INIT_RESP)
+ printk(KERN_INFO PFX "response: The IPMI controller appears"
+ " to have been reset, will attempt to reinitialize"
+ " the watchdog timer\n");
+ else if (msg->msg.data[0] != 0)
printk(KERN_ERR PFX "response: Error %x on cmd %x\n",
msg->msg.data[0],
msg->msg.cmd);
- }
ipmi_free_recv_msg(msg);
}
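The heartbeat path now recognises completion code 0x80 (timer not initialized): the BMC has most likely been reset, so the driver re-sends the timeout settings, using IPMI_SET_TIMEOUT_NO_HB because heartbeat_lock is already held, and then retries the heartbeat, giving up after three retries. Stripped of the IPMI plumbing, the control flow reduces to a bounded retry loop along these lines (send_heartbeat() and reprogram_timer() are stand-ins):

#include <linux/errno.h>

/* Stand-ins for the IPMI message exchanges in the hunk above. */
static int send_heartbeat(void)  { return 0; }
static int reprogram_timer(void) { return 0; }

static int heartbeat_with_retry(void)
{
        int timeout_retries = 0;
        int rv;

restart:
        rv = send_heartbeat();
        if (rv == -EAGAIN) {                    /* stands in for the 0x80 completion code */
                if (++timeout_retries > 3)
                        return -EIO;            /* give up, as the patch does */
                rv = reprogram_timer();         /* no nested heartbeat while holding the lock */
                if (rv)
                        return rv;
                goto restart;                   /* the freshly armed timer still needs a beat */
        }
        return rv;
}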
diff --git a/drivers/char/lp.c b/drivers/char/lp.c
index 97c3edb95ae7..f43485607063 100644
--- a/drivers/char/lp.c
+++ b/drivers/char/lp.c
@@ -829,7 +829,7 @@ static struct console lpcons = {
static int parport_nr[LP_NO] = { [0 ... LP_NO-1] = LP_PARPORT_UNSPEC };
static char *parport[LP_NO];
-static int reset;
+static bool reset;
module_param_array(parport, charp, NULL, 0);
module_param(reset, bool, 0);
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 145179033716..d6e9d081c8b1 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -847,7 +847,7 @@ static const struct file_operations kmsg_fops = {
static const struct memdev {
const char *name;
- mode_t mode;
+ umode_t mode;
const struct file_operations *fops;
struct backing_dev_info *dev_info;
} devlist[] = {
@@ -901,7 +901,7 @@ static const struct file_operations memory_fops = {
.llseek = noop_llseek,
};
-static char *mem_devnode(struct device *dev, mode_t *mode)
+static char *mem_devnode(struct device *dev, umode_t *mode)
{
if (mode && devlist[MINOR(dev->devt)].mode)
*mode = devlist[MINOR(dev->devt)].mode;
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index 778273c93242..522136d40843 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -258,7 +258,7 @@ int misc_deregister(struct miscdevice *misc)
EXPORT_SYMBOL(misc_register);
EXPORT_SYMBOL(misc_deregister);
-static char *misc_devnode(struct device *dev, mode_t *mode)
+static char *misc_devnode(struct device *dev, umode_t *mode)
{
struct miscdevice *c = dev_get_drvdata(dev);
diff --git a/drivers/char/nwflash.c b/drivers/char/nwflash.c
index a12f52400dbc..bf586ae1ee83 100644
--- a/drivers/char/nwflash.c
+++ b/drivers/char/nwflash.c
@@ -51,7 +51,7 @@ static int write_block(unsigned long p, const char __user *buf, int count);
#define KFLASH_ID 0x89A6 //Intel flash
#define KFLASH_ID4 0xB0D4 //Intel flash 4Meg
-static int flashdebug; //if set - we will display progress msgs
+static bool flashdebug; //if set - we will display progress msgs
static int gbWriteEnable;
static int gbWriteBase64Enable;
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index 15781396af25..07f6a5abe372 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -439,7 +439,7 @@ static int mgslpc_device_count = 0;
* .text section address and breakpoint on module load.
* This is useful for use with gdb and add-symbol-file command.
*/
-static int break_on_load=0;
+static bool break_on_load=0;
/*
* Driver major number, defaults to zero to get auto
diff --git a/drivers/char/ramoops.c b/drivers/char/ramoops.c
index 7c7f42a1f880..9fec3232b736 100644
--- a/drivers/char/ramoops.c
+++ b/drivers/char/ramoops.c
@@ -83,8 +83,7 @@ static void ramoops_do_dump(struct kmsg_dumper *dumper,
struct timeval timestamp;
if (reason != KMSG_DUMP_OOPS &&
- reason != KMSG_DUMP_PANIC &&
- reason != KMSG_DUMP_KEXEC)
+ reason != KMSG_DUMP_PANIC)
return;
/* Only dump oopses if dump_oops is set */
@@ -126,8 +125,8 @@ static int __init ramoops_probe(struct platform_device *pdev)
goto fail3;
}
- rounddown_pow_of_two(pdata->mem_size);
- rounddown_pow_of_two(pdata->record_size);
+ pdata->mem_size = rounddown_pow_of_two(pdata->mem_size);
+ pdata->record_size = rounddown_pow_of_two(pdata->record_size);
/* Check for the minimum memory size */
if (pdata->mem_size < MIN_MEM_SIZE &&
@@ -148,14 +147,6 @@ static int __init ramoops_probe(struct platform_device *pdev)
cxt->phys_addr = pdata->mem_address;
cxt->record_size = pdata->record_size;
cxt->dump_oops = pdata->dump_oops;
- /*
- * Update the module parameter variables as well so they are visible
- * through /sys/module/ramoops/parameters/
- */
- mem_size = pdata->mem_size;
- mem_address = pdata->mem_address;
- record_size = pdata->record_size;
- dump_oops = pdata->dump_oops;
if (!request_mem_region(cxt->phys_addr, cxt->size, "ramoops")) {
pr_err("request mem region failed\n");
@@ -176,6 +167,15 @@ static int __init ramoops_probe(struct platform_device *pdev)
goto fail1;
}
+ /*
+ * Update the module parameter variables as well so they are visible
+ * through /sys/module/ramoops/parameters/
+ */
+ mem_size = pdata->mem_size;
+ mem_address = pdata->mem_address;
+ record_size = pdata->record_size;
+ dump_oops = pdata->dump_oops;
+
return 0;
fail1:
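The second ramoops hunk fixes a silent no-op: rounddown_pow_of_two() returns the rounded value and does not modify its argument, so the result must be assigned back. A small userspace sketch of the semantics (the helper below is a stand-in, not the kernel implementation):

```c
#include <stdint.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's rounddown_pow_of_two(): returns the
 * largest power of two <= n, leaving n itself untouched. */
static uint64_t rounddown_pow_of_two(uint64_t n)
{
	uint64_t p = 1;

	while (p * 2 <= n)
		p *= 2;
	return p;
}

int main(void)
{
	uint64_t mem_size = 3 * 1024 * 1024;

	rounddown_pow_of_two(mem_size);		/* result discarded: no effect */
	printf("%llu\n", (unsigned long long)mem_size);	/* still 3 MiB */

	mem_size = rounddown_pow_of_two(mem_size);	/* the fix */
	printf("%llu\n", (unsigned long long)mem_size);	/* now 2 MiB */
	return 0;
}
```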
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 6035ab8d5ef7..54ca8b23cde3 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -387,7 +387,7 @@ static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
static struct fasync_struct *fasync;
#if 0
-static int debug;
+static bool debug;
module_param(debug, bool, 0644);
#define DEBUG_ENT(fmt, arg...) do { \
if (debug) \
@@ -624,8 +624,8 @@ static struct timer_rand_state input_timer_state;
static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
{
struct {
- cycles_t cycles;
long jiffies;
+ unsigned cycles;
unsigned num;
} sample;
long delta, delta2, delta3;
@@ -637,7 +637,11 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
goto out;
sample.jiffies = jiffies;
- sample.cycles = get_cycles();
+
+ /* Use arch random value, fall back to cycles */
+ if (!arch_get_random_int(&sample.cycles))
+ sample.cycles = get_cycles();
+
sample.num = num;
mix_pool_bytes(&input_pool, &sample, sizeof(sample));
@@ -961,6 +965,7 @@ EXPORT_SYMBOL(get_random_bytes);
*/
static void init_std_data(struct entropy_store *r)
{
+ int i;
ktime_t now;
unsigned long flags;
@@ -970,6 +975,11 @@ static void init_std_data(struct entropy_store *r)
now = ktime_get_real();
mix_pool_bytes(r, &now, sizeof(now));
+ for (i = r->poolinfo->POOLBYTES; i > 0; i -= sizeof flags) {
+ if (!arch_get_random_long(&flags))
+ break;
+ mix_pool_bytes(r, &flags, sizeof(flags));
+ }
mix_pool_bytes(r, utsname(), sizeof(*(utsname())));
}
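These random.c hunks mix in an architectural hardware RNG (arch_get_random_int()/arch_get_random_long()) when it is available and fall back to the cycle counter or stop the seeding loop otherwise. A hedged userspace sketch of the try-then-fall-back shape; both helpers below are stand-ins:

```c
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Stand-in for arch_get_random_long(): returns false when no hardware
 * RNG instruction is available, mirroring the kernel's fallback shape. */
static bool arch_get_random_long(unsigned long *v)
{
	(void)v;
	return false;	/* pretend the CPU has no RDRAND-style instruction */
}

/* Stand-in for get_cycles(). */
static unsigned long get_cycles(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (unsigned long)ts.tv_nsec;
}

int main(void)
{
	unsigned long sample;

	/* Prefer the architectural RNG, fall back to a cycle counter. */
	if (!arch_get_random_long(&sample))
		sample = get_cycles();

	printf("sample = %lu\n", sample);
	return 0;
}
```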
diff --git a/drivers/char/raw.c b/drivers/char/raw.c
index b6de2c047145..54a3a6d09819 100644
--- a/drivers/char/raw.c
+++ b/drivers/char/raw.c
@@ -308,7 +308,7 @@ static const struct file_operations raw_ctl_fops = {
static struct cdev raw_cdev;
-static char *raw_devnode(struct device *dev, mode_t *mode)
+static char *raw_devnode(struct device *dev, umode_t *mode)
{
return kasprintf(GFP_KERNEL, "raw/%s", dev_name(dev));
}
diff --git a/drivers/char/tile-srom.c b/drivers/char/tile-srom.c
index cf3ee008dca2..4dc019408fac 100644
--- a/drivers/char/tile-srom.c
+++ b/drivers/char/tile-srom.c
@@ -329,7 +329,7 @@ static struct device_attribute srom_dev_attrs[] = {
__ATTR_NULL
};
-static char *srom_devnode(struct device *dev, mode_t *mode)
+static char *srom_devnode(struct device *dev, umode_t *mode)
{
*mode = S_IRUGO | S_IWUSR;
return kasprintf(GFP_KERNEL, "srom/%s", dev_name(dev));
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index fa567f1158c2..7fc75e47e6d0 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -27,6 +27,7 @@ if TCG_TPM
config TCG_TIS
tristate "TPM Interface Specification 1.2 Interface"
+ depends on X86
---help---
If you have a TPM security chip that is compliant with the
TCG TIS 1.2 TPM specification say Yes and it will be accessible
@@ -35,6 +36,7 @@ config TCG_TIS
config TCG_NSC
tristate "National Semiconductor TPM Interface"
+ depends on X86
---help---
If you have a TPM security chip from National Semiconductor
say Yes and it will be accessible from within Linux. To
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index 361a1dff8f77..32362cf35b8d 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -27,6 +27,7 @@
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
+#include <linux/freezer.h>
#include "tpm.h"
@@ -440,7 +441,6 @@ out:
}
#define TPM_DIGEST_SIZE 20
-#define TPM_ERROR_SIZE 10
#define TPM_RET_CODE_IDX 6
enum tpm_capabilities {
@@ -469,12 +469,14 @@ static ssize_t transmit_cmd(struct tpm_chip *chip, struct tpm_cmd_t *cmd,
len = tpm_transmit(chip,(u8 *) cmd, len);
if (len < 0)
return len;
- if (len == TPM_ERROR_SIZE) {
- err = be32_to_cpu(cmd->header.out.return_code);
- dev_dbg(chip->dev, "A TPM error (%d) occurred %s\n", err, desc);
- return err;
- }
- return 0;
+ else if (len < TPM_HEADER_SIZE)
+ return -EFAULT;
+
+ err = be32_to_cpu(cmd->header.out.return_code);
+ if (err != 0)
+ dev_err(chip->dev, "A TPM error (%d) occurred %s\n", err, desc);
+
+ return err;
}
#define TPM_INTERNAL_RESULT_SIZE 200
@@ -530,7 +532,7 @@ void tpm_gen_interrupt(struct tpm_chip *chip)
}
EXPORT_SYMBOL_GPL(tpm_gen_interrupt);
-void tpm_get_timeouts(struct tpm_chip *chip)
+int tpm_get_timeouts(struct tpm_chip *chip)
{
struct tpm_cmd_t tpm_cmd;
struct timeout_t *timeout_cap;
@@ -552,7 +554,7 @@ void tpm_get_timeouts(struct tpm_chip *chip)
if (be32_to_cpu(tpm_cmd.header.out.return_code) != 0 ||
be32_to_cpu(tpm_cmd.header.out.length)
!= sizeof(tpm_cmd.header.out) + sizeof(u32) + 4 * sizeof(u32))
- return;
+ return -EINVAL;
timeout_cap = &tpm_cmd.params.getcap_out.cap.timeout;
/* Don't overwrite default if value is 0 */
@@ -583,12 +585,12 @@ duration:
rc = transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE,
"attempting to determine the durations");
if (rc)
- return;
+ return rc;
if (be32_to_cpu(tpm_cmd.header.out.return_code) != 0 ||
be32_to_cpu(tpm_cmd.header.out.length)
!= sizeof(tpm_cmd.header.out) + sizeof(u32) + 3 * sizeof(u32))
- return;
+ return -EINVAL;
duration_cap = &tpm_cmd.params.getcap_out.cap.duration;
chip->vendor.duration[TPM_SHORT] =
@@ -610,20 +612,36 @@ duration:
chip->vendor.duration_adjusted = true;
dev_info(chip->dev, "Adjusting TPM timeout parameters.");
}
+ return 0;
}
EXPORT_SYMBOL_GPL(tpm_get_timeouts);
-void tpm_continue_selftest(struct tpm_chip *chip)
+#define TPM_ORD_CONTINUE_SELFTEST 83
+#define CONTINUE_SELFTEST_RESULT_SIZE 10
+
+static struct tpm_input_header continue_selftest_header = {
+ .tag = TPM_TAG_RQU_COMMAND,
+ .length = cpu_to_be32(10),
+ .ordinal = cpu_to_be32(TPM_ORD_CONTINUE_SELFTEST),
+};
+
+/**
+ * tpm_continue_selftest -- run TPM's selftest
+ * @chip: TPM chip to use
+ *
+ * Returns 0 on success, < 0 in case of fatal error or a value > 0 representing
+ * a TPM error code.
+ */
+static int tpm_continue_selftest(struct tpm_chip *chip)
{
- u8 data[] = {
- 0, 193, /* TPM_TAG_RQU_COMMAND */
- 0, 0, 0, 10, /* length */
- 0, 0, 0, 83, /* TPM_ORD_ContinueSelfTest */
- };
+ int rc;
+ struct tpm_cmd_t cmd;
- tpm_transmit(chip, data, sizeof(data));
+ cmd.header.in = continue_selftest_header;
+ rc = transmit_cmd(chip, &cmd, CONTINUE_SELFTEST_RESULT_SIZE,
+ "continue selftest");
+ return rc;
}
-EXPORT_SYMBOL_GPL(tpm_continue_selftest);
ssize_t tpm_show_enabled(struct device * dev, struct device_attribute * attr,
char *buf)
@@ -718,7 +736,7 @@ static struct tpm_input_header pcrread_header = {
.ordinal = TPM_ORDINAL_PCRREAD
};
-int __tpm_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf)
+static int __tpm_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf)
{
int rc;
struct tpm_cmd_t cmd;
@@ -798,6 +816,54 @@ int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash)
}
EXPORT_SYMBOL_GPL(tpm_pcr_extend);
+/**
+ * tpm_do_selftest - have the TPM continue its selftest and wait until it
+ * can receive further commands
+ * @chip: TPM chip to use
+ *
+ * Returns 0 on success, < 0 in case of fatal error or a value > 0 representing
+ * a TPM error code.
+ */
+int tpm_do_selftest(struct tpm_chip *chip)
+{
+ int rc;
+ u8 digest[TPM_DIGEST_SIZE];
+ unsigned int loops;
+ unsigned int delay_msec = 1000;
+ unsigned long duration;
+
+ duration = tpm_calc_ordinal_duration(chip,
+ TPM_ORD_CONTINUE_SELFTEST);
+
+ loops = jiffies_to_msecs(duration) / delay_msec;
+
+ rc = tpm_continue_selftest(chip);
+ /* This may fail if there was no TPM driver during a suspend/resume
+ * cycle; some may return 10 (BAD_ORDINAL), others 28 (FAILEDSELFTEST)
+ */
+ if (rc)
+ return rc;
+
+ do {
+ rc = __tpm_pcr_read(chip, 0, digest);
+ if (rc == TPM_ERR_DISABLED || rc == TPM_ERR_DEACTIVATED) {
+ dev_info(chip->dev,
+ "TPM is disabled/deactivated (0x%X)\n", rc);
+ /* TPM is disabled and/or deactivated; driver can
+ * proceed and TPM does handle commands for
+ * suspend/resume correctly
+ */
+ return 0;
+ }
+ if (rc != TPM_WARN_DOING_SELFTEST)
+ return rc;
+ msleep(delay_msec);
+ } while (--loops > 0);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(tpm_do_selftest);
+
int tpm_send(u32 chip_num, void *cmd, size_t buflen)
{
struct tpm_chip *chip;
@@ -1005,6 +1071,46 @@ ssize_t tpm_store_cancel(struct device *dev, struct device_attribute *attr,
}
EXPORT_SYMBOL_GPL(tpm_store_cancel);
+int wait_for_tpm_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout,
+ wait_queue_head_t *queue)
+{
+ unsigned long stop;
+ long rc;
+ u8 status;
+
+ /* check current status */
+ status = chip->vendor.status(chip);
+ if ((status & mask) == mask)
+ return 0;
+
+ stop = jiffies + timeout;
+
+ if (chip->vendor.irq) {
+again:
+ timeout = stop - jiffies;
+ if ((long)timeout <= 0)
+ return -ETIME;
+ rc = wait_event_interruptible_timeout(*queue,
+ ((chip->vendor.status(chip)
+ & mask) == mask),
+ timeout);
+ if (rc > 0)
+ return 0;
+ if (rc == -ERESTARTSYS && freezing(current)) {
+ clear_thread_flag(TIF_SIGPENDING);
+ goto again;
+ }
+ } else {
+ do {
+ msleep(TPM_TIMEOUT);
+ status = chip->vendor.status(chip);
+ if ((status & mask) == mask)
+ return 0;
+ } while (time_before(jiffies, stop));
+ }
+ return -ETIME;
+}
+EXPORT_SYMBOL_GPL(wait_for_tpm_stat);
/*
* Device file system interface to the TPM
*
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 9c4163cfa3ce..010547138281 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -38,6 +38,11 @@ enum tpm_addr {
TPM_ADDR = 0x4E,
};
+#define TPM_WARN_DOING_SELFTEST 0x802
+#define TPM_ERR_DEACTIVATED 0x6
+#define TPM_ERR_DISABLED 0x7
+
+#define TPM_HEADER_SIZE 10
extern ssize_t tpm_show_pubek(struct device *, struct device_attribute *attr,
char *);
extern ssize_t tpm_show_pcrs(struct device *, struct device_attribute *attr,
@@ -279,9 +284,9 @@ struct tpm_cmd_t {
ssize_t tpm_getcap(struct device *, __be32, cap_t *, const char *);
-extern void tpm_get_timeouts(struct tpm_chip *);
+extern int tpm_get_timeouts(struct tpm_chip *);
extern void tpm_gen_interrupt(struct tpm_chip *);
-extern void tpm_continue_selftest(struct tpm_chip *);
+extern int tpm_do_selftest(struct tpm_chip *);
extern unsigned long tpm_calc_ordinal_duration(struct tpm_chip *, u32);
extern struct tpm_chip* tpm_register_hardware(struct device *,
const struct tpm_vendor_specific *);
@@ -294,7 +299,8 @@ extern ssize_t tpm_read(struct file *, char __user *, size_t, loff_t *);
extern void tpm_remove_hardware(struct device *);
extern int tpm_pm_suspend(struct device *, pm_message_t);
extern int tpm_pm_resume(struct device *);
-
+extern int wait_for_tpm_stat(struct tpm_chip *, u8, unsigned long,
+ wait_queue_head_t *);
#ifdef CONFIG_ACPI
extern struct dentry ** tpm_bios_log_setup(char *);
extern void tpm_bios_log_teardown(struct dentry **);
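The tpm.c/tpm.h changes above make transmit_cmd() propagate the chip's own return code, let tpm_get_timeouts() report failure, and add tpm_do_selftest(), which keeps reading PCR 0 until the chip stops answering TPM_WARN_DOING_SELFTEST or a loop budget derived from the ordinal duration runs out. A simplified userspace sketch of that retry loop; the fake read is a stand-in for __tpm_pcr_read():

```c
#include <stdio.h>
#include <unistd.h>

#define TPM_WARN_DOING_SELFTEST 0x802

/* Hypothetical stand-in: pretend the chip reports "self-test in progress"
 * a couple of times before succeeding. */
static int fake_pcr_read(void)
{
	static int calls;

	return (++calls < 3) ? TPM_WARN_DOING_SELFTEST : 0;
}

int main(void)
{
	unsigned int delay_msec = 100;	/* shorter than the driver's 1000 ms */
	unsigned int loops = 10;
	int rc;

	/* Same shape as tpm_do_selftest(): retry while the chip reports
	 * TPM_WARN_DOING_SELFTEST, give up when the loop budget runs out. */
	do {
		rc = fake_pcr_read();
		if (rc != TPM_WARN_DOING_SELFTEST)
			break;
		usleep(delay_msec * 1000);
	} while (--loops > 0);

	printf("rc = %d\n", rc);
	return 0;
}
```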
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index 3f4051a7c5a7..a1748621111b 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -29,8 +29,6 @@
#include <linux/freezer.h>
#include "tpm.h"
-#define TPM_HEADER_SIZE 10
-
enum tis_access {
TPM_ACCESS_VALID = 0x80,
TPM_ACCESS_ACTIVE_LOCALITY = 0x20,
@@ -193,54 +191,14 @@ static int get_burstcount(struct tpm_chip *chip)
return -EBUSY;
}
-static int wait_for_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout,
- wait_queue_head_t *queue)
-{
- unsigned long stop;
- long rc;
- u8 status;
-
- /* check current status */
- status = tpm_tis_status(chip);
- if ((status & mask) == mask)
- return 0;
-
- stop = jiffies + timeout;
-
- if (chip->vendor.irq) {
-again:
- timeout = stop - jiffies;
- if ((long)timeout <= 0)
- return -ETIME;
- rc = wait_event_interruptible_timeout(*queue,
- ((tpm_tis_status
- (chip) & mask) ==
- mask), timeout);
- if (rc > 0)
- return 0;
- if (rc == -ERESTARTSYS && freezing(current)) {
- clear_thread_flag(TIF_SIGPENDING);
- goto again;
- }
- } else {
- do {
- msleep(TPM_TIMEOUT);
- status = tpm_tis_status(chip);
- if ((status & mask) == mask)
- return 0;
- } while (time_before(jiffies, stop));
- }
- return -ETIME;
-}
-
static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
{
int size = 0, burstcnt;
while (size < count &&
- wait_for_stat(chip,
- TPM_STS_DATA_AVAIL | TPM_STS_VALID,
- chip->vendor.timeout_c,
- &chip->vendor.read_queue)
+ wait_for_tpm_stat(chip,
+ TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ chip->vendor.timeout_c,
+ &chip->vendor.read_queue)
== 0) {
burstcnt = get_burstcount(chip);
for (; burstcnt > 0 && size < count; burstcnt--)
@@ -282,8 +240,8 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
goto out;
}
- wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
- &chip->vendor.int_queue);
+ wait_for_tpm_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
+ &chip->vendor.int_queue);
status = tpm_tis_status(chip);
if (status & TPM_STS_DATA_AVAIL) { /* retry? */
dev_err(chip->dev, "Error left over data\n");
@@ -297,7 +255,7 @@ out:
return size;
}
-static int itpm;
+static bool itpm;
module_param(itpm, bool, 0444);
MODULE_PARM_DESC(itpm, "Force iTPM workarounds (found on some Lenovo laptops)");
@@ -317,7 +275,7 @@ static int tpm_tis_send_data(struct tpm_chip *chip, u8 *buf, size_t len)
status = tpm_tis_status(chip);
if ((status & TPM_STS_COMMAND_READY) == 0) {
tpm_tis_ready(chip);
- if (wait_for_stat
+ if (wait_for_tpm_stat
(chip, TPM_STS_COMMAND_READY, chip->vendor.timeout_b,
&chip->vendor.int_queue) < 0) {
rc = -ETIME;
@@ -333,8 +291,8 @@ static int tpm_tis_send_data(struct tpm_chip *chip, u8 *buf, size_t len)
count++;
}
- wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
- &chip->vendor.int_queue);
+ wait_for_tpm_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
+ &chip->vendor.int_queue);
status = tpm_tis_status(chip);
if (!itpm && (status & TPM_STS_DATA_EXPECT) == 0) {
rc = -EIO;
@@ -345,8 +303,8 @@ static int tpm_tis_send_data(struct tpm_chip *chip, u8 *buf, size_t len)
/* write last byte */
iowrite8(buf[count],
chip->vendor.iobase + TPM_DATA_FIFO(chip->vendor.locality));
- wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
- &chip->vendor.int_queue);
+ wait_for_tpm_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
+ &chip->vendor.int_queue);
status = tpm_tis_status(chip);
if ((status & TPM_STS_DATA_EXPECT) != 0) {
rc = -EIO;
@@ -381,7 +339,7 @@ static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
if (chip->vendor.irq) {
ordinal = be32_to_cpu(*((__be32 *) (buf + 6)));
- if (wait_for_stat
+ if (wait_for_tpm_stat
(chip, TPM_STS_DATA_AVAIL | TPM_STS_VALID,
tpm_calc_ordinal_duration(chip, ordinal),
&chip->vendor.read_queue) < 0) {
@@ -432,6 +390,9 @@ static int probe_itpm(struct tpm_chip *chip)
out:
itpm = rem_itpm;
tpm_tis_ready(chip);
+	/* some TPMs need a break here, otherwise they will not work
+ * correctly on the immediately subsequent command */
+ msleep(chip->vendor.timeout_b);
release_locality(chip, chip->vendor.locality, 0);
return rc;
@@ -539,7 +500,7 @@ static irqreturn_t tis_int_handler(int dummy, void *dev_id)
return IRQ_HANDLED;
}
-static int interrupts = 1;
+static bool interrupts = 1;
module_param(interrupts, bool, 0444);
MODULE_PARM_DESC(interrupts, "Enable interrupts");
@@ -614,7 +575,17 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
dev_dbg(dev, "\tData Avail Int Support\n");
/* get the timeouts before testing for irqs */
- tpm_get_timeouts(chip);
+ if (tpm_get_timeouts(chip)) {
+ dev_err(dev, "Could not get TPM timeouts and durations\n");
+ rc = -ENODEV;
+ goto out_err;
+ }
+
+ if (tpm_do_selftest(chip)) {
+ dev_err(dev, "TPM self test failed\n");
+ rc = -ENODEV;
+ goto out_err;
+ }
/* INTERRUPT Setup */
init_waitqueue_head(&chip->vendor.read_queue);
@@ -722,7 +693,6 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
list_add(&chip->vendor.list, &tis_chips);
spin_unlock(&tis_lock);
- tpm_continue_selftest(chip);
return 0;
out_err:
@@ -790,7 +760,7 @@ static int tpm_tis_pnp_resume(struct pnp_dev *dev)
ret = tpm_pm_resume(&dev->dev);
if (!ret)
- tpm_continue_selftest(chip);
+ tpm_do_selftest(chip);
return ret;
}
@@ -858,7 +828,7 @@ static struct platform_driver tis_drv = {
static struct platform_device *pdev;
-static int force;
+static bool force;
module_param(force, bool, 0444);
MODULE_PARM_DESC(force, "Force device probe rather than using ACPI entry");
static int __init init_tis(void)
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 8e3c46d67cb3..b58b56187065 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -392,7 +392,7 @@ static int add_inbuf(struct virtqueue *vq, struct port_buffer *buf)
sg_init_one(sg, buf->buf, buf->size);
- ret = virtqueue_add_buf(vq, sg, 0, 1, buf);
+ ret = virtqueue_add_buf(vq, sg, 0, 1, buf, GFP_ATOMIC);
virtqueue_kick(vq);
return ret;
}
@@ -457,7 +457,7 @@ static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id,
vq = portdev->c_ovq;
sg_init_one(sg, &cpkt, sizeof(cpkt));
- if (virtqueue_add_buf(vq, sg, 1, 0, &cpkt) >= 0) {
+ if (virtqueue_add_buf(vq, sg, 1, 0, &cpkt, GFP_ATOMIC) >= 0) {
virtqueue_kick(vq);
while (!virtqueue_get_buf(vq, &len))
cpu_relax();
@@ -506,7 +506,7 @@ static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count,
reclaim_consumed_buffers(port);
sg_init_one(sg, in_buf, in_count);
- ret = virtqueue_add_buf(out_vq, sg, 1, 0, in_buf);
+ ret = virtqueue_add_buf(out_vq, sg, 1, 0, in_buf, GFP_ATOMIC);
/* Tell Host to go! */
virtqueue_kick(out_vq);
@@ -1271,6 +1271,20 @@ static void remove_port(struct kref *kref)
kfree(port);
}
+static void remove_port_data(struct port *port)
+{
+ struct port_buffer *buf;
+
+ /* Remove unused data this port might have received. */
+ discard_port_data(port);
+
+ reclaim_consumed_buffers(port);
+
+ /* Remove buffers we queued up for the Host to send us data in. */
+ while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
+ free_buf(buf);
+}
+
/*
* Port got unplugged. Remove port from portdev's list and drop the
* kref reference. If no userspace has this port opened, it will
@@ -1278,8 +1292,6 @@ static void remove_port(struct kref *kref)
*/
static void unplug_port(struct port *port)
{
- struct port_buffer *buf;
-
spin_lock_irq(&port->portdev->ports_lock);
list_del(&port->list);
spin_unlock_irq(&port->portdev->ports_lock);
@@ -1300,14 +1312,7 @@ static void unplug_port(struct port *port)
hvc_remove(port->cons.hvc);
}
- /* Remove unused data this port might have received. */
- discard_port_data(port);
-
- reclaim_consumed_buffers(port);
-
- /* Remove buffers we queued up for the Host to send us data in. */
- while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
- free_buf(buf);
+ remove_port_data(port);
/*
* We should just assume the device itself has gone off --
@@ -1659,6 +1664,28 @@ static const struct file_operations portdev_fops = {
.owner = THIS_MODULE,
};
+static void remove_vqs(struct ports_device *portdev)
+{
+ portdev->vdev->config->del_vqs(portdev->vdev);
+ kfree(portdev->in_vqs);
+ kfree(portdev->out_vqs);
+}
+
+static void remove_controlq_data(struct ports_device *portdev)
+{
+ struct port_buffer *buf;
+ unsigned int len;
+
+ if (!use_multiport(portdev))
+ return;
+
+ while ((buf = virtqueue_get_buf(portdev->c_ivq, &len)))
+ free_buf(buf);
+
+ while ((buf = virtqueue_detach_unused_buf(portdev->c_ivq)))
+ free_buf(buf);
+}
+
/*
* Once we're further in boot, we get probed like any other virtio
* device.
@@ -1764,9 +1791,7 @@ free_vqs:
/* The host might want to notify mgmt sw about device add failure */
__send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
VIRTIO_CONSOLE_DEVICE_READY, 0);
- vdev->config->del_vqs(vdev);
- kfree(portdev->in_vqs);
- kfree(portdev->out_vqs);
+ remove_vqs(portdev);
free_chrdev:
unregister_chrdev(portdev->chr_major, "virtio-portsdev");
free:
@@ -1804,21 +1829,8 @@ static void virtcons_remove(struct virtio_device *vdev)
* have to just stop using the port, as the vqs are going
* away.
*/
- if (use_multiport(portdev)) {
- struct port_buffer *buf;
- unsigned int len;
-
- while ((buf = virtqueue_get_buf(portdev->c_ivq, &len)))
- free_buf(buf);
-
- while ((buf = virtqueue_detach_unused_buf(portdev->c_ivq)))
- free_buf(buf);
- }
-
- vdev->config->del_vqs(vdev);
- kfree(portdev->in_vqs);
- kfree(portdev->out_vqs);
-
+ remove_controlq_data(portdev);
+ remove_vqs(portdev);
kfree(portdev);
}
@@ -1832,6 +1844,68 @@ static unsigned int features[] = {
VIRTIO_CONSOLE_F_MULTIPORT,
};
+#ifdef CONFIG_PM
+static int virtcons_freeze(struct virtio_device *vdev)
+{
+ struct ports_device *portdev;
+ struct port *port;
+
+ portdev = vdev->priv;
+
+ vdev->config->reset(vdev);
+
+ virtqueue_disable_cb(portdev->c_ivq);
+ cancel_work_sync(&portdev->control_work);
+ /*
+ * Once more: if control_work_handler() was running, it would
+ * enable the cb as the last step.
+ */
+ virtqueue_disable_cb(portdev->c_ivq);
+ remove_controlq_data(portdev);
+
+ list_for_each_entry(port, &portdev->ports, list) {
+ virtqueue_disable_cb(port->in_vq);
+ virtqueue_disable_cb(port->out_vq);
+ /*
+ * We'll ask the host later if the new invocation has
+ * the port opened or closed.
+ */
+ port->host_connected = false;
+ remove_port_data(port);
+ }
+ remove_vqs(portdev);
+
+ return 0;
+}
+
+static int virtcons_restore(struct virtio_device *vdev)
+{
+ struct ports_device *portdev;
+ struct port *port;
+ int ret;
+
+ portdev = vdev->priv;
+
+ ret = init_vqs(portdev);
+ if (ret)
+ return ret;
+
+ if (use_multiport(portdev))
+ fill_queue(portdev->c_ivq, &portdev->cvq_lock);
+
+ list_for_each_entry(port, &portdev->ports, list) {
+ port->in_vq = portdev->in_vqs[port->id];
+ port->out_vq = portdev->out_vqs[port->id];
+
+ fill_queue(port->in_vq, &port->inbuf_lock);
+
+ /* Get port open/close status on the host */
+ send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
+ }
+ return 0;
+}
+#endif
+
static struct virtio_driver virtio_console = {
.feature_table = features,
.feature_table_size = ARRAY_SIZE(features),
@@ -1841,6 +1915,10 @@ static struct virtio_driver virtio_console = {
.probe = virtcons_probe,
.remove = virtcons_remove,
.config_changed = config_intr,
+#ifdef CONFIG_PM
+ .freeze = virtcons_freeze,
+ .restore = virtcons_restore,
+#endif
};
static int __init init(void)
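Besides factoring the teardown into remove_port_data()/remove_vqs()/remove_controlq_data() so the new freeze/restore hooks can reuse it, the virtio_console hunks follow the virtqueue_add_buf() API change that adds a gfp_t argument. A hedged kernel-style sketch of a caller after the change (the helper name is made up):

```c
#include <linux/gfp.h>
#include <linux/scatterlist.h>
#include <linux/virtio.h>

/* Hypothetical helper mirroring add_inbuf() after the API change: the
 * extra gfp_t argument lets virtqueue_add_buf() allocate indirect
 * descriptors; GFP_ATOMIC is used because callers may hold a spinlock. */
static int queue_one_buffer(struct virtqueue *vq, void *data, unsigned int len)
{
	struct scatterlist sg;

	sg_init_one(&sg, data, len);
	return virtqueue_add_buf(vq, &sg, 0, 1, data, GFP_ATOMIC);
}
```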
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 35309274ad68..9b3cd08cd0ed 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -3,5 +3,8 @@ config CLKDEV_LOOKUP
bool
select HAVE_CLK
+config HAVE_CLK_PREPARE
+ bool
+
config HAVE_MACH_CLKDEV
bool
diff --git a/drivers/clocksource/acpi_pm.c b/drivers/clocksource/acpi_pm.c
index effe7974aa9a..6b5cf02c35c8 100644
--- a/drivers/clocksource/acpi_pm.c
+++ b/drivers/clocksource/acpi_pm.c
@@ -143,7 +143,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_LE,
#ifndef CONFIG_X86_64
#include <asm/mach_timer.h>
#define PMTMR_EXPECTED_RATE \
- ((CALIBRATE_LATCH * (PMTMR_TICKS_PER_SEC >> 10)) / (CLOCK_TICK_RATE>>10))
+ ((CALIBRATE_LATCH * (PMTMR_TICKS_PER_SEC >> 10)) / (PIT_TICK_RATE>>10))
/*
* Some boards have the PMTMR running way too fast. We check
* the PMTMR rate against PIT channel 2 to catch these cases.
diff --git a/drivers/clocksource/clksrc-dbx500-prcmu.c b/drivers/clocksource/clksrc-dbx500-prcmu.c
index 59feefe0e3e6..fb6b6d28b60e 100644
--- a/drivers/clocksource/clksrc-dbx500-prcmu.c
+++ b/drivers/clocksource/clksrc-dbx500-prcmu.c
@@ -58,25 +58,15 @@ static struct clocksource clocksource_dbx500_prcmu = {
};
#ifdef CONFIG_CLKSRC_DBX500_PRCMU_SCHED_CLOCK
-static DEFINE_CLOCK_DATA(cd);
-unsigned long long notrace sched_clock(void)
+static u32 notrace dbx500_prcmu_sched_clock_read(void)
{
- u32 cyc;
-
if (unlikely(!clksrc_dbx500_timer_base))
return 0;
- cyc = clksrc_dbx500_prcmu_read(&clocksource_dbx500_prcmu);
-
- return cyc_to_sched_clock(&cd, cyc, (u32)~0);
+ return clksrc_dbx500_prcmu_read(&clocksource_dbx500_prcmu);
}
-static void notrace clksrc_dbx500_prcmu_update_sched_clock(void)
-{
- u32 cyc = clksrc_dbx500_prcmu_read(&clocksource_dbx500_prcmu);
- update_sched_clock(&cd, cyc, (u32)~0);
-}
#endif
void __init clksrc_dbx500_prcmu_init(void __iomem *base)
@@ -97,7 +87,7 @@ void __init clksrc_dbx500_prcmu_init(void __iomem *base)
clksrc_dbx500_timer_base + PRCMU_TIMER_REF);
}
#ifdef CONFIG_CLKSRC_DBX500_PRCMU_SCHED_CLOCK
- init_sched_clock(&cd, clksrc_dbx500_prcmu_update_sched_clock,
+ setup_sched_clock(dbx500_prcmu_sched_clock_read,
32, RATE_32K);
#endif
clocksource_calc_mult_shift(&clocksource_dbx500_prcmu,
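The clksrc-dbx500-prcmu change replaces the open-coded DEFINE_CLOCK_DATA()/update_sched_clock() machinery with the ARM setup_sched_clock() helper, which only needs a raw counter read function. A hedged sketch of that registration pattern; names, header choice and base-address handling are illustrative:

```c
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>

#include <asm/sched_clock.h>

static void __iomem *example_timer_base;

/* The core sched_clock code handles wrapping and bookkeeping; the driver
 * only supplies a raw 32-bit counter read. */
static u32 notrace example_sched_clock_read(void)
{
	return readl_relaxed(example_timer_base);
}

static void __init example_sched_clock_init(void __iomem *base,
					     unsigned long rate)
{
	example_timer_base = base;
	setup_sched_clock(example_sched_clock_read, 32, rate);
}
```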
diff --git a/drivers/clocksource/i8253.c b/drivers/clocksource/i8253.c
index 27c49e60b7d6..e7cab2da910f 100644
--- a/drivers/clocksource/i8253.c
+++ b/drivers/clocksource/i8253.c
@@ -53,7 +53,7 @@ static cycle_t i8253_read(struct clocksource *cs)
count |= inb_p(PIT_CH0) << 8;
/* VIA686a test code... reset the latch if count > max + 1 */
- if (count > LATCH) {
+ if (count > PIT_LATCH) {
outb_p(0x34, PIT_MODE);
outb_p(PIT_LATCH & 0xff, PIT_CH0);
outb_p(PIT_LATCH >> 8, PIT_CH0);
@@ -114,8 +114,8 @@ static void init_pit_timer(enum clock_event_mode mode,
case CLOCK_EVT_MODE_PERIODIC:
/* binary, mode 2, LSB/MSB, ch 0 */
outb_p(0x34, PIT_MODE);
- outb_p(LATCH & 0xff , PIT_CH0); /* LSB */
- outb_p(LATCH >> 8 , PIT_CH0); /* MSB */
+ outb_p(PIT_LATCH & 0xff , PIT_CH0); /* LSB */
+ outb_p(PIT_LATCH >> 8 , PIT_CH0); /* MSB */
break;
case CLOCK_EVT_MODE_SHUTDOWN:
diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c
index 79c47e88d5d1..55d0f95f82f9 100644
--- a/drivers/clocksource/tcb_clksrc.c
+++ b/drivers/clocksource/tcb_clksrc.c
@@ -59,7 +59,6 @@ static struct clocksource clksrc = {
.rating = 200,
.read = tc_get_cycles,
.mask = CLOCKSOURCE_MASK(32),
- .shift = 18,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
@@ -256,7 +255,6 @@ static int __init tcb_clksrc_init(void)
best_divisor_idx = i;
}
- clksrc.mult = clocksource_hz2mult(divided_rate, clksrc.shift);
printk(bootinfo, clksrc.name, CONFIG_ATMEL_TCB_CLKSRC_BLOCK,
divided_rate / 1000000,
@@ -292,7 +290,7 @@ static int __init tcb_clksrc_init(void)
__raw_writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR);
/* and away we go! */
- clocksource_register(&clksrc);
+ clocksource_register_hz(&clksrc, divided_rate);
/* channel 2: periodic and oneshot timer support */
setup_clkevents(tc, clk32k_divisor_idx);
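The tcb_clksrc hunks drop the hand-rolled .shift/clocksource_hz2mult() setup in favour of clocksource_register_hz(), which lets the core derive mult and shift from the clock rate. A hedged sketch of a clocksource registered that way; the read hook and rate are placeholders:

```c
#include <linux/clocksource.h>
#include <linux/init.h>

static cycle_t example_read(struct clocksource *cs)
{
	return 0;	/* read the hardware counter here */
}

static struct clocksource example_clksrc = {
	.name	= "example",
	.rating	= 200,
	.read	= example_read,
	.mask	= CLOCKSOURCE_MASK(32),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static int __init example_clksrc_init(void)
{
	/* mult/shift are computed by the core from the 16 MHz rate */
	return clocksource_register_hz(&example_clksrc, 16000000);
}
```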
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 72a0044c1baa..e0664fed018a 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -21,12 +21,19 @@ config ARM_S5PV210_CPUFREQ
If in doubt, say N.
+config ARM_EXYNOS_CPUFREQ
+ bool "SAMSUNG EXYNOS SoCs"
+ depends on ARCH_EXYNOS
+ select ARM_EXYNOS4210_CPUFREQ if CPU_EXYNOS4210
+ default y
+ help
+	  This adds the common part of the CPUFreq driver for
+	  Samsung EXYNOS SoCs.
+
+ If in doubt, say N.
+
config ARM_EXYNOS4210_CPUFREQ
bool "Samsung EXYNOS4210"
- depends on CPU_EXYNOS4210
- default y
help
This adds the CPUFreq driver for Samsung EXYNOS4210
SoC (S5PV310 or S5PC210).
-
- If in doubt, say N.
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index a48bc02cd765..ac000fa76bbb 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -42,7 +42,9 @@ obj-$(CONFIG_X86_CPUFREQ_NFORCE2) += cpufreq-nforce2.o
obj-$(CONFIG_UX500_SOC_DB8500) += db8500-cpufreq.o
obj-$(CONFIG_ARM_S3C64XX_CPUFREQ) += s3c64xx-cpufreq.o
obj-$(CONFIG_ARM_S5PV210_CPUFREQ) += s5pv210-cpufreq.o
+obj-$(CONFIG_ARM_EXYNOS_CPUFREQ) += exynos-cpufreq.o
obj-$(CONFIG_ARM_EXYNOS4210_CPUFREQ) += exynos4210-cpufreq.o
+obj-$(CONFIG_ARCH_OMAP2PLUS) += omap-cpufreq.o
##################################################################################
# PowerPC platform drivers
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 987a165ede26..622013fb7890 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -204,8 +204,7 @@ static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
pr_debug("saving %lu as reference value for loops_per_jiffy; "
"freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
}
- if ((val == CPUFREQ_PRECHANGE && ci->old < ci->new) ||
- (val == CPUFREQ_POSTCHANGE && ci->old > ci->new) ||
+ if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
(val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
ci->new);
@@ -679,7 +678,7 @@ static struct kobj_type ktype_cpufreq = {
*/
static int cpufreq_add_dev_policy(unsigned int cpu,
struct cpufreq_policy *policy,
- struct sys_device *sys_dev)
+ struct device *dev)
{
int ret = 0;
#ifdef CONFIG_SMP
@@ -728,7 +727,7 @@ static int cpufreq_add_dev_policy(unsigned int cpu,
spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
pr_debug("CPU already managed, adding link\n");
- ret = sysfs_create_link(&sys_dev->kobj,
+ ret = sysfs_create_link(&dev->kobj,
&managed_policy->kobj,
"cpufreq");
if (ret)
@@ -761,7 +760,7 @@ static int cpufreq_add_dev_symlink(unsigned int cpu,
for_each_cpu(j, policy->cpus) {
struct cpufreq_policy *managed_policy;
- struct sys_device *cpu_sys_dev;
+ struct device *cpu_dev;
if (j == cpu)
continue;
@@ -770,8 +769,8 @@ static int cpufreq_add_dev_symlink(unsigned int cpu,
pr_debug("CPU %u already managed, adding link\n", j);
managed_policy = cpufreq_cpu_get(cpu);
- cpu_sys_dev = get_cpu_sysdev(j);
- ret = sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj,
+ cpu_dev = get_cpu_device(j);
+ ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
"cpufreq");
if (ret) {
cpufreq_cpu_put(managed_policy);
@@ -783,7 +782,7 @@ static int cpufreq_add_dev_symlink(unsigned int cpu,
static int cpufreq_add_dev_interface(unsigned int cpu,
struct cpufreq_policy *policy,
- struct sys_device *sys_dev)
+ struct device *dev)
{
struct cpufreq_policy new_policy;
struct freq_attr **drv_attr;
@@ -793,7 +792,7 @@ static int cpufreq_add_dev_interface(unsigned int cpu,
/* prepare interface data */
ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
- &sys_dev->kobj, "cpufreq");
+ &dev->kobj, "cpufreq");
if (ret)
return ret;
@@ -866,9 +865,9 @@ err_out_kobj_put:
 * with cpu hotplugging and all hell will break loose. Tried to clean this
* mess up, but more thorough testing is needed. - Mathieu
*/
-static int cpufreq_add_dev(struct sys_device *sys_dev)
+static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
- unsigned int cpu = sys_dev->id;
+ unsigned int cpu = dev->id;
int ret = 0, found = 0;
struct cpufreq_policy *policy;
unsigned long flags;
@@ -947,7 +946,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
CPUFREQ_START, policy);
- ret = cpufreq_add_dev_policy(cpu, policy, sys_dev);
+ ret = cpufreq_add_dev_policy(cpu, policy, dev);
if (ret) {
if (ret > 0)
/* This is a managed cpu, symlink created,
@@ -956,7 +955,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
goto err_unlock_policy;
}
- ret = cpufreq_add_dev_interface(cpu, policy, sys_dev);
+ ret = cpufreq_add_dev_interface(cpu, policy, dev);
if (ret)
goto err_out_unregister;
@@ -999,15 +998,15 @@ module_out:
* Caller should already have policy_rwsem in write mode for this CPU.
* This routine frees the rwsem before returning.
*/
-static int __cpufreq_remove_dev(struct sys_device *sys_dev)
+static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
- unsigned int cpu = sys_dev->id;
+ unsigned int cpu = dev->id;
unsigned long flags;
struct cpufreq_policy *data;
struct kobject *kobj;
struct completion *cmp;
#ifdef CONFIG_SMP
- struct sys_device *cpu_sys_dev;
+ struct device *cpu_dev;
unsigned int j;
#endif
@@ -1032,7 +1031,7 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
pr_debug("removing link\n");
cpumask_clear_cpu(cpu, data->cpus);
spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
- kobj = &sys_dev->kobj;
+ kobj = &dev->kobj;
cpufreq_cpu_put(data);
unlock_policy_rwsem_write(cpu);
sysfs_remove_link(kobj, "cpufreq");
@@ -1071,8 +1070,8 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
strncpy(per_cpu(cpufreq_cpu_governor, j),
data->governor->name, CPUFREQ_NAME_LEN);
#endif
- cpu_sys_dev = get_cpu_sysdev(j);
- kobj = &cpu_sys_dev->kobj;
+ cpu_dev = get_cpu_device(j);
+ kobj = &cpu_dev->kobj;
unlock_policy_rwsem_write(cpu);
sysfs_remove_link(kobj, "cpufreq");
lock_policy_rwsem_write(cpu);
@@ -1112,11 +1111,11 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
if (unlikely(cpumask_weight(data->cpus) > 1)) {
/* first sibling now owns the new sysfs dir */
cpumask_clear_cpu(cpu, data->cpus);
- cpufreq_add_dev(get_cpu_sysdev(cpumask_first(data->cpus)));
+ cpufreq_add_dev(get_cpu_device(cpumask_first(data->cpus)), NULL);
/* finally remove our own symlink */
lock_policy_rwsem_write(cpu);
- __cpufreq_remove_dev(sys_dev);
+ __cpufreq_remove_dev(dev, sif);
}
#endif
@@ -1128,9 +1127,9 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
}
-static int cpufreq_remove_dev(struct sys_device *sys_dev)
+static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
- unsigned int cpu = sys_dev->id;
+ unsigned int cpu = dev->id;
int retval;
if (cpu_is_offline(cpu))
@@ -1139,7 +1138,7 @@ static int cpufreq_remove_dev(struct sys_device *sys_dev)
if (unlikely(lock_policy_rwsem_write(cpu)))
BUG();
- retval = __cpufreq_remove_dev(sys_dev);
+ retval = __cpufreq_remove_dev(dev, sif);
return retval;
}
@@ -1271,9 +1270,11 @@ out:
}
EXPORT_SYMBOL(cpufreq_get);
-static struct sysdev_driver cpufreq_sysdev_driver = {
- .add = cpufreq_add_dev,
- .remove = cpufreq_remove_dev,
+static struct subsys_interface cpufreq_interface = {
+ .name = "cpufreq",
+ .subsys = &cpu_subsys,
+ .add_dev = cpufreq_add_dev,
+ .remove_dev = cpufreq_remove_dev,
};
@@ -1765,25 +1766,25 @@ static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
- struct sys_device *sys_dev;
+ struct device *dev;
- sys_dev = get_cpu_sysdev(cpu);
- if (sys_dev) {
+ dev = get_cpu_device(cpu);
+ if (dev) {
switch (action) {
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
- cpufreq_add_dev(sys_dev);
+ cpufreq_add_dev(dev, NULL);
break;
case CPU_DOWN_PREPARE:
case CPU_DOWN_PREPARE_FROZEN:
if (unlikely(lock_policy_rwsem_write(cpu)))
BUG();
- __cpufreq_remove_dev(sys_dev);
+ __cpufreq_remove_dev(dev, NULL);
break;
case CPU_DOWN_FAILED:
case CPU_DOWN_FAILED_FROZEN:
- cpufreq_add_dev(sys_dev);
+ cpufreq_add_dev(dev, NULL);
break;
}
}
@@ -1830,8 +1831,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
cpufreq_driver = driver_data;
spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
- ret = sysdev_driver_register(&cpu_sysdev_class,
- &cpufreq_sysdev_driver);
+ ret = subsys_interface_register(&cpufreq_interface);
if (ret)
goto err_null_driver;
@@ -1850,7 +1850,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
if (ret) {
pr_debug("no CPU initialized for driver %s\n",
driver_data->name);
- goto err_sysdev_unreg;
+ goto err_if_unreg;
}
}
@@ -1858,9 +1858,8 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
pr_debug("driver %s up and running\n", driver_data->name);
return 0;
-err_sysdev_unreg:
- sysdev_driver_unregister(&cpu_sysdev_class,
- &cpufreq_sysdev_driver);
+err_if_unreg:
+ subsys_interface_unregister(&cpufreq_interface);
err_null_driver:
spin_lock_irqsave(&cpufreq_driver_lock, flags);
cpufreq_driver = NULL;
@@ -1887,7 +1886,7 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver)
pr_debug("unregistering driver %s\n", driver->name);
- sysdev_driver_unregister(&cpu_sysdev_class, &cpufreq_sysdev_driver);
+ subsys_interface_unregister(&cpufreq_interface);
unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
spin_lock_irqsave(&cpufreq_driver_lock, flags);
@@ -1907,8 +1906,7 @@ static int __init cpufreq_core_init(void)
init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
}
- cpufreq_global_kobject = kobject_create_and_add("cpufreq",
- &cpu_sysdev_class.kset.kobj);
+ cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
BUG_ON(!cpufreq_global_kobject);
register_syscore_ops(&cpufreq_syscore_ops);
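The cpufreq.c changes are part of the removal of struct sys_device: per-CPU add/remove callbacks now take a struct device and are wired up through a subsys_interface on cpu_subsys instead of a sysdev_driver. A hedged sketch of that registration shape; the example interface is made up:

```c
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel.h>

/* Hypothetical per-CPU hooks using the subsys_interface API this patch
 * migrates cpufreq to. */
static int example_add_dev(struct device *dev, struct subsys_interface *sif)
{
	pr_info("example: cpu%u added\n", dev->id);
	return 0;
}

static int example_remove_dev(struct device *dev, struct subsys_interface *sif)
{
	pr_info("example: cpu%u removed\n", dev->id);
	return 0;
}

static struct subsys_interface example_interface = {
	.name		= "example",
	.subsys		= &cpu_subsys,
	.add_dev	= example_add_dev,
	.remove_dev	= example_remove_dev,
};

static int __init example_init(void)
{
	/* add_dev is called for every CPU already present at registration */
	return subsys_interface_register(&example_interface);
}
```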
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index c97b468ee9f7..235a340e81f2 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -95,27 +95,26 @@ static struct dbs_tuners {
.freq_step = 5,
};
-static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
- cputime64_t *wall)
+static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
- cputime64_t idle_time;
- cputime64_t cur_wall_time;
- cputime64_t busy_time;
+ u64 idle_time;
+ u64 cur_wall_time;
+ u64 busy_time;
cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
- busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user,
- kstat_cpu(cpu).cpustat.system);
- busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
- busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
- busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);
- busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice);
+ busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
- idle_time = cputime64_sub(cur_wall_time, busy_time);
+ idle_time = cur_wall_time - busy_time;
if (wall)
- *wall = (cputime64_t)jiffies_to_usecs(cur_wall_time);
+ *wall = jiffies_to_usecs(cur_wall_time);
- return (cputime64_t)jiffies_to_usecs(idle_time);
+ return jiffies_to_usecs(idle_time);
}
static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
@@ -272,7 +271,7 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
&dbs_info->prev_cpu_wall);
if (dbs_tuners_ins.ignore_nice)
- dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
+ dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
}
return count;
}
@@ -353,20 +352,20 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
- wall_time = (unsigned int) cputime64_sub(cur_wall_time,
- j_dbs_info->prev_cpu_wall);
+ wall_time = (unsigned int)
+ (cur_wall_time - j_dbs_info->prev_cpu_wall);
j_dbs_info->prev_cpu_wall = cur_wall_time;
- idle_time = (unsigned int) cputime64_sub(cur_idle_time,
- j_dbs_info->prev_cpu_idle);
+ idle_time = (unsigned int)
+ (cur_idle_time - j_dbs_info->prev_cpu_idle);
j_dbs_info->prev_cpu_idle = cur_idle_time;
if (dbs_tuners_ins.ignore_nice) {
- cputime64_t cur_nice;
+ u64 cur_nice;
unsigned long cur_nice_jiffies;
- cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice,
- j_dbs_info->prev_cpu_nice);
+ cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] -
+ j_dbs_info->prev_cpu_nice;
/*
* Assumption: nice time between sampling periods will
* be less than 2^32 jiffies for 32 bit sys
@@ -374,7 +373,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
cur_nice_jiffies = (unsigned long)
cputime64_to_jiffies64(cur_nice);
- j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
+ j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
idle_time += jiffies_to_usecs(cur_nice_jiffies);
}
@@ -501,10 +500,9 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
&j_dbs_info->prev_cpu_wall);
- if (dbs_tuners_ins.ignore_nice) {
+ if (dbs_tuners_ins.ignore_nice)
j_dbs_info->prev_cpu_nice =
- kstat_cpu(j).cpustat.nice;
- }
+ kcpustat_cpu(j).cpustat[CPUTIME_NICE];
}
this_dbs_info->down_skip = 0;
this_dbs_info->requested_freq = policy->cur;
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index fa8af4ebb1d6..c3e0652520a1 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -119,27 +119,26 @@ static struct dbs_tuners {
.powersave_bias = 0,
};
-static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
- cputime64_t *wall)
+static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
- cputime64_t idle_time;
- cputime64_t cur_wall_time;
- cputime64_t busy_time;
+ u64 idle_time;
+ u64 cur_wall_time;
+ u64 busy_time;
cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
- busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user,
- kstat_cpu(cpu).cpustat.system);
- busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
- busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
- busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);
- busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice);
+ busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
- idle_time = cputime64_sub(cur_wall_time, busy_time);
+ idle_time = cur_wall_time - busy_time;
if (wall)
- *wall = (cputime64_t)jiffies_to_usecs(cur_wall_time);
+ *wall = jiffies_to_usecs(cur_wall_time);
- return (cputime64_t)jiffies_to_usecs(idle_time);
+ return jiffies_to_usecs(idle_time);
}
static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
@@ -345,7 +344,7 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
&dbs_info->prev_cpu_wall);
if (dbs_tuners_ins.ignore_nice)
- dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
+ dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
}
return count;
@@ -442,24 +441,24 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time);
- wall_time = (unsigned int) cputime64_sub(cur_wall_time,
- j_dbs_info->prev_cpu_wall);
+ wall_time = (unsigned int)
+ (cur_wall_time - j_dbs_info->prev_cpu_wall);
j_dbs_info->prev_cpu_wall = cur_wall_time;
- idle_time = (unsigned int) cputime64_sub(cur_idle_time,
- j_dbs_info->prev_cpu_idle);
+ idle_time = (unsigned int)
+ (cur_idle_time - j_dbs_info->prev_cpu_idle);
j_dbs_info->prev_cpu_idle = cur_idle_time;
- iowait_time = (unsigned int) cputime64_sub(cur_iowait_time,
- j_dbs_info->prev_cpu_iowait);
+ iowait_time = (unsigned int)
+ (cur_iowait_time - j_dbs_info->prev_cpu_iowait);
j_dbs_info->prev_cpu_iowait = cur_iowait_time;
if (dbs_tuners_ins.ignore_nice) {
- cputime64_t cur_nice;
+ u64 cur_nice;
unsigned long cur_nice_jiffies;
- cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice,
- j_dbs_info->prev_cpu_nice);
+ cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] -
+ j_dbs_info->prev_cpu_nice;
/*
* Assumption: nice time between sampling periods will
* be less than 2^32 jiffies for 32 bit sys
@@ -467,7 +466,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
cur_nice_jiffies = (unsigned long)
cputime64_to_jiffies64(cur_nice);
- j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
+ j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
idle_time += jiffies_to_usecs(cur_nice_jiffies);
}
@@ -646,10 +645,9 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
&j_dbs_info->prev_cpu_wall);
- if (dbs_tuners_ins.ignore_nice) {
+ if (dbs_tuners_ins.ignore_nice)
j_dbs_info->prev_cpu_nice =
- kstat_cpu(j).cpustat.nice;
- }
+ kcpustat_cpu(j).cpustat[CPUTIME_NICE];
}
this_dbs_info->cpu = cpu;
this_dbs_info->rate_mult = 1;
@@ -715,11 +713,10 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
static int __init cpufreq_gov_dbs_init(void)
{
- cputime64_t wall;
u64 idle_time;
int cpu = get_cpu();
- idle_time = get_cpu_idle_time_us(cpu, &wall);
+ idle_time = get_cpu_idle_time_us(cpu, NULL);
put_cpu();
if (idle_time != -1ULL) {
/* Idle micro accounting is supported. Use finer thresholds */
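Both governors switch from the removed kstat_cpu(cpu).cpustat.&lt;field&gt; layout and the cputime64_add()/cputime64_sub() helpers to the kcpustat_cpu(cpu).cpustat[CPUTIME_*] array and plain u64 arithmetic. A small userspace sketch of that idle-time bookkeeping; the numbers are made up:

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative only: with the per-CPU stats held as plain u64 counters,
 * busy time is the sum of the accounted states and idle time is whatever
 * remains of wall time, using ordinary subtraction instead of
 * cputime64_sub(). */
int main(void)
{
	uint64_t cpustat[6] = { 500, 300, 40, 30, 10, 20 };	/* user, system,
								   irq, softirq,
								   steal, nice */
	uint64_t cur_wall_time = 1500;
	uint64_t busy_time = 0;
	uint64_t idle_time;
	int i;

	for (i = 0; i < 6; i++)
		busy_time += cpustat[i];

	idle_time = cur_wall_time - busy_time;
	printf("busy=%llu idle=%llu\n",
	       (unsigned long long)busy_time,
	       (unsigned long long)idle_time);
	return 0;
}
```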
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index c5072a91e848..b40ee1403be9 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -11,7 +11,6 @@
#include <linux/kernel.h>
#include <linux/slab.h>
-#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/sysfs.h>
#include <linux/cpufreq.h>
@@ -61,9 +60,8 @@ static int cpufreq_stats_update(unsigned int cpu)
spin_lock(&cpufreq_stats_lock);
stat = per_cpu(cpufreq_stats_table, cpu);
if (stat->time_in_state)
- stat->time_in_state[stat->last_index] =
- cputime64_add(stat->time_in_state[stat->last_index],
- cputime_sub(cur_time, stat->last_time));
+ stat->time_in_state[stat->last_index] +=
+ cur_time - stat->last_time;
stat->last_time = cur_time;
spin_unlock(&cpufreq_stats_lock);
return 0;
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index f231015904c0..bedac1aa9be3 100644
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -47,9 +47,11 @@ userspace_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
if (!per_cpu(cpu_is_managed, freq->cpu))
return 0;
- pr_debug("saving cpu_cur_freq of cpu %u to be %u kHz\n",
- freq->cpu, freq->new);
- per_cpu(cpu_cur_freq, freq->cpu) = freq->new;
+ if (val == CPUFREQ_POSTCHANGE) {
+ pr_debug("saving cpu_cur_freq of cpu %u to be %u kHz\n",
+ freq->cpu, freq->new);
+ per_cpu(cpu_cur_freq, freq->cpu) = freq->new;
+ }
return 0;
}
diff --git a/drivers/cpufreq/exynos-cpufreq.c b/drivers/cpufreq/exynos-cpufreq.c
new file mode 100644
index 000000000000..5467879ea07d
--- /dev/null
+++ b/drivers/cpufreq/exynos-cpufreq.c
@@ -0,0 +1,290 @@
+/*
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * EXYNOS - CPU frequency scaling support for EXYNOS series
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/regulator/consumer.h>
+#include <linux/cpufreq.h>
+#include <linux/suspend.h>
+
+#include <mach/cpufreq.h>
+
+#include <plat/cpu.h>
+
+static struct exynos_dvfs_info *exynos_info;
+
+static struct regulator *arm_regulator;
+static struct cpufreq_freqs freqs;
+
+static unsigned int locking_frequency;
+static bool frequency_locked;
+static DEFINE_MUTEX(cpufreq_lock);
+
+int exynos_verify_speed(struct cpufreq_policy *policy)
+{
+ return cpufreq_frequency_table_verify(policy,
+ exynos_info->freq_table);
+}
+
+unsigned int exynos_getspeed(unsigned int cpu)
+{
+ return clk_get_rate(exynos_info->cpu_clk) / 1000;
+}
+
+static int exynos_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ unsigned int index, old_index;
+ unsigned int arm_volt, safe_arm_volt = 0;
+ int ret = 0;
+ struct cpufreq_frequency_table *freq_table = exynos_info->freq_table;
+ unsigned int *volt_table = exynos_info->volt_table;
+ unsigned int mpll_freq_khz = exynos_info->mpll_freq_khz;
+
+ mutex_lock(&cpufreq_lock);
+
+ freqs.old = policy->cur;
+
+ if (frequency_locked && target_freq != locking_frequency) {
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ if (cpufreq_frequency_table_target(policy, freq_table,
+ freqs.old, relation, &old_index)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (cpufreq_frequency_table_target(policy, freq_table,
+ target_freq, relation, &index)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ freqs.new = freq_table[index].frequency;
+ freqs.cpu = policy->cpu;
+
+ /*
+	 * The ARM clock source will temporarily be switched from APLL
+	 * to MPLL. To support this level, the regulator must be set to
+	 * the required voltage first.
+ */
+ if (exynos_info->need_apll_change != NULL) {
+ if (exynos_info->need_apll_change(old_index, index) &&
+ (freq_table[index].frequency < mpll_freq_khz) &&
+ (freq_table[old_index].frequency < mpll_freq_khz))
+ safe_arm_volt = volt_table[exynos_info->pll_safe_idx];
+ }
+ arm_volt = volt_table[index];
+
+ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+
+ /* When the new frequency is higher than current frequency */
+ if ((freqs.new > freqs.old) && !safe_arm_volt) {
+ /* Firstly, voltage up to increase frequency */
+ regulator_set_voltage(arm_regulator, arm_volt,
+ arm_volt);
+ }
+
+ if (safe_arm_volt)
+ regulator_set_voltage(arm_regulator, safe_arm_volt,
+ safe_arm_volt);
+ if (freqs.new != freqs.old)
+ exynos_info->set_freq(old_index, index);
+
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+
+ /* When the new frequency is lower than current frequency */
+ if ((freqs.new < freqs.old) ||
+ ((freqs.new > freqs.old) && safe_arm_volt)) {
+ /* down the voltage after frequency change */
+ regulator_set_voltage(arm_regulator, arm_volt,
+ arm_volt);
+ }
+
+out:
+ mutex_unlock(&cpufreq_lock);
+
+ return ret;
+}
+
+#ifdef CONFIG_PM
+static int exynos_cpufreq_suspend(struct cpufreq_policy *policy)
+{
+ return 0;
+}
+
+static int exynos_cpufreq_resume(struct cpufreq_policy *policy)
+{
+ return 0;
+}
+#endif
+
+/**
+ * exynos_cpufreq_pm_notifier - block CPUFREQ's activities in suspend-resume
+ * context
+ * @notifier
+ * @pm_event
+ * @v
+ *
+ * While frequency_locked == true, target() ignores every frequency but
+ * locking_frequency. The locking_frequency value is the initial frequency,
+ * which is set by the bootloader. In order to eliminate possible
+ * inconsistency in clock values, we save and restore frequencies during
+ * suspend and resume and block CPUFREQ activities. Note that the standard
+ * suspend/resume cannot be used as they are too deep (syscore_ops) for
+ * regulator actions.
+ */
+static int exynos_cpufreq_pm_notifier(struct notifier_block *notifier,
+ unsigned long pm_event, void *v)
+{
+ struct cpufreq_policy *policy = cpufreq_cpu_get(0); /* boot CPU */
+ static unsigned int saved_frequency;
+ unsigned int temp;
+
+ mutex_lock(&cpufreq_lock);
+ switch (pm_event) {
+ case PM_SUSPEND_PREPARE:
+ if (frequency_locked)
+ goto out;
+
+ frequency_locked = true;
+
+ if (locking_frequency) {
+ saved_frequency = exynos_getspeed(0);
+
+ mutex_unlock(&cpufreq_lock);
+ exynos_target(policy, locking_frequency,
+ CPUFREQ_RELATION_H);
+ mutex_lock(&cpufreq_lock);
+ }
+ break;
+
+ case PM_POST_SUSPEND:
+ if (saved_frequency) {
+ /*
+ * While frequency_locked, only locking_frequency
+ * is valid for target(). In order to use
+ * saved_frequency while keeping frequency_locked,
+			 * we temporarily overwrite locking_frequency.
+ */
+ temp = locking_frequency;
+ locking_frequency = saved_frequency;
+
+ mutex_unlock(&cpufreq_lock);
+ exynos_target(policy, locking_frequency,
+ CPUFREQ_RELATION_H);
+ mutex_lock(&cpufreq_lock);
+
+ locking_frequency = temp;
+ }
+ frequency_locked = false;
+ break;
+ }
+out:
+ mutex_unlock(&cpufreq_lock);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block exynos_cpufreq_nb = {
+ .notifier_call = exynos_cpufreq_pm_notifier,
+};
+
+static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+ policy->cur = policy->min = policy->max = exynos_getspeed(policy->cpu);
+
+ cpufreq_frequency_table_get_attr(exynos_info->freq_table, policy->cpu);
+
+ /* set the transition latency value */
+ policy->cpuinfo.transition_latency = 100000;
+
+ /*
+	 * EXYNOS4 multi-core processors have two cores whose
+	 * frequencies cannot be set independently. Each CPU is bound
+	 * to the same speed, so a frequency change affects all CPUs.
+ */
+ if (num_online_cpus() == 1) {
+ cpumask_copy(policy->related_cpus, cpu_possible_mask);
+ cpumask_copy(policy->cpus, cpu_online_mask);
+ } else {
+ cpumask_setall(policy->cpus);
+ }
+
+ return cpufreq_frequency_table_cpuinfo(policy, exynos_info->freq_table);
+}
+
+static struct cpufreq_driver exynos_driver = {
+ .flags = CPUFREQ_STICKY,
+ .verify = exynos_verify_speed,
+ .target = exynos_target,
+ .get = exynos_getspeed,
+ .init = exynos_cpufreq_cpu_init,
+ .name = "exynos_cpufreq",
+#ifdef CONFIG_PM
+ .suspend = exynos_cpufreq_suspend,
+ .resume = exynos_cpufreq_resume,
+#endif
+};
+
+static int __init exynos_cpufreq_init(void)
+{
+ int ret = -EINVAL;
+
+ exynos_info = kzalloc(sizeof(struct exynos_dvfs_info), GFP_KERNEL);
+ if (!exynos_info)
+ return -ENOMEM;
+
+ if (soc_is_exynos4210())
+ ret = exynos4210_cpufreq_init(exynos_info);
+ else
+ pr_err("%s: CPU type not found\n", __func__);
+
+ if (ret)
+ goto err_vdd_arm;
+
+ if (exynos_info->set_freq == NULL) {
+ pr_err("%s: No set_freq function (ERR)\n", __func__);
+ goto err_vdd_arm;
+ }
+
+ arm_regulator = regulator_get(NULL, "vdd_arm");
+ if (IS_ERR(arm_regulator)) {
+ pr_err("%s: failed to get resource vdd_arm\n", __func__);
+ goto err_vdd_arm;
+ }
+
+ register_pm_notifier(&exynos_cpufreq_nb);
+
+ if (cpufreq_register_driver(&exynos_driver)) {
+ pr_err("%s: failed to register cpufreq driver\n", __func__);
+ goto err_cpufreq;
+ }
+
+ return 0;
+err_cpufreq:
+ unregister_pm_notifier(&exynos_cpufreq_nb);
+
+ if (!IS_ERR(arm_regulator))
+ regulator_put(arm_regulator);
+err_vdd_arm:
+ kfree(exynos_info);
+ pr_debug("%s: failed initialization\n", __func__);
+ return -EINVAL;
+}
+late_initcall(exynos_cpufreq_init);
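The new common driver's exynos_target() orders regulator and clock operations so the CPU is never run above the currently supplied voltage: raise the voltage before a frequency increase, lower it only after a decrease (with an extra safe voltage when the clock is temporarily rerouted through MPLL). A hedged userspace sketch of just that ordering, ignoring the MPLL case:

```c
#include <stdio.h>

/* Hypothetical stand-ins for regulator_set_voltage() and set_freq(). */
static void set_voltage(unsigned int uv)   { printf("voltage -> %u uV\n", uv); }
static void set_frequency(unsigned int khz){ printf("freq    -> %u kHz\n", khz); }

static void dvfs_transition(unsigned int old_khz, unsigned int new_khz,
			    unsigned int new_uv)
{
	if (new_khz > old_khz)
		set_voltage(new_uv);	/* voltage up before speeding up */

	if (new_khz != old_khz)
		set_frequency(new_khz);

	if (new_khz < old_khz)
		set_voltage(new_uv);	/* voltage down after slowing down */
}

int main(void)
{
	dvfs_transition(800000, 1200000, 1250000);
	dvfs_transition(1200000, 500000, 975000);
	return 0;
}
```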
diff --git a/drivers/cpufreq/exynos4210-cpufreq.c b/drivers/cpufreq/exynos4210-cpufreq.c
index ab9741fab92e..065da5b702f1 100644
--- a/drivers/cpufreq/exynos4210-cpufreq.c
+++ b/drivers/cpufreq/exynos4210-cpufreq.c
@@ -2,61 +2,52 @@
* Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
- * EXYNOS4 - CPU frequency scaling support
+ * EXYNOS4210 - CPU frequency scaling support
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-#include <linux/types.h>
+#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/slab.h>
-#include <linux/regulator/consumer.h>
#include <linux/cpufreq.h>
-#include <linux/notifier.h>
-#include <linux/suspend.h>
-#include <mach/map.h>
#include <mach/regs-clock.h>
-#include <mach/regs-mem.h>
+#include <mach/cpufreq.h>
-#include <plat/clock.h>
-#include <plat/pm.h>
+#define CPUFREQ_LEVEL_END L5
+
+static int max_support_idx = L0;
+static int min_support_idx = (CPUFREQ_LEVEL_END - 1);
static struct clk *cpu_clk;
static struct clk *moutcore;
static struct clk *mout_mpll;
static struct clk *mout_apll;
-static struct regulator *arm_regulator;
-static struct regulator *int_regulator;
-
-static struct cpufreq_freqs freqs;
-static unsigned int memtype;
-
-static unsigned int locking_frequency;
-static bool frequency_locked;
-static DEFINE_MUTEX(cpufreq_lock);
-
-enum exynos4_memory_type {
- DDR2 = 4,
- LPDDR2,
- DDR3,
+struct cpufreq_clkdiv {
+ unsigned int index;
+ unsigned int clkdiv;
};
-enum cpufreq_level_index {
- L0, L1, L2, L3, CPUFREQ_LEVEL_END,
+static unsigned int exynos4210_volt_table[CPUFREQ_LEVEL_END] = {
+ 1250000, 1150000, 1050000, 975000, 950000,
};
-static struct cpufreq_frequency_table exynos4_freq_table[] = {
- {L0, 1000*1000},
- {L1, 800*1000},
- {L2, 400*1000},
- {L3, 100*1000},
+
+static struct cpufreq_clkdiv exynos4210_clkdiv_table[CPUFREQ_LEVEL_END];
+
+static struct cpufreq_frequency_table exynos4210_freq_table[] = {
+ {L0, 1200*1000},
+ {L1, 1000*1000},
+ {L2, 800*1000},
+ {L3, 500*1000},
+ {L4, 200*1000},
{0, CPUFREQ_TABLE_END},
};
@@ -67,17 +58,20 @@ static unsigned int clkdiv_cpu0[CPUFREQ_LEVEL_END][7] = {
* DIVATB, DIVPCLK_DBG, DIVAPLL }
*/
- /* ARM L0: 1000MHz */
- { 0, 3, 7, 3, 3, 0, 1 },
+ /* ARM L0: 1200MHz */
+ { 0, 3, 7, 3, 4, 1, 7 },
- /* ARM L1: 800MHz */
- { 0, 3, 7, 3, 3, 0, 1 },
+ /* ARM L1: 1000MHz */
+ { 0, 3, 7, 3, 4, 1, 7 },
- /* ARM L2: 400MHz */
- { 0, 1, 3, 1, 3, 0, 1 },
+ /* ARM L2: 800MHz */
+ { 0, 3, 7, 3, 3, 1, 7 },
- /* ARM L3: 100MHz */
- { 0, 0, 1, 0, 3, 1, 1 },
+ /* ARM L3: 500MHz */
+ { 0, 3, 7, 3, 3, 1, 7 },
+
+ /* ARM L4: 200MHz */
+ { 0, 1, 3, 1, 3, 1, 0 },
};
static unsigned int clkdiv_cpu1[CPUFREQ_LEVEL_END][2] = {
@@ -86,147 +80,46 @@ static unsigned int clkdiv_cpu1[CPUFREQ_LEVEL_END][2] = {
* { DIVCOPY, DIVHPM }
*/
- /* ARM L0: 1000MHz */
- { 3, 0 },
+ /* ARM L0: 1200MHz */
+ { 5, 0 },
- /* ARM L1: 800MHz */
- { 3, 0 },
+ /* ARM L1: 1000MHz */
+ { 4, 0 },
- /* ARM L2: 400MHz */
+ /* ARM L2: 800MHz */
{ 3, 0 },
- /* ARM L3: 100MHz */
+ /* ARM L3: 500MHz */
{ 3, 0 },
-};
-
-static unsigned int clkdiv_dmc0[CPUFREQ_LEVEL_END][8] = {
- /*
- * Clock divider value for following
- * { DIVACP, DIVACP_PCLK, DIVDPHY, DIVDMC, DIVDMCD
- * DIVDMCP, DIVCOPY2, DIVCORE_TIMERS }
- */
-
- /* DMC L0: 400MHz */
- { 3, 1, 1, 1, 1, 1, 3, 1 },
-
- /* DMC L1: 400MHz */
- { 3, 1, 1, 1, 1, 1, 3, 1 },
-
- /* DMC L2: 266.7MHz */
- { 7, 1, 1, 2, 1, 1, 3, 1 },
-
- /* DMC L3: 200MHz */
- { 7, 1, 1, 3, 1, 1, 3, 1 },
-};
-
-static unsigned int clkdiv_top[CPUFREQ_LEVEL_END][5] = {
- /*
- * Clock divider value for following
- * { DIVACLK200, DIVACLK100, DIVACLK160, DIVACLK133, DIVONENAND }
- */
- /* ACLK200 L0: 200MHz */
- { 3, 7, 4, 5, 1 },
-
- /* ACLK200 L1: 200MHz */
- { 3, 7, 4, 5, 1 },
-
- /* ACLK200 L2: 160MHz */
- { 4, 7, 5, 7, 1 },
-
- /* ACLK200 L3: 133.3MHz */
- { 5, 7, 7, 7, 1 },
-};
-
-static unsigned int clkdiv_lr_bus[CPUFREQ_LEVEL_END][2] = {
- /*
- * Clock divider value for following
- * { DIVGDL/R, DIVGPL/R }
- */
-
- /* ACLK_GDL/R L0: 200MHz */
- { 3, 1 },
-
- /* ACLK_GDL/R L1: 200MHz */
- { 3, 1 },
-
- /* ACLK_GDL/R L2: 160MHz */
- { 4, 1 },
-
- /* ACLK_GDL/R L3: 133.3MHz */
- { 5, 1 },
-};
-
-struct cpufreq_voltage_table {
- unsigned int index; /* any */
- unsigned int arm_volt; /* uV */
- unsigned int int_volt;
+ /* ARM L4: 200MHz */
+ { 3, 0 },
};
-static struct cpufreq_voltage_table exynos4_volt_table[CPUFREQ_LEVEL_END] = {
- {
- .index = L0,
- .arm_volt = 1200000,
- .int_volt = 1100000,
- }, {
- .index = L1,
- .arm_volt = 1100000,
- .int_volt = 1100000,
- }, {
- .index = L2,
- .arm_volt = 1000000,
- .int_volt = 1000000,
- }, {
- .index = L3,
- .arm_volt = 900000,
- .int_volt = 1000000,
- },
-};
+static unsigned int exynos4210_apll_pms_table[CPUFREQ_LEVEL_END] = {
+ /* APLL FOUT L0: 1200MHz */
+ ((150 << 16) | (3 << 8) | 1),
-static unsigned int exynos4_apll_pms_table[CPUFREQ_LEVEL_END] = {
- /* APLL FOUT L0: 1000MHz */
+ /* APLL FOUT L1: 1000MHz */
((250 << 16) | (6 << 8) | 1),
- /* APLL FOUT L1: 800MHz */
+ /* APLL FOUT L2: 800MHz */
((200 << 16) | (6 << 8) | 1),
- /* APLL FOUT L2 : 400MHz */
- ((200 << 16) | (6 << 8) | 2),
+ /* APLL FOUT L3: 500MHz */
+ ((250 << 16) | (6 << 8) | 2),
- /* APLL FOUT L3: 100MHz */
- ((200 << 16) | (6 << 8) | 4),
+ /* APLL FOUT L4: 200MHz */
+ ((200 << 16) | (6 << 8) | 3),
};
-static int exynos4_verify_speed(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, exynos4_freq_table);
-}
-
-static unsigned int exynos4_getspeed(unsigned int cpu)
-{
- return clk_get_rate(cpu_clk) / 1000;
-}
-
-static void exynos4_set_clkdiv(unsigned int div_index)
+static void exynos4210_set_clkdiv(unsigned int div_index)
{
unsigned int tmp;
/* Change Divider - CPU0 */
- tmp = __raw_readl(S5P_CLKDIV_CPU);
-
- tmp &= ~(S5P_CLKDIV_CPU0_CORE_MASK | S5P_CLKDIV_CPU0_COREM0_MASK |
- S5P_CLKDIV_CPU0_COREM1_MASK | S5P_CLKDIV_CPU0_PERIPH_MASK |
- S5P_CLKDIV_CPU0_ATB_MASK | S5P_CLKDIV_CPU0_PCLKDBG_MASK |
- S5P_CLKDIV_CPU0_APLL_MASK);
-
- tmp |= ((clkdiv_cpu0[div_index][0] << S5P_CLKDIV_CPU0_CORE_SHIFT) |
- (clkdiv_cpu0[div_index][1] << S5P_CLKDIV_CPU0_COREM0_SHIFT) |
- (clkdiv_cpu0[div_index][2] << S5P_CLKDIV_CPU0_COREM1_SHIFT) |
- (clkdiv_cpu0[div_index][3] << S5P_CLKDIV_CPU0_PERIPH_SHIFT) |
- (clkdiv_cpu0[div_index][4] << S5P_CLKDIV_CPU0_ATB_SHIFT) |
- (clkdiv_cpu0[div_index][5] << S5P_CLKDIV_CPU0_PCLKDBG_SHIFT) |
- (clkdiv_cpu0[div_index][6] << S5P_CLKDIV_CPU0_APLL_SHIFT));
+ tmp = exynos4210_clkdiv_table[div_index].clkdiv;
__raw_writel(tmp, S5P_CLKDIV_CPU);
@@ -248,83 +141,9 @@ static void exynos4_set_clkdiv(unsigned int div_index)
do {
tmp = __raw_readl(S5P_CLKDIV_STATCPU1);
} while (tmp & 0x11);
-
- /* Change Divider - DMC0 */
-
- tmp = __raw_readl(S5P_CLKDIV_DMC0);
-
- tmp &= ~(S5P_CLKDIV_DMC0_ACP_MASK | S5P_CLKDIV_DMC0_ACPPCLK_MASK |
- S5P_CLKDIV_DMC0_DPHY_MASK | S5P_CLKDIV_DMC0_DMC_MASK |
- S5P_CLKDIV_DMC0_DMCD_MASK | S5P_CLKDIV_DMC0_DMCP_MASK |
- S5P_CLKDIV_DMC0_COPY2_MASK | S5P_CLKDIV_DMC0_CORETI_MASK);
-
- tmp |= ((clkdiv_dmc0[div_index][0] << S5P_CLKDIV_DMC0_ACP_SHIFT) |
- (clkdiv_dmc0[div_index][1] << S5P_CLKDIV_DMC0_ACPPCLK_SHIFT) |
- (clkdiv_dmc0[div_index][2] << S5P_CLKDIV_DMC0_DPHY_SHIFT) |
- (clkdiv_dmc0[div_index][3] << S5P_CLKDIV_DMC0_DMC_SHIFT) |
- (clkdiv_dmc0[div_index][4] << S5P_CLKDIV_DMC0_DMCD_SHIFT) |
- (clkdiv_dmc0[div_index][5] << S5P_CLKDIV_DMC0_DMCP_SHIFT) |
- (clkdiv_dmc0[div_index][6] << S5P_CLKDIV_DMC0_COPY2_SHIFT) |
- (clkdiv_dmc0[div_index][7] << S5P_CLKDIV_DMC0_CORETI_SHIFT));
-
- __raw_writel(tmp, S5P_CLKDIV_DMC0);
-
- do {
- tmp = __raw_readl(S5P_CLKDIV_STAT_DMC0);
- } while (tmp & 0x11111111);
-
- /* Change Divider - TOP */
-
- tmp = __raw_readl(S5P_CLKDIV_TOP);
-
- tmp &= ~(S5P_CLKDIV_TOP_ACLK200_MASK | S5P_CLKDIV_TOP_ACLK100_MASK |
- S5P_CLKDIV_TOP_ACLK160_MASK | S5P_CLKDIV_TOP_ACLK133_MASK |
- S5P_CLKDIV_TOP_ONENAND_MASK);
-
- tmp |= ((clkdiv_top[div_index][0] << S5P_CLKDIV_TOP_ACLK200_SHIFT) |
- (clkdiv_top[div_index][1] << S5P_CLKDIV_TOP_ACLK100_SHIFT) |
- (clkdiv_top[div_index][2] << S5P_CLKDIV_TOP_ACLK160_SHIFT) |
- (clkdiv_top[div_index][3] << S5P_CLKDIV_TOP_ACLK133_SHIFT) |
- (clkdiv_top[div_index][4] << S5P_CLKDIV_TOP_ONENAND_SHIFT));
-
- __raw_writel(tmp, S5P_CLKDIV_TOP);
-
- do {
- tmp = __raw_readl(S5P_CLKDIV_STAT_TOP);
- } while (tmp & 0x11111);
-
- /* Change Divider - LEFTBUS */
-
- tmp = __raw_readl(S5P_CLKDIV_LEFTBUS);
-
- tmp &= ~(S5P_CLKDIV_BUS_GDLR_MASK | S5P_CLKDIV_BUS_GPLR_MASK);
-
- tmp |= ((clkdiv_lr_bus[div_index][0] << S5P_CLKDIV_BUS_GDLR_SHIFT) |
- (clkdiv_lr_bus[div_index][1] << S5P_CLKDIV_BUS_GPLR_SHIFT));
-
- __raw_writel(tmp, S5P_CLKDIV_LEFTBUS);
-
- do {
- tmp = __raw_readl(S5P_CLKDIV_STAT_LEFTBUS);
- } while (tmp & 0x11);
-
- /* Change Divider - RIGHTBUS */
-
- tmp = __raw_readl(S5P_CLKDIV_RIGHTBUS);
-
- tmp &= ~(S5P_CLKDIV_BUS_GDLR_MASK | S5P_CLKDIV_BUS_GPLR_MASK);
-
- tmp |= ((clkdiv_lr_bus[div_index][0] << S5P_CLKDIV_BUS_GDLR_SHIFT) |
- (clkdiv_lr_bus[div_index][1] << S5P_CLKDIV_BUS_GPLR_SHIFT));
-
- __raw_writel(tmp, S5P_CLKDIV_RIGHTBUS);
-
- do {
- tmp = __raw_readl(S5P_CLKDIV_STAT_RIGHTBUS);
- } while (tmp & 0x11);
}
-static void exynos4_set_apll(unsigned int index)
+static void exynos4210_set_apll(unsigned int index)
{
unsigned int tmp;
@@ -343,7 +162,7 @@ static void exynos4_set_apll(unsigned int index)
/* 3. Change PLL PMS values */
tmp = __raw_readl(S5P_APLL_CON0);
tmp &= ~((0x3ff << 16) | (0x3f << 8) | (0x7 << 0));
- tmp |= exynos4_apll_pms_table[index];
+ tmp |= exynos4210_apll_pms_table[index];
__raw_writel(tmp, S5P_APLL_CON0);
/* 4. wait_lock_time */
@@ -360,328 +179,126 @@ static void exynos4_set_apll(unsigned int index)
} while (tmp != (0x1 << S5P_CLKSRC_CPU_MUXCORE_SHIFT));
}
-static void exynos4_set_frequency(unsigned int old_index, unsigned int new_index)
+bool exynos4210_pms_change(unsigned int old_index, unsigned int new_index)
+{
+ unsigned int old_pm = (exynos4210_apll_pms_table[old_index] >> 8);
+ unsigned int new_pm = (exynos4210_apll_pms_table[new_index] >> 8);
+
+ return (old_pm == new_pm) ? 0 : 1;
+}
+
+static void exynos4210_set_frequency(unsigned int old_index,
+ unsigned int new_index)
{
unsigned int tmp;
if (old_index > new_index) {
- /* The frequency changing to L0 needs to change apll */
- if (freqs.new == exynos4_freq_table[L0].frequency) {
- /* 1. Change the system clock divider values */
- exynos4_set_clkdiv(new_index);
-
- /* 2. Change the apll m,p,s value */
- exynos4_set_apll(new_index);
- } else {
+ if (!exynos4210_pms_change(old_index, new_index)) {
/* 1. Change the system clock divider values */
- exynos4_set_clkdiv(new_index);
+ exynos4210_set_clkdiv(new_index);
/* 2. Change just s value in apll m,p,s value */
tmp = __raw_readl(S5P_APLL_CON0);
tmp &= ~(0x7 << 0);
- tmp |= (exynos4_apll_pms_table[new_index] & 0x7);
+ tmp |= (exynos4210_apll_pms_table[new_index] & 0x7);
__raw_writel(tmp, S5P_APLL_CON0);
- }
- }
-
- else if (old_index < new_index) {
- /* The frequency changing from L0 needs to change apll */
- if (freqs.old == exynos4_freq_table[L0].frequency) {
- /* 1. Change the apll m,p,s value */
- exynos4_set_apll(new_index);
-
- /* 2. Change the system clock divider values */
- exynos4_set_clkdiv(new_index);
} else {
+ /* Clock Configuration Procedure */
+ /* 1. Change the system clock divider values */
+ exynos4210_set_clkdiv(new_index);
+ /* 2. Change the apll m,p,s value */
+ exynos4210_set_apll(new_index);
+ }
+ } else if (old_index < new_index) {
+ if (!exynos4210_pms_change(old_index, new_index)) {
/* 1. Change just s value in apll m,p,s value */
tmp = __raw_readl(S5P_APLL_CON0);
tmp &= ~(0x7 << 0);
- tmp |= (exynos4_apll_pms_table[new_index] & 0x7);
+ tmp |= (exynos4210_apll_pms_table[new_index] & 0x7);
__raw_writel(tmp, S5P_APLL_CON0);
/* 2. Change the system clock divider values */
- exynos4_set_clkdiv(new_index);
+ exynos4210_set_clkdiv(new_index);
+ } else {
+ /* Clock Configuration Procedure */
+ /* 1. Change the apll m,p,s value */
+ exynos4210_set_apll(new_index);
+ /* 2. Change the system clock divider values */
+ exynos4210_set_clkdiv(new_index);
}
}
}
-static int exynos4_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
-{
- unsigned int index, old_index;
- unsigned int arm_volt, int_volt;
- int err = -EINVAL;
-
- freqs.old = exynos4_getspeed(policy->cpu);
-
- mutex_lock(&cpufreq_lock);
-
- if (frequency_locked && target_freq != locking_frequency) {
- err = -EAGAIN;
- goto out;
- }
-
- if (cpufreq_frequency_table_target(policy, exynos4_freq_table,
- freqs.old, relation, &old_index))
- goto out;
-
- if (cpufreq_frequency_table_target(policy, exynos4_freq_table,
- target_freq, relation, &index))
- goto out;
-
- err = 0;
-
- freqs.new = exynos4_freq_table[index].frequency;
- freqs.cpu = policy->cpu;
-
- if (freqs.new == freqs.old)
- goto out;
-
- /* get the voltage value */
- arm_volt = exynos4_volt_table[index].arm_volt;
- int_volt = exynos4_volt_table[index].int_volt;
-
- cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
-
- /* control regulator */
- if (freqs.new > freqs.old) {
- /* Voltage up */
- regulator_set_voltage(arm_regulator, arm_volt, arm_volt);
- regulator_set_voltage(int_regulator, int_volt, int_volt);
- }
-
- /* Clock Configuration Procedure */
- exynos4_set_frequency(old_index, index);
-
- /* control regulator */
- if (freqs.new < freqs.old) {
- /* Voltage down */
- regulator_set_voltage(arm_regulator, arm_volt, arm_volt);
- regulator_set_voltage(int_regulator, int_volt, int_volt);
- }
-
- cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
-
-out:
- mutex_unlock(&cpufreq_lock);
- return err;
-}
-
-#ifdef CONFIG_PM
-/*
- * These suspend/resume are used as syscore_ops, it is already too
- * late to set regulator voltages at this stage.
- */
-static int exynos4_cpufreq_suspend(struct cpufreq_policy *policy)
-{
- return 0;
-}
-
-static int exynos4_cpufreq_resume(struct cpufreq_policy *policy)
+int exynos4210_cpufreq_init(struct exynos_dvfs_info *info)
{
- return 0;
-}
-#endif
-
-/**
- * exynos4_cpufreq_pm_notifier - block CPUFREQ's activities in suspend-resume
- * context
- * @notifier
- * @pm_event
- * @v
- *
- * While frequency_locked == true, target() ignores every frequency but
- * locking_frequency. The locking_frequency value is the initial frequency,
- * which is set by the bootloader. In order to eliminate possible
- * inconsistency in clock values, we save and restore frequencies during
- * suspend and resume and block CPUFREQ activities. Note that the standard
- * suspend/resume cannot be used as they are too deep (syscore_ops) for
- * regulator actions.
- */
-static int exynos4_cpufreq_pm_notifier(struct notifier_block *notifier,
- unsigned long pm_event, void *v)
-{
- struct cpufreq_policy *policy = cpufreq_cpu_get(0); /* boot CPU */
- static unsigned int saved_frequency;
- unsigned int temp;
-
- mutex_lock(&cpufreq_lock);
- switch (pm_event) {
- case PM_SUSPEND_PREPARE:
- if (frequency_locked)
- goto out;
- frequency_locked = true;
-
- if (locking_frequency) {
- saved_frequency = exynos4_getspeed(0);
-
- mutex_unlock(&cpufreq_lock);
- exynos4_target(policy, locking_frequency,
- CPUFREQ_RELATION_H);
- mutex_lock(&cpufreq_lock);
- }
-
- break;
- case PM_POST_SUSPEND:
-
- if (saved_frequency) {
- /*
- * While frequency_locked, only locking_frequency
- * is valid for target(). In order to use
- * saved_frequency while keeping frequency_locked,
- * we temporarly overwrite locking_frequency.
- */
- temp = locking_frequency;
- locking_frequency = saved_frequency;
-
- mutex_unlock(&cpufreq_lock);
- exynos4_target(policy, locking_frequency,
- CPUFREQ_RELATION_H);
- mutex_lock(&cpufreq_lock);
-
- locking_frequency = temp;
- }
-
- frequency_locked = false;
- break;
- }
-out:
- mutex_unlock(&cpufreq_lock);
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block exynos4_cpufreq_nb = {
- .notifier_call = exynos4_cpufreq_pm_notifier,
-};
-
-static int exynos4_cpufreq_cpu_init(struct cpufreq_policy *policy)
-{
- int ret;
-
- policy->cur = policy->min = policy->max = exynos4_getspeed(policy->cpu);
-
- cpufreq_frequency_table_get_attr(exynos4_freq_table, policy->cpu);
-
- /* set the transition latency value */
- policy->cpuinfo.transition_latency = 100000;
-
- /*
- * EXYNOS4 multi-core processors has 2 cores
- * that the frequency cannot be set independently.
- * Each cpu is bound to the same speed.
- * So the affected cpu is all of the cpus.
- */
- cpumask_setall(policy->cpus);
-
- ret = cpufreq_frequency_table_cpuinfo(policy, exynos4_freq_table);
- if (ret)
- return ret;
-
- cpufreq_frequency_table_get_attr(exynos4_freq_table, policy->cpu);
-
- return 0;
-}
-
-static int exynos4_cpufreq_cpu_exit(struct cpufreq_policy *policy)
-{
- cpufreq_frequency_table_put_attr(policy->cpu);
- return 0;
-}
-
-static struct freq_attr *exynos4_cpufreq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
-static struct cpufreq_driver exynos4_driver = {
- .flags = CPUFREQ_STICKY,
- .verify = exynos4_verify_speed,
- .target = exynos4_target,
- .get = exynos4_getspeed,
- .init = exynos4_cpufreq_cpu_init,
- .exit = exynos4_cpufreq_cpu_exit,
- .name = "exynos4_cpufreq",
- .attr = exynos4_cpufreq_attr,
-#ifdef CONFIG_PM
- .suspend = exynos4_cpufreq_suspend,
- .resume = exynos4_cpufreq_resume,
-#endif
-};
+ int i;
+ unsigned int tmp;
+ unsigned long rate;
-static int __init exynos4_cpufreq_init(void)
-{
cpu_clk = clk_get(NULL, "armclk");
if (IS_ERR(cpu_clk))
return PTR_ERR(cpu_clk);
- locking_frequency = exynos4_getspeed(0);
-
moutcore = clk_get(NULL, "moutcore");
if (IS_ERR(moutcore))
- goto out;
+ goto err_moutcore;
mout_mpll = clk_get(NULL, "mout_mpll");
if (IS_ERR(mout_mpll))
- goto out;
+ goto err_mout_mpll;
+
+ rate = clk_get_rate(mout_mpll) / 1000;
mout_apll = clk_get(NULL, "mout_apll");
if (IS_ERR(mout_apll))
- goto out;
+ goto err_mout_apll;
- arm_regulator = regulator_get(NULL, "vdd_arm");
- if (IS_ERR(arm_regulator)) {
- printk(KERN_ERR "failed to get resource %s\n", "vdd_arm");
- goto out;
- }
+ tmp = __raw_readl(S5P_CLKDIV_CPU);
- int_regulator = regulator_get(NULL, "vdd_int");
- if (IS_ERR(int_regulator)) {
- printk(KERN_ERR "failed to get resource %s\n", "vdd_int");
- goto out;
+ for (i = L0; i < CPUFREQ_LEVEL_END; i++) {
+ tmp &= ~(S5P_CLKDIV_CPU0_CORE_MASK |
+ S5P_CLKDIV_CPU0_COREM0_MASK |
+ S5P_CLKDIV_CPU0_COREM1_MASK |
+ S5P_CLKDIV_CPU0_PERIPH_MASK |
+ S5P_CLKDIV_CPU0_ATB_MASK |
+ S5P_CLKDIV_CPU0_PCLKDBG_MASK |
+ S5P_CLKDIV_CPU0_APLL_MASK);
+
+ tmp |= ((clkdiv_cpu0[i][0] << S5P_CLKDIV_CPU0_CORE_SHIFT) |
+ (clkdiv_cpu0[i][1] << S5P_CLKDIV_CPU0_COREM0_SHIFT) |
+ (clkdiv_cpu0[i][2] << S5P_CLKDIV_CPU0_COREM1_SHIFT) |
+ (clkdiv_cpu0[i][3] << S5P_CLKDIV_CPU0_PERIPH_SHIFT) |
+ (clkdiv_cpu0[i][4] << S5P_CLKDIV_CPU0_ATB_SHIFT) |
+ (clkdiv_cpu0[i][5] << S5P_CLKDIV_CPU0_PCLKDBG_SHIFT) |
+ (clkdiv_cpu0[i][6] << S5P_CLKDIV_CPU0_APLL_SHIFT));
+
+ exynos4210_clkdiv_table[i].clkdiv = tmp;
}
- /*
- * Check DRAM type.
- * Because DVFS level is different according to DRAM type.
- */
- memtype = __raw_readl(S5P_VA_DMC0 + S5P_DMC0_MEMCON_OFFSET);
- memtype = (memtype >> S5P_DMC0_MEMTYPE_SHIFT);
- memtype &= S5P_DMC0_MEMTYPE_MASK;
-
- if ((memtype < DDR2) && (memtype > DDR3)) {
- printk(KERN_ERR "%s: wrong memtype= 0x%x\n", __func__, memtype);
- goto out;
- } else {
- printk(KERN_DEBUG "%s: memtype= 0x%x\n", __func__, memtype);
- }
-
- register_pm_notifier(&exynos4_cpufreq_nb);
-
- return cpufreq_register_driver(&exynos4_driver);
-
-out:
- if (!IS_ERR(cpu_clk))
- clk_put(cpu_clk);
+ info->mpll_freq_khz = rate;
+ info->pm_lock_idx = L2;
+ info->pll_safe_idx = L2;
+ info->max_support_idx = max_support_idx;
+ info->min_support_idx = min_support_idx;
+ info->cpu_clk = cpu_clk;
+ info->volt_table = exynos4210_volt_table;
+ info->freq_table = exynos4210_freq_table;
+ info->set_freq = exynos4210_set_frequency;
+ info->need_apll_change = exynos4210_pms_change;
- if (!IS_ERR(moutcore))
- clk_put(moutcore);
+ return 0;
+err_mout_apll:
if (!IS_ERR(mout_mpll))
clk_put(mout_mpll);
+err_mout_mpll:
+ if (!IS_ERR(moutcore))
+ clk_put(moutcore);
+err_moutcore:
+ if (!IS_ERR(cpu_clk))
+ clk_put(cpu_clk);
- if (!IS_ERR(mout_apll))
- clk_put(mout_apll);
-
- if (!IS_ERR(arm_regulator))
- regulator_put(arm_regulator);
-
- if (!IS_ERR(int_regulator))
- regulator_put(int_regulator);
-
- printk(KERN_ERR "%s: failed initialization\n", __func__);
-
+ pr_debug("%s: failed initialization\n", __func__);
return -EINVAL;
}
-late_initcall(exynos4_cpufreq_init);
+EXPORT_SYMBOL(exynos4210_cpufreq_init);
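In the new exynos4210_pms_change() above, the packed APLL P/M/S words keep S in the low bits and P/M above bit 8, so shifting both table entries right by 8 and comparing tells whether a transition can touch only the S post-divider or needs the full exynos4210_set_apll() reprogram and relock. A standalone sketch using the table values from this patch (the PMS() helper is only for readability here, not a kernel macro):

/*
 * Standalone sketch of the P/M-vs-S check above.  The packed layout
 * (M at bit 16, P at bit 8, S at bit 0) follows the table in this patch.
 */
#include <stdbool.h>
#include <stdio.h>

#define PMS(m, p, s)	(((m) << 16) | ((p) << 8) | (s))

static const unsigned int pms_table[] = {
	PMS(150, 3, 1),	/* L0: 1200 MHz */
	PMS(250, 6, 1),	/* L1: 1000 MHz */
	PMS(200, 6, 1),	/* L2:  800 MHz */
	PMS(250, 6, 2),	/* L3:  500 MHz */
	PMS(200, 6, 3),	/* L4:  200 MHz */
};

/* True when P or M differ, i.e. a full APLL reprogram + relock is needed. */
static bool pms_change(unsigned int old_idx, unsigned int new_idx)
{
	return (pms_table[old_idx] >> 8) != (pms_table[new_idx] >> 8);
}

int main(void)
{
	printf("L1->L2: %s\n", pms_change(1, 2) ? "full APLL change" : "S only");
	printf("L1->L3: %s\n", pms_change(1, 3) ? "full APLL change" : "S only");
	return 0;
}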
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
new file mode 100644
index 000000000000..5d04c57aae30
--- /dev/null
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -0,0 +1,274 @@
+/*
+ * CPU frequency scaling for OMAP using OPP information
+ *
+ * Copyright (C) 2005 Nokia Corporation
+ * Written by Tony Lindgren <tony@atomide.com>
+ *
+ * Based on cpu-sa1110.c, Copyright (C) 2001 Russell King
+ *
+ * Copyright (C) 2007-2011 Texas Instruments, Inc.
+ * - OMAP3/4 support by Rajendra Nayak, Santosh Shilimkar
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/cpufreq.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/opp.h>
+#include <linux/cpu.h>
+#include <linux/module.h>
+
+#include <asm/system.h>
+#include <asm/smp_plat.h>
+#include <asm/cpu.h>
+
+#include <plat/clock.h>
+#include <plat/omap-pm.h>
+#include <plat/common.h>
+#include <plat/omap_device.h>
+
+#include <mach/hardware.h>
+
+#ifdef CONFIG_SMP
+struct lpj_info {
+ unsigned long ref;
+ unsigned int freq;
+};
+
+static DEFINE_PER_CPU(struct lpj_info, lpj_ref);
+static struct lpj_info global_lpj_ref;
+#endif
+
+static struct cpufreq_frequency_table *freq_table;
+static atomic_t freq_table_users = ATOMIC_INIT(0);
+static struct clk *mpu_clk;
+static char *mpu_clk_name;
+static struct device *mpu_dev;
+
+static int omap_verify_speed(struct cpufreq_policy *policy)
+{
+ if (!freq_table)
+ return -EINVAL;
+ return cpufreq_frequency_table_verify(policy, freq_table);
+}
+
+static unsigned int omap_getspeed(unsigned int cpu)
+{
+ unsigned long rate;
+
+ if (cpu >= NR_CPUS)
+ return 0;
+
+ rate = clk_get_rate(mpu_clk) / 1000;
+ return rate;
+}
+
+static int omap_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ unsigned int i;
+ int ret = 0;
+ struct cpufreq_freqs freqs;
+
+ if (!freq_table) {
+ dev_err(mpu_dev, "%s: cpu%d: no freq table!\n", __func__,
+ policy->cpu);
+ return -EINVAL;
+ }
+
+ ret = cpufreq_frequency_table_target(policy, freq_table, target_freq,
+ relation, &i);
+ if (ret) {
+ dev_dbg(mpu_dev, "%s: cpu%d: no freq match for %d(ret=%d)\n",
+ __func__, policy->cpu, target_freq, ret);
+ return ret;
+ }
+ freqs.new = freq_table[i].frequency;
+ if (!freqs.new) {
+ dev_err(mpu_dev, "%s: cpu%d: no match for freq %d\n", __func__,
+ policy->cpu, target_freq);
+ return -EINVAL;
+ }
+
+ freqs.old = omap_getspeed(policy->cpu);
+ freqs.cpu = policy->cpu;
+
+ if (freqs.old == freqs.new && policy->cur == freqs.new)
+ return ret;
+
+ /* notifiers */
+ for_each_cpu(i, policy->cpus) {
+ freqs.cpu = i;
+ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+ }
+
+#ifdef CONFIG_CPU_FREQ_DEBUG
+ pr_info("cpufreq-omap: transition: %u --> %u\n", freqs.old, freqs.new);
+#endif
+
+ ret = clk_set_rate(mpu_clk, freqs.new * 1000);
+ freqs.new = omap_getspeed(policy->cpu);
+
+#ifdef CONFIG_SMP
+ /*
+	 * Note that loops_per_jiffy is not updated by the cpufreq core on
+	 * SMP systems. So, update the per-CPU loops_per_jiffy value on
+	 * every frequency transition; all dependent CPUs need updating.
+ */
+ for_each_cpu(i, policy->cpus) {
+ struct lpj_info *lpj = &per_cpu(lpj_ref, i);
+ if (!lpj->freq) {
+ lpj->ref = per_cpu(cpu_data, i).loops_per_jiffy;
+ lpj->freq = freqs.old;
+ }
+
+ per_cpu(cpu_data, i).loops_per_jiffy =
+ cpufreq_scale(lpj->ref, lpj->freq, freqs.new);
+ }
+
+ /* And don't forget to adjust the global one */
+ if (!global_lpj_ref.freq) {
+ global_lpj_ref.ref = loops_per_jiffy;
+ global_lpj_ref.freq = freqs.old;
+ }
+ loops_per_jiffy = cpufreq_scale(global_lpj_ref.ref, global_lpj_ref.freq,
+ freqs.new);
+#endif
+
+ /* notifiers */
+ for_each_cpu(i, policy->cpus) {
+ freqs.cpu = i;
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+ }
+
+ return ret;
+}
+
+static inline void freq_table_free(void)
+{
+ if (atomic_dec_and_test(&freq_table_users))
+ opp_free_cpufreq_table(mpu_dev, &freq_table);
+}
+
+static int __cpuinit omap_cpu_init(struct cpufreq_policy *policy)
+{
+ int result = 0;
+
+ mpu_clk = clk_get(NULL, mpu_clk_name);
+ if (IS_ERR(mpu_clk))
+ return PTR_ERR(mpu_clk);
+
+ if (policy->cpu >= NR_CPUS) {
+ result = -EINVAL;
+ goto fail_ck;
+ }
+
+ policy->cur = policy->min = policy->max = omap_getspeed(policy->cpu);
+
+ if (atomic_inc_return(&freq_table_users) == 1)
+ result = opp_init_cpufreq_table(mpu_dev, &freq_table);
+
+ if (result) {
+ dev_err(mpu_dev, "%s: cpu%d: failed creating freq table[%d]\n",
+ __func__, policy->cpu, result);
+ goto fail_ck;
+ }
+
+ result = cpufreq_frequency_table_cpuinfo(policy, freq_table);
+ if (result)
+ goto fail_table;
+
+ cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
+
+ policy->min = policy->cpuinfo.min_freq;
+ policy->max = policy->cpuinfo.max_freq;
+ policy->cur = omap_getspeed(policy->cpu);
+
+ /*
+	 * On an OMAP SMP configuration, both processors share the voltage
+	 * and clock, so both CPUs need to be scaled together and hence
+	 * need software co-ordination. Use the cpufreq affected_cpus
+	 * interface to handle this scenario. The additional is_smp() check
+	 * keeps the SMP_ON_UP build working.
+ */
+ if (is_smp()) {
+ policy->shared_type = CPUFREQ_SHARED_TYPE_ANY;
+ cpumask_setall(policy->cpus);
+ }
+
+ /* FIXME: what's the actual transition time? */
+ policy->cpuinfo.transition_latency = 300 * 1000;
+
+ return 0;
+
+fail_table:
+ freq_table_free();
+fail_ck:
+ clk_put(mpu_clk);
+ return result;
+}
+
+static int omap_cpu_exit(struct cpufreq_policy *policy)
+{
+ freq_table_free();
+ clk_put(mpu_clk);
+ return 0;
+}
+
+static struct freq_attr *omap_cpufreq_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ NULL,
+};
+
+static struct cpufreq_driver omap_driver = {
+ .flags = CPUFREQ_STICKY,
+ .verify = omap_verify_speed,
+ .target = omap_target,
+ .get = omap_getspeed,
+ .init = omap_cpu_init,
+ .exit = omap_cpu_exit,
+ .name = "omap",
+ .attr = omap_cpufreq_attr,
+};
+
+static int __init omap_cpufreq_init(void)
+{
+ if (cpu_is_omap24xx())
+ mpu_clk_name = "virt_prcm_set";
+ else if (cpu_is_omap34xx())
+ mpu_clk_name = "dpll1_ck";
+ else if (cpu_is_omap44xx())
+ mpu_clk_name = "dpll_mpu_ck";
+
+ if (!mpu_clk_name) {
+ pr_err("%s: unsupported Silicon?\n", __func__);
+ return -EINVAL;
+ }
+
+ mpu_dev = omap_device_get_by_hwmod_name("mpu");
+ if (!mpu_dev) {
+ pr_warning("%s: unable to get the mpu device\n", __func__);
+ return -EINVAL;
+ }
+
+ return cpufreq_register_driver(&omap_driver);
+}
+
+static void __exit omap_cpufreq_exit(void)
+{
+ cpufreq_unregister_driver(&omap_driver);
+}
+
+MODULE_DESCRIPTION("cpufreq driver for OMAP SoCs");
+MODULE_LICENSE("GPL");
+module_init(omap_cpufreq_init);
+module_exit(omap_cpufreq_exit);
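The SMP branch of omap_target() rescales loops_per_jiffy proportionally to the frequency change via cpufreq_scale(), using the reference value captured on the first transition. A standalone sketch of that proportional scaling; the numbers are made up, and the real helper is cpufreq_scale() in the cpufreq core, not this function:

/* Standalone sketch of the loops_per_jiffy rescaling done above on SMP. */
#include <stdio.h>

static unsigned long scale_lpj(unsigned long ref_lpj, unsigned int ref_khz,
			       unsigned int new_khz)
{
	/* lpj scales linearly with CPU frequency */
	return (unsigned long)(((unsigned long long)ref_lpj * new_khz) / ref_khz);
}

int main(void)
{
	unsigned long ref_lpj = 4997120;	/* lpj captured at the reference rate */
	unsigned int ref_khz = 1000000;		/* 1.0 GHz */

	printf("600 MHz -> lpj %lu\n", scale_lpj(ref_lpj, ref_khz, 600000));
	printf("1.2 GHz -> lpj %lu\n", scale_lpj(ref_lpj, ref_khz, 1200000));
	return 0;
}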
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index bce576d7478e..8f9b2ceeec85 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -1,10 +1,11 @@
/*
- * (c) 2003-2010 Advanced Micro Devices, Inc.
+ * (c) 2003-2012 Advanced Micro Devices, Inc.
* Your use of this code is subject to the terms and conditions of the
* GNU general public license version 2. See "COPYING" or
* http://www.gnu.org/licenses/gpl.html
*
- * Support : mark.langsdorf@amd.com
+ * Maintainer:
+ * Andreas Herrmann <andreas.herrmann3@amd.com>
*
* Based on the powernow-k7.c module written by Dave Jones.
* (C) 2003 Dave Jones on behalf of SuSE Labs
@@ -16,12 +17,14 @@
* Valuable input gratefully received from Dave Jones, Pavel Machek,
* Dominik Brodowski, Jacob Shin, and others.
* Originally developed by Paul Devriendt.
- * Processor information obtained from Chapter 9 (Power and Thermal Management)
- * of the "BIOS and Kernel Developer's Guide for the AMD Athlon 64 and AMD
- * Opteron Processors" available for download from www.amd.com
*
- * Tables for specific CPUs can be inferred from
- * http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/30430.pdf
+ * Processor information obtained from Chapter 9 (Power and Thermal
+ * Management) of the "BIOS and Kernel Developer's Guide (BKDG) for
+ * the AMD Athlon 64 and AMD Opteron Processors" and section "2.x
+ * Power Management" in BKDGs for newer AMD CPU families.
+ *
+ * Tables for specific CPUs can be inferred from AMD's processor
+ * power and thermal data sheets, (e.g. 30417.pdf, 30430.pdf, 43375.pdf)
*/
#include <linux/kernel.h>
@@ -54,6 +57,9 @@ static DEFINE_PER_CPU(struct powernow_k8_data *, powernow_data);
static int cpu_family = CPU_OPTERON;
+/* array to map SW pstate number to acpi state */
+static u32 ps_to_as[8];
+
/* core performance boost */
static bool cpb_capable, cpb_enabled;
static struct msr __percpu *msrs;
@@ -80,9 +86,9 @@ static u32 find_khz_freq_from_fid(u32 fid)
}
static u32 find_khz_freq_from_pstate(struct cpufreq_frequency_table *data,
- u32 pstate)
+ u32 pstate)
{
- return data[pstate].frequency;
+ return data[ps_to_as[pstate]].frequency;
}
/* Return the vco fid for an input fid
@@ -926,23 +932,27 @@ static int fill_powernow_table_pstate(struct powernow_k8_data *data,
invalidate_entry(powernow_table, i);
continue;
}
- rdmsr(MSR_PSTATE_DEF_BASE + index, lo, hi);
- if (!(hi & HW_PSTATE_VALID_MASK)) {
- pr_debug("invalid pstate %d, ignoring\n", index);
- invalidate_entry(powernow_table, i);
- continue;
- }
- powernow_table[i].index = index;
+ ps_to_as[index] = i;
/* Frequency may be rounded for these */
if ((boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model < 10)
|| boot_cpu_data.x86 == 0x11) {
+
+ rdmsr(MSR_PSTATE_DEF_BASE + index, lo, hi);
+ if (!(hi & HW_PSTATE_VALID_MASK)) {
+ pr_debug("invalid pstate %d, ignoring\n", index);
+ invalidate_entry(powernow_table, i);
+ continue;
+ }
+
powernow_table[i].frequency =
freq_from_fid_did(lo & 0x3f, (lo >> 6) & 7);
} else
powernow_table[i].frequency =
data->acpi_data.states[i].core_frequency * 1000;
+
+ powernow_table[i].index = index;
}
return 0;
}
@@ -1189,7 +1199,8 @@ static int powernowk8_target(struct cpufreq_policy *pol,
powernow_k8_acpi_pst_values(data, newstate);
if (cpu_family == CPU_HW_PSTATE)
- ret = transition_frequency_pstate(data, newstate);
+ ret = transition_frequency_pstate(data,
+ data->powernow_table[newstate].index);
else
ret = transition_frequency_fidvid(data, newstate);
if (ret) {
@@ -1202,7 +1213,7 @@ static int powernowk8_target(struct cpufreq_policy *pol,
if (cpu_family == CPU_HW_PSTATE)
pol->cur = find_khz_freq_from_pstate(data->powernow_table,
- newstate);
+ data->powernow_table[newstate].index);
else
pol->cur = find_khz_freq_from_fid(data->currfid);
ret = 0;
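The new ps_to_as[] array above records, for each hardware P-state number, where that state ended up in the driver's frequency table once invalid ACPI entries have been dropped, so lookups such as find_khz_freq_from_pstate() go through the map instead of assuming the two numberings coincide. A standalone sketch with made-up frequencies:

/*
 * Standalone sketch of the ps_to_as[] indirection introduced above:
 * hardware P-state numbers and frequency-table positions no longer have
 * to line up once entries are skipped.  Values below are invented.
 */
#include <stdio.h>

#define NUM_PSTATES	4

struct freq_entry {
	unsigned int index;	/* hardware P-state number */
	unsigned int frequency;	/* kHz */
};

/* Table after one entry was dropped: positions != P-state numbers. */
static const struct freq_entry table[] = {
	{ .index = 0, .frequency = 2600000 },
	{ .index = 2, .frequency = 1800000 },
	{ .index = 3, .frequency = 1000000 },
};

static unsigned int ps_to_as[NUM_PSTATES];

int main(void)
{
	unsigned int i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		ps_to_as[table[i].index] = i;	/* map P-state -> table slot */

	/* Frequency for hardware P-state 3, found via the mapping: */
	printf("P-state 3 runs at %u kHz\n", table[ps_to_as[3]].frequency);
	return 0;
}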
diff --git a/drivers/cpufreq/s3c64xx-cpufreq.c b/drivers/cpufreq/s3c64xx-cpufreq.c
index 3475f65aeec6..a5e72cb5f53c 100644
--- a/drivers/cpufreq/s3c64xx-cpufreq.c
+++ b/drivers/cpufreq/s3c64xx-cpufreq.c
@@ -8,6 +8,8 @@
* published by the Free Software Foundation.
*/
+#define pr_fmt(fmt) "cpufreq: " fmt
+
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
@@ -91,7 +93,7 @@ static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
if (freqs.old == freqs.new)
return 0;
- pr_debug("cpufreq: Transition %d-%dkHz\n", freqs.old, freqs.new);
+ pr_debug("Transition %d-%dkHz\n", freqs.old, freqs.new);
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
@@ -101,7 +103,7 @@ static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
dvfs->vddarm_min,
dvfs->vddarm_max);
if (ret != 0) {
- pr_err("cpufreq: Failed to set VDDARM for %dkHz: %d\n",
+ pr_err("Failed to set VDDARM for %dkHz: %d\n",
freqs.new, ret);
goto err;
}
@@ -110,7 +112,7 @@ static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
ret = clk_set_rate(armclk, freqs.new * 1000);
if (ret < 0) {
- pr_err("cpufreq: Failed to set rate %dkHz: %d\n",
+ pr_err("Failed to set rate %dkHz: %d\n",
freqs.new, ret);
goto err;
}
@@ -123,14 +125,14 @@ static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
dvfs->vddarm_min,
dvfs->vddarm_max);
if (ret != 0) {
- pr_err("cpufreq: Failed to set VDDARM for %dkHz: %d\n",
+ pr_err("Failed to set VDDARM for %dkHz: %d\n",
freqs.new, ret);
goto err_clk;
}
}
#endif
- pr_debug("cpufreq: Set actual frequency %lukHz\n",
+ pr_debug("Set actual frequency %lukHz\n",
clk_get_rate(armclk) / 1000);
return 0;
@@ -153,7 +155,7 @@ static void __init s3c64xx_cpufreq_config_regulator(void)
count = regulator_count_voltages(vddarm);
if (count < 0) {
- pr_err("cpufreq: Unable to check supported voltages\n");
+ pr_err("Unable to check supported voltages\n");
}
freq = s3c64xx_freq_table;
@@ -171,7 +173,7 @@ static void __init s3c64xx_cpufreq_config_regulator(void)
}
if (!found) {
- pr_debug("cpufreq: %dkHz unsupported by regulator\n",
+ pr_debug("%dkHz unsupported by regulator\n",
freq->frequency);
freq->frequency = CPUFREQ_ENTRY_INVALID;
}
@@ -194,13 +196,13 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
return -EINVAL;
if (s3c64xx_freq_table == NULL) {
- pr_err("cpufreq: No frequency information for this CPU\n");
+ pr_err("No frequency information for this CPU\n");
return -ENODEV;
}
armclk = clk_get(NULL, "armclk");
if (IS_ERR(armclk)) {
- pr_err("cpufreq: Unable to obtain ARMCLK: %ld\n",
+ pr_err("Unable to obtain ARMCLK: %ld\n",
PTR_ERR(armclk));
return PTR_ERR(armclk);
}
@@ -209,12 +211,19 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
vddarm = regulator_get(NULL, "vddarm");
if (IS_ERR(vddarm)) {
ret = PTR_ERR(vddarm);
- pr_err("cpufreq: Failed to obtain VDDARM: %d\n", ret);
- pr_err("cpufreq: Only frequency scaling available\n");
+ pr_err("Failed to obtain VDDARM: %d\n", ret);
+ pr_err("Only frequency scaling available\n");
vddarm = NULL;
} else {
s3c64xx_cpufreq_config_regulator();
}
+
+ vddint = regulator_get(NULL, "vddint");
+ if (IS_ERR(vddint)) {
+ ret = PTR_ERR(vddint);
+ pr_err("Failed to obtain VDDINT: %d\n", ret);
+ vddint = NULL;
+ }
#endif
freq = s3c64xx_freq_table;
@@ -225,7 +234,7 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
r = clk_round_rate(armclk, freq->frequency * 1000);
r /= 1000;
if (r != freq->frequency) {
- pr_debug("cpufreq: %dkHz unsupported by clock\n",
+ pr_debug("%dkHz unsupported by clock\n",
freq->frequency);
freq->frequency = CPUFREQ_ENTRY_INVALID;
}
@@ -248,7 +257,7 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
ret = cpufreq_frequency_table_cpuinfo(policy, s3c64xx_freq_table);
if (ret != 0) {
- pr_err("cpufreq: Failed to configure frequency table: %d\n",
+ pr_err("Failed to configure frequency table: %d\n",
ret);
regulator_put(vddarm);
clk_put(armclk);
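The s3c64xx change relies on the kernel's pr_fmt() convention: because the macro is defined before the printk helpers are pulled in by the #include block, every pr_err()/pr_debug() in the file gets the "cpufreq: " prefix pasted onto its format string, which is why the literal prefixes can be dropped from each message. A standalone imitation of that string pasting; the fprintf-based pr_err below is only a stand-in for the kernel macro:

#include <stdio.h>

#define pr_fmt(fmt)		"cpufreq: " fmt
#define pr_err(fmt, ...)	fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	/* Prints: cpufreq: Failed to set rate 800000kHz: -22 */
	pr_err("Failed to set rate %dkHz: %d\n", 800000, -22);
	return 0;
}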
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 06ce2680d00d..59f4261c753a 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -291,10 +291,10 @@ EXPORT_SYMBOL_GPL(cpuidle_disable_device);
static int __cpuidle_register_device(struct cpuidle_device *dev)
{
int ret;
- struct sys_device *sys_dev = get_cpu_sysdev((unsigned long)dev->cpu);
+ struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu);
struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver();
- if (!sys_dev)
+ if (!dev)
return -EINVAL;
if (!try_module_get(cpuidle_driver->owner))
return -EINVAL;
@@ -303,7 +303,7 @@ static int __cpuidle_register_device(struct cpuidle_device *dev)
per_cpu(cpuidle_devices, dev->cpu) = dev;
list_add(&dev->device_list, &cpuidle_detected_devices);
- if ((ret = cpuidle_add_sysfs(sys_dev))) {
+ if ((ret = cpuidle_add_sysfs(cpu_dev))) {
module_put(cpuidle_driver->owner);
return ret;
}
@@ -344,7 +344,7 @@ EXPORT_SYMBOL_GPL(cpuidle_register_device);
*/
void cpuidle_unregister_device(struct cpuidle_device *dev)
{
- struct sys_device *sys_dev = get_cpu_sysdev((unsigned long)dev->cpu);
+ struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu);
struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver();
if (dev->registered == 0)
@@ -354,7 +354,7 @@ void cpuidle_unregister_device(struct cpuidle_device *dev)
cpuidle_disable_device(dev);
- cpuidle_remove_sysfs(sys_dev);
+ cpuidle_remove_sysfs(cpu_dev);
list_del(&dev->device_list);
wait_for_completion(&dev->kobj_unregister);
per_cpu(cpuidle_devices, dev->cpu) = NULL;
@@ -411,7 +411,7 @@ static int __init cpuidle_init(void)
if (cpuidle_disabled())
return -ENODEV;
- ret = cpuidle_add_class_sysfs(&cpu_sysdev_class);
+ ret = cpuidle_add_interface(cpu_subsys.dev_root);
if (ret)
return ret;
diff --git a/drivers/cpuidle/cpuidle.h b/drivers/cpuidle/cpuidle.h
index 38c3fd8b9d76..7db186685c27 100644
--- a/drivers/cpuidle/cpuidle.h
+++ b/drivers/cpuidle/cpuidle.h
@@ -5,7 +5,7 @@
#ifndef __DRIVER_CPUIDLE_H
#define __DRIVER_CPUIDLE_H
-#include <linux/sysdev.h>
+#include <linux/device.h>
/* For internal use only */
extern struct cpuidle_governor *cpuidle_curr_governor;
@@ -23,11 +23,11 @@ extern void cpuidle_uninstall_idle_handler(void);
extern int cpuidle_switch_governor(struct cpuidle_governor *gov);
/* sysfs */
-extern int cpuidle_add_class_sysfs(struct sysdev_class *cls);
-extern void cpuidle_remove_class_sysfs(struct sysdev_class *cls);
+extern int cpuidle_add_interface(struct device *dev);
+extern void cpuidle_remove_interface(struct device *dev);
extern int cpuidle_add_state_sysfs(struct cpuidle_device *device);
extern void cpuidle_remove_state_sysfs(struct cpuidle_device *device);
-extern int cpuidle_add_sysfs(struct sys_device *sysdev);
-extern void cpuidle_remove_sysfs(struct sys_device *sysdev);
+extern int cpuidle_add_sysfs(struct device *dev);
+extern void cpuidle_remove_sysfs(struct device *dev);
#endif /* __DRIVER_CPUIDLE_H */
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index 1e756e160dca..3fe41fe4851a 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -22,8 +22,8 @@ static int __init cpuidle_sysfs_setup(char *unused)
}
__setup("cpuidle_sysfs_switch", cpuidle_sysfs_setup);
-static ssize_t show_available_governors(struct sysdev_class *class,
- struct sysdev_class_attribute *attr,
+static ssize_t show_available_governors(struct device *dev,
+ struct device_attribute *attr,
char *buf)
{
ssize_t i = 0;
@@ -42,8 +42,8 @@ out:
return i;
}
-static ssize_t show_current_driver(struct sysdev_class *class,
- struct sysdev_class_attribute *attr,
+static ssize_t show_current_driver(struct device *dev,
+ struct device_attribute *attr,
char *buf)
{
ssize_t ret;
@@ -59,8 +59,8 @@ static ssize_t show_current_driver(struct sysdev_class *class,
return ret;
}
-static ssize_t show_current_governor(struct sysdev_class *class,
- struct sysdev_class_attribute *attr,
+static ssize_t show_current_governor(struct device *dev,
+ struct device_attribute *attr,
char *buf)
{
ssize_t ret;
@@ -75,8 +75,8 @@ static ssize_t show_current_governor(struct sysdev_class *class,
return ret;
}
-static ssize_t store_current_governor(struct sysdev_class *class,
- struct sysdev_class_attribute *attr,
+static ssize_t store_current_governor(struct device *dev,
+ struct device_attribute *attr,
const char *buf, size_t count)
{
char gov_name[CPUIDLE_NAME_LEN];
@@ -109,50 +109,48 @@ static ssize_t store_current_governor(struct sysdev_class *class,
return count;
}
-static SYSDEV_CLASS_ATTR(current_driver, 0444, show_current_driver, NULL);
-static SYSDEV_CLASS_ATTR(current_governor_ro, 0444, show_current_governor,
- NULL);
+static DEVICE_ATTR(current_driver, 0444, show_current_driver, NULL);
+static DEVICE_ATTR(current_governor_ro, 0444, show_current_governor, NULL);
-static struct attribute *cpuclass_default_attrs[] = {
- &attr_current_driver.attr,
- &attr_current_governor_ro.attr,
+static struct attribute *cpuidle_default_attrs[] = {
+ &dev_attr_current_driver.attr,
+ &dev_attr_current_governor_ro.attr,
NULL
};
-static SYSDEV_CLASS_ATTR(available_governors, 0444, show_available_governors,
- NULL);
-static SYSDEV_CLASS_ATTR(current_governor, 0644, show_current_governor,
- store_current_governor);
+static DEVICE_ATTR(available_governors, 0444, show_available_governors, NULL);
+static DEVICE_ATTR(current_governor, 0644, show_current_governor,
+ store_current_governor);
-static struct attribute *cpuclass_switch_attrs[] = {
- &attr_available_governors.attr,
- &attr_current_driver.attr,
- &attr_current_governor.attr,
+static struct attribute *cpuidle_switch_attrs[] = {
+ &dev_attr_available_governors.attr,
+ &dev_attr_current_driver.attr,
+ &dev_attr_current_governor.attr,
NULL
};
-static struct attribute_group cpuclass_attr_group = {
- .attrs = cpuclass_default_attrs,
+static struct attribute_group cpuidle_attr_group = {
+ .attrs = cpuidle_default_attrs,
.name = "cpuidle",
};
/**
- * cpuidle_add_class_sysfs - add CPU global sysfs attributes
+ * cpuidle_add_interface - add CPU global sysfs attributes
*/
-int cpuidle_add_class_sysfs(struct sysdev_class *cls)
+int cpuidle_add_interface(struct device *dev)
{
if (sysfs_switch)
- cpuclass_attr_group.attrs = cpuclass_switch_attrs;
+ cpuidle_attr_group.attrs = cpuidle_switch_attrs;
- return sysfs_create_group(&cls->kset.kobj, &cpuclass_attr_group);
+ return sysfs_create_group(&dev->kobj, &cpuidle_attr_group);
}
/**
- * cpuidle_remove_class_sysfs - remove CPU global sysfs attributes
+ * cpuidle_remove_interface - remove CPU global sysfs attributes
*/
-void cpuidle_remove_class_sysfs(struct sysdev_class *cls)
+void cpuidle_remove_interface(struct device *dev)
{
- sysfs_remove_group(&cls->kset.kobj, &cpuclass_attr_group);
+ sysfs_remove_group(&dev->kobj, &cpuidle_attr_group);
}
struct cpuidle_attr {
@@ -365,16 +363,16 @@ void cpuidle_remove_state_sysfs(struct cpuidle_device *device)
/**
* cpuidle_add_sysfs - creates a sysfs instance for the target device
- * @sysdev: the target device
+ * @dev: the target device
*/
-int cpuidle_add_sysfs(struct sys_device *sysdev)
+int cpuidle_add_sysfs(struct device *cpu_dev)
{
- int cpu = sysdev->id;
+ int cpu = cpu_dev->id;
struct cpuidle_device *dev;
int error;
dev = per_cpu(cpuidle_devices, cpu);
- error = kobject_init_and_add(&dev->kobj, &ktype_cpuidle, &sysdev->kobj,
+ error = kobject_init_and_add(&dev->kobj, &ktype_cpuidle, &cpu_dev->kobj,
"cpuidle");
if (!error)
kobject_uevent(&dev->kobj, KOBJ_ADD);
@@ -383,11 +381,11 @@ int cpuidle_add_sysfs(struct sys_device *sysdev)
/**
* cpuidle_remove_sysfs - deletes a sysfs instance on the target device
- * @sysdev: the target device
+ * @dev: the target device
*/
-void cpuidle_remove_sysfs(struct sys_device *sysdev)
+void cpuidle_remove_sysfs(struct device *cpu_dev)
{
- int cpu = sysdev->id;
+ int cpu = cpu_dev->id;
struct cpuidle_device *dev;
dev = per_cpu(cpuidle_devices, cpu);
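The cpuidle sysfs conversion swaps sysdev class attributes for ordinary struct device attributes: DEVICE_ATTR(name, ...) declares a struct device_attribute called dev_attr_name, which is why the attribute arrays now take &dev_attr_current_driver.attr and friends, and the group is created on cpu_subsys.dev_root via sysfs_create_group(). A short sketch of the same idiom, meant for a kernel build rather than standalone compilation; the attribute name and value are illustrative, not taken from the final cpuidle code:

/* Sketch of the struct device attribute idiom now used above; this is the
 * generic driver-core pattern. */
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>

static ssize_t show_example(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n", "none");
}

/* Declares "struct device_attribute dev_attr_example", hence the
 * dev_attr_ prefix used in the attribute arrays above. */
static DEVICE_ATTR(example, 0444, show_example, NULL);

static struct attribute *example_attrs[] = {
	&dev_attr_example.attr,
	NULL
};

static struct attribute_group example_attr_group = {
	.attrs	= example_attrs,
	.name	= "cpuidle",
};

/* Created against a device's kobject, e.g.:
 *	sysfs_create_group(&dev->kobj, &example_attr_group);
 */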
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 1d103f997dc2..13f8e1a14988 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -1292,18 +1292,7 @@ static struct platform_driver crypto4xx_driver = {
.remove = crypto4xx_remove,
};
-static int __init crypto4xx_init(void)
-{
- return platform_driver_register(&crypto4xx_driver);
-}
-
-static void __exit crypto4xx_exit(void)
-{
- platform_driver_unregister(&crypto4xx_driver);
-}
-
-module_init(crypto4xx_init);
-module_exit(crypto4xx_exit);
+module_platform_driver(crypto4xx_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("James Hsiao <jhsiao@amcc.com>");
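module_platform_driver() is the helper that replaces the init/exit boilerplate deleted above; roughly, it expands to the pair of functions below (the real macro lives in include/linux/platform_device.h and is built on module_driver()):

/* Roughly what module_platform_driver(crypto4xx_driver) expands to: */
static int __init crypto4xx_driver_init(void)
{
	return platform_driver_register(&crypto4xx_driver);
}
module_init(crypto4xx_driver_init);

static void __exit crypto4xx_driver_exit(void)
{
	platform_driver_unregister(&crypto4xx_driver);
}
module_exit(crypto4xx_driver_exit);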
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 4159265b453b..e73cf2e8110a 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -113,7 +113,7 @@ static inline void append_dec_shr_done(u32 *desc)
jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TEST_ALL);
set_jump_tgt_here(desc, jump_cmd);
- append_cmd(desc, SET_OK_PROP_ERRORS | CMD_LOAD);
+ append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
}
/*
@@ -213,7 +213,7 @@ static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
set_jump_tgt_here(desc, key_jump_cmd);
/* Propagate errors from shared to job descriptor */
- append_cmd(desc, SET_OK_PROP_ERRORS | CMD_LOAD);
+ append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
}
static int aead_set_sh_desc(struct crypto_aead *aead)
@@ -310,7 +310,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
/* Only propagate error immediately if shared */
jump_cmd = append_jump(desc, JUMP_TEST_ALL);
set_jump_tgt_here(desc, key_jump_cmd);
- append_cmd(desc, SET_OK_PROP_ERRORS | CMD_LOAD);
+ append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
set_jump_tgt_here(desc, jump_cmd);
/* Class 2 operation */
@@ -683,7 +683,7 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
set_jump_tgt_here(desc, key_jump_cmd);
/* Propagate errors from shared to job descriptor */
- append_cmd(desc, SET_OK_PROP_ERRORS | CMD_LOAD);
+ append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
/* Load iv */
append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
@@ -724,7 +724,7 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
/* For aead, only propagate error immediately if shared */
jump_cmd = append_jump(desc, JUMP_TEST_ALL);
set_jump_tgt_here(desc, key_jump_cmd);
- append_cmd(desc, SET_OK_PROP_ERRORS | CMD_LOAD);
+ append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
set_jump_tgt_here(desc, jump_cmd);
/* load IV */
@@ -1806,6 +1806,25 @@ struct caam_alg_template {
static struct caam_alg_template driver_algs[] = {
/* single-pass ipsec_esp descriptor */
{
+ .name = "authenc(hmac(md5),cbc(aes))",
+ .driver_name = "authenc-hmac-md5-cbc-aes-caam",
+ .blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+ },
+ {
.name = "authenc(hmac(sha1),cbc(aes))",
.driver_name = "authenc-hmac-sha1-cbc-aes-caam",
.blocksize = AES_BLOCK_SIZE,
@@ -1865,6 +1884,25 @@ static struct caam_alg_template driver_algs[] = {
.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
},
{
+ .name = "authenc(hmac(md5),cbc(des3_ede))",
+ .driver_name = "authenc-hmac-md5-cbc-des3_ede-caam",
+ .blocksize = DES3_EDE_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+ },
+ {
.name = "authenc(hmac(sha1),cbc(des3_ede))",
.driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam",
.blocksize = DES3_EDE_BLOCK_SIZE,
@@ -1924,6 +1962,25 @@ static struct caam_alg_template driver_algs[] = {
.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
},
{
+ .name = "authenc(hmac(md5),cbc(des))",
+ .driver_name = "authenc-hmac-md5-cbc-des-caam",
+ .blocksize = DES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+ },
+ {
.name = "authenc(hmac(sha1),cbc(des))",
.driver_name = "authenc-hmac-sha1-cbc-des-caam",
.blocksize = DES_BLOCK_SIZE,
diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h
index d38f2afaa966..a63bc65fae86 100644
--- a/drivers/crypto/caam/compat.h
+++ b/drivers/crypto/caam/compat.h
@@ -28,6 +28,7 @@
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/sha.h>
+#include <crypto/md5.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 73988bb7322a..8ae3ba2a160d 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -52,8 +52,6 @@ static int caam_probe(struct platform_device *pdev)
struct caam_ctrl __iomem *ctrl;
struct caam_full __iomem *topregs;
struct caam_drv_private *ctrlpriv;
- struct caam_deco **deco;
- u32 deconum;
#ifdef CONFIG_DEBUG_FS
struct caam_perfmon *perfmon;
#endif
@@ -92,17 +90,6 @@ static int caam_probe(struct platform_device *pdev)
if (sizeof(dma_addr_t) == sizeof(u64))
dma_set_mask(dev, DMA_BIT_MASK(36));
- /* Find out how many DECOs are present */
- deconum = (rd_reg64(&topregs->ctrl.perfmon.cha_num) &
- CHA_NUM_DECONUM_MASK) >> CHA_NUM_DECONUM_SHIFT;
-
- ctrlpriv->deco = kmalloc(deconum * sizeof(struct caam_deco *),
- GFP_KERNEL);
-
- deco = (struct caam_deco __force **)&topregs->deco;
- for (d = 0; d < deconum; d++)
- ctrlpriv->deco[d] = deco[d];
-
/*
* Detect and enable JobRs
* First, find out how many ring spec'ed, allocate references
@@ -253,18 +240,7 @@ static struct platform_driver caam_driver = {
.remove = __devexit_p(caam_remove),
};
-static int __init caam_base_init(void)
-{
- return platform_driver_register(&caam_driver);
-}
-
-static void __exit caam_base_exit(void)
-{
- return platform_driver_unregister(&caam_driver);
-}
-
-module_init(caam_base_init);
-module_exit(caam_base_exit);
+module_platform_driver(caam_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM request backend");
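The desc.h hunk below only realigns the command-word constants, but it is a convenient place to see how CAAM job descriptor words are built: a command is OR-ed together from a CMD_* opcode, a CLASS_* selector and field-specific values, which is exactly what the append_cmd(..., CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT | ...) calls in the caamalg.c hunk above do. A standalone sketch reproducing just enough of those defines to compose one LOAD word; the 16-byte length is an arbitrary example value:

#include <stdint.h>
#include <stdio.h>

/* A few of the constants from drivers/crypto/caam/desc.h, as in the hunk below. */
#define CMD_SHIFT			27
#define CMD_LOAD			(0x02u << CMD_SHIFT)
#define CLASS_SHIFT			25
#define CLASS_1				(0x01u << CLASS_SHIFT)
#define LDST_SRCDST_SHIFT		16
#define LDST_SRCDST_BYTE_CONTEXT	(0x20u << LDST_SRCDST_SHIFT)

int main(void)
{
	/* LOAD 16 bytes into the class 1 context register. */
	uint32_t cmd = CMD_LOAD | CLASS_1 | LDST_SRCDST_BYTE_CONTEXT | 16;

	printf("LOAD command word: 0x%08x\n", cmd);
	return 0;
}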
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
index 974a75842da9..a17c2958dab1 100644
--- a/drivers/crypto/caam/desc.h
+++ b/drivers/crypto/caam/desc.h
@@ -9,7 +9,7 @@
#define DESC_H
/* Max size of any CAAM descriptor in 32-bit words, inclusive of header */
-#define MAX_CAAM_DESCSIZE 64
+#define MAX_CAAM_DESCSIZE 64
/* Block size of any entity covered/uncovered with a KEK/TKEK */
#define KEK_BLOCKSIZE 16
@@ -18,38 +18,38 @@
* Supported descriptor command types as they show up
* inside a descriptor command word.
*/
-#define CMD_SHIFT 27
-#define CMD_MASK 0xf8000000
-
-#define CMD_KEY (0x00 << CMD_SHIFT)
-#define CMD_SEQ_KEY (0x01 << CMD_SHIFT)
-#define CMD_LOAD (0x02 << CMD_SHIFT)
-#define CMD_SEQ_LOAD (0x03 << CMD_SHIFT)
-#define CMD_FIFO_LOAD (0x04 << CMD_SHIFT)
-#define CMD_SEQ_FIFO_LOAD (0x05 << CMD_SHIFT)
-#define CMD_STORE (0x0a << CMD_SHIFT)
-#define CMD_SEQ_STORE (0x0b << CMD_SHIFT)
-#define CMD_FIFO_STORE (0x0c << CMD_SHIFT)
-#define CMD_SEQ_FIFO_STORE (0x0d << CMD_SHIFT)
-#define CMD_MOVE_LEN (0x0e << CMD_SHIFT)
-#define CMD_MOVE (0x0f << CMD_SHIFT)
-#define CMD_OPERATION (0x10 << CMD_SHIFT)
-#define CMD_SIGNATURE (0x12 << CMD_SHIFT)
-#define CMD_JUMP (0x14 << CMD_SHIFT)
-#define CMD_MATH (0x15 << CMD_SHIFT)
-#define CMD_DESC_HDR (0x16 << CMD_SHIFT)
-#define CMD_SHARED_DESC_HDR (0x17 << CMD_SHIFT)
-#define CMD_SEQ_IN_PTR (0x1e << CMD_SHIFT)
-#define CMD_SEQ_OUT_PTR (0x1f << CMD_SHIFT)
+#define CMD_SHIFT 27
+#define CMD_MASK 0xf8000000
+
+#define CMD_KEY (0x00 << CMD_SHIFT)
+#define CMD_SEQ_KEY (0x01 << CMD_SHIFT)
+#define CMD_LOAD (0x02 << CMD_SHIFT)
+#define CMD_SEQ_LOAD (0x03 << CMD_SHIFT)
+#define CMD_FIFO_LOAD (0x04 << CMD_SHIFT)
+#define CMD_SEQ_FIFO_LOAD (0x05 << CMD_SHIFT)
+#define CMD_STORE (0x0a << CMD_SHIFT)
+#define CMD_SEQ_STORE (0x0b << CMD_SHIFT)
+#define CMD_FIFO_STORE (0x0c << CMD_SHIFT)
+#define CMD_SEQ_FIFO_STORE (0x0d << CMD_SHIFT)
+#define CMD_MOVE_LEN (0x0e << CMD_SHIFT)
+#define CMD_MOVE (0x0f << CMD_SHIFT)
+#define CMD_OPERATION (0x10 << CMD_SHIFT)
+#define CMD_SIGNATURE (0x12 << CMD_SHIFT)
+#define CMD_JUMP (0x14 << CMD_SHIFT)
+#define CMD_MATH (0x15 << CMD_SHIFT)
+#define CMD_DESC_HDR (0x16 << CMD_SHIFT)
+#define CMD_SHARED_DESC_HDR (0x17 << CMD_SHIFT)
+#define CMD_SEQ_IN_PTR (0x1e << CMD_SHIFT)
+#define CMD_SEQ_OUT_PTR (0x1f << CMD_SHIFT)
/* General-purpose class selector for all commands */
-#define CLASS_SHIFT 25
-#define CLASS_MASK (0x03 << CLASS_SHIFT)
+#define CLASS_SHIFT 25
+#define CLASS_MASK (0x03 << CLASS_SHIFT)
-#define CLASS_NONE (0x00 << CLASS_SHIFT)
-#define CLASS_1 (0x01 << CLASS_SHIFT)
-#define CLASS_2 (0x02 << CLASS_SHIFT)
-#define CLASS_BOTH (0x03 << CLASS_SHIFT)
+#define CLASS_NONE (0x00 << CLASS_SHIFT)
+#define CLASS_1 (0x01 << CLASS_SHIFT)
+#define CLASS_2 (0x02 << CLASS_SHIFT)
+#define CLASS_BOTH (0x03 << CLASS_SHIFT)
/*
* Descriptor header command constructs
@@ -60,82 +60,82 @@
* Do Not Run - marks a descriptor inexecutable if there was
* a preceding error somewhere
*/
-#define HDR_DNR 0x01000000
+#define HDR_DNR 0x01000000
/*
* ONE - should always be set. Combination of ONE (always
* set) and ZRO (always clear) forms an endianness sanity check
*/
-#define HDR_ONE 0x00800000
-#define HDR_ZRO 0x00008000
+#define HDR_ONE 0x00800000
+#define HDR_ZRO 0x00008000
/* Start Index or SharedDesc Length */
-#define HDR_START_IDX_MASK 0x3f
-#define HDR_START_IDX_SHIFT 16
+#define HDR_START_IDX_MASK 0x3f
+#define HDR_START_IDX_SHIFT 16
/* If shared descriptor header, 6-bit length */
-#define HDR_DESCLEN_SHR_MASK 0x3f
+#define HDR_DESCLEN_SHR_MASK 0x3f
/* If non-shared header, 7-bit length */
-#define HDR_DESCLEN_MASK 0x7f
+#define HDR_DESCLEN_MASK 0x7f
/* This is a TrustedDesc (if not SharedDesc) */
-#define HDR_TRUSTED 0x00004000
+#define HDR_TRUSTED 0x00004000
/* Make into TrustedDesc (if not SharedDesc) */
-#define HDR_MAKE_TRUSTED 0x00002000
+#define HDR_MAKE_TRUSTED 0x00002000
/* Save context if self-shared (if SharedDesc) */
-#define HDR_SAVECTX 0x00001000
+#define HDR_SAVECTX 0x00001000
/* Next item points to SharedDesc */
-#define HDR_SHARED 0x00001000
+#define HDR_SHARED 0x00001000
/*
* Reverse Execution Order - execute JobDesc first, then
* execute SharedDesc (normally SharedDesc goes first).
*/
-#define HDR_REVERSE 0x00000800
+#define HDR_REVERSE 0x00000800
/* Propogate DNR property to SharedDesc */
-#define HDR_PROP_DNR 0x00000800
+#define HDR_PROP_DNR 0x00000800
/* JobDesc/SharedDesc share property */
-#define HDR_SD_SHARE_MASK 0x03
-#define HDR_SD_SHARE_SHIFT 8
-#define HDR_JD_SHARE_MASK 0x07
-#define HDR_JD_SHARE_SHIFT 8
+#define HDR_SD_SHARE_MASK 0x03
+#define HDR_SD_SHARE_SHIFT 8
+#define HDR_JD_SHARE_MASK 0x07
+#define HDR_JD_SHARE_SHIFT 8
-#define HDR_SHARE_NEVER (0x00 << HDR_SD_SHARE_SHIFT)
-#define HDR_SHARE_WAIT (0x01 << HDR_SD_SHARE_SHIFT)
-#define HDR_SHARE_SERIAL (0x02 << HDR_SD_SHARE_SHIFT)
-#define HDR_SHARE_ALWAYS (0x03 << HDR_SD_SHARE_SHIFT)
-#define HDR_SHARE_DEFER (0x04 << HDR_SD_SHARE_SHIFT)
+#define HDR_SHARE_NEVER (0x00 << HDR_SD_SHARE_SHIFT)
+#define HDR_SHARE_WAIT (0x01 << HDR_SD_SHARE_SHIFT)
+#define HDR_SHARE_SERIAL (0x02 << HDR_SD_SHARE_SHIFT)
+#define HDR_SHARE_ALWAYS (0x03 << HDR_SD_SHARE_SHIFT)
+#define HDR_SHARE_DEFER (0x04 << HDR_SD_SHARE_SHIFT)
/* JobDesc/SharedDesc descriptor length */
-#define HDR_JD_LENGTH_MASK 0x7f
-#define HDR_SD_LENGTH_MASK 0x3f
+#define HDR_JD_LENGTH_MASK 0x7f
+#define HDR_SD_LENGTH_MASK 0x3f
/*
* KEY/SEQ_KEY Command Constructs
*/
-/* Key Destination Class: 01 = Class 1, 02 - Class 2 */
-#define KEY_DEST_CLASS_SHIFT 25 /* use CLASS_1 or CLASS_2 */
-#define KEY_DEST_CLASS_MASK (0x03 << KEY_DEST_CLASS_SHIFT)
+/* Key Destination Class: 01 = Class 1, 02 - Class 2 */
+#define KEY_DEST_CLASS_SHIFT 25 /* use CLASS_1 or CLASS_2 */
+#define KEY_DEST_CLASS_MASK (0x03 << KEY_DEST_CLASS_SHIFT)
/* Scatter-Gather Table/Variable Length Field */
-#define KEY_SGF 0x01000000
-#define KEY_VLF 0x01000000
+#define KEY_SGF 0x01000000
+#define KEY_VLF 0x01000000
/* Immediate - Key follows command in the descriptor */
-#define KEY_IMM 0x00800000
+#define KEY_IMM 0x00800000
/*
* Encrypted - Key is encrypted either with the KEK, or
* with the TDKEK if TK is set
*/
-#define KEY_ENC 0x00400000
+#define KEY_ENC 0x00400000
/*
* No Write Back - Do not allow key to be FIFO STOREd
@@ -156,16 +156,16 @@
* KDEST - Key Destination: 0 - class key register,
* 1 - PKHA 'e', 2 - AFHA Sbox, 3 - MDHA split-key
*/
-#define KEY_DEST_SHIFT 16
-#define KEY_DEST_MASK (0x03 << KEY_DEST_SHIFT)
+#define KEY_DEST_SHIFT 16
+#define KEY_DEST_MASK (0x03 << KEY_DEST_SHIFT)
-#define KEY_DEST_CLASS_REG (0x00 << KEY_DEST_SHIFT)
-#define KEY_DEST_PKHA_E (0x01 << KEY_DEST_SHIFT)
-#define KEY_DEST_AFHA_SBOX (0x02 << KEY_DEST_SHIFT)
-#define KEY_DEST_MDHA_SPLIT (0x03 << KEY_DEST_SHIFT)
+#define KEY_DEST_CLASS_REG (0x00 << KEY_DEST_SHIFT)
+#define KEY_DEST_PKHA_E (0x01 << KEY_DEST_SHIFT)
+#define KEY_DEST_AFHA_SBOX (0x02 << KEY_DEST_SHIFT)
+#define KEY_DEST_MDHA_SPLIT (0x03 << KEY_DEST_SHIFT)
/* Length in bytes */
-#define KEY_LENGTH_MASK 0x000003ff
+#define KEY_LENGTH_MASK 0x000003ff
/*
* LOAD/SEQ_LOAD/STORE/SEQ_STORE Command Constructs
@@ -175,25 +175,25 @@
* Load/Store Destination: 0 = class independent CCB,
* 1 = class 1 CCB, 2 = class 2 CCB, 3 = DECO
*/
-#define LDST_CLASS_SHIFT 25
-#define LDST_CLASS_MASK (0x03 << LDST_CLASS_SHIFT)
-#define LDST_CLASS_IND_CCB (0x00 << LDST_CLASS_SHIFT)
-#define LDST_CLASS_1_CCB (0x01 << LDST_CLASS_SHIFT)
-#define LDST_CLASS_2_CCB (0x02 << LDST_CLASS_SHIFT)
-#define LDST_CLASS_DECO (0x03 << LDST_CLASS_SHIFT)
+#define LDST_CLASS_SHIFT 25
+#define LDST_CLASS_MASK (0x03 << LDST_CLASS_SHIFT)
+#define LDST_CLASS_IND_CCB (0x00 << LDST_CLASS_SHIFT)
+#define LDST_CLASS_1_CCB (0x01 << LDST_CLASS_SHIFT)
+#define LDST_CLASS_2_CCB (0x02 << LDST_CLASS_SHIFT)
+#define LDST_CLASS_DECO (0x03 << LDST_CLASS_SHIFT)
/* Scatter-Gather Table/Variable Length Field */
-#define LDST_SGF 0x01000000
+#define LDST_SGF 0x01000000
#define LDST_VLF LDST_SGF
-/* Immediate - Key follows this command in descriptor */
-#define LDST_IMM_MASK 1
-#define LDST_IMM_SHIFT 23
-#define LDST_IMM (LDST_IMM_MASK << LDST_IMM_SHIFT)
+/* Immediate - Key follows this command in descriptor */
+#define LDST_IMM_MASK 1
+#define LDST_IMM_SHIFT 23
+#define LDST_IMM (LDST_IMM_MASK << LDST_IMM_SHIFT)
-/* SRC/DST - Destination for LOAD, Source for STORE */
-#define LDST_SRCDST_SHIFT 16
-#define LDST_SRCDST_MASK (0x7f << LDST_SRCDST_SHIFT)
+/* SRC/DST - Destination for LOAD, Source for STORE */
+#define LDST_SRCDST_SHIFT 16
+#define LDST_SRCDST_MASK (0x7f << LDST_SRCDST_SHIFT)
#define LDST_SRCDST_BYTE_CONTEXT (0x20 << LDST_SRCDST_SHIFT)
#define LDST_SRCDST_BYTE_KEY (0x40 << LDST_SRCDST_SHIFT)
@@ -205,64 +205,64 @@
#define LDST_SRCDST_WORD_DATASZ_REG (0x02 << LDST_SRCDST_SHIFT)
#define LDST_SRCDST_WORD_ICVSZ_REG (0x03 << LDST_SRCDST_SHIFT)
#define LDST_SRCDST_WORD_CHACTRL (0x06 << LDST_SRCDST_SHIFT)
-#define LDST_SRCDST_WORD_DECOCTRL (0x06 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECOCTRL (0x06 << LDST_SRCDST_SHIFT)
#define LDST_SRCDST_WORD_IRQCTRL (0x07 << LDST_SRCDST_SHIFT)
-#define LDST_SRCDST_WORD_DECO_PCLOVRD (0x07 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_PCLOVRD (0x07 << LDST_SRCDST_SHIFT)
#define LDST_SRCDST_WORD_CLRW (0x08 << LDST_SRCDST_SHIFT)
-#define LDST_SRCDST_WORD_DECO_MATH0 (0x08 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_MATH0 (0x08 << LDST_SRCDST_SHIFT)
#define LDST_SRCDST_WORD_STAT (0x09 << LDST_SRCDST_SHIFT)
-#define LDST_SRCDST_WORD_DECO_MATH1 (0x09 << LDST_SRCDST_SHIFT)
-#define LDST_SRCDST_WORD_DECO_MATH2 (0x0a << LDST_SRCDST_SHIFT)
-#define LDST_SRCDST_WORD_DECO_AAD_SZ (0x0b << LDST_SRCDST_SHIFT)
-#define LDST_SRCDST_WORD_DECO_MATH3 (0x0b << LDST_SRCDST_SHIFT)
-#define LDST_SRCDST_WORD_CLASS1_ICV_SZ (0x0c << LDST_SRCDST_SHIFT)
-#define LDST_SRCDST_WORD_ALTDS_CLASS1 (0x0f << LDST_SRCDST_SHIFT)
-#define LDST_SRCDST_WORD_PKHA_A_SZ (0x10 << LDST_SRCDST_SHIFT)
-#define LDST_SRCDST_WORD_PKHA_B_SZ (0x11 << LDST_SRCDST_SHIFT)
-#define LDST_SRCDST_WORD_PKHA_N_SZ (0x12 << LDST_SRCDST_SHIFT)
-#define LDST_SRCDST_WORD_PKHA_E_SZ (0x13 << LDST_SRCDST_SHIFT)
-#define LDST_SRCDST_WORD_DESCBUF (0x40 << LDST_SRCDST_SHIFT)
-#define LDST_SRCDST_WORD_INFO_FIFO (0x7a << LDST_SRCDST_SHIFT)
-
-/* Offset in source/destination */
-#define LDST_OFFSET_SHIFT 8
-#define LDST_OFFSET_MASK (0xff << LDST_OFFSET_SHIFT)
+#define LDST_SRCDST_WORD_DECO_MATH1 (0x09 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_MATH2 (0x0a << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_AAD_SZ (0x0b << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_MATH3 (0x0b << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_CLASS1_ICV_SZ (0x0c << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_ALTDS_CLASS1 (0x0f << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_PKHA_A_SZ (0x10 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_PKHA_B_SZ (0x11 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_PKHA_N_SZ (0x12 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_PKHA_E_SZ (0x13 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DESCBUF (0x40 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_INFO_FIFO (0x7a << LDST_SRCDST_SHIFT)
+
+/* Offset in source/destination */
+#define LDST_OFFSET_SHIFT 8
+#define LDST_OFFSET_MASK (0xff << LDST_OFFSET_SHIFT)
/* LDOFF definitions used when DST = LDST_SRCDST_WORD_DECOCTRL */
/* These could also be shifted by LDST_OFFSET_SHIFT - this reads better */
-#define LDOFF_CHG_SHARE_SHIFT 0
-#define LDOFF_CHG_SHARE_MASK (0x3 << LDOFF_CHG_SHARE_SHIFT)
-#define LDOFF_CHG_SHARE_NEVER (0x1 << LDOFF_CHG_SHARE_SHIFT)
-#define LDOFF_CHG_SHARE_OK_NO_PROP (0x2 << LDOFF_CHG_SHARE_SHIFT)
-#define LDOFF_CHG_SHARE_OK_PROP (0x3 << LDOFF_CHG_SHARE_SHIFT)
-
-#define LDOFF_ENABLE_AUTO_NFIFO (1 << 2)
-#define LDOFF_DISABLE_AUTO_NFIFO (1 << 3)
-
-#define LDOFF_CHG_NONSEQLIODN_SHIFT 4
-#define LDOFF_CHG_NONSEQLIODN_MASK (0x3 << LDOFF_CHG_NONSEQLIODN_SHIFT)
-#define LDOFF_CHG_NONSEQLIODN_SEQ (0x1 << LDOFF_CHG_NONSEQLIODN_SHIFT)
-#define LDOFF_CHG_NONSEQLIODN_NON_SEQ (0x2 << LDOFF_CHG_NONSEQLIODN_SHIFT)
-#define LDOFF_CHG_NONSEQLIODN_TRUSTED (0x3 << LDOFF_CHG_NONSEQLIODN_SHIFT)
-
-#define LDOFF_CHG_SEQLIODN_SHIFT 6
-#define LDOFF_CHG_SEQLIODN_MASK (0x3 << LDOFF_CHG_SEQLIODN_SHIFT)
-#define LDOFF_CHG_SEQLIODN_SEQ (0x1 << LDOFF_CHG_SEQLIODN_SHIFT)
-#define LDOFF_CHG_SEQLIODN_NON_SEQ (0x2 << LDOFF_CHG_SEQLIODN_SHIFT)
-#define LDOFF_CHG_SEQLIODN_TRUSTED (0x3 << LDOFF_CHG_SEQLIODN_SHIFT)
-
-/* Data length in bytes */
-#define LDST_LEN_SHIFT 0
-#define LDST_LEN_MASK (0xff << LDST_LEN_SHIFT)
+#define LDOFF_CHG_SHARE_SHIFT 0
+#define LDOFF_CHG_SHARE_MASK (0x3 << LDOFF_CHG_SHARE_SHIFT)
+#define LDOFF_CHG_SHARE_NEVER (0x1 << LDOFF_CHG_SHARE_SHIFT)
+#define LDOFF_CHG_SHARE_OK_PROP (0x2 << LDOFF_CHG_SHARE_SHIFT)
+#define LDOFF_CHG_SHARE_OK_NO_PROP (0x3 << LDOFF_CHG_SHARE_SHIFT)
+
+#define LDOFF_ENABLE_AUTO_NFIFO (1 << 2)
+#define LDOFF_DISABLE_AUTO_NFIFO (1 << 3)
+
+#define LDOFF_CHG_NONSEQLIODN_SHIFT 4
+#define LDOFF_CHG_NONSEQLIODN_MASK (0x3 << LDOFF_CHG_NONSEQLIODN_SHIFT)
+#define LDOFF_CHG_NONSEQLIODN_SEQ (0x1 << LDOFF_CHG_NONSEQLIODN_SHIFT)
+#define LDOFF_CHG_NONSEQLIODN_NON_SEQ (0x2 << LDOFF_CHG_NONSEQLIODN_SHIFT)
+#define LDOFF_CHG_NONSEQLIODN_TRUSTED (0x3 << LDOFF_CHG_NONSEQLIODN_SHIFT)
+
+#define LDOFF_CHG_SEQLIODN_SHIFT 6
+#define LDOFF_CHG_SEQLIODN_MASK (0x3 << LDOFF_CHG_SEQLIODN_SHIFT)
+#define LDOFF_CHG_SEQLIODN_SEQ (0x1 << LDOFF_CHG_SEQLIODN_SHIFT)
+#define LDOFF_CHG_SEQLIODN_NON_SEQ (0x2 << LDOFF_CHG_SEQLIODN_SHIFT)
+#define LDOFF_CHG_SEQLIODN_TRUSTED (0x3 << LDOFF_CHG_SEQLIODN_SHIFT)
+
+/* Data length in bytes */
+#define LDST_LEN_SHIFT 0
+#define LDST_LEN_MASK (0xff << LDST_LEN_SHIFT)
/* Special Length definitions when dst=deco-ctrl */
-#define LDLEN_ENABLE_OSL_COUNT (1 << 7)
-#define LDLEN_RST_CHA_OFIFO_PTR (1 << 6)
-#define LDLEN_RST_OFIFO (1 << 5)
-#define LDLEN_SET_OFIFO_OFF_VALID (1 << 4)
-#define LDLEN_SET_OFIFO_OFF_RSVD (1 << 3)
-#define LDLEN_SET_OFIFO_OFFSET_SHIFT 0
-#define LDLEN_SET_OFIFO_OFFSET_MASK (3 << LDLEN_SET_OFIFO_OFFSET_SHIFT)
+#define LDLEN_ENABLE_OSL_COUNT (1 << 7)
+#define LDLEN_RST_CHA_OFIFO_PTR (1 << 6)
+#define LDLEN_RST_OFIFO (1 << 5)
+#define LDLEN_SET_OFIFO_OFF_VALID (1 << 4)
+#define LDLEN_SET_OFIFO_OFF_RSVD (1 << 3)
+#define LDLEN_SET_OFIFO_OFFSET_SHIFT 0
+#define LDLEN_SET_OFIFO_OFFSET_MASK (3 << LDLEN_SET_OFIFO_OFFSET_SHIFT)
/*
* FIFO_LOAD/FIFO_STORE/SEQ_FIFO_LOAD/SEQ_FIFO_STORE
@@ -274,808 +274,808 @@
* 1 = Load for Class1, 2 = Load for Class2, 3 = Load both
* Store Source: 0 = normal, 1 = Class1key, 2 = Class2key
*/
-#define FIFOLD_CLASS_SHIFT 25
-#define FIFOLD_CLASS_MASK (0x03 << FIFOLD_CLASS_SHIFT)
-#define FIFOLD_CLASS_SKIP (0x00 << FIFOLD_CLASS_SHIFT)
-#define FIFOLD_CLASS_CLASS1 (0x01 << FIFOLD_CLASS_SHIFT)
-#define FIFOLD_CLASS_CLASS2 (0x02 << FIFOLD_CLASS_SHIFT)
-#define FIFOLD_CLASS_BOTH (0x03 << FIFOLD_CLASS_SHIFT)
-
-#define FIFOST_CLASS_SHIFT 25
-#define FIFOST_CLASS_MASK (0x03 << FIFOST_CLASS_SHIFT)
-#define FIFOST_CLASS_NORMAL (0x00 << FIFOST_CLASS_SHIFT)
-#define FIFOST_CLASS_CLASS1KEY (0x01 << FIFOST_CLASS_SHIFT)
-#define FIFOST_CLASS_CLASS2KEY (0x02 << FIFOST_CLASS_SHIFT)
+#define FIFOLD_CLASS_SHIFT 25
+#define FIFOLD_CLASS_MASK (0x03 << FIFOLD_CLASS_SHIFT)
+#define FIFOLD_CLASS_SKIP (0x00 << FIFOLD_CLASS_SHIFT)
+#define FIFOLD_CLASS_CLASS1 (0x01 << FIFOLD_CLASS_SHIFT)
+#define FIFOLD_CLASS_CLASS2 (0x02 << FIFOLD_CLASS_SHIFT)
+#define FIFOLD_CLASS_BOTH (0x03 << FIFOLD_CLASS_SHIFT)
+
+#define FIFOST_CLASS_SHIFT 25
+#define FIFOST_CLASS_MASK (0x03 << FIFOST_CLASS_SHIFT)
+#define FIFOST_CLASS_NORMAL (0x00 << FIFOST_CLASS_SHIFT)
+#define FIFOST_CLASS_CLASS1KEY (0x01 << FIFOST_CLASS_SHIFT)
+#define FIFOST_CLASS_CLASS2KEY (0x02 << FIFOST_CLASS_SHIFT)
/*
* Scatter-Gather Table/Variable Length Field
* If set for FIFO_LOAD, refers to a SG table. Within
* SEQ_FIFO_LOAD, is variable input sequence
*/
-#define FIFOLDST_SGF_SHIFT 24
-#define FIFOLDST_SGF_MASK (1 << FIFOLDST_SGF_SHIFT)
-#define FIFOLDST_VLF_MASK (1 << FIFOLDST_SGF_SHIFT)
-#define FIFOLDST_SGF (1 << FIFOLDST_SGF_SHIFT)
-#define FIFOLDST_VLF (1 << FIFOLDST_SGF_SHIFT)
+#define FIFOLDST_SGF_SHIFT 24
+#define FIFOLDST_SGF_MASK (1 << FIFOLDST_SGF_SHIFT)
+#define FIFOLDST_VLF_MASK (1 << FIFOLDST_SGF_SHIFT)
+#define FIFOLDST_SGF (1 << FIFOLDST_SGF_SHIFT)
+#define FIFOLDST_VLF (1 << FIFOLDST_SGF_SHIFT)
/* Immediate - Data follows command in descriptor */
-#define FIFOLD_IMM_SHIFT 23
-#define FIFOLD_IMM_MASK (1 << FIFOLD_IMM_SHIFT)
-#define FIFOLD_IMM (1 << FIFOLD_IMM_SHIFT)
+#define FIFOLD_IMM_SHIFT 23
+#define FIFOLD_IMM_MASK (1 << FIFOLD_IMM_SHIFT)
+#define FIFOLD_IMM (1 << FIFOLD_IMM_SHIFT)
/* Continue - Not the last FIFO store to come */
-#define FIFOST_CONT_SHIFT 23
-#define FIFOST_CONT_MASK (1 << FIFOST_CONT_SHIFT)
-#define FIFOST_CONT_MASK (1 << FIFOST_CONT_SHIFT)
+#define FIFOST_CONT_SHIFT 23
+#define FIFOST_CONT_MASK (1 << FIFOST_CONT_SHIFT)
+#define FIFOST_CONT_MASK (1 << FIFOST_CONT_SHIFT)
/*
* Extended Length - use 32-bit extended length that
* follows the pointer field. Illegal with IMM set
*/
-#define FIFOLDST_EXT_SHIFT 22
-#define FIFOLDST_EXT_MASK (1 << FIFOLDST_EXT_SHIFT)
-#define FIFOLDST_EXT (1 << FIFOLDST_EXT_SHIFT)
+#define FIFOLDST_EXT_SHIFT 22
+#define FIFOLDST_EXT_MASK (1 << FIFOLDST_EXT_SHIFT)
+#define FIFOLDST_EXT (1 << FIFOLDST_EXT_SHIFT)
/* Input data type.*/
-#define FIFOLD_TYPE_SHIFT 16
-#define FIFOLD_CONT_TYPE_SHIFT 19 /* shift past last-flush bits */
-#define FIFOLD_TYPE_MASK (0x3f << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_SHIFT 16
+#define FIFOLD_CONT_TYPE_SHIFT 19 /* shift past last-flush bits */
+#define FIFOLD_TYPE_MASK (0x3f << FIFOLD_TYPE_SHIFT)
/* PK types */
-#define FIFOLD_TYPE_PK (0x00 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_PK_MASK (0x30 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK (0x00 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_MASK (0x30 << FIFOLD_TYPE_SHIFT)
#define FIFOLD_TYPE_PK_TYPEMASK (0x0f << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_PK_A0 (0x00 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_PK_A1 (0x01 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_PK_A2 (0x02 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_PK_A3 (0x03 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_PK_B0 (0x04 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_PK_B1 (0x05 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_PK_B2 (0x06 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_PK_B3 (0x07 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_PK_N (0x08 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_PK_A (0x0c << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_PK_B (0x0d << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A0 (0x00 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A1 (0x01 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A2 (0x02 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A3 (0x03 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B0 (0x04 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B1 (0x05 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B2 (0x06 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B3 (0x07 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_N (0x08 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A (0x0c << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B (0x0d << FIFOLD_TYPE_SHIFT)
/* Other types. Need to OR in last/flush bits as desired */
-#define FIFOLD_TYPE_MSG_MASK (0x38 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_MSG (0x10 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_MSG1OUT2 (0x18 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_IV (0x20 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_BITDATA (0x28 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_AAD (0x30 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_ICV (0x38 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_MSG_MASK (0x38 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_MSG (0x10 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_MSG1OUT2 (0x18 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_IV (0x20 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_BITDATA (0x28 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_AAD (0x30 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_ICV (0x38 << FIFOLD_TYPE_SHIFT)
/* Last/Flush bits for use with "other" types above */
-#define FIFOLD_TYPE_ACT_MASK (0x07 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_NOACTION (0x00 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_FLUSH1 (0x01 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_LAST1 (0x02 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_LAST2FLUSH (0x03 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_LAST2 (0x04 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_ACT_MASK (0x07 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_NOACTION (0x00 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_FLUSH1 (0x01 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LAST1 (0x02 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LAST2FLUSH (0x03 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LAST2 (0x04 << FIFOLD_TYPE_SHIFT)
#define FIFOLD_TYPE_LAST2FLUSH1 (0x05 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_LASTBOTH (0x06 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_LASTBOTHFL (0x07 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LASTBOTH (0x06 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LASTBOTHFL (0x07 << FIFOLD_TYPE_SHIFT)
-#define FIFOLDST_LEN_MASK 0xffff
-#define FIFOLDST_EXT_LEN_MASK 0xffffffff
+#define FIFOLDST_LEN_MASK 0xffff
+#define FIFOLDST_EXT_LEN_MASK 0xffffffff
/* Output data types */
-#define FIFOST_TYPE_SHIFT 16
-#define FIFOST_TYPE_MASK (0x3f << FIFOST_TYPE_SHIFT)
-
-#define FIFOST_TYPE_PKHA_A0 (0x00 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_PKHA_A1 (0x01 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_PKHA_A2 (0x02 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_PKHA_A3 (0x03 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_PKHA_B0 (0x04 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_PKHA_B1 (0x05 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_PKHA_B2 (0x06 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_PKHA_B3 (0x07 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_PKHA_N (0x08 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_PKHA_A (0x0c << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_PKHA_B (0x0d << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_SHIFT 16
+#define FIFOST_TYPE_MASK (0x3f << FIFOST_TYPE_SHIFT)
+
+#define FIFOST_TYPE_PKHA_A0 (0x00 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_A1 (0x01 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_A2 (0x02 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_A3 (0x03 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B0 (0x04 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B1 (0x05 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B2 (0x06 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B3 (0x07 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_N (0x08 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_A (0x0c << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B (0x0d << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_AF_SBOX_JKEK (0x10 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_AF_SBOX_TKEK (0x21 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_PKHA_E_JKEK (0x22 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_PKHA_E_TKEK (0x23 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_KEY_KEK (0x24 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_KEY_TKEK (0x25 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_SPLIT_KEK (0x26 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_SPLIT_TKEK (0x27 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_OUTFIFO_KEK (0x28 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_E_JKEK (0x22 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_E_TKEK (0x23 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_KEY_KEK (0x24 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_KEY_TKEK (0x25 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_SPLIT_KEK (0x26 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_SPLIT_TKEK (0x27 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_OUTFIFO_KEK (0x28 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_OUTFIFO_TKEK (0x29 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_MESSAGE_DATA (0x30 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_RNGSTORE (0x34 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_RNGFIFO (0x35 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_SKIP (0x3f << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_RNGSTORE (0x34 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_RNGFIFO (0x35 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_SKIP (0x3f << FIFOST_TYPE_SHIFT)
/*
* OPERATION Command Constructs
*/
/* Operation type selectors - OP TYPE */
-#define OP_TYPE_SHIFT 24
-#define OP_TYPE_MASK (0x07 << OP_TYPE_SHIFT)
+#define OP_TYPE_SHIFT 24
+#define OP_TYPE_MASK (0x07 << OP_TYPE_SHIFT)
-#define OP_TYPE_UNI_PROTOCOL (0x00 << OP_TYPE_SHIFT)
-#define OP_TYPE_PK (0x01 << OP_TYPE_SHIFT)
-#define OP_TYPE_CLASS1_ALG (0x02 << OP_TYPE_SHIFT)
-#define OP_TYPE_CLASS2_ALG (0x04 << OP_TYPE_SHIFT)
-#define OP_TYPE_DECAP_PROTOCOL (0x06 << OP_TYPE_SHIFT)
-#define OP_TYPE_ENCAP_PROTOCOL (0x07 << OP_TYPE_SHIFT)
+#define OP_TYPE_UNI_PROTOCOL (0x00 << OP_TYPE_SHIFT)
+#define OP_TYPE_PK (0x01 << OP_TYPE_SHIFT)
+#define OP_TYPE_CLASS1_ALG (0x02 << OP_TYPE_SHIFT)
+#define OP_TYPE_CLASS2_ALG (0x04 << OP_TYPE_SHIFT)
+#define OP_TYPE_DECAP_PROTOCOL (0x06 << OP_TYPE_SHIFT)
+#define OP_TYPE_ENCAP_PROTOCOL (0x07 << OP_TYPE_SHIFT)
/* ProtocolID selectors - PROTID */
-#define OP_PCLID_SHIFT 16
-#define OP_PCLID_MASK (0xff << 16)
+#define OP_PCLID_SHIFT 16
+#define OP_PCLID_MASK (0xff << 16)
/* Assuming OP_TYPE = OP_TYPE_UNI_PROTOCOL */
-#define OP_PCLID_IKEV1_PRF (0x01 << OP_PCLID_SHIFT)
-#define OP_PCLID_IKEV2_PRF (0x02 << OP_PCLID_SHIFT)
-#define OP_PCLID_SSL30_PRF (0x08 << OP_PCLID_SHIFT)
-#define OP_PCLID_TLS10_PRF (0x09 << OP_PCLID_SHIFT)
-#define OP_PCLID_TLS11_PRF (0x0a << OP_PCLID_SHIFT)
-#define OP_PCLID_DTLS10_PRF (0x0c << OP_PCLID_SHIFT)
-#define OP_PCLID_PRF (0x06 << OP_PCLID_SHIFT)
-#define OP_PCLID_BLOB (0x0d << OP_PCLID_SHIFT)
-#define OP_PCLID_SECRETKEY (0x11 << OP_PCLID_SHIFT)
-#define OP_PCLID_PUBLICKEYPAIR (0x14 << OP_PCLID_SHIFT)
-#define OP_PCLID_DSASIGN (0x15 << OP_PCLID_SHIFT)
-#define OP_PCLID_DSAVERIFY (0x16 << OP_PCLID_SHIFT)
+#define OP_PCLID_IKEV1_PRF (0x01 << OP_PCLID_SHIFT)
+#define OP_PCLID_IKEV2_PRF (0x02 << OP_PCLID_SHIFT)
+#define OP_PCLID_SSL30_PRF (0x08 << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS10_PRF (0x09 << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS11_PRF (0x0a << OP_PCLID_SHIFT)
+#define OP_PCLID_DTLS10_PRF (0x0c << OP_PCLID_SHIFT)
+#define OP_PCLID_PRF (0x06 << OP_PCLID_SHIFT)
+#define OP_PCLID_BLOB (0x0d << OP_PCLID_SHIFT)
+#define OP_PCLID_SECRETKEY (0x11 << OP_PCLID_SHIFT)
+#define OP_PCLID_PUBLICKEYPAIR (0x14 << OP_PCLID_SHIFT)
+#define OP_PCLID_DSASIGN (0x15 << OP_PCLID_SHIFT)
+#define OP_PCLID_DSAVERIFY (0x16 << OP_PCLID_SHIFT)
/* Assuming OP_TYPE = OP_TYPE_DECAP_PROTOCOL/ENCAP_PROTOCOL */
-#define OP_PCLID_IPSEC (0x01 << OP_PCLID_SHIFT)
-#define OP_PCLID_SRTP (0x02 << OP_PCLID_SHIFT)
-#define OP_PCLID_MACSEC (0x03 << OP_PCLID_SHIFT)
-#define OP_PCLID_WIFI (0x04 << OP_PCLID_SHIFT)
-#define OP_PCLID_WIMAX (0x05 << OP_PCLID_SHIFT)
-#define OP_PCLID_SSL30 (0x08 << OP_PCLID_SHIFT)
-#define OP_PCLID_TLS10 (0x09 << OP_PCLID_SHIFT)
-#define OP_PCLID_TLS11 (0x0a << OP_PCLID_SHIFT)
-#define OP_PCLID_TLS12 (0x0b << OP_PCLID_SHIFT)
-#define OP_PCLID_DTLS (0x0c << OP_PCLID_SHIFT)
+#define OP_PCLID_IPSEC (0x01 << OP_PCLID_SHIFT)
+#define OP_PCLID_SRTP (0x02 << OP_PCLID_SHIFT)
+#define OP_PCLID_MACSEC (0x03 << OP_PCLID_SHIFT)
+#define OP_PCLID_WIFI (0x04 << OP_PCLID_SHIFT)
+#define OP_PCLID_WIMAX (0x05 << OP_PCLID_SHIFT)
+#define OP_PCLID_SSL30 (0x08 << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS10 (0x09 << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS11 (0x0a << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS12 (0x0b << OP_PCLID_SHIFT)
+#define OP_PCLID_DTLS (0x0c << OP_PCLID_SHIFT)
/*
* ProtocolInfo selectors
*/
-#define OP_PCLINFO_MASK 0xffff
+#define OP_PCLINFO_MASK 0xffff
/* for OP_PCLID_IPSEC */
-#define OP_PCL_IPSEC_CIPHER_MASK 0xff00
-#define OP_PCL_IPSEC_AUTH_MASK 0x00ff
-
-#define OP_PCL_IPSEC_DES_IV64 0x0100
-#define OP_PCL_IPSEC_DES 0x0200
-#define OP_PCL_IPSEC_3DES 0x0300
-#define OP_PCL_IPSEC_AES_CBC 0x0c00
-#define OP_PCL_IPSEC_AES_CTR 0x0d00
-#define OP_PCL_IPSEC_AES_XTS 0x1600
-#define OP_PCL_IPSEC_AES_CCM8 0x0e00
-#define OP_PCL_IPSEC_AES_CCM12 0x0f00
-#define OP_PCL_IPSEC_AES_CCM16 0x1000
-#define OP_PCL_IPSEC_AES_GCM8 0x1200
-#define OP_PCL_IPSEC_AES_GCM12 0x1300
-#define OP_PCL_IPSEC_AES_GCM16 0x1400
-
-#define OP_PCL_IPSEC_HMAC_NULL 0x0000
-#define OP_PCL_IPSEC_HMAC_MD5_96 0x0001
-#define OP_PCL_IPSEC_HMAC_SHA1_96 0x0002
-#define OP_PCL_IPSEC_AES_XCBC_MAC_96 0x0005
-#define OP_PCL_IPSEC_HMAC_MD5_128 0x0006
-#define OP_PCL_IPSEC_HMAC_SHA1_160 0x0007
-#define OP_PCL_IPSEC_HMAC_SHA2_256_128 0x000c
-#define OP_PCL_IPSEC_HMAC_SHA2_384_192 0x000d
-#define OP_PCL_IPSEC_HMAC_SHA2_512_256 0x000e
+#define OP_PCL_IPSEC_CIPHER_MASK 0xff00
+#define OP_PCL_IPSEC_AUTH_MASK 0x00ff
+
+#define OP_PCL_IPSEC_DES_IV64 0x0100
+#define OP_PCL_IPSEC_DES 0x0200
+#define OP_PCL_IPSEC_3DES 0x0300
+#define OP_PCL_IPSEC_AES_CBC 0x0c00
+#define OP_PCL_IPSEC_AES_CTR 0x0d00
+#define OP_PCL_IPSEC_AES_XTS 0x1600
+#define OP_PCL_IPSEC_AES_CCM8 0x0e00
+#define OP_PCL_IPSEC_AES_CCM12 0x0f00
+#define OP_PCL_IPSEC_AES_CCM16 0x1000
+#define OP_PCL_IPSEC_AES_GCM8 0x1200
+#define OP_PCL_IPSEC_AES_GCM12 0x1300
+#define OP_PCL_IPSEC_AES_GCM16 0x1400
+
+#define OP_PCL_IPSEC_HMAC_NULL 0x0000
+#define OP_PCL_IPSEC_HMAC_MD5_96 0x0001
+#define OP_PCL_IPSEC_HMAC_SHA1_96 0x0002
+#define OP_PCL_IPSEC_AES_XCBC_MAC_96 0x0005
+#define OP_PCL_IPSEC_HMAC_MD5_128 0x0006
+#define OP_PCL_IPSEC_HMAC_SHA1_160 0x0007
+#define OP_PCL_IPSEC_HMAC_SHA2_256_128 0x000c
+#define OP_PCL_IPSEC_HMAC_SHA2_384_192 0x000d
+#define OP_PCL_IPSEC_HMAC_SHA2_512_256 0x000e
/* For SRTP - OP_PCLID_SRTP */
-#define OP_PCL_SRTP_CIPHER_MASK 0xff00
-#define OP_PCL_SRTP_AUTH_MASK 0x00ff
+#define OP_PCL_SRTP_CIPHER_MASK 0xff00
+#define OP_PCL_SRTP_AUTH_MASK 0x00ff
-#define OP_PCL_SRTP_AES_CTR 0x0d00
+#define OP_PCL_SRTP_AES_CTR 0x0d00
-#define OP_PCL_SRTP_HMAC_SHA1_160 0x0007
+#define OP_PCL_SRTP_HMAC_SHA1_160 0x0007
/* For SSL 3.0 - OP_PCLID_SSL30 */
-#define OP_PCL_SSL30_AES_128_CBC_SHA 0x002f
-#define OP_PCL_SSL30_AES_128_CBC_SHA_2 0x0030
-#define OP_PCL_SSL30_AES_128_CBC_SHA_3 0x0031
-#define OP_PCL_SSL30_AES_128_CBC_SHA_4 0x0032
-#define OP_PCL_SSL30_AES_128_CBC_SHA_5 0x0033
-#define OP_PCL_SSL30_AES_128_CBC_SHA_6 0x0034
-#define OP_PCL_SSL30_AES_128_CBC_SHA_7 0x008c
-#define OP_PCL_SSL30_AES_128_CBC_SHA_8 0x0090
-#define OP_PCL_SSL30_AES_128_CBC_SHA_9 0x0094
-#define OP_PCL_SSL30_AES_128_CBC_SHA_10 0xc004
-#define OP_PCL_SSL30_AES_128_CBC_SHA_11 0xc009
-#define OP_PCL_SSL30_AES_128_CBC_SHA_12 0xc00e
-#define OP_PCL_SSL30_AES_128_CBC_SHA_13 0xc013
-#define OP_PCL_SSL30_AES_128_CBC_SHA_14 0xc018
-#define OP_PCL_SSL30_AES_128_CBC_SHA_15 0xc01d
-#define OP_PCL_SSL30_AES_128_CBC_SHA_16 0xc01e
-#define OP_PCL_SSL30_AES_128_CBC_SHA_17 0xc01f
-
-#define OP_PCL_SSL30_AES_256_CBC_SHA 0x0035
-#define OP_PCL_SSL30_AES_256_CBC_SHA_2 0x0036
-#define OP_PCL_SSL30_AES_256_CBC_SHA_3 0x0037
-#define OP_PCL_SSL30_AES_256_CBC_SHA_4 0x0038
-#define OP_PCL_SSL30_AES_256_CBC_SHA_5 0x0039
-#define OP_PCL_SSL30_AES_256_CBC_SHA_6 0x003a
-#define OP_PCL_SSL30_AES_256_CBC_SHA_7 0x008d
-#define OP_PCL_SSL30_AES_256_CBC_SHA_8 0x0091
-#define OP_PCL_SSL30_AES_256_CBC_SHA_9 0x0095
-#define OP_PCL_SSL30_AES_256_CBC_SHA_10 0xc005
-#define OP_PCL_SSL30_AES_256_CBC_SHA_11 0xc00a
-#define OP_PCL_SSL30_AES_256_CBC_SHA_12 0xc00f
-#define OP_PCL_SSL30_AES_256_CBC_SHA_13 0xc014
-#define OP_PCL_SSL30_AES_256_CBC_SHA_14 0xc019
-#define OP_PCL_SSL30_AES_256_CBC_SHA_15 0xc020
-#define OP_PCL_SSL30_AES_256_CBC_SHA_16 0xc021
-#define OP_PCL_SSL30_AES_256_CBC_SHA_17 0xc022
-
-#define OP_PCL_SSL30_3DES_EDE_CBC_MD5 0x0023
-
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA 0x001f
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_2 0x008b
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_3 0x008f
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_4 0x0093
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_5 0x000a
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_6 0x000d
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_7 0x0010
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_8 0x0013
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_9 0x0016
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_10 0x001b
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_11 0xc003
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_12 0xc008
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_13 0xc00d
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_14 0xc012
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_15 0xc017
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_16 0xc01a
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_17 0xc01b
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_18 0xc01c
-
-#define OP_PCL_SSL30_DES40_CBC_MD5 0x0029
-
-#define OP_PCL_SSL30_DES_CBC_MD5 0x0022
-
-#define OP_PCL_SSL30_DES40_CBC_SHA 0x0008
-#define OP_PCL_SSL30_DES40_CBC_SHA_2 0x000b
-#define OP_PCL_SSL30_DES40_CBC_SHA_3 0x000e
-#define OP_PCL_SSL30_DES40_CBC_SHA_4 0x0011
-#define OP_PCL_SSL30_DES40_CBC_SHA_5 0x0014
-#define OP_PCL_SSL30_DES40_CBC_SHA_6 0x0019
-#define OP_PCL_SSL30_DES40_CBC_SHA_7 0x0026
-
-#define OP_PCL_SSL30_DES_CBC_SHA 0x001e
-#define OP_PCL_SSL30_DES_CBC_SHA_2 0x0009
-#define OP_PCL_SSL30_DES_CBC_SHA_3 0x000c
-#define OP_PCL_SSL30_DES_CBC_SHA_4 0x000f
-#define OP_PCL_SSL30_DES_CBC_SHA_5 0x0012
-#define OP_PCL_SSL30_DES_CBC_SHA_6 0x0015
-#define OP_PCL_SSL30_DES_CBC_SHA_7 0x001a
-
-#define OP_PCL_SSL30_RC4_128_MD5 0x0024
-#define OP_PCL_SSL30_RC4_128_MD5_2 0x0004
-#define OP_PCL_SSL30_RC4_128_MD5_3 0x0018
-
-#define OP_PCL_SSL30_RC4_40_MD5 0x002b
-#define OP_PCL_SSL30_RC4_40_MD5_2 0x0003
-#define OP_PCL_SSL30_RC4_40_MD5_3 0x0017
-
-#define OP_PCL_SSL30_RC4_128_SHA 0x0020
-#define OP_PCL_SSL30_RC4_128_SHA_2 0x008a
-#define OP_PCL_SSL30_RC4_128_SHA_3 0x008e
-#define OP_PCL_SSL30_RC4_128_SHA_4 0x0092
-#define OP_PCL_SSL30_RC4_128_SHA_5 0x0005
-#define OP_PCL_SSL30_RC4_128_SHA_6 0xc002
-#define OP_PCL_SSL30_RC4_128_SHA_7 0xc007
-#define OP_PCL_SSL30_RC4_128_SHA_8 0xc00c
-#define OP_PCL_SSL30_RC4_128_SHA_9 0xc011
-#define OP_PCL_SSL30_RC4_128_SHA_10 0xc016
-
-#define OP_PCL_SSL30_RC4_40_SHA 0x0028
+#define OP_PCL_SSL30_AES_128_CBC_SHA 0x002f
+#define OP_PCL_SSL30_AES_128_CBC_SHA_2 0x0030
+#define OP_PCL_SSL30_AES_128_CBC_SHA_3 0x0031
+#define OP_PCL_SSL30_AES_128_CBC_SHA_4 0x0032
+#define OP_PCL_SSL30_AES_128_CBC_SHA_5 0x0033
+#define OP_PCL_SSL30_AES_128_CBC_SHA_6 0x0034
+#define OP_PCL_SSL30_AES_128_CBC_SHA_7 0x008c
+#define OP_PCL_SSL30_AES_128_CBC_SHA_8 0x0090
+#define OP_PCL_SSL30_AES_128_CBC_SHA_9 0x0094
+#define OP_PCL_SSL30_AES_128_CBC_SHA_10 0xc004
+#define OP_PCL_SSL30_AES_128_CBC_SHA_11 0xc009
+#define OP_PCL_SSL30_AES_128_CBC_SHA_12 0xc00e
+#define OP_PCL_SSL30_AES_128_CBC_SHA_13 0xc013
+#define OP_PCL_SSL30_AES_128_CBC_SHA_14 0xc018
+#define OP_PCL_SSL30_AES_128_CBC_SHA_15 0xc01d
+#define OP_PCL_SSL30_AES_128_CBC_SHA_16 0xc01e
+#define OP_PCL_SSL30_AES_128_CBC_SHA_17 0xc01f
+
+#define OP_PCL_SSL30_AES_256_CBC_SHA 0x0035
+#define OP_PCL_SSL30_AES_256_CBC_SHA_2 0x0036
+#define OP_PCL_SSL30_AES_256_CBC_SHA_3 0x0037
+#define OP_PCL_SSL30_AES_256_CBC_SHA_4 0x0038
+#define OP_PCL_SSL30_AES_256_CBC_SHA_5 0x0039
+#define OP_PCL_SSL30_AES_256_CBC_SHA_6 0x003a
+#define OP_PCL_SSL30_AES_256_CBC_SHA_7 0x008d
+#define OP_PCL_SSL30_AES_256_CBC_SHA_8 0x0091
+#define OP_PCL_SSL30_AES_256_CBC_SHA_9 0x0095
+#define OP_PCL_SSL30_AES_256_CBC_SHA_10 0xc005
+#define OP_PCL_SSL30_AES_256_CBC_SHA_11 0xc00a
+#define OP_PCL_SSL30_AES_256_CBC_SHA_12 0xc00f
+#define OP_PCL_SSL30_AES_256_CBC_SHA_13 0xc014
+#define OP_PCL_SSL30_AES_256_CBC_SHA_14 0xc019
+#define OP_PCL_SSL30_AES_256_CBC_SHA_15 0xc020
+#define OP_PCL_SSL30_AES_256_CBC_SHA_16 0xc021
+#define OP_PCL_SSL30_AES_256_CBC_SHA_17 0xc022
+
+#define OP_PCL_SSL30_3DES_EDE_CBC_MD5 0x0023
+
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA 0x001f
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_2 0x008b
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_3 0x008f
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_4 0x0093
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_5 0x000a
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_6 0x000d
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_7 0x0010
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_8 0x0013
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_9 0x0016
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_10 0x001b
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_11 0xc003
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_12 0xc008
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_13 0xc00d
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_14 0xc012
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_15 0xc017
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_16 0xc01a
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_17 0xc01b
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_18 0xc01c
+
+#define OP_PCL_SSL30_DES40_CBC_MD5 0x0029
+
+#define OP_PCL_SSL30_DES_CBC_MD5 0x0022
+
+#define OP_PCL_SSL30_DES40_CBC_SHA 0x0008
+#define OP_PCL_SSL30_DES40_CBC_SHA_2 0x000b
+#define OP_PCL_SSL30_DES40_CBC_SHA_3 0x000e
+#define OP_PCL_SSL30_DES40_CBC_SHA_4 0x0011
+#define OP_PCL_SSL30_DES40_CBC_SHA_5 0x0014
+#define OP_PCL_SSL30_DES40_CBC_SHA_6 0x0019
+#define OP_PCL_SSL30_DES40_CBC_SHA_7 0x0026
+
+#define OP_PCL_SSL30_DES_CBC_SHA 0x001e
+#define OP_PCL_SSL30_DES_CBC_SHA_2 0x0009
+#define OP_PCL_SSL30_DES_CBC_SHA_3 0x000c
+#define OP_PCL_SSL30_DES_CBC_SHA_4 0x000f
+#define OP_PCL_SSL30_DES_CBC_SHA_5 0x0012
+#define OP_PCL_SSL30_DES_CBC_SHA_6 0x0015
+#define OP_PCL_SSL30_DES_CBC_SHA_7 0x001a
+
+#define OP_PCL_SSL30_RC4_128_MD5 0x0024
+#define OP_PCL_SSL30_RC4_128_MD5_2 0x0004
+#define OP_PCL_SSL30_RC4_128_MD5_3 0x0018
+
+#define OP_PCL_SSL30_RC4_40_MD5 0x002b
+#define OP_PCL_SSL30_RC4_40_MD5_2 0x0003
+#define OP_PCL_SSL30_RC4_40_MD5_3 0x0017
+
+#define OP_PCL_SSL30_RC4_128_SHA 0x0020
+#define OP_PCL_SSL30_RC4_128_SHA_2 0x008a
+#define OP_PCL_SSL30_RC4_128_SHA_3 0x008e
+#define OP_PCL_SSL30_RC4_128_SHA_4 0x0092
+#define OP_PCL_SSL30_RC4_128_SHA_5 0x0005
+#define OP_PCL_SSL30_RC4_128_SHA_6 0xc002
+#define OP_PCL_SSL30_RC4_128_SHA_7 0xc007
+#define OP_PCL_SSL30_RC4_128_SHA_8 0xc00c
+#define OP_PCL_SSL30_RC4_128_SHA_9 0xc011
+#define OP_PCL_SSL30_RC4_128_SHA_10 0xc016
+
+#define OP_PCL_SSL30_RC4_40_SHA 0x0028
/* For TLS 1.0 - OP_PCLID_TLS10 */
-#define OP_PCL_TLS10_AES_128_CBC_SHA 0x002f
-#define OP_PCL_TLS10_AES_128_CBC_SHA_2 0x0030
-#define OP_PCL_TLS10_AES_128_CBC_SHA_3 0x0031
-#define OP_PCL_TLS10_AES_128_CBC_SHA_4 0x0032
-#define OP_PCL_TLS10_AES_128_CBC_SHA_5 0x0033
-#define OP_PCL_TLS10_AES_128_CBC_SHA_6 0x0034
-#define OP_PCL_TLS10_AES_128_CBC_SHA_7 0x008c
-#define OP_PCL_TLS10_AES_128_CBC_SHA_8 0x0090
-#define OP_PCL_TLS10_AES_128_CBC_SHA_9 0x0094
-#define OP_PCL_TLS10_AES_128_CBC_SHA_10 0xc004
-#define OP_PCL_TLS10_AES_128_CBC_SHA_11 0xc009
-#define OP_PCL_TLS10_AES_128_CBC_SHA_12 0xc00e
-#define OP_PCL_TLS10_AES_128_CBC_SHA_13 0xc013
-#define OP_PCL_TLS10_AES_128_CBC_SHA_14 0xc018
-#define OP_PCL_TLS10_AES_128_CBC_SHA_15 0xc01d
-#define OP_PCL_TLS10_AES_128_CBC_SHA_16 0xc01e
-#define OP_PCL_TLS10_AES_128_CBC_SHA_17 0xc01f
-
-#define OP_PCL_TLS10_AES_256_CBC_SHA 0x0035
-#define OP_PCL_TLS10_AES_256_CBC_SHA_2 0x0036
-#define OP_PCL_TLS10_AES_256_CBC_SHA_3 0x0037
-#define OP_PCL_TLS10_AES_256_CBC_SHA_4 0x0038
-#define OP_PCL_TLS10_AES_256_CBC_SHA_5 0x0039
-#define OP_PCL_TLS10_AES_256_CBC_SHA_6 0x003a
-#define OP_PCL_TLS10_AES_256_CBC_SHA_7 0x008d
-#define OP_PCL_TLS10_AES_256_CBC_SHA_8 0x0091
-#define OP_PCL_TLS10_AES_256_CBC_SHA_9 0x0095
-#define OP_PCL_TLS10_AES_256_CBC_SHA_10 0xc005
-#define OP_PCL_TLS10_AES_256_CBC_SHA_11 0xc00a
-#define OP_PCL_TLS10_AES_256_CBC_SHA_12 0xc00f
-#define OP_PCL_TLS10_AES_256_CBC_SHA_13 0xc014
-#define OP_PCL_TLS10_AES_256_CBC_SHA_14 0xc019
-#define OP_PCL_TLS10_AES_256_CBC_SHA_15 0xc020
-#define OP_PCL_TLS10_AES_256_CBC_SHA_16 0xc021
-#define OP_PCL_TLS10_AES_256_CBC_SHA_17 0xc022
-
-/* #define OP_PCL_TLS10_3DES_EDE_CBC_MD5 0x0023 */
-
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA 0x001f
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_2 0x008b
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_3 0x008f
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_4 0x0093
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_5 0x000a
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_6 0x000d
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_7 0x0010
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_8 0x0013
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_9 0x0016
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_10 0x001b
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_11 0xc003
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_12 0xc008
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_13 0xc00d
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_14 0xc012
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_15 0xc017
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_16 0xc01a
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_17 0xc01b
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_18 0xc01c
-
-#define OP_PCL_TLS10_DES40_CBC_MD5 0x0029
-
-#define OP_PCL_TLS10_DES_CBC_MD5 0x0022
-
-#define OP_PCL_TLS10_DES40_CBC_SHA 0x0008
-#define OP_PCL_TLS10_DES40_CBC_SHA_2 0x000b
-#define OP_PCL_TLS10_DES40_CBC_SHA_3 0x000e
-#define OP_PCL_TLS10_DES40_CBC_SHA_4 0x0011
-#define OP_PCL_TLS10_DES40_CBC_SHA_5 0x0014
-#define OP_PCL_TLS10_DES40_CBC_SHA_6 0x0019
-#define OP_PCL_TLS10_DES40_CBC_SHA_7 0x0026
-
-
-#define OP_PCL_TLS10_DES_CBC_SHA 0x001e
-#define OP_PCL_TLS10_DES_CBC_SHA_2 0x0009
-#define OP_PCL_TLS10_DES_CBC_SHA_3 0x000c
-#define OP_PCL_TLS10_DES_CBC_SHA_4 0x000f
-#define OP_PCL_TLS10_DES_CBC_SHA_5 0x0012
-#define OP_PCL_TLS10_DES_CBC_SHA_6 0x0015
-#define OP_PCL_TLS10_DES_CBC_SHA_7 0x001a
-
-#define OP_PCL_TLS10_RC4_128_MD5 0x0024
-#define OP_PCL_TLS10_RC4_128_MD5_2 0x0004
-#define OP_PCL_TLS10_RC4_128_MD5_3 0x0018
-
-#define OP_PCL_TLS10_RC4_40_MD5 0x002b
-#define OP_PCL_TLS10_RC4_40_MD5_2 0x0003
-#define OP_PCL_TLS10_RC4_40_MD5_3 0x0017
-
-#define OP_PCL_TLS10_RC4_128_SHA 0x0020
-#define OP_PCL_TLS10_RC4_128_SHA_2 0x008a
-#define OP_PCL_TLS10_RC4_128_SHA_3 0x008e
-#define OP_PCL_TLS10_RC4_128_SHA_4 0x0092
-#define OP_PCL_TLS10_RC4_128_SHA_5 0x0005
-#define OP_PCL_TLS10_RC4_128_SHA_6 0xc002
-#define OP_PCL_TLS10_RC4_128_SHA_7 0xc007
-#define OP_PCL_TLS10_RC4_128_SHA_8 0xc00c
-#define OP_PCL_TLS10_RC4_128_SHA_9 0xc011
-#define OP_PCL_TLS10_RC4_128_SHA_10 0xc016
-
-#define OP_PCL_TLS10_RC4_40_SHA 0x0028
-
-#define OP_PCL_TLS10_3DES_EDE_CBC_MD5 0xff23
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA160 0xff30
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA224 0xff34
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA256 0xff36
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA384 0xff33
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA512 0xff35
-#define OP_PCL_TLS10_AES_128_CBC_SHA160 0xff80
-#define OP_PCL_TLS10_AES_128_CBC_SHA224 0xff84
-#define OP_PCL_TLS10_AES_128_CBC_SHA256 0xff86
-#define OP_PCL_TLS10_AES_128_CBC_SHA384 0xff83
-#define OP_PCL_TLS10_AES_128_CBC_SHA512 0xff85
-#define OP_PCL_TLS10_AES_192_CBC_SHA160 0xff20
-#define OP_PCL_TLS10_AES_192_CBC_SHA224 0xff24
-#define OP_PCL_TLS10_AES_192_CBC_SHA256 0xff26
-#define OP_PCL_TLS10_AES_192_CBC_SHA384 0xff23
-#define OP_PCL_TLS10_AES_192_CBC_SHA512 0xff25
-#define OP_PCL_TLS10_AES_256_CBC_SHA160 0xff60
-#define OP_PCL_TLS10_AES_256_CBC_SHA224 0xff64
-#define OP_PCL_TLS10_AES_256_CBC_SHA256 0xff66
-#define OP_PCL_TLS10_AES_256_CBC_SHA384 0xff63
-#define OP_PCL_TLS10_AES_256_CBC_SHA512 0xff65
+#define OP_PCL_TLS10_AES_128_CBC_SHA 0x002f
+#define OP_PCL_TLS10_AES_128_CBC_SHA_2 0x0030
+#define OP_PCL_TLS10_AES_128_CBC_SHA_3 0x0031
+#define OP_PCL_TLS10_AES_128_CBC_SHA_4 0x0032
+#define OP_PCL_TLS10_AES_128_CBC_SHA_5 0x0033
+#define OP_PCL_TLS10_AES_128_CBC_SHA_6 0x0034
+#define OP_PCL_TLS10_AES_128_CBC_SHA_7 0x008c
+#define OP_PCL_TLS10_AES_128_CBC_SHA_8 0x0090
+#define OP_PCL_TLS10_AES_128_CBC_SHA_9 0x0094
+#define OP_PCL_TLS10_AES_128_CBC_SHA_10 0xc004
+#define OP_PCL_TLS10_AES_128_CBC_SHA_11 0xc009
+#define OP_PCL_TLS10_AES_128_CBC_SHA_12 0xc00e
+#define OP_PCL_TLS10_AES_128_CBC_SHA_13 0xc013
+#define OP_PCL_TLS10_AES_128_CBC_SHA_14 0xc018
+#define OP_PCL_TLS10_AES_128_CBC_SHA_15 0xc01d
+#define OP_PCL_TLS10_AES_128_CBC_SHA_16 0xc01e
+#define OP_PCL_TLS10_AES_128_CBC_SHA_17 0xc01f
+
+#define OP_PCL_TLS10_AES_256_CBC_SHA 0x0035
+#define OP_PCL_TLS10_AES_256_CBC_SHA_2 0x0036
+#define OP_PCL_TLS10_AES_256_CBC_SHA_3 0x0037
+#define OP_PCL_TLS10_AES_256_CBC_SHA_4 0x0038
+#define OP_PCL_TLS10_AES_256_CBC_SHA_5 0x0039
+#define OP_PCL_TLS10_AES_256_CBC_SHA_6 0x003a
+#define OP_PCL_TLS10_AES_256_CBC_SHA_7 0x008d
+#define OP_PCL_TLS10_AES_256_CBC_SHA_8 0x0091
+#define OP_PCL_TLS10_AES_256_CBC_SHA_9 0x0095
+#define OP_PCL_TLS10_AES_256_CBC_SHA_10 0xc005
+#define OP_PCL_TLS10_AES_256_CBC_SHA_11 0xc00a
+#define OP_PCL_TLS10_AES_256_CBC_SHA_12 0xc00f
+#define OP_PCL_TLS10_AES_256_CBC_SHA_13 0xc014
+#define OP_PCL_TLS10_AES_256_CBC_SHA_14 0xc019
+#define OP_PCL_TLS10_AES_256_CBC_SHA_15 0xc020
+#define OP_PCL_TLS10_AES_256_CBC_SHA_16 0xc021
+#define OP_PCL_TLS10_AES_256_CBC_SHA_17 0xc022
+
+/* #define OP_PCL_TLS10_3DES_EDE_CBC_MD5 0x0023 */
+
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA 0x001f
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_2 0x008b
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_3 0x008f
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_4 0x0093
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_5 0x000a
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_6 0x000d
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_7 0x0010
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_8 0x0013
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_9 0x0016
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_10 0x001b
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_11 0xc003
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_12 0xc008
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_13 0xc00d
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_14 0xc012
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_15 0xc017
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_16 0xc01a
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_17 0xc01b
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_18 0xc01c
+
+#define OP_PCL_TLS10_DES40_CBC_MD5 0x0029
+
+#define OP_PCL_TLS10_DES_CBC_MD5 0x0022
+
+#define OP_PCL_TLS10_DES40_CBC_SHA 0x0008
+#define OP_PCL_TLS10_DES40_CBC_SHA_2 0x000b
+#define OP_PCL_TLS10_DES40_CBC_SHA_3 0x000e
+#define OP_PCL_TLS10_DES40_CBC_SHA_4 0x0011
+#define OP_PCL_TLS10_DES40_CBC_SHA_5 0x0014
+#define OP_PCL_TLS10_DES40_CBC_SHA_6 0x0019
+#define OP_PCL_TLS10_DES40_CBC_SHA_7 0x0026
+
+
+#define OP_PCL_TLS10_DES_CBC_SHA 0x001e
+#define OP_PCL_TLS10_DES_CBC_SHA_2 0x0009
+#define OP_PCL_TLS10_DES_CBC_SHA_3 0x000c
+#define OP_PCL_TLS10_DES_CBC_SHA_4 0x000f
+#define OP_PCL_TLS10_DES_CBC_SHA_5 0x0012
+#define OP_PCL_TLS10_DES_CBC_SHA_6 0x0015
+#define OP_PCL_TLS10_DES_CBC_SHA_7 0x001a
+
+#define OP_PCL_TLS10_RC4_128_MD5 0x0024
+#define OP_PCL_TLS10_RC4_128_MD5_2 0x0004
+#define OP_PCL_TLS10_RC4_128_MD5_3 0x0018
+
+#define OP_PCL_TLS10_RC4_40_MD5 0x002b
+#define OP_PCL_TLS10_RC4_40_MD5_2 0x0003
+#define OP_PCL_TLS10_RC4_40_MD5_3 0x0017
+
+#define OP_PCL_TLS10_RC4_128_SHA 0x0020
+#define OP_PCL_TLS10_RC4_128_SHA_2 0x008a
+#define OP_PCL_TLS10_RC4_128_SHA_3 0x008e
+#define OP_PCL_TLS10_RC4_128_SHA_4 0x0092
+#define OP_PCL_TLS10_RC4_128_SHA_5 0x0005
+#define OP_PCL_TLS10_RC4_128_SHA_6 0xc002
+#define OP_PCL_TLS10_RC4_128_SHA_7 0xc007
+#define OP_PCL_TLS10_RC4_128_SHA_8 0xc00c
+#define OP_PCL_TLS10_RC4_128_SHA_9 0xc011
+#define OP_PCL_TLS10_RC4_128_SHA_10 0xc016
+
+#define OP_PCL_TLS10_RC4_40_SHA 0x0028
+
+#define OP_PCL_TLS10_3DES_EDE_CBC_MD5 0xff23
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA160 0xff30
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA224 0xff34
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA256 0xff36
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA384 0xff33
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA512 0xff35
+#define OP_PCL_TLS10_AES_128_CBC_SHA160 0xff80
+#define OP_PCL_TLS10_AES_128_CBC_SHA224 0xff84
+#define OP_PCL_TLS10_AES_128_CBC_SHA256 0xff86
+#define OP_PCL_TLS10_AES_128_CBC_SHA384 0xff83
+#define OP_PCL_TLS10_AES_128_CBC_SHA512 0xff85
+#define OP_PCL_TLS10_AES_192_CBC_SHA160 0xff20
+#define OP_PCL_TLS10_AES_192_CBC_SHA224 0xff24
+#define OP_PCL_TLS10_AES_192_CBC_SHA256 0xff26
+#define OP_PCL_TLS10_AES_192_CBC_SHA384 0xff23
+#define OP_PCL_TLS10_AES_192_CBC_SHA512 0xff25
+#define OP_PCL_TLS10_AES_256_CBC_SHA160 0xff60
+#define OP_PCL_TLS10_AES_256_CBC_SHA224 0xff64
+#define OP_PCL_TLS10_AES_256_CBC_SHA256 0xff66
+#define OP_PCL_TLS10_AES_256_CBC_SHA384 0xff63
+#define OP_PCL_TLS10_AES_256_CBC_SHA512 0xff65
/* For TLS 1.1 - OP_PCLID_TLS11 */
-#define OP_PCL_TLS11_AES_128_CBC_SHA 0x002f
-#define OP_PCL_TLS11_AES_128_CBC_SHA_2 0x0030
-#define OP_PCL_TLS11_AES_128_CBC_SHA_3 0x0031
-#define OP_PCL_TLS11_AES_128_CBC_SHA_4 0x0032
-#define OP_PCL_TLS11_AES_128_CBC_SHA_5 0x0033
-#define OP_PCL_TLS11_AES_128_CBC_SHA_6 0x0034
-#define OP_PCL_TLS11_AES_128_CBC_SHA_7 0x008c
-#define OP_PCL_TLS11_AES_128_CBC_SHA_8 0x0090
-#define OP_PCL_TLS11_AES_128_CBC_SHA_9 0x0094
-#define OP_PCL_TLS11_AES_128_CBC_SHA_10 0xc004
-#define OP_PCL_TLS11_AES_128_CBC_SHA_11 0xc009
-#define OP_PCL_TLS11_AES_128_CBC_SHA_12 0xc00e
-#define OP_PCL_TLS11_AES_128_CBC_SHA_13 0xc013
-#define OP_PCL_TLS11_AES_128_CBC_SHA_14 0xc018
-#define OP_PCL_TLS11_AES_128_CBC_SHA_15 0xc01d
-#define OP_PCL_TLS11_AES_128_CBC_SHA_16 0xc01e
-#define OP_PCL_TLS11_AES_128_CBC_SHA_17 0xc01f
-
-#define OP_PCL_TLS11_AES_256_CBC_SHA 0x0035
-#define OP_PCL_TLS11_AES_256_CBC_SHA_2 0x0036
-#define OP_PCL_TLS11_AES_256_CBC_SHA_3 0x0037
-#define OP_PCL_TLS11_AES_256_CBC_SHA_4 0x0038
-#define OP_PCL_TLS11_AES_256_CBC_SHA_5 0x0039
-#define OP_PCL_TLS11_AES_256_CBC_SHA_6 0x003a
-#define OP_PCL_TLS11_AES_256_CBC_SHA_7 0x008d
-#define OP_PCL_TLS11_AES_256_CBC_SHA_8 0x0091
-#define OP_PCL_TLS11_AES_256_CBC_SHA_9 0x0095
-#define OP_PCL_TLS11_AES_256_CBC_SHA_10 0xc005
-#define OP_PCL_TLS11_AES_256_CBC_SHA_11 0xc00a
-#define OP_PCL_TLS11_AES_256_CBC_SHA_12 0xc00f
-#define OP_PCL_TLS11_AES_256_CBC_SHA_13 0xc014
-#define OP_PCL_TLS11_AES_256_CBC_SHA_14 0xc019
-#define OP_PCL_TLS11_AES_256_CBC_SHA_15 0xc020
-#define OP_PCL_TLS11_AES_256_CBC_SHA_16 0xc021
-#define OP_PCL_TLS11_AES_256_CBC_SHA_17 0xc022
-
-/* #define OP_PCL_TLS11_3DES_EDE_CBC_MD5 0x0023 */
-
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA 0x001f
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_2 0x008b
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_3 0x008f
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_4 0x0093
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_5 0x000a
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_6 0x000d
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_7 0x0010
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_8 0x0013
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_9 0x0016
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_10 0x001b
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_11 0xc003
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_12 0xc008
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_13 0xc00d
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_14 0xc012
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_15 0xc017
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_16 0xc01a
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_17 0xc01b
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_18 0xc01c
-
-#define OP_PCL_TLS11_DES40_CBC_MD5 0x0029
-
-#define OP_PCL_TLS11_DES_CBC_MD5 0x0022
-
-#define OP_PCL_TLS11_DES40_CBC_SHA 0x0008
-#define OP_PCL_TLS11_DES40_CBC_SHA_2 0x000b
-#define OP_PCL_TLS11_DES40_CBC_SHA_3 0x000e
-#define OP_PCL_TLS11_DES40_CBC_SHA_4 0x0011
-#define OP_PCL_TLS11_DES40_CBC_SHA_5 0x0014
-#define OP_PCL_TLS11_DES40_CBC_SHA_6 0x0019
-#define OP_PCL_TLS11_DES40_CBC_SHA_7 0x0026
-
-#define OP_PCL_TLS11_DES_CBC_SHA 0x001e
-#define OP_PCL_TLS11_DES_CBC_SHA_2 0x0009
-#define OP_PCL_TLS11_DES_CBC_SHA_3 0x000c
-#define OP_PCL_TLS11_DES_CBC_SHA_4 0x000f
-#define OP_PCL_TLS11_DES_CBC_SHA_5 0x0012
-#define OP_PCL_TLS11_DES_CBC_SHA_6 0x0015
-#define OP_PCL_TLS11_DES_CBC_SHA_7 0x001a
-
-#define OP_PCL_TLS11_RC4_128_MD5 0x0024
-#define OP_PCL_TLS11_RC4_128_MD5_2 0x0004
-#define OP_PCL_TLS11_RC4_128_MD5_3 0x0018
-
-#define OP_PCL_TLS11_RC4_40_MD5 0x002b
-#define OP_PCL_TLS11_RC4_40_MD5_2 0x0003
-#define OP_PCL_TLS11_RC4_40_MD5_3 0x0017
-
-#define OP_PCL_TLS11_RC4_128_SHA 0x0020
-#define OP_PCL_TLS11_RC4_128_SHA_2 0x008a
-#define OP_PCL_TLS11_RC4_128_SHA_3 0x008e
-#define OP_PCL_TLS11_RC4_128_SHA_4 0x0092
-#define OP_PCL_TLS11_RC4_128_SHA_5 0x0005
-#define OP_PCL_TLS11_RC4_128_SHA_6 0xc002
-#define OP_PCL_TLS11_RC4_128_SHA_7 0xc007
-#define OP_PCL_TLS11_RC4_128_SHA_8 0xc00c
-#define OP_PCL_TLS11_RC4_128_SHA_9 0xc011
-#define OP_PCL_TLS11_RC4_128_SHA_10 0xc016
-
-#define OP_PCL_TLS11_RC4_40_SHA 0x0028
-
-#define OP_PCL_TLS11_3DES_EDE_CBC_MD5 0xff23
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA160 0xff30
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA224 0xff34
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA256 0xff36
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA384 0xff33
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA512 0xff35
-#define OP_PCL_TLS11_AES_128_CBC_SHA160 0xff80
-#define OP_PCL_TLS11_AES_128_CBC_SHA224 0xff84
-#define OP_PCL_TLS11_AES_128_CBC_SHA256 0xff86
-#define OP_PCL_TLS11_AES_128_CBC_SHA384 0xff83
-#define OP_PCL_TLS11_AES_128_CBC_SHA512 0xff85
-#define OP_PCL_TLS11_AES_192_CBC_SHA160 0xff20
-#define OP_PCL_TLS11_AES_192_CBC_SHA224 0xff24
-#define OP_PCL_TLS11_AES_192_CBC_SHA256 0xff26
-#define OP_PCL_TLS11_AES_192_CBC_SHA384 0xff23
-#define OP_PCL_TLS11_AES_192_CBC_SHA512 0xff25
-#define OP_PCL_TLS11_AES_256_CBC_SHA160 0xff60
-#define OP_PCL_TLS11_AES_256_CBC_SHA224 0xff64
-#define OP_PCL_TLS11_AES_256_CBC_SHA256 0xff66
-#define OP_PCL_TLS11_AES_256_CBC_SHA384 0xff63
-#define OP_PCL_TLS11_AES_256_CBC_SHA512 0xff65
+#define OP_PCL_TLS11_AES_128_CBC_SHA 0x002f
+#define OP_PCL_TLS11_AES_128_CBC_SHA_2 0x0030
+#define OP_PCL_TLS11_AES_128_CBC_SHA_3 0x0031
+#define OP_PCL_TLS11_AES_128_CBC_SHA_4 0x0032
+#define OP_PCL_TLS11_AES_128_CBC_SHA_5 0x0033
+#define OP_PCL_TLS11_AES_128_CBC_SHA_6 0x0034
+#define OP_PCL_TLS11_AES_128_CBC_SHA_7 0x008c
+#define OP_PCL_TLS11_AES_128_CBC_SHA_8 0x0090
+#define OP_PCL_TLS11_AES_128_CBC_SHA_9 0x0094
+#define OP_PCL_TLS11_AES_128_CBC_SHA_10 0xc004
+#define OP_PCL_TLS11_AES_128_CBC_SHA_11 0xc009
+#define OP_PCL_TLS11_AES_128_CBC_SHA_12 0xc00e
+#define OP_PCL_TLS11_AES_128_CBC_SHA_13 0xc013
+#define OP_PCL_TLS11_AES_128_CBC_SHA_14 0xc018
+#define OP_PCL_TLS11_AES_128_CBC_SHA_15 0xc01d
+#define OP_PCL_TLS11_AES_128_CBC_SHA_16 0xc01e
+#define OP_PCL_TLS11_AES_128_CBC_SHA_17 0xc01f
+
+#define OP_PCL_TLS11_AES_256_CBC_SHA 0x0035
+#define OP_PCL_TLS11_AES_256_CBC_SHA_2 0x0036
+#define OP_PCL_TLS11_AES_256_CBC_SHA_3 0x0037
+#define OP_PCL_TLS11_AES_256_CBC_SHA_4 0x0038
+#define OP_PCL_TLS11_AES_256_CBC_SHA_5 0x0039
+#define OP_PCL_TLS11_AES_256_CBC_SHA_6 0x003a
+#define OP_PCL_TLS11_AES_256_CBC_SHA_7 0x008d
+#define OP_PCL_TLS11_AES_256_CBC_SHA_8 0x0091
+#define OP_PCL_TLS11_AES_256_CBC_SHA_9 0x0095
+#define OP_PCL_TLS11_AES_256_CBC_SHA_10 0xc005
+#define OP_PCL_TLS11_AES_256_CBC_SHA_11 0xc00a
+#define OP_PCL_TLS11_AES_256_CBC_SHA_12 0xc00f
+#define OP_PCL_TLS11_AES_256_CBC_SHA_13 0xc014
+#define OP_PCL_TLS11_AES_256_CBC_SHA_14 0xc019
+#define OP_PCL_TLS11_AES_256_CBC_SHA_15 0xc020
+#define OP_PCL_TLS11_AES_256_CBC_SHA_16 0xc021
+#define OP_PCL_TLS11_AES_256_CBC_SHA_17 0xc022
+
+/* #define OP_PCL_TLS11_3DES_EDE_CBC_MD5 0x0023 */
+
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA 0x001f
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_2 0x008b
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_3 0x008f
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_4 0x0093
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_5 0x000a
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_6 0x000d
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_7 0x0010
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_8 0x0013
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_9 0x0016
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_10 0x001b
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_11 0xc003
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_12 0xc008
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_13 0xc00d
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_14 0xc012
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_15 0xc017
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_16 0xc01a
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_17 0xc01b
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_18 0xc01c
+
+#define OP_PCL_TLS11_DES40_CBC_MD5 0x0029
+
+#define OP_PCL_TLS11_DES_CBC_MD5 0x0022
+
+#define OP_PCL_TLS11_DES40_CBC_SHA 0x0008
+#define OP_PCL_TLS11_DES40_CBC_SHA_2 0x000b
+#define OP_PCL_TLS11_DES40_CBC_SHA_3 0x000e
+#define OP_PCL_TLS11_DES40_CBC_SHA_4 0x0011
+#define OP_PCL_TLS11_DES40_CBC_SHA_5 0x0014
+#define OP_PCL_TLS11_DES40_CBC_SHA_6 0x0019
+#define OP_PCL_TLS11_DES40_CBC_SHA_7 0x0026
+
+#define OP_PCL_TLS11_DES_CBC_SHA 0x001e
+#define OP_PCL_TLS11_DES_CBC_SHA_2 0x0009
+#define OP_PCL_TLS11_DES_CBC_SHA_3 0x000c
+#define OP_PCL_TLS11_DES_CBC_SHA_4 0x000f
+#define OP_PCL_TLS11_DES_CBC_SHA_5 0x0012
+#define OP_PCL_TLS11_DES_CBC_SHA_6 0x0015
+#define OP_PCL_TLS11_DES_CBC_SHA_7 0x001a
+
+#define OP_PCL_TLS11_RC4_128_MD5 0x0024
+#define OP_PCL_TLS11_RC4_128_MD5_2 0x0004
+#define OP_PCL_TLS11_RC4_128_MD5_3 0x0018
+
+#define OP_PCL_TLS11_RC4_40_MD5 0x002b
+#define OP_PCL_TLS11_RC4_40_MD5_2 0x0003
+#define OP_PCL_TLS11_RC4_40_MD5_3 0x0017
+
+#define OP_PCL_TLS11_RC4_128_SHA 0x0020
+#define OP_PCL_TLS11_RC4_128_SHA_2 0x008a
+#define OP_PCL_TLS11_RC4_128_SHA_3 0x008e
+#define OP_PCL_TLS11_RC4_128_SHA_4 0x0092
+#define OP_PCL_TLS11_RC4_128_SHA_5 0x0005
+#define OP_PCL_TLS11_RC4_128_SHA_6 0xc002
+#define OP_PCL_TLS11_RC4_128_SHA_7 0xc007
+#define OP_PCL_TLS11_RC4_128_SHA_8 0xc00c
+#define OP_PCL_TLS11_RC4_128_SHA_9 0xc011
+#define OP_PCL_TLS11_RC4_128_SHA_10 0xc016
+
+#define OP_PCL_TLS11_RC4_40_SHA 0x0028
+
+#define OP_PCL_TLS11_3DES_EDE_CBC_MD5 0xff23
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA160 0xff30
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA224 0xff34
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA256 0xff36
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA384 0xff33
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA512 0xff35
+#define OP_PCL_TLS11_AES_128_CBC_SHA160 0xff80
+#define OP_PCL_TLS11_AES_128_CBC_SHA224 0xff84
+#define OP_PCL_TLS11_AES_128_CBC_SHA256 0xff86
+#define OP_PCL_TLS11_AES_128_CBC_SHA384 0xff83
+#define OP_PCL_TLS11_AES_128_CBC_SHA512 0xff85
+#define OP_PCL_TLS11_AES_192_CBC_SHA160 0xff20
+#define OP_PCL_TLS11_AES_192_CBC_SHA224 0xff24
+#define OP_PCL_TLS11_AES_192_CBC_SHA256 0xff26
+#define OP_PCL_TLS11_AES_192_CBC_SHA384 0xff23
+#define OP_PCL_TLS11_AES_192_CBC_SHA512 0xff25
+#define OP_PCL_TLS11_AES_256_CBC_SHA160 0xff60
+#define OP_PCL_TLS11_AES_256_CBC_SHA224 0xff64
+#define OP_PCL_TLS11_AES_256_CBC_SHA256 0xff66
+#define OP_PCL_TLS11_AES_256_CBC_SHA384 0xff63
+#define OP_PCL_TLS11_AES_256_CBC_SHA512 0xff65
/* For TLS 1.2 - OP_PCLID_TLS12 */
-#define OP_PCL_TLS12_AES_128_CBC_SHA 0x002f
-#define OP_PCL_TLS12_AES_128_CBC_SHA_2 0x0030
-#define OP_PCL_TLS12_AES_128_CBC_SHA_3 0x0031
-#define OP_PCL_TLS12_AES_128_CBC_SHA_4 0x0032
-#define OP_PCL_TLS12_AES_128_CBC_SHA_5 0x0033
-#define OP_PCL_TLS12_AES_128_CBC_SHA_6 0x0034
-#define OP_PCL_TLS12_AES_128_CBC_SHA_7 0x008c
-#define OP_PCL_TLS12_AES_128_CBC_SHA_8 0x0090
-#define OP_PCL_TLS12_AES_128_CBC_SHA_9 0x0094
-#define OP_PCL_TLS12_AES_128_CBC_SHA_10 0xc004
-#define OP_PCL_TLS12_AES_128_CBC_SHA_11 0xc009
-#define OP_PCL_TLS12_AES_128_CBC_SHA_12 0xc00e
-#define OP_PCL_TLS12_AES_128_CBC_SHA_13 0xc013
-#define OP_PCL_TLS12_AES_128_CBC_SHA_14 0xc018
-#define OP_PCL_TLS12_AES_128_CBC_SHA_15 0xc01d
-#define OP_PCL_TLS12_AES_128_CBC_SHA_16 0xc01e
-#define OP_PCL_TLS12_AES_128_CBC_SHA_17 0xc01f
-
-#define OP_PCL_TLS12_AES_256_CBC_SHA 0x0035
-#define OP_PCL_TLS12_AES_256_CBC_SHA_2 0x0036
-#define OP_PCL_TLS12_AES_256_CBC_SHA_3 0x0037
-#define OP_PCL_TLS12_AES_256_CBC_SHA_4 0x0038
-#define OP_PCL_TLS12_AES_256_CBC_SHA_5 0x0039
-#define OP_PCL_TLS12_AES_256_CBC_SHA_6 0x003a
-#define OP_PCL_TLS12_AES_256_CBC_SHA_7 0x008d
-#define OP_PCL_TLS12_AES_256_CBC_SHA_8 0x0091
-#define OP_PCL_TLS12_AES_256_CBC_SHA_9 0x0095
-#define OP_PCL_TLS12_AES_256_CBC_SHA_10 0xc005
-#define OP_PCL_TLS12_AES_256_CBC_SHA_11 0xc00a
-#define OP_PCL_TLS12_AES_256_CBC_SHA_12 0xc00f
-#define OP_PCL_TLS12_AES_256_CBC_SHA_13 0xc014
-#define OP_PCL_TLS12_AES_256_CBC_SHA_14 0xc019
-#define OP_PCL_TLS12_AES_256_CBC_SHA_15 0xc020
-#define OP_PCL_TLS12_AES_256_CBC_SHA_16 0xc021
-#define OP_PCL_TLS12_AES_256_CBC_SHA_17 0xc022
-
-/* #define OP_PCL_TLS12_3DES_EDE_CBC_MD5 0x0023 */
-
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA 0x001f
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_2 0x008b
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_3 0x008f
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_4 0x0093
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_5 0x000a
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_6 0x000d
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_7 0x0010
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_8 0x0013
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_9 0x0016
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_10 0x001b
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_11 0xc003
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_12 0xc008
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_13 0xc00d
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_14 0xc012
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_15 0xc017
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_16 0xc01a
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_17 0xc01b
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_18 0xc01c
-
-#define OP_PCL_TLS12_DES40_CBC_MD5 0x0029
-
-#define OP_PCL_TLS12_DES_CBC_MD5 0x0022
-
-#define OP_PCL_TLS12_DES40_CBC_SHA 0x0008
-#define OP_PCL_TLS12_DES40_CBC_SHA_2 0x000b
-#define OP_PCL_TLS12_DES40_CBC_SHA_3 0x000e
-#define OP_PCL_TLS12_DES40_CBC_SHA_4 0x0011
-#define OP_PCL_TLS12_DES40_CBC_SHA_5 0x0014
-#define OP_PCL_TLS12_DES40_CBC_SHA_6 0x0019
-#define OP_PCL_TLS12_DES40_CBC_SHA_7 0x0026
-
-#define OP_PCL_TLS12_DES_CBC_SHA 0x001e
-#define OP_PCL_TLS12_DES_CBC_SHA_2 0x0009
-#define OP_PCL_TLS12_DES_CBC_SHA_3 0x000c
-#define OP_PCL_TLS12_DES_CBC_SHA_4 0x000f
-#define OP_PCL_TLS12_DES_CBC_SHA_5 0x0012
-#define OP_PCL_TLS12_DES_CBC_SHA_6 0x0015
-#define OP_PCL_TLS12_DES_CBC_SHA_7 0x001a
-
-#define OP_PCL_TLS12_RC4_128_MD5 0x0024
-#define OP_PCL_TLS12_RC4_128_MD5_2 0x0004
-#define OP_PCL_TLS12_RC4_128_MD5_3 0x0018
-
-#define OP_PCL_TLS12_RC4_40_MD5 0x002b
-#define OP_PCL_TLS12_RC4_40_MD5_2 0x0003
-#define OP_PCL_TLS12_RC4_40_MD5_3 0x0017
-
-#define OP_PCL_TLS12_RC4_128_SHA 0x0020
-#define OP_PCL_TLS12_RC4_128_SHA_2 0x008a
-#define OP_PCL_TLS12_RC4_128_SHA_3 0x008e
-#define OP_PCL_TLS12_RC4_128_SHA_4 0x0092
-#define OP_PCL_TLS12_RC4_128_SHA_5 0x0005
-#define OP_PCL_TLS12_RC4_128_SHA_6 0xc002
-#define OP_PCL_TLS12_RC4_128_SHA_7 0xc007
-#define OP_PCL_TLS12_RC4_128_SHA_8 0xc00c
-#define OP_PCL_TLS12_RC4_128_SHA_9 0xc011
-#define OP_PCL_TLS12_RC4_128_SHA_10 0xc016
-
-#define OP_PCL_TLS12_RC4_40_SHA 0x0028
-
-/* #define OP_PCL_TLS12_AES_128_CBC_SHA256 0x003c */
-#define OP_PCL_TLS12_AES_128_CBC_SHA256_2 0x003e
-#define OP_PCL_TLS12_AES_128_CBC_SHA256_3 0x003f
-#define OP_PCL_TLS12_AES_128_CBC_SHA256_4 0x0040
-#define OP_PCL_TLS12_AES_128_CBC_SHA256_5 0x0067
-#define OP_PCL_TLS12_AES_128_CBC_SHA256_6 0x006c
-
-/* #define OP_PCL_TLS12_AES_256_CBC_SHA256 0x003d */
-#define OP_PCL_TLS12_AES_256_CBC_SHA256_2 0x0068
-#define OP_PCL_TLS12_AES_256_CBC_SHA256_3 0x0069
-#define OP_PCL_TLS12_AES_256_CBC_SHA256_4 0x006a
-#define OP_PCL_TLS12_AES_256_CBC_SHA256_5 0x006b
-#define OP_PCL_TLS12_AES_256_CBC_SHA256_6 0x006d
+#define OP_PCL_TLS12_AES_128_CBC_SHA 0x002f
+#define OP_PCL_TLS12_AES_128_CBC_SHA_2 0x0030
+#define OP_PCL_TLS12_AES_128_CBC_SHA_3 0x0031
+#define OP_PCL_TLS12_AES_128_CBC_SHA_4 0x0032
+#define OP_PCL_TLS12_AES_128_CBC_SHA_5 0x0033
+#define OP_PCL_TLS12_AES_128_CBC_SHA_6 0x0034
+#define OP_PCL_TLS12_AES_128_CBC_SHA_7 0x008c
+#define OP_PCL_TLS12_AES_128_CBC_SHA_8 0x0090
+#define OP_PCL_TLS12_AES_128_CBC_SHA_9 0x0094
+#define OP_PCL_TLS12_AES_128_CBC_SHA_10 0xc004
+#define OP_PCL_TLS12_AES_128_CBC_SHA_11 0xc009
+#define OP_PCL_TLS12_AES_128_CBC_SHA_12 0xc00e
+#define OP_PCL_TLS12_AES_128_CBC_SHA_13 0xc013
+#define OP_PCL_TLS12_AES_128_CBC_SHA_14 0xc018
+#define OP_PCL_TLS12_AES_128_CBC_SHA_15 0xc01d
+#define OP_PCL_TLS12_AES_128_CBC_SHA_16 0xc01e
+#define OP_PCL_TLS12_AES_128_CBC_SHA_17 0xc01f
+
+#define OP_PCL_TLS12_AES_256_CBC_SHA 0x0035
+#define OP_PCL_TLS12_AES_256_CBC_SHA_2 0x0036
+#define OP_PCL_TLS12_AES_256_CBC_SHA_3 0x0037
+#define OP_PCL_TLS12_AES_256_CBC_SHA_4 0x0038
+#define OP_PCL_TLS12_AES_256_CBC_SHA_5 0x0039
+#define OP_PCL_TLS12_AES_256_CBC_SHA_6 0x003a
+#define OP_PCL_TLS12_AES_256_CBC_SHA_7 0x008d
+#define OP_PCL_TLS12_AES_256_CBC_SHA_8 0x0091
+#define OP_PCL_TLS12_AES_256_CBC_SHA_9 0x0095
+#define OP_PCL_TLS12_AES_256_CBC_SHA_10 0xc005
+#define OP_PCL_TLS12_AES_256_CBC_SHA_11 0xc00a
+#define OP_PCL_TLS12_AES_256_CBC_SHA_12 0xc00f
+#define OP_PCL_TLS12_AES_256_CBC_SHA_13 0xc014
+#define OP_PCL_TLS12_AES_256_CBC_SHA_14 0xc019
+#define OP_PCL_TLS12_AES_256_CBC_SHA_15 0xc020
+#define OP_PCL_TLS12_AES_256_CBC_SHA_16 0xc021
+#define OP_PCL_TLS12_AES_256_CBC_SHA_17 0xc022
+
+/* #define OP_PCL_TLS12_3DES_EDE_CBC_MD5 0x0023 */
+
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA 0x001f
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_2 0x008b
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_3 0x008f
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_4 0x0093
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_5 0x000a
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_6 0x000d
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_7 0x0010
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_8 0x0013
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_9 0x0016
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_10 0x001b
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_11 0xc003
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_12 0xc008
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_13 0xc00d
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_14 0xc012
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_15 0xc017
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_16 0xc01a
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_17 0xc01b
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_18 0xc01c
+
+#define OP_PCL_TLS12_DES40_CBC_MD5 0x0029
+
+#define OP_PCL_TLS12_DES_CBC_MD5 0x0022
+
+#define OP_PCL_TLS12_DES40_CBC_SHA 0x0008
+#define OP_PCL_TLS12_DES40_CBC_SHA_2 0x000b
+#define OP_PCL_TLS12_DES40_CBC_SHA_3 0x000e
+#define OP_PCL_TLS12_DES40_CBC_SHA_4 0x0011
+#define OP_PCL_TLS12_DES40_CBC_SHA_5 0x0014
+#define OP_PCL_TLS12_DES40_CBC_SHA_6 0x0019
+#define OP_PCL_TLS12_DES40_CBC_SHA_7 0x0026
+
+#define OP_PCL_TLS12_DES_CBC_SHA 0x001e
+#define OP_PCL_TLS12_DES_CBC_SHA_2 0x0009
+#define OP_PCL_TLS12_DES_CBC_SHA_3 0x000c
+#define OP_PCL_TLS12_DES_CBC_SHA_4 0x000f
+#define OP_PCL_TLS12_DES_CBC_SHA_5 0x0012
+#define OP_PCL_TLS12_DES_CBC_SHA_6 0x0015
+#define OP_PCL_TLS12_DES_CBC_SHA_7 0x001a
+
+#define OP_PCL_TLS12_RC4_128_MD5 0x0024
+#define OP_PCL_TLS12_RC4_128_MD5_2 0x0004
+#define OP_PCL_TLS12_RC4_128_MD5_3 0x0018
+
+#define OP_PCL_TLS12_RC4_40_MD5 0x002b
+#define OP_PCL_TLS12_RC4_40_MD5_2 0x0003
+#define OP_PCL_TLS12_RC4_40_MD5_3 0x0017
+
+#define OP_PCL_TLS12_RC4_128_SHA 0x0020
+#define OP_PCL_TLS12_RC4_128_SHA_2 0x008a
+#define OP_PCL_TLS12_RC4_128_SHA_3 0x008e
+#define OP_PCL_TLS12_RC4_128_SHA_4 0x0092
+#define OP_PCL_TLS12_RC4_128_SHA_5 0x0005
+#define OP_PCL_TLS12_RC4_128_SHA_6 0xc002
+#define OP_PCL_TLS12_RC4_128_SHA_7 0xc007
+#define OP_PCL_TLS12_RC4_128_SHA_8 0xc00c
+#define OP_PCL_TLS12_RC4_128_SHA_9 0xc011
+#define OP_PCL_TLS12_RC4_128_SHA_10 0xc016
+
+#define OP_PCL_TLS12_RC4_40_SHA 0x0028
+
+/* #define OP_PCL_TLS12_AES_128_CBC_SHA256 0x003c */
+#define OP_PCL_TLS12_AES_128_CBC_SHA256_2 0x003e
+#define OP_PCL_TLS12_AES_128_CBC_SHA256_3 0x003f
+#define OP_PCL_TLS12_AES_128_CBC_SHA256_4 0x0040
+#define OP_PCL_TLS12_AES_128_CBC_SHA256_5 0x0067
+#define OP_PCL_TLS12_AES_128_CBC_SHA256_6 0x006c
+
+/* #define OP_PCL_TLS12_AES_256_CBC_SHA256 0x003d */
+#define OP_PCL_TLS12_AES_256_CBC_SHA256_2 0x0068
+#define OP_PCL_TLS12_AES_256_CBC_SHA256_3 0x0069
+#define OP_PCL_TLS12_AES_256_CBC_SHA256_4 0x006a
+#define OP_PCL_TLS12_AES_256_CBC_SHA256_5 0x006b
+#define OP_PCL_TLS12_AES_256_CBC_SHA256_6 0x006d
/* AEAD_AES_xxx_CCM/GCM remain to be defined... */
-#define OP_PCL_TLS12_3DES_EDE_CBC_MD5 0xff23
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA160 0xff30
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA224 0xff34
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA256 0xff36
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA384 0xff33
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA512 0xff35
-#define OP_PCL_TLS12_AES_128_CBC_SHA160 0xff80
-#define OP_PCL_TLS12_AES_128_CBC_SHA224 0xff84
-#define OP_PCL_TLS12_AES_128_CBC_SHA256 0xff86
-#define OP_PCL_TLS12_AES_128_CBC_SHA384 0xff83
-#define OP_PCL_TLS12_AES_128_CBC_SHA512 0xff85
-#define OP_PCL_TLS12_AES_192_CBC_SHA160 0xff20
-#define OP_PCL_TLS12_AES_192_CBC_SHA224 0xff24
-#define OP_PCL_TLS12_AES_192_CBC_SHA256 0xff26
-#define OP_PCL_TLS12_AES_192_CBC_SHA384 0xff23
-#define OP_PCL_TLS12_AES_192_CBC_SHA512 0xff25
-#define OP_PCL_TLS12_AES_256_CBC_SHA160 0xff60
-#define OP_PCL_TLS12_AES_256_CBC_SHA224 0xff64
-#define OP_PCL_TLS12_AES_256_CBC_SHA256 0xff66
-#define OP_PCL_TLS12_AES_256_CBC_SHA384 0xff63
-#define OP_PCL_TLS12_AES_256_CBC_SHA512 0xff65
+#define OP_PCL_TLS12_3DES_EDE_CBC_MD5 0xff23
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA160 0xff30
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA224 0xff34
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA256 0xff36
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA384 0xff33
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA512 0xff35
+#define OP_PCL_TLS12_AES_128_CBC_SHA160 0xff80
+#define OP_PCL_TLS12_AES_128_CBC_SHA224 0xff84
+#define OP_PCL_TLS12_AES_128_CBC_SHA256 0xff86
+#define OP_PCL_TLS12_AES_128_CBC_SHA384 0xff83
+#define OP_PCL_TLS12_AES_128_CBC_SHA512 0xff85
+#define OP_PCL_TLS12_AES_192_CBC_SHA160 0xff20
+#define OP_PCL_TLS12_AES_192_CBC_SHA224 0xff24
+#define OP_PCL_TLS12_AES_192_CBC_SHA256 0xff26
+#define OP_PCL_TLS12_AES_192_CBC_SHA384 0xff23
+#define OP_PCL_TLS12_AES_192_CBC_SHA512 0xff25
+#define OP_PCL_TLS12_AES_256_CBC_SHA160 0xff60
+#define OP_PCL_TLS12_AES_256_CBC_SHA224 0xff64
+#define OP_PCL_TLS12_AES_256_CBC_SHA256 0xff66
+#define OP_PCL_TLS12_AES_256_CBC_SHA384 0xff63
+#define OP_PCL_TLS12_AES_256_CBC_SHA512 0xff65
/* For DTLS - OP_PCLID_DTLS */
-#define OP_PCL_DTLS_AES_128_CBC_SHA 0x002f
-#define OP_PCL_DTLS_AES_128_CBC_SHA_2 0x0030
-#define OP_PCL_DTLS_AES_128_CBC_SHA_3 0x0031
-#define OP_PCL_DTLS_AES_128_CBC_SHA_4 0x0032
-#define OP_PCL_DTLS_AES_128_CBC_SHA_5 0x0033
-#define OP_PCL_DTLS_AES_128_CBC_SHA_6 0x0034
-#define OP_PCL_DTLS_AES_128_CBC_SHA_7 0x008c
-#define OP_PCL_DTLS_AES_128_CBC_SHA_8 0x0090
-#define OP_PCL_DTLS_AES_128_CBC_SHA_9 0x0094
-#define OP_PCL_DTLS_AES_128_CBC_SHA_10 0xc004
-#define OP_PCL_DTLS_AES_128_CBC_SHA_11 0xc009
-#define OP_PCL_DTLS_AES_128_CBC_SHA_12 0xc00e
-#define OP_PCL_DTLS_AES_128_CBC_SHA_13 0xc013
-#define OP_PCL_DTLS_AES_128_CBC_SHA_14 0xc018
-#define OP_PCL_DTLS_AES_128_CBC_SHA_15 0xc01d
-#define OP_PCL_DTLS_AES_128_CBC_SHA_16 0xc01e
-#define OP_PCL_DTLS_AES_128_CBC_SHA_17 0xc01f
-
-#define OP_PCL_DTLS_AES_256_CBC_SHA 0x0035
-#define OP_PCL_DTLS_AES_256_CBC_SHA_2 0x0036
-#define OP_PCL_DTLS_AES_256_CBC_SHA_3 0x0037
-#define OP_PCL_DTLS_AES_256_CBC_SHA_4 0x0038
-#define OP_PCL_DTLS_AES_256_CBC_SHA_5 0x0039
-#define OP_PCL_DTLS_AES_256_CBC_SHA_6 0x003a
-#define OP_PCL_DTLS_AES_256_CBC_SHA_7 0x008d
-#define OP_PCL_DTLS_AES_256_CBC_SHA_8 0x0091
-#define OP_PCL_DTLS_AES_256_CBC_SHA_9 0x0095
-#define OP_PCL_DTLS_AES_256_CBC_SHA_10 0xc005
-#define OP_PCL_DTLS_AES_256_CBC_SHA_11 0xc00a
-#define OP_PCL_DTLS_AES_256_CBC_SHA_12 0xc00f
-#define OP_PCL_DTLS_AES_256_CBC_SHA_13 0xc014
-#define OP_PCL_DTLS_AES_256_CBC_SHA_14 0xc019
-#define OP_PCL_DTLS_AES_256_CBC_SHA_15 0xc020
-#define OP_PCL_DTLS_AES_256_CBC_SHA_16 0xc021
-#define OP_PCL_DTLS_AES_256_CBC_SHA_17 0xc022
-
-/* #define OP_PCL_DTLS_3DES_EDE_CBC_MD5 0x0023 */
-
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA 0x001f
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_2 0x008b
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_3 0x008f
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_4 0x0093
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_5 0x000a
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_6 0x000d
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_7 0x0010
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_8 0x0013
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_9 0x0016
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_10 0x001b
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_11 0xc003
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_12 0xc008
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_13 0xc00d
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_14 0xc012
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_15 0xc017
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_16 0xc01a
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_17 0xc01b
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_18 0xc01c
-
-#define OP_PCL_DTLS_DES40_CBC_MD5 0x0029
-
-#define OP_PCL_DTLS_DES_CBC_MD5 0x0022
-
-#define OP_PCL_DTLS_DES40_CBC_SHA 0x0008
-#define OP_PCL_DTLS_DES40_CBC_SHA_2 0x000b
-#define OP_PCL_DTLS_DES40_CBC_SHA_3 0x000e
-#define OP_PCL_DTLS_DES40_CBC_SHA_4 0x0011
-#define OP_PCL_DTLS_DES40_CBC_SHA_5 0x0014
-#define OP_PCL_DTLS_DES40_CBC_SHA_6 0x0019
-#define OP_PCL_DTLS_DES40_CBC_SHA_7 0x0026
-
-
-#define OP_PCL_DTLS_DES_CBC_SHA 0x001e
-#define OP_PCL_DTLS_DES_CBC_SHA_2 0x0009
-#define OP_PCL_DTLS_DES_CBC_SHA_3 0x000c
-#define OP_PCL_DTLS_DES_CBC_SHA_4 0x000f
-#define OP_PCL_DTLS_DES_CBC_SHA_5 0x0012
-#define OP_PCL_DTLS_DES_CBC_SHA_6 0x0015
-#define OP_PCL_DTLS_DES_CBC_SHA_7 0x001a
-
-
-#define OP_PCL_DTLS_3DES_EDE_CBC_MD5 0xff23
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA160 0xff30
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA224 0xff34
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA256 0xff36
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA384 0xff33
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA512 0xff35
-#define OP_PCL_DTLS_AES_128_CBC_SHA160 0xff80
-#define OP_PCL_DTLS_AES_128_CBC_SHA224 0xff84
-#define OP_PCL_DTLS_AES_128_CBC_SHA256 0xff86
-#define OP_PCL_DTLS_AES_128_CBC_SHA384 0xff83
-#define OP_PCL_DTLS_AES_128_CBC_SHA512 0xff85
-#define OP_PCL_DTLS_AES_192_CBC_SHA160 0xff20
-#define OP_PCL_DTLS_AES_192_CBC_SHA224 0xff24
-#define OP_PCL_DTLS_AES_192_CBC_SHA256 0xff26
-#define OP_PCL_DTLS_AES_192_CBC_SHA384 0xff23
-#define OP_PCL_DTLS_AES_192_CBC_SHA512 0xff25
-#define OP_PCL_DTLS_AES_256_CBC_SHA160 0xff60
-#define OP_PCL_DTLS_AES_256_CBC_SHA224 0xff64
-#define OP_PCL_DTLS_AES_256_CBC_SHA256 0xff66
-#define OP_PCL_DTLS_AES_256_CBC_SHA384 0xff63
-#define OP_PCL_DTLS_AES_256_CBC_SHA512 0xff65
+#define OP_PCL_DTLS_AES_128_CBC_SHA 0x002f
+#define OP_PCL_DTLS_AES_128_CBC_SHA_2 0x0030
+#define OP_PCL_DTLS_AES_128_CBC_SHA_3 0x0031
+#define OP_PCL_DTLS_AES_128_CBC_SHA_4 0x0032
+#define OP_PCL_DTLS_AES_128_CBC_SHA_5 0x0033
+#define OP_PCL_DTLS_AES_128_CBC_SHA_6 0x0034
+#define OP_PCL_DTLS_AES_128_CBC_SHA_7 0x008c
+#define OP_PCL_DTLS_AES_128_CBC_SHA_8 0x0090
+#define OP_PCL_DTLS_AES_128_CBC_SHA_9 0x0094
+#define OP_PCL_DTLS_AES_128_CBC_SHA_10 0xc004
+#define OP_PCL_DTLS_AES_128_CBC_SHA_11 0xc009
+#define OP_PCL_DTLS_AES_128_CBC_SHA_12 0xc00e
+#define OP_PCL_DTLS_AES_128_CBC_SHA_13 0xc013
+#define OP_PCL_DTLS_AES_128_CBC_SHA_14 0xc018
+#define OP_PCL_DTLS_AES_128_CBC_SHA_15 0xc01d
+#define OP_PCL_DTLS_AES_128_CBC_SHA_16 0xc01e
+#define OP_PCL_DTLS_AES_128_CBC_SHA_17 0xc01f
+
+#define OP_PCL_DTLS_AES_256_CBC_SHA 0x0035
+#define OP_PCL_DTLS_AES_256_CBC_SHA_2 0x0036
+#define OP_PCL_DTLS_AES_256_CBC_SHA_3 0x0037
+#define OP_PCL_DTLS_AES_256_CBC_SHA_4 0x0038
+#define OP_PCL_DTLS_AES_256_CBC_SHA_5 0x0039
+#define OP_PCL_DTLS_AES_256_CBC_SHA_6 0x003a
+#define OP_PCL_DTLS_AES_256_CBC_SHA_7 0x008d
+#define OP_PCL_DTLS_AES_256_CBC_SHA_8 0x0091
+#define OP_PCL_DTLS_AES_256_CBC_SHA_9 0x0095
+#define OP_PCL_DTLS_AES_256_CBC_SHA_10 0xc005
+#define OP_PCL_DTLS_AES_256_CBC_SHA_11 0xc00a
+#define OP_PCL_DTLS_AES_256_CBC_SHA_12 0xc00f
+#define OP_PCL_DTLS_AES_256_CBC_SHA_13 0xc014
+#define OP_PCL_DTLS_AES_256_CBC_SHA_14 0xc019
+#define OP_PCL_DTLS_AES_256_CBC_SHA_15 0xc020
+#define OP_PCL_DTLS_AES_256_CBC_SHA_16 0xc021
+#define OP_PCL_DTLS_AES_256_CBC_SHA_17 0xc022
+
+/* #define OP_PCL_DTLS_3DES_EDE_CBC_MD5 0x0023 */
+
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA 0x001f
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_2 0x008b
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_3 0x008f
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_4 0x0093
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_5 0x000a
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_6 0x000d
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_7 0x0010
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_8 0x0013
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_9 0x0016
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_10 0x001b
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_11 0xc003
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_12 0xc008
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_13 0xc00d
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_14 0xc012
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_15 0xc017
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_16 0xc01a
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_17 0xc01b
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_18 0xc01c
+
+#define OP_PCL_DTLS_DES40_CBC_MD5 0x0029
+
+#define OP_PCL_DTLS_DES_CBC_MD5 0x0022
+
+#define OP_PCL_DTLS_DES40_CBC_SHA 0x0008
+#define OP_PCL_DTLS_DES40_CBC_SHA_2 0x000b
+#define OP_PCL_DTLS_DES40_CBC_SHA_3 0x000e
+#define OP_PCL_DTLS_DES40_CBC_SHA_4 0x0011
+#define OP_PCL_DTLS_DES40_CBC_SHA_5 0x0014
+#define OP_PCL_DTLS_DES40_CBC_SHA_6 0x0019
+#define OP_PCL_DTLS_DES40_CBC_SHA_7 0x0026
+
+
+#define OP_PCL_DTLS_DES_CBC_SHA 0x001e
+#define OP_PCL_DTLS_DES_CBC_SHA_2 0x0009
+#define OP_PCL_DTLS_DES_CBC_SHA_3 0x000c
+#define OP_PCL_DTLS_DES_CBC_SHA_4 0x000f
+#define OP_PCL_DTLS_DES_CBC_SHA_5 0x0012
+#define OP_PCL_DTLS_DES_CBC_SHA_6 0x0015
+#define OP_PCL_DTLS_DES_CBC_SHA_7 0x001a
+
+
+#define OP_PCL_DTLS_3DES_EDE_CBC_MD5 0xff23
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA160 0xff30
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA224 0xff34
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA256 0xff36
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA384 0xff33
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA512 0xff35
+#define OP_PCL_DTLS_AES_128_CBC_SHA160 0xff80
+#define OP_PCL_DTLS_AES_128_CBC_SHA224 0xff84
+#define OP_PCL_DTLS_AES_128_CBC_SHA256 0xff86
+#define OP_PCL_DTLS_AES_128_CBC_SHA384 0xff83
+#define OP_PCL_DTLS_AES_128_CBC_SHA512 0xff85
+#define OP_PCL_DTLS_AES_192_CBC_SHA160 0xff20
+#define OP_PCL_DTLS_AES_192_CBC_SHA224 0xff24
+#define OP_PCL_DTLS_AES_192_CBC_SHA256 0xff26
+#define OP_PCL_DTLS_AES_192_CBC_SHA384 0xff23
+#define OP_PCL_DTLS_AES_192_CBC_SHA512 0xff25
+#define OP_PCL_DTLS_AES_256_CBC_SHA160 0xff60
+#define OP_PCL_DTLS_AES_256_CBC_SHA224 0xff64
+#define OP_PCL_DTLS_AES_256_CBC_SHA256 0xff66
+#define OP_PCL_DTLS_AES_256_CBC_SHA384 0xff63
+#define OP_PCL_DTLS_AES_256_CBC_SHA512 0xff65
/* 802.16 WiMAX protinfos */
-#define OP_PCL_WIMAX_OFDM 0x0201
-#define OP_PCL_WIMAX_OFDMA 0x0231
+#define OP_PCL_WIMAX_OFDM 0x0201
+#define OP_PCL_WIMAX_OFDMA 0x0231
/* 802.11 WiFi protinfos */
-#define OP_PCL_WIFI 0xac04
+#define OP_PCL_WIFI 0xac04
/* MacSec protinfos */
-#define OP_PCL_MACSEC 0x0001
+#define OP_PCL_MACSEC 0x0001
/* PKI unidirectional protocol protinfo bits */
-#define OP_PCL_PKPROT_TEST 0x0008
-#define OP_PCL_PKPROT_DECRYPT 0x0004
-#define OP_PCL_PKPROT_ECC 0x0002
-#define OP_PCL_PKPROT_F2M 0x0001
+#define OP_PCL_PKPROT_TEST 0x0008
+#define OP_PCL_PKPROT_DECRYPT 0x0004
+#define OP_PCL_PKPROT_ECC 0x0002
+#define OP_PCL_PKPROT_F2M 0x0001
/* For non-protocol/alg-only op commands */
#define OP_ALG_TYPE_SHIFT 24
@@ -1181,114 +1181,114 @@
#define OP_ALG_ENCRYPT 1
/* PKHA algorithm type set */
-#define OP_ALG_PK 0x00800000
-#define OP_ALG_PK_FUN_MASK 0x3f /* clrmem, modmath, or cpymem */
+#define OP_ALG_PK 0x00800000
+#define OP_ALG_PK_FUN_MASK 0x3f /* clrmem, modmath, or cpymem */
/* PKHA mode clear memory functions */
-#define OP_ALG_PKMODE_A_RAM 0x80000
-#define OP_ALG_PKMODE_B_RAM 0x40000
-#define OP_ALG_PKMODE_E_RAM 0x20000
-#define OP_ALG_PKMODE_N_RAM 0x10000
-#define OP_ALG_PKMODE_CLEARMEM 0x00001
+#define OP_ALG_PKMODE_A_RAM 0x80000
+#define OP_ALG_PKMODE_B_RAM 0x40000
+#define OP_ALG_PKMODE_E_RAM 0x20000
+#define OP_ALG_PKMODE_N_RAM 0x10000
+#define OP_ALG_PKMODE_CLEARMEM 0x00001
/* PKHA mode modular-arithmetic functions */
-#define OP_ALG_PKMODE_MOD_IN_MONTY 0x80000
-#define OP_ALG_PKMODE_MOD_OUT_MONTY 0x40000
-#define OP_ALG_PKMODE_MOD_F2M 0x20000
-#define OP_ALG_PKMODE_MOD_R2_IN 0x10000
-#define OP_ALG_PKMODE_PRJECTV 0x00800
-#define OP_ALG_PKMODE_TIME_EQ 0x400
-#define OP_ALG_PKMODE_OUT_B 0x000
-#define OP_ALG_PKMODE_OUT_A 0x100
-#define OP_ALG_PKMODE_MOD_ADD 0x002
-#define OP_ALG_PKMODE_MOD_SUB_AB 0x003
-#define OP_ALG_PKMODE_MOD_SUB_BA 0x004
-#define OP_ALG_PKMODE_MOD_MULT 0x005
-#define OP_ALG_PKMODE_MOD_EXPO 0x006
-#define OP_ALG_PKMODE_MOD_REDUCT 0x007
-#define OP_ALG_PKMODE_MOD_INV 0x008
-#define OP_ALG_PKMODE_MOD_ECC_ADD 0x009
-#define OP_ALG_PKMODE_MOD_ECC_DBL 0x00a
-#define OP_ALG_PKMODE_MOD_ECC_MULT 0x00b
-#define OP_ALG_PKMODE_MOD_MONT_CNST 0x00c
-#define OP_ALG_PKMODE_MOD_CRT_CNST 0x00d
-#define OP_ALG_PKMODE_MOD_GCD 0x00e
-#define OP_ALG_PKMODE_MOD_PRIMALITY 0x00f
+#define OP_ALG_PKMODE_MOD_IN_MONTY 0x80000
+#define OP_ALG_PKMODE_MOD_OUT_MONTY 0x40000
+#define OP_ALG_PKMODE_MOD_F2M 0x20000
+#define OP_ALG_PKMODE_MOD_R2_IN 0x10000
+#define OP_ALG_PKMODE_PRJECTV 0x00800
+#define OP_ALG_PKMODE_TIME_EQ 0x400
+#define OP_ALG_PKMODE_OUT_B 0x000
+#define OP_ALG_PKMODE_OUT_A 0x100
+#define OP_ALG_PKMODE_MOD_ADD 0x002
+#define OP_ALG_PKMODE_MOD_SUB_AB 0x003
+#define OP_ALG_PKMODE_MOD_SUB_BA 0x004
+#define OP_ALG_PKMODE_MOD_MULT 0x005
+#define OP_ALG_PKMODE_MOD_EXPO 0x006
+#define OP_ALG_PKMODE_MOD_REDUCT 0x007
+#define OP_ALG_PKMODE_MOD_INV 0x008
+#define OP_ALG_PKMODE_MOD_ECC_ADD 0x009
+#define OP_ALG_PKMODE_MOD_ECC_DBL 0x00a
+#define OP_ALG_PKMODE_MOD_ECC_MULT 0x00b
+#define OP_ALG_PKMODE_MOD_MONT_CNST 0x00c
+#define OP_ALG_PKMODE_MOD_CRT_CNST 0x00d
+#define OP_ALG_PKMODE_MOD_GCD 0x00e
+#define OP_ALG_PKMODE_MOD_PRIMALITY 0x00f
/* PKHA mode copy-memory functions */
-#define OP_ALG_PKMODE_SRC_REG_SHIFT 13
-#define OP_ALG_PKMODE_SRC_REG_MASK (7 << OP_ALG_PKMODE_SRC_REG_SHIFT)
-#define OP_ALG_PKMODE_DST_REG_SHIFT 10
-#define OP_ALG_PKMODE_DST_REG_MASK (7 << OP_ALG_PKMODE_DST_REG_SHIFT)
-#define OP_ALG_PKMODE_SRC_SEG_SHIFT 8
-#define OP_ALG_PKMODE_SRC_SEG_MASK (3 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
-#define OP_ALG_PKMODE_DST_SEG_SHIFT 6
-#define OP_ALG_PKMODE_DST_SEG_MASK (3 << OP_ALG_PKMODE_DST_SEG_SHIFT)
-
-#define OP_ALG_PKMODE_SRC_REG_A (0 << OP_ALG_PKMODE_SRC_REG_SHIFT)
-#define OP_ALG_PKMODE_SRC_REG_B (1 << OP_ALG_PKMODE_SRC_REG_SHIFT)
-#define OP_ALG_PKMODE_SRC_REG_N (3 << OP_ALG_PKMODE_SRC_REG_SHIFT)
-#define OP_ALG_PKMODE_DST_REG_A (0 << OP_ALG_PKMODE_DST_REG_SHIFT)
-#define OP_ALG_PKMODE_DST_REG_B (1 << OP_ALG_PKMODE_DST_REG_SHIFT)
-#define OP_ALG_PKMODE_DST_REG_E (2 << OP_ALG_PKMODE_DST_REG_SHIFT)
-#define OP_ALG_PKMODE_DST_REG_N (3 << OP_ALG_PKMODE_DST_REG_SHIFT)
-#define OP_ALG_PKMODE_SRC_SEG_0 (0 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
-#define OP_ALG_PKMODE_SRC_SEG_1 (1 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
-#define OP_ALG_PKMODE_SRC_SEG_2 (2 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
-#define OP_ALG_PKMODE_SRC_SEG_3 (3 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
-#define OP_ALG_PKMODE_DST_SEG_0 (0 << OP_ALG_PKMODE_DST_SEG_SHIFT)
-#define OP_ALG_PKMODE_DST_SEG_1 (1 << OP_ALG_PKMODE_DST_SEG_SHIFT)
-#define OP_ALG_PKMODE_DST_SEG_2 (2 << OP_ALG_PKMODE_DST_SEG_SHIFT)
-#define OP_ALG_PKMODE_DST_SEG_3 (3 << OP_ALG_PKMODE_DST_SEG_SHIFT)
-#define OP_ALG_PKMODE_CPYMEM_N_SZ 0x80
-#define OP_ALG_PKMODE_CPYMEM_SRC_SZ 0x81
+#define OP_ALG_PKMODE_SRC_REG_SHIFT 13
+#define OP_ALG_PKMODE_SRC_REG_MASK (7 << OP_ALG_PKMODE_SRC_REG_SHIFT)
+#define OP_ALG_PKMODE_DST_REG_SHIFT 10
+#define OP_ALG_PKMODE_DST_REG_MASK (7 << OP_ALG_PKMODE_DST_REG_SHIFT)
+#define OP_ALG_PKMODE_SRC_SEG_SHIFT 8
+#define OP_ALG_PKMODE_SRC_SEG_MASK (3 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
+#define OP_ALG_PKMODE_DST_SEG_SHIFT 6
+#define OP_ALG_PKMODE_DST_SEG_MASK (3 << OP_ALG_PKMODE_DST_SEG_SHIFT)
+
+#define OP_ALG_PKMODE_SRC_REG_A (0 << OP_ALG_PKMODE_SRC_REG_SHIFT)
+#define OP_ALG_PKMODE_SRC_REG_B (1 << OP_ALG_PKMODE_SRC_REG_SHIFT)
+#define OP_ALG_PKMODE_SRC_REG_N (3 << OP_ALG_PKMODE_SRC_REG_SHIFT)
+#define OP_ALG_PKMODE_DST_REG_A (0 << OP_ALG_PKMODE_DST_REG_SHIFT)
+#define OP_ALG_PKMODE_DST_REG_B (1 << OP_ALG_PKMODE_DST_REG_SHIFT)
+#define OP_ALG_PKMODE_DST_REG_E (2 << OP_ALG_PKMODE_DST_REG_SHIFT)
+#define OP_ALG_PKMODE_DST_REG_N (3 << OP_ALG_PKMODE_DST_REG_SHIFT)
+#define OP_ALG_PKMODE_SRC_SEG_0 (0 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
+#define OP_ALG_PKMODE_SRC_SEG_1 (1 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
+#define OP_ALG_PKMODE_SRC_SEG_2 (2 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
+#define OP_ALG_PKMODE_SRC_SEG_3 (3 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
+#define OP_ALG_PKMODE_DST_SEG_0 (0 << OP_ALG_PKMODE_DST_SEG_SHIFT)
+#define OP_ALG_PKMODE_DST_SEG_1 (1 << OP_ALG_PKMODE_DST_SEG_SHIFT)
+#define OP_ALG_PKMODE_DST_SEG_2 (2 << OP_ALG_PKMODE_DST_SEG_SHIFT)
+#define OP_ALG_PKMODE_DST_SEG_3 (3 << OP_ALG_PKMODE_DST_SEG_SHIFT)
+#define OP_ALG_PKMODE_CPYMEM_N_SZ 0x80
+#define OP_ALG_PKMODE_CPYMEM_SRC_SZ 0x81
/*
* SEQ_IN_PTR Command Constructs
*/
/* Release Buffers */
-#define SQIN_RBS 0x04000000
+#define SQIN_RBS 0x04000000
/* Sequence pointer is really a descriptor */
-#define SQIN_INL 0x02000000
+#define SQIN_INL 0x02000000
/* Sequence pointer is a scatter-gather table */
-#define SQIN_SGF 0x01000000
+#define SQIN_SGF 0x01000000
/* Appends to a previous pointer */
-#define SQIN_PRE 0x00800000
+#define SQIN_PRE 0x00800000
/* Use extended length following pointer */
-#define SQIN_EXT 0x00400000
+#define SQIN_EXT 0x00400000
/* Restore sequence with pointer/length */
-#define SQIN_RTO 0x00200000
+#define SQIN_RTO 0x00200000
/* Replace job descriptor */
-#define SQIN_RJD 0x00100000
+#define SQIN_RJD 0x00100000
-#define SQIN_LEN_SHIFT 0
-#define SQIN_LEN_MASK (0xffff << SQIN_LEN_SHIFT)
+#define SQIN_LEN_SHIFT 0
+#define SQIN_LEN_MASK (0xffff << SQIN_LEN_SHIFT)
/*
* SEQ_OUT_PTR Command Constructs
*/
/* Sequence pointer is a scatter-gather table */
-#define SQOUT_SGF 0x01000000
+#define SQOUT_SGF 0x01000000
/* Appends to a previous pointer */
-#define SQOUT_PRE 0x00800000
+#define SQOUT_PRE 0x00800000
/* Restore sequence with pointer/length */
-#define SQOUT_RTO 0x00200000
+#define SQOUT_RTO 0x00200000
/* Use extended length following pointer */
-#define SQOUT_EXT 0x00400000
+#define SQOUT_EXT 0x00400000
-#define SQOUT_LEN_SHIFT 0
-#define SQOUT_LEN_MASK (0xffff << SQOUT_LEN_SHIFT)
+#define SQOUT_LEN_SHIFT 0
+#define SQOUT_LEN_MASK (0xffff << SQOUT_LEN_SHIFT)
/*
@@ -1296,196 +1296,196 @@
*/
/* TYPE field is all that's relevant */
-#define SIGN_TYPE_SHIFT 16
-#define SIGN_TYPE_MASK (0x0f << SIGN_TYPE_SHIFT)
+#define SIGN_TYPE_SHIFT 16
+#define SIGN_TYPE_MASK (0x0f << SIGN_TYPE_SHIFT)
-#define SIGN_TYPE_FINAL (0x00 << SIGN_TYPE_SHIFT)
+#define SIGN_TYPE_FINAL (0x00 << SIGN_TYPE_SHIFT)
#define SIGN_TYPE_FINAL_RESTORE (0x01 << SIGN_TYPE_SHIFT)
#define SIGN_TYPE_FINAL_NONZERO (0x02 << SIGN_TYPE_SHIFT)
-#define SIGN_TYPE_IMM_2 (0x0a << SIGN_TYPE_SHIFT)
-#define SIGN_TYPE_IMM_3 (0x0b << SIGN_TYPE_SHIFT)
-#define SIGN_TYPE_IMM_4 (0x0c << SIGN_TYPE_SHIFT)
+#define SIGN_TYPE_IMM_2 (0x0a << SIGN_TYPE_SHIFT)
+#define SIGN_TYPE_IMM_3 (0x0b << SIGN_TYPE_SHIFT)
+#define SIGN_TYPE_IMM_4 (0x0c << SIGN_TYPE_SHIFT)
/*
* MOVE Command Constructs
*/
-#define MOVE_AUX_SHIFT 25
-#define MOVE_AUX_MASK (3 << MOVE_AUX_SHIFT)
-#define MOVE_AUX_MS (2 << MOVE_AUX_SHIFT)
-#define MOVE_AUX_LS (1 << MOVE_AUX_SHIFT)
-
-#define MOVE_WAITCOMP_SHIFT 24
-#define MOVE_WAITCOMP_MASK (1 << MOVE_WAITCOMP_SHIFT)
-#define MOVE_WAITCOMP (1 << MOVE_WAITCOMP_SHIFT)
-
-#define MOVE_SRC_SHIFT 20
-#define MOVE_SRC_MASK (0x0f << MOVE_SRC_SHIFT)
-#define MOVE_SRC_CLASS1CTX (0x00 << MOVE_SRC_SHIFT)
-#define MOVE_SRC_CLASS2CTX (0x01 << MOVE_SRC_SHIFT)
-#define MOVE_SRC_OUTFIFO (0x02 << MOVE_SRC_SHIFT)
-#define MOVE_SRC_DESCBUF (0x03 << MOVE_SRC_SHIFT)
-#define MOVE_SRC_MATH0 (0x04 << MOVE_SRC_SHIFT)
-#define MOVE_SRC_MATH1 (0x05 << MOVE_SRC_SHIFT)
-#define MOVE_SRC_MATH2 (0x06 << MOVE_SRC_SHIFT)
-#define MOVE_SRC_MATH3 (0x07 << MOVE_SRC_SHIFT)
-#define MOVE_SRC_INFIFO (0x08 << MOVE_SRC_SHIFT)
-#define MOVE_SRC_INFIFO_CL (0x09 << MOVE_SRC_SHIFT)
-
-#define MOVE_DEST_SHIFT 16
-#define MOVE_DEST_MASK (0x0f << MOVE_DEST_SHIFT)
-#define MOVE_DEST_CLASS1CTX (0x00 << MOVE_DEST_SHIFT)
-#define MOVE_DEST_CLASS2CTX (0x01 << MOVE_DEST_SHIFT)
-#define MOVE_DEST_OUTFIFO (0x02 << MOVE_DEST_SHIFT)
-#define MOVE_DEST_DESCBUF (0x03 << MOVE_DEST_SHIFT)
-#define MOVE_DEST_MATH0 (0x04 << MOVE_DEST_SHIFT)
-#define MOVE_DEST_MATH1 (0x05 << MOVE_DEST_SHIFT)
-#define MOVE_DEST_MATH2 (0x06 << MOVE_DEST_SHIFT)
-#define MOVE_DEST_MATH3 (0x07 << MOVE_DEST_SHIFT)
-#define MOVE_DEST_CLASS1INFIFO (0x08 << MOVE_DEST_SHIFT)
-#define MOVE_DEST_CLASS2INFIFO (0x09 << MOVE_DEST_SHIFT)
-#define MOVE_DEST_PK_A (0x0c << MOVE_DEST_SHIFT)
-#define MOVE_DEST_CLASS1KEY (0x0d << MOVE_DEST_SHIFT)
-#define MOVE_DEST_CLASS2KEY (0x0e << MOVE_DEST_SHIFT)
-
-#define MOVE_OFFSET_SHIFT 8
-#define MOVE_OFFSET_MASK (0xff << MOVE_OFFSET_SHIFT)
-
-#define MOVE_LEN_SHIFT 0
-#define MOVE_LEN_MASK (0xff << MOVE_LEN_SHIFT)
-
-#define MOVELEN_MRSEL_SHIFT 0
-#define MOVELEN_MRSEL_MASK (0x3 << MOVE_LEN_SHIFT)
+#define MOVE_AUX_SHIFT 25
+#define MOVE_AUX_MASK (3 << MOVE_AUX_SHIFT)
+#define MOVE_AUX_MS (2 << MOVE_AUX_SHIFT)
+#define MOVE_AUX_LS (1 << MOVE_AUX_SHIFT)
+
+#define MOVE_WAITCOMP_SHIFT 24
+#define MOVE_WAITCOMP_MASK (1 << MOVE_WAITCOMP_SHIFT)
+#define MOVE_WAITCOMP (1 << MOVE_WAITCOMP_SHIFT)
+
+#define MOVE_SRC_SHIFT 20
+#define MOVE_SRC_MASK (0x0f << MOVE_SRC_SHIFT)
+#define MOVE_SRC_CLASS1CTX (0x00 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_CLASS2CTX (0x01 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_OUTFIFO (0x02 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_DESCBUF (0x03 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_MATH0 (0x04 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_MATH1 (0x05 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_MATH2 (0x06 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_MATH3 (0x07 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_INFIFO (0x08 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_INFIFO_CL (0x09 << MOVE_SRC_SHIFT)
+
+#define MOVE_DEST_SHIFT 16
+#define MOVE_DEST_MASK (0x0f << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS1CTX (0x00 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS2CTX (0x01 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_OUTFIFO (0x02 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_DESCBUF (0x03 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_MATH0 (0x04 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_MATH1 (0x05 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_MATH2 (0x06 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_MATH3 (0x07 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS1INFIFO (0x08 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS2INFIFO (0x09 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_PK_A (0x0c << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS1KEY (0x0d << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS2KEY (0x0e << MOVE_DEST_SHIFT)
+
+#define MOVE_OFFSET_SHIFT 8
+#define MOVE_OFFSET_MASK (0xff << MOVE_OFFSET_SHIFT)
+
+#define MOVE_LEN_SHIFT 0
+#define MOVE_LEN_MASK (0xff << MOVE_LEN_SHIFT)
+
+#define MOVELEN_MRSEL_SHIFT 0
+#define MOVELEN_MRSEL_MASK (0x3 << MOVE_LEN_SHIFT)
/*
* MATH Command Constructs
*/
-#define MATH_IFB_SHIFT 26
-#define MATH_IFB_MASK (1 << MATH_IFB_SHIFT)
-#define MATH_IFB (1 << MATH_IFB_SHIFT)
+#define MATH_IFB_SHIFT 26
+#define MATH_IFB_MASK (1 << MATH_IFB_SHIFT)
+#define MATH_IFB (1 << MATH_IFB_SHIFT)
-#define MATH_NFU_SHIFT 25
-#define MATH_NFU_MASK (1 << MATH_NFU_SHIFT)
-#define MATH_NFU (1 << MATH_NFU_SHIFT)
+#define MATH_NFU_SHIFT 25
+#define MATH_NFU_MASK (1 << MATH_NFU_SHIFT)
+#define MATH_NFU (1 << MATH_NFU_SHIFT)
-#define MATH_STL_SHIFT 24
-#define MATH_STL_MASK (1 << MATH_STL_SHIFT)
-#define MATH_STL (1 << MATH_STL_SHIFT)
+#define MATH_STL_SHIFT 24
+#define MATH_STL_MASK (1 << MATH_STL_SHIFT)
+#define MATH_STL (1 << MATH_STL_SHIFT)
/* Function selectors */
-#define MATH_FUN_SHIFT 20
-#define MATH_FUN_MASK (0x0f << MATH_FUN_SHIFT)
-#define MATH_FUN_ADD (0x00 << MATH_FUN_SHIFT)
-#define MATH_FUN_ADDC (0x01 << MATH_FUN_SHIFT)
-#define MATH_FUN_SUB (0x02 << MATH_FUN_SHIFT)
-#define MATH_FUN_SUBB (0x03 << MATH_FUN_SHIFT)
-#define MATH_FUN_OR (0x04 << MATH_FUN_SHIFT)
-#define MATH_FUN_AND (0x05 << MATH_FUN_SHIFT)
-#define MATH_FUN_XOR (0x06 << MATH_FUN_SHIFT)
-#define MATH_FUN_LSHIFT (0x07 << MATH_FUN_SHIFT)
-#define MATH_FUN_RSHIFT (0x08 << MATH_FUN_SHIFT)
-#define MATH_FUN_SHLD (0x09 << MATH_FUN_SHIFT)
-#define MATH_FUN_ZBYT (0x0a << MATH_FUN_SHIFT)
+#define MATH_FUN_SHIFT 20
+#define MATH_FUN_MASK (0x0f << MATH_FUN_SHIFT)
+#define MATH_FUN_ADD (0x00 << MATH_FUN_SHIFT)
+#define MATH_FUN_ADDC (0x01 << MATH_FUN_SHIFT)
+#define MATH_FUN_SUB (0x02 << MATH_FUN_SHIFT)
+#define MATH_FUN_SUBB (0x03 << MATH_FUN_SHIFT)
+#define MATH_FUN_OR (0x04 << MATH_FUN_SHIFT)
+#define MATH_FUN_AND (0x05 << MATH_FUN_SHIFT)
+#define MATH_FUN_XOR (0x06 << MATH_FUN_SHIFT)
+#define MATH_FUN_LSHIFT (0x07 << MATH_FUN_SHIFT)
+#define MATH_FUN_RSHIFT (0x08 << MATH_FUN_SHIFT)
+#define MATH_FUN_SHLD (0x09 << MATH_FUN_SHIFT)
+#define MATH_FUN_ZBYT (0x0a << MATH_FUN_SHIFT)
/* Source 0 selectors */
-#define MATH_SRC0_SHIFT 16
-#define MATH_SRC0_MASK (0x0f << MATH_SRC0_SHIFT)
-#define MATH_SRC0_REG0 (0x00 << MATH_SRC0_SHIFT)
-#define MATH_SRC0_REG1 (0x01 << MATH_SRC0_SHIFT)
-#define MATH_SRC0_REG2 (0x02 << MATH_SRC0_SHIFT)
-#define MATH_SRC0_REG3 (0x03 << MATH_SRC0_SHIFT)
-#define MATH_SRC0_IMM (0x04 << MATH_SRC0_SHIFT)
-#define MATH_SRC0_SEQINLEN (0x08 << MATH_SRC0_SHIFT)
-#define MATH_SRC0_SEQOUTLEN (0x09 << MATH_SRC0_SHIFT)
-#define MATH_SRC0_VARSEQINLEN (0x0a << MATH_SRC0_SHIFT)
-#define MATH_SRC0_VARSEQOUTLEN (0x0b << MATH_SRC0_SHIFT)
-#define MATH_SRC0_ZERO (0x0c << MATH_SRC0_SHIFT)
+#define MATH_SRC0_SHIFT 16
+#define MATH_SRC0_MASK (0x0f << MATH_SRC0_SHIFT)
+#define MATH_SRC0_REG0 (0x00 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_REG1 (0x01 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_REG2 (0x02 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_REG3 (0x03 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_IMM (0x04 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_SEQINLEN (0x08 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_SEQOUTLEN (0x09 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_VARSEQINLEN (0x0a << MATH_SRC0_SHIFT)
+#define MATH_SRC0_VARSEQOUTLEN (0x0b << MATH_SRC0_SHIFT)
+#define MATH_SRC0_ZERO (0x0c << MATH_SRC0_SHIFT)
/* Source 1 selectors */
-#define MATH_SRC1_SHIFT 12
-#define MATH_SRC1_MASK (0x0f << MATH_SRC1_SHIFT)
-#define MATH_SRC1_REG0 (0x00 << MATH_SRC1_SHIFT)
-#define MATH_SRC1_REG1 (0x01 << MATH_SRC1_SHIFT)
-#define MATH_SRC1_REG2 (0x02 << MATH_SRC1_SHIFT)
-#define MATH_SRC1_REG3 (0x03 << MATH_SRC1_SHIFT)
-#define MATH_SRC1_IMM (0x04 << MATH_SRC1_SHIFT)
-#define MATH_SRC1_INFIFO (0x0a << MATH_SRC1_SHIFT)
-#define MATH_SRC1_OUTFIFO (0x0b << MATH_SRC1_SHIFT)
-#define MATH_SRC1_ONE (0x0c << MATH_SRC1_SHIFT)
+#define MATH_SRC1_SHIFT 12
+#define MATH_SRC1_MASK (0x0f << MATH_SRC1_SHIFT)
+#define MATH_SRC1_REG0 (0x00 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_REG1 (0x01 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_REG2 (0x02 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_REG3 (0x03 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_IMM (0x04 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_INFIFO (0x0a << MATH_SRC1_SHIFT)
+#define MATH_SRC1_OUTFIFO (0x0b << MATH_SRC1_SHIFT)
+#define MATH_SRC1_ONE (0x0c << MATH_SRC1_SHIFT)
/* Destination selectors */
-#define MATH_DEST_SHIFT 8
-#define MATH_DEST_MASK (0x0f << MATH_DEST_SHIFT)
-#define MATH_DEST_REG0 (0x00 << MATH_DEST_SHIFT)
-#define MATH_DEST_REG1 (0x01 << MATH_DEST_SHIFT)
-#define MATH_DEST_REG2 (0x02 << MATH_DEST_SHIFT)
-#define MATH_DEST_REG3 (0x03 << MATH_DEST_SHIFT)
-#define MATH_DEST_SEQINLEN (0x08 << MATH_DEST_SHIFT)
-#define MATH_DEST_SEQOUTLEN (0x09 << MATH_DEST_SHIFT)
-#define MATH_DEST_VARSEQINLEN (0x0a << MATH_DEST_SHIFT)
-#define MATH_DEST_VARSEQOUTLEN (0x0b << MATH_DEST_SHIFT)
-#define MATH_DEST_NONE (0x0f << MATH_DEST_SHIFT)
+#define MATH_DEST_SHIFT 8
+#define MATH_DEST_MASK (0x0f << MATH_DEST_SHIFT)
+#define MATH_DEST_REG0 (0x00 << MATH_DEST_SHIFT)
+#define MATH_DEST_REG1 (0x01 << MATH_DEST_SHIFT)
+#define MATH_DEST_REG2 (0x02 << MATH_DEST_SHIFT)
+#define MATH_DEST_REG3 (0x03 << MATH_DEST_SHIFT)
+#define MATH_DEST_SEQINLEN (0x08 << MATH_DEST_SHIFT)
+#define MATH_DEST_SEQOUTLEN (0x09 << MATH_DEST_SHIFT)
+#define MATH_DEST_VARSEQINLEN (0x0a << MATH_DEST_SHIFT)
+#define MATH_DEST_VARSEQOUTLEN (0x0b << MATH_DEST_SHIFT)
+#define MATH_DEST_NONE (0x0f << MATH_DEST_SHIFT)
/* Length selectors */
-#define MATH_LEN_SHIFT 0
-#define MATH_LEN_MASK (0x0f << MATH_LEN_SHIFT)
-#define MATH_LEN_1BYTE 0x01
-#define MATH_LEN_2BYTE 0x02
-#define MATH_LEN_4BYTE 0x04
-#define MATH_LEN_8BYTE 0x08
+#define MATH_LEN_SHIFT 0
+#define MATH_LEN_MASK (0x0f << MATH_LEN_SHIFT)
+#define MATH_LEN_1BYTE 0x01
+#define MATH_LEN_2BYTE 0x02
+#define MATH_LEN_4BYTE 0x04
+#define MATH_LEN_8BYTE 0x08
/*
* JUMP Command Constructs
*/
-#define JUMP_CLASS_SHIFT 25
+#define JUMP_CLASS_SHIFT 25
#define JUMP_CLASS_MASK (3 << JUMP_CLASS_SHIFT)
#define JUMP_CLASS_NONE 0
#define JUMP_CLASS_CLASS1 (1 << JUMP_CLASS_SHIFT)
#define JUMP_CLASS_CLASS2 (2 << JUMP_CLASS_SHIFT)
#define JUMP_CLASS_BOTH (3 << JUMP_CLASS_SHIFT)
-#define JUMP_JSL_SHIFT 24
-#define JUMP_JSL_MASK (1 << JUMP_JSL_SHIFT)
-#define JUMP_JSL (1 << JUMP_JSL_SHIFT)
+#define JUMP_JSL_SHIFT 24
+#define JUMP_JSL_MASK (1 << JUMP_JSL_SHIFT)
+#define JUMP_JSL (1 << JUMP_JSL_SHIFT)
-#define JUMP_TYPE_SHIFT 22
-#define JUMP_TYPE_MASK (0x03 << JUMP_TYPE_SHIFT)
-#define JUMP_TYPE_LOCAL (0x00 << JUMP_TYPE_SHIFT)
-#define JUMP_TYPE_NONLOCAL (0x01 << JUMP_TYPE_SHIFT)
-#define JUMP_TYPE_HALT (0x02 << JUMP_TYPE_SHIFT)
-#define JUMP_TYPE_HALT_USER (0x03 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_SHIFT 22
+#define JUMP_TYPE_MASK (0x03 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_LOCAL (0x00 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_NONLOCAL (0x01 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_HALT (0x02 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_HALT_USER (0x03 << JUMP_TYPE_SHIFT)
-#define JUMP_TEST_SHIFT 16
-#define JUMP_TEST_MASK (0x03 << JUMP_TEST_SHIFT)
-#define JUMP_TEST_ALL (0x00 << JUMP_TEST_SHIFT)
-#define JUMP_TEST_INVALL (0x01 << JUMP_TEST_SHIFT)
-#define JUMP_TEST_ANY (0x02 << JUMP_TEST_SHIFT)
-#define JUMP_TEST_INVANY (0x03 << JUMP_TEST_SHIFT)
+#define JUMP_TEST_SHIFT 16
+#define JUMP_TEST_MASK (0x03 << JUMP_TEST_SHIFT)
+#define JUMP_TEST_ALL (0x00 << JUMP_TEST_SHIFT)
+#define JUMP_TEST_INVALL (0x01 << JUMP_TEST_SHIFT)
+#define JUMP_TEST_ANY (0x02 << JUMP_TEST_SHIFT)
+#define JUMP_TEST_INVANY (0x03 << JUMP_TEST_SHIFT)
/* Condition codes. JSL bit is factored in */
-#define JUMP_COND_SHIFT 8
-#define JUMP_COND_MASK (0x100ff << JUMP_COND_SHIFT)
-#define JUMP_COND_PK_0 (0x80 << JUMP_COND_SHIFT)
-#define JUMP_COND_PK_GCD_1 (0x40 << JUMP_COND_SHIFT)
-#define JUMP_COND_PK_PRIME (0x20 << JUMP_COND_SHIFT)
-#define JUMP_COND_MATH_N (0x08 << JUMP_COND_SHIFT)
-#define JUMP_COND_MATH_Z (0x04 << JUMP_COND_SHIFT)
-#define JUMP_COND_MATH_C (0x02 << JUMP_COND_SHIFT)
-#define JUMP_COND_MATH_NV (0x01 << JUMP_COND_SHIFT)
-
-#define JUMP_COND_JRP ((0x80 << JUMP_COND_SHIFT) | JUMP_JSL)
-#define JUMP_COND_SHRD ((0x40 << JUMP_COND_SHIFT) | JUMP_JSL)
-#define JUMP_COND_SELF ((0x20 << JUMP_COND_SHIFT) | JUMP_JSL)
-#define JUMP_COND_CALM ((0x10 << JUMP_COND_SHIFT) | JUMP_JSL)
-#define JUMP_COND_NIP ((0x08 << JUMP_COND_SHIFT) | JUMP_JSL)
-#define JUMP_COND_NIFP ((0x04 << JUMP_COND_SHIFT) | JUMP_JSL)
-#define JUMP_COND_NOP ((0x02 << JUMP_COND_SHIFT) | JUMP_JSL)
-#define JUMP_COND_NCP ((0x01 << JUMP_COND_SHIFT) | JUMP_JSL)
-
-#define JUMP_OFFSET_SHIFT 0
-#define JUMP_OFFSET_MASK (0xff << JUMP_OFFSET_SHIFT)
+#define JUMP_COND_SHIFT 8
+#define JUMP_COND_MASK (0x100ff << JUMP_COND_SHIFT)
+#define JUMP_COND_PK_0 (0x80 << JUMP_COND_SHIFT)
+#define JUMP_COND_PK_GCD_1 (0x40 << JUMP_COND_SHIFT)
+#define JUMP_COND_PK_PRIME (0x20 << JUMP_COND_SHIFT)
+#define JUMP_COND_MATH_N (0x08 << JUMP_COND_SHIFT)
+#define JUMP_COND_MATH_Z (0x04 << JUMP_COND_SHIFT)
+#define JUMP_COND_MATH_C (0x02 << JUMP_COND_SHIFT)
+#define JUMP_COND_MATH_NV (0x01 << JUMP_COND_SHIFT)
+
+#define JUMP_COND_JRP ((0x80 << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_SHRD ((0x40 << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_SELF ((0x20 << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_CALM ((0x10 << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_NIP ((0x08 << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_NIFP ((0x04 << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_NOP ((0x02 << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_NCP ((0x01 << JUMP_COND_SHIFT) | JUMP_JSL)
+
+#define JUMP_OFFSET_SHIFT 0
+#define JUMP_OFFSET_MASK (0xff << JUMP_OFFSET_SHIFT)
/*
* NFIFO ENTRY
@@ -1500,20 +1500,20 @@
#define NFIFOENTRY_DEST_BOTH (3 << NFIFOENTRY_DEST_SHIFT)
#define NFIFOENTRY_LC2_SHIFT 29
-#define NFIFOENTRY_LC2_MASK (1 << NFIFOENTRY_LC2_SHIFT)
-#define NFIFOENTRY_LC2 (1 << NFIFOENTRY_LC2_SHIFT)
+#define NFIFOENTRY_LC2_MASK (1 << NFIFOENTRY_LC2_SHIFT)
+#define NFIFOENTRY_LC2 (1 << NFIFOENTRY_LC2_SHIFT)
#define NFIFOENTRY_LC1_SHIFT 28
-#define NFIFOENTRY_LC1_MASK (1 << NFIFOENTRY_LC1_SHIFT)
-#define NFIFOENTRY_LC1 (1 << NFIFOENTRY_LC1_SHIFT)
+#define NFIFOENTRY_LC1_MASK (1 << NFIFOENTRY_LC1_SHIFT)
+#define NFIFOENTRY_LC1 (1 << NFIFOENTRY_LC1_SHIFT)
#define NFIFOENTRY_FC2_SHIFT 27
-#define NFIFOENTRY_FC2_MASK (1 << NFIFOENTRY_FC2_SHIFT)
-#define NFIFOENTRY_FC2 (1 << NFIFOENTRY_FC2_SHIFT)
+#define NFIFOENTRY_FC2_MASK (1 << NFIFOENTRY_FC2_SHIFT)
+#define NFIFOENTRY_FC2 (1 << NFIFOENTRY_FC2_SHIFT)
#define NFIFOENTRY_FC1_SHIFT 26
-#define NFIFOENTRY_FC1_MASK (1 << NFIFOENTRY_FC1_SHIFT)
-#define NFIFOENTRY_FC1 (1 << NFIFOENTRY_FC1_SHIFT)
+#define NFIFOENTRY_FC1_MASK (1 << NFIFOENTRY_FC1_SHIFT)
+#define NFIFOENTRY_FC1 (1 << NFIFOENTRY_FC1_SHIFT)
#define NFIFOENTRY_STYPE_SHIFT 24
#define NFIFOENTRY_STYPE_MASK (3 << NFIFOENTRY_STYPE_SHIFT)
@@ -1525,60 +1525,59 @@
#define NFIFOENTRY_DTYPE_SHIFT 20
#define NFIFOENTRY_DTYPE_MASK (0xF << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_SBOX (0x0 << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_AAD (0x1 << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_IV (0x2 << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_SAD (0x3 << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_ICV (0xA << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_SKIP (0xE << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_MSG (0xF << NFIFOENTRY_DTYPE_SHIFT)
-
-#define NFIFOENTRY_DTYPE_PK_A0 (0x0 << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_PK_A1 (0x1 << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_PK_A2 (0x2 << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_PK_A3 (0x3 << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_PK_B0 (0x4 << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_PK_B1 (0x5 << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_PK_B2 (0x6 << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_PK_B3 (0x7 << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_PK_N (0x8 << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_PK_E (0x9 << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_PK_A (0xC << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_PK_B (0xD << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_SBOX (0x0 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_AAD (0x1 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_IV (0x2 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_SAD (0x3 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_ICV (0xA << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_SKIP (0xE << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_MSG (0xF << NFIFOENTRY_DTYPE_SHIFT)
+
+#define NFIFOENTRY_DTYPE_PK_A0 (0x0 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_A1 (0x1 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_A2 (0x2 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_A3 (0x3 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B0 (0x4 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B1 (0x5 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B2 (0x6 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B3 (0x7 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_N (0x8 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_E (0x9 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_A (0xC << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B (0xD << NFIFOENTRY_DTYPE_SHIFT)
#define NFIFOENTRY_BND_SHIFT 19
-#define NFIFOENTRY_BND_MASK (1 << NFIFOENTRY_BND_SHIFT)
-#define NFIFOENTRY_BND (1 << NFIFOENTRY_BND_SHIFT)
+#define NFIFOENTRY_BND_MASK (1 << NFIFOENTRY_BND_SHIFT)
+#define NFIFOENTRY_BND (1 << NFIFOENTRY_BND_SHIFT)
#define NFIFOENTRY_PTYPE_SHIFT 16
#define NFIFOENTRY_PTYPE_MASK (0x7 << NFIFOENTRY_PTYPE_SHIFT)
-#define NFIFOENTRY_PTYPE_ZEROS (0x0 << NFIFOENTRY_PTYPE_SHIFT)
-#define NFIFOENTRY_PTYPE_RND_NOZEROS (0x1 << NFIFOENTRY_PTYPE_SHIFT)
-#define NFIFOENTRY_PTYPE_INCREMENT (0x2 << NFIFOENTRY_PTYPE_SHIFT)
-#define NFIFOENTRY_PTYPE_RND (0x3 << NFIFOENTRY_PTYPE_SHIFT)
-#define NFIFOENTRY_PTYPE_ZEROS_NZ (0x4 << NFIFOENTRY_PTYPE_SHIFT)
-#define NFIFOENTRY_PTYPE_RND_NZ_LZ (0x5 << NFIFOENTRY_PTYPE_SHIFT)
-#define NFIFOENTRY_PTYPE_N (0x6 << NFIFOENTRY_PTYPE_SHIFT)
-#define NFIFOENTRY_PTYPE_RND_NZ_N (0x7 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_ZEROS (0x0 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_RND_NOZEROS (0x1 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_INCREMENT (0x2 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_RND (0x3 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_ZEROS_NZ (0x4 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_RND_NZ_LZ (0x5 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_N (0x6 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_RND_NZ_N (0x7 << NFIFOENTRY_PTYPE_SHIFT)
-#define NFIFOENTRY_OC_SHIFT 15
-#define NFIFOENTRY_OC_MASK (1 << NFIFOENTRY_OC_SHIFT)
-#define NFIFOENTRY_OC (1 << NFIFOENTRY_OC_SHIFT)
+#define NFIFOENTRY_OC_SHIFT 15
+#define NFIFOENTRY_OC_MASK (1 << NFIFOENTRY_OC_SHIFT)
+#define NFIFOENTRY_OC (1 << NFIFOENTRY_OC_SHIFT)
#define NFIFOENTRY_AST_SHIFT 14
-#define NFIFOENTRY_AST_MASK (1 << NFIFOENTRY_OC_SHIFT)
-#define NFIFOENTRY_AST (1 << NFIFOENTRY_OC_SHIFT)
+#define NFIFOENTRY_AST_MASK (1 << NFIFOENTRY_OC_SHIFT)
+#define NFIFOENTRY_AST (1 << NFIFOENTRY_OC_SHIFT)
-#define NFIFOENTRY_BM_SHIFT 11
-#define NFIFOENTRY_BM_MASK (1 << NFIFOENTRY_BM_SHIFT)
-#define NFIFOENTRY_BM (1 << NFIFOENTRY_BM_SHIFT)
-
-#define NFIFOENTRY_PS_SHIFT 10
-#define NFIFOENTRY_PS_MASK (1 << NFIFOENTRY_PS_SHIFT)
-#define NFIFOENTRY_PS (1 << NFIFOENTRY_PS_SHIFT)
+#define NFIFOENTRY_BM_SHIFT 11
+#define NFIFOENTRY_BM_MASK (1 << NFIFOENTRY_BM_SHIFT)
+#define NFIFOENTRY_BM (1 << NFIFOENTRY_BM_SHIFT)
+#define NFIFOENTRY_PS_SHIFT 10
+#define NFIFOENTRY_PS_MASK (1 << NFIFOENTRY_PS_SHIFT)
+#define NFIFOENTRY_PS (1 << NFIFOENTRY_PS_SHIFT)
#define NFIFOENTRY_DLEN_SHIFT 0
#define NFIFOENTRY_DLEN_MASK (0xFFF << NFIFOENTRY_DLEN_SHIFT)
@@ -1591,15 +1590,15 @@
*/
/* IPSec ESP CBC Encap/Decap Options */
-#define PDBOPTS_ESPCBC_ARSNONE 0x00 /* no antireplay window */
-#define PDBOPTS_ESPCBC_ARS32 0x40 /* 32-entry antireplay window */
-#define PDBOPTS_ESPCBC_ARS64 0xc0 /* 64-entry antireplay window */
-#define PDBOPTS_ESPCBC_IVSRC 0x20 /* IV comes from internal random gen */
-#define PDBOPTS_ESPCBC_ESN 0x10 /* extended sequence included */
-#define PDBOPTS_ESPCBC_OUTFMT 0x08 /* output only decapsulation (decap) */
-#define PDBOPTS_ESPCBC_IPHDRSRC 0x08 /* IP header comes from PDB (encap) */
-#define PDBOPTS_ESPCBC_INCIPHDR 0x04 /* Prepend IP header to output frame */
-#define PDBOPTS_ESPCBC_IPVSN 0x02 /* process IPv6 header */
-#define PDBOPTS_ESPCBC_TUNNEL 0x01 /* tunnel mode next-header byte */
+#define PDBOPTS_ESPCBC_ARSNONE 0x00 /* no antireplay window */
+#define PDBOPTS_ESPCBC_ARS32 0x40 /* 32-entry antireplay window */
+#define PDBOPTS_ESPCBC_ARS64 0xc0 /* 64-entry antireplay window */
+#define PDBOPTS_ESPCBC_IVSRC 0x20 /* IV comes from internal random gen */
+#define PDBOPTS_ESPCBC_ESN 0x10 /* extended sequence included */
+#define PDBOPTS_ESPCBC_OUTFMT 0x08 /* output only decapsulation (decap) */
+#define PDBOPTS_ESPCBC_IPHDRSRC 0x08 /* IP header comes from PDB (encap) */
+#define PDBOPTS_ESPCBC_INCIPHDR 0x04 /* Prepend IP header to output frame */
+#define PDBOPTS_ESPCBC_IPVSN 0x02 /* process IPv6 header */
+#define PDBOPTS_ESPCBC_TUNNEL 0x01 /* tunnel mode next-header byte */
#endif /* DESC_H */
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h
index 0991323cf3fd..348b882275f0 100644
--- a/drivers/crypto/caam/desc_constr.h
+++ b/drivers/crypto/caam/desc_constr.h
@@ -18,9 +18,10 @@
#define PRINT_POS
#endif
-#define SET_OK_PROP_ERRORS (IMMEDIATE | LDST_CLASS_DECO | \
- LDST_SRCDST_WORD_DECOCTRL | \
- (LDOFF_CHG_SHARE_OK_PROP << LDST_OFFSET_SHIFT))
+#define SET_OK_NO_PROP_ERRORS (IMMEDIATE | LDST_CLASS_DECO | \
+ LDST_SRCDST_WORD_DECOCTRL | \
+ (LDOFF_CHG_SHARE_OK_NO_PROP << \
+ LDST_OFFSET_SHIFT))
#define DISABLE_AUTO_INFO_FIFO (IMMEDIATE | LDST_CLASS_DECO | \
LDST_SRCDST_WORD_DECOCTRL | \
(LDOFF_DISABLE_AUTO_NFIFO << LDST_OFFSET_SHIFT))
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index aee394e39056..e9f7a70cdd5e 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -657,7 +657,6 @@ struct caam_full {
u64 rsvd[512];
struct caam_assurance assure;
struct caam_queue_if qi;
- struct caam_deco *deco;
};
#endif /* REGS_H */
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
index dcd8babae9eb..597235a2f8f9 100644
--- a/drivers/crypto/mv_cesa.c
+++ b/drivers/crypto/mv_cesa.c
@@ -1128,17 +1128,7 @@ static struct platform_driver marvell_crypto = {
};
MODULE_ALIAS("platform:mv_crypto");
-static int __init mv_crypto_init(void)
-{
- return platform_driver_register(&marvell_crypto);
-}
-module_init(mv_crypto_init);
-
-static void __exit mv_crypto_exit(void)
-{
- platform_driver_unregister(&marvell_crypto);
-}
-module_exit(mv_crypto_exit);
+module_platform_driver(marvell_crypto);
MODULE_AUTHOR("Sebastian Andrzej Siewior <sebastian@breakpoint.cc>");
MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
index a2b553eabbdb..58480d009324 100644
--- a/drivers/crypto/picoxcell_crypto.c
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -873,7 +873,7 @@ static int spacc_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
* request for any other size (192 bits) then we need to do a software
* fallback.
*/
- if ((len != AES_KEYSIZE_128 || len != AES_KEYSIZE_256) &&
+ if (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256 &&
ctx->sw_cipher) {
/*
* Set the fallback transform to use the same request flags as
@@ -886,7 +886,7 @@ static int spacc_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
err = crypto_ablkcipher_setkey(ctx->sw_cipher, key, len);
if (err)
goto sw_setkey_failed;
- } else if ((len != AES_KEYSIZE_128 || len != AES_KEYSIZE_256) &&
+ } else if (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256 &&
!ctx->sw_cipher)
err = -EINVAL;
@@ -1854,17 +1854,7 @@ static struct platform_driver spacc_driver = {
.id_table = spacc_id_table,
};
-static int __init spacc_init(void)
-{
- return platform_driver_register(&spacc_driver);
-}
-module_init(spacc_init);
-
-static void __exit spacc_exit(void)
-{
- platform_driver_unregister(&spacc_driver);
-}
-module_exit(spacc_exit);
+module_platform_driver(spacc_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jamie Iles");
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index 8115417a1c93..3376bca200fc 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -683,18 +683,7 @@ static struct platform_driver s5p_aes_crypto = {
},
};
-static int __init s5p_aes_mod_init(void)
-{
- return platform_driver_register(&s5p_aes_crypto);
-}
-
-static void __exit s5p_aes_mod_exit(void)
-{
- platform_driver_unregister(&s5p_aes_crypto);
-}
-
-module_init(s5p_aes_mod_init);
-module_exit(s5p_aes_mod_exit);
+module_platform_driver(s5p_aes_crypto);
MODULE_DESCRIPTION("S5PV210 AES hw acceleration support.");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index dbe76b5df9cf..2d8c78901686 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -99,6 +99,8 @@ struct talitos_request {
/* per-channel fifo management */
struct talitos_channel {
+ void __iomem *reg;
+
/* request fifo */
struct talitos_request *fifo;
@@ -120,7 +122,7 @@ struct talitos_private {
struct device *dev;
struct platform_device *ofdev;
void __iomem *reg;
- int irq;
+ int irq[2];
/* SEC version geometry (from device tree node) */
unsigned int num_channels;
@@ -144,7 +146,7 @@ struct talitos_private {
atomic_t last_chan ____cacheline_aligned;
/* request callback tasklet */
- struct tasklet_struct done_task;
+ struct tasklet_struct done_task[2];
/* list of registered algorithms */
struct list_head alg_list;
@@ -157,6 +159,7 @@ struct talitos_private {
#define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001
#define TALITOS_FTR_HW_AUTH_CHECK 0x00000002
#define TALITOS_FTR_SHA224_HWINIT 0x00000004
+#define TALITOS_FTR_HMAC_OK 0x00000008
static void to_talitos_ptr(struct talitos_ptr *talitos_ptr, dma_addr_t dma_addr)
{
@@ -196,9 +199,9 @@ static int reset_channel(struct device *dev, int ch)
struct talitos_private *priv = dev_get_drvdata(dev);
unsigned int timeout = TALITOS_TIMEOUT;
- setbits32(priv->reg + TALITOS_CCCR(ch), TALITOS_CCCR_RESET);
+ setbits32(priv->chan[ch].reg + TALITOS_CCCR, TALITOS_CCCR_RESET);
- while ((in_be32(priv->reg + TALITOS_CCCR(ch)) & TALITOS_CCCR_RESET)
+ while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) & TALITOS_CCCR_RESET)
&& --timeout)
cpu_relax();
@@ -208,12 +211,12 @@ static int reset_channel(struct device *dev, int ch)
}
/* set 36-bit addressing, done writeback enable and done IRQ enable */
- setbits32(priv->reg + TALITOS_CCCR_LO(ch), TALITOS_CCCR_LO_EAE |
+ setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);
/* and ICCR writeback, if available */
if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
- setbits32(priv->reg + TALITOS_CCCR_LO(ch),
+ setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
TALITOS_CCCR_LO_IWSE);
return 0;
@@ -223,13 +226,19 @@ static int reset_device(struct device *dev)
{
struct talitos_private *priv = dev_get_drvdata(dev);
unsigned int timeout = TALITOS_TIMEOUT;
+ u32 mcr = TALITOS_MCR_SWR;
- setbits32(priv->reg + TALITOS_MCR, TALITOS_MCR_SWR);
+ setbits32(priv->reg + TALITOS_MCR, mcr);
while ((in_be32(priv->reg + TALITOS_MCR) & TALITOS_MCR_SWR)
&& --timeout)
cpu_relax();
+ if (priv->irq[1]) {
+ mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
+ setbits32(priv->reg + TALITOS_MCR, mcr);
+ }
+
if (timeout == 0) {
dev_err(dev, "failed to reset device\n");
return -EIO;
@@ -327,8 +336,9 @@ static int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
/* GO! */
wmb();
- out_be32(priv->reg + TALITOS_FF(ch), upper_32_bits(request->dma_desc));
- out_be32(priv->reg + TALITOS_FF_LO(ch),
+ out_be32(priv->chan[ch].reg + TALITOS_FF,
+ upper_32_bits(request->dma_desc));
+ out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
lower_32_bits(request->dma_desc));
spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
@@ -397,21 +407,32 @@ static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
/*
* process completed requests for channels that have done status
*/
-static void talitos_done(unsigned long data)
-{
- struct device *dev = (struct device *)data;
- struct talitos_private *priv = dev_get_drvdata(dev);
- int ch;
-
- for (ch = 0; ch < priv->num_channels; ch++)
- flush_channel(dev, ch, 0, 0);
-
- /* At this point, all completed channels have been processed.
- * Unmask done interrupts for channels completed later on.
- */
- setbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_INIT);
- setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT);
+#define DEF_TALITOS_DONE(name, ch_done_mask) \
+static void talitos_done_##name(unsigned long data) \
+{ \
+ struct device *dev = (struct device *)data; \
+ struct talitos_private *priv = dev_get_drvdata(dev); \
+ \
+ if (ch_done_mask & 1) \
+ flush_channel(dev, 0, 0, 0); \
+ if (priv->num_channels == 1) \
+ goto out; \
+ if (ch_done_mask & (1 << 2)) \
+ flush_channel(dev, 1, 0, 0); \
+ if (ch_done_mask & (1 << 4)) \
+ flush_channel(dev, 2, 0, 0); \
+ if (ch_done_mask & (1 << 6)) \
+ flush_channel(dev, 3, 0, 0); \
+ \
+out: \
+ /* At this point, all completed channels have been processed */ \
+ /* Unmask done interrupts for channels completed later on. */ \
+ setbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
+ setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT); \
}
+DEF_TALITOS_DONE(4ch, TALITOS_ISR_4CHDONE)
+DEF_TALITOS_DONE(ch0_2, TALITOS_ISR_CH_0_2_DONE)
+DEF_TALITOS_DONE(ch1_3, TALITOS_ISR_CH_1_3_DONE)
/*
* locate current (offending) descriptor
@@ -422,7 +443,7 @@ static u32 current_desc_hdr(struct device *dev, int ch)
int tail = priv->chan[ch].tail;
dma_addr_t cur_desc;
- cur_desc = in_be32(priv->reg + TALITOS_CDPR_LO(ch));
+ cur_desc = in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);
while (priv->chan[ch].fifo[tail].dma_desc != cur_desc) {
tail = (tail + 1) & (priv->fifo_len - 1);
@@ -444,7 +465,7 @@ static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
int i;
if (!desc_hdr)
- desc_hdr = in_be32(priv->reg + TALITOS_DESCBUF(ch));
+ desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);
switch (desc_hdr & DESC_HDR_SEL0_MASK) {
case DESC_HDR_SEL0_AFEU:
@@ -506,16 +527,15 @@ static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
for (i = 0; i < 8; i++)
dev_err(dev, "DESCBUF 0x%08x_%08x\n",
- in_be32(priv->reg + TALITOS_DESCBUF(ch) + 8*i),
- in_be32(priv->reg + TALITOS_DESCBUF_LO(ch) + 8*i));
+ in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
+ in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
}
/*
* recover from error interrupts
*/
-static void talitos_error(unsigned long data, u32 isr, u32 isr_lo)
+static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
{
- struct device *dev = (struct device *)data;
struct talitos_private *priv = dev_get_drvdata(dev);
unsigned int timeout = TALITOS_TIMEOUT;
int ch, error, reset_dev = 0, reset_ch = 0;
@@ -528,8 +548,8 @@ static void talitos_error(unsigned long data, u32 isr, u32 isr_lo)
error = -EINVAL;
- v = in_be32(priv->reg + TALITOS_CCPSR(ch));
- v_lo = in_be32(priv->reg + TALITOS_CCPSR_LO(ch));
+ v = in_be32(priv->chan[ch].reg + TALITOS_CCPSR);
+ v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);
if (v_lo & TALITOS_CCPSR_LO_DOF) {
dev_err(dev, "double fetch fifo overflow error\n");
@@ -567,10 +587,10 @@ static void talitos_error(unsigned long data, u32 isr, u32 isr_lo)
if (reset_ch) {
reset_channel(dev, ch);
} else {
- setbits32(priv->reg + TALITOS_CCCR(ch),
+ setbits32(priv->chan[ch].reg + TALITOS_CCCR,
TALITOS_CCCR_CONT);
- setbits32(priv->reg + TALITOS_CCCR_LO(ch), 0);
- while ((in_be32(priv->reg + TALITOS_CCCR(ch)) &
+ setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
+ while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
TALITOS_CCCR_CONT) && --timeout)
cpu_relax();
if (timeout == 0) {
@@ -580,7 +600,7 @@ static void talitos_error(unsigned long data, u32 isr, u32 isr_lo)
}
}
}
- if (reset_dev || isr & ~TALITOS_ISR_CHERR || isr_lo) {
+ if (reset_dev || isr & ~TALITOS_ISR_4CHERR || isr_lo) {
dev_err(dev, "done overflow, internal time out, or rngu error: "
"ISR 0x%08x_%08x\n", isr, isr_lo);
@@ -593,30 +613,35 @@ static void talitos_error(unsigned long data, u32 isr, u32 isr_lo)
}
}
-static irqreturn_t talitos_interrupt(int irq, void *data)
-{
- struct device *dev = data;
- struct talitos_private *priv = dev_get_drvdata(dev);
- u32 isr, isr_lo;
-
- isr = in_be32(priv->reg + TALITOS_ISR);
- isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);
- /* Acknowledge interrupt */
- out_be32(priv->reg + TALITOS_ICR, isr);
- out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);
-
- if (unlikely((isr & ~TALITOS_ISR_CHDONE) || isr_lo))
- talitos_error((unsigned long)data, isr, isr_lo);
- else
- if (likely(isr & TALITOS_ISR_CHDONE)) {
- /* mask further done interrupts. */
- clrbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_DONE);
- /* done_task will unmask done interrupts at exit */
- tasklet_schedule(&priv->done_task);
- }
-
- return (isr || isr_lo) ? IRQ_HANDLED : IRQ_NONE;
+#define DEF_TALITOS_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet) \
+static irqreturn_t talitos_interrupt_##name(int irq, void *data) \
+{ \
+ struct device *dev = data; \
+ struct talitos_private *priv = dev_get_drvdata(dev); \
+ u32 isr, isr_lo; \
+ \
+ isr = in_be32(priv->reg + TALITOS_ISR); \
+ isr_lo = in_be32(priv->reg + TALITOS_ISR_LO); \
+ /* Acknowledge interrupt */ \
+ out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
+ out_be32(priv->reg + TALITOS_ICR_LO, isr_lo); \
+ \
+ if (unlikely((isr & ~TALITOS_ISR_4CHDONE) & ch_err_mask || isr_lo)) \
+ talitos_error(dev, isr, isr_lo); \
+ else \
+ if (likely(isr & ch_done_mask)) { \
+ /* mask further done interrupts. */ \
+ clrbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
+ /* done_task will unmask done interrupts at exit */ \
+ tasklet_schedule(&priv->done_task[tlet]); \
+ } \
+ \
+ return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \
+ IRQ_NONE; \
}
+DEF_TALITOS_INTERRUPT(4ch, TALITOS_ISR_4CHDONE, TALITOS_ISR_4CHERR, 0)
+DEF_TALITOS_INTERRUPT(ch0_2, TALITOS_ISR_CH_0_2_DONE, TALITOS_ISR_CH_0_2_ERR, 0)
+DEF_TALITOS_INTERRUPT(ch1_3, TALITOS_ISR_CH_1_3_DONE, TALITOS_ISR_CH_1_3_ERR, 1)
/*
* hwrng
@@ -1874,6 +1899,97 @@ static int ahash_digest(struct ahash_request *areq)
return ahash_process_req(areq, areq->nbytes);
}
+struct keyhash_result {
+ struct completion completion;
+ int err;
+};
+
+static void keyhash_complete(struct crypto_async_request *req, int err)
+{
+ struct keyhash_result *res = req->data;
+
+ if (err == -EINPROGRESS)
+ return;
+
+ res->err = err;
+ complete(&res->completion);
+}
+
+static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
+ u8 *hash)
+{
+ struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
+
+ struct scatterlist sg[1];
+ struct ahash_request *req;
+ struct keyhash_result hresult;
+ int ret;
+
+ init_completion(&hresult.completion);
+
+ req = ahash_request_alloc(tfm, GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ /* Keep tfm keylen == 0 during hash of the long key */
+ ctx->keylen = 0;
+ ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ keyhash_complete, &hresult);
+
+ sg_init_one(&sg[0], key, keylen);
+
+ ahash_request_set_crypt(req, sg, hash, keylen);
+ ret = crypto_ahash_digest(req);
+ switch (ret) {
+ case 0:
+ break;
+ case -EINPROGRESS:
+ case -EBUSY:
+ ret = wait_for_completion_interruptible(
+ &hresult.completion);
+ if (!ret)
+ ret = hresult.err;
+ break;
+ default:
+ break;
+ }
+ ahash_request_free(req);
+
+ return ret;
+}
+
+static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
+ unsigned int blocksize =
+ crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
+ unsigned int digestsize = crypto_ahash_digestsize(tfm);
+ unsigned int keysize = keylen;
+ u8 hash[SHA512_DIGEST_SIZE];
+ int ret;
+
+ if (keylen <= blocksize)
+ memcpy(ctx->key, key, keysize);
+ else {
+ /* Must get the hash of the long key */
+ ret = keyhash(tfm, key, keylen, hash);
+
+ if (ret) {
+ crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ keysize = digestsize;
+ memcpy(ctx->key, hash, digestsize);
+ }
+
+ ctx->keylen = keysize;
+
+ return 0;
+}
+
+
struct talitos_alg_template {
u32 type;
union {
@@ -2217,6 +2333,138 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_SEL0_MDEUB |
DESC_HDR_MODE0_MDEUB_SHA512,
},
+ { .type = CRYPTO_ALG_TYPE_AHASH,
+ .alg.hash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .setkey = ahash_setkey,
+ .halg.digestsize = MD5_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "hmac(md5)",
+ .cra_driver_name = "hmac-md5-talitos",
+ .cra_blocksize = MD5_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC,
+ .cra_type = &crypto_ahash_type
+ }
+ },
+ .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+ DESC_HDR_SEL0_MDEUA |
+ DESC_HDR_MODE0_MDEU_MD5,
+ },
+ { .type = CRYPTO_ALG_TYPE_AHASH,
+ .alg.hash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .setkey = ahash_setkey,
+ .halg.digestsize = SHA1_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "hmac(sha1)",
+ .cra_driver_name = "hmac-sha1-talitos",
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC,
+ .cra_type = &crypto_ahash_type
+ }
+ },
+ .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+ DESC_HDR_SEL0_MDEUA |
+ DESC_HDR_MODE0_MDEU_SHA1,
+ },
+ { .type = CRYPTO_ALG_TYPE_AHASH,
+ .alg.hash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .setkey = ahash_setkey,
+ .halg.digestsize = SHA224_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "hmac(sha224)",
+ .cra_driver_name = "hmac-sha224-talitos",
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC,
+ .cra_type = &crypto_ahash_type
+ }
+ },
+ .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+ DESC_HDR_SEL0_MDEUA |
+ DESC_HDR_MODE0_MDEU_SHA224,
+ },
+ { .type = CRYPTO_ALG_TYPE_AHASH,
+ .alg.hash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .setkey = ahash_setkey,
+ .halg.digestsize = SHA256_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "hmac(sha256)",
+ .cra_driver_name = "hmac-sha256-talitos",
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC,
+ .cra_type = &crypto_ahash_type
+ }
+ },
+ .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+ DESC_HDR_SEL0_MDEUA |
+ DESC_HDR_MODE0_MDEU_SHA256,
+ },
+ { .type = CRYPTO_ALG_TYPE_AHASH,
+ .alg.hash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .setkey = ahash_setkey,
+ .halg.digestsize = SHA384_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "hmac(sha384)",
+ .cra_driver_name = "hmac-sha384-talitos",
+ .cra_blocksize = SHA384_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC,
+ .cra_type = &crypto_ahash_type
+ }
+ },
+ .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+ DESC_HDR_SEL0_MDEUB |
+ DESC_HDR_MODE0_MDEUB_SHA384,
+ },
+ { .type = CRYPTO_ALG_TYPE_AHASH,
+ .alg.hash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .setkey = ahash_setkey,
+ .halg.digestsize = SHA512_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "hmac(sha512)",
+ .cra_driver_name = "hmac-sha512-talitos",
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC,
+ .cra_type = &crypto_ahash_type
+ }
+ },
+ .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+ DESC_HDR_SEL0_MDEUB |
+ DESC_HDR_MODE0_MDEUB_SHA512,
+ }
};
struct talitos_crypto_alg {
@@ -2331,12 +2579,15 @@ static int talitos_remove(struct platform_device *ofdev)
kfree(priv->chan);
- if (priv->irq != NO_IRQ) {
- free_irq(priv->irq, dev);
- irq_dispose_mapping(priv->irq);
- }
+ for (i = 0; i < 2; i++)
+ if (priv->irq[i]) {
+ free_irq(priv->irq[i], dev);
+ irq_dispose_mapping(priv->irq[i]);
+ }
- tasklet_kill(&priv->done_task);
+ tasklet_kill(&priv->done_task[0]);
+ if (priv->irq[1])
+ tasklet_kill(&priv->done_task[1]);
iounmap(priv->reg);
@@ -2373,8 +2624,14 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
case CRYPTO_ALG_TYPE_AHASH:
alg = &t_alg->algt.alg.hash.halg.base;
alg->cra_init = talitos_cra_init_ahash;
+ if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
+ !strncmp(alg->cra_name, "hmac", 4)) {
+ kfree(t_alg);
+ return ERR_PTR(-ENOTSUPP);
+ }
if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
- !strcmp(alg->cra_name, "sha224")) {
+ (!strcmp(alg->cra_name, "sha224") ||
+ !strcmp(alg->cra_name, "hmac(sha224)"))) {
t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
t_alg->algt.desc_hdr_template =
DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2397,6 +2654,54 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
return t_alg;
}
+static int talitos_probe_irq(struct platform_device *ofdev)
+{
+ struct device *dev = &ofdev->dev;
+ struct device_node *np = ofdev->dev.of_node;
+ struct talitos_private *priv = dev_get_drvdata(dev);
+ int err;
+
+ priv->irq[0] = irq_of_parse_and_map(np, 0);
+ if (!priv->irq[0]) {
+ dev_err(dev, "failed to map irq\n");
+ return -EINVAL;
+ }
+
+ priv->irq[1] = irq_of_parse_and_map(np, 1);
+
+ /* get the primary irq line */
+ if (!priv->irq[1]) {
+ err = request_irq(priv->irq[0], talitos_interrupt_4ch, 0,
+ dev_driver_string(dev), dev);
+ goto primary_out;
+ }
+
+ err = request_irq(priv->irq[0], talitos_interrupt_ch0_2, 0,
+ dev_driver_string(dev), dev);
+ if (err)
+ goto primary_out;
+
+ /* get the secondary irq line */
+ err = request_irq(priv->irq[1], talitos_interrupt_ch1_3, 0,
+ dev_driver_string(dev), dev);
+ if (err) {
+ dev_err(dev, "failed to request secondary irq\n");
+ irq_dispose_mapping(priv->irq[1]);
+ priv->irq[1] = 0;
+ }
+
+ return err;
+
+primary_out:
+ if (err) {
+ dev_err(dev, "failed to request primary irq\n");
+ irq_dispose_mapping(priv->irq[0]);
+ priv->irq[0] = 0;
+ }
+
+ return err;
+}
+
static int talitos_probe(struct platform_device *ofdev)
{
struct device *dev = &ofdev->dev;
@@ -2413,28 +2718,22 @@ static int talitos_probe(struct platform_device *ofdev)
priv->ofdev = ofdev;
- tasklet_init(&priv->done_task, talitos_done, (unsigned long)dev);
-
- INIT_LIST_HEAD(&priv->alg_list);
-
- priv->irq = irq_of_parse_and_map(np, 0);
-
- if (priv->irq == NO_IRQ) {
- dev_err(dev, "failed to map irq\n");
- err = -EINVAL;
+ err = talitos_probe_irq(ofdev);
+ if (err)
goto err_out;
- }
- /* get the irq line */
- err = request_irq(priv->irq, talitos_interrupt, 0,
- dev_driver_string(dev), dev);
- if (err) {
- dev_err(dev, "failed to request irq %d\n", priv->irq);
- irq_dispose_mapping(priv->irq);
- priv->irq = NO_IRQ;
- goto err_out;
+ if (!priv->irq[1]) {
+ tasklet_init(&priv->done_task[0], talitos_done_4ch,
+ (unsigned long)dev);
+ } else {
+ tasklet_init(&priv->done_task[0], talitos_done_ch0_2,
+ (unsigned long)dev);
+ tasklet_init(&priv->done_task[1], talitos_done_ch1_3,
+ (unsigned long)dev);
}
+ INIT_LIST_HEAD(&priv->alg_list);
+
priv->reg = of_iomap(np, 0);
if (!priv->reg) {
dev_err(dev, "failed to of_iomap\n");
@@ -2471,7 +2770,8 @@ static int talitos_probe(struct platform_device *ofdev)
if (of_device_is_compatible(np, "fsl,sec2.1"))
priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
- TALITOS_FTR_SHA224_HWINIT;
+ TALITOS_FTR_SHA224_HWINIT |
+ TALITOS_FTR_HMAC_OK;
priv->chan = kzalloc(sizeof(struct talitos_channel) *
priv->num_channels, GFP_KERNEL);
@@ -2482,6 +2782,12 @@ static int talitos_probe(struct platform_device *ofdev)
}
for (i = 0; i < priv->num_channels; i++) {
+ priv->chan[i].reg = priv->reg + TALITOS_CH_STRIDE * (i + 1);
+ if (!priv->irq[1] || !(i & 1))
+ priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;
+ }
+
+ for (i = 0; i < priv->num_channels; i++) {
spin_lock_init(&priv->chan[i].head_lock);
spin_lock_init(&priv->chan[i].tail_lock);
}
@@ -2530,6 +2836,8 @@ static int talitos_probe(struct platform_device *ofdev)
t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
if (IS_ERR(t_alg)) {
err = PTR_ERR(t_alg);
+ if (err == -ENOTSUPP)
+ continue;
goto err_out;
}
@@ -2551,12 +2859,13 @@ static int talitos_probe(struct platform_device *ofdev)
dev_err(dev, "%s alg registration failed\n",
name);
kfree(t_alg);
- } else {
+ } else
list_add_tail(&t_alg->entry, &priv->alg_list);
- dev_info(dev, "%s\n", name);
- }
}
}
+ if (!list_empty(&priv->alg_list))
+ dev_info(dev, "%s algorithms registered in /proc/crypto\n",
+ (char *)of_get_property(np, "compatible", NULL));
return 0;
@@ -2584,17 +2893,7 @@ static struct platform_driver talitos_driver = {
.remove = talitos_remove,
};
-static int __init talitos_init(void)
-{
- return platform_driver_register(&talitos_driver);
-}
-module_init(talitos_init);
-
-static void __exit talitos_exit(void)
-{
- platform_driver_unregister(&talitos_driver);
-}
-module_exit(talitos_exit);
+module_platform_driver(talitos_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
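The init/exit boilerplate removed above is exactly what the module_platform_driver() helper generates. Roughly, as a sketch of the macro's effect rather than a verbatim copy of its kernel definition:

/* Roughly what module_platform_driver(talitos_driver) expands to: */
static int __init talitos_driver_init(void)
{
	return platform_driver_register(&talitos_driver);
}
module_init(talitos_driver_init);

static void __exit talitos_driver_exit(void)
{
	platform_driver_unregister(&talitos_driver);
}
module_exit(talitos_driver_exit);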
diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h
index 0b746aca4587..3c173954ef29 100644
--- a/drivers/crypto/talitos.h
+++ b/drivers/crypto/talitos.h
@@ -1,7 +1,7 @@
/*
* Freescale SEC (talitos) device register and descriptor header defines
*
- * Copyright (c) 2006-2010 Freescale Semiconductor, Inc.
+ * Copyright (c) 2006-2011 Freescale Semiconductor, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -34,28 +34,37 @@
/* global register offset addresses */
#define TALITOS_MCR 0x1030 /* master control register */
-#define TALITOS_MCR_LO 0x1038
+#define TALITOS_MCR_RCA0 (1 << 15) /* remap channel 0 */
+#define TALITOS_MCR_RCA1 (1 << 14) /* remap channel 1 */
+#define TALITOS_MCR_RCA2 (1 << 13) /* remap channel 2 */
+#define TALITOS_MCR_RCA3 (1 << 12) /* remap channel 3 */
#define TALITOS_MCR_SWR 0x1 /* s/w reset */
+#define TALITOS_MCR_LO 0x1034
#define TALITOS_IMR 0x1008 /* interrupt mask register */
#define TALITOS_IMR_INIT 0x100ff /* enable channel IRQs */
#define TALITOS_IMR_DONE 0x00055 /* done IRQs */
#define TALITOS_IMR_LO 0x100C
#define TALITOS_IMR_LO_INIT 0x20000 /* allow RNGU error IRQs */
#define TALITOS_ISR 0x1010 /* interrupt status register */
-#define TALITOS_ISR_CHERR 0xaa /* channel errors mask */
-#define TALITOS_ISR_CHDONE 0x55 /* channel done mask */
+#define TALITOS_ISR_4CHERR 0xaa /* 4 channel errors mask */
+#define TALITOS_ISR_4CHDONE 0x55 /* 4 channel done mask */
+#define TALITOS_ISR_CH_0_2_ERR 0x22 /* channels 0, 2 errors mask */
+#define TALITOS_ISR_CH_0_2_DONE 0x11 /* channels 0, 2 done mask */
+#define TALITOS_ISR_CH_1_3_ERR 0x88 /* channels 1, 3 errors mask */
+#define TALITOS_ISR_CH_1_3_DONE 0x44 /* channels 1, 3 done mask */
#define TALITOS_ISR_LO 0x1014
#define TALITOS_ICR 0x1018 /* interrupt clear register */
#define TALITOS_ICR_LO 0x101C
/* channel register address stride */
+#define TALITOS_CH_BASE_OFFSET 0x1000 /* default channel map base */
#define TALITOS_CH_STRIDE 0x100
/* channel configuration register */
-#define TALITOS_CCCR(ch) (ch * TALITOS_CH_STRIDE + 0x1108)
+#define TALITOS_CCCR 0x8
#define TALITOS_CCCR_CONT 0x2 /* channel continue */
#define TALITOS_CCCR_RESET 0x1 /* channel reset */
-#define TALITOS_CCCR_LO(ch) (ch * TALITOS_CH_STRIDE + 0x110c)
+#define TALITOS_CCCR_LO 0xc
#define TALITOS_CCCR_LO_IWSE 0x80 /* chan. ICCR writeback enab. */
#define TALITOS_CCCR_LO_EAE 0x20 /* extended address enable */
#define TALITOS_CCCR_LO_CDWE 0x10 /* chan. done writeback enab. */
@@ -63,8 +72,8 @@
#define TALITOS_CCCR_LO_CDIE 0x2 /* channel done IRQ enable */
/* CCPSR: channel pointer status register */
-#define TALITOS_CCPSR(ch) (ch * TALITOS_CH_STRIDE + 0x1110)
-#define TALITOS_CCPSR_LO(ch) (ch * TALITOS_CH_STRIDE + 0x1114)
+#define TALITOS_CCPSR 0x10
+#define TALITOS_CCPSR_LO 0x14
#define TALITOS_CCPSR_LO_DOF 0x8000 /* double FF write oflow error */
#define TALITOS_CCPSR_LO_SOF 0x4000 /* single FF write oflow error */
#define TALITOS_CCPSR_LO_MDTE 0x2000 /* master data transfer error */
@@ -79,24 +88,24 @@
#define TALITOS_CCPSR_LO_SRL 0x0010 /* scatter return/length error */
/* channel fetch fifo register */
-#define TALITOS_FF(ch) (ch * TALITOS_CH_STRIDE + 0x1148)
-#define TALITOS_FF_LO(ch) (ch * TALITOS_CH_STRIDE + 0x114c)
+#define TALITOS_FF 0x48
+#define TALITOS_FF_LO 0x4c
/* current descriptor pointer register */
-#define TALITOS_CDPR(ch) (ch * TALITOS_CH_STRIDE + 0x1140)
-#define TALITOS_CDPR_LO(ch) (ch * TALITOS_CH_STRIDE + 0x1144)
+#define TALITOS_CDPR 0x40
+#define TALITOS_CDPR_LO 0x44
/* descriptor buffer register */
-#define TALITOS_DESCBUF(ch) (ch * TALITOS_CH_STRIDE + 0x1180)
-#define TALITOS_DESCBUF_LO(ch) (ch * TALITOS_CH_STRIDE + 0x1184)
+#define TALITOS_DESCBUF 0x80
+#define TALITOS_DESCBUF_LO 0x84
/* gather link table */
-#define TALITOS_GATHER(ch) (ch * TALITOS_CH_STRIDE + 0x11c0)
-#define TALITOS_GATHER_LO(ch) (ch * TALITOS_CH_STRIDE + 0x11c4)
+#define TALITOS_GATHER 0xc0
+#define TALITOS_GATHER_LO 0xc4
/* scatter link table */
-#define TALITOS_SCATTER(ch) (ch * TALITOS_CH_STRIDE + 0x11e0)
-#define TALITOS_SCATTER_LO(ch) (ch * TALITOS_CH_STRIDE + 0x11e4)
+#define TALITOS_SCATTER 0xe0
+#define TALITOS_SCATTER_LO 0xe4
/* execution unit interrupt status registers */
#define TALITOS_DEUISR 0x2030 /* DES unit */
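With the channel register macros reduced to plain offsets, the per-channel base set up in talitos_probe() (priv->chan[i].reg, shown earlier in this patch) now carries the TALITOS_CH_STRIDE multiple and, where applicable, TALITOS_CH_BASE_OFFSET. A rough illustration of the resulting access pattern, assuming the driver's powerpc out_be32() accessor; the helper name is invented:

/*
 * Illustrative only: channel register access after the remap.  The
 * per-channel base already includes TALITOS_CH_STRIDE * (ch + 1) and,
 * for non-remapped channels, TALITOS_CH_BASE_OFFSET.
 */
static inline void example_reset_channel(struct talitos_private *priv, int ch)
{
	/* old style: out_be32(priv->reg + TALITOS_CCCR(ch), TALITOS_CCCR_RESET); */
	out_be32(priv->chan[ch].reg + TALITOS_CCCR, TALITOS_CCCR_RESET);
}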
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
index 8f0491037080..464fa2147dfb 100644
--- a/drivers/devfreq/Kconfig
+++ b/drivers/devfreq/Kconfig
@@ -65,4 +65,17 @@ config DEVFREQ_GOV_USERSPACE
comment "DEVFREQ Drivers"
+config ARM_EXYNOS4_BUS_DEVFREQ
+ bool "ARM Exynos4210/4212/4412 Memory Bus DEVFREQ Driver"
+ depends on CPU_EXYNOS4210 || CPU_EXYNOS4212 || CPU_EXYNOS4412
+ select ARCH_HAS_OPP
+ select DEVFREQ_GOV_SIMPLE_ONDEMAND
+ help
+ This adds the DEVFREQ driver for Exynos4210 memory bus (vdd_int)
+ and Exynos4212/4412 memory interface and bus (vdd_mif + vdd_int).
+ It reads PPMU counters of memory controllers and adjusts
+ the operating frequencies and voltages with OPP support.
+ To operate with optimal voltages, ASV support is required
+ (CONFIG_EXYNOS_ASV).
+
endif # PM_DEVFREQ
diff --git a/drivers/devfreq/Makefile b/drivers/devfreq/Makefile
index 4564a89e970a..8c464234f7e7 100644
--- a/drivers/devfreq/Makefile
+++ b/drivers/devfreq/Makefile
@@ -3,3 +3,6 @@ obj-$(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND) += governor_simpleondemand.o
obj-$(CONFIG_DEVFREQ_GOV_PERFORMANCE) += governor_performance.o
obj-$(CONFIG_DEVFREQ_GOV_POWERSAVE) += governor_powersave.o
obj-$(CONFIG_DEVFREQ_GOV_USERSPACE) += governor_userspace.o
+
+# DEVFREQ Drivers
+obj-$(CONFIG_ARM_EXYNOS4_BUS_DEVFREQ) += exynos4_bus.o
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 59d24e9cb8c5..c189b82f5ece 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -347,7 +347,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
if (!IS_ERR(devfreq)) {
dev_err(dev, "%s: Unable to create devfreq for the device. It already has one.\n", __func__);
err = -EINVAL;
- goto out;
+ goto err_out;
}
}
@@ -356,7 +356,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
dev_err(dev, "%s: Unable to create devfreq for the device\n",
__func__);
err = -ENOMEM;
- goto out;
+ goto err_out;
}
mutex_init(&devfreq->lock);
@@ -399,17 +399,16 @@ struct devfreq *devfreq_add_device(struct device *dev,
devfreq->next_polling);
}
mutex_unlock(&devfreq_list_lock);
- goto out;
+out:
+ return devfreq;
+
err_init:
device_unregister(&devfreq->dev);
err_dev:
mutex_unlock(&devfreq->lock);
kfree(devfreq);
-out:
- if (err)
- return ERR_PTR(err);
- else
- return devfreq;
+err_out:
+ return ERR_PTR(err);
}
/**
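After this cleanup devfreq_add_device() returns either a valid devfreq pointer or ERR_PTR(err), rather than funnelling both outcomes through a single exit label. Callers therefore check with IS_ERR(), as in this illustrative fragment (the profile argument is a placeholder; devfreq_simple_ondemand is the governor used later in this patch):

#include <linux/devfreq.h>
#include <linux/err.h>

static int example_register(struct device *dev,
			    struct devfreq_dev_profile *profile)
{
	struct devfreq *df;

	df = devfreq_add_device(dev, profile, &devfreq_simple_ondemand, NULL);
	if (IS_ERR(df))
		return PTR_ERR(df);	/* error pointer, never NULL, on failure */

	return 0;
}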
diff --git a/drivers/devfreq/exynos4_bus.c b/drivers/devfreq/exynos4_bus.c
new file mode 100644
index 000000000000..6460577d6701
--- /dev/null
+++ b/drivers/devfreq/exynos4_bus.c
@@ -0,0 +1,1135 @@
+/* drivers/devfreq/exynos4_bus.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ * MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * EXYNOS4 - Memory/Bus clock frequency scaling support in DEVFREQ framework
+ * This version supports EXYNOS4210 only. It changes bus frequencies
+ * and vdd_int voltages. Exynos4412/4212 can be supported with minor
+ * modifications.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/suspend.h>
+#include <linux/opp.h>
+#include <linux/devfreq.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/module.h>
+
+/* Exynos4 ASV has been in the mailing list, but not upstreamed, yet. */
+#ifdef CONFIG_EXYNOS_ASV
+extern unsigned int exynos_result_of_asv;
+#endif
+
+#include <mach/regs-clock.h>
+
+#include <plat/map-s5p.h>
+
+#define MAX_SAFEVOLT 1200000 /* 1.2V */
+
+enum exynos4_busf_type {
+ TYPE_BUSF_EXYNOS4210,
+ TYPE_BUSF_EXYNOS4x12,
+};
+
+/* Assume that the bus is saturated if the utilization is 40% */
+#define BUS_SATURATION_RATIO 40
+
+enum ppmu_counter {
+ PPMU_PMNCNT0 = 0,
+ PPMU_PMCCNT1,
+ PPMU_PMNCNT2,
+ PPMU_PMNCNT3,
+ PPMU_PMNCNT_MAX,
+};
+struct exynos4_ppmu {
+ void __iomem *hw_base;
+ unsigned int ccnt;
+ unsigned int event;
+ unsigned int count[PPMU_PMNCNT_MAX];
+ bool ccnt_overflow;
+ bool count_overflow[PPMU_PMNCNT_MAX];
+};
+
+enum busclk_level_idx {
+ LV_0 = 0,
+ LV_1,
+ LV_2,
+ LV_3,
+ LV_4,
+ _LV_END
+};
+#define EX4210_LV_MAX LV_2
+#define EX4x12_LV_MAX LV_4
+#define EX4210_LV_NUM (LV_2 + 1)
+#define EX4x12_LV_NUM (LV_4 + 1)
+
+struct busfreq_data {
+ enum exynos4_busf_type type;
+ struct device *dev;
+ struct devfreq *devfreq;
+ bool disabled;
+ struct regulator *vdd_int;
+ struct regulator *vdd_mif; /* Exynos4412/4212 only */
+ struct opp *curr_opp;
+ struct exynos4_ppmu dmc[2];
+
+ struct notifier_block pm_notifier;
+ struct mutex lock;
+
+ /* Dividers calculated at boot/probe-time */
+ unsigned int dmc_divtable[_LV_END]; /* DMC0 */
+ unsigned int top_divtable[_LV_END];
+};
+
+struct bus_opp_table {
+ unsigned int idx;
+ unsigned long clk;
+ unsigned long volt;
+};
+
+/* 4210 controls clock of mif and voltage of int */
+static struct bus_opp_table exynos4210_busclk_table[] = {
+ {LV_0, 400000, 1150000},
+ {LV_1, 267000, 1050000},
+ {LV_2, 133000, 1025000},
+ {0, 0, 0},
+};
+
+/*
+ * MIF is the main control knob for exynos4x12 MIF/INT; the clock and
+ * voltage of both mif/int are controlled through it.
+ */
+static struct bus_opp_table exynos4x12_mifclk_table[] = {
+ {LV_0, 400000, 1100000},
+ {LV_1, 267000, 1000000},
+ {LV_2, 160000, 950000},
+ {LV_3, 133000, 950000},
+ {LV_4, 100000, 950000},
+ {0, 0, 0},
+};
+
+/*
+ * INT is not the control knob of 4x12; LV_x here is not meant to
+ * represent the current performance (MIF does).
+ */
+static struct bus_opp_table exynos4x12_intclk_table[] = {
+ {LV_0, 200000, 1000000},
+ {LV_1, 160000, 950000},
+ {LV_2, 133000, 925000},
+ {LV_3, 100000, 900000},
+ {0, 0, 0},
+};
+
+/* TODO: asv volt definitions are "__initdata"? */
+/* Some chips have different operating voltages */
+static unsigned int exynos4210_asv_volt[][EX4210_LV_NUM] = {
+ {1150000, 1050000, 1050000},
+ {1125000, 1025000, 1025000},
+ {1100000, 1000000, 1000000},
+ {1075000, 975000, 975000},
+ {1050000, 950000, 950000},
+};
+
+static unsigned int exynos4x12_mif_step_50[][EX4x12_LV_NUM] = {
+ /* 400 267 160 133 100 */
+ {1050000, 950000, 900000, 900000, 900000}, /* ASV0 */
+ {1050000, 950000, 900000, 900000, 900000}, /* ASV1 */
+ {1050000, 950000, 900000, 900000, 900000}, /* ASV2 */
+ {1050000, 900000, 900000, 900000, 900000}, /* ASV3 */
+ {1050000, 900000, 900000, 900000, 850000}, /* ASV4 */
+ {1050000, 900000, 900000, 850000, 850000}, /* ASV5 */
+ {1050000, 900000, 850000, 850000, 850000}, /* ASV6 */
+ {1050000, 900000, 850000, 850000, 850000}, /* ASV7 */
+ {1050000, 900000, 850000, 850000, 850000}, /* ASV8 */
+};
+
+static unsigned int exynos4x12_int_volt[][EX4x12_LV_NUM] = {
+ /* 200 160 133 100 */
+ {1000000, 950000, 925000, 900000}, /* ASV0 */
+ {975000, 925000, 925000, 900000}, /* ASV1 */
+ {950000, 925000, 900000, 875000}, /* ASV2 */
+ {950000, 900000, 900000, 875000}, /* ASV3 */
+ {925000, 875000, 875000, 875000}, /* ASV4 */
+ {900000, 850000, 850000, 850000}, /* ASV5 */
+ {900000, 850000, 850000, 850000}, /* ASV6 */
+ {900000, 850000, 850000, 850000}, /* ASV7 */
+ {900000, 850000, 850000, 850000}, /* ASV8 */
+};
+
+/*** Clock Divider Data for Exynos4210 ***/
+static unsigned int exynos4210_clkdiv_dmc0[][8] = {
+ /*
+ * Clock divider value for following
+ * { DIVACP, DIVACP_PCLK, DIVDPHY, DIVDMC, DIVDMCD
+ * DIVDMCP, DIVCOPY2, DIVCORE_TIMERS }
+ */
+
+ /* DMC L0: 400MHz */
+ { 3, 1, 1, 1, 1, 1, 3, 1 },
+ /* DMC L1: 266.7MHz */
+ { 4, 1, 1, 2, 1, 1, 3, 1 },
+ /* DMC L2: 133MHz */
+ { 5, 1, 1, 5, 1, 1, 3, 1 },
+};
+static unsigned int exynos4210_clkdiv_top[][5] = {
+ /*
+ * Clock divider value for following
+ * { DIVACLK200, DIVACLK100, DIVACLK160, DIVACLK133, DIVONENAND }
+ */
+ /* ACLK200 L0: 200MHz */
+ { 3, 7, 4, 5, 1 },
+ /* ACLK200 L1: 160MHz */
+ { 4, 7, 5, 6, 1 },
+ /* ACLK200 L2: 133MHz */
+ { 5, 7, 7, 7, 1 },
+};
+static unsigned int exynos4210_clkdiv_lr_bus[][2] = {
+ /*
+ * Clock divider value for following
+ * { DIVGDL/R, DIVGPL/R }
+ */
+ /* ACLK_GDL/R L1: 200MHz */
+ { 3, 1 },
+ /* ACLK_GDL/R L2: 160MHz */
+ { 4, 1 },
+ /* ACLK_GDL/R L3: 133MHz */
+ { 5, 1 },
+};
+
+/*** Clock Divider Data for Exynos4212/4412 ***/
+static unsigned int exynos4x12_clkdiv_dmc0[][6] = {
+ /*
+ * Clock divider value for following
+ * { DIVACP, DIVACP_PCLK, DIVDPHY, DIVDMC, DIVDMCD
+ * DIVDMCP}
+ */
+
+ /* DMC L0: 400MHz */
+ {3, 1, 1, 1, 1, 1},
+ /* DMC L1: 266.7MHz */
+ {4, 1, 1, 2, 1, 1},
+ /* DMC L2: 160MHz */
+ {5, 1, 1, 4, 1, 1},
+ /* DMC L3: 133MHz */
+ {5, 1, 1, 5, 1, 1},
+ /* DMC L4: 100MHz */
+ {7, 1, 1, 7, 1, 1},
+};
+static unsigned int exynos4x12_clkdiv_dmc1[][6] = {
+ /*
+ * Clock divider value for following
+ * { G2DACP, DIVC2C, DIVC2C_ACLK }
+ */
+
+ /* DMC L0: 400MHz */
+ {3, 1, 1},
+ /* DMC L1: 266.7MHz */
+ {4, 2, 1},
+ /* DMC L2: 160MHz */
+ {5, 4, 1},
+ /* DMC L3: 133MHz */
+ {5, 5, 1},
+ /* DMC L4: 100MHz */
+ {7, 7, 1},
+};
+static unsigned int exynos4x12_clkdiv_top[][5] = {
+ /*
+ * Clock divider value for following
+ * { DIVACLK266_GPS, DIVACLK100, DIVACLK160,
+ DIVACLK133, DIVONENAND }
+ */
+
+ /* ACLK_GDL/R L0: 200MHz */
+ {2, 7, 4, 5, 1},
+ /* ACLK_GDL/R L1: 200MHz */
+ {2, 7, 4, 5, 1},
+ /* ACLK_GDL/R L2: 160MHz */
+ {4, 7, 5, 7, 1},
+ /* ACLK_GDL/R L3: 133MHz */
+ {4, 7, 5, 7, 1},
+ /* ACLK_GDL/R L4: 100MHz */
+ {7, 7, 7, 7, 1},
+};
+static unsigned int exynos4x12_clkdiv_lr_bus[][2] = {
+ /*
+ * Clock divider value for following
+ * { DIVGDL/R, DIVGPL/R }
+ */
+
+ /* ACLK_GDL/R L0: 200MHz */
+ {3, 1},
+ /* ACLK_GDL/R L1: 200MHz */
+ {3, 1},
+ /* ACLK_GDL/R L2: 160MHz */
+ {4, 1},
+ /* ACLK_GDL/R L3: 133MHz */
+ {5, 1},
+ /* ACLK_GDL/R L4: 100MHz */
+ {7, 1},
+};
+static unsigned int exynos4x12_clkdiv_sclkip[][3] = {
+ /*
+ * Clock divider value for following
+ * { DIVMFC, DIVJPEG, DIVFIMC0~3}
+ */
+
+ /* SCLK_MFC: 200MHz */
+ {3, 3, 4},
+ /* SCLK_MFC: 200MHz */
+ {3, 3, 4},
+ /* SCLK_MFC: 160MHz */
+ {4, 4, 5},
+ /* SCLK_MFC: 133MHz */
+ {5, 5, 5},
+ /* SCLK_MFC: 100MHz */
+ {7, 7, 7},
+};
+
+
+static int exynos4210_set_busclk(struct busfreq_data *data, struct opp *opp)
+{
+ unsigned int index;
+ unsigned int tmp;
+
+ for (index = LV_0; index < EX4210_LV_NUM; index++)
+ if (opp_get_freq(opp) == exynos4210_busclk_table[index].clk)
+ break;
+
+ if (index == EX4210_LV_NUM)
+ return -EINVAL;
+
+ /* Change Divider - DMC0 */
+ tmp = data->dmc_divtable[index];
+
+ __raw_writel(tmp, S5P_CLKDIV_DMC0);
+
+ do {
+ tmp = __raw_readl(S5P_CLKDIV_STAT_DMC0);
+ } while (tmp & 0x11111111);
+
+ /* Change Divider - TOP */
+ tmp = data->top_divtable[index];
+
+ __raw_writel(tmp, S5P_CLKDIV_TOP);
+
+ do {
+ tmp = __raw_readl(S5P_CLKDIV_STAT_TOP);
+ } while (tmp & 0x11111);
+
+ /* Change Divider - LEFTBUS */
+ tmp = __raw_readl(S5P_CLKDIV_LEFTBUS);
+
+ tmp &= ~(S5P_CLKDIV_BUS_GDLR_MASK | S5P_CLKDIV_BUS_GPLR_MASK);
+
+ tmp |= ((exynos4210_clkdiv_lr_bus[index][0] <<
+ S5P_CLKDIV_BUS_GDLR_SHIFT) |
+ (exynos4210_clkdiv_lr_bus[index][1] <<
+ S5P_CLKDIV_BUS_GPLR_SHIFT));
+
+ __raw_writel(tmp, S5P_CLKDIV_LEFTBUS);
+
+ do {
+ tmp = __raw_readl(S5P_CLKDIV_STAT_LEFTBUS);
+ } while (tmp & 0x11);
+
+ /* Change Divider - RIGHTBUS */
+ tmp = __raw_readl(S5P_CLKDIV_RIGHTBUS);
+
+ tmp &= ~(S5P_CLKDIV_BUS_GDLR_MASK | S5P_CLKDIV_BUS_GPLR_MASK);
+
+ tmp |= ((exynos4210_clkdiv_lr_bus[index][0] <<
+ S5P_CLKDIV_BUS_GDLR_SHIFT) |
+ (exynos4210_clkdiv_lr_bus[index][1] <<
+ S5P_CLKDIV_BUS_GPLR_SHIFT));
+
+ __raw_writel(tmp, S5P_CLKDIV_RIGHTBUS);
+
+ do {
+ tmp = __raw_readl(S5P_CLKDIV_STAT_RIGHTBUS);
+ } while (tmp & 0x11);
+
+ return 0;
+}
+
+static int exynos4x12_set_busclk(struct busfreq_data *data, struct opp *opp)
+{
+ unsigned int index;
+ unsigned int tmp;
+
+ for (index = LV_0; index < EX4x12_LV_NUM; index++)
+ if (opp_get_freq(opp) == exynos4x12_mifclk_table[index].clk)
+ break;
+
+ if (index == EX4x12_LV_NUM)
+ return -EINVAL;
+
+ /* Change Divider - DMC0 */
+ tmp = data->dmc_divtable[index];
+
+ __raw_writel(tmp, S5P_CLKDIV_DMC0);
+
+ do {
+ tmp = __raw_readl(S5P_CLKDIV_STAT_DMC0);
+ } while (tmp & 0x11111111);
+
+ /* Change Divider - DMC1 */
+ tmp = __raw_readl(S5P_CLKDIV_DMC1);
+
+ tmp &= ~(S5P_CLKDIV_DMC1_G2D_ACP_MASK |
+ S5P_CLKDIV_DMC1_C2C_MASK |
+ S5P_CLKDIV_DMC1_C2CACLK_MASK);
+
+ tmp |= ((exynos4x12_clkdiv_dmc1[index][0] <<
+ S5P_CLKDIV_DMC1_G2D_ACP_SHIFT) |
+ (exynos4x12_clkdiv_dmc1[index][1] <<
+ S5P_CLKDIV_DMC1_C2C_SHIFT) |
+ (exynos4x12_clkdiv_dmc1[index][2] <<
+ S5P_CLKDIV_DMC1_C2CACLK_SHIFT));
+
+ __raw_writel(tmp, S5P_CLKDIV_DMC1);
+
+ do {
+ tmp = __raw_readl(S5P_CLKDIV_STAT_DMC1);
+ } while (tmp & 0x111111);
+
+ /* Change Divider - TOP */
+ tmp = __raw_readl(S5P_CLKDIV_TOP);
+
+ tmp &= ~(S5P_CLKDIV_TOP_ACLK266_GPS_MASK |
+ S5P_CLKDIV_TOP_ACLK100_MASK |
+ S5P_CLKDIV_TOP_ACLK160_MASK |
+ S5P_CLKDIV_TOP_ACLK133_MASK |
+ S5P_CLKDIV_TOP_ONENAND_MASK);
+
+ tmp |= ((exynos4x12_clkdiv_top[index][0] <<
+ S5P_CLKDIV_TOP_ACLK266_GPS_SHIFT) |
+ (exynos4x12_clkdiv_top[index][1] <<
+ S5P_CLKDIV_TOP_ACLK100_SHIFT) |
+ (exynos4x12_clkdiv_top[index][2] <<
+ S5P_CLKDIV_TOP_ACLK160_SHIFT) |
+ (exynos4x12_clkdiv_top[index][3] <<
+ S5P_CLKDIV_TOP_ACLK133_SHIFT) |
+ (exynos4x12_clkdiv_top[index][4] <<
+ S5P_CLKDIV_TOP_ONENAND_SHIFT));
+
+ __raw_writel(tmp, S5P_CLKDIV_TOP);
+
+ do {
+ tmp = __raw_readl(S5P_CLKDIV_STAT_TOP);
+ } while (tmp & 0x11111);
+
+ /* Change Divider - LEFTBUS */
+ tmp = __raw_readl(S5P_CLKDIV_LEFTBUS);
+
+ tmp &= ~(S5P_CLKDIV_BUS_GDLR_MASK | S5P_CLKDIV_BUS_GPLR_MASK);
+
+ tmp |= ((exynos4x12_clkdiv_lr_bus[index][0] <<
+ S5P_CLKDIV_BUS_GDLR_SHIFT) |
+ (exynos4x12_clkdiv_lr_bus[index][1] <<
+ S5P_CLKDIV_BUS_GPLR_SHIFT));
+
+ __raw_writel(tmp, S5P_CLKDIV_LEFTBUS);
+
+ do {
+ tmp = __raw_readl(S5P_CLKDIV_STAT_LEFTBUS);
+ } while (tmp & 0x11);
+
+ /* Change Divider - RIGHTBUS */
+ tmp = __raw_readl(S5P_CLKDIV_RIGHTBUS);
+
+ tmp &= ~(S5P_CLKDIV_BUS_GDLR_MASK | S5P_CLKDIV_BUS_GPLR_MASK);
+
+ tmp |= ((exynos4x12_clkdiv_lr_bus[index][0] <<
+ S5P_CLKDIV_BUS_GDLR_SHIFT) |
+ (exynos4x12_clkdiv_lr_bus[index][1] <<
+ S5P_CLKDIV_BUS_GPLR_SHIFT));
+
+ __raw_writel(tmp, S5P_CLKDIV_RIGHTBUS);
+
+ do {
+ tmp = __raw_readl(S5P_CLKDIV_STAT_RIGHTBUS);
+ } while (tmp & 0x11);
+
+ /* Change Divider - MFC */
+ tmp = __raw_readl(S5P_CLKDIV_MFC);
+
+ tmp &= ~(S5P_CLKDIV_MFC_MASK);
+
+ tmp |= ((exynos4x12_clkdiv_sclkip[index][0] <<
+ S5P_CLKDIV_MFC_SHIFT));
+
+ __raw_writel(tmp, S5P_CLKDIV_MFC);
+
+ do {
+ tmp = __raw_readl(S5P_CLKDIV_STAT_MFC);
+ } while (tmp & 0x1);
+
+ /* Change Divider - JPEG */
+ tmp = __raw_readl(S5P_CLKDIV_CAM1);
+
+ tmp &= ~(S5P_CLKDIV_CAM1_JPEG_MASK);
+
+ tmp |= ((exynos4x12_clkdiv_sclkip[index][1] <<
+ S5P_CLKDIV_CAM1_JPEG_SHIFT));
+
+ __raw_writel(tmp, S5P_CLKDIV_CAM1);
+
+ do {
+ tmp = __raw_readl(S5P_CLKDIV_STAT_CAM1);
+ } while (tmp & 0x1);
+
+ /* Change Divider - FIMC0~3 */
+ tmp = __raw_readl(S5P_CLKDIV_CAM);
+
+ tmp &= ~(S5P_CLKDIV_CAM_FIMC0_MASK | S5P_CLKDIV_CAM_FIMC1_MASK |
+ S5P_CLKDIV_CAM_FIMC2_MASK | S5P_CLKDIV_CAM_FIMC3_MASK);
+
+ tmp |= ((exynos4x12_clkdiv_sclkip[index][2] <<
+ S5P_CLKDIV_CAM_FIMC0_SHIFT) |
+ (exynos4x12_clkdiv_sclkip[index][2] <<
+ S5P_CLKDIV_CAM_FIMC1_SHIFT) |
+ (exynos4x12_clkdiv_sclkip[index][2] <<
+ S5P_CLKDIV_CAM_FIMC2_SHIFT) |
+ (exynos4x12_clkdiv_sclkip[index][2] <<
+ S5P_CLKDIV_CAM_FIMC3_SHIFT));
+
+ __raw_writel(tmp, S5P_CLKDIV_CAM);
+
+ do {
+ tmp = __raw_readl(S5P_CLKDIV_STAT_CAM1);
+ } while (tmp & 0x1111);
+
+ return 0;
+}
+
+
+static void busfreq_mon_reset(struct busfreq_data *data)
+{
+ unsigned int i;
+
+ for (i = 0; i < 2; i++) {
+ void __iomem *ppmu_base = data->dmc[i].hw_base;
+
+ /* Reset PPMU */
+ __raw_writel(0x8000000f, ppmu_base + 0xf010);
+ __raw_writel(0x8000000f, ppmu_base + 0xf050);
+ __raw_writel(0x6, ppmu_base + 0xf000);
+ __raw_writel(0x0, ppmu_base + 0xf100);
+
+ /* Set PPMU Event */
+ data->dmc[i].event = 0x6;
+ __raw_writel(((data->dmc[i].event << 12) | 0x1),
+ ppmu_base + 0xfc);
+
+ /* Start PPMU */
+ __raw_writel(0x1, ppmu_base + 0xf000);
+ }
+}
+
+static void exynos4_read_ppmu(struct busfreq_data *data)
+{
+ int i, j;
+
+ for (i = 0; i < 2; i++) {
+ void __iomem *ppmu_base = data->dmc[i].hw_base;
+ u32 overflow;
+
+ /* Stop PPMU */
+ __raw_writel(0x0, ppmu_base + 0xf000);
+
+ /* Update local data from PPMU */
+ overflow = __raw_readl(ppmu_base + 0xf050);
+
+ data->dmc[i].ccnt = __raw_readl(ppmu_base + 0xf100);
+ data->dmc[i].ccnt_overflow = overflow & (1 << 31);
+
+ for (j = 0; j < PPMU_PMNCNT_MAX; j++) {
+ data->dmc[i].count[j] = __raw_readl(
+ ppmu_base + (0xf110 + (0x10 * j)));
+ data->dmc[i].count_overflow[j] = overflow & (1 << j);
+ }
+ }
+
+ busfreq_mon_reset(data);
+}
+
+static int exynos4x12_get_intspec(unsigned long mifclk)
+{
+ int i = 0;
+
+ while (exynos4x12_intclk_table[i].clk) {
+ if (exynos4x12_intclk_table[i].clk <= mifclk)
+ return i;
+ i++;
+ }
+
+ return -EINVAL;
+}
+
+static int exynos4_bus_setvolt(struct busfreq_data *data, struct opp *opp,
+ struct opp *oldopp)
+{
+ int err = 0, tmp;
+ unsigned long volt = opp_get_voltage(opp);
+
+ switch (data->type) {
+ case TYPE_BUSF_EXYNOS4210:
+ /* OPP represents DMC clock + INT voltage */
+ err = regulator_set_voltage(data->vdd_int, volt,
+ MAX_SAFEVOLT);
+ break;
+ case TYPE_BUSF_EXYNOS4x12:
+ /* OPP represents MIF clock + MIF voltage */
+ err = regulator_set_voltage(data->vdd_mif, volt,
+ MAX_SAFEVOLT);
+ if (err)
+ break;
+
+ tmp = exynos4x12_get_intspec(opp_get_freq(opp));
+ if (tmp < 0) {
+ err = tmp;
+ regulator_set_voltage(data->vdd_mif,
+ opp_get_voltage(oldopp),
+ MAX_SAFEVOLT);
+ break;
+ }
+ err = regulator_set_voltage(data->vdd_int,
+ exynos4x12_intclk_table[tmp].volt,
+ MAX_SAFEVOLT);
+ /* Try to recover */
+ if (err)
+ regulator_set_voltage(data->vdd_mif,
+ opp_get_voltage(oldopp),
+ MAX_SAFEVOLT);
+ break;
+ default:
+ err = -EINVAL;
+ }
+
+ return err;
+}
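On Exynos4x12 the OPP carries the MIF clock and voltage, and the matching INT voltage is derived via exynos4x12_get_intspec(), with vdd_mif rolled back if the second regulator call fails. A worked lookup using the static table values above, before the ASV adjustment applied in exynos4x12_init_tables():

/*
 * Worked lookup (static, pre-ASV table values): for a target MIF clock
 * of 267000 kHz, exynos4x12_get_intspec() returns index 0 (200000 kHz
 * is the first INT entry <= 267000), so vdd_mif is set to 1000000 uV
 * and vdd_int to 1000000 uV; if the INT lookup or the second
 * regulator_set_voltage() fails, vdd_mif is rolled back to the
 * previous OPP's voltage.
 */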
+
+static int exynos4_bus_target(struct device *dev, unsigned long *_freq)
+{
+ int err = 0;
+ struct platform_device *pdev = container_of(dev, struct platform_device,
+ dev);
+ struct busfreq_data *data = platform_get_drvdata(pdev);
+ struct opp *opp = devfreq_recommended_opp(dev, _freq);
+ unsigned long old_freq = opp_get_freq(data->curr_opp);
+ unsigned long freq = opp_get_freq(opp);
+
+ if (old_freq == freq)
+ return 0;
+
+	dev_dbg(dev, "targeting %lukHz %luuV\n", freq, opp_get_voltage(opp));
+
+ mutex_lock(&data->lock);
+
+ if (data->disabled)
+ goto out;
+
+ if (old_freq < freq)
+ err = exynos4_bus_setvolt(data, opp, data->curr_opp);
+ if (err)
+ goto out;
+
+ if (old_freq != freq) {
+ switch (data->type) {
+ case TYPE_BUSF_EXYNOS4210:
+ err = exynos4210_set_busclk(data, opp);
+ break;
+ case TYPE_BUSF_EXYNOS4x12:
+ err = exynos4x12_set_busclk(data, opp);
+ break;
+ default:
+ err = -EINVAL;
+ }
+ }
+ if (err)
+ goto out;
+
+ if (old_freq > freq)
+ err = exynos4_bus_setvolt(data, opp, data->curr_opp);
+ if (err)
+ goto out;
+
+ data->curr_opp = opp;
+out:
+ mutex_unlock(&data->lock);
+ return err;
+}
+
+static int exynos4_get_busier_dmc(struct busfreq_data *data)
+{
+ u64 p0 = data->dmc[0].count[0];
+ u64 p1 = data->dmc[1].count[0];
+
+ p0 *= data->dmc[1].ccnt;
+ p1 *= data->dmc[0].ccnt;
+
+ if (data->dmc[1].ccnt == 0)
+ return 0;
+
+ if (p0 > p1)
+ return 0;
+ return 1;
+}
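exynos4_get_busier_dmc() compares the two controllers' utilization by cross-multiplying each event count with the other controller's cycle count, avoiding an integer division. A worked example with invented numbers:

/*
 * Worked example (invented numbers): DMC0 counts 30 events over 100
 * cycles (30% busy), DMC1 counts 20 events over 50 cycles (40% busy).
 * Then p0 = 30 * 50 = 1500 and p1 = 20 * 100 = 2000, so p0 <= p1 and
 * DMC1 is reported as busier; the same outcome as comparing 30/100
 * with 20/50, but without integer-division rounding.
 */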
+
+static int exynos4_bus_get_dev_status(struct device *dev,
+ struct devfreq_dev_status *stat)
+{
+ struct platform_device *pdev = container_of(dev, struct platform_device,
+ dev);
+ struct busfreq_data *data = platform_get_drvdata(pdev);
+ int busier_dmc;
+ int cycles_x2 = 2; /* 2 x cycles */
+ void __iomem *addr;
+ u32 timing;
+ u32 memctrl;
+
+ exynos4_read_ppmu(data);
+ busier_dmc = exynos4_get_busier_dmc(data);
+ stat->current_frequency = opp_get_freq(data->curr_opp);
+
+ if (busier_dmc)
+ addr = S5P_VA_DMC1;
+ else
+ addr = S5P_VA_DMC0;
+
+ memctrl = __raw_readl(addr + 0x04); /* one of DDR2/3/LPDDR2 */
+ timing = __raw_readl(addr + 0x38); /* CL or WL/RL values */
+
+ switch ((memctrl >> 8) & 0xf) {
+ case 0x4: /* DDR2 */
+ cycles_x2 = ((timing >> 16) & 0xf) * 2;
+ break;
+ case 0x5: /* LPDDR2 */
+ case 0x6: /* DDR3 */
+ cycles_x2 = ((timing >> 8) & 0xf) + ((timing >> 0) & 0xf);
+ break;
+ default:
+ pr_err("%s: Unknown Memory Type(%d).\n", __func__,
+ (memctrl >> 8) & 0xf);
+ return -EINVAL;
+ }
+
+ /* Number of cycles spent on memory access */
+ stat->busy_time = data->dmc[busier_dmc].count[0] / 2 * (cycles_x2 + 2);
+ stat->busy_time *= 100 / BUS_SATURATION_RATIO;
+ stat->total_time = data->dmc[busier_dmc].ccnt;
+
+ /* If the counters have overflown, retry */
+ if (data->dmc[busier_dmc].ccnt_overflow ||
+ data->dmc[busier_dmc].count_overflow[0])
+ return -EAGAIN;
+
+ return 0;
+}
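The busy_time/total_time pair reported here feeds the simple_ondemand governor; BUS_SATURATION_RATIO scales the raw DMC activity so that partial memory-access occupancy is already reported as fully busy. A worked example with invented counter values:

/*
 * Worked example (invented counter values): for DDR3 with the WL + RL
 * nibbles summing to cycles_x2 = 6, count[0] = 1000 accesses and
 * ccnt = 20000 cycles give busy_time = 1000 / 2 * (6 + 2) = 4000,
 * scaled by 100 / BUS_SATURATION_RATIO (integer 100 / 40 == 2) to
 * 8000, i.e. a busy/total ratio of 8000 / 20000 = 40% as seen by the
 * simple_ondemand governor.
 */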
+
+static void exynos4_bus_exit(struct device *dev)
+{
+ struct platform_device *pdev = container_of(dev, struct platform_device,
+ dev);
+ struct busfreq_data *data = platform_get_drvdata(pdev);
+
+ devfreq_unregister_opp_notifier(dev, data->devfreq);
+}
+
+static struct devfreq_dev_profile exynos4_devfreq_profile = {
+ .initial_freq = 400000,
+ .polling_ms = 50,
+ .target = exynos4_bus_target,
+ .get_dev_status = exynos4_bus_get_dev_status,
+ .exit = exynos4_bus_exit,
+};
+
+static int exynos4210_init_tables(struct busfreq_data *data)
+{
+ u32 tmp;
+ int mgrp;
+ int i, err = 0;
+
+ tmp = __raw_readl(S5P_CLKDIV_DMC0);
+ for (i = LV_0; i < EX4210_LV_NUM; i++) {
+ tmp &= ~(S5P_CLKDIV_DMC0_ACP_MASK |
+ S5P_CLKDIV_DMC0_ACPPCLK_MASK |
+ S5P_CLKDIV_DMC0_DPHY_MASK |
+ S5P_CLKDIV_DMC0_DMC_MASK |
+ S5P_CLKDIV_DMC0_DMCD_MASK |
+ S5P_CLKDIV_DMC0_DMCP_MASK |
+ S5P_CLKDIV_DMC0_COPY2_MASK |
+ S5P_CLKDIV_DMC0_CORETI_MASK);
+
+ tmp |= ((exynos4210_clkdiv_dmc0[i][0] <<
+ S5P_CLKDIV_DMC0_ACP_SHIFT) |
+ (exynos4210_clkdiv_dmc0[i][1] <<
+ S5P_CLKDIV_DMC0_ACPPCLK_SHIFT) |
+ (exynos4210_clkdiv_dmc0[i][2] <<
+ S5P_CLKDIV_DMC0_DPHY_SHIFT) |
+ (exynos4210_clkdiv_dmc0[i][3] <<
+ S5P_CLKDIV_DMC0_DMC_SHIFT) |
+ (exynos4210_clkdiv_dmc0[i][4] <<
+ S5P_CLKDIV_DMC0_DMCD_SHIFT) |
+ (exynos4210_clkdiv_dmc0[i][5] <<
+ S5P_CLKDIV_DMC0_DMCP_SHIFT) |
+ (exynos4210_clkdiv_dmc0[i][6] <<
+ S5P_CLKDIV_DMC0_COPY2_SHIFT) |
+ (exynos4210_clkdiv_dmc0[i][7] <<
+ S5P_CLKDIV_DMC0_CORETI_SHIFT));
+
+ data->dmc_divtable[i] = tmp;
+ }
+
+ tmp = __raw_readl(S5P_CLKDIV_TOP);
+ for (i = LV_0; i < EX4210_LV_NUM; i++) {
+ tmp &= ~(S5P_CLKDIV_TOP_ACLK200_MASK |
+ S5P_CLKDIV_TOP_ACLK100_MASK |
+ S5P_CLKDIV_TOP_ACLK160_MASK |
+ S5P_CLKDIV_TOP_ACLK133_MASK |
+ S5P_CLKDIV_TOP_ONENAND_MASK);
+
+ tmp |= ((exynos4210_clkdiv_top[i][0] <<
+ S5P_CLKDIV_TOP_ACLK200_SHIFT) |
+ (exynos4210_clkdiv_top[i][1] <<
+ S5P_CLKDIV_TOP_ACLK100_SHIFT) |
+ (exynos4210_clkdiv_top[i][2] <<
+ S5P_CLKDIV_TOP_ACLK160_SHIFT) |
+ (exynos4210_clkdiv_top[i][3] <<
+ S5P_CLKDIV_TOP_ACLK133_SHIFT) |
+ (exynos4210_clkdiv_top[i][4] <<
+ S5P_CLKDIV_TOP_ONENAND_SHIFT));
+
+ data->top_divtable[i] = tmp;
+ }
+
+#ifdef CONFIG_EXYNOS_ASV
+ tmp = exynos4_result_of_asv;
+#else
+	tmp = 0; /* Use max voltages for reliability when the ASV group is unknown */
+#endif
+
+ pr_debug("ASV Group of Exynos4 is %d\n", tmp);
+ /* Use merged grouping for voltage */
+ switch (tmp) {
+ case 0:
+ mgrp = 0;
+ break;
+ case 1:
+ case 2:
+ mgrp = 1;
+ break;
+ case 3:
+ case 4:
+ mgrp = 2;
+ break;
+ case 5:
+ case 6:
+ mgrp = 3;
+ break;
+ case 7:
+ mgrp = 4;
+ break;
+ default:
+ pr_warn("Unknown ASV Group. Use max voltage.\n");
+ mgrp = 0;
+ }
+
+ for (i = LV_0; i < EX4210_LV_NUM; i++)
+ exynos4210_busclk_table[i].volt = exynos4210_asv_volt[mgrp][i];
+
+ for (i = LV_0; i < EX4210_LV_NUM; i++) {
+ err = opp_add(data->dev, exynos4210_busclk_table[i].clk,
+ exynos4210_busclk_table[i].volt);
+ if (err) {
+ dev_err(data->dev, "Cannot add opp entries.\n");
+ return err;
+ }
+ }
+
+
+ return 0;
+}
+
+static int exynos4x12_init_tables(struct busfreq_data *data)
+{
+ unsigned int i;
+ unsigned int tmp;
+ int ret;
+
+ /* Enable pause function for DREX2 DVFS */
+ tmp = __raw_readl(S5P_DMC_PAUSE_CTRL);
+ tmp |= DMC_PAUSE_ENABLE;
+ __raw_writel(tmp, S5P_DMC_PAUSE_CTRL);
+
+ tmp = __raw_readl(S5P_CLKDIV_DMC0);
+
+ for (i = 0; i < EX4x12_LV_NUM; i++) {
+ tmp &= ~(S5P_CLKDIV_DMC0_ACP_MASK |
+ S5P_CLKDIV_DMC0_ACPPCLK_MASK |
+ S5P_CLKDIV_DMC0_DPHY_MASK |
+ S5P_CLKDIV_DMC0_DMC_MASK |
+ S5P_CLKDIV_DMC0_DMCD_MASK |
+ S5P_CLKDIV_DMC0_DMCP_MASK);
+
+ tmp |= ((exynos4x12_clkdiv_dmc0[i][0] <<
+ S5P_CLKDIV_DMC0_ACP_SHIFT) |
+ (exynos4x12_clkdiv_dmc0[i][1] <<
+ S5P_CLKDIV_DMC0_ACPPCLK_SHIFT) |
+ (exynos4x12_clkdiv_dmc0[i][2] <<
+ S5P_CLKDIV_DMC0_DPHY_SHIFT) |
+ (exynos4x12_clkdiv_dmc0[i][3] <<
+ S5P_CLKDIV_DMC0_DMC_SHIFT) |
+ (exynos4x12_clkdiv_dmc0[i][4] <<
+ S5P_CLKDIV_DMC0_DMCD_SHIFT) |
+ (exynos4x12_clkdiv_dmc0[i][5] <<
+ S5P_CLKDIV_DMC0_DMCP_SHIFT));
+
+ data->dmc_divtable[i] = tmp;
+ }
+
+#ifdef CONFIG_EXYNOS_ASV
+ tmp = exynos4_result_of_asv;
+#else
+	tmp = 0; /* Use max voltages for reliability when the ASV group is unknown */
+#endif
+
+ if (tmp > 8)
+ tmp = 0;
+ pr_debug("ASV Group of Exynos4x12 is %d\n", tmp);
+
+ for (i = 0; i < EX4x12_LV_NUM; i++) {
+ exynos4x12_mifclk_table[i].volt =
+ exynos4x12_mif_step_50[tmp][i];
+ exynos4x12_intclk_table[i].volt =
+ exynos4x12_int_volt[tmp][i];
+ }
+
+ for (i = 0; i < EX4x12_LV_NUM; i++) {
+ ret = opp_add(data->dev, exynos4x12_mifclk_table[i].clk,
+ exynos4x12_mifclk_table[i].volt);
+ if (ret) {
+			dev_err(data->dev, "Failed to add opp entries.\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int exynos4_busfreq_pm_notifier_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct busfreq_data *data = container_of(this, struct busfreq_data,
+ pm_notifier);
+ struct opp *opp;
+ unsigned long maxfreq = ULONG_MAX;
+ int err = 0;
+
+ switch (event) {
+ case PM_SUSPEND_PREPARE:
+ /* Set Fastest and Deactivate DVFS */
+ mutex_lock(&data->lock);
+
+ data->disabled = true;
+
+ opp = opp_find_freq_floor(data->dev, &maxfreq);
+
+ err = exynos4_bus_setvolt(data, opp, data->curr_opp);
+ if (err)
+ goto unlock;
+
+ switch (data->type) {
+ case TYPE_BUSF_EXYNOS4210:
+ err = exynos4210_set_busclk(data, opp);
+ break;
+ case TYPE_BUSF_EXYNOS4x12:
+ err = exynos4x12_set_busclk(data, opp);
+ break;
+ default:
+ err = -EINVAL;
+ }
+ if (err)
+ goto unlock;
+
+ data->curr_opp = opp;
+unlock:
+ mutex_unlock(&data->lock);
+ if (err)
+ return err;
+ return NOTIFY_OK;
+ case PM_POST_RESTORE:
+ case PM_POST_SUSPEND:
+ /* Reactivate */
+ mutex_lock(&data->lock);
+ data->disabled = false;
+ mutex_unlock(&data->lock);
+ return NOTIFY_OK;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static __devinit int exynos4_busfreq_probe(struct platform_device *pdev)
+{
+ struct busfreq_data *data;
+ struct opp *opp;
+ struct device *dev = &pdev->dev;
+ int err = 0;
+
+ data = kzalloc(sizeof(struct busfreq_data), GFP_KERNEL);
+ if (data == NULL) {
+ dev_err(dev, "Cannot allocate memory.\n");
+ return -ENOMEM;
+ }
+
+ data->type = pdev->id_entry->driver_data;
+ data->dmc[0].hw_base = S5P_VA_DMC0;
+ data->dmc[1].hw_base = S5P_VA_DMC1;
+ data->pm_notifier.notifier_call = exynos4_busfreq_pm_notifier_event;
+ data->dev = dev;
+ mutex_init(&data->lock);
+
+ switch (data->type) {
+ case TYPE_BUSF_EXYNOS4210:
+ err = exynos4210_init_tables(data);
+ break;
+ case TYPE_BUSF_EXYNOS4x12:
+ err = exynos4x12_init_tables(data);
+ break;
+ default:
+ dev_err(dev, "Cannot determine the device id %d\n", data->type);
+ err = -EINVAL;
+ }
+ if (err)
+ goto err_regulator;
+
+ data->vdd_int = regulator_get(dev, "vdd_int");
+ if (IS_ERR(data->vdd_int)) {
+ dev_err(dev, "Cannot get the regulator \"vdd_int\"\n");
+ err = PTR_ERR(data->vdd_int);
+ goto err_regulator;
+ }
+ if (data->type == TYPE_BUSF_EXYNOS4x12) {
+ data->vdd_mif = regulator_get(dev, "vdd_mif");
+ if (IS_ERR(data->vdd_mif)) {
+ dev_err(dev, "Cannot get the regulator \"vdd_mif\"\n");
+ err = PTR_ERR(data->vdd_mif);
+ regulator_put(data->vdd_int);
+ goto err_regulator;
+
+ }
+ }
+
+ opp = opp_find_freq_floor(dev, &exynos4_devfreq_profile.initial_freq);
+ if (IS_ERR(opp)) {
+ dev_err(dev, "Invalid initial frequency %lu kHz.\n",
+ exynos4_devfreq_profile.initial_freq);
+ err = PTR_ERR(opp);
+ goto err_opp_add;
+ }
+ data->curr_opp = opp;
+
+ platform_set_drvdata(pdev, data);
+
+ busfreq_mon_reset(data);
+
+ data->devfreq = devfreq_add_device(dev, &exynos4_devfreq_profile,
+ &devfreq_simple_ondemand, NULL);
+ if (IS_ERR(data->devfreq)) {
+ err = PTR_ERR(data->devfreq);
+ goto err_opp_add;
+ }
+
+ devfreq_register_opp_notifier(dev, data->devfreq);
+
+ err = register_pm_notifier(&data->pm_notifier);
+ if (err) {
+ dev_err(dev, "Failed to setup pm notifier\n");
+ goto err_devfreq_add;
+ }
+
+ return 0;
+err_devfreq_add:
+ devfreq_remove_device(data->devfreq);
+err_opp_add:
+ if (data->vdd_mif)
+ regulator_put(data->vdd_mif);
+ regulator_put(data->vdd_int);
+err_regulator:
+ kfree(data);
+ return err;
+}
+
+static __devexit int exynos4_busfreq_remove(struct platform_device *pdev)
+{
+ struct busfreq_data *data = platform_get_drvdata(pdev);
+
+ unregister_pm_notifier(&data->pm_notifier);
+ devfreq_remove_device(data->devfreq);
+ regulator_put(data->vdd_int);
+ if (data->vdd_mif)
+ regulator_put(data->vdd_mif);
+ kfree(data);
+
+ return 0;
+}
+
+static int exynos4_busfreq_resume(struct device *dev)
+{
+ struct platform_device *pdev = container_of(dev, struct platform_device,
+ dev);
+ struct busfreq_data *data = platform_get_drvdata(pdev);
+
+ busfreq_mon_reset(data);
+ return 0;
+}
+
+static const struct dev_pm_ops exynos4_busfreq_pm = {
+ .resume = exynos4_busfreq_resume,
+};
+
+static const struct platform_device_id exynos4_busfreq_id[] = {
+ { "exynos4210-busfreq", TYPE_BUSF_EXYNOS4210 },
+ { "exynos4412-busfreq", TYPE_BUSF_EXYNOS4x12 },
+ { "exynos4212-busfreq", TYPE_BUSF_EXYNOS4x12 },
+ { },
+};
+
+static struct platform_driver exynos4_busfreq_driver = {
+ .probe = exynos4_busfreq_probe,
+ .remove = __devexit_p(exynos4_busfreq_remove),
+ .id_table = exynos4_busfreq_id,
+ .driver = {
+ .name = "exynos4-busfreq",
+ .owner = THIS_MODULE,
+ .pm = &exynos4_busfreq_pm,
+ },
+};
+
+static int __init exynos4_busfreq_init(void)
+{
+ return platform_driver_register(&exynos4_busfreq_driver);
+}
+late_initcall(exynos4_busfreq_init);
+
+static void __exit exynos4_busfreq_exit(void)
+{
+ platform_driver_unregister(&exynos4_busfreq_driver);
+}
+module_exit(exynos4_busfreq_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("EXYNOS4 busfreq driver with devfreq framework");
+MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
+MODULE_ALIAS("exynos4-busfreq");
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index ab8f469f5cf8..f1a274994bb1 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -124,7 +124,7 @@ config MV_XOR
config MX3_IPU
bool "MX3x Image Processing Unit support"
- depends on ARCH_MX3
+ depends on ARCH_MXC
select DMA_ENGINE
default y
help
@@ -187,6 +187,13 @@ config TIMB_DMA
help
Enable support for the Timberdale FPGA DMA engine.
+config SIRF_DMA
+ tristate "CSR SiRFprimaII DMA support"
+ depends on ARCH_PRIMA2
+ select DMA_ENGINE
+ help
+ Enable support for the CSR SiRFprimaII DMA engine.
+
config ARCH_HAS_ASYNC_TX_FIND_CHANNEL
bool
@@ -201,26 +208,26 @@ config PL330_DMA
platform_data for a dma-pl330 device.
config PCH_DMA
- tristate "Intel EG20T PCH / OKI Semi IOH(ML7213/ML7223) DMA support"
+ tristate "Intel EG20T PCH / LAPIS Semicon IOH(ML7213/ML7223/ML7831) DMA"
depends on PCI && X86
select DMA_ENGINE
help
Enable support for Intel EG20T PCH DMA engine.
- This driver also can be used for OKI SEMICONDUCTOR IOH(Input/
- Output Hub), ML7213 and ML7223.
- ML7213 IOH is for IVI(In-Vehicle Infotainment) use and ML7223 IOH is
- for MP(Media Phone) use.
- ML7213/ML7223 is companion chip for Intel Atom E6xx series.
- ML7213/ML7223 is completely compatible for Intel EG20T PCH.
+	  This driver can also be used for the LAPIS Semiconductor IOH (Input/
+	  Output Hub): ML7213, ML7223 and ML7831.
+	  ML7213 IOH is for IVI (In-Vehicle Infotainment) use, ML7223 IOH is
+	  for MP (Media Phone) use and ML7831 IOH is for general purpose use.
+	  ML7213/ML7223/ML7831 are companion chips for the Intel Atom E6xx
+	  series and are fully compatible with the Intel EG20T PCH.
config IMX_SDMA
tristate "i.MX SDMA support"
- depends on ARCH_MX25 || ARCH_MX3 || ARCH_MX5
+ depends on ARCH_MXC
select DMA_ENGINE
help
Support the i.MX SDMA engine. This engine is integrated into
- Freescale i.MX25/31/35/51 chips.
+ Freescale i.MX25/31/35/51/53 chips.
config IMX_DMA
tristate "i.MX DMA support"
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 30cf3b1f0c5c..009a222e8283 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_IMX_SDMA) += imx-sdma.o
obj-$(CONFIG_IMX_DMA) += imx-dma.o
obj-$(CONFIG_MXS_DMA) += mxs-dma.o
obj-$(CONFIG_TIMB_DMA) += timb_dma.o
+obj-$(CONFIG_SIRF_DMA) += sirf-dma.o
obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
obj-$(CONFIG_PL330_DMA) += pl330.o
obj-$(CONFIG_PCH_DMA) += pch_dma.o
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index b7cbd1ab1db1..8a281584458b 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -854,8 +854,10 @@ static int prep_phy_channel(struct pl08x_dma_chan *plchan,
int ret;
/* Check if we already have a channel */
- if (plchan->phychan)
- return 0;
+ if (plchan->phychan) {
+ ch = plchan->phychan;
+ goto got_channel;
+ }
ch = pl08x_get_phy_channel(pl08x, plchan);
if (!ch) {
@@ -880,21 +882,22 @@ static int prep_phy_channel(struct pl08x_dma_chan *plchan,
return -EBUSY;
}
ch->signal = ret;
-
- /* Assign the flow control signal to this channel */
- if (txd->direction == DMA_TO_DEVICE)
- txd->ccfg |= ch->signal << PL080_CONFIG_DST_SEL_SHIFT;
- else if (txd->direction == DMA_FROM_DEVICE)
- txd->ccfg |= ch->signal << PL080_CONFIG_SRC_SEL_SHIFT;
}
+ plchan->phychan = ch;
dev_dbg(&pl08x->adev->dev, "allocated physical channel %d and signal %d for xfer on %s\n",
ch->id,
ch->signal,
plchan->name);
+got_channel:
+ /* Assign the flow control signal to this channel */
+ if (txd->direction == DMA_MEM_TO_DEV)
+ txd->ccfg |= ch->signal << PL080_CONFIG_DST_SEL_SHIFT;
+ else if (txd->direction == DMA_DEV_TO_MEM)
+ txd->ccfg |= ch->signal << PL080_CONFIG_SRC_SEL_SHIFT;
+
plchan->phychan_hold++;
- plchan->phychan = ch;
return 0;
}
@@ -1102,10 +1105,10 @@ static int dma_set_runtime_config(struct dma_chan *chan,
/* Transfer direction */
plchan->runtime_direction = config->direction;
- if (config->direction == DMA_TO_DEVICE) {
+ if (config->direction == DMA_MEM_TO_DEV) {
addr_width = config->dst_addr_width;
maxburst = config->dst_maxburst;
- } else if (config->direction == DMA_FROM_DEVICE) {
+ } else if (config->direction == DMA_DEV_TO_MEM) {
addr_width = config->src_addr_width;
maxburst = config->src_maxburst;
} else {
@@ -1136,7 +1139,7 @@ static int dma_set_runtime_config(struct dma_chan *chan,
cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT;
cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT;
- if (plchan->runtime_direction == DMA_FROM_DEVICE) {
+ if (plchan->runtime_direction == DMA_DEV_TO_MEM) {
plchan->src_addr = config->src_addr;
plchan->src_cctl = pl08x_cctl(cctl) | PL080_CONTROL_DST_INCR |
pl08x_select_bus(plchan->cd->periph_buses,
@@ -1152,7 +1155,7 @@ static int dma_set_runtime_config(struct dma_chan *chan,
"configured channel %s (%s) for %s, data width %d, "
"maxburst %d words, LE, CCTL=0x%08x\n",
dma_chan_name(chan), plchan->name,
- (config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
+ (config->direction == DMA_DEV_TO_MEM) ? "RX" : "TX",
addr_width,
maxburst,
cctl);
@@ -1322,7 +1325,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_data_direction direction,
+ unsigned int sg_len, enum dma_transfer_direction direction,
unsigned long flags)
{
struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
@@ -1354,10 +1357,10 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
*/
txd->direction = direction;
- if (direction == DMA_TO_DEVICE) {
+ if (direction == DMA_MEM_TO_DEV) {
txd->cctl = plchan->dst_cctl;
slave_addr = plchan->dst_addr;
- } else if (direction == DMA_FROM_DEVICE) {
+ } else if (direction == DMA_DEV_TO_MEM) {
txd->cctl = plchan->src_cctl;
slave_addr = plchan->src_addr;
} else {
@@ -1368,10 +1371,10 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
}
if (plchan->cd->device_fc)
- tmp = (direction == DMA_TO_DEVICE) ? PL080_FLOW_MEM2PER_PER :
+ tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER_PER :
PL080_FLOW_PER2MEM_PER;
else
- tmp = (direction == DMA_TO_DEVICE) ? PL080_FLOW_MEM2PER :
+ tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER :
PL080_FLOW_PER2MEM;
txd->ccfg |= tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT;
@@ -1387,7 +1390,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
list_add_tail(&dsg->node, &txd->dsg_list);
dsg->len = sg_dma_len(sg);
- if (direction == DMA_TO_DEVICE) {
+ if (direction == DMA_MEM_TO_DEV) {
dsg->src_addr = sg_phys(sg);
dsg->dst_addr = slave_addr;
} else {
@@ -2054,6 +2057,8 @@ static struct amba_id pl08x_ids[] = {
{ 0, 0 },
};
+MODULE_DEVICE_TABLE(amba, pl08x_ids);
+
static struct amba_driver pl08x_amba_driver = {
.drv.name = DRIVER_NAME,
.id_table = pl08x_ids,
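The DMA_TO_DEVICE/DMA_FROM_DEVICE values replaced above came from enum dma_data_direction, which belongs to the DMA-mapping API; slave transfers now use the dedicated enum dma_transfer_direction, hence DMA_MEM_TO_DEV/DMA_DEV_TO_MEM. A client-side sketch of the new spelling in a slave configuration (the channel, FIFO address and burst values are invented):

/* Illustrative client-side use of the new direction enum: */
#include <linux/dmaengine.h>

static int example_config_tx(struct dma_chan *chan, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,	/* was DMA_TO_DEVICE */
		.dst_addr	= fifo_addr,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 4,
	};

	return dmaengine_slave_config(chan, &cfg);
}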
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index fcfa0a8b5c59..97f87b29b9f3 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -23,6 +23,8 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include "at_hdmac_regs.h"
@@ -660,7 +662,7 @@ err_desc_get:
*/
static struct dma_async_tx_descriptor *
atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_data_direction direction,
+ unsigned int sg_len, enum dma_transfer_direction direction,
unsigned long flags)
{
struct at_dma_chan *atchan = to_at_dma_chan(chan);
@@ -678,7 +680,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n",
sg_len,
- direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE",
+ direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
flags);
if (unlikely(!atslave || !sg_len)) {
@@ -692,7 +694,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
ctrlb = ATC_IEN;
switch (direction) {
- case DMA_TO_DEVICE:
+ case DMA_MEM_TO_DEV:
ctrla |= ATC_DST_WIDTH(reg_width);
ctrlb |= ATC_DST_ADDR_MODE_FIXED
| ATC_SRC_ADDR_MODE_INCR
@@ -725,7 +727,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
total_len += len;
}
break;
- case DMA_FROM_DEVICE:
+ case DMA_DEV_TO_MEM:
ctrla |= ATC_SRC_WIDTH(reg_width);
ctrlb |= ATC_DST_ADDR_MODE_INCR
| ATC_SRC_ADDR_MODE_FIXED
@@ -787,7 +789,7 @@ err_desc_get:
*/
static int
atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
- size_t period_len, enum dma_data_direction direction)
+ size_t period_len, enum dma_transfer_direction direction)
{
if (period_len > (ATC_BTSIZE_MAX << reg_width))
goto err_out;
@@ -795,7 +797,7 @@ atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
goto err_out;
if (unlikely(buf_addr & ((1 << reg_width) - 1)))
goto err_out;
- if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE))))
+ if (unlikely(!(direction & (DMA_DEV_TO_MEM | DMA_MEM_TO_DEV))))
goto err_out;
return 0;
@@ -810,7 +812,7 @@ err_out:
static int
atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc,
unsigned int period_index, dma_addr_t buf_addr,
- size_t period_len, enum dma_data_direction direction)
+ size_t period_len, enum dma_transfer_direction direction)
{
u32 ctrla;
unsigned int reg_width = atslave->reg_width;
@@ -822,7 +824,7 @@ atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc,
| period_len >> reg_width;
switch (direction) {
- case DMA_TO_DEVICE:
+ case DMA_MEM_TO_DEV:
desc->lli.saddr = buf_addr + (period_len * period_index);
desc->lli.daddr = atslave->tx_reg;
desc->lli.ctrla = ctrla;
@@ -833,7 +835,7 @@ atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc,
| ATC_DIF(AT_DMA_PER_IF);
break;
- case DMA_FROM_DEVICE:
+ case DMA_DEV_TO_MEM:
desc->lli.saddr = atslave->rx_reg;
desc->lli.daddr = buf_addr + (period_len * period_index);
desc->lli.ctrla = ctrla;
@@ -861,7 +863,7 @@ atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc,
*/
static struct dma_async_tx_descriptor *
atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
- size_t period_len, enum dma_data_direction direction)
+ size_t period_len, enum dma_transfer_direction direction)
{
struct at_dma_chan *atchan = to_at_dma_chan(chan);
struct at_dma_slave *atslave = chan->private;
@@ -872,7 +874,7 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
unsigned int i;
dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n",
- direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE",
+ direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
buf_addr,
periods, buf_len, period_len);
@@ -1175,6 +1177,56 @@ static void atc_free_chan_resources(struct dma_chan *chan)
/*-- Module Management -----------------------------------------------*/
+/* cap_mask is a multi-u32 bitfield, fill it with proper C code. */
+static struct at_dma_platform_data at91sam9rl_config = {
+ .nr_channels = 2,
+};
+static struct at_dma_platform_data at91sam9g45_config = {
+ .nr_channels = 8,
+};
+
+#if defined(CONFIG_OF)
+static const struct of_device_id atmel_dma_dt_ids[] = {
+ {
+ .compatible = "atmel,at91sam9rl-dma",
+ .data = &at91sam9rl_config,
+ }, {
+ .compatible = "atmel,at91sam9g45-dma",
+ .data = &at91sam9g45_config,
+ }, {
+ /* sentinel */
+ }
+};
+
+MODULE_DEVICE_TABLE(of, atmel_dma_dt_ids);
+#endif
+
+static const struct platform_device_id atdma_devtypes[] = {
+ {
+ .name = "at91sam9rl_dma",
+ .driver_data = (unsigned long) &at91sam9rl_config,
+ }, {
+ .name = "at91sam9g45_dma",
+ .driver_data = (unsigned long) &at91sam9g45_config,
+ }, {
+ /* sentinel */
+ }
+};
+
+static inline struct at_dma_platform_data * __init at_dma_get_driver_data(
+ struct platform_device *pdev)
+{
+ if (pdev->dev.of_node) {
+ const struct of_device_id *match;
+ match = of_match_node(atmel_dma_dt_ids, pdev->dev.of_node);
+ if (match == NULL)
+ return NULL;
+ return match->data;
+ }
+ return (struct at_dma_platform_data *)
+ platform_get_device_id(pdev)->driver_data;
+}
+
/**
* at_dma_off - disable DMA controller
* @atdma: the Atmel HDAMC device
@@ -1193,18 +1245,23 @@ static void at_dma_off(struct at_dma *atdma)
static int __init at_dma_probe(struct platform_device *pdev)
{
- struct at_dma_platform_data *pdata;
struct resource *io;
struct at_dma *atdma;
size_t size;
int irq;
int err;
int i;
+ struct at_dma_platform_data *plat_dat;
- /* get DMA Controller parameters from platform */
- pdata = pdev->dev.platform_data;
- if (!pdata || pdata->nr_channels > AT_DMA_MAX_NR_CHANNELS)
- return -EINVAL;
+ /* setup platform data for each SoC */
+ dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask);
+ dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask);
+ dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask);
+
+ /* get DMA parameters from controller type */
+ plat_dat = at_dma_get_driver_data(pdev);
+ if (!plat_dat)
+ return -ENODEV;
io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!io)
@@ -1215,14 +1272,14 @@ static int __init at_dma_probe(struct platform_device *pdev)
return irq;
size = sizeof(struct at_dma);
- size += pdata->nr_channels * sizeof(struct at_dma_chan);
+ size += plat_dat->nr_channels * sizeof(struct at_dma_chan);
atdma = kzalloc(size, GFP_KERNEL);
if (!atdma)
return -ENOMEM;
- /* discover transaction capabilites from the platform data */
- atdma->dma_common.cap_mask = pdata->cap_mask;
- atdma->all_chan_mask = (1 << pdata->nr_channels) - 1;
+ /* discover transaction capabilities */
+ atdma->dma_common.cap_mask = plat_dat->cap_mask;
+ atdma->all_chan_mask = (1 << plat_dat->nr_channels) - 1;
size = resource_size(io);
if (!request_mem_region(io->start, size, pdev->dev.driver->name)) {
@@ -1268,7 +1325,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
/* initialize channels related values */
INIT_LIST_HEAD(&atdma->dma_common.channels);
- for (i = 0; i < pdata->nr_channels; i++) {
+ for (i = 0; i < plat_dat->nr_channels; i++) {
struct at_dma_chan *atchan = &atdma->chan[i];
atchan->chan_common.device = &atdma->dma_common;
@@ -1313,7 +1370,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n",
dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "",
- pdata->nr_channels);
+ plat_dat->nr_channels);
dma_async_device_register(&atdma->dma_common);
@@ -1495,9 +1552,11 @@ static const struct dev_pm_ops at_dma_dev_pm_ops = {
static struct platform_driver at_dma_driver = {
.remove = __exit_p(at_dma_remove),
.shutdown = at_dma_shutdown,
+ .id_table = atdma_devtypes,
.driver = {
.name = "at_hdmac",
.pm = &at_dma_dev_pm_ops,
+ .of_match_table = of_match_ptr(atmel_dma_dt_ids),
},
};
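
The at_hdmac hunks above replace board-supplied platform_data with per-SoC configuration structures that are looked up either through the OF match table (when the device was probed from the device tree) or through the platform_device_id table. A minimal sketch of that dispatch pattern, using hypothetical foo_* names rather than the driver's own, could look like this:

#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct foo_cfg { int nr_channels; };

static struct foo_cfg foo_v1_cfg = { .nr_channels = 2 };
static struct foo_cfg foo_v2_cfg = { .nr_channels = 8 };

static const struct of_device_id foo_dt_ids[] = {
	{ .compatible = "vendor,foo-v1", .data = &foo_v1_cfg },
	{ .compatible = "vendor,foo-v2", .data = &foo_v2_cfg },
	{ /* sentinel */ }
};

static const struct platform_device_id foo_devtypes[] = {
	{ .name = "foo-v1", .driver_data = (unsigned long)&foo_v1_cfg },
	{ .name = "foo-v2", .driver_data = (unsigned long)&foo_v2_cfg },
	{ /* sentinel */ }
};

/* Pick the per-SoC config: DT match data if probed from OF, else id_table data. */
static struct foo_cfg *foo_get_driver_data(struct platform_device *pdev)
{
	if (pdev->dev.of_node) {
		const struct of_device_id *match;

		match = of_match_node(foo_dt_ids, pdev->dev.of_node);
		if (!match)
			return NULL;
		return (struct foo_cfg *)match->data;
	}
	return (struct foo_cfg *)platform_get_device_id(pdev)->driver_data;
}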
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index aa4c9aebab7c..dcaedfc181cf 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -251,6 +251,7 @@ static inline struct at_dma_chan *to_at_dma_chan(struct dma_chan *dchan)
/**
* struct at_dma - internal representation of an Atmel HDMA Controller
* @chan_common: common dmaengine dma_device object members
+ * @atdma_devtype: identifier of DMA controller compatibility
* @ch_regs: memory mapped register base
* @clk: dma controller clock
* @save_imr: interrupt mask register that is saved on suspend/resume cycle
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index 4234f416ef11..d65a718c0f9b 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -39,7 +39,7 @@ struct coh901318_desc {
struct scatterlist *sg;
unsigned int sg_len;
struct coh901318_lli *lli;
- enum dma_data_direction dir;
+ enum dma_transfer_direction dir;
unsigned long flags;
u32 head_config;
u32 head_ctrl;
@@ -1034,7 +1034,7 @@ coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
static struct dma_async_tx_descriptor *
coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_data_direction direction,
+ unsigned int sg_len, enum dma_transfer_direction direction,
unsigned long flags)
{
struct coh901318_chan *cohc = to_coh901318_chan(chan);
@@ -1077,7 +1077,7 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
ctrl_last |= cohc->runtime_ctrl;
ctrl |= cohc->runtime_ctrl;
- if (direction == DMA_TO_DEVICE) {
+ if (direction == DMA_MEM_TO_DEV) {
u32 tx_flags = COH901318_CX_CTRL_PRDD_SOURCE |
COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE;
@@ -1085,7 +1085,7 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
ctrl_chained |= tx_flags;
ctrl_last |= tx_flags;
ctrl |= tx_flags;
- } else if (direction == DMA_FROM_DEVICE) {
+ } else if (direction == DMA_DEV_TO_MEM) {
u32 rx_flags = COH901318_CX_CTRL_PRDD_DEST |
COH901318_CX_CTRL_DST_ADDR_INC_ENABLE;
@@ -1274,11 +1274,11 @@ static void coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
int i = 0;
/* We only support mem to per or per to mem transfers */
- if (config->direction == DMA_FROM_DEVICE) {
+ if (config->direction == DMA_DEV_TO_MEM) {
addr = config->src_addr;
addr_width = config->src_addr_width;
maxburst = config->src_maxburst;
- } else if (config->direction == DMA_TO_DEVICE) {
+ } else if (config->direction == DMA_MEM_TO_DEV) {
addr = config->dst_addr;
addr_width = config->dst_addr_width;
maxburst = config->dst_maxburst;
diff --git a/drivers/dma/coh901318_lli.c b/drivers/dma/coh901318_lli.c
index 9f7e0e6a7eea..6c0e2d4c6682 100644
--- a/drivers/dma/coh901318_lli.c
+++ b/drivers/dma/coh901318_lli.c
@@ -7,11 +7,10 @@
* Author: Per Friden <per.friden@stericsson.com>
*/
-#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
-#include <linux/dmapool.h>
#include <linux/memory.h>
#include <linux/gfp.h>
+#include <linux/dmapool.h>
#include <mach/coh901318.h>
#include "coh901318_lli.h"
@@ -177,18 +176,18 @@ coh901318_lli_fill_single(struct coh901318_pool *pool,
struct coh901318_lli *lli,
dma_addr_t buf, unsigned int size,
dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl_eom,
- enum dma_data_direction dir)
+ enum dma_transfer_direction dir)
{
int s = size;
dma_addr_t src;
dma_addr_t dst;
- if (dir == DMA_TO_DEVICE) {
+ if (dir == DMA_MEM_TO_DEV) {
src = buf;
dst = dev_addr;
- } else if (dir == DMA_FROM_DEVICE) {
+ } else if (dir == DMA_DEV_TO_MEM) {
src = dev_addr;
dst = buf;
@@ -215,9 +214,9 @@ coh901318_lli_fill_single(struct coh901318_pool *pool,
lli = coh901318_lli_next(lli);
- if (dir == DMA_TO_DEVICE)
+ if (dir == DMA_MEM_TO_DEV)
src += block_size;
- else if (dir == DMA_FROM_DEVICE)
+ else if (dir == DMA_DEV_TO_MEM)
dst += block_size;
}
@@ -234,7 +233,7 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
struct scatterlist *sgl, unsigned int nents,
dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl,
u32 ctrl_last,
- enum dma_data_direction dir, u32 ctrl_irq_mask)
+ enum dma_transfer_direction dir, u32 ctrl_irq_mask)
{
int i;
struct scatterlist *sg;
@@ -249,9 +248,9 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
spin_lock(&pool->lock);
- if (dir == DMA_TO_DEVICE)
+ if (dir == DMA_MEM_TO_DEV)
dst = dev_addr;
- else if (dir == DMA_FROM_DEVICE)
+ else if (dir == DMA_DEV_TO_MEM)
src = dev_addr;
else
goto err;
@@ -269,7 +268,7 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
ctrl_sg = ctrl ? ctrl : ctrl_last;
- if (dir == DMA_TO_DEVICE)
+ if (dir == DMA_MEM_TO_DEV)
/* increment source address */
src = sg_phys(sg);
else
@@ -293,7 +292,7 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
lli->src_addr = src;
lli->dst_addr = dst;
- if (dir == DMA_FROM_DEVICE)
+ if (dir == DMA_DEV_TO_MEM)
dst += elem_size;
else
src += elem_size;
diff --git a/drivers/dma/coh901318_lli.h b/drivers/dma/coh901318_lli.h
index 7a5c80990e9e..abff3714fdda 100644
--- a/drivers/dma/coh901318_lli.h
+++ b/drivers/dma/coh901318_lli.h
@@ -97,7 +97,7 @@ coh901318_lli_fill_single(struct coh901318_pool *pool,
struct coh901318_lli *lli,
dma_addr_t buf, unsigned int size,
dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl_last,
- enum dma_data_direction dir);
+ enum dma_transfer_direction dir);
/**
* coh901318_lli_fill_sg() - Prepares the LLIs for a DMA scatter list transfer
@@ -119,6 +119,6 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
struct scatterlist *sg, unsigned int nents,
dma_addr_t dev_addr, u32 ctrl_chained,
u32 ctrl, u32 ctrl_last,
- enum dma_data_direction dir, u32 ctrl_irq_mask);
+ enum dma_transfer_direction dir, u32 ctrl_irq_mask);
#endif /* COH901318_LLI_H */
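
This series converts coh901318 (and every other dmaengine driver touched below) from enum dma_data_direction to the new enum dma_transfer_direction, i.e. DMA_TO_DEVICE becomes DMA_MEM_TO_DEV and DMA_FROM_DEVICE becomes DMA_DEV_TO_MEM. A reduced sketch of how a slave-prep path typically switches on the new values; foo_set_addresses() is a hypothetical helper, not part of the patch:

#include <linux/dmaengine.h>
#include <linux/errno.h>

static int foo_set_addresses(enum dma_transfer_direction dir,
			     dma_addr_t buf, dma_addr_t dev_addr,
			     dma_addr_t *src, dma_addr_t *dst)
{
	switch (dir) {
	case DMA_MEM_TO_DEV:		/* was DMA_TO_DEVICE */
		*src = buf;
		*dst = dev_addr;
		return 0;
	case DMA_DEV_TO_MEM:		/* was DMA_FROM_DEVICE */
		*src = dev_addr;
		*dst = buf;
		return 0;
	default:
		return -EINVAL;
	}
}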
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index b48967b499da..a6c6051ec858 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -693,12 +693,12 @@ int dma_async_device_register(struct dma_device *device)
!device->device_prep_dma_interrupt);
BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
!device->device_prep_dma_sg);
- BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
- !device->device_prep_slave_sg);
BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
!device->device_prep_dma_cyclic);
BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
!device->device_control);
+ BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) &&
+ !device->device_prep_interleaved_dma);
BUG_ON(!device->device_alloc_chan_resources);
BUG_ON(!device->device_free_chan_resources);
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index eb1d8641cf5c..2b8661b54eaf 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -214,9 +214,18 @@ static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
return error_count;
}
-static void dmatest_callback(void *completion)
+/* poor man's completion - we want to use wait_event_freezable_timeout() on it */
+struct dmatest_done {
+ bool done;
+ wait_queue_head_t *wait;
+};
+
+static void dmatest_callback(void *arg)
{
- complete(completion);
+ struct dmatest_done *done = arg;
+
+ done->done = true;
+ wake_up_all(done->wait);
}
/*
@@ -235,7 +244,9 @@ static void dmatest_callback(void *completion)
*/
static int dmatest_func(void *data)
{
+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait);
struct dmatest_thread *thread = data;
+ struct dmatest_done done = { .wait = &done_wait };
struct dma_chan *chan;
const char *thread_name;
unsigned int src_off, dst_off, len;
@@ -252,7 +263,7 @@ static int dmatest_func(void *data)
int i;
thread_name = current->comm;
- set_freezable_with_signal();
+ set_freezable();
ret = -ENOMEM;
@@ -306,9 +317,6 @@ static int dmatest_func(void *data)
struct dma_async_tx_descriptor *tx = NULL;
dma_addr_t dma_srcs[src_cnt];
dma_addr_t dma_dsts[dst_cnt];
- struct completion cmp;
- unsigned long start, tmo, end = 0 /* compiler... */;
- bool reload = true;
u8 align = 0;
total_tests++;
@@ -391,9 +399,9 @@ static int dmatest_func(void *data)
continue;
}
- init_completion(&cmp);
+ done.done = false;
tx->callback = dmatest_callback;
- tx->callback_param = &cmp;
+ tx->callback_param = &done;
cookie = tx->tx_submit(tx);
if (dma_submit_error(cookie)) {
@@ -407,20 +415,20 @@ static int dmatest_func(void *data)
}
dma_async_issue_pending(chan);
- do {
- start = jiffies;
- if (reload)
- end = start + msecs_to_jiffies(timeout);
- else if (end <= start)
- end = start + 1;
- tmo = wait_for_completion_interruptible_timeout(&cmp,
- end - start);
- reload = try_to_freeze();
- } while (tmo == -ERESTARTSYS);
+ wait_event_freezable_timeout(done_wait, done.done,
+ msecs_to_jiffies(timeout));
status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
- if (tmo == 0) {
+ if (!done.done) {
+ /*
+ * We're leaving the timed-out DMA operation with a
+ * dangling pointer to done_wait. To make this
+ * correct, we'd need to allocate done_wait for each
+ * test iteration and perform the "who's gonna free
+ * it this time?" dance. For now, just leave it
+ * dangling.
+ */
pr_warning("%s: #%u: test timed out\n",
thread_name, total_tests - 1);
failed_tests++;
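
The dmatest hunk above replaces the on-stack struct completion with an open-coded done flag plus wait queue, so the test thread can sleep in wait_event_freezable_timeout() instead of looping around try_to_freeze(). A stripped-down sketch of the pattern, with the actual DMA submission left out as a hypothetical step:

#include <linux/wait.h>
#include <linux/freezer.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

struct test_done {
	bool done;
	wait_queue_head_t *wait;
};

static void test_callback(void *arg)
{
	struct test_done *done = arg;

	done->done = true;
	wake_up_all(done->wait);
}

static int run_one_test(unsigned int timeout_ms)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait);
	struct test_done done = { .wait = &done_wait };

	done.done = false;
	/* submit the transfer here with tx->callback = test_callback and
	 * tx->callback_param = &done (hypothetical submission elided) */
	wait_event_freezable_timeout(done_wait, done.done,
				     msecs_to_jiffies(timeout_ms));

	return done.done ? 0 : -ETIMEDOUT;
}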
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 9bfd6d360718..9b592b02b5f4 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -166,6 +166,38 @@ dwc_assign_cookie(struct dw_dma_chan *dwc, struct dw_desc *desc)
return cookie;
}
+static void dwc_initialize(struct dw_dma_chan *dwc)
+{
+ struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+ struct dw_dma_slave *dws = dwc->chan.private;
+ u32 cfghi = DWC_CFGH_FIFO_MODE;
+ u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
+
+ if (dwc->initialized == true)
+ return;
+
+ if (dws) {
+ /*
+ * We need controller-specific data to set up slave
+ * transfers.
+ */
+ BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
+
+ cfghi = dws->cfg_hi;
+ cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
+ }
+
+ channel_writel(dwc, CFG_LO, cfglo);
+ channel_writel(dwc, CFG_HI, cfghi);
+
+ /* Enable interrupts */
+ channel_set_bit(dw, MASK.XFER, dwc->mask);
+ channel_set_bit(dw, MASK.BLOCK, dwc->mask);
+ channel_set_bit(dw, MASK.ERROR, dwc->mask);
+
+ dwc->initialized = true;
+}
+
/*----------------------------------------------------------------------*/
/* Called with dwc->lock held and bh disabled */
@@ -189,6 +221,8 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
return;
}
+ dwc_initialize(dwc);
+
channel_writel(dwc, LLP, first->txd.phys);
channel_writel(dwc, CTL_LO,
DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
@@ -696,7 +730,7 @@ err_desc_get:
static struct dma_async_tx_descriptor *
dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_data_direction direction,
+ unsigned int sg_len, enum dma_transfer_direction direction,
unsigned long flags)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
@@ -720,7 +754,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
prev = first = NULL;
switch (direction) {
- case DMA_TO_DEVICE:
+ case DMA_MEM_TO_DEV:
ctllo = (DWC_DEFAULT_CTLLO(chan->private)
| DWC_CTLL_DST_WIDTH(reg_width)
| DWC_CTLL_DST_FIX
@@ -777,7 +811,7 @@ slave_sg_todev_fill_desc:
goto slave_sg_todev_fill_desc;
}
break;
- case DMA_FROM_DEVICE:
+ case DMA_DEV_TO_MEM:
ctllo = (DWC_DEFAULT_CTLLO(chan->private)
| DWC_CTLL_SRC_WIDTH(reg_width)
| DWC_CTLL_DST_INC
@@ -959,10 +993,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
struct dw_dma *dw = to_dw_dma(chan->device);
struct dw_desc *desc;
- struct dw_dma_slave *dws;
int i;
- u32 cfghi;
- u32 cfglo;
unsigned long flags;
dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");
@@ -975,26 +1006,6 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
dwc->completed = chan->cookie = 1;
- cfghi = DWC_CFGH_FIFO_MODE;
- cfglo = 0;
-
- dws = chan->private;
- if (dws) {
- /*
- * We need controller-specific data to set up slave
- * transfers.
- */
- BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
-
- cfghi = dws->cfg_hi;
- cfglo = dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
- }
-
- cfglo |= DWC_CFGL_CH_PRIOR(dwc->priority);
-
- channel_writel(dwc, CFG_LO, cfglo);
- channel_writel(dwc, CFG_HI, cfghi);
-
/*
* NOTE: some controllers may have additional features that we
* need to initialize here, like "scatter-gather" (which
@@ -1026,11 +1037,6 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
i = ++dwc->descs_allocated;
}
- /* Enable interrupts */
- channel_set_bit(dw, MASK.XFER, dwc->mask);
- channel_set_bit(dw, MASK.BLOCK, dwc->mask);
- channel_set_bit(dw, MASK.ERROR, dwc->mask);
-
spin_unlock_irqrestore(&dwc->lock, flags);
dev_dbg(chan2dev(chan),
@@ -1058,6 +1064,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
spin_lock_irqsave(&dwc->lock, flags);
list_splice_init(&dwc->free_list, &list);
dwc->descs_allocated = 0;
+ dwc->initialized = false;
/* Disable interrupts */
channel_clear_bit(dw, MASK.XFER, dwc->mask);
@@ -1165,7 +1172,7 @@ EXPORT_SYMBOL(dw_dma_cyclic_stop);
*/
struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
dma_addr_t buf_addr, size_t buf_len, size_t period_len,
- enum dma_data_direction direction)
+ enum dma_transfer_direction direction)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
struct dw_cyclic_desc *cdesc;
@@ -1206,7 +1213,7 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
goto out_err;
if (unlikely(buf_addr & ((1 << reg_width) - 1)))
goto out_err;
- if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE))))
+ if (unlikely(!(direction & (DMA_MEM_TO_DEV | DMA_DEV_TO_MEM))))
goto out_err;
retval = ERR_PTR(-ENOMEM);
@@ -1228,7 +1235,7 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
goto out_err_desc_get;
switch (direction) {
- case DMA_TO_DEVICE:
+ case DMA_MEM_TO_DEV:
desc->lli.dar = dws->tx_reg;
desc->lli.sar = buf_addr + (period_len * i);
desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private)
@@ -1239,7 +1246,7 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
| DWC_CTLL_FC(dws->fc)
| DWC_CTLL_INT_EN);
break;
- case DMA_FROM_DEVICE:
+ case DMA_DEV_TO_MEM:
desc->lli.dar = buf_addr + (period_len * i);
desc->lli.sar = dws->rx_reg;
desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private)
@@ -1335,6 +1342,8 @@ EXPORT_SYMBOL(dw_dma_cyclic_free);
static void dw_dma_off(struct dw_dma *dw)
{
+ int i;
+
dma_writel(dw, CFG, 0);
channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
@@ -1345,6 +1354,9 @@ static void dw_dma_off(struct dw_dma *dw)
while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
cpu_relax();
+
+ for (i = 0; i < dw->dma.chancnt; i++)
+ dw->chan[i].initialized = false;
}
static int __init dw_probe(struct platform_device *pdev)
@@ -1533,6 +1545,7 @@ static int dw_suspend_noirq(struct device *dev)
dw_dma_off(platform_get_drvdata(pdev));
clk_disable(dw->clk);
+
return 0;
}
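
The dw_dmac change above moves the per-channel CFG programming and interrupt unmasking out of alloc_chan_resources() into dwc_initialize(), guarded by an initialized flag that is cleared when the channel is freed or the whole controller is switched off. The same one-shot-init pattern, reduced to a hypothetical foo_chan:

#include <linux/types.h>

struct foo_chan {
	bool initialized;
	/* ...hardware state... */
};

static void foo_chan_initialize(struct foo_chan *fc)
{
	if (fc->initialized)
		return;

	/* program CFG registers, unmask interrupts, ... */
	fc->initialized = true;
}

static void foo_chan_start(struct foo_chan *fc)
{
	foo_chan_initialize(fc);	/* no-op after the first call */
	/* write LLP/CTL and kick the channel */
}

static void foo_chan_free(struct foo_chan *fc)
{
	/* release descriptors, mask interrupts, ... */
	fc->initialized = false;	/* force re-init on next use */
}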
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h
index c3419518d701..5eef6946a367 100644
--- a/drivers/dma/dw_dmac_regs.h
+++ b/drivers/dma/dw_dmac_regs.h
@@ -140,6 +140,7 @@ struct dw_dma_chan {
u8 mask;
u8 priority;
bool paused;
+ bool initialized;
spinlock_t lock;
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index b47e2b803faf..59e7a965772b 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -246,6 +246,9 @@ static void ep93xx_dma_set_active(struct ep93xx_dma_chan *edmac,
static struct ep93xx_dma_desc *
ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac)
{
+ if (list_empty(&edmac->active))
+ return NULL;
+
return list_first_entry(&edmac->active, struct ep93xx_dma_desc, node);
}
@@ -263,16 +266,22 @@ ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac)
*/
static bool ep93xx_dma_advance_active(struct ep93xx_dma_chan *edmac)
{
+ struct ep93xx_dma_desc *desc;
+
list_rotate_left(&edmac->active);
if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
return true;
+ desc = ep93xx_dma_get_active(edmac);
+ if (!desc)
+ return false;
+
/*
* If txd.cookie is set it means that we are back in the first
* descriptor in the chain and hence done with it.
*/
- return !ep93xx_dma_get_active(edmac)->txd.cookie;
+ return !desc->txd.cookie;
}
/*
@@ -327,10 +336,16 @@ static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
static void m2p_fill_desc(struct ep93xx_dma_chan *edmac)
{
- struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);
+ struct ep93xx_dma_desc *desc;
u32 bus_addr;
- if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_TO_DEVICE)
+ desc = ep93xx_dma_get_active(edmac);
+ if (!desc) {
+ dev_warn(chan2dev(edmac), "M2P: empty descriptor list\n");
+ return;
+ }
+
+ if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_MEM_TO_DEV)
bus_addr = desc->src_addr;
else
bus_addr = desc->dst_addr;
@@ -443,7 +458,7 @@ static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
control = (5 << M2M_CONTROL_PWSC_SHIFT);
control |= M2M_CONTROL_NO_HDSK;
- if (data->direction == DMA_TO_DEVICE) {
+ if (data->direction == DMA_MEM_TO_DEV) {
control |= M2M_CONTROL_DAH;
control |= M2M_CONTROL_TM_TX;
control |= M2M_CONTROL_RSS_SSPTX;
@@ -459,11 +474,7 @@ static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
* This IDE part is totally untested. Values below are taken
* from the EP93xx User's Guide and might not be correct.
*/
- control |= M2M_CONTROL_NO_HDSK;
- control |= M2M_CONTROL_RSS_IDE;
- control |= M2M_CONTROL_PW_16;
-
- if (data->direction == DMA_TO_DEVICE) {
+ if (data->direction == DMA_MEM_TO_DEV) {
/* Worst case from the UG */
control = (3 << M2M_CONTROL_PWSC_SHIFT);
control |= M2M_CONTROL_DAH;
@@ -473,6 +484,10 @@ static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
control |= M2M_CONTROL_SAH;
control |= M2M_CONTROL_TM_RX;
}
+
+ control |= M2M_CONTROL_NO_HDSK;
+ control |= M2M_CONTROL_RSS_IDE;
+ control |= M2M_CONTROL_PW_16;
break;
default:
@@ -491,7 +506,13 @@ static void m2m_hw_shutdown(struct ep93xx_dma_chan *edmac)
static void m2m_fill_desc(struct ep93xx_dma_chan *edmac)
{
- struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);
+ struct ep93xx_dma_desc *desc;
+
+ desc = ep93xx_dma_get_active(edmac);
+ if (!desc) {
+ dev_warn(chan2dev(edmac), "M2M: empty descriptor list\n");
+ return;
+ }
if (edmac->buffer == 0) {
writel(desc->src_addr, edmac->regs + M2M_SAR_BASE0);
@@ -669,24 +690,30 @@ static void ep93xx_dma_tasklet(unsigned long data)
{
struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data;
struct ep93xx_dma_desc *desc, *d;
- dma_async_tx_callback callback;
- void *callback_param;
+ dma_async_tx_callback callback = NULL;
+ void *callback_param = NULL;
LIST_HEAD(list);
spin_lock_irq(&edmac->lock);
+ /*
+ * If dma_terminate_all() was called before we get to run, the active
+ * list has become empty. If that happens we aren't supposed to do
+ * anything more than call ep93xx_dma_advance_work().
+ */
desc = ep93xx_dma_get_active(edmac);
- if (desc->complete) {
- edmac->last_completed = desc->txd.cookie;
- list_splice_init(&edmac->active, &list);
+ if (desc) {
+ if (desc->complete) {
+ edmac->last_completed = desc->txd.cookie;
+ list_splice_init(&edmac->active, &list);
+ }
+ callback = desc->txd.callback;
+ callback_param = desc->txd.callback_param;
}
spin_unlock_irq(&edmac->lock);
/* Pick up the next descriptor from the queue */
ep93xx_dma_advance_work(edmac);
- callback = desc->txd.callback;
- callback_param = desc->txd.callback_param;
-
/* Now we can release all the chained descriptors */
list_for_each_entry_safe(desc, d, &list, node) {
/*
@@ -706,13 +733,22 @@ static void ep93xx_dma_tasklet(unsigned long data)
static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id)
{
struct ep93xx_dma_chan *edmac = dev_id;
+ struct ep93xx_dma_desc *desc;
irqreturn_t ret = IRQ_HANDLED;
spin_lock(&edmac->lock);
+ desc = ep93xx_dma_get_active(edmac);
+ if (!desc) {
+ dev_warn(chan2dev(edmac),
+ "got interrupt while active list is empty\n");
+ spin_unlock(&edmac->lock);
+ return IRQ_NONE;
+ }
+
switch (edmac->edma->hw_interrupt(edmac)) {
case INTERRUPT_DONE:
- ep93xx_dma_get_active(edmac)->complete = true;
+ desc->complete = true;
tasklet_schedule(&edmac->tasklet);
break;
@@ -803,8 +839,8 @@ static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
switch (data->port) {
case EP93XX_DMA_SSP:
case EP93XX_DMA_IDE:
- if (data->direction != DMA_TO_DEVICE &&
- data->direction != DMA_FROM_DEVICE)
+ if (data->direction != DMA_MEM_TO_DEV &&
+ data->direction != DMA_DEV_TO_MEM)
return -EINVAL;
break;
default:
@@ -952,7 +988,7 @@ fail:
*/
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_data_direction dir,
+ unsigned int sg_len, enum dma_transfer_direction dir,
unsigned long flags)
{
struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
@@ -988,7 +1024,7 @@ ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
goto fail;
}
- if (dir == DMA_TO_DEVICE) {
+ if (dir == DMA_MEM_TO_DEV) {
desc->src_addr = sg_dma_address(sg);
desc->dst_addr = edmac->runtime_addr;
} else {
@@ -1032,7 +1068,7 @@ fail:
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
size_t buf_len, size_t period_len,
- enum dma_data_direction dir)
+ enum dma_transfer_direction dir)
{
struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
struct ep93xx_dma_desc *desc, *first;
@@ -1065,7 +1101,7 @@ ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
goto fail;
}
- if (dir == DMA_TO_DEVICE) {
+ if (dir == DMA_MEM_TO_DEV) {
desc->src_addr = dma_addr + offset;
desc->dst_addr = edmac->runtime_addr;
} else {
@@ -1133,12 +1169,12 @@ static int ep93xx_dma_slave_config(struct ep93xx_dma_chan *edmac,
return -EINVAL;
switch (config->direction) {
- case DMA_FROM_DEVICE:
+ case DMA_DEV_TO_MEM:
width = config->src_addr_width;
addr = config->src_addr;
break;
- case DMA_TO_DEVICE:
+ case DMA_MEM_TO_DEV:
width = config->dst_addr_width;
addr = config->dst_addr;
break;
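
The ep93xx hunks make ep93xx_dma_get_active() return NULL when the active list is empty (which can happen when DMA_TERMINATE_ALL races with the tasklet or an interrupt) and teach every caller to bail out gracefully. A minimal sketch of that "first active descriptor or NULL" pattern with hypothetical foo_* types:

#include <linux/list.h>
#include <linux/types.h>

struct foo_desc {
	struct list_head node;
	bool complete;
};

struct foo_chan {
	struct list_head active;
};

static struct foo_desc *foo_get_active(struct foo_chan *fc)
{
	if (list_empty(&fc->active))
		return NULL;

	return list_first_entry(&fc->active, struct foo_desc, node);
}

static void foo_handle_irq(struct foo_chan *fc)
{
	struct foo_desc *desc = foo_get_active(fc);

	if (!desc)
		return;		/* raced with terminate_all: nothing to do */

	desc->complete = true;
	/* schedule the tasklet / advance the queue ... */
}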
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 8a781540590c..b98070c33ca9 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -772,7 +772,7 @@ fail:
*/
static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
- enum dma_data_direction direction, unsigned long flags)
+ enum dma_transfer_direction direction, unsigned long flags)
{
/*
* This operation is not supported on the Freescale DMA controller
@@ -819,7 +819,7 @@ static int fsl_dma_device_control(struct dma_chan *dchan,
return -ENXIO;
/* we set the controller burst size depending on direction */
- if (config->direction == DMA_TO_DEVICE)
+ if (config->direction == DMA_MEM_TO_DEV)
size = config->dst_addr_width * config->dst_maxburst;
else
size = config->src_addr_width * config->src_maxburst;
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 4be55f9bb6c1..e4383ee2c9ac 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -107,7 +107,7 @@ static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
imx_dma_disable(imxdmac->imxdma_channel);
return 0;
case DMA_SLAVE_CONFIG:
- if (dmaengine_cfg->direction == DMA_FROM_DEVICE) {
+ if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
imxdmac->per_address = dmaengine_cfg->src_addr;
imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
imxdmac->word_size = dmaengine_cfg->src_addr_width;
@@ -224,7 +224,7 @@ static void imxdma_free_chan_resources(struct dma_chan *chan)
static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_data_direction direction,
+ unsigned int sg_len, enum dma_transfer_direction direction,
unsigned long flags)
{
struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
@@ -241,7 +241,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
dma_length += sg->length;
}
- if (direction == DMA_FROM_DEVICE)
+ if (direction == DMA_DEV_TO_MEM)
dmamode = DMA_MODE_READ;
else
dmamode = DMA_MODE_WRITE;
@@ -271,7 +271,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
- size_t period_len, enum dma_data_direction direction)
+ size_t period_len, enum dma_transfer_direction direction)
{
struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
struct imxdma_engine *imxdma = imxdmac->imxdma;
@@ -317,7 +317,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
imxdmac->sg_list[periods].page_link =
((unsigned long)imxdmac->sg_list | 0x01) & ~0x02;
- if (direction == DMA_FROM_DEVICE)
+ if (direction == DMA_DEV_TO_MEM)
dmamode = DMA_MODE_READ;
else
dmamode = DMA_MODE_WRITE;
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index f993955a640c..a8af379680c1 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -247,7 +247,7 @@ struct sdma_engine;
struct sdma_channel {
struct sdma_engine *sdma;
unsigned int channel;
- enum dma_data_direction direction;
+ enum dma_transfer_direction direction;
enum sdma_peripheral_type peripheral_type;
unsigned int event_id0;
unsigned int event_id1;
@@ -268,6 +268,8 @@ struct sdma_channel {
struct dma_async_tx_descriptor desc;
dma_cookie_t last_completed;
enum dma_status status;
+ unsigned int chn_count;
+ unsigned int chn_real_count;
};
#define IMX_DMA_SG_LOOP (1 << 0)
@@ -503,6 +505,7 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
struct sdma_buffer_descriptor *bd;
int i, error = 0;
+ sdmac->chn_real_count = 0;
/*
* non loop mode. Iterate over all descriptors, collect
* errors and call callback function
@@ -512,6 +515,7 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
if (bd->mode.status & (BD_DONE | BD_RROR))
error = -EIO;
+ sdmac->chn_real_count += bd->mode.count;
}
if (error)
@@ -519,9 +523,9 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
else
sdmac->status = DMA_SUCCESS;
+ sdmac->last_completed = sdmac->desc.cookie;
if (sdmac->desc.callback)
sdmac->desc.callback(sdmac->desc.callback_param);
- sdmac->last_completed = sdmac->desc.cookie;
}
static void mxc_sdma_handle_channel(struct sdma_channel *sdmac)
@@ -650,7 +654,7 @@ static int sdma_load_context(struct sdma_channel *sdmac)
struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
int ret;
- if (sdmac->direction == DMA_FROM_DEVICE) {
+ if (sdmac->direction == DMA_DEV_TO_MEM) {
load_address = sdmac->pc_from_device;
} else {
load_address = sdmac->pc_to_device;
@@ -832,17 +836,18 @@ static struct sdma_channel *to_sdma_chan(struct dma_chan *chan)
static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
+ unsigned long flags;
struct sdma_channel *sdmac = to_sdma_chan(tx->chan);
struct sdma_engine *sdma = sdmac->sdma;
dma_cookie_t cookie;
- spin_lock_irq(&sdmac->lock);
+ spin_lock_irqsave(&sdmac->lock, flags);
cookie = sdma_assign_cookie(sdmac);
sdma_enable_channel(sdma, sdmac->channel);
- spin_unlock_irq(&sdmac->lock);
+ spin_unlock_irqrestore(&sdmac->lock, flags);
return cookie;
}
@@ -911,7 +916,7 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_data_direction direction,
+ unsigned int sg_len, enum dma_transfer_direction direction,
unsigned long flags)
{
struct sdma_channel *sdmac = to_sdma_chan(chan);
@@ -941,6 +946,7 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
goto err_out;
}
+ sdmac->chn_count = 0;
for_each_sg(sgl, sg, sg_len, i) {
struct sdma_buffer_descriptor *bd = &sdmac->bd[i];
int param;
@@ -957,6 +963,7 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
}
bd->mode.count = count;
+ sdmac->chn_count += count;
if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) {
ret = -EINVAL;
@@ -1008,7 +1015,7 @@ err_out:
static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
- size_t period_len, enum dma_data_direction direction)
+ size_t period_len, enum dma_transfer_direction direction)
{
struct sdma_channel *sdmac = to_sdma_chan(chan);
struct sdma_engine *sdma = sdmac->sdma;
@@ -1093,7 +1100,7 @@ static int sdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
sdma_disable_channel(sdmac);
return 0;
case DMA_SLAVE_CONFIG:
- if (dmaengine_cfg->direction == DMA_FROM_DEVICE) {
+ if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
sdmac->per_address = dmaengine_cfg->src_addr;
sdmac->watermark_level = dmaengine_cfg->src_maxburst;
sdmac->word_size = dmaengine_cfg->src_addr_width;
@@ -1102,6 +1109,7 @@ static int sdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
sdmac->watermark_level = dmaengine_cfg->dst_maxburst;
sdmac->word_size = dmaengine_cfg->dst_addr_width;
}
+ sdmac->direction = dmaengine_cfg->direction;
return sdma_config_channel(sdmac);
default:
return -ENOSYS;
@@ -1119,7 +1127,8 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan,
last_used = chan->cookie;
- dma_set_tx_state(txstate, sdmac->last_completed, last_used, 0);
+ dma_set_tx_state(txstate, sdmac->last_completed, last_used,
+ sdmac->chn_count - sdmac->chn_real_count);
return sdmac->status;
}
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
index 19a0c64d45d3..74f70aadf9e4 100644
--- a/drivers/dma/intel_mid_dma.c
+++ b/drivers/dma/intel_mid_dma.c
@@ -280,7 +280,8 @@ static void midc_dostart(struct intel_mid_dma_chan *midc,
* callbacks but must be called with the lock held.
*/
static void midc_descriptor_complete(struct intel_mid_dma_chan *midc,
- struct intel_mid_dma_desc *desc)
+ struct intel_mid_dma_desc *desc)
+ __releases(&midc->lock) __acquires(&midc->lock)
{
struct dma_async_tx_descriptor *txd = &desc->txd;
dma_async_tx_callback callback_txd = NULL;
@@ -311,6 +312,7 @@ static void midc_descriptor_complete(struct intel_mid_dma_chan *midc,
pci_pool_free(desc->lli_pool, desc->lli,
desc->lli_phys);
pci_pool_destroy(desc->lli_pool);
+ desc->lli = NULL;
}
list_move(&desc->desc_node, &midc->free_list);
midc->busy = false;
@@ -395,10 +397,10 @@ static int midc_lli_fill_sg(struct intel_mid_dma_chan *midc,
midc->dma->block_size);
/*Populate SAR and DAR values*/
sg_phy_addr = sg_phys(sg);
- if (desc->dirn == DMA_TO_DEVICE) {
+ if (desc->dirn == DMA_MEM_TO_DEV) {
lli_bloc_desc->sar = sg_phy_addr;
lli_bloc_desc->dar = mids->dma_slave.dst_addr;
- } else if (desc->dirn == DMA_FROM_DEVICE) {
+ } else if (desc->dirn == DMA_DEV_TO_MEM) {
lli_bloc_desc->sar = mids->dma_slave.src_addr;
lli_bloc_desc->dar = sg_phy_addr;
}
@@ -490,7 +492,9 @@ static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan,
ret = dma_async_is_complete(cookie, last_complete, last_used);
if (ret != DMA_SUCCESS) {
+ spin_lock_bh(&midc->lock);
midc_scan_descriptors(to_middma_device(chan->device), midc);
+ spin_unlock_bh(&midc->lock);
last_complete = midc->completed;
last_used = chan->cookie;
@@ -566,6 +570,7 @@ static int intel_mid_dma_device_control(struct dma_chan *chan,
pci_pool_free(desc->lli_pool, desc->lli,
desc->lli_phys);
pci_pool_destroy(desc->lli_pool);
+ desc->lli = NULL;
}
list_move(&desc->desc_node, &midc->free_list);
}
@@ -632,13 +637,13 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
if (midc->dma->pimr_mask) {
cfg_hi.cfgx.protctl = 0x0; /*default value*/
cfg_hi.cfgx.fifo_mode = 1;
- if (mids->dma_slave.direction == DMA_TO_DEVICE) {
+ if (mids->dma_slave.direction == DMA_MEM_TO_DEV) {
cfg_hi.cfgx.src_per = 0;
if (mids->device_instance == 0)
cfg_hi.cfgx.dst_per = 3;
if (mids->device_instance == 1)
cfg_hi.cfgx.dst_per = 1;
- } else if (mids->dma_slave.direction == DMA_FROM_DEVICE) {
+ } else if (mids->dma_slave.direction == DMA_DEV_TO_MEM) {
if (mids->device_instance == 0)
cfg_hi.cfgx.src_per = 2;
if (mids->device_instance == 1)
@@ -682,11 +687,11 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
ctl_lo.ctlx.sinc = 0;
ctl_lo.ctlx.dinc = 0;
} else {
- if (mids->dma_slave.direction == DMA_TO_DEVICE) {
+ if (mids->dma_slave.direction == DMA_MEM_TO_DEV) {
ctl_lo.ctlx.sinc = 0;
ctl_lo.ctlx.dinc = 2;
ctl_lo.ctlx.tt_fc = 1;
- } else if (mids->dma_slave.direction == DMA_FROM_DEVICE) {
+ } else if (mids->dma_slave.direction == DMA_DEV_TO_MEM) {
ctl_lo.ctlx.sinc = 2;
ctl_lo.ctlx.dinc = 0;
ctl_lo.ctlx.tt_fc = 2;
@@ -732,7 +737,7 @@ err_desc_get:
*/
static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_data_direction direction,
+ unsigned int sg_len, enum dma_transfer_direction direction,
unsigned long flags)
{
struct intel_mid_dma_chan *midc = NULL;
@@ -868,7 +873,7 @@ static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan)
pm_runtime_get_sync(&mid->pdev->dev);
if (mid->state == SUSPENDED) {
- if (dma_resume(mid->pdev)) {
+ if (dma_resume(&mid->pdev->dev)) {
pr_err("ERR_MDMA: resume failed");
return -EFAULT;
}
@@ -1099,7 +1104,8 @@ static int mid_setup_dma(struct pci_dev *pdev)
LNW_PERIPHRAL_MASK_SIZE);
if (dma->mask_reg == NULL) {
pr_err("ERR_MDMA:Can't map periphral intr space !!\n");
- return -ENOMEM;
+ err = -ENOMEM;
+ goto err_ioremap;
}
} else
dma->mask_reg = NULL;
@@ -1196,6 +1202,9 @@ static int mid_setup_dma(struct pci_dev *pdev)
err_engine:
free_irq(pdev->irq, dma);
err_irq:
+ if (dma->mask_reg)
+ iounmap(dma->mask_reg);
+err_ioremap:
pci_pool_destroy(dma->dma_pool);
err_dma_pool:
pr_err("ERR_MDMA:setup_dma failed: %d\n", err);
@@ -1337,8 +1346,9 @@ static void __devexit intel_mid_dma_remove(struct pci_dev *pdev)
*
* This function is called by OS when a power event occurs
*/
-int dma_suspend(struct pci_dev *pci, pm_message_t state)
+static int dma_suspend(struct device *dev)
{
+ struct pci_dev *pci = to_pci_dev(dev);
int i;
struct middma_device *device = pci_get_drvdata(pci);
pr_debug("MDMA: dma_suspend called\n");
@@ -1362,8 +1372,9 @@ int dma_suspend(struct pci_dev *pci, pm_message_t state)
*
* This function is called by OS when a power event occurs
*/
-int dma_resume(struct pci_dev *pci)
+int dma_resume(struct device *dev)
{
+ struct pci_dev *pci = to_pci_dev(dev);
int ret;
struct middma_device *device = pci_get_drvdata(pci);
@@ -1429,6 +1440,8 @@ static const struct dev_pm_ops intel_mid_dma_pm = {
.runtime_suspend = dma_runtime_suspend,
.runtime_resume = dma_runtime_resume,
.runtime_idle = dma_runtime_idle,
+ .suspend = dma_suspend,
+ .resume = dma_resume,
};
static struct pci_driver intel_mid_dma_pci_driver = {
@@ -1437,8 +1450,6 @@ static struct pci_driver intel_mid_dma_pci_driver = {
.probe = intel_mid_dma_probe,
.remove = __devexit_p(intel_mid_dma_remove),
#ifdef CONFIG_PM
- .suspend = dma_suspend,
- .resume = dma_resume,
.driver = {
.pm = &intel_mid_dma_pm,
},
diff --git a/drivers/dma/intel_mid_dma_regs.h b/drivers/dma/intel_mid_dma_regs.h
index aea5ee88ce03..c83d35b97bd8 100644
--- a/drivers/dma/intel_mid_dma_regs.h
+++ b/drivers/dma/intel_mid_dma_regs.h
@@ -262,7 +262,7 @@ struct intel_mid_dma_desc {
unsigned int lli_length;
unsigned int current_lli;
dma_addr_t next;
- enum dma_data_direction dirn;
+ enum dma_transfer_direction dirn;
enum dma_status status;
enum dma_slave_buswidth width; /*width of DMA txn*/
enum intel_mid_dma_mode cfg_mode; /*mode configuration*/
@@ -296,6 +296,6 @@ static inline struct intel_mid_dma_slave *to_intel_mid_dma_slave
}
-int dma_resume(struct pci_dev *pci);
+int dma_resume(struct device *dev);
#endif /*__INTEL_MID_DMAC_REGS_H__*/
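
The intel_mid_dma hunks convert the legacy PCI .suspend/.resume callbacks into dev_pm_ops entries that take a struct device and recover the pci_dev with to_pci_dev(). A bare-bones sketch of that conversion, with hypothetical foo_* names and only the state save/restore kept:

#include <linux/pci.h>
#include <linux/pm.h>

static int foo_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	/* quiesce the hardware here */
	pci_save_state(pdev);
	return 0;
}

static int foo_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	pci_restore_state(pdev);
	/* re-arm the hardware here */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	.suspend = foo_suspend,
	.resume = foo_resume,
};

static struct pci_driver foo_pci_driver = {
	.name = "foo",
	/* .id_table, .probe, .remove elided */
	.driver = {
		.pm = &foo_pm_ops,
	},
};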
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index e03f811a83dd..04be90b645b8 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -1735,8 +1735,6 @@ static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan)
spin_unlock_bh(&iop_chan->lock);
}
-MODULE_ALIAS("platform:iop-adma");
-
static struct platform_driver iop_adma_driver = {
.probe = iop_adma_probe,
.remove = __devexit_p(iop_adma_remove),
@@ -1746,19 +1744,9 @@ static struct platform_driver iop_adma_driver = {
},
};
-static int __init iop_adma_init (void)
-{
- return platform_driver_register(&iop_adma_driver);
-}
-
-static void __exit iop_adma_exit (void)
-{
- platform_driver_unregister(&iop_adma_driver);
- return;
-}
-module_exit(iop_adma_exit);
-module_init(iop_adma_init);
+module_platform_driver(iop_adma_driver);
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("IOP ADMA Engine Driver");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:iop-adma");
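
iop-adma (and mpc512x_dma further down) drop their open-coded module init/exit functions in favour of module_platform_driver(). The macro expands to roughly the boilerplate that was removed, so for a simple driver the two spellings below should be equivalent; foo_driver is a hypothetical placeholder:

#include <linux/module.h>
#include <linux/platform_device.h>

static struct platform_driver foo_driver = {
	.driver = { .name = "foo" },
};

/* Open-coded registration, as it used to be written: */
static int __init foo_init(void)
{
	return platform_driver_register(&foo_driver);
}
module_init(foo_init);

static void __exit foo_exit(void)
{
	platform_driver_unregister(&foo_driver);
}
module_exit(foo_exit);

/* ...which can now be replaced by a single line:
 * module_platform_driver(foo_driver);
 */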
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index 0e5ef33f90a1..6212b16e8cf2 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -312,7 +312,7 @@ static void ipu_ch_param_set_size(union chan_param_mem *params,
case IPU_PIX_FMT_RGB565:
params->ip.bpp = 2;
params->ip.pfs = 4;
- params->ip.npb = 7;
+ params->ip.npb = 15;
params->ip.sat = 2; /* SAT = 32-bit access */
params->ip.ofs0 = 0; /* Red bit offset */
params->ip.ofs1 = 5; /* Green bit offset */
@@ -422,12 +422,6 @@ static void ipu_ch_param_set_size(union chan_param_mem *params,
params->pp.nsb = 1;
}
-static void ipu_ch_param_set_burst_size(union chan_param_mem *params,
- uint16_t burst_pixels)
-{
- params->pp.npb = burst_pixels - 1;
-}
-
static void ipu_ch_param_set_buffer(union chan_param_mem *params,
dma_addr_t buf0, dma_addr_t buf1)
{
@@ -690,23 +684,6 @@ static int ipu_init_channel_buffer(struct idmac_channel *ichan,
ipu_ch_param_set_size(&params, pixel_fmt, width, height, stride_bytes);
ipu_ch_param_set_buffer(&params, phyaddr_0, phyaddr_1);
ipu_ch_param_set_rotation(&params, rot_mode);
- /* Some channels (rotation) have restriction on burst length */
- switch (channel) {
- case IDMAC_IC_7: /* Hangs with burst 8, 16, other values
- invalid - Table 44-30 */
-/*
- ipu_ch_param_set_burst_size(&params, 8);
- */
- break;
- case IDMAC_SDC_0:
- case IDMAC_SDC_1:
- /* In original code only IPU_PIX_FMT_RGB565 was setting burst */
- ipu_ch_param_set_burst_size(&params, 16);
- break;
- case IDMAC_IC_0:
- default:
- break;
- }
spin_lock_irqsave(&ipu->lock, flags);
@@ -1364,7 +1341,7 @@ static void ipu_gc_tasklet(unsigned long arg)
/* Allocate and initialise a transfer descriptor. */
static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan,
struct scatterlist *sgl, unsigned int sg_len,
- enum dma_data_direction direction, unsigned long tx_flags)
+ enum dma_transfer_direction direction, unsigned long tx_flags)
{
struct idmac_channel *ichan = to_idmac_chan(chan);
struct idmac_tx_desc *desc = NULL;
@@ -1376,7 +1353,7 @@ static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan
chan->chan_id != IDMAC_IC_7)
return NULL;
- if (direction != DMA_FROM_DEVICE && direction != DMA_TO_DEVICE) {
+ if (direction != DMA_DEV_TO_MEM && direction != DMA_MEM_TO_DEV) {
dev_err(chan->device->dev, "Invalid DMA direction %d!\n", direction);
return NULL;
}
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index 8ba4edc6185e..4d6d4cf66949 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -835,17 +835,7 @@ static struct platform_driver mpc_dma_driver = {
},
};
-static int __init mpc_dma_init(void)
-{
- return platform_driver_register(&mpc_dma_driver);
-}
-module_init(mpc_dma_init);
-
-static void __exit mpc_dma_exit(void)
-{
- platform_driver_unregister(&mpc_dma_driver);
-}
-module_exit(mpc_dma_exit);
+module_platform_driver(mpc_dma_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Piotr Ziecik <kosmo@semihalf.com>");
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 9a353c2216d0..e779b434af45 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -1250,7 +1250,7 @@ static int __devinit mv_xor_probe(struct platform_device *pdev)
static void
mv_xor_conf_mbus_windows(struct mv_xor_shared_private *msp,
- struct mbus_dram_target_info *dram)
+ const struct mbus_dram_target_info *dram)
{
void __iomem *base = msp->xor_base;
u32 win_enable = 0;
@@ -1264,7 +1264,7 @@ mv_xor_conf_mbus_windows(struct mv_xor_shared_private *msp,
}
for (i = 0; i < dram->num_cs; i++) {
- struct mbus_dram_window *cs = dram->cs + i;
+ const struct mbus_dram_window *cs = dram->cs + i;
writel((cs->base & 0xffff0000) |
(cs->mbus_attr << 8) |
@@ -1290,7 +1290,7 @@ static struct platform_driver mv_xor_driver = {
static int mv_xor_shared_probe(struct platform_device *pdev)
{
- struct mv_xor_platform_shared_data *msd = pdev->dev.platform_data;
+ const struct mbus_dram_target_info *dram;
struct mv_xor_shared_private *msp;
struct resource *res;
@@ -1323,8 +1323,9 @@ static int mv_xor_shared_probe(struct platform_device *pdev)
/*
* (Re-)program MBUS remapping windows if we are asked to.
*/
- if (msd != NULL && msd->dram != NULL)
- mv_xor_conf_mbus_windows(msp, msd->dram);
+ dram = mv_mbus_dram_info();
+ if (dram)
+ mv_xor_conf_mbus_windows(msp, dram);
return 0;
}
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index b4588bdd98bb..b06cd4ca626f 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -44,7 +44,6 @@
#define HW_APBHX_CTRL0 0x000
#define BM_APBH_CTRL0_APB_BURST8_EN (1 << 29)
#define BM_APBH_CTRL0_APB_BURST_EN (1 << 28)
-#define BP_APBH_CTRL0_CLKGATE_CHANNEL 8
#define BP_APBH_CTRL0_RESET_CHANNEL 16
#define HW_APBHX_CTRL1 0x010
#define HW_APBHX_CTRL2 0x020
@@ -111,6 +110,7 @@ struct mxs_dma_chan {
int chan_irq;
struct mxs_dma_ccw *ccw;
dma_addr_t ccw_phys;
+ int desc_count;
dma_cookie_t last_completed;
enum dma_status status;
unsigned int flags;
@@ -130,23 +130,6 @@ struct mxs_dma_engine {
struct mxs_dma_chan mxs_chans[MXS_DMA_CHANNELS];
};
-static inline void mxs_dma_clkgate(struct mxs_dma_chan *mxs_chan, int enable)
-{
- struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
- int chan_id = mxs_chan->chan.chan_id;
- int set_clr = enable ? MXS_CLR_ADDR : MXS_SET_ADDR;
-
- /* enable apbh channel clock */
- if (dma_is_apbh()) {
- if (apbh_is_old())
- writel(1 << (chan_id + BP_APBH_CTRL0_CLKGATE_CHANNEL),
- mxs_dma->base + HW_APBHX_CTRL0 + set_clr);
- else
- writel(1 << chan_id,
- mxs_dma->base + HW_APBHX_CTRL0 + set_clr);
- }
-}
-
static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan)
{
struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
@@ -165,9 +148,6 @@ static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
int chan_id = mxs_chan->chan.chan_id;
- /* clkgate needs to be enabled before writing other registers */
- mxs_dma_clkgate(mxs_chan, 1);
-
/* set cmd_addr up */
writel(mxs_chan->ccw_phys,
mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(chan_id));
@@ -178,9 +158,6 @@ static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan)
{
- /* disable apbh channel clock */
- mxs_dma_clkgate(mxs_chan, 0);
-
mxs_chan->status = DMA_SUCCESS;
}
@@ -268,7 +245,7 @@ static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id)
/*
* When both completion and error of termination bits set at the
* same time, we do not take it as an error. IOW, it only becomes
- * an error we need to handler here in case of ether it's (1) an bus
+ * an error we need to handle here in case of either it's (1) a bus
* error or (2) a termination error with no completion.
*/
stat2 = ((stat2 >> MXS_DMA_CHANNELS) & stat2) | /* (1) */
@@ -334,14 +311,11 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan)
goto err_irq;
}
- ret = clk_enable(mxs_dma->clk);
+ ret = clk_prepare_enable(mxs_dma->clk);
if (ret)
goto err_clk;
- /* clkgate needs to be enabled for reset to finish */
- mxs_dma_clkgate(mxs_chan, 1);
mxs_dma_reset_chan(mxs_chan);
- mxs_dma_clkgate(mxs_chan, 0);
dma_async_tx_descriptor_init(&mxs_chan->desc, chan);
mxs_chan->desc.tx_submit = mxs_dma_tx_submit;
@@ -372,12 +346,12 @@ static void mxs_dma_free_chan_resources(struct dma_chan *chan)
dma_free_coherent(mxs_dma->dma_device.dev, PAGE_SIZE,
mxs_chan->ccw, mxs_chan->ccw_phys);
- clk_disable(mxs_dma->clk);
+ clk_disable_unprepare(mxs_dma->clk);
}
static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_data_direction direction,
+ unsigned int sg_len, enum dma_transfer_direction direction,
unsigned long append)
{
struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
@@ -386,7 +360,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
struct scatterlist *sg;
int i, j;
u32 *pio;
- static int idx;
+ int idx = append ? mxs_chan->desc_count : 0;
if (mxs_chan->status == DMA_IN_PROGRESS && !append)
return NULL;
@@ -417,7 +391,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
idx = 0;
}
- if (direction == DMA_NONE) {
+ if (direction == DMA_TRANS_NONE) {
ccw = &mxs_chan->ccw[idx++];
pio = (u32 *) sgl;
@@ -450,7 +424,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
ccw->bits |= CCW_CHAIN;
ccw->bits |= CCW_HALT_ON_TERM;
ccw->bits |= CCW_TERM_FLUSH;
- ccw->bits |= BF_CCW(direction == DMA_FROM_DEVICE ?
+ ccw->bits |= BF_CCW(direction == DMA_DEV_TO_MEM ?
MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ,
COMMAND);
@@ -462,6 +436,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
}
}
}
+ mxs_chan->desc_count = idx;
return &mxs_chan->desc;
@@ -472,7 +447,7 @@ err_out:
static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
- size_t period_len, enum dma_data_direction direction)
+ size_t period_len, enum dma_transfer_direction direction)
{
struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
@@ -515,7 +490,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(
ccw->bits |= CCW_IRQ;
ccw->bits |= CCW_HALT_ON_TERM;
ccw->bits |= CCW_TERM_FLUSH;
- ccw->bits |= BF_CCW(direction == DMA_FROM_DEVICE ?
+ ccw->bits |= BF_CCW(direction == DMA_DEV_TO_MEM ?
MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, COMMAND);
dma_addr += period_len;
@@ -523,6 +498,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(
i++;
}
+ mxs_chan->desc_count = i;
return &mxs_chan->desc;
@@ -539,8 +515,8 @@ static int mxs_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
switch (cmd) {
case DMA_TERMINATE_ALL:
- mxs_dma_disable_chan(mxs_chan);
mxs_dma_reset_chan(mxs_chan);
+ mxs_dma_disable_chan(mxs_chan);
break;
case DMA_PAUSE:
mxs_dma_pause_chan(mxs_chan);
@@ -578,9 +554,9 @@ static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma)
{
int ret;
- ret = clk_enable(mxs_dma->clk);
+ ret = clk_prepare_enable(mxs_dma->clk);
if (ret)
- goto err_out;
+ return ret;
ret = mxs_reset_block(mxs_dma->base);
if (ret)
@@ -604,11 +580,8 @@ static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma)
writel(MXS_DMA_CHANNELS_MASK << MXS_DMA_CHANNELS,
mxs_dma->base + HW_APBHX_CTRL1 + MXS_SET_ADDR);
- clk_disable(mxs_dma->clk);
-
- return 0;
-
err_out:
+ clk_disable_unprepare(mxs_dma->clk);
return ret;
}
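
The mxs-dma hunks switch from clk_enable()/clk_disable() to clk_prepare_enable()/clk_disable_unprepare(), the required pairing once the clock may need a (possibly sleeping) prepare step. A minimal sketch of the pairing in a probe/remove path, again with hypothetical foo_* names:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct clk *clk;
	int ret;

	clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);	/* prepare + enable in one call */
	if (ret) {
		clk_put(clk);
		return ret;
	}

	platform_set_drvdata(pdev, clk);
	return 0;
}

static int foo_remove(struct platform_device *pdev)
{
	struct clk *clk = platform_get_drvdata(pdev);

	clk_disable_unprepare(clk);	/* undo both steps */
	clk_put(clk);
	return 0;
}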
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index a6d0e3dbed07..823f58179f9d 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -1,7 +1,7 @@
/*
* Topcliff PCH DMA controller driver
* Copyright (c) 2010 Intel Corporation
- * Copyright (C) 2011 OKI SEMICONDUCTOR CO., LTD.
+ * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -99,7 +99,7 @@ struct pch_dma_desc {
struct pch_dma_chan {
struct dma_chan chan;
void __iomem *membase;
- enum dma_data_direction dir;
+ enum dma_transfer_direction dir;
struct tasklet_struct tasklet;
unsigned long err_status;
@@ -224,7 +224,7 @@ static void pdc_set_dir(struct dma_chan *chan)
mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
(DMA_CTL0_BITS_PER_CH * chan->chan_id));
val &= mask_mode;
- if (pd_chan->dir == DMA_TO_DEVICE)
+ if (pd_chan->dir == DMA_MEM_TO_DEV)
val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
DMA_CTL0_DIR_SHIFT_BITS);
else
@@ -242,7 +242,7 @@ static void pdc_set_dir(struct dma_chan *chan)
mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
(DMA_CTL0_BITS_PER_CH * ch));
val &= mask_mode;
- if (pd_chan->dir == DMA_TO_DEVICE)
+ if (pd_chan->dir == DMA_MEM_TO_DEV)
val |= 0x1 << (DMA_CTL0_BITS_PER_CH * ch +
DMA_CTL0_DIR_SHIFT_BITS);
else
@@ -607,7 +607,7 @@ static void pd_issue_pending(struct dma_chan *chan)
static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan,
struct scatterlist *sgl, unsigned int sg_len,
- enum dma_data_direction direction, unsigned long flags)
+ enum dma_transfer_direction direction, unsigned long flags)
{
struct pch_dma_chan *pd_chan = to_pd_chan(chan);
struct pch_dma_slave *pd_slave = chan->private;
@@ -623,9 +623,9 @@ static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan,
return NULL;
}
- if (direction == DMA_FROM_DEVICE)
+ if (direction == DMA_DEV_TO_MEM)
reg = pd_slave->rx_reg;
- else if (direction == DMA_TO_DEVICE)
+ else if (direction == DMA_MEM_TO_DEV)
reg = pd_slave->tx_reg;
else
return NULL;
@@ -1018,6 +1018,8 @@ static void __devexit pch_dma_remove(struct pci_dev *pdev)
#define PCI_DEVICE_ID_ML7223_DMA2_4CH 0x800E
#define PCI_DEVICE_ID_ML7223_DMA3_4CH 0x8017
#define PCI_DEVICE_ID_ML7223_DMA4_4CH 0x803B
+#define PCI_DEVICE_ID_ML7831_DMA1_8CH 0x8810
+#define PCI_DEVICE_ID_ML7831_DMA2_4CH 0x8815
DEFINE_PCI_DEVICE_TABLE(pch_dma_id_table) = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 },
@@ -1030,6 +1032,8 @@ DEFINE_PCI_DEVICE_TABLE(pch_dma_id_table) = {
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA2_4CH), 4}, /* Video SPI */
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA3_4CH), 4}, /* Security */
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA4_4CH), 4}, /* FPGA */
+ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_DMA1_8CH), 8}, /* UART */
+ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_DMA2_4CH), 4}, /* SPI */
{ 0, },
};
@@ -1057,7 +1061,7 @@ static void __exit pch_dma_exit(void)
module_init(pch_dma_init);
module_exit(pch_dma_exit);
-MODULE_DESCRIPTION("Intel EG20T PCH / OKI SEMICONDUCTOR ML7213 IOH "
+MODULE_DESCRIPTION("Intel EG20T PCH / LAPIS Semicon ML7213/ML7223/ML7831 IOH "
"DMA controller driver");
MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 571041477ab2..b8ec03ee8e22 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -19,6 +19,7 @@
#include <linux/amba/pl330.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
+#include <linux/of.h>
#define NR_DEFAULT_DESC 16
@@ -116,6 +117,9 @@ struct dma_pl330_desc {
struct dma_pl330_chan *pchan;
};
+/* forward declaration */
+static struct amba_driver pl330_driver;
+
static inline struct dma_pl330_chan *
to_pchan(struct dma_chan *ch)
{
@@ -267,6 +271,32 @@ static void dma_pl330_rqcb(void *token, enum pl330_op_err err)
tasklet_schedule(&pch->task);
}
+bool pl330_filter(struct dma_chan *chan, void *param)
+{
+ u8 *peri_id;
+
+ if (chan->device->dev->driver != &pl330_driver.drv)
+ return false;
+
+#ifdef CONFIG_OF
+ if (chan->device->dev->of_node) {
+ const __be32 *prop_value;
+ phandle phandle;
+ struct device_node *node;
+
+ prop_value = ((struct property *)param)->value;
+ phandle = be32_to_cpup(prop_value++);
+ node = of_find_node_by_phandle(phandle);
+ return ((chan->private == node) &&
+ (chan->chan_id == be32_to_cpup(prop_value)));
+ }
+#endif
+
+ peri_id = chan->private;
+ return *peri_id == (unsigned)param;
+}
+EXPORT_SYMBOL(pl330_filter);
+
static int pl330_alloc_chan_resources(struct dma_chan *chan)
{
struct dma_pl330_chan *pch = to_pchan(chan);
@@ -320,14 +350,14 @@ static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned
case DMA_SLAVE_CONFIG:
slave_config = (struct dma_slave_config *)arg;
- if (slave_config->direction == DMA_TO_DEVICE) {
+ if (slave_config->direction == DMA_MEM_TO_DEV) {
if (slave_config->dst_addr)
pch->fifo_addr = slave_config->dst_addr;
if (slave_config->dst_addr_width)
pch->burst_sz = __ffs(slave_config->dst_addr_width);
if (slave_config->dst_maxburst)
pch->burst_len = slave_config->dst_maxburst;
- } else if (slave_config->direction == DMA_FROM_DEVICE) {
+ } else if (slave_config->direction == DMA_DEV_TO_MEM) {
if (slave_config->src_addr)
pch->fifo_addr = slave_config->src_addr;
if (slave_config->src_addr_width)
@@ -497,7 +527,7 @@ pluck_desc(struct dma_pl330_dmac *pdmac)
static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
{
struct dma_pl330_dmac *pdmac = pch->dmac;
- struct dma_pl330_peri *peri = pch->chan.private;
+ u8 *peri_id = pch->chan.private;
struct dma_pl330_desc *desc;
/* Pluck one desc from the pool of DMAC */
@@ -522,13 +552,7 @@ static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
desc->txd.cookie = 0;
async_tx_ack(&desc->txd);
- if (peri) {
- desc->req.rqtype = peri->rqtype;
- desc->req.peri = pch->chan.chan_id;
- } else {
- desc->req.rqtype = MEMTOMEM;
- desc->req.peri = 0;
- }
+ desc->req.peri = peri_id ? pch->chan.chan_id : 0;
dma_async_tx_descriptor_init(&desc->txd, &pch->chan);
@@ -597,7 +621,7 @@ static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t dma_addr, size_t len,
- size_t period_len, enum dma_data_direction direction)
+ size_t period_len, enum dma_transfer_direction direction)
{
struct dma_pl330_desc *desc;
struct dma_pl330_chan *pch = to_pchan(chan);
@@ -612,15 +636,17 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
}
switch (direction) {
- case DMA_TO_DEVICE:
+ case DMA_MEM_TO_DEV:
desc->rqcfg.src_inc = 1;
desc->rqcfg.dst_inc = 0;
+ desc->req.rqtype = MEMTODEV;
src = dma_addr;
dst = pch->fifo_addr;
break;
- case DMA_FROM_DEVICE:
+ case DMA_DEV_TO_MEM:
desc->rqcfg.src_inc = 0;
desc->rqcfg.dst_inc = 1;
+ desc->req.rqtype = DEVTOMEM;
src = pch->fifo_addr;
dst = dma_addr;
break;
@@ -646,16 +672,12 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
{
struct dma_pl330_desc *desc;
struct dma_pl330_chan *pch = to_pchan(chan);
- struct dma_pl330_peri *peri = chan->private;
struct pl330_info *pi;
int burst;
if (unlikely(!pch || !len))
return NULL;
- if (peri && peri->rqtype != MEMTOMEM)
- return NULL;
-
pi = &pch->dmac->pif;
desc = __pl330_prep_dma_memcpy(pch, dst, src, len);
@@ -664,6 +686,7 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
desc->rqcfg.src_inc = 1;
desc->rqcfg.dst_inc = 1;
+ desc->req.rqtype = MEMTOMEM;
/* Select max possible burst size */
burst = pi->pcfg.data_bus_width / 8;
@@ -687,30 +710,19 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
static struct dma_async_tx_descriptor *
pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_data_direction direction,
+ unsigned int sg_len, enum dma_transfer_direction direction,
unsigned long flg)
{
struct dma_pl330_desc *first, *desc = NULL;
struct dma_pl330_chan *pch = to_pchan(chan);
- struct dma_pl330_peri *peri = chan->private;
struct scatterlist *sg;
unsigned long flags;
int i;
dma_addr_t addr;
- if (unlikely(!pch || !sgl || !sg_len || !peri))
+ if (unlikely(!pch || !sgl || !sg_len))
return NULL;
- /* Make sure the direction is consistent */
- if ((direction == DMA_TO_DEVICE &&
- peri->rqtype != MEMTODEV) ||
- (direction == DMA_FROM_DEVICE &&
- peri->rqtype != DEVTOMEM)) {
- dev_err(pch->dmac->pif.dev, "%s:%d Invalid Direction\n",
- __func__, __LINE__);
- return NULL;
- }
-
addr = pch->fifo_addr;
first = NULL;
@@ -747,14 +759,16 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
else
list_add_tail(&desc->node, &first->node);
- if (direction == DMA_TO_DEVICE) {
+ if (direction == DMA_MEM_TO_DEV) {
desc->rqcfg.src_inc = 1;
desc->rqcfg.dst_inc = 0;
+ desc->req.rqtype = MEMTODEV;
fill_px(&desc->px,
addr, sg_dma_address(sg), sg_dma_len(sg));
} else {
desc->rqcfg.src_inc = 0;
desc->rqcfg.dst_inc = 1;
+ desc->req.rqtype = DEVTOMEM;
fill_px(&desc->px,
sg_dma_address(sg), addr, sg_dma_len(sg));
}
@@ -820,17 +834,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
amba_set_drvdata(adev, pdmac);
-#ifdef CONFIG_PM_RUNTIME
- /* to use the runtime PM helper functions */
- pm_runtime_enable(&adev->dev);
-
- /* enable the power domain */
- if (pm_runtime_get_sync(&adev->dev)) {
- dev_err(&adev->dev, "failed to get runtime pm\n");
- ret = -ENODEV;
- goto probe_err1;
- }
-#else
+#ifndef CONFIG_PM_RUNTIME
/* enable dma clk */
clk_enable(pdmac->clk);
#endif
@@ -856,32 +860,16 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
INIT_LIST_HEAD(&pd->channels);
/* Initialize channel parameters */
- num_chan = max(pdat ? pdat->nr_valid_peri : 0, (u8)pi->pcfg.num_chan);
+ num_chan = max(pdat ? pdat->nr_valid_peri : (u8)pi->pcfg.num_peri,
+ (u8)pi->pcfg.num_chan);
pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);
for (i = 0; i < num_chan; i++) {
pch = &pdmac->peripherals[i];
- if (pdat) {
- struct dma_pl330_peri *peri = &pdat->peri[i];
-
- switch (peri->rqtype) {
- case MEMTOMEM:
- dma_cap_set(DMA_MEMCPY, pd->cap_mask);
- break;
- case MEMTODEV:
- case DEVTOMEM:
- dma_cap_set(DMA_SLAVE, pd->cap_mask);
- dma_cap_set(DMA_CYCLIC, pd->cap_mask);
- break;
- default:
- dev_err(&adev->dev, "DEVTODEV Not Supported\n");
- continue;
- }
- pch->chan.private = peri;
- } else {
- dma_cap_set(DMA_MEMCPY, pd->cap_mask);
- pch->chan.private = NULL;
- }
+ if (!adev->dev.of_node)
+ pch->chan.private = pdat ? &pdat->peri_id[i] : NULL;
+ else
+ pch->chan.private = adev->dev.of_node;
INIT_LIST_HEAD(&pch->work_list);
spin_lock_init(&pch->lock);
@@ -894,6 +882,15 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
}
pd->dev = &adev->dev;
+ if (pdat) {
+ pd->cap_mask = pdat->cap_mask;
+ } else {
+ dma_cap_set(DMA_MEMCPY, pd->cap_mask);
+ if (pi->pcfg.num_peri) {
+ dma_cap_set(DMA_SLAVE, pd->cap_mask);
+ dma_cap_set(DMA_CYCLIC, pd->cap_mask);
+ }
+ }
pd->device_alloc_chan_resources = pl330_alloc_chan_resources;
pd->device_free_chan_resources = pl330_free_chan_resources;
@@ -970,10 +967,7 @@ static int __devexit pl330_remove(struct amba_device *adev)
res = &adev->res;
release_mem_region(res->start, resource_size(res));
-#ifdef CONFIG_PM_RUNTIME
- pm_runtime_put(&adev->dev);
- pm_runtime_disable(&adev->dev);
-#else
+#ifndef CONFIG_PM_RUNTIME
clk_disable(pdmac->clk);
#endif
@@ -990,6 +984,8 @@ static struct amba_id pl330_ids[] = {
{ 0, 0 },
};
+MODULE_DEVICE_TABLE(amba, pl330_ids);
+
#ifdef CONFIG_PM_RUNTIME
static int pl330_runtime_suspend(struct device *dev)
{
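Throughout this series the slave-DMA callbacks are converted from enum dma_data_direction to the new enum dma_transfer_direction (DMA_TO_DEVICE becomes DMA_MEM_TO_DEV, DMA_FROM_DEVICE becomes DMA_DEV_TO_MEM, and memcpy uses DMA_MEM_TO_MEM instead of DMA_BIDIRECTIONAL). As a rough orientation only — a minimal sketch, not code from this patch — a pl330-style helper mapping the new directions onto the request configuration could look like this; the desc/rqcfg/rqtype fields simply mirror the hunks above:

static void pl330_set_direction(struct dma_pl330_desc *desc,
				enum dma_transfer_direction dir)
{
	switch (dir) {
	case DMA_MEM_TO_DEV:		/* was DMA_TO_DEVICE */
		desc->rqcfg.src_inc = 1;
		desc->rqcfg.dst_inc = 0;
		desc->req.rqtype = MEMTODEV;
		break;
	case DMA_DEV_TO_MEM:		/* was DMA_FROM_DEVICE */
		desc->rqcfg.src_inc = 0;
		desc->rqcfg.dst_inc = 1;
		desc->req.rqtype = DEVTOMEM;
		break;
	default:			/* DMA_MEM_TO_MEM, was DMA_BIDIRECTIONAL */
		desc->rqcfg.src_inc = 1;
		desc->rqcfg.dst_inc = 1;
		desc->req.rqtype = MEMTOMEM;
		break;
	}
}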
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index 81809c2b46ab..54043cd831c8 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -23,7 +23,6 @@
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
-#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sh_dma.h>
@@ -57,6 +56,15 @@ static LIST_HEAD(sh_dmae_devices);
static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SH_DMA_SLAVE_NUMBER)];
static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);
+static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan);
+
+static void chclr_write(struct sh_dmae_chan *sh_dc, u32 data)
+{
+ struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
+
+ __raw_writel(data, shdev->chan_reg +
+ shdev->pdata->channel[sh_dc->id].chclr_offset);
+}
static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
{
@@ -129,6 +137,15 @@ static int sh_dmae_rst(struct sh_dmae_device *shdev)
dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME);
+ if (shdev->pdata->chclr_present) {
+ int i;
+ for (i = 0; i < shdev->pdata->channel_num; i++) {
+ struct sh_dmae_chan *sh_chan = shdev->chan[i];
+ if (sh_chan)
+ chclr_write(sh_chan, 0);
+ }
+ }
+
dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init);
dmaor = dmaor_read(shdev);
@@ -139,6 +156,10 @@ static int sh_dmae_rst(struct sh_dmae_device *shdev)
dev_warn(shdev->common.dev, "Can't initialize DMAOR.\n");
return -EIO;
}
+ if (shdev->pdata->dmaor_init & ~dmaor)
+ dev_warn(shdev->common.dev,
+ "DMAOR=0x%x hasn't latched the initial value 0x%x.\n",
+ dmaor, shdev->pdata->dmaor_init);
return 0;
}
@@ -259,8 +280,6 @@ static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
return 0;
}
-static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan);
-
static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
{
struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c;
@@ -340,6 +359,8 @@ static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
sh_chan_xfer_ld_queue(sh_chan);
sh_chan->pm_state = DMAE_PM_ESTABLISHED;
}
+ } else {
+ sh_chan->pm_state = DMAE_PM_PENDING;
}
spin_unlock_irq(&sh_chan->desc_lock);
@@ -479,19 +500,19 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan)
* @sh_chan: DMA channel
* @flags: DMA transfer flags
* @dest: destination DMA address, incremented when direction equals
- * DMA_FROM_DEVICE or DMA_BIDIRECTIONAL
+ * DMA_DEV_TO_MEM
* @src: source DMA address, incremented when direction equals
- * DMA_TO_DEVICE or DMA_BIDIRECTIONAL
+ * DMA_MEM_TO_DEV
* @len: DMA transfer length
* @first: if NULL, set to the current descriptor and cookie set to -EBUSY
* @direction: needed for slave DMA to decide which address to keep constant,
- * equals DMA_BIDIRECTIONAL for MEMCPY
+ * equals DMA_MEM_TO_MEM for MEMCPY
* Returns 0 or an error
* Locks: called with desc_lock held
*/
static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan,
unsigned long flags, dma_addr_t *dest, dma_addr_t *src, size_t *len,
- struct sh_desc **first, enum dma_data_direction direction)
+ struct sh_desc **first, enum dma_transfer_direction direction)
{
struct sh_desc *new;
size_t copy_size;
@@ -531,9 +552,9 @@ static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan,
new->direction = direction;
*len -= copy_size;
- if (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE)
+ if (direction == DMA_MEM_TO_MEM || direction == DMA_MEM_TO_DEV)
*src += copy_size;
- if (direction == DMA_BIDIRECTIONAL || direction == DMA_FROM_DEVICE)
+ if (direction == DMA_MEM_TO_MEM || direction == DMA_DEV_TO_MEM)
*dest += copy_size;
return new;
@@ -546,12 +567,12 @@ static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan,
* converted to scatter-gather to guarantee consistent locking and a correct
* list manipulation. For slave DMA direction carries the usual meaning, and,
* logically, the SG list is RAM and the addr variable contains slave address,
- * e.g., the FIFO I/O register. For MEMCPY direction equals DMA_BIDIRECTIONAL
+ * e.g., the FIFO I/O register. For MEMCPY direction equals DMA_MEM_TO_MEM
* and the SG list contains only one element and points at the source buffer.
*/
static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_chan,
struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
- enum dma_data_direction direction, unsigned long flags)
+ enum dma_transfer_direction direction, unsigned long flags)
{
struct scatterlist *sg;
struct sh_desc *first = NULL, *new = NULL /* compiler... */;
@@ -592,7 +613,7 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_c
dev_dbg(sh_chan->dev, "Add SG #%d@%p[%d], dma %llx\n",
i, sg, len, (unsigned long long)sg_addr);
- if (direction == DMA_FROM_DEVICE)
+ if (direction == DMA_DEV_TO_MEM)
new = sh_dmae_add_desc(sh_chan, flags,
&sg_addr, addr, &len, &first,
direction);
@@ -646,13 +667,13 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
sg_dma_address(&sg) = dma_src;
sg_dma_len(&sg) = len;
- return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_BIDIRECTIONAL,
+ return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM,
flags);
}
static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
- enum dma_data_direction direction, unsigned long flags)
+ enum dma_transfer_direction direction, unsigned long flags)
{
struct sh_dmae_slave *param;
struct sh_dmae_chan *sh_chan;
@@ -996,7 +1017,7 @@ static void dmae_do_tasklet(unsigned long data)
spin_lock_irq(&sh_chan->desc_lock);
list_for_each_entry(desc, &sh_chan->ld_queue, node) {
if (desc->mark == DESC_SUBMITTED &&
- ((desc->direction == DMA_FROM_DEVICE &&
+ ((desc->direction == DMA_DEV_TO_MEM &&
(desc->hw.dar + desc->hw.tcr) == dar_buf) ||
(desc->hw.sar + desc->hw.tcr) == sar_buf)) {
dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n",
@@ -1225,6 +1246,8 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, shdev);
+ shdev->common.dev = &pdev->dev;
+
pm_runtime_enable(&pdev->dev);
pm_runtime_get_sync(&pdev->dev);
@@ -1254,7 +1277,6 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg;
shdev->common.device_control = sh_dmae_control;
- shdev->common.dev = &pdev->dev;
/* Default transfer size of 32 bytes requires 32-byte alignment */
shdev->common.copy_align = LOG2_DEFAULT_XFER_SIZE;
@@ -1435,22 +1457,17 @@ static int sh_dmae_runtime_resume(struct device *dev)
#ifdef CONFIG_PM
static int sh_dmae_suspend(struct device *dev)
{
- struct sh_dmae_device *shdev = dev_get_drvdata(dev);
- int i;
-
- for (i = 0; i < shdev->pdata->channel_num; i++) {
- struct sh_dmae_chan *sh_chan = shdev->chan[i];
- if (sh_chan->descs_allocated)
- sh_chan->pm_error = pm_runtime_put_sync(dev);
- }
-
return 0;
}
static int sh_dmae_resume(struct device *dev)
{
struct sh_dmae_device *shdev = dev_get_drvdata(dev);
- int i;
+ int i, ret;
+
+ ret = sh_dmae_rst(shdev);
+ if (ret < 0)
+ dev_err(dev, "Failed to reset!\n");
for (i = 0; i < shdev->pdata->channel_num; i++) {
struct sh_dmae_chan *sh_chan = shdev->chan[i];
@@ -1459,9 +1476,6 @@ static int sh_dmae_resume(struct device *dev)
if (!sh_chan->descs_allocated)
continue;
- if (!sh_chan->pm_error)
- pm_runtime_get_sync(dev);
-
if (param) {
const struct sh_dmae_slave_config *cfg = param->config;
dmae_set_dmars(sh_chan, cfg->mid_rid);
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
new file mode 100644
index 000000000000..2333810d1688
--- /dev/null
+++ b/drivers/dma/sirf-dma.c
@@ -0,0 +1,707 @@
+/*
+ * DMA controller driver for CSR SiRFprimaII
+ *
+ * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
+ *
+ * Licensed under GPLv2 or later.
+ */
+
+#include <linux/module.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/sirfsoc_dma.h>
+
+#define SIRFSOC_DMA_DESCRIPTORS 16
+#define SIRFSOC_DMA_CHANNELS 16
+
+#define SIRFSOC_DMA_CH_ADDR 0x00
+#define SIRFSOC_DMA_CH_XLEN 0x04
+#define SIRFSOC_DMA_CH_YLEN 0x08
+#define SIRFSOC_DMA_CH_CTRL 0x0C
+
+#define SIRFSOC_DMA_WIDTH_0 0x100
+#define SIRFSOC_DMA_CH_VALID 0x140
+#define SIRFSOC_DMA_CH_INT 0x144
+#define SIRFSOC_DMA_INT_EN 0x148
+#define SIRFSOC_DMA_CH_LOOP_CTRL 0x150
+
+#define SIRFSOC_DMA_MODE_CTRL_BIT 4
+#define SIRFSOC_DMA_DIR_CTRL_BIT 5
+
+/* The xlen and dma_width registers are programmed in units of 4 bytes (words) */
+#define SIRFSOC_DMA_WORD_LEN 4
+
+struct sirfsoc_dma_desc {
+ struct dma_async_tx_descriptor desc;
+ struct list_head node;
+
+ /* SiRFprimaII 2D-DMA parameters */
+
+ int xlen; /* DMA xlen */
+ int ylen; /* DMA ylen */
+ int width; /* DMA width */
+ int dir;
+ bool cyclic; /* is loop DMA? */
+ u32 addr; /* DMA buffer address */
+};
+
+struct sirfsoc_dma_chan {
+ struct dma_chan chan;
+ struct list_head free;
+ struct list_head prepared;
+ struct list_head queued;
+ struct list_head active;
+ struct list_head completed;
+ dma_cookie_t completed_cookie;
+ unsigned long happened_cyclic;
+ unsigned long completed_cyclic;
+
+ /* Lock for this structure */
+ spinlock_t lock;
+
+ int mode;
+};
+
+struct sirfsoc_dma {
+ struct dma_device dma;
+ struct tasklet_struct tasklet;
+ struct sirfsoc_dma_chan channels[SIRFSOC_DMA_CHANNELS];
+ void __iomem *base;
+ int irq;
+};
+
+#define DRV_NAME "sirfsoc_dma"
+
+/* Convert struct dma_chan to struct sirfsoc_dma_chan */
+static inline
+struct sirfsoc_dma_chan *dma_chan_to_sirfsoc_dma_chan(struct dma_chan *c)
+{
+ return container_of(c, struct sirfsoc_dma_chan, chan);
+}
+
+/* Convert struct dma_chan to struct sirfsoc_dma */
+static inline struct sirfsoc_dma *dma_chan_to_sirfsoc_dma(struct dma_chan *c)
+{
+ struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(c);
+ return container_of(schan, struct sirfsoc_dma, channels[c->chan_id]);
+}
+
+/* Execute all queued DMA descriptors */
+static void sirfsoc_dma_execute(struct sirfsoc_dma_chan *schan)
+{
+ struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
+ int cid = schan->chan.chan_id;
+ struct sirfsoc_dma_desc *sdesc = NULL;
+
+ /*
+ * The lock is already held by the callers of this function, so
+ * don't take it again here
+ */
+
+ sdesc = list_first_entry(&schan->queued, struct sirfsoc_dma_desc,
+ node);
+ /* Move the first queued descriptor to active list */
+ list_move_tail(&sdesc->node, &schan->active);
+
+ /* Start the DMA transfer */
+ writel_relaxed(sdesc->width, sdma->base + SIRFSOC_DMA_WIDTH_0 +
+ cid * 4);
+ writel_relaxed(cid | (schan->mode << SIRFSOC_DMA_MODE_CTRL_BIT) |
+ (sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT),
+ sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_CTRL);
+ writel_relaxed(sdesc->xlen, sdma->base + cid * 0x10 +
+ SIRFSOC_DMA_CH_XLEN);
+ writel_relaxed(sdesc->ylen, sdma->base + cid * 0x10 +
+ SIRFSOC_DMA_CH_YLEN);
+ writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) |
+ (1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);
+
+ /*
+ * writel has an implicit memory write barrier to make sure the data
+ * is flushed into memory before starting the DMA
+ */
+ writel(sdesc->addr >> 2, sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR);
+
+ if (sdesc->cyclic) {
+ writel((1 << cid) | 1 << (cid + 16) |
+ readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL),
+ sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
+ schan->happened_cyclic = schan->completed_cyclic = 0;
+ }
+}
+
+/* Interrupt handler */
+static irqreturn_t sirfsoc_dma_irq(int irq, void *data)
+{
+ struct sirfsoc_dma *sdma = data;
+ struct sirfsoc_dma_chan *schan;
+ struct sirfsoc_dma_desc *sdesc = NULL;
+ u32 is;
+ int ch;
+
+ is = readl(sdma->base + SIRFSOC_DMA_CH_INT);
+ while ((ch = fls(is) - 1) >= 0) {
+ is &= ~(1 << ch);
+ writel_relaxed(1 << ch, sdma->base + SIRFSOC_DMA_CH_INT);
+ schan = &sdma->channels[ch];
+
+ spin_lock(&schan->lock);
+
+ sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc,
+ node);
+ if (!sdesc->cyclic) {
+ /* Execute queued descriptors */
+ list_splice_tail_init(&schan->active, &schan->completed);
+ if (!list_empty(&schan->queued))
+ sirfsoc_dma_execute(schan);
+ } else
+ schan->happened_cyclic++;
+
+ spin_unlock(&schan->lock);
+ }
+
+ /* Schedule tasklet */
+ tasklet_schedule(&sdma->tasklet);
+
+ return IRQ_HANDLED;
+}
+
+/* process completed descriptors */
+static void sirfsoc_dma_process_completed(struct sirfsoc_dma *sdma)
+{
+ dma_cookie_t last_cookie = 0;
+ struct sirfsoc_dma_chan *schan;
+ struct sirfsoc_dma_desc *sdesc;
+ struct dma_async_tx_descriptor *desc;
+ unsigned long flags;
+ unsigned long happened_cyclic;
+ LIST_HEAD(list);
+ int i;
+
+ for (i = 0; i < sdma->dma.chancnt; i++) {
+ schan = &sdma->channels[i];
+
+ /* Get all completed descriptors */
+ spin_lock_irqsave(&schan->lock, flags);
+ if (!list_empty(&schan->completed)) {
+ list_splice_tail_init(&schan->completed, &list);
+ spin_unlock_irqrestore(&schan->lock, flags);
+
+ /* Execute callbacks and run dependencies */
+ list_for_each_entry(sdesc, &list, node) {
+ desc = &sdesc->desc;
+
+ if (desc->callback)
+ desc->callback(desc->callback_param);
+
+ last_cookie = desc->cookie;
+ dma_run_dependencies(desc);
+ }
+
+ /* Free descriptors */
+ spin_lock_irqsave(&schan->lock, flags);
+ list_splice_tail_init(&list, &schan->free);
+ schan->completed_cookie = last_cookie;
+ spin_unlock_irqrestore(&schan->lock, flags);
+ } else {
+ /* for cyclic channel, desc is always in active list */
+ sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc,
+ node);
+
+ if (!sdesc || !sdesc->cyclic) {
+ /* without active cyclic DMA */
+ spin_unlock_irqrestore(&schan->lock, flags);
+ continue;
+ }
+
+ /* cyclic DMA */
+ happened_cyclic = schan->happened_cyclic;
+ spin_unlock_irqrestore(&schan->lock, flags);
+
+ desc = &sdesc->desc;
+ while (happened_cyclic != schan->completed_cyclic) {
+ if (desc->callback)
+ desc->callback(desc->callback_param);
+ schan->completed_cyclic++;
+ }
+ }
+ }
+}
+
+/* DMA Tasklet */
+static void sirfsoc_dma_tasklet(unsigned long data)
+{
+ struct sirfsoc_dma *sdma = (void *)data;
+
+ sirfsoc_dma_process_completed(sdma);
+}
+
+/* Submit descriptor to hardware */
+static dma_cookie_t sirfsoc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
+{
+ struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(txd->chan);
+ struct sirfsoc_dma_desc *sdesc;
+ unsigned long flags;
+ dma_cookie_t cookie;
+
+ sdesc = container_of(txd, struct sirfsoc_dma_desc, desc);
+
+ spin_lock_irqsave(&schan->lock, flags);
+
+ /* Move descriptor to queue */
+ list_move_tail(&sdesc->node, &schan->queued);
+
+ /* Update cookie */
+ cookie = schan->chan.cookie + 1;
+ if (cookie <= 0)
+ cookie = 1;
+
+ schan->chan.cookie = cookie;
+ sdesc->desc.cookie = cookie;
+
+ spin_unlock_irqrestore(&schan->lock, flags);
+
+ return cookie;
+}
+
+static int sirfsoc_dma_slave_config(struct sirfsoc_dma_chan *schan,
+ struct dma_slave_config *config)
+{
+ unsigned long flags;
+
+ if ((config->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
+ (config->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES))
+ return -EINVAL;
+
+ spin_lock_irqsave(&schan->lock, flags);
+ schan->mode = (config->src_maxburst == 4 ? 1 : 0);
+ spin_unlock_irqrestore(&schan->lock, flags);
+
+ return 0;
+}
+
+static int sirfsoc_dma_terminate_all(struct sirfsoc_dma_chan *schan)
+{
+ struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
+ int cid = schan->chan.chan_id;
+ unsigned long flags;
+
+ writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) &
+ ~(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);
+ writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_VALID);
+
+ writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL)
+ & ~((1 << cid) | 1 << (cid + 16)),
+ sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
+
+ spin_lock_irqsave(&schan->lock, flags);
+ list_splice_tail_init(&schan->active, &schan->free);
+ list_splice_tail_init(&schan->queued, &schan->free);
+ spin_unlock_irqrestore(&schan->lock, flags);
+
+ return 0;
+}
+
+static int sirfsoc_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ struct dma_slave_config *config;
+ struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
+
+ switch (cmd) {
+ case DMA_TERMINATE_ALL:
+ return sirfsoc_dma_terminate_all(schan);
+ case DMA_SLAVE_CONFIG:
+ config = (struct dma_slave_config *)arg;
+ return sirfsoc_dma_slave_config(schan, config);
+
+ default:
+ break;
+ }
+
+ return -ENOSYS;
+}
+
+/* Alloc channel resources */
+static int sirfsoc_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
+ struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
+ struct sirfsoc_dma_desc *sdesc;
+ unsigned long flags;
+ LIST_HEAD(descs);
+ int i;
+
+ /* Alloc descriptors for this channel */
+ for (i = 0; i < SIRFSOC_DMA_DESCRIPTORS; i++) {
+ sdesc = kzalloc(sizeof(*sdesc), GFP_KERNEL);
+ if (!sdesc) {
+ dev_notice(sdma->dma.dev, "Memory allocation error. "
+ "Allocated only %u descriptors\n", i);
+ break;
+ }
+
+ dma_async_tx_descriptor_init(&sdesc->desc, chan);
+ sdesc->desc.flags = DMA_CTRL_ACK;
+ sdesc->desc.tx_submit = sirfsoc_dma_tx_submit;
+
+ list_add_tail(&sdesc->node, &descs);
+ }
+
+ /* Return error only if no descriptors were allocated */
+ if (i == 0)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&schan->lock, flags);
+
+ list_splice_tail_init(&descs, &schan->free);
+ spin_unlock_irqrestore(&schan->lock, flags);
+
+ return i;
+}
+
+/* Free channel resources */
+static void sirfsoc_dma_free_chan_resources(struct dma_chan *chan)
+{
+ struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
+ struct sirfsoc_dma_desc *sdesc, *tmp;
+ unsigned long flags;
+ LIST_HEAD(descs);
+
+ spin_lock_irqsave(&schan->lock, flags);
+
+ /* Channel must be idle */
+ BUG_ON(!list_empty(&schan->prepared));
+ BUG_ON(!list_empty(&schan->queued));
+ BUG_ON(!list_empty(&schan->active));
+ BUG_ON(!list_empty(&schan->completed));
+
+ /* Move data */
+ list_splice_tail_init(&schan->free, &descs);
+
+ spin_unlock_irqrestore(&schan->lock, flags);
+
+ /* Free descriptors */
+ list_for_each_entry_safe(sdesc, tmp, &descs, node)
+ kfree(sdesc);
+}
+
+/* Send pending descriptor to hardware */
+static void sirfsoc_dma_issue_pending(struct dma_chan *chan)
+{
+ struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&schan->lock, flags);
+
+ if (list_empty(&schan->active) && !list_empty(&schan->queued))
+ sirfsoc_dma_execute(schan);
+
+ spin_unlock_irqrestore(&schan->lock, flags);
+}
+
+/* Check request completion status */
+static enum dma_status
+sirfsoc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
+ unsigned long flags;
+ dma_cookie_t last_used;
+ dma_cookie_t last_complete;
+
+ spin_lock_irqsave(&schan->lock, flags);
+ last_used = schan->chan.cookie;
+ last_complete = schan->completed_cookie;
+ spin_unlock_irqrestore(&schan->lock, flags);
+
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
+ return dma_async_is_complete(cookie, last_complete, last_used);
+}
+
+static struct dma_async_tx_descriptor *sirfsoc_dma_prep_interleaved(
+ struct dma_chan *chan, struct dma_interleaved_template *xt,
+ unsigned long flags)
+{
+ struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
+ struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
+ struct sirfsoc_dma_desc *sdesc = NULL;
+ unsigned long iflags;
+ int ret;
+
+ if ((xt->dir != DMA_MEM_TO_DEV) && (xt->dir != DMA_DEV_TO_MEM)) {
+ ret = -EINVAL;
+ goto err_dir;
+ }
+
+ /* Get free descriptor */
+ spin_lock_irqsave(&schan->lock, iflags);
+ if (!list_empty(&schan->free)) {
+ sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
+ node);
+ list_del(&sdesc->node);
+ }
+ spin_unlock_irqrestore(&schan->lock, iflags);
+
+ if (!sdesc) {
+ /* try to free completed descriptors */
+ sirfsoc_dma_process_completed(sdma);
+ ret = 0;
+ goto no_desc;
+ }
+
+ /* Place descriptor in prepared list */
+ spin_lock_irqsave(&schan->lock, iflags);
+
+ /*
+ * The number of chunks in a frame can only be 1 for prima2,
+ * and ylen (number of frames - 1) must be at least 0
+ */
+ if ((xt->frame_size == 1) && (xt->numf > 0)) {
+ sdesc->cyclic = 0;
+ sdesc->xlen = xt->sgl[0].size / SIRFSOC_DMA_WORD_LEN;
+ sdesc->width = (xt->sgl[0].size + xt->sgl[0].icg) /
+ SIRFSOC_DMA_WORD_LEN;
+ sdesc->ylen = xt->numf - 1;
+ if (xt->dir == DMA_MEM_TO_DEV) {
+ sdesc->addr = xt->src_start;
+ sdesc->dir = 1;
+ } else {
+ sdesc->addr = xt->dst_start;
+ sdesc->dir = 0;
+ }
+
+ list_add_tail(&sdesc->node, &schan->prepared);
+ } else {
+ pr_err("sirfsoc DMA Invalid xfer\n");
+ ret = -EINVAL;
+ goto err_xfer;
+ }
+ spin_unlock_irqrestore(&schan->lock, iflags);
+
+ return &sdesc->desc;
+err_xfer:
+ spin_unlock_irqrestore(&schan->lock, iflags);
+no_desc:
+err_dir:
+ return ERR_PTR(ret);
+}
+
+static struct dma_async_tx_descriptor *
+sirfsoc_dma_prep_cyclic(struct dma_chan *chan, dma_addr_t addr,
+ size_t buf_len, size_t period_len,
+ enum dma_transfer_direction direction)
+{
+ struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
+ struct sirfsoc_dma_desc *sdesc = NULL;
+ unsigned long iflags;
+
+ /*
+ * We only support cyclic transfers with two periods.
+ * If the X-length is set to 0, the controller runs in loop mode:
+ * the DMA address keeps increasing until it reaches the end of a loop
+ * area whose size is defined by (DMA_WIDTH x (Y_LENGTH + 1)), then
+ * wraps back to the beginning of this area.
+ * In loop mode, the DMA data region is divided into two parts, BUFA
+ * and BUFB, and the controller generates two interrupts per loop:
+ * when the DMA address reaches the end of BUFA and when it reaches
+ * the end of BUFB.
+ */
+ if (buf_len != 2 * period_len)
+ return ERR_PTR(-EINVAL);
+
+ /* Get free descriptor */
+ spin_lock_irqsave(&schan->lock, iflags);
+ if (!list_empty(&schan->free)) {
+ sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
+ node);
+ list_del(&sdesc->node);
+ }
+ spin_unlock_irqrestore(&schan->lock, iflags);
+
+ if (!sdesc)
+ return NULL;
+
+ /* Place descriptor in prepared list */
+ spin_lock_irqsave(&schan->lock, iflags);
+ sdesc->addr = addr;
+ sdesc->cyclic = 1;
+ sdesc->xlen = 0;
+ sdesc->ylen = buf_len / SIRFSOC_DMA_WORD_LEN - 1;
+ sdesc->width = 1;
+ list_add_tail(&sdesc->node, &schan->prepared);
+ spin_unlock_irqrestore(&schan->lock, iflags);
+
+ return &sdesc->desc;
+}
+
+/*
+ * The DMA controller consists of 16 independent DMA channels.
+ * Each channel is allocated to a different function
+ */
+bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id)
+{
+ unsigned int ch_nr = (unsigned int) chan_id;
+
+ if (ch_nr == chan->chan_id +
+ chan->device->dev_id * SIRFSOC_DMA_CHANNELS)
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL(sirfsoc_dma_filter_id);
+
+static int __devinit sirfsoc_dma_probe(struct platform_device *op)
+{
+ struct device_node *dn = op->dev.of_node;
+ struct device *dev = &op->dev;
+ struct dma_device *dma;
+ struct sirfsoc_dma *sdma;
+ struct sirfsoc_dma_chan *schan;
+ struct resource res;
+ ulong regs_start, regs_size;
+ u32 id;
+ int ret, i;
+
+ sdma = devm_kzalloc(dev, sizeof(*sdma), GFP_KERNEL);
+ if (!sdma) {
+ dev_err(dev, "Memory exhausted!\n");
+ return -ENOMEM;
+ }
+
+ if (of_property_read_u32(dn, "cell-index", &id)) {
+ dev_err(dev, "Fail to get DMAC index\n");
+ ret = -ENODEV;
+ goto free_mem;
+ }
+
+ sdma->irq = irq_of_parse_and_map(dn, 0);
+ if (sdma->irq == NO_IRQ) {
+ dev_err(dev, "Error mapping IRQ!\n");
+ ret = -EINVAL;
+ goto free_mem;
+ }
+
+ ret = of_address_to_resource(dn, 0, &res);
+ if (ret) {
+ dev_err(dev, "Error parsing memory region!\n");
+ goto free_mem;
+ }
+
+ regs_start = res.start;
+ regs_size = resource_size(&res);
+
+ sdma->base = devm_ioremap(dev, regs_start, regs_size);
+ if (!sdma->base) {
+ dev_err(dev, "Error mapping memory region!\n");
+ ret = -ENOMEM;
+ goto irq_dispose;
+ }
+
+ ret = devm_request_irq(dev, sdma->irq, &sirfsoc_dma_irq, 0, DRV_NAME,
+ sdma);
+ if (ret) {
+ dev_err(dev, "Error requesting IRQ!\n");
+ ret = -EINVAL;
+ goto unmap_mem;
+ }
+
+ dma = &sdma->dma;
+ dma->dev = dev;
+ dma->chancnt = SIRFSOC_DMA_CHANNELS;
+
+ dma->device_alloc_chan_resources = sirfsoc_dma_alloc_chan_resources;
+ dma->device_free_chan_resources = sirfsoc_dma_free_chan_resources;
+ dma->device_issue_pending = sirfsoc_dma_issue_pending;
+ dma->device_control = sirfsoc_dma_control;
+ dma->device_tx_status = sirfsoc_dma_tx_status;
+ dma->device_prep_interleaved_dma = sirfsoc_dma_prep_interleaved;
+ dma->device_prep_dma_cyclic = sirfsoc_dma_prep_cyclic;
+
+ INIT_LIST_HEAD(&dma->channels);
+ dma_cap_set(DMA_SLAVE, dma->cap_mask);
+ dma_cap_set(DMA_CYCLIC, dma->cap_mask);
+ dma_cap_set(DMA_INTERLEAVE, dma->cap_mask);
+ dma_cap_set(DMA_PRIVATE, dma->cap_mask);
+
+ for (i = 0; i < dma->chancnt; i++) {
+ schan = &sdma->channels[i];
+
+ schan->chan.device = dma;
+ schan->chan.cookie = 1;
+ schan->completed_cookie = schan->chan.cookie;
+
+ INIT_LIST_HEAD(&schan->free);
+ INIT_LIST_HEAD(&schan->prepared);
+ INIT_LIST_HEAD(&schan->queued);
+ INIT_LIST_HEAD(&schan->active);
+ INIT_LIST_HEAD(&schan->completed);
+
+ spin_lock_init(&schan->lock);
+ list_add_tail(&schan->chan.device_node, &dma->channels);
+ }
+
+ tasklet_init(&sdma->tasklet, sirfsoc_dma_tasklet, (unsigned long)sdma);
+
+ /* Register DMA engine */
+ dev_set_drvdata(dev, sdma);
+ ret = dma_async_device_register(dma);
+ if (ret)
+ goto free_irq;
+
+ dev_info(dev, "initialized SIRFSOC DMAC driver\n");
+
+ return 0;
+
+free_irq:
+ devm_free_irq(dev, sdma->irq, sdma);
+irq_dispose:
+ irq_dispose_mapping(sdma->irq);
+unmap_mem:
+ iounmap(sdma->base);
+free_mem:
+ devm_kfree(dev, sdma);
+ return ret;
+}
+
+static int __devexit sirfsoc_dma_remove(struct platform_device *op)
+{
+ struct device *dev = &op->dev;
+ struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
+
+ dma_async_device_unregister(&sdma->dma);
+ devm_free_irq(dev, sdma->irq, sdma);
+ irq_dispose_mapping(sdma->irq);
+ iounmap(sdma->base);
+ devm_kfree(dev, sdma);
+ return 0;
+}
+
+static struct of_device_id sirfsoc_dma_match[] = {
+ { .compatible = "sirf,prima2-dmac", },
+ {},
+};
+
+static struct platform_driver sirfsoc_dma_driver = {
+ .probe = sirfsoc_dma_probe,
+ .remove = __devexit_p(sirfsoc_dma_remove),
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = sirfsoc_dma_match,
+ },
+};
+
+module_platform_driver(sirfsoc_dma_driver);
+
+MODULE_AUTHOR("Rongjun Ying <rongjun.ying@csr.com>, "
+ "Barry Song <baohua.song@csr.com>");
+MODULE_DESCRIPTION("SIRFSOC DMA control driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 13259cad0ceb..cc5ecbc067a3 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -14,6 +14,8 @@
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
#include <linux/err.h>
#include <linux/amba/bus.h>
@@ -32,6 +34,9 @@
/* Maximum iterations taken before giving up suspending a channel */
#define D40_SUSPEND_MAX_IT 500
+/* Milliseconds */
+#define DMA40_AUTOSUSPEND_DELAY 100
+
/* Hardware requirement on LCLA alignment */
#define LCLA_ALIGNMENT 0x40000
@@ -62,6 +67,55 @@ enum d40_command {
D40_DMA_SUSPENDED = 3
};
+/*
+ * These are the registers that have to be saved and later restored
+ * when the DMA hw is powered off.
+ * TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works.
+ */
+static u32 d40_backup_regs[] = {
+ D40_DREG_LCPA,
+ D40_DREG_LCLA,
+ D40_DREG_PRMSE,
+ D40_DREG_PRMSO,
+ D40_DREG_PRMOE,
+ D40_DREG_PRMOO,
+};
+
+#define BACKUP_REGS_SZ ARRAY_SIZE(d40_backup_regs)
+
+/* TODO: Check if all these registers have to be saved/restored on dma40 v3 */
+static u32 d40_backup_regs_v3[] = {
+ D40_DREG_PSEG1,
+ D40_DREG_PSEG2,
+ D40_DREG_PSEG3,
+ D40_DREG_PSEG4,
+ D40_DREG_PCEG1,
+ D40_DREG_PCEG2,
+ D40_DREG_PCEG3,
+ D40_DREG_PCEG4,
+ D40_DREG_RSEG1,
+ D40_DREG_RSEG2,
+ D40_DREG_RSEG3,
+ D40_DREG_RSEG4,
+ D40_DREG_RCEG1,
+ D40_DREG_RCEG2,
+ D40_DREG_RCEG3,
+ D40_DREG_RCEG4,
+};
+
+#define BACKUP_REGS_SZ_V3 ARRAY_SIZE(d40_backup_regs_v3)
+
+static u32 d40_backup_regs_chan[] = {
+ D40_CHAN_REG_SSCFG,
+ D40_CHAN_REG_SSELT,
+ D40_CHAN_REG_SSPTR,
+ D40_CHAN_REG_SSLNK,
+ D40_CHAN_REG_SDCFG,
+ D40_CHAN_REG_SDELT,
+ D40_CHAN_REG_SDPTR,
+ D40_CHAN_REG_SDLNK,
+};
+
/**
* struct d40_lli_pool - Structure for keeping LLIs in memory
*
@@ -96,7 +150,7 @@ struct d40_lli_pool {
* during a transfer.
* @node: List entry.
* @is_in_client_list: true if the client owns this descriptor.
- * the previous one.
+ * @cyclic: true if this is a cyclic job
*
* This descriptor is used for both logical and physical transfers.
*/
@@ -143,6 +197,7 @@ struct d40_lcla_pool {
* channels.
*
* @lock: A lock protection this entity.
+ * @reserved: True if the channel is reserved, e.g. used by the secure world.
* @num: The physical channel number of this entity.
* @allocated_src: Bit mapped to show which src event line's are mapped to
* this physical channel. Can also be free or physically allocated.
@@ -152,6 +207,7 @@ struct d40_lcla_pool {
*/
struct d40_phy_res {
spinlock_t lock;
+ bool reserved;
int num;
u32 allocated_src;
u32 allocated_dst;
@@ -185,7 +241,6 @@ struct d40_base;
* @src_def_cfg: Default cfg register setting for src.
* @dst_def_cfg: Default cfg register setting for dst.
* @log_def: Default logical channel settings.
- * @lcla: Space for one dst src pair for logical channel transfers.
* @lcpa: Pointer to dst and src lcpa settings.
* @runtime_addr: runtime configured address.
* @runtime_direction: runtime configured direction.
@@ -217,7 +272,7 @@ struct d40_chan {
struct d40_log_lli_full *lcpa;
/* Runtime reconfiguration */
dma_addr_t runtime_addr;
- enum dma_data_direction runtime_direction;
+ enum dma_transfer_direction runtime_direction;
};
/**
@@ -241,6 +296,7 @@ struct d40_chan {
* @dma_both: dma_device channels that can do both memcpy and slave transfers.
* @dma_slave: dma_device channels that can do only do slave transfers.
* @dma_memcpy: dma_device channels that can do only do memcpy transfers.
+ * @phy_chans: Room for all possible physical channels in system.
* @log_chans: Room for all possible logical channels in system.
* @lookup_log_chans: Used to map interrupt number to logical channel. Points
* to log_chans entries.
@@ -248,12 +304,20 @@ struct d40_chan {
* to phy_chans entries.
* @plat_data: Pointer to provided platform_data which is the driver
* configuration.
+ * @lcpa_regulator: Pointer to hold the regulator for the esram bank for lcla.
* @phy_res: Vector containing all physical channels.
* @lcla_pool: lcla pool settings and data.
* @lcpa_base: The virtual mapped address of LCPA.
* @phy_lcpa: The physical address of the LCPA.
* @lcpa_size: The size of the LCPA area.
* @desc_slab: cache for descriptors.
+ * @reg_val_backup: Here the values of some hardware registers are stored
+ * before the DMA is powered off. They are restored when the power is back on.
+ * @reg_val_backup_v3: Backup of registers that only exist on dma40 v3 and
+ * later.
+ * @reg_val_backup_chan: Backup data for standard channel parameter registers.
+ * @gcc_pwr_off_mask: Mask to maintain the channels that can be turned off.
+ * @initialized: true if the dma has been initialized
*/
struct d40_base {
spinlock_t interrupt_lock;
@@ -275,6 +339,7 @@ struct d40_base {
struct d40_chan **lookup_log_chans;
struct d40_chan **lookup_phy_chans;
struct stedma40_platform_data *plat_data;
+ struct regulator *lcpa_regulator;
/* Physical half channels */
struct d40_phy_res *phy_res;
struct d40_lcla_pool lcla_pool;
@@ -282,6 +347,11 @@ struct d40_base {
dma_addr_t phy_lcpa;
resource_size_t lcpa_size;
struct kmem_cache *desc_slab;
+ u32 reg_val_backup[BACKUP_REGS_SZ];
+ u32 reg_val_backup_v3[BACKUP_REGS_SZ_V3];
+ u32 *reg_val_backup_chan;
+ u16 gcc_pwr_off_mask;
+ bool initialized;
};
/**
@@ -479,13 +549,14 @@ static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
struct d40_desc *d;
struct d40_desc *_d;
- list_for_each_entry_safe(d, _d, &d40c->client, node)
+ list_for_each_entry_safe(d, _d, &d40c->client, node) {
if (async_tx_test_ack(&d->txd)) {
d40_desc_remove(d);
desc = d;
memset(desc, 0, sizeof(*desc));
break;
}
+ }
}
if (!desc)
@@ -536,6 +607,7 @@ static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
bool cyclic = desc->cyclic;
int curr_lcla = -EINVAL;
int first_lcla = 0;
+ bool use_esram_lcla = chan->base->plat_data->use_esram_lcla;
bool linkback;
/*
@@ -608,11 +680,16 @@ static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
&lli->src[lli_current],
next_lcla, flags);
- dma_sync_single_range_for_device(chan->base->dev,
- pool->dma_addr, lcla_offset,
- 2 * sizeof(struct d40_log_lli),
- DMA_TO_DEVICE);
-
+ /*
+ * Cache maintenance is not needed if lcla is
+ * mapped in esram
+ */
+ if (!use_esram_lcla) {
+ dma_sync_single_range_for_device(chan->base->dev,
+ pool->dma_addr, lcla_offset,
+ 2 * sizeof(struct d40_log_lli),
+ DMA_TO_DEVICE);
+ }
curr_lcla = next_lcla;
if (curr_lcla == -EINVAL || curr_lcla == first_lcla) {
@@ -740,7 +817,61 @@ static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len,
return len;
}
-/* Support functions for logical channels */
+
+#ifdef CONFIG_PM
+static void dma40_backup(void __iomem *baseaddr, u32 *backup,
+ u32 *regaddr, int num, bool save)
+{
+ int i;
+
+ for (i = 0; i < num; i++) {
+ void __iomem *addr = baseaddr + regaddr[i];
+
+ if (save)
+ backup[i] = readl_relaxed(addr);
+ else
+ writel_relaxed(backup[i], addr);
+ }
+}
+
+static void d40_save_restore_registers(struct d40_base *base, bool save)
+{
+ int i;
+
+ /* Save/Restore channel specific registers */
+ for (i = 0; i < base->num_phy_chans; i++) {
+ void __iomem *addr;
+ int idx;
+
+ if (base->phy_res[i].reserved)
+ continue;
+
+ addr = base->virtbase + D40_DREG_PCBASE + i * D40_DREG_PCDELTA;
+ idx = i * ARRAY_SIZE(d40_backup_regs_chan);
+
+ dma40_backup(addr, &base->reg_val_backup_chan[idx],
+ d40_backup_regs_chan,
+ ARRAY_SIZE(d40_backup_regs_chan),
+ save);
+ }
+
+ /* Save/Restore global registers */
+ dma40_backup(base->virtbase, base->reg_val_backup,
+ d40_backup_regs, ARRAY_SIZE(d40_backup_regs),
+ save);
+
+ /* Save/Restore registers only existing on dma40 v3 and later */
+ if (base->rev >= 3)
+ dma40_backup(base->virtbase, base->reg_val_backup_v3,
+ d40_backup_regs_v3,
+ ARRAY_SIZE(d40_backup_regs_v3),
+ save);
+}
+#else
+static void d40_save_restore_registers(struct d40_base *base, bool save)
+{
+}
+#endif
static int d40_channel_execute_command(struct d40_chan *d40c,
enum d40_command command)
@@ -973,6 +1104,10 @@ static void d40_config_write(struct d40_chan *d40c)
/* Set LIDX for lcla */
writel(lidx, chanbase + D40_CHAN_REG_SSELT);
writel(lidx, chanbase + D40_CHAN_REG_SDELT);
+
+ /* Clear LNK which will be used by d40_chan_has_events() */
+ writel(0, chanbase + D40_CHAN_REG_SSLNK);
+ writel(0, chanbase + D40_CHAN_REG_SDLNK);
}
}
@@ -1013,6 +1148,7 @@ static int d40_pause(struct d40_chan *d40c)
if (!d40c->busy)
return 0;
+ pm_runtime_get_sync(d40c->base->dev);
spin_lock_irqsave(&d40c->lock, flags);
res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
@@ -1025,7 +1161,8 @@ static int d40_pause(struct d40_chan *d40c)
D40_DMA_RUN);
}
}
-
+ pm_runtime_mark_last_busy(d40c->base->dev);
+ pm_runtime_put_autosuspend(d40c->base->dev);
spin_unlock_irqrestore(&d40c->lock, flags);
return res;
}
@@ -1039,7 +1176,7 @@ static int d40_resume(struct d40_chan *d40c)
return 0;
spin_lock_irqsave(&d40c->lock, flags);
-
+ pm_runtime_get_sync(d40c->base->dev);
if (d40c->base->rev == 0)
if (chan_is_logical(d40c)) {
res = d40_channel_execute_command(d40c,
@@ -1057,6 +1194,8 @@ static int d40_resume(struct d40_chan *d40c)
}
no_suspend:
+ pm_runtime_mark_last_busy(d40c->base->dev);
+ pm_runtime_put_autosuspend(d40c->base->dev);
spin_unlock_irqrestore(&d40c->lock, flags);
return res;
}
@@ -1129,7 +1268,10 @@ static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
d40d = d40_first_queued(d40c);
if (d40d != NULL) {
- d40c->busy = true;
+ if (!d40c->busy)
+ d40c->busy = true;
+
+ pm_runtime_get_sync(d40c->base->dev);
/* Remove from queue */
d40_desc_remove(d40d);
@@ -1190,6 +1332,8 @@ static void dma_tc_handle(struct d40_chan *d40c)
if (d40_queue_start(d40c) == NULL)
d40c->busy = false;
+ pm_runtime_mark_last_busy(d40c->base->dev);
+ pm_runtime_put_autosuspend(d40c->base->dev);
}
d40c->pending_tx++;
@@ -1405,11 +1549,16 @@ static int d40_validate_conf(struct d40_chan *d40c,
return res;
}
-static bool d40_alloc_mask_set(struct d40_phy_res *phy, bool is_src,
- int log_event_line, bool is_log)
+static bool d40_alloc_mask_set(struct d40_phy_res *phy,
+ bool is_src, int log_event_line, bool is_log,
+ bool *first_user)
{
unsigned long flags;
spin_lock_irqsave(&phy->lock, flags);
+
+ *first_user = ((phy->allocated_src | phy->allocated_dst)
+ == D40_ALLOC_FREE);
+
if (!is_log) {
/* Physical interrupts are masked per physical full channel */
if (phy->allocated_src == D40_ALLOC_FREE &&
@@ -1490,7 +1639,7 @@ out:
return is_free;
}
-static int d40_allocate_channel(struct d40_chan *d40c)
+static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user)
{
int dev_type;
int event_group;
@@ -1526,7 +1675,8 @@ static int d40_allocate_channel(struct d40_chan *d40c)
for (i = 0; i < d40c->base->num_phy_chans; i++) {
if (d40_alloc_mask_set(&phys[i], is_src,
- 0, is_log))
+ 0, is_log,
+ first_phy_user))
goto found_phy;
}
} else
@@ -1536,7 +1686,8 @@ static int d40_allocate_channel(struct d40_chan *d40c)
if (d40_alloc_mask_set(&phys[i],
is_src,
0,
- is_log))
+ is_log,
+ first_phy_user))
goto found_phy;
}
}
@@ -1552,6 +1703,25 @@ found_phy:
/* Find logical channel */
for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
int phy_num = j + event_group * 2;
+
+ if (d40c->dma_cfg.use_fixed_channel) {
+ i = d40c->dma_cfg.phy_channel;
+
+ if ((i != phy_num) && (i != phy_num + 1)) {
+ dev_err(chan2dev(d40c),
+ "invalid fixed phy channel %d\n", i);
+ return -EINVAL;
+ }
+
+ if (d40_alloc_mask_set(&phys[i], is_src, event_line,
+ is_log, first_phy_user))
+ goto found_log;
+
+ dev_err(chan2dev(d40c),
+ "could not allocate fixed phy channel %d\n", i);
+ return -EINVAL;
+ }
+
/*
* Spread logical channels across all available physical rather
* than pack every logical channel at the first available phy
@@ -1560,13 +1730,15 @@ found_phy:
if (is_src) {
for (i = phy_num; i < phy_num + 2; i++) {
if (d40_alloc_mask_set(&phys[i], is_src,
- event_line, is_log))
+ event_line, is_log,
+ first_phy_user))
goto found_log;
}
} else {
for (i = phy_num + 1; i >= phy_num; i--) {
if (d40_alloc_mask_set(&phys[i], is_src,
- event_line, is_log))
+ event_line, is_log,
+ first_phy_user))
goto found_log;
}
}
@@ -1643,10 +1815,11 @@ static int d40_free_dma(struct d40_chan *d40c)
return -EINVAL;
}
+ pm_runtime_get_sync(d40c->base->dev);
res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
if (res) {
chan_err(d40c, "suspend failed\n");
- return res;
+ goto out;
}
if (chan_is_logical(d40c)) {
@@ -1664,13 +1837,11 @@ static int d40_free_dma(struct d40_chan *d40c)
if (d40_chan_has_events(d40c)) {
res = d40_channel_execute_command(d40c,
D40_DMA_RUN);
- if (res) {
+ if (res)
chan_err(d40c,
"Executing RUN command\n");
- return res;
- }
}
- return 0;
+ goto out;
}
} else {
(void) d40_alloc_mask_free(phy, is_src, 0);
@@ -1680,13 +1851,23 @@ static int d40_free_dma(struct d40_chan *d40c)
res = d40_channel_execute_command(d40c, D40_DMA_STOP);
if (res) {
chan_err(d40c, "Failed to stop channel\n");
- return res;
+ goto out;
}
+
+ if (d40c->busy) {
+ pm_runtime_mark_last_busy(d40c->base->dev);
+ pm_runtime_put_autosuspend(d40c->base->dev);
+ }
+
+ d40c->busy = false;
d40c->phy_chan = NULL;
d40c->configured = false;
d40c->base->lookup_phy_chans[phy->num] = NULL;
+out:
- return 0;
+ pm_runtime_mark_last_busy(d40c->base->dev);
+ pm_runtime_put_autosuspend(d40c->base->dev);
+ return res;
}
static bool d40_is_paused(struct d40_chan *d40c)
@@ -1855,7 +2036,7 @@ err:
}
static dma_addr_t
-d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction)
+d40_get_dev_addr(struct d40_chan *chan, enum dma_transfer_direction direction)
{
struct stedma40_platform_data *plat = chan->base->plat_data;
struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
@@ -1864,9 +2045,9 @@ d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction)
if (chan->runtime_addr)
return chan->runtime_addr;
- if (direction == DMA_FROM_DEVICE)
+ if (direction == DMA_DEV_TO_MEM)
addr = plat->dev_rx[cfg->src_dev_type];
- else if (direction == DMA_TO_DEVICE)
+ else if (direction == DMA_MEM_TO_DEV)
addr = plat->dev_tx[cfg->dst_dev_type];
return addr;
@@ -1875,7 +2056,7 @@ d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction)
static struct dma_async_tx_descriptor *
d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
struct scatterlist *sg_dst, unsigned int sg_len,
- enum dma_data_direction direction, unsigned long dma_flags)
+ enum dma_transfer_direction direction, unsigned long dma_flags)
{
struct d40_chan *chan = container_of(dchan, struct d40_chan, chan);
dma_addr_t src_dev_addr = 0;
@@ -1902,9 +2083,9 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
if (direction != DMA_NONE) {
dma_addr_t dev_addr = d40_get_dev_addr(chan, direction);
- if (direction == DMA_FROM_DEVICE)
+ if (direction == DMA_DEV_TO_MEM)
src_dev_addr = dev_addr;
- else if (direction == DMA_TO_DEVICE)
+ else if (direction == DMA_MEM_TO_DEV)
dst_dev_addr = dev_addr;
}
@@ -2011,14 +2192,15 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
goto fail;
}
}
- is_free_phy = (d40c->phy_chan == NULL);
- err = d40_allocate_channel(d40c);
+ err = d40_allocate_channel(d40c, &is_free_phy);
if (err) {
chan_err(d40c, "Failed to allocate channel\n");
+ d40c->configured = false;
goto fail;
}
+ pm_runtime_get_sync(d40c->base->dev);
/* Fill in basic CFG register values */
d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
&d40c->dst_def_cfg, chan_is_logical(d40c));
@@ -2038,6 +2220,12 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
}
+ dev_dbg(chan2dev(d40c), "allocated %s channel (phy %d%s)\n",
+ chan_is_logical(d40c) ? "logical" : "physical",
+ d40c->phy_chan->num,
+ d40c->dma_cfg.use_fixed_channel ? ", fixed" : "");
+
+
/*
* Only write channel configuration to the DMA if the physical
* resource is free. In case of multiple logical channels
@@ -2046,6 +2234,8 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
if (is_free_phy)
d40_config_write(d40c);
fail:
+ pm_runtime_mark_last_busy(d40c->base->dev);
+ pm_runtime_put_autosuspend(d40c->base->dev);
spin_unlock_irqrestore(&d40c->lock, flags);
return err;
}
@@ -2108,10 +2298,10 @@ d40_prep_memcpy_sg(struct dma_chan *chan,
static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
struct scatterlist *sgl,
unsigned int sg_len,
- enum dma_data_direction direction,
+ enum dma_transfer_direction direction,
unsigned long dma_flags)
{
- if (direction != DMA_FROM_DEVICE && direction != DMA_TO_DEVICE)
+ if (direction != DMA_DEV_TO_MEM && direction != DMA_MEM_TO_DEV)
return NULL;
return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags);
@@ -2120,7 +2310,7 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
static struct dma_async_tx_descriptor *
dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
size_t buf_len, size_t period_len,
- enum dma_data_direction direction)
+ enum dma_transfer_direction direction)
{
unsigned int periods = buf_len / period_len;
struct dma_async_tx_descriptor *txd;
@@ -2269,7 +2459,7 @@ static int d40_set_runtime_config(struct dma_chan *chan,
dst_addr_width = config->dst_addr_width;
dst_maxburst = config->dst_maxburst;
- if (config->direction == DMA_FROM_DEVICE) {
+ if (config->direction == DMA_DEV_TO_MEM) {
dma_addr_t dev_addr_rx =
d40c->base->plat_data->dev_rx[cfg->src_dev_type];
@@ -2292,7 +2482,7 @@ static int d40_set_runtime_config(struct dma_chan *chan,
if (dst_maxburst == 0)
dst_maxburst = src_maxburst;
- } else if (config->direction == DMA_TO_DEVICE) {
+ } else if (config->direction == DMA_MEM_TO_DEV) {
dma_addr_t dev_addr_tx =
d40c->base->plat_data->dev_tx[cfg->dst_dev_type];
@@ -2357,7 +2547,7 @@ static int d40_set_runtime_config(struct dma_chan *chan,
"configured channel %s for %s, data width %d/%d, "
"maxburst %d/%d elements, LE, no flow control\n",
dma_chan_name(chan),
- (config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
+ (config->direction == DMA_DEV_TO_MEM) ? "RX" : "TX",
src_addr_width, dst_addr_width,
src_maxburst, dst_maxburst);
@@ -2519,6 +2709,72 @@ failure1:
return err;
}
+/* Suspend resume functionality */
+#ifdef CONFIG_PM
+static int dma40_pm_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct d40_base *base = platform_get_drvdata(pdev);
+ int ret = 0;
+ if (!pm_runtime_suspended(dev))
+ return -EBUSY;
+
+ if (base->lcpa_regulator)
+ ret = regulator_disable(base->lcpa_regulator);
+ return ret;
+}
+
+static int dma40_runtime_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct d40_base *base = platform_get_drvdata(pdev);
+
+ d40_save_restore_registers(base, true);
+
+ /* Don't disable/enable clocks for v1 due to HW bugs */
+ if (base->rev != 1)
+ writel_relaxed(base->gcc_pwr_off_mask,
+ base->virtbase + D40_DREG_GCC);
+
+ return 0;
+}
+
+static int dma40_runtime_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct d40_base *base = platform_get_drvdata(pdev);
+
+ if (base->initialized)
+ d40_save_restore_registers(base, false);
+
+ writel_relaxed(D40_DREG_GCC_ENABLE_ALL,
+ base->virtbase + D40_DREG_GCC);
+ return 0;
+}
+
+static int dma40_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct d40_base *base = platform_get_drvdata(pdev);
+ int ret = 0;
+
+ if (base->lcpa_regulator)
+ ret = regulator_enable(base->lcpa_regulator);
+
+ return ret;
+}
+
+static const struct dev_pm_ops dma40_pm_ops = {
+ .suspend = dma40_pm_suspend,
+ .runtime_suspend = dma40_runtime_suspend,
+ .runtime_resume = dma40_runtime_resume,
+ .resume = dma40_resume,
+};
+#define DMA40_PM_OPS (&dma40_pm_ops)
+#else
+#define DMA40_PM_OPS NULL
+#endif
+
/* Initialization functions. */
static int __init d40_phy_res_init(struct d40_base *base)
@@ -2527,6 +2783,7 @@ static int __init d40_phy_res_init(struct d40_base *base)
int num_phy_chans_avail = 0;
u32 val[2];
int odd_even_bit = -2;
+ int gcc = D40_DREG_GCC_ENA;
val[0] = readl(base->virtbase + D40_DREG_PRSME);
val[1] = readl(base->virtbase + D40_DREG_PRSMO);
@@ -2538,9 +2795,17 @@ static int __init d40_phy_res_init(struct d40_base *base)
/* Mark security only channels as occupied */
base->phy_res[i].allocated_src = D40_ALLOC_PHY;
base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
+ base->phy_res[i].reserved = true;
+ gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
+ D40_DREG_GCC_SRC);
+ gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
+ D40_DREG_GCC_DST);
+
+
} else {
base->phy_res[i].allocated_src = D40_ALLOC_FREE;
base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
+ base->phy_res[i].reserved = false;
num_phy_chans_avail++;
}
spin_lock_init(&base->phy_res[i].lock);
@@ -2552,6 +2817,11 @@ static int __init d40_phy_res_init(struct d40_base *base)
base->phy_res[chan].allocated_src = D40_ALLOC_PHY;
base->phy_res[chan].allocated_dst = D40_ALLOC_PHY;
+ base->phy_res[chan].reserved = true;
+ gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
+ D40_DREG_GCC_SRC);
+ gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
+ D40_DREG_GCC_DST);
num_phy_chans_avail--;
}
@@ -2572,6 +2842,15 @@ static int __init d40_phy_res_init(struct d40_base *base)
val[0] = val[0] >> 2;
}
+ /*
+ * To keep things simple, enable all clocks initially.
+ * The clocks will be managed later, after channel allocation.
+ * The clocks for the event lines on which reserved channels exist
+ * are not managed here.
+ */
+ writel(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC);
+ base->gcc_pwr_off_mask = gcc;
+
return num_phy_chans_avail;
}
@@ -2699,10 +2978,15 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
goto failure;
}
- base->lcla_pool.alloc_map = kzalloc(num_phy_chans *
- sizeof(struct d40_desc *) *
- D40_LCLA_LINK_PER_EVENT_GRP,
+ base->reg_val_backup_chan = kmalloc(base->num_phy_chans *
+ sizeof(d40_backup_regs_chan),
GFP_KERNEL);
+ if (!base->reg_val_backup_chan)
+ goto failure;
+
+ base->lcla_pool.alloc_map =
+ kzalloc(num_phy_chans * sizeof(struct d40_desc *)
+ * D40_LCLA_LINK_PER_EVENT_GRP, GFP_KERNEL);
if (!base->lcla_pool.alloc_map)
goto failure;
@@ -2741,9 +3025,9 @@ failure:
static void __init d40_hw_init(struct d40_base *base)
{
- static const struct d40_reg_val dma_init_reg[] = {
+ static struct d40_reg_val dma_init_reg[] = {
/* Clock every part of the DMA block from start */
- { .reg = D40_DREG_GCC, .val = 0x0000ff01},
+ { .reg = D40_DREG_GCC, .val = D40_DREG_GCC_ENABLE_ALL},
/* Interrupts on all logical channels */
{ .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
@@ -2943,11 +3227,31 @@ static int __init d40_probe(struct platform_device *pdev)
d40_err(&pdev->dev, "Failed to ioremap LCPA region\n");
goto failure;
}
+ /* If lcla has to be located in ESRAM we don't need to allocate */
+ if (base->plat_data->use_esram_lcla) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "lcla_esram");
+ if (!res) {
+ ret = -ENOENT;
+ d40_err(&pdev->dev,
+ "No \"lcla_esram\" memory resource\n");
+ goto failure;
+ }
+ base->lcla_pool.base = ioremap(res->start,
+ resource_size(res));
+ if (!base->lcla_pool.base) {
+ ret = -ENOMEM;
+ d40_err(&pdev->dev, "Failed to ioremap LCLA region\n");
+ goto failure;
+ }
+ writel(res->start, base->virtbase + D40_DREG_LCLA);
- ret = d40_lcla_allocate(base);
- if (ret) {
- d40_err(&pdev->dev, "Failed to allocate LCLA area\n");
- goto failure;
+ } else {
+ ret = d40_lcla_allocate(base);
+ if (ret) {
+ d40_err(&pdev->dev, "Failed to allocate LCLA area\n");
+ goto failure;
+ }
}
spin_lock_init(&base->lcla_pool.lock);
@@ -2960,6 +3264,32 @@ static int __init d40_probe(struct platform_device *pdev)
goto failure;
}
+ pm_runtime_irq_safe(base->dev);
+ pm_runtime_set_autosuspend_delay(base->dev, DMA40_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(base->dev);
+ pm_runtime_enable(base->dev);
+ pm_runtime_resume(base->dev);
+
+ if (base->plat_data->use_esram_lcla) {
+
+ base->lcpa_regulator = regulator_get(base->dev, "lcla_esram");
+ if (IS_ERR(base->lcpa_regulator)) {
+ d40_err(&pdev->dev, "Failed to get lcpa_regulator\n");
+ base->lcpa_regulator = NULL;
+ goto failure;
+ }
+
+ ret = regulator_enable(base->lcpa_regulator);
+ if (ret) {
+ d40_err(&pdev->dev,
+ "Failed to enable lcpa_regulator\n");
+ regulator_put(base->lcpa_regulator);
+ base->lcpa_regulator = NULL;
+ goto failure;
+ }
+ }
+
+ base->initialized = true;
err = d40_dmaengine_init(base, num_reserved_chans);
if (err)
goto failure;
@@ -2976,6 +3306,11 @@ failure:
if (base->virtbase)
iounmap(base->virtbase);
+ if (base->lcla_pool.base && base->plat_data->use_esram_lcla) {
+ iounmap(base->lcla_pool.base);
+ base->lcla_pool.base = NULL;
+ }
+
if (base->lcla_pool.dma_addr)
dma_unmap_single(base->dev, base->lcla_pool.dma_addr,
SZ_1K * base->num_phy_chans,
@@ -2998,6 +3333,11 @@ failure:
clk_put(base->clk);
}
+ if (base->lcpa_regulator) {
+ regulator_disable(base->lcpa_regulator);
+ regulator_put(base->lcpa_regulator);
+ }
+
kfree(base->lcla_pool.alloc_map);
kfree(base->lookup_log_chans);
kfree(base->lookup_phy_chans);
@@ -3013,6 +3353,7 @@ static struct platform_driver d40_driver = {
.driver = {
.owner = THIS_MODULE,
.name = D40_NAME,
+ .pm = DMA40_PM_OPS,
},
};
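The runtime-PM conversion above follows the usual autosuspend idiom: take a reference before touching the hardware, then drop it with the autosuspend helpers so the DMA block is only powered down after DMA40_AUTOSUSPEND_DELAY ms of inactivity, with the backed-up registers restored on the next wake-up. A condensed sketch of the pattern used in d40_pause(), d40_resume() and friends:

	pm_runtime_get_sync(d40c->base->dev);	/* powers up, registers restored */

	/* ... access channel registers, e.g. d40_channel_execute_command() ... */

	pm_runtime_mark_last_busy(d40c->base->dev);
	pm_runtime_put_autosuspend(d40c->base->dev);	/* may suspend after the delay */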
diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h
index b44c455158de..8d3d490968a3 100644
--- a/drivers/dma/ste_dma40_ll.h
+++ b/drivers/dma/ste_dma40_ll.h
@@ -16,6 +16,8 @@
#define D40_TYPE_TO_GROUP(type) (type / 16)
#define D40_TYPE_TO_EVENT(type) (type % 16)
+#define D40_GROUP_SIZE 8
+#define D40_PHYS_TO_GROUP(phys) (((phys) & (D40_GROUP_SIZE - 1)) / 2)
/* Most bits of the CFG register are the same in log as in phy mode */
#define D40_SREG_CFG_MST_POS 15
@@ -123,6 +125,15 @@
/* DMA Register Offsets */
#define D40_DREG_GCC 0x000
+#define D40_DREG_GCC_ENA 0x1
+/* This assumes that there are only 4 event groups */
+#define D40_DREG_GCC_ENABLE_ALL 0xff01
+#define D40_DREG_GCC_EVTGRP_POS 8
+#define D40_DREG_GCC_SRC 0
+#define D40_DREG_GCC_DST 1
+#define D40_DREG_GCC_EVTGRP_ENA(x, y) \
+ (1 << (D40_DREG_GCC_EVTGRP_POS + 2 * x + y))
+
#define D40_DREG_PRTYP 0x004
#define D40_DREG_PRSME 0x008
#define D40_DREG_PRSMO 0x00C
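The new GCC (global clock control) helpers let d40_phy_res_init() compute which event-group clocks must stay on for reserved channels. As a worked example, with values chosen purely for illustration: physical channel 5 belongs to event group D40_PHYS_TO_GROUP(5) = (5 & 7) / 2 = 2, so keeping its source and destination clocks enabled sets bits 1 << (8 + 2*2 + 0) and 1 << (8 + 2*2 + 1) on top of D40_DREG_GCC_ENA:

	int gcc = D40_DREG_GCC_ENA;

	gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(5), D40_DREG_GCC_SRC);
	gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(5), D40_DREG_GCC_DST);
	/* gcc == 0x3001: group-2 src/dst clocks kept on, global enable set */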
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index a4a398f2ef61..a6f9c1684a0f 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -90,7 +90,7 @@ struct timb_dma_chan {
struct list_head queue;
struct list_head free_list;
unsigned int bytes_per_line;
- enum dma_data_direction direction;
+ enum dma_transfer_direction direction;
unsigned int descs; /* Descriptors to allocate */
unsigned int desc_elems; /* number of elems per descriptor */
};
@@ -166,10 +166,10 @@ static void __td_unmap_desc(struct timb_dma_chan *td_chan, const u8 *dma_desc,
if (single)
dma_unmap_single(chan2dev(&td_chan->chan), addr, len,
- td_chan->direction);
+ DMA_TO_DEVICE);
else
dma_unmap_page(chan2dev(&td_chan->chan), addr, len,
- td_chan->direction);
+ DMA_TO_DEVICE);
}
static void __td_unmap_descs(struct timb_dma_desc *td_desc, bool single)
@@ -235,7 +235,7 @@ static void __td_start_dma(struct timb_dma_chan *td_chan)
"td_chan: %p, chan: %d, membase: %p\n",
td_chan, td_chan->chan.chan_id, td_chan->membase);
- if (td_chan->direction == DMA_FROM_DEVICE) {
+ if (td_chan->direction == DMA_DEV_TO_MEM) {
/* descriptor address */
iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_DHAR);
@@ -278,7 +278,7 @@ static void __td_finish(struct timb_dma_chan *td_chan)
txd->cookie);
/* make sure to stop the transfer */
- if (td_chan->direction == DMA_FROM_DEVICE)
+ if (td_chan->direction == DMA_DEV_TO_MEM)
iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_ER);
/* Currently no support for stopping DMA transfers
else
@@ -558,7 +558,7 @@ static void td_issue_pending(struct dma_chan *chan)
static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan,
struct scatterlist *sgl, unsigned int sg_len,
- enum dma_data_direction direction, unsigned long flags)
+ enum dma_transfer_direction direction, unsigned long flags)
{
struct timb_dma_chan *td_chan =
container_of(chan, struct timb_dma_chan, chan);
@@ -606,7 +606,7 @@ static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan,
}
dma_sync_single_for_device(chan2dmadev(chan), td_desc->txd.phys,
- td_desc->desc_list_len, DMA_TO_DEVICE);
+ td_desc->desc_list_len, DMA_MEM_TO_DEV);
return &td_desc->txd;
}
@@ -775,8 +775,8 @@ static int __devinit td_probe(struct platform_device *pdev)
td_chan->descs = pchan->descriptors;
td_chan->desc_elems = pchan->descriptor_elements;
td_chan->bytes_per_line = pchan->bytes_per_line;
- td_chan->direction = pchan->rx ? DMA_FROM_DEVICE :
- DMA_TO_DEVICE;
+ td_chan->direction = pchan->rx ? DMA_DEV_TO_MEM :
+ DMA_MEM_TO_DEV;
td_chan->membase = td->membase +
(i / 2) * TIMBDMA_INSTANCE_OFFSET +
@@ -841,17 +841,7 @@ static struct platform_driver td_driver = {
.remove = __exit_p(td_remove),
};
-static int __init td_init(void)
-{
- return platform_driver_register(&td_driver);
-}
-module_init(td_init);
-
-static void __exit td_exit(void)
-{
- platform_driver_unregister(&td_driver);
-}
-module_exit(td_exit);
+module_platform_driver(td_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Timberdale DMA controller driver");
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
index cbd83e362b5e..6122c364cf11 100644
--- a/drivers/dma/txx9dmac.c
+++ b/drivers/dma/txx9dmac.c
@@ -845,7 +845,7 @@ txx9dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
static struct dma_async_tx_descriptor *
txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_data_direction direction,
+ unsigned int sg_len, enum dma_transfer_direction direction,
unsigned long flags)
{
struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
@@ -860,9 +860,9 @@ txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
BUG_ON(!ds || !ds->reg_width);
if (ds->tx_reg)
- BUG_ON(direction != DMA_TO_DEVICE);
+ BUG_ON(direction != DMA_MEM_TO_DEV);
else
- BUG_ON(direction != DMA_FROM_DEVICE);
+ BUG_ON(direction != DMA_DEV_TO_MEM);
if (unlikely(!sg_len))
return NULL;
@@ -882,7 +882,7 @@ txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
mem = sg_dma_address(sg);
if (__is_dmac64(ddev)) {
- if (direction == DMA_TO_DEVICE) {
+ if (direction == DMA_MEM_TO_DEV) {
desc->hwdesc.SAR = mem;
desc->hwdesc.DAR = ds->tx_reg;
} else {
@@ -891,7 +891,7 @@ txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
}
desc->hwdesc.CNTR = sg_dma_len(sg);
} else {
- if (direction == DMA_TO_DEVICE) {
+ if (direction == DMA_MEM_TO_DEV) {
desc->hwdesc32.SAR = mem;
desc->hwdesc32.DAR = ds->tx_reg;
} else {
@@ -900,7 +900,7 @@ txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
}
desc->hwdesc32.CNTR = sg_dma_len(sg);
}
- if (direction == DMA_TO_DEVICE) {
+ if (direction == DMA_MEM_TO_DEV) {
sai = ds->reg_width;
dai = 0;
} else {
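
The timb_dma and txx9dmac hunks are part of the tree-wide move from enum dma_data_direction to the new enum dma_transfer_direction for slave transfers. A hedged sketch of the mapping a driver effectively applies during this conversion (the helper itself is hypothetical, not in-tree):

#include <linux/bug.h>
#include <linux/dma-direction.h>
#include <linux/dmaengine.h>

/* hypothetical helper: old streaming-API direction -> new slave direction */
static enum dma_transfer_direction to_transfer_direction(enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		return DMA_MEM_TO_DEV;	/* memory is the source, device the sink */
	case DMA_FROM_DEVICE:
		return DMA_DEV_TO_MEM;	/* device is the source, memory the sink */
	default:
		WARN_ON_ONCE(1);	/* DMA_BIDIRECTIONAL/DMA_NONE have no slave equivalent */
		return DMA_MEM_TO_MEM;
	}
}
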
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index 70ad8923f1d7..8568d9b61875 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -2234,7 +2234,7 @@ static void i7core_unregister_mci(struct i7core_dev *i7core_dev)
if (pvt->enable_scrub)
disable_sdram_scrub_setting(mci);
- atomic_notifier_chain_unregister(&x86_mce_decoder_chain, &i7_mce_dec);
+ mce_unregister_decode_chain(&i7_mce_dec);
/* Disable EDAC polling */
i7core_pci_ctl_release(pvt);
@@ -2336,7 +2336,7 @@ static int i7core_register_mci(struct i7core_dev *i7core_dev)
/* DCLK for scrub rate setting */
pvt->dclk_freq = get_dclk_freq();
- atomic_notifier_chain_register(&x86_mce_decoder_chain, &i7_mce_dec);
+ mce_register_decode_chain(&i7_mce_dec);
return 0;
diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
index a5da732fe5b2..4184e0171f00 100644
--- a/drivers/edac/i82975x_edac.c
+++ b/drivers/edac/i82975x_edac.c
@@ -277,11 +277,9 @@ static void i82975x_get_error_info(struct mem_ctl_info *mci,
static int i82975x_process_error_info(struct mem_ctl_info *mci,
struct i82975x_error_info *info, int handle_errors)
{
- int row, multi_chan, chan;
+ int row, chan;
unsigned long offst, page;
- multi_chan = mci->csrows[0].nr_channels - 1;
-
if (!(info->errsts2 & 0x0003))
return 0;
@@ -294,20 +292,30 @@ static int i82975x_process_error_info(struct mem_ctl_info *mci,
}
page = (unsigned long) info->eap;
- if (info->xeap & 1)
- page |= 0x100000000ul;
- chan = page & 1;
page >>= 1;
- offst = page & ((1 << PAGE_SHIFT) - 1);
- page >>= PAGE_SHIFT;
+ if (info->xeap & 1)
+ page |= 0x80000000;
+ page >>= (PAGE_SHIFT - 1);
row = edac_mc_find_csrow_by_page(mci, page);
+ if (row == -1) {
+ i82975x_mc_printk(mci, KERN_ERR, "error processing EAP:\n"
+ "\tXEAP=%u\n"
+ "\t EAP=0x%08x\n"
+ "\tPAGE=0x%08x\n",
+ (info->xeap & 1) ? 1 : 0, info->eap, (unsigned int) page);
+ return 0;
+ }
+ chan = (mci->csrows[row].nr_channels == 1) ? 0 : info->eap & 1;
+ offst = info->eap
+ & ((1 << PAGE_SHIFT) -
+ (1 << mci->csrows[row].grain));
+
if (info->errsts & 0x0002)
edac_mc_handle_ue(mci, page, offst , row, "i82975x UE");
else
edac_mc_handle_ce(mci, page, offst, info->derrsyn, row,
- multi_chan ? chan : 0,
- "i82975x CE");
+ chan, "i82975x CE");
return 1;
}
@@ -410,7 +418,7 @@ static void i82975x_init_csrows(struct mem_ctl_info *mci,
csrow->last_page = cumul_size - 1;
csrow->nr_pages = cumul_size - last_cumul_size;
last_cumul_size = cumul_size;
- csrow->grain = 1 << 6; /* I82975X_EAP has 64B resolution */
+ csrow->grain = 1 << 7; /* 128Byte cache-line resolution */
csrow->mtype = MEM_DDR2; /* I82975x supports only DDR2 */
csrow->dtype = i82975x_dram_type(mch_window, index);
csrow->edac_mode = EDAC_SECDED; /* only supported */
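
The reworked error-address decode above folds the XEAP bit back into the page number and derives the offset from the csrow grain. A standalone arithmetic sketch, assuming 4 KiB pages and the new 128-byte grain set later in this same patch (the example EAP/XEAP values are made up):

/* illustration: the reworked EAP decoding in i82975x_process_error_info() */
#include <stdio.h>

#define PAGE_SHIFT	12
#define GRAIN_SHIFT	7	/* csrow->grain = 1 << 7 */

int main(void)
{
	unsigned int eap = 0x12345678;	/* example error address pointer */
	unsigned int xeap = 1;		/* extended address bit above bit 31 */
	unsigned long page, offst;

	page = eap >> 1;
	if (xeap & 1)
		page |= 0x80000000;	/* fold the extended address bit back in */
	page >>= (PAGE_SHIFT - 1);

	offst = eap & ((1 << PAGE_SHIFT) - (1 << GRAIN_SHIFT));

	printf("page = 0x%lx, offset = 0x%lx\n", page, offst);
	/* prints page = 0x112345, offset = 0x600 */
	return 0;
}
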
diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
index d0864d9c38ad..bd926ea2e00c 100644
--- a/drivers/edac/mce_amd.c
+++ b/drivers/edac/mce_amd.c
@@ -884,7 +884,7 @@ static int __init mce_amd_init(void)
pr_info("MCE: In-kernel MCE decoding enabled.\n");
- atomic_notifier_chain_register(&x86_mce_decoder_chain, &amd_mce_dec_nb);
+ mce_register_decode_chain(&amd_mce_dec_nb);
return 0;
}
@@ -893,7 +893,7 @@ early_initcall(mce_amd_init);
#ifdef MODULE
static void __exit mce_amd_exit(void)
{
- atomic_notifier_chain_unregister(&x86_mce_decoder_chain, &amd_mce_dec_nb);
+ mce_unregister_decode_chain(&amd_mce_dec_nb);
kfree(fam_ops);
}
diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c
index 38400963e245..fc757069c6af 100644
--- a/drivers/edac/ppc4xx_edac.c
+++ b/drivers/edac/ppc4xx_edac.c
@@ -142,7 +142,7 @@
/*
* The ibm,sdram-4xx-ddr2 Device Control Registers (DCRs) are
- * indirectly acccessed and have a base and length defined by the
+ * indirectly accessed and have a base and length defined by the
* device tree. The base can be anything; however, we expect the
* length to be precisely two registers, the first for the address
* window and the second for the data window.
diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
index b153674431f1..e294e1b3616c 100644
--- a/drivers/edac/r82600_edac.c
+++ b/drivers/edac/r82600_edac.c
@@ -131,7 +131,7 @@ struct r82600_error_info {
u32 eapr;
};
-static unsigned int disable_hardware_scrub;
+static bool disable_hardware_scrub;
static struct edac_pci_ctl_info *r82600_pci;
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index 7a402bfbee7d..1dc118d83cc6 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -1609,11 +1609,9 @@ static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,
mce->cpuvendor, mce->cpuid, mce->time,
mce->socketid, mce->apicid);
-#ifdef CONFIG_SMP
/* Only handle if it is the right mc controller */
if (cpu_data(mce->cpu).phys_proc_id != pvt->sbridge_dev->mc)
return NOTIFY_DONE;
-#endif
smp_rmb();
if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) {
@@ -1661,8 +1659,7 @@ static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev)
debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n",
__func__, mci, &sbridge_dev->pdev[0]->dev);
- atomic_notifier_chain_unregister(&x86_mce_decoder_chain,
- &sbridge_mce_dec);
+ mce_unregister_decode_chain(&sbridge_mce_dec);
/* Remove MC sysfs nodes */
edac_mc_del_mc(mci->dev);
@@ -1731,8 +1728,7 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev)
goto fail0;
}
- atomic_notifier_chain_register(&x86_mce_decoder_chain,
- &sbridge_mce_dec);
+ mce_register_decode_chain(&sbridge_mce_dec);
return 0;
fail0:
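
mce_register_decode_chain()/mce_unregister_decode_chain() replace the open-coded x86_mce_decoder_chain notifier calls across the EDAC drivers. A minimal, hypothetical module showing the calling convention (struct mce field names as assumed from the 3.3 headers):

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <asm/mce.h>

static int example_mce_decode(struct notifier_block *nb, unsigned long val,
			      void *data)
{
	struct mce *m = data;

	pr_info("MCE on CPU %d, bank %d, status 0x%llx\n",
		m->extcpu, m->bank, (unsigned long long)m->status);
	return NOTIFY_DONE;
}

static struct notifier_block example_mce_dec = {
	.notifier_call	= example_mce_decode,
};

static int __init example_init(void)
{
	mce_register_decode_chain(&example_mce_dec);
	return 0;
}
module_init(example_init);

static void __exit example_exit(void)
{
	mce_unregister_decode_chain(&example_mce_dec);
}
module_exit(example_exit);

MODULE_LICENSE("GPL");
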
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index 68375bc3aef6..80e95aa3bf14 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -66,7 +66,7 @@
*
* Concurrent logins are useful together with cluster filesystems.
*/
-static int sbp2_param_exclusive_login = 1;
+static bool sbp2_param_exclusive_login = 1;
module_param_named(exclusive_login, sbp2_param_exclusive_login, bool, 0644);
MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device "
"(default = Y, use N for concurrent initiators)");
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index efba163595db..9b00072a020f 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -145,18 +145,6 @@ config ISCSI_IBFT
detect iSCSI boot parameters dynamically during system boot, say Y.
Otherwise, say N.
-config SIGMA
- tristate "SigmaStudio firmware loader"
- depends on I2C
- select CRC32
- default n
- help
- Enable helper functions for working with Analog Devices SigmaDSP
- parts and binary firmwares produced by Analog Devices SigmaStudio.
-
- If unsure, say N here. Drivers that need these helpers will select
- this option automatically.
-
source "drivers/firmware/google/Kconfig"
endmenu
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 47338c979126..5a7e27399729 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -12,6 +12,5 @@ obj-$(CONFIG_DMIID) += dmi-id.o
obj-$(CONFIG_ISCSI_IBFT_FIND) += iscsi_ibft_find.o
obj-$(CONFIG_ISCSI_IBFT) += iscsi_ibft.o
obj-$(CONFIG_FIRMWARE_MEMMAP) += memmap.o
-obj-$(CONFIG_SIGMA) += sigma.o
obj-$(CONFIG_GOOGLE_FIRMWARE) += google/
diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
index 8370f72d87ff..d25599f2a3f8 100644
--- a/drivers/firmware/efivars.c
+++ b/drivers/firmware/efivars.c
@@ -457,7 +457,8 @@ static int efi_pstore_close(struct pstore_info *psi)
}
static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
- struct timespec *timespec, struct pstore_info *psi)
+ struct timespec *timespec,
+ char **buf, struct pstore_info *psi)
{
efi_guid_t vendor = LINUX_EFI_CRASH_GUID;
struct efivars *efivars = psi->data;
@@ -478,7 +479,11 @@ static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
timespec->tv_nsec = 0;
get_var_data_locked(efivars, &efivars->walk_entry->var);
size = efivars->walk_entry->var.DataSize;
- memcpy(psi->buf, efivars->walk_entry->var.Data, size);
+ *buf = kmalloc(size, GFP_KERNEL);
+ if (*buf == NULL)
+ return -ENOMEM;
+ memcpy(*buf, efivars->walk_entry->var.Data,
+ size);
efivars->walk_entry = list_entry(efivars->walk_entry->list.next,
struct efivar_entry, list);
return size;
@@ -490,7 +495,8 @@ static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
return 0;
}
-static int efi_pstore_write(enum pstore_type_id type, u64 *id,
+static int efi_pstore_write(enum pstore_type_id type,
+ enum kmsg_dump_reason reason, u64 *id,
unsigned int part, size_t size, struct pstore_info *psi)
{
char name[DUMP_NAME_LEN];
@@ -560,7 +566,7 @@ static int efi_pstore_write(enum pstore_type_id type, u64 *id,
static int efi_pstore_erase(enum pstore_type_id type, u64 id,
struct pstore_info *psi)
{
- efi_pstore_write(type, &id, (unsigned int)id, 0, psi);
+ efi_pstore_write(type, 0, &id, (unsigned int)id, 0, psi);
return 0;
}
@@ -576,12 +582,14 @@ static int efi_pstore_close(struct pstore_info *psi)
}
static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
- struct timespec *time, struct pstore_info *psi)
+ struct timespec *timespec,
+ char **buf, struct pstore_info *psi)
{
return -1;
}
-static int efi_pstore_write(enum pstore_type_id type, u64 *id,
+static int efi_pstore_write(enum pstore_type_id type,
+ enum kmsg_dump_reason reason, u64 *id,
unsigned int part, size_t size, struct pstore_info *psi)
{
return 0;
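
With the new char **buf argument, the pstore read callback hands back a kmalloc'ed copy of the record, so buffer ownership moves to the caller. A hypothetical consumer illustrating that convention (this is not the actual pstore core):

#include <linux/kernel.h>
#include <linux/pstore.h>
#include <linux/slab.h>
#include <linux/time.h>

/* hypothetical caller: the backend allocates *buf, the caller must kfree() it */
static ssize_t dump_one_record(struct pstore_info *psi)
{
	enum pstore_type_id type;
	struct timespec time;
	char *buf = NULL;
	u64 id;
	ssize_t size;

	size = psi->read(&id, &type, &time, &buf, psi);
	if (size <= 0)
		return size;

	pr_info("pstore record %llu: %zd bytes\n",
		(unsigned long long)id, size);
	kfree(buf);
	return size;
}
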
diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
index c811cb107904..3ee852c9925b 100644
--- a/drivers/firmware/iscsi_ibft.c
+++ b/drivers/firmware/iscsi_ibft.c
@@ -433,11 +433,11 @@ static int __init ibft_check_device(void)
 * Helper routines to determine whether the entry is valid
* in the proper iBFT structure.
*/
-static mode_t ibft_check_nic_for(void *data, int type)
+static umode_t ibft_check_nic_for(void *data, int type)
{
struct ibft_kobject *entry = data;
struct ibft_nic *nic = entry->nic;
- mode_t rc = 0;
+ umode_t rc = 0;
switch (type) {
case ISCSI_BOOT_ETH_INDEX:
@@ -488,11 +488,11 @@ static mode_t ibft_check_nic_for(void *data, int type)
return rc;
}
-static mode_t __init ibft_check_tgt_for(void *data, int type)
+static umode_t __init ibft_check_tgt_for(void *data, int type)
{
struct ibft_kobject *entry = data;
struct ibft_tgt *tgt = entry->tgt;
- mode_t rc = 0;
+ umode_t rc = 0;
switch (type) {
case ISCSI_BOOT_TGT_INDEX:
@@ -524,11 +524,11 @@ static mode_t __init ibft_check_tgt_for(void *data, int type)
return rc;
}
-static mode_t __init ibft_check_initiator_for(void *data, int type)
+static umode_t __init ibft_check_initiator_for(void *data, int type)
{
struct ibft_kobject *entry = data;
struct ibft_initiator *init = entry->initiator;
- mode_t rc = 0;
+ umode_t rc = 0;
switch (type) {
case ISCSI_BOOT_INI_INDEX:
@@ -746,6 +746,37 @@ static void __exit ibft_exit(void)
ibft_cleanup();
}
+#ifdef CONFIG_ACPI
+static const struct {
+ char *sign;
+} ibft_signs[] = {
+ /*
+ * One spec says "IBFT", the other says "iBFT". We have to check
+ * for both.
+ */
+ { ACPI_SIG_IBFT },
+ { "iBFT" },
+};
+
+static void __init acpi_find_ibft_region(void)
+{
+ int i;
+ struct acpi_table_header *table = NULL;
+
+ if (acpi_disabled)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(ibft_signs) && !ibft_addr; i++) {
+ acpi_get_table(ibft_signs[i].sign, 0, &table);
+ ibft_addr = (struct acpi_table_ibft *)table;
+ }
+}
+#else
+static void __init acpi_find_ibft_region(void)
+{
+}
+#endif
+
/*
* ibft_init() - creates sysfs tree entries for the iBFT data.
*/
@@ -753,9 +784,16 @@ static int __init ibft_init(void)
{
int rc = 0;
+ /*
+	  On UEFI systems, setup_arch()/find_ibft_region() is called
+	  before the ACPI tables are parsed, so it only does the
+	  legacy scan.
+ */
+ if (!ibft_addr)
+ acpi_find_ibft_region();
+
if (ibft_addr) {
- printk(KERN_INFO "iBFT detected at 0x%llx.\n",
- (u64)isa_virt_to_bus(ibft_addr));
+ pr_info("iBFT detected.\n");
rc = ibft_check_device();
if (rc)
diff --git a/drivers/firmware/iscsi_ibft_find.c b/drivers/firmware/iscsi_ibft_find.c
index bfe723266fd8..4da4eb9ae926 100644
--- a/drivers/firmware/iscsi_ibft_find.c
+++ b/drivers/firmware/iscsi_ibft_find.c
@@ -45,13 +45,6 @@ EXPORT_SYMBOL_GPL(ibft_addr);
static const struct {
char *sign;
} ibft_signs[] = {
-#ifdef CONFIG_ACPI
- /*
- * One spec says "IBFT", the other says "iBFT". We have to check
- * for both.
- */
- { ACPI_SIG_IBFT },
-#endif
{ "iBFT" },
{ "BIFT" }, /* Broadcom iSCSI Offload */
};
@@ -62,14 +55,6 @@ static const struct {
#define VGA_MEM 0xA0000 /* VGA buffer */
#define VGA_SIZE 0x20000 /* 128kB */
-#ifdef CONFIG_ACPI
-static int __init acpi_find_ibft(struct acpi_table_header *header)
-{
- ibft_addr = (struct acpi_table_ibft *)header;
- return 0;
-}
-#endif /* CONFIG_ACPI */
-
static int __init find_ibft_in_mem(void)
{
unsigned long pos;
@@ -94,6 +79,7 @@ static int __init find_ibft_in_mem(void)
* the table cannot be valid. */
if (pos + len <= (IBFT_END-1)) {
ibft_addr = (struct acpi_table_ibft *)virt;
+ pr_info("iBFT found at 0x%lx.\n", pos);
goto done;
}
}
@@ -108,20 +94,12 @@ done:
*/
unsigned long __init find_ibft_region(unsigned long *sizep)
{
-#ifdef CONFIG_ACPI
- int i;
-#endif
ibft_addr = NULL;
-#ifdef CONFIG_ACPI
- for (i = 0; i < ARRAY_SIZE(ibft_signs) && !ibft_addr; i++)
- acpi_table_parse(ibft_signs[i].sign, acpi_find_ibft);
-#endif /* CONFIG_ACPI */
-
/* iBFT 1.03 section 1.4.3.1 mandates that UEFI machines will
* only use ACPI for this */
- if (!ibft_addr && !efi_enabled)
+ if (!efi_enabled)
find_ibft_in_mem();
if (ibft_addr) {
diff --git a/drivers/firmware/sigma.c b/drivers/firmware/sigma.c
deleted file mode 100644
index f10fc521951b..000000000000
--- a/drivers/firmware/sigma.c
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- * Load Analog Devices SigmaStudio firmware files
- *
- * Copyright 2009-2011 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
-
-#include <linux/crc32.h>
-#include <linux/delay.h>
-#include <linux/firmware.h>
-#include <linux/kernel.h>
-#include <linux/i2c.h>
-#include <linux/module.h>
-#include <linux/sigma.h>
-
-/* Return: 0==OK, <0==error, =1 ==no more actions */
-static int
-process_sigma_action(struct i2c_client *client, struct sigma_firmware *ssfw)
-{
- struct sigma_action *sa = (void *)(ssfw->fw->data + ssfw->pos);
- size_t len = sigma_action_len(sa);
- int ret = 0;
-
- pr_debug("%s: instr:%i addr:%#x len:%zu\n", __func__,
- sa->instr, sa->addr, len);
-
- switch (sa->instr) {
- case SIGMA_ACTION_WRITEXBYTES:
- case SIGMA_ACTION_WRITESINGLE:
- case SIGMA_ACTION_WRITESAFELOAD:
- if (ssfw->fw->size < ssfw->pos + len)
- return -EINVAL;
- ret = i2c_master_send(client, (void *)&sa->addr, len);
- if (ret < 0)
- return -EINVAL;
- break;
-
- case SIGMA_ACTION_DELAY:
- ret = 0;
- udelay(len);
- len = 0;
- break;
-
- case SIGMA_ACTION_END:
- return 1;
-
- default:
- return -EINVAL;
- }
-
- /* when arrive here ret=0 or sent data */
- ssfw->pos += sigma_action_size(sa, len);
- return ssfw->pos == ssfw->fw->size;
-}
-
-static int
-process_sigma_actions(struct i2c_client *client, struct sigma_firmware *ssfw)
-{
- pr_debug("%s: processing %p\n", __func__, ssfw);
-
- while (1) {
- int ret = process_sigma_action(client, ssfw);
- pr_debug("%s: action returned %i\n", __func__, ret);
- if (ret == 1)
- return 0;
- else if (ret)
- return ret;
- }
-}
-
-int process_sigma_firmware(struct i2c_client *client, const char *name)
-{
- int ret;
- struct sigma_firmware_header *ssfw_head;
- struct sigma_firmware ssfw;
- const struct firmware *fw;
- u32 crc;
-
- pr_debug("%s: loading firmware %s\n", __func__, name);
-
- /* first load the blob */
- ret = request_firmware(&fw, name, &client->dev);
- if (ret) {
- pr_debug("%s: request_firmware() failed with %i\n", __func__, ret);
- return ret;
- }
- ssfw.fw = fw;
-
- /* then verify the header */
- ret = -EINVAL;
- if (fw->size < sizeof(*ssfw_head))
- goto done;
-
- ssfw_head = (void *)fw->data;
- if (memcmp(ssfw_head->magic, SIGMA_MAGIC, ARRAY_SIZE(ssfw_head->magic)))
- goto done;
-
- crc = crc32(0, fw->data, fw->size);
- pr_debug("%s: crc=%x\n", __func__, crc);
- if (crc != ssfw_head->crc)
- goto done;
-
- ssfw.pos = sizeof(*ssfw_head);
-
- /* finally process all of the actions */
- ret = process_sigma_actions(client, &ssfw);
-
- done:
- release_firmware(fw);
-
- pr_debug("%s: loaded %s\n", __func__, name);
-
- return ret;
-}
-EXPORT_SYMBOL(process_sigma_firmware);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 8482a23887dc..d0c41188d4e5 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -70,7 +70,7 @@ config GPIO_GENERIC
config GPIO_DA9052
tristate "Dialog DA9052 GPIO"
- depends on PMIC_DA9052
+ depends on PMIC_DA9052 && BROKEN
help
Say yes here to enable the GPIO driver for the DA9052 chip.
@@ -87,6 +87,7 @@ config GPIO_GENERIC_PLATFORM
config GPIO_IT8761E
tristate "IT8761E GPIO support"
+ depends on X86 # unconditional access to IO space.
help
Say yes here to support GPIO functionality of IT8761E super I/O chip.
@@ -138,9 +139,16 @@ config GPIO_MXS
config GPIO_PL061
bool "PrimeCell PL061 GPIO support"
depends on ARM_AMBA
+ select GENERIC_IRQ_CHIP
help
Say yes here to support the PrimeCell PL061 GPIO device
+config GPIO_PXA
+ bool "PXA GPIO support"
+ depends on ARCH_PXA || ARCH_MMP
+ help
+ Say yes here to support the PXA GPIO device
+
config GPIO_XILINX
bool "Xilinx GPIO support"
depends on PPC_OF || MICROBLAZE
@@ -170,15 +178,6 @@ config GPIO_SCH
The Intel Tunnel Creek processor has 5 GPIOs powered by the
core power rail and 9 from suspend power supply.
-config GPIO_U300
- bool "ST-Ericsson U300 COH 901 335/571 GPIO"
- depends on GPIOLIB && ARCH_U300
- help
- Say yes here to support GPIO interface on ST-Ericsson U300.
- The names of the two IP block variants supported are
- COH 901 335 and COH 901 571/3. They contain 3, 5 or 7
- ports of 8 GPIO pins each.
-
config GPIO_VX855
tristate "VIA VX855/VX875 GPIO"
depends on PCI
@@ -356,7 +355,7 @@ comment "PCI GPIO expanders:"
config GPIO_CS5535
tristate "AMD CS5535/CS5536 GPIO support"
- depends on PCI && X86 && !CS5535_GPIO && MFD_CS5535
+ depends on PCI && X86 && MFD_CS5535
help
The AMD CS5535 and CS5536 southbridges support 28 GPIO pins that
can be used for quite a number of things. The CS5535/6 is found on
@@ -387,7 +386,7 @@ config GPIO_LANGWELL
Say Y here to support Intel Langwell/Penwell GPIO.
config GPIO_PCH
- tristate "Intel EG20T PCH / OKI SEMICONDUCTOR ML7223 IOH GPIO"
+ tristate "Intel EG20T PCH/LAPIS Semiconductor IOH(ML7223/ML7831) GPIO"
depends on PCI && X86
select GENERIC_IRQ_CHIP
help
@@ -395,11 +394,12 @@ config GPIO_PCH
which is an IOH(Input/Output Hub) for x86 embedded processor.
This driver can access PCH GPIO device.
- This driver also can be used for OKI SEMICONDUCTOR IOH(Input/
- Output Hub), ML7223.
+ This driver also can be used for LAPIS Semiconductor IOH(Input/
+ Output Hub), ML7223 and ML7831.
ML7223 IOH is for MP(Media Phone) use.
- ML7223 is companion chip for Intel Atom E6xx series.
- ML7223 is completely compatible for Intel EG20T PCH.
+ ML7831 IOH is for general purpose use.
+ ML7223/ML7831 is companion chip for Intel Atom E6xx series.
+ ML7223/ML7831 is completely compatible for Intel EG20T PCH.
config GPIO_ML_IOH
tristate "OKI SEMICONDUCTOR ML7213 IOH GPIO support"
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index dbcb0bcfd8da..fa10df604c01 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -2,7 +2,7 @@
ccflags-$(CONFIG_DEBUG_GPIO) += -DDEBUG
-obj-$(CONFIG_GPIOLIB) += gpiolib.o
+obj-$(CONFIG_GPIOLIB) += gpiolib.o devres.o
# Device drivers. Generally keep list sorted alphabetically
obj-$(CONFIG_GPIO_GENERIC) += gpio-generic.o
@@ -18,7 +18,7 @@ obj-$(CONFIG_ARCH_DAVINCI) += gpio-davinci.o
obj-$(CONFIG_GPIO_EP93XX) += gpio-ep93xx.o
obj-$(CONFIG_GPIO_IT8761E) += gpio-it8761e.o
obj-$(CONFIG_GPIO_JANZ_TTL) += gpio-janz-ttl.o
-obj-$(CONFIG_MACH_KS8695) += gpio-ks8695.o
+obj-$(CONFIG_ARCH_KS8695) += gpio-ks8695.o
obj-$(CONFIG_GPIO_LANGWELL) += gpio-langwell.o
obj-$(CONFIG_ARCH_LPC32XX) += gpio-lpc32xx.o
obj-$(CONFIG_GPIO_MAX730X) += gpio-max730x.o
@@ -40,7 +40,7 @@ obj-$(CONFIG_GPIO_PCA953X) += gpio-pca953x.o
obj-$(CONFIG_GPIO_PCF857X) += gpio-pcf857x.o
obj-$(CONFIG_GPIO_PCH) += gpio-pch.o
obj-$(CONFIG_GPIO_PL061) += gpio-pl061.o
-obj-$(CONFIG_PLAT_PXA) += gpio-pxa.o
+obj-$(CONFIG_GPIO_PXA) += gpio-pxa.o
obj-$(CONFIG_GPIO_RDC321X) += gpio-rdc321x.o
obj-$(CONFIG_PLAT_SAMSUNG) += gpio-samsung.o
obj-$(CONFIG_ARCH_SA1100) += gpio-sa1100.o
@@ -54,7 +54,6 @@ obj-$(CONFIG_ARCH_DAVINCI_TNETV107X) += gpio-tnetv107x.o
obj-$(CONFIG_GPIO_TPS65910) += gpio-tps65910.o
obj-$(CONFIG_GPIO_TPS65912) += gpio-tps65912.o
obj-$(CONFIG_GPIO_TWL4030) += gpio-twl4030.o
-obj-$(CONFIG_MACH_U300) += gpio-u300.o
obj-$(CONFIG_GPIO_UCB1400) += gpio-ucb1400.o
obj-$(CONFIG_GPIO_VR41XX) += gpio-vr41xx.o
obj-$(CONFIG_GPIO_VX855) += gpio-vx855.o
diff --git a/drivers/gpio/devres.c b/drivers/gpio/devres.c
new file mode 100644
index 000000000000..3dd29399cef5
--- /dev/null
+++ b/drivers/gpio/devres.c
@@ -0,0 +1,90 @@
+/*
+ * drivers/gpio/devres.c - managed gpio resources
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * This file is based on kernel/irq/devres.c
+ *
+ * Copyright (c) 2011 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/module.h>
+#include <linux/gpio.h>
+#include <linux/device.h>
+#include <linux/gfp.h>
+
+static void devm_gpio_release(struct device *dev, void *res)
+{
+ unsigned *gpio = res;
+
+ gpio_free(*gpio);
+}
+
+static int devm_gpio_match(struct device *dev, void *res, void *data)
+{
+ unsigned *this = res, *gpio = data;
+
+ return *this == *gpio;
+}
+
+/**
+ * devm_gpio_request - request a gpio for a managed device
+ * @dev: device to request the gpio for
+ * @gpio: gpio to allocate
+ * @label: the name of the requested gpio
+ *
+ * Except for the extra @dev argument, this function takes the
+ * same arguments and performs the same function as
+ * gpio_request(). GPIOs requested with this function will be
+ * automatically freed on driver detach.
+ *
+ * If an GPIO allocated with this function needs to be freed
+ * separately, devm_gpio_free() must be used.
+ */
+
+int devm_gpio_request(struct device *dev, unsigned gpio, const char *label)
+{
+ unsigned *dr;
+ int rc;
+
+ dr = devres_alloc(devm_gpio_release, sizeof(unsigned), GFP_KERNEL);
+ if (!dr)
+ return -ENOMEM;
+
+ rc = gpio_request(gpio, label);
+ if (rc) {
+ devres_free(dr);
+ return rc;
+ }
+
+ *dr = gpio;
+ devres_add(dev, dr);
+
+ return 0;
+}
+EXPORT_SYMBOL(devm_gpio_request);
+
+/**
+ * devm_gpio_free - free an interrupt
+ * @dev: device to free gpio for
+ * @gpio: gpio to free
+ *
+ * Except for the extra @dev argument, this function takes the
+ * same arguments and performs the same function as gpio_free().
+ * This function instead of gpio_free() should be used to manually
+ * free GPIOs allocated with devm_gpio_request().
+ */
+void devm_gpio_free(struct device *dev, unsigned int gpio)
+{
+
+ WARN_ON(devres_destroy(dev, devm_gpio_release, devm_gpio_match,
+ &gpio));
+ gpio_free(gpio);
+}
+EXPORT_SYMBOL(devm_gpio_free);
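
For reference, a minimal usage sketch of the new helper in a hypothetical driver: the GPIO is released automatically when the device is unbound, so there is no matching gpio_free() in the error or remove paths (the GPIO number, label, and driver name below are made up):

#include <linux/gpio.h>
#include <linux/module.h>
#include <linux/platform_device.h>

#define EXAMPLE_RESET_GPIO	42	/* assumed board-specific line */

static int __devinit example_probe(struct platform_device *pdev)
{
	int ret;

	ret = devm_gpio_request(&pdev->dev, EXAMPLE_RESET_GPIO, "example-reset");
	if (ret)
		return ret;

	/* drive the reset line low; the request is dropped on detach */
	return gpio_direction_output(EXAMPLE_RESET_GPIO, 0);
}

static struct platform_driver example_driver = {
	.probe	= example_probe,
	.driver	= {
		.name	= "example-gpio-user",
		.owner	= THIS_MODULE,
	},
};
module_platform_driver(example_driver);

MODULE_LICENSE("GPL");
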
diff --git a/drivers/gpio/gpio-adp5520.c b/drivers/gpio/gpio-adp5520.c
index 9f2781537001..2f263cc32561 100644
--- a/drivers/gpio/gpio-adp5520.c
+++ b/drivers/gpio/gpio-adp5520.c
@@ -193,17 +193,7 @@ static struct platform_driver adp5520_gpio_driver = {
.remove = __devexit_p(adp5520_gpio_remove),
};
-static int __init adp5520_gpio_init(void)
-{
- return platform_driver_register(&adp5520_gpio_driver);
-}
-module_init(adp5520_gpio_init);
-
-static void __exit adp5520_gpio_exit(void)
-{
- platform_driver_unregister(&adp5520_gpio_driver);
-}
-module_exit(adp5520_gpio_exit);
+module_platform_driver(adp5520_gpio_driver);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("GPIO ADP5520 Driver");
diff --git a/drivers/gpio/gpio-adp5588.c b/drivers/gpio/gpio-adp5588.c
index 3525ad918771..9ad1703d1408 100644
--- a/drivers/gpio/gpio-adp5588.c
+++ b/drivers/gpio/gpio-adp5588.c
@@ -418,9 +418,8 @@ static int __devinit adp5588_gpio_probe(struct i2c_client *client,
if (ret)
goto err_irq;
- dev_info(&client->dev, "gpios %d..%d (IRQ Base %d) on a %s Rev. %d\n",
- gc->base, gc->base + gc->ngpio - 1,
- pdata->irq_base, client->name, revid);
+ dev_info(&client->dev, "IRQ Base: %d Rev.: %d\n",
+ pdata->irq_base, revid);
if (pdata->setup) {
ret = pdata->setup(client, gc->base, gc->ngpio, pdata->context);
diff --git a/drivers/gpio/gpio-bt8xx.c b/drivers/gpio/gpio-bt8xx.c
index ec57936aef62..5ca4098ba092 100644
--- a/drivers/gpio/gpio-bt8xx.c
+++ b/drivers/gpio/gpio-bt8xx.c
@@ -223,9 +223,6 @@ static int bt8xxgpio_probe(struct pci_dev *dev,
goto err_release_mem;
}
- printk(KERN_INFO "bt8xxgpio: Abusing BT8xx card for GPIOs %d to %d\n",
- bg->gpio.base, bg->gpio.base + BT8XXGPIO_NR_GPIOS - 1);
-
return 0;
err_release_mem:
diff --git a/drivers/gpio/gpio-cs5535.c b/drivers/gpio/gpio-cs5535.c
index 6e16cba56ad2..19eda1bbe343 100644
--- a/drivers/gpio/gpio-cs5535.c
+++ b/drivers/gpio/gpio-cs5535.c
@@ -347,7 +347,6 @@ static int __devinit cs5535_gpio_probe(struct platform_device *pdev)
if (err)
goto release_region;
- dev_info(&pdev->dev, "GPIO support successfully loaded.\n");
return 0;
release_region:
@@ -382,18 +381,7 @@ static struct platform_driver cs5535_gpio_driver = {
.remove = __devexit_p(cs5535_gpio_remove),
};
-static int __init cs5535_gpio_init(void)
-{
- return platform_driver_register(&cs5535_gpio_driver);
-}
-
-static void __exit cs5535_gpio_exit(void)
-{
- platform_driver_unregister(&cs5535_gpio_driver);
-}
-
-module_init(cs5535_gpio_init);
-module_exit(cs5535_gpio_exit);
+module_platform_driver(cs5535_gpio_driver);
MODULE_AUTHOR("Andres Salomon <dilinger@queued.net>");
MODULE_DESCRIPTION("AMD CS5535/CS5536 GPIO driver");
diff --git a/drivers/gpio/gpio-da9052.c b/drivers/gpio/gpio-da9052.c
index 038f5eb8b13d..56dd047d5844 100644
--- a/drivers/gpio/gpio-da9052.c
+++ b/drivers/gpio/gpio-da9052.c
@@ -22,7 +22,6 @@
#include <linux/mfd/da9052/da9052.h>
#include <linux/mfd/da9052/reg.h>
#include <linux/mfd/da9052/pdata.h>
-#include <linux/mfd/da9052/gpio.h>
#define DA9052_INPUT 1
#define DA9052_OUTPUT_OPENDRAIN 2
@@ -43,6 +42,9 @@
#define DA9052_GPIO_MASK_UPPER_NIBBLE 0xF0
#define DA9052_GPIO_MASK_LOWER_NIBBLE 0x0F
#define DA9052_GPIO_NIBBLE_SHIFT 4
+#define DA9052_IRQ_GPI0 16
+#define DA9052_GPIO_ODD_SHIFT 7
+#define DA9052_GPIO_EVEN_SHIFT 3
struct da9052_gpio {
struct da9052 *da9052;
@@ -104,33 +106,26 @@ static int da9052_gpio_get(struct gpio_chip *gc, unsigned offset)
static void da9052_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
{
struct da9052_gpio *gpio = to_da9052_gpio(gc);
- unsigned char register_value = 0;
int ret;
if (da9052_gpio_port_odd(offset)) {
- if (value) {
- register_value = DA9052_GPIO_ODD_PORT_MODE;
ret = da9052_reg_update(gpio->da9052, (offset >> 1) +
DA9052_GPIO_0_1_REG,
DA9052_GPIO_ODD_PORT_MODE,
- register_value);
+ value << DA9052_GPIO_ODD_SHIFT);
if (ret != 0)
dev_err(gpio->da9052->dev,
"Failed to updated gpio odd reg,%d",
ret);
- }
} else {
- if (value) {
- register_value = DA9052_GPIO_EVEN_PORT_MODE;
ret = da9052_reg_update(gpio->da9052, (offset >> 1) +
DA9052_GPIO_0_1_REG,
DA9052_GPIO_EVEN_PORT_MODE,
- register_value);
+ value << DA9052_GPIO_EVEN_SHIFT);
if (ret != 0)
dev_err(gpio->da9052->dev,
"Failed to updated gpio even reg,%d",
ret);
- }
}
}
@@ -201,9 +196,9 @@ static struct gpio_chip reference_gp __devinitdata = {
.direction_input = da9052_gpio_direction_input,
.direction_output = da9052_gpio_direction_output,
.to_irq = da9052_gpio_to_irq,
- .can_sleep = 1;
- .ngpio = 16;
- .base = -1;
+ .can_sleep = 1,
+ .ngpio = 16,
+ .base = -1,
};
static int __devinit da9052_gpio_probe(struct platform_device *pdev)
@@ -259,17 +254,7 @@ static struct platform_driver da9052_gpio_driver = {
},
};
-static int __init da9052_gpio_init(void)
-{
- return platform_driver_register(&da9052_gpio_driver);
-}
-module_init(da9052_gpio_init);
-
-static void __exit da9052_gpio_exit(void)
-{
- return platform_driver_unregister(&da9052_gpio_driver);
-}
-module_exit(da9052_gpio_exit);
+module_platform_driver(da9052_gpio_driver);
MODULE_AUTHOR("David Dajun Chen <dchen@diasemi.com>");
MODULE_DESCRIPTION("DA9052 GPIO Device Driver");
diff --git a/drivers/gpio/gpio-generic.c b/drivers/gpio/gpio-generic.c
index 4e24436b0f82..e38dd0c31973 100644
--- a/drivers/gpio/gpio-generic.c
+++ b/drivers/gpio/gpio-generic.c
@@ -524,17 +524,7 @@ static struct platform_driver bgpio_driver = {
.remove = __devexit_p(bgpio_pdev_remove),
};
-static int __init bgpio_platform_init(void)
-{
- return platform_driver_register(&bgpio_driver);
-}
-module_init(bgpio_platform_init);
-
-static void __exit bgpio_platform_exit(void)
-{
- platform_driver_unregister(&bgpio_driver);
-}
-module_exit(bgpio_platform_exit);
+module_platform_driver(bgpio_driver);
#endif /* CONFIG_GPIO_GENERIC_PLATFORM */
diff --git a/drivers/gpio/gpio-janz-ttl.c b/drivers/gpio/gpio-janz-ttl.c
index 813ac077e5d7..f2f000dd70b3 100644
--- a/drivers/gpio/gpio-janz-ttl.c
+++ b/drivers/gpio/gpio-janz-ttl.c
@@ -201,8 +201,6 @@ static int __devinit ttl_probe(struct platform_device *pdev)
goto out_iounmap_regs;
}
- dev_info(&pdev->dev, "module %d: registered GPIO device\n",
- pdata->modno);
return 0;
out_iounmap_regs:
@@ -239,20 +237,9 @@ static struct platform_driver ttl_driver = {
.remove = __devexit_p(ttl_remove),
};
-static int __init ttl_init(void)
-{
- return platform_driver_register(&ttl_driver);
-}
-
-static void __exit ttl_exit(void)
-{
- platform_driver_unregister(&ttl_driver);
-}
+module_platform_driver(ttl_driver);
MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>");
MODULE_DESCRIPTION("Janz MODULbus VMOD-TTL Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:janz-ttl");
-
-module_init(ttl_init);
-module_exit(ttl_exit);
diff --git a/drivers/gpio/gpio-ml-ioh.c b/drivers/gpio/gpio-ml-ioh.c
index ea8e73869250..03d6dd5dcb77 100644
--- a/drivers/gpio/gpio-ml-ioh.c
+++ b/drivers/gpio/gpio-ml-ioh.c
@@ -248,7 +248,7 @@ static void ioh_gpio_setup(struct ioh_gpio *chip, int num_port)
static int ioh_irq_type(struct irq_data *d, unsigned int type)
{
u32 im;
- u32 *im_reg;
+ void __iomem *im_reg;
u32 ien;
u32 im_pos;
int ch;
@@ -332,6 +332,34 @@ static void ioh_irq_mask(struct irq_data *d)
&chip->reg->regs[chip->ch].imask);
}
+static void ioh_irq_disable(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct ioh_gpio *chip = gc->private;
+ unsigned long flags;
+ u32 ien;
+
+ spin_lock_irqsave(&chip->spinlock, flags);
+ ien = ioread32(&chip->reg->regs[chip->ch].ien);
+ ien &= ~(1 << (d->irq - chip->irq_base));
+ iowrite32(ien, &chip->reg->regs[chip->ch].ien);
+ spin_unlock_irqrestore(&chip->spinlock, flags);
+}
+
+static void ioh_irq_enable(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct ioh_gpio *chip = gc->private;
+ unsigned long flags;
+ u32 ien;
+
+ spin_lock_irqsave(&chip->spinlock, flags);
+ ien = ioread32(&chip->reg->regs[chip->ch].ien);
+ ien |= 1 << (d->irq - chip->irq_base);
+ iowrite32(ien, &chip->reg->regs[chip->ch].ien);
+ spin_unlock_irqrestore(&chip->spinlock, flags);
+}
+
static irqreturn_t ioh_gpio_handler(int irq, void *dev_id)
{
struct ioh_gpio *chip = dev_id;
@@ -339,7 +367,7 @@ static irqreturn_t ioh_gpio_handler(int irq, void *dev_id)
int i, j;
int ret = IRQ_NONE;
- for (i = 0; i < 8; i++) {
+ for (i = 0; i < 8; i++, chip++) {
reg_val = ioread32(&chip->reg->regs[i].istatus);
for (j = 0; j < num_ports[i]; j++) {
if (reg_val & BIT(j)) {
@@ -370,6 +398,8 @@ static __devinit void ioh_gpio_alloc_generic_chip(struct ioh_gpio *chip,
ct->chip.irq_mask = ioh_irq_mask;
ct->chip.irq_unmask = ioh_irq_unmask;
ct->chip.irq_set_type = ioh_irq_type;
+ ct->chip.irq_disable = ioh_irq_disable;
+ ct->chip.irq_enable = ioh_irq_enable;
irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE,
IRQ_NOREQUEST | IRQ_NOPROBE, 0);
@@ -382,7 +412,7 @@ static int __devinit ioh_gpio_probe(struct pci_dev *pdev,
int i, j;
struct ioh_gpio *chip;
void __iomem *base;
- void __iomem *chip_save;
+ void *chip_save;
int irq_base;
ret = pci_enable_device(pdev);
@@ -398,7 +428,7 @@ static int __devinit ioh_gpio_probe(struct pci_dev *pdev,
}
base = pci_iomap(pdev, 1, 0);
- if (base == 0) {
+ if (!base) {
dev_err(&pdev->dev, "%s : pci_iomap failed", __func__);
ret = -ENOMEM;
goto err_iomap;
@@ -491,7 +521,7 @@ static void __devexit ioh_gpio_remove(struct pci_dev *pdev)
int err;
int i;
struct ioh_gpio *chip = pci_get_drvdata(pdev);
- void __iomem *chip_save;
+ void *chip_save;
chip_save = chip;
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index ec3fcf0a7e12..5cd04b65c556 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -132,6 +132,15 @@ static int mpc8xxx_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val
return 0;
}
+static int mpc5121_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+ /* GPIO 28..31 are input only on MPC5121 */
+ if (gpio >= 28)
+ return -EINVAL;
+
+ return mpc8xxx_gpio_dir_out(gc, gpio, val);
+}
+
static int mpc8xxx_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
{
struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc);
@@ -340,11 +349,10 @@ static void __init mpc8xxx_add_controller(struct device_node *np)
mm_gc->save_regs = mpc8xxx_gpio_save_regs;
gc->ngpio = MPC8XXX_GPIO_PINS;
gc->direction_input = mpc8xxx_gpio_dir_in;
- gc->direction_output = mpc8xxx_gpio_dir_out;
- if (of_device_is_compatible(np, "fsl,mpc8572-gpio"))
- gc->get = mpc8572_gpio_get;
- else
- gc->get = mpc8xxx_gpio_get;
+ gc->direction_output = of_device_is_compatible(np, "fsl,mpc5121-gpio") ?
+ mpc5121_gpio_dir_out : mpc8xxx_gpio_dir_out;
+ gc->get = of_device_is_compatible(np, "fsl,mpc8572-gpio") ?
+ mpc8572_gpio_get : mpc8xxx_gpio_get;
gc->set = mpc8xxx_gpio_set;
gc->to_irq = mpc8xxx_gpio_to_irq;
diff --git a/drivers/gpio/gpio-nomadik.c b/drivers/gpio/gpio-nomadik.c
index 1ebedfb6d46d..839624f9fe6a 100644
--- a/drivers/gpio/gpio-nomadik.c
+++ b/drivers/gpio/gpio-nomadik.c
@@ -1150,8 +1150,8 @@ static int __devinit nmk_gpio_probe(struct platform_device *dev)
nmk_gpio_init_irq(nmk_chip);
- dev_info(&dev->dev, "Bits %i-%i at address %p\n",
- nmk_chip->chip.base, nmk_chip->chip.base+31, nmk_chip->addr);
+ dev_info(&dev->dev, "at address %p\n",
+ nmk_chip->addr);
return 0;
out_free:
diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c
index 3e1f1ecd07be..2d1de9e7e9bd 100644
--- a/drivers/gpio/gpio-pcf857x.c
+++ b/drivers/gpio/gpio-pcf857x.c
@@ -290,10 +290,7 @@ static int pcf857x_probe(struct i2c_client *client,
* methods can't be called from sleeping contexts.
*/
- dev_info(&client->dev, "gpios %d..%d on a %s%s\n",
- gpio->chip.base,
- gpio->chip.base + gpio->chip.ngpio - 1,
- client->name,
+ dev_info(&client->dev, "%s\n",
client->irq ? " (irq ignored)" : "");
/* Let platform code set up the GPIOs and their users.
diff --git a/drivers/gpio/gpio-pch.c b/drivers/gpio/gpio-pch.c
index a6008e123d04..68fa55e86eb1 100644
--- a/drivers/gpio/gpio-pch.c
+++ b/drivers/gpio/gpio-pch.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
+ * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -49,8 +49,8 @@ struct pch_regs {
enum pch_type_t {
INTEL_EG20T_PCH,
- OKISEMI_ML7223m_IOH, /* OKISEMI ML7223 IOH PCIe Bus-m */
- OKISEMI_ML7223n_IOH /* OKISEMI ML7223 IOH PCIe Bus-n */
+ OKISEMI_ML7223m_IOH, /* LAPIS Semiconductor ML7223 IOH PCIe Bus-m */
+ OKISEMI_ML7223n_IOH /* LAPIS Semiconductor ML7223 IOH PCIe Bus-n */
};
/* Specifies number of GPIO PINS */
@@ -231,7 +231,7 @@ static void pch_gpio_setup(struct pch_gpio *chip)
static int pch_irq_type(struct irq_data *d, unsigned int type)
{
u32 im;
- u32 *im_reg;
+ u32 __iomem *im_reg;
u32 ien;
u32 im_pos;
int ch;
@@ -376,7 +376,7 @@ static int __devinit pch_gpio_probe(struct pci_dev *pdev,
}
chip->base = pci_iomap(pdev, 1, 0);
- if (chip->base == 0) {
+ if (!chip->base) {
dev_err(&pdev->dev, "%s : pci_iomap FAILED", __func__);
ret = -ENOMEM;
goto err_iomap;
@@ -524,6 +524,7 @@ static DEFINE_PCI_DEVICE_TABLE(pch_gpio_pcidev_id) = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x8803) },
{ PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8014) },
{ PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8043) },
+ { PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8803) },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, pch_gpio_pcidev_id);
diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c
index 093c90bd3c1d..77c9cc70fa77 100644
--- a/drivers/gpio/gpio-pl061.c
+++ b/drivers/gpio/gpio-pl061.c
@@ -12,7 +12,6 @@
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/module.h>
-#include <linux/list.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/irq.h>
@@ -23,6 +22,8 @@
#include <linux/amba/bus.h>
#include <linux/amba/pl061.h>
#include <linux/slab.h>
+#include <linux/pm.h>
+#include <asm/mach/irq.h>
#define GPIODIR 0x400
#define GPIOIS 0x404
@@ -35,25 +36,33 @@
#define PL061_GPIO_NR 8
-struct pl061_gpio {
- /* We use a list of pl061_gpio structs for each trigger IRQ in the main
- * interrupts controller of the system. We need this to support systems
- * in which more that one PL061s are connected to the same IRQ. The ISR
- * interates through this list to find the source of the interrupt.
- */
- struct list_head list;
+#ifdef CONFIG_PM
+struct pl061_context_save_regs {
+ u8 gpio_data;
+ u8 gpio_dir;
+ u8 gpio_is;
+ u8 gpio_ibe;
+ u8 gpio_iev;
+ u8 gpio_ie;
+};
+#endif
+struct pl061_gpio {
/* Each of the two spinlocks protects a different set of hardware
	 * registers and data structures. This decouples the code of the IRQ from
* the GPIO code. This also makes the case of a GPIO routine call from
* the IRQ code simpler.
*/
spinlock_t lock; /* GPIO registers */
- spinlock_t irq_lock; /* IRQ registers */
void __iomem *base;
- unsigned irq_base;
+ int irq_base;
+ struct irq_chip_generic *irq_gc;
struct gpio_chip gc;
+
+#ifdef CONFIG_PM
+ struct pl061_context_save_regs csave_regs;
+#endif
};
static int pl061_direction_input(struct gpio_chip *gc, unsigned offset)
@@ -118,46 +127,16 @@ static int pl061_to_irq(struct gpio_chip *gc, unsigned offset)
{
struct pl061_gpio *chip = container_of(gc, struct pl061_gpio, gc);
- if (chip->irq_base == NO_IRQ)
+ if (chip->irq_base <= 0)
return -EINVAL;
return chip->irq_base + offset;
}
-/*
- * PL061 GPIO IRQ
- */
-static void pl061_irq_disable(struct irq_data *d)
-{
- struct pl061_gpio *chip = irq_data_get_irq_chip_data(d);
- int offset = d->irq - chip->irq_base;
- unsigned long flags;
- u8 gpioie;
-
- spin_lock_irqsave(&chip->irq_lock, flags);
- gpioie = readb(chip->base + GPIOIE);
- gpioie &= ~(1 << offset);
- writeb(gpioie, chip->base + GPIOIE);
- spin_unlock_irqrestore(&chip->irq_lock, flags);
-}
-
-static void pl061_irq_enable(struct irq_data *d)
-{
- struct pl061_gpio *chip = irq_data_get_irq_chip_data(d);
- int offset = d->irq - chip->irq_base;
- unsigned long flags;
- u8 gpioie;
-
- spin_lock_irqsave(&chip->irq_lock, flags);
- gpioie = readb(chip->base + GPIOIE);
- gpioie |= 1 << offset;
- writeb(gpioie, chip->base + GPIOIE);
- spin_unlock_irqrestore(&chip->irq_lock, flags);
-}
-
static int pl061_irq_type(struct irq_data *d, unsigned trigger)
{
- struct pl061_gpio *chip = irq_data_get_irq_chip_data(d);
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct pl061_gpio *chip = gc->private;
int offset = d->irq - chip->irq_base;
unsigned long flags;
u8 gpiois, gpioibe, gpioiev;
@@ -165,7 +144,7 @@ static int pl061_irq_type(struct irq_data *d, unsigned trigger)
if (offset < 0 || offset >= PL061_GPIO_NR)
return -EINVAL;
- spin_lock_irqsave(&chip->irq_lock, flags);
+ raw_spin_lock_irqsave(&gc->lock, flags);
gpioiev = readb(chip->base + GPIOIEV);
@@ -194,53 +173,54 @@ static int pl061_irq_type(struct irq_data *d, unsigned trigger)
writeb(gpioiev, chip->base + GPIOIEV);
- spin_unlock_irqrestore(&chip->irq_lock, flags);
+ raw_spin_unlock_irqrestore(&gc->lock, flags);
return 0;
}
-static struct irq_chip pl061_irqchip = {
- .name = "GPIO",
- .irq_enable = pl061_irq_enable,
- .irq_disable = pl061_irq_disable,
- .irq_set_type = pl061_irq_type,
-};
-
static void pl061_irq_handler(unsigned irq, struct irq_desc *desc)
{
- struct list_head *chip_list = irq_get_handler_data(irq);
- struct list_head *ptr;
- struct pl061_gpio *chip;
-
- desc->irq_data.chip->irq_ack(&desc->irq_data);
- list_for_each(ptr, chip_list) {
- unsigned long pending;
- int offset;
-
- chip = list_entry(ptr, struct pl061_gpio, list);
- pending = readb(chip->base + GPIOMIS);
- writeb(pending, chip->base + GPIOIC);
+ unsigned long pending;
+ int offset;
+ struct pl061_gpio *chip = irq_desc_get_handler_data(desc);
+ struct irq_chip *irqchip = irq_desc_get_chip(desc);
- if (pending == 0)
- continue;
+ chained_irq_enter(irqchip, desc);
+ pending = readb(chip->base + GPIOMIS);
+ writeb(pending, chip->base + GPIOIC);
+ if (pending) {
for_each_set_bit(offset, &pending, PL061_GPIO_NR)
generic_handle_irq(pl061_to_irq(&chip->gc, offset));
}
- desc->irq_data.chip->irq_unmask(&desc->irq_data);
+
+ chained_irq_exit(irqchip, desc);
+}
+
+static void __init pl061_init_gc(struct pl061_gpio *chip, int irq_base)
+{
+ struct irq_chip_type *ct;
+
+ chip->irq_gc = irq_alloc_generic_chip("gpio-pl061", 1, irq_base,
+ chip->base, handle_simple_irq);
+ chip->irq_gc->private = chip;
+
+ ct = chip->irq_gc->chip_types;
+ ct->chip.irq_mask = irq_gc_mask_clr_bit;
+ ct->chip.irq_unmask = irq_gc_mask_set_bit;
+ ct->chip.irq_set_type = pl061_irq_type;
+ ct->chip.irq_set_wake = irq_gc_set_wake;
+ ct->regs.mask = GPIOIE;
+
+ irq_setup_generic_chip(chip->irq_gc, IRQ_MSK(PL061_GPIO_NR),
+ IRQ_GC_INIT_NESTED_LOCK, IRQ_NOREQUEST, 0);
}
static int pl061_probe(struct amba_device *dev, const struct amba_id *id)
{
struct pl061_platform_data *pdata;
struct pl061_gpio *chip;
- struct list_head *chip_list;
int ret, irq, i;
- static DECLARE_BITMAP(init_irq, NR_IRQS);
-
- pdata = dev->dev.platform_data;
- if (pdata == NULL)
- return -ENODEV;
chip = kzalloc(sizeof(*chip), GFP_KERNEL);
if (chip == NULL)
@@ -252,7 +232,7 @@ static int pl061_probe(struct amba_device *dev, const struct amba_id *id)
chip->irq_base = pdata->irq_base;
} else if (dev->dev.of_node) {
chip->gc.base = -1;
- chip->irq_base = NO_IRQ;
+ chip->irq_base = 0;
} else {
ret = -ENODEV;
goto free_mem;
@@ -271,8 +251,6 @@ static int pl061_probe(struct amba_device *dev, const struct amba_id *id)
}
spin_lock_init(&chip->lock);
- spin_lock_init(&chip->irq_lock);
- INIT_LIST_HEAD(&chip->list);
chip->gc.direction_input = pl061_direction_input;
chip->gc.direction_output = pl061_direction_output;
@@ -292,9 +270,11 @@ static int pl061_probe(struct amba_device *dev, const struct amba_id *id)
* irq_chip support
*/
- if (chip->irq_base == NO_IRQ)
+ if (chip->irq_base <= 0)
return 0;
+ pl061_init_gc(chip, chip->irq_base);
+
writeb(0, chip->base + GPIOIE); /* disable irqs */
irq = dev->irq[0];
if (irq < 0) {
@@ -302,18 +282,7 @@ static int pl061_probe(struct amba_device *dev, const struct amba_id *id)
goto iounmap;
}
irq_set_chained_handler(irq, pl061_irq_handler);
- if (!test_and_set_bit(irq, init_irq)) { /* list initialized? */
- chip_list = kmalloc(sizeof(*chip_list), GFP_KERNEL);
- if (chip_list == NULL) {
- clear_bit(irq, init_irq);
- ret = -ENOMEM;
- goto iounmap;
- }
- INIT_LIST_HEAD(chip_list);
- irq_set_handler_data(irq, chip_list);
- } else
- chip_list = irq_get_handler_data(irq);
- list_add(&chip->list, chip_list);
+ irq_set_handler_data(irq, chip);
for (i = 0; i < PL061_GPIO_NR; i++) {
if (pdata) {
@@ -323,13 +292,10 @@ static int pl061_probe(struct amba_device *dev, const struct amba_id *id)
else
pl061_direction_input(&chip->gc, i);
}
-
- irq_set_chip_and_handler(i + chip->irq_base, &pl061_irqchip,
- handle_simple_irq);
- set_irq_flags(i+chip->irq_base, IRQF_VALID);
- irq_set_chip_data(i + chip->irq_base, chip);
}
+ amba_set_drvdata(dev, chip);
+
return 0;
iounmap:
@@ -342,6 +308,53 @@ free_mem:
return ret;
}
+#ifdef CONFIG_PM
+static int pl061_suspend(struct device *dev)
+{
+ struct pl061_gpio *chip = dev_get_drvdata(dev);
+ int offset;
+
+ chip->csave_regs.gpio_data = 0;
+ chip->csave_regs.gpio_dir = readb(chip->base + GPIODIR);
+ chip->csave_regs.gpio_is = readb(chip->base + GPIOIS);
+ chip->csave_regs.gpio_ibe = readb(chip->base + GPIOIBE);
+ chip->csave_regs.gpio_iev = readb(chip->base + GPIOIEV);
+ chip->csave_regs.gpio_ie = readb(chip->base + GPIOIE);
+
+ for (offset = 0; offset < PL061_GPIO_NR; offset++) {
+ if (chip->csave_regs.gpio_dir & (1 << offset))
+ chip->csave_regs.gpio_data |=
+ pl061_get_value(&chip->gc, offset) << offset;
+ }
+
+ return 0;
+}
+
+static int pl061_resume(struct device *dev)
+{
+ struct pl061_gpio *chip = dev_get_drvdata(dev);
+ int offset;
+
+ for (offset = 0; offset < PL061_GPIO_NR; offset++) {
+ if (chip->csave_regs.gpio_dir & (1 << offset))
+ pl061_direction_output(&chip->gc, offset,
+ chip->csave_regs.gpio_data &
+ (1 << offset));
+ else
+ pl061_direction_input(&chip->gc, offset);
+ }
+
+ writeb(chip->csave_regs.gpio_is, chip->base + GPIOIS);
+ writeb(chip->csave_regs.gpio_ibe, chip->base + GPIOIBE);
+ writeb(chip->csave_regs.gpio_iev, chip->base + GPIOIEV);
+ writeb(chip->csave_regs.gpio_ie, chip->base + GPIOIE);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(pl061_dev_pm_ops, pl061_suspend, pl061_resume);
+#endif
+
static struct amba_id pl061_ids[] = {
{
.id = 0x00041061,
@@ -350,9 +363,14 @@ static struct amba_id pl061_ids[] = {
{ 0, 0 },
};
+MODULE_DEVICE_TABLE(amba, pl061_ids);
+
static struct amba_driver pl061_gpio_driver = {
.drv = {
.name = "pl061_gpio",
+#ifdef CONFIG_PM
+ .pm = &pl061_dev_pm_ops,
+#endif
},
.id_table = pl061_ids,
.probe = pl061_probe,
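
The static SIMPLE_DEV_PM_OPS() line added above is, per the 3.3-era <linux/pm.h>, roughly equivalent to spelling out the system-sleep callbacks by hand (sketch, field list abridged and only populated when CONFIG_PM_SLEEP is set):

static const struct dev_pm_ops pl061_dev_pm_ops = {
	.suspend	= pl061_suspend,
	.resume		= pl061_resume,
	.freeze		= pl061_suspend,
	.thaw		= pl061_resume,
	.poweroff	= pl061_suspend,
	.restore	= pl061_resume,
};
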
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index ee137712f9db..b2d3ee1d183a 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -11,14 +11,46 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+#include <linux/clk.h>
+#include <linux/err.h>
#include <linux/gpio.h>
+#include <linux/gpio-pxa.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/io.h>
+#include <linux/platform_device.h>
#include <linux/syscore_ops.h>
#include <linux/slab.h>
-#include <mach/gpio-pxa.h>
+/*
+ * We handle the GPIOs by banks, each bank covers up to 32 GPIOs with
+ * one set of registers. The register offsets are organized below:
+ *
+ * GPLR GPDR GPSR GPCR GRER GFER GEDR
+ * BANK 0 - 0x0000 0x000C 0x0018 0x0024 0x0030 0x003C 0x0048
+ * BANK 1 - 0x0004 0x0010 0x001C 0x0028 0x0034 0x0040 0x004C
+ * BANK 2 - 0x0008 0x0014 0x0020 0x002C 0x0038 0x0044 0x0050
+ *
+ * BANK 3 - 0x0100 0x010C 0x0118 0x0124 0x0130 0x013C 0x0148
+ * BANK 4 - 0x0104 0x0110 0x011C 0x0128 0x0134 0x0140 0x014C
+ * BANK 5 - 0x0108 0x0114 0x0120 0x012C 0x0138 0x0144 0x0150
+ *
+ * NOTE:
+ * BANK 3 is only available on PXA27x and later processors.
+ * BANK 4 and 5 are only available on PXA935
+ */
+
+#define GPLR_OFFSET 0x00
+#define GPDR_OFFSET 0x0C
+#define GPSR_OFFSET 0x18
+#define GPCR_OFFSET 0x24
+#define GRER_OFFSET 0x30
+#define GFER_OFFSET 0x3C
+#define GEDR_OFFSET 0x48
+#define GAFR_OFFSET 0x54
+#define ED_MASK_OFFSET 0x9C /* GPIO edge detection for AP side */
+
+#define BANK_OFF(n) (((n) < 3) ? (n) << 2 : 0x100 + (((n) - 3) << 2))
int pxa_last_gpio;
@@ -39,8 +71,20 @@ struct pxa_gpio_chip {
#endif
};
+enum {
+ PXA25X_GPIO = 0,
+ PXA26X_GPIO,
+ PXA27X_GPIO,
+ PXA3XX_GPIO,
+ PXA93X_GPIO,
+ MMP_GPIO = 0x10,
+ MMP2_GPIO,
+};
+
static DEFINE_SPINLOCK(gpio_lock);
static struct pxa_gpio_chip *pxa_gpio_chips;
+static int gpio_type;
+static void __iomem *gpio_reg_base;
#define for_each_gpio_chip(i, c) \
for (i = 0, c = &pxa_gpio_chips[0]; i <= pxa_last_gpio; i += 32, c++)
@@ -55,6 +99,122 @@ static inline struct pxa_gpio_chip *gpio_to_pxachip(unsigned gpio)
return &pxa_gpio_chips[gpio_to_bank(gpio)];
}
+static inline int gpio_is_pxa_type(int type)
+{
+ return (type & MMP_GPIO) == 0;
+}
+
+static inline int gpio_is_mmp_type(int type)
+{
+ return (type & MMP_GPIO) != 0;
+}
+
+/* GPIO86/87/88/89 on PXA26x have their direction bits in PXA_GPDR(2 inverted,
+ * as well as their Alternate Function value being '1' for GPIO in GAFRx.
+ */
+static inline int __gpio_is_inverted(int gpio)
+{
+ if ((gpio_type == PXA26X_GPIO) && (gpio > 85))
+ return 1;
+ return 0;
+}
+
+/*
+ * On PXA25x and PXA27x, GAFRx and GPDRx together decide the alternate
+ * function of a GPIO, and GPDRx cannot be altered once configured. It
+ * is attributed as "occupied" here (I know this terminology isn't
+ * accurate, you are welcome to propose a better one :-)
+ */
+static inline int __gpio_is_occupied(unsigned gpio)
+{
+ struct pxa_gpio_chip *pxachip;
+ void __iomem *base;
+ unsigned long gafr = 0, gpdr = 0;
+ int ret, af = 0, dir = 0;
+
+ pxachip = gpio_to_pxachip(gpio);
+ base = gpio_chip_base(&pxachip->chip);
+ gpdr = readl_relaxed(base + GPDR_OFFSET);
+
+ switch (gpio_type) {
+ case PXA25X_GPIO:
+ case PXA26X_GPIO:
+ case PXA27X_GPIO:
+ gafr = readl_relaxed(base + GAFR_OFFSET);
+ af = (gafr >> ((gpio & 0xf) * 2)) & 0x3;
+ dir = gpdr & GPIO_bit(gpio);
+
+ if (__gpio_is_inverted(gpio))
+ ret = (af != 1) || (dir == 0);
+ else
+ ret = (af != 0) || (dir != 0);
+ break;
+ default:
+ ret = gpdr & GPIO_bit(gpio);
+ break;
+ }
+ return ret;
+}
+
+#ifdef CONFIG_ARCH_PXA
+static inline int __pxa_gpio_to_irq(int gpio)
+{
+ if (gpio_is_pxa_type(gpio_type))
+ return PXA_GPIO_TO_IRQ(gpio);
+ return -1;
+}
+
+static inline int __pxa_irq_to_gpio(int irq)
+{
+ if (gpio_is_pxa_type(gpio_type))
+ return irq - PXA_GPIO_TO_IRQ(0);
+ return -1;
+}
+#else
+static inline int __pxa_gpio_to_irq(int gpio) { return -1; }
+static inline int __pxa_irq_to_gpio(int irq) { return -1; }
+#endif
+
+#ifdef CONFIG_ARCH_MMP
+static inline int __mmp_gpio_to_irq(int gpio)
+{
+ if (gpio_is_mmp_type(gpio_type))
+ return MMP_GPIO_TO_IRQ(gpio);
+ return -1;
+}
+
+static inline int __mmp_irq_to_gpio(int irq)
+{
+ if (gpio_is_mmp_type(gpio_type))
+ return irq - MMP_GPIO_TO_IRQ(0);
+ return -1;
+}
+#else
+static inline int __mmp_gpio_to_irq(int gpio) { return -1; }
+static inline int __mmp_irq_to_gpio(int irq) { return -1; }
+#endif
+
+static int pxa_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ int gpio, ret;
+
+ gpio = chip->base + offset;
+ ret = __pxa_gpio_to_irq(gpio);
+ if (ret >= 0)
+ return ret;
+ return __mmp_gpio_to_irq(gpio);
+}
+
+int pxa_irq_to_gpio(int irq)
+{
+ int ret;
+
+ ret = __pxa_irq_to_gpio(irq);
+ if (ret >= 0)
+ return ret;
+ return __mmp_irq_to_gpio(irq);
+}
+
static int pxa_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
void __iomem *base = gpio_chip_base(chip);
@@ -63,12 +223,12 @@ static int pxa_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
spin_lock_irqsave(&gpio_lock, flags);
- value = __raw_readl(base + GPDR_OFFSET);
+ value = readl_relaxed(base + GPDR_OFFSET);
if (__gpio_is_inverted(chip->base + offset))
value |= mask;
else
value &= ~mask;
- __raw_writel(value, base + GPDR_OFFSET);
+ writel_relaxed(value, base + GPDR_OFFSET);
spin_unlock_irqrestore(&gpio_lock, flags);
return 0;
@@ -81,16 +241,16 @@ static int pxa_gpio_direction_output(struct gpio_chip *chip,
uint32_t tmp, mask = 1 << offset;
unsigned long flags;
- __raw_writel(mask, base + (value ? GPSR_OFFSET : GPCR_OFFSET));
+ writel_relaxed(mask, base + (value ? GPSR_OFFSET : GPCR_OFFSET));
spin_lock_irqsave(&gpio_lock, flags);
- tmp = __raw_readl(base + GPDR_OFFSET);
+ tmp = readl_relaxed(base + GPDR_OFFSET);
if (__gpio_is_inverted(chip->base + offset))
tmp &= ~mask;
else
tmp |= mask;
- __raw_writel(tmp, base + GPDR_OFFSET);
+ writel_relaxed(tmp, base + GPDR_OFFSET);
spin_unlock_irqrestore(&gpio_lock, flags);
return 0;
@@ -98,16 +258,16 @@ static int pxa_gpio_direction_output(struct gpio_chip *chip,
static int pxa_gpio_get(struct gpio_chip *chip, unsigned offset)
{
- return __raw_readl(gpio_chip_base(chip) + GPLR_OFFSET) & (1 << offset);
+ return readl_relaxed(gpio_chip_base(chip) + GPLR_OFFSET) & (1 << offset);
}
static void pxa_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
- __raw_writel(1 << offset, gpio_chip_base(chip) +
+ writel_relaxed(1 << offset, gpio_chip_base(chip) +
(value ? GPSR_OFFSET : GPCR_OFFSET));
}
-static int __init pxa_init_gpio_chip(int gpio_end)
+static int __devinit pxa_init_gpio_chip(int gpio_end)
{
int i, gpio, nbanks = gpio_to_bank(gpio_end) + 1;
struct pxa_gpio_chip *chips;
@@ -122,7 +282,7 @@ static int __init pxa_init_gpio_chip(int gpio_end)
struct gpio_chip *c = &chips[i].chip;
sprintf(chips[i].label, "gpio-%d", i);
- chips[i].regbase = GPIO_BANK(i);
+ chips[i].regbase = gpio_reg_base + BANK_OFF(i);
c->base = gpio;
c->label = chips[i].label;
@@ -131,6 +291,7 @@ static int __init pxa_init_gpio_chip(int gpio_end)
c->direction_output = pxa_gpio_direction_output;
c->get = pxa_gpio_get;
c->set = pxa_gpio_set;
+ c->to_irq = pxa_gpio_to_irq;
/* number of GPIOs on last bank may be less than 32 */
c->ngpio = (gpio + 31 > gpio_end) ? (gpio_end - gpio + 1) : 32;
@@ -147,18 +308,18 @@ static inline void update_edge_detect(struct pxa_gpio_chip *c)
{
uint32_t grer, gfer;
- grer = __raw_readl(c->regbase + GRER_OFFSET) & ~c->irq_mask;
- gfer = __raw_readl(c->regbase + GFER_OFFSET) & ~c->irq_mask;
+ grer = readl_relaxed(c->regbase + GRER_OFFSET) & ~c->irq_mask;
+ gfer = readl_relaxed(c->regbase + GFER_OFFSET) & ~c->irq_mask;
grer |= c->irq_edge_rise & c->irq_mask;
gfer |= c->irq_edge_fall & c->irq_mask;
- __raw_writel(grer, c->regbase + GRER_OFFSET);
- __raw_writel(gfer, c->regbase + GFER_OFFSET);
+ writel_relaxed(grer, c->regbase + GRER_OFFSET);
+ writel_relaxed(gfer, c->regbase + GFER_OFFSET);
}
static int pxa_gpio_irq_type(struct irq_data *d, unsigned int type)
{
struct pxa_gpio_chip *c;
- int gpio = irq_to_gpio(d->irq);
+ int gpio = pxa_irq_to_gpio(d->irq);
unsigned long gpdr, mask = GPIO_bit(gpio);
c = gpio_to_pxachip(gpio);
@@ -176,12 +337,12 @@ static int pxa_gpio_irq_type(struct irq_data *d, unsigned int type)
type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
}
- gpdr = __raw_readl(c->regbase + GPDR_OFFSET);
+ gpdr = readl_relaxed(c->regbase + GPDR_OFFSET);
if (__gpio_is_inverted(gpio))
- __raw_writel(gpdr | mask, c->regbase + GPDR_OFFSET);
+ writel_relaxed(gpdr | mask, c->regbase + GPDR_OFFSET);
else
- __raw_writel(gpdr & ~mask, c->regbase + GPDR_OFFSET);
+ writel_relaxed(gpdr & ~mask, c->regbase + GPDR_OFFSET);
if (type & IRQ_TYPE_EDGE_RISING)
c->irq_edge_rise |= mask;
@@ -212,9 +373,9 @@ static void pxa_gpio_demux_handler(unsigned int irq, struct irq_desc *desc)
for_each_gpio_chip(gpio, c) {
gpio_base = c->chip.base;
- gedr = __raw_readl(c->regbase + GEDR_OFFSET);
+ gedr = readl_relaxed(c->regbase + GEDR_OFFSET);
gedr = gedr & c->irq_mask;
- __raw_writel(gedr, c->regbase + GEDR_OFFSET);
+ writel_relaxed(gedr, c->regbase + GEDR_OFFSET);
n = find_first_bit(&gedr, BITS_PER_LONG);
while (n < BITS_PER_LONG) {
@@ -229,29 +390,29 @@ static void pxa_gpio_demux_handler(unsigned int irq, struct irq_desc *desc)
static void pxa_ack_muxed_gpio(struct irq_data *d)
{
- int gpio = irq_to_gpio(d->irq);
+ int gpio = pxa_irq_to_gpio(d->irq);
struct pxa_gpio_chip *c = gpio_to_pxachip(gpio);
- __raw_writel(GPIO_bit(gpio), c->regbase + GEDR_OFFSET);
+ writel_relaxed(GPIO_bit(gpio), c->regbase + GEDR_OFFSET);
}
static void pxa_mask_muxed_gpio(struct irq_data *d)
{
- int gpio = irq_to_gpio(d->irq);
+ int gpio = pxa_irq_to_gpio(d->irq);
struct pxa_gpio_chip *c = gpio_to_pxachip(gpio);
uint32_t grer, gfer;
c->irq_mask &= ~GPIO_bit(gpio);
- grer = __raw_readl(c->regbase + GRER_OFFSET) & ~GPIO_bit(gpio);
- gfer = __raw_readl(c->regbase + GFER_OFFSET) & ~GPIO_bit(gpio);
- __raw_writel(grer, c->regbase + GRER_OFFSET);
- __raw_writel(gfer, c->regbase + GFER_OFFSET);
+ grer = readl_relaxed(c->regbase + GRER_OFFSET) & ~GPIO_bit(gpio);
+ gfer = readl_relaxed(c->regbase + GFER_OFFSET) & ~GPIO_bit(gpio);
+ writel_relaxed(grer, c->regbase + GRER_OFFSET);
+ writel_relaxed(gfer, c->regbase + GFER_OFFSET);
}
static void pxa_unmask_muxed_gpio(struct irq_data *d)
{
- int gpio = irq_to_gpio(d->irq);
+ int gpio = pxa_irq_to_gpio(d->irq);
struct pxa_gpio_chip *c = gpio_to_pxachip(gpio);
c->irq_mask |= GPIO_bit(gpio);
@@ -266,34 +427,143 @@ static struct irq_chip pxa_muxed_gpio_chip = {
.irq_set_type = pxa_gpio_irq_type,
};
-void __init pxa_init_gpio(int mux_irq, int start, int end, set_wake_t fn)
+static int pxa_gpio_nums(void)
{
- struct pxa_gpio_chip *c;
- int gpio, irq;
+ int count = 0;
+
+#ifdef CONFIG_ARCH_PXA
+ if (cpu_is_pxa25x()) {
+#ifdef CONFIG_CPU_PXA26x
+ count = 89;
+ gpio_type = PXA26X_GPIO;
+#elif defined(CONFIG_PXA25x)
+ count = 84;
+ gpio_type = PXA25X_GPIO;
+#endif /* CONFIG_CPU_PXA26x */
+ } else if (cpu_is_pxa27x()) {
+ count = 120;
+ gpio_type = PXA27X_GPIO;
+ } else if (cpu_is_pxa93x() || cpu_is_pxa95x()) {
+ count = 191;
+ gpio_type = PXA93X_GPIO;
+ } else if (cpu_is_pxa3xx()) {
+ count = 127;
+ gpio_type = PXA3XX_GPIO;
+ }
+#endif /* CONFIG_ARCH_PXA */
+
+#ifdef CONFIG_ARCH_MMP
+ if (cpu_is_pxa168() || cpu_is_pxa910()) {
+ count = 127;
+ gpio_type = MMP_GPIO;
+ } else if (cpu_is_mmp2()) {
+ count = 191;
+ gpio_type = MMP2_GPIO;
+ }
+#endif /* CONFIG_ARCH_MMP */
+ return count;
+}
- pxa_last_gpio = end;
+static int __devinit pxa_gpio_probe(struct platform_device *pdev)
+{
+ struct pxa_gpio_chip *c;
+ struct resource *res;
+ struct clk *clk;
+ int gpio, irq, ret;
+ int irq0 = 0, irq1 = 0, irq_mux, gpio_offset = 0;
+
+ pxa_last_gpio = pxa_gpio_nums();
+ if (!pxa_last_gpio)
+ return -EINVAL;
+
+ irq0 = platform_get_irq_byname(pdev, "gpio0");
+ irq1 = platform_get_irq_byname(pdev, "gpio1");
+ irq_mux = platform_get_irq_byname(pdev, "gpio_mux");
+ if ((irq0 > 0 && irq1 <= 0) || (irq0 <= 0 && irq1 > 0)
+ || (irq_mux <= 0))
+ return -EINVAL;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
+ gpio_reg_base = ioremap(res->start, resource_size(res));
+ if (!gpio_reg_base)
+ return -EINVAL;
+
+ if (irq0 > 0)
+ gpio_offset = 2;
+
+ clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "Error %ld getting gpio clock\n",
+ PTR_ERR(clk));
+ iounmap(gpio_reg_base);
+ return PTR_ERR(clk);
+ }
+ ret = clk_prepare(clk);
+ if (ret) {
+ clk_put(clk);
+ iounmap(gpio_reg_base);
+ return ret;
+ }
+ ret = clk_enable(clk);
+ if (ret) {
+ clk_unprepare(clk);
+ clk_put(clk);
+ iounmap(gpio_reg_base);
+ return ret;
+ }
/* Initialize GPIO chips */
- pxa_init_gpio_chip(end);
+ pxa_init_gpio_chip(pxa_last_gpio);
/* clear all GPIO edge detects */
for_each_gpio_chip(gpio, c) {
- __raw_writel(0, c->regbase + GFER_OFFSET);
- __raw_writel(0, c->regbase + GRER_OFFSET);
- __raw_writel(~0,c->regbase + GEDR_OFFSET);
+ writel_relaxed(0, c->regbase + GFER_OFFSET);
+ writel_relaxed(0, c->regbase + GRER_OFFSET);
+ writel_relaxed(~0,c->regbase + GEDR_OFFSET);
+ /* unmask GPIO edge detect for AP side */
+ if (gpio_is_mmp_type(gpio_type))
+ writel_relaxed(~0, c->regbase + ED_MASK_OFFSET);
}
- for (irq = gpio_to_irq(start); irq <= gpio_to_irq(end); irq++) {
+#ifdef CONFIG_ARCH_PXA
+ irq = gpio_to_irq(0);
+ irq_set_chip_and_handler(irq, &pxa_muxed_gpio_chip,
+ handle_edge_irq);
+ set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+ irq_set_chained_handler(IRQ_GPIO0, pxa_gpio_demux_handler);
+
+ irq = gpio_to_irq(1);
+ irq_set_chip_and_handler(irq, &pxa_muxed_gpio_chip,
+ handle_edge_irq);
+ set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+ irq_set_chained_handler(IRQ_GPIO1, pxa_gpio_demux_handler);
+#endif
+
+ for (irq = gpio_to_irq(gpio_offset);
+ irq <= gpio_to_irq(pxa_last_gpio); irq++) {
irq_set_chip_and_handler(irq, &pxa_muxed_gpio_chip,
handle_edge_irq);
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
}
- /* Install handler for GPIO>=2 edge detect interrupts */
- irq_set_chained_handler(mux_irq, pxa_gpio_demux_handler);
- pxa_muxed_gpio_chip.irq_set_wake = fn;
+ irq_set_chained_handler(irq_mux, pxa_gpio_demux_handler);
+ return 0;
}
+static struct platform_driver pxa_gpio_driver = {
+ .probe = pxa_gpio_probe,
+ .driver = {
+ .name = "pxa-gpio",
+ },
+};
+
+static int __init pxa_gpio_init(void)
+{
+ return platform_driver_register(&pxa_gpio_driver);
+}
+postcore_initcall(pxa_gpio_init);
+
#ifdef CONFIG_PM
static int pxa_gpio_suspend(void)
{
@@ -301,13 +571,13 @@ static int pxa_gpio_suspend(void)
int gpio;
for_each_gpio_chip(gpio, c) {
- c->saved_gplr = __raw_readl(c->regbase + GPLR_OFFSET);
- c->saved_gpdr = __raw_readl(c->regbase + GPDR_OFFSET);
- c->saved_grer = __raw_readl(c->regbase + GRER_OFFSET);
- c->saved_gfer = __raw_readl(c->regbase + GFER_OFFSET);
+ c->saved_gplr = readl_relaxed(c->regbase + GPLR_OFFSET);
+ c->saved_gpdr = readl_relaxed(c->regbase + GPDR_OFFSET);
+ c->saved_grer = readl_relaxed(c->regbase + GRER_OFFSET);
+ c->saved_gfer = readl_relaxed(c->regbase + GFER_OFFSET);
/* Clear GPIO transition detect bits */
- __raw_writel(0xffffffff, c->regbase + GEDR_OFFSET);
+ writel_relaxed(0xffffffff, c->regbase + GEDR_OFFSET);
}
return 0;
}
@@ -319,12 +589,12 @@ static void pxa_gpio_resume(void)
for_each_gpio_chip(gpio, c) {
/* restore level with set/clear */
- __raw_writel( c->saved_gplr, c->regbase + GPSR_OFFSET);
- __raw_writel(~c->saved_gplr, c->regbase + GPCR_OFFSET);
+ writel_relaxed( c->saved_gplr, c->regbase + GPSR_OFFSET);
+ writel_relaxed(~c->saved_gplr, c->regbase + GPCR_OFFSET);
- __raw_writel(c->saved_grer, c->regbase + GRER_OFFSET);
- __raw_writel(c->saved_gfer, c->regbase + GFER_OFFSET);
- __raw_writel(c->saved_gpdr, c->regbase + GPDR_OFFSET);
+ writel_relaxed(c->saved_grer, c->regbase + GRER_OFFSET);
+ writel_relaxed(c->saved_gfer, c->regbase + GFER_OFFSET);
+ writel_relaxed(c->saved_gpdr, c->regbase + GPDR_OFFSET);
}
}
#else
@@ -336,3 +606,10 @@ struct syscore_ops pxa_gpio_syscore_ops = {
.suspend = pxa_gpio_suspend,
.resume = pxa_gpio_resume,
};
+
+static int __init pxa_gpio_sysinit(void)
+{
+ register_syscore_ops(&pxa_gpio_syscore_ops);
+ return 0;
+}
+postcore_initcall(pxa_gpio_sysinit);
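
The conversion above turns the PXA/MMP GPIO support into a "pxa-gpio" platform driver: the probe looks up two optional per-bank interrupts named "gpio0" and "gpio1", a required muxed interrupt named "gpio_mux", and a single MEM resource covering the register block. The board or SoC support code is therefore expected to register a matching platform device. A minimal sketch of such a device, with illustrative register and interrupt numbers only (the real values live in the mach-pxa/mach-mmp code):

#include <linux/ioport.h>
#include <linux/platform_device.h>

static struct resource pxa_gpio_resources[] = {
	{
		.start	= 0x40e00000,		/* GPIO register block (illustrative) */
		.end	= 0x40e00000 + 0xffff,
		.flags	= IORESOURCE_MEM,
	}, {
		.start	= 8,			/* bank 0 IRQ (illustrative) */
		.end	= 8,
		.name	= "gpio0",
		.flags	= IORESOURCE_IRQ,
	}, {
		.start	= 9,			/* bank 1 IRQ (illustrative) */
		.end	= 9,
		.name	= "gpio1",
		.flags	= IORESOURCE_IRQ,
	}, {
		.start	= 10,			/* muxed IRQ for GPIO >= 2 (illustrative) */
		.end	= 10,
		.name	= "gpio_mux",
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device pxa_gpio_device = {
	.name		= "pxa-gpio",
	.id		= -1,
	.num_resources	= ARRAY_SIZE(pxa_gpio_resources),
	.resource	= pxa_gpio_resources,
};

/* registered from the machine init code, e.g.:
 *	platform_device_register(&pxa_gpio_device);
 */
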
diff --git a/drivers/gpio/gpio-rdc321x.c b/drivers/gpio/gpio-rdc321x.c
index 2762698e0204..e97016af6443 100644
--- a/drivers/gpio/gpio-rdc321x.c
+++ b/drivers/gpio/gpio-rdc321x.c
@@ -227,18 +227,7 @@ static struct platform_driver rdc321x_gpio_driver = {
.remove = __devexit_p(rdc321x_gpio_remove),
};
-static int __init rdc321x_gpio_init(void)
-{
- return platform_driver_register(&rdc321x_gpio_driver);
-}
-
-static void __exit rdc321x_gpio_exit(void)
-{
- platform_driver_unregister(&rdc321x_gpio_driver);
-}
-
-module_init(rdc321x_gpio_init);
-module_exit(rdc321x_gpio_exit);
+module_platform_driver(rdc321x_gpio_driver);
MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
MODULE_DESCRIPTION("RDC321x GPIO driver");
diff --git a/drivers/gpio/gpio-sa1100.c b/drivers/gpio/gpio-sa1100.c
index b6c1f6d80649..7eecf69362ee 100644
--- a/drivers/gpio/gpio-sa1100.c
+++ b/drivers/gpio/gpio-sa1100.c
@@ -47,12 +47,18 @@ static int sa1100_direction_output(struct gpio_chip *chip, unsigned offset, int
return 0;
}
+static int sa1100_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ return offset < 11 ? (IRQ_GPIO0 + offset) : (IRQ_GPIO11 - 11 + offset);
+}
+
static struct gpio_chip sa1100_gpio_chip = {
.label = "gpio",
.direction_input = sa1100_direction_input,
.direction_output = sa1100_direction_output,
.set = sa1100_gpio_set,
.get = sa1100_gpio_get,
+ .to_irq = sa1100_to_irq,
.base = 0,
.ngpio = GPIO_MAX + 1,
};
diff --git a/drivers/gpio/gpio-samsung.c b/drivers/gpio/gpio-samsung.c
index 866251852719..a7661773c052 100644
--- a/drivers/gpio/gpio-samsung.c
+++ b/drivers/gpio/gpio-samsung.c
@@ -22,8 +22,11 @@
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/interrupt.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/ioport.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/of_address.h>
#include <asm/irq.h>
@@ -230,7 +233,7 @@ static int samsung_gpio_setcfg_2bit(struct samsung_gpio_chip *chip,
* @chip: The gpio chip that is being configured.
* @off: The offset for the GPIO being configured.
*
- * The reverse of samsung_gpio_setcfg_2bit(). Will return a value whicg
+ * The reverse of samsung_gpio_setcfg_2bit(). Will return a value which
* could be directly passed back to samsung_gpio_setcfg_2bit(), from the
* S3C_GPIO_SPECIAL() macro.
*/
@@ -467,33 +470,42 @@ static struct samsung_gpio_cfg s5p64x0_gpio_cfg_rbank = {
#endif
static struct samsung_gpio_cfg samsung_gpio_cfgs[] = {
- {
+ [0] = {
.cfg_eint = 0x0,
- }, {
+ },
+ [1] = {
.cfg_eint = 0x3,
- }, {
+ },
+ [2] = {
.cfg_eint = 0x7,
- }, {
+ },
+ [3] = {
.cfg_eint = 0xF,
- }, {
+ },
+ [4] = {
.cfg_eint = 0x0,
.set_config = samsung_gpio_setcfg_2bit,
.get_config = samsung_gpio_getcfg_2bit,
- }, {
+ },
+ [5] = {
.cfg_eint = 0x2,
.set_config = samsung_gpio_setcfg_2bit,
.get_config = samsung_gpio_getcfg_2bit,
- }, {
+ },
+ [6] = {
.cfg_eint = 0x3,
.set_config = samsung_gpio_setcfg_2bit,
.get_config = samsung_gpio_getcfg_2bit,
- }, {
+ },
+ [7] = {
.set_config = samsung_gpio_setcfg_2bit,
.get_config = samsung_gpio_getcfg_2bit,
- }, {
+ },
+ [8] = {
.set_pull = exynos4_gpio_setpull,
.get_pull = exynos4_gpio_getpull,
- }, {
+ },
+ [9] = {
.cfg_eint = 0x3,
.set_pull = exynos4_gpio_setpull,
.get_pull = exynos4_gpio_getpull,
@@ -2374,6 +2386,63 @@ static struct samsung_gpio_chip exynos4_gpios_3[] = {
#endif
};
+#if defined(CONFIG_ARCH_EXYNOS4) && defined(CONFIG_OF)
+static int exynos4_gpio_xlate(struct gpio_chip *gc, struct device_node *np,
+ const void *gpio_spec, u32 *flags)
+{
+ const __be32 *gpio = gpio_spec;
+ const u32 n = be32_to_cpup(gpio);
+ unsigned int pin = gc->base + be32_to_cpu(gpio[0]);
+
+ if (WARN_ON(gc->of_gpio_n_cells < 4))
+ return -EINVAL;
+
+ if (n > gc->ngpio)
+ return -EINVAL;
+
+ if (s3c_gpio_cfgpin(pin, S3C_GPIO_SFN(be32_to_cpu(gpio[1]))))
+ pr_warn("gpio_xlate: failed to set pin function\n");
+ if (s3c_gpio_setpull(pin, be32_to_cpu(gpio[2])))
+ pr_warn("gpio_xlate: failed to set pin pull up/down\n");
+ if (s5p_gpio_set_drvstr(pin, be32_to_cpu(gpio[3])))
+ pr_warn("gpio_xlate: failed to set pin drive strength\n");
+
+ return n;
+}
+
+static const struct of_device_id exynos4_gpio_dt_match[] __initdata = {
+ { .compatible = "samsung,exynos4-gpio", },
+ {}
+};
+
+static __init void exynos4_gpiolib_attach_ofnode(struct samsung_gpio_chip *chip,
+ u64 base, u64 offset)
+{
+ struct gpio_chip *gc = &chip->chip;
+ u64 address;
+
+ if (!of_have_populated_dt())
+ return;
+
+ address = chip->base ? base + ((u32)chip->base & 0xfff) : base + offset;
+ gc->of_node = of_find_matching_node_by_address(NULL,
+ exynos4_gpio_dt_match, address);
+ if (!gc->of_node) {
+ pr_info("gpio: device tree node not found for gpio controller"
+ " with base address %08llx\n", address);
+ return;
+ }
+ gc->of_gpio_n_cells = 4;
+ gc->of_xlate = exynos4_gpio_xlate;
+}
+#elif defined(CONFIG_ARCH_EXYNOS4)
+static __init void exynos4_gpiolib_attach_ofnode(struct samsung_gpio_chip *chip,
+ u64 base, u64 offset)
+{
+ return;
+}
+#endif /* defined(CONFIG_ARCH_EXYNOS4) && defined(CONFIG_OF) */
+
/* TODO: cleanup soc_is_* */
static __init int samsung_gpiolib_init(void)
{
@@ -2455,6 +2524,10 @@ static __init int samsung_gpiolib_init(void)
chip->config = &exynos4_gpio_cfg;
chip->group = group++;
}
+#ifdef CONFIG_CPU_EXYNOS4210
+ exynos4_gpiolib_attach_ofnode(chip,
+ EXYNOS4_PA_GPIO1, i * 0x20);
+#endif
}
samsung_gpiolib_add_4bit_chips(exynos4_gpios_1, nr_chips, S5P_VA_GPIO1);
@@ -2467,6 +2540,10 @@ static __init int samsung_gpiolib_init(void)
chip->config = &exynos4_gpio_cfg;
chip->group = group++;
}
+#ifdef CONFIG_CPU_EXYNOS4210
+ exynos4_gpiolib_attach_ofnode(chip,
+ EXYNOS4_PA_GPIO2, i * 0x20);
+#endif
}
samsung_gpiolib_add_4bit_chips(exynos4_gpios_2, nr_chips, S5P_VA_GPIO2);
@@ -2479,6 +2556,10 @@ static __init int samsung_gpiolib_init(void)
chip->config = &exynos4_gpio_cfg;
chip->group = group++;
}
+#ifdef CONFIG_CPU_EXYNOS4210
+ exynos4_gpiolib_attach_ofnode(chip,
+ EXYNOS4_PA_GPIO3, i * 0x20);
+#endif
}
samsung_gpiolib_add_4bit_chips(exynos4_gpios_3, nr_chips, S5P_VA_GPIO3);
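
exynos4_gpio_xlate() above consumes a four-cell GPIO specifier: cell 0 is the pin offset within the bank, and cells 1 to 3 carry the pin function, pull setting and drive strength, which the translation applies as a side effect of the lookup. Consumer drivers keep using the normal gpiolib/OF entry points; a minimal sketch, with the "foo" names purely hypothetical:

#include <linux/gpio.h>
#include <linux/of_gpio.h>

/* Pull the first entry of a "gpios" property out of a node whose
 * controller uses the 4-cell exynos4 specifier above.  of_get_gpio()
 * ends up in exynos4_gpio_xlate(), which also programs the function,
 * pull and drive strength encoded in the extra cells.
 */
static int foo_request_gpio(struct device_node *np)
{
	int gpio = of_get_gpio(np, 0);

	if (!gpio_is_valid(gpio))
		return gpio;

	return gpio_request(gpio, "foo");
}
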
diff --git a/drivers/gpio/gpio-sch.c b/drivers/gpio/gpio-sch.c
index 163515845494..8cadf4d683a8 100644
--- a/drivers/gpio/gpio-sch.c
+++ b/drivers/gpio/gpio-sch.c
@@ -297,18 +297,7 @@ static struct platform_driver sch_gpio_driver = {
.remove = __devexit_p(sch_gpio_remove),
};
-static int __init sch_gpio_init(void)
-{
- return platform_driver_register(&sch_gpio_driver);
-}
-
-static void __exit sch_gpio_exit(void)
-{
- platform_driver_unregister(&sch_gpio_driver);
-}
-
-module_init(sch_gpio_init);
-module_exit(sch_gpio_exit);
+module_platform_driver(sch_gpio_driver);
MODULE_AUTHOR("Denis Turischev <denis@compulab.co.il>");
MODULE_DESCRIPTION("GPIO interface for Intel Poulsbo SCH");
diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c
index 4c980b573328..87a68a896abf 100644
--- a/drivers/gpio/gpio-stmpe.c
+++ b/drivers/gpio/gpio-stmpe.c
@@ -65,7 +65,14 @@ static void stmpe_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
u8 reg = stmpe->regs[which] - (offset / 8);
u8 mask = 1 << (offset % 8);
- stmpe_reg_write(stmpe, reg, mask);
+ /*
+ * Some variants have a single register for gpio set/clear functionality.
+ * For them we need to write 0 to clear and 1 to set.
+ */
+ if (stmpe->regs[STMPE_IDX_GPSR_LSB] == stmpe->regs[STMPE_IDX_GPCR_LSB])
+ stmpe_set_bits(stmpe, reg, mask, val ? mask : 0);
+ else
+ stmpe_reg_write(stmpe, reg, mask);
}
static int stmpe_gpio_direction_output(struct gpio_chip *chip,
@@ -132,6 +139,10 @@ static int stmpe_gpio_irq_set_type(struct irq_data *d, unsigned int type)
if (type == IRQ_TYPE_LEVEL_LOW || type == IRQ_TYPE_LEVEL_HIGH)
return -EINVAL;
+ /* STMPE801 doesn't have RE and FE registers */
+ if (stmpe_gpio->stmpe->partnum == STMPE801)
+ return 0;
+
if (type == IRQ_TYPE_EDGE_RISING)
stmpe_gpio->regs[REG_RE][regoffset] |= mask;
else
@@ -165,6 +176,11 @@ static void stmpe_gpio_irq_sync_unlock(struct irq_data *d)
int i, j;
for (i = 0; i < CACHE_NR_REGS; i++) {
+ /* STMPE801 doesn't have RE and FE registers */
+ if ((stmpe->partnum == STMPE801) &&
+ (i != REG_IE))
+ continue;
+
for (j = 0; j < num_banks; j++) {
u8 old = stmpe_gpio->oldregs[i][j];
u8 new = stmpe_gpio->regs[i][j];
@@ -241,8 +257,11 @@ static irqreturn_t stmpe_gpio_irq(int irq, void *dev)
}
stmpe_reg_write(stmpe, statmsbreg + i, status[i]);
- stmpe_reg_write(stmpe, stmpe->regs[STMPE_IDX_GPEDR_MSB] + i,
- status[i]);
+
+ /* Edge detect register is not present on 801 */
+ if (stmpe->partnum != STMPE801)
+ stmpe_reg_write(stmpe, stmpe->regs[STMPE_IDX_GPEDR_MSB]
+ + i, status[i]);
}
return IRQ_HANDLED;
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index 61044c889f7f..bdc293791590 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -361,14 +361,7 @@ static int __devinit tegra_gpio_probe(struct platform_device *pdev)
return -ENODEV;
}
- if (!devm_request_mem_region(&pdev->dev, res->start,
- resource_size(res),
- dev_name(&pdev->dev))) {
- dev_err(&pdev->dev, "Couldn't request MEM resource\n");
- return -ENODEV;
- }
-
- regs = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ regs = devm_request_and_ioremap(&pdev->dev, res);
if (!regs) {
dev_err(&pdev->dev, "Couldn't ioremap regs\n");
return -ENODEV;
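
The Tegra probe now uses devm_request_and_ioremap(), which collapses the request_mem_region/ioremap pair into one managed call and returns NULL if either step fails, so a single error check covers both. Its behaviour is approximately the open-coded sequence being deleted above; as a sketch (the real helper lives in the driver core and also honours IORESOURCE_CACHEABLE):

#include <linux/device.h>
#include <linux/io.h>
#include <linux/ioport.h>

/* Approximate behaviour of devm_request_and_ioremap(dev, res). */
static void __iomem *request_and_ioremap_sketch(struct device *dev,
						struct resource *res)
{
	if (!res)
		return NULL;

	if (!devm_request_mem_region(dev, res->start, resource_size(res),
				     dev_name(dev)))
		return NULL;

	return devm_ioremap(dev, res->start, resource_size(res));
}
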
diff --git a/drivers/gpio/gpio-timberdale.c b/drivers/gpio/gpio-timberdale.c
index c593bd46bfb6..031c6adf5b65 100644
--- a/drivers/gpio/gpio-timberdale.c
+++ b/drivers/gpio/gpio-timberdale.c
@@ -359,18 +359,7 @@ static struct platform_driver timbgpio_platform_driver = {
/*--------------------------------------------------------------------------*/
-static int __init timbgpio_init(void)
-{
- return platform_driver_register(&timbgpio_platform_driver);
-}
-
-static void __exit timbgpio_exit(void)
-{
- platform_driver_unregister(&timbgpio_platform_driver);
-}
-
-module_init(timbgpio_init);
-module_exit(timbgpio_exit);
+module_platform_driver(timbgpio_platform_driver);
MODULE_DESCRIPTION("Timberdale GPIO driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-tps65910.c b/drivers/gpio/gpio-tps65910.c
index b9c1c297669e..91f45b965d1e 100644
--- a/drivers/gpio/gpio-tps65910.c
+++ b/drivers/gpio/gpio-tps65910.c
@@ -52,7 +52,7 @@ static int tps65910_gpio_output(struct gpio_chip *gc, unsigned offset,
struct tps65910 *tps65910 = container_of(gc, struct tps65910, gpio);
/* Set the initial value */
- tps65910_gpio_set(gc, 0, value);
+ tps65910_gpio_set(gc, offset, value);
return tps65910_set_bits(tps65910, TPS65910_GPIO0 + offset,
GPIO_CFG_MASK);
diff --git a/drivers/gpio/gpio-ucb1400.c b/drivers/gpio/gpio-ucb1400.c
index 50e6bd1392ce..26405efe0f9f 100644
--- a/drivers/gpio/gpio-ucb1400.c
+++ b/drivers/gpio/gpio-ucb1400.c
@@ -103,23 +103,12 @@ static struct platform_driver ucb1400_gpio_driver = {
},
};
-static int __init ucb1400_gpio_init(void)
-{
- return platform_driver_register(&ucb1400_gpio_driver);
-}
-
-static void __exit ucb1400_gpio_exit(void)
-{
- platform_driver_unregister(&ucb1400_gpio_driver);
-}
-
void __init ucb1400_gpio_set_data(struct ucb1400_gpio_data *data)
{
ucbdata = data;
}
-module_init(ucb1400_gpio_init);
-module_exit(ucb1400_gpio_exit);
+module_platform_driver(ucb1400_gpio_driver);
MODULE_DESCRIPTION("Philips UCB1400 GPIO driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
index 98723cb9ac68..82d5c20ad3cb 100644
--- a/drivers/gpio/gpio-vr41xx.c
+++ b/drivers/gpio/gpio-vr41xx.c
@@ -571,15 +571,4 @@ static struct platform_driver giu_device_driver = {
},
};
-static int __init vr41xx_giu_init(void)
-{
- return platform_driver_register(&giu_device_driver);
-}
-
-static void __exit vr41xx_giu_exit(void)
-{
- platform_driver_unregister(&giu_device_driver);
-}
-
-module_init(vr41xx_giu_init);
-module_exit(vr41xx_giu_exit);
+module_platform_driver(giu_device_driver);
diff --git a/drivers/gpio/gpio-vx855.c b/drivers/gpio/gpio-vx855.c
index ef5aabd8b8b7..76ebfe5ff702 100644
--- a/drivers/gpio/gpio-vx855.c
+++ b/drivers/gpio/gpio-vx855.c
@@ -315,17 +315,7 @@ static struct platform_driver vx855gpio_driver = {
.remove = __devexit_p(vx855gpio_remove),
};
-static int vx855gpio_init(void)
-{
- return platform_driver_register(&vx855gpio_driver);
-}
-module_init(vx855gpio_init);
-
-static void vx855gpio_exit(void)
-{
- platform_driver_unregister(&vx855gpio_driver);
-}
-module_exit(vx855gpio_exit);
+module_platform_driver(vx855gpio_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <HaraldWelte@viatech.com>");
diff --git a/drivers/gpio/gpio-wm8994.c b/drivers/gpio/gpio-wm8994.c
index 96198f3fab73..92ea5350dfe9 100644
--- a/drivers/gpio/gpio-wm8994.c
+++ b/drivers/gpio/gpio-wm8994.c
@@ -117,6 +117,60 @@ static int wm8994_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
#ifdef CONFIG_DEBUG_FS
+static const char *wm8994_gpio_fn(u16 fn)
+{
+ switch (fn) {
+ case WM8994_GP_FN_PIN_SPECIFIC:
+ return "pin-specific";
+ case WM8994_GP_FN_GPIO:
+ return "GPIO";
+ case WM8994_GP_FN_SDOUT:
+ return "SDOUT";
+ case WM8994_GP_FN_IRQ:
+ return "IRQ";
+ case WM8994_GP_FN_TEMPERATURE:
+ return "Temperature";
+ case WM8994_GP_FN_MICBIAS1_DET:
+ return "MICBIAS1 detect";
+ case WM8994_GP_FN_MICBIAS1_SHORT:
+ return "MICBIAS1 short";
+ case WM8994_GP_FN_MICBIAS2_DET:
+ return "MICBIAS2 detect";
+ case WM8994_GP_FN_MICBIAS2_SHORT:
+ return "MICBIAS2 short";
+ case WM8994_GP_FN_FLL1_LOCK:
+ return "FLL1 lock";
+ case WM8994_GP_FN_FLL2_LOCK:
+ return "FLL2 lock";
+ case WM8994_GP_FN_SRC1_LOCK:
+ return "SRC1 lock";
+ case WM8994_GP_FN_SRC2_LOCK:
+ return "SRC2 lock";
+ case WM8994_GP_FN_DRC1_ACT:
+ return "DRC1 activity";
+ case WM8994_GP_FN_DRC2_ACT:
+ return "DRC2 activity";
+ case WM8994_GP_FN_DRC3_ACT:
+ return "DRC3 activity";
+ case WM8994_GP_FN_WSEQ_STATUS:
+ return "Write sequencer";
+ case WM8994_GP_FN_FIFO_ERROR:
+ return "FIFO error";
+ case WM8994_GP_FN_OPCLK:
+ return "OPCLK";
+ case WM8994_GP_FN_THW:
+ return "Thermal warning";
+ case WM8994_GP_FN_DCS_DONE:
+ return "DC servo";
+ case WM8994_GP_FN_FLL1_OUT:
+ return "FLL1 output";
+ case WM8994_GP_FN_FLL2_OUT:
+ return "FLL1 output";
+ default:
+ return "Unknown";
+ }
+}
+
static void wm8994_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
{
struct wm8994_gpio *wm8994_gpio = to_wm8994_gpio(chip);
@@ -148,8 +202,29 @@ static void wm8994_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
continue;
}
- /* No decode yet; note that GPIO2 is special */
- seq_printf(s, "(%x)\n", reg);
+ if (reg & WM8994_GPN_DIR)
+ seq_printf(s, "in ");
+ else
+ seq_printf(s, "out ");
+
+ if (reg & WM8994_GPN_PU)
+ seq_printf(s, "pull up ");
+
+ if (reg & WM8994_GPN_PD)
+ seq_printf(s, "pull down ");
+
+ if (reg & WM8994_GPN_POL)
+ seq_printf(s, "inverted ");
+ else
+ seq_printf(s, "noninverted ");
+
+ if (reg & WM8994_GPN_OP_CFG)
+ seq_printf(s, "open drain ");
+ else
+ seq_printf(s, "CMOS ");
+
+ seq_printf(s, "%s (%x)\n",
+ wm8994_gpio_fn(reg & WM8994_GPN_FN_MASK), reg);
}
}
#else
diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c
index 0ce6ac9898b1..79b0fe8a7253 100644
--- a/drivers/gpio/gpio-xilinx.c
+++ b/drivers/gpio/gpio-xilinx.c
@@ -206,7 +206,6 @@ static int __devinit xgpio_of_probe(struct device_node *np)
np->full_name, status);
return status;
}
- pr_info("XGpio: %s: registered\n", np->full_name);
return 0;
}
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index a971e3d043ba..17fdf4b6af93 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -114,7 +114,7 @@ static int gpio_ensure_requested(struct gpio_desc *desc, unsigned offset)
}
/* caller holds gpio_lock *OR* gpio is marked as requested */
-static inline struct gpio_chip *gpio_to_chip(unsigned gpio)
+struct gpio_chip *gpio_to_chip(unsigned gpio)
{
return gpio_desc[gpio].chip;
}
@@ -1089,6 +1089,10 @@ unlock:
if (status)
goto fail;
+ pr_info("gpiochip_add: registered GPIOs %d to %d on device: %s\n",
+ chip->base, chip->base + chip->ngpio - 1,
+ chip->label ? : "generic");
+
return 0;
fail:
/* failures here can mean systems won't boot... */
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 1368826ef284..2418429a9836 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -162,3 +162,6 @@ config DRM_SAVAGE
source "drivers/gpu/drm/exynos/Kconfig"
source "drivers/gpu/drm/vmwgfx/Kconfig"
+
+source "drivers/gpu/drm/gma500/Kconfig"
+
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index c0496f660707..0cde1b80fdb1 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -9,7 +9,7 @@ drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
- drm_platform.o drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o \
+ drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \
drm_crtc.o drm_modes.o drm_edid.o \
drm_info.o drm_debugfs.o drm_encoder_slave.o \
drm_trace_points.o drm_global.o drm_usb.o
@@ -36,4 +36,5 @@ obj-$(CONFIG_DRM_VMWGFX)+= vmwgfx/
obj-$(CONFIG_DRM_VIA) +=via/
obj-$(CONFIG_DRM_NOUVEAU) +=nouveau/
obj-$(CONFIG_DRM_EXYNOS) +=exynos/
+obj-$(CONFIG_DRM_GMA500) += gma500/
obj-y += i2c/
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
index 6d440fb894cf..325365f6d355 100644
--- a/drivers/gpu/drm/drm_context.c
+++ b/drivers/gpu/drm/drm_context.c
@@ -154,8 +154,6 @@ int drm_getsareactx(struct drm_device *dev, void *data,
return -EINVAL;
}
- mutex_unlock(&dev->struct_mutex);
-
request->handle = NULL;
list_for_each_entry(_entry, &dev->maplist, head) {
if (_entry->map == map) {
@@ -164,6 +162,9 @@ int drm_getsareactx(struct drm_device *dev, void *data,
break;
}
}
+
+ mutex_unlock(&dev->struct_mutex);
+
if (request->handle == NULL)
return -EINVAL;
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 8323fc389840..5e818a808ace 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -36,6 +36,7 @@
#include "drmP.h"
#include "drm_crtc.h"
#include "drm_edid.h"
+#include "drm_fourcc.h"
struct drm_prop_enum_list {
int type;
@@ -324,6 +325,7 @@ void drm_framebuffer_cleanup(struct drm_framebuffer *fb)
{
struct drm_device *dev = fb->dev;
struct drm_crtc *crtc;
+ struct drm_plane *plane;
struct drm_mode_set set;
int ret;
@@ -340,6 +342,18 @@ void drm_framebuffer_cleanup(struct drm_framebuffer *fb)
}
}
+ list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
+ if (plane->fb == fb) {
+ /* should turn off the crtc */
+ ret = plane->funcs->disable_plane(plane);
+ if (ret)
+ DRM_ERROR("failed to disable plane with busy fb\n");
+ /* disconnect the plane from the fb and crtc: */
+ plane->fb = NULL;
+ plane->crtc = NULL;
+ }
+ }
+
drm_mode_object_put(dev, &fb->base);
list_del(&fb->head);
dev->mode_config.num_fb--;
@@ -540,6 +554,63 @@ void drm_encoder_cleanup(struct drm_encoder *encoder)
}
EXPORT_SYMBOL(drm_encoder_cleanup);
+int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
+ unsigned long possible_crtcs,
+ const struct drm_plane_funcs *funcs,
+ const uint32_t *formats, uint32_t format_count,
+ bool priv)
+{
+ mutex_lock(&dev->mode_config.mutex);
+
+ plane->dev = dev;
+ drm_mode_object_get(dev, &plane->base, DRM_MODE_OBJECT_PLANE);
+ plane->funcs = funcs;
+ plane->format_types = kmalloc(sizeof(uint32_t) * format_count,
+ GFP_KERNEL);
+ if (!plane->format_types) {
+ DRM_DEBUG_KMS("out of memory when allocating plane\n");
+ drm_mode_object_put(dev, &plane->base);
+ mutex_unlock(&dev->mode_config.mutex);
+ return -ENOMEM;
+ }
+
+ memcpy(plane->format_types, formats, format_count * sizeof(uint32_t));
+ plane->format_count = format_count;
+ plane->possible_crtcs = possible_crtcs;
+
+ /* private planes are not exposed to userspace, but depending on the
+ * display hardware, it might be convenient to allow sharing scanout
+ * engine programming with the crtc implementation.
+ */
+ if (!priv) {
+ list_add_tail(&plane->head, &dev->mode_config.plane_list);
+ dev->mode_config.num_plane++;
+ } else {
+ INIT_LIST_HEAD(&plane->head);
+ }
+
+ mutex_unlock(&dev->mode_config.mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_plane_init);
+
+void drm_plane_cleanup(struct drm_plane *plane)
+{
+ struct drm_device *dev = plane->dev;
+
+ mutex_lock(&dev->mode_config.mutex);
+ kfree(plane->format_types);
+ drm_mode_object_put(dev, &plane->base);
+ /* if not added to a list, it must be a private plane */
+ if (!list_empty(&plane->head)) {
+ list_del(&plane->head);
+ dev->mode_config.num_plane--;
+ }
+ mutex_unlock(&dev->mode_config.mutex);
+}
+EXPORT_SYMBOL(drm_plane_cleanup);
+
/**
* drm_mode_create - create a new display mode
* @dev: DRM device
@@ -871,6 +942,7 @@ void drm_mode_config_init(struct drm_device *dev)
INIT_LIST_HEAD(&dev->mode_config.encoder_list);
INIT_LIST_HEAD(&dev->mode_config.property_list);
INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
+ INIT_LIST_HEAD(&dev->mode_config.plane_list);
idr_init(&dev->mode_config.crtc_idr);
mutex_lock(&dev->mode_config.mutex);
@@ -947,6 +1019,7 @@ void drm_mode_config_cleanup(struct drm_device *dev)
struct drm_encoder *encoder, *enct;
struct drm_framebuffer *fb, *fbt;
struct drm_property *property, *pt;
+ struct drm_plane *plane, *plt;
list_for_each_entry_safe(encoder, enct, &dev->mode_config.encoder_list,
head) {
@@ -971,6 +1044,10 @@ void drm_mode_config_cleanup(struct drm_device *dev)
crtc->funcs->destroy(crtc);
}
+ list_for_each_entry_safe(plane, plt, &dev->mode_config.plane_list,
+ head) {
+ plane->funcs->destroy(plane);
+ }
}
EXPORT_SYMBOL(drm_mode_config_cleanup);
@@ -1379,7 +1456,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
*/
if ((out_resp->count_modes >= mode_count) && mode_count) {
copied = 0;
- mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
+ mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
list_for_each_entry(mode, &connector->modes, head) {
drm_crtc_convert_to_umode(&u_mode, mode);
if (copy_to_user(mode_ptr + copied,
@@ -1394,8 +1471,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
if ((out_resp->count_props >= props_count) && props_count) {
copied = 0;
- prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
- prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
+ prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
+ prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
if (connector->property_ids[i] != 0) {
if (put_user(connector->property_ids[i],
@@ -1417,7 +1494,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
copied = 0;
- encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
+ encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
if (connector->encoder_ids[i] != 0) {
if (put_user(connector->encoder_ids[i],
@@ -1471,6 +1548,245 @@ out:
}
/**
+ * drm_mode_getplane_res - get plane info
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * Return a plane count and set of IDs.
+ */
+int drm_mode_getplane_res(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_mode_get_plane_res *plane_resp = data;
+ struct drm_mode_config *config;
+ struct drm_plane *plane;
+ uint32_t __user *plane_ptr;
+ int copied = 0, ret = 0;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ mutex_lock(&dev->mode_config.mutex);
+ config = &dev->mode_config;
+
+ /*
+ * This ioctl is called twice, once to determine how much space is
+ * needed, and the 2nd time to fill it.
+ */
+ if (config->num_plane &&
+ (plane_resp->count_planes >= config->num_plane)) {
+ plane_ptr = (uint32_t __user *)(unsigned long)plane_resp->plane_id_ptr;
+
+ list_for_each_entry(plane, &config->plane_list, head) {
+ if (put_user(plane->base.id, plane_ptr + copied)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ copied++;
+ }
+ }
+ plane_resp->count_planes = config->num_plane;
+
+out:
+ mutex_unlock(&dev->mode_config.mutex);
+ return ret;
+}
+
+/**
+ * drm_mode_getplane - get plane info
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * Return plane info, including formats supported, gamma size, any
+ * current fb, etc.
+ */
+int drm_mode_getplane(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_mode_get_plane *plane_resp = data;
+ struct drm_mode_object *obj;
+ struct drm_plane *plane;
+ uint32_t __user *format_ptr;
+ int ret = 0;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ mutex_lock(&dev->mode_config.mutex);
+ obj = drm_mode_object_find(dev, plane_resp->plane_id,
+ DRM_MODE_OBJECT_PLANE);
+ if (!obj) {
+ ret = -ENOENT;
+ goto out;
+ }
+ plane = obj_to_plane(obj);
+
+ if (plane->crtc)
+ plane_resp->crtc_id = plane->crtc->base.id;
+ else
+ plane_resp->crtc_id = 0;
+
+ if (plane->fb)
+ plane_resp->fb_id = plane->fb->base.id;
+ else
+ plane_resp->fb_id = 0;
+
+ plane_resp->plane_id = plane->base.id;
+ plane_resp->possible_crtcs = plane->possible_crtcs;
+ plane_resp->gamma_size = plane->gamma_size;
+
+ /*
+ * This ioctl is called twice, once to determine how much space is
+ * needed, and the 2nd time to fill it.
+ */
+ if (plane->format_count &&
+ (plane_resp->count_format_types >= plane->format_count)) {
+ format_ptr = (uint32_t __user *)(unsigned long)plane_resp->format_type_ptr;
+ if (copy_to_user(format_ptr,
+ plane->format_types,
+ sizeof(uint32_t) * plane->format_count)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ }
+ plane_resp->count_format_types = plane->format_count;
+
+out:
+ mutex_unlock(&dev->mode_config.mutex);
+ return ret;
+}
+
+/**
+ * drm_mode_setplane - set up or tear down a plane
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * Set plane info, including placement, fb, scaling, and other factors.
+ * Or pass a NULL fb to disable.
+ */
+int drm_mode_setplane(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_mode_set_plane *plane_req = data;
+ struct drm_mode_object *obj;
+ struct drm_plane *plane;
+ struct drm_crtc *crtc;
+ struct drm_framebuffer *fb;
+ int ret = 0;
+ unsigned int fb_width, fb_height;
+ int i;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ mutex_lock(&dev->mode_config.mutex);
+
+ /*
+ * First, find the plane, crtc, and fb objects. If not available,
+ * we don't bother to call the driver.
+ */
+ obj = drm_mode_object_find(dev, plane_req->plane_id,
+ DRM_MODE_OBJECT_PLANE);
+ if (!obj) {
+ DRM_DEBUG_KMS("Unknown plane ID %d\n",
+ plane_req->plane_id);
+ ret = -ENOENT;
+ goto out;
+ }
+ plane = obj_to_plane(obj);
+
+ /* No fb means shut it down */
+ if (!plane_req->fb_id) {
+ plane->funcs->disable_plane(plane);
+ plane->crtc = NULL;
+ plane->fb = NULL;
+ goto out;
+ }
+
+ obj = drm_mode_object_find(dev, plane_req->crtc_id,
+ DRM_MODE_OBJECT_CRTC);
+ if (!obj) {
+ DRM_DEBUG_KMS("Unknown crtc ID %d\n",
+ plane_req->crtc_id);
+ ret = -ENOENT;
+ goto out;
+ }
+ crtc = obj_to_crtc(obj);
+
+ obj = drm_mode_object_find(dev, plane_req->fb_id,
+ DRM_MODE_OBJECT_FB);
+ if (!obj) {
+ DRM_DEBUG_KMS("Unknown framebuffer ID %d\n",
+ plane_req->fb_id);
+ ret = -ENOENT;
+ goto out;
+ }
+ fb = obj_to_fb(obj);
+
+ /* Check whether this plane supports the fb pixel format. */
+ for (i = 0; i < plane->format_count; i++)
+ if (fb->pixel_format == plane->format_types[i])
+ break;
+ if (i == plane->format_count) {
+ DRM_DEBUG_KMS("Invalid pixel format 0x%08x\n", fb->pixel_format);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ fb_width = fb->width << 16;
+ fb_height = fb->height << 16;
+
+ /* Make sure source coordinates are inside the fb. */
+ if (plane_req->src_w > fb_width ||
+ plane_req->src_x > fb_width - plane_req->src_w ||
+ plane_req->src_h > fb_height ||
+ plane_req->src_y > fb_height - plane_req->src_h) {
+ DRM_DEBUG_KMS("Invalid source coordinates "
+ "%u.%06ux%u.%06u+%u.%06u+%u.%06u\n",
+ plane_req->src_w >> 16,
+ ((plane_req->src_w & 0xffff) * 15625) >> 10,
+ plane_req->src_h >> 16,
+ ((plane_req->src_h & 0xffff) * 15625) >> 10,
+ plane_req->src_x >> 16,
+ ((plane_req->src_x & 0xffff) * 15625) >> 10,
+ plane_req->src_y >> 16,
+ ((plane_req->src_y & 0xffff) * 15625) >> 10);
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ /* Give drivers some help against integer overflows */
+ if (plane_req->crtc_w > INT_MAX ||
+ plane_req->crtc_x > INT_MAX - (int32_t) plane_req->crtc_w ||
+ plane_req->crtc_h > INT_MAX ||
+ plane_req->crtc_y > INT_MAX - (int32_t) plane_req->crtc_h) {
+ DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n",
+ plane_req->crtc_w, plane_req->crtc_h,
+ plane_req->crtc_x, plane_req->crtc_y);
+ ret = -ERANGE;
+ goto out;
+ }
+
+ ret = plane->funcs->update_plane(plane, crtc, fb,
+ plane_req->crtc_x, plane_req->crtc_y,
+ plane_req->crtc_w, plane_req->crtc_h,
+ plane_req->src_x, plane_req->src_y,
+ plane_req->src_w, plane_req->src_h);
+ if (!ret) {
+ plane->crtc = crtc;
+ plane->fb = fb;
+ }
+
+out:
+ mutex_unlock(&dev->mode_config.mutex);
+
+ return ret;
+}
+
+/**
* drm_mode_setcrtc - set CRTC configuration
* @inode: inode from the ioctl
* @filp: file * from the ioctl
@@ -1576,7 +1892,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
}
for (i = 0; i < crtc_req->count_connectors; i++) {
- set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
+ set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
if (get_user(out_id, &set_connectors_ptr[i])) {
ret = -EFAULT;
goto out;
@@ -1625,10 +1941,8 @@ int drm_mode_cursor_ioctl(struct drm_device *dev,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- if (!req->flags) {
- DRM_ERROR("no operation set\n");
+ if (!req->flags)
return -EINVAL;
- }
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, req->crtc_id, DRM_MODE_OBJECT_CRTC);
@@ -1641,7 +1955,6 @@ int drm_mode_cursor_ioctl(struct drm_device *dev,
if (req->flags & DRM_MODE_CURSOR_BO) {
if (!crtc->funcs->cursor_set) {
- DRM_ERROR("crtc does not support cursor\n");
ret = -ENXIO;
goto out;
}
@@ -1654,7 +1967,6 @@ int drm_mode_cursor_ioctl(struct drm_device *dev,
if (crtc->funcs->cursor_move) {
ret = crtc->funcs->cursor_move(crtc, req->x, req->y);
} else {
- DRM_ERROR("crtc does not support cursor\n");
ret = -EFAULT;
goto out;
}
@@ -1664,6 +1976,42 @@ out:
return ret;
}
+/* Original addfb only supported RGB formats, so figure out which one */
+uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth)
+{
+ uint32_t fmt;
+
+ switch (bpp) {
+ case 8:
+ fmt = DRM_FORMAT_RGB332;
+ break;
+ case 16:
+ if (depth == 15)
+ fmt = DRM_FORMAT_XRGB1555;
+ else
+ fmt = DRM_FORMAT_RGB565;
+ break;
+ case 24:
+ fmt = DRM_FORMAT_RGB888;
+ break;
+ case 32:
+ if (depth == 24)
+ fmt = DRM_FORMAT_XRGB8888;
+ else if (depth == 30)
+ fmt = DRM_FORMAT_XRGB2101010;
+ else
+ fmt = DRM_FORMAT_ARGB8888;
+ break;
+ default:
+ DRM_ERROR("bad bpp, assuming x8r8g8b8 pixel format\n");
+ fmt = DRM_FORMAT_XRGB8888;
+ break;
+ }
+
+ return fmt;
+}
+EXPORT_SYMBOL(drm_mode_legacy_fb_format);
+
/**
* drm_mode_addfb - add an FB to the graphics configuration
* @inode: inode from the ioctl
@@ -1684,7 +2032,140 @@ out:
int drm_mode_addfb(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
- struct drm_mode_fb_cmd *r = data;
+ struct drm_mode_fb_cmd *or = data;
+ struct drm_mode_fb_cmd2 r = {};
+ struct drm_mode_config *config = &dev->mode_config;
+ struct drm_framebuffer *fb;
+ int ret = 0;
+
+ /* Use new struct with format internally */
+ r.fb_id = or->fb_id;
+ r.width = or->width;
+ r.height = or->height;
+ r.pitches[0] = or->pitch;
+ r.pixel_format = drm_mode_legacy_fb_format(or->bpp, or->depth);
+ r.handles[0] = or->handle;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ if ((config->min_width > r.width) || (r.width > config->max_width))
+ return -EINVAL;
+
+ if ((config->min_height > r.height) || (r.height > config->max_height))
+ return -EINVAL;
+
+ mutex_lock(&dev->mode_config.mutex);
+
+ /* TODO check buffer is sufficiently large */
+ /* TODO setup destructor callback */
+
+ fb = dev->mode_config.funcs->fb_create(dev, file_priv, &r);
+ if (IS_ERR(fb)) {
+ DRM_ERROR("could not create framebuffer\n");
+ ret = PTR_ERR(fb);
+ goto out;
+ }
+
+ or->fb_id = fb->base.id;
+ list_add(&fb->filp_head, &file_priv->fbs);
+ DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
+
+out:
+ mutex_unlock(&dev->mode_config.mutex);
+ return ret;
+}
+
+static int format_check(struct drm_mode_fb_cmd2 *r)
+{
+ uint32_t format = r->pixel_format & ~DRM_FORMAT_BIG_ENDIAN;
+
+ switch (format) {
+ case DRM_FORMAT_C8:
+ case DRM_FORMAT_RGB332:
+ case DRM_FORMAT_BGR233:
+ case DRM_FORMAT_XRGB4444:
+ case DRM_FORMAT_XBGR4444:
+ case DRM_FORMAT_RGBX4444:
+ case DRM_FORMAT_BGRX4444:
+ case DRM_FORMAT_ARGB4444:
+ case DRM_FORMAT_ABGR4444:
+ case DRM_FORMAT_RGBA4444:
+ case DRM_FORMAT_BGRA4444:
+ case DRM_FORMAT_XRGB1555:
+ case DRM_FORMAT_XBGR1555:
+ case DRM_FORMAT_RGBX5551:
+ case DRM_FORMAT_BGRX5551:
+ case DRM_FORMAT_ARGB1555:
+ case DRM_FORMAT_ABGR1555:
+ case DRM_FORMAT_RGBA5551:
+ case DRM_FORMAT_BGRA5551:
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_BGR565:
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_BGR888:
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_RGBX8888:
+ case DRM_FORMAT_BGRX8888:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_RGBA8888:
+ case DRM_FORMAT_BGRA8888:
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_RGBX1010102:
+ case DRM_FORMAT_BGRX1010102:
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_ABGR2101010:
+ case DRM_FORMAT_RGBA1010102:
+ case DRM_FORMAT_BGRA1010102:
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ case DRM_FORMAT_AYUV:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV61:
+ case DRM_FORMAT_YUV410:
+ case DRM_FORMAT_YVU410:
+ case DRM_FORMAT_YUV411:
+ case DRM_FORMAT_YVU411:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YUV444:
+ case DRM_FORMAT_YVU444:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+/**
+ * drm_mode_addfb2 - add an FB to the graphics configuration
+ * @inode: inode from the ioctl
+ * @filp: file * from the ioctl
+ * @cmd: cmd from ioctl
+ * @arg: arg from ioctl
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Add a new FB to the specified CRTC, given a user request with format.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_addfb2(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_fb_cmd2 *r = data;
struct drm_mode_config *config = &dev->mode_config;
struct drm_framebuffer *fb;
int ret = 0;
@@ -1693,18 +2174,23 @@ int drm_mode_addfb(struct drm_device *dev,
return -EINVAL;
if ((config->min_width > r->width) || (r->width > config->max_width)) {
- DRM_ERROR("mode new framebuffer width not within limits\n");
+ DRM_ERROR("bad framebuffer width %d, should be >= %d && <= %d\n",
+ r->width, config->min_width, config->max_width);
return -EINVAL;
}
if ((config->min_height > r->height) || (r->height > config->max_height)) {
- DRM_ERROR("mode new framebuffer height not within limits\n");
+ DRM_ERROR("bad framebuffer height %d, should be >= %d && <= %d\n",
+ r->height, config->min_height, config->max_height);
return -EINVAL;
}
- mutex_lock(&dev->mode_config.mutex);
+ ret = format_check(r);
+ if (ret) {
+ DRM_ERROR("bad framebuffer format 0x%08x\n", r->pixel_format);
+ return ret;
+ }
- /* TODO check buffer is sufficiently large */
- /* TODO setup destructor callback */
+ mutex_lock(&dev->mode_config.mutex);
fb = dev->mode_config.funcs->fb_create(dev, file_priv, r);
if (IS_ERR(fb)) {
@@ -1756,7 +2242,6 @@ int drm_mode_rmfb(struct drm_device *dev,
obj = drm_mode_object_find(dev, *id, DRM_MODE_OBJECT_FB);
/* TODO check that we really get a framebuffer back. */
if (!obj) {
- DRM_ERROR("mode invalid framebuffer id\n");
ret = -EINVAL;
goto out;
}
@@ -1767,7 +2252,6 @@ int drm_mode_rmfb(struct drm_device *dev,
found = 1;
if (!found) {
- DRM_ERROR("tried to remove a fb that we didn't own\n");
ret = -EINVAL;
goto out;
}
@@ -1814,7 +2298,6 @@ int drm_mode_getfb(struct drm_device *dev,
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
if (!obj) {
- DRM_ERROR("invalid framebuffer id\n");
ret = -EINVAL;
goto out;
}
@@ -1824,7 +2307,7 @@ int drm_mode_getfb(struct drm_device *dev,
r->width = fb->width;
r->depth = fb->depth;
r->bpp = fb->bits_per_pixel;
- r->pitch = fb->pitch;
+ r->pitch = fb->pitches[0];
fb->funcs->create_handle(fb, file_priv, &r->handle);
out:
@@ -1850,14 +2333,13 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
if (!obj) {
- DRM_ERROR("invalid framebuffer id\n");
ret = -EINVAL;
goto out_err1;
}
fb = obj_to_fb(obj);
num_clips = r->num_clips;
- clips_ptr = (struct drm_clip_rect *)(unsigned long)r->clips_ptr;
+ clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
if (!num_clips != !clips_ptr) {
ret = -EINVAL;
@@ -2253,7 +2735,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
struct drm_property_enum *prop_enum;
struct drm_mode_property_enum __user *enum_ptr;
struct drm_property_blob *prop_blob;
- uint32_t *blob_id_ptr;
+ uint32_t __user *blob_id_ptr;
uint64_t __user *values_ptr;
uint32_t __user *blob_length_ptr;
@@ -2283,7 +2765,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
out_resp->flags = property->flags;
if ((out_resp->count_values >= value_count) && value_count) {
- values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
+ values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
for (i = 0; i < value_count; i++) {
if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
ret = -EFAULT;
@@ -2296,7 +2778,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
if (property->flags & DRM_MODE_PROP_ENUM) {
if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
copied = 0;
- enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
+ enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
@@ -2318,8 +2800,8 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
if (property->flags & DRM_MODE_PROP_BLOB) {
if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
copied = 0;
- blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
- blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
+ blob_id_ptr = (uint32_t __user *)(unsigned long)out_resp->enum_blob_ptr;
+ blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
@@ -2380,7 +2862,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
struct drm_mode_get_blob *out_resp = data;
struct drm_property_blob *blob;
int ret = 0;
- void *blob_ptr;
+ void __user *blob_ptr;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
@@ -2394,7 +2876,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
blob = obj_to_blob(obj);
if (out_resp->length == blob->length) {
- blob_ptr = (void *)(unsigned long)out_resp->data;
+ blob_ptr = (void __user *)(unsigned long)out_resp->data;
if (copy_to_user(blob_ptr, blob->data, blob->length)){
ret = -EFAULT;
goto done;
@@ -2788,3 +3270,71 @@ int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
return dev->driver->dumb_destroy(file_priv, dev, args->handle);
}
+
+/*
+ * Just need to support RGB formats here for compat with code that doesn't
+ * use pixel formats directly yet.
+ */
+void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
+ int *bpp)
+{
+ switch (format) {
+ case DRM_FORMAT_RGB332:
+ case DRM_FORMAT_BGR233:
+ *depth = 8;
+ *bpp = 8;
+ break;
+ case DRM_FORMAT_XRGB1555:
+ case DRM_FORMAT_XBGR1555:
+ case DRM_FORMAT_RGBX5551:
+ case DRM_FORMAT_BGRX5551:
+ case DRM_FORMAT_ARGB1555:
+ case DRM_FORMAT_ABGR1555:
+ case DRM_FORMAT_RGBA5551:
+ case DRM_FORMAT_BGRA5551:
+ *depth = 15;
+ *bpp = 16;
+ break;
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_BGR565:
+ *depth = 16;
+ *bpp = 16;
+ break;
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_BGR888:
+ *depth = 24;
+ *bpp = 24;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_RGBX8888:
+ case DRM_FORMAT_BGRX8888:
+ *depth = 24;
+ *bpp = 32;
+ break;
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_RGBX1010102:
+ case DRM_FORMAT_BGRX1010102:
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_ABGR2101010:
+ case DRM_FORMAT_RGBA1010102:
+ case DRM_FORMAT_BGRA1010102:
+ *depth = 30;
+ *bpp = 32;
+ break;
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_RGBA8888:
+ case DRM_FORMAT_BGRA8888:
+ *depth = 32;
+ *bpp = 32;
+ break;
+ default:
+ DRM_DEBUG_KMS("unsupported pixel format\n");
+ *depth = 0;
+ *bpp = 0;
+ break;
+ }
+}
+EXPORT_SYMBOL(drm_fb_get_bpp_depth);
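
drm_plane_init() added above is what a KMS driver calls to expose an overlay/sprite: it takes a bitmask of CRTCs the plane may be attached to, the plane functions, and the list of fourcc formats the hardware can scan out; passing priv = true keeps the plane off the list reported to userspace. A minimal driver-side sketch, with every foo_* name hypothetical:

static const uint32_t foo_plane_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_YUYV,
};

static const struct drm_plane_funcs foo_plane_funcs = {
	.update_plane	= foo_update_plane,	/* hypothetical callbacks */
	.disable_plane	= foo_disable_plane,
	.destroy	= foo_destroy_plane,
};

static struct drm_plane foo_plane;

static int foo_overlay_init(struct drm_device *dev)
{
	/* overlay usable on CRTC 0 only, visible to userspace */
	return drm_plane_init(dev, &foo_plane, 1 << 0, &foo_plane_funcs,
			      foo_plane_formats, ARRAY_SIZE(foo_plane_formats),
			      false);
}
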
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 3969f7553fe7..84a4a809793f 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -34,6 +34,7 @@
#include "drmP.h"
#include "drm_crtc.h"
+#include "drm_fourcc.h"
#include "drm_crtc_helper.h"
#include "drm_fb_helper.h"
@@ -456,6 +457,30 @@ done:
EXPORT_SYMBOL(drm_crtc_helper_set_mode);
+static int
+drm_crtc_helper_disable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+
+ /* Decouple all encoders and their attached connectors from this crtc */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc != crtc)
+ continue;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (connector->encoder != encoder)
+ continue;
+
+ connector->encoder = NULL;
+ }
+ }
+
+ drm_helper_disable_unused_functions(dev);
+ return 0;
+}
+
/**
* drm_crtc_helper_set_config - set a new config from userspace
* @crtc: CRTC to setup
@@ -510,8 +535,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
(int)set->num_connectors, set->x, set->y);
} else {
DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
- set->mode = NULL;
- set->num_connectors = 0;
+ return drm_crtc_helper_disable(set->crtc);
}
dev = set->crtc->dev;
@@ -687,7 +711,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
for (i = 0; i < set->num_connectors; i++) {
DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
drm_get_connector_name(set->connectors[i]));
- set->connectors[i]->dpms = DRM_MODE_DPMS_ON;
+ set->connectors[i]->funcs->dpms(set->connectors[i], DRM_MODE_DPMS_ON);
}
}
drm_helper_disable_unused_functions(dev);
@@ -824,13 +848,19 @@ void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
EXPORT_SYMBOL(drm_helper_connector_dpms);
int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
- struct drm_mode_fb_cmd *mode_cmd)
+ struct drm_mode_fb_cmd2 *mode_cmd)
{
+ int i;
+
fb->width = mode_cmd->width;
fb->height = mode_cmd->height;
- fb->pitch = mode_cmd->pitch;
- fb->bits_per_pixel = mode_cmd->bpp;
- fb->depth = mode_cmd->depth;
+ for (i = 0; i < 4; i++) {
+ fb->pitches[i] = mode_cmd->pitches[i];
+ fb->offsets[i] = mode_cmd->offsets[i];
+ }
+ drm_fb_get_bpp_depth(mode_cmd->pixel_format, &fb->depth,
+ &fb->bits_per_pixel);
+ fb->pixel_format = mode_cmd->pixel_format;
return 0;
}
@@ -985,3 +1015,36 @@ void drm_helper_hpd_irq_event(struct drm_device *dev)
queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, 0);
}
EXPORT_SYMBOL(drm_helper_hpd_irq_event);
+
+
+/**
+ * drm_format_num_planes - get the number of planes for format
+ * @format: pixel format (DRM_FORMAT_*)
+ *
+ * RETURNS:
+ * The number of planes used by the specified pixel format.
+ */
+int drm_format_num_planes(uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_YUV410:
+ case DRM_FORMAT_YVU410:
+ case DRM_FORMAT_YUV411:
+ case DRM_FORMAT_YVU411:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YUV444:
+ case DRM_FORMAT_YVU444:
+ return 3;
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV61:
+ return 2;
+ default:
+ return 1;
+ }
+}
+EXPORT_SYMBOL(drm_format_num_planes);
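
drm_format_num_planes() gives helpers and drivers one place to ask how many buffer planes a fourcc needs: three for the fully planar YUV formats, two for the NV12-style semi-planar ones, one for everything else. One place this matters is fb_create, which should check that userspace supplied a handle for every plane; a hedged sketch (foo_check_fb_planes is hypothetical):

/* Reject a drm_mode_fb_cmd2 that lacks a handle for one of the planes
 * its pixel format requires.
 */
static int foo_check_fb_planes(const struct drm_mode_fb_cmd2 *cmd)
{
	int i, num = drm_format_num_planes(cmd->pixel_format);

	for (i = 0; i < num; i++)
		if (!cmd->handles[i])
			return -EINVAL;

	return 0;
}
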
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 40c187c60f44..ebf7d3f68fc4 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -61,14 +61,14 @@ static int drm_version(struct drm_device *dev, void *data,
/** Ioctl table */
static struct drm_ioctl_desc drm_ioctls[] = {
- DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER),
DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -136,8 +136,11 @@ static struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_MASTER|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED),
@@ -150,6 +153,7 @@ static struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
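
The three new plane ioctls wired up here follow the usual two-pass KMS convention described in the drm_crtc.c comments above: a first call with count_planes == 0 just reports how many IDs exist, the caller allocates room, and a second call fills it in. A hedged userspace sketch of that pattern (error handling trimmed):

#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

/* Enumerate plane IDs with DRM_IOCTL_MODE_GETPLANERESOURCES. */
static uint32_t *get_plane_ids(int fd, uint32_t *count)
{
	struct drm_mode_get_plane_res res = { 0 };
	uint32_t *ids;

	/* pass 1: learn how many planes the device exposes */
	if (ioctl(fd, DRM_IOCTL_MODE_GETPLANERESOURCES, &res) < 0)
		return NULL;

	if (!res.count_planes) {
		*count = 0;
		return NULL;
	}

	ids = calloc(res.count_planes, sizeof(*ids));
	if (!ids)
		return NULL;
	res.plane_id_ptr = (uint64_t)(unsigned long)ids;

	/* pass 2: fetch the IDs themselves */
	if (ioctl(fd, DRM_IOCTL_MODE_GETPLANERESOURCES, &res) < 0) {
		free(ids);
		return NULL;
	}

	*count = res.count_planes;
	return ids;
}
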
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 3e927ce7557d..ece03fc2d386 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -508,25 +508,10 @@ static void
cea_for_each_detailed_block(u8 *ext, detailed_cb *cb, void *closure)
{
int i, n = 0;
- u8 rev = ext[0x01], d = ext[0x02];
+ u8 d = ext[0x02];
u8 *det_base = ext + d;
- switch (rev) {
- case 0:
- /* can't happen */
- return;
- case 1:
- /* have to infer how many blocks we have, check pixel clock */
- for (i = 0; i < 6; i++)
- if (det_base[18*i] || det_base[18*i+1])
- n++;
- break;
- default:
- /* explicit count */
- n = min(ext[0x03] & 0x0f, 6);
- break;
- }
-
+ n = (127 - d) / 18;
for (i = 0; i < n; i++)
cb((struct detailed_timing *)(det_base + 18 * i), closure);
}
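
The replacement count follows from the CEA extension layout: the block is 128 bytes, byte 0x02 (d) gives the offset of the first 18-byte detailed timing descriptor, and the last byte is the checksum, so at most (127 - d) / 18 descriptors fit. A quick worked example with a typical offset (value assumed for illustration):

	u8 d = 0x54;			/* DTD offset 84 */
	int n = (127 - d) / 18;		/* (127 - 84) / 18 = 43 / 18 = 2 */
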
@@ -1319,6 +1304,7 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
#define HDMI_IDENTIFIER 0x000C03
#define AUDIO_BLOCK 0x01
+#define VIDEO_BLOCK 0x02
#define VENDOR_BLOCK 0x03
#define SPEAKER_BLOCK 0x04
#define EDID_BASIC_AUDIO (1 << 6)
@@ -1349,6 +1335,47 @@ u8 *drm_find_cea_extension(struct edid *edid)
}
EXPORT_SYMBOL(drm_find_cea_extension);
+static int
+do_cea_modes (struct drm_connector *connector, u8 *db, u8 len)
+{
+ struct drm_device *dev = connector->dev;
+ u8 * mode, cea_mode;
+ int modes = 0;
+
+ for (mode = db; mode < db + len; mode++) {
+ cea_mode = (*mode & 127) - 1; /* CEA modes are numbered 1..127 */
+ if (cea_mode < drm_num_cea_modes) {
+ struct drm_display_mode *newmode;
+ newmode = drm_mode_duplicate(dev,
+ &edid_cea_modes[cea_mode]);
+ if (newmode) {
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+ }
+ }
+ }
+
+ return modes;
+}
+
+static int
+add_cea_modes(struct drm_connector *connector, struct edid *edid)
+{
+ u8 * cea = drm_find_cea_extension(edid);
+ u8 * db, dbl;
+ int modes = 0;
+
+ if (cea && cea[1] >= 3) {
+ for (db = cea + 4; db < cea + cea[2]; db += dbl + 1) {
+ dbl = db[0] & 0x1f;
+ if (((db[0] & 0xe0) >> 5) == VIDEO_BLOCK)
+ modes += do_cea_modes (connector, db+1, dbl);
+ }
+ }
+
+ return modes;
+}
+
static void
parse_hdmi_vsdb(struct drm_connector *connector, uint8_t *db)
{
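
For reference, the data block collection that add_cea_modes() walks above sits between offset 4 and the DTD offset stored in byte 2 of the extension; each block's first byte packs a 3-bit tag in the high bits and a 5-bit payload length in the low bits. A self-contained sketch of that walk (illustration only, not kernel code):

	#include <stdint.h>

	static int count_vics(const uint8_t *cea)
	{
		const uint8_t *db;
		int vics = 0;

		if (cea[1] < 3)		/* data blocks require CEA revision 3+ */
			return 0;

		for (db = cea + 4; db < cea + cea[2]; db += (db[0] & 0x1f) + 1) {
			uint8_t tag = (db[0] & 0xe0) >> 5;
			uint8_t len = db[0] & 0x1f;

			if (tag == 2)	/* VIDEO_BLOCK: one short video descriptor per byte */
				vics += len;
		}
		return vics;
	}
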
@@ -1432,26 +1459,29 @@ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
eld[18] = edid->prod_code[0];
eld[19] = edid->prod_code[1];
- for (db = cea + 4; db < cea + cea[2]; db += dbl + 1) {
- dbl = db[0] & 0x1f;
-
- switch ((db[0] & 0xe0) >> 5) {
- case AUDIO_BLOCK: /* Audio Data Block, contains SADs */
- sad_count = dbl / 3;
- memcpy(eld + 20 + mnl, &db[1], dbl);
- break;
- case SPEAKER_BLOCK: /* Speaker Allocation Data Block */
- eld[7] = db[1];
- break;
- case VENDOR_BLOCK:
- /* HDMI Vendor-Specific Data Block */
- if (db[1] == 0x03 && db[2] == 0x0c && db[3] == 0)
- parse_hdmi_vsdb(connector, db);
- break;
- default:
- break;
+ if (cea[1] >= 3)
+ for (db = cea + 4; db < cea + cea[2]; db += dbl + 1) {
+ dbl = db[0] & 0x1f;
+
+ switch ((db[0] & 0xe0) >> 5) {
+ case AUDIO_BLOCK:
+ /* Audio Data Block, contains SADs */
+ sad_count = dbl / 3;
+ memcpy(eld + 20 + mnl, &db[1], dbl);
+ break;
+ case SPEAKER_BLOCK:
+ /* Speaker Allocation Data Block */
+ eld[7] = db[1];
+ break;
+ case VENDOR_BLOCK:
+ /* HDMI Vendor-Specific Data Block */
+ if (db[1] == 0x03 && db[2] == 0x0c && db[3] == 0)
+ parse_hdmi_vsdb(connector, db);
+ break;
+ default:
+ break;
+ }
}
- }
eld[5] |= sad_count << 4;
eld[2] = (20 + mnl + sad_count * 3 + 3) / 4;
@@ -1722,6 +1752,7 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
num_modes += add_standard_modes(connector, edid);
num_modes += add_established_modes(connector, edid);
num_modes += add_inferred_modes(connector, edid);
+ num_modes += add_cea_modes(connector, edid);
if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
edid_fixup_preferred(connector, quirks);
diff --git a/drivers/gpu/drm/drm_edid_modes.h b/drivers/gpu/drm/drm_edid_modes.h
index 5f2064489fd5..a91ffb117220 100644
--- a/drivers/gpu/drm/drm_edid_modes.h
+++ b/drivers/gpu/drm/drm_edid_modes.h
@@ -378,3 +378,287 @@ static const struct {
{ 1920, 1440, 75, 0 },
};
static const int num_est3_modes = sizeof(est3_modes) / sizeof(est3_modes[0]);
+
+/*
+ * Probably taken from CEA-861 spec.
+ * This table is converted from xorg's hw/xfree86/modes/xf86EdidModes.c.
+ */
+static const struct drm_display_mode edid_cea_modes[] = {
+ /* 640x480@60Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
+ 752, 800, 0, 480, 490, 492, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 720x480@60Hz */
+ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
+ 798, 858, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 720x480@60Hz */
+ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
+ 798, 858, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1280x720@60Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
+ 1430, 1650, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1080i@60Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 1440x480i@60Hz */
+ { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
+ 1602, 1716, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 1440x480i@60Hz */
+ { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
+ 1602, 1716, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 1440x240@60Hz */
+ { DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
+ 1602, 1716, 0, 240, 244, 247, 262, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1440x240@60Hz */
+ { DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
+ 1602, 1716, 0, 240, 244, 247, 262, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 2880x480i@60Hz */
+ { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
+ 3204, 3432, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 2880x480i@60Hz */
+ { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
+ 3204, 3432, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 2880x240@60Hz */
+ { DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
+ 3204, 3432, 0, 240, 244, 247, 262, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 2880x240@60Hz */
+ { DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
+ 3204, 3432, 0, 240, 244, 247, 262, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1440x480@60Hz */
+ { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
+ 1596, 1716, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1440x480@60Hz */
+ { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
+ 1596, 1716, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1920x1080@60Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 720x576@50Hz */
+ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
+ 796, 864, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 720x576@50Hz */
+ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
+ 796, 864, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1280x720@50Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
+ 1760, 1980, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1080i@50Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 1440x576i@50Hz */
+ { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
+ 1590, 1728, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 1440x576i@50Hz */
+ { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
+ 1590, 1728, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 1440x288@50Hz */
+ { DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
+ 1590, 1728, 0, 288, 290, 293, 312, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1440x288@50Hz */
+ { DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
+ 1590, 1728, 0, 288, 290, 293, 312, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 2880x576i@50Hz */
+ { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
+ 3180, 3456, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 2880x576i@50Hz */
+ { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
+ 3180, 3456, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 2880x288@50Hz */
+ { DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
+ 3180, 3456, 0, 288, 290, 293, 312, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 2880x288@50Hz */
+ { DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
+ 3180, 3456, 0, 288, 290, 293, 312, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1440x576@50Hz */
+ { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
+ 1592, 1728, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1440x576@50Hz */
+ { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
+ 1592, 1728, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1920x1080@50Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1080@24Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
+ 2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1080@25Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1080@30Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 2880x480@60Hz */
+ { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
+ 3192, 3432, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 2880x480@60Hz */
+ { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
+ 3192, 3432, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 2880x576@50Hz */
+ { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
+ 3184, 3456, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 2880x576@50Hz */
+ { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
+ 3184, 3456, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1920x1080i@50Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952,
+ 2120, 2304, 0, 1080, 1126, 1136, 1250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 1920x1080i@100Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 1280x720@100Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720,
+ 1760, 1980, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 720x576@100Hz */
+ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
+ 796, 864, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 720x576@100Hz */
+ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
+ 796, 864, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1440x576i@100Hz */
+ { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
+ 1590, 1728, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1440x576i@100Hz */
+ { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
+ 1590, 1728, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1920x1080i@120Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 1280x720@120Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390,
+ 1430, 1650, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 720x480@120Hz */
+ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
+ 798, 858, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 720x480@120Hz */
+ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
+ 798, 858, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1440x480i@120Hz */
+ { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
+ 1602, 1716, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 1440x480i@120Hz */
+ { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
+ 1602, 1716, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 720x576@200Hz */
+ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
+ 796, 864, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 720x576@200Hz */
+ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
+ 796, 864, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1440x576i@200Hz */
+ { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
+ 1590, 1728, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 1440x576i@200Hz */
+ { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
+ 1590, 1728, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 720x480@240Hz */
+ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
+ 798, 858, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 720x480@240Hz */
+ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
+ 798, 858, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1440x480i@240 */
+ { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
+ 1602, 1716, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 1440x480i@240 */
+ { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
+ 1602, 1716, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 1280x720@24Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040,
+ 3080, 3300, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x720@25Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700,
+ 3740, 3960, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x720@30Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040,
+ 3080, 3300, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1080@120Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1080@100Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+};
+static const int drm_num_cea_modes =
+ sizeof (edid_cea_modes) / sizeof (edid_cea_modes[0]);
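
CEA Video Identification Codes are 1-based, so this table is indexed with vic - 1 after masking off the high "native mode" bit, exactly as do_cea_modes() does above. A hypothetical lookup helper, assuming the kernel's u8 type and the two symbols defined here:

	static const struct drm_display_mode *cea_mode_for_vic(u8 vic)
	{
		vic &= 0x7f;		/* bit 7 marks a native mode, not part of the VIC */
		if (vic == 0 || vic > drm_num_cea_modes)
			return NULL;
		return &edid_cea_modes[vic - 1];
	}
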
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 80fe39d98b0c..aada26f63dec 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -255,6 +255,13 @@ bool drm_fb_helper_force_kernel_mode(void)
int drm_fb_helper_panic(struct notifier_block *n, unsigned long ununsed,
void *panic_str)
{
+ /*
+ * It's a waste of time and effort to switch back to text console
+ * if the kernel should reboot before panic messages can be seen.
+ */
+ if (panic_timeout < 0)
+ return 0;
+
printk(KERN_ERR "panic occurred, switching back to text console\n");
return drm_fb_helper_force_kernel_mode();
}
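
For context, a negative panic_timeout means the machine reboots immediately on panic, so restoring the text console would be wasted work. A rough sketch of how such a panic handler is typically wired up (the notifier_block name here is assumed, not taken from this patch):

	static struct notifier_block drm_fb_panic_nb = {
		.notifier_call = drm_fb_helper_panic,
	};

	/* during initialization: */
	atomic_notifier_chain_register(&panic_notifier_list, &drm_fb_panic_nb);
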
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 4911e1d1dcf2..c00cf154cc0b 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -182,7 +182,7 @@ int drm_stub_open(struct inode *inode, struct file *filp)
goto out;
old_fops = filp->f_op;
- filp->f_op = fops_get(&dev->driver->fops);
+ filp->f_op = fops_get(dev->driver->fops);
if (filp->f_op == NULL) {
filp->f_op = old_fops;
goto out;
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 904d7e9c8e47..956fd38d7c9e 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -158,14 +158,11 @@ int drm_getmap(struct drm_device *dev, void *data,
int i;
idx = map->offset;
-
- mutex_lock(&dev->struct_mutex);
- if (idx < 0) {
- mutex_unlock(&dev->struct_mutex);
+ if (idx < 0)
return -EINVAL;
- }
i = 0;
+ mutex_lock(&dev->struct_mutex);
list_for_each(list, &dev->maplist) {
if (i == idx) {
r_list = list_entry(list, struct drm_map_list, head);
@@ -211,9 +208,9 @@ int drm_getclient(struct drm_device *dev, void *data,
int i;
idx = client->idx;
- mutex_lock(&dev->struct_mutex);
-
i = 0;
+
+ mutex_lock(&dev->struct_mutex);
list_for_each_entry(pt, &dev->filelist, lhead) {
if (i++ >= idx) {
client->auth = pt->authenticated;
@@ -249,8 +246,6 @@ int drm_getstats(struct drm_device *dev, void *data,
memset(stats, 0, sizeof(*stats));
- mutex_lock(&dev->struct_mutex);
-
for (i = 0; i < dev->counters; i++) {
if (dev->types[i] == _DRM_STAT_LOCK)
stats->data[i].value =
@@ -262,8 +257,6 @@ int drm_getstats(struct drm_device *dev, void *data,
stats->count = dev->counters;
- mutex_unlock(&dev->struct_mutex);
-
return 0;
}
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index 632ae243ede0..c79c713eeba0 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -33,6 +33,7 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
+#include <linux/export.h>
#include "drmP.h"
static int drm_notifier(void *priv);
@@ -345,6 +346,7 @@ void drm_idlelock_take(struct drm_lock_data *lock_data)
}
spin_unlock_bh(&lock_data->spinlock);
}
+EXPORT_SYMBOL(drm_idlelock_take);
void drm_idlelock_release(struct drm_lock_data *lock_data)
{
@@ -364,6 +366,7 @@ void drm_idlelock_release(struct drm_lock_data *lock_data)
}
spin_unlock_bh(&lock_data->spinlock);
}
+EXPORT_SYMBOL(drm_idlelock_release);
int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv)
{
diff --git a/drivers/gpu/drm/drm_sman.c b/drivers/gpu/drm/drm_sman.c
deleted file mode 100644
index cebce45f4429..000000000000
--- a/drivers/gpu/drm/drm_sman.c
+++ /dev/null
@@ -1,351 +0,0 @@
-/**************************************************************************
- *
- * Copyright 2006 Tungsten Graphics, Inc., Bismarck., ND., USA.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- *
- **************************************************************************/
-/*
- * Simple memory manager interface that keeps track on allocate regions on a
- * per "owner" basis. All regions associated with an "owner" can be released
- * with a simple call. Typically if the "owner" exists. The owner is any
- * "unsigned long" identifier. Can typically be a pointer to a file private
- * struct or a context identifier.
- *
- * Authors:
- * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
- */
-
-#include <linux/export.h>
-#include "drm_sman.h"
-
-struct drm_owner_item {
- struct drm_hash_item owner_hash;
- struct list_head sman_list;
- struct list_head mem_blocks;
-};
-
-void drm_sman_takedown(struct drm_sman * sman)
-{
- drm_ht_remove(&sman->user_hash_tab);
- drm_ht_remove(&sman->owner_hash_tab);
- kfree(sman->mm);
-}
-
-EXPORT_SYMBOL(drm_sman_takedown);
-
-int
-drm_sman_init(struct drm_sman * sman, unsigned int num_managers,
- unsigned int user_order, unsigned int owner_order)
-{
- int ret = 0;
-
- sman->mm = kcalloc(num_managers, sizeof(*sman->mm), GFP_KERNEL);
- if (!sman->mm) {
- ret = -ENOMEM;
- goto out;
- }
- sman->num_managers = num_managers;
- INIT_LIST_HEAD(&sman->owner_items);
- ret = drm_ht_create(&sman->owner_hash_tab, owner_order);
- if (ret)
- goto out1;
- ret = drm_ht_create(&sman->user_hash_tab, user_order);
- if (!ret)
- goto out;
-
- drm_ht_remove(&sman->owner_hash_tab);
-out1:
- kfree(sman->mm);
-out:
- return ret;
-}
-
-EXPORT_SYMBOL(drm_sman_init);
-
-static void *drm_sman_mm_allocate(void *private, unsigned long size,
- unsigned alignment)
-{
- struct drm_mm *mm = (struct drm_mm *) private;
- struct drm_mm_node *tmp;
-
- tmp = drm_mm_search_free(mm, size, alignment, 1);
- if (!tmp) {
- return NULL;
- }
- tmp = drm_mm_get_block(tmp, size, alignment);
- return tmp;
-}
-
-static void drm_sman_mm_free(void *private, void *ref)
-{
- struct drm_mm_node *node = (struct drm_mm_node *) ref;
-
- drm_mm_put_block(node);
-}
-
-static void drm_sman_mm_destroy(void *private)
-{
- struct drm_mm *mm = (struct drm_mm *) private;
- drm_mm_takedown(mm);
- kfree(mm);
-}
-
-static unsigned long drm_sman_mm_offset(void *private, void *ref)
-{
- struct drm_mm_node *node = (struct drm_mm_node *) ref;
- return node->start;
-}
-
-int
-drm_sman_set_range(struct drm_sman * sman, unsigned int manager,
- unsigned long start, unsigned long size)
-{
- struct drm_sman_mm *sman_mm;
- struct drm_mm *mm;
- int ret;
-
- BUG_ON(manager >= sman->num_managers);
-
- sman_mm = &sman->mm[manager];
- mm = kzalloc(sizeof(*mm), GFP_KERNEL);
- if (!mm) {
- return -ENOMEM;
- }
- sman_mm->private = mm;
- ret = drm_mm_init(mm, start, size);
-
- if (ret) {
- kfree(mm);
- return ret;
- }
-
- sman_mm->allocate = drm_sman_mm_allocate;
- sman_mm->free = drm_sman_mm_free;
- sman_mm->destroy = drm_sman_mm_destroy;
- sman_mm->offset = drm_sman_mm_offset;
-
- return 0;
-}
-
-EXPORT_SYMBOL(drm_sman_set_range);
-
-int
-drm_sman_set_manager(struct drm_sman * sman, unsigned int manager,
- struct drm_sman_mm * allocator)
-{
- BUG_ON(manager >= sman->num_managers);
- sman->mm[manager] = *allocator;
-
- return 0;
-}
-EXPORT_SYMBOL(drm_sman_set_manager);
-
-static struct drm_owner_item *drm_sman_get_owner_item(struct drm_sman * sman,
- unsigned long owner)
-{
- int ret;
- struct drm_hash_item *owner_hash_item;
- struct drm_owner_item *owner_item;
-
- ret = drm_ht_find_item(&sman->owner_hash_tab, owner, &owner_hash_item);
- if (!ret) {
- return drm_hash_entry(owner_hash_item, struct drm_owner_item,
- owner_hash);
- }
-
- owner_item = kzalloc(sizeof(*owner_item), GFP_KERNEL);
- if (!owner_item)
- goto out;
-
- INIT_LIST_HEAD(&owner_item->mem_blocks);
- owner_item->owner_hash.key = owner;
- if (drm_ht_insert_item(&sman->owner_hash_tab, &owner_item->owner_hash))
- goto out1;
-
- list_add_tail(&owner_item->sman_list, &sman->owner_items);
- return owner_item;
-
-out1:
- kfree(owner_item);
-out:
- return NULL;
-}
-
-struct drm_memblock_item *drm_sman_alloc(struct drm_sman *sman, unsigned int manager,
- unsigned long size, unsigned alignment,
- unsigned long owner)
-{
- void *tmp;
- struct drm_sman_mm *sman_mm;
- struct drm_owner_item *owner_item;
- struct drm_memblock_item *memblock;
-
- BUG_ON(manager >= sman->num_managers);
-
- sman_mm = &sman->mm[manager];
- tmp = sman_mm->allocate(sman_mm->private, size, alignment);
-
- if (!tmp) {
- return NULL;
- }
-
- memblock = kzalloc(sizeof(*memblock), GFP_KERNEL);
-
- if (!memblock)
- goto out;
-
- memblock->mm_info = tmp;
- memblock->mm = sman_mm;
- memblock->sman = sman;
-
- if (drm_ht_just_insert_please
- (&sman->user_hash_tab, &memblock->user_hash,
- (unsigned long)memblock, 32, 0, 0))
- goto out1;
-
- owner_item = drm_sman_get_owner_item(sman, owner);
- if (!owner_item)
- goto out2;
-
- list_add_tail(&memblock->owner_list, &owner_item->mem_blocks);
-
- return memblock;
-
-out2:
- drm_ht_remove_item(&sman->user_hash_tab, &memblock->user_hash);
-out1:
- kfree(memblock);
-out:
- sman_mm->free(sman_mm->private, tmp);
-
- return NULL;
-}
-
-EXPORT_SYMBOL(drm_sman_alloc);
-
-static void drm_sman_free(struct drm_memblock_item *item)
-{
- struct drm_sman *sman = item->sman;
-
- list_del(&item->owner_list);
- drm_ht_remove_item(&sman->user_hash_tab, &item->user_hash);
- item->mm->free(item->mm->private, item->mm_info);
- kfree(item);
-}
-
-int drm_sman_free_key(struct drm_sman *sman, unsigned int key)
-{
- struct drm_hash_item *hash_item;
- struct drm_memblock_item *memblock_item;
-
- if (drm_ht_find_item(&sman->user_hash_tab, key, &hash_item))
- return -EINVAL;
-
- memblock_item = drm_hash_entry(hash_item, struct drm_memblock_item,
- user_hash);
- drm_sman_free(memblock_item);
- return 0;
-}
-
-EXPORT_SYMBOL(drm_sman_free_key);
-
-static void drm_sman_remove_owner(struct drm_sman *sman,
- struct drm_owner_item *owner_item)
-{
- list_del(&owner_item->sman_list);
- drm_ht_remove_item(&sman->owner_hash_tab, &owner_item->owner_hash);
- kfree(owner_item);
-}
-
-int drm_sman_owner_clean(struct drm_sman *sman, unsigned long owner)
-{
-
- struct drm_hash_item *hash_item;
- struct drm_owner_item *owner_item;
-
- if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) {
- return -1;
- }
-
- owner_item = drm_hash_entry(hash_item, struct drm_owner_item, owner_hash);
- if (owner_item->mem_blocks.next == &owner_item->mem_blocks) {
- drm_sman_remove_owner(sman, owner_item);
- return -1;
- }
-
- return 0;
-}
-
-EXPORT_SYMBOL(drm_sman_owner_clean);
-
-static void drm_sman_do_owner_cleanup(struct drm_sman *sman,
- struct drm_owner_item *owner_item)
-{
- struct drm_memblock_item *entry, *next;
-
- list_for_each_entry_safe(entry, next, &owner_item->mem_blocks,
- owner_list) {
- drm_sman_free(entry);
- }
- drm_sman_remove_owner(sman, owner_item);
-}
-
-void drm_sman_owner_cleanup(struct drm_sman *sman, unsigned long owner)
-{
-
- struct drm_hash_item *hash_item;
- struct drm_owner_item *owner_item;
-
- if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) {
-
- return;
- }
-
- owner_item = drm_hash_entry(hash_item, struct drm_owner_item, owner_hash);
- drm_sman_do_owner_cleanup(sman, owner_item);
-}
-
-EXPORT_SYMBOL(drm_sman_owner_cleanup);
-
-void drm_sman_cleanup(struct drm_sman *sman)
-{
- struct drm_owner_item *entry, *next;
- unsigned int i;
- struct drm_sman_mm *sman_mm;
-
- list_for_each_entry_safe(entry, next, &sman->owner_items, sman_list) {
- drm_sman_do_owner_cleanup(sman, entry);
- }
- if (sman->mm) {
- for (i = 0; i < sman->num_managers; ++i) {
- sman_mm = &sman->mm[i];
- if (sman_mm->private) {
- sman_mm->destroy(sman_mm->private);
- sman_mm->private = NULL;
- }
- }
- }
-}
-
-EXPORT_SYMBOL(drm_sman_cleanup);
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 0f9ef9bf6730..62c3675045ac 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -72,7 +72,7 @@ static int drm_class_resume(struct device *dev)
return 0;
}
-static char *drm_devnode(struct device *dev, mode_t *mode)
+static char *drm_devnode(struct device *dev, umode_t *mode)
{
return kasprintf(GFP_KERNEL, "dri/%s", dev_name(dev));
}
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 847466aab435..f9aaa56eae07 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -18,3 +18,10 @@ config DRM_EXYNOS_FIMD
help
Choose this option if you want to use Exynos FIMD for DRM.
If M is selected, the module will be called exynos_drm_fimd
+
+config DRM_EXYNOS_HDMI
+ tristate "Exynos DRM HDMI"
+ depends on DRM_EXYNOS
+ help
+ Choose this option if you want to use Exynos HDMI for DRM.
+ If M is selected, the module will be called exynos_drm_hdmi
diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile
index 0496d3ff2683..395e69c9a96e 100644
--- a/drivers/gpu/drm/exynos/Makefile
+++ b/drivers/gpu/drm/exynos/Makefile
@@ -5,7 +5,10 @@
ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/exynos
exynosdrm-y := exynos_drm_drv.o exynos_drm_encoder.o exynos_drm_connector.o \
exynos_drm_crtc.o exynos_drm_fbdev.o exynos_drm_fb.o \
- exynos_drm_buf.o exynos_drm_gem.o exynos_drm_core.o
+ exynos_drm_buf.o exynos_drm_gem.o exynos_drm_core.o \
+ exynos_drm_plane.o
obj-$(CONFIG_DRM_EXYNOS) += exynosdrm.o
obj-$(CONFIG_DRM_EXYNOS_FIMD) += exynos_drm_fimd.o
+obj-$(CONFIG_DRM_EXYNOS_HDMI) += exynos_hdmi.o exynos_mixer.o exynos_ddc.o \
+ exynos_hdmiphy.o exynos_drm_hdmi.o
diff --git a/drivers/gpu/drm/exynos/exynos_ddc.c b/drivers/gpu/drm/exynos/exynos_ddc.c
new file mode 100644
index 000000000000..84b614fe26fd
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_ddc.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2011 Samsung Electronics Co.Ltd
+ * Authors:
+ * Seung-Woo Kim <sw0312.kim@samsung.com>
+ * Inki Dae <inki.dae@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include "drmP.h"
+
+#include <linux/kernel.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+
+
+#include "exynos_drm_drv.h"
+#include "exynos_hdmi.h"
+
+static int s5p_ddc_probe(struct i2c_client *client,
+ const struct i2c_device_id *dev_id)
+{
+ hdmi_attach_ddc_client(client);
+
+ dev_info(&client->adapter->dev, "attached s5p_ddc "
+ "into i2c adapter successfully\n");
+
+ return 0;
+}
+
+static int s5p_ddc_remove(struct i2c_client *client)
+{
+ dev_info(&client->adapter->dev, "detached s5p_ddc "
+ "from i2c adapter successfully\n");
+
+ return 0;
+}
+
+static struct i2c_device_id ddc_idtable[] = {
+ {"s5p_ddc", 0},
+ { },
+};
+
+struct i2c_driver ddc_driver = {
+ .driver = {
+ .name = "s5p_ddc",
+ .owner = THIS_MODULE,
+ },
+ .id_table = ddc_idtable,
+ .probe = s5p_ddc_probe,
+ .remove = __devexit_p(s5p_ddc_remove),
+ .command = NULL,
+};
+EXPORT_SYMBOL(ddc_driver);
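
A sketch of how the exported ddc_driver above would plausibly be registered from the HDMI driver's init path; the function name and exact call site are assumptions, only i2c_add_driver()/i2c_del_driver() are standard kernel API:

	static int __init exynos_hdmi_drv_init(void)
	{
		int ret;

		ret = i2c_add_driver(&ddc_driver);	/* bind the s5p_ddc client */
		if (ret < 0)
			return ret;

		/* ... register the HDMI platform driver here, calling
		 * i2c_del_driver(&ddc_driver) on failure ... */
		return 0;
	}
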
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c
index 6f8afea94fc9..3cf785c58186 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.c
@@ -27,82 +27,83 @@
#include "drm.h"
#include "exynos_drm_drv.h"
+#include "exynos_drm_gem.h"
#include "exynos_drm_buf.h"
-static DEFINE_MUTEX(exynos_drm_buf_lock);
-
static int lowlevel_buffer_allocate(struct drm_device *dev,
- struct exynos_drm_buf_entry *entry)
+ struct exynos_drm_gem_buf *buffer)
{
DRM_DEBUG_KMS("%s\n", __FILE__);
- entry->vaddr = dma_alloc_writecombine(dev->dev, entry->size,
- (dma_addr_t *)&entry->paddr, GFP_KERNEL);
- if (!entry->paddr) {
+ buffer->kvaddr = dma_alloc_writecombine(dev->dev, buffer->size,
+ &buffer->dma_addr, GFP_KERNEL);
+ if (!buffer->kvaddr) {
DRM_ERROR("failed to allocate buffer.\n");
return -ENOMEM;
}
- DRM_DEBUG_KMS("allocated : vaddr(0x%x), paddr(0x%x), size(0x%x)\n",
- (unsigned int)entry->vaddr, entry->paddr, entry->size);
+ DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n",
+ (unsigned long)buffer->kvaddr,
+ (unsigned long)buffer->dma_addr,
+ buffer->size);
return 0;
}
static void lowlevel_buffer_deallocate(struct drm_device *dev,
- struct exynos_drm_buf_entry *entry)
+ struct exynos_drm_gem_buf *buffer)
{
DRM_DEBUG_KMS("%s.\n", __FILE__);
- if (entry->paddr && entry->vaddr && entry->size)
- dma_free_writecombine(dev->dev, entry->size, entry->vaddr,
- entry->paddr);
+ if (buffer->dma_addr && buffer->size)
+ dma_free_writecombine(dev->dev, buffer->size, buffer->kvaddr,
+ (dma_addr_t)buffer->dma_addr);
else
- DRM_DEBUG_KMS("entry data is null.\n");
+ DRM_DEBUG_KMS("buffer data are invalid.\n");
}
-struct exynos_drm_buf_entry *exynos_drm_buf_create(struct drm_device *dev,
+struct exynos_drm_gem_buf *exynos_drm_buf_create(struct drm_device *dev,
unsigned int size)
{
- struct exynos_drm_buf_entry *entry;
+ struct exynos_drm_gem_buf *buffer;
DRM_DEBUG_KMS("%s.\n", __FILE__);
+ DRM_DEBUG_KMS("desired size = 0x%x\n", size);
- entry = kzalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry) {
- DRM_ERROR("failed to allocate exynos_drm_buf_entry.\n");
- return ERR_PTR(-ENOMEM);
+ buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+ if (!buffer) {
+ DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
+ return NULL;
}
- entry->size = size;
+ buffer->size = size;
/*
* allocate memory region with size and set the memory information
- * to vaddr and paddr of a entry object.
+ * to vaddr and dma_addr of a buffer object.
*/
- if (lowlevel_buffer_allocate(dev, entry) < 0) {
- kfree(entry);
- entry = NULL;
- return ERR_PTR(-ENOMEM);
+ if (lowlevel_buffer_allocate(dev, buffer) < 0) {
+ kfree(buffer);
+ return NULL;
}
- return entry;
+ return buffer;
}
void exynos_drm_buf_destroy(struct drm_device *dev,
- struct exynos_drm_buf_entry *entry)
+ struct exynos_drm_gem_buf *buffer)
{
DRM_DEBUG_KMS("%s.\n", __FILE__);
- if (!entry) {
- DRM_DEBUG_KMS("entry is null.\n");
+ if (!buffer) {
+ DRM_DEBUG_KMS("buffer is null.\n");
return;
}
- lowlevel_buffer_deallocate(dev, entry);
+ lowlevel_buffer_deallocate(dev, buffer);
- kfree(entry);
- entry = NULL;
+ kfree(buffer);
+ buffer = NULL;
}
MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
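
A minimal usage sketch of the reworked buffer helpers (caller side; drm_dev and size are placeholders, not taken from this hunk):

	struct exynos_drm_gem_buf *buf;

	buf = exynos_drm_buf_create(drm_dev, roundup(size, PAGE_SIZE));
	if (!buf)
		return -ENOMEM;

	/* buf->kvaddr gives CPU access, buf->dma_addr feeds the scanout engine */

	exynos_drm_buf_destroy(drm_dev, buf);
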
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.h b/drivers/gpu/drm/exynos/exynos_drm_buf.h
index 045d59eab01a..c913f2bad760 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.h
@@ -26,28 +26,12 @@
#ifndef _EXYNOS_DRM_BUF_H_
#define _EXYNOS_DRM_BUF_H_
-/*
- * exynos drm buffer entry structure.
- *
- * @paddr: physical address of allocated memory.
- * @vaddr: kernel virtual address of allocated memory.
- * @size: size of allocated memory.
- */
-struct exynos_drm_buf_entry {
- dma_addr_t paddr;
- void __iomem *vaddr;
- unsigned int size;
-};
-
/* allocate physical memory. */
-struct exynos_drm_buf_entry *exynos_drm_buf_create(struct drm_device *dev,
+struct exynos_drm_gem_buf *exynos_drm_buf_create(struct drm_device *dev,
unsigned int size);
-/* get physical memory information of a drm framebuffer. */
-struct exynos_drm_buf_entry *exynos_drm_fb_get_buf(struct drm_framebuffer *fb);
-
/* remove allocated physical memory. */
void exynos_drm_buf_destroy(struct drm_device *dev,
- struct exynos_drm_buf_entry *entry);
+ struct exynos_drm_gem_buf *buffer);
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c
index 985d9e768728..d620b0784257 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_connector.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_connector.c
@@ -37,6 +37,8 @@
struct exynos_drm_connector {
struct drm_connector drm_connector;
+ uint32_t encoder_id;
+ struct exynos_drm_manager *manager;
};
/* convert exynos_video_timings to drm_display_mode */
@@ -47,6 +49,7 @@ convert_to_display_mode(struct drm_display_mode *mode,
DRM_DEBUG_KMS("%s\n", __FILE__);
mode->clock = timing->pixclock / 1000;
+ mode->vrefresh = timing->refresh;
mode->hdisplay = timing->xres;
mode->hsync_start = mode->hdisplay + timing->left_margin;
@@ -57,6 +60,12 @@ convert_to_display_mode(struct drm_display_mode *mode,
mode->vsync_start = mode->vdisplay + timing->upper_margin;
mode->vsync_end = mode->vsync_start + timing->vsync_len;
mode->vtotal = mode->vsync_end + timing->lower_margin;
+
+ if (timing->vmode & FB_VMODE_INTERLACED)
+ mode->flags |= DRM_MODE_FLAG_INTERLACE;
+
+ if (timing->vmode & FB_VMODE_DOUBLE)
+ mode->flags |= DRM_MODE_FLAG_DBLSCAN;
}
/* convert drm_display_mode to exynos_video_timings */
@@ -69,7 +78,7 @@ convert_to_video_timing(struct fb_videomode *timing,
memset(timing, 0, sizeof(*timing));
timing->pixclock = mode->clock * 1000;
- timing->refresh = mode->vrefresh;
+ timing->refresh = drm_mode_vrefresh(mode);
timing->xres = mode->hdisplay;
timing->left_margin = mode->hsync_start - mode->hdisplay;
@@ -92,15 +101,16 @@ convert_to_video_timing(struct fb_videomode *timing,
static int exynos_drm_connector_get_modes(struct drm_connector *connector)
{
- struct exynos_drm_manager *manager =
- exynos_drm_get_manager(connector->encoder);
- struct exynos_drm_display *display = manager->display;
+ struct exynos_drm_connector *exynos_connector =
+ to_exynos_connector(connector);
+ struct exynos_drm_manager *manager = exynos_connector->manager;
+ struct exynos_drm_display_ops *display_ops = manager->display_ops;
unsigned int count;
DRM_DEBUG_KMS("%s\n", __FILE__);
- if (!display) {
- DRM_DEBUG_KMS("display is null.\n");
+ if (!display_ops) {
+ DRM_DEBUG_KMS("display_ops is null.\n");
return 0;
}
@@ -112,7 +122,7 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
* P.S. in case of lcd panel, count is always 1 if success
* because lcd panel has only one mode.
*/
- if (display->get_edid) {
+ if (display_ops->get_edid) {
int ret;
void *edid;
@@ -122,7 +132,7 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
return 0;
}
- ret = display->get_edid(manager->dev, connector,
+ ret = display_ops->get_edid(manager->dev, connector,
edid, MAX_EDID);
if (ret < 0) {
DRM_ERROR("failed to get edid data.\n");
@@ -140,8 +150,8 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
struct drm_display_mode *mode = drm_mode_create(connector->dev);
struct fb_videomode *timing;
- if (display->get_timing)
- timing = display->get_timing(manager->dev);
+ if (display_ops->get_timing)
+ timing = display_ops->get_timing(manager->dev);
else {
drm_mode_destroy(connector->dev, mode);
return 0;
@@ -162,9 +172,10 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
static int exynos_drm_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct exynos_drm_manager *manager =
- exynos_drm_get_manager(connector->encoder);
- struct exynos_drm_display *display = manager->display;
+ struct exynos_drm_connector *exynos_connector =
+ to_exynos_connector(connector);
+ struct exynos_drm_manager *manager = exynos_connector->manager;
+ struct exynos_drm_display_ops *display_ops = manager->display_ops;
struct fb_videomode timing;
int ret = MODE_BAD;
@@ -172,8 +183,8 @@ static int exynos_drm_connector_mode_valid(struct drm_connector *connector,
convert_to_video_timing(&timing, mode);
- if (display && display->check_timing)
- if (!display->check_timing(manager->dev, (void *)&timing))
+ if (display_ops && display_ops->check_timing)
+ if (!display_ops->check_timing(manager->dev, (void *)&timing))
ret = MODE_OK;
return ret;
@@ -181,9 +192,25 @@ static int exynos_drm_connector_mode_valid(struct drm_connector *connector,
struct drm_encoder *exynos_drm_best_encoder(struct drm_connector *connector)
{
+ struct drm_device *dev = connector->dev;
+ struct exynos_drm_connector *exynos_connector =
+ to_exynos_connector(connector);
+ struct drm_mode_object *obj;
+ struct drm_encoder *encoder;
+
DRM_DEBUG_KMS("%s\n", __FILE__);
- return connector->encoder;
+ obj = drm_mode_object_find(dev, exynos_connector->encoder_id,
+ DRM_MODE_OBJECT_ENCODER);
+ if (!obj) {
+ DRM_DEBUG_KMS("Unknown ENCODER ID %d\n",
+ exynos_connector->encoder_id);
+ return NULL;
+ }
+
+ encoder = obj_to_encoder(obj);
+
+ return encoder;
}
static struct drm_connector_helper_funcs exynos_connector_helper_funcs = {
@@ -196,15 +223,17 @@ static struct drm_connector_helper_funcs exynos_connector_helper_funcs = {
static enum drm_connector_status
exynos_drm_connector_detect(struct drm_connector *connector, bool force)
{
- struct exynos_drm_manager *manager =
- exynos_drm_get_manager(connector->encoder);
- struct exynos_drm_display *display = manager->display;
+ struct exynos_drm_connector *exynos_connector =
+ to_exynos_connector(connector);
+ struct exynos_drm_manager *manager = exynos_connector->manager;
+ struct exynos_drm_display_ops *display_ops =
+ manager->display_ops;
enum drm_connector_status status = connector_status_disconnected;
DRM_DEBUG_KMS("%s\n", __FILE__);
- if (display && display->is_connected) {
- if (display->is_connected(manager->dev))
+ if (display_ops && display_ops->is_connected) {
+ if (display_ops->is_connected(manager->dev))
status = connector_status_connected;
else
status = connector_status_disconnected;
@@ -251,9 +280,11 @@ struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,
connector = &exynos_connector->drm_connector;
- switch (manager->display->type) {
+ switch (manager->display_ops->type) {
case EXYNOS_DISPLAY_TYPE_HDMI:
type = DRM_MODE_CONNECTOR_HDMIA;
+ connector->interlace_allowed = true;
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
break;
default:
type = DRM_MODE_CONNECTOR_Unknown;
@@ -267,7 +298,10 @@ struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,
if (err)
goto err_connector;
+ exynos_connector->encoder_id = encoder->base.id;
+ exynos_connector->manager = manager;
connector->encoder = encoder;
+
err = drm_mode_connector_attach_encoder(connector, encoder);
if (err) {
DRM_ERROR("failed to attach a connector to a encoder\n");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 9337e5e2dbb6..e3861ac49295 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -29,36 +29,16 @@
#include "drmP.h"
#include "drm_crtc_helper.h"
+#include "exynos_drm_crtc.h"
#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_encoder.h"
-#include "exynos_drm_buf.h"
+#include "exynos_drm_gem.h"
#define to_exynos_crtc(x) container_of(x, struct exynos_drm_crtc,\
drm_crtc)
/*
- * Exynos specific crtc postion structure.
- *
- * @fb_x: offset x on a framebuffer to be displyed
- * - the unit is screen coordinates.
- * @fb_y: offset y on a framebuffer to be displayed
- * - the unit is screen coordinates.
- * @crtc_x: offset x on hardware screen.
- * @crtc_y: offset y on hardware screen.
- * @crtc_w: width of hardware screen.
- * @crtc_h: height of hardware screen.
- */
-struct exynos_drm_crtc_pos {
- unsigned int fb_x;
- unsigned int fb_y;
- unsigned int crtc_x;
- unsigned int crtc_y;
- unsigned int crtc_w;
- unsigned int crtc_h;
-};
-
-/*
* Exynos specific crtc structure.
*
* @drm_crtc: crtc object.
@@ -71,11 +51,13 @@ struct exynos_drm_crtc_pos {
* drm framework doesn't support multiple irq yet.
* we can refer to the crtc to current hardware interrupt occured through
* this pipe value.
+ * @dpms: store the crtc dpms value
*/
struct exynos_drm_crtc {
struct drm_crtc drm_crtc;
struct exynos_drm_overlay overlay;
unsigned int pipe;
+ unsigned int dpms;
};
static void exynos_drm_crtc_apply(struct drm_crtc *crtc)
@@ -85,30 +67,35 @@ static void exynos_drm_crtc_apply(struct drm_crtc *crtc)
exynos_drm_fn_encoder(crtc, overlay,
exynos_drm_encoder_crtc_mode_set);
- exynos_drm_fn_encoder(crtc, NULL, exynos_drm_encoder_crtc_commit);
+ exynos_drm_fn_encoder(crtc, &exynos_crtc->pipe,
+ exynos_drm_encoder_crtc_commit);
}
-static int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay,
- struct drm_framebuffer *fb,
- struct drm_display_mode *mode,
- struct exynos_drm_crtc_pos *pos)
+int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay,
+ struct drm_framebuffer *fb,
+ struct drm_display_mode *mode,
+ struct exynos_drm_crtc_pos *pos)
{
- struct exynos_drm_buf_entry *entry;
+ struct exynos_drm_gem_buf *buffer;
unsigned int actual_w;
unsigned int actual_h;
+ int nr = exynos_drm_format_num_buffers(fb->pixel_format);
+ int i;
+
+ for (i = 0; i < nr; i++) {
+ buffer = exynos_drm_fb_buffer(fb, i);
+ if (!buffer) {
+ DRM_LOG_KMS("buffer is null\n");
+ return -EFAULT;
+ }
- entry = exynos_drm_fb_get_buf(fb);
- if (!entry) {
- DRM_LOG_KMS("entry is null.\n");
- return -EFAULT;
- }
-
- overlay->paddr = entry->paddr;
- overlay->vaddr = entry->vaddr;
+ overlay->dma_addr[i] = buffer->dma_addr;
+ overlay->vaddr[i] = buffer->kvaddr;
- DRM_DEBUG_KMS("vaddr = 0x%lx, paddr = 0x%lx\n",
- (unsigned long)overlay->vaddr,
- (unsigned long)overlay->paddr);
+ DRM_DEBUG_KMS("buffer: %d, vaddr = 0x%lx, dma_addr = 0x%lx\n",
+ i, (unsigned long)overlay->vaddr[i],
+ (unsigned long)overlay->dma_addr[i]);
+ }
actual_w = min((mode->hdisplay - pos->crtc_x), pos->crtc_w);
actual_h = min((mode->vdisplay - pos->crtc_y), pos->crtc_h);
@@ -119,7 +106,8 @@ static int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay,
overlay->fb_width = fb->width;
overlay->fb_height = fb->height;
overlay->bpp = fb->bits_per_pixel;
- overlay->pitch = fb->pitch;
+ overlay->pitch = fb->pitches[0];
+ overlay->pixel_format = fb->pixel_format;
/* set overlay range to be displayed. */
overlay->crtc_x = pos->crtc_x;
@@ -171,9 +159,37 @@ static int exynos_drm_crtc_update(struct drm_crtc *crtc)
static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
{
- DRM_DEBUG_KMS("%s\n", __FILE__);
+ struct drm_device *dev = crtc->dev;
+ struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+
+ DRM_DEBUG_KMS("crtc[%d] mode[%d]\n", crtc->base.id, mode);
+
+ if (exynos_crtc->dpms == mode) {
+ DRM_DEBUG_KMS("desired dpms mode is same as previous one.\n");
+ return;
+ }
+
+ mutex_lock(&dev->struct_mutex);
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ exynos_drm_fn_encoder(crtc, &mode,
+ exynos_drm_encoder_crtc_dpms);
+ exynos_crtc->dpms = mode;
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ exynos_drm_fn_encoder(crtc, &mode,
+ exynos_drm_encoder_crtc_dpms);
+ exynos_crtc->dpms = mode;
+ break;
+ default:
+ DRM_ERROR("unspecified mode %d\n", mode);
+ break;
+ }
- /* TODO */
+ mutex_unlock(&dev->struct_mutex);
}
static void exynos_drm_crtc_prepare(struct drm_crtc *crtc)
@@ -185,9 +201,34 @@ static void exynos_drm_crtc_prepare(struct drm_crtc *crtc)
static void exynos_drm_crtc_commit(struct drm_crtc *crtc)
{
+ struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+
DRM_DEBUG_KMS("%s\n", __FILE__);
- /* drm framework doesn't check NULL. */
+ /*
+	 * when set_crtc is requested from user space or at boot time,
+	 * crtc->commit may be called without a preceding dpms call, so if
+	 * the crtc is not yet powered on, crtc->dpms should be called
+	 * with DRM_MODE_DPMS_ON to power the hardware up.
+ */
+ if (exynos_crtc->dpms != DRM_MODE_DPMS_ON) {
+ int mode = DRM_MODE_DPMS_ON;
+
+ /*
+		 * power on the hardware of all hdmi encoders connected
+		 * to the current crtc.
+ */
+ exynos_drm_crtc_dpms(crtc, mode);
+ /*
+		 * enable dma for all encoders connected to the current crtc
+		 * and for the lcd panel.
+ */
+ exynos_drm_fn_encoder(crtc, &mode,
+ exynos_drm_encoder_dpms_from_crtc);
+ }
+
+ exynos_drm_fn_encoder(crtc, &exynos_crtc->pipe,
+ exynos_drm_encoder_crtc_commit);
}
static bool
@@ -342,6 +383,8 @@ int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr)
}
exynos_crtc->pipe = nr;
+ exynos_crtc->dpms = DRM_MODE_DPMS_OFF;
+ exynos_crtc->overlay.zpos = DEFAULT_ZPOS;
crtc = &exynos_crtc->drm_crtc;
private->crtc[nr] = crtc;
@@ -355,9 +398,14 @@ int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr)
int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int crtc)
{
struct exynos_drm_private *private = dev->dev_private;
+ struct exynos_drm_crtc *exynos_crtc =
+ to_exynos_crtc(private->crtc[crtc]);
DRM_DEBUG_KMS("%s\n", __FILE__);
+ if (exynos_crtc->dpms != DRM_MODE_DPMS_ON)
+ return -EPERM;
+
exynos_drm_fn_encoder(private->crtc[crtc], &crtc,
exynos_drm_enable_vblank);
@@ -367,9 +415,14 @@ int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int crtc)
void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int crtc)
{
struct exynos_drm_private *private = dev->dev_private;
+ struct exynos_drm_crtc *exynos_crtc =
+ to_exynos_crtc(private->crtc[crtc]);
DRM_DEBUG_KMS("%s\n", __FILE__);
+ if (exynos_crtc->dpms != DRM_MODE_DPMS_ON)
+ return;
+
exynos_drm_fn_encoder(private->crtc[crtc], &crtc,
exynos_drm_disable_vblank);
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
index c584042d6d2c..25f72a62cb88 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
@@ -35,4 +35,29 @@ int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr);
int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int crtc);
void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int crtc);
+/*
+ * Exynos specific crtc position structure.
+ *
+ * @fb_x: offset x on a framebuffer to be displayed
+ * - the unit is screen coordinates.
+ * @fb_y: offset y on a framebuffer to be displayed
+ * - the unit is screen coordinates.
+ * @crtc_x: offset x on hardware screen.
+ * @crtc_y: offset y on hardware screen.
+ * @crtc_w: width of hardware screen.
+ * @crtc_h: height of hardware screen.
+ */
+struct exynos_drm_crtc_pos {
+ unsigned int fb_x;
+ unsigned int fb_y;
+ unsigned int crtc_x;
+ unsigned int crtc_y;
+ unsigned int crtc_w;
+ unsigned int crtc_h;
+};
+
+int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay,
+ struct drm_framebuffer *fb,
+ struct drm_display_mode *mode,
+ struct exynos_drm_crtc_pos *pos);
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 83810cbe3c17..35889ca255e9 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -27,6 +27,7 @@
#include "drmP.h"
#include "drm.h"
+#include "drm_crtc_helper.h"
#include <drm/exynos_drm.h>
@@ -35,13 +36,16 @@
#include "exynos_drm_fbdev.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_gem.h"
+#include "exynos_drm_plane.h"
-#define DRIVER_NAME "exynos-drm"
+#define DRIVER_NAME "exynos"
#define DRIVER_DESC "Samsung SoC DRM"
#define DRIVER_DATE "20110530"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
+#define VBLANK_OFF_DELAY 50000
+
static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
{
struct exynos_drm_private *private;
@@ -61,6 +65,9 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
drm_mode_config_init(dev);
+ /* init kms poll for handling hpd */
+ drm_kms_helper_poll_init(dev);
+
exynos_drm_mode_config_init(dev);
/*
@@ -73,6 +80,12 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
goto err_crtc;
}
+ for (nr = 0; nr < MAX_PLANE; nr++) {
+ ret = exynos_plane_init(dev, nr);
+ if (ret)
+ goto err_crtc;
+ }
+
ret = drm_vblank_init(dev, MAX_CRTC);
if (ret)
goto err_crtc;
@@ -96,6 +109,8 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
goto err_drm_device;
}
+ drm_vblank_offdelay = VBLANK_OFF_DELAY;
+
return 0;
err_drm_device:
@@ -116,6 +131,7 @@ static int exynos_drm_unload(struct drm_device *dev)
exynos_drm_fbdev_fini(dev);
exynos_drm_device_unregister(dev);
drm_vblank_cleanup(dev);
+ drm_kms_helper_poll_fini(dev);
drm_mode_config_cleanup(dev);
kfree(dev->dev_private);
@@ -158,6 +174,18 @@ static struct drm_ioctl_desc exynos_ioctls[] = {
DRM_AUTH),
DRM_IOCTL_DEF_DRV(EXYNOS_GEM_MMAP,
exynos_drm_gem_mmap_ioctl, DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(EXYNOS_PLANE_SET_ZPOS, exynos_plane_set_zpos_ioctl,
+ DRM_UNLOCKED | DRM_AUTH),
+};
+
+static const struct file_operations exynos_drm_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .mmap = exynos_drm_gem_mmap,
+ .poll = drm_poll,
+ .read = drm_read,
+ .unlocked_ioctl = drm_ioctl,
+ .release = drm_release,
};
static struct drm_driver exynos_drm_driver = {
@@ -177,15 +205,7 @@ static struct drm_driver exynos_drm_driver = {
.dumb_map_offset = exynos_drm_gem_dumb_map_offset,
.dumb_destroy = exynos_drm_gem_dumb_destroy,
.ioctls = exynos_ioctls,
- .fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .mmap = exynos_drm_gem_mmap,
- .poll = drm_poll,
- .read = drm_read,
- .unlocked_ioctl = drm_ioctl,
- .release = drm_release,
- },
+ .fops = &exynos_drm_driver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index c03683f2ae72..e685e1e33055 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -29,14 +29,20 @@
#ifndef _EXYNOS_DRM_DRV_H_
#define _EXYNOS_DRM_DRV_H_
+#include <linux/module.h>
#include "drm.h"
#define MAX_CRTC 2
+#define MAX_PLANE 5
+#define MAX_FB_BUFFER 3
+#define DEFAULT_ZPOS -1
struct drm_device;
struct exynos_drm_overlay;
struct drm_connector;
+extern unsigned int drm_vblank_offdelay;
+
/* this enumerates display type. */
enum exynos_drm_output_type {
EXYNOS_DISPLAY_TYPE_NONE,
@@ -56,8 +62,8 @@ enum exynos_drm_output_type {
struct exynos_drm_overlay_ops {
void (*mode_set)(struct device *subdrv_dev,
struct exynos_drm_overlay *overlay);
- void (*commit)(struct device *subdrv_dev);
- void (*disable)(struct device *subdrv_dev);
+ void (*commit)(struct device *subdrv_dev, int zpos);
+ void (*disable)(struct device *subdrv_dev, int zpos);
};
/*
@@ -79,9 +85,11 @@ struct exynos_drm_overlay_ops {
* @scan_flag: interlace or progressive way.
* (it could be DRM_MODE_FLAG_*)
* @bpp: pixel size.(in bit)
- * @paddr: bus(accessed by dma) physical memory address to this overlay
- * and this is physically continuous.
- * @vaddr: virtual memory addresss to this overlay.
+ * @pixel_format: fourcc pixel format of this overlay
+ * @dma_addr: array of bus (DMA-accessed) addresses of the memory regions
+ *	allocated for an overlay.
+ * @vaddr: array of virtual memory addresses for this overlay.
+ * @zpos: order of overlay layer(z position).
* @default_win: a window to be enabled.
* @color_key: color key on or off.
* @index_color: if using color key feature then this value would be used
@@ -108,8 +116,10 @@ struct exynos_drm_overlay {
unsigned int scan_flag;
unsigned int bpp;
unsigned int pitch;
- dma_addr_t paddr;
- void __iomem *vaddr;
+ uint32_t pixel_format;
+ dma_addr_t dma_addr[MAX_FB_BUFFER];
+ void __iomem *vaddr[MAX_FB_BUFFER];
+ int zpos;
bool default_win;
bool color_key;
@@ -130,7 +140,7 @@ struct exynos_drm_overlay {
* @check_timing: check if timing is valid or not.
* @power_on: display device on or off.
*/
-struct exynos_drm_display {
+struct exynos_drm_display_ops {
enum exynos_drm_output_type type;
bool (*is_connected)(struct device *dev);
int (*get_edid)(struct device *dev, struct drm_connector *connector,
@@ -143,6 +153,8 @@ struct exynos_drm_display {
/*
* Exynos drm manager ops
*
+ * @dpms: control device power.
+ * @apply: set timing, vblank and overlay data to registers.
* @mode_set: convert drm_display_mode to hw specific display mode and
* would be called by encoder->mode_set().
* @commit: set current hw specific display mode to hw.
@@ -150,6 +162,8 @@ struct exynos_drm_display {
* @disable_vblank: specific driver callback for disabling vblank interrupt.
*/
struct exynos_drm_manager_ops {
+ void (*dpms)(struct device *subdrv_dev, int mode);
+ void (*apply)(struct device *subdrv_dev);
void (*mode_set)(struct device *subdrv_dev, void *mode);
void (*commit)(struct device *subdrv_dev);
int (*enable_vblank)(struct device *subdrv_dev);
@@ -178,7 +192,7 @@ struct exynos_drm_manager {
int pipe;
struct exynos_drm_manager_ops *ops;
struct exynos_drm_overlay_ops *overlay_ops;
- struct exynos_drm_display *display;
+ struct exynos_drm_display_ops *display_ops;
};
/*
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
index 7cf6fa86a67e..86b93dde219a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
@@ -42,30 +42,70 @@
* @drm_encoder: encoder object.
* @manager: specific encoder has its own manager to control a hardware
* appropriately and we can access a hardware drawing on this manager.
+ * @dpms: store the encoder dpms value.
*/
struct exynos_drm_encoder {
struct drm_encoder drm_encoder;
struct exynos_drm_manager *manager;
+ int dpms;
};
-static void exynos_drm_encoder_dpms(struct drm_encoder *encoder, int mode)
+static void exynos_drm_display_power(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
struct drm_connector *connector;
struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);
- DRM_DEBUG_KMS("%s, encoder dpms: %d\n", __FILE__, mode);
-
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
if (connector->encoder == encoder) {
- struct exynos_drm_display *display = manager->display;
+ struct exynos_drm_display_ops *display_ops =
+ manager->display_ops;
- if (display && display->power_on)
- display->power_on(manager->dev, mode);
+ DRM_DEBUG_KMS("connector[%d] dpms[%d]\n",
+ connector->base.id, mode);
+ if (display_ops && display_ops->power_on)
+ display_ops->power_on(manager->dev, mode);
}
}
}
+static void exynos_drm_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);
+ struct exynos_drm_manager_ops *manager_ops = manager->ops;
+ struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
+
+ DRM_DEBUG_KMS("%s, encoder dpms: %d\n", __FILE__, mode);
+
+ if (exynos_encoder->dpms == mode) {
+ DRM_DEBUG_KMS("desired dpms mode is same as previous one.\n");
+ return;
+ }
+
+ mutex_lock(&dev->struct_mutex);
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ if (manager_ops && manager_ops->apply)
+ manager_ops->apply(manager->dev);
+ exynos_drm_display_power(encoder, mode);
+ exynos_encoder->dpms = mode;
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ exynos_drm_display_power(encoder, mode);
+ exynos_encoder->dpms = mode;
+ break;
+ default:
+ DRM_ERROR("unspecified mode %d\n", mode);
+ break;
+ }
+
+ mutex_unlock(&dev->struct_mutex);
+}
+
static bool
exynos_drm_encoder_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
@@ -116,15 +156,11 @@ static void exynos_drm_encoder_commit(struct drm_encoder *encoder)
{
struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);
struct exynos_drm_manager_ops *manager_ops = manager->ops;
- struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
DRM_DEBUG_KMS("%s\n", __FILE__);
if (manager_ops && manager_ops->commit)
manager_ops->commit(manager->dev);
-
- if (overlay_ops && overlay_ops->commit)
- overlay_ops->commit(manager->dev);
}
static struct drm_crtc *
@@ -152,7 +188,6 @@ static void exynos_drm_encoder_destroy(struct drm_encoder *encoder)
exynos_encoder->manager->pipe = -1;
drm_encoder_cleanup(encoder);
- encoder->dev->mode_config.num_encoder--;
kfree(exynos_encoder);
}
@@ -182,6 +217,7 @@ exynos_drm_encoder_create(struct drm_device *dev,
return NULL;
}
+ exynos_encoder->dpms = DRM_MODE_DPMS_OFF;
exynos_encoder->manager = manager;
encoder = &exynos_encoder->drm_encoder;
encoder->possible_crtcs = possible_crtcs;
@@ -208,10 +244,23 @@ void exynos_drm_fn_encoder(struct drm_crtc *crtc, void *data,
{
struct drm_device *dev = crtc->dev;
struct drm_encoder *encoder;
+ struct exynos_drm_private *private = dev->dev_private;
+ struct exynos_drm_manager *manager;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- if (encoder->crtc != crtc)
- continue;
+ /*
+ * if the crtc is detached from the encoder, match by pipe;
+ * otherwise match the crtc attached to the encoder.
+ */
+ if (!encoder->crtc) {
+ manager = to_exynos_encoder(encoder)->manager;
+ if (manager->pipe < 0 ||
+ private->crtc[manager->pipe] != crtc)
+ continue;
+ } else {
+ if (encoder->crtc != crtc)
+ continue;
+ }
fn(encoder, data);
}
@@ -245,13 +294,83 @@ void exynos_drm_disable_vblank(struct drm_encoder *encoder, void *data)
manager_ops->disable_vblank(manager->dev);
}
-void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data)
+void exynos_drm_encoder_crtc_plane_commit(struct drm_encoder *encoder,
+ void *data)
{
struct exynos_drm_manager *manager =
to_exynos_encoder(encoder)->manager;
struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
+ int zpos = DEFAULT_ZPOS;
- overlay_ops->commit(manager->dev);
+ if (data)
+ zpos = *(int *)data;
+
+ if (overlay_ops && overlay_ops->commit)
+ overlay_ops->commit(manager->dev, zpos);
+}
+
+void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data)
+{
+ struct exynos_drm_manager *manager =
+ to_exynos_encoder(encoder)->manager;
+ int crtc = *(int *)data;
+ int zpos = DEFAULT_ZPOS;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ /*
+ * when crtc is detached from encoder, this pipe is used
+ * to select manager operation
+ */
+ manager->pipe = crtc;
+
+ exynos_drm_encoder_crtc_plane_commit(encoder, &zpos);
+}
+
+void exynos_drm_encoder_dpms_from_crtc(struct drm_encoder *encoder, void *data)
+{
+ struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
+ int mode = *(int *)data;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ exynos_drm_encoder_dpms(encoder, mode);
+
+ exynos_encoder->dpms = mode;
+}
+
+void exynos_drm_encoder_crtc_dpms(struct drm_encoder *encoder, void *data)
+{
+ struct drm_device *dev = encoder->dev;
+ struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
+ struct exynos_drm_manager *manager = exynos_encoder->manager;
+ struct exynos_drm_manager_ops *manager_ops = manager->ops;
+ struct drm_connector *connector;
+ int mode = *(int *)data;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (manager_ops && manager_ops->dpms)
+ manager_ops->dpms(manager->dev, mode);
+
+ /*
+ * set the current dpms mode on the connector attached to this
+ * encoder; connector->dpms is checked later by
+ * drm_helper_connector_dpms().
+ */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ if (connector->encoder == encoder)
+ connector->dpms = mode;
+
+ /*
+ * if this condition holds, the crtc is already detached from the
+ * encoder and the final detach step has completed, so clear the
+ * pipe in the manager to prevent repeated calls.
+ */
+ if (mode > DRM_MODE_DPMS_ON) {
+ if (!encoder->crtc)
+ manager->pipe = -1;
+ }
}
void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data)
@@ -261,7 +380,24 @@ void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data)
struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
struct exynos_drm_overlay *overlay = data;
- overlay_ops->mode_set(manager->dev, overlay);
+ if (overlay_ops && overlay_ops->mode_set)
+ overlay_ops->mode_set(manager->dev, overlay);
+}
+
+void exynos_drm_encoder_crtc_disable(struct drm_encoder *encoder, void *data)
+{
+ struct exynos_drm_manager *manager =
+ to_exynos_encoder(encoder)->manager;
+ struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
+ int zpos = DEFAULT_ZPOS;
+
+ DRM_DEBUG_KMS("\n");
+
+ if (data)
+ zpos = *(int *)data;
+
+ if (overlay_ops && overlay_ops->disable)
+ overlay_ops->disable(manager->dev, zpos);
}
MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.h b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
index 5ecd645d06a9..97b087a51cb6 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
@@ -39,7 +39,13 @@ void exynos_drm_fn_encoder(struct drm_crtc *crtc, void *data,
void (*fn)(struct drm_encoder *, void *));
void exynos_drm_enable_vblank(struct drm_encoder *encoder, void *data);
void exynos_drm_disable_vblank(struct drm_encoder *encoder, void *data);
+void exynos_drm_encoder_crtc_plane_commit(struct drm_encoder *encoder,
+ void *data);
void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data);
+void exynos_drm_encoder_dpms_from_crtc(struct drm_encoder *encoder,
+ void *data);
+void exynos_drm_encoder_crtc_dpms(struct drm_encoder *encoder, void *data);
void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data);
+void exynos_drm_encoder_crtc_disable(struct drm_encoder *encoder, void *data);
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 48d29cfd5240..3733fe6723d3 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -29,9 +29,10 @@
#include "drmP.h"
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
+#include "drm_fb_helper.h"
+#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
-#include "exynos_drm_buf.h"
#include "exynos_drm_gem.h"
#define to_exynos_fb(x) container_of(x, struct exynos_drm_fb, fb)
@@ -40,15 +41,11 @@
* exynos specific framebuffer structure.
*
* @fb: drm framebuffer obejct.
- * @exynos_gem_obj: exynos specific gem object containing a gem object.
- * @entry: pointer to exynos drm buffer entry object.
- * - containing only the information to physically continuous memory
- * region allocated at default framebuffer creation.
+ * @exynos_gem_obj: array of exynos specific gem objects, one per framebuffer buffer.
*/
struct exynos_drm_fb {
struct drm_framebuffer fb;
- struct exynos_drm_gem_obj *exynos_gem_obj;
- struct exynos_drm_buf_entry *entry;
+ struct exynos_drm_gem_obj *exynos_gem_obj[MAX_FB_BUFFER];
};
static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
@@ -59,13 +56,6 @@ static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
drm_framebuffer_cleanup(fb);
- /*
- * default framebuffer has no gem object so
- * a buffer of the default framebuffer should be released at here.
- */
- if (!exynos_fb->exynos_gem_obj && exynos_fb->entry)
- exynos_drm_buf_destroy(fb->dev, exynos_fb->entry);
-
kfree(exynos_fb);
exynos_fb = NULL;
}
@@ -79,7 +69,7 @@ static int exynos_drm_fb_create_handle(struct drm_framebuffer *fb,
DRM_DEBUG_KMS("%s\n", __FILE__);
return drm_gem_handle_create(file_priv,
- &exynos_fb->exynos_gem_obj->base, handle);
+ &exynos_fb->exynos_gem_obj[0]->base, handle);
}
static int exynos_drm_fb_dirty(struct drm_framebuffer *fb,
@@ -100,146 +90,110 @@ static struct drm_framebuffer_funcs exynos_drm_fb_funcs = {
.dirty = exynos_drm_fb_dirty,
};
-static struct drm_framebuffer *
-exynos_drm_fb_init(struct drm_file *file_priv, struct drm_device *dev,
- struct drm_mode_fb_cmd *mode_cmd)
+struct drm_framebuffer *
+exynos_drm_framebuffer_init(struct drm_device *dev,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj)
{
struct exynos_drm_fb *exynos_fb;
- struct drm_framebuffer *fb;
- struct exynos_drm_gem_obj *exynos_gem_obj = NULL;
- struct drm_gem_object *obj;
- unsigned int size;
int ret;
- DRM_DEBUG_KMS("%s\n", __FILE__);
-
- mode_cmd->pitch = max(mode_cmd->pitch,
- mode_cmd->width * (mode_cmd->bpp >> 3));
-
- DRM_LOG_KMS("drm fb create(%dx%d)\n",
- mode_cmd->width, mode_cmd->height);
-
exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
if (!exynos_fb) {
- DRM_ERROR("failed to allocate exynos drm framebuffer.\n");
+ DRM_ERROR("failed to allocate exynos drm framebuffer\n");
return ERR_PTR(-ENOMEM);
}
- fb = &exynos_fb->fb;
- ret = drm_framebuffer_init(dev, fb, &exynos_drm_fb_funcs);
+ ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
if (ret) {
- DRM_ERROR("failed to initialize framebuffer.\n");
- goto err_init;
+ DRM_ERROR("failed to initialize framebuffer\n");
+ return ERR_PTR(ret);
}
- DRM_LOG_KMS("create: fb id: %d\n", fb->base.id);
+ drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
+ exynos_fb->exynos_gem_obj[0] = to_exynos_gem_obj(obj);
- size = mode_cmd->pitch * mode_cmd->height;
+ return &exynos_fb->fb;
+}
- /*
- * mode_cmd->handle could be NULL at booting time or
- * with user request. if NULL, a new buffer or a gem object
- * would be allocated.
- */
- if (!mode_cmd->handle) {
- if (!file_priv) {
- struct exynos_drm_buf_entry *entry;
-
- /*
- * in case that file_priv is NULL, it allocates
- * only buffer and this buffer would be used
- * for default framebuffer.
- */
- entry = exynos_drm_buf_create(dev, size);
- if (IS_ERR(entry)) {
- ret = PTR_ERR(entry);
- goto err_buffer;
- }
-
- exynos_fb->entry = entry;
-
- DRM_LOG_KMS("default fb: paddr = 0x%lx, size = 0x%x\n",
- (unsigned long)entry->paddr, size);
-
- goto out;
- } else {
- exynos_gem_obj = exynos_drm_gem_create(file_priv, dev,
- size,
- &mode_cmd->handle);
- if (IS_ERR(exynos_gem_obj)) {
- ret = PTR_ERR(exynos_gem_obj);
- goto err_buffer;
- }
- }
- } else {
- obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
- if (!obj) {
- DRM_ERROR("failed to lookup gem object.\n");
- goto err_buffer;
- }
+static struct drm_framebuffer *
+exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
+ struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct drm_gem_object *obj;
+ struct drm_framebuffer *fb;
+ struct exynos_drm_fb *exynos_fb;
+ int nr;
+ int i;
- exynos_gem_obj = to_exynos_gem_obj(obj);
+ DRM_DEBUG_KMS("%s\n", __FILE__);
- drm_gem_object_unreference_unlocked(obj);
+ obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
+ if (!obj) {
+ DRM_ERROR("failed to lookup gem object\n");
+ return ERR_PTR(-ENOENT);
}
- /*
- * if got a exynos_gem_obj from either a handle or
- * a new creation then exynos_fb->exynos_gem_obj is NULL
- * so that default framebuffer has no its own gem object,
- * only its own buffer object.
- */
- exynos_fb->entry = exynos_gem_obj->entry;
-
- DRM_LOG_KMS("paddr = 0x%lx, size = 0x%x, gem object = 0x%x\n",
- (unsigned long)exynos_fb->entry->paddr, size,
- (unsigned int)&exynos_gem_obj->base);
+ drm_gem_object_unreference_unlocked(obj);
-out:
- exynos_fb->exynos_gem_obj = exynos_gem_obj;
+ fb = exynos_drm_framebuffer_init(dev, mode_cmd, obj);
+ if (IS_ERR(fb))
+ return fb;
- drm_helper_mode_fill_fb_struct(fb, mode_cmd);
+ exynos_fb = to_exynos_fb(fb);
+ nr = exynos_drm_format_num_buffers(fb->pixel_format);
- return fb;
-
-err_buffer:
- drm_framebuffer_cleanup(fb);
-
-err_init:
- kfree(exynos_fb);
+ for (i = 1; i < nr; i++) {
+ obj = drm_gem_object_lookup(dev, file_priv,
+ mode_cmd->handles[i]);
+ if (!obj) {
+ DRM_ERROR("failed to lookup gem object\n");
+ exynos_drm_fb_destroy(fb);
+ return ERR_PTR(-ENOENT);
+ }
- return ERR_PTR(ret);
-}
+ drm_gem_object_unreference_unlocked(obj);
-struct drm_framebuffer *exynos_drm_fb_create(struct drm_device *dev,
- struct drm_file *file_priv,
- struct drm_mode_fb_cmd *mode_cmd)
-{
- DRM_DEBUG_KMS("%s\n", __FILE__);
+ exynos_fb->exynos_gem_obj[i] = to_exynos_gem_obj(obj);
+ }
- return exynos_drm_fb_init(file_priv, dev, mode_cmd);
+ return fb;
}
-struct exynos_drm_buf_entry *exynos_drm_fb_get_buf(struct drm_framebuffer *fb)
+struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb,
+ int index)
{
struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
- struct exynos_drm_buf_entry *entry;
+ struct exynos_drm_gem_buf *buffer;
DRM_DEBUG_KMS("%s\n", __FILE__);
- entry = exynos_fb->entry;
- if (!entry)
+ if (index >= MAX_FB_BUFFER)
return NULL;
- DRM_DEBUG_KMS("vaddr = 0x%lx, paddr = 0x%lx\n",
- (unsigned long)entry->vaddr,
- (unsigned long)entry->paddr);
+ buffer = exynos_fb->exynos_gem_obj[index]->buffer;
+ if (!buffer)
+ return NULL;
+
+ DRM_DEBUG_KMS("vaddr = 0x%lx, dma_addr = 0x%lx\n",
+ (unsigned long)buffer->kvaddr,
+ (unsigned long)buffer->dma_addr);
+
+ return buffer;
+}
+
+static void exynos_drm_output_poll_changed(struct drm_device *dev)
+{
+ struct exynos_drm_private *private = dev->dev_private;
+ struct drm_fb_helper *fb_helper = private->fb_helper;
- return entry;
+ if (fb_helper)
+ drm_fb_helper_hotplug_event(fb_helper);
}
static struct drm_mode_config_funcs exynos_drm_mode_config_funcs = {
- .fb_create = exynos_drm_fb_create,
+ .fb_create = exynos_user_fb_create,
+ .output_poll_changed = exynos_drm_output_poll_changed,
};
void exynos_drm_mode_config_init(struct drm_device *dev)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.h b/drivers/gpu/drm/exynos/exynos_drm_fb.h
index eb35931d302c..3ecb30d93552 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.h
@@ -28,9 +28,27 @@
#ifndef _EXYNOS_DRM_FB_H_
#define _EXYNOS_DRM_FB_H
-struct drm_framebuffer *exynos_drm_fb_create(struct drm_device *dev,
- struct drm_file *filp,
- struct drm_mode_fb_cmd *mode_cmd);
+static inline int exynos_drm_format_num_buffers(uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_NV12M:
+ case DRM_FORMAT_NV12MT:
+ return 2;
+ case DRM_FORMAT_YUV420M:
+ return 3;
+ default:
+ return 1;
+ }
+}
+
+struct drm_framebuffer *
+exynos_drm_framebuffer_init(struct drm_device *dev,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj);
+
+/* get memory information of a drm framebuffer */
+struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb,
+ int index);
void exynos_drm_mode_config_init(struct drm_device *dev);
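exynos_drm_format_num_buffers() drives the per-plane handle lookup in exynos_user_fb_create(). A minimal sketch of that lookup written as a standalone helper; the function and variable names are placeholders, not part of this patch:

/* hypothetical helper: look up one gem object per plane of the format */
static int example_lookup_planes(struct drm_device *dev,
		struct drm_file *file_priv,
		struct drm_mode_fb_cmd2 *mode_cmd,
		struct drm_gem_object *obj[MAX_FB_BUFFER])
{
	int i, nr = exynos_drm_format_num_buffers(mode_cmd->pixel_format);

	for (i = 0; i < nr; i++) {
		obj[i] = drm_gem_object_lookup(dev, file_priv,
				mode_cmd->handles[i]);
		if (!obj[i])
			return -ENOENT;
	}

	return nr;
}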
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 1f4b3d1a7713..d7ae29d2f3d6 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -33,7 +33,7 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
-#include "exynos_drm_buf.h"
+#include "exynos_drm_gem.h"
#define MAX_CONNECTOR 4
#define PREFERRED_BPP 32
@@ -42,8 +42,8 @@
drm_fb_helper)
struct exynos_drm_fbdev {
- struct drm_fb_helper drm_fb_helper;
- struct drm_framebuffer *fb;
+ struct drm_fb_helper drm_fb_helper;
+ struct exynos_drm_gem_obj *exynos_gem_obj;
};
static int exynos_drm_fbdev_set_par(struct fb_info *info)
@@ -85,36 +85,32 @@ static struct fb_ops exynos_drm_fb_ops = {
};
static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
- struct drm_framebuffer *fb,
- unsigned int fb_width,
- unsigned int fb_height)
+ struct drm_framebuffer *fb)
{
struct fb_info *fbi = helper->fbdev;
struct drm_device *dev = helper->dev;
- struct exynos_drm_fbdev *exynos_fb = to_exynos_fbdev(helper);
- struct exynos_drm_buf_entry *entry;
- unsigned int size = fb_width * fb_height * (fb->bits_per_pixel >> 3);
+ struct exynos_drm_gem_buf *buffer;
+ unsigned int size = fb->width * fb->height * (fb->bits_per_pixel >> 3);
unsigned long offset;
DRM_DEBUG_KMS("%s\n", __FILE__);
- exynos_fb->fb = fb;
+ drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_var(fbi, helper, fb->width, fb->height);
- drm_fb_helper_fill_fix(fbi, fb->pitch, fb->depth);
- drm_fb_helper_fill_var(fbi, helper, fb_width, fb_height);
-
- entry = exynos_drm_fb_get_buf(fb);
- if (!entry) {
- DRM_LOG_KMS("entry is null.\n");
+ /* RGB formats use only one buffer */
+ buffer = exynos_drm_fb_buffer(fb, 0);
+ if (!buffer) {
+ DRM_LOG_KMS("buffer is null.\n");
return -EFAULT;
}
offset = fbi->var.xoffset * (fb->bits_per_pixel >> 3);
- offset += fbi->var.yoffset * fb->pitch;
+ offset += fbi->var.yoffset * fb->pitches[0];
- dev->mode_config.fb_base = entry->paddr;
- fbi->screen_base = entry->vaddr + offset;
- fbi->fix.smem_start = entry->paddr + offset;
+ dev->mode_config.fb_base = (resource_size_t)buffer->dma_addr;
+ fbi->screen_base = buffer->kvaddr + offset;
+ fbi->fix.smem_start = (unsigned long)(buffer->dma_addr + offset);
fbi->screen_size = size;
fbi->fix.smem_len = size;
@@ -125,10 +121,12 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
struct exynos_drm_fbdev *exynos_fbdev = to_exynos_fbdev(helper);
+ struct exynos_drm_gem_obj *exynos_gem_obj;
struct drm_device *dev = helper->dev;
struct fb_info *fbi;
- struct drm_mode_fb_cmd mode_cmd = { 0 };
+ struct drm_mode_fb_cmd2 mode_cmd = { 0 };
struct platform_device *pdev = dev->platformdev;
+ unsigned long size;
int ret;
DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -139,8 +137,9 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
mode_cmd.width = sizes->surface_width;
mode_cmd.height = sizes->surface_height;
- mode_cmd.bpp = sizes->surface_bpp;
- mode_cmd.depth = sizes->surface_depth;
+ mode_cmd.pitches[0] = sizes->surface_width * (sizes->surface_bpp >> 3);
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
mutex_lock(&dev->struct_mutex);
@@ -151,14 +150,23 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
goto out;
}
- exynos_fbdev->fb = exynos_drm_fb_create(dev, NULL, &mode_cmd);
- if (IS_ERR_OR_NULL(exynos_fbdev->fb)) {
+ size = mode_cmd.pitches[0] * mode_cmd.height;
+ exynos_gem_obj = exynos_drm_gem_create(dev, size);
+ if (IS_ERR(exynos_gem_obj)) {
+ ret = PTR_ERR(exynos_gem_obj);
+ goto out;
+ }
+
+ exynos_fbdev->exynos_gem_obj = exynos_gem_obj;
+
+ helper->fb = exynos_drm_framebuffer_init(dev, &mode_cmd,
+ &exynos_gem_obj->base);
+ if (IS_ERR_OR_NULL(helper->fb)) {
DRM_ERROR("failed to create drm framebuffer.\n");
- ret = PTR_ERR(exynos_fbdev->fb);
+ ret = PTR_ERR(helper->fb);
goto out;
}
- helper->fb = exynos_fbdev->fb;
helper->fbdev = fbi;
fbi->par = helper;
@@ -171,10 +179,11 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
goto out;
}
- ret = exynos_drm_fbdev_update(helper, helper->fb, sizes->fb_width,
- sizes->fb_height);
- if (ret < 0)
+ ret = exynos_drm_fbdev_update(helper, helper->fb);
+ if (ret < 0) {
fb_dealloc_cmap(&fbi->cmap);
+ goto out;
+ }
/*
* if failed, all resources allocated above would be released by
@@ -207,36 +216,43 @@ static int exynos_drm_fbdev_recreate(struct drm_fb_helper *helper,
{
struct drm_device *dev = helper->dev;
struct exynos_drm_fbdev *exynos_fbdev = to_exynos_fbdev(helper);
- struct drm_framebuffer *fb = exynos_fbdev->fb;
- struct drm_mode_fb_cmd mode_cmd = { 0 };
+ struct exynos_drm_gem_obj *exynos_gem_obj;
+ struct drm_framebuffer *fb = helper->fb;
+ struct drm_mode_fb_cmd2 mode_cmd = { 0 };
+ unsigned long size;
DRM_DEBUG_KMS("%s\n", __FILE__);
- if (helper->fb != fb) {
- DRM_ERROR("drm framebuffer is different\n");
- return -EINVAL;
- }
-
if (exynos_drm_fbdev_is_samefb(fb, sizes))
return 0;
mode_cmd.width = sizes->surface_width;
mode_cmd.height = sizes->surface_height;
- mode_cmd.bpp = sizes->surface_bpp;
- mode_cmd.depth = sizes->surface_depth;
+ mode_cmd.pitches[0] = sizes->surface_width * (sizes->surface_bpp >> 3);
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
+
+ if (exynos_fbdev->exynos_gem_obj)
+ exynos_drm_gem_destroy(exynos_fbdev->exynos_gem_obj);
if (fb->funcs->destroy)
fb->funcs->destroy(fb);
- exynos_fbdev->fb = exynos_drm_fb_create(dev, NULL, &mode_cmd);
- if (IS_ERR(exynos_fbdev->fb)) {
- DRM_ERROR("failed to allocate fb.\n");
- return PTR_ERR(exynos_fbdev->fb);
+ size = mode_cmd.pitches[0] * mode_cmd.height;
+ exynos_gem_obj = exynos_drm_gem_create(dev, size);
+ if (IS_ERR(exynos_gem_obj))
+ return PTR_ERR(exynos_gem_obj);
+
+ exynos_fbdev->exynos_gem_obj = exynos_gem_obj;
+
+ helper->fb = exynos_drm_framebuffer_init(dev, &mode_cmd,
+ &exynos_gem_obj->base);
+ if (IS_ERR_OR_NULL(helper->fb)) {
+ DRM_ERROR("failed to create drm framebuffer.\n");
+ return PTR_ERR(helper->fb);
}
- helper->fb = exynos_fbdev->fb;
- return exynos_drm_fbdev_update(helper, helper->fb, sizes->fb_width,
- sizes->fb_height);
+ return exynos_drm_fbdev_update(helper, helper->fb);
}
static int exynos_drm_fbdev_probe(struct drm_fb_helper *helper,
@@ -369,6 +385,9 @@ void exynos_drm_fbdev_fini(struct drm_device *dev)
fbdev = to_exynos_fbdev(private->fb_helper);
+ if (fbdev->exynos_gem_obj)
+ exynos_drm_gem_destroy(fbdev->exynos_gem_obj);
+
exynos_drm_fbdev_destroy(dev, private->fb_helper);
kfree(fbdev);
private->fb_helper = NULL;
@@ -405,6 +424,18 @@ int exynos_drm_fbdev_reinit(struct drm_device *dev)
fb_helper = private->fb_helper;
if (fb_helper) {
+ struct list_head temp_list;
+
+ INIT_LIST_HEAD(&temp_list);
+
+ /*
+ * fb_helper is reinitialized but the kernel fb is reused,
+ * so kernel_fb_list needs to be backed up and restored
+ */
+ if (!list_empty(&fb_helper->kernel_fb_list))
+ list_replace_init(&fb_helper->kernel_fb_list,
+ &temp_list);
+
drm_fb_helper_fini(fb_helper);
ret = drm_fb_helper_init(dev, fb_helper,
@@ -414,6 +445,9 @@ int exynos_drm_fbdev_reinit(struct drm_device *dev)
return ret;
}
+ if (!list_empty(&temp_list))
+ list_replace(&temp_list, &fb_helper->kernel_fb_list);
+
ret = drm_fb_helper_single_add_all_connectors(fb_helper);
if (ret < 0) {
DRM_ERROR("failed to add fb helper to connectors\n");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 4659c88cdd9b..ca83139cd309 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -17,6 +17,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
+#include <linux/pm_runtime.h>
#include <drm/exynos_drm.h>
#include <plat/regs-fb-v4.h>
@@ -64,10 +65,11 @@ struct fimd_win_data {
unsigned int fb_width;
unsigned int fb_height;
unsigned int bpp;
- dma_addr_t paddr;
+ dma_addr_t dma_addr;
void __iomem *vaddr;
unsigned int buf_offsize;
unsigned int line_size; /* bytes */
+ bool enabled;
};
struct fimd_context {
@@ -84,6 +86,8 @@ struct fimd_context {
unsigned long irq_flags;
u32 vidcon0;
u32 vidcon1;
+ bool suspended;
+ struct mutex lock;
struct fb_videomode *timing;
};
@@ -119,12 +123,12 @@ static int fimd_display_power_on(struct device *dev, int mode)
{
DRM_DEBUG_KMS("%s\n", __FILE__);
- /* TODO. */
+ /* TODO */
return 0;
}
-static struct exynos_drm_display fimd_display = {
+static struct exynos_drm_display_ops fimd_display_ops = {
.type = EXYNOS_DISPLAY_TYPE_LCD,
.is_connected = fimd_display_is_connected,
.get_timing = fimd_get_timing,
@@ -132,12 +136,68 @@ static struct exynos_drm_display fimd_display = {
.power_on = fimd_display_power_on,
};
+static void fimd_dpms(struct device *subdrv_dev, int mode)
+{
+ struct fimd_context *ctx = get_fimd_context(subdrv_dev);
+
+ DRM_DEBUG_KMS("%s, %d\n", __FILE__, mode);
+
+ mutex_lock(&ctx->lock);
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ /*
+ * enable the fimd hardware only if it is in the suspended state.
+ *
+ * P.S. fimd_dpms() is also called at boot time, so clk_enable
+ * could otherwise be called twice.
+ */
+ if (ctx->suspended)
+ pm_runtime_get_sync(subdrv_dev);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ pm_runtime_put_sync(subdrv_dev);
+ break;
+ default:
+ DRM_DEBUG_KMS("unspecified mode %d\n", mode);
+ break;
+ }
+
+ mutex_unlock(&ctx->lock);
+}
+
+static void fimd_apply(struct device *subdrv_dev)
+{
+ struct fimd_context *ctx = get_fimd_context(subdrv_dev);
+ struct exynos_drm_manager *mgr = &ctx->subdrv.manager;
+ struct exynos_drm_manager_ops *mgr_ops = mgr->ops;
+ struct exynos_drm_overlay_ops *ovl_ops = mgr->overlay_ops;
+ struct fimd_win_data *win_data;
+ int i;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ for (i = 0; i < WINDOWS_NR; i++) {
+ win_data = &ctx->win_data[i];
+ if (win_data->enabled && (ovl_ops && ovl_ops->commit))
+ ovl_ops->commit(subdrv_dev, i);
+ }
+
+ if (mgr_ops && mgr_ops->commit)
+ mgr_ops->commit(subdrv_dev);
+}
+
static void fimd_commit(struct device *dev)
{
struct fimd_context *ctx = get_fimd_context(dev);
struct fb_videomode *timing = ctx->timing;
u32 val;
+ if (ctx->suspended)
+ return;
+
DRM_DEBUG_KMS("%s\n", __FILE__);
/* setup polarity values from machine code. */
@@ -184,6 +244,9 @@ static int fimd_enable_vblank(struct device *dev)
DRM_DEBUG_KMS("%s\n", __FILE__);
+ if (ctx->suspended)
+ return -EPERM;
+
if (!test_and_set_bit(0, &ctx->irq_flags)) {
val = readl(ctx->regs + VIDINTCON0);
@@ -208,6 +271,9 @@ static void fimd_disable_vblank(struct device *dev)
DRM_DEBUG_KMS("%s\n", __FILE__);
+ if (ctx->suspended)
+ return;
+
if (test_and_clear_bit(0, &ctx->irq_flags)) {
val = readl(ctx->regs + VIDINTCON0);
@@ -219,6 +285,8 @@ static void fimd_disable_vblank(struct device *dev)
}
static struct exynos_drm_manager_ops fimd_manager_ops = {
+ .dpms = fimd_dpms,
+ .apply = fimd_apply,
.commit = fimd_commit,
.enable_vblank = fimd_enable_vblank,
.disable_vblank = fimd_disable_vblank,
@@ -229,6 +297,7 @@ static void fimd_win_mode_set(struct device *dev,
{
struct fimd_context *ctx = get_fimd_context(dev);
struct fimd_win_data *win_data;
+ int win;
unsigned long offset;
DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -238,12 +307,19 @@ static void fimd_win_mode_set(struct device *dev,
return;
}
+ win = overlay->zpos;
+ if (win == DEFAULT_ZPOS)
+ win = ctx->default_win;
+
+ if (win < 0 || win > WINDOWS_NR)
+ return;
+
offset = overlay->fb_x * (overlay->bpp >> 3);
offset += overlay->fb_y * overlay->pitch;
DRM_DEBUG_KMS("offset = 0x%lx, pitch = %x\n", offset, overlay->pitch);
- win_data = &ctx->win_data[ctx->default_win];
+ win_data = &ctx->win_data[win];
win_data->offset_x = overlay->crtc_x;
win_data->offset_y = overlay->crtc_y;
@@ -251,8 +327,8 @@ static void fimd_win_mode_set(struct device *dev,
win_data->ovl_height = overlay->crtc_height;
win_data->fb_width = overlay->fb_width;
win_data->fb_height = overlay->fb_height;
- win_data->paddr = overlay->paddr + offset;
- win_data->vaddr = overlay->vaddr + offset;
+ win_data->dma_addr = overlay->dma_addr[0] + offset;
+ win_data->vaddr = overlay->vaddr[0] + offset;
win_data->bpp = overlay->bpp;
win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) *
(overlay->bpp >> 3);
@@ -263,7 +339,7 @@ static void fimd_win_mode_set(struct device *dev,
DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
win_data->ovl_width, win_data->ovl_height);
DRM_DEBUG_KMS("paddr = 0x%lx, vaddr = 0x%lx\n",
- (unsigned long)win_data->paddr,
+ (unsigned long)win_data->dma_addr,
(unsigned long)win_data->vaddr);
DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n",
overlay->fb_width, overlay->crtc_width);
@@ -346,15 +422,21 @@ static void fimd_win_set_colkey(struct device *dev, unsigned int win)
writel(keycon1, ctx->regs + WKEYCON1_BASE(win));
}
-static void fimd_win_commit(struct device *dev)
+static void fimd_win_commit(struct device *dev, int zpos)
{
struct fimd_context *ctx = get_fimd_context(dev);
struct fimd_win_data *win_data;
- int win = ctx->default_win;
+ int win = zpos;
unsigned long val, alpha, size;
DRM_DEBUG_KMS("%s\n", __FILE__);
+ if (ctx->suspended)
+ return;
+
+ if (win == DEFAULT_ZPOS)
+ win = ctx->default_win;
+
if (win < 0 || win > WINDOWS_NR)
return;
@@ -376,16 +458,16 @@ static void fimd_win_commit(struct device *dev)
writel(val, ctx->regs + SHADOWCON);
/* buffer start address */
- val = win_data->paddr;
+ val = (unsigned long)win_data->dma_addr;
writel(val, ctx->regs + VIDWx_BUF_START(win, 0));
/* buffer end address */
size = win_data->fb_width * win_data->ovl_height * (win_data->bpp >> 3);
- val = win_data->paddr + size;
+ val = (unsigned long)(win_data->dma_addr + size);
writel(val, ctx->regs + VIDWx_BUF_END(win, 0));
DRM_DEBUG_KMS("start addr = 0x%lx, end addr = 0x%lx, size = 0x%lx\n",
- (unsigned long)win_data->paddr, val, size);
+ (unsigned long)win_data->dma_addr, val, size);
DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
win_data->ovl_width, win_data->ovl_height);
@@ -437,22 +519,32 @@ static void fimd_win_commit(struct device *dev)
if (win != 0)
fimd_win_set_colkey(dev, win);
+ /* wincon */
+ val = readl(ctx->regs + WINCON(win));
+ val |= WINCONx_ENWIN;
+ writel(val, ctx->regs + WINCON(win));
+
/* Enable DMA channel and unprotect windows */
val = readl(ctx->regs + SHADOWCON);
val |= SHADOWCON_CHx_ENABLE(win);
val &= ~SHADOWCON_WINx_PROTECT(win);
writel(val, ctx->regs + SHADOWCON);
+
+ win_data->enabled = true;
}
-static void fimd_win_disable(struct device *dev)
+static void fimd_win_disable(struct device *dev, int zpos)
{
struct fimd_context *ctx = get_fimd_context(dev);
struct fimd_win_data *win_data;
- int win = ctx->default_win;
+ int win = zpos;
u32 val;
DRM_DEBUG_KMS("%s\n", __FILE__);
+ if (win == DEFAULT_ZPOS)
+ win = ctx->default_win;
+
if (win < 0 || win > WINDOWS_NR)
return;
@@ -473,6 +565,8 @@ static void fimd_win_disable(struct device *dev)
val &= ~SHADOWCON_CHx_ENABLE(win);
val &= ~SHADOWCON_WINx_PROTECT(win);
writel(val, ctx->regs + SHADOWCON);
+
+ win_data->enabled = false;
}
static struct exynos_drm_overlay_ops fimd_overlay_ops = {
@@ -508,9 +602,17 @@ static void fimd_finish_pageflip(struct drm_device *drm_dev, int crtc)
wake_up_interruptible(&e->base.file_priv->event_wait);
}
- if (is_checked)
+ if (is_checked) {
drm_vblank_put(drm_dev, crtc);
+ /*
+ * don't turn off vblank here if vblank_disable_allowed is 1,
+ * because vblank will be turned off by the timer handler.
+ */
+ if (!drm_dev->vblank_disable_allowed)
+ drm_vblank_off(drm_dev, crtc);
+ }
+
spin_unlock_irqrestore(&drm_dev->event_lock, flags);
}
@@ -528,9 +630,14 @@ static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
/* VSYNC interrupt */
writel(VIDINTCON1_INT_FRAME, ctx->regs + VIDINTCON1);
+ /* check whether the crtc is already detached from the encoder */
+ if (manager->pipe < 0)
+ goto out;
+
drm_handle_vblank(drm_dev, manager->pipe);
fimd_finish_pageflip(drm_dev, manager->pipe);
+out:
return IRQ_HANDLED;
}
@@ -551,7 +658,7 @@ static int fimd_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
/*
* with vblank_disable_allowed = 1, vblank interrupt will be disabled
* by drm timer once a current process gives up ownership of
- * vblank event.(drm_vblank_put function was called)
+ * vblank event (after drm_vblank_put() is called).
*/
drm_dev->vblank_disable_allowed = 1;
@@ -704,9 +811,6 @@ static int __devinit fimd_probe(struct platform_device *pdev)
ctx->irq = res->start;
- for (win = 0; win < WINDOWS_NR; win++)
- fimd_clear_win(ctx, win);
-
ret = request_irq(ctx->irq, fimd_irq_handler, 0, "drm_fimd", ctx);
if (ret < 0) {
dev_err(dev, "irq request failed.\n");
@@ -731,10 +835,20 @@ static int __devinit fimd_probe(struct platform_device *pdev)
subdrv->manager.pipe = -1;
subdrv->manager.ops = &fimd_manager_ops;
subdrv->manager.overlay_ops = &fimd_overlay_ops;
- subdrv->manager.display = &fimd_display;
+ subdrv->manager.display_ops = &fimd_display_ops;
subdrv->manager.dev = dev;
+ mutex_init(&ctx->lock);
+
platform_set_drvdata(pdev, ctx);
+
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
+
+ for (win = 0; win < WINDOWS_NR; win++)
+ fimd_clear_win(ctx, win);
+
exynos_drm_subdrv_register(subdrv);
return 0;
@@ -762,14 +876,25 @@ err_clk_get:
static int __devexit fimd_remove(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct fimd_context *ctx = platform_get_drvdata(pdev);
DRM_DEBUG_KMS("%s\n", __FILE__);
exynos_drm_subdrv_unregister(&ctx->subdrv);
+ if (ctx->suspended)
+ goto out;
+
clk_disable(ctx->lcd_clk);
clk_disable(ctx->bus_clk);
+
+ pm_runtime_set_suspended(dev);
+ pm_runtime_put_sync(dev);
+
+out:
+ pm_runtime_disable(dev);
+
clk_put(ctx->lcd_clk);
clk_put(ctx->bus_clk);
@@ -783,12 +908,102 @@ static int __devexit fimd_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int fimd_suspend(struct device *dev)
+{
+ int ret;
+
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ ret = pm_runtime_suspend(dev);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int fimd_resume(struct device *dev)
+{
+ int ret;
+
+ ret = pm_runtime_resume(dev);
+ if (ret < 0) {
+ DRM_ERROR("failed to resume runtime pm.\n");
+ return ret;
+ }
+
+ pm_runtime_disable(dev);
+
+ ret = pm_runtime_set_active(dev);
+ if (ret < 0) {
+ DRM_ERROR("failed to active runtime pm.\n");
+ pm_runtime_enable(dev);
+ pm_runtime_suspend(dev);
+ return ret;
+ }
+
+ pm_runtime_enable(dev);
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int fimd_runtime_suspend(struct device *dev)
+{
+ struct fimd_context *ctx = get_fimd_context(dev);
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ clk_disable(ctx->lcd_clk);
+ clk_disable(ctx->bus_clk);
+
+ ctx->suspended = true;
+ return 0;
+}
+
+static int fimd_runtime_resume(struct device *dev)
+{
+ struct fimd_context *ctx = get_fimd_context(dev);
+ int ret;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ ret = clk_enable(ctx->bus_clk);
+ if (ret < 0)
+ return ret;
+
+ ret = clk_enable(ctx->lcd_clk);
+ if (ret < 0) {
+ clk_disable(ctx->bus_clk);
+ return ret;
+ }
+
+ ctx->suspended = false;
+
+ /* if vblank was enabled before suspend, enable it again. */
+ if (test_and_clear_bit(0, &ctx->irq_flags))
+ fimd_enable_vblank(dev);
+
+ fimd_apply(dev);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops fimd_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(fimd_suspend, fimd_resume)
+ SET_RUNTIME_PM_OPS(fimd_runtime_suspend, fimd_runtime_resume, NULL)
+};
+
static struct platform_driver fimd_driver = {
.probe = fimd_probe,
.remove = __devexit_p(fimd_remove),
.driver = {
.name = "exynos4-fb",
.owner = THIS_MODULE,
+ .pm = &fimd_pm_ops,
},
};
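The new fimd overlay callbacks share one window-selection rule: a DEFAULT_ZPOS request falls back to the default window from platform data, and out-of-range indices are ignored. A sketch of that rule as a standalone helper with a hypothetical name; note the patch itself bounds-checks with win > WINDOWS_NR rather than >=:

/* hypothetical helper: resolve a zpos request to a fimd hardware window */
static int example_resolve_window(struct fimd_context *ctx, int zpos)
{
	int win = zpos;

	if (win == DEFAULT_ZPOS)
		win = ctx->default_win;

	if (win < 0 || win >= WINDOWS_NR)
		return -EINVAL;

	return win;
}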
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index a8e7a88906ed..025abb3e3b67 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -55,104 +55,128 @@ static unsigned int convert_to_vm_err_msg(int msg)
return out_msg;
}
-static unsigned int get_gem_mmap_offset(struct drm_gem_object *obj)
+static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
+ struct drm_file *file_priv,
+ unsigned int *handle)
{
- DRM_DEBUG_KMS("%s\n", __FILE__);
+ int ret;
- return (unsigned int)obj->map_list.hash.key << PAGE_SHIFT;
+ /*
+ * allocate an id in the idr table where the obj is registered,
+ * and hand back that id as the handle visible to user space.
+ */
+ ret = drm_gem_handle_create(file_priv, obj, handle);
+ if (ret)
+ return ret;
+
+ DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);
+
+ /* drop reference from allocate - handle holds it now. */
+ drm_gem_object_unreference_unlocked(obj);
+
+ return 0;
}
-struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_file *file_priv,
- struct drm_device *dev, unsigned int size,
- unsigned int *handle)
+void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
{
- struct exynos_drm_gem_obj *exynos_gem_obj;
- struct exynos_drm_buf_entry *entry;
struct drm_gem_object *obj;
- int ret;
DRM_DEBUG_KMS("%s\n", __FILE__);
- size = roundup(size, PAGE_SIZE);
+ if (!exynos_gem_obj)
+ return;
+
+ obj = &exynos_gem_obj->base;
+
+ DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count));
+
+ exynos_drm_buf_destroy(obj->dev, exynos_gem_obj->buffer);
+
+ if (obj->map_list.map)
+ drm_gem_free_mmap_offset(obj);
+
+ /* release file pointer to gem object. */
+ drm_gem_object_release(obj);
+
+ kfree(exynos_gem_obj);
+}
+
+static struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
+ unsigned long size)
+{
+ struct exynos_drm_gem_obj *exynos_gem_obj;
+ struct drm_gem_object *obj;
+ int ret;
exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
if (!exynos_gem_obj) {
- DRM_ERROR("failed to allocate exynos gem object.\n");
- return ERR_PTR(-ENOMEM);
+ DRM_ERROR("failed to allocate exynos gem object\n");
+ return NULL;
}
- /* allocate the new buffer object and memory region. */
- entry = exynos_drm_buf_create(dev, size);
- if (!entry) {
- kfree(exynos_gem_obj);
- return ERR_PTR(-ENOMEM);
- }
-
- exynos_gem_obj->entry = entry;
-
obj = &exynos_gem_obj->base;
ret = drm_gem_object_init(dev, obj, size);
if (ret < 0) {
- DRM_ERROR("failed to initailize gem object.\n");
- goto err_obj_init;
+ DRM_ERROR("failed to initialize gem object\n");
+ kfree(exynos_gem_obj);
+ return NULL;
}
DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);
- ret = drm_gem_create_mmap_offset(obj);
- if (ret < 0) {
- DRM_ERROR("failed to allocate mmap offset.\n");
- goto err_create_mmap_offset;
- }
-
- /*
- * allocate a id of idr table where the obj is registered
- * and handle has the id what user can see.
- */
- ret = drm_gem_handle_create(file_priv, obj, handle);
- if (ret)
- goto err_handle_create;
-
- DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);
-
- /* drop reference from allocate - handle holds it now. */
- drm_gem_object_unreference_unlocked(obj);
-
return exynos_gem_obj;
+}
-err_handle_create:
- drm_gem_free_mmap_offset(obj);
+struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
+ unsigned long size)
+{
+ struct exynos_drm_gem_buf *buffer;
+ struct exynos_drm_gem_obj *exynos_gem_obj;
-err_create_mmap_offset:
- drm_gem_object_release(obj);
+ size = roundup(size, PAGE_SIZE);
+ DRM_DEBUG_KMS("%s: size = 0x%lx\n", __FILE__, size);
-err_obj_init:
- exynos_drm_buf_destroy(dev, exynos_gem_obj->entry);
+ buffer = exynos_drm_buf_create(dev, size);
+ if (!buffer)
+ return ERR_PTR(-ENOMEM);
- kfree(exynos_gem_obj);
+ exynos_gem_obj = exynos_drm_gem_init(dev, size);
+ if (!exynos_gem_obj) {
+ exynos_drm_buf_destroy(dev, buffer);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ exynos_gem_obj->buffer = buffer;
- return ERR_PTR(ret);
+ return exynos_gem_obj;
}
int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file_priv)
{
struct drm_exynos_gem_create *args = data;
struct exynos_drm_gem_obj *exynos_gem_obj;
+ int ret;
- DRM_DEBUG_KMS("%s : size = 0x%x\n", __FILE__, args->size);
+ DRM_DEBUG_KMS("%s\n", __FILE__);
- exynos_gem_obj = exynos_drm_gem_create(file_priv, dev, args->size,
- &args->handle);
+ exynos_gem_obj = exynos_drm_gem_create(dev, args->size);
if (IS_ERR(exynos_gem_obj))
return PTR_ERR(exynos_gem_obj);
+ ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
+ &args->handle);
+ if (ret) {
+ exynos_drm_gem_destroy(exynos_gem_obj);
+ return ret;
+ }
+
return 0;
}
int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file_priv)
{
struct drm_exynos_gem_map_off *args = data;
@@ -171,36 +195,37 @@ int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
}
static int exynos_drm_gem_mmap_buffer(struct file *filp,
- struct vm_area_struct *vma)
+ struct vm_area_struct *vma)
{
struct drm_gem_object *obj = filp->private_data;
struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
- struct exynos_drm_buf_entry *entry;
+ struct exynos_drm_gem_buf *buffer;
unsigned long pfn, vm_size;
DRM_DEBUG_KMS("%s\n", __FILE__);
vma->vm_flags |= (VM_IO | VM_RESERVED);
+ /* in case of direct mapping, always having non-cachable attribute */
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_file = filp;
vm_size = vma->vm_end - vma->vm_start;
/*
- * a entry contains information to physically continuous memory
+ * a buffer contains information to physically continuous memory
* allocated by user request or at framebuffer creation.
*/
- entry = exynos_gem_obj->entry;
+ buffer = exynos_gem_obj->buffer;
/* check if user-requested size is valid. */
- if (vm_size > entry->size)
+ if (vm_size > buffer->size)
return -EINVAL;
/*
* get page frame number to physical memory to be mapped
* to user space.
*/
- pfn = exynos_gem_obj->entry->paddr >> PAGE_SHIFT;
+ pfn = ((unsigned long)exynos_gem_obj->buffer->dma_addr) >> PAGE_SHIFT;
DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn);
@@ -218,7 +243,7 @@ static const struct file_operations exynos_drm_gem_fops = {
};
int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file_priv)
{
struct drm_exynos_gem_mmap *args = data;
struct drm_gem_object *obj;
@@ -264,32 +289,19 @@ int exynos_drm_gem_init_object(struct drm_gem_object *obj)
return 0;
}
-void exynos_drm_gem_free_object(struct drm_gem_object *gem_obj)
+void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
- struct exynos_drm_gem_obj *exynos_gem_obj;
-
DRM_DEBUG_KMS("%s\n", __FILE__);
- DRM_DEBUG_KMS("handle count = %d\n",
- atomic_read(&gem_obj->handle_count));
-
- if (gem_obj->map_list.map)
- drm_gem_free_mmap_offset(gem_obj);
-
- /* release file pointer to gem object. */
- drm_gem_object_release(gem_obj);
-
- exynos_gem_obj = to_exynos_gem_obj(gem_obj);
-
- exynos_drm_buf_destroy(gem_obj->dev, exynos_gem_obj->entry);
-
- kfree(exynos_gem_obj);
+ exynos_drm_gem_destroy(to_exynos_gem_obj(obj));
}
int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
- struct drm_device *dev, struct drm_mode_create_dumb *args)
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
{
struct exynos_drm_gem_obj *exynos_gem_obj;
+ int ret;
DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -302,19 +314,27 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
args->pitch = args->width * args->bpp >> 3;
args->size = args->pitch * args->height;
- exynos_gem_obj = exynos_drm_gem_create(file_priv, dev, args->size,
- &args->handle);
+ exynos_gem_obj = exynos_drm_gem_create(dev, args->size);
if (IS_ERR(exynos_gem_obj))
return PTR_ERR(exynos_gem_obj);
+ ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
+ &args->handle);
+ if (ret) {
+ exynos_drm_gem_destroy(exynos_gem_obj);
+ return ret;
+ }
+
return 0;
}
int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
- struct drm_device *dev, uint32_t handle, uint64_t *offset)
+ struct drm_device *dev, uint32_t handle,
+ uint64_t *offset)
{
struct exynos_drm_gem_obj *exynos_gem_obj;
struct drm_gem_object *obj;
+ int ret = 0;
DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -329,19 +349,46 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
obj = drm_gem_object_lookup(dev, file_priv, handle);
if (!obj) {
DRM_ERROR("failed to lookup gem object.\n");
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
+ ret = -EINVAL;
+ goto unlock;
}
exynos_gem_obj = to_exynos_gem_obj(obj);
- *offset = get_gem_mmap_offset(&exynos_gem_obj->base);
-
- drm_gem_object_unreference(obj);
+ if (!exynos_gem_obj->base.map_list.map) {
+ ret = drm_gem_create_mmap_offset(&exynos_gem_obj->base);
+ if (ret)
+ goto out;
+ }
+ *offset = (u64)exynos_gem_obj->base.map_list.hash.key << PAGE_SHIFT;
DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);
+out:
+ drm_gem_object_unreference(obj);
+unlock:
mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
+ struct drm_device *dev,
+ unsigned int handle)
+{
+ int ret;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ /*
+ * obj->refcount and obj->handle_count are decreased, and
+ * if both of them reach 0 then exynos_drm_gem_free_object()
+ * is called back to release the resources.
+ */
+ ret = drm_gem_handle_delete(file_priv, handle);
+ if (ret < 0) {
+ DRM_ERROR("failed to delete drm_gem_handle.\n");
+ return ret;
+ }
return 0;
}
@@ -360,7 +407,8 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
mutex_lock(&dev->struct_mutex);
- pfn = (exynos_gem_obj->entry->paddr >> PAGE_SHIFT) + page_offset;
+ pfn = (((unsigned long)exynos_gem_obj->buffer->dma_addr) >>
+ PAGE_SHIFT) + page_offset;
ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
@@ -388,28 +436,6 @@ int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
return ret;
}
-
-int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
- struct drm_device *dev, unsigned int handle)
-{
- int ret;
-
- DRM_DEBUG_KMS("%s\n", __FILE__);
-
- /*
- * obj->refcount and obj->handle_count are decreased and
- * if both them are 0 then exynos_drm_gem_free_object()
- * would be called by callback to release resources.
- */
- ret = drm_gem_handle_delete(file_priv, handle);
- if (ret < 0) {
- DRM_ERROR("failed to delete drm_gem_handle.\n");
- return ret;
- }
-
- return 0;
-}
-
MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
MODULE_DESCRIPTION("Samsung SoC DRM GEM Module");
MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index e5fc0148277b..67cdc9168708 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -30,13 +30,29 @@
struct exynos_drm_gem_obj, base)
/*
+ * exynos drm gem buffer structure.
+ *
+ * @kvaddr: kernel virtual address to allocated memory region.
+ * @dma_addr: bus address (accessed by dma) of the allocated memory region.
+ * - this is a physical address without an IOMMU and a
+ * device address with an IOMMU.
+ * @size: size of allocated memory region.
+ */
+struct exynos_drm_gem_buf {
+ void __iomem *kvaddr;
+ dma_addr_t dma_addr;
+ unsigned long size;
+};
+
+/*
* exynos drm buffer structure.
*
* @base: a gem object.
* - a new handle to this gem object would be created
* by drm_gem_handle_create().
- * @entry: pointer to exynos drm buffer entry object.
- * - containing the information to physically
+ * @buffer: a pointer to an exynos_drm_gem_buf object.
+ * - contains the information on the memory region allocated
+ * by user request or at framebuffer creation.
* continuous memory region allocated by user request
* or at framebuffer creation.
*
@@ -44,14 +60,16 @@
* user can access the buffer through kms_bo.handle.
*/
struct exynos_drm_gem_obj {
- struct drm_gem_object base;
- struct exynos_drm_buf_entry *entry;
+ struct drm_gem_object base;
+ struct exynos_drm_gem_buf *buffer;
};
-/* create a new buffer and get a new gem handle. */
-struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_file *file_priv,
- struct drm_device *dev, unsigned int size,
- unsigned int *handle);
+/* destroy a buffer with gem object */
+void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj);
+
+/* create a new buffer with gem object */
+struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
+ unsigned long size);
/*
* request gem object creation and buffer allocation as the size
@@ -59,15 +77,18 @@ struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_file *file_priv,
* height and bpp.
*/
int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
+ struct drm_file *file_priv);
/* get buffer offset to map to user space. */
int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
+ struct drm_file *file_priv);
-/* unmap a buffer from user space. */
-int exynos_drm_gem_munmap_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
+/*
+ * mmap the physically contiguous memory that a gem object contains
+ * to user space.
+ */
+int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
/* initialize gem object. */
int exynos_drm_gem_init_object(struct drm_gem_object *obj);
@@ -77,24 +98,13 @@ void exynos_drm_gem_free_object(struct drm_gem_object *gem_obj);
/* create memory region for drm framebuffer. */
int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
- struct drm_device *dev, struct drm_mode_create_dumb *args);
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
/* map memory region for drm framebuffer to user space. */
int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
- struct drm_device *dev, uint32_t handle, uint64_t *offset);
-
-/* page fault handler and mmap fault address(virtual) to physical memory. */
-int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
-
-/*
- * mmap the physically continuous memory that a gem object contains
- * to user space.
- */
-int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-
-/* set vm_flags and we can change the vm attribute to other one at here. */
-int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
+ struct drm_device *dev, uint32_t handle,
+ uint64_t *offset);
/*
* destroy memory region allocated.
@@ -102,6 +112,13 @@ int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
* would be released by drm_gem_handle_delete().
*/
int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
- struct drm_device *dev, unsigned int handle);
+ struct drm_device *dev,
+ unsigned int handle);
+
+/* page fault handler and mmap fault address(virtual) to physical memory. */
+int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+
+/* set vm_flags and we can change the vm attribute to other one at here. */
+int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
#endif
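From user space, the dumb-buffer entry points declared above are reached through the generic DRM ioctls. A minimal sketch, assuming an already-opened DRM file descriptor and the standard UAPI headers; error handling is trimmed:

#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/drm.h>

/* map a 32bpp dumb buffer; on the driver side these ioctls land in
 * exynos_drm_gem_dumb_create() and exynos_drm_gem_dumb_map_offset() */
static void *example_map_dumb(int fd, uint32_t width, uint32_t height)
{
	struct drm_mode_create_dumb create = {
		.width = width, .height = height, .bpp = 32,
	};
	struct drm_mode_map_dumb map = { 0 };

	if (ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create))
		return NULL;

	map.handle = create.handle;
	if (ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map))
		return NULL;

	return mmap(NULL, create.size, PROT_READ | PROT_WRITE,
			MAP_SHARED, fd, map.offset);
}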
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
new file mode 100644
index 000000000000..ed8a319ed84b
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
@@ -0,0 +1,439 @@
+/*
+ * Copyright (C) 2011 Samsung Electronics Co.Ltd
+ * Authors:
+ * Inki Dae <inki.dae@samsung.com>
+ * Seung-Woo Kim <sw0312.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include "drmP.h"
+
+#include <linux/kernel.h>
+#include <linux/wait.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/exynos_drm.h>
+
+#include "exynos_drm_drv.h"
+#include "exynos_drm_hdmi.h"
+
+#define to_context(dev) platform_get_drvdata(to_platform_device(dev))
+#define to_subdrv(dev) to_context(dev)
+#define get_ctx_from_subdrv(subdrv) container_of(subdrv,\
+ struct drm_hdmi_context, subdrv);
+
+/* these callback points should be set by specific drivers. */
+static struct exynos_hdmi_display_ops *hdmi_display_ops;
+static struct exynos_hdmi_manager_ops *hdmi_manager_ops;
+static struct exynos_hdmi_overlay_ops *hdmi_overlay_ops;
+
+struct drm_hdmi_context {
+ struct exynos_drm_subdrv subdrv;
+ struct exynos_drm_hdmi_context *hdmi_ctx;
+ struct exynos_drm_hdmi_context *mixer_ctx;
+ struct work_struct work;
+};
+
+void exynos_drm_display_ops_register(struct exynos_hdmi_display_ops
+ *display_ops)
+{
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (display_ops)
+ hdmi_display_ops = display_ops;
+}
+EXPORT_SYMBOL(exynos_drm_display_ops_register);
+
+void exynos_drm_manager_ops_register(struct exynos_hdmi_manager_ops
+ *manager_ops)
+{
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (manager_ops)
+ hdmi_manager_ops = manager_ops;
+}
+EXPORT_SYMBOL(exynos_drm_manager_ops_register);
+
+void exynos_drm_overlay_ops_register(struct exynos_hdmi_overlay_ops
+ *overlay_ops)
+{
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (overlay_ops)
+ hdmi_overlay_ops = overlay_ops;
+}
+EXPORT_SYMBOL(exynos_drm_overlay_ops_register);
+
+static bool drm_hdmi_is_connected(struct device *dev)
+{
+ struct drm_hdmi_context *ctx = to_context(dev);
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (hdmi_display_ops && hdmi_display_ops->is_connected)
+ return hdmi_display_ops->is_connected(ctx->hdmi_ctx->ctx);
+
+ return false;
+}
+
+static int drm_hdmi_get_edid(struct device *dev,
+ struct drm_connector *connector, u8 *edid, int len)
+{
+ struct drm_hdmi_context *ctx = to_context(dev);
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (hdmi_display_ops && hdmi_display_ops->get_edid)
+ return hdmi_display_ops->get_edid(ctx->hdmi_ctx->ctx,
+ connector, edid, len);
+
+ return 0;
+}
+
+static int drm_hdmi_check_timing(struct device *dev, void *timing)
+{
+ struct drm_hdmi_context *ctx = to_context(dev);
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (hdmi_display_ops && hdmi_display_ops->check_timing)
+ return hdmi_display_ops->check_timing(ctx->hdmi_ctx->ctx,
+ timing);
+
+ return 0;
+}
+
+static int drm_hdmi_power_on(struct device *dev, int mode)
+{
+ struct drm_hdmi_context *ctx = to_context(dev);
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (hdmi_display_ops && hdmi_display_ops->power_on)
+ return hdmi_display_ops->power_on(ctx->hdmi_ctx->ctx, mode);
+
+ return 0;
+}
+
+static struct exynos_drm_display_ops drm_hdmi_display_ops = {
+ .type = EXYNOS_DISPLAY_TYPE_HDMI,
+ .is_connected = drm_hdmi_is_connected,
+ .get_edid = drm_hdmi_get_edid,
+ .check_timing = drm_hdmi_check_timing,
+ .power_on = drm_hdmi_power_on,
+};
+
+static int drm_hdmi_enable_vblank(struct device *subdrv_dev)
+{
+ struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+ struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
+ struct exynos_drm_manager *manager = &subdrv->manager;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (hdmi_overlay_ops && hdmi_overlay_ops->enable_vblank)
+ return hdmi_overlay_ops->enable_vblank(ctx->mixer_ctx->ctx,
+ manager->pipe);
+
+ return 0;
+}
+
+static void drm_hdmi_disable_vblank(struct device *subdrv_dev)
+{
+ struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (hdmi_overlay_ops && hdmi_overlay_ops->disable_vblank)
+ return hdmi_overlay_ops->disable_vblank(ctx->mixer_ctx->ctx);
+}
+
+static void drm_hdmi_mode_set(struct device *subdrv_dev, void *mode)
+{
+ struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (hdmi_manager_ops && hdmi_manager_ops->mode_set)
+ hdmi_manager_ops->mode_set(ctx->hdmi_ctx->ctx, mode);
+}
+
+static void drm_hdmi_commit(struct device *subdrv_dev)
+{
+ struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (hdmi_manager_ops && hdmi_manager_ops->commit)
+ hdmi_manager_ops->commit(ctx->hdmi_ctx->ctx);
+}
+
+static void drm_hdmi_dpms(struct device *subdrv_dev, int mode)
+{
+ struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ if (hdmi_manager_ops && hdmi_manager_ops->disable)
+ hdmi_manager_ops->disable(ctx->hdmi_ctx->ctx);
+ break;
+ default:
+ DRM_DEBUG_KMS("unkown dps mode: %d\n", mode);
+ break;
+ }
+}
+
+static struct exynos_drm_manager_ops drm_hdmi_manager_ops = {
+ .dpms = drm_hdmi_dpms,
+ .enable_vblank = drm_hdmi_enable_vblank,
+ .disable_vblank = drm_hdmi_disable_vblank,
+ .mode_set = drm_hdmi_mode_set,
+ .commit = drm_hdmi_commit,
+};
+
+static void drm_mixer_mode_set(struct device *subdrv_dev,
+ struct exynos_drm_overlay *overlay)
+{
+ struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (hdmi_overlay_ops && hdmi_overlay_ops->win_mode_set)
+ hdmi_overlay_ops->win_mode_set(ctx->mixer_ctx->ctx, overlay);
+}
+
+static void drm_mixer_commit(struct device *subdrv_dev, int zpos)
+{
+ struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (hdmi_overlay_ops && hdmi_overlay_ops->win_commit)
+ hdmi_overlay_ops->win_commit(ctx->mixer_ctx->ctx, zpos);
+}
+
+static void drm_mixer_disable(struct device *subdrv_dev, int zpos)
+{
+ struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (hdmi_overlay_ops && hdmi_overlay_ops->win_disable)
+ hdmi_overlay_ops->win_disable(ctx->mixer_ctx->ctx, zpos);
+}
+
+static struct exynos_drm_overlay_ops drm_hdmi_overlay_ops = {
+ .mode_set = drm_mixer_mode_set,
+ .commit = drm_mixer_commit,
+ .disable = drm_mixer_disable,
+};
+
+
+static int hdmi_subdrv_probe(struct drm_device *drm_dev,
+ struct device *dev)
+{
+ struct exynos_drm_subdrv *subdrv = to_subdrv(dev);
+ struct drm_hdmi_context *ctx;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct exynos_drm_common_hdmi_pd *pd;
+ int ret;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ pd = pdev->dev.platform_data;
+
+ if (!pd) {
+ DRM_DEBUG_KMS("platform data is null.\n");
+ return -EFAULT;
+ }
+
+ if (!pd->hdmi_dev) {
+ DRM_DEBUG_KMS("hdmi device is null.\n");
+ return -EFAULT;
+ }
+
+ if (!pd->mixer_dev) {
+ DRM_DEBUG_KMS("mixer device is null.\n");
+ return -EFAULT;
+ }
+
+ ret = platform_driver_register(&hdmi_driver);
+ if (ret) {
+ DRM_DEBUG_KMS("failed to register hdmi driver.\n");
+ return ret;
+ }
+
+ ret = platform_driver_register(&mixer_driver);
+ if (ret) {
+ DRM_DEBUG_KMS("failed to register mixer driver.\n");
+ goto err_hdmidrv;
+ }
+
+ ctx = get_ctx_from_subdrv(subdrv);
+
+ ctx->hdmi_ctx = (struct exynos_drm_hdmi_context *)
+ to_context(pd->hdmi_dev);
+ if (!ctx->hdmi_ctx) {
+ DRM_DEBUG_KMS("hdmi context is null.\n");
+ ret = -EFAULT;
+ goto err_mixerdrv;
+ }
+
+ ctx->hdmi_ctx->drm_dev = drm_dev;
+
+ ctx->mixer_ctx = (struct exynos_drm_hdmi_context *)
+ to_context(pd->mixer_dev);
+ if (!ctx->mixer_ctx) {
+ DRM_DEBUG_KMS("mixer context is null.\n");
+ ret = -EFAULT;
+ goto err_mixerdrv;
+ }
+
+ ctx->mixer_ctx->drm_dev = drm_dev;
+
+ return 0;
+
+err_mixerdrv:
+ platform_driver_unregister(&mixer_driver);
+err_hdmidrv:
+ platform_driver_unregister(&hdmi_driver);
+ return ret;
+}
+
+static void hdmi_subdrv_remove(struct drm_device *drm_dev)
+{
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ platform_driver_unregister(&hdmi_driver);
+ platform_driver_unregister(&mixer_driver);
+}
+
+static void exynos_drm_hdmi_late_probe(struct work_struct *work)
+{
+ struct drm_hdmi_context *ctx = container_of(work,
+ struct drm_hdmi_context, work);
+
+ /*
+ * this function calls subdrv->probe(), which registers the hdmi and
+ * mixer platform drivers, so it must not run from our own probe
+ * context and is deferred to this work item instead.
+ */
+ exynos_drm_subdrv_register(&ctx->subdrv);
+}
+
+static int __devinit exynos_drm_hdmi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct exynos_drm_subdrv *subdrv;
+ struct drm_hdmi_context *ctx;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ DRM_LOG_KMS("failed to alloc common hdmi context.\n");
+ return -ENOMEM;
+ }
+
+ subdrv = &ctx->subdrv;
+
+ subdrv->probe = hdmi_subdrv_probe;
+ subdrv->remove = hdmi_subdrv_remove;
+ subdrv->manager.pipe = -1;
+ subdrv->manager.ops = &drm_hdmi_manager_ops;
+ subdrv->manager.overlay_ops = &drm_hdmi_overlay_ops;
+ subdrv->manager.display_ops = &drm_hdmi_display_ops;
+ subdrv->manager.dev = dev;
+
+ platform_set_drvdata(pdev, subdrv);
+
+ INIT_WORK(&ctx->work, exynos_drm_hdmi_late_probe);
+
+ schedule_work(&ctx->work);
+
+ return 0;
+}
+
+static int hdmi_runtime_suspend(struct device *dev)
+{
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ return 0;
+}
+
+static int hdmi_runtime_resume(struct device *dev)
+{
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ return 0;
+}
+
+static const struct dev_pm_ops hdmi_pm_ops = {
+ .runtime_suspend = hdmi_runtime_suspend,
+ .runtime_resume = hdmi_runtime_resume,
+};
+
+static int __devexit exynos_drm_hdmi_remove(struct platform_device *pdev)
+{
+ struct drm_hdmi_context *ctx = platform_get_drvdata(pdev);
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ exynos_drm_subdrv_unregister(&ctx->subdrv);
+ kfree(ctx);
+
+ return 0;
+}
+
+static struct platform_driver exynos_drm_common_hdmi_driver = {
+ .probe = exynos_drm_hdmi_probe,
+ .remove = __devexit_p(exynos_drm_hdmi_remove),
+ .driver = {
+ .name = "exynos-drm-hdmi",
+ .owner = THIS_MODULE,
+ .pm = &hdmi_pm_ops,
+ },
+};
+
+static int __init exynos_drm_hdmi_init(void)
+{
+ int ret;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ ret = platform_driver_register(&exynos_drm_common_hdmi_driver);
+ if (ret) {
+ DRM_DEBUG_KMS("failed to register hdmi common driver.\n");
+ return ret;
+ }
+
+ return ret;
+}
+
+static void __exit exynos_drm_hdmi_exit(void)
+{
+ platform_driver_unregister(&exynos_drm_common_hdmi_driver);
+}
+
+module_init(exynos_drm_hdmi_init);
+module_exit(exynos_drm_hdmi_exit);
+
+MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
+MODULE_AUTHOR("Seung-Woo Kim, <sw0312.kim@samsung.com>");
+MODULE_DESCRIPTION("Samsung SoC DRM HDMI Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
new file mode 100644
index 000000000000..3c29f790ee45
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
@@ -0,0 +1,73 @@
+/* exynos_drm_hdmi.h
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Author: Inki Dae <inki.dae@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_DRM_HDMI_H_
+#define _EXYNOS_DRM_HDMI_H_
+
+/*
+ * exynos hdmi common context structure.
+ *
+ * @drm_dev: pointer to drm_device.
+ * @ctx: pointer to the context of specific device driver.
+ * this context should be hdmi_context or mixer_context.
+ */
+struct exynos_drm_hdmi_context {
+ struct drm_device *drm_dev;
+ void *ctx;
+};
+
+struct exynos_hdmi_display_ops {
+ bool (*is_connected)(void *ctx);
+ int (*get_edid)(void *ctx, struct drm_connector *connector,
+ u8 *edid, int len);
+ int (*check_timing)(void *ctx, void *timing);
+ int (*power_on)(void *ctx, int mode);
+};
+
+struct exynos_hdmi_manager_ops {
+ void (*mode_set)(void *ctx, void *mode);
+ void (*commit)(void *ctx);
+ void (*disable)(void *ctx);
+};
+
+struct exynos_hdmi_overlay_ops {
+ int (*enable_vblank)(void *ctx, int pipe);
+ void (*disable_vblank)(void *ctx);
+ void (*win_mode_set)(void *ctx, struct exynos_drm_overlay *overlay);
+ void (*win_commit)(void *ctx, int zpos);
+ void (*win_disable)(void *ctx, int zpos);
+};
+
+extern struct platform_driver hdmi_driver;
+extern struct platform_driver mixer_driver;
+
+void exynos_drm_display_ops_register(struct exynos_hdmi_display_ops
+ *display_ops);
+void exynos_drm_manager_ops_register(struct exynos_hdmi_manager_ops
+ *manager_ops);
+void exynos_drm_overlay_ops_register(struct exynos_hdmi_overlay_ops
+ *overlay_ops);
+
+#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
new file mode 100644
index 000000000000..bdcf770aa22e
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -0,0 +1,163 @@
+/*
+ * Copyright (C) 2011 Samsung Electronics Co.Ltd
+ * Authors: Joonyoung Shim <jy0922.shim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include "drmP.h"
+
+#include "exynos_drm.h"
+#include "exynos_drm_crtc.h"
+#include "exynos_drm_drv.h"
+#include "exynos_drm_encoder.h"
+
+struct exynos_plane {
+ struct drm_plane base;
+ struct exynos_drm_overlay overlay;
+ bool enabled;
+};
+
+static int
+exynos_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct exynos_plane *exynos_plane =
+ container_of(plane, struct exynos_plane, base);
+ struct exynos_drm_overlay *overlay = &exynos_plane->overlay;
+ struct exynos_drm_crtc_pos pos;
+ unsigned int x = src_x >> 16;
+ unsigned int y = src_y >> 16;
+ int ret;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ memset(&pos, 0, sizeof(struct exynos_drm_crtc_pos));
+ pos.crtc_x = crtc_x;
+ pos.crtc_y = crtc_y;
+ pos.crtc_w = crtc_w;
+ pos.crtc_h = crtc_h;
+
+ pos.fb_x = x;
+ pos.fb_y = y;
+
+ /* TODO: scale feature */
+ ret = exynos_drm_overlay_update(overlay, fb, &crtc->mode, &pos);
+ if (ret < 0)
+ return ret;
+
+ exynos_drm_fn_encoder(crtc, overlay,
+ exynos_drm_encoder_crtc_mode_set);
+ exynos_drm_fn_encoder(crtc, &overlay->zpos,
+ exynos_drm_encoder_crtc_plane_commit);
+
+ exynos_plane->enabled = true;
+
+ return 0;
+}
+
+static int exynos_disable_plane(struct drm_plane *plane)
+{
+ struct exynos_plane *exynos_plane =
+ container_of(plane, struct exynos_plane, base);
+ struct exynos_drm_overlay *overlay = &exynos_plane->overlay;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ if (!exynos_plane->enabled)
+ return 0;
+
+ exynos_drm_fn_encoder(plane->crtc, &overlay->zpos,
+ exynos_drm_encoder_crtc_disable);
+
+ exynos_plane->enabled = false;
+ exynos_plane->overlay.zpos = DEFAULT_ZPOS;
+
+ return 0;
+}
+
+static void exynos_plane_destroy(struct drm_plane *plane)
+{
+ struct exynos_plane *exynos_plane =
+ container_of(plane, struct exynos_plane, base);
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ exynos_disable_plane(plane);
+ drm_plane_cleanup(plane);
+ kfree(exynos_plane);
+}
+
+static struct drm_plane_funcs exynos_plane_funcs = {
+ .update_plane = exynos_update_plane,
+ .disable_plane = exynos_disable_plane,
+ .destroy = exynos_plane_destroy,
+};
+
+int exynos_plane_init(struct drm_device *dev, unsigned int nr)
+{
+ struct exynos_plane *exynos_plane;
+ uint32_t possible_crtcs;
+
+ exynos_plane = kzalloc(sizeof(struct exynos_plane), GFP_KERNEL);
+ if (!exynos_plane)
+ return -ENOMEM;
+
+ /* all CRTCs are available */
+ possible_crtcs = (1 << MAX_CRTC) - 1;
+
+ exynos_plane->overlay.zpos = DEFAULT_ZPOS;
+
+ /* TODO: format */
+ return drm_plane_init(dev, &exynos_plane->base, possible_crtcs,
+ &exynos_plane_funcs, NULL, 0, false);
+}
+
+int exynos_plane_set_zpos_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_exynos_plane_set_zpos *zpos_req = data;
+ struct drm_mode_object *obj;
+ struct drm_plane *plane;
+ struct exynos_plane *exynos_plane;
+ int ret = 0;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ if (zpos_req->zpos < 0 || zpos_req->zpos >= MAX_PLANE) {
+ if (zpos_req->zpos != DEFAULT_ZPOS) {
+ DRM_ERROR("zpos not within limits\n");
+ return -EINVAL;
+ }
+ }
+
+ mutex_lock(&dev->mode_config.mutex);
+
+ obj = drm_mode_object_find(dev, zpos_req->plane_id,
+ DRM_MODE_OBJECT_PLANE);
+ if (!obj) {
+ DRM_DEBUG_KMS("Unknown plane ID %d\n",
+ zpos_req->plane_id);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ plane = obj_to_plane(obj);
+ exynos_plane = container_of(plane, struct exynos_plane, base);
+
+ exynos_plane->overlay.zpos = zpos_req->zpos;
+
+out:
+ mutex_unlock(&dev->mode_config.mutex);
+ return ret;
+}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.h b/drivers/gpu/drm/exynos/exynos_drm_plane.h
new file mode 100644
index 000000000000..16b71f8217e7
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.h
@@ -0,0 +1,14 @@
+/*
+ * Copyright (C) 2011 Samsung Electronics Co.Ltd
+ * Authors: Joonyoung Shim <jy0922.shim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+int exynos_plane_init(struct drm_device *dev, unsigned int nr);
+int exynos_plane_set_zpos_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
new file mode 100644
index 000000000000..f48f7ce92f5f
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -0,0 +1,1176 @@
+/*
+ * Copyright (C) 2011 Samsung Electronics Co.Ltd
+ * Authors:
+ * Seung-Woo Kim <sw0312.kim@samsung.com>
+ * Inki Dae <inki.dae@samsung.com>
+ * Joonyoung Shim <jy0922.shim@samsung.com>
+ *
+ * Based on drivers/media/video/s5p-tv/hdmi_drv.c
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include "drmP.h"
+#include "drm_edid.h"
+#include "drm_crtc_helper.h"
+
+#include "regs-hdmi.h"
+
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/exynos_drm.h>
+
+#include "exynos_drm_drv.h"
+#include "exynos_drm_hdmi.h"
+
+#include "exynos_hdmi.h"
+
+#define HDMI_OVERLAY_NUMBER 3
+#define get_hdmi_context(dev) platform_get_drvdata(to_platform_device(dev))
+
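+/*
+ * HDMI PHY configuration blobs, one per supported TMDS pixel clock
+ * (27, 27.027, 74.175, 74.25 and 148.5 MHz); hdmiphy_conf_apply() sends
+ * the selected blob to the PHY over I2C.
+ */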
+static const u8 hdmiphy_conf27[32] = {
+ 0x01, 0x05, 0x00, 0xD8, 0x10, 0x1C, 0x30, 0x40,
+ 0x6B, 0x10, 0x02, 0x51, 0xDF, 0xF2, 0x54, 0x87,
+ 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
+ 0x22, 0x40, 0xE3, 0x26, 0x00, 0x00, 0x00, 0x00,
+};
+
+static const u8 hdmiphy_conf27_027[32] = {
+ 0x01, 0x05, 0x00, 0xD4, 0x10, 0x9C, 0x09, 0x64,
+ 0x6B, 0x10, 0x02, 0x51, 0xDF, 0xF2, 0x54, 0x87,
+ 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
+ 0x22, 0x40, 0xE3, 0x26, 0x00, 0x00, 0x00, 0x00,
+};
+
+static const u8 hdmiphy_conf74_175[32] = {
+ 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xef, 0x5B,
+ 0x6D, 0x10, 0x01, 0x51, 0xef, 0xF3, 0x54, 0xb9,
+ 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
+ 0x22, 0x40, 0xa5, 0x26, 0x01, 0x00, 0x00, 0x00,
+};
+
+static const u8 hdmiphy_conf74_25[32] = {
+ 0x01, 0x05, 0x00, 0xd8, 0x10, 0x9c, 0xf8, 0x40,
+ 0x6a, 0x10, 0x01, 0x51, 0xff, 0xf1, 0x54, 0xba,
+ 0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xe0,
+ 0x22, 0x40, 0xa4, 0x26, 0x01, 0x00, 0x00, 0x00,
+};
+
+static const u8 hdmiphy_conf148_5[32] = {
+ 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xf8, 0x40,
+ 0x6A, 0x18, 0x00, 0x51, 0xff, 0xF1, 0x54, 0xba,
+ 0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xE0,
+ 0x22, 0x40, 0xa4, 0x26, 0x02, 0x00, 0x00, 0x00,
+};
+
+struct hdmi_tg_regs {
+ u8 cmd;
+ u8 h_fsz_l;
+ u8 h_fsz_h;
+ u8 hact_st_l;
+ u8 hact_st_h;
+ u8 hact_sz_l;
+ u8 hact_sz_h;
+ u8 v_fsz_l;
+ u8 v_fsz_h;
+ u8 vsync_l;
+ u8 vsync_h;
+ u8 vsync2_l;
+ u8 vsync2_h;
+ u8 vact_st_l;
+ u8 vact_st_h;
+ u8 vact_sz_l;
+ u8 vact_sz_h;
+ u8 field_chg_l;
+ u8 field_chg_h;
+ u8 vact_st2_l;
+ u8 vact_st2_h;
+ u8 vsync_top_hdmi_l;
+ u8 vsync_top_hdmi_h;
+ u8 vsync_bot_hdmi_l;
+ u8 vsync_bot_hdmi_h;
+ u8 field_top_hdmi_l;
+ u8 field_top_hdmi_h;
+ u8 field_bot_hdmi_l;
+ u8 field_bot_hdmi_h;
+};
+
+struct hdmi_core_regs {
+ u8 h_blank[2];
+ u8 v_blank[3];
+ u8 h_v_line[3];
+ u8 vsync_pol[1];
+ u8 int_pro_mode[1];
+ u8 v_blank_f[3];
+ u8 h_sync_gen[3];
+ u8 v_sync_gen1[3];
+ u8 v_sync_gen2[3];
+ u8 v_sync_gen3[3];
+};
+
+struct hdmi_preset_conf {
+ struct hdmi_core_regs core;
+ struct hdmi_tg_regs tg;
+};
+
+static const struct hdmi_preset_conf hdmi_conf_480p = {
+ .core = {
+ .h_blank = {0x8a, 0x00},
+ .v_blank = {0x0d, 0x6a, 0x01},
+ .h_v_line = {0x0d, 0xa2, 0x35},
+ .vsync_pol = {0x01},
+ .int_pro_mode = {0x00},
+ .v_blank_f = {0x00, 0x00, 0x00},
+ .h_sync_gen = {0x0e, 0x30, 0x11},
+ .v_sync_gen1 = {0x0f, 0x90, 0x00},
+ /* other fields: don't care */
+ },
+ .tg = {
+ 0x00, /* cmd */
+ 0x5a, 0x03, /* h_fsz */
+ 0x8a, 0x00, 0xd0, 0x02, /* hact */
+ 0x0d, 0x02, /* v_fsz */
+ 0x01, 0x00, 0x33, 0x02, /* vsync */
+ 0x2d, 0x00, 0xe0, 0x01, /* vact */
+ 0x33, 0x02, /* field_chg */
+ 0x49, 0x02, /* vact_st2 */
+ 0x01, 0x00, 0x33, 0x02, /* vsync top/bot */
+ 0x01, 0x00, 0x33, 0x02, /* field top/bot */
+ },
+};
+
+static const struct hdmi_preset_conf hdmi_conf_720p60 = {
+ .core = {
+ .h_blank = {0x72, 0x01},
+ .v_blank = {0xee, 0xf2, 0x00},
+ .h_v_line = {0xee, 0x22, 0x67},
+ .vsync_pol = {0x00},
+ .int_pro_mode = {0x00},
+ .v_blank_f = {0x00, 0x00, 0x00}, /* don't care */
+ .h_sync_gen = {0x6c, 0x50, 0x02},
+ .v_sync_gen1 = {0x0a, 0x50, 0x00},
+ .v_sync_gen2 = {0x01, 0x10, 0x00},
+ .v_sync_gen3 = {0x01, 0x10, 0x00},
+ /* other fields: don't care */
+ },
+ .tg = {
+ 0x00, /* cmd */
+ 0x72, 0x06, /* h_fsz */
+ 0x71, 0x01, 0x01, 0x05, /* hact */
+ 0xee, 0x02, /* v_fsz */
+ 0x01, 0x00, 0x33, 0x02, /* vsync */
+ 0x1e, 0x00, 0xd0, 0x02, /* vact */
+ 0x33, 0x02, /* field_chg */
+ 0x49, 0x02, /* vact_st2 */
+ 0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
+ 0x01, 0x00, 0x33, 0x02, /* field top/bot */
+ },
+};
+
+static const struct hdmi_preset_conf hdmi_conf_1080i50 = {
+ .core = {
+ .h_blank = {0xd0, 0x02},
+ .v_blank = {0x32, 0xB2, 0x00},
+ .h_v_line = {0x65, 0x04, 0xa5},
+ .vsync_pol = {0x00},
+ .int_pro_mode = {0x01},
+ .v_blank_f = {0x49, 0x2A, 0x23},
+ .h_sync_gen = {0x0E, 0xEA, 0x08},
+ .v_sync_gen1 = {0x07, 0x20, 0x00},
+ .v_sync_gen2 = {0x39, 0x42, 0x23},
+ .v_sync_gen3 = {0x38, 0x87, 0x73},
+ /* other fields: don't care */
+ },
+ .tg = {
+ 0x00, /* cmd */
+ 0x50, 0x0A, /* h_fsz */
+ 0xCF, 0x02, 0x81, 0x07, /* hact */
+ 0x65, 0x04, /* v_fsz */
+ 0x01, 0x00, 0x33, 0x02, /* vsync */
+ 0x16, 0x00, 0x1c, 0x02, /* vact */
+ 0x33, 0x02, /* field_chg */
+ 0x49, 0x02, /* vact_st2 */
+ 0x01, 0x00, 0x33, 0x02, /* vsync top/bot */
+ 0x01, 0x00, 0x33, 0x02, /* field top/bot */
+ },
+};
+
+static const struct hdmi_preset_conf hdmi_conf_1080p50 = {
+ .core = {
+ .h_blank = {0xd0, 0x02},
+ .v_blank = {0x65, 0x6c, 0x01},
+ .h_v_line = {0x65, 0x04, 0xa5},
+ .vsync_pol = {0x00},
+ .int_pro_mode = {0x00},
+ .v_blank_f = {0x00, 0x00, 0x00}, /* don't care */
+ .h_sync_gen = {0x0e, 0xea, 0x08},
+ .v_sync_gen1 = {0x09, 0x40, 0x00},
+ .v_sync_gen2 = {0x01, 0x10, 0x00},
+ .v_sync_gen3 = {0x01, 0x10, 0x00},
+ /* other fields: don't care */
+ },
+ .tg = {
+ 0x00, /* cmd */
+ 0x50, 0x0A, /* h_fsz */
+ 0xCF, 0x02, 0x81, 0x07, /* hact */
+ 0x65, 0x04, /* v_fsz */
+ 0x01, 0x00, 0x33, 0x02, /* vsync */
+ 0x2d, 0x00, 0x38, 0x04, /* vact */
+ 0x33, 0x02, /* field_chg */
+ 0x48, 0x02, /* vact_st2 */
+ 0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
+ 0x01, 0x00, 0x33, 0x02, /* field top/bot */
+ },
+};
+
+static const struct hdmi_preset_conf hdmi_conf_1080i60 = {
+ .core = {
+ .h_blank = {0x18, 0x01},
+ .v_blank = {0x32, 0xB2, 0x00},
+ .h_v_line = {0x65, 0x84, 0x89},
+ .vsync_pol = {0x00},
+ .int_pro_mode = {0x01},
+ .v_blank_f = {0x49, 0x2A, 0x23},
+ .h_sync_gen = {0x56, 0x08, 0x02},
+ .v_sync_gen1 = {0x07, 0x20, 0x00},
+ .v_sync_gen2 = {0x39, 0x42, 0x23},
+ .v_sync_gen3 = {0xa4, 0x44, 0x4a},
+ /* other fields: don't care */
+ },
+ .tg = {
+ 0x00, /* cmd */
+ 0x98, 0x08, /* h_fsz */
+ 0x17, 0x01, 0x81, 0x07, /* hact */
+ 0x65, 0x04, /* v_fsz */
+ 0x01, 0x00, 0x33, 0x02, /* vsync */
+ 0x16, 0x00, 0x1c, 0x02, /* vact */
+ 0x33, 0x02, /* field_chg */
+ 0x49, 0x02, /* vact_st2 */
+ 0x01, 0x00, 0x33, 0x02, /* vsync top/bot */
+ 0x01, 0x00, 0x33, 0x02, /* field top/bot */
+ },
+};
+
+static const struct hdmi_preset_conf hdmi_conf_1080p60 = {
+ .core = {
+ .h_blank = {0x18, 0x01},
+ .v_blank = {0x65, 0x6c, 0x01},
+ .h_v_line = {0x65, 0x84, 0x89},
+ .vsync_pol = {0x00},
+ .int_pro_mode = {0x00},
+ .v_blank_f = {0x00, 0x00, 0x00}, /* don't care */
+ .h_sync_gen = {0x56, 0x08, 0x02},
+ .v_sync_gen1 = {0x09, 0x40, 0x00},
+ .v_sync_gen2 = {0x01, 0x10, 0x00},
+ .v_sync_gen3 = {0x01, 0x10, 0x00},
+ /* other fields: don't care */
+ },
+ .tg = {
+ 0x00, /* cmd */
+ 0x98, 0x08, /* h_fsz */
+ 0x17, 0x01, 0x81, 0x07, /* hact */
+ 0x65, 0x04, /* v_fsz */
+ 0x01, 0x00, 0x33, 0x02, /* vsync */
+ 0x2d, 0x00, 0x38, 0x04, /* vact */
+ 0x33, 0x02, /* field_chg */
+ 0x48, 0x02, /* vact_st2 */
+ 0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
+ 0x01, 0x00, 0x33, 0x02, /* field top/bot */
+ },
+};
+
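+/*
+ * supported modes: resolution, refresh rate and interlace flag mapped to
+ * the matching PHY configuration and timing preset
+ */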
+static const struct hdmi_conf hdmi_confs[] = {
+ { 1280, 720, 60, false, hdmiphy_conf74_25, &hdmi_conf_720p60 },
+ { 1280, 720, 50, false, hdmiphy_conf74_25, &hdmi_conf_720p60 },
+ { 720, 480, 60, false, hdmiphy_conf27_027, &hdmi_conf_480p },
+ { 1920, 1080, 50, true, hdmiphy_conf74_25, &hdmi_conf_1080i50 },
+ { 1920, 1080, 50, false, hdmiphy_conf148_5, &hdmi_conf_1080p50 },
+ { 1920, 1080, 60, true, hdmiphy_conf74_25, &hdmi_conf_1080i60 },
+ { 1920, 1080, 60, false, hdmiphy_conf148_5, &hdmi_conf_1080p60 },
+};
+
+
+static inline u32 hdmi_reg_read(struct hdmi_context *hdata, u32 reg_id)
+{
+ return readl(hdata->regs + reg_id);
+}
+
+static inline void hdmi_reg_writeb(struct hdmi_context *hdata,
+ u32 reg_id, u8 value)
+{
+ writeb(value, hdata->regs + reg_id);
+}
+
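+/* read-modify-write helper: only the bits selected by mask are changed */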
+static inline void hdmi_reg_writemask(struct hdmi_context *hdata,
+ u32 reg_id, u32 value, u32 mask)
+{
+ u32 old = readl(hdata->regs + reg_id);
+ value = (value & mask) | (old & ~mask);
+ writel(value, hdata->regs + reg_id);
+}
+
+static void hdmi_regs_dump(struct hdmi_context *hdata, char *prefix)
+{
+#define DUMPREG(reg_id) \
+ DRM_DEBUG_KMS("%s:" #reg_id " = %08x\n", prefix, \
+ readl(hdata->regs + reg_id))
+ DRM_DEBUG_KMS("%s: ---- CONTROL REGISTERS ----\n", prefix);
+ DUMPREG(HDMI_INTC_FLAG);
+ DUMPREG(HDMI_INTC_CON);
+ DUMPREG(HDMI_HPD_STATUS);
+ DUMPREG(HDMI_PHY_RSTOUT);
+ DUMPREG(HDMI_PHY_VPLL);
+ DUMPREG(HDMI_PHY_CMU);
+ DUMPREG(HDMI_CORE_RSTOUT);
+
+ DRM_DEBUG_KMS("%s: ---- CORE REGISTERS ----\n", prefix);
+ DUMPREG(HDMI_CON_0);
+ DUMPREG(HDMI_CON_1);
+ DUMPREG(HDMI_CON_2);
+ DUMPREG(HDMI_SYS_STATUS);
+ DUMPREG(HDMI_PHY_STATUS);
+ DUMPREG(HDMI_STATUS_EN);
+ DUMPREG(HDMI_HPD);
+ DUMPREG(HDMI_MODE_SEL);
+ DUMPREG(HDMI_HPD_GEN);
+ DUMPREG(HDMI_DC_CONTROL);
+ DUMPREG(HDMI_VIDEO_PATTERN_GEN);
+
+ DRM_DEBUG_KMS("%s: ---- CORE SYNC REGISTERS ----\n", prefix);
+ DUMPREG(HDMI_H_BLANK_0);
+ DUMPREG(HDMI_H_BLANK_1);
+ DUMPREG(HDMI_V_BLANK_0);
+ DUMPREG(HDMI_V_BLANK_1);
+ DUMPREG(HDMI_V_BLANK_2);
+ DUMPREG(HDMI_H_V_LINE_0);
+ DUMPREG(HDMI_H_V_LINE_1);
+ DUMPREG(HDMI_H_V_LINE_2);
+ DUMPREG(HDMI_VSYNC_POL);
+ DUMPREG(HDMI_INT_PRO_MODE);
+ DUMPREG(HDMI_V_BLANK_F_0);
+ DUMPREG(HDMI_V_BLANK_F_1);
+ DUMPREG(HDMI_V_BLANK_F_2);
+ DUMPREG(HDMI_H_SYNC_GEN_0);
+ DUMPREG(HDMI_H_SYNC_GEN_1);
+ DUMPREG(HDMI_H_SYNC_GEN_2);
+ DUMPREG(HDMI_V_SYNC_GEN_1_0);
+ DUMPREG(HDMI_V_SYNC_GEN_1_1);
+ DUMPREG(HDMI_V_SYNC_GEN_1_2);
+ DUMPREG(HDMI_V_SYNC_GEN_2_0);
+ DUMPREG(HDMI_V_SYNC_GEN_2_1);
+ DUMPREG(HDMI_V_SYNC_GEN_2_2);
+ DUMPREG(HDMI_V_SYNC_GEN_3_0);
+ DUMPREG(HDMI_V_SYNC_GEN_3_1);
+ DUMPREG(HDMI_V_SYNC_GEN_3_2);
+
+ DRM_DEBUG_KMS("%s: ---- TG REGISTERS ----\n", prefix);
+ DUMPREG(HDMI_TG_CMD);
+ DUMPREG(HDMI_TG_H_FSZ_L);
+ DUMPREG(HDMI_TG_H_FSZ_H);
+ DUMPREG(HDMI_TG_HACT_ST_L);
+ DUMPREG(HDMI_TG_HACT_ST_H);
+ DUMPREG(HDMI_TG_HACT_SZ_L);
+ DUMPREG(HDMI_TG_HACT_SZ_H);
+ DUMPREG(HDMI_TG_V_FSZ_L);
+ DUMPREG(HDMI_TG_V_FSZ_H);
+ DUMPREG(HDMI_TG_VSYNC_L);
+ DUMPREG(HDMI_TG_VSYNC_H);
+ DUMPREG(HDMI_TG_VSYNC2_L);
+ DUMPREG(HDMI_TG_VSYNC2_H);
+ DUMPREG(HDMI_TG_VACT_ST_L);
+ DUMPREG(HDMI_TG_VACT_ST_H);
+ DUMPREG(HDMI_TG_VACT_SZ_L);
+ DUMPREG(HDMI_TG_VACT_SZ_H);
+ DUMPREG(HDMI_TG_FIELD_CHG_L);
+ DUMPREG(HDMI_TG_FIELD_CHG_H);
+ DUMPREG(HDMI_TG_VACT_ST2_L);
+ DUMPREG(HDMI_TG_VACT_ST2_H);
+ DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_L);
+ DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_H);
+ DUMPREG(HDMI_TG_VSYNC_BOT_HDMI_L);
+ DUMPREG(HDMI_TG_VSYNC_BOT_HDMI_H);
+ DUMPREG(HDMI_TG_FIELD_TOP_HDMI_L);
+ DUMPREG(HDMI_TG_FIELD_TOP_HDMI_H);
+ DUMPREG(HDMI_TG_FIELD_BOT_HDMI_L);
+ DUMPREG(HDMI_TG_FIELD_BOT_HDMI_H);
+#undef DUMPREG
+}
+
+static int hdmi_conf_index(struct drm_display_mode *mode)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hdmi_confs); ++i)
+ if (hdmi_confs[i].width == mode->hdisplay &&
+ hdmi_confs[i].height == mode->vdisplay &&
+ hdmi_confs[i].vrefresh == mode->vrefresh &&
+ hdmi_confs[i].interlace ==
+ ((mode->flags & DRM_MODE_FLAG_INTERLACE) ?
+ true : false))
+ return i;
+
+ return -1;
+}
+
+static bool hdmi_is_connected(void *ctx)
+{
+ struct hdmi_context *hdata = (struct hdmi_context *)ctx;
+ u32 val = hdmi_reg_read(hdata, HDMI_HPD_STATUS);
+
+ if (val)
+ return true;
+
+ return false;
+}
+
+static int hdmi_get_edid(void *ctx, struct drm_connector *connector,
+ u8 *edid, int len)
+{
+ struct edid *raw_edid;
+ struct hdmi_context *hdata = (struct hdmi_context *)ctx;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ if (!hdata->ddc_port)
+ return -ENODEV;
+
+ raw_edid = drm_get_edid(connector, hdata->ddc_port->adapter);
+ if (raw_edid) {
+ memcpy(edid, raw_edid, min((1 + raw_edid->extensions)
+ * EDID_LENGTH, len));
+ DRM_DEBUG_KMS("width[%d] x height[%d]\n",
+ raw_edid->width_cm, raw_edid->height_cm);
+ } else {
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int hdmi_check_timing(void *ctx, void *timing)
+{
+ struct fb_videomode *check_timing = timing;
+ int i;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ DRM_DEBUG_KMS("[%d]x[%d] [%d]Hz [%x]\n", check_timing->xres,
+ check_timing->yres, check_timing->refresh,
+ check_timing->vmode);
+
+ for (i = 0; i < ARRAY_SIZE(hdmi_confs); ++i)
+ if (hdmi_confs[i].width == check_timing->xres &&
+ hdmi_confs[i].height == check_timing->yres &&
+ hdmi_confs[i].vrefresh == check_timing->refresh &&
+ hdmi_confs[i].interlace ==
+ ((check_timing->vmode & FB_VMODE_INTERLACED) ?
+ true : false))
+ return 0;
+
+ return -EINVAL;
+}
+
+static int hdmi_display_power_on(void *ctx, int mode)
+{
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ DRM_DEBUG_KMS("hdmi [on]\n");
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ break;
+ case DRM_MODE_DPMS_SUSPEND:
+ break;
+ case DRM_MODE_DPMS_OFF:
+ DRM_DEBUG_KMS("hdmi [off]\n");
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static struct exynos_hdmi_display_ops display_ops = {
+ .is_connected = hdmi_is_connected,
+ .get_edid = hdmi_get_edid,
+ .check_timing = hdmi_check_timing,
+ .power_on = hdmi_display_power_on,
+};
+
+static void hdmi_conf_reset(struct hdmi_context *hdata)
+{
+ /* disable hpd handle for drm */
+ hdata->hpd_handle = false;
+
+ /* resetting HDMI core */
+ hdmi_reg_writemask(hdata, HDMI_CORE_RSTOUT, 0, HDMI_CORE_SW_RSTOUT);
+ mdelay(10);
+ hdmi_reg_writemask(hdata, HDMI_CORE_RSTOUT, ~0, HDMI_CORE_SW_RSTOUT);
+ mdelay(10);
+
+ /* enable hpd handle for drm */
+ hdata->hpd_handle = true;
+}
+
+static void hdmi_conf_init(struct hdmi_context *hdata)
+{
+ /* disable hpd handle for drm */
+ hdata->hpd_handle = false;
+
+ /* enable HPD interrupts */
+ hdmi_reg_writemask(hdata, HDMI_INTC_CON, 0, HDMI_INTC_EN_GLOBAL |
+ HDMI_INTC_EN_HPD_PLUG | HDMI_INTC_EN_HPD_UNPLUG);
+ mdelay(10);
+ hdmi_reg_writemask(hdata, HDMI_INTC_CON, ~0, HDMI_INTC_EN_GLOBAL |
+ HDMI_INTC_EN_HPD_PLUG | HDMI_INTC_EN_HPD_UNPLUG);
+
+ /* choose HDMI mode */
+ hdmi_reg_writemask(hdata, HDMI_MODE_SEL,
+ HDMI_MODE_HDMI_EN, HDMI_MODE_MASK);
+ /* disable bluescreen */
+ hdmi_reg_writemask(hdata, HDMI_CON_0, 0, HDMI_BLUE_SCR_EN);
+ /* choose bluescreen color */
+ hdmi_reg_writeb(hdata, HDMI_BLUE_SCREEN_0, 0x12);
+ hdmi_reg_writeb(hdata, HDMI_BLUE_SCREEN_1, 0x34);
+ hdmi_reg_writeb(hdata, HDMI_BLUE_SCREEN_2, 0x56);
+ /* enable AVI packet every vsync, fixes purple line problem */
+ hdmi_reg_writeb(hdata, HDMI_AVI_CON, 0x02);
+ /* force RGB; see CEA-861-D, table 7 for details */
+ hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(0), 0 << 5);
+ hdmi_reg_writemask(hdata, HDMI_CON_1, 0x10 << 5, 0x11 << 5);
+
+ hdmi_reg_writeb(hdata, HDMI_SPD_CON, 0x02);
+ hdmi_reg_writeb(hdata, HDMI_AUI_CON, 0x02);
+ hdmi_reg_writeb(hdata, HDMI_ACR_CON, 0x04);
+
+ /* enable hpd handle for drm */
+ hdata->hpd_handle = true;
+}
+
+static void hdmi_timing_apply(struct hdmi_context *hdata,
+ const struct hdmi_preset_conf *conf)
+{
+ const struct hdmi_core_regs *core = &conf->core;
+ const struct hdmi_tg_regs *tg = &conf->tg;
+ int tries;
+
+ /* setting core registers */
+ hdmi_reg_writeb(hdata, HDMI_H_BLANK_0, core->h_blank[0]);
+ hdmi_reg_writeb(hdata, HDMI_H_BLANK_1, core->h_blank[1]);
+ hdmi_reg_writeb(hdata, HDMI_V_BLANK_0, core->v_blank[0]);
+ hdmi_reg_writeb(hdata, HDMI_V_BLANK_1, core->v_blank[1]);
+ hdmi_reg_writeb(hdata, HDMI_V_BLANK_2, core->v_blank[2]);
+ hdmi_reg_writeb(hdata, HDMI_H_V_LINE_0, core->h_v_line[0]);
+ hdmi_reg_writeb(hdata, HDMI_H_V_LINE_1, core->h_v_line[1]);
+ hdmi_reg_writeb(hdata, HDMI_H_V_LINE_2, core->h_v_line[2]);
+ hdmi_reg_writeb(hdata, HDMI_VSYNC_POL, core->vsync_pol[0]);
+ hdmi_reg_writeb(hdata, HDMI_INT_PRO_MODE, core->int_pro_mode[0]);
+ hdmi_reg_writeb(hdata, HDMI_V_BLANK_F_0, core->v_blank_f[0]);
+ hdmi_reg_writeb(hdata, HDMI_V_BLANK_F_1, core->v_blank_f[1]);
+ hdmi_reg_writeb(hdata, HDMI_V_BLANK_F_2, core->v_blank_f[2]);
+ hdmi_reg_writeb(hdata, HDMI_H_SYNC_GEN_0, core->h_sync_gen[0]);
+ hdmi_reg_writeb(hdata, HDMI_H_SYNC_GEN_1, core->h_sync_gen[1]);
+ hdmi_reg_writeb(hdata, HDMI_H_SYNC_GEN_2, core->h_sync_gen[2]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_GEN_1_0, core->v_sync_gen1[0]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_GEN_1_1, core->v_sync_gen1[1]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_GEN_1_2, core->v_sync_gen1[2]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_GEN_2_0, core->v_sync_gen2[0]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_GEN_2_1, core->v_sync_gen2[1]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_GEN_2_2, core->v_sync_gen2[2]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_GEN_3_0, core->v_sync_gen3[0]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_GEN_3_1, core->v_sync_gen3[1]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_GEN_3_2, core->v_sync_gen3[2]);
+ /* Timing generator registers */
+ hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_L, tg->h_fsz_l);
+ hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_H, tg->h_fsz_h);
+ hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_L, tg->hact_st_l);
+ hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_H, tg->hact_st_h);
+ hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_L, tg->hact_sz_l);
+ hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_H, tg->hact_sz_h);
+ hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_L, tg->v_fsz_l);
+ hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_H, tg->v_fsz_h);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_L, tg->vsync_l);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_H, tg->vsync_h);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_L, tg->vsync2_l);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_H, tg->vsync2_h);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_L, tg->vact_st_l);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_H, tg->vact_st_h);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_L, tg->vact_sz_l);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_H, tg->vact_sz_h);
+ hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_L, tg->field_chg_l);
+ hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_H, tg->field_chg_h);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_L, tg->vact_st2_l);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_H, tg->vact_st2_h);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, tg->vsync_top_hdmi_l);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_H, tg->vsync_top_hdmi_h);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, tg->vsync_bot_hdmi_l);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_H, tg->vsync_bot_hdmi_h);
+ hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_L, tg->field_top_hdmi_l);
+ hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_H, tg->field_top_hdmi_h);
+ hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_L, tg->field_bot_hdmi_l);
+ hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_H, tg->field_bot_hdmi_h);
+
+ /* waiting for HDMIPHY's PLL to get to steady state */
+ for (tries = 100; tries; --tries) {
+ u32 val = hdmi_reg_read(hdata, HDMI_PHY_STATUS);
+ if (val & HDMI_PHY_STATUS_READY)
+ break;
+ mdelay(1);
+ }
+ /* steady state not achieved */
+ if (tries == 0) {
+ DRM_ERROR("hdmiphy's pll could not reach steady state.\n");
+ hdmi_regs_dump(hdata, "timing apply");
+ }
+
+ clk_disable(hdata->res.sclk_hdmi);
+ clk_set_parent(hdata->res.sclk_hdmi, hdata->res.sclk_hdmiphy);
+ clk_enable(hdata->res.sclk_hdmi);
+
+ /* enable HDMI and timing generator */
+ hdmi_reg_writemask(hdata, HDMI_CON_0, ~0, HDMI_EN);
+ if (core->int_pro_mode[0])
+ hdmi_reg_writemask(hdata, HDMI_TG_CMD, ~0, HDMI_TG_EN |
+ HDMI_FIELD_EN);
+ else
+ hdmi_reg_writemask(hdata, HDMI_TG_CMD, ~0, HDMI_TG_EN);
+}
+
+static void hdmiphy_conf_reset(struct hdmi_context *hdata)
+{
+ u8 buffer[2];
+
+ clk_disable(hdata->res.sclk_hdmi);
+ clk_set_parent(hdata->res.sclk_hdmi, hdata->res.sclk_pixel);
+ clk_enable(hdata->res.sclk_hdmi);
+
+ /* operation mode */
+ buffer[0] = 0x1f;
+ buffer[1] = 0x00;
+
+ if (hdata->hdmiphy_port)
+ i2c_master_send(hdata->hdmiphy_port, buffer, 2);
+
+ /* reset hdmiphy */
+ hdmi_reg_writemask(hdata, HDMI_PHY_RSTOUT, ~0, HDMI_PHY_SW_RSTOUT);
+ mdelay(10);
+ hdmi_reg_writemask(hdata, HDMI_PHY_RSTOUT, 0, HDMI_PHY_SW_RSTOUT);
+ mdelay(10);
+}
+
+static void hdmiphy_conf_apply(struct hdmi_context *hdata)
+{
+ u8 buffer[32];
+ u8 operation[2];
+ u8 read_buffer[32] = {0, };
+ int ret;
+ int i;
+
+ if (!hdata->hdmiphy_port) {
+ DRM_ERROR("hdmiphy is not attached\n");
+ return;
+ }
+
+ /* pixel clock */
+ memcpy(buffer, hdmi_confs[hdata->cur_conf].hdmiphy_data, 32);
+ ret = i2c_master_send(hdata->hdmiphy_port, buffer, 32);
+ if (ret != 32) {
+ DRM_ERROR("failed to configure HDMIPHY via I2C\n");
+ return;
+ }
+
+ mdelay(10);
+
+ /* operation mode */
+ operation[0] = 0x1f;
+ operation[1] = 0x80;
+
+ ret = i2c_master_send(hdata->hdmiphy_port, operation, 2);
+ if (ret != 2) {
+ DRM_ERROR("failed to enable hdmiphy\n");
+ return;
+ }
+
+ ret = i2c_master_recv(hdata->hdmiphy_port, read_buffer, 32);
+ if (ret < 0) {
+ DRM_ERROR("failed to read hdmiphy config\n");
+ return;
+ }
+
+ for (i = 0; i < ret; i++)
+ DRM_DEBUG_KMS("hdmiphy[0x%02x] write[0x%02x] - "
+ "recv [0x%02x]\n", i, buffer[i], read_buffer[i]);
+}
+
+static void hdmi_conf_apply(struct hdmi_context *hdata)
+{
+ const struct hdmi_preset_conf *conf =
+ hdmi_confs[hdata->cur_conf].conf;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ hdmiphy_conf_reset(hdata);
+ hdmiphy_conf_apply(hdata);
+
+ hdmi_conf_reset(hdata);
+ hdmi_conf_init(hdata);
+
+ /* setting core registers */
+ hdmi_timing_apply(hdata, conf);
+
+ hdmi_regs_dump(hdata, "start");
+}
+
+static void hdmi_mode_set(void *ctx, void *mode)
+{
+ struct hdmi_context *hdata = (struct hdmi_context *)ctx;
+ int conf_idx;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ conf_idx = hdmi_conf_index(mode);
+ if (conf_idx >= 0 && conf_idx < ARRAY_SIZE(hdmi_confs))
+ hdata->cur_conf = conf_idx;
+ else
+ DRM_DEBUG_KMS("not supported mode\n");
+}
+
+static void hdmi_commit(void *ctx)
+{
+ struct hdmi_context *hdata = (struct hdmi_context *)ctx;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ hdmi_conf_apply(hdata);
+
+ hdata->enabled = true;
+}
+
+static void hdmi_disable(void *ctx)
+{
+ struct hdmi_context *hdata = (struct hdmi_context *)ctx;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ if (hdata->enabled) {
+ hdmiphy_conf_reset(hdata);
+ hdmi_conf_reset(hdata);
+ }
+}
+
+static struct exynos_hdmi_manager_ops manager_ops = {
+ .mode_set = hdmi_mode_set,
+ .commit = hdmi_commit,
+ .disable = hdmi_disable,
+};
+
+/*
+ * Handle hotplug events outside the interrupt handler proper.
+ */
+static void hdmi_hotplug_func(struct work_struct *work)
+{
+ struct hdmi_context *hdata =
+ container_of(work, struct hdmi_context, hotplug_work);
+ struct exynos_drm_hdmi_context *ctx =
+ (struct exynos_drm_hdmi_context *)hdata->parent_ctx;
+
+ drm_helper_hpd_irq_event(ctx->drm_dev);
+}
+
+static irqreturn_t hdmi_irq_handler(int irq, void *arg)
+{
+ struct exynos_drm_hdmi_context *ctx = arg;
+ struct hdmi_context *hdata = (struct hdmi_context *)ctx->ctx;
+ u32 intc_flag;
+
+ intc_flag = hdmi_reg_read(hdata, HDMI_INTC_FLAG);
+ /* clearing flags for HPD plug/unplug */
+ if (intc_flag & HDMI_INTC_FLAG_HPD_UNPLUG) {
+ DRM_DEBUG_KMS("unplugged, handling:%d\n", hdata->hpd_handle);
+ hdmi_reg_writemask(hdata, HDMI_INTC_FLAG, ~0,
+ HDMI_INTC_FLAG_HPD_UNPLUG);
+ }
+ if (intc_flag & HDMI_INTC_FLAG_HPD_PLUG) {
+ DRM_DEBUG_KMS("plugged, handling:%d\n", hdata->hpd_handle);
+ hdmi_reg_writemask(hdata, HDMI_INTC_FLAG, ~0,
+ HDMI_INTC_FLAG_HPD_PLUG);
+ }
+
+ if (ctx->drm_dev && hdata->hpd_handle)
+ queue_work(hdata->wq, &hdata->hotplug_work);
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit hdmi_resources_init(struct hdmi_context *hdata)
+{
+ struct device *dev = hdata->dev;
+ struct hdmi_resources *res = &hdata->res;
+ static char *supply[] = {
+ "hdmi-en",
+ "vdd",
+ "vdd_osc",
+ "vdd_pll",
+ };
+ int i, ret;
+
+ DRM_DEBUG_KMS("HDMI resource init\n");
+
+ memset(res, 0, sizeof *res);
+
+ /* get clocks, power */
+ res->hdmi = clk_get(dev, "hdmi");
+ if (IS_ERR_OR_NULL(res->hdmi)) {
+ DRM_ERROR("failed to get clock 'hdmi'\n");
+ goto fail;
+ }
+ res->sclk_hdmi = clk_get(dev, "sclk_hdmi");
+ if (IS_ERR_OR_NULL(res->sclk_hdmi)) {
+ DRM_ERROR("failed to get clock 'sclk_hdmi'\n");
+ goto fail;
+ }
+ res->sclk_pixel = clk_get(dev, "sclk_pixel");
+ if (IS_ERR_OR_NULL(res->sclk_pixel)) {
+ DRM_ERROR("failed to get clock 'sclk_pixel'\n");
+ goto fail;
+ }
+ res->sclk_hdmiphy = clk_get(dev, "sclk_hdmiphy");
+ if (IS_ERR_OR_NULL(res->sclk_hdmiphy)) {
+ DRM_ERROR("failed to get clock 'sclk_hdmiphy'\n");
+ goto fail;
+ }
+ res->hdmiphy = clk_get(dev, "hdmiphy");
+ if (IS_ERR_OR_NULL(res->hdmiphy)) {
+ DRM_ERROR("failed to get clock 'hdmiphy'\n");
+ goto fail;
+ }
+
+ clk_set_parent(res->sclk_hdmi, res->sclk_pixel);
+
+ res->regul_bulk = kzalloc(ARRAY_SIZE(supply) *
+ sizeof res->regul_bulk[0], GFP_KERNEL);
+ if (!res->regul_bulk) {
+ DRM_ERROR("failed to get memory for regulators\n");
+ goto fail;
+ }
+ for (i = 0; i < ARRAY_SIZE(supply); ++i) {
+ res->regul_bulk[i].supply = supply[i];
+ res->regul_bulk[i].consumer = NULL;
+ }
+ ret = regulator_bulk_get(dev, ARRAY_SIZE(supply), res->regul_bulk);
+ if (ret) {
+ DRM_ERROR("failed to get regulators\n");
+ goto fail;
+ }
+ res->regul_count = ARRAY_SIZE(supply);
+
+ return 0;
+fail:
+ DRM_ERROR("HDMI resource init - failed\n");
+ return -ENODEV;
+}
+
+static int hdmi_resources_cleanup(struct hdmi_context *hdata)
+{
+ struct hdmi_resources *res = &hdata->res;
+
+ regulator_bulk_free(res->regul_count, res->regul_bulk);
+ /* kfree is NULL-safe */
+ kfree(res->regul_bulk);
+ if (!IS_ERR_OR_NULL(res->hdmiphy))
+ clk_put(res->hdmiphy);
+ if (!IS_ERR_OR_NULL(res->sclk_hdmiphy))
+ clk_put(res->sclk_hdmiphy);
+ if (!IS_ERR_OR_NULL(res->sclk_pixel))
+ clk_put(res->sclk_pixel);
+ if (!IS_ERR_OR_NULL(res->sclk_hdmi))
+ clk_put(res->sclk_hdmi);
+ if (!IS_ERR_OR_NULL(res->hdmi))
+ clk_put(res->hdmi);
+ memset(res, 0, sizeof *res);
+
+ return 0;
+}
+
+static void hdmi_resource_poweron(struct hdmi_context *hdata)
+{
+ struct hdmi_resources *res = &hdata->res;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ /* turn HDMI power on */
+ regulator_bulk_enable(res->regul_count, res->regul_bulk);
+ /* power-on hdmi physical interface */
+ clk_enable(res->hdmiphy);
+ /* turn clocks on */
+ clk_enable(res->hdmi);
+ clk_enable(res->sclk_hdmi);
+
+ hdmiphy_conf_reset(hdata);
+ hdmi_conf_reset(hdata);
+ hdmi_conf_init(hdata);
+
+}
+
+static void hdmi_resource_poweroff(struct hdmi_context *hdata)
+{
+ struct hdmi_resources *res = &hdata->res;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ /* turn clocks off */
+ clk_disable(res->sclk_hdmi);
+ clk_disable(res->hdmi);
+ /* power-off hdmiphy */
+ clk_disable(res->hdmiphy);
+ /* turn HDMI power off */
+ regulator_bulk_disable(res->regul_count, res->regul_bulk);
+}
+
+static int hdmi_runtime_suspend(struct device *dev)
+{
+ struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ hdmi_resource_poweroff((struct hdmi_context *)ctx->ctx);
+
+ return 0;
+}
+
+static int hdmi_runtime_resume(struct device *dev)
+{
+ struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ hdmi_resource_poweron((struct hdmi_context *)ctx->ctx);
+
+ return 0;
+}
+
+static const struct dev_pm_ops hdmi_pm_ops = {
+ .runtime_suspend = hdmi_runtime_suspend,
+ .runtime_resume = hdmi_runtime_resume,
+};
+
+static struct i2c_client *hdmi_ddc, *hdmi_hdmiphy;
+
+void hdmi_attach_ddc_client(struct i2c_client *ddc)
+{
+ if (ddc)
+ hdmi_ddc = ddc;
+}
+EXPORT_SYMBOL(hdmi_attach_ddc_client);
+
+void hdmi_attach_hdmiphy_client(struct i2c_client *hdmiphy)
+{
+ if (hdmiphy)
+ hdmi_hdmiphy = hdmiphy;
+}
+EXPORT_SYMBOL(hdmi_attach_hdmiphy_client);
+
+static int __devinit hdmi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct exynos_drm_hdmi_context *drm_hdmi_ctx;
+ struct hdmi_context *hdata;
+ struct exynos_drm_hdmi_pdata *pdata;
+ struct resource *res;
+ int ret;
+
+ DRM_DEBUG_KMS("[%d]\n", __LINE__);
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ DRM_ERROR("no platform data specified\n");
+ return -EINVAL;
+ }
+
+ drm_hdmi_ctx = kzalloc(sizeof(*drm_hdmi_ctx), GFP_KERNEL);
+ if (!drm_hdmi_ctx) {
+ DRM_ERROR("failed to allocate common hdmi context.\n");
+ return -ENOMEM;
+ }
+
+ hdata = kzalloc(sizeof(struct hdmi_context), GFP_KERNEL);
+ if (!hdata) {
+ DRM_ERROR("out of memory\n");
+ kfree(drm_hdmi_ctx);
+ return -ENOMEM;
+ }
+
+ drm_hdmi_ctx->ctx = (void *)hdata;
+ hdata->parent_ctx = (void *)drm_hdmi_ctx;
+
+ platform_set_drvdata(pdev, drm_hdmi_ctx);
+
+ hdata->default_win = pdata->default_win;
+ hdata->default_timing = &pdata->timing;
+ hdata->default_bpp = pdata->bpp;
+ hdata->dev = dev;
+
+ ret = hdmi_resources_init(hdata);
+ if (ret) {
+ ret = -EINVAL;
+ goto err_data;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ DRM_ERROR("failed to find registers\n");
+ ret = -ENOENT;
+ goto err_resource;
+ }
+
+ hdata->regs_res = request_mem_region(res->start, resource_size(res),
+ dev_name(dev));
+ if (!hdata->regs_res) {
+ DRM_ERROR("failed to claim register region\n");
+ ret = -ENOENT;
+ goto err_resource;
+ }
+
+ hdata->regs = ioremap(res->start, resource_size(res));
+ if (!hdata->regs) {
+ DRM_ERROR("failed to map registers\n");
+ ret = -ENXIO;
+ goto err_req_region;
+ }
+
+ /* DDC i2c driver */
+ if (i2c_add_driver(&ddc_driver)) {
+ DRM_ERROR("failed to register ddc i2c driver\n");
+ ret = -ENOENT;
+ goto err_iomap;
+ }
+
+ hdata->ddc_port = hdmi_ddc;
+
+ /* hdmiphy i2c driver */
+ if (i2c_add_driver(&hdmiphy_driver)) {
+ DRM_ERROR("failed to register hdmiphy i2c driver\n");
+ ret = -ENOENT;
+ goto err_ddc;
+ }
+
+ hdata->hdmiphy_port = hdmi_hdmiphy;
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (res == NULL) {
+ DRM_ERROR("get interrupt resource failed.\n");
+ ret = -ENXIO;
+ goto err_hdmiphy;
+ }
+
+ /* create workqueue and hotplug work */
+ hdata->wq = alloc_workqueue("exynos-drm-hdmi",
+ WQ_UNBOUND | WQ_NON_REENTRANT, 1);
+ if (hdata->wq == NULL) {
+ DRM_ERROR("Failed to create workqueue.\n");
+ ret = -ENOMEM;
+ goto err_hdmiphy;
+ }
+ INIT_WORK(&hdata->hotplug_work, hdmi_hotplug_func);
+
+ /* register hpd interrupt */
+ ret = request_irq(res->start, hdmi_irq_handler, 0, "drm_hdmi",
+ drm_hdmi_ctx);
+ if (ret) {
+ DRM_ERROR("request interrupt failed.\n");
+ goto err_workqueue;
+ }
+ hdata->irq = res->start;
+
+ /* register specific callbacks to common hdmi. */
+ exynos_drm_display_ops_register(&display_ops);
+ exynos_drm_manager_ops_register(&manager_ops);
+
+ hdmi_resource_poweron(hdata);
+
+ return 0;
+
+err_workqueue:
+ destroy_workqueue(hdata->wq);
+err_hdmiphy:
+ i2c_del_driver(&hdmiphy_driver);
+err_ddc:
+ i2c_del_driver(&ddc_driver);
+err_iomap:
+ iounmap(hdata->regs);
+err_req_region:
+ release_resource(hdata->regs_res);
+ kfree(hdata->regs_res);
+err_resource:
+ hdmi_resources_cleanup(hdata);
+err_data:
+ kfree(hdata);
+ kfree(drm_hdmi_ctx);
+ return ret;
+}
+
+static int __devexit hdmi_remove(struct platform_device *pdev)
+{
+ struct exynos_drm_hdmi_context *ctx = platform_get_drvdata(pdev);
+ struct hdmi_context *hdata = (struct hdmi_context *)ctx->ctx;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ hdmi_resource_poweroff(hdata);
+
+ disable_irq(hdata->irq);
+ free_irq(hdata->irq, ctx);
+
+ cancel_work_sync(&hdata->hotplug_work);
+ destroy_workqueue(hdata->wq);
+
+ hdmi_resources_cleanup(hdata);
+
+ iounmap(hdata->regs);
+
+ release_resource(hdata->regs_res);
+ kfree(hdata->regs_res);
+
+ /* hdmiphy i2c driver */
+ i2c_del_driver(&hdmiphy_driver);
+ /* DDC i2c driver */
+ i2c_del_driver(&ddc_driver);
+
+ kfree(hdata);
+
+ return 0;
+}
+
+struct platform_driver hdmi_driver = {
+ .probe = hdmi_probe,
+ .remove = __devexit_p(hdmi_remove),
+ .driver = {
+ .name = "exynos4-hdmi",
+ .owner = THIS_MODULE,
+ .pm = &hdmi_pm_ops,
+ },
+};
+EXPORT_SYMBOL(hdmi_driver);
+
+MODULE_AUTHOR("Seung-Woo Kim, <sw0312.kim@samsung.com>");
+MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
+MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
+MODULE_DESCRIPTION("Samsung DRM HDMI core Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.h b/drivers/gpu/drm/exynos/exynos_hdmi.h
new file mode 100644
index 000000000000..31d6cf84c1aa
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.h
@@ -0,0 +1,87 @@
+/*
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Authors:
+ * Inki Dae <inki.dae@samsung.com>
+ * Seung-Woo Kim <sw0312.kim@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_HDMI_H_
+#define _EXYNOS_HDMI_H_
+
+struct hdmi_conf {
+ int width;
+ int height;
+ int vrefresh;
+ bool interlace;
+ const u8 *hdmiphy_data;
+ const struct hdmi_preset_conf *conf;
+};
+
+struct hdmi_resources {
+ struct clk *hdmi;
+ struct clk *sclk_hdmi;
+ struct clk *sclk_pixel;
+ struct clk *sclk_hdmiphy;
+ struct clk *hdmiphy;
+ struct regulator_bulk_data *regul_bulk;
+ int regul_count;
+};
+
+struct hdmi_context {
+ struct device *dev;
+ struct drm_device *drm_dev;
+ struct fb_videomode *default_timing;
+ unsigned int default_win;
+ unsigned int default_bpp;
+ bool hpd_handle;
+ bool enabled;
+
+ struct resource *regs_res;
+ /** base address of HDMI registers */
+ void __iomem *regs;
+ /** HDMI hotplug interrupt */
+ unsigned int irq;
+ /** workqueue for delayed work */
+ struct workqueue_struct *wq;
+ /** hotplug handling work */
+ struct work_struct hotplug_work;
+
+ struct i2c_client *ddc_port;
+ struct i2c_client *hdmiphy_port;
+
+ /** current hdmiphy conf index */
+ int cur_conf;
+ /** other resources */
+ struct hdmi_resources res;
+
+ void *parent_ctx;
+};
+
+
+void hdmi_attach_ddc_client(struct i2c_client *ddc);
+void hdmi_attach_hdmiphy_client(struct i2c_client *hdmiphy);
+
+extern struct i2c_driver hdmiphy_driver;
+extern struct i2c_driver ddc_driver;
+
+#endif
diff --git a/drivers/gpu/drm/exynos/exynos_hdmiphy.c b/drivers/gpu/drm/exynos/exynos_hdmiphy.c
new file mode 100644
index 000000000000..9fe2995ab9f9
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_hdmiphy.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2011 Samsung Electronics Co.Ltd
+ * Authors:
+ * Seung-Woo Kim <sw0312.kim@samsung.com>
+ * Inki Dae <inki.dae@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include "drmP.h"
+
+#include <linux/kernel.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+
+#include "exynos_drm_drv.h"
+#include "exynos_hdmi.h"
+
+
+static int hdmiphy_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ hdmi_attach_hdmiphy_client(client);
+
+ dev_info(&client->adapter->dev, "attached s5p_hdmiphy "
+ "into i2c adapter successfully\n");
+
+ return 0;
+}
+
+static int hdmiphy_remove(struct i2c_client *client)
+{
+ dev_info(&client->adapter->dev, "detached s5p_hdmiphy "
+ "from i2c adapter successfully\n");
+
+ return 0;
+}
+
+static const struct i2c_device_id hdmiphy_id[] = {
+ { "s5p_hdmiphy", 0 },
+ { },
+};
+
+struct i2c_driver hdmiphy_driver = {
+ .driver = {
+ .name = "s5p-hdmiphy",
+ .owner = THIS_MODULE,
+ },
+ .id_table = hdmiphy_id,
+ .probe = hdmiphy_probe,
+ .remove = __devexit_p(hdmiphy_remove),
+ .command = NULL,
+};
+EXPORT_SYMBOL(hdmiphy_driver);
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
new file mode 100644
index 000000000000..ac24cff39775
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -0,0 +1,1070 @@
+/*
+ * Copyright (C) 2011 Samsung Electronics Co.Ltd
+ * Authors:
+ * Seung-Woo Kim <sw0312.kim@samsung.com>
+ * Inki Dae <inki.dae@samsung.com>
+ * Joonyoung Shim <jy0922.shim@samsung.com>
+ *
+ * Based on drivers/media/video/s5p-tv/mixer_reg.c
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include "drmP.h"
+
+#include "regs-mixer.h"
+#include "regs-vp.h"
+
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/exynos_drm.h>
+
+#include "exynos_drm_drv.h"
+#include "exynos_drm_hdmi.h"
+#include "exynos_hdmi.h"
+#include "exynos_mixer.h"
+
+#define get_mixer_context(dev) platform_get_drvdata(to_platform_device(dev))
+
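+/*
+ * scaler filter coefficient tables for the video processor (VP), loaded
+ * into the VP_POLY* registers by vp_default_filter()
+ */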
+static const u8 filter_y_horiz_tap8[] = {
+ 0, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, 0, 0, 0,
+ 0, 2, 4, 5, 6, 6, 6, 6,
+ 6, 5, 5, 4, 3, 2, 1, 1,
+ 0, -6, -12, -16, -18, -20, -21, -20,
+ -20, -18, -16, -13, -10, -8, -5, -2,
+ 127, 126, 125, 121, 114, 107, 99, 89,
+ 79, 68, 57, 46, 35, 25, 16, 8,
+};
+
+static const u8 filter_y_vert_tap4[] = {
+ 0, -3, -6, -8, -8, -8, -8, -7,
+ -6, -5, -4, -3, -2, -1, -1, 0,
+ 127, 126, 124, 118, 111, 102, 92, 81,
+ 70, 59, 48, 37, 27, 19, 11, 5,
+ 0, 5, 11, 19, 27, 37, 48, 59,
+ 70, 81, 92, 102, 111, 118, 124, 126,
+ 0, 0, -1, -1, -2, -3, -4, -5,
+ -6, -7, -8, -8, -8, -8, -6, -3,
+};
+
+static const u8 filter_cr_horiz_tap4[] = {
+ 0, -3, -6, -8, -8, -8, -8, -7,
+ -6, -5, -4, -3, -2, -1, -1, 0,
+ 127, 126, 124, 118, 111, 102, 92, 81,
+ 70, 59, 48, 37, 27, 19, 11, 5,
+};
+
+static inline u32 vp_reg_read(struct mixer_resources *res, u32 reg_id)
+{
+ return readl(res->vp_regs + reg_id);
+}
+
+static inline void vp_reg_write(struct mixer_resources *res, u32 reg_id,
+ u32 val)
+{
+ writel(val, res->vp_regs + reg_id);
+}
+
+static inline void vp_reg_writemask(struct mixer_resources *res, u32 reg_id,
+ u32 val, u32 mask)
+{
+ u32 old = vp_reg_read(res, reg_id);
+
+ val = (val & mask) | (old & ~mask);
+ writel(val, res->vp_regs + reg_id);
+}
+
+static inline u32 mixer_reg_read(struct mixer_resources *res, u32 reg_id)
+{
+ return readl(res->mixer_regs + reg_id);
+}
+
+static inline void mixer_reg_write(struct mixer_resources *res, u32 reg_id,
+ u32 val)
+{
+ writel(val, res->mixer_regs + reg_id);
+}
+
+static inline void mixer_reg_writemask(struct mixer_resources *res,
+ u32 reg_id, u32 val, u32 mask)
+{
+ u32 old = mixer_reg_read(res, reg_id);
+
+ val = (val & mask) | (old & ~mask);
+ writel(val, res->mixer_regs + reg_id);
+}
+
+static void mixer_regs_dump(struct mixer_context *ctx)
+{
+#define DUMPREG(reg_id) \
+do { \
+ DRM_DEBUG_KMS(#reg_id " = %08x\n", \
+ (u32)readl(ctx->mixer_res.mixer_regs + reg_id)); \
+} while (0)
+
+ DUMPREG(MXR_STATUS);
+ DUMPREG(MXR_CFG);
+ DUMPREG(MXR_INT_EN);
+ DUMPREG(MXR_INT_STATUS);
+
+ DUMPREG(MXR_LAYER_CFG);
+ DUMPREG(MXR_VIDEO_CFG);
+
+ DUMPREG(MXR_GRAPHIC0_CFG);
+ DUMPREG(MXR_GRAPHIC0_BASE);
+ DUMPREG(MXR_GRAPHIC0_SPAN);
+ DUMPREG(MXR_GRAPHIC0_WH);
+ DUMPREG(MXR_GRAPHIC0_SXY);
+ DUMPREG(MXR_GRAPHIC0_DXY);
+
+ DUMPREG(MXR_GRAPHIC1_CFG);
+ DUMPREG(MXR_GRAPHIC1_BASE);
+ DUMPREG(MXR_GRAPHIC1_SPAN);
+ DUMPREG(MXR_GRAPHIC1_WH);
+ DUMPREG(MXR_GRAPHIC1_SXY);
+ DUMPREG(MXR_GRAPHIC1_DXY);
+#undef DUMPREG
+}
+
+static void vp_regs_dump(struct mixer_context *ctx)
+{
+#define DUMPREG(reg_id) \
+do { \
+ DRM_DEBUG_KMS(#reg_id " = %08x\n", \
+ (u32) readl(ctx->mixer_res.vp_regs + reg_id)); \
+} while (0)
+
+ DUMPREG(VP_ENABLE);
+ DUMPREG(VP_SRESET);
+ DUMPREG(VP_SHADOW_UPDATE);
+ DUMPREG(VP_FIELD_ID);
+ DUMPREG(VP_MODE);
+ DUMPREG(VP_IMG_SIZE_Y);
+ DUMPREG(VP_IMG_SIZE_C);
+ DUMPREG(VP_PER_RATE_CTRL);
+ DUMPREG(VP_TOP_Y_PTR);
+ DUMPREG(VP_BOT_Y_PTR);
+ DUMPREG(VP_TOP_C_PTR);
+ DUMPREG(VP_BOT_C_PTR);
+ DUMPREG(VP_ENDIAN_MODE);
+ DUMPREG(VP_SRC_H_POSITION);
+ DUMPREG(VP_SRC_V_POSITION);
+ DUMPREG(VP_SRC_WIDTH);
+ DUMPREG(VP_SRC_HEIGHT);
+ DUMPREG(VP_DST_H_POSITION);
+ DUMPREG(VP_DST_V_POSITION);
+ DUMPREG(VP_DST_WIDTH);
+ DUMPREG(VP_DST_HEIGHT);
+ DUMPREG(VP_H_RATIO);
+ DUMPREG(VP_V_RATIO);
+
+#undef DUMPREG
+}
+
+static inline void vp_filter_set(struct mixer_resources *res,
+ int reg_id, const u8 *data, unsigned int size)
+{
+ /* assure 4-byte align */
+ BUG_ON(size & 3);
+ for (; size; size -= 4, reg_id += 4, data += 4) {
+ u32 val = (data[0] << 24) | (data[1] << 16) |
+ (data[2] << 8) | data[3];
+ vp_reg_write(res, reg_id, val);
+ }
+}
+
+static void vp_default_filter(struct mixer_resources *res)
+{
+ vp_filter_set(res, VP_POLY8_Y0_LL,
+ filter_y_horiz_tap8, sizeof filter_y_horiz_tap8);
+ vp_filter_set(res, VP_POLY4_Y0_LL,
+ filter_y_vert_tap4, sizeof filter_y_vert_tap4);
+ vp_filter_set(res, VP_POLY4_C0_LL,
+ filter_cr_horiz_tap4, sizeof filter_cr_horiz_tap4);
+}
+
+static void mixer_vsync_set_update(struct mixer_context *ctx, bool enable)
+{
+ struct mixer_resources *res = &ctx->mixer_res;
+
+ /* block update on vsync */
+ mixer_reg_writemask(res, MXR_STATUS, enable ?
+ MXR_STATUS_SYNC_ENABLE : 0, MXR_STATUS_SYNC_ENABLE);
+
+ vp_reg_write(res, VP_SHADOW_UPDATE, enable ?
+ VP_SHADOW_UPDATE_ENABLE : 0);
+}
+
+static void mixer_cfg_scan(struct mixer_context *ctx, unsigned int height)
+{
+ struct mixer_resources *res = &ctx->mixer_res;
+ u32 val;
+
+	/* choosing between interlaced and progressive mode */
+ val = (ctx->interlace ? MXR_CFG_SCAN_INTERLACE :
+ MXR_CFG_SCAN_PROGRASSIVE);
+
+	/* choosing between proper HD and SD mode */
+ if (height == 480)
+ val |= MXR_CFG_SCAN_NTSC | MXR_CFG_SCAN_SD;
+ else if (height == 576)
+ val |= MXR_CFG_SCAN_PAL | MXR_CFG_SCAN_SD;
+ else if (height == 720)
+ val |= MXR_CFG_SCAN_HD_720 | MXR_CFG_SCAN_HD;
+ else if (height == 1080)
+ val |= MXR_CFG_SCAN_HD_1080 | MXR_CFG_SCAN_HD;
+ else
+ val |= MXR_CFG_SCAN_HD_720 | MXR_CFG_SCAN_HD;
+
+ mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_SCAN_MASK);
+}
+
+static void mixer_cfg_rgb_fmt(struct mixer_context *ctx, unsigned int height)
+{
+ struct mixer_resources *res = &ctx->mixer_res;
+ u32 val;
+
+ if (height == 480) {
+ val = MXR_CFG_RGB601_0_255;
+ } else if (height == 576) {
+ val = MXR_CFG_RGB601_0_255;
+ } else if (height == 720) {
+ val = MXR_CFG_RGB709_16_235;
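+		/* color conversion coefficients for the BT.709 limited range format selected above */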
+ mixer_reg_write(res, MXR_CM_COEFF_Y,
+ (1 << 30) | (94 << 20) | (314 << 10) |
+ (32 << 0));
+ mixer_reg_write(res, MXR_CM_COEFF_CB,
+ (972 << 20) | (851 << 10) | (225 << 0));
+ mixer_reg_write(res, MXR_CM_COEFF_CR,
+ (225 << 20) | (820 << 10) | (1004 << 0));
+ } else if (height == 1080) {
+ val = MXR_CFG_RGB709_16_235;
+ mixer_reg_write(res, MXR_CM_COEFF_Y,
+ (1 << 30) | (94 << 20) | (314 << 10) |
+ (32 << 0));
+ mixer_reg_write(res, MXR_CM_COEFF_CB,
+ (972 << 20) | (851 << 10) | (225 << 0));
+ mixer_reg_write(res, MXR_CM_COEFF_CR,
+ (225 << 20) | (820 << 10) | (1004 << 0));
+ } else {
+ val = MXR_CFG_RGB709_16_235;
+ mixer_reg_write(res, MXR_CM_COEFF_Y,
+ (1 << 30) | (94 << 20) | (314 << 10) |
+ (32 << 0));
+ mixer_reg_write(res, MXR_CM_COEFF_CB,
+ (972 << 20) | (851 << 10) | (225 << 0));
+ mixer_reg_write(res, MXR_CM_COEFF_CR,
+ (225 << 20) | (820 << 10) | (1004 << 0));
+ }
+
+ mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_RGB_FMT_MASK);
+}
+
+static void mixer_cfg_layer(struct mixer_context *ctx, int win, bool enable)
+{
+ struct mixer_resources *res = &ctx->mixer_res;
+ u32 val = enable ? ~0 : 0;
+
+ switch (win) {
+ case 0:
+ mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_GRP0_ENABLE);
+ break;
+ case 1:
+ mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_GRP1_ENABLE);
+ break;
+ case 2:
+ vp_reg_writemask(res, VP_ENABLE, val, VP_ENABLE_ON);
+ mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_VP_ENABLE);
+ break;
+ }
+}
+
+static void mixer_run(struct mixer_context *ctx)
+{
+ struct mixer_resources *res = &ctx->mixer_res;
+
+ mixer_reg_writemask(res, MXR_STATUS, ~0, MXR_STATUS_REG_RUN);
+
+ mixer_regs_dump(ctx);
+}
+
+static void vp_video_buffer(struct mixer_context *ctx, int win)
+{
+ struct mixer_resources *res = &ctx->mixer_res;
+ unsigned long flags;
+ struct hdmi_win_data *win_data;
+ unsigned int full_width, full_height, width, height;
+ unsigned int x_ratio, y_ratio;
+ unsigned int src_x_offset, src_y_offset, dst_x_offset, dst_y_offset;
+ unsigned int mode_width, mode_height;
+ unsigned int buf_num;
+ dma_addr_t luma_addr[2], chroma_addr[2];
+ bool tiled_mode = false;
+ bool crcb_mode = false;
+ u32 val;
+
+ win_data = &ctx->win_data[win];
+
+ switch (win_data->pixel_format) {
+ case DRM_FORMAT_NV12MT:
+ tiled_mode = true;
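+		/* fall through - NV12MT uses the same two-buffer NV12M setup */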
+ case DRM_FORMAT_NV12M:
+ crcb_mode = false;
+ buf_num = 2;
+ break;
+ /* TODO: single buffer format NV12, NV21 */
+ default:
+ /* ignore pixel format at disable time */
+ if (!win_data->dma_addr)
+ break;
+
+ DRM_ERROR("pixel format for vp is wrong [%d].\n",
+ win_data->pixel_format);
+ return;
+ }
+
+ full_width = win_data->fb_width;
+ full_height = win_data->fb_height;
+ width = win_data->crtc_width;
+ height = win_data->crtc_height;
+ mode_width = win_data->mode_width;
+ mode_height = win_data->mode_height;
+
+ /* scaling feature: (src << 16) / dst */
+ x_ratio = (width << 16) / width;
+ y_ratio = (height << 16) / height;
+
+ src_x_offset = win_data->fb_x;
+ src_y_offset = win_data->fb_y;
+ dst_x_offset = win_data->crtc_x;
+ dst_y_offset = win_data->crtc_y;
+
+ if (buf_num == 2) {
+ luma_addr[0] = win_data->dma_addr;
+ chroma_addr[0] = win_data->chroma_dma_addr;
+ } else {
+ luma_addr[0] = win_data->dma_addr;
+ chroma_addr[0] = win_data->dma_addr
+ + (full_width * full_height);
+ }
+
+ if (win_data->scan_flags & DRM_MODE_FLAG_INTERLACE) {
+ ctx->interlace = true;
+ if (tiled_mode) {
+ luma_addr[1] = luma_addr[0] + 0x40;
+ chroma_addr[1] = chroma_addr[0] + 0x40;
+ } else {
+ luma_addr[1] = luma_addr[0] + full_width;
+ chroma_addr[1] = chroma_addr[0] + full_width;
+ }
+ } else {
+ ctx->interlace = false;
+ luma_addr[1] = 0;
+ chroma_addr[1] = 0;
+ }
+
+ spin_lock_irqsave(&res->reg_slock, flags);
+ mixer_vsync_set_update(ctx, false);
+
+ /* interlace or progressive scan mode */
+ val = (ctx->interlace ? ~0 : 0);
+ vp_reg_writemask(res, VP_MODE, val, VP_MODE_LINE_SKIP);
+
+ /* setup format */
+ val = (crcb_mode ? VP_MODE_NV21 : VP_MODE_NV12);
+ val |= (tiled_mode ? VP_MODE_MEM_TILED : VP_MODE_MEM_LINEAR);
+ vp_reg_writemask(res, VP_MODE, val, VP_MODE_FMT_MASK);
+
+ /* setting size of input image */
+ vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(full_width) |
+ VP_IMG_VSIZE(full_height));
+	/* chroma height has to be reduced by 2 to avoid chroma distortions */
+ vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(full_width) |
+ VP_IMG_VSIZE(full_height / 2));
+
+ vp_reg_write(res, VP_SRC_WIDTH, width);
+ vp_reg_write(res, VP_SRC_HEIGHT, height);
+ vp_reg_write(res, VP_SRC_H_POSITION,
+ VP_SRC_H_POSITION_VAL(src_x_offset));
+ vp_reg_write(res, VP_SRC_V_POSITION, src_y_offset);
+
+ vp_reg_write(res, VP_DST_WIDTH, width);
+ vp_reg_write(res, VP_DST_H_POSITION, dst_x_offset);
+ if (ctx->interlace) {
+ vp_reg_write(res, VP_DST_HEIGHT, height / 2);
+ vp_reg_write(res, VP_DST_V_POSITION, dst_y_offset / 2);
+ } else {
+ vp_reg_write(res, VP_DST_HEIGHT, height);
+ vp_reg_write(res, VP_DST_V_POSITION, dst_y_offset);
+ }
+
+ vp_reg_write(res, VP_H_RATIO, x_ratio);
+ vp_reg_write(res, VP_V_RATIO, y_ratio);
+
+ vp_reg_write(res, VP_ENDIAN_MODE, VP_ENDIAN_MODE_LITTLE);
+
+ /* set buffer address to vp */
+ vp_reg_write(res, VP_TOP_Y_PTR, luma_addr[0]);
+ vp_reg_write(res, VP_BOT_Y_PTR, luma_addr[1]);
+ vp_reg_write(res, VP_TOP_C_PTR, chroma_addr[0]);
+ vp_reg_write(res, VP_BOT_C_PTR, chroma_addr[1]);
+
+ mixer_cfg_scan(ctx, mode_height);
+ mixer_cfg_rgb_fmt(ctx, mode_height);
+ mixer_cfg_layer(ctx, win, true);
+ mixer_run(ctx);
+
+ mixer_vsync_set_update(ctx, true);
+ spin_unlock_irqrestore(&res->reg_slock, flags);
+
+ vp_regs_dump(ctx);
+}
+
+static void mixer_graph_buffer(struct mixer_context *ctx, int win)
+{
+ struct mixer_resources *res = &ctx->mixer_res;
+ unsigned long flags;
+ struct hdmi_win_data *win_data;
+ unsigned int full_width, width, height;
+ unsigned int x_ratio, y_ratio;
+ unsigned int src_x_offset, src_y_offset, dst_x_offset, dst_y_offset;
+ unsigned int mode_width, mode_height;
+ dma_addr_t dma_addr;
+ unsigned int fmt;
+ u32 val;
+
+ win_data = &ctx->win_data[win];
+
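+	/* raw pixel format codes programmed via MXR_GRP_CFG_FORMAT_VAL() below */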
+ #define RGB565 4
+ #define ARGB1555 5
+ #define ARGB4444 6
+ #define ARGB8888 7
+
+ switch (win_data->bpp) {
+ case 16:
+ fmt = ARGB4444;
+ break;
+ case 32:
+ fmt = ARGB8888;
+ break;
+ default:
+ fmt = ARGB8888;
+ }
+
+ dma_addr = win_data->dma_addr;
+ full_width = win_data->fb_width;
+ width = win_data->crtc_width;
+ height = win_data->crtc_height;
+ mode_width = win_data->mode_width;
+ mode_height = win_data->mode_height;
+
+ /* 2x scaling feature */
+ x_ratio = 0;
+ y_ratio = 0;
+
+ src_x_offset = win_data->fb_x;
+ src_y_offset = win_data->fb_y;
+ dst_x_offset = win_data->crtc_x;
+ dst_y_offset = win_data->crtc_y;
+
+ /* converting dma address base and source offset */
+ dma_addr = dma_addr
+ + (src_x_offset * win_data->bpp >> 3)
+ + (src_y_offset * full_width * win_data->bpp >> 3);
+ src_x_offset = 0;
+ src_y_offset = 0;
+
+ if (win_data->scan_flags & DRM_MODE_FLAG_INTERLACE)
+ ctx->interlace = true;
+ else
+ ctx->interlace = false;
+
+ spin_lock_irqsave(&res->reg_slock, flags);
+ mixer_vsync_set_update(ctx, false);
+
+ /* setup format */
+ mixer_reg_writemask(res, MXR_GRAPHIC_CFG(win),
+ MXR_GRP_CFG_FORMAT_VAL(fmt), MXR_GRP_CFG_FORMAT_MASK);
+
+ /* setup geometry */
+ mixer_reg_write(res, MXR_GRAPHIC_SPAN(win), full_width);
+
+ val = MXR_GRP_WH_WIDTH(width);
+ val |= MXR_GRP_WH_HEIGHT(height);
+ val |= MXR_GRP_WH_H_SCALE(x_ratio);
+ val |= MXR_GRP_WH_V_SCALE(y_ratio);
+ mixer_reg_write(res, MXR_GRAPHIC_WH(win), val);
+
+ /* setup offsets in source image */
+ val = MXR_GRP_SXY_SX(src_x_offset);
+ val |= MXR_GRP_SXY_SY(src_y_offset);
+ mixer_reg_write(res, MXR_GRAPHIC_SXY(win), val);
+
+ /* setup offsets in display image */
+ val = MXR_GRP_DXY_DX(dst_x_offset);
+ val |= MXR_GRP_DXY_DY(dst_y_offset);
+ mixer_reg_write(res, MXR_GRAPHIC_DXY(win), val);
+
+ /* set buffer address to mixer */
+ mixer_reg_write(res, MXR_GRAPHIC_BASE(win), dma_addr);
+
+ mixer_cfg_scan(ctx, mode_height);
+ mixer_cfg_rgb_fmt(ctx, mode_height);
+ mixer_cfg_layer(ctx, win, true);
+ mixer_run(ctx);
+
+ mixer_vsync_set_update(ctx, true);
+ spin_unlock_irqrestore(&res->reg_slock, flags);
+}
+
+static void vp_win_reset(struct mixer_context *ctx)
+{
+ struct mixer_resources *res = &ctx->mixer_res;
+ int tries = 100;
+
+ vp_reg_write(res, VP_SRESET, VP_SRESET_PROCESSING);
+ for (tries = 100; tries; --tries) {
+ /* waiting until VP_SRESET_PROCESSING is 0 */
+ if (~vp_reg_read(res, VP_SRESET) & VP_SRESET_PROCESSING)
+ break;
+ mdelay(10);
+ }
+ WARN(tries == 0, "failed to reset Video Processor\n");
+}
+
+static int mixer_enable_vblank(void *ctx, int pipe)
+{
+ struct mixer_context *mixer_ctx = ctx;
+ struct mixer_resources *res = &mixer_ctx->mixer_res;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ mixer_ctx->pipe = pipe;
+
+ /* enable vsync interrupt */
+ mixer_reg_writemask(res, MXR_INT_EN, MXR_INT_EN_VSYNC,
+ MXR_INT_EN_VSYNC);
+
+ return 0;
+}
+
+static void mixer_disable_vblank(void *ctx)
+{
+ struct mixer_context *mixer_ctx = ctx;
+ struct mixer_resources *res = &mixer_ctx->mixer_res;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ /* disable vsync interrupt */
+ mixer_reg_writemask(res, MXR_INT_EN, 0, MXR_INT_EN_VSYNC);
+}
+
+static void mixer_win_mode_set(void *ctx,
+ struct exynos_drm_overlay *overlay)
+{
+ struct mixer_context *mixer_ctx = ctx;
+ struct hdmi_win_data *win_data;
+ int win;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ if (!overlay) {
+ DRM_ERROR("overlay is NULL\n");
+ return;
+ }
+
+ DRM_DEBUG_KMS("set [%d]x[%d] at (%d,%d) to [%d]x[%d] at (%d,%d)\n",
+ overlay->fb_width, overlay->fb_height,
+ overlay->fb_x, overlay->fb_y,
+ overlay->crtc_width, overlay->crtc_height,
+ overlay->crtc_x, overlay->crtc_y);
+
+ win = overlay->zpos;
+ if (win == DEFAULT_ZPOS)
+ win = mixer_ctx->default_win;
+
+	if (win < 0 || win >= HDMI_OVERLAY_NUMBER) {
+ DRM_ERROR("overlay plane[%d] is wrong\n", win);
+ return;
+ }
+
+ win_data = &mixer_ctx->win_data[win];
+
+ win_data->dma_addr = overlay->dma_addr[0];
+ win_data->vaddr = overlay->vaddr[0];
+ win_data->chroma_dma_addr = overlay->dma_addr[1];
+ win_data->chroma_vaddr = overlay->vaddr[1];
+ win_data->pixel_format = overlay->pixel_format;
+ win_data->bpp = overlay->bpp;
+
+ win_data->crtc_x = overlay->crtc_x;
+ win_data->crtc_y = overlay->crtc_y;
+ win_data->crtc_width = overlay->crtc_width;
+ win_data->crtc_height = overlay->crtc_height;
+
+ win_data->fb_x = overlay->fb_x;
+ win_data->fb_y = overlay->fb_y;
+ win_data->fb_width = overlay->fb_width;
+ win_data->fb_height = overlay->fb_height;
+
+ win_data->mode_width = overlay->mode_width;
+ win_data->mode_height = overlay->mode_height;
+
+ win_data->scan_flags = overlay->scan_flag;
+}
+
+static void mixer_win_commit(void *ctx, int zpos)
+{
+ struct mixer_context *mixer_ctx = ctx;
+ int win = zpos;
+
+ DRM_DEBUG_KMS("[%d] %s, win: %d\n", __LINE__, __func__, win);
+
+ if (win == DEFAULT_ZPOS)
+ win = mixer_ctx->default_win;
+
+	if (win < 0 || win >= HDMI_OVERLAY_NUMBER) {
+ DRM_ERROR("overlay plane[%d] is wrong\n", win);
+ return;
+ }
+
+ if (win > 1)
+ vp_video_buffer(mixer_ctx, win);
+ else
+ mixer_graph_buffer(mixer_ctx, win);
+}
+
+static void mixer_win_disable(void *ctx, int zpos)
+{
+ struct mixer_context *mixer_ctx = ctx;
+ struct mixer_resources *res = &mixer_ctx->mixer_res;
+ unsigned long flags;
+ int win = zpos;
+
+ DRM_DEBUG_KMS("[%d] %s, win: %d\n", __LINE__, __func__, win);
+
+ if (win == DEFAULT_ZPOS)
+ win = mixer_ctx->default_win;
+
+	if (win < 0 || win >= HDMI_OVERLAY_NUMBER) {
+ DRM_ERROR("overlay plane[%d] is wrong\n", win);
+ return;
+ }
+
+ spin_lock_irqsave(&res->reg_slock, flags);
+ mixer_vsync_set_update(mixer_ctx, false);
+
+ mixer_cfg_layer(mixer_ctx, win, false);
+
+ mixer_vsync_set_update(mixer_ctx, true);
+ spin_unlock_irqrestore(&res->reg_slock, flags);
+}
+
+static struct exynos_hdmi_overlay_ops overlay_ops = {
+ .enable_vblank = mixer_enable_vblank,
+ .disable_vblank = mixer_disable_vblank,
+ .win_mode_set = mixer_win_mode_set,
+ .win_commit = mixer_win_commit,
+ .win_disable = mixer_win_disable,
+};
+
+/* for pageflip event */
+static void mixer_finish_pageflip(struct drm_device *drm_dev, int crtc)
+{
+ struct exynos_drm_private *dev_priv = drm_dev->dev_private;
+ struct drm_pending_vblank_event *e, *t;
+ struct timeval now;
+ unsigned long flags;
+ bool is_checked = false;
+
+ spin_lock_irqsave(&drm_dev->event_lock, flags);
+
+ list_for_each_entry_safe(e, t, &dev_priv->pageflip_event_list,
+ base.link) {
+		/* if the event's pipe isn't the same as this crtc, ignore it */
+ if (crtc != e->pipe)
+ continue;
+
+ is_checked = true;
+ do_gettimeofday(&now);
+ e->event.sequence = 0;
+ e->event.tv_sec = now.tv_sec;
+ e->event.tv_usec = now.tv_usec;
+
+ list_move_tail(&e->base.link, &e->base.file_priv->event_list);
+ wake_up_interruptible(&e->base.file_priv->event_wait);
+ }
+
+ if (is_checked)
+ drm_vblank_put(drm_dev, crtc);
+
+ spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+}
+
+static irqreturn_t mixer_irq_handler(int irq, void *arg)
+{
+ struct exynos_drm_hdmi_context *drm_hdmi_ctx = arg;
+ struct mixer_context *ctx =
+ (struct mixer_context *)drm_hdmi_ctx->ctx;
+ struct mixer_resources *res = &ctx->mixer_res;
+ u32 val, val_base;
+
+ spin_lock(&res->reg_slock);
+
+ /* read interrupt status for handling and clearing flags for VSYNC */
+ val = mixer_reg_read(res, MXR_INT_STATUS);
+
+ /* handling VSYNC */
+ if (val & MXR_INT_STATUS_VSYNC) {
+		/* interlaced scan needs to check the shadow register */
+ if (ctx->interlace) {
+ val_base = mixer_reg_read(res, MXR_GRAPHIC_BASE_S(0));
+ if (ctx->win_data[0].dma_addr != val_base)
+ goto out;
+
+ val_base = mixer_reg_read(res, MXR_GRAPHIC_BASE_S(1));
+ if (ctx->win_data[1].dma_addr != val_base)
+ goto out;
+ }
+
+ drm_handle_vblank(drm_hdmi_ctx->drm_dev, ctx->pipe);
+ mixer_finish_pageflip(drm_hdmi_ctx->drm_dev, ctx->pipe);
+ }
+
+out:
+ /* clear interrupts */
+ if (~val & MXR_INT_EN_VSYNC) {
+		/* the vsync interrupt uses different bits for read and clear */
+ val &= ~MXR_INT_EN_VSYNC;
+ val |= MXR_INT_CLEAR_VSYNC;
+ }
+ mixer_reg_write(res, MXR_INT_STATUS, val);
+
+ spin_unlock(&res->reg_slock);
+
+ return IRQ_HANDLED;
+}
+
+static void mixer_win_reset(struct mixer_context *ctx)
+{
+ struct mixer_resources *res = &ctx->mixer_res;
+ unsigned long flags;
+ u32 val; /* value stored to register */
+
+ spin_lock_irqsave(&res->reg_slock, flags);
+ mixer_vsync_set_update(ctx, false);
+
+ mixer_reg_writemask(res, MXR_CFG, MXR_CFG_DST_HDMI, MXR_CFG_DST_MASK);
+
+ /* set output in RGB888 mode */
+ mixer_reg_writemask(res, MXR_CFG, MXR_CFG_OUT_RGB888, MXR_CFG_OUT_MASK);
+
+ /* 16 beat burst in DMA */
+ mixer_reg_writemask(res, MXR_STATUS, MXR_STATUS_16_BURST,
+ MXR_STATUS_BURST_MASK);
+
+ /* setting default layer priority: layer1 > video > layer0
+ * because typical usage scenario would be
+ * layer0 - framebuffer
+ * video - video overlay
+ * layer1 - OSD
+ */
+ val = MXR_LAYER_CFG_GRP0_VAL(1);
+ val |= MXR_LAYER_CFG_VP_VAL(2);
+ val |= MXR_LAYER_CFG_GRP1_VAL(3);
+ mixer_reg_write(res, MXR_LAYER_CFG, val);
+
+ /* setting background color */
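+	/* 0x008080 is black in YCbCr: Y = 0x00, Cb = Cr = 0x80 */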
+ mixer_reg_write(res, MXR_BG_COLOR0, 0x008080);
+ mixer_reg_write(res, MXR_BG_COLOR1, 0x008080);
+ mixer_reg_write(res, MXR_BG_COLOR2, 0x008080);
+
+ /* setting graphical layers */
+
+ val = MXR_GRP_CFG_COLOR_KEY_DISABLE; /* no blank key */
+ val |= MXR_GRP_CFG_WIN_BLEND_EN;
+ val |= MXR_GRP_CFG_ALPHA_VAL(0xff); /* non-transparent alpha */
+
+ /* the same configuration for both layers */
+ mixer_reg_write(res, MXR_GRAPHIC_CFG(0), val);
+
+ val |= MXR_GRP_CFG_BLEND_PRE_MUL;
+ val |= MXR_GRP_CFG_PIXEL_BLEND_EN;
+ mixer_reg_write(res, MXR_GRAPHIC_CFG(1), val);
+
+ /* configuration of Video Processor Registers */
+ vp_win_reset(ctx);
+ vp_default_filter(res);
+
+ /* disable all layers */
+ mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_GRP0_ENABLE);
+ mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_GRP1_ENABLE);
+ mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_VP_ENABLE);
+
+ mixer_vsync_set_update(ctx, true);
+ spin_unlock_irqrestore(&res->reg_slock, flags);
+}
+
+static void mixer_resource_poweron(struct mixer_context *ctx)
+{
+ struct mixer_resources *res = &ctx->mixer_res;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ clk_enable(res->mixer);
+ clk_enable(res->vp);
+ clk_enable(res->sclk_mixer);
+
+ mixer_win_reset(ctx);
+}
+
+static void mixer_resource_poweroff(struct mixer_context *ctx)
+{
+ struct mixer_resources *res = &ctx->mixer_res;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ clk_disable(res->mixer);
+ clk_disable(res->vp);
+ clk_disable(res->sclk_mixer);
+}
+
+static int mixer_runtime_resume(struct device *dev)
+{
+ struct exynos_drm_hdmi_context *ctx = get_mixer_context(dev);
+
+ DRM_DEBUG_KMS("resume - start\n");
+
+ mixer_resource_poweron((struct mixer_context *)ctx->ctx);
+
+ return 0;
+}
+
+static int mixer_runtime_suspend(struct device *dev)
+{
+ struct exynos_drm_hdmi_context *ctx = get_mixer_context(dev);
+
+ DRM_DEBUG_KMS("suspend - start\n");
+
+ mixer_resource_poweroff((struct mixer_context *)ctx->ctx);
+
+ return 0;
+}
+
+static const struct dev_pm_ops mixer_pm_ops = {
+ .runtime_suspend = mixer_runtime_suspend,
+ .runtime_resume = mixer_runtime_resume,
+};
+
+static int __devinit mixer_resources_init(struct exynos_drm_hdmi_context *ctx,
+ struct platform_device *pdev)
+{
+ struct mixer_context *mixer_ctx =
+ (struct mixer_context *)ctx->ctx;
+ struct device *dev = &pdev->dev;
+ struct mixer_resources *mixer_res = &mixer_ctx->mixer_res;
+ struct resource *res;
+ int ret;
+
+ mixer_res->dev = dev;
+ spin_lock_init(&mixer_res->reg_slock);
+
+ mixer_res->mixer = clk_get(dev, "mixer");
+ if (IS_ERR_OR_NULL(mixer_res->mixer)) {
+ dev_err(dev, "failed to get clock 'mixer'\n");
+ ret = -ENODEV;
+ goto fail;
+ }
+ mixer_res->vp = clk_get(dev, "vp");
+ if (IS_ERR_OR_NULL(mixer_res->vp)) {
+ dev_err(dev, "failed to get clock 'vp'\n");
+ ret = -ENODEV;
+ goto fail;
+ }
+ mixer_res->sclk_mixer = clk_get(dev, "sclk_mixer");
+ if (IS_ERR_OR_NULL(mixer_res->sclk_mixer)) {
+ dev_err(dev, "failed to get clock 'sclk_mixer'\n");
+ ret = -ENODEV;
+ goto fail;
+ }
+ mixer_res->sclk_hdmi = clk_get(dev, "sclk_hdmi");
+ if (IS_ERR_OR_NULL(mixer_res->sclk_hdmi)) {
+ dev_err(dev, "failed to get clock 'sclk_hdmi'\n");
+ ret = -ENODEV;
+ goto fail;
+ }
+ mixer_res->sclk_dac = clk_get(dev, "sclk_dac");
+ if (IS_ERR_OR_NULL(mixer_res->sclk_dac)) {
+ dev_err(dev, "failed to get clock 'sclk_dac'\n");
+ ret = -ENODEV;
+ goto fail;
+ }
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mxr");
+ if (res == NULL) {
+ dev_err(dev, "get memory resource failed.\n");
+ ret = -ENXIO;
+ goto fail;
+ }
+
+ clk_set_parent(mixer_res->sclk_mixer, mixer_res->sclk_hdmi);
+
+ mixer_res->mixer_regs = ioremap(res->start, resource_size(res));
+ if (mixer_res->mixer_regs == NULL) {
+ dev_err(dev, "register mapping failed.\n");
+ ret = -ENXIO;
+ goto fail;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vp");
+ if (res == NULL) {
+ dev_err(dev, "get memory resource failed.\n");
+ ret = -ENXIO;
+ goto fail_mixer_regs;
+ }
+
+ mixer_res->vp_regs = ioremap(res->start, resource_size(res));
+ if (mixer_res->vp_regs == NULL) {
+ dev_err(dev, "register mapping failed.\n");
+ ret = -ENXIO;
+ goto fail_mixer_regs;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "irq");
+ if (res == NULL) {
+ dev_err(dev, "get interrupt resource failed.\n");
+ ret = -ENXIO;
+ goto fail_vp_regs;
+ }
+
+ ret = request_irq(res->start, mixer_irq_handler, 0, "drm_mixer", ctx);
+ if (ret) {
+ dev_err(dev, "request interrupt failed.\n");
+ goto fail_vp_regs;
+ }
+ mixer_res->irq = res->start;
+
+ return 0;
+
+fail_vp_regs:
+ iounmap(mixer_res->vp_regs);
+
+fail_mixer_regs:
+ iounmap(mixer_res->mixer_regs);
+
+fail:
+ if (!IS_ERR_OR_NULL(mixer_res->sclk_dac))
+ clk_put(mixer_res->sclk_dac);
+ if (!IS_ERR_OR_NULL(mixer_res->sclk_hdmi))
+ clk_put(mixer_res->sclk_hdmi);
+ if (!IS_ERR_OR_NULL(mixer_res->sclk_mixer))
+ clk_put(mixer_res->sclk_mixer);
+ if (!IS_ERR_OR_NULL(mixer_res->vp))
+ clk_put(mixer_res->vp);
+ if (!IS_ERR_OR_NULL(mixer_res->mixer))
+ clk_put(mixer_res->mixer);
+ mixer_res->dev = NULL;
+ return ret;
+}
+
+static void mixer_resources_cleanup(struct mixer_context *ctx)
+{
+ struct mixer_resources *res = &ctx->mixer_res;
+
+ disable_irq(res->irq);
+ free_irq(res->irq, ctx);
+
+ iounmap(res->vp_regs);
+ iounmap(res->mixer_regs);
+}
+
+static int __devinit mixer_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct exynos_drm_hdmi_context *drm_hdmi_ctx;
+ struct mixer_context *ctx;
+ int ret;
+
+ dev_info(dev, "probe start\n");
+
+ drm_hdmi_ctx = kzalloc(sizeof(*drm_hdmi_ctx), GFP_KERNEL);
+ if (!drm_hdmi_ctx) {
+ DRM_ERROR("failed to allocate common hdmi context.\n");
+ return -ENOMEM;
+ }
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ DRM_ERROR("failed to alloc mixer context.\n");
+ kfree(drm_hdmi_ctx);
+ return -ENOMEM;
+ }
+
+ drm_hdmi_ctx->ctx = (void *)ctx;
+
+ platform_set_drvdata(pdev, drm_hdmi_ctx);
+
+ /* acquire resources: regs, irqs, clocks */
+ ret = mixer_resources_init(drm_hdmi_ctx, pdev);
+ if (ret)
+ goto fail;
+
+ /* register specific callback point to common hdmi. */
+ exynos_drm_overlay_ops_register(&overlay_ops);
+
+ mixer_resource_poweron(ctx);
+
+ return 0;
+
+fail:
+ dev_info(dev, "probe failed\n");
+ return ret;
+}
+
+static int mixer_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct exynos_drm_hdmi_context *drm_hdmi_ctx =
+ platform_get_drvdata(pdev);
+ struct mixer_context *ctx = (struct mixer_context *)drm_hdmi_ctx->ctx;
+
+	dev_info(dev, "remove successful\n");
+
+ mixer_resource_poweroff(ctx);
+ mixer_resources_cleanup(ctx);
+
+ return 0;
+}
+
+struct platform_driver mixer_driver = {
+ .driver = {
+ .name = "s5p-mixer",
+ .owner = THIS_MODULE,
+ .pm = &mixer_pm_ops,
+ },
+ .probe = mixer_probe,
+ .remove = __devexit_p(mixer_remove),
+};
+EXPORT_SYMBOL(mixer_driver);
+
+MODULE_AUTHOR("Seung-Woo Kim <sw0312.kim@samsung.com>");
+MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
+MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
+MODULE_DESCRIPTION("Samsung DRM HDMI mixer Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.h b/drivers/gpu/drm/exynos/exynos_mixer.h
new file mode 100644
index 000000000000..cebacfefc077
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_mixer.h
@@ -0,0 +1,92 @@
+/*
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Authors:
+ * Seung-Woo Kim <sw0312.kim@samsung.com>
+ * Inki Dae <inki.dae@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_MIXER_H_
+#define _EXYNOS_MIXER_H_
+
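+/* two graphic windows (0 and 1) plus the video processor window (2) */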
+#define HDMI_OVERLAY_NUMBER 3
+
+struct hdmi_win_data {
+ dma_addr_t dma_addr;
+ void __iomem *vaddr;
+ dma_addr_t chroma_dma_addr;
+ void __iomem *chroma_vaddr;
+ uint32_t pixel_format;
+ unsigned int bpp;
+ unsigned int crtc_x;
+ unsigned int crtc_y;
+ unsigned int crtc_width;
+ unsigned int crtc_height;
+ unsigned int fb_x;
+ unsigned int fb_y;
+ unsigned int fb_width;
+ unsigned int fb_height;
+ unsigned int mode_width;
+ unsigned int mode_height;
+ unsigned int scan_flags;
+};
+
+struct mixer_resources {
+ struct device *dev;
+ /** interrupt index */
+ int irq;
+ /** pointer to Mixer registers */
+ void __iomem *mixer_regs;
+ /** pointer to Video Processor registers */
+ void __iomem *vp_regs;
+ /** spinlock for protection of registers */
+ spinlock_t reg_slock;
+ /** other resources */
+ struct clk *mixer;
+ struct clk *vp;
+ struct clk *sclk_mixer;
+ struct clk *sclk_hdmi;
+ struct clk *sclk_dac;
+};
+
+struct mixer_context {
+ unsigned int default_win;
+ struct fb_videomode *default_timing;
+ unsigned int default_bpp;
+
+ /** mixer interrupt */
+ unsigned int irq;
+ /** current crtc pipe for vblank */
+ int pipe;
+ /** interlace scan mode */
+ bool interlace;
+ /** vp enabled status */
+ bool vp_enabled;
+
+ /** mixer and vp resources */
+ struct mixer_resources mixer_res;
+
+ /** overlay window data */
+ struct hdmi_win_data win_data[HDMI_OVERLAY_NUMBER];
+};
+
+#endif
diff --git a/drivers/gpu/drm/exynos/regs-hdmi.h b/drivers/gpu/drm/exynos/regs-hdmi.h
new file mode 100644
index 000000000000..72e6b52be740
--- /dev/null
+++ b/drivers/gpu/drm/exynos/regs-hdmi.h
@@ -0,0 +1,147 @@
+/*
+ *
+ * Cloned from drivers/media/video/s5p-tv/regs-hdmi.h
+ *
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * HDMI register header file for Samsung TVOUT driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef SAMSUNG_REGS_HDMI_H
+#define SAMSUNG_REGS_HDMI_H
+
+/*
+ * Register part
+*/
+
+#define HDMI_CTRL_BASE(x) ((x) + 0x00000000)
+#define HDMI_CORE_BASE(x) ((x) + 0x00010000)
+#define HDMI_TG_BASE(x) ((x) + 0x00050000)
+
+/* Control registers */
+#define HDMI_INTC_CON HDMI_CTRL_BASE(0x0000)
+#define HDMI_INTC_FLAG HDMI_CTRL_BASE(0x0004)
+#define HDMI_HPD_STATUS HDMI_CTRL_BASE(0x000C)
+#define HDMI_PHY_RSTOUT HDMI_CTRL_BASE(0x0014)
+#define HDMI_PHY_VPLL HDMI_CTRL_BASE(0x0018)
+#define HDMI_PHY_CMU HDMI_CTRL_BASE(0x001C)
+#define HDMI_CORE_RSTOUT HDMI_CTRL_BASE(0x0020)
+
+/* Core registers */
+#define HDMI_CON_0 HDMI_CORE_BASE(0x0000)
+#define HDMI_CON_1 HDMI_CORE_BASE(0x0004)
+#define HDMI_CON_2 HDMI_CORE_BASE(0x0008)
+#define HDMI_SYS_STATUS HDMI_CORE_BASE(0x0010)
+#define HDMI_PHY_STATUS HDMI_CORE_BASE(0x0014)
+#define HDMI_STATUS_EN HDMI_CORE_BASE(0x0020)
+#define HDMI_HPD HDMI_CORE_BASE(0x0030)
+#define HDMI_MODE_SEL HDMI_CORE_BASE(0x0040)
+#define HDMI_BLUE_SCREEN_0 HDMI_CORE_BASE(0x0050)
+#define HDMI_BLUE_SCREEN_1 HDMI_CORE_BASE(0x0054)
+#define HDMI_BLUE_SCREEN_2 HDMI_CORE_BASE(0x0058)
+#define HDMI_H_BLANK_0 HDMI_CORE_BASE(0x00A0)
+#define HDMI_H_BLANK_1 HDMI_CORE_BASE(0x00A4)
+#define HDMI_V_BLANK_0 HDMI_CORE_BASE(0x00B0)
+#define HDMI_V_BLANK_1 HDMI_CORE_BASE(0x00B4)
+#define HDMI_V_BLANK_2 HDMI_CORE_BASE(0x00B8)
+#define HDMI_H_V_LINE_0 HDMI_CORE_BASE(0x00C0)
+#define HDMI_H_V_LINE_1 HDMI_CORE_BASE(0x00C4)
+#define HDMI_H_V_LINE_2 HDMI_CORE_BASE(0x00C8)
+#define HDMI_VSYNC_POL HDMI_CORE_BASE(0x00E4)
+#define HDMI_INT_PRO_MODE HDMI_CORE_BASE(0x00E8)
+#define HDMI_V_BLANK_F_0 HDMI_CORE_BASE(0x0110)
+#define HDMI_V_BLANK_F_1 HDMI_CORE_BASE(0x0114)
+#define HDMI_V_BLANK_F_2 HDMI_CORE_BASE(0x0118)
+#define HDMI_H_SYNC_GEN_0 HDMI_CORE_BASE(0x0120)
+#define HDMI_H_SYNC_GEN_1 HDMI_CORE_BASE(0x0124)
+#define HDMI_H_SYNC_GEN_2 HDMI_CORE_BASE(0x0128)
+#define HDMI_V_SYNC_GEN_1_0 HDMI_CORE_BASE(0x0130)
+#define HDMI_V_SYNC_GEN_1_1 HDMI_CORE_BASE(0x0134)
+#define HDMI_V_SYNC_GEN_1_2 HDMI_CORE_BASE(0x0138)
+#define HDMI_V_SYNC_GEN_2_0 HDMI_CORE_BASE(0x0140)
+#define HDMI_V_SYNC_GEN_2_1 HDMI_CORE_BASE(0x0144)
+#define HDMI_V_SYNC_GEN_2_2 HDMI_CORE_BASE(0x0148)
+#define HDMI_V_SYNC_GEN_3_0 HDMI_CORE_BASE(0x0150)
+#define HDMI_V_SYNC_GEN_3_1 HDMI_CORE_BASE(0x0154)
+#define HDMI_V_SYNC_GEN_3_2 HDMI_CORE_BASE(0x0158)
+#define HDMI_ACR_CON HDMI_CORE_BASE(0x0180)
+#define HDMI_AVI_CON HDMI_CORE_BASE(0x0300)
+#define HDMI_AVI_BYTE(n) HDMI_CORE_BASE(0x0320 + 4 * (n))
+#define HDMI_DC_CONTROL HDMI_CORE_BASE(0x05C0)
+#define HDMI_VIDEO_PATTERN_GEN HDMI_CORE_BASE(0x05C4)
+#define HDMI_HPD_GEN HDMI_CORE_BASE(0x05C8)
+#define HDMI_AUI_CON HDMI_CORE_BASE(0x0360)
+#define HDMI_SPD_CON HDMI_CORE_BASE(0x0400)
+
+/* Timing generator registers */
+#define HDMI_TG_CMD HDMI_TG_BASE(0x0000)
+#define HDMI_TG_H_FSZ_L HDMI_TG_BASE(0x0018)
+#define HDMI_TG_H_FSZ_H HDMI_TG_BASE(0x001C)
+#define HDMI_TG_HACT_ST_L HDMI_TG_BASE(0x0020)
+#define HDMI_TG_HACT_ST_H HDMI_TG_BASE(0x0024)
+#define HDMI_TG_HACT_SZ_L HDMI_TG_BASE(0x0028)
+#define HDMI_TG_HACT_SZ_H HDMI_TG_BASE(0x002C)
+#define HDMI_TG_V_FSZ_L HDMI_TG_BASE(0x0030)
+#define HDMI_TG_V_FSZ_H HDMI_TG_BASE(0x0034)
+#define HDMI_TG_VSYNC_L HDMI_TG_BASE(0x0038)
+#define HDMI_TG_VSYNC_H HDMI_TG_BASE(0x003C)
+#define HDMI_TG_VSYNC2_L HDMI_TG_BASE(0x0040)
+#define HDMI_TG_VSYNC2_H HDMI_TG_BASE(0x0044)
+#define HDMI_TG_VACT_ST_L HDMI_TG_BASE(0x0048)
+#define HDMI_TG_VACT_ST_H HDMI_TG_BASE(0x004C)
+#define HDMI_TG_VACT_SZ_L HDMI_TG_BASE(0x0050)
+#define HDMI_TG_VACT_SZ_H HDMI_TG_BASE(0x0054)
+#define HDMI_TG_FIELD_CHG_L HDMI_TG_BASE(0x0058)
+#define HDMI_TG_FIELD_CHG_H HDMI_TG_BASE(0x005C)
+#define HDMI_TG_VACT_ST2_L HDMI_TG_BASE(0x0060)
+#define HDMI_TG_VACT_ST2_H HDMI_TG_BASE(0x0064)
+#define HDMI_TG_VSYNC_TOP_HDMI_L HDMI_TG_BASE(0x0078)
+#define HDMI_TG_VSYNC_TOP_HDMI_H HDMI_TG_BASE(0x007C)
+#define HDMI_TG_VSYNC_BOT_HDMI_L HDMI_TG_BASE(0x0080)
+#define HDMI_TG_VSYNC_BOT_HDMI_H HDMI_TG_BASE(0x0084)
+#define HDMI_TG_FIELD_TOP_HDMI_L HDMI_TG_BASE(0x0088)
+#define HDMI_TG_FIELD_TOP_HDMI_H HDMI_TG_BASE(0x008C)
+#define HDMI_TG_FIELD_BOT_HDMI_L HDMI_TG_BASE(0x0090)
+#define HDMI_TG_FIELD_BOT_HDMI_H HDMI_TG_BASE(0x0094)
+
+/*
+ * Bit definition part
+ */
+
+/* HDMI_INTC_CON */
+#define HDMI_INTC_EN_GLOBAL (1 << 6)
+#define HDMI_INTC_EN_HPD_PLUG (1 << 3)
+#define HDMI_INTC_EN_HPD_UNPLUG (1 << 2)
+
+/* HDMI_INTC_FLAG */
+#define HDMI_INTC_FLAG_HPD_PLUG (1 << 3)
+#define HDMI_INTC_FLAG_HPD_UNPLUG (1 << 2)
+
+/* HDMI_PHY_RSTOUT */
+#define HDMI_PHY_SW_RSTOUT (1 << 0)
+
+/* HDMI_CORE_RSTOUT */
+#define HDMI_CORE_SW_RSTOUT (1 << 0)
+
+/* HDMI_CON_0 */
+#define HDMI_BLUE_SCR_EN (1 << 5)
+#define HDMI_EN (1 << 0)
+
+/* HDMI_PHY_STATUS */
+#define HDMI_PHY_STATUS_READY (1 << 0)
+
+/* HDMI_MODE_SEL */
+#define HDMI_MODE_HDMI_EN (1 << 1)
+#define HDMI_MODE_DVI_EN (1 << 0)
+#define HDMI_MODE_MASK (3 << 0)
+
+/* HDMI_TG_CMD */
+#define HDMI_TG_EN (1 << 0)
+#define HDMI_FIELD_EN (1 << 1)
+
+#endif /* SAMSUNG_REGS_HDMI_H */
diff --git a/drivers/gpu/drm/exynos/regs-mixer.h b/drivers/gpu/drm/exynos/regs-mixer.h
new file mode 100644
index 000000000000..fd2f4d14cf6d
--- /dev/null
+++ b/drivers/gpu/drm/exynos/regs-mixer.h
@@ -0,0 +1,141 @@
+/*
+ *
+ * Cloned from drivers/media/video/s5p-tv/regs-mixer.h
+ *
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Mixer register header file for Samsung Mixer driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+#ifndef SAMSUNG_REGS_MIXER_H
+#define SAMSUNG_REGS_MIXER_H
+
+/*
+ * Register part
+ */
+#define MXR_STATUS 0x0000
+#define MXR_CFG 0x0004
+#define MXR_INT_EN 0x0008
+#define MXR_INT_STATUS 0x000C
+#define MXR_LAYER_CFG 0x0010
+#define MXR_VIDEO_CFG 0x0014
+#define MXR_GRAPHIC0_CFG 0x0020
+#define MXR_GRAPHIC0_BASE 0x0024
+#define MXR_GRAPHIC0_SPAN 0x0028
+#define MXR_GRAPHIC0_SXY 0x002C
+#define MXR_GRAPHIC0_WH 0x0030
+#define MXR_GRAPHIC0_DXY 0x0034
+#define MXR_GRAPHIC0_BLANK 0x0038
+#define MXR_GRAPHIC1_CFG 0x0040
+#define MXR_GRAPHIC1_BASE 0x0044
+#define MXR_GRAPHIC1_SPAN 0x0048
+#define MXR_GRAPHIC1_SXY 0x004C
+#define MXR_GRAPHIC1_WH 0x0050
+#define MXR_GRAPHIC1_DXY 0x0054
+#define MXR_GRAPHIC1_BLANK 0x0058
+#define MXR_BG_CFG 0x0060
+#define MXR_BG_COLOR0 0x0064
+#define MXR_BG_COLOR1 0x0068
+#define MXR_BG_COLOR2 0x006C
+#define MXR_CM_COEFF_Y 0x0080
+#define MXR_CM_COEFF_CB 0x0084
+#define MXR_CM_COEFF_CR 0x0088
+#define MXR_GRAPHIC0_BASE_S 0x2024
+#define MXR_GRAPHIC1_BASE_S 0x2044
+
+/* for parametrized access to layer registers */
+#define MXR_GRAPHIC_CFG(i) (0x0020 + (i) * 0x20)
+#define MXR_GRAPHIC_BASE(i) (0x0024 + (i) * 0x20)
+#define MXR_GRAPHIC_SPAN(i) (0x0028 + (i) * 0x20)
+#define MXR_GRAPHIC_SXY(i) (0x002C + (i) * 0x20)
+#define MXR_GRAPHIC_WH(i) (0x0030 + (i) * 0x20)
+#define MXR_GRAPHIC_DXY(i) (0x0034 + (i) * 0x20)
+#define MXR_GRAPHIC_BLANK(i) (0x0038 + (i) * 0x20)
+#define MXR_GRAPHIC_BASE_S(i) (0x2024 + (i) * 0x20)
+
+/*
+ * Bit definition part
+ */
+
+/* generates mask for range of bits */
+#define MXR_MASK(high_bit, low_bit) \
+ (((2 << ((high_bit) - (low_bit))) - 1) << (low_bit))
+
+#define MXR_MASK_VAL(val, high_bit, low_bit) \
+ (((val) << (low_bit)) & MXR_MASK(high_bit, low_bit))
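+/* e.g. MXR_MASK(11, 8) == 0x00000f00 and MXR_MASK_VAL(0x5, 11, 8) == 0x00000500 */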
+
+/* bits for MXR_STATUS */
+#define MXR_STATUS_16_BURST (1 << 7)
+#define MXR_STATUS_BURST_MASK (1 << 7)
+#define MXR_STATUS_BIG_ENDIAN (1 << 3)
+#define MXR_STATUS_ENDIAN_MASK (1 << 3)
+#define MXR_STATUS_SYNC_ENABLE (1 << 2)
+#define MXR_STATUS_REG_RUN (1 << 0)
+
+/* bits for MXR_CFG */
+#define MXR_CFG_RGB601_0_255 (0 << 9)
+#define MXR_CFG_RGB601_16_235 (1 << 9)
+#define MXR_CFG_RGB709_0_255 (2 << 9)
+#define MXR_CFG_RGB709_16_235 (3 << 9)
+#define MXR_CFG_RGB_FMT_MASK 0x600
+#define MXR_CFG_OUT_YUV444 (0 << 8)
+#define MXR_CFG_OUT_RGB888 (1 << 8)
+#define MXR_CFG_OUT_MASK (1 << 8)
+#define MXR_CFG_DST_SDO (0 << 7)
+#define MXR_CFG_DST_HDMI (1 << 7)
+#define MXR_CFG_DST_MASK (1 << 7)
+#define MXR_CFG_SCAN_HD_720 (0 << 6)
+#define MXR_CFG_SCAN_HD_1080 (1 << 6)
+#define MXR_CFG_GRP1_ENABLE (1 << 5)
+#define MXR_CFG_GRP0_ENABLE (1 << 4)
+#define MXR_CFG_VP_ENABLE (1 << 3)
+#define MXR_CFG_SCAN_INTERLACE (0 << 2)
+#define MXR_CFG_SCAN_PROGRASSIVE (1 << 2)
+#define MXR_CFG_SCAN_NTSC (0 << 1)
+#define MXR_CFG_SCAN_PAL (1 << 1)
+#define MXR_CFG_SCAN_SD (0 << 0)
+#define MXR_CFG_SCAN_HD (1 << 0)
+#define MXR_CFG_SCAN_MASK 0x47
+
+/* bits for MXR_GRAPHICn_CFG */
+#define MXR_GRP_CFG_COLOR_KEY_DISABLE (1 << 21)
+#define MXR_GRP_CFG_BLEND_PRE_MUL (1 << 20)
+#define MXR_GRP_CFG_WIN_BLEND_EN (1 << 17)
+#define MXR_GRP_CFG_PIXEL_BLEND_EN (1 << 16)
+#define MXR_GRP_CFG_FORMAT_VAL(x) MXR_MASK_VAL(x, 11, 8)
+#define MXR_GRP_CFG_FORMAT_MASK MXR_GRP_CFG_FORMAT_VAL(~0)
+#define MXR_GRP_CFG_ALPHA_VAL(x) MXR_MASK_VAL(x, 7, 0)
+
+/* bits for MXR_GRAPHICn_WH */
+#define MXR_GRP_WH_H_SCALE(x) MXR_MASK_VAL(x, 28, 28)
+#define MXR_GRP_WH_V_SCALE(x) MXR_MASK_VAL(x, 12, 12)
+#define MXR_GRP_WH_WIDTH(x) MXR_MASK_VAL(x, 26, 16)
+#define MXR_GRP_WH_HEIGHT(x) MXR_MASK_VAL(x, 10, 0)
+
+/* bits for MXR_GRAPHICn_SXY */
+#define MXR_GRP_SXY_SX(x) MXR_MASK_VAL(x, 26, 16)
+#define MXR_GRP_SXY_SY(x) MXR_MASK_VAL(x, 10, 0)
+
+/* bits for MXR_GRAPHICn_DXY */
+#define MXR_GRP_DXY_DX(x) MXR_MASK_VAL(x, 26, 16)
+#define MXR_GRP_DXY_DY(x) MXR_MASK_VAL(x, 10, 0)
+
+/* bits for MXR_INT_EN */
+#define MXR_INT_EN_VSYNC (1 << 11)
+#define MXR_INT_EN_ALL (0x0f << 8)
+
+/* bit for MXR_INT_STATUS */
+#define MXR_INT_CLEAR_VSYNC (1 << 11)
+#define MXR_INT_STATUS_VSYNC (1 << 0)
+
+/* bit for MXR_LAYER_CFG */
+#define MXR_LAYER_CFG_GRP1_VAL(x) MXR_MASK_VAL(x, 11, 8)
+#define MXR_LAYER_CFG_GRP0_VAL(x) MXR_MASK_VAL(x, 7, 4)
+#define MXR_LAYER_CFG_VP_VAL(x) MXR_MASK_VAL(x, 3, 0)
+
+#endif /* SAMSUNG_REGS_MIXER_H */
+
diff --git a/drivers/gpu/drm/exynos/regs-vp.h b/drivers/gpu/drm/exynos/regs-vp.h
new file mode 100644
index 000000000000..10b737af0a72
--- /dev/null
+++ b/drivers/gpu/drm/exynos/regs-vp.h
@@ -0,0 +1,91 @@
+/*
+ *
+ * Cloned from drivers/media/video/s5p-tv/regs-vp.h
+ *
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Video processor register header file for Samsung Mixer driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef SAMSUNG_REGS_VP_H
+#define SAMSUNG_REGS_VP_H
+
+/*
+ * Register part
+ */
+
+#define VP_ENABLE 0x0000
+#define VP_SRESET 0x0004
+#define VP_SHADOW_UPDATE 0x0008
+#define VP_FIELD_ID 0x000C
+#define VP_MODE 0x0010
+#define VP_IMG_SIZE_Y 0x0014
+#define VP_IMG_SIZE_C 0x0018
+#define VP_PER_RATE_CTRL 0x001C
+#define VP_TOP_Y_PTR 0x0028
+#define VP_BOT_Y_PTR 0x002C
+#define VP_TOP_C_PTR 0x0030
+#define VP_BOT_C_PTR 0x0034
+#define VP_ENDIAN_MODE 0x03CC
+#define VP_SRC_H_POSITION 0x0044
+#define VP_SRC_V_POSITION 0x0048
+#define VP_SRC_WIDTH 0x004C
+#define VP_SRC_HEIGHT 0x0050
+#define VP_DST_H_POSITION 0x0054
+#define VP_DST_V_POSITION 0x0058
+#define VP_DST_WIDTH 0x005C
+#define VP_DST_HEIGHT 0x0060
+#define VP_H_RATIO 0x0064
+#define VP_V_RATIO 0x0068
+#define VP_POLY8_Y0_LL 0x006C
+#define VP_POLY4_Y0_LL 0x00EC
+#define VP_POLY4_C0_LL 0x012C
+
+/*
+ * Bit definition part
+ */
+
+/* generates mask for range of bits */
+
+#define VP_MASK(high_bit, low_bit) \
+ (((2 << ((high_bit) - (low_bit))) - 1) << (low_bit))
+
+#define VP_MASK_VAL(val, high_bit, low_bit) \
+ (((val) << (low_bit)) & VP_MASK(high_bit, low_bit))
+
+ /* VP_ENABLE */
+#define VP_ENABLE_ON (1 << 0)
+
+/* VP_SRESET */
+#define VP_SRESET_PROCESSING (1 << 0)
+
+/* VP_SHADOW_UPDATE */
+#define VP_SHADOW_UPDATE_ENABLE (1 << 0)
+
+/* VP_MODE */
+#define VP_MODE_NV12 (0 << 6)
+#define VP_MODE_NV21 (1 << 6)
+#define VP_MODE_LINE_SKIP (1 << 5)
+#define VP_MODE_MEM_LINEAR (0 << 4)
+#define VP_MODE_MEM_TILED (1 << 4)
+#define VP_MODE_FMT_MASK (5 << 4)
+#define VP_MODE_FIELD_ID_AUTO_TOGGLING (1 << 2)
+#define VP_MODE_2D_IPC (1 << 1)
+
+/* VP_IMG_SIZE_Y */
+/* VP_IMG_SIZE_C */
+#define VP_IMG_HSIZE(x) VP_MASK_VAL(x, 29, 16)
+#define VP_IMG_VSIZE(x) VP_MASK_VAL(x, 13, 0)
+
+/* VP_SRC_H_POSITION */
+#define VP_SRC_H_POSITION_VAL(x) VP_MASK_VAL(x, 14, 4)
+
+/* VP_ENDIAN_MODE */
+#define VP_ENDIAN_MODE_LITTLE (1 << 0)
+
+#endif /* SAMSUNG_REGS_VP_H */
diff --git a/drivers/gpu/drm/gma500/Kconfig b/drivers/gpu/drm/gma500/Kconfig
new file mode 100644
index 000000000000..754e14bdc801
--- /dev/null
+++ b/drivers/gpu/drm/gma500/Kconfig
@@ -0,0 +1,27 @@
+config DRM_GMA500
+ tristate "Intel GMA5/600 KMS Framebuffer"
+ depends on DRM && PCI && X86 && EXPERIMENTAL
+ select FB_CFB_COPYAREA
+ select FB_CFB_FILLRECT
+ select FB_CFB_IMAGEBLIT
+ select DRM_KMS_HELPER
+ select DRM_TTM
+ help
+ Say yes for an experimental 2D KMS framebuffer driver for the
+ Intel GMA500 ('Poulsbo') and other Intel IMG based graphics
+ devices.
+
+config DRM_GMA600
+ bool "Intel GMA600 support (Experimental)"
+ depends on DRM_GMA500
+ help
+ Say yes to include support for GMA600 (Intel Moorestown/Oaktrail)
+ platforms with LVDS ports. HDMI and MIPI are not currently
+ supported.
+
+config DRM_GMA3600
+ bool "Intel GMA3600/3650 support (Experimental)"
+ depends on DRM_GMA500
+ help
+ Say yes to include basic support for Intel GMA3600/3650 (Intel
+ Cedar Trail) platforms.
diff --git a/drivers/gpu/drm/gma500/Makefile b/drivers/gpu/drm/gma500/Makefile
new file mode 100644
index 000000000000..81c103be5e21
--- /dev/null
+++ b/drivers/gpu/drm/gma500/Makefile
@@ -0,0 +1,40 @@
+#
+# KMS driver for the GMA500
+#
+ccflags-y += -Iinclude/drm
+
+gma500_gfx-y += gem_glue.o \
+ accel_2d.o \
+ backlight.o \
+ framebuffer.o \
+ gem.o \
+ gtt.o \
+ intel_bios.o \
+ intel_i2c.o \
+ intel_gmbus.o \
+ intel_opregion.o \
+ mmu.o \
+ power.o \
+ psb_drv.o \
+ psb_intel_display.o \
+ psb_intel_lvds.o \
+ psb_intel_modes.o \
+ psb_intel_sdvo.o \
+ psb_lid.o \
+ psb_irq.o \
+ psb_device.o \
+ mid_bios.o
+
+gma500_gfx-$(CONFIG_DRM_GMA3600) += cdv_device.o \
+ cdv_intel_crt.o \
+ cdv_intel_display.o \
+ cdv_intel_hdmi.o \
+ cdv_intel_lvds.o
+
+gma500_gfx-$(CONFIG_DRM_GMA600) += oaktrail_device.o \
+ oaktrail_crtc.o \
+ oaktrail_lvds.o \
+ oaktrail_hdmi.o \
+ oaktrail_hdmi_i2c.o
+
+obj-$(CONFIG_DRM_GMA500) += gma500_gfx.o
diff --git a/drivers/gpu/drm/gma500/accel_2d.c b/drivers/gpu/drm/gma500/accel_2d.c
new file mode 100644
index 000000000000..d5ef1a5793c8
--- /dev/null
+++ b/drivers/gpu/drm/gma500/accel_2d.c
@@ -0,0 +1,364 @@
+/**************************************************************************
+ * Copyright (c) 2007-2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ * develop this driver.
+ *
+ **************************************************************************/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/tty.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+#include <linux/console.h>
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/drm_crtc.h>
+
+#include "psb_drv.h"
+#include "psb_reg.h"
+#include "framebuffer.h"
+
+/**
+ * psb_spank - reset the 2D engine
+ * @dev_priv: our PSB DRM device
+ *
+ * Soft reset the graphics engine and then reload the necessary registers.
+ * We use this at initialisation time but it will become relevant for
+ * accelerated X later
+ */
+void psb_spank(struct drm_psb_private *dev_priv)
+{
+ PSB_WSGX32(_PSB_CS_RESET_BIF_RESET | _PSB_CS_RESET_DPM_RESET |
+ _PSB_CS_RESET_TA_RESET | _PSB_CS_RESET_USE_RESET |
+ _PSB_CS_RESET_ISP_RESET | _PSB_CS_RESET_TSP_RESET |
+ _PSB_CS_RESET_TWOD_RESET, PSB_CR_SOFT_RESET);
+ PSB_RSGX32(PSB_CR_SOFT_RESET);
+
+ msleep(1);
+
+ PSB_WSGX32(0, PSB_CR_SOFT_RESET);
+ wmb();
+ PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) | _PSB_CB_CTRL_CLEAR_FAULT,
+ PSB_CR_BIF_CTRL);
+ wmb();
+ (void) PSB_RSGX32(PSB_CR_BIF_CTRL);
+
+ msleep(1);
+ PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) & ~_PSB_CB_CTRL_CLEAR_FAULT,
+ PSB_CR_BIF_CTRL);
+ (void) PSB_RSGX32(PSB_CR_BIF_CTRL);
+ PSB_WSGX32(dev_priv->gtt.gatt_start, PSB_CR_BIF_TWOD_REQ_BASE);
+}
+
+/**
+ * psb2_2d_wait_available - wait for FIFO room
+ * @dev_priv: our DRM device
+ * @size: size (in dwords) of the command we want to issue
+ *
+ * Wait until there is room to load the FIFO with our data. If the
+ * device is not responding then reset it
+ */
+static int psb_2d_wait_available(struct drm_psb_private *dev_priv,
+ unsigned size)
+{
+ uint32_t avail = PSB_RSGX32(PSB_CR_2D_SOCIF);
+ unsigned long t = jiffies + HZ;
+
+ while (avail < size) {
+ avail = PSB_RSGX32(PSB_CR_2D_SOCIF);
+ if (time_after(jiffies, t)) {
+ psb_spank(dev_priv);
+ return -EIO;
+ }
+ }
+ return 0;
+}
+
+/**
+ * psb_2d_submit - submit a 2D command
+ * @dev_priv: our DRM device
+ * @cmdbuf: command to issue
+ * @size: length (in dwords)
+ *
+ * Issue one or more 2D commands to the accelerator. This needs to be
+ * serialized later when we add the GEM interfaces for acceleration
+ */
+static int psbfb_2d_submit(struct drm_psb_private *dev_priv, uint32_t *cmdbuf,
+ unsigned size)
+{
+ int ret = 0;
+ int i;
+ unsigned submit_size;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_priv->lock_2d, flags);
+ while (size > 0) {
+ submit_size = (size < 0x60) ? size : 0x60;
+ size -= submit_size;
+ ret = psb_2d_wait_available(dev_priv, submit_size);
+ if (ret)
+ break;
+
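+		/* submit_size is in dwords; convert it to a byte count for the slave port writes */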
+ submit_size <<= 2;
+
+ for (i = 0; i < submit_size; i += 4)
+ PSB_WSGX32(*cmdbuf++, PSB_SGX_2D_SLAVE_PORT + i);
+
+ (void)PSB_RSGX32(PSB_SGX_2D_SLAVE_PORT + i - 4);
+ }
+ spin_unlock_irqrestore(&dev_priv->lock_2d, flags);
+ return ret;
+}
+
+
+/**
+ * psb_accel_2d_copy_direction - compute blit order
+ * @xdir: X direction of move
+ * @ydir: Y direction of move
+ *
+ * Compute the correct order settings to ensure that an overlapping blit
+ * correctly copies all the pixels.
+ */
+static u32 psb_accel_2d_copy_direction(int xdir, int ydir)
+{
+ if (xdir < 0)
+ return (ydir < 0) ? PSB_2D_COPYORDER_BR2TL :
+ PSB_2D_COPYORDER_TR2BL;
+ else
+ return (ydir < 0) ? PSB_2D_COPYORDER_BL2TR :
+ PSB_2D_COPYORDER_TL2BR;
+}
+
+/**
+ * psb_accel_2d_copy - accelerated 2D copy
+ * @dev_priv: our DRM device
+ * @src_offset in bytes
+ * @src_stride in bytes
+ * @src_format psb 2D format defines
+ * @dst_offset in bytes
+ * @dst_stride in bytes
+ * @dst_format psb 2D format defines
+ * @src_x offset in pixels
+ * @src_y offset in pixels
+ * @dst_x offset in pixels
+ * @dst_y offset in pixels
+ * @size_x of the copied area
+ * @size_y of the copied area
+ *
+ * Format and issue a 2D accelerated copy command.
+ */
+static int psb_accel_2d_copy(struct drm_psb_private *dev_priv,
+ uint32_t src_offset, uint32_t src_stride,
+ uint32_t src_format, uint32_t dst_offset,
+ uint32_t dst_stride, uint32_t dst_format,
+ uint16_t src_x, uint16_t src_y,
+ uint16_t dst_x, uint16_t dst_y,
+ uint16_t size_x, uint16_t size_y)
+{
+ uint32_t blit_cmd;
+ uint32_t buffer[10];
+ uint32_t *buf;
+ uint32_t direction;
+
+ buf = buffer;
+
+ direction =
+ psb_accel_2d_copy_direction(src_x - dst_x, src_y - dst_y);
+
+ if (direction == PSB_2D_COPYORDER_BR2TL ||
+ direction == PSB_2D_COPYORDER_TR2BL) {
+ src_x += size_x - 1;
+ dst_x += size_x - 1;
+ }
+ if (direction == PSB_2D_COPYORDER_BR2TL ||
+ direction == PSB_2D_COPYORDER_BL2TR) {
+ src_y += size_y - 1;
+ dst_y += size_y - 1;
+ }
+
+ blit_cmd =
+ PSB_2D_BLIT_BH |
+ PSB_2D_ROT_NONE |
+ PSB_2D_DSTCK_DISABLE |
+ PSB_2D_SRCCK_DISABLE |
+ PSB_2D_USE_PAT | PSB_2D_ROP3_SRCCOPY | direction;
+
+ *buf++ = PSB_2D_FENCE_BH;
+ *buf++ =
+ PSB_2D_DST_SURF_BH | dst_format | (dst_stride <<
+ PSB_2D_DST_STRIDE_SHIFT);
+ *buf++ = dst_offset;
+ *buf++ =
+ PSB_2D_SRC_SURF_BH | src_format | (src_stride <<
+ PSB_2D_SRC_STRIDE_SHIFT);
+ *buf++ = src_offset;
+ *buf++ =
+ PSB_2D_SRC_OFF_BH | (src_x << PSB_2D_SRCOFF_XSTART_SHIFT) |
+ (src_y << PSB_2D_SRCOFF_YSTART_SHIFT);
+ *buf++ = blit_cmd;
+ *buf++ =
+ (dst_x << PSB_2D_DST_XSTART_SHIFT) | (dst_y <<
+ PSB_2D_DST_YSTART_SHIFT);
+ *buf++ =
+ (size_x << PSB_2D_DST_XSIZE_SHIFT) | (size_y <<
+ PSB_2D_DST_YSIZE_SHIFT);
+ *buf++ = PSB_2D_FLUSH_BH;
+
+ return psbfb_2d_submit(dev_priv, buffer, buf - buffer);
+}
+
+/**
+ * psbfb_copyarea_accel - copyarea acceleration for /dev/fb
+ * @info: our framebuffer
+ * @a: copyarea parameters from the framebuffer core
+ *
+ * Perform a 2D copy via the accelerator
+ */
+static void psbfb_copyarea_accel(struct fb_info *info,
+ const struct fb_copyarea *a)
+{
+ struct psb_fbdev *fbdev = info->par;
+ struct psb_framebuffer *psbfb = &fbdev->pfb;
+ struct drm_device *dev = psbfb->base.dev;
+ struct drm_framebuffer *fb = fbdev->psb_fb_helper.fb;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ uint32_t offset;
+ uint32_t stride;
+ uint32_t src_format;
+ uint32_t dst_format;
+
+ if (!fb)
+ return;
+
+ offset = psbfb->gtt->offset;
+ stride = fb->pitches[0];
+
+ switch (fb->depth) {
+ case 8:
+ src_format = PSB_2D_SRC_332RGB;
+ dst_format = PSB_2D_DST_332RGB;
+ break;
+ case 15:
+ src_format = PSB_2D_SRC_555RGB;
+ dst_format = PSB_2D_DST_555RGB;
+ break;
+ case 16:
+ src_format = PSB_2D_SRC_565RGB;
+ dst_format = PSB_2D_DST_565RGB;
+ break;
+ case 24:
+ case 32:
+		/* this is wrong but since we don't do blending it's okay */
+ src_format = PSB_2D_SRC_8888ARGB;
+ dst_format = PSB_2D_DST_8888ARGB;
+ break;
+ default:
+ /* software fallback */
+ cfb_copyarea(info, a);
+ return;
+ }
+
+ if (!gma_power_begin(dev, false)) {
+ cfb_copyarea(info, a);
+ return;
+ }
+ psb_accel_2d_copy(dev_priv,
+ offset, stride, src_format,
+ offset, stride, dst_format,
+ a->sx, a->sy, a->dx, a->dy, a->width, a->height);
+ gma_power_end(dev);
+}
+
+/**
+ * psbfb_copyarea - 2D copy interface
+ * @info: our framebuffer
+ * @region: region to copy
+ *
+ * Copy an area of the framebuffer console either by the accelerator
+ * or directly using the cfb helpers according to the request
+ */
+void psbfb_copyarea(struct fb_info *info,
+ const struct fb_copyarea *region)
+{
+ if (unlikely(info->state != FBINFO_STATE_RUNNING))
+ return;
+
+ /* Avoid the 8 pixel erratum */
+ if (region->width == 8 || region->height == 8 ||
+ (info->flags & FBINFO_HWACCEL_DISABLED))
+ return cfb_copyarea(info, region);
+
+ psbfb_copyarea_accel(info, region);
+}
+
+/**
+ * psbfb_sync - synchronize 2D
+ * @info: our framebuffer
+ *
+ * Wait for the 2D engine to quiesce so that we can do CPU
+ * access to the framebuffer again
+ */
+int psbfb_sync(struct fb_info *info)
+{
+ struct psb_fbdev *fbdev = info->par;
+ struct psb_framebuffer *psbfb = &fbdev->pfb;
+ struct drm_device *dev = psbfb->base.dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ unsigned long _end = jiffies + DRM_HZ;
+ int busy = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_priv->lock_2d, flags);
+ /*
+ * First idle the 2D engine.
+ */
+
+ if ((PSB_RSGX32(PSB_CR_2D_SOCIF) == _PSB_C2_SOCIF_EMPTY) &&
+ ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) & _PSB_C2B_STATUS_BUSY) == 0))
+ goto out;
+
+ do {
+ busy = (PSB_RSGX32(PSB_CR_2D_SOCIF) != _PSB_C2_SOCIF_EMPTY);
+ cpu_relax();
+ } while (busy && !time_after_eq(jiffies, _end));
+
+ if (busy)
+ busy = (PSB_RSGX32(PSB_CR_2D_SOCIF) != _PSB_C2_SOCIF_EMPTY);
+ if (busy)
+ goto out;
+
+ do {
+ busy = ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) &
+ _PSB_C2B_STATUS_BUSY) != 0);
+ cpu_relax();
+ } while (busy && !time_after_eq(jiffies, _end));
+ if (busy)
+ busy = ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) &
+ _PSB_C2B_STATUS_BUSY) != 0);
+
+out:
+ spin_unlock_irqrestore(&dev_priv->lock_2d, flags);
+ return (busy) ? -EBUSY : 0;
+}
diff --git a/drivers/gpu/drm/gma500/backlight.c b/drivers/gpu/drm/gma500/backlight.c
new file mode 100644
index 000000000000..20793951fcac
--- /dev/null
+++ b/drivers/gpu/drm/gma500/backlight.c
@@ -0,0 +1,49 @@
+/*
+ * GMA500 Backlight Interface
+ *
+ * Copyright (c) 2009-2011, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors: Eric Knopp
+ *
+ */
+
+#include "psb_drv.h"
+#include "psb_intel_reg.h"
+#include "psb_intel_drv.h"
+#include "intel_bios.h"
+#include "power.h"
+
+int gma_backlight_init(struct drm_device *dev)
+{
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ return dev_priv->ops->backlight_init(dev);
+#else
+ return 0;
+#endif
+}
+
+void gma_backlight_exit(struct drm_device *dev)
+{
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ if (dev_priv->backlight_device) {
+ dev_priv->backlight_device->props.brightness = 0;
+ backlight_update_status(dev_priv->backlight_device);
+ backlight_device_unregister(dev_priv->backlight_device);
+ }
+#endif
+}
diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c
new file mode 100644
index 000000000000..4a5b099c3bc5
--- /dev/null
+++ b/drivers/gpu/drm/gma500/cdv_device.c
@@ -0,0 +1,351 @@
+/**************************************************************************
+ * Copyright (c) 2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#include <linux/backlight.h>
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include "gma_drm.h"
+#include "psb_drv.h"
+#include "psb_reg.h"
+#include "psb_intel_reg.h"
+#include "intel_bios.h"
+#include "cdv_device.h"
+
+#define VGA_SR_INDEX 0x3c4
+#define VGA_SR_DATA 0x3c5
+
+static void cdv_disable_vga(struct drm_device *dev)
+{
+ u8 sr1;
+ u32 vga_reg;
+
+ vga_reg = VGACNTRL;
+
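+	/* blank the screen via bit 5 (screen off) of VGA sequencer register SR01 */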
+ outb(1, VGA_SR_INDEX);
+ sr1 = inb(VGA_SR_DATA);
+ outb(sr1 | 1<<5, VGA_SR_DATA);
+ udelay(300);
+
+ REG_WRITE(vga_reg, VGA_DISP_DISABLE);
+ REG_READ(vga_reg);
+}
+
+static int cdv_output_init(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ cdv_disable_vga(dev);
+
+ cdv_intel_crt_init(dev, &dev_priv->mode_dev);
+ cdv_intel_lvds_init(dev, &dev_priv->mode_dev);
+
+ /* These bits indicate HDMI not SDVO on CDV, but we don't yet support
+ the HDMI interface */
+ if (REG_READ(SDVOB) & SDVO_DETECTED)
+ cdv_hdmi_init(dev, &dev_priv->mode_dev, SDVOB);
+ if (REG_READ(SDVOC) & SDVO_DETECTED)
+ cdv_hdmi_init(dev, &dev_priv->mode_dev, SDVOC);
+ return 0;
+}
+
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+
+/*
+ * Poulsbo Backlight Interfaces
+ */
+
+#define BLC_PWM_PRECISION_FACTOR 100 /* 10000000 */
+#define BLC_PWM_FREQ_CALC_CONSTANT 32
+#define MHz 1000000
+
+#define PSB_BLC_PWM_PRECISION_FACTOR 10
+#define PSB_BLC_MAX_PWM_REG_FREQ 0xFFFE
+#define PSB_BLC_MIN_PWM_REG_FREQ 0x2
+
+#define PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
+#define PSB_BACKLIGHT_PWM_CTL_SHIFT (16)
+
+static int cdv_brightness;
+static struct backlight_device *cdv_backlight_device;
+
+static int cdv_get_brightness(struct backlight_device *bd)
+{
+ /* return locally cached var instead of HW read (due to DPST etc.) */
+ /* FIXME: ideally return actual value in case firmware fiddled with
+ it */
+ return cdv_brightness;
+}
+
+
+static int cdv_backlight_setup(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ unsigned long core_clock;
+ /* u32 bl_max_freq; */
+ /* unsigned long value; */
+ u16 bl_max_freq;
+ uint32_t value;
+ uint32_t blc_pwm_precision_factor;
+
+ /* get bl_max_freq and pol from dev_priv*/
+ if (!dev_priv->lvds_bl) {
+		dev_err(dev->dev, "No valid LVDS backlight info\n");
+ return -ENOENT;
+ }
+ bl_max_freq = dev_priv->lvds_bl->freq;
+ blc_pwm_precision_factor = PSB_BLC_PWM_PRECISION_FACTOR;
+
+ core_clock = dev_priv->core_freq;
+
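+	/*
+	 * Derive the PWM register value from the graphics core clock and the
+	 * panel's maximum backlight modulation frequency (dev_priv->lvds_bl->freq),
+	 * then check it against the legal PWM register range below.  For
+	 * example, a 200MHz core clock and bl_max_freq of 200 give
+	 * (200 * 1000000) / 32 / 200 = 31250, which is within range.
+	 */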
+ value = (core_clock * MHz) / BLC_PWM_FREQ_CALC_CONSTANT;
+ value *= blc_pwm_precision_factor;
+ value /= bl_max_freq;
+ value /= blc_pwm_precision_factor;
+
+ if (value > (unsigned long long)PSB_BLC_MAX_PWM_REG_FREQ ||
+ value < (unsigned long long)PSB_BLC_MIN_PWM_REG_FREQ)
+ return -ERANGE;
+ else {
+ /* FIXME */
+ }
+ return 0;
+}
+
+static int cdv_set_brightness(struct backlight_device *bd)
+{
+ int level = bd->props.brightness;
+
+	/* Brightness is a percentage; only 1-100 is valid */
+ if (level < 1)
+ level = 1;
+
+ /*cdv_intel_lvds_set_brightness(dev, level); FIXME */
+ cdv_brightness = level;
+ return 0;
+}
+
+static const struct backlight_ops cdv_ops = {
+ .get_brightness = cdv_get_brightness,
+ .update_status = cdv_set_brightness,
+};
+
+static int cdv_backlight_init(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ int ret;
+ struct backlight_properties props;
+
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.max_brightness = 100;
+ props.type = BACKLIGHT_PLATFORM;
+
+ cdv_backlight_device = backlight_device_register("psb-bl",
+ NULL, (void *)dev, &cdv_ops, &props);
+ if (IS_ERR(cdv_backlight_device))
+ return PTR_ERR(cdv_backlight_device);
+
+ ret = cdv_backlight_setup(dev);
+ if (ret < 0) {
+ backlight_device_unregister(cdv_backlight_device);
+ cdv_backlight_device = NULL;
+ return ret;
+ }
+ cdv_backlight_device->props.brightness = 100;
+ cdv_backlight_device->props.max_brightness = 100;
+ backlight_update_status(cdv_backlight_device);
+ dev_priv->backlight_device = cdv_backlight_device;
+ return 0;
+}
+
+#endif
+
+/*
+ * Provide the Cedarview specific chip logic and low level methods
+ * for power management
+ *
+ * FIXME: we need to implement the apm/ospm base management bits
+ * for this and the MID devices.
+ */
+
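+/*
+ * Sideband message bus access.  Commands are issued through the PCI device
+ * at 0000:00:00.0: the command word (with the target port and register
+ * offset packed into it) is written to config register 0xD0 and the data
+ * is transferred through config register 0xD4.
+ */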
+static inline u32 CDV_MSG_READ32(uint port, uint offset)
+{
+ int mcr = (0x10<<24) | (port << 16) | (offset << 8);
+ uint32_t ret_val = 0;
+ struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+ pci_write_config_dword(pci_root, 0xD0, mcr);
+ pci_read_config_dword(pci_root, 0xD4, &ret_val);
+ pci_dev_put(pci_root);
+ return ret_val;
+}
+
+static inline void CDV_MSG_WRITE32(uint port, uint offset, u32 value)
+{
+ int mcr = (0x11<<24) | (port << 16) | (offset << 8) | 0xF0;
+ struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+ pci_write_config_dword(pci_root, 0xD4, value);
+ pci_write_config_dword(pci_root, 0xD0, mcr);
+ pci_dev_put(pci_root);
+}
+
+#define PSB_APM_CMD 0x0
+#define PSB_APM_STS 0x04
+#define PSB_PM_SSC 0x20
+#define PSB_PM_SSS 0x30
+#define PSB_PWRGT_GFX_MASK 0x3
+#define CDV_PWRGT_DISPLAY_CNTR 0x000fc00c
+#define CDV_PWRGT_DISPLAY_STS 0x000fc00c
+
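+/*
+ * Force the graphics and display power islands on at init time by clearing
+ * the relevant power-gate bits in the APM command and OSPM control
+ * registers, then polling the matching status registers (with a bounded
+ * retry loop) until the hardware reports the islands as powered.
+ */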
+static void cdv_init_pm(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ u32 pwr_cnt;
+ int i;
+
+ dev_priv->apm_base = CDV_MSG_READ32(PSB_PUNIT_PORT,
+ PSB_APMBA) & 0xFFFF;
+ dev_priv->ospm_base = CDV_MSG_READ32(PSB_PUNIT_PORT,
+ PSB_OSPMBA) & 0xFFFF;
+
+ /* Force power on for now */
+ pwr_cnt = inl(dev_priv->apm_base + PSB_APM_CMD);
+ pwr_cnt &= ~PSB_PWRGT_GFX_MASK;
+
+ outl(pwr_cnt, dev_priv->apm_base + PSB_APM_CMD);
+ for (i = 0; i < 5; i++) {
+ u32 pwr_sts = inl(dev_priv->apm_base + PSB_APM_STS);
+ if ((pwr_sts & PSB_PWRGT_GFX_MASK) == 0)
+ break;
+ udelay(10);
+ }
+ pwr_cnt = inl(dev_priv->ospm_base + PSB_PM_SSC);
+ pwr_cnt &= ~CDV_PWRGT_DISPLAY_CNTR;
+ outl(pwr_cnt, dev_priv->ospm_base + PSB_PM_SSC);
+ for (i = 0; i < 5; i++) {
+ u32 pwr_sts = inl(dev_priv->ospm_base + PSB_PM_SSS);
+ if ((pwr_sts & CDV_PWRGT_DISPLAY_STS) == 0)
+ break;
+ udelay(10);
+ }
+}
+
+/**
+ * cdv_save_display_registers - save registers lost on suspend
+ * @dev: our DRM device
+ *
+ * Save the state we need in order to be able to restore the interface
+ * upon resume from suspend
+ *
+ * FIXME: review
+ */
+static int cdv_save_display_registers(struct drm_device *dev)
+{
+ return 0;
+}
+
+/**
+ * cdv_restore_display_registers - restore lost register state
+ * @dev: our DRM device
+ *
+ * Restore register state that was lost during suspend and resume.
+ *
+ * FIXME: review
+ */
+static int cdv_restore_display_registers(struct drm_device *dev)
+{
+ return 0;
+}
+
+static int cdv_power_down(struct drm_device *dev)
+{
+ return 0;
+}
+
+static int cdv_power_up(struct drm_device *dev)
+{
+ return 0;
+}
+
+/* FIXME ? - shared with Poulsbo */
+static void cdv_get_core_freq(struct drm_device *dev)
+{
+ uint32_t clock;
+ struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ pci_write_config_dword(pci_root, 0xD0, 0xD0050300);
+ pci_read_config_dword(pci_root, 0xD4, &clock);
+ pci_dev_put(pci_root);
+
+ switch (clock & 0x07) {
+ case 0:
+ dev_priv->core_freq = 100;
+ break;
+ case 1:
+ dev_priv->core_freq = 133;
+ break;
+ case 2:
+ dev_priv->core_freq = 150;
+ break;
+ case 3:
+ dev_priv->core_freq = 178;
+ break;
+ case 4:
+ dev_priv->core_freq = 200;
+ break;
+ case 5:
+ case 6:
+ case 7:
+		dev_priv->core_freq = 266;
+		break;
+	default:
+		dev_priv->core_freq = 0;
+ }
+}
+
+static int cdv_chip_setup(struct drm_device *dev)
+{
+ cdv_get_core_freq(dev);
+ gma_intel_opregion_init(dev);
+ psb_intel_init_bios(dev);
+ return 0;
+}
+
+/* CDV is much like Poulsbo but has MID like SGX offsets and PM */
+
+const struct psb_ops cdv_chip_ops = {
+ .name = "GMA3600/3650",
+ .accel_2d = 0,
+ .pipes = 2,
+ .crtcs = 2,
+ .sgx_offset = MRST_SGX_OFFSET,
+ .chip_setup = cdv_chip_setup,
+
+ .crtc_helper = &cdv_intel_helper_funcs,
+ .crtc_funcs = &cdv_intel_crtc_funcs,
+
+ .output_init = cdv_output_init,
+
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+ .backlight_init = cdv_backlight_init,
+#endif
+
+ .init_pm = cdv_init_pm,
+ .save_regs = cdv_save_display_registers,
+ .restore_regs = cdv_restore_display_registers,
+ .power_down = cdv_power_down,
+ .power_up = cdv_power_up,
+};
diff --git a/drivers/gpu/drm/gma500/cdv_device.h b/drivers/gpu/drm/gma500/cdv_device.h
new file mode 100644
index 000000000000..2a88b7beb551
--- /dev/null
+++ b/drivers/gpu/drm/gma500/cdv_device.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright © 2011 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+extern const struct drm_crtc_helper_funcs cdv_intel_helper_funcs;
+extern const struct drm_crtc_funcs cdv_intel_crtc_funcs;
+extern void cdv_intel_crt_init(struct drm_device *dev,
+ struct psb_intel_mode_device *mode_dev);
+extern void cdv_intel_lvds_init(struct drm_device *dev,
+ struct psb_intel_mode_device *mode_dev);
+extern void cdv_hdmi_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev,
+ int reg);
+extern struct drm_display_mode *cdv_intel_crtc_mode_get(struct drm_device *dev,
+ struct drm_crtc *crtc);
+
+static inline void cdv_intel_wait_for_vblank(struct drm_device *dev)
+{
+	/* Wait for 20ms, i.e. one cycle at 50Hz. */
+ /* FIXME: msleep ?? */
+ mdelay(20);
+}
+
+
diff --git a/drivers/gpu/drm/gma500/cdv_intel_crt.c b/drivers/gpu/drm/gma500/cdv_intel_crt.c
new file mode 100644
index 000000000000..c100f3e9c920
--- /dev/null
+++ b/drivers/gpu/drm/gma500/cdv_intel_crt.c
@@ -0,0 +1,339 @@
+/*
+ * Copyright © 2006-2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ */
+
+#include <linux/i2c.h>
+#include <drm/drmP.h>
+
+#include "intel_bios.h"
+#include "psb_drv.h"
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "power.h"
+#include <linux/pm_runtime.h>
+
+
+static void cdv_intel_crt_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ u32 temp, reg;
+ reg = ADPA;
+
+ temp = REG_READ(reg);
+ temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
+ temp &= ~ADPA_DAC_ENABLE;
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ temp |= ADPA_DAC_ENABLE;
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ temp |= ADPA_DAC_ENABLE | ADPA_HSYNC_CNTL_DISABLE;
+ break;
+ case DRM_MODE_DPMS_SUSPEND:
+ temp |= ADPA_DAC_ENABLE | ADPA_VSYNC_CNTL_DISABLE;
+ break;
+ case DRM_MODE_DPMS_OFF:
+ temp |= ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE;
+ break;
+ }
+
+ REG_WRITE(reg, temp);
+}
+
+static int cdv_intel_crt_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct drm_psb_private *dev_priv = connector->dev->dev_private;
+ int max_clock = 0;
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
+ /* The lowest clock for CDV is 20000KHz */
+ if (mode->clock < 20000)
+ return MODE_CLOCK_LOW;
+
+ /* The max clock for CDV is 355 instead of 400 */
+ max_clock = 355000;
+ if (mode->clock > max_clock)
+ return MODE_CLOCK_HIGH;
+
+ if (mode->hdisplay > 1680 || mode->vdisplay > 1050)
+ return MODE_PANEL;
+
+ /* We assume worst case scenario of 32 bpp here, since we don't know */
+ if ((ALIGN(mode->hdisplay * 4, 64) * mode->vdisplay) >
+ dev_priv->vram_stolen_size)
+ return MODE_MEM;
+
+ return MODE_OK;
+}
+
+static bool cdv_intel_crt_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void cdv_intel_crt_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+
+ struct drm_device *dev = encoder->dev;
+ struct drm_crtc *crtc = encoder->crtc;
+ struct psb_intel_crtc *psb_intel_crtc =
+ to_psb_intel_crtc(crtc);
+ int dpll_md_reg;
+ u32 adpa, dpll_md;
+ u32 adpa_reg;
+
+ if (psb_intel_crtc->pipe == 0)
+ dpll_md_reg = DPLL_A_MD;
+ else
+ dpll_md_reg = DPLL_B_MD;
+
+ adpa_reg = ADPA;
+
+ /*
+ * Disable separate mode multiplier used when cloning SDVO to CRT
+ * XXX this needs to be adjusted when we really are cloning
+ */
+ {
+ dpll_md = REG_READ(dpll_md_reg);
+ REG_WRITE(dpll_md_reg,
+ dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK);
+ }
+
+ adpa = 0;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ adpa |= ADPA_HSYNC_ACTIVE_HIGH;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ adpa |= ADPA_VSYNC_ACTIVE_HIGH;
+
+ if (psb_intel_crtc->pipe == 0)
+ adpa |= ADPA_PIPE_A_SELECT;
+ else
+ adpa |= ADPA_PIPE_B_SELECT;
+
+ REG_WRITE(adpa_reg, adpa);
+}
+
+
+/**
+ * cdv_intel_crt_detect_hotplug - detect CRT presence
+ *
+ * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect CRT presence.
+ *
+ * Returns true if a CRT is connected, false otherwise.
+ */
+static bool cdv_intel_crt_detect_hotplug(struct drm_connector *connector,
+ bool force)
+{
+ struct drm_device *dev = connector->dev;
+ u32 hotplug_en;
+ int i, tries = 0, ret = false;
+ u32 adpa_orig;
+
+ /* disable the DAC when doing the hotplug detection */
+
+ adpa_orig = REG_READ(ADPA);
+
+ REG_WRITE(ADPA, adpa_orig & ~(ADPA_DAC_ENABLE));
+
+	/*
+	 * On CDV the CRT detect sequence needs to be done twice
+	 * to get a reliable result.
+	 */
+ tries = 2;
+
+ hotplug_en = REG_READ(PORT_HOTPLUG_EN);
+ hotplug_en &= ~(CRT_HOTPLUG_DETECT_MASK);
+ hotplug_en |= CRT_HOTPLUG_FORCE_DETECT;
+
+ hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
+ hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
+
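+	/*
+	 * Each iteration kicks off a detection cycle by setting
+	 * CRT_HOTPLUG_FORCE_DETECT; the hardware clears that bit again once
+	 * the cycle has completed, which is what the inner loop polls for.
+	 */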
+ for (i = 0; i < tries ; i++) {
+ unsigned long timeout;
+ /* turn on the FORCE_DETECT */
+ REG_WRITE(PORT_HOTPLUG_EN, hotplug_en);
+ timeout = jiffies + msecs_to_jiffies(1000);
+ /* wait for FORCE_DETECT to go off */
+ do {
+ if (!(REG_READ(PORT_HOTPLUG_EN) &
+ CRT_HOTPLUG_FORCE_DETECT))
+ break;
+ msleep(1);
+ } while (time_after(timeout, jiffies));
+ }
+
+ if ((REG_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK) !=
+ CRT_HOTPLUG_MONITOR_NONE)
+ ret = true;
+
+ /* Restore the saved ADPA */
+ REG_WRITE(ADPA, adpa_orig);
+ return ret;
+}
+
+static enum drm_connector_status cdv_intel_crt_detect(
+ struct drm_connector *connector, bool force)
+{
+ if (cdv_intel_crt_detect_hotplug(connector, force))
+ return connector_status_connected;
+ else
+ return connector_status_disconnected;
+}
+
+static void cdv_intel_crt_destroy(struct drm_connector *connector)
+{
+ struct psb_intel_encoder *psb_intel_encoder =
+ psb_intel_attached_encoder(connector);
+
+ psb_intel_i2c_destroy(psb_intel_encoder->ddc_bus);
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
+
+static int cdv_intel_crt_get_modes(struct drm_connector *connector)
+{
+ struct psb_intel_encoder *psb_intel_encoder =
+ psb_intel_attached_encoder(connector);
+ return psb_intel_ddc_get_modes(connector, &psb_intel_encoder->ddc_bus->adapter);
+}
+
+static int cdv_intel_crt_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t value)
+{
+ return 0;
+}
+
+/*
+ * Routines for controlling the analog (VGA) port
+ */
+
+static const struct drm_encoder_helper_funcs cdv_intel_crt_helper_funcs = {
+ .dpms = cdv_intel_crt_dpms,
+ .mode_fixup = cdv_intel_crt_mode_fixup,
+ .prepare = psb_intel_encoder_prepare,
+ .commit = psb_intel_encoder_commit,
+ .mode_set = cdv_intel_crt_mode_set,
+};
+
+static const struct drm_connector_funcs cdv_intel_crt_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = cdv_intel_crt_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = cdv_intel_crt_destroy,
+ .set_property = cdv_intel_crt_set_property,
+};
+
+static const struct drm_connector_helper_funcs
+ cdv_intel_crt_connector_helper_funcs = {
+ .mode_valid = cdv_intel_crt_mode_valid,
+ .get_modes = cdv_intel_crt_get_modes,
+ .best_encoder = psb_intel_best_encoder,
+};
+
+static void cdv_intel_crt_enc_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+}
+
+static const struct drm_encoder_funcs cdv_intel_crt_enc_funcs = {
+ .destroy = cdv_intel_crt_enc_destroy,
+};
+
+void cdv_intel_crt_init(struct drm_device *dev,
+ struct psb_intel_mode_device *mode_dev)
+{
+
+ struct psb_intel_connector *psb_intel_connector;
+ struct psb_intel_encoder *psb_intel_encoder;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+
+ u32 i2c_reg;
+
+ psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL);
+ if (!psb_intel_encoder)
+ return;
+
+ psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL);
+ if (!psb_intel_connector)
+ goto failed_connector;
+
+ connector = &psb_intel_connector->base;
+ drm_connector_init(dev, connector,
+ &cdv_intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
+
+ encoder = &psb_intel_encoder->base;
+ drm_encoder_init(dev, encoder,
+ &cdv_intel_crt_enc_funcs, DRM_MODE_ENCODER_DAC);
+
+ psb_intel_connector_attach_encoder(psb_intel_connector,
+ psb_intel_encoder);
+
+ /* Set up the DDC bus. */
+ i2c_reg = GPIOA;
+ /* Remove the following code for CDV */
+ /*
+ if (dev_priv->crt_ddc_bus != 0)
+ i2c_reg = dev_priv->crt_ddc_bus;
+	*/
+ psb_intel_encoder->ddc_bus = psb_intel_i2c_create(dev,
+ i2c_reg, "CRTDDC_A");
+ if (!psb_intel_encoder->ddc_bus) {
+ dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
+ "failed.\n");
+ goto failed_ddc;
+ }
+
+ psb_intel_encoder->type = INTEL_OUTPUT_ANALOG;
+ /*
+ psb_intel_output->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT);
+ psb_intel_output->crtc_mask = (1 << 0) | (1 << 1);
+ */
+ connector->interlace_allowed = 0;
+ connector->doublescan_allowed = 0;
+
+ drm_encoder_helper_add(encoder, &cdv_intel_crt_helper_funcs);
+ drm_connector_helper_add(connector,
+ &cdv_intel_crt_connector_helper_funcs);
+
+ drm_sysfs_connector_add(connector);
+
+ return;
+failed_ddc:
+ drm_encoder_cleanup(&psb_intel_encoder->base);
+ drm_connector_cleanup(&psb_intel_connector->base);
+ kfree(psb_intel_connector);
+failed_connector:
+ kfree(psb_intel_encoder);
+ return;
+}
diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
new file mode 100644
index 000000000000..18d11525095e
--- /dev/null
+++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
@@ -0,0 +1,1508 @@
+/*
+ * Copyright © 2006-2011 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ */
+
+#include <linux/i2c.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/drmP.h>
+#include "framebuffer.h"
+#include "psb_drv.h"
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "psb_intel_display.h"
+#include "power.h"
+#include "cdv_device.h"
+
+
+struct cdv_intel_range_t {
+ int min, max;
+};
+
+struct cdv_intel_p2_t {
+ int dot_limit;
+ int p2_slow, p2_fast;
+};
+
+struct cdv_intel_clock_t {
+ /* given values */
+ int n;
+ int m1, m2;
+ int p1, p2;
+ /* derived values */
+ int dot;
+ int vco;
+ int m;
+ int p;
+};
+
+#define INTEL_P2_NUM 2
+
+struct cdv_intel_limit_t {
+ struct cdv_intel_range_t dot, vco, n, m, m1, m2, p, p1;
+ struct cdv_intel_p2_t p2;
+};
+
+#define CDV_LIMIT_SINGLE_LVDS_96 0
+#define CDV_LIMIT_SINGLE_LVDS_100 1
+#define CDV_LIMIT_DAC_HDMI_27 2
+#define CDV_LIMIT_DAC_HDMI_96 3
+
+static const struct cdv_intel_limit_t cdv_intel_limits[] = {
+	{			/* CDV_SINGLE_LVDS_96MHz */
+ .dot = {.min = 20000, .max = 115500},
+ .vco = {.min = 1800000, .max = 3600000},
+ .n = {.min = 2, .max = 6},
+ .m = {.min = 60, .max = 160},
+ .m1 = {.min = 0, .max = 0},
+ .m2 = {.min = 58, .max = 158},
+ .p = {.min = 28, .max = 140},
+ .p1 = {.min = 2, .max = 10},
+ .p2 = {.dot_limit = 200000,
+ .p2_slow = 14, .p2_fast = 14},
+ },
+ { /* CDV_SINGLE_LVDS_100MHz */
+ .dot = {.min = 20000, .max = 115500},
+ .vco = {.min = 1800000, .max = 3600000},
+ .n = {.min = 2, .max = 6},
+ .m = {.min = 60, .max = 160},
+ .m1 = {.min = 0, .max = 0},
+ .m2 = {.min = 58, .max = 158},
+ .p = {.min = 28, .max = 140},
+ .p1 = {.min = 2, .max = 10},
+		/* The single-channel range is 25-112MHz, and dual-channel
+		 * is 80-224MHz.  Prefer single channel as much as possible.
+		 */
+ .p2 = {.dot_limit = 200000, .p2_slow = 14, .p2_fast = 14},
+ },
+ { /* CDV_DAC_HDMI_27MHz */
+ .dot = {.min = 20000, .max = 400000},
+ .vco = {.min = 1809000, .max = 3564000},
+ .n = {.min = 1, .max = 1},
+ .m = {.min = 67, .max = 132},
+ .m1 = {.min = 0, .max = 0},
+ .m2 = {.min = 65, .max = 130},
+ .p = {.min = 5, .max = 90},
+ .p1 = {.min = 1, .max = 9},
+ .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 5},
+ },
+ { /* CDV_DAC_HDMI_96MHz */
+ .dot = {.min = 20000, .max = 400000},
+ .vco = {.min = 1800000, .max = 3600000},
+ .n = {.min = 2, .max = 6},
+ .m = {.min = 60, .max = 160},
+ .m1 = {.min = 0, .max = 0},
+ .m2 = {.min = 58, .max = 158},
+ .p = {.min = 5, .max = 100},
+ .p1 = {.min = 1, .max = 10},
+ .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 5},
+ },
+};
+
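+/*
+ * Poll COND until it becomes true or MS milliseconds have elapsed, sleeping
+ * W milliseconds between polls (busy-waiting when W is 0 or when running
+ * under the kernel debugger).  Evaluates to 0 on success and -ETIMEDOUT on
+ * timeout.
+ */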
+#define _wait_for(COND, MS, W) ({ \
+ unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \
+ int ret__ = 0; \
+ while (!(COND)) { \
+ if (time_after(jiffies, timeout__)) { \
+ ret__ = -ETIMEDOUT; \
+ break; \
+ } \
+ if (W && !in_dbg_master()) \
+ msleep(W); \
+ } \
+ ret__; \
+})
+
+#define wait_for(COND, MS) _wait_for(COND, MS, 1)
+
+
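+/*
+ * Sideband register accesses all follow the same handshake: wait for any
+ * previous packet to finish (SB_BUSY clear), write the target register to
+ * SB_ADDR (and the value to SB_DATA for writes), post a packet with the
+ * opcode, destination and byte enables to SB_PCKT, then wait for SB_BUSY
+ * to clear again before reading back SB_DATA.
+ */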
+static int cdv_sb_read(struct drm_device *dev, u32 reg, u32 *val)
+{
+ int ret;
+
+ ret = wait_for((REG_READ(SB_PCKT) & SB_BUSY) == 0, 1000);
+ if (ret) {
+ DRM_ERROR("timeout waiting for SB to idle before read\n");
+ return ret;
+ }
+
+ REG_WRITE(SB_ADDR, reg);
+ REG_WRITE(SB_PCKT,
+ SET_FIELD(SB_OPCODE_READ, SB_OPCODE) |
+ SET_FIELD(SB_DEST_DPLL, SB_DEST) |
+ SET_FIELD(0xf, SB_BYTE_ENABLE));
+
+ ret = wait_for((REG_READ(SB_PCKT) & SB_BUSY) == 0, 1000);
+ if (ret) {
+ DRM_ERROR("timeout waiting for SB to idle after read\n");
+ return ret;
+ }
+
+ *val = REG_READ(SB_DATA);
+
+ return 0;
+}
+
+static int cdv_sb_write(struct drm_device *dev, u32 reg, u32 val)
+{
+ int ret;
+ static bool dpio_debug = true;
+ u32 temp;
+
+ if (dpio_debug) {
+ if (cdv_sb_read(dev, reg, &temp) == 0)
+ DRM_DEBUG_KMS("0x%08x: 0x%08x (before)\n", reg, temp);
+ DRM_DEBUG_KMS("0x%08x: 0x%08x\n", reg, val);
+ }
+
+ ret = wait_for((REG_READ(SB_PCKT) & SB_BUSY) == 0, 1000);
+ if (ret) {
+ DRM_ERROR("timeout waiting for SB to idle before write\n");
+ return ret;
+ }
+
+ REG_WRITE(SB_ADDR, reg);
+ REG_WRITE(SB_DATA, val);
+ REG_WRITE(SB_PCKT,
+ SET_FIELD(SB_OPCODE_WRITE, SB_OPCODE) |
+ SET_FIELD(SB_DEST_DPLL, SB_DEST) |
+ SET_FIELD(0xf, SB_BYTE_ENABLE));
+
+ ret = wait_for((REG_READ(SB_PCKT) & SB_BUSY) == 0, 1000);
+ if (ret) {
+ DRM_ERROR("timeout waiting for SB to idle after write\n");
+ return ret;
+ }
+
+ if (dpio_debug) {
+ if (cdv_sb_read(dev, reg, &temp) == 0)
+ DRM_DEBUG_KMS("0x%08x: 0x%08x (after)\n", reg, temp);
+ }
+
+ return 0;
+}
+
+/* Reset the DPIO configuration register. The BIOS does this at every
+ * mode set.
+ */
+static void cdv_sb_reset(struct drm_device *dev)
+{
+
+ REG_WRITE(DPIO_CFG, 0);
+ REG_READ(DPIO_CFG);
+ REG_WRITE(DPIO_CFG, DPIO_MODE_SELECT_0 | DPIO_CMN_RESET_N);
+}
+
+/* Unlike most Intel display engines, on Cedarview the DPLL registers
+ * are behind this sideband bus. They must be programmed while the
+ * DPLL reference clock is on in the DPLL control register, but before
+ * the DPLL is enabled in the DPLL control register.
+ */
+static int
+cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc,
+ struct cdv_intel_clock_t *clock)
+{
+ struct psb_intel_crtc *psb_crtc =
+ to_psb_intel_crtc(crtc);
+ int pipe = psb_crtc->pipe;
+ u32 m, n_vco, p;
+ int ret = 0;
+ int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
+ u32 ref_value;
+
+ cdv_sb_reset(dev);
+
+ if ((REG_READ(dpll_reg) & DPLL_SYNCLOCK_ENABLE) == 0) {
+ DRM_ERROR("Attempting to set DPLL with refclk disabled\n");
+ return -EBUSY;
+ }
+
+ /* Follow the BIOS and write the REF/SFR Register. Hardcoded value */
+ ref_value = 0x68A701;
+
+ cdv_sb_write(dev, SB_REF_SFR(pipe), ref_value);
+
+ /* We don't know what the other fields of these regs are, so
+ * leave them in place.
+ */
+ ret = cdv_sb_read(dev, SB_M(pipe), &m);
+ if (ret)
+ return ret;
+ m &= ~SB_M_DIVIDER_MASK;
+ m |= ((clock->m2) << SB_M_DIVIDER_SHIFT);
+ ret = cdv_sb_write(dev, SB_M(pipe), m);
+ if (ret)
+ return ret;
+
+ ret = cdv_sb_read(dev, SB_N_VCO(pipe), &n_vco);
+ if (ret)
+ return ret;
+
+ /* Follow the BIOS to program the N_DIVIDER REG */
+ n_vco &= 0xFFFF;
+ n_vco |= 0x107;
+ n_vco &= ~(SB_N_VCO_SEL_MASK |
+ SB_N_DIVIDER_MASK |
+ SB_N_CB_TUNE_MASK);
+
+ n_vco |= ((clock->n) << SB_N_DIVIDER_SHIFT);
+
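+	/*
+	 * Pick the VCO band select and charge-pump tuning value according to
+	 * which frequency band the target VCO falls into.
+	 */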
+ if (clock->vco < 2250000) {
+ n_vco |= (2 << SB_N_CB_TUNE_SHIFT);
+ n_vco |= (0 << SB_N_VCO_SEL_SHIFT);
+ } else if (clock->vco < 2750000) {
+ n_vco |= (1 << SB_N_CB_TUNE_SHIFT);
+ n_vco |= (1 << SB_N_VCO_SEL_SHIFT);
+ } else if (clock->vco < 3300000) {
+ n_vco |= (0 << SB_N_CB_TUNE_SHIFT);
+ n_vco |= (2 << SB_N_VCO_SEL_SHIFT);
+ } else {
+ n_vco |= (0 << SB_N_CB_TUNE_SHIFT);
+ n_vco |= (3 << SB_N_VCO_SEL_SHIFT);
+ }
+
+ ret = cdv_sb_write(dev, SB_N_VCO(pipe), n_vco);
+ if (ret)
+ return ret;
+
+ ret = cdv_sb_read(dev, SB_P(pipe), &p);
+ if (ret)
+ return ret;
+ p &= ~(SB_P2_DIVIDER_MASK | SB_P1_DIVIDER_MASK);
+ p |= SET_FIELD(clock->p1, SB_P1_DIVIDER);
+ switch (clock->p2) {
+ case 5:
+ p |= SET_FIELD(SB_P2_5, SB_P2_DIVIDER);
+ break;
+ case 10:
+ p |= SET_FIELD(SB_P2_10, SB_P2_DIVIDER);
+ break;
+ case 14:
+ p |= SET_FIELD(SB_P2_14, SB_P2_DIVIDER);
+ break;
+ case 7:
+ p |= SET_FIELD(SB_P2_7, SB_P2_DIVIDER);
+ break;
+ default:
+ DRM_ERROR("Bad P2 clock: %d\n", clock->p2);
+ return -EINVAL;
+ }
+ ret = cdv_sb_write(dev, SB_P(pipe), p);
+ if (ret)
+ return ret;
+
+	/* Always program the lane registers for pipe A */
+ if (pipe == 0) {
+ /* Program the Lane0/1 for HDMI B */
+ u32 lane_reg, lane_value;
+
+ lane_reg = PSB_LANE0;
+ cdv_sb_read(dev, lane_reg, &lane_value);
+ lane_value &= ~(LANE_PLL_MASK);
+ lane_value |= LANE_PLL_ENABLE;
+ cdv_sb_write(dev, lane_reg, lane_value);
+
+ lane_reg = PSB_LANE1;
+ cdv_sb_read(dev, lane_reg, &lane_value);
+ lane_value &= ~(LANE_PLL_MASK);
+ lane_value |= LANE_PLL_ENABLE;
+ cdv_sb_write(dev, lane_reg, lane_value);
+
+ /* Program the Lane2/3 for HDMI C */
+ lane_reg = PSB_LANE2;
+ cdv_sb_read(dev, lane_reg, &lane_value);
+ lane_value &= ~(LANE_PLL_MASK);
+ lane_value |= LANE_PLL_ENABLE;
+ cdv_sb_write(dev, lane_reg, lane_value);
+
+ lane_reg = PSB_LANE3;
+ cdv_sb_read(dev, lane_reg, &lane_value);
+ lane_value &= ~(LANE_PLL_MASK);
+ lane_value |= LANE_PLL_ENABLE;
+ cdv_sb_write(dev, lane_reg, lane_value);
+ }
+
+ return 0;
+}
+
+/*
+ * Returns whether any encoder on the specified pipe is of the specified type
+ */
+bool cdv_intel_pipe_has_type(struct drm_crtc *crtc, int type)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct drm_connector *l_entry;
+
+ list_for_each_entry(l_entry, &mode_config->connector_list, head) {
+ if (l_entry->encoder && l_entry->encoder->crtc == crtc) {
+ struct psb_intel_encoder *psb_intel_encoder =
+ psb_intel_attached_encoder(l_entry);
+ if (psb_intel_encoder->type == type)
+ return true;
+ }
+ }
+ return false;
+}
+
+static const struct cdv_intel_limit_t *cdv_intel_limit(struct drm_crtc *crtc,
+ int refclk)
+{
+ const struct cdv_intel_limit_t *limit;
+ if (cdv_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+		/*
+		 * Only single-channel LVDS is currently supported on CDV.
+		 * If that turns out to be wrong, add dual-channel LVDS
+		 * handling here.
+		 */
+ if (refclk == 96000)
+ limit = &cdv_intel_limits[CDV_LIMIT_SINGLE_LVDS_96];
+ else
+ limit = &cdv_intel_limits[CDV_LIMIT_SINGLE_LVDS_100];
+ } else {
+ if (refclk == 27000)
+ limit = &cdv_intel_limits[CDV_LIMIT_DAC_HDMI_27];
+ else
+ limit = &cdv_intel_limits[CDV_LIMIT_DAC_HDMI_96];
+ }
+ return limit;
+}
+
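+/*
+ * The dividers combine as follows:
+ *
+ *	m   = m2 + 2
+ *	p   = p1 * p2
+ *	vco = refclk * m / n
+ *	dot = vco / p
+ *
+ * For example, with the 96MHz LVDS reference clock (refclk = 96000 kHz),
+ * n = 2, m2 = 58, p1 = 4 and p2 = 14 give vco = 2880000 kHz and a dot
+ * clock of roughly 51429 kHz.
+ */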
+/* m1 is reserved as 0 in CDV, n is a ring counter */
+static void cdv_intel_clock(struct drm_device *dev,
+ int refclk, struct cdv_intel_clock_t *clock)
+{
+ clock->m = clock->m2 + 2;
+ clock->p = clock->p1 * clock->p2;
+ clock->vco = (refclk * clock->m) / clock->n;
+ clock->dot = clock->vco / clock->p;
+}
+
+
+#define INTELPllInvalid(s) { /* ErrorF (s) */; return false; }
+static bool cdv_intel_PLL_is_valid(struct drm_crtc *crtc,
+ const struct cdv_intel_limit_t *limit,
+ struct cdv_intel_clock_t *clock)
+{
+ if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
+ INTELPllInvalid("p1 out of range\n");
+ if (clock->p < limit->p.min || limit->p.max < clock->p)
+ INTELPllInvalid("p out of range\n");
+ /* unnecessary to check the range of m(m1/M2)/n again */
+ if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
+ INTELPllInvalid("vco out of range\n");
+ /* XXX: We may need to be checking "Dot clock"
+ * depending on the multiplier, connector, etc.,
+ * rather than just a single range.
+ */
+ if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
+ INTELPllInvalid("dot out of range\n");
+
+ return true;
+}
+
+static bool cdv_intel_find_best_PLL(struct drm_crtc *crtc, int target,
+ int refclk,
+ struct cdv_intel_clock_t *best_clock)
+{
+ struct drm_device *dev = crtc->dev;
+ struct cdv_intel_clock_t clock;
+ const struct cdv_intel_limit_t *limit = cdv_intel_limit(crtc, refclk);
+ int err = target;
+
+
+ if (cdv_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
+ (REG_READ(LVDS) & LVDS_PORT_EN) != 0) {
+ /*
+ * For LVDS, if the panel is on, just rely on its current
+ * settings for dual-channel. We haven't figured out how to
+ * reliably set up different single/dual channel state, if we
+ * even can.
+ */
+ if ((REG_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
+ LVDS_CLKB_POWER_UP)
+ clock.p2 = limit->p2.p2_fast;
+ else
+ clock.p2 = limit->p2.p2_slow;
+ } else {
+ if (target < limit->p2.dot_limit)
+ clock.p2 = limit->p2.p2_slow;
+ else
+ clock.p2 = limit->p2.p2_fast;
+ }
+
+ memset(best_clock, 0, sizeof(*best_clock));
+ clock.m1 = 0;
+ /* m1 is reserved as 0 in CDV, n is a ring counter.
+ So skip the m1 loop */
+ for (clock.n = limit->n.min; clock.n <= limit->n.max; clock.n++) {
+ for (clock.m2 = limit->m2.min; clock.m2 <= limit->m2.max;
+ clock.m2++) {
+ for (clock.p1 = limit->p1.min;
+ clock.p1 <= limit->p1.max;
+ clock.p1++) {
+ int this_err;
+
+ cdv_intel_clock(dev, refclk, &clock);
+
+ if (!cdv_intel_PLL_is_valid(crtc,
+ limit, &clock))
+ continue;
+
+ this_err = abs(clock.dot - target);
+ if (this_err < err) {
+ *best_clock = clock;
+ err = this_err;
+ }
+ }
+ }
+ }
+
+ return err != target;
+}
+
+int cdv_intel_pipe_set_base(struct drm_crtc *crtc,
+ int x, int y, struct drm_framebuffer *old_fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
+ int pipe = psb_intel_crtc->pipe;
+ unsigned long start, offset;
+ int dspbase = (pipe == 0 ? DSPABASE : DSPBBASE);
+ int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF);
+ int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
+ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
+ u32 dspcntr;
+ int ret = 0;
+
+ if (!gma_power_begin(dev, true))
+ return 0;
+
+ /* no fb bound */
+ if (!crtc->fb) {
+ dev_err(dev->dev, "No FB bound\n");
+ goto psb_intel_pipe_cleaner;
+ }
+
+
+ /* We are displaying this buffer, make sure it is actually loaded
+ into the GTT */
+ ret = psb_gtt_pin(psbfb->gtt);
+ if (ret < 0)
+ goto psb_intel_pipe_set_base_exit;
+ start = psbfb->gtt->offset;
+ offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
+
+ REG_WRITE(dspstride, crtc->fb->pitches[0]);
+
+ dspcntr = REG_READ(dspcntr_reg);
+ dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
+
+ switch (crtc->fb->bits_per_pixel) {
+ case 8:
+ dspcntr |= DISPPLANE_8BPP;
+ break;
+ case 16:
+ if (crtc->fb->depth == 15)
+ dspcntr |= DISPPLANE_15_16BPP;
+ else
+ dspcntr |= DISPPLANE_16BPP;
+ break;
+ case 24:
+ case 32:
+ dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
+ break;
+ default:
+ dev_err(dev->dev, "Unknown color depth\n");
+ ret = -EINVAL;
+ goto psb_intel_pipe_set_base_exit;
+ }
+ REG_WRITE(dspcntr_reg, dspcntr);
+
+ dev_dbg(dev->dev,
+ "Writing base %08lX %08lX %d %d\n", start, offset, x, y);
+
+ REG_WRITE(dspbase, offset);
+ REG_READ(dspbase);
+ REG_WRITE(dspsurf, start);
+ REG_READ(dspsurf);
+
+psb_intel_pipe_cleaner:
+ /* If there was a previous display we can now unpin it */
+ if (old_fb)
+ psb_gtt_unpin(to_psb_fb(old_fb)->gtt);
+
+psb_intel_pipe_set_base_exit:
+ gma_power_end(dev);
+ return ret;
+}
+
+/**
+ * cdv_intel_crtc_dpms - set the power management mode of the pipe and plane
+ *
+ * This code should probably grow support for turning the cursor off and back
+ * on appropriately at the same time as we're turning the pipe off/on.
+ */
+static void cdv_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ int pipe = psb_intel_crtc->pipe;
+ int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
+ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
+ int dspbase_reg = (pipe == 0) ? DSPABASE : DSPBBASE;
+ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+ u32 temp;
+ bool enabled;
+
+ /* XXX: When our outputs are all unaware of DPMS modes other than off
+ * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
+ */
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ /* Enable the DPLL */
+ temp = REG_READ(dpll_reg);
+ if ((temp & DPLL_VCO_ENABLE) == 0) {
+ REG_WRITE(dpll_reg, temp);
+ REG_READ(dpll_reg);
+ /* Wait for the clocks to stabilize. */
+ udelay(150);
+ REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
+ REG_READ(dpll_reg);
+ /* Wait for the clocks to stabilize. */
+ udelay(150);
+ REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
+ REG_READ(dpll_reg);
+ /* Wait for the clocks to stabilize. */
+ udelay(150);
+ }
+
+		/* Jim Bish - switch plane and pipe per Scott */
+ /* Enable the plane */
+ temp = REG_READ(dspcntr_reg);
+ if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
+ REG_WRITE(dspcntr_reg,
+ temp | DISPLAY_PLANE_ENABLE);
+ /* Flush the plane changes */
+ REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+ }
+
+ udelay(150);
+
+ /* Enable the pipe */
+ temp = REG_READ(pipeconf_reg);
+ if ((temp & PIPEACONF_ENABLE) == 0)
+ REG_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
+
+ psb_intel_crtc_load_lut(crtc);
+
+ /* Give the overlay scaler a chance to enable
+ * if it's on this pipe */
+ /* psb_intel_crtc_dpms_video(crtc, true); TODO */
+ break;
+ case DRM_MODE_DPMS_OFF:
+ /* Give the overlay scaler a chance to disable
+ * if it's on this pipe */
+ /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
+
+ /* Disable the VGA plane that we never use */
+ REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
+
+ /* Jim Bish - changed pipe/plane here as well. */
+
+ /* Wait for vblank for the disable to take effect */
+ cdv_intel_wait_for_vblank(dev);
+
+ /* Next, disable display pipes */
+ temp = REG_READ(pipeconf_reg);
+ if ((temp & PIPEACONF_ENABLE) != 0) {
+ REG_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
+ REG_READ(pipeconf_reg);
+ }
+
+ /* Wait for vblank for the disable to take effect. */
+ cdv_intel_wait_for_vblank(dev);
+
+ udelay(150);
+
+ /* Disable display plane */
+ temp = REG_READ(dspcntr_reg);
+ if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
+ REG_WRITE(dspcntr_reg,
+ temp & ~DISPLAY_PLANE_ENABLE);
+ /* Flush the plane changes */
+ REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+ REG_READ(dspbase_reg);
+ }
+
+ temp = REG_READ(dpll_reg);
+ if ((temp & DPLL_VCO_ENABLE) != 0) {
+ REG_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
+ REG_READ(dpll_reg);
+ }
+
+ /* Wait for the clocks to turn off. */
+ udelay(150);
+ break;
+ }
+ enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
+	/* Set FIFO watermarks */
+ REG_WRITE(DSPARB, 0x3F3E);
+}
+
+static void cdv_intel_crtc_prepare(struct drm_crtc *crtc)
+{
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+static void cdv_intel_crtc_commit(struct drm_crtc *crtc)
+{
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
+}
+
+void cdv_intel_encoder_prepare(struct drm_encoder *encoder)
+{
+ struct drm_encoder_helper_funcs *encoder_funcs =
+ encoder->helper_private;
+ /* lvds has its own version of prepare see cdv_intel_lvds_prepare */
+ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+void cdv_intel_encoder_commit(struct drm_encoder *encoder)
+{
+ struct drm_encoder_helper_funcs *encoder_funcs =
+ encoder->helper_private;
+ /* lvds has its own version of commit see cdv_intel_lvds_commit */
+ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
+}
+
+static bool cdv_intel_crtc_mode_fixup(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+
+/**
+ * cdv_intel_panel_fitter_pipe - find the pipe using the panel fitter
+ *
+ * Returns the pipe currently connected to the panel fitter, or -1 if the
+ * panel fitter is not present or not in use.
+ */
+static int cdv_intel_panel_fitter_pipe(struct drm_device *dev)
+{
+ u32 pfit_control;
+
+ pfit_control = REG_READ(PFIT_CONTROL);
+
+ /* See if the panel fitter is in use */
+ if ((pfit_control & PFIT_ENABLE) == 0)
+ return -1;
+ return (pfit_control >> 29) & 0x3;
+}
+
+static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ int pipe = psb_intel_crtc->pipe;
+ int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
+ int dpll_md_reg = (psb_intel_crtc->pipe == 0) ? DPLL_A_MD : DPLL_B_MD;
+ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
+ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+ int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
+ int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
+ int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
+ int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
+ int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
+ int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
+ int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
+ int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
+ int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
+ int refclk;
+ struct cdv_intel_clock_t clock;
+ u32 dpll = 0, dspcntr, pipeconf;
+ bool ok, is_sdvo = false, is_dvo = false;
+ bool is_crt = false, is_lvds = false, is_tv = false;
+ bool is_hdmi = false;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct drm_connector *connector;
+
+ list_for_each_entry(connector, &mode_config->connector_list, head) {
+ struct psb_intel_encoder *psb_intel_encoder =
+ psb_intel_attached_encoder(connector);
+
+ if (!connector->encoder
+ || connector->encoder->crtc != crtc)
+ continue;
+
+ switch (psb_intel_encoder->type) {
+ case INTEL_OUTPUT_LVDS:
+ is_lvds = true;
+ break;
+ case INTEL_OUTPUT_SDVO:
+ is_sdvo = true;
+ break;
+ case INTEL_OUTPUT_DVO:
+ is_dvo = true;
+ break;
+ case INTEL_OUTPUT_TVOUT:
+ is_tv = true;
+ break;
+ case INTEL_OUTPUT_ANALOG:
+ is_crt = true;
+ break;
+ case INTEL_OUTPUT_HDMI:
+ is_hdmi = true;
+ break;
+ }
+ }
+
+ refclk = 96000;
+
+	/* Hack: select the 27MHz reference clock for CRT and HDMI outputs */
+ if (is_crt || is_hdmi)
+ refclk = 27000;
+
+ drm_mode_debug_printmodeline(adjusted_mode);
+
+ ok = cdv_intel_find_best_PLL(crtc, adjusted_mode->clock, refclk,
+ &clock);
+ if (!ok) {
+ dev_err(dev->dev, "Couldn't find PLL settings for mode!\n");
+ return 0;
+ }
+
+ dpll = DPLL_VGA_MODE_DIS;
+ if (is_tv) {
+ /* XXX: just matching BIOS for now */
+/* dpll |= PLL_REF_INPUT_TVCLKINBC; */
+ dpll |= 3;
+ }
+ dpll |= PLL_REF_INPUT_DREFCLK;
+
+ dpll |= DPLL_SYNCLOCK_ENABLE;
+ dpll |= DPLL_VGA_MODE_DIS;
+ if (is_lvds)
+ dpll |= DPLLB_MODE_LVDS;
+ else
+ dpll |= DPLLB_MODE_DAC_SERIAL;
+ /* dpll |= (2 << 11); */
+
+ /* setup pipeconf */
+ pipeconf = REG_READ(pipeconf_reg);
+
+ /* Set up the display plane register */
+ dspcntr = DISPPLANE_GAMMA_ENABLE;
+
+ if (pipe == 0)
+ dspcntr |= DISPPLANE_SEL_PIPE_A;
+ else
+ dspcntr |= DISPPLANE_SEL_PIPE_B;
+
+ dspcntr |= DISPLAY_PLANE_ENABLE;
+ pipeconf |= PIPEACONF_ENABLE;
+
+ REG_WRITE(dpll_reg, dpll | DPLL_VGA_MODE_DIS | DPLL_SYNCLOCK_ENABLE);
+ REG_READ(dpll_reg);
+
+ cdv_dpll_set_clock_cdv(dev, crtc, &clock);
+
+ udelay(150);
+
+
+ /* The LVDS pin pair needs to be on before the DPLLs are enabled.
+ * This is an exception to the general rule that mode_set doesn't turn
+ * things on.
+ */
+ if (is_lvds) {
+ u32 lvds = REG_READ(LVDS);
+
+ lvds |=
+ LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP |
+ LVDS_PIPEB_SELECT;
+ /* Set the B0-B3 data pairs corresponding to
+ * whether we're going to
+ * set the DPLLs for dual-channel mode or not.
+ */
+ if (clock.p2 == 7)
+ lvds |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
+ else
+ lvds &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
+
+ /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
+ * appropriately here, but we need to look more
+ * thoroughly into how panels behave in the two modes.
+ */
+
+ REG_WRITE(LVDS, lvds);
+ REG_READ(LVDS);
+ }
+
+ dpll |= DPLL_VCO_ENABLE;
+
+ /* Disable the panel fitter if it was on our pipe */
+ if (cdv_intel_panel_fitter_pipe(dev) == pipe)
+ REG_WRITE(PFIT_CONTROL, 0);
+
+ DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
+ drm_mode_debug_printmodeline(mode);
+
+ REG_WRITE(dpll_reg,
+ (REG_READ(dpll_reg) & ~DPLL_LOCK) | DPLL_VCO_ENABLE);
+ REG_READ(dpll_reg);
+ /* Wait for the clocks to stabilize. */
+ udelay(150); /* 42 usec w/o calibration, 110 with. rounded up. */
+
+ if (!(REG_READ(dpll_reg) & DPLL_LOCK)) {
+ dev_err(dev->dev, "Failed to get DPLL lock\n");
+ return -EBUSY;
+ }
+
+ {
+ int sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
+ REG_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT));
+ }
+
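+	/*
+	 * The hardware timing registers hold each value minus one, with the
+	 * active/start field in the low 16 bits and the total/end field in
+	 * the high 16 bits.
+	 */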
+ REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
+ ((adjusted_mode->crtc_htotal - 1) << 16));
+ REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
+ ((adjusted_mode->crtc_hblank_end - 1) << 16));
+ REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
+ ((adjusted_mode->crtc_hsync_end - 1) << 16));
+ REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
+ ((adjusted_mode->crtc_vtotal - 1) << 16));
+ REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
+ ((adjusted_mode->crtc_vblank_end - 1) << 16));
+ REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
+ ((adjusted_mode->crtc_vsync_end - 1) << 16));
+ /* pipesrc and dspsize control the size that is scaled from,
+ * which should always be the user's requested size.
+ */
+ REG_WRITE(dspsize_reg,
+ ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
+ REG_WRITE(dsppos_reg, 0);
+ REG_WRITE(pipesrc_reg,
+ ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
+ REG_WRITE(pipeconf_reg, pipeconf);
+ REG_READ(pipeconf_reg);
+
+ cdv_intel_wait_for_vblank(dev);
+
+ REG_WRITE(dspcntr_reg, dspcntr);
+
+ /* Flush the plane changes */
+ {
+ struct drm_crtc_helper_funcs *crtc_funcs =
+ crtc->helper_private;
+ crtc_funcs->mode_set_base(crtc, x, y, old_fb);
+ }
+
+ cdv_intel_wait_for_vblank(dev);
+
+ return 0;
+}
+
+/** Loads the palette/gamma unit for the CRTC with the prepared values */
+void cdv_intel_crtc_load_lut(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv =
+ (struct drm_psb_private *)dev->dev_private;
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ int palreg = PALETTE_A;
+ int i;
+
+ /* The clocks have to be on to load the palette. */
+ if (!crtc->enabled)
+ return;
+
+ switch (psb_intel_crtc->pipe) {
+ case 0:
+ break;
+ case 1:
+ palreg = PALETTE_B;
+ break;
+ case 2:
+ palreg = PALETTE_C;
+ break;
+ default:
+ dev_err(dev->dev, "Illegal Pipe Number.\n");
+ return;
+ }
+
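+	/*
+	 * Each palette entry packs the adjusted 8-bit red, green and blue
+	 * components into bits 23:16, 15:8 and 7:0 of a 32-bit register,
+	 * one register per LUT index.
+	 */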
+ if (gma_power_begin(dev, false)) {
+ for (i = 0; i < 256; i++) {
+ REG_WRITE(palreg + 4 * i,
+ ((psb_intel_crtc->lut_r[i] +
+ psb_intel_crtc->lut_adj[i]) << 16) |
+ ((psb_intel_crtc->lut_g[i] +
+ psb_intel_crtc->lut_adj[i]) << 8) |
+ (psb_intel_crtc->lut_b[i] +
+ psb_intel_crtc->lut_adj[i]));
+ }
+ gma_power_end(dev);
+ } else {
+ for (i = 0; i < 256; i++) {
+ dev_priv->save_palette_a[i] =
+ ((psb_intel_crtc->lut_r[i] +
+ psb_intel_crtc->lut_adj[i]) << 16) |
+ ((psb_intel_crtc->lut_g[i] +
+ psb_intel_crtc->lut_adj[i]) << 8) |
+ (psb_intel_crtc->lut_b[i] +
+ psb_intel_crtc->lut_adj[i]);
+ }
+
+ }
+}
+
+/**
+ * cdv_intel_crtc_save - save the hardware state of the given CRTC
+ */
+static void cdv_intel_crtc_save(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ /* struct drm_psb_private *dev_priv =
+ (struct drm_psb_private *)dev->dev_private; */
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
+ int pipeA = (psb_intel_crtc->pipe == 0);
+ uint32_t paletteReg;
+ int i;
+
+ if (!crtc_state) {
+ dev_dbg(dev->dev, "No CRTC state found\n");
+ return;
+ }
+
+ crtc_state->saveDSPCNTR = REG_READ(pipeA ? DSPACNTR : DSPBCNTR);
+ crtc_state->savePIPECONF = REG_READ(pipeA ? PIPEACONF : PIPEBCONF);
+ crtc_state->savePIPESRC = REG_READ(pipeA ? PIPEASRC : PIPEBSRC);
+ crtc_state->saveFP0 = REG_READ(pipeA ? FPA0 : FPB0);
+ crtc_state->saveFP1 = REG_READ(pipeA ? FPA1 : FPB1);
+ crtc_state->saveDPLL = REG_READ(pipeA ? DPLL_A : DPLL_B);
+ crtc_state->saveHTOTAL = REG_READ(pipeA ? HTOTAL_A : HTOTAL_B);
+ crtc_state->saveHBLANK = REG_READ(pipeA ? HBLANK_A : HBLANK_B);
+ crtc_state->saveHSYNC = REG_READ(pipeA ? HSYNC_A : HSYNC_B);
+ crtc_state->saveVTOTAL = REG_READ(pipeA ? VTOTAL_A : VTOTAL_B);
+ crtc_state->saveVBLANK = REG_READ(pipeA ? VBLANK_A : VBLANK_B);
+ crtc_state->saveVSYNC = REG_READ(pipeA ? VSYNC_A : VSYNC_B);
+ crtc_state->saveDSPSTRIDE = REG_READ(pipeA ? DSPASTRIDE : DSPBSTRIDE);
+
+ /*NOTE: DSPSIZE DSPPOS only for psb*/
+ crtc_state->saveDSPSIZE = REG_READ(pipeA ? DSPASIZE : DSPBSIZE);
+ crtc_state->saveDSPPOS = REG_READ(pipeA ? DSPAPOS : DSPBPOS);
+
+ crtc_state->saveDSPBASE = REG_READ(pipeA ? DSPABASE : DSPBBASE);
+
+ DRM_DEBUG("(%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
+ crtc_state->saveDSPCNTR,
+ crtc_state->savePIPECONF,
+ crtc_state->savePIPESRC,
+ crtc_state->saveFP0,
+ crtc_state->saveFP1,
+ crtc_state->saveDPLL,
+ crtc_state->saveHTOTAL,
+ crtc_state->saveHBLANK,
+ crtc_state->saveHSYNC,
+ crtc_state->saveVTOTAL,
+ crtc_state->saveVBLANK,
+ crtc_state->saveVSYNC,
+ crtc_state->saveDSPSTRIDE,
+ crtc_state->saveDSPSIZE,
+ crtc_state->saveDSPPOS,
+ crtc_state->saveDSPBASE
+ );
+
+ paletteReg = pipeA ? PALETTE_A : PALETTE_B;
+ for (i = 0; i < 256; ++i)
+ crtc_state->savePalette[i] = REG_READ(paletteReg + (i << 2));
+}
+
+/**
+ * cdv_intel_crtc_restore - restore the hardware state of the given CRTC
+ */
+static void cdv_intel_crtc_restore(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ /* struct drm_psb_private * dev_priv =
+ (struct drm_psb_private *)dev->dev_private; */
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
+ /* struct drm_crtc_helper_funcs * crtc_funcs = crtc->helper_private; */
+ int pipeA = (psb_intel_crtc->pipe == 0);
+ uint32_t paletteReg;
+ int i;
+
+ if (!crtc_state) {
+ dev_dbg(dev->dev, "No crtc state\n");
+ return;
+ }
+
+ DRM_DEBUG(
+ "current:(%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
+ REG_READ(pipeA ? DSPACNTR : DSPBCNTR),
+ REG_READ(pipeA ? PIPEACONF : PIPEBCONF),
+ REG_READ(pipeA ? PIPEASRC : PIPEBSRC),
+ REG_READ(pipeA ? FPA0 : FPB0),
+ REG_READ(pipeA ? FPA1 : FPB1),
+ REG_READ(pipeA ? DPLL_A : DPLL_B),
+ REG_READ(pipeA ? HTOTAL_A : HTOTAL_B),
+ REG_READ(pipeA ? HBLANK_A : HBLANK_B),
+ REG_READ(pipeA ? HSYNC_A : HSYNC_B),
+ REG_READ(pipeA ? VTOTAL_A : VTOTAL_B),
+ REG_READ(pipeA ? VBLANK_A : VBLANK_B),
+ REG_READ(pipeA ? VSYNC_A : VSYNC_B),
+ REG_READ(pipeA ? DSPASTRIDE : DSPBSTRIDE),
+ REG_READ(pipeA ? DSPASIZE : DSPBSIZE),
+ REG_READ(pipeA ? DSPAPOS : DSPBPOS),
+ REG_READ(pipeA ? DSPABASE : DSPBBASE)
+ );
+
+ DRM_DEBUG(
+ "saved: (%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
+ crtc_state->saveDSPCNTR,
+ crtc_state->savePIPECONF,
+ crtc_state->savePIPESRC,
+ crtc_state->saveFP0,
+ crtc_state->saveFP1,
+ crtc_state->saveDPLL,
+ crtc_state->saveHTOTAL,
+ crtc_state->saveHBLANK,
+ crtc_state->saveHSYNC,
+ crtc_state->saveVTOTAL,
+ crtc_state->saveVBLANK,
+ crtc_state->saveVSYNC,
+ crtc_state->saveDSPSTRIDE,
+ crtc_state->saveDSPSIZE,
+ crtc_state->saveDSPPOS,
+ crtc_state->saveDSPBASE
+ );
+
+
+ if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) {
+ REG_WRITE(pipeA ? DPLL_A : DPLL_B,
+ crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
+ REG_READ(pipeA ? DPLL_A : DPLL_B);
+ DRM_DEBUG("write dpll: %x\n",
+ REG_READ(pipeA ? DPLL_A : DPLL_B));
+ udelay(150);
+ }
+
+ REG_WRITE(pipeA ? FPA0 : FPB0, crtc_state->saveFP0);
+ REG_READ(pipeA ? FPA0 : FPB0);
+
+ REG_WRITE(pipeA ? FPA1 : FPB1, crtc_state->saveFP1);
+ REG_READ(pipeA ? FPA1 : FPB1);
+
+ REG_WRITE(pipeA ? DPLL_A : DPLL_B, crtc_state->saveDPLL);
+ REG_READ(pipeA ? DPLL_A : DPLL_B);
+ udelay(150);
+
+ REG_WRITE(pipeA ? HTOTAL_A : HTOTAL_B, crtc_state->saveHTOTAL);
+ REG_WRITE(pipeA ? HBLANK_A : HBLANK_B, crtc_state->saveHBLANK);
+ REG_WRITE(pipeA ? HSYNC_A : HSYNC_B, crtc_state->saveHSYNC);
+ REG_WRITE(pipeA ? VTOTAL_A : VTOTAL_B, crtc_state->saveVTOTAL);
+ REG_WRITE(pipeA ? VBLANK_A : VBLANK_B, crtc_state->saveVBLANK);
+ REG_WRITE(pipeA ? VSYNC_A : VSYNC_B, crtc_state->saveVSYNC);
+ REG_WRITE(pipeA ? DSPASTRIDE : DSPBSTRIDE, crtc_state->saveDSPSTRIDE);
+
+ REG_WRITE(pipeA ? DSPASIZE : DSPBSIZE, crtc_state->saveDSPSIZE);
+ REG_WRITE(pipeA ? DSPAPOS : DSPBPOS, crtc_state->saveDSPPOS);
+
+ REG_WRITE(pipeA ? PIPEASRC : PIPEBSRC, crtc_state->savePIPESRC);
+ REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE);
+ REG_WRITE(pipeA ? PIPEACONF : PIPEBCONF, crtc_state->savePIPECONF);
+
+ cdv_intel_wait_for_vblank(dev);
+
+ REG_WRITE(pipeA ? DSPACNTR : DSPBCNTR, crtc_state->saveDSPCNTR);
+ REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE);
+
+ cdv_intel_wait_for_vblank(dev);
+
+ paletteReg = pipeA ? PALETTE_A : PALETTE_B;
+ for (i = 0; i < 256; ++i)
+ REG_WRITE(paletteReg + (i << 2), crtc_state->savePalette[i]);
+}
+
+static int cdv_intel_crtc_cursor_set(struct drm_crtc *crtc,
+ struct drm_file *file_priv,
+ uint32_t handle,
+ uint32_t width, uint32_t height)
+{
+ struct drm_device *dev = crtc->dev;
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ int pipe = psb_intel_crtc->pipe;
+ uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
+ uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
+ uint32_t temp;
+ size_t addr = 0;
+ struct gtt_range *gt;
+ struct drm_gem_object *obj;
+ int ret;
+
+	/* If we want to turn off the cursor, ignore width and height */
+ if (!handle) {
+ /* turn off the cursor */
+ temp = CURSOR_MODE_DISABLE;
+
+ if (gma_power_begin(dev, false)) {
+ REG_WRITE(control, temp);
+ REG_WRITE(base, 0);
+ gma_power_end(dev);
+ }
+
+ /* unpin the old GEM object */
+ if (psb_intel_crtc->cursor_obj) {
+ gt = container_of(psb_intel_crtc->cursor_obj,
+ struct gtt_range, gem);
+ psb_gtt_unpin(gt);
+ drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
+ psb_intel_crtc->cursor_obj = NULL;
+ }
+
+ return 0;
+ }
+
+ /* Currently we only support 64x64 cursors */
+ if (width != 64 || height != 64) {
+ dev_dbg(dev->dev, "we currently only support 64x64 cursors\n");
+ return -EINVAL;
+ }
+
+ obj = drm_gem_object_lookup(dev, file_priv, handle);
+ if (!obj)
+ return -ENOENT;
+
+ if (obj->size < width * height * 4) {
+		dev_dbg(dev->dev, "buffer is too small\n");
+ return -ENOMEM;
+ }
+
+ gt = container_of(obj, struct gtt_range, gem);
+
+ /* Pin the memory into the GTT */
+ ret = psb_gtt_pin(gt);
+ if (ret) {
+ dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle);
+ return ret;
+ }
+
+ addr = gt->offset; /* Or resource.start ??? */
+
+ psb_intel_crtc->cursor_addr = addr;
+
+ temp = 0;
+ /* set the pipe for the cursor */
+ temp |= (pipe << 28);
+ temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
+
+ if (gma_power_begin(dev, false)) {
+ REG_WRITE(control, temp);
+ REG_WRITE(base, addr);
+ gma_power_end(dev);
+ }
+
+	/* unpin the old GEM object */
+	if (psb_intel_crtc->cursor_obj) {
+		gt = container_of(psb_intel_crtc->cursor_obj,
+							struct gtt_range, gem);
+		psb_gtt_unpin(gt);
+		drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
+	}
+	/* remember the new cursor object so it can be unpinned later */
+	psb_intel_crtc->cursor_obj = obj;
+ return 0;
+}
+
+static int cdv_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+{
+ struct drm_device *dev = crtc->dev;
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ int pipe = psb_intel_crtc->pipe;
+ uint32_t temp = 0;
+ uint32_t adder;
+
+
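+	/*
+	 * The cursor position registers use sign-magnitude encoding: the
+	 * magnitude of each coordinate goes in the low bits of its 16-bit
+	 * half, with a separate per-axis sign bit for negative positions.
+	 */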
+ if (x < 0) {
+ temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT);
+ x = -x;
+ }
+ if (y < 0) {
+ temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT);
+ y = -y;
+ }
+
+ temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT);
+ temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);
+
+ adder = psb_intel_crtc->cursor_addr;
+
+ if (gma_power_begin(dev, false)) {
+ REG_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
+ REG_WRITE((pipe == 0) ? CURABASE : CURBBASE, adder);
+ gma_power_end(dev);
+ }
+ return 0;
+}
+
+static void cdv_intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
+ u16 *green, u16 *blue, uint32_t start, uint32_t size)
+{
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ int i;
+ int end = (start + size > 256) ? 256 : start + size;
+
+ for (i = start; i < end; i++) {
+ psb_intel_crtc->lut_r[i] = red[i] >> 8;
+ psb_intel_crtc->lut_g[i] = green[i] >> 8;
+ psb_intel_crtc->lut_b[i] = blue[i] >> 8;
+ }
+
+ cdv_intel_crtc_load_lut(crtc);
+}
+
+static int cdv_crtc_set_config(struct drm_mode_set *set)
+{
+ int ret = 0;
+ struct drm_device *dev = set->crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ if (!dev_priv->rpm_enabled)
+ return drm_crtc_helper_set_config(set);
+
+ pm_runtime_forbid(&dev->pdev->dev);
+
+ ret = drm_crtc_helper_set_config(set);
+
+ pm_runtime_allow(&dev->pdev->dev);
+
+ return ret;
+}
+
+/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
+
+/* FIXME: why are we using this, should it be cdv_ in this tree ? */
+
+static void i8xx_clock(int refclk, struct cdv_intel_clock_t *clock)
+{
+ clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
+ clock->p = clock->p1 * clock->p2;
+ clock->vco = refclk * clock->m / (clock->n + 2);
+ clock->dot = clock->vco / clock->p;
+}
+
+/* Returns the clock of the currently programmed mode of the given pipe. */
+static int cdv_intel_crtc_clock_get(struct drm_device *dev,
+ struct drm_crtc *crtc)
+{
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ int pipe = psb_intel_crtc->pipe;
+ u32 dpll;
+ u32 fp;
+ struct cdv_intel_clock_t clock;
+ bool is_lvds;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ if (gma_power_begin(dev, false)) {
+ dpll = REG_READ((pipe == 0) ? DPLL_A : DPLL_B);
+ if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
+ fp = REG_READ((pipe == 0) ? FPA0 : FPB0);
+ else
+ fp = REG_READ((pipe == 0) ? FPA1 : FPB1);
+ is_lvds = (pipe == 1) && (REG_READ(LVDS) & LVDS_PORT_EN);
+ gma_power_end(dev);
+ } else {
+ dpll = (pipe == 0) ?
+ dev_priv->saveDPLL_A : dev_priv->saveDPLL_B;
+
+ if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
+ fp = (pipe == 0) ?
+ dev_priv->saveFPA0 :
+ dev_priv->saveFPB0;
+ else
+ fp = (pipe == 0) ?
+ dev_priv->saveFPA1 :
+ dev_priv->saveFPB1;
+
+ is_lvds = (pipe == 1) && (dev_priv->saveLVDS & LVDS_PORT_EN);
+ }
+
+ clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
+ clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
+ clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
+
+ if (is_lvds) {
+ clock.p1 =
+ ffs((dpll &
+ DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
+ DPLL_FPA01_P1_POST_DIV_SHIFT);
+ if (clock.p1 == 0) {
+ clock.p1 = 4;
+ dev_err(dev->dev, "PLL %d\n", dpll);
+ }
+ clock.p2 = 14;
+
+ if ((dpll & PLL_REF_INPUT_MASK) ==
+ PLLB_REF_INPUT_SPREADSPECTRUMIN) {
+ /* XXX: might not be 66MHz */
+ i8xx_clock(66000, &clock);
+ } else
+ i8xx_clock(48000, &clock);
+ } else {
+ if (dpll & PLL_P1_DIVIDE_BY_TWO)
+ clock.p1 = 2;
+ else {
+ clock.p1 =
+ ((dpll &
+ DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
+ DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
+ }
+ if (dpll & PLL_P2_DIVIDE_BY_4)
+ clock.p2 = 4;
+ else
+ clock.p2 = 2;
+
+ i8xx_clock(48000, &clock);
+ }
+
+ /* XXX: It would be nice to validate the clocks, but we can't reuse
+ * i830PllIsValid() because it relies on the xf86_config connector
+ * configuration being accurate, which it isn't necessarily.
+ */
+
+ return clock.dot;
+}
+
+/** Returns the currently programmed mode of the given pipe. */
+struct drm_display_mode *cdv_intel_crtc_mode_get(struct drm_device *dev,
+ struct drm_crtc *crtc)
+{
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ int pipe = psb_intel_crtc->pipe;
+ struct drm_display_mode *mode;
+ int htot;
+ int hsync;
+ int vtot;
+ int vsync;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ if (gma_power_begin(dev, false)) {
+ htot = REG_READ((pipe == 0) ? HTOTAL_A : HTOTAL_B);
+ hsync = REG_READ((pipe == 0) ? HSYNC_A : HSYNC_B);
+ vtot = REG_READ((pipe == 0) ? VTOTAL_A : VTOTAL_B);
+ vsync = REG_READ((pipe == 0) ? VSYNC_A : VSYNC_B);
+ gma_power_end(dev);
+ } else {
+ htot = (pipe == 0) ?
+ dev_priv->saveHTOTAL_A : dev_priv->saveHTOTAL_B;
+ hsync = (pipe == 0) ?
+ dev_priv->saveHSYNC_A : dev_priv->saveHSYNC_B;
+ vtot = (pipe == 0) ?
+ dev_priv->saveVTOTAL_A : dev_priv->saveVTOTAL_B;
+ vsync = (pipe == 0) ?
+ dev_priv->saveVSYNC_A : dev_priv->saveVSYNC_B;
+ }
+
+ mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+ if (!mode)
+ return NULL;
+
+ mode->clock = cdv_intel_crtc_clock_get(dev, crtc);
+ mode->hdisplay = (htot & 0xffff) + 1;
+ mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
+ mode->hsync_start = (hsync & 0xffff) + 1;
+ mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
+ mode->vdisplay = (vtot & 0xffff) + 1;
+ mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
+ mode->vsync_start = (vsync & 0xffff) + 1;
+ mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
+
+ drm_mode_set_name(mode);
+ drm_mode_set_crtcinfo(mode, 0);
+
+ return mode;
+}
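+
+/*
+ * To illustrate the decoding above with a made-up register value:
+ * htot = 0x053f04ff gives hdisplay = 0x04ff + 1 = 1280 and
+ * htotal = (0x053f04ff >> 16) + 1 = 0x0540 = 1344, i.e. a 1280 pixel
+ * wide mode with a 1344 pixel total horizontal period.
+ */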
+
+static void cdv_intel_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+
+ kfree(psb_intel_crtc->crtc_state);
+ drm_crtc_cleanup(crtc);
+ kfree(psb_intel_crtc);
+}
+
+const struct drm_crtc_helper_funcs cdv_intel_helper_funcs = {
+ .dpms = cdv_intel_crtc_dpms,
+ .mode_fixup = cdv_intel_crtc_mode_fixup,
+ .mode_set = cdv_intel_crtc_mode_set,
+ .mode_set_base = cdv_intel_pipe_set_base,
+ .prepare = cdv_intel_crtc_prepare,
+ .commit = cdv_intel_crtc_commit,
+};
+
+const struct drm_crtc_funcs cdv_intel_crtc_funcs = {
+ .save = cdv_intel_crtc_save,
+ .restore = cdv_intel_crtc_restore,
+ .cursor_set = cdv_intel_crtc_cursor_set,
+ .cursor_move = cdv_intel_crtc_cursor_move,
+ .gamma_set = cdv_intel_crtc_gamma_set,
+ .set_config = cdv_crtc_set_config,
+ .destroy = cdv_intel_crtc_destroy,
+};
+
+/*
+ * Set the default values of the cursor control and base registers
+ * to zero. This is a workaround for a h/w defect on Oaktrail.
+ */
+void cdv_intel_cursor_init(struct drm_device *dev, int pipe)
+{
+ uint32_t control;
+ uint32_t base;
+
+ switch (pipe) {
+ case 0:
+ control = CURACNTR;
+ base = CURABASE;
+ break;
+ case 1:
+ control = CURBCNTR;
+ base = CURBBASE;
+ break;
+ case 2:
+ control = CURCCNTR;
+ base = CURCBASE;
+ break;
+ default:
+ return;
+ }
+
+ REG_WRITE(control, 0);
+ REG_WRITE(base, 0);
+}
+
diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
new file mode 100644
index 000000000000..de25560e629d
--- /dev/null
+++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
@@ -0,0 +1,392 @@
+/*
+ * Copyright © 2006-2011 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * jim liu <jim.liu@intel.com>
+ *
+ * FIXME:
+ * We should probably make this generic and share it with Medfield
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+#include "psb_intel_drv.h"
+#include "psb_drv.h"
+#include "psb_intel_reg.h"
+#include <linux/pm_runtime.h>
+
+/* hdmi control bits */
+#define HDMI_NULL_PACKETS_DURING_VSYNC (1 << 9)
+#define HDMI_BORDER_ENABLE (1 << 7)
+#define HDMI_AUDIO_ENABLE (1 << 6)
+#define HDMI_VSYNC_ACTIVE_HIGH (1 << 4)
+#define HDMI_HSYNC_ACTIVE_HIGH (1 << 3)
+/* hdmi-b control bits */
+#define HDMIB_PIPE_B_SELECT (1 << 30)
+
+
+struct mid_intel_hdmi_priv {
+ u32 hdmi_reg;
+ u32 save_HDMIB;
+ bool has_hdmi_sink;
+ bool has_hdmi_audio;
+ /* Should be set when hotplug is detected */
+ bool hdmi_device_connected;
+ struct mdfld_hdmi_i2c *i2c_bus;
+ struct i2c_adapter *hdmi_i2c_adapter; /* for control functions */
+ struct drm_device *dev;
+};
+
+static void cdv_hdmi_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct psb_intel_encoder *psb_intel_encoder = to_psb_intel_encoder(encoder);
+ struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv;
+ u32 hdmib;
+ struct drm_crtc *crtc = encoder->crtc;
+ struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc);
+
+ hdmib = (2 << 10);
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ hdmib |= HDMI_VSYNC_ACTIVE_HIGH;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ hdmib |= HDMI_HSYNC_ACTIVE_HIGH;
+
+ if (intel_crtc->pipe == 1)
+ hdmib |= HDMIB_PIPE_B_SELECT;
+
+ if (hdmi_priv->has_hdmi_audio) {
+ hdmib |= HDMI_AUDIO_ENABLE;
+ hdmib |= HDMI_NULL_PACKETS_DURING_VSYNC;
+ }
+
+ REG_WRITE(hdmi_priv->hdmi_reg, hdmib);
+ REG_READ(hdmi_priv->hdmi_reg);
+}
+
+static bool cdv_hdmi_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void cdv_hdmi_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct psb_intel_encoder *psb_intel_encoder =
+ to_psb_intel_encoder(encoder);
+ struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv;
+ u32 hdmib;
+
+ hdmib = REG_READ(hdmi_priv->hdmi_reg);
+
+ if (mode != DRM_MODE_DPMS_ON)
+ REG_WRITE(hdmi_priv->hdmi_reg, hdmib & ~HDMIB_PORT_EN);
+ else
+ REG_WRITE(hdmi_priv->hdmi_reg, hdmib | HDMIB_PORT_EN);
+ REG_READ(hdmi_priv->hdmi_reg);
+}
+
+static void cdv_hdmi_save(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct psb_intel_encoder *psb_intel_encoder =
+ psb_intel_attached_encoder(connector);
+ struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv;
+
+ hdmi_priv->save_HDMIB = REG_READ(hdmi_priv->hdmi_reg);
+}
+
+static void cdv_hdmi_restore(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct psb_intel_encoder *psb_intel_encoder =
+ psb_intel_attached_encoder(connector);
+ struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv;
+
+ REG_WRITE(hdmi_priv->hdmi_reg, hdmi_priv->save_HDMIB);
+ REG_READ(hdmi_priv->hdmi_reg);
+}
+
+static enum drm_connector_status cdv_hdmi_detect(
+ struct drm_connector *connector, bool force)
+{
+ struct psb_intel_encoder *psb_intel_encoder =
+ psb_intel_attached_encoder(connector);
+ struct psb_intel_connector *psb_intel_connector =
+ to_psb_intel_connector(connector);
+ struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv;
+ struct edid *edid = NULL;
+ enum drm_connector_status status = connector_status_disconnected;
+
+ edid = drm_get_edid(connector, &psb_intel_encoder->i2c_bus->adapter);
+
+ hdmi_priv->has_hdmi_sink = false;
+ hdmi_priv->has_hdmi_audio = false;
+ if (edid) {
+ if (edid->input & DRM_EDID_INPUT_DIGITAL) {
+ status = connector_status_connected;
+ hdmi_priv->has_hdmi_sink =
+ drm_detect_hdmi_monitor(edid);
+ hdmi_priv->has_hdmi_audio =
+ drm_detect_monitor_audio(edid);
+ }
+
+ psb_intel_connector->base.display_info.raw_edid = NULL;
+ kfree(edid);
+ }
+ return status;
+}
+
+static int cdv_hdmi_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t value)
+{
+ struct drm_encoder *encoder = connector->encoder;
+
+ if (!strcmp(property->name, "scaling mode") && encoder) {
+ struct psb_intel_crtc *crtc = to_psb_intel_crtc(encoder->crtc);
+ bool centre;
+ uint64_t curValue;
+
+ if (!crtc)
+ return -1;
+
+ switch (value) {
+ case DRM_MODE_SCALE_FULLSCREEN:
+ break;
+ case DRM_MODE_SCALE_NO_SCALE:
+ break;
+ case DRM_MODE_SCALE_ASPECT:
+ break;
+ default:
+ return -1;
+ }
+
+ if (drm_connector_property_get_value(connector,
+ property, &curValue))
+ return -1;
+
+ if (curValue == value)
+ return 0;
+
+ if (drm_connector_property_set_value(connector,
+ property, value))
+ return -1;
+
+ centre = (curValue == DRM_MODE_SCALE_NO_SCALE) ||
+ (value == DRM_MODE_SCALE_NO_SCALE);
+
+ if (crtc->saved_mode.hdisplay != 0 &&
+ crtc->saved_mode.vdisplay != 0) {
+ if (centre) {
+ if (!drm_crtc_helper_set_mode(encoder->crtc, &crtc->saved_mode,
+ encoder->crtc->x, encoder->crtc->y, encoder->crtc->fb))
+ return -1;
+ } else {
+ struct drm_encoder_helper_funcs *helpers
+ = encoder->helper_private;
+ helpers->mode_set(encoder, &crtc->saved_mode,
+ &crtc->saved_adjusted_mode);
+ }
+ }
+ }
+ return 0;
+}
+
+/*
+ * Return the list of HDMI DDC modes if available.
+ */
+static int cdv_hdmi_get_modes(struct drm_connector *connector)
+{
+ struct psb_intel_encoder *psb_intel_encoder =
+ psb_intel_attached_encoder(connector);
+ struct edid *edid = NULL;
+ int ret = 0;
+
+ edid = drm_get_edid(connector, &psb_intel_encoder->i2c_bus->adapter);
+ if (edid) {
+ drm_mode_connector_update_edid_property(connector, edid);
+ ret = drm_add_edid_modes(connector, edid);
+ kfree(edid);
+ }
+ return ret;
+}
+
+static int cdv_hdmi_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct drm_psb_private *dev_priv = connector->dev->dev_private;
+
+ if (mode->clock > 165000)
+ return MODE_CLOCK_HIGH;
+ if (mode->clock < 20000)
+ return MODE_CLOCK_LOW;
+
+ /* just in case */
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
+ /* just in case */
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ return MODE_NO_INTERLACE;
+
+ /* We assume worst case scenario of 32 bpp here, since we don't know */
+ if ((ALIGN(mode->hdisplay * 4, 64) * mode->vdisplay) >
+ dev_priv->vram_stolen_size)
+ return MODE_MEM;
+
+ return MODE_OK;
+}
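+
+/*
+ * For example, assuming the worst case 32 bpp above, a 1920x1080 mode
+ * needs ALIGN(1920 * 4, 64) * 1080 = 7680 * 1080 = 8294400 bytes, so
+ * roughly 8 MB of stolen memory must be available for it to be accepted.
+ */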
+
+static void cdv_hdmi_destroy(struct drm_connector *connector)
+{
+ struct psb_intel_encoder *psb_intel_encoder =
+ psb_intel_attached_encoder(connector);
+
+ if (psb_intel_encoder->i2c_bus)
+ psb_intel_i2c_destroy(psb_intel_encoder->i2c_bus);
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
+
+static const struct drm_encoder_helper_funcs cdv_hdmi_helper_funcs = {
+ .dpms = cdv_hdmi_dpms,
+ .mode_fixup = cdv_hdmi_mode_fixup,
+ .prepare = psb_intel_encoder_prepare,
+ .mode_set = cdv_hdmi_mode_set,
+ .commit = psb_intel_encoder_commit,
+};
+
+static const struct drm_connector_helper_funcs
+ cdv_hdmi_connector_helper_funcs = {
+ .get_modes = cdv_hdmi_get_modes,
+ .mode_valid = cdv_hdmi_mode_valid,
+ .best_encoder = psb_intel_best_encoder,
+};
+
+static const struct drm_connector_funcs cdv_hdmi_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .save = cdv_hdmi_save,
+ .restore = cdv_hdmi_restore,
+ .detect = cdv_hdmi_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = cdv_hdmi_set_property,
+ .destroy = cdv_hdmi_destroy,
+};
+
+void cdv_hdmi_init(struct drm_device *dev,
+ struct psb_intel_mode_device *mode_dev, int reg)
+{
+ struct psb_intel_encoder *psb_intel_encoder;
+ struct psb_intel_connector *psb_intel_connector;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+ struct mid_intel_hdmi_priv *hdmi_priv;
+ int ddc_bus;
+
+ psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder),
+ GFP_KERNEL);
+
+ if (!psb_intel_encoder)
+ return;
+
+ psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector),
+ GFP_KERNEL);
+
+ if (!psb_intel_connector)
+ goto err_connector;
+
+ hdmi_priv = kzalloc(sizeof(struct mid_intel_hdmi_priv), GFP_KERNEL);
+
+ if (!hdmi_priv)
+ goto err_priv;
+
+ connector = &psb_intel_connector->base;
+ encoder = &psb_intel_encoder->base;
+ drm_connector_init(dev, connector,
+ &cdv_hdmi_connector_funcs,
+ DRM_MODE_CONNECTOR_DVID);
+
+ drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs,
+ DRM_MODE_ENCODER_TMDS);
+
+ psb_intel_connector_attach_encoder(psb_intel_connector,
+ psb_intel_encoder);
+ psb_intel_encoder->type = INTEL_OUTPUT_HDMI;
+ hdmi_priv->hdmi_reg = reg;
+ hdmi_priv->has_hdmi_sink = false;
+ psb_intel_encoder->dev_priv = hdmi_priv;
+
+ drm_encoder_helper_add(encoder, &cdv_hdmi_helper_funcs);
+ drm_connector_helper_add(connector,
+ &cdv_hdmi_connector_helper_funcs);
+ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+
+ drm_connector_attach_property(connector,
+ dev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_FULLSCREEN);
+
+ switch (reg) {
+ case SDVOB:
+ ddc_bus = GPIOE;
+ break;
+ case SDVOC:
+ ddc_bus = GPIOD;
+ break;
+ default:
+ DRM_ERROR("unknown reg 0x%x for HDMI\n", reg);
+ goto failed_ddc;
+ }
+
+ psb_intel_encoder->i2c_bus = psb_intel_i2c_create(dev,
+ ddc_bus, (reg == SDVOB) ? "HDMIB" : "HDMIC");
+
+ if (!psb_intel_encoder->i2c_bus) {
+ dev_err(dev->dev, "No ddc adapter available!\n");
+ goto failed_ddc;
+ }
+
+ hdmi_priv->hdmi_i2c_adapter =
+ &(psb_intel_encoder->i2c_bus->adapter);
+ hdmi_priv->dev = dev;
+ drm_sysfs_connector_add(connector);
+ return;
+
+failed_ddc:
+ drm_encoder_cleanup(encoder);
+ drm_connector_cleanup(connector);
+err_priv:
+ kfree(psb_intel_connector);
+err_connector:
+ kfree(psb_intel_encoder);
+}
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
new file mode 100644
index 000000000000..50e744be9852
--- /dev/null
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -0,0 +1,732 @@
+/*
+ * Copyright © 2006-2011 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Dave Airlie <airlied@linux.ie>
+ * Jesse Barnes <jesse.barnes@intel.com>
+ */
+
+#include <linux/i2c.h>
+#include <linux/dmi.h>
+#include <drm/drmP.h>
+
+#include "intel_bios.h"
+#include "psb_drv.h"
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "power.h"
+#include <linux/pm_runtime.h>
+#include "cdv_device.h"
+
+/**
+ * LVDS I2C backlight control macros
+ */
+#define BRIGHTNESS_MAX_LEVEL 100
+#define BRIGHTNESS_MASK 0xFF
+#define BLC_I2C_TYPE 0x01
+#define BLC_PWM_TYPT 0x02
+
+#define BLC_POLARITY_NORMAL 0
+#define BLC_POLARITY_INVERSE 1
+
+#define PSB_BLC_MAX_PWM_REG_FREQ (0xFFFE)
+#define PSB_BLC_MIN_PWM_REG_FREQ (0x2)
+#define PSB_BLC_PWM_PRECISION_FACTOR (10)
+#define PSB_BACKLIGHT_PWM_CTL_SHIFT (16)
+#define PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
+
+struct cdv_intel_lvds_priv {
+ /**
+ * Saved LVDS output states
+ */
+ uint32_t savePP_ON;
+ uint32_t savePP_OFF;
+ uint32_t saveLVDS;
+ uint32_t savePP_CONTROL;
+ uint32_t savePP_CYCLE;
+ uint32_t savePFIT_CONTROL;
+ uint32_t savePFIT_PGM_RATIOS;
+ uint32_t saveBLC_PWM_CTL;
+};
+
+/*
+ * Returns the maximum level of the backlight duty cycle field.
+ */
+static u32 cdv_intel_lvds_get_max_backlight(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ u32 retval;
+
+ if (gma_power_begin(dev, false)) {
+ retval = ((REG_READ(BLC_PWM_CTL) &
+ BACKLIGHT_MODULATION_FREQ_MASK) >>
+ BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
+
+ gma_power_end(dev);
+ } else
+ retval = ((dev_priv->saveBLC_PWM_CTL &
+ BACKLIGHT_MODULATION_FREQ_MASK) >>
+ BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
+
+ return retval;
+}
+
+/*
+ * Set LVDS backlight level by I2C command
+ */
+static int cdv_lvds_i2c_set_brightness(struct drm_device *dev,
+ unsigned int level)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_i2c_chan *lvds_i2c_bus = dev_priv->lvds_i2c_bus;
+ u8 out_buf[2];
+ unsigned int blc_i2c_brightness;
+
+ struct i2c_msg msgs[] = {
+ {
+ .addr = lvds_i2c_bus->slave_addr,
+ .flags = 0,
+ .len = 2,
+ .buf = out_buf,
+ }
+ };
+
+ blc_i2c_brightness = BRIGHTNESS_MASK & ((unsigned int)level *
+ BRIGHTNESS_MASK /
+ BRIGHTNESS_MAX_LEVEL);
+
+ if (dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE)
+ blc_i2c_brightness = BRIGHTNESS_MASK - blc_i2c_brightness;
+
+ out_buf[0] = dev_priv->lvds_bl->brightnesscmd;
+ out_buf[1] = (u8)blc_i2c_brightness;
+
+ if (i2c_transfer(&lvds_i2c_bus->adapter, msgs, 1) == 1)
+ return 0;
+
+ DRM_ERROR("I2C transfer error\n");
+ return -1;
+}
+
+
+static int cdv_lvds_pwm_set_brightness(struct drm_device *dev, int level)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ u32 max_pwm_blc;
+ u32 blc_pwm_duty_cycle;
+
+ max_pwm_blc = cdv_intel_lvds_get_max_backlight(dev);
+
+ /* BLC_PWM_CTL should be initialised during backlight device init */
+ BUG_ON((max_pwm_blc & PSB_BLC_MAX_PWM_REG_FREQ) == 0);
+
+ blc_pwm_duty_cycle = level * max_pwm_blc / BRIGHTNESS_MAX_LEVEL;
+
+ if (dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE)
+ blc_pwm_duty_cycle = max_pwm_blc - blc_pwm_duty_cycle;
+
+ blc_pwm_duty_cycle &= PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR;
+ REG_WRITE(BLC_PWM_CTL,
+ (max_pwm_blc << PSB_BACKLIGHT_PWM_CTL_SHIFT) |
+ (blc_pwm_duty_cycle));
+
+ return 0;
+}
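+
+/*
+ * Worked example for the PWM path (the numbers are illustrative only):
+ * with max_pwm_blc = 4096 and level = 50 the duty cycle becomes
+ * 50 * 4096 / 100 = 2048; clearing the polarity bit leaves it at 2048
+ * and BLC_PWM_CTL is written as (4096 << 16) | 2048.
+ */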
+
+/*
+ * Set LVDS backlight level either by I2C or PWM
+ */
+void cdv_intel_lvds_set_brightness(struct drm_device *dev, int level)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ if (!dev_priv->lvds_bl) {
+ DRM_ERROR("NO LVDS Backlight Info\n");
+ return;
+ }
+
+ if (dev_priv->lvds_bl->type == BLC_I2C_TYPE)
+ cdv_lvds_i2c_set_brightness(dev, level);
+ else
+ cdv_lvds_pwm_set_brightness(dev, level);
+}
+
+/**
+ * cdv_intel_lvds_set_backlight - set the backlight level
+ * @dev: drm device
+ * @level: backlight level, from 0 to cdv_intel_lvds_get_max_backlight()
+ */
+static void cdv_intel_lvds_set_backlight(struct drm_device *dev, int level)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ u32 blc_pwm_ctl;
+
+ if (gma_power_begin(dev, false)) {
+ blc_pwm_ctl =
+ REG_READ(BLC_PWM_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+ REG_WRITE(BLC_PWM_CTL,
+ (blc_pwm_ctl |
+ (level << BACKLIGHT_DUTY_CYCLE_SHIFT)));
+ gma_power_end(dev);
+ } else {
+ blc_pwm_ctl = dev_priv->saveBLC_PWM_CTL &
+ ~BACKLIGHT_DUTY_CYCLE_MASK;
+ dev_priv->saveBLC_PWM_CTL = (blc_pwm_ctl |
+ (level << BACKLIGHT_DUTY_CYCLE_SHIFT));
+ }
+}
+
+/**
+ * Sets the power state for the panel.
+ */
+static void cdv_intel_lvds_set_power(struct drm_device *dev,
+ struct drm_encoder *encoder, bool on)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ u32 pp_status;
+
+ if (!gma_power_begin(dev, true))
+ return;
+
+ if (on) {
+ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
+ POWER_TARGET_ON);
+ do {
+ pp_status = REG_READ(PP_STATUS);
+ } while ((pp_status & PP_ON) == 0);
+
+ cdv_intel_lvds_set_backlight(dev,
+ dev_priv->mode_dev.backlight_duty_cycle);
+ } else {
+ cdv_intel_lvds_set_backlight(dev, 0);
+
+ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
+ ~POWER_TARGET_ON);
+ do {
+ pp_status = REG_READ(PP_STATUS);
+ } while (pp_status & PP_ON);
+ }
+ gma_power_end(dev);
+}
+
+static void cdv_intel_lvds_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ if (mode == DRM_MODE_DPMS_ON)
+ cdv_intel_lvds_set_power(dev, encoder, true);
+ else
+ cdv_intel_lvds_set_power(dev, encoder, false);
+ /* XXX: We never power down the LVDS pairs. */
+}
+
+static void cdv_intel_lvds_save(struct drm_connector *connector)
+{
+}
+
+static void cdv_intel_lvds_restore(struct drm_connector *connector)
+{
+}
+
+int cdv_intel_lvds_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_display_mode *fixed_mode =
+ dev_priv->mode_dev.panel_fixed_mode;
+
+ /* just in case */
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
+ /* just in case */
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ return MODE_NO_INTERLACE;
+
+ if (fixed_mode) {
+ if (mode->hdisplay > fixed_mode->hdisplay)
+ return MODE_PANEL;
+ if (mode->vdisplay > fixed_mode->vdisplay)
+ return MODE_PANEL;
+ }
+ return MODE_OK;
+}
+
+bool cdv_intel_lvds_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+ struct drm_encoder *tmp_encoder;
+ struct drm_display_mode *panel_fixed_mode = mode_dev->panel_fixed_mode;
+
+ /* Should never happen!! */
+ list_for_each_entry(tmp_encoder, &dev->mode_config.encoder_list,
+ head) {
+ if (tmp_encoder != encoder
+ && tmp_encoder->crtc == encoder->crtc) {
+ printk(KERN_ERR "Can't enable LVDS and another "
+ "encoder on the same pipe\n");
+ return false;
+ }
+ }
+
+ /*
+ * If we have timings from the BIOS for the panel, put them in
+ * to the adjusted mode. The CRTC will be set up for this mode,
+ * with the panel scaling set up to source from the H/VDisplay
+ * of the original mode.
+ */
+ if (panel_fixed_mode != NULL) {
+ adjusted_mode->hdisplay = panel_fixed_mode->hdisplay;
+ adjusted_mode->hsync_start = panel_fixed_mode->hsync_start;
+ adjusted_mode->hsync_end = panel_fixed_mode->hsync_end;
+ adjusted_mode->htotal = panel_fixed_mode->htotal;
+ adjusted_mode->vdisplay = panel_fixed_mode->vdisplay;
+ adjusted_mode->vsync_start = panel_fixed_mode->vsync_start;
+ adjusted_mode->vsync_end = panel_fixed_mode->vsync_end;
+ adjusted_mode->vtotal = panel_fixed_mode->vtotal;
+ adjusted_mode->clock = panel_fixed_mode->clock;
+ drm_mode_set_crtcinfo(adjusted_mode,
+ CRTC_INTERLACE_HALVE_V);
+ }
+
+ /*
+ * XXX: It would be nice to support lower refresh rates on the
+ * panels to reduce power consumption, and perhaps match the
+ * user's requested refresh rate.
+ */
+
+ return true;
+}
+
+static void cdv_intel_lvds_prepare(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+
+ if (!gma_power_begin(dev, true))
+ return;
+
+ mode_dev->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
+ mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL &
+ BACKLIGHT_DUTY_CYCLE_MASK);
+
+ cdv_intel_lvds_set_power(dev, encoder, false);
+
+ gma_power_end(dev);
+}
+
+static void cdv_intel_lvds_commit(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+
+ if (mode_dev->backlight_duty_cycle == 0)
+ mode_dev->backlight_duty_cycle =
+ cdv_intel_lvds_get_max_backlight(dev);
+
+ cdv_intel_lvds_set_power(dev, encoder, true);
+}
+
+static void cdv_intel_lvds_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ u32 pfit_control;
+
+ /*
+ * The LVDS pin pair will already have been turned on in the
+ * cdv_intel_crtc_mode_set since it has a large impact on the DPLL
+ * settings.
+ */
+
+ /*
+ * Enable automatic panel scaling so that non-native modes fill the
+ * screen. Should be enabled before the pipe is enabled, according to
+ * register description and PRM.
+ */
+ if (mode->hdisplay != adjusted_mode->hdisplay ||
+ mode->vdisplay != adjusted_mode->vdisplay)
+ pfit_control = (PFIT_ENABLE | VERT_AUTO_SCALE |
+ HORIZ_AUTO_SCALE | VERT_INTERP_BILINEAR |
+ HORIZ_INTERP_BILINEAR);
+ else
+ pfit_control = 0;
+
+ if (dev_priv->lvds_dither)
+ pfit_control |= PANEL_8TO6_DITHER_ENABLE;
+
+ REG_WRITE(PFIT_CONTROL, pfit_control);
+}
+
+/**
+ * Detect the LVDS connection.
+ *
+ * This always returns CONNECTOR_STATUS_CONNECTED. The connector should
+ * only have been set up if the LVDS was actually connected anyway.
+ */
+static enum drm_connector_status cdv_intel_lvds_detect(
+ struct drm_connector *connector, bool force)
+{
+ return connector_status_connected;
+}
+
+/**
+ * Return the list of DDC modes if available, or the BIOS fixed mode otherwise.
+ */
+static int cdv_intel_lvds_get_modes(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_encoder *psb_intel_encoder =
+ psb_intel_attached_encoder(connector);
+ struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+ int ret;
+
+ ret = psb_intel_ddc_get_modes(connector, &psb_intel_encoder->i2c_bus->adapter);
+
+ if (ret)
+ return ret;
+
+ /*
+ * We didn't get an EDID, so set wide sync ranges to ensure all
+ * modes are handed to mode_valid for checking.
+ */
+ connector->display_info.min_vfreq = 0;
+ connector->display_info.max_vfreq = 200;
+ connector->display_info.min_hfreq = 0;
+ connector->display_info.max_hfreq = 200;
+ if (mode_dev->panel_fixed_mode != NULL) {
+ struct drm_display_mode *mode =
+ drm_mode_duplicate(dev, mode_dev->panel_fixed_mode);
+ drm_mode_probed_add(connector, mode);
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * cdv_intel_lvds_destroy - unregister and free LVDS structures
+ * @connector: connector to free
+ *
+ * Unregister the DDC bus for this connector then free the driver private
+ * structure.
+ */
+void cdv_intel_lvds_destroy(struct drm_connector *connector)
+{
+ struct psb_intel_encoder *psb_intel_encoder =
+ psb_intel_attached_encoder(connector);
+
+ if (psb_intel_encoder->i2c_bus)
+ psb_intel_i2c_destroy(psb_intel_encoder->i2c_bus);
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
+
+int cdv_intel_lvds_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t value)
+{
+ struct drm_encoder *encoder = connector->encoder;
+
+ if (!strcmp(property->name, "scaling mode") && encoder) {
+ struct psb_intel_crtc *crtc =
+ to_psb_intel_crtc(encoder->crtc);
+ uint64_t curValue;
+
+ if (!crtc)
+ return -1;
+
+ switch (value) {
+ case DRM_MODE_SCALE_FULLSCREEN:
+ break;
+ case DRM_MODE_SCALE_NO_SCALE:
+ break;
+ case DRM_MODE_SCALE_ASPECT:
+ break;
+ default:
+ return -1;
+ }
+
+ if (drm_connector_property_get_value(connector,
+ property,
+ &curValue))
+ return -1;
+
+ if (curValue == value)
+ return 0;
+
+ if (drm_connector_property_set_value(connector,
+ property,
+ value))
+ return -1;
+
+ if (crtc->saved_mode.hdisplay != 0 &&
+ crtc->saved_mode.vdisplay != 0) {
+ if (!drm_crtc_helper_set_mode(encoder->crtc,
+ &crtc->saved_mode,
+ encoder->crtc->x,
+ encoder->crtc->y,
+ encoder->crtc->fb))
+ return -1;
+ }
+ } else if (!strcmp(property->name, "backlight") && encoder) {
+ if (drm_connector_property_set_value(connector,
+ property,
+ value))
+ return -1;
+ else {
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+ struct drm_psb_private *dev_priv =
+ encoder->dev->dev_private;
+ struct backlight_device *bd =
+ dev_priv->backlight_device;
+ bd->props.brightness = value;
+ backlight_update_status(bd);
+#endif
+ }
+ } else if (!strcmp(property->name, "DPMS") && encoder) {
+ struct drm_encoder_helper_funcs *helpers =
+ encoder->helper_private;
+ helpers->dpms(encoder, value);
+ }
+ return 0;
+}
+
+static const struct drm_encoder_helper_funcs
+ cdv_intel_lvds_helper_funcs = {
+ .dpms = cdv_intel_lvds_encoder_dpms,
+ .mode_fixup = cdv_intel_lvds_mode_fixup,
+ .prepare = cdv_intel_lvds_prepare,
+ .mode_set = cdv_intel_lvds_mode_set,
+ .commit = cdv_intel_lvds_commit,
+};
+
+static const struct drm_connector_helper_funcs
+ cdv_intel_lvds_connector_helper_funcs = {
+ .get_modes = cdv_intel_lvds_get_modes,
+ .mode_valid = cdv_intel_lvds_mode_valid,
+ .best_encoder = psb_intel_best_encoder,
+};
+
+static const struct drm_connector_funcs cdv_intel_lvds_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .save = cdv_intel_lvds_save,
+ .restore = cdv_intel_lvds_restore,
+ .detect = cdv_intel_lvds_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = cdv_intel_lvds_set_property,
+ .destroy = cdv_intel_lvds_destroy,
+};
+
+
+static void cdv_intel_lvds_enc_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+}
+
+const struct drm_encoder_funcs cdv_intel_lvds_enc_funcs = {
+ .destroy = cdv_intel_lvds_enc_destroy,
+};
+
+/**
+ * cdv_intel_lvds_init - setup LVDS connectors on this device
+ * @dev: drm device
+ *
+ * Create the connector, register the LVDS DDC bus, and try to figure out what
+ * modes we can display on the LVDS panel (if present).
+ */
+void cdv_intel_lvds_init(struct drm_device *dev,
+ struct psb_intel_mode_device *mode_dev)
+{
+ struct psb_intel_encoder *psb_intel_encoder;
+ struct psb_intel_connector *psb_intel_connector;
+ struct cdv_intel_lvds_priv *lvds_priv;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+ struct drm_display_mode *scan;
+ struct drm_crtc *crtc;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ u32 lvds;
+ int pipe;
+
+ psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder),
+ GFP_KERNEL);
+ if (!psb_intel_encoder)
+ return;
+
+ psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector),
+ GFP_KERNEL);
+ if (!psb_intel_connector)
+ goto failed_connector;
+
+ lvds_priv = kzalloc(sizeof(struct cdv_intel_lvds_priv), GFP_KERNEL);
+ if (!lvds_priv)
+ goto failed_lvds_priv;
+
+ psb_intel_encoder->dev_priv = lvds_priv;
+
+ connector = &psb_intel_connector->base;
+ encoder = &psb_intel_encoder->base;
+
+
+ drm_connector_init(dev, connector,
+ &cdv_intel_lvds_connector_funcs,
+ DRM_MODE_CONNECTOR_LVDS);
+
+ drm_encoder_init(dev, encoder,
+ &cdv_intel_lvds_enc_funcs,
+ DRM_MODE_ENCODER_LVDS);
+
+
+ psb_intel_connector_attach_encoder(psb_intel_connector,
+ psb_intel_encoder);
+ psb_intel_encoder->type = INTEL_OUTPUT_LVDS;
+
+ drm_encoder_helper_add(encoder, &cdv_intel_lvds_helper_funcs);
+ drm_connector_helper_add(connector,
+ &cdv_intel_lvds_connector_helper_funcs);
+ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+
+ /*Attach connector properties*/
+ drm_connector_attach_property(connector,
+ dev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_FULLSCREEN);
+ drm_connector_attach_property(connector,
+ dev_priv->backlight_property,
+ BRIGHTNESS_MAX_LEVEL);
+
+ /**
+ * Set up I2C bus
+ * FIXME: destroy i2c_bus on exit
+ */
+ psb_intel_encoder->i2c_bus = psb_intel_i2c_create(dev,
+ GPIOB,
+ "LVDSBLC_B");
+ if (!psb_intel_encoder->i2c_bus) {
+ dev_printk(KERN_ERR,
+ &dev->pdev->dev, "I2C bus registration failed.\n");
+ goto failed_blc_i2c;
+ }
+ psb_intel_encoder->i2c_bus->slave_addr = 0x2C;
+ dev_priv->lvds_i2c_bus = psb_intel_encoder->i2c_bus;
+
+ /*
+ * LVDS discovery:
+ * 1) check for EDID on DDC
+ * 2) check for VBT data
+ * 3) check to see if LVDS is already on
+ * if none of the above, no panel
+ * 4) make sure lid is open
+ * if closed, act like it's not there for now
+ */
+
+ /* Set up the DDC bus. */
+ psb_intel_encoder->ddc_bus = psb_intel_i2c_create(dev,
+ GPIOC,
+ "LVDSDDC_C");
+ if (!psb_intel_encoder->ddc_bus) {
+ dev_printk(KERN_ERR, &dev->pdev->dev,
+ "DDC bus registration failed.\n");
+ goto failed_ddc;
+ }
+
+ /*
+ * Attempt to get the fixed panel mode from DDC. Assume that the
+ * preferred mode is the right one.
+ */
+ psb_intel_ddc_get_modes(connector,
+ &psb_intel_encoder->ddc_bus->adapter);
+ list_for_each_entry(scan, &connector->probed_modes, head) {
+ if (scan->type & DRM_MODE_TYPE_PREFERRED) {
+ mode_dev->panel_fixed_mode =
+ drm_mode_duplicate(dev, scan);
+ goto out; /* FIXME: check for quirks */
+ }
+ }
+
+ /* Failed to get EDID, what about VBT? do we need this?*/
+ if (dev_priv->lfp_lvds_vbt_mode) {
+ mode_dev->panel_fixed_mode =
+ drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
+ if (mode_dev->panel_fixed_mode) {
+ mode_dev->panel_fixed_mode->type |=
+ DRM_MODE_TYPE_PREFERRED;
+ goto out; /* FIXME: check for quirks */
+ }
+ }
+ /*
+ * If we didn't get EDID, try checking if the panel is already turned
+ * on. If so, assume that whatever is currently programmed is the
+ * correct mode.
+ */
+ lvds = REG_READ(LVDS);
+ pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
+ crtc = psb_intel_get_crtc_from_pipe(dev, pipe);
+
+ if (crtc && (lvds & LVDS_PORT_EN)) {
+ mode_dev->panel_fixed_mode =
+ cdv_intel_crtc_mode_get(dev, crtc);
+ if (mode_dev->panel_fixed_mode) {
+ mode_dev->panel_fixed_mode->type |=
+ DRM_MODE_TYPE_PREFERRED;
+ goto out; /* FIXME: check for quirks */
+ }
+ }
+
+ /* If we still don't have a mode after all that, give up. */
+ if (!mode_dev->panel_fixed_mode) {
+ DRM_DEBUG
+ ("Found no modes on the lvds, ignoring the LVDS\n");
+ goto failed_find;
+ }
+
+out:
+ drm_sysfs_connector_add(connector);
+ return;
+
+failed_find:
+ printk(KERN_ERR "Failed find\n");
+ if (psb_intel_encoder->ddc_bus)
+ psb_intel_i2c_destroy(psb_intel_encoder->ddc_bus);
+failed_ddc:
+ printk(KERN_ERR "Failed DDC\n");
+ if (psb_intel_encoder->i2c_bus)
+ psb_intel_i2c_destroy(psb_intel_encoder->i2c_bus);
+failed_blc_i2c:
+ printk(KERN_ERR "Failed BLC\n");
+ drm_encoder_cleanup(encoder);
+ drm_connector_cleanup(connector);
+ kfree(lvds_priv);
+failed_lvds_priv:
+ kfree(psb_intel_connector);
+failed_connector:
+ kfree(psb_intel_encoder);
+}
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
new file mode 100644
index 000000000000..791c0ef1a65b
--- /dev/null
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -0,0 +1,831 @@
+/**************************************************************************
+ * Copyright (c) 2007-2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/tty.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+#include <linux/console.h>
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_fb_helper.h>
+
+#include "psb_drv.h"
+#include "psb_intel_reg.h"
+#include "psb_intel_drv.h"
+#include "framebuffer.h"
+#include "gtt.h"
+
+static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb);
+static int psb_user_framebuffer_create_handle(struct drm_framebuffer *fb,
+ struct drm_file *file_priv,
+ unsigned int *handle);
+
+static const struct drm_framebuffer_funcs psb_fb_funcs = {
+ .destroy = psb_user_framebuffer_destroy,
+ .create_handle = psb_user_framebuffer_create_handle,
+};
+
+#define CMAP_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16)
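+
+/*
+ * CMAP_TOHW maps a 16-bit colour component onto a hardware field of the
+ * given width, e.g. for an 8-bit field:
+ * CMAP_TOHW(0xffff, 8) = ((0xffff << 8) + 0x7fff - 0xffff) >> 16 = 0xff
+ * and CMAP_TOHW(0, 8) = 0x7fff >> 16 = 0.
+ */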
+
+static int psbfb_setcolreg(unsigned regno, unsigned red, unsigned green,
+ unsigned blue, unsigned transp,
+ struct fb_info *info)
+{
+ struct psb_fbdev *fbdev = info->par;
+ struct drm_framebuffer *fb = fbdev->psb_fb_helper.fb;
+ uint32_t v;
+
+ if (!fb)
+ return -ENOMEM;
+
+ if (regno > 255)
+ return 1;
+
+ red = CMAP_TOHW(red, info->var.red.length);
+ blue = CMAP_TOHW(blue, info->var.blue.length);
+ green = CMAP_TOHW(green, info->var.green.length);
+ transp = CMAP_TOHW(transp, info->var.transp.length);
+
+ v = (red << info->var.red.offset) |
+ (green << info->var.green.offset) |
+ (blue << info->var.blue.offset) |
+ (transp << info->var.transp.offset);
+
+ if (regno < 16) {
+ switch (fb->bits_per_pixel) {
+ case 16:
+ ((uint32_t *) info->pseudo_palette)[regno] = v;
+ break;
+ case 24:
+ case 32:
+ ((uint32_t *) info->pseudo_palette)[regno] = v;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int psbfb_pan(struct fb_var_screeninfo *var, struct fb_info *info)
+{
+ struct psb_fbdev *fbdev = info->par;
+ struct psb_framebuffer *psbfb = &fbdev->pfb;
+ struct drm_device *dev = psbfb->base.dev;
+
+ /*
+ * We have to poke our nose in here. The core fb code assumes
+ * panning is part of the hardware that can be invoked before
+ * the actual fb is mapped. In our case that isn't quite true.
+ */
+ if (psbfb->gtt->npage) {
+ /* GTT roll shifts in 4K pages, we need to shift the right
+ number of pages */
+ int pages = info->fix.line_length >> 12;
+ psb_gtt_roll(dev, psbfb->gtt, var->yoffset * pages);
+ }
+ return 0;
+}
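+
+/*
+ * For example (illustrative numbers): with a line length of 8192 bytes
+ * the framebuffer advances 8192 >> 12 = 2 GTT pages per scanline, so
+ * panning to yoffset = 100 rolls the GTT by 200 pages.
+ */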
+
+void psbfb_suspend(struct drm_device *dev)
+{
+ struct drm_framebuffer *fb;
+
+ console_lock();
+ mutex_lock(&dev->mode_config.mutex);
+ list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
+ struct psb_framebuffer *psbfb = to_psb_fb(fb);
+ struct fb_info *info = psbfb->fbdev;
+ fb_set_suspend(info, 1);
+ drm_fb_helper_blank(FB_BLANK_POWERDOWN, info);
+ }
+ mutex_unlock(&dev->mode_config.mutex);
+ console_unlock();
+}
+
+void psbfb_resume(struct drm_device *dev)
+{
+ struct drm_framebuffer *fb;
+
+ console_lock();
+ mutex_lock(&dev->mode_config.mutex);
+ list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
+ struct psb_framebuffer *psbfb = to_psb_fb(fb);
+ struct fb_info *info = psbfb->fbdev;
+ fb_set_suspend(info, 0);
+ drm_fb_helper_blank(FB_BLANK_UNBLANK, info);
+ }
+ mutex_unlock(&dev->mode_config.mutex);
+ console_unlock();
+ drm_helper_disable_unused_functions(dev);
+}
+
+static int psbfb_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct psb_framebuffer *psbfb = vma->vm_private_data;
+ struct drm_device *dev = psbfb->base.dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ int page_num;
+ int i;
+ unsigned long address;
+ int ret;
+ unsigned long pfn;
+ /* FIXME: assumes fb at stolen base which may not be true */
+ unsigned long phys_addr = (unsigned long)dev_priv->stolen_base;
+
+ page_num = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+ address = (unsigned long)vmf->virtual_address;
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ for (i = 0; i < page_num; i++) {
+ pfn = (phys_addr >> PAGE_SHIFT);
+
+ ret = vm_insert_mixed(vma, address, pfn);
+ if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
+ break;
+ else if (unlikely(ret != 0)) {
+ ret = (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
+ return ret;
+ }
+ address += PAGE_SIZE;
+ phys_addr += PAGE_SIZE;
+ }
+ return VM_FAULT_NOPAGE;
+}
+
+static void psbfb_vm_open(struct vm_area_struct *vma)
+{
+}
+
+static void psbfb_vm_close(struct vm_area_struct *vma)
+{
+}
+
+static struct vm_operations_struct psbfb_vm_ops = {
+ .fault = psbfb_vm_fault,
+ .open = psbfb_vm_open,
+ .close = psbfb_vm_close
+};
+
+static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
+{
+ struct psb_fbdev *fbdev = info->par;
+ struct psb_framebuffer *psbfb = &fbdev->pfb;
+
+ if (vma->vm_pgoff != 0)
+ return -EINVAL;
+ if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
+ return -EINVAL;
+
+ if (!psbfb->addr_space)
+ psbfb->addr_space = vma->vm_file->f_mapping;
+ /*
+ * If this is a GEM object then info->screen_base is the virtual
+ * kernel remapping of the object. FIXME: Review if this is
+ * suitable for our mmap work
+ */
+ vma->vm_ops = &psbfb_vm_ops;
+ vma->vm_private_data = (void *)psbfb;
+ vma->vm_flags |= VM_RESERVED | VM_IO |
+ VM_MIXEDMAP | VM_DONTEXPAND;
+ return 0;
+}
+
+static int psbfb_ioctl(struct fb_info *info, unsigned int cmd,
+ unsigned long arg)
+{
+ return -ENOTTY;
+}
+
+static struct fb_ops psbfb_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_setcolreg = psbfb_setcolreg,
+ .fb_fillrect = cfb_fillrect,
+ .fb_copyarea = psbfb_copyarea,
+ .fb_imageblit = cfb_imageblit,
+ .fb_mmap = psbfb_mmap,
+ .fb_sync = psbfb_sync,
+ .fb_ioctl = psbfb_ioctl,
+};
+
+static struct fb_ops psbfb_roll_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_setcolreg = psbfb_setcolreg,
+ .fb_fillrect = cfb_fillrect,
+ .fb_copyarea = cfb_copyarea,
+ .fb_imageblit = cfb_imageblit,
+ .fb_pan_display = psbfb_pan,
+ .fb_mmap = psbfb_mmap,
+ .fb_sync = psbfb_sync,
+ .fb_ioctl = psbfb_ioctl,
+};
+
+static struct fb_ops psbfb_unaccel_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_setcolreg = psbfb_setcolreg,
+ .fb_fillrect = cfb_fillrect,
+ .fb_copyarea = cfb_copyarea,
+ .fb_imageblit = cfb_imageblit,
+ .fb_mmap = psbfb_mmap,
+ .fb_ioctl = psbfb_ioctl,
+};
+
+/**
+ * psb_framebuffer_init - initialize a framebuffer
+ * @dev: our DRM device
+ * @fb: framebuffer to set up
+ * @mode_cmd: mode description
+ * @gt: backing object
+ *
+ * Configure and fill in the boilerplate for our frame buffer. Return
+ * 0 on success or an error code if we fail.
+ */
+static int psb_framebuffer_init(struct drm_device *dev,
+ struct psb_framebuffer *fb,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct gtt_range *gt)
+{
+ u32 bpp, depth;
+ int ret;
+
+ drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
+
+ if (mode_cmd->pitches[0] & 63)
+ return -EINVAL;
+ switch (bpp) {
+ case 8:
+ case 16:
+ case 24:
+ case 32:
+ break;
+ default:
+ return -EINVAL;
+ }
+ ret = drm_framebuffer_init(dev, &fb->base, &psb_fb_funcs);
+ if (ret) {
+ dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
+ return ret;
+ }
+ drm_helper_mode_fill_fb_struct(&fb->base, mode_cmd);
+ fb->gtt = gt;
+ return 0;
+}
+
+/**
+ * psb_framebuffer_create - create a framebuffer backed by gt
+ * @dev: our DRM device
+ * @mode_cmd: the description of the requested mode
+ * @gt: the backing object
+ *
+ * Create a framebuffer object backed by the gt, and fill in the
+ * boilerplate required
+ *
+ * TODO: review object references
+ */
+
+static struct drm_framebuffer *psb_framebuffer_create
+ (struct drm_device *dev,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct gtt_range *gt)
+{
+ struct psb_framebuffer *fb;
+ int ret;
+
+ fb = kzalloc(sizeof(*fb), GFP_KERNEL);
+ if (!fb)
+ return ERR_PTR(-ENOMEM);
+
+ ret = psb_framebuffer_init(dev, fb, mode_cmd, gt);
+ if (ret) {
+ kfree(fb);
+ return ERR_PTR(ret);
+ }
+ return &fb->base;
+}
+
+/**
+ * psbfb_alloc - allocate frame buffer memory
+ * @dev: the DRM device
+ * @aligned_size: space needed
+ *
+ * Allocate the frame buffer. In the usual case we get a GTT range that
+ * is backed by stolen memory and life is simple. If there isn't enough
+ * stolen memory we fail, as we don't have the virtual mapping space to
+ * really vmap it and the kernel console code can't handle non-linear
+ * framebuffers.
+ *
+ * Re-address this as and if the framebuffer layer grows this ability.
+ */
+static struct gtt_range *psbfb_alloc(struct drm_device *dev, int aligned_size)
+{
+ struct gtt_range *backing;
+ /* Begin by trying to use stolen memory backing */
+ backing = psb_gtt_alloc_range(dev, aligned_size, "fb", 1);
+ if (backing) {
+ if (drm_gem_private_object_init(dev,
+ &backing->gem, aligned_size) == 0)
+ return backing;
+ psb_gtt_free_range(dev, backing);
+ }
+ return NULL;
+}
+
+/**
+ * psbfb_create - create a framebuffer
+ * @fbdev: the framebuffer device
+ * @sizes: specification of the layout
+ *
+ * Create a framebuffer to the specifications provided
+ */
+static int psbfb_create(struct psb_fbdev *fbdev,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct drm_device *dev = fbdev->psb_fb_helper.dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct fb_info *info;
+ struct drm_framebuffer *fb;
+ struct psb_framebuffer *psbfb = &fbdev->pfb;
+ struct drm_mode_fb_cmd2 mode_cmd;
+ struct device *device = &dev->pdev->dev;
+ int size;
+ int ret;
+ struct gtt_range *backing;
+ u32 bpp, depth;
+ int gtt_roll = 0;
+ int pitch_lines = 0;
+
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+ bpp = sizes->surface_bpp;
+
+ /* No 24bit packed */
+ if (bpp == 24)
+ bpp = 32;
+
+ do {
+ /*
+ * Acceleration via the GTT requires pitch to be
+ * power of two aligned. Preferably page but less
+ * is ok with some fonts
+ */
+ mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((bpp + 7) / 8), 4096 >> pitch_lines);
+ depth = sizes->surface_depth;
+
+ size = mode_cmd.pitches[0] * mode_cmd.height;
+ size = ALIGN(size, PAGE_SIZE);
+
+ /* Allocate the fb in the GTT with stolen page backing */
+ backing = psbfb_alloc(dev, size);
+
+ if (pitch_lines)
+ pitch_lines *= 2;
+ else
+ pitch_lines = 1;
+ gtt_roll++;
+ } while (backing == NULL && pitch_lines <= 16);
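+
+ /*
+ * To illustrate the loop above with made-up numbers: a 1366 pixel wide,
+ * 32 bpp surface needs 5464 bytes per line, so the attempted pitches
+ * are ALIGN(5464, 4096) = 8192, then 6144, 6144, 5632 and finally 5472
+ * bytes as the alignment drops from 4096 to 16 bytes.
+ */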
+
+ /* The final pitch we accepted if we succeeded */
+ pitch_lines /= 2;
+
+ if (backing == NULL) {
+ /*
+ * We couldn't get the space we wanted, fall back to the
+ * display engine requirement instead. The HW requires
+ * the pitch to be 64 byte aligned
+ */
+
+ gtt_roll = 0; /* Don't use GTT accelerated scrolling */
+ pitch_lines = 64;
+
+ mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((bpp + 7) / 8), 64);
+
+ size = mode_cmd.pitches[0] * mode_cmd.height;
+ size = ALIGN(size, PAGE_SIZE);
+
+ /* Allocate the framebuffer in the GTT with stolen page backing */
+ backing = psbfb_alloc(dev, size);
+ if (backing == NULL)
+ return -ENOMEM;
+ }
+
+ mutex_lock(&dev->struct_mutex);
+
+ info = framebuffer_alloc(0, device);
+ if (!info) {
+ ret = -ENOMEM;
+ goto out_err1;
+ }
+ info->par = fbdev;
+
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
+
+ ret = psb_framebuffer_init(dev, psbfb, &mode_cmd, backing);
+ if (ret)
+ goto out_unref;
+
+ fb = &psbfb->base;
+ psbfb->fbdev = info;
+
+ fbdev->psb_fb_helper.fb = fb;
+ fbdev->psb_fb_helper.fbdev = info;
+
+ strcpy(info->fix.id, "psbfb");
+
+ info->flags = FBINFO_DEFAULT;
+ if (dev_priv->ops->accel_2d && pitch_lines > 8) /* 2D engine */
+ info->fbops = &psbfb_ops;
+ else if (gtt_roll) { /* GTT rolling seems best */
+ info->fbops = &psbfb_roll_ops;
+ info->flags |= FBINFO_HWACCEL_YPAN;
+ } else /* Software */
+ info->fbops = &psbfb_unaccel_ops;
+
+ ret = fb_alloc_cmap(&info->cmap, 256, 0);
+ if (ret) {
+ ret = -ENOMEM;
+ goto out_unref;
+ }
+
+ info->fix.smem_start = dev->mode_config.fb_base;
+ info->fix.smem_len = size;
+ info->fix.ywrapstep = gtt_roll;
+ info->fix.ypanstep = 0;
+
+ /* Access the stolen memory directly */
+ info->screen_base = (char *)dev_priv->vram_addr +
+ backing->offset;
+ info->screen_size = size;
+
+ if (dev_priv->gtt.stolen_size) {
+ info->apertures = alloc_apertures(1);
+ if (!info->apertures) {
+ ret = -ENOMEM;
+ goto out_unref;
+ }
+ info->apertures->ranges[0].base = dev->mode_config.fb_base;
+ info->apertures->ranges[0].size = dev_priv->gtt.stolen_size;
+ }
+
+ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_var(info, &fbdev->psb_fb_helper,
+ sizes->fb_width, sizes->fb_height);
+
+ info->fix.mmio_start = pci_resource_start(dev->pdev, 0);
+ info->fix.mmio_len = pci_resource_len(dev->pdev, 0);
+
+ info->pixmap.size = 64 * 1024;
+ info->pixmap.buf_align = 8;
+ info->pixmap.access_align = 32;
+ info->pixmap.flags = FB_PIXMAP_SYSTEM;
+ info->pixmap.scan_align = 1;
+
+ dev_info(dev->dev, "allocated %dx%d fb\n",
+ psbfb->base.width, psbfb->base.height);
+
+ mutex_unlock(&dev->struct_mutex);
+ return 0;
+out_unref:
+ if (backing->stolen)
+ psb_gtt_free_range(dev, backing);
+ else
+ drm_gem_object_unreference(&backing->gem);
+ backing = NULL; /* already released; don't free it again below */
+out_err1:
+ mutex_unlock(&dev->struct_mutex);
+ if (backing)
+ psb_gtt_free_range(dev, backing);
+ return ret;
+}
+
+/**
+ * psb_user_framebuffer_create - create framebuffer
+ * @dev: our DRM device
+ * @filp: client file
+ * @cmd: mode request
+ *
+ * Create a new framebuffer backed by a userspace GEM object
+ */
+static struct drm_framebuffer *psb_user_framebuffer_create
+ (struct drm_device *dev, struct drm_file *filp,
+ struct drm_mode_fb_cmd2 *cmd)
+{
+ struct gtt_range *r;
+ struct drm_gem_object *obj;
+
+ /*
+ * Find the GEM object and thus the gtt range object that is
+ * to back this space
+ */
+ obj = drm_gem_object_lookup(dev, filp, cmd->handles[0]);
+ if (obj == NULL)
+ return ERR_PTR(-ENOENT);
+
+ /* Let the core code do all the work */
+ r = container_of(obj, struct gtt_range, gem);
+ return psb_framebuffer_create(dev, cmd, r);
+}
+
+static void psbfb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+ u16 blue, int regno)
+{
+}
+
+static void psbfb_gamma_get(struct drm_crtc *crtc, u16 *red,
+ u16 *green, u16 *blue, int regno)
+{
+}
+
+static int psbfb_probe(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct psb_fbdev *psb_fbdev = (struct psb_fbdev *)helper;
+ int new_fb = 0;
+ int ret;
+
+ if (!helper->fb) {
+ ret = psbfb_create(psb_fbdev, sizes);
+ if (ret)
+ return ret;
+ new_fb = 1;
+ }
+ return new_fb;
+}
+
+struct drm_fb_helper_funcs psb_fb_helper_funcs = {
+ .gamma_set = psbfb_gamma_set,
+ .gamma_get = psbfb_gamma_get,
+ .fb_probe = psbfb_probe,
+};
+
+int psb_fbdev_destroy(struct drm_device *dev, struct psb_fbdev *fbdev)
+{
+ struct fb_info *info;
+ struct psb_framebuffer *psbfb = &fbdev->pfb;
+
+ if (fbdev->psb_fb_helper.fbdev) {
+ info = fbdev->psb_fb_helper.fbdev;
+ unregister_framebuffer(info);
+ if (info->cmap.len)
+ fb_dealloc_cmap(&info->cmap);
+ framebuffer_release(info);
+ }
+ drm_fb_helper_fini(&fbdev->psb_fb_helper);
+ drm_framebuffer_cleanup(&psbfb->base);
+
+ if (psbfb->gtt)
+ drm_gem_object_unreference(&psbfb->gtt->gem);
+ return 0;
+}
+
+int psb_fbdev_init(struct drm_device *dev)
+{
+ struct psb_fbdev *fbdev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ fbdev = kzalloc(sizeof(struct psb_fbdev), GFP_KERNEL);
+ if (!fbdev) {
+ dev_err(dev->dev, "no memory\n");
+ return -ENOMEM;
+ }
+
+ dev_priv->fbdev = fbdev;
+ fbdev->psb_fb_helper.funcs = &psb_fb_helper_funcs;
+
+ drm_fb_helper_init(dev, &fbdev->psb_fb_helper, dev_priv->ops->crtcs,
+ INTELFB_CONN_LIMIT);
+
+ drm_fb_helper_single_add_all_connectors(&fbdev->psb_fb_helper);
+ drm_fb_helper_initial_config(&fbdev->psb_fb_helper, 32);
+ return 0;
+}
+
+void psb_fbdev_fini(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ if (!dev_priv->fbdev)
+ return;
+
+ psb_fbdev_destroy(dev, dev_priv->fbdev);
+ kfree(dev_priv->fbdev);
+ dev_priv->fbdev = NULL;
+}
+
+static void psbfb_output_poll_changed(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_fbdev *fbdev = (struct psb_fbdev *)dev_priv->fbdev;
+ drm_fb_helper_hotplug_event(&fbdev->psb_fb_helper);
+}
+
+/**
+ * psb_user_framebuffer_create_handle - add a handle to a framebuffer
+ * @fb: framebuffer
+ * @file_priv: our DRM file
+ * @handle: returned handle
+ *
+ * Our framebuffer object is a GTT range which also contains a GEM
+ * object. We need to turn it into a handle for userspace. GEM will do
+ * the work for us
+ */
+static int psb_user_framebuffer_create_handle(struct drm_framebuffer *fb,
+ struct drm_file *file_priv,
+ unsigned int *handle)
+{
+ struct psb_framebuffer *psbfb = to_psb_fb(fb);
+ struct gtt_range *r = psbfb->gtt;
+ return drm_gem_handle_create(file_priv, &r->gem, handle);
+}
+
+/**
+ * psb_user_framebuffer_destroy - destruct user created fb
+ * @fb: framebuffer
+ *
+ * User framebuffers are backed by GEM objects so all we have to do is
+ * clean up a bit and drop the reference, GEM will handle the fallout
+ */
+static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+ struct psb_framebuffer *psbfb = to_psb_fb(fb);
+ struct gtt_range *r = psbfb->gtt;
+ struct drm_device *dev = fb->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_fbdev *fbdev = dev_priv->fbdev;
+ struct drm_crtc *crtc;
+ int reset = 0;
+
+ /* Should never get stolen memory for a user fb */
+ WARN_ON(r->stolen);
+
+ /* Check if we are erroneously live */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+ if (crtc->fb == fb)
+ reset = 1;
+
+ if (reset)
+ /*
+ * Now force a sane response before we permit the DRM CRTC
+ * layer to do stupid things like blank the display. Instead
+ * we reset this framebuffer as if the user had forced a reset.
+ * We must do this before the cleanup so that the DRM layer
+ * doesn't get a chance to stick its oar in where it isn't
+ * wanted.
+ */
+ drm_fb_helper_restore_fbdev_mode(&fbdev->psb_fb_helper);
+
+ /* Let DRM do its clean up */
+ drm_framebuffer_cleanup(fb);
+ /* We are no longer using the resource in GEM */
+ drm_gem_object_unreference_unlocked(&r->gem);
+ kfree(fb);
+}
+
+static const struct drm_mode_config_funcs psb_mode_funcs = {
+ .fb_create = psb_user_framebuffer_create,
+ .output_poll_changed = psbfb_output_poll_changed,
+};
+
+static int psb_create_backlight_property(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_property *backlight;
+
+ if (dev_priv->backlight_property)
+ return 0;
+
+ backlight = drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "backlight", 2);
+ backlight->values[0] = 0;
+ backlight->values[1] = 100;
+
+ dev_priv->backlight_property = backlight;
+
+ return 0;
+}
+
+static void psb_setup_outputs(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_connector *connector;
+
+ drm_mode_create_scaling_mode_property(dev);
+ psb_create_backlight_property(dev);
+
+ dev_priv->ops->output_init(dev);
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ head) {
+ struct psb_intel_encoder *psb_intel_encoder =
+ psb_intel_attached_encoder(connector);
+ struct drm_encoder *encoder = &psb_intel_encoder->base;
+ int crtc_mask = 0, clone_mask = 0;
+
+ /* valid crtcs */
+ switch (psb_intel_encoder->type) {
+ case INTEL_OUTPUT_ANALOG:
+ crtc_mask = (1 << 0);
+ clone_mask = (1 << INTEL_OUTPUT_ANALOG);
+ break;
+ case INTEL_OUTPUT_SDVO:
+ crtc_mask = ((1 << 0) | (1 << 1));
+ clone_mask = (1 << INTEL_OUTPUT_SDVO);
+ break;
+ case INTEL_OUTPUT_LVDS:
+ if (IS_MRST(dev))
+ crtc_mask = (1 << 0);
+ else
+ crtc_mask = (1 << 1);
+ clone_mask = (1 << INTEL_OUTPUT_LVDS);
+ break;
+ case INTEL_OUTPUT_MIPI:
+ crtc_mask = (1 << 0);
+ clone_mask = (1 << INTEL_OUTPUT_MIPI);
+ break;
+ case INTEL_OUTPUT_MIPI2:
+ crtc_mask = (1 << 2);
+ clone_mask = (1 << INTEL_OUTPUT_MIPI2);
+ break;
+ case INTEL_OUTPUT_HDMI:
+ if (IS_MFLD(dev))
+ crtc_mask = (1 << 1);
+ else
+ crtc_mask = (1 << 0);
+ clone_mask = (1 << INTEL_OUTPUT_HDMI);
+ break;
+ }
+ encoder->possible_crtcs = crtc_mask;
+ encoder->possible_clones =
+ psb_intel_connector_clones(dev, clone_mask);
+ }
+}
+
+void psb_modeset_init(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+ int i;
+
+ drm_mode_config_init(dev);
+
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+
+ dev->mode_config.funcs = (void *) &psb_mode_funcs;
+
+ /* set memory base */
+ /* Oaktrail and Poulsbo should use BAR 2 */
+ pci_read_config_dword(dev->pdev, PSB_BSM, (u32 *)
+ &(dev->mode_config.fb_base));
+
+ /* num pipes is 2 for PSB but 1 for Mrst */
+ for (i = 0; i < dev_priv->num_pipe; i++)
+ psb_intel_crtc_init(dev, i, mode_dev);
+
+ dev->mode_config.max_width = 2048;
+ dev->mode_config.max_height = 2048;
+
+ psb_setup_outputs(dev);
+}
+
+void psb_modeset_cleanup(struct drm_device *dev)
+{
+ mutex_lock(&dev->struct_mutex);
+
+ drm_kms_helper_poll_fini(dev);
+ psb_fbdev_fini(dev);
+ drm_mode_config_cleanup(dev);
+
+ mutex_unlock(&dev->struct_mutex);
+}
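
An editorial aside: psb_setup_outputs() above encodes the pipes an encoder may drive as a bitmask (bit 0 for pipe A, bit 1 for pipe B, bit 2 for the extra Medfield MIPI pipe), and possible_clones is derived from a mask of encoder types allowed to share a CRTC. Below is a minimal, self-contained sketch of that mask arithmetic; the type values and the clones() helper are illustrative stand-ins for the driver's INTEL_OUTPUT_* constants and psb_intel_connector_clones(), whose body is not shown here.

/* Standalone illustration of possible_crtcs/possible_clones bitmasks.
 * The types and helper below are hypothetical, not the driver's own. */
#include <stdio.h>

enum fake_output { OUT_ANALOG = 0, OUT_SDVO = 1, OUT_LVDS = 2 };

/* Encoders on an imaginary board, in connector-list order */
static const enum fake_output outputs[] = { OUT_ANALOG, OUT_SDVO, OUT_LVDS };

/* Set the index bit of every encoder whose type appears in clone_mask,
 * in the spirit of how the clone mask is used above. */
static int clones(int clone_mask)
{
	int index, mask = 0;

	for (index = 0; index < 3; index++)
		if (clone_mask & (1 << outputs[index]))
			mask |= (1 << index);
	return mask;
}

int main(void)
{
	/* SDVO may use pipe A or pipe B and only clones with itself */
	int possible_crtcs = (1 << 0) | (1 << 1);
	int possible_clones = clones(1 << OUT_SDVO);

	printf("crtcs=0x%x clones=0x%x\n", possible_crtcs, possible_clones);
	return 0;
}
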
diff --git a/drivers/gpu/drm/gma500/framebuffer.h b/drivers/gpu/drm/gma500/framebuffer.h
new file mode 100644
index 000000000000..989558a9e6ee
--- /dev/null
+++ b/drivers/gpu/drm/gma500/framebuffer.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2008-2011, Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#ifndef _FRAMEBUFFER_H_
+#define _FRAMEBUFFER_H_
+
+#include <drm/drmP.h>
+#include <drm/drm_fb_helper.h>
+
+#include "psb_drv.h"
+
+struct psb_framebuffer {
+ struct drm_framebuffer base;
+ struct address_space *addr_space;
+ struct fb_info *fbdev;
+ struct gtt_range *gtt;
+};
+
+struct psb_fbdev {
+ struct drm_fb_helper psb_fb_helper;
+ struct psb_framebuffer pfb;
+};
+
+#define to_psb_fb(x) container_of(x, struct psb_framebuffer, base)
+
+extern int psb_intel_connector_clones(struct drm_device *dev, int type_mask);
+
+#endif
+
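The to_psb_fb() macro above is the usual container_of() pattern: given a pointer to the embedded drm_framebuffer it subtracts the member offset to recover the enclosing psb_framebuffer. A small userspace-style sketch of the same idiom, with a stand-in container_of since this runs outside the kernel:

/* Userspace illustration of the container_of() idiom used by to_psb_fb(). */
#include <stdio.h>
#include <stddef.h>

#define my_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base_fb { int width, height; };

struct wrapped_fb {
	struct base_fb base;	/* embedded "drm_framebuffer" */
	void *private_state;	/* driver-private extras */
};

int main(void)
{
	struct wrapped_fb fb = { .base = { 1024, 600 }, .private_state = NULL };
	struct base_fb *b = &fb.base;

	/* Recover the wrapper from the embedded member, as to_psb_fb() does */
	struct wrapped_fb *w = my_container_of(b, struct wrapped_fb, base);

	printf("recovered %p, original %p\n", (void *)w, (void *)&fb);
	return 0;
}
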
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
new file mode 100644
index 000000000000..9fbb86868e2e
--- /dev/null
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -0,0 +1,292 @@
+/*
+ * psb GEM interface
+ *
+ * Copyright (c) 2011, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors: Alan Cox
+ *
+ * TODO:
+ * - we need to work out if the MMU is relevant (eg for
+ * accelerated operations on a GEM object)
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include "gma_drm.h"
+#include "psb_drv.h"
+
+int psb_gem_init_object(struct drm_gem_object *obj)
+{
+ return -EINVAL;
+}
+
+void psb_gem_free_object(struct drm_gem_object *obj)
+{
+ struct gtt_range *gtt = container_of(obj, struct gtt_range, gem);
+ drm_gem_object_release_wrap(obj);
+ /* This must occur last as it frees up the memory of the GEM object */
+ psb_gtt_free_range(obj->dev, gtt);
+}
+
+int psb_gem_get_aperture(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ return -EINVAL;
+}
+
+/**
+ * psb_gem_dumb_map_gtt - buffer mapping for dumb interface
+ * @file: our drm client file
+ * @dev: drm device
+ * @handle: GEM handle to the object (from dumb_create)
+ * @offset: returned mmap offset for the object
+ *
+ * Do the necessary setup to allow the mapping of the frame buffer
+ * into user memory. We don't have to do much here at the moment.
+ */
+int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
+ uint32_t handle, uint64_t *offset)
+{
+ int ret = 0;
+ struct drm_gem_object *obj;
+
+ if (!(dev->driver->driver_features & DRIVER_GEM))
+ return -ENODEV;
+
+ mutex_lock(&dev->struct_mutex);
+
+ /* GEM does all our handle to object mapping */
+ obj = drm_gem_object_lookup(dev, file, handle);
+ if (obj == NULL) {
+ ret = -ENOENT;
+ goto unlock;
+ }
+ /* What validation is needed here ? */
+
+ /* Make it mmapable */
+ if (!obj->map_list.map) {
+ ret = gem_create_mmap_offset(obj);
+ if (ret)
+ goto out;
+ }
+ /* GEM should really work out the hash offsets for us */
+ *offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
+out:
+ drm_gem_object_unreference(obj);
+unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+/**
+ * psb_gem_create - create a mappable object
+ * @file: the DRM file of the client
+ * @dev: our device
+ * @size: the size requested
+ * @handlep: returned handle (opaque number)
+ *
+ * Create a GEM object, fill in the boilerplate and attach a handle to
+ * it so that userspace can speak about it. This does the core work
+ * for the various methods that do or will create GEM objects.
+ */
+static int psb_gem_create(struct drm_file *file,
+ struct drm_device *dev, uint64_t size, uint32_t *handlep)
+{
+ struct gtt_range *r;
+ int ret;
+ u32 handle;
+
+ size = roundup(size, PAGE_SIZE);
+
+ /* Allocate our object - for now a direct gtt range which is not
+ stolen memory backed */
+ r = psb_gtt_alloc_range(dev, size, "gem", 0);
+ if (r == NULL) {
+ dev_err(dev->dev, "no memory for %lld byte GEM object\n", size);
+ return -ENOSPC;
+ }
+ /* Initialize the extra goodies GEM needs to do all the hard work */
+ if (drm_gem_object_init(dev, &r->gem, size) != 0) {
+ psb_gtt_free_range(dev, r);
+ /* GEM doesn't give an error code so use -ENOMEM */
+ dev_err(dev->dev, "GEM init failed for %lld\n", size);
+ return -ENOMEM;
+ }
+ /* Give the object a handle so we can carry it more easily */
+ ret = drm_gem_handle_create(file, &r->gem, &handle);
+ if (ret) {
+ dev_err(dev->dev, "GEM handle failed for %p, %lld\n",
+ &r->gem, size);
+ drm_gem_object_release(&r->gem);
+ psb_gtt_free_range(dev, r);
+ return ret;
+ }
+ /* We have the initial and handle reference but need only one now */
+ drm_gem_object_unreference(&r->gem);
+ *handlep = handle;
+ return 0;
+}
+
+/**
+ * psb_gem_dumb_create - create a dumb buffer
+ * @file: our client file
+ * @dev: our device
+ * @args: the requested arguments copied from userspace
+ *
+ * Allocate a buffer suitable for use for a frame buffer of the
+ * form described by user space. Give userspace a handle by which
+ * to reference it.
+ */
+int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
+ args->size = args->pitch * args->height;
+ return psb_gem_create(file, dev, args->size, &args->handle);
+}
+
+/**
+ * psb_gem_dumb_destroy - destroy a dumb buffer
+ * @file: client file
+ * @dev: our DRM device
+ * @handle: the object handle
+ *
+ * Destroy a handle that was created via psb_gem_dumb_create, at least
+ * we hope it was created that way. i915 seems to assume the caller
+ * does the checking but that might be worth reviewing. FIXME
+ */
+int psb_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
+ uint32_t handle)
+{
+ /* No special work needed, drop the reference and see what falls out */
+ return drm_gem_handle_delete(file, handle);
+}
+
+/**
+ * psb_gem_fault - pagefault handler for GEM objects
+ * @vma: the VMA of the GEM object
+ * @vmf: fault detail
+ *
+ * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
+ * does most of the work for us including the actual map/unmap calls
+ * but we need to do the actual page work.
+ *
+ * This code eventually needs to handle faulting objects in and out
+ * of the GTT and repacking it when we run out of space. We can put
+ * that off for now as it is not needed for our simple uses.
+ *
+ * The VMA was set up by GEM. In doing so it also ensured that the
+ * vma->vm_private_data points to the GEM object that is backing this
+ * mapping.
+ */
+int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct drm_gem_object *obj;
+ struct gtt_range *r;
+ int ret;
+ unsigned long pfn;
+ pgoff_t page_offset;
+ struct drm_device *dev;
+ struct drm_psb_private *dev_priv;
+
+ obj = vma->vm_private_data; /* GEM object */
+ dev = obj->dev;
+ dev_priv = dev->dev_private;
+
+ r = container_of(obj, struct gtt_range, gem); /* Get the gtt range */
+
+ /* Make sure we don't parallel update on a fault, nor move or remove
+ something from beneath our feet */
+ mutex_lock(&dev->struct_mutex);
+
+ /* For now the mmap pins the object and it stays pinned. As things
+ stand that will do us no harm */
+ if (r->mmapping == 0) {
+ ret = psb_gtt_pin(r);
+ if (ret < 0) {
+ dev_err(dev->dev, "gma500: pin failed: %d\n", ret);
+ goto fail;
+ }
+ r->mmapping = 1;
+ }
+
+ /* Page relative to the VMA start - we must calculate this ourselves
+ because vmf->pgoff is the fake GEM offset */
+ page_offset = ((unsigned long) vmf->virtual_address - vma->vm_start)
+ >> PAGE_SHIFT;
+
+ /* CPU view of the page, don't go via the GART for CPU writes */
+ if (r->stolen)
+ pfn = (dev_priv->stolen_base + r->offset) >> PAGE_SHIFT;
+ else
+ pfn = page_to_pfn(r->pages[page_offset]);
+ ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
+
+fail:
+ mutex_unlock(&dev->struct_mutex);
+ switch (ret) {
+ case 0:
+ case -ERESTARTSYS:
+ case -EINTR:
+ return VM_FAULT_NOPAGE;
+ case -ENOMEM:
+ return VM_FAULT_OOM;
+ default:
+ return VM_FAULT_SIGBUS;
+ }
+}
+
+static int psb_gem_create_stolen(struct drm_file *file, struct drm_device *dev,
+ int size, u32 *handle)
+{
+ struct gtt_range *gtt = psb_gtt_alloc_range(dev, size, "gem", 1);
+ if (gtt == NULL)
+ return -ENOMEM;
+ if (drm_gem_private_object_init(dev, &gtt->gem, size) != 0)
+ goto free_gtt;
+ if (drm_gem_handle_create(file, &gtt->gem, handle) == 0)
+ return 0;
+free_gtt:
+ psb_gtt_free_range(dev, gtt);
+ return -ENOMEM;
+}
+
+/*
+ * GEM interfaces for our specific client
+ */
+int psb_gem_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_psb_gem_create *args = data;
+ int ret;
+ if (args->flags & GMA_GEM_CREATE_STOLEN) {
+ ret = psb_gem_create_stolen(file, dev, args->size,
+ &args->handle);
+ if (ret == 0)
+ return 0;
+		/* Fall through */
+ args->flags &= ~GMA_GEM_CREATE_STOLEN;
+ }
+ return psb_gem_create(file, dev, args->size, &args->handle);
+}
+
+int psb_gem_mmap_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_psb_gem_mmap *args = data;
+ return dev->driver->dumb_map_offset(file, dev,
+ args->handle, &args->offset);
+}
+
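Editorial note: psb_gem_dumb_create() above derives the buffer geometry with plain arithmetic (the pitch is the row size in bytes rounded up to 64) and psb_gem_dumb_map_gtt() hands back the mmap offset as the hash key shifted by PAGE_SHIFT. A tiny standalone sketch of both computations; the 1366x768 at 32bpp figures and the hash key are only examples:

/* Worked example of the dumb-buffer size math in psb_gem_dumb_create()
 * and the fake mmap offset produced in psb_gem_dumb_map_gtt().
 * All numbers are illustrative. */
#include <stdio.h>
#include <stdint.h>

#define EX_ALIGN(x, a)	(((x) + (a) - 1) & ~((uint64_t)(a) - 1))
#define EX_PAGE_SHIFT	12

int main(void)
{
	uint32_t width = 1366, height = 768, bpp = 32;

	uint32_t pitch = EX_ALIGN(width * ((bpp + 7) / 8), 64);	/* 5504 */
	uint64_t size  = (uint64_t)pitch * height;		/* 4227072 */

	/* The GEM mmap hash key is in pages; userspace gets it in bytes */
	uint64_t hash_key = 0x10000;			/* hypothetical */
	uint64_t mmap_off = hash_key << EX_PAGE_SHIFT;

	printf("pitch=%u size=%llu mmap offset=0x%llx\n",
	       (unsigned)pitch, (unsigned long long)size,
	       (unsigned long long)mmap_off);
	return 0;
}
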
diff --git a/drivers/gpu/drm/gma500/gem_glue.c b/drivers/gpu/drm/gma500/gem_glue.c
new file mode 100644
index 000000000000..daac12120653
--- /dev/null
+++ b/drivers/gpu/drm/gma500/gem_glue.c
@@ -0,0 +1,89 @@
+/**************************************************************************
+ * Copyright (c) 2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+
+void drm_gem_object_release_wrap(struct drm_gem_object *obj)
+{
+ /* Remove the list map if one is present */
+ if (obj->map_list.map) {
+ struct drm_gem_mm *mm = obj->dev->mm_private;
+ struct drm_map_list *list = &obj->map_list;
+ drm_ht_remove_item(&mm->offset_hash, &list->hash);
+ drm_mm_put_block(list->file_offset_node);
+ kfree(list->map);
+ list->map = NULL;
+ }
+ drm_gem_object_release(obj);
+}
+
+/**
+ * gem_create_mmap_offset - invent an mmap offset
+ * @obj: our object
+ *
+ * Standard implementation of offset generation for mmap as is
+ * duplicated in several drivers. This belongs in GEM.
+ */
+int gem_create_mmap_offset(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ struct drm_gem_mm *mm = dev->mm_private;
+ struct drm_map_list *list;
+ struct drm_local_map *map;
+ int ret;
+
+ list = &obj->map_list;
+ list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
+ if (list->map == NULL)
+ return -ENOMEM;
+ map = list->map;
+ map->type = _DRM_GEM;
+ map->size = obj->size;
+ map->handle = obj;
+
+ list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
+ obj->size / PAGE_SIZE, 0, 0);
+ if (!list->file_offset_node) {
+ dev_err(dev->dev, "failed to allocate offset for bo %d\n",
+ obj->name);
+ ret = -ENOSPC;
+ goto free_it;
+ }
+ list->file_offset_node = drm_mm_get_block(list->file_offset_node,
+ obj->size / PAGE_SIZE, 0);
+ if (!list->file_offset_node) {
+ ret = -ENOMEM;
+ goto free_it;
+ }
+ list->hash.key = list->file_offset_node->start;
+ ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
+ if (ret) {
+ dev_err(dev->dev, "failed to add to map hash\n");
+ goto free_mm;
+ }
+ return 0;
+
+free_mm:
+ drm_mm_put_block(list->file_offset_node);
+free_it:
+ kfree(list->map);
+ list->map = NULL;
+ return ret;
+}
diff --git a/drivers/gpu/drm/gma500/gem_glue.h b/drivers/gpu/drm/gma500/gem_glue.h
new file mode 100644
index 000000000000..ce5ce30f74db
--- /dev/null
+++ b/drivers/gpu/drm/gma500/gem_glue.h
@@ -0,0 +1,2 @@
+extern void drm_gem_object_release_wrap(struct drm_gem_object *obj);
+extern int gem_create_mmap_offset(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c
new file mode 100644
index 000000000000..e770bd190a5c
--- /dev/null
+++ b/drivers/gpu/drm/gma500/gtt.c
@@ -0,0 +1,553 @@
+/*
+ * Copyright (c) 2007, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
+ * Alan Cox <alan@linux.intel.com>
+ */
+
+#include <drm/drmP.h>
+#include "psb_drv.h"
+
+
+/*
+ * GTT resource allocator - manage page mappings in GTT space
+ */
+
+/**
+ * psb_gtt_mask_pte - generate GTT pte entry
+ * @pfn: page number to encode
+ * @type: type of memory in the GTT
+ *
+ * Compute the GTT entry value for the appropriate memory type.
+ */
+static inline uint32_t psb_gtt_mask_pte(uint32_t pfn, int type)
+{
+ uint32_t mask = PSB_PTE_VALID;
+
+ if (type & PSB_MMU_CACHED_MEMORY)
+ mask |= PSB_PTE_CACHED;
+ if (type & PSB_MMU_RO_MEMORY)
+ mask |= PSB_PTE_RO;
+ if (type & PSB_MMU_WO_MEMORY)
+ mask |= PSB_PTE_WO;
+
+ return (pfn << PAGE_SHIFT) | mask;
+}
+
+/**
+ * psb_gtt_entry - find the GTT entries for a gtt_range
+ * @dev: our DRM device
+ * @r: our GTT range
+ *
+ * Given a gtt_range object return the GTT offset of the page table
+ * entries for this gtt_range
+ */
+u32 *psb_gtt_entry(struct drm_device *dev, struct gtt_range *r)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ unsigned long offset;
+
+ offset = r->resource.start - dev_priv->gtt_mem->start;
+
+ return dev_priv->gtt_map + (offset >> PAGE_SHIFT);
+}
+
+/**
+ * psb_gtt_insert - put an object into the GTT
+ * @dev: our DRM device
+ * @r: our GTT range
+ *
+ * Take our preallocated GTT range and insert the GEM object into
+ * the GTT. This is protected via the gtt mutex which the caller
+ * must hold.
+ */
+static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r)
+{
+ u32 *gtt_slot, pte;
+ struct page **pages;
+ int i;
+
+ if (r->pages == NULL) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ WARN_ON(r->stolen); /* refcount these maybe ? */
+
+ gtt_slot = psb_gtt_entry(dev, r);
+ pages = r->pages;
+
+ /* Make sure changes are visible to the GPU */
+ set_pages_array_uc(pages, r->npage);
+
+ /* Write our page entries into the GTT itself */
+ for (i = r->roll; i < r->npage; i++) {
+ pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0);
+ iowrite32(pte, gtt_slot++);
+ }
+ for (i = 0; i < r->roll; i++) {
+ pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0);
+ iowrite32(pte, gtt_slot++);
+ }
+ /* Make sure all the entries are set before we return */
+ ioread32(gtt_slot - 1);
+
+ return 0;
+}
+
+/**
+ * psb_gtt_remove - remove an object from the GTT
+ * @dev: our DRM device
+ * @r: our GTT range
+ *
+ * Remove a preallocated GTT range from the GTT. Overwrite all the
+ * page table entries with the dummy page. This is protected via the gtt
+ * mutex which the caller must hold.
+ */
+static void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ u32 *gtt_slot, pte;
+ int i;
+
+ WARN_ON(r->stolen);
+
+ gtt_slot = psb_gtt_entry(dev, r);
+ pte = psb_gtt_mask_pte(page_to_pfn(dev_priv->scratch_page), 0);
+
+ for (i = 0; i < r->npage; i++)
+ iowrite32(pte, gtt_slot++);
+ ioread32(gtt_slot - 1);
+ set_pages_array_wb(r->pages, r->npage);
+}
+
+/**
+ * psb_gtt_roll - set scrolling position
+ * @dev: our DRM device
+ * @r: the gtt mapping we are using
+ * @roll: roll offset
+ *
+ * Roll an existing pinned mapping by moving the pages through the GTT.
+ * This allows us to implement hardware scrolling on the consoles without
+ * a 2D engine
+ */
+void psb_gtt_roll(struct drm_device *dev, struct gtt_range *r, int roll)
+{
+ u32 *gtt_slot, pte;
+ int i;
+
+ if (roll >= r->npage) {
+ WARN_ON(1);
+ return;
+ }
+
+ r->roll = roll;
+
+	/* Not currently in the GTT - no worry, we will write the mapping at
+ the right position when it gets pinned */
+ if (!r->stolen && !r->in_gart)
+ return;
+
+ gtt_slot = psb_gtt_entry(dev, r);
+
+ for (i = r->roll; i < r->npage; i++) {
+ pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0);
+ iowrite32(pte, gtt_slot++);
+ }
+ for (i = 0; i < r->roll; i++) {
+ pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0);
+ iowrite32(pte, gtt_slot++);
+ }
+ ioread32(gtt_slot - 1);
+}
+
+/**
+ * psb_gtt_attach_pages - attach and pin GEM pages
+ * @gt: the gtt range
+ *
+ * Pin and build an in kernel list of the pages that back our GEM object.
+ * While we hold this the pages cannot be swapped out. This is protected
+ * via the gtt mutex which the caller must hold.
+ */
+static int psb_gtt_attach_pages(struct gtt_range *gt)
+{
+ struct inode *inode;
+ struct address_space *mapping;
+ int i;
+ struct page *p;
+ int pages = gt->gem.size / PAGE_SIZE;
+
+ WARN_ON(gt->pages);
+
+ /* This is the shared memory object that backs the GEM resource */
+ inode = gt->gem.filp->f_path.dentry->d_inode;
+ mapping = inode->i_mapping;
+
+ gt->pages = kmalloc(pages * sizeof(struct page *), GFP_KERNEL);
+ if (gt->pages == NULL)
+ return -ENOMEM;
+ gt->npage = pages;
+
+ for (i = 0; i < pages; i++) {
+ /* FIXME: needs updating as per mail from Hugh Dickins */
+ p = read_cache_page_gfp(mapping, i,
+ __GFP_COLD | GFP_KERNEL);
+ if (IS_ERR(p))
+ goto err;
+ gt->pages[i] = p;
+ }
+ return 0;
+
+err:
+ while (i--)
+ page_cache_release(gt->pages[i]);
+ kfree(gt->pages);
+ gt->pages = NULL;
+ return PTR_ERR(p);
+}
+
+/**
+ * psb_gtt_detach_pages - detach and release GEM pages
+ * @gt: the gtt range
+ *
+ * Undo the effect of psb_gtt_attach_pages. At this point the pages
+ * must have been removed from the GTT as they could now be paged out
+ * and their bus addresses may change. This is protected via the gtt
+ * mutex which the caller must hold.
+ */
+static void psb_gtt_detach_pages(struct gtt_range *gt)
+{
+ int i;
+ for (i = 0; i < gt->npage; i++) {
+ /* FIXME: do we need to force dirty */
+ set_page_dirty(gt->pages[i]);
+ page_cache_release(gt->pages[i]);
+ }
+ kfree(gt->pages);
+ gt->pages = NULL;
+}
+
+/**
+ * psb_gtt_pin - pin pages into the GTT
+ * @gt: range to pin
+ *
+ * Pin a set of pages into the GTT. The pins are refcounted so that
+ * multiple pins need multiple unpins to undo.
+ *
+ * Non GEM backed objects treat this as a no-op as they are always GTT
+ * backed objects.
+ */
+int psb_gtt_pin(struct gtt_range *gt)
+{
+ int ret = 0;
+ struct drm_device *dev = gt->gem.dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ mutex_lock(&dev_priv->gtt_mutex);
+
+ if (gt->in_gart == 0 && gt->stolen == 0) {
+ ret = psb_gtt_attach_pages(gt);
+ if (ret < 0)
+ goto out;
+ ret = psb_gtt_insert(dev, gt);
+ if (ret < 0) {
+ psb_gtt_detach_pages(gt);
+ goto out;
+ }
+ }
+ gt->in_gart++;
+out:
+ mutex_unlock(&dev_priv->gtt_mutex);
+ return ret;
+}
+
+/**
+ * psb_gtt_unpin - Drop a GTT pin requirement
+ * @gt: range to unpin
+ *
+ * Undoes the effect of psb_gtt_pin. On the last drop the GEM object
+ * will be removed from the GTT which will also drop the page references
+ * and allow the VM to reclaim or page out the backing store.
+ *
+ * Non GEM backed objects treat this as a no-op as they are always GTT
+ * backed objects.
+ */
+void psb_gtt_unpin(struct gtt_range *gt)
+{
+ struct drm_device *dev = gt->gem.dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ mutex_lock(&dev_priv->gtt_mutex);
+
+ WARN_ON(!gt->in_gart);
+
+ gt->in_gart--;
+ if (gt->in_gart == 0 && gt->stolen == 0) {
+ psb_gtt_remove(dev, gt);
+ psb_gtt_detach_pages(gt);
+ }
+ mutex_unlock(&dev_priv->gtt_mutex);
+}
+
+/*
+ * GTT resource allocator - allocate and manage GTT address space
+ */
+
+/**
+ * psb_gtt_alloc_range - allocate GTT address space
+ * @dev: Our DRM device
+ * @len: length (bytes) of address space required
+ * @name: resource name
+ * @backed: resource should be backed by stolen pages
+ *
+ * Ask the kernel core to find us a suitable range of addresses
+ * to use for a GTT mapping.
+ *
+ * Returns a gtt_range structure describing the object, or NULL on
+ * error. On successful return the resource is both allocated and marked
+ * as in use.
+ */
+struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len,
+ const char *name, int backed)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct gtt_range *gt;
+ struct resource *r = dev_priv->gtt_mem;
+ int ret;
+ unsigned long start, end;
+
+ if (backed) {
+ /* The start of the GTT is the stolen pages */
+ start = r->start;
+ end = r->start + dev_priv->gtt.stolen_size - 1;
+ } else {
+ /* The rest we will use for GEM backed objects */
+ start = r->start + dev_priv->gtt.stolen_size;
+ end = r->end;
+ }
+
+ gt = kzalloc(sizeof(struct gtt_range), GFP_KERNEL);
+ if (gt == NULL)
+ return NULL;
+ gt->resource.name = name;
+ gt->stolen = backed;
+ gt->in_gart = backed;
+ gt->roll = 0;
+ /* Ensure this is set for non GEM objects */
+ gt->gem.dev = dev;
+ ret = allocate_resource(dev_priv->gtt_mem, &gt->resource,
+ len, start, end, PAGE_SIZE, NULL, NULL);
+ if (ret == 0) {
+ gt->offset = gt->resource.start - r->start;
+ return gt;
+ }
+ kfree(gt);
+ return NULL;
+}
+
+/**
+ * psb_gtt_free_range - release GTT address space
+ * @dev: our DRM device
+ * @gt: a mapping created with psb_gtt_alloc_range
+ *
+ * Release a resource that was allocated with psb_gtt_alloc_range. If the
+ * object has been pinned by mmap users we clean this up here currently.
+ */
+void psb_gtt_free_range(struct drm_device *dev, struct gtt_range *gt)
+{
+ /* Undo the mmap pin if we are destroying the object */
+ if (gt->mmapping) {
+ psb_gtt_unpin(gt);
+ gt->mmapping = 0;
+ }
+ WARN_ON(gt->in_gart && !gt->stolen);
+ release_resource(&gt->resource);
+ kfree(gt);
+}
+
+void psb_gtt_alloc(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ init_rwsem(&dev_priv->gtt.sem);
+}
+
+void psb_gtt_takedown(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->gtt_map) {
+ iounmap(dev_priv->gtt_map);
+ dev_priv->gtt_map = NULL;
+ }
+ if (dev_priv->gtt_initialized) {
+ pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
+ dev_priv->gmch_ctrl);
+ PSB_WVDC32(dev_priv->pge_ctl, PSB_PGETBL_CTL);
+ (void) PSB_RVDC32(PSB_PGETBL_CTL);
+ }
+ if (dev_priv->vram_addr)
+ iounmap(dev_priv->gtt_map);
+}
+
+int psb_gtt_init(struct drm_device *dev, int resume)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ unsigned gtt_pages;
+ unsigned long stolen_size, vram_stolen_size;
+ unsigned i, num_pages;
+ unsigned pfn_base;
+ uint32_t vram_pages;
+ uint32_t dvmt_mode = 0;
+ struct psb_gtt *pg;
+
+ int ret = 0;
+ uint32_t pte;
+
+ mutex_init(&dev_priv->gtt_mutex);
+
+ psb_gtt_alloc(dev);
+ pg = &dev_priv->gtt;
+
+ /* Enable the GTT */
+ pci_read_config_word(dev->pdev, PSB_GMCH_CTRL, &dev_priv->gmch_ctrl);
+ pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
+ dev_priv->gmch_ctrl | _PSB_GMCH_ENABLED);
+
+ dev_priv->pge_ctl = PSB_RVDC32(PSB_PGETBL_CTL);
+ PSB_WVDC32(dev_priv->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
+ (void) PSB_RVDC32(PSB_PGETBL_CTL);
+
+ /* The root resource we allocate address space from */
+ dev_priv->gtt_initialized = 1;
+
+ pg->gtt_phys_start = dev_priv->pge_ctl & PAGE_MASK;
+
+ /*
+	 * The video MMU has a hardware bug when accessing 0xD0000000,
+	 * so make the GATT start at 0xE0000000. This doesn't actually
+	 * matter for us now but may if the video acceleration ever
+	 * gets opened up.
+ */
+ pg->mmu_gatt_start = 0xE0000000;
+
+ pg->gtt_start = pci_resource_start(dev->pdev, PSB_GTT_RESOURCE);
+ gtt_pages = pci_resource_len(dev->pdev, PSB_GTT_RESOURCE)
+ >> PAGE_SHIFT;
+	/* Some CDV firmware doesn't report this currently, in which case the
+	   system has 64 GTT pages */
+ if (pg->gtt_start == 0 || gtt_pages == 0) {
+ dev_err(dev->dev, "GTT PCI BAR not initialized.\n");
+ gtt_pages = 64;
+ pg->gtt_start = dev_priv->pge_ctl;
+ }
+
+ pg->gatt_start = pci_resource_start(dev->pdev, PSB_GATT_RESOURCE);
+ pg->gatt_pages = pci_resource_len(dev->pdev, PSB_GATT_RESOURCE)
+ >> PAGE_SHIFT;
+ dev_priv->gtt_mem = &dev->pdev->resource[PSB_GATT_RESOURCE];
+
+ if (pg->gatt_pages == 0 || pg->gatt_start == 0) {
+ static struct resource fudge; /* Preferably peppermint */
+ /* This can occur on CDV SDV systems. Fudge it in this case.
+ We really don't care what imaginary space is being allocated
+ at this point */
+ dev_err(dev->dev, "GATT PCI BAR not initialized.\n");
+ pg->gatt_start = 0x40000000;
+ pg->gatt_pages = (128 * 1024 * 1024) >> PAGE_SHIFT;
+ /* This is a little confusing but in fact the GTT is providing
+ a view from the GPU into memory and not vice versa. As such
+ this is really allocating space that is not the same as the
+ CPU address space on CDV */
+ fudge.start = 0x40000000;
+ fudge.end = 0x40000000 + 128 * 1024 * 1024 - 1;
+ fudge.name = "fudge";
+ fudge.flags = IORESOURCE_MEM;
+ dev_priv->gtt_mem = &fudge;
+ }
+
+ pci_read_config_dword(dev->pdev, PSB_BSM, &dev_priv->stolen_base);
+ vram_stolen_size = pg->gtt_phys_start - dev_priv->stolen_base
+ - PAGE_SIZE;
+
+ stolen_size = vram_stolen_size;
+
+ printk(KERN_INFO "Stolen memory information\n");
+ printk(KERN_INFO " base in RAM: 0x%x\n", dev_priv->stolen_base);
+ printk(KERN_INFO " size: %luK, calculated by (GTT RAM base) - (Stolen base), seems wrong\n",
+ vram_stolen_size/1024);
+ dvmt_mode = (dev_priv->gmch_ctrl >> 4) & 0x7;
+ printk(KERN_INFO " the correct size should be: %dM(dvmt mode=%d)\n",
+ (dvmt_mode == 1) ? 1 : (2 << (dvmt_mode - 1)), dvmt_mode);
+
+ if (resume && (gtt_pages != pg->gtt_pages) &&
+ (stolen_size != pg->stolen_size)) {
+ dev_err(dev->dev, "GTT resume error.\n");
+ ret = -EINVAL;
+ goto out_err;
+ }
+
+ pg->gtt_pages = gtt_pages;
+ pg->stolen_size = stolen_size;
+ dev_priv->vram_stolen_size = vram_stolen_size;
+
+ /*
+ * Map the GTT and the stolen memory area
+ */
+ dev_priv->gtt_map = ioremap_nocache(pg->gtt_phys_start,
+ gtt_pages << PAGE_SHIFT);
+ if (!dev_priv->gtt_map) {
+ dev_err(dev->dev, "Failure to map gtt.\n");
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ dev_priv->vram_addr = ioremap_wc(dev_priv->stolen_base, stolen_size);
+ if (!dev_priv->vram_addr) {
+ dev_err(dev->dev, "Failure to map stolen base.\n");
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ /*
+ * Insert vram stolen pages into the GTT
+ */
+
+ pfn_base = dev_priv->stolen_base >> PAGE_SHIFT;
+ vram_pages = num_pages = vram_stolen_size >> PAGE_SHIFT;
+ printk(KERN_INFO"Set up %d stolen pages starting at 0x%08x, GTT offset %dK\n",
+ num_pages, pfn_base << PAGE_SHIFT, 0);
+ for (i = 0; i < num_pages; ++i) {
+ pte = psb_gtt_mask_pte(pfn_base + i, 0);
+ iowrite32(pte, dev_priv->gtt_map + i);
+ }
+
+ /*
+ * Init rest of GTT to the scratch page to avoid accidents or scribbles
+ */
+
+ pfn_base = page_to_pfn(dev_priv->scratch_page);
+ pte = psb_gtt_mask_pte(pfn_base, 0);
+ for (; i < gtt_pages; ++i)
+ iowrite32(pte, dev_priv->gtt_map + i);
+
+ (void) ioread32(dev_priv->gtt_map + i - 1);
+ return 0;
+
+out_err:
+ psb_gtt_takedown(dev);
+ return ret;
+}
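
psb_gtt_mask_pte() above builds each GTT page table entry as the page frame address ORed with status bits, and psb_gtt_alloc_range() carves the aperture so stolen-backed objects come from the front and GEM-backed objects from the remainder. A self-contained sketch of the PTE encoding follows; the flag values are illustrative, not the driver's PSB_PTE_* definitions.

/* Standalone sketch of the GTT PTE encoding done in psb_gtt_mask_pte().
 * Flag values are illustrative; the driver uses PSB_PTE_VALID and friends. */
#include <stdio.h>
#include <stdint.h>

#define EX_PAGE_SHIFT	12
#define EX_PTE_VALID	0x1
#define EX_PTE_CACHED	0x2

static uint32_t mask_pte(uint32_t pfn, int cached)
{
	uint32_t mask = EX_PTE_VALID;

	if (cached)
		mask |= EX_PTE_CACHED;
	return (pfn << EX_PAGE_SHIFT) | mask;
}

int main(void)
{
	/* Page frame 0x3F000, i.e. physical 0x3F000000, valid and cached */
	printf("pte=0x%08x\n", (unsigned int)mask_pte(0x3F000, 1));
	return 0;
}
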
diff --git a/drivers/gpu/drm/gma500/gtt.h b/drivers/gpu/drm/gma500/gtt.h
new file mode 100644
index 000000000000..aa1742387f5a
--- /dev/null
+++ b/drivers/gpu/drm/gma500/gtt.h
@@ -0,0 +1,64 @@
+/**************************************************************************
+ * Copyright (c) 2007-2008, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#ifndef _PSB_GTT_H_
+#define _PSB_GTT_H_
+
+#include <drm/drmP.h>
+
+/* This wants cleaning up with respect to the psb_dev and un-needed stuff */
+struct psb_gtt {
+ uint32_t gatt_start;
+ uint32_t mmu_gatt_start;
+ uint32_t gtt_start;
+ uint32_t gtt_phys_start;
+ unsigned gtt_pages;
+ unsigned gatt_pages;
+ unsigned long stolen_size;
+ unsigned long vram_stolen_size;
+ struct rw_semaphore sem;
+};
+
+/* Exported functions */
+extern int psb_gtt_init(struct drm_device *dev, int resume);
+extern void psb_gtt_takedown(struct drm_device *dev);
+
+/* Each gtt_range describes an allocation in the GTT area */
+struct gtt_range {
+ struct resource resource; /* Resource for our allocation */
+ u32 offset; /* GTT offset of our object */
+ struct drm_gem_object gem; /* GEM high level stuff */
+ int in_gart; /* Currently in the GART (ref ct) */
+ bool stolen; /* Backed from stolen RAM */
+ bool mmapping; /* Is mmappable */
+ struct page **pages; /* Backing pages if present */
+ int npage; /* Number of backing pages */
+ int roll; /* Roll applied to the GTT entries */
+};
+
+extern struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len,
+ const char *name, int backed);
+extern void psb_gtt_kref_put(struct gtt_range *gt);
+extern void psb_gtt_free_range(struct drm_device *dev, struct gtt_range *gt);
+extern int psb_gtt_pin(struct gtt_range *gt);
+extern void psb_gtt_unpin(struct gtt_range *gt);
+extern void psb_gtt_roll(struct drm_device *dev,
+ struct gtt_range *gt, int roll);
+
+#endif
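
The helpers declared above follow a simple lifecycle: allocate a range, pin it while it must stay resident in the GTT, optionally roll the mapping for console scrolling, then unpin and free. A hedged usage sketch, not part of the driver, with error handling abbreviated:

/* Hedged usage sketch of the gtt_range helpers declared above. */
#include "gtt.h"

static int example_use_gtt(struct drm_device *dev)
{
	struct gtt_range *gt;
	int ret;

	/* 16 pages of GEM-backed (not stolen) GTT space */
	gt = psb_gtt_alloc_range(dev, 16 * PAGE_SIZE, "example", 0);
	if (gt == NULL)
		return -ENOMEM;

	ret = psb_gtt_pin(gt);		/* attach pages and write the PTEs */
	if (ret < 0) {
		psb_gtt_free_range(dev, gt);
		return ret;
	}

	psb_gtt_roll(dev, gt, 4);	/* scroll the mapping by four pages */

	psb_gtt_unpin(gt);		/* last unpin removes the PTEs */
	psb_gtt_free_range(dev, gt);
	return 0;
}
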
diff --git a/drivers/gpu/drm/gma500/intel_bios.c b/drivers/gpu/drm/gma500/intel_bios.c
new file mode 100644
index 000000000000..d4d0c5b8bf91
--- /dev/null
+++ b/drivers/gpu/drm/gma500/intel_bios.c
@@ -0,0 +1,303 @@
+/*
+ * Copyright (c) 2006 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include "gma_drm.h"
+#include "psb_drv.h"
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "intel_bios.h"
+
+
+static void *find_section(struct bdb_header *bdb, int section_id)
+{
+ u8 *base = (u8 *)bdb;
+ int index = 0;
+ u16 total, current_size;
+ u8 current_id;
+
+ /* skip to first section */
+ index += bdb->header_size;
+ total = bdb->bdb_size;
+
+ /* walk the sections looking for section_id */
+ while (index < total) {
+ current_id = *(base + index);
+ index++;
+ current_size = *((u16 *)(base + index));
+ index += 2;
+ if (current_id == section_id)
+ return base + index;
+ index += current_size;
+ }
+
+ return NULL;
+}
+
+static void fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
+ struct lvds_dvo_timing *dvo_timing)
+{
+ panel_fixed_mode->hdisplay = (dvo_timing->hactive_hi << 8) |
+ dvo_timing->hactive_lo;
+ panel_fixed_mode->hsync_start = panel_fixed_mode->hdisplay +
+ ((dvo_timing->hsync_off_hi << 8) | dvo_timing->hsync_off_lo);
+ panel_fixed_mode->hsync_end = panel_fixed_mode->hsync_start +
+ dvo_timing->hsync_pulse_width;
+ panel_fixed_mode->htotal = panel_fixed_mode->hdisplay +
+ ((dvo_timing->hblank_hi << 8) | dvo_timing->hblank_lo);
+
+ panel_fixed_mode->vdisplay = (dvo_timing->vactive_hi << 8) |
+ dvo_timing->vactive_lo;
+ panel_fixed_mode->vsync_start = panel_fixed_mode->vdisplay +
+ dvo_timing->vsync_off;
+ panel_fixed_mode->vsync_end = panel_fixed_mode->vsync_start +
+ dvo_timing->vsync_pulse_width;
+ panel_fixed_mode->vtotal = panel_fixed_mode->vdisplay +
+ ((dvo_timing->vblank_hi << 8) | dvo_timing->vblank_lo);
+ panel_fixed_mode->clock = dvo_timing->clock * 10;
+ panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED;
+
+ /* Some VBTs have bogus h/vtotal values */
+ if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal)
+ panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1;
+ if (panel_fixed_mode->vsync_end > panel_fixed_mode->vtotal)
+ panel_fixed_mode->vtotal = panel_fixed_mode->vsync_end + 1;
+
+ drm_mode_set_name(panel_fixed_mode);
+}
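
As fill_detail_timing_data() above shows, the VBT stores each timing field split into low and high parts and the pixel clock in 10 kHz units. A worked example of the reassembly, with illustrative values:

/* Worked example of the hi/lo field reassembly done in
 * fill_detail_timing_data(). The values are illustrative only. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t hactive_lo = 0x56, hactive_hi = 0x5;	/* 0x556 = 1366 */
	uint16_t clock = 7213;		/* VBT stores the clock in 10 kHz units */

	unsigned int hdisplay = (hactive_hi << 8) | hactive_lo;
	unsigned int clock_khz = clock * 10;

	printf("hdisplay=%u clock=%u kHz\n", hdisplay, clock_khz);
	return 0;
}
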
+
+static void parse_backlight_data(struct drm_psb_private *dev_priv,
+ struct bdb_header *bdb)
+{
+ struct bdb_lvds_backlight *vbt_lvds_bl = NULL;
+ struct bdb_lvds_backlight *lvds_bl;
+ u8 p_type = 0;
+ void *bl_start = NULL;
+ struct bdb_lvds_options *lvds_opts
+ = find_section(bdb, BDB_LVDS_OPTIONS);
+
+ dev_priv->lvds_bl = NULL;
+
+ if (lvds_opts)
+ p_type = lvds_opts->panel_type;
+ else
+ return;
+
+ bl_start = find_section(bdb, BDB_LVDS_BACKLIGHT);
+ vbt_lvds_bl = (struct bdb_lvds_backlight *)(bl_start + 1) + p_type;
+
+ lvds_bl = kzalloc(sizeof(*vbt_lvds_bl), GFP_KERNEL);
+ if (!lvds_bl) {
+ dev_err(dev_priv->dev->dev, "out of memory for backlight data\n");
+ return;
+ }
+ memcpy(lvds_bl, vbt_lvds_bl, sizeof(*vbt_lvds_bl));
+ dev_priv->lvds_bl = lvds_bl;
+}
+
+/* Try to find integrated panel data */
+static void parse_lfp_panel_data(struct drm_psb_private *dev_priv,
+ struct bdb_header *bdb)
+{
+ struct bdb_lvds_options *lvds_options;
+ struct bdb_lvds_lfp_data *lvds_lfp_data;
+ struct bdb_lvds_lfp_data_entry *entry;
+ struct lvds_dvo_timing *dvo_timing;
+ struct drm_display_mode *panel_fixed_mode;
+
+ /* Defaults if we can't find VBT info */
+ dev_priv->lvds_dither = 0;
+ dev_priv->lvds_vbt = 0;
+
+ lvds_options = find_section(bdb, BDB_LVDS_OPTIONS);
+ if (!lvds_options)
+ return;
+
+ dev_priv->lvds_dither = lvds_options->pixel_dither;
+ if (lvds_options->panel_type == 0xff)
+ return;
+
+ lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA);
+ if (!lvds_lfp_data)
+ return;
+
+
+ entry = &lvds_lfp_data->data[lvds_options->panel_type];
+ dvo_timing = &entry->dvo_timing;
+
+ panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode),
+ GFP_KERNEL);
+ if (panel_fixed_mode == NULL) {
+ dev_err(dev_priv->dev->dev, "out of memory for fixed panel mode\n");
+ return;
+ }
+
+ dev_priv->lvds_vbt = 1;
+ fill_detail_timing_data(panel_fixed_mode, dvo_timing);
+
+ if (panel_fixed_mode->htotal > 0 && panel_fixed_mode->vtotal > 0) {
+ dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode;
+ drm_mode_debug_printmodeline(panel_fixed_mode);
+ } else {
+ dev_dbg(dev_priv->dev->dev, "ignoring invalid LVDS VBT\n");
+ dev_priv->lvds_vbt = 0;
+ kfree(panel_fixed_mode);
+ }
+ return;
+}
+
+/* Try to find sdvo panel data */
+static void parse_sdvo_panel_data(struct drm_psb_private *dev_priv,
+ struct bdb_header *bdb)
+{
+ struct bdb_sdvo_lvds_options *sdvo_lvds_options;
+ struct lvds_dvo_timing *dvo_timing;
+ struct drm_display_mode *panel_fixed_mode;
+
+ dev_priv->sdvo_lvds_vbt_mode = NULL;
+
+ sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS);
+ if (!sdvo_lvds_options)
+ return;
+
+ dvo_timing = find_section(bdb, BDB_SDVO_PANEL_DTDS);
+ if (!dvo_timing)
+ return;
+
+ panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
+
+ if (!panel_fixed_mode)
+ return;
+
+ fill_detail_timing_data(panel_fixed_mode,
+ dvo_timing + sdvo_lvds_options->panel_type);
+
+ dev_priv->sdvo_lvds_vbt_mode = panel_fixed_mode;
+
+ return;
+}
+
+static void parse_general_features(struct drm_psb_private *dev_priv,
+ struct bdb_header *bdb)
+{
+ struct bdb_general_features *general;
+
+ /* Set sensible defaults in case we can't find the general block */
+ dev_priv->int_tv_support = 1;
+ dev_priv->int_crt_support = 1;
+
+ general = find_section(bdb, BDB_GENERAL_FEATURES);
+ if (general) {
+ dev_priv->int_tv_support = general->int_tv_support;
+ dev_priv->int_crt_support = general->int_crt_support;
+ dev_priv->lvds_use_ssc = general->enable_ssc;
+
+ if (dev_priv->lvds_use_ssc) {
+ dev_priv->lvds_ssc_freq
+ = general->ssc_freq ? 100 : 96;
+ }
+ }
+}
+
+/**
+ * psb_intel_init_bios - initialize VBIOS settings & find VBT
+ * @dev: DRM device
+ *
+ * Loads the Video BIOS and checks that the VBT exists. Sets scratch registers
+ * to appropriate values.
+ *
+ * VBT existence is a sanity check that is relied on by other i830_bios.c code.
+ * Note that it would be better to use a BIOS call to get the VBT, as BIOSes may
+ * feed an updated VBT back through that, compared to what we'll fetch using
+ * this method of groping around in the BIOS data.
+ *
+ * Returns 0 on success, nonzero on failure.
+ */
+bool psb_intel_init_bios(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct pci_dev *pdev = dev->pdev;
+ struct vbt_header *vbt = NULL;
+ struct bdb_header *bdb;
+ u8 __iomem *bios;
+ size_t size;
+ int i;
+
+ bios = pci_map_rom(pdev, &size);
+ if (!bios)
+ return -1;
+
+ /* Scour memory looking for the VBT signature */
+ for (i = 0; i + 4 < size; i++) {
+ if (!memcmp(bios + i, "$VBT", 4)) {
+ vbt = (struct vbt_header *)(bios + i);
+ break;
+ }
+ }
+
+ if (!vbt) {
+ dev_err(dev->dev, "VBT signature missing\n");
+ pci_unmap_rom(pdev, bios);
+ return -1;
+ }
+
+ bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset);
+
+ /* Grab useful general definitions */
+ parse_general_features(dev_priv, bdb);
+ parse_lfp_panel_data(dev_priv, bdb);
+ parse_sdvo_panel_data(dev_priv, bdb);
+ parse_backlight_data(dev_priv, bdb);
+
+ pci_unmap_rom(pdev, bios);
+
+ return 0;
+}
+
+/**
+ * Destroy and free VBT data
+ */
+void psb_intel_destroy_bios(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_display_mode *sdvo_lvds_vbt_mode =
+ dev_priv->sdvo_lvds_vbt_mode;
+ struct drm_display_mode *lfp_lvds_vbt_mode =
+ dev_priv->lfp_lvds_vbt_mode;
+ struct bdb_lvds_backlight *lvds_bl =
+ dev_priv->lvds_bl;
+
+	/* Free the SDVO panel mode */
+ if (sdvo_lvds_vbt_mode) {
+ dev_priv->sdvo_lvds_vbt_mode = NULL;
+ kfree(sdvo_lvds_vbt_mode);
+ }
+
+ if (lfp_lvds_vbt_mode) {
+ dev_priv->lfp_lvds_vbt_mode = NULL;
+ kfree(lfp_lvds_vbt_mode);
+ }
+
+ if (lvds_bl) {
+ dev_priv->lvds_bl = NULL;
+ kfree(lvds_bl);
+ }
+}
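
find_section() above walks the BDB as a sequence of blocks, each introduced by a one-byte ID and a two-byte size, and returns a pointer to the payload of the first matching block. A standalone sketch of that walk over a made-up buffer; as in the driver, a little-endian host is assumed for the size read (memcpy is used here only to avoid unaligned access in a portable sketch):

/* Standalone sketch of the BDB block walk performed by find_section().
 * The buffer below is made up: two blocks with IDs 40 and 42. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

static void *find_block(uint8_t *base, uint16_t total, int header_size, int id)
{
	int index = header_size;		/* skip the BDB header */

	while (index < total) {
		int current_id = base[index++];
		uint16_t current_size;

		memcpy(&current_size, base + index, 2);
		index += 2;
		if (current_id == id)
			return base + index;	/* payload starts here */
		index += current_size;
	}
	return NULL;
}

int main(void)
{
	/* 4-byte fake header, block 40 (1-byte payload), block 42 (2 bytes) */
	uint8_t bdb[] = { 0, 0, 0, 0,  40, 1, 0, 0xAA,  42, 2, 0, 0x11, 0x22 };
	uint8_t *p = find_block(bdb, sizeof(bdb), 4, 42);

	printf("block 42 payload byte 0 = 0x%02x\n", p ? p[0] : 0);
	return 0;
}
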
diff --git a/drivers/gpu/drm/gma500/intel_bios.h b/drivers/gpu/drm/gma500/intel_bios.h
new file mode 100644
index 000000000000..70f1bf018183
--- /dev/null
+++ b/drivers/gpu/drm/gma500/intel_bios.h
@@ -0,0 +1,430 @@
+/*
+ * Copyright (c) 2006 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#ifndef _I830_BIOS_H_
+#define _I830_BIOS_H_
+
+#include <drm/drmP.h>
+
+struct vbt_header {
+	u8 signature[20];		/**< Always starts with '$VBT' */
+ u16 version; /**< decimal */
+ u16 header_size; /**< in bytes */
+ u16 vbt_size; /**< in bytes */
+ u8 vbt_checksum;
+ u8 reserved0;
+ u32 bdb_offset; /**< from beginning of VBT */
+ u32 aim_offset[4]; /**< from beginning of VBT */
+} __attribute__((packed));
+
+
+struct bdb_header {
+ u8 signature[16]; /**< Always 'BIOS_DATA_BLOCK' */
+ u16 version; /**< decimal */
+ u16 header_size; /**< in bytes */
+ u16 bdb_size; /**< in bytes */
+};
+
+/* strictly speaking, this is a "skip" block, but it has interesting info */
+struct vbios_data {
+ u8 type; /* 0 == desktop, 1 == mobile */
+ u8 relstage;
+ u8 chipset;
+ u8 lvds_present:1;
+ u8 tv_present:1;
+ u8 rsvd2:6; /* finish byte */
+ u8 rsvd3[4];
+ u8 signon[155];
+ u8 copyright[61];
+ u16 code_segment;
+ u8 dos_boot_mode;
+ u8 bandwidth_percent;
+ u8 rsvd4; /* popup memory size */
+ u8 resize_pci_bios;
+ u8 rsvd5; /* is crt already on ddc2 */
+} __attribute__((packed));
+
+/*
+ * There are several types of BIOS data blocks (BDBs), each block has
+ * an ID and size in the first 3 bytes (ID in first, size in next 2).
+ * Known types are listed below.
+ */
+#define BDB_GENERAL_FEATURES 1
+#define BDB_GENERAL_DEFINITIONS 2
+#define BDB_OLD_TOGGLE_LIST 3
+#define BDB_MODE_SUPPORT_LIST 4
+#define BDB_GENERIC_MODE_TABLE 5
+#define BDB_EXT_MMIO_REGS 6
+#define BDB_SWF_IO 7
+#define BDB_SWF_MMIO 8
+#define BDB_DOT_CLOCK_TABLE 9
+#define BDB_MODE_REMOVAL_TABLE 10
+#define BDB_CHILD_DEVICE_TABLE 11
+#define BDB_DRIVER_FEATURES 12
+#define BDB_DRIVER_PERSISTENCE 13
+#define BDB_EXT_TABLE_PTRS 14
+#define BDB_DOT_CLOCK_OVERRIDE 15
+#define BDB_DISPLAY_SELECT 16
+/* 17 rsvd */
+#define BDB_DRIVER_ROTATION 18
+#define BDB_DISPLAY_REMOVE 19
+#define BDB_OEM_CUSTOM 20
+#define BDB_EFP_LIST 21 /* workarounds for VGA hsync/vsync */
+#define BDB_SDVO_LVDS_OPTIONS 22
+#define BDB_SDVO_PANEL_DTDS 23
+#define BDB_SDVO_LVDS_PNP_IDS 24
+#define BDB_SDVO_LVDS_POWER_SEQ 25
+#define BDB_TV_OPTIONS 26
+#define BDB_LVDS_OPTIONS 40
+#define BDB_LVDS_LFP_DATA_PTRS 41
+#define BDB_LVDS_LFP_DATA 42
+#define BDB_LVDS_BACKLIGHT 43
+#define BDB_LVDS_POWER 44
+#define BDB_SKIP 254 /* VBIOS private block, ignore */
+
+struct bdb_general_features {
+ /* bits 1 */
+ u8 panel_fitting:2;
+ u8 flexaim:1;
+ u8 msg_enable:1;
+ u8 clear_screen:3;
+ u8 color_flip:1;
+
+ /* bits 2 */
+ u8 download_ext_vbt:1;
+ u8 enable_ssc:1;
+ u8 ssc_freq:1;
+ u8 enable_lfp_on_override:1;
+ u8 disable_ssc_ddt:1;
+ u8 rsvd8:3; /* finish byte */
+
+ /* bits 3 */
+ u8 disable_smooth_vision:1;
+ u8 single_dvi:1;
+ u8 rsvd9:6; /* finish byte */
+
+ /* bits 4 */
+ u8 legacy_monitor_detect;
+
+ /* bits 5 */
+ u8 int_crt_support:1;
+ u8 int_tv_support:1;
+ u8 rsvd11:6; /* finish byte */
+} __attribute__((packed));
+
+struct bdb_general_definitions {
+ /* DDC GPIO */
+ u8 crt_ddc_gmbus_pin;
+
+ /* DPMS bits */
+ u8 dpms_acpi:1;
+ u8 skip_boot_crt_detect:1;
+ u8 dpms_aim:1;
+ u8 rsvd1:5; /* finish byte */
+
+ /* boot device bits */
+ u8 boot_display[2];
+ u8 child_dev_size;
+
+ /* device info */
+ u8 tv_or_lvds_info[33];
+ u8 dev1[33];
+ u8 dev2[33];
+ u8 dev3[33];
+ u8 dev4[33];
+ /* may be another device block here on some platforms */
+};
+
+struct bdb_lvds_options {
+ u8 panel_type;
+ u8 rsvd1;
+ /* LVDS capabilities, stored in a dword */
+ u8 pfit_mode:2;
+ u8 pfit_text_mode_enhanced:1;
+ u8 pfit_gfx_mode_enhanced:1;
+ u8 pfit_ratio_auto:1;
+ u8 pixel_dither:1;
+ u8 lvds_edid:1;
+ u8 rsvd2:1;
+ u8 rsvd4;
+} __attribute__((packed));
+
+struct bdb_lvds_backlight {
+ u8 type:2;
+ u8 pol:1;
+ u8 gpio:3;
+ u8 gmbus:2;
+ u16 freq;
+ u8 minbrightness;
+ u8 i2caddr;
+ u8 brightnesscmd;
+ /*FIXME: more...*/
+} __attribute__((packed));
+
+/* LFP pointer table contains entries to the struct below */
+struct bdb_lvds_lfp_data_ptr {
+ u16 fp_timing_offset; /* offsets are from start of bdb */
+ u8 fp_table_size;
+ u16 dvo_timing_offset;
+ u8 dvo_table_size;
+ u16 panel_pnp_id_offset;
+ u8 pnp_table_size;
+} __attribute__((packed));
+
+struct bdb_lvds_lfp_data_ptrs {
+ u8 lvds_entries; /* followed by one or more lvds_data_ptr structs */
+ struct bdb_lvds_lfp_data_ptr ptr[16];
+} __attribute__((packed));
+
+/* LFP data has 3 blocks per entry */
+struct lvds_fp_timing {
+ u16 x_res;
+ u16 y_res;
+ u32 lvds_reg;
+ u32 lvds_reg_val;
+ u32 pp_on_reg;
+ u32 pp_on_reg_val;
+ u32 pp_off_reg;
+ u32 pp_off_reg_val;
+ u32 pp_cycle_reg;
+ u32 pp_cycle_reg_val;
+ u32 pfit_reg;
+ u32 pfit_reg_val;
+ u16 terminator;
+} __attribute__((packed));
+
+struct lvds_dvo_timing {
+ u16 clock; /**< In 10khz */
+ u8 hactive_lo;
+ u8 hblank_lo;
+ u8 hblank_hi:4;
+ u8 hactive_hi:4;
+ u8 vactive_lo;
+ u8 vblank_lo;
+ u8 vblank_hi:4;
+ u8 vactive_hi:4;
+ u8 hsync_off_lo;
+ u8 hsync_pulse_width;
+ u8 vsync_pulse_width:4;
+ u8 vsync_off:4;
+ u8 rsvd0:6;
+ u8 hsync_off_hi:2;
+ u8 h_image;
+ u8 v_image;
+ u8 max_hv;
+ u8 h_border;
+ u8 v_border;
+ u8 rsvd1:3;
+ u8 digital:2;
+ u8 vsync_positive:1;
+ u8 hsync_positive:1;
+ u8 rsvd2:1;
+} __attribute__((packed));
+
+struct lvds_pnp_id {
+ u16 mfg_name;
+ u16 product_code;
+ u32 serial;
+ u8 mfg_week;
+ u8 mfg_year;
+} __attribute__((packed));
+
+struct bdb_lvds_lfp_data_entry {
+ struct lvds_fp_timing fp_timing;
+ struct lvds_dvo_timing dvo_timing;
+ struct lvds_pnp_id pnp_id;
+} __attribute__((packed));
+
+struct bdb_lvds_lfp_data {
+ struct bdb_lvds_lfp_data_entry data[16];
+} __attribute__((packed));
+
+struct aimdb_header {
+ char signature[16];
+ char oem_device[20];
+ u16 aimdb_version;
+ u16 aimdb_header_size;
+ u16 aimdb_size;
+} __attribute__((packed));
+
+struct aimdb_block {
+ u8 aimdb_id;
+ u16 aimdb_size;
+} __attribute__((packed));
+
+struct vch_panel_data {
+ u16 fp_timing_offset;
+ u8 fp_timing_size;
+ u16 dvo_timing_offset;
+ u8 dvo_timing_size;
+ u16 text_fitting_offset;
+ u8 text_fitting_size;
+ u16 graphics_fitting_offset;
+ u8 graphics_fitting_size;
+} __attribute__((packed));
+
+struct vch_bdb_22 {
+ struct aimdb_block aimdb_block;
+ struct vch_panel_data panels[16];
+} __attribute__((packed));
+
+struct bdb_sdvo_lvds_options {
+ u8 panel_backlight;
+ u8 h40_set_panel_type;
+ u8 panel_type;
+ u8 ssc_clk_freq;
+ u16 als_low_trip;
+ u16 als_high_trip;
+ u8 sclalarcoeff_tab_row_num;
+ u8 sclalarcoeff_tab_row_size;
+ u8 coefficient[8];
+ u8 panel_misc_bits_1;
+ u8 panel_misc_bits_2;
+ u8 panel_misc_bits_3;
+ u8 panel_misc_bits_4;
+} __attribute__((packed));
+
+
+extern bool psb_intel_init_bios(struct drm_device *dev);
+extern void psb_intel_destroy_bios(struct drm_device *dev);
+
+/*
+ * Driver<->VBIOS interaction occurs through scratch bits in
+ * GR18 & SWF*.
+ */
+
+/* GR18 bits are set on display switch and hotkey events */
+#define GR18_DRIVER_SWITCH_EN (1<<7) /* 0: VBIOS control, 1: driver control */
+#define GR18_HOTKEY_MASK 0x78 /* See also SWF4 15:0 */
+#define GR18_HK_NONE (0x0<<3)
+#define GR18_HK_LFP_STRETCH (0x1<<3)
+#define GR18_HK_TOGGLE_DISP (0x2<<3)
+#define GR18_HK_DISP_SWITCH (0x4<<3) /* see SWF14 15:0 for what to enable */
+#define GR18_HK_POPUP_DISABLED (0x6<<3)
+#define GR18_HK_POPUP_ENABLED (0x7<<3)
+#define GR18_HK_PFIT (0x8<<3)
+#define GR18_HK_APM_CHANGE (0xa<<3)
+#define GR18_HK_MULTIPLE (0xc<<3)
+#define GR18_USER_INT_EN (1<<2)
+#define GR18_A0000_FLUSH_EN (1<<1)
+#define GR18_SMM_EN (1<<0)
+
+/* Set by driver, cleared by VBIOS */
+#define SWF00_YRES_SHIFT 16
+#define SWF00_XRES_SHIFT 0
+#define SWF00_RES_MASK 0xffff
+
+/* Set by VBIOS at boot time and driver at runtime */
+#define SWF01_TV2_FORMAT_SHIFT 8
+#define SWF01_TV1_FORMAT_SHIFT 0
+#define SWF01_TV_FORMAT_MASK 0xffff
+
+#define SWF10_VBIOS_BLC_I2C_EN (1<<29)
+#define SWF10_GTT_OVERRIDE_EN (1<<28)
+#define SWF10_LFP_DPMS_OVR (1<<27) /* override DPMS on display switch */
+#define SWF10_ACTIVE_TOGGLE_LIST_MASK (7<<24)
+#define SWF10_OLD_TOGGLE 0x0
+#define SWF10_TOGGLE_LIST_1 0x1
+#define SWF10_TOGGLE_LIST_2 0x2
+#define SWF10_TOGGLE_LIST_3 0x3
+#define SWF10_TOGGLE_LIST_4 0x4
+#define SWF10_PANNING_EN (1<<23)
+#define SWF10_DRIVER_LOADED (1<<22)
+#define SWF10_EXTENDED_DESKTOP (1<<21)
+#define SWF10_EXCLUSIVE_MODE (1<<20)
+#define SWF10_OVERLAY_EN (1<<19)
+#define SWF10_PLANEB_HOLDOFF (1<<18)
+#define SWF10_PLANEA_HOLDOFF (1<<17)
+#define SWF10_VGA_HOLDOFF (1<<16)
+#define SWF10_ACTIVE_DISP_MASK 0xffff
+#define SWF10_PIPEB_LFP2 (1<<15)
+#define SWF10_PIPEB_EFP2 (1<<14)
+#define SWF10_PIPEB_TV2 (1<<13)
+#define SWF10_PIPEB_CRT2 (1<<12)
+#define SWF10_PIPEB_LFP (1<<11)
+#define SWF10_PIPEB_EFP (1<<10)
+#define SWF10_PIPEB_TV (1<<9)
+#define SWF10_PIPEB_CRT (1<<8)
+#define SWF10_PIPEA_LFP2 (1<<7)
+#define SWF10_PIPEA_EFP2 (1<<6)
+#define SWF10_PIPEA_TV2 (1<<5)
+#define SWF10_PIPEA_CRT2 (1<<4)
+#define SWF10_PIPEA_LFP (1<<3)
+#define SWF10_PIPEA_EFP (1<<2)
+#define SWF10_PIPEA_TV (1<<1)
+#define SWF10_PIPEA_CRT (1<<0)
+
+#define SWF11_MEMORY_SIZE_SHIFT 16
+#define SWF11_SV_TEST_EN (1<<15)
+#define SWF11_IS_AGP (1<<14)
+#define SWF11_DISPLAY_HOLDOFF (1<<13)
+#define SWF11_DPMS_REDUCED (1<<12)
+#define SWF11_IS_VBE_MODE (1<<11)
+#define SWF11_PIPEB_ACCESS (1<<10) /* 0 here means pipe a */
+#define SWF11_DPMS_MASK 0x07
+#define SWF11_DPMS_OFF (1<<2)
+#define SWF11_DPMS_SUSPEND (1<<1)
+#define SWF11_DPMS_STANDBY (1<<0)
+#define SWF11_DPMS_ON 0
+
+#define SWF14_GFX_PFIT_EN (1<<31)
+#define SWF14_TEXT_PFIT_EN (1<<30)
+#define SWF14_LID_STATUS_CLOSED (1<<29) /* 0 here means open */
+#define SWF14_POPUP_EN (1<<28)
+#define SWF14_DISPLAY_HOLDOFF (1<<27)
+#define SWF14_DISP_DETECT_EN (1<<26)
+#define SWF14_DOCKING_STATUS_DOCKED (1<<25) /* 0 here means undocked */
+#define SWF14_DRIVER_STATUS (1<<24)
+#define SWF14_OS_TYPE_WIN9X (1<<23)
+#define SWF14_OS_TYPE_WINNT (1<<22)
+/* 21:19 rsvd */
+#define SWF14_PM_TYPE_MASK 0x00070000
+#define SWF14_PM_ACPI_VIDEO (0x4 << 16)
+#define SWF14_PM_ACPI (0x3 << 16)
+#define SWF14_PM_APM_12 (0x2 << 16)
+#define SWF14_PM_APM_11 (0x1 << 16)
+#define SWF14_HK_REQUEST_MASK 0x0000ffff /* see GR18 6:3 for event type */
+ /* if GR18 indicates a display switch */
+#define SWF14_DS_PIPEB_LFP2_EN (1<<15)
+#define SWF14_DS_PIPEB_EFP2_EN (1<<14)
+#define SWF14_DS_PIPEB_TV2_EN (1<<13)
+#define SWF14_DS_PIPEB_CRT2_EN (1<<12)
+#define SWF14_DS_PIPEB_LFP_EN (1<<11)
+#define SWF14_DS_PIPEB_EFP_EN (1<<10)
+#define SWF14_DS_PIPEB_TV_EN (1<<9)
+#define SWF14_DS_PIPEB_CRT_EN (1<<8)
+#define SWF14_DS_PIPEA_LFP2_EN (1<<7)
+#define SWF14_DS_PIPEA_EFP2_EN (1<<6)
+#define SWF14_DS_PIPEA_TV2_EN (1<<5)
+#define SWF14_DS_PIPEA_CRT2_EN (1<<4)
+#define SWF14_DS_PIPEA_LFP_EN (1<<3)
+#define SWF14_DS_PIPEA_EFP_EN (1<<2)
+#define SWF14_DS_PIPEA_TV_EN (1<<1)
+#define SWF14_DS_PIPEA_CRT_EN (1<<0)
+ /* if GR18 indicates a panel fitting request */
+#define SWF14_PFIT_EN (1<<0) /* 0 means disable */
+ /* if GR18 indicates an APM change request */
+#define SWF14_APM_HIBERNATE 0x4
+#define SWF14_APM_SUSPEND 0x3
+#define SWF14_APM_STANDBY 0x1
+#define SWF14_APM_RESTORE 0x0
+
+#endif /* _I830_BIOS_H_ */
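
The scratch register layout above packs two 16-bit fields into one 32-bit software flag word; SWF00, for instance, carries the panel Y resolution in the high half and the X resolution in the low half. A small encode/decode sketch that mirrors those shift and mask values, duplicated here as EX_* so the sketch stands alone:

/* Sketch of packing and unpacking the SWF00 resolution fields defined above. */
#include <stdio.h>
#include <stdint.h>

#define EX_SWF00_YRES_SHIFT	16
#define EX_SWF00_XRES_SHIFT	0
#define EX_SWF00_RES_MASK	0xffff

int main(void)
{
	uint32_t swf00 = (600u << EX_SWF00_YRES_SHIFT) |
			 (1024u << EX_SWF00_XRES_SHIFT);

	uint32_t x = (swf00 >> EX_SWF00_XRES_SHIFT) & EX_SWF00_RES_MASK;
	uint32_t y = (swf00 >> EX_SWF00_YRES_SHIFT) & EX_SWF00_RES_MASK;

	printf("SWF00=0x%08x -> %ux%u\n", swf00, x, y);
	return 0;
}
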
diff --git a/drivers/gpu/drm/gma500/intel_gmbus.c b/drivers/gpu/drm/gma500/intel_gmbus.c
new file mode 100644
index 000000000000..147584ac8d02
--- /dev/null
+++ b/drivers/gpu/drm/gma500/intel_gmbus.c
@@ -0,0 +1,493 @@
+/*
+ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
+ * Copyright © 2006-2008,2010 Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Chris Wilson <chris@chris-wilson.co.uk>
+ */
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+#include "drmP.h"
+#include "drm.h"
+#include "psb_intel_drv.h"
+#include "gma_drm.h"
+#include "psb_drv.h"
+#include "psb_intel_reg.h"
+
+#define _wait_for(COND, MS, W) ({ \
+ unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \
+ int ret__ = 0; \
+ while (! (COND)) { \
+ if (time_after(jiffies, timeout__)) { \
+ ret__ = -ETIMEDOUT; \
+ break; \
+ } \
+ if (W && !(in_atomic() || in_dbg_master())) msleep(W); \
+ } \
+ ret__; \
+})
+
+#define wait_for(COND, MS) _wait_for(COND, MS, 1)
+#define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0)
+
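The _wait_for() macro above polls a condition until it becomes true or a timeout in milliseconds expires, optionally sleeping between polls. A hedged usage sketch follows; the register offset and ready bit are hypothetical placeholders, and it assumes, as elsewhere in this file, that REG_READ() picks up a local struct drm_device *dev.

/* Hedged usage sketch of the wait_for() helper defined above. The register
 * offset and ready bit are hypothetical, not definitions from this driver. */
#define EXAMPLE_STATUS_REG	0x5100		/* hypothetical MMIO offset */
#define EXAMPLE_READY		(1 << 11)	/* hypothetical ready bit */

static int example_wait_ready(struct drm_device *dev)
{
	/* Poll the status register for up to 50 ms, sleeping 1 ms per loop */
	if (wait_for(REG_READ(EXAMPLE_STATUS_REG) & EXAMPLE_READY, 50))
		return -ETIMEDOUT;
	return 0;
}
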
+/* Intel GPIO access functions */
+
+#define I2C_RISEFALL_TIME 20
+
+static inline struct intel_gmbus *
+to_intel_gmbus(struct i2c_adapter *i2c)
+{
+ return container_of(i2c, struct intel_gmbus, adapter);
+}
+
+struct intel_gpio {
+ struct i2c_adapter adapter;
+ struct i2c_algo_bit_data algo;
+ struct drm_psb_private *dev_priv;
+ u32 reg;
+};
+
+void
+gma_intel_i2c_reset(struct drm_device *dev)
+{
+ REG_WRITE(GMBUS0, 0);
+}
+
+static void intel_i2c_quirk_set(struct drm_psb_private *dev_priv, bool enable)
+{
+ /* When using bit bashing for I2C, this bit needs to be set to 1 */
+ /* FIXME: We are never Pineview, right?
+
+ u32 val;
+
+ if (!IS_PINEVIEW(dev_priv->dev))
+ return;
+
+ val = REG_READ(DSPCLK_GATE_D);
+ if (enable)
+ val |= DPCUNIT_CLOCK_GATE_DISABLE;
+ else
+ val &= ~DPCUNIT_CLOCK_GATE_DISABLE;
+ REG_WRITE(DSPCLK_GATE_D, val);
+
+ return;
+ */
+}
+
+static u32 get_reserved(struct intel_gpio *gpio)
+{
+ struct drm_psb_private *dev_priv = gpio->dev_priv;
+ struct drm_device *dev = dev_priv->dev;
+ u32 reserved = 0;
+
+ /* On most chips, these bits must be preserved in software. */
+ reserved = REG_READ(gpio->reg) &
+ (GPIO_DATA_PULLUP_DISABLE |
+ GPIO_CLOCK_PULLUP_DISABLE);
+
+ return reserved;
+}
+
+static int get_clock(void *data)
+{
+ struct intel_gpio *gpio = data;
+ struct drm_psb_private *dev_priv = gpio->dev_priv;
+ struct drm_device *dev = dev_priv->dev;
+ u32 reserved = get_reserved(gpio);
+ REG_WRITE(gpio->reg, reserved | GPIO_CLOCK_DIR_MASK);
+ REG_WRITE(gpio->reg, reserved);
+ return (REG_READ(gpio->reg) & GPIO_CLOCK_VAL_IN) != 0;
+}
+
+static int get_data(void *data)
+{
+ struct intel_gpio *gpio = data;
+ struct drm_psb_private *dev_priv = gpio->dev_priv;
+ struct drm_device *dev = dev_priv->dev;
+ u32 reserved = get_reserved(gpio);
+ REG_WRITE(gpio->reg, reserved | GPIO_DATA_DIR_MASK);
+ REG_WRITE(gpio->reg, reserved);
+ return (REG_READ(gpio->reg) & GPIO_DATA_VAL_IN) != 0;
+}
+
+static void set_clock(void *data, int state_high)
+{
+ struct intel_gpio *gpio = data;
+ struct drm_psb_private *dev_priv = gpio->dev_priv;
+ struct drm_device *dev = dev_priv->dev;
+ u32 reserved = get_reserved(gpio);
+ u32 clock_bits;
+
+ if (state_high)
+ clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK;
+ else
+ clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
+ GPIO_CLOCK_VAL_MASK;
+
+ REG_WRITE(gpio->reg, reserved | clock_bits);
+ REG_READ(gpio->reg); /* Posting */
+}
+
+static void set_data(void *data, int state_high)
+{
+ struct intel_gpio *gpio = data;
+ struct drm_psb_private *dev_priv = gpio->dev_priv;
+ struct drm_device *dev = dev_priv->dev;
+ u32 reserved = get_reserved(gpio);
+ u32 data_bits;
+
+ if (state_high)
+ data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK;
+ else
+ data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
+ GPIO_DATA_VAL_MASK;
+
+ REG_WRITE(gpio->reg, reserved | data_bits);
+ REG_READ(gpio->reg);
+}
+
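+/*
+ * Create a bit-banging adapter for a GMBUS pin. The pin number is mapped to
+ * the matching GPIO pin-pair register; pins without a GPIO backing (entries
+ * 0 and 6 in the table) cannot fall back to bit-banging.
+ */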
+static struct i2c_adapter *
+intel_gpio_create(struct drm_psb_private *dev_priv, u32 pin)
+{
+ static const int map_pin_to_reg[] = {
+ 0,
+ GPIOB,
+ GPIOA,
+ GPIOC,
+ GPIOD,
+ GPIOE,
+ 0,
+ GPIOF,
+ };
+ struct intel_gpio *gpio;
+
+ if (pin >= ARRAY_SIZE(map_pin_to_reg) || !map_pin_to_reg[pin])
+ return NULL;
+
+ gpio = kzalloc(sizeof(struct intel_gpio), GFP_KERNEL);
+ if (gpio == NULL)
+ return NULL;
+
+ gpio->reg = map_pin_to_reg[pin];
+ gpio->dev_priv = dev_priv;
+
+ snprintf(gpio->adapter.name, sizeof(gpio->adapter.name),
+ "gma500 GPIO%c", "?BACDE?F"[pin]);
+ gpio->adapter.owner = THIS_MODULE;
+ gpio->adapter.algo_data = &gpio->algo;
+ gpio->adapter.dev.parent = &dev_priv->dev->pdev->dev;
+ gpio->algo.setsda = set_data;
+ gpio->algo.setscl = set_clock;
+ gpio->algo.getsda = get_data;
+ gpio->algo.getscl = get_clock;
+ gpio->algo.udelay = I2C_RISEFALL_TIME;
+ gpio->algo.timeout = usecs_to_jiffies(2200);
+ gpio->algo.data = gpio;
+
+ if (i2c_bit_add_bus(&gpio->adapter))
+ goto out_free;
+
+ return &gpio->adapter;
+
+out_free:
+ kfree(gpio);
+ return NULL;
+}
+
+static int
+intel_i2c_quirk_xfer(struct drm_psb_private *dev_priv,
+ struct i2c_adapter *adapter,
+ struct i2c_msg *msgs,
+ int num)
+{
+ struct intel_gpio *gpio = container_of(adapter,
+ struct intel_gpio,
+ adapter);
+ int ret;
+
+ gma_intel_i2c_reset(dev_priv->dev);
+
+ intel_i2c_quirk_set(dev_priv, true);
+ set_data(gpio, 1);
+ set_clock(gpio, 1);
+ udelay(I2C_RISEFALL_TIME);
+
+ ret = adapter->algo->master_xfer(adapter, msgs, num);
+
+ set_data(gpio, 1);
+ set_clock(gpio, 1);
+ intel_i2c_quirk_set(dev_priv, false);
+
+ return ret;
+}
+
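+/*
+ * Main GMBUS transfer: data moves through the 32-bit GMBUS3 register, so
+ * reads and writes are packed and unpacked four bytes at a time while GMBUS2
+ * is polled for HW_RDY (or SATOER on a slave NAK). On a timeout the port is
+ * switched over to the bit-banged fallback built by intel_gpio_create().
+ */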
+static int
+gmbus_xfer(struct i2c_adapter *adapter,
+ struct i2c_msg *msgs,
+ int num)
+{
+ struct intel_gmbus *bus = container_of(adapter,
+ struct intel_gmbus,
+ adapter);
+ struct drm_psb_private *dev_priv = adapter->algo_data;
+ struct drm_device *dev = dev_priv->dev;
+ int i, reg_offset;
+
+ if (bus->force_bit)
+ return intel_i2c_quirk_xfer(dev_priv,
+ bus->force_bit, msgs, num);
+
+ reg_offset = 0;
+
+ REG_WRITE(GMBUS0 + reg_offset, bus->reg0);
+
+ for (i = 0; i < num; i++) {
+ u16 len = msgs[i].len;
+ u8 *buf = msgs[i].buf;
+
+ if (msgs[i].flags & I2C_M_RD) {
+ REG_WRITE(GMBUS1 + reg_offset,
+ GMBUS_CYCLE_WAIT | (i + 1 == num ? GMBUS_CYCLE_STOP : 0) |
+ (len << GMBUS_BYTE_COUNT_SHIFT) |
+ (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
+ GMBUS_SLAVE_READ | GMBUS_SW_RDY);
+ REG_READ(GMBUS2+reg_offset);
+ do {
+ u32 val, loop = 0;
+
+ if (wait_for(REG_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50))
+ goto timeout;
+ if (REG_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
+ goto clear_err;
+
+ val = REG_READ(GMBUS3 + reg_offset);
+ do {
+ *buf++ = val & 0xff;
+ val >>= 8;
+ } while (--len && ++loop < 4);
+ } while (len);
+ } else {
+ u32 val, loop;
+
+ val = loop = 0;
+ do {
+ val |= *buf++ << (8 * loop);
+ } while (--len && ++loop < 4);
+
+ REG_WRITE(GMBUS3 + reg_offset, val);
+ REG_WRITE(GMBUS1 + reg_offset,
+ (i + 1 == num ? GMBUS_CYCLE_STOP : GMBUS_CYCLE_WAIT) |
+ (msgs[i].len << GMBUS_BYTE_COUNT_SHIFT) |
+ (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
+ GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
+ REG_READ(GMBUS2+reg_offset);
+
+ while (len) {
+ if (wait_for(REG_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50))
+ goto timeout;
+ if (REG_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
+ goto clear_err;
+
+ val = loop = 0;
+ do {
+ val |= *buf++ << (8 * loop);
+ } while (--len && ++loop < 4);
+
+ REG_WRITE(GMBUS3 + reg_offset, val);
+ REG_READ(GMBUS2+reg_offset);
+ }
+ }
+
+ if (i + 1 < num && wait_for(REG_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE), 50))
+ goto timeout;
+ if (REG_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
+ goto clear_err;
+ }
+
+ goto done;
+
+clear_err:
+ /* Toggle the Software Clear Interrupt bit. This has the effect
+ * of resetting the GMBUS controller and so clearing the
+ * BUS_ERROR raised by the slave's NAK.
+ */
+ REG_WRITE(GMBUS1 + reg_offset, GMBUS_SW_CLR_INT);
+ REG_WRITE(GMBUS1 + reg_offset, 0);
+
+done:
+ /* Mark the GMBUS interface as disabled. We will re-enable it at the
+ * start of the next xfer, till then let it sleep.
+ */
+ REG_WRITE(GMBUS0 + reg_offset, 0);
+ return i;
+
+timeout:
+ DRM_INFO("GMBUS timed out, falling back to bit banging on pin %d [%s]\n",
+ bus->reg0 & 0xff, bus->adapter.name);
+ REG_WRITE(GMBUS0 + reg_offset, 0);
+
+ /* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */
+ bus->force_bit = intel_gpio_create(dev_priv, bus->reg0 & 0xff);
+ if (!bus->force_bit)
+ return -ENOMEM;
+
+ return intel_i2c_quirk_xfer(dev_priv, bus->force_bit, msgs, num);
+}
+
+static u32 gmbus_func(struct i2c_adapter *adapter)
+{
+ struct intel_gmbus *bus = container_of(adapter,
+ struct intel_gmbus,
+ adapter);
+
+ if (bus->force_bit)
+ bus->force_bit->algo->functionality(bus->force_bit);
+
+ return (I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
+ /* I2C_FUNC_10BIT_ADDR | */
+ I2C_FUNC_SMBUS_READ_BLOCK_DATA |
+ I2C_FUNC_SMBUS_BLOCK_PROC_CALL);
+}
+
+static const struct i2c_algorithm gmbus_algorithm = {
+ .master_xfer = gmbus_xfer,
+ .functionality = gmbus_func
+};
+
+/**
+ * gma_intel_setup_gmbus - instantiate all Intel i2c GMBuses
+ * @dev: DRM device
+ */
+int gma_intel_setup_gmbus(struct drm_device *dev)
+{
+ static const char *names[GMBUS_NUM_PORTS] = {
+ "disabled",
+ "ssc",
+ "vga",
+ "panel",
+ "dpc",
+ "dpb",
+ "reserved",
+ "dpd",
+ };
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ int ret, i;
+
+ dev_priv->gmbus = kcalloc(GMBUS_NUM_PORTS, sizeof(struct intel_gmbus),
+ GFP_KERNEL);
+ if (dev_priv->gmbus == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < GMBUS_NUM_PORTS; i++) {
+ struct intel_gmbus *bus = &dev_priv->gmbus[i];
+
+ bus->adapter.owner = THIS_MODULE;
+ bus->adapter.class = I2C_CLASS_DDC;
+ snprintf(bus->adapter.name,
+ sizeof(bus->adapter.name),
+ "gma500 gmbus %s",
+ names[i]);
+
+ bus->adapter.dev.parent = &dev->pdev->dev;
+ bus->adapter.algo_data = dev_priv;
+
+ bus->adapter.algo = &gmbus_algorithm;
+ ret = i2c_add_adapter(&bus->adapter);
+ if (ret)
+ goto err;
+
+ /* By default use a conservative clock rate */
+ bus->reg0 = i | GMBUS_RATE_100KHZ;
+
+ /* XXX force bit banging until GMBUS is fully debugged */
+ bus->force_bit = intel_gpio_create(dev_priv, i);
+ }
+
+ gma_intel_i2c_reset(dev_priv->dev);
+
+ return 0;
+
+err:
+ while (i--) {
+ struct intel_gmbus *bus = &dev_priv->gmbus[i];
+ i2c_del_adapter(&bus->adapter);
+ }
+ kfree(dev_priv->gmbus);
+ dev_priv->gmbus = NULL;
+ return ret;
+}
+
+void gma_intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed)
+{
+ struct intel_gmbus *bus = to_intel_gmbus(adapter);
+
+ /* speed:
+ * 0x0 = 100 KHz
+ * 0x1 = 50 KHz
+ * 0x2 = 400 KHz
+ * 0x3 = 1000 KHz
+ */
+ bus->reg0 = (bus->reg0 & ~(0x3 << 8)) | (speed << 8);
+}
+
+void gma_intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
+{
+ struct intel_gmbus *bus = to_intel_gmbus(adapter);
+
+ if (force_bit) {
+ if (bus->force_bit == NULL) {
+ struct drm_psb_private *dev_priv = adapter->algo_data;
+ bus->force_bit = intel_gpio_create(dev_priv,
+ bus->reg0 & 0xff);
+ }
+ } else {
+ if (bus->force_bit) {
+ i2c_del_adapter(bus->force_bit);
+ kfree(bus->force_bit);
+ bus->force_bit = NULL;
+ }
+ }
+}
+
+void gma_intel_teardown_gmbus(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ int i;
+
+ if (dev_priv->gmbus == NULL)
+ return;
+
+ for (i = 0; i < GMBUS_NUM_PORTS; i++) {
+ struct intel_gmbus *bus = &dev_priv->gmbus[i];
+ if (bus->force_bit) {
+ i2c_del_adapter(bus->force_bit);
+ kfree(bus->force_bit);
+ }
+ i2c_del_adapter(&bus->adapter);
+ }
+
+ kfree(dev_priv->gmbus);
+ dev_priv->gmbus = NULL;
+}
diff --git a/drivers/gpu/drm/gma500/intel_i2c.c b/drivers/gpu/drm/gma500/intel_i2c.c
new file mode 100644
index 000000000000..98a28c209555
--- /dev/null
+++ b/drivers/gpu/drm/gma500/intel_i2c.c
@@ -0,0 +1,169 @@
+/*
+ * Copyright © 2006-2007 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ */
+#include <linux/export.h>
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+
+#include "psb_drv.h"
+#include "psb_intel_reg.h"
+
+/*
+ * Intel GPIO access functions
+ */
+
+#define I2C_RISEFALL_TIME 20
+
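+/*
+ * i2c-algo-bit callbacks: SCL and SDA are sensed and driven through the GPIO
+ * pin-pair register, taking care to preserve the pull-up disable bits that
+ * may already be set (the "reserved" handling in the setters below).
+ */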
+static int get_clock(void *data)
+{
+ struct psb_intel_i2c_chan *chan = data;
+ struct drm_device *dev = chan->drm_dev;
+ u32 val;
+
+ val = REG_READ(chan->reg);
+ return (val & GPIO_CLOCK_VAL_IN) != 0;
+}
+
+static int get_data(void *data)
+{
+ struct psb_intel_i2c_chan *chan = data;
+ struct drm_device *dev = chan->drm_dev;
+ u32 val;
+
+ val = REG_READ(chan->reg);
+ return (val & GPIO_DATA_VAL_IN) != 0;
+}
+
+static void set_clock(void *data, int state_high)
+{
+ struct psb_intel_i2c_chan *chan = data;
+ struct drm_device *dev = chan->drm_dev;
+ u32 reserved = 0, clock_bits;
+
+ /* On most chips, these bits must be preserved in software. */
+ reserved =
+ REG_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
+ GPIO_CLOCK_PULLUP_DISABLE);
+
+ if (state_high)
+ clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK;
+ else
+ clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
+ GPIO_CLOCK_VAL_MASK;
+ REG_WRITE(chan->reg, reserved | clock_bits);
+ udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */
+}
+
+static void set_data(void *data, int state_high)
+{
+ struct psb_intel_i2c_chan *chan = data;
+ struct drm_device *dev = chan->drm_dev;
+ u32 reserved = 0, data_bits;
+
+ /* On most chips, these bits must be preserved in software. */
+ reserved =
+ REG_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
+ GPIO_CLOCK_PULLUP_DISABLE);
+
+ if (state_high)
+ data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK;
+ else
+ data_bits =
+ GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
+ GPIO_DATA_VAL_MASK;
+
+ REG_WRITE(chan->reg, reserved | data_bits);
+ udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */
+}
+
+/**
+ * psb_intel_i2c_create - instantiate an Intel i2c bus using the specified GPIO reg
+ * @dev: DRM device
+ * @reg: GPIO reg to use
+ * @name: name for this bus
+ *
+ * Creates and registers a new i2c bus with the Linux i2c layer, for use
+ * in output probing and control (e.g. DDC or SDVO control functions).
+ *
+ * Possible values for @reg include:
+ * %GPIOA
+ * %GPIOB
+ * %GPIOC
+ * %GPIOD
+ * %GPIOE
+ * %GPIOF
+ * %GPIOG
+ * %GPIOH
+ * see PRM for details on how these different busses are used.
+ */
+struct psb_intel_i2c_chan *psb_intel_i2c_create(struct drm_device *dev,
+ const u32 reg, const char *name)
+{
+ struct psb_intel_i2c_chan *chan;
+
+ chan = kzalloc(sizeof(struct psb_intel_i2c_chan), GFP_KERNEL);
+ if (!chan)
+ goto out_free;
+
+ chan->drm_dev = dev;
+ chan->reg = reg;
+ snprintf(chan->adapter.name, I2C_NAME_SIZE, "intel drm %s", name);
+ chan->adapter.owner = THIS_MODULE;
+ chan->adapter.algo_data = &chan->algo;
+ chan->adapter.dev.parent = &dev->pdev->dev;
+ chan->algo.setsda = set_data;
+ chan->algo.setscl = set_clock;
+ chan->algo.getsda = get_data;
+ chan->algo.getscl = get_clock;
+ chan->algo.udelay = 20;
+ chan->algo.timeout = usecs_to_jiffies(2200);
+ chan->algo.data = chan;
+
+ i2c_set_adapdata(&chan->adapter, chan);
+
+ if (i2c_bit_add_bus(&chan->adapter))
+ goto out_free;
+
+ /* JJJ: raise SCL and SDA? */
+ set_data(chan, 1);
+ set_clock(chan, 1);
+ udelay(20);
+
+ return chan;
+
+out_free:
+ kfree(chan);
+ return NULL;
+}
+
+/**
+ * psb_intel_i2c_destroy - unregister and free i2c bus resources
+ * @chan: channel to free
+ *
+ * Unregister the adapter from the i2c layer, then free the structure.
+ */
+void psb_intel_i2c_destroy(struct psb_intel_i2c_chan *chan)
+{
+ if (!chan)
+ return;
+
+ i2c_del_adapter(&chan->adapter);
+ kfree(chan);
+}
diff --git a/drivers/gpu/drm/gma500/intel_opregion.c b/drivers/gpu/drm/gma500/intel_opregion.c
new file mode 100644
index 000000000000..d946bc1b17bf
--- /dev/null
+++ b/drivers/gpu/drm/gma500/intel_opregion.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * FIXME: resolve with the i915 version
+ */
+
+#include "psb_drv.h"
+
+struct opregion_header {
+ u8 signature[16];
+ u32 size;
+ u32 opregion_ver;
+ u8 bios_ver[32];
+ u8 vbios_ver[16];
+ u8 driver_ver[16];
+ u32 mboxes;
+ u8 reserved[164];
+} __packed;
+
+struct opregion_apci {
+ /*FIXME: add it later*/
+} __packed;
+
+struct opregion_swsci {
+ /*FIXME: add it later*/
+} __packed;
+
+struct opregion_acpi {
+ /*FIXME: add it later*/
+} __packed;
+
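+/*
+ * Map the ACPI OpRegion pointed to by PCI config dword 0xFC and keep a
+ * pointer to the lid-switch state word at offset 0x01ac so the driver can
+ * poll it later.
+ */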
+int gma_intel_opregion_init(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ u32 opregion_phy;
+ void *base;
+ u32 *lid_state;
+
+ dev_priv->lid_state = NULL;
+
+ pci_read_config_dword(dev->pdev, 0xfc, &opregion_phy);
+ if (opregion_phy == 0)
+ return -ENOTSUPP;
+
+ base = ioremap(opregion_phy, 8*1024);
+ if (!base)
+ return -ENOMEM;
+
+ lid_state = base + 0x01ac;
+
+ dev_priv->lid_state = lid_state;
+ dev_priv->lid_last_state = readl(lid_state);
+ return 0;
+}
+
+int gma_intel_opregion_exit(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ if (dev_priv->lid_state)
+ iounmap(dev_priv->lid_state);
+ return 0;
+}
diff --git a/drivers/gpu/drm/gma500/mid_bios.c b/drivers/gpu/drm/gma500/mid_bios.c
new file mode 100644
index 000000000000..5eee9ad80da4
--- /dev/null
+++ b/drivers/gpu/drm/gma500/mid_bios.c
@@ -0,0 +1,263 @@
+/**************************************************************************
+ * Copyright (c) 2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+/* TODO
+ * - Split functions by vbt type
+ * - Make them all take drm_device
+ * - Check ioremap failures
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include "gma_drm.h"
+#include "psb_drv.h"
+#include "mid_bios.h"
+
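+/*
+ * Read the SoC fuse settings through the config-space indirection on device
+ * 0000:00:00.0 (write the register address to offset 0xD0, read the value
+ * back from 0xD4) to find out whether the internal panel is LVDS or MIPI and
+ * which SKU/core frequency this part runs at.
+ */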
+static void mid_get_fuse_settings(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+ uint32_t fuse_value = 0;
+ uint32_t fuse_value_tmp = 0;
+
+#define FB_REG06 0xD0810600
+#define FB_MIPI_DISABLE (1 << 11)
+#define FB_REG09 0xD0810900
+#define FB_SKU_MASK 0x7000
+#define FB_SKU_SHIFT 12
+#define FB_SKU_100 0
+#define FB_SKU_100L 1
+#define FB_SKU_83 2
+ if (pci_root == NULL) {
+ WARN_ON(1);
+ return;
+ }
+
+
+ pci_write_config_dword(pci_root, 0xD0, FB_REG06);
+ pci_read_config_dword(pci_root, 0xD4, &fuse_value);
+
+ /* FB_MIPI_DISABLE doesn't mean LVDS on with Medfield */
+ if (IS_MRST(dev))
+ dev_priv->iLVDS_enable = fuse_value & FB_MIPI_DISABLE;
+
+ DRM_INFO("internal display is %s\n",
+ dev_priv->iLVDS_enable ? "LVDS display" : "MIPI display");
+
+ /* Prevent runtime suspend at start*/
+ if (dev_priv->iLVDS_enable) {
+ dev_priv->is_lvds_on = true;
+ dev_priv->is_mipi_on = false;
+ } else {
+ dev_priv->is_mipi_on = true;
+ dev_priv->is_lvds_on = false;
+ }
+
+ dev_priv->video_device_fuse = fuse_value;
+
+ pci_write_config_dword(pci_root, 0xD0, FB_REG09);
+ pci_read_config_dword(pci_root, 0xD4, &fuse_value);
+
+ dev_dbg(dev->dev, "SKU values is 0x%x.\n", fuse_value);
+ fuse_value_tmp = (fuse_value & FB_SKU_MASK) >> FB_SKU_SHIFT;
+
+ dev_priv->fuse_reg_value = fuse_value;
+
+ switch (fuse_value_tmp) {
+ case FB_SKU_100:
+ dev_priv->core_freq = 200;
+ break;
+ case FB_SKU_100L:
+ dev_priv->core_freq = 100;
+ break;
+ case FB_SKU_83:
+ dev_priv->core_freq = 166;
+ break;
+ default:
+ dev_warn(dev->dev, "Invalid SKU values, SKU value = 0x%08x\n",
+ fuse_value_tmp);
+ dev_priv->core_freq = 0;
+ }
+ dev_dbg(dev->dev, "LNC core clk is %dMHz.\n", dev_priv->core_freq);
+ pci_dev_put(pci_root);
+}
+
+/*
+ * Get the revision ID, B0:D2:F0;0x08
+ */
+static void mid_get_pci_revID(struct drm_psb_private *dev_priv)
+{
+ uint32_t platform_rev_id = 0;
+ struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0));
+
+ if (pci_gfx_root == NULL) {
+ WARN_ON(1);
+ return;
+ }
+ pci_read_config_dword(pci_gfx_root, 0x08, &platform_rev_id);
+ dev_priv->platform_rev_id = (uint8_t) platform_rev_id;
+ pci_dev_put(pci_gfx_root);
+ dev_dbg(dev_priv->dev->dev, "platform_rev_id is %x\n",
+ dev_priv->platform_rev_id);
+}
+
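+/*
+ * Fetch and decode the GCT/VBT. Revisions 0 and 1 map the table and copy the
+ * boot panel descriptor directly (v1 and v2 layouts respectively), while
+ * revision 0x10 repacks the size, checksum and descriptor count before the
+ * boot panel timings are copied out field by field.
+ */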
+static void mid_get_vbt_data(struct drm_psb_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct oaktrail_vbt *vbt = &dev_priv->vbt_data;
+ u32 addr;
+ u16 new_size;
+ u8 *vbt_virtual;
+ u8 bpi;
+ u8 number_desc = 0;
+ struct oaktrail_timing_info *dp_ti = &dev_priv->gct_data.DTD;
+ struct gct_r10_timing_info ti;
+ void *pGCT;
+ struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0));
+
+ /* Get the address of the platform config vbt, B0:D2:F0;0xFC */
+ pci_read_config_dword(pci_gfx_root, 0xFC, &addr);
+ pci_dev_put(pci_gfx_root);
+
+ dev_dbg(dev->dev, "drm platform config address is %x\n", addr);
+
+ /* check for platform config address == 0. */
+ /* this means fw doesn't support vbt */
+
+ if (addr == 0) {
+ vbt->size = 0;
+ return;
+ }
+
+ /* get the virtual address of the vbt */
+ vbt_virtual = ioremap(addr, sizeof(*vbt));
+ if (vbt_virtual == NULL) {
+ vbt->size = 0;
+ return;
+ }
+
+ memcpy(vbt, vbt_virtual, sizeof(*vbt));
+ iounmap(vbt_virtual); /* Free virtual address space */
+
+ /* No matching signature, don't process the data */
+ if (memcmp(vbt->signature, "$GCT", 4)) {
+ vbt->size = 0;
+ return;
+ }
+
+ dev_dbg(dev->dev, "GCT revision is %x\n", vbt->revision);
+
+ switch (vbt->revision) {
+ case 0:
+ vbt->oaktrail_gct = ioremap(addr + sizeof(*vbt) - 4,
+ vbt->size - sizeof(*vbt) + 4);
+ pGCT = vbt->oaktrail_gct;
+ bpi = ((struct oaktrail_gct_v1 *)pGCT)->PD.BootPanelIndex;
+ dev_priv->gct_data.bpi = bpi;
+ dev_priv->gct_data.pt =
+ ((struct oaktrail_gct_v1 *)pGCT)->PD.PanelType;
+ memcpy(&dev_priv->gct_data.DTD,
+ &((struct oaktrail_gct_v1 *)pGCT)->panel[bpi].DTD,
+ sizeof(struct oaktrail_timing_info));
+ dev_priv->gct_data.Panel_Port_Control =
+ ((struct oaktrail_gct_v1 *)pGCT)->panel[bpi].Panel_Port_Control;
+ dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
+ ((struct oaktrail_gct_v1 *)pGCT)->panel[bpi].Panel_MIPI_Display_Descriptor;
+ break;
+ case 1:
+ vbt->oaktrail_gct = ioremap(addr + sizeof(*vbt) - 4,
+ vbt->size - sizeof(*vbt) + 4);
+ pGCT = vbt->oaktrail_gct;
+ bpi = ((struct oaktrail_gct_v2 *)pGCT)->PD.BootPanelIndex;
+ dev_priv->gct_data.bpi = bpi;
+ dev_priv->gct_data.pt =
+ ((struct oaktrail_gct_v2 *)pGCT)->PD.PanelType;
+ memcpy(&dev_priv->gct_data.DTD,
+ &((struct oaktrail_gct_v2 *)pGCT)->panel[bpi].DTD,
+ sizeof(struct oaktrail_timing_info));
+ dev_priv->gct_data.Panel_Port_Control =
+ ((struct oaktrail_gct_v2 *)pGCT)->panel[bpi].Panel_Port_Control;
+ dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
+ ((struct oaktrail_gct_v2 *)pGCT)->panel[bpi].Panel_MIPI_Display_Descriptor;
+ break;
+ case 0x10:
+ /*header definition changed from rev 01 (v2) to rev 10h. */
+ /*so, some values have changed location*/
+ new_size = vbt->checksum; /*checksum contains lo size byte*/
+ /*LSB of oaktrail_gct contains hi size byte*/
+ new_size |= ((0xff & (unsigned int)(long)vbt->oaktrail_gct)) << 8;
+
+ vbt->checksum = vbt->size; /*size contains the checksum*/
+ if (new_size > 0xff)
+ vbt->size = 0xff; /*restrict size to 255*/
+ else
+ vbt->size = new_size;
+
+ /* number of descriptors defined in the GCT */
+ number_desc = ((0xff00 & (unsigned int)(long)vbt->oaktrail_gct)) >> 8;
+ bpi = ((0xff0000 & (unsigned int)(long)vbt->oaktrail_gct)) >> 16;
+ vbt->oaktrail_gct = ioremap(addr + GCT_R10_HEADER_SIZE,
+ GCT_R10_DISPLAY_DESC_SIZE * number_desc);
+ pGCT = vbt->oaktrail_gct;
+ pGCT = (u8 *)pGCT + (bpi*GCT_R10_DISPLAY_DESC_SIZE);
+ dev_priv->gct_data.bpi = bpi; /*save boot panel id*/
+
+ /*copy the GCT display timings into a temp structure*/
+ memcpy(&ti, pGCT, sizeof(struct gct_r10_timing_info));
+
+ /*now copy the temp struct into the dev_priv->gct_data*/
+ dp_ti->pixel_clock = ti.pixel_clock;
+ dp_ti->hactive_hi = ti.hactive_hi;
+ dp_ti->hactive_lo = ti.hactive_lo;
+ dp_ti->hblank_hi = ti.hblank_hi;
+ dp_ti->hblank_lo = ti.hblank_lo;
+ dp_ti->hsync_offset_hi = ti.hsync_offset_hi;
+ dp_ti->hsync_offset_lo = ti.hsync_offset_lo;
+ dp_ti->hsync_pulse_width_hi = ti.hsync_pulse_width_hi;
+ dp_ti->hsync_pulse_width_lo = ti.hsync_pulse_width_lo;
+ dp_ti->vactive_hi = ti.vactive_hi;
+ dp_ti->vactive_lo = ti.vactive_lo;
+ dp_ti->vblank_hi = ti.vblank_hi;
+ dp_ti->vblank_lo = ti.vblank_lo;
+ dp_ti->vsync_offset_hi = ti.vsync_offset_hi;
+ dp_ti->vsync_offset_lo = ti.vsync_offset_lo;
+ dp_ti->vsync_pulse_width_hi = ti.vsync_pulse_width_hi;
+ dp_ti->vsync_pulse_width_lo = ti.vsync_pulse_width_lo;
+
+ /* Move the MIPI_Display_Descriptor data from GCT to dev priv */
+ dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
+ *((u8 *)pGCT + 0x0d);
+ dev_priv->gct_data.Panel_MIPI_Display_Descriptor |=
+ (*((u8 *)pGCT + 0x0e)) << 8;
+ break;
+ default:
+ dev_err(dev->dev, "Unknown revision of GCT!\n");
+ vbt->size = 0;
+ }
+}
+
+int mid_chip_setup(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ mid_get_fuse_settings(dev);
+ mid_get_vbt_data(dev_priv);
+ mid_get_pci_revID(dev_priv);
+ return 0;
+}
diff --git a/drivers/gpu/drm/gma500/mid_bios.h b/drivers/gpu/drm/gma500/mid_bios.h
new file mode 100644
index 000000000000..00e7d564b7eb
--- /dev/null
+++ b/drivers/gpu/drm/gma500/mid_bios.h
@@ -0,0 +1,21 @@
+/**************************************************************************
+ * Copyright (c) 2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+extern int mid_chip_setup(struct drm_device *dev);
+
diff --git a/drivers/gpu/drm/gma500/mmu.c b/drivers/gpu/drm/gma500/mmu.c
new file mode 100644
index 000000000000..c904d73b1de3
--- /dev/null
+++ b/drivers/gpu/drm/gma500/mmu.c
@@ -0,0 +1,858 @@
+/**************************************************************************
+ * Copyright (c) 2007, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+#include <drm/drmP.h>
+#include "psb_drv.h"
+#include "psb_reg.h"
+
+/*
+ * Code for the SGX MMU:
+ */
+
+/*
+ * clflush on one processor only:
+ * clflush should apparently flush the cache line on all processors in an
+ * SMP system.
+ */
+
+/*
+ * kmap atomic:
+ * The usage of the slots must be completely encapsulated within a spinlock, and
+ * no other functions that may be using the locks for other purposes may be
+ * called from within the locked region.
+ * Since the slots are per processor, this will guarantee that we are the only
+ * user.
+ */
+
+/*
+ * TODO: Inserting ptes from an interrupt handler:
+ * This may be desirable for some SGX functionality where the GPU can fault in
+ * needed pages. For that, we need to make an atomic insert_pages function, that
+ * may fail.
+ * If it fails, the caller needs to insert the page using a workqueue function,
+ * but on average it should be fast.
+ */
+
+struct psb_mmu_driver {
+ /* protects driver- and pd structures. Always take in read mode
+ * before taking the page table spinlock.
+ */
+ struct rw_semaphore sem;
+
+ /* protects page tables, directory tables and pt tables.
+ * and pt structures.
+ */
+ spinlock_t lock;
+
+ atomic_t needs_tlbflush;
+
+ uint8_t __iomem *register_map;
+ struct psb_mmu_pd *default_pd;
+ /*uint32_t bif_ctrl;*/
+ int has_clflush;
+ int clflush_add;
+ unsigned long clflush_mask;
+
+ struct drm_psb_private *dev_priv;
+};
+
+struct psb_mmu_pd;
+
+struct psb_mmu_pt {
+ struct psb_mmu_pd *pd;
+ uint32_t index;
+ uint32_t count;
+ struct page *p;
+ uint32_t *v;
+};
+
+struct psb_mmu_pd {
+ struct psb_mmu_driver *driver;
+ int hw_context;
+ struct psb_mmu_pt **tables;
+ struct page *p;
+ struct page *dummy_pt;
+ struct page *dummy_page;
+ uint32_t pd_mask;
+ uint32_t invalid_pde;
+ uint32_t invalid_pte;
+};
+
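+/*
+ * The SGX MMU is a two-level page table: psb_mmu_pd_index() selects the
+ * page-directory slot for a GPU virtual address and psb_mmu_pt_index() the
+ * 10-bit slot within the corresponding page table.
+ */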
+static inline uint32_t psb_mmu_pt_index(uint32_t offset)
+{
+ return (offset >> PSB_PTE_SHIFT) & 0x3FF;
+}
+
+static inline uint32_t psb_mmu_pd_index(uint32_t offset)
+{
+ return offset >> PSB_PDE_SHIFT;
+}
+
+static inline void psb_clflush(void *addr)
+{
+ __asm__ __volatile__("clflush (%0)\n" : : "r"(addr) : "memory");
+}
+
+static inline void psb_mmu_clflush(struct psb_mmu_driver *driver,
+ void *addr)
+{
+ if (!driver->has_clflush)
+ return;
+
+ mb();
+ psb_clflush(addr);
+ mb();
+}
+
+static void psb_page_clflush(struct psb_mmu_driver *driver, struct page* page)
+{
+ uint32_t clflush_add = driver->clflush_add >> PAGE_SHIFT;
+ uint32_t clflush_count = PAGE_SIZE / clflush_add;
+ int i;
+ uint8_t *clf;
+
+ clf = kmap_atomic(page, KM_USER0);
+ mb();
+ for (i = 0; i < clflush_count; ++i) {
+ psb_clflush(clf);
+ clf += clflush_add;
+ }
+ mb();
+ kunmap_atomic(clf, KM_USER0);
+}
+
+static void psb_pages_clflush(struct psb_mmu_driver *driver,
+ struct page *page[], unsigned long num_pages)
+{
+ int i;
+
+ if (!driver->has_clflush)
+ return ;
+
+ for (i = 0; i < num_pages; i++)
+ psb_page_clflush(driver, *page++);
+}
+
+static void psb_mmu_flush_pd_locked(struct psb_mmu_driver *driver,
+ int force)
+{
+ atomic_set(&driver->needs_tlbflush, 0);
+}
+
+static void psb_mmu_flush_pd(struct psb_mmu_driver *driver, int force)
+{
+ down_write(&driver->sem);
+ psb_mmu_flush_pd_locked(driver, force);
+ up_write(&driver->sem);
+}
+
+void psb_mmu_flush(struct psb_mmu_driver *driver, int rc_prot)
+{
+ if (rc_prot)
+ down_write(&driver->sem);
+ if (rc_prot)
+ up_write(&driver->sem);
+}
+
+void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context)
+{
+ /*ttm_tt_cache_flush(&pd->p, 1);*/
+ psb_pages_clflush(pd->driver, &pd->p, 1);
+ down_write(&pd->driver->sem);
+ wmb();
+ psb_mmu_flush_pd_locked(pd->driver, 1);
+ pd->hw_context = hw_context;
+ up_write(&pd->driver->sem);
+
+}
+
+static inline unsigned long psb_pd_addr_end(unsigned long addr,
+ unsigned long end)
+{
+
+ addr = (addr + PSB_PDE_MASK + 1) & ~PSB_PDE_MASK;
+ return (addr < end) ? addr : end;
+}
+
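+/* Build a PTE from a pfn and the PSB_MMU_* caching/RO/WO type flags. */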
+static inline uint32_t psb_mmu_mask_pte(uint32_t pfn, int type)
+{
+ uint32_t mask = PSB_PTE_VALID;
+
+ if (type & PSB_MMU_CACHED_MEMORY)
+ mask |= PSB_PTE_CACHED;
+ if (type & PSB_MMU_RO_MEMORY)
+ mask |= PSB_PTE_RO;
+ if (type & PSB_MMU_WO_MEMORY)
+ mask |= PSB_PTE_WO;
+
+ return (pfn << PAGE_SHIFT) | mask;
+}
+
+struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
+ int trap_pagefaults, int invalid_type)
+{
+ struct psb_mmu_pd *pd = kmalloc(sizeof(*pd), GFP_KERNEL);
+ uint32_t *v;
+ int i;
+
+ if (!pd)
+ return NULL;
+
+ pd->p = alloc_page(GFP_DMA32);
+ if (!pd->p)
+ goto out_err1;
+ pd->dummy_pt = alloc_page(GFP_DMA32);
+ if (!pd->dummy_pt)
+ goto out_err2;
+ pd->dummy_page = alloc_page(GFP_DMA32);
+ if (!pd->dummy_page)
+ goto out_err3;
+
+ if (!trap_pagefaults) {
+ pd->invalid_pde =
+ psb_mmu_mask_pte(page_to_pfn(pd->dummy_pt),
+ invalid_type);
+ pd->invalid_pte =
+ psb_mmu_mask_pte(page_to_pfn(pd->dummy_page),
+ invalid_type);
+ } else {
+ pd->invalid_pde = 0;
+ pd->invalid_pte = 0;
+ }
+
+ v = kmap(pd->dummy_pt);
+ for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
+ v[i] = pd->invalid_pte;
+
+ kunmap(pd->dummy_pt);
+
+ v = kmap(pd->p);
+ for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
+ v[i] = pd->invalid_pde;
+
+ kunmap(pd->p);
+
+ clear_page(kmap(pd->dummy_page));
+ kunmap(pd->dummy_page);
+
+ pd->tables = vmalloc_user(sizeof(struct psb_mmu_pt *) * 1024);
+ if (!pd->tables)
+ goto out_err4;
+
+ pd->hw_context = -1;
+ pd->pd_mask = PSB_PTE_VALID;
+ pd->driver = driver;
+
+ return pd;
+
+out_err4:
+ __free_page(pd->dummy_page);
+out_err3:
+ __free_page(pd->dummy_pt);
+out_err2:
+ __free_page(pd->p);
+out_err1:
+ kfree(pd);
+ return NULL;
+}
+
+void psb_mmu_free_pt(struct psb_mmu_pt *pt)
+{
+ __free_page(pt->p);
+ kfree(pt);
+}
+
+void psb_mmu_free_pagedir(struct psb_mmu_pd *pd)
+{
+ struct psb_mmu_driver *driver = pd->driver;
+ struct psb_mmu_pt *pt;
+ int i;
+
+ down_write(&driver->sem);
+ if (pd->hw_context != -1)
+ psb_mmu_flush_pd_locked(driver, 1);
+
+ /* Should take the spinlock here, but we don't need to do that
+ since we have the semaphore in write mode. */
+
+ for (i = 0; i < 1024; ++i) {
+ pt = pd->tables[i];
+ if (pt)
+ psb_mmu_free_pt(pt);
+ }
+
+ vfree(pd->tables);
+ __free_page(pd->dummy_page);
+ __free_page(pd->dummy_pt);
+ __free_page(pd->p);
+ kfree(pd);
+ up_write(&driver->sem);
+}
+
+static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
+{
+ struct psb_mmu_pt *pt = kmalloc(sizeof(*pt), GFP_KERNEL);
+ void *v;
+ uint32_t clflush_add = pd->driver->clflush_add >> PAGE_SHIFT;
+ uint32_t clflush_count = PAGE_SIZE / clflush_add;
+ spinlock_t *lock = &pd->driver->lock;
+ uint8_t *clf;
+ uint32_t *ptes;
+ int i;
+
+ if (!pt)
+ return NULL;
+
+ pt->p = alloc_page(GFP_DMA32);
+ if (!pt->p) {
+ kfree(pt);
+ return NULL;
+ }
+
+ spin_lock(lock);
+
+ v = kmap_atomic(pt->p, KM_USER0);
+ clf = (uint8_t *) v;
+ ptes = (uint32_t *) v;
+ for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
+ *ptes++ = pd->invalid_pte;
+
+
+ if (pd->driver->has_clflush && pd->hw_context != -1) {
+ mb();
+ for (i = 0; i < clflush_count; ++i) {
+ psb_clflush(clf);
+ clf += clflush_add;
+ }
+ mb();
+ }
+
+ kunmap_atomic(v, KM_USER0);
+ spin_unlock(lock);
+
+ pt->count = 0;
+ pt->pd = pd;
+ pt->index = 0;
+
+ return pt;
+}
+
+struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
+ unsigned long addr)
+{
+ uint32_t index = psb_mmu_pd_index(addr);
+ struct psb_mmu_pt *pt;
+ uint32_t *v;
+ spinlock_t *lock = &pd->driver->lock;
+
+ spin_lock(lock);
+ pt = pd->tables[index];
+ while (!pt) {
+ spin_unlock(lock);
+ pt = psb_mmu_alloc_pt(pd);
+ if (!pt)
+ return NULL;
+ spin_lock(lock);
+
+ if (pd->tables[index]) {
+ spin_unlock(lock);
+ psb_mmu_free_pt(pt);
+ spin_lock(lock);
+ pt = pd->tables[index];
+ continue;
+ }
+
+ v = kmap_atomic(pd->p, KM_USER0);
+ pd->tables[index] = pt;
+ v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask;
+ pt->index = index;
+ kunmap_atomic((void *) v, KM_USER0);
+
+ if (pd->hw_context != -1) {
+ psb_mmu_clflush(pd->driver, (void *) &v[index]);
+ atomic_set(&pd->driver->needs_tlbflush, 1);
+ }
+ }
+ pt->v = kmap_atomic(pt->p, KM_USER0);
+ return pt;
+}
+
+static struct psb_mmu_pt *psb_mmu_pt_map_lock(struct psb_mmu_pd *pd,
+ unsigned long addr)
+{
+ uint32_t index = psb_mmu_pd_index(addr);
+ struct psb_mmu_pt *pt;
+ spinlock_t *lock = &pd->driver->lock;
+
+ spin_lock(lock);
+ pt = pd->tables[index];
+ if (!pt) {
+ spin_unlock(lock);
+ return NULL;
+ }
+ pt->v = kmap_atomic(pt->p, KM_USER0);
+ return pt;
+}
+
+static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
+{
+ struct psb_mmu_pd *pd = pt->pd;
+ uint32_t *v;
+
+ kunmap_atomic(pt->v, KM_USER0);
+ if (pt->count == 0) {
+ v = kmap_atomic(pd->p, KM_USER0);
+ v[pt->index] = pd->invalid_pde;
+ pd->tables[pt->index] = NULL;
+
+ if (pd->hw_context != -1) {
+ psb_mmu_clflush(pd->driver,
+ (void *) &v[pt->index]);
+ atomic_set(&pd->driver->needs_tlbflush, 1);
+ }
+ kunmap_atomic(pt->v, KM_USER0);
+ spin_unlock(&pd->driver->lock);
+ psb_mmu_free_pt(pt);
+ return;
+ }
+ spin_unlock(&pd->driver->lock);
+}
+
+static inline void psb_mmu_set_pte(struct psb_mmu_pt *pt,
+ unsigned long addr, uint32_t pte)
+{
+ pt->v[psb_mmu_pt_index(addr)] = pte;
+}
+
+static inline void psb_mmu_invalidate_pte(struct psb_mmu_pt *pt,
+ unsigned long addr)
+{
+ pt->v[psb_mmu_pt_index(addr)] = pt->pd->invalid_pte;
+}
+
+
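+/*
+ * Point a run of page-directory entries at the GTT, one PDE per GTT page
+ * starting at gtt_start, then flush CPU caches and, if the directory is
+ * bound to a hardware context, request a TLB flush.
+ */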
+void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd,
+ uint32_t mmu_offset, uint32_t gtt_start,
+ uint32_t gtt_pages)
+{
+ uint32_t *v;
+ uint32_t start = psb_mmu_pd_index(mmu_offset);
+ struct psb_mmu_driver *driver = pd->driver;
+ int num_pages = gtt_pages;
+
+ down_read(&driver->sem);
+ spin_lock(&driver->lock);
+
+ v = kmap_atomic(pd->p, KM_USER0);
+ v += start;
+
+ while (gtt_pages--) {
+ *v++ = gtt_start | pd->pd_mask;
+ gtt_start += PAGE_SIZE;
+ }
+
+ /*ttm_tt_cache_flush(&pd->p, num_pages);*/
+ psb_pages_clflush(pd->driver, &pd->p, num_pages);
+ kunmap_atomic(v, KM_USER0);
+ spin_unlock(&driver->lock);
+
+ if (pd->hw_context != -1)
+ atomic_set(&pd->driver->needs_tlbflush, 1);
+
+ up_read(&pd->driver->sem);
+ psb_mmu_flush_pd(pd->driver, 0);
+}
+
+struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver)
+{
+ struct psb_mmu_pd *pd;
+
+ /* down_read(&driver->sem); */
+ pd = driver->default_pd;
+ /* up_read(&driver->sem); */
+
+ return pd;
+}
+
+/* Returns the physical address of the PD shared by sgx/msvdx */
+uint32_t psb_get_default_pd_addr(struct psb_mmu_driver *driver)
+{
+ struct psb_mmu_pd *pd;
+
+ pd = psb_mmu_get_default_pd(driver);
+ return page_to_pfn(pd->p) << PAGE_SHIFT;
+}
+
+void psb_mmu_driver_takedown(struct psb_mmu_driver *driver)
+{
+ psb_mmu_free_pagedir(driver->default_pd);
+ kfree(driver);
+}
+
+struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
+ int trap_pagefaults,
+ int invalid_type,
+ struct drm_psb_private *dev_priv)
+{
+ struct psb_mmu_driver *driver;
+
+ driver = kmalloc(sizeof(*driver), GFP_KERNEL);
+
+ if (!driver)
+ return NULL;
+ driver->dev_priv = dev_priv;
+
+ driver->default_pd = psb_mmu_alloc_pd(driver, trap_pagefaults,
+ invalid_type);
+ if (!driver->default_pd)
+ goto out_err1;
+
+ spin_lock_init(&driver->lock);
+ init_rwsem(&driver->sem);
+ down_write(&driver->sem);
+ driver->register_map = registers;
+ atomic_set(&driver->needs_tlbflush, 1);
+
+ driver->has_clflush = 0;
+
+ if (boot_cpu_has(X86_FEATURE_CLFLSH)) {
+ uint32_t tfms, misc, cap0, cap4, clflush_size;
+
+ /*
+ * clflush size is determined at kernel setup for x86_64
+ * but not for i386. We have to do it here.
+ */
+
+ cpuid(0x00000001, &tfms, &misc, &cap0, &cap4);
+ clflush_size = ((misc >> 8) & 0xff) * 8;
+ driver->has_clflush = 1;
+ driver->clflush_add =
+ PAGE_SIZE * clflush_size / sizeof(uint32_t);
+ driver->clflush_mask = driver->clflush_add - 1;
+ driver->clflush_mask = ~driver->clflush_mask;
+ }
+
+ up_write(&driver->sem);
+ return driver;
+
+out_err1:
+ kfree(driver);
+ return NULL;
+}
+
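+/*
+ * Flush the CPU cache lines backing the PTEs that cover an (optionally
+ * tiled) address range. Note that without clflush support this currently
+ * degenerates to a no-op, since the old ttm cache flush is commented out.
+ */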
+static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd,
+ unsigned long address, uint32_t num_pages,
+ uint32_t desired_tile_stride,
+ uint32_t hw_tile_stride)
+{
+ struct psb_mmu_pt *pt;
+ uint32_t rows = 1;
+ uint32_t i;
+ unsigned long addr;
+ unsigned long end;
+ unsigned long next;
+ unsigned long add;
+ unsigned long row_add;
+ unsigned long clflush_add = pd->driver->clflush_add;
+ unsigned long clflush_mask = pd->driver->clflush_mask;
+
+ if (!pd->driver->has_clflush) {
+ /*ttm_tt_cache_flush(&pd->p, num_pages);*/
+ psb_pages_clflush(pd->driver, &pd->p, num_pages);
+ return;
+ }
+
+ if (hw_tile_stride)
+ rows = num_pages / desired_tile_stride;
+ else
+ desired_tile_stride = num_pages;
+
+ add = desired_tile_stride << PAGE_SHIFT;
+ row_add = hw_tile_stride << PAGE_SHIFT;
+ mb();
+ for (i = 0; i < rows; ++i) {
+
+ addr = address;
+ end = addr + add;
+
+ do {
+ next = psb_pd_addr_end(addr, end);
+ pt = psb_mmu_pt_map_lock(pd, addr);
+ if (!pt)
+ continue;
+ do {
+ psb_clflush(&pt->v[psb_mmu_pt_index(addr)]);
+ } while (addr += clflush_add,
+ (addr & clflush_mask) < next);
+
+ psb_mmu_pt_unmap_unlock(pt);
+ } while (addr = next, next != end);
+ address += row_add;
+ }
+ mb();
+}
+
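+/*
+ * Unmap a linear run of pages: invalidate each PTE, drop the page-table use
+ * counts and flush the affected cache lines and TLB.
+ */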
+void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
+ unsigned long address, uint32_t num_pages)
+{
+ struct psb_mmu_pt *pt;
+ unsigned long addr;
+ unsigned long end;
+ unsigned long next;
+ unsigned long f_address = address;
+
+ down_read(&pd->driver->sem);
+
+ addr = address;
+ end = addr + (num_pages << PAGE_SHIFT);
+
+ do {
+ next = psb_pd_addr_end(addr, end);
+ pt = psb_mmu_pt_alloc_map_lock(pd, addr);
+ if (!pt)
+ goto out;
+ do {
+ psb_mmu_invalidate_pte(pt, addr);
+ --pt->count;
+ } while (addr += PAGE_SIZE, addr < next);
+ psb_mmu_pt_unmap_unlock(pt);
+
+ } while (addr = next, next != end);
+
+out:
+ if (pd->hw_context != -1)
+ psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);
+
+ up_read(&pd->driver->sem);
+
+ if (pd->hw_context != -1)
+ psb_mmu_flush(pd->driver, 0);
+
+ return;
+}
+
+void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address,
+ uint32_t num_pages, uint32_t desired_tile_stride,
+ uint32_t hw_tile_stride)
+{
+ struct psb_mmu_pt *pt;
+ uint32_t rows = 1;
+ uint32_t i;
+ unsigned long addr;
+ unsigned long end;
+ unsigned long next;
+ unsigned long add;
+ unsigned long row_add;
+ unsigned long f_address = address;
+
+ if (hw_tile_stride)
+ rows = num_pages / desired_tile_stride;
+ else
+ desired_tile_stride = num_pages;
+
+ add = desired_tile_stride << PAGE_SHIFT;
+ row_add = hw_tile_stride << PAGE_SHIFT;
+
+ /* down_read(&pd->driver->sem); */
+
+ /* Make sure we only need to flush this processor's cache */
+
+ for (i = 0; i < rows; ++i) {
+
+ addr = address;
+ end = addr + add;
+
+ do {
+ next = psb_pd_addr_end(addr, end);
+ pt = psb_mmu_pt_map_lock(pd, addr);
+ if (!pt)
+ continue;
+ do {
+ psb_mmu_invalidate_pte(pt, addr);
+ --pt->count;
+
+ } while (addr += PAGE_SIZE, addr < next);
+ psb_mmu_pt_unmap_unlock(pt);
+
+ } while (addr = next, next != end);
+ address += row_add;
+ }
+ if (pd->hw_context != -1)
+ psb_mmu_flush_ptes(pd, f_address, num_pages,
+ desired_tile_stride, hw_tile_stride);
+
+ /* up_read(&pd->driver->sem); */
+
+ if (pd->hw_context != -1)
+ psb_mmu_flush(pd->driver, 0);
+}
+
+int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn,
+ unsigned long address, uint32_t num_pages,
+ int type)
+{
+ struct psb_mmu_pt *pt;
+ uint32_t pte;
+ unsigned long addr;
+ unsigned long end;
+ unsigned long next;
+ unsigned long f_address = address;
+ int ret = 0;
+
+ down_read(&pd->driver->sem);
+
+ addr = address;
+ end = addr + (num_pages << PAGE_SHIFT);
+
+ do {
+ next = psb_pd_addr_end(addr, end);
+ pt = psb_mmu_pt_alloc_map_lock(pd, addr);
+ if (!pt) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ do {
+ pte = psb_mmu_mask_pte(start_pfn++, type);
+ psb_mmu_set_pte(pt, addr, pte);
+ pt->count++;
+ } while (addr += PAGE_SIZE, addr < next);
+ psb_mmu_pt_unmap_unlock(pt);
+
+ } while (addr = next, next != end);
+
+out:
+ if (pd->hw_context != -1)
+ psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);
+
+ up_read(&pd->driver->sem);
+
+ if (pd->hw_context != -1)
+ psb_mmu_flush(pd->driver, 1);
+
+ return ret;
+}
+
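+/*
+ * Map an array of struct pages at a GPU virtual address. A non-zero
+ * hw_tile_stride lays the pages out as rows of desired_tile_stride pages,
+ * each row starting hw_tile_stride pages after the previous one; with a
+ * stride of 0 the whole array is mapped linearly, e.g. (illustrative only,
+ * gpu_addr and npages are placeholders):
+ *
+ *	ret = psb_mmu_insert_pages(pd, pages, gpu_addr, npages, 0, 0,
+ *				   PSB_MMU_CACHED_MEMORY);
+ */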
+int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
+ unsigned long address, uint32_t num_pages,
+ uint32_t desired_tile_stride,
+ uint32_t hw_tile_stride, int type)
+{
+ struct psb_mmu_pt *pt;
+ uint32_t rows = 1;
+ uint32_t i;
+ uint32_t pte;
+ unsigned long addr;
+ unsigned long end;
+ unsigned long next;
+ unsigned long add;
+ unsigned long row_add;
+ unsigned long f_address = address;
+ int ret = 0;
+
+ if (hw_tile_stride) {
+ if (num_pages % desired_tile_stride != 0)
+ return -EINVAL;
+ rows = num_pages / desired_tile_stride;
+ } else {
+ desired_tile_stride = num_pages;
+ }
+
+ add = desired_tile_stride << PAGE_SHIFT;
+ row_add = hw_tile_stride << PAGE_SHIFT;
+
+ down_read(&pd->driver->sem);
+
+ for (i = 0; i < rows; ++i) {
+
+ addr = address;
+ end = addr + add;
+
+ do {
+ next = psb_pd_addr_end(addr, end);
+ pt = psb_mmu_pt_alloc_map_lock(pd, addr);
+ if (!pt) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ do {
+ pte =
+ psb_mmu_mask_pte(page_to_pfn(*pages++),
+ type);
+ psb_mmu_set_pte(pt, addr, pte);
+ pt->count++;
+ } while (addr += PAGE_SIZE, addr < next);
+ psb_mmu_pt_unmap_unlock(pt);
+
+ } while (addr = next, next != end);
+
+ address += row_add;
+ }
+out:
+ if (pd->hw_context != -1)
+ psb_mmu_flush_ptes(pd, f_address, num_pages,
+ desired_tile_stride, hw_tile_stride);
+
+ up_read(&pd->driver->sem);
+
+ if (pd->hw_context != -1)
+ psb_mmu_flush(pd->driver, 1);
+
+ return ret;
+}
+
+int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
+ unsigned long *pfn)
+{
+ int ret;
+ struct psb_mmu_pt *pt;
+ uint32_t tmp;
+ spinlock_t *lock = &pd->driver->lock;
+
+ down_read(&pd->driver->sem);
+ pt = psb_mmu_pt_map_lock(pd, virtual);
+ if (!pt) {
+ uint32_t *v;
+
+ spin_lock(lock);
+ v = kmap_atomic(pd->p, KM_USER0);
+ tmp = v[psb_mmu_pd_index(virtual)];
+ kunmap_atomic(v, KM_USER0);
+ spin_unlock(lock);
+
+ if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) ||
+ !(pd->invalid_pte & PSB_PTE_VALID)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ ret = 0;
+ *pfn = pd->invalid_pte >> PAGE_SHIFT;
+ goto out;
+ }
+ tmp = pt->v[psb_mmu_pt_index(virtual)];
+ if (!(tmp & PSB_PTE_VALID)) {
+ ret = -EINVAL;
+ } else {
+ ret = 0;
+ *pfn = tmp >> PAGE_SHIFT;
+ }
+ psb_mmu_pt_unmap_unlock(pt);
+out:
+ up_read(&pd->driver->sem);
+ return ret;
+}
diff --git a/drivers/gpu/drm/gma500/oaktrail.h b/drivers/gpu/drm/gma500/oaktrail.h
new file mode 100644
index 000000000000..2da1f368f14e
--- /dev/null
+++ b/drivers/gpu/drm/gma500/oaktrail.h
@@ -0,0 +1,252 @@
+/**************************************************************************
+ * Copyright (c) 2007-2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+/* MID device specific descriptors */
+
+struct oaktrail_vbt {
+ s8 signature[4]; /*4 bytes,"$GCT" */
+ u8 revision;
+ u8 size;
+ u8 checksum;
+ void *oaktrail_gct;
+} __packed;
+
+struct oaktrail_timing_info {
+ u16 pixel_clock;
+ u8 hactive_lo;
+ u8 hblank_lo;
+ u8 hblank_hi:4;
+ u8 hactive_hi:4;
+ u8 vactive_lo;
+ u8 vblank_lo;
+ u8 vblank_hi:4;
+ u8 vactive_hi:4;
+ u8 hsync_offset_lo;
+ u8 hsync_pulse_width_lo;
+ u8 vsync_pulse_width_lo:4;
+ u8 vsync_offset_lo:4;
+ u8 vsync_pulse_width_hi:2;
+ u8 vsync_offset_hi:2;
+ u8 hsync_pulse_width_hi:2;
+ u8 hsync_offset_hi:2;
+ u8 width_mm_lo;
+ u8 height_mm_lo;
+ u8 height_mm_hi:4;
+ u8 width_mm_hi:4;
+ u8 hborder;
+ u8 vborder;
+ u8 unknown0:1;
+ u8 hsync_positive:1;
+ u8 vsync_positive:1;
+ u8 separate_sync:2;
+ u8 stereo:1;
+ u8 unknown6:1;
+ u8 interlaced:1;
+} __packed;
+
+struct gct_r10_timing_info {
+ u16 pixel_clock;
+ u32 hactive_lo:8;
+ u32 hactive_hi:4;
+ u32 hblank_lo:8;
+ u32 hblank_hi:4;
+ u32 hsync_offset_lo:8;
+ u16 hsync_offset_hi:2;
+ u16 hsync_pulse_width_lo:8;
+ u16 hsync_pulse_width_hi:2;
+ u16 hsync_positive:1;
+ u16 rsvd_1:3;
+ u8 vactive_lo:8;
+ u16 vactive_hi:4;
+ u16 vblank_lo:8;
+ u16 vblank_hi:4;
+ u16 vsync_offset_lo:4;
+ u16 vsync_offset_hi:2;
+ u16 vsync_pulse_width_lo:4;
+ u16 vsync_pulse_width_hi:2;
+ u16 vsync_positive:1;
+ u16 rsvd_2:3;
+} __packed;
+
+struct oaktrail_panel_descriptor_v1 {
+ u32 Panel_Port_Control; /* 1 dword, Register 0x61180 if LVDS */
+ /* 0x61190 if MIPI */
+ u32 Panel_Power_On_Sequencing;/*1 dword,Register 0x61208,*/
+ u32 Panel_Power_Off_Sequencing;/*1 dword,Register 0x6120C,*/
+ u32 Panel_Power_Cycle_Delay_and_Reference_Divisor;/* 1 dword */
+ /* Register 0x61210 */
+ struct oaktrail_timing_info DTD;/*18 bytes, Standard definition */
+ u16 Panel_Backlight_Inverter_Descriptor;/* 16 bits, as follows */
+ /* Bit 0, Frequency, 15 bits,0 - 32767Hz */
+ /* Bit 15, Polarity, 1 bit, 0: Normal, 1: Inverted */
+ u16 Panel_MIPI_Display_Descriptor;
+ /*16 bits, Defined as follows: */
+ /* if MIPI, 0x0000 if LVDS */
+ /* Bit 0, Type, 2 bits, */
+ /* 0: Type-1, */
+ /* 1: Type-2, */
+ /* 2: Type-3, */
+ /* 3: Type-4 */
+ /* Bit 2, Pixel Format, 4 bits */
+ /* Bit0: 16bpp (not supported in LNC), */
+ /* Bit1: 18bpp loosely packed, */
+ /* Bit2: 18bpp packed, */
+ /* Bit3: 24bpp */
+ /* Bit 6, Reserved, 2 bits, 00b */
+ /* Bit 8, Minimum Supported Frame Rate, 6 bits, 0 - 63Hz */
+ /* Bit 14, Reserved, 2 bits, 00b */
+} __packed;
+
+struct oaktrail_panel_descriptor_v2 {
+ u32 Panel_Port_Control; /* 1 dword, Register 0x61180 if LVDS */
+ /* 0x61190 if MIPI */
+ u32 Panel_Power_On_Sequencing;/*1 dword,Register 0x61208,*/
+ u32 Panel_Power_Off_Sequencing;/*1 dword,Register 0x6120C,*/
+ u8 Panel_Power_Cycle_Delay_and_Reference_Divisor;/* 1 byte */
+ /* Register 0x61210 */
+ struct oaktrail_timing_info DTD;/*18 bytes, Standard definition */
+ u16 Panel_Backlight_Inverter_Descriptor;/*16 bits, as follows*/
+ /*Bit 0, Frequency, 16 bits, 0 - 32767Hz*/
+ u8 Panel_Initial_Brightness;/* [7:0] 0 - 100% */
+ /*Bit 7, Polarity, 1 bit,0: Normal, 1: Inverted*/
+ u16 Panel_MIPI_Display_Descriptor;
+ /*16 bits, Defined as follows: */
+ /* if MIPI, 0x0000 if LVDS */
+ /* Bit 0, Type, 2 bits, */
+ /* 0: Type-1, */
+ /* 1: Type-2, */
+ /* 2: Type-3, */
+ /* 3: Type-4 */
+ /* Bit 2, Pixel Format, 4 bits */
+ /* Bit0: 16bpp (not supported in LNC), */
+ /* Bit1: 18bpp loosely packed, */
+ /* Bit2: 18bpp packed, */
+ /* Bit3: 24bpp */
+ /* Bit 6, Reserved, 2 bits, 00b */
+ /* Bit 8, Minimum Supported Frame Rate, 6 bits, 0 - 63Hz */
+ /* Bit 14, Reserved, 2 bits, 00b */
+} __packed;
+
+union oaktrail_panel_rx {
+ struct {
+ u16 NumberOfLanes:2; /*Num of Lanes, 2 bits,0 = 1 lane,*/
+ /* 1 = 2 lanes, 2 = 3 lanes, 3 = 4 lanes. */
+ u16 MaxLaneFreq:3; /* 0: 100MHz, 1: 200MHz, 2: 300MHz, */
+ /*3: 400MHz, 4: 500MHz, 5: 600MHz, 6: 700MHz, 7: 800MHz.*/
+ u16 SupportedVideoTransferMode:2; /*0: Non-burst only */
+ /* 1: Burst and non-burst */
+ /* 2/3: Reserved */
+ u16 HSClkBehavior:1; /*0: Continuous, 1: Non-continuous*/
+ u16 DuoDisplaySupport:1; /*1 bit,0: No, 1: Yes*/
+ u16 ECC_ChecksumCapabilities:1;/*1 bit,0: No, 1: Yes*/
+ u16 BidirectionalCommunication:1;/*1 bit,0: No, 1: Yes */
+ u16 Rsvd:5;/*5 bits,00000b */
+ } panelrx;
+ u16 panel_receiver;
+} __packed;
+
+struct oaktrail_gct_v1 {
+ union { /*8 bits,Defined as follows: */
+ struct {
+ u8 PanelType:4; /*4 bits, Bit field for panels*/
+ /* 0 - 3: 0 = LVDS, 1 = MIPI*/
+ /*2 bits,Specifies which of the*/
+ u8 BootPanelIndex:2;
+ /* 4 panels to use by default*/
+ u8 BootMIPI_DSI_RxIndex:2;/*Specifies which of*/
+ /* the 4 MIPI DSI receivers to use*/
+ } PD;
+ u8 PanelDescriptor;
+ };
+ struct oaktrail_panel_descriptor_v1 panel[4];/*panel descrs,38 bytes each*/
+ union oaktrail_panel_rx panelrx[4]; /* panel receivers*/
+} __packed;
+
+struct oaktrail_gct_v2 {
+ union { /*8 bits,Defined as follows: */
+ struct {
+ u8 PanelType:4; /*4 bits, Bit field for panels*/
+ /* 0 - 3: 0 = LVDS, 1 = MIPI*/
+ /*2 bits,Specifies which of the*/
+ u8 BootPanelIndex:2;
+ /* 4 panels to use by default*/
+ u8 BootMIPI_DSI_RxIndex:2;/*Specifies which of*/
+ /* the 4 MIPI DSI receivers to use*/
+ } PD;
+ u8 PanelDescriptor;
+ };
+ struct oaktrail_panel_descriptor_v2 panel[4];/*panel descrs,38 bytes each*/
+ union oaktrail_panel_rx panelrx[4]; /* panel receivers*/
+} __packed;
+
+struct oaktrail_gct_data {
+ u8 bpi; /* boot panel index, number of panel used during boot */
+ u8 pt; /* panel type, 4 bit field, 0=lvds, 1=mipi */
+ struct oaktrail_timing_info DTD; /* timing info for the selected panel */
+ u32 Panel_Port_Control;
+ u32 PP_On_Sequencing;/*1 dword,Register 0x61208,*/
+ u32 PP_Off_Sequencing;/*1 dword,Register 0x6120C,*/
+ u32 PP_Cycle_Delay;
+ u16 Panel_Backlight_Inverter_Descriptor;
+ u16 Panel_MIPI_Display_Descriptor;
+} __packed;
+
+#define MODE_SETTING_IN_CRTC 0x1
+#define MODE_SETTING_IN_ENCODER 0x2
+#define MODE_SETTING_ON_GOING 0x3
+#define MODE_SETTING_IN_DSR 0x4
+#define MODE_SETTING_ENCODER_DONE 0x8
+
+#define GCT_R10_HEADER_SIZE 16
+#define GCT_R10_DISPLAY_DESC_SIZE 28
+
+/*
+ * Moorestown HDMI interfaces
+ */
+
+struct oaktrail_hdmi_dev {
+ struct pci_dev *dev;
+ void __iomem *regs;
+ unsigned int mmio, mmio_len;
+ int dpms_mode;
+ struct hdmi_i2c_dev *i2c_dev;
+
+ /* register state */
+ u32 saveDPLL_CTRL;
+ u32 saveDPLL_DIV_CTRL;
+ u32 saveDPLL_ADJUST;
+ u32 saveDPLL_UPDATE;
+ u32 saveDPLL_CLK_ENABLE;
+ u32 savePCH_HTOTAL_B;
+ u32 savePCH_HBLANK_B;
+ u32 savePCH_HSYNC_B;
+ u32 savePCH_VTOTAL_B;
+ u32 savePCH_VBLANK_B;
+ u32 savePCH_VSYNC_B;
+ u32 savePCH_PIPEBCONF;
+ u32 savePCH_PIPEBSRC;
+};
+
+extern void oaktrail_hdmi_setup(struct drm_device *dev);
+extern void oaktrail_hdmi_teardown(struct drm_device *dev);
+extern int oaktrail_hdmi_i2c_init(struct pci_dev *dev);
+extern void oaktrail_hdmi_i2c_exit(struct pci_dev *dev);
+extern void oaktrail_hdmi_save(struct drm_device *dev);
+extern void oaktrail_hdmi_restore(struct drm_device *dev);
+extern void oaktrail_hdmi_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev);
diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
new file mode 100644
index 000000000000..9d12a3ee1600
--- /dev/null
+++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c
@@ -0,0 +1,604 @@
+/*
+ * Copyright © 2009 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/i2c.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/drmP.h>
+#include "framebuffer.h"
+#include "psb_drv.h"
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "psb_intel_display.h"
+#include "power.h"
+
+struct psb_intel_range_t {
+ int min, max;
+};
+
+struct oaktrail_limit_t {
+ struct psb_intel_range_t dot, m, p1;
+};
+
+struct oaktrail_clock_t {
+ /* derived values */
+ int dot;
+ int m;
+ int p1;
+};
+
+#define MRST_LIMIT_LVDS_100L 0
+#define MRST_LIMIT_LVDS_83 1
+#define MRST_LIMIT_LVDS_100 2
+
+#define MRST_DOT_MIN 19750
+#define MRST_DOT_MAX 120000
+#define MRST_M_MIN_100L 20
+#define MRST_M_MIN_100 10
+#define MRST_M_MIN_83 12
+#define MRST_M_MAX_100L 34
+#define MRST_M_MAX_100 17
+#define MRST_M_MAX_83 20
+#define MRST_P1_MIN 2
+#define MRST_P1_MAX_0 7
+#define MRST_P1_MAX_1 8
+
+static const struct oaktrail_limit_t oaktrail_limits[] = {
+ { /* MRST_LIMIT_LVDS_100L */
+ .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
+ .m = {.min = MRST_M_MIN_100L, .max = MRST_M_MAX_100L},
+ .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_1},
+ },
+ { /* MRST_LIMIT_LVDS_83 */
+ .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
+ .m = {.min = MRST_M_MIN_83, .max = MRST_M_MAX_83},
+ .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_0},
+ },
+ { /* MRST_LIMIT_LVDS_100 */
+ .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
+ .m = {.min = MRST_M_MIN_100, .max = MRST_M_MAX_100},
+ .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_1},
+ },
+};
+
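+/*
+ * Hardware encodings for the PLL 'm' divisor; index 0 is assumed to
+ * correspond to m == MRST_M_MIN (the table is consumed by the mode-setting
+ * code outside this file excerpt).
+ */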
+#define MRST_M_MIN 10
+static const u32 oaktrail_m_converts[] = {
+ 0x2B, 0x15, 0x2A, 0x35, 0x1A, 0x0D, 0x26, 0x33, 0x19, 0x2C,
+ 0x36, 0x3B, 0x1D, 0x2E, 0x37, 0x1B, 0x2D, 0x16, 0x0B, 0x25,
+ 0x12, 0x09, 0x24, 0x32, 0x39, 0x1c,
+};
+
+static const struct oaktrail_limit_t *oaktrail_limit(struct drm_crtc *crtc)
+{
+ const struct oaktrail_limit_t *limit = NULL;
+ struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)
+ || psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_MIPI)) {
+ switch (dev_priv->core_freq) {
+ case 100:
+ limit = &oaktrail_limits[MRST_LIMIT_LVDS_100L];
+ break;
+ case 166:
+ limit = &oaktrail_limits[MRST_LIMIT_LVDS_83];
+ break;
+ case 200:
+ limit = &oaktrail_limits[MRST_LIMIT_LVDS_100];
+ break;
+ }
+ } else {
+ limit = NULL;
+ dev_err(dev->dev, "oaktrail_limit Wrong display type.\n");
+ }
+
+ return limit;
+}
+
+/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
+static void oaktrail_clock(int refclk, struct oaktrail_clock_t *clock)
+{
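+	/* Fixed post-divider of 14: dot clock = (refclk * m) / (14 * p1) */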
+ clock->dot = (refclk * clock->m) / (14 * clock->p1);
+}
+
+void mrstPrintPll(char *prefix, struct oaktrail_clock_t *clock)
+{
+ pr_debug("%s: dotclock = %d, m = %d, p1 = %d.\n",
+ prefix, clock->dot, clock->m, clock->p1);
+}
+
+/**
+ * Returns a set of divisors for the desired target clock with the given refclk,
+ * or FALSE if no suitable divisors exist. The values returned are the
+ * actual hardware divisors.
+ */
+static bool
+mrstFindBestPLL(struct drm_crtc *crtc, int target, int refclk,
+ struct oaktrail_clock_t *best_clock)
+{
+ struct oaktrail_clock_t clock;
+ const struct oaktrail_limit_t *limit = oaktrail_limit(crtc);
+ int err = target;
+
+ memset(best_clock, 0, sizeof(*best_clock));
+
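+	/*
+	 * Walk the whole m/p1 range and keep the divisor pair whose computed
+	 * dot clock lands closest to the requested target.
+	 */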
+ for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) {
+ for (clock.p1 = limit->p1.min; clock.p1 <= limit->p1.max;
+ clock.p1++) {
+ int this_err;
+
+ oaktrail_clock(refclk, &clock);
+
+ this_err = abs(clock.dot - target);
+ if (this_err < err) {
+ *best_clock = clock;
+ err = this_err;
+ }
+ }
+ }
+ dev_dbg(crtc->dev->dev, "mrstFindBestPLL err = %d.\n", err);
+ return err != target;
+}
+
+/**
+ * Sets the power management mode of the pipe and plane.
+ *
+ * This code should probably grow support for turning the cursor off and back
+ * on appropriately at the same time as we're turning the pipe off/on.
+ */
+static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ int pipe = psb_intel_crtc->pipe;
+ int dpll_reg = (pipe == 0) ? MRST_DPLL_A : DPLL_B;
+ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
+ int dspbase_reg = (pipe == 0) ? MRST_DSPABASE : DSPBBASE;
+ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+ u32 temp;
+ bool enabled;
+
+ if (!gma_power_begin(dev, true))
+ return;
+
+ /* XXX: When our outputs are all unaware of DPMS modes other than off
+ * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
+ */
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ /* Enable the DPLL */
+ temp = REG_READ(dpll_reg);
+ if ((temp & DPLL_VCO_ENABLE) == 0) {
+ REG_WRITE(dpll_reg, temp);
+ REG_READ(dpll_reg);
+ /* Wait for the clocks to stabilize. */
+ udelay(150);
+ REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
+ REG_READ(dpll_reg);
+ /* Wait for the clocks to stabilize. */
+ udelay(150);
+ REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
+ REG_READ(dpll_reg);
+ /* Wait for the clocks to stabilize. */
+ udelay(150);
+ }
+ /* Enable the pipe */
+ temp = REG_READ(pipeconf_reg);
+ if ((temp & PIPEACONF_ENABLE) == 0)
+ REG_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
+ /* Enable the plane */
+ temp = REG_READ(dspcntr_reg);
+ if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
+ REG_WRITE(dspcntr_reg,
+ temp | DISPLAY_PLANE_ENABLE);
+ /* Flush the plane changes */
+ REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+ }
+
+ psb_intel_crtc_load_lut(crtc);
+
+		/* Give the overlay scaler a chance to enable
+		 * if it's on this pipe */
+ /* psb_intel_crtc_dpms_video(crtc, true); TODO */
+ break;
+ case DRM_MODE_DPMS_OFF:
+ /* Give the overlay scaler a chance to disable
+ * if it's on this pipe */
+ /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
+
+ /* Disable the VGA plane that we never use */
+ REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
+ /* Disable display plane */
+ temp = REG_READ(dspcntr_reg);
+ if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
+ REG_WRITE(dspcntr_reg,
+ temp & ~DISPLAY_PLANE_ENABLE);
+ /* Flush the plane changes */
+ REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+ REG_READ(dspbase_reg);
+ }
+
+ /* Next, disable display pipes */
+ temp = REG_READ(pipeconf_reg);
+ if ((temp & PIPEACONF_ENABLE) != 0) {
+ REG_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
+ REG_READ(pipeconf_reg);
+ }
+		/* Wait for the pipe disable to take effect. */
+ psb_intel_wait_for_vblank(dev);
+
+ temp = REG_READ(dpll_reg);
+ if ((temp & DPLL_VCO_ENABLE) != 0) {
+ REG_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
+ REG_READ(dpll_reg);
+ }
+
+ /* Wait for the clocks to turn off. */
+ udelay(150);
+ break;
+ }
+
+ enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
+
+ /*Set FIFO Watermarks*/
+ REG_WRITE(DSPARB, 0x3FFF);
+ REG_WRITE(DSPFW1, 0x3F88080A);
+ REG_WRITE(DSPFW2, 0x0b060808);
+ REG_WRITE(DSPFW3, 0x0);
+ REG_WRITE(DSPFW4, 0x08030404);
+ REG_WRITE(DSPFW5, 0x04040404);
+ REG_WRITE(DSPFW6, 0x78);
+ REG_WRITE(0x70400, REG_READ(0x70400) | 0x4000);
+ /* Must write Bit 14 of the Chicken Bit Register */
+
+ gma_power_end(dev);
+}
+
+/**
+ * Return the pipe currently connected to the panel fitter,
+ * or -1 if the panel fitter is not present or not in use
+ */
+static int oaktrail_panel_fitter_pipe(struct drm_device *dev)
+{
+ u32 pfit_control;
+
+ pfit_control = REG_READ(PFIT_CONTROL);
+
+ /* See if the panel fitter is in use */
+ if ((pfit_control & PFIT_ENABLE) == 0)
+ return -1;
+ return (pfit_control >> 29) & 3;
+}
+
+static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ int pipe = psb_intel_crtc->pipe;
+ int fp_reg = (pipe == 0) ? MRST_FPA0 : FPB0;
+ int dpll_reg = (pipe == 0) ? MRST_DPLL_A : DPLL_B;
+ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
+ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+ int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
+ int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
+ int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
+ int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
+ int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
+ int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
+ int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
+ int refclk = 0;
+ struct oaktrail_clock_t clock;
+ u32 dpll = 0, fp = 0, dspcntr, pipeconf;
+ bool ok, is_sdvo = false;
+ bool is_crt = false, is_lvds = false, is_tv = false;
+ bool is_mipi = false;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct psb_intel_encoder *psb_intel_encoder = NULL;
+ uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN;
+ struct drm_connector *connector;
+
+ if (!gma_power_begin(dev, true))
+ return 0;
+
+ memcpy(&psb_intel_crtc->saved_mode,
+ mode,
+ sizeof(struct drm_display_mode));
+ memcpy(&psb_intel_crtc->saved_adjusted_mode,
+ adjusted_mode,
+ sizeof(struct drm_display_mode));
+
+ list_for_each_entry(connector, &mode_config->connector_list, head) {
+ if (!connector->encoder || connector->encoder->crtc != crtc)
+ continue;
+
+ psb_intel_encoder = psb_intel_attached_encoder(connector);
+
+ switch (psb_intel_encoder->type) {
+ case INTEL_OUTPUT_LVDS:
+ is_lvds = true;
+ break;
+ case INTEL_OUTPUT_SDVO:
+ is_sdvo = true;
+ break;
+ case INTEL_OUTPUT_TVOUT:
+ is_tv = true;
+ break;
+ case INTEL_OUTPUT_ANALOG:
+ is_crt = true;
+ break;
+ case INTEL_OUTPUT_MIPI:
+ is_mipi = true;
+ break;
+ }
+ }
+
+ /* Disable the VGA plane that we never use */
+ REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
+
+ /* Disable the panel fitter if it was on our pipe */
+ if (oaktrail_panel_fitter_pipe(dev) == pipe)
+ REG_WRITE(PFIT_CONTROL, 0);
+
+ REG_WRITE(pipesrc_reg,
+ ((mode->crtc_hdisplay - 1) << 16) |
+ (mode->crtc_vdisplay - 1));
+
+ if (psb_intel_encoder)
+ drm_connector_property_get_value(connector,
+ dev->mode_config.scaling_mode_property, &scalingType);
+
+ if (scalingType == DRM_MODE_SCALE_NO_SCALE) {
+ /* Moorestown doesn't have register support for centering so
+ * we need to mess with the h/vblank and h/vsync start and
+ * ends to get centering */
+ int offsetX = 0, offsetY = 0;
+
+ offsetX = (adjusted_mode->crtc_hdisplay -
+ mode->crtc_hdisplay) / 2;
+ offsetY = (adjusted_mode->crtc_vdisplay -
+ mode->crtc_vdisplay) / 2;
+
+ REG_WRITE(htot_reg, (mode->crtc_hdisplay - 1) |
+ ((adjusted_mode->crtc_htotal - 1) << 16));
+ REG_WRITE(vtot_reg, (mode->crtc_vdisplay - 1) |
+ ((adjusted_mode->crtc_vtotal - 1) << 16));
+ REG_WRITE(hblank_reg,
+ (adjusted_mode->crtc_hblank_start - offsetX - 1) |
+ ((adjusted_mode->crtc_hblank_end - offsetX - 1) << 16));
+ REG_WRITE(hsync_reg,
+ (adjusted_mode->crtc_hsync_start - offsetX - 1) |
+ ((adjusted_mode->crtc_hsync_end - offsetX - 1) << 16));
+ REG_WRITE(vblank_reg,
+ (adjusted_mode->crtc_vblank_start - offsetY - 1) |
+ ((adjusted_mode->crtc_vblank_end - offsetY - 1) << 16));
+ REG_WRITE(vsync_reg,
+ (adjusted_mode->crtc_vsync_start - offsetY - 1) |
+ ((adjusted_mode->crtc_vsync_end - offsetY - 1) << 16));
+ } else {
+ REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
+ ((adjusted_mode->crtc_htotal - 1) << 16));
+ REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
+ ((adjusted_mode->crtc_vtotal - 1) << 16));
+ REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
+ ((adjusted_mode->crtc_hblank_end - 1) << 16));
+ REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
+ ((adjusted_mode->crtc_hsync_end - 1) << 16));
+ REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
+ ((adjusted_mode->crtc_vblank_end - 1) << 16));
+ REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
+ ((adjusted_mode->crtc_vsync_end - 1) << 16));
+ }
+
+ /* Flush the plane changes */
+ {
+ struct drm_crtc_helper_funcs *crtc_funcs =
+ crtc->helper_private;
+ crtc_funcs->mode_set_base(crtc, x, y, old_fb);
+ }
+
+ /* setup pipeconf */
+ pipeconf = REG_READ(pipeconf_reg);
+
+ /* Set up the display plane register */
+ dspcntr = REG_READ(dspcntr_reg);
+ dspcntr |= DISPPLANE_GAMMA_ENABLE;
+
+ if (pipe == 0)
+ dspcntr |= DISPPLANE_SEL_PIPE_A;
+ else
+ dspcntr |= DISPPLANE_SEL_PIPE_B;
+
+ dev_priv->dspcntr = dspcntr |= DISPLAY_PLANE_ENABLE;
+ dev_priv->pipeconf = pipeconf |= PIPEACONF_ENABLE;
+
+ if (is_mipi)
+ goto oaktrail_crtc_mode_set_exit;
+
+ refclk = dev_priv->core_freq * 1000;
+
+ dpll = 0; /*BIT16 = 0 for 100MHz reference */
+
+ ok = mrstFindBestPLL(crtc, adjusted_mode->clock, refclk, &clock);
+
+ if (!ok) {
+ dev_dbg(dev->dev, "mrstFindBestPLL fail in oaktrail_crtc_mode_set.\n");
+ } else {
+		dev_dbg(dev->dev, "oaktrail_crtc_mode_set pixel clock = %d, "
+ "m = %x, p1 = %x.\n", clock.dot, clock.m,
+ clock.p1);
+ }
+
+ fp = oaktrail_m_converts[(clock.m - MRST_M_MIN)] << 8;
+
+ dpll |= DPLL_VGA_MODE_DIS;
+
+
+ dpll |= DPLL_VCO_ENABLE;
+
+ if (is_lvds)
+ dpll |= DPLLA_MODE_LVDS;
+ else
+ dpll |= DPLLB_MODE_DAC_SERIAL;
+
+ if (is_sdvo) {
+ int sdvo_pixel_multiply =
+ adjusted_mode->clock / mode->clock;
+
+ dpll |= DPLL_DVO_HIGH_SPEED;
+ dpll |=
+ (sdvo_pixel_multiply -
+ 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
+ }
+
+
+ /* compute bitmask from p1 value */
+ dpll |= (1 << (clock.p1 - 2)) << 17;
+
+ dpll |= DPLL_VCO_ENABLE;
+
+ mrstPrintPll("chosen", &clock);
+
+ if (dpll & DPLL_VCO_ENABLE) {
+ REG_WRITE(fp_reg, fp);
+ REG_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
+ REG_READ(dpll_reg);
+ /* Check the DPLLA lock bit PIPEACONF[29] */
+ udelay(150);
+ }
+
+ REG_WRITE(fp_reg, fp);
+ REG_WRITE(dpll_reg, dpll);
+ REG_READ(dpll_reg);
+ /* Wait for the clocks to stabilize. */
+ udelay(150);
+
+ /* write it again -- the BIOS does, after all */
+ REG_WRITE(dpll_reg, dpll);
+ REG_READ(dpll_reg);
+ /* Wait for the clocks to stabilize. */
+ udelay(150);
+
+ REG_WRITE(pipeconf_reg, pipeconf);
+ REG_READ(pipeconf_reg);
+ psb_intel_wait_for_vblank(dev);
+
+ REG_WRITE(dspcntr_reg, dspcntr);
+ psb_intel_wait_for_vblank(dev);
+
+oaktrail_crtc_mode_set_exit:
+ gma_power_end(dev);
+ return 0;
+}
+
+static bool oaktrail_crtc_mode_fixup(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+int oaktrail_pipe_set_base(struct drm_crtc *crtc,
+ int x, int y, struct drm_framebuffer *old_fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
+ int pipe = psb_intel_crtc->pipe;
+ unsigned long start, offset;
+
+ int dspbase = (pipe == 0 ? DSPALINOFF : DSPBBASE);
+ int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF);
+ int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
+ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
+ u32 dspcntr;
+ int ret = 0;
+
+ /* no fb bound */
+ if (!crtc->fb) {
+ dev_dbg(dev->dev, "No FB bound\n");
+ return 0;
+ }
+
+ if (!gma_power_begin(dev, true))
+ return 0;
+
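+	/* Byte offset of (x, y) in the framebuffer: full rows of 'pitch'
+	 * bytes plus x pixels of bits_per_pixel / 8 bytes each. */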
+ start = psbfb->gtt->offset;
+ offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
+
+ REG_WRITE(dspstride, crtc->fb->pitches[0]);
+
+ dspcntr = REG_READ(dspcntr_reg);
+ dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
+
+ switch (crtc->fb->bits_per_pixel) {
+ case 8:
+ dspcntr |= DISPPLANE_8BPP;
+ break;
+ case 16:
+ if (crtc->fb->depth == 15)
+ dspcntr |= DISPPLANE_15_16BPP;
+ else
+ dspcntr |= DISPPLANE_16BPP;
+ break;
+ case 24:
+ case 32:
+ dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
+ break;
+ default:
+ dev_err(dev->dev, "Unknown color depth\n");
+ ret = -EINVAL;
+ goto pipe_set_base_exit;
+ }
+ REG_WRITE(dspcntr_reg, dspcntr);
+
+ REG_WRITE(dspbase, offset);
+ REG_READ(dspbase);
+ REG_WRITE(dspsurf, start);
+ REG_READ(dspsurf);
+
+pipe_set_base_exit:
+ gma_power_end(dev);
+ return ret;
+}
+
+static void oaktrail_crtc_prepare(struct drm_crtc *crtc)
+{
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+static void oaktrail_crtc_commit(struct drm_crtc *crtc)
+{
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
+}
+
+const struct drm_crtc_helper_funcs oaktrail_helper_funcs = {
+ .dpms = oaktrail_crtc_dpms,
+ .mode_fixup = oaktrail_crtc_mode_fixup,
+ .mode_set = oaktrail_crtc_mode_set,
+ .mode_set_base = oaktrail_pipe_set_base,
+ .prepare = oaktrail_crtc_prepare,
+ .commit = oaktrail_crtc_commit,
+};
+
diff --git a/drivers/gpu/drm/gma500/oaktrail_device.c b/drivers/gpu/drm/gma500/oaktrail_device.c
new file mode 100644
index 000000000000..63aea2f010d9
--- /dev/null
+++ b/drivers/gpu/drm/gma500/oaktrail_device.c
@@ -0,0 +1,512 @@
+/**************************************************************************
+ * Copyright (c) 2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#include <linux/backlight.h>
+#include <linux/module.h>
+#include <linux/dmi.h>
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include "gma_drm.h"
+#include "psb_drv.h"
+#include "psb_reg.h"
+#include "psb_intel_reg.h"
+#include <asm/mrst.h>
+#include <asm/intel_scu_ipc.h>
+#include "mid_bios.h"
+#include "intel_bios.h"
+
+static int oaktrail_output_init(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ if (dev_priv->iLVDS_enable)
+ oaktrail_lvds_init(dev, &dev_priv->mode_dev);
+ else
+ dev_err(dev->dev, "DSI is not supported\n");
+ if (dev_priv->hdmi_priv)
+ oaktrail_hdmi_init(dev, &dev_priv->mode_dev);
+ return 0;
+}
+
+/*
+ * Provide the low level interfaces for the Moorestown backlight
+ */
+
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+
+#define MRST_BLC_MAX_PWM_REG_FREQ 0xFFFF
+#define BLC_PWM_PRECISION_FACTOR 100 /* 10000000 */
+#define BLC_PWM_FREQ_CALC_CONSTANT 32
+#define MHz 1000000
+#define BLC_ADJUSTMENT_MAX 100
+
+static struct backlight_device *oaktrail_backlight_device;
+static int oaktrail_brightness;
+
+static int oaktrail_set_brightness(struct backlight_device *bd)
+{
+ struct drm_device *dev = bl_get_data(oaktrail_backlight_device);
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ int level = bd->props.brightness;
+ u32 blc_pwm_ctl;
+ u32 max_pwm_blc;
+
+	/* Only brightness percentages from 1 to 100 are valid */
+ if (level < 1)
+ level = 1;
+
+ if (gma_power_begin(dev, 0)) {
+ /* Calculate and set the brightness value */
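+		/* The PWM period lives in the upper 16 bits of BLC_PWM_CTL;
+		 * scale the requested percentage against it. */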
+ max_pwm_blc = REG_READ(BLC_PWM_CTL) >> 16;
+ blc_pwm_ctl = level * max_pwm_blc / 100;
+
+ /* Adjust the backlight level with the percent in
+ * dev_priv->blc_adj1;
+ */
+ blc_pwm_ctl = blc_pwm_ctl * dev_priv->blc_adj1;
+ blc_pwm_ctl = blc_pwm_ctl / 100;
+
+ /* Adjust the backlight level with the percent in
+ * dev_priv->blc_adj2;
+ */
+ blc_pwm_ctl = blc_pwm_ctl * dev_priv->blc_adj2;
+ blc_pwm_ctl = blc_pwm_ctl / 100;
+
+ /* force PWM bit on */
+ REG_WRITE(BLC_PWM_CTL2, (0x80000000 | REG_READ(BLC_PWM_CTL2)));
+ REG_WRITE(BLC_PWM_CTL, (max_pwm_blc << 16) | blc_pwm_ctl);
+ gma_power_end(dev);
+ }
+ oaktrail_brightness = level;
+ return 0;
+}
+
+static int oaktrail_get_brightness(struct backlight_device *bd)
+{
+ /* return locally cached var instead of HW read (due to DPST etc.) */
+ /* FIXME: ideally return actual value in case firmware fiddled with
+ it */
+ return oaktrail_brightness;
+}
+
+static int device_backlight_init(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ unsigned long core_clock;
+ u16 bl_max_freq;
+ uint32_t value;
+ uint32_t blc_pwm_precision_factor;
+
+ dev_priv->blc_adj1 = BLC_ADJUSTMENT_MAX;
+ dev_priv->blc_adj2 = BLC_ADJUSTMENT_MAX;
+ bl_max_freq = 256;
+ /* this needs to be set elsewhere */
+ blc_pwm_precision_factor = BLC_PWM_PRECISION_FACTOR;
+
+ core_clock = dev_priv->core_freq;
+
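+	/* Derive the PWM period register value from the core display clock
+	 * and the maximum backlight modulation frequency. */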
+ value = (core_clock * MHz) / BLC_PWM_FREQ_CALC_CONSTANT;
+ value *= blc_pwm_precision_factor;
+ value /= bl_max_freq;
+ value /= blc_pwm_precision_factor;
+
+ if (value > (unsigned long long)MRST_BLC_MAX_PWM_REG_FREQ)
+ return -ERANGE;
+
+ if (gma_power_begin(dev, false)) {
+ REG_WRITE(BLC_PWM_CTL2, (0x80000000 | REG_READ(BLC_PWM_CTL2)));
+ REG_WRITE(BLC_PWM_CTL, value | (value << 16));
+ gma_power_end(dev);
+ }
+ return 0;
+}
+
+static const struct backlight_ops oaktrail_ops = {
+ .get_brightness = oaktrail_get_brightness,
+ .update_status = oaktrail_set_brightness,
+};
+
+int oaktrail_backlight_init(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ int ret;
+ struct backlight_properties props;
+
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.max_brightness = 100;
+ props.type = BACKLIGHT_PLATFORM;
+
+ oaktrail_backlight_device = backlight_device_register("oaktrail-bl",
+ NULL, (void *)dev, &oaktrail_ops, &props);
+
+ if (IS_ERR(oaktrail_backlight_device))
+ return PTR_ERR(oaktrail_backlight_device);
+
+ ret = device_backlight_init(dev);
+ if (ret < 0) {
+ backlight_device_unregister(oaktrail_backlight_device);
+ return ret;
+ }
+ oaktrail_backlight_device->props.brightness = 100;
+ oaktrail_backlight_device->props.max_brightness = 100;
+ backlight_update_status(oaktrail_backlight_device);
+ dev_priv->backlight_device = oaktrail_backlight_device;
+ return 0;
+}
+
+#endif
+
+/*
+ * Provide the Moorestown specific chip logic and low level methods
+ * for power management
+ */
+
+static void oaktrail_init_pm(struct drm_device *dev)
+{
+}
+
+/**
+ * oaktrail_save_display_registers - save registers lost on suspend
+ * @dev: our DRM device
+ *
+ * Save the state we need in order to be able to restore the interface
+ * upon resume from suspend
+ */
+static int oaktrail_save_display_registers(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ int i;
+ u32 pp_stat;
+
+ /* Display arbitration control + watermarks */
+ dev_priv->saveDSPARB = PSB_RVDC32(DSPARB);
+ dev_priv->saveDSPFW1 = PSB_RVDC32(DSPFW1);
+ dev_priv->saveDSPFW2 = PSB_RVDC32(DSPFW2);
+ dev_priv->saveDSPFW3 = PSB_RVDC32(DSPFW3);
+ dev_priv->saveDSPFW4 = PSB_RVDC32(DSPFW4);
+ dev_priv->saveDSPFW5 = PSB_RVDC32(DSPFW5);
+ dev_priv->saveDSPFW6 = PSB_RVDC32(DSPFW6);
+ dev_priv->saveCHICKENBIT = PSB_RVDC32(DSPCHICKENBIT);
+
+ /* Pipe & plane A info */
+ dev_priv->savePIPEACONF = PSB_RVDC32(PIPEACONF);
+ dev_priv->savePIPEASRC = PSB_RVDC32(PIPEASRC);
+ dev_priv->saveFPA0 = PSB_RVDC32(MRST_FPA0);
+ dev_priv->saveFPA1 = PSB_RVDC32(MRST_FPA1);
+ dev_priv->saveDPLL_A = PSB_RVDC32(MRST_DPLL_A);
+ dev_priv->saveHTOTAL_A = PSB_RVDC32(HTOTAL_A);
+ dev_priv->saveHBLANK_A = PSB_RVDC32(HBLANK_A);
+ dev_priv->saveHSYNC_A = PSB_RVDC32(HSYNC_A);
+ dev_priv->saveVTOTAL_A = PSB_RVDC32(VTOTAL_A);
+ dev_priv->saveVBLANK_A = PSB_RVDC32(VBLANK_A);
+ dev_priv->saveVSYNC_A = PSB_RVDC32(VSYNC_A);
+ dev_priv->saveBCLRPAT_A = PSB_RVDC32(BCLRPAT_A);
+ dev_priv->saveDSPACNTR = PSB_RVDC32(DSPACNTR);
+ dev_priv->saveDSPASTRIDE = PSB_RVDC32(DSPASTRIDE);
+ dev_priv->saveDSPAADDR = PSB_RVDC32(DSPABASE);
+ dev_priv->saveDSPASURF = PSB_RVDC32(DSPASURF);
+ dev_priv->saveDSPALINOFF = PSB_RVDC32(DSPALINOFF);
+ dev_priv->saveDSPATILEOFF = PSB_RVDC32(DSPATILEOFF);
+
+ /* Save cursor regs */
+ dev_priv->saveDSPACURSOR_CTRL = PSB_RVDC32(CURACNTR);
+ dev_priv->saveDSPACURSOR_BASE = PSB_RVDC32(CURABASE);
+ dev_priv->saveDSPACURSOR_POS = PSB_RVDC32(CURAPOS);
+
+ /* Save palette (gamma) */
+ for (i = 0; i < 256; i++)
+ dev_priv->save_palette_a[i] = PSB_RVDC32(PALETTE_A + (i << 2));
+
+ if (dev_priv->hdmi_priv)
+ oaktrail_hdmi_save(dev);
+
+ /* Save performance state */
+ dev_priv->savePERF_MODE = PSB_RVDC32(MRST_PERF_MODE);
+
+ /* LVDS state */
+ dev_priv->savePP_CONTROL = PSB_RVDC32(PP_CONTROL);
+ dev_priv->savePFIT_PGM_RATIOS = PSB_RVDC32(PFIT_PGM_RATIOS);
+ dev_priv->savePFIT_AUTO_RATIOS = PSB_RVDC32(PFIT_AUTO_RATIOS);
+ dev_priv->saveBLC_PWM_CTL = PSB_RVDC32(BLC_PWM_CTL);
+ dev_priv->saveBLC_PWM_CTL2 = PSB_RVDC32(BLC_PWM_CTL2);
+ dev_priv->saveLVDS = PSB_RVDC32(LVDS);
+ dev_priv->savePFIT_CONTROL = PSB_RVDC32(PFIT_CONTROL);
+ dev_priv->savePP_ON_DELAYS = PSB_RVDC32(LVDSPP_ON);
+ dev_priv->savePP_OFF_DELAYS = PSB_RVDC32(LVDSPP_OFF);
+ dev_priv->savePP_DIVISOR = PSB_RVDC32(PP_CYCLE);
+
+ /* HW overlay */
+ dev_priv->saveOV_OVADD = PSB_RVDC32(OV_OVADD);
+ dev_priv->saveOV_OGAMC0 = PSB_RVDC32(OV_OGAMC0);
+ dev_priv->saveOV_OGAMC1 = PSB_RVDC32(OV_OGAMC1);
+ dev_priv->saveOV_OGAMC2 = PSB_RVDC32(OV_OGAMC2);
+ dev_priv->saveOV_OGAMC3 = PSB_RVDC32(OV_OGAMC3);
+ dev_priv->saveOV_OGAMC4 = PSB_RVDC32(OV_OGAMC4);
+ dev_priv->saveOV_OGAMC5 = PSB_RVDC32(OV_OGAMC5);
+
+ /* DPST registers */
+ dev_priv->saveHISTOGRAM_INT_CONTROL_REG =
+ PSB_RVDC32(HISTOGRAM_INT_CONTROL);
+ dev_priv->saveHISTOGRAM_LOGIC_CONTROL_REG =
+ PSB_RVDC32(HISTOGRAM_LOGIC_CONTROL);
+ dev_priv->savePWM_CONTROL_LOGIC = PSB_RVDC32(PWM_CONTROL_LOGIC);
+
+ if (dev_priv->iLVDS_enable) {
+ /* Shut down the panel */
+ PSB_WVDC32(0, PP_CONTROL);
+
+ do {
+ pp_stat = PSB_RVDC32(PP_STATUS);
+ } while (pp_stat & 0x80000000);
+
+ /* Turn off the plane */
+ PSB_WVDC32(0x58000000, DSPACNTR);
+ /* Trigger the plane disable */
+ PSB_WVDC32(0, DSPASURF);
+
+ /* Wait ~4 ticks */
+ msleep(4);
+
+ /* Turn off pipe */
+ PSB_WVDC32(0x0, PIPEACONF);
+ /* Wait ~8 ticks */
+ msleep(8);
+
+ /* Turn off PLLs */
+ PSB_WVDC32(0, MRST_DPLL_A);
+ }
+ return 0;
+}
+
+/**
+ * oaktrail_restore_display_registers - restore lost register state
+ * @dev: our DRM device
+ *
+ * Restore register state that was lost during suspend and resume.
+ */
+static int oaktrail_restore_display_registers(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ u32 pp_stat;
+ int i;
+
+ /* Display arbitration + watermarks */
+ PSB_WVDC32(dev_priv->saveDSPARB, DSPARB);
+ PSB_WVDC32(dev_priv->saveDSPFW1, DSPFW1);
+ PSB_WVDC32(dev_priv->saveDSPFW2, DSPFW2);
+ PSB_WVDC32(dev_priv->saveDSPFW3, DSPFW3);
+ PSB_WVDC32(dev_priv->saveDSPFW4, DSPFW4);
+ PSB_WVDC32(dev_priv->saveDSPFW5, DSPFW5);
+ PSB_WVDC32(dev_priv->saveDSPFW6, DSPFW6);
+ PSB_WVDC32(dev_priv->saveCHICKENBIT, DSPCHICKENBIT);
+
+	/* Make sure the VGA plane is off; it initializes to on after reset! */
+ PSB_WVDC32(0x80000000, VGACNTRL);
+
+ /* set the plls */
+ PSB_WVDC32(dev_priv->saveFPA0, MRST_FPA0);
+ PSB_WVDC32(dev_priv->saveFPA1, MRST_FPA1);
+
+ /* Actually enable it */
+ PSB_WVDC32(dev_priv->saveDPLL_A, MRST_DPLL_A);
+ DRM_UDELAY(150);
+
+ /* Restore mode */
+ PSB_WVDC32(dev_priv->saveHTOTAL_A, HTOTAL_A);
+ PSB_WVDC32(dev_priv->saveHBLANK_A, HBLANK_A);
+ PSB_WVDC32(dev_priv->saveHSYNC_A, HSYNC_A);
+ PSB_WVDC32(dev_priv->saveVTOTAL_A, VTOTAL_A);
+ PSB_WVDC32(dev_priv->saveVBLANK_A, VBLANK_A);
+ PSB_WVDC32(dev_priv->saveVSYNC_A, VSYNC_A);
+ PSB_WVDC32(dev_priv->savePIPEASRC, PIPEASRC);
+ PSB_WVDC32(dev_priv->saveBCLRPAT_A, BCLRPAT_A);
+
+ /* Restore performance mode*/
+ PSB_WVDC32(dev_priv->savePERF_MODE, MRST_PERF_MODE);
+
+ /* Enable the pipe*/
+ if (dev_priv->iLVDS_enable)
+ PSB_WVDC32(dev_priv->savePIPEACONF, PIPEACONF);
+
+ /* Set up the plane*/
+ PSB_WVDC32(dev_priv->saveDSPALINOFF, DSPALINOFF);
+ PSB_WVDC32(dev_priv->saveDSPASTRIDE, DSPASTRIDE);
+ PSB_WVDC32(dev_priv->saveDSPATILEOFF, DSPATILEOFF);
+
+ /* Enable the plane */
+ PSB_WVDC32(dev_priv->saveDSPACNTR, DSPACNTR);
+ PSB_WVDC32(dev_priv->saveDSPASURF, DSPASURF);
+
+ /* Enable Cursor A */
+ PSB_WVDC32(dev_priv->saveDSPACURSOR_CTRL, CURACNTR);
+ PSB_WVDC32(dev_priv->saveDSPACURSOR_POS, CURAPOS);
+ PSB_WVDC32(dev_priv->saveDSPACURSOR_BASE, CURABASE);
+
+ /* Restore palette (gamma) */
+ for (i = 0; i < 256; i++)
+ PSB_WVDC32(dev_priv->save_palette_a[i], PALETTE_A + (i << 2));
+
+ if (dev_priv->hdmi_priv)
+ oaktrail_hdmi_restore(dev);
+
+ if (dev_priv->iLVDS_enable) {
+ PSB_WVDC32(dev_priv->saveBLC_PWM_CTL2, BLC_PWM_CTL2);
+ PSB_WVDC32(dev_priv->saveLVDS, LVDS); /*port 61180h*/
+ PSB_WVDC32(dev_priv->savePFIT_CONTROL, PFIT_CONTROL);
+ PSB_WVDC32(dev_priv->savePFIT_PGM_RATIOS, PFIT_PGM_RATIOS);
+ PSB_WVDC32(dev_priv->savePFIT_AUTO_RATIOS, PFIT_AUTO_RATIOS);
+ PSB_WVDC32(dev_priv->saveBLC_PWM_CTL, BLC_PWM_CTL);
+ PSB_WVDC32(dev_priv->savePP_ON_DELAYS, LVDSPP_ON);
+ PSB_WVDC32(dev_priv->savePP_OFF_DELAYS, LVDSPP_OFF);
+ PSB_WVDC32(dev_priv->savePP_DIVISOR, PP_CYCLE);
+ PSB_WVDC32(dev_priv->savePP_CONTROL, PP_CONTROL);
+ }
+
+ /* Wait for cycle delay */
+ do {
+ pp_stat = PSB_RVDC32(PP_STATUS);
+ } while (pp_stat & 0x08000000);
+
+ /* Wait for panel power up */
+ do {
+ pp_stat = PSB_RVDC32(PP_STATUS);
+ } while (pp_stat & 0x10000000);
+
+ /* Restore HW overlay */
+ PSB_WVDC32(dev_priv->saveOV_OVADD, OV_OVADD);
+ PSB_WVDC32(dev_priv->saveOV_OGAMC0, OV_OGAMC0);
+ PSB_WVDC32(dev_priv->saveOV_OGAMC1, OV_OGAMC1);
+ PSB_WVDC32(dev_priv->saveOV_OGAMC2, OV_OGAMC2);
+ PSB_WVDC32(dev_priv->saveOV_OGAMC3, OV_OGAMC3);
+ PSB_WVDC32(dev_priv->saveOV_OGAMC4, OV_OGAMC4);
+ PSB_WVDC32(dev_priv->saveOV_OGAMC5, OV_OGAMC5);
+
+ /* DPST registers */
+ PSB_WVDC32(dev_priv->saveHISTOGRAM_INT_CONTROL_REG,
+ HISTOGRAM_INT_CONTROL);
+ PSB_WVDC32(dev_priv->saveHISTOGRAM_LOGIC_CONTROL_REG,
+ HISTOGRAM_LOGIC_CONTROL);
+ PSB_WVDC32(dev_priv->savePWM_CONTROL_LOGIC, PWM_CONTROL_LOGIC);
+
+ return 0;
+}
+
+/**
+ * oaktrail_power_down - power down the display island
+ * @dev: our DRM device
+ *
+ * Power down the display interface of our device
+ */
+static int oaktrail_power_down(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+	u32 pwr_mask;
+ u32 pwr_sts;
+
+ pwr_mask = PSB_PWRGT_DISPLAY_MASK;
+ outl(pwr_mask, dev_priv->ospm_base + PSB_PM_SSC);
+
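+	/* Poll the power-gate status register until the display islands
+	 * report the requested (powered down) state. */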
+ while (true) {
+ pwr_sts = inl(dev_priv->ospm_base + PSB_PM_SSS);
+ if ((pwr_sts & pwr_mask) == pwr_mask)
+ break;
+ else
+ udelay(10);
+ }
+ return 0;
+}
+
+/*
+ * oaktrail_power_up
+ *
+ * Restore power to the specified island(s) (powergating)
+ */
+static int oaktrail_power_up(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ u32 pwr_mask = PSB_PWRGT_DISPLAY_MASK;
+ u32 pwr_sts, pwr_cnt;
+
+ pwr_cnt = inl(dev_priv->ospm_base + PSB_PM_SSC);
+ pwr_cnt &= ~pwr_mask;
+ outl(pwr_cnt, (dev_priv->ospm_base + PSB_PM_SSC));
+
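+	/* Poll until the display islands report that power is back on. */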
+ while (true) {
+ pwr_sts = inl(dev_priv->ospm_base + PSB_PM_SSS);
+ if ((pwr_sts & pwr_mask) == 0)
+ break;
+ else
+ udelay(10);
+ }
+ return 0;
+}
+
+
+static int oaktrail_chip_setup(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct oaktrail_vbt *vbt = &dev_priv->vbt_data;
+ int ret;
+
+ ret = mid_chip_setup(dev);
+ if (ret < 0)
+ return ret;
+ if (vbt->size == 0) {
+ /* Now pull the BIOS data */
+ gma_intel_opregion_init(dev);
+ psb_intel_init_bios(dev);
+ }
+ return 0;
+}
+
+static void oaktrail_teardown(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct oaktrail_vbt *vbt = &dev_priv->vbt_data;
+
+ oaktrail_hdmi_teardown(dev);
+ if (vbt->size == 0)
+ psb_intel_destroy_bios(dev);
+}
+
+const struct psb_ops oaktrail_chip_ops = {
+ .name = "Oaktrail",
+ .accel_2d = 1,
+ .pipes = 2,
+ .crtcs = 2,
+ .sgx_offset = MRST_SGX_OFFSET,
+
+ .chip_setup = oaktrail_chip_setup,
+ .chip_teardown = oaktrail_teardown,
+ .crtc_helper = &oaktrail_helper_funcs,
+ .crtc_funcs = &psb_intel_crtc_funcs,
+
+ .output_init = oaktrail_output_init,
+
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+ .backlight_init = oaktrail_backlight_init,
+#endif
+
+ .init_pm = oaktrail_init_pm,
+ .save_regs = oaktrail_save_display_registers,
+ .restore_regs = oaktrail_restore_display_registers,
+ .power_down = oaktrail_power_down,
+ .power_up = oaktrail_power_up,
+
+ .i2c_bus = 1,
+};
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
new file mode 100644
index 000000000000..025d30970cc0
--- /dev/null
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
@@ -0,0 +1,865 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Li Peng <peng.li@intel.com>
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "psb_drv.h"
+
+#define HDMI_READ(reg) readl(hdmi_dev->regs + (reg))
+#define HDMI_WRITE(reg, val) writel(val, hdmi_dev->regs + (reg))
+
+#define HDMI_HCR 0x1000
+#define HCR_ENABLE_HDCP (1 << 5)
+#define HCR_ENABLE_AUDIO (1 << 2)
+#define HCR_ENABLE_PIXEL (1 << 1)
+#define HCR_ENABLE_TMDS (1 << 0)
+
+#define HDMI_HICR 0x1004
+#define HDMI_HSR 0x1008
+#define HDMI_HISR 0x100C
+#define HDMI_DETECT_HDP (1 << 0)
+
+#define HDMI_VIDEO_REG 0x3000
+#define HDMI_UNIT_EN (1 << 7)
+#define HDMI_MODE_OUTPUT (1 << 0)
+#define HDMI_HBLANK_A 0x3100
+
+#define HDMI_AUDIO_CTRL 0x4000
+#define HDMI_ENABLE_AUDIO (1 << 0)
+
+#define PCH_HTOTAL_B 0x3100
+#define PCH_HBLANK_B 0x3104
+#define PCH_HSYNC_B 0x3108
+#define PCH_VTOTAL_B 0x310C
+#define PCH_VBLANK_B 0x3110
+#define PCH_VSYNC_B 0x3114
+#define PCH_PIPEBSRC 0x311C
+
+#define PCH_PIPEB_DSL 0x3800
+#define PCH_PIPEB_SLC 0x3804
+#define PCH_PIPEBCONF 0x3808
+#define PCH_PIPEBSTAT 0x3824
+
+#define CDVO_DFT 0x5000
+#define CDVO_SLEWRATE 0x5004
+#define CDVO_STRENGTH 0x5008
+#define CDVO_RCOMP 0x500C
+
+#define DPLL_CTRL 0x6000
+#define DPLL_PDIV_SHIFT 16
+#define DPLL_PDIV_MASK (0xf << 16)
+#define DPLL_PWRDN (1 << 4)
+#define DPLL_RESET (1 << 3)
+#define DPLL_FASTEN (1 << 2)
+#define DPLL_ENSTAT (1 << 1)
+#define DPLL_DITHEN (1 << 0)
+
+#define DPLL_DIV_CTRL 0x6004
+#define DPLL_CLKF_MASK 0xffffffc0
+#define DPLL_CLKR_MASK (0x3f)
+
+#define DPLL_CLK_ENABLE 0x6008
+#define DPLL_EN_DISP (1 << 31)
+#define DPLL_SEL_HDMI (1 << 8)
+#define DPLL_EN_HDMI (1 << 1)
+#define DPLL_EN_VGA (1 << 0)
+
+#define DPLL_ADJUST 0x600C
+#define DPLL_STATUS 0x6010
+#define DPLL_UPDATE 0x6014
+#define DPLL_DFT 0x6020
+
+struct intel_range {
+ int min, max;
+};
+
+struct oaktrail_hdmi_limit {
+ struct intel_range vco, np, nr, nf;
+};
+
+struct oaktrail_hdmi_clock {
+ int np;
+ int nr;
+ int nf;
+ int dot;
+};
+
+#define VCO_MIN 320000
+#define VCO_MAX 1650000
+#define NP_MIN 1
+#define NP_MAX 15
+#define NR_MIN 1
+#define NR_MAX 64
+#define NF_MIN 2
+#define NF_MAX 4095
+
+static const struct oaktrail_hdmi_limit oaktrail_hdmi_limit = {
+ .vco = { .min = VCO_MIN, .max = VCO_MAX },
+ .np = { .min = NP_MIN, .max = NP_MAX },
+ .nr = { .min = NR_MIN, .max = NR_MAX },
+ .nf = { .min = NF_MIN, .max = NF_MAX },
+};
+
+static void wait_for_vblank(struct drm_device *dev)
+{
+ /* FIXME: Can we do this as a sleep ? */
+	/* Wait for 20ms, i.e. one cycle at 50Hz. */
+ mdelay(20);
+}
+
+static void scu_busy_loop(void *scu_base)
+{
+ u32 status = 0;
+ u32 loop_count = 0;
+
+ status = readl(scu_base + 0x04);
+ while (status & 1) {
+		udelay(1); /* SCU processing time is a few microseconds */
+ status = readl(scu_base + 0x04);
+ loop_count++;
+		/* give up if the SCU doesn't clear the busy bit after many retries */
+ if (loop_count > 1000) {
+ DRM_DEBUG_KMS("SCU IPC timed out");
+ return;
+ }
+ }
+}
+
+static void oaktrail_hdmi_reset(struct drm_device *dev)
+{
+ void *base;
+ /* FIXME: at least make these defines */
+ unsigned int scu_ipc_mmio = 0xff11c000;
+ int scu_len = 1024;
+
+ base = ioremap((resource_size_t)scu_ipc_mmio, scu_len);
+ if (base == NULL) {
+ DRM_ERROR("failed to map SCU mmio\n");
+ return;
+ }
+
+ /* scu ipc: assert hdmi controller reset */
+ writel(0xff11d118, base + 0x0c);
+ writel(0x7fffffdf, base + 0x80);
+ writel(0x42005, base + 0x0);
+ scu_busy_loop(base);
+
+ /* scu ipc: de-assert hdmi controller reset */
+ writel(0xff11d118, base + 0x0c);
+ writel(0x7fffffff, base + 0x80);
+ writel(0x42005, base + 0x0);
+ scu_busy_loop(base);
+
+ iounmap(base);
+}
+
+static void oaktrail_hdmi_audio_enable(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+
+ HDMI_WRITE(HDMI_HCR, 0x67);
+ HDMI_READ(HDMI_HCR);
+
+ HDMI_WRITE(0x51a8, 0x10);
+ HDMI_READ(0x51a8);
+
+ HDMI_WRITE(HDMI_AUDIO_CTRL, 0x1);
+ HDMI_READ(HDMI_AUDIO_CTRL);
+}
+
+static void oaktrail_hdmi_audio_disable(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+
+ HDMI_WRITE(0x51a8, 0x0);
+ HDMI_READ(0x51a8);
+
+ HDMI_WRITE(HDMI_AUDIO_CTRL, 0x0);
+ HDMI_READ(HDMI_AUDIO_CTRL);
+
+ HDMI_WRITE(HDMI_HCR, 0x47);
+ HDMI_READ(HDMI_HCR);
+}
+
+void oaktrail_crtc_hdmi_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct drm_device *dev = crtc->dev;
+ u32 temp;
+
+ switch (mode) {
+ case DRM_MODE_DPMS_OFF:
+ /* Disable VGACNTRL */
+ REG_WRITE(VGACNTRL, 0x80000000);
+
+ /* Disable plane */
+ temp = REG_READ(DSPBCNTR);
+ if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
+ REG_WRITE(DSPBCNTR, temp & ~DISPLAY_PLANE_ENABLE);
+ REG_READ(DSPBCNTR);
+ /* Flush the plane changes */
+ REG_WRITE(DSPBSURF, REG_READ(DSPBSURF));
+ REG_READ(DSPBSURF);
+ }
+
+ /* Disable pipe B */
+ temp = REG_READ(PIPEBCONF);
+ if ((temp & PIPEACONF_ENABLE) != 0) {
+ REG_WRITE(PIPEBCONF, temp & ~PIPEACONF_ENABLE);
+ REG_READ(PIPEBCONF);
+ }
+
+ /* Disable LNW Pipes, etc */
+ temp = REG_READ(PCH_PIPEBCONF);
+ if ((temp & PIPEACONF_ENABLE) != 0) {
+ REG_WRITE(PCH_PIPEBCONF, temp & ~PIPEACONF_ENABLE);
+ REG_READ(PCH_PIPEBCONF);
+ }
+ /* wait for pipe off */
+ udelay(150);
+ /* Disable dpll */
+ temp = REG_READ(DPLL_CTRL);
+ if ((temp & DPLL_PWRDN) == 0) {
+ REG_WRITE(DPLL_CTRL, temp | (DPLL_PWRDN | DPLL_RESET));
+ REG_WRITE(DPLL_STATUS, 0x1);
+ }
+ /* wait for dpll off */
+ udelay(150);
+ break;
+ case DRM_MODE_DPMS_ON:
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ /* Enable dpll */
+ temp = REG_READ(DPLL_CTRL);
+ if ((temp & DPLL_PWRDN) != 0) {
+ REG_WRITE(DPLL_CTRL, temp & ~(DPLL_PWRDN | DPLL_RESET));
+ temp = REG_READ(DPLL_CLK_ENABLE);
+ REG_WRITE(DPLL_CLK_ENABLE, temp | DPLL_EN_DISP | DPLL_SEL_HDMI | DPLL_EN_HDMI);
+ REG_READ(DPLL_CLK_ENABLE);
+ }
+ /* wait for dpll warm up */
+ udelay(150);
+
+ /* Enable pipe B */
+ temp = REG_READ(PIPEBCONF);
+ if ((temp & PIPEACONF_ENABLE) == 0) {
+ REG_WRITE(PIPEBCONF, temp | PIPEACONF_ENABLE);
+ REG_READ(PIPEBCONF);
+ }
+
+ /* Enable LNW Pipe B */
+ temp = REG_READ(PCH_PIPEBCONF);
+ if ((temp & PIPEACONF_ENABLE) == 0) {
+ REG_WRITE(PCH_PIPEBCONF, temp | PIPEACONF_ENABLE);
+ REG_READ(PCH_PIPEBCONF);
+ }
+ wait_for_vblank(dev);
+
+ /* Enable plane */
+ temp = REG_READ(DSPBCNTR);
+ if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
+ REG_WRITE(DSPBCNTR, temp | DISPLAY_PLANE_ENABLE);
+ /* Flush the plane changes */
+ REG_WRITE(DSPBSURF, REG_READ(DSPBSURF));
+ REG_READ(DSPBSURF);
+ }
+ psb_intel_crtc_load_lut(crtc);
+ }
+ /* DSPARB */
+ REG_WRITE(DSPARB, 0x00003fbf);
+ /* FW1 */
+ REG_WRITE(0x70034, 0x3f880a0a);
+ /* FW2 */
+ REG_WRITE(0x70038, 0x0b060808);
+ /* FW4 */
+ REG_WRITE(0x70050, 0x08030404);
+ /* FW5 */
+ REG_WRITE(0x70054, 0x04040404);
+ /* LNC Chicken Bits */
+ REG_WRITE(0x70400, 0x4000);
+}
+
+
+static void oaktrail_hdmi_dpms(struct drm_encoder *encoder, int mode)
+{
+ static int dpms_mode = -1;
+
+ struct drm_device *dev = encoder->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+ u32 temp;
+
+ if (dpms_mode == mode)
+ return;
+
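+	/* Writing 0x99 to HDMI_VIDEO_REG turns the video unit on
+	 * (HDMI_UNIT_EN, HDMI_MODE_OUTPUT and related bits); 0 turns it off. */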
+ if (mode != DRM_MODE_DPMS_ON)
+ temp = 0x0;
+ else
+ temp = 0x99;
+
+ dpms_mode = mode;
+ HDMI_WRITE(HDMI_VIDEO_REG, temp);
+}
+
+static unsigned int htotal_calculate(struct drm_display_mode *mode)
+{
+ u32 htotal, new_crtc_htotal;
+
+ htotal = (mode->crtc_hdisplay - 1) | ((mode->crtc_htotal - 1) << 16);
+
+ /*
+ * 1024 x 768 new_crtc_htotal = 0x1024;
+ * 1280 x 1024 new_crtc_htotal = 0x0c34;
+ */
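+	/* Rescale the horizontal total from the pixel clock domain
+	 * (mode->clock, in kHz) to a 200MHz timebase. */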
+ new_crtc_htotal = (mode->crtc_htotal - 1) * 200 * 1000 / mode->clock;
+
+ return (mode->crtc_hdisplay - 1) | (new_crtc_htotal << 16);
+}
+
+static void oaktrail_hdmi_find_dpll(struct drm_crtc *crtc, int target,
+ int refclk, struct oaktrail_hdmi_clock *best_clock)
+{
+ int np_min, np_max, nr_min, nr_max;
+ int np, nr, nf;
+
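+	/* Bound the post divider (np) by the VCO range and the reference
+	 * divider (nr) by the resulting np range, then solve for the feedback
+	 * divider (nf) that lands closest to the target pixel clock. */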
+ np_min = DIV_ROUND_UP(oaktrail_hdmi_limit.vco.min, target * 10);
+ np_max = oaktrail_hdmi_limit.vco.max / (target * 10);
+ if (np_min < oaktrail_hdmi_limit.np.min)
+ np_min = oaktrail_hdmi_limit.np.min;
+ if (np_max > oaktrail_hdmi_limit.np.max)
+ np_max = oaktrail_hdmi_limit.np.max;
+
+ nr_min = DIV_ROUND_UP((refclk * 1000), (target * 10 * np_max));
+ nr_max = DIV_ROUND_UP((refclk * 1000), (target * 10 * np_min));
+ if (nr_min < oaktrail_hdmi_limit.nr.min)
+ nr_min = oaktrail_hdmi_limit.nr.min;
+ if (nr_max > oaktrail_hdmi_limit.nr.max)
+ nr_max = oaktrail_hdmi_limit.nr.max;
+
+ np = DIV_ROUND_UP((refclk * 1000), (target * 10 * nr_max));
+ nr = DIV_ROUND_UP((refclk * 1000), (target * 10 * np));
+ nf = DIV_ROUND_CLOSEST((target * 10 * np * nr), refclk);
+ DRM_DEBUG_KMS("np, nr, nf %d %d %d\n", np, nr, nf);
+
+ /*
+ * 1024 x 768 np = 1; nr = 0x26; nf = 0x0fd8000;
+ * 1280 x 1024 np = 1; nr = 0x17; nf = 0x1034000;
+ */
+ best_clock->np = np;
+ best_clock->nr = nr - 1;
+ best_clock->nf = (nf << 14);
+}
+
+int oaktrail_crtc_hdmi_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+ int pipe = 1;
+ int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
+ int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
+ int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
+ int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
+ int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
+ int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
+ int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
+ int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
+ int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
+ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+ int refclk;
+ struct oaktrail_hdmi_clock clock;
+ u32 dspcntr, pipeconf, dpll, temp;
+ int dspcntr_reg = DSPBCNTR;
+
+ /* Disable the VGA plane that we never use */
+ REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
+
+ /* XXX: Disable the panel fitter if it was on our pipe */
+
+ /* Disable dpll if necessary */
+ dpll = REG_READ(DPLL_CTRL);
+ if ((dpll & DPLL_PWRDN) == 0) {
+ REG_WRITE(DPLL_CTRL, dpll | (DPLL_PWRDN | DPLL_RESET));
+ REG_WRITE(DPLL_DIV_CTRL, 0x00000000);
+ REG_WRITE(DPLL_STATUS, 0x1);
+ }
+ udelay(150);
+
+ /* reset controller: FIXME - can we sort out the ioremap mess ? */
+ iounmap(hdmi_dev->regs);
+ oaktrail_hdmi_reset(dev);
+
+ /* program and enable dpll */
+ refclk = 25000;
+ oaktrail_hdmi_find_dpll(crtc, adjusted_mode->clock, refclk, &clock);
+
+ /* Setting DPLL */
+ dpll = REG_READ(DPLL_CTRL);
+ dpll &= ~DPLL_PDIV_MASK;
+ dpll &= ~(DPLL_PWRDN | DPLL_RESET);
+ REG_WRITE(DPLL_CTRL, 0x00000008);
+ REG_WRITE(DPLL_DIV_CTRL, ((clock.nf << 6) | clock.nr));
+ REG_WRITE(DPLL_ADJUST, ((clock.nf >> 14) - 1));
+ REG_WRITE(DPLL_CTRL, (dpll | (clock.np << DPLL_PDIV_SHIFT) | DPLL_ENSTAT | DPLL_DITHEN));
+ REG_WRITE(DPLL_UPDATE, 0x80000000);
+ REG_WRITE(DPLL_CLK_ENABLE, 0x80050102);
+ udelay(150);
+
+ hdmi_dev->regs = ioremap(hdmi_dev->mmio, hdmi_dev->mmio_len);
+ if (hdmi_dev->regs == NULL) {
+ DRM_ERROR("failed to do hdmi mmio mapping\n");
+ return -ENOMEM;
+ }
+
+ /* configure HDMI */
+ HDMI_WRITE(0x1004, 0x1fd);
+ HDMI_WRITE(0x2000, 0x1);
+ HDMI_WRITE(0x2008, 0x0);
+ HDMI_WRITE(0x3130, 0x8);
+ HDMI_WRITE(0x101c, 0x1800810);
+
+ temp = htotal_calculate(adjusted_mode);
+ REG_WRITE(htot_reg, temp);
+ REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
+ REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));
+ REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) | ((adjusted_mode->crtc_vtotal - 1) << 16));
+ REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) | ((adjusted_mode->crtc_vblank_end - 1) << 16));
+ REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));
+ REG_WRITE(pipesrc_reg,
+ ((mode->crtc_hdisplay - 1) << 16) | (mode->crtc_vdisplay - 1));
+
+ REG_WRITE(PCH_HTOTAL_B, (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16));
+ REG_WRITE(PCH_HBLANK_B, (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
+ REG_WRITE(PCH_HSYNC_B, (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));
+ REG_WRITE(PCH_VTOTAL_B, (adjusted_mode->crtc_vdisplay - 1) | ((adjusted_mode->crtc_vtotal - 1) << 16));
+ REG_WRITE(PCH_VBLANK_B, (adjusted_mode->crtc_vblank_start - 1) | ((adjusted_mode->crtc_vblank_end - 1) << 16));
+ REG_WRITE(PCH_VSYNC_B, (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));
+ REG_WRITE(PCH_PIPEBSRC,
+ ((mode->crtc_hdisplay - 1) << 16) | (mode->crtc_vdisplay - 1));
+
+ temp = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
+ HDMI_WRITE(HDMI_HBLANK_A, ((adjusted_mode->crtc_hdisplay - 1) << 16) | temp);
+
+ REG_WRITE(dspsize_reg,
+ ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
+ REG_WRITE(dsppos_reg, 0);
+
+ /* Flush the plane changes */
+ {
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ crtc_funcs->mode_set_base(crtc, x, y, old_fb);
+ }
+
+ /* Set up the display plane register */
+ dspcntr = REG_READ(dspcntr_reg);
+ dspcntr |= DISPPLANE_GAMMA_ENABLE;
+ dspcntr |= DISPPLANE_SEL_PIPE_B;
+ dspcntr |= DISPLAY_PLANE_ENABLE;
+
+ /* setup pipeconf */
+ pipeconf = REG_READ(pipeconf_reg);
+ pipeconf |= PIPEACONF_ENABLE;
+
+ REG_WRITE(pipeconf_reg, pipeconf);
+ REG_READ(pipeconf_reg);
+
+ REG_WRITE(PCH_PIPEBCONF, pipeconf);
+ REG_READ(PCH_PIPEBCONF);
+ wait_for_vblank(dev);
+
+ REG_WRITE(dspcntr_reg, dspcntr);
+ wait_for_vblank(dev);
+
+ return 0;
+}
+
+static int oaktrail_hdmi_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct drm_psb_private *dev_priv = connector->dev->dev_private;
+ if (mode->clock > 165000)
+ return MODE_CLOCK_HIGH;
+ if (mode->clock < 20000)
+ return MODE_CLOCK_LOW;
+
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
+	/* Assume the worst case of 32 bpp here, since we don't know the real depth */
+ if ((ALIGN(mode->hdisplay * 4, 64) * mode->vdisplay) >
+ dev_priv->vram_stolen_size)
+ return MODE_MEM;
+
+ return MODE_OK;
+}
+
+static bool oaktrail_hdmi_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static enum drm_connector_status
+oaktrail_hdmi_detect(struct drm_connector *connector, bool force)
+{
+ enum drm_connector_status status;
+ struct drm_device *dev = connector->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+ u32 temp;
+
+ temp = HDMI_READ(HDMI_HSR);
+ DRM_DEBUG_KMS("HDMI_HSR %x\n", temp);
+
+ if ((temp & HDMI_DETECT_HDP) != 0)
+ status = connector_status_connected;
+ else
+ status = connector_status_disconnected;
+
+ return status;
+}
+
+static const unsigned char raw_edid[] = {
+ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x10, 0xac, 0x2f, 0xa0,
+ 0x53, 0x55, 0x33, 0x30, 0x16, 0x13, 0x01, 0x03, 0x0e, 0x3a, 0x24, 0x78,
+ 0xea, 0xe9, 0xf5, 0xac, 0x51, 0x30, 0xb4, 0x25, 0x11, 0x50, 0x54, 0xa5,
+ 0x4b, 0x00, 0x81, 0x80, 0xa9, 0x40, 0x71, 0x4f, 0xb3, 0x00, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x28, 0x3c, 0x80, 0xa0, 0x70, 0xb0,
+ 0x23, 0x40, 0x30, 0x20, 0x36, 0x00, 0x46, 0x6c, 0x21, 0x00, 0x00, 0x1a,
+ 0x00, 0x00, 0x00, 0xff, 0x00, 0x47, 0x4e, 0x37, 0x32, 0x31, 0x39, 0x35,
+ 0x52, 0x30, 0x33, 0x55, 0x53, 0x0a, 0x00, 0x00, 0x00, 0xfc, 0x00, 0x44,
+ 0x45, 0x4c, 0x4c, 0x20, 0x32, 0x37, 0x30, 0x39, 0x57, 0x0a, 0x20, 0x20,
+ 0x00, 0x00, 0x00, 0xfd, 0x00, 0x38, 0x4c, 0x1e, 0x53, 0x11, 0x00, 0x0a,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x00, 0x8d
+};
+
+static int oaktrail_hdmi_get_modes(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct i2c_adapter *i2c_adap;
+ struct edid *edid;
+ struct drm_display_mode *mode, *t;
+ int i = 0, ret = 0;
+
+ i2c_adap = i2c_get_adapter(3);
+ if (i2c_adap == NULL) {
+ DRM_ERROR("No ddc adapter available!\n");
+ edid = (struct edid *)raw_edid;
+ } else {
+ edid = (struct edid *)raw_edid;
+ /* FIXME ? edid = drm_get_edid(connector, i2c_adap); */
+ }
+
+ if (edid) {
+ drm_mode_connector_update_edid_property(connector, edid);
+ ret = drm_add_edid_modes(connector, edid);
+ connector->display_info.raw_edid = NULL;
+ }
+
+ /*
+ * prune modes that require frame buffer bigger than stolen mem
+ */
+ list_for_each_entry_safe(mode, t, &connector->probed_modes, head) {
+ if ((mode->hdisplay * mode->vdisplay * 4) >= dev_priv->vram_stolen_size) {
+ i++;
+ drm_mode_remove(connector, mode);
+ }
+ }
+ return ret - i;
+}
+
+static void oaktrail_hdmi_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+
+ oaktrail_hdmi_audio_enable(dev);
+ return;
+}
+
+static void oaktrail_hdmi_destroy(struct drm_connector *connector)
+{
+ return;
+}
+
+static const struct drm_encoder_helper_funcs oaktrail_hdmi_helper_funcs = {
+ .dpms = oaktrail_hdmi_dpms,
+ .mode_fixup = oaktrail_hdmi_mode_fixup,
+ .prepare = psb_intel_encoder_prepare,
+ .mode_set = oaktrail_hdmi_mode_set,
+ .commit = psb_intel_encoder_commit,
+};
+
+static const struct drm_connector_helper_funcs
+ oaktrail_hdmi_connector_helper_funcs = {
+ .get_modes = oaktrail_hdmi_get_modes,
+ .mode_valid = oaktrail_hdmi_mode_valid,
+ .best_encoder = psb_intel_best_encoder,
+};
+
+static const struct drm_connector_funcs oaktrail_hdmi_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = oaktrail_hdmi_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = oaktrail_hdmi_destroy,
+};
+
+static void oaktrail_hdmi_enc_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+}
+
+static const struct drm_encoder_funcs oaktrail_hdmi_enc_funcs = {
+ .destroy = oaktrail_hdmi_enc_destroy,
+};
+
+void oaktrail_hdmi_init(struct drm_device *dev,
+ struct psb_intel_mode_device *mode_dev)
+{
+ struct psb_intel_encoder *psb_intel_encoder;
+ struct psb_intel_connector *psb_intel_connector;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+
+ psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL);
+ if (!psb_intel_encoder)
+ return;
+
+ psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL);
+ if (!psb_intel_connector)
+ goto failed_connector;
+
+ connector = &psb_intel_connector->base;
+ encoder = &psb_intel_encoder->base;
+ drm_connector_init(dev, connector,
+ &oaktrail_hdmi_connector_funcs,
+ DRM_MODE_CONNECTOR_DVID);
+
+ drm_encoder_init(dev, encoder,
+ &oaktrail_hdmi_enc_funcs,
+ DRM_MODE_ENCODER_TMDS);
+
+ psb_intel_connector_attach_encoder(psb_intel_connector,
+ psb_intel_encoder);
+
+ psb_intel_encoder->type = INTEL_OUTPUT_HDMI;
+ drm_encoder_helper_add(encoder, &oaktrail_hdmi_helper_funcs);
+ drm_connector_helper_add(connector, &oaktrail_hdmi_connector_helper_funcs);
+
+ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+ drm_sysfs_connector_add(connector);
+
+ return;
+
+failed_connector:
+ kfree(psb_intel_encoder);
+}
+
+static DEFINE_PCI_DEVICE_TABLE(hdmi_ids) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080d) },
+ {}
+};
+
+void oaktrail_hdmi_setup(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct pci_dev *pdev;
+ struct oaktrail_hdmi_dev *hdmi_dev;
+ int ret;
+
+ pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x080d, NULL);
+ if (!pdev)
+ return;
+
+ hdmi_dev = kzalloc(sizeof(struct oaktrail_hdmi_dev), GFP_KERNEL);
+ if (!hdmi_dev) {
+ dev_err(dev->dev, "failed to allocate memory\n");
+ goto out;
+ }
+
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ dev_err(dev->dev, "failed to enable hdmi controller\n");
+ goto free;
+ }
+
+ hdmi_dev->mmio = pci_resource_start(pdev, 0);
+ hdmi_dev->mmio_len = pci_resource_len(pdev, 0);
+ hdmi_dev->regs = ioremap(hdmi_dev->mmio, hdmi_dev->mmio_len);
+ if (!hdmi_dev->regs) {
+ dev_err(dev->dev, "failed to map hdmi mmio\n");
+ goto free;
+ }
+
+ hdmi_dev->dev = pdev;
+ pci_set_drvdata(pdev, hdmi_dev);
+
+ /* Initialize i2c controller */
+ ret = oaktrail_hdmi_i2c_init(hdmi_dev->dev);
+ if (ret)
+ dev_err(dev->dev, "HDMI I2C initialization failed\n");
+
+ dev_priv->hdmi_priv = hdmi_dev;
+ oaktrail_hdmi_audio_disable(dev);
+ return;
+
+free:
+ kfree(hdmi_dev);
+out:
+ return;
+}
+
+void oaktrail_hdmi_teardown(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+ struct pci_dev *pdev;
+
+ if (hdmi_dev) {
+ pdev = hdmi_dev->dev;
+ pci_set_drvdata(pdev, NULL);
+ oaktrail_hdmi_i2c_exit(pdev);
+ iounmap(hdmi_dev->regs);
+ kfree(hdmi_dev);
+ pci_dev_put(pdev);
+ }
+}
+
+/* save HDMI register state */
+void oaktrail_hdmi_save(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+ int i;
+
+ /* dpll */
+ hdmi_dev->saveDPLL_CTRL = PSB_RVDC32(DPLL_CTRL);
+ hdmi_dev->saveDPLL_DIV_CTRL = PSB_RVDC32(DPLL_DIV_CTRL);
+ hdmi_dev->saveDPLL_ADJUST = PSB_RVDC32(DPLL_ADJUST);
+ hdmi_dev->saveDPLL_UPDATE = PSB_RVDC32(DPLL_UPDATE);
+ hdmi_dev->saveDPLL_CLK_ENABLE = PSB_RVDC32(DPLL_CLK_ENABLE);
+
+ /* pipe B */
+ dev_priv->savePIPEBCONF = PSB_RVDC32(PIPEBCONF);
+ dev_priv->savePIPEBSRC = PSB_RVDC32(PIPEBSRC);
+ dev_priv->saveHTOTAL_B = PSB_RVDC32(HTOTAL_B);
+ dev_priv->saveHBLANK_B = PSB_RVDC32(HBLANK_B);
+ dev_priv->saveHSYNC_B = PSB_RVDC32(HSYNC_B);
+ dev_priv->saveVTOTAL_B = PSB_RVDC32(VTOTAL_B);
+ dev_priv->saveVBLANK_B = PSB_RVDC32(VBLANK_B);
+ dev_priv->saveVSYNC_B = PSB_RVDC32(VSYNC_B);
+
+ hdmi_dev->savePCH_PIPEBCONF = PSB_RVDC32(PCH_PIPEBCONF);
+ hdmi_dev->savePCH_PIPEBSRC = PSB_RVDC32(PCH_PIPEBSRC);
+ hdmi_dev->savePCH_HTOTAL_B = PSB_RVDC32(PCH_HTOTAL_B);
+ hdmi_dev->savePCH_HBLANK_B = PSB_RVDC32(PCH_HBLANK_B);
+ hdmi_dev->savePCH_HSYNC_B = PSB_RVDC32(PCH_HSYNC_B);
+ hdmi_dev->savePCH_VTOTAL_B = PSB_RVDC32(PCH_VTOTAL_B);
+ hdmi_dev->savePCH_VBLANK_B = PSB_RVDC32(PCH_VBLANK_B);
+ hdmi_dev->savePCH_VSYNC_B = PSB_RVDC32(PCH_VSYNC_B);
+
+ /* plane */
+ dev_priv->saveDSPBCNTR = PSB_RVDC32(DSPBCNTR);
+ dev_priv->saveDSPBSTRIDE = PSB_RVDC32(DSPBSTRIDE);
+ dev_priv->saveDSPBADDR = PSB_RVDC32(DSPBBASE);
+ dev_priv->saveDSPBSURF = PSB_RVDC32(DSPBSURF);
+ dev_priv->saveDSPBLINOFF = PSB_RVDC32(DSPBLINOFF);
+ dev_priv->saveDSPBTILEOFF = PSB_RVDC32(DSPBTILEOFF);
+
+ /* cursor B */
+ dev_priv->saveDSPBCURSOR_CTRL = PSB_RVDC32(CURBCNTR);
+ dev_priv->saveDSPBCURSOR_BASE = PSB_RVDC32(CURBBASE);
+ dev_priv->saveDSPBCURSOR_POS = PSB_RVDC32(CURBPOS);
+
+ /* save palette */
+ for (i = 0; i < 256; i++)
+ dev_priv->save_palette_b[i] = PSB_RVDC32(PALETTE_B + (i << 2));
+}
+
+/* restore HDMI register state */
+void oaktrail_hdmi_restore(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+ int i;
+
+ /* dpll */
+ PSB_WVDC32(hdmi_dev->saveDPLL_CTRL, DPLL_CTRL);
+ PSB_WVDC32(hdmi_dev->saveDPLL_DIV_CTRL, DPLL_DIV_CTRL);
+ PSB_WVDC32(hdmi_dev->saveDPLL_ADJUST, DPLL_ADJUST);
+ PSB_WVDC32(hdmi_dev->saveDPLL_UPDATE, DPLL_UPDATE);
+ PSB_WVDC32(hdmi_dev->saveDPLL_CLK_ENABLE, DPLL_CLK_ENABLE);
+ DRM_UDELAY(150);
+
+ /* pipe */
+ PSB_WVDC32(dev_priv->savePIPEBSRC, PIPEBSRC);
+ PSB_WVDC32(dev_priv->saveHTOTAL_B, HTOTAL_B);
+ PSB_WVDC32(dev_priv->saveHBLANK_B, HBLANK_B);
+ PSB_WVDC32(dev_priv->saveHSYNC_B, HSYNC_B);
+ PSB_WVDC32(dev_priv->saveVTOTAL_B, VTOTAL_B);
+ PSB_WVDC32(dev_priv->saveVBLANK_B, VBLANK_B);
+ PSB_WVDC32(dev_priv->saveVSYNC_B, VSYNC_B);
+
+ PSB_WVDC32(hdmi_dev->savePCH_PIPEBSRC, PCH_PIPEBSRC);
+ PSB_WVDC32(hdmi_dev->savePCH_HTOTAL_B, PCH_HTOTAL_B);
+ PSB_WVDC32(hdmi_dev->savePCH_HBLANK_B, PCH_HBLANK_B);
+ PSB_WVDC32(hdmi_dev->savePCH_HSYNC_B, PCH_HSYNC_B);
+ PSB_WVDC32(hdmi_dev->savePCH_VTOTAL_B, PCH_VTOTAL_B);
+ PSB_WVDC32(hdmi_dev->savePCH_VBLANK_B, PCH_VBLANK_B);
+ PSB_WVDC32(hdmi_dev->savePCH_VSYNC_B, PCH_VSYNC_B);
+
+ PSB_WVDC32(dev_priv->savePIPEBCONF, PIPEBCONF);
+ PSB_WVDC32(hdmi_dev->savePCH_PIPEBCONF, PCH_PIPEBCONF);
+
+ /* plane */
+ PSB_WVDC32(dev_priv->saveDSPBLINOFF, DSPBLINOFF);
+ PSB_WVDC32(dev_priv->saveDSPBSTRIDE, DSPBSTRIDE);
+ PSB_WVDC32(dev_priv->saveDSPBTILEOFF, DSPBTILEOFF);
+ PSB_WVDC32(dev_priv->saveDSPBCNTR, DSPBCNTR);
+ PSB_WVDC32(dev_priv->saveDSPBSURF, DSPBSURF);
+
+ /* cursor B */
+ PSB_WVDC32(dev_priv->saveDSPBCURSOR_CTRL, CURBCNTR);
+ PSB_WVDC32(dev_priv->saveDSPBCURSOR_POS, CURBPOS);
+ PSB_WVDC32(dev_priv->saveDSPBCURSOR_BASE, CURBBASE);
+
+ /* restore palette */
+ for (i = 0; i < 256; i++)
+ PSB_WVDC32(dev_priv->save_palette_b[i], PALETTE_B + (i << 2));
+}
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
new file mode 100644
index 000000000000..705440874ac0
--- /dev/null
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
@@ -0,0 +1,328 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Li Peng <peng.li@intel.com>
+ */
+
+#include <linux/export.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include "psb_drv.h"
+
+#define HDMI_READ(reg) readl(hdmi_dev->regs + (reg))
+#define HDMI_WRITE(reg, val) writel(val, hdmi_dev->regs + (reg))
+
+#define HDMI_HCR 0x1000
+#define HCR_DETECT_HDP (1 << 6)
+#define HCR_ENABLE_HDCP (1 << 5)
+#define HCR_ENABLE_AUDIO (1 << 2)
+#define HCR_ENABLE_PIXEL (1 << 1)
+#define HCR_ENABLE_TMDS (1 << 0)
+#define HDMI_HICR 0x1004
+#define HDMI_INTR_I2C_ERROR (1 << 4)
+#define HDMI_INTR_I2C_FULL (1 << 3)
+#define HDMI_INTR_I2C_DONE (1 << 2)
+#define HDMI_INTR_HPD (1 << 0)
+#define HDMI_HSR 0x1008
+#define HDMI_HISR 0x100C
+#define HDMI_HI2CRDB0 0x1200
+#define HDMI_HI2CHCR 0x1240
+#define HI2C_HDCP_WRITE (0 << 2)
+#define HI2C_HDCP_RI_READ (1 << 2)
+#define HI2C_HDCP_READ (2 << 2)
+#define HI2C_EDID_READ (3 << 2)
+#define HI2C_READ_CONTINUE (1 << 1)
+#define HI2C_ENABLE_TRANSACTION (1 << 0)
+
+#define HDMI_ICRH 0x1100
+#define HDMI_HI2CTDR0 0x1244
+#define HDMI_HI2CTDR1 0x1248
+
+#define I2C_STAT_INIT 0
+#define I2C_READ_DONE 1
+#define I2C_TRANSACTION_DONE 2
+
+struct hdmi_i2c_dev {
+ struct i2c_adapter *adap;
+ struct mutex i2c_lock;
+ struct completion complete;
+ int status;
+ struct i2c_msg *msg;
+ int buf_offset;
+};
+
+static void hdmi_i2c_irq_enable(struct oaktrail_hdmi_dev *hdmi_dev)
+{
+ u32 temp;
+
+ temp = HDMI_READ(HDMI_HICR);
+ temp |= (HDMI_INTR_I2C_ERROR | HDMI_INTR_I2C_FULL | HDMI_INTR_I2C_DONE);
+ HDMI_WRITE(HDMI_HICR, temp);
+ HDMI_READ(HDMI_HICR);
+}
+
+static void hdmi_i2c_irq_disable(struct oaktrail_hdmi_dev *hdmi_dev)
+{
+ HDMI_WRITE(HDMI_HICR, 0x0);
+ HDMI_READ(HDMI_HICR);
+}
+
+static int xfer_read(struct i2c_adapter *adap, struct i2c_msg *pmsg)
+{
+ struct oaktrail_hdmi_dev *hdmi_dev = i2c_get_adapdata(adap);
+ struct hdmi_i2c_dev *i2c_dev = hdmi_dev->i2c_dev;
+ u32 temp;
+
+ i2c_dev->status = I2C_STAT_INIT;
+ i2c_dev->msg = pmsg;
+ i2c_dev->buf_offset = 0;
+ INIT_COMPLETION(i2c_dev->complete);
+
+ /* Enable I2C transaction */
+ temp = ((pmsg->len) << 20) | HI2C_EDID_READ | HI2C_ENABLE_TRANSACTION;
+ HDMI_WRITE(HDMI_HI2CHCR, temp);
+ HDMI_READ(HDMI_HI2CHCR);
+
+ while (i2c_dev->status != I2C_TRANSACTION_DONE)
+ wait_for_completion_interruptible_timeout(&i2c_dev->complete,
+ 10 * HZ);
+
+ return 0;
+}
+
+static int xfer_write(struct i2c_adapter *adap, struct i2c_msg *pmsg)
+{
+ /*
+	 * XXX: an i2c write doesn't seem to be needed for EDID probing, so do nothing
+ */
+ return 0;
+}
+
+static int oaktrail_hdmi_i2c_access(struct i2c_adapter *adap,
+ struct i2c_msg *pmsg,
+ int num)
+{
+ struct oaktrail_hdmi_dev *hdmi_dev = i2c_get_adapdata(adap);
+ struct hdmi_i2c_dev *i2c_dev = hdmi_dev->i2c_dev;
+ int i, err = 0;
+
+ mutex_lock(&i2c_dev->i2c_lock);
+
+ /* Enable i2c unit */
+ HDMI_WRITE(HDMI_ICRH, 0x00008760);
+
+ /* Enable irq */
+ hdmi_i2c_irq_enable(hdmi_dev);
+ for (i = 0; i < num; i++) {
+ if (pmsg->len && pmsg->buf) {
+ if (pmsg->flags & I2C_M_RD)
+ err = xfer_read(adap, pmsg);
+ else
+ err = xfer_write(adap, pmsg);
+ }
+ pmsg++; /* next message */
+ }
+
+ /* Disable irq */
+ hdmi_i2c_irq_disable(hdmi_dev);
+
+ mutex_unlock(&i2c_dev->i2c_lock);
+
+ return i;
+}
+
+static u32 oaktrail_hdmi_i2c_func(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_10BIT_ADDR;
+}
+
+static const struct i2c_algorithm oaktrail_hdmi_i2c_algorithm = {
+ .master_xfer = oaktrail_hdmi_i2c_access,
+ .functionality = oaktrail_hdmi_i2c_func,
+};
+
+static struct i2c_adapter oaktrail_hdmi_i2c_adapter = {
+ .name = "oaktrail_hdmi_i2c",
+ .nr = 3,
+ .owner = THIS_MODULE,
+ .class = I2C_CLASS_DDC,
+ .algo = &oaktrail_hdmi_i2c_algorithm,
+};
+
+static void hdmi_i2c_read(struct oaktrail_hdmi_dev *hdmi_dev)
+{
+ struct hdmi_i2c_dev *i2c_dev = hdmi_dev->i2c_dev;
+ struct i2c_msg *msg = i2c_dev->msg;
+ u8 *buf = msg->buf;
+ u32 temp;
+ int i, offset;
+
+ offset = i2c_dev->buf_offset;
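+	/* Drain the 16-dword (64 byte) read data buffer into the message */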
+ for (i = 0; i < 0x10; i++) {
+ temp = HDMI_READ(HDMI_HI2CRDB0 + (i * 4));
+ memcpy(buf + (offset + i * 4), &temp, 4);
+ }
+ i2c_dev->buf_offset += (0x10 * 4);
+
+ /* clearing read buffer full intr */
+ temp = HDMI_READ(HDMI_HISR);
+ HDMI_WRITE(HDMI_HISR, temp | HDMI_INTR_I2C_FULL);
+ HDMI_READ(HDMI_HISR);
+
+ /* continue read transaction */
+ temp = HDMI_READ(HDMI_HI2CHCR);
+ HDMI_WRITE(HDMI_HI2CHCR, temp | HI2C_READ_CONTINUE);
+ HDMI_READ(HDMI_HI2CHCR);
+
+ i2c_dev->status = I2C_READ_DONE;
+ return;
+}
+
+static void hdmi_i2c_transaction_done(struct oaktrail_hdmi_dev *hdmi_dev)
+{
+ struct hdmi_i2c_dev *i2c_dev = hdmi_dev->i2c_dev;
+ u32 temp;
+
+ /* clear transaction done intr */
+ temp = HDMI_READ(HDMI_HISR);
+ HDMI_WRITE(HDMI_HISR, temp | HDMI_INTR_I2C_DONE);
+ HDMI_READ(HDMI_HISR);
+
+
+ temp = HDMI_READ(HDMI_HI2CHCR);
+ HDMI_WRITE(HDMI_HI2CHCR, temp & ~HI2C_ENABLE_TRANSACTION);
+ HDMI_READ(HDMI_HI2CHCR);
+
+ i2c_dev->status = I2C_TRANSACTION_DONE;
+ return;
+}
+
+static irqreturn_t oaktrail_hdmi_i2c_handler(int this_irq, void *dev)
+{
+ struct oaktrail_hdmi_dev *hdmi_dev = dev;
+ struct hdmi_i2c_dev *i2c_dev = hdmi_dev->i2c_dev;
+ u32 stat;
+
+ stat = HDMI_READ(HDMI_HISR);
+
+ if (stat & HDMI_INTR_HPD) {
+ HDMI_WRITE(HDMI_HISR, stat | HDMI_INTR_HPD);
+ HDMI_READ(HDMI_HISR);
+ }
+
+ if (stat & HDMI_INTR_I2C_FULL)
+ hdmi_i2c_read(hdmi_dev);
+
+ if (stat & HDMI_INTR_I2C_DONE)
+ hdmi_i2c_transaction_done(hdmi_dev);
+
+ complete(&i2c_dev->complete);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * choose alternate function 2 of GPIO pin 52, 53,
+ * which is used by HDMI I2C logic
+ */
+static void oaktrail_hdmi_i2c_gpio_fix(void)
+{
+	void __iomem *base;
+ unsigned int gpio_base = 0xff12c000;
+ int gpio_len = 0x1000;
+ u32 temp;
+
+ base = ioremap((resource_size_t)gpio_base, gpio_len);
+ if (base == NULL) {
+ DRM_ERROR("gpio ioremap fail\n");
+ return;
+ }
+
+ temp = readl(base + 0x44);
+ DRM_DEBUG_DRIVER("old gpio val %x\n", temp);
+ writel((temp | 0x00000a00), (base + 0x44));
+ temp = readl(base + 0x44);
+ DRM_DEBUG_DRIVER("new gpio val %x\n", temp);
+
+ iounmap(base);
+}
+
+int oaktrail_hdmi_i2c_init(struct pci_dev *dev)
+{
+ struct oaktrail_hdmi_dev *hdmi_dev;
+ struct hdmi_i2c_dev *i2c_dev;
+ int ret;
+
+ hdmi_dev = pci_get_drvdata(dev);
+
+ i2c_dev = kzalloc(sizeof(struct hdmi_i2c_dev), GFP_KERNEL);
+ if (i2c_dev == NULL) {
+ DRM_ERROR("Can't allocate interface\n");
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ i2c_dev->adap = &oaktrail_hdmi_i2c_adapter;
+ i2c_dev->status = I2C_STAT_INIT;
+ init_completion(&i2c_dev->complete);
+ mutex_init(&i2c_dev->i2c_lock);
+ i2c_set_adapdata(&oaktrail_hdmi_i2c_adapter, hdmi_dev);
+ hdmi_dev->i2c_dev = i2c_dev;
+
+ /* Enable HDMI I2C function on gpio */
+ oaktrail_hdmi_i2c_gpio_fix();
+
+ /* request irq */
+ ret = request_irq(dev->irq, oaktrail_hdmi_i2c_handler, IRQF_SHARED,
+ oaktrail_hdmi_i2c_adapter.name, hdmi_dev);
+ if (ret) {
+ DRM_ERROR("Failed to request IRQ for I2C controller\n");
+ goto err;
+ }
+
+ /* Adapter registration */
+ ret = i2c_add_numbered_adapter(&oaktrail_hdmi_i2c_adapter);
+ return ret;
+
+err:
+ kfree(i2c_dev);
+exit:
+ return ret;
+}
+
+void oaktrail_hdmi_i2c_exit(struct pci_dev *dev)
+{
+ struct oaktrail_hdmi_dev *hdmi_dev;
+ struct hdmi_i2c_dev *i2c_dev;
+
+ hdmi_dev = pci_get_drvdata(dev);
+ if (i2c_del_adapter(&oaktrail_hdmi_i2c_adapter))
+ DRM_DEBUG_DRIVER("Failed to delete hdmi-i2c adapter\n");
+
+	free_irq(dev->irq, hdmi_dev);
+
+	i2c_dev = hdmi_dev->i2c_dev;
+	kfree(i2c_dev);
+}
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c
new file mode 100644
index 000000000000..238bbe105304
--- /dev/null
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c
@@ -0,0 +1,449 @@
+/*
+ * Copyright © 2006-2009 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Dave Airlie <airlied@linux.ie>
+ * Jesse Barnes <jesse.barnes@intel.com>
+ */
+
+#include <linux/i2c.h>
+#include <drm/drmP.h>
+#include <asm/mrst.h>
+
+#include "intel_bios.h"
+#include "psb_drv.h"
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "power.h"
+#include <linux/pm_runtime.h>
+
+/*
+ * The max/min PWM frequency in BPCR[31:17]. The smallest number that fits
+ * in this 15-bit field is 1 (not 0); it is then shifted left by one bit to
+ * get the actual 16-bit value that the 15 bits correspond to.
+ */
+#define MRST_BLC_MAX_PWM_REG_FREQ 0xFFFF
+#define BRIGHTNESS_MAX_LEVEL 100
+
+/**
+ * oaktrail_lvds_set_power - set the power state of the panel
+ * @dev: drm device
+ * @psb_intel_encoder: the LVDS encoder
+ * @on: true to power the panel on, false to power it off
+ */
+static void oaktrail_lvds_set_power(struct drm_device *dev,
+ struct psb_intel_encoder *psb_intel_encoder,
+ bool on)
+{
+ u32 pp_status;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ if (!gma_power_begin(dev, true))
+ return;
+
+ if (on) {
+ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
+ POWER_TARGET_ON);
+ do {
+ pp_status = REG_READ(PP_STATUS);
+ } while ((pp_status & (PP_ON | PP_READY)) == PP_READY);
+ dev_priv->is_lvds_on = true;
+ if (dev_priv->ops->lvds_bl_power)
+ dev_priv->ops->lvds_bl_power(dev, true);
+ } else {
+ if (dev_priv->ops->lvds_bl_power)
+ dev_priv->ops->lvds_bl_power(dev, false);
+ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
+ ~POWER_TARGET_ON);
+ do {
+ pp_status = REG_READ(PP_STATUS);
+ } while (pp_status & PP_ON);
+ dev_priv->is_lvds_on = false;
+ pm_request_idle(&dev->pdev->dev);
+ }
+ gma_power_end(dev);
+}
+
+static void oaktrail_lvds_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct psb_intel_encoder *psb_intel_encoder =
+ to_psb_intel_encoder(encoder);
+
+ if (mode == DRM_MODE_DPMS_ON)
+ oaktrail_lvds_set_power(dev, psb_intel_encoder, true);
+ else
+ oaktrail_lvds_set_power(dev, psb_intel_encoder, false);
+
+ /* XXX: We never power down the LVDS pairs. */
+}
+
+static void oaktrail_lvds_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct drm_connector *connector = NULL;
+ struct drm_crtc *crtc = encoder->crtc;
+ u32 lvds_port;
+ uint64_t v = DRM_MODE_SCALE_FULLSCREEN;
+
+ if (!gma_power_begin(dev, true))
+ return;
+
+ /*
+ * The LVDS pin pair will already have been turned on in the
+ * psb_intel_crtc_mode_set since it has a large impact on the DPLL
+ * settings.
+ */
+ lvds_port = (REG_READ(LVDS) &
+ (~LVDS_PIPEB_SELECT)) |
+ LVDS_PORT_EN |
+ LVDS_BORDER_EN;
+
+	/* If the firmware says to dither on Moorestown, or the BIOS says so
+	   on Oaktrail, then enable dithering */
+ if (mode_dev->panel_wants_dither || dev_priv->lvds_dither)
+ lvds_port |= MRST_PANEL_8TO6_DITHER_ENABLE;
+
+ REG_WRITE(LVDS, lvds_port);
+
+ /* Find the connector we're trying to set up */
+ list_for_each_entry(connector, &mode_config->connector_list, head) {
+ if (!connector->encoder || connector->encoder->crtc != crtc)
+ continue;
+ }
+
+ if (!connector) {
+ DRM_ERROR("Couldn't find connector when setting mode");
+ return;
+ }
+
+ drm_connector_property_get_value(
+ connector,
+ dev->mode_config.scaling_mode_property,
+ &v);
+
+ if (v == DRM_MODE_SCALE_NO_SCALE)
+ REG_WRITE(PFIT_CONTROL, 0);
+ else if (v == DRM_MODE_SCALE_ASPECT) {
+ if ((mode->vdisplay != adjusted_mode->crtc_vdisplay) ||
+ (mode->hdisplay != adjusted_mode->crtc_hdisplay)) {
+ if ((adjusted_mode->crtc_hdisplay * mode->vdisplay) ==
+ (mode->hdisplay * adjusted_mode->crtc_vdisplay))
+ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
+ else if ((adjusted_mode->crtc_hdisplay *
+ mode->vdisplay) > (mode->hdisplay *
+ adjusted_mode->crtc_vdisplay))
+ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE |
+ PFIT_SCALING_MODE_PILLARBOX);
+ else
+ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE |
+ PFIT_SCALING_MODE_LETTERBOX);
+ } else
+ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
+ } else /*(v == DRM_MODE_SCALE_FULLSCREEN)*/
+ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
+
+ gma_power_end(dev);
+}
+
+static void oaktrail_lvds_prepare(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_encoder *psb_intel_encoder =
+ to_psb_intel_encoder(encoder);
+ struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+
+ if (!gma_power_begin(dev, true))
+ return;
+
+ mode_dev->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
+ mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL &
+ BACKLIGHT_DUTY_CYCLE_MASK);
+ oaktrail_lvds_set_power(dev, psb_intel_encoder, false);
+ gma_power_end(dev);
+}
+
+static u32 oaktrail_lvds_get_max_backlight(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ u32 ret;
+
+ if (gma_power_begin(dev, false)) {
+ ret = ((REG_READ(BLC_PWM_CTL) &
+ BACKLIGHT_MODULATION_FREQ_MASK) >>
+ BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
+
+ gma_power_end(dev);
+ } else
+ ret = ((dev_priv->saveBLC_PWM_CTL &
+ BACKLIGHT_MODULATION_FREQ_MASK) >>
+ BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
+
+ return ret;
+}
+
+static void oaktrail_lvds_commit(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_encoder *psb_intel_encoder =
+ to_psb_intel_encoder(encoder);
+ struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+
+ if (mode_dev->backlight_duty_cycle == 0)
+ mode_dev->backlight_duty_cycle =
+ oaktrail_lvds_get_max_backlight(dev);
+ oaktrail_lvds_set_power(dev, psb_intel_encoder, true);
+}
+
+static const struct drm_encoder_helper_funcs oaktrail_lvds_helper_funcs = {
+ .dpms = oaktrail_lvds_dpms,
+ .mode_fixup = psb_intel_lvds_mode_fixup,
+ .prepare = oaktrail_lvds_prepare,
+ .mode_set = oaktrail_lvds_mode_set,
+ .commit = oaktrail_lvds_commit,
+};
+
+static struct drm_display_mode lvds_configuration_modes[] = {
+ /* hard coded fixed mode for TPO LTPS LPJ040K001A */
+ { DRM_MODE("800x480", DRM_MODE_TYPE_DRIVER, 33264, 800, 836,
+ 846, 1056, 0, 480, 489, 491, 525, 0, 0) },
+ /* hard coded fixed mode for LVDS 800x480 */
+ { DRM_MODE("800x480", DRM_MODE_TYPE_DRIVER, 30994, 800, 801,
+ 802, 1024, 0, 480, 481, 482, 525, 0, 0) },
+ /* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600@75 */
+ { DRM_MODE("1024x600", DRM_MODE_TYPE_DRIVER, 53990, 1024, 1072,
+ 1104, 1184, 0, 600, 603, 604, 608, 0, 0) },
+ /* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600@75 */
+ { DRM_MODE("1024x600", DRM_MODE_TYPE_DRIVER, 53990, 1024, 1104,
+ 1136, 1184, 0, 600, 603, 604, 608, 0, 0) },
+ /* hard coded fixed mode for Sharp wsvga LVDS 1024x600 */
+ { DRM_MODE("1024x600", DRM_MODE_TYPE_DRIVER, 48885, 1024, 1124,
+ 1204, 1312, 0, 600, 607, 610, 621, 0, 0) },
+ /* hard coded fixed mode for LVDS 1024x768 */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
+ 1184, 1344, 0, 768, 771, 777, 806, 0, 0) },
+ /* hard coded fixed mode for LVDS 1366x768 */
+ { DRM_MODE("1366x768", DRM_MODE_TYPE_DRIVER, 77500, 1366, 1430,
+ 1558, 1664, 0, 768, 769, 770, 776, 0, 0) },
+};
+
+/* Set up mode_dev->panel_fixed_mode from the configuration data. */
+
+static void oaktrail_lvds_get_configuration_mode(struct drm_device *dev,
+ struct psb_intel_mode_device *mode_dev)
+{
+ struct drm_display_mode *mode = NULL;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct oaktrail_timing_info *ti = &dev_priv->gct_data.DTD;
+
+ mode_dev->panel_fixed_mode = NULL;
+
+ /* Use the firmware provided data on Moorestown */
+ if (dev_priv->vbt_data.size != 0x00) { /*if non-zero, then use vbt*/
+ mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+ if (!mode)
+ return;
+
+ mode->hdisplay = (ti->hactive_hi << 8) | ti->hactive_lo;
+ mode->vdisplay = (ti->vactive_hi << 8) | ti->vactive_lo;
+		mode->hsync_start = mode->hdisplay +
+			((ti->hsync_offset_hi << 8) | ti->hsync_offset_lo);
+		mode->hsync_end = mode->hsync_start +
+			((ti->hsync_pulse_width_hi << 8) |
+			 ti->hsync_pulse_width_lo);
+		mode->htotal = mode->hdisplay +
+			((ti->hblank_hi << 8) | ti->hblank_lo);
+		mode->vsync_start = mode->vdisplay +
+			((ti->vsync_offset_hi << 4) | ti->vsync_offset_lo);
+		mode->vsync_end = mode->vsync_start +
+			((ti->vsync_pulse_width_hi << 4) |
+			 ti->vsync_pulse_width_lo);
+		mode->vtotal = mode->vdisplay +
+			((ti->vblank_hi << 8) | ti->vblank_lo);
+ mode->clock = ti->pixel_clock * 10;
+#if 0
+ printk(KERN_INFO "hdisplay is %d\n", mode->hdisplay);
+ printk(KERN_INFO "vdisplay is %d\n", mode->vdisplay);
+ printk(KERN_INFO "HSS is %d\n", mode->hsync_start);
+ printk(KERN_INFO "HSE is %d\n", mode->hsync_end);
+ printk(KERN_INFO "htotal is %d\n", mode->htotal);
+ printk(KERN_INFO "VSS is %d\n", mode->vsync_start);
+ printk(KERN_INFO "VSE is %d\n", mode->vsync_end);
+ printk(KERN_INFO "vtotal is %d\n", mode->vtotal);
+ printk(KERN_INFO "clock is %d\n", mode->clock);
+#endif
+ mode_dev->panel_fixed_mode = mode;
+ }
+
+ /* Use the BIOS VBT mode if available */
+ if (mode_dev->panel_fixed_mode == NULL && mode_dev->vbt_mode)
+ mode_dev->panel_fixed_mode = drm_mode_duplicate(dev,
+ mode_dev->vbt_mode);
+
+ /* Then try the LVDS VBT mode */
+ if (mode_dev->panel_fixed_mode == NULL)
+ if (dev_priv->lfp_lvds_vbt_mode)
+ mode_dev->panel_fixed_mode =
+ drm_mode_duplicate(dev,
+ dev_priv->lfp_lvds_vbt_mode);
+ /* Then guess */
+ if (mode_dev->panel_fixed_mode == NULL)
+ mode_dev->panel_fixed_mode
+ = drm_mode_duplicate(dev, &lvds_configuration_modes[2]);
+
+ drm_mode_set_name(mode_dev->panel_fixed_mode);
+ drm_mode_set_crtcinfo(mode_dev->panel_fixed_mode, 0);
+}
+
+/**
+ * oaktrail_lvds_init - setup LVDS connectors on this device
+ * @dev: drm device
+ * @mode_dev: mode device holding the panel state
+ *
+ * Create the connector, register the LVDS DDC bus, and try to figure out what
+ * modes we can display on the LVDS panel (if present).
+ */
+void oaktrail_lvds_init(struct drm_device *dev,
+ struct psb_intel_mode_device *mode_dev)
+{
+ struct psb_intel_encoder *psb_intel_encoder;
+ struct psb_intel_connector *psb_intel_connector;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct edid *edid;
+ int ret = 0;
+ struct i2c_adapter *i2c_adap;
+ struct drm_display_mode *scan; /* *modes, *bios_mode; */
+
+ psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL);
+ if (!psb_intel_encoder)
+ return;
+
+ psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL);
+ if (!psb_intel_connector)
+ goto failed_connector;
+
+ connector = &psb_intel_connector->base;
+ encoder = &psb_intel_encoder->base;
+ dev_priv->is_lvds_on = true;
+ drm_connector_init(dev, connector,
+ &psb_intel_lvds_connector_funcs,
+ DRM_MODE_CONNECTOR_LVDS);
+
+ drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs,
+ DRM_MODE_ENCODER_LVDS);
+
+ psb_intel_connector_attach_encoder(psb_intel_connector,
+ psb_intel_encoder);
+ psb_intel_encoder->type = INTEL_OUTPUT_LVDS;
+
+ drm_encoder_helper_add(encoder, &oaktrail_lvds_helper_funcs);
+ drm_connector_helper_add(connector,
+ &psb_intel_lvds_connector_helper_funcs);
+ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+
+ drm_connector_attach_property(connector,
+ dev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_FULLSCREEN);
+ drm_connector_attach_property(connector,
+ dev_priv->backlight_property,
+ BRIGHTNESS_MAX_LEVEL);
+
+ mode_dev->panel_wants_dither = false;
+ if (dev_priv->vbt_data.size != 0x00)
+ mode_dev->panel_wants_dither = (dev_priv->gct_data.
+ Panel_Port_Control & MRST_PANEL_8TO6_DITHER_ENABLE);
+ if (dev_priv->lvds_dither)
+ mode_dev->panel_wants_dither = 1;
+
+ /*
+ * LVDS discovery:
+ * 1) check for EDID on DDC
+ * 2) check for VBT data
+ * 3) check to see if LVDS is already on
+ * if none of the above, no panel
+ * 4) make sure lid is open
+ * if closed, act like it's not there for now
+ */
+
+ i2c_adap = i2c_get_adapter(dev_priv->ops->i2c_bus);
+ if (i2c_adap == NULL)
+ dev_err(dev->dev, "No ddc adapter available!\n");
+ /*
+ * Attempt to get the fixed panel mode from DDC. Assume that the
+ * preferred mode is the right one.
+ */
+ if (i2c_adap) {
+ edid = drm_get_edid(connector, i2c_adap);
+ if (edid) {
+ drm_mode_connector_update_edid_property(connector,
+ edid);
+ ret = drm_add_edid_modes(connector, edid);
+ kfree(edid);
+ }
+
+ list_for_each_entry(scan, &connector->probed_modes, head) {
+ if (scan->type & DRM_MODE_TYPE_PREFERRED) {
+ mode_dev->panel_fixed_mode =
+ drm_mode_duplicate(dev, scan);
+ goto out; /* FIXME: check for quirks */
+ }
+ }
+ }
+ /*
+	 * If we didn't get an EDID, try getting the panel timing
+ * from configuration data
+ */
+ oaktrail_lvds_get_configuration_mode(dev, mode_dev);
+
+ if (mode_dev->panel_fixed_mode) {
+ mode_dev->panel_fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
+ goto out; /* FIXME: check for quirks */
+ }
+
+ /* If we still don't have a mode after all that, give up. */
+ if (!mode_dev->panel_fixed_mode) {
+ dev_err(dev->dev, "Found no modes on the lvds, ignoring the LVDS\n");
+ goto failed_find;
+ }
+
+out:
+ drm_sysfs_connector_add(connector);
+ return;
+
+failed_find:
+ dev_dbg(dev->dev, "No LVDS modes found, disabling.\n");
+ if (psb_intel_encoder->ddc_bus)
+ psb_intel_i2c_destroy(psb_intel_encoder->ddc_bus);
+
+/* failed_ddc: */
+
+ drm_encoder_cleanup(encoder);
+ drm_connector_cleanup(connector);
+ kfree(psb_intel_connector);
+failed_connector:
+ kfree(psb_intel_encoder);
+}
+
diff --git a/drivers/gpu/drm/gma500/power.c b/drivers/gpu/drm/gma500/power.c
new file mode 100644
index 000000000000..94025693bae1
--- /dev/null
+++ b/drivers/gpu/drm/gma500/power.c
@@ -0,0 +1,316 @@
+/**************************************************************************
+ * Copyright (c) 2009-2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Benjamin Defnet <benjamin.r.defnet@intel.com>
+ * Rajesh Poornachandran <rajesh.poornachandran@intel.com>
+ * Massively reworked
+ * Alan Cox <alan@linux.intel.com>
+ */
+
+#include "power.h"
+#include "psb_drv.h"
+#include "psb_reg.h"
+#include "psb_intel_reg.h"
+#include <linux/mutex.h>
+#include <linux/pm_runtime.h>
+
+static struct mutex power_mutex; /* Serialize power ops */
+static spinlock_t power_ctrl_lock; /* Serialize power claim */
+
+/**
+ * gma_power_init - initialise power manager
+ * @dev: our device
+ *
+ * Set up for power management tracking of our hardware.
+ */
+void gma_power_init(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ /* FIXME: Move APM/OSPM base into relevant device code */
+ dev_priv->apm_base = dev_priv->apm_reg & 0xffff;
+ dev_priv->ospm_base &= 0xffff;
+
+ dev_priv->display_power = true; /* We start active */
+ dev_priv->display_count = 0; /* Currently no users */
+ dev_priv->suspended = false; /* And not suspended */
+ spin_lock_init(&power_ctrl_lock);
+ mutex_init(&power_mutex);
+
+ dev_priv->ops->init_pm(dev);
+}
+
+/**
+ * gma_power_uninit - end power manager
+ * @dev: device to end for
+ *
+ * Undo the effects of gma_power_init
+ */
+void gma_power_uninit(struct drm_device *dev)
+{
+ pm_runtime_disable(&dev->pdev->dev);
+ pm_runtime_set_suspended(&dev->pdev->dev);
+}
+
+/**
+ * gma_suspend_display - suspend the display logic
+ * @dev: our DRM device
+ *
+ * Suspend the display logic of the graphics interface
+ */
+static void gma_suspend_display(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->suspended)
+ return;
+ dev_priv->ops->save_regs(dev);
+ dev_priv->ops->power_down(dev);
+ dev_priv->display_power = false;
+}
+
+/**
+ * gma_resume_display - resume display side logic
+ * @pdev: PCI device for the DRM device being resumed
+ *
+ * Resume the display hardware restoring state and enabling
+ * as necessary.
+ */
+static void gma_resume_display(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->suspended == false)
+ return;
+
+ /* turn on the display power island */
+ dev_priv->ops->power_up(dev);
+ dev_priv->suspended = false;
+ dev_priv->display_power = true;
+
+ PSB_WVDC32(dev_priv->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
+ pci_write_config_word(pdev, PSB_GMCH_CTRL,
+ dev_priv->gmch_ctrl | _PSB_GMCH_ENABLED);
+ dev_priv->ops->restore_regs(dev);
+}
+
+/**
+ * gma_suspend_pci - suspend PCI side
+ * @pdev: PCI device
+ *
+ * Perform the suspend processing on our PCI device state
+ */
+static void gma_suspend_pci(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ int bsm, vbt;
+
+ if (dev_priv->suspended)
+ return;
+
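+	/* Stash the BIOS stolen memory base (0x5C), the VBT pointer (0xFC)
+	 * and the MSI address/data pair by hand before entering D3hot, so
+	 * they can be rewritten on resume */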
+ pci_save_state(pdev);
+ pci_read_config_dword(pdev, 0x5C, &bsm);
+ dev_priv->saveBSM = bsm;
+ pci_read_config_dword(pdev, 0xFC, &vbt);
+ dev_priv->saveVBT = vbt;
+ pci_read_config_dword(pdev, PSB_PCIx_MSI_ADDR_LOC, &dev_priv->msi_addr);
+ pci_read_config_dword(pdev, PSB_PCIx_MSI_DATA_LOC, &dev_priv->msi_data);
+
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, PCI_D3hot);
+
+ dev_priv->suspended = true;
+}
+
+/**
+ * gma_resume_pci - resume helper
+ * @pdev: our PCI device
+ *
+ * Perform the resume processing on our PCI device state - rewrite
+ * register state and re-enable the PCI device
+ */
+static bool gma_resume_pci(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ int ret;
+
+ if (!dev_priv->suspended)
+ return true;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ pci_write_config_dword(pdev, 0x5c, dev_priv->saveBSM);
+ pci_write_config_dword(pdev, 0xFC, dev_priv->saveVBT);
+ /* restoring MSI address and data in PCIx space */
+ pci_write_config_dword(pdev, PSB_PCIx_MSI_ADDR_LOC, dev_priv->msi_addr);
+ pci_write_config_dword(pdev, PSB_PCIx_MSI_DATA_LOC, dev_priv->msi_data);
+ ret = pci_enable_device(pdev);
+
+ if (ret != 0)
+ dev_err(&pdev->dev, "pci_enable failed: %d\n", ret);
+ else
+ dev_priv->suspended = false;
+ return !dev_priv->suspended;
+}
+
+/**
+ * gma_power_suspend - bus callback for suspend
+ * @_dev: our device
+ *
+ * Called back by the PCI layer during a suspend of the system. We
+ * perform the necessary shut down steps and save enough state that
+ * we can undo this when resume is called.
+ */
+int gma_power_suspend(struct device *_dev)
+{
+ struct pci_dev *pdev = container_of(_dev, struct pci_dev, dev);
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ mutex_lock(&power_mutex);
+ if (!dev_priv->suspended) {
+ if (dev_priv->display_count) {
+ mutex_unlock(&power_mutex);
+ return -EBUSY;
+ }
+ psb_irq_uninstall(dev);
+ gma_suspend_display(dev);
+ gma_suspend_pci(pdev);
+ }
+ mutex_unlock(&power_mutex);
+ return 0;
+}
+
+/**
+ * gma_power_resume - resume power
+ * @_dev: our device
+ *
+ * Resume the PCI side of the graphics and then the displays
+ */
+int gma_power_resume(struct device *_dev)
+{
+ struct pci_dev *pdev = container_of(_dev, struct pci_dev, dev);
+ struct drm_device *dev = pci_get_drvdata(pdev);
+
+ mutex_lock(&power_mutex);
+ gma_resume_pci(pdev);
+ gma_resume_display(pdev);
+ psb_irq_preinstall(dev);
+ psb_irq_postinstall(dev);
+ mutex_unlock(&power_mutex);
+ return 0;
+}
+
+/**
+ * gma_power_is_on - return true if power is on
+ * @dev: our DRM device
+ *
+ * Returns true if the display island power is on at this moment
+ */
+bool gma_power_is_on(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ return dev_priv->display_power;
+}
+
+/**
+ * gma_power_begin - begin requiring power
+ * @dev: our DRM device
+ * @force_on: true to force power on
+ *
+ * Begin an action that requires the display power island to be enabled.
+ * We refcount the islands.
+ */
+bool gma_power_begin(struct drm_device *dev, bool force_on)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&power_ctrl_lock, flags);
+ /* Power already on ? */
+ if (dev_priv->display_power) {
+ dev_priv->display_count++;
+ pm_runtime_get(&dev->pdev->dev);
+ spin_unlock_irqrestore(&power_ctrl_lock, flags);
+ return true;
+ }
+ if (force_on == false)
+ goto out_false;
+
+	/* OK, a power up is needed */
+	ret = gma_resume_pci(dev->pdev);
+	if (ret) {
+ psb_irq_preinstall(dev);
+ psb_irq_postinstall(dev);
+ pm_runtime_get(&dev->pdev->dev);
+ dev_priv->display_count++;
+ spin_unlock_irqrestore(&power_ctrl_lock, flags);
+ return true;
+ }
+out_false:
+ spin_unlock_irqrestore(&power_ctrl_lock, flags);
+ return false;
+}
+
+/**
+ * gma_power_end - end use of power
+ * @dev: Our DRM device
+ *
+ * Indicate that one of our gma_power_begin() requested periods when
+ * the display island power is needed has completed.
+ */
+void gma_power_end(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ unsigned long flags;
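+	/* Drop one display power reference; the actual power-down is left to
+	 * the runtime PM core, which consults psb_runtime_idle() */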
+ spin_lock_irqsave(&power_ctrl_lock, flags);
+ dev_priv->display_count--;
+ WARN_ON(dev_priv->display_count < 0);
+ spin_unlock_irqrestore(&power_ctrl_lock, flags);
+ pm_runtime_put(&dev->pdev->dev);
+}
+
+int psb_runtime_suspend(struct device *dev)
+{
+ return gma_power_suspend(dev);
+}
+
+int psb_runtime_resume(struct device *dev)
+{
+	return gma_power_resume(dev);
+}
+
+int psb_runtime_idle(struct device *dev)
+{
+ struct drm_device *drmdev = pci_get_drvdata(to_pci_dev(dev));
+ struct drm_psb_private *dev_priv = drmdev->dev_private;
+ if (dev_priv->display_count)
+ return 0;
+ else
+ return 1;
+}
diff --git a/drivers/gpu/drm/gma500/power.h b/drivers/gpu/drm/gma500/power.h
new file mode 100644
index 000000000000..1969d2ecb328
--- /dev/null
+++ b/drivers/gpu/drm/gma500/power.h
@@ -0,0 +1,67 @@
+/**************************************************************************
+ * Copyright (c) 2009-2011, Intel Corporation.
+ * All Rights Reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Benjamin Defnet <benjamin.r.defnet@intel.com>
+ * Rajesh Poornachandran <rajesh.poornachandran@intel.com>
+ * Massively reworked
+ * Alan Cox <alan@linux.intel.com>
+ */
+#ifndef _PSB_POWERMGMT_H_
+#define _PSB_POWERMGMT_H_
+
+#include <linux/pci.h>
+#include <drm/drmP.h>
+
+void gma_power_init(struct drm_device *dev);
+void gma_power_uninit(struct drm_device *dev);
+
+/*
+ * The kernel bus power management will call these functions
+ */
+int gma_power_suspend(struct device *dev);
+int gma_power_resume(struct device *dev);
+
+/*
+ * These are the functions the driver should use to wrap all hw access
+ * (i.e. register reads and writes)
+ */
+bool gma_power_begin(struct drm_device *dev, bool force);
+void gma_power_end(struct drm_device *dev);
+
+/*
+ * Use this function for an instantaneous check that the hw is on.
+ * Only use it where you know the power mutex is already held, such as
+ * in irq install/uninstall, and taking it again would deadlock.
+ * Otherwise use gma_power_begin().
+ */
+bool gma_power_is_on(struct drm_device *dev);
+
+/*
+ * GFX-Runtime PM callbacks
+ */
+int psb_runtime_suspend(struct device *dev);
+int psb_runtime_resume(struct device *dev);
+int psb_runtime_idle(struct device *dev);
+
+#endif /*_PSB_POWERMGMT_H_*/
diff --git a/drivers/gpu/drm/gma500/psb_device.c b/drivers/gpu/drm/gma500/psb_device.c
new file mode 100644
index 000000000000..e5f5906172b0
--- /dev/null
+++ b/drivers/gpu/drm/gma500/psb_device.c
@@ -0,0 +1,328 @@
+/**************************************************************************
+ * Copyright (c) 2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#include <linux/backlight.h>
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include "gma_drm.h"
+#include "psb_drv.h"
+#include "psb_reg.h"
+#include "psb_intel_reg.h"
+#include "intel_bios.h"
+
+
+static int psb_output_init(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ psb_intel_lvds_init(dev, &dev_priv->mode_dev);
+ psb_intel_sdvo_init(dev, SDVOB);
+ return 0;
+}
+
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+
+/*
+ * Poulsbo Backlight Interfaces
+ */
+
+#define BLC_PWM_PRECISION_FACTOR 100 /* 10000000 */
+#define BLC_PWM_FREQ_CALC_CONSTANT 32
+#define MHz 1000000
+
+#define PSB_BLC_PWM_PRECISION_FACTOR 10
+#define PSB_BLC_MAX_PWM_REG_FREQ 0xFFFE
+#define PSB_BLC_MIN_PWM_REG_FREQ 0x2
+
+#define PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
+#define PSB_BACKLIGHT_PWM_CTL_SHIFT (16)
+
+static int psb_brightness;
+static struct backlight_device *psb_backlight_device;
+
+static int psb_get_brightness(struct backlight_device *bd)
+{
+ /* return locally cached var instead of HW read (due to DPST etc.) */
+ /* FIXME: ideally return actual value in case firmware fiddled with
+ it */
+ return psb_brightness;
+}
+
+
+static int psb_backlight_setup(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ unsigned long core_clock;
+ /* u32 bl_max_freq; */
+ /* unsigned long value; */
+ u16 bl_max_freq;
+ uint32_t value;
+ uint32_t blc_pwm_precision_factor;
+
+ /* get bl_max_freq and pol from dev_priv*/
+ if (!dev_priv->lvds_bl) {
+ dev_err(dev->dev, "Has no valid LVDS backlight info\n");
+ return -ENOENT;
+ }
+ bl_max_freq = dev_priv->lvds_bl->freq;
+ blc_pwm_precision_factor = PSB_BLC_PWM_PRECISION_FACTOR;
+
+ core_clock = dev_priv->core_freq;
+
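+	/*
+	 * Turn the requested backlight PWM frequency into a modulation
+	 * frequency register value derived from the core clock; the
+	 * precision factor only guards against intermediate rounding.
+	 */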
+ value = (core_clock * MHz) / BLC_PWM_FREQ_CALC_CONSTANT;
+ value *= blc_pwm_precision_factor;
+ value /= bl_max_freq;
+ value /= blc_pwm_precision_factor;
+
+ if (value > (unsigned long long)PSB_BLC_MAX_PWM_REG_FREQ ||
+ value < (unsigned long long)PSB_BLC_MIN_PWM_REG_FREQ)
+ return -ERANGE;
+ else {
+ value &= PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR;
+ REG_WRITE(BLC_PWM_CTL,
+ (value << PSB_BACKLIGHT_PWM_CTL_SHIFT) | (value));
+ }
+ return 0;
+}
+
+static int psb_set_brightness(struct backlight_device *bd)
+{
+ struct drm_device *dev = bl_get_data(psb_backlight_device);
+ int level = bd->props.brightness;
+
+ /* Percentage 1-100% being valid */
+ if (level < 1)
+ level = 1;
+
+ psb_intel_lvds_set_brightness(dev, level);
+ psb_brightness = level;
+ return 0;
+}
+
+static const struct backlight_ops psb_ops = {
+ .get_brightness = psb_get_brightness,
+ .update_status = psb_set_brightness,
+};
+
+static int psb_backlight_init(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ int ret;
+ struct backlight_properties props;
+
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.max_brightness = 100;
+ props.type = BACKLIGHT_PLATFORM;
+
+ psb_backlight_device = backlight_device_register("psb-bl",
+ NULL, (void *)dev, &psb_ops, &props);
+ if (IS_ERR(psb_backlight_device))
+ return PTR_ERR(psb_backlight_device);
+
+ ret = psb_backlight_setup(dev);
+ if (ret < 0) {
+ backlight_device_unregister(psb_backlight_device);
+ psb_backlight_device = NULL;
+ return ret;
+ }
+ psb_backlight_device->props.brightness = 100;
+ psb_backlight_device->props.max_brightness = 100;
+ backlight_update_status(psb_backlight_device);
+ dev_priv->backlight_device = psb_backlight_device;
+ return 0;
+}
+
+#endif
+
+/*
+ * Provide the Poulsbo specific chip logic and low level methods
+ * for power management
+ */
+
+static void psb_init_pm(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ u32 gating = PSB_RSGX32(PSB_CR_CLKGATECTL);
+ gating &= ~3; /* Disable 2D clock gating */
+ gating |= 1;
+ PSB_WSGX32(gating, PSB_CR_CLKGATECTL);
+ PSB_RSGX32(PSB_CR_CLKGATECTL);
+}
+
+/**
+ * psb_save_display_registers - save registers lost on suspend
+ * @dev: our DRM device
+ *
+ * Save the state we need in order to be able to restore the interface
+ * upon resume from suspend
+ */
+static int psb_save_display_registers(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+ struct drm_connector *connector;
+
+ /* Display arbitration control + watermarks */
+ dev_priv->saveDSPARB = PSB_RVDC32(DSPARB);
+ dev_priv->saveDSPFW1 = PSB_RVDC32(DSPFW1);
+ dev_priv->saveDSPFW2 = PSB_RVDC32(DSPFW2);
+ dev_priv->saveDSPFW3 = PSB_RVDC32(DSPFW3);
+ dev_priv->saveDSPFW4 = PSB_RVDC32(DSPFW4);
+ dev_priv->saveDSPFW5 = PSB_RVDC32(DSPFW5);
+ dev_priv->saveDSPFW6 = PSB_RVDC32(DSPFW6);
+ dev_priv->saveCHICKENBIT = PSB_RVDC32(DSPCHICKENBIT);
+
+ /* Save crtc and output state */
+ mutex_lock(&dev->mode_config.mutex);
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ if (drm_helper_crtc_in_use(crtc))
+ crtc->funcs->save(crtc);
+ }
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ connector->funcs->save(connector);
+
+ mutex_unlock(&dev->mode_config.mutex);
+ return 0;
+}
+
+/**
+ * psb_restore_display_registers - restore lost register state
+ * @dev: our DRM device
+ *
+ * Restore register state that was lost during suspend and resume.
+ */
+static int psb_restore_display_registers(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+ struct drm_connector *connector;
+
+ /* Display arbitration + watermarks */
+ PSB_WVDC32(dev_priv->saveDSPARB, DSPARB);
+ PSB_WVDC32(dev_priv->saveDSPFW1, DSPFW1);
+ PSB_WVDC32(dev_priv->saveDSPFW2, DSPFW2);
+ PSB_WVDC32(dev_priv->saveDSPFW3, DSPFW3);
+ PSB_WVDC32(dev_priv->saveDSPFW4, DSPFW4);
+ PSB_WVDC32(dev_priv->saveDSPFW5, DSPFW5);
+ PSB_WVDC32(dev_priv->saveDSPFW6, DSPFW6);
+ PSB_WVDC32(dev_priv->saveCHICKENBIT, DSPCHICKENBIT);
+
+	/* Make sure the VGA plane is off; it initializes to on after reset! */
+ PSB_WVDC32(0x80000000, VGACNTRL);
+
+ mutex_lock(&dev->mode_config.mutex);
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+ if (drm_helper_crtc_in_use(crtc))
+ crtc->funcs->restore(crtc);
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ connector->funcs->restore(connector);
+
+ mutex_unlock(&dev->mode_config.mutex);
+ return 0;
+}
+
+static int psb_power_down(struct drm_device *dev)
+{
+ return 0;
+}
+
+static int psb_power_up(struct drm_device *dev)
+{
+ return 0;
+}
+
+static void psb_get_core_freq(struct drm_device *dev)
+{
+ uint32_t clock;
+ struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ /*pci_write_config_dword(pci_root, 0xD4, 0x00C32004);*/
+ /*pci_write_config_dword(pci_root, 0xD0, 0xE0033000);*/
+
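+	/* Ask the host bridge for the graphics core clock: the command word
+	 * goes into config offset 0xD0 and the encoded result is read back
+	 * from 0xD4 (offsets taken from the accesses below) */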
+ pci_write_config_dword(pci_root, 0xD0, 0xD0050300);
+ pci_read_config_dword(pci_root, 0xD4, &clock);
+ pci_dev_put(pci_root);
+
+ switch (clock & 0x07) {
+ case 0:
+ dev_priv->core_freq = 100;
+ break;
+ case 1:
+ dev_priv->core_freq = 133;
+ break;
+ case 2:
+ dev_priv->core_freq = 150;
+ break;
+ case 3:
+ dev_priv->core_freq = 178;
+ break;
+ case 4:
+ dev_priv->core_freq = 200;
+ break;
+ case 5:
+ case 6:
+ case 7:
+		dev_priv->core_freq = 266;
+		break;
+	default:
+ dev_priv->core_freq = 0;
+ }
+}
+
+static int psb_chip_setup(struct drm_device *dev)
+{
+ psb_get_core_freq(dev);
+ gma_intel_setup_gmbus(dev);
+ gma_intel_opregion_init(dev);
+ psb_intel_init_bios(dev);
+ return 0;
+}
+
+static void psb_chip_teardown(struct drm_device *dev)
+{
+ gma_intel_teardown_gmbus(dev);
+}
+
+const struct psb_ops psb_chip_ops = {
+ .name = "Poulsbo",
+ .accel_2d = 1,
+ .pipes = 2,
+ .crtcs = 2,
+ .sgx_offset = PSB_SGX_OFFSET,
+ .chip_setup = psb_chip_setup,
+ .chip_teardown = psb_chip_teardown,
+
+ .crtc_helper = &psb_intel_helper_funcs,
+ .crtc_funcs = &psb_intel_crtc_funcs,
+
+ .output_init = psb_output_init,
+
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+ .backlight_init = psb_backlight_init,
+#endif
+
+ .init_pm = psb_init_pm,
+ .save_regs = psb_save_display_registers,
+ .restore_regs = psb_restore_display_registers,
+ .power_down = psb_power_down,
+ .power_up = psb_power_up,
+};
+
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
new file mode 100644
index 000000000000..f14768f2b364
--- /dev/null
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -0,0 +1,703 @@
+/**************************************************************************
+ * Copyright (c) 2007-2011, Intel Corporation.
+ * All Rights Reserved.
+ * Copyright (c) 2008, Tungsten Graphics, Inc. Cedar Park, TX., USA.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include "gma_drm.h"
+#include "psb_drv.h"
+#include "framebuffer.h"
+#include "psb_reg.h"
+#include "psb_intel_reg.h"
+#include "intel_bios.h"
+#include "mid_bios.h"
+#include <drm/drm_pciids.h>
+#include "power.h"
+#include <linux/cpu.h>
+#include <linux/notifier.h>
+#include <linux/spinlock.h>
+#include <linux/pm_runtime.h>
+#include <acpi/video.h>
+#include <linux/module.h>
+
+static int drm_psb_trap_pagefaults;
+
+static int psb_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
+
+MODULE_PARM_DESC(trap_pagefaults, "Error and reset on MMU pagefaults");
+module_param_named(trap_pagefaults, drm_psb_trap_pagefaults, int, 0600);
+
+
+static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
+ { 0x8086, 0x8108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &psb_chip_ops },
+ { 0x8086, 0x8109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &psb_chip_ops },
+#if defined(CONFIG_DRM_GMA600)
+ { 0x8086, 0x4100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
+ { 0x8086, 0x4101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
+ { 0x8086, 0x4102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
+ { 0x8086, 0x4103, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
+ { 0x8086, 0x4104, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
+ { 0x8086, 0x4105, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
+ { 0x8086, 0x4106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
+ { 0x8086, 0x4107, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
+ /* Atom E620 */
+ { 0x8086, 0x4108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
+#endif
+#if defined(CONFIG_DRM_GMA3600)
+ { 0x8086, 0x0be0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+ { 0x8086, 0x0be1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+ { 0x8086, 0x0be2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+ { 0x8086, 0x0be3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+ { 0x8086, 0x0be4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+ { 0x8086, 0x0be5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+ { 0x8086, 0x0be6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+ { 0x8086, 0x0be7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+#endif
+ { 0, 0, 0}
+};
+MODULE_DEVICE_TABLE(pci, pciidlist);
+
+/*
+ * Standard IOCTLs.
+ */
+
+#define DRM_IOCTL_PSB_ADB \
+ DRM_IOWR(DRM_GMA_ADB + DRM_COMMAND_BASE, uint32_t)
+#define DRM_IOCTL_PSB_MODE_OPERATION \
+ DRM_IOWR(DRM_GMA_MODE_OPERATION + DRM_COMMAND_BASE, \
+ struct drm_psb_mode_operation_arg)
+#define DRM_IOCTL_PSB_STOLEN_MEMORY \
+ DRM_IOWR(DRM_GMA_STOLEN_MEMORY + DRM_COMMAND_BASE, \
+ struct drm_psb_stolen_memory_arg)
+#define DRM_IOCTL_PSB_GAMMA \
+ DRM_IOWR(DRM_GMA_GAMMA + DRM_COMMAND_BASE, \
+ struct drm_psb_dpst_lut_arg)
+#define DRM_IOCTL_PSB_DPST_BL \
+ DRM_IOWR(DRM_GMA_DPST_BL + DRM_COMMAND_BASE, \
+ uint32_t)
+#define DRM_IOCTL_PSB_GET_PIPE_FROM_CRTC_ID \
+ DRM_IOWR(DRM_GMA_GET_PIPE_FROM_CRTC_ID + DRM_COMMAND_BASE, \
+ struct drm_psb_get_pipe_from_crtc_id_arg)
+#define DRM_IOCTL_PSB_GEM_CREATE \
+ DRM_IOWR(DRM_GMA_GEM_CREATE + DRM_COMMAND_BASE, \
+ struct drm_psb_gem_create)
+#define DRM_IOCTL_PSB_GEM_MMAP \
+ DRM_IOWR(DRM_GMA_GEM_MMAP + DRM_COMMAND_BASE, \
+ struct drm_psb_gem_mmap)
+
+static int psb_adb_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+static int psb_mode_operation_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+static int psb_stolen_memory_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+static int psb_gamma_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+static int psb_dpst_bl_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+#define PSB_IOCTL_DEF(ioctl, func, flags) \
+ [DRM_IOCTL_NR(ioctl) - DRM_COMMAND_BASE] = {ioctl, flags, func}
+
+static struct drm_ioctl_desc psb_ioctls[] = {
+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_ADB, psb_adb_ioctl, DRM_AUTH),
+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_MODE_OPERATION, psb_mode_operation_ioctl,
+ DRM_AUTH),
+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_STOLEN_MEMORY, psb_stolen_memory_ioctl,
+ DRM_AUTH),
+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_GAMMA, psb_gamma_ioctl, DRM_AUTH),
+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_DPST_BL, psb_dpst_bl_ioctl, DRM_AUTH),
+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_GET_PIPE_FROM_CRTC_ID,
+ psb_intel_get_pipe_from_crtc_id, 0),
+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_GEM_CREATE, psb_gem_create_ioctl,
+ DRM_UNLOCKED | DRM_AUTH),
+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_GEM_MMAP, psb_gem_mmap_ioctl,
+ DRM_UNLOCKED | DRM_AUTH),
+};
+
+static void psb_lastclose(struct drm_device *dev)
+{
+ return;
+}
+
+static void psb_do_takedown(struct drm_device *dev)
+{
+}
+
+static int psb_do_init(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_gtt *pg = &dev_priv->gtt;
+
+ uint32_t stolen_gtt;
+
+ int ret = -ENOMEM;
+
+ if (pg->mmu_gatt_start & 0x0FFFFFFF) {
+ dev_err(dev->dev, "Gatt must be 256M aligned. This is a bug.\n");
+ ret = -EINVAL;
+ goto out_err;
+ }
+
+
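+	/* Pages of GTT entries needed to map the stolen area: four bytes of
+	 * PTE per stolen page, rounded up to whole pages and capped at the
+	 * number of GTT pages actually present */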
+ stolen_gtt = (pg->stolen_size >> PAGE_SHIFT) * 4;
+ stolen_gtt = (stolen_gtt + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ stolen_gtt =
+ (stolen_gtt < pg->gtt_pages) ? stolen_gtt : pg->gtt_pages;
+
+ dev_priv->gatt_free_offset = pg->mmu_gatt_start +
+ (stolen_gtt << PAGE_SHIFT) * 1024;
+
+ if (1 || drm_debug) {
+ uint32_t core_id = PSB_RSGX32(PSB_CR_CORE_ID);
+ uint32_t core_rev = PSB_RSGX32(PSB_CR_CORE_REVISION);
+ DRM_INFO("SGX core id = 0x%08x\n", core_id);
+ DRM_INFO("SGX core rev major = 0x%02x, minor = 0x%02x\n",
+ (core_rev & _PSB_CC_REVISION_MAJOR_MASK) >>
+ _PSB_CC_REVISION_MAJOR_SHIFT,
+ (core_rev & _PSB_CC_REVISION_MINOR_MASK) >>
+ _PSB_CC_REVISION_MINOR_SHIFT);
+ DRM_INFO
+ ("SGX core rev maintenance = 0x%02x, designer = 0x%02x\n",
+ (core_rev & _PSB_CC_REVISION_MAINTENANCE_MASK) >>
+ _PSB_CC_REVISION_MAINTENANCE_SHIFT,
+ (core_rev & _PSB_CC_REVISION_DESIGNER_MASK) >>
+ _PSB_CC_REVISION_DESIGNER_SHIFT);
+ }
+
+
+ spin_lock_init(&dev_priv->irqmask_lock);
+ spin_lock_init(&dev_priv->lock_2d);
+
+ PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK0);
+ PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK1);
+ PSB_RSGX32(PSB_CR_BIF_BANK1);
+ PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) | _PSB_MMU_ER_MASK,
+ PSB_CR_BIF_CTRL);
+ psb_spank(dev_priv);
+
+ /* mmu_gatt ?? */
+ PSB_WSGX32(pg->gatt_start, PSB_CR_BIF_TWOD_REQ_BASE);
+ return 0;
+out_err:
+ psb_do_takedown(dev);
+ return ret;
+}
+
+static int psb_driver_unload(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ /* Kill vblank etc here */
+
+ gma_backlight_exit(dev);
+
+ psb_modeset_cleanup(dev);
+
+ if (dev_priv) {
+ psb_lid_timer_takedown(dev_priv);
+ gma_intel_opregion_exit(dev);
+
+ if (dev_priv->ops->chip_teardown)
+ dev_priv->ops->chip_teardown(dev);
+ psb_do_takedown(dev);
+
+
+ if (dev_priv->pf_pd) {
+ psb_mmu_free_pagedir(dev_priv->pf_pd);
+ dev_priv->pf_pd = NULL;
+ }
+ if (dev_priv->mmu) {
+ struct psb_gtt *pg = &dev_priv->gtt;
+
+ down_read(&pg->sem);
+ psb_mmu_remove_pfn_sequence(
+ psb_mmu_get_default_pd
+ (dev_priv->mmu),
+ pg->mmu_gatt_start,
+ dev_priv->vram_stolen_size >> PAGE_SHIFT);
+ up_read(&pg->sem);
+ psb_mmu_driver_takedown(dev_priv->mmu);
+ dev_priv->mmu = NULL;
+ }
+ psb_gtt_takedown(dev);
+ if (dev_priv->scratch_page) {
+ __free_page(dev_priv->scratch_page);
+ dev_priv->scratch_page = NULL;
+ }
+ if (dev_priv->vdc_reg) {
+ iounmap(dev_priv->vdc_reg);
+ dev_priv->vdc_reg = NULL;
+ }
+ if (dev_priv->sgx_reg) {
+ iounmap(dev_priv->sgx_reg);
+ dev_priv->sgx_reg = NULL;
+ }
+
+ kfree(dev_priv);
+ dev->dev_private = NULL;
+
+ /*destroy VBT data*/
+ psb_intel_destroy_bios(dev);
+ }
+
+ gma_power_uninit(dev);
+
+ return 0;
+}
+
+
+static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
+{
+ struct drm_psb_private *dev_priv;
+ unsigned long resource_start;
+ struct psb_gtt *pg;
+ unsigned long irqflags;
+ int ret = -ENOMEM;
+ uint32_t tt_pages;
+ struct drm_connector *connector;
+ struct psb_intel_encoder *psb_intel_encoder;
+
+ dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
+ if (dev_priv == NULL)
+ return -ENOMEM;
+
+ dev_priv->ops = (struct psb_ops *)chipset;
+ dev_priv->dev = dev;
+ dev->dev_private = (void *) dev_priv;
+
+ if (!IS_PSB(dev)) {
+ if (pci_enable_msi(dev->pdev))
+ dev_warn(dev->dev, "Enabling MSI failed!\n");
+ }
+
+ dev_priv->num_pipe = dev_priv->ops->pipes;
+
+ resource_start = pci_resource_start(dev->pdev, PSB_MMIO_RESOURCE);
+
+ dev_priv->vdc_reg =
+ ioremap(resource_start + PSB_VDC_OFFSET, PSB_VDC_SIZE);
+ if (!dev_priv->vdc_reg)
+ goto out_err;
+
+ dev_priv->sgx_reg = ioremap(resource_start + dev_priv->ops->sgx_offset,
+ PSB_SGX_SIZE);
+ if (!dev_priv->sgx_reg)
+ goto out_err;
+
+ ret = dev_priv->ops->chip_setup(dev);
+ if (ret)
+ goto out_err;
+
+ /* Init OSPM support */
+ gma_power_init(dev);
+
+ ret = -ENOMEM;
+
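+	/* Allocate the scratch page (used to back unused GTT entries) and
+	 * mark it uncached */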
+ dev_priv->scratch_page = alloc_page(GFP_DMA32 | __GFP_ZERO);
+ if (!dev_priv->scratch_page)
+ goto out_err;
+
+ set_pages_uc(dev_priv->scratch_page, 1);
+
+ ret = psb_gtt_init(dev, 0);
+ if (ret)
+ goto out_err;
+
+ dev_priv->mmu = psb_mmu_driver_init((void *)0,
+ drm_psb_trap_pagefaults, 0,
+ dev_priv);
+ if (!dev_priv->mmu)
+ goto out_err;
+
+ pg = &dev_priv->gtt;
+
+ tt_pages = (pg->gatt_pages < PSB_TT_PRIV0_PLIMIT) ?
+ (pg->gatt_pages) : PSB_TT_PRIV0_PLIMIT;
+
+
+ dev_priv->pf_pd = psb_mmu_alloc_pd(dev_priv->mmu, 1, 0);
+ if (!dev_priv->pf_pd)
+ goto out_err;
+
+ psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
+ psb_mmu_set_pd_context(dev_priv->pf_pd, 1);
+
+ ret = psb_do_init(dev);
+ if (ret)
+ return ret;
+
+ PSB_WSGX32(0x20000000, PSB_CR_PDS_EXEC_BASE);
+ PSB_WSGX32(0x30000000, PSB_CR_BIF_3D_REQ_BASE);
+
+/* igd_opregion_init(&dev_priv->opregion_dev); */
+ acpi_video_register();
+ if (dev_priv->lid_state)
+ psb_lid_timer_init(dev_priv);
+
+ ret = drm_vblank_init(dev, dev_priv->num_pipe);
+ if (ret)
+ goto out_err;
+
+ /*
+ * Install interrupt handlers prior to powering off SGX or else we will
+ * crash.
+ */
+ dev_priv->vdc_irq_mask = 0;
+ dev_priv->pipestat[0] = 0;
+ dev_priv->pipestat[1] = 0;
+ dev_priv->pipestat[2] = 0;
+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+ PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
+ PSB_WVDC32(0x00000000, PSB_INT_ENABLE_R);
+ PSB_WVDC32(0xFFFFFFFF, PSB_INT_MASK_R);
+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+ if (IS_PSB(dev) && drm_core_check_feature(dev, DRIVER_MODESET))
+ drm_irq_install(dev);
+
+ dev->vblank_disable_allowed = 1;
+
+ dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
+
+ dev->driver->get_vblank_counter = psb_get_vblank_counter;
+
+ psb_modeset_init(dev);
+ psb_fbdev_init(dev);
+ drm_kms_helper_poll_init(dev);
+
+ /* Only add backlight support if we have LVDS output */
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ head) {
+ psb_intel_encoder = psb_intel_attached_encoder(connector);
+
+ switch (psb_intel_encoder->type) {
+ case INTEL_OUTPUT_LVDS:
+ case INTEL_OUTPUT_MIPI:
+ ret = gma_backlight_init(dev);
+ break;
+ }
+ }
+
+ if (ret)
+ return ret;
+#if 0
+ /*enable runtime pm at last*/
+ pm_runtime_enable(&dev->pdev->dev);
+ pm_runtime_set_active(&dev->pdev->dev);
+#endif
+ /*Intel drm driver load is done, continue doing pvr load*/
+ return 0;
+out_err:
+ psb_driver_unload(dev);
+ return ret;
+}
+
+int psb_driver_device_is_agp(struct drm_device *dev)
+{
+ return 0;
+}
+
+static inline void get_brightness(struct backlight_device *bd)
+{
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+ if (bd) {
+ bd->props.brightness = bd->ops->get_brightness(bd);
+ backlight_update_status(bd);
+ }
+#endif
+}
+
+static int psb_dpst_bl_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_psb_private *dev_priv = psb_priv(dev);
+ uint32_t *arg = data;
+
+ dev_priv->blc_adj2 = *arg;
+ get_brightness(dev_priv->backlight_device);
+ return 0;
+}
+
+static int psb_adb_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_psb_private *dev_priv = psb_priv(dev);
+ uint32_t *arg = data;
+
+ dev_priv->blc_adj1 = *arg;
+ get_brightness(dev_priv->backlight_device);
+ return 0;
+}
+
+static int psb_gamma_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_psb_dpst_lut_arg *lut_arg = data;
+ struct drm_mode_object *obj;
+ struct drm_crtc *crtc;
+ struct drm_connector *connector;
+ struct psb_intel_crtc *psb_intel_crtc;
+ int i = 0;
+ int32_t obj_id;
+
+ obj_id = lut_arg->output_id;
+ obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_CONNECTOR);
+ if (!obj) {
+ dev_dbg(dev->dev, "Invalid Connector object.\n");
+ return -EINVAL;
+ }
+
+ connector = obj_to_connector(obj);
+ crtc = connector->encoder->crtc;
+ psb_intel_crtc = to_psb_intel_crtc(crtc);
+
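+ /*
+ * Copy the user-supplied 256-entry LUT into the CRTC's adjustment
+ * table and reload the hardware palette.
+ */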
+ for (i = 0; i < 256; i++)
+ psb_intel_crtc->lut_adj[i] = lut_arg->lut[i];
+
+ psb_intel_crtc_load_lut(crtc);
+
+ return 0;
+}
+
+static int psb_mode_operation_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ uint32_t obj_id;
+ uint16_t op;
+ struct drm_mode_modeinfo *umode;
+ struct drm_display_mode *mode = NULL;
+ struct drm_psb_mode_operation_arg *arg;
+ struct drm_mode_object *obj;
+ struct drm_connector *connector;
+ struct drm_connector_helper_funcs *connector_funcs;
+ int ret = 0;
+ int resp = MODE_OK;
+
+ arg = (struct drm_psb_mode_operation_arg *)data;
+ obj_id = arg->obj_id;
+ op = arg->operation;
+
+ switch (op) {
+ case PSB_MODE_OPERATION_MODE_VALID:
+ umode = &arg->mode;
+
+ mutex_lock(&dev->mode_config.mutex);
+
+ obj = drm_mode_object_find(dev, obj_id,
+ DRM_MODE_OBJECT_CONNECTOR);
+ if (!obj) {
+ ret = -EINVAL;
+ goto mode_op_out;
+ }
+
+ connector = obj_to_connector(obj);
+
+ mode = drm_mode_create(dev);
+ if (!mode) {
+ ret = -ENOMEM;
+ goto mode_op_out;
+ }
+
+ /* drm_crtc_convert_umode(mode, umode); */
+ {
+ mode->clock = umode->clock;
+ mode->hdisplay = umode->hdisplay;
+ mode->hsync_start = umode->hsync_start;
+ mode->hsync_end = umode->hsync_end;
+ mode->htotal = umode->htotal;
+ mode->hskew = umode->hskew;
+ mode->vdisplay = umode->vdisplay;
+ mode->vsync_start = umode->vsync_start;
+ mode->vsync_end = umode->vsync_end;
+ mode->vtotal = umode->vtotal;
+ mode->vscan = umode->vscan;
+ mode->vrefresh = umode->vrefresh;
+ mode->flags = umode->flags;
+ mode->type = umode->type;
+ strncpy(mode->name, umode->name, DRM_DISPLAY_MODE_LEN);
+ mode->name[DRM_DISPLAY_MODE_LEN-1] = 0;
+ }
+
+ connector_funcs = (struct drm_connector_helper_funcs *)
+ connector->helper_private;
+
+ if (connector_funcs->mode_valid) {
+ resp = connector_funcs->mode_valid(connector, mode);
+ arg->data = resp;
+ }
+
+ /* Clean up the temporary mode */
+ if (mode)
+ drm_mode_destroy(dev, mode);
+mode_op_out:
+ mutex_unlock(&dev->mode_config.mutex);
+ return ret;
+
+ default:
+ dev_dbg(dev->dev, "Unsupported psb mode operation\n");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int psb_stolen_memory_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_psb_private *dev_priv = psb_priv(dev);
+ struct drm_psb_stolen_memory_arg *arg = data;
+
+ arg->base = dev_priv->stolen_base;
+ arg->size = dev_priv->vram_stolen_size;
+
+ return 0;
+}
+
+static int psb_driver_open(struct drm_device *dev, struct drm_file *priv)
+{
+ return 0;
+}
+
+static void psb_driver_close(struct drm_device *dev, struct drm_file *priv)
+{
+}
+
+static long psb_unlocked_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct drm_file *file_priv = filp->private_data;
+ struct drm_device *dev = file_priv->minor->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ static unsigned int runtime_allowed;
+
+ if (runtime_allowed == 1 && dev_priv->is_lvds_on) {
+ runtime_allowed++;
+ pm_runtime_allow(&dev->pdev->dev);
+ dev_priv->rpm_enabled = 1;
+ }
+ return drm_ioctl(filp, cmd, arg);
+ /* FIXME: do we need to wrap the other side of this */
+}
+
+
+/* When a client dies:
+ * - Check for and clean up flipped page state
+ */
+void psb_driver_preclose(struct drm_device *dev, struct drm_file *priv)
+{
+}
+
+static void psb_remove(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ drm_put_dev(dev);
+}
+
+static const struct dev_pm_ops psb_pm_ops = {
+ .resume = gma_power_resume,
+ .suspend = gma_power_suspend,
+ .runtime_suspend = psb_runtime_suspend,
+ .runtime_resume = psb_runtime_resume,
+ .runtime_idle = psb_runtime_idle,
+};
+
+static struct vm_operations_struct psb_gem_vm_ops = {
+ .fault = psb_gem_fault,
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+};
+
+static const struct file_operations psb_gem_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = psb_unlocked_ioctl,
+ .mmap = drm_gem_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+ .read = drm_read,
+};
+
+static struct drm_driver driver = {
+ .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
+ DRIVER_IRQ_VBL | DRIVER_MODESET | DRIVER_GEM,
+ .load = psb_driver_load,
+ .unload = psb_driver_unload,
+
+ .ioctls = psb_ioctls,
+ .num_ioctls = DRM_ARRAY_SIZE(psb_ioctls),
+ .device_is_agp = psb_driver_device_is_agp,
+ .irq_preinstall = psb_irq_preinstall,
+ .irq_postinstall = psb_irq_postinstall,
+ .irq_uninstall = psb_irq_uninstall,
+ .irq_handler = psb_irq_handler,
+ .enable_vblank = psb_enable_vblank,
+ .disable_vblank = psb_disable_vblank,
+ .get_vblank_counter = psb_get_vblank_counter,
+ .lastclose = psb_lastclose,
+ .open = psb_driver_open,
+ .preclose = psb_driver_preclose,
+ .postclose = psb_driver_close,
+ .reclaim_buffers = drm_core_reclaim_buffers,
+
+ .gem_init_object = psb_gem_init_object,
+ .gem_free_object = psb_gem_free_object,
+ .gem_vm_ops = &psb_gem_vm_ops,
+ .dumb_create = psb_gem_dumb_create,
+ .dumb_map_offset = psb_gem_dumb_map_gtt,
+ .dumb_destroy = psb_gem_dumb_destroy,
+ .fops = &psb_gem_fops,
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = PSB_DRM_DRIVER_DATE,
+ .major = PSB_DRM_DRIVER_MAJOR,
+ .minor = PSB_DRM_DRIVER_MINOR,
+ .patchlevel = PSB_DRM_DRIVER_PATCHLEVEL
+};
+
+static struct pci_driver psb_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+ .probe = psb_probe,
+ .remove = psb_remove,
+ .driver.pm = &psb_pm_ops,
+};
+
+static int psb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ return drm_get_pci_dev(pdev, ent, &driver);
+}
+
+static int __init psb_init(void)
+{
+ return drm_pci_init(&driver, &psb_pci_driver);
+}
+
+static void __exit psb_exit(void)
+{
+ drm_pci_exit(&driver, &psb_pci_driver);
+}
+
+late_initcall(psb_init);
+module_exit(psb_exit);
+
+MODULE_AUTHOR("Alan Cox <alan@linux.intel.com> and others");
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
new file mode 100644
index 000000000000..eb1568a0da95
--- /dev/null
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -0,0 +1,956 @@
+/**************************************************************************
+ * Copyright (c) 2007-2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#ifndef _PSB_DRV_H_
+#define _PSB_DRV_H_
+
+#include <linux/kref.h>
+
+#include <drm/drmP.h>
+#include "drm_global.h"
+#include "gem_glue.h"
+#include "gma_drm.h"
+#include "psb_reg.h"
+#include "psb_intel_drv.h"
+#include "gtt.h"
+#include "power.h"
+#include "oaktrail.h"
+
+/* Append new drm mode definition here, align with libdrm definition */
+#define DRM_MODE_SCALE_NO_SCALE 2
+
+enum {
+ CHIP_PSB_8108 = 0, /* Poulsbo */
+ CHIP_PSB_8109 = 1, /* Poulsbo */
+ CHIP_MRST_4100 = 2, /* Moorestown/Oaktrail */
+ CHIP_MFLD_0130 = 3, /* Medfield */
+};
+
+#define IS_PSB(dev) (((dev)->pci_device & 0xfffe) == 0x8108)
+#define IS_MRST(dev) (((dev)->pci_device & 0xfffc) == 0x4100)
+#define IS_MFLD(dev) (((dev)->pci_device & 0xfff8) == 0x0130)
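+
+/*
+ * Each chip family uses a contiguous block of PCI device IDs, so masking
+ * off the low bits of dev->pci_device is enough to identify the family.
+ */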
+
+/*
+ * Driver definitions
+ */
+
+#define DRIVER_NAME "gma500"
+#define DRIVER_DESC "DRM driver for the Intel GMA500"
+
+#define PSB_DRM_DRIVER_DATE "2011-06-06"
+#define PSB_DRM_DRIVER_MAJOR 1
+#define PSB_DRM_DRIVER_MINOR 0
+#define PSB_DRM_DRIVER_PATCHLEVEL 0
+
+/*
+ * Hardware offsets
+ */
+#define PSB_VDC_OFFSET 0x00000000
+#define PSB_VDC_SIZE 0x000080000
+#define MRST_MMIO_SIZE 0x0000C0000
+#define MDFLD_MMIO_SIZE 0x000100000
+#define PSB_SGX_SIZE 0x8000
+#define PSB_SGX_OFFSET 0x00040000
+#define MRST_SGX_OFFSET 0x00080000
+/*
+ * PCI resource identifiers
+ */
+#define PSB_MMIO_RESOURCE 0
+#define PSB_GATT_RESOURCE 2
+#define PSB_GTT_RESOURCE 3
+/*
+ * PCI configuration
+ */
+#define PSB_GMCH_CTRL 0x52
+#define PSB_BSM 0x5C
+#define _PSB_GMCH_ENABLED 0x4
+#define PSB_PGETBL_CTL 0x2020
+#define _PSB_PGETBL_ENABLED 0x00000001
+#define PSB_SGX_2D_SLAVE_PORT 0x4000
+
+/* To get rid of */
+#define PSB_TT_PRIV0_LIMIT (256*1024*1024)
+#define PSB_TT_PRIV0_PLIMIT (PSB_TT_PRIV0_LIMIT >> PAGE_SHIFT)
+
+/*
+ * SGX side MMU definitions (these can probably go)
+ */
+
+/*
+ * Flags for external memory type field.
+ */
+#define PSB_MMU_CACHED_MEMORY 0x0001 /* Bind to MMU only */
+#define PSB_MMU_RO_MEMORY 0x0002 /* MMU RO memory */
+#define PSB_MMU_WO_MEMORY 0x0004 /* MMU WO memory */
+/*
+ * PTE's and PDE's
+ */
+#define PSB_PDE_MASK 0x003FFFFF
+#define PSB_PDE_SHIFT 22
+#define PSB_PTE_SHIFT 12
+/*
+ * Cache control
+ */
+#define PSB_PTE_VALID 0x0001 /* PTE / PDE valid */
+#define PSB_PTE_WO 0x0002 /* Write only */
+#define PSB_PTE_RO 0x0004 /* Read only */
+#define PSB_PTE_CACHED 0x0008 /* CPU cache coherent */
+
+/*
+ * VDC registers and bits
+ */
+#define PSB_MSVDX_CLOCKGATING 0x2064
+#define PSB_TOPAZ_CLOCKGATING 0x2068
+#define PSB_HWSTAM 0x2098
+#define PSB_INSTPM 0x20C0
+#define PSB_INT_IDENTITY_R 0x20A4
+#define _MDFLD_PIPEC_EVENT_FLAG (1<<2)
+#define _MDFLD_PIPEC_VBLANK_FLAG (1<<3)
+#define _PSB_DPST_PIPEB_FLAG (1<<4)
+#define _MDFLD_PIPEB_EVENT_FLAG (1<<4)
+#define _PSB_VSYNC_PIPEB_FLAG (1<<5)
+#define _PSB_DPST_PIPEA_FLAG (1<<6)
+#define _PSB_PIPEA_EVENT_FLAG (1<<6)
+#define _PSB_VSYNC_PIPEA_FLAG (1<<7)
+#define _MDFLD_MIPIA_FLAG (1<<16)
+#define _MDFLD_MIPIC_FLAG (1<<17)
+#define _PSB_IRQ_SGX_FLAG (1<<18)
+#define _PSB_IRQ_MSVDX_FLAG (1<<19)
+#define _LNC_IRQ_TOPAZ_FLAG (1<<20)
+
+#define _PSB_PIPE_EVENT_FLAG (_PSB_VSYNC_PIPEA_FLAG | \
+ _PSB_VSYNC_PIPEB_FLAG)
+
+/* This flag includes all the display IRQ bits except the vblank IRQs. */
+#define _MDFLD_DISP_ALL_IRQ_FLAG (_MDFLD_PIPEC_EVENT_FLAG | \
+ _MDFLD_PIPEB_EVENT_FLAG | \
+ _PSB_PIPEA_EVENT_FLAG | \
+ _PSB_VSYNC_PIPEA_FLAG | \
+ _MDFLD_MIPIA_FLAG | \
+ _MDFLD_MIPIC_FLAG)
+#define PSB_INT_MASK_R 0x20A8
+#define PSB_INT_ENABLE_R 0x20A0
+
+#define _PSB_MMU_ER_MASK 0x0001FF00
+#define _PSB_MMU_ER_HOST (1 << 16)
+#define GPIOA 0x5010
+#define GPIOB 0x5014
+#define GPIOC 0x5018
+#define GPIOD 0x501c
+#define GPIOE 0x5020
+#define GPIOF 0x5024
+#define GPIOG 0x5028
+#define GPIOH 0x502c
+#define GPIO_CLOCK_DIR_MASK (1 << 0)
+#define GPIO_CLOCK_DIR_IN (0 << 1)
+#define GPIO_CLOCK_DIR_OUT (1 << 1)
+#define GPIO_CLOCK_VAL_MASK (1 << 2)
+#define GPIO_CLOCK_VAL_OUT (1 << 3)
+#define GPIO_CLOCK_VAL_IN (1 << 4)
+#define GPIO_CLOCK_PULLUP_DISABLE (1 << 5)
+#define GPIO_DATA_DIR_MASK (1 << 8)
+#define GPIO_DATA_DIR_IN (0 << 9)
+#define GPIO_DATA_DIR_OUT (1 << 9)
+#define GPIO_DATA_VAL_MASK (1 << 10)
+#define GPIO_DATA_VAL_OUT (1 << 11)
+#define GPIO_DATA_VAL_IN (1 << 12)
+#define GPIO_DATA_PULLUP_DISABLE (1 << 13)
+
+#define VCLK_DIVISOR_VGA0 0x6000
+#define VCLK_DIVISOR_VGA1 0x6004
+#define VCLK_POST_DIV 0x6010
+
+#define PSB_COMM_2D (PSB_ENGINE_2D << 4)
+#define PSB_COMM_3D (PSB_ENGINE_3D << 4)
+#define PSB_COMM_TA (PSB_ENGINE_TA << 4)
+#define PSB_COMM_HP (PSB_ENGINE_HP << 4)
+#define PSB_COMM_USER_IRQ (1024 >> 2)
+#define PSB_COMM_USER_IRQ_LOST (PSB_COMM_USER_IRQ + 1)
+#define PSB_COMM_FW (2048 >> 2)
+
+#define PSB_UIRQ_VISTEST 1
+#define PSB_UIRQ_OOM_REPLY 2
+#define PSB_UIRQ_FIRE_TA_REPLY 3
+#define PSB_UIRQ_FIRE_RASTER_REPLY 4
+
+#define PSB_2D_SIZE (256*1024*1024)
+#define PSB_MAX_RELOC_PAGES 1024
+
+#define PSB_LOW_REG_OFFS 0x0204
+#define PSB_HIGH_REG_OFFS 0x0600
+
+#define PSB_NUM_VBLANKS 2
+#define PSB_WATCHDOG_DELAY (DRM_HZ * 2)
+#define PSB_LID_DELAY (DRM_HZ / 10)
+
+#define MDFLD_PNW_B0 0x04
+#define MDFLD_PNW_C0 0x08
+
+#define MDFLD_DSR_2D_3D_0 (1 << 0)
+#define MDFLD_DSR_2D_3D_2 (1 << 1)
+#define MDFLD_DSR_CURSOR_0 (1 << 2)
+#define MDFLD_DSR_CURSOR_2 (1 << 3)
+#define MDFLD_DSR_OVERLAY_0 (1 << 4)
+#define MDFLD_DSR_OVERLAY_2 (1 << 5)
+#define MDFLD_DSR_MIPI_CONTROL (1 << 6)
+#define MDFLD_DSR_DAMAGE_MASK_0 ((1 << 0) | (1 << 2) | (1 << 4))
+#define MDFLD_DSR_DAMAGE_MASK_2 ((1 << 1) | (1 << 3) | (1 << 5))
+#define MDFLD_DSR_2D_3D (MDFLD_DSR_2D_3D_0 | MDFLD_DSR_2D_3D_2)
+
+#define MDFLD_DSR_RR 45
+#define MDFLD_DPU_ENABLE (1 << 31)
+#define MDFLD_DSR_FULLSCREEN (1 << 30)
+#define MDFLD_DSR_DELAY (DRM_HZ / MDFLD_DSR_RR)
+
+#define PSB_PWR_STATE_ON 1
+#define PSB_PWR_STATE_OFF 2
+
+#define PSB_PMPOLICY_NOPM 0
+#define PSB_PMPOLICY_CLOCKGATING 1
+#define PSB_PMPOLICY_POWERDOWN 2
+
+#define PSB_PMSTATE_POWERUP 0
+#define PSB_PMSTATE_CLOCKGATED 1
+#define PSB_PMSTATE_POWERDOWN 2
+#define PSB_PCIx_MSI_ADDR_LOC 0x94
+#define PSB_PCIx_MSI_DATA_LOC 0x98
+
+/* Medfield crystal settings */
+#define KSEL_CRYSTAL_19 1
+#define KSEL_BYPASS_19 5
+#define KSEL_BYPASS_25 6
+#define KSEL_BYPASS_83_100 7
+
+struct opregion_header;
+struct opregion_acpi;
+struct opregion_swsci;
+struct opregion_asle;
+
+struct psb_intel_opregion {
+ struct opregion_header *header;
+ struct opregion_acpi *acpi;
+ struct opregion_swsci *swsci;
+ struct opregion_asle *asle;
+ int enabled;
+};
+
+struct sdvo_device_mapping {
+ u8 initialized;
+ u8 dvo_port;
+ u8 slave_addr;
+ u8 dvo_wiring;
+ u8 i2c_pin;
+ u8 i2c_speed;
+ u8 ddc_pin;
+};
+
+struct intel_gmbus {
+ struct i2c_adapter adapter;
+ struct i2c_adapter *force_bit;
+ u32 reg0;
+};
+
+struct psb_ops;
+
+#define PSB_NUM_PIPE 3
+
+struct drm_psb_private {
+ struct drm_device *dev;
+ const struct psb_ops *ops;
+
+ struct psb_gtt gtt;
+
+ /* GTT Memory manager */
+ struct psb_gtt_mm *gtt_mm;
+ struct page *scratch_page;
+ u32 *gtt_map;
+ uint32_t stolen_base;
+ void *vram_addr;
+ unsigned long vram_stolen_size;
+ int gtt_initialized;
+ u16 gmch_ctrl; /* Saved GTT setup */
+ u32 pge_ctl;
+
+ struct mutex gtt_mutex;
+ struct resource *gtt_mem; /* Our PCI resource */
+
+ struct psb_mmu_driver *mmu;
+ struct psb_mmu_pd *pf_pd;
+
+ /*
+ * Register base
+ */
+
+ uint8_t *sgx_reg;
+ uint8_t *vdc_reg;
+ uint32_t gatt_free_offset;
+
+ /*
+ * Fencing / irq.
+ */
+
+ uint32_t vdc_irq_mask;
+ uint32_t pipestat[PSB_NUM_PIPE];
+
+ spinlock_t irqmask_lock;
+
+ /*
+ * Power
+ */
+
+ bool suspended;
+ bool display_power;
+ int display_count;
+
+ /*
+ * Modesetting
+ */
+ struct psb_intel_mode_device mode_dev;
+
+ struct drm_crtc *plane_to_crtc_mapping[PSB_NUM_PIPE];
+ struct drm_crtc *pipe_to_crtc_mapping[PSB_NUM_PIPE];
+ uint32_t num_pipe;
+
+ /*
+ * OSPM info (Power management base) (can go ?)
+ */
+ uint32_t ospm_base;
+
+ /*
+ * Sizes info
+ */
+
+ u32 fuse_reg_value;
+ u32 video_device_fuse;
+
+ /* PCI revision ID for B0:D2:F0 */
+ uint8_t platform_rev_id;
+
+ /* gmbus */
+ struct intel_gmbus *gmbus;
+
+ /* Used by SDVO */
+ int crt_ddc_pin;
+ /* FIXME: The mappings should be parsed from bios but for now we can
+ pretend there are no mappings available */
+ struct sdvo_device_mapping sdvo_mappings[2];
+ u32 hotplug_supported_mask;
+ struct drm_property *broadcast_rgb_property;
+ struct drm_property *force_audio_property;
+
+ /*
+ * LVDS info
+ */
+ int backlight_duty_cycle; /* restore backlight to this value */
+ bool panel_wants_dither;
+ struct drm_display_mode *panel_fixed_mode;
+ struct drm_display_mode *lfp_lvds_vbt_mode;
+ struct drm_display_mode *sdvo_lvds_vbt_mode;
+
+ struct bdb_lvds_backlight *lvds_bl; /* LVDS backlight info from VBT */
+ struct psb_intel_i2c_chan *lvds_i2c_bus; /* FIXME: Remove this? */
+
+ /* Feature bits from the VBIOS */
+ unsigned int int_tv_support:1;
+ unsigned int lvds_dither:1;
+ unsigned int lvds_vbt:1;
+ unsigned int int_crt_support:1;
+ unsigned int lvds_use_ssc:1;
+ int lvds_ssc_freq;
+ bool is_lvds_on;
+ bool is_mipi_on;
+ u32 mipi_ctrl_display;
+
+ unsigned int core_freq;
+ uint32_t iLVDS_enable;
+
+ /* Runtime PM state */
+ int rpm_enabled;
+
+ /* MID specific */
+ struct oaktrail_vbt vbt_data;
+ struct oaktrail_gct_data gct_data;
+
+ /* MIPI Panel type etc */
+ int panel_id;
+ bool dual_mipi; /* dual display - DPI & DBI */
+ bool dpi_panel_on; /* The DPI panel power is on */
+ bool dpi_panel_on2; /* The second DPI panel power is on */
+ bool dbi_panel_on; /* The DBI panel power is on */
+ bool dbi_panel_on2; /* The second DBI panel power is on */
+ u32 dsr_fb_update; /* DSR FB update counter */
+
+ /* Moorestown HDMI state */
+ struct oaktrail_hdmi_dev *hdmi_priv;
+
+ /* Moorestown pipe config register value cache */
+ uint32_t pipeconf;
+ uint32_t pipeconf1;
+ uint32_t pipeconf2;
+
+ /* Moorestown plane control register value cache */
+ uint32_t dspcntr;
+ uint32_t dspcntr1;
+ uint32_t dspcntr2;
+
+ /* Moorestown MM backlight cache */
+ uint8_t saveBKLTCNT;
+ uint8_t saveBKLTREQ;
+ uint8_t saveBKLTBRTL;
+
+ /*
+ * Register state
+ */
+ uint32_t saveDSPACNTR;
+ uint32_t saveDSPBCNTR;
+ uint32_t savePIPEACONF;
+ uint32_t savePIPEBCONF;
+ uint32_t savePIPEASRC;
+ uint32_t savePIPEBSRC;
+ uint32_t saveFPA0;
+ uint32_t saveFPA1;
+ uint32_t saveDPLL_A;
+ uint32_t saveDPLL_A_MD;
+ uint32_t saveHTOTAL_A;
+ uint32_t saveHBLANK_A;
+ uint32_t saveHSYNC_A;
+ uint32_t saveVTOTAL_A;
+ uint32_t saveVBLANK_A;
+ uint32_t saveVSYNC_A;
+ uint32_t saveDSPASTRIDE;
+ uint32_t saveDSPASIZE;
+ uint32_t saveDSPAPOS;
+ uint32_t saveDSPABASE;
+ uint32_t saveDSPASURF;
+ uint32_t saveDSPASTATUS;
+ uint32_t saveFPB0;
+ uint32_t saveFPB1;
+ uint32_t saveDPLL_B;
+ uint32_t saveDPLL_B_MD;
+ uint32_t saveHTOTAL_B;
+ uint32_t saveHBLANK_B;
+ uint32_t saveHSYNC_B;
+ uint32_t saveVTOTAL_B;
+ uint32_t saveVBLANK_B;
+ uint32_t saveVSYNC_B;
+ uint32_t saveDSPBSTRIDE;
+ uint32_t saveDSPBSIZE;
+ uint32_t saveDSPBPOS;
+ uint32_t saveDSPBBASE;
+ uint32_t saveDSPBSURF;
+ uint32_t saveDSPBSTATUS;
+ uint32_t saveVCLK_DIVISOR_VGA0;
+ uint32_t saveVCLK_DIVISOR_VGA1;
+ uint32_t saveVCLK_POST_DIV;
+ uint32_t saveVGACNTRL;
+ uint32_t saveADPA;
+ uint32_t saveLVDS;
+ uint32_t saveDVOA;
+ uint32_t saveDVOB;
+ uint32_t saveDVOC;
+ uint32_t savePP_ON;
+ uint32_t savePP_OFF;
+ uint32_t savePP_CONTROL;
+ uint32_t savePP_CYCLE;
+ uint32_t savePFIT_CONTROL;
+ uint32_t savePaletteA[256];
+ uint32_t savePaletteB[256];
+ uint32_t saveBLC_PWM_CTL2;
+ uint32_t saveBLC_PWM_CTL;
+ uint32_t saveCLOCKGATING;
+ uint32_t saveDSPARB;
+ uint32_t saveDSPATILEOFF;
+ uint32_t saveDSPBTILEOFF;
+ uint32_t saveDSPAADDR;
+ uint32_t saveDSPBADDR;
+ uint32_t savePFIT_AUTO_RATIOS;
+ uint32_t savePFIT_PGM_RATIOS;
+ uint32_t savePP_ON_DELAYS;
+ uint32_t savePP_OFF_DELAYS;
+ uint32_t savePP_DIVISOR;
+ uint32_t saveBSM;
+ uint32_t saveVBT;
+ uint32_t saveBCLRPAT_A;
+ uint32_t saveBCLRPAT_B;
+ uint32_t saveDSPALINOFF;
+ uint32_t saveDSPBLINOFF;
+ uint32_t savePERF_MODE;
+ uint32_t saveDSPFW1;
+ uint32_t saveDSPFW2;
+ uint32_t saveDSPFW3;
+ uint32_t saveDSPFW4;
+ uint32_t saveDSPFW5;
+ uint32_t saveDSPFW6;
+ uint32_t saveCHICKENBIT;
+ uint32_t saveDSPACURSOR_CTRL;
+ uint32_t saveDSPBCURSOR_CTRL;
+ uint32_t saveDSPACURSOR_BASE;
+ uint32_t saveDSPBCURSOR_BASE;
+ uint32_t saveDSPACURSOR_POS;
+ uint32_t saveDSPBCURSOR_POS;
+ uint32_t save_palette_a[256];
+ uint32_t save_palette_b[256];
+ uint32_t saveOV_OVADD;
+ uint32_t saveOV_OGAMC0;
+ uint32_t saveOV_OGAMC1;
+ uint32_t saveOV_OGAMC2;
+ uint32_t saveOV_OGAMC3;
+ uint32_t saveOV_OGAMC4;
+ uint32_t saveOV_OGAMC5;
+ uint32_t saveOVC_OVADD;
+ uint32_t saveOVC_OGAMC0;
+ uint32_t saveOVC_OGAMC1;
+ uint32_t saveOVC_OGAMC2;
+ uint32_t saveOVC_OGAMC3;
+ uint32_t saveOVC_OGAMC4;
+ uint32_t saveOVC_OGAMC5;
+
+ /* MSI reg save */
+ uint32_t msi_addr;
+ uint32_t msi_data;
+
+ /* Medfield specific register save state */
+ uint32_t saveHDMIPHYMISCCTL;
+ uint32_t saveHDMIB_CONTROL;
+ uint32_t saveDSPCCNTR;
+ uint32_t savePIPECCONF;
+ uint32_t savePIPECSRC;
+ uint32_t saveHTOTAL_C;
+ uint32_t saveHBLANK_C;
+ uint32_t saveHSYNC_C;
+ uint32_t saveVTOTAL_C;
+ uint32_t saveVBLANK_C;
+ uint32_t saveVSYNC_C;
+ uint32_t saveDSPCSTRIDE;
+ uint32_t saveDSPCSIZE;
+ uint32_t saveDSPCPOS;
+ uint32_t saveDSPCSURF;
+ uint32_t saveDSPCSTATUS;
+ uint32_t saveDSPCLINOFF;
+ uint32_t saveDSPCTILEOFF;
+ uint32_t saveDSPCCURSOR_CTRL;
+ uint32_t saveDSPCCURSOR_BASE;
+ uint32_t saveDSPCCURSOR_POS;
+ uint32_t save_palette_c[256];
+ uint32_t saveOV_OVADD_C;
+ uint32_t saveOV_OGAMC0_C;
+ uint32_t saveOV_OGAMC1_C;
+ uint32_t saveOV_OGAMC2_C;
+ uint32_t saveOV_OGAMC3_C;
+ uint32_t saveOV_OGAMC4_C;
+ uint32_t saveOV_OGAMC5_C;
+
+ /* DSI register save */
+ uint32_t saveDEVICE_READY_REG;
+ uint32_t saveINTR_EN_REG;
+ uint32_t saveDSI_FUNC_PRG_REG;
+ uint32_t saveHS_TX_TIMEOUT_REG;
+ uint32_t saveLP_RX_TIMEOUT_REG;
+ uint32_t saveTURN_AROUND_TIMEOUT_REG;
+ uint32_t saveDEVICE_RESET_REG;
+ uint32_t saveDPI_RESOLUTION_REG;
+ uint32_t saveHORIZ_SYNC_PAD_COUNT_REG;
+ uint32_t saveHORIZ_BACK_PORCH_COUNT_REG;
+ uint32_t saveHORIZ_FRONT_PORCH_COUNT_REG;
+ uint32_t saveHORIZ_ACTIVE_AREA_COUNT_REG;
+ uint32_t saveVERT_SYNC_PAD_COUNT_REG;
+ uint32_t saveVERT_BACK_PORCH_COUNT_REG;
+ uint32_t saveVERT_FRONT_PORCH_COUNT_REG;
+ uint32_t saveHIGH_LOW_SWITCH_COUNT_REG;
+ uint32_t saveINIT_COUNT_REG;
+ uint32_t saveMAX_RET_PAK_REG;
+ uint32_t saveVIDEO_FMT_REG;
+ uint32_t saveEOT_DISABLE_REG;
+ uint32_t saveLP_BYTECLK_REG;
+ uint32_t saveHS_LS_DBI_ENABLE_REG;
+ uint32_t saveTXCLKESC_REG;
+ uint32_t saveDPHY_PARAM_REG;
+ uint32_t saveMIPI_CONTROL_REG;
+ uint32_t saveMIPI;
+ uint32_t saveMIPI_C;
+
+ /* DPST register save */
+ uint32_t saveHISTOGRAM_INT_CONTROL_REG;
+ uint32_t saveHISTOGRAM_LOGIC_CONTROL_REG;
+ uint32_t savePWM_CONTROL_LOGIC;
+
+ /*
+ * DSI info.
+ */
+ void *dbi_dsr_info;
+ void *dbi_dpu_info;
+ void *dsi_configs[2];
+ /*
+ * LID-Switch
+ */
+ spinlock_t lid_lock;
+ struct timer_list lid_timer;
+ struct psb_intel_opregion opregion;
+ u32 *lid_state;
+ u32 lid_last_state;
+
+ /*
+ * Watchdog
+ */
+
+ uint32_t apm_reg;
+ uint16_t apm_base;
+
+ /*
+ * Used for modifying backlight from
+ * xrandr -- consider removing and using HAL instead
+ */
+ struct backlight_device *backlight_device;
+ struct drm_property *backlight_property;
+ uint32_t blc_adj1;
+ uint32_t blc_adj2;
+
+ void *fbdev;
+
+ /* 2D acceleration */
+ spinlock_t lock_2d;
+};
+
+
+/*
+ * Operations for each board type
+ */
+
+struct psb_ops {
+ const char *name;
+ unsigned int accel_2d:1;
+ int pipes; /* Number of output pipes */
+ int crtcs; /* Number of CRTCs */
+ int sgx_offset; /* Base offset of SGX device */
+
+ /* Sub functions */
+ struct drm_crtc_helper_funcs const *crtc_helper;
+ struct drm_crtc_funcs const *crtc_funcs;
+
+ /* Setup hooks */
+ int (*chip_setup)(struct drm_device *dev);
+ void (*chip_teardown)(struct drm_device *dev);
+
+ /* Display management hooks */
+ int (*output_init)(struct drm_device *dev);
+ /* Power management hooks */
+ void (*init_pm)(struct drm_device *dev);
+ int (*save_regs)(struct drm_device *dev);
+ int (*restore_regs)(struct drm_device *dev);
+ int (*power_up)(struct drm_device *dev);
+ int (*power_down)(struct drm_device *dev);
+
+ void (*lvds_bl_power)(struct drm_device *dev, bool on);
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+ /* Backlight */
+ int (*backlight_init)(struct drm_device *dev);
+#endif
+ int i2c_bus; /* I2C bus identifier for Moorestown */
+};
+
+
+
+struct psb_mmu_driver;
+
+extern int drm_crtc_probe_output_modes(struct drm_device *dev, int, int);
+extern int drm_pick_crtcs(struct drm_device *dev);
+
+static inline struct drm_psb_private *psb_priv(struct drm_device *dev)
+{
+ return (struct drm_psb_private *) dev->dev_private;
+}
+
+/*
+ * MMU stuff.
+ */
+
+extern struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
+ int trap_pagefaults,
+ int invalid_type,
+ struct drm_psb_private *dev_priv);
+extern void psb_mmu_driver_takedown(struct psb_mmu_driver *driver);
+extern struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver
+ *driver);
+extern void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd, uint32_t mmu_offset,
+ uint32_t gtt_start, uint32_t gtt_pages);
+extern struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
+ int trap_pagefaults,
+ int invalid_type);
+extern void psb_mmu_free_pagedir(struct psb_mmu_pd *pd);
+extern void psb_mmu_flush(struct psb_mmu_driver *driver, int rc_prot);
+extern void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
+ unsigned long address,
+ uint32_t num_pages);
+extern int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd,
+ uint32_t start_pfn,
+ unsigned long address,
+ uint32_t num_pages, int type);
+extern int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
+ unsigned long *pfn);
+
+/*
+ * Enable / disable MMU for different requestors.
+ */
+
+
+extern void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context);
+extern int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
+ unsigned long address, uint32_t num_pages,
+ uint32_t desired_tile_stride,
+ uint32_t hw_tile_stride, int type);
+extern void psb_mmu_remove_pages(struct psb_mmu_pd *pd,
+ unsigned long address, uint32_t num_pages,
+ uint32_t desired_tile_stride,
+ uint32_t hw_tile_stride);
+/*
+ * psb_irq.c
+ */
+
+extern irqreturn_t psb_irq_handler(DRM_IRQ_ARGS);
+extern int psb_irq_enable_dpst(struct drm_device *dev);
+extern int psb_irq_disable_dpst(struct drm_device *dev);
+extern void psb_irq_preinstall(struct drm_device *dev);
+extern int psb_irq_postinstall(struct drm_device *dev);
+extern void psb_irq_uninstall(struct drm_device *dev);
+extern void psb_irq_turn_on_dpst(struct drm_device *dev);
+extern void psb_irq_turn_off_dpst(struct drm_device *dev);
+
+extern void psb_irq_uninstall_islands(struct drm_device *dev, int hw_islands);
+extern int psb_vblank_wait2(struct drm_device *dev, unsigned int *sequence);
+extern int psb_vblank_wait(struct drm_device *dev, unsigned int *sequence);
+extern int psb_enable_vblank(struct drm_device *dev, int crtc);
+extern void psb_disable_vblank(struct drm_device *dev, int crtc);
+void
+psb_enable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask);
+
+void
+psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask);
+
+extern u32 psb_get_vblank_counter(struct drm_device *dev, int crtc);
+
+/*
+ * intel_opregion.c
+ */
+extern int gma_intel_opregion_init(struct drm_device *dev);
+extern int gma_intel_opregion_exit(struct drm_device *dev);
+
+/*
+ * framebuffer.c
+ */
+extern int psbfb_probed(struct drm_device *dev);
+extern int psbfb_remove(struct drm_device *dev,
+ struct drm_framebuffer *fb);
+/*
+ * accel_2d.c
+ */
+extern void psbfb_copyarea(struct fb_info *info,
+ const struct fb_copyarea *region);
+extern int psbfb_sync(struct fb_info *info);
+extern void psb_spank(struct drm_psb_private *dev_priv);
+
+/*
+ * psb_reset.c
+ */
+
+extern void psb_lid_timer_init(struct drm_psb_private *dev_priv);
+extern void psb_lid_timer_takedown(struct drm_psb_private *dev_priv);
+extern void psb_print_pagefault(struct drm_psb_private *dev_priv);
+
+/* modesetting */
+extern void psb_modeset_init(struct drm_device *dev);
+extern void psb_modeset_cleanup(struct drm_device *dev);
+extern int psb_fbdev_init(struct drm_device *dev);
+
+/* backlight.c */
+int gma_backlight_init(struct drm_device *dev);
+void gma_backlight_exit(struct drm_device *dev);
+
+/* oaktrail_crtc.c */
+extern const struct drm_crtc_helper_funcs oaktrail_helper_funcs;
+
+/* oaktrail_lvds.c */
+extern void oaktrail_lvds_init(struct drm_device *dev,
+ struct psb_intel_mode_device *mode_dev);
+
+/* psb_intel_display.c */
+extern const struct drm_crtc_helper_funcs psb_intel_helper_funcs;
+extern const struct drm_crtc_funcs psb_intel_crtc_funcs;
+
+/* psb_intel_lvds.c */
+extern const struct drm_connector_helper_funcs
+ psb_intel_lvds_connector_helper_funcs;
+extern const struct drm_connector_funcs psb_intel_lvds_connector_funcs;
+
+/* gem.c */
+extern int psb_gem_init_object(struct drm_gem_object *obj);
+extern void psb_gem_free_object(struct drm_gem_object *obj);
+extern int psb_gem_get_aperture(struct drm_device *dev, void *data,
+ struct drm_file *file);
+extern int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+extern int psb_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
+ uint32_t handle);
+extern int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
+ uint32_t handle, uint64_t *offset);
+extern int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+extern int psb_gem_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+extern int psb_gem_mmap_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+
+/* psb_device.c */
+extern const struct psb_ops psb_chip_ops;
+
+/* oaktrail_device.c */
+extern const struct psb_ops oaktrail_chip_ops;
+
+/* cdv_device.c */
+extern const struct psb_ops cdv_chip_ops;
+
+/*
+ * Debug print bits setting
+ */
+#define PSB_D_GENERAL (1 << 0)
+#define PSB_D_INIT (1 << 1)
+#define PSB_D_IRQ (1 << 2)
+#define PSB_D_ENTRY (1 << 3)
+/* Debug the H/V back porch / front porch counts */
+#define PSB_D_HV (1 << 4)
+#define PSB_D_DBI_BF (1 << 5)
+#define PSB_D_PM (1 << 6)
+#define PSB_D_RENDER (1 << 7)
+#define PSB_D_REG (1 << 8)
+#define PSB_D_MSVDX (1 << 9)
+#define PSB_D_TOPAZ (1 << 10)
+
+extern int drm_psb_no_fb;
+extern int drm_idle_check_interval;
+
+/*
+ * Utilities
+ */
+
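+/*
+ * Sideband message bus helpers: a command word (opcode, port, register
+ * offset) is written to config offset 0xD0 of the host bridge at 0:0.0
+ * and the data is transferred through config offset 0xD4.
+ */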
+static inline u32 MRST_MSG_READ32(uint port, uint offset)
+{
+ int mcr = (0xD0<<24) | (port << 16) | (offset << 8);
+ uint32_t ret_val = 0;
+ struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+ pci_write_config_dword(pci_root, 0xD0, mcr);
+ pci_read_config_dword(pci_root, 0xD4, &ret_val);
+ pci_dev_put(pci_root);
+ return ret_val;
+}
+static inline void MRST_MSG_WRITE32(uint port, uint offset, u32 value)
+{
+ int mcr = (0xE0<<24) | (port << 16) | (offset << 8) | 0xF0;
+ struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+ pci_write_config_dword(pci_root, 0xD4, value);
+ pci_write_config_dword(pci_root, 0xD0, mcr);
+ pci_dev_put(pci_root);
+}
+static inline u32 MDFLD_MSG_READ32(uint port, uint offset)
+{
+ int mcr = (0x10<<24) | (port << 16) | (offset << 8);
+ uint32_t ret_val = 0;
+ struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+ pci_write_config_dword(pci_root, 0xD0, mcr);
+ pci_read_config_dword(pci_root, 0xD4, &ret_val);
+ pci_dev_put(pci_root);
+ return ret_val;
+}
+static inline void MDFLD_MSG_WRITE32(uint port, uint offset, u32 value)
+{
+ int mcr = (0x11<<24) | (port << 16) | (offset << 8) | 0xF0;
+ struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+ pci_write_config_dword(pci_root, 0xD4, value);
+ pci_write_config_dword(pci_root, 0xD0, mcr);
+ pci_dev_put(pci_root);
+}
+
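+/*
+ * MMIO accessors: these resolve against dev_priv->vdc_reg, so the
+ * REG_READ()/REG_WRITE() wrappers can only be used where a 'dev'
+ * pointer is in scope.
+ */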
+static inline uint32_t REGISTER_READ(struct drm_device *dev, uint32_t reg)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ return ioread32(dev_priv->vdc_reg + reg);
+}
+
+#define REG_READ(reg) REGISTER_READ(dev, (reg))
+
+static inline void REGISTER_WRITE(struct drm_device *dev, uint32_t reg,
+ uint32_t val)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ iowrite32((val), dev_priv->vdc_reg + (reg));
+}
+
+#define REG_WRITE(reg, val) REGISTER_WRITE(dev, (reg), (val))
+
+static inline void REGISTER_WRITE16(struct drm_device *dev,
+ uint32_t reg, uint32_t val)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ iowrite16((val), dev_priv->vdc_reg + (reg));
+}
+
+#define REG_WRITE16(reg, val) REGISTER_WRITE16(dev, (reg), (val))
+
+static inline void REGISTER_WRITE8(struct drm_device *dev,
+ uint32_t reg, uint32_t val)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ iowrite8((val), dev_priv->vdc_reg + (reg));
+}
+
+#define REG_WRITE8(reg, val) REGISTER_WRITE8(dev, (reg), (val))
+
+#define PSB_WVDC32(_val, _offs) iowrite32(_val, dev_priv->vdc_reg + (_offs))
+#define PSB_RVDC32(_offs) ioread32(dev_priv->vdc_reg + (_offs))
+
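+/*
+ * Debug aid: with TRAP_SGX_PM_FAULT defined, every SGX register read
+ * first checks the APM status and complains loudly if the SGX island
+ * looks powered down.
+ */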
+/* #define TRAP_SGX_PM_FAULT 1 */
+#ifdef TRAP_SGX_PM_FAULT
+#define PSB_RSGX32(_offs) \
+({ \
+ if (inl(dev_priv->apm_base + PSB_APM_STS) & 0x3) { \
+ printk(KERN_ERR \
+ "access sgx when it's off!! (READ) %s, %d\n", \
+ __FILE__, __LINE__); \
+ mdelay(1000); \
+ } \
+ ioread32(dev_priv->sgx_reg + (_offs)); \
+})
+#else
+#define PSB_RSGX32(_offs) ioread32(dev_priv->sgx_reg + (_offs))
+#endif
+#define PSB_WSGX32(_val, _offs) iowrite32(_val, dev_priv->sgx_reg + (_offs))
+
+#define MSVDX_REG_DUMP 0
+
+#define PSB_WMSVDX32(_val, _offs) iowrite32(_val, dev_priv->msvdx_reg + (_offs))
+#define PSB_RMSVDX32(_offs) ioread32(dev_priv->msvdx_reg + (_offs))
+
+#endif
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
new file mode 100644
index 000000000000..49e983508d5c
--- /dev/null
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -0,0 +1,1446 @@
+/*
+ * Copyright © 2006-2011 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ */
+
+#include <linux/i2c.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/drmP.h>
+#include "framebuffer.h"
+#include "psb_drv.h"
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "psb_intel_display.h"
+#include "power.h"
+
+struct psb_intel_clock_t {
+ /* given values */
+ int n;
+ int m1, m2;
+ int p1, p2;
+ /* derived values */
+ int dot;
+ int vco;
+ int m;
+ int p;
+};
+
+struct psb_intel_range_t {
+ int min, max;
+};
+
+struct psb_intel_p2_t {
+ int dot_limit;
+ int p2_slow, p2_fast;
+};
+
+#define INTEL_P2_NUM 2
+
+struct psb_intel_limit_t {
+ struct psb_intel_range_t dot, vco, n, m, m1, m2, p, p1;
+ struct psb_intel_p2_t p2;
+};
+
+#define I8XX_DOT_MIN 25000
+#define I8XX_DOT_MAX 350000
+#define I8XX_VCO_MIN 930000
+#define I8XX_VCO_MAX 1400000
+#define I8XX_N_MIN 3
+#define I8XX_N_MAX 16
+#define I8XX_M_MIN 96
+#define I8XX_M_MAX 140
+#define I8XX_M1_MIN 18
+#define I8XX_M1_MAX 26
+#define I8XX_M2_MIN 6
+#define I8XX_M2_MAX 16
+#define I8XX_P_MIN 4
+#define I8XX_P_MAX 128
+#define I8XX_P1_MIN 2
+#define I8XX_P1_MAX 33
+#define I8XX_P1_LVDS_MIN 1
+#define I8XX_P1_LVDS_MAX 6
+#define I8XX_P2_SLOW 4
+#define I8XX_P2_FAST 2
+#define I8XX_P2_LVDS_SLOW 14
+#define I8XX_P2_LVDS_FAST 14 /* No fast option */
+#define I8XX_P2_SLOW_LIMIT 165000
+
+#define I9XX_DOT_MIN 20000
+#define I9XX_DOT_MAX 400000
+#define I9XX_VCO_MIN 1400000
+#define I9XX_VCO_MAX 2800000
+#define I9XX_N_MIN 3
+#define I9XX_N_MAX 8
+#define I9XX_M_MIN 70
+#define I9XX_M_MAX 120
+#define I9XX_M1_MIN 10
+#define I9XX_M1_MAX 20
+#define I9XX_M2_MIN 5
+#define I9XX_M2_MAX 9
+#define I9XX_P_SDVO_DAC_MIN 5
+#define I9XX_P_SDVO_DAC_MAX 80
+#define I9XX_P_LVDS_MIN 7
+#define I9XX_P_LVDS_MAX 98
+#define I9XX_P1_MIN 1
+#define I9XX_P1_MAX 8
+#define I9XX_P2_SDVO_DAC_SLOW 10
+#define I9XX_P2_SDVO_DAC_FAST 5
+#define I9XX_P2_SDVO_DAC_SLOW_LIMIT 200000
+#define I9XX_P2_LVDS_SLOW 14
+#define I9XX_P2_LVDS_FAST 7
+#define I9XX_P2_LVDS_SLOW_LIMIT 112000
+
+#define INTEL_LIMIT_I8XX_DVO_DAC 0
+#define INTEL_LIMIT_I8XX_LVDS 1
+#define INTEL_LIMIT_I9XX_SDVO_DAC 2
+#define INTEL_LIMIT_I9XX_LVDS 3
+
+static const struct psb_intel_limit_t psb_intel_limits[] = {
+ { /* INTEL_LIMIT_I8XX_DVO_DAC */
+ .dot = {.min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX},
+ .vco = {.min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX},
+ .n = {.min = I8XX_N_MIN, .max = I8XX_N_MAX},
+ .m = {.min = I8XX_M_MIN, .max = I8XX_M_MAX},
+ .m1 = {.min = I8XX_M1_MIN, .max = I8XX_M1_MAX},
+ .m2 = {.min = I8XX_M2_MIN, .max = I8XX_M2_MAX},
+ .p = {.min = I8XX_P_MIN, .max = I8XX_P_MAX},
+ .p1 = {.min = I8XX_P1_MIN, .max = I8XX_P1_MAX},
+ .p2 = {.dot_limit = I8XX_P2_SLOW_LIMIT,
+ .p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST},
+ },
+ { /* INTEL_LIMIT_I8XX_LVDS */
+ .dot = {.min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX},
+ .vco = {.min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX},
+ .n = {.min = I8XX_N_MIN, .max = I8XX_N_MAX},
+ .m = {.min = I8XX_M_MIN, .max = I8XX_M_MAX},
+ .m1 = {.min = I8XX_M1_MIN, .max = I8XX_M1_MAX},
+ .m2 = {.min = I8XX_M2_MIN, .max = I8XX_M2_MAX},
+ .p = {.min = I8XX_P_MIN, .max = I8XX_P_MAX},
+ .p1 = {.min = I8XX_P1_LVDS_MIN, .max = I8XX_P1_LVDS_MAX},
+ .p2 = {.dot_limit = I8XX_P2_SLOW_LIMIT,
+ .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST},
+ },
+ { /* INTEL_LIMIT_I9XX_SDVO_DAC */
+ .dot = {.min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX},
+ .vco = {.min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX},
+ .n = {.min = I9XX_N_MIN, .max = I9XX_N_MAX},
+ .m = {.min = I9XX_M_MIN, .max = I9XX_M_MAX},
+ .m1 = {.min = I9XX_M1_MIN, .max = I9XX_M1_MAX},
+ .m2 = {.min = I9XX_M2_MIN, .max = I9XX_M2_MAX},
+ .p = {.min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX},
+ .p1 = {.min = I9XX_P1_MIN, .max = I9XX_P1_MAX},
+ .p2 = {.dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
+ .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast =
+ I9XX_P2_SDVO_DAC_FAST},
+ },
+ { /* INTEL_LIMIT_I9XX_LVDS */
+ .dot = {.min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX},
+ .vco = {.min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX},
+ .n = {.min = I9XX_N_MIN, .max = I9XX_N_MAX},
+ .m = {.min = I9XX_M_MIN, .max = I9XX_M_MAX},
+ .m1 = {.min = I9XX_M1_MIN, .max = I9XX_M1_MAX},
+ .m2 = {.min = I9XX_M2_MIN, .max = I9XX_M2_MAX},
+ .p = {.min = I9XX_P_LVDS_MIN, .max = I9XX_P_LVDS_MAX},
+ .p1 = {.min = I9XX_P1_MIN, .max = I9XX_P1_MAX},
+ /* The single-channel range is 25-112MHz, and dual-channel
+ * is 80-224MHz. Prefer single channel as much as possible.
+ */
+ .p2 = {.dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
+ .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST},
+ },
+};
+
+static const struct psb_intel_limit_t *psb_intel_limit(struct drm_crtc *crtc)
+{
+ const struct psb_intel_limit_t *limit;
+
+ if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+ limit = &psb_intel_limits[INTEL_LIMIT_I9XX_LVDS];
+ else
+ limit = &psb_intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC];
+ return limit;
+}
+
+/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
+
+static void i8xx_clock(int refclk, struct psb_intel_clock_t *clock)
+{
+ clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
+ clock->p = clock->p1 * clock->p2;
+ clock->vco = refclk * clock->m / (clock->n + 2);
+ clock->dot = clock->vco / clock->p;
+}
+
+/** Derive the pixel clock for the given refclk and divisors for 9xx chips. */
+
+static void i9xx_clock(int refclk, struct psb_intel_clock_t *clock)
+{
+ clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
+ clock->p = clock->p1 * clock->p2;
+ clock->vco = refclk * clock->m / (clock->n + 2);
+ clock->dot = clock->vco / clock->p;
+}
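+
+/*
+ * Both helpers implement the same divider equation:
+ * dot = refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / (p1 * p2)
+ * For example refclk = 96000 kHz, n = 3, m1 = 12, m2 = 9, p1 = 2, p2 = 10
+ * gives vco = 96000 * 81 / 5 = 1555200 kHz and dot = 1555200 / 20 =
+ * 77760 kHz, i.e. a 77.76 MHz pixel clock.
+ */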
+
+static void psb_intel_clock(struct drm_device *dev, int refclk,
+ struct psb_intel_clock_t *clock)
+{
+ i9xx_clock(refclk, clock);
+}
+
+/**
+ * Returns whether any output on the specified pipe is of the specified type
+ */
+bool psb_intel_pipe_has_type(struct drm_crtc *crtc, int type)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct drm_connector *l_entry;
+
+ list_for_each_entry(l_entry, &mode_config->connector_list, head) {
+ if (l_entry->encoder && l_entry->encoder->crtc == crtc) {
+ struct psb_intel_encoder *psb_intel_encoder =
+ psb_intel_attached_encoder(l_entry);
+ if (psb_intel_encoder->type == type)
+ return true;
+ }
+ }
+ return false;
+}
+
+#define INTELPllInvalid(s) { /* ErrorF (s) */; return false; }
+/**
+ * Returns whether the given set of divisors are valid for a given refclk with
+ * the given connectors.
+ */
+
+static bool psb_intel_PLL_is_valid(struct drm_crtc *crtc,
+ struct psb_intel_clock_t *clock)
+{
+ const struct psb_intel_limit_t *limit = psb_intel_limit(crtc);
+
+ if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
+ INTELPllInvalid("p1 out of range\n");
+ if (clock->p < limit->p.min || limit->p.max < clock->p)
+ INTELPllInvalid("p out of range\n");
+ if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
+ INTELPllInvalid("m2 out of range\n");
+ if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
+ INTELPllInvalid("m1 out of range\n");
+ if (clock->m1 <= clock->m2)
+ INTELPllInvalid("m1 <= m2\n");
+ if (clock->m < limit->m.min || limit->m.max < clock->m)
+ INTELPllInvalid("m out of range\n");
+ if (clock->n < limit->n.min || limit->n.max < clock->n)
+ INTELPllInvalid("n out of range\n");
+ if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
+ INTELPllInvalid("vco out of range\n");
+ /* XXX: We may need to be checking "Dot clock"
+ * depending on the multiplier, connector, etc.,
+ * rather than just a single range.
+ */
+ if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
+ INTELPllInvalid("dot out of range\n");
+
+ return true;
+}
+
+/**
+ * Finds a set of divisors for the desired target clock with the given
+ * refclk; returns false if no valid combination exists. The returned
+ * values represent the clock equation:
+ * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+ */
+static bool psb_intel_find_best_PLL(struct drm_crtc *crtc, int target,
+ int refclk,
+ struct psb_intel_clock_t *best_clock)
+{
+ struct drm_device *dev = crtc->dev;
+ struct psb_intel_clock_t clock;
+ const struct psb_intel_limit_t *limit = psb_intel_limit(crtc);
+ int err = target;
+
+ if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
+ (REG_READ(LVDS) & LVDS_PORT_EN) != 0) {
+ /*
+ * For LVDS, if the panel is on, just rely on its current
+ * settings for dual-channel. We haven't figured out how to
+ * reliably set up different single/dual channel state, if we
+ * even can.
+ */
+ if ((REG_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
+ LVDS_CLKB_POWER_UP)
+ clock.p2 = limit->p2.p2_fast;
+ else
+ clock.p2 = limit->p2.p2_slow;
+ } else {
+ if (target < limit->p2.dot_limit)
+ clock.p2 = limit->p2.p2_slow;
+ else
+ clock.p2 = limit->p2.p2_fast;
+ }
+
+ memset(best_clock, 0, sizeof(*best_clock));
+
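+ /*
+ * Brute-force search of the divisor space; 'err' starts at 'target'
+ * so any valid combination closer than that is remembered, and the
+ * return value reports whether one was found.
+ */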
+ for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
+ clock.m1++) {
+ for (clock.m2 = limit->m2.min;
+ clock.m2 < clock.m1 && clock.m2 <= limit->m2.max;
+ clock.m2++) {
+ for (clock.n = limit->n.min;
+ clock.n <= limit->n.max; clock.n++) {
+ for (clock.p1 = limit->p1.min;
+ clock.p1 <= limit->p1.max;
+ clock.p1++) {
+ int this_err;
+
+ psb_intel_clock(dev, refclk, &clock);
+
+ if (!psb_intel_PLL_is_valid
+ (crtc, &clock))
+ continue;
+
+ this_err = abs(clock.dot - target);
+ if (this_err < err) {
+ *best_clock = clock;
+ err = this_err;
+ }
+ }
+ }
+ }
+ }
+
+ return err != target;
+}
+
+void psb_intel_wait_for_vblank(struct drm_device *dev)
+{
+ /* Wait for 20ms, i.e. one cycle at 50Hz. */
+ mdelay(20);
+}
+
+int psb_intel_pipe_set_base(struct drm_crtc *crtc,
+ int x, int y, struct drm_framebuffer *old_fb)
+{
+ struct drm_device *dev = crtc->dev;
+ /* struct drm_i915_master_private *master_priv; */
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
+ int pipe = psb_intel_crtc->pipe;
+ unsigned long start, offset;
+ int dspbase = (pipe == 0 ? DSPABASE : DSPBBASE);
+ int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF);
+ int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
+ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
+ u32 dspcntr;
+ int ret = 0;
+
+ if (!gma_power_begin(dev, true))
+ return 0;
+
+ /* no fb bound */
+ if (!crtc->fb) {
+ dev_dbg(dev->dev, "No FB bound\n");
+ goto psb_intel_pipe_cleaner;
+ }
+
+ /* We are displaying this buffer, make sure it is actually loaded
+ into the GTT */
+ ret = psb_gtt_pin(psbfb->gtt);
+ if (ret < 0)
+ goto psb_intel_pipe_set_base_exit;
+ start = psbfb->gtt->offset;
+
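+ /* Byte offset of the (x, y) origin: y lines of pitch bytes plus x pixels */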
+ offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
+
+ REG_WRITE(dspstride, crtc->fb->pitches[0]);
+
+ dspcntr = REG_READ(dspcntr_reg);
+ dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
+
+ switch (crtc->fb->bits_per_pixel) {
+ case 8:
+ dspcntr |= DISPPLANE_8BPP;
+ break;
+ case 16:
+ if (crtc->fb->depth == 15)
+ dspcntr |= DISPPLANE_15_16BPP;
+ else
+ dspcntr |= DISPPLANE_16BPP;
+ break;
+ case 24:
+ case 32:
+ dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
+ break;
+ default:
+ dev_err(dev->dev, "Unknown color depth\n");
+ ret = -EINVAL;
+ psb_gtt_unpin(psbfb->gtt);
+ goto psb_intel_pipe_set_base_exit;
+ }
+ REG_WRITE(dspcntr_reg, dspcntr);
+
+
+ if (0 /* FIXMEAC - check what PSB needs */) {
+ REG_WRITE(dspbase, offset);
+ REG_READ(dspbase);
+ REG_WRITE(dspsurf, start);
+ REG_READ(dspsurf);
+ } else {
+ REG_WRITE(dspbase, start + offset);
+ REG_READ(dspbase);
+ }
+
+psb_intel_pipe_cleaner:
+ /* If there was a previous display we can now unpin it */
+ if (old_fb)
+ psb_gtt_unpin(to_psb_fb(old_fb)->gtt);
+
+psb_intel_pipe_set_base_exit:
+ gma_power_end(dev);
+ return ret;
+}
+
+/**
+ * Sets the power management mode of the pipe and plane.
+ *
+ * This code should probably grow support for turning the cursor off and back
+ * on appropriately at the same time as we're turning the pipe off/on.
+ */
+static void psb_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct drm_device *dev = crtc->dev;
+ /* struct drm_i915_master_private *master_priv; */
+ /* struct drm_i915_private *dev_priv = dev->dev_private; */
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ int pipe = psb_intel_crtc->pipe;
+ int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
+ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
+ int dspbase_reg = (pipe == 0) ? DSPABASE : DSPBBASE;
+ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+ u32 temp;
+ bool enabled;
+
+ /* XXX: When our outputs are all unaware of DPMS modes other than off
+ * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
+ */
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ /* Enable the DPLL */
+ temp = REG_READ(dpll_reg);
+ if ((temp & DPLL_VCO_ENABLE) == 0) {
+ REG_WRITE(dpll_reg, temp);
+ REG_READ(dpll_reg);
+ /* Wait for the clocks to stabilize. */
+ udelay(150);
+ REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
+ REG_READ(dpll_reg);
+ /* Wait for the clocks to stabilize. */
+ udelay(150);
+ REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
+ REG_READ(dpll_reg);
+ /* Wait for the clocks to stabilize. */
+ udelay(150);
+ }
+
+ /* Enable the pipe */
+ temp = REG_READ(pipeconf_reg);
+ if ((temp & PIPEACONF_ENABLE) == 0)
+ REG_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
+
+ /* Enable the plane */
+ temp = REG_READ(dspcntr_reg);
+ if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
+ REG_WRITE(dspcntr_reg,
+ temp | DISPLAY_PLANE_ENABLE);
+ /* Flush the plane changes */
+ REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+ }
+
+ psb_intel_crtc_load_lut(crtc);
+
+ /* Give the overlay scaler a chance to enable
+ * if it's on this pipe */
+ /* psb_intel_crtc_dpms_video(crtc, true); TODO */
+ break;
+ case DRM_MODE_DPMS_OFF:
+ /* Give the overlay scaler a chance to disable
+ * if it's on this pipe */
+ /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
+
+ /* Disable the VGA plane that we never use */
+ REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
+
+ /* Disable display plane */
+ temp = REG_READ(dspcntr_reg);
+ if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
+ REG_WRITE(dspcntr_reg,
+ temp & ~DISPLAY_PLANE_ENABLE);
+ /* Flush the plane changes */
+ REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+ REG_READ(dspbase_reg);
+ }
+
+ /* Next, disable display pipes */
+ temp = REG_READ(pipeconf_reg);
+ if ((temp & PIPEACONF_ENABLE) != 0) {
+ REG_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
+ REG_READ(pipeconf_reg);
+ }
+
+ /* Wait for vblank for the disable to take effect. */
+ psb_intel_wait_for_vblank(dev);
+
+ temp = REG_READ(dpll_reg);
+ if ((temp & DPLL_VCO_ENABLE) != 0) {
+ REG_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
+ REG_READ(dpll_reg);
+ }
+
+ /* Wait for the clocks to turn off. */
+ udelay(150);
+ break;
+ }
+
+ enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
+
+ /* Set FIFO watermarks */
+ REG_WRITE(DSPARB, 0x3F3E);
+}
+
+static void psb_intel_crtc_prepare(struct drm_crtc *crtc)
+{
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+static void psb_intel_crtc_commit(struct drm_crtc *crtc)
+{
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
+}
+
+void psb_intel_encoder_prepare(struct drm_encoder *encoder)
+{
+ struct drm_encoder_helper_funcs *encoder_funcs =
+ encoder->helper_private;
+ /* LVDS has its own version of prepare; see psb_intel_lvds_prepare */
+ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+void psb_intel_encoder_commit(struct drm_encoder *encoder)
+{
+ struct drm_encoder_helper_funcs *encoder_funcs =
+ encoder->helper_private;
+ /* LVDS has its own version of commit; see psb_intel_lvds_commit */
+ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
+}
+
+void psb_intel_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder);
+
+ drm_encoder_cleanup(encoder);
+ kfree(intel_encoder);
+}
+
+static bool psb_intel_crtc_mode_fixup(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+
+/**
+ * Return the pipe currently connected to the panel fitter,
+ * or -1 if the panel fitter is not present or not in use
+ */
+static int psb_intel_panel_fitter_pipe(struct drm_device *dev)
+{
+ u32 pfit_control;
+
+ pfit_control = REG_READ(PFIT_CONTROL);
+
+ /* See if the panel fitter is in use */
+ if ((pfit_control & PFIT_ENABLE) == 0)
+ return -1;
+ /* Must be on PIPE 1 for PSB */
+ return 1;
+}
+
+static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ int pipe = psb_intel_crtc->pipe;
+ int fp_reg = (pipe == 0) ? FPA0 : FPB0;
+ int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
+ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
+ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+ int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
+ int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
+ int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
+ int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
+ int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
+ int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
+ int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
+ int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
+ int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
+ int refclk;
+ struct psb_intel_clock_t clock;
+ u32 dpll = 0, fp = 0, dspcntr, pipeconf;
+ bool ok, is_sdvo = false, is_dvo = false;
+ bool is_crt = false, is_lvds = false, is_tv = false;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct drm_connector *connector;
+
+ /* No scan out no play */
+ if (crtc->fb == NULL) {
+ crtc_funcs->mode_set_base(crtc, x, y, old_fb);
+ return 0;
+ }
+
+ list_for_each_entry(connector, &mode_config->connector_list, head) {
+ struct psb_intel_encoder *psb_intel_encoder =
+ psb_intel_attached_encoder(connector);
+
+ if (!connector->encoder
+ || connector->encoder->crtc != crtc)
+ continue;
+
+ switch (psb_intel_encoder->type) {
+ case INTEL_OUTPUT_LVDS:
+ is_lvds = true;
+ break;
+ case INTEL_OUTPUT_SDVO:
+ is_sdvo = true;
+ break;
+ case INTEL_OUTPUT_DVO:
+ is_dvo = true;
+ break;
+ case INTEL_OUTPUT_TVOUT:
+ is_tv = true;
+ break;
+ case INTEL_OUTPUT_ANALOG:
+ is_crt = true;
+ break;
+ }
+ }
+
+ refclk = 96000;
+
+ ok = psb_intel_find_best_PLL(crtc, adjusted_mode->clock, refclk,
+ &clock);
+ if (!ok) {
+ dev_err(dev->dev, "Couldn't find PLL settings for mode!\n");
+ return 0;
+ }
+
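+ /* Pack the N/M1/M2 divisors into the layout the FP register expects */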
+ fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
+
+ dpll = DPLL_VGA_MODE_DIS;
+ if (is_lvds) {
+ dpll |= DPLLB_MODE_LVDS;
+ dpll |= DPLL_DVO_HIGH_SPEED;
+ } else
+ dpll |= DPLLB_MODE_DAC_SERIAL;
+ if (is_sdvo) {
+ int sdvo_pixel_multiply =
+ adjusted_mode->clock / mode->clock;
+ dpll |= DPLL_DVO_HIGH_SPEED;
+ dpll |=
+ (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
+ }
+
+ /* compute bitmask from p1 value */
+ dpll |= (1 << (clock.p1 - 1)) << 16;
+ switch (clock.p2) {
+ case 5:
+ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
+ break;
+ case 7:
+ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
+ break;
+ case 10:
+ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
+ break;
+ case 14:
+ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
+ break;
+ }
+
+ if (is_tv) {
+ /* XXX: just matching BIOS for now */
+/* dpll |= PLL_REF_INPUT_TVCLKINBC; */
+ dpll |= 3;
+ }
+ dpll |= PLL_REF_INPUT_DREFCLK;
+
+ /* setup pipeconf */
+ pipeconf = REG_READ(pipeconf_reg);
+
+ /* Set up the display plane register */
+ dspcntr = DISPPLANE_GAMMA_ENABLE;
+
+ if (pipe == 0)
+ dspcntr |= DISPPLANE_SEL_PIPE_A;
+ else
+ dspcntr |= DISPPLANE_SEL_PIPE_B;
+
+ dspcntr |= DISPLAY_PLANE_ENABLE;
+ pipeconf |= PIPEACONF_ENABLE;
+ dpll |= DPLL_VCO_ENABLE;
+
+
+ /* Disable the panel fitter if it was on our pipe */
+ if (psb_intel_panel_fitter_pipe(dev) == pipe)
+ REG_WRITE(PFIT_CONTROL, 0);
+
+ drm_mode_debug_printmodeline(mode);
+
+ if (dpll & DPLL_VCO_ENABLE) {
+ REG_WRITE(fp_reg, fp);
+ REG_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
+ REG_READ(dpll_reg);
+ udelay(150);
+ }
+
+ /* The LVDS pin pair needs to be on before the DPLLs are enabled.
+ * This is an exception to the general rule that mode_set doesn't turn
+ * things on.
+ */
+ if (is_lvds) {
+ u32 lvds = REG_READ(LVDS);
+
+ lvds &= ~LVDS_PIPEB_SELECT;
+ if (pipe == 1)
+ lvds |= LVDS_PIPEB_SELECT;
+
+ lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
+ /* Set the B0-B3 data pairs corresponding to
+ * whether we're going to
+ * set the DPLLs for dual-channel mode or not.
+ */
+ lvds &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
+ if (clock.p2 == 7)
+ lvds |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
+
+ /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
+ * appropriately here, but we need to look more
+ * thoroughly into how panels behave in the two modes.
+ */
+
+ REG_WRITE(LVDS, lvds);
+ REG_READ(LVDS);
+ }
+
+ REG_WRITE(fp_reg, fp);
+ REG_WRITE(dpll_reg, dpll);
+ REG_READ(dpll_reg);
+ /* Wait for the clocks to stabilize. */
+ udelay(150);
+
+ /* write it again -- the BIOS does, after all */
+ REG_WRITE(dpll_reg, dpll);
+
+ REG_READ(dpll_reg);
+ /* Wait for the clocks to stabilize. */
+ udelay(150);
+
+ REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
+ ((adjusted_mode->crtc_htotal - 1) << 16));
+ REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
+ ((adjusted_mode->crtc_hblank_end - 1) << 16));
+ REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
+ ((adjusted_mode->crtc_hsync_end - 1) << 16));
+ REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
+ ((adjusted_mode->crtc_vtotal - 1) << 16));
+ REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
+ ((adjusted_mode->crtc_vblank_end - 1) << 16));
+ REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
+ ((adjusted_mode->crtc_vsync_end - 1) << 16));
+ /* pipesrc and dspsize control the size that is scaled from,
+ * which should always be the user's requested size.
+ */
+ REG_WRITE(dspsize_reg,
+ ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
+ REG_WRITE(dsppos_reg, 0);
+ REG_WRITE(pipesrc_reg,
+ ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
+ REG_WRITE(pipeconf_reg, pipeconf);
+ REG_READ(pipeconf_reg);
+
+ psb_intel_wait_for_vblank(dev);
+
+ REG_WRITE(dspcntr_reg, dspcntr);
+
+ /* Flush the plane changes */
+ crtc_funcs->mode_set_base(crtc, x, y, old_fb);
+
+ psb_intel_wait_for_vblank(dev);
+
+ return 0;
+}
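Illustrative sketch (not part of this patch): the P1 post divider above is programmed as a one-hot bitmask, shifted into what the code treats as bits 23:16 of the DPLL register. A minimal stand-alone version of that encoding:

#include <stdint.h>
#include <stdio.h>

/* One-hot encode a P1 post-divider value (1..8), mirroring
 * "dpll |= (1 << (clock.p1 - 1)) << 16" in the mode-set path above. */
static uint32_t dpll_p1_bits(int p1)
{
	return (uint32_t)(1u << (p1 - 1)) << 16;
}

int main(void)
{
	printf("p1=1 -> 0x%08x, p1=8 -> 0x%08x\n",
	       dpll_p1_bits(1), dpll_p1_bits(8));
	return 0;	/* prints 0x00010000 and 0x00800000 */
}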
+
+/** Loads the palette/gamma unit for the CRTC with the prepared values */
+void psb_intel_crtc_load_lut(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv =
+ (struct drm_psb_private *)dev->dev_private;
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ int palreg = PALETTE_A;
+ int i;
+
+ /* The clocks have to be on to load the palette. */
+ if (!crtc->enabled)
+ return;
+
+ switch (psb_intel_crtc->pipe) {
+ case 0:
+ break;
+ case 1:
+ palreg = PALETTE_B;
+ break;
+ case 2:
+ palreg = PALETTE_C;
+ break;
+ default:
+ dev_err(dev->dev, "Illegal Pipe Number.\n");
+ return;
+ }
+
+ if (gma_power_begin(dev, false)) {
+ for (i = 0; i < 256; i++) {
+ REG_WRITE(palreg + 4 * i,
+ ((psb_intel_crtc->lut_r[i] +
+ psb_intel_crtc->lut_adj[i]) << 16) |
+ ((psb_intel_crtc->lut_g[i] +
+ psb_intel_crtc->lut_adj[i]) << 8) |
+ (psb_intel_crtc->lut_b[i] +
+ psb_intel_crtc->lut_adj[i]));
+ }
+ gma_power_end(dev);
+ } else {
+ for (i = 0; i < 256; i++) {
+ dev_priv->save_palette_a[i] =
+ ((psb_intel_crtc->lut_r[i] +
+ psb_intel_crtc->lut_adj[i]) << 16) |
+ ((psb_intel_crtc->lut_g[i] +
+ psb_intel_crtc->lut_adj[i]) << 8) |
+ (psb_intel_crtc->lut_b[i] +
+ psb_intel_crtc->lut_adj[i]);
+ }
+
+ }
+}
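Illustrative sketch (not part of this patch): each palette write above packs the adjusted 8-bit R/G/B values into a single 32-bit word. A stand-alone version of that packing:

#include <stdint.h>
#include <stdio.h>

/* Pack 8-bit R/G/B into the 32-bit palette layout used above:
 * red in bits 23:16, green in 15:8, blue in 7:0. */
static uint32_t pack_palette_entry(uint8_t r, uint8_t g, uint8_t b)
{
	return ((uint32_t)r << 16) | ((uint32_t)g << 8) | b;
}

int main(void)
{
	/* Entry 0x40 of the linear ramp set up in psb_intel_crtc_init() */
	printf("0x%08x\n", pack_palette_entry(0x40, 0x40, 0x40));
	return 0;	/* prints 0x00404040 */
}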
+
+/**
+ * Save HW states of the given CRTC
+ */
+static void psb_intel_crtc_save(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ /* struct drm_psb_private *dev_priv =
+ (struct drm_psb_private *)dev->dev_private; */
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
+ int pipeA = (psb_intel_crtc->pipe == 0);
+ uint32_t paletteReg;
+ int i;
+
+ if (!crtc_state) {
+ dev_err(dev->dev, "No CRTC state found\n");
+ return;
+ }
+
+ crtc_state->saveDSPCNTR = REG_READ(pipeA ? DSPACNTR : DSPBCNTR);
+ crtc_state->savePIPECONF = REG_READ(pipeA ? PIPEACONF : PIPEBCONF);
+ crtc_state->savePIPESRC = REG_READ(pipeA ? PIPEASRC : PIPEBSRC);
+ crtc_state->saveFP0 = REG_READ(pipeA ? FPA0 : FPB0);
+ crtc_state->saveFP1 = REG_READ(pipeA ? FPA1 : FPB1);
+ crtc_state->saveDPLL = REG_READ(pipeA ? DPLL_A : DPLL_B);
+ crtc_state->saveHTOTAL = REG_READ(pipeA ? HTOTAL_A : HTOTAL_B);
+ crtc_state->saveHBLANK = REG_READ(pipeA ? HBLANK_A : HBLANK_B);
+ crtc_state->saveHSYNC = REG_READ(pipeA ? HSYNC_A : HSYNC_B);
+ crtc_state->saveVTOTAL = REG_READ(pipeA ? VTOTAL_A : VTOTAL_B);
+ crtc_state->saveVBLANK = REG_READ(pipeA ? VBLANK_A : VBLANK_B);
+ crtc_state->saveVSYNC = REG_READ(pipeA ? VSYNC_A : VSYNC_B);
+ crtc_state->saveDSPSTRIDE = REG_READ(pipeA ? DSPASTRIDE : DSPBSTRIDE);
+
+ /*NOTE: DSPSIZE DSPPOS only for psb*/
+ crtc_state->saveDSPSIZE = REG_READ(pipeA ? DSPASIZE : DSPBSIZE);
+ crtc_state->saveDSPPOS = REG_READ(pipeA ? DSPAPOS : DSPBPOS);
+
+ crtc_state->saveDSPBASE = REG_READ(pipeA ? DSPABASE : DSPBBASE);
+
+ paletteReg = pipeA ? PALETTE_A : PALETTE_B;
+ for (i = 0; i < 256; ++i)
+ crtc_state->savePalette[i] = REG_READ(paletteReg + (i << 2));
+}
+
+/**
+ * Restore HW states of the given CRTC
+ */
+static void psb_intel_crtc_restore(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ /* struct drm_psb_private * dev_priv =
+ (struct drm_psb_private *)dev->dev_private; */
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
+ /* struct drm_crtc_helper_funcs * crtc_funcs = crtc->helper_private; */
+ int pipeA = (psb_intel_crtc->pipe == 0);
+ uint32_t paletteReg;
+ int i;
+
+ if (!crtc_state) {
+ dev_err(dev->dev, "No crtc state\n");
+ return;
+ }
+
+ if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) {
+ REG_WRITE(pipeA ? DPLL_A : DPLL_B,
+ crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
+ REG_READ(pipeA ? DPLL_A : DPLL_B);
+ udelay(150);
+ }
+
+ REG_WRITE(pipeA ? FPA0 : FPB0, crtc_state->saveFP0);
+ REG_READ(pipeA ? FPA0 : FPB0);
+
+ REG_WRITE(pipeA ? FPA1 : FPB1, crtc_state->saveFP1);
+ REG_READ(pipeA ? FPA1 : FPB1);
+
+ REG_WRITE(pipeA ? DPLL_A : DPLL_B, crtc_state->saveDPLL);
+ REG_READ(pipeA ? DPLL_A : DPLL_B);
+ udelay(150);
+
+ REG_WRITE(pipeA ? HTOTAL_A : HTOTAL_B, crtc_state->saveHTOTAL);
+ REG_WRITE(pipeA ? HBLANK_A : HBLANK_B, crtc_state->saveHBLANK);
+ REG_WRITE(pipeA ? HSYNC_A : HSYNC_B, crtc_state->saveHSYNC);
+ REG_WRITE(pipeA ? VTOTAL_A : VTOTAL_B, crtc_state->saveVTOTAL);
+ REG_WRITE(pipeA ? VBLANK_A : VBLANK_B, crtc_state->saveVBLANK);
+ REG_WRITE(pipeA ? VSYNC_A : VSYNC_B, crtc_state->saveVSYNC);
+ REG_WRITE(pipeA ? DSPASTRIDE : DSPBSTRIDE, crtc_state->saveDSPSTRIDE);
+
+ REG_WRITE(pipeA ? DSPASIZE : DSPBSIZE, crtc_state->saveDSPSIZE);
+ REG_WRITE(pipeA ? DSPAPOS : DSPBPOS, crtc_state->saveDSPPOS);
+
+ REG_WRITE(pipeA ? PIPEASRC : PIPEBSRC, crtc_state->savePIPESRC);
+ REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE);
+ REG_WRITE(pipeA ? PIPEACONF : PIPEBCONF, crtc_state->savePIPECONF);
+
+ psb_intel_wait_for_vblank(dev);
+
+ REG_WRITE(pipeA ? DSPACNTR : DSPBCNTR, crtc_state->saveDSPCNTR);
+ REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE);
+
+ psb_intel_wait_for_vblank(dev);
+
+ paletteReg = pipeA ? PALETTE_A : PALETTE_B;
+ for (i = 0; i < 256; ++i)
+ REG_WRITE(paletteReg + (i << 2), crtc_state->savePalette[i]);
+}
+
+static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc,
+ struct drm_file *file_priv,
+ uint32_t handle,
+ uint32_t width, uint32_t height)
+{
+ struct drm_device *dev = crtc->dev;
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ int pipe = psb_intel_crtc->pipe;
+ uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
+ uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
+ uint32_t temp;
+ size_t addr = 0;
+ struct gtt_range *gt;
+ struct drm_gem_object *obj;
+ int ret;
+
+ /* If we want to turn off the cursor, ignore width and height */
+ if (!handle) {
+ /* turn off the cursor */
+ temp = CURSOR_MODE_DISABLE;
+
+ if (gma_power_begin(dev, false)) {
+ REG_WRITE(control, temp);
+ REG_WRITE(base, 0);
+ gma_power_end(dev);
+ }
+
+ /* Unpin the old GEM object */
+ if (psb_intel_crtc->cursor_obj) {
+ gt = container_of(psb_intel_crtc->cursor_obj,
+ struct gtt_range, gem);
+ psb_gtt_unpin(gt);
+ drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
+ psb_intel_crtc->cursor_obj = NULL;
+ }
+
+ return 0;
+ }
+
+ /* Currently we only support 64x64 cursors */
+ if (width != 64 || height != 64) {
+ dev_dbg(dev->dev, "we currently only support 64x64 cursors\n");
+ return -EINVAL;
+ }
+
+ obj = drm_gem_object_lookup(dev, file_priv, handle);
+ if (!obj)
+ return -ENOENT;
+
+ if (obj->size < width * height * 4) {
+ dev_dbg(dev->dev, "buffer is to small\n");
+ return -ENOMEM;
+ }
+
+ gt = container_of(obj, struct gtt_range, gem);
+
+ /* Pin the memory into the GTT */
+ ret = psb_gtt_pin(gt);
+ if (ret) {
+ dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle);
+ return ret;
+ }
+
+
+ addr = gt->offset; /* Or resource.start ??? */
+
+ psb_intel_crtc->cursor_addr = addr;
+
+ temp = 0;
+ /* set the pipe for the cursor */
+ temp |= (pipe << 28);
+ temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
+
+ if (gma_power_begin(dev, false)) {
+ REG_WRITE(control, temp);
+ REG_WRITE(base, addr);
+ gma_power_end(dev);
+ }
+
+ /* Unpin the old bo and remember the new one */
+ if (psb_intel_crtc->cursor_obj) {
+ gt = container_of(psb_intel_crtc->cursor_obj,
+ struct gtt_range, gem);
+ psb_gtt_unpin(gt);
+ drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
+ }
+ psb_intel_crtc->cursor_obj = obj;
+ return 0;
+}
+
+static int psb_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+{
+ struct drm_device *dev = crtc->dev;
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ int pipe = psb_intel_crtc->pipe;
+ uint32_t temp = 0;
+ uint32_t addr;
+
+
+ if (x < 0) {
+ temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT);
+ x = -x;
+ }
+ if (y < 0) {
+ temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT);
+ y = -y;
+ }
+
+ temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT);
+ temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);
+
+ addr = psb_intel_crtc->cursor_addr;
+
+ if (gma_power_begin(dev, false)) {
+ REG_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
+ REG_WRITE((pipe == 0) ? CURABASE : CURBBASE, addr);
+ gma_power_end(dev);
+ }
+ return 0;
+}
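Illustrative sketch (not part of this patch): psb_intel_crtc_cursor_move() stores x/y as sign-magnitude values in the cursor position register. The constants below are stand-ins, since the CURSOR_* definitions are not part of this hunk:

#include <stdint.h>
#include <stdio.h>

/* Stand-in values; the real CURSOR_POS_SIGN/CURSOR_POS_MASK and the
 * X/Y shifts live in psb_intel_reg.h, outside this hunk. */
#define POS_SIGN	0x8000
#define POS_MASK	0x07ff
#define X_SHIFT		0
#define Y_SHIFT		16

/* Mirror the sign-magnitude packing done in psb_intel_crtc_cursor_move() */
static uint32_t pack_cursor_pos(int x, int y)
{
	uint32_t temp = 0;

	if (x < 0) {
		temp |= (uint32_t)POS_SIGN << X_SHIFT;
		x = -x;
	}
	if (y < 0) {
		temp |= (uint32_t)POS_SIGN << Y_SHIFT;
		y = -y;
	}
	temp |= ((uint32_t)x & POS_MASK) << X_SHIFT;
	temp |= ((uint32_t)y & POS_MASK) << Y_SHIFT;
	return temp;
}

int main(void)
{
	printf("(-10, 20) -> 0x%08x\n", pack_cursor_pos(-10, 20));
	return 0;	/* prints 0x0014800a */
}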
+
+void psb_intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
+ u16 *green, u16 *blue, uint32_t type, uint32_t size)
+{
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ int i;
+
+ if (size != 256)
+ return;
+
+ for (i = 0; i < 256; i++) {
+ psb_intel_crtc->lut_r[i] = red[i] >> 8;
+ psb_intel_crtc->lut_g[i] = green[i] >> 8;
+ psb_intel_crtc->lut_b[i] = blue[i] >> 8;
+ }
+
+ psb_intel_crtc_load_lut(crtc);
+}
+
+static int psb_crtc_set_config(struct drm_mode_set *set)
+{
+ int ret;
+ struct drm_device *dev = set->crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ if (!dev_priv->rpm_enabled)
+ return drm_crtc_helper_set_config(set);
+
+ pm_runtime_forbid(&dev->pdev->dev);
+ ret = drm_crtc_helper_set_config(set);
+ pm_runtime_allow(&dev->pdev->dev);
+ return ret;
+}
+
+/* Returns the clock of the currently programmed mode of the given pipe. */
+static int psb_intel_crtc_clock_get(struct drm_device *dev,
+ struct drm_crtc *crtc)
+{
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ int pipe = psb_intel_crtc->pipe;
+ u32 dpll;
+ u32 fp;
+ struct psb_intel_clock_t clock;
+ bool is_lvds;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ if (gma_power_begin(dev, false)) {
+ dpll = REG_READ((pipe == 0) ? DPLL_A : DPLL_B);
+ if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
+ fp = REG_READ((pipe == 0) ? FPA0 : FPB0);
+ else
+ fp = REG_READ((pipe == 0) ? FPA1 : FPB1);
+ is_lvds = (pipe == 1) && (REG_READ(LVDS) & LVDS_PORT_EN);
+ gma_power_end(dev);
+ } else {
+ dpll = (pipe == 0) ?
+ dev_priv->saveDPLL_A : dev_priv->saveDPLL_B;
+
+ if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
+ fp = (pipe == 0) ?
+ dev_priv->saveFPA0 :
+ dev_priv->saveFPB0;
+ else
+ fp = (pipe == 0) ?
+ dev_priv->saveFPA1 :
+ dev_priv->saveFPB1;
+
+ is_lvds = (pipe == 1) && (dev_priv->saveLVDS & LVDS_PORT_EN);
+ }
+
+ clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
+ clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
+ clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
+
+ if (is_lvds) {
+ clock.p1 =
+ ffs((dpll &
+ DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
+ DPLL_FPA01_P1_POST_DIV_SHIFT);
+ clock.p2 = 14;
+
+ if ((dpll & PLL_REF_INPUT_MASK) ==
+ PLLB_REF_INPUT_SPREADSPECTRUMIN) {
+ /* XXX: might not be 66MHz */
+ i8xx_clock(66000, &clock);
+ } else
+ i8xx_clock(48000, &clock);
+ } else {
+ if (dpll & PLL_P1_DIVIDE_BY_TWO)
+ clock.p1 = 2;
+ else {
+ clock.p1 =
+ ((dpll &
+ DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
+ DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
+ }
+ if (dpll & PLL_P2_DIVIDE_BY_4)
+ clock.p2 = 4;
+ else
+ clock.p2 = 2;
+
+ i8xx_clock(48000, &clock);
+ }
+
+ /* XXX: It would be nice to validate the clocks, but we can't reuse
+ * i830PllIsValid() because it relies on the xf86_config connector
+ * configuration being accurate, which it isn't necessarily.
+ */
+
+ return clock.dot;
+}
+
+/** Returns the currently programmed mode of the given pipe. */
+struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
+ struct drm_crtc *crtc)
+{
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ int pipe = psb_intel_crtc->pipe;
+ struct drm_display_mode *mode;
+ int htot;
+ int hsync;
+ int vtot;
+ int vsync;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ if (gma_power_begin(dev, false)) {
+ htot = REG_READ((pipe == 0) ? HTOTAL_A : HTOTAL_B);
+ hsync = REG_READ((pipe == 0) ? HSYNC_A : HSYNC_B);
+ vtot = REG_READ((pipe == 0) ? VTOTAL_A : VTOTAL_B);
+ vsync = REG_READ((pipe == 0) ? VSYNC_A : VSYNC_B);
+ gma_power_end(dev);
+ } else {
+ htot = (pipe == 0) ?
+ dev_priv->saveHTOTAL_A : dev_priv->saveHTOTAL_B;
+ hsync = (pipe == 0) ?
+ dev_priv->saveHSYNC_A : dev_priv->saveHSYNC_B;
+ vtot = (pipe == 0) ?
+ dev_priv->saveVTOTAL_A : dev_priv->saveVTOTAL_B;
+ vsync = (pipe == 0) ?
+ dev_priv->saveVSYNC_A : dev_priv->saveVSYNC_B;
+ }
+
+ mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+ if (!mode)
+ return NULL;
+
+ mode->clock = psb_intel_crtc_clock_get(dev, crtc);
+ mode->hdisplay = (htot & 0xffff) + 1;
+ mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
+ mode->hsync_start = (hsync & 0xffff) + 1;
+ mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
+ mode->vdisplay = (vtot & 0xffff) + 1;
+ mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
+ mode->vsync_start = (vsync & 0xffff) + 1;
+ mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
+
+ drm_mode_set_name(mode);
+ drm_mode_set_crtcinfo(mode, 0);
+
+ return mode;
+}
+
+void psb_intel_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ struct gtt_range *gt;
+
+ /* Unpin the old GEM object */
+ if (psb_intel_crtc->cursor_obj) {
+ gt = container_of(psb_intel_crtc->cursor_obj,
+ struct gtt_range, gem);
+ psb_gtt_unpin(gt);
+ drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
+ psb_intel_crtc->cursor_obj = NULL;
+ }
+ kfree(psb_intel_crtc->crtc_state);
+ drm_crtc_cleanup(crtc);
+ kfree(psb_intel_crtc);
+}
+
+const struct drm_crtc_helper_funcs psb_intel_helper_funcs = {
+ .dpms = psb_intel_crtc_dpms,
+ .mode_fixup = psb_intel_crtc_mode_fixup,
+ .mode_set = psb_intel_crtc_mode_set,
+ .mode_set_base = psb_intel_pipe_set_base,
+ .prepare = psb_intel_crtc_prepare,
+ .commit = psb_intel_crtc_commit,
+};
+
+const struct drm_crtc_funcs psb_intel_crtc_funcs = {
+ .save = psb_intel_crtc_save,
+ .restore = psb_intel_crtc_restore,
+ .cursor_set = psb_intel_crtc_cursor_set,
+ .cursor_move = psb_intel_crtc_cursor_move,
+ .gamma_set = psb_intel_crtc_gamma_set,
+ .set_config = psb_crtc_set_config,
+ .destroy = psb_intel_crtc_destroy,
+};
+
+/*
+ * Set the default values of the cursor control and base registers
+ * to zero. This is a workaround for a h/w defect on Oaktrail.
+ */
+static void psb_intel_cursor_init(struct drm_device *dev, int pipe)
+{
+ u32 control[3] = { CURACNTR, CURBCNTR, CURCCNTR };
+ u32 base[3] = { CURABASE, CURBBASE, CURCBASE };
+
+ REG_WRITE(control[pipe], 0);
+ REG_WRITE(base[pipe], 0);
+}
+
+void psb_intel_crtc_init(struct drm_device *dev, int pipe,
+ struct psb_intel_mode_device *mode_dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_crtc *psb_intel_crtc;
+ int i;
+ uint16_t *r_base, *g_base, *b_base;
+
+ /* We allocate an extra array of drm_connector pointers
+ * for fbdev after the crtc */
+ psb_intel_crtc =
+ kzalloc(sizeof(struct psb_intel_crtc) +
+ (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)),
+ GFP_KERNEL);
+ if (psb_intel_crtc == NULL)
+ return;
+
+ psb_intel_crtc->crtc_state =
+ kzalloc(sizeof(struct psb_intel_crtc_state), GFP_KERNEL);
+ if (!psb_intel_crtc->crtc_state) {
+ dev_err(dev->dev, "Crtc state error: No memory\n");
+ kfree(psb_intel_crtc);
+ return;
+ }
+
+ /* Set the CRTC operations from the chip specific data */
+ drm_crtc_init(dev, &psb_intel_crtc->base, dev_priv->ops->crtc_funcs);
+
+ drm_mode_crtc_set_gamma_size(&psb_intel_crtc->base, 256);
+ psb_intel_crtc->pipe = pipe;
+ psb_intel_crtc->plane = pipe;
+
+ r_base = psb_intel_crtc->base.gamma_store;
+ g_base = r_base + 256;
+ b_base = g_base + 256;
+ for (i = 0; i < 256; i++) {
+ psb_intel_crtc->lut_r[i] = i;
+ psb_intel_crtc->lut_g[i] = i;
+ psb_intel_crtc->lut_b[i] = i;
+ r_base[i] = i << 8;
+ g_base[i] = i << 8;
+ b_base[i] = i << 8;
+
+ psb_intel_crtc->lut_adj[i] = 0;
+ }
+
+ psb_intel_crtc->mode_dev = mode_dev;
+ psb_intel_crtc->cursor_addr = 0;
+
+ drm_crtc_helper_add(&psb_intel_crtc->base,
+ dev_priv->ops->crtc_helper);
+
+ /* Set up the array of drm_connector pointers */
+ psb_intel_crtc->mode_set.crtc = &psb_intel_crtc->base;
+ BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
+ dev_priv->plane_to_crtc_mapping[psb_intel_crtc->plane] != NULL);
+ dev_priv->plane_to_crtc_mapping[psb_intel_crtc->plane] =
+ &psb_intel_crtc->base;
+ dev_priv->pipe_to_crtc_mapping[psb_intel_crtc->pipe] =
+ &psb_intel_crtc->base;
+ psb_intel_crtc->mode_set.connectors =
+ (struct drm_connector **) (psb_intel_crtc + 1);
+ psb_intel_crtc->mode_set.num_connectors = 0;
+ psb_intel_cursor_init(dev, pipe);
+}
+
+int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_get_pipe_from_crtc_id_arg *pipe_from_crtc_id = data;
+ struct drm_mode_object *drmmode_obj;
+ struct psb_intel_crtc *crtc;
+
+ if (!dev_priv) {
+ dev_err(dev->dev, "called with no initialization\n");
+ return -EINVAL;
+ }
+
+ drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
+ DRM_MODE_OBJECT_CRTC);
+
+ if (!drmmode_obj) {
+ dev_err(dev->dev, "no such CRTC id\n");
+ return -EINVAL;
+ }
+
+ crtc = to_psb_intel_crtc(obj_to_crtc(drmmode_obj));
+ pipe_from_crtc_id->pipe = crtc->pipe;
+
+ return 0;
+}
+
+struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev, int pipe)
+{
+ struct drm_crtc *crtc = NULL;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ if (psb_intel_crtc->pipe == pipe)
+ break;
+ }
+ return crtc;
+}
+
+int psb_intel_connector_clones(struct drm_device *dev, int type_mask)
+{
+ int index_mask = 0;
+ struct drm_connector *connector;
+ int entry = 0;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ head) {
+ struct psb_intel_encoder *psb_intel_encoder =
+ psb_intel_attached_encoder(connector);
+ if (type_mask & (1 << psb_intel_encoder->type))
+ index_mask |= (1 << entry);
+ entry++;
+ }
+ return index_mask;
+}
+
+
+void psb_intel_modeset_cleanup(struct drm_device *dev)
+{
+ drm_mode_config_cleanup(dev);
+}
+
+
+/* The current Intel driver doesn't take advantage of encoders;
+ * always give back the encoder for the connector.
+ */
+struct drm_encoder *psb_intel_best_encoder(struct drm_connector *connector)
+{
+ struct psb_intel_encoder *psb_intel_encoder =
+ psb_intel_attached_encoder(connector);
+
+ return &psb_intel_encoder->base;
+}
+
+void psb_intel_connector_attach_encoder(struct psb_intel_connector *connector,
+ struct psb_intel_encoder *encoder)
+{
+ connector->encoder = encoder;
+ drm_mode_connector_attach_encoder(&connector->base,
+ &encoder->base);
+}
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.h b/drivers/gpu/drm/gma500/psb_intel_display.h
new file mode 100644
index 000000000000..535b49a5e409
--- /dev/null
+++ b/drivers/gpu/drm/gma500/psb_intel_display.h
@@ -0,0 +1,28 @@
+/* Copyright (c) 2008, Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ */
+
+#ifndef _INTEL_DISPLAY_H_
+#define _INTEL_DISPLAY_H_
+
+bool psb_intel_pipe_has_type(struct drm_crtc *crtc, int type);
+void psb_intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
+ u16 *green, u16 *blue, uint32_t type, uint32_t size);
+void psb_intel_crtc_destroy(struct drm_crtc *crtc);
+
+#endif
diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
new file mode 100644
index 000000000000..f40535e56689
--- /dev/null
+++ b/drivers/gpu/drm/gma500/psb_intel_drv.h
@@ -0,0 +1,289 @@
+/*
+ * Copyright (c) 2009-2011, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#ifndef __INTEL_DRV_H__
+#define __INTEL_DRV_H__
+
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <linux/gpio.h>
+
+/*
+ * Display related stuff
+ */
+
+/* store information about an Ixxx DVO */
+/* The i830->i865 use multiple DVOs with multiple i2cs */
+/* the i915, i945 have a single sDVO i2c bus - which is different */
+#define MAX_OUTPUTS 6
+/* maximum connectors per CRTC in the mode set */
+#define INTELFB_CONN_LIMIT 4
+
+#define INTEL_I2C_BUS_DVO 1
+#define INTEL_I2C_BUS_SDVO 2
+
+/* Intel Pipe Clone Bit */
+#define INTEL_HDMIB_CLONE_BIT 1
+#define INTEL_HDMIC_CLONE_BIT 2
+#define INTEL_HDMID_CLONE_BIT 3
+#define INTEL_HDMIE_CLONE_BIT 4
+#define INTEL_HDMIF_CLONE_BIT 5
+#define INTEL_SDVO_NON_TV_CLONE_BIT 6
+#define INTEL_SDVO_TV_CLONE_BIT 7
+#define INTEL_SDVO_LVDS_CLONE_BIT 8
+#define INTEL_ANALOG_CLONE_BIT 9
+#define INTEL_TV_CLONE_BIT 10
+#define INTEL_DP_B_CLONE_BIT 11
+#define INTEL_DP_C_CLONE_BIT 12
+#define INTEL_DP_D_CLONE_BIT 13
+#define INTEL_LVDS_CLONE_BIT 14
+#define INTEL_DVO_TMDS_CLONE_BIT 15
+#define INTEL_DVO_LVDS_CLONE_BIT 16
+#define INTEL_EDP_CLONE_BIT 17
+
+/* these are outputs from the chip - integrated only
+ * external chips are via DVO or SDVO output */
+#define INTEL_OUTPUT_UNUSED 0
+#define INTEL_OUTPUT_ANALOG 1
+#define INTEL_OUTPUT_DVO 2
+#define INTEL_OUTPUT_SDVO 3
+#define INTEL_OUTPUT_LVDS 4
+#define INTEL_OUTPUT_TVOUT 5
+#define INTEL_OUTPUT_HDMI 6
+#define INTEL_OUTPUT_MIPI 7
+#define INTEL_OUTPUT_MIPI2 8
+
+#define INTEL_DVO_CHIP_NONE 0
+#define INTEL_DVO_CHIP_LVDS 1
+#define INTEL_DVO_CHIP_TMDS 2
+#define INTEL_DVO_CHIP_TVOUT 4
+
+#define INTEL_MODE_PIXEL_MULTIPLIER_SHIFT (0x0)
+#define INTEL_MODE_PIXEL_MULTIPLIER_MASK (0xf << INTEL_MODE_PIXEL_MULTIPLIER_SHIFT)
+
+static inline void
+psb_intel_mode_set_pixel_multiplier(struct drm_display_mode *mode,
+ int multiplier)
+{
+ mode->clock *= multiplier;
+ mode->private_flags |= multiplier;
+}
+
+static inline int
+psb_intel_mode_get_pixel_multiplier(const struct drm_display_mode *mode)
+{
+ return (mode->private_flags & INTEL_MODE_PIXEL_MULTIPLIER_MASK)
+ >> INTEL_MODE_PIXEL_MULTIPLIER_SHIFT;
+}
+
+
+/*
+ * Hold information usually kept in the device driver privates here,
+ * since it needs to be shared across multiple device drivers' privates.
+ */
+struct psb_intel_mode_device {
+
+ /*
+ * Abstracted memory manager operations
+ */
+ size_t(*bo_offset) (struct drm_device *dev, void *bo);
+
+ /*
+ * Cursor (Can go ?)
+ */
+ int cursor_needs_physical;
+
+ /*
+ * LVDS info
+ */
+ int backlight_duty_cycle; /* restore backlight to this value */
+ bool panel_wants_dither;
+ struct drm_display_mode *panel_fixed_mode;
+ struct drm_display_mode *panel_fixed_mode2;
+ struct drm_display_mode *vbt_mode; /* if any */
+
+ uint32_t saveBLC_PWM_CTL;
+};
+
+struct psb_intel_i2c_chan {
+ /* for getting at dev. private (mmio etc.) */
+ struct drm_device *drm_dev;
+ u32 reg; /* GPIO reg */
+ struct i2c_adapter adapter;
+ struct i2c_algo_bit_data algo;
+ u8 slave_addr;
+};
+
+struct psb_intel_encoder {
+ struct drm_encoder base;
+ int type;
+ bool needs_tv_clock;
+ void (*hot_plug)(struct psb_intel_encoder *);
+ int crtc_mask;
+ int clone_mask;
+ void *dev_priv; /* For sdvo_priv, lvds_priv, etc... */
+
+ /* FIXME: Either make SDVO and LVDS store their i2c here or give CDV its
+ own set of output privates */
+ struct psb_intel_i2c_chan *i2c_bus;
+ struct psb_intel_i2c_chan *ddc_bus;
+};
+
+struct psb_intel_connector {
+ struct drm_connector base;
+ struct psb_intel_encoder *encoder;
+};
+
+struct psb_intel_crtc_state {
+ uint32_t saveDSPCNTR;
+ uint32_t savePIPECONF;
+ uint32_t savePIPESRC;
+ uint32_t saveDPLL;
+ uint32_t saveFP0;
+ uint32_t saveFP1;
+ uint32_t saveHTOTAL;
+ uint32_t saveHBLANK;
+ uint32_t saveHSYNC;
+ uint32_t saveVTOTAL;
+ uint32_t saveVBLANK;
+ uint32_t saveVSYNC;
+ uint32_t saveDSPSTRIDE;
+ uint32_t saveDSPSIZE;
+ uint32_t saveDSPPOS;
+ uint32_t saveDSPBASE;
+ uint32_t savePalette[256];
+};
+
+struct psb_intel_crtc {
+ struct drm_crtc base;
+ int pipe;
+ int plane;
+ uint32_t cursor_addr;
+ u8 lut_r[256], lut_g[256], lut_b[256];
+ u8 lut_adj[256];
+ struct psb_intel_framebuffer *fbdev_fb;
+ /* a mode_set for fbdev users on this crtc */
+ struct drm_mode_set mode_set;
+
+ /* GEM object that holds our cursor */
+ struct drm_gem_object *cursor_obj;
+
+ struct drm_display_mode saved_mode;
+ struct drm_display_mode saved_adjusted_mode;
+
+ struct psb_intel_mode_device *mode_dev;
+
+ /*crtc mode setting flags*/
+ u32 mode_flags;
+
+ /* Saved Crtc HW states */
+ struct psb_intel_crtc_state *crtc_state;
+};
+
+#define to_psb_intel_crtc(x) \
+ container_of(x, struct psb_intel_crtc, base)
+#define to_psb_intel_connector(x) \
+ container_of(x, struct psb_intel_connector, base)
+#define to_psb_intel_encoder(x) \
+ container_of(x, struct psb_intel_encoder, base)
+#define to_psb_intel_framebuffer(x) \
+ container_of(x, struct psb_intel_framebuffer, base)
+
+struct psb_intel_i2c_chan *psb_intel_i2c_create(struct drm_device *dev,
+ const u32 reg, const char *name);
+void psb_intel_i2c_destroy(struct psb_intel_i2c_chan *chan);
+int psb_intel_ddc_get_modes(struct drm_connector *connector,
+ struct i2c_adapter *adapter);
+extern bool psb_intel_ddc_probe(struct i2c_adapter *adapter);
+
+extern void psb_intel_crtc_init(struct drm_device *dev, int pipe,
+ struct psb_intel_mode_device *mode_dev);
+extern void psb_intel_crt_init(struct drm_device *dev);
+extern bool psb_intel_sdvo_init(struct drm_device *dev, int output_device);
+extern void psb_intel_dvo_init(struct drm_device *dev);
+extern void psb_intel_tv_init(struct drm_device *dev);
+extern void psb_intel_lvds_init(struct drm_device *dev,
+ struct psb_intel_mode_device *mode_dev);
+extern void psb_intel_lvds_set_brightness(struct drm_device *dev, int level);
+extern void oaktrail_lvds_init(struct drm_device *dev,
+ struct psb_intel_mode_device *mode_dev);
+extern void oaktrail_wait_for_INTR_PKT_SENT(struct drm_device *dev);
+extern void oaktrail_dsi_init(struct drm_device *dev,
+ struct psb_intel_mode_device *mode_dev);
+extern void mid_dsi_init(struct drm_device *dev,
+ struct psb_intel_mode_device *mode_dev, int dsi_num);
+
+extern void psb_intel_crtc_load_lut(struct drm_crtc *crtc);
+extern void psb_intel_encoder_prepare(struct drm_encoder *encoder);
+extern void psb_intel_encoder_commit(struct drm_encoder *encoder);
+extern void psb_intel_encoder_destroy(struct drm_encoder *encoder);
+
+static inline struct psb_intel_encoder *psb_intel_attached_encoder(
+ struct drm_connector *connector)
+{
+ return to_psb_intel_connector(connector)->encoder;
+}
+
+extern void psb_intel_connector_attach_encoder(
+ struct psb_intel_connector *connector,
+ struct psb_intel_encoder *encoder);
+
+extern struct drm_encoder *psb_intel_best_encoder(struct drm_connector
+ *connector);
+
+extern struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
+ struct drm_crtc *crtc);
+extern void psb_intel_wait_for_vblank(struct drm_device *dev);
+extern int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev,
+ int pipe);
+extern struct drm_connector *psb_intel_sdvo_find(struct drm_device *dev,
+ int sdvoB);
+extern int psb_intel_sdvo_supports_hotplug(struct drm_connector *connector);
+extern void psb_intel_sdvo_set_hotplug(struct drm_connector *connector,
+ int enable);
+extern int intelfb_probe(struct drm_device *dev);
+extern int intelfb_remove(struct drm_device *dev,
+ struct drm_framebuffer *fb);
+extern struct drm_framebuffer *psb_intel_framebuffer_create(struct drm_device
+ *dev, struct
+ drm_mode_fb_cmd
+ *mode_cmd,
+ void *mm_private);
+extern bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+extern int psb_intel_lvds_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode);
+extern int psb_intel_lvds_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t value);
+extern void psb_intel_lvds_destroy(struct drm_connector *connector);
+extern const struct drm_encoder_funcs psb_intel_lvds_enc_funcs;
+
+/* intel_gmbus.c */
+extern void gma_intel_i2c_reset(struct drm_device *dev);
+extern int gma_intel_setup_gmbus(struct drm_device *dev);
+extern void gma_intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
+extern void gma_intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
+extern void gma_intel_teardown_gmbus(struct drm_device *dev);
+
+#endif /* __INTEL_DRV_H__ */
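Illustrative sketch (not part of this patch): the two inline helpers above keep the SDVO pixel multiplier in the low bits of mode->private_flags. The same round trip, with a stand-in struct instead of drm_display_mode:

#include <stdio.h>

#define PIXEL_MULTIPLIER_SHIFT	0
#define PIXEL_MULTIPLIER_MASK	(0xf << PIXEL_MULTIPLIER_SHIFT)

/* Stand-in for the two drm_display_mode fields the helpers touch */
struct fake_mode {
	int clock;		/* pixel clock in kHz */
	int private_flags;
};

static void set_pixel_multiplier(struct fake_mode *mode, int multiplier)
{
	mode->clock *= multiplier;
	mode->private_flags |= multiplier << PIXEL_MULTIPLIER_SHIFT;
}

static int get_pixel_multiplier(const struct fake_mode *mode)
{
	return (mode->private_flags & PIXEL_MULTIPLIER_MASK)
			>> PIXEL_MULTIPLIER_SHIFT;
}

int main(void)
{
	struct fake_mode mode = { .clock = 65000, .private_flags = 0 };

	set_pixel_multiplier(&mode, 2);
	printf("clock=%d multiplier=%d\n",
	       mode.clock, get_pixel_multiplier(&mode));
	return 0;	/* prints clock=130000 multiplier=2 */
}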
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
new file mode 100644
index 000000000000..a25e4ca5e91c
--- /dev/null
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -0,0 +1,868 @@
+/*
+ * Copyright © 2006-2007 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Dave Airlie <airlied@linux.ie>
+ * Jesse Barnes <jesse.barnes@intel.com>
+ */
+
+#include <linux/i2c.h>
+#include <drm/drmP.h>
+
+#include "intel_bios.h"
+#include "psb_drv.h"
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "power.h"
+#include <linux/pm_runtime.h>
+
+/*
+ * LVDS I2C backlight control macros
+ */
+#define BRIGHTNESS_MAX_LEVEL 100
+#define BRIGHTNESS_MASK 0xFF
+#define BLC_I2C_TYPE 0x01
+#define BLC_PWM_TYPT 0x02
+
+#define BLC_POLARITY_NORMAL 0
+#define BLC_POLARITY_INVERSE 1
+
+#define PSB_BLC_MAX_PWM_REG_FREQ (0xFFFE)
+#define PSB_BLC_MIN_PWM_REG_FREQ (0x2)
+#define PSB_BLC_PWM_PRECISION_FACTOR (10)
+#define PSB_BACKLIGHT_PWM_CTL_SHIFT (16)
+#define PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
+
+struct psb_intel_lvds_priv {
+ /*
+ * Saved LVDS output states
+ */
+ uint32_t savePP_ON;
+ uint32_t savePP_OFF;
+ uint32_t saveLVDS;
+ uint32_t savePP_CONTROL;
+ uint32_t savePP_CYCLE;
+ uint32_t savePFIT_CONTROL;
+ uint32_t savePFIT_PGM_RATIOS;
+ uint32_t saveBLC_PWM_CTL;
+
+ struct psb_intel_i2c_chan *i2c_bus;
+ struct psb_intel_i2c_chan *ddc_bus;
+};
+
+
+/*
+ * Returns the maximum level of the backlight duty cycle field.
+ */
+static u32 psb_intel_lvds_get_max_backlight(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ u32 ret;
+
+ if (gma_power_begin(dev, false)) {
+ ret = REG_READ(BLC_PWM_CTL);
+ gma_power_end(dev);
+ } else /* Powered off, use the saved value */
+ ret = dev_priv->saveBLC_PWM_CTL;
+
+ /* Top 15bits hold the frequency mask */
+ ret = (ret & BACKLIGHT_MODULATION_FREQ_MASK) >>
+ BACKLIGHT_MODULATION_FREQ_SHIFT;
+
+ ret *= 2; /* Return a 16bit range as needed for setting */
+ if (ret == 0)
+ dev_err(dev->dev, "BL bug: Reg %08x save %08X\n",
+ REG_READ(BLC_PWM_CTL), dev_priv->saveBLC_PWM_CTL);
+ return ret;
+}
+
+/*
+ * Set LVDS backlight level by I2C command
+ *
+ * FIXME: at some point we need to both track this for PM and also
+ * disable runtime pm on MRST if the brightness is nil (ie blanked)
+ */
+static int psb_lvds_i2c_set_brightness(struct drm_device *dev,
+ unsigned int level)
+{
+ struct drm_psb_private *dev_priv =
+ (struct drm_psb_private *)dev->dev_private;
+
+ struct psb_intel_i2c_chan *lvds_i2c_bus = dev_priv->lvds_i2c_bus;
+ u8 out_buf[2];
+ unsigned int blc_i2c_brightness;
+
+ struct i2c_msg msgs[] = {
+ {
+ .addr = lvds_i2c_bus->slave_addr,
+ .flags = 0,
+ .len = 2,
+ .buf = out_buf,
+ }
+ };
+
+ blc_i2c_brightness = BRIGHTNESS_MASK & ((unsigned int)level *
+ BRIGHTNESS_MASK /
+ BRIGHTNESS_MAX_LEVEL);
+
+ if (dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE)
+ blc_i2c_brightness = BRIGHTNESS_MASK - blc_i2c_brightness;
+
+ out_buf[0] = dev_priv->lvds_bl->brightnesscmd;
+ out_buf[1] = (u8)blc_i2c_brightness;
+
+ if (i2c_transfer(&lvds_i2c_bus->adapter, msgs, 1) == 1) {
+ dev_dbg(dev->dev, "I2C set brightness.(command, value) (%d, %d)\n",
+ dev_priv->lvds_bl->brightnesscmd,
+ blc_i2c_brightness);
+ return 0;
+ }
+
+ dev_err(dev->dev, "I2C transfer error\n");
+ return -1;
+}
+
+
+static int psb_lvds_pwm_set_brightness(struct drm_device *dev, int level)
+{
+ struct drm_psb_private *dev_priv =
+ (struct drm_psb_private *)dev->dev_private;
+
+ u32 max_pwm_blc;
+ u32 blc_pwm_duty_cycle;
+
+ max_pwm_blc = psb_intel_lvds_get_max_backlight(dev);
+
+ /* BLC_PWM_CTL should be initialized during backlight device init */
+ BUG_ON(max_pwm_blc == 0);
+
+ blc_pwm_duty_cycle = level * max_pwm_blc / BRIGHTNESS_MAX_LEVEL;
+
+ if (dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE)
+ blc_pwm_duty_cycle = max_pwm_blc - blc_pwm_duty_cycle;
+
+ blc_pwm_duty_cycle &= PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR;
+ REG_WRITE(BLC_PWM_CTL,
+ (max_pwm_blc << PSB_BACKLIGHT_PWM_CTL_SHIFT) |
+ (blc_pwm_duty_cycle));
+
+ dev_info(dev->dev, "Backlight lvds set brightness %08x\n",
+ (max_pwm_blc << PSB_BACKLIGHT_PWM_CTL_SHIFT) |
+ (blc_pwm_duty_cycle));
+
+ return 0;
+}
+
+/*
+ * Set LVDS backlight level either by I2C or PWM
+ */
+void psb_intel_lvds_set_brightness(struct drm_device *dev, int level)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ dev_dbg(dev->dev, "backlight level is %d\n", level);
+
+ if (!dev_priv->lvds_bl) {
+ dev_err(dev->dev, "NO LVDS backlight info\n");
+ return;
+ }
+
+ if (dev_priv->lvds_bl->type == BLC_I2C_TYPE)
+ psb_lvds_i2c_set_brightness(dev, level);
+ else
+ psb_lvds_pwm_set_brightness(dev, level);
+}
+
+/*
+ * Sets the backlight level.
+ *
+ * level: backlight level, from 0 to psb_intel_lvds_get_max_backlight().
+ */
+static void psb_intel_lvds_set_backlight(struct drm_device *dev, int level)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ u32 blc_pwm_ctl;
+
+ if (gma_power_begin(dev, false)) {
+ blc_pwm_ctl = REG_READ(BLC_PWM_CTL);
+ blc_pwm_ctl &= ~BACKLIGHT_DUTY_CYCLE_MASK;
+ REG_WRITE(BLC_PWM_CTL,
+ (blc_pwm_ctl |
+ (level << BACKLIGHT_DUTY_CYCLE_SHIFT)));
+ dev_priv->saveBLC_PWM_CTL = (blc_pwm_ctl |
+ (level << BACKLIGHT_DUTY_CYCLE_SHIFT));
+ gma_power_end(dev);
+ } else {
+ blc_pwm_ctl = dev_priv->saveBLC_PWM_CTL &
+ ~BACKLIGHT_DUTY_CYCLE_MASK;
+ dev_priv->saveBLC_PWM_CTL = (blc_pwm_ctl |
+ (level << BACKLIGHT_DUTY_CYCLE_SHIFT));
+ }
+}
+
+/*
+ * Sets the power state for the panel.
+ */
+static void psb_intel_lvds_set_power(struct drm_device *dev, bool on)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+ u32 pp_status;
+
+ if (!gma_power_begin(dev, true)) {
+ dev_err(dev->dev, "set power, chip off!\n");
+ return;
+ }
+
+ if (on) {
+ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
+ POWER_TARGET_ON);
+ do {
+ pp_status = REG_READ(PP_STATUS);
+ } while ((pp_status & PP_ON) == 0);
+
+ psb_intel_lvds_set_backlight(dev,
+ mode_dev->backlight_duty_cycle);
+ } else {
+ psb_intel_lvds_set_backlight(dev, 0);
+
+ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
+ ~POWER_TARGET_ON);
+ do {
+ pp_status = REG_READ(PP_STATUS);
+ } while (pp_status & PP_ON);
+ }
+
+ gma_power_end(dev);
+}
+
+static void psb_intel_lvds_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+
+ if (mode == DRM_MODE_DPMS_ON)
+ psb_intel_lvds_set_power(dev, true);
+ else
+ psb_intel_lvds_set_power(dev, false);
+
+ /* XXX: We never power down the LVDS pairs. */
+}
+
+static void psb_intel_lvds_save(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_psb_private *dev_priv =
+ (struct drm_psb_private *)dev->dev_private;
+ struct psb_intel_encoder *psb_intel_encoder =
+ psb_intel_attached_encoder(connector);
+ struct psb_intel_lvds_priv *lvds_priv =
+ (struct psb_intel_lvds_priv *)psb_intel_encoder->dev_priv;
+
+ lvds_priv->savePP_ON = REG_READ(LVDSPP_ON);
+ lvds_priv->savePP_OFF = REG_READ(LVDSPP_OFF);
+ lvds_priv->saveLVDS = REG_READ(LVDS);
+ lvds_priv->savePP_CONTROL = REG_READ(PP_CONTROL);
+ lvds_priv->savePP_CYCLE = REG_READ(PP_CYCLE);
+ /*lvds_priv->savePP_DIVISOR = REG_READ(PP_DIVISOR);*/
+ lvds_priv->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
+ lvds_priv->savePFIT_CONTROL = REG_READ(PFIT_CONTROL);
+ lvds_priv->savePFIT_PGM_RATIOS = REG_READ(PFIT_PGM_RATIOS);
+
+ /*TODO: move backlight_duty_cycle to psb_intel_lvds_priv*/
+ dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
+ BACKLIGHT_DUTY_CYCLE_MASK);
+
+ /*
+ * If the light is off at server startup,
+ * just make it full brightness
+ */
+ if (dev_priv->backlight_duty_cycle == 0)
+ dev_priv->backlight_duty_cycle =
+ psb_intel_lvds_get_max_backlight(dev);
+
+ dev_dbg(dev->dev, "(0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x)\n",
+ lvds_priv->savePP_ON,
+ lvds_priv->savePP_OFF,
+ lvds_priv->saveLVDS,
+ lvds_priv->savePP_CONTROL,
+ lvds_priv->savePP_CYCLE,
+ lvds_priv->saveBLC_PWM_CTL);
+}
+
+static void psb_intel_lvds_restore(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ u32 pp_status;
+ struct psb_intel_encoder *psb_intel_encoder =
+ psb_intel_attached_encoder(connector);
+ struct psb_intel_lvds_priv *lvds_priv =
+ (struct psb_intel_lvds_priv *)psb_intel_encoder->dev_priv;
+
+ dev_dbg(dev->dev, "(0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x)\n",
+ lvds_priv->savePP_ON,
+ lvds_priv->savePP_OFF,
+ lvds_priv->saveLVDS,
+ lvds_priv->savePP_CONTROL,
+ lvds_priv->savePP_CYCLE,
+ lvds_priv->saveBLC_PWM_CTL);
+
+ REG_WRITE(BLC_PWM_CTL, lvds_priv->saveBLC_PWM_CTL);
+ REG_WRITE(PFIT_CONTROL, lvds_priv->savePFIT_CONTROL);
+ REG_WRITE(PFIT_PGM_RATIOS, lvds_priv->savePFIT_PGM_RATIOS);
+ REG_WRITE(LVDSPP_ON, lvds_priv->savePP_ON);
+ REG_WRITE(LVDSPP_OFF, lvds_priv->savePP_OFF);
+ /*REG_WRITE(PP_DIVISOR, lvds_priv->savePP_DIVISOR);*/
+ REG_WRITE(PP_CYCLE, lvds_priv->savePP_CYCLE);
+ REG_WRITE(PP_CONTROL, lvds_priv->savePP_CONTROL);
+ REG_WRITE(LVDS, lvds_priv->saveLVDS);
+
+ if (lvds_priv->savePP_CONTROL & POWER_TARGET_ON) {
+ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
+ POWER_TARGET_ON);
+ do {
+ pp_status = REG_READ(PP_STATUS);
+ } while ((pp_status & PP_ON) == 0);
+ } else {
+ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
+ ~POWER_TARGET_ON);
+ do {
+ pp_status = REG_READ(PP_STATUS);
+ } while (pp_status & PP_ON);
+ }
+}
+
+int psb_intel_lvds_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct drm_psb_private *dev_priv = connector->dev->dev_private;
+ struct psb_intel_encoder *psb_intel_encoder =
+ psb_intel_attached_encoder(connector);
+ struct drm_display_mode *fixed_mode =
+ dev_priv->mode_dev.panel_fixed_mode;
+
+ if (psb_intel_encoder->type == INTEL_OUTPUT_MIPI2)
+ fixed_mode = dev_priv->mode_dev.panel_fixed_mode2;
+
+ /* just in case */
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
+ /* just in case */
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ return MODE_NO_INTERLACE;
+
+ if (fixed_mode) {
+ if (mode->hdisplay > fixed_mode->hdisplay)
+ return MODE_PANEL;
+ if (mode->vdisplay > fixed_mode->vdisplay)
+ return MODE_PANEL;
+ }
+ return MODE_OK;
+}
+
+bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+ struct psb_intel_crtc *psb_intel_crtc =
+ to_psb_intel_crtc(encoder->crtc);
+ struct drm_encoder *tmp_encoder;
+ struct drm_display_mode *panel_fixed_mode = mode_dev->panel_fixed_mode;
+ struct psb_intel_encoder *psb_intel_encoder =
+ to_psb_intel_encoder(encoder);
+
+ if (psb_intel_encoder->type == INTEL_OUTPUT_MIPI2)
+ panel_fixed_mode = mode_dev->panel_fixed_mode2;
+
+ /* PSB requires LVDS to be on pipe B; MRST has only one pipe anyway */
+ if (!IS_MRST(dev) && psb_intel_crtc->pipe == 0) {
+ printk(KERN_ERR "Can't support LVDS on pipe A\n");
+ return false;
+ }
+ if (IS_MRST(dev) && psb_intel_crtc->pipe != 0) {
+ printk(KERN_ERR "Must use PIPE A\n");
+ return false;
+ }
+ /* Should never happen!! */
+ list_for_each_entry(tmp_encoder, &dev->mode_config.encoder_list,
+ head) {
+ if (tmp_encoder != encoder
+ && tmp_encoder->crtc == encoder->crtc) {
+ printk(KERN_ERR "Can't enable LVDS and another "
+ "encoder on the same pipe\n");
+ return false;
+ }
+ }
+
+ /*
+ * If we have timings from the BIOS for the panel, put them in
+ * to the adjusted mode. The CRTC will be set up for this mode,
+ * with the panel scaling set up to source from the H/VDisplay
+ * of the original mode.
+ */
+ if (panel_fixed_mode != NULL) {
+ adjusted_mode->hdisplay = panel_fixed_mode->hdisplay;
+ adjusted_mode->hsync_start = panel_fixed_mode->hsync_start;
+ adjusted_mode->hsync_end = panel_fixed_mode->hsync_end;
+ adjusted_mode->htotal = panel_fixed_mode->htotal;
+ adjusted_mode->vdisplay = panel_fixed_mode->vdisplay;
+ adjusted_mode->vsync_start = panel_fixed_mode->vsync_start;
+ adjusted_mode->vsync_end = panel_fixed_mode->vsync_end;
+ adjusted_mode->vtotal = panel_fixed_mode->vtotal;
+ adjusted_mode->clock = panel_fixed_mode->clock;
+ drm_mode_set_crtcinfo(adjusted_mode,
+ CRTC_INTERLACE_HALVE_V);
+ }
+
+ /*
+ * XXX: It would be nice to support lower refresh rates on the
+ * panels to reduce power consumption, and perhaps match the
+ * user's requested refresh rate.
+ */
+
+ return true;
+}
+
+static void psb_intel_lvds_prepare(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+
+ if (!gma_power_begin(dev, true))
+ return;
+
+ mode_dev->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
+ mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL &
+ BACKLIGHT_DUTY_CYCLE_MASK);
+
+ psb_intel_lvds_set_power(dev, false);
+
+ gma_power_end(dev);
+}
+
+static void psb_intel_lvds_commit(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+
+ if (mode_dev->backlight_duty_cycle == 0)
+ mode_dev->backlight_duty_cycle =
+ psb_intel_lvds_get_max_backlight(dev);
+
+ psb_intel_lvds_set_power(dev, true);
+}
+
+static void psb_intel_lvds_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ u32 pfit_control;
+
+ /*
+ * The LVDS pin pair will already have been turned on in the
+ * psb_intel_crtc_mode_set since it has a large impact on the DPLL
+ * settings.
+ */
+
+ /*
+ * Enable automatic panel scaling so that non-native modes fill the
+ * screen. Should be enabled before the pipe is enabled, according to
+ * register description and PRM.
+ */
+ if (mode->hdisplay != adjusted_mode->hdisplay ||
+ mode->vdisplay != adjusted_mode->vdisplay)
+ pfit_control = (PFIT_ENABLE | VERT_AUTO_SCALE |
+ HORIZ_AUTO_SCALE | VERT_INTERP_BILINEAR |
+ HORIZ_INTERP_BILINEAR);
+ else
+ pfit_control = 0;
+
+ if (dev_priv->lvds_dither)
+ pfit_control |= PANEL_8TO6_DITHER_ENABLE;
+
+ REG_WRITE(PFIT_CONTROL, pfit_control);
+}
+
+/*
+ * Detect the LVDS connection.
+ *
+ * This always returns CONNECTOR_STATUS_CONNECTED.
+ * This connector should only have
+ * been set up if the LVDS was actually connected anyway.
+ */
+static enum drm_connector_status psb_intel_lvds_detect(struct drm_connector
+ *connector, bool force)
+{
+ return connector_status_connected;
+}
+
+/*
+ * Return the list of DDC modes if available, or the BIOS fixed mode otherwise.
+ */
+static int psb_intel_lvds_get_modes(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+ struct psb_intel_encoder *psb_intel_encoder =
+ psb_intel_attached_encoder(connector);
+ struct psb_intel_lvds_priv *lvds_priv = psb_intel_encoder->dev_priv;
+ int ret = 0;
+
+ if (!IS_MRST(dev))
+ ret = psb_intel_ddc_get_modes(connector, &lvds_priv->i2c_bus->adapter);
+
+ if (ret)
+ return ret;
+
+ /* Didn't get an EDID, so set wide sync ranges so we get all modes
+ * handed to valid_mode for checking
+ */
+ connector->display_info.min_vfreq = 0;
+ connector->display_info.max_vfreq = 200;
+ connector->display_info.min_hfreq = 0;
+ connector->display_info.max_hfreq = 200;
+
+ if (mode_dev->panel_fixed_mode != NULL) {
+ struct drm_display_mode *mode =
+ drm_mode_duplicate(dev, mode_dev->panel_fixed_mode);
+ drm_mode_probed_add(connector, mode);
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * psb_intel_lvds_destroy - unregister and free LVDS structures
+ * @connector: connector to free
+ *
+ * Unregister the DDC bus for this connector then free the driver private
+ * structure.
+ */
+void psb_intel_lvds_destroy(struct drm_connector *connector)
+{
+ struct psb_intel_encoder *psb_intel_encoder =
+ psb_intel_attached_encoder(connector);
+ struct psb_intel_lvds_priv *lvds_priv = psb_intel_encoder->dev_priv;
+
+ if (lvds_priv->ddc_bus)
+ psb_intel_i2c_destroy(lvds_priv->ddc_bus);
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
+
+int psb_intel_lvds_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t value)
+{
+ struct drm_encoder *encoder = connector->encoder;
+
+ if (!encoder)
+ return -1;
+
+ if (!strcmp(property->name, "scaling mode")) {
+ struct psb_intel_crtc *crtc =
+ to_psb_intel_crtc(encoder->crtc);
+ uint64_t curval;
+
+ if (!crtc)
+ goto set_prop_error;
+
+ switch (value) {
+ case DRM_MODE_SCALE_FULLSCREEN:
+ break;
+ case DRM_MODE_SCALE_NO_SCALE:
+ break;
+ case DRM_MODE_SCALE_ASPECT:
+ break;
+ default:
+ goto set_prop_error;
+ }
+
+ if (drm_connector_property_get_value(connector,
+ property,
+ &curval))
+ goto set_prop_error;
+
+ if (curval == value)
+ goto set_prop_done;
+
+ if (drm_connector_property_set_value(connector,
+ property,
+ value))
+ goto set_prop_error;
+
+ if (crtc->saved_mode.hdisplay != 0 &&
+ crtc->saved_mode.vdisplay != 0) {
+ if (!drm_crtc_helper_set_mode(encoder->crtc,
+ &crtc->saved_mode,
+ encoder->crtc->x,
+ encoder->crtc->y,
+ encoder->crtc->fb))
+ goto set_prop_error;
+ }
+ } else if (!strcmp(property->name, "backlight")) {
+ if (drm_connector_property_set_value(connector,
+ property,
+ value))
+ goto set_prop_error;
+ else {
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+ struct drm_psb_private *devp =
+ encoder->dev->dev_private;
+ struct backlight_device *bd = devp->backlight_device;
+ if (bd) {
+ bd->props.brightness = value;
+ backlight_update_status(bd);
+ }
+#endif
+ }
+ } else if (!strcmp(property->name, "DPMS")) {
+ struct drm_encoder_helper_funcs *hfuncs
+ = encoder->helper_private;
+ hfuncs->dpms(encoder, value);
+ }
+
+set_prop_done:
+ return 0;
+set_prop_error:
+ return -1;
+}
+
+static const struct drm_encoder_helper_funcs psb_intel_lvds_helper_funcs = {
+ .dpms = psb_intel_lvds_encoder_dpms,
+ .mode_fixup = psb_intel_lvds_mode_fixup,
+ .prepare = psb_intel_lvds_prepare,
+ .mode_set = psb_intel_lvds_mode_set,
+ .commit = psb_intel_lvds_commit,
+};
+
+const struct drm_connector_helper_funcs
+ psb_intel_lvds_connector_helper_funcs = {
+ .get_modes = psb_intel_lvds_get_modes,
+ .mode_valid = psb_intel_lvds_mode_valid,
+ .best_encoder = psb_intel_best_encoder,
+};
+
+const struct drm_connector_funcs psb_intel_lvds_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .save = psb_intel_lvds_save,
+ .restore = psb_intel_lvds_restore,
+ .detect = psb_intel_lvds_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = psb_intel_lvds_set_property,
+ .destroy = psb_intel_lvds_destroy,
+};
+
+
+static void psb_intel_lvds_enc_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+}
+
+const struct drm_encoder_funcs psb_intel_lvds_enc_funcs = {
+ .destroy = psb_intel_lvds_enc_destroy,
+};
+
+
+
+/**
+ * psb_intel_lvds_init - setup LVDS connectors on this device
+ * @dev: drm device
+ *
+ * Create the connector, register the LVDS DDC bus, and try to figure out what
+ * modes we can display on the LVDS panel (if present).
+ */
+void psb_intel_lvds_init(struct drm_device *dev,
+ struct psb_intel_mode_device *mode_dev)
+{
+ struct psb_intel_encoder *psb_intel_encoder;
+ struct psb_intel_connector *psb_intel_connector;
+ struct psb_intel_lvds_priv *lvds_priv;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+ struct drm_display_mode *scan; /* *modes, *bios_mode; */
+ struct drm_crtc *crtc;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ u32 lvds;
+ int pipe;
+
+ psb_intel_encoder =
+ kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL);
+
+ if (!psb_intel_encoder) {
+ dev_err(dev->dev, "psb_intel_encoder allocation error\n");
+ return;
+ }
+
+ psb_intel_connector =
+ kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL);
+
+ if (!psb_intel_connector) {
+ kfree(psb_intel_encoder);
+ dev_err(dev->dev, "psb_intel_connector allocation error\n");
+ return;
+ }
+
+ lvds_priv = kzalloc(sizeof(struct psb_intel_lvds_priv), GFP_KERNEL);
+ if (!lvds_priv) {
+ dev_err(dev->dev, "LVDS private allocation error\n");
+ goto failed_connector;
+ }
+
+ psb_intel_encoder->dev_priv = lvds_priv;
+
+ connector = &psb_intel_connector->base;
+ encoder = &psb_intel_encoder->base;
+ drm_connector_init(dev, connector,
+ &psb_intel_lvds_connector_funcs,
+ DRM_MODE_CONNECTOR_LVDS);
+
+ drm_encoder_init(dev, encoder,
+ &psb_intel_lvds_enc_funcs,
+ DRM_MODE_ENCODER_LVDS);
+
+ psb_intel_connector_attach_encoder(psb_intel_connector,
+ psb_intel_encoder);
+ psb_intel_encoder->type = INTEL_OUTPUT_LVDS;
+
+ drm_encoder_helper_add(encoder, &psb_intel_lvds_helper_funcs);
+ drm_connector_helper_add(connector,
+ &psb_intel_lvds_connector_helper_funcs);
+ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+
+ /*Attach connector properties*/
+ drm_connector_attach_property(connector,
+ dev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_FULLSCREEN);
+ drm_connector_attach_property(connector,
+ dev_priv->backlight_property,
+ BRIGHTNESS_MAX_LEVEL);
+
+ /*
+ * Set up I2C bus
+ * FIXME: destroy i2c_bus on exit
+ */
+ lvds_priv->i2c_bus = psb_intel_i2c_create(dev, GPIOB, "LVDSBLC_B");
+ if (!lvds_priv->i2c_bus) {
+ dev_printk(KERN_ERR,
+ &dev->pdev->dev, "I2C bus registration failed.\n");
+ goto failed_blc_i2c;
+ }
+ lvds_priv->i2c_bus->slave_addr = 0x2C;
+ dev_priv->lvds_i2c_bus = lvds_priv->i2c_bus;
+
+ /*
+ * LVDS discovery:
+ * 1) check for EDID on DDC
+ * 2) check for VBT data
+ * 3) check to see if LVDS is already on
+ * if none of the above, no panel
+ * 4) make sure lid is open
+ * if closed, act like it's not there for now
+ */
+
+ /* Set up the DDC bus. */
+ lvds_priv->ddc_bus = psb_intel_i2c_create(dev, GPIOC, "LVDSDDC_C");
+ if (!lvds_priv->ddc_bus) {
+ dev_printk(KERN_ERR, &dev->pdev->dev,
+ "DDC bus registration " "failed.\n");
+ goto failed_ddc;
+ }
+
+ /*
+ * Attempt to get the fixed panel mode from DDC. Assume that the
+ * preferred mode is the right one.
+ */
+ psb_intel_ddc_get_modes(connector, &lvds_priv->ddc_bus->adapter);
+ list_for_each_entry(scan, &connector->probed_modes, head) {
+ if (scan->type & DRM_MODE_TYPE_PREFERRED) {
+ mode_dev->panel_fixed_mode =
+ drm_mode_duplicate(dev, scan);
+ goto out; /* FIXME: check for quirks */
+ }
+ }
+
+ /* Failed to get EDID, what about VBT? do we need this? */
+ if (mode_dev->vbt_mode)
+ mode_dev->panel_fixed_mode =
+ drm_mode_duplicate(dev, mode_dev->vbt_mode);
+
+ if (!mode_dev->panel_fixed_mode)
+ if (dev_priv->lfp_lvds_vbt_mode)
+ mode_dev->panel_fixed_mode =
+ drm_mode_duplicate(dev,
+ dev_priv->lfp_lvds_vbt_mode);
+
+ /*
+ * If we didn't get EDID, try checking if the panel is already turned
+ * on. If so, assume that whatever is currently programmed is the
+ * correct mode.
+ */
+ lvds = REG_READ(LVDS);
+ pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
+ crtc = psb_intel_get_crtc_from_pipe(dev, pipe);
+
+ if (crtc && (lvds & LVDS_PORT_EN)) {
+ mode_dev->panel_fixed_mode =
+ psb_intel_crtc_mode_get(dev, crtc);
+ if (mode_dev->panel_fixed_mode) {
+ mode_dev->panel_fixed_mode->type |=
+ DRM_MODE_TYPE_PREFERRED;
+ goto out; /* FIXME: check for quirks */
+ }
+ }
+
+ /* If we still don't have a mode after all that, give up. */
+ if (!mode_dev->panel_fixed_mode) {
+ dev_err(dev->dev, "Found no modes on the lvds, ignoring the LVDS\n");
+ goto failed_find;
+ }
+
+ /*
+ * Blacklist machines with BIOSes that list an LVDS panel without
+ * actually having one.
+ */
+out:
+ drm_sysfs_connector_add(connector);
+ return;
+
+failed_find:
+ if (lvds_priv->ddc_bus)
+ psb_intel_i2c_destroy(lvds_priv->ddc_bus);
+failed_ddc:
+ if (lvds_priv->i2c_bus)
+ psb_intel_i2c_destroy(lvds_priv->i2c_bus);
+failed_blc_i2c:
+ drm_encoder_cleanup(encoder);
+ drm_connector_cleanup(connector);
+failed_connector:
+ if (psb_intel_connector)
+ kfree(psb_intel_connector);
+}
+
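Illustrative sketch (not part of this patch): both the I2C and PWM brightness paths above scale a 0..BRIGHTNESS_MAX_LEVEL level into the hardware range and invert it when the polarity is BLC_POLARITY_INVERSE. The I2C-path arithmetic in isolation:

#include <stdio.h>

#define BRIGHTNESS_MAX_LEVEL	100
#define BRIGHTNESS_MASK		0xFF

/* Scale a 0..100 user level into the 0..0xFF I2C brightness range,
 * inverting it when the backlight polarity is inverse. */
static unsigned int scale_brightness(unsigned int level, int inverse)
{
	unsigned int b = BRIGHTNESS_MASK &
			 (level * BRIGHTNESS_MASK / BRIGHTNESS_MAX_LEVEL);

	return inverse ? BRIGHTNESS_MASK - b : b;
}

int main(void)
{
	printf("normal=%u inverse=%u\n",
	       scale_brightness(50, 0), scale_brightness(50, 1));
	return 0;	/* prints normal=127 inverse=128 */
}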
diff --git a/drivers/gpu/drm/gma500/psb_intel_modes.c b/drivers/gpu/drm/gma500/psb_intel_modes.c
new file mode 100644
index 000000000000..4fca0d6feebe
--- /dev/null
+++ b/drivers/gpu/drm/gma500/psb_intel_modes.c
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2007 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors: Jesse Barnes <jesse.barnes@intel.com>
+ */
+
+#include <linux/i2c.h>
+#include <linux/fb.h>
+#include <drm/drmP.h>
+#include "psb_intel_drv.h"
+
+/**
+ * psb_intel_ddc_probe - probe for a responding DDC/EDID device on @adapter
+ * @adapter: I2C adapter to probe
+ */
+bool psb_intel_ddc_probe(struct i2c_adapter *adapter)
+{
+ u8 out_buf[] = { 0x0, 0x0 };
+ u8 buf[2];
+ int ret;
+ struct i2c_msg msgs[] = {
+ {
+ .addr = 0x50,
+ .flags = 0,
+ .len = 1,
+ .buf = out_buf,
+ },
+ {
+ .addr = 0x50,
+ .flags = I2C_M_RD,
+ .len = 1,
+ .buf = buf,
+ }
+ };
+
+ ret = i2c_transfer(adapter, msgs, 2);
+ if (ret == 2)
+ return true;
+
+ return false;
+}
+
+/**
+ * psb_intel_ddc_get_modes - get modelist from monitor
+ * @connector: DRM connector device to use
+ *
+ * Fetch the EDID information from @connector using the DDC bus.
+ */
+int psb_intel_ddc_get_modes(struct drm_connector *connector,
+ struct i2c_adapter *adapter)
+{
+ struct edid *edid;
+ int ret = 0;
+
+ edid = drm_get_edid(connector, adapter);
+ if (edid) {
+ drm_mode_connector_update_edid_property(connector, edid);
+ ret = drm_add_edid_modes(connector, edid);
+ kfree(edid);
+ }
+ return ret;
+}
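/*
 * Editor's sketch, not part of this patch: a connector's ->get_modes()
 * hook would typically combine the two helpers above roughly like this.
 * "example_priv", "connector_to_example_priv" and the "ddc" adapter
 * pointer are made up for illustration.
 */
static int example_get_modes(struct drm_connector *connector)
{
	struct example_priv *priv = connector_to_example_priv(connector);

	/* Nothing answered at the EDID address, so skip the DDC fetch. */
	if (!psb_intel_ddc_probe(priv->ddc))
		return 0;

	/* Read the EDID, attach it to the connector and add its modes. */
	return psb_intel_ddc_get_modes(connector, priv->ddc);
}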
diff --git a/drivers/gpu/drm/gma500/psb_intel_reg.h b/drivers/gpu/drm/gma500/psb_intel_reg.h
new file mode 100644
index 000000000000..fcc0af03d685
--- /dev/null
+++ b/drivers/gpu/drm/gma500/psb_intel_reg.h
@@ -0,0 +1,1309 @@
+/*
+ * Copyright (c) 2009, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+#ifndef __PSB_INTEL_REG_H__
+#define __PSB_INTEL_REG_H__
+
+/*
+ * GPIO regs
+ */
+#define GPIOA 0x5010
+#define GPIOB 0x5014
+#define GPIOC 0x5018
+#define GPIOD 0x501c
+#define GPIOE 0x5020
+#define GPIOF 0x5024
+#define GPIOG 0x5028
+#define GPIOH 0x502c
+# define GPIO_CLOCK_DIR_MASK (1 << 0)
+# define GPIO_CLOCK_DIR_IN (0 << 1)
+# define GPIO_CLOCK_DIR_OUT (1 << 1)
+# define GPIO_CLOCK_VAL_MASK (1 << 2)
+# define GPIO_CLOCK_VAL_OUT (1 << 3)
+# define GPIO_CLOCK_VAL_IN (1 << 4)
+# define GPIO_CLOCK_PULLUP_DISABLE (1 << 5)
+# define GPIO_DATA_DIR_MASK (1 << 8)
+# define GPIO_DATA_DIR_IN (0 << 9)
+# define GPIO_DATA_DIR_OUT (1 << 9)
+# define GPIO_DATA_VAL_MASK (1 << 10)
+# define GPIO_DATA_VAL_OUT (1 << 11)
+# define GPIO_DATA_VAL_IN (1 << 12)
+# define GPIO_DATA_PULLUP_DISABLE (1 << 13)
+
+#define GMBUS0 0x5100 /* clock/port select */
+#define GMBUS_RATE_100KHZ (0<<8)
+#define GMBUS_RATE_50KHZ (1<<8)
+#define GMBUS_RATE_400KHZ (2<<8) /* reserved on Pineview */
+#define GMBUS_RATE_1MHZ (3<<8) /* reserved on Pineview */
+#define GMBUS_HOLD_EXT (1<<7) /* 300ns hold time, rsvd on Pineview */
+#define GMBUS_PORT_DISABLED 0
+#define GMBUS_PORT_SSC 1
+#define GMBUS_PORT_VGADDC 2
+#define GMBUS_PORT_PANEL 3
+#define GMBUS_PORT_DPC 4 /* HDMIC */
+#define GMBUS_PORT_DPB 5 /* SDVO, HDMIB */
+ /* 6 reserved */
+#define GMBUS_PORT_DPD 7 /* HDMID */
+#define GMBUS_NUM_PORTS 8
+#define GMBUS1 0x5104 /* command/status */
+#define GMBUS_SW_CLR_INT (1<<31)
+#define GMBUS_SW_RDY (1<<30)
+#define GMBUS_ENT (1<<29) /* enable timeout */
+#define GMBUS_CYCLE_NONE (0<<25)
+#define GMBUS_CYCLE_WAIT (1<<25)
+#define GMBUS_CYCLE_INDEX (2<<25)
+#define GMBUS_CYCLE_STOP (4<<25)
+#define GMBUS_BYTE_COUNT_SHIFT 16
+#define GMBUS_SLAVE_INDEX_SHIFT 8
+#define GMBUS_SLAVE_ADDR_SHIFT 1
+#define GMBUS_SLAVE_READ (1<<0)
+#define GMBUS_SLAVE_WRITE (0<<0)
+#define GMBUS2 0x5108 /* status */
+#define GMBUS_INUSE (1<<15)
+#define GMBUS_HW_WAIT_PHASE (1<<14)
+#define GMBUS_STALL_TIMEOUT (1<<13)
+#define GMBUS_INT (1<<12)
+#define GMBUS_HW_RDY (1<<11)
+#define GMBUS_SATOER (1<<10)
+#define GMBUS_ACTIVE (1<<9)
+#define GMBUS3 0x510c /* data buffer bytes 3-0 */
+#define GMBUS4 0x5110 /* interrupt mask (Pineview+) */
+#define GMBUS_SLAVE_TIMEOUT_EN (1<<4)
+#define GMBUS_NAK_EN (1<<3)
+#define GMBUS_IDLE_EN (1<<2)
+#define GMBUS_HW_WAIT_EN (1<<1)
+#define GMBUS_HW_RDY_EN (1<<0)
+#define GMBUS5 0x5120 /* byte index */
+#define GMBUS_2BYTE_INDEX_EN (1<<31)
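/*
 * Editor's sketch, not part of this patch: composing the GMBUS1 command
 * word for a read of "len" bytes from 7-bit address "addr", using the
 * fields above. The caller would first select a port and rate in GMBUS0,
 * then write this value to GMBUS1; data arrives four bytes at a time in
 * GMBUS3 as GMBUS_HW_RDY asserts. The "example_" name is made up.
 */
static u32 example_gmbus1_read_cmd(u8 addr, u16 len)
{
	return GMBUS_SW_RDY | GMBUS_CYCLE_WAIT | GMBUS_CYCLE_STOP |
	       (len << GMBUS_BYTE_COUNT_SHIFT) |
	       (addr << GMBUS_SLAVE_ADDR_SHIFT) |
	       GMBUS_SLAVE_READ;
}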
+
+#define BLC_PWM_CTL 0x61254
+#define BLC_PWM_CTL2 0x61250
+#define BLC_PWM_CTL_C 0x62254
+#define BLC_PWM_CTL2_C 0x62250
+#define BACKLIGHT_MODULATION_FREQ_SHIFT (17)
+/*
+ * This is the most significant 15 bits of the number of backlight cycles in a
+ * complete cycle of the modulated backlight control.
+ *
+ * The actual value is this field multiplied by two.
+ */
+#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17)
+#define BLM_LEGACY_MODE (1 << 16)
+/*
+ * This is the number of cycles out of the backlight modulation cycle for which
+ * the backlight is on.
+ *
+ * This field must be no greater than the number of cycles in the complete
+ * backlight modulation cycle.
+ */
+#define BACKLIGHT_DUTY_CYCLE_SHIFT (0)
+#define BACKLIGHT_DUTY_CYCLE_MASK (0xffff)
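/*
 * Editor's sketch, not part of this patch: deriving the maximum duty
 * cycle from the modulation frequency field (stored as value / 2, per
 * the comment above) and building a new BLC_PWM_CTL value for a given
 * brightness percentage. The caller would read BLC_PWM_CTL, pass it in
 * and write the result back. The "example_" name is made up.
 */
static u32 example_blc_pwm_ctl(u32 ctl, unsigned int percent)
{
	u32 max = ((ctl & BACKLIGHT_MODULATION_FREQ_MASK) >>
		   BACKLIGHT_MODULATION_FREQ_SHIFT) << 1;
	u32 duty = max * percent / 100;

	return (ctl & ~BACKLIGHT_DUTY_CYCLE_MASK) |
	       (duty << BACKLIGHT_DUTY_CYCLE_SHIFT);
}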
+
+#define I915_GCFGC 0xf0
+#define I915_LOW_FREQUENCY_ENABLE (1 << 7)
+#define I915_DISPLAY_CLOCK_190_200_MHZ (0 << 4)
+#define I915_DISPLAY_CLOCK_333_MHZ (4 << 4)
+#define I915_DISPLAY_CLOCK_MASK (7 << 4)
+
+#define I855_HPLLCC 0xc0
+#define I855_CLOCK_CONTROL_MASK (3 << 0)
+#define I855_CLOCK_133_200 (0 << 0)
+#define I855_CLOCK_100_200 (1 << 0)
+#define I855_CLOCK_100_133 (2 << 0)
+#define I855_CLOCK_166_250 (3 << 0)
+
+/* I830 CRTC registers */
+#define HTOTAL_A 0x60000
+#define HBLANK_A 0x60004
+#define HSYNC_A 0x60008
+#define VTOTAL_A 0x6000c
+#define VBLANK_A 0x60010
+#define VSYNC_A 0x60014
+#define PIPEASRC 0x6001c
+#define BCLRPAT_A 0x60020
+#define VSYNCSHIFT_A 0x60028
+
+#define HTOTAL_B 0x61000
+#define HBLANK_B 0x61004
+#define HSYNC_B 0x61008
+#define VTOTAL_B 0x6100c
+#define VBLANK_B 0x61010
+#define VSYNC_B 0x61014
+#define PIPEBSRC 0x6101c
+#define BCLRPAT_B 0x61020
+#define VSYNCSHIFT_B 0x61028
+
+#define HTOTAL_C 0x62000
+#define HBLANK_C 0x62004
+#define HSYNC_C 0x62008
+#define VTOTAL_C 0x6200c
+#define VBLANK_C 0x62010
+#define VSYNC_C 0x62014
+#define PIPECSRC 0x6201c
+#define BCLRPAT_C 0x62020
+#define VSYNCSHIFT_C 0x62028
+
+#define PP_STATUS 0x61200
+# define PP_ON (1 << 31)
+/*
+ * Indicates that all dependencies of the panel are on:
+ *
+ * - PLL enabled
+ * - pipe enabled
+ * - LVDS/DVOB/DVOC on
+ */
+#define PP_READY (1 << 30)
+#define PP_SEQUENCE_NONE (0 << 28)
+#define PP_SEQUENCE_ON (1 << 28)
+#define PP_SEQUENCE_OFF (2 << 28)
+#define PP_SEQUENCE_MASK 0x30000000
+#define PP_CONTROL 0x61204
+#define POWER_TARGET_ON (1 << 0)
+
+#define LVDSPP_ON 0x61208
+#define LVDSPP_OFF 0x6120c
+#define PP_CYCLE 0x61210
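/*
 * Editor's sketch, not part of this patch: turning the panel on through
 * the power sequencer with the bits above. This assumes it runs inside a
 * function where the driver's REG_READ()/REG_WRITE() MMIO helpers (used
 * elsewhere in this patch) are available; a real driver would also bound
 * the wait with a timeout.
 */
REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) | POWER_TARGET_ON);
while ((REG_READ(PP_STATUS) & PP_ON) == 0)
	cpu_relax();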
+
+#define PFIT_CONTROL 0x61230
+#define PFIT_ENABLE (1 << 31)
+#define PFIT_PIPE_MASK (3 << 29)
+#define PFIT_PIPE_SHIFT 29
+#define PFIT_SCALING_MODE_PILLARBOX (1 << 27)
+#define PFIT_SCALING_MODE_LETTERBOX (3 << 26)
+#define VERT_INTERP_DISABLE (0 << 10)
+#define VERT_INTERP_BILINEAR (1 << 10)
+#define VERT_INTERP_MASK (3 << 10)
+#define VERT_AUTO_SCALE (1 << 9)
+#define HORIZ_INTERP_DISABLE (0 << 6)
+#define HORIZ_INTERP_BILINEAR (1 << 6)
+#define HORIZ_INTERP_MASK (3 << 6)
+#define HORIZ_AUTO_SCALE (1 << 5)
+#define PANEL_8TO6_DITHER_ENABLE (1 << 3)
+
+#define PFIT_PGM_RATIOS 0x61234
+#define PFIT_VERT_SCALE_MASK 0xfff00000
+#define PFIT_HORIZ_SCALE_MASK 0x0000fff0
+
+#define PFIT_AUTO_RATIOS 0x61238
+
+#define DPLL_A 0x06014
+#define DPLL_B 0x06018
+#define DPLL_VCO_ENABLE (1 << 31)
+#define DPLL_DVO_HIGH_SPEED (1 << 30)
+#define DPLL_SYNCLOCK_ENABLE (1 << 29)
+#define DPLL_VGA_MODE_DIS (1 << 28)
+#define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */
+#define DPLLB_MODE_LVDS (2 << 26) /* i915 */
+#define DPLL_MODE_MASK (3 << 26)
+#define DPLL_DAC_SERIAL_P2_CLOCK_DIV_10 (0 << 24) /* i915 */
+#define DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 (1 << 24) /* i915 */
+#define DPLLB_LVDS_P2_CLOCK_DIV_14 (0 << 24) /* i915 */
+#define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */
+#define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
+#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
+#define DPLL_LOCK (1 << 15) /* CDV */
+
+/*
+ * The i830 generation, in DAC/serial mode, defines p1 as two plus this
+ * bitfield, or just 2 if PLL_P1_DIVIDE_BY_TWO is set.
+ */
+# define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000
+/*
+ * The i830 generation, in LVDS mode, defines P1 as the bit number set within
+ * this field (only one bit may be set).
+ */
+#define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000
+#define DPLL_FPA01_P1_POST_DIV_SHIFT 16
+#define PLL_P2_DIVIDE_BY_4 (1 << 23) /* i830, required
+ * in DVO non-gang */
+# define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */
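/*
 * Editor's sketch, not part of this patch: decoding P1 on i830-class
 * hardware according to the two rules described above (the "example_"
 * name is made up).
 */
static int example_i830_decode_p1(u32 dpll, bool is_lvds)
{
	u32 field;

	if (is_lvds) {
		/* LVDS mode: P1 is the number of the (single) bit set. */
		field = (dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
			DPLL_FPA01_P1_POST_DIV_SHIFT;
		return ffs(field);
	}

	if (dpll & PLL_P1_DIVIDE_BY_TWO)
		return 2;

	/* DAC/serial mode: P1 is two plus the field value. */
	field = (dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
		DPLL_FPA01_P1_POST_DIV_SHIFT;
	return field + 2;
}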
+#define PLL_REF_INPUT_DREFCLK (0 << 13)
+#define PLL_REF_INPUT_TVCLKINA (1 << 13) /* i830 */
+#define PLL_REF_INPUT_TVCLKINBC (2 << 13) /* SDVO
+ * TVCLKIN */
+#define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13)
+#define PLL_REF_INPUT_MASK (3 << 13)
+#define PLL_LOAD_PULSE_PHASE_SHIFT 9
+/*
+ * Parallel to Serial Load Pulse phase selection.
+ * Selects the phase for the 10X DPLL clock for the PCIe
+ * digital display port. The range is 4 to 13; 10 or more
+ * is just a flip delay. The default is 6.
+ */
+#define PLL_LOAD_PULSE_PHASE_MASK (0xf << PLL_LOAD_PULSE_PHASE_SHIFT)
+#define DISPLAY_RATE_SELECT_FPA1 (1 << 8)
+
+/*
+ * SDVO multiplier for 945G/GM. Not used on 965.
+ *
+ * DPLL_MD_UDI_MULTIPLIER_MASK
+ */
+#define SDVO_MULTIPLIER_MASK 0x000000ff
+#define SDVO_MULTIPLIER_SHIFT_HIRES 4
+#define SDVO_MULTIPLIER_SHIFT_VGA 0
+
+/*
+ * PLL_MD
+ */
+/* Pipe A SDVO/UDI clock multiplier/divider register for G965. */
+#define DPLL_A_MD 0x0601c
+/* Pipe B SDVO/UDI clock multiplier/divider register for G965. */
+#define DPLL_B_MD 0x06020
+/*
+ * UDI pixel divider, controlling how many pixels are stuffed into a packet.
+ *
+ * Value is pixels minus 1. Must be set to 1 pixel for SDVO.
+ */
+#define DPLL_MD_UDI_DIVIDER_MASK 0x3f000000
+#define DPLL_MD_UDI_DIVIDER_SHIFT 24
+/* UDI pixel divider for VGA, same as DPLL_MD_UDI_DIVIDER_MASK. */
+#define DPLL_MD_VGA_UDI_DIVIDER_MASK 0x003f0000
+#define DPLL_MD_VGA_UDI_DIVIDER_SHIFT 16
+/*
+ * SDVO/UDI pixel multiplier.
+ *
+ * SDVO requires that the bus clock rate be between 1 and 2 GHz, and the bus
+ * clock rate is 10 times the DPLL clock. At low resolution/refresh rate
+ * modes, the bus rate would be below the limits, so SDVO allows for stuffing
+ * dummy bytes in the datastream at an increased clock rate, with both sides of
+ * the link knowing how many bytes are fill.
+ *
+ * So, for a mode with a dotclock of 65 MHz, we would want to double the clock
+ * rate to 130 MHz to get a bus rate of 1.30 GHz. The DPLL clock rate would be
+ * set to 130 MHz, and the SDVO multiplier set to 2x in this register and
+ * through an SDVO command.
+ *
+ * This register field has values of multiplication factor minus 1, with
+ * a maximum multiplier of 5 for SDVO.
+ */
+#define DPLL_MD_UDI_MULTIPLIER_MASK 0x00003f00
+#define DPLL_MD_UDI_MULTIPLIER_SHIFT 8
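/*
 * Editor's sketch, not part of this patch: choosing an SDVO pixel
 * multiplier so the adjusted clock lands in the 100-200 MHz range
 * (a 1-2 GHz bus rate) and encoding it as "factor - 1" in the field
 * above. The exact thresholds are an assumption based on the worked
 * example in the comment; the "example_" name is made up.
 */
static u32 example_sdvo_multiplier_field(int dotclock_khz)
{
	int mult;

	if (dotclock_khz >= 100000)
		mult = 1;
	else if (dotclock_khz >= 50000)
		mult = 2;
	else
		mult = 4;

	return ((u32)(mult - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT) &
	       DPLL_MD_UDI_MULTIPLIER_MASK;
}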
+/*
+ * SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK.
+ * This should be set to the default value (3) or the CRT won't work. No,
+ * I don't entirely understand what this does...
+ */
+#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
+#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
+
+#define DPLL_TEST 0x606c
+#define DPLLB_TEST_SDVO_DIV_1 (0 << 22)
+#define DPLLB_TEST_SDVO_DIV_2 (1 << 22)
+#define DPLLB_TEST_SDVO_DIV_4 (2 << 22)
+#define DPLLB_TEST_SDVO_DIV_MASK (3 << 22)
+#define DPLLB_TEST_N_BYPASS (1 << 19)
+#define DPLLB_TEST_M_BYPASS (1 << 18)
+#define DPLLB_INPUT_BUFFER_ENABLE (1 << 16)
+#define DPLLA_TEST_N_BYPASS (1 << 3)
+#define DPLLA_TEST_M_BYPASS (1 << 2)
+#define DPLLA_INPUT_BUFFER_ENABLE (1 << 0)
+
+#define ADPA 0x61100
+#define ADPA_DAC_ENABLE (1 << 31)
+#define ADPA_DAC_DISABLE 0
+#define ADPA_PIPE_SELECT_MASK (1 << 30)
+#define ADPA_PIPE_A_SELECT 0
+#define ADPA_PIPE_B_SELECT (1 << 30)
+#define ADPA_USE_VGA_HVPOLARITY (1 << 15)
+#define ADPA_SETS_HVPOLARITY 0
+#define ADPA_VSYNC_CNTL_DISABLE (1 << 11)
+#define ADPA_VSYNC_CNTL_ENABLE 0
+#define ADPA_HSYNC_CNTL_DISABLE (1 << 10)
+#define ADPA_HSYNC_CNTL_ENABLE 0
+#define ADPA_VSYNC_ACTIVE_HIGH (1 << 4)
+#define ADPA_VSYNC_ACTIVE_LOW 0
+#define ADPA_HSYNC_ACTIVE_HIGH (1 << 3)
+#define ADPA_HSYNC_ACTIVE_LOW 0
+
+#define FPA0 0x06040
+#define FPA1 0x06044
+#define FPB0 0x06048
+#define FPB1 0x0604c
+#define FP_N_DIV_MASK 0x003f0000
+#define FP_N_DIV_SHIFT 16
+#define FP_M1_DIV_MASK 0x00003f00
+#define FP_M1_DIV_SHIFT 8
+#define FP_M2_DIV_MASK 0x0000003f
+#define FP_M2_DIV_SHIFT 0
+
+#define PORT_HOTPLUG_EN 0x61110
+#define SDVOB_HOTPLUG_INT_EN (1 << 26)
+#define SDVOC_HOTPLUG_INT_EN (1 << 25)
+#define TV_HOTPLUG_INT_EN (1 << 18)
+#define CRT_HOTPLUG_INT_EN (1 << 9)
+#define CRT_HOTPLUG_FORCE_DETECT (1 << 3)
+/* CDV.. */
+#define CRT_HOTPLUG_ACTIVATION_PERIOD_64 (1 << 8)
+#define CRT_HOTPLUG_DAC_ON_TIME_2M (0 << 7)
+#define CRT_HOTPLUG_DAC_ON_TIME_4M (1 << 7)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_40 (0 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_50 (1 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_60 (2 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_70 (3 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_MASK (3 << 5)
+#define CRT_HOTPLUG_DETECT_DELAY_1G (0 << 4)
+#define CRT_HOTPLUG_DETECT_DELAY_2G (1 << 4)
+#define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2)
+#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
+#define CRT_HOTPLUG_DETECT_MASK 0x000000F8
+
+#define PORT_HOTPLUG_STAT 0x61114
+#define CRT_HOTPLUG_INT_STATUS (1 << 11)
+#define TV_HOTPLUG_INT_STATUS (1 << 10)
+#define CRT_HOTPLUG_MONITOR_MASK (3 << 8)
+#define CRT_HOTPLUG_MONITOR_COLOR (3 << 8)
+#define CRT_HOTPLUG_MONITOR_MONO (2 << 8)
+#define CRT_HOTPLUG_MONITOR_NONE (0 << 8)
+#define SDVOC_HOTPLUG_INT_STATUS (1 << 7)
+#define SDVOB_HOTPLUG_INT_STATUS (1 << 6)
+
+#define SDVOB 0x61140
+#define SDVOC 0x61160
+#define SDVO_ENABLE (1 << 31)
+#define SDVO_PIPE_B_SELECT (1 << 30)
+#define SDVO_STALL_SELECT (1 << 29)
+#define SDVO_INTERRUPT_ENABLE (1 << 26)
+#define SDVO_COLOR_RANGE_16_235 (1 << 8)
+#define SDVO_AUDIO_ENABLE (1 << 6)
+
+/**
+ * 915G/GM SDVO pixel multiplier.
+ *
+ * Programmed value is multiplier - 1, up to 5x.
+ *
+ * DPLL_MD_UDI_MULTIPLIER_MASK
+ */
+#define SDVO_PORT_MULTIPLY_MASK (7 << 23)
+#define SDVO_PORT_MULTIPLY_SHIFT 23
+#define SDVO_PHASE_SELECT_MASK (15 << 19)
+#define SDVO_PHASE_SELECT_DEFAULT (6 << 19)
+#define SDVO_CLOCK_OUTPUT_INVERT (1 << 18)
+#define SDVOC_GANG_MODE (1 << 16)
+#define SDVO_BORDER_ENABLE (1 << 7)
+#define SDVOB_PCIE_CONCURRENCY (1 << 3)
+#define SDVO_DETECTED (1 << 2)
+/* Bits to be preserved when writing */
+#define SDVOB_PRESERVE_MASK ((1 << 17) | (1 << 16) | (1 << 14))
+#define SDVOC_PRESERVE_MASK (1 << 17)
+
+/*
+ * This register controls the LVDS output enable, pipe selection, and data
+ * format selection.
+ *
+ * All of the clock/data pairs are force powered down by power sequencing.
+ */
+#define LVDS 0x61180
+/*
+ * Enables the LVDS port. This bit must be set before DPLLs are enabled, as
+ * the DPLL semantics change when the LVDS is assigned to that pipe.
+ */
+#define LVDS_PORT_EN (1 << 31)
+/* Selects pipe B for LVDS data. Must be set on pre-965. */
+#define LVDS_PIPEB_SELECT (1 << 30)
+
+/* Turns on border drawing to allow centered display. */
+#define LVDS_BORDER_EN (1 << 15)
+
+/*
+ * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per
+ * pixel.
+ */
+#define LVDS_A0A2_CLKA_POWER_MASK (3 << 8)
+#define LVDS_A0A2_CLKA_POWER_DOWN (0 << 8)
+#define LVDS_A0A2_CLKA_POWER_UP (3 << 8)
+/*
+ * Controls the A3 data pair, which contains the additional LSBs for 24 bit
+ * mode. Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be
+ * on.
+ */
+#define LVDS_A3_POWER_MASK (3 << 6)
+#define LVDS_A3_POWER_DOWN (0 << 6)
+#define LVDS_A3_POWER_UP (3 << 6)
+/*
+ * Controls the CLKB pair. This should only be set when LVDS_B0B3_POWER_UP
+ * is set.
+ */
+#define LVDS_CLKB_POWER_MASK (3 << 4)
+#define LVDS_CLKB_POWER_DOWN (0 << 4)
+#define LVDS_CLKB_POWER_UP (3 << 4)
+/*
+ * Controls the B0-B3 data pairs. This must be set to match the DPLL p2
+ * setting for whether we are in dual-channel mode. The B3 pair will
+ * additionally only be powered up when LVDS_A3_POWER_UP is set.
+ */
+#define LVDS_B0B3_POWER_MASK (3 << 2)
+#define LVDS_B0B3_POWER_DOWN (0 << 2)
+#define LVDS_B0B3_POWER_UP (3 << 2)
+
+#define PIPEACONF 0x70008
+#define PIPEACONF_ENABLE (1 << 31)
+#define PIPEACONF_DISABLE 0
+#define PIPEACONF_DOUBLE_WIDE (1 << 30)
+#define PIPECONF_ACTIVE (1 << 30)
+#define I965_PIPECONF_ACTIVE (1 << 30)
+#define PIPECONF_DSIPLL_LOCK (1 << 29)
+#define PIPEACONF_SINGLE_WIDE 0
+#define PIPEACONF_PIPE_UNLOCKED 0
+#define PIPEACONF_DSR (1 << 26)
+#define PIPEACONF_PIPE_LOCKED (1 << 25)
+#define PIPEACONF_PALETTE 0
+#define PIPECONF_FORCE_BORDER (1 << 25)
+#define PIPEACONF_GAMMA (1 << 24)
+#define PIPECONF_PROGRESSIVE (0 << 21)
+#define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21)
+#define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21)
+#define PIPECONF_PLANE_OFF (1 << 19)
+#define PIPECONF_CURSOR_OFF (1 << 18)
+
+#define PIPEBCONF 0x71008
+#define PIPEBCONF_ENABLE (1 << 31)
+#define PIPEBCONF_DISABLE 0
+#define PIPEBCONF_DOUBLE_WIDE (1 << 30)
+#define PIPEBCONF_DISABLE 0
+#define PIPEBCONF_GAMMA (1 << 24)
+#define PIPEBCONF_PALETTE 0
+
+#define PIPECCONF 0x72008
+
+#define PIPEBGCMAXRED 0x71010
+#define PIPEBGCMAXGREEN 0x71014
+#define PIPEBGCMAXBLUE 0x71018
+
+#define PIPEASTAT 0x70024
+#define PIPEBSTAT 0x71024
+#define PIPECSTAT 0x72024
+#define PIPE_VBLANK_INTERRUPT_STATUS (1UL << 1)
+#define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL << 2)
+#define PIPE_VBLANK_CLEAR (1 << 1)
+#define PIPE_VBLANK_STATUS (1 << 1)
+#define PIPE_TE_STATUS (1UL << 6)
+#define PIPE_DPST_EVENT_STATUS (1UL << 7)
+#define PIPE_VSYNC_CLEAR (1UL << 9)
+#define PIPE_VSYNC_STATUS (1UL << 9)
+#define PIPE_HDMI_AUDIO_UNDERRUN_STATUS (1UL << 10)
+#define PIPE_HDMI_AUDIO_BUFFER_DONE_STATUS (1UL << 11)
+#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL << 17)
+#define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL << 18)
+#define PIPE_TE_ENABLE (1UL << 22)
+#define PIPE_DPST_EVENT_ENABLE (1UL << 23)
+#define PIPE_VSYNC_ENABL (1UL << 25)
+#define PIPE_HDMI_AUDIO_UNDERRUN (1UL << 26)
+#define PIPE_HDMI_AUDIO_BUFFER_DONE (1UL << 27)
+#define PIPE_HDMI_AUDIO_INT_MASK (PIPE_HDMI_AUDIO_UNDERRUN | \
+ PIPE_HDMI_AUDIO_BUFFER_DONE)
+#define PIPE_EVENT_MASK ((1 << 29)|(1 << 28)|(1 << 27)|(1 << 26)|(1 << 24)|(1 << 23)|(1 << 22)|(1 << 21)|(1 << 20)|(1 << 16))
+#define PIPE_VBLANK_MASK ((1 << 25)|(1 << 24)|(1 << 18)|(1 << 17))
+#define HISTOGRAM_INT_CONTROL 0x61268
+#define HISTOGRAM_BIN_DATA		0x61264
+#define HISTOGRAM_LOGIC_CONTROL 0x61260
+#define PWM_CONTROL_LOGIC 0x61250
+#define PIPE_HOTPLUG_INTERRUPT_STATUS (1UL << 10)
+#define HISTOGRAM_INTERRUPT_ENABLE (1UL << 31)
+#define HISTOGRAM_LOGIC_ENABLE (1UL << 31)
+#define PWM_LOGIC_ENABLE (1UL << 31)
+#define PWM_PHASEIN_ENABLE (1UL << 25)
+#define PWM_PHASEIN_INT_ENABLE (1UL << 24)
+#define PWM_PHASEIN_VB_COUNT 0x00001f00
+#define PWM_PHASEIN_INC 0x0000001f
+#define HISTOGRAM_INT_CTRL_CLEAR (1UL << 30)
+#define DPST_YUV_LUMA_MODE 0
+
+struct dpst_ie_histogram_control {
+ union {
+ uint32_t data;
+ struct {
+ uint32_t bin_reg_index:7;
+ uint32_t reserved:4;
+ uint32_t bin_reg_func_select:1;
+ uint32_t sync_to_phase_in:1;
+ uint32_t alt_enhancement_mode:2;
+ uint32_t reserved1:1;
+ uint32_t sync_to_phase_in_count:8;
+ uint32_t histogram_mode_select:1;
+ uint32_t reserved2:4;
+ uint32_t ie_pipe_assignment:1;
+ uint32_t ie_mode_table_enabled:1;
+ uint32_t ie_histogram_enable:1;
+ };
+ };
+};
+
+struct dpst_guardband {
+ union {
+ uint32_t data;
+ struct {
+ uint32_t guardband:22;
+ uint32_t guardband_interrupt_delay:8;
+ uint32_t interrupt_status:1;
+ uint32_t interrupt_enable:1;
+ };
+ };
+};
+
+#define PIPEAFRAMEHIGH 0x70040
+#define PIPEAFRAMEPIXEL 0x70044
+#define PIPEBFRAMEHIGH 0x71040
+#define PIPEBFRAMEPIXEL 0x71044
+#define PIPECFRAMEHIGH 0x72040
+#define PIPECFRAMEPIXEL 0x72044
+#define PIPE_FRAME_HIGH_MASK 0x0000ffff
+#define PIPE_FRAME_HIGH_SHIFT 0
+#define PIPE_FRAME_LOW_MASK 0xff000000
+#define PIPE_FRAME_LOW_SHIFT 24
+#define PIPE_PIXEL_MASK 0x00ffffff
+#define PIPE_PIXEL_SHIFT 0
+
+#define DSPARB 0x70030
+#define DSPFW1 0x70034
+#define DSPFW2 0x70038
+#define DSPFW3 0x7003c
+#define DSPFW4 0x70050
+#define DSPFW5 0x70054
+#define DSPFW6 0x70058
+#define DSPCHICKENBIT 0x70400
+#define DSPACNTR 0x70180
+#define DSPBCNTR 0x71180
+#define DSPCCNTR 0x72180
+#define DISPLAY_PLANE_ENABLE (1 << 31)
+#define DISPLAY_PLANE_DISABLE 0
+#define DISPPLANE_GAMMA_ENABLE (1 << 30)
+#define DISPPLANE_GAMMA_DISABLE 0
+#define DISPPLANE_PIXFORMAT_MASK (0xf << 26)
+#define DISPPLANE_8BPP (0x2 << 26)
+#define DISPPLANE_15_16BPP (0x4 << 26)
+#define DISPPLANE_16BPP (0x5 << 26)
+#define DISPPLANE_32BPP_NO_ALPHA (0x6 << 26)
+#define DISPPLANE_32BPP (0x7 << 26)
+#define DISPPLANE_STEREO_ENABLE (1 << 25)
+#define DISPPLANE_STEREO_DISABLE 0
+#define DISPPLANE_SEL_PIPE_MASK (1 << 24)
+#define DISPPLANE_SEL_PIPE_POS 24
+#define DISPPLANE_SEL_PIPE_A 0
+#define DISPPLANE_SEL_PIPE_B (1 << 24)
+#define DISPPLANE_SRC_KEY_ENABLE (1 << 22)
+#define DISPPLANE_SRC_KEY_DISABLE 0
+#define DISPPLANE_LINE_DOUBLE (1 << 20)
+#define DISPPLANE_NO_LINE_DOUBLE 0
+#define DISPPLANE_STEREO_POLARITY_FIRST 0
+#define DISPPLANE_STEREO_POLARITY_SECOND (1 << 18)
+/* plane B only */
+#define DISPPLANE_ALPHA_TRANS_ENABLE (1 << 15)
+#define DISPPLANE_ALPHA_TRANS_DISABLE 0
+#define DISPPLANE_SPRITE_ABOVE_DISPLAYA 0
+#define DISPPLANE_SPRITE_ABOVE_OVERLAY (1)
+#define DISPPLANE_BOTTOM (4)
+
+#define DSPABASE 0x70184
+#define DSPALINOFF 0x70184
+#define DSPASTRIDE 0x70188
+
+#define DSPBBASE 0x71184
+#define DSPBLINOFF 0X71184
+#define DSPBADDR DSPBBASE
+#define DSPBSTRIDE 0x71188
+
+#define DSPCBASE 0x72184
+#define DSPCLINOFF 0x72184
+#define DSPCSTRIDE 0x72188
+
+#define DSPAKEYVAL 0x70194
+#define DSPAKEYMASK 0x70198
+
+#define DSPAPOS 0x7018C /* reserved */
+#define DSPASIZE 0x70190
+#define DSPBPOS 0x7118C
+#define DSPBSIZE 0x71190
+#define DSPCPOS 0x7218C
+#define DSPCSIZE 0x72190
+
+#define DSPASURF 0x7019C
+#define DSPATILEOFF 0x701A4
+
+#define DSPBSURF 0x7119C
+#define DSPBTILEOFF 0x711A4
+
+#define DSPCSURF 0x7219C
+#define DSPCTILEOFF 0x721A4
+#define DSPCKEYMAXVAL 0x721A0
+#define DSPCKEYMINVAL 0x72194
+#define DSPCKEYMSK 0x72198
+
+#define VGACNTRL 0x71400
+#define VGA_DISP_DISABLE (1 << 31)
+#define VGA_2X_MODE (1 << 30)
+#define VGA_PIPE_B_SELECT (1 << 29)
+
+/*
+ * Overlay registers
+ */
+#define OV_C_OFFSET 0x08000
+#define OV_OVADD 0x30000
+#define OV_DOVASTA 0x30008
+# define OV_PIPE_SELECT ((1 << 6)|(1 << 7))
+# define OV_PIPE_SELECT_POS 6
+# define OV_PIPE_A 0
+# define OV_PIPE_C 1
+#define OV_OGAMC5 0x30010
+#define OV_OGAMC4 0x30014
+#define OV_OGAMC3 0x30018
+#define OV_OGAMC2 0x3001C
+#define OV_OGAMC1 0x30020
+#define OV_OGAMC0 0x30024
+#define OVC_OVADD 0x38000
+#define OVC_DOVCSTA 0x38008
+#define OVC_OGAMC5 0x38010
+#define OVC_OGAMC4 0x38014
+#define OVC_OGAMC3 0x38018
+#define OVC_OGAMC2 0x3801C
+#define OVC_OGAMC1 0x38020
+#define OVC_OGAMC0 0x38024
+
+/*
+ * Some BIOS scratch area registers. The 845 (and 830?) store the amount
+ * of video memory available to the BIOS in SWF1.
+ */
+#define SWF0 0x71410
+#define SWF1 0x71414
+#define SWF2 0x71418
+#define SWF3 0x7141c
+#define SWF4 0x71420
+#define SWF5 0x71424
+#define SWF6 0x71428
+
+/*
+ * 855 scratch registers.
+ */
+#define SWF00 0x70410
+#define SWF01 0x70414
+#define SWF02 0x70418
+#define SWF03 0x7041c
+#define SWF04 0x70420
+#define SWF05 0x70424
+#define SWF06 0x70428
+
+#define SWF10 SWF0
+#define SWF11 SWF1
+#define SWF12 SWF2
+#define SWF13 SWF3
+#define SWF14 SWF4
+#define SWF15 SWF5
+#define SWF16 SWF6
+
+#define SWF30 0x72414
+#define SWF31 0x72418
+#define SWF32 0x7241c
+
+
+/*
+ * Palette registers
+ */
+#define PALETTE_A 0x0a000
+#define PALETTE_B 0x0a800
+#define PALETTE_C 0x0ac00
+
+/* Cursor A & B regs */
+#define CURACNTR 0x70080
+#define CURSOR_MODE_DISABLE 0x00
+#define CURSOR_MODE_64_32B_AX 0x07
+#define CURSOR_MODE_64_ARGB_AX ((1 << 5) | CURSOR_MODE_64_32B_AX)
+#define MCURSOR_GAMMA_ENABLE (1 << 26)
+#define CURABASE 0x70084
+#define CURAPOS 0x70088
+#define CURSOR_POS_MASK 0x007FF
+#define CURSOR_POS_SIGN 0x8000
+#define CURSOR_X_SHIFT 0
+#define CURSOR_Y_SHIFT 16
+#define CURBCNTR 0x700c0
+#define CURBBASE 0x700c4
+#define CURBPOS 0x700c8
+#define CURCCNTR 0x700e0
+#define CURCBASE 0x700e4
+#define CURCPOS 0x700e8
+
+/*
+ * Interrupt Registers
+ */
+#define IER 0x020a0
+#define IIR 0x020a4
+#define IMR 0x020a8
+#define ISR 0x020ac
+
+/*
+ * MOORESTOWN delta registers
+ */
+#define MRST_DPLL_A 0x0f014
+#define MDFLD_DPLL_B 0x0f018
+#define MDFLD_INPUT_REF_SEL (1 << 14)
+#define MDFLD_VCO_SEL (1 << 16)
+#define DPLLA_MODE_LVDS (2 << 26) /* mrst */
+#define MDFLD_PLL_LATCHEN (1 << 28)
+#define MDFLD_PWR_GATE_EN (1 << 30)
+#define MDFLD_P1_MASK (0x1FF << 17)
+#define MRST_FPA0 0x0f040
+#define MRST_FPA1 0x0f044
+#define MDFLD_DPLL_DIV0 0x0f048
+#define MDFLD_DPLL_DIV1 0x0f04c
+#define MRST_PERF_MODE 0x020f4
+
+/*
+ * MEDFIELD HDMI registers
+ */
+#define HDMIPHYMISCCTL 0x61134
+#define HDMI_PHY_POWER_DOWN 0x7f
+#define HDMIB_CONTROL 0x61140
+#define HDMIB_PORT_EN (1 << 31)
+#define HDMIB_PIPE_B_SELECT (1 << 30)
+#define HDMIB_NULL_PACKET (1 << 9)
+#define HDMIB_HDCP_PORT (1 << 5)
+
+/* #define LVDS 0x61180 */
+#define MRST_PANEL_8TO6_DITHER_ENABLE (1 << 25)
+#define MRST_PANEL_24_DOT_1_FORMAT (1 << 24)
+#define LVDS_A3_POWER_UP_0_OUTPUT (1 << 6)
+
+#define MIPI 0x61190
+#define MIPI_C 0x62190
+#define MIPI_PORT_EN (1 << 31)
+/* Turns on border drawing to allow centered display. */
+#define SEL_FLOPPED_HSTX (1 << 23)
+#define PASS_FROM_SPHY_TO_AFE (1 << 16)
+#define MIPI_BORDER_EN (1 << 15)
+#define MIPIA_3LANE_MIPIC_1LANE 0x1
+#define MIPIA_2LANE_MIPIC_2LANE 0x2
+#define TE_TRIGGER_DSI_PROTOCOL (1 << 2)
+#define TE_TRIGGER_GPIO_PIN (1 << 3)
+#define MIPI_TE_COUNT 0x61194
+
+/* #define PP_CONTROL 0x61204 */
+#define POWER_DOWN_ON_RESET (1 << 1)
+
+/* #define PFIT_CONTROL 0x61230 */
+#define PFIT_PIPE_SELECT (3 << 29)
+#define PFIT_PIPE_SELECT_SHIFT (29)
+
+/* #define BLC_PWM_CTL 0x61254 */
+#define MRST_BACKLIGHT_MODULATION_FREQ_SHIFT (16)
+#define MRST_BACKLIGHT_MODULATION_FREQ_MASK (0xffff << 16)
+
+/* #define PIPEACONF 0x70008 */
+#define PIPEACONF_PIPE_STATE (1 << 30)
+/* #define DSPACNTR 0x70180 */
+
+#define MRST_DSPABASE 0x7019c
+#define MRST_DSPBBASE 0x7119c
+#define MDFLD_DSPCBASE 0x7219c
+
+/*
+ * Moorestown registers.
+ */
+
+/*
+ * MIPI IP registers
+ */
+#define MIPIC_REG_OFFSET 0x800
+
+#define DEVICE_READY_REG 0xb000
+#define LP_OUTPUT_HOLD (1 << 16)
+#define EXIT_ULPS_DEV_READY 0x3
+#define LP_OUTPUT_HOLD_RELEASE 0x810000
+# define ENTERING_ULPS (2 << 1)
+# define EXITING_ULPS (1 << 1)
+# define ULPS_MASK (3 << 1)
+# define BUS_POSSESSION (1 << 3)
+#define INTR_STAT_REG 0xb004
+#define RX_SOT_ERROR (1 << 0)
+#define RX_SOT_SYNC_ERROR (1 << 1)
+#define RX_ESCAPE_MODE_ENTRY_ERROR (1 << 3)
+#define RX_LP_TX_SYNC_ERROR (1 << 4)
+#define RX_HS_RECEIVE_TIMEOUT_ERROR (1 << 5)
+#define RX_FALSE_CONTROL_ERROR (1 << 6)
+#define RX_ECC_SINGLE_BIT_ERROR (1 << 7)
+#define RX_ECC_MULTI_BIT_ERROR (1 << 8)
+#define RX_CHECKSUM_ERROR (1 << 9)
+#define RX_DSI_DATA_TYPE_NOT_RECOGNIZED (1 << 10)
+#define RX_DSI_VC_ID_INVALID (1 << 11)
+#define TX_FALSE_CONTROL_ERROR (1 << 12)
+#define TX_ECC_SINGLE_BIT_ERROR (1 << 13)
+#define TX_ECC_MULTI_BIT_ERROR (1 << 14)
+#define TX_CHECKSUM_ERROR (1 << 15)
+#define TX_DSI_DATA_TYPE_NOT_RECOGNIZED (1 << 16)
+#define TX_DSI_VC_ID_INVALID (1 << 17)
+#define HIGH_CONTENTION (1 << 18)
+#define LOW_CONTENTION (1 << 19)
+#define DPI_FIFO_UNDER_RUN (1 << 20)
+#define HS_TX_TIMEOUT (1 << 21)
+#define LP_RX_TIMEOUT (1 << 22)
+#define TURN_AROUND_ACK_TIMEOUT (1 << 23)
+#define ACK_WITH_NO_ERROR (1 << 24)
+#define HS_GENERIC_WR_FIFO_FULL (1 << 27)
+#define LP_GENERIC_WR_FIFO_FULL (1 << 28)
+#define SPL_PKT_SENT (1 << 30)
+#define INTR_EN_REG 0xb008
+#define DSI_FUNC_PRG_REG 0xb00c
+#define DPI_CHANNEL_NUMBER_POS 0x03
+#define DBI_CHANNEL_NUMBER_POS 0x05
+#define FMT_DPI_POS 0x07
+#define FMT_DBI_POS 0x0A
+#define DBI_DATA_WIDTH_POS 0x0D
+
+/* DPI PIXEL FORMATS */
+#define RGB_565_FMT 0x01 /* RGB 565 FORMAT */
+#define RGB_666_FMT 0x02 /* RGB 666 FORMAT */
+#define LRGB_666_FMT 0x03 /* RGB LOOSELY PACKED
+ * 666 FORMAT
+ */
+#define RGB_888_FMT 0x04 /* RGB 888 FORMAT */
+#define VIRTUAL_CHANNEL_NUMBER_0 0x00 /* Virtual channel 0 */
+#define VIRTUAL_CHANNEL_NUMBER_1 0x01 /* Virtual channel 1 */
+#define VIRTUAL_CHANNEL_NUMBER_2 0x02 /* Virtual channel 2 */
+#define VIRTUAL_CHANNEL_NUMBER_3 0x03 /* Virtual channel 3 */
+
+#define DBI_NOT_SUPPORTED 0x00 /* command mode
+ * is not supported
+ */
+#define DBI_DATA_WIDTH_16BIT 0x01 /* 16 bit data */
+#define DBI_DATA_WIDTH_9BIT 0x02 /* 9 bit data */
+#define DBI_DATA_WIDTH_8BIT 0x03 /* 8 bit data */
+#define DBI_DATA_WIDTH_OPT1 0x04 /* option 1 */
+#define DBI_DATA_WIDTH_OPT2 0x05 /* option 2 */
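/*
 * Editor's sketch, not part of this patch: composing a DSI_FUNC_PRG_REG
 * value for a DPI (video mode) panel on virtual channel 0 with RGB888
 * pixels and DBI unused. Placing the lane count in the lowest bits is an
 * assumption; the "example_" name is made up.
 */
static u32 example_dsi_func_prg(u32 lane_count)
{
	return lane_count |
	       (VIRTUAL_CHANNEL_NUMBER_0 << DPI_CHANNEL_NUMBER_POS) |
	       (RGB_888_FMT << FMT_DPI_POS) |
	       (DBI_NOT_SUPPORTED << DBI_DATA_WIDTH_POS);
}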
+
+#define HS_TX_TIMEOUT_REG 0xb010
+#define LP_RX_TIMEOUT_REG 0xb014
+#define TURN_AROUND_TIMEOUT_REG 0xb018
+#define DEVICE_RESET_REG 0xb01C
+#define DPI_RESOLUTION_REG 0xb020
+#define RES_V_POS 0x10
+#define DBI_RESOLUTION_REG 0xb024 /* Reserved for MDFLD */
+#define HORIZ_SYNC_PAD_COUNT_REG 0xb028
+#define HORIZ_BACK_PORCH_COUNT_REG 0xb02C
+#define HORIZ_FRONT_PORCH_COUNT_REG 0xb030
+#define HORIZ_ACTIVE_AREA_COUNT_REG 0xb034
+#define VERT_SYNC_PAD_COUNT_REG 0xb038
+#define VERT_BACK_PORCH_COUNT_REG 0xb03c
+#define VERT_FRONT_PORCH_COUNT_REG 0xb040
+#define HIGH_LOW_SWITCH_COUNT_REG 0xb044
+#define DPI_CONTROL_REG 0xb048
+#define DPI_SHUT_DOWN (1 << 0)
+#define DPI_TURN_ON (1 << 1)
+#define DPI_COLOR_MODE_ON (1 << 2)
+#define DPI_COLOR_MODE_OFF (1 << 3)
+#define DPI_BACK_LIGHT_ON (1 << 4)
+#define DPI_BACK_LIGHT_OFF (1 << 5)
+#define DPI_LP (1 << 6)
+#define DPI_DATA_REG 0xb04c
+#define DPI_BACK_LIGHT_ON_DATA 0x07
+#define DPI_BACK_LIGHT_OFF_DATA 0x17
+#define INIT_COUNT_REG 0xb050
+#define MAX_RET_PAK_REG 0xb054
+#define VIDEO_FMT_REG 0xb058
+#define COMPLETE_LAST_PCKT (1 << 2)
+#define EOT_DISABLE_REG 0xb05c
+#define ENABLE_CLOCK_STOPPING (1 << 1)
+#define LP_BYTECLK_REG 0xb060
+#define LP_GEN_DATA_REG 0xb064
+#define HS_GEN_DATA_REG 0xb068
+#define LP_GEN_CTRL_REG 0xb06C
+#define HS_GEN_CTRL_REG 0xb070
+#define DCS_CHANNEL_NUMBER_POS 0x6
+#define MCS_COMMANDS_POS 0x8
+#define WORD_COUNTS_POS 0x8
+#define MCS_PARAMETER_POS 0x10
+#define GEN_FIFO_STAT_REG 0xb074
+#define HS_DATA_FIFO_FULL (1 << 0)
+#define HS_DATA_FIFO_HALF_EMPTY (1 << 1)
+#define HS_DATA_FIFO_EMPTY (1 << 2)
+#define LP_DATA_FIFO_FULL (1 << 8)
+#define LP_DATA_FIFO_HALF_EMPTY (1 << 9)
+#define LP_DATA_FIFO_EMPTY (1 << 10)
+#define HS_CTRL_FIFO_FULL (1 << 16)
+#define HS_CTRL_FIFO_HALF_EMPTY (1 << 17)
+#define HS_CTRL_FIFO_EMPTY (1 << 18)
+#define LP_CTRL_FIFO_FULL (1 << 24)
+#define LP_CTRL_FIFO_HALF_EMPTY (1 << 25)
+#define LP_CTRL_FIFO_EMPTY (1 << 26)
+#define DBI_FIFO_EMPTY (1 << 27)
+#define DPI_FIFO_EMPTY (1 << 28)
+#define HS_LS_DBI_ENABLE_REG 0xb078
+#define TXCLKESC_REG 0xb07c
+#define DPHY_PARAM_REG 0xb080
+#define DBI_BW_CTRL_REG 0xb084
+#define CLK_LANE_SWT_REG 0xb088
+
+/*
+ * MIPI Adapter registers
+ */
+#define MIPI_CONTROL_REG 0xb104
+#define MIPI_2X_CLOCK_BITS ((1 << 0) | (1 << 1))
+#define MIPI_DATA_ADDRESS_REG 0xb108
+#define MIPI_DATA_LENGTH_REG 0xb10C
+#define MIPI_COMMAND_ADDRESS_REG 0xb110
+#define MIPI_COMMAND_LENGTH_REG 0xb114
+#define MIPI_READ_DATA_RETURN_REG0 0xb118
+#define MIPI_READ_DATA_RETURN_REG1 0xb11C
+#define MIPI_READ_DATA_RETURN_REG2 0xb120
+#define MIPI_READ_DATA_RETURN_REG3 0xb124
+#define MIPI_READ_DATA_RETURN_REG4 0xb128
+#define MIPI_READ_DATA_RETURN_REG5 0xb12C
+#define MIPI_READ_DATA_RETURN_REG6 0xb130
+#define MIPI_READ_DATA_RETURN_REG7 0xb134
+#define MIPI_READ_DATA_VALID_REG 0xb138
+
+/* DBI COMMANDS */
+#define soft_reset 0x01
+/*
+ * The display module performs a software reset.
+ * Registers are written with their SW Reset default values.
+ */
+#define get_power_mode 0x0a
+/*
+ * The display module returns the current power mode.
+ */
+#define get_address_mode 0x0b
+/*
+ * The display module returns the current data order (address mode).
+ */
+#define get_pixel_format 0x0c
+/*
+ * This command gets the pixel format for the RGB image data
+ * used by the interface.
+ */
+#define get_display_mode 0x0d
+/*
+ * The display module returns the Display Image Mode status.
+ */
+#define get_signal_mode 0x0e
+/*
+ * The display module returns the Display Signal Mode.
+ */
+#define get_diagnostic_result 0x0f
+/*
+ * The display module returns the self-diagnostic results following
+ * a Sleep Out command.
+ */
+#define enter_sleep_mode 0x10
+/*
+ * This command causes the display module to enter the Sleep mode.
+ * In this mode, all unnecessary blocks inside the display module are
+ * disabled except interface communication. This is the lowest power
+ * mode the display module supports.
+ */
+#define exit_sleep_mode 0x11
+/*
+ * This command causes the display module to exit Sleep mode.
+ * All blocks inside the display module are enabled.
+ */
+#define enter_partial_mode 0x12
+/*
+ * This command causes the display module to enter the Partial Display
+ * Mode. The Partial Display Mode window is described by the
+ * set_partial_area command.
+ */
+#define enter_normal_mode 0x13
+/*
+ * This command causes the display module to enter the Normal mode.
+ * Normal Mode is defined as having both Partial Display mode and Scroll mode off.
+ */
+#define exit_invert_mode 0x20
+/*
+ * This command causes the display module to stop inverting the image
+ * data on the display device. The frame memory contents remain unchanged.
+ * No status bits are changed.
+ */
+#define enter_invert_mode 0x21
+/*
+ * This command causes the display module to invert the image data only on
+ * the display device. The frame memory contents remain unchanged.
+ * No status bits are changed.
+ */
+#define set_gamma_curve 0x26
+/*
+ * This command selects the desired gamma curve for the display device.
+ * Four fixed gamma curves are defined in section DCS spec.
+ */
+#define set_display_off 0x28
+/*
+ * This command causes the display module to stop displaying the image data
+ * on the display device. The frame memory contents remain unchanged.
+ * No status bits are changed.
+ */
+#define set_display_on 0x29
+/*
+ * This command causes the display module to start displaying the image data
+ * on the display device. The frame memory contents remain unchanged.
+ * No status bits are changed.
+ */
+#define set_column_address 0x2a
+/*
+ * This command defines the column extent of the frame memory accessed by
+ * the hostprocessor with the read_memory_continue and
+ * write_memory_continue commands.
+ * No status bits are changed.
+ */
+#define set_page_addr 0x2b
+/*
+ * This command defines the page extent of the frame memory accessed by
+ * the host processor with the write_memory_continue and
+ * read_memory_continue command.
+ * No status bits are changed.
+ */
+#define write_mem_start 0x2c
+/*
+ * This command transfers image data from the host processor to the
+ * display module's frame memory, starting at the pixel location specified
+ * by preceding set_column_address and set_page_address commands.
+ */
+#define set_partial_area 0x30
+/*
+ * This command defines the Partial Display mode's display area.
+ * There are two parameters associated with this command, the first
+ * defines the Start Row (SR) and the second the End Row (ER). SR and ER
+ * refer to the Frame Memory Line Pointer.
+ */
+#define set_scroll_area 0x33
+/*
+ * This command defines the display module's Vertical Scrolling Area.
+ */
+#define set_tear_off 0x34
+/*
+ * This command turns off the display module's Tearing Effect output
+ * signal on the TE signal line.
+ */
+#define set_tear_on 0x35
+/*
+ * This command turns on the display module's Tearing Effect output signal
+ * on the TE signal line.
+ */
+#define set_address_mode 0x36
+/*
+ * This command sets the data order for transfers from the host processor
+ * to the display module's frame memory, bits B[7:5] and B3, and from the
+ * display module's frame memory to the display device, bits B[2:0] and B4.
+ */
+#define set_scroll_start 0x37
+/*
+ * This command sets the start of the vertical scrolling area in the frame
+ * memory. The vertical scrolling area is fully defined when this command
+ * is used with the set_scroll_area command. The set_scroll_start command
+ * has one parameter, the Vertical Scroll Pointer. The VSP defines the
+ * line in the frame memory that is written to the display device as the
+ * first line of the vertical scroll area.
+ */
+#define exit_idle_mode 0x38
+/*
+ * This command causes the display module to exit Idle mode.
+ */
+#define enter_idle_mode 0x39
+/*
+ * This command causes the display module to enter Idle Mode.
+ * In Idle Mode, color expression is reduced. Colors are shown on the
+ * display device using the MSB of each of the R, G and B color
+ * components in the frame memory.
+ */
+#define set_pixel_format 0x3a
+/*
+ * This command sets the pixel format for the RGB image data used by the
+ * interface.
+ * Bits D[6:4] DPI Pixel Format Definition
+ * Bits D[2:0] DBI Pixel Format Definition
+ * Bits D7 and D3 are not used.
+ */
+#define DCS_PIXEL_FORMAT_3bpp 0x1
+#define DCS_PIXEL_FORMAT_8bpp 0x2
+#define DCS_PIXEL_FORMAT_12bpp 0x3
+#define DCS_PIXEL_FORMAT_16bpp 0x5
+#define DCS_PIXEL_FORMAT_18bpp 0x6
+#define DCS_PIXEL_FORMAT_24bpp 0x7
+
+#define write_mem_cont 0x3c
+
+/*
+ * This command transfers image data from the host processor to the
+ * display module's frame memory continuing from the pixel location
+ * following the previous write_memory_continue or write_memory_start
+ * command.
+ */
+#define set_tear_scanline 0x44
+/*
+ * This command turns on the display module's Tearing Effect output signal
+ * on the TE signal line when the display module reaches line N.
+ */
+#define get_scanline 0x45
+/*
+ * The display module returns the current scanline, N, used to update the
+ * display device. The total number of scanlines on a display device is
+ * defined as VSYNC + VBP + VACT + VFP. The first scanline is defined as
+ * the first line of V Sync and is denoted as Line 0.
+ * When in Sleep Mode, the value returned by get_scanline is undefined.
+ */
+
+/* MCS or Generic COMMANDS */
+/* MCS/generic data type */
+#define GEN_SHORT_WRITE_0 0x03 /* generic short write, no parameters */
+#define GEN_SHORT_WRITE_1 0x13 /* generic short write, 1 parameters */
+#define GEN_SHORT_WRITE_2 0x23 /* generic short write, 2 parameters */
+#define GEN_READ_0 0x04 /* generic read, no parameters */
+#define GEN_READ_1 0x14 /* generic read, 1 parameters */
+#define GEN_READ_2 0x24 /* generic read, 2 parameters */
+#define GEN_LONG_WRITE 0x29 /* generic long write */
+#define MCS_SHORT_WRITE_0 0x05 /* MCS short write, no parameters */
+#define MCS_SHORT_WRITE_1 0x15 /* MCS short write, 1 parameters */
+#define MCS_READ 0x06 /* MCS read, no parameters */
+#define MCS_LONG_WRITE 0x39 /* MCS long write */
+/* MCS/generic commands */
+/* TPO MCS */
+#define write_display_profile 0x50
+#define write_display_brightness 0x51
+#define write_ctrl_display 0x53
+#define write_ctrl_cabc 0x55
+ #define UI_IMAGE 0x01
+ #define STILL_IMAGE 0x02
+ #define MOVING_IMAGE 0x03
+#define write_hysteresis 0x57
+#define write_gamma_setting 0x58
+#define write_cabc_min_bright 0x5e
+#define write_kbbc_profile 0x60
+/* TMD MCS */
+#define tmd_write_display_brightness 0x8c
+
+/*
+ * This command is used to control ambient light, panel backlight
+ * brightness and gamma settings.
+ */
+#define BRIGHT_CNTL_BLOCK_ON (1 << 5)
+#define AMBIENT_LIGHT_SENSE_ON (1 << 4)
+#define DISPLAY_DIMMING_ON (1 << 3)
+#define BACKLIGHT_ON (1 << 2)
+#define DISPLAY_BRIGHTNESS_AUTO (1 << 1)
+#define GAMMA_AUTO (1 << 0)
+
+/* DCS Interface Pixel Formats */
+#define DCS_PIXEL_FORMAT_3BPP 0x1
+#define DCS_PIXEL_FORMAT_8BPP 0x2
+#define DCS_PIXEL_FORMAT_12BPP 0x3
+#define DCS_PIXEL_FORMAT_16BPP 0x5
+#define DCS_PIXEL_FORMAT_18BPP 0x6
+#define DCS_PIXEL_FORMAT_24BPP 0x7
+/* ONE PARAMETER READ DATA */
+#define addr_mode_data 0xfc
+#define diag_res_data 0x00
+#define disp_mode_data 0x23
+#define pxl_fmt_data 0x77
+#define pwr_mode_data 0x74
+#define sig_mode_data 0x00
+/* TWO PARAMETERS READ DATA */
+#define scanline_data1 0xff
+#define scanline_data2 0xff
+#define NON_BURST_MODE_SYNC_PULSE 0x01 /* Non Burst Mode
+ * with Sync Pulse
+ */
+#define NON_BURST_MODE_SYNC_EVENTS 0x02 /* Non Burst Mode
+ * with Sync events
+ */
+#define BURST_MODE 0x03 /* Burst Mode */
+#define DBI_COMMAND_BUFFER_SIZE 0x240 /* 0x32 */ /* 0x120 */
+ /* Allocate at least
+ * 0x100 Byte with 32
+ * byte alignment
+ */
+#define DBI_DATA_BUFFER_SIZE 0x120 /* Allocate at least
+ * 0x100 Byte with 32
+ * byte alignment
+ */
+#define DBI_CB_TIME_OUT 0xFFFF
+
+#define GEN_FB_TIME_OUT 2000
+
+#define SKU_83 0x01
+#define SKU_100 0x02
+#define SKU_100L 0x04
+#define SKU_BYPASS 0x08
+
+/* Some handy macros for playing with bitfields. */
+#define PSB_MASK(high, low) (((1<<((high)-(low)+1))-1)<<(low))
+#define SET_FIELD(value, field) (((value) << field ## _SHIFT) & field ## _MASK)
+#define GET_FIELD(word, field) (((word) & field ## _MASK) >> field ## _SHIFT)
+
+#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
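/*
 * Editor's sketch, not part of this patch: the helpers above pair a
 * FOO_MASK with its FOO_SHIFT, e.g. extracting and re-inserting the UDI
 * divider field of a DPLL_MD value, while _PIPE picks a per-pipe
 * register by linear offset (_PIPE(1, DSPABASE, DSPBBASE) == DSPBBASE).
 * The "example_" name is made up.
 */
static u32 example_bump_udi_divider(u32 md)
{
	u32 div = GET_FIELD(md, DPLL_MD_UDI_DIVIDER);

	return (md & ~DPLL_MD_UDI_DIVIDER_MASK) |
	       SET_FIELD(div + 1, DPLL_MD_UDI_DIVIDER);
}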
+
+/* PCI config space */
+
+#define SB_PCKT 0x02100 /* cedarview */
+# define SB_OPCODE_MASK PSB_MASK(31, 16)
+# define SB_OPCODE_SHIFT 16
+# define SB_OPCODE_READ 0
+# define SB_OPCODE_WRITE 1
+# define SB_DEST_MASK PSB_MASK(15, 8)
+# define SB_DEST_SHIFT 8
+# define SB_DEST_DPLL 0x88
+# define SB_BYTE_ENABLE_MASK PSB_MASK(7, 4)
+# define SB_BYTE_ENABLE_SHIFT 4
+# define SB_BUSY (1 << 0)
+
+
+/* 32-bit value read/written from the DPIO reg. */
+#define SB_DATA 0x02104 /* cedarview */
+/* 32-bit address of the DPIO reg to be read/written. */
+#define SB_ADDR 0x02108 /* cedarview */
+#define DPIO_CFG 0x02110 /* cedarview */
+# define DPIO_MODE_SELECT_1 (1 << 3)
+# define DPIO_MODE_SELECT_0 (1 << 2)
+# define DPIO_SFR_BYPASS (1 << 1)
+/* reset is active low */
+# define DPIO_CMN_RESET_N (1 << 0)
+
+/* Cedarview sideband registers */
+#define _SB_M_A 0x8008
+#define _SB_M_B 0x8028
+#define SB_M(pipe) _PIPE(pipe, _SB_M_A, _SB_M_B)
+# define SB_M_DIVIDER_MASK (0xFF << 24)
+# define SB_M_DIVIDER_SHIFT 24
+
+#define _SB_N_VCO_A 0x8014
+#define _SB_N_VCO_B 0x8034
+#define SB_N_VCO(pipe) _PIPE(pipe, _SB_N_VCO_A, _SB_N_VCO_B)
+#define SB_N_VCO_SEL_MASK PSB_MASK(31, 30)
+#define SB_N_VCO_SEL_SHIFT 30
+#define SB_N_DIVIDER_MASK PSB_MASK(29, 26)
+#define SB_N_DIVIDER_SHIFT 26
+#define SB_N_CB_TUNE_MASK PSB_MASK(25, 24)
+#define SB_N_CB_TUNE_SHIFT 24
+
+#define _SB_REF_A 0x8018
+#define _SB_REF_B 0x8038
+#define SB_REF_SFR(pipe) _PIPE(pipe, _SB_REF_A, _SB_REF_B)
+
+#define _SB_P_A 0x801c
+#define _SB_P_B 0x803c
+#define SB_P(pipe) _PIPE(pipe, _SB_P_A, _SB_P_B)
+#define SB_P2_DIVIDER_MASK PSB_MASK(31, 30)
+#define SB_P2_DIVIDER_SHIFT 30
+#define SB_P2_10 0 /* HDMI, DP, DAC */
+#define SB_P2_5 1 /* DAC */
+#define SB_P2_14 2 /* LVDS single */
+#define SB_P2_7 3 /* LVDS double */
+#define SB_P1_DIVIDER_MASK PSB_MASK(15, 12)
+#define SB_P1_DIVIDER_SHIFT 12
+
+#define PSB_LANE0 0x120
+#define PSB_LANE1 0x220
+#define PSB_LANE2 0x2320
+#define PSB_LANE3 0x2420
+
+#define LANE_PLL_MASK (0x7 << 20)
+#define LANE_PLL_ENABLE (0x3 << 20)
+
+
+#endif
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
new file mode 100644
index 000000000000..88b42971c0fd
--- /dev/null
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -0,0 +1,2623 @@
+/*
+ * Copyright 2006 Dave Airlie <airlied@linux.ie>
+ * Copyright © 2006-2007 Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ */
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc.h"
+#include "drm_edid.h"
+#include "psb_intel_drv.h"
+#include "gma_drm.h"
+#include "psb_drv.h"
+#include "psb_intel_sdvo_regs.h"
+#include "psb_intel_reg.h"
+
+#define SDVO_TMDS_MASK (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)
+#define SDVO_RGB_MASK (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1)
+#define SDVO_LVDS_MASK (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1)
+#define SDVO_TV_MASK (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_SVID0)
+
+#define SDVO_OUTPUT_MASK (SDVO_TMDS_MASK | SDVO_RGB_MASK | SDVO_LVDS_MASK |\
+ SDVO_TV_MASK)
+
+#define IS_TV(c) (c->output_flag & SDVO_TV_MASK)
+#define IS_TMDS(c) (c->output_flag & SDVO_TMDS_MASK)
+#define IS_LVDS(c) (c->output_flag & SDVO_LVDS_MASK)
+#define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK))
+
+
+static const char *tv_format_names[] = {
+ "NTSC_M" , "NTSC_J" , "NTSC_443",
+ "PAL_B" , "PAL_D" , "PAL_G" ,
+ "PAL_H" , "PAL_I" , "PAL_M" ,
+ "PAL_N" , "PAL_NC" , "PAL_60" ,
+ "SECAM_B" , "SECAM_D" , "SECAM_G" ,
+ "SECAM_K" , "SECAM_K1", "SECAM_L" ,
+ "SECAM_60"
+};
+
+#define TV_FORMAT_NUM (sizeof(tv_format_names) / sizeof(*tv_format_names))
+
+struct psb_intel_sdvo {
+ struct psb_intel_encoder base;
+
+ struct i2c_adapter *i2c;
+ u8 slave_addr;
+
+ struct i2c_adapter ddc;
+
+ /* Register for the SDVO device: SDVOB or SDVOC */
+ int sdvo_reg;
+
+ /* Active outputs controlled by this SDVO output */
+ uint16_t controlled_output;
+
+ /*
+ * Capabilities of the SDVO device returned by
+ * i830_sdvo_get_capabilities()
+ */
+ struct psb_intel_sdvo_caps caps;
+
+ /* Pixel clock limitations reported by the SDVO device, in kHz */
+ int pixel_clock_min, pixel_clock_max;
+
+ /*
+	 * For a multi-function SDVO device,
+	 * this tracks the currently attached outputs.
+ */
+ uint16_t attached_output;
+
+ /**
+	 * This is used to select the color range of RGB outputs in HDMI mode.
+	 * It is only valid when using TMDS encoding and 8 bits per color mode.
+ */
+ uint32_t color_range;
+
+ /**
+ * This is set if we're going to treat the device as TV-out.
+ *
+ * While we have these nice friendly flags for output types that ought
+ * to decide this for us, the S-Video output on our HDMI+S-Video card
+ * shows up as RGB1 (VGA).
+ */
+ bool is_tv;
+
+	/* Index of the currently selected TV format */
+ int tv_format_index;
+
+ /**
+ * This is set if we treat the device as HDMI, instead of DVI.
+ */
+ bool is_hdmi;
+ bool has_hdmi_monitor;
+ bool has_hdmi_audio;
+
+ /**
+ * This is set if we detect output of sdvo device as LVDS and
+ * have a valid fixed mode to use with the panel.
+ */
+ bool is_lvds;
+
+ /**
+	 * This is the SDVO fixed panel mode pointer.
+ */
+ struct drm_display_mode *sdvo_lvds_fixed_mode;
+
+ /* DDC bus used by this SDVO encoder */
+ uint8_t ddc_bus;
+
+ /* Input timings for adjusted_mode */
+ struct psb_intel_sdvo_dtd input_dtd;
+};
+
+struct psb_intel_sdvo_connector {
+ struct psb_intel_connector base;
+
+ /* Mark the type of connector */
+ uint16_t output_flag;
+
+ int force_audio;
+
+ /* This contains all current supported TV format */
+ u8 tv_format_supported[TV_FORMAT_NUM];
+ int format_supported_num;
+ struct drm_property *tv_format;
+
+ /* add the property for the SDVO-TV */
+ struct drm_property *left;
+ struct drm_property *right;
+ struct drm_property *top;
+ struct drm_property *bottom;
+ struct drm_property *hpos;
+ struct drm_property *vpos;
+ struct drm_property *contrast;
+ struct drm_property *saturation;
+ struct drm_property *hue;
+ struct drm_property *sharpness;
+ struct drm_property *flicker_filter;
+ struct drm_property *flicker_filter_adaptive;
+ struct drm_property *flicker_filter_2d;
+ struct drm_property *tv_chroma_filter;
+ struct drm_property *tv_luma_filter;
+ struct drm_property *dot_crawl;
+
+ /* add the property for the SDVO-TV/LVDS */
+ struct drm_property *brightness;
+
+ /* Add variable to record current setting for the above property */
+ u32 left_margin, right_margin, top_margin, bottom_margin;
+
+ /* this is to get the range of margin.*/
+ u32 max_hscan, max_vscan;
+ u32 max_hpos, cur_hpos;
+ u32 max_vpos, cur_vpos;
+ u32 cur_brightness, max_brightness;
+ u32 cur_contrast, max_contrast;
+ u32 cur_saturation, max_saturation;
+ u32 cur_hue, max_hue;
+ u32 cur_sharpness, max_sharpness;
+ u32 cur_flicker_filter, max_flicker_filter;
+ u32 cur_flicker_filter_adaptive, max_flicker_filter_adaptive;
+ u32 cur_flicker_filter_2d, max_flicker_filter_2d;
+ u32 cur_tv_chroma_filter, max_tv_chroma_filter;
+ u32 cur_tv_luma_filter, max_tv_luma_filter;
+ u32 cur_dot_crawl, max_dot_crawl;
+};
+
+static struct psb_intel_sdvo *to_psb_intel_sdvo(struct drm_encoder *encoder)
+{
+ return container_of(encoder, struct psb_intel_sdvo, base.base);
+}
+
+static struct psb_intel_sdvo *intel_attached_sdvo(struct drm_connector *connector)
+{
+ return container_of(psb_intel_attached_encoder(connector),
+ struct psb_intel_sdvo, base);
+}
+
+static struct psb_intel_sdvo_connector *to_psb_intel_sdvo_connector(struct drm_connector *connector)
+{
+ return container_of(to_psb_intel_connector(connector), struct psb_intel_sdvo_connector, base);
+}
+
+static bool
+psb_intel_sdvo_output_setup(struct psb_intel_sdvo *psb_intel_sdvo, uint16_t flags);
+static bool
+psb_intel_sdvo_tv_create_property(struct psb_intel_sdvo *psb_intel_sdvo,
+ struct psb_intel_sdvo_connector *psb_intel_sdvo_connector,
+ int type);
+static bool
+psb_intel_sdvo_create_enhance_property(struct psb_intel_sdvo *psb_intel_sdvo,
+ struct psb_intel_sdvo_connector *psb_intel_sdvo_connector);
+
+/**
+ * Writes the SDVOB or SDVOC with the given value, but always writes both
+ * SDVOB and SDVOC to work around apparent hardware issues (according to
+ * comments in the BIOS).
+ */
+static void psb_intel_sdvo_write_sdvox(struct psb_intel_sdvo *psb_intel_sdvo, u32 val)
+{
+ struct drm_device *dev = psb_intel_sdvo->base.base.dev;
+ u32 bval = val, cval = val;
+ int i;
+
+ if (psb_intel_sdvo->sdvo_reg == SDVOB) {
+ cval = REG_READ(SDVOC);
+ } else {
+ bval = REG_READ(SDVOB);
+ }
+ /*
+ * Write the registers twice for luck. Sometimes,
+ * writing them only once doesn't appear to 'stick'.
+ * The BIOS does this too. Yay, magic
+ */
+	for (i = 0; i < 2; i++) {
+ REG_WRITE(SDVOB, bval);
+ REG_READ(SDVOB);
+ REG_WRITE(SDVOC, cval);
+ REG_READ(SDVOC);
+ }
+}
+
+static bool psb_intel_sdvo_read_byte(struct psb_intel_sdvo *psb_intel_sdvo, u8 addr, u8 *ch)
+{
+ struct i2c_msg msgs[] = {
+ {
+ .addr = psb_intel_sdvo->slave_addr,
+ .flags = 0,
+ .len = 1,
+ .buf = &addr,
+ },
+ {
+ .addr = psb_intel_sdvo->slave_addr,
+ .flags = I2C_M_RD,
+ .len = 1,
+ .buf = ch,
+ }
+ };
+ int ret;
+
+ if ((ret = i2c_transfer(psb_intel_sdvo->i2c, msgs, 2)) == 2)
+ return true;
+
+ DRM_DEBUG_KMS("i2c transfer returned %d\n", ret);
+ return false;
+}
+
+#define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd}
+/** Mapping of command numbers to names, for debug output */
+static const struct _sdvo_cmd_name {
+ u8 cmd;
+ const char *name;
+} sdvo_cmd_names[] = {
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TRAINED_INPUTS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_OUTPUTS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_OUTPUTS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_IN_OUT_MAP),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_IN_OUT_MAP),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ATTACHED_DISPLAYS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HOT_PLUG_SUPPORT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_HOT_PLUG),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_HOT_PLUG),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_INPUT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_OUTPUT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART1),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART2),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART2),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART1),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART2),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART1),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART2),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CLOCK_RATE_MULT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CLOCK_RATE_MULT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_POWER_STATES),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POWER_STATE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODER_POWER_STATE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DISPLAY_POWER_STATE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS),
+
+ /* Add the op code for SDVO enhancements */
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HPOS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HPOS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HPOS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_VPOS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_VPOS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_VPOS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SATURATION),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SATURATION),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SATURATION),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HUE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HUE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HUE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_CONTRAST),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CONTRAST),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTRAST),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_BRIGHTNESS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_BRIGHTNESS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_BRIGHTNESS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_H),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_H),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_H),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_V),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_V),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_V),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_2D),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_2D),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_2D),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SHARPNESS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SHARPNESS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SHARPNESS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DOT_CRAWL),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DOT_CRAWL),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_CHROMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_CHROMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_CHROMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_LUMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_LUMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_LUMA_FILTER),
+
+ /* HDMI op code */
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPP_ENCODE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ENCODE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_PIXEL_REPLI),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PIXEL_REPLI),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY_CAP),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_COLORIMETRY),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_AUDIO_STAT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_STAT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INDEX),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_INDEX),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INFO),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_AV_SPLIT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_AV_SPLIT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_TXRATE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_TXRATE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_DATA),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA),
+};
+
+#define IS_SDVOB(reg) ((reg) == SDVOB)
+#define SDVO_NAME(sdvo) (IS_SDVOB((sdvo)->sdvo_reg) ? "SDVOB" : "SDVOC")
+
+static void psb_intel_sdvo_debug_write(struct psb_intel_sdvo *psb_intel_sdvo, u8 cmd,
+ const void *args, int args_len)
+{
+ int i;
+
+ DRM_DEBUG_KMS("%s: W: %02X ",
+ SDVO_NAME(psb_intel_sdvo), cmd);
+ for (i = 0; i < args_len; i++)
+ DRM_LOG_KMS("%02X ", ((u8 *)args)[i]);
+ for (; i < 8; i++)
+ DRM_LOG_KMS(" ");
+ for (i = 0; i < ARRAY_SIZE(sdvo_cmd_names); i++) {
+ if (cmd == sdvo_cmd_names[i].cmd) {
+ DRM_LOG_KMS("(%s)", sdvo_cmd_names[i].name);
+ break;
+ }
+ }
+ if (i == ARRAY_SIZE(sdvo_cmd_names))
+ DRM_LOG_KMS("(%02X)", cmd);
+ DRM_LOG_KMS("\n");
+}
+
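+/* Indexed directly by the SDVO_CMD_STATUS_* value read back from the device */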
+static const char *cmd_status_names[] = {
+ "Power on",
+ "Success",
+ "Not supported",
+ "Invalid arg",
+ "Pending",
+ "Target not specified",
+ "Scaling not supported"
+};
+
+static bool psb_intel_sdvo_write_cmd(struct psb_intel_sdvo *psb_intel_sdvo, u8 cmd,
+ const void *args, int args_len)
+{
+ u8 buf[args_len*2 + 2], status;
+ struct i2c_msg msgs[args_len + 3];
+ int i, ret;
+
+ psb_intel_sdvo_debug_write(psb_intel_sdvo, cmd, args, args_len);
+
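+	/*
+	 * Each argument byte is sent as its own two-byte write of
+	 * (SDVO_I2C_ARG_0 - i, value), followed by a write of the opcode and
+	 * two final messages that select and read back the status register.
+	 */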
+ for (i = 0; i < args_len; i++) {
+ msgs[i].addr = psb_intel_sdvo->slave_addr;
+ msgs[i].flags = 0;
+ msgs[i].len = 2;
+		msgs[i].buf = buf + 2*i;
+		buf[2*i + 0] = SDVO_I2C_ARG_0 - i;
+		buf[2*i + 1] = ((u8 *)args)[i];
+ }
+ msgs[i].addr = psb_intel_sdvo->slave_addr;
+ msgs[i].flags = 0;
+ msgs[i].len = 2;
+ msgs[i].buf = buf + 2*i;
+ buf[2*i + 0] = SDVO_I2C_OPCODE;
+ buf[2*i + 1] = cmd;
+
+ /* the following two are to read the response */
+ status = SDVO_I2C_CMD_STATUS;
+ msgs[i+1].addr = psb_intel_sdvo->slave_addr;
+ msgs[i+1].flags = 0;
+ msgs[i+1].len = 1;
+ msgs[i+1].buf = &status;
+
+ msgs[i+2].addr = psb_intel_sdvo->slave_addr;
+ msgs[i+2].flags = I2C_M_RD;
+ msgs[i+2].len = 1;
+ msgs[i+2].buf = &status;
+
+ ret = i2c_transfer(psb_intel_sdvo->i2c, msgs, i+3);
+ if (ret < 0) {
+		DRM_DEBUG_KMS("I2C transfer returned %d\n", ret);
+ return false;
+ }
+ if (ret != i+3) {
+ /* failure in I2C transfer */
+		DRM_DEBUG_KMS("I2C transfer returned %d/%d\n", ret, i+3);
+ return false;
+ }
+
+ return true;
+}
+
+static bool psb_intel_sdvo_read_response(struct psb_intel_sdvo *psb_intel_sdvo,
+ void *response, int response_len)
+{
+ u8 retry = 5;
+ u8 status;
+ int i;
+
+ DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(psb_intel_sdvo));
+
+ /*
+ * The documentation states that all commands will be
+ * processed within 15µs, and that we need only poll
+ * the status byte a maximum of 3 times in order for the
+ * command to be complete.
+ *
+ * Check 5 times in case the hardware failed to read the docs.
+ */
+ if (!psb_intel_sdvo_read_byte(psb_intel_sdvo,
+ SDVO_I2C_CMD_STATUS,
+ &status))
+ goto log_fail;
+
+ while (status == SDVO_CMD_STATUS_PENDING && retry--) {
+ udelay(15);
+ if (!psb_intel_sdvo_read_byte(psb_intel_sdvo,
+ SDVO_I2C_CMD_STATUS,
+ &status))
+ goto log_fail;
+ }
+
+ if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
+ DRM_LOG_KMS("(%s)", cmd_status_names[status]);
+ else
+ DRM_LOG_KMS("(??? %d)", status);
+
+ if (status != SDVO_CMD_STATUS_SUCCESS)
+ goto log_fail;
+
+ /* Read the command response */
+ for (i = 0; i < response_len; i++) {
+ if (!psb_intel_sdvo_read_byte(psb_intel_sdvo,
+ SDVO_I2C_RETURN_0 + i,
+ &((u8 *)response)[i]))
+ goto log_fail;
+ DRM_LOG_KMS(" %02X", ((u8 *)response)[i]);
+ }
+ DRM_LOG_KMS("\n");
+ return true;
+
+log_fail:
+ DRM_LOG_KMS("... failed\n");
+ return false;
+}
+
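+/*
+ * Slow dot clocks are sent over SDVO with a clock multiplier so that the
+ * link clock stays within its supported range: modes of 100MHz and above
+ * run at 1x, 50-100MHz at 2x, and anything slower at 4x.
+ */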
+static int psb_intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
+{
+ if (mode->clock >= 100000)
+ return 1;
+ else if (mode->clock >= 50000)
+ return 2;
+ else
+ return 4;
+}
+
+static bool psb_intel_sdvo_set_control_bus_switch(struct psb_intel_sdvo *psb_intel_sdvo,
+ u8 ddc_bus)
+{
+ /* This must be the immediately preceding write before the i2c xfer */
+ return psb_intel_sdvo_write_cmd(psb_intel_sdvo,
+ SDVO_CMD_SET_CONTROL_BUS_SWITCH,
+ &ddc_bus, 1);
+}
+
+static bool psb_intel_sdvo_set_value(struct psb_intel_sdvo *psb_intel_sdvo, u8 cmd, const void *data, int len)
+{
+ if (!psb_intel_sdvo_write_cmd(psb_intel_sdvo, cmd, data, len))
+ return false;
+
+ return psb_intel_sdvo_read_response(psb_intel_sdvo, NULL, 0);
+}
+
+static bool
+psb_intel_sdvo_get_value(struct psb_intel_sdvo *psb_intel_sdvo, u8 cmd, void *value, int len)
+{
+ if (!psb_intel_sdvo_write_cmd(psb_intel_sdvo, cmd, NULL, 0))
+ return false;
+
+ return psb_intel_sdvo_read_response(psb_intel_sdvo, value, len);
+}
+
+static bool psb_intel_sdvo_set_target_input(struct psb_intel_sdvo *psb_intel_sdvo)
+{
+ struct psb_intel_sdvo_set_target_input_args targets = {0};
+ return psb_intel_sdvo_set_value(psb_intel_sdvo,
+ SDVO_CMD_SET_TARGET_INPUT,
+ &targets, sizeof(targets));
+}
+
+/**
+ * Return whether each input is trained.
+ *
+ * This function is making an assumption about the layout of the response,
+ * which should be checked against the docs.
+ */
+static bool psb_intel_sdvo_get_trained_inputs(struct psb_intel_sdvo *psb_intel_sdvo, bool *input_1, bool *input_2)
+{
+ struct psb_intel_sdvo_get_trained_inputs_response response;
+
+ BUILD_BUG_ON(sizeof(response) != 1);
+ if (!psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_TRAINED_INPUTS,
+ &response, sizeof(response)))
+ return false;
+
+ *input_1 = response.input0_trained;
+ *input_2 = response.input1_trained;
+ return true;
+}
+
+static bool psb_intel_sdvo_set_active_outputs(struct psb_intel_sdvo *psb_intel_sdvo,
+ u16 outputs)
+{
+ return psb_intel_sdvo_set_value(psb_intel_sdvo,
+ SDVO_CMD_SET_ACTIVE_OUTPUTS,
+ &outputs, sizeof(outputs));
+}
+
+static bool psb_intel_sdvo_set_encoder_power_state(struct psb_intel_sdvo *psb_intel_sdvo,
+ int mode)
+{
+ u8 state = SDVO_ENCODER_STATE_ON;
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ state = SDVO_ENCODER_STATE_ON;
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ state = SDVO_ENCODER_STATE_STANDBY;
+ break;
+ case DRM_MODE_DPMS_SUSPEND:
+ state = SDVO_ENCODER_STATE_SUSPEND;
+ break;
+ case DRM_MODE_DPMS_OFF:
+ state = SDVO_ENCODER_STATE_OFF;
+ break;
+ }
+
+ return psb_intel_sdvo_set_value(psb_intel_sdvo,
+ SDVO_CMD_SET_ENCODER_POWER_STATE, &state, sizeof(state));
+}
+
+static bool psb_intel_sdvo_get_input_pixel_clock_range(struct psb_intel_sdvo *psb_intel_sdvo,
+ int *clock_min,
+ int *clock_max)
+{
+ struct psb_intel_sdvo_pixel_clock_range clocks;
+
+ BUILD_BUG_ON(sizeof(clocks) != 4);
+ if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
+ SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE,
+ &clocks, sizeof(clocks)))
+ return false;
+
+ /* Convert the values from units of 10 kHz to kHz. */
+ *clock_min = clocks.min * 10;
+ *clock_max = clocks.max * 10;
+ return true;
+}
+
+static bool psb_intel_sdvo_set_target_output(struct psb_intel_sdvo *psb_intel_sdvo,
+ u16 outputs)
+{
+ return psb_intel_sdvo_set_value(psb_intel_sdvo,
+ SDVO_CMD_SET_TARGET_OUTPUT,
+ &outputs, sizeof(outputs));
+}
+
+static bool psb_intel_sdvo_set_timing(struct psb_intel_sdvo *psb_intel_sdvo, u8 cmd,
+ struct psb_intel_sdvo_dtd *dtd)
+{
+ return psb_intel_sdvo_set_value(psb_intel_sdvo, cmd, &dtd->part1, sizeof(dtd->part1)) &&
+ psb_intel_sdvo_set_value(psb_intel_sdvo, cmd + 1, &dtd->part2, sizeof(dtd->part2));
+}
+
+static bool psb_intel_sdvo_set_input_timing(struct psb_intel_sdvo *psb_intel_sdvo,
+ struct psb_intel_sdvo_dtd *dtd)
+{
+ return psb_intel_sdvo_set_timing(psb_intel_sdvo,
+ SDVO_CMD_SET_INPUT_TIMINGS_PART1, dtd);
+}
+
+static bool psb_intel_sdvo_set_output_timing(struct psb_intel_sdvo *psb_intel_sdvo,
+ struct psb_intel_sdvo_dtd *dtd)
+{
+ return psb_intel_sdvo_set_timing(psb_intel_sdvo,
+ SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd);
+}
+
+static bool
+psb_intel_sdvo_create_preferred_input_timing(struct psb_intel_sdvo *psb_intel_sdvo,
+ uint16_t clock,
+ uint16_t width,
+ uint16_t height)
+{
+ struct psb_intel_sdvo_preferred_input_timing_args args;
+
+ memset(&args, 0, sizeof(args));
+ args.clock = clock;
+ args.width = width;
+ args.height = height;
+ args.interlace = 0;
+
+ if (psb_intel_sdvo->is_lvds &&
+ (psb_intel_sdvo->sdvo_lvds_fixed_mode->hdisplay != width ||
+ psb_intel_sdvo->sdvo_lvds_fixed_mode->vdisplay != height))
+ args.scaled = 1;
+
+ return psb_intel_sdvo_set_value(psb_intel_sdvo,
+ SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING,
+ &args, sizeof(args));
+}
+
+static bool psb_intel_sdvo_get_preferred_input_timing(struct psb_intel_sdvo *psb_intel_sdvo,
+ struct psb_intel_sdvo_dtd *dtd)
+{
+ BUILD_BUG_ON(sizeof(dtd->part1) != 8);
+ BUILD_BUG_ON(sizeof(dtd->part2) != 8);
+ return psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1,
+ &dtd->part1, sizeof(dtd->part1)) &&
+ psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2,
+ &dtd->part2, sizeof(dtd->part2));
+}
+
+static bool psb_intel_sdvo_set_clock_rate_mult(struct psb_intel_sdvo *psb_intel_sdvo, u8 val)
+{
+ return psb_intel_sdvo_set_value(psb_intel_sdvo, SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1);
+}
+
+static void psb_intel_sdvo_get_dtd_from_mode(struct psb_intel_sdvo_dtd *dtd,
+ const struct drm_display_mode *mode)
+{
+ uint16_t width, height;
+ uint16_t h_blank_len, h_sync_len, v_blank_len, v_sync_len;
+ uint16_t h_sync_offset, v_sync_offset;
+
+ width = mode->crtc_hdisplay;
+ height = mode->crtc_vdisplay;
+
+ /* do some mode translations */
+ h_blank_len = mode->crtc_hblank_end - mode->crtc_hblank_start;
+ h_sync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
+
+ v_blank_len = mode->crtc_vblank_end - mode->crtc_vblank_start;
+ v_sync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;
+
+ h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start;
+ v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start;
+
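+	/*
+	 * Pack the values DTD-style: the low byte of each field stands alone,
+	 * while the upper bits of each active/blank pair share a "high"
+	 * nibble byte.
+	 */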
+ dtd->part1.clock = mode->clock / 10;
+ dtd->part1.h_active = width & 0xff;
+ dtd->part1.h_blank = h_blank_len & 0xff;
+ dtd->part1.h_high = (((width >> 8) & 0xf) << 4) |
+ ((h_blank_len >> 8) & 0xf);
+ dtd->part1.v_active = height & 0xff;
+ dtd->part1.v_blank = v_blank_len & 0xff;
+ dtd->part1.v_high = (((height >> 8) & 0xf) << 4) |
+ ((v_blank_len >> 8) & 0xf);
+
+ dtd->part2.h_sync_off = h_sync_offset & 0xff;
+ dtd->part2.h_sync_width = h_sync_len & 0xff;
+ dtd->part2.v_sync_off_width = (v_sync_offset & 0xf) << 4 |
+ (v_sync_len & 0xf);
+ dtd->part2.sync_off_width_high = ((h_sync_offset & 0x300) >> 2) |
+ ((h_sync_len & 0x300) >> 4) | ((v_sync_offset & 0x30) >> 2) |
+ ((v_sync_len & 0x30) >> 4);
+
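+	/*
+	 * DTD flags byte: 0x18 selects digital separate sync; bits 1 and 2
+	 * then flag positive hsync and vsync polarity respectively.
+	 */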
+ dtd->part2.dtd_flags = 0x18;
+ if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+ dtd->part2.dtd_flags |= 0x2;
+ if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+ dtd->part2.dtd_flags |= 0x4;
+
+ dtd->part2.sdvo_flags = 0;
+ dtd->part2.v_sync_off_high = v_sync_offset & 0xc0;
+ dtd->part2.reserved = 0;
+}
+
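+/* Inverse of psb_intel_sdvo_get_dtd_from_mode(): unpack the split byte/nibble
+ * DTD fields back into a drm_display_mode. */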
+static void psb_intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode,
+ const struct psb_intel_sdvo_dtd *dtd)
+{
+ mode->hdisplay = dtd->part1.h_active;
+ mode->hdisplay += ((dtd->part1.h_high >> 4) & 0x0f) << 8;
+ mode->hsync_start = mode->hdisplay + dtd->part2.h_sync_off;
+ mode->hsync_start += (dtd->part2.sync_off_width_high & 0xc0) << 2;
+ mode->hsync_end = mode->hsync_start + dtd->part2.h_sync_width;
+ mode->hsync_end += (dtd->part2.sync_off_width_high & 0x30) << 4;
+ mode->htotal = mode->hdisplay + dtd->part1.h_blank;
+ mode->htotal += (dtd->part1.h_high & 0xf) << 8;
+
+ mode->vdisplay = dtd->part1.v_active;
+ mode->vdisplay += ((dtd->part1.v_high >> 4) & 0x0f) << 8;
+ mode->vsync_start = mode->vdisplay;
+ mode->vsync_start += (dtd->part2.v_sync_off_width >> 4) & 0xf;
+ mode->vsync_start += (dtd->part2.sync_off_width_high & 0x0c) << 2;
+ mode->vsync_start += dtd->part2.v_sync_off_high & 0xc0;
+ mode->vsync_end = mode->vsync_start +
+ (dtd->part2.v_sync_off_width & 0xf);
+ mode->vsync_end += (dtd->part2.sync_off_width_high & 0x3) << 4;
+ mode->vtotal = mode->vdisplay + dtd->part1.v_blank;
+ mode->vtotal += (dtd->part1.v_high & 0xf) << 8;
+
+ mode->clock = dtd->part1.clock * 10;
+
+ mode->flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
+ if (dtd->part2.dtd_flags & 0x2)
+ mode->flags |= DRM_MODE_FLAG_PHSYNC;
+ if (dtd->part2.dtd_flags & 0x4)
+ mode->flags |= DRM_MODE_FLAG_PVSYNC;
+}
+
+static bool psb_intel_sdvo_check_supp_encode(struct psb_intel_sdvo *psb_intel_sdvo)
+{
+ struct psb_intel_sdvo_encode encode;
+
+ BUILD_BUG_ON(sizeof(encode) != 2);
+ return psb_intel_sdvo_get_value(psb_intel_sdvo,
+ SDVO_CMD_GET_SUPP_ENCODE,
+ &encode, sizeof(encode));
+}
+
+static bool psb_intel_sdvo_set_encode(struct psb_intel_sdvo *psb_intel_sdvo,
+ uint8_t mode)
+{
+ return psb_intel_sdvo_set_value(psb_intel_sdvo, SDVO_CMD_SET_ENCODE, &mode, 1);
+}
+
+static bool psb_intel_sdvo_set_colorimetry(struct psb_intel_sdvo *psb_intel_sdvo,
+ uint8_t mode)
+{
+ return psb_intel_sdvo_set_value(psb_intel_sdvo, SDVO_CMD_SET_COLORIMETRY, &mode, 1);
+}
+
+#if 0
+static void psb_intel_sdvo_dump_hdmi_buf(struct psb_intel_sdvo *psb_intel_sdvo)
+{
+ int i, j;
+ uint8_t set_buf_index[2];
+ uint8_t av_split;
+ uint8_t buf_size;
+ uint8_t buf[48];
+ uint8_t *pos;
+
+ psb_intel_sdvo_get_value(encoder, SDVO_CMD_GET_HBUF_AV_SPLIT, &av_split, 1);
+
+ for (i = 0; i <= av_split; i++) {
+ set_buf_index[0] = i; set_buf_index[1] = 0;
+ psb_intel_sdvo_write_cmd(encoder, SDVO_CMD_SET_HBUF_INDEX,
+ set_buf_index, 2);
+ psb_intel_sdvo_write_cmd(encoder, SDVO_CMD_GET_HBUF_INFO, NULL, 0);
+ psb_intel_sdvo_read_response(encoder, &buf_size, 1);
+
+ pos = buf;
+ for (j = 0; j <= buf_size; j += 8) {
+ psb_intel_sdvo_write_cmd(encoder, SDVO_CMD_GET_HBUF_DATA,
+ NULL, 0);
+ psb_intel_sdvo_read_response(encoder, pos, 8);
+ pos += 8;
+ }
+ }
+}
+#endif
+
+static bool psb_intel_sdvo_set_avi_infoframe(struct psb_intel_sdvo *psb_intel_sdvo)
+{
+	DRM_INFO("HDMI is not supported yet\n");
+
+ return false;
+#if 0
+ struct dip_infoframe avi_if = {
+ .type = DIP_TYPE_AVI,
+ .ver = DIP_VERSION_AVI,
+ .len = DIP_LEN_AVI,
+ };
+ uint8_t tx_rate = SDVO_HBUF_TX_VSYNC;
+ uint8_t set_buf_index[2] = { 1, 0 };
+ uint64_t *data = (uint64_t *)&avi_if;
+ unsigned i;
+
+ intel_dip_infoframe_csum(&avi_if);
+
+ if (!psb_intel_sdvo_set_value(psb_intel_sdvo,
+ SDVO_CMD_SET_HBUF_INDEX,
+ set_buf_index, 2))
+ return false;
+
+ for (i = 0; i < sizeof(avi_if); i += 8) {
+ if (!psb_intel_sdvo_set_value(psb_intel_sdvo,
+ SDVO_CMD_SET_HBUF_DATA,
+ data, 8))
+ return false;
+ data++;
+ }
+
+ return psb_intel_sdvo_set_value(psb_intel_sdvo,
+ SDVO_CMD_SET_HBUF_TXRATE,
+ &tx_rate, 1);
+#endif
+}
+
+static bool psb_intel_sdvo_set_tv_format(struct psb_intel_sdvo *psb_intel_sdvo)
+{
+ struct psb_intel_sdvo_tv_format format;
+ uint32_t format_map;
+
+ format_map = 1 << psb_intel_sdvo->tv_format_index;
+ memset(&format, 0, sizeof(format));
+ memcpy(&format, &format_map, min(sizeof(format), sizeof(format_map)));
+
+ BUILD_BUG_ON(sizeof(format) != 6);
+ return psb_intel_sdvo_set_value(psb_intel_sdvo,
+ SDVO_CMD_SET_TV_FORMAT,
+ &format, sizeof(format));
+}
+
+static bool
+psb_intel_sdvo_set_output_timings_from_mode(struct psb_intel_sdvo *psb_intel_sdvo,
+ struct drm_display_mode *mode)
+{
+ struct psb_intel_sdvo_dtd output_dtd;
+
+ if (!psb_intel_sdvo_set_target_output(psb_intel_sdvo,
+ psb_intel_sdvo->attached_output))
+ return false;
+
+ psb_intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
+ if (!psb_intel_sdvo_set_output_timing(psb_intel_sdvo, &output_dtd))
+ return false;
+
+ return true;
+}
+
+static bool
+psb_intel_sdvo_set_input_timings_for_mode(struct psb_intel_sdvo *psb_intel_sdvo,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ /* Reset the input timing to the screen. Assume always input 0. */
+ if (!psb_intel_sdvo_set_target_input(psb_intel_sdvo))
+ return false;
+
+ if (!psb_intel_sdvo_create_preferred_input_timing(psb_intel_sdvo,
+ mode->clock / 10,
+ mode->hdisplay,
+ mode->vdisplay))
+ return false;
+
+ if (!psb_intel_sdvo_get_preferred_input_timing(psb_intel_sdvo,
+ &psb_intel_sdvo->input_dtd))
+ return false;
+
+ psb_intel_sdvo_get_mode_from_dtd(adjusted_mode, &psb_intel_sdvo->input_dtd);
+
+ drm_mode_set_crtcinfo(adjusted_mode, 0);
+ return true;
+}
+
+static bool psb_intel_sdvo_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(encoder);
+ int multiplier;
+
+ /* We need to construct preferred input timings based on our
+ * output timings. To do that, we have to set the output
+ * timings, even though this isn't really the right place in
+ * the sequence to do it. Oh well.
+ */
+ if (psb_intel_sdvo->is_tv) {
+ if (!psb_intel_sdvo_set_output_timings_from_mode(psb_intel_sdvo, mode))
+ return false;
+
+ (void) psb_intel_sdvo_set_input_timings_for_mode(psb_intel_sdvo,
+ mode,
+ adjusted_mode);
+ } else if (psb_intel_sdvo->is_lvds) {
+ if (!psb_intel_sdvo_set_output_timings_from_mode(psb_intel_sdvo,
+ psb_intel_sdvo->sdvo_lvds_fixed_mode))
+ return false;
+
+ (void) psb_intel_sdvo_set_input_timings_for_mode(psb_intel_sdvo,
+ mode,
+ adjusted_mode);
+ }
+
+ /* Make the CRTC code factor in the SDVO pixel multiplier. The
+ * SDVO device will factor out the multiplier during mode_set.
+ */
+ multiplier = psb_intel_sdvo_get_pixel_multiplier(adjusted_mode);
+ psb_intel_mode_set_pixel_multiplier(adjusted_mode, multiplier);
+
+ return true;
+}
+
+static void psb_intel_sdvo_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_crtc *crtc = encoder->crtc;
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(encoder);
+ u32 sdvox;
+ struct psb_intel_sdvo_in_out_map in_out;
+ struct psb_intel_sdvo_dtd input_dtd;
+ int pixel_multiplier = psb_intel_mode_get_pixel_multiplier(adjusted_mode);
+ int rate;
+
+ if (!mode)
+ return;
+
+ /* First, set the input mapping for the first input to our controlled
+ * output. This is only correct if we're a single-input device, in
+ * which case the first input is the output from the appropriate SDVO
+ * channel on the motherboard. In a two-input device, the first input
+ * will be SDVOB and the second SDVOC.
+ */
+ in_out.in0 = psb_intel_sdvo->attached_output;
+ in_out.in1 = 0;
+
+ psb_intel_sdvo_set_value(psb_intel_sdvo,
+ SDVO_CMD_SET_IN_OUT_MAP,
+ &in_out, sizeof(in_out));
+
+ /* Set the output timings to the screen */
+ if (!psb_intel_sdvo_set_target_output(psb_intel_sdvo,
+ psb_intel_sdvo->attached_output))
+ return;
+
+	/* We already tried to get the input timing in mode_fixup() and
+	 * filled it into adjusted_mode.
+ */
+ if (psb_intel_sdvo->is_tv || psb_intel_sdvo->is_lvds) {
+ input_dtd = psb_intel_sdvo->input_dtd;
+ } else {
+ /* Set the output timing to the screen */
+ if (!psb_intel_sdvo_set_target_output(psb_intel_sdvo,
+ psb_intel_sdvo->attached_output))
+ return;
+
+ psb_intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
+ (void) psb_intel_sdvo_set_output_timing(psb_intel_sdvo, &input_dtd);
+ }
+
+ /* Set the input timing to the screen. Assume always input 0. */
+ if (!psb_intel_sdvo_set_target_input(psb_intel_sdvo))
+ return;
+
+ if (psb_intel_sdvo->has_hdmi_monitor) {
+ psb_intel_sdvo_set_encode(psb_intel_sdvo, SDVO_ENCODE_HDMI);
+ psb_intel_sdvo_set_colorimetry(psb_intel_sdvo,
+ SDVO_COLORIMETRY_RGB256);
+ psb_intel_sdvo_set_avi_infoframe(psb_intel_sdvo);
+ } else
+ psb_intel_sdvo_set_encode(psb_intel_sdvo, SDVO_ENCODE_DVI);
+
+ if (psb_intel_sdvo->is_tv &&
+ !psb_intel_sdvo_set_tv_format(psb_intel_sdvo))
+ return;
+
+ (void) psb_intel_sdvo_set_input_timing(psb_intel_sdvo, &input_dtd);
+
+ switch (pixel_multiplier) {
+ default:
+ case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
+ case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break;
+ case 4: rate = SDVO_CLOCK_RATE_MULT_4X; break;
+ }
+ if (!psb_intel_sdvo_set_clock_rate_mult(psb_intel_sdvo, rate))
+ return;
+
+ /* Set the SDVO control regs. */
+ sdvox = REG_READ(psb_intel_sdvo->sdvo_reg);
+ switch (psb_intel_sdvo->sdvo_reg) {
+ case SDVOB:
+ sdvox &= SDVOB_PRESERVE_MASK;
+ break;
+ case SDVOC:
+ sdvox &= SDVOC_PRESERVE_MASK;
+ break;
+ }
+ sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
+
+ if (psb_intel_crtc->pipe == 1)
+ sdvox |= SDVO_PIPE_B_SELECT;
+ if (psb_intel_sdvo->has_hdmi_audio)
+ sdvox |= SDVO_AUDIO_ENABLE;
+
+ /* FIXME: Check if this is needed for PSB
+ sdvox |= (pixel_multiplier - 1) << SDVO_PORT_MULTIPLY_SHIFT;
+ */
+
+ if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL)
+ sdvox |= SDVO_STALL_SELECT;
+ psb_intel_sdvo_write_sdvox(psb_intel_sdvo, sdvox);
+}
+
+static void psb_intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(encoder);
+ u32 temp;
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ DRM_DEBUG("DPMS_ON");
+ break;
+ case DRM_MODE_DPMS_OFF:
+ DRM_DEBUG("DPMS_OFF");
+ break;
+ default:
+ DRM_DEBUG("DPMS: %d", mode);
+ }
+
+ if (mode != DRM_MODE_DPMS_ON) {
+ psb_intel_sdvo_set_active_outputs(psb_intel_sdvo, 0);
+ if (0)
+ psb_intel_sdvo_set_encoder_power_state(psb_intel_sdvo, mode);
+
+ if (mode == DRM_MODE_DPMS_OFF) {
+ temp = REG_READ(psb_intel_sdvo->sdvo_reg);
+ if ((temp & SDVO_ENABLE) != 0) {
+ psb_intel_sdvo_write_sdvox(psb_intel_sdvo, temp & ~SDVO_ENABLE);
+ }
+ }
+ } else {
+ bool input1, input2;
+ int i;
+ u8 status;
+
+ temp = REG_READ(psb_intel_sdvo->sdvo_reg);
+ if ((temp & SDVO_ENABLE) == 0)
+ psb_intel_sdvo_write_sdvox(psb_intel_sdvo, temp | SDVO_ENABLE);
+ for (i = 0; i < 2; i++)
+ psb_intel_wait_for_vblank(dev);
+
+ status = psb_intel_sdvo_get_trained_inputs(psb_intel_sdvo, &input1, &input2);
+ /* Warn if the device reported failure to sync.
+ * A lot of SDVO devices fail to notify of sync, but it's
+		 * a given that if the status is a success, we succeeded.
+ */
+ if (status == SDVO_CMD_STATUS_SUCCESS && !input1) {
+ DRM_DEBUG_KMS("First %s output reported failure to "
+ "sync\n", SDVO_NAME(psb_intel_sdvo));
+ }
+
+ if (0)
+ psb_intel_sdvo_set_encoder_power_state(psb_intel_sdvo, mode);
+ psb_intel_sdvo_set_active_outputs(psb_intel_sdvo, psb_intel_sdvo->attached_output);
+ }
+ return;
+}
+
+static int psb_intel_sdvo_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct drm_psb_private *dev_priv = connector->dev->dev_private;
+ struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
+
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
+ if (psb_intel_sdvo->pixel_clock_min > mode->clock)
+ return MODE_CLOCK_LOW;
+
+ if (psb_intel_sdvo->pixel_clock_max < mode->clock)
+ return MODE_CLOCK_HIGH;
+
+ if (psb_intel_sdvo->is_lvds) {
+ if (mode->hdisplay > psb_intel_sdvo->sdvo_lvds_fixed_mode->hdisplay)
+ return MODE_PANEL;
+
+ if (mode->vdisplay > psb_intel_sdvo->sdvo_lvds_fixed_mode->vdisplay)
+ return MODE_PANEL;
+ }
+
+ /* We assume worst case scenario of 32 bpp here, since we don't know */
+ if ((ALIGN(mode->hdisplay * 4, 64) * mode->vdisplay) >
+ dev_priv->vram_stolen_size)
+ return MODE_MEM;
+
+ return MODE_OK;
+}
+
+static bool psb_intel_sdvo_get_capabilities(struct psb_intel_sdvo *psb_intel_sdvo, struct psb_intel_sdvo_caps *caps)
+{
+ BUILD_BUG_ON(sizeof(*caps) != 8);
+ if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
+ SDVO_CMD_GET_DEVICE_CAPS,
+ caps, sizeof(*caps)))
+ return false;
+
+ DRM_DEBUG_KMS("SDVO capabilities:\n"
+ " vendor_id: %d\n"
+ " device_id: %d\n"
+ " device_rev_id: %d\n"
+ " sdvo_version_major: %d\n"
+ " sdvo_version_minor: %d\n"
+ " sdvo_inputs_mask: %d\n"
+ " smooth_scaling: %d\n"
+ " sharp_scaling: %d\n"
+ " up_scaling: %d\n"
+ " down_scaling: %d\n"
+ " stall_support: %d\n"
+ " output_flags: %d\n",
+ caps->vendor_id,
+ caps->device_id,
+ caps->device_rev_id,
+ caps->sdvo_version_major,
+ caps->sdvo_version_minor,
+ caps->sdvo_inputs_mask,
+ caps->smooth_scaling,
+ caps->sharp_scaling,
+ caps->up_scaling,
+ caps->down_scaling,
+ caps->stall_support,
+ caps->output_flags);
+
+ return true;
+}
+
+/* Currently unused */
+#if 0
+struct drm_connector* psb_intel_sdvo_find(struct drm_device *dev, int sdvoB)
+{
+ struct drm_connector *connector = NULL;
+ struct psb_intel_sdvo *iout = NULL;
+ struct psb_intel_sdvo *sdvo;
+
+ /* find the sdvo connector */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ iout = to_psb_intel_sdvo(connector);
+
+ if (iout->type != INTEL_OUTPUT_SDVO)
+ continue;
+
+ sdvo = iout->dev_priv;
+
+ if (sdvo->sdvo_reg == SDVOB && sdvoB)
+ return connector;
+
+ if (sdvo->sdvo_reg == SDVOC && !sdvoB)
+ return connector;
+
+ }
+
+ return NULL;
+}
+
+int psb_intel_sdvo_supports_hotplug(struct drm_connector *connector)
+{
+ u8 response[2];
+ u8 status;
+ struct psb_intel_sdvo *psb_intel_sdvo;
+ DRM_DEBUG_KMS("\n");
+
+ if (!connector)
+ return 0;
+
+ psb_intel_sdvo = to_psb_intel_sdvo(connector);
+
+ return psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
+ &response, 2) && response[0];
+}
+
+void psb_intel_sdvo_set_hotplug(struct drm_connector *connector, int on)
+{
+ u8 response[2];
+ u8 status;
+ struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(connector);
+
+ psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
+ psb_intel_sdvo_read_response(psb_intel_sdvo, &response, 2);
+
+ if (on) {
+ psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0);
+ status = psb_intel_sdvo_read_response(psb_intel_sdvo, &response, 2);
+
+ psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
+ } else {
+ response[0] = 0;
+ response[1] = 0;
+ psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
+ }
+
+ psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
+ psb_intel_sdvo_read_response(psb_intel_sdvo, &response, 2);
+}
+#endif
+
+static bool
+psb_intel_sdvo_multifunc_encoder(struct psb_intel_sdvo *psb_intel_sdvo)
+{
+ /* Is there more than one type of output? */
+ int caps = psb_intel_sdvo->caps.output_flags & 0xf;
+	return caps & (caps - 1);
+}
+
+static struct edid *
+psb_intel_sdvo_get_edid(struct drm_connector *connector)
+{
+ struct psb_intel_sdvo *sdvo = intel_attached_sdvo(connector);
+ return drm_get_edid(connector, &sdvo->ddc);
+}
+
+/* Mac mini hack -- use the same DDC as the analog connector */
+static struct edid *
+psb_intel_sdvo_get_analog_edid(struct drm_connector *connector)
+{
+ struct drm_psb_private *dev_priv = connector->dev->dev_private;
+
+ return drm_get_edid(connector,
+ &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
+}
+
+enum drm_connector_status
+psb_intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
+{
+ struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
+ enum drm_connector_status status;
+ struct edid *edid;
+
+ edid = psb_intel_sdvo_get_edid(connector);
+
+ if (edid == NULL && psb_intel_sdvo_multifunc_encoder(psb_intel_sdvo)) {
+ u8 ddc, saved_ddc = psb_intel_sdvo->ddc_bus;
+
+ /*
+		 * Don't use DDC bus 1 as the argument of the DDC bus switch to
+		 * get the EDID; it is reserved for the SDVO SPD ROM.
+ */
+ for (ddc = psb_intel_sdvo->ddc_bus >> 1; ddc > 1; ddc >>= 1) {
+ psb_intel_sdvo->ddc_bus = ddc;
+ edid = psb_intel_sdvo_get_edid(connector);
+ if (edid)
+ break;
+ }
+ /*
+ * If we found the EDID on the other bus,
+ * assume that is the correct DDC bus.
+ */
+ if (edid == NULL)
+ psb_intel_sdvo->ddc_bus = saved_ddc;
+ }
+
+ /*
+	 * When there is no EDID and no monitor is connected to the VGA
+	 * port, try to use the CRT DDC to read the EDID for the DVI connector.
+ */
+ if (edid == NULL)
+ edid = psb_intel_sdvo_get_analog_edid(connector);
+
+ status = connector_status_unknown;
+ if (edid != NULL) {
+ /* DDC bus is shared, match EDID to connector type */
+ if (edid->input & DRM_EDID_INPUT_DIGITAL) {
+ status = connector_status_connected;
+ if (psb_intel_sdvo->is_hdmi) {
+ psb_intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid);
+ psb_intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid);
+ }
+ } else
+ status = connector_status_disconnected;
+ connector->display_info.raw_edid = NULL;
+ kfree(edid);
+ }
+
+ if (status == connector_status_connected) {
+ struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
+ if (psb_intel_sdvo_connector->force_audio)
+ psb_intel_sdvo->has_hdmi_audio = psb_intel_sdvo_connector->force_audio > 0;
+ }
+
+ return status;
+}
+
+static enum drm_connector_status
+psb_intel_sdvo_detect(struct drm_connector *connector, bool force)
+{
+ uint16_t response;
+ struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
+ struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
+ enum drm_connector_status ret;
+
+ if (!psb_intel_sdvo_write_cmd(psb_intel_sdvo,
+ SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0))
+ return connector_status_unknown;
+
+ /* add 30ms delay when the output type might be TV */
+ if (psb_intel_sdvo->caps.output_flags &
+ (SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_CVBS0))
+ mdelay(30);
+
+ if (!psb_intel_sdvo_read_response(psb_intel_sdvo, &response, 2))
+ return connector_status_unknown;
+
+ DRM_DEBUG_KMS("SDVO response %d %d [%x]\n",
+ response & 0xff, response >> 8,
+ psb_intel_sdvo_connector->output_flag);
+
+ if (response == 0)
+ return connector_status_disconnected;
+
+ psb_intel_sdvo->attached_output = response;
+
+ psb_intel_sdvo->has_hdmi_monitor = false;
+ psb_intel_sdvo->has_hdmi_audio = false;
+
+ if ((psb_intel_sdvo_connector->output_flag & response) == 0)
+ ret = connector_status_disconnected;
+ else if (IS_TMDS(psb_intel_sdvo_connector))
+ ret = psb_intel_sdvo_hdmi_sink_detect(connector);
+ else {
+ struct edid *edid;
+
+ /* if we have an edid check it matches the connection */
+ edid = psb_intel_sdvo_get_edid(connector);
+ if (edid == NULL)
+ edid = psb_intel_sdvo_get_analog_edid(connector);
+ if (edid != NULL) {
+ if (edid->input & DRM_EDID_INPUT_DIGITAL)
+ ret = connector_status_disconnected;
+ else
+ ret = connector_status_connected;
+ connector->display_info.raw_edid = NULL;
+ kfree(edid);
+ } else
+ ret = connector_status_connected;
+ }
+
+	/* May update encoder flags, e.g. the TV clock requirement for SDVO TV */
+ if (ret == connector_status_connected) {
+ psb_intel_sdvo->is_tv = false;
+ psb_intel_sdvo->is_lvds = false;
+ psb_intel_sdvo->base.needs_tv_clock = false;
+
+ if (response & SDVO_TV_MASK) {
+ psb_intel_sdvo->is_tv = true;
+ psb_intel_sdvo->base.needs_tv_clock = true;
+ }
+ if (response & SDVO_LVDS_MASK)
+ psb_intel_sdvo->is_lvds = psb_intel_sdvo->sdvo_lvds_fixed_mode != NULL;
+ }
+
+ return ret;
+}
+
+static void psb_intel_sdvo_get_ddc_modes(struct drm_connector *connector)
+{
+ struct edid *edid;
+
+ /* set the bus switch and get the modes */
+ edid = psb_intel_sdvo_get_edid(connector);
+
+ /*
+ * Mac mini hack. On this device, the DVI-I connector shares one DDC
+ * link between analog and digital outputs. So, if the regular SDVO
+ * DDC fails, check to see if the analog output is disconnected, in
+ * which case we'll look there for the digital DDC data.
+ */
+ if (edid == NULL)
+ edid = psb_intel_sdvo_get_analog_edid(connector);
+
+ if (edid != NULL) {
+ struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
+ bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
+ bool connector_is_digital = !!IS_TMDS(psb_intel_sdvo_connector);
+
+ if (connector_is_digital == monitor_is_digital) {
+ drm_mode_connector_update_edid_property(connector, edid);
+ drm_add_edid_modes(connector, edid);
+ }
+
+ connector->display_info.raw_edid = NULL;
+ kfree(edid);
+ }
+}
+
+/*
+ * Set of SDVO TV modes.
+ * Note! This is in reply order (see loop in get_tv_modes).
+ * XXX: all 60Hz refresh?
+ */
+static const struct drm_display_mode sdvo_tv_modes[] = {
+ { DRM_MODE("320x200", DRM_MODE_TYPE_DRIVER, 5815, 320, 321, 384,
+ 416, 0, 200, 201, 232, 233, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("320x240", DRM_MODE_TYPE_DRIVER, 6814, 320, 321, 384,
+ 416, 0, 240, 241, 272, 273, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("400x300", DRM_MODE_TYPE_DRIVER, 9910, 400, 401, 464,
+ 496, 0, 300, 301, 332, 333, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 16913, 640, 641, 704,
+ 736, 0, 350, 351, 382, 383, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("640x400", DRM_MODE_TYPE_DRIVER, 19121, 640, 641, 704,
+ 736, 0, 400, 401, 432, 433, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 22654, 640, 641, 704,
+ 736, 0, 480, 481, 512, 513, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("704x480", DRM_MODE_TYPE_DRIVER, 24624, 704, 705, 768,
+ 800, 0, 480, 481, 512, 513, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("704x576", DRM_MODE_TYPE_DRIVER, 29232, 704, 705, 768,
+ 800, 0, 576, 577, 608, 609, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("720x350", DRM_MODE_TYPE_DRIVER, 18751, 720, 721, 784,
+ 816, 0, 350, 351, 382, 383, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 21199, 720, 721, 784,
+ 816, 0, 400, 401, 432, 433, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 25116, 720, 721, 784,
+ 816, 0, 480, 481, 512, 513, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("720x540", DRM_MODE_TYPE_DRIVER, 28054, 720, 721, 784,
+ 816, 0, 540, 541, 572, 573, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 29816, 720, 721, 784,
+ 816, 0, 576, 577, 608, 609, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("768x576", DRM_MODE_TYPE_DRIVER, 31570, 768, 769, 832,
+ 864, 0, 576, 577, 608, 609, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 34030, 800, 801, 864,
+ 896, 0, 600, 601, 632, 633, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 36581, 832, 833, 896,
+ 928, 0, 624, 625, 656, 657, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("920x766", DRM_MODE_TYPE_DRIVER, 48707, 920, 921, 984,
+ 1016, 0, 766, 767, 798, 799, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 53827, 1024, 1025, 1088,
+ 1120, 0, 768, 769, 800, 801, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 87265, 1280, 1281, 1344,
+ 1376, 0, 1024, 1025, 1056, 1057, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+};
+
+static void psb_intel_sdvo_get_tv_modes(struct drm_connector *connector)
+{
+ struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
+ struct psb_intel_sdvo_sdtv_resolution_request tv_res;
+ uint32_t reply = 0, format_map = 0;
+ int i;
+
+ /* Read the list of supported input resolutions for the selected TV
+ * format.
+ */
+ format_map = 1 << psb_intel_sdvo->tv_format_index;
+ memcpy(&tv_res, &format_map,
+ min(sizeof(format_map), sizeof(struct psb_intel_sdvo_sdtv_resolution_request)));
+
+ if (!psb_intel_sdvo_set_target_output(psb_intel_sdvo, psb_intel_sdvo->attached_output))
+ return;
+
+ BUILD_BUG_ON(sizeof(tv_res) != 3);
+ if (!psb_intel_sdvo_write_cmd(psb_intel_sdvo,
+ SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
+ &tv_res, sizeof(tv_res)))
+ return;
+ if (!psb_intel_sdvo_read_response(psb_intel_sdvo, &reply, 3))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(sdvo_tv_modes); i++)
+ if (reply & (1 << i)) {
+ struct drm_display_mode *nmode;
+ nmode = drm_mode_duplicate(connector->dev,
+ &sdvo_tv_modes[i]);
+ if (nmode)
+ drm_mode_probed_add(connector, nmode);
+ }
+}
+
+static void psb_intel_sdvo_get_lvds_modes(struct drm_connector *connector)
+{
+ struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
+ struct drm_psb_private *dev_priv = connector->dev->dev_private;
+ struct drm_display_mode *newmode;
+
+ /*
+ * Attempt to get the mode list from DDC.
+ * Assume that the preferred modes are
+ * arranged in priority order.
+ */
+ psb_intel_ddc_get_modes(connector, psb_intel_sdvo->i2c);
+	if (!list_empty(&connector->probed_modes))
+ goto end;
+
+ /* Fetch modes from VBT */
+ if (dev_priv->sdvo_lvds_vbt_mode != NULL) {
+ newmode = drm_mode_duplicate(connector->dev,
+ dev_priv->sdvo_lvds_vbt_mode);
+ if (newmode != NULL) {
+ /* Guarantee the mode is preferred */
+ newmode->type = (DRM_MODE_TYPE_PREFERRED |
+ DRM_MODE_TYPE_DRIVER);
+ drm_mode_probed_add(connector, newmode);
+ }
+ }
+
+end:
+ list_for_each_entry(newmode, &connector->probed_modes, head) {
+ if (newmode->type & DRM_MODE_TYPE_PREFERRED) {
+ psb_intel_sdvo->sdvo_lvds_fixed_mode =
+ drm_mode_duplicate(connector->dev, newmode);
+
+ drm_mode_set_crtcinfo(psb_intel_sdvo->sdvo_lvds_fixed_mode,
+ 0);
+
+ psb_intel_sdvo->is_lvds = true;
+ break;
+ }
+ }
+
+}
+
+static int psb_intel_sdvo_get_modes(struct drm_connector *connector)
+{
+ struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
+
+ if (IS_TV(psb_intel_sdvo_connector))
+ psb_intel_sdvo_get_tv_modes(connector);
+ else if (IS_LVDS(psb_intel_sdvo_connector))
+ psb_intel_sdvo_get_lvds_modes(connector);
+ else
+ psb_intel_sdvo_get_ddc_modes(connector);
+
+ return !list_empty(&connector->probed_modes);
+}
+
+static void
+psb_intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
+{
+ struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
+ struct drm_device *dev = connector->dev;
+
+ if (psb_intel_sdvo_connector->left)
+ drm_property_destroy(dev, psb_intel_sdvo_connector->left);
+ if (psb_intel_sdvo_connector->right)
+ drm_property_destroy(dev, psb_intel_sdvo_connector->right);
+ if (psb_intel_sdvo_connector->top)
+ drm_property_destroy(dev, psb_intel_sdvo_connector->top);
+ if (psb_intel_sdvo_connector->bottom)
+ drm_property_destroy(dev, psb_intel_sdvo_connector->bottom);
+ if (psb_intel_sdvo_connector->hpos)
+ drm_property_destroy(dev, psb_intel_sdvo_connector->hpos);
+ if (psb_intel_sdvo_connector->vpos)
+ drm_property_destroy(dev, psb_intel_sdvo_connector->vpos);
+ if (psb_intel_sdvo_connector->saturation)
+ drm_property_destroy(dev, psb_intel_sdvo_connector->saturation);
+ if (psb_intel_sdvo_connector->contrast)
+ drm_property_destroy(dev, psb_intel_sdvo_connector->contrast);
+ if (psb_intel_sdvo_connector->hue)
+ drm_property_destroy(dev, psb_intel_sdvo_connector->hue);
+ if (psb_intel_sdvo_connector->sharpness)
+ drm_property_destroy(dev, psb_intel_sdvo_connector->sharpness);
+ if (psb_intel_sdvo_connector->flicker_filter)
+ drm_property_destroy(dev, psb_intel_sdvo_connector->flicker_filter);
+ if (psb_intel_sdvo_connector->flicker_filter_2d)
+ drm_property_destroy(dev, psb_intel_sdvo_connector->flicker_filter_2d);
+ if (psb_intel_sdvo_connector->flicker_filter_adaptive)
+ drm_property_destroy(dev, psb_intel_sdvo_connector->flicker_filter_adaptive);
+ if (psb_intel_sdvo_connector->tv_luma_filter)
+ drm_property_destroy(dev, psb_intel_sdvo_connector->tv_luma_filter);
+ if (psb_intel_sdvo_connector->tv_chroma_filter)
+ drm_property_destroy(dev, psb_intel_sdvo_connector->tv_chroma_filter);
+ if (psb_intel_sdvo_connector->dot_crawl)
+ drm_property_destroy(dev, psb_intel_sdvo_connector->dot_crawl);
+ if (psb_intel_sdvo_connector->brightness)
+ drm_property_destroy(dev, psb_intel_sdvo_connector->brightness);
+}
+
+static void psb_intel_sdvo_destroy(struct drm_connector *connector)
+{
+ struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
+
+ if (psb_intel_sdvo_connector->tv_format)
+ drm_property_destroy(connector->dev,
+ psb_intel_sdvo_connector->tv_format);
+
+ psb_intel_sdvo_destroy_enhance_property(connector);
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
+
+static bool psb_intel_sdvo_detect_hdmi_audio(struct drm_connector *connector)
+{
+ struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
+ struct edid *edid;
+ bool has_audio = false;
+
+ if (!psb_intel_sdvo->is_hdmi)
+ return false;
+
+ edid = psb_intel_sdvo_get_edid(connector);
+ if (edid != NULL && edid->input & DRM_EDID_INPUT_DIGITAL)
+ has_audio = drm_detect_monitor_audio(edid);
+
+ return has_audio;
+}
+
+static int
+psb_intel_sdvo_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
+ struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
+ struct drm_psb_private *dev_priv = connector->dev->dev_private;
+ uint16_t temp_value;
+ uint8_t cmd;
+ int ret;
+
+ ret = drm_connector_property_set_value(connector, property, val);
+ if (ret)
+ return ret;
+
+ if (property == dev_priv->force_audio_property) {
+ int i = val;
+ bool has_audio;
+
+ if (i == psb_intel_sdvo_connector->force_audio)
+ return 0;
+
+ psb_intel_sdvo_connector->force_audio = i;
+
+ if (i == 0)
+ has_audio = psb_intel_sdvo_detect_hdmi_audio(connector);
+ else
+ has_audio = i > 0;
+
+ if (has_audio == psb_intel_sdvo->has_hdmi_audio)
+ return 0;
+
+ psb_intel_sdvo->has_hdmi_audio = has_audio;
+ goto done;
+ }
+
+ if (property == dev_priv->broadcast_rgb_property) {
+ if (val == !!psb_intel_sdvo->color_range)
+ return 0;
+
+ psb_intel_sdvo->color_range = val ? SDVO_COLOR_RANGE_16_235 : 0;
+ goto done;
+ }
+
+#define CHECK_PROPERTY(name, NAME) \
+ if (psb_intel_sdvo_connector->name == property) { \
+ if (psb_intel_sdvo_connector->cur_##name == temp_value) return 0; \
+ if (psb_intel_sdvo_connector->max_##name < temp_value) return -EINVAL; \
+ cmd = SDVO_CMD_SET_##NAME; \
+ psb_intel_sdvo_connector->cur_##name = temp_value; \
+ goto set_value; \
+ }
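+	/* The macro above range-checks the value, caches it in the connector
+	 * state and selects the matching SDVO_CMD_SET_* command. */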
+
+ if (property == psb_intel_sdvo_connector->tv_format) {
+ if (val >= TV_FORMAT_NUM)
+ return -EINVAL;
+
+ if (psb_intel_sdvo->tv_format_index ==
+ psb_intel_sdvo_connector->tv_format_supported[val])
+ return 0;
+
+ psb_intel_sdvo->tv_format_index = psb_intel_sdvo_connector->tv_format_supported[val];
+ goto done;
+ } else if (IS_TV_OR_LVDS(psb_intel_sdvo_connector)) {
+ temp_value = val;
+ if (psb_intel_sdvo_connector->left == property) {
+ drm_connector_property_set_value(connector,
+ psb_intel_sdvo_connector->right, val);
+ if (psb_intel_sdvo_connector->left_margin == temp_value)
+ return 0;
+
+ psb_intel_sdvo_connector->left_margin = temp_value;
+ psb_intel_sdvo_connector->right_margin = temp_value;
+ temp_value = psb_intel_sdvo_connector->max_hscan -
+ psb_intel_sdvo_connector->left_margin;
+ cmd = SDVO_CMD_SET_OVERSCAN_H;
+ goto set_value;
+ } else if (psb_intel_sdvo_connector->right == property) {
+ drm_connector_property_set_value(connector,
+ psb_intel_sdvo_connector->left, val);
+ if (psb_intel_sdvo_connector->right_margin == temp_value)
+ return 0;
+
+ psb_intel_sdvo_connector->left_margin = temp_value;
+ psb_intel_sdvo_connector->right_margin = temp_value;
+ temp_value = psb_intel_sdvo_connector->max_hscan -
+ psb_intel_sdvo_connector->left_margin;
+ cmd = SDVO_CMD_SET_OVERSCAN_H;
+ goto set_value;
+ } else if (psb_intel_sdvo_connector->top == property) {
+ drm_connector_property_set_value(connector,
+ psb_intel_sdvo_connector->bottom, val);
+ if (psb_intel_sdvo_connector->top_margin == temp_value)
+ return 0;
+
+ psb_intel_sdvo_connector->top_margin = temp_value;
+ psb_intel_sdvo_connector->bottom_margin = temp_value;
+ temp_value = psb_intel_sdvo_connector->max_vscan -
+ psb_intel_sdvo_connector->top_margin;
+ cmd = SDVO_CMD_SET_OVERSCAN_V;
+ goto set_value;
+ } else if (psb_intel_sdvo_connector->bottom == property) {
+ drm_connector_property_set_value(connector,
+ psb_intel_sdvo_connector->top, val);
+ if (psb_intel_sdvo_connector->bottom_margin == temp_value)
+ return 0;
+
+ psb_intel_sdvo_connector->top_margin = temp_value;
+ psb_intel_sdvo_connector->bottom_margin = temp_value;
+ temp_value = psb_intel_sdvo_connector->max_vscan -
+ psb_intel_sdvo_connector->top_margin;
+ cmd = SDVO_CMD_SET_OVERSCAN_V;
+ goto set_value;
+ }
+ CHECK_PROPERTY(hpos, HPOS)
+ CHECK_PROPERTY(vpos, VPOS)
+ CHECK_PROPERTY(saturation, SATURATION)
+ CHECK_PROPERTY(contrast, CONTRAST)
+ CHECK_PROPERTY(hue, HUE)
+ CHECK_PROPERTY(brightness, BRIGHTNESS)
+ CHECK_PROPERTY(sharpness, SHARPNESS)
+ CHECK_PROPERTY(flicker_filter, FLICKER_FILTER)
+ CHECK_PROPERTY(flicker_filter_2d, FLICKER_FILTER_2D)
+ CHECK_PROPERTY(flicker_filter_adaptive, FLICKER_FILTER_ADAPTIVE)
+ CHECK_PROPERTY(tv_chroma_filter, TV_CHROMA_FILTER)
+ CHECK_PROPERTY(tv_luma_filter, TV_LUMA_FILTER)
+ CHECK_PROPERTY(dot_crawl, DOT_CRAWL)
+ }
+
+ return -EINVAL; /* unknown property */
+
+set_value:
+ if (!psb_intel_sdvo_set_value(psb_intel_sdvo, cmd, &temp_value, 2))
+ return -EIO;
+
+done:
+ if (psb_intel_sdvo->base.base.crtc) {
+ struct drm_crtc *crtc = psb_intel_sdvo->base.base.crtc;
+ drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x,
+ crtc->y, crtc->fb);
+ }
+
+ return 0;
+#undef CHECK_PROPERTY
+}
+
+static const struct drm_encoder_helper_funcs psb_intel_sdvo_helper_funcs = {
+ .dpms = psb_intel_sdvo_dpms,
+ .mode_fixup = psb_intel_sdvo_mode_fixup,
+ .prepare = psb_intel_encoder_prepare,
+ .mode_set = psb_intel_sdvo_mode_set,
+ .commit = psb_intel_encoder_commit,
+};
+
+static const struct drm_connector_funcs psb_intel_sdvo_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = psb_intel_sdvo_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = psb_intel_sdvo_set_property,
+ .destroy = psb_intel_sdvo_destroy,
+};
+
+static const struct drm_connector_helper_funcs psb_intel_sdvo_connector_helper_funcs = {
+ .get_modes = psb_intel_sdvo_get_modes,
+ .mode_valid = psb_intel_sdvo_mode_valid,
+ .best_encoder = psb_intel_best_encoder,
+};
+
+static void psb_intel_sdvo_enc_destroy(struct drm_encoder *encoder)
+{
+ struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(encoder);
+
+ if (psb_intel_sdvo->sdvo_lvds_fixed_mode != NULL)
+ drm_mode_destroy(encoder->dev,
+ psb_intel_sdvo->sdvo_lvds_fixed_mode);
+
+ i2c_del_adapter(&psb_intel_sdvo->ddc);
+ psb_intel_encoder_destroy(encoder);
+}
+
+static const struct drm_encoder_funcs psb_intel_sdvo_enc_funcs = {
+ .destroy = psb_intel_sdvo_enc_destroy,
+};
+
+static void
+psb_intel_sdvo_guess_ddc_bus(struct psb_intel_sdvo *sdvo)
+{
+	/* FIXME: At the moment, ddc_bus = 2 is the only thing that works.
+	 * We need to figure out if this is true for all available Poulsbo
+	 * hardware, or if we need to fiddle with the guessing code below.
+	 * The problem might go away if we can parse SDVO mappings from the
+	 * BIOS. */
+ sdvo->ddc_bus = 2;
+
+#if 0
+ uint16_t mask = 0;
+ unsigned int num_bits;
+
+ /* Make a mask of outputs less than or equal to our own priority in the
+ * list.
+ */
+ switch (sdvo->controlled_output) {
+ case SDVO_OUTPUT_LVDS1:
+ mask |= SDVO_OUTPUT_LVDS1;
+ case SDVO_OUTPUT_LVDS0:
+ mask |= SDVO_OUTPUT_LVDS0;
+ case SDVO_OUTPUT_TMDS1:
+ mask |= SDVO_OUTPUT_TMDS1;
+ case SDVO_OUTPUT_TMDS0:
+ mask |= SDVO_OUTPUT_TMDS0;
+ case SDVO_OUTPUT_RGB1:
+ mask |= SDVO_OUTPUT_RGB1;
+ case SDVO_OUTPUT_RGB0:
+ mask |= SDVO_OUTPUT_RGB0;
+ break;
+ }
+
+ /* Count bits to find what number we are in the priority list. */
+ mask &= sdvo->caps.output_flags;
+ num_bits = hweight16(mask);
+ /* If more than 3 outputs, default to DDC bus 3 for now. */
+ if (num_bits > 3)
+ num_bits = 3;
+
+ /* Corresponds to SDVO_CONTROL_BUS_DDCx */
+ sdvo->ddc_bus = 1 << num_bits;
+#endif
+}
+
+/**
+ * Choose the appropriate DDC bus for the control bus switch command for
+ * this SDVO output, based on the controlled output.
+ *
+ * DDC bus number assignment is in a priority order of RGB outputs, then TMDS
+ * outputs, then LVDS outputs.
+ */
+static void
+psb_intel_sdvo_select_ddc_bus(struct drm_psb_private *dev_priv,
+ struct psb_intel_sdvo *sdvo, u32 reg)
+{
+ struct sdvo_device_mapping *mapping;
+
+ if (IS_SDVOB(reg))
+ mapping = &(dev_priv->sdvo_mappings[0]);
+ else
+ mapping = &(dev_priv->sdvo_mappings[1]);
+
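+	/* The upper nibble of the BIOS-provided ddc_pin selects which
+	 * SDVO_CONTROL_BUS_DDCx bit to use for the bus switch. */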
+ if (mapping->initialized)
+ sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4);
+ else
+ psb_intel_sdvo_guess_ddc_bus(sdvo);
+}
+
+static void
+psb_intel_sdvo_select_i2c_bus(struct drm_psb_private *dev_priv,
+ struct psb_intel_sdvo *sdvo, u32 reg)
+{
+ struct sdvo_device_mapping *mapping;
+ u8 pin, speed;
+
+ if (IS_SDVOB(reg))
+ mapping = &dev_priv->sdvo_mappings[0];
+ else
+ mapping = &dev_priv->sdvo_mappings[1];
+
+ pin = GMBUS_PORT_DPB;
+ speed = GMBUS_RATE_1MHZ >> 8;
+ if (mapping->initialized) {
+ pin = mapping->i2c_pin;
+ speed = mapping->i2c_speed;
+ }
+
+ if (pin < GMBUS_NUM_PORTS) {
+ sdvo->i2c = &dev_priv->gmbus[pin].adapter;
+ gma_intel_gmbus_set_speed(sdvo->i2c, speed);
+ gma_intel_gmbus_force_bit(sdvo->i2c, true);
+ } else
+ sdvo->i2c = &dev_priv->gmbus[GMBUS_PORT_DPB].adapter;
+}
+
+static bool
+psb_intel_sdvo_is_hdmi_connector(struct psb_intel_sdvo *psb_intel_sdvo, int device)
+{
+ return psb_intel_sdvo_check_supp_encode(psb_intel_sdvo);
+}
+
+static u8
+psb_intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct sdvo_device_mapping *my_mapping, *other_mapping;
+
+ if (IS_SDVOB(sdvo_reg)) {
+ my_mapping = &dev_priv->sdvo_mappings[0];
+ other_mapping = &dev_priv->sdvo_mappings[1];
+ } else {
+ my_mapping = &dev_priv->sdvo_mappings[1];
+ other_mapping = &dev_priv->sdvo_mappings[0];
+ }
+
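+	/* SDVO devices respond at one of two slave addresses, 0x70 or 0x72 */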
+ /* If the BIOS described our SDVO device, take advantage of it. */
+ if (my_mapping->slave_addr)
+ return my_mapping->slave_addr;
+
+ /* If the BIOS only described a different SDVO device, use the
+ * address that it isn't using.
+ */
+ if (other_mapping->slave_addr) {
+ if (other_mapping->slave_addr == 0x70)
+ return 0x72;
+ else
+ return 0x70;
+ }
+
+	/* No SDVO device info was found for either DVO port,
+	 * so fall back to the mapping assumption used before BIOS parsing.
+ */
+ if (IS_SDVOB(sdvo_reg))
+ return 0x70;
+ else
+ return 0x72;
+}
+
+static void
+psb_intel_sdvo_connector_init(struct psb_intel_sdvo_connector *connector,
+ struct psb_intel_sdvo *encoder)
+{
+ drm_connector_init(encoder->base.base.dev,
+ &connector->base.base,
+ &psb_intel_sdvo_connector_funcs,
+ connector->base.base.connector_type);
+
+ drm_connector_helper_add(&connector->base.base,
+ &psb_intel_sdvo_connector_helper_funcs);
+
+ connector->base.base.interlace_allowed = 0;
+ connector->base.base.doublescan_allowed = 0;
+ connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB;
+
+ psb_intel_connector_attach_encoder(&connector->base, &encoder->base);
+ drm_sysfs_connector_add(&connector->base.base);
+}
+
+static void
+psb_intel_sdvo_add_hdmi_properties(struct psb_intel_sdvo_connector *connector)
+{
+ /* FIXME: We don't support HDMI at the moment
+ struct drm_device *dev = connector->base.base.dev;
+
+ intel_attach_force_audio_property(&connector->base.base);
+ if (INTEL_INFO(dev)->gen >= 4 && IS_MOBILE(dev))
+ intel_attach_broadcast_rgb_property(&connector->base.base);
+ */
+}
+
+static bool
+psb_intel_sdvo_dvi_init(struct psb_intel_sdvo *psb_intel_sdvo, int device)
+{
+ struct drm_encoder *encoder = &psb_intel_sdvo->base.base;
+ struct drm_connector *connector;
+ struct psb_intel_connector *intel_connector;
+ struct psb_intel_sdvo_connector *psb_intel_sdvo_connector;
+
+ psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL);
+ if (!psb_intel_sdvo_connector)
+ return false;
+
+ if (device == 0) {
+ psb_intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS0;
+ psb_intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS0;
+ } else if (device == 1) {
+ psb_intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS1;
+ psb_intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS1;
+ }
+
+ intel_connector = &psb_intel_sdvo_connector->base;
+ connector = &intel_connector->base;
+	/* connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; */
+ encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
+ connector->connector_type = DRM_MODE_CONNECTOR_DVID;
+
+ if (psb_intel_sdvo_is_hdmi_connector(psb_intel_sdvo, device)) {
+ connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
+ psb_intel_sdvo->is_hdmi = true;
+ }
+ psb_intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
+ (1 << INTEL_ANALOG_CLONE_BIT));
+
+ psb_intel_sdvo_connector_init(psb_intel_sdvo_connector, psb_intel_sdvo);
+ if (psb_intel_sdvo->is_hdmi)
+ psb_intel_sdvo_add_hdmi_properties(psb_intel_sdvo_connector);
+
+ return true;
+}
+
+static bool
+psb_intel_sdvo_tv_init(struct psb_intel_sdvo *psb_intel_sdvo, int type)
+{
+ struct drm_encoder *encoder = &psb_intel_sdvo->base.base;
+ struct drm_connector *connector;
+ struct psb_intel_connector *intel_connector;
+ struct psb_intel_sdvo_connector *psb_intel_sdvo_connector;
+
+ psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL);
+ if (!psb_intel_sdvo_connector)
+ return false;
+
+ intel_connector = &psb_intel_sdvo_connector->base;
+ connector = &intel_connector->base;
+ encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
+ connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
+
+ psb_intel_sdvo->controlled_output |= type;
+ psb_intel_sdvo_connector->output_flag = type;
+
+ psb_intel_sdvo->is_tv = true;
+ psb_intel_sdvo->base.needs_tv_clock = true;
+ psb_intel_sdvo->base.clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
+
+ psb_intel_sdvo_connector_init(psb_intel_sdvo_connector, psb_intel_sdvo);
+
+ if (!psb_intel_sdvo_tv_create_property(psb_intel_sdvo, psb_intel_sdvo_connector, type))
+ goto err;
+
+ if (!psb_intel_sdvo_create_enhance_property(psb_intel_sdvo, psb_intel_sdvo_connector))
+ goto err;
+
+ return true;
+
+err:
+ psb_intel_sdvo_destroy(connector);
+ return false;
+}
+
+static bool
+psb_intel_sdvo_analog_init(struct psb_intel_sdvo *psb_intel_sdvo, int device)
+{
+ struct drm_encoder *encoder = &psb_intel_sdvo->base.base;
+ struct drm_connector *connector;
+ struct psb_intel_connector *intel_connector;
+ struct psb_intel_sdvo_connector *psb_intel_sdvo_connector;
+
+ psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL);
+ if (!psb_intel_sdvo_connector)
+ return false;
+
+ intel_connector = &psb_intel_sdvo_connector->base;
+ connector = &intel_connector->base;
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ encoder->encoder_type = DRM_MODE_ENCODER_DAC;
+ connector->connector_type = DRM_MODE_CONNECTOR_VGA;
+
+ if (device == 0) {
+ psb_intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB0;
+ psb_intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB0;
+ } else if (device == 1) {
+ psb_intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB1;
+ psb_intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
+ }
+
+ psb_intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
+ (1 << INTEL_ANALOG_CLONE_BIT));
+
+ psb_intel_sdvo_connector_init(psb_intel_sdvo_connector,
+ psb_intel_sdvo);
+ return true;
+}
+
+static bool
+psb_intel_sdvo_lvds_init(struct psb_intel_sdvo *psb_intel_sdvo, int device)
+{
+ struct drm_encoder *encoder = &psb_intel_sdvo->base.base;
+ struct drm_connector *connector;
+ struct psb_intel_connector *intel_connector;
+ struct psb_intel_sdvo_connector *psb_intel_sdvo_connector;
+
+ psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL);
+ if (!psb_intel_sdvo_connector)
+ return false;
+
+ intel_connector = &psb_intel_sdvo_connector->base;
+ connector = &intel_connector->base;
+ encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
+ connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
+
+ if (device == 0) {
+ psb_intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS0;
+ psb_intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0;
+ } else if (device == 1) {
+ psb_intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS1;
+ psb_intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
+ }
+
+ psb_intel_sdvo->base.clone_mask = ((1 << INTEL_ANALOG_CLONE_BIT) |
+ (1 << INTEL_SDVO_LVDS_CLONE_BIT));
+
+ psb_intel_sdvo_connector_init(psb_intel_sdvo_connector, psb_intel_sdvo);
+ if (!psb_intel_sdvo_create_enhance_property(psb_intel_sdvo, psb_intel_sdvo_connector))
+ goto err;
+
+ return true;
+
+err:
+ psb_intel_sdvo_destroy(connector);
+ return false;
+}
+
+static bool
+psb_intel_sdvo_output_setup(struct psb_intel_sdvo *psb_intel_sdvo, uint16_t flags)
+{
+ psb_intel_sdvo->is_tv = false;
+ psb_intel_sdvo->base.needs_tv_clock = false;
+ psb_intel_sdvo->is_lvds = false;
+
+	/* In SDVO, an XXX1 function block may not exist unless the corresponding XXX0 block does. */
+
+ if (flags & SDVO_OUTPUT_TMDS0)
+ if (!psb_intel_sdvo_dvi_init(psb_intel_sdvo, 0))
+ return false;
+
+ if ((flags & SDVO_TMDS_MASK) == SDVO_TMDS_MASK)
+ if (!psb_intel_sdvo_dvi_init(psb_intel_sdvo, 1))
+ return false;
+
+ /* TV has no XXX1 function block */
+ if (flags & SDVO_OUTPUT_SVID0)
+ if (!psb_intel_sdvo_tv_init(psb_intel_sdvo, SDVO_OUTPUT_SVID0))
+ return false;
+
+ if (flags & SDVO_OUTPUT_CVBS0)
+ if (!psb_intel_sdvo_tv_init(psb_intel_sdvo, SDVO_OUTPUT_CVBS0))
+ return false;
+
+ if (flags & SDVO_OUTPUT_RGB0)
+ if (!psb_intel_sdvo_analog_init(psb_intel_sdvo, 0))
+ return false;
+
+ if ((flags & SDVO_RGB_MASK) == SDVO_RGB_MASK)
+ if (!psb_intel_sdvo_analog_init(psb_intel_sdvo, 1))
+ return false;
+
+ if (flags & SDVO_OUTPUT_LVDS0)
+ if (!psb_intel_sdvo_lvds_init(psb_intel_sdvo, 0))
+ return false;
+
+ if ((flags & SDVO_LVDS_MASK) == SDVO_LVDS_MASK)
+ if (!psb_intel_sdvo_lvds_init(psb_intel_sdvo, 1))
+ return false;
+
+ if ((flags & SDVO_OUTPUT_MASK) == 0) {
+ unsigned char bytes[2];
+
+ psb_intel_sdvo->controlled_output = 0;
+ memcpy(bytes, &psb_intel_sdvo->caps.output_flags, 2);
+ DRM_DEBUG_KMS("%s: Unknown SDVO output type (0x%02x%02x)\n",
+ SDVO_NAME(psb_intel_sdvo),
+ bytes[0], bytes[1]);
+ return false;
+ }
+ psb_intel_sdvo->base.crtc_mask = (1 << 0) | (1 << 1);
+
+ return true;
+}
+
+static bool psb_intel_sdvo_tv_create_property(struct psb_intel_sdvo *psb_intel_sdvo,
+ struct psb_intel_sdvo_connector *psb_intel_sdvo_connector,
+ int type)
+{
+ struct drm_device *dev = psb_intel_sdvo->base.base.dev;
+ struct psb_intel_sdvo_tv_format format;
+ uint32_t format_map, i;
+
+ if (!psb_intel_sdvo_set_target_output(psb_intel_sdvo, type))
+ return false;
+
+ BUILD_BUG_ON(sizeof(format) != 6);
+ if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
+ SDVO_CMD_GET_SUPPORTED_TV_FORMATS,
+ &format, sizeof(format)))
+ return false;
+
+ memcpy(&format_map, &format, min(sizeof(format_map), sizeof(format)));
+
+ if (format_map == 0)
+ return false;
+
+ psb_intel_sdvo_connector->format_supported_num = 0;
+ for (i = 0 ; i < TV_FORMAT_NUM; i++)
+ if (format_map & (1 << i))
+ psb_intel_sdvo_connector->tv_format_supported[psb_intel_sdvo_connector->format_supported_num++] = i;
+
+ psb_intel_sdvo_connector->tv_format =
+ drm_property_create(dev, DRM_MODE_PROP_ENUM,
+ "mode", psb_intel_sdvo_connector->format_supported_num);
+ if (!psb_intel_sdvo_connector->tv_format)
+ return false;
+
+ for (i = 0; i < psb_intel_sdvo_connector->format_supported_num; i++)
+ drm_property_add_enum(
+ psb_intel_sdvo_connector->tv_format, i,
+ i, tv_format_names[psb_intel_sdvo_connector->tv_format_supported[i]]);
+
+ psb_intel_sdvo->tv_format_index = psb_intel_sdvo_connector->tv_format_supported[0];
+ drm_connector_attach_property(&psb_intel_sdvo_connector->base.base,
+ psb_intel_sdvo_connector->tv_format, 0);
+ return true;
+}
+
+#define ENHANCEMENT(name, NAME) do { \
+ if (enhancements.name) { \
+ if (!psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_MAX_##NAME, &data_value, 4) || \
+ !psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_##NAME, &response, 2)) \
+ return false; \
+ psb_intel_sdvo_connector->max_##name = data_value[0]; \
+ psb_intel_sdvo_connector->cur_##name = response; \
+ psb_intel_sdvo_connector->name = \
+ drm_property_create(dev, DRM_MODE_PROP_RANGE, #name, 2); \
+ if (!psb_intel_sdvo_connector->name) return false; \
+ psb_intel_sdvo_connector->name->values[0] = 0; \
+ psb_intel_sdvo_connector->name->values[1] = data_value[0]; \
+ drm_connector_attach_property(connector, \
+ psb_intel_sdvo_connector->name, \
+ psb_intel_sdvo_connector->cur_##name); \
+ DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \
+ data_value[0], data_value[1], response); \
+ } \
+} while(0)
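+
+/*
+ * For orientation, a rough sketch of what ENHANCEMENT(brightness, BRIGHTNESS)
+ * expands to (illustrative, slightly simplified):
+ *
+ *	if (enhancements.brightness) {
+ *		read SDVO_CMD_GET_MAX_BRIGHTNESS into data_value[] and
+ *		SDVO_CMD_GET_BRIGHTNESS into response;
+ *		create a DRM range property "brightness" spanning 0..data_value[0]
+ *		and attach it to the connector at the current value.
+ *	}
+ */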
+
+static bool
+psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo,
+ struct psb_intel_sdvo_connector *psb_intel_sdvo_connector,
+ struct psb_intel_sdvo_enhancements_reply enhancements)
+{
+ struct drm_device *dev = psb_intel_sdvo->base.base.dev;
+ struct drm_connector *connector = &psb_intel_sdvo_connector->base.base;
+ uint16_t response, data_value[2];
+
+	/* When horizontal overscan is supported, add the left/right margin properties */
+ if (enhancements.overscan_h) {
+ if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
+ SDVO_CMD_GET_MAX_OVERSCAN_H,
+ &data_value, 4))
+ return false;
+
+ if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
+ SDVO_CMD_GET_OVERSCAN_H,
+ &response, 2))
+ return false;
+
+ psb_intel_sdvo_connector->max_hscan = data_value[0];
+ psb_intel_sdvo_connector->left_margin = data_value[0] - response;
+ psb_intel_sdvo_connector->right_margin = psb_intel_sdvo_connector->left_margin;
+ psb_intel_sdvo_connector->left =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "left_margin", 2);
+ if (!psb_intel_sdvo_connector->left)
+ return false;
+
+ psb_intel_sdvo_connector->left->values[0] = 0;
+ psb_intel_sdvo_connector->left->values[1] = data_value[0];
+ drm_connector_attach_property(connector,
+ psb_intel_sdvo_connector->left,
+ psb_intel_sdvo_connector->left_margin);
+
+ psb_intel_sdvo_connector->right =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "right_margin", 2);
+ if (!psb_intel_sdvo_connector->right)
+ return false;
+
+ psb_intel_sdvo_connector->right->values[0] = 0;
+ psb_intel_sdvo_connector->right->values[1] = data_value[0];
+ drm_connector_attach_property(connector,
+ psb_intel_sdvo_connector->right,
+ psb_intel_sdvo_connector->right_margin);
+ DRM_DEBUG_KMS("h_overscan: max %d, "
+ "default %d, current %d\n",
+ data_value[0], data_value[1], response);
+ }
+
+ if (enhancements.overscan_v) {
+ if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
+ SDVO_CMD_GET_MAX_OVERSCAN_V,
+ &data_value, 4))
+ return false;
+
+ if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
+ SDVO_CMD_GET_OVERSCAN_V,
+ &response, 2))
+ return false;
+
+ psb_intel_sdvo_connector->max_vscan = data_value[0];
+ psb_intel_sdvo_connector->top_margin = data_value[0] - response;
+ psb_intel_sdvo_connector->bottom_margin = psb_intel_sdvo_connector->top_margin;
+ psb_intel_sdvo_connector->top =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "top_margin", 2);
+ if (!psb_intel_sdvo_connector->top)
+ return false;
+
+ psb_intel_sdvo_connector->top->values[0] = 0;
+ psb_intel_sdvo_connector->top->values[1] = data_value[0];
+ drm_connector_attach_property(connector,
+ psb_intel_sdvo_connector->top,
+ psb_intel_sdvo_connector->top_margin);
+
+ psb_intel_sdvo_connector->bottom =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "bottom_margin", 2);
+ if (!psb_intel_sdvo_connector->bottom)
+ return false;
+
+ psb_intel_sdvo_connector->bottom->values[0] = 0;
+ psb_intel_sdvo_connector->bottom->values[1] = data_value[0];
+ drm_connector_attach_property(connector,
+ psb_intel_sdvo_connector->bottom,
+ psb_intel_sdvo_connector->bottom_margin);
+ DRM_DEBUG_KMS("v_overscan: max %d, "
+ "default %d, current %d\n",
+ data_value[0], data_value[1], response);
+ }
+
+ ENHANCEMENT(hpos, HPOS);
+ ENHANCEMENT(vpos, VPOS);
+ ENHANCEMENT(saturation, SATURATION);
+ ENHANCEMENT(contrast, CONTRAST);
+ ENHANCEMENT(hue, HUE);
+ ENHANCEMENT(sharpness, SHARPNESS);
+ ENHANCEMENT(brightness, BRIGHTNESS);
+ ENHANCEMENT(flicker_filter, FLICKER_FILTER);
+ ENHANCEMENT(flicker_filter_adaptive, FLICKER_FILTER_ADAPTIVE);
+ ENHANCEMENT(flicker_filter_2d, FLICKER_FILTER_2D);
+ ENHANCEMENT(tv_chroma_filter, TV_CHROMA_FILTER);
+ ENHANCEMENT(tv_luma_filter, TV_LUMA_FILTER);
+
+ if (enhancements.dot_crawl) {
+ if (!psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_DOT_CRAWL, &response, 2))
+ return false;
+
+ psb_intel_sdvo_connector->max_dot_crawl = 1;
+ psb_intel_sdvo_connector->cur_dot_crawl = response & 0x1;
+ psb_intel_sdvo_connector->dot_crawl =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE, "dot_crawl", 2);
+ if (!psb_intel_sdvo_connector->dot_crawl)
+ return false;
+
+ psb_intel_sdvo_connector->dot_crawl->values[0] = 0;
+ psb_intel_sdvo_connector->dot_crawl->values[1] = 1;
+ drm_connector_attach_property(connector,
+ psb_intel_sdvo_connector->dot_crawl,
+ psb_intel_sdvo_connector->cur_dot_crawl);
+ DRM_DEBUG_KMS("dot crawl: current %d\n", response);
+ }
+
+ return true;
+}
+
+static bool
+psb_intel_sdvo_create_enhance_property_lvds(struct psb_intel_sdvo *psb_intel_sdvo,
+ struct psb_intel_sdvo_connector *psb_intel_sdvo_connector,
+ struct psb_intel_sdvo_enhancements_reply enhancements)
+{
+ struct drm_device *dev = psb_intel_sdvo->base.base.dev;
+ struct drm_connector *connector = &psb_intel_sdvo_connector->base.base;
+ uint16_t response, data_value[2];
+
+ ENHANCEMENT(brightness, BRIGHTNESS);
+
+ return true;
+}
+#undef ENHANCEMENT
+
+static bool psb_intel_sdvo_create_enhance_property(struct psb_intel_sdvo *psb_intel_sdvo,
+ struct psb_intel_sdvo_connector *psb_intel_sdvo_connector)
+{
+ union {
+ struct psb_intel_sdvo_enhancements_reply reply;
+ uint16_t response;
+ } enhancements;
+
+ BUILD_BUG_ON(sizeof(enhancements) != 2);
+
+ enhancements.response = 0;
+ psb_intel_sdvo_get_value(psb_intel_sdvo,
+ SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
+ &enhancements, sizeof(enhancements));
+ if (enhancements.response == 0) {
+ DRM_DEBUG_KMS("No enhancement is supported\n");
+ return true;
+ }
+
+ if (IS_TV(psb_intel_sdvo_connector))
+ return psb_intel_sdvo_create_enhance_property_tv(psb_intel_sdvo, psb_intel_sdvo_connector, enhancements.reply);
+	else if (IS_LVDS(psb_intel_sdvo_connector))
+ return psb_intel_sdvo_create_enhance_property_lvds(psb_intel_sdvo, psb_intel_sdvo_connector, enhancements.reply);
+ else
+ return true;
+}
+
+static int psb_intel_sdvo_ddc_proxy_xfer(struct i2c_adapter *adapter,
+ struct i2c_msg *msgs,
+ int num)
+{
+ struct psb_intel_sdvo *sdvo = adapter->algo_data;
+
+ if (!psb_intel_sdvo_set_control_bus_switch(sdvo, sdvo->ddc_bus))
+ return -EIO;
+
+ return sdvo->i2c->algo->master_xfer(sdvo->i2c, msgs, num);
+}
+
+static u32 psb_intel_sdvo_ddc_proxy_func(struct i2c_adapter *adapter)
+{
+ struct psb_intel_sdvo *sdvo = adapter->algo_data;
+ return sdvo->i2c->algo->functionality(sdvo->i2c);
+}
+
+static const struct i2c_algorithm psb_intel_sdvo_ddc_proxy = {
+ .master_xfer = psb_intel_sdvo_ddc_proxy_xfer,
+ .functionality = psb_intel_sdvo_ddc_proxy_func
+};
+
+static bool
+psb_intel_sdvo_init_ddc_proxy(struct psb_intel_sdvo *sdvo,
+ struct drm_device *dev)
+{
+ sdvo->ddc.owner = THIS_MODULE;
+ sdvo->ddc.class = I2C_CLASS_DDC;
+ snprintf(sdvo->ddc.name, I2C_NAME_SIZE, "SDVO DDC proxy");
+ sdvo->ddc.dev.parent = &dev->pdev->dev;
+ sdvo->ddc.algo_data = sdvo;
+ sdvo->ddc.algo = &psb_intel_sdvo_ddc_proxy;
+
+ return i2c_add_adapter(&sdvo->ddc) == 0;
+}
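+
+/*
+ * A sketch of how the proxy is consumed elsewhere (illustrative, not part of
+ * this file): detection code can hand &psb_intel_sdvo->ddc to the generic
+ * EDID helpers, and every transfer will first re-select the encoder's DDC
+ * bus via SDVO_CMD_SET_CONTROL_BUS_SWITCH before touching the wire.
+ *
+ *	struct edid *edid = drm_get_edid(connector, &psb_intel_sdvo->ddc);
+ *	if (edid) {
+ *		drm_mode_connector_update_edid_property(connector, edid);
+ *		drm_add_edid_modes(connector, edid);
+ *		kfree(edid);
+ *	}
+ */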
+
+bool psb_intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_encoder *psb_intel_encoder;
+ struct psb_intel_sdvo *psb_intel_sdvo;
+ int i;
+
+ psb_intel_sdvo = kzalloc(sizeof(struct psb_intel_sdvo), GFP_KERNEL);
+ if (!psb_intel_sdvo)
+ return false;
+
+ psb_intel_sdvo->sdvo_reg = sdvo_reg;
+ psb_intel_sdvo->slave_addr = psb_intel_sdvo_get_slave_addr(dev, sdvo_reg) >> 1;
+ psb_intel_sdvo_select_i2c_bus(dev_priv, psb_intel_sdvo, sdvo_reg);
+ if (!psb_intel_sdvo_init_ddc_proxy(psb_intel_sdvo, dev)) {
+ kfree(psb_intel_sdvo);
+ return false;
+ }
+
+ /* encoder type will be decided later */
+ psb_intel_encoder = &psb_intel_sdvo->base;
+ psb_intel_encoder->type = INTEL_OUTPUT_SDVO;
+ drm_encoder_init(dev, &psb_intel_encoder->base, &psb_intel_sdvo_enc_funcs, 0);
+
+ /* Read the regs to test if we can talk to the device */
+ for (i = 0; i < 0x40; i++) {
+ u8 byte;
+
+ if (!psb_intel_sdvo_read_byte(psb_intel_sdvo, i, &byte)) {
+ DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n",
+ IS_SDVOB(sdvo_reg) ? 'B' : 'C');
+ goto err;
+ }
+ }
+
+ if (IS_SDVOB(sdvo_reg))
+ dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
+ else
+ dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
+
+ drm_encoder_helper_add(&psb_intel_encoder->base, &psb_intel_sdvo_helper_funcs);
+
+ /* In default case sdvo lvds is false */
+ if (!psb_intel_sdvo_get_capabilities(psb_intel_sdvo, &psb_intel_sdvo->caps))
+ goto err;
+
+ if (psb_intel_sdvo_output_setup(psb_intel_sdvo,
+ psb_intel_sdvo->caps.output_flags) != true) {
+ DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
+ IS_SDVOB(sdvo_reg) ? 'B' : 'C');
+ goto err;
+ }
+
+ psb_intel_sdvo_select_ddc_bus(dev_priv, psb_intel_sdvo, sdvo_reg);
+
+ /* Set the input timing to the screen. Assume always input 0. */
+ if (!psb_intel_sdvo_set_target_input(psb_intel_sdvo))
+ goto err;
+
+ if (!psb_intel_sdvo_get_input_pixel_clock_range(psb_intel_sdvo,
+ &psb_intel_sdvo->pixel_clock_min,
+ &psb_intel_sdvo->pixel_clock_max))
+ goto err;
+
+ DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, "
+ "clock range %dMHz - %dMHz, "
+ "input 1: %c, input 2: %c, "
+ "output 1: %c, output 2: %c\n",
+ SDVO_NAME(psb_intel_sdvo),
+ psb_intel_sdvo->caps.vendor_id, psb_intel_sdvo->caps.device_id,
+ psb_intel_sdvo->caps.device_rev_id,
+ psb_intel_sdvo->pixel_clock_min / 1000,
+ psb_intel_sdvo->pixel_clock_max / 1000,
+ (psb_intel_sdvo->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N',
+ (psb_intel_sdvo->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N',
+ /* check currently supported outputs */
+ psb_intel_sdvo->caps.output_flags &
+ (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0) ? 'Y' : 'N',
+ psb_intel_sdvo->caps.output_flags &
+ (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');
+ return true;
+
+err:
+ drm_encoder_cleanup(&psb_intel_encoder->base);
+ i2c_del_adapter(&psb_intel_sdvo->ddc);
+ kfree(psb_intel_sdvo);
+
+ return false;
+}
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo_regs.h b/drivers/gpu/drm/gma500/psb_intel_sdvo_regs.h
new file mode 100644
index 000000000000..600e79744d68
--- /dev/null
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo_regs.h
@@ -0,0 +1,723 @@
+/*
+ * Copyright © 2006-2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ */
+
+/**
+ * @file SDVO command definitions and structures.
+ */
+
+#define SDVO_OUTPUT_FIRST (0)
+#define SDVO_OUTPUT_TMDS0 (1 << 0)
+#define SDVO_OUTPUT_RGB0 (1 << 1)
+#define SDVO_OUTPUT_CVBS0 (1 << 2)
+#define SDVO_OUTPUT_SVID0 (1 << 3)
+#define SDVO_OUTPUT_YPRPB0 (1 << 4)
+#define SDVO_OUTPUT_SCART0 (1 << 5)
+#define SDVO_OUTPUT_LVDS0 (1 << 6)
+#define SDVO_OUTPUT_TMDS1 (1 << 8)
+#define SDVO_OUTPUT_RGB1 (1 << 9)
+#define SDVO_OUTPUT_CVBS1 (1 << 10)
+#define SDVO_OUTPUT_SVID1 (1 << 11)
+#define SDVO_OUTPUT_YPRPB1 (1 << 12)
+#define SDVO_OUTPUT_SCART1 (1 << 13)
+#define SDVO_OUTPUT_LVDS1 (1 << 14)
+#define SDVO_OUTPUT_LAST (14)
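+
+/*
+ * The output setup code pairs each device-0 flag with its device-1
+ * counterpart; the *_MASK values it tests are defined elsewhere in the
+ * driver and amount to (illustrative):
+ *
+ *	SDVO_TMDS_MASK = SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1
+ *	SDVO_RGB_MASK  = SDVO_OUTPUT_RGB0  | SDVO_OUTPUT_RGB1
+ *	SDVO_LVDS_MASK = SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1
+ */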
+
+struct psb_intel_sdvo_caps {
+ u8 vendor_id;
+ u8 device_id;
+ u8 device_rev_id;
+ u8 sdvo_version_major;
+ u8 sdvo_version_minor;
+ unsigned int sdvo_inputs_mask:2;
+ unsigned int smooth_scaling:1;
+ unsigned int sharp_scaling:1;
+ unsigned int up_scaling:1;
+ unsigned int down_scaling:1;
+ unsigned int stall_support:1;
+ unsigned int pad:1;
+ u16 output_flags;
+} __attribute__((packed));
+
+/** This matches the EDID DTD structure, more or less */
+struct psb_intel_sdvo_dtd {
+ struct {
+ u16 clock; /**< pixel clock, in 10kHz units */
+ u8 h_active; /**< lower 8 bits (pixels) */
+ u8 h_blank; /**< lower 8 bits (pixels) */
+ u8 h_high; /**< upper 4 bits each h_active, h_blank */
+ u8 v_active; /**< lower 8 bits (lines) */
+ u8 v_blank; /**< lower 8 bits (lines) */
+ u8 v_high; /**< upper 4 bits each v_active, v_blank */
+ } part1;
+
+ struct {
+ u8 h_sync_off; /**< lower 8 bits, from hblank start */
+ u8 h_sync_width; /**< lower 8 bits (pixels) */
+ /** lower 4 bits each vsync offset, vsync width */
+ u8 v_sync_off_width;
+ /**
+ * 2 high bits of hsync offset, 2 high bits of hsync width,
+ * bits 4-5 of vsync offset, and 2 high bits of vsync width.
+ */
+ u8 sync_off_width_high;
+ u8 dtd_flags;
+ u8 sdvo_flags;
+ /** bits 6-7 of vsync offset at bits 6-7 */
+ u8 v_sync_off_high;
+ u8 reserved;
+ } part2;
+} __attribute__((packed));
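+
+/*
+ * Illustrative helper, not used by the driver: recover the full 12-bit
+ * horizontal active width from the split DTD fields above. This assumes the
+ * usual SDVO packing, with the h_active high bits in the upper nibble of
+ * h_high and the h_blank high bits in the lower nibble.
+ */
+static inline u16 psb_intel_sdvo_dtd_h_active(const struct psb_intel_sdvo_dtd *dtd)
+{
+	return ((u16)(dtd->part1.h_high >> 4) << 8) | dtd->part1.h_active;
+}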
+
+struct psb_intel_sdvo_pixel_clock_range {
+ u16 min; /**< pixel clock, in 10kHz units */
+ u16 max; /**< pixel clock, in 10kHz units */
+} __attribute__((packed));
+
+struct psb_intel_sdvo_preferred_input_timing_args {
+ u16 clock;
+ u16 width;
+ u16 height;
+ u8 interlace:1;
+ u8 scaled:1;
+ u8 pad:6;
+} __attribute__((packed));
+
+/* I2C registers for SDVO */
+#define SDVO_I2C_ARG_0 0x07
+#define SDVO_I2C_ARG_1 0x06
+#define SDVO_I2C_ARG_2 0x05
+#define SDVO_I2C_ARG_3 0x04
+#define SDVO_I2C_ARG_4 0x03
+#define SDVO_I2C_ARG_5 0x02
+#define SDVO_I2C_ARG_6 0x01
+#define SDVO_I2C_ARG_7 0x00
+#define SDVO_I2C_OPCODE 0x08
+#define SDVO_I2C_CMD_STATUS 0x09
+#define SDVO_I2C_RETURN_0 0x0a
+#define SDVO_I2C_RETURN_1 0x0b
+#define SDVO_I2C_RETURN_2 0x0c
+#define SDVO_I2C_RETURN_3 0x0d
+#define SDVO_I2C_RETURN_4 0x0e
+#define SDVO_I2C_RETURN_5 0x0f
+#define SDVO_I2C_RETURN_6 0x10
+#define SDVO_I2C_RETURN_7 0x11
+#define SDVO_I2C_VENDOR_BEGIN 0x20
+
+/* Status results */
+#define SDVO_CMD_STATUS_POWER_ON 0x0
+#define SDVO_CMD_STATUS_SUCCESS 0x1
+#define SDVO_CMD_STATUS_NOTSUPP 0x2
+#define SDVO_CMD_STATUS_INVALID_ARG 0x3
+#define SDVO_CMD_STATUS_PENDING 0x4
+#define SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED 0x5
+#define SDVO_CMD_STATUS_SCALING_NOT_SUPP 0x6
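+
+/*
+ * A command transaction over the registers above works roughly as follows
+ * (a sketch; the driver wraps this in its write_cmd/read_response helpers):
+ *
+ *	1. write the argument bytes into SDVO_I2C_ARG_0..SDVO_I2C_ARG_7
+ *	2. write the opcode into SDVO_I2C_OPCODE
+ *	3. poll SDVO_I2C_CMD_STATUS until it leaves SDVO_CMD_STATUS_PENDING
+ *	4. on SDVO_CMD_STATUS_SUCCESS, read the reply from SDVO_I2C_RETURN_0..7
+ */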
+
+/* SDVO commands, argument/result registers */
+
+#define SDVO_CMD_RESET 0x01
+
+/** Returns a struct intel_sdvo_caps */
+#define SDVO_CMD_GET_DEVICE_CAPS 0x02
+
+#define SDVO_CMD_GET_FIRMWARE_REV 0x86
+# define SDVO_DEVICE_FIRMWARE_MINOR SDVO_I2C_RETURN_0
+# define SDVO_DEVICE_FIRMWARE_MAJOR SDVO_I2C_RETURN_1
+# define SDVO_DEVICE_FIRMWARE_PATCH SDVO_I2C_RETURN_2
+
+/**
+ * Reports which inputs are trained (managed to sync).
+ *
+ * Devices must have trained within 2 vsyncs of a mode change.
+ */
+#define SDVO_CMD_GET_TRAINED_INPUTS 0x03
+struct psb_intel_sdvo_get_trained_inputs_response {
+ unsigned int input0_trained:1;
+ unsigned int input1_trained:1;
+ unsigned int pad:6;
+} __attribute__((packed));
+
+/** Returns a struct intel_sdvo_output_flags of active outputs. */
+#define SDVO_CMD_GET_ACTIVE_OUTPUTS 0x04
+
+/**
+ * Sets the current set of active outputs.
+ *
+ * Takes a struct intel_sdvo_output_flags. Must be preceded by a SET_IN_OUT_MAP
+ * on multi-output devices.
+ */
+#define SDVO_CMD_SET_ACTIVE_OUTPUTS 0x05
+
+/**
+ * Returns the current mapping of SDVO inputs to outputs on the device.
+ *
+ * Returns two struct intel_sdvo_output_flags structures.
+ */
+#define SDVO_CMD_GET_IN_OUT_MAP 0x06
+struct psb_intel_sdvo_in_out_map {
+ u16 in0, in1;
+};
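+
+/*
+ * For example (illustrative only), routing SDVO input 0 to the TMDS0 output
+ * while leaving input 1 unused would be expressed as:
+ *
+ *	struct psb_intel_sdvo_in_out_map map = {
+ *		.in0 = SDVO_OUTPUT_TMDS0,
+ *		.in1 = 0,
+ *	};
+ */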
+
+/**
+ * Sets the current mapping of SDVO inputs to outputs on the device.
+ *
+ * Takes two struct intel_sdvo_output_flags structures.
+ */
+#define SDVO_CMD_SET_IN_OUT_MAP 0x07
+
+/**
+ * Returns a struct intel_sdvo_output_flags of attached displays.
+ */
+#define SDVO_CMD_GET_ATTACHED_DISPLAYS 0x0b
+
+/**
+ * Returns a struct intel_sdvo_output_flags of displays supporting hot plugging.
+ */
+#define SDVO_CMD_GET_HOT_PLUG_SUPPORT 0x0c
+
+/**
+ * Takes a struct intel_sdvo_output_flags.
+ */
+#define SDVO_CMD_SET_ACTIVE_HOT_PLUG 0x0d
+
+/**
+ * Returns a struct intel_sdvo_output_flags of displays with hot plug
+ * interrupts enabled.
+ */
+#define SDVO_CMD_GET_ACTIVE_HOT_PLUG 0x0e
+
+#define SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE 0x0f
+struct intel_sdvo_get_interrupt_event_source_response {
+ u16 interrupt_status;
+ unsigned int ambient_light_interrupt:1;
+ unsigned int hdmi_audio_encrypt_change:1;
+ unsigned int pad:6;
+} __attribute__((packed));
+
+/**
+ * Selects which input is affected by future input commands.
+ *
+ * Commands affected include SET_INPUT_TIMINGS_PART[12],
+ * GET_INPUT_TIMINGS_PART[12], GET_PREFERRED_INPUT_TIMINGS_PART[12],
+ * GET_INPUT_PIXEL_CLOCK_RANGE, and CREATE_PREFERRED_INPUT_TIMINGS.
+ */
+#define SDVO_CMD_SET_TARGET_INPUT 0x10
+struct psb_intel_sdvo_set_target_input_args {
+ unsigned int target_1:1;
+ unsigned int pad:7;
+} __attribute__((packed));
+
+/**
+ * Takes a struct intel_sdvo_output_flags of which outputs are targeted by
+ * future output commands.
+ *
+ * Affected commands include SET_OUTPUT_TIMINGS_PART[12],
+ * GET_OUTPUT_TIMINGS_PART[12], and GET_OUTPUT_PIXEL_CLOCK_RANGE.
+ */
+#define SDVO_CMD_SET_TARGET_OUTPUT 0x11
+
+#define SDVO_CMD_GET_INPUT_TIMINGS_PART1 0x12
+#define SDVO_CMD_GET_INPUT_TIMINGS_PART2 0x13
+#define SDVO_CMD_SET_INPUT_TIMINGS_PART1 0x14
+#define SDVO_CMD_SET_INPUT_TIMINGS_PART2 0x15
+#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART1 0x16
+#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART2 0x17
+#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART1 0x18
+#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART2 0x19
+/* Part 1 */
+# define SDVO_DTD_CLOCK_LOW SDVO_I2C_ARG_0
+# define SDVO_DTD_CLOCK_HIGH SDVO_I2C_ARG_1
+# define SDVO_DTD_H_ACTIVE SDVO_I2C_ARG_2
+# define SDVO_DTD_H_BLANK SDVO_I2C_ARG_3
+# define SDVO_DTD_H_HIGH SDVO_I2C_ARG_4
+# define SDVO_DTD_V_ACTIVE SDVO_I2C_ARG_5
+# define SDVO_DTD_V_BLANK SDVO_I2C_ARG_6
+# define SDVO_DTD_V_HIGH SDVO_I2C_ARG_7
+/* Part 2 */
+# define SDVO_DTD_HSYNC_OFF SDVO_I2C_ARG_0
+# define SDVO_DTD_HSYNC_WIDTH SDVO_I2C_ARG_1
+# define SDVO_DTD_VSYNC_OFF_WIDTH SDVO_I2C_ARG_2
+# define SDVO_DTD_SYNC_OFF_WIDTH_HIGH SDVO_I2C_ARG_3
+# define SDVO_DTD_DTD_FLAGS SDVO_I2C_ARG_4
+# define SDVO_DTD_DTD_FLAG_INTERLACED (1 << 7)
+# define SDVO_DTD_DTD_FLAG_STEREO_MASK (3 << 5)
+# define SDVO_DTD_DTD_FLAG_INPUT_MASK (3 << 3)
+# define SDVO_DTD_DTD_FLAG_SYNC_MASK (3 << 1)
+# define SDVO_DTD_SDVO_FLAS SDVO_I2C_ARG_5
+# define SDVO_DTD_SDVO_FLAG_STALL (1 << 7)
+# define SDVO_DTD_SDVO_FLAG_CENTERED (0 << 6)
+# define SDVO_DTD_SDVO_FLAG_UPPER_LEFT (1 << 6)
+# define SDVO_DTD_SDVO_FLAG_SCALING_MASK (3 << 4)
+# define SDVO_DTD_SDVO_FLAG_SCALING_NONE (0 << 4)
+# define SDVO_DTD_SDVO_FLAG_SCALING_SHARP (1 << 4)
+# define SDVO_DTD_SDVO_FLAG_SCALING_SMOOTH (2 << 4)
+# define SDVO_DTD_VSYNC_OFF_HIGH SDVO_I2C_ARG_6
+
+/**
+ * Generates a DTD based on the given width, height, and flags.
+ *
+ * This will be supported by any device supporting scaling or interlaced
+ * modes.
+ */
+#define SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING 0x1a
+# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_LOW SDVO_I2C_ARG_0
+# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_HIGH SDVO_I2C_ARG_1
+# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_LOW SDVO_I2C_ARG_2
+# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_HIGH SDVO_I2C_ARG_3
+# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_LOW SDVO_I2C_ARG_4
+# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_HIGH SDVO_I2C_ARG_5
+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS SDVO_I2C_ARG_6
+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_INTERLACED (1 << 0)
+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_SCALED (1 << 1)
+
+#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1 0x1b
+#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2 0x1c
+
+/** Returns a struct intel_sdvo_pixel_clock_range */
+#define SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE 0x1d
+/** Returns a struct intel_sdvo_pixel_clock_range */
+#define SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE 0x1e
+
+/** Returns a byte bitfield containing SDVO_CLOCK_RATE_MULT_* flags */
+#define SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS 0x1f
+
+/** Returns a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
+#define SDVO_CMD_GET_CLOCK_RATE_MULT 0x20
+/** Takes a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
+#define SDVO_CMD_SET_CLOCK_RATE_MULT 0x21
+# define SDVO_CLOCK_RATE_MULT_1X (1 << 0)
+# define SDVO_CLOCK_RATE_MULT_2X (1 << 1)
+# define SDVO_CLOCK_RATE_MULT_4X (1 << 3)
+
+#define SDVO_CMD_GET_SUPPORTED_TV_FORMATS 0x27
+/** 6 bytes of bit flags for TV formats shared by all TV format functions */
+struct psb_intel_sdvo_tv_format {
+ unsigned int ntsc_m:1;
+ unsigned int ntsc_j:1;
+ unsigned int ntsc_443:1;
+ unsigned int pal_b:1;
+ unsigned int pal_d:1;
+ unsigned int pal_g:1;
+ unsigned int pal_h:1;
+ unsigned int pal_i:1;
+
+ unsigned int pal_m:1;
+ unsigned int pal_n:1;
+ unsigned int pal_nc:1;
+ unsigned int pal_60:1;
+ unsigned int secam_b:1;
+ unsigned int secam_d:1;
+ unsigned int secam_g:1;
+ unsigned int secam_k:1;
+
+ unsigned int secam_k1:1;
+ unsigned int secam_l:1;
+ unsigned int secam_60:1;
+ unsigned int hdtv_std_smpte_240m_1080i_59:1;
+ unsigned int hdtv_std_smpte_240m_1080i_60:1;
+ unsigned int hdtv_std_smpte_260m_1080i_59:1;
+ unsigned int hdtv_std_smpte_260m_1080i_60:1;
+ unsigned int hdtv_std_smpte_274m_1080i_50:1;
+
+ unsigned int hdtv_std_smpte_274m_1080i_59:1;
+ unsigned int hdtv_std_smpte_274m_1080i_60:1;
+ unsigned int hdtv_std_smpte_274m_1080p_23:1;
+ unsigned int hdtv_std_smpte_274m_1080p_24:1;
+ unsigned int hdtv_std_smpte_274m_1080p_25:1;
+ unsigned int hdtv_std_smpte_274m_1080p_29:1;
+ unsigned int hdtv_std_smpte_274m_1080p_30:1;
+ unsigned int hdtv_std_smpte_274m_1080p_50:1;
+
+ unsigned int hdtv_std_smpte_274m_1080p_59:1;
+ unsigned int hdtv_std_smpte_274m_1080p_60:1;
+ unsigned int hdtv_std_smpte_295m_1080i_50:1;
+ unsigned int hdtv_std_smpte_295m_1080p_50:1;
+ unsigned int hdtv_std_smpte_296m_720p_59:1;
+ unsigned int hdtv_std_smpte_296m_720p_60:1;
+ unsigned int hdtv_std_smpte_296m_720p_50:1;
+ unsigned int hdtv_std_smpte_293m_480p_59:1;
+
+ unsigned int hdtv_std_smpte_170m_480i_59:1;
+ unsigned int hdtv_std_iturbt601_576i_50:1;
+ unsigned int hdtv_std_iturbt601_576p_50:1;
+ unsigned int hdtv_std_eia_7702a_480i_60:1;
+ unsigned int hdtv_std_eia_7702a_480p_60:1;
+ unsigned int pad:3;
+} __attribute__((packed));
+
+#define SDVO_CMD_GET_TV_FORMAT 0x28
+
+#define SDVO_CMD_SET_TV_FORMAT 0x29
+
+/** Returns the resolutions that can be used with the given TV format */
+#define SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT 0x83
+struct psb_intel_sdvo_sdtv_resolution_request {
+ unsigned int ntsc_m:1;
+ unsigned int ntsc_j:1;
+ unsigned int ntsc_443:1;
+ unsigned int pal_b:1;
+ unsigned int pal_d:1;
+ unsigned int pal_g:1;
+ unsigned int pal_h:1;
+ unsigned int pal_i:1;
+
+ unsigned int pal_m:1;
+ unsigned int pal_n:1;
+ unsigned int pal_nc:1;
+ unsigned int pal_60:1;
+ unsigned int secam_b:1;
+ unsigned int secam_d:1;
+ unsigned int secam_g:1;
+ unsigned int secam_k:1;
+
+ unsigned int secam_k1:1;
+ unsigned int secam_l:1;
+ unsigned int secam_60:1;
+ unsigned int pad:5;
+} __attribute__((packed));
+
+struct psb_intel_sdvo_sdtv_resolution_reply {
+ unsigned int res_320x200:1;
+ unsigned int res_320x240:1;
+ unsigned int res_400x300:1;
+ unsigned int res_640x350:1;
+ unsigned int res_640x400:1;
+ unsigned int res_640x480:1;
+ unsigned int res_704x480:1;
+ unsigned int res_704x576:1;
+
+ unsigned int res_720x350:1;
+ unsigned int res_720x400:1;
+ unsigned int res_720x480:1;
+ unsigned int res_720x540:1;
+ unsigned int res_720x576:1;
+ unsigned int res_768x576:1;
+ unsigned int res_800x600:1;
+ unsigned int res_832x624:1;
+
+ unsigned int res_920x766:1;
+ unsigned int res_1024x768:1;
+ unsigned int res_1280x1024:1;
+ unsigned int pad:5;
+} __attribute__((packed));
+
+/* Get the supported resolutions with a square pixel aspect ratio that can be
+   scaled for the requested HDTV format */
+#define SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT 0x85
+
+struct psb_intel_sdvo_hdtv_resolution_request {
+ unsigned int hdtv_std_smpte_240m_1080i_59:1;
+ unsigned int hdtv_std_smpte_240m_1080i_60:1;
+ unsigned int hdtv_std_smpte_260m_1080i_59:1;
+ unsigned int hdtv_std_smpte_260m_1080i_60:1;
+ unsigned int hdtv_std_smpte_274m_1080i_50:1;
+ unsigned int hdtv_std_smpte_274m_1080i_59:1;
+ unsigned int hdtv_std_smpte_274m_1080i_60:1;
+ unsigned int hdtv_std_smpte_274m_1080p_23:1;
+
+ unsigned int hdtv_std_smpte_274m_1080p_24:1;
+ unsigned int hdtv_std_smpte_274m_1080p_25:1;
+ unsigned int hdtv_std_smpte_274m_1080p_29:1;
+ unsigned int hdtv_std_smpte_274m_1080p_30:1;
+ unsigned int hdtv_std_smpte_274m_1080p_50:1;
+ unsigned int hdtv_std_smpte_274m_1080p_59:1;
+ unsigned int hdtv_std_smpte_274m_1080p_60:1;
+ unsigned int hdtv_std_smpte_295m_1080i_50:1;
+
+ unsigned int hdtv_std_smpte_295m_1080p_50:1;
+ unsigned int hdtv_std_smpte_296m_720p_59:1;
+ unsigned int hdtv_std_smpte_296m_720p_60:1;
+ unsigned int hdtv_std_smpte_296m_720p_50:1;
+ unsigned int hdtv_std_smpte_293m_480p_59:1;
+ unsigned int hdtv_std_smpte_170m_480i_59:1;
+ unsigned int hdtv_std_iturbt601_576i_50:1;
+ unsigned int hdtv_std_iturbt601_576p_50:1;
+
+ unsigned int hdtv_std_eia_7702a_480i_60:1;
+ unsigned int hdtv_std_eia_7702a_480p_60:1;
+ unsigned int pad:6;
+} __attribute__((packed));
+
+struct psb_intel_sdvo_hdtv_resolution_reply {
+ unsigned int res_640x480:1;
+ unsigned int res_800x600:1;
+ unsigned int res_1024x768:1;
+ unsigned int res_1280x960:1;
+ unsigned int res_1400x1050:1;
+ unsigned int res_1600x1200:1;
+ unsigned int res_1920x1440:1;
+ unsigned int res_2048x1536:1;
+
+ unsigned int res_2560x1920:1;
+ unsigned int res_3200x2400:1;
+ unsigned int res_3840x2880:1;
+ unsigned int pad1:5;
+
+ unsigned int res_848x480:1;
+ unsigned int res_1064x600:1;
+ unsigned int res_1280x720:1;
+ unsigned int res_1360x768:1;
+ unsigned int res_1704x960:1;
+ unsigned int res_1864x1050:1;
+ unsigned int res_1920x1080:1;
+ unsigned int res_2128x1200:1;
+
+ unsigned int res_2560x1400:1;
+ unsigned int res_2728x1536:1;
+ unsigned int res_3408x1920:1;
+ unsigned int res_4264x2400:1;
+ unsigned int res_5120x2880:1;
+ unsigned int pad2:3;
+
+ unsigned int res_768x480:1;
+ unsigned int res_960x600:1;
+ unsigned int res_1152x720:1;
+ unsigned int res_1124x768:1;
+ unsigned int res_1536x960:1;
+ unsigned int res_1680x1050:1;
+ unsigned int res_1728x1080:1;
+ unsigned int res_1920x1200:1;
+
+ unsigned int res_2304x1440:1;
+ unsigned int res_2456x1536:1;
+ unsigned int res_3072x1920:1;
+ unsigned int res_3840x2400:1;
+ unsigned int res_4608x2880:1;
+ unsigned int pad3:3;
+
+ unsigned int res_1280x1024:1;
+ unsigned int pad4:7;
+
+ unsigned int res_1280x768:1;
+ unsigned int pad5:7;
+} __attribute__((packed));
+
+/* Get supported power states; the reply covers both encoder and monitor and
+   relies on the last SetTargetInput and SetTargetOutput calls */
+#define SDVO_CMD_GET_SUPPORTED_POWER_STATES 0x2a
+/* Get the current power state; the reply covers both encoder and monitor and
+   relies on the last SetTargetInput and SetTargetOutput calls */
+#define SDVO_CMD_GET_POWER_STATE 0x2b
+#define SDVO_CMD_GET_ENCODER_POWER_STATE 0x2b
+#define SDVO_CMD_SET_ENCODER_POWER_STATE 0x2c
+# define SDVO_ENCODER_STATE_ON (1 << 0)
+# define SDVO_ENCODER_STATE_STANDBY (1 << 1)
+# define SDVO_ENCODER_STATE_SUSPEND (1 << 2)
+# define SDVO_ENCODER_STATE_OFF (1 << 3)
+# define SDVO_MONITOR_STATE_ON (1 << 4)
+# define SDVO_MONITOR_STATE_STANDBY (1 << 5)
+# define SDVO_MONITOR_STATE_SUSPEND (1 << 6)
+# define SDVO_MONITOR_STATE_OFF (1 << 7)
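+
+/*
+ * The encoder state occupies the low nibble and the monitor state the high
+ * nibble, so e.g. a reply of 0x11 (SDVO_ENCODER_STATE_ON |
+ * SDVO_MONITOR_STATE_ON) means both are fully powered (illustrative reading).
+ */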
+
+#define SDVO_CMD_GET_MAX_PANEL_POWER_SEQUENCING 0x2d
+#define SDVO_CMD_GET_PANEL_POWER_SEQUENCING 0x2e
+#define SDVO_CMD_SET_PANEL_POWER_SEQUENCING 0x2f
+/**
+ * The panel power sequencing parameters are in units of milliseconds.
+ * The high fields are bits 8:9 of the 10-bit values.
+ */
+struct psb_sdvo_panel_power_sequencing {
+ u8 t0;
+ u8 t1;
+ u8 t2;
+ u8 t3;
+ u8 t4;
+
+ unsigned int t0_high:2;
+ unsigned int t1_high:2;
+ unsigned int t2_high:2;
+ unsigned int t3_high:2;
+
+ unsigned int t4_high:2;
+ unsigned int pad:6;
+} __attribute__((packed));
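+
+/*
+ * Illustrative only: the full 10-bit delays are reassembled from the split
+ * fields above, e.g. for t0 (in milliseconds):
+ *
+ *	t0_ms = ((u16)seq.t0_high << 8) | seq.t0;
+ */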
+
+#define SDVO_CMD_GET_MAX_BACKLIGHT_LEVEL 0x30
+struct sdvo_max_backlight_reply {
+ u8 max_value;
+ u8 default_value;
+} __attribute__((packed));
+
+#define SDVO_CMD_GET_BACKLIGHT_LEVEL 0x31
+#define SDVO_CMD_SET_BACKLIGHT_LEVEL 0x32
+
+#define SDVO_CMD_GET_AMBIENT_LIGHT 0x33
+struct sdvo_get_ambient_light_reply {
+ u16 trip_low;
+ u16 trip_high;
+ u16 value;
+} __attribute__((packed));
+#define SDVO_CMD_SET_AMBIENT_LIGHT 0x34
+struct sdvo_set_ambient_light_reply {
+ u16 trip_low;
+ u16 trip_high;
+ unsigned int enable:1;
+ unsigned int pad:7;
+} __attribute__((packed));
+
+/* Set display power state */
+#define SDVO_CMD_SET_DISPLAY_POWER_STATE 0x7d
+# define SDVO_DISPLAY_STATE_ON (1 << 0)
+# define SDVO_DISPLAY_STATE_STANDBY (1 << 1)
+# define SDVO_DISPLAY_STATE_SUSPEND (1 << 2)
+# define SDVO_DISPLAY_STATE_OFF (1 << 3)
+
+#define SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS 0x84
+struct psb_intel_sdvo_enhancements_reply {
+ unsigned int flicker_filter:1;
+ unsigned int flicker_filter_adaptive:1;
+ unsigned int flicker_filter_2d:1;
+ unsigned int saturation:1;
+ unsigned int hue:1;
+ unsigned int brightness:1;
+ unsigned int contrast:1;
+ unsigned int overscan_h:1;
+
+ unsigned int overscan_v:1;
+ unsigned int hpos:1;
+ unsigned int vpos:1;
+ unsigned int sharpness:1;
+ unsigned int dot_crawl:1;
+ unsigned int dither:1;
+ unsigned int tv_chroma_filter:1;
+ unsigned int tv_luma_filter:1;
+} __attribute__((packed));
+
+/* The picture enhancement limits below depend on the current TV format,
+ * and so must be queried and set after the TV format has been chosen.
+ */
+#define SDVO_CMD_GET_MAX_FLICKER_FILTER 0x4d
+#define SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE 0x7b
+#define SDVO_CMD_GET_MAX_FLICKER_FILTER_2D 0x52
+#define SDVO_CMD_GET_MAX_SATURATION 0x55
+#define SDVO_CMD_GET_MAX_HUE 0x58
+#define SDVO_CMD_GET_MAX_BRIGHTNESS 0x5b
+#define SDVO_CMD_GET_MAX_CONTRAST 0x5e
+#define SDVO_CMD_GET_MAX_OVERSCAN_H 0x61
+#define SDVO_CMD_GET_MAX_OVERSCAN_V 0x64
+#define SDVO_CMD_GET_MAX_HPOS 0x67
+#define SDVO_CMD_GET_MAX_VPOS 0x6a
+#define SDVO_CMD_GET_MAX_SHARPNESS 0x6d
+#define SDVO_CMD_GET_MAX_TV_CHROMA_FILTER 0x74
+#define SDVO_CMD_GET_MAX_TV_LUMA_FILTER 0x77
+struct psb_intel_sdvo_enhancement_limits_reply {
+ u16 max_value;
+ u16 default_value;
+} __attribute__((packed));
+
+#define SDVO_CMD_GET_LVDS_PANEL_INFORMATION 0x7f
+#define SDVO_CMD_SET_LVDS_PANEL_INFORMATION 0x80
+# define SDVO_LVDS_COLOR_DEPTH_18 (0 << 0)
+# define SDVO_LVDS_COLOR_DEPTH_24 (1 << 0)
+# define SDVO_LVDS_CONNECTOR_SPWG (0 << 2)
+# define SDVO_LVDS_CONNECTOR_OPENLDI (1 << 2)
+# define SDVO_LVDS_SINGLE_CHANNEL (0 << 4)
+# define SDVO_LVDS_DUAL_CHANNEL (1 << 4)
+
+#define SDVO_CMD_GET_FLICKER_FILTER 0x4e
+#define SDVO_CMD_SET_FLICKER_FILTER 0x4f
+#define SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE 0x50
+#define SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE 0x51
+#define SDVO_CMD_GET_FLICKER_FILTER_2D 0x53
+#define SDVO_CMD_SET_FLICKER_FILTER_2D 0x54
+#define SDVO_CMD_GET_SATURATION 0x56
+#define SDVO_CMD_SET_SATURATION 0x57
+#define SDVO_CMD_GET_HUE 0x59
+#define SDVO_CMD_SET_HUE 0x5a
+#define SDVO_CMD_GET_BRIGHTNESS 0x5c
+#define SDVO_CMD_SET_BRIGHTNESS 0x5d
+#define SDVO_CMD_GET_CONTRAST 0x5f
+#define SDVO_CMD_SET_CONTRAST 0x60
+#define SDVO_CMD_GET_OVERSCAN_H 0x62
+#define SDVO_CMD_SET_OVERSCAN_H 0x63
+#define SDVO_CMD_GET_OVERSCAN_V 0x65
+#define SDVO_CMD_SET_OVERSCAN_V 0x66
+#define SDVO_CMD_GET_HPOS 0x68
+#define SDVO_CMD_SET_HPOS 0x69
+#define SDVO_CMD_GET_VPOS 0x6b
+#define SDVO_CMD_SET_VPOS 0x6c
+#define SDVO_CMD_GET_SHARPNESS 0x6e
+#define SDVO_CMD_SET_SHARPNESS 0x6f
+#define SDVO_CMD_GET_TV_CHROMA_FILTER 0x75
+#define SDVO_CMD_SET_TV_CHROMA_FILTER 0x76
+#define SDVO_CMD_GET_TV_LUMA_FILTER 0x78
+#define SDVO_CMD_SET_TV_LUMA_FILTER 0x79
+struct psb_intel_sdvo_enhancements_arg {
+ u16 value;
+} __attribute__((packed));
+
+#define SDVO_CMD_GET_DOT_CRAWL 0x70
+#define SDVO_CMD_SET_DOT_CRAWL 0x71
+# define SDVO_DOT_CRAWL_ON (1 << 0)
+# define SDVO_DOT_CRAWL_DEFAULT_ON (1 << 1)
+
+#define SDVO_CMD_GET_DITHER 0x72
+#define SDVO_CMD_SET_DITHER 0x73
+# define SDVO_DITHER_ON (1 << 0)
+# define SDVO_DITHER_DEFAULT_ON (1 << 1)
+
+#define SDVO_CMD_SET_CONTROL_BUS_SWITCH 0x7a
+# define SDVO_CONTROL_BUS_PROM (1 << 0)
+# define SDVO_CONTROL_BUS_DDC1 (1 << 1)
+# define SDVO_CONTROL_BUS_DDC2 (1 << 2)
+# define SDVO_CONTROL_BUS_DDC3 (1 << 3)
+
+/* HDMI op codes */
+#define SDVO_CMD_GET_SUPP_ENCODE 0x9d
+#define SDVO_CMD_GET_ENCODE 0x9e
+#define SDVO_CMD_SET_ENCODE 0x9f
+ #define SDVO_ENCODE_DVI 0x0
+ #define SDVO_ENCODE_HDMI 0x1
+#define SDVO_CMD_SET_PIXEL_REPLI 0x8b
+#define SDVO_CMD_GET_PIXEL_REPLI 0x8c
+#define SDVO_CMD_GET_COLORIMETRY_CAP 0x8d
+#define SDVO_CMD_SET_COLORIMETRY 0x8e
+ #define SDVO_COLORIMETRY_RGB256 0x0
+ #define SDVO_COLORIMETRY_RGB220 0x1
+ #define SDVO_COLORIMETRY_YCrCb422 0x3
+ #define SDVO_COLORIMETRY_YCrCb444 0x4
+#define SDVO_CMD_GET_COLORIMETRY 0x8f
+#define SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER 0x90
+#define SDVO_CMD_SET_AUDIO_STAT 0x91
+#define SDVO_CMD_GET_AUDIO_STAT 0x92
+#define SDVO_CMD_SET_HBUF_INDEX 0x93
+#define SDVO_CMD_GET_HBUF_INDEX 0x94
+#define SDVO_CMD_GET_HBUF_INFO 0x95
+#define SDVO_CMD_SET_HBUF_AV_SPLIT 0x96
+#define SDVO_CMD_GET_HBUF_AV_SPLIT 0x97
+#define SDVO_CMD_SET_HBUF_DATA 0x98
+#define SDVO_CMD_GET_HBUF_DATA 0x99
+#define SDVO_CMD_SET_HBUF_TXRATE 0x9a
+#define SDVO_CMD_GET_HBUF_TXRATE 0x9b
+ #define SDVO_HBUF_TX_DISABLED (0 << 6)
+ #define SDVO_HBUF_TX_ONCE (2 << 6)
+ #define SDVO_HBUF_TX_VSYNC (3 << 6)
+#define SDVO_CMD_GET_AUDIO_TX_INFO 0x9c
+#define SDVO_NEED_TO_STALL (1 << 7)
+
+struct psb_intel_sdvo_encode {
+ u8 dvi_rev;
+ u8 hdmi_rev;
+} __attribute__ ((packed));
diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
new file mode 100644
index 000000000000..7be802baceb5
--- /dev/null
+++ b/drivers/gpu/drm/gma500/psb_irq.c
@@ -0,0 +1,564 @@
+/**************************************************************************
+ * Copyright (c) 2007, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ * develop this driver.
+ *
+ **************************************************************************/
+/*
+ */
+
+#include <drm/drmP.h>
+#include "psb_drv.h"
+#include "psb_reg.h"
+#include "psb_intel_reg.h"
+#include "power.h"
+
+/*
+ * inline functions
+ */
+
+static inline u32
+psb_pipestat(int pipe)
+{
+ if (pipe == 0)
+ return PIPEASTAT;
+ if (pipe == 1)
+ return PIPEBSTAT;
+ if (pipe == 2)
+ return PIPECSTAT;
+ BUG();
+}
+
+static inline u32
+mid_pipe_event(int pipe)
+{
+ if (pipe == 0)
+ return _PSB_PIPEA_EVENT_FLAG;
+ if (pipe == 1)
+ return _MDFLD_PIPEB_EVENT_FLAG;
+ if (pipe == 2)
+ return _MDFLD_PIPEC_EVENT_FLAG;
+ BUG();
+}
+
+static inline u32
+mid_pipe_vsync(int pipe)
+{
+ if (pipe == 0)
+ return _PSB_VSYNC_PIPEA_FLAG;
+ if (pipe == 1)
+ return _PSB_VSYNC_PIPEB_FLAG;
+ if (pipe == 2)
+ return _MDFLD_PIPEC_VBLANK_FLAG;
+ BUG();
+}
+
+static inline u32
+mid_pipeconf(int pipe)
+{
+ if (pipe == 0)
+ return PIPEACONF;
+ if (pipe == 1)
+ return PIPEBCONF;
+ if (pipe == 2)
+ return PIPECCONF;
+ BUG();
+}
+
+void
+psb_enable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask)
+{
+ if ((dev_priv->pipestat[pipe] & mask) != mask) {
+ u32 reg = psb_pipestat(pipe);
+ dev_priv->pipestat[pipe] |= mask;
+ /* Enable the interrupt, clear any pending status */
+ if (gma_power_begin(dev_priv->dev, false)) {
+ u32 writeVal = PSB_RVDC32(reg);
+ writeVal |= (mask | (mask >> 16));
+ PSB_WVDC32(writeVal, reg);
+ (void) PSB_RVDC32(reg);
+ gma_power_end(dev_priv->dev);
+ }
+ }
+}
+
+void
+psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask)
+{
+ if ((dev_priv->pipestat[pipe] & mask) != 0) {
+ u32 reg = psb_pipestat(pipe);
+ dev_priv->pipestat[pipe] &= ~mask;
+ if (gma_power_begin(dev_priv->dev, false)) {
+ u32 writeVal = PSB_RVDC32(reg);
+ writeVal &= ~mask;
+ PSB_WVDC32(writeVal, reg);
+ (void) PSB_RVDC32(reg);
+ gma_power_end(dev_priv->dev);
+ }
+ }
+}
+
+void mid_enable_pipe_event(struct drm_psb_private *dev_priv, int pipe)
+{
+ if (gma_power_begin(dev_priv->dev, false)) {
+ u32 pipe_event = mid_pipe_event(pipe);
+ dev_priv->vdc_irq_mask |= pipe_event;
+ PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
+ PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
+ gma_power_end(dev_priv->dev);
+ }
+}
+
+void mid_disable_pipe_event(struct drm_psb_private *dev_priv, int pipe)
+{
+ if (dev_priv->pipestat[pipe] == 0) {
+ if (gma_power_begin(dev_priv->dev, false)) {
+ u32 pipe_event = mid_pipe_event(pipe);
+ dev_priv->vdc_irq_mask &= ~pipe_event;
+ PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
+ PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
+ gma_power_end(dev_priv->dev);
+ }
+ }
+}
+
+/*
+ * Display controller interrupt handler for pipe events.
+ */
+static void mid_pipe_event_handler(struct drm_device *dev, int pipe)
+{
+ struct drm_psb_private *dev_priv =
+ (struct drm_psb_private *) dev->dev_private;
+
+ uint32_t pipe_stat_val = 0;
+ uint32_t pipe_stat_reg = psb_pipestat(pipe);
+ uint32_t pipe_enable = dev_priv->pipestat[pipe];
+ uint32_t pipe_status = dev_priv->pipestat[pipe] >> 16;
+ uint32_t pipe_clear;
+ uint32_t i = 0;
+
+ spin_lock(&dev_priv->irqmask_lock);
+
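+	/*
+	 * pipestat[] holds the enable bits (upper half of PIPExSTAT) and
+	 * pipe_status the matching status bits shifted into the low half.
+	 * Mask the register against both, then keep only those status bits
+	 * whose enable bit was actually set.
+	 */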
+ pipe_stat_val = PSB_RVDC32(pipe_stat_reg);
+ pipe_stat_val &= pipe_enable | pipe_status;
+ pipe_stat_val &= pipe_stat_val >> 16;
+
+ spin_unlock(&dev_priv->irqmask_lock);
+
+ /* Clear the 2nd level interrupt status bits
+ * Sometimes the bits are very sticky so we repeat until they unstick */
+ for (i = 0; i < 0xffff; i++) {
+ PSB_WVDC32(PSB_RVDC32(pipe_stat_reg), pipe_stat_reg);
+ pipe_clear = PSB_RVDC32(pipe_stat_reg) & pipe_status;
+
+ if (pipe_clear == 0)
+ break;
+ }
+
+ if (pipe_clear)
+ dev_err(dev->dev,
+ "%s, can't clear status bits for pipe %d, its value = 0x%x.\n",
+ __func__, pipe, PSB_RVDC32(pipe_stat_reg));
+
+ if (pipe_stat_val & PIPE_VBLANK_STATUS)
+ drm_handle_vblank(dev, pipe);
+
+ if (pipe_stat_val & PIPE_TE_STATUS)
+ drm_handle_vblank(dev, pipe);
+}
+
+/*
+ * Display controller interrupt handler.
+ */
+static void psb_vdc_interrupt(struct drm_device *dev, uint32_t vdc_stat)
+{
+ if (vdc_stat & _PSB_VSYNC_PIPEA_FLAG)
+ mid_pipe_event_handler(dev, 0);
+
+ if (vdc_stat & _PSB_VSYNC_PIPEB_FLAG)
+ mid_pipe_event_handler(dev, 1);
+}
+
+irqreturn_t psb_irq_handler(DRM_IRQ_ARGS)
+{
+ struct drm_device *dev = (struct drm_device *) arg;
+ struct drm_psb_private *dev_priv =
+ (struct drm_psb_private *) dev->dev_private;
+
+ uint32_t vdc_stat, dsp_int = 0, sgx_int = 0;
+ int handled = 0;
+
+ spin_lock(&dev_priv->irqmask_lock);
+
+ vdc_stat = PSB_RVDC32(PSB_INT_IDENTITY_R);
+
+ if (vdc_stat & _PSB_PIPE_EVENT_FLAG)
+ dsp_int = 1;
+
+ /* FIXME: Handle Medfield
+ if (vdc_stat & _MDFLD_DISP_ALL_IRQ_FLAG)
+ dsp_int = 1;
+ */
+
+ if (vdc_stat & _PSB_IRQ_SGX_FLAG)
+ sgx_int = 1;
+
+ vdc_stat &= dev_priv->vdc_irq_mask;
+ spin_unlock(&dev_priv->irqmask_lock);
+
+ if (dsp_int && gma_power_is_on(dev)) {
+ psb_vdc_interrupt(dev, vdc_stat);
+ handled = 1;
+ }
+
+ if (sgx_int) {
+ /* Not expected - we have it masked, shut it up */
+ u32 s, s2;
+ s = PSB_RSGX32(PSB_CR_EVENT_STATUS);
+ s2 = PSB_RSGX32(PSB_CR_EVENT_STATUS2);
+ PSB_WSGX32(s, PSB_CR_EVENT_HOST_CLEAR);
+ PSB_WSGX32(s2, PSB_CR_EVENT_HOST_CLEAR2);
+		/* If s & _PSB_CE_TWOD_COMPLETE then 2D rendering has completed,
+		   but we may as well keep polling even if we add handling for it */
+ handled = 1;
+ }
+
+ PSB_WVDC32(vdc_stat, PSB_INT_IDENTITY_R);
+ (void) PSB_RVDC32(PSB_INT_IDENTITY_R);
+ DRM_READMEMORYBARRIER();
+
+ if (!handled)
+ return IRQ_NONE;
+
+ return IRQ_HANDLED;
+}
+
+void psb_irq_preinstall(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv =
+ (struct drm_psb_private *) dev->dev_private;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+
+ if (gma_power_is_on(dev))
+ PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
+ if (dev->vblank_enabled[0])
+ dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG;
+ if (dev->vblank_enabled[1])
+ dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG;
+
+ /* FIXME: Handle Medfield irq mask
+ if (dev->vblank_enabled[1])
+ dev_priv->vdc_irq_mask |= _MDFLD_PIPEB_EVENT_FLAG;
+ if (dev->vblank_enabled[2])
+ dev_priv->vdc_irq_mask |= _MDFLD_PIPEC_EVENT_FLAG;
+ */
+
+ /* This register is safe even if display island is off */
+ PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+}
+
+int psb_irq_postinstall(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv =
+ (struct drm_psb_private *) dev->dev_private;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+
+ /* This register is safe even if display island is off */
+ PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
+ PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
+
+ if (dev->vblank_enabled[0])
+ psb_enable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
+ else
+ psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
+
+ if (dev->vblank_enabled[1])
+ psb_enable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
+ else
+ psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
+
+ if (dev->vblank_enabled[2])
+ psb_enable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
+ else
+ psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
+
+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+ return 0;
+}
+
+void psb_irq_uninstall(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv =
+ (struct drm_psb_private *) dev->dev_private;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+
+ PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
+
+ if (dev->vblank_enabled[0])
+ psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
+
+ if (dev->vblank_enabled[1])
+ psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
+
+ if (dev->vblank_enabled[2])
+ psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
+
+ dev_priv->vdc_irq_mask &= _PSB_IRQ_SGX_FLAG |
+ _PSB_IRQ_MSVDX_FLAG |
+ _LNC_IRQ_TOPAZ_FLAG;
+
+ /* These two registers are safe even if display island is off */
+ PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
+ PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
+
+ wmb();
+
+ /* This register is safe even if display island is off */
+ PSB_WVDC32(PSB_RVDC32(PSB_INT_IDENTITY_R), PSB_INT_IDENTITY_R);
+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+}
+
+void psb_irq_turn_on_dpst(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv =
+ (struct drm_psb_private *) dev->dev_private;
+ u32 hist_reg;
+ u32 pwm_reg;
+
+ if (gma_power_begin(dev, false)) {
+ PSB_WVDC32(1 << 31, HISTOGRAM_LOGIC_CONTROL);
+ hist_reg = PSB_RVDC32(HISTOGRAM_LOGIC_CONTROL);
+ PSB_WVDC32(1 << 31, HISTOGRAM_INT_CONTROL);
+ hist_reg = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
+
+ PSB_WVDC32(0x80010100, PWM_CONTROL_LOGIC);
+ pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
+ PSB_WVDC32(pwm_reg | PWM_PHASEIN_ENABLE
+ | PWM_PHASEIN_INT_ENABLE,
+ PWM_CONTROL_LOGIC);
+ pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
+
+ psb_enable_pipestat(dev_priv, 0, PIPE_DPST_EVENT_ENABLE);
+
+ hist_reg = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
+ PSB_WVDC32(hist_reg | HISTOGRAM_INT_CTRL_CLEAR,
+ HISTOGRAM_INT_CONTROL);
+ pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
+ PSB_WVDC32(pwm_reg | 0x80010100 | PWM_PHASEIN_ENABLE,
+ PWM_CONTROL_LOGIC);
+
+ gma_power_end(dev);
+ }
+}
+
+int psb_irq_enable_dpst(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv =
+ (struct drm_psb_private *) dev->dev_private;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+
+ /* enable DPST */
+ mid_enable_pipe_event(dev_priv, 0);
+ psb_irq_turn_on_dpst(dev);
+
+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+ return 0;
+}
+
+void psb_irq_turn_off_dpst(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv =
+ (struct drm_psb_private *) dev->dev_private;
+ u32 hist_reg;
+ u32 pwm_reg;
+
+ if (gma_power_begin(dev, false)) {
+ PSB_WVDC32(0x00000000, HISTOGRAM_INT_CONTROL);
+ hist_reg = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
+
+ psb_disable_pipestat(dev_priv, 0, PIPE_DPST_EVENT_ENABLE);
+
+ pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
+		PSB_WVDC32(pwm_reg & ~PWM_PHASEIN_INT_ENABLE,
+ PWM_CONTROL_LOGIC);
+ pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
+
+ gma_power_end(dev);
+ }
+}
+
+int psb_irq_disable_dpst(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv =
+ (struct drm_psb_private *) dev->dev_private;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+
+ mid_disable_pipe_event(dev_priv, 0);
+ psb_irq_turn_off_dpst(dev);
+
+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+
+ return 0;
+}
+
+#ifdef PSB_FIXME
+static int psb_vblank_do_wait(struct drm_device *dev,
+ unsigned int *sequence, atomic_t *counter)
+{
+ unsigned int cur_vblank;
+ int ret = 0;
+ DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
+ (((cur_vblank = atomic_read(counter))
+ - *sequence) <= (1 << 23)));
+ *sequence = cur_vblank;
+
+ return ret;
+}
+#endif
+
+/*
+ * Enable the VBLANK interrupt for the given pipe
+ */
+int psb_enable_vblank(struct drm_device *dev, int pipe)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ unsigned long irqflags;
+ uint32_t reg_val = 0;
+ uint32_t pipeconf_reg = mid_pipeconf(pipe);
+
+ if (gma_power_begin(dev, false)) {
+ reg_val = REG_READ(pipeconf_reg);
+ gma_power_end(dev);
+ }
+
+ if (!(reg_val & PIPEACONF_ENABLE))
+ return -EINVAL;
+
+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+
+ if (pipe == 0)
+ dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG;
+ else if (pipe == 1)
+ dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG;
+
+ PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
+ PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
+ psb_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_ENABLE);
+
+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+
+ return 0;
+}
+
+/*
+ * Disable the VBLANK interrupt for the given pipe
+ */
+void psb_disable_vblank(struct drm_device *dev, int pipe)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+
+ if (pipe == 0)
+ dev_priv->vdc_irq_mask &= ~_PSB_VSYNC_PIPEA_FLAG;
+ else if (pipe == 1)
+ dev_priv->vdc_irq_mask &= ~_PSB_VSYNC_PIPEB_FLAG;
+
+ PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
+ PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
+ psb_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_ENABLE);
+
+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+}
+
+/* Called from drm generic code, passed a 'crtc', which
+ * we use as a pipe index
+ */
+u32 psb_get_vblank_counter(struct drm_device *dev, int pipe)
+{
+ uint32_t high_frame = PIPEAFRAMEHIGH;
+ uint32_t low_frame = PIPEAFRAMEPIXEL;
+ uint32_t pipeconf_reg = PIPEACONF;
+ uint32_t reg_val = 0;
+ uint32_t high1 = 0, high2 = 0, low = 0, count = 0;
+
+ switch (pipe) {
+ case 0:
+ break;
+ case 1:
+ high_frame = PIPEBFRAMEHIGH;
+ low_frame = PIPEBFRAMEPIXEL;
+ pipeconf_reg = PIPEBCONF;
+ break;
+ case 2:
+ high_frame = PIPECFRAMEHIGH;
+ low_frame = PIPECFRAMEPIXEL;
+ pipeconf_reg = PIPECCONF;
+ break;
+ default:
+ dev_err(dev->dev, "%s, invalid pipe.\n", __func__);
+ return 0;
+ }
+
+ if (!gma_power_begin(dev, false))
+ return 0;
+
+ reg_val = REG_READ(pipeconf_reg);
+
+ if (!(reg_val & PIPEACONF_ENABLE)) {
+ dev_err(dev->dev, "trying to get vblank count for disabled pipe %d\n",
+ pipe);
+ goto psb_get_vblank_counter_exit;
+ }
+
+ /*
+ * High & low register fields aren't synchronized, so make sure
+ * we get a low value that's stable across two reads of the high
+ * register.
+ */
+ do {
+ high1 = ((REG_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
+ PIPE_FRAME_HIGH_SHIFT);
+ low = ((REG_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
+ PIPE_FRAME_LOW_SHIFT);
+ high2 = ((REG_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
+ PIPE_FRAME_HIGH_SHIFT);
+ } while (high1 != high2);
+
+ count = (high1 << 8) | low;
+
+psb_get_vblank_counter_exit:
+
+ gma_power_end(dev);
+
+ return count;
+}
+
diff --git a/drivers/gpu/drm/gma500/psb_irq.h b/drivers/gpu/drm/gma500/psb_irq.h
new file mode 100644
index 000000000000..216fda38b57d
--- /dev/null
+++ b/drivers/gpu/drm/gma500/psb_irq.h
@@ -0,0 +1,45 @@
+/**************************************************************************
+ * Copyright (c) 2009-2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ * Benjamin Defnet <benjamin.r.defnet@intel.com>
+ * Rajesh Poornachandran <rajesh.poornachandran@intel.com>
+ *
+ **************************************************************************/
+
+#ifndef _SYSIRQ_H_
+#define _SYSIRQ_H_
+
+#include <drm/drmP.h>
+
+bool sysirq_init(struct drm_device *dev);
+void sysirq_uninit(struct drm_device *dev);
+
+void psb_irq_preinstall(struct drm_device *dev);
+int psb_irq_postinstall(struct drm_device *dev);
+void psb_irq_uninstall(struct drm_device *dev);
+irqreturn_t psb_irq_handler(DRM_IRQ_ARGS);
+
+int psb_irq_enable_dpst(struct drm_device *dev);
+int psb_irq_disable_dpst(struct drm_device *dev);
+void psb_irq_turn_on_dpst(struct drm_device *dev);
+void psb_irq_turn_off_dpst(struct drm_device *dev);
+int psb_enable_vblank(struct drm_device *dev, int pipe);
+void psb_disable_vblank(struct drm_device *dev, int pipe);
+u32 psb_get_vblank_counter(struct drm_device *dev, int pipe);
+
+#endif /* _SYSIRQ_H_ */
diff --git a/drivers/gpu/drm/gma500/psb_lid.c b/drivers/gpu/drm/gma500/psb_lid.c
new file mode 100644
index 000000000000..b867aabe6bf3
--- /dev/null
+++ b/drivers/gpu/drm/gma500/psb_lid.c
@@ -0,0 +1,88 @@
+/**************************************************************************
+ * Copyright (c) 2007, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
+ **************************************************************************/
+
+#include <drm/drmP.h>
+#include "psb_drv.h"
+#include "psb_reg.h"
+#include "psb_intel_reg.h"
+#include <linux/spinlock.h>
+
+static void psb_lid_timer_func(unsigned long data)
+{
+ struct drm_psb_private * dev_priv = (struct drm_psb_private *)data;
+ struct drm_device *dev = (struct drm_device *)dev_priv->dev;
+ struct timer_list *lid_timer = &dev_priv->lid_timer;
+ unsigned long irq_flags;
+ u32 *lid_state = dev_priv->lid_state;
+ u32 pp_status;
+
+ if (readl(lid_state) == dev_priv->lid_last_state)
+ goto lid_timer_schedule;
+
+ if ((readl(lid_state)) & 0x01) {
+ /*lid state is open*/
+ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) | POWER_TARGET_ON);
+ do {
+ pp_status = REG_READ(PP_STATUS);
+ } while ((pp_status & PP_ON) == 0);
+
+ /*FIXME: should be backlight level before*/
+ psb_intel_lvds_set_brightness(dev, 100);
+ } else {
+ psb_intel_lvds_set_brightness(dev, 0);
+
+ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) & ~POWER_TARGET_ON);
+ do {
+ pp_status = REG_READ(PP_STATUS);
+ } while (pp_status & PP_ON); /* wait for the panel to actually power down */
+ }
+ dev_priv->lid_last_state = readl(lid_state);
+
+lid_timer_schedule:
+ spin_lock_irqsave(&dev_priv->lid_lock, irq_flags);
+ if (!timer_pending(lid_timer)) {
+ lid_timer->expires = jiffies + PSB_LID_DELAY;
+ add_timer(lid_timer);
+ }
+ spin_unlock_irqrestore(&dev_priv->lid_lock, irq_flags);
+}
+
+void psb_lid_timer_init(struct drm_psb_private *dev_priv)
+{
+ struct timer_list *lid_timer = &dev_priv->lid_timer;
+ unsigned long irq_flags;
+
+ spin_lock_init(&dev_priv->lid_lock);
+ spin_lock_irqsave(&dev_priv->lid_lock, irq_flags);
+
+ init_timer(lid_timer);
+
+ lid_timer->data = (unsigned long)dev_priv;
+ lid_timer->function = psb_lid_timer_func;
+ lid_timer->expires = jiffies + PSB_LID_DELAY;
+
+ add_timer(lid_timer);
+ spin_unlock_irqrestore(&dev_priv->lid_lock, irq_flags);
+}
+
+void psb_lid_timer_takedown(struct drm_psb_private *dev_priv)
+{
+ del_timer_sync(&dev_priv->lid_timer);
+}
+
diff --git a/drivers/gpu/drm/gma500/psb_reg.h b/drivers/gpu/drm/gma500/psb_reg.h
new file mode 100644
index 000000000000..b81c7c1e9c2d
--- /dev/null
+++ b/drivers/gpu/drm/gma500/psb_reg.h
@@ -0,0 +1,582 @@
+/**************************************************************************
+ *
+ * Copyright (c) (2005-2007) Imagination Technologies Limited.
+ * Copyright (c) 2007, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#ifndef _PSB_REG_H_
+#define _PSB_REG_H_
+
+#define PSB_CR_CLKGATECTL 0x0000
+#define _PSB_C_CLKGATECTL_AUTO_MAN_REG (1 << 24)
+#define _PSB_C_CLKGATECTL_USE_CLKG_SHIFT (20)
+#define _PSB_C_CLKGATECTL_USE_CLKG_MASK (0x3 << 20)
+#define _PSB_C_CLKGATECTL_DPM_CLKG_SHIFT (16)
+#define _PSB_C_CLKGATECTL_DPM_CLKG_MASK (0x3 << 16)
+#define _PSB_C_CLKGATECTL_TA_CLKG_SHIFT (12)
+#define _PSB_C_CLKGATECTL_TA_CLKG_MASK (0x3 << 12)
+#define _PSB_C_CLKGATECTL_TSP_CLKG_SHIFT (8)
+#define _PSB_C_CLKGATECTL_TSP_CLKG_MASK (0x3 << 8)
+#define _PSB_C_CLKGATECTL_ISP_CLKG_SHIFT (4)
+#define _PSB_C_CLKGATECTL_ISP_CLKG_MASK (0x3 << 4)
+#define _PSB_C_CLKGATECTL_2D_CLKG_SHIFT (0)
+#define _PSB_C_CLKGATECTL_2D_CLKG_MASK (0x3 << 0)
+#define _PSB_C_CLKGATECTL_CLKG_ENABLED (0)
+#define _PSB_C_CLKGATECTL_CLKG_DISABLED (1)
+#define _PSB_C_CLKGATECTL_CLKG_AUTO (2)
+
+#define PSB_CR_CORE_ID 0x0010
+#define _PSB_CC_ID_ID_SHIFT (16)
+#define _PSB_CC_ID_ID_MASK (0xFFFF << 16)
+#define _PSB_CC_ID_CONFIG_SHIFT (0)
+#define _PSB_CC_ID_CONFIG_MASK (0xFFFF << 0)
+
+#define PSB_CR_CORE_REVISION 0x0014
+#define _PSB_CC_REVISION_DESIGNER_SHIFT (24)
+#define _PSB_CC_REVISION_DESIGNER_MASK (0xFF << 24)
+#define _PSB_CC_REVISION_MAJOR_SHIFT (16)
+#define _PSB_CC_REVISION_MAJOR_MASK (0xFF << 16)
+#define _PSB_CC_REVISION_MINOR_SHIFT (8)
+#define _PSB_CC_REVISION_MINOR_MASK (0xFF << 8)
+#define _PSB_CC_REVISION_MAINTENANCE_SHIFT (0)
+#define _PSB_CC_REVISION_MAINTENANCE_MASK (0xFF << 0)
+
+#define PSB_CR_DESIGNER_REV_FIELD1 0x0018
+
+#define PSB_CR_SOFT_RESET 0x0080
+#define _PSB_CS_RESET_TSP_RESET (1 << 6)
+#define _PSB_CS_RESET_ISP_RESET (1 << 5)
+#define _PSB_CS_RESET_USE_RESET (1 << 4)
+#define _PSB_CS_RESET_TA_RESET (1 << 3)
+#define _PSB_CS_RESET_DPM_RESET (1 << 2)
+#define _PSB_CS_RESET_TWOD_RESET (1 << 1)
+#define _PSB_CS_RESET_BIF_RESET (1 << 0)
+
+#define PSB_CR_DESIGNER_REV_FIELD2 0x001C
+
+#define PSB_CR_EVENT_HOST_ENABLE2 0x0110
+
+#define PSB_CR_EVENT_STATUS2 0x0118
+
+#define PSB_CR_EVENT_HOST_CLEAR2 0x0114
+#define _PSB_CE2_BIF_REQUESTER_FAULT (1 << 4)
+
+#define PSB_CR_EVENT_STATUS 0x012C
+
+#define PSB_CR_EVENT_HOST_ENABLE 0x0130
+
+#define PSB_CR_EVENT_HOST_CLEAR 0x0134
+#define _PSB_CE_MASTER_INTERRUPT (1 << 31)
+#define _PSB_CE_TA_DPM_FAULT (1 << 28)
+#define _PSB_CE_TWOD_COMPLETE (1 << 27)
+#define _PSB_CE_DPM_OUT_OF_MEMORY_ZLS (1 << 25)
+#define _PSB_CE_DPM_TA_MEM_FREE (1 << 24)
+#define _PSB_CE_PIXELBE_END_RENDER (1 << 18)
+#define _PSB_CE_SW_EVENT (1 << 14)
+#define _PSB_CE_TA_FINISHED (1 << 13)
+#define _PSB_CE_TA_TERMINATE (1 << 12)
+#define _PSB_CE_DPM_REACHED_MEM_THRESH (1 << 3)
+#define _PSB_CE_DPM_OUT_OF_MEMORY_GBL (1 << 2)
+#define _PSB_CE_DPM_OUT_OF_MEMORY_MT (1 << 1)
+#define _PSB_CE_DPM_3D_MEM_FREE (1 << 0)
+
+
+#define PSB_USE_OFFSET_MASK 0x0007FFFF
+#define PSB_USE_OFFSET_SIZE (PSB_USE_OFFSET_MASK + 1)
+#define PSB_CR_USE_CODE_BASE0 0x0A0C
+#define PSB_CR_USE_CODE_BASE1 0x0A10
+#define PSB_CR_USE_CODE_BASE2 0x0A14
+#define PSB_CR_USE_CODE_BASE3 0x0A18
+#define PSB_CR_USE_CODE_BASE4 0x0A1C
+#define PSB_CR_USE_CODE_BASE5 0x0A20
+#define PSB_CR_USE_CODE_BASE6 0x0A24
+#define PSB_CR_USE_CODE_BASE7 0x0A28
+#define PSB_CR_USE_CODE_BASE8 0x0A2C
+#define PSB_CR_USE_CODE_BASE9 0x0A30
+#define PSB_CR_USE_CODE_BASE10 0x0A34
+#define PSB_CR_USE_CODE_BASE11 0x0A38
+#define PSB_CR_USE_CODE_BASE12 0x0A3C
+#define PSB_CR_USE_CODE_BASE13 0x0A40
+#define PSB_CR_USE_CODE_BASE14 0x0A44
+#define PSB_CR_USE_CODE_BASE15 0x0A48
+#define PSB_CR_USE_CODE_BASE(_i) (0x0A0C + ((_i) << 2))
+#define _PSB_CUC_BASE_DM_SHIFT (25)
+#define _PSB_CUC_BASE_DM_MASK (0x3 << 25)
+#define _PSB_CUC_BASE_ADDR_SHIFT (0) /* 1024-bit aligned address? */
+#define _PSB_CUC_BASE_ADDR_ALIGNSHIFT (7)
+#define _PSB_CUC_BASE_ADDR_MASK (0x1FFFFFF << 0)
+#define _PSB_CUC_DM_VERTEX (0)
+#define _PSB_CUC_DM_PIXEL (1)
+#define _PSB_CUC_DM_RESERVED (2)
+#define _PSB_CUC_DM_EDM (3)
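The indexed macro a few lines up is just arithmetic over the sixteen explicit BASE registers listed before it: each index advances the offset by 4 bytes from 0x0A0C. A stand-alone check of that expansion, illustrative only and not part of this patch:

/* Sanity check of the indexed USE code base macro; both defines are copied
 * verbatim from the psb_reg.h hunk above. */
#include <stdio.h>

#define PSB_CR_USE_CODE_BASE3	0x0A18
#define PSB_CR_USE_CODE_BASE(_i)	(0x0A0C + ((_i) << 2))

int main(void)
{
	/* 0x0A0C + (3 << 2) = 0x0A18, i.e. the explicit BASE3 register */
	printf("PSB_CR_USE_CODE_BASE(3) = 0x%04X\n", PSB_CR_USE_CODE_BASE(3));
	return 0;
}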
+
+#define PSB_CR_PDS_EXEC_BASE 0x0AB8
+#define _PSB_CR_PDS_EXEC_BASE_ADDR_SHIFT (20) /* 1MB aligned address */
+#define _PSB_CR_PDS_EXEC_BASE_ADDR_ALIGNSHIFT (20)
+
+#define PSB_CR_EVENT_KICKER 0x0AC4
+#define _PSB_CE_KICKER_ADDRESS_SHIFT (4) /* 128-bit aligned address */
+
+#define PSB_CR_EVENT_KICK 0x0AC8
+#define _PSB_CE_KICK_NOW (1 << 0)
+
+#define PSB_CR_BIF_DIR_LIST_BASE1 0x0C38
+
+#define PSB_CR_BIF_CTRL 0x0C00
+#define _PSB_CB_CTRL_CLEAR_FAULT (1 << 4)
+#define _PSB_CB_CTRL_INVALDC (1 << 3)
+#define _PSB_CB_CTRL_FLUSH (1 << 2)
+
+#define PSB_CR_BIF_INT_STAT 0x0C04
+
+#define PSB_CR_BIF_FAULT 0x0C08
+#define _PSB_CBI_STAT_PF_N_RW (1 << 14)
+#define _PSB_CBI_STAT_FAULT_SHIFT (0)
+#define _PSB_CBI_STAT_FAULT_MASK (0x3FFF << 0)
+#define _PSB_CBI_STAT_FAULT_CACHE (1 << 1)
+#define _PSB_CBI_STAT_FAULT_TA (1 << 2)
+#define _PSB_CBI_STAT_FAULT_VDM (1 << 3)
+#define _PSB_CBI_STAT_FAULT_2D (1 << 4)
+#define _PSB_CBI_STAT_FAULT_PBE (1 << 5)
+#define _PSB_CBI_STAT_FAULT_TSP (1 << 6)
+#define _PSB_CBI_STAT_FAULT_ISP (1 << 7)
+#define _PSB_CBI_STAT_FAULT_USSEPDS (1 << 8)
+#define _PSB_CBI_STAT_FAULT_HOST (1 << 9)
+
+#define PSB_CR_BIF_BANK0 0x0C78
+#define PSB_CR_BIF_BANK1 0x0C7C
+#define PSB_CR_BIF_DIR_LIST_BASE0 0x0C84
+#define PSB_CR_BIF_TWOD_REQ_BASE 0x0C88
+#define PSB_CR_BIF_3D_REQ_BASE 0x0CAC
+
+#define PSB_CR_2D_SOCIF 0x0E18
+#define _PSB_C2_SOCIF_FREESPACE_SHIFT (0)
+#define _PSB_C2_SOCIF_FREESPACE_MASK (0xFF << 0)
+#define _PSB_C2_SOCIF_EMPTY (0x80 << 0)
+
+#define PSB_CR_2D_BLIT_STATUS 0x0E04
+#define _PSB_C2B_STATUS_BUSY (1 << 24)
+#define _PSB_C2B_STATUS_COMPLETE_SHIFT (0)
+#define _PSB_C2B_STATUS_COMPLETE_MASK (0xFFFFFF << 0)
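A plausible use of the two status registers above, sketched only from the field definitions in this hunk (the real gma500 2D submission code is elsewhere): poll the SOCIF free-space field before pushing a block through the 2D slave port, since FREESPACE sits at bit 0 and can be read straight out of the masked value.

/* Illustrative sketch, not part of this patch. The MMIO base and the exact
 * meaning of the free-space count are assumptions; only the register offset
 * and field mask come from the defines above. readl()/udelay() are the
 * usual kernel helpers. */
static int example_2d_wait_free(void __iomem *sgx_regs, u32 needed_dwords)
{
	int tries = 1000;
	u32 avail;

	do {
		avail = readl(sgx_regs + PSB_CR_2D_SOCIF) &
			_PSB_C2_SOCIF_FREESPACE_MASK;
		if (avail >= needed_dwords)
			return 0;	/* enough room in the slave port FIFO */
		udelay(1);
	} while (--tries);

	return -EBUSY;	/* FIFO never drained */
}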
+
+/*
+ * 2D defs.
+ */
+
+/*
+ * 2D Slave Port Data : Block Header's Object Type
+ */
+
+#define PSB_2D_CLIP_BH (0x00000000)
+#define PSB_2D_PAT_BH (0x10000000)
+#define PSB_2D_CTRL_BH (0x20000000)
+#define PSB_2D_SRC_OFF_BH (0x30000000)
+#define PSB_2D_MASK_OFF_BH (0x40000000)
+#define PSB_2D_RESERVED1_BH (0x50000000)
+#define PSB_2D_RESERVED2_BH (0x60000000)
+#define PSB_2D_FENCE_BH (0x70000000)
+#define PSB_2D_BLIT_BH (0x80000000)
+#define PSB_2D_SRC_SURF_BH (0x90000000)
+#define PSB_2D_DST_SURF_BH (0xA0000000)
+#define PSB_2D_PAT_SURF_BH (0xB0000000)
+#define PSB_2D_SRC_PAL_BH (0xC0000000)
+#define PSB_2D_PAT_PAL_BH (0xD0000000)
+#define PSB_2D_MASK_SURF_BH (0xE0000000)
+#define PSB_2D_FLUSH_BH (0xF0000000)
+
+/*
+ * Clip Definition block (PSB_2D_CLIP_BH)
+ */
+#define PSB_2D_CLIPCOUNT_MAX (1)
+#define PSB_2D_CLIPCOUNT_MASK (0x00000000)
+#define PSB_2D_CLIPCOUNT_CLRMASK (0xFFFFFFFF)
+#define PSB_2D_CLIPCOUNT_SHIFT (0)
+/* clip rectangle min & max */
+#define PSB_2D_CLIP_XMAX_MASK (0x00FFF000)
+#define PSB_2D_CLIP_XMAX_CLRMASK (0xFF000FFF)
+#define PSB_2D_CLIP_XMAX_SHIFT (12)
+#define PSB_2D_CLIP_XMIN_MASK (0x00000FFF)
+#define PSB_2D_CLIP_XMIN_CLRMASK (0x00FFF000)
+#define PSB_2D_CLIP_XMIN_SHIFT (0)
+/* clip rectangle offset */
+#define PSB_2D_CLIP_YMAX_MASK (0x00FFF000)
+#define PSB_2D_CLIP_YMAX_CLRMASK (0xFF000FFF)
+#define PSB_2D_CLIP_YMAX_SHIFT (12)
+#define PSB_2D_CLIP_YMIN_MASK (0x00000FFF)
+#define PSB_2D_CLIP_YMIN_CLRMASK (0x00FFF000)
+#define PSB_2D_CLIP_YMIN_SHIFT (0)
+
+/*
+ * Pattern Control (PSB_2D_PAT_BH)
+ */
+#define PSB_2D_PAT_HEIGHT_MASK (0x0000001F)
+#define PSB_2D_PAT_HEIGHT_SHIFT (0)
+#define PSB_2D_PAT_WIDTH_MASK (0x000003E0)
+#define PSB_2D_PAT_WIDTH_SHIFT (5)
+#define PSB_2D_PAT_YSTART_MASK (0x00007C00)
+#define PSB_2D_PAT_YSTART_SHIFT (10)
+#define PSB_2D_PAT_XSTART_MASK (0x000F8000)
+#define PSB_2D_PAT_XSTART_SHIFT (15)
+
+/*
+ * 2D Control block (PSB_2D_CTRL_BH)
+ */
+/* Present Flags */
+#define PSB_2D_SRCCK_CTRL (0x00000001)
+#define PSB_2D_DSTCK_CTRL (0x00000002)
+#define PSB_2D_ALPHA_CTRL (0x00000004)
+/* Colour Key Colour (SRC/DST)*/
+#define PSB_2D_CK_COL_MASK (0xFFFFFFFF)
+#define PSB_2D_CK_COL_CLRMASK (0x00000000)
+#define PSB_2D_CK_COL_SHIFT (0)
+/* Colour Key Mask (SRC/DST)*/
+#define PSB_2D_CK_MASK_MASK (0xFFFFFFFF)
+#define PSB_2D_CK_MASK_CLRMASK (0x00000000)
+#define PSB_2D_CK_MASK_SHIFT (0)
+/* Alpha Control (Alpha/RGB)*/
+#define PSB_2D_GBLALPHA_MASK (0x000FF000)
+#define PSB_2D_GBLALPHA_CLRMASK (0xFFF00FFF)
+#define PSB_2D_GBLALPHA_SHIFT (12)
+#define PSB_2D_SRCALPHA_OP_MASK (0x00700000)
+#define PSB_2D_SRCALPHA_OP_CLRMASK (0xFF8FFFFF)
+#define PSB_2D_SRCALPHA_OP_SHIFT (20)
+#define PSB_2D_SRCALPHA_OP_ONE (0x00000000)
+#define PSB_2D_SRCALPHA_OP_SRC (0x00100000)
+#define PSB_2D_SRCALPHA_OP_DST (0x00200000)
+#define PSB_2D_SRCALPHA_OP_SG (0x00300000)
+#define PSB_2D_SRCALPHA_OP_DG (0x00400000)
+#define PSB_2D_SRCALPHA_OP_GBL (0x00500000)
+#define PSB_2D_SRCALPHA_OP_ZERO (0x00600000)
+#define PSB_2D_SRCALPHA_INVERT (0x00800000)
+#define PSB_2D_SRCALPHA_INVERT_CLR (0xFF7FFFFF)
+#define PSB_2D_DSTALPHA_OP_MASK (0x07000000)
+#define PSB_2D_DSTALPHA_OP_CLRMASK (0xF8FFFFFF)
+#define PSB_2D_DSTALPHA_OP_SHIFT (24)
+#define PSB_2D_DSTALPHA_OP_ONE (0x00000000)
+#define PSB_2D_DSTALPHA_OP_SRC (0x01000000)
+#define PSB_2D_DSTALPHA_OP_DST (0x02000000)
+#define PSB_2D_DSTALPHA_OP_SG (0x03000000)
+#define PSB_2D_DSTALPHA_OP_DG (0x04000000)
+#define PSB_2D_DSTALPHA_OP_GBL (0x05000000)
+#define PSB_2D_DSTALPHA_OP_ZERO (0x06000000)
+#define PSB_2D_DSTALPHA_INVERT (0x08000000)
+#define PSB_2D_DSTALPHA_INVERT_CLR (0xF7FFFFFF)
+
+#define PSB_2D_PRE_MULTIPLICATION_ENABLE (0x10000000)
+#define PSB_2D_PRE_MULTIPLICATION_CLRMASK (0xEFFFFFFF)
+#define PSB_2D_ZERO_SOURCE_ALPHA_ENABLE (0x20000000)
+#define PSB_2D_ZERO_SOURCE_ALPHA_CLRMASK (0xDFFFFFFF)
+
+/*
+ * Source Offset (PSB_2D_SRC_OFF_BH)
+ */
+#define PSB_2D_SRCOFF_XSTART_MASK ((0x00000FFF) << 12)
+#define PSB_2D_SRCOFF_XSTART_SHIFT (12)
+#define PSB_2D_SRCOFF_YSTART_MASK (0x00000FFF)
+#define PSB_2D_SRCOFF_YSTART_SHIFT (0)
+
+/*
+ * Mask Offset (PSB_2D_MASK_OFF_BH)
+ */
+#define PSB_2D_MASKOFF_XSTART_MASK ((0x00000FFF) << 12)
+#define PSB_2D_MASKOFF_XSTART_SHIFT (12)
+#define PSB_2D_MASKOFF_YSTART_MASK (0x00000FFF)
+#define PSB_2D_MASKOFF_YSTART_SHIFT (0)
+
+/*
+ * 2D Fence (see PSB_2D_FENCE_BH): bits 0:27 are ignored
+ */
+
+/*
+ * Blit Rectangle (PSB_2D_BLIT_BH)
+ */
+
+#define PSB_2D_ROT_MASK (3 << 25)
+#define PSB_2D_ROT_CLRMASK (~PSB_2D_ROT_MASK)
+#define PSB_2D_ROT_NONE (0 << 25)
+#define PSB_2D_ROT_90DEGS (1 << 25)
+#define PSB_2D_ROT_180DEGS (2 << 25)
+#define PSB_2D_ROT_270DEGS (3 << 25)
+
+#define PSB_2D_COPYORDER_MASK (3 << 23)
+#define PSB_2D_COPYORDER_CLRMASK (~PSB_2D_COPYORDER_MASK)
+#define PSB_2D_COPYORDER_TL2BR (0 << 23)
+#define PSB_2D_COPYORDER_BR2TL (1 << 23)
+#define PSB_2D_COPYORDER_TR2BL (2 << 23)
+#define PSB_2D_COPYORDER_BL2TR (3 << 23)
+
+#define PSB_2D_DSTCK_CLRMASK (0xFF9FFFFF)
+#define PSB_2D_DSTCK_DISABLE (0x00000000)
+#define PSB_2D_DSTCK_PASS (0x00200000)
+#define PSB_2D_DSTCK_REJECT (0x00400000)
+
+#define PSB_2D_SRCCK_CLRMASK (0xFFE7FFFF)
+#define PSB_2D_SRCCK_DISABLE (0x00000000)
+#define PSB_2D_SRCCK_PASS (0x00080000)
+#define PSB_2D_SRCCK_REJECT (0x00100000)
+
+#define PSB_2D_CLIP_ENABLE (0x00040000)
+
+#define PSB_2D_ALPHA_ENABLE (0x00020000)
+
+#define PSB_2D_PAT_CLRMASK (0xFFFEFFFF)
+#define PSB_2D_PAT_MASK (0x00010000)
+#define PSB_2D_USE_PAT (0x00010000)
+#define PSB_2D_USE_FILL (0x00000000)
+/*
+ * Tungsten Graphics note on rop codes: If rop A and rop B are
+ * identical, the mask surface will not be read and need not be
+ * set up.
+ */
+
+#define PSB_2D_ROP3B_MASK (0x0000FF00)
+#define PSB_2D_ROP3B_CLRMASK (0xFFFF00FF)
+#define PSB_2D_ROP3B_SHIFT (8)
+/* rop code A */
+#define PSB_2D_ROP3A_MASK (0x000000FF)
+#define PSB_2D_ROP3A_CLRMASK (0xFFFFFF00)
+#define PSB_2D_ROP3A_SHIFT (0)
+
+#define PSB_2D_ROP4_MASK (0x0000FFFF)
+/*
+ * DWORD0: (Only pass if Pattern control == Use Fill Colour)
+ * Fill Colour RGBA8888
+ */
+#define PSB_2D_FILLCOLOUR_MASK (0xFFFFFFFF)
+#define PSB_2D_FILLCOLOUR_SHIFT (0)
+/*
+ * DWORD1: (Always Present)
+ * X Start (Dest)
+ * Y Start (Dest)
+ */
+#define PSB_2D_DST_XSTART_MASK (0x00FFF000)
+#define PSB_2D_DST_XSTART_CLRMASK (0xFF000FFF)
+#define PSB_2D_DST_XSTART_SHIFT (12)
+#define PSB_2D_DST_YSTART_MASK (0x00000FFF)
+#define PSB_2D_DST_YSTART_CLRMASK (0xFFFFF000)
+#define PSB_2D_DST_YSTART_SHIFT (0)
+/*
+ * DWORD2: (Always Present)
+ * X Size (Dest)
+ * Y Size (Dest)
+ */
+#define PSB_2D_DST_XSIZE_MASK (0x00FFF000)
+#define PSB_2D_DST_XSIZE_CLRMASK (0xFF000FFF)
+#define PSB_2D_DST_XSIZE_SHIFT (12)
+#define PSB_2D_DST_YSIZE_MASK (0x00000FFF)
+#define PSB_2D_DST_YSIZE_CLRMASK (0xFFFFF000)
+#define PSB_2D_DST_YSIZE_SHIFT (0)
+
+/*
+ * Source Surface (PSB_2D_SRC_SURF_BH)
+ */
+/*
+ * WORD 0
+ */
+
+#define PSB_2D_SRC_FORMAT_MASK (0x00078000)
+#define PSB_2D_SRC_1_PAL (0x00000000)
+#define PSB_2D_SRC_2_PAL (0x00008000)
+#define PSB_2D_SRC_4_PAL (0x00010000)
+#define PSB_2D_SRC_8_PAL (0x00018000)
+#define PSB_2D_SRC_8_ALPHA (0x00020000)
+#define PSB_2D_SRC_4_ALPHA (0x00028000)
+#define PSB_2D_SRC_332RGB (0x00030000)
+#define PSB_2D_SRC_4444ARGB (0x00038000)
+#define PSB_2D_SRC_555RGB (0x00040000)
+#define PSB_2D_SRC_1555ARGB (0x00048000)
+#define PSB_2D_SRC_565RGB (0x00050000)
+#define PSB_2D_SRC_0888ARGB (0x00058000)
+#define PSB_2D_SRC_8888ARGB (0x00060000)
+#define PSB_2D_SRC_8888UYVY (0x00068000)
+#define PSB_2D_SRC_RESERVED (0x00070000)
+#define PSB_2D_SRC_1555ARGB_LOOKUP (0x00078000)
+
+
+#define PSB_2D_SRC_STRIDE_MASK (0x00007FFF)
+#define PSB_2D_SRC_STRIDE_CLRMASK (0xFFFF8000)
+#define PSB_2D_SRC_STRIDE_SHIFT (0)
+/*
+ * WORD 1 - Base Address
+ */
+#define PSB_2D_SRC_ADDR_MASK (0x0FFFFFFC)
+#define PSB_2D_SRC_ADDR_CLRMASK (0x00000003)
+#define PSB_2D_SRC_ADDR_SHIFT (2)
+#define PSB_2D_SRC_ADDR_ALIGNSHIFT (2)
+
+/*
+ * Pattern Surface (PSB_2D_PAT_SURF_BH)
+ */
+/*
+ * WORD 0
+ */
+
+#define PSB_2D_PAT_FORMAT_MASK (0x00078000)
+#define PSB_2D_PAT_1_PAL (0x00000000)
+#define PSB_2D_PAT_2_PAL (0x00008000)
+#define PSB_2D_PAT_4_PAL (0x00010000)
+#define PSB_2D_PAT_8_PAL (0x00018000)
+#define PSB_2D_PAT_8_ALPHA (0x00020000)
+#define PSB_2D_PAT_4_ALPHA (0x00028000)
+#define PSB_2D_PAT_332RGB (0x00030000)
+#define PSB_2D_PAT_4444ARGB (0x00038000)
+#define PSB_2D_PAT_555RGB (0x00040000)
+#define PSB_2D_PAT_1555ARGB (0x00048000)
+#define PSB_2D_PAT_565RGB (0x00050000)
+#define PSB_2D_PAT_0888ARGB (0x00058000)
+#define PSB_2D_PAT_8888ARGB (0x00060000)
+
+#define PSB_2D_PAT_STRIDE_MASK (0x00007FFF)
+#define PSB_2D_PAT_STRIDE_CLRMASK (0xFFFF8000)
+#define PSB_2D_PAT_STRIDE_SHIFT (0)
+/*
+ * WORD 1 - Base Address
+ */
+#define PSB_2D_PAT_ADDR_MASK (0x0FFFFFFC)
+#define PSB_2D_PAT_ADDR_CLRMASK (0x00000003)
+#define PSB_2D_PAT_ADDR_SHIFT (2)
+#define PSB_2D_PAT_ADDR_ALIGNSHIFT (2)
+
+/*
+ * Destination Surface (PSB_2D_DST_SURF_BH)
+ */
+/*
+ * WORD 0
+ */
+
+#define PSB_2D_DST_FORMAT_MASK (0x00078000)
+#define PSB_2D_DST_332RGB (0x00030000)
+#define PSB_2D_DST_4444ARGB (0x00038000)
+#define PSB_2D_DST_555RGB (0x00040000)
+#define PSB_2D_DST_1555ARGB (0x00048000)
+#define PSB_2D_DST_565RGB (0x00050000)
+#define PSB_2D_DST_0888ARGB (0x00058000)
+#define PSB_2D_DST_8888ARGB (0x00060000)
+#define PSB_2D_DST_8888AYUV (0x00070000)
+
+#define PSB_2D_DST_STRIDE_MASK (0x00007FFF)
+#define PSB_2D_DST_STRIDE_CLRMASK (0xFFFF8000)
+#define PSB_2D_DST_STRIDE_SHIFT (0)
+/*
+ * WORD 1 - Base Address
+ */
+#define PSB_2D_DST_ADDR_MASK (0x0FFFFFFC)
+#define PSB_2D_DST_ADDR_CLRMASK (0x00000003)
+#define PSB_2D_DST_ADDR_SHIFT (2)
+#define PSB_2D_DST_ADDR_ALIGNSHIFT (2)
+
+/*
+ * Mask Surface (PSB_2D_MASK_SURF_BH)
+ */
+/*
+ * WORD 0
+ */
+#define PSB_2D_MASK_STRIDE_MASK (0x00007FFF)
+#define PSB_2D_MASK_STRIDE_CLRMASK (0xFFFF8000)
+#define PSB_2D_MASK_STRIDE_SHIFT (0)
+/*
+ * WORD 1 - Base Address
+ */
+#define PSB_2D_MASK_ADDR_MASK (0x0FFFFFFC)
+#define PSB_2D_MASK_ADDR_CLRMASK (0x00000003)
+#define PSB_2D_MASK_ADDR_SHIFT (2)
+#define PSB_2D_MASK_ADDR_ALIGNSHIFT (2)
+
+/*
+ * Source Palette (PSB_2D_SRC_PAL_BH)
+ */
+
+#define PSB_2D_SRCPAL_ADDR_SHIFT (0)
+#define PSB_2D_SRCPAL_ADDR_CLRMASK (0xF0000007)
+#define PSB_2D_SRCPAL_ADDR_MASK (0x0FFFFFF8)
+#define PSB_2D_SRCPAL_BYTEALIGN (1024)
+
+/*
+ * Pattern Palette (PSB_2D_PAT_PAL_BH)
+ */
+
+#define PSB_2D_PATPAL_ADDR_SHIFT (0)
+#define PSB_2D_PATPAL_ADDR_CLRMASK (0xF0000007)
+#define PSB_2D_PATPAL_ADDR_MASK (0x0FFFFFF8)
+#define PSB_2D_PATPAL_BYTEALIGN (1024)
+
+/*
+ * Rop3 Codes (2 LS bytes)
+ */
+
+#define PSB_2D_ROP3_SRCCOPY (0xCCCC)
+#define PSB_2D_ROP3_PATCOPY (0xF0F0)
+#define PSB_2D_ROP3_WHITENESS (0xFFFF)
+#define PSB_2D_ROP3_BLACKNESS (0x0000)
+#define PSB_2D_ROP3_SRC (0xCC)
+#define PSB_2D_ROP3_PAT (0xF0)
+#define PSB_2D_ROP3_DST (0xAA)
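To make the rop defines concrete: per the Tungsten Graphics note earlier in this file, the blit control word carries two ROP3 codes, A in bits 0-7 and B in bits 8-15, and choosing the same code for both means the mask surface is never read. A minimal stand-alone illustration of that packing (not taken from the driver):

/* Illustrative only: pack two ROP3 codes using the shifts defined above and
 * check the source-copy case against PSB_2D_ROP3_SRCCOPY. */
#include <stdint.h>
#include <stdio.h>

#define PSB_2D_ROP3B_SHIFT	(8)
#define PSB_2D_ROP3A_SHIFT	(0)
#define PSB_2D_ROP3_SRC		(0xCC)
#define PSB_2D_ROP3_SRCCOPY	(0xCCCC)

static uint32_t psb_2d_rop4(uint32_t rop3a, uint32_t rop3b)
{
	/* Identical A and B codes mean the mask surface need not be set up. */
	return (rop3b << PSB_2D_ROP3B_SHIFT) | (rop3a << PSB_2D_ROP3A_SHIFT);
}

int main(void)
{
	uint32_t rop = psb_2d_rop4(PSB_2D_ROP3_SRC, PSB_2D_ROP3_SRC);

	printf("rop4 = 0x%04X (%s)\n", rop,
	       rop == PSB_2D_ROP3_SRCCOPY ? "matches PSB_2D_ROP3_SRCCOPY" : "mismatch");
	return 0;
}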
+
+/*
+ * Sizes.
+ */
+
+#define PSB_SCENE_HW_COOKIE_SIZE 16
+#define PSB_TA_MEM_HW_COOKIE_SIZE 16
+
+/*
+ * Scene stuff.
+ */
+
+#define PSB_NUM_HW_SCENES 2
+
+/*
+ * Scheduler completion actions.
+ */
+
+#define PSB_RASTER_BLOCK 0
+#define PSB_RASTER 1
+#define PSB_RETURN 2
+#define PSB_TA 3
+
+/* Power management */
+#define PSB_PUNIT_PORT 0x04
+#define PSB_OSPMBA 0x78
+#define PSB_APMBA 0x7a
+#define PSB_APM_CMD 0x0
+#define PSB_APM_STS 0x04
+#define PSB_PWRGT_VID_ENC_MASK 0x30
+#define PSB_PWRGT_VID_DEC_MASK 0xc
+#define PSB_PWRGT_GL3_MASK 0xc0
+
+#define PSB_PM_SSC 0x20
+#define PSB_PM_SSS 0x30
+#define PSB_PWRGT_DISPLAY_MASK 0xc /* on a different BA than video/gfx */
+#define MDFLD_PWRGT_DISPLAY_A_CNTR 0x0000000c
+#define MDFLD_PWRGT_DISPLAY_B_CNTR 0x0000c000
+#define MDFLD_PWRGT_DISPLAY_C_CNTR 0x00030000
+#define MDFLD_PWRGT_DISP_MIPI_CNTR 0x000c0000
+#define MDFLD_PWRGT_DISPLAY_CNTR (MDFLD_PWRGT_DISPLAY_A_CNTR | MDFLD_PWRGT_DISPLAY_B_CNTR | MDFLD_PWRGT_DISPLAY_C_CNTR | MDFLD_PWRGT_DISP_MIPI_CNTR) /* 0x000fc00c */
+/* Display SSS register bits are different in A0 vs. B0 */
+#define PSB_PWRGT_GFX_MASK 0x3
+#define MDFLD_PWRGT_DISPLAY_A_STS 0x000000c0
+#define MDFLD_PWRGT_DISPLAY_B_STS 0x00000300
+#define MDFLD_PWRGT_DISPLAY_C_STS 0x00000c00
+#define PSB_PWRGT_GFX_MASK_B0 0xc3
+#define MDFLD_PWRGT_DISPLAY_A_STS_B0 0x0000000c
+#define MDFLD_PWRGT_DISPLAY_B_STS_B0 0x0000c000
+#define MDFLD_PWRGT_DISPLAY_C_STS_B0 0x00030000
+#define MDFLD_PWRGT_DISP_MIPI_STS 0x000c0000
+#define MDFLD_PWRGT_DISPLAY_STS_A0 (MDFLD_PWRGT_DISPLAY_A_STS | MDFLD_PWRGT_DISPLAY_B_STS | MDFLD_PWRGT_DISPLAY_C_STS | MDFLD_PWRGT_DISP_MIPI_STS) /* 0x000c0fc0 */
+#define MDFLD_PWRGT_DISPLAY_STS_B0 (MDFLD_PWRGT_DISPLAY_A_STS_B0 | MDFLD_PWRGT_DISPLAY_B_STS_B0 | MDFLD_PWRGT_DISPLAY_C_STS_B0 | MDFLD_PWRGT_DISP_MIPI_STS) /* 0x000fc00c */
+#endif
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index 8f371e8d630f..f7c17b239833 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -222,8 +222,6 @@ static int i810_dma_cleanup(struct drm_device *dev)
pci_free_consistent(dev->pdev, PAGE_SIZE,
dev_priv->hw_status_page,
dev_priv->dma_status_page);
- /* Need to rewrite hardware status page */
- I810_WRITE(0x02080, 0x1ffff000);
}
kfree(dev->dev_private);
dev->dev_private = NULL;
@@ -888,7 +886,7 @@ static int i810_flush_queue(struct drm_device *dev)
}
/* Must be called with the lock held */
-static void i810_reclaim_buffers(struct drm_device *dev,
+void i810_driver_reclaim_buffers(struct drm_device *dev,
struct drm_file *file_priv)
{
struct drm_device_dma *dma = dev->dma;
@@ -1225,12 +1223,17 @@ void i810_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
if (dev_priv->page_flipping)
i810_do_cleanup_pageflip(dev);
}
-}
-void i810_driver_reclaim_buffers_locked(struct drm_device *dev,
- struct drm_file *file_priv)
-{
- i810_reclaim_buffers(dev, file_priv);
+ if (file_priv->master && file_priv->master->lock.hw_lock) {
+ drm_idlelock_take(&file_priv->master->lock);
+ i810_driver_reclaim_buffers(dev, file_priv);
+ drm_idlelock_release(&file_priv->master->lock);
+ } else {
+ /* master disappeared, clean up stuff anyway and hope nothing
+ * goes wrong */
+ i810_driver_reclaim_buffers(dev, file_priv);
+ }
+
}
int i810_driver_dma_quiescent(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c
index d4266bdf6fb4..053f1ee58393 100644
--- a/drivers/gpu/drm/i810/i810_drv.c
+++ b/drivers/gpu/drm/i810/i810_drv.c
@@ -43,6 +43,17 @@ static struct pci_device_id pciidlist[] = {
i810_PCI_IDS
};
+static const struct file_operations i810_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = drm_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+ .llseek = noop_llseek,
+};
+
static struct drm_driver driver = {
.driver_features =
DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR |
@@ -52,20 +63,9 @@ static struct drm_driver driver = {
.lastclose = i810_driver_lastclose,
.preclose = i810_driver_preclose,
.device_is_agp = i810_driver_device_is_agp,
- .reclaim_buffers_locked = i810_driver_reclaim_buffers_locked,
.dma_quiescent = i810_driver_dma_quiescent,
.ioctls = i810_ioctls,
- .fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = drm_mmap,
- .poll = drm_poll,
- .fasync = drm_fasync,
- .llseek = noop_llseek,
- },
-
+ .fops = &i810_driver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
index c9339f481795..6e0acad9e0f5 100644
--- a/drivers/gpu/drm/i810/i810_drv.h
+++ b/drivers/gpu/drm/i810/i810_drv.h
@@ -116,14 +116,12 @@ typedef struct drm_i810_private {
/* i810_dma.c */
extern int i810_driver_dma_quiescent(struct drm_device *dev);
-extern void i810_driver_reclaim_buffers_locked(struct drm_device *dev,
- struct drm_file *file_priv);
+void i810_driver_reclaim_buffers(struct drm_device *dev,
+ struct drm_file *file_priv);
extern int i810_driver_load(struct drm_device *, unsigned long flags);
extern void i810_driver_lastclose(struct drm_device *dev);
extern void i810_driver_preclose(struct drm_device *dev,
struct drm_file *file_priv);
-extern void i810_driver_reclaim_buffers_locked(struct drm_device *dev,
- struct drm_file *file_priv);
extern int i810_driver_device_is_agp(struct drm_device *dev);
extern long i810_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 0ae6a7c5020f..808b255d7fc6 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -28,6 +28,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
intel_dvo.o \
intel_ringbuffer.o \
intel_overlay.o \
+ intel_sprite.o \
intel_opregion.o \
dvo_ch7xxx.o \
dvo_ch7017.o \
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index d09a6e02dc95..11807989f918 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -62,6 +62,7 @@ static int i915_capabilities(struct seq_file *m, void *data)
const struct intel_device_info *info = INTEL_INFO(dev);
seq_printf(m, "gen: %d\n", info->gen);
+ seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
#define B(x) seq_printf(m, #x ": %s\n", yesno(info->x))
B(is_mobile);
B(is_i85x);
@@ -1000,7 +1001,7 @@ static int i915_inttoext_table(struct seq_file *m, void *unused)
return 0;
}
-static int i915_drpc_info(struct seq_file *m, void *unused)
+static int ironlake_drpc_info(struct seq_file *m)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
@@ -1067,6 +1068,90 @@ static int i915_drpc_info(struct seq_file *m, void *unused)
return 0;
}
+static int gen6_drpc_info(struct seq_file *m)
+{
+
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 rpmodectl1, gt_core_status, rcctl1;
+ int count = 0, ret;
+
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ if (atomic_read(&dev_priv->forcewake_count)) {
+ seq_printf(m, "RC information inaccurate because userspace "
+ "holds a reference \n");
+ } else {
+ /* NB: we cannot use forcewake, else we read the wrong values */
+ while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
+ udelay(10);
+ seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
+ }
+
+ gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS);
+ trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4);
+
+ rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
+ rcctl1 = I915_READ(GEN6_RC_CONTROL);
+ mutex_unlock(&dev->struct_mutex);
+
+ seq_printf(m, "Video Turbo Mode: %s\n",
+ yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
+ seq_printf(m, "HW control enabled: %s\n",
+ yesno(rpmodectl1 & GEN6_RP_ENABLE));
+ seq_printf(m, "SW control enabled: %s\n",
+ yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
+ GEN6_RP_MEDIA_SW_MODE));
+ seq_printf(m, "RC6 Enabled: %s\n",
+ yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
+ seq_printf(m, "RC6 Enabled: %s\n",
+ yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
+ seq_printf(m, "Deep RC6 Enabled: %s\n",
+ yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
+ seq_printf(m, "Deepest RC6 Enabled: %s\n",
+ yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
+ seq_printf(m, "Current RC state: ");
+ switch (gt_core_status & GEN6_RCn_MASK) {
+ case GEN6_RC0:
+ if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
+ seq_printf(m, "Core Power Down\n");
+ else
+ seq_printf(m, "on\n");
+ break;
+ case GEN6_RC3:
+ seq_printf(m, "RC3\n");
+ break;
+ case GEN6_RC6:
+ seq_printf(m, "RC6\n");
+ break;
+ case GEN6_RC7:
+ seq_printf(m, "RC7\n");
+ break;
+ default:
+ seq_printf(m, "Unknown\n");
+ break;
+ }
+
+ seq_printf(m, "Core Power Down: %s\n",
+ yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
+ return 0;
+}
+
+static int i915_drpc_info(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+
+ if (IS_GEN6(dev) || IS_GEN7(dev))
+ return gen6_drpc_info(m);
+ else
+ return ironlake_drpc_info(m);
+}
+
static int i915_fbc_status(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index a9533c54c93c..5f4d5893e983 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -781,6 +781,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_RELAXED_DELTA:
value = 1;
break;
+ case I915_PARAM_HAS_GEN7_SOL_RESET:
+ value = 1;
+ break;
default:
DRM_DEBUG_DRIVER("Unknown parameter %d\n",
param->param);
@@ -1454,6 +1457,14 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
diff1 = now - dev_priv->last_time1;
+ /* Prevent division-by-zero if we are asking too fast.
+ * Also, we don't get interesting results if we are polling
+ * faster than once in 10ms, so just return the saved value
+ * in such cases.
+ */
+ if (diff1 <= 10)
+ return dev_priv->chipset_power;
+
count1 = I915_READ(DMIEC);
count2 = I915_READ(DDREC);
count3 = I915_READ(CSIEC);
@@ -1484,6 +1495,8 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
dev_priv->last_count1 = total_count;
dev_priv->last_time1 = now;
+ dev_priv->chipset_power = ret;
+
return ret;
}
@@ -2295,6 +2308,8 @@ struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
};
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 15bfa9145d2b..8f7187915b0d 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -58,15 +58,15 @@ module_param_named(powersave, i915_powersave, int, 0600);
MODULE_PARM_DESC(powersave,
"Enable powersavings, fbc, downclocking, etc. (default: true)");
-unsigned int i915_semaphores __read_mostly = 0;
+int i915_semaphores __read_mostly = -1;
module_param_named(semaphores, i915_semaphores, int, 0600);
MODULE_PARM_DESC(semaphores,
- "Use semaphores for inter-ring sync (default: false)");
+ "Use semaphores for inter-ring sync (default: -1 (use per-chip defaults))");
-unsigned int i915_enable_rc6 __read_mostly = 0;
+int i915_enable_rc6 __read_mostly = -1;
module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
MODULE_PARM_DESC(i915_enable_rc6,
- "Enable power-saving render C-state 6 (default: true)");
+ "Enable power-saving render C-state 6 (default: -1 (use per-chip default)");
int i915_enable_fbc __read_mostly = -1;
module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
@@ -328,7 +328,7 @@ void intel_detect_pch(struct drm_device *dev)
}
}
-static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
+void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
int count;
@@ -344,6 +344,22 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
udelay(10);
}
+void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
+{
+ int count;
+
+ count = 0;
+ while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1))
+ udelay(10);
+
+ I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 1);
+ POSTING_READ(FORCEWAKE_MT);
+
+ count = 0;
+ while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1) == 0)
+ udelay(10);
+}
+
/*
* Generally this is called implicitly by the register read function. However,
* if some sequence requires the GT to not power down then this function should
@@ -356,15 +372,21 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
/* Forcewake is atomic in case we get in here without the lock */
if (atomic_add_return(1, &dev_priv->forcewake_count) == 1)
- __gen6_gt_force_wake_get(dev_priv);
+ dev_priv->display.force_wake_get(dev_priv);
}
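As the comment in this hunk says, forcewake is normally taken implicitly by the register read path; the exported get/put pair exists for sequences that must keep the GT awake across several accesses. A hedged sketch of that pattern follows; the function and the particular registers are chosen for illustration only, and it assumes struct_mutex is held, matching the WARN_ON in gen6_gt_force_wake_put().

/* Illustrative usage only, not part of this patch: keep the GT awake while
 * reading several registers back to back. Caller holds dev->struct_mutex. */
static void example_read_rc_regs(struct drm_i915_private *dev_priv,
				 u32 *rp, u32 *rc)
{
	gen6_gt_force_wake_get(dev_priv);	/* routes to the per-chip force_wake hook */

	*rp = I915_READ(GEN6_RP_CONTROL);
	*rc = I915_READ(GEN6_RC_CONTROL);

	gen6_gt_force_wake_put(dev_priv);	/* GT may power down again */
}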
-static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
+void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE, 0);
POSTING_READ(FORCEWAKE);
}
+void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 0);
+ POSTING_READ(FORCEWAKE_MT);
+}
+
/*
* see gen6_gt_force_wake_get()
*/
@@ -373,7 +395,7 @@ void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
if (atomic_dec_and_test(&dev_priv->forcewake_count))
- __gen6_gt_force_wake_put(dev_priv);
+ dev_priv->display.force_wake_put(dev_priv);
}
void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
@@ -788,6 +810,21 @@ static struct vm_operations_struct i915_gem_vm_ops = {
.close = drm_gem_vm_close,
};
+static const struct file_operations i915_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = drm_gem_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+ .read = drm_read,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = i915_compat_ioctl,
+#endif
+ .llseek = noop_llseek,
+};
+
static struct drm_driver driver = {
/* Don't use MTRRs here; the Xserver or userspace app should
* deal with them for Intel hardware.
@@ -821,21 +858,7 @@ static struct drm_driver driver = {
.dumb_map_offset = i915_gem_mmap_gtt,
.dumb_destroy = i915_gem_dumb_destroy,
.ioctls = i915_ioctls,
- .fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = drm_gem_mmap,
- .poll = drm_poll,
- .fasync = drm_fasync,
- .read = drm_read,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = i915_compat_ioctl,
-#endif
- .llseek = noop_llseek,
- },
-
+ .fops = &i915_driver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
@@ -900,12 +923,6 @@ MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");
-/* We give fast paths for the really cool registers */
-#define NEEDS_FORCE_WAKE(dev_priv, reg) \
- (((dev_priv)->info->gen >= 6) && \
- ((reg) < 0x40000) && \
- ((reg) != FORCEWAKE))
-
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
u##x val = 0; \
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 4a9c1b979804..602bc80baabb 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -107,6 +107,7 @@ struct opregion_header;
struct opregion_acpi;
struct opregion_swsci;
struct opregion_asle;
+struct drm_i915_private;
struct intel_opregion {
struct opregion_header *header;
@@ -206,6 +207,8 @@ struct drm_i915_display_funcs {
int (*get_display_clock_speed)(struct drm_device *dev);
int (*get_fifo_size)(struct drm_device *dev, int plane);
void (*update_wm)(struct drm_device *dev);
+ void (*update_sprite_wm)(struct drm_device *dev, int pipe,
+ uint32_t sprite_width, int pixel_size);
int (*crtc_mode_set)(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
@@ -221,6 +224,8 @@ struct drm_i915_display_funcs {
struct drm_i915_gem_object *obj);
int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
int x, int y);
+ void (*force_wake_get)(struct drm_i915_private *dev_priv);
+ void (*force_wake_put)(struct drm_i915_private *dev_priv);
/* clock updates for mode set */
/* cursor updates */
/* render clock increase/decrease */
@@ -334,6 +339,8 @@ typedef struct drm_i915_private {
struct timer_list hangcheck_timer;
int hangcheck_count;
uint32_t last_acthd;
+ uint32_t last_acthd_bsd;
+ uint32_t last_acthd_blt;
uint32_t last_instdone;
uint32_t last_instdone1;
@@ -347,6 +354,7 @@ typedef struct drm_i915_private {
/* overlay */
struct intel_overlay *overlay;
+ bool sprite_scaling_enabled;
/* LVDS info */
int backlight_level; /* restore backlight to this value */
@@ -710,6 +718,7 @@ typedef struct drm_i915_private {
u64 last_count1;
unsigned long last_time1;
+ unsigned long chipset_power;
u64 last_count2;
struct timespec last_time2;
unsigned long gfx_power;
@@ -998,11 +1007,11 @@ extern int i915_max_ioctl;
extern unsigned int i915_fbpercrtc __always_unused;
extern int i915_panel_ignore_lid __read_mostly;
extern unsigned int i915_powersave __read_mostly;
-extern unsigned int i915_semaphores __read_mostly;
+extern int i915_semaphores __read_mostly;
extern unsigned int i915_lvds_downclock __read_mostly;
extern int i915_panel_use_ssc __read_mostly;
extern int i915_vbt_sdvo_panel_type __read_mostly;
-extern unsigned int i915_enable_rc6 __read_mostly;
+extern int i915_enable_rc6 __read_mostly;
extern int i915_enable_fbc __read_mostly;
extern bool i915_enable_hangcheck __read_mostly;
@@ -1308,6 +1317,11 @@ extern void gen6_set_rps(struct drm_device *dev, u8 val);
extern void intel_detect_pch(struct drm_device *dev);
extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
+extern void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
+extern void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv);
+extern void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
+extern void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv);
+
/* overlay */
#ifdef CONFIG_DEBUG_FS
extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
@@ -1352,8 +1366,8 @@ void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
(((dev_priv)->info->gen >= 6) && \
- ((reg) < 0x40000) && \
- ((reg) != FORCEWAKE))
+ ((reg) < 0x40000) && \
+ ((reg) != FORCEWAKE))
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8359dc777041..e55badb2d86d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2006,9 +2006,9 @@ i915_wait_request(struct intel_ring_buffer *ring,
|| atomic_read(&dev_priv->mm.wedged));
ring->irq_put(ring);
- } else if (wait_for(i915_seqno_passed(ring->get_seqno(ring),
- seqno) ||
- atomic_read(&dev_priv->mm.wedged), 3000))
+ } else if (wait_for_atomic(i915_seqno_passed(ring->get_seqno(ring),
+ seqno) ||
+ atomic_read(&dev_priv->mm.wedged), 3000))
ret = -EBUSY;
ring->waiting_seqno = 0;
@@ -3309,6 +3309,10 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
ret = -EIO;
+ } else if (wait_for_atomic(i915_seqno_passed(ring->get_seqno(ring),
+ seqno) ||
+ atomic_read(&dev_priv->mm.wedged), 3000)) {
+ ret = -EBUSY;
}
}
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 3693e83a97f3..65e1f0043f9d 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -32,6 +32,7 @@
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
+#include <linux/dma_remapping.h>
struct change_domains {
uint32_t invalidate_domains;
@@ -746,6 +747,22 @@ i915_gem_execbuffer_flush(struct drm_device *dev,
return 0;
}
+static bool
+intel_enable_semaphores(struct drm_device *dev)
+{
+ if (INTEL_INFO(dev)->gen < 6)
+ return 0;
+
+ if (i915_semaphores >= 0)
+ return i915_semaphores;
+
+ /* Disable semaphores on SNB */
+ if (INTEL_INFO(dev)->gen == 6)
+ return 0;
+
+ return 1;
+}
+
static int
i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *to)
@@ -758,7 +775,7 @@ i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
return 0;
/* XXX gpu semaphores are implicated in various hard hangs on SNB */
- if (INTEL_INFO(obj->base.dev)->gen < 6 || !i915_semaphores)
+ if (!intel_enable_semaphores(obj->base.dev))
return i915_gem_object_wait_rendering(obj);
idx = intel_ring_sync_index(from, to);
@@ -954,6 +971,31 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev,
}
static int
+i915_reset_gen7_sol_offsets(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret, i;
+
+ if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS])
+ return 0;
+
+ ret = intel_ring_begin(ring, 4 * 3);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < 4; i++) {
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i));
+ intel_ring_emit(ring, 0);
+ }
+
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file,
struct drm_i915_gem_execbuffer2 *args,
@@ -967,6 +1009,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct intel_ring_buffer *ring;
u32 exec_start, exec_len;
u32 seqno;
+ u32 mask;
int ret, mode, i;
if (!i915_gem_check_execbuffer(args)) {
@@ -1004,6 +1047,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}
mode = args->flags & I915_EXEC_CONSTANTS_MASK;
+ mask = I915_EXEC_CONSTANTS_MASK;
switch (mode) {
case I915_EXEC_CONSTANTS_REL_GENERAL:
case I915_EXEC_CONSTANTS_ABSOLUTE:
@@ -1017,18 +1061,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
mode == I915_EXEC_CONSTANTS_REL_SURFACE)
return -EINVAL;
- ret = intel_ring_begin(ring, 4);
- if (ret)
- return ret;
-
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit(ring, INSTPM);
- intel_ring_emit(ring,
- I915_EXEC_CONSTANTS_MASK << 16 | mode);
- intel_ring_advance(ring);
-
- dev_priv->relative_constants_mode = mode;
+ /* The HW changed the meaning of this bit on gen6 */
+ if (INTEL_INFO(dev)->gen >= 6)
+ mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
}
break;
default:
@@ -1159,6 +1194,27 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}
}
+ if (ring == &dev_priv->ring[RCS] &&
+ mode != dev_priv->relative_constants_mode) {
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ goto err;
+
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit(ring, INSTPM);
+ intel_ring_emit(ring, mask << 16 | mode);
+ intel_ring_advance(ring);
+
+ dev_priv->relative_constants_mode = mode;
+ }
+
+ if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
+ ret = i915_reset_gen7_sol_offsets(dev, ring);
+ if (ret)
+ goto err;
+ }
+
trace_i915_gem_ring_dispatch(ring, seqno);
exec_start = batch_obj->gtt_offset + args->batch_start_offset;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index b40004b55977..5d433fc11ace 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1205,7 +1205,7 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
} else {
int dspaddr = DSPADDR(intel_crtc->plane);
stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
- crtc->y * crtc->fb->pitch +
+ crtc->y * crtc->fb->pitches[0] +
crtc->x * crtc->fb->bits_per_pixel/8);
}
@@ -1649,13 +1649,6 @@ static bool kick_ring(struct intel_ring_buffer *ring)
I915_WRITE_CTL(ring, tmp);
return true;
}
- if (IS_GEN6(dev) &&
- (tmp & RING_WAIT_SEMAPHORE)) {
- DRM_ERROR("Kicking stuck semaphore on %s\n",
- ring->name);
- I915_WRITE_CTL(ring, tmp);
- return true;
- }
return false;
}
@@ -1669,7 +1662,7 @@ void i915_hangcheck_elapsed(unsigned long data)
{
struct drm_device *dev = (struct drm_device *)data;
drm_i915_private_t *dev_priv = dev->dev_private;
- uint32_t acthd, instdone, instdone1;
+ uint32_t acthd, instdone, instdone1, acthd_bsd, acthd_blt;
bool err = false;
if (!i915_enable_hangcheck)
@@ -1686,16 +1679,21 @@ void i915_hangcheck_elapsed(unsigned long data)
}
if (INTEL_INFO(dev)->gen < 4) {
- acthd = I915_READ(ACTHD);
instdone = I915_READ(INSTDONE);
instdone1 = 0;
} else {
- acthd = I915_READ(ACTHD_I965);
instdone = I915_READ(INSTDONE_I965);
instdone1 = I915_READ(INSTDONE1);
}
+ acthd = intel_ring_get_active_head(&dev_priv->ring[RCS]);
+ acthd_bsd = HAS_BSD(dev) ?
+ intel_ring_get_active_head(&dev_priv->ring[VCS]) : 0;
+ acthd_blt = HAS_BLT(dev) ?
+ intel_ring_get_active_head(&dev_priv->ring[BCS]) : 0;
if (dev_priv->last_acthd == acthd &&
+ dev_priv->last_acthd_bsd == acthd_bsd &&
+ dev_priv->last_acthd_blt == acthd_blt &&
dev_priv->last_instdone == instdone &&
dev_priv->last_instdone1 == instdone1) {
if (dev_priv->hangcheck_count++ > 1) {
@@ -1727,6 +1725,8 @@ void i915_hangcheck_elapsed(unsigned long data)
dev_priv->hangcheck_count = 0;
dev_priv->last_acthd = acthd;
+ dev_priv->last_acthd_bsd = acthd_bsd;
+ dev_priv->last_acthd_blt = acthd_blt;
dev_priv->last_instdone = instdone;
dev_priv->last_instdone1 = instdone1;
}
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index b080cc824001..c3afb783cb9d 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -442,6 +442,7 @@
#define INSTPM_AGPBUSY_DIS (1<<11) /* gen3: when disabled, pending interrupts
will not assert AGPBUSY# and will only
be delivered when out of C3. */
+#define INSTPM_FORCE_ORDERING (1<<7) /* GEN6+ */
#define ACTHD 0x020c8
#define FW_BLC 0x020d8
#define FW_BLC2 0x020dc
@@ -2321,6 +2322,7 @@
#define PIPECONF_PROGRESSIVE (0 << 21)
#define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21)
#define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21)
+#define PIPECONF_INTERLACE_MASK (7 << 21)
#define PIPECONF_CXSR_DOWNCLOCK (1<<16)
#define PIPECONF_BPP_MASK (0x000000e0)
#define PIPECONF_BPP_8 (0<<5)
@@ -2459,6 +2461,8 @@
#define WM3_LP_ILK 0x45110
#define WM3_LP_EN (1<<31)
#define WM1S_LP_ILK 0x45120
+#define WM2S_LP_IVB 0x45124
+#define WM3S_LP_IVB 0x45128
#define WM1S_LP_EN (1<<31)
/* Memory latency timer register */
@@ -2675,6 +2679,140 @@
#define _DSPBSURF 0x7119C
#define _DSPBTILEOFF 0x711A4
+/* Sprite A control */
+#define _DVSACNTR 0x72180
+#define DVS_ENABLE (1<<31)
+#define DVS_GAMMA_ENABLE (1<<30)
+#define DVS_PIXFORMAT_MASK (3<<25)
+#define DVS_FORMAT_YUV422 (0<<25)
+#define DVS_FORMAT_RGBX101010 (1<<25)
+#define DVS_FORMAT_RGBX888 (2<<25)
+#define DVS_FORMAT_RGBX161616 (3<<25)
+#define DVS_SOURCE_KEY (1<<22)
+#define DVS_RGB_ORDER_RGBX (1<<20)
+#define DVS_YUV_BYTE_ORDER_MASK (3<<16)
+#define DVS_YUV_ORDER_YUYV (0<<16)
+#define DVS_YUV_ORDER_UYVY (1<<16)
+#define DVS_YUV_ORDER_YVYU (2<<16)
+#define DVS_YUV_ORDER_VYUY (3<<16)
+#define DVS_DEST_KEY (1<<2)
+#define DVS_TRICKLE_FEED_DISABLE (1<<14)
+#define DVS_TILED (1<<10)
+#define _DVSALINOFF 0x72184
+#define _DVSASTRIDE 0x72188
+#define _DVSAPOS 0x7218c
+#define _DVSASIZE 0x72190
+#define _DVSAKEYVAL 0x72194
+#define _DVSAKEYMSK 0x72198
+#define _DVSASURF 0x7219c
+#define _DVSAKEYMAXVAL 0x721a0
+#define _DVSATILEOFF 0x721a4
+#define _DVSASURFLIVE 0x721ac
+#define _DVSASCALE 0x72204
+#define DVS_SCALE_ENABLE (1<<31)
+#define DVS_FILTER_MASK (3<<29)
+#define DVS_FILTER_MEDIUM (0<<29)
+#define DVS_FILTER_ENHANCING (1<<29)
+#define DVS_FILTER_SOFTENING (2<<29)
+#define DVS_VERTICAL_OFFSET_HALF (1<<28) /* must be enabled below */
+#define DVS_VERTICAL_OFFSET_ENABLE (1<<27)
+#define _DVSAGAMC 0x72300
+
+#define _DVSBCNTR 0x73180
+#define _DVSBLINOFF 0x73184
+#define _DVSBSTRIDE 0x73188
+#define _DVSBPOS 0x7318c
+#define _DVSBSIZE 0x73190
+#define _DVSBKEYVAL 0x73194
+#define _DVSBKEYMSK 0x73198
+#define _DVSBSURF 0x7319c
+#define _DVSBKEYMAXVAL 0x731a0
+#define _DVSBTILEOFF 0x731a4
+#define _DVSBSURFLIVE 0x731ac
+#define _DVSBSCALE 0x73204
+#define _DVSBGAMC 0x73300
+
+#define DVSCNTR(pipe) _PIPE(pipe, _DVSACNTR, _DVSBCNTR)
+#define DVSLINOFF(pipe) _PIPE(pipe, _DVSALINOFF, _DVSBLINOFF)
+#define DVSSTRIDE(pipe) _PIPE(pipe, _DVSASTRIDE, _DVSBSTRIDE)
+#define DVSPOS(pipe) _PIPE(pipe, _DVSAPOS, _DVSBPOS)
+#define DVSSURF(pipe) _PIPE(pipe, _DVSASURF, _DVSBSURF)
+#define DVSKEYMAX(pipe) _PIPE(pipe, _DVSAKEYMAXVAL, _DVSBKEYMAXVAL)
+#define DVSSIZE(pipe) _PIPE(pipe, _DVSASIZE, _DVSBSIZE)
+#define DVSSCALE(pipe) _PIPE(pipe, _DVSASCALE, _DVSBSCALE)
+#define DVSTILEOFF(pipe) _PIPE(pipe, _DVSATILEOFF, _DVSBTILEOFF)
+#define DVSKEYVAL(pipe) _PIPE(pipe, _DVSAKEYVAL, _DVSBKEYVAL)
+#define DVSKEYMSK(pipe) _PIPE(pipe, _DVSAKEYMSK, _DVSBKEYMSK)
+
+#define _SPRA_CTL 0x70280
+#define SPRITE_ENABLE (1<<31)
+#define SPRITE_GAMMA_ENABLE (1<<30)
+#define SPRITE_PIXFORMAT_MASK (7<<25)
+#define SPRITE_FORMAT_YUV422 (0<<25)
+#define SPRITE_FORMAT_RGBX101010 (1<<25)
+#define SPRITE_FORMAT_RGBX888 (2<<25)
+#define SPRITE_FORMAT_RGBX161616 (3<<25)
+#define SPRITE_FORMAT_YUV444 (4<<25)
+#define SPRITE_FORMAT_XR_BGR101010 (5<<25) /* Extended range */
+#define SPRITE_CSC_ENABLE (1<<24)
+#define SPRITE_SOURCE_KEY (1<<22)
+#define SPRITE_RGB_ORDER_RGBX (1<<20) /* only for 888 and 161616 */
+#define SPRITE_YUV_TO_RGB_CSC_DISABLE (1<<19)
+#define SPRITE_YUV_CSC_FORMAT_BT709 (1<<18) /* 0 is BT601 */
+#define SPRITE_YUV_BYTE_ORDER_MASK (3<<16)
+#define SPRITE_YUV_ORDER_YUYV (0<<16)
+#define SPRITE_YUV_ORDER_UYVY (1<<16)
+#define SPRITE_YUV_ORDER_YVYU (2<<16)
+#define SPRITE_YUV_ORDER_VYUY (3<<16)
+#define SPRITE_TRICKLE_FEED_DISABLE (1<<14)
+#define SPRITE_INT_GAMMA_ENABLE (1<<13)
+#define SPRITE_TILED (1<<10)
+#define SPRITE_DEST_KEY (1<<2)
+#define _SPRA_LINOFF 0x70284
+#define _SPRA_STRIDE 0x70288
+#define _SPRA_POS 0x7028c
+#define _SPRA_SIZE 0x70290
+#define _SPRA_KEYVAL 0x70294
+#define _SPRA_KEYMSK 0x70298
+#define _SPRA_SURF 0x7029c
+#define _SPRA_KEYMAX 0x702a0
+#define _SPRA_TILEOFF 0x702a4
+#define _SPRA_SCALE 0x70304
+#define SPRITE_SCALE_ENABLE (1<<31)
+#define SPRITE_FILTER_MASK (3<<29)
+#define SPRITE_FILTER_MEDIUM (0<<29)
+#define SPRITE_FILTER_ENHANCING (1<<29)
+#define SPRITE_FILTER_SOFTENING (2<<29)
+#define SPRITE_VERTICAL_OFFSET_HALF (1<<28) /* must be enabled below */
+#define SPRITE_VERTICAL_OFFSET_ENABLE (1<<27)
+#define _SPRA_GAMC 0x70400
+
+#define _SPRB_CTL 0x71280
+#define _SPRB_LINOFF 0x71284
+#define _SPRB_STRIDE 0x71288
+#define _SPRB_POS 0x7128c
+#define _SPRB_SIZE 0x71290
+#define _SPRB_KEYVAL 0x71294
+#define _SPRB_KEYMSK 0x71298
+#define _SPRB_SURF 0x7129c
+#define _SPRB_KEYMAX 0x712a0
+#define _SPRB_TILEOFF 0x712a4
+#define _SPRB_SCALE 0x71304
+#define _SPRB_GAMC 0x71400
+
+#define SPRCTL(pipe) _PIPE(pipe, _SPRA_CTL, _SPRB_CTL)
+#define SPRLINOFF(pipe) _PIPE(pipe, _SPRA_LINOFF, _SPRB_LINOFF)
+#define SPRSTRIDE(pipe) _PIPE(pipe, _SPRA_STRIDE, _SPRB_STRIDE)
+#define SPRPOS(pipe) _PIPE(pipe, _SPRA_POS, _SPRB_POS)
+#define SPRSIZE(pipe) _PIPE(pipe, _SPRA_SIZE, _SPRB_SIZE)
+#define SPRKEYVAL(pipe) _PIPE(pipe, _SPRA_KEYVAL, _SPRB_KEYVAL)
+#define SPRKEYMSK(pipe) _PIPE(pipe, _SPRA_KEYMSK, _SPRB_KEYMSK)
+#define SPRSURF(pipe) _PIPE(pipe, _SPRA_SURF, _SPRB_SURF)
+#define SPRKEYMAX(pipe) _PIPE(pipe, _SPRA_KEYMAX, _SPRB_KEYMAX)
+#define SPRTILEOFF(pipe) _PIPE(pipe, _SPRA_TILEOFF, _SPRB_TILEOFF)
+#define SPRSCALE(pipe) _PIPE(pipe, _SPRA_SCALE, _SPRB_SCALE)
+#define SPRGAMC(pipe) _PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC)
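The per-pipe wrappers above follow the usual i915_reg.h pattern: the pipe 1 instance of each sprite register lives one full register block above the pipe 0 instance, and _PIPE() interpolates between the two. _PIPE() itself is defined elsewhere in i915_reg.h; the form used below is an assumption reproduced only so the expansion can be checked in isolation.

/* Illustrative only: check that DVSCNTR()/SPRCTL() land on the A/B register
 * offsets listed above. The _PIPE() definition is assumed, not quoted. */
#include <stdio.h>

#define _PIPE(pipe, a, b)	((a) + (pipe) * ((b) - (a)))
#define _DVSACNTR		0x72180
#define _DVSBCNTR		0x73180
#define _SPRA_CTL		0x70280
#define _SPRB_CTL		0x71280
#define DVSCNTR(pipe)		_PIPE(pipe, _DVSACNTR, _DVSBCNTR)
#define SPRCTL(pipe)		_PIPE(pipe, _SPRA_CTL, _SPRB_CTL)

int main(void)
{
	printf("DVSCNTR: 0x%x 0x%x\n", DVSCNTR(0), DVSCNTR(1));	/* 0x72180 0x73180 */
	printf("SPRCTL:  0x%x 0x%x\n", SPRCTL(0), SPRCTL(1));	/* 0x70280 0x71280 */
	return 0;
}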
+
/* VBIOS regs */
#define VGACNTRL 0x71400
# define VGA_DISP_DISABLE (1 << 31)
@@ -2882,6 +3020,10 @@
#define ILK_DPFC_DIS1 (1<<8)
#define ILK_DPFC_DIS2 (1<<9)
+#define IVB_CHICKEN3 0x4200c
+# define CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE (1 << 5)
+# define CHICKEN3_DGMG_DONE_FIX_DISABLE (1 << 2)
+
#define DISP_ARB_CTL 0x45000
#define DISP_TILE_SURFACE_SWIZZLING (1<<13)
#define DISP_FBC_WM_DIS (1<<15)
@@ -3303,10 +3445,10 @@
/* or SDVOB */
#define HDMIB 0xe1140
#define PORT_ENABLE (1 << 31)
-#define TRANSCODER_A (0)
-#define TRANSCODER_B (1 << 30)
-#define TRANSCODER(pipe) ((pipe) << 30)
-#define TRANSCODER_MASK (1 << 30)
+#define TRANSCODER(pipe) ((pipe) << 30)
+#define TRANSCODER_CPT(pipe) ((pipe) << 29)
+#define TRANSCODER_MASK (1 << 30)
+#define TRANSCODER_MASK_CPT (3 << 29)
#define COLOR_FORMAT_8bpc (0)
#define COLOR_FORMAT_12bpc (3 << 26)
#define SDVOB_HOTPLUG_ENABLE (1 << 23)
@@ -3447,8 +3589,30 @@
#define EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B (0x38<<22)
#define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f<<22)
+/* IVB */
+#define EDP_LINK_TRAIN_400MV_0DB_IVB (0x24 <<22)
+#define EDP_LINK_TRAIN_400MV_3_5DB_IVB (0x2a <<22)
+#define EDP_LINK_TRAIN_400MV_6DB_IVB (0x2f <<22)
+#define EDP_LINK_TRAIN_600MV_0DB_IVB (0x30 <<22)
+#define EDP_LINK_TRAIN_600MV_3_5DB_IVB (0x36 <<22)
+#define EDP_LINK_TRAIN_800MV_0DB_IVB (0x38 <<22)
+#define EDP_LINK_TRAIN_800MV_3_5DB_IVB (0x33 <<22)
+
+/* legacy values */
+#define EDP_LINK_TRAIN_500MV_0DB_IVB (0x00 <<22)
+#define EDP_LINK_TRAIN_1000MV_0DB_IVB (0x20 <<22)
+#define EDP_LINK_TRAIN_500MV_3_5DB_IVB (0x02 <<22)
+#define EDP_LINK_TRAIN_1000MV_3_5DB_IVB (0x22 <<22)
+#define EDP_LINK_TRAIN_1000MV_6DB_IVB (0x23 <<22)
+
+#define EDP_LINK_TRAIN_VOL_EMP_MASK_IVB (0x3f<<22)
+
#define FORCEWAKE 0xA18C
#define FORCEWAKE_ACK 0x130090
+#define FORCEWAKE_MT 0xa188 /* multi-threaded */
+#define FORCEWAKE_MT_ACK 0x130040
+#define ECOBUS 0xa180
+#define FORCEWAKE_MT_ENABLE (1<<5)
#define GT_FIFO_FREE_ENTRIES 0x120008
#define GT_FIFO_NUM_RESERVED_ENTRIES 20
@@ -3478,7 +3642,11 @@
#define GEN6_CAGF_MASK (0x7f << GEN6_CAGF_SHIFT)
#define GEN6_RP_CONTROL 0xA024
#define GEN6_RP_MEDIA_TURBO (1<<11)
-#define GEN6_RP_USE_NORMAL_FREQ (1<<9)
+#define GEN6_RP_MEDIA_MODE_MASK (3<<9)
+#define GEN6_RP_MEDIA_HW_TURBO_MODE (3<<9)
+#define GEN6_RP_MEDIA_HW_NORMAL_MODE (2<<9)
+#define GEN6_RP_MEDIA_HW_MODE (1<<9)
+#define GEN6_RP_MEDIA_SW_MODE (0<<9)
#define GEN6_RP_MEDIA_IS_GFX (1<<8)
#define GEN6_RP_ENABLE (1<<7)
#define GEN6_RP_UP_IDLE_MIN (0x1<<3)
@@ -3535,6 +3703,14 @@
#define GEN6_PCODE_DATA 0x138128
#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8
+#define GEN6_GT_CORE_STATUS 0x138060
+#define GEN6_CORE_CPD_STATE_MASK (7<<4)
+#define GEN6_RCn_MASK 7
+#define GEN6_RC0 0
+#define GEN6_RC3 2
+#define GEN6_RC6 3
+#define GEN6_RC7 4
+
#define G4X_AUD_VID_DID 0x62020
#define INTEL_AUDIO_DEVCL 0x808629FB
#define INTEL_AUDIO_DEVBLC 0x80862801
@@ -3547,17 +3723,23 @@
#define G4X_ELD_ACK (1 << 4)
#define G4X_HDMIW_HDMIEDID 0x6210C
-#define GEN5_HDMIW_HDMIEDID_A 0xE2050
-#define GEN5_AUD_CNTL_ST_A 0xE20B4
-#define GEN5_ELD_BUFFER_SIZE (0x1f << 10)
-#define GEN5_ELD_ADDRESS (0x1f << 5)
-#define GEN5_ELD_ACK (1 << 4)
-#define GEN5_AUD_CNTL_ST2 0xE20C0
-#define GEN5_ELD_VALIDB (1 << 0)
-#define GEN5_CP_READYB (1 << 1)
-
-#define GEN7_HDMIW_HDMIEDID_A 0xE5050
-#define GEN7_AUD_CNTRL_ST_A 0xE50B4
-#define GEN7_AUD_CNTRL_ST2 0xE50C0
+#define IBX_HDMIW_HDMIEDID_A 0xE2050
+#define IBX_AUD_CNTL_ST_A 0xE20B4
+#define IBX_ELD_BUFFER_SIZE (0x1f << 10)
+#define IBX_ELD_ADDRESS (0x1f << 5)
+#define IBX_ELD_ACK (1 << 4)
+#define IBX_AUD_CNTL_ST2 0xE20C0
+#define IBX_ELD_VALIDB (1 << 0)
+#define IBX_CP_READYB (1 << 1)
+
+#define CPT_HDMIW_HDMIEDID_A 0xE5050
+#define CPT_AUD_CNTL_ST_A 0xE50B4
+#define CPT_AUD_CNTRL_ST2 0xE50C0
+
+/* These are the 4 32-bit write offset registers for each stream
+ * output buffer. It determines the offset from the
+ * 3DSTATE_SO_BUFFERs that the next streamed vertex output goes to.
+ */
+#define GEN7_SO_WRITE_OFFSET(n) (0x5280 + (n) * 4)
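This macro pairs with the i915_reset_gen7_sol_offsets() helper added earlier in this patch: when userspace passes I915_EXEC_GEN7_SOL_RESET, the helper emits four MI_LOAD_REGISTER_IMM writes, one per stream-output buffer. Expanding the macro by hand (illustrative only, not part of the patch):

/* Just the macro arithmetic spelled out as a stand-alone check. */
#include <stdio.h>

#define GEN7_SO_WRITE_OFFSET(n)	(0x5280 + (n) * 4)

int main(void)
{
	int n;

	for (n = 0; n < 4; n++)	/* prints 0x5280, 0x5284, 0x5288, 0x528c */
		printf("GEN7_SO_WRITE_OFFSET(%d) = 0x%04x\n", n,
		       GEN7_SO_WRITE_OFFSET(n));
	return 0;
}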
#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index e77a863a3833..2a3f707caab8 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -38,8 +38,8 @@
#include "i915_drv.h"
#include "i915_trace.h"
#include "drm_dp_helper.h"
-
#include "drm_crtc_helper.h"
+#include <linux/dma_remapping.h>
#define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
@@ -915,8 +915,8 @@ static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
pipe_name(pipe));
}
-static void assert_pipe(struct drm_i915_private *dev_priv,
- enum pipe pipe, bool state)
+void assert_pipe(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool state)
{
int reg;
u32 val;
@@ -929,8 +929,6 @@ static void assert_pipe(struct drm_i915_private *dev_priv,
"pipe %c assertion failure (expected %s, current %s)\n",
pipe_name(pipe), state_string(state), state_string(cur_state));
}
-#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
-#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
static void assert_plane_enabled(struct drm_i915_private *dev_priv,
enum plane plane)
@@ -1206,7 +1204,8 @@ static void intel_disable_pch_pll(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
int reg;
- u32 val;
+ u32 val, pll_mask = TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL,
+ pll_sel = TRANSC_DPLL_ENABLE;
if (pipe > 1)
return;
@@ -1217,6 +1216,15 @@ static void intel_disable_pch_pll(struct drm_i915_private *dev_priv,
/* Make sure transcoder isn't still depending on us */
assert_transcoder_disabled(dev_priv, pipe);
+ if (pipe == 0)
+ pll_sel |= TRANSC_DPLLA_SEL;
+ else if (pipe == 1)
+ pll_sel |= TRANSC_DPLLB_SEL;
+
+
+ if ((I915_READ(PCH_DPLL_SEL) & pll_mask) == pll_sel)
+ return;
+
reg = PCH_DPLL(pipe);
val = I915_READ(reg);
val &= ~DPLL_VCO_ENABLE;
@@ -1511,8 +1519,8 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
u32 fbc_ctl, fbc_ctl2;
cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
- if (fb->pitch < cfb_pitch)
- cfb_pitch = fb->pitch;
+ if (fb->pitches[0] < cfb_pitch)
+ cfb_pitch = fb->pitches[0];
/* FBC_CTL wants 64B units */
cfb_pitch = (cfb_pitch / 64) - 1;
@@ -2073,11 +2081,11 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
I915_WRITE(reg, dspcntr);
Start = obj->gtt_offset;
- Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);
+ Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
- Start, Offset, x, y, fb->pitch);
- I915_WRITE(DSPSTRIDE(plane), fb->pitch);
+ Start, Offset, x, y, fb->pitches[0]);
+ I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
if (INTEL_INFO(dev)->gen >= 4) {
I915_WRITE(DSPSURF(plane), Start);
I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
@@ -2154,11 +2162,11 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
I915_WRITE(reg, dspcntr);
Start = obj->gtt_offset;
- Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);
+ Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
- Start, Offset, x, y, fb->pitch);
- I915_WRITE(DSPSTRIDE(plane), fb->pitch);
+ Start, Offset, x, y, fb->pitches[0]);
+ I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
I915_WRITE(DSPSURF(plane), Start);
I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
I915_WRITE(DSPADDR(plane), Offset);
@@ -4509,7 +4517,7 @@ static void ironlake_update_wm(struct drm_device *dev)
*/
}
-static void sandybridge_update_wm(struct drm_device *dev)
+void sandybridge_update_wm(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
@@ -4569,7 +4577,8 @@ static void sandybridge_update_wm(struct drm_device *dev)
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
- if (!single_plane_enabled(enabled))
+ if (!single_plane_enabled(enabled) ||
+ dev_priv->sprite_scaling_enabled)
return;
enabled = ffs(enabled) - 1;
@@ -4619,6 +4628,149 @@ static void sandybridge_update_wm(struct drm_device *dev)
cursor_wm);
}
+static bool
+sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
+ uint32_t sprite_width, int pixel_size,
+ const struct intel_watermark_params *display,
+ int display_latency_ns, int *sprite_wm)
+{
+ struct drm_crtc *crtc;
+ int clock;
+ int entries, tlb_miss;
+
+ crtc = intel_get_crtc_for_plane(dev, plane);
+ if (crtc->fb == NULL || !crtc->enabled) {
+ *sprite_wm = display->guard_size;
+ return false;
+ }
+
+ clock = crtc->mode.clock;
+
+ /* Use the small buffer method to calculate the sprite watermark */
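+ /* entries is the number of bytes fetched during the latency window:
+ * clock (kHz) * bytes per pixel / 1000 gives bytes per microsecond,
+ * scaled by the latency in ns / 1000. E.g. with illustrative numbers,
+ * a 148500 kHz mode at 4 bytes/pixel and 700 ns latency needs
+ * (148500 * 4 / 1000) * 700 / 1000 = 415 bytes before rounding up to
+ * cachelines and adding the guard. */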
+ entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
+ tlb_miss = display->fifo_size*display->cacheline_size -
+ sprite_width * 8;
+ if (tlb_miss > 0)
+ entries += tlb_miss;
+ entries = DIV_ROUND_UP(entries, display->cacheline_size);
+ *sprite_wm = entries + display->guard_size;
+ if (*sprite_wm > (int)display->max_wm)
+ *sprite_wm = display->max_wm;
+
+ return true;
+}
+
+static bool
+sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
+ uint32_t sprite_width, int pixel_size,
+ const struct intel_watermark_params *display,
+ int latency_ns, int *sprite_wm)
+{
+ struct drm_crtc *crtc;
+ unsigned long line_time_us;
+ int clock;
+ int line_count, line_size;
+ int small, large;
+ int entries;
+
+ if (!latency_ns) {
+ *sprite_wm = 0;
+ return false;
+ }
+
+ crtc = intel_get_crtc_for_plane(dev, plane);
+ clock = crtc->mode.clock;
+
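+ /* line_time_us is the time to scan one sprite line in microseconds
+ * (clock is the pixel clock in kHz); line_count is then the number of
+ * whole lines fetched within the latency window. */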
+ line_time_us = (sprite_width * 1000) / clock;
+ line_count = (latency_ns / line_time_us + 1000) / 1000;
+ line_size = sprite_width * pixel_size;
+
+ /* Use the minimum of the small and large buffer method for the sprite */
+ small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
+ large = line_count * line_size;
+
+ entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
+ *sprite_wm = entries + display->guard_size;
+
+ return *sprite_wm > 0x3ff ? false : true;
+}
+
+static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
+ uint32_t sprite_width, int pixel_size)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
+ int sprite_wm, reg;
+ int ret;
+
+ switch (pipe) {
+ case 0:
+ reg = WM0_PIPEA_ILK;
+ break;
+ case 1:
+ reg = WM0_PIPEB_ILK;
+ break;
+ case 2:
+ reg = WM0_PIPEC_IVB;
+ break;
+ default:
+ return; /* bad pipe */
+ }
+
+ ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size,
+ &sandybridge_display_wm_info,
+ latency, &sprite_wm);
+ if (!ret) {
+ DRM_DEBUG_KMS("failed to compute sprite wm for pipe %d\n",
+ pipe);
+ return;
+ }
+
+ I915_WRITE(reg, I915_READ(reg) | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
+ DRM_DEBUG_KMS("sprite watermarks For pipe %d - %d\n", pipe, sprite_wm);
+
+
+ ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
+ pixel_size,
+ &sandybridge_display_srwm_info,
+ SNB_READ_WM1_LATENCY() * 500,
+ &sprite_wm);
+ if (!ret) {
+ DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %d\n",
+ pipe);
+ return;
+ }
+ I915_WRITE(WM1S_LP_ILK, sprite_wm);
+
+ /* Only IVB has two more LP watermarks for sprite */
+ if (!IS_IVYBRIDGE(dev))
+ return;
+
+ ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
+ pixel_size,
+ &sandybridge_display_srwm_info,
+ SNB_READ_WM2_LATENCY() * 500,
+ &sprite_wm);
+ if (!ret) {
+ DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %d\n",
+ pipe);
+ return;
+ }
+ I915_WRITE(WM2S_LP_IVB, sprite_wm);
+
+ ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
+ pixel_size,
+ &sandybridge_display_srwm_info,
+ SNB_READ_WM3_LATENCY() * 500,
+ &sprite_wm);
+ if (!ret) {
+ DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %d\n",
+ pipe);
+ return;
+ }
+ I915_WRITE(WM3S_LP_IVB, sprite_wm);
+}
+
/**
* intel_update_watermarks - update FIFO watermark values based on current modes
*
@@ -4659,6 +4811,16 @@ static void intel_update_watermarks(struct drm_device *dev)
dev_priv->display.update_wm(dev);
}
+void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
+ uint32_t sprite_width, int pixel_size)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->display.update_sprite_wm)
+ dev_priv->display.update_sprite_wm(dev, pipe, sprite_width,
+ pixel_size);
+}
+
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
{
if (i915_panel_use_ssc >= 0)
@@ -4670,6 +4832,7 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
/**
* intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send
* @crtc: CRTC structure
+ * @mode: requested mode
*
* A pipe may be connected to one or more outputs. Based on the depth of the
* attached framebuffer, choose a good color depth to use on the pipe.
@@ -4681,13 +4844,15 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
* HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc
* Displays may support a restricted set as well, check EDID and clamp as
* appropriate.
+ * DP may want to dither down to 6bpc to fit larger modes
*
* RETURNS:
* Dithering requirement (i.e. false if display bpc and pipe bpc match,
* true if they don't match).
*/
static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
- unsigned int *pipe_bpp)
+ unsigned int *pipe_bpp,
+ struct drm_display_mode *mode)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4758,6 +4923,11 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
}
}
+ if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
+ DRM_DEBUG_KMS("Dithering DP to 6bpc\n");
+ display_bpc = 6;
+ }
+
/*
* We could just drive the pipe at the highest bpc all the time and
* enable dithering as needed, but that costs bandwidth. So choose
@@ -5019,6 +5189,16 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
pipeconf &= ~PIPECONF_DOUBLE_WIDE;
}
+ /* default to 8bpc */
+ pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN);
+ if (is_dp) {
+ if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
+ pipeconf |= PIPECONF_BPP_6 |
+ PIPECONF_DITHER_EN |
+ PIPECONF_DITHER_TYPE_SP;
+ }
+ }
+
dpll |= DPLL_VCO_ENABLE;
DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
@@ -5137,7 +5317,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
adjusted_mode->crtc_vsync_end -= 1;
adjusted_mode->crtc_vsync_start -= 1;
} else
- pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */
+ pipeconf &= ~PIPECONF_INTERLACE_MASK; /* progressive */
I915_WRITE(HTOTAL(pipe),
(adjusted_mode->crtc_hdisplay - 1) |
@@ -5480,7 +5660,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
/* determine panel color depth */
temp = I915_READ(PIPECONF(pipe));
temp &= ~PIPE_BPC_MASK;
- dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp);
+ dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, mode);
switch (pipe_bpp) {
case 18:
temp |= PIPE_6BPC;
@@ -5804,14 +5984,45 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode,
x, y, old_fb);
-
drm_vblank_post_modeset(dev, pipe);
- intel_crtc->dpms_mode = DRM_MODE_DPMS_ON;
+ if (ret)
+ intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF;
+ else
+ intel_crtc->dpms_mode = DRM_MODE_DPMS_ON;
return ret;
}
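+
+/*
+ * intel_eld_uptodate - check whether the ELD already in the hardware buffer
+ * matches connector->eld. Returns true when no rewrite is needed: either
+ * both are empty, or the valid bit is set and every dword read back from the
+ * EDID/ELD data register matches (the code treats eld[2] as the ELD length
+ * in dwords).
+ */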
+static bool intel_eld_uptodate(struct drm_connector *connector,
+ int reg_eldv, uint32_t bits_eldv,
+ int reg_elda, uint32_t bits_elda,
+ int reg_edid)
+{
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ uint8_t *eld = connector->eld;
+ uint32_t i;
+
+ i = I915_READ(reg_eldv);
+ i &= bits_eldv;
+
+ if (!eld[0])
+ return !i;
+
+ if (!i)
+ return false;
+
+ i = I915_READ(reg_elda);
+ i &= ~bits_elda;
+ I915_WRITE(reg_elda, i);
+
+ for (i = 0; i < eld[2]; i++)
+ if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
+ return false;
+
+ return true;
+}
+
static void g4x_write_eld(struct drm_connector *connector,
struct drm_crtc *crtc)
{
@@ -5828,6 +6039,12 @@ static void g4x_write_eld(struct drm_connector *connector,
else
eldv = G4X_ELDV_DEVCTG;
+ if (intel_eld_uptodate(connector,
+ G4X_AUD_CNTL_ST, eldv,
+ G4X_AUD_CNTL_ST, G4X_ELD_ADDR,
+ G4X_HDMIW_HDMIEDID))
+ return;
+
i = I915_READ(G4X_AUD_CNTL_ST);
i &= ~(eldv | G4X_ELD_ADDR);
len = (i >> 9) & 0x1f; /* ELD buffer size */
@@ -5858,14 +6075,14 @@ static void ironlake_write_eld(struct drm_connector *connector,
int aud_cntl_st;
int aud_cntrl_st2;
- if (IS_IVYBRIDGE(connector->dev)) {
- hdmiw_hdmiedid = GEN7_HDMIW_HDMIEDID_A;
- aud_cntl_st = GEN7_AUD_CNTRL_ST_A;
- aud_cntrl_st2 = GEN7_AUD_CNTRL_ST2;
+ if (HAS_PCH_IBX(connector->dev)) {
+ hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID_A;
+ aud_cntl_st = IBX_AUD_CNTL_ST_A;
+ aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
} else {
- hdmiw_hdmiedid = GEN5_HDMIW_HDMIEDID_A;
- aud_cntl_st = GEN5_AUD_CNTL_ST_A;
- aud_cntrl_st2 = GEN5_AUD_CNTL_ST2;
+ hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID_A;
+ aud_cntl_st = CPT_AUD_CNTL_ST_A;
+ aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
}
i = to_intel_crtc(crtc)->pipe;
@@ -5879,14 +6096,25 @@ static void ironlake_write_eld(struct drm_connector *connector,
if (!i) {
DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
/* operate blindly on all ports */
- eldv = GEN5_ELD_VALIDB;
- eldv |= GEN5_ELD_VALIDB << 4;
- eldv |= GEN5_ELD_VALIDB << 8;
+ eldv = IBX_ELD_VALIDB;
+ eldv |= IBX_ELD_VALIDB << 4;
+ eldv |= IBX_ELD_VALIDB << 8;
} else {
DRM_DEBUG_DRIVER("ELD on port %c\n", 'A' + i);
- eldv = GEN5_ELD_VALIDB << ((i - 1) * 4);
+ eldv = IBX_ELD_VALIDB << ((i - 1) * 4);
+ }
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
+ DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
+ eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */
}
+ if (intel_eld_uptodate(connector,
+ aud_cntrl_st2, eldv,
+ aud_cntl_st, IBX_ELD_ADDRESS,
+ hdmiw_hdmiedid))
+ return;
+
i = I915_READ(aud_cntrl_st2);
i &= ~eldv;
I915_WRITE(aud_cntrl_st2, i);
@@ -5894,13 +6122,8 @@ static void ironlake_write_eld(struct drm_connector *connector,
if (!eld[0])
return;
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
- DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
- eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */
- }
-
i = I915_READ(aud_cntl_st);
- i &= ~GEN5_ELD_ADDRESS;
+ i &= ~IBX_ELD_ADDRESS;
I915_WRITE(aud_cntl_st, i);
len = min_t(uint8_t, eld[2], 21); /* 84 bytes of hw ELD buffer */
@@ -6280,7 +6503,7 @@ static struct drm_display_mode load_detect_mode = {
static struct drm_framebuffer *
intel_framebuffer_create(struct drm_device *dev,
- struct drm_mode_fb_cmd *mode_cmd,
+ struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_i915_gem_object *obj)
{
struct intel_framebuffer *intel_fb;
@@ -6322,7 +6545,7 @@ intel_framebuffer_create_for_mode(struct drm_device *dev,
int depth, int bpp)
{
struct drm_i915_gem_object *obj;
- struct drm_mode_fb_cmd mode_cmd;
+ struct drm_mode_fb_cmd2 mode_cmd;
obj = i915_gem_alloc_object(dev,
intel_framebuffer_size_for_mode(mode, bpp));
@@ -6331,9 +6554,9 @@ intel_framebuffer_create_for_mode(struct drm_device *dev,
mode_cmd.width = mode->hdisplay;
mode_cmd.height = mode->vdisplay;
- mode_cmd.depth = depth;
- mode_cmd.bpp = bpp;
- mode_cmd.pitch = intel_framebuffer_pitch_for_width(mode_cmd.width, bpp);
+ mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
+ bpp);
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
return intel_framebuffer_create(dev, &mode_cmd, obj);
}
@@ -6354,11 +6577,11 @@ mode_fits_in_fbdev(struct drm_device *dev,
return NULL;
fb = &dev_priv->fbdev->ifb.base;
- if (fb->pitch < intel_framebuffer_pitch_for_width(mode->hdisplay,
- fb->bits_per_pixel))
+ if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
+ fb->bits_per_pixel))
return NULL;
- if (obj->base.size < mode->vdisplay * fb->pitch)
+ if (obj->base.size < mode->vdisplay * fb->pitches[0])
return NULL;
return fb;
@@ -6991,7 +7214,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
goto out;
/* Offset into the new buffer for cases of shared fbs between CRTCs */
- offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
+ offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
ret = BEGIN_LP_RING(6);
if (ret)
@@ -7008,7 +7231,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
OUT_RING(MI_NOOP);
OUT_RING(MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- OUT_RING(fb->pitch);
+ OUT_RING(fb->pitches[0]);
OUT_RING(obj->gtt_offset + offset);
OUT_RING(MI_NOOP);
ADVANCE_LP_RING();
@@ -7032,7 +7255,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
goto out;
/* Offset into the new buffer for cases of shared fbs between CRTCs */
- offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
+ offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
ret = BEGIN_LP_RING(6);
if (ret)
@@ -7046,7 +7269,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
OUT_RING(MI_NOOP);
OUT_RING(MI_DISPLAY_FLIP_I915 |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- OUT_RING(fb->pitch);
+ OUT_RING(fb->pitches[0]);
OUT_RING(obj->gtt_offset + offset);
OUT_RING(MI_NOOP);
@@ -7079,7 +7302,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
*/
OUT_RING(MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- OUT_RING(fb->pitch);
+ OUT_RING(fb->pitches[0]);
OUT_RING(obj->gtt_offset | obj->tiling_mode);
/* XXX Enabling the panel-fitter across page-flip is so far
@@ -7114,7 +7337,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
OUT_RING(MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- OUT_RING(fb->pitch | obj->tiling_mode);
+ OUT_RING(fb->pitches[0] | obj->tiling_mode);
OUT_RING(obj->gtt_offset);
pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
@@ -7150,7 +7373,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
goto out;
intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19));
- intel_ring_emit(ring, (fb->pitch | obj->tiling_mode));
+ intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
intel_ring_emit(ring, (obj->gtt_offset));
intel_ring_emit(ring, (MI_NOOP));
intel_ring_advance(ring);
@@ -7189,11 +7412,16 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
work->old_fb_obj = intel_fb->obj;
INIT_WORK(&work->work, intel_unpin_work_fn);
+ ret = drm_vblank_get(dev, intel_crtc->pipe);
+ if (ret)
+ goto free_work;
+
/* We borrow the event spin lock for protecting unpin_work */
spin_lock_irqsave(&dev->event_lock, flags);
if (intel_crtc->unpin_work) {
spin_unlock_irqrestore(&dev->event_lock, flags);
kfree(work);
+ drm_vblank_put(dev, intel_crtc->pipe);
DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
return -EBUSY;
@@ -7212,10 +7440,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
crtc->fb = fb;
- ret = drm_vblank_get(dev, intel_crtc->pipe);
- if (ret)
- goto cleanup_objs;
-
work->pending_flip_obj = obj;
work->enable_stall_check = true;
@@ -7238,7 +7462,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
cleanup_pending:
atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
-cleanup_objs:
drm_gem_object_unreference(&work->old_fb_obj->base);
drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex);
@@ -7247,6 +7470,8 @@ cleanup_objs:
intel_crtc->unpin_work = NULL;
spin_unlock_irqrestore(&dev->event_lock, flags);
+ drm_vblank_put(dev, intel_crtc->pipe);
+free_work:
kfree(work);
return ret;
@@ -7574,7 +7799,7 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = {
int intel_framebuffer_init(struct drm_device *dev,
struct intel_framebuffer *intel_fb,
- struct drm_mode_fb_cmd *mode_cmd,
+ struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_i915_gem_object *obj)
{
int ret;
@@ -7582,21 +7807,25 @@ int intel_framebuffer_init(struct drm_device *dev,
if (obj->tiling_mode == I915_TILING_Y)
return -EINVAL;
- if (mode_cmd->pitch & 63)
+ if (mode_cmd->pitches[0] & 63)
return -EINVAL;
- switch (mode_cmd->bpp) {
- case 8:
- case 16:
- /* Only pre-ILK can handle 5:5:5 */
- if (mode_cmd->depth == 15 && !HAS_PCH_SPLIT(dev))
- return -EINVAL;
+ switch (mode_cmd->pixel_format) {
+ case DRM_FORMAT_RGB332:
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_ARGB2101010:
+ /* RGB formats are common across chipsets */
break;
-
- case 24:
- case 32:
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_VYUY:
break;
default:
+ DRM_ERROR("unsupported pixel format\n");
return -EINVAL;
}
@@ -7614,11 +7843,12 @@ int intel_framebuffer_init(struct drm_device *dev,
static struct drm_framebuffer *
intel_user_framebuffer_create(struct drm_device *dev,
struct drm_file *filp,
- struct drm_mode_fb_cmd *mode_cmd)
+ struct drm_mode_fb_cmd2 *mode_cmd)
{
struct drm_i915_gem_object *obj;
- obj = to_intel_bo(drm_gem_object_lookup(dev, filp, mode_cmd->handle));
+ obj = to_intel_bo(drm_gem_object_lookup(dev, filp,
+ mode_cmd->handles[0]));
if (&obj->base == NULL)
return ERR_PTR(-ENOENT);
@@ -7887,6 +8117,31 @@ void intel_init_emon(struct drm_device *dev)
dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
}
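+
+/*
+ * Central RC6 policy: the i915_enable_rc6 module parameter wins when it is
+ * set; otherwise RC6 is kept off on Ironlake and Sandybridge and enabled on
+ * later parts.
+ */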
+static bool intel_enable_rc6(struct drm_device *dev)
+{
+ /*
+ * Respect the kernel parameter if it is set
+ */
+ if (i915_enable_rc6 >= 0)
+ return i915_enable_rc6;
+
+ /*
+ * Disable RC6 on Ironlake
+ */
+ if (INTEL_INFO(dev)->gen == 5)
+ return 0;
+
+ /*
+ * Disable rc6 on Sandybridge
+ */
+ if (INTEL_INFO(dev)->gen == 6) {
+ DRM_DEBUG_DRIVER("Sandybridge: RC6 disabled\n");
+ return 0;
+ }
+ DRM_DEBUG_DRIVER("RC6 enabled\n");
+ return 1;
+}
+
void gen6_enable_rps(struct drm_i915_private *dev_priv)
{
u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
@@ -7923,7 +8178,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
- if (i915_enable_rc6)
+ if (intel_enable_rc6(dev_priv->dev))
rc6_mask = GEN6_RC_CTL_RC6p_ENABLE |
GEN6_RC_CTL_RC6_ENABLE;
@@ -7950,7 +8205,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
I915_WRITE(GEN6_RP_CONTROL,
GEN6_RP_MEDIA_TURBO |
- GEN6_RP_USE_NORMAL_FREQ |
+ GEN6_RP_MEDIA_HW_MODE |
GEN6_RP_MEDIA_IS_GFX |
GEN6_RP_ENABLE |
GEN6_RP_UP_BUSY_AVG |
@@ -8205,6 +8460,10 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
+ I915_WRITE(IVB_CHICKEN3,
+ CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
+ CHICKEN3_DGMG_DONE_FIX_DISABLE);
+
for_each_pipe(pipe) {
I915_WRITE(DSPCNTR(pipe),
I915_READ(DSPCNTR(pipe)) |
@@ -8372,7 +8631,7 @@ void ironlake_enable_rc6(struct drm_device *dev)
/* rc6 disabled by default due to repeated reports of hanging during
* boot and resume.
*/
- if (!i915_enable_rc6)
+ if (!intel_enable_rc6(dev))
return;
mutex_lock(&dev->struct_mutex);
@@ -8491,6 +8750,34 @@ static void intel_init_display(struct drm_device *dev)
/* For FIFO watermark updates */
if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->display.force_wake_get = __gen6_gt_force_wake_get;
+ dev_priv->display.force_wake_put = __gen6_gt_force_wake_put;
+
+ /* IVB configs may use multi-threaded forcewake */
+ if (IS_IVYBRIDGE(dev)) {
+ u32 ecobus;
+
+ /* A small trick here - if the BIOS hasn't configured MT forcewake,
+ * and if the device is in RC6, then force_wake_mt_get will not wake
+ * the device and the ECOBUS read will return zero, which will be
+ * (correctly) interpreted by the test below as MT forcewake being
+ * disabled.
+ */
+ mutex_lock(&dev->struct_mutex);
+ __gen6_gt_force_wake_mt_get(dev_priv);
+ ecobus = I915_READ_NOTRACE(ECOBUS);
+ __gen6_gt_force_wake_mt_put(dev_priv);
+ mutex_unlock(&dev->struct_mutex);
+
+ if (ecobus & FORCEWAKE_MT_ENABLE) {
+ DRM_DEBUG_KMS("Using MT version of forcewake\n");
+ dev_priv->display.force_wake_get =
+ __gen6_gt_force_wake_mt_get;
+ dev_priv->display.force_wake_put =
+ __gen6_gt_force_wake_mt_put;
+ }
+ }
+
if (HAS_PCH_IBX(dev))
dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
else if (HAS_PCH_CPT(dev))
@@ -8510,6 +8797,7 @@ static void intel_init_display(struct drm_device *dev)
} else if (IS_GEN6(dev)) {
if (SNB_READ_WM0_LATENCY()) {
dev_priv->display.update_wm = sandybridge_update_wm;
+ dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
} else {
DRM_DEBUG_KMS("Failed to read display plane latency. "
"Disable CxSR\n");
@@ -8523,6 +8811,7 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
if (SNB_READ_WM0_LATENCY()) {
dev_priv->display.update_wm = sandybridge_update_wm;
+ dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
} else {
DRM_DEBUG_KMS("Failed to read display plane latency. "
"Disable CxSR\n");
@@ -8706,7 +8995,7 @@ static void i915_disable_vga(struct drm_device *dev)
void intel_modeset_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int i;
+ int i, ret;
drm_mode_config_init(dev);
@@ -8736,6 +9025,12 @@ void intel_modeset_init(struct drm_device *dev)
for (i = 0; i < dev_priv->num_pipe; i++) {
intel_crtc_init(dev, i);
+ if (HAS_PCH_SPLIT(dev)) {
+ ret = intel_plane_init(dev, i);
+ if (ret)
+ DRM_ERROR("plane %d init failed: %d\n",
+ i, ret);
+ }
}
/* Just disable it once at startup */
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 4d0358fad937..db3b461ad412 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -208,13 +208,15 @@ intel_dp_link_clock(uint8_t link_bw)
*/
static int
-intel_dp_link_required(struct intel_dp *intel_dp, int pixel_clock)
+intel_dp_link_required(struct intel_dp *intel_dp, int pixel_clock, int check_bpp)
{
struct drm_crtc *crtc = intel_dp->base.base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int bpp = 24;
- if (intel_crtc)
+ if (check_bpp)
+ bpp = check_bpp;
+ else if (intel_crtc)
bpp = intel_crtc->bpp;
return (pixel_clock * bpp + 9) / 10;
@@ -233,6 +235,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
struct intel_dp *intel_dp = intel_attached_dp(connector);
int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
int max_lanes = intel_dp_max_lane_count(intel_dp);
+ int max_rate, mode_rate;
if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
if (mode->hdisplay > intel_dp->panel_fixed_mode->hdisplay)
@@ -242,9 +245,17 @@ intel_dp_mode_valid(struct drm_connector *connector,
return MODE_PANEL;
}
- if (intel_dp_link_required(intel_dp, mode->clock)
- > intel_dp_max_data_rate(max_link_clock, max_lanes))
- return MODE_CLOCK_HIGH;
+ mode_rate = intel_dp_link_required(intel_dp, mode->clock, 0);
+ max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
+
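+ /* If the mode does not fit at the default depth, retry at 18 bpp
+ * (6 bpc x 3 channels) and flag the mode so the pipe dithers down
+ * to 6bpc. */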
+ if (mode_rate > max_rate) {
+ mode_rate = intel_dp_link_required(intel_dp,
+ mode->clock, 18);
+ if (mode_rate > max_rate)
+ return MODE_CLOCK_HIGH;
+ else
+ mode->private_flags |= INTEL_MODE_DP_FORCE_6BPC;
+ }
if (mode->clock < 10000)
return MODE_CLOCK_LOW;
@@ -362,8 +373,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
* clock divider.
*/
if (is_cpu_edp(intel_dp)) {
- if (IS_GEN6(dev))
- aux_clock_divider = 200; /* SNB eDP input clock at 400Mhz */
+ if (IS_GEN6(dev) || IS_GEN7(dev))
+ aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400MHz */
else
aux_clock_divider = 225; /* eDP input clock at 450Mhz */
} else if (HAS_PCH_SPLIT(dev))
@@ -672,6 +683,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
int lane_count, clock;
int max_lane_count = intel_dp_max_lane_count(intel_dp);
int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
+ int bpp = mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 0;
static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
@@ -689,7 +701,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
for (clock = 0; clock <= max_clock; clock++) {
int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
- if (intel_dp_link_required(intel_dp, mode->clock)
+ if (intel_dp_link_required(intel_dp, mode->clock, bpp)
<= link_avail) {
intel_dp->link_bw = bws[clock];
intel_dp->lane_count = lane_count;
@@ -817,10 +829,11 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
}
/*
- * There are three kinds of DP registers:
+ * There are four kinds of DP registers:
*
* IBX PCH
- * CPU
+ * SNB CPU
+ * IVB CPU
* CPT PCH
*
* IBX PCH and CPU are the same for almost everything,
@@ -873,7 +886,25 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
/* Split out the IBX/CPU vs CPT settings */
- if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
+ if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) {
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ intel_dp->DP |= DP_SYNC_HS_HIGH;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ intel_dp->DP |= DP_SYNC_VS_HIGH;
+ intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
+
+ if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
+ intel_dp->DP |= DP_ENHANCED_FRAMING;
+
+ intel_dp->DP |= intel_crtc->pipe << 29;
+
+ /* eDP on the CPU port also needs the PLL enabled and its frequency set here */
+ intel_dp->DP |= DP_PLL_ENABLE;
+ if (adjusted_mode->clock < 200000)
+ intel_dp->DP |= DP_PLL_FREQ_160MHZ;
+ else
+ intel_dp->DP |= DP_PLL_FREQ_270MHZ;
+ } else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
intel_dp->DP |= intel_dp->color_range;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
@@ -1375,34 +1406,59 @@ static char *link_train_names[] = {
* These are source-specific values; current Intel hardware supports
* a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
*/
-#define I830_DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_800
-#define I830_DP_VOLTAGE_MAX_CPT DP_TRAIN_VOLTAGE_SWING_1200
static uint8_t
-intel_dp_pre_emphasis_max(uint8_t voltage_swing)
+intel_dp_voltage_max(struct intel_dp *intel_dp)
{
- switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
- case DP_TRAIN_VOLTAGE_SWING_400:
- return DP_TRAIN_PRE_EMPHASIS_6;
- case DP_TRAIN_VOLTAGE_SWING_600:
- return DP_TRAIN_PRE_EMPHASIS_6;
- case DP_TRAIN_VOLTAGE_SWING_800:
- return DP_TRAIN_PRE_EMPHASIS_3_5;
- case DP_TRAIN_VOLTAGE_SWING_1200:
- default:
- return DP_TRAIN_PRE_EMPHASIS_0;
+ struct drm_device *dev = intel_dp->base.base.dev;
+
+ if (IS_GEN7(dev) && is_cpu_edp(intel_dp))
+ return DP_TRAIN_VOLTAGE_SWING_800;
+ else if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+ return DP_TRAIN_VOLTAGE_SWING_1200;
+ else
+ return DP_TRAIN_VOLTAGE_SWING_800;
+}
+
+static uint8_t
+intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
+{
+ struct drm_device *dev = intel_dp->base.base.dev;
+
+ if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
+ switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_400:
+ return DP_TRAIN_PRE_EMPHASIS_6;
+ case DP_TRAIN_VOLTAGE_SWING_600:
+ case DP_TRAIN_VOLTAGE_SWING_800:
+ return DP_TRAIN_PRE_EMPHASIS_3_5;
+ default:
+ return DP_TRAIN_PRE_EMPHASIS_0;
+ }
+ } else {
+ switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_400:
+ return DP_TRAIN_PRE_EMPHASIS_6;
+ case DP_TRAIN_VOLTAGE_SWING_600:
+ return DP_TRAIN_PRE_EMPHASIS_6;
+ case DP_TRAIN_VOLTAGE_SWING_800:
+ return DP_TRAIN_PRE_EMPHASIS_3_5;
+ case DP_TRAIN_VOLTAGE_SWING_1200:
+ default:
+ return DP_TRAIN_PRE_EMPHASIS_0;
+ }
}
}
static void
intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
{
- struct drm_device *dev = intel_dp->base.base.dev;
uint8_t v = 0;
uint8_t p = 0;
int lane;
uint8_t *adjust_request = link_status + (DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS);
- int voltage_max;
+ uint8_t voltage_max;
+ uint8_t preemph_max;
for (lane = 0; lane < intel_dp->lane_count; lane++) {
uint8_t this_v = intel_get_adjust_request_voltage(adjust_request, lane);
@@ -1414,15 +1470,13 @@ intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_ST
p = this_p;
}
- if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
- voltage_max = I830_DP_VOLTAGE_MAX_CPT;
- else
- voltage_max = I830_DP_VOLTAGE_MAX;
+ voltage_max = intel_dp_voltage_max(intel_dp);
if (v >= voltage_max)
v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
- if (p >= intel_dp_pre_emphasis_max(v))
- p = intel_dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+ preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
+ if (p >= preemph_max)
+ p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
for (lane = 0; lane < 4; lane++)
intel_dp->train_set[lane] = v | p;
@@ -1494,6 +1548,37 @@ intel_gen6_edp_signal_levels(uint8_t train_set)
}
}
+/* Gen7's DP voltage swing and pre-emphasis control */
+static uint32_t
+intel_gen7_edp_signal_levels(uint8_t train_set)
+{
+ int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
+ DP_TRAIN_PRE_EMPHASIS_MASK);
+ switch (signal_levels) {
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
+ return EDP_LINK_TRAIN_400MV_0DB_IVB;
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
+ return EDP_LINK_TRAIN_400MV_6DB_IVB;
+
+ case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
+ return EDP_LINK_TRAIN_600MV_0DB_IVB;
+ case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
+
+ case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
+ return EDP_LINK_TRAIN_800MV_0DB_IVB;
+ case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
+
+ default:
+ DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
+ "0x%x\n", signal_levels);
+ return EDP_LINK_TRAIN_500MV_0DB_IVB;
+ }
+}
+
static uint8_t
intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
int lane)
@@ -1599,7 +1684,8 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
DP_LINK_CONFIGURATION_SIZE);
DP |= DP_PORT_EN;
- if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+
+ if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
DP &= ~DP_LINK_TRAIN_MASK_CPT;
else
DP &= ~DP_LINK_TRAIN_MASK;
@@ -1613,7 +1699,11 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
uint8_t link_status[DP_LINK_STATUS_SIZE];
uint32_t signal_levels;
- if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
+
+ if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
+ signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
+ DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
+ } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
} else {
@@ -1622,7 +1712,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
- if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+ if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
reg = DP | DP_LINK_TRAIN_PAT_1_CPT;
else
reg = DP | DP_LINK_TRAIN_PAT_1;
@@ -1703,7 +1793,10 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
break;
}
- if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
+ if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
+ signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
+ DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
+ } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
} else {
@@ -1711,7 +1804,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
- if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+ if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
reg = DP | DP_LINK_TRAIN_PAT_2_CPT;
else
reg = DP | DP_LINK_TRAIN_PAT_2;
@@ -1752,7 +1845,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
++tries;
}
- if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+ if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
reg = DP | DP_LINK_TRAIN_OFF_CPT;
else
reg = DP | DP_LINK_TRAIN_OFF;
@@ -1782,7 +1875,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
udelay(100);
}
- if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp)) {
+ if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
DP &= ~DP_LINK_TRAIN_MASK_CPT;
I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
} else {
@@ -1794,7 +1887,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
msleep(17);
if (is_edp(intel_dp)) {
- if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+ if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
DP |= DP_LINK_TRAIN_OFF_CPT;
else
DP |= DP_LINK_TRAIN_OFF;
@@ -1833,6 +1926,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe);
}
+ DP &= ~DP_AUDIO_OUTPUT_ENABLE;
I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
POSTING_READ(intel_dp->output_reg);
msleep(intel_dp->panel_power_down_delay);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index bd9a604b73da..1348705faf6b 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -26,6 +26,7 @@
#define __INTEL_DRV_H__
#include <linux/i2c.h>
+#include "i915_drm.h"
#include "i915_drv.h"
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
@@ -39,7 +40,7 @@
ret__ = -ETIMEDOUT; \
break; \
} \
- if (W && !(in_atomic() || in_dbg_master())) msleep(W); \
+ if (W && drm_can_sleep()) msleep(W); \
} \
ret__; \
})
@@ -47,13 +48,6 @@
#define wait_for(COND, MS) _wait_for(COND, MS, 1)
#define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0)
-#define MSLEEP(x) do { \
- if (in_dbg_master()) \
- mdelay(x); \
- else \
- msleep(x); \
-} while (0)
-
#define KHz(x) (1000*x)
#define MHz(x) KHz(1000*x)
@@ -110,6 +104,7 @@
/* drm_display_mode->private_flags */
#define INTEL_MODE_PIXEL_MULTIPLIER_SHIFT (0x0)
#define INTEL_MODE_PIXEL_MULTIPLIER_MASK (0xf << INTEL_MODE_PIXEL_MULTIPLIER_SHIFT)
+#define INTEL_MODE_DP_FORCE_6BPC (0x10)
static inline void
intel_mode_set_pixel_multiplier(struct drm_display_mode *mode,
@@ -176,10 +171,32 @@ struct intel_crtc {
bool use_pll_a;
};
+struct intel_plane {
+ struct drm_plane base;
+ enum pipe pipe;
+ struct drm_i915_gem_object *obj;
+ bool primary_disabled;
+ int max_downscale;
+ u32 lut_r[1024], lut_g[1024], lut_b[1024];
+ void (*update_plane)(struct drm_plane *plane,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t x, uint32_t y,
+ uint32_t src_w, uint32_t src_h);
+ void (*disable_plane)(struct drm_plane *plane);
+ int (*update_colorkey)(struct drm_plane *plane,
+ struct drm_intel_sprite_colorkey *key);
+ void (*get_colorkey)(struct drm_plane *plane,
+ struct drm_intel_sprite_colorkey *key);
+};
+
#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
#define to_intel_connector(x) container_of(x, struct intel_connector, base)
#define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
+#define to_intel_plane(x) container_of(x, struct intel_plane, base)
#define DIP_HEADER_SIZE 5
@@ -289,6 +306,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
extern bool intel_dpd_is_edp(struct drm_device *dev);
extern void intel_edp_link_config(struct intel_encoder *, int *, int *);
extern bool intel_encoder_is_pch_edp(struct drm_encoder *encoder);
+extern int intel_plane_init(struct drm_device *dev, enum pipe pipe);
/* intel_panel.c */
extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
@@ -359,7 +377,7 @@ extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
extern int intel_framebuffer_init(struct drm_device *dev,
struct intel_framebuffer *ifb,
- struct drm_mode_fb_cmd *mode_cmd,
+ struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_i915_gem_object *obj);
extern int intel_fbdev_init(struct drm_device *dev);
extern void intel_fbdev_fini(struct drm_device *dev);
@@ -379,9 +397,25 @@ extern int intel_overlay_attrs(struct drm_device *dev, void *data,
extern void intel_fb_output_poll_changed(struct drm_device *dev);
extern void intel_fb_restore_mode(struct drm_device *dev);
+extern void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
+ bool state);
+#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
+#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
+
extern void intel_init_clock_gating(struct drm_device *dev);
extern void intel_write_eld(struct drm_encoder *encoder,
struct drm_display_mode *mode);
extern void intel_cpt_verify_modeset(struct drm_device *dev, int pipe);
+/* For use by IVB LP watermark workaround in intel_sprite.c */
+extern void sandybridge_update_wm(struct drm_device *dev);
+extern void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
+ uint32_t sprite_width,
+ int pixel_size);
+
+extern int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
#endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index ec49bae73382..571375a3eef4 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -65,7 +65,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
struct drm_i915_private *dev_priv = dev->dev_private;
struct fb_info *info;
struct drm_framebuffer *fb;
- struct drm_mode_fb_cmd mode_cmd;
+ struct drm_mode_fb_cmd2 mode_cmd;
struct drm_i915_gem_object *obj;
struct device *device = &dev->pdev->dev;
int size, ret;
@@ -77,11 +77,12 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
mode_cmd.width = sizes->surface_width;
mode_cmd.height = sizes->surface_height;
- mode_cmd.bpp = sizes->surface_bpp;
- mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 7) / 8), 64);
- mode_cmd.depth = sizes->surface_depth;
+ mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((sizes->surface_bpp + 7) /
+ 8), 64);
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
- size = mode_cmd.pitch * mode_cmd.height;
+ size = mode_cmd.pitches[0] * mode_cmd.height;
size = ALIGN(size, PAGE_SIZE);
obj = i915_gem_alloc_object(dev, size);
if (!obj) {
@@ -148,7 +149,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
// memset(info->screen_base, 0, size);
- drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
+ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);
info->pixmap.size = 64*1024;
@@ -269,8 +270,14 @@ void intel_fb_restore_mode(struct drm_device *dev)
{
int ret;
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_mode_config *config = &dev->mode_config;
+ struct drm_plane *plane;
ret = drm_fb_helper_restore_fbdev_mode(&dev_priv->fbdev->helper);
if (ret)
DRM_DEBUG("failed to restore crtc mode\n");
+
+ /* Be sure to shut off any planes that may be active */
+ list_for_each_entry(plane, &config->plane_list, head)
+ plane->funcs->disable_plane(plane);
}
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index d4f5a0b2120d..64541f7ef900 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -269,6 +269,10 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 temp;
+ u32 enable_bits = SDVO_ENABLE;
+
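+ /* Gate the audio enable bit together with the port enable so that
+ * DPMS off also shuts audio down when this HDMI port carries audio. */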
+ if (intel_hdmi->has_audio)
+ enable_bits |= SDVO_AUDIO_ENABLE;
temp = I915_READ(intel_hdmi->sdvox_reg);
@@ -281,9 +285,9 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
}
if (mode != DRM_MODE_DPMS_ON) {
- temp &= ~SDVO_ENABLE;
+ temp &= ~enable_bits;
} else {
- temp |= SDVO_ENABLE;
+ temp |= enable_bits;
}
I915_WRITE(intel_hdmi->sdvox_reg, temp);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 42f165a520de..e44191132ac4 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -715,6 +715,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "EB1007"),
},
},
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Asus AT5NM10T-I",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "AT5NM10T-I"),
+ },
+ },
{ } /* terminating entry */
};
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 21f60b7d69a3..04d79fd1dc9d 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -178,13 +178,10 @@ u32 intel_panel_get_max_backlight(struct drm_device *dev)
if (HAS_PCH_SPLIT(dev)) {
max >>= 16;
} else {
- if (IS_PINEVIEW(dev)) {
+ if (INTEL_INFO(dev)->gen < 4)
max >>= 17;
- } else {
+ else
max >>= 16;
- if (INTEL_INFO(dev)->gen < 4)
- max &= ~1;
- }
if (is_backlight_combination_mode(dev))
max *= 0xff;
@@ -203,13 +200,12 @@ u32 intel_panel_get_backlight(struct drm_device *dev)
val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
} else {
val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
- if (IS_PINEVIEW(dev))
+ if (INTEL_INFO(dev)->gen < 4)
val >>= 1;
if (is_backlight_combination_mode(dev)) {
u8 lbpc;
- val &= ~1;
pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc);
val *= lbpc;
}
@@ -246,11 +242,9 @@ static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level
}
tmp = I915_READ(BLC_PWM_CTL);
- if (IS_PINEVIEW(dev)) {
- tmp &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1);
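+ /* On gen < 4 the duty cycle value is stored shifted up by one bit,
+ * mirroring the >>= 1 in intel_panel_get_backlight(). */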
+ if (INTEL_INFO(dev)->gen < 4)
level <<= 1;
- } else
- tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
+ tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
I915_WRITE(BLC_PWM_CTL, tmp | level);
}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index ca70e2f10445..77e729d4e4f0 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -414,6 +414,11 @@ static int init_render_ring(struct intel_ring_buffer *ring)
return ret;
}
+ if (INTEL_INFO(dev)->gen >= 6) {
+ I915_WRITE(INSTPM,
+ INSTPM_FORCE_ORDERING << 16 | INSTPM_FORCE_ORDERING);
+ }
+
return ret;
}
@@ -787,6 +792,17 @@ ring_add_request(struct intel_ring_buffer *ring,
}
static bool
+gen7_blt_ring_get_irq(struct intel_ring_buffer *ring)
+{
+ /* The BLT ring on IVB appears to have broken synchronization
+ * between the seqno write and the interrupt, so that the
+ * interrupt appears first. Returning false here makes
+ * i915_wait_request() do a polling loop, instead.
+ */
+ return false;
+}
+
+static bool
gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
{
struct drm_device *dev = ring->dev;
@@ -1119,7 +1135,16 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
}
trace_i915_ring_wait_begin(ring);
- end = jiffies + 3 * HZ;
+ if (drm_core_check_feature(dev, DRIVER_GEM))
+ /* With GEM the hangcheck timer should kick us out of the loop;
+ * leaving it early runs the risk of corrupting GEM state (due
+ * to running on almost untested codepaths). But on resume
+ * timers don't work yet, so prevent a complete hang in that
+ * case by choosing an insanely large timeout. */
+ end = jiffies + 60 * HZ;
+ else
+ end = jiffies + 3 * HZ;
+
do {
ring->head = I915_READ_HEAD(ring);
ring->space = ring_space(ring);
@@ -1552,5 +1577,8 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
*ring = gen6_blt_ring;
+ if (IS_GEN7(dev))
+ ring->irq_get = gen7_blt_ring_get_irq;
+
return intel_init_ring_buffer(dev, ring);
}
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 3003fb25aefd..f7b9268df266 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -50,6 +50,7 @@
#define IS_TMDS(c) (c->output_flag & SDVO_TMDS_MASK)
#define IS_LVDS(c) (c->output_flag & SDVO_LVDS_MASK)
#define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK))
+#define IS_DIGITAL(c) (c->output_flag & (SDVO_TMDS_MASK | SDVO_LVDS_MASK))
static const char *tv_format_names[] = {
@@ -1086,8 +1087,12 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
}
sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
}
- if (intel_crtc->pipe == 1)
- sdvox |= SDVO_PIPE_B_SELECT;
+
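+ /* CPT PCHs encode the transcoder/pipe selection differently, so pick
+ * the encoding that matches the PCH generation. */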
+ if (INTEL_PCH_TYPE(dev) >= PCH_CPT)
+ sdvox |= TRANSCODER_CPT(intel_crtc->pipe);
+ else
+ sdvox |= TRANSCODER(intel_crtc->pipe);
+
if (intel_sdvo->has_hdmi_audio)
sdvox |= SDVO_AUDIO_ENABLE;
@@ -1314,6 +1319,18 @@ intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
return status;
}
+static bool
+intel_sdvo_connector_matches_edid(struct intel_sdvo_connector *sdvo,
+ struct edid *edid)
+{
+ bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
+ bool connector_is_digital = !!IS_DIGITAL(sdvo);
+
+ DRM_DEBUG_KMS("connector_is_digital? %d, monitor_is_digital? %d\n",
+ connector_is_digital, monitor_is_digital);
+ return connector_is_digital == monitor_is_digital;
+}
+
static enum drm_connector_status
intel_sdvo_detect(struct drm_connector *connector, bool force)
{
@@ -1358,10 +1375,12 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
if (edid == NULL)
edid = intel_sdvo_get_analog_edid(connector);
if (edid != NULL) {
- if (edid->input & DRM_EDID_INPUT_DIGITAL)
- ret = connector_status_disconnected;
- else
+ if (intel_sdvo_connector_matches_edid(intel_sdvo_connector,
+ edid))
ret = connector_status_connected;
+ else
+ ret = connector_status_disconnected;
+
connector->display_info.raw_edid = NULL;
kfree(edid);
} else
@@ -1402,11 +1421,8 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
edid = intel_sdvo_get_analog_edid(connector);
if (edid != NULL) {
- struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
- bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
- bool connector_is_digital = !!IS_TMDS(intel_sdvo_connector);
-
- if (connector_is_digital == monitor_is_digital) {
+ if (intel_sdvo_connector_matches_edid(to_intel_sdvo_connector(connector),
+ edid)) {
drm_mode_connector_update_edid_property(connector, edid);
drm_add_edid_modes(connector, edid);
}
diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/intel_sdvo_regs.h
index 4aa6f343e49a..6b7b22f4d63e 100644
--- a/drivers/gpu/drm/i915/intel_sdvo_regs.h
+++ b/drivers/gpu/drm/i915/intel_sdvo_regs.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2006-2007 Intel Corporation
+ * Copyright © 2006-2007 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
new file mode 100644
index 000000000000..d13989fda501
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -0,0 +1,668 @@
+/*
+ * Copyright © 2011 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Jesse Barnes <jbarnes@virtuousgeek.org>
+ *
+ * New plane/sprite handling.
+ *
+ * The older chips had a separate interface for programming plane related
+ * registers; newer ones are much simpler and we can use the new DRM plane
+ * support.
+ */
+#include "drmP.h"
+#include "drm_crtc.h"
+#include "drm_fourcc.h"
+#include "intel_drv.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
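+/*
+ * Two register families are programmed below: the ivb_* functions drive the
+ * IVB SPRCTL/SPR* sprite registers, while the snb_* functions drive the SNB
+ * DVSCNTR/DVS* registers.
+ */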
+static void
+ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t x, uint32_t y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ int pipe = intel_plane->pipe;
+ u32 sprctl, sprscale = 0;
+ int pixel_size;
+
+ sprctl = I915_READ(SPRCTL(pipe));
+
+ /* Mask out pixel format bits in case we change it */
+ sprctl &= ~SPRITE_PIXFORMAT_MASK;
+ sprctl &= ~SPRITE_RGB_ORDER_RGBX;
+ sprctl &= ~SPRITE_YUV_BYTE_ORDER_MASK;
+
+ switch (fb->pixel_format) {
+ case DRM_FORMAT_XBGR8888:
+ sprctl |= SPRITE_FORMAT_RGBX888;
+ pixel_size = 4;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX;
+ pixel_size = 4;
+ break;
+ case DRM_FORMAT_YUYV:
+ sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YUYV;
+ pixel_size = 2;
+ break;
+ case DRM_FORMAT_YVYU:
+ sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YVYU;
+ pixel_size = 2;
+ break;
+ case DRM_FORMAT_UYVY:
+ sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_UYVY;
+ pixel_size = 2;
+ break;
+ case DRM_FORMAT_VYUY:
+ sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_VYUY;
+ pixel_size = 2;
+ break;
+ default:
+ DRM_DEBUG_DRIVER("bad pixel format, assuming RGBX888\n");
+ sprctl |= SPRITE_FORMAT_RGBX888;
+ pixel_size = 4;
+ break;
+ }
+
+ if (obj->tiling_mode != I915_TILING_NONE)
+ sprctl |= SPRITE_TILED;
+
+ /* Trickle feed must stay disabled */
+ sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
+ sprctl |= SPRITE_ENABLE;
+ sprctl |= SPRITE_DEST_KEY;
+
+ /* Sizes are 0 based */
+ src_w--;
+ src_h--;
+ crtc_w--;
+ crtc_h--;
+
+ intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size);
+
+ /*
+ * IVB workaround: must disable low power watermarks for at least
+ * one frame before enabling scaling. LP watermarks can be re-enabled
+ * when scaling is disabled.
+ */
+ if (crtc_w != src_w || crtc_h != src_h) {
+ dev_priv->sprite_scaling_enabled = true;
+ sandybridge_update_wm(dev);
+ intel_wait_for_vblank(dev, pipe);
+ sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
+ } else {
+ dev_priv->sprite_scaling_enabled = false;
+ /* potentially re-enable LP watermarks */
+ sandybridge_update_wm(dev);
+ }
+
+ I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
+ I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
+ if (obj->tiling_mode != I915_TILING_NONE) {
+ I915_WRITE(SPRTILEOFF(pipe), (y << 16) | x);
+ } else {
+ unsigned long offset;
+
+ offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
+ I915_WRITE(SPRLINOFF(pipe), offset);
+ }
+ I915_WRITE(SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
+ I915_WRITE(SPRSCALE(pipe), sprscale);
+ I915_WRITE(SPRCTL(pipe), sprctl);
+ I915_WRITE(SPRSURF(pipe), obj->gtt_offset);
+ POSTING_READ(SPRSURF(pipe));
+}
+
+static void
+ivb_disable_plane(struct drm_plane *plane)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ int pipe = intel_plane->pipe;
+
+ I915_WRITE(SPRCTL(pipe), I915_READ(SPRCTL(pipe)) & ~SPRITE_ENABLE);
+ /* Can't leave the scaler enabled... */
+ I915_WRITE(SPRSCALE(pipe), 0);
+ /* Activate double buffered register update */
+ I915_WRITE(SPRSURF(pipe), 0);
+ POSTING_READ(SPRSURF(pipe));
+}
+
+static int
+ivb_update_colorkey(struct drm_plane *plane,
+ struct drm_intel_sprite_colorkey *key)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane;
+ u32 sprctl;
+ int ret = 0;
+
+ intel_plane = to_intel_plane(plane);
+
+ I915_WRITE(SPRKEYVAL(intel_plane->pipe), key->min_value);
+ I915_WRITE(SPRKEYMAX(intel_plane->pipe), key->max_value);
+ I915_WRITE(SPRKEYMSK(intel_plane->pipe), key->channel_mask);
+
+ sprctl = I915_READ(SPRCTL(intel_plane->pipe));
+ sprctl &= ~(SPRITE_SOURCE_KEY | SPRITE_DEST_KEY);
+ if (key->flags & I915_SET_COLORKEY_DESTINATION)
+ sprctl |= SPRITE_DEST_KEY;
+ else if (key->flags & I915_SET_COLORKEY_SOURCE)
+ sprctl |= SPRITE_SOURCE_KEY;
+ I915_WRITE(SPRCTL(intel_plane->pipe), sprctl);
+
+ POSTING_READ(SPRKEYMSK(intel_plane->pipe));
+
+ return ret;
+}
+
+static void
+ivb_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane;
+ u32 sprctl;
+
+ intel_plane = to_intel_plane(plane);
+
+ key->min_value = I915_READ(SPRKEYVAL(intel_plane->pipe));
+ key->max_value = I915_READ(SPRKEYMAX(intel_plane->pipe));
+ key->channel_mask = I915_READ(SPRKEYMSK(intel_plane->pipe));
+ key->flags = 0;
+
+ sprctl = I915_READ(SPRCTL(intel_plane->pipe));
+
+ if (sprctl & SPRITE_DEST_KEY)
+ key->flags = I915_SET_COLORKEY_DESTINATION;
+ else if (sprctl & SPRITE_SOURCE_KEY)
+ key->flags = I915_SET_COLORKEY_SOURCE;
+ else
+ key->flags = I915_SET_COLORKEY_NONE;
+}
+
+static void
+snb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t x, uint32_t y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ int pipe = intel_plane->pipe, pixel_size;
+ u32 dvscntr, dvsscale = 0;
+
+ dvscntr = I915_READ(DVSCNTR(pipe));
+
+ /* Mask out pixel format bits in case we change it */
+ dvscntr &= ~DVS_PIXFORMAT_MASK;
+ dvscntr &= ~DVS_RGB_ORDER_RGBX;
+ dvscntr &= ~DVS_YUV_BYTE_ORDER_MASK;
+
+ switch (fb->pixel_format) {
+ case DRM_FORMAT_XBGR8888:
+ dvscntr |= DVS_FORMAT_RGBX888;
+ pixel_size = 4;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ dvscntr |= DVS_FORMAT_RGBX888 | DVS_RGB_ORDER_RGBX;
+ pixel_size = 4;
+ break;
+ case DRM_FORMAT_YUYV:
+ dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YUYV;
+ pixel_size = 2;
+ break;
+ case DRM_FORMAT_YVYU:
+ dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YVYU;
+ pixel_size = 2;
+ break;
+ case DRM_FORMAT_UYVY:
+ dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_UYVY;
+ pixel_size = 2;
+ break;
+ case DRM_FORMAT_VYUY:
+ dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_VYUY;
+ pixel_size = 2;
+ break;
+ default:
+ DRM_DEBUG_DRIVER("bad pixel format, assuming RGBX888\n");
+ dvscntr |= DVS_FORMAT_RGBX888;
+ pixel_size = 4;
+ break;
+ }
+
+ if (obj->tiling_mode != I915_TILING_NONE)
+ dvscntr |= DVS_TILED;
+
+	/* trickle feed must be disabled */
+ dvscntr |= DVS_TRICKLE_FEED_DISABLE;
+ dvscntr |= DVS_ENABLE;
+
+ /* Sizes are 0 based */
+ src_w--;
+ src_h--;
+ crtc_w--;
+ crtc_h--;
+
+ intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size);
+
+ if (crtc_w != src_w || crtc_h != src_h)
+ dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h;
+
+ I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
+ I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
+ if (obj->tiling_mode != I915_TILING_NONE) {
+ I915_WRITE(DVSTILEOFF(pipe), (y << 16) | x);
+ } else {
+ unsigned long offset;
+
+ offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
+ I915_WRITE(DVSLINOFF(pipe), offset);
+ }
+ I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
+ I915_WRITE(DVSSCALE(pipe), dvsscale);
+ I915_WRITE(DVSCNTR(pipe), dvscntr);
+ I915_WRITE(DVSSURF(pipe), obj->gtt_offset);
+ POSTING_READ(DVSSURF(pipe));
+}
+
+static void
+snb_disable_plane(struct drm_plane *plane)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ int pipe = intel_plane->pipe;
+
+ I915_WRITE(DVSCNTR(pipe), I915_READ(DVSCNTR(pipe)) & ~DVS_ENABLE);
+ /* Disable the scaler */
+ I915_WRITE(DVSSCALE(pipe), 0);
+ /* Flush double buffered register updates */
+ I915_WRITE(DVSSURF(pipe), 0);
+ POSTING_READ(DVSSURF(pipe));
+}
+
+static void
+intel_enable_primary(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int reg = DSPCNTR(intel_crtc->plane);
+
+ I915_WRITE(reg, I915_READ(reg) | DISPLAY_PLANE_ENABLE);
+}
+
+static void
+intel_disable_primary(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int reg = DSPCNTR(intel_crtc->plane);
+
+ I915_WRITE(reg, I915_READ(reg) & ~DISPLAY_PLANE_ENABLE);
+}
+
+static int
+snb_update_colorkey(struct drm_plane *plane,
+ struct drm_intel_sprite_colorkey *key)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane;
+ u32 dvscntr;
+ int ret = 0;
+
+ intel_plane = to_intel_plane(plane);
+
+ I915_WRITE(DVSKEYVAL(intel_plane->pipe), key->min_value);
+ I915_WRITE(DVSKEYMAX(intel_plane->pipe), key->max_value);
+ I915_WRITE(DVSKEYMSK(intel_plane->pipe), key->channel_mask);
+
+ dvscntr = I915_READ(DVSCNTR(intel_plane->pipe));
+ dvscntr &= ~(DVS_SOURCE_KEY | DVS_DEST_KEY);
+ if (key->flags & I915_SET_COLORKEY_DESTINATION)
+ dvscntr |= DVS_DEST_KEY;
+ else if (key->flags & I915_SET_COLORKEY_SOURCE)
+ dvscntr |= DVS_SOURCE_KEY;
+ I915_WRITE(DVSCNTR(intel_plane->pipe), dvscntr);
+
+ POSTING_READ(DVSKEYMSK(intel_plane->pipe));
+
+ return ret;
+}
+
+static void
+snb_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane;
+ u32 dvscntr;
+
+ intel_plane = to_intel_plane(plane);
+
+ key->min_value = I915_READ(DVSKEYVAL(intel_plane->pipe));
+ key->max_value = I915_READ(DVSKEYMAX(intel_plane->pipe));
+ key->channel_mask = I915_READ(DVSKEYMSK(intel_plane->pipe));
+ key->flags = 0;
+
+ dvscntr = I915_READ(DVSCNTR(intel_plane->pipe));
+
+ if (dvscntr & DVS_DEST_KEY)
+ key->flags = I915_SET_COLORKEY_DESTINATION;
+ else if (dvscntr & DVS_SOURCE_KEY)
+ key->flags = I915_SET_COLORKEY_SOURCE;
+ else
+ key->flags = I915_SET_COLORKEY_NONE;
+}
+
+static int
+intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ struct intel_framebuffer *intel_fb;
+ struct drm_i915_gem_object *obj, *old_obj;
+ int pipe = intel_plane->pipe;
+ int ret = 0;
+ int x = src_x >> 16, y = src_y >> 16;
+ int primary_w = crtc->mode.hdisplay, primary_h = crtc->mode.vdisplay;
+ bool disable_primary = false;
+
+ intel_fb = to_intel_framebuffer(fb);
+ obj = intel_fb->obj;
+
+ old_obj = intel_plane->obj;
+
+ /* Pipe must be running... */
+ if (!(I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE))
+ return -EINVAL;
+
+ if (crtc_x >= primary_w || crtc_y >= primary_h)
+ return -EINVAL;
+
+ /* Don't modify another pipe's plane */
+ if (intel_plane->pipe != intel_crtc->pipe)
+ return -EINVAL;
+
+ /*
+ * Clamp the width & height into the visible area. Note we don't
+ * try to scale the source if part of the visible region is offscreen.
+ * The caller must handle that by adjusting source offset and size.
+ */
+ if ((crtc_x < 0) && ((crtc_x + crtc_w) > 0)) {
+ crtc_w += crtc_x;
+ crtc_x = 0;
+ }
+ if ((crtc_x + crtc_w) <= 0) /* Nothing to display */
+ goto out;
+ if ((crtc_x + crtc_w) > primary_w)
+ crtc_w = primary_w - crtc_x;
+
+ if ((crtc_y < 0) && ((crtc_y + crtc_h) > 0)) {
+ crtc_h += crtc_y;
+ crtc_y = 0;
+ }
+ if ((crtc_y + crtc_h) <= 0) /* Nothing to display */
+ goto out;
+ if (crtc_y + crtc_h > primary_h)
+ crtc_h = primary_h - crtc_y;
+
+ if (!crtc_w || !crtc_h) /* Again, nothing to display */
+ goto out;
+
+ /*
+ * We can take a larger source and scale it down, but
+ * only so much... 16x is the max on SNB.
+ */
+ if (((src_w * src_h) / (crtc_w * crtc_h)) > intel_plane->max_downscale)
+ return -EINVAL;
+
+ /*
+ * If the sprite is completely covering the primary plane,
+ * we can disable the primary and save power.
+ */
+ if ((crtc_x == 0) && (crtc_y == 0) &&
+ (crtc_w == primary_w) && (crtc_h == primary_h))
+ disable_primary = true;
+
+ mutex_lock(&dev->struct_mutex);
+
+ ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
+ if (ret) {
+ DRM_ERROR("failed to pin object\n");
+ goto out_unlock;
+ }
+
+ intel_plane->obj = obj;
+
+ /*
+ * Be sure to re-enable the primary before the sprite is no longer
+ * covering it fully.
+ */
+ if (!disable_primary && intel_plane->primary_disabled) {
+ intel_enable_primary(crtc);
+ intel_plane->primary_disabled = false;
+ }
+
+ intel_plane->update_plane(plane, fb, obj, crtc_x, crtc_y,
+ crtc_w, crtc_h, x, y, src_w, src_h);
+
+ if (disable_primary) {
+ intel_disable_primary(crtc);
+ intel_plane->primary_disabled = true;
+ }
+
+ /* Unpin old obj after new one is active to avoid ugliness */
+ if (old_obj) {
+ /*
+ * It's fairly common to simply update the position of
+ * an existing object. In that case, we don't need to
+ * wait for vblank to avoid ugliness, we only need to
+ * do the pin & ref bookkeeping.
+ */
+ if (old_obj != obj) {
+ mutex_unlock(&dev->struct_mutex);
+ intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe);
+ mutex_lock(&dev->struct_mutex);
+ }
+ i915_gem_object_unpin(old_obj);
+ }
+
+out_unlock:
+ mutex_unlock(&dev->struct_mutex);
+out:
+ return ret;
+}
+
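A minimal standalone sketch (not part of the patch) of the clip-and-downscale test performed by intel_update_plane() above; the helper name and plain-int types are invented for illustration.

int clip_and_check(int x, int y, int w, int h,
		   int screen_w, int screen_h,
		   int src_w, int src_h, int max_downscale)
{
	if (x < 0) { w += x; x = 0; }		/* clip left edge */
	if (y < 0) { h += y; y = 0; }		/* clip top edge */
	if (x + w > screen_w) w = screen_w - x;	/* clip right edge */
	if (y + h > screen_h) h = screen_h - y;	/* clip bottom edge */
	if (w <= 0 || h <= 0)
		return 0;			/* fully offscreen, nothing to show */
	/* e.g. a 3840x2160 source into a 960x540 window hits exactly the 16x SNB limit */
	if ((src_w * src_h) / (w * h) > max_downscale)
		return -1;			/* too much downscaling */
	return 1;				/* visible and within scaler limits */
}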
+static int
+intel_disable_plane(struct drm_plane *plane)
+{
+ struct drm_device *dev = plane->dev;
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ int ret = 0;
+
+ if (intel_plane->primary_disabled) {
+ intel_enable_primary(plane->crtc);
+ intel_plane->primary_disabled = false;
+ }
+
+ intel_plane->disable_plane(plane);
+
+ if (!intel_plane->obj)
+ goto out;
+
+ mutex_lock(&dev->struct_mutex);
+ i915_gem_object_unpin(intel_plane->obj);
+ intel_plane->obj = NULL;
+ mutex_unlock(&dev->struct_mutex);
+out:
+
+ return ret;
+}
+
+static void intel_destroy_plane(struct drm_plane *plane)
+{
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ intel_disable_plane(plane);
+ drm_plane_cleanup(plane);
+ kfree(intel_plane);
+}
+
+int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_intel_sprite_colorkey *set = data;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_mode_object *obj;
+ struct drm_plane *plane;
+ struct intel_plane *intel_plane;
+ int ret = 0;
+
+ if (!dev_priv)
+ return -EINVAL;
+
+ /* Make sure we don't try to enable both src & dest simultaneously */
+ if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
+ return -EINVAL;
+
+ mutex_lock(&dev->mode_config.mutex);
+
+ obj = drm_mode_object_find(dev, set->plane_id, DRM_MODE_OBJECT_PLANE);
+ if (!obj) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ plane = obj_to_plane(obj);
+ intel_plane = to_intel_plane(plane);
+ ret = intel_plane->update_colorkey(plane, set);
+
+out_unlock:
+ mutex_unlock(&dev->mode_config.mutex);
+ return ret;
+}
+
+int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_intel_sprite_colorkey *get = data;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_mode_object *obj;
+ struct drm_plane *plane;
+ struct intel_plane *intel_plane;
+ int ret = 0;
+
+ if (!dev_priv)
+ return -EINVAL;
+
+ mutex_lock(&dev->mode_config.mutex);
+
+ obj = drm_mode_object_find(dev, get->plane_id, DRM_MODE_OBJECT_PLANE);
+ if (!obj) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ plane = obj_to_plane(obj);
+ intel_plane = to_intel_plane(plane);
+ intel_plane->get_colorkey(plane, get);
+
+out_unlock:
+ mutex_unlock(&dev->mode_config.mutex);
+ return ret;
+}
+
+static const struct drm_plane_funcs intel_plane_funcs = {
+ .update_plane = intel_update_plane,
+ .disable_plane = intel_disable_plane,
+ .destroy = intel_destroy_plane,
+};
+
+static uint32_t snb_plane_formats[] = {
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+};
+
+int
+intel_plane_init(struct drm_device *dev, enum pipe pipe)
+{
+ struct intel_plane *intel_plane;
+ unsigned long possible_crtcs;
+ int ret;
+
+ if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
+ DRM_ERROR("new plane code only for SNB+\n");
+ return -ENODEV;
+ }
+
+ intel_plane = kzalloc(sizeof(struct intel_plane), GFP_KERNEL);
+ if (!intel_plane)
+ return -ENOMEM;
+
+ if (IS_GEN6(dev)) {
+ intel_plane->max_downscale = 16;
+ intel_plane->update_plane = snb_update_plane;
+ intel_plane->disable_plane = snb_disable_plane;
+ intel_plane->update_colorkey = snb_update_colorkey;
+ intel_plane->get_colorkey = snb_get_colorkey;
+ } else if (IS_GEN7(dev)) {
+ intel_plane->max_downscale = 2;
+ intel_plane->update_plane = ivb_update_plane;
+ intel_plane->disable_plane = ivb_disable_plane;
+ intel_plane->update_colorkey = ivb_update_colorkey;
+ intel_plane->get_colorkey = ivb_get_colorkey;
+ }
+
+ intel_plane->pipe = pipe;
+ possible_crtcs = (1 << pipe);
+ ret = drm_plane_init(dev, &intel_plane->base, possible_crtcs,
+ &intel_plane_funcs, snb_plane_formats,
+ ARRAY_SIZE(snb_plane_formats), false);
+ if (ret)
+ kfree(intel_plane);
+
+ return ret;
+}
+
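A hypothetical caller sketch (assumes i915 driver context; the real call site is elsewhere in the driver and not shown in this diff) of how intel_plane_init() could be invoked once per display pipe.

static void example_init_sprites(struct drm_device *dev, int num_pipes)
{
	int pipe;

	/* one sprite plane per pipe; the gen-specific hooks are selected
	 * inside intel_plane_init() as shown above */
	for (pipe = 0; pipe < num_pipes; pipe++) {
		int ret = intel_plane_init(dev, pipe);
		if (ret)
			DRM_DEBUG_DRIVER("sprite init failed for pipe %d: %d\n",
					 pipe, ret);
	}
}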
diff --git a/drivers/gpu/drm/mga/mga_drv.c b/drivers/gpu/drm/mga/mga_drv.c
index 33daa29eea66..f9a925d58819 100644
--- a/drivers/gpu/drm/mga/mga_drv.c
+++ b/drivers/gpu/drm/mga/mga_drv.c
@@ -44,6 +44,20 @@ static struct pci_device_id pciidlist[] = {
mga_PCI_IDS
};
+static const struct file_operations mga_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = drm_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = mga_compat_ioctl,
+#endif
+ .llseek = noop_llseek,
+};
+
static struct drm_driver driver = {
.driver_features =
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA |
@@ -64,20 +78,7 @@ static struct drm_driver driver = {
.reclaim_buffers = drm_core_reclaim_buffers,
.ioctls = mga_ioctls,
.dma_ioctl = mga_dma_buffers,
- .fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = drm_mmap,
- .poll = drm_poll,
- .fasync = drm_fasync,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = mga_compat_ioctl,
-#endif
- .llseek = noop_llseek,
- },
-
+ .fops = &mga_driver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index 35ef5b1e3566..9f27e3d9e69a 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -9,9 +9,9 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
nouveau_bo.o nouveau_fence.o nouveau_gem.o nouveau_ttm.o \
nouveau_hw.o nouveau_calc.o nouveau_bios.o nouveau_i2c.o \
nouveau_display.o nouveau_connector.o nouveau_fbcon.o \
- nouveau_dp.o nouveau_ramht.o \
+ nouveau_hdmi.o nouveau_dp.o nouveau_ramht.o \
nouveau_pm.o nouveau_volt.o nouveau_perf.o nouveau_temp.o \
- nouveau_mm.o nouveau_vm.o \
+ nouveau_mm.o nouveau_vm.o nouveau_mxm.o nouveau_gpio.o \
nv04_timer.o \
nv04_mc.o nv40_mc.o nv50_mc.o \
nv04_fb.o nv10_fb.o nv30_fb.o nv40_fb.o nv50_fb.o nvc0_fb.o \
@@ -19,9 +19,12 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
nv04_graph.o nv10_graph.o nv20_graph.o \
nv40_graph.o nv50_graph.o nvc0_graph.o \
nv40_grctx.o nv50_grctx.o nvc0_grctx.o \
- nv84_crypt.o \
+ nv84_crypt.o nv98_crypt.o \
nva3_copy.o nvc0_copy.o \
nv31_mpeg.o nv50_mpeg.o \
+ nv84_bsp.o \
+ nv84_vp.o \
+ nv98_ppp.o \
nv04_instmem.o nv50_instmem.o nvc0_instmem.o \
nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \
nv04_crtc.o nv04_display.o nv04_cursor.o \
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 525744d593c1..7814a760c164 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -18,12 +18,6 @@
#include <linux/vga_switcheroo.h>
-#define NOUVEAU_DSM_SUPPORTED 0x00
-#define NOUVEAU_DSM_SUPPORTED_FUNCTIONS 0x00
-
-#define NOUVEAU_DSM_ACTIVE 0x01
-#define NOUVEAU_DSM_ACTIVE_QUERY 0x00
-
#define NOUVEAU_DSM_LED 0x02
#define NOUVEAU_DSM_LED_STATE 0x00
#define NOUVEAU_DSM_LED_OFF 0x10
@@ -35,6 +29,9 @@
#define NOUVEAU_DSM_POWER_SPEED 0x01
#define NOUVEAU_DSM_POWER_STAMINA 0x02
+#define NOUVEAU_DSM_OPTIMUS_FN 0x1A
+#define NOUVEAU_DSM_OPTIMUS_ARGS 0x03000001
+
static struct nouveau_dsm_priv {
bool dsm_detected;
bool optimus_detected;
@@ -61,7 +58,8 @@ static int nouveau_optimus_dsm(acpi_handle handle, int func, int arg, uint32_t *
struct acpi_object_list input;
union acpi_object params[4];
union acpi_object *obj;
- int err;
+ int i, err;
+ char args_buff[4];
input.count = 4;
input.pointer = params;
@@ -73,7 +71,11 @@ static int nouveau_optimus_dsm(acpi_handle handle, int func, int arg, uint32_t *
params[2].type = ACPI_TYPE_INTEGER;
params[2].integer.value = func;
params[3].type = ACPI_TYPE_BUFFER;
- params[3].buffer.length = 0;
+ params[3].buffer.length = 4;
+ /* ACPI is little endian, AABBCCDD becomes {DD,CC,BB,AA} */
+ for (i = 0; i < 4; i++)
+ args_buff[i] = (arg >> i * 8) & 0xFF;
+ params[3].buffer.pointer = args_buff;
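	/* Worked example (illustrative): with arg = NOUVEAU_DSM_OPTIMUS_ARGS
	 * (0x03000001), the loop above packs args_buff as
	 * { 0x01, 0x00, 0x00, 0x03 }. */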
err = acpi_evaluate_object(handle, "_DSM", &input, &output);
if (err) {
@@ -148,6 +150,23 @@ static int nouveau_dsm(acpi_handle handle, int func, int arg, uint32_t *result)
return 0;
}
+/* Returns 1 if a DSM function is usable and 0 otherwise */
+static int nouveau_test_dsm(acpi_handle test_handle,
+ int (*dsm_func)(acpi_handle, int, int, uint32_t *),
+ int sfnc)
+{
+ u32 result = 0;
+
+ /* Function 0 returns a Buffer containing available functions. The args
+ * parameter is ignored for function 0, so just put 0 in it */
+ if (dsm_func(test_handle, 0, 0, &result))
+ return 0;
+
+ /* ACPI Spec v4 9.14.1: if bit 0 is zero, no function is supported. If
+ * the n-th bit is enabled, function n is supported */
+ return result & 1 && result & (1 << sfnc);
+}
+
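A userspace-runnable illustration (assumed values, not from the driver) of how the function-0 bitmask returned by _DSM is interpreted by nouveau_test_dsm() above.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t result = (1u << 0) | (1u << 0x1A);	/* bit 0 valid + Optimus fn 0x1A */
	int sfnc = 0x1A;				/* NOUVEAU_DSM_OPTIMUS_FN */

	/* same test as the driver: bit 0 must be set, and so must bit sfnc */
	printf("usable: %d\n", (result & 1) && (result & (1u << sfnc)));
	return 0;
}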
static int nouveau_dsm_switch_mux(acpi_handle handle, int mux_id)
{
mxm_wmi_call_mxmx(mux_id == NOUVEAU_DSM_LED_STAMINA ? MXM_MXDS_ADAPTER_IGD : MXM_MXDS_ADAPTER_0);
@@ -168,6 +187,10 @@ static int nouveau_dsm_set_discrete_state(acpi_handle handle, enum vga_switchero
static int nouveau_dsm_switchto(enum vga_switcheroo_client_id id)
{
+ /* perhaps the _DSM functions are mutually exclusive, but prepare for
+ * the future */
+ if (!nouveau_dsm_priv.dsm_detected && nouveau_dsm_priv.optimus_detected)
+ return 0;
if (id == VGA_SWITCHEROO_IGD)
return nouveau_dsm_switch_mux(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_LED_STAMINA);
else
@@ -180,6 +203,11 @@ static int nouveau_dsm_power_state(enum vga_switcheroo_client_id id,
if (id == VGA_SWITCHEROO_IGD)
return 0;
+ /* Optimus laptops have the card already disabled in
+ * nouveau_switcheroo_set_state */
+ if (!nouveau_dsm_priv.dsm_detected && nouveau_dsm_priv.optimus_detected)
+ return 0;
+
return nouveau_dsm_set_discrete_state(nouveau_dsm_priv.dhandle, state);
}
@@ -212,8 +240,7 @@ static int nouveau_dsm_pci_probe(struct pci_dev *pdev)
{
acpi_handle dhandle, nvidia_handle;
acpi_status status;
- int ret, retval = 0;
- uint32_t result;
+ int retval = 0;
dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
if (!dhandle)
@@ -224,13 +251,11 @@ static int nouveau_dsm_pci_probe(struct pci_dev *pdev)
return false;
}
- ret = nouveau_dsm(dhandle, NOUVEAU_DSM_SUPPORTED,
- NOUVEAU_DSM_SUPPORTED_FUNCTIONS, &result);
- if (ret == 0)
+ if (nouveau_test_dsm(dhandle, nouveau_dsm, NOUVEAU_DSM_POWER))
retval |= NOUVEAU_DSM_HAS_MUX;
- ret = nouveau_optimus_dsm(dhandle, 0, 0, &result);
- if (ret == 0)
+ if (nouveau_test_dsm(dhandle, nouveau_optimus_dsm,
+ NOUVEAU_DSM_OPTIMUS_FN))
retval |= NOUVEAU_DSM_HAS_OPT;
if (retval)
@@ -269,15 +294,22 @@ static bool nouveau_dsm_detect(void)
}
if (vga_count == 2 && has_dsm && guid_valid) {
- acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME, &buffer);
+ acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME,
+ &buffer);
printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n",
- acpi_method_name);
+ acpi_method_name);
nouveau_dsm_priv.dsm_detected = true;
ret = true;
}
- if (has_optimus == 1)
+ if (has_optimus == 1) {
+ acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME,
+ &buffer);
+ printk(KERN_INFO "VGA switcheroo: detected Optimus DSM method %s handle\n",
+ acpi_method_name);
nouveau_dsm_priv.optimus_detected = true;
+ ret = true;
+ }
return ret;
}
@@ -293,6 +325,17 @@ void nouveau_register_dsm_handler(void)
vga_switcheroo_register_handler(&nouveau_dsm_handler);
}
+/* Must be called for Optimus models before the card can be turned off */
+void nouveau_switcheroo_optimus_dsm(void)
+{
+ u32 result = 0;
+ if (!nouveau_dsm_priv.optimus_detected)
+ return;
+
+ nouveau_optimus_dsm(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_OPTIMUS_FN,
+ NOUVEAU_DSM_OPTIMUS_ARGS, &result);
+}
+
void nouveau_unregister_dsm_handler(void)
{
vga_switcheroo_unregister_handler();
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 5fc201b49d30..e5cbead85e50 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -27,6 +27,7 @@
#include "nouveau_drv.h"
#include "nouveau_hw.h"
#include "nouveau_encoder.h"
+#include "nouveau_gpio.h"
#include <linux/io-mapping.h>
@@ -34,9 +35,6 @@
#define NV_CIO_CRE_44_HEADA 0x0
#define NV_CIO_CRE_44_HEADB 0x3
#define FEATURE_MOBILE 0x10 /* also FEATURE_QUADRO for BMP */
-#define LEGACY_I2C_CRT 0x80
-#define LEGACY_I2C_PANEL 0x81
-#define LEGACY_I2C_TV 0x82
#define EDID1_LEN 128
@@ -723,115 +721,19 @@ static int dcb_entry_idx_from_crtchead(struct drm_device *dev)
return dcb_entry;
}
-static int
-read_dcb_i2c_entry(struct drm_device *dev, int dcb_version, uint8_t *i2ctable, int index, struct dcb_i2c_entry *i2c)
-{
- uint8_t dcb_i2c_ver = dcb_version, headerlen = 0, entry_len = 4;
- int i2c_entries = DCB_MAX_NUM_I2C_ENTRIES;
- int recordoffset = 0, rdofs = 1, wrofs = 0;
- uint8_t port_type = 0;
-
- if (!i2ctable)
- return -EINVAL;
-
- if (dcb_version >= 0x30) {
- if (i2ctable[0] != dcb_version) /* necessary? */
- NV_WARN(dev,
- "DCB I2C table version mismatch (%02X vs %02X)\n",
- i2ctable[0], dcb_version);
- dcb_i2c_ver = i2ctable[0];
- headerlen = i2ctable[1];
- if (i2ctable[2] <= DCB_MAX_NUM_I2C_ENTRIES)
- i2c_entries = i2ctable[2];
- else
- NV_WARN(dev,
- "DCB I2C table has more entries than indexable "
- "(%d entries, max %d)\n", i2ctable[2],
- DCB_MAX_NUM_I2C_ENTRIES);
- entry_len = i2ctable[3];
- /* [4] is i2c_default_indices, read in parse_dcb_table() */
- }
- /*
- * It's your own fault if you call this function on a DCB 1.1 BIOS --
- * the test below is for DCB 1.2
- */
- if (dcb_version < 0x14) {
- recordoffset = 2;
- rdofs = 0;
- wrofs = 1;
- }
-
- if (index == 0xf)
- return 0;
- if (index >= i2c_entries) {
- NV_ERROR(dev, "DCB I2C index too big (%d >= %d)\n",
- index, i2ctable[2]);
- return -ENOENT;
- }
- if (i2ctable[headerlen + entry_len * index + 3] == 0xff) {
- NV_ERROR(dev, "DCB I2C entry invalid\n");
- return -EINVAL;
- }
-
- if (dcb_i2c_ver >= 0x30) {
- port_type = i2ctable[headerlen + recordoffset + 3 + entry_len * index];
-
- /*
- * Fixup for chips using same address offset for read and
- * write.
- */
- if (port_type == 4) /* seen on C51 */
- rdofs = wrofs = 1;
- if (port_type >= 5) /* G80+ */
- rdofs = wrofs = 0;
- }
-
- if (dcb_i2c_ver >= 0x40) {
- if (port_type != 5 && port_type != 6)
- NV_WARN(dev, "DCB I2C table has port type %d\n", port_type);
-
- i2c->entry = ROM32(i2ctable[headerlen + recordoffset + entry_len * index]);
- }
-
- i2c->port_type = port_type;
- i2c->read = i2ctable[headerlen + recordoffset + rdofs + entry_len * index];
- i2c->write = i2ctable[headerlen + recordoffset + wrofs + entry_len * index];
-
- return 0;
-}
-
static struct nouveau_i2c_chan *
init_i2c_device_find(struct drm_device *dev, int i2c_index)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct dcb_table *dcb = &dev_priv->vbios.dcb;
-
if (i2c_index == 0xff) {
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct dcb_table *dcb = &dev_priv->vbios.dcb;
/* note: dcb_entry_idx_from_crtchead needs pre-script set-up */
- int idx = dcb_entry_idx_from_crtchead(dev), shift = 0;
- int default_indices = dcb->i2c_default_indices;
+ int idx = dcb_entry_idx_from_crtchead(dev);
+ i2c_index = NV_I2C_DEFAULT(0);
if (idx != 0x7f && dcb->entry[idx].i2c_upper_default)
- shift = 4;
-
- i2c_index = (default_indices >> shift) & 0xf;
+ i2c_index = NV_I2C_DEFAULT(1);
}
- if (i2c_index == 0x80) /* g80+ */
- i2c_index = dcb->i2c_default_indices & 0xf;
- else
- if (i2c_index == 0x81)
- i2c_index = (dcb->i2c_default_indices & 0xf0) >> 4;
-
- if (i2c_index >= DCB_MAX_NUM_I2C_ENTRIES) {
- NV_ERROR(dev, "invalid i2c_index 0x%x\n", i2c_index);
- return NULL;
- }
-
- /* Make sure i2c table entry has been parsed, it may not
- * have been if this is a bus not referenced by a DCB encoder
- */
- read_dcb_i2c_entry(dev, dcb->version, dcb->i2c_table,
- i2c_index, &dcb->i2c[i2c_index]);
return nouveau_i2c_find(dev, i2c_index);
}
@@ -1199,13 +1101,9 @@ init_dp_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
switch (cond) {
case 0:
- {
- struct dcb_connector_table_entry *ent =
- &bios->dcb.connector.entry[dcb->connector];
-
- if (ent->type != DCB_CONNECTOR_eDP)
+ entry = dcb_conn(dev, dcb->connector);
+ if (!entry || entry[0] != DCB_CONNECTOR_eDP)
iexec->execute = false;
- }
break;
case 1:
case 2:
@@ -3227,49 +3125,6 @@ init_8d(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
return 1;
}
-static void
-init_gpio_unknv50(struct nvbios *bios, struct dcb_gpio_entry *gpio)
-{
- const uint32_t nv50_gpio_ctl[2] = { 0xe100, 0xe28c };
- u32 r, s, v;
-
- /* Not a clue, needs de-magicing */
- r = nv50_gpio_ctl[gpio->line >> 4];
- s = (gpio->line & 0x0f);
- v = bios_rd32(bios, r) & ~(0x00010001 << s);
- switch ((gpio->entry & 0x06000000) >> 25) {
- case 1:
- v |= (0x00000001 << s);
- break;
- case 2:
- v |= (0x00010000 << s);
- break;
- default:
- break;
- }
-
- bios_wr32(bios, r, v);
-}
-
-static void
-init_gpio_unknvd0(struct nvbios *bios, struct dcb_gpio_entry *gpio)
-{
- u32 v, i;
-
- v = bios_rd32(bios, 0x00d610 + (gpio->line * 4));
- v &= 0xffffff00;
- v |= (gpio->entry & 0x00ff0000) >> 16;
- bios_wr32(bios, 0x00d610 + (gpio->line * 4), v);
-
- i = (gpio->entry & 0x1f000000) >> 24;
- if (i) {
- v = bios_rd32(bios, 0x00d640 + ((i - 1) * 4));
- v &= 0xffffff00;
- v |= gpio->line;
- bios_wr32(bios, 0x00d640 + ((i - 1) * 4), v);
- }
-}
-
static int
init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
@@ -3282,35 +3137,8 @@ init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
* each GPIO according to various values listed in each entry
*/
- struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
- struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
- int i;
-
- if (dev_priv->card_type < NV_50) {
- NV_ERROR(bios->dev, "INIT_GPIO on unsupported chipset\n");
- return 1;
- }
-
- if (!iexec->execute)
- return 1;
-
- for (i = 0; i < bios->dcb.gpio.entries; i++) {
- struct dcb_gpio_entry *gpio = &bios->dcb.gpio.entry[i];
-
- BIOSLOG(bios, "0x%04X: Entry: 0x%08X\n", offset, gpio->entry);
-
- BIOSLOG(bios, "0x%04X: set gpio 0x%02x, state %d\n",
- offset, gpio->tag, gpio->state_default);
-
- if (!bios->execute)
- continue;
-
- pgpio->set(bios->dev, gpio->tag, gpio->state_default);
- if (dev_priv->card_type < NV_D0)
- init_gpio_unknv50(bios, gpio);
- else
- init_gpio_unknvd0(bios, gpio);
- }
+ if (iexec->execute && bios->execute)
+ nouveau_gpio_reset(bios->dev);
return 1;
}
@@ -4407,18 +4235,6 @@ int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, b
break;
}
- /* Dell Latitude D620 reports a too-high value for the dual-link
- * transition freq, causing us to program the panel incorrectly.
- *
- * It doesn't appear the VBIOS actually uses its transition freq
- * (90000kHz), instead it uses the "Number of LVDS channels" field
- * out of the panel ID structure (http://www.spwg.org/).
- *
- * For the moment, a quirk will do :)
- */
- if (nv_match_device(dev, 0x01d7, 0x1028, 0x01c2))
- bios->fp.duallink_transition_clk = 80000;
-
/* set dual_link flag for EDID case */
if (pxclk && (chip_version < 0x25 || chip_version > 0x28))
bios->fp.dual_link = (pxclk >= bios->fp.duallink_transition_clk);
@@ -4541,7 +4357,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, u16 type, int pclk,
NV_DEBUG_KMS(dev, "Searching for output entry for %d %d %d\n",
dcbent->type, dcbent->location, dcbent->or);
for (i = 0; i < table[3]; i++) {
- otable = ROMPTR(bios, table[table[1] + (i * table[2])]);
+ otable = ROMPTR(dev, table[table[1] + (i * table[2])]);
if (otable && bios_encoder_match(dcbent, ROM32(otable[0])))
break;
}
@@ -4719,7 +4535,7 @@ static struct pll_mapping nv84_pll_mapping[] = {
{ PLL_CORE , 0x004028 },
{ PLL_SHADER, 0x004020 },
{ PLL_MEMORY, 0x004008 },
- { PLL_UNK05 , 0x004030 },
+ { PLL_VDEC , 0x004030 },
{ PLL_UNK41 , 0x00e818 },
{ PLL_VPLL0 , 0x614100 },
{ PLL_VPLL1 , 0x614900 },
@@ -5485,6 +5301,9 @@ bit_table(struct drm_device *dev, u8 id, struct bit_entry *bit)
struct nvbios *bios = &dev_priv->vbios;
u8 entries, *entry;
+ if (bios->type != NVBIOS_BIT)
+ return -ENODEV;
+
entries = bios->data[bios->offset + 10];
entry = &bios->data[bios->offset + 12];
while (entries--) {
@@ -5493,7 +5312,7 @@ bit_table(struct drm_device *dev, u8 id, struct bit_entry *bit)
bit->version = entry[1];
bit->length = ROM16(entry[2]);
bit->offset = ROM16(entry[4]);
- bit->data = ROMPTR(bios, entry[4]);
+ bit->data = ROMPTR(dev, entry[4]);
return 0;
}
@@ -5598,10 +5417,6 @@ static int parse_bmp_structure(struct drm_device *dev, struct nvbios *bios, unsi
uint16_t legacy_scripts_offset, legacy_i2c_offset;
/* load needed defaults in case we can't parse this info */
- bios->dcb.i2c[0].write = NV_CIO_CRE_DDC_WR__INDEX;
- bios->dcb.i2c[0].read = NV_CIO_CRE_DDC_STATUS__INDEX;
- bios->dcb.i2c[1].write = NV_CIO_CRE_DDC0_WR__INDEX;
- bios->dcb.i2c[1].read = NV_CIO_CRE_DDC0_STATUS__INDEX;
bios->digital_min_front_porch = 0x4b;
bios->fmaxvco = 256000;
bios->fminvco = 128000;
@@ -5709,14 +5524,6 @@ static int parse_bmp_structure(struct drm_device *dev, struct nvbios *bios, unsi
bios->legacy.i2c_indices.crt = bios->data[legacy_i2c_offset];
bios->legacy.i2c_indices.tv = bios->data[legacy_i2c_offset + 1];
bios->legacy.i2c_indices.panel = bios->data[legacy_i2c_offset + 2];
- if (bios->data[legacy_i2c_offset + 4])
- bios->dcb.i2c[0].write = bios->data[legacy_i2c_offset + 4];
- if (bios->data[legacy_i2c_offset + 5])
- bios->dcb.i2c[0].read = bios->data[legacy_i2c_offset + 5];
- if (bios->data[legacy_i2c_offset + 6])
- bios->dcb.i2c[1].write = bios->data[legacy_i2c_offset + 6];
- if (bios->data[legacy_i2c_offset + 7])
- bios->dcb.i2c[1].read = bios->data[legacy_i2c_offset + 7];
if (bmplength > 74) {
bios->fmaxvco = ROM32(bmp[67]);
@@ -5767,286 +5574,128 @@ static uint16_t findstr(uint8_t *data, int n, const uint8_t *str, int len)
return 0;
}
-static struct dcb_gpio_entry *
-new_gpio_entry(struct nvbios *bios)
-{
- struct drm_device *dev = bios->dev;
- struct dcb_gpio_table *gpio = &bios->dcb.gpio;
-
- if (gpio->entries >= DCB_MAX_NUM_GPIO_ENTRIES) {
- NV_ERROR(dev, "exceeded maximum number of gpio entries!!\n");
- return NULL;
- }
-
- return &gpio->entry[gpio->entries++];
-}
-
-struct dcb_gpio_entry *
-nouveau_bios_gpio_entry(struct drm_device *dev, enum dcb_gpio_tag tag)
+void *
+dcb_table(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->vbios;
- int i;
-
- for (i = 0; i < bios->dcb.gpio.entries; i++) {
- if (bios->dcb.gpio.entry[i].tag != tag)
- continue;
+ u8 *dcb = NULL;
- return &bios->dcb.gpio.entry[i];
+ if (dev_priv->card_type > NV_04)
+ dcb = ROMPTR(dev, dev_priv->vbios.data[0x36]);
+ if (!dcb) {
+ NV_WARNONCE(dev, "No DCB data found in VBIOS\n");
+ return NULL;
}
- return NULL;
-}
-
-static void
-parse_dcb_gpio_table(struct nvbios *bios)
-{
- struct drm_device *dev = bios->dev;
- struct dcb_gpio_entry *e;
- u8 headerlen, entries, recordlen;
- u8 *dcb, *gpio = NULL, *entry;
- int i;
-
- dcb = ROMPTR(bios, bios->data[0x36]);
+ if (dcb[0] >= 0x41) {
+ NV_WARNONCE(dev, "DCB version 0x%02x unknown\n", dcb[0]);
+ return NULL;
+ } else
if (dcb[0] >= 0x30) {
- gpio = ROMPTR(bios, dcb[10]);
- if (!gpio)
- goto no_table;
-
- headerlen = gpio[1];
- entries = gpio[2];
- recordlen = gpio[3];
+ if (ROM32(dcb[6]) == 0x4edcbdcb)
+ return dcb;
} else
- if (dcb[0] >= 0x22 && dcb[-1] >= 0x13) {
- gpio = ROMPTR(bios, dcb[-15]);
- if (!gpio)
- goto no_table;
-
- headerlen = 3;
- entries = gpio[2];
- recordlen = gpio[1];
+ if (dcb[0] >= 0x20) {
+ if (ROM32(dcb[4]) == 0x4edcbdcb)
+ return dcb;
} else
- if (dcb[0] >= 0x22) {
- /* No GPIO table present, parse the TVDAC GPIO data. */
- uint8_t *tvdac_gpio = &dcb[-5];
-
- if (tvdac_gpio[0] & 1) {
- e = new_gpio_entry(bios);
- e->tag = DCB_GPIO_TVDAC0;
- e->line = tvdac_gpio[1] >> 4;
- e->invert = tvdac_gpio[0] & 2;
- }
-
- goto no_table;
+ if (dcb[0] >= 0x15) {
+ if (!memcmp(&dcb[-7], "DEV_REC", 7))
+ return dcb;
} else {
- NV_DEBUG(dev, "no/unknown gpio table on DCB 0x%02x\n", dcb[0]);
- goto no_table;
- }
-
- entry = gpio + headerlen;
- for (i = 0; i < entries; i++, entry += recordlen) {
- e = new_gpio_entry(bios);
- if (!e)
- break;
-
- if (gpio[0] < 0x40) {
- e->entry = ROM16(entry[0]);
- e->tag = (e->entry & 0x07e0) >> 5;
- if (e->tag == 0x3f) {
- bios->dcb.gpio.entries--;
- continue;
- }
-
- e->line = (e->entry & 0x001f);
- e->invert = ((e->entry & 0xf800) >> 11) != 4;
- } else {
- e->entry = ROM32(entry[0]);
- e->tag = (e->entry & 0x0000ff00) >> 8;
- if (e->tag == 0xff) {
- bios->dcb.gpio.entries--;
- continue;
- }
-
- e->line = (e->entry & 0x0000001f) >> 0;
- if (gpio[0] == 0x40) {
- e->state_default = (e->entry & 0x01000000) >> 24;
- e->state[0] = (e->entry & 0x18000000) >> 27;
- e->state[1] = (e->entry & 0x60000000) >> 29;
- } else {
- e->state_default = (e->entry & 0x00000080) >> 7;
- e->state[0] = (entry[4] >> 4) & 3;
- e->state[1] = (entry[4] >> 6) & 3;
- }
- }
- }
-
-no_table:
- /* Apple iMac G4 NV18 */
- if (nv_match_device(dev, 0x0189, 0x10de, 0x0010)) {
- e = new_gpio_entry(bios);
- if (e) {
- e->tag = DCB_GPIO_TVDAC0;
- e->line = 4;
- }
- }
-}
-
-struct dcb_connector_table_entry *
-nouveau_bios_connector_entry(struct drm_device *dev, int index)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->vbios;
- struct dcb_connector_table_entry *cte;
-
- if (index >= bios->dcb.connector.entries)
- return NULL;
-
- cte = &bios->dcb.connector.entry[index];
- if (cte->type == 0xff)
+ /*
+ * v1.4 (some NV15/16, NV11+) seems the same as v1.5, but
+	 * always has the same single (crt) entry, even when tv-out is
+ * present, so the conclusion is this version cannot really
+ * be used.
+ *
+ * v1.2 tables (some NV6/10, and NV15+) normally have the
+ * same 5 entries, which are not specific to the card and so
+ * no use.
+ *
+ * v1.2 does have an I2C table that read_dcb_i2c_table can
+ * handle, but cards exist (nv11 in #14821) with a bad i2c
+ * table pointer, so use the indices parsed in
+ * parse_bmp_structure.
+ *
+ * v1.1 (NV5+, maybe some NV4) is entirely unhelpful
+ */
+ NV_WARNONCE(dev, "No useful DCB data in VBIOS\n");
return NULL;
+ }
- return cte;
+ NV_WARNONCE(dev, "DCB header validation failed\n");
+ return NULL;
}
-static enum dcb_connector_type
-divine_connector_type(struct nvbios *bios, int index)
+void *
+dcb_outp(struct drm_device *dev, u8 idx)
{
- struct dcb_table *dcb = &bios->dcb;
- unsigned encoders = 0, type = DCB_CONNECTOR_NONE;
- int i;
-
- for (i = 0; i < dcb->entries; i++) {
- if (dcb->entry[i].connector == index)
- encoders |= (1 << dcb->entry[i].type);
- }
-
- if (encoders & (1 << OUTPUT_DP)) {
- if (encoders & (1 << OUTPUT_TMDS))
- type = DCB_CONNECTOR_DP;
- else
- type = DCB_CONNECTOR_eDP;
- } else
- if (encoders & (1 << OUTPUT_TMDS)) {
- if (encoders & (1 << OUTPUT_ANALOG))
- type = DCB_CONNECTOR_DVI_I;
- else
- type = DCB_CONNECTOR_DVI_D;
- } else
- if (encoders & (1 << OUTPUT_ANALOG)) {
- type = DCB_CONNECTOR_VGA;
+ u8 *dcb = dcb_table(dev);
+ if (dcb && dcb[0] >= 0x30) {
+ if (idx < dcb[2])
+ return dcb + dcb[1] + (idx * dcb[3]);
} else
- if (encoders & (1 << OUTPUT_LVDS)) {
- type = DCB_CONNECTOR_LVDS;
+ if (dcb && dcb[0] >= 0x20) {
+ u8 *i2c = ROMPTR(dev, dcb[2]);
+ u8 *ent = dcb + 8 + (idx * 8);
+ if (i2c && ent < i2c)
+ return ent;
} else
- if (encoders & (1 << OUTPUT_TV)) {
- type = DCB_CONNECTOR_TV_0;
+ if (dcb && dcb[0] >= 0x15) {
+ u8 *i2c = ROMPTR(dev, dcb[2]);
+ u8 *ent = dcb + 4 + (idx * 10);
+ if (i2c && ent < i2c)
+ return ent;
}
- return type;
+ return NULL;
}
-static void
-apply_dcb_connector_quirks(struct nvbios *bios, int idx)
-{
- struct dcb_connector_table_entry *cte = &bios->dcb.connector.entry[idx];
- struct drm_device *dev = bios->dev;
+int
+dcb_outp_foreach(struct drm_device *dev, void *data,
+ int (*exec)(struct drm_device *, void *, int idx, u8 *outp))
+{
+ int ret, idx = -1;
+ u8 *outp = NULL;
+ while ((outp = dcb_outp(dev, ++idx))) {
+ if (ROM32(outp[0]) == 0x00000000)
+ break; /* seen on an NV11 with DCB v1.5 */
+ if (ROM32(outp[0]) == 0xffffffff)
+ break; /* seen on an NV17 with DCB v2.0 */
+
+ if ((outp[0] & 0x0f) == OUTPUT_UNUSED)
+ continue;
+ if ((outp[0] & 0x0f) == OUTPUT_EOL)
+ break;
- /* Gigabyte NX85T */
- if (nv_match_device(dev, 0x0421, 0x1458, 0x344c)) {
- if (cte->type == DCB_CONNECTOR_HDMI_1)
- cte->type = DCB_CONNECTOR_DVI_I;
+ ret = exec(dev, data, idx, outp);
+ if (ret)
+ return ret;
}
- /* Gigabyte GV-NX86T512H */
- if (nv_match_device(dev, 0x0402, 0x1458, 0x3455)) {
- if (cte->type == DCB_CONNECTOR_HDMI_1)
- cte->type = DCB_CONNECTOR_DVI_I;
- }
+ return 0;
}
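A hypothetical callback sketch (not in this patch; assumes the driver headers) showing how dcb_outp_foreach() above can be used; count_outp is an invented name.

static int count_outp(struct drm_device *dev, void *data, int idx, u8 *outp)
{
	(*(int *)data)++;	/* count every valid output entry */
	return 0;		/* a non-zero return would abort the walk */
}

/* usage: int n = 0; dcb_outp_foreach(dev, &n, count_outp); */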
-static const u8 hpd_gpio[16] = {
- 0xff, 0x07, 0x08, 0xff, 0xff, 0x51, 0x52, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0x5e, 0x5f, 0x60,
-};
-
-static void
-parse_dcb_connector_table(struct nvbios *bios)
+u8 *
+dcb_conntab(struct drm_device *dev)
{
- struct drm_device *dev = bios->dev;
- struct dcb_connector_table *ct = &bios->dcb.connector;
- struct dcb_connector_table_entry *cte;
- uint8_t *conntab = &bios->data[bios->dcb.connector_table_ptr];
- uint8_t *entry;
- int i;
-
- if (!bios->dcb.connector_table_ptr) {
- NV_DEBUG_KMS(dev, "No DCB connector table present\n");
- return;
- }
-
- NV_INFO(dev, "DCB connector table: VHER 0x%02x %d %d %d\n",
- conntab[0], conntab[1], conntab[2], conntab[3]);
- if ((conntab[0] != 0x30 && conntab[0] != 0x40) ||
- (conntab[3] != 2 && conntab[3] != 4)) {
- NV_ERROR(dev, " Unknown! Please report.\n");
- return;
+ u8 *dcb = dcb_table(dev);
+ if (dcb && dcb[0] >= 0x30 && dcb[1] >= 0x16) {
+ u8 *conntab = ROMPTR(dev, dcb[0x14]);
+ if (conntab && conntab[0] >= 0x30 && conntab[0] <= 0x40)
+ return conntab;
}
+ return NULL;
+}
- ct->entries = conntab[2];
-
- entry = conntab + conntab[1];
- cte = &ct->entry[0];
- for (i = 0; i < conntab[2]; i++, entry += conntab[3], cte++) {
- cte->index = i;
- if (conntab[3] == 2)
- cte->entry = ROM16(entry[0]);
- else
- cte->entry = ROM32(entry[0]);
-
- cte->type = (cte->entry & 0x000000ff) >> 0;
- cte->index2 = (cte->entry & 0x00000f00) >> 8;
-
- cte->gpio_tag = ffs((cte->entry & 0x07033000) >> 12);
- cte->gpio_tag = hpd_gpio[cte->gpio_tag];
-
- if (cte->type == 0xff)
- continue;
-
- apply_dcb_connector_quirks(bios, i);
-
- NV_INFO(dev, " %d: 0x%08x: type 0x%02x idx %d tag 0x%02x\n",
- i, cte->entry, cte->type, cte->index, cte->gpio_tag);
-
- /* check for known types, fallback to guessing the type
- * from attached encoders if we hit an unknown.
- */
- switch (cte->type) {
- case DCB_CONNECTOR_VGA:
- case DCB_CONNECTOR_TV_0:
- case DCB_CONNECTOR_TV_1:
- case DCB_CONNECTOR_TV_3:
- case DCB_CONNECTOR_DVI_I:
- case DCB_CONNECTOR_DVI_D:
- case DCB_CONNECTOR_LVDS:
- case DCB_CONNECTOR_LVDS_SPWG:
- case DCB_CONNECTOR_DP:
- case DCB_CONNECTOR_eDP:
- case DCB_CONNECTOR_HDMI_0:
- case DCB_CONNECTOR_HDMI_1:
- break;
- default:
- cte->type = divine_connector_type(bios, cte->index);
- NV_WARN(dev, "unknown type, using 0x%02x\n", cte->type);
- break;
- }
-
- if (nouveau_override_conntype) {
- int type = divine_connector_type(bios, cte->index);
- if (type != cte->type)
- NV_WARN(dev, " -> type 0x%02x\n", cte->type);
- }
-
- }
+u8 *
+dcb_conn(struct drm_device *dev, u8 idx)
+{
+ u8 *conntab = dcb_conntab(dev);
+ if (conntab && idx < conntab[2])
+ return conntab + conntab[1] + (idx * conntab[3]);
+ return NULL;
}
static struct dcb_entry *new_dcb_entry(struct dcb_table *dcb)
@@ -6079,8 +5728,7 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
entry->type = conn & 0xf;
entry->i2c_index = (conn >> 4) & 0xf;
entry->heads = (conn >> 8) & 0xf;
- if (dcb->version >= 0x40)
- entry->connector = (conn >> 12) & 0xf;
+ entry->connector = (conn >> 12) & 0xf;
entry->bus = (conn >> 16) & 0xf;
entry->location = (conn >> 20) & 0x3;
entry->or = (conn >> 24) & 0xf;
@@ -6252,25 +5900,6 @@ parse_dcb15_entry(struct drm_device *dev, struct dcb_table *dcb,
return true;
}
-static bool parse_dcb_entry(struct drm_device *dev, struct dcb_table *dcb,
- uint32_t conn, uint32_t conf)
-{
- struct dcb_entry *entry = new_dcb_entry(dcb);
- bool ret;
-
- if (dcb->version >= 0x20)
- ret = parse_dcb20_entry(dev, dcb, conn, conf, entry);
- else
- ret = parse_dcb15_entry(dev, dcb, conn, conf, entry);
- if (!ret)
- return ret;
-
- read_dcb_i2c_entry(dev, dcb->version, dcb->i2c_table,
- entry->i2c_index, &dcb->i2c[entry->i2c_index]);
-
- return true;
-}
-
static
void merge_like_dcb_entries(struct drm_device *dev, struct dcb_table *dcb)
{
@@ -6431,154 +6060,118 @@ fabricate_dcb_encoder_table(struct drm_device *dev, struct nvbios *bios)
#endif
/* Make up some sane defaults */
- fabricate_dcb_output(dcb, OUTPUT_ANALOG, LEGACY_I2C_CRT, 1, 1);
+ fabricate_dcb_output(dcb, OUTPUT_ANALOG,
+ bios->legacy.i2c_indices.crt, 1, 1);
if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0)
- fabricate_dcb_output(dcb, OUTPUT_TV, LEGACY_I2C_TV,
+ fabricate_dcb_output(dcb, OUTPUT_TV,
+ bios->legacy.i2c_indices.tv,
all_heads, 0);
else if (bios->tmds.output0_script_ptr ||
bios->tmds.output1_script_ptr)
- fabricate_dcb_output(dcb, OUTPUT_TMDS, LEGACY_I2C_PANEL,
+ fabricate_dcb_output(dcb, OUTPUT_TMDS,
+ bios->legacy.i2c_indices.panel,
all_heads, 1);
}
static int
-parse_dcb_table(struct drm_device *dev, struct nvbios *bios)
+parse_dcb_entry(struct drm_device *dev, void *data, int idx, u8 *outp)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct dcb_table *dcb = &bios->dcb;
- uint16_t dcbptr = 0, i2ctabptr = 0;
- uint8_t *dcbtable;
- uint8_t headerlen = 0x4, entries = DCB_MAX_NUM_ENTRIES;
- bool configblock = true;
- int recordlength = 8, confofs = 4;
- int i;
-
- /* get the offset from 0x36 */
- if (dev_priv->card_type > NV_04) {
- dcbptr = ROM16(bios->data[0x36]);
- if (dcbptr == 0x0000)
- NV_WARN(dev, "No output data (DCB) found in BIOS\n");
- }
-
- /* this situation likely means a really old card, pre DCB */
- if (dcbptr == 0x0) {
- fabricate_dcb_encoder_table(dev, bios);
- return 0;
- }
-
- dcbtable = &bios->data[dcbptr];
-
- /* get DCB version */
- dcb->version = dcbtable[0];
- NV_TRACE(dev, "Found Display Configuration Block version %d.%d\n",
- dcb->version >> 4, dcb->version & 0xf);
-
- if (dcb->version >= 0x20) { /* NV17+ */
- uint32_t sig;
+ struct dcb_table *dcb = &dev_priv->vbios.dcb;
+ u32 conf = (dcb->version >= 0x20) ? ROM32(outp[4]) : ROM32(outp[6]);
+ u32 conn = ROM32(outp[0]);
+ bool ret;
- if (dcb->version >= 0x30) { /* NV40+ */
- headerlen = dcbtable[1];
- entries = dcbtable[2];
- recordlength = dcbtable[3];
- i2ctabptr = ROM16(dcbtable[4]);
- sig = ROM32(dcbtable[6]);
- dcb->gpio_table_ptr = ROM16(dcbtable[10]);
- dcb->connector_table_ptr = ROM16(dcbtable[20]);
- } else {
- i2ctabptr = ROM16(dcbtable[2]);
- sig = ROM32(dcbtable[4]);
- headerlen = 8;
- }
+ if (apply_dcb_encoder_quirks(dev, idx, &conn, &conf)) {
+ struct dcb_entry *entry = new_dcb_entry(dcb);
- if (sig != 0x4edcbdcb) {
- NV_ERROR(dev, "Bad Display Configuration Block "
- "signature (%08X)\n", sig);
- return -EINVAL;
- }
- } else if (dcb->version >= 0x15) { /* some NV11 and NV20 */
- char sig[8] = { 0 };
+ NV_TRACEWARN(dev, "DCB outp %02d: %08x %08x\n", idx, conn, conf);
- strncpy(sig, (char *)&dcbtable[-7], 7);
- i2ctabptr = ROM16(dcbtable[2]);
- recordlength = 10;
- confofs = 6;
+ if (dcb->version >= 0x20)
+ ret = parse_dcb20_entry(dev, dcb, conn, conf, entry);
+ else
+ ret = parse_dcb15_entry(dev, dcb, conn, conf, entry);
+ if (!ret)
+ return 1; /* stop parsing */
- if (strcmp(sig, "DEV_REC")) {
- NV_ERROR(dev, "Bad Display Configuration Block "
- "signature (%s)\n", sig);
- return -EINVAL;
- }
- } else {
- /*
- * v1.4 (some NV15/16, NV11+) seems the same as v1.5, but always
- * has the same single (crt) entry, even when tv-out present, so
- * the conclusion is this version cannot really be used.
- * v1.2 tables (some NV6/10, and NV15+) normally have the same
- * 5 entries, which are not specific to the card and so no use.
- * v1.2 does have an I2C table that read_dcb_i2c_table can
- * handle, but cards exist (nv11 in #14821) with a bad i2c table
- * pointer, so use the indices parsed in parse_bmp_structure.
- * v1.1 (NV5+, maybe some NV4) is entirely unhelpful
+ /* Ignore the I2C index for on-chip TV-out, as there
+ * are cards with bogus values (nv31m in bug 23212),
+ * and it's otherwise useless.
*/
- NV_TRACEWARN(dev, "No useful information in BIOS output table; "
- "adding all possible outputs\n");
- fabricate_dcb_encoder_table(dev, bios);
- return 0;
+ if (entry->type == OUTPUT_TV &&
+ entry->location == DCB_LOC_ON_CHIP)
+ entry->i2c_index = 0x0f;
}
- if (!i2ctabptr)
- NV_WARN(dev, "No pointer to DCB I2C port table\n");
- else {
- dcb->i2c_table = &bios->data[i2ctabptr];
- if (dcb->version >= 0x30)
- dcb->i2c_default_indices = dcb->i2c_table[4];
+ return 0;
+}
- /*
- * Parse the "management" I2C bus, used for hardware
- * monitoring and some external TMDS transmitters.
- */
- if (dcb->version >= 0x22) {
- int idx = (dcb->version >= 0x40 ?
- dcb->i2c_default_indices & 0xf :
- 2);
+static void
+dcb_fake_connectors(struct nvbios *bios)
+{
+ struct dcb_table *dcbt = &bios->dcb;
+ u8 map[16] = { };
+ int i, idx = 0;
- read_dcb_i2c_entry(dev, dcb->version, dcb->i2c_table,
- idx, &dcb->i2c[idx]);
- }
+ /* heuristic: if we ever get a non-zero connector field, assume
+	 * that all the indices are valid and we don't need to fake them.
+ */
+ for (i = 0; i < dcbt->entries; i++) {
+ if (dcbt->entry[i].connector)
+ return;
}
- if (entries > DCB_MAX_NUM_ENTRIES)
- entries = DCB_MAX_NUM_ENTRIES;
-
- for (i = 0; i < entries; i++) {
- uint32_t connection, config = 0;
-
- connection = ROM32(dcbtable[headerlen + recordlength * i]);
- if (configblock)
- config = ROM32(dcbtable[headerlen + confofs + recordlength * i]);
-
- /* seen on an NV11 with DCB v1.5 */
- if (connection == 0x00000000)
- break;
+ /* no useful connector info available, we need to make it up
+ * ourselves. the rule here is: anything on the same i2c bus
+ * is considered to be on the same connector. any output
+ * without an associated i2c bus is assigned its own unique
+ * connector index.
+ */
+ for (i = 0; i < dcbt->entries; i++) {
+ u8 i2c = dcbt->entry[i].i2c_index;
+ if (i2c == 0x0f) {
+ dcbt->entry[i].connector = idx++;
+ } else {
+ if (!map[i2c])
+ map[i2c] = ++idx;
+ dcbt->entry[i].connector = map[i2c] - 1;
+ }
+ }
- /* seen on an NV17 with DCB v2.0 */
- if (connection == 0xffffffff)
- break;
+ /* if we created more than one connector, destroy the connector
+ * table - just in case it has random, rather than stub, entries.
+ */
+ if (i > 1) {
+ u8 *conntab = dcb_conntab(bios->dev);
+ if (conntab)
+ conntab[0] = 0x00;
+ }
+}
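A userspace-runnable walk-through (made-up i2c indices) of the connector-faking heuristic implemented above; 0x0f means "no i2c bus", as in the driver.

#include <stdio.h>

int main(void)
{
	unsigned char i2c_index[] = { 0x00, 0x00, 0x01, 0x0f };
	unsigned char map[16] = { 0 };
	int i, idx = 0, connector;

	for (i = 0; i < 4; i++) {
		unsigned char i2c = i2c_index[i];
		if (i2c == 0x0f) {
			connector = idx++;		/* no bus: unique connector */
		} else {
			if (!map[i2c])
				map[i2c] = ++idx;	/* first use of this bus */
			connector = map[i2c] - 1;	/* shared bus, shared connector */
		}
		printf("output %d -> connector %d\n", i, connector);
	}
	return 0;	/* prints connectors 0, 0, 1, 2 */
}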
- if ((connection & 0x0000000f) == 0x0000000f)
- continue;
+static int
+parse_dcb_table(struct drm_device *dev, struct nvbios *bios)
+{
+ struct dcb_table *dcb = &bios->dcb;
+ u8 *dcbt, *conn;
+ int idx;
+
+ dcbt = dcb_table(dev);
+ if (!dcbt) {
+ /* handle pre-DCB boards */
+ if (bios->type == NVBIOS_BMP) {
+ fabricate_dcb_encoder_table(dev, bios);
+ return 0;
+ }
- if (!apply_dcb_encoder_quirks(dev, i, &connection, &config))
- continue;
+ return -EINVAL;
+ }
- NV_TRACEWARN(dev, "Raw DCB entry %d: %08x %08x\n",
- dcb->entries, connection, config);
+ NV_TRACE(dev, "DCB version %d.%d\n", dcbt[0] >> 4, dcbt[0] & 0xf);
- if (!parse_dcb_entry(dev, dcb, connection, config))
- break;
- }
+ dcb->version = dcbt[0];
+ dcb_outp_foreach(dev, NULL, parse_dcb_entry);
/*
* apart for v2.1+ not being known for requiring merging, this
@@ -6590,77 +6183,19 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios)
if (!dcb->entries)
return -ENXIO;
- parse_dcb_gpio_table(bios);
- parse_dcb_connector_table(bios);
- return 0;
-}
-
-static void
-fixup_legacy_connector(struct nvbios *bios)
-{
- struct dcb_table *dcb = &bios->dcb;
- int i, i2c, i2c_conn[DCB_MAX_NUM_I2C_ENTRIES] = { };
-
- /*
- * DCB 3.0 also has the table in most cases, but there are some cards
- * where the table is filled with stub entries, and the DCB entriy
- * indices are all 0. We don't need the connector indices on pre-G80
- * chips (yet?) so limit the use to DCB 4.0 and above.
- */
- if (dcb->version >= 0x40)
- return;
-
- dcb->connector.entries = 0;
-
- /*
- * No known connector info before v3.0, so make it up. the rule here
- * is: anything on the same i2c bus is considered to be on the same
- * connector. any output without an associated i2c bus is assigned
- * its own unique connector index.
- */
- for (i = 0; i < dcb->entries; i++) {
- /*
- * Ignore the I2C index for on-chip TV-out, as there
- * are cards with bogus values (nv31m in bug 23212),
- * and it's otherwise useless.
- */
- if (dcb->entry[i].type == OUTPUT_TV &&
- dcb->entry[i].location == DCB_LOC_ON_CHIP)
- dcb->entry[i].i2c_index = 0xf;
- i2c = dcb->entry[i].i2c_index;
-
- if (i2c_conn[i2c]) {
- dcb->entry[i].connector = i2c_conn[i2c] - 1;
- continue;
+ /* dump connector table entries to log, if any exist */
+ idx = -1;
+ while ((conn = dcb_conn(dev, ++idx))) {
+ if (conn[0] != 0xff) {
+ NV_TRACE(dev, "DCB conn %02d: ", idx);
+ if (dcb_conntab(dev)[3] < 4)
+ printk("%04x\n", ROM16(conn[0]));
+ else
+ printk("%08x\n", ROM32(conn[0]));
}
-
- dcb->entry[i].connector = dcb->connector.entries++;
- if (i2c != 0xf)
- i2c_conn[i2c] = dcb->connector.entries;
- }
-
- /* Fake the connector table as well as just connector indices */
- for (i = 0; i < dcb->connector.entries; i++) {
- dcb->connector.entry[i].index = i;
- dcb->connector.entry[i].type = divine_connector_type(bios, i);
- dcb->connector.entry[i].gpio_tag = 0xff;
- }
-}
-
-static void
-fixup_legacy_i2c(struct nvbios *bios)
-{
- struct dcb_table *dcb = &bios->dcb;
- int i;
-
- for (i = 0; i < dcb->entries; i++) {
- if (dcb->entry[i].i2c_index == LEGACY_I2C_CRT)
- dcb->entry[i].i2c_index = bios->legacy.i2c_indices.crt;
- if (dcb->entry[i].i2c_index == LEGACY_I2C_PANEL)
- dcb->entry[i].i2c_index = bios->legacy.i2c_indices.panel;
- if (dcb->entry[i].i2c_index == LEGACY_I2C_TV)
- dcb->entry[i].i2c_index = bios->legacy.i2c_indices.tv;
}
+ dcb_fake_connectors(bios);
+ return 0;
}
static int load_nv17_hwsq_ucode_entry(struct drm_device *dev, struct nvbios *bios, uint16_t hwsq_offset, int entry)
@@ -6879,19 +6414,6 @@ nouveau_run_vbios_init(struct drm_device *dev)
return ret;
}
-static void
-nouveau_bios_i2c_devices_takedown(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->vbios;
- struct dcb_i2c_entry *entry;
- int i;
-
- entry = &bios->dcb.i2c[0];
- for (i = 0; i < DCB_MAX_NUM_I2C_ENTRIES; i++, entry++)
- nouveau_i2c_fini(dev, entry);
-}
-
static bool
nouveau_bios_posted(struct drm_device *dev)
{
@@ -6928,12 +6450,17 @@ nouveau_bios_init(struct drm_device *dev)
if (ret)
return ret;
- ret = parse_dcb_table(dev, bios);
+ ret = nouveau_i2c_init(dev);
if (ret)
return ret;
- fixup_legacy_i2c(bios);
- fixup_legacy_connector(bios);
+ ret = nouveau_mxm_init(dev);
+ if (ret)
+ return ret;
+
+ ret = parse_dcb_table(dev, bios);
+ if (ret)
+ return ret;
if (!bios->major_version) /* we don't run version 0 bios */
return 0;
@@ -6971,5 +6498,6 @@ nouveau_bios_init(struct drm_device *dev)
void
nouveau_bios_takedown(struct drm_device *dev)
{
- nouveau_bios_i2c_devices_takedown(dev);
+ nouveau_mxm_fini(dev);
+ nouveau_i2c_fini(dev);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index 8adb69e4a6b1..1e382ad5a2b8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -34,9 +34,14 @@
#define DCB_LOC_ON_CHIP 0
-#define ROM16(x) le16_to_cpu(*(uint16_t *)&(x))
-#define ROM32(x) le32_to_cpu(*(uint32_t *)&(x))
-#define ROMPTR(bios, x) (ROM16(x) ? &(bios)->data[ROM16(x)] : NULL)
+#define ROM16(x) le16_to_cpu(*(u16 *)&(x))
+#define ROM32(x) le32_to_cpu(*(u32 *)&(x))
+#define ROM48(x) ({ u8 *p = &(x); (u64)ROM16(p[4]) << 32 | ROM32(p[0]); })
+#define ROM64(x) le64_to_cpu(*(u64 *)&(x))
+#define ROMPTR(d,x) ({ \
+ struct drm_nouveau_private *dev_priv = (d)->dev_private; \
+ ROM16(x) ? &dev_priv->vbios.data[ROM16(x)] : NULL; \
+})
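A host-side illustration of the byte order the ROM16/ROM32/ROM48 accessors above decode from the VBIOS image; this sketch assumes a little-endian host, whereas the macros themselves use le*_to_cpu and are endian-safe.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	uint8_t rom[6] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66 };
	uint16_t hi16;
	uint32_t lo32;
	uint64_t v48;

	memcpy(&lo32, &rom[0], 4);			/* ROM32(rom[0]) -> 0x44332211 */
	memcpy(&hi16, &rom[4], 2);			/* ROM16(rom[4]) -> 0x6655 */
	v48 = (uint64_t)hi16 << 32 | lo32;		/* ROM48(rom[0]) */
	printf("%#llx\n", (unsigned long long)v48);	/* 0x665544332211 */
	return 0;
}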
struct bit_entry {
uint8_t id;
@@ -48,30 +53,12 @@ struct bit_entry {
int bit_table(struct drm_device *, u8 id, struct bit_entry *);
-struct dcb_i2c_entry {
- uint32_t entry;
- uint8_t port_type;
- uint8_t read, write;
- struct nouveau_i2c_chan *chan;
-};
-
enum dcb_gpio_tag {
DCB_GPIO_TVDAC0 = 0xc,
DCB_GPIO_TVDAC1 = 0x2d,
-};
-
-struct dcb_gpio_entry {
- enum dcb_gpio_tag tag;
- int line;
- bool invert;
- uint32_t entry;
- uint8_t state_default;
- uint8_t state[2];
-};
-
-struct dcb_gpio_table {
- int entries;
- struct dcb_gpio_entry entry[DCB_MAX_NUM_GPIO_ENTRIES];
+ DCB_GPIO_PWM_FAN = 0x9,
+ DCB_GPIO_FAN_SENSE = 0x3d,
+ DCB_GPIO_UNUSED = 0xff
};
enum dcb_connector_type {
@@ -90,20 +77,6 @@ enum dcb_connector_type {
DCB_CONNECTOR_NONE = 0xff
};
-struct dcb_connector_table_entry {
- uint8_t index;
- uint32_t entry;
- enum dcb_connector_type type;
- uint8_t index2;
- uint8_t gpio_tag;
- void *drm;
-};
-
-struct dcb_connector_table {
- int entries;
- struct dcb_connector_table_entry entry[DCB_MAX_NUM_CONNECTOR_ENTRIES];
-};
-
enum dcb_type {
OUTPUT_ANALOG = 0,
OUTPUT_TV = 1,
@@ -111,6 +84,7 @@ enum dcb_type {
OUTPUT_LVDS = 3,
OUTPUT_DP = 6,
OUTPUT_EOL = 14, /* DCB 4.0+, appears to be end-of-list */
+ OUTPUT_UNUSED = 15,
OUTPUT_ANY = -1
};
@@ -155,18 +129,8 @@ struct dcb_entry {
struct dcb_table {
uint8_t version;
-
int entries;
struct dcb_entry entry[DCB_MAX_NUM_ENTRIES];
-
- uint8_t *i2c_table;
- uint8_t i2c_default_indices;
- struct dcb_i2c_entry i2c[DCB_MAX_NUM_I2C_ENTRIES];
-
- uint16_t gpio_table_ptr;
- struct dcb_gpio_table gpio;
- uint16_t connector_table_ptr;
- struct dcb_connector_table connector;
};
enum nouveau_or {
@@ -195,7 +159,7 @@ enum pll_types {
PLL_SHADER = 0x02,
PLL_UNK03 = 0x03,
PLL_MEMORY = 0x04,
- PLL_UNK05 = 0x05,
+ PLL_VDEC = 0x05,
PLL_UNK40 = 0x40,
PLL_UNK41 = 0x41,
PLL_UNK42 = 0x42,
@@ -333,4 +297,11 @@ struct nvbios {
} legacy;
};
+void *dcb_table(struct drm_device *);
+void *dcb_outp(struct drm_device *, u8 idx);
+int dcb_outp_foreach(struct drm_device *, void *data,
+ int (*)(struct drm_device *, void *, int idx, u8 *outp));
+u8 *dcb_conntab(struct drm_device *);
+u8 *dcb_conn(struct drm_device *, u8 idx);
+
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 7cc37e690860..724b41a2b9e9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -28,6 +28,7 @@
*/
#include "drmP.h"
+#include "ttm/ttm_page_alloc.h"
#include "nouveau_drm.h"
#include "nouveau_drv.h"
@@ -92,6 +93,7 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_bo *nvbo;
+ size_t acc_size;
int ret;
nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
@@ -114,9 +116,12 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
nouveau_bo_placement_set(nvbo, flags, 0);
+ acc_size = ttm_bo_dma_acc_size(&dev_priv->ttm.bdev, size,
+ sizeof(struct nouveau_bo));
+
ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
ttm_bo_type_device, &nvbo->placement,
- align >> PAGE_SHIFT, 0, false, NULL, size,
+ align >> PAGE_SHIFT, 0, false, NULL, acc_size,
nouveau_bo_del_ttm);
if (ret) {
/* ttm will call nouveau_bo_del_ttm if it fails.. */
@@ -343,8 +348,10 @@ nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
*mem = val;
}
-static struct ttm_backend *
-nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
+static struct ttm_tt *
+nouveau_ttm_tt_create(struct ttm_bo_device *bdev,
+ unsigned long size, uint32_t page_flags,
+ struct page *dummy_read_page)
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
struct drm_device *dev = dev_priv->dev;
@@ -352,11 +359,13 @@ nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
switch (dev_priv->gart_info.type) {
#if __OS_HAS_AGP
case NOUVEAU_GART_AGP:
- return ttm_agp_backend_init(bdev, dev->agp->bridge);
+ return ttm_agp_tt_create(bdev, dev->agp->bridge,
+ size, page_flags, dummy_read_page);
#endif
case NOUVEAU_GART_PDMA:
case NOUVEAU_GART_HW:
- return nouveau_sgdma_init_ttm(dev);
+ return nouveau_sgdma_create_ttm(bdev, size, page_flags,
+ dummy_read_page);
default:
NV_ERROR(dev, "Unknown GART type %d\n",
dev_priv->gart_info.type);
@@ -673,8 +682,7 @@ nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo,
if (mem->mem_type == TTM_PL_VRAM)
nouveau_vm_map(vma, node);
else
- nouveau_vm_map_sg(vma, 0, mem->num_pages << PAGE_SHIFT,
- node, node->pages);
+ nouveau_vm_map_sg(vma, 0, mem->num_pages << PAGE_SHIFT, node);
return 0;
}
@@ -801,19 +809,18 @@ out:
static void
nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
{
- struct nouveau_mem *node = new_mem->mm_node;
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct nouveau_vma *vma;
list_for_each_entry(vma, &nvbo->vma_list, head) {
- if (new_mem->mem_type == TTM_PL_VRAM) {
+ if (new_mem && new_mem->mem_type == TTM_PL_VRAM) {
nouveau_vm_map(vma, new_mem->mm_node);
} else
- if (new_mem->mem_type == TTM_PL_TT &&
+ if (new_mem && new_mem->mem_type == TTM_PL_TT &&
nvbo->page_shift == vma->vm->spg_shift) {
nouveau_vm_map_sg(vma, 0, new_mem->
num_pages << PAGE_SHIFT,
- node, node->pages);
+ new_mem->mm_node);
} else {
nouveau_vm_unmap(vma);
}
@@ -1044,8 +1051,94 @@ nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
nouveau_fence_unref(&old_fence);
}
+static int
+nouveau_ttm_tt_populate(struct ttm_tt *ttm)
+{
+ struct ttm_dma_tt *ttm_dma = (void *)ttm;
+ struct drm_nouveau_private *dev_priv;
+ struct drm_device *dev;
+ unsigned i;
+ int r;
+
+ if (ttm->state != tt_unpopulated)
+ return 0;
+
+ dev_priv = nouveau_bdev(ttm->bdev);
+ dev = dev_priv->dev;
+
+#if __OS_HAS_AGP
+ if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
+ return ttm_agp_tt_populate(ttm);
+ }
+#endif
+
+#ifdef CONFIG_SWIOTLB
+ if (swiotlb_nr_tbl()) {
+ return ttm_dma_populate((void *)ttm, dev->dev);
+ }
+#endif
+
+ r = ttm_pool_populate(ttm);
+ if (r) {
+ return r;
+ }
+
+ for (i = 0; i < ttm->num_pages; i++) {
+ ttm_dma->dma_address[i] = pci_map_page(dev->pdev, ttm->pages[i],
+ 0, PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+ if (pci_dma_mapping_error(dev->pdev, ttm_dma->dma_address[i])) {
+				while (i--) {
+ pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ ttm_dma->dma_address[i] = 0;
+ }
+ ttm_pool_unpopulate(ttm);
+ return -EFAULT;
+ }
+ }
+ return 0;
+}
+
+static void
+nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
+{
+ struct ttm_dma_tt *ttm_dma = (void *)ttm;
+ struct drm_nouveau_private *dev_priv;
+ struct drm_device *dev;
+ unsigned i;
+
+ dev_priv = nouveau_bdev(ttm->bdev);
+ dev = dev_priv->dev;
+
+#if __OS_HAS_AGP
+ if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
+ ttm_agp_tt_unpopulate(ttm);
+ return;
+ }
+#endif
+
+#ifdef CONFIG_SWIOTLB
+ if (swiotlb_nr_tbl()) {
+ ttm_dma_unpopulate((void *)ttm, dev->dev);
+ return;
+ }
+#endif
+
+ for (i = 0; i < ttm->num_pages; i++) {
+ if (ttm_dma->dma_address[i]) {
+ pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ }
+ }
+
+ ttm_pool_unpopulate(ttm);
+}
+
struct ttm_bo_driver nouveau_bo_driver = {
- .create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
+ .ttm_tt_create = &nouveau_ttm_tt_create,
+ .ttm_tt_populate = &nouveau_ttm_tt_populate,
+ .ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
.invalidate_caches = nouveau_bo_invalidate_caches,
.init_mem_type = nouveau_bo_init_mem_type,
.evict_flags = nouveau_bo_evict_flags,
@@ -1091,7 +1184,7 @@ nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
nouveau_vm_map(vma, nvbo->bo.mem.mm_node);
else
if (nvbo->bo.mem.mem_type == TTM_PL_TT)
- nouveau_vm_map_sg(vma, 0, size, node, node->pages);
+ nouveau_vm_map_sg(vma, 0, size, node);
list_add_tail(&vma->head, &nvbo->vma_list);
vma->refcount = 1;
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index bb6ec9ef8676..a018defb7621 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -187,6 +187,8 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
nouveau_dma_pre_init(chan);
chan->user_put = 0x40;
chan->user_get = 0x44;
+ if (dev_priv->card_type >= NV_50)
+ chan->user_get_hi = 0x60;
/* disable the fifo caches */
pfifo->reassign(dev, false);
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index cea6696b1906..f3ce34be082a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -35,6 +35,7 @@
#include "nouveau_encoder.h"
#include "nouveau_crtc.h"
#include "nouveau_connector.h"
+#include "nouveau_gpio.h"
#include "nouveau_hw.h"
static void nouveau_connector_hotplug(void *, int);
@@ -78,29 +79,11 @@ nouveau_encoder_connector_get(struct nouveau_encoder *encoder)
return NULL;
}
-/*TODO: This could use improvement, and learn to handle the fixed
- * BIOS tables etc. It's fine currently, for its only user.
- */
-int
-nouveau_connector_bpp(struct drm_connector *connector)
-{
- struct nouveau_connector *nv_connector = nouveau_connector(connector);
-
- if (nv_connector->edid && nv_connector->edid->revision >= 4) {
- u8 bpc = ((nv_connector->edid->input & 0x70) >> 3) + 4;
- if (bpc > 4)
- return bpc;
- }
-
- return 18;
-}
-
static void
nouveau_connector_destroy(struct drm_connector *connector)
{
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct drm_nouveau_private *dev_priv;
- struct nouveau_gpio_engine *pgpio;
struct drm_device *dev;
if (!nv_connector)
@@ -110,10 +93,9 @@ nouveau_connector_destroy(struct drm_connector *connector)
dev_priv = dev->dev_private;
NV_DEBUG_KMS(dev, "\n");
- pgpio = &dev_priv->engine.gpio;
- if (pgpio->irq_unregister) {
- pgpio->irq_unregister(dev, nv_connector->dcb->gpio_tag,
- nouveau_connector_hotplug, connector);
+ if (nv_connector->hpd != DCB_GPIO_UNUSED) {
+ nouveau_gpio_isr_del(dev, 0, nv_connector->hpd, 0xff,
+ nouveau_connector_hotplug, connector);
}
kfree(nv_connector->edid);
@@ -198,6 +180,10 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
return;
nv_connector->detected_encoder = nv_encoder;
+ if (dev_priv->card_type >= NV_50) {
+ connector->interlace_allowed = true;
+ connector->doublescan_allowed = true;
+ } else
if (nv_encoder->dcb->type == OUTPUT_LVDS ||
nv_encoder->dcb->type == OUTPUT_TMDS) {
connector->doublescan_allowed = false;
@@ -214,7 +200,7 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
connector->interlace_allowed = true;
}
- if (nv_connector->dcb->type == DCB_CONNECTOR_DVI_I) {
+ if (nv_connector->type == DCB_CONNECTOR_DVI_I) {
drm_connector_property_set_value(connector,
dev->mode_config.dvi_i_subconnector_property,
nv_encoder->dcb->type == OUTPUT_TMDS ?
@@ -397,7 +383,7 @@ nouveau_connector_force(struct drm_connector *connector)
struct nouveau_encoder *nv_encoder;
int type;
- if (nv_connector->dcb->type == DCB_CONNECTOR_DVI_I) {
+ if (nv_connector->type == DCB_CONNECTOR_DVI_I) {
if (connector->force == DRM_FORCE_ON_DIGITAL)
type = OUTPUT_TMDS;
else
@@ -420,15 +406,21 @@ static int
nouveau_connector_set_property(struct drm_connector *connector,
struct drm_property *property, uint64_t value)
{
+ struct drm_nouveau_private *dev_priv = connector->dev->dev_private;
+ struct nouveau_display_engine *disp = &dev_priv->engine.display;
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
struct drm_device *dev = connector->dev;
+ struct nouveau_crtc *nv_crtc;
int ret;
+ nv_crtc = NULL;
+ if (connector->encoder && connector->encoder->crtc)
+ nv_crtc = nouveau_crtc(connector->encoder->crtc);
+
/* Scaling mode */
if (property == dev->mode_config.scaling_mode_property) {
- struct nouveau_crtc *nv_crtc = NULL;
bool modeset = false;
switch (value) {
@@ -454,8 +446,6 @@ nouveau_connector_set_property(struct drm_connector *connector,
modeset = true;
nv_connector->scaling_mode = value;
- if (connector->encoder && connector->encoder->crtc)
- nv_crtc = nouveau_crtc(connector->encoder->crtc);
if (!nv_crtc)
return 0;
@@ -467,7 +457,7 @@ nouveau_connector_set_property(struct drm_connector *connector,
if (!ret)
return -EINVAL;
} else {
- ret = nv_crtc->set_scale(nv_crtc, value, true);
+ ret = nv_crtc->set_scale(nv_crtc, true);
if (ret)
return ret;
}
@@ -475,23 +465,58 @@ nouveau_connector_set_property(struct drm_connector *connector,
return 0;
}
- /* Dithering */
- if (property == dev->mode_config.dithering_mode_property) {
- struct nouveau_crtc *nv_crtc = NULL;
+ /* Underscan */
+ if (property == disp->underscan_property) {
+ if (nv_connector->underscan != value) {
+ nv_connector->underscan = value;
+ if (!nv_crtc || !nv_crtc->set_scale)
+ return 0;
- if (value == DRM_MODE_DITHERING_ON)
- nv_connector->use_dithering = true;
- else
- nv_connector->use_dithering = false;
+ return nv_crtc->set_scale(nv_crtc, true);
+ }
+
+ return 0;
+ }
+
+ if (property == disp->underscan_hborder_property) {
+ if (nv_connector->underscan_hborder != value) {
+ nv_connector->underscan_hborder = value;
+ if (!nv_crtc || !nv_crtc->set_scale)
+ return 0;
+
+ return nv_crtc->set_scale(nv_crtc, true);
+ }
+
+ return 0;
+ }
+
+ if (property == disp->underscan_vborder_property) {
+ if (nv_connector->underscan_vborder != value) {
+ nv_connector->underscan_vborder = value;
+ if (!nv_crtc || !nv_crtc->set_scale)
+ return 0;
+
+ return nv_crtc->set_scale(nv_crtc, true);
+ }
+
+ return 0;
+ }
+
+ /* Dithering */
+ if (property == disp->dithering_mode) {
+ nv_connector->dithering_mode = value;
+ if (!nv_crtc || !nv_crtc->set_dither)
+ return 0;
- if (connector->encoder && connector->encoder->crtc)
- nv_crtc = nouveau_crtc(connector->encoder->crtc);
+ return nv_crtc->set_dither(nv_crtc, true);
+ }
+ if (property == disp->dithering_depth) {
+ nv_connector->dithering_depth = value;
if (!nv_crtc || !nv_crtc->set_dither)
return 0;
- return nv_crtc->set_dither(nv_crtc, nv_connector->use_dithering,
- true);
+ return nv_crtc->set_dither(nv_crtc, true);
}
if (nv_encoder && nv_encoder->dcb->type == OUTPUT_TV)
@@ -602,6 +627,46 @@ nouveau_connector_scaler_modes_add(struct drm_connector *connector)
return modes;
}
+static void
+nouveau_connector_detect_depth(struct drm_connector *connector)
+{
+ struct drm_nouveau_private *dev_priv = connector->dev->dev_private;
+ struct nouveau_connector *nv_connector = nouveau_connector(connector);
+ struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
+ struct nvbios *bios = &dev_priv->vbios;
+ struct drm_display_mode *mode = nv_connector->native_mode;
+ bool duallink;
+
+ /* if the edid is feeling nice enough to provide this info, use it */
+ if (nv_connector->edid && connector->display_info.bpc)
+ return;
+
+ /* if not, we're out of options unless we're LVDS, default to 6bpc */
+ connector->display_info.bpc = 6;
+ if (nv_encoder->dcb->type != OUTPUT_LVDS)
+ return;
+
+ /* LVDS: panel straps */
+ if (bios->fp_no_ddc) {
+ if (bios->fp.if_is_24bit)
+ connector->display_info.bpc = 8;
+ return;
+ }
+
+ /* LVDS: DDC panel, need to first determine the number of links to
+ * know which if_is_24bit flag to check...
+ */
+ if (nv_connector->edid &&
+ nv_connector->type == DCB_CONNECTOR_LVDS_SPWG)
+ duallink = ((u8 *)nv_connector->edid)[121] == 2;
+ else
+ duallink = mode->clock >= bios->fp.duallink_transition_clk;
+
+ if ((!duallink && (bios->fp.strapless_is_24bit & 1)) ||
+ ( duallink && (bios->fp.strapless_is_24bit & 2)))
+ connector->display_info.bpc = 8;
+}
+
static int
nouveau_connector_get_modes(struct drm_connector *connector)
{
@@ -631,6 +696,12 @@ nouveau_connector_get_modes(struct drm_connector *connector)
nv_connector->native_mode = drm_mode_duplicate(dev, &mode);
}
+ /* Determine display colour depth for everything except LVDS now,
+ * DP requires this before mode_valid() is called.
+ */
+ if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
+ nouveau_connector_detect_depth(connector);
+
/* Find the native mode if this is a digital panel, if we didn't
* find any modes through DDC previously add the native mode to
* the list of modes.
@@ -646,12 +717,19 @@ nouveau_connector_get_modes(struct drm_connector *connector)
ret = 1;
}
+ /* Determine LVDS colour depth, must happen after determining
+ * "native" mode as some VBIOS tables require us to use the
+ * pixel clock as part of the lookup...
+ */
+ if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
+ nouveau_connector_detect_depth(connector);
+
if (nv_encoder->dcb->type == OUTPUT_TV)
ret = get_slave_funcs(encoder)->get_modes(encoder, connector);
- if (nv_connector->dcb->type == DCB_CONNECTOR_LVDS ||
- nv_connector->dcb->type == DCB_CONNECTOR_LVDS_SPWG ||
- nv_connector->dcb->type == DCB_CONNECTOR_eDP)
+ if (nv_connector->type == DCB_CONNECTOR_LVDS ||
+ nv_connector->type == DCB_CONNECTOR_LVDS_SPWG ||
+ nv_connector->type == DCB_CONNECTOR_eDP)
ret += nouveau_connector_scaler_modes_add(connector);
return ret;
@@ -710,7 +788,7 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
case OUTPUT_DP:
max_clock = nv_encoder->dp.link_nr;
max_clock *= nv_encoder->dp.link_bw;
- clock = clock * nouveau_connector_bpp(connector) / 10;
+ clock = clock * (connector->display_info.bpc * 3) / 10;
break;
default:
BUG_ON(1);
@@ -768,96 +846,175 @@ nouveau_connector_funcs_lvds = {
.force = nouveau_connector_force
};
+static int
+drm_conntype_from_dcb(enum dcb_connector_type dcb)
+{
+ switch (dcb) {
+ case DCB_CONNECTOR_VGA : return DRM_MODE_CONNECTOR_VGA;
+ case DCB_CONNECTOR_TV_0 :
+ case DCB_CONNECTOR_TV_1 :
+ case DCB_CONNECTOR_TV_3 : return DRM_MODE_CONNECTOR_TV;
+ case DCB_CONNECTOR_DVI_I : return DRM_MODE_CONNECTOR_DVII;
+ case DCB_CONNECTOR_DVI_D : return DRM_MODE_CONNECTOR_DVID;
+ case DCB_CONNECTOR_LVDS :
+ case DCB_CONNECTOR_LVDS_SPWG: return DRM_MODE_CONNECTOR_LVDS;
+ case DCB_CONNECTOR_DP : return DRM_MODE_CONNECTOR_DisplayPort;
+ case DCB_CONNECTOR_eDP : return DRM_MODE_CONNECTOR_eDP;
+ case DCB_CONNECTOR_HDMI_0 :
+ case DCB_CONNECTOR_HDMI_1 : return DRM_MODE_CONNECTOR_HDMIA;
+ default:
+ break;
+ }
+
+ return DRM_MODE_CONNECTOR_Unknown;
+}
+
struct drm_connector *
nouveau_connector_create(struct drm_device *dev, int index)
{
const struct drm_connector_funcs *funcs = &nouveau_connector_funcs;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+ struct nouveau_display_engine *disp = &dev_priv->engine.display;
struct nouveau_connector *nv_connector = NULL;
- struct dcb_connector_table_entry *dcb = NULL;
struct drm_connector *connector;
int type, ret = 0;
+ bool dummy;
NV_DEBUG_KMS(dev, "\n");
- if (index >= dev_priv->vbios.dcb.connector.entries)
- return ERR_PTR(-EINVAL);
-
- dcb = &dev_priv->vbios.dcb.connector.entry[index];
- if (dcb->drm)
- return dcb->drm;
-
- switch (dcb->type) {
- case DCB_CONNECTOR_VGA:
- type = DRM_MODE_CONNECTOR_VGA;
- break;
- case DCB_CONNECTOR_TV_0:
- case DCB_CONNECTOR_TV_1:
- case DCB_CONNECTOR_TV_3:
- type = DRM_MODE_CONNECTOR_TV;
- break;
- case DCB_CONNECTOR_DVI_I:
- type = DRM_MODE_CONNECTOR_DVII;
- break;
- case DCB_CONNECTOR_DVI_D:
- type = DRM_MODE_CONNECTOR_DVID;
- break;
- case DCB_CONNECTOR_HDMI_0:
- case DCB_CONNECTOR_HDMI_1:
- type = DRM_MODE_CONNECTOR_HDMIA;
- break;
- case DCB_CONNECTOR_LVDS:
- case DCB_CONNECTOR_LVDS_SPWG:
- type = DRM_MODE_CONNECTOR_LVDS;
- funcs = &nouveau_connector_funcs_lvds;
- break;
- case DCB_CONNECTOR_DP:
- type = DRM_MODE_CONNECTOR_DisplayPort;
- break;
- case DCB_CONNECTOR_eDP:
- type = DRM_MODE_CONNECTOR_eDP;
- break;
- default:
- NV_ERROR(dev, "unknown connector type: 0x%02x!!\n", dcb->type);
- return ERR_PTR(-EINVAL);
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ nv_connector = nouveau_connector(connector);
+ if (nv_connector->index == index)
+ return connector;
}
nv_connector = kzalloc(sizeof(*nv_connector), GFP_KERNEL);
if (!nv_connector)
return ERR_PTR(-ENOMEM);
- nv_connector->dcb = dcb;
+
connector = &nv_connector->base;
+ nv_connector->index = index;
+
+ /* attempt to parse vbios connector type and hotplug gpio */
+ nv_connector->dcb = dcb_conn(dev, index);
+ if (nv_connector->dcb) {
+ static const u8 hpd[16] = {
+ 0xff, 0x07, 0x08, 0xff, 0xff, 0x51, 0x52, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0x5e, 0x5f, 0x60,
+ };
+
+ u32 entry = ROM16(nv_connector->dcb[0]);
+ if (dcb_conntab(dev)[3] >= 4)
+ entry |= (u32)ROM16(nv_connector->dcb[2]) << 16;
+
+ nv_connector->hpd = ffs((entry & 0x07033000) >> 12);
+ nv_connector->hpd = hpd[nv_connector->hpd];
+
+ nv_connector->type = nv_connector->dcb[0];
+ if (drm_conntype_from_dcb(nv_connector->type) ==
+ DRM_MODE_CONNECTOR_Unknown) {
+ NV_WARN(dev, "unknown connector type %02x\n",
+ nv_connector->type);
+ nv_connector->type = DCB_CONNECTOR_NONE;
+ }
- /* defaults, will get overridden in detect() */
- connector->interlace_allowed = false;
- connector->doublescan_allowed = false;
+ /* Gigabyte NX85T */
+ if (nv_match_device(dev, 0x0421, 0x1458, 0x344c)) {
+ if (nv_connector->type == DCB_CONNECTOR_HDMI_1)
+ nv_connector->type = DCB_CONNECTOR_DVI_I;
+ }
- drm_connector_init(dev, connector, funcs, type);
- drm_connector_helper_add(connector, &nouveau_connector_helper_funcs);
+ /* Gigabyte GV-NX86T512H */
+ if (nv_match_device(dev, 0x0402, 0x1458, 0x3455)) {
+ if (nv_connector->type == DCB_CONNECTOR_HDMI_1)
+ nv_connector->type = DCB_CONNECTOR_DVI_I;
+ }
+ } else {
+ nv_connector->type = DCB_CONNECTOR_NONE;
+ nv_connector->hpd = DCB_GPIO_UNUSED;
+ }
+
+ /* no vbios data, or an unknown dcb connector type - attempt to
+ * figure out something suitable ourselves
+ */
+ if (nv_connector->type == DCB_CONNECTOR_NONE) {
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct dcb_table *dcbt = &dev_priv->vbios.dcb;
+ u32 encoders = 0;
+ int i;
+
+ for (i = 0; i < dcbt->entries; i++) {
+ if (dcbt->entry[i].connector == nv_connector->index)
+ encoders |= (1 << dcbt->entry[i].type);
+ }
- /* Check if we need dithering enabled */
- if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
- bool dummy, is_24bit = false;
+ if (encoders & (1 << OUTPUT_DP)) {
+ if (encoders & (1 << OUTPUT_TMDS))
+ nv_connector->type = DCB_CONNECTOR_DP;
+ else
+ nv_connector->type = DCB_CONNECTOR_eDP;
+ } else
+ if (encoders & (1 << OUTPUT_TMDS)) {
+ if (encoders & (1 << OUTPUT_ANALOG))
+ nv_connector->type = DCB_CONNECTOR_DVI_I;
+ else
+ nv_connector->type = DCB_CONNECTOR_DVI_D;
+ } else
+ if (encoders & (1 << OUTPUT_ANALOG)) {
+ nv_connector->type = DCB_CONNECTOR_VGA;
+ } else
+ if (encoders & (1 << OUTPUT_LVDS)) {
+ nv_connector->type = DCB_CONNECTOR_LVDS;
+ } else
+ if (encoders & (1 << OUTPUT_TV)) {
+ nv_connector->type = DCB_CONNECTOR_TV_0;
+ }
+ }
- ret = nouveau_bios_parse_lvds_table(dev, 0, &dummy, &is_24bit);
+ type = drm_conntype_from_dcb(nv_connector->type);
+ if (type == DRM_MODE_CONNECTOR_LVDS) {
+ ret = nouveau_bios_parse_lvds_table(dev, 0, &dummy, &dummy);
if (ret) {
- NV_ERROR(dev, "Error parsing LVDS table, disabling "
- "LVDS\n");
- goto fail;
+ NV_ERROR(dev, "Error parsing LVDS table, disabling\n");
+ kfree(nv_connector);
+ return ERR_PTR(ret);
}
- nv_connector->use_dithering = !is_24bit;
+ funcs = &nouveau_connector_funcs_lvds;
+ } else {
+ funcs = &nouveau_connector_funcs;
}
+ /* defaults, will get overridden in detect() */
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+
+ drm_connector_init(dev, connector, funcs, type);
+ drm_connector_helper_add(connector, &nouveau_connector_helper_funcs);
+
/* Init DVI-I specific properties */
- if (dcb->type == DCB_CONNECTOR_DVI_I) {
- drm_mode_create_dvi_i_properties(dev);
+ if (nv_connector->type == DCB_CONNECTOR_DVI_I)
drm_connector_attach_property(connector, dev->mode_config.dvi_i_subconnector_property, 0);
- drm_connector_attach_property(connector, dev->mode_config.dvi_i_select_subconnector_property, 0);
+
+ /* Add overscan compensation options to digital outputs */
+ if (disp->underscan_property &&
+ (nv_connector->type == DCB_CONNECTOR_DVI_D ||
+ nv_connector->type == DCB_CONNECTOR_DVI_I ||
+ nv_connector->type == DCB_CONNECTOR_HDMI_0 ||
+ nv_connector->type == DCB_CONNECTOR_HDMI_1 ||
+ nv_connector->type == DCB_CONNECTOR_DP)) {
+ drm_connector_attach_property(connector,
+ disp->underscan_property,
+ UNDERSCAN_OFF);
+ drm_connector_attach_property(connector,
+ disp->underscan_hborder_property,
+ 0);
+ drm_connector_attach_property(connector,
+ disp->underscan_vborder_property,
+ 0);
}
- switch (dcb->type) {
+ switch (nv_connector->type) {
case DCB_CONNECTOR_VGA:
if (dev_priv->card_type >= NV_50) {
drm_connector_attach_property(connector,
@@ -876,32 +1033,32 @@ nouveau_connector_create(struct drm_device *dev, int index)
drm_connector_attach_property(connector,
dev->mode_config.scaling_mode_property,
nv_connector->scaling_mode);
- drm_connector_attach_property(connector,
- dev->mode_config.dithering_mode_property,
- nv_connector->use_dithering ?
- DRM_MODE_DITHERING_ON : DRM_MODE_DITHERING_OFF);
+ if (disp->dithering_mode) {
+ nv_connector->dithering_mode = DITHERING_MODE_AUTO;
+ drm_connector_attach_property(connector,
+ disp->dithering_mode,
+ nv_connector->dithering_mode);
+ }
+ if (disp->dithering_depth) {
+ nv_connector->dithering_depth = DITHERING_DEPTH_AUTO;
+ drm_connector_attach_property(connector,
+ disp->dithering_depth,
+ nv_connector->dithering_depth);
+ }
break;
}
- if (nv_connector->dcb->gpio_tag != 0xff && pgpio->irq_register) {
- pgpio->irq_register(dev, nv_connector->dcb->gpio_tag,
- nouveau_connector_hotplug, connector);
-
- connector->polled = DRM_CONNECTOR_POLL_HPD;
- } else {
- connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ if (nv_connector->hpd != DCB_GPIO_UNUSED) {
+ ret = nouveau_gpio_isr_add(dev, 0, nv_connector->hpd, 0xff,
+ nouveau_connector_hotplug,
+ connector);
+ if (ret == 0)
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
}
drm_sysfs_connector_add(connector);
-
- dcb->drm = connector;
- return dcb->drm;
-
-fail:
- drm_connector_cleanup(connector);
- kfree(connector);
- return ERR_PTR(ret);
-
+ return connector;
}
static void
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index 711b1e9203af..e4857021304c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -30,13 +30,43 @@
#include "drm_edid.h"
#include "nouveau_i2c.h"
+enum nouveau_underscan_type {
+ UNDERSCAN_OFF,
+ UNDERSCAN_ON,
+ UNDERSCAN_AUTO,
+};
+
+/* the enum values specifically defined here match nv50/nvd0 hw values, and
+ * the code relies on this
+ */
+enum nouveau_dithering_mode {
+ DITHERING_MODE_OFF = 0x00,
+ DITHERING_MODE_ON = 0x01,
+ DITHERING_MODE_DYNAMIC2X2 = 0x10 | DITHERING_MODE_ON,
+ DITHERING_MODE_STATIC2X2 = 0x18 | DITHERING_MODE_ON,
+ DITHERING_MODE_TEMPORAL = 0x20 | DITHERING_MODE_ON,
+ DITHERING_MODE_AUTO
+};
+
+enum nouveau_dithering_depth {
+ DITHERING_DEPTH_6BPC = 0x00,
+ DITHERING_DEPTH_8BPC = 0x02,
+ DITHERING_DEPTH_AUTO
+};
+
struct nouveau_connector {
struct drm_connector base;
+ enum dcb_connector_type type;
+ u8 index;
+ u8 *dcb;
+ u8 hpd;
- struct dcb_connector_table_entry *dcb;
-
+ int dithering_mode;
+ int dithering_depth;
int scaling_mode;
- bool use_dithering;
+ enum nouveau_underscan_type underscan;
+ u32 underscan_hborder;
+ u32 underscan_vborder;
struct nouveau_encoder *detected_encoder;
struct edid *edid;
diff --git a/drivers/gpu/drm/nouveau/nouveau_crtc.h b/drivers/gpu/drm/nouveau/nouveau_crtc.h
index bf8e1289953d..686f6b4a1da3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_crtc.h
+++ b/drivers/gpu/drm/nouveau/nouveau_crtc.h
@@ -32,8 +32,6 @@ struct nouveau_crtc {
int index;
- struct drm_display_mode *mode;
-
uint32_t dpms_saved_fp_control;
uint32_t fp_users;
int saturation;
@@ -67,8 +65,8 @@ struct nouveau_crtc {
int depth;
} lut;
- int (*set_dither)(struct nouveau_crtc *crtc, bool on, bool update);
- int (*set_scale)(struct nouveau_crtc *crtc, int mode, bool update);
+ int (*set_dither)(struct nouveau_crtc *crtc, bool update);
+ int (*set_scale)(struct nouveau_crtc *crtc, bool update);
};
static inline struct nouveau_crtc *nouveau_crtc(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
index 8e1592368cce..fa2ec491f6a7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -44,7 +44,7 @@ nouveau_debugfs_channel_info(struct seq_file *m, void *data)
seq_printf(m, "channel id : %d\n", chan->id);
seq_printf(m, "cpu fifo state:\n");
- seq_printf(m, " base: 0x%08x\n", chan->pushbuf_base);
+ seq_printf(m, " base: 0x%10llx\n", chan->pushbuf_base);
seq_printf(m, " max: 0x%08x\n", chan->dma.max << 2);
seq_printf(m, " cur: 0x%08x\n", chan->dma.cur << 2);
seq_printf(m, " put: 0x%08x\n", chan->dma.put << 2);
@@ -178,6 +178,7 @@ static struct drm_info_list nouveau_debugfs_list[] = {
{ "memory", nouveau_debugfs_memory_info, 0, NULL },
{ "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL },
{ "ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL },
+ { "ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL },
};
#define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list)
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index ddbabefb4273..3cb52bc52b21 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -32,6 +32,8 @@
#include "nouveau_hw.h"
#include "nouveau_crtc.h"
#include "nouveau_dma.h"
+#include "nouveau_connector.h"
+#include "nouveau_gpio.h"
#include "nv50_display.h"
static void
@@ -64,7 +66,7 @@ static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = {
int
nouveau_framebuffer_init(struct drm_device *dev,
struct nouveau_framebuffer *nv_fb,
- struct drm_mode_fb_cmd *mode_cmd,
+ struct drm_mode_fb_cmd2 *mode_cmd,
struct nouveau_bo *nvbo)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -107,14 +109,14 @@ nouveau_framebuffer_init(struct drm_device *dev,
if (!tile_flags) {
if (dev_priv->card_type < NV_D0)
- nv_fb->r_pitch = 0x00100000 | fb->pitch;
+ nv_fb->r_pitch = 0x00100000 | fb->pitches[0];
else
- nv_fb->r_pitch = 0x01000000 | fb->pitch;
+ nv_fb->r_pitch = 0x01000000 | fb->pitches[0];
} else {
u32 mode = nvbo->tile_mode;
if (dev_priv->card_type >= NV_C0)
mode >>= 4;
- nv_fb->r_pitch = ((fb->pitch / 4) << 4) | mode;
+ nv_fb->r_pitch = ((fb->pitches[0] / 4) << 4) | mode;
}
}
@@ -124,13 +126,13 @@ nouveau_framebuffer_init(struct drm_device *dev,
static struct drm_framebuffer *
nouveau_user_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv,
- struct drm_mode_fb_cmd *mode_cmd)
+ struct drm_mode_fb_cmd2 *mode_cmd)
{
struct nouveau_framebuffer *nouveau_fb;
struct drm_gem_object *gem;
int ret;
- gem = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
+ gem = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
if (!gem)
return ERR_PTR(-ENOENT);
@@ -147,11 +149,186 @@ nouveau_user_framebuffer_create(struct drm_device *dev,
return &nouveau_fb->base;
}
-const struct drm_mode_config_funcs nouveau_mode_config_funcs = {
+static const struct drm_mode_config_funcs nouveau_mode_config_funcs = {
.fb_create = nouveau_user_framebuffer_create,
.output_poll_changed = nouveau_fbcon_output_poll_changed,
};
+
+struct drm_prop_enum_list {
+ u8 gen_mask;
+ int type;
+ char *name;
+};
+
+static struct drm_prop_enum_list underscan[] = {
+ { 6, UNDERSCAN_AUTO, "auto" },
+ { 6, UNDERSCAN_OFF, "off" },
+ { 6, UNDERSCAN_ON, "on" },
+ {}
+};
+
+static struct drm_prop_enum_list dither_mode[] = {
+ { 7, DITHERING_MODE_AUTO, "auto" },
+ { 7, DITHERING_MODE_OFF, "off" },
+ { 1, DITHERING_MODE_ON, "on" },
+ { 6, DITHERING_MODE_STATIC2X2, "static 2x2" },
+ { 6, DITHERING_MODE_DYNAMIC2X2, "dynamic 2x2" },
+ { 4, DITHERING_MODE_TEMPORAL, "temporal" },
+ {}
+};
+
+static struct drm_prop_enum_list dither_depth[] = {
+ { 6, DITHERING_DEPTH_AUTO, "auto" },
+ { 6, DITHERING_DEPTH_6BPC, "6 bpc" },
+ { 6, DITHERING_DEPTH_8BPC, "8 bpc" },
+ {}
+};
+
+#define PROP_ENUM(p,gen,n,list) do { \
+ struct drm_prop_enum_list *l = (list); \
+ int c = 0; \
+ while (l->gen_mask) { \
+ if (l->gen_mask & (1 << (gen))) \
+ c++; \
+ l++; \
+ } \
+ if (c) { \
+ p = drm_property_create(dev, DRM_MODE_PROP_ENUM, n, c); \
+ l = (list); \
+ c = 0; \
+ while (p && l->gen_mask) { \
+ if (l->gen_mask & (1 << (gen))) { \
+ drm_property_add_enum(p, c, l->type, l->name); \
+ c++; \
+ } \
+ l++; \
+ } \
+ } \
+} while(0)
+
+int
+nouveau_display_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_display_engine *disp = &dev_priv->engine.display;
+ struct drm_connector *connector;
+ int ret;
+
+ ret = disp->init(dev);
+ if (ret)
+ return ret;
+
+ drm_kms_helper_poll_enable(dev);
+
+ /* enable hotplug interrupts */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct nouveau_connector *conn = nouveau_connector(connector);
+ nouveau_gpio_irq(dev, 0, conn->hpd, 0xff, true);
+ }
+
+ return ret;
+}
+
+void
+nouveau_display_fini(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_display_engine *disp = &dev_priv->engine.display;
+ struct drm_connector *connector;
+
+ /* disable hotplug interrupts */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct nouveau_connector *conn = nouveau_connector(connector);
+ nouveau_gpio_irq(dev, 0, conn->hpd, 0xff, false);
+ }
+
+ drm_kms_helper_poll_disable(dev);
+ disp->fini(dev);
+}
+
+int
+nouveau_display_create(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_display_engine *disp = &dev_priv->engine.display;
+ int ret, gen;
+
+ drm_mode_config_init(dev);
+ drm_mode_create_scaling_mode_property(dev);
+ drm_mode_create_dvi_i_properties(dev);
+
+ if (dev_priv->card_type < NV_50)
+ gen = 0;
+ else
+ if (dev_priv->card_type < NV_D0)
+ gen = 1;
+ else
+ gen = 2;
+
+ PROP_ENUM(disp->dithering_mode, gen, "dithering mode", dither_mode);
+ PROP_ENUM(disp->dithering_depth, gen, "dithering depth", dither_depth);
+ PROP_ENUM(disp->underscan_property, gen, "underscan", underscan);
+
+ disp->underscan_hborder_property =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "underscan hborder", 2);
+ disp->underscan_hborder_property->values[0] = 0;
+ disp->underscan_hborder_property->values[1] = 128;
+
+ disp->underscan_vborder_property =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "underscan vborder", 2);
+ disp->underscan_vborder_property->values[0] = 0;
+ disp->underscan_vborder_property->values[1] = 128;
+
+ dev->mode_config.funcs = (void *)&nouveau_mode_config_funcs;
+ dev->mode_config.fb_base = pci_resource_start(dev->pdev, 1);
+
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+ if (dev_priv->card_type < NV_10) {
+ dev->mode_config.max_width = 2048;
+ dev->mode_config.max_height = 2048;
+ } else
+ if (dev_priv->card_type < NV_50) {
+ dev->mode_config.max_width = 4096;
+ dev->mode_config.max_height = 4096;
+ } else {
+ dev->mode_config.max_width = 8192;
+ dev->mode_config.max_height = 8192;
+ }
+
+ drm_kms_helper_poll_init(dev);
+ drm_kms_helper_poll_disable(dev);
+
+ ret = disp->create(dev);
+ if (ret)
+ return ret;
+
+ if (dev->mode_config.num_crtc) {
+ ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+void
+nouveau_display_destroy(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_display_engine *disp = &dev_priv->engine.display;
+
+ drm_vblank_cleanup(dev);
+
+ disp->destroy(dev);
+
+ drm_kms_helper_poll_fini(dev);
+ drm_mode_config_cleanup(dev);
+}
+
int
nouveau_vblank_enable(struct drm_device *dev, int crtc)
{
@@ -294,7 +471,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
/* Initialize a page flip struct */
*s = (struct nouveau_page_flip_state)
{ { }, event, nouveau_crtc(crtc)->index,
- fb->bits_per_pixel, fb->pitch, crtc->x, crtc->y,
+ fb->bits_per_pixel, fb->pitches[0], crtc->x, crtc->y,
new_bo->bo.offset };
/* Choose the channel the flip will be handled in */
@@ -305,7 +482,10 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
/* Emit a page flip */
if (dev_priv->card_type >= NV_50) {
- ret = nv50_display_flip_next(crtc, fb, chan);
+ if (dev_priv->card_type >= NV_D0)
+ ret = nvd0_display_flip_next(crtc, fb, chan, 0);
+ else
+ ret = nv50_display_flip_next(crtc, fb, chan);
if (ret) {
nouveau_channel_put(&chan);
goto fail_unreserve;
@@ -369,3 +549,48 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
spin_unlock_irqrestore(&dev->event_lock, flags);
return 0;
}
+
+int
+nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ struct nouveau_bo *bo;
+ int ret;
+
+ args->pitch = roundup(args->width * (args->bpp / 8), 256);
+ args->size = args->pitch * args->height;
+ args->size = roundup(args->size, PAGE_SIZE);
+
+ ret = nouveau_gem_new(dev, args->size, 0, TTM_PL_FLAG_VRAM, 0, 0, &bo);
+ if (ret)
+ return ret;
+
+ ret = drm_gem_handle_create(file_priv, bo->gem, &args->handle);
+ drm_gem_object_unreference_unlocked(bo->gem);
+ return ret;
+}
+
+int
+nouveau_display_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
+ uint32_t handle)
+{
+ return drm_gem_handle_delete(file_priv, handle);
+}
+
+int
+nouveau_display_dumb_map_offset(struct drm_file *file_priv,
+ struct drm_device *dev,
+ uint32_t handle, uint64_t *poffset)
+{
+ struct drm_gem_object *gem;
+
+ gem = drm_gem_object_lookup(dev, file_priv, handle);
+ if (gem) {
+ struct nouveau_bo *bo = gem->driver_private;
+ *poffset = bo->bo.addr_space_offset;
+ drm_gem_object_unreference_unlocked(gem);
+ return 0;
+ }
+
+ return -ENOENT;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 00bc6eaad558..4c2e4e5925fe 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -134,11 +134,13 @@ OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords)
* -EBUSY if timeout exceeded
*/
static inline int
-READ_GET(struct nouveau_channel *chan, uint32_t *prev_get, uint32_t *timeout)
+READ_GET(struct nouveau_channel *chan, uint64_t *prev_get, int *timeout)
{
- uint32_t val;
+ uint64_t val;
val = nvchan_rd32(chan, chan->user_get);
+ if (chan->user_get_hi)
+ val |= (uint64_t)nvchan_rd32(chan, chan->user_get_hi) << 32;
/* reset counter as long as GET is still advancing, this is
* to avoid misdetecting a GPU lockup if the GPU happens to
@@ -218,8 +220,8 @@ nv50_dma_push_wait(struct nouveau_channel *chan, int count)
static int
nv50_dma_wait(struct nouveau_channel *chan, int slots, int count)
{
- uint32_t cnt = 0, prev_get = 0;
- int ret;
+ uint64_t prev_get = 0;
+ int ret, cnt = 0;
ret = nv50_dma_push_wait(chan, slots + 1);
if (unlikely(ret))
@@ -261,8 +263,8 @@ nv50_dma_wait(struct nouveau_channel *chan, int slots, int count)
int
nouveau_dma_wait(struct nouveau_channel *chan, int slots, int size)
{
- uint32_t prev_get = 0, cnt = 0;
- int get;
+ uint64_t prev_get = 0;
+ int cnt = 0, get;
if (chan->dma.ib_max)
return nv50_dma_wait(chan, slots, size);
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index de5efe71fefd..9b93b703ceab 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -29,6 +29,7 @@
#include "nouveau_connector.h"
#include "nouveau_encoder.h"
#include "nouveau_crtc.h"
+#include "nouveau_gpio.h"
/******************************************************************************
* aux channel util functions
@@ -273,8 +274,6 @@ nouveau_dp_tu_update(struct drm_device *dev, int or, int link, u32 clk, u32 bpp)
u8 *
nouveau_dp_bios_data(struct drm_device *dev, struct dcb_entry *dcb, u8 **entry)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->vbios;
struct bit_entry d;
u8 *table;
int i;
@@ -289,7 +288,7 @@ nouveau_dp_bios_data(struct drm_device *dev, struct dcb_entry *dcb, u8 **entry)
return NULL;
}
- table = ROMPTR(bios, d.data[0]);
+ table = ROMPTR(dev, d.data[0]);
if (!table) {
NV_ERROR(dev, "displayport table pointer invalid\n");
return NULL;
@@ -306,7 +305,7 @@ nouveau_dp_bios_data(struct drm_device *dev, struct dcb_entry *dcb, u8 **entry)
}
for (i = 0; i < table[3]; i++) {
- *entry = ROMPTR(bios, table[table[1] + (i * table[2])]);
+ *entry = ROMPTR(dev, table[table[1] + (i * table[2])]);
if (*entry && bios_encoder_match(dcb, ROM32((*entry)[0])))
return table;
}
@@ -336,7 +335,6 @@ struct dp_state {
static void
dp_set_link_config(struct drm_device *dev, struct dp_state *dp)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
int or = dp->or, link = dp->link;
u8 *entry, sink[2];
u32 dp_ctrl;
@@ -360,7 +358,7 @@ dp_set_link_config(struct drm_device *dev, struct dp_state *dp)
* table, that has (among other things) pointers to more scripts that
* need to be executed, this time depending on link speed.
*/
- entry = ROMPTR(&dev_priv->vbios, dp->entry[10]);
+ entry = ROMPTR(dev, dp->entry[10]);
if (entry) {
if (dp->table[0] < 0x30) {
while (dp->link_bw < (ROM16(entry[0]) * 10))
@@ -559,8 +557,6 @@ dp_link_train_eq(struct drm_device *dev, struct dp_state *dp)
bool
nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate)
{
- struct drm_nouveau_private *dev_priv = encoder->dev->dev_private;
- struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
struct nouveau_connector *nv_connector =
@@ -581,7 +577,7 @@ nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate)
dp.dcb = nv_encoder->dcb;
dp.crtc = nv_crtc->index;
- dp.auxch = auxch->rd;
+ dp.auxch = auxch->drive;
dp.or = nv_encoder->or;
dp.link = !(nv_encoder->dcb->sorconf.link & 1);
dp.dpcd = nv_encoder->dp.dpcd;
@@ -590,7 +586,7 @@ nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate)
* we take during link training (DP_SET_POWER is one), we need
* to ignore them for the moment to avoid races.
*/
- pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, false);
+ nouveau_gpio_irq(dev, 0, nv_connector->hpd, 0xff, false);
/* enable down-spreading, if possible */
if (dp.table[1] >= 16) {
@@ -639,7 +635,7 @@ nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate)
nouveau_bios_run_init_table(dev, ROM16(dp.entry[8]), dp.dcb, dp.crtc);
/* re-enable hotplug detect */
- pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, true);
+ nouveau_gpio_irq(dev, 0, nv_connector->hpd, 0xff, true);
return true;
}
@@ -656,7 +652,7 @@ nouveau_dp_detect(struct drm_encoder *encoder)
if (!auxch)
return false;
- ret = auxch_tx(dev, auxch->rd, 9, DP_DPCD_REV, dpcd, 8);
+ ret = auxch_tx(dev, auxch->drive, 9, DP_DPCD_REV, dpcd, 8);
if (ret)
return false;
@@ -684,7 +680,7 @@ int
nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
uint8_t *data, int data_nr)
{
- return auxch_tx(auxch->dev, auxch->rd, cmd, addr, data, data_nr);
+ return auxch_tx(auxch->dev, auxch->drive, cmd, addr, data, data_nr);
}
static int
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index 9f7bb1295262..e4a7cfe7898d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -124,6 +124,10 @@ MODULE_PARM_DESC(ctxfw, "Use external HUB/GPC ucode (fermi)\n");
int nouveau_ctxfw;
module_param_named(ctxfw, nouveau_ctxfw, int, 0400);
+MODULE_PARM_DESC(mxmdcb, "Sanitise DCB table according to MXM-SIS\n");
+int nouveau_mxmdcb = 1;
+module_param_named(mxmdcb, nouveau_mxmdcb, int, 0400);
+
int nouveau_fbpercrtc;
#if 0
module_param_named(fbpercrtc, nouveau_fbpercrtc, int, 0400);
@@ -178,8 +182,11 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- NV_INFO(dev, "Disabling fbcon acceleration...\n");
- nouveau_fbcon_save_disable_accel(dev);
+ NV_INFO(dev, "Disabling display...\n");
+ nouveau_display_fini(dev);
+
+ NV_INFO(dev, "Disabling fbcon...\n");
+ nouveau_fbcon_set_suspend(dev, 1);
NV_INFO(dev, "Unpinning framebuffer(s)...\n");
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -220,7 +227,7 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
ret = dev_priv->eng[e]->fini(dev, e, true);
if (ret) {
- NV_ERROR(dev, "... engine %d failed: %d\n", i, ret);
+ NV_ERROR(dev, "... engine %d failed: %d\n", e, ret);
goto out_abort;
}
}
@@ -246,10 +253,6 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
pci_set_power_state(pdev, PCI_D3hot);
}
- console_lock();
- nouveau_fbcon_set_suspend(dev, 1);
- console_unlock();
- nouveau_fbcon_restore_accel(dev);
return 0;
out_abort:
@@ -275,8 +278,6 @@ nouveau_pci_resume(struct pci_dev *pdev)
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- nouveau_fbcon_save_disable_accel(dev);
-
NV_INFO(dev, "We're back, enabling device...\n");
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
@@ -296,8 +297,6 @@ nouveau_pci_resume(struct pci_dev *pdev)
if (ret)
return ret;
- nouveau_pm_resume(dev);
-
if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
ret = nouveau_mem_init_agp(dev);
if (ret) {
@@ -337,6 +336,8 @@ nouveau_pci_resume(struct pci_dev *pdev)
}
}
+ nouveau_pm_resume(dev);
+
NV_INFO(dev, "Restoring mode...\n");
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct nouveau_framebuffer *nouveau_fb;
@@ -358,16 +359,10 @@ nouveau_pci_resume(struct pci_dev *pdev)
NV_ERROR(dev, "Could not pin/map cursor.\n");
}
- engine->display.init(dev);
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- u32 offset = nv_crtc->cursor.nvbo->bo.offset;
+ nouveau_fbcon_set_suspend(dev, 0);
+ nouveau_fbcon_zfill_all(dev);
- nv_crtc->cursor.set_offset(nv_crtc, offset);
- nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x,
- nv_crtc->cursor_saved_y);
- }
+ nouveau_display_init(dev);
/* Force CLUT to get re-loaded during modeset */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -376,18 +371,35 @@ nouveau_pci_resume(struct pci_dev *pdev)
nv_crtc->lut.depth = 0;
}
- console_lock();
- nouveau_fbcon_set_suspend(dev, 0);
- console_unlock();
+ drm_helper_resume_force_mode(dev);
- nouveau_fbcon_zfill_all(dev);
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ u32 offset = nv_crtc->cursor.nvbo->bo.offset;
- drm_helper_resume_force_mode(dev);
+ nv_crtc->cursor.set_offset(nv_crtc, offset);
+ nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x,
+ nv_crtc->cursor_saved_y);
+ }
- nouveau_fbcon_restore_accel(dev);
return 0;
}
+static const struct file_operations nouveau_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = nouveau_ttm_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+ .read = drm_read,
+#if defined(CONFIG_COMPAT)
+ .compat_ioctl = nouveau_compat_ioctl,
+#endif
+ .llseek = noop_llseek,
+};
+
static struct drm_driver driver = {
.driver_features =
DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG |
@@ -413,26 +425,16 @@ static struct drm_driver driver = {
.disable_vblank = nouveau_vblank_disable,
.reclaim_buffers = drm_core_reclaim_buffers,
.ioctls = nouveau_ioctls,
- .fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = nouveau_ttm_mmap,
- .poll = drm_poll,
- .fasync = drm_fasync,
- .read = drm_read,
-#if defined(CONFIG_COMPAT)
- .compat_ioctl = nouveau_compat_ioctl,
-#endif
- .llseek = noop_llseek,
- },
-
+ .fops = &nouveau_driver_fops,
.gem_init_object = nouveau_gem_object_new,
.gem_free_object = nouveau_gem_object_del,
.gem_open_object = nouveau_gem_object_open,
.gem_close_object = nouveau_gem_object_close,
+ .dumb_create = nouveau_display_dumb_create,
+ .dumb_map_offset = nouveau_display_dumb_map_offset,
+ .dumb_destroy = nouveau_display_dumb_destroy,
+
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
#ifdef GIT_REVISION
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 29837da1098b..b82709828931 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -163,6 +163,9 @@ enum nouveau_flags {
#define NVOBJ_ENGINE_COPY0 3
#define NVOBJ_ENGINE_COPY1 4
#define NVOBJ_ENGINE_MPEG 5
+#define NVOBJ_ENGINE_PPP NVOBJ_ENGINE_MPEG
+#define NVOBJ_ENGINE_BSP 6
+#define NVOBJ_ENGINE_VP 7
#define NVOBJ_ENGINE_DISPLAY 15
#define NVOBJ_ENGINE_NR 16
@@ -229,6 +232,7 @@ struct nouveau_channel {
/* mapping of the regs controlling the fifo */
void __iomem *user;
uint32_t user_get;
+ uint32_t user_get_hi;
uint32_t user_put;
/* Fencing */
@@ -246,7 +250,7 @@ struct nouveau_channel {
struct nouveau_gpuobj *pushbuf;
struct nouveau_bo *pushbuf_bo;
struct nouveau_vma pushbuf_vma;
- uint32_t pushbuf_base;
+ uint64_t pushbuf_base;
/* Notifier memory */
struct nouveau_bo *notifier_bo;
@@ -393,24 +397,25 @@ struct nouveau_display_engine {
int (*early_init)(struct drm_device *);
void (*late_takedown)(struct drm_device *);
int (*create)(struct drm_device *);
- int (*init)(struct drm_device *);
void (*destroy)(struct drm_device *);
+ int (*init)(struct drm_device *);
+ void (*fini)(struct drm_device *);
+
+ struct drm_property *dithering_mode;
+ struct drm_property *dithering_depth;
+ struct drm_property *underscan_property;
+ struct drm_property *underscan_hborder_property;
+ struct drm_property *underscan_vborder_property;
};
struct nouveau_gpio_engine {
- void *priv;
-
- int (*init)(struct drm_device *);
- void (*takedown)(struct drm_device *);
-
- int (*get)(struct drm_device *, enum dcb_gpio_tag);
- int (*set)(struct drm_device *, enum dcb_gpio_tag, int state);
-
- int (*irq_register)(struct drm_device *, enum dcb_gpio_tag,
- void (*)(void *, int), void *);
- void (*irq_unregister)(struct drm_device *, enum dcb_gpio_tag,
- void (*)(void *, int), void *);
- bool (*irq_enable)(struct drm_device *, enum dcb_gpio_tag, bool on);
+ spinlock_t lock;
+ struct list_head isr;
+ int (*init)(struct drm_device *);
+ void (*fini)(struct drm_device *);
+ int (*drive)(struct drm_device *, int line, int dir, int out);
+ int (*sense)(struct drm_device *, int line);
+ void (*irq_enable)(struct drm_device *, int line, bool);
};
struct nouveau_pm_voltage_level {
@@ -484,7 +489,7 @@ struct nouveau_pm_level {
u32 copy;
u32 daemon;
u32 vdec;
- u32 unk05; /* nv50:nva3, roughly.. */
+ u32 dom6;
u32 unka0; /* nva3:nvc0 */
u32 hub01; /* nvc0- */
u32 hub06; /* nvc0- */
@@ -518,6 +523,12 @@ struct nouveau_pm_memtimings {
int nr_timing;
};
+struct nouveau_pm_fan {
+ u32 min_duty;
+ u32 max_duty;
+ u32 pwm_freq;
+};
+
struct nouveau_pm_engine {
struct nouveau_pm_voltage voltage;
struct nouveau_pm_level perflvl[NOUVEAU_PM_MAX_LEVEL];
@@ -525,6 +536,8 @@ struct nouveau_pm_engine {
struct nouveau_pm_memtimings memtimings;
struct nouveau_pm_temp_sensor_constants sensor_constants;
struct nouveau_pm_threshold_temp threshold_temp;
+ struct nouveau_pm_fan fan;
+ u32 pwm_divisor;
struct nouveau_pm_level boot;
struct nouveau_pm_level *cur;
@@ -532,19 +545,14 @@ struct nouveau_pm_engine {
struct device *hwmon;
struct notifier_block acpi_nb;
- int (*clock_get)(struct drm_device *, u32 id);
- void *(*clock_pre)(struct drm_device *, struct nouveau_pm_level *,
- u32 id, int khz);
- void (*clock_set)(struct drm_device *, void *);
-
int (*clocks_get)(struct drm_device *, struct nouveau_pm_level *);
void *(*clocks_pre)(struct drm_device *, struct nouveau_pm_level *);
- void (*clocks_set)(struct drm_device *, void *);
+ int (*clocks_set)(struct drm_device *, void *);
int (*voltage_get)(struct drm_device *);
int (*voltage_set)(struct drm_device *, int voltage);
- int (*fanspeed_get)(struct drm_device *);
- int (*fanspeed_set)(struct drm_device *, int fanspeed);
+ int (*pwm_get)(struct drm_device *, int line, u32*, u32*);
+ int (*pwm_set)(struct drm_device *, int line, u32, u32);
int (*temp_get)(struct drm_device *);
};
@@ -780,6 +788,8 @@ struct drm_nouveau_private {
struct nouveau_vm *chan_vm;
struct nvbios vbios;
+ u8 *mxms;
+ struct list_head i2c_ports;
struct nv04_mode_state mode_reg;
struct nv04_mode_state saved_reg;
@@ -850,6 +860,7 @@ extern char *nouveau_perflvl;
extern int nouveau_perflvl_wr;
extern int nouveau_msi;
extern int nouveau_ctxfw;
+extern int nouveau_mxmdcb;
extern int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state);
extern int nouveau_pci_resume(struct pci_dev *pdev);
@@ -1000,7 +1011,10 @@ extern int nouveau_sgdma_init(struct drm_device *);
extern void nouveau_sgdma_takedown(struct drm_device *);
extern uint32_t nouveau_sgdma_get_physical(struct drm_device *,
uint32_t offset);
-extern struct ttm_backend *nouveau_sgdma_init_ttm(struct drm_device *);
+extern struct ttm_tt *nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
+ unsigned long size,
+ uint32_t page_flags,
+ struct page *dummy_read_page);
/* nouveau_debugfs.c */
#if defined(CONFIG_DRM_NOUVEAU_DEBUG)
@@ -1041,12 +1055,14 @@ extern int nouveau_dma_wait(struct nouveau_channel *, int slots, int size);
#if defined(CONFIG_ACPI)
void nouveau_register_dsm_handler(void);
void nouveau_unregister_dsm_handler(void);
+void nouveau_switcheroo_optimus_dsm(void);
int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
bool nouveau_acpi_rom_supported(struct pci_dev *pdev);
int nouveau_acpi_edid(struct drm_device *, struct drm_connector *);
#else
static inline void nouveau_register_dsm_handler(void) {}
static inline void nouveau_unregister_dsm_handler(void) {}
+static inline void nouveau_switcheroo_optimus_dsm(void) {}
static inline bool nouveau_acpi_rom_supported(struct pci_dev *pdev) { return false; }
static inline int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len) { return -EINVAL; }
static inline int nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector) { return -EINVAL; }
@@ -1072,8 +1088,6 @@ extern int nouveau_run_vbios_init(struct drm_device *);
extern void nouveau_bios_run_init_table(struct drm_device *, uint16_t table,
struct dcb_entry *, int crtc);
extern void nouveau_bios_init_exec(struct drm_device *, uint16_t table);
-extern struct dcb_gpio_entry *nouveau_bios_gpio_entry(struct drm_device *,
- enum dcb_gpio_tag);
extern struct dcb_connector_table_entry *
nouveau_bios_connector_entry(struct drm_device *, int index);
extern u32 get_pll_register(struct drm_device *, enum pll_types);
@@ -1091,11 +1105,18 @@ extern int call_lvds_script(struct drm_device *, struct dcb_entry *, int head,
enum LVDS_script, int pxclk);
bool bios_encoder_match(struct dcb_entry *, u32 hash);
+/* nouveau_mxm.c */
+int nouveau_mxm_init(struct drm_device *dev);
+void nouveau_mxm_fini(struct drm_device *dev);
+
/* nouveau_ttm.c */
int nouveau_ttm_global_init(struct drm_nouveau_private *);
void nouveau_ttm_global_release(struct drm_nouveau_private *);
int nouveau_ttm_mmap(struct file *, struct vm_area_struct *);
+/* nouveau_hdmi.c */
+void nouveau_hdmi_mode_set(struct drm_encoder *, struct drm_display_mode *);
+
/* nouveau_dp.c */
int nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
uint8_t *data, int data_nr);
@@ -1222,6 +1243,9 @@ extern int nvc0_graph_isr_chid(struct drm_device *dev, u64 inst);
/* nv84_crypt.c */
extern int nv84_crypt_create(struct drm_device *);
+/* nv98_crypt.c */
+extern int nv98_crypt_create(struct drm_device *dev);
+
/* nva3_copy.c */
extern int nva3_copy_create(struct drm_device *dev);
@@ -1234,6 +1258,17 @@ extern int nv31_mpeg_create(struct drm_device *dev);
/* nv50_mpeg.c */
extern int nv50_mpeg_create(struct drm_device *dev);
+/* nv84_bsp.c */
+/* nv98_bsp.c */
+extern int nv84_bsp_create(struct drm_device *dev);
+
+/* nv84_vp.c */
+/* nv98_vp.c */
+extern int nv84_vp_create(struct drm_device *dev);
+
+/* nv98_ppp.c */
+extern int nv98_ppp_create(struct drm_device *dev);
+
/* nv04_instmem.c */
extern int nv04_instmem_init(struct drm_device *);
extern void nv04_instmem_takedown(struct drm_device *);
@@ -1311,13 +1346,19 @@ extern int nv17_tv_create(struct drm_connector *, struct dcb_entry *);
extern int nv04_display_early_init(struct drm_device *);
extern void nv04_display_late_takedown(struct drm_device *);
extern int nv04_display_create(struct drm_device *);
-extern int nv04_display_init(struct drm_device *);
extern void nv04_display_destroy(struct drm_device *);
+extern int nv04_display_init(struct drm_device *);
+extern void nv04_display_fini(struct drm_device *);
/* nvd0_display.c */
extern int nvd0_display_create(struct drm_device *);
-extern int nvd0_display_init(struct drm_device *);
extern void nvd0_display_destroy(struct drm_device *);
+extern int nvd0_display_init(struct drm_device *);
+extern void nvd0_display_fini(struct drm_device *);
+struct nouveau_bo *nvd0_display_crtc_sema(struct drm_device *, int crtc);
+void nvd0_display_flip_stop(struct drm_crtc *);
+int nvd0_display_flip_next(struct drm_crtc *, struct drm_framebuffer *,
+ struct nouveau_channel *, u32 swap_interval);
/* nv04_crtc.c */
extern int nv04_crtc_create(struct drm_device *, int index);
@@ -1412,31 +1453,40 @@ extern int nouveau_gem_ioctl_info(struct drm_device *, void *,
struct drm_file *);
/* nouveau_display.c */
+int nouveau_display_create(struct drm_device *dev);
+void nouveau_display_destroy(struct drm_device *dev);
+int nouveau_display_init(struct drm_device *dev);
+void nouveau_display_fini(struct drm_device *dev);
int nouveau_vblank_enable(struct drm_device *dev, int crtc);
void nouveau_vblank_disable(struct drm_device *dev, int crtc);
int nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event);
int nouveau_finish_page_flip(struct nouveau_channel *,
struct nouveau_page_flip_state *);
+int nouveau_display_dumb_create(struct drm_file *, struct drm_device *,
+ struct drm_mode_create_dumb *args);
+int nouveau_display_dumb_map_offset(struct drm_file *, struct drm_device *,
+ uint32_t handle, uint64_t *offset);
+int nouveau_display_dumb_destroy(struct drm_file *, struct drm_device *,
+ uint32_t handle);
/* nv10_gpio.c */
-int nv10_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
-int nv10_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
+int nv10_gpio_init(struct drm_device *dev);
+void nv10_gpio_fini(struct drm_device *dev);
+int nv10_gpio_drive(struct drm_device *dev, int line, int dir, int out);
+int nv10_gpio_sense(struct drm_device *dev, int line);
+void nv10_gpio_irq_enable(struct drm_device *, int line, bool on);
/* nv50_gpio.c */
int nv50_gpio_init(struct drm_device *dev);
void nv50_gpio_fini(struct drm_device *dev);
-int nv50_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
-int nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
-int nvd0_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
-int nvd0_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
-int nv50_gpio_irq_register(struct drm_device *, enum dcb_gpio_tag,
- void (*)(void *, int), void *);
-void nv50_gpio_irq_unregister(struct drm_device *, enum dcb_gpio_tag,
- void (*)(void *, int), void *);
-bool nv50_gpio_irq_enable(struct drm_device *, enum dcb_gpio_tag, bool on);
-
-/* nv50_calc. */
+int nv50_gpio_drive(struct drm_device *dev, int line, int dir, int out);
+int nv50_gpio_sense(struct drm_device *dev, int line);
+void nv50_gpio_irq_enable(struct drm_device *, int line, bool on);
+int nvd0_gpio_drive(struct drm_device *dev, int line, int dir, int out);
+int nvd0_gpio_sense(struct drm_device *dev, int line);
+
+/* nv50_calc.c */
int nv50_calc_pll(struct drm_device *, struct pll_lims *, int clk,
int *N1, int *M1, int *N2, int *M2, int *P);
int nva3_calc_pll(struct drm_device *, struct pll_lims *,
@@ -1559,6 +1609,13 @@ extern void nv_wo32(struct nouveau_gpuobj *, u32 offset, u32 val);
#define NV_TRACEWARN(d, fmt, arg...) NV_PRINTK(KERN_NOTICE, d, fmt, ##arg)
#define NV_TRACE(d, fmt, arg...) NV_PRINTK(KERN_INFO, d, fmt, ##arg)
#define NV_WARN(d, fmt, arg...) NV_PRINTK(KERN_WARNING, d, fmt, ##arg)
+#define NV_WARNONCE(d, fmt, arg...) do { \
+ static int _warned = 0; \
+ if (!_warned) { \
+ NV_WARN(d, fmt, ##arg); \
+ _warned = 1; \
+ } \
+} while(0)
/* nouveau_reg_debug bitmask */
enum {
diff --git a/drivers/gpu/drm/nouveau/nouveau_fb.h b/drivers/gpu/drm/nouveau/nouveau_fb.h
index 95c843e684bb..f3fb649fe454 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fb.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fb.h
@@ -42,8 +42,6 @@ nouveau_framebuffer(struct drm_framebuffer *fb)
return container_of(fb, struct nouveau_framebuffer, base);
}
-extern const struct drm_mode_config_funcs nouveau_mode_config_funcs;
-
int nouveau_framebuffer_init(struct drm_device *dev, struct nouveau_framebuffer *nouveau_fb,
- struct drm_mode_fb_cmd *mode_cmd, struct nouveau_bo *nvbo);
+ struct drm_mode_fb_cmd2 *mode_cmd, struct nouveau_bo *nvbo);
#endif /* __NOUVEAU_FB_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 3a4cc32b9e44..9892218d7452 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -36,6 +36,7 @@
#include <linux/init.h>
#include <linux/screen_info.h>
#include <linux/vga_switcheroo.h>
+#include <linux/console.h>
#include "drmP.h"
#include "drm.h"
@@ -281,7 +282,7 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
struct nouveau_framebuffer *nouveau_fb;
struct nouveau_channel *chan;
struct nouveau_bo *nvbo;
- struct drm_mode_fb_cmd mode_cmd;
+ struct drm_mode_fb_cmd2 mode_cmd;
struct pci_dev *pdev = dev->pdev;
struct device *device = &pdev->dev;
int size, ret;
@@ -289,12 +290,13 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
mode_cmd.width = sizes->surface_width;
mode_cmd.height = sizes->surface_height;
- mode_cmd.bpp = sizes->surface_bpp;
- mode_cmd.pitch = mode_cmd.width * (mode_cmd.bpp >> 3);
- mode_cmd.pitch = roundup(mode_cmd.pitch, 256);
- mode_cmd.depth = sizes->surface_depth;
+ mode_cmd.pitches[0] = mode_cmd.width * (sizes->surface_bpp >> 3);
+ mode_cmd.pitches[0] = roundup(mode_cmd.pitches[0], 256);
- size = mode_cmd.pitch * mode_cmd.height;
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
+
+ size = mode_cmd.pitches[0] * mode_cmd.height;
size = roundup(size, PAGE_SIZE);
ret = nouveau_gem_new(dev, size, 0, NOUVEAU_GEM_DOMAIN_VRAM,
@@ -369,7 +371,7 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
info->screen_base = nvbo_kmap_obj_iovirtual(nouveau_fb->nvbo);
info->screen_size = size;
- drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
+ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
drm_fb_helper_fill_var(info, &nfbdev->helper, sizes->fb_width, sizes->fb_height);
/* Set aperture base/size for vesafb takeover */
@@ -547,7 +549,13 @@ void nouveau_fbcon_restore_accel(struct drm_device *dev)
void nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ console_lock();
+ if (state == 0)
+ nouveau_fbcon_save_disable_accel(dev);
fb_set_suspend(dev_priv->nfbdev->helper.fbdev, state);
+ if (state == 1)
+ nouveau_fbcon_restore_accel(dev);
+ console_unlock();
}
void nouveau_fbcon_zfill_all(struct drm_device *dev)
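
Editor's sketch: the fbcon change above moves to drm_mode_fb_cmd2, where the driver fills in pitches[0] itself and derives a pixel format from bpp/depth rather than storing them raw. A standalone sketch of the pitch and buffer-size arithmetic, assuming a made-up 1920x1080, 32bpp mode and the 256-byte / page-size alignments used in the patch:

#include <stdio.h>
#include <stdint.h>

#define ROUNDUP(x, a)  ((((x) + (a) - 1) / (a)) * (a))
#define PAGE_SIZE      4096

int main(void)
{
        /* hypothetical console mode */
        uint32_t width = 1920, height = 1080, bpp = 32;

        /* bytes per scanline, padded to the 256-byte alignment the
         * scanout hardware expects (mirrors the pitches[0] computation) */
        uint32_t pitch = ROUNDUP(width * (bpp >> 3), 256);

        /* total buffer size, padded to a whole page */
        uint32_t size = ROUNDUP(pitch * height, PAGE_SIZE);

        printf("pitch=%u bytes, size=%u bytes\n", pitch, size);
        return 0;
}
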
diff --git a/drivers/gpu/drm/nouveau/nouveau_gpio.c b/drivers/gpu/drm/nouveau/nouveau_gpio.c
new file mode 100644
index 000000000000..a580cc62337a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_gpio.c
@@ -0,0 +1,400 @@
+/*
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_i2c.h"
+#include "nouveau_gpio.h"
+
+static u8 *
+dcb_gpio_table(struct drm_device *dev)
+{
+ u8 *dcb = dcb_table(dev);
+ if (dcb) {
+ if (dcb[0] >= 0x30 && dcb[1] >= 0x0c)
+ return ROMPTR(dev, dcb[0x0a]);
+ if (dcb[0] >= 0x22 && dcb[-1] >= 0x13)
+ return ROMPTR(dev, dcb[-15]);
+ }
+ return NULL;
+}
+
+static u8 *
+dcb_gpio_entry(struct drm_device *dev, int idx, int ent, u8 *version)
+{
+ u8 *table = dcb_gpio_table(dev);
+ if (table) {
+ *version = table[0];
+ if (*version < 0x30 && ent < table[2])
+ return table + 3 + (ent * table[1]);
+ else if (ent < table[2])
+ return table + table[1] + (ent * table[3]);
+ }
+ return NULL;
+}
+
+int
+nouveau_gpio_drive(struct drm_device *dev, int idx, int line, int dir, int out)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+
+ return pgpio->drive ? pgpio->drive(dev, line, dir, out) : -ENODEV;
+}
+
+int
+nouveau_gpio_sense(struct drm_device *dev, int idx, int line)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+
+ return pgpio->sense ? pgpio->sense(dev, line) : -ENODEV;
+}
+
+int
+nouveau_gpio_find(struct drm_device *dev, int idx, u8 func, u8 line,
+ struct gpio_func *gpio)
+{
+ u8 *table, *entry, version;
+ int i = -1;
+
+ if (line == 0xff && func == 0xff)
+ return -EINVAL;
+
+ while ((entry = dcb_gpio_entry(dev, idx, ++i, &version))) {
+ if (version < 0x40) {
+ u16 data = ROM16(entry[0]);
+ *gpio = (struct gpio_func) {
+ .line = (data & 0x001f) >> 0,
+ .func = (data & 0x07e0) >> 5,
+ .log[0] = (data & 0x1800) >> 11,
+ .log[1] = (data & 0x6000) >> 13,
+ };
+ } else
+ if (version < 0x41) {
+ *gpio = (struct gpio_func) {
+ .line = entry[0] & 0x1f,
+ .func = entry[1],
+ .log[0] = (entry[3] & 0x18) >> 3,
+ .log[1] = (entry[3] & 0x60) >> 5,
+ };
+ } else {
+ *gpio = (struct gpio_func) {
+ .line = entry[0] & 0x3f,
+ .func = entry[1],
+ .log[0] = (entry[4] & 0x30) >> 4,
+ .log[1] = (entry[4] & 0xc0) >> 6,
+ };
+ }
+
+ if ((line == 0xff || line == gpio->line) &&
+ (func == 0xff || func == gpio->func))
+ return 0;
+ }
+
+ /* DCB 2.2, fixed TVDAC GPIO data */
+ if ((table = dcb_table(dev)) && table[0] >= 0x22) {
+ if (func == DCB_GPIO_TVDAC0) {
+ *gpio = (struct gpio_func) {
+ .func = DCB_GPIO_TVDAC0,
+ .line = table[-4] >> 4,
+ .log[0] = !!(table[-5] & 2),
+ .log[1] = !(table[-5] & 2),
+ };
+ return 0;
+ }
+ }
+
+ /* Apple iMac G4 NV18 */
+ if (nv_match_device(dev, 0x0189, 0x10de, 0x0010)) {
+ if (func == DCB_GPIO_TVDAC0) {
+ *gpio = (struct gpio_func) {
+ .func = DCB_GPIO_TVDAC0,
+ .line = 4,
+ .log[0] = 0,
+ .log[1] = 1,
+ };
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+int
+nouveau_gpio_set(struct drm_device *dev, int idx, u8 tag, u8 line, int state)
+{
+ struct gpio_func gpio;
+ int ret;
+
+ ret = nouveau_gpio_find(dev, idx, tag, line, &gpio);
+ if (ret == 0) {
+ int dir = !!(gpio.log[state] & 0x02);
+ int out = !!(gpio.log[state] & 0x01);
+ ret = nouveau_gpio_drive(dev, idx, gpio.line, dir, out);
+ }
+
+ return ret;
+}
+
+int
+nouveau_gpio_get(struct drm_device *dev, int idx, u8 tag, u8 line)
+{
+ struct gpio_func gpio;
+ int ret;
+
+ ret = nouveau_gpio_find(dev, idx, tag, line, &gpio);
+ if (ret == 0) {
+ ret = nouveau_gpio_sense(dev, idx, gpio.line);
+ if (ret >= 0)
+ ret = (ret == (gpio.log[1] & 1));
+ }
+
+ return ret;
+}
+
+int
+nouveau_gpio_irq(struct drm_device *dev, int idx, u8 tag, u8 line, bool on)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+ struct gpio_func gpio;
+ int ret;
+
+ ret = nouveau_gpio_find(dev, idx, tag, line, &gpio);
+ if (ret == 0) {
+ if (idx == 0 && pgpio->irq_enable)
+ pgpio->irq_enable(dev, gpio.line, on);
+ else
+ ret = -ENODEV;
+ }
+
+ return ret;
+}
+
+struct gpio_isr {
+ struct drm_device *dev;
+ struct list_head head;
+ struct work_struct work;
+ int idx;
+ struct gpio_func func;
+ void (*handler)(void *, int);
+ void *data;
+ bool inhibit;
+};
+
+static void
+nouveau_gpio_isr_bh(struct work_struct *work)
+{
+ struct gpio_isr *isr = container_of(work, struct gpio_isr, work);
+ struct drm_device *dev = isr->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+ unsigned long flags;
+ int state;
+
+ state = nouveau_gpio_get(dev, isr->idx, isr->func.func, isr->func.line);
+ if (state >= 0)
+ isr->handler(isr->data, state);
+
+ spin_lock_irqsave(&pgpio->lock, flags);
+ isr->inhibit = false;
+ spin_unlock_irqrestore(&pgpio->lock, flags);
+}
+
+void
+nouveau_gpio_isr(struct drm_device *dev, int idx, u32 line_mask)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+ struct gpio_isr *isr;
+
+ if (idx != 0)
+ return;
+
+ spin_lock(&pgpio->lock);
+ list_for_each_entry(isr, &pgpio->isr, head) {
+ if (line_mask & (1 << isr->func.line)) {
+ if (isr->inhibit)
+ continue;
+ isr->inhibit = true;
+ schedule_work(&isr->work);
+ }
+ }
+ spin_unlock(&pgpio->lock);
+}
+
+int
+nouveau_gpio_isr_add(struct drm_device *dev, int idx, u8 tag, u8 line,
+ void (*handler)(void *, int), void *data)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+ struct gpio_isr *isr;
+ unsigned long flags;
+ int ret;
+
+ isr = kzalloc(sizeof(*isr), GFP_KERNEL);
+ if (!isr)
+ return -ENOMEM;
+
+ ret = nouveau_gpio_find(dev, idx, tag, line, &isr->func);
+ if (ret) {
+ kfree(isr);
+ return ret;
+ }
+
+ INIT_WORK(&isr->work, nouveau_gpio_isr_bh);
+ isr->dev = dev;
+ isr->handler = handler;
+ isr->data = data;
+ isr->idx = idx;
+
+ spin_lock_irqsave(&pgpio->lock, flags);
+ list_add(&isr->head, &pgpio->isr);
+ spin_unlock_irqrestore(&pgpio->lock, flags);
+ return 0;
+}
+
+void
+nouveau_gpio_isr_del(struct drm_device *dev, int idx, u8 tag, u8 line,
+ void (*handler)(void *, int), void *data)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+ struct gpio_isr *isr, *tmp;
+ struct gpio_func func;
+ unsigned long flags;
+ LIST_HEAD(tofree);
+ int ret;
+
+ ret = nouveau_gpio_find(dev, idx, tag, line, &func);
+ if (ret == 0) {
+ spin_lock_irqsave(&pgpio->lock, flags);
+ list_for_each_entry_safe(isr, tmp, &pgpio->isr, head) {
+ if (memcmp(&isr->func, &func, sizeof(func)) ||
+ isr->idx != idx ||
+ isr->handler != handler || isr->data != data)
+ continue;
+ list_move(&isr->head, &tofree);
+ }
+ spin_unlock_irqrestore(&pgpio->lock, flags);
+
+ list_for_each_entry_safe(isr, tmp, &tofree, head) {
+ flush_work_sync(&isr->work);
+ kfree(isr);
+ }
+ }
+}
+
+int
+nouveau_gpio_create(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+
+ INIT_LIST_HEAD(&pgpio->isr);
+ spin_lock_init(&pgpio->lock);
+
+ return nouveau_gpio_init(dev);
+}
+
+void
+nouveau_gpio_destroy(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+
+ nouveau_gpio_fini(dev);
+ BUG_ON(!list_empty(&pgpio->isr));
+}
+
+int
+nouveau_gpio_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+ int ret = 0;
+
+ if (pgpio->init)
+ ret = pgpio->init(dev);
+
+ return ret;
+}
+
+void
+nouveau_gpio_fini(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+
+ if (pgpio->fini)
+ pgpio->fini(dev);
+}
+
+void
+nouveau_gpio_reset(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ u8 *entry, version;
+ int ent = -1;
+
+ while ((entry = dcb_gpio_entry(dev, 0, ++ent, &version))) {
+ u8 func = 0xff, line, defs, unk0, unk1;
+ if (version >= 0x41) {
+ defs = !!(entry[0] & 0x80);
+ line = entry[0] & 0x3f;
+ func = entry[1];
+ unk0 = entry[2];
+ unk1 = entry[3] & 0x1f;
+ } else
+ if (version >= 0x40) {
+ line = entry[0] & 0x1f;
+ func = entry[1];
+ defs = !!(entry[3] & 0x01);
+ unk0 = !!(entry[3] & 0x02);
+ unk1 = !!(entry[3] & 0x04);
+ } else {
+ break;
+ }
+
+ if (func == 0xff)
+ continue;
+
+ nouveau_gpio_func_set(dev, func, defs);
+
+ if (dev_priv->card_type >= NV_D0) {
+ nv_mask(dev, 0x00d610 + (line * 4), 0xff, unk0);
+ if (unk1--)
+ nv_mask(dev, 0x00d640 + (unk1 * 4), 0xff, line);
+ } else
+ if (dev_priv->card_type >= NV_50) {
+ static const u32 regs[] = { 0xe100, 0xe28c };
+ u32 val = (unk1 << 16) | unk0;
+ u32 reg = regs[line >> 4]; line &= 0x0f;
+
+ nv_mask(dev, reg, 0x00010001 << line, val << line);
+ }
+ }
+}
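
Editor's sketch: nouveau_gpio_find() above handles three DCB GPIO table layouts; for tables older than version 4.0 each entry is one packed little-endian u16. A self-contained sketch of that decode, with a made-up entry value:

#include <stdio.h>
#include <stdint.h>

struct gpio_func {
        uint8_t func;
        uint8_t line;
        uint8_t log[2];
};

/* Decode a pre-4.0 DCB GPIO entry (one little-endian u16), following the
 * bit layout used in nouveau_gpio_find(): line in bits 0-4, function in
 * bits 5-10, and the two logic-state fields in bits 11-12 and 13-14. */
static void decode_gpio_v30(uint16_t data, struct gpio_func *gpio)
{
        gpio->line   = (data & 0x001f) >> 0;
        gpio->func   = (data & 0x07e0) >> 5;
        gpio->log[0] = (data & 0x1800) >> 11;
        gpio->log[1] = (data & 0x6000) >> 13;
}

int main(void)
{
        struct gpio_func gpio;

        decode_gpio_v30(0x2830, &gpio);   /* hypothetical table entry */
        printf("line %u func 0x%02x log0 %u log1 %u\n",
               gpio.line, gpio.func, gpio.log[0], gpio.log[1]);
        return 0;
}
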
diff --git a/drivers/gpu/drm/nouveau/nouveau_gpio.h b/drivers/gpu/drm/nouveau/nouveau_gpio.h
new file mode 100644
index 000000000000..64c5cb077ace
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_gpio.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NOUVEAU_GPIO_H__
+#define __NOUVEAU_GPIO_H__
+
+struct gpio_func {
+ u8 func;
+ u8 line;
+ u8 log[2];
+};
+
+/* nouveau_gpio.c */
+int nouveau_gpio_create(struct drm_device *);
+void nouveau_gpio_destroy(struct drm_device *);
+int nouveau_gpio_init(struct drm_device *);
+void nouveau_gpio_fini(struct drm_device *);
+void nouveau_gpio_reset(struct drm_device *);
+int nouveau_gpio_drive(struct drm_device *, int idx, int line,
+ int dir, int out);
+int nouveau_gpio_sense(struct drm_device *, int idx, int line);
+int nouveau_gpio_find(struct drm_device *, int idx, u8 tag, u8 line,
+ struct gpio_func *);
+int nouveau_gpio_set(struct drm_device *, int idx, u8 tag, u8 line, int state);
+int nouveau_gpio_get(struct drm_device *, int idx, u8 tag, u8 line);
+int nouveau_gpio_irq(struct drm_device *, int idx, u8 tag, u8 line, bool on);
+void nouveau_gpio_isr(struct drm_device *, int idx, u32 mask);
+int nouveau_gpio_isr_add(struct drm_device *, int idx, u8 tag, u8 line,
+ void (*)(void *, int state), void *data);
+void nouveau_gpio_isr_del(struct drm_device *, int idx, u8 tag, u8 line,
+ void (*)(void *, int state), void *data);
+
+static inline bool
+nouveau_gpio_func_valid(struct drm_device *dev, u8 tag)
+{
+ struct gpio_func func;
+ return (nouveau_gpio_find(dev, 0, tag, 0xff, &func)) == 0;
+}
+
+static inline int
+nouveau_gpio_func_set(struct drm_device *dev, u8 tag, int state)
+{
+ return nouveau_gpio_set(dev, 0, tag, 0xff, state);
+}
+
+static inline int
+nouveau_gpio_func_get(struct drm_device *dev, u8 tag)
+{
+ return nouveau_gpio_get(dev, 0, tag, 0xff);
+}
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_hdmi.c b/drivers/gpu/drm/nouveau/nouveau_hdmi.c
new file mode 100644
index 000000000000..59ea1c14eca0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_hdmi.c
@@ -0,0 +1,258 @@
+/*
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_connector.h"
+#include "nouveau_encoder.h"
+#include "nouveau_crtc.h"
+
+static bool
+hdmi_sor(struct drm_encoder *encoder)
+{
+ struct drm_nouveau_private *dev_priv = encoder->dev->dev_private;
+ if (dev_priv->chipset < 0xa3)
+ return false;
+ return true;
+}
+
+static inline u32
+hdmi_base(struct drm_encoder *encoder)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);
+ if (!hdmi_sor(encoder))
+ return 0x616500 + (nv_crtc->index * 0x800);
+ return 0x61c500 + (nv_encoder->or * 0x800);
+}
+
+static void
+hdmi_wr32(struct drm_encoder *encoder, u32 reg, u32 val)
+{
+ nv_wr32(encoder->dev, hdmi_base(encoder) + reg, val);
+}
+
+static u32
+hdmi_rd32(struct drm_encoder *encoder, u32 reg)
+{
+ return nv_rd32(encoder->dev, hdmi_base(encoder) + reg);
+}
+
+static u32
+hdmi_mask(struct drm_encoder *encoder, u32 reg, u32 mask, u32 val)
+{
+ u32 tmp = hdmi_rd32(encoder, reg);
+ hdmi_wr32(encoder, reg, (tmp & ~mask) | val);
+ return tmp;
+}
+
+static void
+nouveau_audio_disconnect(struct drm_encoder *encoder)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct drm_device *dev = encoder->dev;
+ u32 or = nv_encoder->or * 0x800;
+
+ if (hdmi_sor(encoder)) {
+ nv_mask(dev, 0x61c448 + or, 0x00000003, 0x00000000);
+ }
+}
+
+static void
+nouveau_audio_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_connector *nv_connector;
+ struct drm_device *dev = encoder->dev;
+ u32 or = nv_encoder->or * 0x800;
+ int i;
+
+ nv_connector = nouveau_encoder_connector_get(nv_encoder);
+ if (!drm_detect_monitor_audio(nv_connector->edid)) {
+ nouveau_audio_disconnect(encoder);
+ return;
+ }
+
+ if (hdmi_sor(encoder)) {
+ nv_mask(dev, 0x61c448 + or, 0x00000001, 0x00000001);
+
+ drm_edid_to_eld(&nv_connector->base, nv_connector->edid);
+ if (nv_connector->base.eld[0]) {
+ u8 *eld = nv_connector->base.eld;
+ for (i = 0; i < eld[2] * 4; i++)
+ nv_wr32(dev, 0x61c440 + or, (i << 8) | eld[i]);
+ for (i = eld[2] * 4; i < 0x60; i++)
+ nv_wr32(dev, 0x61c440 + or, (i << 8) | 0x00);
+ nv_mask(dev, 0x61c448 + or, 0x00000002, 0x00000002);
+ }
+ }
+}
+
+static void
+nouveau_hdmi_infoframe(struct drm_encoder *encoder, u32 ctrl, u8 *frame)
+{
+ /* calculate checksum for the infoframe */
+ u8 sum = 0, i;
+ for (i = 0; i < frame[2]; i++)
+ sum += frame[i];
+ frame[3] = 256 - sum;
+
+ /* disable infoframe, and write header */
+ hdmi_mask(encoder, ctrl + 0x00, 0x00000001, 0x00000000);
+ hdmi_wr32(encoder, ctrl + 0x08, *(u32 *)frame & 0xffffff);
+
+ /* register scans tell me the audio infoframe has only one set of
+ * subpack regs, according to tegra (gee nvidia, it'd be nice if we
+ * could get those docs too!), the hdmi block pads out the rest of
+ * the packet on its own.
+ */
+ if (ctrl == 0x020)
+ frame[2] = 6;
+
+ /* write out checksum and data, weird weird 7 byte register pairs */
+ for (i = 0; i < frame[2] + 1; i += 7) {
+ u32 rsubpack = ctrl + 0x0c + ((i / 7) * 8);
+ u32 *subpack = (u32 *)&frame[3 + i];
+ hdmi_wr32(encoder, rsubpack + 0, subpack[0]);
+ hdmi_wr32(encoder, rsubpack + 4, subpack[1] & 0xffffff);
+ }
+
+ /* enable the infoframe */
+ hdmi_mask(encoder, ctrl, 0x00000001, 0x00000001);
+}
+
+static void
+nouveau_hdmi_video_infoframe(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ const u8 Y = 0, A = 0, B = 0, S = 0, C = 0, M = 0, R = 0;
+ const u8 ITC = 0, EC = 0, Q = 0, SC = 0, VIC = 0, PR = 0;
+ const u8 bar_top = 0, bar_bottom = 0, bar_left = 0, bar_right = 0;
+ u8 frame[20];
+
+ frame[0x00] = 0x82; /* AVI infoframe */
+ frame[0x01] = 0x02; /* version */
+ frame[0x02] = 0x0d; /* length */
+ frame[0x03] = 0x00;
+ frame[0x04] = (Y << 5) | (A << 4) | (B << 2) | S;
+ frame[0x05] = (C << 6) | (M << 4) | R;
+ frame[0x06] = (ITC << 7) | (EC << 4) | (Q << 2) | SC;
+ frame[0x07] = VIC;
+ frame[0x08] = PR;
+ frame[0x09] = bar_top & 0xff;
+ frame[0x0a] = bar_top >> 8;
+ frame[0x0b] = bar_bottom & 0xff;
+ frame[0x0c] = bar_bottom >> 8;
+ frame[0x0d] = bar_left & 0xff;
+ frame[0x0e] = bar_left >> 8;
+ frame[0x0f] = bar_right & 0xff;
+ frame[0x10] = bar_right >> 8;
+ frame[0x11] = 0x00;
+ frame[0x12] = 0x00;
+ frame[0x13] = 0x00;
+
+ nouveau_hdmi_infoframe(encoder, 0x020, frame);
+}
+
+static void
+nouveau_hdmi_audio_infoframe(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ const u8 CT = 0x00, CC = 0x01, ceaSS = 0x00, SF = 0x00, FMT = 0x00;
+ const u8 CA = 0x00, DM_INH = 0, LSV = 0x00;
+ u8 frame[12];
+
+ frame[0x00] = 0x84; /* Audio infoframe */
+ frame[0x01] = 0x01; /* version */
+ frame[0x02] = 0x0a; /* length */
+ frame[0x03] = 0x00;
+ frame[0x04] = (CT << 4) | CC;
+ frame[0x05] = (SF << 2) | ceaSS;
+ frame[0x06] = FMT;
+ frame[0x07] = CA;
+ frame[0x08] = (DM_INH << 7) | (LSV << 3);
+ frame[0x09] = 0x00;
+ frame[0x0a] = 0x00;
+ frame[0x0b] = 0x00;
+
+ nouveau_hdmi_infoframe(encoder, 0x000, frame);
+}
+
+static void
+nouveau_hdmi_disconnect(struct drm_encoder *encoder)
+{
+ nouveau_audio_disconnect(encoder);
+
+ /* disable audio and avi infoframes */
+ hdmi_mask(encoder, 0x000, 0x00000001, 0x00000000);
+ hdmi_mask(encoder, 0x020, 0x00000001, 0x00000000);
+
+ /* disable hdmi */
+ hdmi_mask(encoder, 0x0a4, 0x40000000, 0x00000000);
+}
+
+void
+nouveau_hdmi_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_connector *nv_connector;
+ struct drm_device *dev = encoder->dev;
+ u32 max_ac_packet, rekey;
+
+ nv_connector = nouveau_encoder_connector_get(nv_encoder);
+ if (!mode || !nv_connector || !nv_connector->edid ||
+ !drm_detect_hdmi_monitor(nv_connector->edid)) {
+ nouveau_hdmi_disconnect(encoder);
+ return;
+ }
+
+ nouveau_hdmi_video_infoframe(encoder, mode);
+ nouveau_hdmi_audio_infoframe(encoder, mode);
+
+ hdmi_mask(encoder, 0x0d0, 0x00070001, 0x00010001); /* SPARE, HW_CTS */
+ hdmi_mask(encoder, 0x068, 0x00010101, 0x00000000); /* ACR_CTRL, ?? */
+ hdmi_mask(encoder, 0x078, 0x80000000, 0x80000000); /* ACR_0441_ENABLE */
+
+ nv_mask(dev, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
+ nv_mask(dev, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
+ nv_mask(dev, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */
+
+ /* value matches nvidia binary driver, and tegra constant */
+ rekey = 56;
+
+ max_ac_packet = mode->htotal - mode->hdisplay;
+ max_ac_packet -= rekey;
+ max_ac_packet -= 18; /* constant from tegra */
+ max_ac_packet /= 32;
+
+ /* enable hdmi */
+ hdmi_mask(encoder, 0x0a4, 0x5f1f003f, 0x40000000 | /* enable */
+ 0x1f000000 | /* unknown */
+ max_ac_packet << 16 |
+ rekey);
+
+ nouveau_audio_mode_set(encoder, mode);
+}
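
Editor's sketch: nouveau_hdmi_infoframe() fills frame[3] so that the infoframe bytes sum to zero modulo 256, the usual CEA-861 checksum rule. A standalone illustration of that rule; the loop bounds here follow the generic header-plus-payload form, which may differ slightly from the in-driver loop:

#include <stdio.h>
#include <stdint.h>

/* CEA-861 infoframe checksum: the byte at offset 3 is chosen so that the
 * 3-byte header, the checksum byte and the payload sum to zero mod 256. */
static void infoframe_checksum(uint8_t *frame)
{
        uint8_t sum = 0;
        int i, len = 4 + frame[2];      /* header + checksum + payload */

        frame[3] = 0;
        for (i = 0; i < len; i++)
                sum += frame[i];
        frame[3] = (uint8_t)(0x100 - sum);
}

int main(void)
{
        uint8_t avi[17] = { 0x82, 0x02, 0x0d };  /* AVI header, zeroed payload */
        uint8_t check = 0;
        int i;

        infoframe_checksum(avi);
        for (i = 0; i < 4 + avi[2]; i++)
                check += avi[i];
        printf("checksum byte 0x%02x, total mod 256 = %u\n", avi[3], check);
        return 0;
}
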
diff --git a/drivers/gpu/drm/nouveau/nouveau_hwsq.h b/drivers/gpu/drm/nouveau/nouveau_hwsq.h
new file mode 100644
index 000000000000..697687593a81
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_hwsq.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#ifndef __NOUVEAU_HWSQ_H__
+#define __NOUVEAU_HWSQ_H__
+
+struct hwsq_ucode {
+ u8 data[0x200];
+ union {
+ u8 *u08;
+ u16 *u16;
+ u32 *u32;
+ } ptr;
+ u16 len;
+
+ u32 reg;
+ u32 val;
+};
+
+static inline void
+hwsq_init(struct hwsq_ucode *hwsq)
+{
+ hwsq->ptr.u08 = hwsq->data;
+ hwsq->reg = 0xffffffff;
+ hwsq->val = 0xffffffff;
+}
+
+static inline void
+hwsq_fini(struct hwsq_ucode *hwsq)
+{
+ do {
+ *hwsq->ptr.u08++ = 0x7f;
+ hwsq->len = hwsq->ptr.u08 - hwsq->data;
+ } while (hwsq->len & 3);
+ hwsq->ptr.u08 = hwsq->data;
+}
+
+static inline void
+hwsq_usec(struct hwsq_ucode *hwsq, u8 usec)
+{
+ u32 shift = 0;
+ while (usec & ~3) {
+ usec >>= 2;
+ shift++;
+ }
+
+ *hwsq->ptr.u08++ = (shift << 2) | usec;
+}
+
+static inline void
+hwsq_setf(struct hwsq_ucode *hwsq, u8 flag, int val)
+{
+ flag += 0x80;
+ if (val >= 0)
+ flag += 0x20;
+ if (val >= 1)
+ flag += 0x20;
+ *hwsq->ptr.u08++ = flag;
+}
+
+static inline void
+hwsq_op5f(struct hwsq_ucode *hwsq, u8 v0, u8 v1)
+{
+ *hwsq->ptr.u08++ = 0x5f;
+ *hwsq->ptr.u08++ = v0;
+ *hwsq->ptr.u08++ = v1;
+}
+
+static inline void
+hwsq_wr32(struct hwsq_ucode *hwsq, u32 reg, u32 val)
+{
+ if (val != hwsq->val) {
+ if ((val & 0xffff0000) == (hwsq->val & 0xffff0000)) {
+ *hwsq->ptr.u08++ = 0x42;
+ *hwsq->ptr.u16++ = (val & 0x0000ffff);
+ } else {
+ *hwsq->ptr.u08++ = 0xe2;
+ *hwsq->ptr.u32++ = val;
+ }
+
+ hwsq->val = val;
+ }
+
+ if ((reg & 0xffff0000) == (hwsq->reg & 0xffff0000)) {
+ *hwsq->ptr.u08++ = 0x40;
+ *hwsq->ptr.u16++ = (reg & 0x0000ffff);
+ } else {
+ *hwsq->ptr.u08++ = 0xe0;
+ *hwsq->ptr.u32++ = reg;
+ }
+ hwsq->reg = reg;
+}
+
+#endif
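
Editor's sketch: the hwsq helpers above build a small HWSQ ucode by caching the last register and value loaded, emitting short imm16 opcodes when only the low half changes and full imm32 opcodes otherwise. A toy userspace re-implementation of that encoding, using hypothetical register addresses, that hex-dumps the resulting bytes (native byte order, matching the pointer-punning in the header on little-endian hosts):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct ucode {
        uint8_t  data[0x200];
        uint16_t len;
        uint32_t reg, val;
};

static void emit8(struct ucode *u, uint8_t b)   { u->data[u->len++] = b; }
static void emit16(struct ucode *u, uint16_t v) { memcpy(&u->data[u->len], &v, 2); u->len += 2; }
static void emit32(struct ucode *u, uint32_t v) { memcpy(&u->data[u->len], &v, 4); u->len += 4; }

/* Mirror hwsq_wr32(): load the value (0x42 imm16 when only the low half
 * changed, 0xe2 imm32 otherwise), then the address (0x40 / 0xe0). */
static void wr32(struct ucode *u, uint32_t reg, uint32_t val)
{
        if (val != u->val) {
                if ((val & 0xffff0000) == (u->val & 0xffff0000)) {
                        emit8(u, 0x42); emit16(u, val & 0xffff);
                } else {
                        emit8(u, 0xe2); emit32(u, val);
                }
                u->val = val;
        }

        if ((reg & 0xffff0000) == (u->reg & 0xffff0000)) {
                emit8(u, 0x40); emit16(u, reg & 0xffff);
        } else {
                emit8(u, 0xe0); emit32(u, reg);
        }
        u->reg = reg;
}

int main(void)
{
        struct ucode u = { .reg = 0xffffffff, .val = 0xffffffff };
        int i;

        wr32(&u, 0x001700, 0x00000001);   /* hypothetical register writes; */
        wr32(&u, 0x001704, 0x00000002);   /* the second reuses the 0x0017xxxx prefix */

        for (i = 0; i < u.len; i++)
                printf("%02x ", u.data[i]);
        printf("(%u bytes)\n", u.len);
        return 0;
}
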
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c
index d39b2202b197..820ae7f52044 100644
--- a/drivers/gpu/drm/nouveau/nouveau_i2c.c
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c
@@ -29,262 +29,465 @@
#include "nouveau_i2c.h"
#include "nouveau_hw.h"
+#define T_TIMEOUT 2200000
+#define T_RISEFALL 1000
+#define T_HOLD 5000
+
static void
-nv04_i2c_setscl(void *data, int state)
+i2c_drive_scl(void *data, int state)
{
- struct nouveau_i2c_chan *i2c = data;
- struct drm_device *dev = i2c->dev;
- uint8_t val;
-
- val = (NVReadVgaCrtc(dev, 0, i2c->wr) & 0xd0) | (state ? 0x20 : 0);
- NVWriteVgaCrtc(dev, 0, i2c->wr, val | 0x01);
+ struct nouveau_i2c_chan *port = data;
+ if (port->type == 0) {
+ u8 val = NVReadVgaCrtc(port->dev, 0, port->drive);
+ if (state) val |= 0x20;
+ else val &= 0xdf;
+ NVWriteVgaCrtc(port->dev, 0, port->drive, val | 0x01);
+ } else
+ if (port->type == 4) {
+ nv_mask(port->dev, port->drive, 0x2f, state ? 0x21 : 0x01);
+ } else
+ if (port->type == 5) {
+ if (state) port->state |= 0x01;
+ else port->state &= 0xfe;
+ nv_wr32(port->dev, port->drive, 4 | port->state);
+ }
}
static void
-nv04_i2c_setsda(void *data, int state)
+i2c_drive_sda(void *data, int state)
{
- struct nouveau_i2c_chan *i2c = data;
- struct drm_device *dev = i2c->dev;
- uint8_t val;
-
- val = (NVReadVgaCrtc(dev, 0, i2c->wr) & 0xe0) | (state ? 0x10 : 0);
- NVWriteVgaCrtc(dev, 0, i2c->wr, val | 0x01);
+ struct nouveau_i2c_chan *port = data;
+ if (port->type == 0) {
+ u8 val = NVReadVgaCrtc(port->dev, 0, port->drive);
+ if (state) val |= 0x10;
+ else val &= 0xef;
+ NVWriteVgaCrtc(port->dev, 0, port->drive, val | 0x01);
+ } else
+ if (port->type == 4) {
+ nv_mask(port->dev, port->drive, 0x1f, state ? 0x11 : 0x01);
+ } else
+ if (port->type == 5) {
+ if (state) port->state |= 0x02;
+ else port->state &= 0xfd;
+ nv_wr32(port->dev, port->drive, 4 | port->state);
+ }
}
static int
-nv04_i2c_getscl(void *data)
+i2c_sense_scl(void *data)
{
- struct nouveau_i2c_chan *i2c = data;
- struct drm_device *dev = i2c->dev;
-
- return !!(NVReadVgaCrtc(dev, 0, i2c->rd) & 4);
+ struct nouveau_i2c_chan *port = data;
+ struct drm_nouveau_private *dev_priv = port->dev->dev_private;
+ if (port->type == 0) {
+ return !!(NVReadVgaCrtc(port->dev, 0, port->sense) & 0x04);
+ } else
+ if (port->type == 4) {
+ return !!(nv_rd32(port->dev, port->sense) & 0x00040000);
+ } else
+ if (port->type == 5) {
+ if (dev_priv->card_type < NV_D0)
+ return !!(nv_rd32(port->dev, port->sense) & 0x01);
+ else
+ return !!(nv_rd32(port->dev, port->sense) & 0x10);
+ }
+ return 0;
}
static int
-nv04_i2c_getsda(void *data)
+i2c_sense_sda(void *data)
{
- struct nouveau_i2c_chan *i2c = data;
- struct drm_device *dev = i2c->dev;
-
- return !!(NVReadVgaCrtc(dev, 0, i2c->rd) & 8);
+ struct nouveau_i2c_chan *port = data;
+ struct drm_nouveau_private *dev_priv = port->dev->dev_private;
+ if (port->type == 0) {
+ return !!(NVReadVgaCrtc(port->dev, 0, port->sense) & 0x08);
+ } else
+ if (port->type == 4) {
+ return !!(nv_rd32(port->dev, port->sense) & 0x00080000);
+ } else
+ if (port->type == 5) {
+ if (dev_priv->card_type < NV_D0)
+ return !!(nv_rd32(port->dev, port->sense) & 0x02);
+ else
+ return !!(nv_rd32(port->dev, port->sense) & 0x20);
+ }
+ return 0;
}
static void
-nv4e_i2c_setscl(void *data, int state)
+i2c_delay(struct nouveau_i2c_chan *port, u32 nsec)
{
- struct nouveau_i2c_chan *i2c = data;
- struct drm_device *dev = i2c->dev;
- uint8_t val;
-
- val = (nv_rd32(dev, i2c->wr) & 0xd0) | (state ? 0x20 : 0);
- nv_wr32(dev, i2c->wr, val | 0x01);
+ udelay((nsec + 500) / 1000);
}
-static void
-nv4e_i2c_setsda(void *data, int state)
+static bool
+i2c_raise_scl(struct nouveau_i2c_chan *port)
{
- struct nouveau_i2c_chan *i2c = data;
- struct drm_device *dev = i2c->dev;
- uint8_t val;
+ u32 timeout = T_TIMEOUT / T_RISEFALL;
+
+ i2c_drive_scl(port, 1);
+ do {
+ i2c_delay(port, T_RISEFALL);
+ } while (!i2c_sense_scl(port) && --timeout);
- val = (nv_rd32(dev, i2c->wr) & 0xe0) | (state ? 0x10 : 0);
- nv_wr32(dev, i2c->wr, val | 0x01);
+ return timeout != 0;
}
static int
-nv4e_i2c_getscl(void *data)
+i2c_start(struct nouveau_i2c_chan *port)
{
- struct nouveau_i2c_chan *i2c = data;
- struct drm_device *dev = i2c->dev;
+ int ret = 0;
+
+ port->state = i2c_sense_scl(port);
+ port->state |= i2c_sense_sda(port) << 1;
+ if (port->state != 3) {
+ i2c_drive_scl(port, 0);
+ i2c_drive_sda(port, 1);
+ if (!i2c_raise_scl(port))
+ ret = -EBUSY;
+ }
- return !!((nv_rd32(dev, i2c->rd) >> 16) & 4);
+ i2c_drive_sda(port, 0);
+ i2c_delay(port, T_HOLD);
+ i2c_drive_scl(port, 0);
+ i2c_delay(port, T_HOLD);
+ return ret;
}
-static int
-nv4e_i2c_getsda(void *data)
+static void
+i2c_stop(struct nouveau_i2c_chan *port)
{
- struct nouveau_i2c_chan *i2c = data;
- struct drm_device *dev = i2c->dev;
-
- return !!((nv_rd32(dev, i2c->rd) >> 16) & 8);
+ i2c_drive_scl(port, 0);
+ i2c_drive_sda(port, 0);
+ i2c_delay(port, T_RISEFALL);
+
+ i2c_drive_scl(port, 1);
+ i2c_delay(port, T_HOLD);
+ i2c_drive_sda(port, 1);
+ i2c_delay(port, T_HOLD);
}
-static const uint32_t nv50_i2c_port[] = {
- 0x00e138, 0x00e150, 0x00e168, 0x00e180,
- 0x00e254, 0x00e274, 0x00e764, 0x00e780,
- 0x00e79c, 0x00e7b8
-};
-#define NV50_I2C_PORTS ARRAY_SIZE(nv50_i2c_port)
-
static int
-nv50_i2c_getscl(void *data)
+i2c_bitw(struct nouveau_i2c_chan *port, int sda)
{
- struct nouveau_i2c_chan *i2c = data;
- struct drm_device *dev = i2c->dev;
+ i2c_drive_sda(port, sda);
+ i2c_delay(port, T_RISEFALL);
- return !!(nv_rd32(dev, i2c->rd) & 1);
-}
+ if (!i2c_raise_scl(port))
+ return -ETIMEDOUT;
+ i2c_delay(port, T_HOLD);
+ i2c_drive_scl(port, 0);
+ i2c_delay(port, T_HOLD);
+ return 0;
+}
static int
-nv50_i2c_getsda(void *data)
+i2c_bitr(struct nouveau_i2c_chan *port)
{
- struct nouveau_i2c_chan *i2c = data;
- struct drm_device *dev = i2c->dev;
+ int sda;
+
+ i2c_drive_sda(port, 1);
+ i2c_delay(port, T_RISEFALL);
- return !!(nv_rd32(dev, i2c->rd) & 2);
+ if (!i2c_raise_scl(port))
+ return -ETIMEDOUT;
+ i2c_delay(port, T_HOLD);
+
+ sda = i2c_sense_sda(port);
+
+ i2c_drive_scl(port, 0);
+ i2c_delay(port, T_HOLD);
+ return sda;
}
-static void
-nv50_i2c_setscl(void *data, int state)
+static int
+i2c_get_byte(struct nouveau_i2c_chan *port, u8 *byte, bool last)
{
- struct nouveau_i2c_chan *i2c = data;
+ int i, bit;
+
+ *byte = 0;
+ for (i = 7; i >= 0; i--) {
+ bit = i2c_bitr(port);
+ if (bit < 0)
+ return bit;
+ *byte |= bit << i;
+ }
- nv_wr32(i2c->dev, i2c->wr, 4 | (i2c->data ? 2 : 0) | (state ? 1 : 0));
+ return i2c_bitw(port, last ? 1 : 0);
}
-static void
-nv50_i2c_setsda(void *data, int state)
+static int
+i2c_put_byte(struct nouveau_i2c_chan *port, u8 byte)
{
- struct nouveau_i2c_chan *i2c = data;
+ int i, ret;
+ for (i = 7; i >= 0; i--) {
+ ret = i2c_bitw(port, !!(byte & (1 << i)));
+ if (ret < 0)
+ return ret;
+ }
- nv_mask(i2c->dev, i2c->wr, 0x00000006, 4 | (state ? 2 : 0));
- i2c->data = state;
+ ret = i2c_bitr(port);
+ if (ret == 1) /* nack */
+ ret = -EIO;
+ return ret;
}
static int
-nvd0_i2c_getscl(void *data)
+i2c_addr(struct nouveau_i2c_chan *port, struct i2c_msg *msg)
{
- struct nouveau_i2c_chan *i2c = data;
- return !!(nv_rd32(i2c->dev, i2c->rd) & 0x10);
+ u32 addr = msg->addr << 1;
+ if (msg->flags & I2C_M_RD)
+ addr |= 1;
+ return i2c_put_byte(port, addr);
}
static int
-nvd0_i2c_getsda(void *data)
+i2c_bit_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
- struct nouveau_i2c_chan *i2c = data;
- return !!(nv_rd32(i2c->dev, i2c->rd) & 0x20);
+ struct nouveau_i2c_chan *port = (struct nouveau_i2c_chan *)adap;
+ struct i2c_msg *msg = msgs;
+ int ret = 0, mcnt = num;
+
+ while (!ret && mcnt--) {
+ u8 remaining = msg->len;
+ u8 *ptr = msg->buf;
+
+ ret = i2c_start(port);
+ if (ret == 0)
+ ret = i2c_addr(port, msg);
+
+ if (msg->flags & I2C_M_RD) {
+ while (!ret && remaining--)
+ ret = i2c_get_byte(port, ptr++, !remaining);
+ } else {
+ while (!ret && remaining--)
+ ret = i2c_put_byte(port, *ptr++);
+ }
+
+ msg++;
+ }
+
+ i2c_stop(port);
+ return (ret < 0) ? ret : num;
}
-int
-nouveau_i2c_init(struct drm_device *dev, struct dcb_i2c_entry *entry, int index)
+static u32
+i2c_bit_func(struct i2c_adapter *adap)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_i2c_chan *i2c;
- int ret;
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+const struct i2c_algorithm i2c_bit_algo = {
+ .master_xfer = i2c_bit_xfer,
+ .functionality = i2c_bit_func
+};
+
+static const uint32_t nv50_i2c_port[] = {
+ 0x00e138, 0x00e150, 0x00e168, 0x00e180,
+ 0x00e254, 0x00e274, 0x00e764, 0x00e780,
+ 0x00e79c, 0x00e7b8
+};
- if (entry->chan)
- return -EEXIST;
+static u8 *
+i2c_table(struct drm_device *dev, u8 *version)
+{
+ u8 *dcb = dcb_table(dev), *i2c = NULL;
+ if (dcb) {
+ if (dcb[0] >= 0x15)
+ i2c = ROMPTR(dev, dcb[2]);
+ if (dcb[0] >= 0x30)
+ i2c = ROMPTR(dev, dcb[4]);
+ }
- if (dev_priv->card_type >= NV_50 &&
- dev_priv->card_type <= NV_C0 && entry->read >= NV50_I2C_PORTS) {
- NV_ERROR(dev, "unknown i2c port %d\n", entry->read);
- return -EINVAL;
+ /* early revisions had no version number, use dcb version */
+ if (i2c) {
+ *version = dcb[0];
+ if (*version >= 0x30)
+ *version = i2c[0];
}
- i2c = kzalloc(sizeof(*i2c), GFP_KERNEL);
- if (i2c == NULL)
- return -ENOMEM;
-
- switch (entry->port_type) {
- case 0:
- i2c->bit.setsda = nv04_i2c_setsda;
- i2c->bit.setscl = nv04_i2c_setscl;
- i2c->bit.getsda = nv04_i2c_getsda;
- i2c->bit.getscl = nv04_i2c_getscl;
- i2c->rd = entry->read;
- i2c->wr = entry->write;
- break;
- case 4:
- i2c->bit.setsda = nv4e_i2c_setsda;
- i2c->bit.setscl = nv4e_i2c_setscl;
- i2c->bit.getsda = nv4e_i2c_getsda;
- i2c->bit.getscl = nv4e_i2c_getscl;
- i2c->rd = 0x600800 + entry->read;
- i2c->wr = 0x600800 + entry->write;
- break;
- case 5:
- i2c->bit.setsda = nv50_i2c_setsda;
- i2c->bit.setscl = nv50_i2c_setscl;
- if (dev_priv->card_type < NV_D0) {
- i2c->bit.getsda = nv50_i2c_getsda;
- i2c->bit.getscl = nv50_i2c_getscl;
- i2c->rd = nv50_i2c_port[entry->read];
- i2c->wr = i2c->rd;
- } else {
- i2c->bit.getsda = nvd0_i2c_getsda;
- i2c->bit.getscl = nvd0_i2c_getscl;
- i2c->rd = 0x00d014 + (entry->read * 0x20);
- i2c->wr = i2c->rd;
- }
- break;
- case 6:
- i2c->rd = entry->read;
- i2c->wr = entry->write;
- break;
- default:
- NV_ERROR(dev, "DCB I2C port type %d unknown\n",
- entry->port_type);
- kfree(i2c);
- return -EINVAL;
+ return i2c;
+}
+
+int
+nouveau_i2c_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvbios *bios = &dev_priv->vbios;
+ struct nouveau_i2c_chan *port;
+ u8 *i2c, *entry, legacy[2][4] = {};
+ u8 version, entries, recordlen;
+ int ret, i;
+
+ INIT_LIST_HEAD(&dev_priv->i2c_ports);
+
+ i2c = i2c_table(dev, &version);
+ if (!i2c) {
+ u8 *bmp = &bios->data[bios->offset];
+ if (bios->type != NVBIOS_BMP)
+ return -ENODEV;
+
+ legacy[0][0] = NV_CIO_CRE_DDC_WR__INDEX;
+ legacy[0][1] = NV_CIO_CRE_DDC_STATUS__INDEX;
+ legacy[1][0] = NV_CIO_CRE_DDC0_WR__INDEX;
+ legacy[1][1] = NV_CIO_CRE_DDC0_STATUS__INDEX;
+
+ /* BMP (from v4.0) has i2c info in the structure, it's in a
+ * fixed location on earlier VBIOS
+ */
+ if (bmp[5] < 4)
+ i2c = &bios->data[0x48];
+ else
+ i2c = &bmp[0x36];
+
+ if (i2c[4]) legacy[0][0] = i2c[4];
+ if (i2c[5]) legacy[0][1] = i2c[5];
+ if (i2c[6]) legacy[1][0] = i2c[6];
+ if (i2c[7]) legacy[1][1] = i2c[7];
}
- snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
- "nouveau-%s-%d", pci_name(dev->pdev), index);
- i2c->adapter.owner = THIS_MODULE;
- i2c->adapter.dev.parent = &dev->pdev->dev;
- i2c->dev = dev;
- i2c_set_adapdata(&i2c->adapter, i2c);
-
- if (entry->port_type < 6) {
- i2c->adapter.algo_data = &i2c->bit;
- i2c->bit.udelay = 40;
- i2c->bit.timeout = usecs_to_jiffies(5000);
- i2c->bit.data = i2c;
- ret = i2c_bit_add_bus(&i2c->adapter);
+ if (i2c && version >= 0x30) {
+ entry = i2c[1] + i2c;
+ entries = i2c[2];
+ recordlen = i2c[3];
+ } else
+ if (i2c) {
+ entry = i2c;
+ entries = 16;
+ recordlen = 4;
} else {
- i2c->adapter.algo = &nouveau_dp_i2c_algo;
- ret = i2c_add_adapter(&i2c->adapter);
+ entry = legacy[0];
+ entries = 2;
+ recordlen = 4;
}
- if (ret) {
- NV_ERROR(dev, "Failed to register i2c %d\n", index);
- kfree(i2c);
- return ret;
+ for (i = 0; i < entries; i++, entry += recordlen) {
+ port = kzalloc(sizeof(*port), GFP_KERNEL);
+ if (port == NULL) {
+ nouveau_i2c_fini(dev);
+ return -ENOMEM;
+ }
+
+ port->type = entry[3];
+ if (version < 0x30) {
+ port->type &= 0x07;
+ if (port->type == 0x07)
+ port->type = 0xff;
+ }
+
+ if (port->type == 0xff) {
+ kfree(port);
+ continue;
+ }
+
+ switch (port->type) {
+ case 0: /* NV04:NV50 */
+ port->drive = entry[0];
+ port->sense = entry[1];
+ port->adapter.algo = &i2c_bit_algo;
+ break;
+ case 4: /* NV4E */
+ port->drive = 0x600800 + entry[1];
+ port->sense = port->drive;
+ port->adapter.algo = &i2c_bit_algo;
+ break;
+ case 5: /* NV50- */
+ port->drive = entry[0] & 0x0f;
+ if (dev_priv->card_type < NV_D0) {
+ if (port->drive >= ARRAY_SIZE(nv50_i2c_port))
+ break;
+ port->drive = nv50_i2c_port[port->drive];
+ port->sense = port->drive;
+ } else {
+ port->drive = 0x00d014 + (port->drive * 0x20);
+ port->sense = port->drive;
+ }
+ port->adapter.algo = &i2c_bit_algo;
+ break;
+ case 6: /* NV50- DP AUX */
+ port->drive = entry[0];
+ port->sense = port->drive;
+ port->adapter.algo = &nouveau_dp_i2c_algo;
+ break;
+ default:
+ break;
+ }
+
+ if (!port->adapter.algo) {
+ NV_ERROR(dev, "I2C%d: type %d index %x/%x unknown\n",
+ i, port->type, port->drive, port->sense);
+ kfree(port);
+ continue;
+ }
+
+ snprintf(port->adapter.name, sizeof(port->adapter.name),
+ "nouveau-%s-%d", pci_name(dev->pdev), i);
+ port->adapter.owner = THIS_MODULE;
+ port->adapter.dev.parent = &dev->pdev->dev;
+ port->dev = dev;
+ port->index = i;
+ port->dcb = ROM32(entry[0]);
+ i2c_set_adapdata(&port->adapter, i2c);
+
+ ret = i2c_add_adapter(&port->adapter);
+ if (ret) {
+ NV_ERROR(dev, "I2C%d: failed register: %d\n", i, ret);
+ kfree(port);
+ continue;
+ }
+
+ list_add_tail(&port->head, &dev_priv->i2c_ports);
}
- entry->chan = i2c;
return 0;
}
void
-nouveau_i2c_fini(struct drm_device *dev, struct dcb_i2c_entry *entry)
+nouveau_i2c_fini(struct drm_device *dev)
{
- if (!entry->chan)
- return;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_i2c_chan *port, *tmp;
- i2c_del_adapter(&entry->chan->adapter);
- kfree(entry->chan);
- entry->chan = NULL;
+ list_for_each_entry_safe(port, tmp, &dev_priv->i2c_ports, head) {
+ i2c_del_adapter(&port->adapter);
+ kfree(port);
+ }
}
struct nouveau_i2c_chan *
-nouveau_i2c_find(struct drm_device *dev, int index)
+nouveau_i2c_find(struct drm_device *dev, u8 index)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct dcb_i2c_entry *i2c = &dev_priv->vbios.dcb.i2c[index];
+ struct nouveau_i2c_chan *port;
+
+ if (index == NV_I2C_DEFAULT(0) ||
+ index == NV_I2C_DEFAULT(1)) {
+ u8 version, *i2c = i2c_table(dev, &version);
+ if (i2c && version >= 0x30) {
+ if (index == NV_I2C_DEFAULT(0))
+ index = (i2c[4] & 0x0f);
+ else
+ index = (i2c[4] & 0xf0) >> 4;
+ } else {
+ index = 2;
+ }
+ }
- if (index >= DCB_MAX_NUM_I2C_ENTRIES)
- return NULL;
+ list_for_each_entry(port, &dev_priv->i2c_ports, head) {
+ if (port->index == index)
+ break;
+ }
- if (dev_priv->card_type >= NV_50 && (i2c->entry & 0x00000100)) {
- uint32_t reg = 0xe500, val;
+ if (&port->head == &dev_priv->i2c_ports)
+ return NULL;
- if (i2c->port_type == 6) {
- reg += i2c->read * 0x50;
+ if (dev_priv->card_type >= NV_50 && (port->dcb & 0x00000100)) {
+ u32 reg = 0x00e500, val;
+ if (port->type == 6) {
+ reg += port->drive * 0x50;
val = 0x2002;
} else {
- reg += ((i2c->entry & 0x1e00) >> 9) * 0x50;
+ reg += ((port->dcb & 0x1e00) >> 9) * 0x50;
val = 0xe001;
}
@@ -294,9 +497,7 @@ nouveau_i2c_find(struct drm_device *dev, int index)
nv_mask(dev, reg + 0x00, 0x0000f003, val);
}
- if (!i2c->chan && nouveau_i2c_init(dev, i2c, index))
- return NULL;
- return i2c->chan;
+ return port;
}
bool
@@ -331,9 +532,13 @@ nouveau_i2c_identify(struct drm_device *dev, const char *what,
struct nouveau_i2c_chan *i2c = nouveau_i2c_find(dev, index);
int i;
- NV_DEBUG(dev, "Probing %ss on I2C bus: %d\n", what, index);
+ if (!i2c) {
+ NV_DEBUG(dev, "No bus when probing %s on %d\n", what, index);
+ return -ENODEV;
+ }
- for (i = 0; i2c && info[i].addr; i++) {
+ NV_DEBUG(dev, "Probing %ss on I2C bus: %d\n", what, i2c->index);
+ for (i = 0; info[i].addr; i++) {
if (nouveau_probe_i2c_addr(i2c, info[i].addr) &&
(!match || match(i2c, &info[i]))) {
NV_INFO(dev, "Detected %s: %s\n", what, info[i].type);
@@ -342,6 +547,5 @@ nouveau_i2c_identify(struct drm_device *dev, const char *what,
}
NV_DEBUG(dev, "No devices found.\n");
-
return -ENODEV;
}
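
Editor's sketch: the rewritten i2c code bit-bangs the bus directly; i2c_addr() shifts the 7-bit address left and ORs the R/W flag into bit 0, and i2c_put_byte() clocks each byte out MSB first before sampling the slave's ACK. A minimal sketch of that framing; the 0x50 address (the DDC/EDID address) is only an example:

#include <stdio.h>
#include <stdint.h>

/* Show the order in which bits would hit SDA for one byte, followed by
 * the extra clock used to sample the slave's acknowledge. */
static void show_byte(uint8_t byte)
{
        int i;

        for (i = 7; i >= 0; i--)
                printf("%d", (byte >> i) & 1);  /* MSB first, as in i2c_put_byte() */
        printf(" + ack slot\n");
}

int main(void)
{
        uint8_t addr = 0x50;                      /* hypothetical DDC/EDID slave */

        printf("write addr byte: ");
        show_byte((uint8_t)(addr << 1));          /* R/W = 0: write */
        printf("read  addr byte: ");
        show_byte((uint8_t)((addr << 1) | 1));    /* R/W = 1: read  */
        return 0;
}
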
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.h b/drivers/gpu/drm/nouveau/nouveau_i2c.h
index 422b62fd8272..4d2e4e9031be 100644
--- a/drivers/gpu/drm/nouveau/nouveau_i2c.h
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.h
@@ -27,20 +27,25 @@
#include <linux/i2c-algo-bit.h>
#include "drm_dp_helper.h"
-struct dcb_i2c_entry;
+#define NV_I2C_PORT(n) (0x00 + (n))
+#define NV_I2C_PORT_NUM 0x10
+#define NV_I2C_DEFAULT(n) (0x80 + (n))
struct nouveau_i2c_chan {
struct i2c_adapter adapter;
struct drm_device *dev;
- struct i2c_algo_bit_data bit;
- unsigned rd;
- unsigned wr;
- unsigned data;
+ struct list_head head;
+ u8 index;
+ u8 type;
+ u32 dcb;
+ u32 drive;
+ u32 sense;
+ u32 state;
};
-int nouveau_i2c_init(struct drm_device *, struct dcb_i2c_entry *, int index);
-void nouveau_i2c_fini(struct drm_device *, struct dcb_i2c_entry *);
-struct nouveau_i2c_chan *nouveau_i2c_find(struct drm_device *, int index);
+int nouveau_i2c_init(struct drm_device *);
+void nouveau_i2c_fini(struct drm_device *);
+struct nouveau_i2c_chan *nouveau_i2c_find(struct drm_device *, u8 index);
bool nouveau_probe_i2c_addr(struct nouveau_i2c_chan *i2c, int addr);
int nouveau_i2c_identify(struct drm_device *dev, const char *what,
struct i2c_board_info *info,
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 36bec4807701..c3a5745e9c79 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -407,6 +407,12 @@ nouveau_mem_vram_init(struct drm_device *dev)
ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
if (ret)
return ret;
+ ret = pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
+ if (ret) {
+ /* Reset to default value. */
+ pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(32));
+ }
+
ret = nouveau_ttm_global_init(dev_priv);
if (ret)
@@ -638,10 +644,10 @@ nouveau_mem_timing_init(struct drm_device *dev)
return;
if (P.version == 1)
- hdr = (struct nouveau_pm_tbl_header *) ROMPTR(bios, P.data[4]);
+ hdr = (struct nouveau_pm_tbl_header *) ROMPTR(dev, P.data[4]);
else
if (P.version == 2)
- hdr = (struct nouveau_pm_tbl_header *) ROMPTR(bios, P.data[8]);
+ hdr = (struct nouveau_pm_tbl_header *) ROMPTR(dev, P.data[8]);
else {
NV_WARN(dev, "unknown mem for BIT P %d\n", P.version);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_mxm.c b/drivers/gpu/drm/nouveau/nouveau_mxm.c
new file mode 100644
index 000000000000..8bccddf4eff0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_mxm.c
@@ -0,0 +1,677 @@
+/*
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <linux/acpi.h>
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+
+#define MXM_DBG(dev, fmt, args...) NV_DEBUG((dev), "MXM: " fmt, ##args)
+#define MXM_MSG(dev, fmt, args...) NV_INFO((dev), "MXM: " fmt, ##args)
+
+static u8 *
+mxms_data(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ return dev_priv->mxms;
+
+}
+
+static u16
+mxms_version(struct drm_device *dev)
+{
+ u8 *mxms = mxms_data(dev);
+ u16 version = (mxms[4] << 8) | mxms[5];
+ switch (version ) {
+ case 0x0200:
+ case 0x0201:
+ case 0x0300:
+ return version;
+ default:
+ break;
+ }
+
+ MXM_DBG(dev, "unknown version %d.%d\n", mxms[4], mxms[5]);
+ return 0x0000;
+}
+
+static u16
+mxms_headerlen(struct drm_device *dev)
+{
+ return 8;
+}
+
+static u16
+mxms_structlen(struct drm_device *dev)
+{
+ return *(u16 *)&mxms_data(dev)[6];
+}
+
+static bool
+mxms_checksum(struct drm_device *dev)
+{
+ u16 size = mxms_headerlen(dev) + mxms_structlen(dev);
+ u8 *mxms = mxms_data(dev), sum = 0;
+ while (size--)
+ sum += *mxms++;
+ if (sum) {
+ MXM_DBG(dev, "checksum invalid\n");
+ return false;
+ }
+ return true;
+}
+
+static bool
+mxms_valid(struct drm_device *dev)
+{
+ u8 *mxms = mxms_data(dev);
+ if (*(u32 *)mxms != 0x5f4d584d) {
+ MXM_DBG(dev, "signature invalid\n");
+ return false;
+ }
+
+ if (!mxms_version(dev) || !mxms_checksum(dev))
+ return false;
+
+ return true;
+}
+
+static bool
+mxms_foreach(struct drm_device *dev, u8 types,
+ bool (*exec)(struct drm_device *, u8 *, void *), void *info)
+{
+ u8 *mxms = mxms_data(dev);
+ u8 *desc = mxms + mxms_headerlen(dev);
+ u8 *fini = desc + mxms_structlen(dev) - 1;
+ while (desc < fini) {
+ u8 type = desc[0] & 0x0f;
+ u8 headerlen = 0;
+ u8 recordlen = 0;
+ u8 entries = 0;
+
+ switch (type) {
+ case 0: /* Output Device Structure */
+ if (mxms_version(dev) >= 0x0300)
+ headerlen = 8;
+ else
+ headerlen = 6;
+ break;
+ case 1: /* System Cooling Capability Structure */
+ case 2: /* Thermal Structure */
+ case 3: /* Input Power Structure */
+ headerlen = 4;
+ break;
+ case 4: /* GPIO Device Structure */
+ headerlen = 4;
+ recordlen = 2;
+ entries = (ROM32(desc[0]) & 0x01f00000) >> 20;
+ break;
+ case 5: /* Vendor Specific Structure */
+ headerlen = 8;
+ break;
+ case 6: /* Backlight Control Structure */
+ if (mxms_version(dev) >= 0x0300) {
+ headerlen = 4;
+ recordlen = 8;
+ entries = (desc[1] & 0xf0) >> 4;
+ } else {
+ headerlen = 8;
+ }
+ break;
+ case 7: /* Fan Control Structure */
+ headerlen = 8;
+ recordlen = 4;
+ entries = desc[1] & 0x07;
+ break;
+ default:
+ MXM_DBG(dev, "unknown descriptor type %d\n", type);
+ return false;
+ }
+
+ if ((drm_debug & DRM_UT_DRIVER) && (exec == NULL)) {
+ static const char * mxms_desc_name[] = {
+ "ODS", "SCCS", "TS", "IPS",
+ "GSD", "VSS", "BCS", "FCS",
+ };
+ u8 *dump = desc;
+ int i, j;
+
+ MXM_DBG(dev, "%4s: ", mxms_desc_name[type]);
+ for (j = headerlen - 1; j >= 0; j--)
+ printk("%02x", dump[j]);
+ printk("\n");
+ dump += headerlen;
+
+ for (i = 0; i < entries; i++, dump += recordlen) {
+ MXM_DBG(dev, " ");
+ for (j = recordlen - 1; j >= 0; j--)
+ printk("%02x", dump[j]);
+ printk("\n");
+ }
+ }
+
+ if (types & (1 << type)) {
+ if (!exec(dev, desc, info))
+ return false;
+ }
+
+ desc += headerlen + (entries * recordlen);
+ }
+
+ return true;
+}
+
+static u8 *
+mxm_table(struct drm_device *dev, u8 *size)
+{
+ struct bit_entry x;
+
+ if (bit_table(dev, 'x', &x)) {
+ MXM_DBG(dev, "BIT 'x' table not present\n");
+ return NULL;
+ }
+
+ if (x.version != 1 || x.length < 3) {
+ MXM_MSG(dev, "BIT x table %d/%d unknown\n",
+ x.version, x.length);
+ return NULL;
+ }
+
+ *size = x.length;
+ return x.data;
+}
+
+/* These map MXM v2.x digital connection values to the appropriate SOR/link,
+ * hopefully they're correct for all boards within the same chipset...
+ *
+ * MXM v3.x VBIOS are nicer and provide pointers to these tables.
+ */
+static u8 nv84_sor_map[16] = {
+ 0x00, 0x12, 0x22, 0x11, 0x32, 0x31, 0x11, 0x31,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+static u8 nv92_sor_map[16] = {
+ 0x00, 0x12, 0x22, 0x11, 0x32, 0x31, 0x11, 0x31,
+ 0x11, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+static u8 nv94_sor_map[16] = {
+ 0x00, 0x14, 0x24, 0x11, 0x34, 0x31, 0x11, 0x31,
+ 0x11, 0x31, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+static u8 nv96_sor_map[16] = {
+ 0x00, 0x14, 0x24, 0x00, 0x34, 0x00, 0x11, 0x31,
+ 0x11, 0x31, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+static u8 nv98_sor_map[16] = {
+ 0x00, 0x14, 0x12, 0x11, 0x00, 0x31, 0x11, 0x31,
+ 0x11, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+static u8
+mxm_sor_map(struct drm_device *dev, u8 conn)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ u8 len, *mxm = mxm_table(dev, &len);
+ if (mxm && len >= 6) {
+ u8 *map = ROMPTR(dev, mxm[4]);
+ if (map) {
+ if (map[0] == 0x10) {
+ if (conn < map[3])
+ return map[map[1] + conn];
+ return 0x00;
+ }
+
+ MXM_MSG(dev, "unknown sor map 0x%02x\n", map[0]);
+ }
+ }
+
+ if (dev_priv->chipset == 0x84 || dev_priv->chipset == 0x86)
+ return nv84_sor_map[conn];
+ if (dev_priv->chipset == 0x92)
+ return nv92_sor_map[conn];
+ if (dev_priv->chipset == 0x94)
+ return nv94_sor_map[conn];
+ if (dev_priv->chipset == 0x96)
+ return nv96_sor_map[conn];
+ if (dev_priv->chipset == 0x98)
+ return nv98_sor_map[conn];
+
+ MXM_MSG(dev, "missing sor map\n");
+ return 0x00;
+}
+
+static u8
+mxm_ddc_map(struct drm_device *dev, u8 port)
+{
+ u8 len, *mxm = mxm_table(dev, &len);
+ if (mxm && len >= 8) {
+ u8 *map = ROMPTR(dev, mxm[6]);
+ if (map) {
+ if (map[0] == 0x10) {
+ if (port < map[3])
+ return map[map[1] + port];
+ return 0x00;
+ }
+
+ MXM_MSG(dev, "unknown ddc map 0x%02x\n", map[0]);
+ }
+ }
+
+ /* v2.x: directly write port as dcb i2cidx */
+ return (port << 4) | port;
+}
+
+struct mxms_odev {
+ u8 outp_type;
+ u8 conn_type;
+ u8 ddc_port;
+ u8 dig_conn;
+};
+
+static void
+mxms_output_device(struct drm_device *dev, u8 *pdata, struct mxms_odev *desc)
+{
+ u64 data = ROM32(pdata[0]);
+ if (mxms_version(dev) >= 0x0300)
+ data |= (u64)ROM16(pdata[4]) << 32;
+
+ desc->outp_type = (data & 0x00000000000000f0ULL) >> 4;
+ desc->ddc_port = (data & 0x0000000000000f00ULL) >> 8;
+ desc->conn_type = (data & 0x000000000001f000ULL) >> 12;
+ desc->dig_conn = (data & 0x0000000000780000ULL) >> 19;
+}
+
+struct context {
+ u32 *outp;
+ struct mxms_odev desc;
+};
+
+static bool
+mxm_match_tmds_partner(struct drm_device *dev, u8 *data, void *info)
+{
+ struct context *ctx = info;
+ struct mxms_odev desc;
+
+ mxms_output_device(dev, data, &desc);
+ if (desc.outp_type == 2 &&
+ desc.dig_conn == ctx->desc.dig_conn)
+ return false;
+ return true;
+}
+
+static bool
+mxm_match_dcb(struct drm_device *dev, u8 *data, void *info)
+{
+ struct context *ctx = info;
+ u64 desc = *(u64 *)data;
+
+ mxms_output_device(dev, data, &ctx->desc);
+
+ /* match dcb encoder type to mxm-ods device type */
+ if ((ctx->outp[0] & 0x0000000f) != ctx->desc.outp_type)
+ return true;
+
+ /* digital output, have some extra stuff to match here, there's a
+ * table in the vbios that provides a mapping from the mxm digital
+ * connection enum values to SOR/link
+ */
+ if ((desc & 0x00000000000000f0) >= 0x20) {
+ /* check against sor index */
+ u8 link = mxm_sor_map(dev, ctx->desc.dig_conn);
+ if ((ctx->outp[0] & 0x0f000000) != (link & 0x0f) << 24)
+ return true;
+
+ /* check dcb entry has a compatible link field */
+ link = (link & 0x30) >> 4;
+ if ((link & ((ctx->outp[1] & 0x00000030) >> 4)) != link)
+ return true;
+ }
+
+ /* mark this descriptor accounted for by setting invalid device type,
+ * except of course some manufacturers don't follow specs properly and
+ * we need to avoid killing off the TMDS function on DP connectors
+ * if MXM-SIS is missing an entry for it.
+ */
+ data[0] &= ~0xf0;
+ if (ctx->desc.outp_type == 6 && ctx->desc.conn_type == 6 &&
+ mxms_foreach(dev, 0x01, mxm_match_tmds_partner, ctx)) {
+ data[0] |= 0x20; /* modify descriptor to match TMDS now */
+ } else {
+ data[0] |= 0xf0;
+ }
+
+ return false;
+}
+
+static int
+mxm_dcb_sanitise_entry(struct drm_device *dev, void *data, int idx, u8 *dcbe)
+{
+ struct context ctx = { .outp = (u32 *)dcbe };
+ u8 type, i2cidx, link;
+ u8 *conn;
+
+ /* look for an output device structure that matches this dcb entry.
+ * if one isn't found, disable it.
+ */
+ if (mxms_foreach(dev, 0x01, mxm_match_dcb, &ctx)) {
+ MXM_DBG(dev, "disable %d: 0x%08x 0x%08x\n",
+ idx, ctx.outp[0], ctx.outp[1]);
+ ctx.outp[0] |= 0x0000000f;
+ return 0;
+ }
+
+ /* modify the output's ddc/aux port, there's a pointer to a table
+ * with the mapping from mxm ddc/aux port to dcb i2c_index in the
+ * vbios mxm table
+ */
+ i2cidx = mxm_ddc_map(dev, ctx.desc.ddc_port);
+ if ((ctx.outp[0] & 0x0000000f) != OUTPUT_DP)
+ i2cidx = (i2cidx & 0x0f) << 4;
+ else
+ i2cidx = (i2cidx & 0xf0);
+
+ if (i2cidx != 0xf0) {
+ ctx.outp[0] &= ~0x000000f0;
+ ctx.outp[0] |= i2cidx;
+ }
+
+ /* override dcb sorconf.link, based on what mxm data says */
+ switch (ctx.desc.outp_type) {
+ case 0x00: /* Analog CRT */
+ case 0x01: /* Analog TV/HDTV */
+ break;
+ default:
+ link = mxm_sor_map(dev, ctx.desc.dig_conn) & 0x30;
+ ctx.outp[1] &= ~0x00000030;
+ ctx.outp[1] |= link;
+ break;
+ }
+
+ /* we may need to fixup various other vbios tables based on what
+ * the descriptor says the connector type should be.
+ *
+ * in a lot of cases, the vbios tables will claim DVI-I is possible,
+ * and the mxm data says the connector is really HDMI. another
+ * common example is DP->eDP.
+ */
+ conn = dcb_conn(dev, (ctx.outp[0] & 0x0000f000) >> 12);
+ type = conn[0];
+ switch (ctx.desc.conn_type) {
+ case 0x01: /* LVDS */
+ ctx.outp[1] |= 0x00000004; /* use_power_scripts */
+ /* XXX: modify default link width in LVDS table */
+ break;
+ case 0x02: /* HDMI */
+ type = DCB_CONNECTOR_HDMI_1;
+ break;
+ case 0x03: /* DVI-D */
+ type = DCB_CONNECTOR_DVI_D;
+ break;
+ case 0x0e: /* eDP, falls through to DPint */
+ ctx.outp[1] |= 0x00010000;
+ case 0x07: /* DP internal, wtf is this?? HP8670w */
+ ctx.outp[1] |= 0x00000004; /* use_power_scripts? */
+ type = DCB_CONNECTOR_eDP;
+ break;
+ default:
+ break;
+ }
+
+ if (mxms_version(dev) >= 0x0300)
+ conn[0] = type;
+
+ return 0;
+}
+
+static bool
+mxm_show_unmatched(struct drm_device *dev, u8 *data, void *info)
+{
+ u64 desc = *(u64 *)data;
+ if ((desc & 0xf0) != 0xf0)
+ MXM_MSG(dev, "unmatched output device 0x%016llx\n", desc);
+ return true;
+}
+
+static void
+mxm_dcb_sanitise(struct drm_device *dev)
+{
+ u8 *dcb = dcb_table(dev);
+ if (!dcb || dcb[0] != 0x40) {
+ MXM_DBG(dev, "unsupported DCB version\n");
+ return;
+ }
+
+ dcb_outp_foreach(dev, NULL, mxm_dcb_sanitise_entry);
+ mxms_foreach(dev, 0x01, mxm_show_unmatched, NULL);
+}
+
+static bool
+mxm_shadow_rom_fetch(struct nouveau_i2c_chan *i2c, u8 addr,
+ u8 offset, u8 size, u8 *data)
+{
+ struct i2c_msg msgs[] = {
+ { .addr = addr, .flags = 0, .len = 1, .buf = &offset },
+ { .addr = addr, .flags = I2C_M_RD, .len = size, .buf = data, },
+ };
+
+ return i2c_transfer(&i2c->adapter, msgs, 2) == 2;
+}
+
+static bool
+mxm_shadow_rom(struct drm_device *dev, u8 version)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_i2c_chan *i2c = NULL;
+ u8 i2cidx, mxms[6], addr, size;
+
+ i2cidx = mxm_ddc_map(dev, 1 /* LVDS_DDC */) & 0x0f;
+ if (i2cidx < 0x0f)
+ i2c = nouveau_i2c_find(dev, i2cidx);
+ if (!i2c)
+ return false;
+
+ addr = 0x54;
+ if (!mxm_shadow_rom_fetch(i2c, addr, 0, 6, mxms)) {
+ addr = 0x56;
+ if (!mxm_shadow_rom_fetch(i2c, addr, 0, 6, mxms))
+ return false;
+ }
+
+ dev_priv->mxms = mxms;
+ size = mxms_headerlen(dev) + mxms_structlen(dev);
+ dev_priv->mxms = kmalloc(size, GFP_KERNEL);
+
+ if (dev_priv->mxms &&
+ mxm_shadow_rom_fetch(i2c, addr, 0, size, dev_priv->mxms))
+ return true;
+
+ kfree(dev_priv->mxms);
+ dev_priv->mxms = NULL;
+ return false;
+}
+
+#if defined(CONFIG_ACPI)
+static bool
+mxm_shadow_dsm(struct drm_device *dev, u8 version)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ static char muid[] = {
+ 0x00, 0xA4, 0x04, 0x40, 0x7D, 0x91, 0xF2, 0x4C,
+ 0xB8, 0x9C, 0x79, 0xB6, 0x2F, 0xD5, 0x56, 0x65
+ };
+ u32 mxms_args[] = { 0x00000000 };
+ union acpi_object args[4] = {
+ /* _DSM MUID */
+ { .buffer.type = 3,
+ .buffer.length = sizeof(muid),
+ .buffer.pointer = muid,
+ },
+ /* spec says this can be zero to mean "highest revision", but
+ * of course there's at least one bios out there which fails
+ * unless you pass in exactly the version it supports..
+ */
+ { .integer.type = ACPI_TYPE_INTEGER,
+ .integer.value = (version & 0xf0) << 4 | (version & 0x0f),
+ },
+ /* MXMS function */
+ { .integer.type = ACPI_TYPE_INTEGER,
+ .integer.value = 0x00000010,
+ },
+ /* Pointer to MXMS arguments */
+ { .buffer.type = ACPI_TYPE_BUFFER,
+ .buffer.length = sizeof(mxms_args),
+ .buffer.pointer = (char *)mxms_args,
+ },
+ };
+ struct acpi_object_list list = { ARRAY_SIZE(args), args };
+ struct acpi_buffer retn = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
+ acpi_handle handle;
+ int ret;
+
+ handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev);
+ if (!handle)
+ return false;
+
+ ret = acpi_evaluate_object(handle, "_DSM", &list, &retn);
+ if (ret) {
+ MXM_DBG(dev, "DSM MXMS failed: %d\n", ret);
+ return false;
+ }
+
+ obj = retn.pointer;
+ if (obj->type == ACPI_TYPE_BUFFER) {
+ dev_priv->mxms = kmemdup(obj->buffer.pointer,
+ obj->buffer.length, GFP_KERNEL);
+ } else
+ if (obj->type == ACPI_TYPE_INTEGER) {
+ MXM_DBG(dev, "DSM MXMS returned 0x%llx\n", obj->integer.value);
+ }
+
+ kfree(obj);
+ return dev_priv->mxms != NULL;
+}
+#endif
+
+#if defined(CONFIG_ACPI_WMI) || defined(CONFIG_ACPI_WMI_MODULE)
+
+#define WMI_WMMX_GUID "F6CB5C3C-9CAE-4EBD-B577-931EA32A2CC0"
+
+static bool
+mxm_shadow_wmi(struct drm_device *dev, u8 version)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ u32 mxms_args[] = { 0x534D584D /* MXMS */, version, 0 };
+ struct acpi_buffer args = { sizeof(mxms_args), mxms_args };
+ struct acpi_buffer retn = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
+ acpi_status status;
+
+ if (!wmi_has_guid(WMI_WMMX_GUID))
+ return false;
+
+ status = wmi_evaluate_method(WMI_WMMX_GUID, 0, 0, &args, &retn);
+ if (ACPI_FAILURE(status)) {
+ MXM_DBG(dev, "WMMX MXMS returned %d\n", status);
+ return false;
+ }
+
+ obj = retn.pointer;
+ if (obj->type == ACPI_TYPE_BUFFER) {
+ dev_priv->mxms = kmemdup(obj->buffer.pointer,
+ obj->buffer.length, GFP_KERNEL);
+ }
+
+ kfree(obj);
+ return dev_priv->mxms != NULL;
+}
+#endif
+
+struct mxm_shadow_h {
+ const char *name;
+ bool (*exec)(struct drm_device *, u8 version);
+} _mxm_shadow[] = {
+ { "ROM", mxm_shadow_rom },
+#if defined(CONFIG_ACPI)
+ { "DSM", mxm_shadow_dsm },
+#endif
+#if defined(CONFIG_ACPI_WMI) || defined(CONFIG_ACPI_WMI_MODULE)
+ { "WMI", mxm_shadow_wmi },
+#endif
+ {}
+};
+
+static int
+mxm_shadow(struct drm_device *dev, u8 version)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct mxm_shadow_h *shadow = _mxm_shadow;
+ do {
+ MXM_DBG(dev, "checking %s\n", shadow->name);
+ if (shadow->exec(dev, version)) {
+ if (mxms_valid(dev))
+ return 0;
+ kfree(dev_priv->mxms);
+ dev_priv->mxms = NULL;
+ }
+ } while ((++shadow)->name);
+ return -ENOENT;
+}
+
+int
+nouveau_mxm_init(struct drm_device *dev)
+{
+ u8 mxm_size, *mxm = mxm_table(dev, &mxm_size);
+ if (!mxm || !mxm[0]) {
+ MXM_MSG(dev, "no VBIOS data, nothing to do\n");
+ return 0;
+ }
+
+ MXM_MSG(dev, "BIOS version %d.%d\n", mxm[0] >> 4, mxm[0] & 0x0f);
+
+ if (mxm_shadow(dev, mxm[0])) {
+ MXM_MSG(dev, "failed to locate valid SIS\n");
+ return -EINVAL;
+ }
+
+ MXM_MSG(dev, "MXMS Version %d.%d\n",
+ mxms_version(dev) >> 8, mxms_version(dev) & 0xff);
+ mxms_foreach(dev, 0, NULL, NULL);
+
+ if (nouveau_mxmdcb)
+ mxm_dcb_sanitise(dev);
+ return 0;
+}
+
+void
+nouveau_mxm_fini(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ kfree(dev_priv->mxms);
+ dev_priv->mxms = NULL;
+}
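
A standalone sketch (not part of the patch) of the revision-ID packing used in mxm_shadow_dsm() above: the packed MXM version byte (major nibble high, minor nibble low) is widened so that, for example, 0x30 becomes 0x0300. The helper name mxm_dsm_rev is illustrative only.

#include <stdio.h>

/* illustrative only; mirrors (version & 0xf0) << 4 | (version & 0x0f) */
static unsigned int mxm_dsm_rev(unsigned char version)
{
	return (version & 0xf0) << 4 | (version & 0x0f);
}

int main(void)
{
	printf("0x%04x\n", mxm_dsm_rev(0x30));	/* prints 0x0300 */
	return 0;
}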
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
index 6abdbe6530a7..2ef883c4bbc1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -115,7 +115,7 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *nobj = NULL;
struct drm_mm_node *mem;
- uint32_t offset;
+ uint64_t offset;
int target, ret;
mem = drm_mm_search_free_in_range(&chan->notifier_heap, size, 0,
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index 02222c540aee..cc419fae794b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -680,7 +680,7 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
return ret;
}
- ret = drm_mm_init(&chan->ramin_heap, base, size);
+ ret = drm_mm_init(&chan->ramin_heap, base, size - base);
if (ret) {
NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret);
nouveau_gpuobj_ref(NULL, &chan->ramin);
@@ -723,14 +723,14 @@ nvc0_gpuobj_channel_init(struct nouveau_channel *chan, struct nouveau_vm *vm)
nv_wo32(chan->ramin, 0x020c, 0x000000ff);
/* map display semaphore buffers into channel's vm */
- if (dev_priv->card_type >= NV_D0)
- return 0;
-
- for (i = 0; i < 2; i++) {
- struct nv50_display_crtc *dispc = &nv50_display(dev)->crtc[i];
-
- ret = nouveau_bo_vma_add(dispc->sem.bo, chan->vm,
- &chan->dispc_vma[i]);
+ for (i = 0; i < dev->mode_config.num_crtc; i++) {
+ struct nouveau_bo *bo;
+ if (dev_priv->card_type >= NV_D0)
+ bo = nvd0_display_crtc_sema(dev, i);
+ else
+ bo = nv50_display(dev)->crtc[i].sem.bo;
+
+ ret = nouveau_bo_vma_add(bo, chan->vm, &chan->dispc_vma[i]);
if (ret)
return ret;
}
@@ -879,9 +879,14 @@ nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
NV_DEBUG(dev, "ch%d\n", chan->id);
- if (dev_priv->card_type >= NV_50 && dev_priv->card_type <= NV_C0) {
+ if (dev_priv->card_type >= NV_D0) {
+ for (i = 0; i < dev->mode_config.num_crtc; i++) {
+ struct nouveau_bo *bo = nvd0_display_crtc_sema(dev, i);
+ nouveau_bo_vma_del(bo, &chan->dispc_vma[i]);
+ }
+ } else
+ if (dev_priv->card_type >= NV_50) {
struct nv50_display *disp = nv50_display(dev);
-
for (i = 0; i < dev->mode_config.num_crtc; i++) {
struct nv50_display_crtc *dispc = &disp->crtc[i];
nouveau_bo_vma_del(dispc->sem.bo, &chan->dispc_vma[i]);
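
A standalone sketch (not part of the patch) of why the PRAMIN hunk above switches to "size - base": drm_mm_init() takes a start offset and a length, so a heap spanning [base, size) must be sized as size - base. The numbers below are illustrative only.

#include <stdio.h>

int main(void)
{
	unsigned long base = 0x1000;	/* first usable PRAMIN offset (illustrative) */
	unsigned long size = 0x20000;	/* end of the usable range (illustrative)    */
	unsigned long len  = size - base;

	printf("heap start=0x%lx length=0x%lx\n", base, len);
	return 0;
}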
diff --git a/drivers/gpu/drm/nouveau/nouveau_perf.c b/drivers/gpu/drm/nouveau/nouveau_perf.c
index 33d03fbf00df..58f497343cec 100644
--- a/drivers/gpu/drm/nouveau/nouveau_perf.c
+++ b/drivers/gpu/drm/nouveau/nouveau_perf.c
@@ -41,7 +41,7 @@ legacy_perf_init(struct drm_device *dev)
return;
}
- perf = ROMPTR(bios, bmp[0x73]);
+ perf = ROMPTR(dev, bmp[0x73]);
if (!perf) {
NV_DEBUG(dev, "No memclock table pointer found.\n");
return;
@@ -87,7 +87,7 @@ nouveau_perf_timing(struct drm_device *dev, struct bit_entry *P,
* ramcfg to select the correct subentry
*/
if (P->version == 2) {
- u8 *tmap = ROMPTR(bios, P->data[4]);
+ u8 *tmap = ROMPTR(dev, P->data[4]);
if (!tmap) {
NV_DEBUG(dev, "no timing map pointer\n");
return NULL;
@@ -140,7 +140,6 @@ nouveau_perf_voltage(struct drm_device *dev, struct bit_entry *P,
struct nouveau_pm_level *perflvl)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->vbios;
u8 *vmap;
int id;
@@ -165,7 +164,7 @@ nouveau_perf_voltage(struct drm_device *dev, struct bit_entry *P,
return;
}
- vmap = ROMPTR(bios, P->data[32]);
+ vmap = ROMPTR(dev, P->data[32]);
if (!vmap) {
NV_DEBUG(dev, "volt map table pointer invalid\n");
return;
@@ -200,12 +199,14 @@ nouveau_perf_init(struct drm_device *dev)
return;
}
- perf = ROMPTR(bios, P.data[0]);
+ perf = ROMPTR(dev, P.data[0]);
version = perf[0];
headerlen = perf[1];
if (version < 0x40) {
recordlen = perf[3] + (perf[4] * perf[5]);
entries = perf[2];
+
+ pm->pwm_divisor = ROM16(perf[6]);
} else {
recordlen = perf[2] + (perf[3] * perf[4]);
entries = perf[5];
@@ -216,7 +217,7 @@ nouveau_perf_init(struct drm_device *dev)
return;
}
- perf = ROMPTR(bios, bios->data[bios->offset + 0x94]);
+ perf = ROMPTR(dev, bios->data[bios->offset + 0x94]);
if (!perf) {
NV_DEBUG(dev, "perf table pointer invalid\n");
return;
@@ -283,7 +284,6 @@ nouveau_perf_init(struct drm_device *dev)
perflvl->memory = ROM16(entry[11]) * 1000;
else
perflvl->memory = ROM16(entry[11]) * 2000;
-
break;
case 0x25:
perflvl->fanspeed = entry[4];
@@ -300,8 +300,8 @@ nouveau_perf_init(struct drm_device *dev)
perflvl->core = ROM16(entry[8]) * 1000;
perflvl->shader = ROM16(entry[10]) * 1000;
perflvl->memory = ROM16(entry[12]) * 1000;
- /*XXX: confirm on 0x35 */
- perflvl->unk05 = ROM16(entry[16]) * 1000;
+ perflvl->vdec = ROM16(entry[16]) * 1000;
+ perflvl->dom6 = ROM16(entry[20]) * 1000;
break;
case 0x40:
#define subent(n) (ROM16(entry[perf[2] + ((n) * perf[3])]) & 0xfff) * 1000
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c
index a539fd257921..9064d7f19794 100644
--- a/drivers/gpu/drm/nouveau/nouveau_pm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.c
@@ -26,6 +26,7 @@
#include "nouveau_drv.h"
#include "nouveau_pm.h"
+#include "nouveau_gpio.h"
#ifdef CONFIG_ACPI
#include <linux/acpi.h>
@@ -35,22 +36,95 @@
#include <linux/hwmon-sysfs.h>
static int
-nouveau_pm_clock_set(struct drm_device *dev, struct nouveau_pm_level *perflvl,
- u8 id, u32 khz)
+nouveau_pwmfan_get(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
- void *pre_state;
+ struct gpio_func gpio;
+ u32 divs, duty;
+ int ret;
- if (khz == 0)
- return 0;
+ if (!pm->pwm_get)
+ return -ENODEV;
+
+ ret = nouveau_gpio_find(dev, 0, DCB_GPIO_PWM_FAN, 0xff, &gpio);
+ if (ret == 0) {
+ ret = pm->pwm_get(dev, gpio.line, &divs, &duty);
+ if (ret == 0) {
+ divs = max(divs, duty);
+ if (dev_priv->card_type <= NV_40 || (gpio.log[0] & 1))
+ duty = divs - duty;
+ return (duty * 100) / divs;
+ }
+
+ return nouveau_gpio_func_get(dev, gpio.func) * 100;
+ }
+
+ return -ENODEV;
+}
+
+static int
+nouveau_pwmfan_set(struct drm_device *dev, int percent)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+ struct gpio_func gpio;
+ u32 divs, duty;
+ int ret;
+
+ if (!pm->pwm_set)
+ return -ENODEV;
+
+ ret = nouveau_gpio_find(dev, 0, DCB_GPIO_PWM_FAN, 0xff, &gpio);
+ if (ret == 0) {
+ divs = pm->pwm_divisor;
+ if (pm->fan.pwm_freq) {
+ /*XXX: PNVIO clock more than likely... */
+ divs = 135000 / pm->fan.pwm_freq;
+ if (dev_priv->chipset < 0xa3)
+ divs /= 4;
+ }
+
+ duty = ((divs * percent) + 99) / 100;
+ if (dev_priv->card_type <= NV_40 || (gpio.log[0] & 1))
+ duty = divs - duty;
- pre_state = pm->clock_pre(dev, perflvl, id, khz);
- if (IS_ERR(pre_state))
- return PTR_ERR(pre_state);
+ return pm->pwm_set(dev, gpio.line, divs, duty);
+ }
+
+ return -ENODEV;
+}
+
+static int
+nouveau_pm_perflvl_aux(struct drm_device *dev, struct nouveau_pm_level *perflvl,
+ struct nouveau_pm_level *a, struct nouveau_pm_level *b)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+ int ret;
+
+ /*XXX: not on all boards, we should control based on temperature
+ * on recent boards.. or maybe on some other factor we don't
+ * know about?
+ */
+ if (a->fanspeed && b->fanspeed && b->fanspeed > a->fanspeed) {
+ ret = nouveau_pwmfan_set(dev, perflvl->fanspeed);
+ if (ret && ret != -ENODEV) {
+ NV_ERROR(dev, "fanspeed set failed: %d\n", ret);
+ return ret;
+ }
+ }
+
+ if (pm->voltage.supported && pm->voltage_set) {
+ if (perflvl->volt_min && b->volt_min > a->volt_min) {
+ ret = pm->voltage_set(dev, perflvl->volt_min);
+ if (ret) {
+ NV_ERROR(dev, "voltage set failed: %d\n", ret);
+ return ret;
+ }
+ }
+ }
- if (pre_state)
- pm->clock_set(dev, pre_state);
return 0;
}
@@ -59,31 +133,24 @@ nouveau_pm_perflvl_set(struct drm_device *dev, struct nouveau_pm_level *perflvl)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+ void *state;
int ret;
if (perflvl == pm->cur)
return 0;
- if (pm->voltage.supported && pm->voltage_set && perflvl->volt_min) {
- ret = pm->voltage_set(dev, perflvl->volt_min);
- if (ret) {
- NV_ERROR(dev, "voltage_set %d failed: %d\n",
- perflvl->volt_min, ret);
- }
- }
+ ret = nouveau_pm_perflvl_aux(dev, perflvl, pm->cur, perflvl);
+ if (ret)
+ return ret;
- if (pm->clocks_pre) {
- void *state = pm->clocks_pre(dev, perflvl);
- if (IS_ERR(state))
- return PTR_ERR(state);
- pm->clocks_set(dev, state);
- } else
- if (pm->clock_set) {
- nouveau_pm_clock_set(dev, perflvl, PLL_CORE, perflvl->core);
- nouveau_pm_clock_set(dev, perflvl, PLL_SHADER, perflvl->shader);
- nouveau_pm_clock_set(dev, perflvl, PLL_MEMORY, perflvl->memory);
- nouveau_pm_clock_set(dev, perflvl, PLL_UNK05, perflvl->unk05);
- }
+ state = pm->clocks_pre(dev, perflvl);
+ if (IS_ERR(state))
+ return PTR_ERR(state);
+ pm->clocks_set(dev, state);
+
+ ret = nouveau_pm_perflvl_aux(dev, perflvl, perflvl, pm->cur);
+ if (ret)
+ return ret;
pm->cur = perflvl;
return 0;
@@ -130,28 +197,9 @@ nouveau_pm_perflvl_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
memset(perflvl, 0, sizeof(*perflvl));
- if (pm->clocks_get) {
- ret = pm->clocks_get(dev, perflvl);
- if (ret)
- return ret;
- } else
- if (pm->clock_get) {
- ret = pm->clock_get(dev, PLL_CORE);
- if (ret > 0)
- perflvl->core = ret;
-
- ret = pm->clock_get(dev, PLL_MEMORY);
- if (ret > 0)
- perflvl->memory = ret;
-
- ret = pm->clock_get(dev, PLL_SHADER);
- if (ret > 0)
- perflvl->shader = ret;
-
- ret = pm->clock_get(dev, PLL_UNK05);
- if (ret > 0)
- perflvl->unk05 = ret;
- }
+ ret = pm->clocks_get(dev, perflvl);
+ if (ret)
+ return ret;
if (pm->voltage.supported && pm->voltage_get) {
ret = pm->voltage_get(dev);
@@ -161,6 +209,10 @@ nouveau_pm_perflvl_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
}
}
+ ret = nouveau_pwmfan_get(dev);
+ if (ret > 0)
+ perflvl->fanspeed = ret;
+
return 0;
}
@@ -412,6 +464,172 @@ static SENSOR_DEVICE_ATTR(update_rate, S_IRUGO,
nouveau_hwmon_show_update_rate,
NULL, 0);
+static ssize_t
+nouveau_hwmon_show_fan0_input(struct device *d, struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
+ struct gpio_func gpio;
+ u32 cycles, cur, prev;
+ u64 start;
+ int ret;
+
+ ret = nouveau_gpio_find(dev, 0, DCB_GPIO_FAN_SENSE, 0xff, &gpio);
+ if (ret)
+ return ret;
+
+ /* Monitor the GPIO input 0x3b for 250ms.
+ * When the fan spins, it changes the value of GPIO FAN_SENSE.
+ * We get 4 changes (0 -> 1 -> 0 -> 1 -> [...]) per complete rotation.
+ */
+ start = ptimer->read(dev);
+ prev = nouveau_gpio_sense(dev, 0, gpio.line);
+ cycles = 0;
+ do {
+ cur = nouveau_gpio_sense(dev, 0, gpio.line);
+ if (prev != cur) {
+ cycles++;
+ prev = cur;
+ }
+
+ usleep_range(500, 1000); /* supports 0 < rpm < 7500 */
+ } while (ptimer->read(dev) - start < 250000000);
+
+ /* interpolate to get rpm */
+ return sprintf(buf, "%i\n", cycles / 4 * 4 * 60);
+}
+static SENSOR_DEVICE_ATTR(fan0_input, S_IRUGO, nouveau_hwmon_show_fan0_input,
+ NULL, 0);
+
+static ssize_t
+nouveau_hwmon_get_pwm0(struct device *d, struct device_attribute *a, char *buf)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ int ret;
+
+ ret = nouveau_pwmfan_get(dev);
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%i\n", ret);
+}
+
+static ssize_t
+nouveau_hwmon_set_pwm0(struct device *d, struct device_attribute *a,
+ const char *buf, size_t count)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+ int ret = -ENODEV;
+ long value;
+
+ if (nouveau_perflvl_wr != 7777)
+ return -EPERM;
+
+ if (strict_strtol(buf, 10, &value) == -EINVAL)
+ return -EINVAL;
+
+ if (value < pm->fan.min_duty)
+ value = pm->fan.min_duty;
+ if (value > pm->fan.max_duty)
+ value = pm->fan.max_duty;
+
+ ret = nouveau_pwmfan_set(dev, value);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static SENSOR_DEVICE_ATTR(pwm0, S_IRUGO | S_IWUSR,
+ nouveau_hwmon_get_pwm0,
+ nouveau_hwmon_set_pwm0, 0);
+
+static ssize_t
+nouveau_hwmon_get_pwm0_min(struct device *d,
+ struct device_attribute *a, char *buf)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+
+ return sprintf(buf, "%i\n", pm->fan.min_duty);
+}
+
+static ssize_t
+nouveau_hwmon_set_pwm0_min(struct device *d, struct device_attribute *a,
+ const char *buf, size_t count)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+ long value;
+
+ if (strict_strtol(buf, 10, &value) == -EINVAL)
+ return -EINVAL;
+
+ if (value < 0)
+ value = 0;
+
+ if (pm->fan.max_duty - value < 10)
+ value = pm->fan.max_duty - 10;
+
+ if (value < 10)
+ pm->fan.min_duty = 10;
+ else
+ pm->fan.min_duty = value;
+
+ return count;
+}
+
+static SENSOR_DEVICE_ATTR(pwm0_min, S_IRUGO | S_IWUSR,
+ nouveau_hwmon_get_pwm0_min,
+ nouveau_hwmon_set_pwm0_min, 0);
+
+static ssize_t
+nouveau_hwmon_get_pwm0_max(struct device *d,
+ struct device_attribute *a, char *buf)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+
+ return sprintf(buf, "%i\n", pm->fan.max_duty);
+}
+
+static ssize_t
+nouveau_hwmon_set_pwm0_max(struct device *d, struct device_attribute *a,
+ const char *buf, size_t count)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+ long value;
+
+ if (strict_strtol(buf, 10, &value) == -EINVAL)
+ return -EINVAL;
+
+ if (value < 0)
+ value = 0;
+
+ if (value - pm->fan.min_duty < 10)
+ value = pm->fan.min_duty + 10;
+
+ if (value > 100)
+ pm->fan.max_duty = 100;
+ else
+ pm->fan.max_duty = value;
+
+ return count;
+}
+
+static SENSOR_DEVICE_ATTR(pwm0_max, S_IRUGO | S_IWUSR,
+ nouveau_hwmon_get_pwm0_max,
+ nouveau_hwmon_set_pwm0_max, 0);
+
static struct attribute *hwmon_attributes[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_temp1_max.dev_attr.attr,
@@ -420,20 +638,36 @@ static struct attribute *hwmon_attributes[] = {
&sensor_dev_attr_update_rate.dev_attr.attr,
NULL
};
+static struct attribute *hwmon_fan_rpm_attributes[] = {
+ &sensor_dev_attr_fan0_input.dev_attr.attr,
+ NULL
+};
+static struct attribute *hwmon_pwm_fan_attributes[] = {
+ &sensor_dev_attr_pwm0.dev_attr.attr,
+ &sensor_dev_attr_pwm0_min.dev_attr.attr,
+ &sensor_dev_attr_pwm0_max.dev_attr.attr,
+ NULL
+};
static const struct attribute_group hwmon_attrgroup = {
.attrs = hwmon_attributes,
};
+static const struct attribute_group hwmon_fan_rpm_attrgroup = {
+ .attrs = hwmon_fan_rpm_attributes,
+};
+static const struct attribute_group hwmon_pwm_fan_attrgroup = {
+ .attrs = hwmon_pwm_fan_attributes,
+};
#endif
static int
nouveau_hwmon_init(struct drm_device *dev)
{
-#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
struct device *hwmon_dev;
- int ret;
+ int ret = 0;
if (!pm->temp_get)
return -ENODEV;
@@ -446,17 +680,46 @@ nouveau_hwmon_init(struct drm_device *dev)
return ret;
}
dev_set_drvdata(hwmon_dev, dev);
+
+ /* default sysfs entries */
ret = sysfs_create_group(&dev->pdev->dev.kobj, &hwmon_attrgroup);
if (ret) {
- NV_ERROR(dev,
- "Unable to create hwmon sysfs file: %d\n", ret);
- hwmon_device_unregister(hwmon_dev);
- return ret;
+ if (ret)
+ goto error;
+ }
+
+ /* if the card has a pwm fan */
+ /*XXX: incorrect, need better detection for this, some boards have
+ * the gpio entries for pwm fan control even when there's no
+ * actual fan connected to it... therm table? */
+ if (nouveau_pwmfan_get(dev) >= 0) {
+ ret = sysfs_create_group(&dev->pdev->dev.kobj,
+ &hwmon_pwm_fan_attrgroup);
+ if (ret)
+ goto error;
+ }
+
+ /* if the card can read the fan rpm */
+ if (nouveau_gpio_func_valid(dev, DCB_GPIO_FAN_SENSE)) {
+ ret = sysfs_create_group(&dev->pdev->dev.kobj,
+ &hwmon_fan_rpm_attrgroup);
+ if (ret)
+ goto error;
}
pm->hwmon = hwmon_dev;
-#endif
+
+ return 0;
+
+error:
+ NV_ERROR(dev, "Unable to create some hwmon sysfs files: %d\n", ret);
+ hwmon_device_unregister(hwmon_dev);
+ pm->hwmon = NULL;
+ return ret;
+#else
+ pm->hwmon = NULL;
return 0;
+#endif
}
static void
@@ -468,6 +731,9 @@ nouveau_hwmon_fini(struct drm_device *dev)
if (pm->hwmon) {
sysfs_remove_group(&dev->pdev->dev.kobj, &hwmon_attrgroup);
+ sysfs_remove_group(&dev->pdev->dev.kobj, &hwmon_pwm_fan_attrgroup);
+ sysfs_remove_group(&dev->pdev->dev.kobj, &hwmon_fan_rpm_attrgroup);
+
hwmon_device_unregister(pm->hwmon);
}
#endif
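
A standalone sketch (not part of the patch) of the fan arithmetic added above: nouveau_pwmfan_set() derives the PWM divider from the thermal-table frequency and rounds the duty up, and the fan0_input attribute converts edge counts from a 250 ms window into RPM at four edges per rotation. All input values below are illustrative.

#include <stdio.h>

int main(void)
{
	unsigned int pwm_freq = 1350;			/* Hz, illustrative thermal-table value */
	unsigned int percent  = 40;			/* requested fan speed                  */
	unsigned int divs     = 135000 / pwm_freq;	/* PWM steps -> 100                     */
	unsigned int duty     = (divs * percent + 99) / 100;	/* round up -> 40               */

	/* 250 ms window, 4 edges per rotation: cycles/4 rotations per quarter
	 * second, *4 for a full second, *60 for a minute.
	 */
	unsigned int cycles = 120;			/* edges seen in 250 ms, illustrative   */
	unsigned int rpm    = cycles / 4 * 4 * 60;	/* -> 7200 rpm                          */

	printf("divs=%u duty=%u rpm=%u\n", divs, duty, rpm);
	return 0;
}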
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.h b/drivers/gpu/drm/nouveau/nouveau_pm.h
index 8ac02cdd03a1..2f8e14fbcff8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_pm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.h
@@ -47,29 +47,33 @@ void nouveau_mem_timing_init(struct drm_device *);
void nouveau_mem_timing_fini(struct drm_device *);
/* nv04_pm.c */
-int nv04_pm_clock_get(struct drm_device *, u32 id);
-void *nv04_pm_clock_pre(struct drm_device *, struct nouveau_pm_level *,
- u32 id, int khz);
-void nv04_pm_clock_set(struct drm_device *, void *);
+int nv04_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
+void *nv04_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
+int nv04_pm_clocks_set(struct drm_device *, void *);
/* nv40_pm.c */
int nv40_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
void *nv40_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
-void nv40_pm_clocks_set(struct drm_device *, void *);
+int nv40_pm_clocks_set(struct drm_device *, void *);
+int nv40_pm_pwm_get(struct drm_device *, int, u32 *, u32 *);
+int nv40_pm_pwm_set(struct drm_device *, int, u32, u32);
/* nv50_pm.c */
-int nv50_pm_clock_get(struct drm_device *, u32 id);
-void *nv50_pm_clock_pre(struct drm_device *, struct nouveau_pm_level *,
- u32 id, int khz);
-void nv50_pm_clock_set(struct drm_device *, void *);
+int nv50_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
+void *nv50_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
+int nv50_pm_clocks_set(struct drm_device *, void *);
+int nv50_pm_pwm_get(struct drm_device *, int, u32 *, u32 *);
+int nv50_pm_pwm_set(struct drm_device *, int, u32, u32);
/* nva3_pm.c */
int nva3_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
void *nva3_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
-void nva3_pm_clocks_set(struct drm_device *, void *);
+int nva3_pm_clocks_set(struct drm_device *, void *);
/* nvc0_pm.c */
int nvc0_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
+void *nvc0_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
+int nvc0_pm_clocks_set(struct drm_device *, void *);
/* nouveau_temp.c */
void nouveau_temp_init(struct drm_device *dev);
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index b75258a9fe44..47f245edf538 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -8,88 +8,30 @@
#define NV_CTXDMA_PAGE_MASK (NV_CTXDMA_PAGE_SIZE - 1)
struct nouveau_sgdma_be {
- struct ttm_backend backend;
+	/* this has to be the first field so populate/unpopulate in
+	 * nouveau_bo.c work properly, otherwise they have to be moved here
+ */
+ struct ttm_dma_tt ttm;
struct drm_device *dev;
-
- dma_addr_t *pages;
- unsigned nr_pages;
- bool unmap_pages;
-
u64 offset;
- bool bound;
};
-static int
-nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
- struct page **pages, struct page *dummy_read_page,
- dma_addr_t *dma_addrs)
-{
- struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
- struct drm_device *dev = nvbe->dev;
- int i;
-
- NV_DEBUG(nvbe->dev, "num_pages = %ld\n", num_pages);
-
- nvbe->pages = dma_addrs;
- nvbe->nr_pages = num_pages;
- nvbe->unmap_pages = true;
-
- /* this code path isn't called and is incorrect anyways */
- if (0) { /* dma_addrs[0] != DMA_ERROR_CODE) { */
- nvbe->unmap_pages = false;
- return 0;
- }
-
- for (i = 0; i < num_pages; i++) {
- nvbe->pages[i] = pci_map_page(dev->pdev, pages[i], 0,
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(dev->pdev, nvbe->pages[i])) {
- nvbe->nr_pages = --i;
- be->func->clear(be);
- return -EFAULT;
- }
- }
-
- return 0;
-}
-
static void
-nouveau_sgdma_clear(struct ttm_backend *be)
+nouveau_sgdma_destroy(struct ttm_tt *ttm)
{
- struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
- struct drm_device *dev = nvbe->dev;
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
- if (nvbe->bound)
- be->func->unbind(be);
-
- if (nvbe->unmap_pages) {
- while (nvbe->nr_pages--) {
- pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- }
- }
-}
-
-static void
-nouveau_sgdma_destroy(struct ttm_backend *be)
-{
- struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
-
- if (be) {
+ if (ttm) {
NV_DEBUG(nvbe->dev, "\n");
-
- if (nvbe) {
- if (nvbe->pages)
- be->func->clear(be);
- kfree(nvbe);
- }
+ ttm_dma_tt_fini(&nvbe->ttm);
+ kfree(nvbe);
}
}
static int
-nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
- struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
struct drm_device *dev = nvbe->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
@@ -99,8 +41,8 @@ nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
nvbe->offset = mem->start << PAGE_SHIFT;
pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
- for (i = 0; i < nvbe->nr_pages; i++) {
- dma_addr_t dma_offset = nvbe->pages[i];
+ for (i = 0; i < ttm->num_pages; i++) {
+ dma_addr_t dma_offset = nvbe->ttm.dma_address[i];
uint32_t offset_l = lower_32_bits(dma_offset);
for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) {
@@ -109,14 +51,13 @@ nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
}
}
- nvbe->bound = true;
return 0;
}
static int
-nv04_sgdma_unbind(struct ttm_backend *be)
+nv04_sgdma_unbind(struct ttm_tt *ttm)
{
- struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
struct drm_device *dev = nvbe->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
@@ -124,22 +65,19 @@ nv04_sgdma_unbind(struct ttm_backend *be)
NV_DEBUG(dev, "\n");
- if (!nvbe->bound)
+ if (ttm->state != tt_bound)
return 0;
pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
- for (i = 0; i < nvbe->nr_pages; i++) {
+ for (i = 0; i < ttm->num_pages; i++) {
for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++)
nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
}
- nvbe->bound = false;
return 0;
}
static struct ttm_backend_func nv04_sgdma_backend = {
- .populate = nouveau_sgdma_populate,
- .clear = nouveau_sgdma_clear,
.bind = nv04_sgdma_bind,
.unbind = nv04_sgdma_unbind,
.destroy = nouveau_sgdma_destroy
@@ -158,14 +96,14 @@ nv41_sgdma_flush(struct nouveau_sgdma_be *nvbe)
}
static int
-nv41_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+nv41_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
- struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
- dma_addr_t *list = nvbe->pages;
+ dma_addr_t *list = nvbe->ttm.dma_address;
u32 pte = mem->start << 2;
- u32 cnt = nvbe->nr_pages;
+ u32 cnt = ttm->num_pages;
nvbe->offset = mem->start << PAGE_SHIFT;
@@ -175,18 +113,17 @@ nv41_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
}
nv41_sgdma_flush(nvbe);
- nvbe->bound = true;
return 0;
}
static int
-nv41_sgdma_unbind(struct ttm_backend *be)
+nv41_sgdma_unbind(struct ttm_tt *ttm)
{
- struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
u32 pte = (nvbe->offset >> 12) << 2;
- u32 cnt = nvbe->nr_pages;
+ u32 cnt = ttm->num_pages;
while (cnt--) {
nv_wo32(pgt, pte, 0x00000000);
@@ -194,24 +131,22 @@ nv41_sgdma_unbind(struct ttm_backend *be)
}
nv41_sgdma_flush(nvbe);
- nvbe->bound = false;
return 0;
}
static struct ttm_backend_func nv41_sgdma_backend = {
- .populate = nouveau_sgdma_populate,
- .clear = nouveau_sgdma_clear,
.bind = nv41_sgdma_bind,
.unbind = nv41_sgdma_unbind,
.destroy = nouveau_sgdma_destroy
};
static void
-nv44_sgdma_flush(struct nouveau_sgdma_be *nvbe)
+nv44_sgdma_flush(struct ttm_tt *ttm)
{
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
struct drm_device *dev = nvbe->dev;
- nv_wr32(dev, 0x100814, (nvbe->nr_pages - 1) << 12);
+ nv_wr32(dev, 0x100814, (ttm->num_pages - 1) << 12);
nv_wr32(dev, 0x100808, nvbe->offset | 0x20);
if (!nv_wait(dev, 0x100808, 0x00000001, 0x00000001))
NV_ERROR(dev, "gart flush timeout: 0x%08x\n",
@@ -270,14 +205,14 @@ nv44_sgdma_fill(struct nouveau_gpuobj *pgt, dma_addr_t *list, u32 base, u32 cnt)
}
static int
-nv44_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+nv44_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
- struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
- dma_addr_t *list = nvbe->pages;
+ dma_addr_t *list = nvbe->ttm.dma_address;
u32 pte = mem->start << 2, tmp[4];
- u32 cnt = nvbe->nr_pages;
+ u32 cnt = ttm->num_pages;
int i;
nvbe->offset = mem->start << PAGE_SHIFT;
@@ -305,19 +240,18 @@ nv44_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
if (cnt)
nv44_sgdma_fill(pgt, list, pte, cnt);
- nv44_sgdma_flush(nvbe);
- nvbe->bound = true;
+ nv44_sgdma_flush(ttm);
return 0;
}
static int
-nv44_sgdma_unbind(struct ttm_backend *be)
+nv44_sgdma_unbind(struct ttm_tt *ttm)
{
- struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
u32 pte = (nvbe->offset >> 12) << 2;
- u32 cnt = nvbe->nr_pages;
+ u32 cnt = ttm->num_pages;
if (pte & 0x0000000c) {
u32 max = 4 - ((pte >> 2) & 0x3);
@@ -339,55 +273,47 @@ nv44_sgdma_unbind(struct ttm_backend *be)
if (cnt)
nv44_sgdma_fill(pgt, NULL, pte, cnt);
- nv44_sgdma_flush(nvbe);
- nvbe->bound = false;
+ nv44_sgdma_flush(ttm);
return 0;
}
static struct ttm_backend_func nv44_sgdma_backend = {
- .populate = nouveau_sgdma_populate,
- .clear = nouveau_sgdma_clear,
.bind = nv44_sgdma_bind,
.unbind = nv44_sgdma_unbind,
.destroy = nouveau_sgdma_destroy
};
static int
-nv50_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
- struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
struct nouveau_mem *node = mem->mm_node;
+
/* noop: bound in move_notify() */
- node->pages = nvbe->pages;
- nvbe->pages = (dma_addr_t *)node;
- nvbe->bound = true;
+ node->pages = nvbe->ttm.dma_address;
return 0;
}
static int
-nv50_sgdma_unbind(struct ttm_backend *be)
+nv50_sgdma_unbind(struct ttm_tt *ttm)
{
- struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
- struct nouveau_mem *node = (struct nouveau_mem *)nvbe->pages;
/* noop: unbound in move_notify() */
- nvbe->pages = node->pages;
- node->pages = NULL;
- nvbe->bound = false;
return 0;
}
static struct ttm_backend_func nv50_sgdma_backend = {
- .populate = nouveau_sgdma_populate,
- .clear = nouveau_sgdma_clear,
.bind = nv50_sgdma_bind,
.unbind = nv50_sgdma_unbind,
.destroy = nouveau_sgdma_destroy
};
-struct ttm_backend *
-nouveau_sgdma_init_ttm(struct drm_device *dev)
+struct ttm_tt *
+nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
+ unsigned long size, uint32_t page_flags,
+ struct page *dummy_read_page)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
+ struct drm_device *dev = dev_priv->dev;
struct nouveau_sgdma_be *nvbe;
nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
@@ -395,9 +321,13 @@ nouveau_sgdma_init_ttm(struct drm_device *dev)
return NULL;
nvbe->dev = dev;
+ nvbe->ttm.ttm.func = dev_priv->gart_info.func;
- nvbe->backend.func = dev_priv->gart_info.func;
- return &nvbe->backend;
+ if (ttm_dma_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page)) {
+ kfree(nvbe);
+ return NULL;
+ }
+ return &nvbe->ttm.ttm;
}
int
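
A standalone sketch (not part of the patch) of the first-member embedding the sgdma rework relies on: because the ttm member is the first field of struct nouveau_sgdma_be, the bind/unbind callbacks can cast the ttm_tt pointer back to the containing structure. The type names here are illustrative only.

#include <stdio.h>

struct base { int id; };

struct wrapper {
	struct base base;	/* must remain the first field */
	int extra;
};

int main(void)
{
	struct wrapper w = { .base = { .id = 1 }, .extra = 2 };
	struct base *b = &w.base;
	/* valid only because base sits at offset zero */
	struct wrapper *back = (struct wrapper *)b;

	printf("%d %d\n", back->base.id, back->extra);
	return 0;
}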
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index d8831ab42bb9..f80c5e0762ff 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -36,6 +36,7 @@
#include "nouveau_drm.h"
#include "nouveau_fbcon.h"
#include "nouveau_ramht.h"
+#include "nouveau_gpio.h"
#include "nouveau_pm.h"
#include "nv50_display.h"
@@ -80,16 +81,12 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->display.early_init = nv04_display_early_init;
engine->display.late_takedown = nv04_display_late_takedown;
engine->display.create = nv04_display_create;
- engine->display.init = nv04_display_init;
engine->display.destroy = nv04_display_destroy;
- engine->gpio.init = nouveau_stub_init;
- engine->gpio.takedown = nouveau_stub_takedown;
- engine->gpio.get = NULL;
- engine->gpio.set = NULL;
- engine->gpio.irq_enable = NULL;
- engine->pm.clock_get = nv04_pm_clock_get;
- engine->pm.clock_pre = nv04_pm_clock_pre;
- engine->pm.clock_set = nv04_pm_clock_set;
+ engine->display.init = nv04_display_init;
+ engine->display.fini = nv04_display_fini;
+ engine->pm.clocks_get = nv04_pm_clocks_get;
+ engine->pm.clocks_pre = nv04_pm_clocks_pre;
+ engine->pm.clocks_set = nv04_pm_clocks_set;
engine->vram.init = nouveau_mem_detect;
engine->vram.takedown = nouveau_stub_takedown;
engine->vram.flags_valid = nouveau_mem_flags_valid;
@@ -129,16 +126,14 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->display.early_init = nv04_display_early_init;
engine->display.late_takedown = nv04_display_late_takedown;
engine->display.create = nv04_display_create;
- engine->display.init = nv04_display_init;
engine->display.destroy = nv04_display_destroy;
- engine->gpio.init = nouveau_stub_init;
- engine->gpio.takedown = nouveau_stub_takedown;
- engine->gpio.get = nv10_gpio_get;
- engine->gpio.set = nv10_gpio_set;
- engine->gpio.irq_enable = NULL;
- engine->pm.clock_get = nv04_pm_clock_get;
- engine->pm.clock_pre = nv04_pm_clock_pre;
- engine->pm.clock_set = nv04_pm_clock_set;
+ engine->display.init = nv04_display_init;
+ engine->display.fini = nv04_display_fini;
+ engine->gpio.drive = nv10_gpio_drive;
+ engine->gpio.sense = nv10_gpio_sense;
+ engine->pm.clocks_get = nv04_pm_clocks_get;
+ engine->pm.clocks_pre = nv04_pm_clocks_pre;
+ engine->pm.clocks_set = nv04_pm_clocks_set;
engine->vram.init = nouveau_mem_detect;
engine->vram.takedown = nouveau_stub_takedown;
engine->vram.flags_valid = nouveau_mem_flags_valid;
@@ -178,16 +173,14 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->display.early_init = nv04_display_early_init;
engine->display.late_takedown = nv04_display_late_takedown;
engine->display.create = nv04_display_create;
- engine->display.init = nv04_display_init;
engine->display.destroy = nv04_display_destroy;
- engine->gpio.init = nouveau_stub_init;
- engine->gpio.takedown = nouveau_stub_takedown;
- engine->gpio.get = nv10_gpio_get;
- engine->gpio.set = nv10_gpio_set;
- engine->gpio.irq_enable = NULL;
- engine->pm.clock_get = nv04_pm_clock_get;
- engine->pm.clock_pre = nv04_pm_clock_pre;
- engine->pm.clock_set = nv04_pm_clock_set;
+ engine->display.init = nv04_display_init;
+ engine->display.fini = nv04_display_fini;
+ engine->gpio.drive = nv10_gpio_drive;
+ engine->gpio.sense = nv10_gpio_sense;
+ engine->pm.clocks_get = nv04_pm_clocks_get;
+ engine->pm.clocks_pre = nv04_pm_clocks_pre;
+ engine->pm.clocks_set = nv04_pm_clocks_set;
engine->vram.init = nouveau_mem_detect;
engine->vram.takedown = nouveau_stub_takedown;
engine->vram.flags_valid = nouveau_mem_flags_valid;
@@ -227,16 +220,14 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->display.early_init = nv04_display_early_init;
engine->display.late_takedown = nv04_display_late_takedown;
engine->display.create = nv04_display_create;
- engine->display.init = nv04_display_init;
engine->display.destroy = nv04_display_destroy;
- engine->gpio.init = nouveau_stub_init;
- engine->gpio.takedown = nouveau_stub_takedown;
- engine->gpio.get = nv10_gpio_get;
- engine->gpio.set = nv10_gpio_set;
- engine->gpio.irq_enable = NULL;
- engine->pm.clock_get = nv04_pm_clock_get;
- engine->pm.clock_pre = nv04_pm_clock_pre;
- engine->pm.clock_set = nv04_pm_clock_set;
+ engine->display.init = nv04_display_init;
+ engine->display.fini = nv04_display_fini;
+ engine->gpio.drive = nv10_gpio_drive;
+ engine->gpio.sense = nv10_gpio_sense;
+ engine->pm.clocks_get = nv04_pm_clocks_get;
+ engine->pm.clocks_pre = nv04_pm_clocks_pre;
+ engine->pm.clocks_set = nv04_pm_clocks_set;
engine->pm.voltage_get = nouveau_voltage_gpio_get;
engine->pm.voltage_set = nouveau_voltage_gpio_set;
engine->vram.init = nouveau_mem_detect;
@@ -279,19 +270,22 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->display.early_init = nv04_display_early_init;
engine->display.late_takedown = nv04_display_late_takedown;
engine->display.create = nv04_display_create;
- engine->display.init = nv04_display_init;
engine->display.destroy = nv04_display_destroy;
- engine->gpio.init = nouveau_stub_init;
- engine->gpio.takedown = nouveau_stub_takedown;
- engine->gpio.get = nv10_gpio_get;
- engine->gpio.set = nv10_gpio_set;
- engine->gpio.irq_enable = NULL;
+ engine->display.init = nv04_display_init;
+ engine->display.fini = nv04_display_fini;
+ engine->gpio.init = nv10_gpio_init;
+ engine->gpio.fini = nv10_gpio_fini;
+ engine->gpio.drive = nv10_gpio_drive;
+ engine->gpio.sense = nv10_gpio_sense;
+ engine->gpio.irq_enable = nv10_gpio_irq_enable;
engine->pm.clocks_get = nv40_pm_clocks_get;
engine->pm.clocks_pre = nv40_pm_clocks_pre;
engine->pm.clocks_set = nv40_pm_clocks_set;
engine->pm.voltage_get = nouveau_voltage_gpio_get;
engine->pm.voltage_set = nouveau_voltage_gpio_set;
engine->pm.temp_get = nv40_temp_get;
+ engine->pm.pwm_get = nv40_pm_pwm_get;
+ engine->pm.pwm_set = nv40_pm_pwm_set;
engine->vram.init = nouveau_mem_detect;
engine->vram.takedown = nouveau_stub_takedown;
engine->vram.flags_valid = nouveau_mem_flags_valid;
@@ -334,14 +328,13 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->display.early_init = nv50_display_early_init;
engine->display.late_takedown = nv50_display_late_takedown;
engine->display.create = nv50_display_create;
- engine->display.init = nv50_display_init;
engine->display.destroy = nv50_display_destroy;
+ engine->display.init = nv50_display_init;
+ engine->display.fini = nv50_display_fini;
engine->gpio.init = nv50_gpio_init;
- engine->gpio.takedown = nv50_gpio_fini;
- engine->gpio.get = nv50_gpio_get;
- engine->gpio.set = nv50_gpio_set;
- engine->gpio.irq_register = nv50_gpio_irq_register;
- engine->gpio.irq_unregister = nv50_gpio_irq_unregister;
+ engine->gpio.fini = nv50_gpio_fini;
+ engine->gpio.drive = nv50_gpio_drive;
+ engine->gpio.sense = nv50_gpio_sense;
engine->gpio.irq_enable = nv50_gpio_irq_enable;
switch (dev_priv->chipset) {
case 0x84:
@@ -354,9 +347,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
case 0xaa:
case 0xac:
case 0x50:
- engine->pm.clock_get = nv50_pm_clock_get;
- engine->pm.clock_pre = nv50_pm_clock_pre;
- engine->pm.clock_set = nv50_pm_clock_set;
+ engine->pm.clocks_get = nv50_pm_clocks_get;
+ engine->pm.clocks_pre = nv50_pm_clocks_pre;
+ engine->pm.clocks_set = nv50_pm_clocks_set;
break;
default:
engine->pm.clocks_get = nva3_pm_clocks_get;
@@ -370,6 +363,8 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->pm.temp_get = nv84_temp_get;
else
engine->pm.temp_get = nv40_temp_get;
+ engine->pm.pwm_get = nv50_pm_pwm_get;
+ engine->pm.pwm_set = nv50_pm_pwm_set;
engine->vram.init = nv50_vram_init;
engine->vram.takedown = nv50_vram_fini;
engine->vram.get = nv50_vram_new;
@@ -407,14 +402,13 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->display.early_init = nv50_display_early_init;
engine->display.late_takedown = nv50_display_late_takedown;
engine->display.create = nv50_display_create;
- engine->display.init = nv50_display_init;
engine->display.destroy = nv50_display_destroy;
+ engine->display.init = nv50_display_init;
+ engine->display.fini = nv50_display_fini;
engine->gpio.init = nv50_gpio_init;
- engine->gpio.takedown = nouveau_stub_takedown;
- engine->gpio.get = nv50_gpio_get;
- engine->gpio.set = nv50_gpio_set;
- engine->gpio.irq_register = nv50_gpio_irq_register;
- engine->gpio.irq_unregister = nv50_gpio_irq_unregister;
+ engine->gpio.fini = nv50_gpio_fini;
+ engine->gpio.drive = nv50_gpio_drive;
+ engine->gpio.sense = nv50_gpio_sense;
engine->gpio.irq_enable = nv50_gpio_irq_enable;
engine->vram.init = nvc0_vram_init;
engine->vram.takedown = nv50_vram_fini;
@@ -423,8 +417,12 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->vram.flags_valid = nvc0_vram_flags_valid;
engine->pm.temp_get = nv84_temp_get;
engine->pm.clocks_get = nvc0_pm_clocks_get;
+ engine->pm.clocks_pre = nvc0_pm_clocks_pre;
+ engine->pm.clocks_set = nvc0_pm_clocks_set;
engine->pm.voltage_get = nouveau_voltage_gpio_get;
engine->pm.voltage_set = nouveau_voltage_gpio_set;
+ engine->pm.pwm_get = nv50_pm_pwm_get;
+ engine->pm.pwm_set = nv50_pm_pwm_set;
break;
case 0xd0:
engine->instmem.init = nvc0_instmem_init;
@@ -457,21 +455,23 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->display.early_init = nouveau_stub_init;
engine->display.late_takedown = nouveau_stub_takedown;
engine->display.create = nvd0_display_create;
- engine->display.init = nvd0_display_init;
engine->display.destroy = nvd0_display_destroy;
+ engine->display.init = nvd0_display_init;
+ engine->display.fini = nvd0_display_fini;
engine->gpio.init = nv50_gpio_init;
- engine->gpio.takedown = nouveau_stub_takedown;
- engine->gpio.get = nvd0_gpio_get;
- engine->gpio.set = nvd0_gpio_set;
- engine->gpio.irq_register = nv50_gpio_irq_register;
- engine->gpio.irq_unregister = nv50_gpio_irq_unregister;
+ engine->gpio.fini = nv50_gpio_fini;
+ engine->gpio.drive = nvd0_gpio_drive;
+ engine->gpio.sense = nvd0_gpio_sense;
engine->gpio.irq_enable = nv50_gpio_irq_enable;
engine->vram.init = nvc0_vram_init;
engine->vram.takedown = nv50_vram_fini;
engine->vram.get = nvc0_vram_new;
engine->vram.put = nv50_vram_del;
engine->vram.flags_valid = nvc0_vram_flags_valid;
+ engine->pm.temp_get = nv84_temp_get;
engine->pm.clocks_get = nvc0_pm_clocks_get;
+ engine->pm.clocks_pre = nvc0_pm_clocks_pre;
+ engine->pm.clocks_set = nvc0_pm_clocks_set;
engine->pm.voltage_get = nouveau_voltage_gpio_get;
engine->pm.voltage_set = nouveau_voltage_gpio_set;
break;
@@ -525,6 +525,7 @@ static void nouveau_switcheroo_set_state(struct pci_dev *pdev,
printk(KERN_ERR "VGA switcheroo: switched nouveau off\n");
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
drm_kms_helper_poll_disable(dev);
+ nouveau_switcheroo_optimus_dsm();
nouveau_pci_suspend(pdev, pmm);
dev->switch_power_state = DRM_SWITCH_POWER_OFF;
}
@@ -615,7 +616,7 @@ nouveau_card_init(struct drm_device *dev)
goto out_gart;
/* PGPIO */
- ret = engine->gpio.init(dev);
+ ret = nouveau_gpio_create(dev);
if (ret)
goto out_mc;
@@ -648,6 +649,7 @@ nouveau_card_init(struct drm_device *dev)
nv50_graph_create(dev);
break;
case NV_C0:
+ case NV_D0:
nvc0_graph_create(dev);
break;
default:
@@ -663,6 +665,11 @@ nouveau_card_init(struct drm_device *dev)
case 0xa0:
nv84_crypt_create(dev);
break;
+ case 0x98:
+ case 0xaa:
+ case 0xac:
+ nv98_crypt_create(dev);
+ break;
}
switch (dev_priv->card_type) {
@@ -684,15 +691,25 @@ nouveau_card_init(struct drm_device *dev)
break;
}
+ if (dev_priv->chipset >= 0xa3 || dev_priv->chipset == 0x98) {
+ nv84_bsp_create(dev);
+ nv84_vp_create(dev);
+ nv98_ppp_create(dev);
+ } else
+ if (dev_priv->chipset >= 0x84) {
+ nv50_mpeg_create(dev);
+ nv84_bsp_create(dev);
+ nv84_vp_create(dev);
+ } else
+ if (dev_priv->chipset >= 0x50) {
+ nv50_mpeg_create(dev);
+ } else
if (dev_priv->card_type == NV_40 ||
dev_priv->chipset == 0x31 ||
dev_priv->chipset == 0x34 ||
- dev_priv->chipset == 0x36)
+ dev_priv->chipset == 0x36) {
nv31_mpeg_create(dev);
- else
- if (dev_priv->card_type == NV_50 &&
- (dev_priv->chipset < 0x98 || dev_priv->chipset == 0xa0))
- nv50_mpeg_create(dev);
+ }
for (e = 0; e < NVOBJ_ENGINE_NR; e++) {
if (dev_priv->eng[e]) {
@@ -712,27 +729,7 @@ nouveau_card_init(struct drm_device *dev)
if (ret)
goto out_fifo;
- /* initialise general modesetting */
- drm_mode_config_init(dev);
- drm_mode_create_scaling_mode_property(dev);
- drm_mode_create_dithering_property(dev);
- dev->mode_config.funcs = (void *)&nouveau_mode_config_funcs;
- dev->mode_config.fb_base = pci_resource_start(dev->pdev, 1);
- dev->mode_config.min_width = 0;
- dev->mode_config.min_height = 0;
- if (dev_priv->card_type < NV_10) {
- dev->mode_config.max_width = 2048;
- dev->mode_config.max_height = 2048;
- } else
- if (dev_priv->card_type < NV_50) {
- dev->mode_config.max_width = 4096;
- dev->mode_config.max_height = 4096;
- } else {
- dev->mode_config.max_width = 8192;
- dev->mode_config.max_height = 8192;
- }
-
- ret = engine->display.create(dev);
+ ret = nouveau_display_create(dev);
if (ret)
goto out_irq;
@@ -752,12 +749,11 @@ nouveau_card_init(struct drm_device *dev)
}
if (dev->mode_config.num_crtc) {
- ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
+ ret = nouveau_display_init(dev);
if (ret)
goto out_chan;
nouveau_fbcon_init(dev);
- drm_kms_helper_poll_init(dev);
}
return 0;
@@ -768,7 +764,7 @@ out_fence:
nouveau_fence_fini(dev);
out_disp:
nouveau_backlight_exit(dev);
- engine->display.destroy(dev);
+ nouveau_display_destroy(dev);
out_irq:
nouveau_irq_fini(dev);
out_fifo:
@@ -788,7 +784,7 @@ out_engine:
out_timer:
engine->timer.takedown(dev);
out_gpio:
- engine->gpio.takedown(dev);
+ nouveau_gpio_destroy(dev);
out_mc:
engine->mc.takedown(dev);
out_gart:
@@ -818,9 +814,8 @@ static void nouveau_card_takedown(struct drm_device *dev)
int e;
if (dev->mode_config.num_crtc) {
- drm_kms_helper_poll_fini(dev);
nouveau_fbcon_fini(dev);
- drm_vblank_cleanup(dev);
+ nouveau_display_fini(dev);
}
if (dev_priv->channel) {
@@ -829,8 +824,7 @@ static void nouveau_card_takedown(struct drm_device *dev)
}
nouveau_backlight_exit(dev);
- engine->display.destroy(dev);
- drm_mode_config_cleanup(dev);
+ nouveau_display_destroy(dev);
if (!dev_priv->noaccel) {
engine->fifo.takedown(dev);
@@ -843,7 +837,7 @@ static void nouveau_card_takedown(struct drm_device *dev)
}
engine->fb.takedown(dev);
engine->timer.takedown(dev);
- engine->gpio.takedown(dev);
+ nouveau_gpio_destroy(dev);
engine->mc.takedown(dev);
engine->display.late_takedown(dev);
@@ -1110,13 +1104,11 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
dev_priv->noaccel = !!nouveau_noaccel;
if (nouveau_noaccel == -1) {
switch (dev_priv->chipset) {
-#if 0
- case 0xXX: /* known broken */
+ case 0xd9: /* known broken */
NV_INFO(dev, "acceleration disabled by default, pass "
"noaccel=0 to force enable\n");
dev_priv->noaccel = true;
break;
-#endif
default:
dev_priv->noaccel = false;
break;
@@ -1238,7 +1230,7 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
getparam->value = 1;
break;
case NOUVEAU_GETPARAM_HAS_PAGEFLIP:
- getparam->value = dev_priv->card_type < NV_D0;
+ getparam->value = 1;
break;
case NOUVEAU_GETPARAM_GRAPH_UNITS:
/* NV40 and NV50 versions are quite different, but register
diff --git a/drivers/gpu/drm/nouveau/nouveau_temp.c b/drivers/gpu/drm/nouveau/nouveau_temp.c
index 5a46446dd5a8..0f5a30160556 100644
--- a/drivers/gpu/drm/nouveau/nouveau_temp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_temp.c
@@ -55,6 +55,10 @@ nouveau_temp_vbios_parse(struct drm_device *dev, u8 *temp)
temps->down_clock = 100;
temps->fan_boost = 90;
+ /* Set the default range for the pwm fan */
+ pm->fan.min_duty = 30;
+ pm->fan.max_duty = 100;
+
/* Set the known default values to setup the temperature sensor */
if (dev_priv->card_type >= NV_40) {
switch (dev_priv->chipset) {
@@ -156,11 +160,26 @@ nouveau_temp_vbios_parse(struct drm_device *dev, u8 *temp)
case 0x13:
sensor->slope_div = value;
break;
+ case 0x22:
+ pm->fan.min_duty = value & 0xff;
+ pm->fan.max_duty = (value & 0xff00) >> 8;
+ break;
+ case 0x26:
+ pm->fan.pwm_freq = value;
+ break;
}
temp += recordlen;
}
nouveau_temp_safety_checks(dev);
+
+ /* check the fan min/max settings */
+ if (pm->fan.min_duty < 10)
+ pm->fan.min_duty = 10;
+ if (pm->fan.max_duty > 100)
+ pm->fan.max_duty = 100;
+ if (pm->fan.max_duty < pm->fan.min_duty)
+ pm->fan.max_duty = pm->fan.min_duty;
}
static int
@@ -267,8 +286,6 @@ probe_monitoring_device(struct nouveau_i2c_chan *i2c,
static void
nouveau_temp_probe_i2c(struct drm_device *dev)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct dcb_table *dcb = &dev_priv->vbios.dcb;
struct i2c_board_info info[] = {
{ I2C_BOARD_INFO("w83l785ts", 0x2d) },
{ I2C_BOARD_INFO("w83781d", 0x2d) },
@@ -277,11 +294,9 @@ nouveau_temp_probe_i2c(struct drm_device *dev)
{ I2C_BOARD_INFO("lm99", 0x4c) },
{ }
};
- int idx = (dcb->version >= 0x40 ?
- dcb->i2c_default_indices & 0xf : 2);
nouveau_i2c_identify(dev, "monitoring device", info,
- probe_monitoring_device, idx);
+ probe_monitoring_device, NV_I2C_DEFAULT(0));
}
void
@@ -297,9 +312,9 @@ nouveau_temp_init(struct drm_device *dev)
return;
if (P.version == 1)
- temp = ROMPTR(bios, P.data[12]);
+ temp = ROMPTR(dev, P.data[12]);
else if (P.version == 2)
- temp = ROMPTR(bios, P.data[16]);
+ temp = ROMPTR(dev, P.data[16]);
else
NV_WARN(dev, "unknown temp for BIT P %d\n", P.version);
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.c b/drivers/gpu/drm/nouveau/nouveau_vm.c
index ef0832b29ad2..2bf6c0350b4b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.c
@@ -78,9 +78,10 @@ nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_mem *node)
void
nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
- struct nouveau_mem *mem, dma_addr_t *list)
+ struct nouveau_mem *mem)
{
struct nouveau_vm *vm = vma->vm;
+ dma_addr_t *list = mem->pages;
int big = vma->node->type != vm->spg_shift;
u32 offset = vma->node->offset + (delta >> 12);
u32 bits = vma->node->type - 12;
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.h b/drivers/gpu/drm/nouveau/nouveau_vm.h
index 6ce995f7797e..4fb6e728734d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.h
@@ -89,7 +89,7 @@ void nouveau_vm_map_at(struct nouveau_vma *, u64 offset, struct nouveau_mem *);
void nouveau_vm_unmap(struct nouveau_vma *);
void nouveau_vm_unmap_at(struct nouveau_vma *, u64 offset, u64 length);
void nouveau_vm_map_sg(struct nouveau_vma *, u64 offset, u64 length,
- struct nouveau_mem *, dma_addr_t *);
+ struct nouveau_mem *);
/* nv50_vm.c */
void nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
diff --git a/drivers/gpu/drm/nouveau/nouveau_volt.c b/drivers/gpu/drm/nouveau/nouveau_volt.c
index 86d03e15735d..b010cb997b34 100644
--- a/drivers/gpu/drm/nouveau/nouveau_volt.c
+++ b/drivers/gpu/drm/nouveau/nouveau_volt.c
@@ -26,6 +26,7 @@
#include "nouveau_drv.h"
#include "nouveau_pm.h"
+#include "nouveau_gpio.h"
static const enum dcb_gpio_tag vidtag[] = { 0x04, 0x05, 0x06, 0x1a, 0x73 };
static int nr_vidtag = sizeof(vidtag) / sizeof(vidtag[0]);
@@ -34,7 +35,6 @@ int
nouveau_voltage_gpio_get(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpio_engine *gpio = &dev_priv->engine.gpio;
struct nouveau_pm_voltage *volt = &dev_priv->engine.pm.voltage;
u8 vid = 0;
int i;
@@ -43,7 +43,7 @@ nouveau_voltage_gpio_get(struct drm_device *dev)
if (!(volt->vid_mask & (1 << i)))
continue;
- vid |= gpio->get(dev, vidtag[i]) << i;
+ vid |= nouveau_gpio_func_get(dev, vidtag[i]) << i;
}
return nouveau_volt_lvl_lookup(dev, vid);
@@ -53,7 +53,6 @@ int
nouveau_voltage_gpio_set(struct drm_device *dev, int voltage)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpio_engine *gpio = &dev_priv->engine.gpio;
struct nouveau_pm_voltage *volt = &dev_priv->engine.pm.voltage;
int vid, i;
@@ -65,7 +64,7 @@ nouveau_voltage_gpio_set(struct drm_device *dev, int voltage)
if (!(volt->vid_mask & (1 << i)))
continue;
- gpio->set(dev, vidtag[i], !!(vid & (1 << i)));
+ nouveau_gpio_func_set(dev, vidtag[i], !!(vid & (1 << i)));
}
return 0;
@@ -117,10 +116,10 @@ nouveau_volt_init(struct drm_device *dev)
return;
if (P.version == 1)
- volt = ROMPTR(bios, P.data[16]);
+ volt = ROMPTR(dev, P.data[16]);
else
if (P.version == 2)
- volt = ROMPTR(bios, P.data[12]);
+ volt = ROMPTR(dev, P.data[12]);
else {
NV_WARN(dev, "unknown volt for BIT P %d\n", P.version);
}
@@ -130,7 +129,7 @@ nouveau_volt_init(struct drm_device *dev)
return;
}
- volt = ROMPTR(bios, bios->data[bios->offset + 0x98]);
+ volt = ROMPTR(dev, bios->data[bios->offset + 0x98]);
}
if (!volt) {
@@ -194,7 +193,7 @@ nouveau_volt_init(struct drm_device *dev)
return;
}
- if (!nouveau_bios_gpio_entry(dev, vidtag[i])) {
+ if (!nouveau_gpio_func_valid(dev, vidtag[i])) {
NV_DEBUG(dev, "vid bit %d has no gpio tag\n", i);
return;
}
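
A standalone sketch (not part of the patch) of how nouveau_voltage_gpio_get() above builds a VID from individual GPIO functions, one bit per wired-up tag. The gpio_state[] values stand in for nouveau_gpio_func_get() results and are illustrative only.

#include <stdio.h>

int main(void)
{
	unsigned int vid_mask = 0x7;			/* VID bits 0..2 are wired up */
	unsigned int gpio_state[] = { 1, 0, 1 };	/* sensed level per VID bit   */
	unsigned int vid = 0, i;

	for (i = 0; i < 3; i++) {
		if (!(vid_mask & (1 << i)))
			continue;
		vid |= gpio_state[i] << i;		/* -> 0x5 */
	}

	printf("vid=0x%x\n", vid);
	return 0;
}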
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
index 5e45398a9e2d..728d07584d39 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -364,7 +364,7 @@ nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode)
regp->CRTC[NV_CIO_CR_VRE_INDEX] = 1 << 5 | XLATE(vertEnd, 0, NV_CIO_CR_VRE_3_0);
regp->CRTC[NV_CIO_CR_VDE_INDEX] = vertDisplay;
/* framebuffer can be larger than crtc scanout area. */
- regp->CRTC[NV_CIO_CR_OFFSET_INDEX] = fb->pitch / 8;
+ regp->CRTC[NV_CIO_CR_OFFSET_INDEX] = fb->pitches[0] / 8;
regp->CRTC[NV_CIO_CR_ULINE_INDEX] = 0x00;
regp->CRTC[NV_CIO_CR_VBS_INDEX] = vertBlankStart;
regp->CRTC[NV_CIO_CR_VBE_INDEX] = vertBlankEnd;
@@ -377,9 +377,9 @@ nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode)
/* framebuffer can be larger than crtc scanout area. */
regp->CRTC[NV_CIO_CRE_RPC0_INDEX] =
- XLATE(fb->pitch / 8, 8, NV_CIO_CRE_RPC0_OFFSET_10_8);
+ XLATE(fb->pitches[0] / 8, 8, NV_CIO_CRE_RPC0_OFFSET_10_8);
regp->CRTC[NV_CIO_CRE_42] =
- XLATE(fb->pitch / 8, 11, NV_CIO_CRE_42_OFFSET_11);
+ XLATE(fb->pitches[0] / 8, 11, NV_CIO_CRE_42_OFFSET_11);
regp->CRTC[NV_CIO_CRE_RPC1_INDEX] = mode->crtc_hdisplay < 1280 ?
MASK(NV_CIO_CRE_RPC1_LARGE) : 0x00;
regp->CRTC[NV_CIO_CRE_LSR_INDEX] = XLATE(horizBlankEnd, 6, NV_CIO_CRE_LSR_HBE_6) |
@@ -835,18 +835,18 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
NVWriteRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_GENERAL_CONTROL,
regp->ramdac_gen_ctrl);
- regp->CRTC[NV_CIO_CR_OFFSET_INDEX] = drm_fb->pitch >> 3;
+ regp->CRTC[NV_CIO_CR_OFFSET_INDEX] = drm_fb->pitches[0] >> 3;
regp->CRTC[NV_CIO_CRE_RPC0_INDEX] =
- XLATE(drm_fb->pitch >> 3, 8, NV_CIO_CRE_RPC0_OFFSET_10_8);
+ XLATE(drm_fb->pitches[0] >> 3, 8, NV_CIO_CRE_RPC0_OFFSET_10_8);
regp->CRTC[NV_CIO_CRE_42] =
- XLATE(drm_fb->pitch / 8, 11, NV_CIO_CRE_42_OFFSET_11);
+ XLATE(drm_fb->pitches[0] / 8, 11, NV_CIO_CRE_42_OFFSET_11);
crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_RPC0_INDEX);
crtc_wr_cio_state(crtc, regp, NV_CIO_CR_OFFSET_INDEX);
crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_42);
/* Update the framebuffer location. */
regp->fb_start = nv_crtc->fb.offset & ~3;
- regp->fb_start += (y * drm_fb->pitch) + (x * drm_fb->bits_per_pixel / 8);
+ regp->fb_start += (y * drm_fb->pitches[0]) + (x * drm_fb->bits_per_pixel / 8);
nv_set_crtc_base(dev, nv_crtc->index, regp->fb_start);
/* Update the arbitration parameters. */
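The nv04_crtc.c hunks above are a mechanical rename (fb->pitch becomes fb->pitches[0] after the DRM multi-plane framebuffer change); the derived register values are unchanged. A rough sketch of that derivation, with the bit positions written out in place of the NV_CIO_CRE_* macros:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t pitch = 5120;		/* bytes per scanline, e.g. 1280 * 4 */
	uint32_t bpp = 32, x = 16, y = 8;
	uint32_t words = pitch / 8;	/* CRTC offset register counts 8-byte units */

	/* Illustrative split of the pitch across the VGA/extended registers;
	 * the real field positions come from the NV_CIO_CRE_* definitions. */
	printf("CR_OFFSET     = 0x%02x\n", words & 0xff);
	printf("CRE_RPC0[2:0] = 0x%x\n", (words >> 8) & 0x7);
	printf("CRE_42[0]     = 0x%x\n", (words >> 11) & 0x1);

	/* Scanout start address, as in nv04_crtc_do_mode_set_base(). */
	printf("fb_start     += %u\n", y * pitch + x * bpp / 8);
	return 0;
}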
diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c
index e000455e06d0..8300266ffaea 100644
--- a/drivers/gpu/drm/nouveau/nv04_dac.c
+++ b/drivers/gpu/drm/nouveau/nv04_dac.c
@@ -32,6 +32,7 @@
#include "nouveau_connector.h"
#include "nouveau_crtc.h"
#include "nouveau_hw.h"
+#include "nouveau_gpio.h"
#include "nvreg.h"
int nv04_dac_output_offset(struct drm_encoder *encoder)
@@ -220,7 +221,6 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpio_engine *gpio = &dev_priv->engine.gpio;
struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb;
uint32_t sample, testval, regoffset = nv04_dac_output_offset(encoder);
uint32_t saved_powerctrl_2 = 0, saved_powerctrl_4 = 0, saved_routput,
@@ -252,11 +252,11 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
nvWriteMC(dev, NV_PBUS_POWERCTRL_4, saved_powerctrl_4 & 0xffffffcf);
}
- saved_gpio1 = gpio->get(dev, DCB_GPIO_TVDAC1);
- saved_gpio0 = gpio->get(dev, DCB_GPIO_TVDAC0);
+ saved_gpio1 = nouveau_gpio_func_get(dev, DCB_GPIO_TVDAC1);
+ saved_gpio0 = nouveau_gpio_func_get(dev, DCB_GPIO_TVDAC0);
- gpio->set(dev, DCB_GPIO_TVDAC1, dcb->type == OUTPUT_TV);
- gpio->set(dev, DCB_GPIO_TVDAC0, dcb->type == OUTPUT_TV);
+ nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC1, dcb->type == OUTPUT_TV);
+ nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC0, dcb->type == OUTPUT_TV);
msleep(4);
@@ -306,8 +306,8 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
nvWriteMC(dev, NV_PBUS_POWERCTRL_4, saved_powerctrl_4);
nvWriteMC(dev, NV_PBUS_POWERCTRL_2, saved_powerctrl_2);
- gpio->set(dev, DCB_GPIO_TVDAC1, saved_gpio1);
- gpio->set(dev, DCB_GPIO_TVDAC0, saved_gpio0);
+ nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC1, saved_gpio1);
+ nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC0, saved_gpio0);
return sample;
}
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
index 12098bf839c4..2258746016f8 100644
--- a/drivers/gpu/drm/nouveau/nv04_dfp.c
+++ b/drivers/gpu/drm/nouveau/nv04_dfp.c
@@ -289,6 +289,7 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
struct nouveau_connector *nv_connector = nouveau_crtc_connector_get(nv_crtc);
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct drm_display_mode *output_mode = &nv_encoder->mode;
+ struct drm_connector *connector = &nv_connector->base;
uint32_t mode_ratio, panel_ratio;
NV_DEBUG_KMS(dev, "Output mode on CRTC %d:\n", nv_crtc->index);
@@ -340,10 +341,15 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
output_mode->clock > 165000)
regp->fp_control |= (2 << 24);
if (nv_encoder->dcb->type == OUTPUT_LVDS) {
- bool duallink, dummy;
+ bool duallink = false, dummy;
+ if (nv_connector->edid &&
+ nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
+ duallink = (((u8 *)nv_connector->edid)[121] == 2);
+ } else {
+ nouveau_bios_parse_lvds_table(dev, output_mode->clock,
+ &duallink, &dummy);
+ }
- nouveau_bios_parse_lvds_table(dev, output_mode->clock,
- &duallink, &dummy);
if (duallink)
regp->fp_control |= (8 << 28);
} else
@@ -407,7 +413,9 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
}
/* Output property. */
- if (nv_connector->use_dithering) {
+ if ((nv_connector->dithering_mode == DITHERING_MODE_ON) ||
+ (nv_connector->dithering_mode == DITHERING_MODE_AUTO &&
+ encoder->crtc->fb->depth > connector->display_info.bpc * 3)) {
if (dev_priv->chipset == 0x11)
regp->dither = savep->dither | 0x00010000;
else {
diff --git a/drivers/gpu/drm/nouveau/nv04_display.c b/drivers/gpu/drm/nouveau/nv04_display.c
index 6bd8518d7b2e..7047d37e8dab 100644
--- a/drivers/gpu/drm/nouveau/nv04_display.c
+++ b/drivers/gpu/drm/nouveau/nv04_display.c
@@ -243,6 +243,11 @@ nv04_display_init(struct drm_device *dev)
return 0;
}
+void
+nv04_display_fini(struct drm_device *dev)
+{
+}
+
static void
nv04_vblank_crtc0_isr(struct drm_device *dev)
{
diff --git a/drivers/gpu/drm/nouveau/nv04_pm.c b/drivers/gpu/drm/nouveau/nv04_pm.c
index 9ae92a87b8cc..6e7589918fa9 100644
--- a/drivers/gpu/drm/nouveau/nv04_pm.c
+++ b/drivers/gpu/drm/nouveau/nv04_pm.c
@@ -27,68 +27,111 @@
#include "nouveau_hw.h"
#include "nouveau_pm.h"
-struct nv04_pm_state {
+int
+nv04_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
+{
+ int ret;
+
+ ret = nouveau_hw_get_clock(dev, PLL_CORE);
+ if (ret < 0)
+ return ret;
+ perflvl->core = ret;
+
+ ret = nouveau_hw_get_clock(dev, PLL_MEMORY);
+ if (ret < 0)
+ return ret;
+ perflvl->memory = ret;
+
+ return 0;
+}
+
+struct nv04_pm_clock {
struct pll_lims pll;
struct nouveau_pll_vals calc;
};
-int
-nv04_pm_clock_get(struct drm_device *dev, u32 id)
+struct nv04_pm_state {
+ struct nv04_pm_clock core;
+ struct nv04_pm_clock memory;
+};
+
+static int
+calc_pll(struct drm_device *dev, u32 id, int khz, struct nv04_pm_clock *clk)
{
- return nouveau_hw_get_clock(dev, id);
+ int ret;
+
+ ret = get_pll_limits(dev, id, &clk->pll);
+ if (ret)
+ return ret;
+
+ ret = nouveau_calc_pll_mnp(dev, &clk->pll, khz, &clk->calc);
+ if (!ret)
+ return -EINVAL;
+
+ return 0;
}
void *
-nv04_pm_clock_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl,
- u32 id, int khz)
+nv04_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
{
- struct nv04_pm_state *state;
+ struct nv04_pm_state *info;
int ret;
- state = kzalloc(sizeof(*state), GFP_KERNEL);
- if (!state)
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
return ERR_PTR(-ENOMEM);
- ret = get_pll_limits(dev, id, &state->pll);
- if (ret) {
- kfree(state);
- return (ret == -ENOENT) ? NULL : ERR_PTR(ret);
- }
+ ret = calc_pll(dev, PLL_CORE, perflvl->core, &info->core);
+ if (ret)
+ goto error;
- ret = nouveau_calc_pll_mnp(dev, &state->pll, khz, &state->calc);
- if (!ret) {
- kfree(state);
- return ERR_PTR(-EINVAL);
+ if (perflvl->memory) {
+ ret = calc_pll(dev, PLL_MEMORY, perflvl->memory, &info->memory);
+ if (ret)
+ goto error;
}
- return state;
+ return info;
+error:
+ kfree(info);
+ return ERR_PTR(ret);
}
-void
-nv04_pm_clock_set(struct drm_device *dev, void *pre_state)
+static void
+prog_pll(struct drm_device *dev, struct nv04_pm_clock *clk)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
- struct nv04_pm_state *state = pre_state;
- u32 reg = state->pll.reg;
+ u32 reg = clk->pll.reg;
/* thank the insane nouveau_hw_setpll() interface for this */
if (dev_priv->card_type >= NV_40)
reg += 4;
- nouveau_hw_setpll(dev, reg, &state->calc);
+ nouveau_hw_setpll(dev, reg, &clk->calc);
+}
+
+int
+nv04_pm_clocks_set(struct drm_device *dev, void *pre_state)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
+ struct nv04_pm_state *state = pre_state;
+
+ prog_pll(dev, &state->core);
- if (dev_priv->card_type < NV_30 && reg == NV_PRAMDAC_MPLL_COEFF) {
- if (dev_priv->card_type == NV_20)
- nv_mask(dev, 0x1002c4, 0, 1 << 20);
+ if (state->memory.pll.reg) {
+ prog_pll(dev, &state->memory);
+ if (dev_priv->card_type < NV_30) {
+ if (dev_priv->card_type == NV_20)
+ nv_mask(dev, 0x1002c4, 0, 1 << 20);
- /* Reset the DLLs */
- nv_mask(dev, 0x1002c0, 0, 1 << 8);
+ /* Reset the DLLs */
+ nv_mask(dev, 0x1002c0, 0, 1 << 8);
+ }
}
- if (reg == NV_PRAMDAC_NVPLL_COEFF)
- ptimer->init(dev);
+ ptimer->init(dev);
kfree(state);
+ return 0;
}
-
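The nv04_pm.c rework above replaces per-clock callbacks with a single pre/set pair that computes both PLLs before anything is programmed. Its error-handling shape, stripped of the hardware access (calc_pll() here is only a placeholder for the get_pll_limits()/nouveau_calc_pll_mnp() steps):

#include <stdlib.h>
#include <stdio.h>

struct pm_clock { int mnp; };
struct pm_state { struct pm_clock core, memory; };

static int calc_pll(int khz, struct pm_clock *clk)
{
	if (khz <= 0)
		return -1;	/* stands in for limits/coefficient failures */
	clk->mnp = khz;		/* placeholder for the computed coefficients */
	return 0;
}

static struct pm_state *clocks_pre(int core_khz, int memory_khz)
{
	struct pm_state *info = calloc(1, sizeof(*info));
	if (!info)
		return NULL;
	if (calc_pll(core_khz, &info->core))
		goto error;
	if (memory_khz && calc_pll(memory_khz, &info->memory))
		goto error;
	return info;		/* nothing has touched the hardware yet */
error:
	free(info);
	return NULL;
}

int main(void)
{
	struct pm_state *s = clocks_pre(250000, 400000);
	if (s)
		printf("core=%d memory=%d\n", s->core.mnp, s->memory.mnp);
	free(s);
	return 0;
}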
diff --git a/drivers/gpu/drm/nouveau/nv04_timer.c b/drivers/gpu/drm/nouveau/nv04_timer.c
index 263301b809dd..55c945290e52 100644
--- a/drivers/gpu/drm/nouveau/nv04_timer.c
+++ b/drivers/gpu/drm/nouveau/nv04_timer.c
@@ -2,6 +2,7 @@
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"
+#include "nouveau_hw.h"
int
nv04_timer_init(struct drm_device *dev)
@@ -17,7 +18,7 @@ nv04_timer_init(struct drm_device *dev)
/* determine base clock for timer source */
if (dev_priv->chipset < 0x40) {
- n = dev_priv->engine.pm.clock_get(dev, PLL_CORE);
+ n = nouveau_hw_get_clock(dev, PLL_CORE);
} else
if (dev_priv->chipset == 0x40) {
/*XXX: figure this out */
diff --git a/drivers/gpu/drm/nouveau/nv10_gpio.c b/drivers/gpu/drm/nouveau/nv10_gpio.c
index 007fc29e2f86..550ad3fcf0af 100644
--- a/drivers/gpu/drm/nouveau/nv10_gpio.c
+++ b/drivers/gpu/drm/nouveau/nv10_gpio.c
@@ -27,66 +27,97 @@
#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_hw.h"
+#include "nouveau_gpio.h"
-static bool
-get_gpio_location(struct dcb_gpio_entry *ent, uint32_t *reg, uint32_t *shift,
- uint32_t *mask)
+int
+nv10_gpio_sense(struct drm_device *dev, int line)
{
- if (ent->line < 2) {
- *reg = NV_PCRTC_GPIO;
- *shift = ent->line * 16;
- *mask = 0x11;
-
- } else if (ent->line < 10) {
- *reg = NV_PCRTC_GPIO_EXT;
- *shift = (ent->line - 2) * 4;
- *mask = 0x3;
+ if (line < 2) {
+ line = line * 16;
+ line = NVReadCRTC(dev, 0, NV_PCRTC_GPIO) >> line;
+ return !!(line & 0x0100);
+ } else
+ if (line < 10) {
+ line = (line - 2) * 4;
+ line = NVReadCRTC(dev, 0, NV_PCRTC_GPIO_EXT) >> line;
+ return !!(line & 0x04);
+ } else
+ if (line < 14) {
+ line = (line - 10) * 4;
+ line = NVReadCRTC(dev, 0, NV_PCRTC_850) >> line;
+ return !!(line & 0x04);
+ }
- } else if (ent->line < 14) {
- *reg = NV_PCRTC_850;
- *shift = (ent->line - 10) * 4;
- *mask = 0x3;
+ return -EINVAL;
+}
+int
+nv10_gpio_drive(struct drm_device *dev, int line, int dir, int out)
+{
+ u32 reg, mask, data;
+
+ if (line < 2) {
+ line = line * 16;
+ reg = NV_PCRTC_GPIO;
+ mask = 0x00000011;
+ data = (dir << 4) | out;
+ } else
+ if (line < 10) {
+ line = (line - 2) * 4;
+ reg = NV_PCRTC_GPIO_EXT;
+ mask = 0x00000003 << ((line - 2) * 4);
+ data = (dir << 1) | out;
+ } else
+ if (line < 14) {
+ line = (line - 10) * 4;
+ reg = NV_PCRTC_850;
+ mask = 0x00000003;
+ data = (dir << 1) | out;
} else {
- return false;
+ return -EINVAL;
}
- return true;
+ mask = NVReadCRTC(dev, 0, reg) & ~(mask << line);
+ NVWriteCRTC(dev, 0, reg, mask | (data << line));
+ return 0;
}
-int
-nv10_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag)
+void
+nv10_gpio_irq_enable(struct drm_device *dev, int line, bool on)
{
- struct dcb_gpio_entry *ent = nouveau_bios_gpio_entry(dev, tag);
- uint32_t reg, shift, mask, value;
+ u32 mask = 0x00010001 << line;
- if (!ent)
- return -ENODEV;
+ nv_wr32(dev, 0x001104, mask);
+ nv_mask(dev, 0x001144, mask, on ? mask : 0);
+}
- if (!get_gpio_location(ent, &reg, &shift, &mask))
- return -ENODEV;
+static void
+nv10_gpio_isr(struct drm_device *dev)
+{
+ u32 intr = nv_rd32(dev, 0x1104);
+ u32 hi = (intr & 0x0000ffff) >> 0;
+ u32 lo = (intr & 0xffff0000) >> 16;
- value = NVReadCRTC(dev, 0, reg) >> shift;
+ nouveau_gpio_isr(dev, 0, hi | lo);
- return (ent->invert ? 1 : 0) ^ (value & 1);
+ nv_wr32(dev, 0x001104, intr);
}
int
-nv10_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state)
+nv10_gpio_init(struct drm_device *dev)
{
- struct dcb_gpio_entry *ent = nouveau_bios_gpio_entry(dev, tag);
- uint32_t reg, shift, mask, value;
-
- if (!ent)
- return -ENODEV;
-
- if (!get_gpio_location(ent, &reg, &shift, &mask))
- return -ENODEV;
-
- value = ((ent->invert ? 1 : 0) ^ (state ? 1 : 0)) << shift;
- mask = ~(mask << shift);
-
- NVWriteCRTC(dev, 0, reg, value | (NVReadCRTC(dev, 0, reg) & mask));
-
+ nv_wr32(dev, 0x001140, 0x00000000);
+ nv_wr32(dev, 0x001100, 0xffffffff);
+ nv_wr32(dev, 0x001144, 0x00000000);
+ nv_wr32(dev, 0x001104, 0xffffffff);
+ nouveau_irq_register(dev, 28, nv10_gpio_isr); /* PBUS */
return 0;
}
+
+void
+nv10_gpio_fini(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x001140, 0x00000000);
+ nv_wr32(dev, 0x001144, 0x00000000);
+ nouveau_irq_unregister(dev, 28);
+}
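The rewritten nv10_gpio.c above folds the old get_gpio_location() lookup into the sense/drive helpers, working on raw line numbers rather than DCB tags. The register selection it encodes is easy to show in isolation (the register names are labels only):

#include <stdio.h>

int main(void)
{
	int line;

	for (line = 0; line < 14; line++) {
		if (line < 2)
			printf("line %2d -> NV_PCRTC_GPIO,     shift %2d\n",
			       line, line * 16);
		else if (line < 10)
			printf("line %2d -> NV_PCRTC_GPIO_EXT, shift %2d\n",
			       line, (line - 2) * 4);
		else
			printf("line %2d -> NV_PCRTC_850,      shift %2d\n",
			       line, (line - 10) * 4);
	}
	return 0;
}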
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c
index 3900cebba560..696d7e7dc2a0 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv.c
+++ b/drivers/gpu/drm/nouveau/nv17_tv.c
@@ -30,6 +30,7 @@
#include "nouveau_encoder.h"
#include "nouveau_connector.h"
#include "nouveau_crtc.h"
+#include "nouveau_gpio.h"
#include "nouveau_hw.h"
#include "nv17_tv.h"
@@ -37,7 +38,6 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpio_engine *gpio = &dev_priv->engine.gpio;
uint32_t testval, regoffset = nv04_dac_output_offset(encoder);
uint32_t gpio0, gpio1, fp_htotal, fp_hsync_start, fp_hsync_end,
fp_control, test_ctrl, dacclk, ctv_14, ctv_1c, ctv_6c;
@@ -53,8 +53,8 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
head = (dacclk & 0x100) >> 8;
/* Save the previous state. */
- gpio1 = gpio->get(dev, DCB_GPIO_TVDAC1);
- gpio0 = gpio->get(dev, DCB_GPIO_TVDAC0);
+ gpio1 = nouveau_gpio_func_get(dev, DCB_GPIO_TVDAC1);
+ gpio0 = nouveau_gpio_func_get(dev, DCB_GPIO_TVDAC0);
fp_htotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL);
fp_hsync_start = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START);
fp_hsync_end = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END);
@@ -65,8 +65,8 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
ctv_6c = NVReadRAMDAC(dev, head, 0x680c6c);
/* Prepare the DAC for load detection. */
- gpio->set(dev, DCB_GPIO_TVDAC1, true);
- gpio->set(dev, DCB_GPIO_TVDAC0, true);
+ nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC1, true);
+ nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC0, true);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, 1343);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, 1047);
@@ -111,8 +111,8 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END, fp_hsync_end);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, fp_hsync_start);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, fp_htotal);
- gpio->set(dev, DCB_GPIO_TVDAC1, gpio1);
- gpio->set(dev, DCB_GPIO_TVDAC0, gpio0);
+ nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC1, gpio1);
+ nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC0, gpio0);
return sample;
}
@@ -357,8 +357,6 @@ static bool nv17_tv_mode_fixup(struct drm_encoder *encoder,
static void nv17_tv_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpio_engine *gpio = &dev_priv->engine.gpio;
struct nv17_tv_state *regs = &to_tv_enc(encoder)->state;
struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
@@ -383,8 +381,8 @@ static void nv17_tv_dpms(struct drm_encoder *encoder, int mode)
nv_load_ptv(dev, regs, 200);
- gpio->set(dev, DCB_GPIO_TVDAC1, mode == DRM_MODE_DPMS_ON);
- gpio->set(dev, DCB_GPIO_TVDAC0, mode == DRM_MODE_DPMS_ON);
+ nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC1, mode == DRM_MODE_DPMS_ON);
+ nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC0, mode == DRM_MODE_DPMS_ON);
nv04_dac_update_dacclk(encoder, mode == DRM_MODE_DPMS_ON);
}
diff --git a/drivers/gpu/drm/nouveau/nv40_pm.c b/drivers/gpu/drm/nouveau/nv40_pm.c
index e676b0d53478..c7615381c5d9 100644
--- a/drivers/gpu/drm/nouveau/nv40_pm.c
+++ b/drivers/gpu/drm/nouveau/nv40_pm.c
@@ -222,7 +222,7 @@ nv40_pm_gr_idle(void *data)
return true;
}
-void
+int
nv40_pm_clocks_set(struct drm_device *dev, void *pre_state)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -231,7 +231,7 @@ nv40_pm_clocks_set(struct drm_device *dev, void *pre_state)
struct bit_entry M;
u32 crtc_mask = 0;
u8 sr1[2];
- int i;
+ int i, ret = -EAGAIN;
/* determine which CRTCs are active, fetch VGA_SR1 for each */
for (i = 0; i < 2; i++) {
@@ -263,6 +263,8 @@ nv40_pm_clocks_set(struct drm_device *dev, void *pre_state)
if (!nv_wait_cb(dev, nv40_pm_gr_idle, dev))
goto resume;
+ ret = 0;
+
/* set engine clocks */
nv_mask(dev, 0x00c040, 0x00000333, 0x00000000);
nv_wr32(dev, 0x004004, info->npll_coef);
@@ -345,4 +347,48 @@ resume:
spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
kfree(info);
+ return ret;
+}
+
+int
+nv40_pm_pwm_get(struct drm_device *dev, int line, u32 *divs, u32 *duty)
+{
+ if (line == 2) {
+ u32 reg = nv_rd32(dev, 0x0010f0);
+ if (reg & 0x80000000) {
+ *duty = (reg & 0x7fff0000) >> 16;
+ *divs = (reg & 0x00007fff);
+ return 0;
+ }
+ } else
+ if (line == 9) {
+ u32 reg = nv_rd32(dev, 0x0015f4);
+ if (reg & 0x80000000) {
+ *divs = nv_rd32(dev, 0x0015f8);
+ *duty = (reg & 0x7fffffff);
+ return 0;
+ }
+ } else {
+ NV_ERROR(dev, "unknown pwm ctrl for gpio %d\n", line);
+ return -ENODEV;
+ }
+
+ return -EINVAL;
+}
+
+int
+nv40_pm_pwm_set(struct drm_device *dev, int line, u32 divs, u32 duty)
+{
+ if (line == 2) {
+ nv_wr32(dev, 0x0010f0, 0x80000000 | (duty << 16) | divs);
+ } else
+ if (line == 9) {
+ nv_wr32(dev, 0x0015f8, divs);
+ nv_wr32(dev, 0x0015f4, duty | 0x80000000);
+ } else {
+ NV_ERROR(dev, "unknown pwm ctrl for gpio %d\n", line);
+ return -ENODEV;
+ }
+
+ return 0;
}
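nv40_pm_pwm_get()/_set() above expose the fan PWM through two register layouts: GPIO line 2 packs enable, duty and divider into one word, while line 9 splits them across two registers. A standalone sketch of the line-2 packing and decode (the register offset itself is omitted):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t divs = 0x0210, duty = 0x0108;
	uint32_t reg = 0x80000000 | (duty << 16) | divs;	/* line 2 layout */

	/* Decode it back the way nv40_pm_pwm_get() does. */
	if (reg & 0x80000000)
		printf("duty=0x%04x divs=0x%04x\n",
		       (reg & 0x7fff0000) >> 16, reg & 0x00007fff);
	return 0;
}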
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index 882080e0b4f5..8f6c2ace3adf 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -132,33 +132,42 @@ nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
}
static int
-nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool on, bool update)
+nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update)
{
- struct drm_device *dev = nv_crtc->base.dev;
- struct nouveau_channel *evo = nv50_display(dev)->master;
- int ret;
-
- NV_DEBUG_KMS(dev, "\n");
+ struct nouveau_channel *evo = nv50_display(nv_crtc->base.dev)->master;
+ struct nouveau_connector *nv_connector;
+ struct drm_connector *connector;
+ int head = nv_crtc->index, ret;
+ u32 mode = 0x00;
- ret = RING_SPACE(evo, 2 + (update ? 2 : 0));
- if (ret) {
- NV_ERROR(dev, "no space while setting dither\n");
- return ret;
+ nv_connector = nouveau_crtc_connector_get(nv_crtc);
+ connector = &nv_connector->base;
+ if (nv_connector->dithering_mode == DITHERING_MODE_AUTO) {
+ if (nv_crtc->base.fb->depth > connector->display_info.bpc * 3)
+ mode = DITHERING_MODE_DYNAMIC2X2;
+ } else {
+ mode = nv_connector->dithering_mode;
}
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, DITHER_CTRL), 1);
- if (on)
- OUT_RING(evo, NV50_EVO_CRTC_DITHER_CTRL_ON);
- else
- OUT_RING(evo, NV50_EVO_CRTC_DITHER_CTRL_OFF);
+ if (nv_connector->dithering_depth == DITHERING_DEPTH_AUTO) {
+ if (connector->display_info.bpc >= 8)
+ mode |= DITHERING_DEPTH_8BPC;
+ } else {
+ mode |= nv_connector->dithering_depth;
+ }
- if (update) {
- BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
- OUT_RING(evo, 0);
- FIRE_RING(evo);
+ ret = RING_SPACE(evo, 2 + (update ? 2 : 0));
+ if (ret == 0) {
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(head, DITHER_CTRL), 1);
+ OUT_RING (evo, mode);
+ if (update) {
+ BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
+ OUT_RING (evo, 0);
+ FIRE_RING (evo);
+ }
}
- return 0;
+ return ret;
}
struct nouveau_connector *
@@ -180,80 +189,103 @@ nouveau_crtc_connector_get(struct nouveau_crtc *nv_crtc)
}
static int
-nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, int scaling_mode, bool update)
+nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update)
{
- struct nouveau_connector *nv_connector =
- nouveau_crtc_connector_get(nv_crtc);
- struct drm_device *dev = nv_crtc->base.dev;
+ struct nouveau_connector *nv_connector;
+ struct drm_crtc *crtc = &nv_crtc->base;
+ struct drm_device *dev = crtc->dev;
struct nouveau_channel *evo = nv50_display(dev)->master;
- struct drm_display_mode *native_mode = NULL;
- struct drm_display_mode *mode = &nv_crtc->base.mode;
- uint32_t outX, outY, horiz, vert;
- int ret;
+ struct drm_display_mode *umode = &crtc->mode;
+ struct drm_display_mode *omode;
+ int scaling_mode, ret;
+ u32 ctrl = 0, oX, oY;
NV_DEBUG_KMS(dev, "\n");
- switch (scaling_mode) {
- case DRM_MODE_SCALE_NONE:
- break;
- default:
- if (!nv_connector || !nv_connector->native_mode) {
- NV_ERROR(dev, "No native mode, forcing panel scaling\n");
- scaling_mode = DRM_MODE_SCALE_NONE;
+ nv_connector = nouveau_crtc_connector_get(nv_crtc);
+ if (!nv_connector || !nv_connector->native_mode) {
+ NV_ERROR(dev, "no native mode, forcing panel scaling\n");
+ scaling_mode = DRM_MODE_SCALE_NONE;
+ } else {
+ scaling_mode = nv_connector->scaling_mode;
+ }
+
+ /* start off at the resolution we programmed the crtc for, this
+ * effectively handles NONE/FULL scaling
+ */
+ if (scaling_mode != DRM_MODE_SCALE_NONE)
+ omode = nv_connector->native_mode;
+ else
+ omode = umode;
+
+ oX = omode->hdisplay;
+ oY = omode->vdisplay;
+ if (omode->flags & DRM_MODE_FLAG_DBLSCAN)
+ oY *= 2;
+
+ /* add overscan compensation if necessary, will keep the aspect
+ * ratio the same as the backend mode unless overridden by the
+ * user setting both hborder and vborder properties.
+ */
+ if (nv_connector && ( nv_connector->underscan == UNDERSCAN_ON ||
+ (nv_connector->underscan == UNDERSCAN_AUTO &&
+ nv_connector->edid &&
+ drm_detect_hdmi_monitor(nv_connector->edid)))) {
+ u32 bX = nv_connector->underscan_hborder;
+ u32 bY = nv_connector->underscan_vborder;
+ u32 aspect = (oY << 19) / oX;
+
+ if (bX) {
+ oX -= (bX * 2);
+ if (bY) oY -= (bY * 2);
+ else oY = ((oX * aspect) + (aspect / 2)) >> 19;
} else {
- native_mode = nv_connector->native_mode;
+ oX -= (oX >> 4) + 32;
+ if (bY) oY -= (bY * 2);
+ else oY = ((oX * aspect) + (aspect / 2)) >> 19;
}
- break;
}
+ /* handle CENTER/ASPECT scaling, taking into account the areas
+ * removed already for overscan compensation
+ */
switch (scaling_mode) {
+ case DRM_MODE_SCALE_CENTER:
+ oX = min((u32)umode->hdisplay, oX);
+ oY = min((u32)umode->vdisplay, oY);
+ /* fall-through */
case DRM_MODE_SCALE_ASPECT:
- horiz = (native_mode->hdisplay << 19) / mode->hdisplay;
- vert = (native_mode->vdisplay << 19) / mode->vdisplay;
-
- if (vert > horiz) {
- outX = (mode->hdisplay * horiz) >> 19;
- outY = (mode->vdisplay * horiz) >> 19;
+ if (oY < oX) {
+ u32 aspect = (umode->hdisplay << 19) / umode->vdisplay;
+ oX = ((oY * aspect) + (aspect / 2)) >> 19;
} else {
- outX = (mode->hdisplay * vert) >> 19;
- outY = (mode->vdisplay * vert) >> 19;
+ u32 aspect = (umode->vdisplay << 19) / umode->hdisplay;
+ oY = ((oX * aspect) + (aspect / 2)) >> 19;
}
break;
- case DRM_MODE_SCALE_FULLSCREEN:
- outX = native_mode->hdisplay;
- outY = native_mode->vdisplay;
- break;
- case DRM_MODE_SCALE_CENTER:
- case DRM_MODE_SCALE_NONE:
default:
- outX = mode->hdisplay;
- outY = mode->vdisplay;
break;
}
- ret = RING_SPACE(evo, update ? 7 : 5);
+ if (umode->hdisplay != oX || umode->vdisplay != oY ||
+ umode->flags & DRM_MODE_FLAG_INTERLACE ||
+ umode->flags & DRM_MODE_FLAG_DBLSCAN)
+ ctrl |= NV50_EVO_CRTC_SCALE_CTRL_ACTIVE;
+
+ ret = RING_SPACE(evo, 5);
if (ret)
return ret;
- /* Got a better name for SCALER_ACTIVE? */
- /* One day i've got to really figure out why this is needed. */
BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_CTRL), 1);
- if ((mode->flags & DRM_MODE_FLAG_DBLSCAN) ||
- (mode->flags & DRM_MODE_FLAG_INTERLACE) ||
- mode->hdisplay != outX || mode->vdisplay != outY) {
- OUT_RING(evo, NV50_EVO_CRTC_SCALE_CTRL_ACTIVE);
- } else {
- OUT_RING(evo, NV50_EVO_CRTC_SCALE_CTRL_INACTIVE);
- }
-
+ OUT_RING (evo, ctrl);
BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_RES1), 2);
- OUT_RING(evo, outY << 16 | outX);
- OUT_RING(evo, outY << 16 | outX);
+ OUT_RING (evo, oY << 16 | oX);
+ OUT_RING (evo, oY << 16 | oX);
if (update) {
- BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
- OUT_RING(evo, 0);
- FIRE_RING(evo);
+ nv50_display_flip_stop(crtc);
+ nv50_display_sync(dev);
+ nv50_display_flip_next(crtc, crtc->fb, NULL);
}
return 0;
@@ -333,7 +365,6 @@ nv50_crtc_destroy(struct drm_crtc *crtc)
nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
nouveau_bo_unmap(nv_crtc->cursor.nvbo);
nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
- kfree(nv_crtc->mode);
kfree(nv_crtc);
}
@@ -441,39 +472,6 @@ nv50_crtc_dpms(struct drm_crtc *crtc, int mode)
{
}
-static int
-nv50_crtc_wait_complete(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
- struct nv50_display *disp = nv50_display(dev);
- struct nouveau_channel *evo = disp->master;
- u64 start;
- int ret;
-
- ret = RING_SPACE(evo, 6);
- if (ret)
- return ret;
- BEGIN_RING(evo, 0, 0x0084, 1);
- OUT_RING (evo, 0x80000000);
- BEGIN_RING(evo, 0, 0x0080, 1);
- OUT_RING (evo, 0);
- BEGIN_RING(evo, 0, 0x0084, 1);
- OUT_RING (evo, 0x00000000);
-
- nv_wo32(disp->ntfy, 0x000, 0x00000000);
- FIRE_RING (evo);
-
- start = ptimer->read(dev);
- do {
- if (nv_ro32(disp->ntfy, 0x000))
- return 0;
- } while (ptimer->read(dev) - start < 2000000000ULL);
-
- return -EBUSY;
-}
-
static void
nv50_crtc_prepare(struct drm_crtc *crtc)
{
@@ -497,7 +495,7 @@ nv50_crtc_commit(struct drm_crtc *crtc)
nv50_crtc_blank(nv_crtc, false);
drm_vblank_post_modeset(dev, nv_crtc->index);
- nv50_crtc_wait_complete(crtc);
+ nv50_display_sync(dev);
nv50_display_flip_next(crtc, crtc->fb, NULL);
}
@@ -593,90 +591,76 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
}
static int
-nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode, int x, int y,
+nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
+ struct drm_display_mode *mode, int x, int y,
struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
struct nouveau_channel *evo = nv50_display(dev)->master;
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct nouveau_connector *nv_connector = NULL;
- uint32_t hsync_dur, vsync_dur, hsync_start_to_end, vsync_start_to_end;
- uint32_t hunk1, vunk1, vunk2a, vunk2b;
+ u32 head = nv_crtc->index * 0x400;
+ u32 ilace = (mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 1;
+ u32 vscan = (mode->flags & DRM_MODE_FLAG_DBLSCAN) ? 2 : 1;
+ u32 hactive, hsynce, hbackp, hfrontp, hblanke, hblanks;
+ u32 vactive, vsynce, vbackp, vfrontp, vblanke, vblanks;
+ u32 vblan2e = 0, vblan2s = 1;
int ret;
- /* Find the connector attached to this CRTC */
- nv_connector = nouveau_crtc_connector_get(nv_crtc);
-
- *nv_crtc->mode = *adjusted_mode;
-
- NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
+ /* hw timing description looks like this:
+ *
+ * <sync> <back porch> <---------display---------> <front porch>
+ * ______
+ * |____________|---------------------------|____________|
+ *
+ * ^ synce ^ blanke ^ blanks ^ active
+ *
+ * interlaced modes also have 2 additional values pointing at the end
+ * and start of the next field's blanking period.
+ */
- hsync_dur = adjusted_mode->hsync_end - adjusted_mode->hsync_start;
- vsync_dur = adjusted_mode->vsync_end - adjusted_mode->vsync_start;
- hsync_start_to_end = adjusted_mode->htotal - adjusted_mode->hsync_start;
- vsync_start_to_end = adjusted_mode->vtotal - adjusted_mode->vsync_start;
- /* I can't give this a proper name, anyone else can? */
- hunk1 = adjusted_mode->htotal -
- adjusted_mode->hsync_start + adjusted_mode->hdisplay;
- vunk1 = adjusted_mode->vtotal -
- adjusted_mode->vsync_start + adjusted_mode->vdisplay;
- /* Another strange value, this time only for interlaced adjusted_modes. */
- vunk2a = 2 * adjusted_mode->vtotal -
- adjusted_mode->vsync_start + adjusted_mode->vdisplay;
- vunk2b = adjusted_mode->vtotal -
- adjusted_mode->vsync_start + adjusted_mode->vtotal;
-
- if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
- vsync_dur /= 2;
- vsync_start_to_end /= 2;
- vunk1 /= 2;
- vunk2a /= 2;
- vunk2b /= 2;
- /* magic */
- if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) {
- vsync_start_to_end -= 1;
- vunk1 -= 1;
- vunk2a -= 1;
- vunk2b -= 1;
- }
+ hactive = mode->htotal;
+ hsynce = mode->hsync_end - mode->hsync_start - 1;
+ hbackp = mode->htotal - mode->hsync_end;
+ hblanke = hsynce + hbackp;
+ hfrontp = mode->hsync_start - mode->hdisplay;
+ hblanks = mode->htotal - hfrontp - 1;
+
+ vactive = mode->vtotal * vscan / ilace;
+ vsynce = ((mode->vsync_end - mode->vsync_start) * vscan / ilace) - 1;
+ vbackp = (mode->vtotal - mode->vsync_end) * vscan / ilace;
+ vblanke = vsynce + vbackp;
+ vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace;
+ vblanks = vactive - vfrontp - 1;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ vblan2e = vactive + vsynce + vbackp;
+ vblan2s = vblan2e + (mode->vdisplay * vscan / ilace);
+ vactive = (vactive * 2) + 1;
}
- ret = RING_SPACE(evo, 17);
- if (ret)
- return ret;
-
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CLOCK), 2);
- OUT_RING(evo, adjusted_mode->clock | 0x800000);
- OUT_RING(evo, (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 0);
-
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, DISPLAY_START), 5);
- OUT_RING(evo, 0);
- OUT_RING(evo, (adjusted_mode->vtotal << 16) | adjusted_mode->htotal);
- OUT_RING(evo, (vsync_dur - 1) << 16 | (hsync_dur - 1));
- OUT_RING(evo, (vsync_start_to_end - 1) << 16 |
- (hsync_start_to_end - 1));
- OUT_RING(evo, (vunk1 - 1) << 16 | (hunk1 - 1));
-
- if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, UNK0824), 1);
- OUT_RING(evo, (vunk2b - 1) << 16 | (vunk2a - 1));
- } else {
- OUT_RING(evo, 0);
- OUT_RING(evo, 0);
+ ret = RING_SPACE(evo, 18);
+ if (ret == 0) {
+ BEGIN_RING(evo, 0, 0x0804 + head, 2);
+ OUT_RING (evo, 0x00800000 | mode->clock);
+ OUT_RING (evo, (ilace == 2) ? 2 : 0);
+ BEGIN_RING(evo, 0, 0x0810 + head, 6);
+ OUT_RING (evo, 0x00000000); /* border colour */
+ OUT_RING (evo, (vactive << 16) | hactive);
+ OUT_RING (evo, ( vsynce << 16) | hsynce);
+ OUT_RING (evo, (vblanke << 16) | hblanke);
+ OUT_RING (evo, (vblanks << 16) | hblanks);
+ OUT_RING (evo, (vblan2e << 16) | vblan2s);
+ BEGIN_RING(evo, 0, 0x082c + head, 1);
+ OUT_RING (evo, 0x00000000);
+ BEGIN_RING(evo, 0, 0x0900 + head, 1);
+ OUT_RING (evo, 0x00000311); /* makes sync channel work */
+ BEGIN_RING(evo, 0, 0x08c8 + head, 1);
+ OUT_RING (evo, (umode->vdisplay << 16) | umode->hdisplay);
+ BEGIN_RING(evo, 0, 0x08d4 + head, 1);
+ OUT_RING (evo, 0x00000000); /* screen position */
}
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, UNK082C), 1);
- OUT_RING(evo, 0);
-
- /* This is the actual resolution of the mode. */
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, REAL_RES), 1);
- OUT_RING(evo, (mode->vdisplay << 16) | mode->hdisplay);
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_CENTER_OFFSET), 1);
- OUT_RING(evo, NV50_EVO_CRTC_SCALE_CENTER_OFFSET_VAL(0, 0));
-
- nv_crtc->set_dither(nv_crtc, nv_connector->use_dithering, false);
- nv_crtc->set_scale(nv_crtc, nv_connector->scaling_mode, false);
+ nv_crtc->set_dither(nv_crtc, false);
+ nv_crtc->set_scale(nv_crtc, false);
return nv50_crtc_do_mode_set_base(crtc, old_fb, x, y, false);
}
@@ -692,7 +676,7 @@ nv50_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
if (ret)
return ret;
- ret = nv50_crtc_wait_complete(crtc);
+ ret = nv50_display_sync(crtc->dev);
if (ret)
return ret;
@@ -711,7 +695,7 @@ nv50_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
if (ret)
return ret;
- return nv50_crtc_wait_complete(crtc);
+ return nv50_display_sync(crtc->dev);
}
static const struct drm_crtc_helper_funcs nv50_crtc_helper_funcs = {
@@ -737,12 +721,6 @@ nv50_crtc_create(struct drm_device *dev, int index)
if (!nv_crtc)
return -ENOMEM;
- nv_crtc->mode = kzalloc(sizeof(*nv_crtc->mode), GFP_KERNEL);
- if (!nv_crtc->mode) {
- kfree(nv_crtc);
- return -ENOMEM;
- }
-
/* Default CLUT parameters, will be activated on the hw upon
* first mode set.
*/
@@ -764,7 +742,6 @@ nv50_crtc_create(struct drm_device *dev, int index)
}
if (ret) {
- kfree(nv_crtc->mode);
kfree(nv_crtc);
return ret;
}
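The new nv50_crtc_mode_set() above derives the EVO timing words directly from the DRM mode, following the sync / back porch / display / front porch diagram in the added comment. Plugging a common 1024x768@60 mode into the same arithmetic (the timing numbers are sample input only):

#include <stdio.h>

struct mode { int hd, hss, hse, ht, vd, vss, vse, vt; };

int main(void)
{
	/* 1024x768@60 timings used purely as sample input. */
	struct mode m = { 1024, 1048, 1184, 1344, 768, 771, 777, 806 };
	int ilace = 1, vscan = 1;	/* progressive, no doublescan */

	int hactive = m.ht;
	int hsynce  = m.hse - m.hss - 1;
	int hbackp  = m.ht - m.hse;
	int hblanke = hsynce + hbackp;
	int hfrontp = m.hss - m.hd;
	int hblanks = m.ht - hfrontp - 1;

	int vactive = m.vt * vscan / ilace;
	int vsynce  = ((m.vse - m.vss) * vscan / ilace) - 1;
	int vbackp  = (m.vt - m.vse) * vscan / ilace;
	int vblanke = vsynce + vbackp;
	int vfrontp = (m.vss - m.vd) * vscan / ilace;
	int vblanks = vactive - vfrontp - 1;

	printf("h: active=%d synce=%d blanke=%d blanks=%d\n",
	       hactive, hsynce, hblanke, hblanks);
	printf("v: active=%d synce=%d blanke=%d blanks=%d\n",
	       vactive, vsynce, vblanke, vblanks);
	return 0;
}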
diff --git a/drivers/gpu/drm/nouveau/nv50_dac.c b/drivers/gpu/drm/nouveau/nv50_dac.c
index 808f3ec8f827..a0f2bebf49e3 100644
--- a/drivers/gpu/drm/nouveau/nv50_dac.c
+++ b/drivers/gpu/drm/nouveau/nv50_dac.c
@@ -200,11 +200,6 @@ nv50_dac_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
}
static void
-nv50_dac_prepare(struct drm_encoder *encoder)
-{
-}
-
-static void
nv50_dac_commit(struct drm_encoder *encoder)
{
}
@@ -266,7 +261,7 @@ static const struct drm_encoder_helper_funcs nv50_dac_helper_funcs = {
.save = nv50_dac_save,
.restore = nv50_dac_restore,
.mode_fixup = nv50_dac_mode_fixup,
- .prepare = nv50_dac_prepare,
+ .prepare = nv50_dac_disconnect,
.commit = nv50_dac_commit,
.mode_set = nv50_dac_mode_set,
.get_crtc = nv50_dac_crtc_get,
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index d23ca00e7d62..7ba28e08ee31 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -50,9 +50,53 @@ nv50_sor_nr(struct drm_device *dev)
return 4;
}
+static int
+evo_icmd(struct drm_device *dev, int ch, u32 mthd, u32 data)
+{
+ int ret = 0;
+ nv_mask(dev, 0x610300 + (ch * 0x08), 0x00000001, 0x00000001);
+ nv_wr32(dev, 0x610304 + (ch * 0x08), data);
+ nv_wr32(dev, 0x610300 + (ch * 0x08), 0x80000001 | mthd);
+ if (!nv_wait(dev, 0x610300 + (ch * 0x08), 0x80000000, 0x00000000))
+ ret = -EBUSY;
+ if (ret || (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO))
+ NV_INFO(dev, "EvoPIO: %d 0x%04x 0x%08x\n", ch, mthd, data);
+ nv_mask(dev, 0x610300 + (ch * 0x08), 0x00000001, 0x00000000);
+ return ret;
+}
+
int
nv50_display_early_init(struct drm_device *dev)
{
+ u32 ctrl = nv_rd32(dev, 0x610200);
+ int i;
+
+ /* check if master evo channel is already active, as good a sign as any
+ * that the display engine is in a weird state (hibernate/kexec), if
+ * it is, do our best to reset the display engine...
+ */
+ if ((ctrl & 0x00000003) == 0x00000003) {
+ NV_INFO(dev, "PDISP: EVO(0) 0x%08x, resetting...\n", ctrl);
+
+ /* deactivate both heads first, PDISP will disappear forever
+ * (well, until you power cycle) on some boards as soon as
+ * PMC_ENABLE is hit unless they are..
+ */
+ for (i = 0; i < 2; i++) {
+ evo_icmd(dev, 0, 0x0880 + (i * 0x400), 0x05000000);
+ evo_icmd(dev, 0, 0x089c + (i * 0x400), 0);
+ evo_icmd(dev, 0, 0x0840 + (i * 0x400), 0);
+ evo_icmd(dev, 0, 0x0844 + (i * 0x400), 0);
+ evo_icmd(dev, 0, 0x085c + (i * 0x400), 0);
+ evo_icmd(dev, 0, 0x0874 + (i * 0x400), 0);
+ }
+ evo_icmd(dev, 0, 0x0080, 0);
+
+ /* reset PDISP */
+ nv_mask(dev, 0x000200, 0x40000000, 0x00000000);
+ nv_mask(dev, 0x000200, 0x40000000, 0x40000000);
+ }
+
return 0;
}
@@ -62,11 +106,40 @@ nv50_display_late_takedown(struct drm_device *dev)
}
int
-nv50_display_init(struct drm_device *dev)
+nv50_display_sync(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
- struct drm_connector *connector;
+ struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
+ struct nv50_display *disp = nv50_display(dev);
+ struct nouveau_channel *evo = disp->master;
+ u64 start;
+ int ret;
+
+ ret = RING_SPACE(evo, 6);
+ if (ret == 0) {
+ BEGIN_RING(evo, 0, 0x0084, 1);
+ OUT_RING (evo, 0x80000000);
+ BEGIN_RING(evo, 0, 0x0080, 1);
+ OUT_RING (evo, 0);
+ BEGIN_RING(evo, 0, 0x0084, 1);
+ OUT_RING (evo, 0x00000000);
+
+ nv_wo32(disp->ntfy, 0x000, 0x00000000);
+ FIRE_RING (evo);
+
+ start = ptimer->read(dev);
+ do {
+ if (nv_ro32(disp->ntfy, 0x000))
+ return 0;
+ } while (ptimer->read(dev) - start < 2000000000ULL);
+ }
+
+ return -EBUSY;
+}
+
+int
+nv50_display_init(struct drm_device *dev)
+{
struct nouveau_channel *evo;
int ret, i;
u32 val;
@@ -161,16 +234,6 @@ nv50_display_init(struct drm_device *dev)
NV50_PDISPLAY_INTR_EN_1_CLK_UNK20 |
NV50_PDISPLAY_INTR_EN_1_CLK_UNK40);
- /* enable hotplug interrupts */
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- struct nouveau_connector *conn = nouveau_connector(connector);
-
- if (conn->dcb->gpio_tag == 0xff)
- continue;
-
- pgpio->irq_enable(dev, conn->dcb->gpio_tag, true);
- }
-
ret = nv50_evo_init(dev);
if (ret)
return ret;
@@ -178,36 +241,19 @@ nv50_display_init(struct drm_device *dev)
nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->vinst >> 8) | 9);
- ret = RING_SPACE(evo, 15);
+ ret = RING_SPACE(evo, 3);
if (ret)
return ret;
BEGIN_RING(evo, 0, NV50_EVO_UNK84, 2);
- OUT_RING(evo, NV50_EVO_UNK84_NOTIFY_DISABLED);
- OUT_RING(evo, NvEvoSync);
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, FB_DMA), 1);
- OUT_RING(evo, NV50_EVO_CRTC_FB_DMA_HANDLE_NONE);
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, UNK0800), 1);
- OUT_RING(evo, 0);
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, DISPLAY_START), 1);
- OUT_RING(evo, 0);
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, UNK082C), 1);
- OUT_RING(evo, 0);
- /* required to make display sync channels not hate life */
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, UNK900), 1);
- OUT_RING (evo, 0x00000311);
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(1, UNK900), 1);
- OUT_RING (evo, 0x00000311);
- FIRE_RING(evo);
- if (!nv_wait(dev, 0x640004, 0xffffffff, evo->dma.put << 2))
- NV_ERROR(dev, "evo pushbuf stalled\n");
-
+ OUT_RING (evo, NV50_EVO_UNK84_NOTIFY_DISABLED);
+ OUT_RING (evo, NvEvoSync);
- return 0;
+ return nv50_display_sync(dev);
}
-static int nv50_display_disable(struct drm_device *dev)
+void
+nv50_display_fini(struct drm_device *dev)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv50_display *disp = nv50_display(dev);
struct nouveau_channel *evo = disp->master;
struct drm_crtc *drm_crtc;
@@ -270,18 +316,10 @@ static int nv50_display_disable(struct drm_device *dev)
/* disable interrupts. */
nv_wr32(dev, NV50_PDISPLAY_INTR_EN_1, 0x00000000);
-
- /* disable hotplug interrupts */
- nv_wr32(dev, 0xe054, 0xffffffff);
- nv_wr32(dev, 0xe050, 0x00000000);
- if (dev_priv->chipset >= 0x90) {
- nv_wr32(dev, 0xe074, 0xffffffff);
- nv_wr32(dev, 0xe070, 0x00000000);
- }
- return 0;
}
-int nv50_display_create(struct drm_device *dev)
+int
+nv50_display_create(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct dcb_table *dcb = &dev_priv->vbios.dcb;
@@ -341,7 +379,7 @@ int nv50_display_create(struct drm_device *dev)
tasklet_init(&priv->tasklet, nv50_display_bh, (unsigned long)dev);
nouveau_irq_register(dev, 26, nv50_display_isr);
- ret = nv50_display_init(dev);
+ ret = nv50_evo_create(dev);
if (ret) {
nv50_display_destroy(dev);
return ret;
@@ -357,7 +395,7 @@ nv50_display_destroy(struct drm_device *dev)
NV_DEBUG_KMS(dev, "\n");
- nv50_display_disable(dev);
+ nv50_evo_destroy(dev);
nouveau_irq_unregister(dev, 26);
kfree(disp);
}
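nv50_display_sync() above replaces the old per-CRTC wait: it pushes a notifier write through the EVO channel and then polls the notifier for roughly two seconds. The control flow, with the ring and notifier reduced to plain variables (the bounded spin stands in for the ptimer-based timeout):

#include <stdio.h>

static volatile int ntfy;	/* stands in for the EVO notifier dword */

static void fire_ring(void)
{
	/* push the 0x0084/0x0080/0x0084 sequence and kick; here the
	 * "hardware" simply completes at once and writes the notifier. */
	ntfy = 1;
}

int main(void)
{
	int spins;

	ntfy = 0;
	fire_ring();

	for (spins = 0; spins < 1000000; spins++) {
		if (ntfy) {
			printf("display synced\n");
			return 0;
		}
	}
	printf("timed out (-EBUSY)\n");
	return 1;
}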
@@ -521,7 +559,7 @@ nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcb,
} else {
/* determine number of lvds links */
if (nv_connector && nv_connector->edid &&
- nv_connector->dcb->type == DCB_CONNECTOR_LVDS_SPWG) {
+ nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
/* http://www.spwg.org */
if (((u8 *)nv_connector->edid)[121] == 2)
script |= 0x0100;
@@ -616,7 +654,7 @@ nv50_display_unk10_handler(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv50_display *disp = nv50_display(dev);
u32 unk30 = nv_rd32(dev, 0x610030), mc;
- int i, crtc, or, type = OUTPUT_ANY;
+ int i, crtc, or = 0, type = OUTPUT_ANY;
NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30);
disp->irq.dcb = NULL;
@@ -708,7 +746,7 @@ nv50_display_unk20_handler(struct drm_device *dev)
struct nv50_display *disp = nv50_display(dev);
u32 unk30 = nv_rd32(dev, 0x610030), tmp, pclk, script, mc = 0;
struct dcb_entry *dcb;
- int i, crtc, or, type = OUTPUT_ANY;
+ int i, crtc, or = 0, type = OUTPUT_ANY;
NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30);
dcb = disp->irq.dcb;
@@ -722,8 +760,8 @@ nv50_display_unk20_handler(struct drm_device *dev)
if (crtc >= 0) {
pclk = nv_rd32(dev, NV50_PDISPLAY_CRTC_P(crtc, CLOCK));
pclk &= 0x003fffff;
-
- nv50_crtc_set_clock(dev, crtc, pclk);
+ if (pclk)
+ nv50_crtc_set_clock(dev, crtc, pclk);
tmp = nv_rd32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(crtc));
tmp &= ~0x000000f;
diff --git a/drivers/gpu/drm/nouveau/nv50_display.h b/drivers/gpu/drm/nouveau/nv50_display.h
index c2da503a22aa..95874f7c043c 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.h
+++ b/drivers/gpu/drm/nouveau/nv50_display.h
@@ -69,14 +69,18 @@ int nv50_display_early_init(struct drm_device *dev);
void nv50_display_late_takedown(struct drm_device *dev);
int nv50_display_create(struct drm_device *dev);
int nv50_display_init(struct drm_device *dev);
+void nv50_display_fini(struct drm_device *dev);
void nv50_display_destroy(struct drm_device *dev);
int nv50_crtc_blank(struct nouveau_crtc *, bool blank);
int nv50_crtc_set_clock(struct drm_device *, int head, int pclk);
+int nv50_display_sync(struct drm_device *);
int nv50_display_flip_next(struct drm_crtc *, struct drm_framebuffer *,
struct nouveau_channel *chan);
void nv50_display_flip_stop(struct drm_crtc *);
+int nv50_evo_create(struct drm_device *dev);
+void nv50_evo_destroy(struct drm_device *dev);
int nv50_evo_init(struct drm_device *dev);
void nv50_evo_fini(struct drm_device *dev);
void nv50_evo_dmaobj_init(struct nouveau_gpuobj *, u32 memtype, u64 base,
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c
index c99d9751880c..9b962e989d7c 100644
--- a/drivers/gpu/drm/nouveau/nv50_evo.c
+++ b/drivers/gpu/drm/nouveau/nv50_evo.c
@@ -218,7 +218,7 @@ nv50_evo_channel_fini(struct nouveau_channel *evo)
}
}
-static void
+void
nv50_evo_destroy(struct drm_device *dev)
{
struct nv50_display *disp = nv50_display(dev);
@@ -235,7 +235,7 @@ nv50_evo_destroy(struct drm_device *dev)
nv50_evo_channel_del(&disp->master);
}
-static int
+int
nv50_evo_create(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -388,12 +388,6 @@ nv50_evo_init(struct drm_device *dev)
struct nv50_display *disp = nv50_display(dev);
int ret, i;
- if (!disp->master) {
- ret = nv50_evo_create(dev);
- if (ret)
- return ret;
- }
-
ret = nv50_evo_channel_init(disp->master);
if (ret)
return ret;
@@ -420,6 +414,4 @@ nv50_evo_fini(struct drm_device *dev)
if (disp->master)
nv50_evo_channel_fini(disp->master);
-
- nv50_evo_destroy(dev);
}
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c
index c34a074f7ea1..3bc2a565c20b 100644
--- a/drivers/gpu/drm/nouveau/nv50_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv50_fifo.c
@@ -230,6 +230,7 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *ramfc = NULL;
+ uint64_t ib_offset = chan->pushbuf_base + chan->dma.ib_base * 4;
unsigned long flags;
int ret;
@@ -280,8 +281,9 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
nv_wo32(ramfc, 0x7c, 0x30000001);
nv_wo32(ramfc, 0x78, 0x00000000);
nv_wo32(ramfc, 0x3c, 0x403f6078);
- nv_wo32(ramfc, 0x50, chan->pushbuf_base + chan->dma.ib_base * 4);
- nv_wo32(ramfc, 0x54, drm_order(chan->dma.ib_max + 1) << 16);
+ nv_wo32(ramfc, 0x50, lower_32_bits(ib_offset));
+ nv_wo32(ramfc, 0x54, upper_32_bits(ib_offset) |
+ drm_order(chan->dma.ib_max + 1) << 16);
if (dev_priv->chipset != 0x50) {
nv_wo32(chan->ramin, 0, chan->id);
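The nv50_fifo.c change above widens the indirect-buffer offset to 64 bits before splitting it across the two RAMFC words; the split itself is just the lower/upper halves plus the ring-size order. A small sketch (the sample offset and order are made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t ib_offset = 0x10002000ULL;	/* pushbuf_base + ib_base * 4 */
	unsigned ib_order = 9;			/* log2(ib_max + 1), per drm_order() */

	uint32_t word50 = (uint32_t)ib_offset;			/* lower_32_bits() */
	uint32_t word54 = (uint32_t)(ib_offset >> 32) | (ib_order << 16);

	printf("0x50 = 0x%08x, 0x54 = 0x%08x\n", word50, word54);
	return 0;
}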
diff --git a/drivers/gpu/drm/nouveau/nv50_gpio.c b/drivers/gpu/drm/nouveau/nv50_gpio.c
index 793a5ccca121..f429e6a8ca7a 100644
--- a/drivers/gpu/drm/nouveau/nv50_gpio.c
+++ b/drivers/gpu/drm/nouveau/nv50_gpio.c
@@ -25,229 +25,95 @@
#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_hw.h"
+#include "nouveau_gpio.h"
#include "nv50_display.h"
-static void nv50_gpio_isr(struct drm_device *dev);
-static void nv50_gpio_isr_bh(struct work_struct *work);
-
-struct nv50_gpio_priv {
- struct list_head handlers;
- spinlock_t lock;
-};
-
-struct nv50_gpio_handler {
- struct drm_device *dev;
- struct list_head head;
- struct work_struct work;
- bool inhibit;
-
- struct dcb_gpio_entry *gpio;
-
- void (*handler)(void *data, int state);
- void *data;
-};
-
static int
-nv50_gpio_location(struct dcb_gpio_entry *gpio, uint32_t *reg, uint32_t *shift)
+nv50_gpio_location(int line, u32 *reg, u32 *shift)
{
const uint32_t nv50_gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };
- if (gpio->line >= 32)
+ if (line >= 32)
return -EINVAL;
- *reg = nv50_gpio_reg[gpio->line >> 3];
- *shift = (gpio->line & 7) << 2;
+ *reg = nv50_gpio_reg[line >> 3];
+ *shift = (line & 7) << 2;
return 0;
}
int
-nv50_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag)
+nv50_gpio_drive(struct drm_device *dev, int line, int dir, int out)
{
- struct dcb_gpio_entry *gpio;
- uint32_t r, s, v;
-
- gpio = nouveau_bios_gpio_entry(dev, tag);
- if (!gpio)
- return -ENOENT;
+ u32 reg, shift;
- if (nv50_gpio_location(gpio, &r, &s))
+ if (nv50_gpio_location(line, &reg, &shift))
return -EINVAL;
- v = nv_rd32(dev, r) >> (s + 2);
- return ((v & 1) == (gpio->state[1] & 1));
+ nv_mask(dev, reg, 7 << shift, (((dir ^ 1) << 1) | out) << shift);
+ return 0;
}
int
-nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state)
+nv50_gpio_sense(struct drm_device *dev, int line)
{
- struct dcb_gpio_entry *gpio;
- uint32_t r, s, v;
-
- gpio = nouveau_bios_gpio_entry(dev, tag);
- if (!gpio)
- return -ENOENT;
+ u32 reg, shift;
- if (nv50_gpio_location(gpio, &r, &s))
+ if (nv50_gpio_location(line, &reg, &shift))
return -EINVAL;
- v = nv_rd32(dev, r) & ~(0x3 << s);
- v |= (gpio->state[state] ^ 2) << s;
- nv_wr32(dev, r, v);
- return 0;
+ return !!(nv_rd32(dev, reg) & (4 << shift));
}
-int
-nvd0_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag)
+void
+nv50_gpio_irq_enable(struct drm_device *dev, int line, bool on)
{
- struct dcb_gpio_entry *gpio;
- u32 v;
-
- gpio = nouveau_bios_gpio_entry(dev, tag);
- if (!gpio)
- return -ENOENT;
+ u32 reg = line < 16 ? 0xe050 : 0xe070;
+ u32 mask = 0x00010001 << (line & 0xf);
- v = nv_rd32(dev, 0x00d610 + (gpio->line * 4));
- v &= 0x00004000;
- return (!!v == (gpio->state[1] & 1));
+ nv_wr32(dev, reg + 4, mask);
+ nv_mask(dev, reg + 0, mask, on ? mask : 0);
}
int
-nvd0_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state)
+nvd0_gpio_drive(struct drm_device *dev, int line, int dir, int out)
{
- struct dcb_gpio_entry *gpio;
- u32 v;
-
- gpio = nouveau_bios_gpio_entry(dev, tag);
- if (!gpio)
- return -ENOENT;
-
- v = gpio->state[state] ^ 2;
-
- nv_mask(dev, 0x00d610 + (gpio->line * 4), 0x00003000, v << 12);
+ u32 data = ((dir ^ 1) << 13) | (out << 12);
+ nv_mask(dev, 0x00d610 + (line * 4), 0x00003000, data);
+ nv_mask(dev, 0x00d604, 0x00000001, 0x00000001); /* update? */
return 0;
}
int
-nv50_gpio_irq_register(struct drm_device *dev, enum dcb_gpio_tag tag,
- void (*handler)(void *, int), void *data)
+nvd0_gpio_sense(struct drm_device *dev, int line)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
- struct nv50_gpio_priv *priv = pgpio->priv;
- struct nv50_gpio_handler *gpioh;
- struct dcb_gpio_entry *gpio;
- unsigned long flags;
-
- gpio = nouveau_bios_gpio_entry(dev, tag);
- if (!gpio)
- return -ENOENT;
-
- gpioh = kzalloc(sizeof(*gpioh), GFP_KERNEL);
- if (!gpioh)
- return -ENOMEM;
-
- INIT_WORK(&gpioh->work, nv50_gpio_isr_bh);
- gpioh->dev = dev;
- gpioh->gpio = gpio;
- gpioh->handler = handler;
- gpioh->data = data;
-
- spin_lock_irqsave(&priv->lock, flags);
- list_add(&gpioh->head, &priv->handlers);
- spin_unlock_irqrestore(&priv->lock, flags);
- return 0;
+ return !!(nv_rd32(dev, 0x00d610 + (line * 4)) & 0x00004000);
}
-void
-nv50_gpio_irq_unregister(struct drm_device *dev, enum dcb_gpio_tag tag,
- void (*handler)(void *, int), void *data)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
- struct nv50_gpio_priv *priv = pgpio->priv;
- struct nv50_gpio_handler *gpioh, *tmp;
- struct dcb_gpio_entry *gpio;
- LIST_HEAD(tofree);
- unsigned long flags;
-
- gpio = nouveau_bios_gpio_entry(dev, tag);
- if (!gpio)
- return;
-
- spin_lock_irqsave(&priv->lock, flags);
- list_for_each_entry_safe(gpioh, tmp, &priv->handlers, head) {
- if (gpioh->gpio != gpio ||
- gpioh->handler != handler ||
- gpioh->data != data)
- continue;
- list_move(&gpioh->head, &tofree);
- }
- spin_unlock_irqrestore(&priv->lock, flags);
-
- list_for_each_entry_safe(gpioh, tmp, &tofree, head) {
- flush_work_sync(&gpioh->work);
- kfree(gpioh);
- }
-}
-
-bool
-nv50_gpio_irq_enable(struct drm_device *dev, enum dcb_gpio_tag tag, bool on)
-{
- struct dcb_gpio_entry *gpio;
- u32 reg, mask;
-
- gpio = nouveau_bios_gpio_entry(dev, tag);
- if (!gpio)
- return false;
-
- reg = gpio->line < 16 ? 0xe050 : 0xe070;
- mask = 0x00010001 << (gpio->line & 0xf);
-
- nv_wr32(dev, reg + 4, mask);
- reg = nv_mask(dev, reg + 0, mask, on ? mask : 0);
- return (reg & mask) == mask;
-}
-
-static int
-nv50_gpio_create(struct drm_device *dev)
+static void
+nv50_gpio_isr(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
- struct nv50_gpio_priv *priv;
-
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ u32 intr0, intr1 = 0;
+ u32 hi, lo;
- INIT_LIST_HEAD(&priv->handlers);
- spin_lock_init(&priv->lock);
- pgpio->priv = priv;
- return 0;
-}
+ intr0 = nv_rd32(dev, 0xe054) & nv_rd32(dev, 0xe050);
+ if (dev_priv->chipset >= 0x90)
+ intr1 = nv_rd32(dev, 0xe074) & nv_rd32(dev, 0xe070);
-static void
-nv50_gpio_destroy(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+ hi = (intr0 & 0x0000ffff) | (intr1 << 16);
+ lo = (intr0 >> 16) | (intr1 & 0xffff0000);
+ nouveau_gpio_isr(dev, 0, hi | lo);
- kfree(pgpio->priv);
- pgpio->priv = NULL;
+ nv_wr32(dev, 0xe054, intr0);
+ if (dev_priv->chipset >= 0x90)
+ nv_wr32(dev, 0xe074, intr1);
}
int
nv50_gpio_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
- int ret;
-
- if (!pgpio->priv) {
- ret = nv50_gpio_create(dev);
- if (ret)
- return ret;
- }
/* disable, and ack any pending gpio interrupts */
nv_wr32(dev, 0xe050, 0x00000000);
@@ -270,64 +136,4 @@ nv50_gpio_fini(struct drm_device *dev)
if (dev_priv->chipset >= 0x90)
nv_wr32(dev, 0xe070, 0x00000000);
nouveau_irq_unregister(dev, 21);
-
- nv50_gpio_destroy(dev);
-}
-
-static void
-nv50_gpio_isr_bh(struct work_struct *work)
-{
- struct nv50_gpio_handler *gpioh =
- container_of(work, struct nv50_gpio_handler, work);
- struct drm_nouveau_private *dev_priv = gpioh->dev->dev_private;
- struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
- struct nv50_gpio_priv *priv = pgpio->priv;
- unsigned long flags;
- int state;
-
- state = pgpio->get(gpioh->dev, gpioh->gpio->tag);
- if (state < 0)
- return;
-
- gpioh->handler(gpioh->data, state);
-
- spin_lock_irqsave(&priv->lock, flags);
- gpioh->inhibit = false;
- spin_unlock_irqrestore(&priv->lock, flags);
-}
-
-static void
-nv50_gpio_isr(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
- struct nv50_gpio_priv *priv = pgpio->priv;
- struct nv50_gpio_handler *gpioh;
- u32 intr0, intr1 = 0;
- u32 hi, lo, ch;
-
- intr0 = nv_rd32(dev, 0xe054) & nv_rd32(dev, 0xe050);
- if (dev_priv->chipset >= 0x90)
- intr1 = nv_rd32(dev, 0xe074) & nv_rd32(dev, 0xe070);
-
- hi = (intr0 & 0x0000ffff) | (intr1 << 16);
- lo = (intr0 >> 16) | (intr1 & 0xffff0000);
- ch = hi | lo;
-
- nv_wr32(dev, 0xe054, intr0);
- if (dev_priv->chipset >= 0x90)
- nv_wr32(dev, 0xe074, intr1);
-
- spin_lock(&priv->lock);
- list_for_each_entry(gpioh, &priv->handlers, head) {
- if (!(ch & (1 << gpioh->gpio->line)))
- continue;
-
- if (gpioh->inhibit)
- continue;
- gpioh->inhibit = true;
-
- schedule_work(&gpioh->work);
- }
- spin_unlock(&priv->lock);
}
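After the rewrite above, nv50_gpio_drive()/sense() work on raw line numbers: each of the four GPIO control registers holds eight 4-bit cells, with the output/direction pair in bits 0-1 and the input level in bit 2. A quick model of that layout (the array indices only mirror the 0xe104/0xe108/0xe280/0xe284 ordering):

#include <stdint.h>
#include <stdio.h>

static uint32_t gpio_reg[4];	/* models the four per-8-line registers */

static void drive(int line, int dir, int out)
{
	int shift = (line & 7) << 2;
	uint32_t *reg = &gpio_reg[line >> 3];

	*reg = (*reg & ~(7u << shift)) | ((((dir ^ 1) << 1) | out) << shift);
}

static int sense(int line)
{
	return !!(gpio_reg[line >> 3] & (4u << ((line & 7) << 2)));
}

int main(void)
{
	drive(9, 1, 1);			/* request line 9 driven high */
	gpio_reg[1] |= 4u << 4;		/* pretend the pad follows: input bit set */
	printf("reg[1]=0x%08x sense(9)=%d\n", gpio_reg[1], sense(9));
	return 0;
}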
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index ac601f7c4e1a..33d5711a918d 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -616,9 +616,9 @@ nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
}
break;
case 7: /* MP error */
- if (ustatus & 0x00010000) {
+ if (ustatus & 0x04030000) {
nv50_pgraph_mp_trap(dev, i, display);
- ustatus &= ~0x00010000;
+ ustatus &= ~0x04030000;
}
break;
case 8: /* TPDMA error */
diff --git a/drivers/gpu/drm/nouveau/nv50_pm.c b/drivers/gpu/drm/nouveau/nv50_pm.c
index 3d5a86b98282..03937212e9d8 100644
--- a/drivers/gpu/drm/nouveau/nv50_pm.c
+++ b/drivers/gpu/drm/nouveau/nv50_pm.c
@@ -25,122 +25,745 @@
#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_bios.h"
+#include "nouveau_hw.h"
#include "nouveau_pm.h"
+#include "nouveau_hwsq.h"
-struct nv50_pm_state {
- struct nouveau_pm_level *perflvl;
- struct pll_lims pll;
- enum pll_types type;
- int N, M, P;
+enum clk_src {
+ clk_src_crystal,
+ clk_src_href,
+ clk_src_hclk,
+ clk_src_hclkm3,
+ clk_src_hclkm3d2,
+ clk_src_host,
+ clk_src_nvclk,
+ clk_src_sclk,
+ clk_src_mclk,
+ clk_src_vdec,
+ clk_src_dom6
};
+static u32 read_clk(struct drm_device *, enum clk_src);
+
+static u32
+read_div(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ switch (dev_priv->chipset) {
+ case 0x50: /* it exists, but only has bit 31, not the dividers.. */
+ case 0x84:
+ case 0x86:
+ case 0x98:
+ case 0xa0:
+ return nv_rd32(dev, 0x004700);
+ case 0x92:
+ case 0x94:
+ case 0x96:
+ return nv_rd32(dev, 0x004800);
+ default:
+ return 0x00000000;
+ }
+}
+
+static u32
+read_pll_src(struct drm_device *dev, u32 base)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ u32 coef, ref = read_clk(dev, clk_src_crystal);
+ u32 rsel = nv_rd32(dev, 0x00e18c);
+ int P, N, M, id;
+
+ switch (dev_priv->chipset) {
+ case 0x50:
+ case 0xa0:
+ switch (base) {
+ case 0x4020:
+ case 0x4028: id = !!(rsel & 0x00000004); break;
+ case 0x4008: id = !!(rsel & 0x00000008); break;
+ case 0x4030: id = 0; break;
+ default:
+ NV_ERROR(dev, "ref: bad pll 0x%06x\n", base);
+ return 0;
+ }
+
+ coef = nv_rd32(dev, 0x00e81c + (id * 0x0c));
+ ref *= (coef & 0x01000000) ? 2 : 4;
+ P = (coef & 0x00070000) >> 16;
+ N = ((coef & 0x0000ff00) >> 8) + 1;
+ M = ((coef & 0x000000ff) >> 0) + 1;
+ break;
+ case 0x84:
+ case 0x86:
+ case 0x92:
+ coef = nv_rd32(dev, 0x00e81c);
+ P = (coef & 0x00070000) >> 16;
+ N = (coef & 0x0000ff00) >> 8;
+ M = (coef & 0x000000ff) >> 0;
+ break;
+ case 0x94:
+ case 0x96:
+ case 0x98:
+ rsel = nv_rd32(dev, 0x00c050);
+ switch (base) {
+ case 0x4020: rsel = (rsel & 0x00000003) >> 0; break;
+ case 0x4008: rsel = (rsel & 0x0000000c) >> 2; break;
+ case 0x4028: rsel = (rsel & 0x00001800) >> 11; break;
+ case 0x4030: rsel = 3; break;
+ default:
+ NV_ERROR(dev, "ref: bad pll 0x%06x\n", base);
+ return 0;
+ }
+
+ switch (rsel) {
+ case 0: id = 1; break;
+ case 1: return read_clk(dev, clk_src_crystal);
+ case 2: return read_clk(dev, clk_src_href);
+ case 3: id = 0; break;
+ }
+
+ coef = nv_rd32(dev, 0x00e81c + (id * 0x28));
+ P = (nv_rd32(dev, 0x00e824 + (id * 0x28)) >> 16) & 7;
+ P += (coef & 0x00070000) >> 16;
+ N = (coef & 0x0000ff00) >> 8;
+ M = (coef & 0x000000ff) >> 0;
+ break;
+ default:
+ BUG_ON(1);
+ }
+
+ if (M)
+ return (ref * N / M) >> P;
+ return 0;
+}
+
+static u32
+read_pll_ref(struct drm_device *dev, u32 base)
+{
+ u32 src, mast = nv_rd32(dev, 0x00c040);
+
+ switch (base) {
+ case 0x004028:
+ src = !!(mast & 0x00200000);
+ break;
+ case 0x004020:
+ src = !!(mast & 0x00400000);
+ break;
+ case 0x004008:
+ src = !!(mast & 0x00010000);
+ break;
+ case 0x004030:
+ src = !!(mast & 0x02000000);
+ break;
+ case 0x00e810:
+ return read_clk(dev, clk_src_crystal);
+ default:
+ NV_ERROR(dev, "bad pll 0x%06x\n", base);
+ return 0;
+ }
+
+ if (src)
+ return read_clk(dev, clk_src_href);
+ return read_pll_src(dev, base);
+}
+
+static u32
+read_pll(struct drm_device *dev, u32 base)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ u32 mast = nv_rd32(dev, 0x00c040);
+ u32 ctrl = nv_rd32(dev, base + 0);
+ u32 coef = nv_rd32(dev, base + 4);
+ u32 ref = read_pll_ref(dev, base);
+ u32 clk = 0;
+ int N1, N2, M1, M2;
+
+ if (base == 0x004028 && (mast & 0x00100000)) {
+ /* wtf, appears to only disable post-divider on nva0 */
+ if (dev_priv->chipset != 0xa0)
+ return read_clk(dev, clk_src_dom6);
+ }
+
+ N2 = (coef & 0xff000000) >> 24;
+ M2 = (coef & 0x00ff0000) >> 16;
+ N1 = (coef & 0x0000ff00) >> 8;
+ M1 = (coef & 0x000000ff);
+ if ((ctrl & 0x80000000) && M1) {
+ clk = ref * N1 / M1;
+ if ((ctrl & 0x40000100) == 0x40000000) {
+ if (M2)
+ clk = clk * N2 / M2;
+ else
+ clk = 0;
+ }
+ }
+
+ return clk;
+}
+
+static u32
+read_clk(struct drm_device *dev, enum clk_src src)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ u32 mast = nv_rd32(dev, 0x00c040);
+ u32 P = 0;
+
+ switch (src) {
+ case clk_src_crystal:
+ return dev_priv->crystal;
+ case clk_src_href:
+ return 100000; /* PCIE reference clock */
+ case clk_src_hclk:
+ return read_clk(dev, clk_src_href) * 27778 / 10000;
+ case clk_src_hclkm3:
+ return read_clk(dev, clk_src_hclk) * 3;
+ case clk_src_hclkm3d2:
+ return read_clk(dev, clk_src_hclk) * 3 / 2;
+ case clk_src_host:
+ switch (mast & 0x30000000) {
+ case 0x00000000: return read_clk(dev, clk_src_href);
+ case 0x10000000: break;
+ case 0x20000000: /* !0x50 */
+ case 0x30000000: return read_clk(dev, clk_src_hclk);
+ }
+ break;
+ case clk_src_nvclk:
+ if (!(mast & 0x00100000))
+ P = (nv_rd32(dev, 0x004028) & 0x00070000) >> 16;
+ switch (mast & 0x00000003) {
+ case 0x00000000: return read_clk(dev, clk_src_crystal) >> P;
+ case 0x00000001: return read_clk(dev, clk_src_dom6);
+ case 0x00000002: return read_pll(dev, 0x004020) >> P;
+ case 0x00000003: return read_pll(dev, 0x004028) >> P;
+ }
+ break;
+ case clk_src_sclk:
+ P = (nv_rd32(dev, 0x004020) & 0x00070000) >> 16;
+ switch (mast & 0x00000030) {
+ case 0x00000000:
+ if (mast & 0x00000080)
+ return read_clk(dev, clk_src_host) >> P;
+ return read_clk(dev, clk_src_crystal) >> P;
+ case 0x00000010: break;
+ case 0x00000020: return read_pll(dev, 0x004028) >> P;
+ case 0x00000030: return read_pll(dev, 0x004020) >> P;
+ }
+ break;
+ case clk_src_mclk:
+ P = (nv_rd32(dev, 0x004008) & 0x00070000) >> 16;
+ if (nv_rd32(dev, 0x004008) & 0x00000200) {
+ switch (mast & 0x0000c000) {
+ case 0x00000000:
+ return read_clk(dev, clk_src_crystal) >> P;
+ case 0x00008000:
+ case 0x0000c000:
+ return read_clk(dev, clk_src_href) >> P;
+ }
+ } else {
+ return read_pll(dev, 0x004008) >> P;
+ }
+ break;
+ case clk_src_vdec:
+ P = (read_div(dev) & 0x00000700) >> 8;
+ switch (dev_priv->chipset) {
+ case 0x84:
+ case 0x86:
+ case 0x92:
+ case 0x94:
+ case 0x96:
+ case 0xa0:
+ switch (mast & 0x00000c00) {
+ case 0x00000000:
+ if (dev_priv->chipset == 0xa0) /* oddly, nva0 uses nvclk here */
+ return read_clk(dev, clk_src_nvclk) >> P;
+ return read_clk(dev, clk_src_crystal) >> P;
+ case 0x00000400:
+ return 0;
+ case 0x00000800:
+ if (mast & 0x01000000)
+ return read_pll(dev, 0x004028) >> P;
+ return read_pll(dev, 0x004030) >> P;
+ case 0x00000c00:
+ return read_clk(dev, clk_src_nvclk) >> P;
+ }
+ break;
+ case 0x98:
+ switch (mast & 0x00000c00) {
+ case 0x00000000:
+ return read_clk(dev, clk_src_nvclk) >> P;
+ case 0x00000400:
+ return 0;
+ case 0x00000800:
+ return read_clk(dev, clk_src_hclkm3d2) >> P;
+ case 0x00000c00:
+ return read_clk(dev, clk_src_mclk) >> P;
+ }
+ break;
+ }
+ break;
+ case clk_src_dom6:
+ switch (dev_priv->chipset) {
+ case 0x50:
+ case 0xa0:
+ return read_pll(dev, 0x00e810) >> 2;
+ case 0x84:
+ case 0x86:
+ case 0x92:
+ case 0x94:
+ case 0x96:
+ case 0x98:
+ P = (read_div(dev) & 0x00000007) >> 0;
+ switch (mast & 0x0c000000) {
+ case 0x00000000: return read_clk(dev, clk_src_href);
+ case 0x04000000: break;
+ case 0x08000000: return read_clk(dev, clk_src_hclk);
+ case 0x0c000000:
+ return read_clk(dev, clk_src_hclkm3) >> P;
+ }
+ break;
+ default:
+ break;
+ }
+ default:
+ break;
+ }
+
+ NV_DEBUG(dev, "unknown clock source %d 0x%08x\n", src, mast);
+ return 0;
+}
+
int
-nv50_pm_clock_get(struct drm_device *dev, u32 id)
+nv50_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
{
- struct pll_lims pll;
- int P, N, M, ret;
- u32 reg0, reg1;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ if (dev_priv->chipset == 0xaa ||
+ dev_priv->chipset == 0xac)
+ return 0;
- ret = get_pll_limits(dev, id, &pll);
+ perflvl->core = read_clk(dev, clk_src_nvclk);
+ perflvl->shader = read_clk(dev, clk_src_sclk);
+ perflvl->memory = read_clk(dev, clk_src_mclk);
+ if (dev_priv->chipset != 0x50) {
+ perflvl->vdec = read_clk(dev, clk_src_vdec);
+ perflvl->dom6 = read_clk(dev, clk_src_dom6);
+ }
+
+ return 0;
+}
+
+struct nv50_pm_state {
+ struct hwsq_ucode mclk_hwsq;
+ u32 mscript;
+
+ u32 emast;
+ u32 nctrl;
+ u32 ncoef;
+ u32 sctrl;
+ u32 scoef;
+
+ u32 amast;
+ u32 pdivs;
+};
+
+static u32
+calc_pll(struct drm_device *dev, u32 reg, struct pll_lims *pll,
+ u32 clk, int *N1, int *M1, int *log2P)
+{
+ struct nouveau_pll_vals coef;
+ int ret;
+
+ ret = get_pll_limits(dev, reg, pll);
if (ret)
- return ret;
+ return 0;
+
+ pll->vco2.maxfreq = 0;
+ pll->refclk = read_pll_ref(dev, reg);
+ if (!pll->refclk)
+ return 0;
- reg0 = nv_rd32(dev, pll.reg + 0);
- reg1 = nv_rd32(dev, pll.reg + 4);
+ ret = nouveau_calc_pll_mnp(dev, pll, clk, &coef);
+ if (ret == 0)
+ return 0;
- if ((reg0 & 0x80000000) == 0) {
- if (id == PLL_SHADER) {
- NV_DEBUG(dev, "Shader PLL is disabled. "
- "Shader clock is twice the core\n");
- ret = nv50_pm_clock_get(dev, PLL_CORE);
- if (ret > 0)
- return ret << 1;
- } else if (id == PLL_MEMORY) {
- NV_DEBUG(dev, "Memory PLL is disabled. "
- "Memory clock is equal to the ref_clk\n");
- return pll.refclk;
+ *N1 = coef.N1;
+ *M1 = coef.M1;
+ *log2P = coef.log2P;
+ return ret;
+}
+
+static inline u32
+calc_div(u32 src, u32 target, int *div)
+{
+ u32 clk0 = src, clk1 = src;
+ for (*div = 0; *div <= 7; (*div)++) {
+ if (clk0 <= target) {
+ clk1 = clk0 << (*div ? 1 : 0);
+ break;
}
+ clk0 >>= 1;
+ }
+
+ if (target - clk0 <= clk1 - target)
+ return clk0;
+ (*div)--;
+ return clk1;
+}
+
+static inline u32
+clk_same(u32 a, u32 b)
+{
+ return ((a / 1000) == (b / 1000));
+}
+
+static int
+calc_mclk(struct drm_device *dev, u32 freq, struct hwsq_ucode *hwsq)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct pll_lims pll;
+ u32 mast = nv_rd32(dev, 0x00c040);
+ u32 ctrl = nv_rd32(dev, 0x004008);
+ u32 coef = nv_rd32(dev, 0x00400c);
+ u32 orig = ctrl;
+ u32 crtc_mask = 0;
+ int N, M, P;
+ int ret, i;
+
+ /* use pcie refclock if possible, otherwise use mpll */
+ ctrl &= ~0x81ff0200;
+ if (clk_same(freq, read_clk(dev, clk_src_href))) {
+ ctrl |= 0x00000200 | (pll.log2p_bias << 19);
+ } else {
+ ret = calc_pll(dev, 0x4008, &pll, freq, &N, &M, &P);
+ if (ret == 0)
+ return -EINVAL;
+
+ ctrl |= 0x80000000 | (P << 22) | (P << 16);
+ ctrl |= pll.log2p_bias << 19;
+ coef = (N << 8) | M;
+ }
+
+ mast &= ~0xc0000000; /* get MCLK_2 from HREF */
+ mast |= 0x0000c000; /* use MCLK_2 as MPLL_BYPASS clock */
+
+ /* determine active crtcs */
+ for (i = 0; i < 2; i++) {
+ if (nv_rd32(dev, NV50_PDISPLAY_CRTC_C(i, CLOCK)))
+ crtc_mask |= (1 << i);
+ }
+
+ /* build the ucode which will reclock the memory for us */
+ hwsq_init(hwsq);
+ if (crtc_mask) {
+ hwsq_op5f(hwsq, crtc_mask, 0x00); /* wait for scanout */
+ hwsq_op5f(hwsq, crtc_mask, 0x01); /* wait for vblank */
}
+ if (dev_priv->chipset >= 0x92)
+ hwsq_wr32(hwsq, 0x611200, 0x00003300); /* disable scanout */
+ hwsq_setf(hwsq, 0x10, 0); /* disable bus access */
+ hwsq_op5f(hwsq, 0x00, 0x01); /* purpose unknown */
+
+ /* prepare memory controller */
+ hwsq_wr32(hwsq, 0x1002d4, 0x00000001); /* precharge banks and idle */
+ hwsq_wr32(hwsq, 0x1002d0, 0x00000001); /* force refresh */
+ hwsq_wr32(hwsq, 0x100210, 0x00000000); /* stop the automatic refresh */
+ hwsq_wr32(hwsq, 0x1002dc, 0x00000001); /* start self refresh mode */
- P = (reg0 & 0x00070000) >> 16;
- N = (reg1 & 0x0000ff00) >> 8;
- M = (reg1 & 0x000000ff);
+ /* reclock memory */
+ hwsq_wr32(hwsq, 0xc040, mast);
+ hwsq_wr32(hwsq, 0x4008, orig | 0x00000200); /* bypass MPLL */
+ hwsq_wr32(hwsq, 0x400c, coef);
+ hwsq_wr32(hwsq, 0x4008, ctrl);
- return ((pll.refclk * N / M) >> P);
+ /* restart memory controller */
+ hwsq_wr32(hwsq, 0x1002d4, 0x00000001); /* precharge banks and idle */
+ hwsq_wr32(hwsq, 0x1002dc, 0x00000000); /* stop self refresh mode */
+ hwsq_wr32(hwsq, 0x100210, 0x80000000); /* restart automatic refresh */
+ hwsq_usec(hwsq, 12); /* wait for the PLL to stabilize */
+
+ hwsq_usec(hwsq, 48); /* may be unnecessary: causes flickering */
+ hwsq_setf(hwsq, 0x10, 1); /* enable bus access */
+ hwsq_op5f(hwsq, 0x00, 0x00); /* purpose unknown, reverse of the 0x00, 0x01 op above? */
+ if (dev_priv->chipset >= 0x92)
+ hwsq_wr32(hwsq, 0x611200, 0x00003330); /* enable scanout */
+ hwsq_fini(hwsq);
+ return 0;
}
void *
-nv50_pm_clock_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl,
- u32 id, int khz)
+nv50_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
{
- struct nv50_pm_state *state;
- int dummy, ret;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv50_pm_state *info;
+ struct pll_lims pll;
+ int ret = -EINVAL;
+ int N, M, P1, P2;
+ u32 clk, out;
- state = kzalloc(sizeof(*state), GFP_KERNEL);
- if (!state)
+ if (dev_priv->chipset == 0xaa ||
+ dev_priv->chipset == 0xac)
+ return ERR_PTR(-ENODEV);
+
+ info = kmalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
return ERR_PTR(-ENOMEM);
- state->type = id;
- state->perflvl = perflvl;
- ret = get_pll_limits(dev, id, &state->pll);
- if (ret < 0) {
- kfree(state);
- return (ret == -ENOENT) ? NULL : ERR_PTR(ret);
+ /* core: for the moment at least, always use nvpll */
+ clk = calc_pll(dev, 0x4028, &pll, perflvl->core, &N, &M, &P1);
+ if (clk == 0)
+ goto error;
+
+ info->emast = 0x00000003;
+ info->nctrl = 0x80000000 | (P1 << 19) | (P1 << 16);
+ info->ncoef = (N << 8) | M;
+
+ /* shader: tie to nvclk if possible, otherwise use spll.  We have to be
+ * very careful that the shader clock is at least twice the core clock,
+ * or some chipsets will be very unhappy.  I expect most or all of these
+ * cases to be handled by tying to nvclk, but there may be corner cases.
+ */
+ if (P1-- && perflvl->shader == (perflvl->core << 1)) {
+ info->emast |= 0x00000020;
+ info->sctrl = 0x00000000 | (P1 << 19) | (P1 << 16);
+ info->scoef = nv_rd32(dev, 0x004024);
+ } else {
+ clk = calc_pll(dev, 0x4020, &pll, perflvl->shader, &N, &M, &P1);
+ if (clk == 0)
+ goto error;
+
+ info->emast |= 0x00000030;
+ info->sctrl = 0x80000000 | (P1 << 19) | (P1 << 16);
+ info->scoef = (N << 8) | M;
+ }
+
+ /* memory: build hwsq ucode which we'll use to reclock memory */
+ info->mclk_hwsq.len = 0;
+ if (perflvl->memory) {
+ clk = calc_mclk(dev, perflvl->memory, &info->mclk_hwsq);
+ if (clk < 0) {
+ ret = clk;
+ goto error;
+ }
+
+ info->mscript = perflvl->memscript;
+ }
+
+ /* vdec: avoid modifying xpll until we know exactly how the other
+ * clock domains work; I suspect at least some of them can also be
+ * tied to xpll...
+ */
+ info->amast = nv_rd32(dev, 0x00c040);
+ info->pdivs = read_div(dev);
+ if (perflvl->vdec) {
+ /* see how close we can get using nvclk as a source */
+ clk = calc_div(perflvl->core, perflvl->vdec, &P1);
+
+ /* see how close we can get using xpll/hclk as a source */
+ if (dev_priv->chipset != 0x98)
+ out = read_pll(dev, 0x004030);
+ else
+ out = read_clk(dev, clk_src_hclkm3d2);
+ out = calc_div(out, perflvl->vdec, &P2);
+
+ /* select whichever gets us closest */
+ info->amast &= ~0x00000c00;
+ info->pdivs &= ~0x00000700;
+ if (abs((int)perflvl->vdec - clk) <=
+ abs((int)perflvl->vdec - out)) {
+ if (dev_priv->chipset != 0x98)
+ info->amast |= 0x00000c00;
+ info->pdivs |= P1 << 8;
+ } else {
+ info->amast |= 0x00000800;
+ info->pdivs |= P2 << 8;
+ }
+ }
+
+ /* dom6: not sure what this clock domain is, but we're limited to
+ * various combinations of the host clock frequency
+ */
+ if (perflvl->dom6) {
+ info->amast &= ~0x0c000000;
+ if (clk_same(perflvl->dom6, read_clk(dev, clk_src_href))) {
+ info->amast |= 0x00000000;
+ } else
+ if (clk_same(perflvl->dom6, read_clk(dev, clk_src_hclk))) {
+ info->amast |= 0x08000000;
+ } else {
+ clk = read_clk(dev, clk_src_hclk) * 3;
+ clk = calc_div(clk, perflvl->dom6, &P1);
+
+ info->amast |= 0x0c000000;
+ info->pdivs = (info->pdivs & ~0x00000007) | P1;
+ }
}
- ret = nv50_calc_pll(dev, &state->pll, khz, &state->N, &state->M,
- &dummy, &dummy, &state->P);
- if (ret < 0) {
- kfree(state);
- return ERR_PTR(ret);
+ return info;
+error:
+ kfree(info);
+ return ERR_PTR(ret);
+}
+
+static int
+prog_mclk(struct drm_device *dev, struct hwsq_ucode *hwsq)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ u32 hwsq_data, hwsq_kick;
+ int i;
+
+ if (dev_priv->chipset < 0x90) {
+ hwsq_data = 0x001400;
+ hwsq_kick = 0x00000003;
+ } else {
+ hwsq_data = 0x080000;
+ hwsq_kick = 0x00000001;
}
- return state;
+ /* upload hwsq ucode */
+ nv_mask(dev, 0x001098, 0x00000008, 0x00000000);
+ nv_wr32(dev, 0x001304, 0x00000000);
+ for (i = 0; i < hwsq->len / 4; i++)
+ nv_wr32(dev, hwsq_data + (i * 4), hwsq->ptr.u32[i]);
+ nv_mask(dev, 0x001098, 0x00000018, 0x00000018);
+
+ /* launch, and wait for completion */
+ nv_wr32(dev, 0x00130c, hwsq_kick);
+ if (!nv_wait(dev, 0x001308, 0x00000100, 0x00000000)) {
+ NV_ERROR(dev, "hwsq ucode exec timed out\n");
+ NV_ERROR(dev, "0x001308: 0x%08x\n", nv_rd32(dev, 0x001308));
+ for (i = 0; i < hwsq->len / 4; i++) {
+ NV_ERROR(dev, "0x%06x: 0x%08x\n", 0x1400 + (i * 4),
+ nv_rd32(dev, 0x001400 + (i * 4)));
+ }
+
+ return -EIO;
+ }
+
+ return 0;
}
-void
-nv50_pm_clock_set(struct drm_device *dev, void *pre_state)
+int
+nv50_pm_clocks_set(struct drm_device *dev, void *data)
{
- struct nv50_pm_state *state = pre_state;
- struct nouveau_pm_level *perflvl = state->perflvl;
- u32 reg = state->pll.reg, tmp;
- struct bit_entry BIT_M;
- u16 script;
- int N = state->N;
- int M = state->M;
- int P = state->P;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv50_pm_state *info = data;
+ struct bit_entry M;
+ int ret = 0;
- if (state->type == PLL_MEMORY && perflvl->memscript &&
- bit_table(dev, 'M', &BIT_M) == 0 &&
- BIT_M.version == 1 && BIT_M.length >= 0x0b) {
- script = ROM16(BIT_M.data[0x05]);
- if (script)
- nouveau_bios_run_init_table(dev, script, NULL, -1);
- script = ROM16(BIT_M.data[0x07]);
- if (script)
- nouveau_bios_run_init_table(dev, script, NULL, -1);
- script = ROM16(BIT_M.data[0x09]);
- if (script)
- nouveau_bios_run_init_table(dev, script, NULL, -1);
+ /* halt and idle execution engines */
+ nv_mask(dev, 0x002504, 0x00000001, 0x00000001);
+ if (!nv_wait(dev, 0x002504, 0x00000010, 0x00000010))
+ goto error;
- nouveau_bios_run_init_table(dev, perflvl->memscript, NULL, -1);
+ /* memory: it is *very* important we change this first.  The ucode
+ * built in pre() has hardcoded 0xc040 values which must not change
+ * before it is executed, or the engine clocks may end up misconfigured.
+ */
+ if (info->mclk_hwsq.len) {
+ /* execute some init scripts from the vbios (exact purpose unclear) */
+ if (!bit_table(dev, 'M', &M) && M.version == 1) {
+ if (M.length >= 6)
+ nouveau_bios_init_exec(dev, ROM16(M.data[5]));
+ if (M.length >= 8)
+ nouveau_bios_init_exec(dev, ROM16(M.data[7]));
+ if (M.length >= 10)
+ nouveau_bios_init_exec(dev, ROM16(M.data[9]));
+ nouveau_bios_init_exec(dev, info->mscript);
+ }
+
+ ret = prog_mclk(dev, &info->mclk_hwsq);
+ if (ret)
+ goto resume;
}
- if (state->type == PLL_MEMORY) {
- nv_wr32(dev, 0x100210, 0);
- nv_wr32(dev, 0x1002dc, 1);
+ /* reclock vdec/dom6 */
+ nv_mask(dev, 0x00c040, 0x00000c00, 0x00000000);
+ switch (dev_priv->chipset) {
+ case 0x92:
+ case 0x94:
+ case 0x96:
+ nv_mask(dev, 0x004800, 0x00000707, info->pdivs);
+ break;
+ default:
+ nv_mask(dev, 0x004700, 0x00000707, info->pdivs);
+ break;
}
+ nv_mask(dev, 0x00c040, 0x0c000c00, info->amast);
- tmp = nv_rd32(dev, reg + 0) & 0xfff8ffff;
- tmp |= 0x80000000 | (P << 16);
- nv_wr32(dev, reg + 0, tmp);
- nv_wr32(dev, reg + 4, (N << 8) | M);
+ /* core/shader: make sure sclk/nvclk are disconnected from their
+ * plls (nvclk to dom6, sclk to hclk), modify the plls, and
+ * reconnect sclk/nvclk to their new clock source
+ */
+ if (dev_priv->chipset < 0x92)
+ nv_mask(dev, 0x00c040, 0x001000b0, 0x00100080); /* grrr! */
+ else
+ nv_mask(dev, 0x00c040, 0x000000b3, 0x00000081);
+ nv_mask(dev, 0x004020, 0xc03f0100, info->sctrl);
+ nv_wr32(dev, 0x004024, info->scoef);
+ nv_mask(dev, 0x004028, 0xc03f0100, info->nctrl);
+ nv_wr32(dev, 0x00402c, info->ncoef);
+ nv_mask(dev, 0x00c040, 0x00100033, info->emast);
+
+ goto resume;
+error:
+ ret = -EBUSY;
+resume:
+ nv_mask(dev, 0x002504, 0x00000001, 0x00000000);
+ kfree(info);
+ return ret;
+}
- if (state->type == PLL_MEMORY) {
- nv_wr32(dev, 0x1002dc, 0);
- nv_wr32(dev, 0x100210, 0x80000000);
+static int
+pwm_info(struct drm_device *dev, int *line, int *ctrl, int *indx)
+{
+ if (*line == 0x04) {
+ *ctrl = 0x00e100;
+ *line = 4;
+ *indx = 0;
+ } else
+ if (*line == 0x09) {
+ *ctrl = 0x00e100;
+ *line = 9;
+ *indx = 1;
+ } else
+ if (*line == 0x10) {
+ *ctrl = 0x00e28c;
+ *line = 0;
+ *indx = 0;
+ } else {
+ NV_ERROR(dev, "unknown pwm ctrl for gpio %d\n", *line);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+int
+nv50_pm_pwm_get(struct drm_device *dev, int line, u32 *divs, u32 *duty)
+{
+ int ctrl, id, ret = pwm_info(dev, &line, &ctrl, &id);
+ if (ret)
+ return ret;
+
+ if (nv_rd32(dev, ctrl) & (1 << line)) {
+ *divs = nv_rd32(dev, 0x00e114 + (id * 8));
+ *duty = nv_rd32(dev, 0x00e118 + (id * 8));
+ return 0;
}
- kfree(state);
+ return -EINVAL;
}
+int
+nv50_pm_pwm_set(struct drm_device *dev, int line, u32 divs, u32 duty)
+{
+ int ctrl, id, ret = pwm_info(dev, &line, &ctrl, &id);
+ if (ret)
+ return ret;
+
+ nv_mask(dev, ctrl, 0x00010001 << line, 0x00000001 << line);
+ nv_wr32(dev, 0x00e114 + (id * 8), divs);
+ nv_wr32(dev, 0x00e118 + (id * 8), duty | 0x80000000);
+ return 0;
+}
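
The clock readback helpers added above all reduce to the same PLL relation:
output = reference * N / M, optionally scaled by a second N2/M2 stage (see
read_pll()) and shifted right by a log2 post-divider P (read_pll_src() and the
callers in read_clk()).  The following standalone sketch is not part of the
driver; it assumes the common coefficient layout used above (P in bits 16-18,
N in bits 8-15, M in bits 0-7) and a hypothetical 27 MHz crystal, and only
illustrates the arithmetic on a concrete coefficient value:

    /* sketch only, not driver code: decode an NV50-style PLL coefficient
     * word the same way read_pll_src() above does
     */
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t pll_freq_khz(uint32_t ref_khz, uint32_t coef)
    {
            uint32_t P = (coef & 0x00070000) >> 16; /* log2 post-divider */
            uint32_t N = (coef & 0x0000ff00) >> 8;  /* feedback divider */
            uint32_t M = (coef & 0x000000ff) >> 0;  /* reference divider */

            if (!M)
                    return 0; /* guard against division by zero, as the driver does */
            return (ref_khz * N / M) >> P;
    }

    int main(void)
    {
            /* N = 0x55 (85), M = 0x03, P = 1 on a 27000 kHz crystal:
             * 27000 * 85 / 3 = 765000 kHz, >> 1 = 382500 kHz
             */
            printf("%u kHz\n", pll_freq_khz(27000, 0x00015503));
            return 0;
    }
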
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
index 2633aa8554eb..c4423ba9c9bf 100644
--- a/drivers/gpu/drm/nouveau/nv50_sor.c
+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
@@ -60,6 +60,8 @@ nv50_sor_disconnect(struct drm_encoder *encoder)
BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
OUT_RING (evo, 0);
+ nouveau_hdmi_mode_set(encoder, NULL);
+
nv_encoder->crtc = NULL;
nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
}
@@ -172,6 +174,12 @@ nv50_sor_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
static void
nv50_sor_prepare(struct drm_encoder *encoder)
{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ nv50_sor_disconnect(encoder);
+ if (nv_encoder->dcb->type == OUTPUT_DP) {
+ /* avoid race between link training and supervisor intr */
+ nv50_display_sync(encoder->dev);
+ }
}
static void
@@ -180,8 +188,8 @@ nv50_sor_commit(struct drm_encoder *encoder)
}
static void
-nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
+ struct drm_display_mode *mode)
{
struct nouveau_channel *evo = nv50_display(encoder->dev)->master;
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
@@ -193,24 +201,27 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
NV_DEBUG_KMS(dev, "or %d type %d -> crtc %d\n",
nv_encoder->or, nv_encoder->dcb->type, crtc->index);
+ nv_encoder->crtc = encoder->crtc;
switch (nv_encoder->dcb->type) {
case OUTPUT_TMDS:
if (nv_encoder->dcb->sorconf.link & 1) {
- if (adjusted_mode->clock < 165000)
+ if (mode->clock < 165000)
mode_ctl = 0x0100;
else
mode_ctl = 0x0500;
} else
mode_ctl = 0x0200;
+
+ nouveau_hdmi_mode_set(encoder, mode);
break;
case OUTPUT_DP:
nv_connector = nouveau_encoder_connector_get(nv_encoder);
if (nv_connector && nv_connector->base.display_info.bpc == 6) {
- nv_encoder->dp.datarate = crtc->mode->clock * 18 / 8;
+ nv_encoder->dp.datarate = mode->clock * 18 / 8;
mode_ctl |= 0x00020000;
} else {
- nv_encoder->dp.datarate = crtc->mode->clock * 24 / 8;
+ nv_encoder->dp.datarate = mode->clock * 24 / 8;
mode_ctl |= 0x00050000;
}
@@ -228,10 +239,10 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
else
mode_ctl |= NV50_EVO_SOR_MODE_CTRL_CRTC0;
- if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
mode_ctl |= NV50_EVO_SOR_MODE_CTRL_NHSYNC;
- if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
mode_ctl |= NV50_EVO_SOR_MODE_CTRL_NVSYNC;
nv50_sor_dpms(encoder, DRM_MODE_DPMS_ON);
@@ -239,12 +250,11 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
ret = RING_SPACE(evo, 2);
if (ret) {
NV_ERROR(dev, "no space while connecting SOR\n");
+ nv_encoder->crtc = NULL;
return;
}
BEGIN_RING(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1);
OUT_RING(evo, mode_ctl);
-
- nv_encoder->crtc = encoder->crtc;
}
static struct drm_crtc *
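
For reference, the DP datarate written in nv50_sor_mode_set() above is the
pixel clock scaled by bits per pixel over eight: 18 bpp in the 6 bpc case and
24 bpp otherwise.  A minimal sketch of that arithmetic follows; generalizing
the two cases to bpc * 3 is an assumption here, not something the driver does
explicitly:

    /* sketch only: datarate as in nv50_sor_mode_set() above, with
     * clock_khz in the same units as mode->clock (kHz)
     */
    static unsigned int dp_datarate(unsigned int clock_khz, unsigned int bpc)
    {
            return clock_khz * (bpc * 3) / 8;
    }

    /* e.g. a 148500 kHz (1080p60) mode at 8 bpc: 148500 * 24 / 8 = 445500 */
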
diff --git a/drivers/gpu/drm/nouveau/nv50_vm.c b/drivers/gpu/drm/nouveau/nv50_vm.c
index 40b84f22d819..6f38ceae3aa4 100644
--- a/drivers/gpu/drm/nouveau/nv50_vm.c
+++ b/drivers/gpu/drm/nouveau/nv50_vm.c
@@ -48,7 +48,7 @@ nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
phys |= 0x60;
else if (coverage <= 64 * 1024 * 1024)
phys |= 0x40;
- else if (coverage < 128 * 1024 * 1024)
+ else if (coverage <= 128 * 1024 * 1024)
phys |= 0x20;
}
diff --git a/drivers/gpu/drm/nouveau/nv84_bsp.c b/drivers/gpu/drm/nouveau/nv84_bsp.c
new file mode 100644
index 000000000000..74875739bcc0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv84_bsp.c
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_util.h"
+#include "nouveau_vm.h"
+#include "nouveau_ramht.h"
+
+/*XXX: This stub is currently used on NV98+ as well; as soon as it becomes
+ * more than just an enable/disable stub it needs to be split out to
+ * nv98_bsp.c...
+ */
+
+struct nv84_bsp_engine {
+ struct nouveau_exec_engine base;
+};
+
+static int
+nv84_bsp_fini(struct drm_device *dev, int engine, bool suspend)
+{
+ if (!(nv_rd32(dev, 0x000200) & 0x00008000))
+ return 0;
+
+ nv_mask(dev, 0x000200, 0x00008000, 0x00000000);
+ return 0;
+}
+
+static int
+nv84_bsp_init(struct drm_device *dev, int engine)
+{
+ nv_mask(dev, 0x000200, 0x00008000, 0x00000000);
+ nv_mask(dev, 0x000200, 0x00008000, 0x00008000);
+ return 0;
+}
+
+static void
+nv84_bsp_destroy(struct drm_device *dev, int engine)
+{
+ struct nv84_bsp_engine *pbsp = nv_engine(dev, engine);
+
+ NVOBJ_ENGINE_DEL(dev, BSP);
+
+ kfree(pbsp);
+}
+
+int
+nv84_bsp_create(struct drm_device *dev)
+{
+ struct nv84_bsp_engine *pbsp;
+
+ pbsp = kzalloc(sizeof(*pbsp), GFP_KERNEL);
+ if (!pbsp)
+ return -ENOMEM;
+
+ pbsp->base.destroy = nv84_bsp_destroy;
+ pbsp->base.init = nv84_bsp_init;
+ pbsp->base.fini = nv84_bsp_fini;
+
+ NVOBJ_ENGINE_ADD(dev, BSP, &pbsp->base);
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nv84_vp.c b/drivers/gpu/drm/nouveau/nv84_vp.c
new file mode 100644
index 000000000000..6570d300ab85
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv84_vp.c
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_util.h"
+#include "nouveau_vm.h"
+#include "nouveau_ramht.h"
+
+/*XXX: This stub is currently used on NV98+ as well; as soon as it becomes
+ * more than just an enable/disable stub it needs to be split out to
+ * nv98_vp.c...
+ */
+
+struct nv84_vp_engine {
+ struct nouveau_exec_engine base;
+};
+
+static int
+nv84_vp_fini(struct drm_device *dev, int engine, bool suspend)
+{
+ if (!(nv_rd32(dev, 0x000200) & 0x00020000))
+ return 0;
+
+ nv_mask(dev, 0x000200, 0x00020000, 0x00000000);
+ return 0;
+}
+
+static int
+nv84_vp_init(struct drm_device *dev, int engine)
+{
+ nv_mask(dev, 0x000200, 0x00020000, 0x00000000);
+ nv_mask(dev, 0x000200, 0x00020000, 0x00020000);
+ return 0;
+}
+
+static void
+nv84_vp_destroy(struct drm_device *dev, int engine)
+{
+ struct nv84_vp_engine *pvp = nv_engine(dev, engine);
+
+ NVOBJ_ENGINE_DEL(dev, VP);
+
+ kfree(pvp);
+}
+
+int
+nv84_vp_create(struct drm_device *dev)
+{
+ struct nv84_vp_engine *pvp;
+
+ pvp = kzalloc(sizeof(*pvp), GFP_KERNEL);
+ if (!pvp)
+ return -ENOMEM;
+
+ pvp->base.destroy = nv84_vp_destroy;
+ pvp->base.init = nv84_vp_init;
+ pvp->base.fini = nv84_vp_fini;
+
+ NVOBJ_ENGINE_ADD(dev, VP, &pvp->base);
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nv98_crypt.c b/drivers/gpu/drm/nouveau/nv98_crypt.c
new file mode 100644
index 000000000000..db94ff0a9fab
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv98_crypt.c
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_util.h"
+#include "nouveau_vm.h"
+#include "nouveau_ramht.h"
+
+struct nv98_crypt_engine {
+ struct nouveau_exec_engine base;
+};
+
+static int
+nv98_crypt_fini(struct drm_device *dev, int engine, bool suspend)
+{
+ if (!(nv_rd32(dev, 0x000200) & 0x00004000))
+ return 0;
+
+ nv_mask(dev, 0x000200, 0x00004000, 0x00000000);
+ return 0;
+}
+
+static int
+nv98_crypt_init(struct drm_device *dev, int engine)
+{
+ nv_mask(dev, 0x000200, 0x00004000, 0x00000000);
+ nv_mask(dev, 0x000200, 0x00004000, 0x00004000);
+ return 0;
+}
+
+static void
+nv98_crypt_destroy(struct drm_device *dev, int engine)
+{
+ struct nv98_crypt_engine *pcrypt = nv_engine(dev, engine);
+
+ NVOBJ_ENGINE_DEL(dev, CRYPT);
+
+ kfree(pcrypt);
+}
+
+int
+nv98_crypt_create(struct drm_device *dev)
+{
+ struct nv98_crypt_engine *pcrypt;
+
+ pcrypt = kzalloc(sizeof(*pcrypt), GFP_KERNEL);
+ if (!pcrypt)
+ return -ENOMEM;
+
+ pcrypt->base.destroy = nv98_crypt_destroy;
+ pcrypt->base.init = nv98_crypt_init;
+ pcrypt->base.fini = nv98_crypt_fini;
+
+ NVOBJ_ENGINE_ADD(dev, CRYPT, &pcrypt->base);
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nv98_ppp.c b/drivers/gpu/drm/nouveau/nv98_ppp.c
new file mode 100644
index 000000000000..a987dd6e0036
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv98_ppp.c
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_util.h"
+#include "nouveau_vm.h"
+#include "nouveau_ramht.h"
+
+struct nv98_ppp_engine {
+ struct nouveau_exec_engine base;
+};
+
+static int
+nv98_ppp_fini(struct drm_device *dev, int engine, bool suspend)
+{
+ if (!(nv_rd32(dev, 0x000200) & 0x00000002))
+ return 0;
+
+ nv_mask(dev, 0x000200, 0x00000002, 0x00000000);
+ return 0;
+}
+
+static int
+nv98_ppp_init(struct drm_device *dev, int engine)
+{
+ nv_mask(dev, 0x000200, 0x00000002, 0x00000000);
+ nv_mask(dev, 0x000200, 0x00000002, 0x00000002);
+ return 0;
+}
+
+static void
+nv98_ppp_destroy(struct drm_device *dev, int engine)
+{
+ struct nv98_ppp_engine *pppp = nv_engine(dev, engine);
+
+ NVOBJ_ENGINE_DEL(dev, PPP);
+
+ kfree(pppp);
+}
+
+int
+nv98_ppp_create(struct drm_device *dev)
+{
+ struct nv98_ppp_engine *pppp;
+
+ pppp = kzalloc(sizeof(*pppp), GFP_KERNEL);
+ if (!pppp)
+ return -ENOMEM;
+
+ pppp->base.destroy = nv98_ppp_destroy;
+ pppp->base.init = nv98_ppp_init;
+ pppp->base.fini = nv98_ppp_fini;
+
+ NVOBJ_ENGINE_ADD(dev, PPP, &pppp->base);
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nva3_copy.fuc b/drivers/gpu/drm/nouveau/nva3_copy.fuc
index eaf35f8321ee..abc36626fef0 100644
--- a/drivers/gpu/drm/nouveau/nva3_copy.fuc
+++ b/drivers/gpu/drm/nouveau/nva3_copy.fuc
@@ -31,8 +31,9 @@
*/
ifdef(`NVA3',
-.section nva3_pcopy_data,
-.section nvc0_pcopy_data
+.section #nva3_pcopy_data
+,
+.section #nvc0_pcopy_data
)
ctx_object: .b32 0
@@ -42,7 +43,7 @@ ctx_dma_query: .b32 0
ctx_dma_src: .b32 0
ctx_dma_dst: .b32 0
,)
-.equ ctx_dma_count 3
+.equ #ctx_dma_count 3
ctx_query_address_high: .b32 0
ctx_query_address_low: .b32 0
ctx_query_counter: .b32 0
@@ -78,64 +79,65 @@ ctx_ycnt: .b32 0
dispatch_table:
// mthd 0x0000, NAME
.b16 0x000 1
-.b32 ctx_object ~0xffffffff
+.b32 #ctx_object ~0xffffffff
// mthd 0x0100, NOP
.b16 0x040 1
-.b32 0x00010000 + cmd_nop ~0xffffffff
+.b32 0x00010000 + #cmd_nop ~0xffffffff
// mthd 0x0140, PM_TRIGGER
.b16 0x050 1
-.b32 0x00010000 + cmd_pm_trigger ~0xffffffff
+.b32 0x00010000 + #cmd_pm_trigger ~0xffffffff
ifdef(`NVA3', `
// mthd 0x0180-0x018c, DMA_
-.b16 0x060 ctx_dma_count
+.b16 0x060 #ctx_dma_count
dispatch_dma:
-.b32 0x00010000 + cmd_dma ~0xffffffff
-.b32 0x00010000 + cmd_dma ~0xffffffff
-.b32 0x00010000 + cmd_dma ~0xffffffff
+.b32 0x00010000 + #cmd_dma ~0xffffffff
+.b32 0x00010000 + #cmd_dma ~0xffffffff
+.b32 0x00010000 + #cmd_dma ~0xffffffff
',)
// mthd 0x0200-0x0218, SRC_TILE
.b16 0x80 7
-.b32 ctx_src_tile_mode ~0x00000fff
-.b32 ctx_src_xsize ~0x0007ffff
-.b32 ctx_src_ysize ~0x00001fff
-.b32 ctx_src_zsize ~0x000007ff
-.b32 ctx_src_zoff ~0x00000fff
-.b32 ctx_src_xoff ~0x0007ffff
-.b32 ctx_src_yoff ~0x00001fff
+.b32 #ctx_src_tile_mode ~0x00000fff
+.b32 #ctx_src_xsize ~0x0007ffff
+.b32 #ctx_src_ysize ~0x00001fff
+.b32 #ctx_src_zsize ~0x000007ff
+.b32 #ctx_src_zoff ~0x00000fff
+.b32 #ctx_src_xoff ~0x0007ffff
+.b32 #ctx_src_yoff ~0x00001fff
// mthd 0x0220-0x0238, DST_TILE
.b16 0x88 7
-.b32 ctx_dst_tile_mode ~0x00000fff
-.b32 ctx_dst_xsize ~0x0007ffff
-.b32 ctx_dst_ysize ~0x00001fff
-.b32 ctx_dst_zsize ~0x000007ff
-.b32 ctx_dst_zoff ~0x00000fff
-.b32 ctx_dst_xoff ~0x0007ffff
-.b32 ctx_dst_yoff ~0x00001fff
+.b32 #ctx_dst_tile_mode ~0x00000fff
+.b32 #ctx_dst_xsize ~0x0007ffff
+.b32 #ctx_dst_ysize ~0x00001fff
+.b32 #ctx_dst_zsize ~0x000007ff
+.b32 #ctx_dst_zoff ~0x00000fff
+.b32 #ctx_dst_xoff ~0x0007ffff
+.b32 #ctx_dst_yoff ~0x00001fff
// mthd 0x0300-0x0304, EXEC, WRCACHE_FLUSH
.b16 0xc0 2
-.b32 0x00010000 + cmd_exec ~0xffffffff
-.b32 0x00010000 + cmd_wrcache_flush ~0xffffffff
+.b32 0x00010000 + #cmd_exec ~0xffffffff
+.b32 0x00010000 + #cmd_wrcache_flush ~0xffffffff
// mthd 0x030c-0x0340, various stuff
.b16 0xc3 14
-.b32 ctx_src_address_high ~0x000000ff
-.b32 ctx_src_address_low ~0xfffffff0
-.b32 ctx_dst_address_high ~0x000000ff
-.b32 ctx_dst_address_low ~0xfffffff0
-.b32 ctx_src_pitch ~0x0007ffff
-.b32 ctx_dst_pitch ~0x0007ffff
-.b32 ctx_xcnt ~0x0000ffff
-.b32 ctx_ycnt ~0x00001fff
-.b32 ctx_format ~0x0333ffff
-.b32 ctx_swz_const0 ~0xffffffff
-.b32 ctx_swz_const1 ~0xffffffff
-.b32 ctx_query_address_high ~0x000000ff
-.b32 ctx_query_address_low ~0xffffffff
-.b32 ctx_query_counter ~0xffffffff
+.b32 #ctx_src_address_high ~0x000000ff
+.b32 #ctx_src_address_low ~0xfffffff0
+.b32 #ctx_dst_address_high ~0x000000ff
+.b32 #ctx_dst_address_low ~0xfffffff0
+.b32 #ctx_src_pitch ~0x0007ffff
+.b32 #ctx_dst_pitch ~0x0007ffff
+.b32 #ctx_xcnt ~0x0000ffff
+.b32 #ctx_ycnt ~0x00001fff
+.b32 #ctx_format ~0x0333ffff
+.b32 #ctx_swz_const0 ~0xffffffff
+.b32 #ctx_swz_const1 ~0xffffffff
+.b32 #ctx_query_address_high ~0x000000ff
+.b32 #ctx_query_address_low ~0xffffffff
+.b32 #ctx_query_counter ~0xffffffff
.b16 0x800 0
ifdef(`NVA3',
-.section nva3_pcopy_code,
-.section nvc0_pcopy_code
+.section #nva3_pcopy_code
+,
+.section #nvc0_pcopy_code
)
main:
@@ -143,12 +145,12 @@ main:
mov $sp $r0
// setup i0 handler and route fifo and ctxswitch to it
- mov $r1 ih
+ mov $r1 #ih
mov $iv0 $r1
mov $r1 0x400
movw $r2 0xfff3
sethi $r2 0
- iowr I[$r2 + 0x300] $r2
+ iowr I[$r1 + 0x300] $r2
// enable interrupts
or $r2 0xc
@@ -164,19 +166,19 @@ main:
bset $flags $p0
spin:
sleep $p0
- bra spin
+ bra #spin
// i0 handler
ih:
iord $r1 I[$r0 + 0x200]
and $r2 $r1 0x00000008
- bra e ih_no_chsw
- call chsw
+ bra e #ih_no_chsw
+ call #chsw
ih_no_chsw:
and $r2 $r1 0x00000004
- bra e ih_no_cmd
- call dispatch
+ bra e #ih_no_cmd
+ call #dispatch
ih_no_cmd:
and $r1 $r1 0x0000000c
@@ -235,9 +237,9 @@ ifdef(`NVA3', `
sethi $r4 0x60000
// swap!
- bra $p1 swctx_load
+ bra $p1 #swctx_load
xdst $r0 $r4
- bra swctx_done
+ bra #swctx_done
swctx_load:
xdld $r0 $r4
swctx_done:
@@ -251,9 +253,9 @@ chsw:
// if it's active, unload it and return
xbit $r15 $r3 0x1e
- bra e chsw_no_unload
+ bra e #chsw_no_unload
bclr $flags $p1
- call swctx
+ call #swctx
bclr $r3 0x1e
iowr I[$r2] $r3
mov $r4 1
@@ -266,20 +268,20 @@ chsw:
// is there a channel waiting to be loaded?
xbit $r13 $r3 0x1e
- bra e chsw_finish_load
+ bra e #chsw_finish_load
bset $flags $p1
- call swctx
+ call #swctx
ifdef(`NVA3',
// load dma objects back into TARGET regs
- mov $r5 ctx_dma
- mov $r6 ctx_dma_count
+ mov $r5 #ctx_dma
+ mov $r6 #ctx_dma_count
chsw_load_ctx_dma:
ld b32 $r7 D[$r5 + $r6 * 4]
add b32 $r8 $r6 0x180
shl b32 $r8 8
iowr I[$r8] $r7
sub b32 $r6 1
- bra nc chsw_load_ctx_dma
+ bra nc #chsw_load_ctx_dma
,)
chsw_finish_load:
@@ -297,7 +299,7 @@ dispatch:
shl b32 $r2 0x10
// lookup method in the dispatch table, ILLEGAL_MTHD if not found
- mov $r5 dispatch_table
+ mov $r5 #dispatch_table
clear b32 $r6
clear b32 $r7
dispatch_loop:
@@ -305,14 +307,14 @@ dispatch:
ld b16 $r7 D[$r5 + 2]
add b32 $r5 4
cmpu b32 $r4 $r6
- bra c dispatch_illegal_mthd
+ bra c #dispatch_illegal_mthd
add b32 $r7 $r6
cmpu b32 $r4 $r7
- bra c dispatch_valid_mthd
+ bra c #dispatch_valid_mthd
sub b32 $r7 $r6
shl b32 $r7 3
add b32 $r5 $r7
- bra dispatch_loop
+ bra #dispatch_loop
// ensure no bits set in reserved fields, INVALID_BITFIELD
dispatch_valid_mthd:
@@ -322,20 +324,20 @@ dispatch:
ld b32 $r5 D[$r4 + 4]
and $r5 $r3
cmpu b32 $r5 0
- bra ne dispatch_invalid_bitfield
+ bra ne #dispatch_invalid_bitfield
// depending on dispatch flags: execute method, or save data as state
ld b16 $r5 D[$r4 + 0]
ld b16 $r6 D[$r4 + 2]
cmpu b32 $r6 0
- bra ne dispatch_cmd
+ bra ne #dispatch_cmd
st b32 D[$r5] $r3
- bra dispatch_done
+ bra #dispatch_done
dispatch_cmd:
bclr $flags $p1
call $r5
- bra $p1 dispatch_error
- bra dispatch_done
+ bra $p1 #dispatch_error
+ bra #dispatch_done
dispatch_invalid_bitfield:
or $r2 2
@@ -353,7 +355,7 @@ dispatch:
iord $r2 I[$r0 + 0x200]
and $r2 0x40
cmpu b32 $r2 0
- bra ne hostirq_wait
+ bra ne #hostirq_wait
dispatch_done:
mov $r2 0x1d00
@@ -409,10 +411,10 @@ ifdef(`NVA3',
// $r2: hostirq state
// $r3: data
cmd_dma:
- sub b32 $r4 dispatch_dma
+ sub b32 $r4 #dispatch_dma
shr b32 $r4 1
bset $r3 0x1e
- st b32 D[$r4 + ctx_dma] $r3
+ st b32 D[$r4 + #ctx_dma] $r3
add b32 $r4 0x600
shl b32 $r4 6
iowr I[$r4] $r3
@@ -430,7 +432,7 @@ cmd_exec_set_format:
st b32 D[$sp + 0x0c] $r0
// extract cpp, src_ncomp and dst_ncomp from FORMAT
- ld b32 $r4 D[$r0 + ctx_format]
+ ld b32 $r4 D[$r0 + #ctx_format]
extr $r5 $r4 16:17
add b32 $r5 1
extr $r6 $r4 20:21
@@ -448,22 +450,22 @@ cmd_exec_set_format:
clear b32 $r11
bpc_loop:
cmpu b8 $r10 4
- bra nc cmp_c0
+ bra nc #cmp_c0
mulu $r12 $r10 $r5
add b32 $r12 $r11
bset $flags $p2
- bra bpc_next
+ bra #bpc_next
cmp_c0:
- bra ne cmp_c1
+ bra ne #cmp_c1
mov $r12 0x10
add b32 $r12 $r11
- bra bpc_next
+ bra #bpc_next
cmp_c1:
cmpu b8 $r10 6
- bra nc cmp_zero
+ bra nc #cmp_zero
mov $r12 0x14
add b32 $r12 $r11
- bra bpc_next
+ bra #bpc_next
cmp_zero:
mov $r12 0x80
bpc_next:
@@ -471,22 +473,22 @@ cmd_exec_set_format:
add b32 $r8 1
add b32 $r11 1
cmpu b32 $r11 $r5
- bra c bpc_loop
+ bra c #bpc_loop
add b32 $r9 1
cmpu b32 $r9 $r7
- bra c ncomp_loop
+ bra c #ncomp_loop
// SRC_XCNT = (xcnt * src_cpp), or 0 if no src ref in swz (hw will hang)
mulu $r6 $r5
- st b32 D[$r0 + ctx_src_cpp] $r6
- ld b32 $r8 D[$r0 + ctx_xcnt]
+ st b32 D[$r0 + #ctx_src_cpp] $r6
+ ld b32 $r8 D[$r0 + #ctx_xcnt]
mulu $r6 $r8
- bra $p2 dst_xcnt
+ bra $p2 #dst_xcnt
clear b32 $r6
dst_xcnt:
mulu $r7 $r5
- st b32 D[$r0 + ctx_dst_cpp] $r7
+ st b32 D[$r0 + #ctx_dst_cpp] $r7
mulu $r7 $r8
mov $r5 0x810
@@ -494,10 +496,10 @@ cmd_exec_set_format:
iowr I[$r5 + 0x000] $r6
iowr I[$r5 + 0x100] $r7
add b32 $r5 0x800
- ld b32 $r6 D[$r0 + ctx_dst_cpp]
+ ld b32 $r6 D[$r0 + #ctx_dst_cpp]
sub b32 $r6 1
shl b32 $r6 8
- ld b32 $r7 D[$r0 + ctx_src_cpp]
+ ld b32 $r7 D[$r0 + #ctx_src_cpp]
sub b32 $r7 1
or $r6 $r7
iowr I[$r5 + 0x000] $r6
@@ -511,9 +513,9 @@ cmd_exec_set_format:
ld b32 $r6 D[$sp + 0x0c]
iowr I[$r5 + 0x300] $r6
add b32 $r5 0x400
- ld b32 $r6 D[$r0 + ctx_swz_const0]
+ ld b32 $r6 D[$r0 + #ctx_swz_const0]
iowr I[$r5 + 0x000] $r6
- ld b32 $r6 D[$r0 + ctx_swz_const1]
+ ld b32 $r6 D[$r0 + #ctx_swz_const1]
iowr I[$r5 + 0x100] $r6
add $sp 0x10
ret
@@ -543,7 +545,7 @@ cmd_exec_set_format:
//
cmd_exec_set_surface_tiled:
// translate TILE_MODE into Tp, Th, Td shift values
- ld b32 $r7 D[$r5 + ctx_src_tile_mode]
+ ld b32 $r7 D[$r5 + #ctx_src_tile_mode]
extr $r9 $r7 8:11
extr $r8 $r7 4:7
ifdef(`NVA3',
@@ -553,9 +555,9 @@ ifdef(`NVA3',
)
extr $r7 $r7 0:3
cmp b32 $r7 0xe
- bra ne xtile64
+ bra ne #xtile64
mov $r7 4
- bra xtileok
+ bra #xtileok
xtile64:
xbit $r7 $flags $p2
add b32 $r7 17
@@ -565,8 +567,8 @@ ifdef(`NVA3',
// Op = (x * cpp) & ((1 << Tp) - 1)
// Tx = (x * cpp) >> Tp
- ld b32 $r10 D[$r5 + ctx_src_xoff]
- ld b32 $r11 D[$r5 + ctx_src_cpp]
+ ld b32 $r10 D[$r5 + #ctx_src_xoff]
+ ld b32 $r11 D[$r5 + #ctx_src_cpp]
mulu $r10 $r11
mov $r11 1
shl b32 $r11 $r7
@@ -576,7 +578,7 @@ ifdef(`NVA3',
// Tyo = y & ((1 << Th) - 1)
// Ty = y >> Th
- ld b32 $r13 D[$r5 + ctx_src_yoff]
+ ld b32 $r13 D[$r5 + #ctx_src_yoff]
mov $r14 1
shl b32 $r14 $r8
sub b32 $r14 1
@@ -598,8 +600,8 @@ ifdef(`NVA3',
add b32 $r12 $r11
// nTx = ((w * cpp) + ((1 << Tp) - 1) >> Tp)
- ld b32 $r15 D[$r5 + ctx_src_xsize]
- ld b32 $r11 D[$r5 + ctx_src_cpp]
+ ld b32 $r15 D[$r5 + #ctx_src_xsize]
+ ld b32 $r11 D[$r5 + #ctx_src_cpp]
mulu $r15 $r11
mov $r11 1
shl b32 $r11 $r7
@@ -609,7 +611,7 @@ ifdef(`NVA3',
push $r15
// nTy = (h + ((1 << Th) - 1)) >> Th
- ld b32 $r15 D[$r5 + ctx_src_ysize]
+ ld b32 $r15 D[$r5 + #ctx_src_ysize]
mov $r11 1
shl b32 $r11 $r8
sub b32 $r11 1
@@ -629,7 +631,7 @@ ifdef(`NVA3',
// Tz = z >> Td
// Op += Tzo << Tys
// Ts = Tys + Td
- ld b32 $r8 D[$r5 + ctx_src_zoff]
+ ld b32 $r8 D[$r5 + #ctx_src_zoff]
mov $r14 1
shl b32 $r14 $r9
sub b32 $r14 1
@@ -656,8 +658,8 @@ ifdef(`NVA3',
// SRC_ADDRESS_LOW = (Ot + Op) & 0xffffffff
// CFG_ADDRESS_HIGH |= ((Ot + Op) >> 32) << 16
- ld b32 $r7 D[$r5 + ctx_src_address_low]
- ld b32 $r8 D[$r5 + ctx_src_address_high]
+ ld b32 $r7 D[$r5 + #ctx_src_address_low]
+ ld b32 $r8 D[$r5 + #ctx_src_address_high]
add b32 $r10 $r12
add b32 $r7 $r10
adc b32 $r8 0
@@ -677,14 +679,14 @@ cmd_exec_set_surface_linear:
xbit $r6 $flags $p2
add b32 $r6 0x202
shl b32 $r6 8
- ld b32 $r7 D[$r5 + ctx_src_address_low]
+ ld b32 $r7 D[$r5 + #ctx_src_address_low]
iowr I[$r6 + 0x000] $r7
add b32 $r6 0x400
- ld b32 $r7 D[$r5 + ctx_src_address_high]
+ ld b32 $r7 D[$r5 + #ctx_src_address_high]
shl b32 $r7 16
iowr I[$r6 + 0x000] $r7
add b32 $r6 0x400
- ld b32 $r7 D[$r5 + ctx_src_pitch]
+ ld b32 $r7 D[$r5 + #ctx_src_pitch]
iowr I[$r6 + 0x000] $r7
ret
@@ -697,7 +699,7 @@ cmd_exec_wait:
loop:
iord $r1 I[$r0]
and $r1 1
- bra ne loop
+ bra ne #loop
pop $r1
pop $r0
ret
@@ -705,18 +707,18 @@ cmd_exec_wait:
cmd_exec_query:
// if QUERY_SHORT not set, write out { -, 0, TIME_LO, TIME_HI }
xbit $r4 $r3 13
- bra ne query_counter
- call cmd_exec_wait
+ bra ne #query_counter
+ call #cmd_exec_wait
mov $r4 0x80c
shl b32 $r4 6
- ld b32 $r5 D[$r0 + ctx_query_address_low]
+ ld b32 $r5 D[$r0 + #ctx_query_address_low]
add b32 $r5 4
iowr I[$r4 + 0x000] $r5
iowr I[$r4 + 0x100] $r0
mov $r5 0xc
iowr I[$r4 + 0x200] $r5
add b32 $r4 0x400
- ld b32 $r5 D[$r0 + ctx_query_address_high]
+ ld b32 $r5 D[$r0 + #ctx_query_address_high]
shl b32 $r5 16
iowr I[$r4 + 0x000] $r5
add b32 $r4 0x500
@@ -741,16 +743,16 @@ cmd_exec_query:
// write COUNTER
query_counter:
- call cmd_exec_wait
+ call #cmd_exec_wait
mov $r4 0x80c
shl b32 $r4 6
- ld b32 $r5 D[$r0 + ctx_query_address_low]
+ ld b32 $r5 D[$r0 + #ctx_query_address_low]
iowr I[$r4 + 0x000] $r5
iowr I[$r4 + 0x100] $r0
mov $r5 0x4
iowr I[$r4 + 0x200] $r5
add b32 $r4 0x400
- ld b32 $r5 D[$r0 + ctx_query_address_high]
+ ld b32 $r5 D[$r0 + #ctx_query_address_high]
shl b32 $r5 16
iowr I[$r4 + 0x000] $r5
add b32 $r4 0x500
@@ -759,7 +761,7 @@ cmd_exec_query:
mov $r5 0x00001110
sethi $r5 0x13120000
iowr I[$r4 + 0x100] $r5
- ld b32 $r5 D[$r0 + ctx_query_counter]
+ ld b32 $r5 D[$r0 + #ctx_query_counter]
add b32 $r4 0x500
iowr I[$r4 + 0x000] $r5
mov $r5 0x00002601
@@ -787,22 +789,22 @@ cmd_exec_query:
// $r2: hostirq state
// $r3: data
cmd_exec:
- call cmd_exec_wait
+ call #cmd_exec_wait
// if format requested, call function to calculate it, otherwise
// fill in cpp/xcnt for both surfaces as if (cpp == 1)
xbit $r15 $r3 0
- bra e cmd_exec_no_format
- call cmd_exec_set_format
+ bra e #cmd_exec_no_format
+ call #cmd_exec_set_format
mov $r4 0x200
- bra cmd_exec_init_src_surface
+ bra #cmd_exec_init_src_surface
cmd_exec_no_format:
mov $r6 0x810
shl b32 $r6 6
mov $r7 1
- st b32 D[$r0 + ctx_src_cpp] $r7
- st b32 D[$r0 + ctx_dst_cpp] $r7
- ld b32 $r7 D[$r0 + ctx_xcnt]
+ st b32 D[$r0 + #ctx_src_cpp] $r7
+ st b32 D[$r0 + #ctx_dst_cpp] $r7
+ ld b32 $r7 D[$r0 + #ctx_xcnt]
iowr I[$r6 + 0x000] $r7
iowr I[$r6 + 0x100] $r7
clear b32 $r4
@@ -811,28 +813,28 @@ cmd_exec:
bclr $flags $p2
clear b32 $r5
xbit $r15 $r3 4
- bra e src_tiled
- call cmd_exec_set_surface_linear
- bra cmd_exec_init_dst_surface
+ bra e #src_tiled
+ call #cmd_exec_set_surface_linear
+ bra #cmd_exec_init_dst_surface
src_tiled:
- call cmd_exec_set_surface_tiled
+ call #cmd_exec_set_surface_tiled
bset $r4 7
cmd_exec_init_dst_surface:
bset $flags $p2
- mov $r5 ctx_dst_address_high - ctx_src_address_high
+ mov $r5 #ctx_dst_address_high - #ctx_src_address_high
xbit $r15 $r3 8
- bra e dst_tiled
- call cmd_exec_set_surface_linear
- bra cmd_exec_kick
+ bra e #dst_tiled
+ call #cmd_exec_set_surface_linear
+ bra #cmd_exec_kick
dst_tiled:
- call cmd_exec_set_surface_tiled
+ call #cmd_exec_set_surface_tiled
bset $r4 8
cmd_exec_kick:
mov $r5 0x800
shl b32 $r5 6
- ld b32 $r6 D[$r0 + ctx_ycnt]
+ ld b32 $r6 D[$r0 + #ctx_ycnt]
iowr I[$r5 + 0x100] $r6
mov $r6 0x0041
// SRC_TARGET = 1, DST_TARGET = 2
@@ -842,8 +844,8 @@ cmd_exec:
// if requested, queue up a QUERY write after the copy has completed
xbit $r15 $r3 12
- bra e cmd_exec_done
- call cmd_exec_query
+ bra e #cmd_exec_done
+ call #cmd_exec_query
cmd_exec_done:
ret
diff --git a/drivers/gpu/drm/nouveau/nva3_copy.fuc.h b/drivers/gpu/drm/nouveau/nva3_copy.fuc.h
index 2731de22ebe9..1f33fbdc00be 100644
--- a/drivers/gpu/drm/nouveau/nva3_copy.fuc.h
+++ b/drivers/gpu/drm/nouveau/nva3_copy.fuc.h
@@ -152,7 +152,7 @@ uint32_t nva3_pcopy_code[] = {
0xf10010fe,
0xf1040017,
0xf0fff327,
- 0x22d00023,
+ 0x12d00023,
0x0c25f0c0,
0xf40012d0,
0x17f11031,
diff --git a/drivers/gpu/drm/nouveau/nva3_pm.c b/drivers/gpu/drm/nouveau/nva3_pm.c
index 618c144b7a30..9e636e6ef6d7 100644
--- a/drivers/gpu/drm/nouveau/nva3_pm.c
+++ b/drivers/gpu/drm/nouveau/nva3_pm.c
@@ -287,12 +287,13 @@ nva3_pm_grcp_idle(void *data)
return false;
}
-void
+int
nva3_pm_clocks_set(struct drm_device *dev, void *pre_state)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nva3_pm_state *info = pre_state;
unsigned long flags;
+ int ret = -EAGAIN;
/* prevent any new grctx switches from starting */
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
@@ -328,6 +329,8 @@ nva3_pm_clocks_set(struct drm_device *dev, void *pre_state)
nv_wr32(dev, 0x100210, 0x80000000);
}
+ ret = 0;
+
cleanup:
/* unfreeze PFIFO */
nv_mask(dev, 0x002504, 0x00000001, 0x00000000);
@@ -339,4 +342,5 @@ cleanup:
nv_mask(dev, 0x400824, 0x10000000, 0x10000000);
spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
kfree(info);
+ return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nvc0_copy.fuc.h b/drivers/gpu/drm/nouveau/nvc0_copy.fuc.h
index 419903880e9d..a8d17458ced1 100644
--- a/drivers/gpu/drm/nouveau/nvc0_copy.fuc.h
+++ b/drivers/gpu/drm/nouveau/nvc0_copy.fuc.h
@@ -145,7 +145,7 @@ uint32_t nvc0_pcopy_code[] = {
0xf10010fe,
0xf1040017,
0xf0fff327,
- 0x22d00023,
+ 0x12d00023,
0x0c25f0c0,
0xf40012d0,
0x17f11031,
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c
index a74e501afd25..8ee3963f9030 100644
--- a/drivers/gpu/drm/nouveau/nvc0_graph.c
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.c
@@ -381,6 +381,8 @@ nvc0_graph_init_gpc_0(struct drm_device *dev)
u8 tpnr[GPC_MAX];
int i, gpc, tpc;
+ nv_wr32(dev, TP_UNIT(0, 0, 0x5c), 1); /* affects TFB offset queries */
+
/*
* TP ROP UNKVAL(magic_not_rop_nr)
* 450: 4/0/0/0 2 3
@@ -873,14 +875,16 @@ nvc0_graph_create(struct drm_device *dev)
case 0xcf: /* 4/0/0/0, 3 */
priv->magic_not_rop_nr = 0x03;
break;
+ case 0xd9: /* 1/0/0/0, 1 */
+ priv->magic_not_rop_nr = 0x01;
+ break;
}
if (!priv->magic_not_rop_nr) {
NV_ERROR(dev, "PGRAPH: unknown config: %d/%d/%d/%d, %d\n",
priv->tp_nr[0], priv->tp_nr[1], priv->tp_nr[2],
priv->tp_nr[3], priv->rop_nr);
- /* use 0xc3's values... */
- priv->magic_not_rop_nr = 0x03;
+ priv->magic_not_rop_nr = 0x00;
}
NVOBJ_CLASS(dev, 0x902d, GR); /* 2D */
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.fuc b/drivers/gpu/drm/nouveau/nvc0_graph.fuc
index 2a4b6dc8f9de..e6b228844a32 100644
--- a/drivers/gpu/drm/nouveau/nvc0_graph.fuc
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.fuc
@@ -71,9 +71,9 @@ queue_put:
ld b32 $r9 D[$r13 + 0x4] // PUT
xor $r8 8
cmpu b32 $r8 $r9
- bra ne queue_put_next
+ bra ne #queue_put_next
mov $r15 E_CMD_OVERFLOW
- call error
+ call #error
ret
// store cmd/data on queue
@@ -104,7 +104,7 @@ queue_get:
ld b32 $r8 D[$r13 + 0x0] // GET
ld b32 $r9 D[$r13 + 0x4] // PUT
cmpu b32 $r8 $r9
- bra e queue_get_done
+ bra e #queue_get_done
// fetch first cmd/data pair
and $r9 $r8 7
shl b32 $r9 3
@@ -135,9 +135,9 @@ nv_rd32:
nv_rd32_wait:
iord $r12 I[$r11 + 0x000]
xbit $r12 $r12 31
- bra ne nv_rd32_wait
+ bra ne #nv_rd32_wait
mov $r10 6 // DONE_MMIO_RD
- call wait_doneo
+ call #wait_doneo
iord $r15 I[$r11 + 0x100] // MMIO_RDVAL
ret
@@ -157,7 +157,7 @@ nv_wr32:
nv_wr32_wait:
iord $r12 I[$r11 + 0x000]
xbit $r12 $r12 31
- bra ne nv_wr32_wait
+ bra ne #nv_wr32_wait
ret
// (re)set watchdog timer
@@ -193,7 +193,7 @@ $1:
shl b32 $r8 6
iord $r8 I[$r8 + 0x000] // DONE
xbit $r8 $r8 $r10
- bra $2 wait_done_$1
+ bra $2 #wait_done_$1
trace_clr(T_WAIT)
ret
')
@@ -216,7 +216,7 @@ mmctx_size:
add b32 $r9 $r8
add b32 $r14 4
cmpu b32 $r14 $r15
- bra ne nv_mmctx_size_loop
+ bra ne #nv_mmctx_size_loop
mov b32 $r15 $r9
ret
@@ -238,12 +238,12 @@ mmctx_xfer:
shl b32 $r8 6
clear b32 $r9
or $r11 $r11
- bra e mmctx_base_disabled
+ bra e #mmctx_base_disabled
iowr I[$r8 + 0x000] $r11 // MMCTX_BASE
bset $r9 0 // BASE_EN
mmctx_base_disabled:
or $r14 $r14
- bra e mmctx_multi_disabled
+ bra e #mmctx_multi_disabled
iowr I[$r8 + 0x200] $r14 // MMCTX_MULTI_STRIDE
iowr I[$r8 + 0x300] $r15 // MMCTX_MULTI_MASK
bset $r9 1 // MULTI_EN
@@ -264,7 +264,7 @@ mmctx_xfer:
mmctx_wait_free:
iord $r14 I[$r8 + 0x000] // MMCTX_CTRL
and $r14 0x1f
- bra e mmctx_wait_free
+ bra e #mmctx_wait_free
// queue up an entry
ld b32 $r14 D[$r12]
@@ -272,19 +272,19 @@ mmctx_xfer:
iowr I[$r8 + 0x300] $r14
add b32 $r12 4
cmpu b32 $r12 $r13
- bra ne mmctx_exec_loop
+ bra ne #mmctx_exec_loop
xbit $r11 $r10 2
- bra ne mmctx_stop
+ bra ne #mmctx_stop
// wait for queue to empty
mmctx_fini_wait:
iord $r11 I[$r8 + 0x000] // MMCTX_CTRL
and $r11 0x1f
cmpu b32 $r11 0x10
- bra ne mmctx_fini_wait
+ bra ne #mmctx_fini_wait
mov $r10 2 // DONE_MMCTX
- call wait_donez
- bra mmctx_done
+ call #wait_donez
+ bra #mmctx_done
mmctx_stop:
xbit $r11 $r10 0
shl b32 $r11 16 // DIR
@@ -295,7 +295,7 @@ mmctx_xfer:
// wait for STOP_TRIGGER to clear
iord $r11 I[$r8 + 0x000] // MMCTX_CTRL
xbit $r11 $r11 18
- bra ne mmctx_stop_wait
+ bra ne #mmctx_stop_wait
mmctx_done:
trace_clr(T_MMCTX)
ret
@@ -305,7 +305,7 @@ mmctx_xfer:
strand_wait:
push $r10
mov $r10 2
- call wait_donez
+ call #wait_donez
pop $r10
ret
@@ -316,7 +316,7 @@ strand_pre:
sethi $r8 0x20000
mov $r9 0xc
iowr I[$r8] $r9
- call strand_wait
+ call #strand_wait
ret
// unknown - call after issuing strand commands
@@ -326,7 +326,7 @@ strand_post:
sethi $r8 0x20000
mov $r9 0xd
iowr I[$r8] $r9
- call strand_wait
+ call #strand_wait
ret
// Selects strand set?!
@@ -341,11 +341,11 @@ strand_set:
iowr I[$r10 + 0x000] $r12 // 0x93c = 0xf
mov $r12 0xb
iowr I[$r11 + 0x000] $r12 // 0x928 = 0xb
- call strand_wait
+ call #strand_wait
iowr I[$r10 + 0x000] $r14 // 0x93c = <id>
mov $r12 0xa
iowr I[$r11 + 0x000] $r12 // 0x928 = 0xa
- call strand_wait
+ call #strand_wait
ret
// Initialise strand context data
@@ -357,22 +357,22 @@ strand_set:
//
strand_ctx_init:
trace_set(T_STRINIT)
- call strand_pre
+ call #strand_pre
mov $r14 3
- call strand_set
+ call #strand_set
mov $r10 0x46fc
sethi $r10 0x20000
add b32 $r11 $r10 0x400
iowr I[$r10 + 0x100] $r0 // STRAND_FIRST_GENE = 0
mov $r12 1
iowr I[$r11 + 0x000] $r12 // STRAND_CMD = LATCH_FIRST_GENE
- call strand_wait
+ call #strand_wait
sub b32 $r12 $r0 1
iowr I[$r10 + 0x000] $r12 // STRAND_GENE_CNT = 0xffffffff
mov $r12 2
iowr I[$r11 + 0x000] $r12 // STRAND_CMD = LATCH_GENE_CNT
- call strand_wait
- call strand_post
+ call #strand_wait
+ call #strand_post
// read the size of each strand, poke the context offset of
// each into STRAND_{SAVE,LOAD}_SWBASE now, no need to worry
@@ -391,7 +391,7 @@ strand_ctx_init:
add b32 $r14 $r10
add b32 $r8 4
sub b32 $r9 1
- bra ne ctx_init_strand_loop
+ bra ne #ctx_init_strand_loop
shl b32 $r14 8
sub b32 $r15 $r14 $r15
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.h b/drivers/gpu/drm/nouveau/nvc0_graph.h
index 636fe9812f79..91d44ea662d9 100644
--- a/drivers/gpu/drm/nouveau/nvc0_graph.h
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.h
@@ -87,6 +87,7 @@ nvc0_graph_class(struct drm_device *dev)
case 0xc1:
return 0x9197;
case 0xc8:
+ case 0xd9:
return 0x9297;
default:
return 0;
diff --git a/drivers/gpu/drm/nouveau/nvc0_grctx.c b/drivers/gpu/drm/nouveau/nvc0_grctx.c
index 96b0b93d94ca..de77842b31c0 100644
--- a/drivers/gpu/drm/nouveau/nvc0_grctx.c
+++ b/drivers/gpu/drm/nouveau/nvc0_grctx.c
@@ -1268,6 +1268,17 @@ nvc0_grctx_generate_9039(struct drm_device *dev)
static void
nvc0_grctx_generate_90c0(struct drm_device *dev)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int i;
+
+ for (i = 0; dev_priv->chipset == 0xd9 && i < 4; i++) {
+ nv_mthd(dev, 0x90c0, 0x2700 + (i * 0x40), 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x2720 + (i * 0x40), 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x2704 + (i * 0x40), 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x2724 + (i * 0x40), 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x2708 + (i * 0x40), 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x2728 + (i * 0x40), 0x00000000);
+ }
nv_mthd(dev, 0x90c0, 0x270c, 0x00000000);
nv_mthd(dev, 0x90c0, 0x272c, 0x00000000);
nv_mthd(dev, 0x90c0, 0x274c, 0x00000000);
@@ -1276,6 +1287,12 @@ nvc0_grctx_generate_90c0(struct drm_device *dev)
nv_mthd(dev, 0x90c0, 0x27ac, 0x00000000);
nv_mthd(dev, 0x90c0, 0x27cc, 0x00000000);
nv_mthd(dev, 0x90c0, 0x27ec, 0x00000000);
+ for (i = 0; dev_priv->chipset == 0xd9 && i < 4; i++) {
+ nv_mthd(dev, 0x90c0, 0x2710 + (i * 0x40), 0x00014000);
+ nv_mthd(dev, 0x90c0, 0x2730 + (i * 0x40), 0x00014000);
+ nv_mthd(dev, 0x90c0, 0x2714 + (i * 0x40), 0x00000040);
+ nv_mthd(dev, 0x90c0, 0x2734 + (i * 0x40), 0x00000040);
+ }
nv_mthd(dev, 0x90c0, 0x030c, 0x00000001);
nv_mthd(dev, 0x90c0, 0x1944, 0x00000000);
nv_mthd(dev, 0x90c0, 0x0758, 0x00000100);
@@ -1471,14 +1488,20 @@ nvc0_grctx_generate_shaders(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- if (dev_priv->chipset != 0xc1) {
- nv_wr32(dev, 0x405800, 0x078000bf);
- nv_wr32(dev, 0x405830, 0x02180000);
- } else {
+ if (dev_priv->chipset == 0xd9) {
nv_wr32(dev, 0x405800, 0x0f8000bf);
nv_wr32(dev, 0x405830, 0x02180218);
+ nv_wr32(dev, 0x405834, 0x08000000);
+ } else
+ if (dev_priv->chipset == 0xc1) {
+ nv_wr32(dev, 0x405800, 0x0f8000bf);
+ nv_wr32(dev, 0x405830, 0x02180218);
+ nv_wr32(dev, 0x405834, 0x00000000);
+ } else {
+ nv_wr32(dev, 0x405800, 0x078000bf);
+ nv_wr32(dev, 0x405830, 0x02180000);
+ nv_wr32(dev, 0x405834, 0x00000000);
}
- nv_wr32(dev, 0x405834, 0x00000000);
nv_wr32(dev, 0x405838, 0x00000000);
nv_wr32(dev, 0x405854, 0x00000000);
nv_wr32(dev, 0x405870, 0x00000001);
@@ -1509,7 +1532,10 @@ nvc0_grctx_generate_unk64xx(struct drm_device *dev)
nv_wr32(dev, 0x4064ac, 0x00003fff);
nv_wr32(dev, 0x4064b4, 0x00000000);
nv_wr32(dev, 0x4064b8, 0x00000000);
- if (dev_priv->chipset == 0xc1) {
+ if (dev_priv->chipset == 0xd9)
+ nv_wr32(dev, 0x4064bc, 0x00000000);
+ if (dev_priv->chipset == 0xc1 ||
+ dev_priv->chipset == 0xd9) {
nv_wr32(dev, 0x4064c0, 0x80140078);
nv_wr32(dev, 0x4064c4, 0x0086ffff);
}
@@ -1550,10 +1576,23 @@ nvc0_grctx_generate_rop(struct drm_device *dev)
/* ROPC_BROADCAST */
nv_wr32(dev, 0x408800, 0x02802a3c);
nv_wr32(dev, 0x408804, 0x00000040);
- nv_wr32(dev, 0x408808, chipset != 0xc1 ? 0x0003e00d : 0x1003e005);
- nv_wr32(dev, 0x408900, 0x3080b801);
- nv_wr32(dev, 0x408904, chipset != 0xc1 ? 0x02000001 : 0x62000001);
- nv_wr32(dev, 0x408908, 0x00c80929);
+ if (chipset == 0xd9) {
+ nv_wr32(dev, 0x408808, 0x1043e005);
+ nv_wr32(dev, 0x408900, 0x3080b801);
+ nv_wr32(dev, 0x408904, 0x1043e005);
+ nv_wr32(dev, 0x408908, 0x00c8102f);
+ } else
+ if (chipset == 0xc1) {
+ nv_wr32(dev, 0x408808, 0x1003e005);
+ nv_wr32(dev, 0x408900, 0x3080b801);
+ nv_wr32(dev, 0x408904, 0x62000001);
+ nv_wr32(dev, 0x408908, 0x00c80929);
+ } else {
+ nv_wr32(dev, 0x408808, 0x0003e00d);
+ nv_wr32(dev, 0x408900, 0x3080b801);
+ nv_wr32(dev, 0x408904, 0x02000001);
+ nv_wr32(dev, 0x408908, 0x00c80929);
+ }
nv_wr32(dev, 0x40890c, 0x00000000);
nv_wr32(dev, 0x408980, 0x0000011d);
}
@@ -1572,7 +1611,7 @@ nvc0_grctx_generate_gpc(struct drm_device *dev)
nv_wr32(dev, 0x418408, 0x00000000);
nv_wr32(dev, 0x41840c, 0x00001008);
nv_wr32(dev, 0x418410, 0x0fff0fff);
- nv_wr32(dev, 0x418414, 0x00200fff);
+ nv_wr32(dev, 0x418414, chipset != 0xd9 ? 0x00200fff : 0x02200fff);
nv_wr32(dev, 0x418450, 0x00000000);
nv_wr32(dev, 0x418454, 0x00000000);
nv_wr32(dev, 0x418458, 0x00000000);
@@ -1587,14 +1626,17 @@ nvc0_grctx_generate_gpc(struct drm_device *dev)
nv_wr32(dev, 0x418700, 0x00000002);
nv_wr32(dev, 0x418704, 0x00000080);
nv_wr32(dev, 0x418708, 0x00000000);
- nv_wr32(dev, 0x41870c, 0x07c80000);
+ nv_wr32(dev, 0x41870c, chipset != 0xd9 ? 0x07c80000 : 0x00000000);
nv_wr32(dev, 0x418710, 0x00000000);
- nv_wr32(dev, 0x418800, 0x0006860a);
+ nv_wr32(dev, 0x418800, chipset != 0xd9 ? 0x0006860a : 0x7006860a);
nv_wr32(dev, 0x418808, 0x00000000);
nv_wr32(dev, 0x41880c, 0x00000000);
nv_wr32(dev, 0x418810, 0x00000000);
nv_wr32(dev, 0x418828, 0x00008442);
- nv_wr32(dev, 0x418830, chipset != 0xc1 ? 0x00000001 : 0x10000001);
+ if (chipset == 0xc1 || chipset == 0xd9)
+ nv_wr32(dev, 0x418830, 0x10000001);
+ else
+ nv_wr32(dev, 0x418830, 0x00000001);
nv_wr32(dev, 0x4188d8, 0x00000008);
nv_wr32(dev, 0x4188e0, 0x01000000);
nv_wr32(dev, 0x4188e8, 0x00000000);
@@ -1602,7 +1644,12 @@ nvc0_grctx_generate_gpc(struct drm_device *dev)
nv_wr32(dev, 0x4188f0, 0x00000000);
nv_wr32(dev, 0x4188f4, 0x00000000);
nv_wr32(dev, 0x4188f8, 0x00000000);
- nv_wr32(dev, 0x4188fc, chipset != 0xc1 ? 0x00100000 : 0x00100018);
+ if (chipset == 0xd9)
+ nv_wr32(dev, 0x4188fc, 0x20100008);
+ else if (chipset == 0xc1)
+ nv_wr32(dev, 0x4188fc, 0x00100018);
+ else
+ nv_wr32(dev, 0x4188fc, 0x00100000);
nv_wr32(dev, 0x41891c, 0x00ff00ff);
nv_wr32(dev, 0x418924, 0x00000000);
nv_wr32(dev, 0x418928, 0x00ffff00);
@@ -1616,7 +1663,7 @@ nvc0_grctx_generate_gpc(struct drm_device *dev)
nv_wr32(dev, 0x418a14 + (i * 0x20), 0x00000000);
nv_wr32(dev, 0x418a18 + (i * 0x20), 0x00000000);
}
- nv_wr32(dev, 0x418b00, 0x00000000);
+ nv_wr32(dev, 0x418b00, chipset != 0xd9 ? 0x00000000 : 0x00000006);
nv_wr32(dev, 0x418b08, 0x0a418820);
nv_wr32(dev, 0x418b0c, 0x062080e6);
nv_wr32(dev, 0x418b10, 0x020398a4);
@@ -1633,7 +1680,7 @@ nvc0_grctx_generate_gpc(struct drm_device *dev)
nv_wr32(dev, 0x418c24, 0x00000000);
nv_wr32(dev, 0x418c28, 0x00000000);
nv_wr32(dev, 0x418c2c, 0x00000000);
- if (chipset == 0xc1)
+ if (chipset == 0xc1 || chipset == 0xd9)
nv_wr32(dev, 0x418c6c, 0x00000001);
nv_wr32(dev, 0x418c80, 0x20200004);
nv_wr32(dev, 0x418c8c, 0x00000001);
@@ -1653,7 +1700,10 @@ nvc0_grctx_generate_tp(struct drm_device *dev)
nv_wr32(dev, 0x419818, 0x00000000);
nv_wr32(dev, 0x41983c, 0x00038bc7);
nv_wr32(dev, 0x419848, 0x00000000);
- nv_wr32(dev, 0x419864, chipset != 0xc1 ? 0x0000012a : 0x00000129);
+ if (chipset == 0xc1 || chipset == 0xd9)
+ nv_wr32(dev, 0x419864, 0x00000129);
+ else
+ nv_wr32(dev, 0x419864, 0x0000012a);
nv_wr32(dev, 0x419888, 0x00000000);
nv_wr32(dev, 0x419a00, 0x000001f0);
nv_wr32(dev, 0x419a04, 0x00000001);
@@ -1663,7 +1713,9 @@ nvc0_grctx_generate_tp(struct drm_device *dev)
nv_wr32(dev, 0x419a14, 0x00000200);
nv_wr32(dev, 0x419a1c, 0x00000000);
nv_wr32(dev, 0x419a20, 0x00000800);
- if (chipset != 0xc0 && chipset != 0xc8)
+ if (chipset == 0xd9)
+ nv_wr32(dev, 0x00419ac4, 0x0017f440);
+ else if (chipset != 0xc0 && chipset != 0xc8)
nv_wr32(dev, 0x00419ac4, 0x0007f440);
nv_wr32(dev, 0x419b00, 0x0a418820);
nv_wr32(dev, 0x419b04, 0x062080e6);
@@ -1672,21 +1724,33 @@ nvc0_grctx_generate_tp(struct drm_device *dev)
nv_wr32(dev, 0x419b10, 0x0a418820);
nv_wr32(dev, 0x419b14, 0x000000e6);
nv_wr32(dev, 0x419bd0, 0x00900103);
- nv_wr32(dev, 0x419be0, chipset != 0xc1 ? 0x00000001 : 0x00400001);
+ if (chipset == 0xc1 || chipset == 0xd9)
+ nv_wr32(dev, 0x419be0, 0x00400001);
+ else
+ nv_wr32(dev, 0x419be0, 0x00000001);
nv_wr32(dev, 0x419be4, 0x00000000);
- nv_wr32(dev, 0x419c00, 0x00000002);
+ nv_wr32(dev, 0x419c00, chipset != 0xd9 ? 0x00000002 : 0x0000000a);
nv_wr32(dev, 0x419c04, 0x00000006);
nv_wr32(dev, 0x419c08, 0x00000002);
nv_wr32(dev, 0x419c20, 0x00000000);
- if (chipset == 0xce || chipset == 0xcf)
+ if (dev_priv->chipset == 0xd9) {
+ nv_wr32(dev, 0x419c24, 0x00084210);
+ nv_wr32(dev, 0x419c28, 0x3cf3cf3c);
nv_wr32(dev, 0x419cb0, 0x00020048);
- else
+ } else
+ if (chipset == 0xce || chipset == 0xcf) {
+ nv_wr32(dev, 0x419cb0, 0x00020048);
+ } else {
nv_wr32(dev, 0x419cb0, 0x00060048);
+ }
nv_wr32(dev, 0x419ce8, 0x00000000);
nv_wr32(dev, 0x419cf4, 0x00000183);
- nv_wr32(dev, 0x419d20, chipset != 0xc1 ? 0x02180000 : 0x12180000);
+ if (chipset == 0xc1 || chipset == 0xd9)
+ nv_wr32(dev, 0x419d20, 0x12180000);
+ else
+ nv_wr32(dev, 0x419d20, 0x02180000);
nv_wr32(dev, 0x419d24, 0x00001fff);
- if (chipset == 0xc1)
+ if (chipset == 0xc1 || chipset == 0xd9)
nv_wr32(dev, 0x419d44, 0x02180218);
nv_wr32(dev, 0x419e04, 0x00000000);
nv_wr32(dev, 0x419e08, 0x00000000);
@@ -1986,6 +2050,10 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
nv_icmd(dev, 0x00000215, 0x00000040);
nv_icmd(dev, 0x00000216, 0x00000040);
nv_icmd(dev, 0x00000217, 0x00000040);
+ if (dev_priv->chipset == 0xd9) {
+ for (i = 0x0400; i <= 0x0417; i++)
+ nv_icmd(dev, i, 0x00000040);
+ }
nv_icmd(dev, 0x00000218, 0x0000c080);
nv_icmd(dev, 0x00000219, 0x0000c080);
nv_icmd(dev, 0x0000021a, 0x0000c080);
@@ -1994,6 +2062,10 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
nv_icmd(dev, 0x0000021d, 0x0000c080);
nv_icmd(dev, 0x0000021e, 0x0000c080);
nv_icmd(dev, 0x0000021f, 0x0000c080);
+ if (dev_priv->chipset == 0xd9) {
+ for (i = 0x0440; i <= 0x0457; i++)
+ nv_icmd(dev, i, 0x0000c080);
+ }
nv_icmd(dev, 0x000000ad, 0x0000013e);
nv_icmd(dev, 0x000000e1, 0x00000010);
nv_icmd(dev, 0x00000290, 0x00000000);
@@ -2556,7 +2628,8 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
nv_icmd(dev, 0x0000053f, 0xffff0000);
nv_icmd(dev, 0x00000585, 0x0000003f);
nv_icmd(dev, 0x00000576, 0x00000003);
- if (dev_priv->chipset == 0xc1)
+ if (dev_priv->chipset == 0xc1 ||
+ dev_priv->chipset == 0xd9)
nv_icmd(dev, 0x0000057b, 0x00000059);
nv_icmd(dev, 0x00000586, 0x00000040);
nv_icmd(dev, 0x00000582, 0x00000080);
@@ -2658,6 +2731,8 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
nv_icmd(dev, 0x00000957, 0x00000003);
nv_icmd(dev, 0x0000095e, 0x20164010);
nv_icmd(dev, 0x0000095f, 0x00000020);
+ if (dev_priv->chipset == 0xd9)
+ nv_icmd(dev, 0x0000097d, 0x00000020);
nv_icmd(dev, 0x00000683, 0x00000006);
nv_icmd(dev, 0x00000685, 0x003fffff);
nv_icmd(dev, 0x00000687, 0x00000c48);
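The 0xd9-only writes added to nvc0_grctx_generate_90c0() above keep the chipset test inside the loop condition, so on every other chipset the loop body executes zero times and nothing is emitted. A minimal standalone sketch of that idiom (illustrative only, not part of the diff; names and values are hypothetical):

#include <stdio.h>

/* Sketch of the "chipset test in the loop condition" idiom used above:
 * for chipsets other than 0xd9 the condition is false on the first
 * check, so the whole block is a no-op without an extra if () level.
 */
static void emit_d9_only(int chipset)
{
        int i;

        for (i = 0; chipset == 0xd9 && i < 4; i++)
                printf("method 0x%04x <- 0x00000000\n", 0x2700 + (i * 0x40));
}

int main(void)
{
        emit_d9_only(0xc1);     /* prints nothing */
        emit_d9_only(0xd9);     /* prints 0x2700, 0x2740, 0x2780, 0x27c0 */
        return 0;
}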
diff --git a/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc b/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc
index 06f5e26d1e0f..15272be33b66 100644
--- a/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc
+++ b/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc
@@ -32,7 +32,7 @@
* - watchdog timer around ctx operations
*/
-.section nvc0_grgpc_data
+.section #nvc0_grgpc_data
include(`nvc0_graph.fuc')
gpc_id: .b32 0
gpc_mmio_list_head: .b32 0
@@ -48,40 +48,45 @@ cmd_queue: queue_init
// chipset descriptions
chipsets:
.b8 0xc0 0 0 0
-.b16 nvc0_gpc_mmio_head
-.b16 nvc0_gpc_mmio_tail
-.b16 nvc0_tpc_mmio_head
-.b16 nvc0_tpc_mmio_tail
+.b16 #nvc0_gpc_mmio_head
+.b16 #nvc0_gpc_mmio_tail
+.b16 #nvc0_tpc_mmio_head
+.b16 #nvc0_tpc_mmio_tail
.b8 0xc1 0 0 0
-.b16 nvc0_gpc_mmio_head
-.b16 nvc1_gpc_mmio_tail
-.b16 nvc0_tpc_mmio_head
-.b16 nvc1_tpc_mmio_tail
+.b16 #nvc0_gpc_mmio_head
+.b16 #nvc1_gpc_mmio_tail
+.b16 #nvc0_tpc_mmio_head
+.b16 #nvc1_tpc_mmio_tail
.b8 0xc3 0 0 0
-.b16 nvc0_gpc_mmio_head
-.b16 nvc0_gpc_mmio_tail
-.b16 nvc0_tpc_mmio_head
-.b16 nvc3_tpc_mmio_tail
+.b16 #nvc0_gpc_mmio_head
+.b16 #nvc0_gpc_mmio_tail
+.b16 #nvc0_tpc_mmio_head
+.b16 #nvc3_tpc_mmio_tail
.b8 0xc4 0 0 0
-.b16 nvc0_gpc_mmio_head
-.b16 nvc0_gpc_mmio_tail
-.b16 nvc0_tpc_mmio_head
-.b16 nvc3_tpc_mmio_tail
+.b16 #nvc0_gpc_mmio_head
+.b16 #nvc0_gpc_mmio_tail
+.b16 #nvc0_tpc_mmio_head
+.b16 #nvc3_tpc_mmio_tail
.b8 0xc8 0 0 0
-.b16 nvc0_gpc_mmio_head
-.b16 nvc0_gpc_mmio_tail
-.b16 nvc0_tpc_mmio_head
-.b16 nvc0_tpc_mmio_tail
+.b16 #nvc0_gpc_mmio_head
+.b16 #nvc0_gpc_mmio_tail
+.b16 #nvc0_tpc_mmio_head
+.b16 #nvc0_tpc_mmio_tail
.b8 0xce 0 0 0
-.b16 nvc0_gpc_mmio_head
-.b16 nvc0_gpc_mmio_tail
-.b16 nvc0_tpc_mmio_head
-.b16 nvc3_tpc_mmio_tail
+.b16 #nvc0_gpc_mmio_head
+.b16 #nvc0_gpc_mmio_tail
+.b16 #nvc0_tpc_mmio_head
+.b16 #nvc3_tpc_mmio_tail
.b8 0xcf 0 0 0
-.b16 nvc0_gpc_mmio_head
-.b16 nvc0_gpc_mmio_tail
-.b16 nvc0_tpc_mmio_head
-.b16 nvcf_tpc_mmio_tail
+.b16 #nvc0_gpc_mmio_head
+.b16 #nvc0_gpc_mmio_tail
+.b16 #nvc0_tpc_mmio_head
+.b16 #nvcf_tpc_mmio_tail
+.b8 0xd9 0 0 0
+.b16 #nvd9_gpc_mmio_head
+.b16 #nvd9_gpc_mmio_tail
+.b16 #nvd9_tpc_mmio_head
+.b16 #nvd9_tpc_mmio_tail
.b8 0 0 0 0
// GPC mmio lists
@@ -114,6 +119,35 @@ nvc0_gpc_mmio_tail:
mmctx_data(0x000c6c, 1);
nvc1_gpc_mmio_tail:
+nvd9_gpc_mmio_head:
+mmctx_data(0x000380, 1)
+mmctx_data(0x000400, 2)
+mmctx_data(0x00040c, 3)
+mmctx_data(0x000450, 9)
+mmctx_data(0x000600, 1)
+mmctx_data(0x000684, 1)
+mmctx_data(0x000700, 5)
+mmctx_data(0x000800, 1)
+mmctx_data(0x000808, 3)
+mmctx_data(0x000828, 1)
+mmctx_data(0x000830, 1)
+mmctx_data(0x0008d8, 1)
+mmctx_data(0x0008e0, 1)
+mmctx_data(0x0008e8, 6)
+mmctx_data(0x00091c, 1)
+mmctx_data(0x000924, 3)
+mmctx_data(0x000b00, 1)
+mmctx_data(0x000b08, 6)
+mmctx_data(0x000bb8, 1)
+mmctx_data(0x000c08, 1)
+mmctx_data(0x000c10, 8)
+mmctx_data(0x000c6c, 1)
+mmctx_data(0x000c80, 1)
+mmctx_data(0x000c8c, 1)
+mmctx_data(0x001000, 3)
+mmctx_data(0x001014, 1)
+nvd9_gpc_mmio_tail:
+
// TPC mmio lists
nvc0_tpc_mmio_head:
mmctx_data(0x000018, 1)
@@ -146,9 +180,34 @@ nvc3_tpc_mmio_tail:
mmctx_data(0x000544, 1)
nvc1_tpc_mmio_tail:
+nvd9_tpc_mmio_head:
+mmctx_data(0x000018, 1)
+mmctx_data(0x00003c, 1)
+mmctx_data(0x000048, 1)
+mmctx_data(0x000064, 1)
+mmctx_data(0x000088, 1)
+mmctx_data(0x000200, 6)
+mmctx_data(0x00021c, 2)
+mmctx_data(0x0002c4, 1)
+mmctx_data(0x000300, 6)
+mmctx_data(0x0003d0, 1)
+mmctx_data(0x0003e0, 2)
+mmctx_data(0x000400, 3)
+mmctx_data(0x000420, 3)
+mmctx_data(0x0004b0, 1)
+mmctx_data(0x0004e8, 1)
+mmctx_data(0x0004f4, 1)
+mmctx_data(0x000520, 2)
+mmctx_data(0x000544, 1)
+mmctx_data(0x000604, 4)
+mmctx_data(0x000644, 20)
+mmctx_data(0x000698, 1)
+mmctx_data(0x0006e0, 1)
+mmctx_data(0x000750, 3)
+nvd9_tpc_mmio_tail:
-.section nvc0_grgpc_code
-bra init
+.section #nvc0_grgpc_code
+bra #init
define(`include_code')
include(`nvc0_graph.fuc')
@@ -160,10 +219,10 @@ error:
push $r14
mov $r14 -0x67ec // 0x9814
sethi $r14 0x400000
- call nv_wr32 // HUB_CTXCTL_CC_SCRATCH[5] = error code
+ call #nv_wr32 // HUB_CTXCTL_CC_SCRATCH[5] = error code
add b32 $r14 0x41c
mov $r15 1
- call nv_wr32 // HUB_CTXCTL_INTR_UP_SET
+ call #nv_wr32 // HUB_CTXCTL_INTR_UP_SET
pop $r14
ret
@@ -190,7 +249,7 @@ init:
iowr I[$r1 + 0x000] $r2 // FIFO_ENABLE
// setup i0 handler, and route all interrupts to it
- mov $r1 ih
+ mov $r1 #ih
mov $iv0 $r1
mov $r1 0x400
iowr I[$r1 + 0x300] $r0 // INTR_DISPATCH
@@ -210,24 +269,24 @@ init:
and $r2 0x1f
shl b32 $r3 $r2
sub b32 $r3 1
- st b32 D[$r0 + tpc_count] $r2
- st b32 D[$r0 + tpc_mask] $r3
+ st b32 D[$r0 + #tpc_count] $r2
+ st b32 D[$r0 + #tpc_mask] $r3
add b32 $r1 0x400
iord $r2 I[$r1 + 0x000] // MYINDEX
- st b32 D[$r0 + gpc_id] $r2
+ st b32 D[$r0 + #gpc_id] $r2
// find context data for this chipset
mov $r2 0x800
shl b32 $r2 6
iord $r2 I[$r2 + 0x000] // CC_SCRATCH[0]
- mov $r1 chipsets - 12
+ mov $r1 #chipsets - 12
init_find_chipset:
add b32 $r1 12
ld b32 $r3 D[$r1 + 0x00]
cmpu b32 $r3 $r2
- bra e init_context
+ bra e #init_context
cmpu b32 $r3 0
- bra ne init_find_chipset
+ bra ne #init_find_chipset
// unknown chipset
ret
@@ -253,19 +312,19 @@ init:
clear b32 $r15
ld b16 $r14 D[$r1 + 4]
ld b16 $r15 D[$r1 + 6]
- st b16 D[$r0 + gpc_mmio_list_head] $r14
- st b16 D[$r0 + gpc_mmio_list_tail] $r15
- call mmctx_size
+ st b16 D[$r0 + #gpc_mmio_list_head] $r14
+ st b16 D[$r0 + #gpc_mmio_list_tail] $r15
+ call #mmctx_size
add b32 $r2 $r15
add b32 $r3 $r15
// calculate per-TPC mmio context size, store the list pointers
ld b16 $r14 D[$r1 + 8]
ld b16 $r15 D[$r1 + 10]
- st b16 D[$r0 + tpc_mmio_list_head] $r14
- st b16 D[$r0 + tpc_mmio_list_tail] $r15
- call mmctx_size
- ld b32 $r14 D[$r0 + tpc_count]
+ st b16 D[$r0 + #tpc_mmio_list_head] $r14
+ st b16 D[$r0 + #tpc_mmio_list_tail] $r15
+ call #mmctx_size
+ ld b32 $r14 D[$r0 + #tpc_count]
mulu $r14 $r15
add b32 $r2 $r14
add b32 $r3 $r14
@@ -283,7 +342,7 @@ init:
// calculate size of strand context data
mov b32 $r15 $r2
- call strand_ctx_init
+ call #strand_ctx_init
add b32 $r3 $r15
// save context size, and tell HUB we're done
@@ -301,13 +360,13 @@ init:
main:
bset $flags $p0
sleep $p0
- mov $r13 cmd_queue
- call queue_get
- bra $p1 main
+ mov $r13 #cmd_queue
+ call #queue_get
+ bra $p1 #main
// 0x0000-0x0003 are all context transfers
cmpu b32 $r14 0x04
- bra nc main_not_ctx_xfer
+ bra nc #main_not_ctx_xfer
// fetch $flags and mask off $p1/$p2
mov $r1 $flags
mov $r2 0x0006
@@ -318,14 +377,14 @@ main:
or $r1 $r14
mov $flags $r1
// transfer context data
- call ctx_xfer
- bra main
+ call #ctx_xfer
+ bra #main
main_not_ctx_xfer:
shl b32 $r15 $r14 16
or $r15 E_BAD_COMMAND
- call error
- bra main
+ call #error
+ bra #main
// interrupt handler
ih:
@@ -342,13 +401,13 @@ ih:
// incoming fifo command?
iord $r10 I[$r0 + 0x200] // INTR
and $r11 $r10 0x00000004
- bra e ih_no_fifo
+ bra e #ih_no_fifo
// queue incoming fifo command for later processing
mov $r11 0x1900
- mov $r13 cmd_queue
+ mov $r13 #cmd_queue
iord $r14 I[$r11 + 0x100] // FIFO_CMD
iord $r15 I[$r11 + 0x000] // FIFO_DATA
- call queue_put
+ call #queue_put
add b32 $r11 0x400
mov $r14 1
iowr I[$r11 + 0x000] $r14 // FIFO_ACK
@@ -374,11 +433,11 @@ ih:
//
hub_barrier_done:
mov $r15 1
- ld b32 $r14 D[$r0 + gpc_id]
+ ld b32 $r14 D[$r0 + #gpc_id]
shl b32 $r15 $r14
mov $r14 -0x6be8 // 0x409418 - HUB_BAR_SET
sethi $r14 0x400000
- call nv_wr32
+ call #nv_wr32
ret
// Disables various things, waits a bit, and re-enables them..
@@ -395,7 +454,7 @@ ctx_redswitch:
mov $r15 8
ctx_redswitch_delay:
sub b32 $r15 1
- bra ne ctx_redswitch_delay
+ bra ne #ctx_redswitch_delay
mov $r15 0xa20
iowr I[$r14] $r15 // GPC_RED_SWITCH = UNK11, ENABLE, POWER
ret
@@ -413,8 +472,8 @@ ctx_xfer:
mov $r1 0xa04
shl b32 $r1 6
iowr I[$r1 + 0x000] $r15// MEM_BASE
- bra not $p1 ctx_xfer_not_load
- call ctx_redswitch
+ bra not $p1 #ctx_xfer_not_load
+ call #ctx_redswitch
ctx_xfer_not_load:
// strands
@@ -422,7 +481,7 @@ ctx_xfer:
sethi $r1 0x20000
mov $r2 0xc
iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x0c
- call strand_wait
+ call #strand_wait
mov $r2 0x47fc
sethi $r2 0x20000
iowr I[$r2] $r0 // STRAND_FIRST_GENE(0x3f) = 0x00
@@ -435,46 +494,46 @@ ctx_xfer:
or $r10 2 // first
mov $r11 0x0000
sethi $r11 0x500000
- ld b32 $r12 D[$r0 + gpc_id]
+ ld b32 $r12 D[$r0 + #gpc_id]
shl b32 $r12 15
add b32 $r11 $r12 // base = NV_PGRAPH_GPCn
- ld b32 $r12 D[$r0 + gpc_mmio_list_head]
- ld b32 $r13 D[$r0 + gpc_mmio_list_tail]
+ ld b32 $r12 D[$r0 + #gpc_mmio_list_head]
+ ld b32 $r13 D[$r0 + #gpc_mmio_list_tail]
mov $r14 0 // not multi
- call mmctx_xfer
+ call #mmctx_xfer
// per-TPC mmio context
xbit $r10 $flags $p1 // direction
or $r10 4 // last
mov $r11 0x4000
sethi $r11 0x500000 // base = NV_PGRAPH_GPC0_TPC0
- ld b32 $r12 D[$r0 + gpc_id]
+ ld b32 $r12 D[$r0 + #gpc_id]
shl b32 $r12 15
add b32 $r11 $r12 // base = NV_PGRAPH_GPCn_TPC0
- ld b32 $r12 D[$r0 + tpc_mmio_list_head]
- ld b32 $r13 D[$r0 + tpc_mmio_list_tail]
- ld b32 $r15 D[$r0 + tpc_mask]
+ ld b32 $r12 D[$r0 + #tpc_mmio_list_head]
+ ld b32 $r13 D[$r0 + #tpc_mmio_list_tail]
+ ld b32 $r15 D[$r0 + #tpc_mask]
mov $r14 0x800 // stride = 0x800
- call mmctx_xfer
+ call #mmctx_xfer
// wait for strands to finish
- call strand_wait
+ call #strand_wait
// if load, or a save without a load following, do some
// unknown stuff that's done after finishing a block of
// strand commands
- bra $p1 ctx_xfer_post
- bra not $p2 ctx_xfer_done
+ bra $p1 #ctx_xfer_post
+ bra not $p2 #ctx_xfer_done
ctx_xfer_post:
mov $r1 0x4afc
sethi $r1 0x20000
mov $r2 0xd
iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x0d
- call strand_wait
+ call #strand_wait
// mark completion in HUB's barrier
ctx_xfer_done:
- call hub_barrier_done
+ call #hub_barrier_done
ret
.align 256
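The chipsets: table above, together with the init_find_chipset loop, is a plain linear scan over fixed-size records: each GPC entry is a 32-bit chipset id (the .b8 id 0 0 0 word) followed by four 16-bit mmio-list pointers, and the table ends with an all-zero entry. A rough C rendering of that lookup (illustrative only; the firmware does this directly in registers, and the struct and function names here are hypothetical):

#include <stdint.h>
#include <stddef.h>

/* One 12-byte entry of the GPC chipsets: table above. */
struct gpc_chipset_entry {
        uint32_t chipset;        /* .b8 0xNN 0 0 0           */
        uint16_t gpc_mmio_head;  /* .b16 #nvXX_gpc_mmio_head */
        uint16_t gpc_mmio_tail;  /* .b16 #nvXX_gpc_mmio_tail */
        uint16_t tpc_mmio_head;  /* .b16 #nvXX_tpc_mmio_head */
        uint16_t tpc_mmio_tail;  /* .b16 #nvXX_tpc_mmio_tail */
};

/* Equivalent of the init_find_chipset loop: scan until a matching id or
 * the all-zero terminator; an unknown chipset makes init bail out (ret).
 */
static const struct gpc_chipset_entry *
find_chipset(const struct gpc_chipset_entry *tbl, uint32_t chipset)
{
        for (; tbl->chipset != 0; tbl++) {
                if (tbl->chipset == chipset)
                        return tbl;
        }
        return NULL;
}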
diff --git a/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h b/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h
index 6f820324480e..a988b8ad00ac 100644
--- a/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h
+++ b/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h
@@ -25,26 +25,29 @@ uint32_t nvc0_grgpc_data[] = {
0x00000000,
0x00000000,
0x000000c0,
- 0x011c00bc,
- 0x01700120,
+ 0x012800c8,
+ 0x01e40194,
0x000000c1,
- 0x012000bc,
- 0x01840120,
+ 0x012c00c8,
+ 0x01f80194,
0x000000c3,
- 0x011c00bc,
- 0x01800120,
+ 0x012800c8,
+ 0x01f40194,
0x000000c4,
- 0x011c00bc,
- 0x01800120,
+ 0x012800c8,
+ 0x01f40194,
0x000000c8,
- 0x011c00bc,
- 0x01700120,
+ 0x012800c8,
+ 0x01e40194,
0x000000ce,
- 0x011c00bc,
- 0x01800120,
+ 0x012800c8,
+ 0x01f40194,
0x000000cf,
- 0x011c00bc,
- 0x017c0120,
+ 0x012800c8,
+ 0x01f00194,
+ 0x000000d9,
+ 0x0194012c,
+ 0x025401f8,
0x00000000,
0x00000380,
0x14000400,
@@ -71,6 +74,32 @@ uint32_t nvc0_grgpc_data[] = {
0x08001000,
0x00001014,
0x00000c6c,
+ 0x00000380,
+ 0x04000400,
+ 0x0800040c,
+ 0x20000450,
+ 0x00000600,
+ 0x00000684,
+ 0x10000700,
+ 0x00000800,
+ 0x08000808,
+ 0x00000828,
+ 0x00000830,
+ 0x000008d8,
+ 0x000008e0,
+ 0x140008e8,
+ 0x0000091c,
+ 0x08000924,
+ 0x00000b00,
+ 0x14000b08,
+ 0x00000bb8,
+ 0x00000c08,
+ 0x1c000c10,
+ 0x00000c6c,
+ 0x00000c80,
+ 0x00000c8c,
+ 0x08001000,
+ 0x00001014,
0x00000018,
0x0000003c,
0x00000048,
@@ -96,6 +125,29 @@ uint32_t nvc0_grgpc_data[] = {
0x000006e0,
0x000004bc,
0x00000544,
+ 0x00000018,
+ 0x0000003c,
+ 0x00000048,
+ 0x00000064,
+ 0x00000088,
+ 0x14000200,
+ 0x0400021c,
+ 0x000002c4,
+ 0x14000300,
+ 0x000003d0,
+ 0x040003e0,
+ 0x08000400,
+ 0x08000420,
+ 0x000004b0,
+ 0x000004e8,
+ 0x000004f4,
+ 0x04000520,
+ 0x00000544,
+ 0x0c000604,
+ 0x4c000644,
+ 0x00000698,
+ 0x000006e0,
+ 0x08000750,
};
uint32_t nvc0_grgpc_code[] = {
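The generated arrays above are the assembled output for the .fuc source in the previous file, so the new words can be cross-checked against it. Each pair of .b16 list pointers in the chipset table packs into one word with the first pointer in the low half (for 0xd9, 0x0194012c is gpc_mmio_head 0x012c and gpc_mmio_tail 0x0194), and each mmctx_data(offset, count) entry appears to encode ((count - 1) << 26) | offset, e.g. mmctx_data(0x000450, 9) becomes 0x20000450 and mmctx_data(0x000644, 20) becomes 0x4c000644. A small sketch of that inferred encoding (illustrative only, not the driver's macro definition):

#include <stdint.h>

/* Inferred packing of one mmctx_data() word in the generated lists above:
 * bits 31:26 hold (count - 1), bits 25:0 hold the register offset.
 */
static inline uint32_t mmctx_word(uint32_t offset, uint32_t count)
{
        return ((count - 1) << 26) | offset;
}

/* e.g. mmctx_word(0x000644, 20) == 0x4c000644, matching the array above */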
diff --git a/drivers/gpu/drm/nouveau/nvc0_grhub.fuc b/drivers/gpu/drm/nouveau/nvc0_grhub.fuc
index e4f8c7e89ddd..98acddb2c5bb 100644
--- a/drivers/gpu/drm/nouveau/nvc0_grhub.fuc
+++ b/drivers/gpu/drm/nouveau/nvc0_grhub.fuc
@@ -27,7 +27,7 @@
* m4 nvc0_grhub.fuc | envyas -a -w -m fuc -V nva3 -o nvc0_grhub.fuc.h
*/
-.section nvc0_grhub_data
+.section #nvc0_grhub_data
include(`nvc0_graph.fuc')
gpc_count: .b32 0
rop_count: .b32 0
@@ -39,26 +39,29 @@ ctx_current: .b32 0
chipsets:
.b8 0xc0 0 0 0
-.b16 nvc0_hub_mmio_head
-.b16 nvc0_hub_mmio_tail
+.b16 #nvc0_hub_mmio_head
+.b16 #nvc0_hub_mmio_tail
.b8 0xc1 0 0 0
-.b16 nvc0_hub_mmio_head
-.b16 nvc1_hub_mmio_tail
+.b16 #nvc0_hub_mmio_head
+.b16 #nvc1_hub_mmio_tail
.b8 0xc3 0 0 0
-.b16 nvc0_hub_mmio_head
-.b16 nvc0_hub_mmio_tail
+.b16 #nvc0_hub_mmio_head
+.b16 #nvc0_hub_mmio_tail
.b8 0xc4 0 0 0
-.b16 nvc0_hub_mmio_head
-.b16 nvc0_hub_mmio_tail
+.b16 #nvc0_hub_mmio_head
+.b16 #nvc0_hub_mmio_tail
.b8 0xc8 0 0 0
-.b16 nvc0_hub_mmio_head
-.b16 nvc0_hub_mmio_tail
+.b16 #nvc0_hub_mmio_head
+.b16 #nvc0_hub_mmio_tail
.b8 0xce 0 0 0
-.b16 nvc0_hub_mmio_head
-.b16 nvc0_hub_mmio_tail
+.b16 #nvc0_hub_mmio_head
+.b16 #nvc0_hub_mmio_tail
.b8 0xcf 0 0 0
-.b16 nvc0_hub_mmio_head
-.b16 nvc0_hub_mmio_tail
+.b16 #nvc0_hub_mmio_head
+.b16 #nvc0_hub_mmio_tail
+.b8 0xd9 0 0 0
+.b16 #nvd9_hub_mmio_head
+.b16 #nvd9_hub_mmio_tail
.b8 0 0 0 0
nvc0_hub_mmio_head:
@@ -105,6 +108,48 @@ nvc0_hub_mmio_tail:
mmctx_data(0x4064c0, 2)
nvc1_hub_mmio_tail:
+nvd9_hub_mmio_head:
+mmctx_data(0x17e91c, 2)
+mmctx_data(0x400204, 2)
+mmctx_data(0x404004, 10)
+mmctx_data(0x404044, 1)
+mmctx_data(0x404094, 14)
+mmctx_data(0x4040d0, 7)
+mmctx_data(0x4040f8, 1)
+mmctx_data(0x404130, 3)
+mmctx_data(0x404150, 3)
+mmctx_data(0x404164, 2)
+mmctx_data(0x404178, 2)
+mmctx_data(0x404200, 8)
+mmctx_data(0x404404, 14)
+mmctx_data(0x404460, 4)
+mmctx_data(0x404480, 1)
+mmctx_data(0x404498, 1)
+mmctx_data(0x404604, 4)
+mmctx_data(0x404618, 32)
+mmctx_data(0x404698, 21)
+mmctx_data(0x4046f0, 2)
+mmctx_data(0x404700, 22)
+mmctx_data(0x405800, 1)
+mmctx_data(0x405830, 3)
+mmctx_data(0x405854, 1)
+mmctx_data(0x405870, 4)
+mmctx_data(0x405a00, 2)
+mmctx_data(0x405a18, 1)
+mmctx_data(0x406020, 1)
+mmctx_data(0x406028, 4)
+mmctx_data(0x4064a8, 2)
+mmctx_data(0x4064b4, 5)
+mmctx_data(0x407804, 1)
+mmctx_data(0x40780c, 6)
+mmctx_data(0x4078bc, 1)
+mmctx_data(0x408000, 7)
+mmctx_data(0x408064, 1)
+mmctx_data(0x408800, 3)
+mmctx_data(0x408900, 4)
+mmctx_data(0x408980, 1)
+nvd9_hub_mmio_tail:
+
.align 256
chan_data:
chan_mmio_count: .b32 0
@@ -113,8 +158,8 @@ chan_mmio_address: .b32 0
.align 256
xfer_data: .b32 0
-.section nvc0_grhub_code
-bra init
+.section #nvc0_grhub_code
+bra #init
define(`include_code')
include(`nvc0_graph.fuc')
@@ -157,7 +202,7 @@ init:
iowr I[$r1 + 0x000] $r2 // FIFO_ENABLE
// setup i0 handler, and route all interrupts to it
- mov $r1 ih
+ mov $r1 #ih
mov $iv0 $r1
mov $r1 0x400
iowr I[$r1 + 0x300] $r0 // INTR_DISPATCH
@@ -201,11 +246,11 @@ init:
// fetch enabled GPC/ROP counts
mov $r14 -0x69fc // 0x409604
sethi $r14 0x400000
- call nv_rd32
+ call #nv_rd32
extr $r1 $r15 16:20
- st b32 D[$r0 + rop_count] $r1
+ st b32 D[$r0 + #rop_count] $r1
and $r15 0x1f
- st b32 D[$r0 + gpc_count] $r15
+ st b32 D[$r0 + #gpc_count] $r15
// set BAR_REQMASK to GPC mask
mov $r1 1
@@ -220,14 +265,14 @@ init:
mov $r2 0x800
shl b32 $r2 6
iord $r2 I[$r2 + 0x000] // CC_SCRATCH[0]
- mov $r15 chipsets - 8
+ mov $r15 #chipsets - 8
init_find_chipset:
add b32 $r15 8
ld b32 $r3 D[$r15 + 0x00]
cmpu b32 $r3 $r2
- bra e init_context
+ bra e #init_context
cmpu b32 $r3 0
- bra ne init_find_chipset
+ bra ne #init_find_chipset
// unknown chipset
ret
@@ -239,9 +284,9 @@ init:
ld b16 $r14 D[$r15 + 4]
ld b16 $r15 D[$r15 + 6]
sethi $r14 0
- st b32 D[$r0 + hub_mmio_list_head] $r14
- st b32 D[$r0 + hub_mmio_list_tail] $r15
- call mmctx_size
+ st b32 D[$r0 + #hub_mmio_list_head] $r14
+ st b32 D[$r0 + #hub_mmio_list_tail] $r15
+ call #mmctx_size
// set mmctx base addresses now so we don't have to do it later,
// they don't (currently) ever change
@@ -260,7 +305,7 @@ init:
add b32 $r1 1
shl b32 $r1 8
mov b32 $r15 $r1
- call strand_ctx_init
+ call #strand_ctx_init
add b32 $r1 $r15
// initialise each GPC in sequence by passing in the offset of its
@@ -271,40 +316,40 @@ init:
// when it has completed, and return the size of its context data
// in GPCn_CC_SCRATCH[1]
//
- ld b32 $r3 D[$r0 + gpc_count]
+ ld b32 $r3 D[$r0 + #gpc_count]
mov $r4 0x2000
sethi $r4 0x500000
init_gpc:
// setup, and start GPC ucode running
add b32 $r14 $r4 0x804
mov b32 $r15 $r1
- call nv_wr32 // CC_SCRATCH[1] = ctx offset
+ call #nv_wr32 // CC_SCRATCH[1] = ctx offset
add b32 $r14 $r4 0x800
mov b32 $r15 $r2
- call nv_wr32 // CC_SCRATCH[0] = chipset
+ call #nv_wr32 // CC_SCRATCH[0] = chipset
add b32 $r14 $r4 0x10c
clear b32 $r15
- call nv_wr32
+ call #nv_wr32
add b32 $r14 $r4 0x104
- call nv_wr32 // ENTRY
+ call #nv_wr32 // ENTRY
add b32 $r14 $r4 0x100
mov $r15 2 // CTRL_START_TRIGGER
- call nv_wr32 // CTRL
+ call #nv_wr32 // CTRL
// wait for it to complete, and adjust context size
add b32 $r14 $r4 0x800
init_gpc_wait:
- call nv_rd32
+ call #nv_rd32
xbit $r15 $r15 31
- bra e init_gpc_wait
+ bra e #init_gpc_wait
add b32 $r14 $r4 0x804
- call nv_rd32
+ call #nv_rd32
add b32 $r1 $r15
// next!
add b32 $r4 0x8000
sub b32 $r3 1
- bra ne init_gpc
+ bra ne #init_gpc
// save context size, and tell host we're ready
mov $r2 0x800
@@ -322,13 +367,13 @@ main:
// sleep until we have something to do
bset $flags $p0
sleep $p0
- mov $r13 cmd_queue
- call queue_get
- bra $p1 main
+ mov $r13 #cmd_queue
+ call #queue_get
+ bra $p1 #main
// context switch, requested by GPU?
cmpu b32 $r14 0x4001
- bra ne main_not_ctx_switch
+ bra ne #main_not_ctx_switch
trace_set(T_AUTO)
mov $r1 0xb00
shl b32 $r1 6
@@ -336,39 +381,39 @@ main:
iord $r1 I[$r1 + 0x000] // CHAN_CUR
xbit $r3 $r1 31
- bra e chsw_no_prev
+ bra e #chsw_no_prev
xbit $r3 $r2 31
- bra e chsw_prev_no_next
+ bra e #chsw_prev_no_next
push $r2
mov b32 $r2 $r1
trace_set(T_SAVE)
bclr $flags $p1
bset $flags $p2
- call ctx_xfer
+ call #ctx_xfer
trace_clr(T_SAVE);
pop $r2
trace_set(T_LOAD);
bset $flags $p1
- call ctx_xfer
+ call #ctx_xfer
trace_clr(T_LOAD);
- bra chsw_done
+ bra #chsw_done
chsw_prev_no_next:
push $r2
mov b32 $r2 $r1
bclr $flags $p1
bclr $flags $p2
- call ctx_xfer
+ call #ctx_xfer
pop $r2
mov $r1 0xb00
shl b32 $r1 6
iowr I[$r1] $r2
- bra chsw_done
+ bra #chsw_done
chsw_no_prev:
xbit $r3 $r2 31
- bra e chsw_done
+ bra e #chsw_done
bset $flags $p1
bclr $flags $p2
- call ctx_xfer
+ call #ctx_xfer
// ack the context switch request
chsw_done:
@@ -377,32 +422,32 @@ main:
mov $r2 1
iowr I[$r1 + 0x000] $r2 // 0x409b0c
trace_clr(T_AUTO)
- bra main
+ bra #main
// request to set current channel? (*not* a context switch)
main_not_ctx_switch:
cmpu b32 $r14 0x0001
- bra ne main_not_ctx_chan
+ bra ne #main_not_ctx_chan
mov b32 $r2 $r15
- call ctx_chan
- bra main_done
+ call #ctx_chan
+ bra #main_done
// request to store current channel context?
main_not_ctx_chan:
cmpu b32 $r14 0x0002
- bra ne main_not_ctx_save
+ bra ne #main_not_ctx_save
trace_set(T_SAVE)
bclr $flags $p1
bclr $flags $p2
- call ctx_xfer
+ call #ctx_xfer
trace_clr(T_SAVE)
- bra main_done
+ bra #main_done
main_not_ctx_save:
shl b32 $r15 $r14 16
or $r15 E_BAD_COMMAND
- call error
- bra main
+ call #error
+ bra #main
main_done:
mov $r1 0x820
@@ -410,7 +455,7 @@ main:
clear b32 $r2
bset $r2 31
iowr I[$r1 + 0x000] $r2 // CC_SCRATCH[0] |= 0x80000000
- bra main
+ bra #main
// interrupt handler
ih:
@@ -427,13 +472,13 @@ ih:
// incoming fifo command?
iord $r10 I[$r0 + 0x200] // INTR
and $r11 $r10 0x00000004
- bra e ih_no_fifo
+ bra e #ih_no_fifo
// queue incoming fifo command for later processing
mov $r11 0x1900
- mov $r13 cmd_queue
+ mov $r13 #cmd_queue
iord $r14 I[$r11 + 0x100] // FIFO_CMD
iord $r15 I[$r11 + 0x000] // FIFO_DATA
- call queue_put
+ call #queue_put
add b32 $r11 0x400
mov $r14 1
iowr I[$r11 + 0x000] $r14 // FIFO_ACK
@@ -441,18 +486,18 @@ ih:
// context switch request?
ih_no_fifo:
and $r11 $r10 0x00000100
- bra e ih_no_ctxsw
+ bra e #ih_no_ctxsw
// enqueue a context switch for later processing
- mov $r13 cmd_queue
+ mov $r13 #cmd_queue
mov $r14 0x4001
- call queue_put
+ call #queue_put
// anything we didn't handle, bring it to the host's attention
ih_no_ctxsw:
mov $r11 0x104
not b32 $r11
and $r11 $r10 $r11
- bra e ih_no_other
+ bra e #ih_no_other
mov $r10 0xc1c
shl b32 $r10 6
iowr I[$r10] $r11 // INTR_UP_SET
@@ -478,11 +523,11 @@ ctx_4160s:
mov $r14 0x4160
sethi $r14 0x400000
mov $r15 1
- call nv_wr32
+ call #nv_wr32
ctx_4160s_wait:
- call nv_rd32
+ call #nv_rd32
xbit $r15 $r15 4
- bra e ctx_4160s_wait
+ bra e #ctx_4160s_wait
ret
// Without clearing again at end of xfer, some things cause PGRAPH
@@ -492,7 +537,7 @@ ctx_4160c:
mov $r14 0x4160
sethi $r14 0x400000
clear b32 $r15
- call nv_wr32
+ call #nv_wr32
ret
// Again, not real sure
@@ -503,7 +548,7 @@ ctx_4170s:
mov $r14 0x4170
sethi $r14 0x400000
or $r15 0x10
- call nv_wr32
+ call #nv_wr32
ret
// Waits for a ctx_4170s() call to complete
@@ -511,9 +556,9 @@ ctx_4170s:
ctx_4170w:
mov $r14 0x4170
sethi $r14 0x400000
- call nv_rd32
+ call #nv_rd32
and $r15 0x10
- bra ne ctx_4170w
+ bra ne #ctx_4170w
ret
// Disables various things, waits a bit, and re-enables them..
@@ -530,7 +575,7 @@ ctx_redswitch:
mov $r15 8
ctx_redswitch_delay:
sub b32 $r15 1
- bra ne ctx_redswitch_delay
+ bra ne #ctx_redswitch_delay
mov $r15 0x770
iowr I[$r14] $r15 // HUB_RED_SWITCH = ENABLE_ALL, POWER_ALL
ret
@@ -546,10 +591,10 @@ ctx_86c:
iowr I[$r14] $r15 // HUB(0x86c) = val
mov $r14 -0x75ec
sethi $r14 0x400000
- call nv_wr32 // ROP(0xa14) = val
+ call #nv_wr32 // ROP(0xa14) = val
mov $r14 -0x5794
sethi $r14 0x410000
- call nv_wr32 // GPC(0x86c) = val
+ call #nv_wr32 // GPC(0x86c) = val
ret
// ctx_load - loads a channel's ctxctl data, and selects its vm
@@ -561,7 +606,7 @@ ctx_load:
// switch to channel, somewhat magic in parts..
mov $r10 12 // DONE_UNK12
- call wait_donez
+ call #wait_donez
mov $r1 0xa24
shl b32 $r1 6
iowr I[$r1 + 0x000] $r0 // 0x409a24
@@ -576,7 +621,7 @@ ctx_load:
ctx_chan_wait_0:
iord $r4 I[$r1 + 0x100]
and $r4 0x1f
- bra ne ctx_chan_wait_0
+ bra ne #ctx_chan_wait_0
iowr I[$r3 + 0x000] $r2 // CHAN_CUR
// load channel header, fetch PGRAPH context pointer
@@ -595,19 +640,19 @@ ctx_load:
sethi $r2 0x80000000
iowr I[$r1 + 0x000] $r2 // MEM_TARGET = vram
mov $r1 0x10 // chan + 0x0210
- mov $r2 xfer_data
+ mov $r2 #xfer_data
sethi $r2 0x00020000 // 16 bytes
xdld $r1 $r2
xdwait
trace_clr(T_LCHAN)
// update current context
- ld b32 $r1 D[$r0 + xfer_data + 4]
+ ld b32 $r1 D[$r0 + #xfer_data + 4]
shl b32 $r1 24
- ld b32 $r2 D[$r0 + xfer_data + 0]
+ ld b32 $r2 D[$r0 + #xfer_data + 0]
shr b32 $r2 8
or $r1 $r2
- st b32 D[$r0 + ctx_current] $r1
+ st b32 D[$r0 + #ctx_current] $r1
// set transfer base to start of context, and fetch context header
trace_set(T_LCTXH)
@@ -618,7 +663,7 @@ ctx_load:
mov $r1 0xa20
shl b32 $r1 6
iowr I[$r1 + 0x000] $r2 // MEM_TARGET = vm
- mov $r1 chan_data
+ mov $r1 #chan_data
sethi $r1 0x00060000 // 256 bytes
xdld $r0 $r1
xdwait
@@ -635,10 +680,10 @@ ctx_load:
// In: $r2 channel address
//
ctx_chan:
- call ctx_4160s
- call ctx_load
+ call #ctx_4160s
+ call #ctx_load
mov $r10 12 // DONE_UNK12
- call wait_donez
+ call #wait_donez
mov $r1 0xa10
shl b32 $r1 6
mov $r2 5
@@ -646,8 +691,8 @@ ctx_chan:
ctx_chan_wait:
iord $r2 I[$r1 + 0x000]
or $r2 $r2
- bra ne ctx_chan_wait
- call ctx_4160c
+ bra ne #ctx_chan_wait
+ call #ctx_4160c
ret
// Execute per-context state overrides list
@@ -661,7 +706,7 @@ ctx_chan:
//
ctx_mmio_exec:
// set transfer base to be the mmio list
- ld b32 $r3 D[$r0 + chan_mmio_address]
+ ld b32 $r3 D[$r0 + #chan_mmio_address]
mov $r2 0xa04
shl b32 $r2 6
iowr I[$r2 + 0x000] $r3 // MEM_BASE
@@ -670,31 +715,31 @@ ctx_mmio_exec:
ctx_mmio_loop:
// fetch next 256 bytes of mmio list if necessary
and $r4 $r3 0xff
- bra ne ctx_mmio_pull
- mov $r5 xfer_data
+ bra ne #ctx_mmio_pull
+ mov $r5 #xfer_data
sethi $r5 0x00060000 // 256 bytes
xdld $r3 $r5
xdwait
// execute a single list entry
ctx_mmio_pull:
- ld b32 $r14 D[$r4 + xfer_data + 0x00]
- ld b32 $r15 D[$r4 + xfer_data + 0x04]
- call nv_wr32
+ ld b32 $r14 D[$r4 + #xfer_data + 0x00]
+ ld b32 $r15 D[$r4 + #xfer_data + 0x04]
+ call #nv_wr32
// next!
add b32 $r3 8
sub b32 $r1 1
- bra ne ctx_mmio_loop
+ bra ne #ctx_mmio_loop
// set transfer base back to the current context
ctx_mmio_done:
- ld b32 $r3 D[$r0 + ctx_current]
+ ld b32 $r3 D[$r0 + #ctx_current]
iowr I[$r2 + 0x000] $r3 // MEM_BASE
// disable the mmio list now, we don't need/want to execute it again
- st b32 D[$r0 + chan_mmio_count] $r0
- mov $r1 chan_data
+ st b32 D[$r0 + #chan_mmio_count] $r0
+ mov $r1 #chan_data
sethi $r1 0x00060000 // 256 bytes
xdst $r0 $r1
xdwait
@@ -709,46 +754,46 @@ ctx_mmio_exec:
// on load it means: "a save preceded this load"
//
ctx_xfer:
- bra not $p1 ctx_xfer_pre
- bra $p2 ctx_xfer_pre_load
+ bra not $p1 #ctx_xfer_pre
+ bra $p2 #ctx_xfer_pre_load
ctx_xfer_pre:
mov $r15 0x10
- call ctx_86c
- call ctx_4160s
- bra not $p1 ctx_xfer_exec
+ call #ctx_86c
+ call #ctx_4160s
+ bra not $p1 #ctx_xfer_exec
ctx_xfer_pre_load:
mov $r15 2
- call ctx_4170s
- call ctx_4170w
- call ctx_redswitch
+ call #ctx_4170s
+ call #ctx_4170w
+ call #ctx_redswitch
clear b32 $r15
- call ctx_4170s
- call ctx_load
+ call #ctx_4170s
+ call #ctx_load
// fetch context pointer, and initiate xfer on all GPCs
ctx_xfer_exec:
- ld b32 $r1 D[$r0 + ctx_current]
+ ld b32 $r1 D[$r0 + #ctx_current]
mov $r2 0x414
shl b32 $r2 6
iowr I[$r2 + 0x000] $r0 // BAR_STATUS = reset
mov $r14 -0x5b00
sethi $r14 0x410000
mov b32 $r15 $r1
- call nv_wr32 // GPC_BCAST_WRCMD_DATA = ctx pointer
+ call #nv_wr32 // GPC_BCAST_WRCMD_DATA = ctx pointer
add b32 $r14 4
xbit $r15 $flags $p1
xbit $r2 $flags $p2
shl b32 $r2 1
or $r15 $r2
- call nv_wr32 // GPC_BCAST_WRCMD_CMD = GPC_XFER(type)
+ call #nv_wr32 // GPC_BCAST_WRCMD_CMD = GPC_XFER(type)
// strands
mov $r1 0x4afc
sethi $r1 0x20000
mov $r2 0xc
iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x0c
- call strand_wait
+ call #strand_wait
mov $r2 0x47fc
sethi $r2 0x20000
iowr I[$r2] $r0 // STRAND_FIRST_GENE(0x3f) = 0x00
@@ -760,22 +805,22 @@ ctx_xfer:
xbit $r10 $flags $p1 // direction
or $r10 6 // first, last
mov $r11 0 // base = 0
- ld b32 $r12 D[$r0 + hub_mmio_list_head]
- ld b32 $r13 D[$r0 + hub_mmio_list_tail]
+ ld b32 $r12 D[$r0 + #hub_mmio_list_head]
+ ld b32 $r13 D[$r0 + #hub_mmio_list_tail]
mov $r14 0 // not multi
- call mmctx_xfer
+ call #mmctx_xfer
// wait for GPCs to all complete
mov $r10 8 // DONE_BAR
- call wait_doneo
+ call #wait_doneo
// wait for strand xfer to complete
- call strand_wait
+ call #strand_wait
// post-op
- bra $p1 ctx_xfer_post
+ bra $p1 #ctx_xfer_post
mov $r10 12 // DONE_UNK12
- call wait_donez
+ call #wait_donez
mov $r1 0xa10
shl b32 $r1 6
mov $r2 5
@@ -783,27 +828,27 @@ ctx_xfer:
ctx_xfer_post_save_wait:
iord $r2 I[$r1]
or $r2 $r2
- bra ne ctx_xfer_post_save_wait
+ bra ne #ctx_xfer_post_save_wait
- bra $p2 ctx_xfer_done
+ bra $p2 #ctx_xfer_done
ctx_xfer_post:
mov $r15 2
- call ctx_4170s
+ call #ctx_4170s
clear b32 $r15
- call ctx_86c
- call strand_post
- call ctx_4170w
+ call #ctx_86c
+ call #strand_post
+ call #ctx_4170w
clear b32 $r15
- call ctx_4170s
+ call #ctx_4170s
- bra not $p1 ctx_xfer_no_post_mmio
- ld b32 $r1 D[$r0 + chan_mmio_count]
+ bra not $p1 #ctx_xfer_no_post_mmio
+ ld b32 $r1 D[$r0 + #chan_mmio_count]
or $r1 $r1
- bra e ctx_xfer_no_post_mmio
- call ctx_mmio_exec
+ bra e #ctx_xfer_no_post_mmio
+ call #ctx_mmio_exec
ctx_xfer_no_post_mmio:
- call ctx_4160c
+ call #ctx_4160c
ctx_xfer_done:
ret
diff --git a/drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h b/drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h
index 241d3263f1e5..c5ed307abeb9 100644
--- a/drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h
+++ b/drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h
@@ -23,19 +23,21 @@ uint32_t nvc0_grhub_data[] = {
0x00000000,
0x00000000,
0x000000c0,
- 0x01340098,
+ 0x013c00a0,
0x000000c1,
- 0x01380098,
+ 0x014000a0,
0x000000c3,
- 0x01340098,
+ 0x013c00a0,
0x000000c4,
- 0x01340098,
+ 0x013c00a0,
0x000000c8,
- 0x01340098,
+ 0x013c00a0,
0x000000ce,
- 0x01340098,
+ 0x013c00a0,
0x000000cf,
- 0x01340098,
+ 0x013c00a0,
+ 0x000000d9,
+ 0x01dc0140,
0x00000000,
0x0417e91c,
0x04400204,
@@ -77,47 +79,45 @@ uint32_t nvc0_grhub_data[] = {
0x0c408900,
0x00408980,
0x044064c0,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
+ 0x0417e91c,
+ 0x04400204,
+ 0x24404004,
+ 0x00404044,
+ 0x34404094,
+ 0x184040d0,
+ 0x004040f8,
+ 0x08404130,
+ 0x08404150,
+ 0x04404164,
+ 0x04404178,
+ 0x1c404200,
+ 0x34404404,
+ 0x0c404460,
+ 0x00404480,
+ 0x00404498,
+ 0x0c404604,
+ 0x7c404618,
+ 0x50404698,
+ 0x044046f0,
+ 0x54404700,
+ 0x00405800,
+ 0x08405830,
+ 0x00405854,
+ 0x0c405870,
+ 0x04405a00,
+ 0x00405a18,
+ 0x00406020,
+ 0x0c406028,
+ 0x044064a8,
+ 0x104064b4,
+ 0x00407804,
+ 0x1440780c,
+ 0x004078bc,
+ 0x18408000,
+ 0x00408064,
+ 0x08408800,
+ 0x0c408900,
+ 0x00408980,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/nvc0_pm.c b/drivers/gpu/drm/nouveau/nvc0_pm.c
index 929aded35cb5..e9992f62c1c0 100644
--- a/drivers/gpu/drm/nouveau/nvc0_pm.c
+++ b/drivers/gpu/drm/nouveau/nvc0_pm.c
@@ -153,3 +153,240 @@ nvc0_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
perflvl->vdec = read_clk(dev, 0x0e);
return 0;
}
+
+struct nvc0_pm_clock {
+ u32 freq;
+ u32 ssel;
+ u32 mdiv;
+ u32 dsrc;
+ u32 ddiv;
+ u32 coef;
+};
+
+struct nvc0_pm_state {
+ struct nvc0_pm_clock eng[16];
+};
+
+static u32
+calc_div(struct drm_device *dev, int clk, u32 ref, u32 freq, u32 *ddiv)
+{
+ u32 div = min((ref * 2) / freq, (u32)65);
+ if (div < 2)
+ div = 2;
+
+ *ddiv = div - 2;
+ return (ref * 2) / div;
+}
+
+static u32
+calc_src(struct drm_device *dev, int clk, u32 freq, u32 *dsrc, u32 *ddiv)
+{
+ u32 sclk;
+
+ /* use one of the fixed frequencies if possible */
+ *ddiv = 0x00000000;
+ switch (freq) {
+ case 27000:
+ case 108000:
+ *dsrc = 0x00000000;
+ if (freq == 108000)
+ *dsrc |= 0x00030000;
+ return freq;
+ case 100000:
+ *dsrc = 0x00000002;
+ return freq;
+ default:
+ *dsrc = 0x00000003;
+ break;
+ }
+
+ /* otherwise, calculate the closest divider */
+ sclk = read_vco(dev, clk);
+ if (clk < 7)
+ sclk = calc_div(dev, clk, sclk, freq, ddiv);
+ return sclk;
+}
+
+static u32
+calc_pll(struct drm_device *dev, int clk, u32 freq, u32 *coef)
+{
+ struct pll_lims limits;
+ int N, M, P, ret;
+
+ ret = get_pll_limits(dev, 0x137000 + (clk * 0x20), &limits);
+ if (ret)
+ return 0;
+
+ limits.refclk = read_div(dev, clk, 0x137120, 0x137140);
+ if (!limits.refclk)
+ return 0;
+
+ ret = nva3_calc_pll(dev, &limits, freq, &N, NULL, &M, &P);
+ if (ret <= 0)
+ return 0;
+
+ *coef = (P << 16) | (N << 8) | M;
+ return ret;
+}
+
+/* A (likely rather simplified and incomplete) view of the clock tree
+ *
+ * Key:
+ *
+ * S: source select
+ * D: divider
+ * P: pll
+ * F: switch
+ *
+ * Engine clocks:
+ *
+ * 137250(D) ---- 137100(F0) ---- 137160(S)/1371d0(D) ------------------- ref
+ * (F1) ---- 1370X0(P) ---- 137120(S)/137140(D) ---- ref
+ *
+ * Not all registers exist for all clocks. For example: clocks >= 8 don't
+ * have their own PLL (all tied to clock 7's PLL when in PLL mode), nor do
+ * they have the divider at 1371d0, though the source selection at 137160
+ * still exists. You must use the divider at 137250 for these instead.
+ *
+ * Memory clock:
+ *
+ * TBD, read_mem() above is likely very wrong...
+ *
+ */
+
+static int
+calc_clk(struct drm_device *dev, int clk, struct nvc0_pm_clock *info, u32 freq)
+{
+ u32 src0, div0, div1D, div1P = 0;
+ u32 clk0, clk1 = 0;
+
+ /* invalid clock domain */
+ if (!freq)
+ return 0;
+
+ /* first possible path, using only dividers */
+ clk0 = calc_src(dev, clk, freq, &src0, &div0);
+ clk0 = calc_div(dev, clk, clk0, freq, &div1D);
+
+ /* see if we can get any closer using PLLs */
+ if (clk0 != freq) {
+ if (clk < 7)
+ clk1 = calc_pll(dev, clk, freq, &info->coef);
+ else
+ clk1 = read_pll(dev, 0x1370e0);
+ clk1 = calc_div(dev, clk, clk1, freq, &div1P);
+ }
+
+ /* select the method which gets closest to target freq */
+ if (abs((int)freq - clk0) <= abs((int)freq - clk1)) {
+ info->dsrc = src0;
+ if (div0) {
+ info->ddiv |= 0x80000000;
+ info->ddiv |= div0 << 8;
+ info->ddiv |= div0;
+ }
+ if (div1D) {
+ info->mdiv |= 0x80000000;
+ info->mdiv |= div1D;
+ }
+ info->ssel = 0;
+ info->freq = clk0;
+ } else {
+ if (div1P) {
+ info->mdiv |= 0x80000000;
+ info->mdiv |= div1P << 8;
+ }
+ info->ssel = (1 << clk);
+ info->freq = clk1;
+ }
+
+ return 0;
+}
+
+void *
+nvc0_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvc0_pm_state *info;
+ int ret;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return ERR_PTR(-ENOMEM);
+
+ /* NFI why this is still in the performance table, the ROPCs appear
+ * to get their clock from clock 2 ("hub07", actually hub05 on this
+ * chip, but, anyway...) as well. nvatiming confirms hub05 and ROP
+ * are always the same freq with the binary driver even when the
+ * performance table says they should differ.
+ */
+ if (dev_priv->chipset == 0xd9)
+ perflvl->rop = 0;
+
+ if ((ret = calc_clk(dev, 0x00, &info->eng[0x00], perflvl->shader)) ||
+ (ret = calc_clk(dev, 0x01, &info->eng[0x01], perflvl->rop)) ||
+ (ret = calc_clk(dev, 0x02, &info->eng[0x02], perflvl->hub07)) ||
+ (ret = calc_clk(dev, 0x07, &info->eng[0x07], perflvl->hub06)) ||
+ (ret = calc_clk(dev, 0x08, &info->eng[0x08], perflvl->hub01)) ||
+ (ret = calc_clk(dev, 0x09, &info->eng[0x09], perflvl->copy)) ||
+ (ret = calc_clk(dev, 0x0c, &info->eng[0x0c], perflvl->daemon)) ||
+ (ret = calc_clk(dev, 0x0e, &info->eng[0x0e], perflvl->vdec))) {
+ kfree(info);
+ return ERR_PTR(ret);
+ }
+
+ return info;
+}
+
+static void
+prog_clk(struct drm_device *dev, int clk, struct nvc0_pm_clock *info)
+{
+ /* program dividers at 137160/1371d0 first */
+ if (clk < 7 && !info->ssel) {
+ nv_mask(dev, 0x1371d0 + (clk * 0x04), 0x80003f3f, info->ddiv);
+ nv_wr32(dev, 0x137160 + (clk * 0x04), info->dsrc);
+ }
+
+ /* switch clock to non-pll mode */
+ nv_mask(dev, 0x137100, (1 << clk), 0x00000000);
+ nv_wait(dev, 0x137100, (1 << clk), 0x00000000);
+
+ /* reprogram pll */
+ if (clk < 7) {
+ /* make sure it's disabled first... */
+ u32 base = 0x137000 + (clk * 0x20);
+ u32 ctrl = nv_rd32(dev, base + 0x00);
+ if (ctrl & 0x00000001) {
+ nv_mask(dev, base + 0x00, 0x00000004, 0x00000000);
+ nv_mask(dev, base + 0x00, 0x00000001, 0x00000000);
+ }
+ /* program it to new values, if necessary */
+ if (info->ssel) {
+ nv_wr32(dev, base + 0x04, info->coef);
+ nv_mask(dev, base + 0x00, 0x00000001, 0x00000001);
+ nv_wait(dev, base + 0x00, 0x00020000, 0x00020000);
+ nv_mask(dev, base + 0x00, 0x00020004, 0x00000004);
+ }
+ }
+
+ /* select pll/non-pll mode, and program final clock divider */
+ nv_mask(dev, 0x137100, (1 << clk), info->ssel);
+ nv_wait(dev, 0x137100, (1 << clk), info->ssel);
+ nv_mask(dev, 0x137250 + (clk * 0x04), 0x00003f3f, info->mdiv);
+}
+
+int
+nvc0_pm_clocks_set(struct drm_device *dev, void *data)
+{
+ struct nvc0_pm_state *info = data;
+ int i;
+
+ for (i = 0; i < 16; i++) {
+ if (!info->eng[i].freq)
+ continue;
+ prog_clk(dev, i, &info->eng[i]);
+ }
+
+ kfree(info);
+ return 0;
+}
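The divider path in the new nvc0_pm.c code rounds toward the requested frequency in half-steps: calc_div() works on ref * 2, clamps the divider between 2 and 65, and stores div - 2 for the hardware field. A standalone worked example of that arithmetic with hypothetical numbers (not driver code):

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as calc_div() above, minus the drm_device plumbing. */
static uint32_t calc_div_sketch(uint32_t ref, uint32_t freq, uint32_t *ddiv)
{
        uint32_t div = (ref * 2) / freq;

        if (div > 65)
                div = 65;
        if (div < 2)
                div = 2;
        *ddiv = div - 2;             /* value written to the divider field    */
        return (ref * 2) / div;      /* resulting clock, 0.5-step granularity */
}

int main(void)
{
        uint32_t ddiv;
        /* hypothetical VCO reading of 1620000 kHz, target 405000 kHz */
        uint32_t out = calc_div_sketch(1620000, 405000, &ddiv);

        /* div = 8, ddiv = 6, out = 405000 kHz: an exact hit, so calc_clk()
         * keeps the divider path (ssel = 0) rather than using the PLL.
         */
        printf("ddiv=%u out=%u kHz\n", ddiv, out);
        return 0;
}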
diff --git a/drivers/gpu/drm/nouveau/nvd0_display.c b/drivers/gpu/drm/nouveau/nvd0_display.c
index 23d63b4b3d77..d2ba2f07400b 100644
--- a/drivers/gpu/drm/nouveau/nvd0_display.c
+++ b/drivers/gpu/drm/nouveau/nvd0_display.c
@@ -35,12 +35,34 @@
#include "nouveau_fb.h"
#include "nv50_display.h"
+#define EVO_DMA_NR 9
+
+#define EVO_MASTER (0x00)
+#define EVO_FLIP(c) (0x01 + (c))
+#define EVO_OVLY(c) (0x05 + (c))
+#define EVO_OIMM(c) (0x09 + (c))
+#define EVO_CURS(c) (0x0d + (c))
+
+/* offsets in shared sync bo of various structures */
+#define EVO_SYNC(c, o) ((c) * 0x0100 + (o))
+#define EVO_MAST_NTFY EVO_SYNC( 0, 0x00)
+#define EVO_FLIP_SEM0(c) EVO_SYNC((c), 0x00)
+#define EVO_FLIP_SEM1(c) EVO_SYNC((c), 0x10)
+
+struct evo {
+ int idx;
+ dma_addr_t handle;
+ u32 *ptr;
+ struct {
+ u32 offset;
+ u16 value;
+ } sem;
+};
+
struct nvd0_display {
struct nouveau_gpuobj *mem;
- struct {
- dma_addr_t handle;
- u32 *ptr;
- } evo[1];
+ struct nouveau_bo *sync;
+ struct evo evo[9];
struct tasklet_struct tasklet;
u32 modeset;
@@ -53,6 +75,15 @@ nvd0_display(struct drm_device *dev)
return dev_priv->engine.display.priv;
}
+static struct drm_crtc *
+nvd0_display_crtc_get(struct drm_encoder *encoder)
+{
+ return nouveau_encoder(encoder)->crtc;
+}
+
+/******************************************************************************
+ * EVO channel helpers
+ *****************************************************************************/
static inline int
evo_icmd(struct drm_device *dev, int id, u32 mthd, u32 data)
{
@@ -84,6 +115,9 @@ evo_wait(struct drm_device *dev, int id, int nr)
put = 0;
}
+ if (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
+ NV_INFO(dev, "Evo%d: %p START\n", id, disp->evo[id].ptr + put);
+
return disp->evo[id].ptr + put;
}
@@ -91,40 +125,264 @@ static void
evo_kick(u32 *push, struct drm_device *dev, int id)
{
struct nvd0_display *disp = nvd0_display(dev);
+
+ if (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO) {
+ u32 curp = nv_rd32(dev, 0x640000 + (id * 0x1000)) >> 2;
+ u32 *cur = disp->evo[id].ptr + curp;
+
+ while (cur < push)
+ NV_INFO(dev, "Evo%d: 0x%08x\n", id, *cur++);
+ NV_INFO(dev, "Evo%d: %p KICK!\n", id, push);
+ }
+
nv_wr32(dev, 0x640000 + (id * 0x1000), (push - disp->evo[id].ptr) << 2);
}
#define evo_mthd(p,m,s) *((p)++) = (((s) << 18) | (m))
#define evo_data(p,d) *((p)++) = (d)
-static struct drm_crtc *
-nvd0_display_crtc_get(struct drm_encoder *encoder)
+static int
+evo_init_dma(struct drm_device *dev, int ch)
{
- return nouveau_encoder(encoder)->crtc;
+ struct nvd0_display *disp = nvd0_display(dev);
+ u32 flags;
+
+ flags = 0x00000000;
+ if (ch == EVO_MASTER)
+ flags |= 0x01000000;
+
+ nv_wr32(dev, 0x610494 + (ch * 0x0010), (disp->evo[ch].handle >> 8) | 3);
+ nv_wr32(dev, 0x610498 + (ch * 0x0010), 0x00010000);
+ nv_wr32(dev, 0x61049c + (ch * 0x0010), 0x00000001);
+ nv_mask(dev, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000010);
+ nv_wr32(dev, 0x640000 + (ch * 0x1000), 0x00000000);
+ nv_wr32(dev, 0x610490 + (ch * 0x0010), 0x00000013 | flags);
+ if (!nv_wait(dev, 0x610490 + (ch * 0x0010), 0x80000000, 0x00000000)) {
+ NV_ERROR(dev, "PDISP: ch%d 0x%08x\n", ch,
+ nv_rd32(dev, 0x610490 + (ch * 0x0010)));
+ return -EBUSY;
+ }
+
+ nv_mask(dev, 0x610090, (1 << ch), (1 << ch));
+ nv_mask(dev, 0x6100a0, (1 << ch), (1 << ch));
+ return 0;
+}
+
+static void
+evo_fini_dma(struct drm_device *dev, int ch)
+{
+ if (!(nv_rd32(dev, 0x610490 + (ch * 0x0010)) & 0x00000010))
+ return;
+
+ nv_mask(dev, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000000);
+ nv_mask(dev, 0x610490 + (ch * 0x0010), 0x00000003, 0x00000000);
+ nv_wait(dev, 0x610490 + (ch * 0x0010), 0x80000000, 0x00000000);
+ nv_mask(dev, 0x610090, (1 << ch), 0x00000000);
+ nv_mask(dev, 0x6100a0, (1 << ch), 0x00000000);
+}
+
+static inline void
+evo_piow(struct drm_device *dev, int ch, u16 mthd, u32 data)
+{
+ nv_wr32(dev, 0x640000 + (ch * 0x1000) + mthd, data);
+}
+
+static int
+evo_init_pio(struct drm_device *dev, int ch)
+{
+ nv_wr32(dev, 0x610490 + (ch * 0x0010), 0x00000001);
+ if (!nv_wait(dev, 0x610490 + (ch * 0x0010), 0x00010000, 0x00010000)) {
+ NV_ERROR(dev, "PDISP: ch%d 0x%08x\n", ch,
+ nv_rd32(dev, 0x610490 + (ch * 0x0010)));
+ return -EBUSY;
+ }
+
+ nv_mask(dev, 0x610090, (1 << ch), (1 << ch));
+ nv_mask(dev, 0x6100a0, (1 << ch), (1 << ch));
+ return 0;
+}
+
+static void
+evo_fini_pio(struct drm_device *dev, int ch)
+{
+ if (!(nv_rd32(dev, 0x610490 + (ch * 0x0010)) & 0x00000001))
+ return;
+
+ nv_mask(dev, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000010);
+ nv_mask(dev, 0x610490 + (ch * 0x0010), 0x00000001, 0x00000000);
+ nv_wait(dev, 0x610490 + (ch * 0x0010), 0x00010000, 0x00000000);
+ nv_mask(dev, 0x610090, (1 << ch), 0x00000000);
+ nv_mask(dev, 0x6100a0, (1 << ch), 0x00000000);
+}
+
+static bool
+evo_sync_wait(void *data)
+{
+ return nouveau_bo_rd32(data, EVO_MAST_NTFY) != 0x00000000;
+}
+
+static int
+evo_sync(struct drm_device *dev, int ch)
+{
+ struct nvd0_display *disp = nvd0_display(dev);
+ u32 *push = evo_wait(dev, ch, 8);
+ if (push) {
+ nouveau_bo_wr32(disp->sync, EVO_MAST_NTFY, 0x00000000);
+ evo_mthd(push, 0x0084, 1);
+ evo_data(push, 0x80000000 | EVO_MAST_NTFY);
+ evo_mthd(push, 0x0080, 2);
+ evo_data(push, 0x00000000);
+ evo_data(push, 0x00000000);
+ evo_kick(push, dev, ch);
+ if (nv_wait_cb(dev, evo_sync_wait, disp->sync))
+ return 0;
+ }
+
+ return -EBUSY;
+}
+
+/******************************************************************************
+ * Page flipping channel
+ *****************************************************************************/
+struct nouveau_bo *
+nvd0_display_crtc_sema(struct drm_device *dev, int crtc)
+{
+ return nvd0_display(dev)->sync;
+}
+
+void
+nvd0_display_flip_stop(struct drm_crtc *crtc)
+{
+ struct nvd0_display *disp = nvd0_display(crtc->dev);
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct evo *evo = &disp->evo[EVO_FLIP(nv_crtc->index)];
+ u32 *push;
+
+ push = evo_wait(crtc->dev, evo->idx, 8);
+ if (push) {
+ evo_mthd(push, 0x0084, 1);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x0094, 1);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x00c0, 1);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x0080, 1);
+ evo_data(push, 0x00000000);
+ evo_kick(push, crtc->dev, evo->idx);
+ }
+}
+
+int
+nvd0_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ struct nouveau_channel *chan, u32 swap_interval)
+{
+ struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
+ struct nvd0_display *disp = nvd0_display(crtc->dev);
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct evo *evo = &disp->evo[EVO_FLIP(nv_crtc->index)];
+ u64 offset;
+ u32 *push;
+ int ret;
+
+ swap_interval <<= 4;
+ if (swap_interval == 0)
+ swap_interval |= 0x100;
+
+ push = evo_wait(crtc->dev, evo->idx, 128);
+ if (unlikely(push == NULL))
+ return -EBUSY;
+
+ /* synchronise with the rendering channel, if necessary */
+ if (likely(chan)) {
+ ret = RING_SPACE(chan, 10);
+ if (ret)
+ return ret;
+
+ offset = chan->dispc_vma[nv_crtc->index].offset;
+ offset += evo->sem.offset;
+
+ BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0010, 4);
+ OUT_RING (chan, upper_32_bits(offset));
+ OUT_RING (chan, lower_32_bits(offset));
+ OUT_RING (chan, 0xf00d0000 | evo->sem.value);
+ OUT_RING (chan, 0x1002);
+ BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0010, 4);
+ OUT_RING (chan, upper_32_bits(offset));
+ OUT_RING (chan, lower_32_bits(offset ^ 0x10));
+ OUT_RING (chan, 0x74b1e000);
+ OUT_RING (chan, 0x1001);
+ FIRE_RING (chan);
+ } else {
+ nouveau_bo_wr32(disp->sync, evo->sem.offset / 4,
+ 0xf00d0000 | evo->sem.value);
+ evo_sync(crtc->dev, EVO_MASTER);
+ }
+
+ /* queue the flip */
+ evo_mthd(push, 0x0100, 1);
+ evo_data(push, 0xfffe0000);
+ evo_mthd(push, 0x0084, 1);
+ evo_data(push, swap_interval);
+ if (!(swap_interval & 0x00000100)) {
+ evo_mthd(push, 0x00e0, 1);
+ evo_data(push, 0x40000000);
+ }
+ evo_mthd(push, 0x0088, 4);
+ evo_data(push, evo->sem.offset);
+ evo_data(push, 0xf00d0000 | evo->sem.value);
+ evo_data(push, 0x74b1e000);
+ evo_data(push, NvEvoSync);
+ evo_mthd(push, 0x00a0, 2);
+ evo_data(push, 0x00000000);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x00c0, 1);
+ evo_data(push, nv_fb->r_dma);
+ evo_mthd(push, 0x0110, 2);
+ evo_data(push, 0x00000000);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x0400, 5);
+ evo_data(push, nv_fb->nvbo->bo.offset >> 8);
+ evo_data(push, 0);
+ evo_data(push, (fb->height << 16) | fb->width);
+ evo_data(push, nv_fb->r_pitch);
+ evo_data(push, nv_fb->r_format);
+ evo_mthd(push, 0x0080, 1);
+ evo_data(push, 0x00000000);
+ evo_kick(push, crtc->dev, evo->idx);
+
+ evo->sem.offset ^= 0x10;
+ evo->sem.value++;
+ return 0;
}
/******************************************************************************
* CRTC
*****************************************************************************/
static int
-nvd0_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool on, bool update)
+nvd0_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update)
{
struct drm_device *dev = nv_crtc->base.dev;
- u32 *push, mode;
+ struct nouveau_connector *nv_connector;
+ struct drm_connector *connector;
+ u32 *push, mode = 0x00;
- mode = 0x00000000;
- if (on) {
- /* 0x11: 6bpc dynamic 2x2
- * 0x13: 8bpc dynamic 2x2
- * 0x19: 6bpc static 2x2
- * 0x1b: 8bpc static 2x2
- * 0x21: 6bpc temporal
- * 0x23: 8bpc temporal
- */
- mode = 0x00000011;
+ nv_connector = nouveau_crtc_connector_get(nv_crtc);
+ connector = &nv_connector->base;
+ if (nv_connector->dithering_mode == DITHERING_MODE_AUTO) {
+ if (nv_crtc->base.fb->depth > connector->display_info.bpc * 3)
+ mode = DITHERING_MODE_DYNAMIC2X2;
+ } else {
+ mode = nv_connector->dithering_mode;
+ }
+
+ if (nv_connector->dithering_depth == DITHERING_DEPTH_AUTO) {
+ if (connector->display_info.bpc >= 8)
+ mode |= DITHERING_DEPTH_8BPC;
+ } else {
+ mode |= nv_connector->dithering_depth;
}
- push = evo_wait(dev, 0, 4);
+ push = evo_wait(dev, EVO_MASTER, 4);
if (push) {
evo_mthd(push, 0x0490 + (nv_crtc->index * 0x300), 1);
evo_data(push, mode);
@@ -132,63 +390,98 @@ nvd0_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool on, bool update)
evo_mthd(push, 0x0080, 1);
evo_data(push, 0x00000000);
}
- evo_kick(push, dev, 0);
+ evo_kick(push, dev, EVO_MASTER);
}
return 0;
}
static int
-nvd0_crtc_set_scale(struct nouveau_crtc *nv_crtc, int type, bool update)
+nvd0_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update)
{
- struct drm_display_mode *mode = &nv_crtc->base.mode;
+ struct drm_display_mode *omode, *umode = &nv_crtc->base.mode;
struct drm_device *dev = nv_crtc->base.dev;
+ struct drm_crtc *crtc = &nv_crtc->base;
struct nouveau_connector *nv_connector;
- u32 *push, outX, outY;
-
- outX = mode->hdisplay;
- outY = mode->vdisplay;
+ int mode = DRM_MODE_SCALE_NONE;
+ u32 oX, oY, *push;
+ /* start off at the resolution we programmed the crtc for, this
+ * effectively handles NONE/FULL scaling
+ */
nv_connector = nouveau_crtc_connector_get(nv_crtc);
- if (nv_connector && nv_connector->native_mode) {
- struct drm_display_mode *native = nv_connector->native_mode;
- u32 xratio = (native->hdisplay << 19) / mode->hdisplay;
- u32 yratio = (native->vdisplay << 19) / mode->vdisplay;
-
- switch (type) {
- case DRM_MODE_SCALE_ASPECT:
- if (xratio > yratio) {
- outX = (mode->hdisplay * yratio) >> 19;
- outY = (mode->vdisplay * yratio) >> 19;
- } else {
- outX = (mode->hdisplay * xratio) >> 19;
- outY = (mode->vdisplay * xratio) >> 19;
- }
- break;
- case DRM_MODE_SCALE_FULLSCREEN:
- outX = native->hdisplay;
- outY = native->vdisplay;
- break;
- default:
- break;
+ if (nv_connector && nv_connector->native_mode)
+ mode = nv_connector->scaling_mode;
+
+ if (mode != DRM_MODE_SCALE_NONE)
+ omode = nv_connector->native_mode;
+ else
+ omode = umode;
+
+ oX = omode->hdisplay;
+ oY = omode->vdisplay;
+ if (omode->flags & DRM_MODE_FLAG_DBLSCAN)
+ oY *= 2;
+
+ /* add overscan compensation if necessary, will keep the aspect
+ * ratio the same as the backend mode unless overridden by the
+ * user setting both hborder and vborder properties.
+ */
+ if (nv_connector && ( nv_connector->underscan == UNDERSCAN_ON ||
+ (nv_connector->underscan == UNDERSCAN_AUTO &&
+ nv_connector->edid &&
+ drm_detect_hdmi_monitor(nv_connector->edid)))) {
+ u32 bX = nv_connector->underscan_hborder;
+ u32 bY = nv_connector->underscan_vborder;
+ u32 aspect = (oY << 19) / oX;
+
+ if (bX) {
+ oX -= (bX * 2);
+ if (bY) oY -= (bY * 2);
+ else oY = ((oX * aspect) + (aspect / 2)) >> 19;
+ } else {
+ oX -= (oX >> 4) + 32;
+ if (bY) oY -= (bY * 2);
+ else oY = ((oX * aspect) + (aspect / 2)) >> 19;
+ }
+ }
+
+ /* handle CENTER/ASPECT scaling, taking into account the areas
+ * removed already for overscan compensation
+ */
+ switch (mode) {
+ case DRM_MODE_SCALE_CENTER:
+ oX = min((u32)umode->hdisplay, oX);
+ oY = min((u32)umode->vdisplay, oY);
+ /* fall-through */
+ case DRM_MODE_SCALE_ASPECT:
+ if (oY < oX) {
+ u32 aspect = (umode->hdisplay << 19) / umode->vdisplay;
+ oX = ((oY * aspect) + (aspect / 2)) >> 19;
+ } else {
+ u32 aspect = (umode->vdisplay << 19) / umode->hdisplay;
+ oY = ((oX * aspect) + (aspect / 2)) >> 19;
}
+ break;
+ default:
+ break;
}
- push = evo_wait(dev, 0, 16);
+ push = evo_wait(dev, EVO_MASTER, 8);
if (push) {
evo_mthd(push, 0x04c0 + (nv_crtc->index * 0x300), 3);
- evo_data(push, (outY << 16) | outX);
- evo_data(push, (outY << 16) | outX);
- evo_data(push, (outY << 16) | outX);
+ evo_data(push, (oY << 16) | oX);
+ evo_data(push, (oY << 16) | oX);
+ evo_data(push, (oY << 16) | oX);
evo_mthd(push, 0x0494 + (nv_crtc->index * 0x300), 1);
evo_data(push, 0x00000000);
evo_mthd(push, 0x04b8 + (nv_crtc->index * 0x300), 1);
- evo_data(push, (mode->vdisplay << 16) | mode->hdisplay);
+ evo_data(push, (umode->vdisplay << 16) | umode->hdisplay);
+ evo_kick(push, dev, EVO_MASTER);
if (update) {
- evo_mthd(push, 0x0080, 1);
- evo_data(push, 0x00000000);
+ nvd0_display_flip_stop(crtc);
+ nvd0_display_flip_next(crtc, crtc->fb, NULL, 1);
}
- evo_kick(push, dev, 0);
}
return 0;
@@ -201,7 +494,7 @@ nvd0_crtc_set_image(struct nouveau_crtc *nv_crtc, struct drm_framebuffer *fb,
struct nouveau_framebuffer *nvfb = nouveau_framebuffer(fb);
u32 *push;
- push = evo_wait(fb->dev, 0, 16);
+ push = evo_wait(fb->dev, EVO_MASTER, 16);
if (push) {
evo_mthd(push, 0x0460 + (nv_crtc->index * 0x300), 1);
evo_data(push, nvfb->nvbo->bo.offset >> 8);
@@ -216,7 +509,7 @@ nvd0_crtc_set_image(struct nouveau_crtc *nv_crtc, struct drm_framebuffer *fb,
evo_mthd(push, 0x0080, 1);
evo_data(push, 0x00000000);
}
- evo_kick(push, fb->dev, 0);
+ evo_kick(push, fb->dev, EVO_MASTER);
}
nv_crtc->fb.tile_flags = nvfb->r_dma;
@@ -227,7 +520,7 @@ static void
nvd0_crtc_cursor_show(struct nouveau_crtc *nv_crtc, bool show, bool update)
{
struct drm_device *dev = nv_crtc->base.dev;
- u32 *push = evo_wait(dev, 0, 16);
+ u32 *push = evo_wait(dev, EVO_MASTER, 16);
if (push) {
if (show) {
evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 2);
@@ -247,7 +540,7 @@ nvd0_crtc_cursor_show(struct nouveau_crtc *nv_crtc, bool show, bool update)
evo_data(push, 0x00000000);
}
- evo_kick(push, dev, 0);
+ evo_kick(push, dev, EVO_MASTER);
}
}
@@ -262,7 +555,9 @@ nvd0_crtc_prepare(struct drm_crtc *crtc)
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
u32 *push;
- push = evo_wait(crtc->dev, 0, 2);
+ nvd0_display_flip_stop(crtc);
+
+ push = evo_wait(crtc->dev, EVO_MASTER, 2);
if (push) {
evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
evo_data(push, 0x00000000);
@@ -270,7 +565,7 @@ nvd0_crtc_prepare(struct drm_crtc *crtc)
evo_data(push, 0x03000000);
evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1);
evo_data(push, 0x00000000);
- evo_kick(push, crtc->dev, 0);
+ evo_kick(push, crtc->dev, EVO_MASTER);
}
nvd0_crtc_cursor_show(nv_crtc, false, false);
@@ -282,7 +577,7 @@ nvd0_crtc_commit(struct drm_crtc *crtc)
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
u32 *push;
- push = evo_wait(crtc->dev, 0, 32);
+ push = evo_wait(crtc->dev, EVO_MASTER, 32);
if (push) {
evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
evo_data(push, nv_crtc->fb.tile_flags);
@@ -295,10 +590,11 @@ nvd0_crtc_commit(struct drm_crtc *crtc)
evo_data(push, NvEvoVRAM);
evo_mthd(push, 0x0430 + (nv_crtc->index * 0x300), 1);
evo_data(push, 0xffffff00);
- evo_kick(push, crtc->dev, 0);
+ evo_kick(push, crtc->dev, EVO_MASTER);
}
- nvd0_crtc_cursor_show(nv_crtc, nv_crtc->cursor.visible, true);
+ nvd0_crtc_cursor_show(nv_crtc, nv_crtc->cursor.visible, false);
+ nvd0_display_flip_next(crtc, crtc->fb, NULL, 1);
}
static bool
@@ -333,21 +629,35 @@ nvd0_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
{
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct nouveau_connector *nv_connector;
- u32 htotal = mode->htotal;
- u32 vtotal = mode->vtotal;
- u32 hsyncw = mode->hsync_end - mode->hsync_start - 1;
- u32 vsyncw = mode->vsync_end - mode->vsync_start - 1;
- u32 hfrntp = mode->hsync_start - mode->hdisplay;
- u32 vfrntp = mode->vsync_start - mode->vdisplay;
- u32 hbackp = mode->htotal - mode->hsync_end;
- u32 vbackp = mode->vtotal - mode->vsync_end;
- u32 hss2be = hsyncw + hbackp;
- u32 vss2be = vsyncw + vbackp;
- u32 hss2de = htotal - hfrntp;
- u32 vss2de = vtotal - vfrntp;
+ u32 ilace = (mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 1;
+ u32 vscan = (mode->flags & DRM_MODE_FLAG_DBLSCAN) ? 2 : 1;
+ u32 hactive, hsynce, hbackp, hfrontp, hblanke, hblanks;
+ u32 vactive, vsynce, vbackp, vfrontp, vblanke, vblanks;
+ u32 vblan2e = 0, vblan2s = 1;
+ u32 magic = 0x31ec6000;
u32 syncs, *push;
int ret;
+ hactive = mode->htotal;
+ hsynce = mode->hsync_end - mode->hsync_start - 1;
+ hbackp = mode->htotal - mode->hsync_end;
+ hblanke = hsynce + hbackp;
+ hfrontp = mode->hsync_start - mode->hdisplay;
+ hblanks = mode->htotal - hfrontp - 1;
+
+ vactive = mode->vtotal * vscan / ilace;
+ vsynce = ((mode->vsync_end - mode->vsync_start) * vscan / ilace) - 1;
+ vbackp = (mode->vtotal - mode->vsync_end) * vscan / ilace;
+ vblanke = vsynce + vbackp;
+ vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace;
+ vblanks = vactive - vfrontp - 1;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ vblan2e = vactive + vsynce + vbackp;
+ vblan2s = vblan2e + (mode->vdisplay * vscan / ilace);
+ vactive = (vactive * 2) + 1;
+ magic |= 0x00000001;
+ }
+
syncs = 0x00000001;
if (mode->flags & DRM_MODE_FLAG_NHSYNC)
syncs |= 0x00000008;
@@ -358,28 +668,33 @@ nvd0_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
if (ret)
return ret;
- push = evo_wait(crtc->dev, 0, 64);
+ push = evo_wait(crtc->dev, EVO_MASTER, 64);
if (push) {
- evo_mthd(push, 0x0410 + (nv_crtc->index * 0x300), 5);
+ evo_mthd(push, 0x0410 + (nv_crtc->index * 0x300), 6);
evo_data(push, 0x00000000);
- evo_data(push, (vtotal << 16) | htotal);
- evo_data(push, (vsyncw << 16) | hsyncw);
- evo_data(push, (vss2be << 16) | hss2be);
- evo_data(push, (vss2de << 16) | hss2de);
+ evo_data(push, (vactive << 16) | hactive);
+ evo_data(push, ( vsynce << 16) | hsynce);
+ evo_data(push, (vblanke << 16) | hblanke);
+ evo_data(push, (vblanks << 16) | hblanks);
+ evo_data(push, (vblan2e << 16) | vblan2s);
evo_mthd(push, 0x042c + (nv_crtc->index * 0x300), 1);
evo_data(push, 0x00000000); /* ??? */
evo_mthd(push, 0x0450 + (nv_crtc->index * 0x300), 3);
evo_data(push, mode->clock * 1000);
evo_data(push, 0x00200000); /* ??? */
evo_data(push, mode->clock * 1000);
- evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 1);
+ evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2);
evo_data(push, syncs);
- evo_kick(push, crtc->dev, 0);
+ evo_data(push, magic);
+ evo_mthd(push, 0x04d0 + (nv_crtc->index * 0x300), 2);
+ evo_data(push, 0x00000311);
+ evo_data(push, 0x00000100);
+ evo_kick(push, crtc->dev, EVO_MASTER);
}
nv_connector = nouveau_crtc_connector_get(nv_crtc);
- nvd0_crtc_set_dither(nv_crtc, nv_connector->use_dithering, false);
- nvd0_crtc_set_scale(nv_crtc, nv_connector->scaling_mode, false);
+ nvd0_crtc_set_dither(nv_crtc, false);
+ nvd0_crtc_set_scale(nv_crtc, false);
nvd0_crtc_set_image(nv_crtc, crtc->fb, x, y, false);
return 0;
}
@@ -400,7 +715,9 @@ nvd0_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
if (ret)
return ret;
+ nvd0_display_flip_stop(crtc);
nvd0_crtc_set_image(nv_crtc, crtc->fb, x, y, true);
+ nvd0_display_flip_next(crtc, crtc->fb, NULL, 1);
return 0;
}
@@ -410,6 +727,7 @@ nvd0_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
enum mode_set_atomic state)
{
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ nvd0_display_flip_stop(crtc);
nvd0_crtc_set_image(nv_crtc, fb, x, y, true);
return 0;
}
@@ -472,10 +790,10 @@ static int
nvd0_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- const u32 data = (y << 16) | x;
+ int ch = EVO_CURS(nv_crtc->index);
- nv_wr32(crtc->dev, 0x64d084 + (nv_crtc->index * 0x1000), data);
- nv_wr32(crtc->dev, 0x64d080 + (nv_crtc->index * 0x1000), 0x00000000);
+ evo_piow(crtc->dev, ch, 0x0084, (y << 16) | x);
+ evo_piow(crtc->dev, ch, 0x0080, 0x00000000);
return 0;
}
@@ -525,6 +843,7 @@ static const struct drm_crtc_funcs nvd0_crtc_func = {
.gamma_set = nvd0_crtc_gamma_set,
.set_config = drm_crtc_helper_set_config,
.destroy = nvd0_crtc_destroy,
+ .page_flip = nouveau_crtc_page_flip,
};
static void
@@ -659,12 +978,12 @@ nvd0_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
nvd0_dac_dpms(encoder, DRM_MODE_DPMS_ON);
- push = evo_wait(encoder->dev, 0, 4);
+ push = evo_wait(encoder->dev, EVO_MASTER, 4);
if (push) {
evo_mthd(push, 0x0180 + (nv_encoder->or * 0x20), 2);
evo_data(push, 1 << nv_crtc->index);
evo_data(push, 0x00ff);
- evo_kick(push, encoder->dev, 0);
+ evo_kick(push, encoder->dev, EVO_MASTER);
}
nv_encoder->crtc = encoder->crtc;
@@ -680,13 +999,13 @@ nvd0_dac_disconnect(struct drm_encoder *encoder)
if (nv_encoder->crtc) {
nvd0_crtc_prepare(nv_encoder->crtc);
- push = evo_wait(dev, 0, 4);
+ push = evo_wait(dev, EVO_MASTER, 4);
if (push) {
evo_mthd(push, 0x0180 + (nv_encoder->or * 0x20), 1);
evo_data(push, 0x00000000);
evo_mthd(push, 0x0080, 1);
evo_data(push, 0x00000000);
- evo_kick(push, dev, 0);
+ evo_kick(push, dev, EVO_MASTER);
}
nv_encoder->crtc = NULL;
@@ -760,6 +1079,108 @@ nvd0_dac_create(struct drm_connector *connector, struct dcb_entry *dcbe)
}
/******************************************************************************
+ * Audio
+ *****************************************************************************/
+static void
+nvd0_audio_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_connector *nv_connector;
+ struct drm_device *dev = encoder->dev;
+ int i, or = nv_encoder->or * 0x30;
+
+ nv_connector = nouveau_encoder_connector_get(nv_encoder);
+ if (!drm_detect_monitor_audio(nv_connector->edid))
+ return;
+
+ nv_mask(dev, 0x10ec10 + or, 0x80000003, 0x80000001);
+
+ drm_edid_to_eld(&nv_connector->base, nv_connector->edid);
+ if (nv_connector->base.eld[0]) {
+ u8 *eld = nv_connector->base.eld;
+
+ for (i = 0; i < eld[2] * 4; i++)
+ nv_wr32(dev, 0x10ec00 + or, (i << 8) | eld[i]);
+ for (i = eld[2] * 4; i < 0x60; i++)
+ nv_wr32(dev, 0x10ec00 + or, (i << 8) | 0x00);
+
+ nv_mask(dev, 0x10ec10 + or, 0x80000002, 0x80000002);
+ }
+}
+
+static void
+nvd0_audio_disconnect(struct drm_encoder *encoder)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct drm_device *dev = encoder->dev;
+ int or = nv_encoder->or * 0x30;
+
+ nv_mask(dev, 0x10ec10 + or, 0x80000003, 0x80000000);
+}
+
+/******************************************************************************
+ * HDMI
+ *****************************************************************************/
+static void
+nvd0_hdmi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
+ struct nouveau_connector *nv_connector;
+ struct drm_device *dev = encoder->dev;
+ int head = nv_crtc->index * 0x800;
+ u32 rekey = 56; /* binary driver, and tegra constant */
+ u32 max_ac_packet;
+
+ nv_connector = nouveau_encoder_connector_get(nv_encoder);
+ if (!drm_detect_hdmi_monitor(nv_connector->edid))
+ return;
+
+ max_ac_packet = mode->htotal - mode->hdisplay;
+ max_ac_packet -= rekey;
+ max_ac_packet -= 18; /* constant from tegra */
+ max_ac_packet /= 32;
+
+ /* AVI InfoFrame */
+ nv_mask(dev, 0x616714 + head, 0x00000001, 0x00000000);
+ nv_wr32(dev, 0x61671c + head, 0x000d0282);
+ nv_wr32(dev, 0x616720 + head, 0x0000006f);
+ nv_wr32(dev, 0x616724 + head, 0x00000000);
+ nv_wr32(dev, 0x616728 + head, 0x00000000);
+ nv_wr32(dev, 0x61672c + head, 0x00000000);
+ nv_mask(dev, 0x616714 + head, 0x00000001, 0x00000001);
+
+ /* ??? InfoFrame? */
+ nv_mask(dev, 0x6167a4 + head, 0x00000001, 0x00000000);
+ nv_wr32(dev, 0x6167ac + head, 0x00000010);
+ nv_mask(dev, 0x6167a4 + head, 0x00000001, 0x00000001);
+
+ /* HDMI_CTRL */
+ nv_mask(dev, 0x616798 + head, 0x401f007f, 0x40000000 | rekey |
+ max_ac_packet << 16);
+
+ /* NFI, audio doesn't work without it though.. */
+ nv_mask(dev, 0x616548 + head, 0x00000070, 0x00000000);
+
+ nvd0_audio_mode_set(encoder, mode);
+}
+
+static void
+nvd0_hdmi_disconnect(struct drm_encoder *encoder)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);
+ struct drm_device *dev = encoder->dev;
+ int head = nv_crtc->index * 0x800;
+
+ nvd0_audio_disconnect(encoder);
+
+ nv_mask(dev, 0x616798 + head, 0x40000000, 0x00000000);
+ nv_mask(dev, 0x6167a4 + head, 0x00000001, 0x00000000);
+ nv_mask(dev, 0x616714 + head, 0x00000001, 0x00000000);
+}
+
+/******************************************************************************
* SOR
*****************************************************************************/
static void
@@ -780,7 +1201,7 @@ nvd0_sor_dpms(struct drm_encoder *encoder, int mode)
continue;
if (nv_partner != nv_encoder &&
- nv_partner->dcb->or == nv_encoder->or) {
+ nv_partner->dcb->or == nv_encoder->dcb->or) {
if (nv_partner->last_dpms == DRM_MODE_DPMS_ON)
return;
break;
@@ -829,7 +1250,8 @@ static void
nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
struct drm_display_mode *mode)
{
- struct drm_nouveau_private *dev_priv = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
struct nouveau_connector *nv_connector;
@@ -852,6 +1274,8 @@ nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
or_config = (mode_ctrl & 0x00000f00) >> 8;
if (mode->clock >= 165000)
or_config |= 0x0100;
+
+ nvd0_hdmi_mode_set(encoder, mode);
break;
case OUTPUT_LVDS:
or_config = (mode_ctrl & 0x00000f00) >> 8;
@@ -861,7 +1285,7 @@ nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
if (bios->fp.if_is_24bit)
or_config |= 0x0200;
} else {
- if (nv_connector->dcb->type == DCB_CONNECTOR_LVDS_SPWG) {
+ if (nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
if (((u8 *)nv_connector->edid)[121] == 2)
or_config |= 0x0100;
} else
@@ -889,12 +1313,12 @@ nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
nvd0_sor_dpms(encoder, DRM_MODE_DPMS_ON);
- push = evo_wait(encoder->dev, 0, 4);
+ push = evo_wait(dev, EVO_MASTER, 4);
if (push) {
evo_mthd(push, 0x0200 + (nv_encoder->or * 0x20), 2);
evo_data(push, mode_ctrl);
evo_data(push, or_config);
- evo_kick(push, encoder->dev, 0);
+ evo_kick(push, dev, EVO_MASTER);
}
nv_encoder->crtc = encoder->crtc;
@@ -910,15 +1334,17 @@ nvd0_sor_disconnect(struct drm_encoder *encoder)
if (nv_encoder->crtc) {
nvd0_crtc_prepare(nv_encoder->crtc);
- push = evo_wait(dev, 0, 4);
+ push = evo_wait(dev, EVO_MASTER, 4);
if (push) {
evo_mthd(push, 0x0200 + (nv_encoder->or * 0x20), 1);
evo_data(push, 0x00000000);
evo_mthd(push, 0x0080, 1);
evo_data(push, 0x00000000);
- evo_kick(push, dev, 0);
+ evo_kick(push, dev, EVO_MASTER);
}
+ nvd0_hdmi_disconnect(encoder);
+
nv_encoder->crtc = NULL;
nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
}
@@ -1159,6 +1585,12 @@ nvd0_display_intr(struct drm_device *dev)
struct nvd0_display *disp = nvd0_display(dev);
u32 intr = nv_rd32(dev, 0x610088);
+ if (intr & 0x00000001) {
+ u32 stat = nv_rd32(dev, 0x61008c);
+ nv_wr32(dev, 0x61008c, stat);
+ intr &= ~0x00000001;
+ }
+
if (intr & 0x00000002) {
u32 stat = nv_rd32(dev, 0x61009c);
int chid = ffs(stat) - 1;
@@ -1215,38 +1647,29 @@ nvd0_display_intr(struct drm_device *dev)
/******************************************************************************
* Init
*****************************************************************************/
-static void
+void
nvd0_display_fini(struct drm_device *dev)
{
int i;
- /* fini cursors */
- for (i = 14; i >= 13; i--) {
- if (!(nv_rd32(dev, 0x610490 + (i * 0x10)) & 0x00000001))
- continue;
-
- nv_mask(dev, 0x610490 + (i * 0x10), 0x00000001, 0x00000000);
- nv_wait(dev, 0x610490 + (i * 0x10), 0x00010000, 0x00000000);
- nv_mask(dev, 0x610090, 1 << i, 0x00000000);
- nv_mask(dev, 0x6100a0, 1 << i, 0x00000000);
+ /* fini cursors + overlays + flips */
+ for (i = 1; i >= 0; i--) {
+ evo_fini_pio(dev, EVO_CURS(i));
+ evo_fini_pio(dev, EVO_OIMM(i));
+ evo_fini_dma(dev, EVO_OVLY(i));
+ evo_fini_dma(dev, EVO_FLIP(i));
}
/* fini master */
- if (nv_rd32(dev, 0x610490) & 0x00000010) {
- nv_mask(dev, 0x610490, 0x00000010, 0x00000000);
- nv_mask(dev, 0x610490, 0x00000003, 0x00000000);
- nv_wait(dev, 0x610490, 0x80000000, 0x00000000);
- nv_mask(dev, 0x610090, 0x00000001, 0x00000000);
- nv_mask(dev, 0x6100a0, 0x00000001, 0x00000000);
- }
+ evo_fini_dma(dev, EVO_MASTER);
}
int
nvd0_display_init(struct drm_device *dev)
{
struct nvd0_display *disp = nvd0_display(dev);
+ int ret, i;
u32 *push;
- int i;
if (nv_rd32(dev, 0x6100ac) & 0x00000100) {
nv_wr32(dev, 0x6100ac, 0x00000100);
@@ -1271,7 +1694,7 @@ nvd0_display_init(struct drm_device *dev)
nv_wr32(dev, 0x6301c4 + (i * 0x800), sor);
}
- for (i = 0; i < 2; i++) {
+ for (i = 0; i < dev->mode_config.num_crtc; i++) {
u32 crtc0 = nv_rd32(dev, 0x616104 + (i * 0x800));
u32 crtc1 = nv_rd32(dev, 0x616108 + (i * 0x800));
u32 crtc2 = nv_rd32(dev, 0x61610c + (i * 0x800));
@@ -1285,36 +1708,24 @@ nvd0_display_init(struct drm_device *dev)
nv_mask(dev, 0x6100b0, 0x00000307, 0x00000307);
/* init master */
- nv_wr32(dev, 0x610494, (disp->evo[0].handle >> 8) | 3);
- nv_wr32(dev, 0x610498, 0x00010000);
- nv_wr32(dev, 0x61049c, 0x00000001);
- nv_mask(dev, 0x610490, 0x00000010, 0x00000010);
- nv_wr32(dev, 0x640000, 0x00000000);
- nv_wr32(dev, 0x610490, 0x01000013);
- if (!nv_wait(dev, 0x610490, 0x80000000, 0x00000000)) {
- NV_ERROR(dev, "PDISP: master 0x%08x\n",
- nv_rd32(dev, 0x610490));
- return -EBUSY;
+ ret = evo_init_dma(dev, EVO_MASTER);
+ if (ret)
+ goto error;
+
+ /* init flips + overlays + cursors */
+ for (i = 0; i < dev->mode_config.num_crtc; i++) {
+ if ((ret = evo_init_dma(dev, EVO_FLIP(i))) ||
+ (ret = evo_init_dma(dev, EVO_OVLY(i))) ||
+ (ret = evo_init_pio(dev, EVO_OIMM(i))) ||
+ (ret = evo_init_pio(dev, EVO_CURS(i))))
+ goto error;
}
- nv_mask(dev, 0x610090, 0x00000001, 0x00000001);
- nv_mask(dev, 0x6100a0, 0x00000001, 0x00000001);
- /* init cursors */
- for (i = 13; i <= 14; i++) {
- nv_wr32(dev, 0x610490 + (i * 0x10), 0x00000001);
- if (!nv_wait(dev, 0x610490 + (i * 0x10), 0x00010000, 0x00010000)) {
- NV_ERROR(dev, "PDISP: curs%d 0x%08x\n", i,
- nv_rd32(dev, 0x610490 + (i * 0x10)));
- return -EBUSY;
- }
-
- nv_mask(dev, 0x610090, 1 << i, 1 << i);
- nv_mask(dev, 0x6100a0, 1 << i, 1 << i);
+ push = evo_wait(dev, EVO_MASTER, 32);
+ if (!push) {
+ ret = -EBUSY;
+ goto error;
}
-
- push = evo_wait(dev, 0, 32);
- if (!push)
- return -EBUSY;
evo_mthd(push, 0x0088, 1);
evo_data(push, NvEvoSync);
evo_mthd(push, 0x0084, 1);
@@ -1323,9 +1734,12 @@ nvd0_display_init(struct drm_device *dev)
evo_data(push, 0x80000000);
evo_mthd(push, 0x008c, 1);
evo_data(push, 0x00000000);
- evo_kick(push, dev, 0);
+ evo_kick(push, dev, EVO_MASTER);
- return 0;
+error:
+ if (ret)
+ nvd0_display_fini(dev);
+ return ret;
}
void
@@ -1334,11 +1748,16 @@ nvd0_display_destroy(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nvd0_display *disp = nvd0_display(dev);
struct pci_dev *pdev = dev->pdev;
+ int i;
- nvd0_display_fini(dev);
+ for (i = 0; i < EVO_DMA_NR; i++) {
+ struct evo *evo = &disp->evo[i];
+ pci_free_consistent(pdev, PAGE_SIZE, evo->ptr, evo->handle);
+ }
- pci_free_consistent(pdev, PAGE_SIZE, disp->evo[0].ptr, disp->evo[0].handle);
nouveau_gpuobj_ref(NULL, &disp->mem);
+ nouveau_bo_unmap(disp->sync);
+ nouveau_bo_ref(NULL, &disp->sync);
nouveau_irq_unregister(dev, 26);
dev_priv->engine.display.priv = NULL;
@@ -1410,61 +1829,83 @@ nvd0_display_create(struct drm_device *dev)
tasklet_init(&disp->tasklet, nvd0_display_bh, (unsigned long)dev);
nouveau_irq_register(dev, 26, nvd0_display_intr);
+ /* small shared memory area we use for notifiers and semaphores */
+ ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
+ 0, 0x0000, &disp->sync);
+ if (!ret) {
+ ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM);
+ if (!ret)
+ ret = nouveau_bo_map(disp->sync);
+ if (ret)
+ nouveau_bo_ref(NULL, &disp->sync);
+ }
+
+ if (ret)
+ goto out;
+
/* hash table and dma objects for the memory areas we care about */
ret = nouveau_gpuobj_new(dev, NULL, 0x4000, 0x10000,
NVOBJ_FLAG_ZERO_ALLOC, &disp->mem);
if (ret)
goto out;
- nv_wo32(disp->mem, 0x1000, 0x00000049);
- nv_wo32(disp->mem, 0x1004, (disp->mem->vinst + 0x2000) >> 8);
- nv_wo32(disp->mem, 0x1008, (disp->mem->vinst + 0x2fff) >> 8);
- nv_wo32(disp->mem, 0x100c, 0x00000000);
- nv_wo32(disp->mem, 0x1010, 0x00000000);
- nv_wo32(disp->mem, 0x1014, 0x00000000);
- nv_wo32(disp->mem, 0x0000, NvEvoSync);
- nv_wo32(disp->mem, 0x0004, (0x1000 << 9) | 0x00000001);
-
- nv_wo32(disp->mem, 0x1020, 0x00000049);
- nv_wo32(disp->mem, 0x1024, 0x00000000);
- nv_wo32(disp->mem, 0x1028, (dev_priv->vram_size - 1) >> 8);
- nv_wo32(disp->mem, 0x102c, 0x00000000);
- nv_wo32(disp->mem, 0x1030, 0x00000000);
- nv_wo32(disp->mem, 0x1034, 0x00000000);
- nv_wo32(disp->mem, 0x0008, NvEvoVRAM);
- nv_wo32(disp->mem, 0x000c, (0x1020 << 9) | 0x00000001);
-
- nv_wo32(disp->mem, 0x1040, 0x00000009);
- nv_wo32(disp->mem, 0x1044, 0x00000000);
- nv_wo32(disp->mem, 0x1048, (dev_priv->vram_size - 1) >> 8);
- nv_wo32(disp->mem, 0x104c, 0x00000000);
- nv_wo32(disp->mem, 0x1050, 0x00000000);
- nv_wo32(disp->mem, 0x1054, 0x00000000);
- nv_wo32(disp->mem, 0x0010, NvEvoVRAM_LP);
- nv_wo32(disp->mem, 0x0014, (0x1040 << 9) | 0x00000001);
-
- nv_wo32(disp->mem, 0x1060, 0x0fe00009);
- nv_wo32(disp->mem, 0x1064, 0x00000000);
- nv_wo32(disp->mem, 0x1068, (dev_priv->vram_size - 1) >> 8);
- nv_wo32(disp->mem, 0x106c, 0x00000000);
- nv_wo32(disp->mem, 0x1070, 0x00000000);
- nv_wo32(disp->mem, 0x1074, 0x00000000);
- nv_wo32(disp->mem, 0x0018, NvEvoFB32);
- nv_wo32(disp->mem, 0x001c, (0x1060 << 9) | 0x00000001);
-
- pinstmem->flush(dev);
+ /* create evo dma channels */
+ for (i = 0; i < EVO_DMA_NR; i++) {
+ struct evo *evo = &disp->evo[i];
+ u64 offset = disp->sync->bo.offset;
+ u32 dmao = 0x1000 + (i * 0x100);
+ u32 hash = 0x0000 + (i * 0x040);
+
+ evo->idx = i;
+ evo->sem.offset = EVO_SYNC(evo->idx, 0x00);
+ evo->ptr = pci_alloc_consistent(pdev, PAGE_SIZE, &evo->handle);
+ if (!evo->ptr) {
+ ret = -ENOMEM;
+ goto out;
+ }
- /* push buffers for evo channels */
- disp->evo[0].ptr =
- pci_alloc_consistent(pdev, PAGE_SIZE, &disp->evo[0].handle);
- if (!disp->evo[0].ptr) {
- ret = -ENOMEM;
- goto out;
+ nv_wo32(disp->mem, dmao + 0x00, 0x00000049);
+ nv_wo32(disp->mem, dmao + 0x04, (offset + 0x0000) >> 8);
+ nv_wo32(disp->mem, dmao + 0x08, (offset + 0x0fff) >> 8);
+ nv_wo32(disp->mem, dmao + 0x0c, 0x00000000);
+ nv_wo32(disp->mem, dmao + 0x10, 0x00000000);
+ nv_wo32(disp->mem, dmao + 0x14, 0x00000000);
+ nv_wo32(disp->mem, hash + 0x00, NvEvoSync);
+ nv_wo32(disp->mem, hash + 0x04, 0x00000001 | (i << 27) |
+ ((dmao + 0x00) << 9));
+
+ nv_wo32(disp->mem, dmao + 0x20, 0x00000049);
+ nv_wo32(disp->mem, dmao + 0x24, 0x00000000);
+ nv_wo32(disp->mem, dmao + 0x28, (dev_priv->vram_size - 1) >> 8);
+ nv_wo32(disp->mem, dmao + 0x2c, 0x00000000);
+ nv_wo32(disp->mem, dmao + 0x30, 0x00000000);
+ nv_wo32(disp->mem, dmao + 0x34, 0x00000000);
+ nv_wo32(disp->mem, hash + 0x08, NvEvoVRAM);
+ nv_wo32(disp->mem, hash + 0x0c, 0x00000001 | (i << 27) |
+ ((dmao + 0x20) << 9));
+
+ nv_wo32(disp->mem, dmao + 0x40, 0x00000009);
+ nv_wo32(disp->mem, dmao + 0x44, 0x00000000);
+ nv_wo32(disp->mem, dmao + 0x48, (dev_priv->vram_size - 1) >> 8);
+ nv_wo32(disp->mem, dmao + 0x4c, 0x00000000);
+ nv_wo32(disp->mem, dmao + 0x50, 0x00000000);
+ nv_wo32(disp->mem, dmao + 0x54, 0x00000000);
+ nv_wo32(disp->mem, hash + 0x10, NvEvoVRAM_LP);
+ nv_wo32(disp->mem, hash + 0x14, 0x00000001 | (i << 27) |
+ ((dmao + 0x40) << 9));
+
+ nv_wo32(disp->mem, dmao + 0x60, 0x0fe00009);
+ nv_wo32(disp->mem, dmao + 0x64, 0x00000000);
+ nv_wo32(disp->mem, dmao + 0x68, (dev_priv->vram_size - 1) >> 8);
+ nv_wo32(disp->mem, dmao + 0x6c, 0x00000000);
+ nv_wo32(disp->mem, dmao + 0x70, 0x00000000);
+ nv_wo32(disp->mem, dmao + 0x74, 0x00000000);
+ nv_wo32(disp->mem, hash + 0x18, NvEvoFB32);
+ nv_wo32(disp->mem, hash + 0x1c, 0x00000001 | (i << 27) |
+ ((dmao + 0x60) << 9));
}
- ret = nvd0_display_init(dev);
- if (ret)
- goto out;
+ pinstmem->flush(dev);
out:
if (ret)
diff --git a/drivers/gpu/drm/r128/r128_drv.c b/drivers/gpu/drm/r128/r128_drv.c
index 4c8796ba6dd8..6a5f4395838f 100644
--- a/drivers/gpu/drm/r128/r128_drv.c
+++ b/drivers/gpu/drm/r128/r128_drv.c
@@ -42,6 +42,20 @@ static struct pci_device_id pciidlist[] = {
r128_PCI_IDS
};
+static const struct file_operations r128_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = drm_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = r128_compat_ioctl,
+#endif
+ .llseek = noop_llseek,
+};
+
static struct drm_driver driver = {
.driver_features =
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
@@ -60,21 +74,7 @@ static struct drm_driver driver = {
.reclaim_buffers = drm_core_reclaim_buffers,
.ioctls = r128_ioctls,
.dma_ioctl = r128_cce_buffers,
- .fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = drm_mmap,
- .poll = drm_poll,
- .fasync = drm_fasync,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = r128_compat_ioctl,
-#endif
- .llseek = noop_llseek,
- },
-
-
+ .fops = &r128_driver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index cf8b4bc3e73d..2139fe893ec5 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -70,7 +70,8 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \
r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \
evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o \
- radeon_trace_points.o ni.o cayman_blit_shaders.o atombios_encoders.o
+ radeon_trace_points.o ni.o cayman_blit_shaders.o atombios_encoders.o \
+ radeon_semaphore.o radeon_sa.o
radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o
@@ -78,4 +79,4 @@ radeon-$(CONFIG_ACPI) += radeon_acpi.o
obj-$(CONFIG_DRM_RADEON)+= radeon.o
-CFLAGS_radeon_trace_points.o := -I$(src)
\ No newline at end of file
+CFLAGS_radeon_trace_points.o := -I$(src)
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index 14cc88aaf3a7..d1bd239cd9e9 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -665,6 +665,8 @@ static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg)
SDEBUG(" count: %d\n", count);
if (arg == ATOM_UNIT_MICROSEC)
udelay(count);
+ else if (!drm_can_sleep())
+ mdelay(count);
else
msleep(count);
}
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 87631fede1f8..0fda830ef806 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -554,7 +554,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
if (encoder->crtc == crtc) {
radeon_encoder = to_radeon_encoder(encoder);
connector = radeon_get_connector_for_encoder(encoder);
- if (connector)
+ if (connector && connector->display_info.bpc)
bpc = connector->display_info.bpc;
encoder_mode = atombios_get_encoder_mode(encoder);
if ((radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
@@ -1107,9 +1107,40 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
return -EINVAL;
}
- if (tiling_flags & RADEON_TILING_MACRO)
+ if (tiling_flags & RADEON_TILING_MACRO) {
+ if (rdev->family >= CHIP_CAYMAN)
+ tmp = rdev->config.cayman.tile_config;
+ else
+ tmp = rdev->config.evergreen.tile_config;
+
+ switch ((tmp & 0xf0) >> 4) {
+ case 0: /* 4 banks */
+ fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_4_BANK);
+ break;
+ case 1: /* 8 banks */
+ default:
+ fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_8_BANK);
+ break;
+ case 2: /* 16 banks */
+ fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_16_BANK);
+ break;
+ }
+
+ switch ((tmp & 0xf000) >> 12) {
+ case 0: /* 1KB rows */
+ default:
+ fb_format |= EVERGREEN_GRPH_TILE_SPLIT(EVERGREEN_ADDR_SURF_TILE_SPLIT_1KB);
+ break;
+ case 1: /* 2KB rows */
+ fb_format |= EVERGREEN_GRPH_TILE_SPLIT(EVERGREEN_ADDR_SURF_TILE_SPLIT_2KB);
+ break;
+ case 2: /* 4KB rows */
+ fb_format |= EVERGREEN_GRPH_TILE_SPLIT(EVERGREEN_ADDR_SURF_TILE_SPLIT_4KB);
+ break;
+ }
+
fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1);
- else if (tiling_flags & RADEON_TILING_MICRO)
+ } else if (tiling_flags & RADEON_TILING_MICRO)
fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);
switch (radeon_crtc->crtc_id) {
@@ -1153,7 +1184,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
WREG32(EVERGREEN_GRPH_X_END + radeon_crtc->crtc_offset, target_fb->width);
WREG32(EVERGREEN_GRPH_Y_END + radeon_crtc->crtc_offset, target_fb->height);
- fb_pitch_pixels = target_fb->pitch / (target_fb->bits_per_pixel / 8);
+ fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
WREG32(EVERGREEN_GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels);
WREG32(EVERGREEN_GRPH_ENABLE + radeon_crtc->crtc_offset, 1);
@@ -1322,7 +1353,7 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
WREG32(AVIVO_D1GRPH_X_END + radeon_crtc->crtc_offset, target_fb->width);
WREG32(AVIVO_D1GRPH_Y_END + radeon_crtc->crtc_offset, target_fb->height);
- fb_pitch_pixels = target_fb->pitch / (target_fb->bits_per_pixel / 8);
+ fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
WREG32(AVIVO_D1GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels);
WREG32(AVIVO_D1GRPH_ENABLE + radeon_crtc->crtc_offset, 1);
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 39c04c1b8472..f1f06ca9f1f5 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -409,8 +409,6 @@ int
atombios_get_encoder_mode(struct drm_encoder *encoder)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
struct drm_connector *connector;
struct radeon_connector *radeon_connector;
struct radeon_connector_atom_dig *dig_connector;
@@ -434,13 +432,10 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
switch (connector->connector_type) {
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
- if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) {
- /* fix me */
- if (ASIC_IS_DCE4(rdev))
- return ATOM_ENCODER_MODE_DVI;
- else
- return ATOM_ENCODER_MODE_HDMI;
- } else if (radeon_connector->use_digital)
+ if (drm_detect_monitor_audio(radeon_connector->edid) &&
+ radeon_audio)
+ return ATOM_ENCODER_MODE_HDMI;
+ else if (radeon_connector->use_digital)
return ATOM_ENCODER_MODE_DVI;
else
return ATOM_ENCODER_MODE_CRT;
@@ -448,13 +443,10 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
case DRM_MODE_CONNECTOR_DVID:
case DRM_MODE_CONNECTOR_HDMIA:
default:
- if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) {
- /* fix me */
- if (ASIC_IS_DCE4(rdev))
- return ATOM_ENCODER_MODE_DVI;
- else
- return ATOM_ENCODER_MODE_HDMI;
- } else
+ if (drm_detect_monitor_audio(radeon_connector->edid) &&
+ radeon_audio)
+ return ATOM_ENCODER_MODE_HDMI;
+ else
return ATOM_ENCODER_MODE_DVI;
break;
case DRM_MODE_CONNECTOR_LVDS:
@@ -465,13 +457,10 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
(dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
return ATOM_ENCODER_MODE_DP;
- else if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) {
- /* fix me */
- if (ASIC_IS_DCE4(rdev))
- return ATOM_ENCODER_MODE_DVI;
- else
- return ATOM_ENCODER_MODE_HDMI;
- } else
+ else if (drm_detect_monitor_audio(radeon_connector->edid) &&
+ radeon_audio)
+ return ATOM_ENCODER_MODE_HDMI;
+ else
return ATOM_ENCODER_MODE_DVI;
break;
case DRM_MODE_CONNECTOR_eDP:
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 1d603a3335db..636660fca8c2 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -40,6 +40,8 @@
static void evergreen_gpu_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);
void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
+extern void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
+ int ring, u32 cp_int_cntl);
void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
{
@@ -82,6 +84,7 @@ u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset);
+ int i;
/* Lock the graphics update lock */
tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
@@ -99,7 +102,11 @@ u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
(u32)crtc_base);
/* Wait for update_pending to go high. */
- while (!(RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING));
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING)
+ break;
+ udelay(1);
+ }
DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
/* Unlock the lock, so double-buffering can take place inside vblank */
@@ -1306,18 +1313,20 @@ void evergreen_mc_program(struct radeon_device *rdev)
*/
void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
+ struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
+
/* set to DX10/11 mode */
- radeon_ring_write(rdev, PACKET3(PACKET3_MODE_CONTROL, 0));
- radeon_ring_write(rdev, 1);
+ radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
+ radeon_ring_write(ring, 1);
/* FIXME: implement */
- radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
- radeon_ring_write(rdev,
+ radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+ radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
(2 << 0) |
#endif
(ib->gpu_addr & 0xFFFFFFFC));
- radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
- radeon_ring_write(rdev, ib->length_dw);
+ radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
+ radeon_ring_write(ring, ib->length_dw);
}
@@ -1355,71 +1364,73 @@ static int evergreen_cp_load_microcode(struct radeon_device *rdev)
static int evergreen_cp_start(struct radeon_device *rdev)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r, i;
uint32_t cp_me;
- r = radeon_ring_lock(rdev, 7);
+ r = radeon_ring_lock(rdev, ring, 7);
if (r) {
DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
return r;
}
- radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
- radeon_ring_write(rdev, 0x1);
- radeon_ring_write(rdev, 0x0);
- radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1);
- radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, 0);
- radeon_ring_unlock_commit(rdev);
+ radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
+ radeon_ring_write(ring, 0x1);
+ radeon_ring_write(ring, 0x0);
+ radeon_ring_write(ring, rdev->config.evergreen.max_hw_contexts - 1);
+ radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0);
+ radeon_ring_unlock_commit(rdev, ring);
cp_me = 0xff;
WREG32(CP_ME_CNTL, cp_me);
- r = radeon_ring_lock(rdev, evergreen_default_size + 19);
+ r = radeon_ring_lock(rdev, ring, evergreen_default_size + 19);
if (r) {
DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
return r;
}
/* setup clear context state */
- radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- radeon_ring_write(rdev, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
+ radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+ radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
for (i = 0; i < evergreen_default_size; i++)
- radeon_ring_write(rdev, evergreen_default_state[i]);
+ radeon_ring_write(ring, evergreen_default_state[i]);
- radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- radeon_ring_write(rdev, PACKET3_PREAMBLE_END_CLEAR_STATE);
+ radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+ radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
/* set clear context state */
- radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0));
- radeon_ring_write(rdev, 0);
+ radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
+ radeon_ring_write(ring, 0);
/* SQ_VTX_BASE_VTX_LOC */
- radeon_ring_write(rdev, 0xc0026f00);
- radeon_ring_write(rdev, 0x00000000);
- radeon_ring_write(rdev, 0x00000000);
- radeon_ring_write(rdev, 0x00000000);
+ radeon_ring_write(ring, 0xc0026f00);
+ radeon_ring_write(ring, 0x00000000);
+ radeon_ring_write(ring, 0x00000000);
+ radeon_ring_write(ring, 0x00000000);
/* Clear consts */
- radeon_ring_write(rdev, 0xc0036f00);
- radeon_ring_write(rdev, 0x00000bc4);
- radeon_ring_write(rdev, 0xffffffff);
- radeon_ring_write(rdev, 0xffffffff);
- radeon_ring_write(rdev, 0xffffffff);
+ radeon_ring_write(ring, 0xc0036f00);
+ radeon_ring_write(ring, 0x00000bc4);
+ radeon_ring_write(ring, 0xffffffff);
+ radeon_ring_write(ring, 0xffffffff);
+ radeon_ring_write(ring, 0xffffffff);
- radeon_ring_write(rdev, 0xc0026900);
- radeon_ring_write(rdev, 0x00000316);
- radeon_ring_write(rdev, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
- radeon_ring_write(rdev, 0x00000010); /* */
+ radeon_ring_write(ring, 0xc0026900);
+ radeon_ring_write(ring, 0x00000316);
+ radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
+ radeon_ring_write(ring, 0x00000010); /* */
- radeon_ring_unlock_commit(rdev);
+ radeon_ring_unlock_commit(rdev, ring);
return 0;
}
int evergreen_cp_resume(struct radeon_device *rdev)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 tmp;
u32 rb_bufsz;
int r;
@@ -1437,13 +1448,13 @@ int evergreen_cp_resume(struct radeon_device *rdev)
RREG32(GRBM_SOFT_RESET);
/* Set ring buffer size */
- rb_bufsz = drm_order(rdev->cp.ring_size / 8);
+ rb_bufsz = drm_order(ring->ring_size / 8);
tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
#endif
WREG32(CP_RB_CNTL, tmp);
- WREG32(CP_SEM_WAIT_TIMER, 0x4);
+ WREG32(CP_SEM_WAIT_TIMER, 0x0);
/* Set the write pointer delay */
WREG32(CP_RB_WPTR_DELAY, 0);
@@ -1451,8 +1462,8 @@ int evergreen_cp_resume(struct radeon_device *rdev)
/* Initialize the ring buffer's read and write pointers */
WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
WREG32(CP_RB_RPTR_WR, 0);
- rdev->cp.wptr = 0;
- WREG32(CP_RB_WPTR, rdev->cp.wptr);
+ ring->wptr = 0;
+ WREG32(CP_RB_WPTR, ring->wptr);
	/* set the wb address whether it's enabled or not */
WREG32(CP_RB_RPTR_ADDR,
@@ -1470,16 +1481,16 @@ int evergreen_cp_resume(struct radeon_device *rdev)
mdelay(1);
WREG32(CP_RB_CNTL, tmp);
- WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
+ WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
- rdev->cp.rptr = RREG32(CP_RB_RPTR);
+ ring->rptr = RREG32(CP_RB_RPTR);
evergreen_cp_start(rdev);
- rdev->cp.ready = true;
- r = radeon_ring_test(rdev);
+ ring->ready = true;
+ r = radeon_ring_test(rdev, ring);
if (r) {
- rdev->cp.ready = false;
+ ring->ready = false;
return r;
}
return 0;
@@ -2348,7 +2359,7 @@ int evergreen_mc_init(struct radeon_device *rdev)
return 0;
}
-bool evergreen_gpu_is_lockup(struct radeon_device *rdev)
+bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
u32 srbm_status;
u32 grbm_status;
@@ -2361,19 +2372,19 @@ bool evergreen_gpu_is_lockup(struct radeon_device *rdev)
grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
if (!(grbm_status & GUI_ACTIVE)) {
- r100_gpu_lockup_update(lockup, &rdev->cp);
+ r100_gpu_lockup_update(lockup, ring);
return false;
}
/* force CP activities */
- r = radeon_ring_lock(rdev, 2);
+ r = radeon_ring_lock(rdev, ring, 2);
if (!r) {
/* PACKET2 NOP */
- radeon_ring_write(rdev, 0x80000000);
- radeon_ring_write(rdev, 0x80000000);
- radeon_ring_unlock_commit(rdev);
+ radeon_ring_write(ring, 0x80000000);
+ radeon_ring_write(ring, 0x80000000);
+ radeon_ring_unlock_commit(rdev, ring);
}
- rdev->cp.rptr = RREG32(CP_RB_RPTR);
- return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
+ ring->rptr = RREG32(CP_RB_RPTR);
+ return r100_gpu_cp_is_lockup(rdev, lockup, ring);
}
static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
@@ -2465,7 +2476,13 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev)
{
u32 tmp;
- WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+ if (rdev->family >= CHIP_CAYMAN) {
+ cayman_cp_int_cntl_setup(rdev, 0,
+ CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+ cayman_cp_int_cntl_setup(rdev, 1, 0);
+ cayman_cp_int_cntl_setup(rdev, 2, 0);
+ } else
+ WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
WREG32(GRBM_INT_CNTL, 0);
WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
@@ -2510,6 +2527,7 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev)
int evergreen_irq_set(struct radeon_device *rdev)
{
u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
+ u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0;
u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
u32 grbm_int_cntl = 0;
@@ -2534,11 +2552,28 @@ int evergreen_irq_set(struct radeon_device *rdev)
hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
- if (rdev->irq.sw_int) {
- DRM_DEBUG("evergreen_irq_set: sw int\n");
- cp_int_cntl |= RB_INT_ENABLE;
- cp_int_cntl |= TIME_STAMP_INT_ENABLE;
+ if (rdev->family >= CHIP_CAYMAN) {
+ /* enable CP interrupts on all rings */
+ if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
+ DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
+ cp_int_cntl |= TIME_STAMP_INT_ENABLE;
+ }
+ if (rdev->irq.sw_int[CAYMAN_RING_TYPE_CP1_INDEX]) {
+ DRM_DEBUG("evergreen_irq_set: sw int cp1\n");
+ cp_int_cntl1 |= TIME_STAMP_INT_ENABLE;
+ }
+ if (rdev->irq.sw_int[CAYMAN_RING_TYPE_CP2_INDEX]) {
+ DRM_DEBUG("evergreen_irq_set: sw int cp2\n");
+ cp_int_cntl2 |= TIME_STAMP_INT_ENABLE;
+ }
+ } else {
+ if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
+ DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
+ cp_int_cntl |= RB_INT_ENABLE;
+ cp_int_cntl |= TIME_STAMP_INT_ENABLE;
+ }
}
+
if (rdev->irq.crtc_vblank_int[0] ||
rdev->irq.pflip[0]) {
DRM_DEBUG("evergreen_irq_set: vblank 0\n");
@@ -2598,7 +2633,12 @@ int evergreen_irq_set(struct radeon_device *rdev)
grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
}
- WREG32(CP_INT_CNTL, cp_int_cntl);
+ if (rdev->family >= CHIP_CAYMAN) {
+ cayman_cp_int_cntl_setup(rdev, 0, cp_int_cntl);
+ cayman_cp_int_cntl_setup(rdev, 1, cp_int_cntl1);
+ cayman_cp_int_cntl_setup(rdev, 2, cp_int_cntl2);
+ } else
+ WREG32(CP_INT_CNTL, cp_int_cntl);
WREG32(GRBM_INT_CNTL, grbm_int_cntl);
WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
@@ -3013,11 +3053,24 @@ restart_ih:
case 177: /* CP_INT in IB1 */
case 178: /* CP_INT in IB2 */
DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
- radeon_fence_process(rdev);
+ radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
break;
case 181: /* CP EOP event */
DRM_DEBUG("IH: CP EOP\n");
- radeon_fence_process(rdev);
+ if (rdev->family >= CHIP_CAYMAN) {
+ switch (src_data) {
+ case 0:
+ radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ break;
+ case 1:
+ radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
+ break;
+ case 2:
+ radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
+ break;
+ }
+ } else
+ radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
break;
case 233: /* GUI IDLE */
DRM_DEBUG("IH: GUI idle\n");
@@ -3047,6 +3100,7 @@ restart_ih:
static int evergreen_startup(struct radeon_device *rdev)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
/* enable pcie gen2 link */
@@ -3101,6 +3155,12 @@ static int evergreen_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+ return r;
+ }
+
/* Enable IRQ */
r = r600_irq_init(rdev);
if (r) {
@@ -3110,7 +3170,9 @@ static int evergreen_startup(struct radeon_device *rdev)
}
evergreen_irq_set(rdev);
- r = radeon_ring_init(rdev, rdev->cp.ring_size);
+ r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
+ R600_CP_RB_RPTR, R600_CP_RB_WPTR,
+ 0, 0xfffff, RADEON_CP_PACKET2);
if (r)
return r;
r = evergreen_cp_load_microcode(rdev);
@@ -3120,6 +3182,22 @@ static int evergreen_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = radeon_ib_pool_start(rdev);
+ if (r)
+ return r;
+
+ r = r600_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ if (r) {
+ DRM_ERROR("radeon: failed testing IB (%d).\n", r);
+ rdev->accel_working = false;
+ }
+
+ r = r600_audio_init(rdev);
+ if (r) {
+ DRM_ERROR("radeon: audio init failed\n");
+ return r;
+ }
+
return 0;
}
@@ -3139,31 +3217,30 @@ int evergreen_resume(struct radeon_device *rdev)
/* post card */
atom_asic_init(rdev->mode_info.atom_context);
+ rdev->accel_working = true;
r = evergreen_startup(rdev);
if (r) {
DRM_ERROR("evergreen startup failed on resume\n");
return r;
}
- r = r600_ib_test(rdev);
- if (r) {
- DRM_ERROR("radeon: failed testing IB (%d).\n", r);
- return r;
- }
-
return r;
}
int evergreen_suspend(struct radeon_device *rdev)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+
+ r600_audio_fini(rdev);
/* FIXME: we should wait for ring to be empty */
+ radeon_ib_pool_suspend(rdev);
+ r600_blit_suspend(rdev);
r700_cp_stop(rdev);
- rdev->cp.ready = false;
+ ring->ready = false;
evergreen_irq_suspend(rdev);
radeon_wb_disable(rdev);
evergreen_pcie_gart_disable(rdev);
- r600_blit_suspend(rdev);
return 0;
}
@@ -3238,8 +3315,8 @@ int evergreen_init(struct radeon_device *rdev)
if (r)
return r;
- rdev->cp.ring_obj = NULL;
- r600_ring_init(rdev, 1024 * 1024);
+ rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
+ r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
@@ -3248,43 +3325,52 @@ int evergreen_init(struct radeon_device *rdev)
if (r)
return r;
+ r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+ rdev->accel_working = false;
+ }
+
r = evergreen_startup(rdev);
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
r700_cp_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
+ r100_ib_fini(rdev);
radeon_irq_kms_fini(rdev);
evergreen_pcie_gart_fini(rdev);
rdev->accel_working = false;
}
- if (rdev->accel_working) {
- r = radeon_ib_pool_init(rdev);
- if (r) {
- DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
- rdev->accel_working = false;
- }
- r = r600_ib_test(rdev);
- if (r) {
- DRM_ERROR("radeon: failed testing IB (%d).\n", r);
- rdev->accel_working = false;
+
+ /* Don't start up if the MC ucode is missing on BTC parts.
+ * The default clocks and voltages before the MC ucode
+	 * is loaded are not sufficient for advanced operations.
+ */
+ if (ASIC_IS_DCE5(rdev)) {
+ if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
+ DRM_ERROR("radeon: MC ucode required for NI+.\n");
+ return -EINVAL;
}
}
+
return 0;
}
void evergreen_fini(struct radeon_device *rdev)
{
+ r600_audio_fini(rdev);
r600_blit_fini(rdev);
r700_cp_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
- radeon_ib_pool_fini(rdev);
+ r100_ib_fini(rdev);
radeon_irq_kms_fini(rdev);
evergreen_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
+ radeon_semaphore_driver_fini(rdev);
radeon_fence_driver_fini(rdev);
radeon_agp_fini(rdev);
radeon_bo_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
index 914e5af84163..2379849515c7 100644
--- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c
+++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
@@ -49,6 +49,7 @@ static void
set_render_target(struct radeon_device *rdev, int format,
int w, int h, u64 gpu_addr)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 cb_color_info;
int pitch, slice;
@@ -62,23 +63,23 @@ set_render_target(struct radeon_device *rdev, int format,
pitch = (w / 8) - 1;
slice = ((w * h) / 64) - 1;
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 15));
- radeon_ring_write(rdev, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_START) >> 2);
- radeon_ring_write(rdev, gpu_addr >> 8);
- radeon_ring_write(rdev, pitch);
- radeon_ring_write(rdev, slice);
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, cb_color_info);
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, (w - 1) | ((h - 1) << 16));
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, 0);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 15));
+ radeon_ring_write(ring, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_START) >> 2);
+ radeon_ring_write(ring, gpu_addr >> 8);
+ radeon_ring_write(ring, pitch);
+ radeon_ring_write(ring, slice);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, cb_color_info);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, (w - 1) | ((h - 1) << 16));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0);
}
/* emits 5dw */
@@ -87,6 +88,7 @@ cp_set_surface_sync(struct radeon_device *rdev,
u32 sync_type, u32 size,
u64 mc_addr)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 cp_coher_size;
if (size == 0xffffffff)
@@ -99,39 +101,40 @@ cp_set_surface_sync(struct radeon_device *rdev,
* to the RB directly. For IBs, the CP programs this as part of the
* surface_sync packet.
*/
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
- radeon_ring_write(rdev, (0x85e8 - PACKET3_SET_CONFIG_REG_START) >> 2);
- radeon_ring_write(rdev, 0); /* CP_COHER_CNTL2 */
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(ring, (0x85e8 - PACKET3_SET_CONFIG_REG_START) >> 2);
+ radeon_ring_write(ring, 0); /* CP_COHER_CNTL2 */
}
- radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3));
- radeon_ring_write(rdev, sync_type);
- radeon_ring_write(rdev, cp_coher_size);
- radeon_ring_write(rdev, mc_addr >> 8);
- radeon_ring_write(rdev, 10); /* poll interval */
+ radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+ radeon_ring_write(ring, sync_type);
+ radeon_ring_write(ring, cp_coher_size);
+ radeon_ring_write(ring, mc_addr >> 8);
+ radeon_ring_write(ring, 10); /* poll interval */
}
/* emits 11dw + 1 surface sync = 16dw */
static void
set_shaders(struct radeon_device *rdev)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u64 gpu_addr;
/* VS */
gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 3));
- radeon_ring_write(rdev, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_START) >> 2);
- radeon_ring_write(rdev, gpu_addr >> 8);
- radeon_ring_write(rdev, 2);
- radeon_ring_write(rdev, 0);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 3));
+ radeon_ring_write(ring, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_START) >> 2);
+ radeon_ring_write(ring, gpu_addr >> 8);
+ radeon_ring_write(ring, 2);
+ radeon_ring_write(ring, 0);
/* PS */
gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.ps_offset;
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 4));
- radeon_ring_write(rdev, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_START) >> 2);
- radeon_ring_write(rdev, gpu_addr >> 8);
- radeon_ring_write(rdev, 1);
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, 2);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 4));
+ radeon_ring_write(ring, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_START) >> 2);
+ radeon_ring_write(ring, gpu_addr >> 8);
+ radeon_ring_write(ring, 1);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 2);
gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
cp_set_surface_sync(rdev, PACKET3_SH_ACTION_ENA, 512, gpu_addr);
@@ -141,6 +144,7 @@ set_shaders(struct radeon_device *rdev)
static void
set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 sq_vtx_constant_word2, sq_vtx_constant_word3;
/* high addr, stride */
@@ -155,16 +159,16 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
SQ_VTCX_SEL_Z(SQ_SEL_Z) |
SQ_VTCX_SEL_W(SQ_SEL_W);
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 8));
- radeon_ring_write(rdev, 0x580);
- radeon_ring_write(rdev, gpu_addr & 0xffffffff);
- radeon_ring_write(rdev, 48 - 1); /* size */
- radeon_ring_write(rdev, sq_vtx_constant_word2);
- radeon_ring_write(rdev, sq_vtx_constant_word3);
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, S__SQ_CONSTANT_TYPE(SQ_TEX_VTX_VALID_BUFFER));
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 8));
+ radeon_ring_write(ring, 0x580);
+ radeon_ring_write(ring, gpu_addr & 0xffffffff);
+ radeon_ring_write(ring, 48 - 1); /* size */
+ radeon_ring_write(ring, sq_vtx_constant_word2);
+ radeon_ring_write(ring, sq_vtx_constant_word3);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, S__SQ_CONSTANT_TYPE(SQ_TEX_VTX_VALID_BUFFER));
if ((rdev->family == CHIP_CEDAR) ||
(rdev->family == CHIP_PALM) ||
@@ -185,6 +189,7 @@ set_tex_resource(struct radeon_device *rdev,
int format, int w, int h, int pitch,
u64 gpu_addr, u32 size)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 sq_tex_resource_word0, sq_tex_resource_word1;
u32 sq_tex_resource_word4, sq_tex_resource_word7;
@@ -208,16 +213,16 @@ set_tex_resource(struct radeon_device *rdev,
cp_set_surface_sync(rdev,
PACKET3_TC_ACTION_ENA, size, gpu_addr);
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 8));
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, sq_tex_resource_word0);
- radeon_ring_write(rdev, sq_tex_resource_word1);
- radeon_ring_write(rdev, gpu_addr >> 8);
- radeon_ring_write(rdev, gpu_addr >> 8);
- radeon_ring_write(rdev, sq_tex_resource_word4);
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, sq_tex_resource_word7);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 8));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, sq_tex_resource_word0);
+ radeon_ring_write(ring, sq_tex_resource_word1);
+ radeon_ring_write(ring, gpu_addr >> 8);
+ radeon_ring_write(ring, gpu_addr >> 8);
+ radeon_ring_write(ring, sq_tex_resource_word4);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, sq_tex_resource_word7);
}
/* emits 12 */
@@ -225,6 +230,7 @@ static void
set_scissors(struct radeon_device *rdev, int x1, int y1,
int x2, int y2)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
/* workaround some hw bugs */
if (x2 == 0)
x1 = 1;
@@ -235,43 +241,44 @@ set_scissors(struct radeon_device *rdev, int x1, int y1,
x2 = 2;
}
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
- radeon_ring_write(rdev, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
- radeon_ring_write(rdev, (x1 << 0) | (y1 << 16));
- radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+ radeon_ring_write(ring, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
+ radeon_ring_write(ring, (x1 << 0) | (y1 << 16));
+ radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
- radeon_ring_write(rdev, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
- radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31));
- radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+ radeon_ring_write(ring, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
+ radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
+ radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
- radeon_ring_write(rdev, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
- radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31));
- radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+ radeon_ring_write(ring, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
+ radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
+ radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
}
/* emits 10 */
static void
draw_auto(struct radeon_device *rdev)
{
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
- radeon_ring_write(rdev, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_START) >> 2);
- radeon_ring_write(rdev, DI_PT_RECTLIST);
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(ring, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_START) >> 2);
+ radeon_ring_write(ring, DI_PT_RECTLIST);
- radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0));
- radeon_ring_write(rdev,
+ radeon_ring_write(ring, PACKET3(PACKET3_INDEX_TYPE, 0));
+ radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
(2 << 2) |
#endif
DI_INDEX_SIZE_16_BIT);
- radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0));
- radeon_ring_write(rdev, 1);
+ radeon_ring_write(ring, PACKET3(PACKET3_NUM_INSTANCES, 0));
+ radeon_ring_write(ring, 1);
- radeon_ring_write(rdev, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
- radeon_ring_write(rdev, 3);
- radeon_ring_write(rdev, DI_SRC_SEL_AUTO_INDEX);
+ radeon_ring_write(ring, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
+ radeon_ring_write(ring, 3);
+ radeon_ring_write(ring, DI_SRC_SEL_AUTO_INDEX);
}
@@ -279,6 +286,7 @@ draw_auto(struct radeon_device *rdev)
static void
set_default_state(struct radeon_device *rdev)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2, sq_gpr_resource_mgmt_3;
u32 sq_thread_resource_mgmt, sq_thread_resource_mgmt_2;
u32 sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2, sq_stack_resource_mgmt_3;
@@ -292,8 +300,8 @@ set_default_state(struct radeon_device *rdev)
int dwords;
/* set clear context state */
- radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0));
- radeon_ring_write(rdev, 0);
+ radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
+ radeon_ring_write(ring, 0);
if (rdev->family < CHIP_CAYMAN) {
switch (rdev->family) {
@@ -550,60 +558,60 @@ set_default_state(struct radeon_device *rdev)
NUM_LS_STACK_ENTRIES(num_ls_stack_entries));
/* disable dyn gprs */
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
- radeon_ring_write(rdev, (SQ_DYN_GPR_CNTL_PS_FLUSH_REQ - PACKET3_SET_CONFIG_REG_START) >> 2);
- radeon_ring_write(rdev, 0);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(ring, (SQ_DYN_GPR_CNTL_PS_FLUSH_REQ - PACKET3_SET_CONFIG_REG_START) >> 2);
+ radeon_ring_write(ring, 0);
/* setup LDS */
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
- radeon_ring_write(rdev, (SQ_LDS_RESOURCE_MGMT - PACKET3_SET_CONFIG_REG_START) >> 2);
- radeon_ring_write(rdev, 0x10001000);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(ring, (SQ_LDS_RESOURCE_MGMT - PACKET3_SET_CONFIG_REG_START) >> 2);
+ radeon_ring_write(ring, 0x10001000);
/* SQ config */
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 11));
- radeon_ring_write(rdev, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_START) >> 2);
- radeon_ring_write(rdev, sq_config);
- radeon_ring_write(rdev, sq_gpr_resource_mgmt_1);
- radeon_ring_write(rdev, sq_gpr_resource_mgmt_2);
- radeon_ring_write(rdev, sq_gpr_resource_mgmt_3);
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, sq_thread_resource_mgmt);
- radeon_ring_write(rdev, sq_thread_resource_mgmt_2);
- radeon_ring_write(rdev, sq_stack_resource_mgmt_1);
- radeon_ring_write(rdev, sq_stack_resource_mgmt_2);
- radeon_ring_write(rdev, sq_stack_resource_mgmt_3);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 11));
+ radeon_ring_write(ring, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_START) >> 2);
+ radeon_ring_write(ring, sq_config);
+ radeon_ring_write(ring, sq_gpr_resource_mgmt_1);
+ radeon_ring_write(ring, sq_gpr_resource_mgmt_2);
+ radeon_ring_write(ring, sq_gpr_resource_mgmt_3);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, sq_thread_resource_mgmt);
+ radeon_ring_write(ring, sq_thread_resource_mgmt_2);
+ radeon_ring_write(ring, sq_stack_resource_mgmt_1);
+ radeon_ring_write(ring, sq_stack_resource_mgmt_2);
+ radeon_ring_write(ring, sq_stack_resource_mgmt_3);
}
/* CONTEXT_CONTROL */
- radeon_ring_write(rdev, 0xc0012800);
- radeon_ring_write(rdev, 0x80000000);
- radeon_ring_write(rdev, 0x80000000);
+ radeon_ring_write(ring, 0xc0012800);
+ radeon_ring_write(ring, 0x80000000);
+ radeon_ring_write(ring, 0x80000000);
/* SQ_VTX_BASE_VTX_LOC */
- radeon_ring_write(rdev, 0xc0026f00);
- radeon_ring_write(rdev, 0x00000000);
- radeon_ring_write(rdev, 0x00000000);
- radeon_ring_write(rdev, 0x00000000);
+ radeon_ring_write(ring, 0xc0026f00);
+ radeon_ring_write(ring, 0x00000000);
+ radeon_ring_write(ring, 0x00000000);
+ radeon_ring_write(ring, 0x00000000);
/* SET_SAMPLER */
- radeon_ring_write(rdev, 0xc0036e00);
- radeon_ring_write(rdev, 0x00000000);
- radeon_ring_write(rdev, 0x00000012);
- radeon_ring_write(rdev, 0x00000000);
- radeon_ring_write(rdev, 0x00000000);
+ radeon_ring_write(ring, 0xc0036e00);
+ radeon_ring_write(ring, 0x00000000);
+ radeon_ring_write(ring, 0x00000012);
+ radeon_ring_write(ring, 0x00000000);
+ radeon_ring_write(ring, 0x00000000);
/* set to DX10/11 mode */
- radeon_ring_write(rdev, PACKET3(PACKET3_MODE_CONTROL, 0));
- radeon_ring_write(rdev, 1);
+ radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
+ radeon_ring_write(ring, 1);
/* emit an IB pointing at default state */
dwords = ALIGN(rdev->r600_blit.state_len, 0x10);
gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
- radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
- radeon_ring_write(rdev, gpu_addr & 0xFFFFFFFC);
- radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF);
- radeon_ring_write(rdev, dwords);
+ radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+ radeon_ring_write(ring, gpu_addr & 0xFFFFFFFC);
+ radeon_ring_write(ring, upper_32_bits(gpu_addr) & 0xFF);
+ radeon_ring_write(ring, dwords);
}
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 38e1bda73d33..8e8cd85e5c00 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -38,6 +38,7 @@ struct evergreen_cs_track {
u32 group_size;
u32 nbanks;
u32 npipes;
+ u32 row_size;
/* value we track */
u32 nsamples;
u32 cb_color_base_last[12];
@@ -77,6 +78,44 @@ struct evergreen_cs_track {
struct radeon_bo *db_s_write_bo;
};
+static u32 evergreen_cs_get_aray_mode(u32 tiling_flags)
+{
+ if (tiling_flags & RADEON_TILING_MACRO)
+ return ARRAY_2D_TILED_THIN1;
+ else if (tiling_flags & RADEON_TILING_MICRO)
+ return ARRAY_1D_TILED_THIN1;
+ else
+ return ARRAY_LINEAR_GENERAL;
+}
+
+static u32 evergreen_cs_get_num_banks(u32 nbanks)
+{
+ switch (nbanks) {
+ case 2:
+ return ADDR_SURF_2_BANK;
+ case 4:
+ return ADDR_SURF_4_BANK;
+ case 8:
+ default:
+ return ADDR_SURF_8_BANK;
+ case 16:
+ return ADDR_SURF_16_BANK;
+ }
+}
+
+static u32 evergreen_cs_get_tile_split(u32 row_size)
+{
+ switch (row_size) {
+ case 1:
+ default:
+ return ADDR_SURF_TILE_SPLIT_1KB;
+ case 2:
+ return ADDR_SURF_TILE_SPLIT_2KB;
+ case 4:
+ return ADDR_SURF_TILE_SPLIT_4KB;
+ }
+}
+
static void evergreen_cs_track_init(struct evergreen_cs_track *track)
{
int i;
@@ -481,7 +520,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
break;
case DB_Z_INFO:
track->db_z_info = radeon_get_ib_value(p, idx);
- if (!p->keep_tiling_flags) {
+ if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
r = evergreen_cs_packet_next_reloc(p, &reloc);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
@@ -490,12 +529,11 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
}
ib[idx] &= ~Z_ARRAY_MODE(0xf);
track->db_z_info &= ~Z_ARRAY_MODE(0xf);
+ ib[idx] |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+ track->db_z_info |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
- ib[idx] |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
- track->db_z_info |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
- } else {
- ib[idx] |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
- track->db_z_info |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+ ib[idx] |= DB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
+ ib[idx] |= DB_TILE_SPLIT(evergreen_cs_get_tile_split(track->row_size));
}
}
break;
@@ -611,20 +649,15 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR7_INFO:
tmp = (reg - CB_COLOR0_INFO) / 0x3c;
track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
- if (!p->keep_tiling_flags) {
+ if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
r = evergreen_cs_packet_next_reloc(p, &reloc);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
return -EINVAL;
}
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
- ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
- track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
- } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
- ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
- track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
- }
+ ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+ track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
}
break;
case CB_COLOR8_INFO:
@@ -633,20 +666,15 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR11_INFO:
tmp = ((reg - CB_COLOR8_INFO) / 0x1c) + 8;
track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
- if (!p->keep_tiling_flags) {
+ if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
r = evergreen_cs_packet_next_reloc(p, &reloc);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
return -EINVAL;
}
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
- ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
- track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
- } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
- ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
- track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
- }
+ ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+ track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
}
break;
case CB_COLOR0_PITCH:
@@ -701,6 +729,16 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR9_ATTRIB:
case CB_COLOR10_ATTRIB:
case CB_COLOR11_ATTRIB:
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+ ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
+ ib[idx] |= CB_TILE_SPLIT(evergreen_cs_get_tile_split(track->row_size));
+ }
break;
case CB_COLOR0_DIM:
case CB_COLOR1_DIM:
@@ -1317,11 +1355,15 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
ib[idx+1+(i*8)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
- if (!p->keep_tiling_flags) {
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
- ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
- else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
- ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+ if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+ ib[idx+1+(i*8)+1] |=
+ TEX_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+ ib[idx+1+(i*8)+6] |=
+ TEX_TILE_SPLIT(evergreen_cs_get_tile_split(track->row_size));
+ ib[idx+1+(i*8)+7] |=
+ TEX_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
+ }
}
texture = reloc->robj;
/* tex mip base */
@@ -1422,6 +1464,7 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
{
struct radeon_cs_packet pkt;
struct evergreen_cs_track *track;
+ u32 tmp;
int r;
if (p->track == NULL) {
@@ -1430,9 +1473,63 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
if (track == NULL)
return -ENOMEM;
evergreen_cs_track_init(track);
- track->npipes = p->rdev->config.evergreen.tiling_npipes;
- track->nbanks = p->rdev->config.evergreen.tiling_nbanks;
- track->group_size = p->rdev->config.evergreen.tiling_group_size;
+ if (p->rdev->family >= CHIP_CAYMAN)
+ tmp = p->rdev->config.cayman.tile_config;
+ else
+ tmp = p->rdev->config.evergreen.tile_config;
+
+ switch (tmp & 0xf) {
+ case 0:
+ track->npipes = 1;
+ break;
+ case 1:
+ default:
+ track->npipes = 2;
+ break;
+ case 2:
+ track->npipes = 4;
+ break;
+ case 3:
+ track->npipes = 8;
+ break;
+ }
+
+ switch ((tmp & 0xf0) >> 4) {
+ case 0:
+ track->nbanks = 4;
+ break;
+ case 1:
+ default:
+ track->nbanks = 8;
+ break;
+ case 2:
+ track->nbanks = 16;
+ break;
+ }
+
+ switch ((tmp & 0xf00) >> 8) {
+ case 0:
+ track->group_size = 256;
+ break;
+ case 1:
+ default:
+ track->group_size = 512;
+ break;
+ }
+
+ switch ((tmp & 0xf000) >> 12) {
+ case 0:
+ track->row_size = 1;
+ break;
+ case 1:
+ default:
+ track->row_size = 2;
+ break;
+ case 2:
+ track->row_size = 4;
+ break;
+ }
+
p->track = track;
}
do {
@@ -1475,3 +1572,242 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
return 0;
}
+/* vm parser */
+static bool evergreen_vm_reg_valid(u32 reg)
+{
+ /* context regs are fine */
+ if (reg >= 0x28000)
+ return true;
+
+ /* check config regs */
+ switch (reg) {
+ case GRBM_GFX_INDEX:
+ case VGT_VTX_VECT_EJECT_REG:
+ case VGT_CACHE_INVALIDATION:
+ case VGT_GS_VERTEX_REUSE:
+ case VGT_PRIMITIVE_TYPE:
+ case VGT_INDEX_TYPE:
+ case VGT_NUM_INDICES:
+ case VGT_NUM_INSTANCES:
+ case VGT_COMPUTE_DIM_X:
+ case VGT_COMPUTE_DIM_Y:
+ case VGT_COMPUTE_DIM_Z:
+ case VGT_COMPUTE_START_X:
+ case VGT_COMPUTE_START_Y:
+ case VGT_COMPUTE_START_Z:
+ case VGT_COMPUTE_INDEX:
+ case VGT_COMPUTE_THREAD_GROUP_SIZE:
+ case VGT_HS_OFFCHIP_PARAM:
+ case PA_CL_ENHANCE:
+ case PA_SU_LINE_STIPPLE_VALUE:
+ case PA_SC_LINE_STIPPLE_STATE:
+ case PA_SC_ENHANCE:
+ case SQ_DYN_GPR_CNTL_PS_FLUSH_REQ:
+ case SQ_DYN_GPR_SIMD_LOCK_EN:
+ case SQ_CONFIG:
+ case SQ_GPR_RESOURCE_MGMT_1:
+ case SQ_GLOBAL_GPR_RESOURCE_MGMT_1:
+ case SQ_GLOBAL_GPR_RESOURCE_MGMT_2:
+ case SQ_CONST_MEM_BASE:
+ case SQ_STATIC_THREAD_MGMT_1:
+ case SQ_STATIC_THREAD_MGMT_2:
+ case SQ_STATIC_THREAD_MGMT_3:
+ case SPI_CONFIG_CNTL:
+ case SPI_CONFIG_CNTL_1:
+ case TA_CNTL_AUX:
+ case DB_DEBUG:
+ case DB_DEBUG2:
+ case DB_DEBUG3:
+ case DB_DEBUG4:
+ case DB_WATERMARKS:
+ case TD_PS_BORDER_COLOR_INDEX:
+ case TD_PS_BORDER_COLOR_RED:
+ case TD_PS_BORDER_COLOR_GREEN:
+ case TD_PS_BORDER_COLOR_BLUE:
+ case TD_PS_BORDER_COLOR_ALPHA:
+ case TD_VS_BORDER_COLOR_INDEX:
+ case TD_VS_BORDER_COLOR_RED:
+ case TD_VS_BORDER_COLOR_GREEN:
+ case TD_VS_BORDER_COLOR_BLUE:
+ case TD_VS_BORDER_COLOR_ALPHA:
+ case TD_GS_BORDER_COLOR_INDEX:
+ case TD_GS_BORDER_COLOR_RED:
+ case TD_GS_BORDER_COLOR_GREEN:
+ case TD_GS_BORDER_COLOR_BLUE:
+ case TD_GS_BORDER_COLOR_ALPHA:
+ case TD_HS_BORDER_COLOR_INDEX:
+ case TD_HS_BORDER_COLOR_RED:
+ case TD_HS_BORDER_COLOR_GREEN:
+ case TD_HS_BORDER_COLOR_BLUE:
+ case TD_HS_BORDER_COLOR_ALPHA:
+ case TD_LS_BORDER_COLOR_INDEX:
+ case TD_LS_BORDER_COLOR_RED:
+ case TD_LS_BORDER_COLOR_GREEN:
+ case TD_LS_BORDER_COLOR_BLUE:
+ case TD_LS_BORDER_COLOR_ALPHA:
+ case TD_CS_BORDER_COLOR_INDEX:
+ case TD_CS_BORDER_COLOR_RED:
+ case TD_CS_BORDER_COLOR_GREEN:
+ case TD_CS_BORDER_COLOR_BLUE:
+ case TD_CS_BORDER_COLOR_ALPHA:
+ case SQ_ESGS_RING_SIZE:
+ case SQ_GSVS_RING_SIZE:
+ case SQ_ESTMP_RING_SIZE:
+ case SQ_GSTMP_RING_SIZE:
+ case SQ_HSTMP_RING_SIZE:
+ case SQ_LSTMP_RING_SIZE:
+ case SQ_PSTMP_RING_SIZE:
+ case SQ_VSTMP_RING_SIZE:
+ case SQ_ESGS_RING_ITEMSIZE:
+ case SQ_ESTMP_RING_ITEMSIZE:
+ case SQ_GSTMP_RING_ITEMSIZE:
+ case SQ_GSVS_RING_ITEMSIZE:
+ case SQ_GS_VERT_ITEMSIZE:
+ case SQ_GS_VERT_ITEMSIZE_1:
+ case SQ_GS_VERT_ITEMSIZE_2:
+ case SQ_GS_VERT_ITEMSIZE_3:
+ case SQ_GSVS_RING_OFFSET_1:
+ case SQ_GSVS_RING_OFFSET_2:
+ case SQ_GSVS_RING_OFFSET_3:
+ case SQ_HSTMP_RING_ITEMSIZE:
+ case SQ_LSTMP_RING_ITEMSIZE:
+ case SQ_PSTMP_RING_ITEMSIZE:
+ case SQ_VSTMP_RING_ITEMSIZE:
+ case VGT_TF_RING_SIZE:
+ case SQ_ESGS_RING_BASE:
+ case SQ_GSVS_RING_BASE:
+ case SQ_ESTMP_RING_BASE:
+ case SQ_GSTMP_RING_BASE:
+ case SQ_HSTMP_RING_BASE:
+ case SQ_LSTMP_RING_BASE:
+ case SQ_PSTMP_RING_BASE:
+ case SQ_VSTMP_RING_BASE:
+ case CAYMAN_VGT_OFFCHIP_LDS_BASE:
+ case CAYMAN_SQ_EX_ALLOC_TABLE_SLOTS:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int evergreen_vm_packet3_check(struct radeon_device *rdev,
+ u32 *ib, struct radeon_cs_packet *pkt)
+{
+ u32 idx = pkt->idx + 1;
+ u32 idx_value = ib[idx];
+ u32 start_reg, end_reg, reg, i;
+
+ switch (pkt->opcode) {
+ case PACKET3_NOP:
+ case PACKET3_SET_BASE:
+ case PACKET3_CLEAR_STATE:
+ case PACKET3_INDEX_BUFFER_SIZE:
+ case PACKET3_DISPATCH_DIRECT:
+ case PACKET3_DISPATCH_INDIRECT:
+ case PACKET3_MODE_CONTROL:
+ case PACKET3_SET_PREDICATION:
+ case PACKET3_COND_EXEC:
+ case PACKET3_PRED_EXEC:
+ case PACKET3_DRAW_INDIRECT:
+ case PACKET3_DRAW_INDEX_INDIRECT:
+ case PACKET3_INDEX_BASE:
+ case PACKET3_DRAW_INDEX_2:
+ case PACKET3_CONTEXT_CONTROL:
+ case PACKET3_DRAW_INDEX_OFFSET:
+ case PACKET3_INDEX_TYPE:
+ case PACKET3_DRAW_INDEX:
+ case PACKET3_DRAW_INDEX_AUTO:
+ case PACKET3_DRAW_INDEX_IMMD:
+ case PACKET3_NUM_INSTANCES:
+ case PACKET3_DRAW_INDEX_MULTI_AUTO:
+ case PACKET3_STRMOUT_BUFFER_UPDATE:
+ case PACKET3_DRAW_INDEX_OFFSET_2:
+ case PACKET3_DRAW_INDEX_MULTI_ELEMENT:
+ case PACKET3_MPEG_INDEX:
+ case PACKET3_WAIT_REG_MEM:
+ case PACKET3_MEM_WRITE:
+ case PACKET3_SURFACE_SYNC:
+ case PACKET3_EVENT_WRITE:
+ case PACKET3_EVENT_WRITE_EOP:
+ case PACKET3_EVENT_WRITE_EOS:
+ case PACKET3_SET_CONTEXT_REG:
+ case PACKET3_SET_BOOL_CONST:
+ case PACKET3_SET_LOOP_CONST:
+ case PACKET3_SET_RESOURCE:
+ case PACKET3_SET_SAMPLER:
+ case PACKET3_SET_CTL_CONST:
+ case PACKET3_SET_RESOURCE_OFFSET:
+ case PACKET3_SET_CONTEXT_REG_INDIRECT:
+ case PACKET3_SET_RESOURCE_INDIRECT:
+ case CAYMAN_PACKET3_DEALLOC_STATE:
+ break;
+ case PACKET3_COND_WRITE:
+ if (idx_value & 0x100) {
+ reg = ib[idx + 5] * 4;
+ if (!evergreen_vm_reg_valid(reg))
+ return -EINVAL;
+ }
+ break;
+ case PACKET3_COPY_DW:
+ if (idx_value & 0x2) {
+ reg = ib[idx + 3] * 4;
+ if (!evergreen_vm_reg_valid(reg))
+ return -EINVAL;
+ }
+ break;
+ case PACKET3_SET_CONFIG_REG:
+ start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START;
+ end_reg = 4 * pkt->count + start_reg - 4;
+ if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
+ (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
+ (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
+ DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
+ return -EINVAL;
+ }
+ for (i = 0; i < pkt->count; i++) {
+ reg = start_reg + (4 * i);
+ if (!evergreen_vm_reg_valid(reg))
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+ int ret = 0;
+ u32 idx = 0;
+ struct radeon_cs_packet pkt;
+
+ do {
+ pkt.idx = idx;
+ pkt.type = CP_PACKET_GET_TYPE(ib->ptr[idx]);
+ pkt.count = CP_PACKET_GET_COUNT(ib->ptr[idx]);
+ pkt.one_reg_wr = 0;
+ switch (pkt.type) {
+ case PACKET_TYPE0:
+ dev_err(rdev->dev, "Packet0 not allowed!\n");
+ ret = -EINVAL;
+ break;
+ case PACKET_TYPE2:
+ idx += 1;
+ break;
+ case PACKET_TYPE3:
+ pkt.opcode = CP_PACKET3_GET_OPCODE(ib->ptr[idx]);
+ ret = evergreen_vm_packet3_check(rdev, ib->ptr, &pkt);
+ idx += pkt.count + 2;
+ break;
+ default:
+ dev_err(rdev->dev, "Unknown packet type %d !\n", pkt.type);
+ ret = -EINVAL;
+ break;
+ }
+ if (ret)
+ break;
+ } while (idx < ib->length_dw);
+
+ return ret;
+}
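For reference, the tile_config decode that evergreen_cs_parse() now performs packs four fields into the low 16 bits: pipes in bits 3:0, banks in 7:4, group size in 11:8 and row size in 15:12. A condensed restatement of the same mapping, with out-of-range values folded to the defaults chosen by the switch statements above (a sketch only, not a replacement for the code in the patch):

static void example_decode_tile_config(u32 tile_config,
				       struct evergreen_cs_track *track)
{
	u32 f;

	f = tile_config & 0xf;
	track->npipes = (f == 0) ? 1 : (f == 2) ? 4 : (f == 3) ? 8 : 2;

	f = (tile_config >> 4) & 0xf;
	track->nbanks = (f == 0) ? 4 : (f == 2) ? 16 : 8;

	f = (tile_config >> 8) & 0xf;
	track->group_size = (f == 0) ? 256 : 512;

	f = (tile_config >> 12) & 0xf;
	track->row_size = (f == 0) ? 1 : (f == 2) ? 4 : 2;
}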
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index c781c92c3451..4215de95477e 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -35,6 +35,14 @@
#define EVERGREEN_P1PLL_SS_CNTL 0x414
#define EVERGREEN_P2PLL_SS_CNTL 0x454
# define EVERGREEN_PxPLL_SS_EN (1 << 12)
+
+#define EVERGREEN_AUDIO_PLL1_MUL 0x5b0
+#define EVERGREEN_AUDIO_PLL1_DIV 0x5b4
+#define EVERGREEN_AUDIO_PLL1_UNK 0x5bc
+
+#define EVERGREEN_AUDIO_ENABLE 0x5e78
+#define EVERGREEN_AUDIO_VENDOR_ID 0x5ec0
+
/* GRPH blocks at 0x6800, 0x7400, 0x10000, 0x10c00, 0x11800, 0x12400 */
#define EVERGREEN_GRPH_ENABLE 0x6800
#define EVERGREEN_GRPH_CONTROL 0x6804
@@ -42,6 +50,17 @@
# define EVERGREEN_GRPH_DEPTH_8BPP 0
# define EVERGREEN_GRPH_DEPTH_16BPP 1
# define EVERGREEN_GRPH_DEPTH_32BPP 2
+# define EVERGREEN_GRPH_NUM_BANKS(x) (((x) & 0x3) << 2)
+# define EVERGREEN_ADDR_SURF_2_BANK 0
+# define EVERGREEN_ADDR_SURF_4_BANK 1
+# define EVERGREEN_ADDR_SURF_8_BANK 2
+# define EVERGREEN_ADDR_SURF_16_BANK 3
+# define EVERGREEN_GRPH_Z(x) (((x) & 0x3) << 4)
+# define EVERGREEN_GRPH_BANK_WIDTH(x) (((x) & 0x3) << 6)
+# define EVERGREEN_ADDR_SURF_BANK_WIDTH_1 0
+# define EVERGREEN_ADDR_SURF_BANK_WIDTH_2 1
+# define EVERGREEN_ADDR_SURF_BANK_WIDTH_4 2
+# define EVERGREEN_ADDR_SURF_BANK_WIDTH_8 3
# define EVERGREEN_GRPH_FORMAT(x) (((x) & 0x7) << 8)
/* 8 BPP */
# define EVERGREEN_GRPH_FORMAT_INDEXED 0
@@ -61,6 +80,24 @@
# define EVERGREEN_GRPH_FORMAT_8B_BGRA1010102 5
# define EVERGREEN_GRPH_FORMAT_RGB111110 6
# define EVERGREEN_GRPH_FORMAT_BGR101111 7
+# define EVERGREEN_GRPH_BANK_HEIGHT(x) (((x) & 0x3) << 11)
+# define EVERGREEN_ADDR_SURF_BANK_HEIGHT_1 0
+# define EVERGREEN_ADDR_SURF_BANK_HEIGHT_2 1
+# define EVERGREEN_ADDR_SURF_BANK_HEIGHT_4 2
+# define EVERGREEN_ADDR_SURF_BANK_HEIGHT_8 3
+# define EVERGREEN_GRPH_TILE_SPLIT(x) (((x) & 0x7) << 13)
+# define EVERGREEN_ADDR_SURF_TILE_SPLIT_64B 0
+# define EVERGREEN_ADDR_SURF_TILE_SPLIT_128B 1
+# define EVERGREEN_ADDR_SURF_TILE_SPLIT_256B 2
+# define EVERGREEN_ADDR_SURF_TILE_SPLIT_512B 3
+# define EVERGREEN_ADDR_SURF_TILE_SPLIT_1KB 4
+# define EVERGREEN_ADDR_SURF_TILE_SPLIT_2KB 5
+# define EVERGREEN_ADDR_SURF_TILE_SPLIT_4KB 6
+# define EVERGREEN_GRPH_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 18)
+# define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_1 0
+# define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_2 1
+# define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_4 2
+# define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_8 3
# define EVERGREEN_GRPH_ARRAY_MODE(x) (((x) & 0x7) << 20)
# define EVERGREEN_GRPH_ARRAY_LINEAR_GENERAL 0
# define EVERGREEN_GRPH_ARRAY_LINEAR_ALIGNED 1
@@ -191,4 +228,9 @@
#define EVERGREEN_DC_GPIO_HPD_EN 0x64b8
#define EVERGREEN_DC_GPIO_HPD_Y 0x64bc
+/* HDMI blocks at 0x7030, 0x7c30, 0x10830, 0x11430, 0x12030, 0x12c30 */
+#define EVERGREEN_HDMI_BASE 0x7030
+
+#define EVERGREEN_HDMI_CONFIG_OFFSET 0xf0
+
#endif
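The bank and tile-split fields added to the GRPH block above are plain shifted bitfields; as a hedged illustration, the tiling portion of a GRPH_CONTROL value might be assembled like this (the specific bank/tile-split choices are example values, not hardware defaults):

/* illustrative combination of the new GRPH_CONTROL tiling fields */
u32 grph_tiling = EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_8_BANK) |
		  EVERGREEN_GRPH_BANK_WIDTH(EVERGREEN_ADDR_SURF_BANK_WIDTH_1) |
		  EVERGREEN_GRPH_BANK_HEIGHT(EVERGREEN_ADDR_SURF_BANK_HEIGHT_2) |
		  EVERGREEN_GRPH_TILE_SPLIT(EVERGREEN_ADDR_SURF_TILE_SPLIT_1KB) |
		  EVERGREEN_GRPH_MACRO_TILE_ASPECT(EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_1);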
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index b937c49054d9..b502216d42af 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -242,6 +242,7 @@
#define PA_CL_ENHANCE 0x8A14
#define CLIP_VTX_REORDER_ENA (1 << 0)
#define NUM_CLIP_SEQ(x) ((x) << 1)
+#define PA_SC_ENHANCE 0x8BF0
#define PA_SC_AA_CONFIG 0x28C04
#define MSAA_NUM_SAMPLES_SHIFT 0
#define MSAA_NUM_SAMPLES_MASK 0x3
@@ -319,6 +320,8 @@
#define SQ_GPR_RESOURCE_MGMT_3 0x8C0C
#define NUM_HS_GPRS(x) ((x) << 0)
#define NUM_LS_GPRS(x) ((x) << 16)
+#define SQ_GLOBAL_GPR_RESOURCE_MGMT_1 0x8C10
+#define SQ_GLOBAL_GPR_RESOURCE_MGMT_2 0x8C14
#define SQ_THREAD_RESOURCE_MGMT 0x8C18
#define NUM_PS_THREADS(x) ((x) << 0)
#define NUM_VS_THREADS(x) ((x) << 8)
@@ -337,6 +340,10 @@
#define NUM_HS_STACK_ENTRIES(x) ((x) << 0)
#define NUM_LS_STACK_ENTRIES(x) ((x) << 16)
#define SQ_DYN_GPR_CNTL_PS_FLUSH_REQ 0x8D8C
+#define SQ_DYN_GPR_SIMD_LOCK_EN 0x8D94
+#define SQ_STATIC_THREAD_MGMT_1 0x8E20
+#define SQ_STATIC_THREAD_MGMT_2 0x8E24
+#define SQ_STATIC_THREAD_MGMT_3 0x8E28
#define SQ_LDS_RESOURCE_MGMT 0x8E2C
#define SQ_MS_FIFO_SIZES 0x8CF0
@@ -691,6 +698,7 @@
#define PACKET3_DRAW_INDEX_MULTI_ELEMENT 0x36
#define PACKET3_MEM_SEMAPHORE 0x39
#define PACKET3_MPEG_INDEX 0x3A
+#define PACKET3_COPY_DW 0x3B
#define PACKET3_WAIT_REG_MEM 0x3C
#define PACKET3_MEM_WRITE 0x3D
#define PACKET3_INDIRECT_BUFFER 0x32
@@ -768,6 +776,8 @@
#define SQ_TEX_VTX_VALID_TEXTURE 0x2
#define SQ_TEX_VTX_VALID_BUFFER 0x3
+#define VGT_VTX_VECT_EJECT_REG 0x88b0
+
#define SQ_CONST_MEM_BASE 0x8df8
#define SQ_ESGS_RING_BASE 0x8c40
@@ -892,13 +902,36 @@
#define PA_SC_SCREEN_SCISSOR_TL 0x28030
#define PA_SC_GENERIC_SCISSOR_TL 0x28240
#define PA_SC_WINDOW_SCISSOR_TL 0x28204
-#define VGT_PRIMITIVE_TYPE 0x8958
+#define VGT_PRIMITIVE_TYPE 0x8958
+#define VGT_INDEX_TYPE 0x895C
+
+#define VGT_NUM_INDICES 0x8970
+
+#define VGT_COMPUTE_DIM_X 0x8990
+#define VGT_COMPUTE_DIM_Y 0x8994
+#define VGT_COMPUTE_DIM_Z 0x8998
+#define VGT_COMPUTE_START_X 0x899C
+#define VGT_COMPUTE_START_Y 0x89A0
+#define VGT_COMPUTE_START_Z 0x89A4
+#define VGT_COMPUTE_INDEX 0x89A8
+#define VGT_COMPUTE_THREAD_GROUP_SIZE 0x89AC
+#define VGT_HS_OFFCHIP_PARAM 0x89B0
+
+#define DB_DEBUG 0x9830
+#define DB_DEBUG2 0x9834
+#define DB_DEBUG3 0x9838
+#define DB_DEBUG4 0x983C
+#define DB_WATERMARKS 0x9854
#define DB_DEPTH_CONTROL 0x28800
#define DB_DEPTH_VIEW 0x28008
#define DB_HTILE_DATA_BASE 0x28014
#define DB_Z_INFO 0x28040
# define Z_ARRAY_MODE(x) ((x) << 4)
+# define DB_TILE_SPLIT(x) (((x) & 0x7) << 8)
+# define DB_NUM_BANKS(x) (((x) & 0x3) << 12)
+# define DB_BANK_WIDTH(x) (((x) & 0x3) << 16)
+# define DB_BANK_HEIGHT(x) (((x) & 0x3) << 20)
#define DB_STENCIL_INFO 0x28044
#define DB_Z_READ_BASE 0x28048
#define DB_STENCIL_READ_BASE 0x2804c
@@ -951,6 +984,29 @@
# define CB_SF_EXPORT_FULL 0
# define CB_SF_EXPORT_NORM 1
#define CB_COLOR0_ATTRIB 0x28c74
+# define CB_TILE_SPLIT(x) (((x) & 0x7) << 5)
+# define ADDR_SURF_TILE_SPLIT_64B 0
+# define ADDR_SURF_TILE_SPLIT_128B 1
+# define ADDR_SURF_TILE_SPLIT_256B 2
+# define ADDR_SURF_TILE_SPLIT_512B 3
+# define ADDR_SURF_TILE_SPLIT_1KB 4
+# define ADDR_SURF_TILE_SPLIT_2KB 5
+# define ADDR_SURF_TILE_SPLIT_4KB 6
+# define CB_NUM_BANKS(x) (((x) & 0x3) << 10)
+# define ADDR_SURF_2_BANK 0
+# define ADDR_SURF_4_BANK 1
+# define ADDR_SURF_8_BANK 2
+# define ADDR_SURF_16_BANK 3
+# define CB_BANK_WIDTH(x) (((x) & 0x3) << 13)
+# define ADDR_SURF_BANK_WIDTH_1 0
+# define ADDR_SURF_BANK_WIDTH_2 1
+# define ADDR_SURF_BANK_WIDTH_4 2
+# define ADDR_SURF_BANK_WIDTH_8 3
+# define CB_BANK_HEIGHT(x) (((x) & 0x3) << 16)
+# define ADDR_SURF_BANK_HEIGHT_1 0
+# define ADDR_SURF_BANK_HEIGHT_2 1
+# define ADDR_SURF_BANK_HEIGHT_4 2
+# define ADDR_SURF_BANK_HEIGHT_8 3
#define CB_COLOR0_DIM 0x28c78
/* only CB0-7 blocks have these regs */
#define CB_COLOR0_CMASK 0x28c7c
@@ -1137,7 +1193,11 @@
# define SQ_SEL_1 5
#define SQ_TEX_RESOURCE_WORD5_0 0x30014
#define SQ_TEX_RESOURCE_WORD6_0 0x30018
+# define TEX_TILE_SPLIT(x) (((x) & 0x7) << 29)
#define SQ_TEX_RESOURCE_WORD7_0 0x3001c
+# define TEX_BANK_WIDTH(x) (((x) & 0x3) << 8)
+# define TEX_BANK_HEIGHT(x) (((x) & 0x3) << 10)
+# define TEX_NUM_BANKS(x) (((x) & 0x3) << 16)
#define SQ_VTX_CONSTANT_WORD0_0 0x30000
#define SQ_VTX_CONSTANT_WORD1_0 0x30004
@@ -1158,8 +1218,40 @@
#define SQ_VTX_CONSTANT_WORD6_0 0x30018
#define SQ_VTX_CONSTANT_WORD7_0 0x3001c
+#define TD_PS_BORDER_COLOR_INDEX 0xA400
+#define TD_PS_BORDER_COLOR_RED 0xA404
+#define TD_PS_BORDER_COLOR_GREEN 0xA408
+#define TD_PS_BORDER_COLOR_BLUE 0xA40C
+#define TD_PS_BORDER_COLOR_ALPHA 0xA410
+#define TD_VS_BORDER_COLOR_INDEX 0xA414
+#define TD_VS_BORDER_COLOR_RED 0xA418
+#define TD_VS_BORDER_COLOR_GREEN 0xA41C
+#define TD_VS_BORDER_COLOR_BLUE 0xA420
+#define TD_VS_BORDER_COLOR_ALPHA 0xA424
+#define TD_GS_BORDER_COLOR_INDEX 0xA428
+#define TD_GS_BORDER_COLOR_RED 0xA42C
+#define TD_GS_BORDER_COLOR_GREEN 0xA430
+#define TD_GS_BORDER_COLOR_BLUE 0xA434
+#define TD_GS_BORDER_COLOR_ALPHA 0xA438
+#define TD_HS_BORDER_COLOR_INDEX 0xA43C
+#define TD_HS_BORDER_COLOR_RED 0xA440
+#define TD_HS_BORDER_COLOR_GREEN 0xA444
+#define TD_HS_BORDER_COLOR_BLUE 0xA448
+#define TD_HS_BORDER_COLOR_ALPHA 0xA44C
+#define TD_LS_BORDER_COLOR_INDEX 0xA450
+#define TD_LS_BORDER_COLOR_RED 0xA454
+#define TD_LS_BORDER_COLOR_GREEN 0xA458
+#define TD_LS_BORDER_COLOR_BLUE 0xA45C
+#define TD_LS_BORDER_COLOR_ALPHA 0xA460
+#define TD_CS_BORDER_COLOR_INDEX 0xA464
+#define TD_CS_BORDER_COLOR_RED 0xA468
+#define TD_CS_BORDER_COLOR_GREEN 0xA46C
+#define TD_CS_BORDER_COLOR_BLUE 0xA470
+#define TD_CS_BORDER_COLOR_ALPHA 0xA474
+
/* cayman 3D regs */
-#define CAYMAN_VGT_OFFCHIP_LDS_BASE 0x89B0
+#define CAYMAN_VGT_OFFCHIP_LDS_BASE 0x89B4
+#define CAYMAN_SQ_EX_ALLOC_TABLE_SLOTS 0x8E48
#define CAYMAN_DB_EQAA 0x28804
#define CAYMAN_DB_DEPTH_INFO 0x2803C
#define CAYMAN_PA_SC_AA_CONFIG 0x28BE0
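The DB/CB/TEX tiling fields added above are what the evergreen CS checker now patches into command streams. As a quick illustration, a DB_Z_INFO value for a 2D-tiled depth surface could be built from these macros (bank count and tile split are example values only):

/* illustrative only; mirrors what evergreen_cs_check_reg() ORs into the IB */
u32 db_z_info = Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
		DB_NUM_BANKS(ADDR_SURF_8_BANK) |
		DB_TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB);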
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 0e5799857465..321137295400 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -934,7 +934,7 @@ void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev)
int cayman_pcie_gart_enable(struct radeon_device *rdev)
{
- int r;
+ int i, r;
if (rdev->gart.robj == NULL) {
dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
@@ -945,9 +945,12 @@ int cayman_pcie_gart_enable(struct radeon_device *rdev)
return r;
radeon_gart_restore(rdev);
/* Setup TLB control */
- WREG32(MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB |
+ WREG32(MC_VM_MX_L1_TLB_CNTL,
+ (0xA << 7) |
+ ENABLE_L1_TLB |
ENABLE_L1_FRAGMENT_PROCESSING |
SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+ ENABLE_ADVANCED_DRIVER_MODEL |
SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
/* Setup L2 cache */
WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
@@ -967,9 +970,26 @@ int cayman_pcie_gart_enable(struct radeon_device *rdev)
WREG32(VM_CONTEXT0_CNTL2, 0);
WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
- /* disable context1-7 */
+
+ WREG32(0x15D4, 0);
+ WREG32(0x15D8, 0);
+ WREG32(0x15DC, 0);
+
+ /* empty context1-7 */
+ for (i = 1; i < 8; i++) {
+ WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (i << 2), 0);
+ WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2), 0);
+ WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
+ rdev->gart.table_addr >> 12);
+ }
+
+ /* enable context1-7 */
+ WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
+ (u32)(rdev->dummy_page.addr >> 12));
WREG32(VM_CONTEXT1_CNTL2, 0);
WREG32(VM_CONTEXT1_CNTL, 0);
+ WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
+ RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
cayman_pcie_gart_tlb_flush(rdev);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
@@ -1006,9 +1026,69 @@ void cayman_pcie_gart_fini(struct radeon_device *rdev)
radeon_gart_fini(rdev);
}
+void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
+ int ring, u32 cp_int_cntl)
+{
+ u32 srbm_gfx_cntl = RREG32(SRBM_GFX_CNTL) & ~3;
+
+ WREG32(SRBM_GFX_CNTL, srbm_gfx_cntl | (ring & 3));
+ WREG32(CP_INT_CNTL, cp_int_cntl);
+}
+
/*
* CP.
*/
+void cayman_fence_ring_emit(struct radeon_device *rdev,
+ struct radeon_fence *fence)
+{
+ struct radeon_ring *ring = &rdev->ring[fence->ring];
+ u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
+
+ /* flush read cache over gart for this vmid */
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+ radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA);
+ radeon_ring_write(ring, 0xFFFFFFFF);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 10); /* poll interval */
+ /* EVENT_WRITE_EOP - flush caches, send int */
+ radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
+ radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
+ radeon_ring_write(ring, addr & 0xffffffff);
+ radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
+ radeon_ring_write(ring, fence->seq);
+ radeon_ring_write(ring, 0);
+}
+
+void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+ struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
+
+ /* set to DX10/11 mode */
+ radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
+ radeon_ring_write(ring, 1);
+ radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+ radeon_ring_write(ring,
+#ifdef __BIG_ENDIAN
+ (2 << 0) |
+#endif
+ (ib->gpu_addr & 0xFFFFFFFC));
+ radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
+ radeon_ring_write(ring, ib->length_dw | (ib->vm_id << 24));
+
+ /* flush read cache over gart for this vmid */
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
+ radeon_ring_write(ring, ib->vm_id);
+ radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+ radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA);
+ radeon_ring_write(ring, 0xFFFFFFFF);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 10); /* poll interval */
+}
+
static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
{
if (enable)
@@ -1049,63 +1129,64 @@ static int cayman_cp_load_microcode(struct radeon_device *rdev)
static int cayman_cp_start(struct radeon_device *rdev)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r, i;
- r = radeon_ring_lock(rdev, 7);
+ r = radeon_ring_lock(rdev, ring, 7);
if (r) {
DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
return r;
}
- radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
- radeon_ring_write(rdev, 0x1);
- radeon_ring_write(rdev, 0x0);
- radeon_ring_write(rdev, rdev->config.cayman.max_hw_contexts - 1);
- radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, 0);
- radeon_ring_unlock_commit(rdev);
+ radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
+ radeon_ring_write(ring, 0x1);
+ radeon_ring_write(ring, 0x0);
+ radeon_ring_write(ring, rdev->config.cayman.max_hw_contexts - 1);
+ radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0);
+ radeon_ring_unlock_commit(rdev, ring);
cayman_cp_enable(rdev, true);
- r = radeon_ring_lock(rdev, cayman_default_size + 19);
+ r = radeon_ring_lock(rdev, ring, cayman_default_size + 19);
if (r) {
DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
return r;
}
/* setup clear context state */
- radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- radeon_ring_write(rdev, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
+ radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+ radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
for (i = 0; i < cayman_default_size; i++)
- radeon_ring_write(rdev, cayman_default_state[i]);
+ radeon_ring_write(ring, cayman_default_state[i]);
- radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- radeon_ring_write(rdev, PACKET3_PREAMBLE_END_CLEAR_STATE);
+ radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+ radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
/* set clear context state */
- radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0));
- radeon_ring_write(rdev, 0);
+ radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
+ radeon_ring_write(ring, 0);
/* SQ_VTX_BASE_VTX_LOC */
- radeon_ring_write(rdev, 0xc0026f00);
- radeon_ring_write(rdev, 0x00000000);
- radeon_ring_write(rdev, 0x00000000);
- radeon_ring_write(rdev, 0x00000000);
+ radeon_ring_write(ring, 0xc0026f00);
+ radeon_ring_write(ring, 0x00000000);
+ radeon_ring_write(ring, 0x00000000);
+ radeon_ring_write(ring, 0x00000000);
/* Clear consts */
- radeon_ring_write(rdev, 0xc0036f00);
- radeon_ring_write(rdev, 0x00000bc4);
- radeon_ring_write(rdev, 0xffffffff);
- radeon_ring_write(rdev, 0xffffffff);
- radeon_ring_write(rdev, 0xffffffff);
+ radeon_ring_write(ring, 0xc0036f00);
+ radeon_ring_write(ring, 0x00000bc4);
+ radeon_ring_write(ring, 0xffffffff);
+ radeon_ring_write(ring, 0xffffffff);
+ radeon_ring_write(ring, 0xffffffff);
- radeon_ring_write(rdev, 0xc0026900);
- radeon_ring_write(rdev, 0x00000316);
- radeon_ring_write(rdev, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
- radeon_ring_write(rdev, 0x00000010); /* */
+ radeon_ring_write(ring, 0xc0026900);
+ radeon_ring_write(ring, 0x00000316);
+ radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
+ radeon_ring_write(ring, 0x00000010); /* */
- radeon_ring_unlock_commit(rdev);
+ radeon_ring_unlock_commit(rdev, ring);
/* XXX init other rings */
@@ -1115,11 +1196,12 @@ static int cayman_cp_start(struct radeon_device *rdev)
static void cayman_cp_fini(struct radeon_device *rdev)
{
cayman_cp_enable(rdev, false);
- radeon_ring_fini(rdev);
+ radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
}
int cayman_cp_resume(struct radeon_device *rdev)
{
+ struct radeon_ring *ring;
u32 tmp;
u32 rb_bufsz;
int r;
@@ -1136,7 +1218,7 @@ int cayman_cp_resume(struct radeon_device *rdev)
WREG32(GRBM_SOFT_RESET, 0);
RREG32(GRBM_SOFT_RESET);
- WREG32(CP_SEM_WAIT_TIMER, 0x4);
+ WREG32(CP_SEM_WAIT_TIMER, 0x0);
/* Set the write pointer delay */
WREG32(CP_RB_WPTR_DELAY, 0);
@@ -1145,7 +1227,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
/* ring 0 - compute and gfx */
/* Set ring buffer size */
- rb_bufsz = drm_order(rdev->cp.ring_size / 8);
+ ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ rb_bufsz = drm_order(ring->ring_size / 8);
tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
@@ -1154,8 +1237,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
/* Initialize the ring buffer's read and write pointers */
WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
- rdev->cp.wptr = 0;
- WREG32(CP_RB0_WPTR, rdev->cp.wptr);
+ ring->wptr = 0;
+ WREG32(CP_RB0_WPTR, ring->wptr);
/* set the wb address whether it's enabled or not */
WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
@@ -1172,13 +1255,14 @@ int cayman_cp_resume(struct radeon_device *rdev)
mdelay(1);
WREG32(CP_RB0_CNTL, tmp);
- WREG32(CP_RB0_BASE, rdev->cp.gpu_addr >> 8);
+ WREG32(CP_RB0_BASE, ring->gpu_addr >> 8);
- rdev->cp.rptr = RREG32(CP_RB0_RPTR);
+ ring->rptr = RREG32(CP_RB0_RPTR);
/* ring1 - compute only */
/* Set ring buffer size */
- rb_bufsz = drm_order(rdev->cp1.ring_size / 8);
+ ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
+ rb_bufsz = drm_order(ring->ring_size / 8);
tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
@@ -1187,8 +1271,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
/* Initialize the ring buffer's read and write pointers */
WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA);
- rdev->cp1.wptr = 0;
- WREG32(CP_RB1_WPTR, rdev->cp1.wptr);
+ ring->wptr = 0;
+ WREG32(CP_RB1_WPTR, ring->wptr);
/* set the wb address whether it's enabled or not */
WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC);
@@ -1197,13 +1281,14 @@ int cayman_cp_resume(struct radeon_device *rdev)
mdelay(1);
WREG32(CP_RB1_CNTL, tmp);
- WREG32(CP_RB1_BASE, rdev->cp1.gpu_addr >> 8);
+ WREG32(CP_RB1_BASE, ring->gpu_addr >> 8);
- rdev->cp1.rptr = RREG32(CP_RB1_RPTR);
+ ring->rptr = RREG32(CP_RB1_RPTR);
/* ring2 - compute only */
/* Set ring buffer size */
- rb_bufsz = drm_order(rdev->cp2.ring_size / 8);
+ ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
+ rb_bufsz = drm_order(ring->ring_size / 8);
tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
@@ -1212,8 +1297,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
/* Initialize the ring buffer's read and write pointers */
WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA);
- rdev->cp2.wptr = 0;
- WREG32(CP_RB2_WPTR, rdev->cp2.wptr);
+ ring->wptr = 0;
+ WREG32(CP_RB2_WPTR, ring->wptr);
/* set the wb address whether it's enabled or not */
WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC);
@@ -1222,28 +1307,28 @@ int cayman_cp_resume(struct radeon_device *rdev)
mdelay(1);
WREG32(CP_RB2_CNTL, tmp);
- WREG32(CP_RB2_BASE, rdev->cp2.gpu_addr >> 8);
+ WREG32(CP_RB2_BASE, ring->gpu_addr >> 8);
- rdev->cp2.rptr = RREG32(CP_RB2_RPTR);
+ ring->rptr = RREG32(CP_RB2_RPTR);
/* start the rings */
cayman_cp_start(rdev);
- rdev->cp.ready = true;
- rdev->cp1.ready = true;
- rdev->cp2.ready = true;
+ rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
+ rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
+ rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
/* this only tests cp0 */
- r = radeon_ring_test(rdev);
+ r = radeon_ring_test(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
if (r) {
- rdev->cp.ready = false;
- rdev->cp1.ready = false;
- rdev->cp2.ready = false;
+ rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+ rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
+ rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
return r;
}
return 0;
}
-bool cayman_gpu_is_lockup(struct radeon_device *rdev)
+bool cayman_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
u32 srbm_status;
u32 grbm_status;
@@ -1256,20 +1341,20 @@ bool cayman_gpu_is_lockup(struct radeon_device *rdev)
grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
if (!(grbm_status & GUI_ACTIVE)) {
- r100_gpu_lockup_update(lockup, &rdev->cp);
+ r100_gpu_lockup_update(lockup, ring);
return false;
}
/* force CP activities */
- r = radeon_ring_lock(rdev, 2);
+ r = radeon_ring_lock(rdev, ring, 2);
if (!r) {
/* PACKET2 NOP */
- radeon_ring_write(rdev, 0x80000000);
- radeon_ring_write(rdev, 0x80000000);
- radeon_ring_unlock_commit(rdev);
+ radeon_ring_write(ring, 0x80000000);
+ radeon_ring_write(ring, 0x80000000);
+ radeon_ring_unlock_commit(rdev, ring);
}
/* XXX deal with CP0,1,2 */
- rdev->cp.rptr = RREG32(CP_RB0_RPTR);
- return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
+ ring->rptr = RREG32(ring->rptr_reg);
+ return r100_gpu_cp_is_lockup(rdev, lockup, ring);
}
static int cayman_gpu_soft_reset(struct radeon_device *rdev)
@@ -1289,6 +1374,15 @@ static int cayman_gpu_soft_reset(struct radeon_device *rdev)
RREG32(GRBM_STATUS_SE1));
dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
RREG32(SRBM_STATUS));
+ dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_ADDR 0x%08X\n",
+ RREG32(0x14F8));
+ dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_STATUS 0x%08X\n",
+ RREG32(0x14D8));
+ dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
+ RREG32(0x14FC));
+ dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+ RREG32(0x14DC));
+
evergreen_mc_stop(rdev, &save);
if (evergreen_mc_wait_for_idle(rdev)) {
dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
@@ -1319,6 +1413,7 @@ static int cayman_gpu_soft_reset(struct radeon_device *rdev)
(void)RREG32(GRBM_SOFT_RESET);
/* Wait a little for things to settle down */
udelay(50);
+
dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
RREG32(GRBM_STATUS));
dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
@@ -1338,6 +1433,7 @@ int cayman_asic_reset(struct radeon_device *rdev)
static int cayman_startup(struct radeon_device *rdev)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
/* enable pcie gen2 link */
@@ -1378,6 +1474,24 @@ static int cayman_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+ return r;
+ }
+
+ r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+ return r;
+ }
+
+ r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+ return r;
+ }
+
/* Enable IRQ */
r = r600_irq_init(rdev);
if (r) {
@@ -1387,7 +1501,9 @@ static int cayman_startup(struct radeon_device *rdev)
}
evergreen_irq_set(rdev);
- r = radeon_ring_init(rdev, rdev->cp.ring_size);
+ r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
+ CP_RB0_RPTR, CP_RB0_WPTR,
+ 0, 0xfffff, RADEON_CP_PACKET2);
if (r)
return r;
r = cayman_cp_load_microcode(rdev);
@@ -1397,6 +1513,21 @@ static int cayman_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = radeon_ib_pool_start(rdev);
+ if (r)
+ return r;
+
+ r = r600_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ if (r) {
+ DRM_ERROR("radeon: failed testing IB (%d).\n", r);
+ rdev->accel_working = false;
+ return r;
+ }
+
+ r = radeon_vm_manager_start(rdev);
+ if (r)
+ return r;
+
return 0;
}
@@ -1411,32 +1542,26 @@ int cayman_resume(struct radeon_device *rdev)
/* post card */
atom_asic_init(rdev->mode_info.atom_context);
+ rdev->accel_working = true;
r = cayman_startup(rdev);
if (r) {
DRM_ERROR("cayman startup failed on resume\n");
return r;
}
-
- r = r600_ib_test(rdev);
- if (r) {
- DRM_ERROR("radeon: failled testing IB (%d).\n", r);
- return r;
- }
-
return r;
-
}
int cayman_suspend(struct radeon_device *rdev)
{
/* FIXME: we should wait for ring to be empty */
+ radeon_ib_pool_suspend(rdev);
+ radeon_vm_manager_suspend(rdev);
+ r600_blit_suspend(rdev);
cayman_cp_enable(rdev, false);
- rdev->cp.ready = false;
+ rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
evergreen_irq_suspend(rdev);
radeon_wb_disable(rdev);
cayman_pcie_gart_disable(rdev);
- r600_blit_suspend(rdev);
-
return 0;
}
@@ -1448,6 +1573,7 @@ int cayman_suspend(struct radeon_device *rdev)
*/
int cayman_init(struct radeon_device *rdev)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
/* This doesn't do much */
@@ -1500,8 +1626,8 @@ int cayman_init(struct radeon_device *rdev)
if (r)
return r;
- rdev->cp.ring_obj = NULL;
- r600_ring_init(rdev, 1024 * 1024);
+ ring->ring_obj = NULL;
+ r600_ring_init(rdev, ring, 1024 * 1024);
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
@@ -1510,29 +1636,29 @@ int cayman_init(struct radeon_device *rdev)
if (r)
return r;
+ r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+ rdev->accel_working = false;
+ }
+ r = radeon_vm_manager_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
+ }
+
r = cayman_startup(rdev);
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
cayman_cp_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
+ r100_ib_fini(rdev);
+ radeon_vm_manager_fini(rdev);
radeon_irq_kms_fini(rdev);
cayman_pcie_gart_fini(rdev);
rdev->accel_working = false;
}
- if (rdev->accel_working) {
- r = radeon_ib_pool_init(rdev);
- if (r) {
- DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
- rdev->accel_working = false;
- }
- r = r600_ib_test(rdev);
- if (r) {
- DRM_ERROR("radeon: failed testing IB (%d).\n", r);
- rdev->accel_working = false;
- }
- }
/* Don't start up if the MC ucode is missing.
* The default clocks and voltages before the MC ucode
@@ -1552,11 +1678,13 @@ void cayman_fini(struct radeon_device *rdev)
cayman_cp_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
- radeon_ib_pool_fini(rdev);
+ radeon_vm_manager_fini(rdev);
+ r100_ib_fini(rdev);
radeon_irq_kms_fini(rdev);
cayman_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
+ radeon_semaphore_driver_fini(rdev);
radeon_fence_driver_fini(rdev);
radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
@@ -1564,3 +1692,84 @@ void cayman_fini(struct radeon_device *rdev)
rdev->bios = NULL;
}
+/*
+ * vm
+ */
+int cayman_vm_init(struct radeon_device *rdev)
+{
+ /* number of VMs */
+ rdev->vm_manager.nvm = 8;
+ /* base offset of vram pages */
+ rdev->vm_manager.vram_base_offset = 0;
+ return 0;
+}
+
+void cayman_vm_fini(struct radeon_device *rdev)
+{
+}
+
+int cayman_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm, int id)
+{
+ WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (id << 2), 0);
+ WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (id << 2), vm->last_pfn);
+ WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (id << 2), vm->pt_gpu_addr >> 12);
+ /* flush hdp cache */
+ WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+ /* bits 0-7 are the VM contexts0-7 */
+ WREG32(VM_INVALIDATE_REQUEST, 1 << id);
+ return 0;
+}
+
+void cayman_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm)
+{
+ WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (vm->id << 2), 0);
+ WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (vm->id << 2), 0);
+ WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2), 0);
+ /* flush hdp cache */
+ WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+ /* bits 0-7 are the VM contexts0-7 */
+ WREG32(VM_INVALIDATE_REQUEST, 1 << vm->id);
+}
+
+void cayman_vm_tlb_flush(struct radeon_device *rdev, struct radeon_vm *vm)
+{
+ if (vm->id == -1)
+ return;
+
+ /* flush hdp cache */
+ WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+ /* bits 0-7 are the VM contexts0-7 */
+ WREG32(VM_INVALIDATE_REQUEST, 1 << vm->id);
+}
+
+#define R600_PTE_VALID (1 << 0)
+#define R600_PTE_SYSTEM (1 << 1)
+#define R600_PTE_SNOOPED (1 << 2)
+#define R600_PTE_READABLE (1 << 5)
+#define R600_PTE_WRITEABLE (1 << 6)
+
+uint32_t cayman_vm_page_flags(struct radeon_device *rdev,
+ struct radeon_vm *vm,
+ uint32_t flags)
+{
+ uint32_t r600_flags = 0;
+
+ r600_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_PTE_VALID : 0;
+ r600_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
+ r600_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
+ if (flags & RADEON_VM_PAGE_SYSTEM) {
+ r600_flags |= R600_PTE_SYSTEM;
+ r600_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
+ }
+ return r600_flags;
+}
+
+void cayman_vm_set_page(struct radeon_device *rdev, struct radeon_vm *vm,
+ unsigned pfn, uint64_t addr, uint32_t flags)
+{
+ void __iomem *ptr = (void *)vm->pt;
+
+ addr = addr & 0xFFFFFFFFFFFFF000ULL;
+ addr |= flags;
+ writeq(addr, ptr + (pfn * 8));
+}
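As a usage sketch for the new Cayman VM hooks, translating generic page flags into R600_PTE_* bits and writing one page table entry could look like the following; rdev and vm are assumed to be in scope, and the pfn/address values are made up for the example:

/* hypothetical call sequence built on cayman_vm_page_flags()/cayman_vm_set_page() */
unsigned pfn  = 0;
uint64_t addr = 0x12345000ULL;	/* example system page address */
uint32_t flags = cayman_vm_page_flags(rdev, vm,
				      RADEON_VM_PAGE_VALID |
				      RADEON_VM_PAGE_SYSTEM |
				      RADEON_VM_PAGE_SNOOPED |
				      RADEON_VM_PAGE_READABLE |
				      RADEON_VM_PAGE_WRITEABLE);

cayman_vm_set_page(rdev, vm, pfn, addr, flags);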
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index 4672869cdb26..f9df2a645e79 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -42,6 +42,9 @@
#define CAYMAN_MAX_TCC_MASK 0xFF
#define DMIF_ADDR_CONFIG 0xBD4
+#define SRBM_GFX_CNTL 0x0E44
+#define RINGID(x) (((x) & 0x3) << 0)
+#define VMID(x) (((x) & 0x7) << 0)
#define SRBM_STATUS 0x0E50
#define VM_CONTEXT0_REQUEST_RESPONSE 0x1470
@@ -219,6 +222,7 @@
#define SCRATCH_UMSK 0x8540
#define SCRATCH_ADDR 0x8544
#define CP_SEM_WAIT_TIMER 0x85BC
+#define CP_COHER_CNTL2 0x85E8
#define CP_ME_CNTL 0x86D8
#define CP_ME_HALT (1 << 28)
#define CP_PFP_HALT (1 << 26)
@@ -394,6 +398,12 @@
#define CP_RB0_RPTR_ADDR 0xC10C
#define CP_RB0_RPTR_ADDR_HI 0xC110
#define CP_RB0_WPTR 0xC114
+
+#define CP_INT_CNTL 0xC124
+# define CNTX_BUSY_INT_ENABLE (1 << 19)
+# define CNTX_EMPTY_INT_ENABLE (1 << 20)
+# define TIME_STAMP_INT_ENABLE (1 << 26)
+
#define CP_RB1_BASE 0xC180
#define CP_RB1_CNTL 0xC184
#define CP_RB1_RPTR_ADDR 0xC188
@@ -411,6 +421,10 @@
#define CP_ME_RAM_DATA 0xC160
#define CP_DEBUG 0xC1FC
+#define VGT_EVENT_INITIATOR 0x28a90
+# define CACHE_FLUSH_AND_INV_EVENT_TS (0x14 << 0)
+# define CACHE_FLUSH_AND_INV_EVENT (0x16 << 0)
+
/*
* PM4
*/
@@ -445,6 +459,7 @@
#define PACKET3_DISPATCH_DIRECT 0x15
#define PACKET3_DISPATCH_INDIRECT 0x16
#define PACKET3_INDIRECT_BUFFER_END 0x17
+#define PACKET3_MODE_CONTROL 0x18
#define PACKET3_SET_PREDICATION 0x20
#define PACKET3_REG_RMW 0x21
#define PACKET3_COND_EXEC 0x22
@@ -494,7 +509,27 @@
#define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
#define PACKET3_COND_WRITE 0x45
#define PACKET3_EVENT_WRITE 0x46
+#define EVENT_TYPE(x) ((x) << 0)
+#define EVENT_INDEX(x) ((x) << 8)
+ /* 0 - any non-TS event
+ * 1 - ZPASS_DONE
+ * 2 - SAMPLE_PIPELINESTAT
+ * 3 - SAMPLE_STREAMOUTSTAT*
+ * 4 - *S_PARTIAL_FLUSH
+ * 5 - TS events
+ */
#define PACKET3_EVENT_WRITE_EOP 0x47
+#define DATA_SEL(x) ((x) << 29)
+ /* 0 - discard
+ * 1 - send low 32bit data
+ * 2 - send 64bit data
+ * 3 - send 64bit counter value
+ */
+#define INT_SEL(x) ((x) << 24)
+ /* 0 - none
+ * 1 - interrupt only (DATA_SEL = 0)
+ * 2 - interrupt when data write is confirmed
+ */
#define PACKET3_EVENT_WRITE_EOS 0x48
#define PACKET3_PREAMBLE_CNTL 0x4A
# define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE (2 << 28)
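The new CP_INT_CNTL bits above pair with the per-ring cayman_cp_int_cntl_setup() helper added in ni.c earlier in this patch; a hedged example that enables the context and timestamp interrupts on ring 0 (the bit combination shown is illustrative, not a required setting):

cayman_cp_int_cntl_setup(rdev, 0,
			 CNTX_BUSY_INT_ENABLE |
			 CNTX_EMPTY_INT_ENABLE |
			 TIME_STAMP_INT_ENABLE);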
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index ad158ea49901..bfd36ab643a6 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -187,13 +187,18 @@ u32 r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK;
+ int i;
/* Lock the graphics update lock */
/* update the scanout addresses */
WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);
/* Wait for update_pending to go high. */
- while (!(RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET));
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET)
+ break;
+ udelay(1);
+ }
DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
/* Unlock the lock, so double-buffering can take place inside vblank */
@@ -662,7 +667,7 @@ int r100_irq_set(struct radeon_device *rdev)
WREG32(R_000040_GEN_INT_CNTL, 0);
return -EINVAL;
}
- if (rdev->irq.sw_int) {
+ if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
tmp |= RADEON_SW_INT_ENABLE;
}
if (rdev->irq.gui_idle) {
@@ -734,7 +739,7 @@ int r100_irq_process(struct radeon_device *rdev)
while (status) {
/* SW interrupt */
if (status & RADEON_SW_INT_TEST) {
- radeon_fence_process(rdev);
+ radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
}
/* gui idle interrupt */
if (status & RADEON_GUI_IDLE_STAT) {
@@ -806,25 +811,36 @@ u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
void r100_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence)
{
+ struct radeon_ring *ring = &rdev->ring[fence->ring];
+
/* We have to make sure that caches are flushed before
* CPU might read something from VRAM. */
- radeon_ring_write(rdev, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));
- radeon_ring_write(rdev, RADEON_RB3D_DC_FLUSH_ALL);
- radeon_ring_write(rdev, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));
- radeon_ring_write(rdev, RADEON_RB3D_ZC_FLUSH_ALL);
+ radeon_ring_write(ring, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));
+ radeon_ring_write(ring, RADEON_RB3D_DC_FLUSH_ALL);
+ radeon_ring_write(ring, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));
+ radeon_ring_write(ring, RADEON_RB3D_ZC_FLUSH_ALL);
/* Wait until IDLE & CLEAN */
- radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
- radeon_ring_write(rdev, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN);
- radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
- radeon_ring_write(rdev, rdev->config.r100.hdp_cntl |
+ radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
+ radeon_ring_write(ring, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN);
+ radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+ radeon_ring_write(ring, rdev->config.r100.hdp_cntl |
RADEON_HDP_READ_BUFFER_INVALIDATE);
- radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
- radeon_ring_write(rdev, rdev->config.r100.hdp_cntl);
+ radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+ radeon_ring_write(ring, rdev->config.r100.hdp_cntl);
/* Emit fence sequence & fire IRQ */
- radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
- radeon_ring_write(rdev, fence->seq);
- radeon_ring_write(rdev, PACKET0(RADEON_GEN_INT_STATUS, 0));
- radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
+ radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
+ radeon_ring_write(ring, fence->seq);
+ radeon_ring_write(ring, PACKET0(RADEON_GEN_INT_STATUS, 0));
+ radeon_ring_write(ring, RADEON_SW_INT_FIRE);
+}
+
+void r100_semaphore_ring_emit(struct radeon_device *rdev,
+ struct radeon_ring *ring,
+ struct radeon_semaphore *semaphore,
+ bool emit_wait)
+{
+ /* Unused on older asics, since we don't have semaphores or multiple rings */
+ BUG();
}
int r100_copy_blit(struct radeon_device *rdev,
@@ -833,6 +849,7 @@ int r100_copy_blit(struct radeon_device *rdev,
unsigned num_gpu_pages,
struct radeon_fence *fence)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
uint32_t cur_pages;
uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE;
uint32_t pitch;
@@ -850,7 +867,7 @@ int r100_copy_blit(struct radeon_device *rdev,
/* Ask for enough room for blit + flush + fence */
ndw = 64 + (10 * num_loops);
- r = radeon_ring_lock(rdev, ndw);
+ r = radeon_ring_lock(rdev, ring, ndw);
if (r) {
DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);
return -EINVAL;
@@ -864,8 +881,8 @@ int r100_copy_blit(struct radeon_device *rdev,
/* pages are in Y direction - height
page width in X direction - width */
- radeon_ring_write(rdev, PACKET3(PACKET3_BITBLT_MULTI, 8));
- radeon_ring_write(rdev,
+ radeon_ring_write(ring, PACKET3(PACKET3_BITBLT_MULTI, 8));
+ radeon_ring_write(ring,
RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
RADEON_GMC_DST_PITCH_OFFSET_CNTL |
RADEON_GMC_SRC_CLIPPING |
@@ -877,26 +894,26 @@ int r100_copy_blit(struct radeon_device *rdev,
RADEON_DP_SRC_SOURCE_MEMORY |
RADEON_GMC_CLR_CMP_CNTL_DIS |
RADEON_GMC_WR_MSK_DIS);
- radeon_ring_write(rdev, (pitch << 22) | (src_offset >> 10));
- radeon_ring_write(rdev, (pitch << 22) | (dst_offset >> 10));
- radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
- radeon_ring_write(rdev, num_gpu_pages);
- radeon_ring_write(rdev, num_gpu_pages);
- radeon_ring_write(rdev, cur_pages | (stride_pixels << 16));
- }
- radeon_ring_write(rdev, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0));
- radeon_ring_write(rdev, RADEON_RB2D_DC_FLUSH_ALL);
- radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
- radeon_ring_write(rdev,
+ radeon_ring_write(ring, (pitch << 22) | (src_offset >> 10));
+ radeon_ring_write(ring, (pitch << 22) | (dst_offset >> 10));
+ radeon_ring_write(ring, (0x1fff) | (0x1fff << 16));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, (0x1fff) | (0x1fff << 16));
+ radeon_ring_write(ring, num_gpu_pages);
+ radeon_ring_write(ring, num_gpu_pages);
+ radeon_ring_write(ring, cur_pages | (stride_pixels << 16));
+ }
+ radeon_ring_write(ring, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0));
+ radeon_ring_write(ring, RADEON_RB2D_DC_FLUSH_ALL);
+ radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
+ radeon_ring_write(ring,
RADEON_WAIT_2D_IDLECLEAN |
RADEON_WAIT_HOST_IDLECLEAN |
RADEON_WAIT_DMA_GUI_IDLE);
if (fence) {
r = radeon_fence_emit(rdev, fence);
}
- radeon_ring_unlock_commit(rdev);
+ radeon_ring_unlock_commit(rdev, ring);
return r;
}
@@ -917,19 +934,20 @@ static int r100_cp_wait_for_idle(struct radeon_device *rdev)
void r100_ring_start(struct radeon_device *rdev)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
- r = radeon_ring_lock(rdev, 2);
+ r = radeon_ring_lock(rdev, ring, 2);
if (r) {
return;
}
- radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0));
- radeon_ring_write(rdev,
+ radeon_ring_write(ring, PACKET0(RADEON_ISYNC_CNTL, 0));
+ radeon_ring_write(ring,
RADEON_ISYNC_ANY2D_IDLE3D |
RADEON_ISYNC_ANY3D_IDLE2D |
RADEON_ISYNC_WAIT_IDLEGUI |
RADEON_ISYNC_CPSCRATCH_IDLEGUI);
- radeon_ring_unlock_commit(rdev);
+ radeon_ring_unlock_commit(rdev, ring);
}
@@ -1030,6 +1048,7 @@ static void r100_cp_load_microcode(struct radeon_device *rdev)
int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
unsigned rb_bufsz;
unsigned rb_blksz;
unsigned max_fetch;
@@ -1055,7 +1074,9 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
rb_bufsz = drm_order(ring_size / 8);
ring_size = (1 << (rb_bufsz + 1)) * 4;
r100_cp_load_microcode(rdev);
- r = radeon_ring_init(rdev, ring_size);
+ r = radeon_ring_init(rdev, ring, ring_size, RADEON_WB_CP_RPTR_OFFSET,
+ RADEON_CP_RB_RPTR, RADEON_CP_RB_WPTR,
+ 0, 0x7fffff, RADEON_CP_PACKET2);
if (r) {
return r;
}
@@ -1064,7 +1085,7 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
rb_blksz = 9;
/* cp will read 128bytes at a time (4 dwords) */
max_fetch = 1;
- rdev->cp.align_mask = 16 - 1;
+ ring->align_mask = 16 - 1;
/* Write to CP_RB_WPTR will be delayed for pre_write_timer clocks */
pre_write_timer = 64;
/* Force CP_RB_WPTR write if written more than one time before the
@@ -1094,13 +1115,13 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_NO_UPDATE);
/* Set ring address */
- DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)rdev->cp.gpu_addr);
- WREG32(RADEON_CP_RB_BASE, rdev->cp.gpu_addr);
+ DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)ring->gpu_addr);
+ WREG32(RADEON_CP_RB_BASE, ring->gpu_addr);
/* Force read & write ptr to 0 */
WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA | RADEON_RB_NO_UPDATE);
WREG32(RADEON_CP_RB_RPTR_WR, 0);
- rdev->cp.wptr = 0;
- WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);
+ ring->wptr = 0;
+ WREG32(RADEON_CP_RB_WPTR, ring->wptr);
/* set the wb address whether it's enabled or not */
WREG32(R_00070C_CP_RB_RPTR_ADDR,
@@ -1116,7 +1137,7 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
WREG32(RADEON_CP_RB_CNTL, tmp);
udelay(10);
- rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
+ ring->rptr = RREG32(RADEON_CP_RB_RPTR);
/* Set cp mode to bus mastering & enable cp */
WREG32(RADEON_CP_CSQ_MODE,
REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
@@ -1125,12 +1146,12 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
WREG32(RADEON_CP_CSQ_MODE, 0x00004D4D);
WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM);
radeon_ring_start(rdev);
- r = radeon_ring_test(rdev);
+ r = radeon_ring_test(rdev, ring);
if (r) {
DRM_ERROR("radeon: cp isn't working (%d).\n", r);
return r;
}
- rdev->cp.ready = true;
+ ring->ready = true;
radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
return 0;
}
@@ -1142,7 +1163,7 @@ void r100_cp_fini(struct radeon_device *rdev)
}
/* Disable ring */
r100_cp_disable(rdev);
- radeon_ring_fini(rdev);
+ radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
DRM_INFO("radeon: cp finalized\n");
}
@@ -1150,7 +1171,7 @@ void r100_cp_disable(struct radeon_device *rdev)
{
/* Disable ring */
radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
- rdev->cp.ready = false;
+ rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
WREG32(RADEON_CP_CSQ_MODE, 0);
WREG32(RADEON_CP_CSQ_CNTL, 0);
WREG32(R_000770_SCRATCH_UMSK, 0);
@@ -1160,13 +1181,6 @@ void r100_cp_disable(struct radeon_device *rdev)
}
}
-void r100_cp_commit(struct radeon_device *rdev)
-{
- WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);
- (void)RREG32(RADEON_CP_RB_WPTR);
-}
-
-
/*
* CS functions
*/
@@ -2094,9 +2108,9 @@ int r100_mc_wait_for_idle(struct radeon_device *rdev)
return -1;
}
-void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_cp *cp)
+void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_ring *ring)
{
- lockup->last_cp_rptr = cp->rptr;
+ lockup->last_cp_rptr = ring->rptr;
lockup->last_jiffies = jiffies;
}
@@ -2121,20 +2135,20 @@ void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_cp *cp
 * false positive when CP is just given nothing to do.
*
**/
-bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_cp *cp)
+bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_ring *ring)
{
unsigned long cjiffies, elapsed;
cjiffies = jiffies;
if (!time_after(cjiffies, lockup->last_jiffies)) {
/* likely a wrap around */
- lockup->last_cp_rptr = cp->rptr;
+ lockup->last_cp_rptr = ring->rptr;
lockup->last_jiffies = jiffies;
return false;
}
- if (cp->rptr != lockup->last_cp_rptr) {
+ if (ring->rptr != lockup->last_cp_rptr) {
/* CP is still working, no lockup */
- lockup->last_cp_rptr = cp->rptr;
+ lockup->last_cp_rptr = ring->rptr;
lockup->last_jiffies = jiffies;
return false;
}
@@ -2147,26 +2161,26 @@ bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *l
return false;
}
-bool r100_gpu_is_lockup(struct radeon_device *rdev)
+bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
u32 rbbm_status;
int r;
rbbm_status = RREG32(R_000E40_RBBM_STATUS);
if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
- r100_gpu_lockup_update(&rdev->config.r100.lockup, &rdev->cp);
+ r100_gpu_lockup_update(&rdev->config.r100.lockup, ring);
return false;
}
/* force CP activities */
- r = radeon_ring_lock(rdev, 2);
+ r = radeon_ring_lock(rdev, ring, 2);
if (!r) {
/* PACKET2 NOP */
- radeon_ring_write(rdev, 0x80000000);
- radeon_ring_write(rdev, 0x80000000);
- radeon_ring_unlock_commit(rdev);
+ radeon_ring_write(ring, 0x80000000);
+ radeon_ring_write(ring, 0x80000000);
+ radeon_ring_unlock_commit(rdev, ring);
}
- rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
- return r100_gpu_cp_is_lockup(rdev, &rdev->config.r100.lockup, &rdev->cp);
+ ring->rptr = RREG32(ring->rptr_reg);
+ return r100_gpu_cp_is_lockup(rdev, &rdev->config.r100.lockup, ring);
}
void r100_bm_disable(struct radeon_device *rdev)
@@ -2182,8 +2196,7 @@ void r100_bm_disable(struct radeon_device *rdev)
WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040);
tmp = RREG32(RADEON_BUS_CNTL);
mdelay(1);
- pci_read_config_word(rdev->pdev, 0x4, (u16*)&tmp);
- pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB);
+ pci_clear_master(rdev->pdev);
mdelay(1);
}
@@ -2574,21 +2587,22 @@ static int r100_debugfs_cp_ring_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private;
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
uint32_t rdp, wdp;
unsigned count, i, j;
- radeon_ring_free_size(rdev);
+ radeon_ring_free_size(rdev, ring);
rdp = RREG32(RADEON_CP_RB_RPTR);
wdp = RREG32(RADEON_CP_RB_WPTR);
- count = (rdp + rdev->cp.ring_size - wdp) & rdev->cp.ptr_mask;
+ count = (rdp + ring->ring_size - wdp) & ring->ptr_mask;
seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp);
seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
- seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
+ seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
seq_printf(m, "%u dwords in ring\n", count);
for (j = 0; j <= count; j++) {
- i = (rdp + j) & rdev->cp.ptr_mask;
- seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
+ i = (rdp + j) & ring->ptr_mask;
+ seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
}
return 0;
}
@@ -3630,7 +3644,7 @@ void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track
}
}
-int r100_ring_test(struct radeon_device *rdev)
+int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
uint32_t scratch;
uint32_t tmp = 0;
@@ -3643,15 +3657,15 @@ int r100_ring_test(struct radeon_device *rdev)
return r;
}
WREG32(scratch, 0xCAFEDEAD);
- r = radeon_ring_lock(rdev, 2);
+ r = radeon_ring_lock(rdev, ring, 2);
if (r) {
DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
radeon_scratch_free(rdev, scratch);
return r;
}
- radeon_ring_write(rdev, PACKET0(scratch, 0));
- radeon_ring_write(rdev, 0xDEADBEEF);
- radeon_ring_unlock_commit(rdev);
+ radeon_ring_write(ring, PACKET0(scratch, 0));
+ radeon_ring_write(ring, 0xDEADBEEF);
+ radeon_ring_unlock_commit(rdev, ring);
for (i = 0; i < rdev->usec_timeout; i++) {
tmp = RREG32(scratch);
if (tmp == 0xDEADBEEF) {
@@ -3672,9 +3686,11 @@ int r100_ring_test(struct radeon_device *rdev)
void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
- radeon_ring_write(rdev, PACKET0(RADEON_CP_IB_BASE, 1));
- radeon_ring_write(rdev, ib->gpu_addr);
- radeon_ring_write(rdev, ib->length_dw);
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+
+ radeon_ring_write(ring, PACKET0(RADEON_CP_IB_BASE, 1));
+ radeon_ring_write(ring, ib->gpu_addr);
+ radeon_ring_write(ring, ib->length_dw);
}
int r100_ib_test(struct radeon_device *rdev)
@@ -3691,7 +3707,7 @@ int r100_ib_test(struct radeon_device *rdev)
return r;
}
WREG32(scratch, 0xCAFEDEAD);
- r = radeon_ib_get(rdev, &ib);
+ r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib, 256);
if (r) {
return r;
}
@@ -3735,34 +3751,16 @@ int r100_ib_test(struct radeon_device *rdev)
void r100_ib_fini(struct radeon_device *rdev)
{
+ radeon_ib_pool_suspend(rdev);
radeon_ib_pool_fini(rdev);
}
-int r100_ib_init(struct radeon_device *rdev)
-{
- int r;
-
- r = radeon_ib_pool_init(rdev);
- if (r) {
- dev_err(rdev->dev, "failed initializing IB pool (%d).\n", r);
- r100_ib_fini(rdev);
- return r;
- }
- r = r100_ib_test(rdev);
- if (r) {
- dev_err(rdev->dev, "failed testing IB (%d).\n", r);
- r100_ib_fini(rdev);
- return r;
- }
- return 0;
-}
-
void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save)
{
/* Shutdown CP; we shouldn't need to do that, but better safe than
* sorry
*/
- rdev->cp.ready = false;
+ rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
WREG32(R_000740_CP_CSQ_CNTL, 0);
/* Save few CRTC registers */
@@ -3900,6 +3898,12 @@ static int r100_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+ return r;
+ }
+
/* Enable IRQ */
r100_irq_set(rdev);
rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -3909,11 +3913,18 @@ static int r100_startup(struct radeon_device *rdev)
dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
- r = r100_ib_init(rdev);
+
+ r = radeon_ib_pool_start(rdev);
+ if (r)
+ return r;
+
+ r = r100_ib_test(rdev);
if (r) {
- dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
+ dev_err(rdev->dev, "failed testing IB (%d).\n", r);
+ rdev->accel_working = false;
return r;
}
+
return 0;
}
@@ -3936,11 +3947,14 @@ int r100_resume(struct radeon_device *rdev)
r100_clock_startup(rdev);
/* Initialize surface registers */
radeon_surface_init(rdev);
+
+ rdev->accel_working = true;
return r100_startup(rdev);
}
int r100_suspend(struct radeon_device *rdev)
{
+ radeon_ib_pool_suspend(rdev);
r100_cp_disable(rdev);
radeon_wb_disable(rdev);
r100_irq_disable(rdev);
@@ -4059,7 +4073,14 @@ int r100_init(struct radeon_device *rdev)
return r;
}
r100_set_safe_registers(rdev);
+
+ r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+ rdev->accel_working = false;
+ }
+
r = r100_startup(rdev);
if (r) {
/* Something went wrong with the accel init, stop accel */
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index a1f3ba063c2d..eba4cbfa78f6 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -87,6 +87,7 @@ int r200_copy_dma(struct radeon_device *rdev,
unsigned num_gpu_pages,
struct radeon_fence *fence)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
uint32_t size;
uint32_t cur_size;
int i, num_loops;
@@ -95,33 +96,33 @@ int r200_copy_dma(struct radeon_device *rdev,
/* radeon pitch is /64 */
size = num_gpu_pages << RADEON_GPU_PAGE_SHIFT;
num_loops = DIV_ROUND_UP(size, 0x1FFFFF);
- r = radeon_ring_lock(rdev, num_loops * 4 + 64);
+ r = radeon_ring_lock(rdev, ring, num_loops * 4 + 64);
if (r) {
DRM_ERROR("radeon: moving bo (%d).\n", r);
return r;
}
/* Must wait for 2D idle & clean before DMA or hangs might happen */
- radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
- radeon_ring_write(rdev, (1 << 16));
+ radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
+ radeon_ring_write(ring, (1 << 16));
for (i = 0; i < num_loops; i++) {
cur_size = size;
if (cur_size > 0x1FFFFF) {
cur_size = 0x1FFFFF;
}
size -= cur_size;
- radeon_ring_write(rdev, PACKET0(0x720, 2));
- radeon_ring_write(rdev, src_offset);
- radeon_ring_write(rdev, dst_offset);
- radeon_ring_write(rdev, cur_size | (1 << 31) | (1 << 30));
+ radeon_ring_write(ring, PACKET0(0x720, 2));
+ radeon_ring_write(ring, src_offset);
+ radeon_ring_write(ring, dst_offset);
+ radeon_ring_write(ring, cur_size | (1 << 31) | (1 << 30));
src_offset += cur_size;
dst_offset += cur_size;
}
- radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
- radeon_ring_write(rdev, RADEON_WAIT_DMA_GUI_IDLE);
+ radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
+ radeon_ring_write(ring, RADEON_WAIT_DMA_GUI_IDLE);
if (fence) {
r = radeon_fence_emit(rdev, fence);
}
- radeon_ring_unlock_commit(rdev);
+ radeon_ring_unlock_commit(rdev, ring);
return r;
}
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index c93bc64707e1..3fc0d29a5f39 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -175,37 +175,40 @@ void rv370_pcie_gart_fini(struct radeon_device *rdev)
void r300_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence)
{
+ struct radeon_ring *ring = &rdev->ring[fence->ring];
+
/* Whoever calls radeon_fence_emit should call ring_lock and ask
 * for enough space (today callers are ib schedule and buffer move) */
/* Write SC register so SC & US assert idle */
- radeon_ring_write(rdev, PACKET0(R300_RE_SCISSORS_TL, 0));
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, PACKET0(R300_RE_SCISSORS_BR, 0));
- radeon_ring_write(rdev, 0);
+ radeon_ring_write(ring, PACKET0(R300_RE_SCISSORS_TL, 0));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, PACKET0(R300_RE_SCISSORS_BR, 0));
+ radeon_ring_write(ring, 0);
/* Flush 3D cache */
- radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
- radeon_ring_write(rdev, R300_RB3D_DC_FLUSH);
- radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
- radeon_ring_write(rdev, R300_ZC_FLUSH);
+ radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+ radeon_ring_write(ring, R300_RB3D_DC_FLUSH);
+ radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
+ radeon_ring_write(ring, R300_ZC_FLUSH);
/* Wait until IDLE & CLEAN */
- radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
- radeon_ring_write(rdev, (RADEON_WAIT_3D_IDLECLEAN |
+ radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
+ radeon_ring_write(ring, (RADEON_WAIT_3D_IDLECLEAN |
RADEON_WAIT_2D_IDLECLEAN |
RADEON_WAIT_DMA_GUI_IDLE));
- radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
- radeon_ring_write(rdev, rdev->config.r300.hdp_cntl |
+ radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+ radeon_ring_write(ring, rdev->config.r300.hdp_cntl |
RADEON_HDP_READ_BUFFER_INVALIDATE);
- radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
- radeon_ring_write(rdev, rdev->config.r300.hdp_cntl);
+ radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+ radeon_ring_write(ring, rdev->config.r300.hdp_cntl);
/* Emit fence sequence & fire IRQ */
- radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
- radeon_ring_write(rdev, fence->seq);
- radeon_ring_write(rdev, PACKET0(RADEON_GEN_INT_STATUS, 0));
- radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
+ radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
+ radeon_ring_write(ring, fence->seq);
+ radeon_ring_write(ring, PACKET0(RADEON_GEN_INT_STATUS, 0));
+ radeon_ring_write(ring, RADEON_SW_INT_FIRE);
}
void r300_ring_start(struct radeon_device *rdev)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
unsigned gb_tile_config;
int r;
@@ -227,44 +230,44 @@ void r300_ring_start(struct radeon_device *rdev)
break;
}
- r = radeon_ring_lock(rdev, 64);
+ r = radeon_ring_lock(rdev, ring, 64);
if (r) {
return;
}
- radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0));
- radeon_ring_write(rdev,
+ radeon_ring_write(ring, PACKET0(RADEON_ISYNC_CNTL, 0));
+ radeon_ring_write(ring,
RADEON_ISYNC_ANY2D_IDLE3D |
RADEON_ISYNC_ANY3D_IDLE2D |
RADEON_ISYNC_WAIT_IDLEGUI |
RADEON_ISYNC_CPSCRATCH_IDLEGUI);
- radeon_ring_write(rdev, PACKET0(R300_GB_TILE_CONFIG, 0));
- radeon_ring_write(rdev, gb_tile_config);
- radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
- radeon_ring_write(rdev,
+ radeon_ring_write(ring, PACKET0(R300_GB_TILE_CONFIG, 0));
+ radeon_ring_write(ring, gb_tile_config);
+ radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
+ radeon_ring_write(ring,
RADEON_WAIT_2D_IDLECLEAN |
RADEON_WAIT_3D_IDLECLEAN);
- radeon_ring_write(rdev, PACKET0(R300_DST_PIPE_CONFIG, 0));
- radeon_ring_write(rdev, R300_PIPE_AUTO_CONFIG);
- radeon_ring_write(rdev, PACKET0(R300_GB_SELECT, 0));
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, PACKET0(R300_GB_ENABLE, 0));
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
- radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
- radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
- radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
- radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
- radeon_ring_write(rdev,
+ radeon_ring_write(ring, PACKET0(R300_DST_PIPE_CONFIG, 0));
+ radeon_ring_write(ring, R300_PIPE_AUTO_CONFIG);
+ radeon_ring_write(ring, PACKET0(R300_GB_SELECT, 0));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, PACKET0(R300_GB_ENABLE, 0));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+ radeon_ring_write(ring, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
+ radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
+ radeon_ring_write(ring, R300_ZC_FLUSH | R300_ZC_FREE);
+ radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
+ radeon_ring_write(ring,
RADEON_WAIT_2D_IDLECLEAN |
RADEON_WAIT_3D_IDLECLEAN);
- radeon_ring_write(rdev, PACKET0(R300_GB_AA_CONFIG, 0));
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
- radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
- radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
- radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
- radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS0, 0));
- radeon_ring_write(rdev,
+ radeon_ring_write(ring, PACKET0(R300_GB_AA_CONFIG, 0));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+ radeon_ring_write(ring, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
+ radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
+ radeon_ring_write(ring, R300_ZC_FLUSH | R300_ZC_FREE);
+ radeon_ring_write(ring, PACKET0(R300_GB_MSPOS0, 0));
+ radeon_ring_write(ring,
((6 << R300_MS_X0_SHIFT) |
(6 << R300_MS_Y0_SHIFT) |
(6 << R300_MS_X1_SHIFT) |
@@ -273,8 +276,8 @@ void r300_ring_start(struct radeon_device *rdev)
(6 << R300_MS_Y2_SHIFT) |
(6 << R300_MSBD0_Y_SHIFT) |
(6 << R300_MSBD0_X_SHIFT)));
- radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS1, 0));
- radeon_ring_write(rdev,
+ radeon_ring_write(ring, PACKET0(R300_GB_MSPOS1, 0));
+ radeon_ring_write(ring,
((6 << R300_MS_X3_SHIFT) |
(6 << R300_MS_Y3_SHIFT) |
(6 << R300_MS_X4_SHIFT) |
@@ -282,16 +285,16 @@ void r300_ring_start(struct radeon_device *rdev)
(6 << R300_MS_X5_SHIFT) |
(6 << R300_MS_Y5_SHIFT) |
(6 << R300_MSBD1_SHIFT)));
- radeon_ring_write(rdev, PACKET0(R300_GA_ENHANCE, 0));
- radeon_ring_write(rdev, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL);
- radeon_ring_write(rdev, PACKET0(R300_GA_POLY_MODE, 0));
- radeon_ring_write(rdev,
+ radeon_ring_write(ring, PACKET0(R300_GA_ENHANCE, 0));
+ radeon_ring_write(ring, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL);
+ radeon_ring_write(ring, PACKET0(R300_GA_POLY_MODE, 0));
+ radeon_ring_write(ring,
R300_FRONT_PTYPE_TRIANGE | R300_BACK_PTYPE_TRIANGE);
- radeon_ring_write(rdev, PACKET0(R300_GA_ROUND_MODE, 0));
- radeon_ring_write(rdev,
+ radeon_ring_write(ring, PACKET0(R300_GA_ROUND_MODE, 0));
+ radeon_ring_write(ring,
R300_GEOMETRY_ROUND_NEAREST |
R300_COLOR_ROUND_NEAREST);
- radeon_ring_unlock_commit(rdev);
+ radeon_ring_unlock_commit(rdev, ring);
}
void r300_errata(struct radeon_device *rdev)
@@ -375,26 +378,26 @@ void r300_gpu_init(struct radeon_device *rdev)
rdev->num_gb_pipes, rdev->num_z_pipes);
}
-bool r300_gpu_is_lockup(struct radeon_device *rdev)
+bool r300_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
u32 rbbm_status;
int r;
rbbm_status = RREG32(R_000E40_RBBM_STATUS);
if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
- r100_gpu_lockup_update(&rdev->config.r300.lockup, &rdev->cp);
+ r100_gpu_lockup_update(&rdev->config.r300.lockup, ring);
return false;
}
/* force CP activities */
- r = radeon_ring_lock(rdev, 2);
+ r = radeon_ring_lock(rdev, ring, 2);
if (!r) {
/* PACKET2 NOP */
- radeon_ring_write(rdev, 0x80000000);
- radeon_ring_write(rdev, 0x80000000);
- radeon_ring_unlock_commit(rdev);
+ radeon_ring_write(ring, 0x80000000);
+ radeon_ring_write(ring, 0x80000000);
+ radeon_ring_unlock_commit(rdev, ring);
}
- rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
- return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, &rdev->cp);
+ ring->rptr = RREG32(RADEON_CP_RB_RPTR);
+ return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, ring);
}
int r300_asic_reset(struct radeon_device *rdev)
@@ -701,7 +704,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
return r;
}
- if (p->keep_tiling_flags) {
+ if (p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) {
ib[idx] = (idx_value & 31) | /* keep the 1st 5 bits */
((idx_value & ~31) + (u32)reloc->lobj.gpu_offset);
} else {
@@ -765,7 +768,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
/* RB3D_COLORPITCH1 */
/* RB3D_COLORPITCH2 */
/* RB3D_COLORPITCH3 */
- if (!p->keep_tiling_flags) {
+ if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
@@ -850,7 +853,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
break;
case 0x4F24:
/* ZB_DEPTHPITCH */
- if (!p->keep_tiling_flags) {
+ if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
@@ -1396,6 +1399,12 @@ static int r300_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+ return r;
+ }
+
/* Enable IRQ */
r100_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -1405,11 +1414,18 @@ static int r300_startup(struct radeon_device *rdev)
dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
- r = r100_ib_init(rdev);
+
+ r = radeon_ib_pool_start(rdev);
+ if (r)
+ return r;
+
+ r = r100_ib_test(rdev);
if (r) {
- dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
+ dev_err(rdev->dev, "failed testing IB (%d).\n", r);
+ rdev->accel_working = false;
return r;
}
+
return 0;
}
@@ -1434,11 +1450,14 @@ int r300_resume(struct radeon_device *rdev)
r300_clock_startup(rdev);
/* Initialize surface registers */
radeon_surface_init(rdev);
+
+ rdev->accel_working = true;
return r300_startup(rdev);
}
int r300_suspend(struct radeon_device *rdev)
{
+ radeon_ib_pool_suspend(rdev);
r100_cp_disable(rdev);
radeon_wb_disable(rdev);
r100_irq_disable(rdev);
@@ -1539,7 +1558,14 @@ int r300_init(struct radeon_device *rdev)
return r;
}
r300_set_reg_safe(rdev);
+
+ r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+ rdev->accel_working = false;
+ }
+
r = r300_startup(rdev);
if (r) {
/* Something went wrong with the accel init, stop accel */
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 417fab81812f..666e28fe509c 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -199,6 +199,8 @@ static void r420_clock_resume(struct radeon_device *rdev)
static void r420_cp_errata_init(struct radeon_device *rdev)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+
/* RV410 and R420 can lock up if CP DMA to host memory happens
* while the 2D engine is busy.
*
@@ -206,22 +208,24 @@ static void r420_cp_errata_init(struct radeon_device *rdev)
* of the CP init, apparently.
*/
radeon_scratch_get(rdev, &rdev->config.r300.resync_scratch);
- radeon_ring_lock(rdev, 8);
- radeon_ring_write(rdev, PACKET0(R300_CP_RESYNC_ADDR, 1));
- radeon_ring_write(rdev, rdev->config.r300.resync_scratch);
- radeon_ring_write(rdev, 0xDEADBEEF);
- radeon_ring_unlock_commit(rdev);
+ radeon_ring_lock(rdev, ring, 8);
+ radeon_ring_write(ring, PACKET0(R300_CP_RESYNC_ADDR, 1));
+ radeon_ring_write(ring, rdev->config.r300.resync_scratch);
+ radeon_ring_write(ring, 0xDEADBEEF);
+ radeon_ring_unlock_commit(rdev, ring);
}
static void r420_cp_errata_fini(struct radeon_device *rdev)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+
/* Catch the RESYNC we dispatched all the way back,
* at the very beginning of the CP init.
*/
- radeon_ring_lock(rdev, 8);
- radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
- radeon_ring_write(rdev, R300_RB3D_DC_FINISH);
- radeon_ring_unlock_commit(rdev);
+ radeon_ring_lock(rdev, ring, 8);
+ radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+ radeon_ring_write(ring, R300_RB3D_DC_FINISH);
+ radeon_ring_unlock_commit(rdev, ring);
radeon_scratch_free(rdev, rdev->config.r300.resync_scratch);
}
@@ -254,6 +258,12 @@ static int r420_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+ return r;
+ }
+
/* Enable IRQ */
r100_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -264,11 +274,18 @@ static int r420_startup(struct radeon_device *rdev)
return r;
}
r420_cp_errata_init(rdev);
- r = r100_ib_init(rdev);
+
+ r = radeon_ib_pool_start(rdev);
+ if (r)
+ return r;
+
+ r = r100_ib_test(rdev);
if (r) {
- dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
+ dev_err(rdev->dev, "failed testing IB (%d).\n", r);
+ rdev->accel_working = false;
return r;
}
+
return 0;
}
@@ -297,11 +314,14 @@ int r420_resume(struct radeon_device *rdev)
r420_clock_resume(rdev);
/* Initialize surface registers */
radeon_surface_init(rdev);
+
+ rdev->accel_working = true;
return r420_startup(rdev);
}
int r420_suspend(struct radeon_device *rdev)
{
+ radeon_ib_pool_suspend(rdev);
r420_cp_errata_fini(rdev);
r100_cp_disable(rdev);
radeon_wb_disable(rdev);
@@ -414,7 +434,14 @@ int r420_init(struct radeon_device *rdev)
return r;
}
r420_set_reg_safe(rdev);
+
+ r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+ rdev->accel_working = false;
+ }
+
r = r420_startup(rdev);
if (r) {
/* Something went wrong with the accel init, stop accel */
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h
index fc437059918f..3bd8f1b1c606 100644
--- a/drivers/gpu/drm/radeon/r500_reg.h
+++ b/drivers/gpu/drm/radeon/r500_reg.h
@@ -573,6 +573,7 @@
#define AVIVO_TMDSA_CNTL 0x7880
# define AVIVO_TMDSA_CNTL_ENABLE (1 << 0)
+# define AVIVO_TMDSA_CNTL_HDMI_EN (1 << 2)
# define AVIVO_TMDSA_CNTL_HPD_MASK (1 << 4)
# define AVIVO_TMDSA_CNTL_HPD_SELECT (1 << 8)
# define AVIVO_TMDSA_CNTL_SYNC_PHASE (1 << 12)
@@ -633,6 +634,7 @@
#define AVIVO_LVTMA_CNTL 0x7a80
# define AVIVO_LVTMA_CNTL_ENABLE (1 << 0)
+# define AVIVO_LVTMA_CNTL_HDMI_EN (1 << 2)
# define AVIVO_LVTMA_CNTL_HPD_MASK (1 << 4)
# define AVIVO_LVTMA_CNTL_HPD_SELECT (1 << 8)
# define AVIVO_LVTMA_CNTL_SYNC_PHASE (1 << 12)
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index 3081d07f8de5..4ae1615e752f 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -187,6 +187,12 @@ static int r520_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+ return r;
+ }
+
/* Enable IRQ */
rs600_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -196,9 +202,15 @@ static int r520_startup(struct radeon_device *rdev)
dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
- r = r100_ib_init(rdev);
+
+ r = radeon_ib_pool_start(rdev);
+ if (r)
+ return r;
+
+ r = r100_ib_test(rdev);
if (r) {
- dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
+ dev_err(rdev->dev, "failed testing IB (%d).\n", r);
+ rdev->accel_working = false;
return r;
}
return 0;
@@ -223,6 +235,8 @@ int r520_resume(struct radeon_device *rdev)
rv515_clock_startup(rdev);
/* Initialize surface registers */
radeon_surface_init(rdev);
+
+ rdev->accel_working = true;
return r520_startup(rdev);
}
@@ -292,7 +306,14 @@ int r520_init(struct radeon_device *rdev)
if (r)
return r;
rv515_set_safe_registers(rdev);
+
+ r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+ rdev->accel_working = false;
+ }
+
r = r520_startup(rdev);
if (r) {
/* Something went wrong with the accel init, stop accel */
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 9cdda0b3b081..4f08e5e6ee9d 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1344,7 +1344,7 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
return 0;
}
-bool r600_gpu_is_lockup(struct radeon_device *rdev)
+bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
u32 srbm_status;
u32 grbm_status;
@@ -1361,19 +1361,19 @@ bool r600_gpu_is_lockup(struct radeon_device *rdev)
grbm_status = RREG32(R_008010_GRBM_STATUS);
grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
if (!G_008010_GUI_ACTIVE(grbm_status)) {
- r100_gpu_lockup_update(lockup, &rdev->cp);
+ r100_gpu_lockup_update(lockup, ring);
return false;
}
/* force CP activities */
- r = radeon_ring_lock(rdev, 2);
+ r = radeon_ring_lock(rdev, ring, 2);
if (!r) {
/* PACKET2 NOP */
- radeon_ring_write(rdev, 0x80000000);
- radeon_ring_write(rdev, 0x80000000);
- radeon_ring_unlock_commit(rdev);
+ radeon_ring_write(ring, 0x80000000);
+ radeon_ring_write(ring, 0x80000000);
+ radeon_ring_unlock_commit(rdev, ring);
}
- rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
- return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
+ ring->rptr = RREG32(ring->rptr_reg);
+ return r100_gpu_cp_is_lockup(rdev, lockup, ring);
}
int r600_asic_reset(struct radeon_device *rdev)
@@ -2144,27 +2144,28 @@ static int r600_cp_load_microcode(struct radeon_device *rdev)
int r600_cp_start(struct radeon_device *rdev)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
uint32_t cp_me;
- r = radeon_ring_lock(rdev, 7);
+ r = radeon_ring_lock(rdev, ring, 7);
if (r) {
DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
return r;
}
- radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
- radeon_ring_write(rdev, 0x1);
+ radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
+ radeon_ring_write(ring, 0x1);
if (rdev->family >= CHIP_RV770) {
- radeon_ring_write(rdev, 0x0);
- radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
+ radeon_ring_write(ring, 0x0);
+ radeon_ring_write(ring, rdev->config.rv770.max_hw_contexts - 1);
} else {
- radeon_ring_write(rdev, 0x3);
- radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
+ radeon_ring_write(ring, 0x3);
+ radeon_ring_write(ring, rdev->config.r600.max_hw_contexts - 1);
}
- radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, 0);
- radeon_ring_unlock_commit(rdev);
+ radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0);
+ radeon_ring_unlock_commit(rdev, ring);
cp_me = 0xff;
WREG32(R_0086D8_CP_ME_CNTL, cp_me);
@@ -2173,6 +2174,7 @@ int r600_cp_start(struct radeon_device *rdev)
int r600_cp_resume(struct radeon_device *rdev)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 tmp;
u32 rb_bufsz;
int r;
@@ -2184,13 +2186,13 @@ int r600_cp_resume(struct radeon_device *rdev)
WREG32(GRBM_SOFT_RESET, 0);
/* Set ring buffer size */
- rb_bufsz = drm_order(rdev->cp.ring_size / 8);
+ rb_bufsz = drm_order(ring->ring_size / 8);
tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
#endif
WREG32(CP_RB_CNTL, tmp);
- WREG32(CP_SEM_WAIT_TIMER, 0x4);
+ WREG32(CP_SEM_WAIT_TIMER, 0x0);
/* Set the write pointer delay */
WREG32(CP_RB_WPTR_DELAY, 0);
@@ -2198,8 +2200,8 @@ int r600_cp_resume(struct radeon_device *rdev)
/* Initialize the ring buffer's read and write pointers */
WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
WREG32(CP_RB_RPTR_WR, 0);
- rdev->cp.wptr = 0;
- WREG32(CP_RB_WPTR, rdev->cp.wptr);
+ ring->wptr = 0;
+ WREG32(CP_RB_WPTR, ring->wptr);
/* set the wb address whether it's enabled or not */
WREG32(CP_RB_RPTR_ADDR,
@@ -2217,42 +2219,36 @@ int r600_cp_resume(struct radeon_device *rdev)
mdelay(1);
WREG32(CP_RB_CNTL, tmp);
- WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
+ WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
- rdev->cp.rptr = RREG32(CP_RB_RPTR);
+ ring->rptr = RREG32(CP_RB_RPTR);
r600_cp_start(rdev);
- rdev->cp.ready = true;
- r = radeon_ring_test(rdev);
+ ring->ready = true;
+ r = radeon_ring_test(rdev, ring);
if (r) {
- rdev->cp.ready = false;
+ ring->ready = false;
return r;
}
return 0;
}
-void r600_cp_commit(struct radeon_device *rdev)
-{
- WREG32(CP_RB_WPTR, rdev->cp.wptr);
- (void)RREG32(CP_RB_WPTR);
-}
-
-void r600_ring_init(struct radeon_device *rdev, unsigned ring_size)
+void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size)
{
u32 rb_bufsz;
/* Align ring size */
rb_bufsz = drm_order(ring_size / 8);
ring_size = (1 << (rb_bufsz + 1)) * 4;
- rdev->cp.ring_size = ring_size;
- rdev->cp.align_mask = 16 - 1;
+ ring->ring_size = ring_size;
+ ring->align_mask = 16 - 1;
}
void r600_cp_fini(struct radeon_device *rdev)
{
r600_cp_stop(rdev);
- radeon_ring_fini(rdev);
+ radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
}
@@ -2271,11 +2267,11 @@ void r600_scratch_init(struct radeon_device *rdev)
}
}
-int r600_ring_test(struct radeon_device *rdev)
+int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
uint32_t scratch;
uint32_t tmp = 0;
- unsigned i;
+ unsigned i, ridx = radeon_ring_index(rdev, ring);
int r;
r = radeon_scratch_get(rdev, &scratch);
@@ -2284,16 +2280,16 @@ int r600_ring_test(struct radeon_device *rdev)
return r;
}
WREG32(scratch, 0xCAFEDEAD);
- r = radeon_ring_lock(rdev, 3);
+ r = radeon_ring_lock(rdev, ring, 3);
if (r) {
- DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+ DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ridx, r);
radeon_scratch_free(rdev, scratch);
return r;
}
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
- radeon_ring_write(rdev, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
- radeon_ring_write(rdev, 0xDEADBEEF);
- radeon_ring_unlock_commit(rdev);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(ring, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
+ radeon_ring_write(ring, 0xDEADBEEF);
+ radeon_ring_unlock_commit(rdev, ring);
for (i = 0; i < rdev->usec_timeout; i++) {
tmp = RREG32(scratch);
if (tmp == 0xDEADBEEF)
@@ -2301,10 +2297,10 @@ int r600_ring_test(struct radeon_device *rdev)
DRM_UDELAY(1);
}
if (i < rdev->usec_timeout) {
- DRM_INFO("ring test succeeded in %d usecs\n", i);
+ DRM_INFO("ring test on %d succeeded in %d usecs\n", ridx, i);
} else {
- DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
- scratch, tmp);
+ DRM_ERROR("radeon: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
+ ridx, scratch, tmp);
r = -EINVAL;
}
radeon_scratch_free(rdev, scratch);
@@ -2314,49 +2310,63 @@ int r600_ring_test(struct radeon_device *rdev)
void r600_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence)
{
+ struct radeon_ring *ring = &rdev->ring[fence->ring];
+
if (rdev->wb.use_event) {
- u64 addr = rdev->wb.gpu_addr + R600_WB_EVENT_OFFSET +
- (u64)(rdev->fence_drv.scratch_reg - rdev->scratch.reg_base);
+ u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
/* flush read cache over gart */
- radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3));
- radeon_ring_write(rdev, PACKET3_TC_ACTION_ENA |
+ radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+ radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
PACKET3_VC_ACTION_ENA |
PACKET3_SH_ACTION_ENA);
- radeon_ring_write(rdev, 0xFFFFFFFF);
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, 10); /* poll interval */
+ radeon_ring_write(ring, 0xFFFFFFFF);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 10); /* poll interval */
/* EVENT_WRITE_EOP - flush caches, send int */
- radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
- radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
- radeon_ring_write(rdev, addr & 0xffffffff);
- radeon_ring_write(rdev, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
- radeon_ring_write(rdev, fence->seq);
- radeon_ring_write(rdev, 0);
+ radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
+ radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
+ radeon_ring_write(ring, addr & 0xffffffff);
+ radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
+ radeon_ring_write(ring, fence->seq);
+ radeon_ring_write(ring, 0);
} else {
/* flush read cache over gart */
- radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3));
- radeon_ring_write(rdev, PACKET3_TC_ACTION_ENA |
+ radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+ radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
PACKET3_VC_ACTION_ENA |
PACKET3_SH_ACTION_ENA);
- radeon_ring_write(rdev, 0xFFFFFFFF);
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, 10); /* poll interval */
- radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
- radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0));
+ radeon_ring_write(ring, 0xFFFFFFFF);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 10); /* poll interval */
+ radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
+ radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0));
/* wait for 3D idle clean */
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
- radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
- radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
/* Emit fence sequence & fire IRQ */
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
- radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
- radeon_ring_write(rdev, fence->seq);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(ring, ((rdev->fence_drv[fence->ring].scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
+ radeon_ring_write(ring, fence->seq);
/* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
- radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
- radeon_ring_write(rdev, RB_INT_STAT);
+ radeon_ring_write(ring, PACKET0(CP_INT_STATUS, 0));
+ radeon_ring_write(ring, RB_INT_STAT);
}
}
+void r600_semaphore_ring_emit(struct radeon_device *rdev,
+ struct radeon_ring *ring,
+ struct radeon_semaphore *semaphore,
+ bool emit_wait)
+{
+ uint64_t addr = semaphore->gpu_addr;
+ unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
+
+ radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
+ radeon_ring_write(ring, addr & 0xffffffff);
+ radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
+}
+
int r600_copy_blit(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
@@ -2409,6 +2419,7 @@ void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
int r600_startup(struct radeon_device *rdev)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
/* enable pcie gen2 link */
@@ -2447,6 +2458,12 @@ int r600_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+ return r;
+ }
+
/* Enable IRQ */
r = r600_irq_init(rdev);
if (r) {
@@ -2456,7 +2473,10 @@ int r600_startup(struct radeon_device *rdev)
}
r600_irq_set(rdev);
- r = radeon_ring_init(rdev, rdev->cp.ring_size);
+ r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
+ R600_CP_RB_RPTR, R600_CP_RB_WPTR,
+ 0, 0xfffff, RADEON_CP_PACKET2);
+
if (r)
return r;
r = r600_cp_load_microcode(rdev);
@@ -2466,6 +2486,17 @@ int r600_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = radeon_ib_pool_start(rdev);
+ if (r)
+ return r;
+
+ r = r600_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ if (r) {
+ DRM_ERROR("radeon: failed testing IB (%d).\n", r);
+ rdev->accel_working = false;
+ return r;
+ }
+
return 0;
}
@@ -2494,18 +2525,13 @@ int r600_resume(struct radeon_device *rdev)
/* post card */
atom_asic_init(rdev->mode_info.atom_context);
+ rdev->accel_working = true;
r = r600_startup(rdev);
if (r) {
DRM_ERROR("r600 startup failed on resume\n");
return r;
}
- r = r600_ib_test(rdev);
- if (r) {
- DRM_ERROR("radeon: failed testing IB (%d).\n", r);
- return r;
- }
-
r = r600_audio_init(rdev);
if (r) {
DRM_ERROR("radeon: audio resume failed\n");
@@ -2518,13 +2544,14 @@ int r600_resume(struct radeon_device *rdev)
int r600_suspend(struct radeon_device *rdev)
{
r600_audio_fini(rdev);
+ radeon_ib_pool_suspend(rdev);
+ r600_blit_suspend(rdev);
/* FIXME: we should wait for ring to be empty */
r600_cp_stop(rdev);
- rdev->cp.ready = false;
+ rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
r600_irq_suspend(rdev);
radeon_wb_disable(rdev);
r600_pcie_gart_disable(rdev);
- r600_blit_suspend(rdev);
return 0;
}
@@ -2595,8 +2622,8 @@ int r600_init(struct radeon_device *rdev)
if (r)
return r;
- rdev->cp.ring_obj = NULL;
- r600_ring_init(rdev, 1024 * 1024);
+ rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
+ r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
@@ -2605,30 +2632,24 @@ int r600_init(struct radeon_device *rdev)
if (r)
return r;
+ r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+ rdev->accel_working = false;
+ }
+
r = r600_startup(rdev);
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
r600_cp_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
+ r100_ib_fini(rdev);
radeon_irq_kms_fini(rdev);
r600_pcie_gart_fini(rdev);
rdev->accel_working = false;
}
- if (rdev->accel_working) {
- r = radeon_ib_pool_init(rdev);
- if (r) {
- dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
- rdev->accel_working = false;
- } else {
- r = r600_ib_test(rdev);
- if (r) {
- dev_err(rdev->dev, "IB test failed (%d).\n", r);
- rdev->accel_working = false;
- }
- }
- }
r = r600_audio_init(rdev);
if (r)
@@ -2643,12 +2664,13 @@ void r600_fini(struct radeon_device *rdev)
r600_cp_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
- radeon_ib_pool_fini(rdev);
+ r100_ib_fini(rdev);
radeon_irq_kms_fini(rdev);
r600_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_agp_fini(rdev);
radeon_gem_fini(rdev);
+ radeon_semaphore_driver_fini(rdev);
radeon_fence_driver_fini(rdev);
radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
@@ -2662,18 +2684,20 @@ void r600_fini(struct radeon_device *rdev)
*/
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
+ struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
+
/* FIXME: implement */
- radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
- radeon_ring_write(rdev,
+ radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+ radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
(2 << 0) |
#endif
(ib->gpu_addr & 0xFFFFFFFC));
- radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
- radeon_ring_write(rdev, ib->length_dw);
+ radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
+ radeon_ring_write(ring, ib->length_dw);
}
-int r600_ib_test(struct radeon_device *rdev)
+int r600_ib_test(struct radeon_device *rdev, int ring)
{
struct radeon_ib *ib;
uint32_t scratch;
@@ -2687,7 +2711,7 @@ int r600_ib_test(struct radeon_device *rdev)
return r;
}
WREG32(scratch, 0xCAFEDEAD);
- r = radeon_ib_get(rdev, &ib);
+ r = radeon_ib_get(rdev, ring, &ib, 256);
if (r) {
DRM_ERROR("radeon: failed to get ib (%d).\n", r);
return r;
@@ -2728,7 +2752,7 @@ int r600_ib_test(struct radeon_device *rdev)
DRM_UDELAY(1);
}
if (i < rdev->usec_timeout) {
- DRM_INFO("ib test succeeded in %u usecs\n", i);
+ DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib->fence->ring, i);
} else {
DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
scratch, tmp);
@@ -3075,7 +3099,7 @@ int r600_irq_set(struct radeon_device *rdev)
hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
}
- if (rdev->irq.sw_int) {
+ if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
DRM_DEBUG("r600_irq_set: sw int\n");
cp_int_cntl |= RB_INT_ENABLE;
cp_int_cntl |= TIME_STAMP_INT_ENABLE;
@@ -3459,11 +3483,11 @@ restart_ih:
case 177: /* CP_INT in IB1 */
case 178: /* CP_INT in IB2 */
DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
- radeon_fence_process(rdev);
+ radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
break;
case 181: /* CP EOP event */
DRM_DEBUG("IH: CP EOP\n");
- radeon_fence_process(rdev);
+ radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
break;
case 233: /* GUI IDLE */
DRM_DEBUG("IH: GUI idle\n");
@@ -3496,30 +3520,6 @@ restart_ih:
*/
#if defined(CONFIG_DEBUG_FS)
-static int r600_debugfs_cp_ring_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct radeon_device *rdev = dev->dev_private;
- unsigned count, i, j;
-
- radeon_ring_free_size(rdev);
- count = (rdev->cp.ring_size / 4) - rdev->cp.ring_free_dw;
- seq_printf(m, "CP_STAT 0x%08x\n", RREG32(CP_STAT));
- seq_printf(m, "CP_RB_WPTR 0x%08x\n", RREG32(CP_RB_WPTR));
- seq_printf(m, "CP_RB_RPTR 0x%08x\n", RREG32(CP_RB_RPTR));
- seq_printf(m, "driver's copy of the CP_RB_WPTR 0x%08x\n", rdev->cp.wptr);
- seq_printf(m, "driver's copy of the CP_RB_RPTR 0x%08x\n", rdev->cp.rptr);
- seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
- seq_printf(m, "%u dwords in ring\n", count);
- i = rdev->cp.rptr;
- for (j = 0; j <= count; j++) {
- seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
- i = (i + 1) & rdev->cp.ptr_mask;
- }
- return 0;
-}
-
static int r600_debugfs_mc_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -3533,7 +3533,6 @@ static int r600_debugfs_mc_info(struct seq_file *m, void *data)
static struct drm_info_list r600_mc_info_list[] = {
{"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
- {"r600_ring_info", r600_debugfs_cp_ring_info, 0, NULL},
};
#endif
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
index 846fae576399..ba66f3093d46 100644
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -36,7 +36,7 @@
*/
static int r600_audio_chipset_supported(struct radeon_device *rdev)
{
- return (rdev->family >= CHIP_R600 && rdev->family < CHIP_CEDAR)
+ return (rdev->family >= CHIP_R600 && !ASIC_IS_DCE5(rdev))
|| rdev->family == CHIP_RS600
|| rdev->family == CHIP_RS690
|| rdev->family == CHIP_RS740;
@@ -161,8 +161,18 @@ static void r600_audio_update_hdmi(unsigned long param)
*/
static void r600_audio_engine_enable(struct radeon_device *rdev, bool enable)
{
+ u32 value = 0;
DRM_INFO("%s audio support\n", enable ? "Enabling" : "Disabling");
- WREG32_P(R600_AUDIO_ENABLE, enable ? 0x81000000 : 0x0, ~0x81000000);
+ if (ASIC_IS_DCE4(rdev)) {
+ if (enable) {
+ value |= 0x81000000; /* Required to enable audio */
+ value |= 0x0e1000f0; /* fglrx sets that too */
+ }
+ WREG32(EVERGREEN_AUDIO_ENABLE, value);
+ } else {
+ WREG32_P(R600_AUDIO_ENABLE,
+ enable ? 0x81000000 : 0x0, ~0x81000000);
+ }
rdev->audio_enabled = enable;
}
@@ -248,22 +258,33 @@ void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
return;
}
- switch (dig->dig_encoder) {
- case 0:
- WREG32(R600_AUDIO_PLL1_MUL, base_rate * 50);
- WREG32(R600_AUDIO_PLL1_DIV, clock * 100);
- WREG32(R600_AUDIO_CLK_SRCSEL, 0);
- break;
-
- case 1:
- WREG32(R600_AUDIO_PLL2_MUL, base_rate * 50);
- WREG32(R600_AUDIO_PLL2_DIV, clock * 100);
- WREG32(R600_AUDIO_CLK_SRCSEL, 1);
- break;
- default:
- dev_err(rdev->dev, "Unsupported DIG on encoder 0x%02X\n",
- radeon_encoder->encoder_id);
- return;
+ if (ASIC_IS_DCE4(rdev)) {
+ /* TODO: other PLLs? */
+ WREG32(EVERGREEN_AUDIO_PLL1_MUL, base_rate * 10);
+ WREG32(EVERGREEN_AUDIO_PLL1_DIV, clock * 10);
+ WREG32(EVERGREEN_AUDIO_PLL1_UNK, 0x00000071);
+
+ /* Some magic trigger or src sel? */
+ WREG32_P(0x5ac, 0x01, ~0x77);
+ } else {
+ switch (dig->dig_encoder) {
+ case 0:
+ WREG32(R600_AUDIO_PLL1_MUL, base_rate * 50);
+ WREG32(R600_AUDIO_PLL1_DIV, clock * 100);
+ WREG32(R600_AUDIO_CLK_SRCSEL, 0);
+ break;
+
+ case 1:
+ WREG32(R600_AUDIO_PLL2_MUL, base_rate * 50);
+ WREG32(R600_AUDIO_PLL2_DIV, clock * 100);
+ WREG32(R600_AUDIO_CLK_SRCSEL, 1);
+ break;
+ default:
+ dev_err(rdev->dev,
+ "Unsupported DIG on encoder 0x%02X\n",
+ radeon_encoder->encoder_id);
+ return;
+ }
}
}
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index e09d2818f949..d996f4381130 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -50,6 +50,7 @@ static void
set_render_target(struct radeon_device *rdev, int format,
int w, int h, u64 gpu_addr)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 cb_color_info;
int pitch, slice;
@@ -63,38 +64,38 @@ set_render_target(struct radeon_device *rdev, int format,
pitch = (w / 8) - 1;
slice = ((w * h) / 64) - 1;
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(rdev, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(rdev, gpu_addr >> 8);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ radeon_ring_write(ring, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, gpu_addr >> 8);
if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770) {
- radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_BASE_UPDATE, 0));
- radeon_ring_write(rdev, 2 << 0);
+ radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_BASE_UPDATE, 0));
+ radeon_ring_write(ring, 2 << 0);
}
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(rdev, (CB_COLOR0_SIZE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(rdev, (pitch << 0) | (slice << 10));
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ radeon_ring_write(ring, (CB_COLOR0_SIZE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, (pitch << 0) | (slice << 10));
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(rdev, (CB_COLOR0_VIEW - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(rdev, 0);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ radeon_ring_write(ring, (CB_COLOR0_VIEW - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, 0);
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(rdev, (CB_COLOR0_INFO - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(rdev, cb_color_info);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ radeon_ring_write(ring, (CB_COLOR0_INFO - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, cb_color_info);
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(rdev, (CB_COLOR0_TILE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(rdev, 0);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ radeon_ring_write(ring, (CB_COLOR0_TILE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, 0);
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(rdev, (CB_COLOR0_FRAG - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(rdev, 0);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ radeon_ring_write(ring, (CB_COLOR0_FRAG - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, 0);
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(rdev, (CB_COLOR0_MASK - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(rdev, 0);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ radeon_ring_write(ring, (CB_COLOR0_MASK - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, 0);
}
/* emits 5dw */
@@ -103,6 +104,7 @@ cp_set_surface_sync(struct radeon_device *rdev,
u32 sync_type, u32 size,
u64 mc_addr)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 cp_coher_size;
if (size == 0xffffffff)
@@ -110,17 +112,18 @@ cp_set_surface_sync(struct radeon_device *rdev,
else
cp_coher_size = ((size + 255) >> 8);
- radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3));
- radeon_ring_write(rdev, sync_type);
- radeon_ring_write(rdev, cp_coher_size);
- radeon_ring_write(rdev, mc_addr >> 8);
- radeon_ring_write(rdev, 10); /* poll interval */
+ radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+ radeon_ring_write(ring, sync_type);
+ radeon_ring_write(ring, cp_coher_size);
+ radeon_ring_write(ring, mc_addr >> 8);
+ radeon_ring_write(ring, 10); /* poll interval */
}
/* emits 21dw + 1 surface sync = 26dw */
static void
set_shaders(struct radeon_device *rdev)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u64 gpu_addr;
u32 sq_pgm_resources;
@@ -129,35 +132,35 @@ set_shaders(struct radeon_device *rdev)
/* VS */
gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(rdev, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(rdev, gpu_addr >> 8);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ radeon_ring_write(ring, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, gpu_addr >> 8);
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(rdev, (SQ_PGM_RESOURCES_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(rdev, sq_pgm_resources);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ radeon_ring_write(ring, (SQ_PGM_RESOURCES_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, sq_pgm_resources);
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(rdev, (SQ_PGM_CF_OFFSET_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(rdev, 0);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ radeon_ring_write(ring, (SQ_PGM_CF_OFFSET_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, 0);
/* PS */
gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.ps_offset;
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(rdev, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(rdev, gpu_addr >> 8);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ radeon_ring_write(ring, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, gpu_addr >> 8);
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(rdev, (SQ_PGM_RESOURCES_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(rdev, sq_pgm_resources | (1 << 28));
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ radeon_ring_write(ring, (SQ_PGM_RESOURCES_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, sq_pgm_resources | (1 << 28));
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(rdev, (SQ_PGM_EXPORTS_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(rdev, 2);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ radeon_ring_write(ring, (SQ_PGM_EXPORTS_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, 2);
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(rdev, (SQ_PGM_CF_OFFSET_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(rdev, 0);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ radeon_ring_write(ring, (SQ_PGM_CF_OFFSET_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, 0);
gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
cp_set_surface_sync(rdev, PACKET3_SH_ACTION_ENA, 512, gpu_addr);
@@ -167,6 +170,7 @@ set_shaders(struct radeon_device *rdev)
static void
set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 sq_vtx_constant_word2;
sq_vtx_constant_word2 = SQ_VTXC_BASE_ADDR_HI(upper_32_bits(gpu_addr) & 0xff) |
@@ -175,15 +179,15 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
sq_vtx_constant_word2 |= SQ_VTXC_ENDIAN_SWAP(SQ_ENDIAN_8IN32);
#endif
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 7));
- radeon_ring_write(rdev, 0x460);
- radeon_ring_write(rdev, gpu_addr & 0xffffffff);
- radeon_ring_write(rdev, 48 - 1);
- radeon_ring_write(rdev, sq_vtx_constant_word2);
- radeon_ring_write(rdev, 1 << 0);
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, SQ_TEX_VTX_VALID_BUFFER << 30);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 7));
+ radeon_ring_write(ring, 0x460);
+ radeon_ring_write(ring, gpu_addr & 0xffffffff);
+ radeon_ring_write(ring, 48 - 1);
+ radeon_ring_write(ring, sq_vtx_constant_word2);
+ radeon_ring_write(ring, 1 << 0);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, SQ_TEX_VTX_VALID_BUFFER << 30);
if ((rdev->family == CHIP_RV610) ||
(rdev->family == CHIP_RV620) ||
@@ -203,6 +207,7 @@ set_tex_resource(struct radeon_device *rdev,
int format, int w, int h, int pitch,
u64 gpu_addr, u32 size)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
uint32_t sq_tex_resource_word0, sq_tex_resource_word1, sq_tex_resource_word4;
if (h < 1)
@@ -225,15 +230,15 @@ set_tex_resource(struct radeon_device *rdev,
cp_set_surface_sync(rdev,
PACKET3_TC_ACTION_ENA, size, gpu_addr);
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 7));
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, sq_tex_resource_word0);
- radeon_ring_write(rdev, sq_tex_resource_word1);
- radeon_ring_write(rdev, gpu_addr >> 8);
- radeon_ring_write(rdev, gpu_addr >> 8);
- radeon_ring_write(rdev, sq_tex_resource_word4);
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, SQ_TEX_VTX_VALID_TEXTURE << 30);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 7));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, sq_tex_resource_word0);
+ radeon_ring_write(ring, sq_tex_resource_word1);
+ radeon_ring_write(ring, gpu_addr >> 8);
+ radeon_ring_write(ring, gpu_addr >> 8);
+ radeon_ring_write(ring, sq_tex_resource_word4);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, SQ_TEX_VTX_VALID_TEXTURE << 30);
}
/* emits 12 */
@@ -241,43 +246,45 @@ static void
set_scissors(struct radeon_device *rdev, int x1, int y1,
int x2, int y2)
{
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
- radeon_ring_write(rdev, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(rdev, (x1 << 0) | (y1 << 16));
- radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
-
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
- radeon_ring_write(rdev, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31));
- radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
-
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
- radeon_ring_write(rdev, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31));
- radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+ radeon_ring_write(ring, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, (x1 << 0) | (y1 << 16));
+ radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
+
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+ radeon_ring_write(ring, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
+ radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
+
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+ radeon_ring_write(ring, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
+ radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
}
/* emits 10 */
static void
draw_auto(struct radeon_device *rdev)
{
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
- radeon_ring_write(rdev, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
- radeon_ring_write(rdev, DI_PT_RECTLIST);
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(ring, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, DI_PT_RECTLIST);
- radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0));
- radeon_ring_write(rdev,
+ radeon_ring_write(ring, PACKET3(PACKET3_INDEX_TYPE, 0));
+ radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
(2 << 2) |
#endif
DI_INDEX_SIZE_16_BIT);
- radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0));
- radeon_ring_write(rdev, 1);
+ radeon_ring_write(ring, PACKET3(PACKET3_NUM_INSTANCES, 0));
+ radeon_ring_write(ring, 1);
- radeon_ring_write(rdev, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
- radeon_ring_write(rdev, 3);
- radeon_ring_write(rdev, DI_SRC_SEL_AUTO_INDEX);
+ radeon_ring_write(ring, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
+ radeon_ring_write(ring, 3);
+ radeon_ring_write(ring, DI_SRC_SEL_AUTO_INDEX);
}
@@ -285,6 +292,7 @@ draw_auto(struct radeon_device *rdev)
static void
set_default_state(struct radeon_device *rdev)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2;
u32 sq_thread_resource_mgmt, sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2;
int num_ps_gprs, num_vs_gprs, num_temp_gprs, num_gs_gprs, num_es_gprs;
@@ -440,24 +448,24 @@ set_default_state(struct radeon_device *rdev)
/* emit an IB pointing at default state */
dwords = ALIGN(rdev->r600_blit.state_len, 0x10);
gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
- radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
- radeon_ring_write(rdev,
+ radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+ radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
(2 << 0) |
#endif
(gpu_addr & 0xFFFFFFFC));
- radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF);
- radeon_ring_write(rdev, dwords);
+ radeon_ring_write(ring, upper_32_bits(gpu_addr) & 0xFF);
+ radeon_ring_write(ring, dwords);
/* SQ config */
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 6));
- radeon_ring_write(rdev, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
- radeon_ring_write(rdev, sq_config);
- radeon_ring_write(rdev, sq_gpr_resource_mgmt_1);
- radeon_ring_write(rdev, sq_gpr_resource_mgmt_2);
- radeon_ring_write(rdev, sq_thread_resource_mgmt);
- radeon_ring_write(rdev, sq_stack_resource_mgmt_1);
- radeon_ring_write(rdev, sq_stack_resource_mgmt_2);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 6));
+ radeon_ring_write(ring, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, sq_config);
+ radeon_ring_write(ring, sq_gpr_resource_mgmt_1);
+ radeon_ring_write(ring, sq_gpr_resource_mgmt_2);
+ radeon_ring_write(ring, sq_thread_resource_mgmt);
+ radeon_ring_write(ring, sq_stack_resource_mgmt_1);
+ radeon_ring_write(ring, sq_stack_resource_mgmt_2);
}
static uint32_t i2f(uint32_t input)
@@ -611,16 +619,17 @@ void r600_blit_fini(struct radeon_device *rdev)
radeon_bo_unref(&rdev->r600_blit.shader_obj);
}
-static int r600_vb_ib_get(struct radeon_device *rdev)
+static int r600_vb_ib_get(struct radeon_device *rdev, unsigned size)
{
int r;
- r = radeon_ib_get(rdev, &rdev->r600_blit.vb_ib);
+ r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX,
+ &rdev->r600_blit.vb_ib, size);
if (r) {
DRM_ERROR("failed to get IB for vertex buffer\n");
return r;
}
- rdev->r600_blit.vb_total = 64*1024;
+ rdev->r600_blit.vb_total = size;
rdev->r600_blit.vb_used = 0;
return 0;
}
@@ -679,15 +688,12 @@ static unsigned r600_blit_create_rect(unsigned num_gpu_pages,
int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
int ring_size;
int num_loops = 0;
int dwords_per_loop = rdev->r600_blit.ring_size_per_loop;
- r = r600_vb_ib_get(rdev);
- if (r)
- return r;
-
/* num loops */
while (num_gpu_pages) {
num_gpu_pages -=
@@ -696,10 +702,15 @@ int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages)
num_loops++;
}
+ /* 48 bytes for vertex per loop */
+ r = r600_vb_ib_get(rdev, (num_loops*48)+256);
+ if (r)
+ return r;
+
/* calculate number of loops correctly */
ring_size = num_loops * dwords_per_loop;
ring_size += rdev->r600_blit.ring_size_common;
- r = radeon_ring_lock(rdev, ring_size);
+ r = radeon_ring_lock(rdev, ring, ring_size);
if (r)
return r;
@@ -718,7 +729,7 @@ void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence)
if (fence)
r = radeon_fence_emit(rdev, fence);
- radeon_ring_unlock_commit(rdev);
+ radeon_ring_unlock_commit(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
}
void r600_kms_blit_copy(struct radeon_device *rdev,
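
The hunks above convert each r600 blit emitter from the implicit rdev->cp ring to an explicit struct radeon_ring * looked up once per function. A minimal sketch of the resulting pattern, using only names that appear in this patch (an illustration, not a verbatim excerpt of the kernel source):

	static void emit_example(struct radeon_device *rdev)
	{
		/* every emitter now resolves the GFX ring explicitly */
		struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];

		/* packets are written against the ring, not the device */
		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
		radeon_ring_write(ring, (CB_COLOR0_VIEW - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
		radeon_ring_write(ring, 0);
	}
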
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index c9db4931913f..84c546250955 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -1815,7 +1815,7 @@ static void r600_cp_init_ring_buffer(struct drm_device *dev,
dev_priv->ring.size_l2qw);
#endif
- RADEON_WRITE(R600_CP_SEM_WAIT_TIMER, 0x4);
+ RADEON_WRITE(R600_CP_SEM_WAIT_TIMER, 0x0);
/* Set the write pointer delay */
RADEON_WRITE(R600_CP_RB_WPTR_DELAY, 0);
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index cb1acffd2430..38ce5d0427e3 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -941,7 +941,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->db_depth_control = radeon_get_ib_value(p, idx);
break;
case R_028010_DB_DEPTH_INFO:
- if (!p->keep_tiling_flags &&
+ if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) &&
r600_cs_packet_next_is_pkt3_nop(p)) {
r = r600_cs_packet_next_reloc(p, &reloc);
if (r) {
@@ -993,7 +993,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case R_0280B4_CB_COLOR5_INFO:
case R_0280B8_CB_COLOR6_INFO:
case R_0280BC_CB_COLOR7_INFO:
- if (!p->keep_tiling_flags &&
+ if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) &&
r600_cs_packet_next_is_pkt3_nop(p)) {
r = r600_cs_packet_next_reloc(p, &reloc);
if (r) {
@@ -1293,7 +1293,7 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
mip_offset <<= 8;
word0 = radeon_get_ib_value(p, idx + 0);
- if (!p->keep_tiling_flags) {
+ if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
if (tiling_flags & RADEON_TILING_MACRO)
word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
else if (tiling_flags & RADEON_TILING_MICRO)
@@ -1625,7 +1625,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
base_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
- if (!p->keep_tiling_flags) {
+ if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index f5ac7e788d81..0b5920671450 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -196,6 +196,13 @@ static void r600_hdmi_videoinfoframe(
frame[0xD] = (right_bar >> 8);
r600_hdmi_infoframe_checksum(0x82, 0x02, 0x0D, frame);
+ /* Our header values (type, version, length) should be all right;
+ * Intel uses the same ones. The checksum function also seems to be
+ * OK, since it works fine for the audio infoframe. However, the
+ * calculated value is always lower by 2 than the one fglrx produces,
+ * which breaks display completely on TVs that strictly check the
+ * checksum. Hack it manually here to work around the issue. */
+ frame[0x0] += 2;
WREG32(offset+R600_HDMI_VIDEOINFOFRAME_0,
frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
@@ -313,7 +320,7 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
struct radeon_device *rdev = dev->dev_private;
uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
- if (ASIC_IS_DCE4(rdev))
+ if (ASIC_IS_DCE5(rdev))
return;
if (!offset)
@@ -455,13 +462,31 @@ static void r600_hdmi_assign_block(struct drm_encoder *encoder)
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ u16 eg_offsets[] = {
+ EVERGREEN_CRTC0_REGISTER_OFFSET,
+ EVERGREEN_CRTC1_REGISTER_OFFSET,
+ EVERGREEN_CRTC2_REGISTER_OFFSET,
+ EVERGREEN_CRTC3_REGISTER_OFFSET,
+ EVERGREEN_CRTC4_REGISTER_OFFSET,
+ EVERGREEN_CRTC5_REGISTER_OFFSET,
+ };
+
if (!dig) {
dev_err(rdev->dev, "Enabling HDMI on non-dig encoder\n");
return;
}
- if (ASIC_IS_DCE4(rdev)) {
+ if (ASIC_IS_DCE5(rdev)) {
/* TODO */
+ } else if (ASIC_IS_DCE4(rdev)) {
+ if (dig->dig_encoder >= ARRAY_SIZE(eg_offsets)) {
+ dev_err(rdev->dev, "Enabling HDMI on unknown dig\n");
+ return;
+ }
+ radeon_encoder->hdmi_offset = EVERGREEN_HDMI_BASE +
+ eg_offsets[dig->dig_encoder];
+ radeon_encoder->hdmi_config_offset = radeon_encoder->hdmi_offset
+ + EVERGREEN_HDMI_CONFIG_OFFSET;
} else if (ASIC_IS_DCE3(rdev)) {
radeon_encoder->hdmi_offset = dig->dig_encoder ?
R600_HDMI_BLOCK3 : R600_HDMI_BLOCK1;
@@ -484,7 +509,7 @@ void r600_hdmi_enable(struct drm_encoder *encoder)
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
uint32_t offset;
- if (ASIC_IS_DCE4(rdev))
+ if (ASIC_IS_DCE5(rdev))
return;
if (!radeon_encoder->hdmi_offset) {
@@ -497,16 +522,24 @@ void r600_hdmi_enable(struct drm_encoder *encoder)
}
offset = radeon_encoder->hdmi_offset;
- if (ASIC_IS_DCE32(rdev) && !ASIC_IS_DCE4(rdev)) {
+ if (ASIC_IS_DCE5(rdev)) {
+ /* TODO */
+ } else if (ASIC_IS_DCE4(rdev)) {
+ WREG32_P(radeon_encoder->hdmi_config_offset + 0xc, 0x1, ~0x1);
+ } else if (ASIC_IS_DCE32(rdev)) {
WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0x1, ~0x1);
- } else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
+ } else if (ASIC_IS_DCE3(rdev)) {
+ /* TODO */
+ } else if (rdev->family >= CHIP_R600) {
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
- WREG32_P(AVIVO_TMDSA_CNTL, 0x4, ~0x4);
+ WREG32_P(AVIVO_TMDSA_CNTL, AVIVO_TMDSA_CNTL_HDMI_EN,
+ ~AVIVO_TMDSA_CNTL_HDMI_EN);
WREG32(offset + R600_HDMI_ENABLE, 0x101);
break;
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
- WREG32_P(AVIVO_LVTMA_CNTL, 0x4, ~0x4);
+ WREG32_P(AVIVO_LVTMA_CNTL, AVIVO_LVTMA_CNTL_HDMI_EN,
+ ~AVIVO_LVTMA_CNTL_HDMI_EN);
WREG32(offset + R600_HDMI_ENABLE, 0x105);
break;
default:
@@ -518,8 +551,8 @@ void r600_hdmi_enable(struct drm_encoder *encoder)
if (rdev->irq.installed
&& rdev->family != CHIP_RS600
&& rdev->family != CHIP_RS690
- && rdev->family != CHIP_RS740) {
-
+ && rdev->family != CHIP_RS740
+ && !ASIC_IS_DCE4(rdev)) {
/* if irq is available use it */
rdev->irq.hdmi[offset == R600_HDMI_BLOCK1 ? 0 : 1] = true;
radeon_irq_set(rdev);
@@ -544,7 +577,7 @@ void r600_hdmi_disable(struct drm_encoder *encoder)
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
uint32_t offset;
- if (ASIC_IS_DCE4(rdev))
+ if (ASIC_IS_DCE5(rdev))
return;
offset = radeon_encoder->hdmi_offset;
@@ -563,16 +596,22 @@ void r600_hdmi_disable(struct drm_encoder *encoder)
/* disable polling */
r600_audio_disable_polling(encoder);
- if (ASIC_IS_DCE32(rdev) && !ASIC_IS_DCE4(rdev)) {
+ if (ASIC_IS_DCE5(rdev)) {
+ /* TODO */
+ } else if (ASIC_IS_DCE4(rdev)) {
+ WREG32_P(radeon_encoder->hdmi_config_offset + 0xc, 0, ~0x1);
+ } else if (ASIC_IS_DCE32(rdev)) {
WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0, ~0x1);
} else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
- WREG32_P(AVIVO_TMDSA_CNTL, 0, ~0x4);
+ WREG32_P(AVIVO_TMDSA_CNTL, 0,
+ ~AVIVO_TMDSA_CNTL_HDMI_EN);
WREG32(offset + R600_HDMI_ENABLE, 0);
break;
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
- WREG32_P(AVIVO_LVTMA_CNTL, 0, ~0x4);
+ WREG32_P(AVIVO_LVTMA_CNTL, 0,
+ ~AVIVO_LVTMA_CNTL_HDMI_EN);
WREG32(offset + R600_HDMI_ENABLE, 0);
break;
default:
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index bfe1b5d92afe..3ee1fd7ef394 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -831,6 +831,8 @@
#define PACKET3_STRMOUT_BUFFER_UPDATE 0x34
#define PACKET3_INDIRECT_BUFFER_MP 0x38
#define PACKET3_MEM_SEMAPHORE 0x39
+# define PACKET3_SEM_SEL_SIGNAL (0x6 << 29)
+# define PACKET3_SEM_SEL_WAIT (0x7 << 29)
#define PACKET3_MPEG_INDEX 0x3A
#define PACKET3_WAIT_REG_MEM 0x3C
#define PACKET3_MEM_WRITE 0x3D
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 8227e76b5c70..73e05cb85eca 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -107,6 +107,21 @@ extern int radeon_msi;
#define RADEONFB_CONN_LIMIT 4
#define RADEON_BIOS_NUM_SCRATCH 8
+/* max number of rings */
+#define RADEON_NUM_RINGS 3
+
+/* internal ring indices */
+/* r1xx+ has gfx CP ring */
+#define RADEON_RING_TYPE_GFX_INDEX 0
+
+/* cayman has 2 compute CP rings */
+#define CAYMAN_RING_TYPE_CP1_INDEX 1
+#define CAYMAN_RING_TYPE_CP2_INDEX 2
+
+/* hardcode these limits for now */
+#define RADEON_VA_RESERVED_SIZE (8 << 20)
+#define RADEON_IB_VM_MAX_SIZE (64 << 10)
+
/*
* Errata workarounds.
*/
@@ -192,14 +207,15 @@ extern int sumo_get_temp(struct radeon_device *rdev);
*/
struct radeon_fence_driver {
uint32_t scratch_reg;
+ uint64_t gpu_addr;
+ volatile uint32_t *cpu_addr;
atomic_t seq;
uint32_t last_seq;
unsigned long last_jiffies;
unsigned long last_timeout;
wait_queue_head_t queue;
- rwlock_t lock;
struct list_head created;
- struct list_head emited;
+ struct list_head emitted;
struct list_head signaled;
bool initialized;
};
@@ -210,21 +226,26 @@ struct radeon_fence {
struct list_head list;
/* protected by radeon_fence.lock */
uint32_t seq;
- bool emited;
+ bool emitted;
bool signaled;
+ /* RB, DMA, etc. */
+ int ring;
+ struct radeon_semaphore *semaphore;
};
+int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
int radeon_fence_driver_init(struct radeon_device *rdev);
void radeon_fence_driver_fini(struct radeon_device *rdev);
-int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence);
+int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence, int ring);
int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence);
-void radeon_fence_process(struct radeon_device *rdev);
+void radeon_fence_process(struct radeon_device *rdev, int ring);
bool radeon_fence_signaled(struct radeon_fence *fence);
int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
-int radeon_fence_wait_next(struct radeon_device *rdev);
-int radeon_fence_wait_last(struct radeon_device *rdev);
+int radeon_fence_wait_next(struct radeon_device *rdev, int ring);
+int radeon_fence_wait_last(struct radeon_device *rdev, int ring);
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence);
void radeon_fence_unref(struct radeon_fence **fence);
+int radeon_fence_count_emitted(struct radeon_device *rdev, int ring);
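
The fence prototypes above now take a ring index. A hedged sketch of the create/emit/wait sequence built only from these prototypes (error handling trimmed; the helper itself is not part of the patch):

	static int fence_roundtrip(struct radeon_device *rdev)
	{
		struct radeon_fence *fence;
		int r;

		r = radeon_fence_create(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
		if (r)
			return r;
		/* ...emit work on the GFX ring, then... */
		r = radeon_fence_emit(rdev, fence);
		if (r == 0)
			r = radeon_fence_wait(fence, false);	/* non-interruptible wait */
		radeon_fence_unref(&fence);
		return r;
	}
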
/*
* Tiling registers
@@ -246,6 +267,21 @@ struct radeon_mman {
bool initialized;
};
+/* bo virtual address in a specific vm */
+struct radeon_bo_va {
+ /* bo list is protected by bo being reserved */
+ struct list_head bo_list;
+ /* vm list is protected by vm mutex */
+ struct list_head vm_list;
+ /* constant after initialization */
+ struct radeon_vm *vm;
+ struct radeon_bo *bo;
+ uint64_t soffset;
+ uint64_t eoffset;
+ uint32_t flags;
+ bool valid;
+};
+
struct radeon_bo {
/* Protected by gem.mutex */
struct list_head list;
@@ -259,6 +295,10 @@ struct radeon_bo {
u32 tiling_flags;
u32 pitch;
int surface_reg;
+ /* list of all virtual addresses to which this bo
+ * is associated
+ */
+ struct list_head va;
/* Constant after initialization */
struct radeon_device *rdev;
struct drm_gem_object gem_base;
@@ -274,6 +314,48 @@ struct radeon_bo_list {
u32 tiling_flags;
};
+/* Sub-allocation manager; it has to be protected by another lock.
+ * By design this is a helper for other parts of the driver, such as
+ * the indirect buffer or semaphore code, which both have their own
+ * locking.
+ *
+ * The principle is simple: we keep a list of sub-allocations in
+ * offset order (the first entry has offset == 0, the last entry has
+ * the highest offset).
+ *
+ * When allocating a new object we first check whether there is room
+ * at the end, i.e. total_size - (last_object_offset +
+ * last_object_size) >= alloc_size. If so, we allocate the new object
+ * there (see the sketch after these struct definitions).
+ *
+ * When there is not enough room at the end, we start waiting on each
+ * sub-object in turn until object_offset + object_size >= alloc_size;
+ * that object then becomes the sub-object we return.
+ *
+ * Alignment can't be bigger than the page size.
+ *
+ * Holes are not considered for allocation, to keep things simple.
+ * The assumption is that there won't be any holes (all objects use
+ * the same alignment). */
+struct radeon_sa_manager {
+ struct radeon_bo *bo;
+ struct list_head sa_bo;
+ unsigned size;
+ uint64_t gpu_addr;
+ void *cpu_ptr;
+ uint32_t domain;
+};
+
+struct radeon_sa_bo;
+
+/* sub-allocation buffer */
+struct radeon_sa_bo {
+ struct list_head list;
+ struct radeon_sa_manager *manager;
+ unsigned offset;
+ unsigned size;
+};
+
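
A hypothetical helper illustrating the "allocate at the end if it fits" rule from the comment above; the field names follow the structs introduced in this hunk, but the function itself is not part of the patch:

	static bool sa_fits_at_end(struct radeon_sa_manager *sa_manager, unsigned alloc_size)
	{
		struct radeon_sa_bo *last;

		if (list_empty(&sa_manager->sa_bo))
			return alloc_size <= sa_manager->size;

		/* the list is kept in offset order, so the tail has the highest offset */
		last = list_entry(sa_manager->sa_bo.prev, struct radeon_sa_bo, list);
		return sa_manager->size - (last->offset + last->size) >= alloc_size;
	}
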
/*
* GEM objects.
*/
@@ -303,6 +385,46 @@ int radeon_mode_dumb_destroy(struct drm_file *file_priv,
uint32_t handle);
/*
+ * Semaphores.
+ */
+struct radeon_ring;
+
+#define RADEON_SEMAPHORE_BO_SIZE 256
+
+struct radeon_semaphore_driver {
+ rwlock_t lock;
+ struct list_head bo;
+};
+
+struct radeon_semaphore_bo;
+
+/* everything here is constant */
+struct radeon_semaphore {
+ struct list_head list;
+ uint64_t gpu_addr;
+ uint32_t *cpu_ptr;
+ struct radeon_semaphore_bo *bo;
+};
+
+struct radeon_semaphore_bo {
+ struct list_head list;
+ struct radeon_ib *ib;
+ struct list_head free;
+ struct radeon_semaphore semaphores[RADEON_SEMAPHORE_BO_SIZE/8];
+ unsigned nused;
+};
+
+void radeon_semaphore_driver_fini(struct radeon_device *rdev);
+int radeon_semaphore_create(struct radeon_device *rdev,
+ struct radeon_semaphore **semaphore);
+void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
+ struct radeon_semaphore *semaphore);
+void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
+ struct radeon_semaphore *semaphore);
+void radeon_semaphore_free(struct radeon_device *rdev,
+ struct radeon_semaphore *semaphore);
+
+/*
* GART structures, functions & helpers
*/
struct radeon_mc;
@@ -310,6 +432,7 @@ struct radeon_mc;
#define RADEON_GPU_PAGE_SIZE 4096
#define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1)
#define RADEON_GPU_PAGE_SHIFT 12
+#define RADEON_GPU_PAGE_ALIGN(a) (((a) + RADEON_GPU_PAGE_MASK) & ~RADEON_GPU_PAGE_MASK)
struct radeon_gart {
dma_addr_t table_addr;
@@ -320,7 +443,6 @@ struct radeon_gart {
unsigned table_size;
struct page **pages;
dma_addr_t *pages_addr;
- bool *ttm_alloced;
bool ready;
};
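
RADEON_GPU_PAGE_ALIGN, added above, rounds a byte count up to the 4096-byte GPU page: for example, (5000 + 4095) & ~4095 = 8192, while an already aligned 8192 stays 8192. A one-line illustration (the ib_size variable is hypothetical):

	unsigned ib_size = RADEON_GPU_PAGE_ALIGN(5000);	/* == 8192, i.e. two GPU pages */
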
@@ -434,7 +556,7 @@ union radeon_irq_stat_regs {
struct radeon_irq {
bool installed;
- bool sw_int;
+ bool sw_int[RADEON_NUM_RINGS];
bool crtc_vblank_int[RADEON_MAX_CRTCS];
bool pflip[RADEON_MAX_CRTCS];
wait_queue_head_t vblank_queue;
@@ -444,7 +566,7 @@ struct radeon_irq {
wait_queue_head_t idle_queue;
bool hdmi[RADEON_MAX_HDMI_BLOCKS];
spinlock_t sw_lock;
- int sw_refcount;
+ int sw_refcount[RADEON_NUM_RINGS];
union radeon_irq_stat_regs stat_regs;
spinlock_t pflip_lock[RADEON_MAX_CRTCS];
int pflip_refcount[RADEON_MAX_CRTCS];
@@ -452,22 +574,23 @@ struct radeon_irq {
int radeon_irq_kms_init(struct radeon_device *rdev);
void radeon_irq_kms_fini(struct radeon_device *rdev);
-void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev);
-void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev);
+void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring);
+void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring);
void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc);
void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc);
/*
- * CP & ring.
+ * CP & rings.
*/
+
struct radeon_ib {
- struct list_head list;
+ struct radeon_sa_bo sa_bo;
unsigned idx;
+ uint32_t length_dw;
uint64_t gpu_addr;
- struct radeon_fence *fence;
uint32_t *ptr;
- uint32_t length_dw;
- bool free;
+ struct radeon_fence *fence;
+ unsigned vm_id;
};
/*
@@ -475,20 +598,22 @@ struct radeon_ib {
* mutex protects scheduled_ibs, ready, alloc_bm
*/
struct radeon_ib_pool {
- struct mutex mutex;
- struct radeon_bo *robj;
- struct list_head bogus_ib;
- struct radeon_ib ibs[RADEON_IB_POOL_SIZE];
- bool ready;
- unsigned head_id;
+ struct mutex mutex;
+ struct radeon_sa_manager sa_manager;
+ struct radeon_ib ibs[RADEON_IB_POOL_SIZE];
+ bool ready;
+ unsigned head_id;
};
-struct radeon_cp {
+struct radeon_ring {
struct radeon_bo *ring_obj;
volatile uint32_t *ring;
unsigned rptr;
+ unsigned rptr_offs;
+ unsigned rptr_reg;
unsigned wptr;
unsigned wptr_old;
+ unsigned wptr_reg;
unsigned ring_size;
unsigned ring_free_dw;
int count_dw;
@@ -497,6 +622,61 @@ struct radeon_cp {
uint32_t ptr_mask;
struct mutex mutex;
bool ready;
+ u32 ptr_reg_shift;
+ u32 ptr_reg_mask;
+ u32 nop;
+};
+
+/*
+ * VM
+ */
+struct radeon_vm {
+ struct list_head list;
+ struct list_head va;
+ int id;
+ unsigned last_pfn;
+ u64 pt_gpu_addr;
+ u64 *pt;
+ struct radeon_sa_bo sa_bo;
+ struct mutex mutex;
+ /* last fence for cs using this vm */
+ struct radeon_fence *fence;
+};
+
+struct radeon_vm_funcs {
+ int (*init)(struct radeon_device *rdev);
+ void (*fini)(struct radeon_device *rdev);
+ /* cs mutex must be locked for schedule_ib */
+ int (*bind)(struct radeon_device *rdev, struct radeon_vm *vm, int id);
+ void (*unbind)(struct radeon_device *rdev, struct radeon_vm *vm);
+ void (*tlb_flush)(struct radeon_device *rdev, struct radeon_vm *vm);
+ uint32_t (*page_flags)(struct radeon_device *rdev,
+ struct radeon_vm *vm,
+ uint32_t flags);
+ void (*set_page)(struct radeon_device *rdev, struct radeon_vm *vm,
+ unsigned pfn, uint64_t addr, uint32_t flags);
+};
+
+struct radeon_vm_manager {
+ struct list_head lru_vm;
+ uint32_t use_bitmap;
+ struct radeon_sa_manager sa_manager;
+ uint32_t max_pfn;
+ /* fields constant after init */
+ const struct radeon_vm_funcs *funcs;
+ /* number of VMIDs */
+ unsigned nvm;
+ /* vram base address for page table entries */
+ u64 vram_base_offset;
+ /* is vm enabled? */
+ bool enabled;
+};
+
+/*
+ * file private structure
+ */
+struct radeon_fpriv {
+ struct radeon_vm vm;
};
/*
@@ -506,6 +686,7 @@ struct r600_ih {
struct radeon_bo *ring_obj;
volatile uint32_t *ring;
unsigned rptr;
+ unsigned rptr_offs;
unsigned wptr;
unsigned wptr_old;
unsigned ring_size;
@@ -549,23 +730,29 @@ struct r600_blit {
void r600_blit_suspend(struct radeon_device *rdev);
-int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib);
+int radeon_ib_get(struct radeon_device *rdev, int ring,
+ struct radeon_ib **ib, unsigned size);
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib);
+bool radeon_ib_try_free(struct radeon_device *rdev, struct radeon_ib *ib);
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib);
int radeon_ib_pool_init(struct radeon_device *rdev);
void radeon_ib_pool_fini(struct radeon_device *rdev);
+int radeon_ib_pool_start(struct radeon_device *rdev);
+int radeon_ib_pool_suspend(struct radeon_device *rdev);
int radeon_ib_test(struct radeon_device *rdev);
-extern void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib);
/* Ring access between begin & end cannot sleep */
-void radeon_ring_free_size(struct radeon_device *rdev);
-int radeon_ring_alloc(struct radeon_device *rdev, unsigned ndw);
-int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw);
-void radeon_ring_commit(struct radeon_device *rdev);
-void radeon_ring_unlock_commit(struct radeon_device *rdev);
-void radeon_ring_unlock_undo(struct radeon_device *rdev);
-int radeon_ring_test(struct radeon_device *rdev);
-int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size);
-void radeon_ring_fini(struct radeon_device *rdev);
+int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *cp);
+void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *cp);
+int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
+int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
+void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *cp);
+void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *cp);
+void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *cp);
+int radeon_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
+int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size,
+ unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
+ u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop);
+void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *cp);
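
The reworked prototypes above thread the radeon_ring pointer through the whole lock/write/commit sequence. A hedged sketch of a minimal emit using only these signatures (the two-dword payload is just an illustration, and PACKET3_NOP is assumed to be the usual filler opcode from r600d.h):

	static int emit_nop(struct radeon_device *rdev, struct radeon_ring *ring)
	{
		int r;

		r = radeon_ring_lock(rdev, ring, 2);	/* reserve 2 dwords */
		if (r)
			return r;
		radeon_ring_write(ring, PACKET3(PACKET3_NOP, 0));
		radeon_ring_write(ring, 0);
		radeon_ring_unlock_commit(rdev, ring);	/* advance wptr and unlock */
		return 0;
	}
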
/*
@@ -582,12 +769,12 @@ struct radeon_cs_reloc {
struct radeon_cs_chunk {
uint32_t chunk_id;
uint32_t length_dw;
- int kpage_idx[2];
- uint32_t *kpage[2];
+ int kpage_idx[2];
+ uint32_t *kpage[2];
uint32_t *kdata;
- void __user *user_ptr;
- int last_copied_page;
- int last_page_index;
+ void __user *user_ptr;
+ int last_copied_page;
+ int last_page_index;
};
struct radeon_cs_parser {
@@ -605,14 +792,18 @@ struct radeon_cs_parser {
struct radeon_cs_reloc *relocs;
struct radeon_cs_reloc **relocs_ptr;
struct list_head validated;
+ bool sync_to_ring[RADEON_NUM_RINGS];
/* indices of various chunks */
int chunk_ib_idx;
int chunk_relocs_idx;
+ int chunk_flags_idx;
struct radeon_ib *ib;
void *track;
unsigned family;
int parser_error;
- bool keep_tiling_flags;
+ u32 cs_flags;
+ u32 ring;
+ s32 priority;
};
extern int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx);
@@ -869,11 +1060,20 @@ void radeon_benchmark(struct radeon_device *rdev, int test_number);
* Testing
*/
void radeon_test_moves(struct radeon_device *rdev);
+void radeon_test_ring_sync(struct radeon_device *rdev,
+ struct radeon_ring *cpA,
+ struct radeon_ring *cpB);
+void radeon_test_syncing(struct radeon_device *rdev);
/*
* Debugfs
*/
+struct radeon_debugfs {
+ struct drm_info_list *files;
+ unsigned num_files;
+};
+
int radeon_debugfs_add_files(struct radeon_device *rdev,
struct drm_info_list *files,
unsigned nfiles);
@@ -889,21 +1089,27 @@ struct radeon_asic {
int (*resume)(struct radeon_device *rdev);
int (*suspend)(struct radeon_device *rdev);
void (*vga_set_state)(struct radeon_device *rdev, bool state);
- bool (*gpu_is_lockup)(struct radeon_device *rdev);
+ bool (*gpu_is_lockup)(struct radeon_device *rdev, struct radeon_ring *cp);
int (*asic_reset)(struct radeon_device *rdev);
void (*gart_tlb_flush)(struct radeon_device *rdev);
int (*gart_set_page)(struct radeon_device *rdev, int i, uint64_t addr);
int (*cp_init)(struct radeon_device *rdev, unsigned ring_size);
void (*cp_fini)(struct radeon_device *rdev);
void (*cp_disable)(struct radeon_device *rdev);
- void (*cp_commit)(struct radeon_device *rdev);
void (*ring_start)(struct radeon_device *rdev);
- int (*ring_test)(struct radeon_device *rdev);
- void (*ring_ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
+
+ struct {
+ void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
+ int (*ib_parse)(struct radeon_device *rdev, struct radeon_ib *ib);
+ void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence);
+ void (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp,
+ struct radeon_semaphore *semaphore, bool emit_wait);
+ } ring[RADEON_NUM_RINGS];
+
+ int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp);
int (*irq_set)(struct radeon_device *rdev);
int (*irq_process)(struct radeon_device *rdev);
u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc);
- void (*fence_ring_emit)(struct radeon_device *rdev, struct radeon_fence *fence);
int (*cs_parse)(struct radeon_cs_parser *p);
int (*copy_blit)(struct radeon_device *rdev,
uint64_t src_offset,
@@ -1132,6 +1338,8 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
+int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
@@ -1231,11 +1439,10 @@ struct radeon_device {
struct radeon_mode_info mode_info;
struct radeon_scratch scratch;
struct radeon_mman mman;
- struct radeon_fence_driver fence_drv;
- struct radeon_cp cp;
- /* cayman compute rings */
- struct radeon_cp cp1;
- struct radeon_cp cp2;
+ rwlock_t fence_lock;
+ struct radeon_fence_driver fence_drv[RADEON_NUM_RINGS];
+ struct radeon_semaphore_driver semaphore_drv;
+ struct radeon_ring ring[RADEON_NUM_RINGS];
struct radeon_ib_pool ib_pool;
struct radeon_irq irq;
struct radeon_asic *asic;
@@ -1279,6 +1486,13 @@ struct radeon_device {
struct drm_file *cmask_filp;
/* i2c buses */
struct radeon_i2c_chan *i2c_bus[RADEON_MAX_I2C_BUS];
+ /* debugfs */
+ struct radeon_debugfs debugfs[RADEON_DEBUGFS_MAX_COMPONENTS];
+ unsigned debugfs_count;
+ /* virtual memory */
+ struct radeon_vm_manager vm_manager;
+ /* ring used for bo copies */
+ u32 copy_ring;
};
int radeon_device_init(struct radeon_device *rdev,
@@ -1414,18 +1628,17 @@ void radeon_atombios_fini(struct radeon_device *rdev);
/*
* RING helpers.
*/
-
#if DRM_DEBUG_CODE == 0
-static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
+static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
{
- rdev->cp.ring[rdev->cp.wptr++] = v;
- rdev->cp.wptr &= rdev->cp.ptr_mask;
- rdev->cp.count_dw--;
- rdev->cp.ring_free_dw--;
+ ring->ring[ring->wptr++] = v;
+ ring->wptr &= ring->ptr_mask;
+ ring->count_dw--;
+ ring->ring_free_dw--;
}
#else
/* With debugging this is just too big to inline */
-void radeon_ring_write(struct radeon_device *rdev, uint32_t v);
+void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
#endif
/*
@@ -1437,18 +1650,19 @@ void radeon_ring_write(struct radeon_device *rdev, uint32_t v);
#define radeon_suspend(rdev) (rdev)->asic->suspend((rdev))
#define radeon_cs_parse(p) rdev->asic->cs_parse((p))
#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
-#define radeon_gpu_is_lockup(rdev) (rdev)->asic->gpu_is_lockup((rdev))
+#define radeon_gpu_is_lockup(rdev, cp) (rdev)->asic->gpu_is_lockup((rdev), (cp))
#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart_tlb_flush((rdev))
#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart_set_page((rdev), (i), (p))
-#define radeon_cp_commit(rdev) (rdev)->asic->cp_commit((rdev))
#define radeon_ring_start(rdev) (rdev)->asic->ring_start((rdev))
-#define radeon_ring_test(rdev) (rdev)->asic->ring_test((rdev))
-#define radeon_ring_ib_execute(rdev, ib) (rdev)->asic->ring_ib_execute((rdev), (ib))
+#define radeon_ring_test(rdev, cp) (rdev)->asic->ring_test((rdev), (cp))
+#define radeon_ring_ib_execute(rdev, r, ib) (rdev)->asic->ring[(r)].ib_execute((rdev), (ib))
+#define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)].ib_parse((rdev), (ib))
#define radeon_irq_set(rdev) (rdev)->asic->irq_set((rdev))
#define radeon_irq_process(rdev) (rdev)->asic->irq_process((rdev))
#define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->get_vblank_counter((rdev), (crtc))
-#define radeon_fence_ring_emit(rdev, fence) (rdev)->asic->fence_ring_emit((rdev), (fence))
+#define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)].emit_fence((rdev), (fence))
+#define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)].emit_semaphore((rdev), (cp), (semaphore), (emit_wait))
#define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy_blit((rdev), (s), (d), (np), (f))
#define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy_dma((rdev), (s), (d), (np), (f))
#define radeon_copy(rdev, s, d, np, f) (rdev)->asic->copy((rdev), (s), (d), (np), (f))
@@ -1503,6 +1717,33 @@ extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size);
/*
+ * vm
+ */
+int radeon_vm_manager_init(struct radeon_device *rdev);
+void radeon_vm_manager_fini(struct radeon_device *rdev);
+int radeon_vm_manager_start(struct radeon_device *rdev);
+int radeon_vm_manager_suspend(struct radeon_device *rdev);
+int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
+void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm);
+int radeon_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm);
+void radeon_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm);
+int radeon_vm_bo_update_pte(struct radeon_device *rdev,
+ struct radeon_vm *vm,
+ struct radeon_bo *bo,
+ struct ttm_mem_reg *mem);
+void radeon_vm_bo_invalidate(struct radeon_device *rdev,
+ struct radeon_bo *bo);
+int radeon_vm_bo_add(struct radeon_device *rdev,
+ struct radeon_vm *vm,
+ struct radeon_bo *bo,
+ uint64_t offset,
+ uint32_t flags);
+int radeon_vm_bo_rmv(struct radeon_device *rdev,
+ struct radeon_vm *vm,
+ struct radeon_bo *bo);
+
+
+/*
* R600 vram scratch functions
*/
int r600_vram_scratch_init(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_acpi.c b/drivers/gpu/drm/radeon/radeon_acpi.c
index 3f6636bb2d7f..3516a6081dcf 100644
--- a/drivers/gpu/drm/radeon/radeon_acpi.c
+++ b/drivers/gpu/drm/radeon/radeon_acpi.c
@@ -35,7 +35,8 @@ static int radeon_atif_call(acpi_handle handle)
/* Fail only if calling the method fails and ATIF is supported */
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
- printk(KERN_DEBUG "failed to evaluate ATIF got %s\n", acpi_format_exception(status));
+ DRM_DEBUG_DRIVER("failed to evaluate ATIF got %s\n",
+ acpi_format_exception(status));
kfree(buffer.pointer);
return 1;
}
@@ -50,13 +51,13 @@ int radeon_acpi_init(struct radeon_device *rdev)
acpi_handle handle;
int ret;
- /* No need to proceed if we're sure that ATIF is not supported */
- if (!ASIC_IS_AVIVO(rdev) || !rdev->bios)
- return 0;
-
/* Get the device handle */
handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev);
+ /* No need to proceed if we're sure that ATIF is not supported */
+ if (!ASIC_IS_AVIVO(rdev) || !rdev->bios || !handle)
+ return 0;
+
/* Call the ATIF method */
ret = radeon_atif_call(handle);
if (ret)
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index a2e1eae114ef..36a6192ce862 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -138,14 +138,18 @@ static struct radeon_asic r100_asic = {
.asic_reset = &r100_asic_reset,
.gart_tlb_flush = &r100_pci_gart_tlb_flush,
.gart_set_page = &r100_pci_gart_set_page,
- .cp_commit = &r100_cp_commit,
.ring_start = &r100_ring_start,
.ring_test = &r100_ring_test,
- .ring_ib_execute = &r100_ring_ib_execute,
+ .ring = {
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &r100_ring_ib_execute,
+ .emit_fence = &r100_fence_ring_emit,
+ .emit_semaphore = &r100_semaphore_ring_emit,
+ }
+ },
.irq_set = &r100_irq_set,
.irq_process = &r100_irq_process,
.get_vblank_counter = &r100_get_vblank_counter,
- .fence_ring_emit = &r100_fence_ring_emit,
.cs_parse = &r100_cs_parse,
.copy_blit = &r100_copy_blit,
.copy_dma = NULL,
@@ -186,14 +190,18 @@ static struct radeon_asic r200_asic = {
.asic_reset = &r100_asic_reset,
.gart_tlb_flush = &r100_pci_gart_tlb_flush,
.gart_set_page = &r100_pci_gart_set_page,
- .cp_commit = &r100_cp_commit,
.ring_start = &r100_ring_start,
.ring_test = &r100_ring_test,
- .ring_ib_execute = &r100_ring_ib_execute,
+ .ring = {
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &r100_ring_ib_execute,
+ .emit_fence = &r100_fence_ring_emit,
+ .emit_semaphore = &r100_semaphore_ring_emit,
+ }
+ },
.irq_set = &r100_irq_set,
.irq_process = &r100_irq_process,
.get_vblank_counter = &r100_get_vblank_counter,
- .fence_ring_emit = &r100_fence_ring_emit,
.cs_parse = &r100_cs_parse,
.copy_blit = &r100_copy_blit,
.copy_dma = &r200_copy_dma,
@@ -233,14 +241,18 @@ static struct radeon_asic r300_asic = {
.asic_reset = &r300_asic_reset,
.gart_tlb_flush = &r100_pci_gart_tlb_flush,
.gart_set_page = &r100_pci_gart_set_page,
- .cp_commit = &r100_cp_commit,
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
- .ring_ib_execute = &r100_ring_ib_execute,
+ .ring = {
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &r100_ring_ib_execute,
+ .emit_fence = &r300_fence_ring_emit,
+ .emit_semaphore = &r100_semaphore_ring_emit,
+ }
+ },
.irq_set = &r100_irq_set,
.irq_process = &r100_irq_process,
.get_vblank_counter = &r100_get_vblank_counter,
- .fence_ring_emit = &r300_fence_ring_emit,
.cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
.copy_dma = &r200_copy_dma,
@@ -281,14 +293,18 @@ static struct radeon_asic r300_asic_pcie = {
.asic_reset = &r300_asic_reset,
.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
.gart_set_page = &rv370_pcie_gart_set_page,
- .cp_commit = &r100_cp_commit,
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
- .ring_ib_execute = &r100_ring_ib_execute,
+ .ring = {
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &r100_ring_ib_execute,
+ .emit_fence = &r300_fence_ring_emit,
+ .emit_semaphore = &r100_semaphore_ring_emit,
+ }
+ },
.irq_set = &r100_irq_set,
.irq_process = &r100_irq_process,
.get_vblank_counter = &r100_get_vblank_counter,
- .fence_ring_emit = &r300_fence_ring_emit,
.cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
.copy_dma = &r200_copy_dma,
@@ -328,14 +344,18 @@ static struct radeon_asic r420_asic = {
.asic_reset = &r300_asic_reset,
.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
.gart_set_page = &rv370_pcie_gart_set_page,
- .cp_commit = &r100_cp_commit,
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
- .ring_ib_execute = &r100_ring_ib_execute,
+ .ring = {
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &r100_ring_ib_execute,
+ .emit_fence = &r300_fence_ring_emit,
+ .emit_semaphore = &r100_semaphore_ring_emit,
+ }
+ },
.irq_set = &r100_irq_set,
.irq_process = &r100_irq_process,
.get_vblank_counter = &r100_get_vblank_counter,
- .fence_ring_emit = &r300_fence_ring_emit,
.cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
.copy_dma = &r200_copy_dma,
@@ -376,14 +396,18 @@ static struct radeon_asic rs400_asic = {
.asic_reset = &r300_asic_reset,
.gart_tlb_flush = &rs400_gart_tlb_flush,
.gart_set_page = &rs400_gart_set_page,
- .cp_commit = &r100_cp_commit,
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
- .ring_ib_execute = &r100_ring_ib_execute,
+ .ring = {
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &r100_ring_ib_execute,
+ .emit_fence = &r300_fence_ring_emit,
+ .emit_semaphore = &r100_semaphore_ring_emit,
+ }
+ },
.irq_set = &r100_irq_set,
.irq_process = &r100_irq_process,
.get_vblank_counter = &r100_get_vblank_counter,
- .fence_ring_emit = &r300_fence_ring_emit,
.cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
.copy_dma = &r200_copy_dma,
@@ -424,14 +448,18 @@ static struct radeon_asic rs600_asic = {
.asic_reset = &rs600_asic_reset,
.gart_tlb_flush = &rs600_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
- .cp_commit = &r100_cp_commit,
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
- .ring_ib_execute = &r100_ring_ib_execute,
+ .ring = {
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &r100_ring_ib_execute,
+ .emit_fence = &r300_fence_ring_emit,
+ .emit_semaphore = &r100_semaphore_ring_emit,
+ }
+ },
.irq_set = &rs600_irq_set,
.irq_process = &rs600_irq_process,
.get_vblank_counter = &rs600_get_vblank_counter,
- .fence_ring_emit = &r300_fence_ring_emit,
.cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
.copy_dma = &r200_copy_dma,
@@ -472,14 +500,18 @@ static struct radeon_asic rs690_asic = {
.asic_reset = &rs600_asic_reset,
.gart_tlb_flush = &rs400_gart_tlb_flush,
.gart_set_page = &rs400_gart_set_page,
- .cp_commit = &r100_cp_commit,
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
- .ring_ib_execute = &r100_ring_ib_execute,
+ .ring = {
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &r100_ring_ib_execute,
+ .emit_fence = &r300_fence_ring_emit,
+ .emit_semaphore = &r100_semaphore_ring_emit,
+ }
+ },
.irq_set = &rs600_irq_set,
.irq_process = &rs600_irq_process,
.get_vblank_counter = &rs600_get_vblank_counter,
- .fence_ring_emit = &r300_fence_ring_emit,
.cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
.copy_dma = &r200_copy_dma,
@@ -520,14 +552,18 @@ static struct radeon_asic rv515_asic = {
.asic_reset = &rs600_asic_reset,
.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
.gart_set_page = &rv370_pcie_gart_set_page,
- .cp_commit = &r100_cp_commit,
.ring_start = &rv515_ring_start,
.ring_test = &r100_ring_test,
- .ring_ib_execute = &r100_ring_ib_execute,
+ .ring = {
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &r100_ring_ib_execute,
+ .emit_fence = &r300_fence_ring_emit,
+ .emit_semaphore = &r100_semaphore_ring_emit,
+ }
+ },
.irq_set = &rs600_irq_set,
.irq_process = &rs600_irq_process,
.get_vblank_counter = &rs600_get_vblank_counter,
- .fence_ring_emit = &r300_fence_ring_emit,
.cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
.copy_dma = &r200_copy_dma,
@@ -568,14 +604,18 @@ static struct radeon_asic r520_asic = {
.asic_reset = &rs600_asic_reset,
.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
.gart_set_page = &rv370_pcie_gart_set_page,
- .cp_commit = &r100_cp_commit,
.ring_start = &rv515_ring_start,
.ring_test = &r100_ring_test,
- .ring_ib_execute = &r100_ring_ib_execute,
+ .ring = {
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &r100_ring_ib_execute,
+ .emit_fence = &r300_fence_ring_emit,
+ .emit_semaphore = &r100_semaphore_ring_emit,
+ }
+ },
.irq_set = &rs600_irq_set,
.irq_process = &rs600_irq_process,
.get_vblank_counter = &rs600_get_vblank_counter,
- .fence_ring_emit = &r300_fence_ring_emit,
.cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
.copy_dma = &r200_copy_dma,
@@ -611,18 +651,22 @@ static struct radeon_asic r600_asic = {
.fini = &r600_fini,
.suspend = &r600_suspend,
.resume = &r600_resume,
- .cp_commit = &r600_cp_commit,
.vga_set_state = &r600_vga_set_state,
.gpu_is_lockup = &r600_gpu_is_lockup,
.asic_reset = &r600_asic_reset,
.gart_tlb_flush = &r600_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
.ring_test = &r600_ring_test,
- .ring_ib_execute = &r600_ring_ib_execute,
+ .ring = {
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &r600_ring_ib_execute,
+ .emit_fence = &r600_fence_ring_emit,
+ .emit_semaphore = &r600_semaphore_ring_emit,
+ }
+ },
.irq_set = &r600_irq_set,
.irq_process = &r600_irq_process,
.get_vblank_counter = &rs600_get_vblank_counter,
- .fence_ring_emit = &r600_fence_ring_emit,
.cs_parse = &r600_cs_parse,
.copy_blit = &r600_copy_blit,
.copy_dma = NULL,
@@ -658,18 +702,22 @@ static struct radeon_asic rs780_asic = {
.fini = &r600_fini,
.suspend = &r600_suspend,
.resume = &r600_resume,
- .cp_commit = &r600_cp_commit,
.gpu_is_lockup = &r600_gpu_is_lockup,
.vga_set_state = &r600_vga_set_state,
.asic_reset = &r600_asic_reset,
.gart_tlb_flush = &r600_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
.ring_test = &r600_ring_test,
- .ring_ib_execute = &r600_ring_ib_execute,
+ .ring = {
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &r600_ring_ib_execute,
+ .emit_fence = &r600_fence_ring_emit,
+ .emit_semaphore = &r600_semaphore_ring_emit,
+ }
+ },
.irq_set = &r600_irq_set,
.irq_process = &r600_irq_process,
.get_vblank_counter = &rs600_get_vblank_counter,
- .fence_ring_emit = &r600_fence_ring_emit,
.cs_parse = &r600_cs_parse,
.copy_blit = &r600_copy_blit,
.copy_dma = NULL,
@@ -705,18 +753,22 @@ static struct radeon_asic rv770_asic = {
.fini = &rv770_fini,
.suspend = &rv770_suspend,
.resume = &rv770_resume,
- .cp_commit = &r600_cp_commit,
.asic_reset = &r600_asic_reset,
.gpu_is_lockup = &r600_gpu_is_lockup,
.vga_set_state = &r600_vga_set_state,
.gart_tlb_flush = &r600_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
.ring_test = &r600_ring_test,
- .ring_ib_execute = &r600_ring_ib_execute,
+ .ring = {
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &r600_ring_ib_execute,
+ .emit_fence = &r600_fence_ring_emit,
+ .emit_semaphore = &r600_semaphore_ring_emit,
+ }
+ },
.irq_set = &r600_irq_set,
.irq_process = &r600_irq_process,
.get_vblank_counter = &rs600_get_vblank_counter,
- .fence_ring_emit = &r600_fence_ring_emit,
.cs_parse = &r600_cs_parse,
.copy_blit = &r600_copy_blit,
.copy_dma = NULL,
@@ -752,18 +804,22 @@ static struct radeon_asic evergreen_asic = {
.fini = &evergreen_fini,
.suspend = &evergreen_suspend,
.resume = &evergreen_resume,
- .cp_commit = &r600_cp_commit,
.gpu_is_lockup = &evergreen_gpu_is_lockup,
.asic_reset = &evergreen_asic_reset,
.vga_set_state = &r600_vga_set_state,
.gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
.ring_test = &r600_ring_test,
- .ring_ib_execute = &evergreen_ring_ib_execute,
+ .ring = {
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &evergreen_ring_ib_execute,
+ .emit_fence = &r600_fence_ring_emit,
+ .emit_semaphore = &r600_semaphore_ring_emit,
+ }
+ },
.irq_set = &evergreen_irq_set,
.irq_process = &evergreen_irq_process,
.get_vblank_counter = &evergreen_get_vblank_counter,
- .fence_ring_emit = &r600_fence_ring_emit,
.cs_parse = &evergreen_cs_parse,
.copy_blit = &r600_copy_blit,
.copy_dma = NULL,
@@ -799,18 +855,22 @@ static struct radeon_asic sumo_asic = {
.fini = &evergreen_fini,
.suspend = &evergreen_suspend,
.resume = &evergreen_resume,
- .cp_commit = &r600_cp_commit,
.gpu_is_lockup = &evergreen_gpu_is_lockup,
.asic_reset = &evergreen_asic_reset,
.vga_set_state = &r600_vga_set_state,
.gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
.ring_test = &r600_ring_test,
- .ring_ib_execute = &evergreen_ring_ib_execute,
+ .ring = {
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &evergreen_ring_ib_execute,
+ .emit_fence = &r600_fence_ring_emit,
+ .emit_semaphore = &r600_semaphore_ring_emit,
+ }
+ },
.irq_set = &evergreen_irq_set,
.irq_process = &evergreen_irq_process,
.get_vblank_counter = &evergreen_get_vblank_counter,
- .fence_ring_emit = &r600_fence_ring_emit,
.cs_parse = &evergreen_cs_parse,
.copy_blit = &r600_copy_blit,
.copy_dma = NULL,
@@ -846,18 +906,22 @@ static struct radeon_asic btc_asic = {
.fini = &evergreen_fini,
.suspend = &evergreen_suspend,
.resume = &evergreen_resume,
- .cp_commit = &r600_cp_commit,
.gpu_is_lockup = &evergreen_gpu_is_lockup,
.asic_reset = &evergreen_asic_reset,
.vga_set_state = &r600_vga_set_state,
.gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
.ring_test = &r600_ring_test,
- .ring_ib_execute = &evergreen_ring_ib_execute,
+ .ring = {
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &evergreen_ring_ib_execute,
+ .emit_fence = &r600_fence_ring_emit,
+ .emit_semaphore = &r600_semaphore_ring_emit,
+ }
+ },
.irq_set = &evergreen_irq_set,
.irq_process = &evergreen_irq_process,
.get_vblank_counter = &evergreen_get_vblank_counter,
- .fence_ring_emit = &r600_fence_ring_emit,
.cs_parse = &evergreen_cs_parse,
.copy_blit = &r600_copy_blit,
.copy_dma = NULL,
@@ -888,23 +952,50 @@ static struct radeon_asic btc_asic = {
.post_page_flip = &evergreen_post_page_flip,
};
+static const struct radeon_vm_funcs cayman_vm_funcs = {
+ .init = &cayman_vm_init,
+ .fini = &cayman_vm_fini,
+ .bind = &cayman_vm_bind,
+ .unbind = &cayman_vm_unbind,
+ .tlb_flush = &cayman_vm_tlb_flush,
+ .page_flags = &cayman_vm_page_flags,
+ .set_page = &cayman_vm_set_page,
+};
+
static struct radeon_asic cayman_asic = {
.init = &cayman_init,
.fini = &cayman_fini,
.suspend = &cayman_suspend,
.resume = &cayman_resume,
- .cp_commit = &r600_cp_commit,
.gpu_is_lockup = &cayman_gpu_is_lockup,
.asic_reset = &cayman_asic_reset,
.vga_set_state = &r600_vga_set_state,
.gart_tlb_flush = &cayman_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
.ring_test = &r600_ring_test,
- .ring_ib_execute = &evergreen_ring_ib_execute,
+ .ring = {
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &cayman_ring_ib_execute,
+ .ib_parse = &evergreen_ib_parse,
+ .emit_fence = &cayman_fence_ring_emit,
+ .emit_semaphore = &r600_semaphore_ring_emit,
+ },
+ [CAYMAN_RING_TYPE_CP1_INDEX] = {
+ .ib_execute = &cayman_ring_ib_execute,
+ .ib_parse = &evergreen_ib_parse,
+ .emit_fence = &cayman_fence_ring_emit,
+ .emit_semaphore = &r600_semaphore_ring_emit,
+ },
+ [CAYMAN_RING_TYPE_CP2_INDEX] = {
+ .ib_execute = &cayman_ring_ib_execute,
+ .ib_parse = &evergreen_ib_parse,
+ .emit_fence = &cayman_fence_ring_emit,
+ .emit_semaphore = &r600_semaphore_ring_emit,
+ }
+ },
.irq_set = &evergreen_irq_set,
.irq_process = &evergreen_irq_process,
.get_vblank_counter = &evergreen_get_vblank_counter,
- .fence_ring_emit = &r600_fence_ring_emit,
.cs_parse = &evergreen_cs_parse,
.copy_blit = &r600_copy_blit,
.copy_dma = NULL,
@@ -945,6 +1036,9 @@ int radeon_asic_init(struct radeon_device *rdev)
else
rdev->num_crtc = 2;
+ /* set the ring used for bo copies */
+ rdev->copy_ring = RADEON_RING_TYPE_GFX_INDEX;
+
switch (rdev->family) {
case CHIP_R100:
case CHIP_RV100:
@@ -1050,6 +1144,7 @@ int radeon_asic_init(struct radeon_device *rdev)
rdev->asic = &cayman_asic;
/* set num crtcs */
rdev->num_crtc = 6;
+ rdev->vm_manager.funcs = &cayman_vm_funcs;
break;
default:
/* FIXME: not supported yet */
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 59914842a729..6304aef0d9b2 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -58,17 +58,20 @@ void r100_fini(struct radeon_device *rdev);
int r100_suspend(struct radeon_device *rdev);
int r100_resume(struct radeon_device *rdev);
void r100_vga_set_state(struct radeon_device *rdev, bool state);
-bool r100_gpu_is_lockup(struct radeon_device *rdev);
+bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
int r100_asic_reset(struct radeon_device *rdev);
u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc);
void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
-void r100_cp_commit(struct radeon_device *rdev);
void r100_ring_start(struct radeon_device *rdev);
int r100_irq_set(struct radeon_device *rdev);
int r100_irq_process(struct radeon_device *rdev);
void r100_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
+void r100_semaphore_ring_emit(struct radeon_device *rdev,
+ struct radeon_ring *cp,
+ struct radeon_semaphore *semaphore,
+ bool emit_wait);
int r100_cs_parse(struct radeon_cs_parser *p);
void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg);
@@ -83,7 +86,7 @@ int r100_set_surface_reg(struct radeon_device *rdev, int reg,
void r100_clear_surface_reg(struct radeon_device *rdev, int reg);
void r100_bandwidth_update(struct radeon_device *rdev);
void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
-int r100_ring_test(struct radeon_device *rdev);
+int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
void r100_hpd_init(struct radeon_device *rdev);
void r100_hpd_fini(struct radeon_device *rdev);
bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
@@ -101,12 +104,12 @@ void r100_pci_gart_disable(struct radeon_device *rdev);
int r100_debugfs_mc_info_init(struct radeon_device *rdev);
int r100_gui_wait_for_idle(struct radeon_device *rdev);
void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup,
- struct radeon_cp *cp);
+ struct radeon_ring *cp);
bool r100_gpu_cp_is_lockup(struct radeon_device *rdev,
struct r100_gpu_lockup *lockup,
- struct radeon_cp *cp);
+ struct radeon_ring *cp);
void r100_ib_fini(struct radeon_device *rdev);
-int r100_ib_init(struct radeon_device *rdev);
+int r100_ib_test(struct radeon_device *rdev);
void r100_irq_disable(struct radeon_device *rdev);
void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save);
void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save);
@@ -154,7 +157,7 @@ extern int r300_init(struct radeon_device *rdev);
extern void r300_fini(struct radeon_device *rdev);
extern int r300_suspend(struct radeon_device *rdev);
extern int r300_resume(struct radeon_device *rdev);
-extern bool r300_gpu_is_lockup(struct radeon_device *rdev);
+extern bool r300_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
extern int r300_asic_reset(struct radeon_device *rdev);
extern void r300_ring_start(struct radeon_device *rdev);
extern void r300_fence_ring_emit(struct radeon_device *rdev,
@@ -293,22 +296,25 @@ int r600_resume(struct radeon_device *rdev);
void r600_vga_set_state(struct radeon_device *rdev, bool state);
int r600_wb_init(struct radeon_device *rdev);
void r600_wb_fini(struct radeon_device *rdev);
-void r600_cp_commit(struct radeon_device *rdev);
void r600_pcie_gart_tlb_flush(struct radeon_device *rdev);
uint32_t r600_pciep_rreg(struct radeon_device *rdev, uint32_t reg);
void r600_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
int r600_cs_parse(struct radeon_cs_parser *p);
void r600_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
-bool r600_gpu_is_lockup(struct radeon_device *rdev);
+void r600_semaphore_ring_emit(struct radeon_device *rdev,
+ struct radeon_ring *cp,
+ struct radeon_semaphore *semaphore,
+ bool emit_wait);
+bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
int r600_asic_reset(struct radeon_device *rdev);
int r600_set_surface_reg(struct radeon_device *rdev, int reg,
uint32_t tiling_flags, uint32_t pitch,
uint32_t offset, uint32_t obj_size);
void r600_clear_surface_reg(struct radeon_device *rdev, int reg);
-int r600_ib_test(struct radeon_device *rdev);
+int r600_ib_test(struct radeon_device *rdev, int ring);
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
-int r600_ring_test(struct radeon_device *rdev);
+int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
int r600_copy_blit(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages, struct radeon_fence *fence);
@@ -328,7 +334,7 @@ extern int r600_get_pcie_lanes(struct radeon_device *rdev);
bool r600_card_posted(struct radeon_device *rdev);
void r600_cp_stop(struct radeon_device *rdev);
int r600_cp_start(struct radeon_device *rdev);
-void r600_ring_init(struct radeon_device *rdev, unsigned ring_size);
+void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size);
int r600_cp_resume(struct radeon_device *rdev);
void r600_cp_fini(struct radeon_device *rdev);
int r600_count_pipe_bits(uint32_t val);
@@ -397,7 +403,7 @@ int evergreen_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);
int evergreen_suspend(struct radeon_device *rdev);
int evergreen_resume(struct radeon_device *rdev);
-bool evergreen_gpu_is_lockup(struct radeon_device *rdev);
+bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
int evergreen_asic_reset(struct radeon_device *rdev);
void evergreen_bandwidth_update(struct radeon_device *rdev);
void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
@@ -423,12 +429,26 @@ int evergreen_blit_init(struct radeon_device *rdev);
/*
* cayman
*/
+void cayman_fence_ring_emit(struct radeon_device *rdev,
+ struct radeon_fence *fence);
void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev);
int cayman_init(struct radeon_device *rdev);
void cayman_fini(struct radeon_device *rdev);
int cayman_suspend(struct radeon_device *rdev);
int cayman_resume(struct radeon_device *rdev);
-bool cayman_gpu_is_lockup(struct radeon_device *rdev);
+bool cayman_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
int cayman_asic_reset(struct radeon_device *rdev);
+void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
+int cayman_vm_init(struct radeon_device *rdev);
+void cayman_vm_fini(struct radeon_device *rdev);
+int cayman_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm, int id);
+void cayman_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm);
+void cayman_vm_tlb_flush(struct radeon_device *rdev, struct radeon_vm *vm);
+uint32_t cayman_vm_page_flags(struct radeon_device *rdev,
+ struct radeon_vm *vm,
+ uint32_t flags);
+void cayman_vm_set_page(struct radeon_device *rdev, struct radeon_vm *vm,
+ unsigned pfn, uint64_t addr, uint32_t flags);
+int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index d24baf30efcb..5082d17d14dc 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -2560,7 +2560,11 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
rdev->pm.current_clock_mode_index = 0;
- rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
+ if (rdev->pm.default_power_state_index >= 0)
+ rdev->pm.current_vddc =
+ rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
+ else
+ rdev->pm.current_vddc = 0;
}
void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable)
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
index 17e1a9b2d8fb..815f2341ab94 100644
--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -43,7 +43,7 @@ static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size,
start_jiffies = jiffies;
for (i = 0; i < n; i++) {
- r = radeon_fence_create(rdev, &fence);
+ r = radeon_fence_create(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
if (r)
return r;
@@ -229,21 +229,21 @@ void radeon_benchmark(struct radeon_device *rdev, int test_number)
break;
case 6:
/* GTT to VRAM, buffer size sweep, common modes */
- for (i = 1; i < RADEON_BENCHMARK_COMMON_MODES_N; i++)
+ for (i = 0; i < RADEON_BENCHMARK_COMMON_MODES_N; i++)
radeon_benchmark_move(rdev, common_modes[i],
RADEON_GEM_DOMAIN_GTT,
RADEON_GEM_DOMAIN_VRAM);
break;
case 7:
/* VRAM to GTT, buffer size sweep, common modes */
- for (i = 1; i < RADEON_BENCHMARK_COMMON_MODES_N; i++)
+ for (i = 0; i < RADEON_BENCHMARK_COMMON_MODES_N; i++)
radeon_benchmark_move(rdev, common_modes[i],
RADEON_GEM_DOMAIN_VRAM,
RADEON_GEM_DOMAIN_GTT);
break;
case 8:
/* VRAM to VRAM, buffer size sweep, common modes */
- for (i = 1; i < RADEON_BENCHMARK_COMMON_MODES_N; i++)
+ for (i = 0; i < RADEON_BENCHMARK_COMMON_MODES_N; i++)
radeon_benchmark_move(rdev, common_modes[i],
RADEON_GEM_DOMAIN_VRAM,
RADEON_GEM_DOMAIN_VRAM);
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 29afd71e0840..435a3d970ab8 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -58,7 +58,7 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
duplicate = false;
r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
- for (j = 0; j < p->nrelocs; j++) {
+ for (j = 0; j < i; j++) {
if (r->handle == p->relocs[j].handle) {
p->relocs_ptr[i] = &p->relocs[j];
duplicate = true;
@@ -84,16 +84,75 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
p->relocs[i].flags = r->flags;
radeon_bo_list_add_object(&p->relocs[i].lobj,
&p->validated);
- }
+
+ if (p->relocs[i].robj->tbo.sync_obj && !(r->flags & RADEON_RELOC_DONT_SYNC)) {
+ struct radeon_fence *fence = p->relocs[i].robj->tbo.sync_obj;
+ if (!radeon_fence_signaled(fence)) {
+ p->sync_to_ring[fence->ring] = true;
+ }
+ }
+ } else
+ p->relocs[i].handle = 0;
}
return radeon_bo_list_validate(&p->validated);
}
+static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority)
+{
+ p->priority = priority;
+
+ switch (ring) {
+ default:
+ DRM_ERROR("unknown ring id: %d\n", ring);
+ return -EINVAL;
+ case RADEON_CS_RING_GFX:
+ p->ring = RADEON_RING_TYPE_GFX_INDEX;
+ break;
+ case RADEON_CS_RING_COMPUTE:
+ /* for now, submit compute on the gfx ring */
+ p->ring = RADEON_RING_TYPE_GFX_INDEX;
+ break;
+ }
+ return 0;
+}
+
+static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
+{
+ int i, r;
+
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ /* no need to sync to our own or unused rings */
+ if (i == p->ring || !p->sync_to_ring[i] || !p->rdev->ring[i].ready)
+ continue;
+
+ if (!p->ib->fence->semaphore) {
+ r = radeon_semaphore_create(p->rdev, &p->ib->fence->semaphore);
+ if (r)
+ return r;
+ }
+
+ r = radeon_ring_lock(p->rdev, &p->rdev->ring[i], 3);
+ if (r)
+ return r;
+ radeon_semaphore_emit_signal(p->rdev, i, p->ib->fence->semaphore);
+ radeon_ring_unlock_commit(p->rdev, &p->rdev->ring[i]);
+
+ r = radeon_ring_lock(p->rdev, &p->rdev->ring[p->ring], 3);
+ if (r)
+ return r;
+ radeon_semaphore_emit_wait(p->rdev, p->ring, p->ib->fence->semaphore);
+ radeon_ring_unlock_commit(p->rdev, &p->rdev->ring[p->ring]);
+ }
+ return 0;
+}
+
int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
{
struct drm_radeon_cs *cs = data;
uint64_t *chunk_array_ptr;
- unsigned size, i, flags = 0;
+ unsigned size, i;
+ u32 ring = RADEON_CS_RING_GFX;
+ s32 priority = 0;
if (!cs->num_chunks) {
return 0;
@@ -103,6 +162,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
p->idx = 0;
p->chunk_ib_idx = -1;
p->chunk_relocs_idx = -1;
+ p->chunk_flags_idx = -1;
p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL);
if (p->chunks_array == NULL) {
return -ENOMEM;
@@ -112,6 +172,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
sizeof(uint64_t)*cs->num_chunks)) {
return -EFAULT;
}
+ p->cs_flags = 0;
p->nchunks = cs->num_chunks;
p->chunks = kcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL);
if (p->chunks == NULL) {
@@ -140,16 +201,19 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
if (p->chunks[i].length_dw == 0)
return -EINVAL;
}
- if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS &&
- !p->chunks[i].length_dw) {
- return -EINVAL;
+ if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
+ p->chunk_flags_idx = i;
+ /* zero length flags aren't useful */
+ if (p->chunks[i].length_dw == 0)
+ return -EINVAL;
}
p->chunks[i].length_dw = user_chunk.length_dw;
p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data;
cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data;
- if (p->chunks[i].chunk_id != RADEON_CHUNK_ID_IB) {
+ if ((p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) ||
+ (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS)) {
size = p->chunks[i].length_dw * sizeof(uint32_t);
p->chunks[i].kdata = kmalloc(size, GFP_KERNEL);
if (p->chunks[i].kdata == NULL) {
@@ -160,29 +224,58 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
return -EFAULT;
}
if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
- flags = p->chunks[i].kdata[0];
+ p->cs_flags = p->chunks[i].kdata[0];
+ if (p->chunks[i].length_dw > 1)
+ ring = p->chunks[i].kdata[1];
+ if (p->chunks[i].length_dw > 2)
+ priority = (s32)p->chunks[i].kdata[2];
}
- } else {
- p->chunks[i].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
- p->chunks[i].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (p->chunks[i].kpage[0] == NULL || p->chunks[i].kpage[1] == NULL) {
- kfree(p->chunks[i].kpage[0]);
- kfree(p->chunks[i].kpage[1]);
- return -ENOMEM;
- }
- p->chunks[i].kpage_idx[0] = -1;
- p->chunks[i].kpage_idx[1] = -1;
- p->chunks[i].last_copied_page = -1;
- p->chunks[i].last_page_index = ((p->chunks[i].length_dw * 4) - 1) / PAGE_SIZE;
}
}
- if (p->chunks[p->chunk_ib_idx].length_dw > (16 * 1024)) {
- DRM_ERROR("cs IB too big: %d\n",
- p->chunks[p->chunk_ib_idx].length_dw);
+
+ if ((p->cs_flags & RADEON_CS_USE_VM) &&
+ !p->rdev->vm_manager.enabled) {
+ DRM_ERROR("VM not active on asic!\n");
+ if (p->chunk_relocs_idx != -1)
+ kfree(p->chunks[p->chunk_relocs_idx].kdata);
+ if (p->chunk_flags_idx != -1)
+ kfree(p->chunks[p->chunk_flags_idx].kdata);
return -EINVAL;
}
- p->keep_tiling_flags = (flags & RADEON_CS_KEEP_TILING_FLAGS) != 0;
+ if (radeon_cs_get_ring(p, ring, priority)) {
+ if (p->chunk_relocs_idx != -1)
+ kfree(p->chunks[p->chunk_relocs_idx].kdata);
+ if (p->chunk_flags_idx != -1)
+ kfree(p->chunks[p->chunk_flags_idx].kdata);
+ return -EINVAL;
+ }
+
+
+ /* deal with non-vm */
+ if ((p->chunk_ib_idx != -1) &&
+ ((p->cs_flags & RADEON_CS_USE_VM) == 0) &&
+ (p->chunks[p->chunk_ib_idx].chunk_id == RADEON_CHUNK_ID_IB)) {
+ if (p->chunks[p->chunk_ib_idx].length_dw > (16 * 1024)) {
+ DRM_ERROR("cs IB too big: %d\n",
+ p->chunks[p->chunk_ib_idx].length_dw);
+ return -EINVAL;
+ }
+ p->chunks[p->chunk_ib_idx].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ p->chunks[p->chunk_ib_idx].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (p->chunks[p->chunk_ib_idx].kpage[0] == NULL ||
+ p->chunks[p->chunk_ib_idx].kpage[1] == NULL) {
+ kfree(p->chunks[p->chunk_ib_idx].kpage[0]);
+ kfree(p->chunks[p->chunk_ib_idx].kpage[1]);
+ return -ENOMEM;
+ }
+ p->chunks[p->chunk_ib_idx].kpage_idx[0] = -1;
+ p->chunks[p->chunk_ib_idx].kpage_idx[1] = -1;
+ p->chunks[p->chunk_ib_idx].last_copied_page = -1;
+ p->chunks[p->chunk_ib_idx].last_page_index =
+ ((p->chunks[p->chunk_ib_idx].length_dw * 4) - 1) / PAGE_SIZE;
+ }
+
return 0;
}
@@ -224,11 +317,139 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
radeon_ib_free(parser->rdev, &parser->ib);
}
+static int radeon_cs_ib_chunk(struct radeon_device *rdev,
+ struct radeon_cs_parser *parser)
+{
+ struct radeon_cs_chunk *ib_chunk;
+ int r;
+
+ if (parser->chunk_ib_idx == -1)
+ return 0;
+
+ if (parser->cs_flags & RADEON_CS_USE_VM)
+ return 0;
+
+ ib_chunk = &parser->chunks[parser->chunk_ib_idx];
+ /* Copy the packet into the IB, the parser will read from the
+ * input memory (cached) and write to the IB (which can be
+ * uncached).
+ */
+ r = radeon_ib_get(rdev, parser->ring, &parser->ib,
+ ib_chunk->length_dw * 4);
+ if (r) {
+ DRM_ERROR("Failed to get ib !\n");
+ return r;
+ }
+ parser->ib->length_dw = ib_chunk->length_dw;
+ r = radeon_cs_parse(parser);
+ if (r || parser->parser_error) {
+ DRM_ERROR("Invalid command stream !\n");
+ return r;
+ }
+ r = radeon_cs_finish_pages(parser);
+ if (r) {
+ DRM_ERROR("Invalid command stream !\n");
+ return r;
+ }
+ r = radeon_cs_sync_rings(parser);
+ if (r) {
+ DRM_ERROR("Failed to synchronize rings !\n");
+ }
+ parser->ib->vm_id = 0;
+ r = radeon_ib_schedule(rdev, parser->ib);
+ if (r) {
+ DRM_ERROR("Failed to schedule IB !\n");
+ }
+ return 0;
+}
+
+static int radeon_bo_vm_update_pte(struct radeon_cs_parser *parser,
+ struct radeon_vm *vm)
+{
+ struct radeon_bo_list *lobj;
+ struct radeon_bo *bo;
+ int r;
+
+ list_for_each_entry(lobj, &parser->validated, tv.head) {
+ bo = lobj->bo;
+ r = radeon_vm_bo_update_pte(parser->rdev, vm, bo, &bo->tbo.mem);
+ if (r) {
+ return r;
+ }
+ }
+ return 0;
+}
+
+static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
+ struct radeon_cs_parser *parser)
+{
+ struct radeon_cs_chunk *ib_chunk;
+ struct radeon_fpriv *fpriv = parser->filp->driver_priv;
+ struct radeon_vm *vm = &fpriv->vm;
+ int r;
+
+ if (parser->chunk_ib_idx == -1)
+ return 0;
+
+ if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
+ return 0;
+
+ ib_chunk = &parser->chunks[parser->chunk_ib_idx];
+ if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
+ DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
+ return -EINVAL;
+ }
+ r = radeon_ib_get(rdev, parser->ring, &parser->ib,
+ ib_chunk->length_dw * 4);
+ if (r) {
+ DRM_ERROR("Failed to get ib !\n");
+ return r;
+ }
+ parser->ib->length_dw = ib_chunk->length_dw;
+ /* Copy the packet into the IB */
+ if (DRM_COPY_FROM_USER(parser->ib->ptr, ib_chunk->user_ptr,
+ ib_chunk->length_dw * 4)) {
+ return -EFAULT;
+ }
+ r = radeon_ring_ib_parse(rdev, parser->ring, parser->ib);
+ if (r) {
+ return r;
+ }
+
+ mutex_lock(&vm->mutex);
+ r = radeon_vm_bind(rdev, vm);
+ if (r) {
+ goto out;
+ }
+ r = radeon_bo_vm_update_pte(parser, vm);
+ if (r) {
+ goto out;
+ }
+ r = radeon_cs_sync_rings(parser);
+ if (r) {
+ DRM_ERROR("Failed to synchronize rings !\n");
+ }
+ parser->ib->vm_id = vm->id;
+ /* ib pool is bound at 0 in virtual address space so gpu_addr is the
+ * offset inside the pool bo
+ */
+ parser->ib->gpu_addr = parser->ib->sa_bo.offset;
+ r = radeon_ib_schedule(rdev, parser->ib);
+out:
+ if (!r) {
+ if (vm->fence) {
+ radeon_fence_unref(&vm->fence);
+ }
+ vm->fence = radeon_fence_ref(parser->ib->fence);
+ }
+ mutex_unlock(&fpriv->vm.mutex);
+ return r;
+}
+
int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
struct radeon_device *rdev = dev->dev_private;
struct radeon_cs_parser parser;
- struct radeon_cs_chunk *ib_chunk;
int r;
radeon_mutex_lock(&rdev->cs_mutex);
@@ -245,13 +466,6 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
radeon_mutex_unlock(&rdev->cs_mutex);
return r;
}
- r = radeon_ib_get(rdev, &parser.ib);
- if (r) {
- DRM_ERROR("Failed to get ib !\n");
- radeon_cs_parser_fini(&parser, r);
- radeon_mutex_unlock(&rdev->cs_mutex);
- return r;
- }
r = radeon_cs_parser_relocs(&parser);
if (r) {
if (r != -ERESTARTSYS)
@@ -260,29 +474,15 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
radeon_mutex_unlock(&rdev->cs_mutex);
return r;
}
- /* Copy the packet into the IB, the parser will read from the
- * input memory (cached) and write to the IB (which can be
- * uncached). */
- ib_chunk = &parser.chunks[parser.chunk_ib_idx];
- parser.ib->length_dw = ib_chunk->length_dw;
- r = radeon_cs_parse(&parser);
- if (r || parser.parser_error) {
- DRM_ERROR("Invalid command stream !\n");
- radeon_cs_parser_fini(&parser, r);
- radeon_mutex_unlock(&rdev->cs_mutex);
- return r;
- }
- r = radeon_cs_finish_pages(&parser);
+ r = radeon_cs_ib_chunk(rdev, &parser);
if (r) {
- DRM_ERROR("Invalid command stream !\n");
- radeon_cs_parser_fini(&parser, r);
- radeon_mutex_unlock(&rdev->cs_mutex);
- return r;
+ goto out;
}
- r = radeon_ib_schedule(rdev, parser.ib);
+ r = radeon_cs_ib_vm_chunk(rdev, &parser);
if (r) {
- DRM_ERROR("Failed to schedule IB !\n");
+ goto out;
}
+out:
radeon_cs_parser_fini(&parser, r);
radeon_mutex_unlock(&rdev->cs_mutex);
return r;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index c4d00a171411..0afb13bd8dca 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -224,8 +224,11 @@ int radeon_wb_init(struct radeon_device *rdev)
if (radeon_no_wb == 1)
rdev->wb.enabled = false;
else {
- /* often unreliable on AGP */
if (rdev->flags & RADEON_IS_AGP) {
+ /* often unreliable on AGP */
+ rdev->wb.enabled = false;
+ } else if (rdev->family < CHIP_R300) {
+ /* often unreliable on pre-r300 */
rdev->wb.enabled = false;
} else {
rdev->wb.enabled = true;
@@ -718,17 +721,24 @@ int radeon_device_init(struct radeon_device *rdev,
* can recall function without having locking issues */
radeon_mutex_init(&rdev->cs_mutex);
mutex_init(&rdev->ib_pool.mutex);
- mutex_init(&rdev->cp.mutex);
+ for (i = 0; i < RADEON_NUM_RINGS; ++i)
+ mutex_init(&rdev->ring[i].mutex);
mutex_init(&rdev->dc_hw_i2c_mutex);
if (rdev->family >= CHIP_R600)
spin_lock_init(&rdev->ih.lock);
mutex_init(&rdev->gem.mutex);
mutex_init(&rdev->pm.mutex);
mutex_init(&rdev->vram_mutex);
- rwlock_init(&rdev->fence_drv.lock);
+ rwlock_init(&rdev->fence_lock);
+ rwlock_init(&rdev->semaphore_drv.lock);
INIT_LIST_HEAD(&rdev->gem.objects);
init_waitqueue_head(&rdev->irq.vblank_queue);
init_waitqueue_head(&rdev->irq.idle_queue);
+ INIT_LIST_HEAD(&rdev->semaphore_drv.bo);
+ /* initialize vm here */
+ rdev->vm_manager.use_bitmap = 1;
+ rdev->vm_manager.max_pfn = 1 << 20;
+ INIT_LIST_HEAD(&rdev->vm_manager.lru_vm);
/* Set asic functions */
r = radeon_asic_init(rdev);
@@ -765,8 +775,14 @@ int radeon_device_init(struct radeon_device *rdev,
r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
if (r) {
rdev->need_dma32 = true;
+ dma_bits = 32;
printk(KERN_WARNING "radeon: No suitable DMA available.\n");
}
+ r = pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
+ if (r) {
+ pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
+ printk(KERN_WARNING "radeon: No coherent DMA available.\n");
+ }
/* Registers mapping */
/* TODO: block userspace mapping of io register */
@@ -814,15 +830,20 @@ int radeon_device_init(struct radeon_device *rdev,
if (r)
return r;
}
- if (radeon_testing) {
+ if ((radeon_testing & 1)) {
radeon_test_moves(rdev);
}
+ if ((radeon_testing & 2)) {
+ radeon_test_syncing(rdev);
+ }
if (radeon_benchmarking) {
radeon_benchmark(rdev, radeon_benchmarking);
}
return 0;
}
+static void radeon_debugfs_remove_files(struct radeon_device *rdev);
+
void radeon_device_fini(struct radeon_device *rdev)
{
DRM_INFO("radeon: finishing device.\n");
@@ -837,6 +858,7 @@ void radeon_device_fini(struct radeon_device *rdev)
rdev->rio_mem = NULL;
iounmap(rdev->rmmio);
rdev->rmmio = NULL;
+ radeon_debugfs_remove_files(rdev);
}
@@ -848,7 +870,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
struct radeon_device *rdev;
struct drm_crtc *crtc;
struct drm_connector *connector;
- int r;
+ int i, r;
if (dev == NULL || dev->dev_private == NULL) {
return -ENODEV;
@@ -887,7 +909,8 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
/* evict vram memory */
radeon_bo_evict_vram(rdev);
/* wait for gpu to finish processing current batch */
- radeon_fence_wait_last(rdev);
+ for (i = 0; i < RADEON_NUM_RINGS; i++)
+ radeon_fence_wait_last(rdev, i);
radeon_save_bios_scratch_regs(rdev);
@@ -986,36 +1009,29 @@ int radeon_gpu_reset(struct radeon_device *rdev)
/*
* Debugfs
*/
-struct radeon_debugfs {
- struct drm_info_list *files;
- unsigned num_files;
-};
-static struct radeon_debugfs _radeon_debugfs[RADEON_DEBUGFS_MAX_COMPONENTS];
-static unsigned _radeon_debugfs_count = 0;
-
int radeon_debugfs_add_files(struct radeon_device *rdev,
struct drm_info_list *files,
unsigned nfiles)
{
unsigned i;
- for (i = 0; i < _radeon_debugfs_count; i++) {
- if (_radeon_debugfs[i].files == files) {
+ for (i = 0; i < rdev->debugfs_count; i++) {
+ if (rdev->debugfs[i].files == files) {
/* Already registered */
return 0;
}
}
- i = _radeon_debugfs_count + 1;
+ i = rdev->debugfs_count + 1;
if (i > RADEON_DEBUGFS_MAX_COMPONENTS) {
DRM_ERROR("Reached maximum number of debugfs components.\n");
DRM_ERROR("Report so we increase "
"RADEON_DEBUGFS_MAX_COMPONENTS.\n");
return -EINVAL;
}
- _radeon_debugfs[_radeon_debugfs_count].files = files;
- _radeon_debugfs[_radeon_debugfs_count].num_files = nfiles;
- _radeon_debugfs_count = i;
+ rdev->debugfs[rdev->debugfs_count].files = files;
+ rdev->debugfs[rdev->debugfs_count].num_files = nfiles;
+ rdev->debugfs_count = i;
#if defined(CONFIG_DEBUG_FS)
drm_debugfs_create_files(files, nfiles,
rdev->ddev->control->debugfs_root,
@@ -1027,6 +1043,22 @@ int radeon_debugfs_add_files(struct radeon_device *rdev,
return 0;
}
+static void radeon_debugfs_remove_files(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ unsigned i;
+
+ for (i = 0; i < rdev->debugfs_count; i++) {
+ drm_debugfs_remove_files(rdev->debugfs[i].files,
+ rdev->debugfs[i].num_files,
+ rdev->ddev->control);
+ drm_debugfs_remove_files(rdev->debugfs[i].files,
+ rdev->debugfs[i].num_files,
+ rdev->ddev->primary);
+ }
+#endif
+}
+
#if defined(CONFIG_DEBUG_FS)
int radeon_debugfs_init(struct drm_minor *minor)
{
@@ -1035,11 +1067,5 @@ int radeon_debugfs_init(struct drm_minor *minor)
void radeon_debugfs_cleanup(struct drm_minor *minor)
{
- unsigned i;
-
- for (i = 0; i < _radeon_debugfs_count; i++) {
- drm_debugfs_remove_files(_radeon_debugfs[i].files,
- _radeon_debugfs[i].num_files, minor);
- }
}
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index a22d6e6a49a2..d3ffc18774a6 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -406,7 +406,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
if (!ASIC_IS_AVIVO(rdev)) {
/* crtc offset is from display base addr not FB location */
base -= radeon_crtc->legacy_display_base_addr;
- pitch_pixels = fb->pitch / (fb->bits_per_pixel / 8);
+ pitch_pixels = fb->pitches[0] / (fb->bits_per_pixel / 8);
if (tiling_flags & RADEON_TILING_MACRO) {
if (ASIC_IS_R300(rdev)) {
@@ -1081,7 +1081,7 @@ static const struct drm_framebuffer_funcs radeon_fb_funcs = {
void
radeon_framebuffer_init(struct drm_device *dev,
struct radeon_framebuffer *rfb,
- struct drm_mode_fb_cmd *mode_cmd,
+ struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj)
{
rfb->obj = obj;
@@ -1092,15 +1092,15 @@ radeon_framebuffer_init(struct drm_device *dev,
static struct drm_framebuffer *
radeon_user_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv,
- struct drm_mode_fb_cmd *mode_cmd)
+ struct drm_mode_fb_cmd2 *mode_cmd)
{
struct drm_gem_object *obj;
struct radeon_framebuffer *radeon_fb;
- obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
+ obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
if (obj == NULL) {
dev_err(&dev->pdev->dev, "No GEM object associated to handle 0x%08X, "
- "can't create framebuffer\n", mode_cmd->handle);
+ "can't create framebuffer\n", mode_cmd->handles[0]);
return ERR_PTR(-ENOENT);
}
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 71499fc3daf5..8032f1fedb11 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -54,9 +54,10 @@
* 2.10.0 - fusion 2D tiling
* 2.11.0 - backend map, initial compute support for the CS checker
* 2.12.0 - RADEON_CS_KEEP_TILING_FLAGS
+ * 2.13.0 - virtual memory support
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 12
+#define KMS_DRIVER_MINOR 13
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
@@ -84,6 +85,10 @@ int radeon_dma_ioctl_kms(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int radeon_gem_object_init(struct drm_gem_object *obj);
void radeon_gem_object_free(struct drm_gem_object *obj);
+int radeon_gem_object_open(struct drm_gem_object *obj,
+ struct drm_file *file_priv);
+void radeon_gem_object_close(struct drm_gem_object *obj,
+ struct drm_file *file_priv);
extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
int *vpos, int *hpos);
extern struct drm_ioctl_desc radeon_ioctls_kms[];
@@ -140,7 +145,7 @@ module_param_named(vramlimit, radeon_vram_limit, int, 0600);
MODULE_PARM_DESC(agpmode, "AGP Mode (-1 == PCI)");
module_param_named(agpmode, radeon_agpmode, int, 0444);
-MODULE_PARM_DESC(gartsize, "Size of PCIE/IGP gart to setup in megabytes (32,64, etc)\n");
+MODULE_PARM_DESC(gartsize, "Size of PCIE/IGP gart to setup in megabytes (32, 64, etc)");
module_param_named(gartsize, radeon_gart_size, int, 0600);
MODULE_PARM_DESC(benchmark, "Run benchmark");
@@ -206,6 +211,21 @@ static struct pci_device_id pciidlist[] = {
MODULE_DEVICE_TABLE(pci, pciidlist);
#endif
+static const struct file_operations radeon_driver_old_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = drm_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+ .read = drm_read,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = radeon_compat_ioctl,
+#endif
+ .llseek = noop_llseek,
+};
+
static struct drm_driver driver_old = {
.driver_features =
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
@@ -232,21 +252,7 @@ static struct drm_driver driver_old = {
.reclaim_buffers = drm_core_reclaim_buffers,
.ioctls = radeon_ioctls,
.dma_ioctl = radeon_cp_buffers,
- .fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = drm_mmap,
- .poll = drm_poll,
- .fasync = drm_fasync,
- .read = drm_read,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = radeon_compat_ioctl,
-#endif
- .llseek = noop_llseek,
- },
-
+ .fops = &radeon_driver_old_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
@@ -304,6 +310,20 @@ radeon_pci_resume(struct pci_dev *pdev)
return radeon_resume_kms(dev);
}
+static const struct file_operations radeon_driver_kms_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = radeon_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+ .read = drm_read,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = radeon_kms_compat_ioctl,
+#endif
+};
+
static struct drm_driver kms_driver = {
.driver_features =
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
@@ -335,24 +355,13 @@ static struct drm_driver kms_driver = {
.ioctls = radeon_ioctls_kms,
.gem_init_object = radeon_gem_object_init,
.gem_free_object = radeon_gem_object_free,
+ .gem_open_object = radeon_gem_object_open,
+ .gem_close_object = radeon_gem_object_close,
.dma_ioctl = radeon_dma_ioctl_kms,
.dumb_create = radeon_mode_dumb_create,
.dumb_map_offset = radeon_mode_dumb_mmap,
.dumb_destroy = radeon_mode_dumb_destroy,
- .fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = radeon_mmap,
- .poll = drm_poll,
- .fasync = drm_fasync,
- .read = drm_read,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = radeon_kms_compat_ioctl,
-#endif
- },
-
+ .fops = &radeon_driver_kms_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 06e413e6a920..4b27efa4405b 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -233,13 +233,12 @@ u16 radeon_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder)
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_TRAVIS:
case ENCODER_OBJECT_ID_NUTMEG:
- return true;
+ return radeon_encoder->encoder_id;
default:
- return false;
+ return ENCODER_OBJECT_ID_NONE;
}
}
-
- return false;
+ return ENCODER_OBJECT_ID_NONE;
}
void radeon_panel_mode_fixup(struct drm_encoder *encoder,
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 0b7b486c97e8..cf2bf35b56b8 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -103,7 +103,7 @@ static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj)
}
static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
- struct drm_mode_fb_cmd *mode_cmd,
+ struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object **gobj_p)
{
struct radeon_device *rdev = rfbdev->rdev;
@@ -114,13 +114,17 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
int ret;
int aligned_size, size;
int height = mode_cmd->height;
+ u32 bpp, depth;
+
+ drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
/* need to align pitch with crtc limits */
- mode_cmd->pitch = radeon_align_pitch(rdev, mode_cmd->width, mode_cmd->bpp, fb_tiled) * ((mode_cmd->bpp + 1) / 8);
+ mode_cmd->pitches[0] = radeon_align_pitch(rdev, mode_cmd->width, bpp,
+ fb_tiled) * ((bpp + 1) / 8);
if (rdev->family >= CHIP_R600)
height = ALIGN(mode_cmd->height, 8);
- size = mode_cmd->pitch * height;
+ size = mode_cmd->pitches[0] * height;
aligned_size = ALIGN(size, PAGE_SIZE);
ret = radeon_gem_object_create(rdev, aligned_size, 0,
RADEON_GEM_DOMAIN_VRAM,
@@ -137,7 +141,7 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
tiling_flags = RADEON_TILING_MACRO;
#ifdef __BIG_ENDIAN
- switch (mode_cmd->bpp) {
+ switch (bpp) {
case 32:
tiling_flags |= RADEON_TILING_SWAP_32BIT;
break;
@@ -151,7 +155,7 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
if (tiling_flags) {
ret = radeon_bo_set_tiling_flags(rbo,
tiling_flags | RADEON_TILING_SURFACE,
- mode_cmd->pitch);
+ mode_cmd->pitches[0]);
if (ret)
dev_err(rdev->dev, "FB failed to set tiling flags\n");
}
@@ -187,7 +191,7 @@ static int radeonfb_create(struct radeon_fbdev *rfbdev,
struct radeon_device *rdev = rfbdev->rdev;
struct fb_info *info;
struct drm_framebuffer *fb = NULL;
- struct drm_mode_fb_cmd mode_cmd;
+ struct drm_mode_fb_cmd2 mode_cmd;
struct drm_gem_object *gobj = NULL;
struct radeon_bo *rbo = NULL;
struct device *device = &rdev->pdev->dev;
@@ -201,8 +205,8 @@ static int radeonfb_create(struct radeon_fbdev *rfbdev,
if ((sizes->surface_bpp == 24) && ASIC_IS_AVIVO(rdev))
sizes->surface_bpp = 32;
- mode_cmd.bpp = sizes->surface_bpp;
- mode_cmd.depth = sizes->surface_depth;
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
ret = radeonfb_create_pinned_object(rfbdev, &mode_cmd, &gobj);
rbo = gem_to_radeon_bo(gobj);
@@ -228,7 +232,7 @@ static int radeonfb_create(struct radeon_fbdev *rfbdev,
strcpy(info->fix.id, "radeondrmfb");
- drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
+ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
info->fbops = &radeonfb_ops;
@@ -271,7 +275,7 @@ static int radeonfb_create(struct radeon_fbdev *rfbdev,
DRM_INFO("vram apper at 0x%lX\n", (unsigned long)rdev->mc.aper_base);
DRM_INFO("size %lu\n", (unsigned long)radeon_bo_size(rbo));
DRM_INFO("fb depth is %d\n", fb->depth);
- DRM_INFO(" pitch is %d\n", fb->pitch);
+ DRM_INFO(" pitch is %d\n", fb->pitches[0]);
vga_switcheroo_client_fb_set(rdev->ddev->pdev, info);
return 0;
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 76ec0e9ed8ae..64ea3dd9e6ff 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -40,32 +40,24 @@
#include "radeon.h"
#include "radeon_trace.h"
-static void radeon_fence_write(struct radeon_device *rdev, u32 seq)
+static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
{
if (rdev->wb.enabled) {
- u32 scratch_index;
- if (rdev->wb.use_event)
- scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
- else
- scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
- rdev->wb.wb[scratch_index/4] = cpu_to_le32(seq);
- } else
- WREG32(rdev->fence_drv.scratch_reg, seq);
+ *rdev->fence_drv[ring].cpu_addr = cpu_to_le32(seq);
+ } else {
+ WREG32(rdev->fence_drv[ring].scratch_reg, seq);
+ }
}
-static u32 radeon_fence_read(struct radeon_device *rdev)
+static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
{
- u32 seq;
+ u32 seq = 0;
if (rdev->wb.enabled) {
- u32 scratch_index;
- if (rdev->wb.use_event)
- scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
- else
- scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
- seq = le32_to_cpu(rdev->wb.wb[scratch_index/4]);
- } else
- seq = RREG32(rdev->fence_drv.scratch_reg);
+ seq = le32_to_cpu(*rdev->fence_drv[ring].cpu_addr);
+ } else {
+ seq = RREG32(rdev->fence_drv[ring].scratch_reg);
+ }
return seq;
}
@@ -73,28 +65,28 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
{
unsigned long irq_flags;
- write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
- if (fence->emited) {
- write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+ write_lock_irqsave(&rdev->fence_lock, irq_flags);
+ if (fence->emitted) {
+ write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
return 0;
}
- fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
- if (!rdev->cp.ready)
+ fence->seq = atomic_add_return(1, &rdev->fence_drv[fence->ring].seq);
+ if (!rdev->ring[fence->ring].ready)
/* FIXME: cp is not running assume everythings is done right
* away
*/
- radeon_fence_write(rdev, fence->seq);
+ radeon_fence_write(rdev, fence->seq, fence->ring);
else
- radeon_fence_ring_emit(rdev, fence);
+ radeon_fence_ring_emit(rdev, fence->ring, fence);
trace_radeon_fence_emit(rdev->ddev, fence->seq);
- fence->emited = true;
- list_move_tail(&fence->list, &rdev->fence_drv.emited);
- write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+ fence->emitted = true;
+ list_move_tail(&fence->list, &rdev->fence_drv[fence->ring].emitted);
+ write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
return 0;
}
-static bool radeon_fence_poll_locked(struct radeon_device *rdev)
+static bool radeon_fence_poll_locked(struct radeon_device *rdev, int ring)
{
struct radeon_fence *fence;
struct list_head *i, *n;
@@ -102,34 +94,34 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev)
bool wake = false;
unsigned long cjiffies;
- seq = radeon_fence_read(rdev);
- if (seq != rdev->fence_drv.last_seq) {
- rdev->fence_drv.last_seq = seq;
- rdev->fence_drv.last_jiffies = jiffies;
- rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
+ seq = radeon_fence_read(rdev, ring);
+ if (seq != rdev->fence_drv[ring].last_seq) {
+ rdev->fence_drv[ring].last_seq = seq;
+ rdev->fence_drv[ring].last_jiffies = jiffies;
+ rdev->fence_drv[ring].last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
} else {
cjiffies = jiffies;
- if (time_after(cjiffies, rdev->fence_drv.last_jiffies)) {
- cjiffies -= rdev->fence_drv.last_jiffies;
- if (time_after(rdev->fence_drv.last_timeout, cjiffies)) {
+ if (time_after(cjiffies, rdev->fence_drv[ring].last_jiffies)) {
+ cjiffies -= rdev->fence_drv[ring].last_jiffies;
+ if (time_after(rdev->fence_drv[ring].last_timeout, cjiffies)) {
/* update the timeout */
- rdev->fence_drv.last_timeout -= cjiffies;
+ rdev->fence_drv[ring].last_timeout -= cjiffies;
} else {
/* the 500ms timeout is elapsed we should test
* for GPU lockup
*/
- rdev->fence_drv.last_timeout = 1;
+ rdev->fence_drv[ring].last_timeout = 1;
}
} else {
/* wrap around update last jiffies, we will just wait
* a little longer
*/
- rdev->fence_drv.last_jiffies = cjiffies;
+ rdev->fence_drv[ring].last_jiffies = cjiffies;
}
return false;
}
n = NULL;
- list_for_each(i, &rdev->fence_drv.emited) {
+ list_for_each(i, &rdev->fence_drv[ring].emitted) {
fence = list_entry(i, struct radeon_fence, list);
if (fence->seq == seq) {
n = i;
@@ -141,11 +133,11 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev)
i = n;
do {
n = i->prev;
- list_move_tail(i, &rdev->fence_drv.signaled);
+ list_move_tail(i, &rdev->fence_drv[ring].signaled);
fence = list_entry(i, struct radeon_fence, list);
fence->signaled = true;
i = n;
- } while (i != &rdev->fence_drv.emited);
+ } while (i != &rdev->fence_drv[ring].emitted);
wake = true;
}
return wake;
@@ -157,14 +149,18 @@ static void radeon_fence_destroy(struct kref *kref)
struct radeon_fence *fence;
fence = container_of(kref, struct radeon_fence, kref);
- write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags);
+ write_lock_irqsave(&fence->rdev->fence_lock, irq_flags);
list_del(&fence->list);
- fence->emited = false;
- write_unlock_irqrestore(&fence->rdev->fence_drv.lock, irq_flags);
+ fence->emitted = false;
+ write_unlock_irqrestore(&fence->rdev->fence_lock, irq_flags);
+ if (fence->semaphore)
+ radeon_semaphore_free(fence->rdev, fence->semaphore);
kfree(fence);
}
-int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence)
+int radeon_fence_create(struct radeon_device *rdev,
+ struct radeon_fence **fence,
+ int ring)
{
unsigned long irq_flags;
@@ -174,18 +170,19 @@ int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence)
}
kref_init(&((*fence)->kref));
(*fence)->rdev = rdev;
- (*fence)->emited = false;
+ (*fence)->emitted = false;
(*fence)->signaled = false;
(*fence)->seq = 0;
+ (*fence)->ring = ring;
+ (*fence)->semaphore = NULL;
INIT_LIST_HEAD(&(*fence)->list);
- write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
- list_add_tail(&(*fence)->list, &rdev->fence_drv.created);
- write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+ write_lock_irqsave(&rdev->fence_lock, irq_flags);
+ list_add_tail(&(*fence)->list, &rdev->fence_drv[ring].created);
+ write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
return 0;
}
-
bool radeon_fence_signaled(struct radeon_fence *fence)
{
unsigned long irq_flags;
@@ -197,21 +194,21 @@ bool radeon_fence_signaled(struct radeon_fence *fence)
if (fence->rdev->gpu_lockup)
return true;
- write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags);
+ write_lock_irqsave(&fence->rdev->fence_lock, irq_flags);
signaled = fence->signaled;
/* if we are shuting down report all fence as signaled */
if (fence->rdev->shutdown) {
signaled = true;
}
- if (!fence->emited) {
- WARN(1, "Querying an unemited fence : %p !\n", fence);
+ if (!fence->emitted) {
+ WARN(1, "Querying an unemitted fence : %p !\n", fence);
signaled = true;
}
if (!signaled) {
- radeon_fence_poll_locked(fence->rdev);
+ radeon_fence_poll_locked(fence->rdev, fence->ring);
signaled = fence->signaled;
}
- write_unlock_irqrestore(&fence->rdev->fence_drv.lock, irq_flags);
+ write_unlock_irqrestore(&fence->rdev->fence_lock, irq_flags);
return signaled;
}
@@ -230,24 +227,24 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
if (radeon_fence_signaled(fence)) {
return 0;
}
- timeout = rdev->fence_drv.last_timeout;
+ timeout = rdev->fence_drv[fence->ring].last_timeout;
retry:
/* save current sequence used to check for GPU lockup */
- seq = rdev->fence_drv.last_seq;
+ seq = rdev->fence_drv[fence->ring].last_seq;
trace_radeon_fence_wait_begin(rdev->ddev, seq);
if (intr) {
- radeon_irq_kms_sw_irq_get(rdev);
- r = wait_event_interruptible_timeout(rdev->fence_drv.queue,
+ radeon_irq_kms_sw_irq_get(rdev, fence->ring);
+ r = wait_event_interruptible_timeout(rdev->fence_drv[fence->ring].queue,
radeon_fence_signaled(fence), timeout);
- radeon_irq_kms_sw_irq_put(rdev);
+ radeon_irq_kms_sw_irq_put(rdev, fence->ring);
if (unlikely(r < 0)) {
return r;
}
} else {
- radeon_irq_kms_sw_irq_get(rdev);
- r = wait_event_timeout(rdev->fence_drv.queue,
+ radeon_irq_kms_sw_irq_get(rdev, fence->ring);
+ r = wait_event_timeout(rdev->fence_drv[fence->ring].queue,
radeon_fence_signaled(fence), timeout);
- radeon_irq_kms_sw_irq_put(rdev);
+ radeon_irq_kms_sw_irq_put(rdev, fence->ring);
}
trace_radeon_fence_wait_end(rdev->ddev, seq);
if (unlikely(!radeon_fence_signaled(fence))) {
@@ -258,10 +255,11 @@ retry:
timeout = r;
goto retry;
}
- /* don't protect read access to rdev->fence_drv.last_seq
+ /* don't protect read access to rdev->fence_drv[fence->ring].last_seq
* if we experiencing a lockup the value doesn't change
*/
- if (seq == rdev->fence_drv.last_seq && radeon_gpu_is_lockup(rdev)) {
+ if (seq == rdev->fence_drv[fence->ring].last_seq &&
+ radeon_gpu_is_lockup(rdev, &rdev->ring[fence->ring])) {
/* good news we believe it's a lockup */
printk(KERN_WARNING "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n",
fence->seq, seq);
@@ -272,20 +270,20 @@ retry:
r = radeon_gpu_reset(rdev);
if (r)
return r;
- radeon_fence_write(rdev, fence->seq);
+ radeon_fence_write(rdev, fence->seq, fence->ring);
rdev->gpu_lockup = false;
}
timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
- write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
- rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
- rdev->fence_drv.last_jiffies = jiffies;
- write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+ write_lock_irqsave(&rdev->fence_lock, irq_flags);
+ rdev->fence_drv[fence->ring].last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
+ rdev->fence_drv[fence->ring].last_jiffies = jiffies;
+ write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
goto retry;
}
return 0;
}
-int radeon_fence_wait_next(struct radeon_device *rdev)
+int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
{
unsigned long irq_flags;
struct radeon_fence *fence;
@@ -294,21 +292,21 @@ int radeon_fence_wait_next(struct radeon_device *rdev)
if (rdev->gpu_lockup) {
return 0;
}
- write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
- if (list_empty(&rdev->fence_drv.emited)) {
- write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+ write_lock_irqsave(&rdev->fence_lock, irq_flags);
+ if (list_empty(&rdev->fence_drv[ring].emitted)) {
+ write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
return 0;
}
- fence = list_entry(rdev->fence_drv.emited.next,
+ fence = list_entry(rdev->fence_drv[ring].emitted.next,
struct radeon_fence, list);
radeon_fence_ref(fence);
- write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+ write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
r = radeon_fence_wait(fence, false);
radeon_fence_unref(&fence);
return r;
}
-int radeon_fence_wait_last(struct radeon_device *rdev)
+int radeon_fence_wait_last(struct radeon_device *rdev, int ring)
{
unsigned long irq_flags;
struct radeon_fence *fence;
@@ -317,15 +315,15 @@ int radeon_fence_wait_last(struct radeon_device *rdev)
if (rdev->gpu_lockup) {
return 0;
}
- write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
- if (list_empty(&rdev->fence_drv.emited)) {
- write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+ write_lock_irqsave(&rdev->fence_lock, irq_flags);
+ if (list_empty(&rdev->fence_drv[ring].emitted)) {
+ write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
return 0;
}
- fence = list_entry(rdev->fence_drv.emited.prev,
+ fence = list_entry(rdev->fence_drv[ring].emitted.prev,
struct radeon_fence, list);
radeon_fence_ref(fence);
- write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+ write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
r = radeon_fence_wait(fence, false);
radeon_fence_unref(&fence);
return r;
@@ -347,39 +345,95 @@ void radeon_fence_unref(struct radeon_fence **fence)
}
}
-void radeon_fence_process(struct radeon_device *rdev)
+void radeon_fence_process(struct radeon_device *rdev, int ring)
{
unsigned long irq_flags;
bool wake;
- write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
- wake = radeon_fence_poll_locked(rdev);
- write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+ write_lock_irqsave(&rdev->fence_lock, irq_flags);
+ wake = radeon_fence_poll_locked(rdev, ring);
+ write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
if (wake) {
- wake_up_all(&rdev->fence_drv.queue);
+ wake_up_all(&rdev->fence_drv[ring].queue);
}
}
-int radeon_fence_driver_init(struct radeon_device *rdev)
+int radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
+{
+ unsigned long irq_flags;
+ int not_processed = 0;
+
+ read_lock_irqsave(&rdev->fence_lock, irq_flags);
+ if (!rdev->fence_drv[ring].initialized)
+ return 0;
+
+ if (!list_empty(&rdev->fence_drv[ring].emitted)) {
+ struct list_head *ptr;
+ list_for_each(ptr, &rdev->fence_drv[ring].emitted) {
+ /* count up to 3, that's enough info */
+ if (++not_processed >= 3)
+ break;
+ }
+ }
+ read_unlock_irqrestore(&rdev->fence_lock, irq_flags);
+ return not_processed;
+}
+
+int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
{
unsigned long irq_flags;
+ uint64_t index;
int r;
- write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
- r = radeon_scratch_get(rdev, &rdev->fence_drv.scratch_reg);
- if (r) {
- dev_err(rdev->dev, "fence failed to get scratch register\n");
- write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
- return r;
+ write_lock_irqsave(&rdev->fence_lock, irq_flags);
+ radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
+ if (rdev->wb.use_event) {
+ rdev->fence_drv[ring].scratch_reg = 0;
+ index = R600_WB_EVENT_OFFSET + ring * 4;
+ } else {
+ r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
+ if (r) {
+ dev_err(rdev->dev, "fence failed to get scratch register\n");
+ write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
+ return r;
+ }
+ index = RADEON_WB_SCRATCH_OFFSET +
+ rdev->fence_drv[ring].scratch_reg -
+ rdev->scratch.reg_base;
}
- radeon_fence_write(rdev, 0);
- atomic_set(&rdev->fence_drv.seq, 0);
- INIT_LIST_HEAD(&rdev->fence_drv.created);
- INIT_LIST_HEAD(&rdev->fence_drv.emited);
- INIT_LIST_HEAD(&rdev->fence_drv.signaled);
- init_waitqueue_head(&rdev->fence_drv.queue);
- rdev->fence_drv.initialized = true;
- write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+ rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
+ rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
+ radeon_fence_write(rdev, atomic_read(&rdev->fence_drv[ring].seq), ring);
+ rdev->fence_drv[ring].initialized = true;
+ DRM_INFO("fence driver on ring %d uses gpu addr 0x%08Lx and cpu addr 0x%p\n",
+ ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
+ write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
+ return 0;
+}
+
+static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
+{
+ rdev->fence_drv[ring].scratch_reg = -1;
+ rdev->fence_drv[ring].cpu_addr = NULL;
+ rdev->fence_drv[ring].gpu_addr = 0;
+ atomic_set(&rdev->fence_drv[ring].seq, 0);
+ INIT_LIST_HEAD(&rdev->fence_drv[ring].created);
+ INIT_LIST_HEAD(&rdev->fence_drv[ring].emitted);
+ INIT_LIST_HEAD(&rdev->fence_drv[ring].signaled);
+ init_waitqueue_head(&rdev->fence_drv[ring].queue);
+ rdev->fence_drv[ring].initialized = false;
+}
+
+int radeon_fence_driver_init(struct radeon_device *rdev)
+{
+ unsigned long irq_flags;
+ int ring;
+
+ write_lock_irqsave(&rdev->fence_lock, irq_flags);
+ for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
+ radeon_fence_driver_init_ring(rdev, ring);
+ }
+ write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
if (radeon_debugfs_fence_init(rdev)) {
dev_err(rdev->dev, "fence debugfs file creation failed\n");
}
@@ -389,14 +443,18 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
void radeon_fence_driver_fini(struct radeon_device *rdev)
{
unsigned long irq_flags;
-
- if (!rdev->fence_drv.initialized)
- return;
- wake_up_all(&rdev->fence_drv.queue);
- write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
- radeon_scratch_free(rdev, rdev->fence_drv.scratch_reg);
- write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
- rdev->fence_drv.initialized = false;
+ int ring;
+
+ for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
+ if (!rdev->fence_drv[ring].initialized)
+ continue;
+ radeon_fence_wait_last(rdev, ring);
+ wake_up_all(&rdev->fence_drv[ring].queue);
+ write_lock_irqsave(&rdev->fence_lock, irq_flags);
+ radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
+ write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
+ rdev->fence_drv[ring].initialized = false;
+ }
}
@@ -410,14 +468,21 @@ static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_fence *fence;
-
- seq_printf(m, "Last signaled fence 0x%08X\n",
- radeon_fence_read(rdev));
- if (!list_empty(&rdev->fence_drv.emited)) {
- fence = list_entry(rdev->fence_drv.emited.prev,
- struct radeon_fence, list);
- seq_printf(m, "Last emited fence %p with 0x%08X\n",
- fence, fence->seq);
+ int i;
+
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ if (!rdev->fence_drv[i].initialized)
+ continue;
+
+ seq_printf(m, "--- ring %d ---\n", i);
+ seq_printf(m, "Last signaled fence 0x%08X\n",
+ radeon_fence_read(rdev, i));
+ if (!list_empty(&rdev->fence_drv[i].emitted)) {
+ fence = list_entry(rdev->fence_drv[i].emitted.prev,
+ struct radeon_fence, list);
+ seq_printf(m, "Last emitted fence %p with 0x%08X\n",
+ fence, fence->seq);
+ }
}
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index ba7ab79e12c1..010dad8b66ae 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -157,9 +157,6 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
for (i = 0; i < pages; i++, p++) {
if (rdev->gart.pages[p]) {
- if (!rdev->gart.ttm_alloced[p])
- pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p],
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
rdev->gart.pages[p] = NULL;
rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
page_base = rdev->gart.pages_addr[p];
@@ -191,23 +188,7 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
for (i = 0; i < pages; i++, p++) {
- /* we reverted the patch using dma_addr in TTM for now but this
- * code stops building on alpha so just comment it out for now */
- if (0) { /*dma_addr[i] != DMA_ERROR_CODE) */
- rdev->gart.ttm_alloced[p] = true;
- rdev->gart.pages_addr[p] = dma_addr[i];
- } else {
- /* we need to support large memory configurations */
- /* assume that unbind have already been call on the range */
- rdev->gart.pages_addr[p] = pci_map_page(rdev->pdev, pagelist[i],
- 0, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(rdev->pdev, rdev->gart.pages_addr[p])) {
- /* FIXME: failed to map page (return -ENOMEM?) */
- radeon_gart_unbind(rdev, offset, pages);
- return -ENOMEM;
- }
- }
+ rdev->gart.pages_addr[p] = dma_addr[i];
rdev->gart.pages[p] = pagelist[i];
if (rdev->gart.ptr) {
page_base = rdev->gart.pages_addr[p];
@@ -274,12 +255,6 @@ int radeon_gart_init(struct radeon_device *rdev)
radeon_gart_fini(rdev);
return -ENOMEM;
}
- rdev->gart.ttm_alloced = kzalloc(sizeof(bool) *
- rdev->gart.num_cpu_pages, GFP_KERNEL);
- if (rdev->gart.ttm_alloced == NULL) {
- radeon_gart_fini(rdev);
- return -ENOMEM;
- }
/* set GART entry to point to the dummy page by default */
for (i = 0; i < rdev->gart.num_cpu_pages; i++) {
rdev->gart.pages_addr[i] = rdev->dummy_page.addr;
@@ -296,10 +271,404 @@ void radeon_gart_fini(struct radeon_device *rdev)
rdev->gart.ready = false;
kfree(rdev->gart.pages);
kfree(rdev->gart.pages_addr);
- kfree(rdev->gart.ttm_alloced);
rdev->gart.pages = NULL;
rdev->gart.pages_addr = NULL;
- rdev->gart.ttm_alloced = NULL;
radeon_dummy_page_fini(rdev);
}
+
+/*
+ * vm helpers
+ *
+ * TODO bind a default page at vm initialization for default address
+ */
+int radeon_vm_manager_init(struct radeon_device *rdev)
+{
+ int r;
+
+ rdev->vm_manager.enabled = false;
+
+ /* mark first vm as always in use, it's the system one */
+ r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
+ rdev->vm_manager.max_pfn * 8,
+ RADEON_GEM_DOMAIN_VRAM);
+ if (r) {
+ dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
+ (rdev->vm_manager.max_pfn * 8) >> 10);
+ return r;
+ }
+
+ r = rdev->vm_manager.funcs->init(rdev);
+ if (r == 0)
+ rdev->vm_manager.enabled = true;
+
+ return r;
+}
+
+/* cs mutex must be locked */
+static void radeon_vm_unbind_locked(struct radeon_device *rdev,
+ struct radeon_vm *vm)
+{
+ struct radeon_bo_va *bo_va;
+
+ if (vm->id == -1) {
+ return;
+ }
+
+ /* wait for vm use to end */
+ if (vm->fence) {
+ radeon_fence_wait(vm->fence, false);
+ radeon_fence_unref(&vm->fence);
+ }
+
+ /* hw unbind */
+ rdev->vm_manager.funcs->unbind(rdev, vm);
+ rdev->vm_manager.use_bitmap &= ~(1 << vm->id);
+ list_del_init(&vm->list);
+ vm->id = -1;
+ radeon_sa_bo_free(rdev, &vm->sa_bo);
+ vm->pt = NULL;
+
+ list_for_each_entry(bo_va, &vm->va, vm_list) {
+ bo_va->valid = false;
+ }
+}
+
+void radeon_vm_manager_fini(struct radeon_device *rdev)
+{
+ if (rdev->vm_manager.sa_manager.bo == NULL)
+ return;
+ radeon_vm_manager_suspend(rdev);
+ rdev->vm_manager.funcs->fini(rdev);
+ radeon_sa_bo_manager_fini(rdev, &rdev->vm_manager.sa_manager);
+ rdev->vm_manager.enabled = false;
+}
+
+int radeon_vm_manager_start(struct radeon_device *rdev)
+{
+ if (rdev->vm_manager.sa_manager.bo == NULL) {
+ return -EINVAL;
+ }
+ return radeon_sa_bo_manager_start(rdev, &rdev->vm_manager.sa_manager);
+}
+
+int radeon_vm_manager_suspend(struct radeon_device *rdev)
+{
+ struct radeon_vm *vm, *tmp;
+
+ radeon_mutex_lock(&rdev->cs_mutex);
+ /* unbind all active vm */
+ list_for_each_entry_safe(vm, tmp, &rdev->vm_manager.lru_vm, list) {
+ radeon_vm_unbind_locked(rdev, vm);
+ }
+ rdev->vm_manager.funcs->fini(rdev);
+ radeon_mutex_unlock(&rdev->cs_mutex);
+ return radeon_sa_bo_manager_suspend(rdev, &rdev->vm_manager.sa_manager);
+}
+
+/* cs mutex must be locked */
+void radeon_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm)
+{
+ mutex_lock(&vm->mutex);
+ radeon_vm_unbind_locked(rdev, vm);
+ mutex_unlock(&vm->mutex);
+}
+
+/* cs mutex and vm mutex must be locked */
+int radeon_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm)
+{
+ struct radeon_vm *vm_evict;
+ unsigned i;
+ int id = -1, r;
+
+ if (vm == NULL) {
+ return -EINVAL;
+ }
+
+ if (vm->id != -1) {
+ /* update lru */
+ list_del_init(&vm->list);
+ list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
+ return 0;
+ }
+
+retry:
+ r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, &vm->sa_bo,
+ RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8),
+ RADEON_GPU_PAGE_SIZE);
+ if (r) {
+ if (list_empty(&rdev->vm_manager.lru_vm)) {
+ return r;
+ }
+ vm_evict = list_first_entry(&rdev->vm_manager.lru_vm, struct radeon_vm, list);
+ radeon_vm_unbind(rdev, vm_evict);
+ goto retry;
+ }
+ vm->pt = rdev->vm_manager.sa_manager.cpu_ptr;
+ vm->pt += (vm->sa_bo.offset >> 3);
+ vm->pt_gpu_addr = rdev->vm_manager.sa_manager.gpu_addr;
+ vm->pt_gpu_addr += vm->sa_bo.offset;
+ memset(vm->pt, 0, RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8));
+
+retry_id:
+ /* search for free vm */
+ for (i = 0; i < rdev->vm_manager.nvm; i++) {
+ if (!(rdev->vm_manager.use_bitmap & (1 << i))) {
+ id = i;
+ break;
+ }
+ }
+ /* evict vm if necessary */
+ if (id == -1) {
+ vm_evict = list_first_entry(&rdev->vm_manager.lru_vm, struct radeon_vm, list);
+ radeon_vm_unbind(rdev, vm_evict);
+ goto retry_id;
+ }
+
+ /* do hw bind */
+ r = rdev->vm_manager.funcs->bind(rdev, vm, id);
+ if (r) {
+ radeon_sa_bo_free(rdev, &vm->sa_bo);
+ return r;
+ }
+ rdev->vm_manager.use_bitmap |= 1 << id;
+ vm->id = id;
+ list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
+ return radeon_vm_bo_update_pte(rdev, vm, rdev->ib_pool.sa_manager.bo,
+ &rdev->ib_pool.sa_manager.bo->tbo.mem);
+}
+
+/* object has to be reserved */
+int radeon_vm_bo_add(struct radeon_device *rdev,
+ struct radeon_vm *vm,
+ struct radeon_bo *bo,
+ uint64_t offset,
+ uint32_t flags)
+{
+ struct radeon_bo_va *bo_va, *tmp;
+ struct list_head *head;
+ uint64_t size = radeon_bo_size(bo), last_offset = 0;
+ unsigned last_pfn;
+
+ bo_va = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
+ if (bo_va == NULL) {
+ return -ENOMEM;
+ }
+ bo_va->vm = vm;
+ bo_va->bo = bo;
+ bo_va->soffset = offset;
+ bo_va->eoffset = offset + size;
+ bo_va->flags = flags;
+ bo_va->valid = false;
+ INIT_LIST_HEAD(&bo_va->bo_list);
+ INIT_LIST_HEAD(&bo_va->vm_list);
+ /* make sure the object fits at this offset */
+ if (bo_va->soffset >= bo_va->eoffset) {
+ kfree(bo_va);
+ return -EINVAL;
+ }
+
+ last_pfn = bo_va->eoffset / RADEON_GPU_PAGE_SIZE;
+ if (last_pfn > rdev->vm_manager.max_pfn) {
+ kfree(bo_va);
+ dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
+ last_pfn, rdev->vm_manager.max_pfn);
+ return -EINVAL;
+ }
+
+ mutex_lock(&vm->mutex);
+ if (last_pfn > vm->last_pfn) {
+ /* grow va space 32M by 32M */
+ unsigned align = ((32 << 20) >> 12) - 1;
+ radeon_mutex_lock(&rdev->cs_mutex);
+ radeon_vm_unbind_locked(rdev, vm);
+ radeon_mutex_unlock(&rdev->cs_mutex);
+ vm->last_pfn = (last_pfn + align) & ~align;
+ }
+ head = &vm->va;
+ last_offset = 0;
+ list_for_each_entry(tmp, &vm->va, vm_list) {
+ if (bo_va->soffset >= last_offset && bo_va->eoffset < tmp->soffset) {
+ /* bo can be added before this one */
+ break;
+ }
+ if (bo_va->soffset >= tmp->soffset && bo_va->soffset < tmp->eoffset) {
+ /* bo and tmp overlap, invalid offset */
+ dev_err(rdev->dev, "bo %p va 0x%08X conflict with (bo %p 0x%08X 0x%08X)\n",
+ bo, (unsigned)bo_va->soffset, tmp->bo,
+ (unsigned)tmp->soffset, (unsigned)tmp->eoffset);
+ kfree(bo_va);
+ mutex_unlock(&vm->mutex);
+ return -EINVAL;
+ }
+ last_offset = tmp->eoffset;
+ head = &tmp->vm_list;
+ }
+ list_add(&bo_va->vm_list, head);
+ list_add_tail(&bo_va->bo_list, &bo->va);
+ mutex_unlock(&vm->mutex);
+ return 0;
+}
+
+static u64 radeon_vm_get_addr(struct radeon_device *rdev,
+ struct ttm_mem_reg *mem,
+ unsigned pfn)
+{
+ u64 addr = 0;
+
+ switch (mem->mem_type) {
+ case TTM_PL_VRAM:
+ addr = (mem->start << PAGE_SHIFT);
+ addr += pfn * RADEON_GPU_PAGE_SIZE;
+ addr += rdev->vm_manager.vram_base_offset;
+ break;
+ case TTM_PL_TT:
+ /* offset inside page table */
+ addr = mem->start << PAGE_SHIFT;
+ addr += pfn * RADEON_GPU_PAGE_SIZE;
+ addr = addr >> PAGE_SHIFT;
+ /* page table offset */
+ addr = rdev->gart.pages_addr[addr];
+ /* in case cpu page size != gpu page size*/
+ addr += (pfn * RADEON_GPU_PAGE_SIZE) & (~PAGE_MASK);
+ break;
+ default:
+ break;
+ }
+ return addr;
+}
+
+/* object has to be reserved & cs mutex and vm mutex must be held */
+int radeon_vm_bo_update_pte(struct radeon_device *rdev,
+ struct radeon_vm *vm,
+ struct radeon_bo *bo,
+ struct ttm_mem_reg *mem)
+{
+ struct radeon_bo_va *bo_va;
+ unsigned ngpu_pages, i;
+ uint64_t addr = 0, pfn;
+ uint32_t flags;
+
+ /* nothing to do if vm isn't bound */
+ if (vm->id == -1)
+ return 0;
+
+ bo_va = radeon_bo_va(bo, vm);
+ if (bo_va == NULL) {
+ dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
+ return -EINVAL;
+ }
+
+ if (bo_va->valid)
+ return 0;
+
+ ngpu_pages = radeon_bo_ngpu_pages(bo);
+ bo_va->flags &= ~RADEON_VM_PAGE_VALID;
+ bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
+ if (mem) {
+ if (mem->mem_type != TTM_PL_SYSTEM) {
+ bo_va->flags |= RADEON_VM_PAGE_VALID;
+ bo_va->valid = true;
+ }
+ if (mem->mem_type == TTM_PL_TT) {
+ bo_va->flags |= RADEON_VM_PAGE_SYSTEM;
+ }
+ }
+ pfn = bo_va->soffset / RADEON_GPU_PAGE_SIZE;
+ flags = rdev->vm_manager.funcs->page_flags(rdev, bo_va->vm, bo_va->flags);
+ for (i = 0, addr = 0; i < ngpu_pages; i++) {
+ if (mem && bo_va->valid) {
+ addr = radeon_vm_get_addr(rdev, mem, i);
+ }
+ rdev->vm_manager.funcs->set_page(rdev, bo_va->vm, i + pfn, addr, flags);
+ }
+ rdev->vm_manager.funcs->tlb_flush(rdev, bo_va->vm);
+ return 0;
+}
+
+/* object has to be reserved */
+int radeon_vm_bo_rmv(struct radeon_device *rdev,
+ struct radeon_vm *vm,
+ struct radeon_bo *bo)
+{
+ struct radeon_bo_va *bo_va;
+
+ bo_va = radeon_bo_va(bo, vm);
+ if (bo_va == NULL)
+ return 0;
+
+ list_del(&bo_va->bo_list);
+ mutex_lock(&vm->mutex);
+ radeon_mutex_lock(&rdev->cs_mutex);
+ radeon_vm_bo_update_pte(rdev, vm, bo, NULL);
+ radeon_mutex_unlock(&rdev->cs_mutex);
+ list_del(&bo_va->vm_list);
+ mutex_unlock(&vm->mutex);
+
+ kfree(bo_va);
+ return 0;
+}
+
+void radeon_vm_bo_invalidate(struct radeon_device *rdev,
+ struct radeon_bo *bo)
+{
+ struct radeon_bo_va *bo_va;
+
+ BUG_ON(!atomic_read(&bo->tbo.reserved));
+ list_for_each_entry(bo_va, &bo->va, bo_list) {
+ bo_va->valid = false;
+ }
+}
+
+int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
+{
+ int r;
+
+ vm->id = -1;
+ vm->fence = NULL;
+ mutex_init(&vm->mutex);
+ INIT_LIST_HEAD(&vm->list);
+ INIT_LIST_HEAD(&vm->va);
+ vm->last_pfn = 0;
+ /* map the ib pool buffer at 0 in virtual address space, set
+ * read only
+ */
+ r = radeon_vm_bo_add(rdev, vm, rdev->ib_pool.sa_manager.bo, 0,
+ RADEON_VM_PAGE_READABLE | RADEON_VM_PAGE_SNOOPED);
+ return r;
+}
+
+void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
+{
+ struct radeon_bo_va *bo_va, *tmp;
+ int r;
+
+ mutex_lock(&vm->mutex);
+
+ radeon_mutex_lock(&rdev->cs_mutex);
+ radeon_vm_unbind_locked(rdev, vm);
+ radeon_mutex_unlock(&rdev->cs_mutex);
+
+ /* remove all bo */
+ r = radeon_bo_reserve(rdev->ib_pool.sa_manager.bo, false);
+ if (!r) {
+ bo_va = radeon_bo_va(rdev->ib_pool.sa_manager.bo, vm);
+ list_del_init(&bo_va->bo_list);
+ list_del_init(&bo_va->vm_list);
+ radeon_bo_unreserve(rdev->ib_pool.sa_manager.bo);
+ kfree(bo_va);
+ }
+ if (!list_empty(&vm->va)) {
+ dev_err(rdev->dev, "still active bo inside vm\n");
+ }
+ list_for_each_entry_safe(bo_va, tmp, &vm->va, vm_list) {
+ list_del_init(&bo_va->vm_list);
+ r = radeon_bo_reserve(bo_va->bo, false);
+ if (!r) {
+ list_del_init(&bo_va->bo_list);
+ radeon_bo_unreserve(bo_va->bo);
+ kfree(bo_va);
+ }
+ }
+ mutex_unlock(&vm->mutex);
+}
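
The radeon_vm_bo_add() path added above keeps each VM's mappings in a list sorted by start offset and rejects any range that overlaps an existing one. As a rough illustration only — the types and function below are hypothetical, not the kernel's structures — the same scan can be sketched in stand-alone C:

/* Stand-alone illustration of a sorted VA-range insert with overlap
 * rejection; struct va_range and va_insert() are made up for this sketch. */
#include <stdio.h>
#include <stdlib.h>

struct va_range {
	unsigned long start, end;	/* half-open range [start, end) */
	struct va_range *next;
};

/* Insert [start, end) keeping the list sorted by start offset.
 * Returns 0 on success, -1 on an empty/overlapping range or allocation failure. */
static int va_insert(struct va_range **head, unsigned long start, unsigned long end)
{
	struct va_range **pp = head, *cur, *n;

	if (start >= end)
		return -1;
	for (cur = *head; cur; pp = &cur->next, cur = cur->next) {
		if (end <= cur->start)
			break;		/* fits entirely before this entry */
		if (start < cur->end)
			return -1;	/* overlaps this entry */
	}
	n = malloc(sizeof(*n));
	if (!n)
		return -1;
	n->start = start;
	n->end = end;
	n->next = *pp;
	*pp = n;
	return 0;
}

int main(void)
{
	struct va_range *vm = NULL;

	printf("%d\n", va_insert(&vm, 0x1000, 0x2000));	/* 0: first mapping */
	printf("%d\n", va_insert(&vm, 0x3000, 0x4000));	/* 0: fits after it */
	printf("%d\n", va_insert(&vm, 0x1800, 0x2800));	/* -1: overlaps the first */
	return 0;
}
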
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index aa1ca2dea42f..7337850af2fa 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -142,6 +142,44 @@ void radeon_gem_fini(struct radeon_device *rdev)
radeon_bo_force_delete(rdev);
}
+/*
+ * Called from drm_gem_handle_create, which appears in both the new and open
+ * ioctl cases.
+ */
+int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
+{
+ return 0;
+}
+
+void radeon_gem_object_close(struct drm_gem_object *obj,
+ struct drm_file *file_priv)
+{
+ struct radeon_bo *rbo = gem_to_radeon_bo(obj);
+ struct radeon_device *rdev = rbo->rdev;
+ struct radeon_fpriv *fpriv = file_priv->driver_priv;
+ struct radeon_vm *vm = &fpriv->vm;
+ struct radeon_bo_va *bo_va, *tmp;
+
+ if (rdev->family < CHIP_CAYMAN) {
+ return;
+ }
+
+ if (radeon_bo_reserve(rbo, false)) {
+ return;
+ }
+ list_for_each_entry_safe(bo_va, tmp, &rbo->va, bo_list) {
+ if (bo_va->vm == vm) {
+ /* remove from this vm address space */
+ mutex_lock(&vm->mutex);
+ list_del(&bo_va->vm_list);
+ mutex_unlock(&vm->mutex);
+ list_del(&bo_va->bo_list);
+ kfree(bo_va);
+ }
+ }
+ radeon_bo_unreserve(rbo);
+}
+
/*
* GEM ioctls.
@@ -152,6 +190,7 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
struct radeon_device *rdev = dev->dev_private;
struct drm_radeon_gem_info *args = data;
struct ttm_mem_type_manager *man;
+ unsigned i;
man = &rdev->mman.bdev.man[TTM_PL_VRAM];
@@ -160,8 +199,9 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
if (rdev->stollen_vga_memory)
args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
args->vram_visible -= radeon_fbdev_total_size(rdev);
- args->gart_size = rdev->mc.gtt_size - rdev->cp.ring_size - 4096 -
- RADEON_IB_POOL_SIZE*64*1024;
+ args->gart_size = rdev->mc.gtt_size - 4096 - RADEON_IB_POOL_SIZE*64*1024;
+ for(i = 0; i < RADEON_NUM_RINGS; ++i)
+ args->gart_size -= rdev->ring[i].ring_size;
return 0;
}
@@ -352,6 +392,109 @@ out:
return r;
}
+int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ struct drm_radeon_gem_va *args = data;
+ struct drm_gem_object *gobj;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_fpriv *fpriv = filp->driver_priv;
+ struct radeon_bo *rbo;
+ struct radeon_bo_va *bo_va;
+ u32 invalid_flags;
+ int r = 0;
+
+ if (!rdev->vm_manager.enabled) {
+ args->operation = RADEON_VA_RESULT_ERROR;
+ return -ENOTTY;
+ }
+
+ /* !! DONT REMOVE !!
+ * We don't support vm_id yet. To be sure we don't have broken
+ * userspace, reject anyone trying to use a non-zero value; moving
+ * forward we can then use those fields without breaking existing userspace
+ */
+ if (args->vm_id) {
+ args->operation = RADEON_VA_RESULT_ERROR;
+ return -EINVAL;
+ }
+
+ if (args->offset < RADEON_VA_RESERVED_SIZE) {
+ dev_err(&dev->pdev->dev,
+ "offset 0x%lX is in reserved area 0x%X\n",
+ (unsigned long)args->offset,
+ RADEON_VA_RESERVED_SIZE);
+ args->operation = RADEON_VA_RESULT_ERROR;
+ return -EINVAL;
+ }
+
+ /* don't remove, we need to enforce userspace to set the snooped flag
+ * otherwise we will end up with broken userspace and we won't be able
+ * to enable this feature without adding a new interface
+ */
+ invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
+ if ((args->flags & invalid_flags)) {
+ dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
+ args->flags, invalid_flags);
+ args->operation = RADEON_VA_RESULT_ERROR;
+ return -EINVAL;
+ }
+ if (!(args->flags & RADEON_VM_PAGE_SNOOPED)) {
+ dev_err(&dev->pdev->dev, "only supported snooped mapping for now\n");
+ args->operation = RADEON_VA_RESULT_ERROR;
+ return -EINVAL;
+ }
+
+ switch (args->operation) {
+ case RADEON_VA_MAP:
+ case RADEON_VA_UNMAP:
+ break;
+ default:
+ dev_err(&dev->pdev->dev, "unsupported operation %d\n",
+ args->operation);
+ args->operation = RADEON_VA_RESULT_ERROR;
+ return -EINVAL;
+ }
+
+ gobj = drm_gem_object_lookup(dev, filp, args->handle);
+ if (gobj == NULL) {
+ args->operation = RADEON_VA_RESULT_ERROR;
+ return -ENOENT;
+ }
+ rbo = gem_to_radeon_bo(gobj);
+ r = radeon_bo_reserve(rbo, false);
+ if (r) {
+ args->operation = RADEON_VA_RESULT_ERROR;
+ drm_gem_object_unreference_unlocked(gobj);
+ return r;
+ }
+ switch (args->operation) {
+ case RADEON_VA_MAP:
+ bo_va = radeon_bo_va(rbo, &fpriv->vm);
+ if (bo_va) {
+ args->operation = RADEON_VA_RESULT_VA_EXIST;
+ args->offset = bo_va->soffset;
+ goto out;
+ }
+ r = radeon_vm_bo_add(rdev, &fpriv->vm, rbo,
+ args->offset, args->flags);
+ break;
+ case RADEON_VA_UNMAP:
+ r = radeon_vm_bo_rmv(rdev, &fpriv->vm, rbo);
+ break;
+ default:
+ break;
+ }
+ args->operation = RADEON_VA_RESULT_OK;
+ if (r) {
+ args->operation = RADEON_VA_RESULT_ERROR;
+ }
+out:
+ radeon_bo_unreserve(rbo);
+ drm_gem_object_unreference_unlocked(gobj);
+ return r;
+}
+
int radeon_mode_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args)
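
The flag checks in radeon_gem_va_ioctl() above follow a simple pattern: reject any kernel-managed bits coming from userspace and require the one flag (snooped) the current code can honour. A stand-alone sketch of that pattern — the flag values below are illustrative, not the real RADEON_VM_PAGE_* definitions:

#include <stdio.h>
#include <stdint.h>

/* Illustrative flag values, not the kernel's definitions. */
#define VM_PAGE_VALID    (1u << 0)
#define VM_PAGE_SYSTEM   (1u << 1)
#define VM_PAGE_SNOOPED  (1u << 2)
#define VM_PAGE_READABLE (1u << 3)

/* Reject kernel-internal bits, require the snooped bit. */
static int check_va_flags(uint32_t flags)
{
	const uint32_t invalid = VM_PAGE_VALID | VM_PAGE_SYSTEM;

	if (flags & invalid)
		return -1;	/* kernel-managed bits supplied by userspace */
	if (!(flags & VM_PAGE_SNOOPED))
		return -1;	/* only snooped mappings are accepted for now */
	return 0;
}

int main(void)
{
	printf("%d\n", check_va_flags(VM_PAGE_SNOOPED | VM_PAGE_READABLE));	/* 0 */
	printf("%d\n", check_va_flags(VM_PAGE_READABLE));			/* -1 */
	return 0;
}
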
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 8f86aeb26693..be38921bf761 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -65,7 +65,8 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
unsigned i;
/* Disable *all* interrupts */
- rdev->irq.sw_int = false;
+ for (i = 0; i < RADEON_NUM_RINGS; i++)
+ rdev->irq.sw_int[i] = false;
rdev->irq.gui_idle = false;
for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
rdev->irq.hpd[i] = false;
@@ -81,9 +82,11 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
int radeon_driver_irq_postinstall_kms(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
+ unsigned i;
dev->max_vblank_count = 0x001fffff;
- rdev->irq.sw_int = true;
+ for (i = 0; i < RADEON_NUM_RINGS; i++)
+ rdev->irq.sw_int[i] = true;
radeon_irq_set(rdev);
return 0;
}
@@ -97,7 +100,8 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
return;
}
/* Disable *all* interrupts */
- rdev->irq.sw_int = false;
+ for (i = 0; i < RADEON_NUM_RINGS; i++)
+ rdev->irq.sw_int[i] = false;
rdev->irq.gui_idle = false;
for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
rdev->irq.hpd[i] = false;
@@ -194,26 +198,26 @@ void radeon_irq_kms_fini(struct radeon_device *rdev)
flush_work_sync(&rdev->hotplug_work);
}
-void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev)
+void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring)
{
unsigned long irqflags;
spin_lock_irqsave(&rdev->irq.sw_lock, irqflags);
- if (rdev->ddev->irq_enabled && (++rdev->irq.sw_refcount == 1)) {
- rdev->irq.sw_int = true;
+ if (rdev->ddev->irq_enabled && (++rdev->irq.sw_refcount[ring] == 1)) {
+ rdev->irq.sw_int[ring] = true;
radeon_irq_set(rdev);
}
spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags);
}
-void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev)
+void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring)
{
unsigned long irqflags;
spin_lock_irqsave(&rdev->irq.sw_lock, irqflags);
- BUG_ON(rdev->ddev->irq_enabled && rdev->irq.sw_refcount <= 0);
- if (rdev->ddev->irq_enabled && (--rdev->irq.sw_refcount == 0)) {
- rdev->irq.sw_int = false;
+ BUG_ON(rdev->ddev->irq_enabled && rdev->irq.sw_refcount[ring] <= 0);
+ if (rdev->ddev->irq_enabled && (--rdev->irq.sw_refcount[ring] == 0)) {
+ rdev->irq.sw_int[ring] = false;
radeon_irq_set(rdev);
}
spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags);
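
radeon_irq_kms_sw_irq_get()/put() above move the software interrupt to a per-ring refcount: the hardware state is only touched on the 0->1 and 1->0 transitions. A stand-alone sketch of that pattern, with a pthread mutex standing in for the irq spinlock and a printf standing in for radeon_irq_set() — all names below are illustrative, not driver code:

#include <pthread.h>
#include <stdio.h>

#define NUM_RINGS 3

static pthread_mutex_t irq_lock = PTHREAD_MUTEX_INITIALIZER;
static int sw_refcount[NUM_RINGS];
static int sw_int[NUM_RINGS];

/* Enable the software interrupt for a ring on its first user. */
static void sw_irq_get(int ring)
{
	pthread_mutex_lock(&irq_lock);
	if (++sw_refcount[ring] == 1) {
		sw_int[ring] = 1;
		printf("ring %d: irq enabled\n", ring);	/* stands in for radeon_irq_set() */
	}
	pthread_mutex_unlock(&irq_lock);
}

/* Disable it again when the last user drops its reference. */
static void sw_irq_put(int ring)
{
	pthread_mutex_lock(&irq_lock);
	if (--sw_refcount[ring] == 0) {
		sw_int[ring] = 0;
		printf("ring %d: irq disabled\n", ring);
	}
	pthread_mutex_unlock(&irq_lock);
}

int main(void)
{
	sw_irq_get(0);
	sw_irq_get(0);	/* second user, no state change */
	sw_irq_put(0);
	sw_irq_put(0);	/* last user, irq turned off */
	return 0;
}
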
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index be2c1224e68a..d3352889a870 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -250,6 +250,18 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
return -EINVAL;
}
break;
+ case RADEON_INFO_VA_START:
+ /* this is where we report if vm is supported or not */
+ if (rdev->family < CHIP_CAYMAN)
+ return -EINVAL;
+ value = RADEON_VA_RESERVED_SIZE;
+ break;
+ case RADEON_INFO_IB_VM_MAX_SIZE:
+ /* this is where we report if vm is supported or not */
+ if (rdev->family < CHIP_CAYMAN)
+ return -EINVAL;
+ value = RADEON_IB_VM_MAX_SIZE;
+ break;
default:
DRM_DEBUG_KMS("Invalid request %d\n", info->request);
return -EINVAL;
@@ -270,7 +282,6 @@ int radeon_driver_firstopen_kms(struct drm_device *dev)
return 0;
}
-
void radeon_driver_lastclose_kms(struct drm_device *dev)
{
vga_switcheroo_process_delayed_switch();
@@ -278,12 +289,45 @@ void radeon_driver_lastclose_kms(struct drm_device *dev)
int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
{
+ struct radeon_device *rdev = dev->dev_private;
+
+ file_priv->driver_priv = NULL;
+
+ /* newer gpus have virtual address space support */
+ if (rdev->family >= CHIP_CAYMAN) {
+ struct radeon_fpriv *fpriv;
+ int r;
+
+ fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
+ if (unlikely(!fpriv)) {
+ return -ENOMEM;
+ }
+
+ r = radeon_vm_init(rdev, &fpriv->vm);
+ if (r) {
+ radeon_vm_fini(rdev, &fpriv->vm);
+ kfree(fpriv);
+ return r;
+ }
+
+ file_priv->driver_priv = fpriv;
+ }
return 0;
}
void radeon_driver_postclose_kms(struct drm_device *dev,
struct drm_file *file_priv)
{
+ struct radeon_device *rdev = dev->dev_private;
+
+ /* newer gpus have virtual address space support */
+ if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) {
+ struct radeon_fpriv *fpriv = file_priv->driver_priv;
+
+ radeon_vm_fini(rdev, &fpriv->vm);
+ kfree(fpriv);
+ file_priv->driver_priv = NULL;
+ }
}
void radeon_driver_preclose_kms(struct drm_device *dev,
@@ -451,5 +495,6 @@ struct drm_ioctl_desc radeon_ioctls_kms[] = {
DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_UNLOCKED),
};
int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms);
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index daadf2111040..25a19c483075 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -437,7 +437,7 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc,
crtc_offset_cntl = 0;
- pitch_pixels = target_fb->pitch / (target_fb->bits_per_pixel / 8);
+ pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
crtc_pitch = (((pitch_pixels * target_fb->bits_per_pixel) +
((target_fb->bits_per_pixel * 8) - 1)) /
(target_fb->bits_per_pixel * 8));
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 2c2e75ef8a37..08ff857c8fd6 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -643,7 +643,7 @@ extern void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green
u16 *blue, int regno);
void radeon_framebuffer_init(struct drm_device *dev,
struct radeon_framebuffer *rfb,
- struct drm_mode_fb_cmd *mode_cmd,
+ struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj);
int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 1c851521f458..d45df1763598 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -46,6 +46,20 @@ static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);
* function are calling it.
*/
+void radeon_bo_clear_va(struct radeon_bo *bo)
+{
+ struct radeon_bo_va *bo_va, *tmp;
+
+ list_for_each_entry_safe(bo_va, tmp, &bo->va, bo_list) {
+ /* remove from all vm address space */
+ mutex_lock(&bo_va->vm->mutex);
+ list_del(&bo_va->vm_list);
+ mutex_unlock(&bo_va->vm->mutex);
+ list_del(&bo_va->bo_list);
+ kfree(bo_va);
+ }
+}
+
static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
struct radeon_bo *bo;
@@ -55,6 +69,7 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
list_del_init(&bo->list);
mutex_unlock(&bo->rdev->gem.mutex);
radeon_bo_clear_surface_reg(bo);
+ radeon_bo_clear_va(bo);
drm_gem_object_release(&bo->gem_base);
kfree(bo);
}
@@ -95,6 +110,7 @@ int radeon_bo_create(struct radeon_device *rdev,
enum ttm_bo_type type;
unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
unsigned long max_size = 0;
+ size_t acc_size;
int r;
size = ALIGN(size, PAGE_SIZE);
@@ -117,6 +133,9 @@ int radeon_bo_create(struct radeon_device *rdev,
return -ENOMEM;
}
+ acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
+ sizeof(struct radeon_bo));
+
retry:
bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
if (bo == NULL)
@@ -130,12 +149,13 @@ retry:
bo->gem_base.driver_private = NULL;
bo->surface_reg = -1;
INIT_LIST_HEAD(&bo->list);
+ INIT_LIST_HEAD(&bo->va);
radeon_ttm_placement_from_domain(bo, domain);
/* Kernel allocation are uninterruptible */
mutex_lock(&rdev->vram_mutex);
r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
- &bo->placement, page_align, 0, !kernel, NULL, size,
- &radeon_ttm_bo_destroy);
+ &bo->placement, page_align, 0, !kernel, NULL,
+ acc_size, &radeon_ttm_bo_destroy);
mutex_unlock(&rdev->vram_mutex);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS) {
@@ -483,6 +503,7 @@ void radeon_bo_move_notify(struct ttm_buffer_object *bo,
return;
rbo = container_of(bo, struct radeon_bo, tbo);
radeon_bo_check_tiling(rbo, 0, 1);
+ radeon_vm_bo_invalidate(rbo->rdev, rbo);
}
int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
@@ -556,3 +577,16 @@ int radeon_bo_reserve(struct radeon_bo *bo, bool no_wait)
}
return 0;
}
+
+/* object have to be reserved */
+struct radeon_bo_va *radeon_bo_va(struct radeon_bo *rbo, struct radeon_vm *vm)
+{
+ struct radeon_bo_va *bo_va;
+
+ list_for_each_entry(bo_va, &rbo->va, bo_list) {
+ if (bo_va->vm == vm) {
+ return bo_va;
+ }
+ }
+ return NULL;
+}
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index b07f0f9b8627..cde430308870 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -83,6 +83,16 @@ static inline bool radeon_bo_is_reserved(struct radeon_bo *bo)
return !!atomic_read(&bo->tbo.reserved);
}
+static inline unsigned radeon_bo_ngpu_pages(struct radeon_bo *bo)
+{
+ return (bo->tbo.num_pages << PAGE_SHIFT) / RADEON_GPU_PAGE_SIZE;
+}
+
+static inline unsigned radeon_bo_gpu_page_alignment(struct radeon_bo *bo)
+{
+ return (bo->tbo.mem.page_alignment << PAGE_SHIFT) / RADEON_GPU_PAGE_SIZE;
+}
+
/**
* radeon_bo_mmap_offset - return mmap offset of bo
* @bo: radeon object for which we query the offset
@@ -128,4 +138,26 @@ extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem);
extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
+extern struct radeon_bo_va *radeon_bo_va(struct radeon_bo *rbo,
+ struct radeon_vm *vm);
+
+/*
+ * sub allocation
+ */
+extern int radeon_sa_bo_manager_init(struct radeon_device *rdev,
+ struct radeon_sa_manager *sa_manager,
+ unsigned size, u32 domain);
+extern void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
+ struct radeon_sa_manager *sa_manager);
+extern int radeon_sa_bo_manager_start(struct radeon_device *rdev,
+ struct radeon_sa_manager *sa_manager);
+extern int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
+ struct radeon_sa_manager *sa_manager);
+extern int radeon_sa_bo_new(struct radeon_device *rdev,
+ struct radeon_sa_manager *sa_manager,
+ struct radeon_sa_bo *sa_bo,
+ unsigned size, unsigned align);
+extern void radeon_sa_bo_free(struct radeon_device *rdev,
+ struct radeon_sa_bo *sa_bo);
+
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 78a665bd9519..095148e29a1f 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -252,7 +252,10 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
mutex_lock(&rdev->ddev->struct_mutex);
mutex_lock(&rdev->vram_mutex);
- mutex_lock(&rdev->cp.mutex);
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ if (rdev->ring[i].ring_obj)
+ mutex_lock(&rdev->ring[i].mutex);
+ }
/* gui idle int has issues on older chips it seems */
if (rdev->family >= CHIP_R600) {
@@ -268,12 +271,13 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
radeon_irq_set(rdev);
}
} else {
- if (rdev->cp.ready) {
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ if (ring->ready) {
struct radeon_fence *fence;
- radeon_ring_alloc(rdev, 64);
- radeon_fence_create(rdev, &fence);
+ radeon_ring_alloc(rdev, ring, 64);
+ radeon_fence_create(rdev, &fence, radeon_ring_index(rdev, ring));
radeon_fence_emit(rdev, fence);
- radeon_ring_commit(rdev);
+ radeon_ring_commit(rdev, ring);
radeon_fence_wait(fence, false);
radeon_fence_unref(&fence);
}
@@ -307,7 +311,10 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
- mutex_unlock(&rdev->cp.mutex);
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ if (rdev->ring[i].ring_obj)
+ mutex_unlock(&rdev->ring[i].mutex);
+ }
mutex_unlock(&rdev->vram_mutex);
mutex_unlock(&rdev->ddev->struct_mutex);
}
@@ -795,19 +802,14 @@ static void radeon_dynpm_idle_work_handler(struct work_struct *work)
resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
mutex_lock(&rdev->pm.mutex);
if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
- unsigned long irq_flags;
int not_processed = 0;
+ int i;
- read_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
- if (!list_empty(&rdev->fence_drv.emited)) {
- struct list_head *ptr;
- list_for_each(ptr, &rdev->fence_drv.emited) {
- /* count up to 3, that's enought info */
- if (++not_processed >= 3)
- break;
- }
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ not_processed += radeon_fence_count_emitted(rdev, i);
+ if (not_processed >= 3)
+ break;
}
- read_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
if (not_processed >= 3) { /* should upclock */
if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_DOWNCLOCK) {
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 49d58202202c..e8bc70933d1b 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -34,6 +34,7 @@
#include "atom.h"
int radeon_debugfs_ib_init(struct radeon_device *rdev);
+int radeon_debugfs_ring_init(struct radeon_device *rdev);
u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
{
@@ -60,105 +61,106 @@ u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
return idx_value;
}
-void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
+void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
{
#if DRM_DEBUG_CODE
- if (rdev->cp.count_dw <= 0) {
+ if (ring->count_dw <= 0) {
DRM_ERROR("radeon: writting more dword to ring than expected !\n");
}
#endif
- rdev->cp.ring[rdev->cp.wptr++] = v;
- rdev->cp.wptr &= rdev->cp.ptr_mask;
- rdev->cp.count_dw--;
- rdev->cp.ring_free_dw--;
+ ring->ring[ring->wptr++] = v;
+ ring->wptr &= ring->ptr_mask;
+ ring->count_dw--;
+ ring->ring_free_dw--;
}
-void radeon_ib_bogus_cleanup(struct radeon_device *rdev)
-{
- struct radeon_ib *ib, *n;
-
- list_for_each_entry_safe(ib, n, &rdev->ib_pool.bogus_ib, list) {
- list_del(&ib->list);
- vfree(ib->ptr);
- kfree(ib);
- }
-}
-
-void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib)
+/*
+ * IB.
+ */
+bool radeon_ib_try_free(struct radeon_device *rdev, struct radeon_ib *ib)
{
- struct radeon_ib *bib;
-
- bib = kmalloc(sizeof(*bib), GFP_KERNEL);
- if (bib == NULL)
- return;
- bib->ptr = vmalloc(ib->length_dw * 4);
- if (bib->ptr == NULL) {
- kfree(bib);
- return;
+ bool done = false;
+
+ /* only free ibs which have been emitted */
+ if (ib->fence && ib->fence->emitted) {
+ if (radeon_fence_signaled(ib->fence)) {
+ radeon_fence_unref(&ib->fence);
+ radeon_sa_bo_free(rdev, &ib->sa_bo);
+ done = true;
+ }
}
- memcpy(bib->ptr, ib->ptr, ib->length_dw * 4);
- bib->length_dw = ib->length_dw;
- mutex_lock(&rdev->ib_pool.mutex);
- list_add_tail(&bib->list, &rdev->ib_pool.bogus_ib);
- mutex_unlock(&rdev->ib_pool.mutex);
+ return done;
}
-/*
- * IB.
- */
-int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib)
+int radeon_ib_get(struct radeon_device *rdev, int ring,
+ struct radeon_ib **ib, unsigned size)
{
struct radeon_fence *fence;
- struct radeon_ib *nib;
- int r = 0, i, c;
+ unsigned cretry = 0;
+ int r = 0, i, idx;
*ib = NULL;
- r = radeon_fence_create(rdev, &fence);
+ /* align size on 256 bytes */
+ size = ALIGN(size, 256);
+
+ r = radeon_fence_create(rdev, &fence, ring);
if (r) {
dev_err(rdev->dev, "failed to create fence for new IB\n");
return r;
}
+
mutex_lock(&rdev->ib_pool.mutex);
- for (i = rdev->ib_pool.head_id, c = 0, nib = NULL; c < RADEON_IB_POOL_SIZE; c++, i++) {
- i &= (RADEON_IB_POOL_SIZE - 1);
- if (rdev->ib_pool.ibs[i].free) {
- nib = &rdev->ib_pool.ibs[i];
- break;
- }
- }
- if (nib == NULL) {
- /* This should never happen, it means we allocated all
- * IB and haven't scheduled one yet, return EBUSY to
- * userspace hoping that on ioctl recall we get better
- * luck
- */
- dev_err(rdev->dev, "no free indirect buffer !\n");
+ idx = rdev->ib_pool.head_id;
+retry:
+ if (cretry > 5) {
+ dev_err(rdev->dev, "failed to get an ib after 5 retry\n");
mutex_unlock(&rdev->ib_pool.mutex);
radeon_fence_unref(&fence);
- return -EBUSY;
+ return -ENOMEM;
}
- rdev->ib_pool.head_id = (nib->idx + 1) & (RADEON_IB_POOL_SIZE - 1);
- nib->free = false;
- if (nib->fence) {
- mutex_unlock(&rdev->ib_pool.mutex);
- r = radeon_fence_wait(nib->fence, false);
- if (r) {
- dev_err(rdev->dev, "error waiting fence of IB(%u:0x%016lX:%u)\n",
- nib->idx, (unsigned long)nib->gpu_addr, nib->length_dw);
- mutex_lock(&rdev->ib_pool.mutex);
- nib->free = true;
- mutex_unlock(&rdev->ib_pool.mutex);
- radeon_fence_unref(&fence);
- return r;
+ cretry++;
+ for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
+ radeon_ib_try_free(rdev, &rdev->ib_pool.ibs[idx]);
+ if (rdev->ib_pool.ibs[idx].fence == NULL) {
+ r = radeon_sa_bo_new(rdev, &rdev->ib_pool.sa_manager,
+ &rdev->ib_pool.ibs[idx].sa_bo,
+ size, 256);
+ if (!r) {
+ *ib = &rdev->ib_pool.ibs[idx];
+ (*ib)->ptr = rdev->ib_pool.sa_manager.cpu_ptr;
+ (*ib)->ptr += ((*ib)->sa_bo.offset >> 2);
+ (*ib)->gpu_addr = rdev->ib_pool.sa_manager.gpu_addr;
+ (*ib)->gpu_addr += (*ib)->sa_bo.offset;
+ (*ib)->fence = fence;
+ (*ib)->vm_id = 0;
+ /* ibs are most likely to be allocated in a ring fashion,
+ * thus rdev->ib_pool.head_id should be the id of the
+ * oldest ib
+ */
+ rdev->ib_pool.head_id = (1 + idx);
+ rdev->ib_pool.head_id &= (RADEON_IB_POOL_SIZE - 1);
+ mutex_unlock(&rdev->ib_pool.mutex);
+ return 0;
+ }
}
- mutex_lock(&rdev->ib_pool.mutex);
+ idx = (idx + 1) & (RADEON_IB_POOL_SIZE - 1);
+ }
+ /* this should be a rare event, i.e. all ibs scheduled but none signaled yet.
+ */
+ for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
+ if (rdev->ib_pool.ibs[idx].fence && rdev->ib_pool.ibs[idx].fence->emitted) {
+ r = radeon_fence_wait(rdev->ib_pool.ibs[idx].fence, false);
+ if (!r) {
+ goto retry;
+ }
+ /* an error happened */
+ break;
+ }
+ idx = (idx + 1) & (RADEON_IB_POOL_SIZE - 1);
}
- radeon_fence_unref(&nib->fence);
- nib->fence = fence;
- nib->length_dw = 0;
mutex_unlock(&rdev->ib_pool.mutex);
- *ib = nib;
- return 0;
+ radeon_fence_unref(&fence);
+ return r;
}
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
@@ -169,247 +171,255 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
if (tmp == NULL) {
return;
}
- if (!tmp->fence->emited)
- radeon_fence_unref(&tmp->fence);
mutex_lock(&rdev->ib_pool.mutex);
- tmp->free = true;
+ if (tmp->fence && !tmp->fence->emitted) {
+ radeon_sa_bo_free(rdev, &tmp->sa_bo);
+ radeon_fence_unref(&tmp->fence);
+ }
mutex_unlock(&rdev->ib_pool.mutex);
}
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
{
+ struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
int r = 0;
- if (!ib->length_dw || !rdev->cp.ready) {
+ if (!ib->length_dw || !ring->ready) {
/* TODO: Nothings in the ib we should report. */
DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx);
return -EINVAL;
}
/* 64 dwords should be enough for fence too */
- r = radeon_ring_lock(rdev, 64);
+ r = radeon_ring_lock(rdev, ring, 64);
if (r) {
DRM_ERROR("radeon: scheduling IB failed (%d).\n", r);
return r;
}
- radeon_ring_ib_execute(rdev, ib);
+ radeon_ring_ib_execute(rdev, ib->fence->ring, ib);
radeon_fence_emit(rdev, ib->fence);
- mutex_lock(&rdev->ib_pool.mutex);
- /* once scheduled IB is considered free and protected by the fence */
- ib->free = true;
- mutex_unlock(&rdev->ib_pool.mutex);
- radeon_ring_unlock_commit(rdev);
+ radeon_ring_unlock_commit(rdev, ring);
return 0;
}
int radeon_ib_pool_init(struct radeon_device *rdev)
{
- void *ptr;
- uint64_t gpu_addr;
- int i;
- int r = 0;
+ int i, r;
- if (rdev->ib_pool.robj)
+ mutex_lock(&rdev->ib_pool.mutex);
+ if (rdev->ib_pool.ready) {
+ mutex_unlock(&rdev->ib_pool.mutex);
return 0;
- INIT_LIST_HEAD(&rdev->ib_pool.bogus_ib);
- /* Allocate 1M object buffer */
- r = radeon_bo_create(rdev, RADEON_IB_POOL_SIZE*64*1024,
- PAGE_SIZE, true, RADEON_GEM_DOMAIN_GTT,
- &rdev->ib_pool.robj);
- if (r) {
- DRM_ERROR("radeon: failed to ib pool (%d).\n", r);
- return r;
}
- r = radeon_bo_reserve(rdev->ib_pool.robj, false);
- if (unlikely(r != 0))
- return r;
- r = radeon_bo_pin(rdev->ib_pool.robj, RADEON_GEM_DOMAIN_GTT, &gpu_addr);
- if (r) {
- radeon_bo_unreserve(rdev->ib_pool.robj);
- DRM_ERROR("radeon: failed to pin ib pool (%d).\n", r);
- return r;
- }
- r = radeon_bo_kmap(rdev->ib_pool.robj, &ptr);
- radeon_bo_unreserve(rdev->ib_pool.robj);
+
+ r = radeon_sa_bo_manager_init(rdev, &rdev->ib_pool.sa_manager,
+ RADEON_IB_POOL_SIZE*64*1024,
+ RADEON_GEM_DOMAIN_GTT);
if (r) {
- DRM_ERROR("radeon: failed to map ib pool (%d).\n", r);
+ mutex_unlock(&rdev->ib_pool.mutex);
return r;
}
- for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
- unsigned offset;
- offset = i * 64 * 1024;
- rdev->ib_pool.ibs[i].gpu_addr = gpu_addr + offset;
- rdev->ib_pool.ibs[i].ptr = ptr + offset;
+ for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
+ rdev->ib_pool.ibs[i].fence = NULL;
rdev->ib_pool.ibs[i].idx = i;
rdev->ib_pool.ibs[i].length_dw = 0;
- rdev->ib_pool.ibs[i].free = true;
+ INIT_LIST_HEAD(&rdev->ib_pool.ibs[i].sa_bo.list);
}
rdev->ib_pool.head_id = 0;
rdev->ib_pool.ready = true;
DRM_INFO("radeon: ib pool ready.\n");
+
if (radeon_debugfs_ib_init(rdev)) {
DRM_ERROR("Failed to register debugfs file for IB !\n");
}
- return r;
+ if (radeon_debugfs_ring_init(rdev)) {
+ DRM_ERROR("Failed to register debugfs file for rings !\n");
+ }
+ mutex_unlock(&rdev->ib_pool.mutex);
+ return 0;
}
void radeon_ib_pool_fini(struct radeon_device *rdev)
{
- int r;
- struct radeon_bo *robj;
+ unsigned i;
- if (!rdev->ib_pool.ready) {
- return;
- }
mutex_lock(&rdev->ib_pool.mutex);
- radeon_ib_bogus_cleanup(rdev);
- robj = rdev->ib_pool.robj;
- rdev->ib_pool.robj = NULL;
- mutex_unlock(&rdev->ib_pool.mutex);
-
- if (robj) {
- r = radeon_bo_reserve(robj, false);
- if (likely(r == 0)) {
- radeon_bo_kunmap(robj);
- radeon_bo_unpin(robj);
- radeon_bo_unreserve(robj);
+ if (rdev->ib_pool.ready) {
+ for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
+ radeon_sa_bo_free(rdev, &rdev->ib_pool.ibs[i].sa_bo);
+ radeon_fence_unref(&rdev->ib_pool.ibs[i].fence);
}
- radeon_bo_unref(&robj);
+ radeon_sa_bo_manager_fini(rdev, &rdev->ib_pool.sa_manager);
+ rdev->ib_pool.ready = false;
}
+ mutex_unlock(&rdev->ib_pool.mutex);
}
+int radeon_ib_pool_start(struct radeon_device *rdev)
+{
+ return radeon_sa_bo_manager_start(rdev, &rdev->ib_pool.sa_manager);
+}
+
+int radeon_ib_pool_suspend(struct radeon_device *rdev)
+{
+ return radeon_sa_bo_manager_suspend(rdev, &rdev->ib_pool.sa_manager);
+}
/*
* Ring.
*/
-void radeon_ring_free_size(struct radeon_device *rdev)
+int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *ring)
{
- if (rdev->wb.enabled)
- rdev->cp.rptr = le32_to_cpu(rdev->wb.wb[RADEON_WB_CP_RPTR_OFFSET/4]);
- else {
- if (rdev->family >= CHIP_R600)
- rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
- else
- rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
+ /* r1xx-r5xx only has CP ring */
+ if (rdev->family < CHIP_R600)
+ return RADEON_RING_TYPE_GFX_INDEX;
+
+ if (rdev->family >= CHIP_CAYMAN) {
+ if (ring == &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX])
+ return CAYMAN_RING_TYPE_CP1_INDEX;
+ else if (ring == &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX])
+ return CAYMAN_RING_TYPE_CP2_INDEX;
}
+ return RADEON_RING_TYPE_GFX_INDEX;
+}
+
+void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ u32 rptr;
+
+ if (rdev->wb.enabled)
+ rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
+ else
+ rptr = RREG32(ring->rptr_reg);
+ ring->rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
/* This works because ring_size is a power of 2 */
- rdev->cp.ring_free_dw = (rdev->cp.rptr + (rdev->cp.ring_size / 4));
- rdev->cp.ring_free_dw -= rdev->cp.wptr;
- rdev->cp.ring_free_dw &= rdev->cp.ptr_mask;
- if (!rdev->cp.ring_free_dw) {
- rdev->cp.ring_free_dw = rdev->cp.ring_size / 4;
+ ring->ring_free_dw = (ring->rptr + (ring->ring_size / 4));
+ ring->ring_free_dw -= ring->wptr;
+ ring->ring_free_dw &= ring->ptr_mask;
+ if (!ring->ring_free_dw) {
+ ring->ring_free_dw = ring->ring_size / 4;
}
}
-int radeon_ring_alloc(struct radeon_device *rdev, unsigned ndw)
+
+int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
{
int r;
/* Align requested size with padding so unlock_commit can
* pad safely */
- ndw = (ndw + rdev->cp.align_mask) & ~rdev->cp.align_mask;
- while (ndw > (rdev->cp.ring_free_dw - 1)) {
- radeon_ring_free_size(rdev);
- if (ndw < rdev->cp.ring_free_dw) {
+ ndw = (ndw + ring->align_mask) & ~ring->align_mask;
+ while (ndw > (ring->ring_free_dw - 1)) {
+ radeon_ring_free_size(rdev, ring);
+ if (ndw < ring->ring_free_dw) {
break;
}
- r = radeon_fence_wait_next(rdev);
+ r = radeon_fence_wait_next(rdev, radeon_ring_index(rdev, ring));
if (r)
return r;
}
- rdev->cp.count_dw = ndw;
- rdev->cp.wptr_old = rdev->cp.wptr;
+ ring->count_dw = ndw;
+ ring->wptr_old = ring->wptr;
return 0;
}
-int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw)
+int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
{
int r;
- mutex_lock(&rdev->cp.mutex);
- r = radeon_ring_alloc(rdev, ndw);
+ mutex_lock(&ring->mutex);
+ r = radeon_ring_alloc(rdev, ring, ndw);
if (r) {
- mutex_unlock(&rdev->cp.mutex);
+ mutex_unlock(&ring->mutex);
return r;
}
return 0;
}
-void radeon_ring_commit(struct radeon_device *rdev)
+void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
unsigned count_dw_pad;
unsigned i;
/* We pad to match fetch size */
- count_dw_pad = (rdev->cp.align_mask + 1) -
- (rdev->cp.wptr & rdev->cp.align_mask);
+ count_dw_pad = (ring->align_mask + 1) -
+ (ring->wptr & ring->align_mask);
for (i = 0; i < count_dw_pad; i++) {
- radeon_ring_write(rdev, 2 << 30);
+ radeon_ring_write(ring, ring->nop);
}
DRM_MEMORYBARRIER();
- radeon_cp_commit(rdev);
+ WREG32(ring->wptr_reg, (ring->wptr << ring->ptr_reg_shift) & ring->ptr_reg_mask);
+ (void)RREG32(ring->wptr_reg);
}
-void radeon_ring_unlock_commit(struct radeon_device *rdev)
+void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
- radeon_ring_commit(rdev);
- mutex_unlock(&rdev->cp.mutex);
+ radeon_ring_commit(rdev, ring);
+ mutex_unlock(&ring->mutex);
}
-void radeon_ring_unlock_undo(struct radeon_device *rdev)
+void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *ring)
{
- rdev->cp.wptr = rdev->cp.wptr_old;
- mutex_unlock(&rdev->cp.mutex);
+ ring->wptr = ring->wptr_old;
+ mutex_unlock(&ring->mutex);
}
-int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
+int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size,
+ unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
+ u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop)
{
int r;
- rdev->cp.ring_size = ring_size;
+ ring->ring_size = ring_size;
+ ring->rptr_offs = rptr_offs;
+ ring->rptr_reg = rptr_reg;
+ ring->wptr_reg = wptr_reg;
+ ring->ptr_reg_shift = ptr_reg_shift;
+ ring->ptr_reg_mask = ptr_reg_mask;
+ ring->nop = nop;
/* Allocate ring buffer */
- if (rdev->cp.ring_obj == NULL) {
- r = radeon_bo_create(rdev, rdev->cp.ring_size, PAGE_SIZE, true,
+ if (ring->ring_obj == NULL) {
+ r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_GTT,
- &rdev->cp.ring_obj);
+ &ring->ring_obj);
if (r) {
dev_err(rdev->dev, "(%d) ring create failed\n", r);
return r;
}
- r = radeon_bo_reserve(rdev->cp.ring_obj, false);
+ r = radeon_bo_reserve(ring->ring_obj, false);
if (unlikely(r != 0))
return r;
- r = radeon_bo_pin(rdev->cp.ring_obj, RADEON_GEM_DOMAIN_GTT,
- &rdev->cp.gpu_addr);
+ r = radeon_bo_pin(ring->ring_obj, RADEON_GEM_DOMAIN_GTT,
+ &ring->gpu_addr);
if (r) {
- radeon_bo_unreserve(rdev->cp.ring_obj);
+ radeon_bo_unreserve(ring->ring_obj);
dev_err(rdev->dev, "(%d) ring pin failed\n", r);
return r;
}
- r = radeon_bo_kmap(rdev->cp.ring_obj,
- (void **)&rdev->cp.ring);
- radeon_bo_unreserve(rdev->cp.ring_obj);
+ r = radeon_bo_kmap(ring->ring_obj,
+ (void **)&ring->ring);
+ radeon_bo_unreserve(ring->ring_obj);
if (r) {
dev_err(rdev->dev, "(%d) ring map failed\n", r);
return r;
}
}
- rdev->cp.ptr_mask = (rdev->cp.ring_size / 4) - 1;
- rdev->cp.ring_free_dw = rdev->cp.ring_size / 4;
+ ring->ptr_mask = (ring->ring_size / 4) - 1;
+ ring->ring_free_dw = ring->ring_size / 4;
return 0;
}
-void radeon_ring_fini(struct radeon_device *rdev)
+void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *ring)
{
int r;
struct radeon_bo *ring_obj;
- mutex_lock(&rdev->cp.mutex);
- ring_obj = rdev->cp.ring_obj;
- rdev->cp.ring = NULL;
- rdev->cp.ring_obj = NULL;
- mutex_unlock(&rdev->cp.mutex);
+ mutex_lock(&ring->mutex);
+ ring_obj = ring->ring_obj;
+ ring->ring = NULL;
+ ring->ring_obj = NULL;
+ mutex_unlock(&ring->mutex);
if (ring_obj) {
r = radeon_bo_reserve(ring_obj, false);
@@ -422,72 +432,83 @@ void radeon_ring_fini(struct radeon_device *rdev)
}
}
-
/*
* Debugfs info
*/
#if defined(CONFIG_DEBUG_FS)
-static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
+
+static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct radeon_ib *ib = node->info_ent->data;
- unsigned i;
-
- if (ib == NULL) {
- return 0;
- }
- seq_printf(m, "IB %04u\n", ib->idx);
- seq_printf(m, "IB fence %p\n", ib->fence);
- seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
- for (i = 0; i < ib->length_dw; i++) {
- seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
+ struct drm_device *dev = node->minor->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ int ridx = *(int*)node->info_ent->data;
+ struct radeon_ring *ring = &rdev->ring[ridx];
+ unsigned count, i, j;
+
+ radeon_ring_free_size(rdev, ring);
+ count = (ring->ring_size / 4) - ring->ring_free_dw;
+ seq_printf(m, "wptr(0x%04x): 0x%08x\n", ring->wptr_reg, RREG32(ring->wptr_reg));
+ seq_printf(m, "rptr(0x%04x): 0x%08x\n", ring->rptr_reg, RREG32(ring->rptr_reg));
+ seq_printf(m, "driver's copy of the wptr: 0x%08x\n", ring->wptr);
+ seq_printf(m, "driver's copy of the rptr: 0x%08x\n", ring->rptr);
+ seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
+ seq_printf(m, "%u dwords in ring\n", count);
+ i = ring->rptr;
+ for (j = 0; j <= count; j++) {
+ seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
+ i = (i + 1) & ring->ptr_mask;
}
return 0;
}
-static int radeon_debugfs_ib_bogus_info(struct seq_file *m, void *data)
+static int radeon_ring_type_gfx_index = RADEON_RING_TYPE_GFX_INDEX;
+static int cayman_ring_type_cp1_index = CAYMAN_RING_TYPE_CP1_INDEX;
+static int cayman_ring_type_cp2_index = CAYMAN_RING_TYPE_CP2_INDEX;
+
+static struct drm_info_list radeon_debugfs_ring_info_list[] = {
+ {"radeon_ring_gfx", radeon_debugfs_ring_info, 0, &radeon_ring_type_gfx_index},
+ {"radeon_ring_cp1", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp1_index},
+ {"radeon_ring_cp2", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp2_index},
+};
+
+static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct radeon_device *rdev = node->info_ent->data;
- struct radeon_ib *ib;
+ struct radeon_ib *ib = node->info_ent->data;
unsigned i;
- mutex_lock(&rdev->ib_pool.mutex);
- if (list_empty(&rdev->ib_pool.bogus_ib)) {
- mutex_unlock(&rdev->ib_pool.mutex);
- seq_printf(m, "no bogus IB recorded\n");
+ if (ib == NULL) {
return 0;
}
- ib = list_first_entry(&rdev->ib_pool.bogus_ib, struct radeon_ib, list);
- list_del_init(&ib->list);
- mutex_unlock(&rdev->ib_pool.mutex);
+ seq_printf(m, "IB %04u\n", ib->idx);
+ seq_printf(m, "IB fence %p\n", ib->fence);
seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
for (i = 0; i < ib->length_dw; i++) {
seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
}
- vfree(ib->ptr);
- kfree(ib);
return 0;
}
static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE];
static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32];
+#endif
-static struct drm_info_list radeon_debugfs_ib_bogus_info_list[] = {
- {"radeon_ib_bogus", radeon_debugfs_ib_bogus_info, 0, NULL},
-};
+int radeon_debugfs_ring_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ return radeon_debugfs_add_files(rdev, radeon_debugfs_ring_info_list,
+ ARRAY_SIZE(radeon_debugfs_ring_info_list));
+#else
+ return 0;
#endif
+}
int radeon_debugfs_ib_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
unsigned i;
- int r;
- radeon_debugfs_ib_bogus_info_list[0].data = rdev;
- r = radeon_debugfs_add_files(rdev, radeon_debugfs_ib_bogus_info_list, 1);
- if (r)
- return r;
for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i);
radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i];
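
radeon_ring_free_size() above relies on the ring size being a power of two, so the free-dword count can be derived with a mask rather than a modulo. A small stand-alone sketch of that arithmetic, with made-up values rather than driver state:

#include <stdio.h>

/* Mirrors the free-size logic: a masked difference of zero is treated
 * as a completely free (empty) ring. ring_size_dw must be a power of two. */
static unsigned ring_free_dw(unsigned rptr, unsigned wptr, unsigned ring_size_dw)
{
	unsigned ptr_mask = ring_size_dw - 1;
	unsigned free = (rptr + ring_size_dw - wptr) & ptr_mask;

	return free ? free : ring_size_dw;
}

int main(void)
{
	/* 1024-dword ring, reader at 16, writer at 700 */
	printf("%u free\n", ring_free_dw(16, 700, 1024));	/* 340 */
	printf("%u free\n", ring_free_dw(0, 0, 1024));		/* 1024 (empty) */
	return 0;
}
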
diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
new file mode 100644
index 000000000000..4cce47e7dc0d
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_sa.c
@@ -0,0 +1,189 @@
+/*
+ * Copyright 2011 Red Hat Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors:
+ * Jerome Glisse <glisse@freedesktop.org>
+ */
+#include "drmP.h"
+#include "drm.h"
+#include "radeon.h"
+
+int radeon_sa_bo_manager_init(struct radeon_device *rdev,
+ struct radeon_sa_manager *sa_manager,
+ unsigned size, u32 domain)
+{
+ int r;
+
+ sa_manager->bo = NULL;
+ sa_manager->size = size;
+ sa_manager->domain = domain;
+ INIT_LIST_HEAD(&sa_manager->sa_bo);
+
+ r = radeon_bo_create(rdev, size, RADEON_GPU_PAGE_SIZE, true,
+ RADEON_GEM_DOMAIN_CPU, &sa_manager->bo);
+ if (r) {
+ dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r);
+ return r;
+ }
+
+ return r;
+}
+
+void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
+ struct radeon_sa_manager *sa_manager)
+{
+ struct radeon_sa_bo *sa_bo, *tmp;
+
+ if (!list_empty(&sa_manager->sa_bo)) {
+ dev_err(rdev->dev, "sa_manager is not empty, clearing anyway\n");
+ }
+ list_for_each_entry_safe(sa_bo, tmp, &sa_manager->sa_bo, list) {
+ list_del_init(&sa_bo->list);
+ }
+ radeon_bo_unref(&sa_manager->bo);
+ sa_manager->size = 0;
+}
+
+int radeon_sa_bo_manager_start(struct radeon_device *rdev,
+ struct radeon_sa_manager *sa_manager)
+{
+ int r;
+
+ if (sa_manager->bo == NULL) {
+ dev_err(rdev->dev, "no bo for sa manager\n");
+ return -EINVAL;
+ }
+
+ /* map the buffer */
+ r = radeon_bo_reserve(sa_manager->bo, false);
+ if (r) {
+ dev_err(rdev->dev, "(%d) failed to reserve manager bo\n", r);
+ return r;
+ }
+ r = radeon_bo_pin(sa_manager->bo, sa_manager->domain, &sa_manager->gpu_addr);
+ if (r) {
+ radeon_bo_unreserve(sa_manager->bo);
+ dev_err(rdev->dev, "(%d) failed to pin manager bo\n", r);
+ return r;
+ }
+ r = radeon_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr);
+ radeon_bo_unreserve(sa_manager->bo);
+ return r;
+}
+
+int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
+ struct radeon_sa_manager *sa_manager)
+{
+ int r;
+
+ if (sa_manager->bo == NULL) {
+ dev_err(rdev->dev, "no bo for sa manager\n");
+ return -EINVAL;
+ }
+
+ r = radeon_bo_reserve(sa_manager->bo, false);
+ if (!r) {
+ radeon_bo_kunmap(sa_manager->bo);
+ radeon_bo_unpin(sa_manager->bo);
+ radeon_bo_unreserve(sa_manager->bo);
+ }
+ return r;
+}
+
+/*
+ * The principle is simple: we keep a list of sub allocations in offset
+ * order (first entry has offset == 0, last entry has the highest
+ * offset).
+ *
+ * When allocating a new object we first check if there is room at
+ * the end, i.e. total_size - (last_object_offset + last_object_size) >=
+ * alloc_size. If so we allocate the new object there.
+ *
+ * When there is not enough room at the end, we start waiting for
+ * each sub object until we reach object_offset + object_size >=
+ * alloc_size; this object then becomes the sub object we return.
+ *
+ * Alignment can't be bigger than page size.
+ */
+int radeon_sa_bo_new(struct radeon_device *rdev,
+ struct radeon_sa_manager *sa_manager,
+ struct radeon_sa_bo *sa_bo,
+ unsigned size, unsigned align)
+{
+ struct radeon_sa_bo *tmp;
+ struct list_head *head;
+ unsigned offset = 0, wasted = 0;
+
+ BUG_ON(align > RADEON_GPU_PAGE_SIZE);
+ BUG_ON(size > sa_manager->size);
+
+ /* no allocations yet ? */
+ head = sa_manager->sa_bo.prev;
+ if (list_empty(&sa_manager->sa_bo)) {
+ goto out;
+ }
+
+ /* look for a hole big enough */
+ offset = 0;
+ list_for_each_entry(tmp, &sa_manager->sa_bo, list) {
+ /* room before this object ? */
+ if ((tmp->offset - offset) >= size) {
+ head = tmp->list.prev;
+ goto out;
+ }
+ offset = tmp->offset + tmp->size;
+ wasted = offset % align;
+ if (wasted) {
+ wasted = align - wasted;
+ }
+ offset += wasted;
+ }
+ /* room at the end ? */
+ head = sa_manager->sa_bo.prev;
+ tmp = list_entry(head, struct radeon_sa_bo, list);
+ offset = tmp->offset + tmp->size;
+ wasted = offset % align;
+ if (wasted) {
+ wasted = align - wasted;
+ }
+ offset += wasted;
+ if ((sa_manager->size - offset) < size) {
+ /* failed to find something big enough */
+ return -ENOMEM;
+ }
+
+out:
+ sa_bo->manager = sa_manager;
+ sa_bo->offset = offset;
+ sa_bo->size = size;
+ list_add(&sa_bo->list, head);
+ return 0;
+}
+
+void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo *sa_bo)
+{
+ list_del_init(&sa_bo->list);
+}
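
To make the allocation comment in radeon_sa_bo_new() above concrete, consider a hypothetical 4096-byte manager that already holds one sub allocation of 1000 bytes at offset 0. A request for 256 bytes with 256-byte alignment finds no hole before the existing entry, rounds the candidate offset 1000 up to 1024 (24 bytes of alignment waste), sees that 4096 - 1024 >= 256 holds, and places the new sub allocation at offset 1024. A request that fits neither into a hole nor at the end fails with -ENOMEM, since the version shown here does not yet wait for older allocations to be freed.
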
diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c
new file mode 100644
index 000000000000..61dd4e3c9209
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_semaphore.c
@@ -0,0 +1,178 @@
+/*
+ * Copyright 2011 Christian König.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors:
+ * Christian König <deathsimple@vodafone.de>
+ */
+#include "drmP.h"
+#include "drm.h"
+#include "radeon.h"
+
+static int radeon_semaphore_add_bo(struct radeon_device *rdev)
+{
+ struct radeon_semaphore_bo *bo;
+ unsigned long irq_flags;
+ uint64_t gpu_addr;
+ uint32_t *cpu_ptr;
+ int r, i;
+
+
+ bo = kmalloc(sizeof(struct radeon_semaphore_bo), GFP_KERNEL);
+ if (bo == NULL) {
+ return -ENOMEM;
+ }
+ INIT_LIST_HEAD(&bo->free);
+ INIT_LIST_HEAD(&bo->list);
+ bo->nused = 0;
+
+ r = radeon_ib_get(rdev, 0, &bo->ib, RADEON_SEMAPHORE_BO_SIZE);
+ if (r) {
+ dev_err(rdev->dev, "failed to get a bo after 5 retry\n");
+ kfree(bo);
+ return r;
+ }
+ gpu_addr = rdev->ib_pool.sa_manager.gpu_addr;
+ gpu_addr += bo->ib->sa_bo.offset;
+ cpu_ptr = rdev->ib_pool.sa_manager.cpu_ptr;
+ cpu_ptr += (bo->ib->sa_bo.offset >> 2);
+ for (i = 0; i < (RADEON_SEMAPHORE_BO_SIZE/8); i++) {
+ bo->semaphores[i].gpu_addr = gpu_addr;
+ bo->semaphores[i].cpu_ptr = cpu_ptr;
+ bo->semaphores[i].bo = bo;
+ list_add_tail(&bo->semaphores[i].list, &bo->free);
+ gpu_addr += 8;
+ cpu_ptr += 2;
+ }
+ write_lock_irqsave(&rdev->semaphore_drv.lock, irq_flags);
+ list_add_tail(&bo->list, &rdev->semaphore_drv.bo);
+ write_unlock_irqrestore(&rdev->semaphore_drv.lock, irq_flags);
+ return 0;
+}
+
+static void radeon_semaphore_del_bo_locked(struct radeon_device *rdev,
+ struct radeon_semaphore_bo *bo)
+{
+ radeon_sa_bo_free(rdev, &bo->ib->sa_bo);
+ radeon_fence_unref(&bo->ib->fence);
+ list_del(&bo->list);
+ kfree(bo);
+}
+
+void radeon_semaphore_shrink_locked(struct radeon_device *rdev)
+{
+ struct radeon_semaphore_bo *bo, *n;
+
+ if (list_empty(&rdev->semaphore_drv.bo)) {
+ return;
+ }
+ /* only shrink if first bo has free semaphore */
+ bo = list_first_entry(&rdev->semaphore_drv.bo, struct radeon_semaphore_bo, list);
+ if (list_empty(&bo->free)) {
+ return;
+ }
+ list_for_each_entry_safe_continue(bo, n, &rdev->semaphore_drv.bo, list) {
+ if (bo->nused)
+ continue;
+ radeon_semaphore_del_bo_locked(rdev, bo);
+ }
+}
+
+int radeon_semaphore_create(struct radeon_device *rdev,
+ struct radeon_semaphore **semaphore)
+{
+ struct radeon_semaphore_bo *bo;
+ unsigned long irq_flags;
+ bool do_retry = true;
+ int r;
+
+retry:
+ *semaphore = NULL;
+ write_lock_irqsave(&rdev->semaphore_drv.lock, irq_flags);
+ list_for_each_entry(bo, &rdev->semaphore_drv.bo, list) {
+ if (list_empty(&bo->free))
+ continue;
+ *semaphore = list_first_entry(&bo->free, struct radeon_semaphore, list);
+ (*semaphore)->cpu_ptr[0] = 0;
+ (*semaphore)->cpu_ptr[1] = 0;
+ list_del(&(*semaphore)->list);
+ bo->nused++;
+ break;
+ }
+ write_unlock_irqrestore(&rdev->semaphore_drv.lock, irq_flags);
+
+ if (*semaphore == NULL) {
+ if (do_retry) {
+ do_retry = false;
+ r = radeon_semaphore_add_bo(rdev);
+ if (r)
+ return r;
+ goto retry;
+ }
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
+ struct radeon_semaphore *semaphore)
+{
+ radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, false);
+}
+
+void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
+ struct radeon_semaphore *semaphore)
+{
+ radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, true);
+}
+
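+/*
+ * Return the semaphore slot to its BO's free list and try to shrink
+ * the pool of semaphore BOs.
+ */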
+void radeon_semaphore_free(struct radeon_device *rdev,
+ struct radeon_semaphore *semaphore)
+{
+ unsigned long irq_flags;
+
+ write_lock_irqsave(&rdev->semaphore_drv.lock, irq_flags);
+ semaphore->bo->nused--;
+ list_add_tail(&semaphore->list, &semaphore->bo->free);
+ radeon_semaphore_shrink_locked(rdev);
+ write_unlock_irqrestore(&rdev->semaphore_drv.lock, irq_flags);
+}
+
+void radeon_semaphore_driver_fini(struct radeon_device *rdev)
+{
+ struct radeon_semaphore_bo *bo, *n;
+ unsigned long irq_flags;
+
+ write_lock_irqsave(&rdev->semaphore_drv.lock, irq_flags);
+ /* forcibly free everything */
+ list_for_each_entry_safe(bo, n, &rdev->semaphore_drv.bo, list) {
+ if (!list_empty(&bo->free)) {
+ dev_err(rdev->dev, "still in use semaphore\n");
+ }
+ radeon_semaphore_del_bo_locked(rdev, bo);
+ }
+ write_unlock_irqrestore(&rdev->semaphore_drv.lock, irq_flags);
+}
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index 602fa3541c45..dc5dcf483aa3 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -42,7 +42,9 @@ void radeon_test_moves(struct radeon_device *rdev)
/* Number of tests =
* (Total GTT - IB pool - writeback page - ring buffers) / test size
*/
- n = rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - rdev->cp.ring_size;
+ n = rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024;
+ for (i = 0; i < RADEON_NUM_RINGS; ++i)
+ n -= rdev->ring[i].ring_size;
if (rdev->wb.wb_obj)
n -= RADEON_GPU_PAGE_SIZE;
if (rdev->ih.ring_obj)
@@ -104,7 +106,7 @@ void radeon_test_moves(struct radeon_device *rdev)
radeon_bo_kunmap(gtt_obj[i]);
- r = radeon_fence_create(rdev, &fence);
+ r = radeon_fence_create(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
if (r) {
DRM_ERROR("Failed to create GTT->VRAM fence %d\n", i);
goto out_cleanup;
@@ -153,7 +155,7 @@ void radeon_test_moves(struct radeon_device *rdev)
radeon_bo_kunmap(vram_obj);
- r = radeon_fence_create(rdev, &fence);
+ r = radeon_fence_create(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
if (r) {
DRM_ERROR("Failed to create VRAM->GTT fence %d\n", i);
goto out_cleanup;
@@ -232,3 +234,264 @@ out_cleanup:
printk(KERN_WARNING "Error while testing BO move.\n");
}
}
+
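+/*
+ * Two ring sync test: ring A waits twice on a semaphore, ring B signals
+ * it; each signal must release exactly one of the two fences on ring A.
+ */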
+void radeon_test_ring_sync(struct radeon_device *rdev,
+ struct radeon_ring *ringA,
+ struct radeon_ring *ringB)
+{
+ struct radeon_fence *fence1 = NULL, *fence2 = NULL;
+ struct radeon_semaphore *semaphore = NULL;
+ int ridxA = radeon_ring_index(rdev, ringA);
+ int ridxB = radeon_ring_index(rdev, ringB);
+ int r;
+
+ r = radeon_fence_create(rdev, &fence1, ridxA);
+ if (r) {
+ DRM_ERROR("Failed to create sync fence 1\n");
+ goto out_cleanup;
+ }
+ r = radeon_fence_create(rdev, &fence2, ridxA);
+ if (r) {
+ DRM_ERROR("Failed to create sync fence 2\n");
+ goto out_cleanup;
+ }
+
+ r = radeon_semaphore_create(rdev, &semaphore);
+ if (r) {
+ DRM_ERROR("Failed to create semaphore\n");
+ goto out_cleanup;
+ }
+
+ r = radeon_ring_lock(rdev, ringA, 64);
+ if (r) {
+ DRM_ERROR("Failed to lock ring A %d\n", ridxA);
+ goto out_cleanup;
+ }
+ radeon_semaphore_emit_wait(rdev, ridxA, semaphore);
+ radeon_fence_emit(rdev, fence1);
+ radeon_semaphore_emit_wait(rdev, ridxA, semaphore);
+ radeon_fence_emit(rdev, fence2);
+ radeon_ring_unlock_commit(rdev, ringA);
+
+ mdelay(1000);
+
+ if (radeon_fence_signaled(fence1)) {
+ DRM_ERROR("Fence 1 signaled without waiting for semaphore.\n");
+ goto out_cleanup;
+ }
+
+ r = radeon_ring_lock(rdev, ringB, 64);
+ if (r) {
+ DRM_ERROR("Failed to lock ring B %p\n", ringB);
+ goto out_cleanup;
+ }
+ radeon_semaphore_emit_signal(rdev, ridxB, semaphore);
+ radeon_ring_unlock_commit(rdev, ringB);
+
+ r = radeon_fence_wait(fence1, false);
+ if (r) {
+ DRM_ERROR("Failed to wait for sync fence 1\n");
+ goto out_cleanup;
+ }
+
+ mdelay(1000);
+
+ if (radeon_fence_signaled(fence2)) {
+ DRM_ERROR("Fence 2 signaled without waiting for semaphore.\n");
+ goto out_cleanup;
+ }
+
+ r = radeon_ring_lock(rdev, ringB, 64);
+ if (r) {
+ DRM_ERROR("Failed to lock ring B %p\n", ringB);
+ goto out_cleanup;
+ }
+ radeon_semaphore_emit_signal(rdev, ridxB, semaphore);
+ radeon_ring_unlock_commit(rdev, ringB);
+
+ r = radeon_fence_wait(fence2, false);
+ if (r) {
+ DRM_ERROR("Failed to wait for sync fence 1\n");
+ goto out_cleanup;
+ }
+
+out_cleanup:
+ if (semaphore)
+ radeon_semaphore_free(rdev, semaphore);
+
+ if (fence1)
+ radeon_fence_unref(&fence1);
+
+ if (fence2)
+ radeon_fence_unref(&fence2);
+
+ if (r)
+ printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
+}
+
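+/*
+ * Three ring sync test: rings A and B both wait on the same semaphore,
+ * ring C signals it twice; each signal must release exactly one fence.
+ */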
+void radeon_test_ring_sync2(struct radeon_device *rdev,
+ struct radeon_ring *ringA,
+ struct radeon_ring *ringB,
+ struct radeon_ring *ringC)
+{
+ struct radeon_fence *fenceA = NULL, *fenceB = NULL;
+ struct radeon_semaphore *semaphore = NULL;
+ int ridxA = radeon_ring_index(rdev, ringA);
+ int ridxB = radeon_ring_index(rdev, ringB);
+ int ridxC = radeon_ring_index(rdev, ringC);
+ bool sigA, sigB;
+ int i, r;
+
+ r = radeon_fence_create(rdev, &fenceA, ridxA);
+ if (r) {
+ DRM_ERROR("Failed to create sync fence 1\n");
+ goto out_cleanup;
+ }
+ r = radeon_fence_create(rdev, &fenceB, ridxB);
+ if (r) {
+ DRM_ERROR("Failed to create sync fence 2\n");
+ goto out_cleanup;
+ }
+
+ r = radeon_semaphore_create(rdev, &semaphore);
+ if (r) {
+ DRM_ERROR("Failed to create semaphore\n");
+ goto out_cleanup;
+ }
+
+ r = radeon_ring_lock(rdev, ringA, 64);
+ if (r) {
+ DRM_ERROR("Failed to lock ring A %d\n", ridxA);
+ goto out_cleanup;
+ }
+ radeon_semaphore_emit_wait(rdev, ridxA, semaphore);
+ radeon_fence_emit(rdev, fenceA);
+ radeon_ring_unlock_commit(rdev, ringA);
+
+ r = radeon_ring_lock(rdev, ringB, 64);
+ if (r) {
+ DRM_ERROR("Failed to lock ring B %d\n", ridxB);
+ goto out_cleanup;
+ }
+ radeon_semaphore_emit_wait(rdev, ridxB, semaphore);
+ radeon_fence_emit(rdev, fenceB);
+ radeon_ring_unlock_commit(rdev, ringB);
+
+ mdelay(1000);
+
+ if (radeon_fence_signaled(fenceA)) {
+ DRM_ERROR("Fence A signaled without waiting for semaphore.\n");
+ goto out_cleanup;
+ }
+ if (radeon_fence_signaled(fenceB)) {
+ DRM_ERROR("Fence A signaled without waiting for semaphore.\n");
+ goto out_cleanup;
+ }
+
+ r = radeon_ring_lock(rdev, ringC, 64);
+ if (r) {
+ DRM_ERROR("Failed to lock ring B %p\n", ringC);
+ goto out_cleanup;
+ }
+ radeon_semaphore_emit_signal(rdev, ridxC, semaphore);
+ radeon_ring_unlock_commit(rdev, ringC);
+
+ for (i = 0; i < 30; ++i) {
+ mdelay(100);
+ sigA = radeon_fence_signaled(fenceA);
+ sigB = radeon_fence_signaled(fenceB);
+ if (sigA || sigB)
+ break;
+ }
+
+ if (!sigA && !sigB) {
+ DRM_ERROR("Neither fence A nor B has been signaled\n");
+ goto out_cleanup;
+ } else if (sigA && sigB) {
+ DRM_ERROR("Both fence A and B has been signaled\n");
+ goto out_cleanup;
+ }
+
+ DRM_INFO("Fence %c was first signaled\n", sigA ? 'A' : 'B');
+
+ r = radeon_ring_lock(rdev, ringC, 64);
+ if (r) {
+ DRM_ERROR("Failed to lock ring B %p\n", ringC);
+ goto out_cleanup;
+ }
+ radeon_semaphore_emit_signal(rdev, ridxC, semaphore);
+ radeon_ring_unlock_commit(rdev, ringC);
+
+ mdelay(1000);
+
+ r = radeon_fence_wait(fenceA, false);
+ if (r) {
+ DRM_ERROR("Failed to wait for sync fence A\n");
+ goto out_cleanup;
+ }
+ r = radeon_fence_wait(fenceB, false);
+ if (r) {
+ DRM_ERROR("Failed to wait for sync fence B\n");
+ goto out_cleanup;
+ }
+
+out_cleanup:
+ if (semaphore)
+ radeon_semaphore_free(rdev, semaphore);
+
+ if (fenceA)
+ radeon_fence_unref(&fenceA);
+
+ if (fenceB)
+ radeon_fence_unref(&fenceB);
+
+ if (r)
+ printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
+}
+
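+/*
+ * Run the two ring test on every ordered pair of ready rings and the
+ * three ring test on every ordered triple.
+ */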
+void radeon_test_syncing(struct radeon_device *rdev)
+{
+ int i, j, k;
+
+ for (i = 1; i < RADEON_NUM_RINGS; ++i) {
+ struct radeon_ring *ringA = &rdev->ring[i];
+ if (!ringA->ready)
+ continue;
+
+ for (j = 0; j < i; ++j) {
+ struct radeon_ring *ringB = &rdev->ring[j];
+ if (!ringB->ready)
+ continue;
+
+ DRM_INFO("Testing syncing between rings %d and %d...\n", i, j);
+ radeon_test_ring_sync(rdev, ringA, ringB);
+
+ DRM_INFO("Testing syncing between rings %d and %d...\n", j, i);
+ radeon_test_ring_sync(rdev, ringB, ringA);
+
+ for (k = 0; k < j; ++k) {
+ struct radeon_ring *ringC = &rdev->ring[k];
+ if (!ringC->ready)
+ continue;
+
+ DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, j, k);
+ radeon_test_ring_sync2(rdev, ringA, ringB, ringC);
+
+ DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, k, j);
+ radeon_test_ring_sync2(rdev, ringA, ringC, ringB);
+
+ DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, i, k);
+ radeon_test_ring_sync2(rdev, ringB, ringA, ringC);
+
+ DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, k, i);
+ radeon_test_ring_sync2(rdev, ringB, ringC, ringA);
+
+ DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, i, j);
+ radeon_test_ring_sync2(rdev, ringC, ringA, ringB);
+
+ DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, j, i);
+ radeon_test_ring_sync2(rdev, ringC, ringB, ringA);
+ }
+ }
+ }
+}
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 0b5468bfaf54..c421e77ace71 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -114,24 +114,6 @@ static void radeon_ttm_global_fini(struct radeon_device *rdev)
}
}
-struct ttm_backend *radeon_ttm_backend_create(struct radeon_device *rdev);
-
-static struct ttm_backend*
-radeon_create_ttm_backend_entry(struct ttm_bo_device *bdev)
-{
- struct radeon_device *rdev;
-
- rdev = radeon_get_rdev(bdev);
-#if __OS_HAS_AGP
- if (rdev->flags & RADEON_IS_AGP) {
- return ttm_agp_backend_init(bdev, rdev->ddev->agp->bridge);
- } else
-#endif
- {
- return radeon_ttm_backend_create(rdev);
- }
-}
-
static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
return 0;
@@ -206,7 +188,7 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo,
rbo = container_of(bo, struct radeon_bo, tbo);
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
- if (rbo->rdev->cp.ready == false)
+ if (rbo->rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready == false)
radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
else
radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
@@ -241,10 +223,10 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
struct radeon_device *rdev;
uint64_t old_start, new_start;
struct radeon_fence *fence;
- int r;
+ int r, i;
rdev = radeon_get_rdev(bo->bdev);
- r = radeon_fence_create(rdev, &fence);
+ r = radeon_fence_create(rdev, &fence, rdev->copy_ring);
if (unlikely(r)) {
return r;
}
@@ -273,13 +255,43 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
return -EINVAL;
}
- if (!rdev->cp.ready) {
- DRM_ERROR("Trying to move memory with CP turned off.\n");
+ if (!rdev->ring[rdev->copy_ring].ready) {
+ DRM_ERROR("Trying to move memory with ring turned off.\n");
return -EINVAL;
}
BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);
+ /* sync other rings */
+ if (rdev->family >= CHIP_R600) {
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ /* no need to sync to our own or unused rings */
+ if (i == rdev->copy_ring || !rdev->ring[i].ready)
+ continue;
+
+ if (!fence->semaphore) {
+ r = radeon_semaphore_create(rdev, &fence->semaphore);
+ /* FIXME: handle semaphore error */
+ if (r)
+ continue;
+ }
+
+ r = radeon_ring_lock(rdev, &rdev->ring[i], 3);
+ /* FIXME: handle ring lock error */
+ if (r)
+ continue;
+ radeon_semaphore_emit_signal(rdev, i, fence->semaphore);
+ radeon_ring_unlock_commit(rdev, &rdev->ring[i]);
+
+ r = radeon_ring_lock(rdev, &rdev->ring[rdev->copy_ring], 3);
+ /* FIXME: handle ring lock error */
+ if (r)
+ continue;
+ radeon_semaphore_emit_wait(rdev, rdev->copy_ring, fence->semaphore);
+ radeon_ring_unlock_commit(rdev, &rdev->ring[rdev->copy_ring]);
+ }
+ }
+
r = radeon_copy(rdev, old_start, new_start,
new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */
fence);
@@ -398,7 +410,7 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
radeon_move_null(bo, new_mem);
return 0;
}
- if (!rdev->cp.ready || rdev->asic->copy == NULL) {
+ if (!rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready || rdev->asic->copy == NULL) {
/* use memcpy */
goto memcpy;
}
@@ -515,8 +527,166 @@ static bool radeon_sync_obj_signaled(void *sync_obj, void *sync_arg)
return radeon_fence_signaled((struct radeon_fence *)sync_obj);
}
+/*
+ * TTM backend functions.
+ */
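+/*
+ * DMA-aware TT: ttm_dma_tt plus the owning device and the GTT offset
+ * the pages are currently bound at.
+ */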
+struct radeon_ttm_tt {
+ struct ttm_dma_tt ttm;
+ struct radeon_device *rdev;
+ u64 offset;
+};
+
+static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
+ struct ttm_mem_reg *bo_mem)
+{
+ struct radeon_ttm_tt *gtt = (void*)ttm;
+ int r;
+
+ gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
+ if (!ttm->num_pages) {
+ WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
+ ttm->num_pages, bo_mem, ttm);
+ }
+ r = radeon_gart_bind(gtt->rdev, gtt->offset,
+ ttm->num_pages, ttm->pages, gtt->ttm.dma_address);
+ if (r) {
+ DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
+ ttm->num_pages, (unsigned)gtt->offset);
+ return r;
+ }
+ return 0;
+}
+
+static int radeon_ttm_backend_unbind(struct ttm_tt *ttm)
+{
+ struct radeon_ttm_tt *gtt = (void *)ttm;
+
+ radeon_gart_unbind(gtt->rdev, gtt->offset, ttm->num_pages);
+ return 0;
+}
+
+static void radeon_ttm_backend_destroy(struct ttm_tt *ttm)
+{
+ struct radeon_ttm_tt *gtt = (void *)ttm;
+
+ ttm_dma_tt_fini(&gtt->ttm);
+ kfree(gtt);
+}
+
+static struct ttm_backend_func radeon_backend_func = {
+ .bind = &radeon_ttm_backend_bind,
+ .unbind = &radeon_ttm_backend_unbind,
+ .destroy = &radeon_ttm_backend_destroy,
+};
+
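+/*
+ * Create the per-object TT: AGP boards use the TTM AGP backend, all
+ * others get a radeon_ttm_tt that binds through the GART.
+ */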
+struct ttm_tt *radeon_ttm_tt_create(struct ttm_bo_device *bdev,
+ unsigned long size, uint32_t page_flags,
+ struct page *dummy_read_page)
+{
+ struct radeon_device *rdev;
+ struct radeon_ttm_tt *gtt;
+
+ rdev = radeon_get_rdev(bdev);
+#if __OS_HAS_AGP
+ if (rdev->flags & RADEON_IS_AGP) {
+ return ttm_agp_tt_create(bdev, rdev->ddev->agp->bridge,
+ size, page_flags, dummy_read_page);
+ }
+#endif
+
+ gtt = kzalloc(sizeof(struct radeon_ttm_tt), GFP_KERNEL);
+ if (gtt == NULL) {
+ return NULL;
+ }
+ gtt->ttm.ttm.func = &radeon_backend_func;
+ gtt->rdev = rdev;
+ if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags, dummy_read_page)) {
+ kfree(gtt);
+ return NULL;
+ }
+ return &gtt->ttm.ttm;
+}
+
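+/*
+ * Populate a TT: AGP and SWIOTLB use their dedicated pools, otherwise
+ * allocate from the generic page pool and DMA-map each page.
+ */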
+static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
+{
+ struct radeon_device *rdev;
+ struct radeon_ttm_tt *gtt = (void *)ttm;
+ unsigned i;
+ int r;
+
+ if (ttm->state != tt_unpopulated)
+ return 0;
+
+ rdev = radeon_get_rdev(ttm->bdev);
+#if __OS_HAS_AGP
+ if (rdev->flags & RADEON_IS_AGP) {
+ return ttm_agp_tt_populate(ttm);
+ }
+#endif
+
+#ifdef CONFIG_SWIOTLB
+ if (swiotlb_nr_tbl()) {
+ return ttm_dma_populate(&gtt->ttm, rdev->dev);
+ }
+#endif
+
+ r = ttm_pool_populate(ttm);
+ if (r) {
+ return r;
+ }
+
+ for (i = 0; i < ttm->num_pages; i++) {
+ gtt->ttm.dma_address[i] = pci_map_page(rdev->pdev, ttm->pages[i],
+ 0, PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+ if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) {
+ while (i--) {
+ pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ gtt->ttm.dma_address[i] = 0;
+ }
+ ttm_pool_unpopulate(ttm);
+ return -EFAULT;
+ }
+ }
+ return 0;
+}
+
+static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
+{
+ struct radeon_device *rdev;
+ struct radeon_ttm_tt *gtt = (void *)ttm;
+ unsigned i;
+
+ rdev = radeon_get_rdev(ttm->bdev);
+#if __OS_HAS_AGP
+ if (rdev->flags & RADEON_IS_AGP) {
+ ttm_agp_tt_unpopulate(ttm);
+ return;
+ }
+#endif
+
+#ifdef CONFIG_SWIOTLB
+ if (swiotlb_nr_tbl()) {
+ ttm_dma_unpopulate(&gtt->ttm, rdev->dev);
+ return;
+ }
+#endif
+
+ for (i = 0; i < ttm->num_pages; i++) {
+ if (gtt->ttm.dma_address[i]) {
+ pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ }
+ }
+
+ ttm_pool_unpopulate(ttm);
+}
+
static struct ttm_bo_driver radeon_bo_driver = {
- .create_ttm_backend_entry = &radeon_create_ttm_backend_entry,
+ .ttm_tt_create = &radeon_ttm_tt_create,
+ .ttm_tt_populate = &radeon_ttm_tt_populate,
+ .ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate,
.invalidate_caches = &radeon_invalidate_caches,
.init_mem_type = &radeon_init_mem_type,
.evict_flags = &radeon_evict_flags,
@@ -680,124 +850,6 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
}
-/*
- * TTM backend functions.
- */
-struct radeon_ttm_backend {
- struct ttm_backend backend;
- struct radeon_device *rdev;
- unsigned long num_pages;
- struct page **pages;
- struct page *dummy_read_page;
- dma_addr_t *dma_addrs;
- bool populated;
- bool bound;
- unsigned offset;
-};
-
-static int radeon_ttm_backend_populate(struct ttm_backend *backend,
- unsigned long num_pages,
- struct page **pages,
- struct page *dummy_read_page,
- dma_addr_t *dma_addrs)
-{
- struct radeon_ttm_backend *gtt;
-
- gtt = container_of(backend, struct radeon_ttm_backend, backend);
- gtt->pages = pages;
- gtt->dma_addrs = dma_addrs;
- gtt->num_pages = num_pages;
- gtt->dummy_read_page = dummy_read_page;
- gtt->populated = true;
- return 0;
-}
-
-static void radeon_ttm_backend_clear(struct ttm_backend *backend)
-{
- struct radeon_ttm_backend *gtt;
-
- gtt = container_of(backend, struct radeon_ttm_backend, backend);
- gtt->pages = NULL;
- gtt->dma_addrs = NULL;
- gtt->num_pages = 0;
- gtt->dummy_read_page = NULL;
- gtt->populated = false;
- gtt->bound = false;
-}
-
-
-static int radeon_ttm_backend_bind(struct ttm_backend *backend,
- struct ttm_mem_reg *bo_mem)
-{
- struct radeon_ttm_backend *gtt;
- int r;
-
- gtt = container_of(backend, struct radeon_ttm_backend, backend);
- gtt->offset = bo_mem->start << PAGE_SHIFT;
- if (!gtt->num_pages) {
- WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
- gtt->num_pages, bo_mem, backend);
- }
- r = radeon_gart_bind(gtt->rdev, gtt->offset,
- gtt->num_pages, gtt->pages, gtt->dma_addrs);
- if (r) {
- DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
- gtt->num_pages, gtt->offset);
- return r;
- }
- gtt->bound = true;
- return 0;
-}
-
-static int radeon_ttm_backend_unbind(struct ttm_backend *backend)
-{
- struct radeon_ttm_backend *gtt;
-
- gtt = container_of(backend, struct radeon_ttm_backend, backend);
- radeon_gart_unbind(gtt->rdev, gtt->offset, gtt->num_pages);
- gtt->bound = false;
- return 0;
-}
-
-static void radeon_ttm_backend_destroy(struct ttm_backend *backend)
-{
- struct radeon_ttm_backend *gtt;
-
- gtt = container_of(backend, struct radeon_ttm_backend, backend);
- if (gtt->bound) {
- radeon_ttm_backend_unbind(backend);
- }
- kfree(gtt);
-}
-
-static struct ttm_backend_func radeon_backend_func = {
- .populate = &radeon_ttm_backend_populate,
- .clear = &radeon_ttm_backend_clear,
- .bind = &radeon_ttm_backend_bind,
- .unbind = &radeon_ttm_backend_unbind,
- .destroy = &radeon_ttm_backend_destroy,
-};
-
-struct ttm_backend *radeon_ttm_backend_create(struct radeon_device *rdev)
-{
- struct radeon_ttm_backend *gtt;
-
- gtt = kzalloc(sizeof(struct radeon_ttm_backend), GFP_KERNEL);
- if (gtt == NULL) {
- return NULL;
- }
- gtt->backend.bdev = &rdev->mman.bdev;
- gtt->backend.flags = 0;
- gtt->backend.func = &radeon_backend_func;
- gtt->rdev = rdev;
- gtt->pages = NULL;
- gtt->num_pages = 0;
- gtt->dummy_read_page = NULL;
- gtt->populated = false;
- gtt->bound = false;
- return &gtt->backend;
-}
-
#define RADEON_DEBUGFS_MEM_TYPES 2
#if defined(CONFIG_DEBUG_FS)
@@ -820,8 +872,8 @@ static int radeon_mm_dump_table(struct seq_file *m, void *data)
static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
- static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+1];
- static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+1][32];
+ static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+2];
+ static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+2][32];
unsigned i;
for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) {
@@ -843,8 +895,17 @@ static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
radeon_mem_types_list[i].name = radeon_mem_types_names[i];
radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs;
radeon_mem_types_list[i].driver_features = 0;
- radeon_mem_types_list[i].data = NULL;
- return radeon_debugfs_add_files(rdev, radeon_mem_types_list, RADEON_DEBUGFS_MEM_TYPES+1);
+ radeon_mem_types_list[i++].data = NULL;
+#ifdef CONFIG_SWIOTLB
+ if (swiotlb_nr_tbl()) {
+ sprintf(radeon_mem_types_names[i], "ttm_dma_page_pool");
+ radeon_mem_types_list[i].name = radeon_mem_types_names[i];
+ radeon_mem_types_list[i].show = &ttm_dma_page_alloc_debugfs;
+ radeon_mem_types_list[i].driver_features = 0;
+ radeon_mem_types_list[i++].data = NULL;
+ }
+#endif
+ return radeon_debugfs_add_files(rdev, radeon_mem_types_list, i);
#endif
return 0;
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 06b90c87f8f3..b0ce84a20a68 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -410,6 +410,12 @@ static int rs400_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+ return r;
+ }
+
/* Enable IRQ */
r100_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -419,11 +425,18 @@ static int rs400_startup(struct radeon_device *rdev)
dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
- r = r100_ib_init(rdev);
+
+ r = radeon_ib_pool_start(rdev);
+ if (r)
+ return r;
+
+ r = r100_ib_test(rdev);
if (r) {
- dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
+ dev_err(rdev->dev, "failed testing IB (%d).\n", r);
+ rdev->accel_working = false;
return r;
}
+
return 0;
}
@@ -447,11 +460,14 @@ int rs400_resume(struct radeon_device *rdev)
r300_clock_startup(rdev);
/* Initialize surface registers */
radeon_surface_init(rdev);
+
+ rdev->accel_working = true;
return rs400_startup(rdev);
}
int rs400_suspend(struct radeon_device *rdev)
{
+ radeon_ib_pool_suspend(rdev);
r100_cp_disable(rdev);
radeon_wb_disable(rdev);
r100_irq_disable(rdev);
@@ -530,7 +546,14 @@ int rs400_init(struct radeon_device *rdev)
if (r)
return r;
r300_set_reg_safe(rdev);
+
+ r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+ rdev->accel_working = false;
+ }
+
r = rs400_startup(rdev);
if (r) {
/* Something went wrong with the accel init, stop accel */
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 481b99e89f65..ec46eb45e34c 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -62,6 +62,7 @@ u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
+ int i;
/* Lock the graphics update lock */
tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
@@ -74,7 +75,11 @@ u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
(u32)crtc_base);
/* Wait for update_pending to go high. */
- while (!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING));
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)
+ break;
+ udelay(1);
+ }
DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
/* Unlock the lock, so double-buffering can take place inside vblank */
@@ -317,16 +322,6 @@ void rs600_hpd_fini(struct radeon_device *rdev)
}
}
-void rs600_bm_disable(struct radeon_device *rdev)
-{
- u32 tmp;
-
- /* disable bus mastering */
- pci_read_config_word(rdev->pdev, 0x4, (u16*)&tmp);
- pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB);
- mdelay(1);
-}
-
int rs600_asic_reset(struct radeon_device *rdev)
{
struct rv515_mc_save save;
@@ -350,7 +345,8 @@ int rs600_asic_reset(struct radeon_device *rdev)
WREG32(RADEON_CP_RB_CNTL, tmp);
pci_save_state(rdev->pdev);
/* disable bus mastering */
- rs600_bm_disable(rdev);
+ pci_clear_master(rdev->pdev);
+ mdelay(1);
/* reset GA+VAP */
WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) |
S_0000F0_SOFT_RESET_GA(1));
@@ -544,7 +540,7 @@ int rs600_irq_set(struct radeon_device *rdev)
WREG32(R_000040_GEN_INT_CNTL, 0);
return -EINVAL;
}
- if (rdev->irq.sw_int) {
+ if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
tmp |= S_000040_SW_INT_EN(1);
}
if (rdev->irq.gui_idle) {
@@ -637,7 +633,7 @@ int rs600_irq_process(struct radeon_device *rdev)
while (status || rdev->irq.stat_regs.r500.disp_int) {
/* SW interrupt */
if (G_000044_SW_INT(status)) {
- radeon_fence_process(rdev);
+ radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
}
/* GUI idle */
if (G_000040_GUI_IDLE(status)) {
@@ -844,6 +840,12 @@ static int rs600_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+ return r;
+ }
+
/* Enable IRQ */
rs600_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -853,15 +855,21 @@ static int rs600_startup(struct radeon_device *rdev)
dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
- r = r100_ib_init(rdev);
+
+ r = r600_audio_init(rdev);
if (r) {
- dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
+ dev_err(rdev->dev, "failed initializing audio\n");
return r;
}
- r = r600_audio_init(rdev);
+ r = radeon_ib_pool_start(rdev);
+ if (r)
+ return r;
+
+ r = r100_ib_test(rdev);
if (r) {
- dev_err(rdev->dev, "failed initializing audio\n");
+ dev_err(rdev->dev, "failed testing IB (%d).\n", r);
+ rdev->accel_working = false;
return r;
}
@@ -886,11 +894,14 @@ int rs600_resume(struct radeon_device *rdev)
rv515_clock_startup(rdev);
/* Initialize surface registers */
radeon_surface_init(rdev);
+
+ rdev->accel_working = true;
return rs600_startup(rdev);
}
int rs600_suspend(struct radeon_device *rdev)
{
+ radeon_ib_pool_suspend(rdev);
r600_audio_fini(rdev);
r100_cp_disable(rdev);
radeon_wb_disable(rdev);
@@ -971,7 +982,14 @@ int rs600_init(struct radeon_device *rdev)
if (r)
return r;
rs600_set_safe_registers(rdev);
+
+ r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+ rdev->accel_working = false;
+ }
+
r = rs600_startup(rdev);
if (r) {
/* Something went wrong with the accel init, stop accel */
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index a9049ed1a519..4f24a0fa8c82 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -621,6 +621,12 @@ static int rs690_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+ return r;
+ }
+
/* Enable IRQ */
rs600_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -630,15 +636,21 @@ static int rs690_startup(struct radeon_device *rdev)
dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
- r = r100_ib_init(rdev);
+
+ r = r600_audio_init(rdev);
if (r) {
- dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
+ dev_err(rdev->dev, "failed initializing audio\n");
return r;
}
- r = r600_audio_init(rdev);
+ r = radeon_ib_pool_start(rdev);
+ if (r)
+ return r;
+
+ r = r100_ib_test(rdev);
if (r) {
- dev_err(rdev->dev, "failed initializing audio\n");
+ dev_err(rdev->dev, "failed testing IB (%d).\n", r);
+ rdev->accel_working = false;
return r;
}
@@ -663,11 +675,14 @@ int rs690_resume(struct radeon_device *rdev)
rv515_clock_startup(rdev);
/* Initialize surface registers */
radeon_surface_init(rdev);
+
+ rdev->accel_working = true;
return rs690_startup(rdev);
}
int rs690_suspend(struct radeon_device *rdev)
{
+ radeon_ib_pool_suspend(rdev);
r600_audio_fini(rdev);
r100_cp_disable(rdev);
radeon_wb_disable(rdev);
@@ -749,7 +764,14 @@ int rs690_init(struct radeon_device *rdev)
if (r)
return r;
rs600_set_safe_registers(rdev);
+
+ r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+ rdev->accel_working = false;
+ }
+
r = rs690_startup(rdev);
if (r) {
/* Something went wrong with the accel init, stop accel */
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 6613ee9ecca3..880637fd1946 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -55,44 +55,45 @@ void rv515_debugfs(struct radeon_device *rdev)
void rv515_ring_start(struct radeon_device *rdev)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
- r = radeon_ring_lock(rdev, 64);
+ r = radeon_ring_lock(rdev, ring, 64);
if (r) {
return;
}
- radeon_ring_write(rdev, PACKET0(ISYNC_CNTL, 0));
- radeon_ring_write(rdev,
+ radeon_ring_write(ring, PACKET0(ISYNC_CNTL, 0));
+ radeon_ring_write(ring,
ISYNC_ANY2D_IDLE3D |
ISYNC_ANY3D_IDLE2D |
ISYNC_WAIT_IDLEGUI |
ISYNC_CPSCRATCH_IDLEGUI);
- radeon_ring_write(rdev, PACKET0(WAIT_UNTIL, 0));
- radeon_ring_write(rdev, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
- radeon_ring_write(rdev, PACKET0(R300_DST_PIPE_CONFIG, 0));
- radeon_ring_write(rdev, R300_PIPE_AUTO_CONFIG);
- radeon_ring_write(rdev, PACKET0(GB_SELECT, 0));
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, PACKET0(GB_ENABLE, 0));
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, PACKET0(R500_SU_REG_DEST, 0));
- radeon_ring_write(rdev, (1 << rdev->num_gb_pipes) - 1);
- radeon_ring_write(rdev, PACKET0(VAP_INDEX_OFFSET, 0));
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
- radeon_ring_write(rdev, RB3D_DC_FLUSH | RB3D_DC_FREE);
- radeon_ring_write(rdev, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
- radeon_ring_write(rdev, ZC_FLUSH | ZC_FREE);
- radeon_ring_write(rdev, PACKET0(WAIT_UNTIL, 0));
- radeon_ring_write(rdev, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
- radeon_ring_write(rdev, PACKET0(GB_AA_CONFIG, 0));
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
- radeon_ring_write(rdev, RB3D_DC_FLUSH | RB3D_DC_FREE);
- radeon_ring_write(rdev, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
- radeon_ring_write(rdev, ZC_FLUSH | ZC_FREE);
- radeon_ring_write(rdev, PACKET0(GB_MSPOS0, 0));
- radeon_ring_write(rdev,
+ radeon_ring_write(ring, PACKET0(WAIT_UNTIL, 0));
+ radeon_ring_write(ring, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
+ radeon_ring_write(ring, PACKET0(R300_DST_PIPE_CONFIG, 0));
+ radeon_ring_write(ring, R300_PIPE_AUTO_CONFIG);
+ radeon_ring_write(ring, PACKET0(GB_SELECT, 0));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, PACKET0(GB_ENABLE, 0));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, PACKET0(R500_SU_REG_DEST, 0));
+ radeon_ring_write(ring, (1 << rdev->num_gb_pipes) - 1);
+ radeon_ring_write(ring, PACKET0(VAP_INDEX_OFFSET, 0));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
+ radeon_ring_write(ring, RB3D_DC_FLUSH | RB3D_DC_FREE);
+ radeon_ring_write(ring, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
+ radeon_ring_write(ring, ZC_FLUSH | ZC_FREE);
+ radeon_ring_write(ring, PACKET0(WAIT_UNTIL, 0));
+ radeon_ring_write(ring, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
+ radeon_ring_write(ring, PACKET0(GB_AA_CONFIG, 0));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
+ radeon_ring_write(ring, RB3D_DC_FLUSH | RB3D_DC_FREE);
+ radeon_ring_write(ring, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
+ radeon_ring_write(ring, ZC_FLUSH | ZC_FREE);
+ radeon_ring_write(ring, PACKET0(GB_MSPOS0, 0));
+ radeon_ring_write(ring,
((6 << MS_X0_SHIFT) |
(6 << MS_Y0_SHIFT) |
(6 << MS_X1_SHIFT) |
@@ -101,8 +102,8 @@ void rv515_ring_start(struct radeon_device *rdev)
(6 << MS_Y2_SHIFT) |
(6 << MSBD0_Y_SHIFT) |
(6 << MSBD0_X_SHIFT)));
- radeon_ring_write(rdev, PACKET0(GB_MSPOS1, 0));
- radeon_ring_write(rdev,
+ radeon_ring_write(ring, PACKET0(GB_MSPOS1, 0));
+ radeon_ring_write(ring,
((6 << MS_X3_SHIFT) |
(6 << MS_Y3_SHIFT) |
(6 << MS_X4_SHIFT) |
@@ -110,15 +111,15 @@ void rv515_ring_start(struct radeon_device *rdev)
(6 << MS_X5_SHIFT) |
(6 << MS_Y5_SHIFT) |
(6 << MSBD1_SHIFT)));
- radeon_ring_write(rdev, PACKET0(GA_ENHANCE, 0));
- radeon_ring_write(rdev, GA_DEADLOCK_CNTL | GA_FASTSYNC_CNTL);
- radeon_ring_write(rdev, PACKET0(GA_POLY_MODE, 0));
- radeon_ring_write(rdev, FRONT_PTYPE_TRIANGE | BACK_PTYPE_TRIANGE);
- radeon_ring_write(rdev, PACKET0(GA_ROUND_MODE, 0));
- radeon_ring_write(rdev, GEOMETRY_ROUND_NEAREST | COLOR_ROUND_NEAREST);
- radeon_ring_write(rdev, PACKET0(0x20C8, 0));
- radeon_ring_write(rdev, 0);
- radeon_ring_unlock_commit(rdev);
+ radeon_ring_write(ring, PACKET0(GA_ENHANCE, 0));
+ radeon_ring_write(ring, GA_DEADLOCK_CNTL | GA_FASTSYNC_CNTL);
+ radeon_ring_write(ring, PACKET0(GA_POLY_MODE, 0));
+ radeon_ring_write(ring, FRONT_PTYPE_TRIANGE | BACK_PTYPE_TRIANGE);
+ radeon_ring_write(ring, PACKET0(GA_ROUND_MODE, 0));
+ radeon_ring_write(ring, GEOMETRY_ROUND_NEAREST | COLOR_ROUND_NEAREST);
+ radeon_ring_write(ring, PACKET0(0x20C8, 0));
+ radeon_ring_write(ring, 0);
+ radeon_ring_unlock_commit(rdev, ring);
}
int rv515_mc_wait_for_idle(struct radeon_device *rdev)
@@ -392,6 +393,12 @@ static int rv515_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+ return r;
+ }
+
/* Enable IRQ */
rs600_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -401,9 +408,15 @@ static int rv515_startup(struct radeon_device *rdev)
dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
- r = r100_ib_init(rdev);
+
+ r = radeon_ib_pool_start(rdev);
+ if (r)
+ return r;
+
+ r = r100_ib_test(rdev);
if (r) {
- dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
+ dev_err(rdev->dev, "failed testing IB (%d).\n", r);
+ rdev->accel_working = false;
return r;
}
return 0;
@@ -428,6 +441,8 @@ int rv515_resume(struct radeon_device *rdev)
rv515_clock_startup(rdev);
/* Initialize surface registers */
radeon_surface_init(rdev);
+
+ rdev->accel_working = true;
return rv515_startup(rdev);
}
@@ -524,7 +539,14 @@ int rv515_init(struct radeon_device *rdev)
if (r)
return r;
rv515_set_safe_registers(rdev);
+
+ r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+ rdev->accel_working = false;
+ }
+
r = rv515_startup(rdev);
if (r) {
/* Something went wrong with the accel init, stop accel */
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index a983f410ab89..a1668b659ddd 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -47,6 +47,7 @@ u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
+ int i;
/* Lock the graphics update lock */
tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
@@ -66,7 +67,11 @@ u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
(u32)crtc_base);
/* Wait for update_pending to go high. */
- while (!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING));
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)
+ break;
+ udelay(1);
+ }
DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
/* Unlock the lock, so double-buffering can take place inside vblank */
@@ -352,7 +357,7 @@ static int rv770_cp_load_microcode(struct radeon_device *rdev)
void r700_cp_fini(struct radeon_device *rdev)
{
r700_cp_stop(rdev);
- radeon_ring_fini(rdev);
+ radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
}
/*
@@ -1038,6 +1043,7 @@ int rv770_mc_init(struct radeon_device *rdev)
static int rv770_startup(struct radeon_device *rdev)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
/* enable pcie gen2 link */
@@ -1077,6 +1083,12 @@ static int rv770_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+ return r;
+ }
+
/* Enable IRQ */
r = r600_irq_init(rdev);
if (r) {
@@ -1086,7 +1098,9 @@ static int rv770_startup(struct radeon_device *rdev)
}
r600_irq_set(rdev);
- r = radeon_ring_init(rdev, rdev->cp.ring_size);
+ r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
+ R600_CP_RB_RPTR, R600_CP_RB_WPTR,
+ 0, 0xfffff, RADEON_CP_PACKET2);
if (r)
return r;
r = rv770_cp_load_microcode(rdev);
@@ -1096,6 +1110,17 @@ static int rv770_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = radeon_ib_pool_start(rdev);
+ if (r)
+ return r;
+
+ r = r600_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "IB test failed (%d).\n", r);
+ rdev->accel_working = false;
+ return r;
+ }
+
return 0;
}
@@ -1110,18 +1135,13 @@ int rv770_resume(struct radeon_device *rdev)
/* post card */
atom_asic_init(rdev->mode_info.atom_context);
+ rdev->accel_working = true;
r = rv770_startup(rdev);
if (r) {
DRM_ERROR("r600 startup failed on resume\n");
return r;
}
- r = r600_ib_test(rdev);
- if (r) {
- DRM_ERROR("radeon: failed testing IB (%d).\n", r);
- return r;
- }
-
r = r600_audio_init(rdev);
if (r) {
dev_err(rdev->dev, "radeon: audio init failed\n");
@@ -1135,13 +1155,14 @@ int rv770_resume(struct radeon_device *rdev)
int rv770_suspend(struct radeon_device *rdev)
{
r600_audio_fini(rdev);
+ radeon_ib_pool_suspend(rdev);
+ r600_blit_suspend(rdev);
/* FIXME: we should wait for ring to be empty */
r700_cp_stop(rdev);
- rdev->cp.ready = false;
+ rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
r600_irq_suspend(rdev);
radeon_wb_disable(rdev);
rv770_pcie_gart_disable(rdev);
- r600_blit_suspend(rdev);
return 0;
}
@@ -1210,8 +1231,8 @@ int rv770_init(struct radeon_device *rdev)
if (r)
return r;
- rdev->cp.ring_obj = NULL;
- r600_ring_init(rdev, 1024 * 1024);
+ rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
+ r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
@@ -1220,30 +1241,24 @@ int rv770_init(struct radeon_device *rdev)
if (r)
return r;
+ r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+ rdev->accel_working = false;
+ }
+
r = rv770_startup(rdev);
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
r700_cp_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
+ r100_ib_fini(rdev);
radeon_irq_kms_fini(rdev);
rv770_pcie_gart_fini(rdev);
rdev->accel_working = false;
}
- if (rdev->accel_working) {
- r = radeon_ib_pool_init(rdev);
- if (r) {
- dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
- rdev->accel_working = false;
- } else {
- r = r600_ib_test(rdev);
- if (r) {
- dev_err(rdev->dev, "IB test failed (%d).\n", r);
- rdev->accel_working = false;
- }
- }
- }
r = r600_audio_init(rdev);
if (r) {
@@ -1260,11 +1275,12 @@ void rv770_fini(struct radeon_device *rdev)
r700_cp_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
- radeon_ib_pool_fini(rdev);
+ r100_ib_fini(rdev);
radeon_irq_kms_fini(rdev);
rv770_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
+ radeon_semaphore_driver_fini(rdev);
radeon_fence_driver_fini(rdev);
radeon_agp_fini(rdev);
radeon_bo_fini(rdev);
diff --git a/drivers/gpu/drm/savage/savage_drv.c b/drivers/gpu/drm/savage/savage_drv.c
index 5468d1cd3296..89afe0b83643 100644
--- a/drivers/gpu/drm/savage/savage_drv.c
+++ b/drivers/gpu/drm/savage/savage_drv.c
@@ -35,6 +35,17 @@ static struct pci_device_id pciidlist[] = {
savage_PCI_IDS
};
+static const struct file_operations savage_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = drm_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+ .llseek = noop_llseek,
+};
+
static struct drm_driver driver = {
.driver_features =
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_DMA | DRIVER_PCI_DMA,
@@ -46,17 +57,7 @@ static struct drm_driver driver = {
.reclaim_buffers = savage_reclaim_buffers,
.ioctls = savage_ioctls,
.dma_ioctl = savage_bci_buffers,
- .fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = drm_mmap,
- .poll = drm_poll,
- .fasync = drm_fasync,
- .llseek = noop_llseek,
- },
-
+ .fops = &savage_driver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c
index a9c5716bea4e..06da063ece2e 100644
--- a/drivers/gpu/drm/sis/sis_drv.c
+++ b/drivers/gpu/drm/sis/sis_drv.c
@@ -48,9 +48,7 @@ static int sis_driver_load(struct drm_device *dev, unsigned long chipset)
dev->dev_private = (void *)dev_priv;
dev_priv->chipset = chipset;
- ret = drm_sman_init(&dev_priv->sman, 2, 12, 8);
- if (ret)
- kfree(dev_priv);
+ idr_init(&dev->object_name_idr);
return ret;
}
@@ -59,32 +57,60 @@ static int sis_driver_unload(struct drm_device *dev)
{
drm_sis_private_t *dev_priv = dev->dev_private;
- drm_sman_takedown(&dev_priv->sman);
+ idr_remove_all(&dev_priv->object_idr);
+ idr_destroy(&dev_priv->object_idr);
+
kfree(dev_priv);
return 0;
}
+static const struct file_operations sis_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = drm_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+ .llseek = noop_llseek,
+};
+
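+/* Allocate the per-file list used to track and reclaim this file's allocations. */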
+static int sis_driver_open(struct drm_device *dev, struct drm_file *file)
+{
+ struct sis_file_private *file_priv;
+
+ DRM_DEBUG_DRIVER("\n");
+ file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL);
+ if (!file_priv)
+ return -ENOMEM;
+
+ file->driver_priv = file_priv;
+
+ INIT_LIST_HEAD(&file_priv->obj_list);
+
+ return 0;
+}
+
+void sis_driver_postclose(struct drm_device *dev, struct drm_file *file)
+{
+ struct sis_file_private *file_priv = file->driver_priv;
+
+ kfree(file_priv);
+}
+
static struct drm_driver driver = {
.driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR,
.load = sis_driver_load,
.unload = sis_driver_unload,
+ .open = sis_driver_open,
+ .postclose = sis_driver_postclose,
.dma_quiescent = sis_idle,
.reclaim_buffers = NULL,
.reclaim_buffers_idlelocked = sis_reclaim_buffers_locked,
.lastclose = sis_lastclose,
.ioctls = sis_ioctls,
- .fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = drm_mmap,
- .poll = drm_poll,
- .fasync = drm_fasync,
- .llseek = noop_llseek,
- },
-
+ .fops = &sis_driver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
diff --git a/drivers/gpu/drm/sis/sis_drv.h b/drivers/gpu/drm/sis/sis_drv.h
index 194303c177ad..573758b2d2d6 100644
--- a/drivers/gpu/drm/sis/sis_drv.h
+++ b/drivers/gpu/drm/sis/sis_drv.h
@@ -44,7 +44,7 @@ enum sis_family {
SIS_CHIP_315 = 1,
};
-#include "drm_sman.h"
+#include "drm_mm.h"
#define SIS_BASE (dev_priv->mmio)
@@ -54,12 +54,15 @@ enum sis_family {
typedef struct drm_sis_private {
drm_local_map_t *mmio;
unsigned int idle_fault;
- struct drm_sman sman;
unsigned int chipset;
int vram_initialized;
int agp_initialized;
unsigned long vram_offset;
unsigned long agp_offset;
+ struct drm_mm vram_mm;
+ struct drm_mm agp_mm;
+ /** Mapping of userspace keys to mm objects */
+ struct idr object_idr;
} drm_sis_private_t;
extern int sis_idle(struct drm_device *dev);
diff --git a/drivers/gpu/drm/sis/sis_mm.c b/drivers/gpu/drm/sis/sis_mm.c
index 7fe2b63412ce..dd4a316c3d74 100644
--- a/drivers/gpu/drm/sis/sis_mm.c
+++ b/drivers/gpu/drm/sis/sis_mm.c
@@ -41,40 +41,18 @@
#define AGP_TYPE 1
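+/*
+ * Per-allocation bookkeeping: a drm_mm node (or an fb-driver request on
+ * CONFIG_FB_SIS systems) plus the owner's reclaim list entry.
+ */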
+struct sis_memblock {
+ struct drm_mm_node mm_node;
+ struct sis_memreq req;
+ struct list_head owner_list;
+};
+
#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
/* fb management via fb device */
#define SIS_MM_ALIGN_SHIFT 0
#define SIS_MM_ALIGN_MASK 0
-static void *sis_sman_mm_allocate(void *private, unsigned long size,
- unsigned alignment)
-{
- struct sis_memreq req;
-
- req.size = size;
- sis_malloc(&req);
- if (req.size == 0)
- return NULL;
- else
- return (void *)(unsigned long)~req.offset;
-}
-
-static void sis_sman_mm_free(void *private, void *ref)
-{
- sis_free(~((unsigned long)ref));
-}
-
-static void sis_sman_mm_destroy(void *private)
-{
- ;
-}
-
-static unsigned long sis_sman_mm_offset(void *private, void *ref)
-{
- return ~((unsigned long)ref);
-}
-
#else /* CONFIG_FB_SIS[_MODULE] */
#define SIS_MM_ALIGN_SHIFT 4
@@ -86,30 +64,11 @@ static int sis_fb_init(struct drm_device *dev, void *data, struct drm_file *file
{
drm_sis_private_t *dev_priv = dev->dev_private;
drm_sis_fb_t *fb = data;
- int ret;
mutex_lock(&dev->struct_mutex);
-#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
- {
- struct drm_sman_mm sman_mm;
- sman_mm.private = (void *)0xFFFFFFFF;
- sman_mm.allocate = sis_sman_mm_allocate;
- sman_mm.free = sis_sman_mm_free;
- sman_mm.destroy = sis_sman_mm_destroy;
- sman_mm.offset = sis_sman_mm_offset;
- ret =
- drm_sman_set_manager(&dev_priv->sman, VIDEO_TYPE, &sman_mm);
- }
-#else
- ret = drm_sman_set_range(&dev_priv->sman, VIDEO_TYPE, 0,
- fb->size >> SIS_MM_ALIGN_SHIFT);
-#endif
-
- if (ret) {
- DRM_ERROR("VRAM memory manager initialisation error\n");
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
+ /* Unconditionally init the drm_mm, even though we don't use it when the
+ * fb sis driver is available - make cleanup easier. */
+ drm_mm_init(&dev_priv->vram_mm, 0, fb->size >> SIS_MM_ALIGN_SHIFT);
dev_priv->vram_initialized = 1;
dev_priv->vram_offset = fb->offset;
@@ -120,13 +79,15 @@ static int sis_fb_init(struct drm_device *dev, void *data, struct drm_file *file
return 0;
}
-static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file_priv,
+static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file,
void *data, int pool)
{
drm_sis_private_t *dev_priv = dev->dev_private;
drm_sis_mem_t *mem = data;
- int retval = 0;
- struct drm_memblock_item *item;
+ int retval = 0, user_key;
+ struct sis_memblock *item;
+ struct sis_file_private *file_priv = file->driver_priv;
+ unsigned long offset;
mutex_lock(&dev->struct_mutex);
@@ -138,25 +99,68 @@ static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file_priv,
return -EINVAL;
}
- mem->size = (mem->size + SIS_MM_ALIGN_MASK) >> SIS_MM_ALIGN_SHIFT;
- item = drm_sman_alloc(&dev_priv->sman, pool, mem->size, 0,
- (unsigned long)file_priv);
+ item = kzalloc(sizeof(*item), GFP_KERNEL);
+ if (!item) {
+ retval = -ENOMEM;
+ goto fail_alloc;
+ }
- mutex_unlock(&dev->struct_mutex);
- if (item) {
- mem->offset = ((pool == 0) ?
- dev_priv->vram_offset : dev_priv->agp_offset) +
- (item->mm->
- offset(item->mm, item->mm_info) << SIS_MM_ALIGN_SHIFT);
- mem->free = item->user_hash.key;
- mem->size = mem->size << SIS_MM_ALIGN_SHIFT;
+ mem->size = (mem->size + SIS_MM_ALIGN_MASK) >> SIS_MM_ALIGN_SHIFT;
+ if (pool == AGP_TYPE) {
+ retval = drm_mm_insert_node(&dev_priv->agp_mm,
+ &item->mm_node,
+ mem->size, 0);
+ offset = item->mm_node.start;
} else {
- mem->offset = 0;
- mem->size = 0;
- mem->free = 0;
+#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
+ item->req.size = mem->size;
+ sis_malloc(&item->req);
+ if (item->req.size == 0)
+ retval = -ENOMEM;
+ offset = item->req.offset;
+#else
+ retval = drm_mm_insert_node(&dev_priv->vram_mm,
+ &item->mm_node,
+ mem->size, 0);
+ offset = item->mm_node.start;
+#endif
+ }
+ if (retval)
+ goto fail_alloc;
+
+again:
+ if (idr_pre_get(&dev_priv->object_idr, GFP_KERNEL) == 0) {
retval = -ENOMEM;
+ goto fail_idr;
}
+ retval = idr_get_new_above(&dev_priv->object_idr, item, 1, &user_key);
+ if (retval == -EAGAIN)
+ goto again;
+ if (retval)
+ goto fail_idr;
+
+ list_add(&item->owner_list, &file_priv->obj_list);
+ mutex_unlock(&dev->struct_mutex);
+
+ mem->offset = ((pool == 0) ?
+ dev_priv->vram_offset : dev_priv->agp_offset) +
+ (offset << SIS_MM_ALIGN_SHIFT);
+ mem->free = user_key;
+ mem->size = mem->size << SIS_MM_ALIGN_SHIFT;
+
+ return 0;
+
+fail_idr:
+ drm_mm_remove_node(&item->mm_node);
+fail_alloc:
+ kfree(item);
+ mutex_unlock(&dev->struct_mutex);
+
+ mem->offset = 0;
+ mem->size = 0;
+ mem->free = 0;
+
DRM_DEBUG("alloc %d, size = %d, offset = %d\n", pool, mem->size,
mem->offset);
@@ -167,14 +171,28 @@ static int sis_drm_free(struct drm_device *dev, void *data, struct drm_file *fil
{
drm_sis_private_t *dev_priv = dev->dev_private;
drm_sis_mem_t *mem = data;
- int ret;
+ struct sis_memblock *obj;
mutex_lock(&dev->struct_mutex);
- ret = drm_sman_free_key(&dev_priv->sman, mem->free);
+ obj = idr_find(&dev_priv->object_idr, mem->free);
+ if (obj == NULL) {
+ mutex_unlock(&dev->struct_mutex);
+ return -EINVAL;
+ }
+
+ idr_remove(&dev_priv->object_idr, mem->free);
+ list_del(&obj->owner_list);
+ if (drm_mm_node_allocated(&obj->mm_node))
+ drm_mm_remove_node(&obj->mm_node);
+#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
+ else
+ sis_free(obj->req.offset);
+#endif
+ kfree(obj);
mutex_unlock(&dev->struct_mutex);
DRM_DEBUG("free = 0x%lx\n", mem->free);
- return ret;
+ return 0;
}
static int sis_fb_alloc(struct drm_device *dev, void *data,
@@ -188,18 +206,10 @@ static int sis_ioctl_agp_init(struct drm_device *dev, void *data,
{
drm_sis_private_t *dev_priv = dev->dev_private;
drm_sis_agp_t *agp = data;
- int ret;
dev_priv = dev->dev_private;
mutex_lock(&dev->struct_mutex);
- ret = drm_sman_set_range(&dev_priv->sman, AGP_TYPE, 0,
- agp->size >> SIS_MM_ALIGN_SHIFT);
-
- if (ret) {
- DRM_ERROR("AGP memory manager initialisation error\n");
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
+ drm_mm_init(&dev_priv->agp_mm, 0, agp->size >> SIS_MM_ALIGN_SHIFT);
dev_priv->agp_initialized = 1;
dev_priv->agp_offset = agp->offset;
@@ -293,20 +303,26 @@ void sis_lastclose(struct drm_device *dev)
return;
mutex_lock(&dev->struct_mutex);
- drm_sman_cleanup(&dev_priv->sman);
- dev_priv->vram_initialized = 0;
- dev_priv->agp_initialized = 0;
+ if (dev_priv->vram_initialized) {
+ drm_mm_takedown(&dev_priv->vram_mm);
+ dev_priv->vram_initialized = 0;
+ }
+ if (dev_priv->agp_initialized) {
+ drm_mm_takedown(&dev_priv->agp_mm);
+ dev_priv->agp_initialized = 0;
+ }
dev_priv->mmio = NULL;
mutex_unlock(&dev->struct_mutex);
}
void sis_reclaim_buffers_locked(struct drm_device *dev,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
- drm_sis_private_t *dev_priv = dev->dev_private;
+ struct sis_file_private *file_priv = file->driver_priv;
+ struct sis_memblock *entry, *next;
mutex_lock(&dev->struct_mutex);
- if (drm_sman_owner_clean(&dev_priv->sman, (unsigned long)file_priv)) {
+ if (list_empty(&file_priv->obj_list)) {
mutex_unlock(&dev->struct_mutex);
return;
}
@@ -314,7 +330,18 @@ void sis_reclaim_buffers_locked(struct drm_device *dev,
if (dev->driver->dma_quiescent)
dev->driver->dma_quiescent(dev);
- drm_sman_owner_cleanup(&dev_priv->sman, (unsigned long)file_priv);
+
+ list_for_each_entry_safe(entry, next, &file_priv->obj_list,
+ owner_list) {
+ list_del(&entry->owner_list);
+ if (drm_mm_node_allocated(&entry->mm_node))
+ drm_mm_remove_node(&entry->mm_node);
+#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
+ else
+ sis_free(entry->req.offset);
+#endif
+ kfree(entry);
+ }
mutex_unlock(&dev->struct_mutex);
return;
}
diff --git a/drivers/gpu/drm/tdfx/tdfx_drv.c b/drivers/gpu/drm/tdfx/tdfx_drv.c
index cda29911e332..1613c78544c0 100644
--- a/drivers/gpu/drm/tdfx/tdfx_drv.c
+++ b/drivers/gpu/drm/tdfx/tdfx_drv.c
@@ -41,20 +41,21 @@ static struct pci_device_id pciidlist[] = {
tdfx_PCI_IDS
};
+static const struct file_operations tdfx_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = drm_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+ .llseek = noop_llseek,
+};
+
static struct drm_driver driver = {
.driver_features = DRIVER_USE_MTRR,
.reclaim_buffers = drm_core_reclaim_buffers,
- .fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = drm_mmap,
- .poll = drm_poll,
- .fasync = drm_fasync,
- .llseek = noop_llseek,
- },
-
+ .fops = &tdfx_driver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
index f3cf6f02c997..b2b33dde2afb 100644
--- a/drivers/gpu/drm/ttm/Makefile
+++ b/drivers/gpu/drm/ttm/Makefile
@@ -7,4 +7,8 @@ ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \
ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o \
ttm_bo_manager.o
+ifeq ($(CONFIG_SWIOTLB),y)
+ttm-y += ttm_page_alloc_dma.o
+endif
+
obj-$(CONFIG_DRM_TTM) += ttm.o
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
index 1c4a72f681c1..747c1413fc95 100644
--- a/drivers/gpu/drm/ttm/ttm_agp_backend.c
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -31,6 +31,7 @@
#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_page_alloc.h"
#ifdef TTM_HAS_AGP
#include "ttm/ttm_placement.h"
#include <linux/agp_backend.h>
@@ -40,45 +41,33 @@
#include <asm/agp.h>
struct ttm_agp_backend {
- struct ttm_backend backend;
+ struct ttm_tt ttm;
struct agp_memory *mem;
struct agp_bridge_data *bridge;
};
-static int ttm_agp_populate(struct ttm_backend *backend,
- unsigned long num_pages, struct page **pages,
- struct page *dummy_read_page,
- dma_addr_t *dma_addrs)
+static int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
- struct ttm_agp_backend *agp_be =
- container_of(backend, struct ttm_agp_backend, backend);
- struct page **cur_page, **last_page = pages + num_pages;
+ struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
+ struct drm_mm_node *node = bo_mem->mm_node;
struct agp_memory *mem;
+ int ret, cached = (bo_mem->placement & TTM_PL_FLAG_CACHED);
+ unsigned i;
- mem = agp_allocate_memory(agp_be->bridge, num_pages, AGP_USER_MEMORY);
+ mem = agp_allocate_memory(agp_be->bridge, ttm->num_pages, AGP_USER_MEMORY);
if (unlikely(mem == NULL))
return -ENOMEM;
mem->page_count = 0;
- for (cur_page = pages; cur_page < last_page; ++cur_page) {
- struct page *page = *cur_page;
+ for (i = 0; i < ttm->num_pages; i++) {
+ struct page *page = ttm->pages[i];
+
if (!page)
- page = dummy_read_page;
+ page = ttm->dummy_read_page;
mem->pages[mem->page_count++] = page;
}
agp_be->mem = mem;
- return 0;
-}
-
-static int ttm_agp_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
-{
- struct ttm_agp_backend *agp_be =
- container_of(backend, struct ttm_agp_backend, backend);
- struct drm_mm_node *node = bo_mem->mm_node;
- struct agp_memory *mem = agp_be->mem;
- int cached = (bo_mem->placement & TTM_PL_FLAG_CACHED);
- int ret;
mem->is_flushed = 1;
mem->type = (cached) ? AGP_USER_CACHED_MEMORY : AGP_USER_MEMORY;
@@ -90,50 +79,39 @@ static int ttm_agp_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
return ret;
}
-static int ttm_agp_unbind(struct ttm_backend *backend)
+static int ttm_agp_unbind(struct ttm_tt *ttm)
{
- struct ttm_agp_backend *agp_be =
- container_of(backend, struct ttm_agp_backend, backend);
+ struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
- if (agp_be->mem->is_bound)
- return agp_unbind_memory(agp_be->mem);
- else
- return 0;
-}
-
-static void ttm_agp_clear(struct ttm_backend *backend)
-{
- struct ttm_agp_backend *agp_be =
- container_of(backend, struct ttm_agp_backend, backend);
- struct agp_memory *mem = agp_be->mem;
-
- if (mem) {
- ttm_agp_unbind(backend);
- agp_free_memory(mem);
+ if (agp_be->mem) {
+ if (agp_be->mem->is_bound)
+ return agp_unbind_memory(agp_be->mem);
+ agp_free_memory(agp_be->mem);
+ agp_be->mem = NULL;
}
- agp_be->mem = NULL;
+ return 0;
}
-static void ttm_agp_destroy(struct ttm_backend *backend)
+static void ttm_agp_destroy(struct ttm_tt *ttm)
{
- struct ttm_agp_backend *agp_be =
- container_of(backend, struct ttm_agp_backend, backend);
+ struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
if (agp_be->mem)
- ttm_agp_clear(backend);
+ ttm_agp_unbind(ttm);
+ ttm_tt_fini(ttm);
kfree(agp_be);
}
static struct ttm_backend_func ttm_agp_func = {
- .populate = ttm_agp_populate,
- .clear = ttm_agp_clear,
.bind = ttm_agp_bind,
.unbind = ttm_agp_unbind,
.destroy = ttm_agp_destroy,
};
-struct ttm_backend *ttm_agp_backend_init(struct ttm_bo_device *bdev,
- struct agp_bridge_data *bridge)
+struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
+ struct agp_bridge_data *bridge,
+ unsigned long size, uint32_t page_flags,
+ struct page *dummy_read_page)
{
struct ttm_agp_backend *agp_be;
@@ -143,10 +121,29 @@ struct ttm_backend *ttm_agp_backend_init(struct ttm_bo_device *bdev,
agp_be->mem = NULL;
agp_be->bridge = bridge;
- agp_be->backend.func = &ttm_agp_func;
- agp_be->backend.bdev = bdev;
- return &agp_be->backend;
+ agp_be->ttm.func = &ttm_agp_func;
+
+ if (ttm_tt_init(&agp_be->ttm, bdev, size, page_flags, dummy_read_page)) {
+ return NULL;
+ }
+
+ return &agp_be->ttm;
+}
+EXPORT_SYMBOL(ttm_agp_tt_create);
+
+int ttm_agp_tt_populate(struct ttm_tt *ttm)
+{
+ if (ttm->state != tt_unpopulated)
+ return 0;
+
+ return ttm_pool_populate(ttm);
+}
+EXPORT_SYMBOL(ttm_agp_tt_populate);
+
+void ttm_agp_tt_unpopulate(struct ttm_tt *ttm)
+{
+ ttm_pool_unpopulate(ttm);
}
-EXPORT_SYMBOL(ttm_agp_backend_init);
+EXPORT_SYMBOL(ttm_agp_tt_unpopulate);
#endif
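
The hunk above only shows the AGP side of the conversion; how a driver is expected to hook it up is sketched below as an assumption, based on the new exports (ttm_agp_tt_create, ttm_agp_tt_populate, ttm_agp_tt_unpopulate) and on the ttm_bo_driver callbacks the core now dereferences (ttm_tt_create, ttm_tt_populate, ttm_tt_unpopulate). The my_agp_device wrapper and its bridge pointer are hypothetical.

    /* Hypothetical glue for an AGP-backed driver after the ttm_backend removal. */
    #include "ttm/ttm_bo_driver.h"
    #include "ttm/ttm_page_alloc.h"

    static struct ttm_tt *my_agp_tt_create(struct ttm_bo_device *bdev,
                                           unsigned long size,
                                           uint32_t page_flags,
                                           struct page *dummy_read_page)
    {
            struct my_agp_device *mdev =
                    container_of(bdev, struct my_agp_device, bdev);

            /* AGP ttm_tt objects are now created directly; no ttm_backend. */
            return ttm_agp_tt_create(bdev, mdev->bridge, size,
                                     page_flags, dummy_read_page);
    }

    static struct ttm_bo_driver my_agp_bo_driver = {
            .ttm_tt_create     = my_agp_tt_create,
            .ttm_tt_populate   = ttm_agp_tt_populate,
            .ttm_tt_unpopulate = ttm_agp_tt_unpopulate,
            /* remaining callbacks unchanged */
    };
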
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 0bb0f5f713e6..2f0eab66ece6 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -137,6 +137,7 @@ static void ttm_bo_release_list(struct kref *list_kref)
struct ttm_buffer_object *bo =
container_of(list_kref, struct ttm_buffer_object, list_kref);
struct ttm_bo_device *bdev = bo->bdev;
+ size_t acc_size = bo->acc_size;
BUG_ON(atomic_read(&bo->list_kref.refcount));
BUG_ON(atomic_read(&bo->kref.refcount));
@@ -152,9 +153,9 @@ static void ttm_bo_release_list(struct kref *list_kref)
if (bo->destroy)
bo->destroy(bo);
else {
- ttm_mem_global_free(bdev->glob->mem_glob, bo->acc_size);
kfree(bo);
}
+ ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
}
int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
@@ -337,27 +338,11 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
if (zero_alloc)
page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
case ttm_bo_type_kernel:
- bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
- page_flags, glob->dummy_read_page);
+ bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
+ page_flags, glob->dummy_read_page);
if (unlikely(bo->ttm == NULL))
ret = -ENOMEM;
break;
- case ttm_bo_type_user:
- bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
- page_flags | TTM_PAGE_FLAG_USER,
- glob->dummy_read_page);
- if (unlikely(bo->ttm == NULL)) {
- ret = -ENOMEM;
- break;
- }
-
- ret = ttm_tt_set_user(bo->ttm, current,
- bo->buffer_start, bo->num_pages);
- if (unlikely(ret != 0)) {
- ttm_tt_destroy(bo->ttm);
- bo->ttm = NULL;
- }
- break;
default:
printk(KERN_ERR TTM_PFX "Illegal buffer object type\n");
ret = -EINVAL;
@@ -419,9 +404,6 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
}
}
- if (bdev->driver->move_notify)
- bdev->driver->move_notify(bo, mem);
-
if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, mem);
@@ -434,6 +416,9 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
if (ret)
goto out_err;
+ if (bdev->driver->move_notify)
+ bdev->driver->move_notify(bo, mem);
+
moved:
if (bo->evicted) {
ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
@@ -472,6 +457,9 @@ out_err:
static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
{
+ if (bo->bdev->driver->move_notify)
+ bo->bdev->driver->move_notify(bo, NULL);
+
if (bo->ttm) {
ttm_tt_unbind(bo->ttm);
ttm_tt_destroy(bo->ttm);
@@ -913,16 +901,12 @@ static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
}
static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
- bool disallow_fixed,
uint32_t mem_type,
uint32_t proposed_placement,
uint32_t *masked_placement)
{
uint32_t cur_flags = ttm_bo_type_flags(mem_type);
- if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed)
- return false;
-
if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
return false;
@@ -967,7 +951,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
man = &bdev->man[mem_type];
type_ok = ttm_bo_mt_compatible(man,
- bo->type == ttm_bo_type_user,
mem_type,
placement->placement[i],
&cur_flags);
@@ -1015,7 +998,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
if (!man->has_type)
continue;
if (!ttm_bo_mt_compatible(man,
- bo->type == ttm_bo_type_user,
mem_type,
placement->busy_placement[i],
&cur_flags))
@@ -1185,6 +1167,17 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
{
int ret = 0;
unsigned long num_pages;
+ struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
+
+ ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
+ if (ret) {
+ printk(KERN_ERR TTM_PFX "Out of kernel memory.\n");
+ if (destroy)
+ (*destroy)(bo);
+ else
+ kfree(bo);
+ return -ENOMEM;
+ }
size += buffer_start & ~PAGE_MASK;
num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -1255,14 +1248,34 @@ out_err:
}
EXPORT_SYMBOL(ttm_bo_init);
-static inline size_t ttm_bo_size(struct ttm_bo_global *glob,
- unsigned long num_pages)
+size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
+ unsigned long bo_size,
+ unsigned struct_size)
{
- size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) &
- PAGE_MASK;
+ unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
+ size_t size = 0;
- return glob->ttm_bo_size + 2 * page_array_size;
+ size += ttm_round_pot(struct_size);
+ size += PAGE_ALIGN(npages * sizeof(void *));
+ size += ttm_round_pot(sizeof(struct ttm_tt));
+ return size;
}
+EXPORT_SYMBOL(ttm_bo_acc_size);
+
+size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
+ unsigned long bo_size,
+ unsigned struct_size)
+{
+ unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
+ size_t size = 0;
+
+ size += ttm_round_pot(struct_size);
+ size += PAGE_ALIGN(npages * sizeof(void *));
+ size += PAGE_ALIGN(npages * sizeof(dma_addr_t));
+ size += ttm_round_pot(sizeof(struct ttm_dma_tt));
+ return size;
+}
+EXPORT_SYMBOL(ttm_bo_dma_acc_size);
int ttm_bo_create(struct ttm_bo_device *bdev,
unsigned long size,
@@ -1276,10 +1289,10 @@ int ttm_bo_create(struct ttm_bo_device *bdev,
{
struct ttm_buffer_object *bo;
struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
+ size_t acc_size;
int ret;
- size_t acc_size =
- ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
+ acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
if (unlikely(ret != 0))
return ret;
@@ -1465,13 +1478,6 @@ int ttm_bo_global_init(struct drm_global_reference *ref)
goto out_no_shrink;
}
- glob->ttm_bo_extra_size =
- ttm_round_pot(sizeof(struct ttm_tt)) +
- ttm_round_pot(sizeof(struct ttm_backend));
-
- glob->ttm_bo_size = glob->ttm_bo_extra_size +
- ttm_round_pot(sizeof(struct ttm_buffer_object));
-
atomic_set(&glob->bo_count, 0);
ret = kobject_init_and_add(
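
Callers of ttm_bo_init() are now expected to pre-compute acc_size with the two helpers added above, since ttm_bo_init() performs the ttm_mem_global_alloc() accounting itself and ttm_bo_release_list() frees it. A minimal sketch, assuming a hypothetical 'struct my_bo' that embeds the ttm_buffer_object:

    /* Hypothetical helper: pick the accounting size for a driver object.
     * use_dma mirrors whether the driver backs its TTs with ttm_dma_tt. */
    static size_t my_bo_acc_size(struct ttm_bo_device *bdev,
                                 unsigned long size, bool use_dma)
    {
            if (use_dma)
                    return ttm_bo_dma_acc_size(bdev, size,
                                               sizeof(struct my_bo));
            return ttm_bo_acc_size(bdev, size, sizeof(struct my_bo));
    }
    /* The result is passed as the acc_size argument of ttm_bo_init(). */
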
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 082fcaea583f..f8187ead7b37 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -244,7 +244,7 @@ static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
unsigned long page,
pgprot_t prot)
{
- struct page *d = ttm_tt_get_page(ttm, page);
+ struct page *d = ttm->pages[page];
void *dst;
if (!d)
@@ -281,7 +281,7 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
unsigned long page,
pgprot_t prot)
{
- struct page *s = ttm_tt_get_page(ttm, page);
+ struct page *s = ttm->pages[page];
void *src;
if (!s)
@@ -342,6 +342,12 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
if (old_iomap == NULL && ttm == NULL)
goto out2;
+ if (ttm->state == tt_unpopulated) {
+ ret = ttm->bdev->driver->ttm_tt_populate(ttm);
+ if (ret)
+ goto out1;
+ }
+
add = 0;
dir = 1;
@@ -439,6 +445,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
kref_init(&fbo->list_kref);
kref_init(&fbo->kref);
fbo->destroy = &ttm_transfered_destroy;
+ fbo->acc_size = 0;
*new_obj = fbo;
return 0;
@@ -502,10 +509,16 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
{
struct ttm_mem_reg *mem = &bo->mem; pgprot_t prot;
struct ttm_tt *ttm = bo->ttm;
- struct page *d;
- int i;
+ int ret;
BUG_ON(!ttm);
+
+ if (ttm->state == tt_unpopulated) {
+ ret = ttm->bdev->driver->ttm_tt_populate(ttm);
+ if (ret)
+ return ret;
+ }
+
if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
/*
* We're mapping a single page, and the desired
@@ -513,18 +526,9 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
*/
map->bo_kmap_type = ttm_bo_map_kmap;
- map->page = ttm_tt_get_page(ttm, start_page);
+ map->page = ttm->pages[start_page];
map->virtual = kmap(map->page);
} else {
- /*
- * Populate the part we're mapping;
- */
- for (i = start_page; i < start_page + num_pages; ++i) {
- d = ttm_tt_get_page(ttm, i);
- if (!d)
- return -ENOMEM;
- }
-
/*
* We need to use vmap to get the desired page protection
* or to make the buffer object look contiguous.
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 221b924acebe..54412848de88 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -174,18 +174,23 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
vma->vm_page_prot = (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
vm_get_page_prot(vma->vm_flags) :
ttm_io_prot(bo->mem.placement, vma->vm_page_prot);
+
+ /* Allocate all pages at once, the most common usage */
+ if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
+ retval = VM_FAULT_OOM;
+ goto out_io_unlock;
+ }
}
/*
* Speculatively prefault a number of pages. Only error on
* first page.
*/
-
for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
if (bo->mem.bus.is_iomem)
pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset;
else {
- page = ttm_tt_get_page(ttm, page_offset);
+ page = ttm->pages[page_offset];
if (unlikely(!page && i == 0)) {
retval = VM_FAULT_OOM;
goto out_io_unlock;
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index e70ddd82dc02..9eba8e9a4e9c 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -395,6 +395,7 @@ int ttm_mem_global_init(struct ttm_mem_global *glob)
zone->name, (unsigned long long) zone->max_mem >> 10);
}
ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
+ ttm_dma_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
return 0;
out_no_zone:
ttm_mem_global_release(glob);
@@ -409,6 +410,7 @@ void ttm_mem_global_release(struct ttm_mem_global *glob)
/* let the page allocator first stop the shrink work. */
ttm_page_alloc_fini();
+ ttm_dma_page_alloc_fini();
flush_workqueue(glob->swap_queue);
destroy_workqueue(glob->swap_queue);
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 727e93daac3b..499debda791e 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -619,8 +619,10 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
* @return count of pages still required to fulfill the request.
*/
static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
- struct list_head *pages, int ttm_flags,
- enum ttm_caching_state cstate, unsigned count)
+ struct list_head *pages,
+ int ttm_flags,
+ enum ttm_caching_state cstate,
+ unsigned count)
{
unsigned long irq_flags;
struct list_head *p;
@@ -660,17 +662,67 @@ out:
return count;
}
+/* Put all pages in pages list to correct pool to wait for reuse */
+static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
+ enum ttm_caching_state cstate)
+{
+ unsigned long irq_flags;
+ struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
+ unsigned i;
+
+ if (pool == NULL) {
+ /* No pool for this memory type so free the pages */
+ for (i = 0; i < npages; i++) {
+ if (pages[i]) {
+ if (page_count(pages[i]) != 1)
+ printk(KERN_ERR TTM_PFX
+ "Erroneous page count. "
+ "Leaking pages.\n");
+ __free_page(pages[i]);
+ pages[i] = NULL;
+ }
+ }
+ return;
+ }
+
+ spin_lock_irqsave(&pool->lock, irq_flags);
+ for (i = 0; i < npages; i++) {
+ if (pages[i]) {
+ if (page_count(pages[i]) != 1)
+ printk(KERN_ERR TTM_PFX
+ "Erroneous page count. "
+ "Leaking pages.\n");
+ list_add_tail(&pages[i]->lru, &pool->list);
+ pages[i] = NULL;
+ pool->npages++;
+ }
+ }
+ /* Check that we don't go over the pool limit */
+ npages = 0;
+ if (pool->npages > _manager->options.max_size) {
+ npages = pool->npages - _manager->options.max_size;
+ /* free at least NUM_PAGES_TO_ALLOC number of pages
+ * to reduce calls to set_memory_wb */
+ if (npages < NUM_PAGES_TO_ALLOC)
+ npages = NUM_PAGES_TO_ALLOC;
+ }
+ spin_unlock_irqrestore(&pool->lock, irq_flags);
+ if (npages)
+ ttm_page_pool_free(pool, npages);
+}
+
/*
* On success pages list will hold count number of correctly
* cached pages.
*/
-int ttm_get_pages(struct list_head *pages, int flags,
- enum ttm_caching_state cstate, unsigned count,
- dma_addr_t *dma_address)
+static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
+ enum ttm_caching_state cstate)
{
struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
+ struct list_head plist;
struct page *p = NULL;
gfp_t gfp_flags = GFP_USER;
+ unsigned count;
int r;
/* set zero flag for page allocation if required */
@@ -684,7 +736,7 @@ int ttm_get_pages(struct list_head *pages, int flags,
else
gfp_flags |= GFP_HIGHUSER;
- for (r = 0; r < count; ++r) {
+ for (r = 0; r < npages; ++r) {
p = alloc_page(gfp_flags);
if (!p) {
@@ -693,87 +745,53 @@ int ttm_get_pages(struct list_head *pages, int flags,
return -ENOMEM;
}
- list_add(&p->lru, pages);
+ pages[r] = p;
}
return 0;
}
-
/* combine zero flag to pool flags */
gfp_flags |= pool->gfp_flags;
/* First we take pages from the pool */
- count = ttm_page_pool_get_pages(pool, pages, flags, cstate, count);
+ INIT_LIST_HEAD(&plist);
+ npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
+ count = 0;
+ list_for_each_entry(p, &plist, lru) {
+ pages[count++] = p;
+ }
/* clear the pages coming from the pool if requested */
if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
- list_for_each_entry(p, pages, lru) {
+ list_for_each_entry(p, &plist, lru) {
clear_page(page_address(p));
}
}
/* If pool didn't have enough pages allocate new one. */
- if (count > 0) {
+ if (npages > 0) {
/* ttm_alloc_new_pages doesn't reference pool so we can run
* multiple requests in parallel.
**/
- r = ttm_alloc_new_pages(pages, gfp_flags, flags, cstate, count);
+ INIT_LIST_HEAD(&plist);
+ r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate, npages);
+ list_for_each_entry(p, &plist, lru) {
+ pages[count++] = p;
+ }
if (r) {
/* If there are any pages in the list, put them back to
* the pool. */
printk(KERN_ERR TTM_PFX
"Failed to allocate extra pages "
"for large request.");
- ttm_put_pages(pages, 0, flags, cstate, NULL);
+ ttm_put_pages(pages, count, flags, cstate);
return r;
}
}
-
return 0;
}
-/* Put all pages in pages list to correct pool to wait for reuse */
-void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags,
- enum ttm_caching_state cstate, dma_addr_t *dma_address)
-{
- unsigned long irq_flags;
- struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
- struct page *p, *tmp;
-
- if (pool == NULL) {
- /* No pool for this memory type so free the pages */
-
- list_for_each_entry_safe(p, tmp, pages, lru) {
- __free_page(p);
- }
- /* Make the pages list empty */
- INIT_LIST_HEAD(pages);
- return;
- }
- if (page_count == 0) {
- list_for_each_entry_safe(p, tmp, pages, lru) {
- ++page_count;
- }
- }
-
- spin_lock_irqsave(&pool->lock, irq_flags);
- list_splice_init(pages, &pool->list);
- pool->npages += page_count;
- /* Check that we don't go over the pool limit */
- page_count = 0;
- if (pool->npages > _manager->options.max_size) {
- page_count = pool->npages - _manager->options.max_size;
- /* free at least NUM_PAGES_TO_ALLOC number of pages
- * to reduce calls to set_memory_wb */
- if (page_count < NUM_PAGES_TO_ALLOC)
- page_count = NUM_PAGES_TO_ALLOC;
- }
- spin_unlock_irqrestore(&pool->lock, irq_flags);
- if (page_count)
- ttm_page_pool_free(pool, page_count);
-}
-
static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
char *name)
{
@@ -836,6 +854,62 @@ void ttm_page_alloc_fini(void)
_manager = NULL;
}
+int ttm_pool_populate(struct ttm_tt *ttm)
+{
+ struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
+ unsigned i;
+ int ret;
+
+ if (ttm->state != tt_unpopulated)
+ return 0;
+
+ for (i = 0; i < ttm->num_pages; ++i) {
+ ret = ttm_get_pages(&ttm->pages[i], 1,
+ ttm->page_flags,
+ ttm->caching_state);
+ if (ret != 0) {
+ ttm_pool_unpopulate(ttm);
+ return -ENOMEM;
+ }
+
+ ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
+ false, false);
+ if (unlikely(ret != 0)) {
+ ttm_pool_unpopulate(ttm);
+ return -ENOMEM;
+ }
+ }
+
+ if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
+ ret = ttm_tt_swapin(ttm);
+ if (unlikely(ret != 0)) {
+ ttm_pool_unpopulate(ttm);
+ return ret;
+ }
+ }
+
+ ttm->state = tt_unbound;
+ return 0;
+}
+EXPORT_SYMBOL(ttm_pool_populate);
+
+void ttm_pool_unpopulate(struct ttm_tt *ttm)
+{
+ unsigned i;
+
+ for (i = 0; i < ttm->num_pages; ++i) {
+ if (ttm->pages[i]) {
+ ttm_mem_global_free_page(ttm->glob->mem_glob,
+ ttm->pages[i]);
+ ttm_put_pages(&ttm->pages[i], 1,
+ ttm->page_flags,
+ ttm->caching_state);
+ }
+ }
+ ttm->state = tt_unpopulated;
+}
+EXPORT_SYMBOL(ttm_pool_unpopulate);
+
int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
{
struct ttm_page_pool *p;
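
For drivers that keep using the plain page pool, the newly exported ttm_pool_populate()/ttm_pool_unpopulate() are meant to be wired straight into the per-driver populate hooks, exactly as the AGP helpers above do. A minimal sketch (the my_-prefixed hooks are hypothetical):

    static int my_tt_populate(struct ttm_tt *ttm)
    {
            /* ttm_pool_populate() is a no-op if the ttm is already populated. */
            return ttm_pool_populate(ttm);
    }

    static void my_tt_unpopulate(struct ttm_tt *ttm)
    {
            ttm_pool_unpopulate(ttm);
    }
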
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
new file mode 100644
index 000000000000..0c46d8cdc6ea
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -0,0 +1,1142 @@
+/*
+ * Copyright 2011 (c) Oracle Corp.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+ */
+
+/*
+ * A simple DMA pool loosely based on dmapool.c. It has certain advantages
+ * over the DMA pools:
+ * - Pool collects recently freed pages for reuse (and hooks up to
+ * the shrinker).
+ * - Tracks currently in use pages
+ * - Tracks whether the page is UC, WB or cached (and reverts to WB
+ * when freed).
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/list.h>
+#include <linux/seq_file.h> /* for seq_printf */
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/highmem.h>
+#include <linux/mm_types.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/atomic.h>
+#include <linux/device.h>
+#include <linux/kthread.h>
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_page_alloc.h"
+#ifdef TTM_HAS_AGP
+#include <asm/agp.h>
+#endif
+
+#define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
+#define SMALL_ALLOCATION 4
+#define FREE_ALL_PAGES (~0U)
+/* times are in msecs */
+#define IS_UNDEFINED (0)
+#define IS_WC (1<<1)
+#define IS_UC (1<<2)
+#define IS_CACHED (1<<3)
+#define IS_DMA32 (1<<4)
+
+enum pool_type {
+ POOL_IS_UNDEFINED,
+ POOL_IS_WC = IS_WC,
+ POOL_IS_UC = IS_UC,
+ POOL_IS_CACHED = IS_CACHED,
+ POOL_IS_WC_DMA32 = IS_WC | IS_DMA32,
+ POOL_IS_UC_DMA32 = IS_UC | IS_DMA32,
+ POOL_IS_CACHED_DMA32 = IS_CACHED | IS_DMA32,
+};
+/*
+ * The pool structure. There are usually six pools:
+ * - generic (not restricted to DMA32):
+ * - write combined, uncached, cached.
+ * - dma32 (up to 2^32 - so up 4GB):
+ * - write combined, uncached, cached.
+ * for each 'struct device'. The 'cached' is for pages that are actively used.
+ * The other ones can be shrunk by the shrinker API if necessary.
+ * @pools: The 'struct device->dma_pools' link.
+ * @type: Type of the pool
+ * @lock: Protects the inuse_list and free_list from concurrent access. Must be
+ * used with irqsave/irqrestore variants because the pool allocator may be
+ * called from delayed work.
+ * @inuse_list: Pool of pages that are in use. The order is very important:
+ * it is the order in which the TTM pages were put back.
+ * @free_list: Pool of pages that are free to be used. No order requirements.
+ * @dev: The device that is associated with these pools.
+ * @size: Size used during DMA allocation.
+ * @npages_free: Count of available pages for re-use.
+ * @npages_in_use: Count of pages that are in use.
+ * @nfrees: Stats when pool is shrinking.
+ * @nrefills: Stats when the pool is grown.
+ * @gfp_flags: Flags to pass for alloc_page.
+ * @name: Name of the pool.
+ * @dev_name: Name derived from dev - similar to how dev_info works.
+ * Used during shutdown as the dev_info during release is unavailable.
+ */
+struct dma_pool {
+ struct list_head pools; /* The 'struct device->dma_pools link */
+ enum pool_type type;
+ spinlock_t lock;
+ struct list_head inuse_list;
+ struct list_head free_list;
+ struct device *dev;
+ unsigned size;
+ unsigned npages_free;
+ unsigned npages_in_use;
+ unsigned long nfrees; /* Stats when shrunk. */
+ unsigned long nrefills; /* Stats when grown. */
+ gfp_t gfp_flags;
+ char name[13]; /* "cached dma32" */
+ char dev_name[64]; /* Constructed from dev */
+};
+
+/*
+ * The accounting record that keeps track of the allocated page along with
+ * the DMA address.
+ * @page_list: The link to the 'page_list' in 'struct dma_pool'.
+ * @vaddr: The virtual address of the page
+ * @dma: The bus address of the page. If the page is not allocated
+ * via the DMA API, it will be -1.
+ */
+struct dma_page {
+ struct list_head page_list;
+ void *vaddr;
+ struct page *p;
+ dma_addr_t dma;
+};
+
+/*
+ * Limits for the pool. They are handled without locks because the only place
+ * where they may change is the sysfs store. They won't have an immediate
+ * effect anyway, so forcing serialization to access them is pointless.
+ */
+
+struct ttm_pool_opts {
+ unsigned alloc_size;
+ unsigned max_size;
+ unsigned small;
+};
+
+/*
+ * Contains the list of all of the 'struct device' and their corresponding
+ * DMA pools. Guarded by _mutex->lock.
+ * @pools: The link to 'struct ttm_pool_manager->pools'
+ * @dev: The 'struct device' associated with the 'pool'
+ * @pool: The 'struct dma_pool' associated with the 'dev'
+ */
+struct device_pools {
+ struct list_head pools;
+ struct device *dev;
+ struct dma_pool *pool;
+};
+
+/*
+ * struct ttm_pool_manager - Holds memory pools for fast allocation
+ *
+ * @lock: Lock used when adding/removing from pools
+ * @pools: List of 'struct device' and 'struct dma_pool' tuples.
+ * @options: Limits for the pool.
+ * @npools: Total amount of pools in existence.
+ * @shrinker: The structure used by [un|]register_shrinker
+ */
+struct ttm_pool_manager {
+ struct mutex lock;
+ struct list_head pools;
+ struct ttm_pool_opts options;
+ unsigned npools;
+ struct shrinker mm_shrink;
+ struct kobject kobj;
+};
+
+static struct ttm_pool_manager *_manager;
+
+static struct attribute ttm_page_pool_max = {
+ .name = "pool_max_size",
+ .mode = S_IRUGO | S_IWUSR
+};
+static struct attribute ttm_page_pool_small = {
+ .name = "pool_small_allocation",
+ .mode = S_IRUGO | S_IWUSR
+};
+static struct attribute ttm_page_pool_alloc_size = {
+ .name = "pool_allocation_size",
+ .mode = S_IRUGO | S_IWUSR
+};
+
+static struct attribute *ttm_pool_attrs[] = {
+ &ttm_page_pool_max,
+ &ttm_page_pool_small,
+ &ttm_page_pool_alloc_size,
+ NULL
+};
+
+static void ttm_pool_kobj_release(struct kobject *kobj)
+{
+ struct ttm_pool_manager *m =
+ container_of(kobj, struct ttm_pool_manager, kobj);
+ kfree(m);
+}
+
+static ssize_t ttm_pool_store(struct kobject *kobj, struct attribute *attr,
+ const char *buffer, size_t size)
+{
+ struct ttm_pool_manager *m =
+ container_of(kobj, struct ttm_pool_manager, kobj);
+ int chars;
+ unsigned val;
+ chars = sscanf(buffer, "%u", &val);
+ if (chars == 0)
+ return size;
+
+ /* Convert kb to number of pages */
+ val = val / (PAGE_SIZE >> 10);
+
+ if (attr == &ttm_page_pool_max)
+ m->options.max_size = val;
+ else if (attr == &ttm_page_pool_small)
+ m->options.small = val;
+ else if (attr == &ttm_page_pool_alloc_size) {
+ if (val > NUM_PAGES_TO_ALLOC*8) {
+ printk(KERN_ERR TTM_PFX
+ "Setting allocation size to %lu "
+ "is not allowed. Recommended size is "
+ "%lu\n",
+ NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
+ NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
+ return size;
+ } else if (val > NUM_PAGES_TO_ALLOC) {
+ printk(KERN_WARNING TTM_PFX
+ "Setting allocation size to "
+ "larger than %lu is not recommended.\n",
+ NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
+ }
+ m->options.alloc_size = val;
+ }
+
+ return size;
+}
+
+static ssize_t ttm_pool_show(struct kobject *kobj, struct attribute *attr,
+ char *buffer)
+{
+ struct ttm_pool_manager *m =
+ container_of(kobj, struct ttm_pool_manager, kobj);
+ unsigned val = 0;
+
+ if (attr == &ttm_page_pool_max)
+ val = m->options.max_size;
+ else if (attr == &ttm_page_pool_small)
+ val = m->options.small;
+ else if (attr == &ttm_page_pool_alloc_size)
+ val = m->options.alloc_size;
+
+ val = val * (PAGE_SIZE >> 10);
+
+ return snprintf(buffer, PAGE_SIZE, "%u\n", val);
+}
+
+static const struct sysfs_ops ttm_pool_sysfs_ops = {
+ .show = &ttm_pool_show,
+ .store = &ttm_pool_store,
+};
+
+static struct kobj_type ttm_pool_kobj_type = {
+ .release = &ttm_pool_kobj_release,
+ .sysfs_ops = &ttm_pool_sysfs_ops,
+ .default_attrs = ttm_pool_attrs,
+};
+
+#ifndef CONFIG_X86
+static int set_pages_array_wb(struct page **pages, int addrinarray)
+{
+#ifdef TTM_HAS_AGP
+ int i;
+
+ for (i = 0; i < addrinarray; i++)
+ unmap_page_from_agp(pages[i]);
+#endif
+ return 0;
+}
+
+static int set_pages_array_wc(struct page **pages, int addrinarray)
+{
+#ifdef TTM_HAS_AGP
+ int i;
+
+ for (i = 0; i < addrinarray; i++)
+ map_page_into_agp(pages[i]);
+#endif
+ return 0;
+}
+
+static int set_pages_array_uc(struct page **pages, int addrinarray)
+{
+#ifdef TTM_HAS_AGP
+ int i;
+
+ for (i = 0; i < addrinarray; i++)
+ map_page_into_agp(pages[i]);
+#endif
+ return 0;
+}
+#endif /* for !CONFIG_X86 */
+
+static int ttm_set_pages_caching(struct dma_pool *pool,
+ struct page **pages, unsigned cpages)
+{
+ int r = 0;
+ /* Set page caching */
+ if (pool->type & IS_UC) {
+ r = set_pages_array_uc(pages, cpages);
+ if (r)
+ pr_err(TTM_PFX
+ "%s: Failed to set %d pages to uc!\n",
+ pool->dev_name, cpages);
+ }
+ if (pool->type & IS_WC) {
+ r = set_pages_array_wc(pages, cpages);
+ if (r)
+ pr_err(TTM_PFX
+ "%s: Failed to set %d pages to wc!\n",
+ pool->dev_name, cpages);
+ }
+ return r;
+}
+
+static void __ttm_dma_free_page(struct dma_pool *pool, struct dma_page *d_page)
+{
+ dma_addr_t dma = d_page->dma;
+ dma_free_coherent(pool->dev, pool->size, d_page->vaddr, dma);
+
+ kfree(d_page);
+ d_page = NULL;
+}
+static struct dma_page *__ttm_dma_alloc_page(struct dma_pool *pool)
+{
+ struct dma_page *d_page;
+
+ d_page = kmalloc(sizeof(struct dma_page), GFP_KERNEL);
+ if (!d_page)
+ return NULL;
+
+ d_page->vaddr = dma_alloc_coherent(pool->dev, pool->size,
+ &d_page->dma,
+ pool->gfp_flags);
+ if (d_page->vaddr)
+ d_page->p = virt_to_page(d_page->vaddr);
+ else {
+ kfree(d_page);
+ d_page = NULL;
+ }
+ return d_page;
+}
+static enum pool_type ttm_to_type(int flags, enum ttm_caching_state cstate)
+{
+ enum pool_type type = IS_UNDEFINED;
+
+ if (flags & TTM_PAGE_FLAG_DMA32)
+ type |= IS_DMA32;
+ if (cstate == tt_cached)
+ type |= IS_CACHED;
+ else if (cstate == tt_uncached)
+ type |= IS_UC;
+ else
+ type |= IS_WC;
+
+ return type;
+}
+
+static void ttm_pool_update_free_locked(struct dma_pool *pool,
+ unsigned freed_pages)
+{
+ pool->npages_free -= freed_pages;
+ pool->nfrees += freed_pages;
+
+}
+
+/* set memory back to wb and free the pages. */
+static void ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages,
+ struct page *pages[], unsigned npages)
+{
+ struct dma_page *d_page, *tmp;
+
+ /* Don't set WB on WB page pool. */
+ if (npages && !(pool->type & IS_CACHED) &&
+ set_pages_array_wb(pages, npages))
+ pr_err(TTM_PFX "%s: Failed to set %d pages to wb!\n",
+ pool->dev_name, npages);
+
+ list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
+ list_del(&d_page->page_list);
+ __ttm_dma_free_page(pool, d_page);
+ }
+}
+
+static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
+{
+ /* Don't set WB on WB page pool. */
+ if (!(pool->type & IS_CACHED) && set_pages_array_wb(&d_page->p, 1))
+ pr_err(TTM_PFX "%s: Failed to set %d pages to wb!\n",
+ pool->dev_name, 1);
+
+ list_del(&d_page->page_list);
+ __ttm_dma_free_page(pool, d_page);
+}
+
+/*
+ * Free pages from pool.
+ *
+ * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
+ * pages in one go.
+ *
+ * @pool: to free the pages from
+ * @nr_free: number of pages to free; FREE_ALL_PAGES frees every page in the pool
+ **/
+static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free)
+{
+ unsigned long irq_flags;
+ struct dma_page *dma_p, *tmp;
+ struct page **pages_to_free;
+ struct list_head d_pages;
+ unsigned freed_pages = 0,
+ npages_to_free = nr_free;
+
+ if (NUM_PAGES_TO_ALLOC < nr_free)
+ npages_to_free = NUM_PAGES_TO_ALLOC;
+#if 0
+ if (nr_free > 1) {
+ pr_debug("%s: (%s:%d) Attempting to free %d (%d) pages\n",
+ pool->dev_name, pool->name, current->pid,
+ npages_to_free, nr_free);
+ }
+#endif
+ pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
+ GFP_KERNEL);
+
+ if (!pages_to_free) {
+ pr_err(TTM_PFX
+ "%s: Failed to allocate memory for pool free operation.\n",
+ pool->dev_name);
+ return 0;
+ }
+ INIT_LIST_HEAD(&d_pages);
+restart:
+ spin_lock_irqsave(&pool->lock, irq_flags);
+
+ /* We pick the oldest ones off the list */
+ list_for_each_entry_safe_reverse(dma_p, tmp, &pool->free_list,
+ page_list) {
+ if (freed_pages >= npages_to_free)
+ break;
+
+ /* Move the dma_page from one list to another. */
+ list_move(&dma_p->page_list, &d_pages);
+
+ pages_to_free[freed_pages++] = dma_p->p;
+ /* We can only remove NUM_PAGES_TO_ALLOC at a time. */
+ if (freed_pages >= NUM_PAGES_TO_ALLOC) {
+
+ ttm_pool_update_free_locked(pool, freed_pages);
+ /**
+ * Because changing page caching is costly
+ * we unlock the pool to prevent stalling.
+ */
+ spin_unlock_irqrestore(&pool->lock, irq_flags);
+
+ ttm_dma_pages_put(pool, &d_pages, pages_to_free,
+ freed_pages);
+
+ INIT_LIST_HEAD(&d_pages);
+
+ if (likely(nr_free != FREE_ALL_PAGES))
+ nr_free -= freed_pages;
+
+ if (NUM_PAGES_TO_ALLOC >= nr_free)
+ npages_to_free = nr_free;
+ else
+ npages_to_free = NUM_PAGES_TO_ALLOC;
+
+ freed_pages = 0;
+
+ /* free all so restart the processing */
+ if (nr_free)
+ goto restart;
+
+ /* Not allowed to fall through or break because the
+ * following code runs under the spinlock, while here we
+ * have already dropped it.
+ */
+ goto out;
+
+ }
+ }
+
+ /* remove range of pages from the pool */
+ if (freed_pages) {
+ ttm_pool_update_free_locked(pool, freed_pages);
+ nr_free -= freed_pages;
+ }
+
+ spin_unlock_irqrestore(&pool->lock, irq_flags);
+
+ if (freed_pages)
+ ttm_dma_pages_put(pool, &d_pages, pages_to_free, freed_pages);
+out:
+ kfree(pages_to_free);
+ return nr_free;
+}
+
+static void ttm_dma_free_pool(struct device *dev, enum pool_type type)
+{
+ struct device_pools *p;
+ struct dma_pool *pool;
+
+ if (!dev)
+ return;
+
+ mutex_lock(&_manager->lock);
+ list_for_each_entry_reverse(p, &_manager->pools, pools) {
+ if (p->dev != dev)
+ continue;
+ pool = p->pool;
+ if (pool->type != type)
+ continue;
+
+ list_del(&p->pools);
+ kfree(p);
+ _manager->npools--;
+ break;
+ }
+ list_for_each_entry_reverse(pool, &dev->dma_pools, pools) {
+ if (pool->type != type)
+ continue;
+ /* Takes a spinlock.. */
+ ttm_dma_page_pool_free(pool, FREE_ALL_PAGES);
+ WARN_ON(((pool->npages_in_use + pool->npages_free) != 0));
+ /* This code path is called after _all_ references to the
+ * struct device have been dropped - so nobody should be
+ * touching it. In case somebody is trying to _add_ we are
+ * guarded by the mutex. */
+ list_del(&pool->pools);
+ kfree(pool);
+ break;
+ }
+ mutex_unlock(&_manager->lock);
+}
+
+/*
+ * On free-ing of the 'struct device' this deconstructor is run.
+ * Albeit the pool might have already been freed earlier.
+ */
+static void ttm_dma_pool_release(struct device *dev, void *res)
+{
+ struct dma_pool *pool = *(struct dma_pool **)res;
+
+ if (pool)
+ ttm_dma_free_pool(dev, pool->type);
+}
+
+static int ttm_dma_pool_match(struct device *dev, void *res, void *match_data)
+{
+ return *(struct dma_pool **)res == match_data;
+}
+
+static struct dma_pool *ttm_dma_pool_init(struct device *dev, gfp_t flags,
+ enum pool_type type)
+{
+ char *n[] = {"wc", "uc", "cached", " dma32", "unknown",};
+ enum pool_type t[] = {IS_WC, IS_UC, IS_CACHED, IS_DMA32, IS_UNDEFINED};
+ struct device_pools *sec_pool = NULL;
+ struct dma_pool *pool = NULL, **ptr;
+ unsigned i;
+ int ret = -ENODEV;
+ char *p;
+
+ if (!dev)
+ return NULL;
+
+ ptr = devres_alloc(ttm_dma_pool_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return NULL;
+
+ ret = -ENOMEM;
+
+ pool = kmalloc_node(sizeof(struct dma_pool), GFP_KERNEL,
+ dev_to_node(dev));
+ if (!pool)
+ goto err_mem;
+
+ sec_pool = kmalloc_node(sizeof(struct device_pools), GFP_KERNEL,
+ dev_to_node(dev));
+ if (!sec_pool)
+ goto err_mem;
+
+ INIT_LIST_HEAD(&sec_pool->pools);
+ sec_pool->dev = dev;
+ sec_pool->pool = pool;
+
+ INIT_LIST_HEAD(&pool->free_list);
+ INIT_LIST_HEAD(&pool->inuse_list);
+ INIT_LIST_HEAD(&pool->pools);
+ spin_lock_init(&pool->lock);
+ pool->dev = dev;
+ pool->npages_free = pool->npages_in_use = 0;
+ pool->nfrees = 0;
+ pool->gfp_flags = flags;
+ pool->size = PAGE_SIZE;
+ pool->type = type;
+ pool->nrefills = 0;
+ p = pool->name;
+ for (i = 0; i < 5; i++) {
+ if (type & t[i]) {
+ p += snprintf(p, sizeof(pool->name) - (p - pool->name),
+ "%s", n[i]);
+ }
+ }
+ *p = 0;
+ /* We copy the name for pr_ calls b/c when dma_pool_destroy is called
+ * - the kobj->name has already been deallocated.*/
+ snprintf(pool->dev_name, sizeof(pool->dev_name), "%s %s",
+ dev_driver_string(dev), dev_name(dev));
+ mutex_lock(&_manager->lock);
+ /* You can get the dma_pool from either the global: */
+ list_add(&sec_pool->pools, &_manager->pools);
+ _manager->npools++;
+ /* or from 'struct device': */
+ list_add(&pool->pools, &dev->dma_pools);
+ mutex_unlock(&_manager->lock);
+
+ *ptr = pool;
+ devres_add(dev, ptr);
+
+ return pool;
+err_mem:
+ devres_free(ptr);
+ kfree(sec_pool);
+ kfree(pool);
+ return ERR_PTR(ret);
+}
+
+static struct dma_pool *ttm_dma_find_pool(struct device *dev,
+ enum pool_type type)
+{
+ struct dma_pool *pool, *tmp, *found = NULL;
+
+ if (type == IS_UNDEFINED)
+ return found;
+
+ /* NB: We iterate on the 'struct dev' which has no spinlock, but
+ * it does have a kref which we have taken. The kref is taken during
+ * graphic driver loading - in the drm_pci_init it calls either
+ * pci_dev_get or pci_register_driver which both end up taking a kref
+ * on 'struct device'.
+ *
+ * On teardown, the graphic drivers end up quiescing the TTM (put_pages)
+ * and calls the dev_res deconstructors: ttm_dma_pool_release. The nice
+ * thing is at that point of time there are no pages associated with the
+ * driver so this function will not be called.
+ */
+ list_for_each_entry_safe(pool, tmp, &dev->dma_pools, pools) {
+ if (pool->type != type)
+ continue;
+ found = pool;
+ break;
+ }
+ return found;
+}
+
+/*
+ * Free the pages that failed to change their caching state. If there
+ * are pages that have already changed their caching state, put them back
+ * in the pool.
+ */
+static void ttm_dma_handle_caching_state_failure(struct dma_pool *pool,
+ struct list_head *d_pages,
+ struct page **failed_pages,
+ unsigned cpages)
+{
+ struct dma_page *d_page, *tmp;
+ struct page *p;
+ unsigned i = 0;
+
+ p = failed_pages[0];
+ if (!p)
+ return;
+ /* Find the failed page. */
+ list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
+ if (d_page->p != p)
+ continue;
+ /* .. and then progress over the full list. */
+ list_del(&d_page->page_list);
+ __ttm_dma_free_page(pool, d_page);
+ if (++i < cpages)
+ p = failed_pages[i];
+ else
+ break;
+ }
+
+}
+
+/*
+ * Allocate 'count' pages, set the requested caching state on them, and
+ * link them onto the 'd_pages' list; the caller later moves them into
+ * the pool's free list.
+ * We return zero for success, and negative numbers as errors.
+ */
+static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
+ struct list_head *d_pages,
+ unsigned count)
+{
+ struct page **caching_array;
+ struct dma_page *dma_p;
+ struct page *p;
+ int r = 0;
+ unsigned i, cpages;
+ unsigned max_cpages = min(count,
+ (unsigned)(PAGE_SIZE/sizeof(struct page *)));
+
+ /* allocate array for page caching change */
+ caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
+
+ if (!caching_array) {
+ pr_err(TTM_PFX
+ "%s: Unable to allocate table for new pages.",
+ pool->dev_name);
+ return -ENOMEM;
+ }
+
+ if (count > 1) {
+ pr_debug("%s: (%s:%d) Getting %d pages\n",
+ pool->dev_name, pool->name, current->pid,
+ count);
+ }
+
+ for (i = 0, cpages = 0; i < count; ++i) {
+ dma_p = __ttm_dma_alloc_page(pool);
+ if (!dma_p) {
+ pr_err(TTM_PFX "%s: Unable to get page %u.\n",
+ pool->dev_name, i);
+
+ /* store already allocated pages in the pool after
+ * setting the caching state */
+ if (cpages) {
+ r = ttm_set_pages_caching(pool, caching_array,
+ cpages);
+ if (r)
+ ttm_dma_handle_caching_state_failure(
+ pool, d_pages, caching_array,
+ cpages);
+ }
+ r = -ENOMEM;
+ goto out;
+ }
+ p = dma_p->p;
+#ifdef CONFIG_HIGHMEM
+ /* gfp flags of highmem page should never be dma32 so we
+ * should be fine in such a case
+ */
+ if (!PageHighMem(p))
+#endif
+ {
+ caching_array[cpages++] = p;
+ if (cpages == max_cpages) {
+ /* Note: Cannot hold the spinlock */
+ r = ttm_set_pages_caching(pool, caching_array,
+ cpages);
+ if (r) {
+ ttm_dma_handle_caching_state_failure(
+ pool, d_pages, caching_array,
+ cpages);
+ goto out;
+ }
+ cpages = 0;
+ }
+ }
+ list_add(&dma_p->page_list, d_pages);
+ }
+
+ if (cpages) {
+ r = ttm_set_pages_caching(pool, caching_array, cpages);
+ if (r)
+ ttm_dma_handle_caching_state_failure(pool, d_pages,
+ caching_array, cpages);
+ }
+out:
+ kfree(caching_array);
+ return r;
+}
+
+/*
+ * @return number of pages available in the pool; zero if the refill failed.
+ */
+static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool,
+ unsigned long *irq_flags)
+{
+ unsigned count = _manager->options.small;
+ int r = pool->npages_free;
+
+ if (count > pool->npages_free) {
+ struct list_head d_pages;
+
+ INIT_LIST_HEAD(&d_pages);
+
+ spin_unlock_irqrestore(&pool->lock, *irq_flags);
+
+ /* Returns how many more are neccessary to fulfill the
+ * request. */
+ r = ttm_dma_pool_alloc_new_pages(pool, &d_pages, count);
+
+ spin_lock_irqsave(&pool->lock, *irq_flags);
+ if (!r) {
+ /* Add the fresh to the end.. */
+ list_splice(&d_pages, &pool->free_list);
+ ++pool->nrefills;
+ pool->npages_free += count;
+ r = count;
+ } else {
+ struct dma_page *d_page;
+ unsigned cpages = 0;
+
+ pr_err(TTM_PFX "%s: Failed to fill %s pool (r:%d)!\n",
+ pool->dev_name, pool->name, r);
+
+ list_for_each_entry(d_page, &d_pages, page_list) {
+ cpages++;
+ }
+ list_splice_tail(&d_pages, &pool->free_list);
+ pool->npages_free += cpages;
+ r = cpages;
+ }
+ }
+ return r;
+}
+
+/*
+ * @return 0 on success, or a negative error code if no page could be taken.
+ * The populate list is actually a stack (not that it matters, as TTM
+ * allocates one page at a time).
+ */
+static int ttm_dma_pool_get_pages(struct dma_pool *pool,
+ struct ttm_dma_tt *ttm_dma,
+ unsigned index)
+{
+ struct dma_page *d_page;
+ struct ttm_tt *ttm = &ttm_dma->ttm;
+ unsigned long irq_flags;
+ int count, r = -ENOMEM;
+
+ spin_lock_irqsave(&pool->lock, irq_flags);
+ count = ttm_dma_page_pool_fill_locked(pool, &irq_flags);
+ if (count) {
+ d_page = list_first_entry(&pool->free_list, struct dma_page, page_list);
+ ttm->pages[index] = d_page->p;
+ ttm_dma->dma_address[index] = d_page->dma;
+ list_move_tail(&d_page->page_list, &ttm_dma->pages_list);
+ r = 0;
+ pool->npages_in_use += 1;
+ pool->npages_free -= 1;
+ }
+ spin_unlock_irqrestore(&pool->lock, irq_flags);
+ return r;
+}
+
+/*
+ * On success the pages list will hold 'count' correctly cached pages.
+ * On failure the negative error value (-ENOMEM, etc.) is returned.
+ */
+int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
+{
+ struct ttm_tt *ttm = &ttm_dma->ttm;
+ struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
+ struct dma_pool *pool;
+ enum pool_type type;
+ unsigned i;
+ gfp_t gfp_flags;
+ int ret;
+
+ if (ttm->state != tt_unpopulated)
+ return 0;
+
+ type = ttm_to_type(ttm->page_flags, ttm->caching_state);
+ if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
+ gfp_flags = GFP_USER | GFP_DMA32;
+ else
+ gfp_flags = GFP_HIGHUSER;
+ if (ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
+ gfp_flags |= __GFP_ZERO;
+
+ pool = ttm_dma_find_pool(dev, type);
+ if (!pool) {
+ pool = ttm_dma_pool_init(dev, gfp_flags, type);
+ if (IS_ERR_OR_NULL(pool)) {
+ return -ENOMEM;
+ }
+ }
+
+ INIT_LIST_HEAD(&ttm_dma->pages_list);
+ for (i = 0; i < ttm->num_pages; ++i) {
+ ret = ttm_dma_pool_get_pages(pool, ttm_dma, i);
+ if (ret != 0) {
+ ttm_dma_unpopulate(ttm_dma, dev);
+ return -ENOMEM;
+ }
+
+ ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
+ false, false);
+ if (unlikely(ret != 0)) {
+ ttm_dma_unpopulate(ttm_dma, dev);
+ return -ENOMEM;
+ }
+ }
+
+ if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
+ ret = ttm_tt_swapin(ttm);
+ if (unlikely(ret != 0)) {
+ ttm_dma_unpopulate(ttm_dma, dev);
+ return ret;
+ }
+ }
+
+ ttm->state = tt_unbound;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ttm_dma_populate);
+
+/* Get a good estimate of how many pages are free in the pools */
+static int ttm_dma_pool_get_num_unused_pages(void)
+{
+ struct device_pools *p;
+ unsigned total = 0;
+
+ mutex_lock(&_manager->lock);
+ list_for_each_entry(p, &_manager->pools, pools)
+ total += p->pool->npages_free;
+ mutex_unlock(&_manager->lock);
+ return total;
+}
+
+/* Put all pages in pages list to correct pool to wait for reuse */
+void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
+{
+ struct ttm_tt *ttm = &ttm_dma->ttm;
+ struct dma_pool *pool;
+ struct dma_page *d_page, *next;
+ enum pool_type type;
+ bool is_cached = false;
+ unsigned count = 0, i, npages = 0;
+ unsigned long irq_flags;
+
+ type = ttm_to_type(ttm->page_flags, ttm->caching_state);
+ pool = ttm_dma_find_pool(dev, type);
+ if (!pool)
+ return;
+
+ is_cached = (ttm_dma_find_pool(pool->dev,
+ ttm_to_type(ttm->page_flags, tt_cached)) == pool);
+
+ /* make sure the pages array matches the list and count the number of pages */
+ list_for_each_entry(d_page, &ttm_dma->pages_list, page_list) {
+ ttm->pages[count] = d_page->p;
+ count++;
+ }
+
+ spin_lock_irqsave(&pool->lock, irq_flags);
+ pool->npages_in_use -= count;
+ if (is_cached) {
+ pool->nfrees += count;
+ } else {
+ pool->npages_free += count;
+ list_splice(&ttm_dma->pages_list, &pool->free_list);
+ npages = count;
+ if (pool->npages_free > _manager->options.max_size) {
+ npages = pool->npages_free - _manager->options.max_size;
+ /* free at least NUM_PAGES_TO_ALLOC number of pages
+ * to reduce calls to set_memory_wb */
+ if (npages < NUM_PAGES_TO_ALLOC)
+ npages = NUM_PAGES_TO_ALLOC;
+ }
+ }
+ spin_unlock_irqrestore(&pool->lock, irq_flags);
+
+ if (is_cached) {
+ list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list, page_list) {
+ ttm_mem_global_free_page(ttm->glob->mem_glob,
+ d_page->p);
+ ttm_dma_page_put(pool, d_page);
+ }
+ } else {
+ for (i = 0; i < count; i++) {
+ ttm_mem_global_free_page(ttm->glob->mem_glob,
+ ttm->pages[i]);
+ }
+ }
+
+ INIT_LIST_HEAD(&ttm_dma->pages_list);
+ for (i = 0; i < ttm->num_pages; i++) {
+ ttm->pages[i] = NULL;
+ ttm_dma->dma_address[i] = 0;
+ }
+
+ /* shrink pool if necessary (only on !is_cached pools)*/
+ if (npages)
+ ttm_dma_page_pool_free(pool, npages);
+ ttm->state = tt_unpopulated;
+}
+EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
+
+/**
+ * Callback for mm to request that the pool reduce the number of pages held.
+ */
+static int ttm_dma_pool_mm_shrink(struct shrinker *shrink,
+ struct shrink_control *sc)
+{
+ static atomic_t start_pool = ATOMIC_INIT(0);
+ unsigned idx = 0;
+ unsigned pool_offset = atomic_add_return(1, &start_pool);
+ unsigned shrink_pages = sc->nr_to_scan;
+ struct device_pools *p;
+
+ if (list_empty(&_manager->pools))
+ return 0;
+
+ mutex_lock(&_manager->lock);
+ pool_offset = pool_offset % _manager->npools;
+ list_for_each_entry(p, &_manager->pools, pools) {
+ unsigned nr_free;
+
+ if (!p->dev)
+ continue;
+ if (shrink_pages == 0)
+ break;
+ /* Do it in round-robin fashion. */
+ if (++idx < pool_offset)
+ continue;
+ nr_free = shrink_pages;
+ shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free);
+ pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
+ p->pool->dev_name, p->pool->name, current->pid, nr_free,
+ shrink_pages);
+ }
+ mutex_unlock(&_manager->lock);
+ /* return estimated number of unused pages in pool */
+ return ttm_dma_pool_get_num_unused_pages();
+}
+
+static void ttm_dma_pool_mm_shrink_init(struct ttm_pool_manager *manager)
+{
+ manager->mm_shrink.shrink = &ttm_dma_pool_mm_shrink;
+ manager->mm_shrink.seeks = 1;
+ register_shrinker(&manager->mm_shrink);
+}
+
+static void ttm_dma_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
+{
+ unregister_shrinker(&manager->mm_shrink);
+}
+
+int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
+{
+ int ret = -ENOMEM;
+
+ WARN_ON(_manager);
+
+ printk(KERN_INFO TTM_PFX "Initializing DMA pool allocator.\n");
+
+ _manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
+ if (!_manager)
+ goto err_manager;
+
+ mutex_init(&_manager->lock);
+ INIT_LIST_HEAD(&_manager->pools);
+
+ _manager->options.max_size = max_pages;
+ _manager->options.small = SMALL_ALLOCATION;
+ _manager->options.alloc_size = NUM_PAGES_TO_ALLOC;
+
+ /* This takes care of auto-freeing the _manager */
+ ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
+ &glob->kobj, "dma_pool");
+ if (unlikely(ret != 0)) {
+ kobject_put(&_manager->kobj);
+ goto err;
+ }
+ ttm_dma_pool_mm_shrink_init(_manager);
+ return 0;
+err_manager:
+ kfree(_manager);
+ _manager = NULL;
+err:
+ return ret;
+}
+
+void ttm_dma_page_alloc_fini(void)
+{
+ struct device_pools *p, *t;
+
+ printk(KERN_INFO TTM_PFX "Finalizing DMA pool allocator.\n");
+ ttm_dma_pool_mm_shrink_fini(_manager);
+
+ list_for_each_entry_safe_reverse(p, t, &_manager->pools, pools) {
+ dev_dbg(p->dev, "(%s:%d) Freeing.\n", p->pool->name,
+ current->pid);
+ WARN_ON(devres_destroy(p->dev, ttm_dma_pool_release,
+ ttm_dma_pool_match, p->pool));
+ ttm_dma_free_pool(p->dev, p->pool->type);
+ }
+ kobject_put(&_manager->kobj);
+ _manager = NULL;
+}
+
+int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
+{
+ struct device_pools *p;
+ struct dma_pool *pool = NULL;
+ char *h[] = {"pool", "refills", "pages freed", "inuse", "available",
+ "name", "virt", "busaddr"};
+
+ if (!_manager) {
+ seq_printf(m, "No pool allocator running.\n");
+ return 0;
+ }
+ seq_printf(m, "%13s %12s %13s %8s %8s %8s\n",
+ h[0], h[1], h[2], h[3], h[4], h[5]);
+ mutex_lock(&_manager->lock);
+ list_for_each_entry(p, &_manager->pools, pools) {
+ struct device *dev = p->dev;
+ if (!dev)
+ continue;
+ pool = p->pool;
+ seq_printf(m, "%13s %12ld %13ld %8d %8d %8s\n",
+ pool->name, pool->nrefills,
+ pool->nfrees, pool->npages_in_use,
+ pool->npages_free,
+ pool->dev_name);
+ }
+ mutex_unlock(&_manager->lock);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ttm_dma_page_alloc_debugfs);
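
The DMA pool is consumed through ttm_dma_populate()/ttm_dma_unpopulate(), which take the new struct ttm_dma_tt plus the struct device the pages are mapped for. The sketch below is an assumption: struct my_dma_ttm (a driver wrapper that embeds ttm_dma_tt and caches the device pointer) is hypothetical; only the two populate calls come from this file.

    struct my_dma_ttm {
            struct ttm_dma_tt dma;  /* embeds struct ttm_tt as .ttm */
            struct device *dev;     /* device the pages are DMA-mapped for */
    };

    static int my_dma_tt_populate(struct ttm_tt *ttm)
    {
            struct my_dma_ttm *mt = container_of(ttm, struct my_dma_ttm, dma.ttm);

            return ttm_dma_populate(&mt->dma, mt->dev);
    }

    static void my_dma_tt_unpopulate(struct ttm_tt *ttm)
    {
            struct my_dma_ttm *mt = container_of(ttm, struct my_dma_ttm, dma.ttm);

            ttm_dma_unpopulate(&mt->dma, mt->dev);
    }
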
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index f9cc548d6d98..2f75d203a2bf 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -43,139 +43,20 @@
#include "ttm/ttm_placement.h"
#include "ttm/ttm_page_alloc.h"
-static int ttm_tt_swapin(struct ttm_tt *ttm);
-
/**
* Allocates storage for pointers to the pages that back the ttm.
*/
static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
- ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(*ttm->pages));
- ttm->dma_address = drm_calloc_large(ttm->num_pages,
- sizeof(*ttm->dma_address));
-}
-
-static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
-{
- drm_free_large(ttm->pages);
- ttm->pages = NULL;
- drm_free_large(ttm->dma_address);
- ttm->dma_address = NULL;
-}
-
-static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
-{
- int write;
- int dirty;
- struct page *page;
- int i;
- struct ttm_backend *be = ttm->be;
-
- BUG_ON(!(ttm->page_flags & TTM_PAGE_FLAG_USER));
- write = ((ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0);
- dirty = ((ttm->page_flags & TTM_PAGE_FLAG_USER_DIRTY) != 0);
-
- if (be)
- be->func->clear(be);
-
- for (i = 0; i < ttm->num_pages; ++i) {
- page = ttm->pages[i];
- if (page == NULL)
- continue;
-
- if (page == ttm->dummy_read_page) {
- BUG_ON(write);
- continue;
- }
-
- if (write && dirty && !PageReserved(page))
- set_page_dirty_lock(page);
-
- ttm->pages[i] = NULL;
- ttm_mem_global_free(ttm->glob->mem_glob, PAGE_SIZE);
- put_page(page);
- }
- ttm->state = tt_unpopulated;
- ttm->first_himem_page = ttm->num_pages;
- ttm->last_lomem_page = -1;
+ ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(void*));
}
-static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
+static void ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
- struct page *p;
- struct list_head h;
- struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
- int ret;
-
- while (NULL == (p = ttm->pages[index])) {
-
- INIT_LIST_HEAD(&h);
-
- ret = ttm_get_pages(&h, ttm->page_flags, ttm->caching_state, 1,
- &ttm->dma_address[index]);
-
- if (ret != 0)
- return NULL;
-
- p = list_first_entry(&h, struct page, lru);
-
- ret = ttm_mem_global_alloc_page(mem_glob, p, false, false);
- if (unlikely(ret != 0))
- goto out_err;
-
- if (PageHighMem(p))
- ttm->pages[--ttm->first_himem_page] = p;
- else
- ttm->pages[++ttm->last_lomem_page] = p;
- }
- return p;
-out_err:
- put_page(p);
- return NULL;
-}
-
-struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index)
-{
- int ret;
-
- if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
- ret = ttm_tt_swapin(ttm);
- if (unlikely(ret != 0))
- return NULL;
- }
- return __ttm_tt_get_page(ttm, index);
-}
-
-int ttm_tt_populate(struct ttm_tt *ttm)
-{
- struct page *page;
- unsigned long i;
- struct ttm_backend *be;
- int ret;
-
- if (ttm->state != tt_unpopulated)
- return 0;
-
- if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
- ret = ttm_tt_swapin(ttm);
- if (unlikely(ret != 0))
- return ret;
- }
-
- be = ttm->be;
-
- for (i = 0; i < ttm->num_pages; ++i) {
- page = __ttm_tt_get_page(ttm, i);
- if (!page)
- return -ENOMEM;
- }
-
- be->func->populate(be, ttm->num_pages, ttm->pages,
- ttm->dummy_read_page, ttm->dma_address);
- ttm->state = tt_unbound;
- return 0;
+ ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages, sizeof(void*));
+ ttm->dma_address = drm_calloc_large(ttm->ttm.num_pages,
+ sizeof(*ttm->dma_address));
}
-EXPORT_SYMBOL(ttm_tt_populate);
#ifdef CONFIG_X86
static inline int ttm_tt_set_page_caching(struct page *p,
@@ -278,153 +159,100 @@ int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
}
EXPORT_SYMBOL(ttm_tt_set_placement_caching);
-static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
-{
- int i;
- unsigned count = 0;
- struct list_head h;
- struct page *cur_page;
- struct ttm_backend *be = ttm->be;
-
- INIT_LIST_HEAD(&h);
-
- if (be)
- be->func->clear(be);
- for (i = 0; i < ttm->num_pages; ++i) {
-
- cur_page = ttm->pages[i];
- ttm->pages[i] = NULL;
- if (cur_page) {
- if (page_count(cur_page) != 1)
- printk(KERN_ERR TTM_PFX
- "Erroneous page count. "
- "Leaking pages.\n");
- ttm_mem_global_free_page(ttm->glob->mem_glob,
- cur_page);
- list_add(&cur_page->lru, &h);
- count++;
- }
- }
- ttm_put_pages(&h, count, ttm->page_flags, ttm->caching_state,
- ttm->dma_address);
- ttm->state = tt_unpopulated;
- ttm->first_himem_page = ttm->num_pages;
- ttm->last_lomem_page = -1;
-}
-
void ttm_tt_destroy(struct ttm_tt *ttm)
{
- struct ttm_backend *be;
-
if (unlikely(ttm == NULL))
return;
- be = ttm->be;
- if (likely(be != NULL)) {
- be->func->destroy(be);
- ttm->be = NULL;
+ if (ttm->state == tt_bound) {
+ ttm_tt_unbind(ttm);
}
if (likely(ttm->pages != NULL)) {
- if (ttm->page_flags & TTM_PAGE_FLAG_USER)
- ttm_tt_free_user_pages(ttm);
- else
- ttm_tt_free_alloced_pages(ttm);
-
- ttm_tt_free_page_directory(ttm);
+ ttm->bdev->driver->ttm_tt_unpopulate(ttm);
}
if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
ttm->swap_storage)
fput(ttm->swap_storage);
- kfree(ttm);
+ ttm->swap_storage = NULL;
+ ttm->func->destroy(ttm);
}
-int ttm_tt_set_user(struct ttm_tt *ttm,
- struct task_struct *tsk,
- unsigned long start, unsigned long num_pages)
+int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
+ unsigned long size, uint32_t page_flags,
+ struct page *dummy_read_page)
{
- struct mm_struct *mm = tsk->mm;
- int ret;
- int write = (ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0;
- struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
-
- BUG_ON(num_pages != ttm->num_pages);
- BUG_ON((ttm->page_flags & TTM_PAGE_FLAG_USER) == 0);
-
- /**
- * Account user pages as lowmem pages for now.
- */
-
- ret = ttm_mem_global_alloc(mem_glob, num_pages * PAGE_SIZE,
- false, false);
- if (unlikely(ret != 0))
- return ret;
-
- down_read(&mm->mmap_sem);
- ret = get_user_pages(tsk, mm, start, num_pages,
- write, 0, ttm->pages, NULL);
- up_read(&mm->mmap_sem);
+ ttm->bdev = bdev;
+ ttm->glob = bdev->glob;
+ ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ ttm->caching_state = tt_cached;
+ ttm->page_flags = page_flags;
+ ttm->dummy_read_page = dummy_read_page;
+ ttm->state = tt_unpopulated;
+ ttm->swap_storage = NULL;
- if (ret != num_pages && write) {
- ttm_tt_free_user_pages(ttm);
- ttm_mem_global_free(mem_glob, num_pages * PAGE_SIZE);
+ ttm_tt_alloc_page_directory(ttm);
+ if (!ttm->pages) {
+ ttm_tt_destroy(ttm);
+ printk(KERN_ERR TTM_PFX "Failed allocating page table\n");
return -ENOMEM;
}
-
- ttm->tsk = tsk;
- ttm->start = start;
- ttm->state = tt_unbound;
-
return 0;
}
+EXPORT_SYMBOL(ttm_tt_init);
-struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
- uint32_t page_flags, struct page *dummy_read_page)
+void ttm_tt_fini(struct ttm_tt *ttm)
{
- struct ttm_bo_driver *bo_driver = bdev->driver;
- struct ttm_tt *ttm;
-
- if (!bo_driver)
- return NULL;
+ drm_free_large(ttm->pages);
+ ttm->pages = NULL;
+}
+EXPORT_SYMBOL(ttm_tt_fini);
- ttm = kzalloc(sizeof(*ttm), GFP_KERNEL);
- if (!ttm)
- return NULL;
+int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
+ unsigned long size, uint32_t page_flags,
+ struct page *dummy_read_page)
+{
+ struct ttm_tt *ttm = &ttm_dma->ttm;
+ ttm->bdev = bdev;
ttm->glob = bdev->glob;
ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
- ttm->first_himem_page = ttm->num_pages;
- ttm->last_lomem_page = -1;
ttm->caching_state = tt_cached;
ttm->page_flags = page_flags;
-
ttm->dummy_read_page = dummy_read_page;
+ ttm->state = tt_unpopulated;
+ ttm->swap_storage = NULL;
- ttm_tt_alloc_page_directory(ttm);
- if (!ttm->pages) {
+ INIT_LIST_HEAD(&ttm_dma->pages_list);
+ ttm_dma_tt_alloc_page_directory(ttm_dma);
+ if (!ttm->pages || !ttm_dma->dma_address) {
ttm_tt_destroy(ttm);
printk(KERN_ERR TTM_PFX "Failed allocating page table\n");
- return NULL;
- }
- ttm->be = bo_driver->create_ttm_backend_entry(bdev);
- if (!ttm->be) {
- ttm_tt_destroy(ttm);
- printk(KERN_ERR TTM_PFX "Failed creating ttm backend entry\n");
- return NULL;
+ return -ENOMEM;
}
- ttm->state = tt_unpopulated;
- return ttm;
+ return 0;
}
+EXPORT_SYMBOL(ttm_dma_tt_init);
+
+void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
+{
+ struct ttm_tt *ttm = &ttm_dma->ttm;
+
+ drm_free_large(ttm->pages);
+ ttm->pages = NULL;
+ drm_free_large(ttm_dma->dma_address);
+ ttm_dma->dma_address = NULL;
+}
+EXPORT_SYMBOL(ttm_dma_tt_fini);
void ttm_tt_unbind(struct ttm_tt *ttm)
{
int ret;
- struct ttm_backend *be = ttm->be;
if (ttm->state == tt_bound) {
- ret = be->func->unbind(be);
+ ret = ttm->func->unbind(ttm);
BUG_ON(ret);
ttm->state = tt_unbound;
}
@@ -433,7 +261,6 @@ void ttm_tt_unbind(struct ttm_tt *ttm)
int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
int ret = 0;
- struct ttm_backend *be;
if (!ttm)
return -EINVAL;
@@ -441,25 +268,21 @@ int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
if (ttm->state == tt_bound)
return 0;
- be = ttm->be;
-
- ret = ttm_tt_populate(ttm);
+ ret = ttm->bdev->driver->ttm_tt_populate(ttm);
if (ret)
return ret;
- ret = be->func->bind(be, bo_mem);
+ ret = ttm->func->bind(ttm, bo_mem);
if (unlikely(ret != 0))
return ret;
ttm->state = tt_bound;
- if (ttm->page_flags & TTM_PAGE_FLAG_USER)
- ttm->page_flags |= TTM_PAGE_FLAG_USER_DIRTY;
return 0;
}
EXPORT_SYMBOL(ttm_tt_bind);
-static int ttm_tt_swapin(struct ttm_tt *ttm)
+int ttm_tt_swapin(struct ttm_tt *ttm)
{
struct address_space *swap_space;
struct file *swap_storage;
@@ -470,16 +293,6 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
int i;
int ret = -ENOMEM;
- if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
- ret = ttm_tt_set_user(ttm, ttm->tsk, ttm->start,
- ttm->num_pages);
- if (unlikely(ret != 0))
- return ret;
-
- ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
- return 0;
- }
-
swap_storage = ttm->swap_storage;
BUG_ON(swap_storage == NULL);
@@ -491,7 +304,7 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
ret = PTR_ERR(from_page);
goto out_err;
}
- to_page = __ttm_tt_get_page(ttm, i);
+ to_page = ttm->pages[i];
if (unlikely(to_page == NULL))
goto out_err;
@@ -512,7 +325,6 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
return 0;
out_err:
- ttm_tt_free_alloced_pages(ttm);
return ret;
}
@@ -530,18 +342,6 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
BUG_ON(ttm->caching_state != tt_cached);
- /*
- * For user buffers, just unpin the pages, as there should be
- * vma references.
- */
-
- if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
- ttm_tt_free_user_pages(ttm);
- ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
- ttm->swap_storage = NULL;
- return 0;
- }
-
if (!persistent_swap_storage) {
swap_storage = shmem_file_setup("ttm swap",
ttm->num_pages << PAGE_SHIFT,
@@ -576,7 +376,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
page_cache_release(to_page);
}
- ttm_tt_free_alloced_pages(ttm);
+ ttm->bdev->driver->ttm_tt_unpopulate(ttm);
ttm->swap_storage = swap_storage;
ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
if (persistent_swap_storage)
diff --git a/drivers/gpu/drm/via/via_drv.c b/drivers/gpu/drm/via/via_drv.c
index a83e86d3956c..02661f35f7a0 100644
--- a/drivers/gpu/drm/via/via_drv.c
+++ b/drivers/gpu/drm/via/via_drv.c
@@ -30,16 +30,52 @@
#include "drm_pciids.h"
+static int via_driver_open(struct drm_device *dev, struct drm_file *file)
+{
+ struct via_file_private *file_priv;
+
+ DRM_DEBUG_DRIVER("\n");
+ file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL);
+ if (!file_priv)
+ return -ENOMEM;
+
+ file->driver_priv = file_priv;
+
+ INIT_LIST_HEAD(&file_priv->obj_list);
+
+ return 0;
+}
+
+void via_driver_postclose(struct drm_device *dev, struct drm_file *file)
+{
+ struct via_file_private *file_priv = file->driver_priv;
+
+ kfree(file_priv);
+}
+
static struct pci_device_id pciidlist[] = {
viadrv_PCI_IDS
};
+static const struct file_operations via_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = drm_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+ .llseek = noop_llseek,
+};
+
static struct drm_driver driver = {
.driver_features =
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_IRQ |
DRIVER_IRQ_SHARED,
.load = via_driver_load,
.unload = via_driver_unload,
+ .open = via_driver_open,
+ .postclose = via_driver_postclose,
.context_dtor = via_final_context,
.get_vblank_counter = via_get_vblank_counter,
.enable_vblank = via_enable_vblank,
@@ -54,17 +90,7 @@ static struct drm_driver driver = {
.reclaim_buffers_idlelocked = via_reclaim_buffers_locked,
.lastclose = via_lastclose,
.ioctls = via_ioctls,
- .fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = drm_mmap,
- .poll = drm_poll,
- .fasync = drm_fasync,
- .llseek = noop_llseek,
- },
-
+ .fops = &via_driver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
index 9cf87d912325..88edacc93006 100644
--- a/drivers/gpu/drm/via/via_drv.h
+++ b/drivers/gpu/drm/via/via_drv.h
@@ -24,7 +24,7 @@
#ifndef _VIA_DRV_H_
#define _VIA_DRV_H_
-#include "drm_sman.h"
+#include "drm_mm.h"
#define DRIVER_AUTHOR "Various"
#define DRIVER_NAME "via"
@@ -88,9 +88,12 @@ typedef struct drm_via_private {
uint32_t irq_pending_mask;
int *irq_map;
unsigned int idle_fault;
- struct drm_sman sman;
int vram_initialized;
+ struct drm_mm vram_mm;
int agp_initialized;
+ struct drm_mm agp_mm;
+ /** Mapping of userspace keys to mm objects */
+ struct idr object_idr;
unsigned long vram_offset;
unsigned long agp_offset;
drm_via_blitq_t blit_queues[VIA_NUM_BLIT_ENGINES];
diff --git a/drivers/gpu/drm/via/via_map.c b/drivers/gpu/drm/via/via_map.c
index 6cca9a709f7a..a2ab34365151 100644
--- a/drivers/gpu/drm/via/via_map.c
+++ b/drivers/gpu/drm/via/via_map.c
@@ -104,15 +104,10 @@ int via_driver_load(struct drm_device *dev, unsigned long chipset)
dev_priv->chipset = chipset;
- ret = drm_sman_init(&dev_priv->sman, 2, 12, 8);
- if (ret) {
- kfree(dev_priv);
- return ret;
- }
+ idr_init(&dev->object_name_idr);
ret = drm_vblank_init(dev, 1);
if (ret) {
- drm_sman_takedown(&dev_priv->sman);
kfree(dev_priv);
return ret;
}
@@ -124,7 +119,8 @@ int via_driver_unload(struct drm_device *dev)
{
drm_via_private_t *dev_priv = dev->dev_private;
- drm_sman_takedown(&dev_priv->sman);
+ idr_remove_all(&dev_priv->object_idr);
+ idr_destroy(&dev_priv->object_idr);
kfree(dev_priv);
diff --git a/drivers/gpu/drm/via/via_mm.c b/drivers/gpu/drm/via/via_mm.c
index 6cc2dadae3ef..a3574d09a07d 100644
--- a/drivers/gpu/drm/via/via_mm.c
+++ b/drivers/gpu/drm/via/via_mm.c
@@ -28,26 +28,22 @@
#include "drmP.h"
#include "via_drm.h"
#include "via_drv.h"
-#include "drm_sman.h"
#define VIA_MM_ALIGN_SHIFT 4
#define VIA_MM_ALIGN_MASK ((1 << VIA_MM_ALIGN_SHIFT) - 1)
+struct via_memblock {
+ struct drm_mm_node mm_node;
+ struct list_head owner_list;
+};
+
int via_agp_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_via_agp_t *agp = data;
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
- int ret;
mutex_lock(&dev->struct_mutex);
- ret = drm_sman_set_range(&dev_priv->sman, VIA_MEM_AGP, 0,
- agp->size >> VIA_MM_ALIGN_SHIFT);
-
- if (ret) {
- DRM_ERROR("AGP memory manager initialisation error\n");
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
+ drm_mm_init(&dev_priv->agp_mm, 0, agp->size >> VIA_MM_ALIGN_SHIFT);
dev_priv->agp_initialized = 1;
dev_priv->agp_offset = agp->offset;
@@ -61,17 +57,9 @@ int via_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_via_fb_t *fb = data;
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
- int ret;
mutex_lock(&dev->struct_mutex);
- ret = drm_sman_set_range(&dev_priv->sman, VIA_MEM_VIDEO, 0,
- fb->size >> VIA_MM_ALIGN_SHIFT);
-
- if (ret) {
- DRM_ERROR("VRAM memory manager initialisation error\n");
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
+ drm_mm_init(&dev_priv->vram_mm, 0, fb->size >> VIA_MM_ALIGN_SHIFT);
dev_priv->vram_initialized = 1;
dev_priv->vram_offset = fb->offset;
@@ -108,19 +96,25 @@ void via_lastclose(struct drm_device *dev)
return;
mutex_lock(&dev->struct_mutex);
- drm_sman_cleanup(&dev_priv->sman);
- dev_priv->vram_initialized = 0;
- dev_priv->agp_initialized = 0;
+ if (dev_priv->vram_initialized) {
+ drm_mm_takedown(&dev_priv->vram_mm);
+ dev_priv->vram_initialized = 0;
+ }
+ if (dev_priv->agp_initialized) {
+ drm_mm_takedown(&dev_priv->agp_mm);
+ dev_priv->agp_initialized = 0;
+ }
mutex_unlock(&dev->struct_mutex);
}
int via_mem_alloc(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
drm_via_mem_t *mem = data;
- int retval = 0;
- struct drm_memblock_item *item;
+ int retval = 0, user_key;
+ struct via_memblock *item;
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+ struct via_file_private *file_priv = file->driver_priv;
unsigned long tmpSize;
if (mem->type > VIA_MEM_AGP) {
@@ -136,24 +130,57 @@ int via_mem_alloc(struct drm_device *dev, void *data,
return -EINVAL;
}
+ item = kzalloc(sizeof(*item), GFP_KERNEL);
+ if (!item) {
+ retval = -ENOMEM;
+ goto fail_alloc;
+ }
+
tmpSize = (mem->size + VIA_MM_ALIGN_MASK) >> VIA_MM_ALIGN_SHIFT;
- item = drm_sman_alloc(&dev_priv->sman, mem->type, tmpSize, 0,
- (unsigned long)file_priv);
- mutex_unlock(&dev->struct_mutex);
- if (item) {
- mem->offset = ((mem->type == VIA_MEM_VIDEO) ?
- dev_priv->vram_offset : dev_priv->agp_offset) +
- (item->mm->
- offset(item->mm, item->mm_info) << VIA_MM_ALIGN_SHIFT);
- mem->index = item->user_hash.key;
- } else {
- mem->offset = 0;
- mem->size = 0;
- mem->index = 0;
- DRM_DEBUG("Video memory allocation failed\n");
+ if (mem->type == VIA_MEM_AGP)
+ retval = drm_mm_insert_node(&dev_priv->agp_mm,
+ &item->mm_node,
+ tmpSize, 0);
+ else
+ retval = drm_mm_insert_node(&dev_priv->vram_mm,
+ &item->mm_node,
+ tmpSize, 0);
+ if (retval)
+ goto fail_alloc;
+
+again:
+ if (idr_pre_get(&dev_priv->object_idr, GFP_KERNEL) == 0) {
retval = -ENOMEM;
+ goto fail_idr;
}
+ retval = idr_get_new_above(&dev_priv->object_idr, item, 1, &user_key);
+ if (retval == -EAGAIN)
+ goto again;
+ if (retval)
+ goto fail_idr;
+
+ list_add(&item->owner_list, &file_priv->obj_list);
+ mutex_unlock(&dev->struct_mutex);
+
+ mem->offset = ((mem->type == VIA_MEM_VIDEO) ?
+ dev_priv->vram_offset : dev_priv->agp_offset) +
+ ((item->mm_node.start) << VIA_MM_ALIGN_SHIFT);
+ mem->index = user_key;
+
+ return 0;
+
+fail_idr:
+ drm_mm_remove_node(&item->mm_node);
+fail_alloc:
+ kfree(item);
+ mutex_unlock(&dev->struct_mutex);
+
+ mem->offset = 0;
+ mem->size = 0;
+ mem->index = 0;
+ DRM_DEBUG("Video memory allocation failed\n");
+
return retval;
}
@@ -161,24 +188,35 @@ int via_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_via_private_t *dev_priv = dev->dev_private;
drm_via_mem_t *mem = data;
- int ret;
+ struct via_memblock *obj;
mutex_lock(&dev->struct_mutex);
- ret = drm_sman_free_key(&dev_priv->sman, mem->index);
+ obj = idr_find(&dev_priv->object_idr, mem->index);
+ if (obj == NULL) {
+ mutex_unlock(&dev->struct_mutex);
+ return -EINVAL;
+ }
+
+ idr_remove(&dev_priv->object_idr, mem->index);
+ list_del(&obj->owner_list);
+ drm_mm_remove_node(&obj->mm_node);
+ kfree(obj);
mutex_unlock(&dev->struct_mutex);
+
DRM_DEBUG("free = 0x%lx\n", mem->index);
- return ret;
+ return 0;
}
void via_reclaim_buffers_locked(struct drm_device *dev,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
- drm_via_private_t *dev_priv = dev->dev_private;
+ struct via_file_private *file_priv = file->driver_priv;
+ struct via_memblock *entry, *next;
mutex_lock(&dev->struct_mutex);
- if (drm_sman_owner_clean(&dev_priv->sman, (unsigned long)file_priv)) {
+ if (list_empty(&file_priv->obj_list)) {
mutex_unlock(&dev->struct_mutex);
return;
}
@@ -186,7 +224,12 @@ void via_reclaim_buffers_locked(struct drm_device *dev,
if (dev->driver->dma_quiescent)
dev->driver->dma_quiescent(dev);
- drm_sman_owner_cleanup(&dev_priv->sman, (unsigned long)file_priv);
+ list_for_each_entry_safe(entry, next, &file_priv->obj_list,
+ owner_list) {
+ list_del(&entry->owner_list);
+ drm_mm_remove_node(&entry->mm_node);
+ kfree(entry);
+ }
mutex_unlock(&dev->struct_mutex);
return;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 5a72ed908232..1e2c0fb7f786 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -28,6 +28,7 @@
#include "vmwgfx_drv.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
+#include "ttm/ttm_page_alloc.h"
static uint32_t vram_placement_flags = TTM_PL_FLAG_VRAM |
TTM_PL_FLAG_CACHED;
@@ -139,85 +140,63 @@ struct ttm_placement vmw_srf_placement = {
.busy_placement = gmr_vram_placement_flags
};
-struct vmw_ttm_backend {
- struct ttm_backend backend;
- struct page **pages;
- unsigned long num_pages;
+struct vmw_ttm_tt {
+ struct ttm_tt ttm;
struct vmw_private *dev_priv;
int gmr_id;
};
-static int vmw_ttm_populate(struct ttm_backend *backend,
- unsigned long num_pages, struct page **pages,
- struct page *dummy_read_page,
- dma_addr_t *dma_addrs)
+static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
- struct vmw_ttm_backend *vmw_be =
- container_of(backend, struct vmw_ttm_backend, backend);
-
- vmw_be->pages = pages;
- vmw_be->num_pages = num_pages;
-
- return 0;
-}
-
-static int vmw_ttm_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
-{
- struct vmw_ttm_backend *vmw_be =
- container_of(backend, struct vmw_ttm_backend, backend);
+ struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm);
vmw_be->gmr_id = bo_mem->start;
- return vmw_gmr_bind(vmw_be->dev_priv, vmw_be->pages,
- vmw_be->num_pages, vmw_be->gmr_id);
+ return vmw_gmr_bind(vmw_be->dev_priv, ttm->pages,
+ ttm->num_pages, vmw_be->gmr_id);
}
-static int vmw_ttm_unbind(struct ttm_backend *backend)
+static int vmw_ttm_unbind(struct ttm_tt *ttm)
{
- struct vmw_ttm_backend *vmw_be =
- container_of(backend, struct vmw_ttm_backend, backend);
+ struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm);
vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
return 0;
}
-static void vmw_ttm_clear(struct ttm_backend *backend)
+static void vmw_ttm_destroy(struct ttm_tt *ttm)
{
- struct vmw_ttm_backend *vmw_be =
- container_of(backend, struct vmw_ttm_backend, backend);
-
- vmw_be->pages = NULL;
- vmw_be->num_pages = 0;
-}
-
-static void vmw_ttm_destroy(struct ttm_backend *backend)
-{
- struct vmw_ttm_backend *vmw_be =
- container_of(backend, struct vmw_ttm_backend, backend);
+ struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm);
+ ttm_tt_fini(ttm);
kfree(vmw_be);
}
static struct ttm_backend_func vmw_ttm_func = {
- .populate = vmw_ttm_populate,
- .clear = vmw_ttm_clear,
.bind = vmw_ttm_bind,
.unbind = vmw_ttm_unbind,
.destroy = vmw_ttm_destroy,
};
-struct ttm_backend *vmw_ttm_backend_init(struct ttm_bo_device *bdev)
+struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev,
+ unsigned long size, uint32_t page_flags,
+ struct page *dummy_read_page)
{
- struct vmw_ttm_backend *vmw_be;
+ struct vmw_ttm_tt *vmw_be;
vmw_be = kmalloc(sizeof(*vmw_be), GFP_KERNEL);
if (!vmw_be)
return NULL;
- vmw_be->backend.func = &vmw_ttm_func;
+ vmw_be->ttm.func = &vmw_ttm_func;
vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev);
- return &vmw_be->backend;
+ if (ttm_tt_init(&vmw_be->ttm, bdev, size, page_flags, dummy_read_page)) {
+ kfree(vmw_be);
+ return NULL;
+ }
+
+ return &vmw_be->ttm;
}
int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
@@ -357,7 +336,9 @@ static int vmw_sync_obj_wait(void *sync_obj, void *sync_arg,
}
struct ttm_bo_driver vmw_bo_driver = {
- .create_ttm_backend_entry = vmw_ttm_backend_init,
+ .ttm_tt_create = &vmw_ttm_tt_create,
+ .ttm_tt_populate = &ttm_pool_populate,
+ .ttm_tt_unpopulate = &ttm_pool_unpopulate,
.invalidate_caches = vmw_invalidate_caches,
.init_mem_type = vmw_init_mem_type,
.evict_flags = vmw_evict_flags,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index dff8fc767152..f390f5f9cb68 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -1064,6 +1064,21 @@ static const struct dev_pm_ops vmw_pm_ops = {
.resume = vmw_pm_resume,
};
+static const struct file_operations vmwgfx_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = vmw_unlocked_ioctl,
+ .mmap = vmw_mmap,
+ .poll = vmw_fops_poll,
+ .read = vmw_fops_read,
+ .fasync = drm_fasync,
+#if defined(CONFIG_COMPAT)
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+ .llseek = noop_llseek,
+};
+
static struct drm_driver driver = {
.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
DRIVER_MODESET,
@@ -1088,20 +1103,7 @@ static struct drm_driver driver = {
.master_drop = vmw_master_drop,
.open = vmw_driver_open,
.postclose = vmw_postclose,
- .fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = vmw_unlocked_ioctl,
- .mmap = vmw_mmap,
- .poll = vmw_fops_poll,
- .read = vmw_fops_read,
- .fasync = drm_fasync,
-#if defined(CONFIG_COMPAT)
- .compat_ioctl = drm_compat_ioctl,
-#endif
- .llseek = noop_llseek,
- },
+ .fops = &vmwgfx_driver_fops,
.name = VMWGFX_DRIVER_NAME,
.desc = VMWGFX_DRIVER_DESC,
.date = VMWGFX_DRIVER_DATE,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 8cca91a93bde..dc279706ca70 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -390,6 +390,11 @@ extern int vmw_context_check(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
int id,
struct vmw_resource **p_res);
+extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ uint32_t handle,
+ struct vmw_surface **out_surf,
+ struct vmw_dma_buffer **out_buf);
extern void vmw_surface_res_free(struct vmw_resource *res);
extern int vmw_surface_init(struct vmw_private *dev_priv,
struct vmw_surface *srf,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index 03bbc2a6f9a7..a0c2f12b1e1b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -33,6 +33,7 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
uint32_t fifo_min, hwversion;
+ const struct vmw_fifo_state *fifo = &dev_priv->fifo;
if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
return false;
@@ -41,7 +42,12 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
return false;
- hwversion = ioread32(fifo_mem + SVGA_FIFO_3D_HWVERSION);
+ hwversion = ioread32(fifo_mem +
+ ((fifo->capabilities &
+ SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
+ SVGA_FIFO_3D_HWVERSION_REVISED :
+ SVGA_FIFO_3D_HWVERSION));
+
if (hwversion == 0)
return false;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 3f6343502d1f..66917c6c3813 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -58,8 +58,14 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
case DRM_VMW_PARAM_FIFO_HW_VERSION:
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
-
- param->value = ioread32(fifo_mem + SVGA_FIFO_3D_HWVERSION);
+ const struct vmw_fifo_state *fifo = &dev_priv->fifo;
+
+ param->value =
+ ioread32(fifo_mem +
+ ((fifo->capabilities &
+ SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
+ SVGA_FIFO_3D_HWVERSION_REVISED :
+ SVGA_FIFO_3D_HWVERSION));
break;
}
default:
@@ -140,7 +146,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
goto out_clips;
}
- clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
+ clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
if (clips == NULL) {
DRM_ERROR("Failed to allocate clip rect list.\n");
ret = -ENOMEM;
@@ -166,13 +172,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
ret = -EINVAL;
goto out_no_fb;
}
-
vfb = vmw_framebuffer_to_vfb(obj_to_fb(obj));
- if (!vfb->dmabuf) {
- DRM_ERROR("Framebuffer not dmabuf backed.\n");
- ret = -EINVAL;
- goto out_no_fb;
- }
ret = ttm_read_lock(&vmaster->lock, true);
if (unlikely(ret != 0))
@@ -232,7 +232,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
goto out_clips;
}
- clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
+ clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
if (clips == NULL) {
DRM_ERROR("Failed to allocate clip rect list.\n");
ret = -ENOMEM;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 880e285d7578..0af6ebdf205d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -31,6 +31,44 @@
/* Might need a hrtimer here? */
#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
+
+struct vmw_clip_rect {
+ int x1, x2, y1, y2;
+};
+
+/**
+ * Clip @num_rects number of @rects against @clip storing the
+ * results in @out_rects and the number of passed rects in @out_num.
+ */
+void vmw_clip_cliprects(struct drm_clip_rect *rects,
+ int num_rects,
+ struct vmw_clip_rect clip,
+ SVGASignedRect *out_rects,
+ int *out_num)
+{
+ int i, k;
+
+ for (i = 0, k = 0; i < num_rects; i++) {
+ int x1 = max_t(int, clip.x1, rects[i].x1);
+ int y1 = max_t(int, clip.y1, rects[i].y1);
+ int x2 = min_t(int, clip.x2, rects[i].x2);
+ int y2 = min_t(int, clip.y2, rects[i].y2);
+
+ if (x1 >= x2)
+ continue;
+ if (y1 >= y2)
+ continue;
+
+ out_rects[k].left = x1;
+ out_rects[k].top = y1;
+ out_rects[k].right = x2;
+ out_rects[k].bottom = y2;
+ k++;
+ }
+
+ *out_num = k;
+}
+
void vmw_display_unit_cleanup(struct vmw_display_unit *du)
{
if (du->cursor_surface)
@@ -82,6 +120,43 @@ int vmw_cursor_update_image(struct vmw_private *dev_priv,
return 0;
}
+int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *dmabuf,
+ u32 width, u32 height,
+ u32 hotspotX, u32 hotspotY)
+{
+ struct ttm_bo_kmap_obj map;
+ unsigned long kmap_offset;
+ unsigned long kmap_num;
+ void *virtual;
+ bool dummy;
+ int ret;
+
+ kmap_offset = 0;
+ kmap_num = (width*height*4 + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+ ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("reserve failed\n");
+ return -EINVAL;
+ }
+
+ ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
+ if (unlikely(ret != 0))
+ goto err_unreserve;
+
+ virtual = ttm_kmap_obj_virtual(&map, &dummy);
+ ret = vmw_cursor_update_image(dev_priv, virtual, width, height,
+ hotspotX, hotspotY);
+
+ ttm_bo_kunmap(&map);
+err_unreserve:
+ ttm_bo_unreserve(&dmabuf->base);
+
+ return ret;
+}
+
+
void vmw_cursor_update_position(struct vmw_private *dev_priv,
bool show, int x, int y)
{
@@ -110,24 +185,21 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
return -EINVAL;
if (handle) {
- ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
- handle, &surface);
- if (!ret) {
- if (!surface->snooper.image) {
- DRM_ERROR("surface not suitable for cursor\n");
- vmw_surface_unreference(&surface);
- return -EINVAL;
- }
- } else {
- ret = vmw_user_dmabuf_lookup(tfile,
- handle, &dmabuf);
- if (ret) {
- DRM_ERROR("failed to find surface or dmabuf: %i\n", ret);
- return -EINVAL;
- }
+ ret = vmw_user_lookup_handle(dev_priv, tfile,
+ handle, &surface, &dmabuf);
+ if (ret) {
+ DRM_ERROR("failed to find surface or dmabuf: %i\n", ret);
+ return -EINVAL;
}
}
+ /* need to do this before taking down old image */
+ if (surface && !surface->snooper.image) {
+ DRM_ERROR("surface not suitable for cursor\n");
+ vmw_surface_unreference(&surface);
+ return -EINVAL;
+ }
+
/* takedown old cursor */
if (du->cursor_surface) {
du->cursor_surface->snooper.crtc = NULL;
@@ -146,36 +218,11 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
vmw_cursor_update_image(dev_priv, surface->snooper.image,
64, 64, du->hotspot_x, du->hotspot_y);
} else if (dmabuf) {
- struct ttm_bo_kmap_obj map;
- unsigned long kmap_offset;
- unsigned long kmap_num;
- void *virtual;
- bool dummy;
-
/* vmw_user_surface_lookup takes one reference */
du->cursor_dmabuf = dmabuf;
- kmap_offset = 0;
- kmap_num = (64*64*4) >> PAGE_SHIFT;
-
- ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0);
- if (unlikely(ret != 0)) {
- DRM_ERROR("reserve failed\n");
- return -EINVAL;
- }
-
- ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
- if (unlikely(ret != 0))
- goto err_unreserve;
-
- virtual = ttm_kmap_obj_virtual(&map, &dummy);
- vmw_cursor_update_image(dev_priv, virtual, 64, 64,
- du->hotspot_x, du->hotspot_y);
-
- ttm_bo_kunmap(&map);
-err_unreserve:
- ttm_bo_unreserve(&dmabuf->base);
-
+ ret = vmw_cursor_update_dmabuf(dev_priv, dmabuf, width, height,
+ du->hotspot_x, du->hotspot_y);
} else {
vmw_cursor_update_position(dev_priv, false, 0, 0);
return 0;
@@ -377,8 +424,9 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
struct drm_clip_rect *clips,
unsigned num_clips, int inc)
{
- struct drm_clip_rect *clips_ptr;
struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
+ struct drm_clip_rect *clips_ptr;
+ struct drm_clip_rect *tmp;
struct drm_crtc *crtc;
size_t fifo_size;
int i, num_units;
@@ -391,7 +439,6 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
} *cmd;
SVGASignedRect *blits;
-
num_units = 0;
list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list,
head) {
@@ -402,13 +449,24 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
BUG_ON(!clips || !num_clips);
+ tmp = kzalloc(sizeof(*tmp) * num_clips, GFP_KERNEL);
+ if (unlikely(tmp == NULL)) {
+ DRM_ERROR("Temporary cliprect memory alloc failed.\n");
+ return -ENOMEM;
+ }
+
fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips;
cmd = kzalloc(fifo_size, GFP_KERNEL);
if (unlikely(cmd == NULL)) {
DRM_ERROR("Temporary fifo memory alloc failed.\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out_free_tmp;
}
+ /* setup blits pointer */
+ blits = (SVGASignedRect *)&cmd[1];
+
+ /* initial clip region */
left = clips->x1;
right = clips->x2;
top = clips->y1;
@@ -434,45 +492,60 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
cmd->body.srcRect.bottom = bottom;
clips_ptr = clips;
- blits = (SVGASignedRect *)&cmd[1];
for (i = 0; i < num_clips; i++, clips_ptr += inc) {
- blits[i].left = clips_ptr->x1 - left;
- blits[i].right = clips_ptr->x2 - left;
- blits[i].top = clips_ptr->y1 - top;
- blits[i].bottom = clips_ptr->y2 - top;
+ tmp[i].x1 = clips_ptr->x1 - left;
+ tmp[i].x2 = clips_ptr->x2 - left;
+ tmp[i].y1 = clips_ptr->y1 - top;
+ tmp[i].y2 = clips_ptr->y2 - top;
}
/* do per unit writing, reuse fifo for each */
for (i = 0; i < num_units; i++) {
struct vmw_display_unit *unit = units[i];
- int clip_x1 = left - unit->crtc.x;
- int clip_y1 = top - unit->crtc.y;
- int clip_x2 = right - unit->crtc.x;
- int clip_y2 = bottom - unit->crtc.y;
+ struct vmw_clip_rect clip;
+ int num;
+
+ clip.x1 = left - unit->crtc.x;
+ clip.y1 = top - unit->crtc.y;
+ clip.x2 = right - unit->crtc.x;
+ clip.y2 = bottom - unit->crtc.y;
		/* skip any crtcs that miss the clip region */
- if (clip_x1 >= unit->crtc.mode.hdisplay ||
- clip_y1 >= unit->crtc.mode.vdisplay ||
- clip_x2 <= 0 || clip_y2 <= 0)
+ if (clip.x1 >= unit->crtc.mode.hdisplay ||
+ clip.y1 >= unit->crtc.mode.vdisplay ||
+ clip.x2 <= 0 || clip.y2 <= 0)
continue;
+ /*
+ * In order for the clip rects to be correctly scaled
+		 * the src and dest rects need to be the same size.
+ */
+ cmd->body.destRect.left = clip.x1;
+ cmd->body.destRect.right = clip.x2;
+ cmd->body.destRect.top = clip.y1;
+ cmd->body.destRect.bottom = clip.y2;
+
+ /* create a clip rect of the crtc in dest coords */
+ clip.x2 = unit->crtc.mode.hdisplay - clip.x1;
+ clip.y2 = unit->crtc.mode.vdisplay - clip.y1;
+ clip.x1 = 0 - clip.x1;
+ clip.y1 = 0 - clip.y1;
+
/* need to reset sid as it is changed by execbuf */
cmd->body.srcImage.sid = cpu_to_le32(framebuffer->user_handle);
-
cmd->body.destScreenId = unit->unit;
- /*
- * The blit command is a lot more resilient then the
- * readback command when it comes to clip rects. So its
- * okay to go out of bounds.
- */
+ /* clip and write blits to cmd stream */
+ vmw_clip_cliprects(tmp, num_clips, clip, blits, &num);
- cmd->body.destRect.left = clip_x1;
- cmd->body.destRect.right = clip_x2;
- cmd->body.destRect.top = clip_y1;
- cmd->body.destRect.bottom = clip_y2;
+ /* if no cliprects hit skip this */
+ if (num == 0)
+ continue;
+ /* recalculate package length */
+ fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num;
+ cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
fifo_size, 0, NULL);
@@ -480,7 +553,10 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
break;
}
+
kfree(cmd);
+out_free_tmp:
+ kfree(tmp);
return ret;
}
@@ -556,6 +632,10 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
* Sanity checks.
*/
+ /* Surface must be marked as a scanout. */
+ if (unlikely(!surface->scanout))
+ return -EINVAL;
+
if (unlikely(surface->mip_levels[0] != 1 ||
surface->num_sizes != 1 ||
surface->sizes[0].width < mode_cmd->width ||
@@ -610,7 +690,7 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
/* XXX get the first 3 from the surface info */
vfbs->base.base.bits_per_pixel = mode_cmd->bpp;
- vfbs->base.base.pitch = mode_cmd->pitch;
+ vfbs->base.base.pitches[0] = mode_cmd->pitch;
vfbs->base.base.depth = mode_cmd->depth;
vfbs->base.base.width = mode_cmd->width;
vfbs->base.base.height = mode_cmd->height;
@@ -724,7 +804,7 @@ static int do_dmabuf_define_gmrfb(struct drm_file *file_priv,
cmd->body.format.bitsPerPixel = framebuffer->base.bits_per_pixel;
cmd->body.format.colorDepth = depth;
cmd->body.format.reserved = 0;
- cmd->body.bytesPerLine = framebuffer->base.pitch;
+ cmd->body.bytesPerLine = framebuffer->base.pitches[0];
cmd->body.ptr.gmrId = framebuffer->user_handle;
cmd->body.ptr.offset = 0;
@@ -782,6 +862,7 @@ static int do_dmabuf_dirty_sou(struct drm_file *file_priv,
int clip_y1 = clips_ptr->y1 - unit->crtc.y;
int clip_x2 = clips_ptr->x2 - unit->crtc.x;
int clip_y2 = clips_ptr->y2 - unit->crtc.y;
+ int move_x, move_y;
		/* skip any crtcs that miss the clip region */
if (clip_x1 >= unit->crtc.mode.hdisplay ||
@@ -789,12 +870,21 @@ static int do_dmabuf_dirty_sou(struct drm_file *file_priv,
clip_x2 <= 0 || clip_y2 <= 0)
continue;
+ /* clip size to crtc size */
+ clip_x2 = min_t(int, clip_x2, unit->crtc.mode.hdisplay);
+ clip_y2 = min_t(int, clip_y2, unit->crtc.mode.vdisplay);
+
+ /* translate both src and dest to bring clip into screen */
+ move_x = min_t(int, clip_x1, 0);
+ move_y = min_t(int, clip_y1, 0);
+
+ /* actual translate done here */
blits[hit_num].header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
blits[hit_num].body.destScreenId = unit->unit;
- blits[hit_num].body.srcOrigin.x = clips_ptr->x1;
- blits[hit_num].body.srcOrigin.y = clips_ptr->y1;
- blits[hit_num].body.destRect.left = clip_x1;
- blits[hit_num].body.destRect.top = clip_y1;
+ blits[hit_num].body.srcOrigin.x = clips_ptr->x1 - move_x;
+ blits[hit_num].body.srcOrigin.y = clips_ptr->y1 - move_y;
+ blits[hit_num].body.destRect.left = clip_x1 - move_x;
+ blits[hit_num].body.destRect.top = clip_y1 - move_y;
blits[hit_num].body.destRect.right = clip_x2;
blits[hit_num].body.destRect.bottom = clip_y2;
hit_num++;
@@ -966,7 +1056,7 @@ static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
}
vfbd->base.base.bits_per_pixel = mode_cmd->bpp;
- vfbd->base.base.pitch = mode_cmd->pitch;
+ vfbd->base.base.pitches[0] = mode_cmd->pitch;
vfbd->base.base.depth = mode_cmd->depth;
vfbd->base.base.width = mode_cmd->width;
vfbd->base.base.height = mode_cmd->height;
@@ -995,7 +1085,7 @@ out_err1:
static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
struct drm_file *file_priv,
- struct drm_mode_fb_cmd *mode_cmd)
+ struct drm_mode_fb_cmd2 *mode_cmd2)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
@@ -1003,17 +1093,25 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
struct vmw_surface *surface = NULL;
struct vmw_dma_buffer *bo = NULL;
struct ttm_base_object *user_obj;
- u64 required_size;
+ struct drm_mode_fb_cmd mode_cmd;
int ret;
+ mode_cmd.width = mode_cmd2->width;
+ mode_cmd.height = mode_cmd2->height;
+ mode_cmd.pitch = mode_cmd2->pitches[0];
+ mode_cmd.handle = mode_cmd2->handles[0];
+ drm_fb_get_bpp_depth(mode_cmd2->pixel_format, &mode_cmd.depth,
+ &mode_cmd.bpp);
+
/**
* This code should be conditioned on Screen Objects not being used.
* If screen objects are used, we can allocate a GMR to hold the
* requested framebuffer.
*/
- required_size = mode_cmd->pitch * mode_cmd->height;
- if (unlikely(required_size > (u64) dev_priv->vram_size)) {
+ if (!vmw_kms_validate_mode_vram(dev_priv,
+ mode_cmd.pitch,
+ mode_cmd.height)) {
DRM_ERROR("VRAM size is too small for requested mode.\n");
return ERR_PTR(-ENOMEM);
}
@@ -1027,7 +1125,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
* command stream using user-space handles.
*/
- user_obj = ttm_base_object_lookup(tfile, mode_cmd->handle);
+ user_obj = ttm_base_object_lookup(tfile, mode_cmd.handle);
if (unlikely(user_obj == NULL)) {
DRM_ERROR("Could not locate requested kms frame buffer.\n");
return ERR_PTR(-ENOENT);
@@ -1037,42 +1135,29 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
* End conditioned code.
*/
- ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
- mode_cmd->handle, &surface);
+ /* returns either a dmabuf or surface */
+ ret = vmw_user_lookup_handle(dev_priv, tfile,
+ mode_cmd.handle,
+ &surface, &bo);
if (ret)
- goto try_dmabuf;
-
- if (!surface->scanout)
- goto err_not_scanout;
-
- ret = vmw_kms_new_framebuffer_surface(dev_priv, file_priv, surface,
- &vfb, mode_cmd);
-
- /* vmw_user_surface_lookup takes one ref so does new_fb */
- vmw_surface_unreference(&surface);
-
- if (ret) {
- DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
- ttm_base_object_unref(&user_obj);
- return ERR_PTR(ret);
- } else
- vfb->user_obj = user_obj;
- return &vfb->base;
-
-try_dmabuf:
- DRM_INFO("%s: trying buffer\n", __func__);
-
- ret = vmw_user_dmabuf_lookup(tfile, mode_cmd->handle, &bo);
- if (ret) {
- DRM_ERROR("failed to find buffer: %i\n", ret);
- return ERR_PTR(-ENOENT);
- }
-
- ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
- mode_cmd);
+ goto err_out;
+
+	/* Create the new framebuffer depending on what we got back */
+ if (bo)
+ ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
+ &mode_cmd);
+ else if (surface)
+ ret = vmw_kms_new_framebuffer_surface(dev_priv, file_priv,
+ surface, &vfb, &mode_cmd);
+ else
+ BUG();
- /* vmw_user_dmabuf_lookup takes one ref so does new_fb */
- vmw_dmabuf_unreference(&bo);
+err_out:
+ /* vmw_user_lookup_handle takes one ref so does new_fb */
+ if (bo)
+ vmw_dmabuf_unreference(&bo);
+ if (surface)
+ vmw_surface_unreference(&surface);
if (ret) {
DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
@@ -1082,14 +1167,6 @@ try_dmabuf:
vfb->user_obj = user_obj;
return &vfb->base;
-
-err_not_scanout:
- DRM_ERROR("surface not marked as scanout\n");
- /* vmw_user_surface_lookup takes one ref */
- vmw_surface_unreference(&surface);
- ttm_base_object_unref(&user_obj);
-
- return ERR_PTR(-EINVAL);
}
static struct drm_mode_config_funcs vmw_kms_funcs = {
@@ -1106,10 +1183,12 @@ int vmw_kms_present(struct vmw_private *dev_priv,
uint32_t num_clips)
{
struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
+ struct drm_clip_rect *tmp;
struct drm_crtc *crtc;
size_t fifo_size;
int i, k, num_units;
int ret = 0; /* silence warning */
+ int left, right, top, bottom;
struct {
SVGA3dCmdHeader header;
@@ -1127,60 +1206,95 @@ int vmw_kms_present(struct vmw_private *dev_priv,
BUG_ON(surface == NULL);
BUG_ON(!clips || !num_clips);
+ tmp = kzalloc(sizeof(*tmp) * num_clips, GFP_KERNEL);
+ if (unlikely(tmp == NULL)) {
+ DRM_ERROR("Temporary cliprect memory alloc failed.\n");
+ return -ENOMEM;
+ }
+
fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips;
cmd = kmalloc(fifo_size, GFP_KERNEL);
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed to allocate temporary fifo memory.\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out_free_tmp;
+ }
+
+ left = clips->x;
+ right = clips->x + clips->w;
+ top = clips->y;
+ bottom = clips->y + clips->h;
+
+ for (i = 1; i < num_clips; i++) {
+ left = min_t(int, left, (int)clips[i].x);
+ right = max_t(int, right, (int)clips[i].x + clips[i].w);
+ top = min_t(int, top, (int)clips[i].y);
+ bottom = max_t(int, bottom, (int)clips[i].y + clips[i].h);
}
/* only need to do this once */
memset(cmd, 0, fifo_size);
cmd->header.id = cpu_to_le32(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN);
- cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
-
- cmd->body.srcRect.left = 0;
- cmd->body.srcRect.right = surface->sizes[0].width;
- cmd->body.srcRect.top = 0;
- cmd->body.srcRect.bottom = surface->sizes[0].height;
blits = (SVGASignedRect *)&cmd[1];
+
+ cmd->body.srcRect.left = left;
+ cmd->body.srcRect.right = right;
+ cmd->body.srcRect.top = top;
+ cmd->body.srcRect.bottom = bottom;
+
for (i = 0; i < num_clips; i++) {
- blits[i].left = clips[i].x;
- blits[i].right = clips[i].x + clips[i].w;
- blits[i].top = clips[i].y;
- blits[i].bottom = clips[i].y + clips[i].h;
+ tmp[i].x1 = clips[i].x - left;
+ tmp[i].x2 = clips[i].x + clips[i].w - left;
+ tmp[i].y1 = clips[i].y - top;
+ tmp[i].y2 = clips[i].y + clips[i].h - top;
}
for (k = 0; k < num_units; k++) {
struct vmw_display_unit *unit = units[k];
- int clip_x1 = destX - unit->crtc.x;
- int clip_y1 = destY - unit->crtc.y;
- int clip_x2 = clip_x1 + surface->sizes[0].width;
- int clip_y2 = clip_y1 + surface->sizes[0].height;
+ struct vmw_clip_rect clip;
+ int num;
+
+ clip.x1 = left + destX - unit->crtc.x;
+ clip.y1 = top + destY - unit->crtc.y;
+ clip.x2 = right + destX - unit->crtc.x;
+ clip.y2 = bottom + destY - unit->crtc.y;
		/* skip any crtcs that miss the clip region */
- if (clip_x1 >= unit->crtc.mode.hdisplay ||
- clip_y1 >= unit->crtc.mode.vdisplay ||
- clip_x2 <= 0 || clip_y2 <= 0)
+ if (clip.x1 >= unit->crtc.mode.hdisplay ||
+ clip.y1 >= unit->crtc.mode.vdisplay ||
+ clip.x2 <= 0 || clip.y2 <= 0)
continue;
+ /*
+ * In order for the clip rects to be correctly scaled
+		 * the src and dest rects need to be the same size.
+ */
+ cmd->body.destRect.left = clip.x1;
+ cmd->body.destRect.right = clip.x2;
+ cmd->body.destRect.top = clip.y1;
+ cmd->body.destRect.bottom = clip.y2;
+
+ /* create a clip rect of the crtc in dest coords */
+ clip.x2 = unit->crtc.mode.hdisplay - clip.x1;
+ clip.y2 = unit->crtc.mode.vdisplay - clip.y1;
+ clip.x1 = 0 - clip.x1;
+ clip.y1 = 0 - clip.y1;
+
/* need to reset sid as it is changed by execbuf */
cmd->body.srcImage.sid = sid;
-
cmd->body.destScreenId = unit->unit;
- /*
- * The blit command is a lot more resilient then the
- * readback command when it comes to clip rects. So its
- * okay to go out of bounds.
- */
+ /* clip and write blits to cmd stream */
+ vmw_clip_cliprects(tmp, num_clips, clip, blits, &num);
- cmd->body.destRect.left = clip_x1;
- cmd->body.destRect.right = clip_x2;
- cmd->body.destRect.top = clip_y1;
- cmd->body.destRect.bottom = clip_y2;
+ /* if no cliprects hit skip this */
+ if (num == 0)
+ continue;
+ /* recalculate package length */
+ fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num;
+ cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
fifo_size, 0, NULL);
@@ -1189,6 +1303,8 @@ int vmw_kms_present(struct vmw_private *dev_priv,
}
kfree(cmd);
+out_free_tmp:
+ kfree(tmp);
return ret;
}
@@ -1240,7 +1356,7 @@ int vmw_kms_readback(struct vmw_private *dev_priv,
cmd->body.format.bitsPerPixel = vfb->base.bits_per_pixel;
cmd->body.format.colorDepth = vfb->base.depth;
cmd->body.format.reserved = 0;
- cmd->body.bytesPerLine = vfb->base.pitch;
+ cmd->body.bytesPerLine = vfb->base.pitches[0];
cmd->body.ptr.gmrId = vfb->user_handle;
cmd->body.ptr.offset = 0;
@@ -1809,7 +1925,8 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
}
rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
- rects = kzalloc(rects_size, GFP_KERNEL);
+ rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
+ GFP_KERNEL);
if (unlikely(!rects)) {
ret = -ENOMEM;
goto out_unlock;
@@ -1824,10 +1941,10 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
}
for (i = 0; i < arg->num_outputs; ++i) {
- if (rects->x < 0 ||
- rects->y < 0 ||
- rects->x + rects->w > mode_config->max_width ||
- rects->y + rects->h > mode_config->max_height) {
+ if (rects[i].x < 0 ||
+ rects[i].y < 0 ||
+ rects[i].x + rects[i].w > mode_config->max_width ||
+ rects[i].y + rects[i].h > mode_config->max_height) {
DRM_ERROR("Invalid GUI layout.\n");
ret = -EINVAL;
goto out_free;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index af8e6e5bd964..a4f7f034996a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -29,6 +29,7 @@
#define VMWGFX_KMS_H_
#include "drmP.h"
+#include "drm_crtc_helper.h"
#include "vmwgfx_drv.h"
#define VMWGFX_NUM_DISPLAY_UNITS 8
@@ -62,9 +63,14 @@ struct vmw_framebuffer {
int vmw_cursor_update_image(struct vmw_private *dev_priv,
u32 *image, u32 width, u32 height,
u32 hotspotX, u32 hotspotY);
+int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *dmabuf,
+ u32 width, u32 height,
+ u32 hotspotX, u32 hotspotY);
void vmw_cursor_update_position(struct vmw_private *dev_priv,
bool show, int x, int y);
+
/**
* Base class display unit.
*
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 90c5e3928491..f77b184be807 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -74,9 +74,10 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
{
struct vmw_legacy_display *lds = dev_priv->ldu_priv;
struct vmw_legacy_display_unit *entry;
+ struct vmw_display_unit *du = NULL;
struct drm_framebuffer *fb = NULL;
struct drm_crtc *crtc = NULL;
- int i = 0;
+ int i = 0, ret;
/* If there is no display topology the host just assumes
* that the guest will set the same layout as the host.
@@ -94,7 +95,7 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
return 0;
fb = entry->base.crtc.fb;
- return vmw_kms_write_svga(dev_priv, w, h, fb->pitch,
+ return vmw_kms_write_svga(dev_priv, w, h, fb->pitches[0],
fb->bits_per_pixel, fb->depth);
}
@@ -102,7 +103,7 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
entry = list_entry(lds->active.next, typeof(*entry), active);
fb = entry->base.crtc.fb;
- vmw_kms_write_svga(dev_priv, fb->width, fb->height, fb->pitch,
+ vmw_kms_write_svga(dev_priv, fb->width, fb->height, fb->pitches[0],
fb->bits_per_pixel, fb->depth);
}
@@ -129,6 +130,25 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
lds->last_num_active = lds->num_active;
+
+ /* Find the first du with a cursor. */
+ list_for_each_entry(entry, &lds->active, active) {
+ du = &entry->base;
+
+ if (!du->cursor_dmabuf)
+ continue;
+
+ ret = vmw_cursor_update_dmabuf(dev_priv,
+ du->cursor_dmabuf,
+ 64, 64,
+ du->hotspot_x,
+ du->hotspot_y);
+ if (ret == 0)
+ break;
+
+ DRM_ERROR("Could not update cursor image\n");
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 86c5e4cceb31..a37abb581cbb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -1190,6 +1190,29 @@ void vmw_resource_unreserve(struct list_head *list)
write_unlock(lock);
}
+/**
+ * Helper function that looks up either a surface or a dmabuf.
+ *
+ * The pointers pointed at by out_surf and out_buf need to be NULL.
+ */
+int vmw_user_lookup_handle(struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ uint32_t handle,
+ struct vmw_surface **out_surf,
+ struct vmw_dma_buffer **out_buf)
+{
+ int ret;
+
+ BUG_ON(*out_surf || *out_buf);
+
+ ret = vmw_user_surface_lookup_handle(dev_priv, tfile, handle, out_surf);
+ if (!ret)
+ return 0;
+
+ ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
+ return ret;
+}
+
int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
@@ -1517,29 +1540,10 @@ out_bad_surface:
/**
* Buffer management.
*/
-
-static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob,
- unsigned long num_pages)
-{
- static size_t bo_user_size = ~0;
-
- size_t page_array_size =
- (num_pages * sizeof(void *) + PAGE_SIZE - 1) & PAGE_MASK;
-
- if (unlikely(bo_user_size == ~0)) {
- bo_user_size = glob->ttm_bo_extra_size +
- ttm_round_pot(sizeof(struct vmw_dma_buffer));
- }
-
- return bo_user_size + page_array_size;
-}
-
void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
- struct ttm_bo_global *glob = bo->glob;
- ttm_mem_global_free(glob->mem_glob, bo->acc_size);
kfree(vmw_bo);
}
@@ -1550,24 +1554,12 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
void (*bo_free) (struct ttm_buffer_object *bo))
{
struct ttm_bo_device *bdev = &dev_priv->bdev;
- struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
size_t acc_size;
int ret;
BUG_ON(!bo_free);
- acc_size =
- vmw_dmabuf_acc_size(bdev->glob,
- (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
-
- ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
- if (unlikely(ret != 0)) {
- /* we must free the bo here as
- * ttm_buffer_object_init does so as well */
- bo_free(&vmw_bo->base);
- return ret;
- }
-
+ acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct vmw_dma_buffer));
memset(vmw_bo, 0, sizeof(*vmw_bo));
INIT_LIST_HEAD(&vmw_bo->validate_list);
@@ -1582,9 +1574,7 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
- struct ttm_bo_global *glob = bo->glob;
- ttm_mem_global_free(glob->mem_glob, bo->acc_size);
kfree(vmw_user_bo);
}
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 22a4a051f221..a421abdd1ab7 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -31,6 +31,11 @@ config HID
If unsure, say Y.
+config HID_BATTERY_STRENGTH
+ bool
+ depends on HID && POWER_SUPPLY && HID = POWER_SUPPLY
+ default y
+
config HIDRAW
bool "/dev/hidraw raw HID device support"
depends on HID
@@ -335,6 +340,7 @@ config HID_MULTITOUCH
Say Y here if you have one of the following devices:
- 3M PCT touch screens
- ActionStar dual touch panels
+ - Atmel panels
- Cando dual touch panels
- Chunghwa panels
- CVTouch panels
@@ -349,12 +355,15 @@ config HID_MULTITOUCH
- Lumio CrystalTouch panels
- MosArt dual-touch panels
- PenMount dual touch panels
+ - PixArt optical touch screen
- Pixcir dual touch panels
+ - Quanta panels
- eGalax dual-touch panels, including the Joojoo and Wetab tablets
- Stantum multitouch panels
- Touch International Panels
- Unitec Panels
- XAT optical touch panels
+ - Xiroku optical touch panels
If unsure, say N.
@@ -466,12 +475,6 @@ config HID_PRIMAX
Support for Primax devices that are not fully compliant with the
HID standard.
-config HID_QUANTA
- tristate "Quanta Optical Touch panels"
- depends on USB_HID
- ---help---
- Support for Quanta Optical Touch dual-touch panels.
-
config HID_ROCCAT
tristate "Roccat special event support"
depends on USB_HID
@@ -492,6 +495,13 @@ config HID_ROCCAT_ARVO
---help---
Support for Roccat Arvo keyboard.
+config HID_ROCCAT_ISKU
+ tristate "Roccat Isku keyboard support"
+ depends on USB_HID
+ depends on HID_ROCCAT
+ ---help---
+ Support for Roccat Isku keyboard.
+
config HID_ROCCAT_KONE
tristate "Roccat Kone Mouse support"
depends on USB_HID
@@ -560,6 +570,12 @@ config GREENASIA_FF
(like MANTA Warrior MM816 and SpeedLink Strike2 SL-6635) or adapter
and want to enable force feedback support for it.
+config HID_HYPERV_MOUSE
+ tristate "Microsoft Hyper-V mouse driver"
+ depends on HYPERV
+ ---help---
+ Select this option to enable the Hyper-V mouse driver.
+
config HID_SMARTJOYPLUS
tristate "SmartJoy PLUS PS2/USB adapter support"
depends on USB_HID
@@ -620,9 +636,19 @@ config HID_WIIMOTE
depends on BT_HIDP
depends on LEDS_CLASS
select POWER_SUPPLY
+ select INPUT_FF_MEMLESS
---help---
Support for the Nintendo Wii Remote bluetooth device.
+config HID_WIIMOTE_EXT
+ bool "Nintendo Wii Remote Extension support"
+ depends on HID_WIIMOTE
+ default HID_WIIMOTE
+ ---help---
+ Support for extension controllers of the Nintendo Wii Remote. Say yes
+ here if you want to use the Nintendo Motion+, Nunchuck or Classic
+ extension controllers with your Wii Remote.
+
config HID_ZEROPLUS
tristate "Zeroplus based game controller support"
depends on USB_HID
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 1e0d2a638b28..8aefdc963cce 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -25,6 +25,14 @@ ifdef CONFIG_LOGIWHEELS_FF
hid-logitech-y += hid-lg4ff.o
endif
+hid-wiimote-y := hid-wiimote-core.o
+ifdef CONFIG_HID_WIIMOTE_EXT
+ hid-wiimote-y += hid-wiimote-ext.o
+endif
+ifdef CONFIG_DEBUG_FS
+ hid-wiimote-y += hid-wiimote-debug.o
+endif
+
obj-$(CONFIG_HID_A4TECH) += hid-a4tech.o
obj-$(CONFIG_HID_ACRUX) += hid-axff.o
obj-$(CONFIG_HID_APPLE) += hid-apple.o
@@ -38,6 +46,7 @@ obj-$(CONFIG_HID_ELECOM) += hid-elecom.o
obj-$(CONFIG_HID_EZKEY) += hid-ezkey.o
obj-$(CONFIG_HID_GYRATION) += hid-gyration.o
obj-$(CONFIG_HID_HOLTEK) += hid-holtekff.o
+obj-$(CONFIG_HID_HYPERV_MOUSE) += hid-hyperv.o
obj-$(CONFIG_HID_KENSINGTON) += hid-kensington.o
obj-$(CONFIG_HID_KEYTOUCH) += hid-keytouch.o
obj-$(CONFIG_HID_KYE) += hid-kye.o
@@ -51,7 +60,6 @@ obj-$(CONFIG_HID_MULTITOUCH) += hid-multitouch.o
obj-$(CONFIG_HID_NTRIG) += hid-ntrig.o
obj-$(CONFIG_HID_ORTEK) += hid-ortek.o
obj-$(CONFIG_HID_PRODIKEYS) += hid-prodikeys.o
-obj-$(CONFIG_HID_QUANTA) += hid-quanta.o
obj-$(CONFIG_HID_PANTHERLORD) += hid-pl.o
obj-$(CONFIG_HID_PETALYNX) += hid-petalynx.o
obj-$(CONFIG_HID_PICOLCD) += hid-picolcd.o
@@ -59,6 +67,7 @@ obj-$(CONFIG_HID_PRIMAX) += hid-primax.o
obj-$(CONFIG_HID_ROCCAT) += hid-roccat.o
obj-$(CONFIG_HID_ROCCAT_COMMON) += hid-roccat-common.o
obj-$(CONFIG_HID_ROCCAT_ARVO) += hid-roccat-arvo.o
+obj-$(CONFIG_HID_ROCCAT_ISKU) += hid-roccat-isku.o
obj-$(CONFIG_HID_ROCCAT_KONE) += hid-roccat-kone.o
obj-$(CONFIG_HID_ROCCAT_KONEPLUS) += hid-roccat-koneplus.o
obj-$(CONFIG_HID_ROCCAT_KOVAPLUS) += hid-roccat-kovaplus.o
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 848a56c0279c..af08ce7207d9 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -90,7 +90,7 @@ static struct hid_field *hid_register_field(struct hid_report *report, unsigned
struct hid_field *field;
if (report->maxfield == HID_MAX_FIELDS) {
- dbg_hid("too many fields in report\n");
+ hid_err(report->device, "too many fields in report\n");
return NULL;
}
@@ -121,7 +121,7 @@ static int open_collection(struct hid_parser *parser, unsigned type)
usage = parser->local.usage[0];
if (parser->collection_stack_ptr == HID_COLLECTION_STACK_SIZE) {
- dbg_hid("collection stack overflow\n");
+ hid_err(parser->device, "collection stack overflow\n");
return -1;
}
@@ -129,7 +129,7 @@ static int open_collection(struct hid_parser *parser, unsigned type)
collection = kmalloc(sizeof(struct hid_collection) *
parser->device->collection_size * 2, GFP_KERNEL);
if (collection == NULL) {
- dbg_hid("failed to reallocate collection array\n");
+ hid_err(parser->device, "failed to reallocate collection array\n");
return -1;
}
memcpy(collection, parser->device->collection,
@@ -165,7 +165,7 @@ static int open_collection(struct hid_parser *parser, unsigned type)
static int close_collection(struct hid_parser *parser)
{
if (!parser->collection_stack_ptr) {
- dbg_hid("collection stack underflow\n");
+ hid_err(parser->device, "collection stack underflow\n");
return -1;
}
parser->collection_stack_ptr--;
@@ -197,7 +197,7 @@ static unsigned hid_lookup_collection(struct hid_parser *parser, unsigned type)
static int hid_add_usage(struct hid_parser *parser, unsigned usage)
{
if (parser->local.usage_index >= HID_MAX_USAGES) {
- dbg_hid("usage index exceeded\n");
+ hid_err(parser->device, "usage index exceeded\n");
return -1;
}
parser->local.usage[parser->local.usage_index] = usage;
@@ -222,12 +222,13 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign
report = hid_register_report(parser->device, report_type, parser->global.report_id);
if (!report) {
- dbg_hid("hid_register_report failed\n");
+ hid_err(parser->device, "hid_register_report failed\n");
return -1;
}
if (parser->global.logical_maximum < parser->global.logical_minimum) {
- dbg_hid("logical range invalid %d %d\n", parser->global.logical_minimum, parser->global.logical_maximum);
+ hid_err(parser->device, "logical range invalid %d %d\n",
+ parser->global.logical_minimum, parser->global.logical_maximum);
return -1;
}
@@ -307,7 +308,7 @@ static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
case HID_GLOBAL_ITEM_TAG_PUSH:
if (parser->global_stack_ptr == HID_GLOBAL_STACK_SIZE) {
- dbg_hid("global environment stack overflow\n");
+ hid_err(parser->device, "global environment stack overflow\n");
return -1;
}
@@ -318,7 +319,7 @@ static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
case HID_GLOBAL_ITEM_TAG_POP:
if (!parser->global_stack_ptr) {
- dbg_hid("global environment stack underflow\n");
+ hid_err(parser->device, "global environment stack underflow\n");
return -1;
}
@@ -362,8 +363,8 @@ static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
case HID_GLOBAL_ITEM_TAG_REPORT_SIZE:
parser->global.report_size = item_udata(item);
- if (parser->global.report_size > 32) {
- dbg_hid("invalid report_size %d\n",
+ if (parser->global.report_size > 96) {
+ hid_err(parser->device, "invalid report_size %d\n",
parser->global.report_size);
return -1;
}
@@ -372,7 +373,7 @@ static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
case HID_GLOBAL_ITEM_TAG_REPORT_COUNT:
parser->global.report_count = item_udata(item);
if (parser->global.report_count > HID_MAX_USAGES) {
- dbg_hid("invalid report_count %d\n",
+ hid_err(parser->device, "invalid report_count %d\n",
parser->global.report_count);
return -1;
}
@@ -381,13 +382,13 @@ static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
case HID_GLOBAL_ITEM_TAG_REPORT_ID:
parser->global.report_id = item_udata(item);
if (parser->global.report_id == 0) {
- dbg_hid("report_id 0 is invalid\n");
+ hid_err(parser->device, "report_id 0 is invalid\n");
return -1;
}
return 0;
default:
- dbg_hid("unknown global tag 0x%x\n", item->tag);
+ hid_err(parser->device, "unknown global tag 0x%x\n", item->tag);
return -1;
}
}
@@ -414,14 +415,14 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
* items and the first delimiter set.
*/
if (parser->local.delimiter_depth != 0) {
- dbg_hid("nested delimiters\n");
+ hid_err(parser->device, "nested delimiters\n");
return -1;
}
parser->local.delimiter_depth++;
parser->local.delimiter_branch++;
} else {
if (parser->local.delimiter_depth < 1) {
- dbg_hid("bogus close delimiter\n");
+ hid_err(parser->device, "bogus close delimiter\n");
return -1;
}
parser->local.delimiter_depth--;
@@ -506,7 +507,7 @@ static int hid_parser_main(struct hid_parser *parser, struct hid_item *item)
ret = hid_add_field(parser, HID_FEATURE_REPORT, data);
break;
default:
- dbg_hid("unknown main item tag 0x%x\n", item->tag);
+ hid_err(parser->device, "unknown main item tag 0x%x\n", item->tag);
ret = 0;
}
@@ -678,12 +679,12 @@ int hid_parse_report(struct hid_device *device, __u8 *start,
while ((start = fetch_item(start, end, &item)) != NULL) {
if (item.format != HID_ITEM_FORMAT_SHORT) {
- dbg_hid("unexpected long global item\n");
+ hid_err(device, "unexpected long global item\n");
goto err;
}
if (dispatch_type[item.type](parser, &item)) {
- dbg_hid("item %u %u %u %u parsing failed\n",
+ hid_err(device, "item %u %u %u %u parsing failed\n",
item.format, (unsigned)item.size,
(unsigned)item.type, (unsigned)item.tag);
goto err;
@@ -691,11 +692,11 @@ int hid_parse_report(struct hid_device *device, __u8 *start,
if (start == end) {
if (parser->collection_stack_ptr) {
- dbg_hid("unbalanced collection at end of report description\n");
+ hid_err(device, "unbalanced collection at end of report description\n");
goto err;
}
if (parser->local.delimiter_depth) {
- dbg_hid("unbalanced delimiter at end of report description\n");
+ hid_err(device, "unbalanced delimiter at end of report description\n");
goto err;
}
vfree(parser);
@@ -703,7 +704,7 @@ int hid_parse_report(struct hid_device *device, __u8 *start,
}
}
- dbg_hid("item fetching failed at offset %d\n", (int)(end - start));
+ hid_err(device, "item fetching failed at offset %d\n", (int)(end - start));
err:
vfree(parser);
return ret;
@@ -873,7 +874,7 @@ static void hid_process_event(struct hid_device *hid, struct hid_field *field,
ret = hdrv->event(hid, field, usage, value);
if (ret != 0) {
if (ret < 0)
- dbg_hid("%s's event failed with %d\n",
+ hid_err(hid, "%s's event failed with %d\n",
hdrv->name, ret);
return;
}
@@ -995,12 +996,13 @@ int hid_set_field(struct hid_field *field, unsigned offset, __s32 value)
hid_dump_input(field->report->device, field->usage + offset, value);
if (offset >= field->report_count) {
- dbg_hid("offset (%d) exceeds report_count (%d)\n", offset, field->report_count);
+ hid_err(field->report->device, "offset (%d) exceeds report_count (%d)\n",
+ offset, field->report_count);
return -1;
}
if (field->logical_minimum < 0) {
if (value != snto32(s32ton(value, size), size)) {
- dbg_hid("value %d is out of range\n", value);
+ hid_err(field->report->device, "value %d is out of range\n", value);
return -1;
}
}
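
hid_set_field() rejects a value for a signed field when it does not survive an n-bit truncate/sign-extend round trip (s32ton() followed by snto32()). A self-contained sketch of that round-trip test follows; demo_s32ton()/demo_snto32() are stand-ins for the kernel helpers, written here only to show the idea.

#include <stdio.h>
#include <stdint.h>

/* truncate a signed value to n bits (n = 1..32) */
static uint32_t demo_s32ton(int32_t value, unsigned int n)
{
	return (uint32_t)value & (n < 32 ? (1u << n) - 1 : 0xffffffffu);
}

/* sign-extend an n-bit field back to 32 bits */
static int32_t demo_snto32(uint32_t value, unsigned int n)
{
	if (n < 32 && (value & (1u << (n - 1))))
		value |= ~((1u << n) - 1);	/* propagate the sign bit */
	return (int32_t)value;
}

int main(void)
{
	unsigned int bits = 8;
	int32_t ok = -5, bad = 300;

	/* a value fits a signed n-bit field iff the round trip is lossless */
	printf("%d fits: %d\n", ok, demo_snto32(demo_s32ton(ok, bits), bits) == ok);
	printf("%d fits: %d\n", bad, demo_snto32(demo_s32ton(bad, bits), bits) == bad);
	return 0;
}
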
@@ -1157,7 +1159,7 @@ static bool hid_match_one_id(struct hid_device *hdev,
(id->product == HID_ANY_ID || id->product == hdev->product);
}
-static const struct hid_device_id *hid_match_id(struct hid_device *hdev,
+const struct hid_device_id *hid_match_id(struct hid_device *hdev,
const struct hid_device_id *id)
{
for (; id->bus; id++)
@@ -1404,11 +1406,13 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_TRUETOUCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0011) },
- { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH) },
- { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1) },
- { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH3) },
- { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH4) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480D) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480E) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_720C) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_726B) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72A1) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7302) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2515) },
{ HID_USB_DEVICE(USB_VENDOR_ID_EMS, USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II) },
@@ -1423,6 +1427,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) },
{ HID_USB_DEVICE(USB_VENDOR_ID_HANVON, USB_DEVICE_ID_HANVON_MULTITOUCH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HANVON_ALT, USB_DEVICE_ID_HANVON_ALT_MULTITOUCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_IDEACOM, USB_DEVICE_ID_IDEACOM_IDC6650) },
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK, USB_DEVICE_ID_HOLTEK_ON_LINE_GRIP) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ILITEK, USB_DEVICE_ID_ILITEK_MULTITOUCH) },
@@ -1498,11 +1503,15 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) },
{ HID_USB_DEVICE(USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_PCI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_KEYBOARD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ARVO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ISKU) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPLUS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KOVAPLUS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_PYRA_WIRED) },
@@ -1544,11 +1553,21 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_DUAL_BOX_PRO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_5_PRO) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_INTUOS4_BLUETOOTH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_5_8_INCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_12_1_INCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_XAT, USB_DEVICE_ID_XAT_CSR) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_SPX) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_MPX) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_CSR) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_SPX1) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_MPX1) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_CSR1) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_SPX2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_MPX2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_CSR2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_X_TENSIONS, USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) },
@@ -1768,11 +1787,12 @@ static const struct hid_device_id hid_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EARTHMATE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EM_LT20) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, 0x0004) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, 0x000a) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0001) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0002) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0003) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0004) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_4_PHIDGETSERVO_30) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_1_PHIDGETSERVO_30) },
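
One hunk above drops the static qualifier from hid_match_id() so that hid-input.c can consult its new battery-quirk table. The lookup itself is a wildcard walk over a sentinel-terminated ID table; here is a standalone sketch of that pattern. ANY_ID, demo_id and the one-entry table are invented for illustration; the 05ac:0255 pair merely mirrors the Apple keyboard quirk entry added later in this diff.

#include <stdio.h>
#include <stdint.h>

#define ANY_ID 0xffff

struct demo_id {
	uint16_t bus, vendor, product;
	unsigned long driver_data;
};

/* table is terminated by an all-zero entry (bus == 0) */
static const struct demo_id quirk_table[] = {
	{ 0x0005 /* bluetooth */, 0x05ac, 0x0255, 0x3 /* PERCENT | FEATURE */ },
	{ 0 }
};

static const struct demo_id *demo_match_id(uint16_t bus, uint16_t vendor,
					    uint16_t product,
					    const struct demo_id *id)
{
	for (; id->bus; id++)
		if ((id->bus == ANY_ID || id->bus == bus) &&
		    (id->vendor == ANY_ID || id->vendor == vendor) &&
		    (id->product == ANY_ID || id->product == product))
			return id;
	return NULL;
}

int main(void)
{
	const struct demo_id *m = demo_match_id(0x0005, 0x05ac, 0x0255, quirk_table);

	printf("quirks: %lu\n", m ? m->driver_data : 0);
	return 0;
}
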
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index ee80d733801d..01dd9a7daf7a 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -114,6 +114,14 @@ static const struct hid_usage_entry hid_usage_table[] = {
{0, 0xbd, "FlareRelease"},
{0, 0xbe, "LandingGear"},
{0, 0xbf, "ToeBrake"},
+ { 6, 0, "GenericDeviceControls" },
+ {0, 0x20, "BatteryStrength" },
+ {0, 0x21, "WirelessChannel" },
+ {0, 0x22, "WirelessID" },
+ {0, 0x23, "DiscoverWirelessControl" },
+ {0, 0x24, "SecurityCodeCharacterEntered" },
+ {0, 0x25, "SecurityCodeCharactedErased" },
+ {0, 0x26, "SecurityCodeCleared" },
{ 7, 0, "Keyboard" },
{ 8, 0, "LED" },
{0, 0x01, "NumLock"},
diff --git a/drivers/hid/hid-emsff.c b/drivers/hid/hid-emsff.c
index 9bdde867a02f..2630d483d262 100644
--- a/drivers/hid/hid-emsff.c
+++ b/drivers/hid/hid-emsff.c
@@ -140,7 +140,7 @@ err:
}
static const struct hid_device_id ems_devices[] = {
- { HID_USB_DEVICE(USB_VENDOR_ID_EMS, 0x118) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_EMS, USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II) },
{ }
};
MODULE_DEVICE_TABLE(hid, ems_devices);
diff --git a/drivers/staging/hv/hv_mouse.c b/drivers/hid/hid-hyperv.c
index ccd39c70c527..0c33ae9cf0f0 100644
--- a/drivers/staging/hv/hv_mouse.c
+++ b/drivers/hid/hid-hyperv.c
@@ -14,11 +14,8 @@
*/
#include <linux/init.h>
#include <linux/module.h>
-#include <linux/delay.h>
#include <linux/device.h>
-#include <linux/workqueue.h>
-#include <linux/sched.h>
-#include <linux/wait.h>
+#include <linux/completion.h>
#include <linux/input.h>
#include <linux/hid.h>
#include <linux/hiddev.h>
@@ -118,7 +115,6 @@ struct synthhid_input_report {
#define INPUTVSC_SEND_RING_BUFFER_SIZE (10*PAGE_SIZE)
#define INPUTVSC_RECV_RING_BUFFER_SIZE (10*PAGE_SIZE)
-#define NBITS(x) (((x)/BITS_PER_LONG)+1)
enum pipe_prot_msg_type {
PIPE_MESSAGE_INVALID,
@@ -148,7 +144,8 @@ struct mousevsc_prt_msg {
*/
struct mousevsc_dev {
struct hv_device *device;
- unsigned char init_complete;
+ bool init_complete;
+ bool connected;
struct mousevsc_prt_msg protocol_req;
struct mousevsc_prt_msg protocol_resp;
/* Synchronize the request/response if needed */
@@ -159,12 +156,11 @@ struct mousevsc_dev {
unsigned char *report_desc;
u32 report_desc_size;
struct hv_input_dev_info hid_dev_info;
- int connected;
struct hid_device *hid_device;
};
-static struct mousevsc_dev *alloc_input_device(struct hv_device *device)
+static struct mousevsc_dev *mousevsc_alloc_device(struct hv_device *device)
{
struct mousevsc_dev *input_dev;
@@ -176,11 +172,12 @@ static struct mousevsc_dev *alloc_input_device(struct hv_device *device)
input_dev->device = device;
hv_set_drvdata(device, input_dev);
init_completion(&input_dev->wait_event);
+ input_dev->init_complete = false;
return input_dev;
}
-static void free_input_device(struct mousevsc_dev *device)
+static void mousevsc_free_device(struct mousevsc_dev *device)
{
kfree(device->hid_desc);
kfree(device->report_desc);
@@ -188,7 +185,6 @@ static void free_input_device(struct mousevsc_dev *device)
kfree(device);
}
-
static void mousevsc_on_receive_device_info(struct mousevsc_dev *input_device,
struct synthhid_device_info *device_info)
{
@@ -196,14 +192,12 @@ static void mousevsc_on_receive_device_info(struct mousevsc_dev *input_device,
struct hid_descriptor *desc;
struct mousevsc_prt_msg ack;
- /* Assume success for now */
- input_device->dev_info_status = 0;
-
- memcpy(&input_device->hid_dev_info, &device_info->hid_dev_info,
- sizeof(struct hv_input_dev_info));
+ input_device->dev_info_status = -ENOMEM;
+ input_device->hid_dev_info = device_info->hid_dev_info;
desc = &device_info->hid_descriptor;
- WARN_ON(desc->bLength == 0);
+ if (desc->bLength == 0)
+ goto cleanup;
input_device->hid_desc = kzalloc(desc->bLength, GFP_ATOMIC);
@@ -213,13 +207,18 @@ static void mousevsc_on_receive_device_info(struct mousevsc_dev *input_device,
memcpy(input_device->hid_desc, desc, desc->bLength);
input_device->report_desc_size = desc->desc[0].wDescriptorLength;
- if (input_device->report_desc_size == 0)
+ if (input_device->report_desc_size == 0) {
+ input_device->dev_info_status = -EINVAL;
goto cleanup;
+ }
+
input_device->report_desc = kzalloc(input_device->report_desc_size,
GFP_ATOMIC);
- if (!input_device->report_desc)
+ if (!input_device->report_desc) {
+ input_device->dev_info_status = -ENOMEM;
goto cleanup;
+ }
memcpy(input_device->report_desc,
((unsigned char *)desc) + desc->bLength,
@@ -242,22 +241,14 @@ static void mousevsc_on_receive_device_info(struct mousevsc_dev *input_device,
(unsigned long)&ack,
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
- if (ret != 0)
- goto cleanup;
-
- complete(&input_device->wait_event);
- return;
+ if (!ret)
+ input_device->dev_info_status = 0;
cleanup:
- kfree(input_device->hid_desc);
- input_device->hid_desc = NULL;
-
- kfree(input_device->report_desc);
- input_device->report_desc = NULL;
-
- input_device->dev_info_status = -1;
complete(&input_device->wait_event);
+
+ return;
}
static void mousevsc_on_receive(struct hv_device *device,
@@ -274,10 +265,22 @@ static void mousevsc_on_receive(struct hv_device *device,
if (pipe_msg->type != PIPE_MESSAGE_DATA)
return;
- hid_msg = (struct synthhid_msg *)&pipe_msg->data[0];
+ hid_msg = (struct synthhid_msg *)pipe_msg->data;
switch (hid_msg->header.type) {
case SYNTH_HID_PROTOCOL_RESPONSE:
+ /*
+ * While it will be impossible for us to protect against
+ * malicious/buggy hypervisor/host, add a check here to
+ * ensure we don't corrupt memory.
+ */
+ if ((pipe_msg->size + sizeof(struct pipe_prt_msg)
+ - sizeof(unsigned char))
+ > sizeof(struct mousevsc_prt_msg)) {
+ WARN_ON(1);
+ break;
+ }
+
memcpy(&input_dev->protocol_resp, pipe_msg,
pipe_msg->size + sizeof(struct pipe_prt_msg) -
sizeof(unsigned char));
@@ -292,11 +295,11 @@ static void mousevsc_on_receive(struct hv_device *device,
* hid desc and report desc
*/
mousevsc_on_receive_device_info(input_dev,
- (struct synthhid_device_info *)&pipe_msg->data[0]);
+ (struct synthhid_device_info *)pipe_msg->data);
break;
case SYNTH_HID_INPUT_REPORT:
input_report =
- (struct synthhid_input_report *)&pipe_msg->data[0];
+ (struct synthhid_input_report *)pipe_msg->data;
if (!input_dev->init_complete)
break;
hid_input_report(input_dev->hid_device,
@@ -313,73 +316,60 @@ static void mousevsc_on_receive(struct hv_device *device,
static void mousevsc_on_channel_callback(void *context)
{
- const int packetSize = 0x100;
- int ret = 0;
- struct hv_device *device = (struct hv_device *)context;
-
+ const int packet_size = 0x100;
+ int ret;
+ struct hv_device *device = context;
u32 bytes_recvd;
u64 req_id;
- unsigned char packet[0x100];
struct vmpacket_descriptor *desc;
- unsigned char *buffer = packet;
- int bufferlen = packetSize;
+ unsigned char *buffer;
+ int bufferlen = packet_size;
+ buffer = kmalloc(bufferlen, GFP_ATOMIC);
+ if (!buffer)
+ return;
do {
ret = vmbus_recvpacket_raw(device->channel, buffer,
bufferlen, &bytes_recvd, &req_id);
- if (ret == 0) {
- if (bytes_recvd > 0) {
- desc = (struct vmpacket_descriptor *)buffer;
-
- switch (desc->type) {
- case VM_PKT_COMP:
- break;
-
- case VM_PKT_DATA_INBAND:
- mousevsc_on_receive(
- device, desc);
- break;
-
- default:
- pr_err("unhandled packet type %d, tid %llx len %d\n",
- desc->type,
- req_id,
- bytes_recvd);
- break;
- }
-
- /* reset */
- if (bufferlen > packetSize) {
- kfree(buffer);
-
- buffer = packet;
- bufferlen = packetSize;
- }
- } else {
- if (bufferlen > packetSize) {
- kfree(buffer);
-
- buffer = packet;
- bufferlen = packetSize;
- }
+ switch (ret) {
+ case 0:
+ if (bytes_recvd <= 0) {
+ kfree(buffer);
+ return;
+ }
+ desc = (struct vmpacket_descriptor *)buffer;
+
+ switch (desc->type) {
+ case VM_PKT_COMP:
+ break;
+
+ case VM_PKT_DATA_INBAND:
+ mousevsc_on_receive(device, desc);
+ break;
+
+ default:
+ pr_err("unhandled packet type %d, tid %llx len %d\n",
+ desc->type, req_id, bytes_recvd);
break;
}
- } else if (ret == -ENOBUFS) {
+
+ break;
+
+ case -ENOBUFS:
+ kfree(buffer);
/* Handle large packet */
bufferlen = bytes_recvd;
- buffer = kzalloc(bytes_recvd, GFP_ATOMIC);
+ buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
- if (buffer == NULL) {
- buffer = packet;
- bufferlen = packetSize;
- break;
- }
+ if (!buffer)
+ return;
+
+ break;
}
} while (1);
- return;
}
static int mousevsc_connect_to_vsp(struct hv_device *device)
@@ -390,19 +380,15 @@ static int mousevsc_connect_to_vsp(struct hv_device *device)
struct mousevsc_prt_msg *request;
struct mousevsc_prt_msg *response;
-
request = &input_dev->protocol_req;
-
memset(request, 0, sizeof(struct mousevsc_prt_msg));
request->type = PIPE_MESSAGE_DATA;
request->size = sizeof(struct synthhid_protocol_request);
-
request->request.header.type = SYNTH_HID_PROTOCOL_REQUEST;
request->request.header.size = sizeof(unsigned int);
request->request.version_requested.version = SYNTHHID_INPUT_VERSION;
-
ret = vmbus_sendpacket(device->channel, request,
sizeof(struct pipe_prt_msg) -
sizeof(unsigned char) +
@@ -410,11 +396,11 @@ static int mousevsc_connect_to_vsp(struct hv_device *device)
(unsigned long)request,
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
- if (ret != 0)
+ if (ret)
goto cleanup;
t = wait_for_completion_timeout(&input_dev->wait_event, 5*HZ);
- if (t == 0) {
+ if (!t) {
ret = -ETIMEDOUT;
goto cleanup;
}
@@ -422,14 +408,14 @@ static int mousevsc_connect_to_vsp(struct hv_device *device)
response = &input_dev->protocol_resp;
if (!response->response.approved) {
- pr_err("synthhid protocol request failed (version %d)",
+ pr_err("synthhid protocol request failed (version %d)\n",
SYNTHHID_INPUT_VERSION);
ret = -ENODEV;
goto cleanup;
}
t = wait_for_completion_timeout(&input_dev->wait_event, 5*HZ);
- if (t == 0) {
+ if (!t) {
ret = -ETIMEDOUT;
goto cleanup;
}
@@ -438,11 +424,9 @@ static int mousevsc_connect_to_vsp(struct hv_device *device)
* We should have gotten the device attr, hid desc and report
* desc at this point
*/
- if (input_dev->dev_info_status)
- ret = -ENOMEM;
+ ret = input_dev->dev_info_status;
cleanup:
-
return ret;
}
@@ -451,61 +435,40 @@ static int mousevsc_hid_open(struct hid_device *hid)
return 0;
}
+static int mousevsc_hid_start(struct hid_device *hid)
+{
+ return 0;
+}
+
static void mousevsc_hid_close(struct hid_device *hid)
{
}
+static void mousevsc_hid_stop(struct hid_device *hid)
+{
+}
+
static struct hid_ll_driver mousevsc_ll_driver = {
.open = mousevsc_hid_open,
.close = mousevsc_hid_close,
+ .start = mousevsc_hid_start,
+ .stop = mousevsc_hid_stop,
};
static struct hid_driver mousevsc_hid_driver;
-static void reportdesc_callback(struct hv_device *dev, void *packet, u32 len)
-{
- struct hid_device *hid_dev;
- struct mousevsc_dev *input_device = hv_get_drvdata(dev);
-
- hid_dev = hid_allocate_device();
- if (IS_ERR(hid_dev))
- return;
-
- hid_dev->ll_driver = &mousevsc_ll_driver;
- hid_dev->driver = &mousevsc_hid_driver;
-
- if (hid_parse_report(hid_dev, packet, len))
- return;
-
- hid_dev->bus = BUS_VIRTUAL;
- hid_dev->vendor = input_device->hid_dev_info.vendor;
- hid_dev->product = input_device->hid_dev_info.product;
- hid_dev->version = input_device->hid_dev_info.version;
-
- sprintf(hid_dev->name, "%s", "Microsoft Vmbus HID-compliant Mouse");
-
- if (!hidinput_connect(hid_dev, 0)) {
- hid_dev->claimed |= HID_CLAIMED_INPUT;
-
- input_device->connected = 1;
-
- }
-
- input_device->hid_device = hid_dev;
-}
-
-static int mousevsc_on_device_add(struct hv_device *device)
+static int mousevsc_probe(struct hv_device *device,
+ const struct hv_vmbus_device_id *dev_id)
{
- int ret = 0;
+ int ret;
struct mousevsc_dev *input_dev;
+ struct hid_device *hid_dev;
- input_dev = alloc_input_device(device);
+ input_dev = mousevsc_alloc_device(device);
if (!input_dev)
return -ENOMEM;
- input_dev->init_complete = false;
-
ret = vmbus_open(device->channel,
INPUTVSC_SEND_RING_BUFFER_SIZE,
INPUTVSC_RECV_RING_BUFFER_SIZE,
@@ -515,54 +478,78 @@ static int mousevsc_on_device_add(struct hv_device *device)
device
);
- if (ret != 0) {
- free_input_device(input_dev);
- return ret;
- }
-
+ if (ret)
+ goto probe_err0;
ret = mousevsc_connect_to_vsp(device);
- if (ret != 0) {
- vmbus_close(device->channel);
- free_input_device(input_dev);
- return ret;
- }
-
+ if (ret)
+ goto probe_err1;
/* workaround SA-167 */
if (input_dev->report_desc[14] == 0x25)
input_dev->report_desc[14] = 0x29;
- reportdesc_callback(device, input_dev->report_desc,
- input_dev->report_desc_size);
+ hid_dev = hid_allocate_device();
+ if (IS_ERR(hid_dev)) {
+ ret = PTR_ERR(hid_dev);
+ goto probe_err1;
+ }
+
+ hid_dev->ll_driver = &mousevsc_ll_driver;
+ hid_dev->driver = &mousevsc_hid_driver;
+ hid_dev->bus = BUS_VIRTUAL;
+ hid_dev->vendor = input_dev->hid_dev_info.vendor;
+ hid_dev->product = input_dev->hid_dev_info.product;
+ hid_dev->version = input_dev->hid_dev_info.version;
+ input_dev->hid_device = hid_dev;
+
+ sprintf(hid_dev->name, "%s", "Microsoft Vmbus HID-compliant Mouse");
+
+ ret = hid_add_device(hid_dev);
+ if (ret)
+ goto probe_err1;
+
+ ret = hid_parse_report(hid_dev, input_dev->report_desc,
+ input_dev->report_desc_size);
+
+ if (ret) {
+ hid_err(hid_dev, "parse failed\n");
+ goto probe_err2;
+ }
+ ret = hid_hw_start(hid_dev, HID_CONNECT_HIDINPUT | HID_CONNECT_HIDDEV);
+
+ if (ret) {
+ hid_err(hid_dev, "hw start failed\n");
+ goto probe_err2;
+ }
+
+ input_dev->connected = true;
input_dev->init_complete = true;
return ret;
-}
-static int mousevsc_probe(struct hv_device *dev,
- const struct hv_vmbus_device_id *dev_id)
-{
+probe_err2:
+ hid_destroy_device(hid_dev);
- return mousevsc_on_device_add(dev);
+probe_err1:
+ vmbus_close(device->channel);
+probe_err0:
+ mousevsc_free_device(input_dev);
+
+ return ret;
}
+
static int mousevsc_remove(struct hv_device *dev)
{
struct mousevsc_dev *input_dev = hv_get_drvdata(dev);
vmbus_close(dev->channel);
-
- if (input_dev->connected) {
- hidinput_disconnect(input_dev->hid_device);
- input_dev->connected = 0;
- hid_destroy_device(input_dev->hid_device);
- }
-
- free_input_device(input_dev);
+ hid_destroy_device(input_dev->hid_device);
+ mousevsc_free_device(input_dev);
return 0;
}
@@ -577,7 +564,7 @@ static const struct hv_vmbus_device_id id_table[] = {
MODULE_DEVICE_TABLE(vmbus, id_table);
static struct hv_driver mousevsc_drv = {
- .name = "mousevsc",
+ .name = KBUILD_MODNAME,
.id_table = id_table,
.probe = mousevsc_probe,
.remove = mousevsc_remove,
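
A key hardening change in the hv_mouse.c to hid-hyperv.c move is the size check added in mousevsc_on_receive() before copying a host-supplied protocol response into a fixed-size buffer. The same guard as a standalone sketch; the struct layouts and demo_* names below are invented for illustration, not the driver's real types.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct demo_pipe_msg {
	uint32_t type;
	uint32_t size;		/* payload size claimed by the host */
	unsigned char data[64];	/* raw bytes read from the ring buffer */
};

struct demo_resp { unsigned char payload[16]; };

static int demo_handle_response(const struct demo_pipe_msg *msg,
				struct demo_resp *resp)
{
	/* reject anything that would overflow the fixed-size response buffer */
	if (msg->size > sizeof(resp->payload))
		return -1;
	memcpy(resp->payload, msg->data, msg->size);
	return 0;
}

int main(void)
{
	struct demo_pipe_msg msg = { .type = 1, .size = 200 };	/* hostile size claim */
	struct demo_resp resp;

	printf("copy %s\n", demo_handle_response(&msg, &resp) ? "rejected" : "done");
	return 0;
}
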
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 06ce996b8b65..b8574cddd953 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -21,6 +21,7 @@
#define USB_VENDOR_ID_3M 0x0596
#define USB_DEVICE_ID_3M1968 0x0500
#define USB_DEVICE_ID_3M2256 0x0502
+#define USB_DEVICE_ID_3M3266 0x0506
#define USB_VENDOR_ID_A4TECH 0x09da
#define USB_DEVICE_ID_A4TECH_WCP32PU 0x0006
@@ -124,6 +125,7 @@
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI 0x0239
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO 0x023a
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS 0x023b
+#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI 0x0255
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO 0x0256
#define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY 0x030a
#define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY 0x030b
@@ -145,6 +147,9 @@
#define USB_DEVICE_ID_ATEN_4PORTKVM 0x2205
#define USB_DEVICE_ID_ATEN_4PORTKVMC 0x2208
+#define USB_VENDOR_ID_ATMEL 0x03eb
+#define USB_DEVICE_ID_ATMEL_MULTITOUCH 0x211c
+
#define USB_VENDOR_ID_AVERMEDIA 0x07ca
#define USB_DEVICE_ID_AVER_FM_MR800 0xb800
@@ -230,11 +235,14 @@
#define USB_VENDOR_ID_DWAV 0x0eef
#define USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER 0x0001
-#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH 0x480d
-#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1 0x720c
-#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH2 0x72a1
-#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH3 0x480e
-#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH4 0x726b
+#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480D 0x480d
+#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480E 0x480e
+#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_720C 0x720c
+#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_726B 0x726b
+#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72A1 0x72a1
+#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72FA 0x72fa
+#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7302 0x7302
+#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001 0xa001
#define USB_VENDOR_ID_ELECOM 0x056e
#define USB_DEVICE_ID_ELECOM_BM084 0x0061
@@ -266,7 +274,7 @@
#define USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR 0x0002
#define USB_VENDOR_ID_GENERAL_TOUCH 0x0dfc
-#define USB_DEVICE_ID_GENERAL_TOUCH_WIN7_TWOFINGERS 0x0001
+#define USB_DEVICE_ID_GENERAL_TOUCH_WIN7_TWOFINGERS 0x0003
#define USB_VENDOR_ID_GLAB 0x06c2
#define USB_DEVICE_ID_4_PHIDGETSERVO_30 0x0038
@@ -356,6 +364,9 @@
#define USB_VENDOR_ID_HANVON 0x20b3
#define USB_DEVICE_ID_HANVON_MULTITOUCH 0x0a18
+#define USB_VENDOR_ID_HANVON_ALT 0x22ed
+#define USB_DEVICE_ID_HANVON_ALT_MULTITOUCH 0x1010
+
#define USB_VENDOR_ID_HAPP 0x078b
#define USB_DEVICE_ID_UGCI_DRIVING 0x0010
#define USB_DEVICE_ID_UGCI_FLYING 0x0020
@@ -571,6 +582,11 @@
#define USB_VENDOR_ID_PI_ENGINEERING 0x05f3
#define USB_DEVICE_ID_PI_ENGINEERING_VEC_USB_FOOTPEDAL 0xff
+#define USB_VENDOR_ID_PIXART 0x093a
+#define USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN 0x8001
+#define USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1 0x8002
+#define USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2 0x8003
+
#define USB_VENDOR_ID_PLAYDOTCOM 0x0b43
#define USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII 0x0003
@@ -581,11 +597,14 @@
#define USB_DEVICE_ID_PRODIGE_CORDLESS 0x3062
#define USB_VENDOR_ID_QUANTA 0x0408
-#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH 0x3000
+#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH 0x3000
+#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001 0x3001
+#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008 0x3008
#define USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN 0x3001
#define USB_VENDOR_ID_ROCCAT 0x1e7d
#define USB_DEVICE_ID_ROCCAT_ARVO 0x30d4
+#define USB_DEVICE_ID_ROCCAT_ISKU 0x319c
#define USB_DEVICE_ID_ROCCAT_KONE 0x2ced
#define USB_DEVICE_ID_ROCCAT_KONEPLUS 0x2d51
#define USB_DEVICE_ID_ROCCAT_KOVAPLUS 0x2d50
@@ -679,6 +698,7 @@
#define USB_VENDOR_ID_WACOM 0x056a
#define USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH 0x81
+#define USB_DEVICE_ID_WACOM_INTUOS4_BLUETOOTH 0x00BD
#define USB_VENDOR_ID_WALTOP 0x172f
#define USB_DEVICE_ID_WALTOP_SLIM_TABLET_5_8_INCH 0x0032
@@ -707,6 +727,17 @@
#define USB_VENDOR_ID_XAT 0x2505
#define USB_DEVICE_ID_XAT_CSR 0x0220
+#define USB_VENDOR_ID_XIROKU 0x1477
+#define USB_DEVICE_ID_XIROKU_SPX 0x1006
+#define USB_DEVICE_ID_XIROKU_MPX 0x1007
+#define USB_DEVICE_ID_XIROKU_CSR 0x100e
+#define USB_DEVICE_ID_XIROKU_SPX1 0x1021
+#define USB_DEVICE_ID_XIROKU_CSR1 0x1022
+#define USB_DEVICE_ID_XIROKU_MPX1 0x1023
+#define USB_DEVICE_ID_XIROKU_SPX2 0x1024
+#define USB_DEVICE_ID_XIROKU_CSR2 0x1025
+#define USB_DEVICE_ID_XIROKU_MPX2 0x1026
+
#define USB_VENDOR_ID_YEALINK 0x6993
#define USB_DEVICE_ID_YEALINK_P1K_P4K_B2K 0xb001
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index f333139d1a48..9333d692a786 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -32,6 +32,8 @@
#include <linux/hid.h>
#include <linux/hid-debug.h>
+#include "hid-ids.h"
+
#define unk KEY_UNKNOWN
static const unsigned char hid_keyboard[256] = {
@@ -271,6 +273,161 @@ static __s32 hidinput_calc_abs_res(const struct hid_field *field, __u16 code)
return logical_extents / physical_extents;
}
+#ifdef CONFIG_HID_BATTERY_STRENGTH
+static enum power_supply_property hidinput_battery_props[] = {
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_MODEL_NAME,
+ POWER_SUPPLY_PROP_STATUS
+};
+
+#define HID_BATTERY_QUIRK_PERCENT (1 << 0) /* always reports percent */
+#define HID_BATTERY_QUIRK_FEATURE (1 << 1) /* ask for feature report */
+
+static const struct hid_device_id hid_battery_quirks[] = {
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
+ USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI),
+ HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
+ {}
+};
+
+static unsigned find_battery_quirk(struct hid_device *hdev)
+{
+ unsigned quirks = 0;
+ const struct hid_device_id *match;
+
+ match = hid_match_id(hdev, hid_battery_quirks);
+ if (match != NULL)
+ quirks = match->driver_data;
+
+ return quirks;
+}
+
+static int hidinput_get_battery_property(struct power_supply *psy,
+ enum power_supply_property prop,
+ union power_supply_propval *val)
+{
+ struct hid_device *dev = container_of(psy, struct hid_device, battery);
+ int ret = 0;
+ __u8 buf[2] = {};
+
+ switch (prop) {
+ case POWER_SUPPLY_PROP_PRESENT:
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = 1;
+ break;
+
+ case POWER_SUPPLY_PROP_CAPACITY:
+ ret = dev->hid_get_raw_report(dev, dev->battery_report_id,
+ buf, sizeof(buf),
+ dev->battery_report_type);
+
+ if (ret != 2) {
+ if (ret >= 0)
+ ret = -EINVAL;
+ break;
+ }
+
+ if (dev->battery_min < dev->battery_max &&
+ buf[1] >= dev->battery_min &&
+ buf[1] <= dev->battery_max)
+ val->intval = (100 * (buf[1] - dev->battery_min)) /
+ (dev->battery_max - dev->battery_min);
+ break;
+
+ case POWER_SUPPLY_PROP_MODEL_NAME:
+ val->strval = dev->name;
+ break;
+
+ case POWER_SUPPLY_PROP_STATUS:
+ val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static bool hidinput_setup_battery(struct hid_device *dev, unsigned report_type, struct hid_field *field)
+{
+ struct power_supply *battery = &dev->battery;
+ int ret;
+ unsigned quirks;
+ s32 min, max;
+
+ if (field->usage->hid != HID_DC_BATTERYSTRENGTH)
+ return false; /* no match */
+
+ if (battery->name != NULL)
+ goto out; /* already initialized? */
+
+ battery->name = kasprintf(GFP_KERNEL, "hid-%s-battery", dev->uniq);
+ if (battery->name == NULL)
+ goto out;
+
+ battery->type = POWER_SUPPLY_TYPE_BATTERY;
+ battery->properties = hidinput_battery_props;
+ battery->num_properties = ARRAY_SIZE(hidinput_battery_props);
+ battery->use_for_apm = 0;
+ battery->get_property = hidinput_get_battery_property;
+
+ quirks = find_battery_quirk(dev);
+
+ hid_dbg(dev, "device %x:%x:%x %d quirks %d\n",
+ dev->bus, dev->vendor, dev->product, dev->version, quirks);
+
+ min = field->logical_minimum;
+ max = field->logical_maximum;
+
+ if (quirks & HID_BATTERY_QUIRK_PERCENT) {
+ min = 0;
+ max = 100;
+ }
+
+ if (quirks & HID_BATTERY_QUIRK_FEATURE)
+ report_type = HID_FEATURE_REPORT;
+
+ dev->battery_min = min;
+ dev->battery_max = max;
+ dev->battery_report_type = report_type;
+ dev->battery_report_id = field->report->id;
+
+ ret = power_supply_register(&dev->dev, battery);
+ if (ret != 0) {
+ hid_warn(dev, "can't register power supply: %d\n", ret);
+ kfree(battery->name);
+ battery->name = NULL;
+ }
+
+out:
+ return true;
+}
+
+static void hidinput_cleanup_battery(struct hid_device *dev)
+{
+ if (!dev->battery.name)
+ return;
+
+ power_supply_unregister(&dev->battery);
+ kfree(dev->battery.name);
+ dev->battery.name = NULL;
+}
+#else /* !CONFIG_HID_BATTERY_STRENGTH */
+static bool hidinput_setup_battery(struct hid_device *dev, unsigned report_type,
+ struct hid_field *field)
+{
+ return false;
+}
+
+static void hidinput_cleanup_battery(struct hid_device *dev)
+{
+}
+#endif /* CONFIG_HID_BATTERY_STRENGTH */
+
static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_field *field,
struct hid_usage *usage)
{
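
The new battery support rescales the raw BatteryStrength reading into a 0-100 capacity using the field's logical minimum/maximum, or treats the value as a percentage directly when HID_BATTERY_QUIRK_PERCENT forces min=0, max=100. A hedged standalone sketch of that arithmetic; the clamping policy shown is illustrative rather than the kernel's exact behaviour.

#include <stdio.h>

/* Map a raw reading into 0..100 given the report's logical range.
 * Mirrors the scaling in hidinput_get_battery_property(). */
static int demo_capacity(int raw, int min, int max)
{
	if (min >= max)
		return -1;	/* bogus descriptor */
	if (raw < min || raw > max)
		return -1;	/* out of range: report nothing */
	return (100 * (raw - min)) / (max - min);
}

int main(void)
{
	printf("%d%%\n", demo_capacity(190, 0, 255));	/* ~74% */
	printf("%d%%\n", demo_capacity(42, 0, 100));	/* quirk: already percent */
	return 0;
}
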
@@ -629,6 +786,13 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
}
break;
+ case HID_UP_GENDEVCTRLS:
+ if (hidinput_setup_battery(device, HID_INPUT_REPORT, field))
+ goto ignore;
+ else
+ goto unknown;
+ break;
+
case HID_UP_HPVENDOR: /* Reported on a Dutch layout HP5308 */
set_bit(EV_REP, input->evbit);
switch (usage->hid & HID_USAGE) {
@@ -822,6 +986,12 @@ void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct
return;
}
+ /* Ignore out-of-range values as per HID specification, section 5.10 */
+ if (value < field->logical_minimum || value > field->logical_maximum) {
+ dbg_hid("Ignoring out-of-range value %x\n", value);
+ return;
+ }
+
/* report the usage code as scancode if the key status has changed */
if (usage->type == EV_KEY && !!test_bit(usage->code, input->key) != value)
input_event(input, EV_MSC, MSC_SCAN, usage->hid);
@@ -861,6 +1031,48 @@ int hidinput_find_field(struct hid_device *hid, unsigned int type, unsigned int
}
EXPORT_SYMBOL_GPL(hidinput_find_field);
+struct hid_field *hidinput_get_led_field(struct hid_device *hid)
+{
+ struct hid_report *report;
+ struct hid_field *field;
+ int i, j;
+
+ list_for_each_entry(report,
+ &hid->report_enum[HID_OUTPUT_REPORT].report_list,
+ list) {
+ for (i = 0; i < report->maxfield; i++) {
+ field = report->field[i];
+ for (j = 0; j < field->maxusage; j++)
+ if (field->usage[j].type == EV_LED)
+ return field;
+ }
+ }
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(hidinput_get_led_field);
+
+unsigned int hidinput_count_leds(struct hid_device *hid)
+{
+ struct hid_report *report;
+ struct hid_field *field;
+ int i, j;
+ unsigned int count = 0;
+
+ list_for_each_entry(report,
+ &hid->report_enum[HID_OUTPUT_REPORT].report_list,
+ list) {
+ for (i = 0; i < report->maxfield; i++) {
+ field = report->field[i];
+ for (j = 0; j < field->maxusage; j++)
+ if (field->usage[j].type == EV_LED &&
+ field->value[j])
+ count += 1;
+ }
+ }
+ return count;
+}
+EXPORT_SYMBOL_GPL(hidinput_count_leds);
+
static int hidinput_open(struct input_dev *dev)
{
struct hid_device *hid = input_get_drvdata(dev);
@@ -882,15 +1094,17 @@ static void report_features(struct hid_device *hid)
struct hid_report *rep;
int i, j;
- if (!drv->feature_mapping)
- return;
-
rep_enum = &hid->report_enum[HID_FEATURE_REPORT];
list_for_each_entry(rep, &rep_enum->report_list, list)
for (i = 0; i < rep->maxfield; i++)
- for (j = 0; j < rep->field[i]->maxusage; j++)
- drv->feature_mapping(hid, rep->field[i],
- rep->field[i]->usage + j);
+ for (j = 0; j < rep->field[i]->maxusage; j++) {
+ /* Verify if Battery Strength feature is available */
+ hidinput_setup_battery(hid, HID_FEATURE_REPORT, rep->field[i]);
+
+ if (drv->feature_mapping)
+ drv->feature_mapping(hid, rep->field[i],
+ rep->field[i]->usage + j);
+ }
}
/*
@@ -1010,6 +1224,8 @@ void hidinput_disconnect(struct hid_device *hid)
{
struct hid_input *hidinput, *next;
+ hidinput_cleanup_battery(hid);
+
list_for_each_entry_safe(hidinput, next, &hid->inputs, list) {
list_del(&hidinput->list);
input_unregister_device(hidinput->input);
diff --git a/drivers/hid/hid-lg4ff.c b/drivers/hid/hid-lg4ff.c
index 103f30d93f76..6ecc9e220440 100644
--- a/drivers/hid/hid-lg4ff.c
+++ b/drivers/hid/hid-lg4ff.c
@@ -430,7 +430,7 @@ int lg4ff_init(struct hid_device *hid)
}
/* Add the device to device_list */
- entry = (struct lg4ff_device_entry *)kzalloc(sizeof(struct lg4ff_device_entry), GFP_KERNEL);
+ entry = kzalloc(sizeof(struct lg4ff_device_entry), GFP_KERNEL);
if (!entry) {
hid_err(hid, "Cannot add device, insufficient memory.\n");
return -ENOMEM;
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index f1c909f1b239..24fc4423b937 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -50,7 +50,6 @@ MODULE_LICENSE("GPL");
#define MT_QUIRK_ALWAYS_VALID (1 << 4)
#define MT_QUIRK_VALID_IS_INRANGE (1 << 5)
#define MT_QUIRK_VALID_IS_CONFIDENCE (1 << 6)
-#define MT_QUIRK_EGALAX_XYZ_FIXUP (1 << 7)
#define MT_QUIRK_SLOT_IS_CONTACTID_MINUS_ONE (1 << 8)
struct mt_slot {
@@ -60,9 +59,19 @@ struct mt_slot {
bool seen_in_this_frame;/* has this slot been updated */
};
+struct mt_class {
+ __s32 name; /* MT_CLS */
+ __s32 quirks;
+ __s32 sn_move; /* Signal/noise ratio for move events */
+ __s32 sn_width; /* Signal/noise ratio for width events */
+ __s32 sn_height; /* Signal/noise ratio for height events */
+ __s32 sn_pressure; /* Signal/noise ratio for pressure events */
+ __u8 maxcontacts;
+};
+
struct mt_device {
struct mt_slot curdata; /* placeholder of incoming data */
- struct mt_class *mtclass; /* our mt device class */
+ struct mt_class mtclass; /* our mt device class */
unsigned last_field_index; /* last field index of the report */
unsigned last_slot_field; /* the last field of a slot */
int last_mt_collection; /* last known mt-related collection */
@@ -74,30 +83,23 @@ struct mt_device {
struct mt_slot *slots;
};
-struct mt_class {
- __s32 name; /* MT_CLS */
- __s32 quirks;
- __s32 sn_move; /* Signal/noise ratio for move events */
- __s32 sn_width; /* Signal/noise ratio for width events */
- __s32 sn_height; /* Signal/noise ratio for height events */
- __s32 sn_pressure; /* Signal/noise ratio for pressure events */
- __u8 maxcontacts;
-};
-
/* classes of device behavior */
#define MT_CLS_DEFAULT 0x0001
#define MT_CLS_SERIAL 0x0002
#define MT_CLS_CONFIDENCE 0x0003
-#define MT_CLS_CONFIDENCE_MINUS_ONE 0x0004
-#define MT_CLS_DUAL_INRANGE_CONTACTID 0x0005
-#define MT_CLS_DUAL_INRANGE_CONTACTNUMBER 0x0006
-#define MT_CLS_DUAL_NSMU_CONTACTID 0x0007
+#define MT_CLS_CONFIDENCE_CONTACT_ID 0x0004
+#define MT_CLS_CONFIDENCE_MINUS_ONE 0x0005
+#define MT_CLS_DUAL_INRANGE_CONTACTID 0x0006
+#define MT_CLS_DUAL_INRANGE_CONTACTNUMBER 0x0007
+#define MT_CLS_DUAL_NSMU_CONTACTID 0x0008
+#define MT_CLS_INRANGE_CONTACTNUMBER 0x0009
/* vendor specific classes */
#define MT_CLS_3M 0x0101
#define MT_CLS_CYPRESS 0x0102
#define MT_CLS_EGALAX 0x0103
+#define MT_CLS_EGALAX_SERIAL 0x0104
#define MT_DEFAULT_MAXCONTACT 10
@@ -133,13 +135,16 @@ static int find_slot_from_contactid(struct mt_device *td)
return -1;
}
-struct mt_class mt_classes[] = {
+static struct mt_class mt_classes[] = {
{ .name = MT_CLS_DEFAULT,
.quirks = MT_QUIRK_NOT_SEEN_MEANS_UP },
{ .name = MT_CLS_SERIAL,
.quirks = MT_QUIRK_ALWAYS_VALID},
{ .name = MT_CLS_CONFIDENCE,
.quirks = MT_QUIRK_VALID_IS_CONFIDENCE },
+ { .name = MT_CLS_CONFIDENCE_CONTACT_ID,
+ .quirks = MT_QUIRK_VALID_IS_CONFIDENCE |
+ MT_QUIRK_SLOT_IS_CONTACTID },
{ .name = MT_CLS_CONFIDENCE_MINUS_ONE,
.quirks = MT_QUIRK_VALID_IS_CONFIDENCE |
MT_QUIRK_SLOT_IS_CONTACTID_MINUS_ONE },
@@ -155,6 +160,9 @@ struct mt_class mt_classes[] = {
.quirks = MT_QUIRK_NOT_SEEN_MEANS_UP |
MT_QUIRK_SLOT_IS_CONTACTID,
.maxcontacts = 2 },
+ { .name = MT_CLS_INRANGE_CONTACTNUMBER,
+ .quirks = MT_QUIRK_VALID_IS_INRANGE |
+ MT_QUIRK_SLOT_IS_CONTACTNUMBER },
/*
* vendor specific classes
@@ -171,9 +179,13 @@ struct mt_class mt_classes[] = {
.maxcontacts = 10 },
{ .name = MT_CLS_EGALAX,
.quirks = MT_QUIRK_SLOT_IS_CONTACTID |
- MT_QUIRK_VALID_IS_INRANGE |
- MT_QUIRK_EGALAX_XYZ_FIXUP,
- .maxcontacts = 2,
+ MT_QUIRK_VALID_IS_INRANGE,
+ .sn_move = 4096,
+ .sn_pressure = 32,
+ },
+ { .name = MT_CLS_EGALAX_SERIAL,
+ .quirks = MT_QUIRK_SLOT_IS_CONTACTID |
+ MT_QUIRK_ALWAYS_VALID,
.sn_move = 4096,
.sn_pressure = 32,
},
@@ -181,6 +193,44 @@ struct mt_class mt_classes[] = {
{ }
};
+static ssize_t mt_show_quirks(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct mt_device *td = hid_get_drvdata(hdev);
+
+ return sprintf(buf, "%u\n", td->mtclass.quirks);
+}
+
+static ssize_t mt_set_quirks(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct mt_device *td = hid_get_drvdata(hdev);
+
+ unsigned long val;
+
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+
+ td->mtclass.quirks = val;
+
+ return count;
+}
+
+static DEVICE_ATTR(quirks, S_IWUSR | S_IRUGO, mt_show_quirks, mt_set_quirks);
+
+static struct attribute *sysfs_attrs[] = {
+ &dev_attr_quirks.attr,
+ NULL
+};
+
+static struct attribute_group mt_attribute_group = {
+ .attrs = sysfs_attrs
+};
+
static void mt_feature_mapping(struct hid_device *hdev,
struct hid_field *field, struct hid_usage *usage)
{
@@ -192,9 +242,9 @@ static void mt_feature_mapping(struct hid_device *hdev,
break;
case HID_DG_CONTACTMAX:
td->maxcontacts = field->value[0];
- if (td->mtclass->maxcontacts)
+ if (td->mtclass.maxcontacts)
/* check if the maxcontacts is given by the class */
- td->maxcontacts = td->mtclass->maxcontacts;
+ td->maxcontacts = td->mtclass.maxcontacts;
break;
}
@@ -214,8 +264,7 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
unsigned long **bit, int *max)
{
struct mt_device *td = hid_get_drvdata(hdev);
- struct mt_class *cls = td->mtclass;
- __s32 quirks = cls->quirks;
+ struct mt_class *cls = &td->mtclass;
/* Only map fields from TouchScreen or TouchPad collections.
* We need to ignore fields that belong to other collections
@@ -227,13 +276,17 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
else
return 0;
+ /* eGalax devices provide a Digitizer.Stylus input which overrides
+ * the correct Digitizers.Finger X/Y ranges.
+ * Let's just ignore this input. */
+ if (field->physical == HID_DG_STYLUS)
+ return -1;
+
switch (usage->hid & HID_USAGE_PAGE) {
case HID_UP_GENDESK:
switch (usage->hid) {
case HID_GD_X:
- if (quirks & MT_QUIRK_EGALAX_XYZ_FIXUP)
- field->logical_maximum = 32760;
hid_map_usage(hi, usage, bit, max,
EV_ABS, ABS_MT_POSITION_X);
set_abs(hi->input, ABS_MT_POSITION_X, field,
@@ -246,8 +299,6 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
}
return 1;
case HID_GD_Y:
- if (quirks & MT_QUIRK_EGALAX_XYZ_FIXUP)
- field->logical_maximum = 32760;
hid_map_usage(hi, usage, bit, max,
EV_ABS, ABS_MT_POSITION_Y);
set_abs(hi->input, ABS_MT_POSITION_Y, field,
@@ -315,8 +366,6 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
}
return 1;
case HID_DG_TIPPRESSURE:
- if (quirks & MT_QUIRK_EGALAX_XYZ_FIXUP)
- field->logical_minimum = 0;
hid_map_usage(hi, usage, bit, max,
EV_ABS, ABS_MT_PRESSURE);
set_abs(hi->input, ABS_MT_PRESSURE, field,
@@ -363,7 +412,7 @@ static int mt_input_mapped(struct hid_device *hdev, struct hid_input *hi,
static int mt_compute_slot(struct mt_device *td)
{
- __s32 quirks = td->mtclass->quirks;
+ __s32 quirks = td->mtclass.quirks;
if (quirks & MT_QUIRK_SLOT_IS_CONTACTID)
return td->curdata.contactid;
@@ -407,7 +456,7 @@ static void mt_emit_event(struct mt_device *td, struct input_dev *input)
for (i = 0; i < td->maxcontacts; ++i) {
struct mt_slot *s = &(td->slots[i]);
- if ((td->mtclass->quirks & MT_QUIRK_NOT_SEEN_MEANS_UP) &&
+ if ((td->mtclass.quirks & MT_QUIRK_NOT_SEEN_MEANS_UP) &&
!s->seen_in_this_frame) {
s->touch_state = false;
}
@@ -444,7 +493,7 @@ static int mt_event(struct hid_device *hid, struct hid_field *field,
struct hid_usage *usage, __s32 value)
{
struct mt_device *td = hid_get_drvdata(hid);
- __s32 quirks = td->mtclass->quirks;
+ __s32 quirks = td->mtclass.quirks;
if (hid->claimed & HID_CLAIMED_INPUT && td->slots) {
switch (usage->hid) {
@@ -552,7 +601,7 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
dev_err(&hdev->dev, "cannot allocate multitouch data\n");
return -ENOMEM;
}
- td->mtclass = mtclass;
+ td->mtclass = *mtclass;
td->inputmode = -1;
td->last_mt_collection = -1;
hid_set_drvdata(hdev, td);
@@ -574,6 +623,8 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
goto fail;
}
+ ret = sysfs_create_group(&hdev->dev.kobj, &mt_attribute_group);
+
mt_set_input_mode(hdev);
return 0;
@@ -594,6 +645,7 @@ static int mt_reset_resume(struct hid_device *hdev)
static void mt_remove(struct hid_device *hdev)
{
struct mt_device *td = hid_get_drvdata(hdev);
+ sysfs_remove_group(&hdev->dev.kobj, &mt_attribute_group);
hid_hw_stop(hdev);
kfree(td->slots);
kfree(td);
@@ -609,12 +661,20 @@ static const struct hid_device_id mt_devices[] = {
{ .driver_data = MT_CLS_3M,
HID_USB_DEVICE(USB_VENDOR_ID_3M,
USB_DEVICE_ID_3M2256) },
+ { .driver_data = MT_CLS_3M,
+ HID_USB_DEVICE(USB_VENDOR_ID_3M,
+ USB_DEVICE_ID_3M3266) },
/* ActionStar panels */
{ .driver_data = MT_CLS_DEFAULT,
HID_USB_DEVICE(USB_VENDOR_ID_ACTIONSTAR,
USB_DEVICE_ID_ACTIONSTAR_1011) },
+ /* Atmel panels */
+ { .driver_data = MT_CLS_SERIAL,
+ HID_USB_DEVICE(USB_VENDOR_ID_ATMEL,
+ USB_DEVICE_ID_ATMEL_MULTITOUCH) },
+
/* Cando panels */
{ .driver_data = MT_CLS_DUAL_INRANGE_CONTACTNUMBER,
HID_USB_DEVICE(USB_VENDOR_ID_CANDO,
@@ -645,23 +705,32 @@ static const struct hid_device_id mt_devices[] = {
USB_DEVICE_ID_CYPRESS_TRUETOUCH) },
/* eGalax devices (resistive) */
- { .driver_data = MT_CLS_EGALAX,
+ { .driver_data = MT_CLS_EGALAX,
HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
- USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH) },
- { .driver_data = MT_CLS_EGALAX,
+ USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480D) },
+ { .driver_data = MT_CLS_EGALAX,
HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
- USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH3) },
+ USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480E) },
/* eGalax devices (capacitive) */
- { .driver_data = MT_CLS_EGALAX,
+ { .driver_data = MT_CLS_EGALAX,
HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
- USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1) },
- { .driver_data = MT_CLS_EGALAX,
+ USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_720C) },
+ { .driver_data = MT_CLS_EGALAX,
HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
- USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH2) },
- { .driver_data = MT_CLS_EGALAX,
+ USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_726B) },
+ { .driver_data = MT_CLS_EGALAX,
HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
- USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH4) },
+ USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72A1) },
+ { .driver_data = MT_CLS_EGALAX,
+ HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
+ USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72FA) },
+ { .driver_data = MT_CLS_EGALAX,
+ HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
+ USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7302) },
+ { .driver_data = MT_CLS_EGALAX_SERIAL,
+ HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
+ USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001) },
/* Elo TouchSystems IntelliTouch Plus panel */
{ .driver_data = MT_CLS_DUAL_NSMU_CONTACTID,
@@ -678,6 +747,11 @@ static const struct hid_device_id mt_devices[] = {
HID_USB_DEVICE(USB_VENDOR_ID_GOODTOUCH,
USB_DEVICE_ID_GOODTOUCH_000f) },
+ /* Hanvon panels */
+ { .driver_data = MT_CLS_DUAL_INRANGE_CONTACTID,
+ HID_USB_DEVICE(USB_VENDOR_ID_HANVON_ALT,
+ USB_DEVICE_ID_HANVON_ALT_MULTITOUCH) },
+
/* Ideacom panel */
{ .driver_data = MT_CLS_SERIAL,
HID_USB_DEVICE(USB_VENDOR_ID_IDEACOM,
@@ -722,6 +796,17 @@ static const struct hid_device_id mt_devices[] = {
HID_USB_DEVICE(USB_VENDOR_ID_PENMOUNT,
USB_DEVICE_ID_PENMOUNT_PCI) },
+ /* PixArt optical touch screen */
+ { .driver_data = MT_CLS_INRANGE_CONTACTNUMBER,
+ HID_USB_DEVICE(USB_VENDOR_ID_PIXART,
+ USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN) },
+ { .driver_data = MT_CLS_INRANGE_CONTACTNUMBER,
+ HID_USB_DEVICE(USB_VENDOR_ID_PIXART,
+ USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1) },
+ { .driver_data = MT_CLS_INRANGE_CONTACTNUMBER,
+ HID_USB_DEVICE(USB_VENDOR_ID_PIXART,
+ USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2) },
+
/* PixCir-based panels */
{ .driver_data = MT_CLS_DUAL_INRANGE_CONTACTID,
HID_USB_DEVICE(USB_VENDOR_ID_HANVON,
@@ -730,6 +815,17 @@ static const struct hid_device_id mt_devices[] = {
HID_USB_DEVICE(USB_VENDOR_ID_CANDO,
USB_DEVICE_ID_CANDO_PIXCIR_MULTI_TOUCH) },
+ /* Quanta-based panels */
+ { .driver_data = MT_CLS_CONFIDENCE_CONTACT_ID,
+ HID_USB_DEVICE(USB_VENDOR_ID_QUANTA,
+ USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH) },
+ { .driver_data = MT_CLS_CONFIDENCE_CONTACT_ID,
+ HID_USB_DEVICE(USB_VENDOR_ID_QUANTA,
+ USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001) },
+ { .driver_data = MT_CLS_CONFIDENCE_CONTACT_ID,
+ HID_USB_DEVICE(USB_VENDOR_ID_QUANTA,
+ USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008) },
+
/* Stantum panels */
{ .driver_data = MT_CLS_CONFIDENCE,
HID_USB_DEVICE(USB_VENDOR_ID_STANTUM,
@@ -758,6 +854,35 @@ static const struct hid_device_id mt_devices[] = {
HID_USB_DEVICE(USB_VENDOR_ID_XAT,
USB_DEVICE_ID_XAT_CSR) },
+ /* Xiroku */
+ { .driver_data = MT_CLS_DEFAULT,
+ HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
+ USB_DEVICE_ID_XIROKU_SPX) },
+ { .driver_data = MT_CLS_DEFAULT,
+ HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
+ USB_DEVICE_ID_XIROKU_MPX) },
+ { .driver_data = MT_CLS_DEFAULT,
+ HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
+ USB_DEVICE_ID_XIROKU_CSR) },
+ { .driver_data = MT_CLS_DEFAULT,
+ HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
+ USB_DEVICE_ID_XIROKU_SPX1) },
+ { .driver_data = MT_CLS_DEFAULT,
+ HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
+ USB_DEVICE_ID_XIROKU_MPX1) },
+ { .driver_data = MT_CLS_DEFAULT,
+ HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
+ USB_DEVICE_ID_XIROKU_CSR1) },
+ { .driver_data = MT_CLS_DEFAULT,
+ HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
+ USB_DEVICE_ID_XIROKU_SPX2) },
+ { .driver_data = MT_CLS_DEFAULT,
+ HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
+ USB_DEVICE_ID_XIROKU_MPX2) },
+ { .driver_data = MT_CLS_DEFAULT,
+ HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
+ USB_DEVICE_ID_XIROKU_CSR2) },
+
{ }
};
MODULE_DEVICE_TABLE(hid, mt_devices);
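
Most of the multitouch classes added above differ only in how a contact is matched to an input slot: by contact id, by contact number, or by the search that find_slot_from_contactid() performs. A standalone sketch of the search-based variant follows; the array size and demo_* names are illustrative only.

#include <stdio.h>
#include <stdbool.h>

#define MAXCONTACTS 10

struct demo_slot { bool touch_state; int contactid; };

static struct demo_slot slots[MAXCONTACTS];

/* Reuse the slot already tracking this contact id, otherwise grab a free one. */
static int demo_find_slot(int contactid)
{
	int i;

	for (i = 0; i < MAXCONTACTS; i++)
		if (slots[i].contactid == contactid && slots[i].touch_state)
			return i;
	for (i = 0; i < MAXCONTACTS; i++)
		if (!slots[i].touch_state)
			return i;
	return -1;	/* more contacts than slots */
}

int main(void)
{
	int s = demo_find_slot(57);

	if (s < 0)
		return 1;
	slots[s].contactid = 57;
	slots[s].touch_state = true;
	printf("contact 57 -> slot %d, again -> slot %d\n", s, demo_find_slot(57));
	return 0;
}
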
diff --git a/drivers/hid/hid-picolcd.c b/drivers/hid/hid-picolcd.c
index 01e7d2cd7c26..12f9777c385d 100644
--- a/drivers/hid/hid-picolcd.c
+++ b/drivers/hid/hid-picolcd.c
@@ -633,7 +633,7 @@ struct picolcd_fb_cleanup_item {
struct picolcd_fb_cleanup_item *next;
};
static struct picolcd_fb_cleanup_item *fb_pending;
-DEFINE_SPINLOCK(fb_pending_lock);
+static DEFINE_SPINLOCK(fb_pending_lock);
static void picolcd_fb_do_cleanup(struct work_struct *data)
{
@@ -658,7 +658,7 @@ static void picolcd_fb_do_cleanup(struct work_struct *data)
} while (item);
}
-DECLARE_WORK(picolcd_fb_cleanup, picolcd_fb_do_cleanup);
+static DECLARE_WORK(picolcd_fb_cleanup, picolcd_fb_do_cleanup);
static int picolcd_fb_open(struct fb_info *info, int u)
{
diff --git a/drivers/hid/hid-pl.c b/drivers/hid/hid-pl.c
index 070f93a5c11b..47ed74c46b6b 100644
--- a/drivers/hid/hid-pl.c
+++ b/drivers/hid/hid-pl.c
@@ -9,10 +9,10 @@
* - contains two reports, one for each port (HID_QUIRK_MULTI_INPUT)
*
* 0e8f:0003 "GreenAsia Inc. USB Joystick "
- * - tested with Knig Gaming gamepad
+ * - tested with König Gaming gamepad
*
* 0e8f:0003 "GASIA USB Gamepad"
- * - another version of the Knig gamepad
+ * - another version of the König gamepad
*
* Copyright (c) 2007, 2009 Anssi Hannula <anssi.hannula@gmail.com>
*/
diff --git a/drivers/hid/hid-prodikeys.c b/drivers/hid/hid-prodikeys.c
index f779009104eb..b71b77ab0dc7 100644
--- a/drivers/hid/hid-prodikeys.c
+++ b/drivers/hid/hid-prodikeys.c
@@ -90,7 +90,7 @@ static const char longname[] = "Prodikeys PC-MIDI Keyboard";
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;
static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;
-static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP;
+static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP;
module_param_array(index, int, NULL, 0444);
module_param_array(id, charp, NULL, 0444);
diff --git a/drivers/hid/hid-quanta.c b/drivers/hid/hid-quanta.c
deleted file mode 100644
index 87a54df4d4ac..000000000000
--- a/drivers/hid/hid-quanta.c
+++ /dev/null
@@ -1,261 +0,0 @@
-/*
- * HID driver for Quanta Optical Touch dual-touch panels
- *
- * Copyright (c) 2009-2010 Stephane Chatty <chatty@enac.fr>
- *
- */
-
-/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- */
-
-#include <linux/device.h>
-#include <linux/hid.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-
-MODULE_AUTHOR("Stephane Chatty <chatty@enac.fr>");
-MODULE_DESCRIPTION("Quanta dual-touch panel");
-MODULE_LICENSE("GPL");
-
-#include "hid-ids.h"
-
-struct quanta_data {
- __u16 x, y;
- __u8 id;
- bool valid; /* valid finger data, or just placeholder? */
- bool first; /* is this the first finger in this frame? */
- bool activity_now; /* at least one active finger in this frame? */
- bool activity; /* at least one active finger previously? */
-};
-
-static int quanta_input_mapping(struct hid_device *hdev, struct hid_input *hi,
- struct hid_field *field, struct hid_usage *usage,
- unsigned long **bit, int *max)
-{
- switch (usage->hid & HID_USAGE_PAGE) {
-
- case HID_UP_GENDESK:
- switch (usage->hid) {
- case HID_GD_X:
- hid_map_usage(hi, usage, bit, max,
- EV_ABS, ABS_MT_POSITION_X);
- /* touchscreen emulation */
- input_set_abs_params(hi->input, ABS_X,
- field->logical_minimum,
- field->logical_maximum, 0, 0);
- return 1;
- case HID_GD_Y:
- hid_map_usage(hi, usage, bit, max,
- EV_ABS, ABS_MT_POSITION_Y);
- /* touchscreen emulation */
- input_set_abs_params(hi->input, ABS_Y,
- field->logical_minimum,
- field->logical_maximum, 0, 0);
- return 1;
- }
- return 0;
-
- case HID_UP_DIGITIZER:
- switch (usage->hid) {
- case HID_DG_CONFIDENCE:
- case HID_DG_TIPSWITCH:
- case HID_DG_INPUTMODE:
- case HID_DG_DEVICEINDEX:
- case HID_DG_CONTACTCOUNT:
- case HID_DG_CONTACTMAX:
- case HID_DG_TIPPRESSURE:
- case HID_DG_WIDTH:
- case HID_DG_HEIGHT:
- return -1;
- case HID_DG_INRANGE:
- /* touchscreen emulation */
- hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_TOUCH);
- return 1;
- case HID_DG_CONTACTID:
- hid_map_usage(hi, usage, bit, max,
- EV_ABS, ABS_MT_TRACKING_ID);
- return 1;
- }
- return 0;
-
- case 0xff000000:
- /* ignore vendor-specific features */
- return -1;
- }
-
- return 0;
-}
-
-static int quanta_input_mapped(struct hid_device *hdev, struct hid_input *hi,
- struct hid_field *field, struct hid_usage *usage,
- unsigned long **bit, int *max)
-{
- if (usage->type == EV_KEY || usage->type == EV_ABS)
- clear_bit(usage->code, *bit);
-
- return 0;
-}
-
-/*
- * this function is called when a whole finger has been parsed,
- * so that it can decide what to send to the input layer.
- */
-static void quanta_filter_event(struct quanta_data *td, struct input_dev *input)
-{
-
- td->first = !td->first; /* touchscreen emulation */
-
- if (!td->valid) {
- /*
- * touchscreen emulation: if no finger in this frame is valid
- * and there previously was finger activity, this is a release
- */
- if (!td->first && !td->activity_now && td->activity) {
- input_event(input, EV_KEY, BTN_TOUCH, 0);
- td->activity = false;
- }
- return;
- }
-
- input_event(input, EV_ABS, ABS_MT_TRACKING_ID, td->id);
- input_event(input, EV_ABS, ABS_MT_POSITION_X, td->x);
- input_event(input, EV_ABS, ABS_MT_POSITION_Y, td->y);
-
- input_mt_sync(input);
- td->valid = false;
-
- /* touchscreen emulation: if first active finger in this frame... */
- if (!td->activity_now) {
- /* if there was no previous activity, emit touch event */
- if (!td->activity) {
- input_event(input, EV_KEY, BTN_TOUCH, 1);
- td->activity = true;
- }
- td->activity_now = true;
- /* and in any case this is our preferred finger */
- input_event(input, EV_ABS, ABS_X, td->x);
- input_event(input, EV_ABS, ABS_Y, td->y);
- }
-}
-
-
-static int quanta_event(struct hid_device *hid, struct hid_field *field,
- struct hid_usage *usage, __s32 value)
-{
- struct quanta_data *td = hid_get_drvdata(hid);
-
- if (hid->claimed & HID_CLAIMED_INPUT) {
- struct input_dev *input = field->hidinput->input;
-
- switch (usage->hid) {
- case HID_DG_INRANGE:
- td->valid = !!value;
- break;
- case HID_GD_X:
- td->x = value;
- break;
- case HID_GD_Y:
- td->y = value;
- quanta_filter_event(td, input);
- break;
- case HID_DG_CONTACTID:
- td->id = value;
- break;
- case HID_DG_CONTACTCOUNT:
- /* touch emulation: this is the last field in a frame */
- td->first = false;
- td->activity_now = false;
- break;
- case HID_DG_CONFIDENCE:
- case HID_DG_TIPSWITCH:
- /* avoid interference from generic hidinput handling */
- break;
-
- default:
- /* fallback to the generic hidinput handling */
- return 0;
- }
- }
-
- /* we have handled the hidinput part, now remains hiddev */
- if (hid->claimed & HID_CLAIMED_HIDDEV && hid->hiddev_hid_event)
- hid->hiddev_hid_event(hid, field, usage, value);
-
- return 1;
-}
-
-static int quanta_probe(struct hid_device *hdev, const struct hid_device_id *id)
-{
- int ret;
- struct quanta_data *td;
-
- td = kmalloc(sizeof(struct quanta_data), GFP_KERNEL);
- if (!td) {
- hid_err(hdev, "cannot allocate Quanta Touch data\n");
- return -ENOMEM;
- }
- td->valid = false;
- td->activity = false;
- td->activity_now = false;
- td->first = false;
- hid_set_drvdata(hdev, td);
-
- ret = hid_parse(hdev);
- if (!ret)
- ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
-
- if (ret)
- kfree(td);
-
- return ret;
-}
-
-static void quanta_remove(struct hid_device *hdev)
-{
- hid_hw_stop(hdev);
- kfree(hid_get_drvdata(hdev));
- hid_set_drvdata(hdev, NULL);
-}
-
-static const struct hid_device_id quanta_devices[] = {
- { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA,
- USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH) },
- { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA,
- USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN) },
- { }
-};
-MODULE_DEVICE_TABLE(hid, quanta_devices);
-
-static const struct hid_usage_id quanta_grabbed_usages[] = {
- { HID_ANY_ID, HID_ANY_ID, HID_ANY_ID },
- { HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1}
-};
-
-static struct hid_driver quanta_driver = {
- .name = "quanta-touch",
- .id_table = quanta_devices,
- .probe = quanta_probe,
- .remove = quanta_remove,
- .input_mapping = quanta_input_mapping,
- .input_mapped = quanta_input_mapped,
- .usage_table = quanta_grabbed_usages,
- .event = quanta_event,
-};
-
-static int __init quanta_init(void)
-{
- return hid_register_driver(&quanta_driver);
-}
-
-static void __exit quanta_exit(void)
-{
- hid_unregister_driver(&quanta_driver);
-}
-
-module_init(quanta_init);
-module_exit(quanta_exit);
-
diff --git a/drivers/hid/hid-roccat-common.c b/drivers/hid/hid-roccat-common.c
index b07e7f96a358..a6d93992c75a 100644
--- a/drivers/hid/hid-roccat-common.c
+++ b/drivers/hid/hid-roccat-common.c
@@ -49,12 +49,10 @@ int roccat_common_send(struct usb_device *usb_dev, uint report_id,
char *buf;
int len;
- buf = kmalloc(size, GFP_KERNEL);
+ buf = kmemdup(data, size, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
- memcpy(buf, data, size);
-
len = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
HID_REQ_SET_REPORT,
USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
diff --git a/drivers/hid/hid-roccat-isku.c b/drivers/hid/hid-roccat-isku.c
new file mode 100644
index 000000000000..0e4a0ab47142
--- /dev/null
+++ b/drivers/hid/hid-roccat-isku.c
@@ -0,0 +1,487 @@
+/*
+ * Roccat Isku driver for Linux
+ *
+ * Copyright (c) 2011 Stefan Achatz <erazor_de@users.sourceforge.net>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+/*
+ * Roccat Isku is a gamer keyboard with macro keys that can be configured in
+ * 5 profiles.
+ */
+
+#include <linux/device.h>
+#include <linux/input.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/hid-roccat.h>
+#include "hid-ids.h"
+#include "hid-roccat-common.h"
+#include "hid-roccat-isku.h"
+
+static struct class *isku_class;
+
+static void isku_profile_activated(struct isku_device *isku, uint new_profile)
+{
+ isku->actual_profile = new_profile;
+}
+
+static int isku_receive(struct usb_device *usb_dev, uint command,
+ void *buf, uint size)
+{
+ return roccat_common_receive(usb_dev, command, buf, size);
+}
+
+static int isku_receive_control_status(struct usb_device *usb_dev)
+{
+ int retval;
+ struct isku_control control;
+
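+	/* poll every 50 ms until the keyboard reports OK or an error status */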
+ do {
+ msleep(50);
+ retval = isku_receive(usb_dev, ISKU_COMMAND_CONTROL,
+ &control, sizeof(struct isku_control));
+
+ if (retval)
+ return retval;
+
+ switch (control.value) {
+ case ISKU_CONTROL_VALUE_STATUS_OK:
+ return 0;
+ case ISKU_CONTROL_VALUE_STATUS_WAIT:
+ continue;
+ case ISKU_CONTROL_VALUE_STATUS_INVALID:
+ /* seems to be critical - replug necessary */
+ case ISKU_CONTROL_VALUE_STATUS_OVERLOAD:
+ return -EINVAL;
+ default:
+ hid_err(usb_dev, "isku_receive_control_status: "
+ "unknown response value 0x%x\n",
+ control.value);
+ return -EINVAL;
+ }
+
+ } while (1);
+}
+
+static int isku_send(struct usb_device *usb_dev, uint command,
+ void const *buf, uint size)
+{
+ int retval;
+
+ retval = roccat_common_send(usb_dev, command, buf, size);
+ if (retval)
+ return retval;
+
+ return isku_receive_control_status(usb_dev);
+}
+
+static int isku_get_actual_profile(struct usb_device *usb_dev)
+{
+ struct isku_actual_profile buf;
+ int retval;
+
+ retval = isku_receive(usb_dev, ISKU_COMMAND_ACTUAL_PROFILE,
+ &buf, sizeof(struct isku_actual_profile));
+ return retval ? retval : buf.actual_profile;
+}
+
+static int isku_set_actual_profile(struct usb_device *usb_dev, int new_profile)
+{
+ struct isku_actual_profile buf;
+
+ buf.command = ISKU_COMMAND_ACTUAL_PROFILE;
+ buf.size = sizeof(struct isku_actual_profile);
+ buf.actual_profile = new_profile;
+ return isku_send(usb_dev, ISKU_COMMAND_ACTUAL_PROFILE, &buf,
+ sizeof(struct isku_actual_profile));
+}
+
+static ssize_t isku_sysfs_show_actual_profile(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct isku_device *isku =
+ hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
+ return snprintf(buf, PAGE_SIZE, "%d\n", isku->actual_profile);
+}
+
+static ssize_t isku_sysfs_set_actual_profile(struct device *dev,
+ struct device_attribute *attr, char const *buf, size_t size)
+{
+ struct isku_device *isku;
+ struct usb_device *usb_dev;
+ unsigned long profile;
+ int retval;
+ struct isku_roccat_report roccat_report;
+
+ dev = dev->parent->parent;
+ isku = hid_get_drvdata(dev_get_drvdata(dev));
+ usb_dev = interface_to_usbdev(to_usb_interface(dev));
+
+ retval = strict_strtoul(buf, 10, &profile);
+ if (retval)
+ return retval;
+
+ if (profile > 4)
+ return -EINVAL;
+
+ mutex_lock(&isku->isku_lock);
+
+ retval = isku_set_actual_profile(usb_dev, profile);
+ if (retval) {
+ mutex_unlock(&isku->isku_lock);
+ return retval;
+ }
+
+ isku_profile_activated(isku, profile);
+
+ roccat_report.event = ISKU_REPORT_BUTTON_EVENT_PROFILE;
+ roccat_report.data1 = profile + 1;
+ roccat_report.data2 = 0;
+ roccat_report.profile = profile + 1;
+ roccat_report_event(isku->chrdev_minor, (uint8_t const *)&roccat_report);
+
+ mutex_unlock(&isku->isku_lock);
+
+ return size;
+}
+
+static struct device_attribute isku_attributes[] = {
+ __ATTR(actual_profile, 0660,
+ isku_sysfs_show_actual_profile,
+ isku_sysfs_set_actual_profile),
+ __ATTR_NULL
+};
+
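+/* Generic sysfs helpers: each binary attribute maps to one device struct and
+ * must be transferred in a single full-sized read or write. */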
+static ssize_t isku_sysfs_read(struct file *fp, struct kobject *kobj,
+ char *buf, loff_t off, size_t count,
+ size_t real_size, uint command)
+{
+ struct device *dev =
+ container_of(kobj, struct device, kobj)->parent->parent;
+ struct isku_device *isku = hid_get_drvdata(dev_get_drvdata(dev));
+ struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+ int retval;
+
+ if (off >= real_size)
+ return 0;
+
+ if (off != 0 || count != real_size)
+ return -EINVAL;
+
+ mutex_lock(&isku->isku_lock);
+ retval = isku_receive(usb_dev, command, buf, real_size);
+ mutex_unlock(&isku->isku_lock);
+
+ return retval ? retval : real_size;
+}
+
+static ssize_t isku_sysfs_write(struct file *fp, struct kobject *kobj,
+ void const *buf, loff_t off, size_t count,
+ size_t real_size, uint command)
+{
+ struct device *dev =
+ container_of(kobj, struct device, kobj)->parent->parent;
+ struct isku_device *isku = hid_get_drvdata(dev_get_drvdata(dev));
+ struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+ int retval;
+
+ if (off != 0 || count != real_size)
+ return -EINVAL;
+
+ mutex_lock(&isku->isku_lock);
+ retval = isku_send(usb_dev, command, (void *)buf, real_size);
+ mutex_unlock(&isku->isku_lock);
+
+ return retval ? retval : real_size;
+}
+
+#define ISKU_SYSFS_W(thingy, THINGY) \
+static ssize_t isku_sysfs_write_ ## thingy(struct file *fp, struct kobject *kobj, \
+ struct bin_attribute *attr, char *buf, \
+ loff_t off, size_t count) \
+{ \
+ return isku_sysfs_write(fp, kobj, buf, off, count, \
+ sizeof(struct isku_ ## thingy), ISKU_COMMAND_ ## THINGY); \
+}
+
+#define ISKU_SYSFS_R(thingy, THINGY) \
+static ssize_t isku_sysfs_read_ ## thingy(struct file *fp, struct kobject *kobj, \
+ struct bin_attribute *attr, char *buf, \
+ loff_t off, size_t count) \
+{ \
+ return isku_sysfs_read(fp, kobj, buf, off, count, \
+ sizeof(struct isku_ ## thingy), ISKU_COMMAND_ ## THINGY); \
+}
+
+#define ISKU_SYSFS_RW(thingy, THINGY) \
+ISKU_SYSFS_R(thingy, THINGY) \
+ISKU_SYSFS_W(thingy, THINGY)
+
+#define ISKU_BIN_ATTR_RW(thingy) \
+{ \
+ .attr = { .name = #thingy, .mode = 0660 }, \
+ .size = sizeof(struct isku_ ## thingy), \
+ .read = isku_sysfs_read_ ## thingy, \
+ .write = isku_sysfs_write_ ## thingy \
+}
+
+#define ISKU_BIN_ATTR_R(thingy) \
+{ \
+ .attr = { .name = #thingy, .mode = 0440 }, \
+ .size = sizeof(struct isku_ ## thingy), \
+ .read = isku_sysfs_read_ ## thingy, \
+}
+
+#define ISKU_BIN_ATTR_W(thingy) \
+{ \
+ .attr = { .name = #thingy, .mode = 0220 }, \
+ .size = sizeof(struct isku_ ## thingy), \
+ .write = isku_sysfs_write_ ## thingy \
+}
+
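+/* For example, ISKU_SYSFS_RW(macro, MACRO) below generates
+ * isku_sysfs_read_macro() and isku_sysfs_write_macro(), which forward to the
+ * generic helpers with sizeof(struct isku_macro) and ISKU_COMMAND_MACRO. */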
+ISKU_SYSFS_RW(macro, MACRO)
+ISKU_SYSFS_RW(keys_function, KEYS_FUNCTION)
+ISKU_SYSFS_RW(keys_easyzone, KEYS_EASYZONE)
+ISKU_SYSFS_RW(keys_media, KEYS_MEDIA)
+ISKU_SYSFS_RW(keys_thumbster, KEYS_THUMBSTER)
+ISKU_SYSFS_RW(keys_macro, KEYS_MACRO)
+ISKU_SYSFS_RW(keys_capslock, KEYS_CAPSLOCK)
+ISKU_SYSFS_RW(light, LIGHT)
+ISKU_SYSFS_RW(key_mask, KEY_MASK)
+ISKU_SYSFS_RW(last_set, LAST_SET)
+ISKU_SYSFS_W(talk, TALK)
+ISKU_SYSFS_R(info, INFO)
+ISKU_SYSFS_W(control, CONTROL)
+
+static struct bin_attribute isku_bin_attributes[] = {
+ ISKU_BIN_ATTR_RW(macro),
+ ISKU_BIN_ATTR_RW(keys_function),
+ ISKU_BIN_ATTR_RW(keys_easyzone),
+ ISKU_BIN_ATTR_RW(keys_media),
+ ISKU_BIN_ATTR_RW(keys_thumbster),
+ ISKU_BIN_ATTR_RW(keys_macro),
+ ISKU_BIN_ATTR_RW(keys_capslock),
+ ISKU_BIN_ATTR_RW(light),
+ ISKU_BIN_ATTR_RW(key_mask),
+ ISKU_BIN_ATTR_RW(last_set),
+ ISKU_BIN_ATTR_W(talk),
+ ISKU_BIN_ATTR_R(info),
+ ISKU_BIN_ATTR_W(control),
+ __ATTR_NULL
+};
+
+static int isku_init_isku_device_struct(struct usb_device *usb_dev,
+ struct isku_device *isku)
+{
+ int retval;
+
+ mutex_init(&isku->isku_lock);
+
+ retval = isku_get_actual_profile(usb_dev);
+ if (retval < 0)
+ return retval;
+ isku_profile_activated(isku, retval);
+
+ return 0;
+}
+
+static int isku_init_specials(struct hid_device *hdev)
+{
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+ struct usb_device *usb_dev = interface_to_usbdev(intf);
+ struct isku_device *isku;
+ int retval;
+
+ if (intf->cur_altsetting->desc.bInterfaceProtocol
+ != ISKU_USB_INTERFACE_PROTOCOL) {
+ hid_set_drvdata(hdev, NULL);
+ return 0;
+ }
+
+ isku = kzalloc(sizeof(*isku), GFP_KERNEL);
+ if (!isku) {
+ hid_err(hdev, "can't alloc device descriptor\n");
+ return -ENOMEM;
+ }
+ hid_set_drvdata(hdev, isku);
+
+ retval = isku_init_isku_device_struct(usb_dev, isku);
+ if (retval) {
+ hid_err(hdev, "couldn't init struct isku_device\n");
+ goto exit_free;
+ }
+
+ retval = roccat_connect(isku_class, hdev,
+ sizeof(struct isku_roccat_report));
+ if (retval < 0) {
+ hid_err(hdev, "couldn't init char dev\n");
+ } else {
+ isku->chrdev_minor = retval;
+ isku->roccat_claimed = 1;
+ }
+
+ return 0;
+exit_free:
+ kfree(isku);
+ return retval;
+}
+
+static void isku_remove_specials(struct hid_device *hdev)
+{
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+ struct isku_device *isku;
+
+ if (intf->cur_altsetting->desc.bInterfaceProtocol
+ != ISKU_USB_INTERFACE_PROTOCOL)
+ return;
+
+ isku = hid_get_drvdata(hdev);
+ if (isku->roccat_claimed)
+ roccat_disconnect(isku->chrdev_minor);
+ kfree(isku);
+}
+
+static int isku_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ int retval;
+
+ retval = hid_parse(hdev);
+ if (retval) {
+ hid_err(hdev, "parse failed\n");
+ goto exit;
+ }
+
+ retval = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ if (retval) {
+ hid_err(hdev, "hw start failed\n");
+ goto exit;
+ }
+
+ retval = isku_init_specials(hdev);
+ if (retval) {
+ hid_err(hdev, "couldn't install keyboard\n");
+ goto exit_stop;
+ }
+
+ return 0;
+
+exit_stop:
+ hid_hw_stop(hdev);
+exit:
+ return retval;
+}
+
+static void isku_remove(struct hid_device *hdev)
+{
+ isku_remove_specials(hdev);
+ hid_hw_stop(hdev);
+}
+
+static void isku_keep_values_up_to_date(struct isku_device *isku,
+ u8 const *data)
+{
+ struct isku_report_button const *button_report;
+
+ switch (data[0]) {
+ case ISKU_REPORT_NUMBER_BUTTON:
+ button_report = (struct isku_report_button const *)data;
+ switch (button_report->event) {
+ case ISKU_REPORT_BUTTON_EVENT_PROFILE:
+ isku_profile_activated(isku, button_report->data1 - 1);
+ break;
+ }
+ break;
+ }
+}
+
+static void isku_report_to_chrdev(struct isku_device const *isku,
+ u8 const *data)
+{
+ struct isku_roccat_report roccat_report;
+ struct isku_report_button const *button_report;
+
+ if (data[0] != ISKU_REPORT_NUMBER_BUTTON)
+ return;
+
+ button_report = (struct isku_report_button const *)data;
+
+ roccat_report.event = button_report->event;
+ roccat_report.data1 = button_report->data1;
+ roccat_report.data2 = button_report->data2;
+ roccat_report.profile = isku->actual_profile + 1;
+ roccat_report_event(isku->chrdev_minor,
+ (uint8_t const *)&roccat_report);
+}
+
+static int isku_raw_event(struct hid_device *hdev,
+ struct hid_report *report, u8 *data, int size)
+{
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+ struct isku_device *isku = hid_get_drvdata(hdev);
+
+ if (intf->cur_altsetting->desc.bInterfaceProtocol
+ != ISKU_USB_INTERFACE_PROTOCOL)
+ return 0;
+
+ if (isku == NULL)
+ return 0;
+
+ isku_keep_values_up_to_date(isku, data);
+
+ if (isku->roccat_claimed)
+ isku_report_to_chrdev(isku, data);
+
+ return 0;
+}
+
+static const struct hid_device_id isku_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ISKU) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(hid, isku_devices);
+
+static struct hid_driver isku_driver = {
+ .name = "isku",
+ .id_table = isku_devices,
+ .probe = isku_probe,
+ .remove = isku_remove,
+ .raw_event = isku_raw_event
+};
+
+static int __init isku_init(void)
+{
+ int retval;
+ isku_class = class_create(THIS_MODULE, "isku");
+ if (IS_ERR(isku_class))
+ return PTR_ERR(isku_class);
+ isku_class->dev_attrs = isku_attributes;
+ isku_class->dev_bin_attrs = isku_bin_attributes;
+
+ retval = hid_register_driver(&isku_driver);
+ if (retval)
+ class_destroy(isku_class);
+ return retval;
+}
+
+static void __exit isku_exit(void)
+{
+ hid_unregister_driver(&isku_driver);
+ class_destroy(isku_class);
+}
+
+module_init(isku_init);
+module_exit(isku_exit);
+
+MODULE_AUTHOR("Stefan Achatz");
+MODULE_DESCRIPTION("USB Roccat Isku driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hid/hid-roccat-isku.h b/drivers/hid/hid-roccat-isku.h
new file mode 100644
index 000000000000..075f6efaec58
--- /dev/null
+++ b/drivers/hid/hid-roccat-isku.h
@@ -0,0 +1,147 @@
+#ifndef __HID_ROCCAT_ISKU_H
+#define __HID_ROCCAT_ISKU_H
+
+/*
+ * Copyright (c) 2011 Stefan Achatz <erazor_de@users.sourceforge.net>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/types.h>
+
+enum {
+ ISKU_PROFILE_NUM = 5,
+ ISKU_USB_INTERFACE_PROTOCOL = 0,
+};
+
+struct isku_control {
+ uint8_t command; /* ISKU_COMMAND_CONTROL */
+ uint8_t value;
+ uint8_t request;
+} __packed;
+
+enum isku_control_values {
+ ISKU_CONTROL_VALUE_STATUS_OVERLOAD = 0,
+ ISKU_CONTROL_VALUE_STATUS_OK = 1,
+ ISKU_CONTROL_VALUE_STATUS_INVALID = 2,
+ ISKU_CONTROL_VALUE_STATUS_WAIT = 3,
+};
+
+struct isku_actual_profile {
+ uint8_t command; /* ISKU_COMMAND_ACTUAL_PROFILE */
+ uint8_t size; /* always 3 */
+ uint8_t actual_profile;
+} __packed;
+
+struct isku_key_mask {
+ uint8_t command; /* ISKU_COMMAND_KEY_MASK */
+ uint8_t size; /* 6 */
+ uint8_t profile_number; /* 0-4 */
+ uint8_t mask;
+ uint16_t checksum;
+} __packed;
+
+struct isku_keys_function {
+ uint8_t data[0x29];
+} __packed;
+
+struct isku_keys_easyzone {
+ uint8_t data[0x41];
+} __packed;
+
+struct isku_keys_media {
+ uint8_t data[0x1d];
+} __packed;
+
+struct isku_keys_thumbster {
+ uint8_t data[0x17];
+} __packed;
+
+struct isku_keys_macro {
+ uint8_t data[0x23];
+} __packed;
+
+struct isku_keys_capslock {
+ uint8_t data[0x6];
+} __packed;
+
+struct isku_macro {
+ uint8_t data[0x823];
+} __packed;
+
+struct isku_light {
+ uint8_t data[0xa];
+} __packed;
+
+struct isku_info {
+ uint8_t data[2];
+ uint8_t firmware_version;
+ uint8_t unknown[3];
+} __packed;
+
+struct isku_talk {
+ uint8_t data[0x10];
+} __packed;
+
+struct isku_last_set {
+ uint8_t data[0x14];
+} __packed;
+
+enum isku_commands {
+ ISKU_COMMAND_CONTROL = 0x4,
+ ISKU_COMMAND_ACTUAL_PROFILE = 0x5,
+ ISKU_COMMAND_KEY_MASK = 0x7,
+ ISKU_COMMAND_KEYS_FUNCTION = 0x8,
+ ISKU_COMMAND_KEYS_EASYZONE = 0x9,
+ ISKU_COMMAND_KEYS_MEDIA = 0xa,
+ ISKU_COMMAND_KEYS_THUMBSTER = 0xb,
+ ISKU_COMMAND_KEYS_MACRO = 0xd,
+ ISKU_COMMAND_MACRO = 0xe,
+ ISKU_COMMAND_INFO = 0xf,
+ ISKU_COMMAND_LIGHT = 0x10,
+ ISKU_COMMAND_KEYS_CAPSLOCK = 0x13,
+ ISKU_COMMAND_LAST_SET = 0x14,
+ ISKU_COMMAND_15 = 0x15,
+ ISKU_COMMAND_TALK = 0x16,
+ ISKU_COMMAND_FIRMWARE_WRITE = 0x1b,
+ ISKU_COMMAND_FIRMWARE_WRITE_CONTROL = 0x1c,
+};
+
+struct isku_report_button {
+ uint8_t number; /* ISKU_REPORT_NUMBER_BUTTON */
+ uint8_t zero;
+ uint8_t event;
+ uint8_t data1;
+ uint8_t data2;
+};
+
+enum isku_report_numbers {
+ ISKU_REPORT_NUMBER_BUTTON = 3,
+};
+
+enum isku_report_button_events {
+ ISKU_REPORT_BUTTON_EVENT_PROFILE = 0x2,
+};
+
+struct isku_roccat_report {
+ uint8_t event;
+ uint8_t data1;
+ uint8_t data2;
+ uint8_t profile;
+} __packed;
+
+struct isku_device {
+ int roccat_claimed;
+ int chrdev_minor;
+
+ struct mutex isku_lock;
+
+ int actual_profile;
+};
+
+#endif
diff --git a/drivers/hid/hid-roccat-kone.c b/drivers/hid/hid-roccat-kone.c
index e2072afb34bb..40090d602158 100644
--- a/drivers/hid/hid-roccat-kone.c
+++ b/drivers/hid/hid-roccat-kone.c
@@ -78,12 +78,10 @@ static int kone_send(struct usb_device *usb_dev, uint usb_command,
char *buf;
int len;
- buf = kmalloc(size, GFP_KERNEL);
+ buf = kmemdup(data, size, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
- memcpy(buf, data, size);
-
len = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
HID_REQ_SET_REPORT,
USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
diff --git a/drivers/hid/hid-twinhan.c b/drivers/hid/hid-twinhan.c
index c40afc57fc8f..f23456b1fd4b 100644
--- a/drivers/hid/hid-twinhan.c
+++ b/drivers/hid/hid-twinhan.c
@@ -3,7 +3,7 @@
*
* Based on hid-gyration.c
*
- * Copyright (c) 2009 Bruno Prmont <bonbons@linux-vserver.org>
+ * Copyright (c) 2009 Bruno Prémont <bonbons@linux-vserver.org>
*/
/*
diff --git a/drivers/hid/hid-wacom.c b/drivers/hid/hid-wacom.c
index 17bb88f782b6..b47e58b52d9f 100644
--- a/drivers/hid/hid-wacom.c
+++ b/drivers/hid/hid-wacom.c
@@ -9,6 +9,7 @@
* Copyright (c) 2008 Jiri Slaby <jirislaby@gmail.com>
* Copyright (c) 2006 Andrew Zabolotny <zap@homelink.ru>
* Copyright (c) 2009 Bastien Nocera <hadess@hadess.net>
+ * Copyright (c) 2011 Przemysław Firszt <przemo@firszt.eu>
*/
/*
@@ -33,6 +34,7 @@
struct wacom_data {
__u16 tool;
unsigned char butstate;
+ __u8 features;
unsigned char high_speed;
#ifdef CONFIG_HID_WACOM_POWER_SUPPLY
int battery_capacity;
@@ -47,12 +49,14 @@ static unsigned short batcap[8] = { 1, 15, 25, 35, 50, 70, 100, 0 };
static enum power_supply_property wacom_battery_props[] = {
POWER_SUPPLY_PROP_PRESENT,
- POWER_SUPPLY_PROP_CAPACITY
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_SCOPE,
};
static enum power_supply_property wacom_ac_props[] = {
POWER_SUPPLY_PROP_PRESENT,
- POWER_SUPPLY_PROP_ONLINE
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_SCOPE,
};
static int wacom_battery_get_property(struct power_supply *psy,
@@ -68,6 +72,9 @@ static int wacom_battery_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_PRESENT:
val->intval = 1;
break;
+ case POWER_SUPPLY_PROP_SCOPE:
+ val->intval = POWER_SUPPLY_SCOPE_DEVICE;
+ break;
case POWER_SUPPLY_PROP_CAPACITY:
/* show 100% battery capacity when charging */
if (power_state == 0)
@@ -99,6 +106,9 @@ static int wacom_ac_get_property(struct power_supply *psy,
else
val->intval = 0;
break;
+ case POWER_SUPPLY_PROP_SCOPE:
+ val->intval = POWER_SUPPLY_SCOPE_DEVICE;
+ break;
default:
ret = -EINVAL;
break;
@@ -107,6 +117,19 @@ static int wacom_ac_get_property(struct power_supply *psy,
}
#endif
+static void wacom_set_features(struct hid_device *hdev)
+{
+ int ret;
+ __u8 rep_data[2];
+
+	/* set high speed, tablet mode */
+ rep_data[0] = 0x03;
+ rep_data[1] = 0x20;
+ ret = hdev->hid_output_raw_report(hdev, rep_data, 2,
+ HID_FEATURE_REPORT);
+ return;
+}
+
static void wacom_poke(struct hid_device *hdev, u8 speed)
{
struct wacom_data *wdata = hid_get_drvdata(hdev);
@@ -177,26 +200,13 @@ static ssize_t wacom_store_speed(struct device *dev,
static DEVICE_ATTR(speed, S_IRUGO | S_IWUSR | S_IWGRP,
wacom_show_speed, wacom_store_speed);
-static int wacom_raw_event(struct hid_device *hdev, struct hid_report *report,
- u8 *raw_data, int size)
+static int wacom_gr_parse_report(struct hid_device *hdev,
+ struct wacom_data *wdata,
+ struct input_dev *input, unsigned char *data)
{
- struct wacom_data *wdata = hid_get_drvdata(hdev);
- struct hid_input *hidinput;
- struct input_dev *input;
- unsigned char *data = (unsigned char *) raw_data;
int tool, x, y, rw;
- if (!(hdev->claimed & HID_CLAIMED_INPUT))
- return 0;
-
tool = 0;
- hidinput = list_entry(hdev->inputs.next, struct hid_input, list);
- input = hidinput->input;
-
- /* Check if this is a tablet report */
- if (data[0] != 0x03)
- return 0;
-
/* Get X & Y positions */
x = le16_to_cpu(*(__le16 *) &data[2]);
y = le16_to_cpu(*(__le16 *) &data[4]);
@@ -304,6 +314,121 @@ static int wacom_raw_event(struct hid_device *hdev, struct hid_report *report,
return 1;
}
+static void wacom_i4_parse_pen_report(struct wacom_data *wdata,
+ struct input_dev *input, unsigned char *data)
+{
+ __u16 x, y, pressure;
+ __u32 id;
+
+ switch (data[1]) {
+ case 0x80: /* Out of proximity report */
+ wdata->tool = 0;
+ input_report_key(input, BTN_TOUCH, 0);
+ input_report_abs(input, ABS_PRESSURE, 0);
+ input_report_key(input, wdata->tool, 0);
+ input_sync(input);
+ break;
+ case 0xC2: /* Tool report */
+ id = ((data[2] << 4) | (data[3] >> 4) |
+ ((data[7] & 0x0f) << 20) |
+ ((data[8] & 0xf0) << 12)) & 0xfffff;
+
+ switch (id) {
+ case 0x802:
+ wdata->tool = BTN_TOOL_PEN;
+ break;
+ case 0x80A:
+ wdata->tool = BTN_TOOL_RUBBER;
+ break;
+ }
+ break;
+ default: /* Position/pressure report */
+ x = data[2] << 9 | data[3] << 1 | ((data[9] & 0x02) >> 1);
+ y = data[4] << 9 | data[5] << 1 | (data[9] & 0x01);
+ pressure = (data[6] << 3) | ((data[7] & 0xC0) >> 5)
+ | (data[1] & 0x01);
+
+ input_report_key(input, BTN_TOUCH, pressure > 1);
+
+ input_report_key(input, BTN_STYLUS, data[1] & 0x02);
+ input_report_key(input, BTN_STYLUS2, data[1] & 0x04);
+ input_report_key(input, wdata->tool, 1);
+ input_report_abs(input, ABS_X, x);
+ input_report_abs(input, ABS_Y, y);
+ input_report_abs(input, ABS_PRESSURE, pressure);
+ input_sync(input);
+ break;
+ }
+
+ return;
+}
+
+static void wacom_i4_parse_report(struct hid_device *hdev,
+ struct wacom_data *wdata,
+ struct input_dev *input, unsigned char *data)
+{
+ switch (data[0]) {
+ case 0x00: /* Empty report */
+ break;
+ case 0x02: /* Pen report */
+ wacom_i4_parse_pen_report(wdata, input, data);
+ break;
+ case 0x03: /* Features Report */
+ wdata->features = data[2];
+ break;
+ case 0x0C: /* Button report */
+ break;
+ default:
+ hid_err(hdev, "Unknown report: %d,%d\n", data[0], data[1]);
+ break;
+ }
+}
+
+static int wacom_raw_event(struct hid_device *hdev, struct hid_report *report,
+ u8 *raw_data, int size)
+{
+ struct wacom_data *wdata = hid_get_drvdata(hdev);
+ struct hid_input *hidinput;
+ struct input_dev *input;
+ unsigned char *data = (unsigned char *) raw_data;
+ int i;
+
+ if (!(hdev->claimed & HID_CLAIMED_INPUT))
+ return 0;
+
+ hidinput = list_entry(hdev->inputs.next, struct hid_input, list);
+ input = hidinput->input;
+
+ /* Check if this is a tablet report */
+ if (data[0] != 0x03)
+ return 0;
+
+ switch (hdev->product) {
+ case USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH:
+ return wacom_gr_parse_report(hdev, wdata, input, data);
+ break;
+ case USB_DEVICE_ID_WACOM_INTUOS4_BLUETOOTH:
+ i = 1;
+
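+		/* Each Intuos4 sub-report is 10 bytes: report 0x03 carries two
+		 * (offsets 1 and 11), report 0x04 would carry three via the
+		 * fall-through below. */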
+ switch (data[0]) {
+ case 0x04:
+ wacom_i4_parse_report(hdev, wdata, input, data + i);
+ i += 10;
+ /* fall through */
+ case 0x03:
+ wacom_i4_parse_report(hdev, wdata, input, data + i);
+ i += 10;
+ wacom_i4_parse_report(hdev, wdata, input, data + i);
+ break;
+ default:
+ hid_err(hdev, "Unknown report: %d,%d size:%d\n",
+ data[0], data[1], size);
+ return 0;
+ }
+ }
+ return 1;
+}
+
static int wacom_input_mapped(struct hid_device *hdev, struct hid_input *hi,
struct hid_field *field, struct hid_usage *usage, unsigned long **bit,
int *max)
@@ -338,10 +463,19 @@ static int wacom_input_mapped(struct hid_device *hdev, struct hid_input *hi,
__set_bit(BTN_TOOL_RUBBER, input->keybit);
__set_bit(BTN_TOOL_MOUSE, input->keybit);
- input_set_abs_params(input, ABS_X, 0, 16704, 4, 0);
- input_set_abs_params(input, ABS_Y, 0, 12064, 4, 0);
- input_set_abs_params(input, ABS_PRESSURE, 0, 511, 0, 0);
- input_set_abs_params(input, ABS_DISTANCE, 0, 32, 0, 0);
+ switch (hdev->product) {
+ case USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH:
+ input_set_abs_params(input, ABS_X, 0, 16704, 4, 0);
+ input_set_abs_params(input, ABS_Y, 0, 12064, 4, 0);
+ input_set_abs_params(input, ABS_PRESSURE, 0, 511, 0, 0);
+ input_set_abs_params(input, ABS_DISTANCE, 0, 32, 0, 0);
+ break;
+ case USB_DEVICE_ID_WACOM_INTUOS4_BLUETOOTH:
+ input_set_abs_params(input, ABS_X, 0, 40640, 4, 0);
+ input_set_abs_params(input, ABS_Y, 0, 25400, 4, 0);
+ input_set_abs_params(input, ABS_PRESSURE, 0, 2047, 0, 0);
+ break;
+ }
return 0;
}
@@ -378,8 +512,16 @@ static int wacom_probe(struct hid_device *hdev,
hid_warn(hdev,
"can't create sysfs speed attribute err: %d\n", ret);
- /* Set Wacom mode 2 with high reporting speed */
- wacom_poke(hdev, 1);
+ switch (hdev->product) {
+ case USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH:
+ /* Set Wacom mode 2 with high reporting speed */
+ wacom_poke(hdev, 1);
+ break;
+ case USB_DEVICE_ID_WACOM_INTUOS4_BLUETOOTH:
+ wdata->features = 0;
+ wacom_set_features(hdev);
+ break;
+ }
#ifdef CONFIG_HID_WACOM_POWER_SUPPLY
wdata->battery.properties = wacom_battery_props;
@@ -389,6 +531,8 @@ static int wacom_probe(struct hid_device *hdev,
wdata->battery.type = POWER_SUPPLY_TYPE_BATTERY;
wdata->battery.use_for_apm = 0;
+ power_supply_powers(&wdata->battery, &hdev->dev);
+
ret = power_supply_register(&hdev->dev, &wdata->battery);
if (ret) {
hid_warn(hdev, "can't create sysfs battery attribute, err: %d\n",
@@ -403,6 +547,8 @@ static int wacom_probe(struct hid_device *hdev,
wdata->ac.type = POWER_SUPPLY_TYPE_MAINS;
wdata->ac.use_for_apm = 0;
+ power_supply_powers(&wdata->battery, &hdev->dev);
+
ret = power_supply_register(&hdev->dev, &wdata->ac);
if (ret) {
hid_warn(hdev,
@@ -441,6 +587,7 @@ static void wacom_remove(struct hid_device *hdev)
static const struct hid_device_id wacom_devices[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_INTUOS4_BLUETOOTH) },
{ }
};
diff --git a/drivers/hid/hid-wiimote.c b/drivers/hid/hid-wiimote-core.c
index 76739c07fa3c..fc253b472f9d 100644
--- a/drivers/hid/hid-wiimote.c
+++ b/drivers/hid/hid-wiimote-core.c
@@ -20,91 +20,7 @@
#include <linux/power_supply.h>
#include <linux/spinlock.h>
#include "hid-ids.h"
-
-#define WIIMOTE_VERSION "0.2"
-#define WIIMOTE_NAME "Nintendo Wii Remote"
-#define WIIMOTE_BUFSIZE 32
-
-struct wiimote_buf {
- __u8 data[HID_MAX_BUFFER_SIZE];
- size_t size;
-};
-
-struct wiimote_state {
- spinlock_t lock;
- __u8 flags;
- __u8 accel_split[2];
-
- /* synchronous cmd requests */
- struct mutex sync;
- struct completion ready;
- int cmd;
- __u32 opt;
-
- /* results of synchronous requests */
- __u8 cmd_battery;
- __u8 cmd_err;
-};
-
-struct wiimote_data {
- struct hid_device *hdev;
- struct input_dev *input;
- struct led_classdev *leds[4];
- struct input_dev *accel;
- struct input_dev *ir;
- struct power_supply battery;
-
- spinlock_t qlock;
- __u8 head;
- __u8 tail;
- struct wiimote_buf outq[WIIMOTE_BUFSIZE];
- struct work_struct worker;
-
- struct wiimote_state state;
-};
-
-#define WIIPROTO_FLAG_LED1 0x01
-#define WIIPROTO_FLAG_LED2 0x02
-#define WIIPROTO_FLAG_LED3 0x04
-#define WIIPROTO_FLAG_LED4 0x08
-#define WIIPROTO_FLAG_RUMBLE 0x10
-#define WIIPROTO_FLAG_ACCEL 0x20
-#define WIIPROTO_FLAG_IR_BASIC 0x40
-#define WIIPROTO_FLAG_IR_EXT 0x80
-#define WIIPROTO_FLAG_IR_FULL 0xc0 /* IR_BASIC | IR_EXT */
-#define WIIPROTO_FLAGS_LEDS (WIIPROTO_FLAG_LED1 | WIIPROTO_FLAG_LED2 | \
- WIIPROTO_FLAG_LED3 | WIIPROTO_FLAG_LED4)
-#define WIIPROTO_FLAGS_IR (WIIPROTO_FLAG_IR_BASIC | WIIPROTO_FLAG_IR_EXT | \
- WIIPROTO_FLAG_IR_FULL)
-
-/* return flag for led \num */
-#define WIIPROTO_FLAG_LED(num) (WIIPROTO_FLAG_LED1 << (num - 1))
-
-enum wiiproto_reqs {
- WIIPROTO_REQ_NULL = 0x0,
- WIIPROTO_REQ_RUMBLE = 0x10,
- WIIPROTO_REQ_LED = 0x11,
- WIIPROTO_REQ_DRM = 0x12,
- WIIPROTO_REQ_IR1 = 0x13,
- WIIPROTO_REQ_SREQ = 0x15,
- WIIPROTO_REQ_WMEM = 0x16,
- WIIPROTO_REQ_RMEM = 0x17,
- WIIPROTO_REQ_IR2 = 0x1a,
- WIIPROTO_REQ_STATUS = 0x20,
- WIIPROTO_REQ_DATA = 0x21,
- WIIPROTO_REQ_RETURN = 0x22,
- WIIPROTO_REQ_DRM_K = 0x30,
- WIIPROTO_REQ_DRM_KA = 0x31,
- WIIPROTO_REQ_DRM_KE = 0x32,
- WIIPROTO_REQ_DRM_KAI = 0x33,
- WIIPROTO_REQ_DRM_KEE = 0x34,
- WIIPROTO_REQ_DRM_KAE = 0x35,
- WIIPROTO_REQ_DRM_KIE = 0x36,
- WIIPROTO_REQ_DRM_KAIE = 0x37,
- WIIPROTO_REQ_DRM_E = 0x3d,
- WIIPROTO_REQ_DRM_SKAI1 = 0x3e,
- WIIPROTO_REQ_DRM_SKAI2 = 0x3f,
-};
+#include "hid-wiimote.h"
enum wiiproto_keys {
WIIPROTO_KEY_LEFT,
@@ -136,55 +52,10 @@ static __u16 wiiproto_keymap[] = {
};
static enum power_supply_property wiimote_battery_props[] = {
- POWER_SUPPLY_PROP_CAPACITY
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_SCOPE,
};
-/* requires the state.lock spinlock to be held */
-static inline bool wiimote_cmd_pending(struct wiimote_data *wdata, int cmd,
- __u32 opt)
-{
- return wdata->state.cmd == cmd && wdata->state.opt == opt;
-}
-
-/* requires the state.lock spinlock to be held */
-static inline void wiimote_cmd_complete(struct wiimote_data *wdata)
-{
- wdata->state.cmd = WIIPROTO_REQ_NULL;
- complete(&wdata->state.ready);
-}
-
-static inline int wiimote_cmd_acquire(struct wiimote_data *wdata)
-{
- return mutex_lock_interruptible(&wdata->state.sync) ? -ERESTARTSYS : 0;
-}
-
-/* requires the state.lock spinlock to be held */
-static inline void wiimote_cmd_set(struct wiimote_data *wdata, int cmd,
- __u32 opt)
-{
- INIT_COMPLETION(wdata->state.ready);
- wdata->state.cmd = cmd;
- wdata->state.opt = opt;
-}
-
-static inline void wiimote_cmd_release(struct wiimote_data *wdata)
-{
- mutex_unlock(&wdata->state.sync);
-}
-
-static inline int wiimote_cmd_wait(struct wiimote_data *wdata)
-{
- int ret;
-
- ret = wait_for_completion_interruptible_timeout(&wdata->state.ready, HZ);
- if (ret < 0)
- return -ERESTARTSYS;
- else if (ret == 0)
- return -EIO;
- else
- return 0;
-}
-
static ssize_t wiimote_hid_send(struct hid_device *hdev, __u8 *buffer,
size_t count)
{
@@ -329,6 +200,7 @@ static void wiiproto_req_leds(struct wiimote_data *wdata, int leds)
static __u8 select_drm(struct wiimote_data *wdata)
{
__u8 ir = wdata->state.flags & WIIPROTO_FLAGS_IR;
+ bool ext = wiiext_active(wdata);
if (ir == WIIPROTO_FLAG_IR_BASIC) {
if (wdata->state.flags & WIIPROTO_FLAG_ACCEL)
@@ -340,14 +212,21 @@ static __u8 select_drm(struct wiimote_data *wdata)
} else if (ir == WIIPROTO_FLAG_IR_FULL) {
return WIIPROTO_REQ_DRM_SKAI1;
} else {
- if (wdata->state.flags & WIIPROTO_FLAG_ACCEL)
- return WIIPROTO_REQ_DRM_KA;
- else
- return WIIPROTO_REQ_DRM_K;
+ if (wdata->state.flags & WIIPROTO_FLAG_ACCEL) {
+ if (ext)
+ return WIIPROTO_REQ_DRM_KAE;
+ else
+ return WIIPROTO_REQ_DRM_KA;
+ } else {
+ if (ext)
+ return WIIPROTO_REQ_DRM_KE;
+ else
+ return WIIPROTO_REQ_DRM_K;
+ }
}
}
-static void wiiproto_req_drm(struct wiimote_data *wdata, __u8 drm)
+void wiiproto_req_drm(struct wiimote_data *wdata, __u8 drm)
{
__u8 cmd[3];
@@ -358,6 +237,7 @@ static void wiiproto_req_drm(struct wiimote_data *wdata, __u8 drm)
cmd[1] = 0;
cmd[2] = drm;
+ wdata->state.drm = drm;
wiiproto_keep_rumble(wdata, &cmd[1]);
wiimote_queue(wdata, cmd, sizeof(cmd));
}
@@ -440,8 +320,33 @@ static void wiiproto_req_wmem(struct wiimote_data *wdata, bool eeprom,
wiimote_queue(wdata, cmd, sizeof(cmd));
}
+void wiiproto_req_rmem(struct wiimote_data *wdata, bool eeprom, __u32 offset,
+ __u16 size)
+{
+ __u8 cmd[7];
+
+ if (size == 0) {
+ hid_warn(wdata->hdev, "Invalid length %d rmem request\n", size);
+ return;
+ }
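+	/* read request: flags byte, 24-bit big-endian offset, 16-bit size;
+	 * flag 0x04 selects register space instead of EEPROM */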
+
+ cmd[0] = WIIPROTO_REQ_RMEM;
+ cmd[1] = 0;
+ cmd[2] = (offset >> 16) & 0xff;
+ cmd[3] = (offset >> 8) & 0xff;
+ cmd[4] = offset & 0xff;
+ cmd[5] = (size >> 8) & 0xff;
+ cmd[6] = size & 0xff;
+
+ if (!eeprom)
+ cmd[1] |= 0x04;
+
+ wiiproto_keep_rumble(wdata, &cmd[1]);
+ wiimote_queue(wdata, cmd, sizeof(cmd));
+}
+
/* requires the cmd-mutex to be held */
-static int wiimote_cmd_write(struct wiimote_data *wdata, __u32 offset,
+int wiimote_cmd_write(struct wiimote_data *wdata, __u32 offset,
const __u8 *wmem, __u8 size)
{
unsigned long flags;
@@ -459,6 +364,36 @@ static int wiimote_cmd_write(struct wiimote_data *wdata, __u32 offset,
return ret;
}
+/* requires the cmd-mutex to be held */
+ssize_t wiimote_cmd_read(struct wiimote_data *wdata, __u32 offset, __u8 *rmem,
+ __u8 size)
+{
+ unsigned long flags;
+ ssize_t ret;
+
+ spin_lock_irqsave(&wdata->state.lock, flags);
+ wdata->state.cmd_read_size = size;
+ wdata->state.cmd_read_buf = rmem;
+ wiimote_cmd_set(wdata, WIIPROTO_REQ_RMEM, offset & 0xffff);
+ wiiproto_req_rreg(wdata, offset, size);
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+ ret = wiimote_cmd_wait(wdata);
+
+ spin_lock_irqsave(&wdata->state.lock, flags);
+ wdata->state.cmd_read_buf = NULL;
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+ if (!ret) {
+ if (wdata->state.cmd_read_size == 0)
+ ret = -EIO;
+ else
+ ret = wdata->state.cmd_read_size;
+ }
+
+ return ret;
+}
+
static int wiimote_battery_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
@@ -468,6 +403,11 @@ static int wiimote_battery_get_property(struct power_supply *psy,
int ret = 0, state;
unsigned long flags;
+ if (psp == POWER_SUPPLY_PROP_SCOPE) {
+ val->intval = POWER_SUPPLY_SCOPE_DEVICE;
+ return 0;
+ }
+
ret = wiimote_cmd_acquire(wdata);
if (ret)
return ret;
@@ -862,6 +802,8 @@ static void handler_status(struct wiimote_data *wdata, const __u8 *payload)
/* on status reports the drm is reset so we need to resend the drm */
wiiproto_req_drm(wdata, WIIPROTO_REQ_NULL);
+ wiiext_event(wdata, payload[2] & 0x02);
+
if (wiimote_cmd_pending(wdata, WIIPROTO_REQ_SREQ, 0)) {
wdata->state.cmd_battery = payload[5];
wiimote_cmd_complete(wdata);
@@ -870,7 +812,23 @@ static void handler_status(struct wiimote_data *wdata, const __u8 *payload)
static void handler_data(struct wiimote_data *wdata, const __u8 *payload)
{
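+	/* data report: bytes 3-4 carry the big-endian offset, the upper nibble
+	 * of byte 2 is size-1 and the lower nibble an error code */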
+ __u16 offset = payload[3] << 8 | payload[4];
+ __u8 size = (payload[2] >> 4) + 1;
+ __u8 err = payload[2] & 0x0f;
+
handler_keys(wdata, payload);
+
+ if (wiimote_cmd_pending(wdata, WIIPROTO_REQ_RMEM, offset)) {
+ if (err)
+ size = 0;
+ else if (size > wdata->state.cmd_read_size)
+ size = wdata->state.cmd_read_size;
+
+ wdata->state.cmd_read_size = size;
+ if (wdata->state.cmd_read_buf)
+ memcpy(wdata->state.cmd_read_buf, &payload[5], size);
+ wiimote_cmd_complete(wdata);
+ }
}
static void handler_return(struct wiimote_data *wdata, const __u8 *payload)
@@ -898,6 +856,7 @@ static void handler_drm_KA(struct wiimote_data *wdata, const __u8 *payload)
static void handler_drm_KE(struct wiimote_data *wdata, const __u8 *payload)
{
handler_keys(wdata, payload);
+ wiiext_handle(wdata, &payload[2]);
}
static void handler_drm_KAI(struct wiimote_data *wdata, const __u8 *payload)
@@ -914,6 +873,7 @@ static void handler_drm_KAI(struct wiimote_data *wdata, const __u8 *payload)
static void handler_drm_KEE(struct wiimote_data *wdata, const __u8 *payload)
{
handler_keys(wdata, payload);
+ wiiext_handle(wdata, &payload[2]);
}
static void handler_drm_KIE(struct wiimote_data *wdata, const __u8 *payload)
@@ -924,12 +884,14 @@ static void handler_drm_KIE(struct wiimote_data *wdata, const __u8 *payload)
ir_to_input2(wdata, &payload[7], false);
ir_to_input3(wdata, &payload[9], true);
input_sync(wdata->ir);
+ wiiext_handle(wdata, &payload[12]);
}
static void handler_drm_KAE(struct wiimote_data *wdata, const __u8 *payload)
{
handler_keys(wdata, payload);
handler_accel(wdata, payload);
+ wiiext_handle(wdata, &payload[5]);
}
static void handler_drm_KAIE(struct wiimote_data *wdata, const __u8 *payload)
@@ -941,10 +903,12 @@ static void handler_drm_KAIE(struct wiimote_data *wdata, const __u8 *payload)
ir_to_input2(wdata, &payload[10], false);
ir_to_input3(wdata, &payload[12], true);
input_sync(wdata->ir);
+ wiiext_handle(wdata, &payload[15]);
}
static void handler_drm_E(struct wiimote_data *wdata, const __u8 *payload)
{
+ wiiext_handle(wdata, payload);
}
static void handler_drm_SKAI1(struct wiimote_data *wdata, const __u8 *payload)
@@ -1182,6 +1146,7 @@ static struct wiimote_data *wiimote_create(struct hid_device *hdev)
spin_lock_init(&wdata->state.lock);
init_completion(&wdata->state.ready);
mutex_init(&wdata->state.sync);
+ wdata->state.drm = WIIPROTO_REQ_DRM_K;
return wdata;
@@ -1196,6 +1161,8 @@ err:
static void wiimote_destroy(struct wiimote_data *wdata)
{
+ wiidebug_deinit(wdata);
+ wiiext_deinit(wdata);
wiimote_leds_destroy(wdata);
power_supply_unregister(&wdata->battery);
@@ -1214,6 +1181,8 @@ static int wiimote_hid_probe(struct hid_device *hdev,
struct wiimote_data *wdata;
int ret;
+ hdev->quirks |= HID_QUIRK_NO_INIT_REPORTS;
+
wdata = wiimote_create(hdev);
if (!wdata) {
hid_err(hdev, "Can't alloc device\n");
@@ -1257,6 +1226,8 @@ static int wiimote_hid_probe(struct hid_device *hdev,
wdata->battery.type = POWER_SUPPLY_TYPE_BATTERY;
wdata->battery.use_for_apm = 0;
+ power_supply_powers(&wdata->battery, &hdev->dev);
+
ret = power_supply_register(&wdata->hdev->dev, &wdata->battery);
if (ret) {
hid_err(hdev, "Cannot register battery device\n");
@@ -1267,6 +1238,14 @@ static int wiimote_hid_probe(struct hid_device *hdev,
if (ret)
goto err_free;
+ ret = wiiext_init(wdata);
+ if (ret)
+ goto err_free;
+
+ ret = wiidebug_init(wdata);
+ if (ret)
+ goto err_free;
+
hid_info(hdev, "New device registered\n");
/* by default set led1 after device initialization */
@@ -1343,4 +1322,3 @@ module_exit(wiimote_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Herrmann <dh.herrmann@gmail.com>");
MODULE_DESCRIPTION(WIIMOTE_NAME " Device Driver");
-MODULE_VERSION(WIIMOTE_VERSION);
diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
new file mode 100644
index 000000000000..17dabc1f339e
--- /dev/null
+++ b/drivers/hid/hid-wiimote-debug.c
@@ -0,0 +1,227 @@
+/*
+ * Debug support for HID Nintendo Wiimote devices
+ * Copyright (c) 2011 David Herrmann
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+#include "hid-wiimote.h"
+
+struct wiimote_debug {
+ struct wiimote_data *wdata;
+ struct dentry *eeprom;
+ struct dentry *drm;
+};
+
+static int wiidebug_eeprom_open(struct inode *i, struct file *f)
+{
+ f->private_data = i->i_private;
+ return 0;
+}
+
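+/* Reads of the debugfs "eeprom" file are served by issuing RMEM requests of
+ * up to 16 bytes against the wiimote's EEPROM at the current file offset. */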
+static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
+ loff_t *off)
+{
+ struct wiimote_debug *dbg = f->private_data;
+ struct wiimote_data *wdata = dbg->wdata;
+ unsigned long flags;
+ ssize_t ret;
+ char buf[16];
+ __u16 size;
+
+ if (s == 0)
+ return -EINVAL;
+ if (*off > 0xffffff)
+ return 0;
+ if (s > 16)
+ s = 16;
+
+ ret = wiimote_cmd_acquire(wdata);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&wdata->state.lock, flags);
+ wdata->state.cmd_read_size = s;
+ wdata->state.cmd_read_buf = buf;
+ wiimote_cmd_set(wdata, WIIPROTO_REQ_RMEM, *off & 0xffff);
+ wiiproto_req_reeprom(wdata, *off, s);
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+ ret = wiimote_cmd_wait(wdata);
+ if (!ret)
+ size = wdata->state.cmd_read_size;
+
+ spin_lock_irqsave(&wdata->state.lock, flags);
+ wdata->state.cmd_read_buf = NULL;
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+ wiimote_cmd_release(wdata);
+
+ if (ret)
+ return ret;
+ else if (size == 0)
+ return -EIO;
+
+ if (copy_to_user(u, buf, size))
+ return -EFAULT;
+
+ *off += size;
+ ret = size;
+
+ return ret;
+}
+
+static const struct file_operations wiidebug_eeprom_fops = {
+ .owner = THIS_MODULE,
+ .open = wiidebug_eeprom_open,
+ .read = wiidebug_eeprom_read,
+ .llseek = generic_file_llseek,
+};
+
+static const char *wiidebug_drmmap[] = {
+ [WIIPROTO_REQ_NULL] = "NULL",
+ [WIIPROTO_REQ_DRM_K] = "K",
+ [WIIPROTO_REQ_DRM_KA] = "KA",
+ [WIIPROTO_REQ_DRM_KE] = "KE",
+ [WIIPROTO_REQ_DRM_KAI] = "KAI",
+ [WIIPROTO_REQ_DRM_KEE] = "KEE",
+ [WIIPROTO_REQ_DRM_KAE] = "KAE",
+ [WIIPROTO_REQ_DRM_KIE] = "KIE",
+ [WIIPROTO_REQ_DRM_KAIE] = "KAIE",
+ [WIIPROTO_REQ_DRM_E] = "E",
+ [WIIPROTO_REQ_DRM_SKAI1] = "SKAI1",
+ [WIIPROTO_REQ_DRM_SKAI2] = "SKAI2",
+ [WIIPROTO_REQ_MAX] = NULL
+};
+
+static int wiidebug_drm_show(struct seq_file *f, void *p)
+{
+ struct wiimote_debug *dbg = f->private;
+ const char *str = NULL;
+ unsigned long flags;
+ __u8 drm;
+
+ spin_lock_irqsave(&dbg->wdata->state.lock, flags);
+ drm = dbg->wdata->state.drm;
+ spin_unlock_irqrestore(&dbg->wdata->state.lock, flags);
+
+ if (drm < WIIPROTO_REQ_MAX)
+ str = wiidebug_drmmap[drm];
+ if (!str)
+ str = "unknown";
+
+ seq_printf(f, "%s\n", str);
+
+ return 0;
+}
+
+static int wiidebug_drm_open(struct inode *i, struct file *f)
+{
+ return single_open(f, wiidebug_drm_show, i->i_private);
+}
+
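+/* Writing a DRM name from wiidebug_drmmap (e.g. "KA") or its numeric request
+ * id to the debugfs "drm" file forces that reporting mode. */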
+static ssize_t wiidebug_drm_write(struct file *f, const char __user *u,
+ size_t s, loff_t *off)
+{
+ struct wiimote_debug *dbg = f->private_data;
+ unsigned long flags;
+ char buf[16];
+ ssize_t len;
+ int i;
+
+ if (s == 0)
+ return -EINVAL;
+
+ len = min((size_t) 15, s);
+ if (copy_from_user(buf, u, len))
+ return -EFAULT;
+
+ buf[15] = 0;
+
+ for (i = 0; i < WIIPROTO_REQ_MAX; ++i) {
+ if (!wiidebug_drmmap[i])
+ continue;
+ if (!strcasecmp(buf, wiidebug_drmmap[i]))
+ break;
+ }
+
+ if (i == WIIPROTO_REQ_MAX)
+ i = simple_strtoul(buf, NULL, 10);
+
+ spin_lock_irqsave(&dbg->wdata->state.lock, flags);
+ wiiproto_req_drm(dbg->wdata, (__u8) i);
+ spin_unlock_irqrestore(&dbg->wdata->state.lock, flags);
+
+ return len;
+}
+
+static const struct file_operations wiidebug_drm_fops = {
+ .owner = THIS_MODULE,
+ .open = wiidebug_drm_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = wiidebug_drm_write,
+ .release = single_release,
+};
+
+int wiidebug_init(struct wiimote_data *wdata)
+{
+ struct wiimote_debug *dbg;
+ unsigned long flags;
+ int ret = -ENOMEM;
+
+ dbg = kzalloc(sizeof(*dbg), GFP_KERNEL);
+ if (!dbg)
+ return -ENOMEM;
+
+ dbg->wdata = wdata;
+
+ dbg->eeprom = debugfs_create_file("eeprom", S_IRUSR,
+ dbg->wdata->hdev->debug_dir, dbg, &wiidebug_eeprom_fops);
+ if (!dbg->eeprom)
+ goto err;
+
+ dbg->drm = debugfs_create_file("drm", S_IRUSR,
+ dbg->wdata->hdev->debug_dir, dbg, &wiidebug_drm_fops);
+ if (!dbg->drm)
+ goto err_drm;
+
+ spin_lock_irqsave(&wdata->state.lock, flags);
+ wdata->debug = dbg;
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+ return 0;
+
+err_drm:
+ debugfs_remove(dbg->eeprom);
+err:
+ kfree(dbg);
+ return ret;
+}
+
+void wiidebug_deinit(struct wiimote_data *wdata)
+{
+ struct wiimote_debug *dbg = wdata->debug;
+ unsigned long flags;
+
+ if (!dbg)
+ return;
+
+ spin_lock_irqsave(&wdata->state.lock, flags);
+ wdata->debug = NULL;
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+ debugfs_remove(dbg->drm);
+ debugfs_remove(dbg->eeprom);
+ kfree(dbg);
+}
diff --git a/drivers/hid/hid-wiimote-ext.c b/drivers/hid/hid-wiimote-ext.c
new file mode 100644
index 000000000000..aa958706c0e5
--- /dev/null
+++ b/drivers/hid/hid-wiimote-ext.c
@@ -0,0 +1,752 @@
+/*
+ * HID driver for Nintendo Wiimote extension devices
+ * Copyright (c) 2011 David Herrmann
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/atomic.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include "hid-wiimote.h"
+
+struct wiimote_ext {
+ struct wiimote_data *wdata;
+ struct work_struct worker;
+ struct input_dev *input;
+ struct input_dev *mp_input;
+
+ atomic_t opened;
+ atomic_t mp_opened;
+ bool plugged;
+ bool mp_plugged;
+ bool motionp;
+ __u8 ext_type;
+};
+
+enum wiiext_type {
+ WIIEXT_NONE, /* placeholder */
+ WIIEXT_CLASSIC, /* Nintendo classic controller */
+ WIIEXT_NUNCHUCK, /* Nintendo nunchuck controller */
+};
+
+enum wiiext_keys {
+ WIIEXT_KEY_C,
+ WIIEXT_KEY_Z,
+ WIIEXT_KEY_A,
+ WIIEXT_KEY_B,
+ WIIEXT_KEY_X,
+ WIIEXT_KEY_Y,
+ WIIEXT_KEY_ZL,
+ WIIEXT_KEY_ZR,
+ WIIEXT_KEY_PLUS,
+ WIIEXT_KEY_MINUS,
+ WIIEXT_KEY_HOME,
+ WIIEXT_KEY_LEFT,
+ WIIEXT_KEY_RIGHT,
+ WIIEXT_KEY_UP,
+ WIIEXT_KEY_DOWN,
+ WIIEXT_KEY_LT,
+ WIIEXT_KEY_RT,
+ WIIEXT_KEY_COUNT
+};
+
+static __u16 wiiext_keymap[] = {
+ BTN_C, /* WIIEXT_KEY_C */
+ BTN_Z, /* WIIEXT_KEY_Z */
+ BTN_A, /* WIIEXT_KEY_A */
+ BTN_B, /* WIIEXT_KEY_B */
+ BTN_X, /* WIIEXT_KEY_X */
+ BTN_Y, /* WIIEXT_KEY_Y */
+ BTN_TL2, /* WIIEXT_KEY_ZL */
+ BTN_TR2, /* WIIEXT_KEY_ZR */
+ KEY_NEXT, /* WIIEXT_KEY_PLUS */
+ KEY_PREVIOUS, /* WIIEXT_KEY_MINUS */
+ BTN_MODE, /* WIIEXT_KEY_HOME */
+ KEY_LEFT, /* WIIEXT_KEY_LEFT */
+ KEY_RIGHT, /* WIIEXT_KEY_RIGHT */
+ KEY_UP, /* WIIEXT_KEY_UP */
+ KEY_DOWN, /* WIIEXT_KEY_DOWN */
+ BTN_TL, /* WIIEXT_KEY_LT */
+ BTN_TR, /* WIIEXT_KEY_RT */
+};
+
+/* disable all extensions */
+static void ext_disable(struct wiimote_ext *ext)
+{
+ unsigned long flags;
+ __u8 wmem = 0x55;
+
+ if (!wiimote_cmd_acquire(ext->wdata)) {
+ wiimote_cmd_write(ext->wdata, 0xa400f0, &wmem, sizeof(wmem));
+ wiimote_cmd_release(ext->wdata);
+ }
+
+ spin_lock_irqsave(&ext->wdata->state.lock, flags);
+ ext->motionp = false;
+ ext->ext_type = WIIEXT_NONE;
+ wiiproto_req_drm(ext->wdata, WIIPROTO_REQ_NULL);
+ spin_unlock_irqrestore(&ext->wdata->state.lock, flags);
+}
+
+static bool motionp_read(struct wiimote_ext *ext)
+{
+ __u8 rmem[2], wmem;
+ ssize_t ret;
+ bool avail = false;
+
+ if (!atomic_read(&ext->mp_opened))
+ return false;
+
+ if (wiimote_cmd_acquire(ext->wdata))
+ return false;
+
+ /* initialize motion plus */
+ wmem = 0x55;
+ ret = wiimote_cmd_write(ext->wdata, 0xa600f0, &wmem, sizeof(wmem));
+ if (ret)
+ goto error;
+
+ /* read motion plus ID */
+ ret = wiimote_cmd_read(ext->wdata, 0xa600fe, rmem, 2);
+ if (ret == 2 || rmem[1] == 0x5)
+ avail = true;
+
+error:
+ wiimote_cmd_release(ext->wdata);
+ return avail;
+}
+
+static __u8 ext_read(struct wiimote_ext *ext)
+{
+ ssize_t ret;
+ __u8 rmem[2], wmem;
+ __u8 type = WIIEXT_NONE;
+
+ if (!ext->plugged || !atomic_read(&ext->opened))
+ return WIIEXT_NONE;
+
+ if (wiimote_cmd_acquire(ext->wdata))
+ return WIIEXT_NONE;
+
+ /* initialize extension */
+ wmem = 0x55;
+ ret = wiimote_cmd_write(ext->wdata, 0xa400f0, &wmem, sizeof(wmem));
+ if (!ret) {
+ /* disable encryption */
+ wmem = 0x0;
+ wiimote_cmd_write(ext->wdata, 0xa400fb, &wmem, sizeof(wmem));
+ }
+
+ /* read extension ID */
+ ret = wiimote_cmd_read(ext->wdata, 0xa400fe, rmem, 2);
+ if (ret == 2) {
+ if (rmem[0] == 0 && rmem[1] == 0)
+ type = WIIEXT_NUNCHUCK;
+ else if (rmem[0] == 0x01 && rmem[1] == 0x01)
+ type = WIIEXT_CLASSIC;
+ }
+
+ wiimote_cmd_release(ext->wdata);
+
+ return type;
+}
+
+static void ext_enable(struct wiimote_ext *ext, bool motionp, __u8 ext_type)
+{
+ unsigned long flags;
+ __u8 wmem;
+ int ret;
+
+ if (motionp) {
+ if (wiimote_cmd_acquire(ext->wdata))
+ return;
+
+ if (ext_type == WIIEXT_CLASSIC)
+ wmem = 0x07;
+ else if (ext_type == WIIEXT_NUNCHUCK)
+ wmem = 0x05;
+ else
+ wmem = 0x04;
+
+ ret = wiimote_cmd_write(ext->wdata, 0xa600fe, &wmem, sizeof(wmem));
+ wiimote_cmd_release(ext->wdata);
+ if (ret)
+ return;
+ }
+
+ spin_lock_irqsave(&ext->wdata->state.lock, flags);
+ ext->motionp = motionp;
+ ext->ext_type = ext_type;
+ wiiproto_req_drm(ext->wdata, WIIPROTO_REQ_NULL);
+ spin_unlock_irqrestore(&ext->wdata->state.lock, flags);
+}
+
+static void wiiext_worker(struct work_struct *work)
+{
+ struct wiimote_ext *ext = container_of(work, struct wiimote_ext,
+ worker);
+ bool motionp;
+ __u8 ext_type;
+
+ ext_disable(ext);
+ motionp = motionp_read(ext);
+ ext_type = ext_read(ext);
+ ext_enable(ext, motionp, ext_type);
+}
+
+/* schedule work only once, otherwise mark for reschedule */
+static void wiiext_schedule(struct wiimote_ext *ext)
+{
+ queue_work(system_nrt_wq, &ext->worker);
+}
+
+/*
+ * Reacts to extension port events
+ * Whenever the driver gets an event from the wiimote that an extension has been
+ * plugged or unplugged, this function shall be called. It checks what extensions
+ * are connected and initializes and activates them.
+ * This can be called in atomic context. The initialization is done in a
+ * separate worker thread. The state.lock spinlock must be held by the caller.
+ */
+void wiiext_event(struct wiimote_data *wdata, bool plugged)
+{
+ if (!wdata->ext)
+ return;
+
+ if (wdata->ext->plugged == plugged)
+ return;
+
+ wdata->ext->plugged = plugged;
+
+ if (!plugged)
+ wdata->ext->mp_plugged = false;
+
+ /*
+ * We need to call wiiext_schedule(wdata->ext) here, however, the
+ * extension initialization logic is not fully understood and so
+ * automatic initialization is not supported, yet.
+ */
+}
+
+/*
+ * Returns true if the current DRM mode should contain extension data and false
+ * if there is no interest in extension data.
+ * All supported extensions send 6 byte extension data so any DRM that contains
+ * extension bytes is fine.
+ * The caller must hold the state.lock spinlock.
+ */
+bool wiiext_active(struct wiimote_data *wdata)
+{
+ if (!wdata->ext)
+ return false;
+
+ return wdata->ext->motionp || wdata->ext->ext_type;
+}
+
+static void handler_motionp(struct wiimote_ext *ext, const __u8 *payload)
+{
+ __s32 x, y, z;
+ bool plugged;
+
+ /* | 8 7 6 5 4 3 | 2 | 1 |
+ * -----+------------------------------+-----+-----+
+ * 1 | Yaw Speed <7:0> |
+ * 2 | Roll Speed <7:0> |
+ * 3 | Pitch Speed <7:0> |
+ * -----+------------------------------+-----+-----+
+ * 4 | Yaw Speed <13:8> | Yaw |Pitch|
+ * -----+------------------------------+-----+-----+
+ * 5 | Roll Speed <13:8> |Roll | Ext |
+ * -----+------------------------------+-----+-----+
+ * 6 | Pitch Speed <13:8> | 1 | 0 |
+ * -----+------------------------------+-----+-----+
+ * The single bits Yaw, Roll, Pitch in the lower right corner specify
+ * whether the wiimote is rotating fast (0) or slow (1). Speed for slow
+	 * rotation is 440 deg/s and for fast rotation 2000 deg/s. To get a
+ * linear scale we multiply by 2000/440 = ~4.5454 which is 18 for fast
+ * and 9 for slow.
+ * If the wiimote is not rotating the sensor reports 2^13 = 8192.
+ * Ext specifies whether an extension is connected to the motionp.
+ */
+
+ x = payload[0];
+ y = payload[1];
+ z = payload[2];
+
+ x |= (((__u16)payload[3]) << 6) & 0xff00;
+ y |= (((__u16)payload[4]) << 6) & 0xff00;
+ z |= (((__u16)payload[5]) << 6) & 0xff00;
+
+ x -= 8192;
+ y -= 8192;
+ z -= 8192;
+
+ if (!(payload[3] & 0x02))
+ x *= 18;
+ else
+ x *= 9;
+ if (!(payload[4] & 0x02))
+ y *= 18;
+ else
+ y *= 9;
+ if (!(payload[3] & 0x01))
+ z *= 18;
+ else
+ z *= 9;
+
+ input_report_abs(ext->mp_input, ABS_RX, x);
+ input_report_abs(ext->mp_input, ABS_RY, y);
+ input_report_abs(ext->mp_input, ABS_RZ, z);
+ input_sync(ext->mp_input);
+
+ plugged = payload[5] & 0x01;
+ if (plugged != ext->mp_plugged)
+ ext->mp_plugged = plugged;
+}
+
+static void handler_nunchuck(struct wiimote_ext *ext, const __u8 *payload)
+{
+ __s16 x, y, z, bx, by;
+
+ /* Byte | 8 7 | 6 5 | 4 3 | 2 | 1 |
+ * -----+----------+---------+---------+----+-----+
+ * 1 | Button X <7:0> |
+ * 2 | Button Y <7:0> |
+ * -----+----------+---------+---------+----+-----+
+ * 3 | Speed X <9:2> |
+ * 4 | Speed Y <9:2> |
+ * 5 | Speed Z <9:2> |
+ * -----+----------+---------+---------+----+-----+
+ * 6 | Z <1:0> | Y <1:0> | X <1:0> | BC | BZ |
+ * -----+----------+---------+---------+----+-----+
+ * Button X/Y is the analog stick. Speed X, Y and Z are the
+ * accelerometer data in the same format as the wiimote's accelerometer.
+ * The 6th byte contains the LSBs of the accelerometer data.
+ * BC and BZ are the C and Z buttons: 0 means pressed
+ *
+ * If reported interleaved with motionp, then the layout changes. The
+ * 5th and 6th byte changes to:
+ * -----+-----------------------------------+-----+
+ * 5 | Speed Z <9:3> | EXT |
+ * -----+--------+-----+-----+----+----+----+-----+
+ * 6 |Z <2:1> |Y <1>|X <1>| BC | BZ | 0 | 0 |
+ * -----+--------+-----+-----+----+----+----+-----+
+ * All three accelerometer values lose their LSB. The other data is
+ * still available but slightly moved.
+ *
+ * Center data for button values is 128. Center value for accelerometer
+	 * values is 512 / 0x200
+ */
+
+ bx = payload[0];
+ by = payload[1];
+ bx -= 128;
+ by -= 128;
+
+ x = payload[2] << 2;
+ y = payload[3] << 2;
+ z = payload[4] << 2;
+
+ if (ext->motionp) {
+ x |= (payload[5] >> 3) & 0x02;
+ y |= (payload[5] >> 4) & 0x02;
+ z &= ~0x4;
+ z |= (payload[5] >> 5) & 0x06;
+ } else {
+ x |= (payload[5] >> 2) & 0x03;
+ y |= (payload[5] >> 4) & 0x03;
+ z |= (payload[5] >> 6) & 0x03;
+ }
+
+ x -= 0x200;
+ y -= 0x200;
+ z -= 0x200;
+
+ input_report_abs(ext->input, ABS_HAT0X, bx);
+ input_report_abs(ext->input, ABS_HAT0Y, by);
+
+ input_report_abs(ext->input, ABS_RX, x);
+ input_report_abs(ext->input, ABS_RY, y);
+ input_report_abs(ext->input, ABS_RZ, z);
+
+ if (ext->motionp) {
+ input_report_key(ext->input,
+ wiiext_keymap[WIIEXT_KEY_Z], !!(payload[5] & 0x04));
+ input_report_key(ext->input,
+ wiiext_keymap[WIIEXT_KEY_C], !!(payload[5] & 0x08));
+ } else {
+ input_report_key(ext->input,
+ wiiext_keymap[WIIEXT_KEY_Z], !!(payload[5] & 0x01));
+ input_report_key(ext->input,
+ wiiext_keymap[WIIEXT_KEY_C], !!(payload[5] & 0x02));
+ }
+
+ input_sync(ext->input);
+}
+
+static void handler_classic(struct wiimote_ext *ext, const __u8 *payload)
+{
+ __s8 rx, ry, lx, ly, lt, rt;
+
+ /* Byte | 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 |
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ * 1 | RX <5:4> | LX <5:0> |
+ * 2 | RX <3:2> | LY <5:0> |
+ * -----+-----+-----+-----+-----------------------------+
+ * 3 |RX<1>| LT <5:4> | RY <5:1> |
+ * -----+-----+-----------+-----------------------------+
+ * 4 | LT <3:1> | RT <5:1> |
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ * 5 | BDR | BDD | BLT | B- | BH | B+ | BRT | 1 |
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ * 6 | BZL | BB | BY | BA | BX | BZR | BDL | BDU |
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ * All buttons are 0 if pressed
+ * RX and RY are right analog stick
+ * LX and LY are left analog stick
+ * LT is left trigger, RT is right trigger
+ * BLT is 0 if left trigger is fully pressed
+ * BRT is 0 if right trigger is fully pressed
+ * BDR, BDD, BDL, BDU form the D-Pad with right, down, left, up buttons
+ * BZL is left Z button and BZR is right Z button
+	 * B-, BH, B+ are -, HOME and + buttons
+	 * BB, BY, BA, BX are B, Y, A, X buttons
+ * LSB of RX, RY, LT, and RT are not transmitted and always 0.
+ *
+ * With motionp enabled it changes slightly to this:
+ * Byte | 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 |
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ * 1 | RX <4:3> | LX <5:1> | BDU |
+ * 2 | RX <2:1> | LY <5:1> | BDL |
+ * -----+-----+-----+-----+-----------------------+-----+
+ * 3 |RX<0>| LT <4:3> | RY <4:0> |
+ * -----+-----+-----------+-----------------------------+
+ * 4 | LT <2:0> | RT <4:0> |
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ * 5 | BDR | BDD | BLT | B- | BH | B+ | BRT | EXT |
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ * 6 | BZL | BB | BY | BA | BX | BZR | 0 | 0 |
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ * Only the LSBs of LX and LY are lost. BDU and BDL are moved, the rest
+ * is the same as before.
+ */
+
+ if (ext->motionp) {
+ lx = payload[0] & 0x3e;
+ ly = payload[0] & 0x3e;
+ } else {
+ lx = payload[0] & 0x3f;
+ ly = payload[0] & 0x3f;
+ }
+
+ rx = (payload[0] >> 3) & 0x14;
+ rx |= (payload[1] >> 5) & 0x06;
+ rx |= (payload[2] >> 7) & 0x01;
+ ry = payload[2] & 0x1f;
+
+ rt = payload[3] & 0x1f;
+ lt = (payload[2] >> 2) & 0x18;
+ lt |= (payload[3] >> 5) & 0x07;
+
+ rx <<= 1;
+ ry <<= 1;
+ rt <<= 1;
+ lt <<= 1;
+
+ input_report_abs(ext->input, ABS_HAT1X, lx - 0x20);
+ input_report_abs(ext->input, ABS_HAT1Y, ly - 0x20);
+ input_report_abs(ext->input, ABS_HAT2X, rx - 0x20);
+ input_report_abs(ext->input, ABS_HAT2Y, ry - 0x20);
+ input_report_abs(ext->input, ABS_HAT3X, rt - 0x20);
+ input_report_abs(ext->input, ABS_HAT3Y, lt - 0x20);
+
+ input_report_key(ext->input, wiiext_keymap[WIIEXT_KEY_RIGHT],
+ !!(payload[4] & 0x80));
+ input_report_key(ext->input, wiiext_keymap[WIIEXT_KEY_DOWN],
+ !!(payload[4] & 0x40));
+ input_report_key(ext->input, wiiext_keymap[WIIEXT_KEY_LT],
+ !!(payload[4] & 0x20));
+ input_report_key(ext->input, wiiext_keymap[WIIEXT_KEY_MINUS],
+ !!(payload[4] & 0x10));
+ input_report_key(ext->input, wiiext_keymap[WIIEXT_KEY_HOME],
+ !!(payload[4] & 0x08));
+ input_report_key(ext->input, wiiext_keymap[WIIEXT_KEY_PLUS],
+ !!(payload[4] & 0x04));
+ input_report_key(ext->input, wiiext_keymap[WIIEXT_KEY_RT],
+ !!(payload[4] & 0x02));
+ input_report_key(ext->input, wiiext_keymap[WIIEXT_KEY_ZL],
+ !!(payload[5] & 0x80));
+ input_report_key(ext->input, wiiext_keymap[WIIEXT_KEY_B],
+ !!(payload[5] & 0x40));
+ input_report_key(ext->input, wiiext_keymap[WIIEXT_KEY_Y],
+ !!(payload[5] & 0x20));
+ input_report_key(ext->input, wiiext_keymap[WIIEXT_KEY_A],
+ !!(payload[5] & 0x10));
+ input_report_key(ext->input, wiiext_keymap[WIIEXT_KEY_X],
+ !!(payload[5] & 0x08));
+ input_report_key(ext->input, wiiext_keymap[WIIEXT_KEY_ZR],
+ !!(payload[5] & 0x04));
+
+ if (ext->motionp) {
+ input_report_key(ext->input, wiiext_keymap[WIIEXT_KEY_UP],
+ !!(payload[0] & 0x01));
+ input_report_key(ext->input, wiiext_keymap[WIIEXT_KEY_LEFT],
+ !!(payload[1] & 0x01));
+ } else {
+ input_report_key(ext->input, wiiext_keymap[WIIEXT_KEY_UP],
+ !!(payload[5] & 0x01));
+ input_report_key(ext->input, wiiext_keymap[WIIEXT_KEY_LEFT],
+ !!(payload[5] & 0x02));
+ }
+
+ input_sync(ext->input);
+}
+
+/* call this with state.lock spinlock held */
+void wiiext_handle(struct wiimote_data *wdata, const __u8 *payload)
+{
+ struct wiimote_ext *ext = wdata->ext;
+
+ if (!ext)
+ return;
+
+ if (ext->motionp && (payload[5] & 0x02)) {
+ handler_motionp(ext, payload);
+ } else if (ext->ext_type == WIIEXT_NUNCHUCK) {
+ handler_nunchuck(ext, payload);
+ } else if (ext->ext_type == WIIEXT_CLASSIC) {
+ handler_classic(ext, payload);
+ }
+}
+
+static ssize_t wiiext_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct wiimote_data *wdata = dev_to_wii(dev);
+ __u8 type = WIIEXT_NONE;
+ bool motionp = false;
+ unsigned long flags;
+
+ spin_lock_irqsave(&wdata->state.lock, flags);
+ if (wdata->ext) {
+ motionp = wdata->ext->motionp;
+ type = wdata->ext->ext_type;
+ }
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+ if (type == WIIEXT_NUNCHUCK) {
+ if (motionp)
+ return sprintf(buf, "motionp+nunchuck\n");
+ else
+ return sprintf(buf, "nunchuck\n");
+ } else if (type == WIIEXT_CLASSIC) {
+ if (motionp)
+ return sprintf(buf, "motionp+classic\n");
+ else
+ return sprintf(buf, "classic\n");
+ } else {
+ if (motionp)
+ return sprintf(buf, "motionp\n");
+ else
+ return sprintf(buf, "none\n");
+ }
+}
+
+static DEVICE_ATTR(extension, S_IRUGO, wiiext_show, NULL);
+
+static int wiiext_input_open(struct input_dev *dev)
+{
+ struct wiimote_ext *ext = input_get_drvdata(dev);
+ int ret;
+
+ ret = hid_hw_open(ext->wdata->hdev);
+ if (ret)
+ return ret;
+
+ atomic_inc(&ext->opened);
+ wiiext_schedule(ext);
+
+ return 0;
+}
+
+static void wiiext_input_close(struct input_dev *dev)
+{
+ struct wiimote_ext *ext = input_get_drvdata(dev);
+
+ atomic_dec(&ext->opened);
+ wiiext_schedule(ext);
+ hid_hw_close(ext->wdata->hdev);
+}
+
+static int wiiext_mp_open(struct input_dev *dev)
+{
+ struct wiimote_ext *ext = input_get_drvdata(dev);
+ int ret;
+
+ ret = hid_hw_open(ext->wdata->hdev);
+ if (ret)
+ return ret;
+
+ atomic_inc(&ext->mp_opened);
+ wiiext_schedule(ext);
+
+ return 0;
+}
+
+static void wiiext_mp_close(struct input_dev *dev)
+{
+ struct wiimote_ext *ext = input_get_drvdata(dev);
+
+ atomic_dec(&ext->mp_opened);
+ wiiext_schedule(ext);
+ hid_hw_close(ext->wdata->hdev);
+}
+
+/* Initializes the extension driver of a wiimote */
+int wiiext_init(struct wiimote_data *wdata)
+{
+ struct wiimote_ext *ext;
+ unsigned long flags;
+ int ret, i;
+
+ ext = kzalloc(sizeof(*ext), GFP_KERNEL);
+ if (!ext)
+ return -ENOMEM;
+
+ ext->wdata = wdata;
+ INIT_WORK(&ext->worker, wiiext_worker);
+
+ ext->input = input_allocate_device();
+ if (!ext->input) {
+ ret = -ENOMEM;
+ goto err_input;
+ }
+
+ input_set_drvdata(ext->input, ext);
+ ext->input->open = wiiext_input_open;
+ ext->input->close = wiiext_input_close;
+ ext->input->dev.parent = &wdata->hdev->dev;
+ ext->input->id.bustype = wdata->hdev->bus;
+ ext->input->id.vendor = wdata->hdev->vendor;
+ ext->input->id.product = wdata->hdev->product;
+ ext->input->id.version = wdata->hdev->version;
+ ext->input->name = WIIMOTE_NAME " Extension";
+
+ set_bit(EV_KEY, ext->input->evbit);
+ for (i = 0; i < WIIEXT_KEY_COUNT; ++i)
+ set_bit(wiiext_keymap[i], ext->input->keybit);
+
+ set_bit(EV_ABS, ext->input->evbit);
+ set_bit(ABS_HAT0X, ext->input->absbit);
+ set_bit(ABS_HAT0Y, ext->input->absbit);
+ set_bit(ABS_HAT1X, ext->input->absbit);
+ set_bit(ABS_HAT1Y, ext->input->absbit);
+ set_bit(ABS_HAT2X, ext->input->absbit);
+ set_bit(ABS_HAT2Y, ext->input->absbit);
+ set_bit(ABS_HAT3X, ext->input->absbit);
+ set_bit(ABS_HAT3Y, ext->input->absbit);
+ input_set_abs_params(ext->input, ABS_HAT0X, -120, 120, 2, 4);
+ input_set_abs_params(ext->input, ABS_HAT0Y, -120, 120, 2, 4);
+ input_set_abs_params(ext->input, ABS_HAT1X, -30, 30, 1, 1);
+ input_set_abs_params(ext->input, ABS_HAT1Y, -30, 30, 1, 1);
+ input_set_abs_params(ext->input, ABS_HAT2X, -30, 30, 1, 1);
+ input_set_abs_params(ext->input, ABS_HAT2Y, -30, 30, 1, 1);
+ input_set_abs_params(ext->input, ABS_HAT3X, -30, 30, 1, 1);
+ input_set_abs_params(ext->input, ABS_HAT3Y, -30, 30, 1, 1);
+ set_bit(ABS_RX, ext->input->absbit);
+ set_bit(ABS_RY, ext->input->absbit);
+ set_bit(ABS_RZ, ext->input->absbit);
+ input_set_abs_params(ext->input, ABS_RX, -500, 500, 2, 4);
+ input_set_abs_params(ext->input, ABS_RY, -500, 500, 2, 4);
+ input_set_abs_params(ext->input, ABS_RZ, -500, 500, 2, 4);
+
+ ret = input_register_device(ext->input);
+ if (ret) {
+ input_free_device(ext->input);
+ goto err_input;
+ }
+
+ ext->mp_input = input_allocate_device();
+ if (!ext->mp_input) {
+ ret = -ENOMEM;
+ goto err_mp;
+ }
+
+ input_set_drvdata(ext->mp_input, ext);
+ ext->mp_input->open = wiiext_mp_open;
+ ext->mp_input->close = wiiext_mp_close;
+ ext->mp_input->dev.parent = &wdata->hdev->dev;
+ ext->mp_input->id.bustype = wdata->hdev->bus;
+ ext->mp_input->id.vendor = wdata->hdev->vendor;
+ ext->mp_input->id.product = wdata->hdev->product;
+ ext->mp_input->id.version = wdata->hdev->version;
+ ext->mp_input->name = WIIMOTE_NAME " Motion+";
+
+ set_bit(EV_ABS, ext->mp_input->evbit);
+ set_bit(ABS_RX, ext->mp_input->absbit);
+ set_bit(ABS_RY, ext->mp_input->absbit);
+ set_bit(ABS_RZ, ext->mp_input->absbit);
+ input_set_abs_params(ext->mp_input, ABS_RX, -160000, 160000, 4, 8);
+ input_set_abs_params(ext->mp_input, ABS_RY, -160000, 160000, 4, 8);
+ input_set_abs_params(ext->mp_input, ABS_RZ, -160000, 160000, 4, 8);
+
+ ret = input_register_device(ext->mp_input);
+ if (ret) {
+ input_free_device(ext->mp_input);
+ goto err_mp;
+ }
+
+ ret = device_create_file(&wdata->hdev->dev, &dev_attr_extension);
+ if (ret)
+ goto err_dev;
+
+ spin_lock_irqsave(&wdata->state.lock, flags);
+ wdata->ext = ext;
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+ return 0;
+
+err_dev:
+ input_unregister_device(ext->mp_input);
+err_mp:
+ input_unregister_device(ext->input);
+err_input:
+ kfree(ext);
+ return ret;
+}
+
+/* Deinitializes the extension driver of a wiimote */
+void wiiext_deinit(struct wiimote_data *wdata)
+{
+ struct wiimote_ext *ext = wdata->ext;
+ unsigned long flags;
+
+ if (!ext)
+ return;
+
+ /*
+ * We first unset wdata->ext to avoid further input from the wiimote
+ * core. The worker thread does not access this pointer so it is not
+ * affected by this.
+ * We kill the worker after this so it does not get respawned during
+ * deinitialization.
+ */
+
+ spin_lock_irqsave(&wdata->state.lock, flags);
+ wdata->ext = NULL;
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+ device_remove_file(&wdata->hdev->dev, &dev_attr_extension);
+ input_unregister_device(ext->mp_input);
+ input_unregister_device(ext->input);
+
+ cancel_work_sync(&ext->worker);
+ kfree(ext);
+}
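
The byte layout documented in handler_classic() above can be checked with a small standalone program. Below is a minimal, hypothetical userspace sketch (not part of this patch; the sample payload is invented) that reassembles the right-stick X value from a regular, non-Motion+ report exactly as the table describes:

	#include <stdint.h>
	#include <stdio.h>

	/* Reassemble RX<5:1> from a regular (non-Motion+) classic controller
	 * report, following the byte table in handler_classic(). */
	static int classic_rx(const uint8_t p[6])
	{
		int rx;

		rx  = (p[0] >> 3) & 0x18;	/* byte 1, bits 8:7 -> RX<5:4> */
		rx |= (p[1] >> 5) & 0x06;	/* byte 2, bits 8:7 -> RX<3:2> */
		rx |= (p[2] >> 7) & 0x01;	/* byte 3, bit 8    -> RX<1>   */
		return (rx << 1) - 0x20;	/* LSB is always 0; center around 0 */
	}

	int main(void)
	{
		/* invented sample payload */
		const uint8_t sample[6] = { 0xa0, 0x20, 0x90, 0x1f, 0xff, 0xff };

		printf("rx = %d\n", classic_rx(sample));
		return 0;
	}
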
diff --git a/drivers/hid/hid-wiimote.h b/drivers/hid/hid-wiimote.h
new file mode 100644
index 000000000000..c81dbeb086c5
--- /dev/null
+++ b/drivers/hid/hid-wiimote.h
@@ -0,0 +1,208 @@
+#ifndef __HID_WIIMOTE_H
+#define __HID_WIIMOTE_H
+
+/*
+ * HID driver for Nintendo Wiimote devices
+ * Copyright (c) 2011 David Herrmann
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/input.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/power_supply.h>
+#include <linux/spinlock.h>
+
+#define WIIMOTE_NAME "Nintendo Wii Remote"
+#define WIIMOTE_BUFSIZE 32
+
+#define WIIPROTO_FLAG_LED1 0x01
+#define WIIPROTO_FLAG_LED2 0x02
+#define WIIPROTO_FLAG_LED3 0x04
+#define WIIPROTO_FLAG_LED4 0x08
+#define WIIPROTO_FLAG_RUMBLE 0x10
+#define WIIPROTO_FLAG_ACCEL 0x20
+#define WIIPROTO_FLAG_IR_BASIC 0x40
+#define WIIPROTO_FLAG_IR_EXT 0x80
+#define WIIPROTO_FLAG_IR_FULL 0xc0 /* IR_BASIC | IR_EXT */
+#define WIIPROTO_FLAGS_LEDS (WIIPROTO_FLAG_LED1 | WIIPROTO_FLAG_LED2 | \
+ WIIPROTO_FLAG_LED3 | WIIPROTO_FLAG_LED4)
+#define WIIPROTO_FLAGS_IR (WIIPROTO_FLAG_IR_BASIC | WIIPROTO_FLAG_IR_EXT | \
+ WIIPROTO_FLAG_IR_FULL)
+
+/* return flag for led \num */
+#define WIIPROTO_FLAG_LED(num) (WIIPROTO_FLAG_LED1 << (num - 1))
+
+struct wiimote_buf {
+ __u8 data[HID_MAX_BUFFER_SIZE];
+ size_t size;
+};
+
+struct wiimote_state {
+ spinlock_t lock;
+ __u8 flags;
+ __u8 accel_split[2];
+ __u8 drm;
+
+ /* synchronous cmd requests */
+ struct mutex sync;
+ struct completion ready;
+ int cmd;
+ __u32 opt;
+
+ /* results of synchronous requests */
+ __u8 cmd_battery;
+ __u8 cmd_err;
+ __u8 *cmd_read_buf;
+ __u8 cmd_read_size;
+};
+
+struct wiimote_data {
+ struct hid_device *hdev;
+ struct input_dev *input;
+ struct led_classdev *leds[4];
+ struct input_dev *accel;
+ struct input_dev *ir;
+ struct power_supply battery;
+ struct wiimote_ext *ext;
+ struct wiimote_debug *debug;
+
+ spinlock_t qlock;
+ __u8 head;
+ __u8 tail;
+ struct wiimote_buf outq[WIIMOTE_BUFSIZE];
+ struct work_struct worker;
+
+ struct wiimote_state state;
+};
+
+enum wiiproto_reqs {
+ WIIPROTO_REQ_NULL = 0x0,
+ WIIPROTO_REQ_RUMBLE = 0x10,
+ WIIPROTO_REQ_LED = 0x11,
+ WIIPROTO_REQ_DRM = 0x12,
+ WIIPROTO_REQ_IR1 = 0x13,
+ WIIPROTO_REQ_SREQ = 0x15,
+ WIIPROTO_REQ_WMEM = 0x16,
+ WIIPROTO_REQ_RMEM = 0x17,
+ WIIPROTO_REQ_IR2 = 0x1a,
+ WIIPROTO_REQ_STATUS = 0x20,
+ WIIPROTO_REQ_DATA = 0x21,
+ WIIPROTO_REQ_RETURN = 0x22,
+ WIIPROTO_REQ_DRM_K = 0x30,
+ WIIPROTO_REQ_DRM_KA = 0x31,
+ WIIPROTO_REQ_DRM_KE = 0x32,
+ WIIPROTO_REQ_DRM_KAI = 0x33,
+ WIIPROTO_REQ_DRM_KEE = 0x34,
+ WIIPROTO_REQ_DRM_KAE = 0x35,
+ WIIPROTO_REQ_DRM_KIE = 0x36,
+ WIIPROTO_REQ_DRM_KAIE = 0x37,
+ WIIPROTO_REQ_DRM_E = 0x3d,
+ WIIPROTO_REQ_DRM_SKAI1 = 0x3e,
+ WIIPROTO_REQ_DRM_SKAI2 = 0x3f,
+ WIIPROTO_REQ_MAX
+};
+
+#define dev_to_wii(pdev) hid_get_drvdata(container_of(pdev, struct hid_device, \
+ dev))
+
+extern void wiiproto_req_drm(struct wiimote_data *wdata, __u8 drm);
+extern int wiimote_cmd_write(struct wiimote_data *wdata, __u32 offset,
+ const __u8 *wmem, __u8 size);
+extern ssize_t wiimote_cmd_read(struct wiimote_data *wdata, __u32 offset,
+ __u8 *rmem, __u8 size);
+
+#define wiiproto_req_rreg(wdata, os, sz) \
+ wiiproto_req_rmem((wdata), false, (os), (sz))
+#define wiiproto_req_reeprom(wdata, os, sz) \
+ wiiproto_req_rmem((wdata), true, (os), (sz))
+extern void wiiproto_req_rmem(struct wiimote_data *wdata, bool eeprom,
+ __u32 offset, __u16 size);
+
+#ifdef CONFIG_HID_WIIMOTE_EXT
+
+extern int wiiext_init(struct wiimote_data *wdata);
+extern void wiiext_deinit(struct wiimote_data *wdata);
+extern void wiiext_event(struct wiimote_data *wdata, bool plugged);
+extern bool wiiext_active(struct wiimote_data *wdata);
+extern void wiiext_handle(struct wiimote_data *wdata, const __u8 *payload);
+
+#else
+
+static inline int wiiext_init(void *u) { return 0; }
+static inline void wiiext_deinit(void *u) { }
+static inline void wiiext_event(void *u, bool p) { }
+static inline bool wiiext_active(void *u) { return false; }
+static inline void wiiext_handle(void *u, const __u8 *p) { }
+
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+
+extern int wiidebug_init(struct wiimote_data *wdata);
+extern void wiidebug_deinit(struct wiimote_data *wdata);
+
+#else
+
+static inline int wiidebug_init(void *u) { return 0; }
+static inline void wiidebug_deinit(void *u) { }
+
+#endif
+
+/* requires the state.lock spinlock to be held */
+static inline bool wiimote_cmd_pending(struct wiimote_data *wdata, int cmd,
+ __u32 opt)
+{
+ return wdata->state.cmd == cmd && wdata->state.opt == opt;
+}
+
+/* requires the state.lock spinlock to be held */
+static inline void wiimote_cmd_complete(struct wiimote_data *wdata)
+{
+ wdata->state.cmd = WIIPROTO_REQ_NULL;
+ complete(&wdata->state.ready);
+}
+
+static inline int wiimote_cmd_acquire(struct wiimote_data *wdata)
+{
+ return mutex_lock_interruptible(&wdata->state.sync) ? -ERESTARTSYS : 0;
+}
+
+/* requires the state.lock spinlock to be held */
+static inline void wiimote_cmd_set(struct wiimote_data *wdata, int cmd,
+ __u32 opt)
+{
+ INIT_COMPLETION(wdata->state.ready);
+ wdata->state.cmd = cmd;
+ wdata->state.opt = opt;
+}
+
+static inline void wiimote_cmd_release(struct wiimote_data *wdata)
+{
+ mutex_unlock(&wdata->state.sync);
+}
+
+static inline int wiimote_cmd_wait(struct wiimote_data *wdata)
+{
+ int ret;
+
+ ret = wait_for_completion_interruptible_timeout(&wdata->state.ready, HZ);
+ if (ret < 0)
+ return -ERESTARTSYS;
+ else if (ret == 0)
+ return -EIO;
+ else
+ return 0;
+}
+
+#endif
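
The wiimote_cmd_* helpers above implement a simple synchronous request protocol: take the sync mutex, mark the pending command under state.lock, send the request, then sleep until the report handler calls wiimote_cmd_complete(). A hypothetical sketch of how a caller would typically combine them (the request transmission itself is elided and the function name is invented for illustration):

	/* Hypothetical usage sketch of the synchronous command helpers. */
	static int wiimote_example_sync_status(struct wiimote_data *wdata)
	{
		unsigned long flags;
		int ret;

		ret = wiimote_cmd_acquire(wdata);	/* serialize with other commands */
		if (ret)
			return ret;

		spin_lock_irqsave(&wdata->state.lock, flags);
		wiimote_cmd_set(wdata, WIIPROTO_REQ_STATUS, 0);	/* mark pending cmd */
		/* ...queue the actual status request to the device here... */
		spin_unlock_irqrestore(&wdata->state.lock, flags);

		ret = wiimote_cmd_wait(wdata);	/* completed by the report handler */
		wiimote_cmd_release(wdata);

		return ret;
	}
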
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index b403fcef0b86..5bf91dbad59d 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -197,16 +197,24 @@ static int usbhid_restart_out_queue(struct usbhid_device *usbhid)
{
struct hid_device *hid = usb_get_intfdata(usbhid->intf);
int kicked;
+ int r;
if (!hid)
return 0;
if ((kicked = (usbhid->outhead != usbhid->outtail))) {
dbg("Kicking head %d tail %d", usbhid->outhead, usbhid->outtail);
+
+ r = usb_autopm_get_interface_async(usbhid->intf);
+ if (r < 0)
+ return r;
+ /* Asynchronously flush queue. */
+ set_bit(HID_OUT_RUNNING, &usbhid->iofl);
if (hid_submit_out(hid)) {
clear_bit(HID_OUT_RUNNING, &usbhid->iofl);
- wake_up(&usbhid->wait);
+ usb_autopm_put_interface_async(usbhid->intf);
}
+ wake_up(&usbhid->wait);
}
return kicked;
}
@@ -215,6 +223,7 @@ static int usbhid_restart_ctrl_queue(struct usbhid_device *usbhid)
{
struct hid_device *hid = usb_get_intfdata(usbhid->intf);
int kicked;
+ int r;
WARN_ON(hid == NULL);
if (!hid)
@@ -222,10 +231,17 @@ static int usbhid_restart_ctrl_queue(struct usbhid_device *usbhid)
if ((kicked = (usbhid->ctrlhead != usbhid->ctrltail))) {
dbg("Kicking head %d tail %d", usbhid->ctrlhead, usbhid->ctrltail);
+
+ r = usb_autopm_get_interface_async(usbhid->intf);
+ if (r < 0)
+ return r;
+ /* Asynchronously flush queue. */
+ set_bit(HID_CTRL_RUNNING, &usbhid->iofl);
if (hid_submit_ctrl(hid)) {
clear_bit(HID_CTRL_RUNNING, &usbhid->iofl);
- wake_up(&usbhid->wait);
+ usb_autopm_put_interface_async(usbhid->intf);
}
+ wake_up(&usbhid->wait);
}
return kicked;
}
@@ -304,30 +320,21 @@ static int hid_submit_out(struct hid_device *hid)
report = usbhid->out[usbhid->outtail].report;
raw_report = usbhid->out[usbhid->outtail].raw_report;
- r = usb_autopm_get_interface_async(usbhid->intf);
- if (r < 0)
- return -1;
-
- /*
- * if the device hasn't been woken, we leave the output
- * to resume()
- */
- if (!test_bit(HID_REPORTED_IDLE, &usbhid->iofl)) {
- usbhid->urbout->transfer_buffer_length = ((report->size - 1) >> 3) + 1 + (report->id > 0);
- usbhid->urbout->dev = hid_to_usb_dev(hid);
- memcpy(usbhid->outbuf, raw_report, usbhid->urbout->transfer_buffer_length);
- kfree(raw_report);
+ usbhid->urbout->transfer_buffer_length = ((report->size - 1) >> 3) +
+ 1 + (report->id > 0);
+ usbhid->urbout->dev = hid_to_usb_dev(hid);
+ memcpy(usbhid->outbuf, raw_report,
+ usbhid->urbout->transfer_buffer_length);
+ kfree(raw_report);
- dbg_hid("submitting out urb\n");
+ dbg_hid("submitting out urb\n");
- if (usb_submit_urb(usbhid->urbout, GFP_ATOMIC)) {
- hid_err(hid, "usb_submit_urb(out) failed\n");
- usb_autopm_put_interface_async(usbhid->intf);
- return -1;
- }
- usbhid->last_out = jiffies;
+ r = usb_submit_urb(usbhid->urbout, GFP_ATOMIC);
+ if (r < 0) {
+ hid_err(hid, "usb_submit_urb(out) failed: %d\n", r);
+ return r;
}
-
+ usbhid->last_out = jiffies;
return 0;
}
@@ -343,50 +350,48 @@ static int hid_submit_ctrl(struct hid_device *hid)
raw_report = usbhid->ctrl[usbhid->ctrltail].raw_report;
dir = usbhid->ctrl[usbhid->ctrltail].dir;
- r = usb_autopm_get_interface_async(usbhid->intf);
- if (r < 0)
- return -1;
- if (!test_bit(HID_REPORTED_IDLE, &usbhid->iofl)) {
- len = ((report->size - 1) >> 3) + 1 + (report->id > 0);
- if (dir == USB_DIR_OUT) {
- usbhid->urbctrl->pipe = usb_sndctrlpipe(hid_to_usb_dev(hid), 0);
- usbhid->urbctrl->transfer_buffer_length = len;
- memcpy(usbhid->ctrlbuf, raw_report, len);
- kfree(raw_report);
- } else {
- int maxpacket, padlen;
-
- usbhid->urbctrl->pipe = usb_rcvctrlpipe(hid_to_usb_dev(hid), 0);
- maxpacket = usb_maxpacket(hid_to_usb_dev(hid), usbhid->urbctrl->pipe, 0);
- if (maxpacket > 0) {
- padlen = DIV_ROUND_UP(len, maxpacket);
- padlen *= maxpacket;
- if (padlen > usbhid->bufsize)
- padlen = usbhid->bufsize;
- } else
- padlen = 0;
- usbhid->urbctrl->transfer_buffer_length = padlen;
- }
- usbhid->urbctrl->dev = hid_to_usb_dev(hid);
-
- usbhid->cr->bRequestType = USB_TYPE_CLASS | USB_RECIP_INTERFACE | dir;
- usbhid->cr->bRequest = (dir == USB_DIR_OUT) ? HID_REQ_SET_REPORT : HID_REQ_GET_REPORT;
- usbhid->cr->wValue = cpu_to_le16(((report->type + 1) << 8) | report->id);
- usbhid->cr->wIndex = cpu_to_le16(usbhid->ifnum);
- usbhid->cr->wLength = cpu_to_le16(len);
-
- dbg_hid("submitting ctrl urb: %s wValue=0x%04x wIndex=0x%04x wLength=%u\n",
- usbhid->cr->bRequest == HID_REQ_SET_REPORT ? "Set_Report" : "Get_Report",
- usbhid->cr->wValue, usbhid->cr->wIndex, usbhid->cr->wLength);
-
- if (usb_submit_urb(usbhid->urbctrl, GFP_ATOMIC)) {
- usb_autopm_put_interface_async(usbhid->intf);
- hid_err(hid, "usb_submit_urb(ctrl) failed\n");
- return -1;
- }
- usbhid->last_ctrl = jiffies;
+ len = ((report->size - 1) >> 3) + 1 + (report->id > 0);
+ if (dir == USB_DIR_OUT) {
+ usbhid->urbctrl->pipe = usb_sndctrlpipe(hid_to_usb_dev(hid), 0);
+ usbhid->urbctrl->transfer_buffer_length = len;
+ memcpy(usbhid->ctrlbuf, raw_report, len);
+ kfree(raw_report);
+ } else {
+ int maxpacket, padlen;
+
+ usbhid->urbctrl->pipe = usb_rcvctrlpipe(hid_to_usb_dev(hid), 0);
+ maxpacket = usb_maxpacket(hid_to_usb_dev(hid),
+ usbhid->urbctrl->pipe, 0);
+ if (maxpacket > 0) {
+ padlen = DIV_ROUND_UP(len, maxpacket);
+ padlen *= maxpacket;
+ if (padlen > usbhid->bufsize)
+ padlen = usbhid->bufsize;
+ } else
+ padlen = 0;
+ usbhid->urbctrl->transfer_buffer_length = padlen;
}
-
+ usbhid->urbctrl->dev = hid_to_usb_dev(hid);
+
+ usbhid->cr->bRequestType = USB_TYPE_CLASS | USB_RECIP_INTERFACE | dir;
+ usbhid->cr->bRequest = (dir == USB_DIR_OUT) ? HID_REQ_SET_REPORT :
+ HID_REQ_GET_REPORT;
+ usbhid->cr->wValue = cpu_to_le16(((report->type + 1) << 8) |
+ report->id);
+ usbhid->cr->wIndex = cpu_to_le16(usbhid->ifnum);
+ usbhid->cr->wLength = cpu_to_le16(len);
+
+ dbg_hid("submitting ctrl urb: %s wValue=0x%04x wIndex=0x%04x wLength=%u\n",
+ usbhid->cr->bRequest == HID_REQ_SET_REPORT ? "Set_Report" :
+ "Get_Report",
+ usbhid->cr->wValue, usbhid->cr->wIndex, usbhid->cr->wLength);
+
+ r = usb_submit_urb(usbhid->urbctrl, GFP_ATOMIC);
+ if (r < 0) {
+ hid_err(hid, "usb_submit_urb(ctrl) failed: %d\n", r);
+ return r;
+ }
+ usbhid->last_ctrl = jiffies;
return 0;
}
@@ -423,11 +428,8 @@ static void hid_irq_out(struct urb *urb)
else
usbhid->outtail = (usbhid->outtail + 1) & (HID_OUTPUT_FIFO_SIZE - 1);
- if (usbhid->outhead != usbhid->outtail) {
- if (hid_submit_out(hid)) {
- clear_bit(HID_OUT_RUNNING, &usbhid->iofl);
- wake_up(&usbhid->wait);
- }
+ if (usbhid->outhead != usbhid->outtail && !hid_submit_out(hid)) {
+ /* Successfully submitted next urb in queue */
spin_unlock_irqrestore(&usbhid->lock, flags);
return;
}
@@ -474,13 +476,9 @@ static void hid_ctrl(struct urb *urb)
else
usbhid->ctrltail = (usbhid->ctrltail + 1) & (HID_CONTROL_FIFO_SIZE - 1);
- if (usbhid->ctrlhead != usbhid->ctrltail) {
- if (hid_submit_ctrl(hid)) {
- clear_bit(HID_CTRL_RUNNING, &usbhid->iofl);
- wake_up(&usbhid->wait);
- }
+ if (usbhid->ctrlhead != usbhid->ctrltail && !hid_submit_ctrl(hid)) {
+ /* Successfully submitted next urb in queue */
spin_unlock(&usbhid->lock);
- usb_autopm_put_interface_async(usbhid->intf);
return;
}
@@ -515,9 +513,23 @@ static void __usbhid_submit_report(struct hid_device *hid, struct hid_report *re
usbhid->out[usbhid->outhead].report = report;
usbhid->outhead = head;
+ /* Try to awake from autosuspend... */
+ if (usb_autopm_get_interface_async(usbhid->intf) < 0)
+ return;
+
+ /*
+ * But if still suspended, leave urb enqueued, don't submit.
+ * Submission will occur if/when resume() drains the queue.
+ */
+ if (test_bit(HID_REPORTED_IDLE, &usbhid->iofl))
+ return;
+
if (!test_and_set_bit(HID_OUT_RUNNING, &usbhid->iofl)) {
- if (hid_submit_out(hid))
+ if (hid_submit_out(hid)) {
clear_bit(HID_OUT_RUNNING, &usbhid->iofl);
+ usb_autopm_put_interface_async(usbhid->intf);
+ }
+ wake_up(&usbhid->wait);
} else {
/*
* the queue is known to run
@@ -549,9 +561,23 @@ static void __usbhid_submit_report(struct hid_device *hid, struct hid_report *re
usbhid->ctrl[usbhid->ctrlhead].dir = dir;
usbhid->ctrlhead = head;
+ /* Try to awake from autosuspend... */
+ if (usb_autopm_get_interface_async(usbhid->intf) < 0)
+ return;
+
+ /*
+ * If already suspended, leave urb enqueued, but don't submit.
+ * Submission will occur if/when resume() drains the queue.
+ */
+ if (test_bit(HID_REPORTED_IDLE, &usbhid->iofl))
+ return;
+
if (!test_and_set_bit(HID_CTRL_RUNNING, &usbhid->iofl)) {
- if (hid_submit_ctrl(hid))
+ if (hid_submit_ctrl(hid)) {
clear_bit(HID_CTRL_RUNNING, &usbhid->iofl);
+ usb_autopm_put_interface_async(usbhid->intf);
+ }
+ wake_up(&usbhid->wait);
} else {
/*
* the queue is known to run
@@ -576,6 +602,30 @@ void usbhid_submit_report(struct hid_device *hid, struct hid_report *report, uns
}
EXPORT_SYMBOL_GPL(usbhid_submit_report);
+/* Workqueue routine to send requests to change LEDs */
+static void hid_led(struct work_struct *work)
+{
+ struct usbhid_device *usbhid =
+ container_of(work, struct usbhid_device, led_work);
+ struct hid_device *hid = usbhid->hid;
+ struct hid_field *field;
+ unsigned long flags;
+
+ field = hidinput_get_led_field(hid);
+ if (!field) {
+ hid_warn(hid, "LED event field not found\n");
+ return;
+ }
+
+ spin_lock_irqsave(&usbhid->lock, flags);
+ if (!test_bit(HID_DISCONNECTED, &usbhid->iofl)) {
+ usbhid->ledcount = hidinput_count_leds(hid);
+ hid_dbg(usbhid->hid, "New ledcount = %u\n", usbhid->ledcount);
+ __usbhid_submit_report(hid, field->report, USB_DIR_OUT);
+ }
+ spin_unlock_irqrestore(&usbhid->lock, flags);
+}
+
static int usb_hidinput_input_event(struct input_dev *dev, unsigned int type, unsigned int code, int value)
{
struct hid_device *hid = input_get_drvdata(dev);
@@ -595,17 +645,15 @@ static int usb_hidinput_input_event(struct input_dev *dev, unsigned int type, un
return -1;
}
+ spin_lock_irqsave(&usbhid->lock, flags);
hid_set_field(field, offset, value);
- if (value) {
- spin_lock_irqsave(&usbhid->lock, flags);
- usbhid->ledcount++;
- spin_unlock_irqrestore(&usbhid->lock, flags);
- } else {
- spin_lock_irqsave(&usbhid->lock, flags);
- usbhid->ledcount--;
- spin_unlock_irqrestore(&usbhid->lock, flags);
- }
- usbhid_submit_report(hid, field->report, USB_DIR_OUT);
+ spin_unlock_irqrestore(&usbhid->lock, flags);
+
+ /*
+ * Defer performing requested LED action.
+	 * This makes it more likely that all LED changes are gathered into a
+	 * single URB.
+ */
+ schedule_work(&usbhid->led_work);
return 0;
}
@@ -1100,7 +1148,7 @@ static void usbhid_stop(struct hid_device *hid)
return;
clear_bit(HID_STARTED, &usbhid->iofl);
- spin_lock_irq(&usbhid->lock); /* Sync with error handler */
+ spin_lock_irq(&usbhid->lock); /* Sync with error and led handlers */
set_bit(HID_DISCONNECTED, &usbhid->iofl);
spin_unlock_irq(&usbhid->lock);
usb_kill_urb(usbhid->urbin);
@@ -1234,6 +1282,8 @@ static int usbhid_probe(struct usb_interface *intf, const struct usb_device_id *
setup_timer(&usbhid->io_retry, hid_retry_timeout, (unsigned long) hid);
spin_lock_init(&usbhid->lock);
+ INIT_WORK(&usbhid->led_work, hid_led);
+
ret = hid_add_device(hid);
if (ret) {
if (ret != -ENODEV)
@@ -1266,6 +1316,7 @@ static void hid_cancel_delayed_stuff(struct usbhid_device *usbhid)
{
del_timer_sync(&usbhid->io_retry);
cancel_work_sync(&usbhid->reset_work);
+ cancel_work_sync(&usbhid->led_work);
}
static void hid_cease_io(struct usbhid_device *usbhid)
@@ -1367,16 +1418,6 @@ static int hid_suspend(struct usb_interface *intf, pm_message_t message)
return -EIO;
}
- if (!ignoreled && PMSG_IS_AUTO(message)) {
- spin_lock_irq(&usbhid->lock);
- if (test_bit(HID_LED_ON, &usbhid->iofl)) {
- spin_unlock_irq(&usbhid->lock);
- usbhid_mark_busy(usbhid);
- return -EBUSY;
- }
- spin_unlock_irq(&usbhid->lock);
- }
-
hid_cancel_delayed_stuff(usbhid);
hid_cease_io(usbhid);
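
The LED changes above move URB submission out of the input event path into a work item (hid_led()), so several quick toggles collapse into a single output report. A generic, hypothetical sketch of that defer-and-coalesce pattern, independent of the usbhid internals (all names invented; d->work is assumed to have been set up with INIT_WORK(&d->work, flush_leds)):

	#include <linux/printk.h>
	#include <linux/spinlock.h>
	#include <linux/workqueue.h>

	struct deferred_leds {
		spinlock_t lock;
		unsigned long wanted;		/* latest requested LED bitmask */
		struct work_struct work;	/* runs flush_leds() in task context */
	};

	static void flush_leds(struct work_struct *work)
	{
		struct deferred_leds *d = container_of(work, struct deferred_leds, work);
		unsigned long flags, leds;

		spin_lock_irqsave(&d->lock, flags);
		leds = d->wanted;		/* snapshot the most recent request */
		spin_unlock_irqrestore(&d->lock, flags);

		/* ...build and submit one output report carrying 'leds' here... */
		pr_debug("flushing leds %#lx\n", leds);
	}

	static void request_leds(struct deferred_leds *d, unsigned long mask)
	{
		unsigned long flags;

		spin_lock_irqsave(&d->lock, flags);
		d->wanted = mask;
		spin_unlock_irqrestore(&d->lock, flags);

		schedule_work(&d->work);	/* re-queuing pending work is a no-op */
	}
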
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 5028d60a22a1..c831af937481 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -47,6 +47,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_AFATECH, USB_DEVICE_ID_AFATECH_AF9016, HID_QUIRK_FULLSPEED_INTERVAL },
+ { USB_VENDOR_ID_EMS, USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_ETURBOTOUCH, USB_DEVICE_ID_ETURBOTOUCH, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_GREENASIA, USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_PANTHERLORD, USB_DEVICE_ID_PANTHERLORD_TWIN_USB_JOYSTICK, HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS },
@@ -67,6 +68,9 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_PRODIGE, USB_DEVICE_ID_PRODIGE_CORDLESS, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET },
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index 4ef02b269a71..7c297d305d5d 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -859,7 +859,7 @@ static const struct file_operations hiddev_fops = {
.llseek = noop_llseek,
};
-static char *hiddev_devnode(struct device *dev, mode_t *mode)
+static char *hiddev_devnode(struct device *dev, umode_t *mode)
{
return kasprintf(GFP_KERNEL, "usb/%s", dev_name(dev));
}
diff --git a/drivers/hid/usbhid/usbhid.h b/drivers/hid/usbhid/usbhid.h
index 1673cac93d77..cb8f703efde5 100644
--- a/drivers/hid/usbhid/usbhid.h
+++ b/drivers/hid/usbhid/usbhid.h
@@ -55,7 +55,6 @@ struct usb_interface *usbhid_find_interface(int minor);
#define HID_STARTED 8
#define HID_REPORTED_IDLE 9
#define HID_KEYS_PRESSED 10
-#define HID_LED_ON 11
/*
* USB-specific HID struct, to be pointed to
@@ -97,6 +96,8 @@ struct usbhid_device {
struct work_struct reset_work; /* Task context for resets */
wait_queue_head_t wait; /* For sleeping */
int ledcount; /* counting the number of active leds */
+
+ struct work_struct led_work; /* Task context for setting LEDs */
};
#define hid_to_usb_dev(hid_dev) \
diff --git a/drivers/hid/usbhid/usbkbd.c b/drivers/hid/usbhid/usbkbd.c
index bc445d7e3bf5..796086980f4a 100644
--- a/drivers/hid/usbhid/usbkbd.c
+++ b/drivers/hid/usbhid/usbkbd.c
@@ -64,6 +64,32 @@ static const unsigned char usb_kbd_keycode[256] = {
150,158,159,128,136,177,178,176,142,152,173,140
};
+
+/**
+ * struct usb_kbd - state of each attached keyboard
+ * @dev: input device associated with this keyboard
+ * @usbdev: usb device associated with this keyboard
+ * @old: data received in the past from the @irq URB representing which
+ * keys were pressed. By comparing with the current list of keys
+ * that are pressed, we are able to see key releases.
+ * @irq: URB for receiving a list of keys that are pressed when a
+ * new key is pressed or a key that was pressed is released.
+ * @led: URB for sending LEDs (e.g. numlock, ...)
+ * @newleds: data that will be sent with the @led URB representing which LEDs
+ *	should be on
+ * @name: Name of the keyboard. @dev's name field points to this buffer
+ * @phys: Physical path of the keyboard. @dev's phys field points to this
+ * buffer
+ * @new: Buffer for the @irq URB
+ * @cr: Control request for @led URB
+ * @leds: Buffer for the @led URB
+ * @new_dma: DMA address for @irq URB
+ * @leds_dma: DMA address for @led URB
+ * @leds_lock: spinlock that protects @leds, @newleds, and @led_urb_submitted
+ * @led_urb_submitted: indicates whether @led is in progress, i.e. it has been
+ * submitted and its completion handler has not returned yet
+ * without resubmitting @led
+ */
struct usb_kbd {
struct input_dev *dev;
struct usb_device *usbdev;
@@ -78,6 +104,10 @@ struct usb_kbd {
unsigned char *leds;
dma_addr_t new_dma;
dma_addr_t leds_dma;
+
+ spinlock_t leds_lock;
+ bool led_urb_submitted;
+
};
static void usb_kbd_irq(struct urb *urb)
@@ -136,44 +166,66 @@ resubmit:
static int usb_kbd_event(struct input_dev *dev, unsigned int type,
unsigned int code, int value)
{
+ unsigned long flags;
struct usb_kbd *kbd = input_get_drvdata(dev);
if (type != EV_LED)
return -1;
+ spin_lock_irqsave(&kbd->leds_lock, flags);
kbd->newleds = (!!test_bit(LED_KANA, dev->led) << 3) | (!!test_bit(LED_COMPOSE, dev->led) << 3) |
(!!test_bit(LED_SCROLLL, dev->led) << 2) | (!!test_bit(LED_CAPSL, dev->led) << 1) |
(!!test_bit(LED_NUML, dev->led));
- if (kbd->led->status == -EINPROGRESS)
+	if (kbd->led_urb_submitted) {
+ spin_unlock_irqrestore(&kbd->leds_lock, flags);
return 0;
+ }
- if (*(kbd->leds) == kbd->newleds)
+	if (*(kbd->leds) == kbd->newleds) {
+ spin_unlock_irqrestore(&kbd->leds_lock, flags);
return 0;
+ }
*(kbd->leds) = kbd->newleds;
+
kbd->led->dev = kbd->usbdev;
if (usb_submit_urb(kbd->led, GFP_ATOMIC))
pr_err("usb_submit_urb(leds) failed\n");
-
+ else
+ kbd->led_urb_submitted = true;
+
+ spin_unlock_irqrestore(&kbd->leds_lock, flags);
+
return 0;
}
static void usb_kbd_led(struct urb *urb)
{
+ unsigned long flags;
struct usb_kbd *kbd = urb->context;
if (urb->status)
hid_warn(urb->dev, "led urb status %d received\n",
urb->status);
- if (*(kbd->leds) == kbd->newleds)
+ spin_lock_irqsave(&kbd->leds_lock, flags);
+
+	if (*(kbd->leds) == kbd->newleds) {
+ kbd->led_urb_submitted = false;
+ spin_unlock_irqrestore(&kbd->leds_lock, flags);
return;
+ }
*(kbd->leds) = kbd->newleds;
+
kbd->led->dev = kbd->usbdev;
- if (usb_submit_urb(kbd->led, GFP_ATOMIC))
+	if (usb_submit_urb(kbd->led, GFP_ATOMIC)) {
hid_err(urb->dev, "usb_submit_urb(leds) failed\n");
+ kbd->led_urb_submitted = false;
+ }
+ spin_unlock_irqrestore(&kbd->leds_lock, flags);
+
}
static int usb_kbd_open(struct input_dev *dev)
@@ -252,6 +304,7 @@ static int usb_kbd_probe(struct usb_interface *iface,
kbd->usbdev = dev;
kbd->dev = input_dev;
+ spin_lock_init(&kbd->leds_lock);
if (dev->manufacturer)
strlcpy(kbd->name, dev->manufacturer, sizeof(kbd->name));
@@ -334,6 +387,7 @@ static void usb_kbd_disconnect(struct usb_interface *intf)
if (kbd) {
usb_kill_urb(kbd->irq);
input_unregister_device(kbd->dev);
+ usb_kill_urb(kbd->led);
usb_kbd_free_mem(interface_to_usbdev(intf), kbd);
kfree(kbd);
}
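
The new leds_lock/led_urb_submitted pair guards a single in-flight LED URB: the event path records the desired state and only submits if no URB is outstanding, while the completion handler resubmits if the requested state changed in the meantime. A stripped-down, hypothetical sketch of the same idea (names and structure invented; the real driver submits under the spinlock with GFP_ATOMIC):

	#include <linux/spinlock.h>
	#include <linux/types.h>

	struct one_shot_tx {
		spinlock_t lock;
		bool in_flight;		/* true while the URB is owned by the host controller */
		u8 requested;		/* latest state asked for by the event path */
		u8 on_wire;		/* state carried by the URB currently in flight */
	};

	/* Event path: record the request; report whether a new submission is needed. */
	static bool one_shot_request(struct one_shot_tx *t, u8 state)
	{
		unsigned long flags;
		bool submit;

		spin_lock_irqsave(&t->lock, flags);
		t->requested = state;
		submit = !t->in_flight && t->on_wire != t->requested;
		if (submit) {
			t->on_wire = t->requested;
			t->in_flight = true;	/* cleared by the completion path */
		}
		spin_unlock_irqrestore(&t->lock, flags);

		return submit;		/* caller submits the URB when this is true */
	}
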
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
index 89f52440fcf4..0e8343f585bb 100644
--- a/drivers/hv/hv_kvp.c
+++ b/drivers/hv/hv_kvp.c
@@ -212,11 +212,13 @@ kvp_respond_to_host(char *key, char *value, int error)
* The windows host expects the key/value pair to be encoded
* in utf16.
*/
- keylen = utf8s_to_utf16s(key_name, strlen(key_name),
- (wchar_t *)kvp_data->data.key);
+ keylen = utf8s_to_utf16s(key_name, strlen(key_name), UTF16_HOST_ENDIAN,
+ (wchar_t *) kvp_data->data.key,
+ HV_KVP_EXCHANGE_MAX_KEY_SIZE / 2);
kvp_data->data.key_size = 2*(keylen + 1); /* utf16 encoding */
- valuelen = utf8s_to_utf16s(value, strlen(value),
- (wchar_t *)kvp_data->data.value);
+ valuelen = utf8s_to_utf16s(value, strlen(value), UTF16_HOST_ENDIAN,
+ (wchar_t *) kvp_data->data.value,
+ HV_KVP_EXCHANGE_MAX_VALUE_SIZE / 2);
kvp_data->data.value_size = 2*(valuelen + 1); /* utf16 encoding */
kvp_data->data.value_type = REG_SZ; /* all our values are strings */
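
Both call sites now pass the target endianness and an explicit output limit (in 16-bit units) to utf8s_to_utf16s(), matching its bounded five-argument form. A minimal, hypothetical sketch of a bounded conversion with that signature (helper name and parameters invented for illustration):

	#include <linux/nls.h>
	#include <linux/string.h>

	/* Convert a NUL-terminated UTF-8 string into a fixed-size UTF-16LE buffer.
	 * Returns the number of UTF-16 code units written (no terminating NUL is
	 * added) or a negative error code. */
	static int example_utf8_to_utf16(const char *src, wchar_t *dst, int dst_units)
	{
		return utf8s_to_utf16s((const u8 *)src, strlen(src),
				       UTF16_LITTLE_ENDIAN, dst, dst_units);
	}
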
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index c3875f72b5bf..a220e5746d67 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -627,10 +627,7 @@ void vmbus_driver_unregister(struct hv_driver *hv_driver)
pr_info("unregistering driver %s\n", hv_driver->name);
if (!vmbus_exists())
- return;
-
- driver_unregister(&hv_driver->driver);
-
+ driver_unregister(&hv_driver->driver);
}
EXPORT_SYMBOL_GPL(vmbus_driver_unregister);
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 91be41f60809..02260406b9e4 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -367,11 +367,11 @@ config SENSORS_F71882FG
will be called f71882fg.
config SENSORS_F75375S
- tristate "Fintek F75375S/SP and F75373"
+ tristate "Fintek F75375S/SP, F75373 and F75387"
depends on I2C
help
If you say yes here you get support for hardware monitoring
- features of the Fintek F75375S/SP and F75373
+ features of the Fintek F75375S/SP, F75373 and F75387
This driver can also be built as a module. If so, the module
will be called f75375s.
@@ -474,8 +474,8 @@ config SENSORS_IT87
select HWMON_VID
help
If you say yes here you get support for ITE IT8705F, IT8712F,
- IT8716F, IT8718F, IT8720F, IT8721F, IT8726F and IT8758E sensor
- chips, and the SiS960 clone.
+ IT8716F, IT8718F, IT8720F, IT8721F, IT8726F, IT8728F and IT8758E
+ sensor chips, and the SiS960 clone.
This driver can also be built as a module. If so, the module
will be called it87.
@@ -515,11 +515,11 @@ config SENSORS_LINEAGE
will be called lineage-pem.
config SENSORS_LM63
- tristate "National Semiconductor LM63 and LM64"
+ tristate "National Semiconductor LM63 and compatibles"
depends on I2C
help
If you say yes here you get support for the National
- Semiconductor LM63 and LM64 remote diode digital temperature
+ Semiconductor LM63, LM64, and LM96163 remote diode digital temperature
sensors with integrated fan control. Such chips are found
on the Tyan S4882 (Thunder K8QS Pro) motherboard, among
others.
diff --git a/drivers/hwmon/abituguru.c b/drivers/hwmon/abituguru.c
index 65a35cf5b3c5..3b728e8f169b 100644
--- a/drivers/hwmon/abituguru.c
+++ b/drivers/hwmon/abituguru.c
@@ -145,7 +145,7 @@ static const u8 abituguru_pwm_max[5] = { 0, 255, 255, 75, 75 };
/* Insmod parameters */
-static int force;
+static bool force;
module_param(force, bool, 0);
MODULE_PARM_DESC(force, "Set to one to force detection.");
static int bank1_types[ABIT_UGURU_MAX_BANK1_SENSORS] = { -1, -1, -1, -1, -1,
diff --git a/drivers/hwmon/abituguru3.c b/drivers/hwmon/abituguru3.c
index d30855a75786..34a14a77e008 100644
--- a/drivers/hwmon/abituguru3.c
+++ b/drivers/hwmon/abituguru3.c
@@ -603,11 +603,11 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
/* Insmod parameters */
-static int force;
+static bool force;
module_param(force, bool, 0);
MODULE_PARM_DESC(force, "Set to one to force detection.");
/* Default verbose is 1, since this driver is still in the testing phase */
-static int verbose = 1;
+static bool verbose = 1;
module_param(verbose, bool, 0644);
MODULE_PARM_DESC(verbose, "Enable/disable verbose error reporting");
diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
index 66f67293341e..554f046bcf20 100644
--- a/drivers/hwmon/acpi_power_meter.c
+++ b/drivers/hwmon/acpi_power_meter.c
@@ -58,7 +58,7 @@ ACPI_MODULE_NAME(ACPI_POWER_METER_NAME);
#define POWER_ALARM_NAME "power1_alarm"
static int cap_in_hardware;
-static int force_cap_on;
+static bool force_cap_on;
static int can_cap_in_hardware(void)
{
@@ -170,7 +170,7 @@ static ssize_t set_avg_interval(struct device *dev,
unsigned long long data;
acpi_status status;
- res = strict_strtoul(buf, 10, &temp);
+ res = kstrtoul(buf, 10, &temp);
if (res)
return res;
@@ -241,7 +241,7 @@ static ssize_t set_cap(struct device *dev, struct device_attribute *devattr,
unsigned long long data;
acpi_status status;
- res = strict_strtoul(buf, 10, &temp);
+ res = kstrtoul(buf, 10, &temp);
if (res)
return res;
@@ -311,7 +311,7 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
int res;
unsigned long temp;
- res = strict_strtoul(buf, 10, &temp);
+ res = kstrtoul(buf, 10, &temp);
if (res)
return res;
diff --git a/drivers/hwmon/ad7314.c b/drivers/hwmon/ad7314.c
index 318e38e85376..5d760f3d21c2 100644
--- a/drivers/hwmon/ad7314.c
+++ b/drivers/hwmon/ad7314.c
@@ -160,7 +160,6 @@ MODULE_DEVICE_TABLE(spi, ad7314_id);
static struct spi_driver ad7314_driver = {
.driver = {
.name = "ad7314",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = ad7314_probe,
diff --git a/drivers/hwmon/adcxx.c b/drivers/hwmon/adcxx.c
index b2cacbe707a8..ceb24a365176 100644
--- a/drivers/hwmon/adcxx.c
+++ b/drivers/hwmon/adcxx.c
@@ -125,7 +125,7 @@ static ssize_t adcxx_set_max(struct device *dev,
struct adcxx *adc = spi_get_drvdata(spi);
unsigned long value;
- if (strict_strtoul(buf, 10, &value))
+ if (kstrtoul(buf, 10, &value))
return -EINVAL;
if (mutex_lock_interruptible(&adc->lock))
diff --git a/drivers/hwmon/adm1021.c b/drivers/hwmon/adm1021.c
index 1ad0a885c5a5..0158cc35cb2e 100644
--- a/drivers/hwmon/adm1021.c
+++ b/drivers/hwmon/adm1021.c
@@ -103,7 +103,7 @@ static int adm1021_remove(struct i2c_client *client);
static struct adm1021_data *adm1021_update_device(struct device *dev);
/* (amalysh) read only mode, otherwise any limit's writing confuse BIOS */
-static int read_only;
+static bool read_only;
static const struct i2c_device_id adm1021_id[] = {
diff --git a/drivers/hwmon/adm1031.c b/drivers/hwmon/adm1031.c
index 0683e6be662c..97e2cfb0bc93 100644
--- a/drivers/hwmon/adm1031.c
+++ b/drivers/hwmon/adm1031.c
@@ -155,7 +155,8 @@ adm1031_write_value(struct i2c_client *client, u8 reg, unsigned int value)
#define TEMP_OFFSET_FROM_REG(val) TEMP_FROM_REG((val) < 0 ? \
(val) | 0x70 : (val))
-#define FAN_FROM_REG(reg, div) ((reg) ? (11250 * 60) / ((reg) * (div)) : 0)
+#define FAN_FROM_REG(reg, div) ((reg) ? \
+ (11250 * 60) / ((reg) * (div)) : 0)
static int FAN_TO_REG(int reg, int div)
{
@@ -174,8 +175,8 @@ static int FAN_TO_REG(int reg, int div)
(((reg) & 0x1F) | (((val) << 5) & 0xe0))
#define AUTO_TEMP_MIN_TO_REG(val, reg) \
- ((((val)/500) & 0xf8)|((reg) & 0x7))
-#define AUTO_TEMP_RANGE_FROM_REG(reg) (5000 * (1<< ((reg)&0x7)))
+ ((((val) / 500) & 0xf8) | ((reg) & 0x7))
+#define AUTO_TEMP_RANGE_FROM_REG(reg) (5000 * (1 << ((reg) & 0x7)))
#define AUTO_TEMP_MIN_FROM_REG(reg) (1000 * ((((reg) >> 3) & 0x1f) << 2))
#define AUTO_TEMP_MIN_FROM_REG_DEG(reg) ((((reg) >> 3) & 0x1f) << 2)
@@ -202,7 +203,7 @@ static int AUTO_TEMP_MAX_TO_REG(int val, int reg, int pwm)
/* FAN auto control */
#define GET_FAN_AUTO_BITFIELD(data, idx) \
- (*(data)->chan_select_table)[FAN_CHAN_FROM_REG((data)->conf1)][idx%2]
+ (*(data)->chan_select_table)[FAN_CHAN_FROM_REG((data)->conf1)][idx % 2]
/* The tables below contains the possible values for the auto fan
* control bitfields. the index in the table is the register value.
@@ -230,7 +231,7 @@ static const auto_chan_table_t auto_channel_select_table_adm1030 = {
*/
static int
get_fan_auto_nearest(struct adm1031_data *data,
- int chan, u8 val, u8 reg, u8 * new_reg)
+ int chan, u8 val, u8 reg, u8 *new_reg)
{
int i;
int first_match = -1, exact_match = -1;
@@ -258,13 +259,13 @@ get_fan_auto_nearest(struct adm1031_data *data,
}
}
- if (exact_match >= 0) {
+ if (exact_match >= 0)
*new_reg = exact_match;
- } else if (first_match >= 0) {
+ else if (first_match >= 0)
*new_reg = first_match;
- } else {
+ else
return -EINVAL;
- }
+
return 0;
}
@@ -283,23 +284,28 @@ set_fan_auto_channel(struct device *dev, struct device_attribute *attr,
struct i2c_client *client = to_i2c_client(dev);
struct adm1031_data *data = i2c_get_clientdata(client);
int nr = to_sensor_dev_attr(attr)->index;
- int val = simple_strtol(buf, NULL, 10);
+ long val;
u8 reg;
int ret;
u8 old_fan_mode;
+ ret = kstrtol(buf, 10, &val);
+ if (ret)
+ return ret;
+
old_fan_mode = data->conf1;
mutex_lock(&data->update_lock);
- if ((ret = get_fan_auto_nearest(data, nr, val, data->conf1, &reg))) {
+ ret = get_fan_auto_nearest(data, nr, val, data->conf1, &reg);
+ if (ret) {
mutex_unlock(&data->update_lock);
return ret;
}
data->conf1 = FAN_CHAN_TO_REG(reg, data->conf1);
if ((data->conf1 & ADM1031_CONF1_AUTO_MODE) ^
(old_fan_mode & ADM1031_CONF1_AUTO_MODE)) {
- if (data->conf1 & ADM1031_CONF1_AUTO_MODE){
+ if (data->conf1 & ADM1031_CONF1_AUTO_MODE) {
/* Switch to Auto Fan Mode
* Save PWM registers
* Set PWM registers to 33% Both */
@@ -350,7 +356,12 @@ set_auto_temp_min(struct device *dev, struct device_attribute *attr,
struct i2c_client *client = to_i2c_client(dev);
struct adm1031_data *data = i2c_get_clientdata(client);
int nr = to_sensor_dev_attr(attr)->index;
- int val = simple_strtol(buf, NULL, 10);
+ long val;
+ int ret;
+
+ ret = kstrtol(buf, 10, &val);
+ if (ret)
+ return ret;
mutex_lock(&data->update_lock);
data->auto_temp[nr] = AUTO_TEMP_MIN_TO_REG(val, data->auto_temp[nr]);
@@ -374,10 +385,16 @@ set_auto_temp_max(struct device *dev, struct device_attribute *attr,
struct i2c_client *client = to_i2c_client(dev);
struct adm1031_data *data = i2c_get_clientdata(client);
int nr = to_sensor_dev_attr(attr)->index;
- int val = simple_strtol(buf, NULL, 10);
+ long val;
+ int ret;
+
+ ret = kstrtol(buf, 10, &val);
+ if (ret)
+ return ret;
mutex_lock(&data->update_lock);
- data->temp_max[nr] = AUTO_TEMP_MAX_TO_REG(val, data->auto_temp[nr], data->pwm[nr]);
+ data->temp_max[nr] = AUTO_TEMP_MAX_TO_REG(val, data->auto_temp[nr],
+ data->pwm[nr]);
adm1031_write_value(client, ADM1031_REG_AUTO_TEMP(nr),
data->temp_max[nr]);
mutex_unlock(&data->update_lock);
@@ -410,8 +427,12 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
struct i2c_client *client = to_i2c_client(dev);
struct adm1031_data *data = i2c_get_clientdata(client);
int nr = to_sensor_dev_attr(attr)->index;
- int val = simple_strtol(buf, NULL, 10);
- int reg;
+ long val;
+ int ret, reg;
+
+ ret = kstrtol(buf, 10, &val);
+ if (ret)
+ return ret;
mutex_lock(&data->update_lock);
if ((data->conf1 & ADM1031_CONF1_AUTO_MODE) &&
@@ -449,9 +470,13 @@ static int trust_fan_readings(struct adm1031_data *data, int chan)
if (data->conf1 & ADM1031_CONF1_AUTO_MODE) {
switch (data->conf1 & 0x60) {
- case 0x00: /* remote temp1 controls fan1 remote temp2 controls fan2 */
+ case 0x00:
+ /*
+ * remote temp1 controls fan1,
+ * remote temp2 controls fan2
+ */
res = data->temp[chan+1] >=
- AUTO_TEMP_MIN_FROM_REG_DEG(data->auto_temp[chan+1]);
+ AUTO_TEMP_MIN_FROM_REG_DEG(data->auto_temp[chan+1]);
break;
case 0x20: /* remote temp1 controls both fans */
res =
@@ -515,7 +540,12 @@ static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
struct i2c_client *client = to_i2c_client(dev);
struct adm1031_data *data = i2c_get_clientdata(client);
int nr = to_sensor_dev_attr(attr)->index;
- int val = simple_strtol(buf, NULL, 10);
+ long val;
+ int ret;
+
+ ret = kstrtol(buf, 10, &val);
+ if (ret)
+ return ret;
mutex_lock(&data->update_lock);
if (val) {
@@ -534,10 +564,15 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
struct i2c_client *client = to_i2c_client(dev);
struct adm1031_data *data = i2c_get_clientdata(client);
int nr = to_sensor_dev_attr(attr)->index;
- int val = simple_strtol(buf, NULL, 10);
+ long val;
u8 tmp;
int old_div;
int new_min;
+ int ret;
+
+ ret = kstrtol(buf, 10, &val);
+ if (ret)
+ return ret;
tmp = val == 8 ? 0xc0 :
val == 4 ? 0x80 :
@@ -631,9 +666,13 @@ static ssize_t set_temp_offset(struct device *dev,
struct i2c_client *client = to_i2c_client(dev);
struct adm1031_data *data = i2c_get_clientdata(client);
int nr = to_sensor_dev_attr(attr)->index;
- int val;
+ long val;
+ int ret;
+
+ ret = kstrtol(buf, 10, &val);
+ if (ret)
+ return ret;
- val = simple_strtol(buf, NULL, 10);
val = SENSORS_LIMIT(val, -15000, 15000);
mutex_lock(&data->update_lock);
data->temp_offset[nr] = TEMP_OFFSET_TO_REG(val);
@@ -648,9 +687,13 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
struct i2c_client *client = to_i2c_client(dev);
struct adm1031_data *data = i2c_get_clientdata(client);
int nr = to_sensor_dev_attr(attr)->index;
- int val;
+ long val;
+ int ret;
+
+ ret = kstrtol(buf, 10, &val);
+ if (ret)
+ return ret;
- val = simple_strtol(buf, NULL, 10);
val = SENSORS_LIMIT(val, -55000, nr == 0 ? 127750 : 127875);
mutex_lock(&data->update_lock);
data->temp_min[nr] = TEMP_TO_REG(val);
@@ -665,9 +708,13 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
struct i2c_client *client = to_i2c_client(dev);
struct adm1031_data *data = i2c_get_clientdata(client);
int nr = to_sensor_dev_attr(attr)->index;
- int val;
+ long val;
+ int ret;
+
+ ret = kstrtol(buf, 10, &val);
+ if (ret)
+ return ret;
- val = simple_strtol(buf, NULL, 10);
val = SENSORS_LIMIT(val, -55000, nr == 0 ? 127750 : 127875);
mutex_lock(&data->update_lock);
data->temp_max[nr] = TEMP_TO_REG(val);
@@ -682,9 +729,13 @@ static ssize_t set_temp_crit(struct device *dev, struct device_attribute *attr,
struct i2c_client *client = to_i2c_client(dev);
struct adm1031_data *data = i2c_get_clientdata(client);
int nr = to_sensor_dev_attr(attr)->index;
- int val;
+ long val;
+ int ret;
+
+ ret = kstrtol(buf, 10, &val);
+ if (ret)
+ return ret;
- val = simple_strtol(buf, NULL, 10);
val = SENSORS_LIMIT(val, -55000, nr == 0 ? 127750 : 127875);
mutex_lock(&data->update_lock);
data->temp_crit[nr] = TEMP_TO_REG(val);
@@ -711,7 +762,8 @@ temp_reg(2);
temp_reg(3);
/* Alarms */
-static ssize_t show_alarms(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_alarms(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct adm1031_data *data = adm1031_update_device(dev);
return sprintf(buf, "%d\n", data->alarm);
@@ -767,7 +819,7 @@ static ssize_t set_update_interval(struct device *dev,
int i, err;
u8 reg;
- err = strict_strtoul(buf, 10, &val);
+ err = kstrtoul(buf, 10, &val);
if (err)
return err;
@@ -919,12 +971,13 @@ static int adm1031_probe(struct i2c_client *client,
adm1031_init_client(client);
/* Register sysfs hooks */
- if ((err = sysfs_create_group(&client->dev.kobj, &adm1031_group)))
+ err = sysfs_create_group(&client->dev.kobj, &adm1031_group);
+ if (err)
goto exit_free;
if (data->chip_type == adm1031) {
- if ((err = sysfs_create_group(&client->dev.kobj,
- &adm1031_group_opt)))
+ err = sysfs_create_group(&client->dev.kobj, &adm1031_group_opt);
+ if (err)
goto exit_remove;
}
@@ -970,14 +1023,13 @@ static void adm1031_init_client(struct i2c_client *client)
}
/* Initialize the ADM1031 chip (enables fan speed reading ) */
read_val = adm1031_read_value(client, ADM1031_REG_CONF2);
- if ((read_val | mask) != read_val) {
- adm1031_write_value(client, ADM1031_REG_CONF2, read_val | mask);
- }
+ if ((read_val | mask) != read_val)
+ adm1031_write_value(client, ADM1031_REG_CONF2, read_val | mask);
read_val = adm1031_read_value(client, ADM1031_REG_CONF1);
if ((read_val | ADM1031_CONF1_MONITOR_ENABLE) != read_val) {
- adm1031_write_value(client, ADM1031_REG_CONF1, read_val |
- ADM1031_CONF1_MONITOR_ENABLE);
+ adm1031_write_value(client, ADM1031_REG_CONF1,
+ read_val | ADM1031_CONF1_MONITOR_ENABLE);
}
/* Read the chip's update rate */
@@ -1024,8 +1076,7 @@ static struct adm1031_data *adm1031_update_device(struct device *dev)
/* oldh is actually newer */
if (newh != oldh)
dev_warn(&client->dev,
- "Remote temperature may be "
- "wrong.\n");
+ "Remote temperature may be wrong.\n");
#endif
}
data->temp[chan] = newh;
@@ -1052,22 +1103,24 @@ static struct adm1031_data *adm1031_update_device(struct device *dev)
data->conf2 = adm1031_read_value(client, ADM1031_REG_CONF2);
data->alarm = adm1031_read_value(client, ADM1031_REG_STATUS(0))
- | (adm1031_read_value(client, ADM1031_REG_STATUS(1))
- << 8);
- if (data->chip_type == adm1030) {
+ | (adm1031_read_value(client, ADM1031_REG_STATUS(1)) << 8);
+ if (data->chip_type == adm1030)
data->alarm &= 0xc0ff;
- }
- for (chan=0; chan<(data->chip_type == adm1030 ? 1 : 2); chan++) {
+ for (chan = 0; chan < (data->chip_type == adm1030 ? 1 : 2);
+ chan++) {
data->fan_div[chan] =
- adm1031_read_value(client, ADM1031_REG_FAN_DIV(chan));
+ adm1031_read_value(client,
+ ADM1031_REG_FAN_DIV(chan));
data->fan_min[chan] =
- adm1031_read_value(client, ADM1031_REG_FAN_MIN(chan));
+ adm1031_read_value(client,
+ ADM1031_REG_FAN_MIN(chan));
data->fan[chan] =
- adm1031_read_value(client, ADM1031_REG_FAN_SPEED(chan));
+ adm1031_read_value(client,
+ ADM1031_REG_FAN_SPEED(chan));
data->pwm[chan] =
- 0xf & (adm1031_read_value(client, ADM1031_REG_PWM) >>
- (4*chan));
+ (adm1031_read_value(client,
+ ADM1031_REG_PWM) >> (4 * chan)) & 0x0f;
}
data->last_updated = jiffies;
data->valid = 1;
diff --git a/drivers/hwmon/adm9240.c b/drivers/hwmon/adm9240.c
index 9e234b981b83..3f63f5f9741d 100644
--- a/drivers/hwmon/adm9240.c
+++ b/drivers/hwmon/adm9240.c
@@ -503,7 +503,7 @@ static ssize_t chassis_clear(struct device *dev,
struct adm9240_data *data = i2c_get_clientdata(client);
unsigned long val;
- if (strict_strtoul(buf, 10, &val) || val != 0)
+ if (kstrtoul(buf, 10, &val) || val != 0)
return -EINVAL;
mutex_lock(&data->update_lock);
diff --git a/drivers/hwmon/ads7828.c b/drivers/hwmon/ads7828.c
index cfcc3b6fb6bf..ed60242d6a0a 100644
--- a/drivers/hwmon/ads7828.c
+++ b/drivers/hwmon/ads7828.c
@@ -48,8 +48,8 @@ static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b,
I2C_CLIENT_END };
/* Module parameters */
-static int se_input = 1; /* Default is SE, 0 == diff */
-static int int_vref = 1; /* Default is internal ref ON */
+static bool se_input = 1; /* Default is SE, 0 == diff */
+static bool int_vref = 1; /* Default is internal ref ON */
static int vref_mv = ADS7828_INT_VREF_MV; /* set if vref != 2.5V */
module_param(se_input, bool, S_IRUGO);
module_param(int_vref, bool, S_IRUGO);
diff --git a/drivers/hwmon/ads7871.c b/drivers/hwmon/ads7871.c
index 52319340e182..04450f8bf5da 100644
--- a/drivers/hwmon/ads7871.c
+++ b/drivers/hwmon/ads7871.c
@@ -227,7 +227,6 @@ static int __devexit ads7871_remove(struct spi_device *spi)
static struct spi_driver ads7871_driver = {
.driver = {
.name = DEVICE_NAME,
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
diff --git a/drivers/hwmon/adt7411.c b/drivers/hwmon/adt7411.c
index 5cc3e3784b42..5b02f7a91018 100644
--- a/drivers/hwmon/adt7411.c
+++ b/drivers/hwmon/adt7411.c
@@ -197,7 +197,7 @@ static ssize_t adt7411_set_bit(struct device *dev,
int ret;
unsigned long flag;
- ret = strict_strtoul(buf, 0, &flag);
+ ret = kstrtoul(buf, 0, &flag);
if (ret || flag > 1)
return -EINVAL;
diff --git a/drivers/hwmon/adt7462.c b/drivers/hwmon/adt7462.c
index 2af0c7b6b4e4..7a1494846cfd 100644
--- a/drivers/hwmon/adt7462.c
+++ b/drivers/hwmon/adt7462.c
@@ -833,7 +833,7 @@ static ssize_t set_temp_min(struct device *dev,
struct adt7462_data *data = i2c_get_clientdata(client);
long temp;
- if (strict_strtol(buf, 10, &temp) || !temp_enabled(data, attr->index))
+ if (kstrtol(buf, 10, &temp) || !temp_enabled(data, attr->index))
return -EINVAL;
temp = DIV_ROUND_CLOSEST(temp, 1000) + 64;
@@ -871,7 +871,7 @@ static ssize_t set_temp_max(struct device *dev,
struct adt7462_data *data = i2c_get_clientdata(client);
long temp;
- if (strict_strtol(buf, 10, &temp) || !temp_enabled(data, attr->index))
+ if (kstrtol(buf, 10, &temp) || !temp_enabled(data, attr->index))
return -EINVAL;
temp = DIV_ROUND_CLOSEST(temp, 1000) + 64;
@@ -935,7 +935,7 @@ static ssize_t set_volt_max(struct device *dev,
int x = voltage_multiplier(data, attr->index);
long temp;
- if (strict_strtol(buf, 10, &temp) || !x)
+ if (kstrtol(buf, 10, &temp) || !x)
return -EINVAL;
temp *= 1000; /* convert mV to uV */
@@ -977,7 +977,7 @@ static ssize_t set_volt_min(struct device *dev,
int x = voltage_multiplier(data, attr->index);
long temp;
- if (strict_strtol(buf, 10, &temp) || !x)
+ if (kstrtol(buf, 10, &temp) || !x)
return -EINVAL;
temp *= 1000; /* convert mV to uV */
@@ -1066,7 +1066,7 @@ static ssize_t set_fan_min(struct device *dev,
struct adt7462_data *data = i2c_get_clientdata(client);
long temp;
- if (strict_strtol(buf, 10, &temp) || !temp ||
+ if (kstrtol(buf, 10, &temp) || !temp ||
!fan_enabled(data, attr->index))
return -EINVAL;
@@ -1115,7 +1115,7 @@ static ssize_t set_force_pwm_max(struct device *dev,
long temp;
u8 reg;
- if (strict_strtol(buf, 10, &temp))
+ if (kstrtol(buf, 10, &temp))
return -EINVAL;
mutex_lock(&data->lock);
@@ -1147,7 +1147,7 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *devattr,
struct adt7462_data *data = i2c_get_clientdata(client);
long temp;
- if (strict_strtol(buf, 10, &temp))
+ if (kstrtol(buf, 10, &temp))
return -EINVAL;
temp = SENSORS_LIMIT(temp, 0, 255);
@@ -1177,7 +1177,7 @@ static ssize_t set_pwm_max(struct device *dev,
struct adt7462_data *data = i2c_get_clientdata(client);
long temp;
- if (strict_strtol(buf, 10, &temp))
+ if (kstrtol(buf, 10, &temp))
return -EINVAL;
temp = SENSORS_LIMIT(temp, 0, 255);
@@ -1209,7 +1209,7 @@ static ssize_t set_pwm_min(struct device *dev,
struct adt7462_data *data = i2c_get_clientdata(client);
long temp;
- if (strict_strtol(buf, 10, &temp))
+ if (kstrtol(buf, 10, &temp))
return -EINVAL;
temp = SENSORS_LIMIT(temp, 0, 255);
@@ -1243,7 +1243,7 @@ static ssize_t set_pwm_hyst(struct device *dev,
struct adt7462_data *data = i2c_get_clientdata(client);
long temp;
- if (strict_strtol(buf, 10, &temp))
+ if (kstrtol(buf, 10, &temp))
return -EINVAL;
temp = DIV_ROUND_CLOSEST(temp, 1000);
@@ -1289,7 +1289,7 @@ static ssize_t set_pwm_tmax(struct device *dev,
int tmin, trange_value;
long trange;
- if (strict_strtol(buf, 10, &trange))
+ if (kstrtol(buf, 10, &trange))
return -EINVAL;
/* trange = tmax - tmin */
@@ -1330,7 +1330,7 @@ static ssize_t set_pwm_tmin(struct device *dev,
struct adt7462_data *data = i2c_get_clientdata(client);
long temp;
- if (strict_strtol(buf, 10, &temp))
+ if (kstrtol(buf, 10, &temp))
return -EINVAL;
temp = DIV_ROUND_CLOSEST(temp, 1000) + 64;
@@ -1387,7 +1387,7 @@ static ssize_t set_pwm_auto(struct device *dev,
struct adt7462_data *data = i2c_get_clientdata(client);
long temp;
- if (strict_strtol(buf, 10, &temp))
+ if (kstrtol(buf, 10, &temp))
return -EINVAL;
switch (temp) {
@@ -1446,7 +1446,7 @@ static ssize_t set_pwm_auto_temp(struct device *dev,
struct adt7462_data *data = i2c_get_clientdata(client);
long temp;
- if (strict_strtol(buf, 10, &temp))
+ if (kstrtol(buf, 10, &temp))
return -EINVAL;
temp = cvt_auto_temp(temp);
diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c
index c6d1ce059aea..5e10c79f2dfd 100644
--- a/drivers/hwmon/adt7470.c
+++ b/drivers/hwmon/adt7470.c
@@ -449,7 +449,7 @@ static ssize_t set_auto_update_interval(struct device *dev,
struct adt7470_data *data = i2c_get_clientdata(client);
long temp;
- if (strict_strtol(buf, 10, &temp))
+ if (kstrtol(buf, 10, &temp))
return -EINVAL;
temp = SENSORS_LIMIT(temp, 0, 60000);
@@ -478,7 +478,7 @@ static ssize_t set_num_temp_sensors(struct device *dev,
struct adt7470_data *data = i2c_get_clientdata(client);
long temp;
- if (strict_strtol(buf, 10, &temp))
+ if (kstrtol(buf, 10, &temp))
return -EINVAL;
temp = SENSORS_LIMIT(temp, -1, 10);
@@ -511,7 +511,7 @@ static ssize_t set_temp_min(struct device *dev,
struct adt7470_data *data = i2c_get_clientdata(client);
long temp;
- if (strict_strtol(buf, 10, &temp))
+ if (kstrtol(buf, 10, &temp))
return -EINVAL;
temp = DIV_ROUND_CLOSEST(temp, 1000);
@@ -545,7 +545,7 @@ static ssize_t set_temp_max(struct device *dev,
struct adt7470_data *data = i2c_get_clientdata(client);
long temp;
- if (strict_strtol(buf, 10, &temp))
+ if (kstrtol(buf, 10, &temp))
return -EINVAL;
temp = DIV_ROUND_CLOSEST(temp, 1000);
@@ -600,7 +600,7 @@ static ssize_t set_fan_max(struct device *dev,
struct adt7470_data *data = i2c_get_clientdata(client);
long temp;
- if (strict_strtol(buf, 10, &temp) || !temp)
+ if (kstrtol(buf, 10, &temp) || !temp)
return -EINVAL;
temp = FAN_RPM_TO_PERIOD(temp);
@@ -637,7 +637,7 @@ static ssize_t set_fan_min(struct device *dev,
struct adt7470_data *data = i2c_get_clientdata(client);
long temp;
- if (strict_strtol(buf, 10, &temp) || !temp)
+ if (kstrtol(buf, 10, &temp) || !temp)
return -EINVAL;
temp = FAN_RPM_TO_PERIOD(temp);
@@ -682,7 +682,7 @@ static ssize_t set_force_pwm_max(struct device *dev,
long temp;
u8 reg;
- if (strict_strtol(buf, 10, &temp))
+ if (kstrtol(buf, 10, &temp))
return -EINVAL;
mutex_lock(&data->lock);
@@ -714,7 +714,7 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *devattr,
struct adt7470_data *data = i2c_get_clientdata(client);
long temp;
- if (strict_strtol(buf, 10, &temp))
+ if (kstrtol(buf, 10, &temp))
return -EINVAL;
temp = SENSORS_LIMIT(temp, 0, 255);
@@ -746,7 +746,7 @@ static ssize_t set_pwm_max(struct device *dev,
struct adt7470_data *data = i2c_get_clientdata(client);
long temp;
- if (strict_strtol(buf, 10, &temp))
+ if (kstrtol(buf, 10, &temp))
return -EINVAL;
temp = SENSORS_LIMIT(temp, 0, 255);
@@ -779,7 +779,7 @@ static ssize_t set_pwm_min(struct device *dev,
struct adt7470_data *data = i2c_get_clientdata(client);
long temp;
- if (strict_strtol(buf, 10, &temp))
+ if (kstrtol(buf, 10, &temp))
return -EINVAL;
temp = SENSORS_LIMIT(temp, 0, 255);
@@ -822,7 +822,7 @@ static ssize_t set_pwm_tmin(struct device *dev,
struct adt7470_data *data = i2c_get_clientdata(client);
long temp;
- if (strict_strtol(buf, 10, &temp))
+ if (kstrtol(buf, 10, &temp))
return -EINVAL;
temp = DIV_ROUND_CLOSEST(temp, 1000);
@@ -859,7 +859,7 @@ static ssize_t set_pwm_auto(struct device *dev,
long temp;
u8 reg;
- if (strict_strtol(buf, 10, &temp))
+ if (kstrtol(buf, 10, &temp))
return -EINVAL;
if (attr->index % 2)
@@ -919,7 +919,7 @@ static ssize_t set_pwm_auto_temp(struct device *dev,
long temp;
u8 reg;
- if (strict_strtol(buf, 10, &temp))
+ if (kstrtol(buf, 10, &temp))
return -EINVAL;
temp = cvt_auto_temp(temp);
diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c
index b5fcd87931cb..7dab3547fee5 100644
--- a/drivers/hwmon/adt7475.c
+++ b/drivers/hwmon/adt7475.c
@@ -343,7 +343,7 @@ static ssize_t set_voltage(struct device *dev, struct device_attribute *attr,
unsigned char reg;
long val;
- if (strict_strtol(buf, 10, &val))
+ if (kstrtol(buf, 10, &val))
return -EINVAL;
mutex_lock(&data->lock);
@@ -432,7 +432,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *attr,
int temp;
long val;
- if (strict_strtol(buf, 10, &val))
+ if (kstrtol(buf, 10, &val))
return -EINVAL;
mutex_lock(&data->lock);
@@ -546,7 +546,7 @@ static ssize_t set_point2(struct device *dev, struct device_attribute *attr,
int temp;
long val;
- if (strict_strtol(buf, 10, &val))
+ if (kstrtol(buf, 10, &val))
return -EINVAL;
mutex_lock(&data->lock);
@@ -602,7 +602,7 @@ static ssize_t set_tach(struct device *dev, struct device_attribute *attr,
struct adt7475_data *data = i2c_get_clientdata(client);
unsigned long val;
- if (strict_strtoul(buf, 10, &val))
+ if (kstrtoul(buf, 10, &val))
return -EINVAL;
mutex_lock(&data->lock);
@@ -653,7 +653,7 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
unsigned char reg = 0;
long val;
- if (strict_strtol(buf, 10, &val))
+ if (kstrtol(buf, 10, &val))
return -EINVAL;
mutex_lock(&data->lock);
@@ -758,7 +758,7 @@ static ssize_t set_pwmchan(struct device *dev, struct device_attribute *attr,
int r;
long val;
- if (strict_strtol(buf, 10, &val))
+ if (kstrtol(buf, 10, &val))
return -EINVAL;
mutex_lock(&data->lock);
@@ -781,7 +781,7 @@ static ssize_t set_pwmctrl(struct device *dev, struct device_attribute *attr,
int r;
long val;
- if (strict_strtol(buf, 10, &val))
+ if (kstrtol(buf, 10, &val))
return -EINVAL;
mutex_lock(&data->lock);
@@ -819,7 +819,7 @@ static ssize_t set_pwmfreq(struct device *dev, struct device_attribute *attr,
int out;
long val;
- if (strict_strtol(buf, 10, &val))
+ if (kstrtol(buf, 10, &val))
return -EINVAL;
out = find_nearest(val, pwmfreq_table, ARRAY_SIZE(pwmfreq_table));
@@ -853,7 +853,7 @@ static ssize_t set_pwm_at_crit(struct device *dev,
struct adt7475_data *data = i2c_get_clientdata(client);
long val;
- if (strict_strtol(buf, 10, &val))
+ if (kstrtol(buf, 10, &val))
return -EINVAL;
if (val != 0 && val != 1)
return -EINVAL;
@@ -883,7 +883,7 @@ static ssize_t set_vrm(struct device *dev, struct device_attribute *devattr,
struct adt7475_data *data = dev_get_drvdata(dev);
long val;
- if (strict_strtol(buf, 10, &val))
+ if (kstrtol(buf, 10, &val))
return -EINVAL;
if (val < 0 || val > 255)
return -EINVAL;
diff --git a/drivers/hwmon/amc6821.c b/drivers/hwmon/amc6821.c
index 4033974d1bb3..89a6b9da0ec3 100644
--- a/drivers/hwmon/amc6821.c
+++ b/drivers/hwmon/amc6821.c
@@ -238,7 +238,7 @@ static ssize_t set_temp(
int ix = to_sensor_dev_attr(attr)->index;
long val;
- int ret = strict_strtol(buf, 10, &val);
+ int ret = kstrtol(buf, 10, &val);
if (ret)
return ret;
val = SENSORS_LIMIT(val / 1000, -128, 127);
@@ -327,7 +327,7 @@ static ssize_t set_pwm1(
struct i2c_client *client = to_i2c_client(dev);
struct amc6821_data *data = i2c_get_clientdata(client);
long val;
- int ret = strict_strtol(buf, 10, &val);
+ int ret = kstrtol(buf, 10, &val);
if (ret)
return ret;
@@ -356,7 +356,7 @@ static ssize_t set_pwm1_enable(
struct i2c_client *client = to_i2c_client(dev);
struct amc6821_data *data = i2c_get_clientdata(client);
long val;
- int config = strict_strtol(buf, 10, &val);
+ int config = kstrtol(buf, 10, &val);
if (config)
return config;
@@ -477,7 +477,7 @@ static ssize_t set_temp_auto_point_temp(
u8 reg;
int dpwm;
long val;
- int ret = strict_strtol(buf, 10, &val);
+ int ret = kstrtol(buf, 10, &val);
if (ret)
return ret;
@@ -556,7 +556,7 @@ static ssize_t set_pwm1_auto_point_pwm(
struct amc6821_data *data = i2c_get_clientdata(client);
int dpwm;
long val;
- int ret = strict_strtol(buf, 10, &val);
+ int ret = kstrtol(buf, 10, &val);
if (ret)
return ret;
@@ -623,7 +623,7 @@ static ssize_t set_fan(
struct amc6821_data *data = i2c_get_clientdata(client);
long val;
int ix = to_sensor_dev_attr(attr)->index;
- int ret = strict_strtol(buf, 10, &val);
+ int ret = kstrtol(buf, 10, &val);
if (ret)
return ret;
val = 1 > val ? 0xFFFF : 6000000/val;
@@ -665,7 +665,7 @@ static ssize_t set_fan1_div(
struct i2c_client *client = to_i2c_client(dev);
struct amc6821_data *data = i2c_get_clientdata(client);
long val;
- int config = strict_strtol(buf, 10, &val);
+ int config = kstrtol(buf, 10, &val);
if (config)
return config;
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index 4c0743660e9c..b9895531240d 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -782,7 +782,7 @@ static ssize_t applesmc_store_fan_speed(struct device *dev,
char newkey[5];
u8 buffer[2];
- if (strict_strtoul(sysfsbuf, 10, &speed) < 0 || speed >= 0x4000)
+ if (kstrtoul(sysfsbuf, 10, &speed) < 0 || speed >= 0x4000)
return -EINVAL; /* Bigger than a 14-bit value */
sprintf(newkey, fan_speed_fmt[to_option(attr)], to_index(attr));
@@ -822,7 +822,7 @@ static ssize_t applesmc_store_fan_manual(struct device *dev,
unsigned long input;
u16 val;
- if (strict_strtoul(sysfsbuf, 10, &input) < 0)
+ if (kstrtoul(sysfsbuf, 10, &input) < 0)
return -EINVAL;
ret = applesmc_read_key(FANS_MANUAL, buffer, 2);
@@ -977,7 +977,7 @@ static ssize_t applesmc_key_at_index_store(struct device *dev,
{
unsigned long newkey;
- if (strict_strtoul(sysfsbuf, 10, &newkey) < 0
+ if (kstrtoul(sysfsbuf, 10, &newkey) < 0
|| newkey >= smcreg.key_count)
return -EINVAL;
diff --git a/drivers/hwmon/asc7621.c b/drivers/hwmon/asc7621.c
index d2596cec18b5..3efd32449982 100644
--- a/drivers/hwmon/asc7621.c
+++ b/drivers/hwmon/asc7621.c
@@ -188,7 +188,7 @@ static ssize_t store_u8(struct device *dev, struct device_attribute *attr,
SETUP_STORE_data_param(dev, attr);
long reqval;
- if (strict_strtol(buf, 10, &reqval))
+ if (kstrtol(buf, 10, &reqval))
return -EINVAL;
reqval = SENSORS_LIMIT(reqval, 0, 255);
@@ -221,7 +221,7 @@ static ssize_t store_bitmask(struct device *dev,
long reqval;
u8 currval;
- if (strict_strtol(buf, 10, &reqval))
+ if (kstrtol(buf, 10, &reqval))
return -EINVAL;
reqval = SENSORS_LIMIT(reqval, 0, param->mask[0]);
@@ -265,7 +265,7 @@ static ssize_t store_fan16(struct device *dev,
SETUP_STORE_data_param(dev, attr);
long reqval;
- if (strict_strtol(buf, 10, &reqval))
+ if (kstrtol(buf, 10, &reqval))
return -EINVAL;
/* If a minimum RPM of zero is requested, then we set the register to
@@ -338,7 +338,7 @@ static ssize_t store_in8(struct device *dev, struct device_attribute *attr,
long reqval;
u8 nr = sda->index;
- if (strict_strtol(buf, 10, &reqval))
+ if (kstrtol(buf, 10, &reqval))
return -EINVAL;
reqval = SENSORS_LIMIT(reqval, 0, 0xffff);
@@ -371,7 +371,7 @@ static ssize_t store_temp8(struct device *dev,
long reqval;
s8 temp;
- if (strict_strtol(buf, 10, &reqval))
+ if (kstrtol(buf, 10, &reqval))
return -EINVAL;
reqval = SENSORS_LIMIT(reqval, -127000, 127000);
@@ -427,7 +427,7 @@ static ssize_t store_temp62(struct device *dev,
long reqval, i, f;
s8 temp;
- if (strict_strtol(buf, 10, &reqval))
+ if (kstrtol(buf, 10, &reqval))
return -EINVAL;
reqval = SENSORS_LIMIT(reqval, -32000, 31750);
@@ -482,7 +482,7 @@ static ssize_t store_ap2_temp(struct device *dev,
int i;
u8 currval, newval = 0;
- if (strict_strtol(buf, 10, &reqval))
+ if (kstrtol(buf, 10, &reqval))
return -EINVAL;
mutex_lock(&data->update_lock);
@@ -538,7 +538,7 @@ static ssize_t store_pwm_ac(struct device *dev,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x03,
};
- if (strict_strtoul(buf, 10, &reqval))
+ if (kstrtoul(buf, 10, &reqval))
return -EINVAL;
if (reqval > 31)
@@ -601,7 +601,7 @@ static ssize_t store_pwm_enable(struct device *dev,
long reqval;
u8 currval, config, altbit, newval, minoff = 255;
- if (strict_strtol(buf, 10, &reqval))
+ if (kstrtol(buf, 10, &reqval))
return -EINVAL;
switch (reqval) {
@@ -675,7 +675,7 @@ static ssize_t store_pwm_freq(struct device *dev,
u8 currval, newval = 255;
int i;
- if (strict_strtoul(buf, 10, &reqval))
+ if (kstrtoul(buf, 10, &reqval))
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(asc7621_pwm_freq_map); i++) {
@@ -724,7 +724,7 @@ static ssize_t store_pwm_ast(struct device *dev,
u8 currval, newval = 255;
u32 i;
- if (strict_strtol(buf, 10, &reqval))
+ if (kstrtol(buf, 10, &reqval))
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(asc7621_pwm_auto_spinup_map); i++) {
@@ -771,7 +771,7 @@ static ssize_t store_temp_st(struct device *dev,
u8 currval, newval = 255;
u32 i;
- if (strict_strtol(buf, 10, &reqval))
+ if (kstrtol(buf, 10, &reqval))
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(asc7621_temp_smoothing_time_map); i++) {
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 104b3767516c..a6c6ec36615e 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -57,16 +57,15 @@ MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");
#define TOTAL_ATTRS (MAX_CORE_ATTRS + 1)
#define MAX_CORE_DATA (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO)
-#ifdef CONFIG_SMP
#define TO_PHYS_ID(cpu) cpu_data(cpu).phys_proc_id
#define TO_CORE_ID(cpu) cpu_data(cpu).cpu_core_id
+#define TO_ATTR_NO(cpu) (TO_CORE_ID(cpu) + BASE_SYSFS_ATTR_NO)
+
+#ifdef CONFIG_SMP
#define for_each_sibling(i, cpu) for_each_cpu(i, cpu_sibling_mask(cpu))
#else
-#define TO_PHYS_ID(cpu) (cpu)
-#define TO_CORE_ID(cpu) (cpu)
#define for_each_sibling(i, cpu) for (i = 0; false; )
#endif
-#define TO_ATTR_NO(cpu) (TO_CORE_ID(cpu) + BASE_SYSFS_ATTR_NO)
/*
* Per-Core Temperature Data
@@ -191,7 +190,8 @@ static ssize_t show_temp(struct device *dev,
return tdata->valid ? sprintf(buf, "%d\n", tdata->temp) : -EAGAIN;
}
-static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
+static int __cpuinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id,
+ struct device *dev)
{
/* The 100C is default for both mobile and non mobile CPUs */
@@ -285,7 +285,8 @@ static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
return tjmax;
}
-static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
+static int __cpuinit get_tjmax(struct cpuinfo_x86 *c, u32 id,
+ struct device *dev)
{
int err;
u32 eax, edx;
@@ -324,7 +325,8 @@ static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
return adjust_tjmax(c, id, dev);
}
-static int create_name_attr(struct platform_data *pdata, struct device *dev)
+static int __devinit create_name_attr(struct platform_data *pdata,
+ struct device *dev)
{
sysfs_attr_init(&pdata->name_attr.attr);
pdata->name_attr.attr.name = "name";
@@ -333,8 +335,8 @@ static int create_name_attr(struct platform_data *pdata, struct device *dev)
return device_create_file(dev, &pdata->name_attr);
}
-static int create_core_attrs(struct temp_data *tdata, struct device *dev,
- int attr_no)
+static int __cpuinit create_core_attrs(struct temp_data *tdata,
+ struct device *dev, int attr_no)
{
int err, i;
static ssize_t (*const rd_ptr[TOTAL_ATTRS]) (struct device *dev,
@@ -384,7 +386,7 @@ static int __cpuinit chk_ucode_version(unsigned int cpu)
return 0;
}
-static struct platform_device *coretemp_get_pdev(unsigned int cpu)
+static struct platform_device __cpuinit *coretemp_get_pdev(unsigned int cpu)
{
u16 phys_proc_id = TO_PHYS_ID(cpu);
struct pdev_entry *p;
@@ -401,7 +403,8 @@ static struct platform_device *coretemp_get_pdev(unsigned int cpu)
return NULL;
}
-static struct temp_data *init_temp_data(unsigned int cpu, int pkg_flag)
+static struct temp_data __cpuinit *init_temp_data(unsigned int cpu,
+ int pkg_flag)
{
struct temp_data *tdata;
@@ -419,7 +422,7 @@ static struct temp_data *init_temp_data(unsigned int cpu, int pkg_flag)
return tdata;
}
-static int create_core_data(struct platform_device *pdev,
+static int __cpuinit create_core_data(struct platform_device *pdev,
unsigned int cpu, int pkg_flag)
{
struct temp_data *tdata;
@@ -490,7 +493,7 @@ exit_free:
return err;
}
-static void coretemp_add_core(unsigned int cpu, int pkg_flag)
+static void __cpuinit coretemp_add_core(unsigned int cpu, int pkg_flag)
{
struct platform_device *pdev = coretemp_get_pdev(cpu);
int err;
@@ -619,7 +622,7 @@ exit:
return err;
}
-static void coretemp_device_remove(unsigned int cpu)
+static void __cpuinit coretemp_device_remove(unsigned int cpu)
{
struct pdev_entry *p, *n;
u16 phys_proc_id = TO_PHYS_ID(cpu);
@@ -635,7 +638,7 @@ static void coretemp_device_remove(unsigned int cpu)
mutex_unlock(&pdev_list_mutex);
}
-static bool is_any_core_online(struct platform_data *pdata)
+static bool __cpuinit is_any_core_online(struct platform_data *pdata)
{
int i;
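Most of the coretemp churn is annotation work: TO_PHYS_ID()/TO_CORE_ID()/TO_ATTR_NO() are now defined unconditionally in terms of cpu_data(), only for_each_sibling() keeps an SMP/UP split, and helpers reachable only from the CPU-hotplug path gain __cpuinit so their text can be discarded when hotplug support is compiled out. A sketch of how such a helper ends up being used from a hotplug notifier (hypothetical wiring, not coretemp's actual callback):

static int __cpuinit example_cpu_callback(struct notifier_block *nfb,
					  unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	if (action == CPU_ONLINE)
		coretemp_add_core(cpu, 0);	/* hypothetical call into the helpers above */
	return NOTIFY_OK;
}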
diff --git a/drivers/hwmon/dme1737.c b/drivers/hwmon/dme1737.c
index d9c592713919..ffb229af7861 100644
--- a/drivers/hwmon/dme1737.c
+++ b/drivers/hwmon/dme1737.c
@@ -45,7 +45,7 @@
static struct platform_device *pdev;
/* Module load parameters */
-static int force_start;
+static bool force_start;
module_param(force_start, bool, 0);
MODULE_PARM_DESC(force_start, "Force the chip to start monitoring inputs");
@@ -53,7 +53,7 @@ static unsigned short force_id;
module_param(force_id, ushort, 0);
MODULE_PARM_DESC(force_id, "Override the detected device ID");
-static int probe_all_addr;
+static bool probe_all_addr;
module_param(probe_all_addr, bool, 0);
MODULE_PARM_DESC(probe_all_addr, "Include probing of non-standard LPC "
"addresses");
@@ -1223,7 +1223,7 @@ static ssize_t show_pwm(struct device *dev, struct device_attribute *attr,
}
static struct attribute *dme1737_pwm_chmod_attr[];
-static void dme1737_chmod_file(struct device*, struct attribute*, mode_t);
+static void dme1737_chmod_file(struct device*, struct attribute*, umode_t);
static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
@@ -1961,7 +1961,7 @@ static inline void dme1737_sio_outb(int sio_cip, int reg, int val)
static int dme1737_i2c_get_features(int, struct dme1737_data*);
static void dme1737_chmod_file(struct device *dev,
- struct attribute *attr, mode_t mode)
+ struct attribute *attr, umode_t mode)
{
if (sysfs_chmod_file(&dev->kobj, attr, mode)) {
dev_warn(dev, "Failed to change permissions of %s.\n",
@@ -1971,7 +1971,7 @@ static void dme1737_chmod_file(struct device *dev,
static void dme1737_chmod_group(struct device *dev,
const struct attribute_group *group,
- mode_t mode)
+ umode_t mode)
{
struct attribute **attr;
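Two tree-wide conversions meet in this file: flag-style module parameters are switched from int to bool to match their module_param(..., bool, ...) declarations, and sysfs permission bits move from mode_t to umode_t. A small sketch with hypothetical names:

static bool example_force;		/* was: static int example_force; */
module_param(example_force, bool, 0);
MODULE_PARM_DESC(example_force, "Example on/off parameter");

static void example_chmod_file(struct device *dev,
			       struct attribute *attr, umode_t mode)
{
	if (sysfs_chmod_file(&dev->kobj, attr, mode))
		dev_warn(dev, "Failed to change permissions of %s\n",
			 attr->name);
}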
diff --git a/drivers/hwmon/ds620.c b/drivers/hwmon/ds620.c
index 225ae4f36583..300c3d4d67df 100644
--- a/drivers/hwmon/ds620.c
+++ b/drivers/hwmon/ds620.c
@@ -161,7 +161,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da,
struct i2c_client *client = to_i2c_client(dev);
struct ds620_data *data = i2c_get_clientdata(client);
- res = strict_strtol(buf, 10, &val);
+ res = kstrtol(buf, 10, &val);
if (res)
return res;
diff --git a/drivers/hwmon/emc1403.c b/drivers/hwmon/emc1403.c
index cd2a6e437aec..270ffab711cb 100644
--- a/drivers/hwmon/emc1403.c
+++ b/drivers/hwmon/emc1403.c
@@ -80,7 +80,7 @@ static ssize_t store_temp(struct device *dev,
unsigned long val;
int retval;
- if (strict_strtoul(buf, 10, &val))
+ if (kstrtoul(buf, 10, &val))
return -EINVAL;
retval = i2c_smbus_write_byte_data(client, sda->index,
DIV_ROUND_CLOSEST(val, 1000));
@@ -98,7 +98,7 @@ static ssize_t store_bit(struct device *dev,
unsigned long val;
int retval;
- if (strict_strtoul(buf, 10, &val))
+ if (kstrtoul(buf, 10, &val))
return -EINVAL;
mutex_lock(&data->mutex);
@@ -151,7 +151,7 @@ static ssize_t store_hyst(struct device *dev,
int hyst;
unsigned long val;
- if (strict_strtoul(buf, 10, &val))
+ if (kstrtoul(buf, 10, &val))
return -EINVAL;
mutex_lock(&data->mutex);
diff --git a/drivers/hwmon/emc2103.c b/drivers/hwmon/emc2103.c
index af914ad93ece..865063914d76 100644
--- a/drivers/hwmon/emc2103.c
+++ b/drivers/hwmon/emc2103.c
@@ -55,7 +55,7 @@ static const u8 REG_TEMP_MAX[4] = { 0x34, 0x30, 0x31, 0x32 };
* it. Default is to leave the device in the state it's already in (-1).
* This parameter allows APD mode to be optionally forced on or off */
static int apd = -1;
-module_param(apd, bool, 0);
+module_param(apd, bint, 0);
MODULE_PARM_DESC(init, "Set to zero to disable anti-parallel diode mode");
struct temperature {
@@ -244,7 +244,7 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *da,
struct emc2103_data *data = i2c_get_clientdata(client);
long val;
- int result = strict_strtol(buf, 10, &val);
+ int result = kstrtol(buf, 10, &val);
if (result < 0)
return -EINVAL;
@@ -268,7 +268,7 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *da,
struct emc2103_data *data = i2c_get_clientdata(client);
long val;
- int result = strict_strtol(buf, 10, &val);
+ int result = kstrtol(buf, 10, &val);
if (result < 0)
return -EINVAL;
@@ -314,7 +314,7 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *da,
int new_range_bits, old_div = 8 / data->fan_multiplier;
long new_div;
- int status = strict_strtol(buf, 10, &new_div);
+ int status = kstrtol(buf, 10, &new_div);
if (status < 0)
return -EINVAL;
@@ -388,7 +388,7 @@ static ssize_t set_fan_target(struct device *dev, struct device_attribute *da,
struct i2c_client *client = to_i2c_client(dev);
long rpm_target;
- int result = strict_strtol(buf, 10, &rpm_target);
+ int result = kstrtol(buf, 10, &rpm_target);
if (result < 0)
return -EINVAL;
@@ -434,7 +434,7 @@ static ssize_t set_pwm_enable(struct device *dev, struct device_attribute *da,
long new_value;
u8 conf_reg;
- int result = strict_strtol(buf, 10, &new_value);
+ int result = kstrtol(buf, 10, &new_value);
if (result < 0)
return -EINVAL;
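Here apd keeps its -1 "leave the hardware as configured" default, so it cannot simply become bool like the other flags; the bint parameter type accepts either a boolean string or an integer, which is why only the module_param() type changes. Sketch of the idiom with a hypothetical name:

static int example_apd = -1;		/* -1: keep current setting, 0: off, 1: on */
module_param(example_apd, bint, 0);
MODULE_PARM_DESC(example_apd, "Anti-parallel diode mode (-1 keeps the current setting)");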
diff --git a/drivers/hwmon/emc6w201.c b/drivers/hwmon/emc6w201.c
index 0064432f361f..6ebb9b738c9c 100644
--- a/drivers/hwmon/emc6w201.c
+++ b/drivers/hwmon/emc6w201.c
@@ -212,7 +212,7 @@ static ssize_t set_in(struct device *dev, struct device_attribute *devattr,
long val;
u8 reg;
- err = strict_strtol(buf, 10, &val);
+ err = kstrtol(buf, 10, &val);
if (err < 0)
return err;
@@ -249,7 +249,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *devattr,
long val;
u8 reg;
- err = strict_strtol(buf, 10, &val);
+ err = kstrtol(buf, 10, &val);
if (err < 0)
return err;
@@ -291,7 +291,7 @@ static ssize_t set_fan(struct device *dev, struct device_attribute *devattr,
int err;
unsigned long val;
- err = strict_strtoul(buf, 10, &val);
+ err = kstrtoul(buf, 10, &val);
if (err < 0)
return err;
diff --git a/drivers/hwmon/exynos4_tmu.c b/drivers/hwmon/exynos4_tmu.c
index faa0884f61f6..f2359a0093bd 100644
--- a/drivers/hwmon/exynos4_tmu.c
+++ b/drivers/hwmon/exynos4_tmu.c
@@ -506,17 +506,7 @@ static struct platform_driver exynos4_tmu_driver = {
.resume = exynos4_tmu_resume,
};
-static int __init exynos4_tmu_driver_init(void)
-{
- return platform_driver_register(&exynos4_tmu_driver);
-}
-module_init(exynos4_tmu_driver_init);
-
-static void __exit exynos4_tmu_driver_exit(void)
-{
- platform_driver_unregister(&exynos4_tmu_driver);
-}
-module_exit(exynos4_tmu_driver_exit);
+module_platform_driver(exynos4_tmu_driver);
MODULE_DESCRIPTION("EXYNOS4 TMU Driver");
MODULE_AUTHOR("Donggeun Kim <dg77.kim@samsung.com>");
diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c
index 59dd881c71d8..e50305819f01 100644
--- a/drivers/hwmon/f71882fg.c
+++ b/drivers/hwmon/f71882fg.c
@@ -1333,7 +1333,7 @@ static ssize_t store_fan_full_speed(struct device *dev,
int err, nr = to_sensor_dev_attr_2(devattr)->index;
long val;
- err = strict_strtol(buf, 10, &val);
+ err = kstrtol(buf, 10, &val);
if (err)
return err;
@@ -1367,7 +1367,7 @@ static ssize_t store_fan_beep(struct device *dev, struct device_attribute
int err, nr = to_sensor_dev_attr_2(devattr)->index;
unsigned long val;
- err = strict_strtoul(buf, 10, &val);
+ err = kstrtoul(buf, 10, &val);
if (err)
return err;
@@ -1420,7 +1420,7 @@ static ssize_t store_in_max(struct device *dev, struct device_attribute
int err;
long val;
- err = strict_strtol(buf, 10, &val);
+ err = kstrtol(buf, 10, &val);
if (err)
return err;
@@ -1454,7 +1454,7 @@ static ssize_t store_in_beep(struct device *dev, struct device_attribute
int err, nr = to_sensor_dev_attr_2(devattr)->index;
unsigned long val;
- err = strict_strtoul(buf, 10, &val);
+ err = kstrtoul(buf, 10, &val);
if (err)
return err;
@@ -1524,7 +1524,7 @@ static ssize_t store_temp_max(struct device *dev, struct device_attribute
int err, nr = to_sensor_dev_attr_2(devattr)->index;
long val;
- err = strict_strtol(buf, 10, &val);
+ err = kstrtol(buf, 10, &val);
if (err)
return err;
@@ -1566,7 +1566,7 @@ static ssize_t store_temp_max_hyst(struct device *dev, struct device_attribute
u8 reg;
long val;
- err = strict_strtol(buf, 10, &val);
+ err = kstrtol(buf, 10, &val);
if (err)
return err;
@@ -1609,7 +1609,7 @@ static ssize_t store_temp_crit(struct device *dev, struct device_attribute
int err, nr = to_sensor_dev_attr_2(devattr)->index;
long val;
- err = strict_strtol(buf, 10, &val);
+ err = kstrtol(buf, 10, &val);
if (err)
return err;
@@ -1670,7 +1670,7 @@ static ssize_t store_temp_beep(struct device *dev, struct device_attribute
int err, nr = to_sensor_dev_attr_2(devattr)->index;
unsigned long val;
- err = strict_strtoul(buf, 10, &val);
+ err = kstrtoul(buf, 10, &val);
if (err)
return err;
@@ -1737,7 +1737,7 @@ static ssize_t store_pwm(struct device *dev,
int err, nr = to_sensor_dev_attr_2(devattr)->index;
long val;
- err = strict_strtol(buf, 10, &val);
+ err = kstrtol(buf, 10, &val);
if (err)
return err;
@@ -1788,7 +1788,7 @@ static ssize_t store_simple_pwm(struct device *dev,
int err, nr = to_sensor_dev_attr_2(devattr)->index;
long val;
- err = strict_strtol(buf, 10, &val);
+ err = kstrtol(buf, 10, &val);
if (err)
return err;
@@ -1835,7 +1835,7 @@ static ssize_t store_pwm_enable(struct device *dev, struct device_attribute
int err, nr = to_sensor_dev_attr_2(devattr)->index;
long val;
- err = strict_strtol(buf, 10, &val);
+ err = kstrtol(buf, 10, &val);
if (err)
return err;
@@ -1915,7 +1915,7 @@ static ssize_t store_pwm_auto_point_pwm(struct device *dev,
int point = to_sensor_dev_attr_2(devattr)->nr;
long val;
- err = strict_strtol(buf, 10, &val);
+ err = kstrtol(buf, 10, &val);
if (err)
return err;
@@ -1969,7 +1969,7 @@ static ssize_t store_pwm_auto_point_temp_hyst(struct device *dev,
u8 reg;
long val;
- err = strict_strtol(buf, 10, &val);
+ err = kstrtol(buf, 10, &val);
if (err)
return err;
@@ -2015,7 +2015,7 @@ static ssize_t store_pwm_interpolate(struct device *dev,
int err, nr = to_sensor_dev_attr_2(devattr)->index;
unsigned long val;
- err = strict_strtoul(buf, 10, &val);
+ err = kstrtoul(buf, 10, &val);
if (err)
return err;
@@ -2055,7 +2055,7 @@ static ssize_t store_pwm_auto_point_channel(struct device *dev,
int err, nr = to_sensor_dev_attr_2(devattr)->index;
long val;
- err = strict_strtol(buf, 10, &val);
+ err = kstrtol(buf, 10, &val);
if (err)
return err;
@@ -2106,7 +2106,7 @@ static ssize_t store_pwm_auto_point_temp(struct device *dev,
int point = to_sensor_dev_attr_2(devattr)->nr;
long val;
- err = strict_strtol(buf, 10, &val);
+ err = kstrtol(buf, 10, &val);
if (err)
return err;
diff --git a/drivers/hwmon/f75375s.c b/drivers/hwmon/f75375s.c
index 95cbfb3a7077..eedf574ab539 100644
--- a/drivers/hwmon/f75375s.c
+++ b/drivers/hwmon/f75375s.c
@@ -1,16 +1,19 @@
/*
- * f75375s.c - driver for the Fintek F75375/SP and F75373
- * hardware monitoring features
+ * f75375s.c - driver for the Fintek F75375/SP, F75373 and
+ * F75387SG/RG hardware monitoring features
* Copyright (C) 2006-2007 Riku Voipio
*
* Datasheets available at:
*
* f75375:
- * http://www.fintek.com.tw/files/productfiles/F75375_V026P.pdf
+ * http://www.fintek.com.tw/files/productfiles/F75375_V026P.pdf
*
* f75373:
* http://www.fintek.com.tw/files/productfiles/F75373_V025P.pdf
*
+ * f75387:
+ * http://www.fintek.com.tw/files/productfiles/F75387_V027P.pdf
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
@@ -40,7 +43,7 @@
/* Addresses to scan */
static const unsigned short normal_i2c[] = { 0x2d, 0x2e, I2C_CLIENT_END };
-enum chips { f75373, f75375 };
+enum chips { f75373, f75375, f75387 };
/* Fintek F75375 registers */
#define F75375_REG_CONFIG0 0x0
@@ -59,6 +62,7 @@ enum chips { f75373, f75375 };
#define F75375_REG_VOLT_LOW(nr) (0x21 + (nr) * 2)
#define F75375_REG_TEMP(nr) (0x14 + (nr))
+#define F75387_REG_TEMP11_LSB(nr) (0x1a + (nr))
#define F75375_REG_TEMP_HIGH(nr) (0x28 + (nr) * 2)
#define F75375_REG_TEMP_HYST(nr) (0x29 + (nr) * 2)
@@ -78,8 +82,11 @@ enum chips { f75373, f75375 };
#define F75375_REG_PWM1_DROP_DUTY 0x6B
#define F75375_REG_PWM2_DROP_DUTY 0x6C
-#define FAN_CTRL_LINEAR(nr) (4 + nr)
+#define F75375_FAN_CTRL_LINEAR(nr) (4 + nr)
+#define F75387_FAN_CTRL_LINEAR(nr) (1 + ((nr) * 4))
#define FAN_CTRL_MODE(nr) (4 + ((nr) * 2))
+#define F75387_FAN_DUTY_MODE(nr) (2 + ((nr) * 4))
+#define F75387_FAN_MANU_MODE(nr) ((nr) * 4)
/*
* Data structures and manipulation thereof
@@ -102,13 +109,18 @@ struct f75375_data {
u8 in_min[4];
u16 fan[2];
u16 fan_min[2];
- u16 fan_full[2];
- u16 fan_exp[2];
+ u16 fan_max[2];
+ u16 fan_target[2];
u8 fan_timer;
u8 pwm[2];
u8 pwm_mode[2];
u8 pwm_enable[2];
- s8 temp[2];
+ /*
+ * f75387: For remote temperature reading, it uses signed 11-bit
+ * values with LSB = 0.125 degree Celsius, left-justified in 16-bit
+ * registers. For original 8-bit temp readings, the LSB just is 0.
+ */
+ s16 temp11[2];
s8 temp_high[2];
s8 temp_max_hyst[2];
};
@@ -122,6 +134,7 @@ static int f75375_remove(struct i2c_client *client);
static const struct i2c_device_id f75375_id[] = {
{ "f75373", f75373 },
{ "f75375", f75375 },
+ { "f75387", f75387 },
{ }
};
MODULE_DEVICE_TABLE(i2c, f75375_id);
@@ -146,8 +159,8 @@ static inline int f75375_read8(struct i2c_client *client, u8 reg)
/* in most cases, should be called while holding update_lock */
static inline u16 f75375_read16(struct i2c_client *client, u8 reg)
{
- return ((i2c_smbus_read_byte_data(client, reg) << 8)
- | i2c_smbus_read_byte_data(client, reg + 1));
+ return (i2c_smbus_read_byte_data(client, reg) << 8)
+ | i2c_smbus_read_byte_data(client, reg + 1);
}
static inline void f75375_write8(struct i2c_client *client, u8 reg,
@@ -181,11 +194,11 @@ static struct f75375_data *f75375_update_device(struct device *dev)
f75375_read8(client, F75375_REG_TEMP_HIGH(nr));
data->temp_max_hyst[nr] =
f75375_read8(client, F75375_REG_TEMP_HYST(nr));
- data->fan_full[nr] =
+ data->fan_max[nr] =
f75375_read16(client, F75375_REG_FAN_FULL(nr));
data->fan_min[nr] =
f75375_read16(client, F75375_REG_FAN_MIN(nr));
- data->fan_exp[nr] =
+ data->fan_target[nr] =
f75375_read16(client, F75375_REG_FAN_EXP(nr));
data->pwm[nr] = f75375_read8(client,
F75375_REG_FAN_PWM_DUTY(nr));
@@ -205,8 +218,14 @@ static struct f75375_data *f75375_update_device(struct device *dev)
if (time_after(jiffies, data->last_updated + 2 * HZ)
|| !data->valid) {
for (nr = 0; nr < 2; nr++) {
- data->temp[nr] =
- f75375_read8(client, F75375_REG_TEMP(nr));
+ /* assign MSB, therefore shift it by 8 bits */
+ data->temp11[nr] =
+ f75375_read8(client, F75375_REG_TEMP(nr)) << 8;
+ if (data->kind == f75387)
+ /* merge F75387's temperature LSB (11-bit) */
+ data->temp11[nr] |=
+ f75375_read8(client,
+ F75387_REG_TEMP11_LSB(nr));
data->fan[nr] =
f75375_read16(client, F75375_REG_FAN(nr));
}
@@ -226,14 +245,14 @@ static inline u16 rpm_from_reg(u16 reg)
{
if (reg == 0 || reg == 0xffff)
return 0;
- return (1500000 / reg);
+ return 1500000 / reg;
}
static inline u16 rpm_to_reg(int rpm)
{
if (rpm < 367 || rpm > 0xffff)
return 0xffff;
- return (1500000 / rpm);
+ return 1500000 / rpm;
}
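Both helpers invert the same relation, so a quick worked example for the conversion above:

/*
 * rpm_to_reg(3000)  = 1500000 / 3000 = 500   (register value)
 * rpm_from_reg(500) = 1500000 / 500  = 3000  (RPM)
 * Register values 0 and 0xffff read back as 0 RPM, and requested speeds
 * below 367 RPM or above 0xffff are pinned to 0xffff.
 */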
static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
@@ -242,7 +261,12 @@ static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
int nr = to_sensor_dev_attr(attr)->index;
struct i2c_client *client = to_i2c_client(dev);
struct f75375_data *data = i2c_get_clientdata(client);
- int val = simple_strtoul(buf, NULL, 10);
+ unsigned long val;
+ int err;
+
+ err = kstrtoul(buf, 10, &val);
+ if (err < 0)
+ return err;
mutex_lock(&data->update_lock);
data->fan_min[nr] = rpm_to_reg(val);
@@ -251,17 +275,22 @@ static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
return count;
}
-static ssize_t set_fan_exp(struct device *dev, struct device_attribute *attr,
+static ssize_t set_fan_target(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(attr)->index;
struct i2c_client *client = to_i2c_client(dev);
struct f75375_data *data = i2c_get_clientdata(client);
- int val = simple_strtoul(buf, NULL, 10);
+ unsigned long val;
+ int err;
+
+ err = kstrtoul(buf, 10, &val);
+ if (err < 0)
+ return err;
mutex_lock(&data->update_lock);
- data->fan_exp[nr] = rpm_to_reg(val);
- f75375_write16(client, F75375_REG_FAN_EXP(nr), data->fan_exp[nr]);
+ data->fan_target[nr] = rpm_to_reg(val);
+ f75375_write16(client, F75375_REG_FAN_EXP(nr), data->fan_target[nr]);
mutex_unlock(&data->update_lock);
return count;
}
@@ -272,7 +301,12 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
int nr = to_sensor_dev_attr(attr)->index;
struct i2c_client *client = to_i2c_client(dev);
struct f75375_data *data = i2c_get_clientdata(client);
- int val = simple_strtoul(buf, NULL, 10);
+ unsigned long val;
+ int err;
+
+ err = kstrtoul(buf, 10, &val);
+ if (err < 0)
+ return err;
mutex_lock(&data->update_lock);
data->pwm[nr] = SENSORS_LIMIT(val, 0, 255);
@@ -294,28 +328,54 @@ static int set_pwm_enable_direct(struct i2c_client *client, int nr, int val)
struct f75375_data *data = i2c_get_clientdata(client);
u8 fanmode;
- if (val < 0 || val > 4)
+ if (val < 0 || val > 3)
return -EINVAL;
fanmode = f75375_read8(client, F75375_REG_FAN_TIMER);
- fanmode &= ~(3 << FAN_CTRL_MODE(nr));
-
- switch (val) {
- case 0: /* Full speed */
- fanmode |= (3 << FAN_CTRL_MODE(nr));
- data->pwm[nr] = 255;
- f75375_write8(client, F75375_REG_FAN_PWM_DUTY(nr),
- data->pwm[nr]);
- break;
- case 1: /* PWM */
- fanmode |= (3 << FAN_CTRL_MODE(nr));
- break;
- case 2: /* AUTOMATIC*/
- fanmode |= (2 << FAN_CTRL_MODE(nr));
- break;
- case 3: /* fan speed */
- break;
+ if (data->kind == f75387) {
+ /* clear each fanX_mode bit before setting them properly */
+ fanmode &= ~(1 << F75387_FAN_DUTY_MODE(nr));
+ fanmode &= ~(1 << F75387_FAN_MANU_MODE(nr));
+ switch (val) {
+ case 0: /* full speed */
+ fanmode |= (1 << F75387_FAN_MANU_MODE(nr));
+ fanmode |= (1 << F75387_FAN_DUTY_MODE(nr));
+ data->pwm[nr] = 255;
+ f75375_write8(client, F75375_REG_FAN_PWM_DUTY(nr),
+ data->pwm[nr]);
+ break;
+ case 1: /* PWM */
+ fanmode |= (1 << F75387_FAN_MANU_MODE(nr));
+ fanmode |= (1 << F75387_FAN_DUTY_MODE(nr));
+ break;
+ case 2: /* AUTOMATIC*/
+ fanmode |= (1 << F75387_FAN_DUTY_MODE(nr));
+ break;
+ case 3: /* fan speed */
+ fanmode |= (1 << F75387_FAN_MANU_MODE(nr));
+ break;
+ }
+ } else {
+ /* clear each fanX_mode bit before setting them properly */
+ fanmode &= ~(3 << FAN_CTRL_MODE(nr));
+ switch (val) {
+ case 0: /* full speed */
+ fanmode |= (3 << FAN_CTRL_MODE(nr));
+ data->pwm[nr] = 255;
+ f75375_write8(client, F75375_REG_FAN_PWM_DUTY(nr),
+ data->pwm[nr]);
+ break;
+ case 1: /* PWM */
+ fanmode |= (3 << FAN_CTRL_MODE(nr));
+ break;
+ case 2: /* AUTOMATIC*/
+ fanmode |= (2 << FAN_CTRL_MODE(nr));
+ break;
+ case 3: /* fan speed */
+ break;
+ }
}
+
f75375_write8(client, F75375_REG_FAN_TIMER, fanmode);
data->pwm_enable[nr] = val;
return 0;
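For the F75387 the two-bit per-fan mode field used by the F75375/F75373 is replaced by separate "manual" and "duty" bits, and the accepted range tightens from 0..4 to 0..3; the switch above programs the bits as:

/*
 * pwm_enable  ->  F75387_FAN_MANU_MODE / F75387_FAN_DUTY_MODE bits
 *   0  full speed   MANU=1, DUTY=1, duty register forced to 255
 *   1  PWM          MANU=1, DUTY=1
 *   2  automatic    MANU=0, DUTY=1
 *   3  fan speed    MANU=1, DUTY=0
 */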
@@ -327,8 +387,12 @@ static ssize_t set_pwm_enable(struct device *dev, struct device_attribute *attr,
int nr = to_sensor_dev_attr(attr)->index;
struct i2c_client *client = to_i2c_client(dev);
struct f75375_data *data = i2c_get_clientdata(client);
- int val = simple_strtoul(buf, NULL, 10);
- int err = 0;
+ unsigned long val;
+ int err;
+
+ err = kstrtoul(buf, 10, &val);
+ if (err < 0)
+ return err;
mutex_lock(&data->update_lock);
err = set_pwm_enable_direct(client, nr, val);
@@ -342,20 +406,39 @@ static ssize_t set_pwm_mode(struct device *dev, struct device_attribute *attr,
int nr = to_sensor_dev_attr(attr)->index;
struct i2c_client *client = to_i2c_client(dev);
struct f75375_data *data = i2c_get_clientdata(client);
- int val = simple_strtoul(buf, NULL, 10);
- u8 conf = 0;
+ unsigned long val;
+ int err;
+ u8 conf;
+ char reg, ctrl;
+
+ err = kstrtoul(buf, 10, &val);
+ if (err < 0)
+ return err;
if (!(val == 0 || val == 1))
return -EINVAL;
+ /* F75373 does not support DC (linear voltage) fan control mode */
+ if (data->kind == f75373 && val == 0)
+ return -EINVAL;
+
+ /* take care for different registers */
+ if (data->kind == f75387) {
+ reg = F75375_REG_FAN_TIMER;
+ ctrl = F75387_FAN_CTRL_LINEAR(nr);
+ } else {
+ reg = F75375_REG_CONFIG1;
+ ctrl = F75375_FAN_CTRL_LINEAR(nr);
+ }
+
mutex_lock(&data->update_lock);
- conf = f75375_read8(client, F75375_REG_CONFIG1);
- conf &= ~(1 << FAN_CTRL_LINEAR(nr));
+ conf = f75375_read8(client, reg);
+ conf &= ~(1 << ctrl);
if (val == 0)
- conf |= (1 << FAN_CTRL_LINEAR(nr)) ;
+ conf |= (1 << ctrl);
- f75375_write8(client, F75375_REG_CONFIG1, conf);
+ f75375_write8(client, reg, conf);
data->pwm_mode[nr] = val;
mutex_unlock(&data->update_lock);
return count;
@@ -410,7 +493,13 @@ static ssize_t set_in_max(struct device *dev, struct device_attribute *attr,
int nr = to_sensor_dev_attr(attr)->index;
struct i2c_client *client = to_i2c_client(dev);
struct f75375_data *data = i2c_get_clientdata(client);
- int val = simple_strtoul(buf, NULL, 10);
+ unsigned long val;
+ int err;
+
+ err = kstrtoul(buf, 10, &val);
+ if (err < 0)
+ return err;
+
val = SENSORS_LIMIT(VOLT_TO_REG(val), 0, 0xff);
mutex_lock(&data->update_lock);
data->in_max[nr] = val;
@@ -425,7 +514,13 @@ static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
int nr = to_sensor_dev_attr(attr)->index;
struct i2c_client *client = to_i2c_client(dev);
struct f75375_data *data = i2c_get_clientdata(client);
- int val = simple_strtoul(buf, NULL, 10);
+ unsigned long val;
+ int err;
+
+ err = kstrtoul(buf, 10, &val);
+ if (err < 0)
+ return err;
+
val = SENSORS_LIMIT(VOLT_TO_REG(val), 0, 0xff);
mutex_lock(&data->update_lock);
data->in_min[nr] = val;
@@ -435,13 +530,14 @@ static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
}
#define TEMP_FROM_REG(val) ((val) * 1000)
#define TEMP_TO_REG(val) ((val) / 1000)
+#define TEMP11_FROM_REG(reg) ((reg) / 32 * 125)
-static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
+static ssize_t show_temp11(struct device *dev, struct device_attribute *attr,
char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct f75375_data *data = f75375_update_device(dev);
- return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp[nr]));
+ return sprintf(buf, "%d\n", TEMP11_FROM_REG(data->temp11[nr]));
}
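The new 11-bit path is easiest to see with a concrete value (numbers chosen for illustration):

/*
 * TEMP11_FROM_REG() assumes the reading is left-justified in 16 bits with
 * an LSB of 0.125 degrees Celsius.  With msb = 0x19 and lsb = 0x20 the
 * stored value is 0x1920 = 6432, and 6432 / 32 * 125 = 25125, i.e.
 * 25.125 degrees reported in millidegrees as hwmon expects.  For the
 * F75375/F75373 only the MSB is stored (the LSB stays 0), so the same
 * macro reduces to msb * 1000, matching the old 8-bit behaviour.
 */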
static ssize_t show_temp_max(struct device *dev, struct device_attribute *attr,
@@ -466,7 +562,13 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
int nr = to_sensor_dev_attr(attr)->index;
struct i2c_client *client = to_i2c_client(dev);
struct f75375_data *data = i2c_get_clientdata(client);
- int val = simple_strtol(buf, NULL, 10);
+ unsigned long val;
+ int err;
+
+ err = kstrtoul(buf, 10, &val);
+ if (err < 0)
+ return err;
+
val = SENSORS_LIMIT(TEMP_TO_REG(val), 0, 127);
mutex_lock(&data->update_lock);
data->temp_high[nr] = val;
@@ -481,7 +583,13 @@ static ssize_t set_temp_max_hyst(struct device *dev,
int nr = to_sensor_dev_attr(attr)->index;
struct i2c_client *client = to_i2c_client(dev);
struct f75375_data *data = i2c_get_clientdata(client);
- int val = simple_strtol(buf, NULL, 10);
+ unsigned long val;
+ int err;
+
+ err = kstrtoul(buf, 10, &val);
+ if (err < 0)
+ return err;
+
val = SENSORS_LIMIT(TEMP_TO_REG(val), 0, 127);
mutex_lock(&data->update_lock);
data->temp_max_hyst[nr] = val;
@@ -502,8 +610,8 @@ static ssize_t show_##thing(struct device *dev, struct device_attribute *attr, \
show_fan(fan);
show_fan(fan_min);
-show_fan(fan_full);
-show_fan(fan_exp);
+show_fan(fan_max);
+show_fan(fan_target);
static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, show_in, NULL, 0);
static SENSOR_DEVICE_ATTR(in0_max, S_IRUGO|S_IWUSR,
@@ -525,28 +633,28 @@ static SENSOR_DEVICE_ATTR(in3_max, S_IRUGO|S_IWUSR,
show_in_max, set_in_max, 3);
static SENSOR_DEVICE_ATTR(in3_min, S_IRUGO|S_IWUSR,
show_in_min, set_in_min, 3);
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp11, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IRUGO|S_IWUSR,
show_temp_max_hyst, set_temp_max_hyst, 0);
static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO|S_IWUSR,
show_temp_max, set_temp_max, 0);
-static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp11, NULL, 1);
static SENSOR_DEVICE_ATTR(temp2_max_hyst, S_IRUGO|S_IWUSR,
show_temp_max_hyst, set_temp_max_hyst, 1);
static SENSOR_DEVICE_ATTR(temp2_max, S_IRUGO|S_IWUSR,
show_temp_max, set_temp_max, 1);
static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, show_fan, NULL, 0);
-static SENSOR_DEVICE_ATTR(fan1_full, S_IRUGO, show_fan_full, NULL, 0);
+static SENSOR_DEVICE_ATTR(fan1_max, S_IRUGO, show_fan_max, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_min, S_IRUGO|S_IWUSR,
show_fan_min, set_fan_min, 0);
-static SENSOR_DEVICE_ATTR(fan1_exp, S_IRUGO|S_IWUSR,
- show_fan_exp, set_fan_exp, 0);
+static SENSOR_DEVICE_ATTR(fan1_target, S_IRUGO|S_IWUSR,
+ show_fan_target, set_fan_target, 0);
static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, show_fan, NULL, 1);
-static SENSOR_DEVICE_ATTR(fan2_full, S_IRUGO, show_fan_full, NULL, 1);
+static SENSOR_DEVICE_ATTR(fan2_max, S_IRUGO, show_fan_max, NULL, 1);
static SENSOR_DEVICE_ATTR(fan2_min, S_IRUGO|S_IWUSR,
show_fan_min, set_fan_min, 1);
-static SENSOR_DEVICE_ATTR(fan2_exp, S_IRUGO|S_IWUSR,
- show_fan_exp, set_fan_exp, 1);
+static SENSOR_DEVICE_ATTR(fan2_target, S_IRUGO|S_IWUSR,
+ show_fan_target, set_fan_target, 1);
static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO|S_IWUSR,
show_pwm, set_pwm, 0);
static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO|S_IWUSR,
@@ -568,13 +676,13 @@ static struct attribute *f75375_attributes[] = {
&sensor_dev_attr_temp2_max.dev_attr.attr,
&sensor_dev_attr_temp2_max_hyst.dev_attr.attr,
&sensor_dev_attr_fan1_input.dev_attr.attr,
- &sensor_dev_attr_fan1_full.dev_attr.attr,
+ &sensor_dev_attr_fan1_max.dev_attr.attr,
&sensor_dev_attr_fan1_min.dev_attr.attr,
- &sensor_dev_attr_fan1_exp.dev_attr.attr,
+ &sensor_dev_attr_fan1_target.dev_attr.attr,
&sensor_dev_attr_fan2_input.dev_attr.attr,
- &sensor_dev_attr_fan2_full.dev_attr.attr,
+ &sensor_dev_attr_fan2_max.dev_attr.attr,
&sensor_dev_attr_fan2_min.dev_attr.attr,
- &sensor_dev_attr_fan2_exp.dev_attr.attr,
+ &sensor_dev_attr_fan2_target.dev_attr.attr,
&sensor_dev_attr_pwm1.dev_attr.attr,
&sensor_dev_attr_pwm1_enable.dev_attr.attr,
&sensor_dev_attr_pwm1_mode.dev_attr.attr,
@@ -604,6 +712,51 @@ static void f75375_init(struct i2c_client *client, struct f75375_data *data,
struct f75375s_platform_data *f75375s_pdata)
{
int nr;
+
+ if (!f75375s_pdata) {
+ u8 conf, mode;
+ int nr;
+
+ conf = f75375_read8(client, F75375_REG_CONFIG1);
+ mode = f75375_read8(client, F75375_REG_FAN_TIMER);
+ for (nr = 0; nr < 2; nr++) {
+ if (data->kind == f75387) {
+ bool manu, duty;
+
+ if (!(conf & (1 << F75387_FAN_CTRL_LINEAR(nr))))
+ data->pwm_mode[nr] = 1;
+
+ manu = ((mode >> F75387_FAN_MANU_MODE(nr)) & 1);
+ duty = ((mode >> F75387_FAN_DUTY_MODE(nr)) & 1);
+ if (manu && duty)
+ /* speed */
+ data->pwm_enable[nr] = 3;
+ else if (!manu && duty)
+ /* automatic */
+ data->pwm_enable[nr] = 2;
+ else
+ /* manual */
+ data->pwm_enable[nr] = 1;
+ } else {
+ if (!(conf & (1 << F75375_FAN_CTRL_LINEAR(nr))))
+ data->pwm_mode[nr] = 1;
+
+ switch ((mode >> FAN_CTRL_MODE(nr)) & 3) {
+ case 0: /* speed */
+ data->pwm_enable[nr] = 3;
+ break;
+ case 1: /* automatic */
+ data->pwm_enable[nr] = 2;
+ break;
+ default: /* manual */
+ data->pwm_enable[nr] = 1;
+ break;
+ }
+ }
+ }
+ return;
+ }
+
set_pwm_enable_direct(client, 0, f75375s_pdata->pwm_enable[0]);
set_pwm_enable_direct(client, 1, f75375s_pdata->pwm_enable[1]);
for (nr = 0; nr < 2; nr++) {
@@ -624,14 +777,16 @@ static int f75375_probe(struct i2c_client *client,
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_BYTE_DATA))
return -EIO;
- if (!(data = kzalloc(sizeof(struct f75375_data), GFP_KERNEL)))
+ data = kzalloc(sizeof(struct f75375_data), GFP_KERNEL);
+ if (!data)
return -ENOMEM;
i2c_set_clientdata(client, data);
mutex_init(&data->update_lock);
data->kind = id->driver_data;
- if ((err = sysfs_create_group(&client->dev.kobj, &f75375_group)))
+ err = sysfs_create_group(&client->dev.kobj, &f75375_group);
+ if (err)
goto exit_free;
if (data->kind == f75375) {
@@ -653,8 +808,7 @@ static int f75375_probe(struct i2c_client *client,
goto exit_remove;
}
- if (f75375s_pdata != NULL)
- f75375_init(client, data, f75375s_pdata);
+ f75375_init(client, data, f75375s_pdata);
return 0;
@@ -685,10 +839,15 @@ static int f75375_detect(struct i2c_client *client,
vendid = f75375_read16(client, F75375_REG_VENDOR);
chipid = f75375_read16(client, F75375_CHIP_ID);
- if (chipid == 0x0306 && vendid == 0x1934)
+ if (vendid != 0x1934)
+ return -ENODEV;
+
+ if (chipid == 0x0306)
name = "f75375";
- else if (chipid == 0x0204 && vendid == 0x1934)
+ else if (chipid == 0x0204)
name = "f75373";
+ else if (chipid == 0x0410)
+ name = "f75387";
else
return -ENODEV;
@@ -711,7 +870,7 @@ static void __exit sensors_f75375_exit(void)
MODULE_AUTHOR("Riku Voipio");
MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("F75373/F75375 hardware monitoring driver");
+MODULE_DESCRIPTION("F75373/F75375/F75387 hardware monitoring driver");
module_init(sensors_f75375_init);
module_exit(sensors_f75375_exit);
diff --git a/drivers/hwmon/g760a.c b/drivers/hwmon/g760a.c
index 1d6a6fa31fb4..781277ddbaa5 100644
--- a/drivers/hwmon/g760a.c
+++ b/drivers/hwmon/g760a.c
@@ -166,7 +166,7 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *da,
struct g760a_data *data = g760a_update_client(dev);
unsigned long val;
- if (strict_strtoul(buf, 10, &val))
+ if (kstrtoul(buf, 10, &val))
return -EINVAL;
mutex_lock(&data->update_lock);
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
index 89aa9fb743af..2ce8c44a0e07 100644
--- a/drivers/hwmon/gpio-fan.c
+++ b/drivers/hwmon/gpio-fan.c
@@ -224,7 +224,7 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
int speed_index;
int ret = count;
- if (strict_strtoul(buf, 10, &pwm) || pwm > 255)
+ if (kstrtoul(buf, 10, &pwm) || pwm > 255)
return -EINVAL;
mutex_lock(&fan_data->lock);
@@ -257,7 +257,7 @@ static ssize_t set_pwm_enable(struct device *dev, struct device_attribute *attr,
struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
unsigned long val;
- if (strict_strtoul(buf, 10, &val) || val > 1)
+ if (kstrtoul(buf, 10, &val) || val > 1)
return -EINVAL;
if (fan_data->pwm_enable == val)
@@ -314,7 +314,7 @@ static ssize_t set_rpm(struct device *dev, struct device_attribute *attr,
unsigned long rpm;
int ret = count;
- if (strict_strtoul(buf, 10, &rpm))
+ if (kstrtoul(buf, 10, &rpm))
return -EINVAL;
mutex_lock(&fan_data->lock);
@@ -539,18 +539,7 @@ static struct platform_driver gpio_fan_driver = {
},
};
-static int __init gpio_fan_init(void)
-{
- return platform_driver_register(&gpio_fan_driver);
-}
-
-static void __exit gpio_fan_exit(void)
-{
- platform_driver_unregister(&gpio_fan_driver);
-}
-
-module_init(gpio_fan_init);
-module_exit(gpio_fan_exit);
+module_platform_driver(gpio_fan_driver);
MODULE_AUTHOR("Simon Guinot <sguinot@lacie.com>");
MODULE_DESCRIPTION("GPIO FAN driver");
diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
index 6a967d7dbdee..cc2981f749a6 100644
--- a/drivers/hwmon/ibmaem.c
+++ b/drivers/hwmon/ibmaem.c
@@ -904,7 +904,7 @@ static ssize_t aem_set_power_period(struct device *dev,
unsigned long temp;
int res;
- res = strict_strtoul(buf, 10, &temp);
+ res = kstrtoul(buf, 10, &temp);
if (res)
return res;
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index d912649fac50..0054d6f9cec9 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -17,6 +17,7 @@
* IT8720F Super I/O chip w/LPC interface
* IT8721F Super I/O chip w/LPC interface
* IT8726F Super I/O chip w/LPC interface
+ * IT8728F Super I/O chip w/LPC interface
* IT8758E Super I/O chip w/LPC interface
* Sis950 A clone of the IT8705F
*
@@ -58,7 +59,7 @@
#define DRVNAME "it87"
-enum chips { it87, it8712, it8716, it8718, it8720, it8721 };
+enum chips { it87, it8712, it8716, it8718, it8720, it8721, it8728 };
static unsigned short force_id;
module_param(force_id, ushort, 0);
@@ -135,6 +136,7 @@ static inline void superio_exit(void)
#define IT8720F_DEVID 0x8720
#define IT8721F_DEVID 0x8721
#define IT8726F_DEVID 0x8726
+#define IT8728F_DEVID 0x8728
#define IT87_ACT_REG 0x30
#define IT87_BASE_REG 0x60
@@ -146,10 +148,10 @@ static inline void superio_exit(void)
#define IT87_SIO_BEEP_PIN_REG 0xf6 /* Beep pin mapping */
/* Update battery voltage after every reading if true */
-static int update_vbat;
+static bool update_vbat;
/* Not all BIOSes properly configure the PWM registers */
-static int fix_pwm_polarity;
+static bool fix_pwm_polarity;
/* Many IT87 constants specified below */
@@ -274,11 +276,31 @@ struct it87_data {
s8 auto_temp[3][5]; /* [nr][0] is point1_temp_hyst */
};
+static inline int has_12mv_adc(const struct it87_data *data)
+{
+ /*
+ * IT8721F and later have a 12 mV ADC, also with internal scaling
+ * on selected inputs.
+ */
+ return data->type == it8721
+ || data->type == it8728;
+}
+
+static inline int has_newer_autopwm(const struct it87_data *data)
+{
+ /*
+ * IT8721F and later have separate registers for the temperature
+ * mapping and the manual duty cycle.
+ */
+ return data->type == it8721
+ || data->type == it8728;
+}
+
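+ /*
+  * The two helpers above collect the growing "IT8721F or later" checks
+  * (12 mV ADC, split duty/temperature-map registers).  On the 12 mV
+  * parts the inputs flagged in data->in_scaled use a 24 mV step, so for
+  * example (values chosen for illustration) a raw ADC code of 200 reads
+  * back as 200 * 12 = 2400 mV on a normal input, or 200 * 24 = 4800 mV
+  * on an internally scaled one.
+  */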
static u8 in_to_reg(const struct it87_data *data, int nr, long val)
{
long lsb;
- if (data->type == it8721) {
+ if (has_12mv_adc(data)) {
if (data->in_scaled & (1 << nr))
lsb = 24;
else
@@ -292,7 +314,7 @@ static u8 in_to_reg(const struct it87_data *data, int nr, long val)
static int in_from_reg(const struct it87_data *data, int nr, int val)
{
- if (data->type == it8721) {
+ if (has_12mv_adc(data)) {
if (data->in_scaled & (1 << nr))
return val * 24;
else
@@ -329,7 +351,7 @@ static inline u16 FAN16_TO_REG(long rpm)
static u8 pwm_to_reg(const struct it87_data *data, long val)
{
- if (data->type == it8721)
+ if (has_newer_autopwm(data))
return val;
else
return val >> 1;
@@ -337,7 +359,7 @@ static u8 pwm_to_reg(const struct it87_data *data, long val)
static int pwm_from_reg(const struct it87_data *data, u8 reg)
{
- if (data->type == it8721)
+ if (has_newer_autopwm(data))
return reg;
else
return (reg & 0x7f) << 1;
@@ -374,7 +396,8 @@ static inline int has_16bit_fans(const struct it87_data *data)
|| data->type == it8716
|| data->type == it8718
|| data->type == it8720
- || data->type == it8721;
+ || data->type == it8721
+ || data->type == it8728;
}
static inline int has_old_autopwm(const struct it87_data *data)
@@ -444,7 +467,7 @@ static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
struct it87_data *data = dev_get_drvdata(dev);
unsigned long val;
- if (strict_strtoul(buf, 10, &val) < 0)
+ if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
mutex_lock(&data->update_lock);
@@ -463,7 +486,7 @@ static ssize_t set_in_max(struct device *dev, struct device_attribute *attr,
struct it87_data *data = dev_get_drvdata(dev);
unsigned long val;
- if (strict_strtoul(buf, 10, &val) < 0)
+ if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
mutex_lock(&data->update_lock);
@@ -539,7 +562,7 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
struct it87_data *data = dev_get_drvdata(dev);
long val;
- if (strict_strtol(buf, 10, &val) < 0)
+ if (kstrtol(buf, 10, &val) < 0)
return -EINVAL;
mutex_lock(&data->update_lock);
@@ -557,7 +580,7 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
struct it87_data *data = dev_get_drvdata(dev);
long val;
- if (strict_strtol(buf, 10, &val) < 0)
+ if (kstrtol(buf, 10, &val) < 0)
return -EINVAL;
mutex_lock(&data->update_lock);
@@ -604,7 +627,7 @@ static ssize_t set_sensor(struct device *dev, struct device_attribute *attr,
long val;
u8 reg;
- if (strict_strtol(buf, 10, &val) < 0)
+ if (kstrtol(buf, 10, &val) < 0)
return -EINVAL;
reg = it87_read_value(data, IT87_REG_TEMP_ENABLE);
@@ -718,7 +741,7 @@ static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
long val;
u8 reg;
- if (strict_strtol(buf, 10, &val) < 0)
+ if (kstrtol(buf, 10, &val) < 0)
return -EINVAL;
mutex_lock(&data->update_lock);
@@ -751,7 +774,7 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
int min;
u8 old;
- if (strict_strtoul(buf, 10, &val) < 0)
+ if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
mutex_lock(&data->update_lock);
@@ -820,7 +843,7 @@ static ssize_t set_pwm_enable(struct device *dev,
struct it87_data *data = dev_get_drvdata(dev);
long val;
- if (strict_strtol(buf, 10, &val) < 0 || val < 0 || val > 2)
+ if (kstrtol(buf, 10, &val) < 0 || val < 0 || val > 2)
return -EINVAL;
/* Check trip points before switching to automatic mode */
@@ -842,7 +865,7 @@ static ssize_t set_pwm_enable(struct device *dev,
data->fan_main_ctrl);
} else {
if (val == 1) /* Manual mode */
- data->pwm_ctrl[nr] = data->type == it8721 ?
+ data->pwm_ctrl[nr] = has_newer_autopwm(data) ?
data->pwm_temp_map[nr] :
data->pwm_duty[nr];
else /* Automatic mode */
@@ -866,11 +889,11 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
struct it87_data *data = dev_get_drvdata(dev);
long val;
- if (strict_strtol(buf, 10, &val) < 0 || val < 0 || val > 255)
+ if (kstrtol(buf, 10, &val) < 0 || val < 0 || val > 255)
return -EINVAL;
mutex_lock(&data->update_lock);
- if (data->type == it8721) {
+ if (has_newer_autopwm(data)) {
/* If we are in automatic mode, the PWM duty cycle register
* is read-only so we can't write the value */
if (data->pwm_ctrl[nr] & 0x80) {
@@ -900,7 +923,7 @@ static ssize_t set_pwm_freq(struct device *dev,
unsigned long val;
int i;
- if (strict_strtoul(buf, 10, &val) < 0)
+ if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
/* Search for the nearest available frequency */
@@ -949,7 +972,7 @@ static ssize_t set_pwm_temp_map(struct device *dev,
return -EINVAL;
}
- if (strict_strtol(buf, 10, &val) < 0)
+ if (kstrtol(buf, 10, &val) < 0)
return -EINVAL;
switch (val) {
@@ -1001,7 +1024,7 @@ static ssize_t set_auto_pwm(struct device *dev,
int point = sensor_attr->index;
long val;
- if (strict_strtol(buf, 10, &val) < 0 || val < 0 || val > 255)
+ if (kstrtol(buf, 10, &val) < 0 || val < 0 || val > 255)
return -EINVAL;
mutex_lock(&data->update_lock);
@@ -1034,7 +1057,7 @@ static ssize_t set_auto_temp(struct device *dev,
int point = sensor_attr->index;
long val;
- if (strict_strtol(buf, 10, &val) < 0 || val < -128000 || val > 127000)
+ if (kstrtol(buf, 10, &val) < 0 || val < -128000 || val > 127000)
return -EINVAL;
mutex_lock(&data->update_lock);
@@ -1126,7 +1149,7 @@ static ssize_t set_fan16_min(struct device *dev, struct device_attribute *attr,
struct it87_data *data = dev_get_drvdata(dev);
long val;
- if (strict_strtol(buf, 10, &val) < 0)
+ if (kstrtol(buf, 10, &val) < 0)
return -EINVAL;
mutex_lock(&data->update_lock);
@@ -1180,7 +1203,7 @@ static ssize_t clear_intrusion(struct device *dev, struct device_attribute
long val;
int config;
- if (strict_strtol(buf, 10, &val) < 0 || val != 0)
+ if (kstrtol(buf, 10, &val) < 0 || val != 0)
return -EINVAL;
mutex_lock(&data->update_lock);
@@ -1231,7 +1254,7 @@ static ssize_t set_beep(struct device *dev, struct device_attribute *attr,
struct it87_data *data = dev_get_drvdata(dev);
long val;
- if (strict_strtol(buf, 10, &val) < 0
+ if (kstrtol(buf, 10, &val) < 0
|| (val != 0 && val != 1))
return -EINVAL;
@@ -1278,7 +1301,7 @@ static ssize_t store_vrm_reg(struct device *dev, struct device_attribute *attr,
struct it87_data *data = dev_get_drvdata(dev);
unsigned long val;
- if (strict_strtoul(buf, 10, &val) < 0)
+ if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
data->vrm = val;
@@ -1311,8 +1334,8 @@ static ssize_t show_label(struct device *dev, struct device_attribute *attr,
struct it87_data *data = dev_get_drvdata(dev);
int nr = to_sensor_dev_attr(attr)->index;
- return sprintf(buf, "%s\n", data->type == it8721 ? labels_it8721[nr]
- : labels[nr]);
+ return sprintf(buf, "%s\n", has_12mv_adc(data) ? labels_it8721[nr]
+ : labels[nr]);
}
static SENSOR_DEVICE_ATTR(in3_label, S_IRUGO, show_label, NULL, 0);
static SENSOR_DEVICE_ATTR(in7_label, S_IRUGO, show_label, NULL, 1);
@@ -1605,6 +1628,9 @@ static int __init it87_find(unsigned short *address,
case IT8721F_DEVID:
sio_data->type = it8721;
break;
+ case IT8728F_DEVID:
+ sio_data->type = it8728;
+ break;
case 0xffff: /* No device at all */
goto exit;
default:
@@ -1646,8 +1672,11 @@ static int __init it87_find(unsigned short *address,
superio_select(GPIO);
reg = superio_inb(IT87_SIO_GPIO3_REG);
- if (sio_data->type == it8721) {
- /* The IT8721F/IT8758E doesn't have VID pins at all */
+ if (sio_data->type == it8721 || sio_data->type == it8728) {
+ /*
+ * The IT8721F/IT8758E doesn't have VID pins at all,
+ * not sure about the IT8728F.
+ */
sio_data->skip_vid = 1;
} else {
/* We need at least 4 VID pins */
@@ -1692,7 +1721,8 @@ static int __init it87_find(unsigned short *address,
}
if (reg & (1 << 0))
sio_data->internal |= (1 << 0);
- if ((reg & (1 << 1)) || sio_data->type == it8721)
+ if ((reg & (1 << 1)) || sio_data->type == it8721 ||
+ sio_data->type == it8728)
sio_data->internal |= (1 << 1);
sio_data->beep_pin = superio_inb(IT87_SIO_BEEP_PIN_REG) & 0x3f;
@@ -1770,6 +1800,7 @@ static int __devinit it87_probe(struct platform_device *pdev)
"it8718",
"it8720",
"it8721",
+ "it8728",
};
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
@@ -1807,7 +1838,7 @@ static int __devinit it87_probe(struct platform_device *pdev)
enable_pwm_interface = it87_check_pwm(dev);
/* Starting with IT8721F, we handle scaling of internal voltages */
- if (data->type == it8721) {
+ if (has_12mv_adc(data)) {
if (sio_data->internal & (1 << 0))
data->in_scaled |= (1 << 3); /* in3 is AVCC */
if (sio_data->internal & (1 << 1))
@@ -2093,7 +2124,7 @@ static void __devinit it87_init_device(struct platform_device *pdev)
static void it87_update_pwm_ctrl(struct it87_data *data, int nr)
{
data->pwm_ctrl[nr] = it87_read_value(data, IT87_REG_PWM(nr));
- if (data->type == it8721) {
+ if (has_newer_autopwm(data)) {
data->pwm_temp_map[nr] = data->pwm_ctrl[nr] & 0x03;
data->pwm_duty[nr] = it87_read_value(data,
IT87_REG_PWM_DUTY(nr));
diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c
index 2d3d72805ff4..28c09eead36b 100644
--- a/drivers/hwmon/jc42.c
+++ b/drivers/hwmon/jc42.c
@@ -309,7 +309,7 @@ static ssize_t set_##value(struct device *dev, \
struct jc42_data *data = i2c_get_clientdata(client); \
int err, ret = count; \
long val; \
- if (strict_strtol(buf, 10, &val) < 0) \
+ if (kstrtol(buf, 10, &val) < 0) \
return -EINVAL; \
mutex_lock(&data->update_lock); \
data->value = jc42_temp_to_reg(val, data->extended); \
@@ -337,7 +337,7 @@ static ssize_t set_temp_crit_hyst(struct device *dev,
int err;
int ret = count;
- if (strict_strtoul(buf, 10, &val) < 0)
+ if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
diff = jc42_temp_from_reg(data->temp_crit) - val;
@@ -413,7 +413,7 @@ static struct attribute *jc42_attributes[] = {
NULL
};
-static mode_t jc42_attribute_mode(struct kobject *kobj,
+static umode_t jc42_attribute_mode(struct kobject *kobj,
struct attribute *attr, int index)
{
struct device *dev = container_of(kobj, struct device, kobj);
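
The mode_t to umode_t change here (and in max1668/max6650 further down) follows the kernel-wide switch of these sysfs callbacks to umode_t merged in this window: an attribute group's .is_visible hook now returns umode_t. Shape of the callback, sketched with hypothetical names:

#include <linux/stat.h>
#include <linux/sysfs.h>

static struct attribute *example_attrs[] = {
	/* &dev_attr_foo.attr, ... (hypothetical attributes) */
	NULL,
};

static umode_t example_is_visible(struct kobject *kobj,
				  struct attribute *attr, int index)
{
	/* Return 0 to hide the attribute, or its (possibly adjusted) mode. */
	return attr->mode;
}

static const struct attribute_group example_group = {
	.attrs		= example_attrs,
	.is_visible	= example_is_visible,
};
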
diff --git a/drivers/hwmon/jz4740-hwmon.c b/drivers/hwmon/jz4740-hwmon.c
index fea292d43407..5253d23361d9 100644
--- a/drivers/hwmon/jz4740-hwmon.c
+++ b/drivers/hwmon/jz4740-hwmon.c
@@ -59,7 +59,7 @@ static ssize_t jz4740_hwmon_read_adcin(struct device *dev,
{
struct jz4740_hwmon *hwmon = dev_get_drvdata(dev);
struct completion *completion = &hwmon->read_completion;
- unsigned long t;
+ long t;
unsigned long val;
int ret;
@@ -203,7 +203,7 @@ static int __devexit jz4740_hwmon_remove(struct platform_device *pdev)
return 0;
}
-struct platform_driver jz4740_hwmon_driver = {
+static struct platform_driver jz4740_hwmon_driver = {
.probe = jz4740_hwmon_probe,
.remove = __devexit_p(jz4740_hwmon_remove),
.driver = {
@@ -212,17 +212,7 @@ struct platform_driver jz4740_hwmon_driver = {
},
};
-static int __init jz4740_hwmon_init(void)
-{
- return platform_driver_register(&jz4740_hwmon_driver);
-}
-module_init(jz4740_hwmon_init);
-
-static void __exit jz4740_hwmon_exit(void)
-{
- platform_driver_unregister(&jz4740_hwmon_driver);
-}
-module_exit(jz4740_hwmon_exit);
+module_platform_driver(jz4740_hwmon_driver);
MODULE_DESCRIPTION("JZ4740 SoC HWMON driver");
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
diff --git a/drivers/hwmon/lm63.c b/drivers/hwmon/lm63.c
index 508cb291f71b..5e6457a6644d 100644
--- a/drivers/hwmon/lm63.c
+++ b/drivers/hwmon/lm63.c
@@ -47,10 +47,14 @@
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/sysfs.h>
+#include <linux/types.h>
/*
* Addresses to scan
- * Address is fully defined internally and cannot be changed.
+ * Address is fully defined internally and cannot be changed except for
+ * LM64 which has one pin dedicated to address selection.
+ * LM63 and LM96163 have address 0x4c.
+ * LM64 can have address 0x18 or 0x4e.
*/
static const unsigned short normal_i2c[] = { 0x18, 0x4c, 0x4e, I2C_CLIENT_END };
@@ -60,6 +64,7 @@ static const unsigned short normal_i2c[] = { 0x18, 0x4c, 0x4e, I2C_CLIENT_END };
*/
#define LM63_REG_CONFIG1 0x03
+#define LM63_REG_CONVRATE 0x04
#define LM63_REG_CONFIG2 0xBF
#define LM63_REG_CONFIG_FAN 0x4A
@@ -70,6 +75,9 @@ static const unsigned short normal_i2c[] = { 0x18, 0x4c, 0x4e, I2C_CLIENT_END };
#define LM63_REG_PWM_VALUE 0x4C
#define LM63_REG_PWM_FREQ 0x4D
+#define LM63_REG_LUT_TEMP_HYST 0x4F
+#define LM63_REG_LUT_TEMP(nr) (0x50 + 2 * (nr))
+#define LM63_REG_LUT_PWM(nr) (0x51 + 2 * (nr))
#define LM63_REG_LOCAL_TEMP 0x00
#define LM63_REG_LOCAL_HIGH 0x05
@@ -91,6 +99,16 @@ static const unsigned short normal_i2c[] = { 0x18, 0x4c, 0x4e, I2C_CLIENT_END };
#define LM63_REG_MAN_ID 0xFE
#define LM63_REG_CHIP_ID 0xFF
+#define LM96163_REG_TRUTHERM 0x30
+#define LM96163_REG_REMOTE_TEMP_U_MSB 0x31
+#define LM96163_REG_REMOTE_TEMP_U_LSB 0x32
+#define LM96163_REG_CONFIG_ENHANCED 0x45
+
+#define LM63_MAX_CONVRATE 9
+
+#define LM63_MAX_CONVRATE_HZ 32
+#define LM96163_MAX_CONVRATE_HZ 26
+
/*
* Conversions and various macros
* For tachometer counts, the LM63 uses 16-bit values.
@@ -112,15 +130,24 @@ static const unsigned short normal_i2c[] = { 0x18, 0x4c, 0x4e, I2C_CLIENT_END };
(val) >= 127000 ? 127 : \
(val) < 0 ? ((val) - 500) / 1000 : \
((val) + 500) / 1000)
+#define TEMP8U_TO_REG(val) ((val) <= 0 ? 0 : \
+ (val) >= 255000 ? 255 : \
+ ((val) + 500) / 1000)
#define TEMP11_FROM_REG(reg) ((reg) / 32 * 125)
#define TEMP11_TO_REG(val) ((val) <= -128000 ? 0x8000 : \
(val) >= 127875 ? 0x7FE0 : \
(val) < 0 ? ((val) - 62) / 125 * 32 : \
((val) + 62) / 125 * 32)
+#define TEMP11U_TO_REG(val) ((val) <= 0 ? 0 : \
+ (val) >= 255875 ? 0xFFE0 : \
+ ((val) + 62) / 125 * 32)
#define HYST_TO_REG(val) ((val) <= 0 ? 0 : \
(val) >= 127000 ? 127 : \
((val) + 500) / 1000)
+#define UPDATE_INTERVAL(max, rate) \
+ ((1000 << (LM63_MAX_CONVRATE - (rate))) / (max))
+
/*
* Functions declaration
*/
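
For scale, evaluating the UPDATE_INTERVAL() macro just introduced (LM63_MAX_CONVRATE is 9, integer division throughout):

/*
 *   UPDATE_INTERVAL(32, 9) = (1000 << 0) / 32 =    31 ms  (fastest LM63/LM64 rate)
 *   UPDATE_INTERVAL(32, 0) = (1000 << 9) / 32 = 16000 ms  (slowest rate)
 *   UPDATE_INTERVAL(26, 9) =  1000 / 26       =    38 ms  (fastest LM96163 rate)
 */
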
@@ -134,7 +161,7 @@ static struct lm63_data *lm63_update_device(struct device *dev);
static int lm63_detect(struct i2c_client *client, struct i2c_board_info *info);
static void lm63_init_client(struct i2c_client *client);
-enum chips { lm63, lm64 };
+enum chips { lm63, lm64, lm96163 };
/*
* Driver data (common to all clients)
@@ -143,6 +170,7 @@ enum chips { lm63, lm64 };
static const struct i2c_device_id lm63_id[] = {
{ "lm63", lm63 },
{ "lm64", lm64 },
+ { "lm96163", lm96163 },
{ }
};
MODULE_DEVICE_TABLE(i2c, lm63_id);
@@ -167,26 +195,53 @@ struct lm63_data {
struct device *hwmon_dev;
struct mutex update_lock;
char valid; /* zero until following fields are valid */
+ char lut_valid; /* zero until lut fields are valid */
unsigned long last_updated; /* in jiffies */
- int kind;
+ unsigned long lut_last_updated; /* in jiffies */
+ enum chips kind;
int temp2_offset;
+ int update_interval; /* in milliseconds */
+ int max_convrate_hz;
+ int lut_size; /* 8 or 12 */
+
/* registers values */
u8 config, config_fan;
u16 fan[2]; /* 0: input
1: low limit */
u8 pwm1_freq;
- u8 pwm1_value;
- s8 temp8[3]; /* 0: local input
+ u8 pwm1[13]; /* 0: current output
+ 1-12: lookup table */
+ s8 temp8[15]; /* 0: local input
1: local high limit
- 2: remote critical limit */
- s16 temp11[3]; /* 0: remote input
+ 2: remote critical limit
+ 3-14: lookup table */
+ s16 temp11[4]; /* 0: remote input
1: remote low limit
- 2: remote high limit */
+ 2: remote high limit
+ 3: remote offset */
+ u16 temp11u; /* remote input (unsigned) */
u8 temp2_crit_hyst;
+ u8 lut_temp_hyst;
u8 alarms;
+ bool pwm_highres;
+ bool lut_temp_highres;
+ bool remote_unsigned; /* true if unsigned remote upper limits */
+ bool trutherm;
};
+static inline int temp8_from_reg(struct lm63_data *data, int nr)
+{
+ if (data->remote_unsigned)
+ return TEMP8_FROM_REG((u8)data->temp8[nr]);
+ return TEMP8_FROM_REG(data->temp8[nr]);
+}
+
+static inline int lut_temp_from_reg(struct lm63_data *data, int nr)
+{
+ return data->temp8[nr] * (data->lut_temp_highres ? 500 : 1000);
+}
+
/*
* Sysfs callback functions and files
*/
@@ -204,7 +259,12 @@ static ssize_t set_fan(struct device *dev, struct device_attribute *dummy,
{
struct i2c_client *client = to_i2c_client(dev);
struct lm63_data *data = i2c_get_clientdata(client);
- unsigned long val = simple_strtoul(buf, NULL, 10);
+ unsigned long val;
+ int err;
+
+ err = kstrtoul(buf, 10, &val);
+ if (err)
+ return err;
mutex_lock(&data->update_lock);
data->fan[1] = FAN_TO_REG(val);
@@ -216,13 +276,22 @@ static ssize_t set_fan(struct device *dev, struct device_attribute *dummy,
return count;
}
-static ssize_t show_pwm1(struct device *dev, struct device_attribute *dummy,
+static ssize_t show_pwm1(struct device *dev, struct device_attribute *devattr,
char *buf)
{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct lm63_data *data = lm63_update_device(dev);
- return sprintf(buf, "%d\n", data->pwm1_value >= 2 * data->pwm1_freq ?
- 255 : (data->pwm1_value * 255 + data->pwm1_freq) /
- (2 * data->pwm1_freq));
+ int nr = attr->index;
+ int pwm;
+
+ if (data->pwm_highres)
+ pwm = data->pwm1[nr];
+ else
+ pwm = data->pwm1[nr] >= 2 * data->pwm1_freq ?
+ 255 : (data->pwm1[nr] * 255 + data->pwm1_freq) /
+ (2 * data->pwm1_freq);
+
+ return sprintf(buf, "%d\n", pwm);
}
static ssize_t set_pwm1(struct device *dev, struct device_attribute *dummy,
@@ -231,22 +300,26 @@ static ssize_t set_pwm1(struct device *dev, struct device_attribute *dummy,
struct i2c_client *client = to_i2c_client(dev);
struct lm63_data *data = i2c_get_clientdata(client);
unsigned long val;
-
+ int err;
+
if (!(data->config_fan & 0x20)) /* register is read-only */
return -EPERM;
- val = simple_strtoul(buf, NULL, 10);
+ err = kstrtoul(buf, 10, &val);
+ if (err)
+ return err;
+
+ val = SENSORS_LIMIT(val, 0, 255);
mutex_lock(&data->update_lock);
- data->pwm1_value = val <= 0 ? 0 :
- val >= 255 ? 2 * data->pwm1_freq :
- (val * data->pwm1_freq * 2 + 127) / 255;
- i2c_smbus_write_byte_data(client, LM63_REG_PWM_VALUE, data->pwm1_value);
+ data->pwm1[0] = data->pwm_highres ? val :
+ (val * data->pwm1_freq * 2 + 127) / 255;
+ i2c_smbus_write_byte_data(client, LM63_REG_PWM_VALUE, data->pwm1[0]);
mutex_unlock(&data->update_lock);
return count;
}
-static ssize_t show_pwm1_enable(struct device *dev, struct device_attribute *dummy,
- char *buf)
+static ssize_t show_pwm1_enable(struct device *dev,
+ struct device_attribute *dummy, char *buf)
{
struct lm63_data *data = lm63_update_device(dev);
return sprintf(buf, "%d\n", data->config_fan & 0x20 ? 1 : 2);
@@ -273,21 +346,47 @@ static ssize_t show_remote_temp8(struct device *dev,
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct lm63_data *data = lm63_update_device(dev);
- return sprintf(buf, "%d\n", TEMP8_FROM_REG(data->temp8[attr->index])
+ return sprintf(buf, "%d\n", temp8_from_reg(data, attr->index)
+ + data->temp2_offset);
+}
+
+static ssize_t show_lut_temp(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct lm63_data *data = lm63_update_device(dev);
+ return sprintf(buf, "%d\n", lut_temp_from_reg(data, attr->index)
+ + data->temp2_offset);
}
-static ssize_t set_local_temp8(struct device *dev,
- struct device_attribute *dummy,
- const char *buf, size_t count)
+static ssize_t set_temp8(struct device *dev, struct device_attribute *devattr,
+ const char *buf, size_t count)
{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct i2c_client *client = to_i2c_client(dev);
struct lm63_data *data = i2c_get_clientdata(client);
- long val = simple_strtol(buf, NULL, 10);
+ int nr = attr->index;
+ int reg = nr == 2 ? LM63_REG_REMOTE_TCRIT : LM63_REG_LOCAL_HIGH;
+ long val;
+ int err;
+ int temp;
+
+ err = kstrtol(buf, 10, &val);
+ if (err)
+ return err;
mutex_lock(&data->update_lock);
- data->temp8[1] = TEMP8_TO_REG(val);
- i2c_smbus_write_byte_data(client, LM63_REG_LOCAL_HIGH, data->temp8[1]);
+ if (nr == 2) {
+ if (data->remote_unsigned)
+ temp = TEMP8U_TO_REG(val - data->temp2_offset);
+ else
+ temp = TEMP8_TO_REG(val - data->temp2_offset);
+ } else {
+ temp = TEMP8_TO_REG(val);
+ }
+ data->temp8[nr] = temp;
+ i2c_smbus_write_byte_data(client, reg, temp);
mutex_unlock(&data->update_lock);
return count;
}
@@ -297,28 +396,56 @@ static ssize_t show_temp11(struct device *dev, struct device_attribute *devattr,
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct lm63_data *data = lm63_update_device(dev);
- return sprintf(buf, "%d\n", TEMP11_FROM_REG(data->temp11[attr->index])
- + data->temp2_offset);
+ int nr = attr->index;
+ int temp;
+
+ if (!nr) {
+ /*
+ * Use unsigned temperature unless its value is zero.
+ * If it is zero, use signed temperature.
+ */
+ if (data->temp11u)
+ temp = TEMP11_FROM_REG(data->temp11u);
+ else
+ temp = TEMP11_FROM_REG(data->temp11[nr]);
+ } else {
+ if (data->remote_unsigned && nr == 2)
+ temp = TEMP11_FROM_REG((u16)data->temp11[nr]);
+ else
+ temp = TEMP11_FROM_REG(data->temp11[nr]);
+ }
+ return sprintf(buf, "%d\n", temp + data->temp2_offset);
}
static ssize_t set_temp11(struct device *dev, struct device_attribute *devattr,
const char *buf, size_t count)
{
- static const u8 reg[4] = {
+ static const u8 reg[6] = {
LM63_REG_REMOTE_LOW_MSB,
LM63_REG_REMOTE_LOW_LSB,
LM63_REG_REMOTE_HIGH_MSB,
LM63_REG_REMOTE_HIGH_LSB,
+ LM63_REG_REMOTE_OFFSET_MSB,
+ LM63_REG_REMOTE_OFFSET_LSB,
};
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct i2c_client *client = to_i2c_client(dev);
struct lm63_data *data = i2c_get_clientdata(client);
- long val = simple_strtol(buf, NULL, 10);
+ long val;
+ int err;
int nr = attr->index;
+ err = kstrtol(buf, 10, &val);
+ if (err)
+ return err;
+
mutex_lock(&data->update_lock);
- data->temp11[nr] = TEMP11_TO_REG(val - data->temp2_offset);
+ if (data->remote_unsigned && nr == 2)
+ data->temp11[nr] = TEMP11U_TO_REG(val - data->temp2_offset);
+ else
+ data->temp11[nr] = TEMP11_TO_REG(val - data->temp2_offset);
+
i2c_smbus_write_byte_data(client, reg[(nr - 1) * 2],
data->temp11[nr] >> 8);
i2c_smbus_write_byte_data(client, reg[(nr - 1) * 2 + 1],
@@ -327,35 +454,143 @@ static ssize_t set_temp11(struct device *dev, struct device_attribute *devattr,
return count;
}
-/* Hysteresis register holds a relative value, while we want to present
- an absolute to user-space */
-static ssize_t show_temp2_crit_hyst(struct device *dev, struct device_attribute *dummy,
- char *buf)
+/*
+ * Hysteresis register holds a relative value, while we want to present
+ * an absolute to user-space
+ */
+static ssize_t show_temp2_crit_hyst(struct device *dev,
+ struct device_attribute *dummy, char *buf)
{
struct lm63_data *data = lm63_update_device(dev);
- return sprintf(buf, "%d\n", TEMP8_FROM_REG(data->temp8[2])
+ return sprintf(buf, "%d\n", temp8_from_reg(data, 2)
+ data->temp2_offset
- TEMP8_FROM_REG(data->temp2_crit_hyst));
}
-/* And now the other way around, user-space provides an absolute
- hysteresis value and we have to store a relative one */
-static ssize_t set_temp2_crit_hyst(struct device *dev, struct device_attribute *dummy,
+static ssize_t show_lut_temp_hyst(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct lm63_data *data = lm63_update_device(dev);
+
+ return sprintf(buf, "%d\n", lut_temp_from_reg(data, attr->index)
+ + data->temp2_offset
+ - TEMP8_FROM_REG(data->lut_temp_hyst));
+}
+
+/*
+ * And now the other way around, user-space provides an absolute
+ * hysteresis value and we have to store a relative one
+ */
+static ssize_t set_temp2_crit_hyst(struct device *dev,
+ struct device_attribute *dummy,
const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct lm63_data *data = i2c_get_clientdata(client);
- long val = simple_strtol(buf, NULL, 10);
+ long val;
+ int err;
long hyst;
+ err = kstrtol(buf, 10, &val);
+ if (err)
+ return err;
+
mutex_lock(&data->update_lock);
- hyst = TEMP8_FROM_REG(data->temp8[2]) + data->temp2_offset - val;
+ hyst = temp8_from_reg(data, 2) + data->temp2_offset - val;
i2c_smbus_write_byte_data(client, LM63_REG_REMOTE_TCRIT_HYST,
HYST_TO_REG(hyst));
mutex_unlock(&data->update_lock);
return count;
}
+/*
+ * Set conversion rate.
+ * client->update_lock must be held when calling this function.
+ */
+static void lm63_set_convrate(struct i2c_client *client, struct lm63_data *data,
+ unsigned int interval)
+{
+ int i;
+ unsigned int update_interval;
+
+ /* Shift calculations to avoid rounding errors */
+ interval <<= 6;
+
+ /* find the nearest update rate */
+ update_interval = (1 << (LM63_MAX_CONVRATE + 6)) * 1000
+ / data->max_convrate_hz;
+ for (i = 0; i < LM63_MAX_CONVRATE; i++, update_interval >>= 1)
+ if (interval >= update_interval * 3 / 4)
+ break;
+
+ i2c_smbus_write_byte_data(client, LM63_REG_CONVRATE, i);
+ data->update_interval = UPDATE_INTERVAL(data->max_convrate_hz, i);
+}
+
+static ssize_t show_update_interval(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct lm63_data *data = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%u\n", data->update_interval);
+}
+
+static ssize_t set_update_interval(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct lm63_data *data = i2c_get_clientdata(client);
+ unsigned long val;
+ int err;
+
+ err = kstrtoul(buf, 10, &val);
+ if (err)
+ return err;
+
+ mutex_lock(&data->update_lock);
+ lm63_set_convrate(client, data, SENSORS_LIMIT(val, 0, 100000));
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
+static ssize_t show_type(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct lm63_data *data = i2c_get_clientdata(client);
+
+ return sprintf(buf, data->trutherm ? "1\n" : "2\n");
+}
+
+static ssize_t set_type(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct lm63_data *data = i2c_get_clientdata(client);
+ unsigned long val;
+ int ret;
+ u8 reg;
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret < 0)
+ return ret;
+ if (val != 1 && val != 2)
+ return -EINVAL;
+
+ mutex_lock(&data->update_lock);
+ data->trutherm = val == 1;
+ reg = i2c_smbus_read_byte_data(client, LM96163_REG_TRUTHERM) & ~0x02;
+ i2c_smbus_write_byte_data(client, LM96163_REG_TRUTHERM,
+ reg | (data->trutherm ? 0x02 : 0x00));
+ data->valid = 0;
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
static ssize_t show_alarms(struct device *dev, struct device_attribute *dummy,
char *buf)
{
@@ -377,27 +612,87 @@ static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, show_fan, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_min, S_IWUSR | S_IRUGO, show_fan,
set_fan, 1);
-static DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm1, set_pwm1);
+static SENSOR_DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm1, set_pwm1, 0);
static DEVICE_ATTR(pwm1_enable, S_IRUGO, show_pwm1_enable, NULL);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point1_pwm, S_IRUGO, show_pwm1, NULL, 1);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point1_temp, S_IRUGO,
+ show_lut_temp, NULL, 3);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point1_temp_hyst, S_IRUGO,
+ show_lut_temp_hyst, NULL, 3);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point2_pwm, S_IRUGO, show_pwm1, NULL, 2);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point2_temp, S_IRUGO,
+ show_lut_temp, NULL, 4);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point2_temp_hyst, S_IRUGO,
+ show_lut_temp_hyst, NULL, 4);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point3_pwm, S_IRUGO, show_pwm1, NULL, 3);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point3_temp, S_IRUGO,
+ show_lut_temp, NULL, 5);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point3_temp_hyst, S_IRUGO,
+ show_lut_temp_hyst, NULL, 5);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point4_pwm, S_IRUGO, show_pwm1, NULL, 4);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point4_temp, S_IRUGO,
+ show_lut_temp, NULL, 6);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point4_temp_hyst, S_IRUGO,
+ show_lut_temp_hyst, NULL, 6);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point5_pwm, S_IRUGO, show_pwm1, NULL, 5);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point5_temp, S_IRUGO,
+ show_lut_temp, NULL, 7);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point5_temp_hyst, S_IRUGO,
+ show_lut_temp_hyst, NULL, 7);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point6_pwm, S_IRUGO, show_pwm1, NULL, 6);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point6_temp, S_IRUGO,
+ show_lut_temp, NULL, 8);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point6_temp_hyst, S_IRUGO,
+ show_lut_temp_hyst, NULL, 8);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point7_pwm, S_IRUGO, show_pwm1, NULL, 7);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point7_temp, S_IRUGO,
+ show_lut_temp, NULL, 9);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point7_temp_hyst, S_IRUGO,
+ show_lut_temp_hyst, NULL, 9);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point8_pwm, S_IRUGO, show_pwm1, NULL, 8);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point8_temp, S_IRUGO,
+ show_lut_temp, NULL, 10);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point8_temp_hyst, S_IRUGO,
+ show_lut_temp_hyst, NULL, 10);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point9_pwm, S_IRUGO, show_pwm1, NULL, 9);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point9_temp, S_IRUGO,
+ show_lut_temp, NULL, 11);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point9_temp_hyst, S_IRUGO,
+ show_lut_temp_hyst, NULL, 11);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point10_pwm, S_IRUGO, show_pwm1, NULL, 10);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point10_temp, S_IRUGO,
+ show_lut_temp, NULL, 12);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point10_temp_hyst, S_IRUGO,
+ show_lut_temp_hyst, NULL, 12);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point11_pwm, S_IRUGO, show_pwm1, NULL, 11);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point11_temp, S_IRUGO,
+ show_lut_temp, NULL, 13);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point11_temp_hyst, S_IRUGO,
+ show_lut_temp_hyst, NULL, 13);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point12_pwm, S_IRUGO, show_pwm1, NULL, 12);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point12_temp, S_IRUGO,
+ show_lut_temp, NULL, 14);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point12_temp_hyst, S_IRUGO,
+ show_lut_temp_hyst, NULL, 14);
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_local_temp8, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_local_temp8,
- set_local_temp8, 1);
+ set_temp8, 1);
static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp11, NULL, 0);
static SENSOR_DEVICE_ATTR(temp2_min, S_IWUSR | S_IRUGO, show_temp11,
set_temp11, 1);
static SENSOR_DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_temp11,
set_temp11, 2);
-/*
- * On LM63, temp2_crit can be set only once, which should be job
- * of the bootloader.
- */
+static SENSOR_DEVICE_ATTR(temp2_offset, S_IWUSR | S_IRUGO, show_temp11,
+ set_temp11, 3);
static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, show_remote_temp8,
- NULL, 2);
+ set_temp8, 2);
static DEVICE_ATTR(temp2_crit_hyst, S_IWUSR | S_IRUGO, show_temp2_crit_hyst,
set_temp2_crit_hyst);
+static DEVICE_ATTR(temp2_type, S_IWUSR | S_IRUGO, show_type, set_type);
+
/* Individual alarm files */
static SENSOR_DEVICE_ATTR(fan1_min_alarm, S_IRUGO, show_alarm, NULL, 0);
static SENSOR_DEVICE_ATTR(temp2_crit_alarm, S_IRUGO, show_alarm, NULL, 1);
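
The sensor attribute index used above doubles as an array slot, matching the array layout comments in struct lm63_data earlier in this diff. Points 9-12 are split into the separate lm63_group_extra_lut group below because only the LM96163's lookup table has 12 entries.

/*
 * Index mapping used by the auto_point attributes (N = 1..12):
 *   pwm1_auto_pointN_pwm          -> data->pwm1[N]
 *   pwm1_auto_pointN_temp(_hyst)  -> data->temp8[N + 2]
 */
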
@@ -408,14 +703,43 @@ static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 6);
/* Raw alarm file for compatibility */
static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
+static DEVICE_ATTR(update_interval, S_IRUGO | S_IWUSR, show_update_interval,
+ set_update_interval);
+
static struct attribute *lm63_attributes[] = {
- &dev_attr_pwm1.attr,
+ &sensor_dev_attr_pwm1.dev_attr.attr,
&dev_attr_pwm1_enable.attr,
+ &sensor_dev_attr_pwm1_auto_point1_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point1_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point1_temp_hyst.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point2_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point2_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point2_temp_hyst.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point3_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point3_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point3_temp_hyst.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point4_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point4_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point4_temp_hyst.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point5_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point5_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point5_temp_hyst.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point6_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point6_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point6_temp_hyst.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point7_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point7_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point7_temp_hyst.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point8_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point8_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point8_temp_hyst.dev_attr.attr,
+
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_temp2_input.dev_attr.attr,
&sensor_dev_attr_temp2_min.dev_attr.attr,
&sensor_dev_attr_temp1_max.dev_attr.attr,
&sensor_dev_attr_temp2_max.dev_attr.attr,
+ &sensor_dev_attr_temp2_offset.dev_attr.attr,
&sensor_dev_attr_temp2_crit.dev_attr.attr,
&dev_attr_temp2_crit_hyst.attr,
@@ -425,10 +749,54 @@ static struct attribute *lm63_attributes[] = {
&sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
&sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
&dev_attr_alarms.attr,
+ &dev_attr_update_interval.attr,
NULL
};
+static struct attribute *lm63_attributes_extra_lut[] = {
+ &sensor_dev_attr_pwm1_auto_point9_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point9_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point9_temp_hyst.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point10_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point10_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point10_temp_hyst.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point11_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point11_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point11_temp_hyst.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point12_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point12_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point12_temp_hyst.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group lm63_group_extra_lut = {
+ .attrs = lm63_attributes_extra_lut,
+};
+
+/*
+ * On LM63, temp2_crit can be set only once, which should be job
+ * of the bootloader.
+ * On LM64, temp2_crit can always be set.
+ * On LM96163, temp2_crit can be set if bit 1 of the configuration
+ * register is true.
+ */
+static umode_t lm63_attribute_mode(struct kobject *kobj,
+ struct attribute *attr, int index)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct i2c_client *client = to_i2c_client(dev);
+ struct lm63_data *data = i2c_get_clientdata(client);
+
+ if (attr == &sensor_dev_attr_temp2_crit.dev_attr.attr
+ && (data->kind == lm64 ||
+ (data->kind == lm96163 && (data->config & 0x02))))
+ return attr->mode | S_IWUSR;
+
+ return attr->mode;
+}
+
static const struct attribute_group lm63_group = {
+ .is_visible = lm63_attribute_mode,
.attrs = lm63_attributes,
};
@@ -487,6 +855,8 @@ static int lm63_detect(struct i2c_client *new_client,
strlcpy(info->type, "lm63", I2C_NAME_SIZE);
else if (chip_id == 0x51 && (address == 0x18 || address == 0x4e))
strlcpy(info->type, "lm64", I2C_NAME_SIZE);
+ else if (chip_id == 0x49 && address == 0x4c)
+ strlcpy(info->type, "lm96163", I2C_NAME_SIZE);
else
return -ENODEV;
@@ -518,12 +888,24 @@ static int lm63_probe(struct i2c_client *new_client,
lm63_init_client(new_client);
/* Register sysfs hooks */
- if ((err = sysfs_create_group(&new_client->dev.kobj,
- &lm63_group)))
+ err = sysfs_create_group(&new_client->dev.kobj, &lm63_group);
+ if (err)
goto exit_free;
if (data->config & 0x04) { /* tachometer enabled */
- if ((err = sysfs_create_group(&new_client->dev.kobj,
- &lm63_group_fan1)))
+ err = sysfs_create_group(&new_client->dev.kobj,
+ &lm63_group_fan1);
+ if (err)
+ goto exit_remove_files;
+ }
+ if (data->kind == lm96163) {
+ err = device_create_file(&new_client->dev,
+ &dev_attr_temp2_type);
+ if (err)
+ goto exit_remove_files;
+
+ err = sysfs_create_group(&new_client->dev.kobj,
+ &lm63_group_extra_lut);
+ if (err)
goto exit_remove_files;
}
@@ -538,17 +920,25 @@ static int lm63_probe(struct i2c_client *new_client,
exit_remove_files:
sysfs_remove_group(&new_client->dev.kobj, &lm63_group);
sysfs_remove_group(&new_client->dev.kobj, &lm63_group_fan1);
+ if (data->kind == lm96163) {
+ device_remove_file(&new_client->dev, &dev_attr_temp2_type);
+ sysfs_remove_group(&new_client->dev.kobj,
+ &lm63_group_extra_lut);
+ }
exit_free:
kfree(data);
exit:
return err;
}
-/* Idealy we shouldn't have to initialize anything, since the BIOS
- should have taken care of everything */
+/*
+ * Ideally we shouldn't have to initialize anything, since the BIOS
+ * should have taken care of everything
+ */
static void lm63_init_client(struct i2c_client *client)
{
struct lm63_data *data = i2c_get_clientdata(client);
+ u8 convrate;
data->config = i2c_smbus_read_byte_data(client, LM63_REG_CONFIG1);
data->config_fan = i2c_smbus_read_byte_data(client,
@@ -561,16 +951,57 @@ static void lm63_init_client(struct i2c_client *client)
i2c_smbus_write_byte_data(client, LM63_REG_CONFIG1,
data->config);
}
+ /* Tachometer is always enabled on LM64 */
+ if (data->kind == lm64)
+ data->config |= 0x04;
/* We may need pwm1_freq before ever updating the client data */
data->pwm1_freq = i2c_smbus_read_byte_data(client, LM63_REG_PWM_FREQ);
if (data->pwm1_freq == 0)
data->pwm1_freq = 1;
+ switch (data->kind) {
+ case lm63:
+ case lm64:
+ data->max_convrate_hz = LM63_MAX_CONVRATE_HZ;
+ data->lut_size = 8;
+ break;
+ case lm96163:
+ data->max_convrate_hz = LM96163_MAX_CONVRATE_HZ;
+ data->lut_size = 12;
+ data->trutherm
+ = i2c_smbus_read_byte_data(client,
+ LM96163_REG_TRUTHERM) & 0x02;
+ break;
+ }
+ convrate = i2c_smbus_read_byte_data(client, LM63_REG_CONVRATE);
+ if (unlikely(convrate > LM63_MAX_CONVRATE))
+ convrate = LM63_MAX_CONVRATE;
+ data->update_interval = UPDATE_INTERVAL(data->max_convrate_hz,
+ convrate);
+
+ /*
+ * For LM96163, check if high resolution PWM
+ * and unsigned temperature format is enabled.
+ */
+ if (data->kind == lm96163) {
+ u8 config_enhanced
+ = i2c_smbus_read_byte_data(client,
+ LM96163_REG_CONFIG_ENHANCED);
+ if (config_enhanced & 0x20)
+ data->lut_temp_highres = true;
+ if ((config_enhanced & 0x10)
+ && !(data->config_fan & 0x08) && data->pwm1_freq == 8)
+ data->pwm_highres = true;
+ if (config_enhanced & 0x08)
+ data->remote_unsigned = true;
+ }
+
/* Show some debug info about the LM63 configuration */
- dev_dbg(&client->dev, "Alert/tach pin configured for %s\n",
- (data->config & 0x04) ? "tachometer input" :
- "alert output");
+ if (data->kind == lm63)
+ dev_dbg(&client->dev, "Alert/tach pin configured for %s\n",
+ (data->config & 0x04) ? "tachometer input" :
+ "alert output");
dev_dbg(&client->dev, "PWM clock %s kHz, output frequency %u Hz\n",
(data->config_fan & 0x08) ? "1.4" : "360",
((data->config_fan & 0x08) ? 700 : 180000) / data->pwm1_freq);
@@ -586,6 +1017,10 @@ static int lm63_remove(struct i2c_client *client)
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &lm63_group);
sysfs_remove_group(&client->dev.kobj, &lm63_group_fan1);
+ if (data->kind == lm96163) {
+ device_remove_file(&client->dev, &dev_attr_temp2_type);
+ sysfs_remove_group(&client->dev.kobj, &lm63_group_extra_lut);
+ }
kfree(data);
return 0;
@@ -595,10 +1030,15 @@ static struct lm63_data *lm63_update_device(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct lm63_data *data = i2c_get_clientdata(client);
+ unsigned long next_update;
+ int i;
mutex_lock(&data->update_lock);
- if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
+ next_update = data->last_updated
+ + msecs_to_jiffies(data->update_interval) + 1;
+
+ if (time_after(jiffies, next_update) || !data->valid) {
if (data->config & 0x04) { /* tachometer enabled */
/* order matters for fan1_input */
data->fan[0] = i2c_smbus_read_byte_data(client,
@@ -615,8 +1055,8 @@ static struct lm63_data *lm63_update_device(struct device *dev)
LM63_REG_PWM_FREQ);
if (data->pwm1_freq == 0)
data->pwm1_freq = 1;
- data->pwm1_value = i2c_smbus_read_byte_data(client,
- LM63_REG_PWM_VALUE);
+ data->pwm1[0] = i2c_smbus_read_byte_data(client,
+ LM63_REG_PWM_VALUE);
data->temp8[0] = i2c_smbus_read_byte_data(client,
LM63_REG_LOCAL_TEMP);
@@ -636,6 +1076,17 @@ static struct lm63_data *lm63_update_device(struct device *dev)
LM63_REG_REMOTE_HIGH_MSB) << 8)
| i2c_smbus_read_byte_data(client,
LM63_REG_REMOTE_HIGH_LSB);
+ data->temp11[3] = (i2c_smbus_read_byte_data(client,
+ LM63_REG_REMOTE_OFFSET_MSB) << 8)
+ | i2c_smbus_read_byte_data(client,
+ LM63_REG_REMOTE_OFFSET_LSB);
+
+ if (data->kind == lm96163)
+ data->temp11u = (i2c_smbus_read_byte_data(client,
+ LM96163_REG_REMOTE_TEMP_U_MSB) << 8)
+ | i2c_smbus_read_byte_data(client,
+ LM96163_REG_REMOTE_TEMP_U_LSB);
+
data->temp8[2] = i2c_smbus_read_byte_data(client,
LM63_REG_REMOTE_TCRIT);
data->temp2_crit_hyst = i2c_smbus_read_byte_data(client,
@@ -648,6 +1099,21 @@ static struct lm63_data *lm63_update_device(struct device *dev)
data->valid = 1;
}
+ if (time_after(jiffies, data->lut_last_updated + 5 * HZ) ||
+ !data->lut_valid) {
+ for (i = 0; i < data->lut_size; i++) {
+ data->pwm1[1 + i] = i2c_smbus_read_byte_data(client,
+ LM63_REG_LUT_PWM(i));
+ data->temp8[3 + i] = i2c_smbus_read_byte_data(client,
+ LM63_REG_LUT_TEMP(i));
+ }
+ data->lut_temp_hyst = i2c_smbus_read_byte_data(client,
+ LM63_REG_LUT_TEMP_HYST);
+
+ data->lut_last_updated = jiffies;
+ data->lut_valid = 1;
+ }
+
mutex_unlock(&data->update_lock);
return data;
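
lm63_update_device() now keeps two cache lifetimes: the main registers follow the configurable update_interval, while the lookup table is only re-read every 5 seconds. A condensed sketch of the jiffies-based caching idiom it builds on, with placeholder names:

#include <linux/jiffies.h>
#include <linux/mutex.h>

struct cached_data {
	struct mutex update_lock;
	bool valid;
	unsigned long last_updated;	/* jiffies of the last refresh */
	int update_interval;		/* in milliseconds */
};

static void example_refresh(struct cached_data *d)
{
	unsigned long next_update;

	mutex_lock(&d->update_lock);
	next_update = d->last_updated +
		      msecs_to_jiffies(d->update_interval) + 1;
	if (time_after(jiffies, next_update) || !d->valid) {
		/* ... read the hardware registers here ... */
		d->last_updated = jiffies;
		d->valid = true;
	}
	mutex_unlock(&d->update_lock);
}
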
diff --git a/drivers/hwmon/lm73.c b/drivers/hwmon/lm73.c
index 9e64d96620d3..9c8093c4b307 100644
--- a/drivers/hwmon/lm73.c
+++ b/drivers/hwmon/lm73.c
@@ -50,7 +50,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da,
long temp;
short value;
- int status = strict_strtol(buf, 10, &temp);
+ int status = kstrtol(buf, 10, &temp);
if (status < 0)
return status;
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index 1888dd0fc05f..b3311b1d3d92 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -93,6 +93,10 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *da,
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
struct lm75_data *data = lm75_update_device(dev);
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
return sprintf(buf, "%d\n",
LM75_TEMP_FROM_REG(data->temp[attr->index]));
}
@@ -107,7 +111,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da,
long temp;
int error;
- error = strict_strtol(buf, 10, &temp);
+ error = kstrtol(buf, 10, &temp);
if (error)
return error;
@@ -402,6 +406,7 @@ static struct lm75_data *lm75_update_device(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct lm75_data *data = i2c_get_clientdata(client);
+ struct lm75_data *ret = data;
mutex_lock(&data->update_lock);
@@ -414,19 +419,23 @@ static struct lm75_data *lm75_update_device(struct device *dev)
int status;
status = lm75_read_value(client, LM75_REG_TEMP[i]);
- if (status < 0)
- dev_dbg(&client->dev, "reg %d, err %d\n",
- LM75_REG_TEMP[i], status);
- else
- data->temp[i] = status;
+ if (unlikely(status < 0)) {
+ dev_dbg(dev,
+ "LM75: Failed to read value: reg %d, error %d\n",
+ LM75_REG_TEMP[i], status);
+ ret = ERR_PTR(status);
+ data->valid = 0;
+ goto abort;
+ }
+ data->temp[i] = status;
}
data->last_updated = jiffies;
data->valid = 1;
}
+abort:
mutex_unlock(&data->update_lock);
-
- return data;
+ return ret;
}
/*-----------------------------------------------------------------------*/
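
The lm75 change stops returning stale data on SMBus read failures: the update function hands back an ERR_PTR() and show_temp() checks it with IS_ERR(). The same idea is applied to ltc4261 below (marking the cache invalid on error). A self-contained sketch, with hypothetical helper and field names:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/kernel.h>

struct example_data {		/* hypothetical driver state */
	int value;
	char valid;
};

static int example_read_register(struct example_data *data)
{
	return 0;		/* stub: a real driver would do an SMBus read here */
}

static struct example_data *example_update(struct device *dev)
{
	struct example_data *data = dev_get_drvdata(dev);
	struct example_data *ret = data;
	int status;

	status = example_read_register(data);
	if (unlikely(status < 0)) {
		ret = ERR_PTR(status);	/* encode the errno in the pointer */
		data->valid = 0;	/* force a fresh read next time */
	}
	return ret;
}

static ssize_t show_example(struct device *dev, struct device_attribute *da,
			    char *buf)
{
	struct example_data *data = example_update(dev);

	if (IS_ERR(data))
		return PTR_ERR(data);
	return sprintf(buf, "%d\n", data->value);
}
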
diff --git a/drivers/hwmon/lm75.h b/drivers/hwmon/lm75.h
index e547a3eb4de3..89aa9098ba5b 100644
--- a/drivers/hwmon/lm75.h
+++ b/drivers/hwmon/lm75.h
@@ -1,6 +1,6 @@
/*
lm75.h - Part of lm_sensors, Linux kernel modules for hardware
- monitoring
+ monitoring
Copyright (c) 2003 Mark M. Hoffman <mhoffman@lightlink.com>
This program is free software; you can redistribute it and/or modify
@@ -37,7 +37,7 @@
static inline u16 LM75_TEMP_TO_REG(long temp)
{
int ntemp = SENSORS_LIMIT(temp, LM75_TEMP_MIN, LM75_TEMP_MAX);
- ntemp += (ntemp<0 ? -250 : 250);
+ ntemp += (ntemp < 0 ? -250 : 250);
return (u16)((ntemp / 500) << 7);
}
@@ -47,4 +47,3 @@ static inline int LM75_TEMP_FROM_REG(u16 reg)
guarantee arithmetic shift and preserve the sign */
return ((s16)reg / 128) * 500;
}
-
diff --git a/drivers/hwmon/lm80.c b/drivers/hwmon/lm80.c
index 18a0e6c5fe88..0891b38ffec0 100644
--- a/drivers/hwmon/lm80.c
+++ b/drivers/hwmon/lm80.c
@@ -66,19 +66,19 @@ static const unsigned short normal_i2c[] = { 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d,
these macros are called: arguments may be evaluated more than once.
Fixing this is just not worth it. */
-#define IN_TO_REG(val) (SENSORS_LIMIT(((val)+5)/10,0,255))
-#define IN_FROM_REG(val) ((val)*10)
+#define IN_TO_REG(val) (SENSORS_LIMIT(((val) + 5) / 10, 0, 255))
+#define IN_FROM_REG(val) ((val) * 10)
static inline unsigned char FAN_TO_REG(unsigned rpm, unsigned div)
{
if (rpm == 0)
return 255;
rpm = SENSORS_LIMIT(rpm, 1, 1000000);
- return SENSORS_LIMIT((1350000 + rpm*div / 2) / (rpm*div), 1, 254);
+ return SENSORS_LIMIT((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
}
-#define FAN_FROM_REG(val,div) ((val)==0?-1:\
- (val)==255?0:1350000/((div)*(val)))
+#define FAN_FROM_REG(val, div) ((val) == 0 ? -1 : \
+ (val) == 255 ? 0 : 1350000/((div) * (val)))
static inline long TEMP_FROM_REG(u16 temp)
{
@@ -93,10 +93,11 @@ static inline long TEMP_FROM_REG(u16 temp)
return res / 10;
}
-#define TEMP_LIMIT_FROM_REG(val) (((val)>0x80?(val)-0x100:(val))*1000)
+#define TEMP_LIMIT_FROM_REG(val) (((val) > 0x80 ? \
+ (val) - 0x100 : (val)) * 1000)
-#define TEMP_LIMIT_TO_REG(val) SENSORS_LIMIT((val)<0?\
- ((val)-500)/1000:((val)+500)/1000,0,255)
+#define TEMP_LIMIT_TO_REG(val) SENSORS_LIMIT((val) < 0 ? \
+ ((val) - 500) / 1000 : ((val) + 500) / 1000, 0, 255)
#define DIV_FROM_REG(val) (1 << (val))
@@ -164,7 +165,8 @@ static struct i2c_driver lm80_driver = {
*/
#define show_in(suffix, value) \
-static ssize_t show_in_##suffix(struct device *dev, struct device_attribute *attr, char *buf) \
+static ssize_t show_in_##suffix(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
{ \
int nr = to_sensor_dev_attr(attr)->index; \
struct lm80_data *data = lm80_update_device(dev); \
@@ -175,14 +177,14 @@ show_in(max, in_max)
show_in(input, in)
#define set_in(suffix, value, reg) \
-static ssize_t set_in_##suffix(struct device *dev, struct device_attribute *attr, const char *buf, \
- size_t count) \
+static ssize_t set_in_##suffix(struct device *dev, \
+ struct device_attribute *attr, const char *buf, size_t count) \
{ \
int nr = to_sensor_dev_attr(attr)->index; \
struct i2c_client *client = to_i2c_client(dev); \
struct lm80_data *data = i2c_get_clientdata(client); \
long val = simple_strtol(buf, NULL, 10); \
- \
+\
mutex_lock(&data->update_lock);\
data->value[nr] = IN_TO_REG(val); \
lm80_write_value(client, reg(nr), data->value[nr]); \
@@ -193,7 +195,8 @@ set_in(min, in_min, LM80_REG_IN_MIN)
set_in(max, in_max, LM80_REG_IN_MAX)
#define show_fan(suffix, value) \
-static ssize_t show_fan_##suffix(struct device *dev, struct device_attribute *attr, char *buf) \
+static ssize_t show_fan_##suffix(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
{ \
int nr = to_sensor_dev_attr(attr)->index; \
struct lm80_data *data = lm80_update_device(dev); \
@@ -245,10 +248,18 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
DIV_FROM_REG(data->fan_div[nr]));
switch (val) {
- case 1: data->fan_div[nr] = 0; break;
- case 2: data->fan_div[nr] = 1; break;
- case 4: data->fan_div[nr] = 2; break;
- case 8: data->fan_div[nr] = 3; break;
+ case 1:
+ data->fan_div[nr] = 0;
+ break;
+ case 2:
+ data->fan_div[nr] = 1;
+ break;
+ case 4:
+ data->fan_div[nr] = 2;
+ break;
+ case 8:
+ data->fan_div[nr] = 3;
+ break;
default:
dev_err(&client->dev, "fan_div value %ld not "
"supported. Choose one of 1, 2, 4 or 8!\n", val);
@@ -268,14 +279,16 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
return count;
}
-static ssize_t show_temp_input1(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_temp_input1(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct lm80_data *data = lm80_update_device(dev);
return sprintf(buf, "%ld\n", TEMP_FROM_REG(data->temp));
}
#define show_temp(suffix, value) \
-static ssize_t show_temp_##suffix(struct device *dev, struct device_attribute *attr, char *buf) \
+static ssize_t show_temp_##suffix(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
{ \
struct lm80_data *data = lm80_update_device(dev); \
return sprintf(buf, "%d\n", TEMP_LIMIT_FROM_REG(data->value)); \
@@ -286,13 +299,13 @@ show_temp(os_max, temp_os_max);
show_temp(os_hyst, temp_os_hyst);
#define set_temp(suffix, value, reg) \
-static ssize_t set_temp_##suffix(struct device *dev, struct device_attribute *attr, const char *buf, \
- size_t count) \
+static ssize_t set_temp_##suffix(struct device *dev, \
+ struct device_attribute *attr, const char *buf, size_t count) \
{ \
struct i2c_client *client = to_i2c_client(dev); \
struct lm80_data *data = i2c_get_clientdata(client); \
long val = simple_strtoul(buf, NULL, 10); \
- \
+\
mutex_lock(&data->update_lock); \
data->value = TEMP_LIMIT_TO_REG(val); \
lm80_write_value(client, reg, data->value); \
@@ -366,13 +379,13 @@ static SENSOR_DEVICE_ATTR(fan2_div, S_IWUSR | S_IRUGO,
show_fan_div, set_fan_div, 1);
static DEVICE_ATTR(temp1_input, S_IRUGO, show_temp_input1, NULL);
static DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp_hot_max,
- set_temp_hot_max);
+ set_temp_hot_max);
static DEVICE_ATTR(temp1_max_hyst, S_IWUSR | S_IRUGO, show_temp_hot_hyst,
- set_temp_hot_hyst);
+ set_temp_hot_hyst);
static DEVICE_ATTR(temp1_crit, S_IWUSR | S_IRUGO, show_temp_os_max,
- set_temp_os_max);
+ set_temp_os_max);
static DEVICE_ATTR(temp1_crit_hyst, S_IWUSR | S_IRUGO, show_temp_os_hyst,
- set_temp_os_hyst);
+ set_temp_os_hyst);
static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0);
static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 1);
@@ -459,7 +472,7 @@ static int lm80_detect(struct i2c_client *client, struct i2c_board_info *info)
if ((i2c_smbus_read_byte_data(client, i + 0x40) != cur)
|| (i2c_smbus_read_byte_data(client, i + 0x80) != cur)
|| (i2c_smbus_read_byte_data(client, i + 0xc0) != cur))
- return -ENODEV;
+ return -ENODEV;
}
strlcpy(info->type, "lm80", I2C_NAME_SIZE);
@@ -490,7 +503,8 @@ static int lm80_probe(struct i2c_client *client,
data->fan_min[1] = lm80_read_value(client, LM80_REG_FAN_MIN(2));
/* Register sysfs hooks */
- if ((err = sysfs_create_group(&client->dev.kobj, &lm80_group)))
+ err = sysfs_create_group(&client->dev.kobj, &lm80_group);
+ if (err)
goto error_free;
data->hwmon_dev = hwmon_device_register(&client->dev);
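
The reformatted lm80 fan macros encode the reading as 1350000 / (rpm * div), clamped to 1..254, with 255 meaning a stopped fan and 0 an invalid reading. A worked round trip with div = 2:

/*
 *   FAN_TO_REG(6000, 2)  = (1350000 + 6000) / 12000 = 113
 *   FAN_FROM_REG(113, 2) = 1350000 / (2 * 113)      = 5973 RPM
 */
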
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index 615bc4f4e530..d2dd5f90496d 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -730,7 +730,7 @@ static ssize_t set_temp8(struct device *dev, struct device_attribute *devattr,
long val;
int err;
- err = strict_strtol(buf, 10, &val);
+ err = kstrtol(buf, 10, &val);
if (err < 0)
return err;
@@ -798,7 +798,7 @@ static ssize_t set_temp11(struct device *dev, struct device_attribute *devattr,
long val;
int err;
- err = strict_strtol(buf, 10, &val);
+ err = kstrtol(buf, 10, &val);
if (err < 0)
return err;
@@ -859,7 +859,7 @@ static ssize_t set_temphyst(struct device *dev, struct device_attribute *dummy,
int err;
int temp;
- err = strict_strtol(buf, 10, &val);
+ err = kstrtol(buf, 10, &val);
if (err < 0)
return err;
@@ -912,12 +912,12 @@ static ssize_t set_update_interval(struct device *dev,
unsigned long val;
int err;
- err = strict_strtoul(buf, 10, &val);
+ err = kstrtoul(buf, 10, &val);
if (err)
return err;
mutex_lock(&data->update_lock);
- lm90_set_convrate(client, data, val);
+ lm90_set_convrate(client, data, SENSORS_LIMIT(val, 0, 100000));
mutex_unlock(&data->update_lock);
return count;
@@ -1080,7 +1080,7 @@ static ssize_t set_pec(struct device *dev, struct device_attribute *dummy,
long val;
int err;
- err = strict_strtol(buf, 10, &val);
+ err = kstrtol(buf, 10, &val);
if (err < 0)
return err;
diff --git a/drivers/hwmon/lm93.c b/drivers/hwmon/lm93.c
index 3b43df418613..8bd6c5c9e05b 100644
--- a/drivers/hwmon/lm93.c
+++ b/drivers/hwmon/lm93.c
@@ -151,12 +151,12 @@ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };
/* Insmod parameters */
-static int disable_block;
+static bool disable_block;
module_param(disable_block, bool, 0);
MODULE_PARM_DESC(disable_block,
"Set to non-zero to disable SMBus block data transactions.");
-static int init;
+static bool init;
module_param(init, bool, 0);
MODULE_PARM_DESC(init, "Set to non-zero to force chip initialization.");
diff --git a/drivers/hwmon/lm95241.c b/drivers/hwmon/lm95241.c
index 513901d592a9..70bca671e083 100644
--- a/drivers/hwmon/lm95241.c
+++ b/drivers/hwmon/lm95241.c
@@ -169,7 +169,7 @@ static ssize_t set_type(struct device *dev, struct device_attribute *attr,
int shift;
u8 mask = to_sensor_dev_attr(attr)->index;
- if (strict_strtoul(buf, 10, &val) < 0)
+ if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
if (val != 1 && val != 2)
return -EINVAL;
@@ -216,7 +216,7 @@ static ssize_t set_min(struct device *dev, struct device_attribute *attr,
struct lm95241_data *data = i2c_get_clientdata(client);
long val;
- if (strict_strtol(buf, 10, &val) < 0)
+ if (kstrtol(buf, 10, &val) < 0)
return -EINVAL;
if (val < -128000)
return -EINVAL;
@@ -254,7 +254,7 @@ static ssize_t set_max(struct device *dev, struct device_attribute *attr,
struct lm95241_data *data = i2c_get_clientdata(client);
long val;
- if (strict_strtol(buf, 10, &val) < 0)
+ if (kstrtol(buf, 10, &val) < 0)
return -EINVAL;
if (val >= 256000)
return -EINVAL;
@@ -290,7 +290,7 @@ static ssize_t set_interval(struct device *dev, struct device_attribute *attr,
struct lm95241_data *data = i2c_get_clientdata(client);
unsigned long val;
- if (strict_strtoul(buf, 10, &val) < 0)
+ if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
data->interval = val * HZ / 1000;
diff --git a/drivers/hwmon/lm95245.c b/drivers/hwmon/lm95245.c
index dce9e68241e6..5e5fc1b0ace1 100644
--- a/drivers/hwmon/lm95245.c
+++ b/drivers/hwmon/lm95245.c
@@ -254,7 +254,7 @@ static ssize_t set_limit(struct device *dev, struct device_attribute *attr,
int index = to_sensor_dev_attr(attr)->index;
unsigned long val;
- if (strict_strtoul(buf, 10, &val) < 0)
+ if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
val /= 1000;
@@ -279,7 +279,7 @@ static ssize_t set_crit_hyst(struct device *dev, struct device_attribute *attr,
struct lm95245_data *data = i2c_get_clientdata(client);
unsigned long val;
- if (strict_strtoul(buf, 10, &val) < 0)
+ if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
val /= 1000;
@@ -316,7 +316,7 @@ static ssize_t set_type(struct device *dev, struct device_attribute *attr,
struct lm95245_data *data = i2c_get_clientdata(client);
unsigned long val;
- if (strict_strtoul(buf, 10, &val) < 0)
+ if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
if (val != 1 && val != 2)
return -EINVAL;
@@ -363,7 +363,7 @@ static ssize_t set_interval(struct device *dev, struct device_attribute *attr,
struct lm95245_data *data = i2c_get_clientdata(client);
unsigned long val;
- if (strict_strtoul(buf, 10, &val) < 0)
+ if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
mutex_lock(&data->update_lock);
diff --git a/drivers/hwmon/ltc4261.c b/drivers/hwmon/ltc4261.c
index 4b50601027d3..ce5235560f01 100644
--- a/drivers/hwmon/ltc4261.c
+++ b/drivers/hwmon/ltc4261.c
@@ -85,6 +85,7 @@ static struct ltc4261_data *ltc4261_update_device(struct device *dev)
"Failed to read ADC value: error %d\n",
val);
ret = ERR_PTR(val);
+ data->valid = 0;
goto abort;
}
data->regs[i] = val;
diff --git a/drivers/hwmon/max1111.c b/drivers/hwmon/max1111.c
index c97b78ef9116..482ca901db30 100644
--- a/drivers/hwmon/max1111.c
+++ b/drivers/hwmon/max1111.c
@@ -6,7 +6,7 @@
* Copyright (C) 2004-2005 Richard Purdie
*
* Copyright (C) 2008 Marvell International Ltd.
- * Eric Miao <eric.miao@marvell.com>
+ * Eric Miao <eric.miao@marvell.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -106,11 +106,14 @@ static ssize_t show_adc(struct device *dev,
if (ret < 0)
return ret;
- return sprintf(buf, "%d\n", ret);
+ /* assume the reference voltage to be 2.048V, with an 8-bit sample,
+ * the LSB weight is 8mV
+ */
+ return sprintf(buf, "%d\n", ret * 8);
}
#define MAX1111_ADC_ATTR(_id) \
- SENSOR_DEVICE_ATTR(adc##_id##_in, S_IRUGO, show_adc, NULL, _id)
+ SENSOR_DEVICE_ATTR(in##_id##_input, S_IRUGO, show_adc, NULL, _id)
static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
static MAX1111_ADC_ATTR(0);
@@ -120,10 +123,10 @@ static MAX1111_ADC_ATTR(3);
static struct attribute *max1111_attributes[] = {
&dev_attr_name.attr,
- &sensor_dev_attr_adc0_in.dev_attr.attr,
- &sensor_dev_attr_adc1_in.dev_attr.attr,
- &sensor_dev_attr_adc2_in.dev_attr.attr,
- &sensor_dev_attr_adc3_in.dev_attr.attr,
+ &sensor_dev_attr_in0_input.dev_attr.attr,
+ &sensor_dev_attr_in1_input.dev_attr.attr,
+ &sensor_dev_attr_in2_input.dev_attr.attr,
+ &sensor_dev_attr_in3_input.dev_attr.attr,
NULL,
};
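
The max1111 scaling change follows from the 2.048 V reference and 8-bit samples noted in the new comment: one LSB is 2048 mV / 256 = 8 mV, so the raw 0-255 reading maps to 0-2040 mV. Reporting millivolts, and renaming adcX_in to inX_input, matches what the standard hwmon sysfs interface expects for voltage inputs.
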
diff --git a/drivers/hwmon/max16065.c b/drivers/hwmon/max16065.c
index 385886a4f224..f8e323ac6cb3 100644
--- a/drivers/hwmon/max16065.c
+++ b/drivers/hwmon/max16065.c
@@ -230,7 +230,7 @@ static ssize_t max16065_set_limit(struct device *dev,
int err;
int limit;
- err = strict_strtoul(buf, 10, &val);
+ err = kstrtoul(buf, 10, &val);
if (unlikely(err < 0))
return err;
diff --git a/drivers/hwmon/max1668.c b/drivers/hwmon/max1668.c
index 20d1b2ddffb6..88953f99e914 100644
--- a/drivers/hwmon/max1668.c
+++ b/drivers/hwmon/max1668.c
@@ -59,7 +59,7 @@ static unsigned short max1668_addr_list[] = {
#define DEV_ID_MAX1989 0xb
/* read only mode module parameter */
-static int read_only;
+static bool read_only;
module_param(read_only, bool, 0);
MODULE_PARM_DESC(read_only, "Don't set any values, read only mode");
@@ -335,10 +335,10 @@ static struct attribute *max1668_attribute_unique[] = {
NULL
};
-static mode_t max1668_attribute_mode(struct kobject *kobj,
+static umode_t max1668_attribute_mode(struct kobject *kobj,
struct attribute *attr, int index)
{
- int ret = S_IRUGO;
+ umode_t ret = S_IRUGO;
if (read_only)
return ret;
if (attr == &sensor_dev_attr_temp1_max.dev_attr.attr ||
diff --git a/drivers/hwmon/max6639.c b/drivers/hwmon/max6639.c
index f20d9978ee78..e10a092c603c 100644
--- a/drivers/hwmon/max6639.c
+++ b/drivers/hwmon/max6639.c
@@ -208,7 +208,7 @@ static ssize_t set_temp_max(struct device *dev,
unsigned long val;
int res;
- res = strict_strtoul(buf, 10, &val);
+ res = kstrtoul(buf, 10, &val);
if (res)
return res;
@@ -241,7 +241,7 @@ static ssize_t set_temp_crit(struct device *dev,
unsigned long val;
int res;
- res = strict_strtoul(buf, 10, &val);
+ res = kstrtoul(buf, 10, &val);
if (res)
return res;
@@ -275,7 +275,7 @@ static ssize_t set_temp_emergency(struct device *dev,
unsigned long val;
int res;
- res = strict_strtoul(buf, 10, &val);
+ res = kstrtoul(buf, 10, &val);
if (res)
return res;
@@ -308,7 +308,7 @@ static ssize_t set_pwm(struct device *dev,
unsigned long val;
int res;
- res = strict_strtoul(buf, 10, &val);
+ res = kstrtoul(buf, 10, &val);
if (res)
return res;
diff --git a/drivers/hwmon/max6642.c b/drivers/hwmon/max6642.c
index e855d3b0bd1f..209e8a526eb1 100644
--- a/drivers/hwmon/max6642.c
+++ b/drivers/hwmon/max6642.c
@@ -234,7 +234,7 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
struct max6642_data *data = i2c_get_clientdata(client);
struct sensor_device_attribute_2 *attr2 = to_sensor_dev_attr_2(attr);
- err = strict_strtoul(buf, 10, &val);
+ err = kstrtoul(buf, 10, &val);
if (err < 0)
return err;
diff --git a/drivers/hwmon/max6650.c b/drivers/hwmon/max6650.c
index ece3aafa54b3..2fc034aeca09 100644
--- a/drivers/hwmon/max6650.c
+++ b/drivers/hwmon/max6650.c
@@ -464,7 +464,7 @@ static SENSOR_DEVICE_ATTR(gpio1_alarm, S_IRUGO, get_alarm, NULL,
static SENSOR_DEVICE_ATTR(gpio2_alarm, S_IRUGO, get_alarm, NULL,
MAX6650_ALRM_GPIO2);
-static mode_t max6650_attrs_visible(struct kobject *kobj, struct attribute *a,
+static umode_t max6650_attrs_visible(struct kobject *kobj, struct attribute *a,
int n)
{
struct device *dev = container_of(kobj, struct device, kobj);
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index eab11615dced..9b382ec2c3bd 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -432,19 +432,7 @@ static struct platform_driver ntc_thermistor_driver = {
.id_table = ntc_thermistor_id,
};
-static int __init ntc_thermistor_init(void)
-{
- return platform_driver_register(&ntc_thermistor_driver);
-}
-
-module_init(ntc_thermistor_init);
-
-static void __exit ntc_thermistor_cleanup(void)
-{
- platform_driver_unregister(&ntc_thermistor_driver);
-}
-
-module_exit(ntc_thermistor_cleanup);
+module_platform_driver(ntc_thermistor_driver);
MODULE_DESCRIPTION("NTC Thermistor Driver");
MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
diff --git a/drivers/hwmon/pc87427.c b/drivers/hwmon/pc87427.c
index 8da2181630b1..cb35461d52d9 100644
--- a/drivers/hwmon/pc87427.c
+++ b/drivers/hwmon/pc87427.c
@@ -418,7 +418,7 @@ static ssize_t set_fan_min(struct device *dev, struct device_attribute
unsigned long val;
int iobase = data->address[LD_FAN];
- if (strict_strtoul(buf, 10, &val) < 0)
+ if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
mutex_lock(&data->lock);
@@ -572,7 +572,7 @@ static ssize_t set_pwm_enable(struct device *dev, struct device_attribute
int nr = to_sensor_dev_attr(devattr)->index;
unsigned long val;
- if (strict_strtoul(buf, 10, &val) < 0 || val > 2)
+ if (kstrtoul(buf, 10, &val) < 0 || val > 2)
return -EINVAL;
/* Can't go to automatic mode if it isn't configured */
if (val == 2 && !(data->pwm_auto_ok & (1 << nr)))
@@ -604,7 +604,7 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute
int iobase = data->address[LD_FAN];
u8 mode;
- if (strict_strtoul(buf, 10, &val) < 0 || val > 0xff)
+ if (kstrtoul(buf, 10, &val) < 0 || val > 0xff)
return -EINVAL;
mutex_lock(&data->lock);
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index 4b26f51920ba..cfec923f42b7 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -19,8 +19,8 @@ config SENSORS_PMBUS
default y
help
If you say yes here you get hardware monitoring support for generic
- PMBus devices, including but not limited to ADP4000, BMR450, BMR451,
- BMR453, BMR454, NCP4200, and NCP4208.
+ PMBus devices, including but not limited to ADP4000, BMR453, BMR454,
+ NCP4200, and NCP4208.
This driver can also be built as a module. If so, the module will
be called pmbus.
@@ -113,8 +113,9 @@ config SENSORS_ZL6100
default n
help
If you say yes here you get hardware monitoring support for Intersil
- ZL2004, ZL2006, ZL2008, ZL2105, ZL2106, ZL6100, and ZL6105 Digital
- DC/DC Controllers.
+ ZL2004, ZL2005, ZL2006, ZL2008, ZL2105, ZL2106, ZL6100, and ZL6105
+ Digital DC/DC Controllers, as well as for Ericsson BMR450, BMR451,
+ BMR462, BMR463, and BMR464.
This driver can also be built as a module. If so, the module will
be called zl6100.
diff --git a/drivers/hwmon/pmbus/adm1275.c b/drivers/hwmon/pmbus/adm1275.c
index 980a4d9d5028..81c7c2ead6f3 100644
--- a/drivers/hwmon/pmbus/adm1275.c
+++ b/drivers/hwmon/pmbus/adm1275.c
@@ -170,35 +170,71 @@ static int adm1275_read_byte_data(struct i2c_client *client, int page, int reg)
return ret;
}
+static const struct i2c_device_id adm1275_id[] = {
+ { "adm1275", adm1275 },
+ { "adm1276", adm1276 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, adm1275_id);
+
static int adm1275_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
+ u8 block_buffer[I2C_SMBUS_BLOCK_MAX + 1];
int config, device_config;
int ret;
struct pmbus_driver_info *info;
struct adm1275_data *data;
+ const struct i2c_device_id *mid;
if (!i2c_check_functionality(client->adapter,
- I2C_FUNC_SMBUS_READ_BYTE_DATA))
+ I2C_FUNC_SMBUS_READ_BYTE_DATA
+ | I2C_FUNC_SMBUS_BLOCK_DATA))
return -ENODEV;
- data = kzalloc(sizeof(struct adm1275_data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
+ ret = i2c_smbus_read_block_data(client, PMBUS_MFR_ID, block_buffer);
+ if (ret < 0) {
+ dev_err(&client->dev, "Failed to read Manufacturer ID\n");
+ return ret;
+ }
+ if (ret != 3 || strncmp(block_buffer, "ADI", 3)) {
+ dev_err(&client->dev, "Unsupported Manufacturer ID\n");
+ return -ENODEV;
+ }
- config = i2c_smbus_read_byte_data(client, ADM1275_PMON_CONFIG);
- if (config < 0) {
- ret = config;
- goto err_mem;
+ ret = i2c_smbus_read_block_data(client, PMBUS_MFR_MODEL, block_buffer);
+ if (ret < 0) {
+ dev_err(&client->dev, "Failed to read Manufacturer Model\n");
+ return ret;
+ }
+ for (mid = adm1275_id; mid->name[0]; mid++) {
+ if (!strncasecmp(mid->name, block_buffer, strlen(mid->name)))
+ break;
+ }
+ if (!mid->name[0]) {
+ dev_err(&client->dev, "Unsupported device\n");
+ return -ENODEV;
}
+ if (id->driver_data != mid->driver_data)
+ dev_notice(&client->dev,
+ "Device mismatch: Configured %s, detected %s\n",
+ id->name, mid->name);
+
+ config = i2c_smbus_read_byte_data(client, ADM1275_PMON_CONFIG);
+ if (config < 0)
+ return config;
+
device_config = i2c_smbus_read_byte_data(client, ADM1275_DEVICE_CONFIG);
- if (device_config < 0) {
- ret = device_config;
- goto err_mem;
- }
+ if (device_config < 0)
+ return device_config;
+
+ data = kzalloc(sizeof(struct adm1275_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->id = mid->driver_data;
- data->id = id->driver_data;
info = &data->info;
info->pages = 1;
@@ -233,7 +269,7 @@ static int adm1275_probe(struct i2c_client *client,
if (device_config & ADM1275_IOUT_WARN2_SELECT)
data->have_oc_fault = true;
- switch (id->driver_data) {
+ switch (data->id) {
case adm1275:
if (config & ADM1275_VIN_VOUT_SELECT)
info->func[0] |=
@@ -281,13 +317,6 @@ static int adm1275_remove(struct i2c_client *client)
return 0;
}
-static const struct i2c_device_id adm1275_id[] = {
- { "adm1275", adm1275 },
- { "adm1276", adm1276 },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, adm1275_id);
-
static struct i2c_driver adm1275_driver = {
.driver = {
.name = "adm1275",
diff --git a/drivers/hwmon/pmbus/pmbus.c b/drivers/hwmon/pmbus/pmbus.c
index 995e873197e3..18a385e753d7 100644
--- a/drivers/hwmon/pmbus/pmbus.c
+++ b/drivers/hwmon/pmbus/pmbus.c
@@ -200,8 +200,6 @@ static int pmbus_remove(struct i2c_client *client)
*/
static const struct i2c_device_id pmbus_id[] = {
{"adp4000", 1},
- {"bmr450", 1},
- {"bmr451", 1},
{"bmr453", 1},
{"bmr454", 1},
{"ncp4200", 1},
diff --git a/drivers/hwmon/pmbus/zl6100.c b/drivers/hwmon/pmbus/zl6100.c
index 2bc980006f83..48c7b4a716ae 100644
--- a/drivers/hwmon/pmbus/zl6100.c
+++ b/drivers/hwmon/pmbus/zl6100.c
@@ -28,7 +28,7 @@
#include <linux/delay.h>
#include "pmbus.h"
-enum chips { zl2004, zl2006, zl2008, zl2105, zl2106, zl6100, zl6105 };
+enum chips { zl2004, zl2005, zl2006, zl2008, zl2105, zl2106, zl6100, zl6105 };
struct zl6100_data {
int id;
@@ -38,8 +38,11 @@ struct zl6100_data {
#define to_zl6100_data(x) container_of(x, struct zl6100_data, info)
+#define ZL6100_MFR_CONFIG 0xd0
#define ZL6100_DEVICE_ID 0xe4
+#define ZL6100_MFR_XTEMP_ENABLE (1 << 7)
+
#define ZL6100_WAIT_TIME 1000 /* uS */
static ushort delay = ZL6100_WAIT_TIME;
@@ -65,6 +68,19 @@ static int zl6100_read_word_data(struct i2c_client *client, int page, int reg)
if (page || reg >= PMBUS_VIRT_BASE)
return -ENXIO;
+ if (data->id == zl2005) {
+ /*
+ * Limit register detection is not reliable on ZL2005.
+ * Make sure registers are not erroneously detected.
+ */
+ switch (reg) {
+ case PMBUS_VOUT_OV_WARN_LIMIT:
+ case PMBUS_VOUT_UV_WARN_LIMIT:
+ case PMBUS_IOUT_OC_WARN_LIMIT:
+ return -ENXIO;
+ }
+ }
+
zl6100_wait(data);
ret = pmbus_read_word_data(client, page, reg);
data->access = ktime_get();
@@ -122,7 +138,13 @@ static int zl6100_write_byte(struct i2c_client *client, int page, u8 value)
}
static const struct i2c_device_id zl6100_id[] = {
+ {"bmr450", zl2005},
+ {"bmr451", zl2005},
+ {"bmr462", zl2008},
+ {"bmr463", zl2008},
+ {"bmr464", zl2008},
{"zl2004", zl2004},
+ {"zl2005", zl2005},
{"zl2006", zl2006},
{"zl2008", zl2008},
{"zl2105", zl2105},
@@ -143,7 +165,7 @@ static int zl6100_probe(struct i2c_client *client,
const struct i2c_device_id *mid;
if (!i2c_check_functionality(client->adapter,
- I2C_FUNC_SMBUS_READ_BYTE_DATA
+ I2C_FUNC_SMBUS_READ_WORD_DATA
| I2C_FUNC_SMBUS_READ_BLOCK_DATA))
return -ENODEV;
@@ -177,8 +199,9 @@ static int zl6100_probe(struct i2c_client *client,
data->id = mid->driver_data;
/*
- * ZL2008, ZL2105, and ZL6100 are known to require a wait time
+ * ZL2005, ZL2008, ZL2105, and ZL6100 are known to require a wait time
* between I2C accesses. ZL2004 and ZL6105 are known to be safe.
+ * Other chips have not yet been tested.
*
* Only clear the wait time for chips known to be safe. The wait time
* can be cleared later for additional chips if tests show that it
@@ -190,12 +213,9 @@ static int zl6100_probe(struct i2c_client *client,
/*
* Since there was a direct I2C device access above, wait before
* accessing the chip again.
- * Set the timestamp, wait, then set it again. This should provide
- * enough buffer time to be safe.
*/
data->access = ktime_get();
zl6100_wait(data);
- data->access = ktime_get();
info = &data->info;
@@ -203,7 +223,16 @@ static int zl6100_probe(struct i2c_client *client,
info->func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_STATUS_INPUT
| PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
| PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT
- | PMBUS_HAVE_TEMP | PMBUS_HAVE_TEMP2 | PMBUS_HAVE_STATUS_TEMP;
+ | PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP;
+
+ ret = i2c_smbus_read_word_data(client, ZL6100_MFR_CONFIG);
+ if (ret < 0)
+ goto err_mem;
+ if (ret & ZL6100_MFR_XTEMP_ENABLE)
+ info->func[0] |= PMBUS_HAVE_TEMP2;
+
+ data->access = ktime_get();
+ zl6100_wait(data);
info->read_word_data = zl6100_read_word_data;
info->read_byte_data = zl6100_read_byte_data;
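Note: the zl6100 changes are spread over several hunks; taken together, PMBUS_HAVE_TEMP2 is no longer advertised unconditionally but only when bit 7 (ZL6100_MFR_XTEMP_ENABLE) of the chip's MFR_CONFIG register (0xd0) reports that the external temperature input is enabled. Condensed from the hunks above:

	ret = i2c_smbus_read_word_data(client, ZL6100_MFR_CONFIG);
	if (ret < 0)
		goto err_mem;				/* label from the existing probe */
	if (ret & ZL6100_MFR_XTEMP_ENABLE)		/* bit 7: external temp sensor wired up */
		info->func[0] |= PMBUS_HAVE_TEMP2;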
diff --git a/drivers/hwmon/s3c-hwmon.c b/drivers/hwmon/s3c-hwmon.c
index b39f52e2752a..f6c26d19f521 100644
--- a/drivers/hwmon/s3c-hwmon.c
+++ b/drivers/hwmon/s3c-hwmon.c
@@ -393,18 +393,7 @@ static struct platform_driver s3c_hwmon_driver = {
.remove = __devexit_p(s3c_hwmon_remove),
};
-static int __init s3c_hwmon_init(void)
-{
- return platform_driver_register(&s3c_hwmon_driver);
-}
-
-static void __exit s3c_hwmon_exit(void)
-{
- platform_driver_unregister(&s3c_hwmon_driver);
-}
-
-module_init(s3c_hwmon_init);
-module_exit(s3c_hwmon_exit);
+module_platform_driver(s3c_hwmon_driver);
MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
MODULE_DESCRIPTION("S3C ADC HWMon driver");
diff --git a/drivers/hwmon/sch5627.c b/drivers/hwmon/sch5627.c
index e3b5c6039c25..79b6dabe3161 100644
--- a/drivers/hwmon/sch5627.c
+++ b/drivers/hwmon/sch5627.c
@@ -590,19 +590,8 @@ static struct platform_driver sch5627_driver = {
.remove = sch5627_remove,
};
-static int __init sch5627_init(void)
-{
- return platform_driver_register(&sch5627_driver);
-}
-
-static void __exit sch5627_exit(void)
-{
- platform_driver_unregister(&sch5627_driver);
-}
+module_platform_driver(sch5627_driver);
MODULE_DESCRIPTION("SMSC SCH5627 Hardware Monitoring Driver");
MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
MODULE_LICENSE("GPL");
-
-module_init(sch5627_init);
-module_exit(sch5627_exit);
diff --git a/drivers/hwmon/sch5636.c b/drivers/hwmon/sch5636.c
index 244407aa79fc..9d5236fb09b4 100644
--- a/drivers/hwmon/sch5636.c
+++ b/drivers/hwmon/sch5636.c
@@ -521,19 +521,8 @@ static struct platform_driver sch5636_driver = {
.remove = sch5636_remove,
};
-static int __init sch5636_init(void)
-{
- return platform_driver_register(&sch5636_driver);
-}
-
-static void __exit sch5636_exit(void)
-{
- platform_driver_unregister(&sch5636_driver);
-}
+module_platform_driver(sch5636_driver);
MODULE_DESCRIPTION("SMSC SCH5636 Hardware Monitoring Driver");
MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
MODULE_LICENSE("GPL");
-
-module_init(sch5636_init);
-module_exit(sch5636_exit);
diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
index fe4104c6b764..6ddeae049058 100644
--- a/drivers/hwmon/sht15.c
+++ b/drivers/hwmon/sht15.c
@@ -683,7 +683,7 @@ static ssize_t sht15_store_heater(struct device *dev,
long value;
u8 status;
- if (strict_strtol(buf, 10, &value))
+ if (kstrtol(buf, 10, &value))
return -EINVAL;
mutex_lock(&data->read_lock);
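Note: the strict_strtol()/strict_strtoul() to kstrtol()/kstrtoul() renames repeated throughout the hwmon drivers below are straight substitutions to the current parser names; the functions return 0 on success or a negative errno (-EINVAL for malformed input, -ERANGE for overflow). A minimal sysfs store sketch with a hypothetical attribute, assuming the usual buf/count parameters:

	static ssize_t foo_store(struct device *dev, struct device_attribute *attr,
				 const char *buf, size_t count)
	{
		long val;
		int err;

		err = kstrtol(buf, 10, &val);	/* base 10 */
		if (err)
			return err;
		/* ... clamp and write val to the chip ... */
		return count;
	}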
diff --git a/drivers/hwmon/tmp102.c b/drivers/hwmon/tmp102.c
index 643aa8c94535..c08eee21d76e 100644
--- a/drivers/hwmon/tmp102.c
+++ b/drivers/hwmon/tmp102.c
@@ -112,7 +112,7 @@ static ssize_t tmp102_set_temp(struct device *dev,
long val;
int status;
- if (strict_strtol(buf, 10, &val) < 0)
+ if (kstrtol(buf, 10, &val) < 0)
return -EINVAL;
val = SENSORS_LIMIT(val, -256000, 255000);
diff --git a/drivers/hwmon/tmp401.c b/drivers/hwmon/tmp401.c
index ad8d535235c5..8b9a77486d57 100644
--- a/drivers/hwmon/tmp401.c
+++ b/drivers/hwmon/tmp401.c
@@ -334,7 +334,7 @@ static ssize_t store_temp_min(struct device *dev, struct device_attribute
long val;
u16 reg;
- if (strict_strtol(buf, 10, &val))
+ if (kstrtol(buf, 10, &val))
return -EINVAL;
reg = tmp401_temp_to_register(val, data->config);
@@ -361,7 +361,7 @@ static ssize_t store_temp_max(struct device *dev, struct device_attribute
long val;
u16 reg;
- if (strict_strtol(buf, 10, &val))
+ if (kstrtol(buf, 10, &val))
return -EINVAL;
reg = tmp401_temp_to_register(val, data->config);
@@ -388,7 +388,7 @@ static ssize_t store_temp_crit(struct device *dev, struct device_attribute
long val;
u8 reg;
- if (strict_strtol(buf, 10, &val))
+ if (kstrtol(buf, 10, &val))
return -EINVAL;
reg = tmp401_crit_temp_to_register(val, data->config);
@@ -413,7 +413,7 @@ static ssize_t store_temp_crit_hyst(struct device *dev, struct device_attribute
long val;
u8 reg;
- if (strict_strtol(buf, 10, &val))
+ if (kstrtol(buf, 10, &val))
return -EINVAL;
if (data->config & TMP401_CONFIG_RANGE)
@@ -447,7 +447,7 @@ static ssize_t reset_temp_history(struct device *dev,
{
long val;
- if (strict_strtol(buf, 10, &val))
+ if (kstrtol(buf, 10, &val))
return -EINVAL;
if (val != 1) {
diff --git a/drivers/hwmon/tmp421.c b/drivers/hwmon/tmp421.c
index 0517a8f09d35..c48381f2cd02 100644
--- a/drivers/hwmon/tmp421.c
+++ b/drivers/hwmon/tmp421.c
@@ -157,7 +157,7 @@ static ssize_t show_fault(struct device *dev,
return sprintf(buf, "0\n");
}
-static mode_t tmp421_is_visible(struct kobject *kobj, struct attribute *a,
+static umode_t tmp421_is_visible(struct kobject *kobj, struct attribute *a,
int n)
{
struct device *dev = container_of(kobj, struct device, kobj);
diff --git a/drivers/hwmon/twl4030-madc-hwmon.c b/drivers/hwmon/twl4030-madc-hwmon.c
index 57240740b161..0018c7dd0097 100644
--- a/drivers/hwmon/twl4030-madc-hwmon.c
+++ b/drivers/hwmon/twl4030-madc-hwmon.c
@@ -136,19 +136,7 @@ static struct platform_driver twl4030_madc_hwmon_driver = {
},
};
-static int __init twl4030_madc_hwmon_init(void)
-{
- return platform_driver_register(&twl4030_madc_hwmon_driver);
-}
-
-module_init(twl4030_madc_hwmon_init);
-
-static void __exit twl4030_madc_hwmon_exit(void)
-{
- platform_driver_unregister(&twl4030_madc_hwmon_driver);
-}
-
-module_exit(twl4030_madc_hwmon_exit);
+module_platform_driver(twl4030_madc_hwmon_driver);
MODULE_DESCRIPTION("TWL4030 ADC Hwmon driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/ultra45_env.c b/drivers/hwmon/ultra45_env.c
index 3cd07bf42dca..b9a87e89bab4 100644
--- a/drivers/hwmon/ultra45_env.c
+++ b/drivers/hwmon/ultra45_env.c
@@ -309,15 +309,4 @@ static struct platform_driver env_driver = {
.remove = __devexit_p(env_remove),
};
-static int __init env_init(void)
-{
- return platform_driver_register(&env_driver);
-}
-
-static void __exit env_exit(void)
-{
- platform_driver_unregister(&env_driver);
-}
-
-module_init(env_init);
-module_exit(env_exit);
+module_platform_driver(env_driver);
diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
index 93f5fc7d6059..0e0af0445222 100644
--- a/drivers/hwmon/w83627ehf.c
+++ b/drivers/hwmon/w83627ehf.c
@@ -937,7 +937,7 @@ store_in_##reg(struct device *dev, struct device_attribute *attr, \
int nr = sensor_attr->index; \
unsigned long val; \
int err; \
- err = strict_strtoul(buf, 10, &val); \
+ err = kstrtoul(buf, 10, &val); \
if (err < 0) \
return err; \
mutex_lock(&data->update_lock); \
@@ -1054,7 +1054,7 @@ store_fan_min(struct device *dev, struct device_attribute *attr,
unsigned int reg;
u8 new_div;
- err = strict_strtoul(buf, 10, &val);
+ err = kstrtoul(buf, 10, &val);
if (err < 0)
return err;
@@ -1199,7 +1199,7 @@ store_##reg(struct device *dev, struct device_attribute *attr, \
int nr = sensor_attr->index; \
int err; \
long val; \
- err = strict_strtol(buf, 10, &val); \
+ err = kstrtol(buf, 10, &val); \
if (err < 0) \
return err; \
mutex_lock(&data->update_lock); \
@@ -1324,7 +1324,7 @@ store_pwm_mode(struct device *dev, struct device_attribute *attr,
int err;
u16 reg;
- err = strict_strtoul(buf, 10, &val);
+ err = kstrtoul(buf, 10, &val);
if (err < 0)
return err;
@@ -1351,7 +1351,7 @@ store_pwm(struct device *dev, struct device_attribute *attr,
unsigned long val;
int err;
- err = strict_strtoul(buf, 10, &val);
+ err = kstrtoul(buf, 10, &val);
if (err < 0)
return err;
@@ -1376,7 +1376,7 @@ store_pwm_enable(struct device *dev, struct device_attribute *attr,
int err;
u16 reg;
- err = strict_strtoul(buf, 10, &val);
+ err = kstrtoul(buf, 10, &val);
if (err < 0)
return err;
@@ -1430,7 +1430,7 @@ store_target_temp(struct device *dev, struct device_attribute *attr,
long val;
int err;
- err = strict_strtol(buf, 10, &val);
+ err = kstrtol(buf, 10, &val);
if (err < 0)
return err;
@@ -1455,7 +1455,7 @@ store_tolerance(struct device *dev, struct device_attribute *attr,
long val;
int err;
- err = strict_strtol(buf, 10, &val);
+ err = kstrtol(buf, 10, &val);
if (err < 0)
return err;
@@ -1556,7 +1556,7 @@ store_##reg(struct device *dev, struct device_attribute *attr, \
int nr = sensor_attr->index; \
unsigned long val; \
int err; \
- err = strict_strtoul(buf, 10, &val); \
+ err = kstrtoul(buf, 10, &val); \
if (err < 0) \
return err; \
val = SENSORS_LIMIT(val, 1, 255); \
@@ -1595,7 +1595,7 @@ store_##reg(struct device *dev, struct device_attribute *attr, \
int nr = sensor_attr->index; \
unsigned long val; \
int err; \
- err = strict_strtoul(buf, 10, &val); \
+ err = kstrtoul(buf, 10, &val); \
if (err < 0) \
return err; \
val = step_time_to_reg(val, data->pwm_mode[nr]); \
@@ -1702,7 +1702,7 @@ clear_caseopen(struct device *dev, struct device_attribute *attr,
unsigned long val;
u16 reg, mask;
- if (strict_strtoul(buf, 10, &val) || val != 0)
+ if (kstrtoul(buf, 10, &val) || val != 0)
return -EINVAL;
mask = to_sensor_dev_attr_2(attr)->nr;
diff --git a/drivers/hwmon/w83627hf.c b/drivers/hwmon/w83627hf.c
index bde50e34d013..374118f2b9f9 100644
--- a/drivers/hwmon/w83627hf.c
+++ b/drivers/hwmon/w83627hf.c
@@ -71,7 +71,7 @@ module_param(force_i2c, byte, 0);
MODULE_PARM_DESC(force_i2c,
"Initialize the i2c address of the sensors");
-static int init = 1;
+static bool init = 1;
module_param(init, bool, 0);
MODULE_PARM_DESC(init, "Set to zero to bypass chip initialization");
diff --git a/drivers/hwmon/w83781d.c b/drivers/hwmon/w83781d.c
index 65b685e2c7b7..17a8fa2d9ae9 100644
--- a/drivers/hwmon/w83781d.c
+++ b/drivers/hwmon/w83781d.c
@@ -67,11 +67,11 @@ module_param_array(force_subclients, short, NULL, 0);
MODULE_PARM_DESC(force_subclients, "List of subclient addresses: "
"{bus, clientaddr, subclientaddr1, subclientaddr2}");
-static int reset;
+static bool reset;
module_param(reset, bool, 0);
MODULE_PARM_DESC(reset, "Set to one to reset chip on load");
-static int init = 1;
+static bool init = 1;
module_param(init, bool, 0);
MODULE_PARM_DESC(init, "Set to zero to bypass chip initialization");
diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c
index 8c2844e5691c..35aa5149307a 100644
--- a/drivers/hwmon/w83791d.c
+++ b/drivers/hwmon/w83791d.c
@@ -58,11 +58,11 @@ module_param_array(force_subclients, short, NULL, 0);
MODULE_PARM_DESC(force_subclients, "List of subclient addresses: "
"{bus, clientaddr, subclientaddr1, subclientaddr2}");
-static int reset;
+static bool reset;
module_param(reset, bool, 0);
MODULE_PARM_DESC(reset, "Set to one to force a hardware chip reset");
-static int init;
+static bool init;
module_param(init, bool, 0);
MODULE_PARM_DESC(init, "Set to one to force extra software initialization");
@@ -711,7 +711,7 @@ static ssize_t store_pwm(struct device *dev, struct device_attribute *attr,
int nr = sensor_attr->index;
unsigned long val;
- if (strict_strtoul(buf, 10, &val))
+ if (kstrtoul(buf, 10, &val))
return -EINVAL;
mutex_lock(&data->update_lock);
@@ -756,7 +756,7 @@ static ssize_t store_pwmenable(struct device *dev,
u8 val_shift = 0;
u8 keep_mask = 0;
- int ret = strict_strtoul(buf, 10, &val);
+ int ret = kstrtoul(buf, 10, &val);
if (ret || val < 1 || val > 3)
return -EINVAL;
@@ -819,7 +819,7 @@ static ssize_t store_temp_target(struct device *dev,
unsigned long val;
u8 target_mask;
- if (strict_strtoul(buf, 10, &val))
+ if (kstrtoul(buf, 10, &val))
return -EINVAL;
mutex_lock(&data->update_lock);
@@ -863,7 +863,7 @@ static ssize_t store_temp_tolerance(struct device *dev,
u8 val_shift = 0;
u8 keep_mask = 0;
- if (strict_strtoul(buf, 10, &val))
+ if (kstrtoul(buf, 10, &val))
return -EINVAL;
switch (nr) {
diff --git a/drivers/hwmon/w83792d.c b/drivers/hwmon/w83792d.c
index f3e7130c4cda..d3100eab6b2f 100644
--- a/drivers/hwmon/w83792d.c
+++ b/drivers/hwmon/w83792d.c
@@ -56,7 +56,7 @@ module_param_array(force_subclients, short, NULL, 0);
MODULE_PARM_DESC(force_subclients, "List of subclient addresses: "
"{bus, clientaddr, subclientaddr1, subclientaddr2}");
-static int init;
+static bool init;
module_param(init, bool, 0);
MODULE_PARM_DESC(init, "Set to one to force chip initialization");
@@ -749,7 +749,7 @@ store_chassis_clear(struct device *dev, struct device_attribute *attr,
unsigned long val;
u8 reg;
- if (strict_strtoul(buf, 10, &val) || val != 0)
+ if (kstrtoul(buf, 10, &val) || val != 0)
return -EINVAL;
mutex_lock(&data->update_lock);
diff --git a/drivers/hwmon/w83793.c b/drivers/hwmon/w83793.c
index 854f9117f1aa..45ec7e7c3c27 100644
--- a/drivers/hwmon/w83793.c
+++ b/drivers/hwmon/w83793.c
@@ -61,7 +61,7 @@ module_param_array(force_subclients, short, NULL, 0);
MODULE_PARM_DESC(force_subclients, "List of subclient addresses: "
"{bus, clientaddr, subclientaddr1, subclientaddr2}");
-static int reset;
+static bool reset;
module_param(reset, bool, 0);
MODULE_PARM_DESC(reset, "Set to 1 to reset chip, not recommended");
@@ -450,7 +450,7 @@ store_chassis_clear(struct device *dev,
unsigned long val;
u8 reg;
- if (strict_strtoul(buf, 10, &val) || val != 0)
+ if (kstrtoul(buf, 10, &val) || val != 0)
return -EINVAL;
mutex_lock(&data->update_lock);
diff --git a/drivers/hwmon/w83795.c b/drivers/hwmon/w83795.c
index 845232d7f611..aa58b25565bc 100644
--- a/drivers/hwmon/w83795.c
+++ b/drivers/hwmon/w83795.c
@@ -42,7 +42,7 @@ static const unsigned short normal_i2c[] = {
};
-static int reset;
+static bool reset;
module_param(reset, bool, 0);
MODULE_PARM_DESC(reset, "Set to 1 to reset chip, not recommended");
@@ -730,7 +730,7 @@ store_beep(struct device *dev, struct device_attribute *attr,
u8 beep_bit = 1 << shift;
unsigned long val;
- if (strict_strtoul(buf, 10, &val) < 0)
+ if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
if (val != 0 && val != 1)
return -EINVAL;
@@ -755,7 +755,7 @@ store_chassis_clear(struct device *dev,
struct w83795_data *data = i2c_get_clientdata(client);
unsigned long val;
- if (strict_strtoul(buf, 10, &val) < 0 || val != 0)
+ if (kstrtoul(buf, 10, &val) < 0 || val != 0)
return -EINVAL;
mutex_lock(&data->update_lock);
@@ -801,7 +801,7 @@ store_fan_min(struct device *dev, struct device_attribute *attr,
struct w83795_data *data = i2c_get_clientdata(client);
unsigned long val;
- if (strict_strtoul(buf, 10, &val))
+ if (kstrtoul(buf, 10, &val))
return -EINVAL;
val = fan_to_reg(val);
@@ -863,7 +863,7 @@ store_pwm(struct device *dev, struct device_attribute *attr,
int index = sensor_attr->index;
unsigned long val;
- if (strict_strtoul(buf, 10, &val) < 0)
+ if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
mutex_lock(&data->update_lock);
@@ -924,7 +924,7 @@ store_pwm_enable(struct device *dev, struct device_attribute *attr,
unsigned long val;
int i;
- if (strict_strtoul(buf, 10, &val) < 0)
+ if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
if (val < 1 || val > 2)
return -EINVAL;
@@ -1021,7 +1021,7 @@ store_temp_src(struct device *dev, struct device_attribute *attr,
unsigned long channel;
u8 val = index / 2;
- if (strict_strtoul(buf, 10, &channel) < 0 ||
+ if (kstrtoul(buf, 10, &channel) < 0 ||
channel < 1 || channel > 14)
return -EINVAL;
@@ -1088,7 +1088,7 @@ store_temp_pwm_enable(struct device *dev, struct device_attribute *attr,
int index = sensor_attr->index;
unsigned long tmp;
- if (strict_strtoul(buf, 10, &tmp) < 0)
+ if (kstrtoul(buf, 10, &tmp) < 0)
return -EINVAL;
switch (nr) {
@@ -1149,7 +1149,7 @@ store_fanin(struct device *dev, struct device_attribute *attr,
int index = sensor_attr->index;
unsigned long val;
- if (strict_strtoul(buf, 10, &val) < 0)
+ if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
mutex_lock(&data->update_lock);
@@ -1198,7 +1198,7 @@ store_temp_pwm(struct device *dev, struct device_attribute *attr,
unsigned long val;
u8 tmp;
- if (strict_strtoul(buf, 10, &val) < 0)
+ if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
val /= 1000;
@@ -1257,7 +1257,7 @@ store_sf4_pwm(struct device *dev, struct device_attribute *attr,
int index = sensor_attr->index;
unsigned long val;
- if (strict_strtoul(buf, 10, &val) < 0)
+ if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
mutex_lock(&data->update_lock);
@@ -1293,7 +1293,7 @@ store_sf4_temp(struct device *dev, struct device_attribute *attr,
int index = sensor_attr->index;
unsigned long val;
- if (strict_strtoul(buf, 10, &val) < 0)
+ if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
val /= 1000;
@@ -1333,7 +1333,7 @@ store_temp(struct device *dev, struct device_attribute *attr,
struct w83795_data *data = i2c_get_clientdata(client);
long tmp;
- if (strict_strtol(buf, 10, &tmp) < 0)
+ if (kstrtol(buf, 10, &tmp) < 0)
return -EINVAL;
mutex_lock(&data->update_lock);
@@ -1394,7 +1394,7 @@ store_dts_ext(struct device *dev, struct device_attribute *attr,
struct w83795_data *data = i2c_get_clientdata(client);
long tmp;
- if (strict_strtol(buf, 10, &tmp) < 0)
+ if (kstrtol(buf, 10, &tmp) < 0)
return -EINVAL;
mutex_lock(&data->update_lock);
@@ -1436,7 +1436,7 @@ store_temp_mode(struct device *dev, struct device_attribute *attr,
unsigned long val;
u8 tmp;
- if (strict_strtoul(buf, 10, &val) < 0)
+ if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
if ((val != 4) && (val != 3))
return -EINVAL;
@@ -1512,7 +1512,7 @@ store_in(struct device *dev, struct device_attribute *attr,
u8 tmp;
u8 lsb_idx;
- if (strict_strtoul(buf, 10, &val) < 0)
+ if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
val = in_to_reg(index, val);
@@ -1569,7 +1569,7 @@ store_sf_setup(struct device *dev, struct device_attribute *attr,
struct w83795_data *data = i2c_get_clientdata(client);
unsigned long val;
- if (strict_strtoul(buf, 10, &val) < 0)
+ if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
switch (nr) {
diff --git a/drivers/hwmon/w83l786ng.c b/drivers/hwmon/w83l786ng.c
index 0254e181893d..063bd9508d8a 100644
--- a/drivers/hwmon/w83l786ng.c
+++ b/drivers/hwmon/w83l786ng.c
@@ -39,7 +39,7 @@ static const unsigned short normal_i2c[] = { 0x2e, 0x2f, I2C_CLIENT_END };
/* Insmod parameters */
-static int reset;
+static bool reset;
module_param(reset, bool, 0);
MODULE_PARM_DESC(reset, "Set to 1 to reset chip, not recommended");
diff --git a/drivers/hwmon/wm831x-hwmon.c b/drivers/hwmon/wm831x-hwmon.c
index 97b1f834a471..9b598ed26020 100644
--- a/drivers/hwmon/wm831x-hwmon.c
+++ b/drivers/hwmon/wm831x-hwmon.c
@@ -209,17 +209,7 @@ static struct platform_driver wm831x_hwmon_driver = {
},
};
-static int __init wm831x_hwmon_init(void)
-{
- return platform_driver_register(&wm831x_hwmon_driver);
-}
-module_init(wm831x_hwmon_init);
-
-static void __exit wm831x_hwmon_exit(void)
-{
- platform_driver_unregister(&wm831x_hwmon_driver);
-}
-module_exit(wm831x_hwmon_exit);
+module_platform_driver(wm831x_hwmon_driver);
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_DESCRIPTION("WM831x Hardware Monitoring");
diff --git a/drivers/hwmon/wm8350-hwmon.c b/drivers/hwmon/wm8350-hwmon.c
index 13290595ca86..3ff67edbdc44 100644
--- a/drivers/hwmon/wm8350-hwmon.c
+++ b/drivers/hwmon/wm8350-hwmon.c
@@ -133,17 +133,7 @@ static struct platform_driver wm8350_hwmon_driver = {
},
};
-static int __init wm8350_hwmon_init(void)
-{
- return platform_driver_register(&wm8350_hwmon_driver);
-}
-module_init(wm8350_hwmon_init);
-
-static void __exit wm8350_hwmon_exit(void)
-{
- platform_driver_unregister(&wm8350_hwmon_driver);
-}
-module_exit(wm8350_hwmon_exit);
+module_platform_driver(wm8350_hwmon_driver);
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_DESCRIPTION("WM8350 Hardware Monitoring");
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index a3afac4be734..3101dd59e379 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -299,11 +299,11 @@ config I2C_AT91
unless your system can cope with those limitations.
config I2C_AU1550
- tristate "Au1550/Au1200 SMBus interface"
+ tristate "Au1550/Au1200/Au1300 SMBus interface"
depends on MIPS_ALCHEMY
help
If you say yes to this option, support will be included for the
- Au1550 and Au1200 SMBus interface.
+ Au1550/Au1200/Au1300 SMBus interface.
This driver can also be built as a module. If so, the module
will be called i2c-au1550.
@@ -682,19 +682,19 @@ config I2C_XILINX
will be called xilinx_i2c.
config I2C_EG20T
- tristate "Intel EG20T PCH / OKI SEMICONDUCTOR IOH(ML7213/ML7223)"
+ tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) I2C"
depends on PCI
help
This driver is for PCH(Platform controller Hub) I2C of EG20T which
is an IOH(Input/Output Hub) for x86 embedded processor.
This driver can access PCH I2C bus device.
- This driver also can be used for OKI SEMICONDUCTOR IOH(Input/
- Output Hub), ML7213 and ML7223.
- ML7213 IOH is for IVI(In-Vehicle Infotainment) use and ML7223 IOH is
- for MP(Media Phone) use.
- ML7213/ML7223 is companion chip for Intel Atom E6xx series.
- ML7213/ML7223 is completely compatible for Intel EG20T PCH.
+ This driver also can be used for LAPIS Semiconductor IOH(Input/
+ Output Hub), ML7213, ML7223 and ML7831.
+ ML7213 IOH is for IVI(In-Vehicle Infotainment) use, ML7223 IOH is
+ for MP(Media Phone) use and ML7831 IOH is for general purpose use.
+ ML7213/ML7223/ML7831 is companion chip for Intel Atom E6xx series.
+ ML7213/ML7223/ML7831 is completely compatible for Intel EG20T PCH.
comment "External I2C/SMBus adapter drivers"
diff --git a/drivers/i2c/busses/i2c-ali1535.c b/drivers/i2c/busses/i2c-ali1535.c
index b6807db7b36f..e66d248fc126 100644
--- a/drivers/i2c/busses/i2c-ali1535.c
+++ b/drivers/i2c/busses/i2c-ali1535.c
@@ -132,7 +132,8 @@
#define ALI1535_SMBIO_EN 0x04 /* SMB I/O Space enable */
static struct pci_driver ali1535_driver;
-static unsigned short ali1535_smba;
+static unsigned long ali1535_smba;
+static unsigned short ali1535_offset;
/* Detect whether a ALI1535 can be found, and initialize it, where necessary.
Note the differences between kernels with the old PCI BIOS interface and
@@ -140,7 +141,7 @@ static unsigned short ali1535_smba;
defined to make the transition easier. */
static int __devinit ali1535_setup(struct pci_dev *dev)
{
- int retval = -ENODEV;
+ int retval;
unsigned char temp;
/* Check the following things:
@@ -149,15 +150,28 @@ static int __devinit ali1535_setup(struct pci_dev *dev)
- We can use the addresses
*/
+ retval = pci_enable_device(dev);
+ if (retval) {
+ dev_err(&dev->dev, "ALI1535_smb can't enable device\n");
+ goto exit;
+ }
+
/* Determine the address of the SMBus area */
- pci_read_config_word(dev, SMBBA, &ali1535_smba);
- ali1535_smba &= (0xffff & ~(ALI1535_SMB_IOSIZE - 1));
- if (ali1535_smba == 0) {
+ pci_read_config_word(dev, SMBBA, &ali1535_offset);
+ dev_dbg(&dev->dev, "ALI1535_smb is at offset 0x%04x\n", ali1535_offset);
+ ali1535_offset &= (0xffff & ~(ALI1535_SMB_IOSIZE - 1));
+ if (ali1535_offset == 0) {
dev_warn(&dev->dev,
"ALI1535_smb region uninitialized - upgrade BIOS?\n");
+ retval = -ENODEV;
goto exit;
}
+ if (pci_resource_flags(dev, 0) & IORESOURCE_IO)
+ ali1535_smba = pci_resource_start(dev, 0) + ali1535_offset;
+ else
+ ali1535_smba = ali1535_offset;
+
retval = acpi_check_region(ali1535_smba, ALI1535_SMB_IOSIZE,
ali1535_driver.name);
if (retval)
@@ -165,8 +179,9 @@ static int __devinit ali1535_setup(struct pci_dev *dev)
if (!request_region(ali1535_smba, ALI1535_SMB_IOSIZE,
ali1535_driver.name)) {
- dev_err(&dev->dev, "ALI1535_smb region 0x%x already in use!\n",
+ dev_err(&dev->dev, "ALI1535_smb region 0x%lx already in use!\n",
ali1535_smba);
+ retval = -EBUSY;
goto exit;
}
@@ -174,6 +189,7 @@ static int __devinit ali1535_setup(struct pci_dev *dev)
pci_read_config_byte(dev, SMBCFG, &temp);
if ((temp & ALI1535_SMBIO_EN) == 0) {
dev_err(&dev->dev, "SMB device not enabled - upgrade BIOS?\n");
+ retval = -ENODEV;
goto exit_free;
}
@@ -181,6 +197,7 @@ static int __devinit ali1535_setup(struct pci_dev *dev)
pci_read_config_byte(dev, SMBHSTCFG, &temp);
if ((temp & 1) == 0) {
dev_err(&dev->dev, "SMBus controller not enabled - upgrade BIOS?\n");
+ retval = -ENODEV;
goto exit_free;
}
@@ -196,14 +213,13 @@ static int __devinit ali1535_setup(struct pci_dev *dev)
*/
pci_read_config_byte(dev, SMBREV, &temp);
dev_dbg(&dev->dev, "SMBREV = 0x%X\n", temp);
- dev_dbg(&dev->dev, "ALI1535_smba = 0x%X\n", ali1535_smba);
+ dev_dbg(&dev->dev, "ALI1535_smba = 0x%lx\n", ali1535_smba);
- retval = 0;
-exit:
- return retval;
+ return 0;
exit_free:
release_region(ali1535_smba, ALI1535_SMB_IOSIZE);
+exit:
return retval;
}
@@ -498,7 +514,7 @@ static int __devinit ali1535_probe(struct pci_dev *dev, const struct pci_device_
ali1535_adapter.dev.parent = &dev->dev;
snprintf(ali1535_adapter.name, sizeof(ali1535_adapter.name),
- "SMBus ALI1535 adapter at %04x", ali1535_smba);
+ "SMBus ALI1535 adapter at %04x", ali1535_offset);
return i2c_add_adapter(&ali1535_adapter);
}
diff --git a/drivers/i2c/busses/i2c-ali1563.c b/drivers/i2c/busses/i2c-ali1563.c
index a409cfcf0629..47ae0091e027 100644
--- a/drivers/i2c/busses/i2c-ali1563.c
+++ b/drivers/i2c/busses/i2c-ali1563.c
@@ -417,7 +417,7 @@ static void __devexit ali1563_remove(struct pci_dev * dev)
ali1563_shutdown(dev);
}
-static const struct pci_device_id ali1563_id_table[] __devinitconst = {
+static DEFINE_PCI_DEVICE_TABLE(ali1563_id_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1563) },
{},
};
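Note: the DEFINE_PCI_DEVICE_TABLE conversions here and in the other i2c bus drivers below are also mechanical; at this point the macro lives in <linux/pci.h> and mainly adds the const/__devinitconst annotations, so that

	static DEFINE_PCI_DEVICE_TABLE(foo_ids) = {
		{ PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1563) },
		{ },
	};

is roughly equivalent to

	static const struct pci_device_id foo_ids[] __devinitconst = {
		{ PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1563) },
		{ },
	};

(foo_ids is a placeholder name; the entry is borrowed from the ali1563 hunk above.)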
diff --git a/drivers/i2c/busses/i2c-ali15x3.c b/drivers/i2c/busses/i2c-ali15x3.c
index 83e8a60cdc86..087ea9caa74d 100644
--- a/drivers/i2c/busses/i2c-ali15x3.c
+++ b/drivers/i2c/busses/i2c-ali15x3.c
@@ -477,7 +477,7 @@ static struct i2c_adapter ali15x3_adapter = {
.algo = &smbus_algorithm,
};
-static const struct pci_device_id ali15x3_ids[] = {
+static DEFINE_PCI_DEVICE_TABLE(ali15x3_ids) = {
{ PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101) },
{ 0, }
};
diff --git a/drivers/i2c/busses/i2c-amd756.c b/drivers/i2c/busses/i2c-amd756.c
index 03bcd07c4697..eb778bf15c18 100644
--- a/drivers/i2c/busses/i2c-amd756.c
+++ b/drivers/i2c/busses/i2c-amd756.c
@@ -308,7 +308,7 @@ static const char* chipname[] = {
"nVidia nForce", "AMD8111",
};
-static const struct pci_device_id amd756_ids[] = {
+static DEFINE_PCI_DEVICE_TABLE(amd756_ids) = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_740B),
.driver_data = AMD756 },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_7413),
diff --git a/drivers/i2c/busses/i2c-amd8111.c b/drivers/i2c/busses/i2c-amd8111.c
index 6b6a6b1d7025..e5ac53b99b04 100644
--- a/drivers/i2c/busses/i2c-amd8111.c
+++ b/drivers/i2c/busses/i2c-amd8111.c
@@ -415,7 +415,7 @@ static const struct i2c_algorithm smbus_algorithm = {
};
-static const struct pci_device_id amd8111_ids[] = {
+static DEFINE_PCI_DEVICE_TABLE(amd8111_ids) = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS2) },
{ 0, }
};
diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
index 305c07504f7e..1679deef9c89 100644
--- a/drivers/i2c/busses/i2c-at91.c
+++ b/drivers/i2c/busses/i2c-at91.c
@@ -295,9 +295,6 @@ static int at91_i2c_resume(struct platform_device *pdev)
#define at91_i2c_resume NULL
#endif
-/* work with "modprobe at91_i2c" from hotplugging or coldplugging */
-MODULE_ALIAS("platform:at91_i2c");
-
static struct platform_driver at91_i2c_driver = {
.probe = at91_i2c_probe,
.remove = __devexit_p(at91_i2c_remove),
@@ -309,19 +306,9 @@ static struct platform_driver at91_i2c_driver = {
},
};
-static int __init at91_i2c_init(void)
-{
- return platform_driver_register(&at91_i2c_driver);
-}
-
-static void __exit at91_i2c_exit(void)
-{
- platform_driver_unregister(&at91_i2c_driver);
-}
-
-module_init(at91_i2c_init);
-module_exit(at91_i2c_exit);
+module_platform_driver(at91_i2c_driver);
MODULE_AUTHOR("Rick Bronson");
MODULE_DESCRIPTION("I2C (TWI) driver for Atmel AT91");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:at91_i2c");
diff --git a/drivers/i2c/busses/i2c-au1550.c b/drivers/i2c/busses/i2c-au1550.c
index f314d7f433d3..582d616db346 100644
--- a/drivers/i2c/busses/i2c-au1550.c
+++ b/drivers/i2c/busses/i2c-au1550.c
@@ -426,20 +426,9 @@ static struct platform_driver au1xpsc_smbus_driver = {
.remove = __devexit_p(i2c_au1550_remove),
};
-static int __init i2c_au1550_init(void)
-{
- return platform_driver_register(&au1xpsc_smbus_driver);
-}
-
-static void __exit i2c_au1550_exit(void)
-{
- platform_driver_unregister(&au1xpsc_smbus_driver);
-}
+module_platform_driver(au1xpsc_smbus_driver);
MODULE_AUTHOR("Dan Malek, Embedded Edge, LLC.");
MODULE_DESCRIPTION("SMBus adapter Alchemy pb1550");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:au1xpsc_smbus");
-
-module_init (i2c_au1550_init);
-module_exit (i2c_au1550_exit);
diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
index b1d9cd28d8da..c1e1096ba069 100644
--- a/drivers/i2c/busses/i2c-cpm.c
+++ b/drivers/i2c/busses/i2c-cpm.c
@@ -724,18 +724,7 @@ static struct platform_driver cpm_i2c_driver = {
},
};
-static int __init cpm_i2c_init(void)
-{
- return platform_driver_register(&cpm_i2c_driver);
-}
-
-static void __exit cpm_i2c_exit(void)
-{
- platform_driver_unregister(&cpm_i2c_driver);
-}
-
-module_init(cpm_i2c_init);
-module_exit(cpm_i2c_exit);
+module_platform_driver(cpm_i2c_driver);
MODULE_AUTHOR("Jochen Friedrich <jochen@scram.de>");
MODULE_DESCRIPTION("I2C-Bus adapter routines for CPM boards");
diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c
index 9e89e7313d62..37f42113af31 100644
--- a/drivers/i2c/busses/i2c-designware-pcidrv.c
+++ b/drivers/i2c/busses/i2c-designware-pcidrv.c
@@ -349,7 +349,7 @@ static void __devexit i2c_dw_pci_remove(struct pci_dev *pdev)
/* work with hotplug and coldplug */
MODULE_ALIAS("i2c_designware-pci");
-DEFINE_PCI_DEVICE_TABLE(i2_designware_pci_ids) = {
+static DEFINE_PCI_DEVICE_TABLE(i2_designware_pci_ids) = {
/* Moorestown */
{ PCI_VDEVICE(INTEL, 0x0802), moorestown_0 },
{ PCI_VDEVICE(INTEL, 0x0803), moorestown_1 },
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 2d3657ab1258..5244c4724df7 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -34,6 +34,7 @@
#include <linux/sched.h>
#include <linux/err.h>
#include <linux/interrupt.h>
+#include <linux/of_i2c.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/slab.h>
@@ -137,6 +138,7 @@ static int __devinit dw_i2c_probe(struct platform_device *pdev)
sizeof(adap->name));
adap->algo = &i2c_dw_algo;
adap->dev.parent = &pdev->dev;
+ adap->dev.of_node = pdev->dev.of_node;
adap->nr = pdev->id;
r = i2c_add_numbered_adapter(adap);
@@ -144,6 +146,7 @@ static int __devinit dw_i2c_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "failure adding adapter\n");
goto err_free_irq;
}
+ of_i2c_register_devices(adap);
return 0;
@@ -187,6 +190,14 @@ static int __devexit dw_i2c_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_OF
+static const struct of_device_id dw_i2c_of_match[] = {
+ { .compatible = "snps,designware-i2c", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, dw_i2c_of_match);
+#endif
+
/* work with hotplug and coldplug */
MODULE_ALIAS("platform:i2c_designware");
@@ -195,6 +206,7 @@ static struct platform_driver dw_i2c_driver = {
.driver = {
.name = "i2c_designware",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(dw_i2c_of_match),
},
};
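Note: the i2c-designware-platdrv changes above wire the driver into the device tree: dw_i2c_of_match lets the platform bus bind nodes with compatible = "snps,designware-i2c", of_i2c_register_devices() then instantiates the child I2C devices described under that node, and of_match_ptr() keeps non-OF builds clean, since the match table itself is only compiled when CONFIG_OF is set. Roughly, from <linux/of.h>:

	#ifdef CONFIG_OF
	#define of_match_ptr(_ptr)	(_ptr)
	#else
	#define of_match_ptr(_ptr)	NULL	/* no reference to a table that may not exist */
	#endif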
diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
index 8cebef49aeac..ca8877641040 100644
--- a/drivers/i2c/busses/i2c-eg20t.c
+++ b/drivers/i2c/busses/i2c-eg20t.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2010 OKI SEMICONDUCTOR CO., LTD.
+ * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -136,7 +136,8 @@
/*
Set the number of I2C instance max
Intel EG20T PCH : 1ch
-OKI SEMICONDUCTOR ML7213 IOH : 2ch
+LAPIS Semiconductor ML7213 IOH : 2ch
+LAPIS Semiconductor ML7831 IOH : 1ch
*/
#define PCH_I2C_MAX_DEV 2
@@ -180,15 +181,17 @@ static int pch_clk = 50000; /* specifies I2C clock speed in KHz */
static wait_queue_head_t pch_event;
static DEFINE_MUTEX(pch_mutex);
-/* Definition for ML7213 by OKI SEMICONDUCTOR */
+/* Definition for ML7213 by LAPIS Semiconductor */
#define PCI_VENDOR_ID_ROHM 0x10DB
#define PCI_DEVICE_ID_ML7213_I2C 0x802D
#define PCI_DEVICE_ID_ML7223_I2C 0x8010
+#define PCI_DEVICE_ID_ML7831_I2C 0x8817
-static struct pci_device_id __devinitdata pch_pcidev_id[] = {
+static DEFINE_PCI_DEVICE_TABLE(pch_pcidev_id) = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PCH_I2C), 1, },
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_I2C), 2, },
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_I2C), 1, },
+ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_I2C), 1, },
{0,}
};
@@ -243,7 +246,7 @@ static void pch_i2c_init(struct i2c_algo_pch_data *adap)
if (pch_clk > PCH_MAX_CLK)
pch_clk = 62500;
- pch_i2cbc = (pch_clk + (pch_i2c_speed * 4)) / pch_i2c_speed * 8;
+ pch_i2cbc = (pch_clk + (pch_i2c_speed * 4)) / (pch_i2c_speed * 8);
/* Set transfer speed in I2CBC */
iowrite32(pch_i2cbc, p + PCH_I2CBC);
@@ -893,6 +896,13 @@ static int __devinit pch_i2c_probe(struct pci_dev *pdev,
/* Set the number of I2C channel instance */
adap_info->ch_num = id->driver_data;
+ ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED,
+ KBUILD_MODNAME, adap_info);
+ if (ret) {
+ pch_pci_err(pdev, "request_irq FAILED\n");
+ goto err_request_irq;
+ }
+
for (i = 0; i < adap_info->ch_num; i++) {
pch_adap = &adap_info->pch_data[i].pch_adapter;
adap_info->pch_i2c_suspended = false;
@@ -910,28 +920,25 @@ static int __devinit pch_i2c_probe(struct pci_dev *pdev,
pch_adap->dev.parent = &pdev->dev;
- ret = i2c_add_adapter(pch_adap);
+ pch_i2c_init(&adap_info->pch_data[i]);
+
+ pch_adap->nr = i;
+ ret = i2c_add_numbered_adapter(pch_adap);
if (ret) {
pch_pci_err(pdev, "i2c_add_adapter[ch:%d] FAILED\n", i);
- goto err_i2c_add_adapter;
+ goto err_add_adapter;
}
-
- pch_i2c_init(&adap_info->pch_data[i]);
- }
- ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED,
- KBUILD_MODNAME, adap_info);
- if (ret) {
- pch_pci_err(pdev, "request_irq FAILED\n");
- goto err_i2c_add_adapter;
}
pci_set_drvdata(pdev, adap_info);
pch_pci_dbg(pdev, "returns %d.\n", ret);
return 0;
-err_i2c_add_adapter:
+err_add_adapter:
for (j = 0; j < i; j++)
i2c_del_adapter(&adap_info->pch_data[j].pch_adapter);
+ free_irq(pdev->irq, adap_info);
+err_request_irq:
pci_iounmap(pdev, base_addr);
err_pci_iomap:
pci_release_regions(pdev);
@@ -1056,8 +1063,8 @@ static void __exit pch_pci_exit(void)
}
module_exit(pch_pci_exit);
-MODULE_DESCRIPTION("Intel EG20T PCH/OKI SEMICONDUCTOR ML7213 IOH I2C Driver");
+MODULE_DESCRIPTION("Intel EG20T PCH/LAPIS Semico ML7213/ML7223/ML7831 IOH I2C");
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Tomoya MORINAGA. <tomoya-linux@dsn.okisemi.com>");
+MODULE_AUTHOR("Tomoya MORINAGA. <tomoya-linux@dsn.lapis-semi.com>");
module_param(pch_i2c_speed, int, (S_IRUSR | S_IWUSR));
module_param(pch_clk, int, (S_IRUSR | S_IWUSR));
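Note: besides the LAPIS Semiconductor renames and the new ML7831 device ID, the i2c-eg20t hunks fix operator precedence in the I2CBC clock-ratio calculation and reorder the probe so the shared IRQ handler is requested before any adapter is added (the error path now frees it as well). Assuming the driver's default pch_i2c_speed of 100 kHz (that default is not visible in this hunk) and the pch_clk default of 50000 kHz shown above:

	/* old: (pch_clk + pch_i2c_speed * 4) / pch_i2c_speed * 8
	 *	= (50000 + 400) / 100 * 8	= 504 * 8 = 4032
	 * new: (pch_clk + pch_i2c_speed * 4) / (pch_i2c_speed * 8)
	 *	= 50400 / 800			= 63
	 */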
diff --git a/drivers/i2c/busses/i2c-highlander.c b/drivers/i2c/busses/i2c-highlander.c
index 63bb1cc2a042..19515df61021 100644
--- a/drivers/i2c/busses/i2c-highlander.c
+++ b/drivers/i2c/busses/i2c-highlander.c
@@ -52,7 +52,7 @@ struct highlander_i2c_dev {
size_t buf_len;
};
-static int iic_force_poll, iic_force_normal;
+static bool iic_force_poll, iic_force_normal;
static int iic_timeout = 1000, iic_read_delay;
static inline void highlander_i2c_irq_enable(struct highlander_i2c_dev *dev)
@@ -468,18 +468,7 @@ static struct platform_driver highlander_i2c_driver = {
.remove = __devexit_p(highlander_i2c_remove),
};
-static int __init highlander_i2c_init(void)
-{
- return platform_driver_register(&highlander_i2c_driver);
-}
-
-static void __exit highlander_i2c_exit(void)
-{
- platform_driver_unregister(&highlander_i2c_driver);
-}
-
-module_init(highlander_i2c_init);
-module_exit(highlander_i2c_exit);
+module_platform_driver(highlander_i2c_driver);
MODULE_AUTHOR("Paul Mundt");
MODULE_DESCRIPTION("Renesas Highlander FPGA I2C/SMBus adapter");
diff --git a/drivers/i2c/busses/i2c-hydra.c b/drivers/i2c/busses/i2c-hydra.c
index 9ff1695d8458..c527de17db4f 100644
--- a/drivers/i2c/busses/i2c-hydra.c
+++ b/drivers/i2c/busses/i2c-hydra.c
@@ -105,7 +105,7 @@ static struct i2c_adapter hydra_adap = {
.algo_data = &hydra_bit_data,
};
-static const struct pci_device_id hydra_ids[] = {
+static DEFINE_PCI_DEVICE_TABLE(hydra_ids) = {
{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_HYDRA) },
{ 0, }
};
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index ab26840d0c70..5d2e2816831f 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -609,7 +609,7 @@ static const struct i2c_algorithm smbus_algorithm = {
.functionality = i801_func,
};
-static const struct pci_device_id i801_ids[] = {
+static DEFINE_PCI_DEVICE_TABLE(i801_ids) = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_3) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_3) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_2) },
diff --git a/drivers/i2c/busses/i2c-ibm_iic.c b/drivers/i2c/busses/i2c-ibm_iic.c
index 3c110fbc409b..806e225f3de7 100644
--- a/drivers/i2c/busses/i2c-ibm_iic.c
+++ b/drivers/i2c/busses/i2c-ibm_iic.c
@@ -51,11 +51,11 @@
MODULE_DESCRIPTION("IBM IIC driver v" DRIVER_VERSION);
MODULE_LICENSE("GPL");
-static int iic_force_poll;
+static bool iic_force_poll;
module_param(iic_force_poll, bool, 0);
MODULE_PARM_DESC(iic_force_poll, "Force polling mode");
-static int iic_force_fast;
+static bool iic_force_fast;
module_param(iic_force_fast, bool, 0);
MODULE_PARM_DESC(iic_force_fast, "Force fast mode (400 kHz)");
@@ -815,15 +815,4 @@ static struct platform_driver ibm_iic_driver = {
.remove = __devexit_p(iic_remove),
};
-static int __init iic_init(void)
-{
- return platform_driver_register(&ibm_iic_driver);
-}
-
-static void __exit iic_exit(void)
-{
- platform_driver_unregister(&ibm_iic_driver);
-}
-
-module_init(iic_init);
-module_exit(iic_exit);
+module_platform_driver(ibm_iic_driver);
diff --git a/drivers/i2c/busses/i2c-intel-mid.c b/drivers/i2c/busses/i2c-intel-mid.c
index e828ac85cfa7..365bad5b890b 100644
--- a/drivers/i2c/busses/i2c-intel-mid.c
+++ b/drivers/i2c/busses/i2c-intel-mid.c
@@ -1093,7 +1093,7 @@ static void __devexit intel_mid_i2c_remove(struct pci_dev *dev)
pci_release_region(dev, 0);
}
-static struct pci_device_id intel_mid_i2c_ids[] = {
+static DEFINE_PCI_DEVICE_TABLE(intel_mid_i2c_ids) = {
/* Moorestown */
{ PCI_VDEVICE(INTEL, 0x0802), 0 },
{ PCI_VDEVICE(INTEL, 0x0803), 1 },
diff --git a/drivers/i2c/busses/i2c-iop3xx.c b/drivers/i2c/busses/i2c-iop3xx.c
index f09c9319a2ba..93f147a96b62 100644
--- a/drivers/i2c/busses/i2c-iop3xx.c
+++ b/drivers/i2c/busses/i2c-iop3xx.c
@@ -523,21 +523,7 @@ static struct platform_driver iop3xx_i2c_driver = {
},
};
-static int __init
-i2c_iop3xx_init (void)
-{
- return platform_driver_register(&iop3xx_i2c_driver);
-}
-
-static void __exit
-i2c_iop3xx_exit (void)
-{
- platform_driver_unregister(&iop3xx_i2c_driver);
- return;
-}
-
-module_init (i2c_iop3xx_init);
-module_exit (i2c_iop3xx_exit);
+module_platform_driver(iop3xx_i2c_driver);
MODULE_AUTHOR("D-TACQ Solutions Ltd <www.d-tacq.com>");
MODULE_DESCRIPTION("IOP3xx iic algorithm and driver");
diff --git a/drivers/i2c/busses/i2c-isch.c b/drivers/i2c/busses/i2c-isch.c
index 0682f8f277b0..6561d275b8cf 100644
--- a/drivers/i2c/busses/i2c-isch.c
+++ b/drivers/i2c/busses/i2c-isch.c
@@ -306,20 +306,9 @@ static struct platform_driver smbus_sch_driver = {
.remove = __devexit_p(smbus_sch_remove),
};
-static int __init i2c_sch_init(void)
-{
- return platform_driver_register(&smbus_sch_driver);
-}
-
-static void __exit i2c_sch_exit(void)
-{
- platform_driver_unregister(&smbus_sch_driver);
-}
+module_platform_driver(smbus_sch_driver);
MODULE_AUTHOR("Jacob Pan <jacob.jun.pan@intel.com>");
MODULE_DESCRIPTION("Intel SCH SMBus driver");
MODULE_LICENSE("GPL");
-
-module_init(i2c_sch_init);
-module_exit(i2c_sch_exit);
MODULE_ALIAS("platform:isch_smbus");
diff --git a/drivers/i2c/busses/i2c-ixp2000.c b/drivers/i2c/busses/i2c-ixp2000.c
index c01e9519f6c1..5d263f9014d6 100644
--- a/drivers/i2c/busses/i2c-ixp2000.c
+++ b/drivers/i2c/busses/i2c-ixp2000.c
@@ -148,18 +148,7 @@ static struct platform_driver ixp2000_i2c_driver = {
},
};
-static int __init ixp2000_i2c_init(void)
-{
- return platform_driver_register(&ixp2000_i2c_driver);
-}
-
-static void __exit ixp2000_i2c_exit(void)
-{
- platform_driver_unregister(&ixp2000_i2c_driver);
-}
-
-module_init(ixp2000_i2c_init);
-module_exit(ixp2000_i2c_exit);
+module_platform_driver(ixp2000_i2c_driver);
MODULE_AUTHOR ("Deepak Saxena <dsaxena@plexity.net>");
MODULE_DESCRIPTION("IXP2000 GPIO-based I2C bus driver");
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
index 107397a606b4..a8ebb84e23f9 100644
--- a/drivers/i2c/busses/i2c-mpc.c
+++ b/drivers/i2c/busses/i2c-mpc.c
@@ -715,18 +715,7 @@ static struct platform_driver mpc_i2c_driver = {
},
};
-static int __init fsl_i2c_init(void)
-{
- return platform_driver_register(&mpc_i2c_driver);
-}
-
-static void __exit fsl_i2c_exit(void)
-{
- platform_driver_unregister(&mpc_i2c_driver);
-}
-
-module_init(fsl_i2c_init);
-module_exit(fsl_i2c_exit);
+module_platform_driver(mpc_i2c_driver);
MODULE_AUTHOR("Adrian Cox <adrian@humboldt.co.uk>");
MODULE_DESCRIPTION("I2C-Bus adapter for MPC107 bridge and "
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index a9941c65f226..4f44a33017b0 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -611,20 +611,7 @@ static struct platform_driver mv64xxx_i2c_driver = {
},
};
-static int __init
-mv64xxx_i2c_init(void)
-{
- return platform_driver_register(&mv64xxx_i2c_driver);
-}
-
-static void __exit
-mv64xxx_i2c_exit(void)
-{
- platform_driver_unregister(&mv64xxx_i2c_driver);
-}
-
-module_init(mv64xxx_i2c_init);
-module_exit(mv64xxx_i2c_exit);
+module_platform_driver(mv64xxx_i2c_driver);
MODULE_AUTHOR("Mark A. Greer <mgreer@mvista.com>");
MODULE_DESCRIPTION("Marvell mv64xxx host bridge i2c ctlr driver");
diff --git a/drivers/i2c/busses/i2c-nforce2.c b/drivers/i2c/busses/i2c-nforce2.c
index ff1e127dfea8..43a96a123920 100644
--- a/drivers/i2c/busses/i2c-nforce2.c
+++ b/drivers/i2c/busses/i2c-nforce2.c
@@ -309,7 +309,7 @@ static struct i2c_algorithm smbus_algorithm = {
};
-static const struct pci_device_id nforce2_ids[] = {
+static DEFINE_PCI_DEVICE_TABLE(nforce2_ids) = {
{ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3_SMBUS) },
@@ -356,7 +356,7 @@ static int __devinit nforce2_probe_smb (struct pci_dev *dev, int bar,
error = acpi_check_region(smbus->base, smbus->size,
nforce2_driver.name);
if (error)
- return -1;
+ return error;
if (!request_region(smbus->base, smbus->size, nforce2_driver.name)) {
dev_err(&smbus->adapter.dev, "Error requesting region %02x .. %02X for %s\n",
diff --git a/drivers/i2c/busses/i2c-nuc900.c b/drivers/i2c/busses/i2c-nuc900.c
index 835e47b39bc2..03b615778887 100644
--- a/drivers/i2c/busses/i2c-nuc900.c
+++ b/drivers/i2c/busses/i2c-nuc900.c
@@ -593,7 +593,7 @@ static int __devinit nuc900_i2c_probe(struct platform_device *pdev)
i2c->adap.algo_data = i2c;
i2c->adap.dev.parent = &pdev->dev;
- mfp_set_groupg(&pdev->dev);
+ mfp_set_groupg(&pdev->dev, NULL);
clk_get_rate(i2c->clk);
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index 1b46a9d9f907..18068dee48f1 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -394,9 +394,6 @@ static struct of_device_id ocores_i2c_match[] = {
};
MODULE_DEVICE_TABLE(of, ocores_i2c_match);
-/* work with hotplug and coldplug */
-MODULE_ALIAS("platform:ocores-i2c");
-
static struct platform_driver ocores_i2c_driver = {
.probe = ocores_i2c_probe,
.remove = __devexit_p(ocores_i2c_remove),
@@ -409,19 +406,9 @@ static struct platform_driver ocores_i2c_driver = {
},
};
-static int __init ocores_i2c_init(void)
-{
- return platform_driver_register(&ocores_i2c_driver);
-}
-
-static void __exit ocores_i2c_exit(void)
-{
- platform_driver_unregister(&ocores_i2c_driver);
-}
-
-module_init(ocores_i2c_init);
-module_exit(ocores_i2c_exit);
+module_platform_driver(ocores_i2c_driver);
MODULE_AUTHOR("Peter Korsgaard <jacmet@sunsite.dk>");
MODULE_DESCRIPTION("OpenCores I2C bus driver");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:ocores-i2c");
diff --git a/drivers/i2c/busses/i2c-octeon.c b/drivers/i2c/busses/i2c-octeon.c
index 56dbe54e8811..ee139a598814 100644
--- a/drivers/i2c/busses/i2c-octeon.c
+++ b/drivers/i2c/busses/i2c-octeon.c
@@ -629,24 +629,10 @@ static struct platform_driver octeon_i2c_driver = {
},
};
-static int __init octeon_i2c_init(void)
-{
- int rv;
-
- rv = platform_driver_register(&octeon_i2c_driver);
- return rv;
-}
-
-static void __exit octeon_i2c_exit(void)
-{
- platform_driver_unregister(&octeon_i2c_driver);
-}
+module_platform_driver(octeon_i2c_driver);
MODULE_AUTHOR("Michael Lawnick <michael.lawnick.ext@nsn.com>");
MODULE_DESCRIPTION("I2C-Bus adapter for Cavium OCTEON processors");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_NAME);
-
-module_init(octeon_i2c_init);
-module_exit(octeon_i2c_exit);
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index a43d0023446a..f713eac55047 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -37,6 +37,9 @@
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_i2c.h>
+#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/i2c-omap.h>
#include <linux/pm_runtime.h>
@@ -182,7 +185,9 @@ struct omap_i2c_dev {
u32 latency; /* maximum mpu wkup latency */
void (*set_mpu_wkup_lat)(struct device *dev,
long latency);
- u32 speed; /* Speed of bus in Khz */
+ u32 speed; /* Speed of bus in kHz */
+ u32 dtrev; /* extra revision from DT */
+ u32 flags;
u16 cmd_err;
u8 *buf;
u8 *regs;
@@ -235,7 +240,7 @@ static const u8 reg_map_ip_v2[] = {
[OMAP_I2C_BUF_REG] = 0x94,
[OMAP_I2C_CNT_REG] = 0x98,
[OMAP_I2C_DATA_REG] = 0x9c,
- [OMAP_I2C_SYSC_REG] = 0x20,
+ [OMAP_I2C_SYSC_REG] = 0x10,
[OMAP_I2C_CON_REG] = 0xa4,
[OMAP_I2C_OA_REG] = 0xa8,
[OMAP_I2C_SA_REG] = 0xac,
@@ -266,11 +271,7 @@ static inline u16 omap_i2c_read_reg(struct omap_i2c_dev *i2c_dev, int reg)
static void omap_i2c_unidle(struct omap_i2c_dev *dev)
{
- struct omap_i2c_bus_platform_data *pdata;
-
- pdata = dev->dev->platform_data;
-
- if (pdata->flags & OMAP_I2C_FLAG_RESET_REGS_POSTIDLE) {
+ if (dev->flags & OMAP_I2C_FLAG_RESET_REGS_POSTIDLE) {
omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0);
omap_i2c_write_reg(dev, OMAP_I2C_PSC_REG, dev->pscstate);
omap_i2c_write_reg(dev, OMAP_I2C_SCLL_REG, dev->scllstate);
@@ -291,13 +292,10 @@ static void omap_i2c_unidle(struct omap_i2c_dev *dev)
static void omap_i2c_idle(struct omap_i2c_dev *dev)
{
- struct omap_i2c_bus_platform_data *pdata;
u16 iv;
- pdata = dev->dev->platform_data;
-
dev->iestate = omap_i2c_read_reg(dev, OMAP_I2C_IE_REG);
- if (pdata->rev == OMAP_I2C_IP_VERSION_2)
+ if (dev->dtrev == OMAP_I2C_IP_VERSION_2)
omap_i2c_write_reg(dev, OMAP_I2C_IP_V2_IRQENABLE_CLR, 1);
else
omap_i2c_write_reg(dev, OMAP_I2C_IE_REG, 0);
@@ -320,9 +318,6 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
unsigned long timeout;
unsigned long internal_clk = 0;
struct clk *fclk;
- struct omap_i2c_bus_platform_data *pdata;
-
- pdata = dev->dev->platform_data;
if (dev->rev >= OMAP_I2C_OMAP1_REV_2) {
/* Disable I2C controller before soft reset */
@@ -373,7 +368,7 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
}
omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0);
- if (pdata->flags & OMAP_I2C_FLAG_ALWAYS_ARMXOR_CLK) {
+ if (dev->flags & OMAP_I2C_FLAG_ALWAYS_ARMXOR_CLK) {
/*
* The I2C functional clock is the armxor_ck, so there's
* no need to get "armxor_ck" separately. Now, if OMAP2420
@@ -397,7 +392,7 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
psc = fclk_rate / 12000000;
}
- if (!(pdata->flags & OMAP_I2C_FLAG_SIMPLE_CLOCK)) {
+ if (!(dev->flags & OMAP_I2C_FLAG_SIMPLE_CLOCK)) {
/*
* HSI2C controller internal clk rate should be 19.2 Mhz for
@@ -406,7 +401,7 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
* The filter is iclk (fclk for HS) period.
*/
if (dev->speed > 400 ||
- pdata->flags & OMAP_I2C_FLAG_FORCE_19200_INT_CLK)
+ dev->flags & OMAP_I2C_FLAG_FORCE_19200_INT_CLK)
internal_clk = 19200;
else if (dev->speed > 100)
internal_clk = 9600;
@@ -475,7 +470,7 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
dev->errata = 0;
- if (pdata->flags & OMAP_I2C_FLAG_APPLY_ERRATA_I207)
+ if (dev->flags & OMAP_I2C_FLAG_APPLY_ERRATA_I207)
dev->errata |= I2C_OMAP_ERRATA_I207;
/* Enable interrupts */
@@ -484,7 +479,7 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
OMAP_I2C_IE_AL) | ((dev->fifo_size) ?
(OMAP_I2C_IE_RDR | OMAP_I2C_IE_XDR) : 0);
omap_i2c_write_reg(dev, OMAP_I2C_IE_REG, dev->iestate);
- if (pdata->flags & OMAP_I2C_FLAG_RESET_REGS_POSTIDLE) {
+ if (dev->flags & OMAP_I2C_FLAG_RESET_REGS_POSTIDLE) {
dev->pscstate = psc;
dev->scllstate = scll;
dev->sclhstate = sclh;
@@ -804,9 +799,6 @@ omap_i2c_isr(int this_irq, void *dev_id)
u16 bits;
u16 stat, w;
int err, count = 0;
- struct omap_i2c_bus_platform_data *pdata;
-
- pdata = dev->dev->platform_data;
if (pm_runtime_suspended(dev->dev))
return IRQ_NONE;
@@ -830,11 +822,9 @@ complete:
~(OMAP_I2C_STAT_RRDY | OMAP_I2C_STAT_RDR |
OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR));
- if (stat & OMAP_I2C_STAT_NACK) {
+ if (stat & OMAP_I2C_STAT_NACK)
err |= OMAP_I2C_STAT_NACK;
- omap_i2c_write_reg(dev, OMAP_I2C_CON_REG,
- OMAP_I2C_CON_STP);
- }
+
if (stat & OMAP_I2C_STAT_AL) {
dev_err(dev->dev, "Arbitration lost\n");
err |= OMAP_I2C_STAT_AL;
@@ -875,7 +865,7 @@ complete:
* Data reg in 2430, omap3 and
* omap4 is 8 bit wide
*/
- if (pdata->flags &
+ if (dev->flags &
OMAP_I2C_FLAG_16BIT_DATA_REG) {
if (dev->buf_len) {
*dev->buf++ = w >> 8;
@@ -918,7 +908,7 @@ complete:
* Data reg in 2430, omap3 and
* omap4 is 8 bit wide
*/
- if (pdata->flags &
+ if (dev->flags &
OMAP_I2C_FLAG_16BIT_DATA_REG) {
if (dev->buf_len) {
w |= *dev->buf++ << 8;
@@ -965,6 +955,32 @@ static const struct i2c_algorithm omap_i2c_algo = {
.functionality = omap_i2c_func,
};
+#ifdef CONFIG_OF
+static struct omap_i2c_bus_platform_data omap3_pdata = {
+ .rev = OMAP_I2C_IP_VERSION_1,
+ .flags = OMAP_I2C_FLAG_APPLY_ERRATA_I207 |
+ OMAP_I2C_FLAG_RESET_REGS_POSTIDLE |
+ OMAP_I2C_FLAG_BUS_SHIFT_2,
+};
+
+static struct omap_i2c_bus_platform_data omap4_pdata = {
+ .rev = OMAP_I2C_IP_VERSION_2,
+};
+
+static const struct of_device_id omap_i2c_of_match[] = {
+ {
+ .compatible = "ti,omap4-i2c",
+ .data = &omap4_pdata,
+ },
+ {
+ .compatible = "ti,omap3-i2c",
+ .data = &omap3_pdata,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, omap_i2c_of_match);
+#endif
+
static int __devinit
omap_i2c_probe(struct platform_device *pdev)
{
@@ -972,9 +988,10 @@ omap_i2c_probe(struct platform_device *pdev)
struct i2c_adapter *adap;
struct resource *mem, *irq, *ioarea;
struct omap_i2c_bus_platform_data *pdata = pdev->dev.platform_data;
+ struct device_node *node = pdev->dev.of_node;
+ const struct of_device_id *match;
irq_handler_t isr;
int r;
- u32 speed = 0;
/* NOTE: driver uses the static register mapping */
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1001,15 +1018,24 @@ omap_i2c_probe(struct platform_device *pdev)
goto err_release_region;
}
- if (pdata != NULL) {
- speed = pdata->clkrate;
+ match = of_match_device(omap_i2c_of_match, &pdev->dev);
+ if (match) {
+ u32 freq = 100000; /* default to 100000 Hz */
+
+ pdata = match->data;
+ dev->dtrev = pdata->rev;
+ dev->flags = pdata->flags;
+
+ of_property_read_u32(node, "clock-frequency", &freq);
+ /* convert DT freq value in Hz into kHz for speed */
+ dev->speed = freq / 1000;
+ } else if (pdata != NULL) {
+ dev->speed = pdata->clkrate;
+ dev->flags = pdata->flags;
dev->set_mpu_wkup_lat = pdata->set_mpu_wkup_lat;
- } else {
- speed = 100; /* Default speed */
- dev->set_mpu_wkup_lat = NULL;
+ dev->dtrev = pdata->rev;
}
- dev->speed = speed;
dev->dev = &pdev->dev;
dev->irq = irq->start;
dev->base = ioremap(mem->start, resource_size(mem));
@@ -1020,9 +1046,9 @@ omap_i2c_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dev);
- dev->reg_shift = (pdata->flags >> OMAP_I2C_FLAG_BUS_SHIFT__SHIFT) & 3;
+ dev->reg_shift = (dev->flags >> OMAP_I2C_FLAG_BUS_SHIFT__SHIFT) & 3;
- if (pdata->rev == OMAP_I2C_IP_VERSION_2)
+ if (dev->dtrev == OMAP_I2C_IP_VERSION_2)
dev->regs = (u8 *)reg_map_ip_v2;
else
dev->regs = (u8 *)reg_map_ip_v1;
@@ -1035,7 +1061,7 @@ omap_i2c_probe(struct platform_device *pdev)
if (dev->rev <= OMAP_I2C_REV_ON_3430)
dev->errata |= I2C_OMAP3_1P153;
- if (!(pdata->flags & OMAP_I2C_FLAG_NO_FIFO)) {
+ if (!(dev->flags & OMAP_I2C_FLAG_NO_FIFO)) {
u16 s;
/* Set up the fifo size - Get total size */
@@ -1047,17 +1073,18 @@ omap_i2c_probe(struct platform_device *pdev)
* size. This is to ensure that we can handle the status on int
* call back latencies.
*/
- if (dev->rev >= OMAP_I2C_REV_ON_3530_4430) {
- dev->fifo_size = 0;
+
+ dev->fifo_size = (dev->fifo_size / 2);
+
+ if (dev->rev >= OMAP_I2C_REV_ON_3530_4430)
dev->b_hw = 0; /* Disable hardware fixes */
- } else {
- dev->fifo_size = (dev->fifo_size / 2);
+ else
dev->b_hw = 1; /* Enable hardware fixes */
- }
+
/* calculate wakeup latency constraint for MPU */
if (dev->set_mpu_wkup_lat != NULL)
dev->latency = (1000000 * dev->fifo_size) /
- (1000 * speed / 8);
+ (1000 * dev->speed / 8);
}
/* reset ASAP, clearing any IRQs */
@@ -1073,7 +1100,7 @@ omap_i2c_probe(struct platform_device *pdev)
}
dev_info(dev->dev, "bus %d rev%d.%d.%d at %d kHz\n", pdev->id,
- pdata->rev, dev->rev >> 4, dev->rev & 0xf, dev->speed);
+ dev->dtrev, dev->rev >> 4, dev->rev & 0xf, dev->speed);
pm_runtime_put(dev->dev);
@@ -1084,6 +1111,7 @@ omap_i2c_probe(struct platform_device *pdev)
strlcpy(adap->name, "OMAP I2C adapter", sizeof(adap->name));
adap->algo = &omap_i2c_algo;
adap->dev.parent = &pdev->dev;
+ adap->dev.of_node = pdev->dev.of_node;
/* i2c device drivers may be active on return from add_adapter() */
adap->nr = pdev->id;
@@ -1093,6 +1121,8 @@ omap_i2c_probe(struct platform_device *pdev)
goto err_free_irq;
}
+ of_i2c_register_devices(adap);
+
return 0;
err_free_irq:
@@ -1165,6 +1195,7 @@ static struct platform_driver omap_i2c_driver = {
.name = "omap_i2c",
.owner = THIS_MODULE,
.pm = OMAP_I2C_PM_OPS,
+ .of_match_table = of_match_ptr(omap_i2c_of_match),
},
};
diff --git a/drivers/i2c/busses/i2c-pasemi.c b/drivers/i2c/busses/i2c-pasemi.c
index 837b8c1aa02a..eaaea73209c5 100644
--- a/drivers/i2c/busses/i2c-pasemi.c
+++ b/drivers/i2c/busses/i2c-pasemi.c
@@ -401,7 +401,7 @@ static void __devexit pasemi_smb_remove(struct pci_dev *dev)
kfree(smbus);
}
-static const struct pci_device_id pasemi_smb_ids[] = {
+static DEFINE_PCI_DEVICE_TABLE(pasemi_smb_ids) = {
{ PCI_DEVICE(0x1959, 0xa003) },
{ 0, }
};
diff --git a/drivers/i2c/busses/i2c-pca-platform.c b/drivers/i2c/busses/i2c-pca-platform.c
index ace67995d7de..2adbf1a8fdea 100644
--- a/drivers/i2c/busses/i2c-pca-platform.c
+++ b/drivers/i2c/busses/i2c-pca-platform.c
@@ -286,20 +286,8 @@ static struct platform_driver i2c_pca_pf_driver = {
},
};
-static int __init i2c_pca_pf_init(void)
-{
- return platform_driver_register(&i2c_pca_pf_driver);
-}
-
-static void __exit i2c_pca_pf_exit(void)
-{
- platform_driver_unregister(&i2c_pca_pf_driver);
-}
+module_platform_driver(i2c_pca_pf_driver);
MODULE_AUTHOR("Wolfram Sang <w.sang@pengutronix.de>");
MODULE_DESCRIPTION("I2C-PCA9564/PCA9665 platform driver");
MODULE_LICENSE("GPL");
-
-module_init(i2c_pca_pf_init);
-module_exit(i2c_pca_pf_exit);
-
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index 6d14ac2e3c41..c14d48dd601a 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -472,7 +472,7 @@ static struct i2c_adapter piix4_adapter = {
.algo = &smbus_algorithm,
};
-static const struct pci_device_id piix4_ids[] = {
+static DEFINE_PCI_DEVICE_TABLE(piix4_ids) = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443MX_3) },
{ PCI_DEVICE(PCI_VENDOR_ID_EFAR, PCI_DEVICE_ID_EFAR_SLC90E66_3) },
diff --git a/drivers/i2c/busses/i2c-pmcmsp.c b/drivers/i2c/busses/i2c-pmcmsp.c
index 127051b06921..07b7447ecbc9 100644
--- a/drivers/i2c/busses/i2c-pmcmsp.c
+++ b/drivers/i2c/busses/i2c-pmcmsp.c
@@ -627,9 +627,6 @@ static struct i2c_adapter pmcmsptwi_adapter = {
.name = DRV_NAME,
};
-/* work with hotplug and coldplug */
-MODULE_ALIAS("platform:" DRV_NAME);
-
static struct platform_driver pmcmsptwi_driver = {
.probe = pmcmsptwi_probe,
.remove = __devexit_p(pmcmsptwi_remove),
@@ -639,18 +636,8 @@ static struct platform_driver pmcmsptwi_driver = {
},
};
-static int __init pmcmsptwi_init(void)
-{
- return platform_driver_register(&pmcmsptwi_driver);
-}
-
-static void __exit pmcmsptwi_exit(void)
-{
- platform_driver_unregister(&pmcmsptwi_driver);
-}
+module_platform_driver(pmcmsptwi_driver);
MODULE_DESCRIPTION("PMC MSP TWI/SMBus/I2C driver");
MODULE_LICENSE("GPL");
-
-module_init(pmcmsptwi_init);
-module_exit(pmcmsptwi_exit);
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/i2c/busses/i2c-powermac.c b/drivers/i2c/busses/i2c-powermac.c
index b289ec99eeba..7b397c6f607e 100644
--- a/drivers/i2c/busses/i2c-powermac.c
+++ b/drivers/i2c/busses/i2c-powermac.c
@@ -312,10 +312,6 @@ static int __devinit i2c_powermac_probe(struct platform_device *dev)
return rc;
}
-
-/* work with hotplug and coldplug */
-MODULE_ALIAS("platform:i2c-powermac");
-
static struct platform_driver i2c_powermac_driver = {
.probe = i2c_powermac_probe,
.remove = __devexit_p(i2c_powermac_remove),
@@ -325,17 +321,6 @@ static struct platform_driver i2c_powermac_driver = {
},
};
-static int __init i2c_powermac_init(void)
-{
- platform_driver_register(&i2c_powermac_driver);
- return 0;
-}
+module_platform_driver(i2c_powermac_driver);
-
-static void __exit i2c_powermac_cleanup(void)
-{
- platform_driver_unregister(&i2c_powermac_driver);
-}
-
-module_init(i2c_powermac_init);
-module_exit(i2c_powermac_cleanup);
+MODULE_ALIAS("platform:i2c-powermac");
diff --git a/drivers/i2c/busses/i2c-puv3.c b/drivers/i2c/busses/i2c-puv3.c
index fac673940849..93709fbe30eb 100644
--- a/drivers/i2c/busses/i2c-puv3.c
+++ b/drivers/i2c/busses/i2c-puv3.c
@@ -276,8 +276,6 @@ static int puv3_i2c_resume(struct platform_device *dev)
#define puv3_i2c_resume NULL
#endif
-MODULE_ALIAS("platform:puv3_i2c");
-
static struct platform_driver puv3_i2c_driver = {
.probe = puv3_i2c_probe,
.remove = __devexit_p(puv3_i2c_remove),
@@ -289,18 +287,8 @@ static struct platform_driver puv3_i2c_driver = {
}
};
-static int __init puv3_i2c_init(void)
-{
- return platform_driver_register(&puv3_i2c_driver);
-}
-
-static void __exit puv3_i2c_exit(void)
-{
- platform_driver_unregister(&puv3_i2c_driver);
-}
-
-module_init(puv3_i2c_init);
-module_exit(puv3_i2c_exit);
+module_platform_driver(puv3_i2c_driver);
MODULE_DESCRIPTION("PKUnity v3 I2C driver");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:puv3_i2c");
diff --git a/drivers/i2c/busses/i2c-pxa-pci.c b/drivers/i2c/busses/i2c-pxa-pci.c
index 632e088760a3..a05817980556 100644
--- a/drivers/i2c/busses/i2c-pxa-pci.c
+++ b/drivers/i2c/busses/i2c-pxa-pci.c
@@ -150,7 +150,7 @@ static void __devexit ce4100_i2c_remove(struct pci_dev *dev)
kfree(sds);
}
-static struct pci_device_id ce4100_i2c_devices[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(ce4100_i2c_devices) = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2e68)},
{ },
};
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 2754cef86a06..4c1718081685 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -534,6 +534,7 @@ static int s3c24xx_i2c_doxfer(struct s3c24xx_i2c *i2c,
/* first, try busy waiting briefly */
do {
+ cpu_relax();
iicstat = readl(i2c->regs + S3C2410_IICSTAT);
} while ((iicstat & S3C2410_IICSTAT_START) && --spins);
@@ -786,7 +787,7 @@ static void s3c24xx_i2c_dt_gpio_free(struct s3c24xx_i2c *i2c)
#else
static int s3c24xx_i2c_parse_dt_gpio(struct s3c24xx_i2c *i2c)
{
- return -EINVAL;
+ return 0;
}
static void s3c24xx_i2c_dt_gpio_free(struct s3c24xx_i2c *i2c)
diff --git a/drivers/i2c/busses/i2c-sh7760.c b/drivers/i2c/busses/i2c-sh7760.c
index a67132b2e092..c0c9dffbdb12 100644
--- a/drivers/i2c/busses/i2c-sh7760.c
+++ b/drivers/i2c/busses/i2c-sh7760.c
@@ -560,18 +560,7 @@ static struct platform_driver sh7760_i2c_drv = {
.remove = __devexit_p(sh7760_i2c_remove),
};
-static int __init sh7760_i2c_init(void)
-{
- return platform_driver_register(&sh7760_i2c_drv);
-}
-
-static void __exit sh7760_i2c_exit(void)
-{
- platform_driver_unregister(&sh7760_i2c_drv);
-}
-
-module_init(sh7760_i2c_init);
-module_exit(sh7760_i2c_exit);
+module_platform_driver(sh7760_i2c_drv);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SH7760 I2C bus driver");
diff --git a/drivers/i2c/busses/i2c-simtec.c b/drivers/i2c/busses/i2c-simtec.c
index 2fc08fbf67a2..4fc87e7c94c9 100644
--- a/drivers/i2c/busses/i2c-simtec.c
+++ b/drivers/i2c/busses/i2c-simtec.c
@@ -156,12 +156,8 @@ static int simtec_i2c_remove(struct platform_device *dev)
return 0;
}
-
/* device driver */
-/* work with hotplug and coldplug */
-MODULE_ALIAS("platform:simtec-i2c");
-
static struct platform_driver simtec_i2c_driver = {
.driver = {
.name = "simtec-i2c",
@@ -171,19 +167,9 @@ static struct platform_driver simtec_i2c_driver = {
.remove = simtec_i2c_remove,
};
-static int __init i2c_adap_simtec_init(void)
-{
- return platform_driver_register(&simtec_i2c_driver);
-}
-
-static void __exit i2c_adap_simtec_exit(void)
-{
- platform_driver_unregister(&simtec_i2c_driver);
-}
-
-module_init(i2c_adap_simtec_init);
-module_exit(i2c_adap_simtec_exit);
+module_platform_driver(simtec_i2c_driver);
MODULE_DESCRIPTION("Simtec Generic I2C Bus driver");
MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:simtec-i2c");
diff --git a/drivers/i2c/busses/i2c-sis5595.c b/drivers/i2c/busses/i2c-sis5595.c
index 437586611d4a..87e5126d449c 100644
--- a/drivers/i2c/busses/i2c-sis5595.c
+++ b/drivers/i2c/busses/i2c-sis5595.c
@@ -147,7 +147,7 @@ static int __devinit sis5595_setup(struct pci_dev *SIS5595_dev)
u16 a;
u8 val;
int *i;
- int retval = -ENODEV;
+ int retval;
/* Look for imposters */
for (i = blacklist; *i != 0; i++) {
@@ -223,7 +223,7 @@ static int __devinit sis5595_setup(struct pci_dev *SIS5595_dev)
error:
release_region(sis5595_base + SMB_INDEX, 2);
- return retval;
+ return -ENODEV;
}
static int sis5595_transaction(struct i2c_adapter *adap)
@@ -369,7 +369,7 @@ static struct i2c_adapter sis5595_adapter = {
.algo = &smbus_algorithm,
};
-static const struct pci_device_id sis5595_ids[] __devinitconst = {
+static DEFINE_PCI_DEVICE_TABLE(sis5595_ids) = {
{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503) },
{ 0, }
};
diff --git a/drivers/i2c/busses/i2c-sis630.c b/drivers/i2c/busses/i2c-sis630.c
index e6f539e26f65..15cf78f65ce0 100644
--- a/drivers/i2c/busses/i2c-sis630.c
+++ b/drivers/i2c/busses/i2c-sis630.c
@@ -93,8 +93,8 @@
static struct pci_driver sis630_driver;
/* insmod parameters */
-static int high_clock;
-static int force;
+static bool high_clock;
+static bool force;
module_param(high_clock, bool, 0);
MODULE_PARM_DESC(high_clock, "Set Host Master Clock to 56KHz (default 14KHz).");
module_param(force, bool, 0);
@@ -393,7 +393,7 @@ static int __devinit sis630_setup(struct pci_dev *sis630_dev)
{
unsigned char b;
struct pci_dev *dummy = NULL;
- int retval = -ENODEV, i;
+ int retval, i;
/* check for supported SiS devices */
for (i=0; supported[i] > 0 ; i++) {
@@ -418,18 +418,21 @@ static int __devinit sis630_setup(struct pci_dev *sis630_dev)
*/
if (pci_read_config_byte(sis630_dev, SIS630_BIOS_CTL_REG,&b)) {
dev_err(&sis630_dev->dev, "Error: Can't read bios ctl reg\n");
+ retval = -ENODEV;
goto exit;
}
/* if ACPI already enabled , do nothing */
if (!(b & 0x80) &&
pci_write_config_byte(sis630_dev, SIS630_BIOS_CTL_REG, b | 0x80)) {
dev_err(&sis630_dev->dev, "Error: Can't enable ACPI\n");
+ retval = -ENODEV;
goto exit;
}
/* Determine the ACPI base address */
if (pci_read_config_word(sis630_dev,SIS630_ACPI_BASE_REG,&acpi_base)) {
dev_err(&sis630_dev->dev, "Error: Can't determine ACPI base address\n");
+ retval = -ENODEV;
goto exit;
}
@@ -445,6 +448,7 @@ static int __devinit sis630_setup(struct pci_dev *sis630_dev)
sis630_driver.name)) {
dev_err(&sis630_dev->dev, "SMBus registers 0x%04x-0x%04x already "
"in use!\n", acpi_base + SMB_STS, acpi_base + SMB_SAA);
+ retval = -EBUSY;
goto exit;
}
@@ -468,7 +472,7 @@ static struct i2c_adapter sis630_adapter = {
.algo = &smbus_algorithm,
};
-static const struct pci_device_id sis630_ids[] __devinitconst = {
+static DEFINE_PCI_DEVICE_TABLE(sis630_ids) = {
{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503) },
{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC) },
{ 0, }
diff --git a/drivers/i2c/busses/i2c-sis96x.c b/drivers/i2c/busses/i2c-sis96x.c
index 86837f0c4cb9..cc5d149413f7 100644
--- a/drivers/i2c/busses/i2c-sis96x.c
+++ b/drivers/i2c/busses/i2c-sis96x.c
@@ -245,7 +245,7 @@ static struct i2c_adapter sis96x_adapter = {
.algo = &smbus_algorithm,
};
-static const struct pci_device_id sis96x_ids[] = {
+static DEFINE_PCI_DEVICE_TABLE(sis96x_ids) = {
{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_SMBUS) },
{ 0, }
};
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 46b6500c5478..6381604696d3 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -558,7 +558,7 @@ static const struct i2c_algorithm tegra_i2c_algo = {
.functionality = tegra_i2c_func,
};
-static int tegra_i2c_probe(struct platform_device *pdev)
+static int __devinit tegra_i2c_probe(struct platform_device *pdev)
{
struct tegra_i2c_dev *i2c_dev;
struct tegra_i2c_platform_data *pdata = pdev->dev.platform_data;
@@ -636,7 +636,10 @@ static int tegra_i2c_probe(struct platform_device *pdev)
i2c_dev->bus_clk_rate = be32_to_cpup(prop);
}
- if (pdev->id == 3)
+ if (pdev->dev.of_node)
+ i2c_dev->is_dvc = of_device_is_compatible(pdev->dev.of_node,
+ "nvidia,tegra20-i2c-dvc");
+ else if (pdev->id == 3)
i2c_dev->is_dvc = 1;
init_completion(&i2c_dev->msg_complete);
@@ -690,7 +693,7 @@ err_iounmap:
return ret;
}
-static int tegra_i2c_remove(struct platform_device *pdev)
+static int __devexit tegra_i2c_remove(struct platform_device *pdev)
{
struct tegra_i2c_dev *i2c_dev = platform_get_drvdata(pdev);
i2c_del_adapter(&i2c_dev->adapter);
@@ -742,6 +745,7 @@ static int tegra_i2c_resume(struct platform_device *pdev)
/* Match table for of_platform binding */
static const struct of_device_id tegra_i2c_of_match[] __devinitconst = {
{ .compatible = "nvidia,tegra20-i2c", },
+ { .compatible = "nvidia,tegra20-i2c-dvc", },
{},
};
MODULE_DEVICE_TABLE(of, tegra_i2c_of_match);
diff --git a/drivers/i2c/busses/i2c-via.c b/drivers/i2c/busses/i2c-via.c
index 7799fe5bda88..713d31ade26b 100644
--- a/drivers/i2c/busses/i2c-via.c
+++ b/drivers/i2c/busses/i2c-via.c
@@ -89,7 +89,7 @@ static struct i2c_adapter vt586b_adapter = {
};
-static const struct pci_device_id vt586b_ids[] __devinitconst = {
+static DEFINE_PCI_DEVICE_TABLE(vt586b_ids) = {
{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3) },
{ 0, }
};
diff --git a/drivers/i2c/busses/i2c-viapro.c b/drivers/i2c/busses/i2c-viapro.c
index 0b012f1f8ac5..333011c83d52 100644
--- a/drivers/i2c/busses/i2c-viapro.c
+++ b/drivers/i2c/busses/i2c-viapro.c
@@ -91,7 +91,7 @@ static unsigned short SMBHSTCFG = 0xD2;
/* If force is set to anything different from 0, we forcibly enable the
VT596. DANGEROUS! */
-static int force;
+static bool force;
module_param(force, bool, 0);
MODULE_PARM_DESC(force, "Forcibly enable the SMBus. DANGEROUS!");
@@ -324,7 +324,7 @@ static int __devinit vt596_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
unsigned char temp;
- int error = -ENODEV;
+ int error;
/* Determine the address of the SMBus areas */
if (force_addr) {
@@ -390,6 +390,7 @@ found:
dev_err(&pdev->dev, "SMBUS: Error: Host SMBus "
"controller not enabled! - upgrade BIOS or "
"use force=1\n");
+ error = -ENODEV;
goto release_region;
}
}
@@ -422,9 +423,11 @@ found:
"SMBus Via Pro adapter at %04x", vt596_smba);
vt596_pdev = pci_dev_get(pdev);
- if (i2c_add_adapter(&vt596_adapter)) {
+ error = i2c_add_adapter(&vt596_adapter);
+ if (error) {
pci_dev_put(vt596_pdev);
vt596_pdev = NULL;
+ goto release_region;
}
/* Always return failure here. This is to allow other drivers to bind
@@ -438,7 +441,7 @@ release_region:
return error;
}
-static const struct pci_device_id vt596_ids[] = {
+static DEFINE_PCI_DEVICE_TABLE(vt596_ids) = {
{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C596_3),
.driver_data = SMBBA1 },
{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C596B_3),
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index 4bb68f35caf2..2bded7647ef2 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -426,7 +426,7 @@ static void xiic_process(struct xiic_i2c *i2c)
xiic_wakeup(i2c, STATE_ERROR);
} else if (pend & (XIIC_INTR_TX_EMPTY_MASK | XIIC_INTR_TX_HALF_MASK)) {
- /* Transmit register/FIFO is empty or empty */
+ /* Transmit register/FIFO is empty or ½ empty */
clr = pend &
(XIIC_INTR_TX_EMPTY_MASK | XIIC_INTR_TX_HALF_MASK);
@@ -795,10 +795,6 @@ static int __devexit xiic_i2c_remove(struct platform_device* pdev)
return 0;
}
-
-/* work with hotplug and coldplug */
-MODULE_ALIAS("platform:"DRIVER_NAME);
-
static struct platform_driver xiic_i2c_driver = {
.probe = xiic_i2c_probe,
.remove = __devexit_p(xiic_i2c_remove),
@@ -808,19 +804,9 @@ static struct platform_driver xiic_i2c_driver = {
},
};
-static int __init xiic_i2c_init(void)
-{
- return platform_driver_register(&xiic_i2c_driver);
-}
-
-static void __exit xiic_i2c_exit(void)
-{
- platform_driver_unregister(&xiic_i2c_driver);
-}
-
-module_init(xiic_i2c_init);
-module_exit(xiic_i2c_exit);
+module_platform_driver(xiic_i2c_driver);
MODULE_AUTHOR("info@mocean-labs.com");
MODULE_DESCRIPTION("Xilinx I2C bus driver");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:"DRIVER_NAME);
diff --git a/drivers/i2c/busses/scx200_acb.c b/drivers/i2c/busses/scx200_acb.c
index 91e349c884c5..2eacb7784d56 100644
--- a/drivers/i2c/busses/scx200_acb.c
+++ b/drivers/i2c/busses/scx200_acb.c
@@ -559,7 +559,7 @@ static struct platform_driver scx200_pci_driver = {
.remove = __devexit_p(scx200_remove),
};
-static const struct pci_device_id scx200_isa[] __initconst = {
+static DEFINE_PCI_DEVICE_TABLE(scx200_isa) = {
{ PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SCx200_BRIDGE) },
{ PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SC1100_BRIDGE) },
{ 0, }
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index 57a45ce84b2d..10e7f1e76586 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -251,15 +251,10 @@ static noinline int i2cdev_ioctl_rdrw(struct i2c_client *client,
if (rdwr_arg.nmsgs > I2C_RDRW_IOCTL_MAX_MSGS)
return -EINVAL;
- rdwr_pa = kmalloc(rdwr_arg.nmsgs * sizeof(struct i2c_msg), GFP_KERNEL);
- if (!rdwr_pa)
- return -ENOMEM;
-
- if (copy_from_user(rdwr_pa, rdwr_arg.msgs,
- rdwr_arg.nmsgs * sizeof(struct i2c_msg))) {
- kfree(rdwr_pa);
- return -EFAULT;
- }
+ rdwr_pa = memdup_user(rdwr_arg.msgs,
+ rdwr_arg.nmsgs * sizeof(struct i2c_msg));
+ if (IS_ERR(rdwr_pa))
+ return PTR_ERR(rdwr_pa);
data_ptrs = kmalloc(rdwr_arg.nmsgs * sizeof(u8 __user *), GFP_KERNEL);
if (data_ptrs == NULL) {
diff --git a/drivers/i2c/muxes/gpio-i2cmux.c b/drivers/i2c/muxes/gpio-i2cmux.c
index 7b6ce624cd6e..e5fa695eb0fa 100644
--- a/drivers/i2c/muxes/gpio-i2cmux.c
+++ b/drivers/i2c/muxes/gpio-i2cmux.c
@@ -165,18 +165,7 @@ static struct platform_driver gpiomux_driver = {
},
};
-static int __init gpiomux_init(void)
-{
- return platform_driver_register(&gpiomux_driver);
-}
-
-static void __exit gpiomux_exit(void)
-{
- platform_driver_unregister(&gpiomux_driver);
-}
-
-module_init(gpiomux_init);
-module_exit(gpiomux_exit);
+module_platform_driver(gpiomux_driver);
MODULE_DESCRIPTION("GPIO-based I2C multiplexer driver");
MODULE_AUTHOR("Peter Korsgaard <peter.korsgaard@barco.com>");
diff --git a/drivers/ide/ali14xx.c b/drivers/ide/ali14xx.c
index 25b9fe3a9f8e..d3be99fb4154 100644
--- a/drivers/ide/ali14xx.c
+++ b/drivers/ide/ali14xx.c
@@ -221,7 +221,7 @@ static int __init ali14xx_probe(void)
return ide_legacy_device_add(&ali14xx_port_info, 0);
}
-static int probe_ali14xx;
+static bool probe_ali14xx;
module_param_named(probe, probe_ali14xx, bool, 0);
MODULE_PARM_DESC(probe, "probe for ALI M14xx chipsets");
diff --git a/drivers/ide/at91_ide.c b/drivers/ide/at91_ide.c
index 6dede8f366c5..41d415529479 100644
--- a/drivers/ide/at91_ide.c
+++ b/drivers/ide/at91_ide.c
@@ -314,7 +314,7 @@ static int __init at91_ide_probe(struct platform_device *pdev)
apply_timings(board->chipselect, 0, ide_timing_find_mode(XFER_PIO_0), 0);
/* with GPIO interrupt we have to do quirks in handler */
- if (board->irq_pin >= PIN_BASE)
+ if (gpio_is_valid(board->irq_pin))
host->irq_handler = at91_irq_handler;
host->ports[0]->select_data = board->chipselect;
diff --git a/drivers/ide/cmd640.c b/drivers/ide/cmd640.c
index a81bd7575792..14717304b388 100644
--- a/drivers/ide/cmd640.c
+++ b/drivers/ide/cmd640.c
@@ -111,7 +111,7 @@
#define DRV_NAME "cmd640"
-static int cmd640_vlb;
+static bool cmd640_vlb;
/*
* CMD640 specific registers definition.
diff --git a/drivers/ide/dtc2278.c b/drivers/ide/dtc2278.c
index 6929f7fce93a..46af4743b3e6 100644
--- a/drivers/ide/dtc2278.c
+++ b/drivers/ide/dtc2278.c
@@ -130,7 +130,7 @@ static int __init dtc2278_probe(void)
return ide_legacy_device_add(&dtc2278_port_info, 0);
}
-static int probe_dtc2278;
+static bool probe_dtc2278;
module_param_named(probe, probe_dtc2278, bool, 0);
MODULE_PARM_DESC(probe, "probe for DTC2278xx chipsets");
diff --git a/drivers/ide/gayle.c b/drivers/ide/gayle.c
index 3feaa26410be..51beb85250d4 100644
--- a/drivers/ide/gayle.c
+++ b/drivers/ide/gayle.c
@@ -50,7 +50,7 @@
GAYLE_NUM_HWIFS-1)
#define GAYLE_HAS_CONTROL_REG (!ide_doubler)
-static int ide_doubler;
+static bool ide_doubler;
module_param_named(doubler, ide_doubler, bool, 0);
MODULE_PARM_DESC(doubler, "enable support for IDE doublers");
diff --git a/drivers/ide/ht6560b.c b/drivers/ide/ht6560b.c
index 808bcdcbf8e1..986f2513eab4 100644
--- a/drivers/ide/ht6560b.c
+++ b/drivers/ide/ht6560b.c
@@ -317,7 +317,7 @@ static void __init ht6560b_init_dev(ide_drive_t *drive)
ide_set_drivedata(drive, (void *)t);
}
-static int probe_ht6560b;
+static bool probe_ht6560b;
module_param_named(probe, probe_ht6560b, bool, 0);
MODULE_PARM_DESC(probe, "probe for HT6560B chipset");
diff --git a/drivers/ide/ide-4drives.c b/drivers/ide/ide-4drives.c
index 979d342c338a..547d7cf2e016 100644
--- a/drivers/ide/ide-4drives.c
+++ b/drivers/ide/ide-4drives.c
@@ -6,7 +6,7 @@
#define DRV_NAME "ide-4drives"
-static int probe_4drives;
+static bool probe_4drives;
module_param_named(probe, probe_4drives, bool, 0);
MODULE_PARM_DESC(probe, "probe for generic IDE chipset with 4 drives/port");
diff --git a/drivers/ide/ide-acpi.c b/drivers/ide/ide-acpi.c
index f22edc66b030..f1a6796b165c 100644
--- a/drivers/ide/ide-acpi.c
+++ b/drivers/ide/ide-acpi.c
@@ -53,15 +53,15 @@ struct ide_acpi_hwif_link {
#define DEBPRINT(fmt, args...) do {} while (0)
#endif /* DEBUGGING */
-static int ide_noacpi;
+static bool ide_noacpi;
module_param_named(noacpi, ide_noacpi, bool, 0);
MODULE_PARM_DESC(noacpi, "disable IDE ACPI support");
-static int ide_acpigtf;
+static bool ide_acpigtf;
module_param_named(acpigtf, ide_acpigtf, bool, 0);
MODULE_PARM_DESC(acpigtf, "enable IDE ACPI _GTF support");
-static int ide_acpionboot;
+static bool ide_acpionboot;
module_param_named(acpionboot, ide_acpionboot, bool, 0);
MODULE_PARM_DESC(acpionboot, "call IDE ACPI methods on boot");
diff --git a/drivers/ide/ide-floppy_ioctl.c b/drivers/ide/ide-floppy_ioctl.c
index d267b7affad6..a22ca8467010 100644
--- a/drivers/ide/ide-floppy_ioctl.c
+++ b/drivers/ide/ide-floppy_ioctl.c
@@ -292,8 +292,7 @@ int ide_floppy_ioctl(ide_drive_t *drive, struct block_device *bdev,
* and CDROM_SEND_PACKET (legacy) ioctls
*/
if (cmd != CDROM_SEND_PACKET && cmd != SCSI_IOCTL_SEND_COMMAND)
- err = scsi_cmd_ioctl(bdev->bd_disk->queue, bdev->bd_disk,
- mode, cmd, argp);
+ err = scsi_cmd_blk_ioctl(bdev, mode, cmd, argp);
if (err == -ENOTTY)
err = generic_ide_ioctl(drive, bdev, cmd, arg);
diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
index a743e68a8903..7f56b738d762 100644
--- a/drivers/ide/ide-pci-generic.c
+++ b/drivers/ide/ide-pci-generic.c
@@ -28,7 +28,7 @@
#define DRV_NAME "ide_pci_generic"
-static int ide_generic_all; /* Set to claim all devices */
+static bool ide_generic_all; /* Set to claim all devices */
module_param_named(all_generic_ide, ide_generic_all, bool, 0444);
MODULE_PARM_DESC(all_generic_ide, "IDE generic will claim all unknown PCI IDE storage controllers.");
diff --git a/drivers/ide/qd65xx.c b/drivers/ide/qd65xx.c
index 3f0244fd8e62..8bbfe5557c7b 100644
--- a/drivers/ide/qd65xx.c
+++ b/drivers/ide/qd65xx.c
@@ -417,7 +417,7 @@ static int __init qd_probe(int base)
return rc;
}
-static int probe_qd65xx;
+static bool probe_qd65xx;
module_param_named(probe, probe_qd65xx, bool, 0);
MODULE_PARM_DESC(probe, "probe for QD65xx chipsets");
diff --git a/drivers/ide/umc8672.c b/drivers/ide/umc8672.c
index 47adcd09cb26..5cfb78120669 100644
--- a/drivers/ide/umc8672.c
+++ b/drivers/ide/umc8672.c
@@ -160,7 +160,7 @@ static int __init umc8672_probe(void)
return ide_legacy_device_add(&umc8672_port_info, 0);
}
-static int probe_umc8672;
+static bool probe_umc8672;
module_param_named(probe, probe_umc8672, bool, 0);
MODULE_PARM_DESC(probe, "probe for UMC8672 chipset");
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 5d2f8e13cf0e..20bce51c2e82 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -197,7 +197,7 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
.enter = &intel_idle },
};
-static int get_driver_data(int cstate)
+static long get_driver_data(int cstate)
{
int driver_data;
switch (cstate) {
@@ -232,6 +232,7 @@ static int get_driver_data(int cstate)
* @drv: cpuidle driver
* @index: index of cpuidle state
*
+ * Must be called under local_irq_disable().
*/
static int intel_idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
@@ -247,8 +248,6 @@ static int intel_idle(struct cpuidle_device *dev,
cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1;
- local_irq_disable();
-
/*
* leave_mm() to avoid costly and often unnecessary wakeups
* for flushing the user TLB's associated with the active mm.
@@ -348,7 +347,8 @@ static int intel_idle_probe(void)
cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates);
if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
- !(ecx & CPUID5_ECX_INTERRUPT_BREAK))
+ !(ecx & CPUID5_ECX_INTERRUPT_BREAK) ||
+ !mwait_substates)
return -ENODEV;
pr_debug(PREFIX "MWAIT substates: 0x%x\n", mwait_substates);
@@ -394,7 +394,7 @@ static int intel_idle_probe(void)
if (boot_cpu_has(X86_FEATURE_ARAT)) /* Always Reliable APIC Timer */
lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE;
else {
- smp_call_function(__setup_broadcast_timer, (void *)true, 1);
+ on_each_cpu(__setup_broadcast_timer, (void *)true, 1);
register_cpu_notifier(&setup_broadcast_notifier);
}
@@ -471,71 +471,67 @@ static int intel_idle_cpuidle_driver_init(void)
}
if (auto_demotion_disable_flags)
- smp_call_function(auto_demotion_disable, NULL, 1);
+ on_each_cpu(auto_demotion_disable, NULL, 1);
return 0;
}
/*
- * intel_idle_cpuidle_devices_init()
+ * intel_idle_cpu_init()
* allocate, initialize, register cpuidle_devices
+ * @cpu: cpu/core to initialize
*/
-static int intel_idle_cpuidle_devices_init(void)
+int intel_idle_cpu_init(int cpu)
{
- int i, cstate;
+ int cstate;
struct cpuidle_device *dev;
- intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device);
- if (intel_idle_cpuidle_devices == NULL)
- return -ENOMEM;
-
- for_each_online_cpu(i) {
- dev = per_cpu_ptr(intel_idle_cpuidle_devices, i);
+ dev = per_cpu_ptr(intel_idle_cpuidle_devices, cpu);
- dev->state_count = 1;
+ dev->state_count = 1;
- for (cstate = 1; cstate < MWAIT_MAX_NUM_CSTATES; ++cstate) {
- int num_substates;
+ for (cstate = 1; cstate < MWAIT_MAX_NUM_CSTATES; ++cstate) {
+ int num_substates;
- if (cstate > max_cstate) {
- printk(PREFIX "max_cstate %d reached\n",
- max_cstate);
- break;
- }
+ if (cstate > max_cstate) {
+ printk(PREFIX "max_cstate %d reached\n",
+ max_cstate);
+ break;
+ }
- /* does the state exist in CPUID.MWAIT? */
- num_substates = (mwait_substates >> ((cstate) * 4))
- & MWAIT_SUBSTATE_MASK;
- if (num_substates == 0)
- continue;
- /* is the state not enabled? */
- if (cpuidle_state_table[cstate].enter == NULL) {
- continue;
- }
+ /* does the state exist in CPUID.MWAIT? */
+ num_substates = (mwait_substates >> ((cstate) * 4))
+ & MWAIT_SUBSTATE_MASK;
+ if (num_substates == 0)
+ continue;
+ /* is the state not enabled? */
+ if (cpuidle_state_table[cstate].enter == NULL)
+ continue;
- dev->states_usage[dev->state_count].driver_data =
- (void *)get_driver_data(cstate);
+ dev->states_usage[dev->state_count].driver_data =
+ (void *)get_driver_data(cstate);
dev->state_count += 1;
}
+ dev->cpu = cpu;
- dev->cpu = i;
- if (cpuidle_register_device(dev)) {
- pr_debug(PREFIX "cpuidle_register_device %d failed!\n",
- i);
- intel_idle_cpuidle_devices_uninit();
- return -EIO;
- }
+ if (cpuidle_register_device(dev)) {
+ pr_debug(PREFIX "cpuidle_register_device %d failed!\n", cpu);
+ intel_idle_cpuidle_devices_uninit();
+ return -EIO;
}
+ if (auto_demotion_disable_flags)
+ smp_call_function_single(cpu, auto_demotion_disable, NULL, 1);
+
return 0;
}
static int __init intel_idle_init(void)
{
- int retval;
+ int retval, i;
/* Do not load intel_idle at all for now if idle= is passed */
if (boot_option_idle_override != IDLE_NO_OVERRIDE)
@@ -553,10 +549,16 @@ static int __init intel_idle_init(void)
return retval;
}
- retval = intel_idle_cpuidle_devices_init();
- if (retval) {
- cpuidle_unregister_driver(&intel_idle_driver);
- return retval;
+ intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device);
+ if (intel_idle_cpuidle_devices == NULL)
+ return -ENOMEM;
+
+ for_each_online_cpu(i) {
+ retval = intel_idle_cpu_init(i);
+ if (retval) {
+ cpuidle_unregister_driver(&intel_idle_driver);
+ return retval;
+ }
}
return 0;
@@ -568,7 +570,7 @@ static void __exit intel_idle_exit(void)
cpuidle_unregister_driver(&intel_idle_driver);
if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE) {
- smp_call_function(__setup_broadcast_timer, (void *)false, 1);
+ on_each_cpu(__setup_broadcast_timer, (void *)false, 1);
unregister_cpu_notifier(&setup_broadcast_notifier);
}
diff --git a/drivers/ieee802154/fakehard.c b/drivers/ieee802154/fakehard.c
index eb0e2ccc79ae..73d453159408 100644
--- a/drivers/ieee802154/fakehard.c
+++ b/drivers/ieee802154/fakehard.c
@@ -343,7 +343,7 @@ static void ieee802154_fake_setup(struct net_device *dev)
{
dev->addr_len = IEEE802154_ADDR_LEN;
memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN);
- dev->features = NETIF_F_NO_CSUM;
+ dev->features = NETIF_F_HW_CSUM;
dev->needed_tailroom = 2; /* FCS */
dev->mtu = 127;
dev->tx_queue_len = 10;
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 0f9a84c1046a..eb0add311dc8 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -55,6 +55,7 @@ source "drivers/infiniband/hw/nes/Kconfig"
source "drivers/infiniband/ulp/ipoib/Kconfig"
source "drivers/infiniband/ulp/srp/Kconfig"
+source "drivers/infiniband/ulp/srpt/Kconfig"
source "drivers/infiniband/ulp/iser/Kconfig"
diff --git a/drivers/infiniband/Makefile b/drivers/infiniband/Makefile
index 9cc7a47d3e67..a3b2d8eac86e 100644
--- a/drivers/infiniband/Makefile
+++ b/drivers/infiniband/Makefile
@@ -10,4 +10,5 @@ obj-$(CONFIG_MLX4_INFINIBAND) += hw/mlx4/
obj-$(CONFIG_INFINIBAND_NES) += hw/nes/
obj-$(CONFIG_INFINIBAND_IPOIB) += ulp/ipoib/
obj-$(CONFIG_INFINIBAND_SRP) += ulp/srp/
+obj-$(CONFIG_INFINIBAND_SRPT) += ulp/srpt/
obj-$(CONFIG_INFINIBAND_ISER) += ulp/iser/
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 691276bafd78..1612cfd50f39 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -178,6 +178,25 @@ static void queue_req(struct addr_req *req)
mutex_unlock(&lock);
}
+static int dst_fetch_ha(struct dst_entry *dst, struct rdma_dev_addr *addr)
+{
+ struct neighbour *n;
+ int ret;
+
+ rcu_read_lock();
+ n = dst_get_neighbour_noref(dst);
+ if (!n || !(n->nud_state & NUD_VALID)) {
+ if (n)
+ neigh_event_send(n, NULL);
+ ret = -ENODATA;
+ } else {
+ ret = rdma_copy_addr(addr, dst->dev, n->ha);
+ }
+ rcu_read_unlock();
+
+ return ret;
+}
+
static int addr4_resolve(struct sockaddr_in *src_in,
struct sockaddr_in *dst_in,
struct rdma_dev_addr *addr)
@@ -185,7 +204,6 @@ static int addr4_resolve(struct sockaddr_in *src_in,
__be32 src_ip = src_in->sin_addr.s_addr;
__be32 dst_ip = dst_in->sin_addr.s_addr;
struct rtable *rt;
- struct neighbour *neigh;
struct flowi4 fl4;
int ret;
@@ -214,18 +232,7 @@ static int addr4_resolve(struct sockaddr_in *src_in,
goto put;
}
- neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, rt->dst.dev);
- if (!neigh || !(neigh->nud_state & NUD_VALID)) {
- neigh_event_send(dst_get_neighbour(&rt->dst), NULL);
- ret = -ENODATA;
- if (neigh)
- goto release;
- goto put;
- }
-
- ret = rdma_copy_addr(addr, neigh->dev, neigh->ha);
-release:
- neigh_release(neigh);
+ ret = dst_fetch_ha(&rt->dst, addr);
put:
ip_rt_put(rt);
out:
@@ -238,13 +245,12 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
struct rdma_dev_addr *addr)
{
struct flowi6 fl6;
- struct neighbour *neigh;
struct dst_entry *dst;
int ret;
memset(&fl6, 0, sizeof fl6);
- ipv6_addr_copy(&fl6.daddr, &dst_in->sin6_addr);
- ipv6_addr_copy(&fl6.saddr, &src_in->sin6_addr);
+ fl6.daddr = dst_in->sin6_addr;
+ fl6.saddr = src_in->sin6_addr;
fl6.flowi6_oif = addr->bound_dev_if;
dst = ip6_route_output(&init_net, NULL, &fl6);
@@ -258,7 +264,7 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
goto put;
src_in->sin6_family = AF_INET6;
- ipv6_addr_copy(&src_in->sin6_addr, &fl6.saddr);
+ src_in->sin6_addr = fl6.saddr;
}
if (dst->dev->flags & IFF_LOOPBACK) {
@@ -274,15 +280,7 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
goto put;
}
- neigh = dst_get_neighbour(dst);
- if (!neigh || !(neigh->nud_state & NUD_VALID)) {
- if (neigh)
- neigh_event_send(neigh, NULL);
- ret = -ENODATA;
- goto put;
- }
-
- ret = rdma_copy_addr(addr, dst->dev, neigh->ha);
+ ret = dst_fetch_ha(dst, addr);
put:
dst_release(dst);
return ret;
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 8b72f39202fb..c889aaef3416 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -3659,7 +3659,7 @@ static struct kobj_type cm_port_obj_type = {
.release = cm_release_port_obj
};
-static char *cm_devnode(struct device *dev, mode_t *mode)
+static char *cm_devnode(struct device *dev, umode_t *mode)
{
if (mode)
*mode = 0666;
diff --git a/drivers/infiniband/core/cm_msgs.h b/drivers/infiniband/core/cm_msgs.h
index 505db2a59e7f..7da9b2102341 100644
--- a/drivers/infiniband/core/cm_msgs.h
+++ b/drivers/infiniband/core/cm_msgs.h
@@ -799,6 +799,7 @@ struct cm_apr_msg {
u8 info_length;
u8 ap_status;
+ __be16 rsvd;
u8 info[IB_CM_APR_INFO_LENGTH];
u8 private_data[IB_CM_APR_PRIVATE_DATA_SIZE];
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 75ff821c0af0..e3e470fecaa9 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1110,7 +1110,7 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
if (cma_any_addr((struct sockaddr *) &rt->addr.src_addr)) {
rt->addr.dev_addr.dev_type = ARPHRD_INFINIBAND;
rdma_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid);
- ib_addr_set_pkey(&rt->addr.dev_addr, rt->path_rec[0].pkey);
+ ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey));
} else {
ret = rdma_translate_ip((struct sockaddr *) &rt->addr.src_addr,
&rt->addr.dev_addr);
@@ -2005,11 +2005,11 @@ static int cma_resolve_loopback(struct rdma_id_private *id_priv)
if (cma_zero_addr(src)) {
dst = (struct sockaddr *) &id_priv->id.route.addr.dst_addr;
if ((src->sa_family = dst->sa_family) == AF_INET) {
- ((struct sockaddr_in *) src)->sin_addr.s_addr =
- ((struct sockaddr_in *) dst)->sin_addr.s_addr;
+ ((struct sockaddr_in *)src)->sin_addr =
+ ((struct sockaddr_in *)dst)->sin_addr;
} else {
- ipv6_addr_copy(&((struct sockaddr_in6 *) src)->sin6_addr,
- &((struct sockaddr_in6 *) dst)->sin6_addr);
+ ((struct sockaddr_in6 *)src)->sin6_addr =
+ ((struct sockaddr_in6 *)dst)->sin6_addr;
}
}
@@ -2513,6 +2513,9 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
req.private_data_len = sizeof(struct cma_hdr) +
conn_param->private_data_len;
+ if (req.private_data_len < conn_param->private_data_len)
+ return -EINVAL;
+
req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
if (!req.private_data)
return -ENOMEM;
@@ -2562,6 +2565,9 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
memset(&req, 0, sizeof req);
offset = cma_user_data_offset(id_priv->id.ps);
req.private_data_len = offset + conn_param->private_data_len;
+ if (req.private_data_len < conn_param->private_data_len)
+ return -EINVAL;
+
private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
if (!private_data)
return -ENOMEM;
@@ -2920,7 +2926,7 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
mutex_lock(&id_priv->qp_mutex);
if (!status && id_priv->id.qp)
status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid,
- multicast->rec.mlid);
+ be16_to_cpu(multicast->rec.mlid));
mutex_unlock(&id_priv->qp_mutex);
memset(&event, 0, sizeof event);
@@ -3181,7 +3187,7 @@ void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
if (id->qp)
ib_detach_mcast(id->qp,
&mc->multicast.ib->rec.mgid,
- mc->multicast.ib->rec.mlid);
+ be16_to_cpu(mc->multicast.ib->rec.mlid));
if (rdma_node_get_transport(id_priv->cma_dev->device->node_type) == RDMA_TRANSPORT_IB) {
switch (rdma_port_get_link_layer(id->device, id->port_num)) {
case IB_LINK_LAYER_INFINIBAND:
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index b8a0b4a7811b..06f08713f487 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -106,9 +106,6 @@ enum {
IB_UCM_MAX_DEVICES = 32
};
-/* ib_cm and ib_user_cm modules share /sys/class/infiniband_cm */
-extern struct class cm_class;
-
#define IB_UCM_BASE_DEV MKDEV(IB_UCM_MAJOR, IB_UCM_BASE_MINOR)
static void ib_ucm_add_one(struct ib_device *device);
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index 07db22997e97..f0d588f8859e 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -1175,7 +1175,7 @@ static void ib_umad_remove_one(struct ib_device *device)
kref_put(&umad_dev->ref, ib_umad_release_dev);
}
-static char *umad_devnode(struct device *dev, mode_t *mode)
+static char *umad_devnode(struct device *dev, umode_t *mode)
{
return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
}
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 254f1649c734..b930da4c0c63 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -241,11 +241,24 @@ static struct ib_qp *idr_read_qp(int qp_handle, struct ib_ucontext *context)
return idr_read_obj(&ib_uverbs_qp_idr, qp_handle, context, 0);
}
+static struct ib_qp *idr_write_qp(int qp_handle, struct ib_ucontext *context)
+{
+ struct ib_uobject *uobj;
+
+ uobj = idr_write_uobj(&ib_uverbs_qp_idr, qp_handle, context);
+ return uobj ? uobj->object : NULL;
+}
+
static void put_qp_read(struct ib_qp *qp)
{
put_uobj_read(qp->uobject);
}
+static void put_qp_write(struct ib_qp *qp)
+{
+ put_uobj_write(qp->uobject);
+}
+
static struct ib_srq *idr_read_srq(int srq_handle, struct ib_ucontext *context)
{
return idr_read_obj(&ib_uverbs_srq_idr, srq_handle, context, 0);
@@ -2375,7 +2388,7 @@ ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
- qp = idr_read_qp(cmd.qp_handle, file->ucontext);
+ qp = idr_write_qp(cmd.qp_handle, file->ucontext);
if (!qp)
return -EINVAL;
@@ -2404,7 +2417,7 @@ ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
kfree(mcast);
out_put:
- put_qp_read(qp);
+ put_qp_write(qp);
return ret ? ret : in_len;
}
@@ -2422,7 +2435,7 @@ ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
- qp = idr_read_qp(cmd.qp_handle, file->ucontext);
+ qp = idr_write_qp(cmd.qp_handle, file->ucontext);
if (!qp)
return -EINVAL;
@@ -2441,14 +2454,14 @@ ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
}
out_put:
- put_qp_read(qp);
+ put_qp_write(qp);
return ret ? ret : in_len;
}
-int __uverbs_create_xsrq(struct ib_uverbs_file *file,
- struct ib_uverbs_create_xsrq *cmd,
- struct ib_udata *udata)
+static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
+ struct ib_uverbs_create_xsrq *cmd,
+ struct ib_udata *udata)
{
struct ib_uverbs_create_srq_resp resp;
struct ib_usrq_object *obj;
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 879636746373..604556d73d25 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -846,7 +846,7 @@ static void ib_uverbs_remove_one(struct ib_device *device)
kfree(uverbs_dev);
}
-static char *uverbs_devnode(struct device *dev, mode_t *mode)
+static char *uverbs_devnode(struct device *dev, umode_t *mode)
{
if (mode)
*mode = 0666;
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index de6d0774e609..740dcc065cf2 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -1338,7 +1338,6 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
struct iwch_ep *child_ep, *parent_ep = ctx;
struct cpl_pass_accept_req *req = cplhdr(skb);
unsigned int hwtid = GET_TID(req);
- struct neighbour *neigh;
struct dst_entry *dst;
struct l2t_entry *l2t;
struct rtable *rt;
@@ -1375,8 +1374,7 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
goto reject;
}
dst = &rt->dst;
- neigh = dst_get_neighbour(dst);
- l2t = t3_l2t_get(tdev, neigh, neigh->dev);
+ l2t = t3_l2t_get(tdev, dst, NULL);
if (!l2t) {
printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
__func__);
@@ -1887,7 +1885,6 @@ static int is_loopback_dst(struct iw_cm_id *cm_id)
int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
struct iwch_dev *h = to_iwch_dev(cm_id->device);
- struct neighbour *neigh;
struct iwch_ep *ep;
struct rtable *rt;
int err = 0;
@@ -1945,11 +1942,7 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
goto fail3;
}
ep->dst = &rt->dst;
-
- neigh = dst_get_neighbour(ep->dst);
-
- /* get a l2t entry */
- ep->l2t = t3_l2t_get(ep->com.tdev, neigh, neigh->dev);
+ ep->l2t = t3_l2t_get(ep->com.tdev, ep->dst, NULL);
if (!ep->l2t) {
printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
err = -ENOMEM;
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index b36cdac9c558..0668bb3472d0 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -542,8 +542,10 @@ static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
(mpa_rev_to_use == 2 ? MPA_ENHANCED_RDMA_CONN : 0);
mpa->private_data_size = htons(ep->plen);
mpa->revision = mpa_rev_to_use;
- if (mpa_rev_to_use == 1)
+ if (mpa_rev_to_use == 1) {
ep->tried_with_mpa_v1 = 1;
+ ep->retry_with_mpa_v1 = 0;
+ }
if (mpa_rev_to_use == 2) {
mpa->private_data_size +=
@@ -1554,6 +1556,67 @@ static void get_4tuple(struct cpl_pass_accept_req *req,
return;
}
+static int import_ep(struct c4iw_ep *ep, __be32 peer_ip, struct dst_entry *dst,
+ struct c4iw_dev *cdev, bool clear_mpa_v1)
+{
+ struct neighbour *n;
+ int err, step;
+
+ rcu_read_lock();
+ n = dst_get_neighbour_noref(dst);
+ err = -ENODEV;
+ if (!n)
+ goto out;
+ err = -ENOMEM;
+ if (n->dev->flags & IFF_LOOPBACK) {
+ struct net_device *pdev;
+
+ pdev = ip_dev_find(&init_net, peer_ip);
+ ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
+ n, pdev, 0);
+ if (!ep->l2t)
+ goto out;
+ ep->mtu = pdev->mtu;
+ ep->tx_chan = cxgb4_port_chan(pdev);
+ ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
+ step = cdev->rdev.lldi.ntxq /
+ cdev->rdev.lldi.nchan;
+ ep->txq_idx = cxgb4_port_idx(pdev) * step;
+ step = cdev->rdev.lldi.nrxq /
+ cdev->rdev.lldi.nchan;
+ ep->ctrlq_idx = cxgb4_port_idx(pdev);
+ ep->rss_qid = cdev->rdev.lldi.rxq_ids[
+ cxgb4_port_idx(pdev) * step];
+ dev_put(pdev);
+ } else {
+ ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
+ n, n->dev, 0);
+ if (!ep->l2t)
+ goto out;
+ ep->mtu = dst_mtu(ep->dst);
+ ep->tx_chan = cxgb4_port_chan(n->dev);
+ ep->smac_idx = (cxgb4_port_viid(n->dev) & 0x7F) << 1;
+ step = cdev->rdev.lldi.ntxq /
+ cdev->rdev.lldi.nchan;
+ ep->txq_idx = cxgb4_port_idx(n->dev) * step;
+ ep->ctrlq_idx = cxgb4_port_idx(n->dev);
+ step = cdev->rdev.lldi.nrxq /
+ cdev->rdev.lldi.nchan;
+ ep->rss_qid = cdev->rdev.lldi.rxq_ids[
+ cxgb4_port_idx(n->dev) * step];
+
+ if (clear_mpa_v1) {
+ ep->retry_with_mpa_v1 = 0;
+ ep->tried_with_mpa_v1 = 0;
+ }
+ }
+ err = 0;
+out:
+ rcu_read_unlock();
+
+ return err;
+}
+
static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
{
struct c4iw_ep *child_ep, *parent_ep;
@@ -1561,18 +1624,11 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
unsigned int stid = GET_POPEN_TID(ntohl(req->tos_stid));
struct tid_info *t = dev->rdev.lldi.tids;
unsigned int hwtid = GET_TID(req);
- struct neighbour *neigh;
struct dst_entry *dst;
- struct l2t_entry *l2t;
struct rtable *rt;
__be32 local_ip, peer_ip;
__be16 local_port, peer_port;
- struct net_device *pdev;
- u32 tx_chan, smac_idx;
- u16 rss_qid;
- u32 mtu;
- int step;
- int txq_idx, ctrlq_idx;
+ int err;
parent_ep = lookup_stid(t, stid);
PDBG("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid);
@@ -1594,47 +1650,24 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
goto reject;
}
dst = &rt->dst;
- neigh = dst_get_neighbour(dst);
- if (neigh->dev->flags & IFF_LOOPBACK) {
- pdev = ip_dev_find(&init_net, peer_ip);
- BUG_ON(!pdev);
- l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh, pdev, 0);
- mtu = pdev->mtu;
- tx_chan = cxgb4_port_chan(pdev);
- smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
- step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan;
- txq_idx = cxgb4_port_idx(pdev) * step;
- ctrlq_idx = cxgb4_port_idx(pdev);
- step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
- rss_qid = dev->rdev.lldi.rxq_ids[cxgb4_port_idx(pdev) * step];
- dev_put(pdev);
- } else {
- l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh, neigh->dev, 0);
- mtu = dst_mtu(dst);
- tx_chan = cxgb4_port_chan(neigh->dev);
- smac_idx = (cxgb4_port_viid(neigh->dev) & 0x7F) << 1;
- step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan;
- txq_idx = cxgb4_port_idx(neigh->dev) * step;
- ctrlq_idx = cxgb4_port_idx(neigh->dev);
- step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
- rss_qid = dev->rdev.lldi.rxq_ids[
- cxgb4_port_idx(neigh->dev) * step];
- }
- if (!l2t) {
- printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
+
+ child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
+ if (!child_ep) {
+ printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
__func__);
dst_release(dst);
goto reject;
}
- child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
- if (!child_ep) {
- printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
+ err = import_ep(child_ep, peer_ip, dst, dev, false);
+ if (err) {
+ printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
__func__);
- cxgb4_l2t_release(l2t);
dst_release(dst);
+ kfree(child_ep);
goto reject;
}
+
state_set(&child_ep->com, CONNECTING);
child_ep->com.dev = dev;
child_ep->com.cm_id = NULL;
@@ -1647,18 +1680,11 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
c4iw_get_ep(&parent_ep->com);
child_ep->parent_ep = parent_ep;
child_ep->tos = GET_POPEN_TOS(ntohl(req->tos_stid));
- child_ep->l2t = l2t;
child_ep->dst = dst;
child_ep->hwtid = hwtid;
- child_ep->tx_chan = tx_chan;
- child_ep->smac_idx = smac_idx;
- child_ep->rss_qid = rss_qid;
- child_ep->mtu = mtu;
- child_ep->txq_idx = txq_idx;
- child_ep->ctrlq_idx = ctrlq_idx;
PDBG("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__,
- tx_chan, smac_idx, rss_qid);
+ child_ep->tx_chan, child_ep->smac_idx, child_ep->rss_qid);
init_timer(&child_ep->timer);
cxgb4_insert_tid(t, child_ep, hwtid);
@@ -1788,11 +1814,8 @@ static int is_neg_adv_abort(unsigned int status)
static int c4iw_reconnect(struct c4iw_ep *ep)
{
- int err = 0;
struct rtable *rt;
- struct net_device *pdev;
- struct neighbour *neigh;
- int step;
+ int err = 0;
PDBG("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id);
init_timer(&ep->timer);
@@ -1820,45 +1843,10 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
}
ep->dst = &rt->dst;
- neigh = dst_get_neighbour(ep->dst);
-
- /* get a l2t entry */
- if (neigh->dev->flags & IFF_LOOPBACK) {
- PDBG("%s LOOPBACK\n", __func__);
- pdev = ip_dev_find(&init_net,
- ep->com.cm_id->remote_addr.sin_addr.s_addr);
- ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
- neigh, pdev, 0);
- ep->mtu = pdev->mtu;
- ep->tx_chan = cxgb4_port_chan(pdev);
- ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
- step = ep->com.dev->rdev.lldi.ntxq /
- ep->com.dev->rdev.lldi.nchan;
- ep->txq_idx = cxgb4_port_idx(pdev) * step;
- step = ep->com.dev->rdev.lldi.nrxq /
- ep->com.dev->rdev.lldi.nchan;
- ep->ctrlq_idx = cxgb4_port_idx(pdev);
- ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
- cxgb4_port_idx(pdev) * step];
- dev_put(pdev);
- } else {
- ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
- neigh, neigh->dev, 0);
- ep->mtu = dst_mtu(ep->dst);
- ep->tx_chan = cxgb4_port_chan(neigh->dev);
- ep->smac_idx = (cxgb4_port_viid(neigh->dev) & 0x7F) << 1;
- step = ep->com.dev->rdev.lldi.ntxq /
- ep->com.dev->rdev.lldi.nchan;
- ep->txq_idx = cxgb4_port_idx(neigh->dev) * step;
- ep->ctrlq_idx = cxgb4_port_idx(neigh->dev);
- step = ep->com.dev->rdev.lldi.nrxq /
- ep->com.dev->rdev.lldi.nchan;
- ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
- cxgb4_port_idx(neigh->dev) * step];
- }
- if (!ep->l2t) {
+ err = import_ep(ep, ep->com.cm_id->remote_addr.sin_addr.s_addr,
+ ep->dst, ep->com.dev, false);
+ if (err) {
printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
- err = -ENOMEM;
goto fail4;
}
@@ -2234,13 +2222,10 @@ err:
int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
- int err = 0;
struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
struct c4iw_ep *ep;
struct rtable *rt;
- struct net_device *pdev;
- struct neighbour *neigh;
- int step;
+ int err = 0;
if ((conn_param->ord > c4iw_max_read_depth) ||
(conn_param->ird > c4iw_max_read_depth)) {
@@ -2301,47 +2286,10 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
}
ep->dst = &rt->dst;
- neigh = dst_get_neighbour(ep->dst);
-
- /* get a l2t entry */
- if (neigh->dev->flags & IFF_LOOPBACK) {
- PDBG("%s LOOPBACK\n", __func__);
- pdev = ip_dev_find(&init_net,
- cm_id->remote_addr.sin_addr.s_addr);
- ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
- neigh, pdev, 0);
- ep->mtu = pdev->mtu;
- ep->tx_chan = cxgb4_port_chan(pdev);
- ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
- step = ep->com.dev->rdev.lldi.ntxq /
- ep->com.dev->rdev.lldi.nchan;
- ep->txq_idx = cxgb4_port_idx(pdev) * step;
- step = ep->com.dev->rdev.lldi.nrxq /
- ep->com.dev->rdev.lldi.nchan;
- ep->ctrlq_idx = cxgb4_port_idx(pdev);
- ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
- cxgb4_port_idx(pdev) * step];
- dev_put(pdev);
- } else {
- ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
- neigh, neigh->dev, 0);
- ep->mtu = dst_mtu(ep->dst);
- ep->tx_chan = cxgb4_port_chan(neigh->dev);
- ep->smac_idx = (cxgb4_port_viid(neigh->dev) & 0x7F) << 1;
- step = ep->com.dev->rdev.lldi.ntxq /
- ep->com.dev->rdev.lldi.nchan;
- ep->txq_idx = cxgb4_port_idx(neigh->dev) * step;
- ep->ctrlq_idx = cxgb4_port_idx(neigh->dev);
- step = ep->com.dev->rdev.lldi.nrxq /
- ep->com.dev->rdev.lldi.nchan;
- ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
- cxgb4_port_idx(neigh->dev) * step];
- ep->retry_with_mpa_v1 = 0;
- ep->tried_with_mpa_v1 = 0;
- }
- if (!ep->l2t) {
+ err = import_ep(ep, cm_id->remote_addr.sin_addr.s_addr,
+ ep->dst, ep->com.dev, true);
+ if (err) {
printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
- err = -ENOMEM;
goto fail4;
}
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index f35a935267e7..0f1607c8325a 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -311,7 +311,7 @@ void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
while (ptr != cq->sw_pidx) {
cqe = &cq->sw_queue[ptr];
if (RQ_TYPE(cqe) && (CQE_OPCODE(cqe) != FW_RI_READ_RESP) &&
- (CQE_QPID(cqe) == wq->rq.qid) && cqe_completes_wr(cqe, wq))
+ (CQE_QPID(cqe) == wq->sq.qid) && cqe_completes_wr(cqe, wq))
(*count)++;
if (++ptr == cq->size)
ptr = 0;
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index aaf6023a4835..f08f6eaf3fa8 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -379,8 +379,8 @@ extern spinlock_t shca_list_lock;
extern int ehca_static_rate;
extern int ehca_port_act_time;
-extern int ehca_use_hp_mr;
-extern int ehca_scaling_code;
+extern bool ehca_use_hp_mr;
+extern bool ehca_scaling_code;
extern int ehca_lock_hcalls;
extern int ehca_nr_ports;
extern int ehca_max_cq;
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index c240e9972cb0..832e7a7d0aee 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -59,16 +59,16 @@ MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
MODULE_DESCRIPTION("IBM eServer HCA InfiniBand Device Driver");
MODULE_VERSION(HCAD_VERSION);
-static int ehca_open_aqp1 = 0;
+static bool ehca_open_aqp1 = 0;
static int ehca_hw_level = 0;
-static int ehca_poll_all_eqs = 1;
+static bool ehca_poll_all_eqs = 1;
int ehca_debug_level = 0;
int ehca_nr_ports = -1;
-int ehca_use_hp_mr = 0;
+bool ehca_use_hp_mr = 0;
int ehca_port_act_time = 30;
int ehca_static_rate = -1;
-int ehca_scaling_code = 0;
+bool ehca_scaling_code = 0;
int ehca_lock_hcalls = -1;
int ehca_max_cq = -1;
int ehca_max_qp = -1;
@@ -82,7 +82,7 @@ module_param_named(port_act_time, ehca_port_act_time, int, S_IRUGO);
module_param_named(poll_all_eqs, ehca_poll_all_eqs, bool, S_IRUGO);
module_param_named(static_rate, ehca_static_rate, int, S_IRUGO);
module_param_named(scaling_code, ehca_scaling_code, bool, S_IRUGO);
-module_param_named(lock_hcalls, ehca_lock_hcalls, bool, S_IRUGO);
+module_param_named(lock_hcalls, ehca_lock_hcalls, bint, S_IRUGO);
module_param_named(number_of_cqs, ehca_max_cq, int, S_IRUGO);
module_param_named(number_of_qps, ehca_max_qp, int, S_IRUGO);
diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
index 31ae1b108aea..b7d4216db3c3 100644
--- a/drivers/infiniband/hw/ipath/ipath_fs.c
+++ b/drivers/infiniband/hw/ipath/ipath_fs.c
@@ -46,7 +46,7 @@
static struct super_block *ipath_super;
static int ipathfs_mknod(struct inode *dir, struct dentry *dentry,
- int mode, const struct file_operations *fops,
+ umode_t mode, const struct file_operations *fops,
void *data)
{
int error;
@@ -61,7 +61,7 @@ static int ipathfs_mknod(struct inode *dir, struct dentry *dentry,
inode->i_mode = mode;
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
inode->i_private = data;
- if ((mode & S_IFMT) == S_IFDIR) {
+ if (S_ISDIR(mode)) {
inode->i_op = &simple_dir_inode_operations;
inc_nlink(inode);
inc_nlink(dir);
@@ -76,7 +76,7 @@ bail:
return error;
}
-static int create_file(const char *name, mode_t mode,
+static int create_file(const char *name, umode_t mode,
struct dentry *parent, struct dentry **dentry,
const struct file_operations *fops, void *data)
{
diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
index 4b8f9c49397e..a251becdaa98 100644
--- a/drivers/infiniband/hw/mlx4/ah.c
+++ b/drivers/infiniband/hw/mlx4/ah.c
@@ -126,7 +126,7 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr
ah->av.ib.dlid = cpu_to_be16(0xc000);
memcpy(ah->av.eth.dgid, ah_attr->grh.dgid.raw, 16);
- ah->av.eth.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
+ ah->av.eth.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 29);
return &ah->ibah;
}
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index e8df155bc3b0..5ecf38d97269 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -715,13 +715,17 @@ repoll:
}
wc->slid = be16_to_cpu(cqe->rlid);
- wc->sl = be16_to_cpu(cqe->sl_vid) >> 12;
g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn);
wc->src_qp = g_mlpath_rqpn & 0xffffff;
wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
wc->wc_flags |= g_mlpath_rqpn & 0x80000000 ? IB_WC_GRH : 0;
wc->pkey_index = be32_to_cpu(cqe->immed_rss_invalid) & 0x7f;
wc->csum_ok = mlx4_ib_ipoib_csum_ok(cqe->status, cqe->checksum);
+ if (rdma_port_get_link_layer(wc->qp->device,
+ (*cur_qp)->port) == IB_LINK_LAYER_ETHERNET)
+ wc->sl = be16_to_cpu(cqe->sl_vid) >> 13;
+ else
+ wc->sl = be16_to_cpu(cqe->sl_vid) >> 12;
}
return 0;
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index f36da994a85a..95c94d8f0254 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -109,7 +109,8 @@ int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int ignore_mkey, int ignore_bkey,
err = mlx4_cmd_box(dev->dev, inmailbox->dma, outmailbox->dma,
in_modifier, op_modifier,
- MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C);
+ MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
+ MLX4_CMD_NATIVE);
if (!err)
memcpy(response_mad, outmailbox->buf, 256);
@@ -330,7 +331,8 @@ static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
return IB_MAD_RESULT_FAILURE;
err = mlx4_cmd_box(dev->dev, 0, mailbox->dma, inmod, 0,
- MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C);
+ MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C,
+ MLX4_CMD_WRAPPED);
if (err)
err = IB_MAD_RESULT_FAILURE;
else {
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 77f3dbc0aaa1..7b445df6a667 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -177,7 +177,7 @@ mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num)
{
struct mlx4_dev *dev = to_mdev(device)->dev;
- return dev->caps.port_mask & (1 << (port_num - 1)) ?
+ return dev->caps.port_mask[port_num] == MLX4_PORT_TYPE_IB ?
IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
}
@@ -434,7 +434,7 @@ static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
memset(mailbox->buf, 0, 256);
memcpy(mailbox->buf, props->node_desc, 64);
mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
- MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
mlx4_free_cmd_mailbox(to_mdev(ibdev)->dev, mailbox);
@@ -463,7 +463,7 @@ static int mlx4_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
}
err = mlx4_cmd(dev->dev, mailbox->dma, port, is_eth, MLX4_CMD_SET_PORT,
- MLX4_CMD_TIME_CLASS_B);
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
mlx4_free_cmd_mailbox(dev->dev, mailbox);
return err;
@@ -899,7 +899,8 @@ static void update_gids_task(struct work_struct *work)
memcpy(gids, gw->gids, sizeof gw->gids);
err = mlx4_cmd(dev, mailbox->dma, MLX4_SET_PORT_GID_TABLE << 8 | gw->port,
- 1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B);
+ 1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_NATIVE);
if (err)
printk(KERN_WARNING "set port command failed\n");
else {
@@ -1074,6 +1075,11 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
printk_once(KERN_INFO "%s", mlx4_ib_version);
+ if (mlx4_is_mfunc(dev)) {
+ printk(KERN_WARNING "IB not yet supported in SRIOV\n");
+ return NULL;
+ }
+
mlx4_foreach_ib_transport_port(i, dev)
num_ports++;
@@ -1244,7 +1250,8 @@ err_reg:
err_counter:
for (; i; --i)
- mlx4_counter_free(ibdev->dev, ibdev->counters[i - 1]);
+ if (ibdev->counters[i - 1] != -1)
+ mlx4_counter_free(ibdev->dev, ibdev->counters[i - 1]);
err_map:
iounmap(ibdev->uar_map);
@@ -1275,7 +1282,8 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
}
iounmap(ibdev->uar_map);
for (p = 0; p < ibdev->num_ports; ++p)
- mlx4_counter_free(ibdev->dev, ibdev->counters[p]);
+ if (ibdev->counters[p] != -1)
+ mlx4_counter_free(ibdev->dev, ibdev->counters[p]);
mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
mlx4_CLOSE_PORT(dev, p);
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index a16f0c8e6f3f..aa2aefa4236c 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -962,7 +962,7 @@ static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
if (is_eth) {
path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
- ((port - 1) << 6) | ((ah->sl & 7) << 3) | ((ah->sl & 8) >> 1);
+ ((port - 1) << 6) | ((ah->sl & 7) << 3);
if (!(ah->ah_flags & IB_AH_GRH))
return -1;
@@ -1437,7 +1437,7 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
u16 pcp;
sqp->ud_header.vlan.type = cpu_to_be16(MLX4_IB_IBOE_ETHERTYPE);
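+			/*
+			 * The SL (top 3 bits of sl_tclass_flowlabel) maps to
+			 * the VLAN PCP field (bits 15..13 of the tag).
+			 */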
- pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 27 & 3) << 13;
+ pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 29) << 13;
sqp->ud_header.vlan.tag = cpu_to_be16(vlan | pcp);
}
} else {
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index 5965b3df8f2f..7013da5e9eda 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -96,7 +96,7 @@ unsigned int wqm_quanta = 0x10000;
module_param(wqm_quanta, int, 0644);
MODULE_PARM_DESC(wqm_quanta, "WQM quanta");
-static unsigned int limit_maxrdreqsz;
+static bool limit_maxrdreqsz;
module_param(limit_maxrdreqsz, bool, 0644);
MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index dfce9ea98a39..425065b36b8c 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -1348,7 +1348,8 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
else
netdev = nesvnic->netdev;
- neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, netdev);
+ rcu_read_lock();
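+	/*
+	 * The noref neighbour is only valid while the RCU read lock is
+	 * held, so no neigh_release() is needed on this path.
+	 */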
+ neigh = dst_get_neighbour_noref(&rt->dst);
if (neigh) {
if (neigh->nud_state & NUD_VALID) {
nes_debug(NES_DBG_CM, "Neighbor MAC address for 0x%08X"
@@ -1359,7 +1360,6 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
if (!memcmp(nesadapter->arp_table[arpindex].mac_addr,
neigh->ha, ETH_ALEN)) {
/* Mac address same as in nes_arp_table */
- neigh_release(neigh);
ip_rt_put(rt);
return rc;
}
@@ -1373,13 +1373,11 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
dst_ip, NES_ARP_ADD);
rc = nes_arp_table(nesvnic->nesdev, dst_ip, NULL,
NES_ARP_RESOLVE);
+ } else {
+ neigh_event_send(neigh, NULL);
}
- neigh_release(neigh);
}
-
- if ((neigh == NULL) || (!(neigh->nud_state & NUD_VALID)))
- neigh_event_send(dst_get_neighbour(&rt->dst), NULL);
-
+ rcu_read_unlock();
ip_rt_put(rt);
return rc;
}
@@ -2836,6 +2834,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
issue_disconn = 1;
issue_close = 1;
nesqp->cm_id = NULL;
+ del_timer(&nesqp->terminate_timer);
if (nesqp->flush_issued == 0) {
nesqp->flush_issued = 1;
issue_flush = 1;
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 7c0ff19ce382..055f4b545df0 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -1529,7 +1529,7 @@ int nes_init_phy(struct nes_device *nesdev)
} else {
/* setup 10G MDIO operation */
tx_config &= 0xFFFFFFE3;
- tx_config |= 0x15;
+ tx_config |= 0x1D;
}
nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, tx_config);
@@ -3619,10 +3619,6 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
}
break;
case NES_AEQE_AEID_LLP_CLOSE_COMPLETE:
- if (nesqp->term_flags) {
- nes_terminate_done(nesqp, 0);
- return;
- }
spin_lock_irqsave(&nesqp->lock, flags);
nesqp->hw_iwarp_state = iwarp_state;
nesqp->hw_tcp_state = tcp_state;
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index c00d2f3f8966..4b3fa711a247 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -1589,7 +1589,7 @@ static const struct ethtool_ops nes_ethtool_ops = {
.set_pauseparam = nes_netdev_set_pauseparam,
};
-static void nes_vlan_mode(struct net_device *netdev, struct nes_device *nesdev, u32 features)
+static void nes_vlan_mode(struct net_device *netdev, struct nes_device *nesdev, netdev_features_t features)
{
struct nes_adapter *nesadapter = nesdev->nesadapter;
u32 u32temp;
@@ -1610,7 +1610,7 @@ static void nes_vlan_mode(struct net_device *netdev, struct nes_device *nesdev,
spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
}
-static u32 nes_fix_features(struct net_device *netdev, u32 features)
+static netdev_features_t nes_fix_features(struct net_device *netdev, netdev_features_t features)
{
/*
* Since there is no support for separate rx/tx vlan accel
@@ -1624,7 +1624,7 @@ static u32 nes_fix_features(struct net_device *netdev, u32 features)
return features;
}
-static int nes_set_features(struct net_device *netdev, u32 features)
+static int nes_set_features(struct net_device *netdev, netdev_features_t features)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_device *nesdev = nesvnic->nesdev;
diff --git a/drivers/infiniband/hw/nes/nes_utils.c b/drivers/infiniband/hw/nes/nes_utils.c
index cd10968bfa22..8b4c2ff54888 100644
--- a/drivers/infiniband/hw/nes/nes_utils.c
+++ b/drivers/infiniband/hw/nes/nes_utils.c
@@ -56,7 +56,7 @@ static u16 nes_read16_eeprom(void __iomem *addr, u16 offset);
u32 mh_detected;
u32 mh_pauses_sent;
-u32 nes_set_pau(struct nes_device *nesdev)
+static u32 nes_set_pau(struct nes_device *nesdev)
{
u32 ret = 0;
u32 counter;
diff --git a/drivers/infiniband/hw/qib/qib_7220.h b/drivers/infiniband/hw/qib/qib_7220.h
index 21f374aa0631..a5356cb4252e 100644
--- a/drivers/infiniband/hw/qib/qib_7220.h
+++ b/drivers/infiniband/hw/qib/qib_7220.h
@@ -97,7 +97,7 @@ struct qib_chippport_specific {
u64 iblnkerrsnap;
u64 ibcctrl; /* kr_ibcctrl shadow */
u64 ibcddrctrl; /* kr_ibcddrctrl shadow */
- u64 chase_end;
+ unsigned long chase_end;
u32 last_delay_mult;
};
diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c
index c90a55f4120f..6fc9365ba8a6 100644
--- a/drivers/infiniband/hw/qib/qib_driver.c
+++ b/drivers/infiniband/hw/qib/qib_driver.c
@@ -371,9 +371,8 @@ static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd,
lnh == QIB_LRH_GRH,
qp,
be32_to_cpu(ohdr->bth[0]));
- if (ruc_res) {
+ if (ruc_res)
goto unlock;
- }
/* Only deal with RDMA Writes for now */
if (opcode <
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index 574600ef5b42..a7403248d83d 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -1285,7 +1285,7 @@ static int setup_ctxt(struct qib_pportdata *ppd, int ctxt,
strlcpy(rcd->comm, current->comm, sizeof(rcd->comm));
ctxt_fp(fp) = rcd;
qib_stats.sps_ctxts++;
- dd->freectxts++;
+ dd->freectxts--;
ret = 0;
goto bail;
@@ -1794,7 +1794,7 @@ static int qib_close(struct inode *in, struct file *fp)
if (dd->pageshadow)
unlock_expected_tids(rcd);
qib_stats.sps_ctxts--;
- dd->freectxts--;
+ dd->freectxts++;
}
mutex_unlock(&qib_mutex);
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
index df7fa251dcdc..05e0f17c5b44 100644
--- a/drivers/infiniband/hw/qib/qib_fs.c
+++ b/drivers/infiniband/hw/qib/qib_fs.c
@@ -47,7 +47,7 @@ static struct super_block *qib_super;
#define private2dd(file) ((file)->f_dentry->d_inode->i_private)
static int qibfs_mknod(struct inode *dir, struct dentry *dentry,
- int mode, const struct file_operations *fops,
+ umode_t mode, const struct file_operations *fops,
void *data)
{
int error;
@@ -67,7 +67,7 @@ static int qibfs_mknod(struct inode *dir, struct dentry *dentry,
inode->i_mtime = inode->i_atime;
inode->i_ctime = inode->i_atime;
inode->i_private = data;
- if ((mode & S_IFMT) == S_IFDIR) {
+ if (S_ISDIR(mode)) {
inode->i_op = &simple_dir_inode_operations;
inc_nlink(inode);
inc_nlink(dir);
@@ -82,7 +82,7 @@ bail:
return error;
}
-static int create_file(const char *name, mode_t mode,
+static int create_file(const char *name, umode_t mode,
struct dentry *parent, struct dentry **dentry,
const struct file_operations *fops, void *data)
{
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
index 781a802a321f..4f18e2d332df 100644
--- a/drivers/infiniband/hw/qib/qib_iba6120.c
+++ b/drivers/infiniband/hw/qib/qib_iba6120.c
@@ -2076,9 +2076,11 @@ static void qib_6120_config_ctxts(struct qib_devdata *dd)
static void qib_update_6120_usrhead(struct qib_ctxtdata *rcd, u64 hd,
u32 updegr, u32 egrhd, u32 npkts)
{
- qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
if (updegr)
qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
+ mmiowb();
+ qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
+ mmiowb();
}
static u32 qib_6120_hdrqempty(struct qib_ctxtdata *rcd)
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
index 439d3c503cd5..3c722f79d6f6 100644
--- a/drivers/infiniband/hw/qib/qib_iba7220.c
+++ b/drivers/infiniband/hw/qib/qib_iba7220.c
@@ -1051,7 +1051,7 @@ static void reenable_7220_chase(unsigned long opaque)
static void handle_7220_chase(struct qib_pportdata *ppd, u64 ibcst)
{
u8 ibclt;
- u64 tnow;
+ unsigned long tnow;
ibclt = (u8)SYM_FIELD(ibcst, IBCStatus, LinkTrainingState);
@@ -1066,9 +1066,9 @@ static void handle_7220_chase(struct qib_pportdata *ppd, u64 ibcst)
case IB_7220_LT_STATE_CFGWAITRMT:
case IB_7220_LT_STATE_TXREVLANES:
case IB_7220_LT_STATE_CFGENH:
- tnow = get_jiffies_64();
+ tnow = jiffies;
if (ppd->cpspec->chase_end &&
- time_after64(tnow, ppd->cpspec->chase_end)) {
+ time_after(tnow, ppd->cpspec->chase_end)) {
ppd->cpspec->chase_end = 0;
qib_set_ib_7220_lstate(ppd,
QLOGIC_IB_IBCC_LINKCMD_DOWN,
@@ -2725,9 +2725,11 @@ static int qib_7220_set_loopback(struct qib_pportdata *ppd, const char *what)
static void qib_update_7220_usrhead(struct qib_ctxtdata *rcd, u64 hd,
u32 updegr, u32 egrhd, u32 npkts)
{
- qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
if (updegr)
qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
+ mmiowb();
+ qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
+ mmiowb();
}
static u32 qib_7220_hdrqempty(struct qib_ctxtdata *rcd)
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 5bd2162b95dc..41e92089e41b 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -615,8 +615,8 @@ struct qib_chippport_specific {
u64 ibmalfsnap;
u64 ibcctrl_a; /* krp_ibcctrl_a shadow */
u64 ibcctrl_b; /* krp_ibcctrl_b shadow */
- u64 qdr_dfe_time;
- u64 chase_end;
+ unsigned long qdr_dfe_time;
+ unsigned long chase_end;
u32 autoneg_tries;
u32 recovery_init;
u32 qdr_dfe_on;
@@ -1672,7 +1672,8 @@ static void reenable_chase(unsigned long opaque)
QLOGIC_IB_IBCC_LINKINITCMD_POLL);
}
-static void disable_chase(struct qib_pportdata *ppd, u64 tnow, u8 ibclt)
+static void disable_chase(struct qib_pportdata *ppd, unsigned long tnow,
+ u8 ibclt)
{
ppd->cpspec->chase_end = 0;
@@ -1688,7 +1689,7 @@ static void disable_chase(struct qib_pportdata *ppd, u64 tnow, u8 ibclt)
static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst)
{
u8 ibclt;
- u64 tnow;
+ unsigned long tnow;
ibclt = (u8)SYM_FIELD(ibcst, IBCStatusA_0, LinkTrainingState);
@@ -1703,9 +1704,9 @@ static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst)
case IB_7322_LT_STATE_CFGWAITRMT:
case IB_7322_LT_STATE_TXREVLANES:
case IB_7322_LT_STATE_CFGENH:
- tnow = get_jiffies_64();
+ tnow = jiffies;
if (ppd->cpspec->chase_end &&
- time_after64(tnow, ppd->cpspec->chase_end))
+ time_after(tnow, ppd->cpspec->chase_end))
disable_chase(ppd, tnow, ibclt);
else if (!ppd->cpspec->chase_end)
ppd->cpspec->chase_end = tnow + QIB_CHASE_TIME;
@@ -2307,19 +2308,11 @@ static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
SYM_LSB(IBCCtrlA_0, MaxPktLen);
ppd->cpspec->ibcctrl_a = ibc; /* without linkcmd or linkinitcmd! */
- /* initially come up waiting for TS1, without sending anything. */
- val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
- QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
-
- ppd->cpspec->ibcctrl_a = val;
/*
* Reset the PCS interface to the serdes (and also ibc, which is still
* in reset from above). Writes new value of ibcctrl_a as last step.
*/
qib_7322_mini_pcs_reset(ppd);
- qib_write_kreg(dd, kr_scratch, 0ULL);
- /* clear the linkinit cmds */
- ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, LinkInitCmd);
if (!ppd->cpspec->ibcctrl_b) {
unsigned lse = ppd->link_speed_enabled;
@@ -2385,6 +2378,14 @@ static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0, IBLinkEn);
set_vls(ppd);
+ /* initially come up DISABLED, without sending anything. */
+ val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
+ QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
+ qib_write_kreg_port(ppd, krp_ibcctrl_a, val);
+ qib_write_kreg(dd, kr_scratch, 0ULL);
+ /* clear the linkinit cmds */
+ ppd->cpspec->ibcctrl_a = val & ~SYM_MASK(IBCCtrlA_0, LinkInitCmd);
+
/* be paranoid against later code motion, etc. */
spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvIBPortEnable);
@@ -2714,7 +2715,7 @@ static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd)
pins >>= SYM_LSB(EXTStatus, GPIOIn);
if (!(pins & mask)) {
++handled;
- qd->t_insert = get_jiffies_64();
+ qd->t_insert = jiffies;
queue_work(ib_wq, &qd->work);
}
}
@@ -3602,7 +3603,7 @@ static void qib_7322_config_ctxts(struct qib_devdata *dd)
if (qib_rcvhdrcnt)
dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, qib_rcvhdrcnt);
else
- dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt,
+ dd->rcvhdrcnt = 2 * max(dd->cspec->rcvegrcnt,
dd->num_pports > 1 ? 1024U : 2048U);
}
@@ -4082,10 +4083,12 @@ static void qib_update_7322_usrhead(struct qib_ctxtdata *rcd, u64 hd,
*/
if (hd >> IBA7322_HDRHEAD_PKTINT_SHIFT)
adjust_rcv_timeout(rcd, npkts);
- qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
- qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
if (updegr)
qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
+ mmiowb();
+ qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
+ qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
+ mmiowb();
}
static u32 qib_7322_hdrqempty(struct qib_ctxtdata *rcd)
@@ -4794,7 +4797,7 @@ static void qib_get_7322_faststats(unsigned long opaque)
(ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
QIBL_LINKACTIVE)) &&
ppd->cpspec->qdr_dfe_time &&
- time_after64(get_jiffies_64(), ppd->cpspec->qdr_dfe_time)) {
+ time_is_before_jiffies(ppd->cpspec->qdr_dfe_time)) {
ppd->cpspec->qdr_dfe_on = 0;
qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
@@ -5240,8 +5243,8 @@ static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
/* schedule the qsfp refresh which should turn the link
off */
if (ppd->dd->flags & QIB_HAS_QSFP) {
- qd->t_insert = get_jiffies_64();
- schedule_work(&qd->work);
+ qd->t_insert = jiffies;
+ queue_work(ib_wq, &qd->work);
}
spin_lock_irqsave(&ppd->sdma_lock, flags);
if (__qib_sdma_running(ppd))
@@ -5592,7 +5595,7 @@ static void qsfp_7322_event(struct work_struct *work)
{
struct qib_qsfp_data *qd;
struct qib_pportdata *ppd;
- u64 pwrup;
+ unsigned long pwrup;
unsigned long flags;
int ret;
u32 le2;
@@ -5620,8 +5623,7 @@ static void qsfp_7322_event(struct work_struct *work)
* to insertion.
*/
while (1) {
- u64 now = get_jiffies_64();
- if (time_after64(now, pwrup))
+ if (time_is_before_jiffies(pwrup))
break;
msleep(20);
}
@@ -7506,7 +7508,7 @@ static int serdes_7322_init_old(struct qib_pportdata *ppd)
static int serdes_7322_init_new(struct qib_pportdata *ppd)
{
- u64 tstart;
+ unsigned long tend;
u32 le_val, rxcaldone;
int chan, chan_done = (1 << SERDES_CHANS) - 1;
@@ -7611,10 +7613,8 @@ static int serdes_7322_init_new(struct qib_pportdata *ppd)
msleep(20);
/* Start Calibration */
ibsd_wr_allchans(ppd, 4, (1 << 10), BMASK(10, 10));
- tstart = get_jiffies_64();
- while (chan_done &&
- !time_after64(get_jiffies_64(),
- tstart + msecs_to_jiffies(500))) {
+ tend = jiffies + msecs_to_jiffies(500);
+ while (chan_done && !time_is_before_jiffies(tend)) {
msleep(20);
for (chan = 0; chan < SERDES_CHANS; ++chan) {
rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index 58b0f8ad4a29..cf0cd30adc8d 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -1015,7 +1015,7 @@ static int __devinit qib_init_one(struct pci_dev *,
#define DRIVER_LOAD_MSG "QLogic " QIB_DRV_NAME " loaded: "
#define PFX QIB_DRV_NAME ": "
-static const struct pci_device_id qib_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(qib_pci_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_QLOGIC_IB_6120) },
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_IB_7220) },
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_IB_7322) },
diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c
index 97a8bdf68e60..f695061d688e 100644
--- a/drivers/infiniband/hw/qib/qib_pcie.c
+++ b/drivers/infiniband/hw/qib/qib_pcie.c
@@ -560,9 +560,9 @@ static int qib_tune_pcie_coalesce(struct qib_devdata *dd)
* BIOS may not set PCIe bus-utilization parameters for best performance.
* Check and optionally adjust them to maximize our throughput.
*/
-static int qib_pcie_caps;
+static int qib_pcie_caps = 0x51;
module_param_named(pcie_caps, qib_pcie_caps, int, S_IRUGO);
-MODULE_PARM_DESC(pcie_caps, "Max PCIe tuning: Payload (4lsb), ReadReq (D4..7)");
+MODULE_PARM_DESC(pcie_caps, "Max PCIe tuning: Payload (0..3), ReadReq (4..7)");
static int qib_tune_pcie_caps(struct qib_devdata *dd)
{
diff --git a/drivers/infiniband/hw/qib/qib_qsfp.c b/drivers/infiniband/hw/qib/qib_qsfp.c
index e06c4ed383f1..fa71b1e666c5 100644
--- a/drivers/infiniband/hw/qib/qib_qsfp.c
+++ b/drivers/infiniband/hw/qib/qib_qsfp.c
@@ -480,18 +480,6 @@ void qib_qsfp_init(struct qib_qsfp_data *qd,
udelay(20); /* Generous RST dwell */
dd->f_gpio_mod(dd, mask, mask, mask);
- /* Spec says module can take up to two seconds! */
- mask = QSFP_GPIO_MOD_PRS_N;
- if (qd->ppd->hw_pidx)
- mask <<= QSFP_GPIO_PORT2_SHIFT;
-
- /* Do not try to wait here. Better to let event handle it */
- if (!qib_qsfp_mod_present(qd->ppd))
- goto bail;
- /* We see a module, but it may be unwise to look yet. Just schedule */
- qd->t_insert = get_jiffies_64();
- queue_work(ib_wq, &qd->work);
-bail:
return;
}
diff --git a/drivers/infiniband/hw/qib/qib_qsfp.h b/drivers/infiniband/hw/qib/qib_qsfp.h
index 46002a9417c0..91908f533a2b 100644
--- a/drivers/infiniband/hw/qib/qib_qsfp.h
+++ b/drivers/infiniband/hw/qib/qib_qsfp.h
@@ -177,7 +177,7 @@ struct qib_qsfp_data {
struct qib_pportdata *ppd;
struct work_struct work;
struct qib_qsfp_cache cache;
- u64 t_insert;
+ unsigned long t_insert;
u8 modpresent;
};
diff --git a/drivers/infiniband/hw/qib/qib_sd7220.c b/drivers/infiniband/hw/qib/qib_sd7220.c
index de1a4b2f33c0..ac065dd6b693 100644
--- a/drivers/infiniband/hw/qib/qib_sd7220.c
+++ b/drivers/infiniband/hw/qib/qib_sd7220.c
@@ -300,7 +300,7 @@ bail:
}
static void qib_sd_trimdone_monitor(struct qib_devdata *dd,
- const char *where)
+ const char *where)
{
int ret, chn, baduns;
u64 val;
diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
index 78fbd56879d4..dae51604cfcd 100644
--- a/drivers/infiniband/hw/qib/qib_sysfs.c
+++ b/drivers/infiniband/hw/qib/qib_sysfs.c
@@ -150,7 +150,7 @@ static ssize_t show_status(struct qib_pportdata *ppd, char *buf)
* For userland compatibility, these offsets must remain fixed.
* They are strings for QIB_STATUS_*
*/
-static const char *qib_status_str[] = {
+static const char * const qib_status_str[] = {
"Initted",
"",
"",
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index a894762da462..7b6c3bffa9d9 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -913,8 +913,8 @@ static void copy_io(u32 __iomem *piobuf, struct qib_sge_state *ss,
__raw_writel(last, piobuf);
}
-static struct qib_verbs_txreq *get_txreq(struct qib_ibdev *dev,
- struct qib_qp *qp, int *retp)
+static noinline struct qib_verbs_txreq *__get_txreq(struct qib_ibdev *dev,
+ struct qib_qp *qp)
{
struct qib_verbs_txreq *tx;
unsigned long flags;
@@ -926,8 +926,9 @@ static struct qib_verbs_txreq *get_txreq(struct qib_ibdev *dev,
struct list_head *l = dev->txreq_free.next;
list_del(l);
+ spin_unlock(&dev->pending_lock);
+ spin_unlock_irqrestore(&qp->s_lock, flags);
tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
- *retp = 0;
} else {
if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK &&
list_empty(&qp->iowait)) {
@@ -935,14 +936,33 @@ static struct qib_verbs_txreq *get_txreq(struct qib_ibdev *dev,
qp->s_flags |= QIB_S_WAIT_TX;
list_add_tail(&qp->iowait, &dev->txwait);
}
- tx = NULL;
qp->s_flags &= ~QIB_S_BUSY;
- *retp = -EBUSY;
+ spin_unlock(&dev->pending_lock);
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ tx = ERR_PTR(-EBUSY);
}
+ return tx;
+}
- spin_unlock(&dev->pending_lock);
- spin_unlock_irqrestore(&qp->s_lock, flags);
+static inline struct qib_verbs_txreq *get_txreq(struct qib_ibdev *dev,
+ struct qib_qp *qp)
+{
+ struct qib_verbs_txreq *tx;
+ unsigned long flags;
+ spin_lock_irqsave(&dev->pending_lock, flags);
+	/* assume the list is non-empty */
+ if (likely(!list_empty(&dev->txreq_free))) {
+ struct list_head *l = dev->txreq_free.next;
+
+ list_del(l);
+ spin_unlock_irqrestore(&dev->pending_lock, flags);
+ tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
+ } else {
+ /* call slow path to get the extra lock */
+ spin_unlock_irqrestore(&dev->pending_lock, flags);
+ tx = __get_txreq(dev, qp);
+ }
return tx;
}
@@ -1122,9 +1142,9 @@ static int qib_verbs_send_dma(struct qib_qp *qp, struct qib_ib_header *hdr,
goto bail;
}
- tx = get_txreq(dev, qp, &ret);
- if (!tx)
- goto bail;
+ tx = get_txreq(dev, qp);
+ if (IS_ERR(tx))
+ goto bail_tx;
control = dd->f_setpbc_control(ppd, plen, qp->s_srate,
be16_to_cpu(hdr->lrh[0]) >> 12);
@@ -1195,6 +1215,9 @@ unaligned:
ibp->n_unaligned++;
bail:
return ret;
+bail_tx:
+ ret = PTR_ERR(tx);
+ goto bail;
}
/*
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 0ef9af94997d..4115be54ba3b 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -57,21 +57,24 @@ struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
struct ib_pd *pd, struct ib_ah_attr *attr)
{
struct ipoib_ah *ah;
+ struct ib_ah *vah;
ah = kmalloc(sizeof *ah, GFP_KERNEL);
if (!ah)
- return NULL;
+ return ERR_PTR(-ENOMEM);
ah->dev = dev;
ah->last_send = 0;
kref_init(&ah->ref);
- ah->ah = ib_create_ah(pd, attr);
- if (IS_ERR(ah->ah)) {
+ vah = ib_create_ah(pd, attr);
+ if (IS_ERR(vah)) {
kfree(ah);
- ah = NULL;
- } else
+ ah = (struct ipoib_ah *)vah;
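+		/* hand the ERR_PTR from ib_create_ah() back to the caller */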
+ } else {
+ ah->ah = vah;
ipoib_dbg(netdev_priv(dev), "Created ah %p\n", ah->ah);
+ }
return ah;
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 7567b6000230..3514ca05deea 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -171,7 +171,7 @@ static int ipoib_stop(struct net_device *dev)
return 0;
}
-static u32 ipoib_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t ipoib_fix_features(struct net_device *dev, netdev_features_t features)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
@@ -432,7 +432,7 @@ static void path_rec_completion(int status,
spin_lock_irqsave(&priv->lock, flags);
- if (ah) {
+ if (!IS_ERR_OR_NULL(ah)) {
path->pathrec = *pathrec;
old_ah = path->ah;
@@ -555,15 +555,14 @@ static int path_rec_start(struct net_device *dev,
return 0;
}
-static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
+/* called with rcu_read_lock */
+static void neigh_add_path(struct sk_buff *skb, struct neighbour *n, struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_path *path;
struct ipoib_neigh *neigh;
- struct neighbour *n;
unsigned long flags;
- n = dst_get_neighbour(skb_dst(skb));
neigh = ipoib_neigh_alloc(n, skb->dev);
if (!neigh) {
++dev->stats.tx_dropped;
@@ -636,16 +635,14 @@ err_drop:
spin_unlock_irqrestore(&priv->lock, flags);
}
-static void ipoib_path_lookup(struct sk_buff *skb, struct net_device *dev)
+/* called with rcu_read_lock */
+static void ipoib_path_lookup(struct sk_buff *skb, struct neighbour *n, struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(skb->dev);
- struct dst_entry *dst = skb_dst(skb);
- struct neighbour *n;
/* Look up path record for unicasts */
- n = dst_get_neighbour(dst);
if (n->ha[4] != 0xff) {
- neigh_add_path(skb, dev);
+ neigh_add_path(skb, n, dev);
return;
}
@@ -720,13 +717,19 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct neighbour *n = NULL;
unsigned long flags;
- if (likely(skb_dst(skb)))
- n = dst_get_neighbour(skb_dst(skb));
-
+ rcu_read_lock();
+ if (likely(skb_dst(skb))) {
+ n = dst_get_neighbour_noref(skb_dst(skb));
+ if (!n) {
+ ++dev->stats.tx_dropped;
+ dev_kfree_skb_any(skb);
+ goto unlock;
+ }
+ }
if (likely(n)) {
if (unlikely(!*to_ipoib_neigh(n))) {
- ipoib_path_lookup(skb, dev);
- return NETDEV_TX_OK;
+ ipoib_path_lookup(skb, n, dev);
+ goto unlock;
}
neigh = *to_ipoib_neigh(n);
@@ -748,18 +751,18 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
list_del(&neigh->list);
ipoib_neigh_free(dev, neigh);
spin_unlock_irqrestore(&priv->lock, flags);
- ipoib_path_lookup(skb, dev);
- return NETDEV_TX_OK;
+ ipoib_path_lookup(skb, n, dev);
+ goto unlock;
}
if (ipoib_cm_get(neigh)) {
if (ipoib_cm_up(neigh)) {
ipoib_cm_send(dev, skb, ipoib_cm_get(neigh));
- return NETDEV_TX_OK;
+ goto unlock;
}
} else if (neigh->ah) {
ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(n->ha));
- return NETDEV_TX_OK;
+ goto unlock;
}
if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
@@ -793,13 +796,14 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
phdr->hwaddr + 4);
dev_kfree_skb_any(skb);
++dev->stats.tx_dropped;
- return NETDEV_TX_OK;
+ goto unlock;
}
unicast_arp_send(skb, dev, phdr);
}
}
-
+unlock:
+ rcu_read_unlock();
return NETDEV_TX_OK;
}
@@ -837,7 +841,7 @@ static int ipoib_hard_header(struct sk_buff *skb,
dst = skb_dst(skb);
n = NULL;
if (dst)
- n = dst_get_neighbour(dst);
+ n = dst_get_neighbour_noref_raw(dst);
if ((!dst || !n) && daddr) {
struct ipoib_pseudoheader *phdr =
(struct ipoib_pseudoheader *) skb_push(skb, sizeof *phdr);
@@ -1218,6 +1222,8 @@ static struct net_device *ipoib_add_port(const char *format,
priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu;
+ priv->dev->neigh_priv_len = sizeof(struct ipoib_neigh);
+
result = ib_query_pkey(hca, port, 0, &priv->pkey);
if (result) {
printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n",
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 1b7a97686356..f7ff9dd66cda 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -240,8 +240,11 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
av.grh.dgid = mcast->mcmember.mgid;
ah = ipoib_create_ah(dev, priv->pd, &av);
- if (!ah) {
- ipoib_warn(priv, "ib_address_create failed\n");
+ if (IS_ERR(ah)) {
+ ipoib_warn(priv, "ib_address_create failed %ld\n",
+ -PTR_ERR(ah));
+ /* use original error */
+ return PTR_ERR(ah);
} else {
spin_lock_irq(&priv->lock);
mcast->ah = ah;
@@ -266,7 +269,7 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
skb->dev = dev;
if (dst)
- n = dst_get_neighbour(dst);
+ n = dst_get_neighbour_noref_raw(dst);
if (!dst || !n) {
/* put pseudoheader back on for next time */
skb_push(skb, sizeof (struct ipoib_pseudoheader));
@@ -722,8 +725,10 @@ out:
if (mcast && mcast->ah) {
struct dst_entry *dst = skb_dst(skb);
struct neighbour *n = NULL;
+
+ rcu_read_lock();
if (dst)
- n = dst_get_neighbour(dst);
+ n = dst_get_neighbour_noref(dst);
if (n && !*to_ipoib_neigh(n)) {
struct ipoib_neigh *neigh = ipoib_neigh_alloc(n,
skb->dev);
@@ -734,7 +739,7 @@ out:
list_add_tail(&neigh->list, &mcast->neigh_list);
}
}
-
+ rcu_read_unlock();
spin_unlock_irqrestore(&priv->lock, flags);
ipoib_send(dev, skb, mcast->ah, IB_MULTICAST_QPN);
return;
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 7e7373a700e6..9a43cb07f294 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -638,7 +638,7 @@ iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep)
iser_conn_terminate(ib_conn);
}
-static mode_t iser_attr_is_visible(int param_type, int param)
+static umode_t iser_attr_is_visible(int param_type, int param)
{
switch (param_type) {
case ISCSI_HOST_PARAM:
diff --git a/drivers/infiniband/ulp/srpt/Kconfig b/drivers/infiniband/ulp/srpt/Kconfig
new file mode 100644
index 000000000000..31ee83d528d9
--- /dev/null
+++ b/drivers/infiniband/ulp/srpt/Kconfig
@@ -0,0 +1,12 @@
+config INFINIBAND_SRPT
+ tristate "InfiniBand SCSI RDMA Protocol target support"
+ depends on INFINIBAND && TARGET_CORE
+ ---help---
+
+	  Support for the SCSI RDMA Protocol (SRP) Target driver. SRP
+	  allows an initiator to access a block storage device on another
+	  host (the target) over a network that supports the RDMA protocol.
+	  Currently the RDMA protocol is supported by InfiniBand and by
+	  iWARP network hardware. More information about the SRP protocol
+	  can be found on the website of the INCITS T10 technical
+	  committee (http://www.t10.org/).
diff --git a/drivers/infiniband/ulp/srpt/Makefile b/drivers/infiniband/ulp/srpt/Makefile
new file mode 100644
index 000000000000..e3ee4bdfffa5
--- /dev/null
+++ b/drivers/infiniband/ulp/srpt/Makefile
@@ -0,0 +1,2 @@
+ccflags-y := -Idrivers/target
+obj-$(CONFIG_INFINIBAND_SRPT) += ib_srpt.o
diff --git a/drivers/infiniband/ulp/srpt/ib_dm_mad.h b/drivers/infiniband/ulp/srpt/ib_dm_mad.h
new file mode 100644
index 000000000000..fb1de1f6f297
--- /dev/null
+++ b/drivers/infiniband/ulp/srpt/ib_dm_mad.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2006 - 2009 Mellanox Technology Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef IB_DM_MAD_H
+#define IB_DM_MAD_H
+
+#include <linux/types.h>
+
+#include <rdma/ib_mad.h>
+
+enum {
+ /*
+ * See also section 13.4.7 Status Field, table 115 MAD Common Status
+ * Field Bit Values and also section 16.3.1.1 Status Field in the
+ * InfiniBand Architecture Specification.
+ */
+ DM_MAD_STATUS_UNSUP_METHOD = 0x0008,
+ DM_MAD_STATUS_UNSUP_METHOD_ATTR = 0x000c,
+ DM_MAD_STATUS_INVALID_FIELD = 0x001c,
+ DM_MAD_STATUS_NO_IOC = 0x0100,
+
+ /*
+ * See also the Device Management chapter, section 16.3.3 Attributes,
+ * table 279 Device Management Attributes in the InfiniBand
+ * Architecture Specification.
+ */
+ DM_ATTR_CLASS_PORT_INFO = 0x01,
+ DM_ATTR_IOU_INFO = 0x10,
+ DM_ATTR_IOC_PROFILE = 0x11,
+ DM_ATTR_SVC_ENTRIES = 0x12
+};
+
+struct ib_dm_hdr {
+ u8 reserved[28];
+};
+
+/*
+ * Structure of management datagram sent by the SRP target implementation.
+ * Contains a management datagram header, reliable multi-packet transaction
+ * protocol (RMPP) header and ib_dm_hdr. Notes:
+ * - The SRP target implementation does not use RMPP or ib_dm_hdr when sending
+ * management datagrams.
+ * - The header size must be exactly 64 bytes (IB_MGMT_DEVICE_HDR), since this
+ * is the header size that is passed to ib_create_send_mad() in ib_srpt.c.
+ * - The maximum supported size for a management datagram when not using RMPP
+ * is 256 bytes -- 64 bytes header and 192 (IB_MGMT_DEVICE_DATA) bytes data.
+ */
+struct ib_dm_mad {
+ struct ib_mad_hdr mad_hdr;
+ struct ib_rmpp_hdr rmpp_hdr;
+ struct ib_dm_hdr dm_hdr;
+ u8 data[IB_MGMT_DEVICE_DATA];
+};
+
+/*
+ * IOUnitInfo as defined in section 16.3.3.3 IOUnitInfo of the InfiniBand
+ * Architecture Specification.
+ */
+struct ib_dm_iou_info {
+ __be16 change_id;
+ u8 max_controllers;
+ u8 op_rom;
+ u8 controller_list[128];
+};
+
+/*
+ * IOControllerprofile as defined in section 16.3.3.4 IOControllerProfile of
+ * the InfiniBand Architecture Specification.
+ */
+struct ib_dm_ioc_profile {
+ __be64 guid;
+ __be32 vendor_id;
+ __be32 device_id;
+ __be16 device_version;
+ __be16 reserved1;
+ __be32 subsys_vendor_id;
+ __be32 subsys_device_id;
+ __be16 io_class;
+ __be16 io_subclass;
+ __be16 protocol;
+ __be16 protocol_version;
+ __be16 service_conn;
+ __be16 initiators_supported;
+ __be16 send_queue_depth;
+ u8 reserved2;
+ u8 rdma_read_depth;
+ __be32 send_size;
+ __be32 rdma_size;
+ u8 op_cap_mask;
+ u8 svc_cap_mask;
+ u8 num_svc_entries;
+ u8 reserved3[9];
+ u8 id_string[64];
+};
+
+struct ib_dm_svc_entry {
+ u8 name[40];
+ __be64 id;
+};
+
+/*
+ * See also section 16.3.3.5 ServiceEntries in the InfiniBand Architecture
+ * Specification. See also section B.7, table B.8 in the T10 SRP r16a document.
+ */
+struct ib_dm_svc_entries {
+ struct ib_dm_svc_entry service_entries[4];
+};
+
+#endif
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
new file mode 100644
index 000000000000..cd5d05e22a77
--- /dev/null
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -0,0 +1,4073 @@
+/*
+ * Copyright (c) 2006 - 2009 Mellanox Technology Inc. All rights reserved.
+ * Copyright (C) 2008 - 2011 Bart Van Assche <bvanassche@acm.org>.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/ctype.h>
+#include <linux/kthread.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/atomic.h>
+#include <scsi/scsi_tcq.h>
+#include <target/configfs_macros.h>
+#include <target/target_core_base.h>
+#include <target/target_core_fabric_configfs.h>
+#include <target/target_core_fabric.h>
+#include <target/target_core_configfs.h>
+#include "ib_srpt.h"
+
+/* Name of this kernel module. */
+#define DRV_NAME "ib_srpt"
+#define DRV_VERSION "2.0.0"
+#define DRV_RELDATE "2011-02-14"
+
+#define SRPT_ID_STRING "Linux SRP target"
+
+#undef pr_fmt
+#define pr_fmt(fmt) DRV_NAME " " fmt
+
+MODULE_AUTHOR("Vu Pham and Bart Van Assche");
+MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol target "
+ "v" DRV_VERSION " (" DRV_RELDATE ")");
+MODULE_LICENSE("Dual BSD/GPL");
+
+/*
+ * Global Variables
+ */
+
+static u64 srpt_service_guid;
+static spinlock_t srpt_dev_lock; /* Protects srpt_dev_list. */
+static struct list_head srpt_dev_list; /* List of srpt_device structures. */
+
+static unsigned srp_max_req_size = DEFAULT_MAX_REQ_SIZE;
+module_param(srp_max_req_size, int, 0444);
+MODULE_PARM_DESC(srp_max_req_size,
+ "Maximum size of SRP request messages in bytes.");
+
+static int srpt_srq_size = DEFAULT_SRPT_SRQ_SIZE;
+module_param(srpt_srq_size, int, 0444);
+MODULE_PARM_DESC(srpt_srq_size,
+ "Shared receive queue (SRQ) size.");
+
+static int srpt_get_u64_x(char *buffer, struct kernel_param *kp)
+{
+ return sprintf(buffer, "0x%016llx", *(u64 *)kp->arg);
+}
+module_param_call(srpt_service_guid, NULL, srpt_get_u64_x, &srpt_service_guid,
+ 0444);
+MODULE_PARM_DESC(srpt_service_guid,
+		 "Use this value for ioc_guid, id_ext, and cm_listen_id"
+		 " instead of the node_guid of the first HCA.");
+
+static struct ib_client srpt_client;
+static struct target_fabric_configfs *srpt_target;
+static void srpt_release_channel(struct srpt_rdma_ch *ch);
+static int srpt_queue_status(struct se_cmd *cmd);
+
+/**
+ * opposite_dma_dir() - Swap DMA_TO_DEVICE and DMA_FROM_DEVICE.
+ */
+static inline
+enum dma_data_direction opposite_dma_dir(enum dma_data_direction dir)
+{
+ switch (dir) {
+ case DMA_TO_DEVICE: return DMA_FROM_DEVICE;
+ case DMA_FROM_DEVICE: return DMA_TO_DEVICE;
+ default: return dir;
+ }
+}
+
+/**
+ * srpt_sdev_name() - Return the name associated with the HCA.
+ *
+ * Examples are ib0, ib1, ...
+ */
+static inline const char *srpt_sdev_name(struct srpt_device *sdev)
+{
+ return sdev->device->name;
+}
+
+static enum rdma_ch_state srpt_get_ch_state(struct srpt_rdma_ch *ch)
+{
+ unsigned long flags;
+ enum rdma_ch_state state;
+
+ spin_lock_irqsave(&ch->spinlock, flags);
+ state = ch->state;
+ spin_unlock_irqrestore(&ch->spinlock, flags);
+ return state;
+}
+
+static enum rdma_ch_state
+srpt_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state new_state)
+{
+ unsigned long flags;
+ enum rdma_ch_state prev;
+
+ spin_lock_irqsave(&ch->spinlock, flags);
+ prev = ch->state;
+ ch->state = new_state;
+ spin_unlock_irqrestore(&ch->spinlock, flags);
+ return prev;
+}
+
+/**
+ * srpt_test_and_set_ch_state() - Test and set the channel state.
+ *
+ * Returns true if and only if the channel state has been set to the new state.
+ */
+static bool
+srpt_test_and_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state old,
+ enum rdma_ch_state new)
+{
+ unsigned long flags;
+ enum rdma_ch_state prev;
+
+ spin_lock_irqsave(&ch->spinlock, flags);
+ prev = ch->state;
+ if (prev == old)
+ ch->state = new;
+ spin_unlock_irqrestore(&ch->spinlock, flags);
+ return prev == old;
+}
+
+/**
+ * srpt_event_handler() - Asynchronous IB event callback function.
+ *
+ * Callback function called by the InfiniBand core when an asynchronous IB
+ * event occurs. This callback may occur in interrupt context. See also
+ * section 11.5.2, Set Asynchronous Event Handler in the InfiniBand
+ * Architecture Specification.
+ */
+static void srpt_event_handler(struct ib_event_handler *handler,
+ struct ib_event *event)
+{
+ struct srpt_device *sdev;
+ struct srpt_port *sport;
+
+ sdev = ib_get_client_data(event->device, &srpt_client);
+ if (!sdev || sdev->device != event->device)
+ return;
+
+ pr_debug("ASYNC event= %d on device= %s\n", event->event,
+ srpt_sdev_name(sdev));
+
+ switch (event->event) {
+ case IB_EVENT_PORT_ERR:
+ if (event->element.port_num <= sdev->device->phys_port_cnt) {
+ sport = &sdev->port[event->element.port_num - 1];
+ sport->lid = 0;
+ sport->sm_lid = 0;
+ }
+ break;
+ case IB_EVENT_PORT_ACTIVE:
+ case IB_EVENT_LID_CHANGE:
+ case IB_EVENT_PKEY_CHANGE:
+ case IB_EVENT_SM_CHANGE:
+ case IB_EVENT_CLIENT_REREGISTER:
+ /* Refresh port data asynchronously. */
+ if (event->element.port_num <= sdev->device->phys_port_cnt) {
+ sport = &sdev->port[event->element.port_num - 1];
+ if (!sport->lid && !sport->sm_lid)
+ schedule_work(&sport->work);
+ }
+ break;
+ default:
+ printk(KERN_ERR "received unrecognized IB event %d\n",
+ event->event);
+ break;
+ }
+}
+
+/**
+ * srpt_srq_event() - SRQ event callback function.
+ */
+static void srpt_srq_event(struct ib_event *event, void *ctx)
+{
+ printk(KERN_INFO "SRQ event %d\n", event->event);
+}
+
+/**
+ * srpt_qp_event() - QP event callback function.
+ */
+static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch)
+{
+ pr_debug("QP event %d on cm_id=%p sess_name=%s state=%d\n",
+ event->event, ch->cm_id, ch->sess_name, srpt_get_ch_state(ch));
+
+ switch (event->event) {
+ case IB_EVENT_COMM_EST:
+ ib_cm_notify(ch->cm_id, event->event);
+ break;
+ case IB_EVENT_QP_LAST_WQE_REACHED:
+ if (srpt_test_and_set_ch_state(ch, CH_DRAINING,
+ CH_RELEASING))
+ srpt_release_channel(ch);
+ else
+ pr_debug("%s: state %d - ignored LAST_WQE.\n",
+ ch->sess_name, srpt_get_ch_state(ch));
+ break;
+ default:
+ printk(KERN_ERR "received unrecognized IB QP event %d\n",
+ event->event);
+ break;
+ }
+}
+
+/**
+ * srpt_set_ioc() - Helper function for initializing an IOUnitInfo structure.
+ *
+ * @slot: one-based slot number.
+ * @value: four-bit value.
+ *
+ * Copies the lowest four bits of value into the four-bit element of the
+ * controller list (c_list) selected by slot. The slot index is one-based.
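+ * For example, slot 1 maps to the upper nibble of c_list[0] and slot 2 to
+ * the lower nibble.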
+ */
+static void srpt_set_ioc(u8 *c_list, u32 slot, u8 value)
+{
+ u16 id;
+ u8 tmp;
+
+ id = (slot - 1) / 2;
+ if (slot & 0x1) {
+ tmp = c_list[id] & 0xf;
+ c_list[id] = (value << 4) | tmp;
+ } else {
+ tmp = c_list[id] & 0xf0;
+ c_list[id] = (value & 0xf) | tmp;
+ }
+}
+
+/**
+ * srpt_get_class_port_info() - Copy ClassPortInfo to a management datagram.
+ *
+ * See also section 16.3.3.1 ClassPortInfo in the InfiniBand Architecture
+ * Specification.
+ */
+static void srpt_get_class_port_info(struct ib_dm_mad *mad)
+{
+ struct ib_class_port_info *cif;
+
+ cif = (struct ib_class_port_info *)mad->data;
+ memset(cif, 0, sizeof *cif);
+ cif->base_version = 1;
+ cif->class_version = 1;
+ cif->resp_time_value = 20;
+
+ mad->mad_hdr.status = 0;
+}
+
+/**
+ * srpt_get_iou() - Write IOUnitInfo to a management datagram.
+ *
+ * See also section 16.3.3.3 IOUnitInfo in the InfiniBand Architecture
+ * Specification. See also section B.7, table B.6 in the SRP r16a document.
+ */
+static void srpt_get_iou(struct ib_dm_mad *mad)
+{
+ struct ib_dm_iou_info *ioui;
+ u8 slot;
+ int i;
+
+ ioui = (struct ib_dm_iou_info *)mad->data;
+ ioui->change_id = __constant_cpu_to_be16(1);
+ ioui->max_controllers = 16;
+
+ /* set present for slot 1 and empty for the rest */
+ srpt_set_ioc(ioui->controller_list, 1, 1);
+ for (i = 1, slot = 2; i < 16; i++, slot++)
+ srpt_set_ioc(ioui->controller_list, slot, 0);
+
+ mad->mad_hdr.status = 0;
+}
+
+/**
+ * srpt_get_ioc() - Write IOControllerprofile to a management datagram.
+ *
+ * See also section 16.3.3.4 IOControllerProfile in the InfiniBand
+ * Architecture Specification. See also section B.7, table B.7 in the SRP
+ * r16a document.
+ */
+static void srpt_get_ioc(struct srpt_port *sport, u32 slot,
+ struct ib_dm_mad *mad)
+{
+ struct srpt_device *sdev = sport->sdev;
+ struct ib_dm_ioc_profile *iocp;
+
+ iocp = (struct ib_dm_ioc_profile *)mad->data;
+
+ if (!slot || slot > 16) {
+ mad->mad_hdr.status
+ = __constant_cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
+ return;
+ }
+
+ if (slot > 2) {
+ mad->mad_hdr.status
+ = __constant_cpu_to_be16(DM_MAD_STATUS_NO_IOC);
+ return;
+ }
+
+ memset(iocp, 0, sizeof *iocp);
+ strcpy(iocp->id_string, SRPT_ID_STRING);
+ iocp->guid = cpu_to_be64(srpt_service_guid);
+ iocp->vendor_id = cpu_to_be32(sdev->dev_attr.vendor_id);
+ iocp->device_id = cpu_to_be32(sdev->dev_attr.vendor_part_id);
+ iocp->device_version = cpu_to_be16(sdev->dev_attr.hw_ver);
+ iocp->subsys_vendor_id = cpu_to_be32(sdev->dev_attr.vendor_id);
+ iocp->subsys_device_id = 0x0;
+ iocp->io_class = __constant_cpu_to_be16(SRP_REV16A_IB_IO_CLASS);
+ iocp->io_subclass = __constant_cpu_to_be16(SRP_IO_SUBCLASS);
+ iocp->protocol = __constant_cpu_to_be16(SRP_PROTOCOL);
+ iocp->protocol_version = __constant_cpu_to_be16(SRP_PROTOCOL_VERSION);
+ iocp->send_queue_depth = cpu_to_be16(sdev->srq_size);
+ iocp->rdma_read_depth = 4;
+ iocp->send_size = cpu_to_be32(srp_max_req_size);
+ iocp->rdma_size = cpu_to_be32(min(sport->port_attrib.srp_max_rdma_size,
+ 1U << 24));
+ iocp->num_svc_entries = 1;
+ iocp->op_cap_mask = SRP_SEND_TO_IOC | SRP_SEND_FROM_IOC |
+ SRP_RDMA_READ_FROM_IOC | SRP_RDMA_WRITE_FROM_IOC;
+
+ mad->mad_hdr.status = 0;
+}
+
+/**
+ * srpt_get_svc_entries() - Write ServiceEntries to a management datagram.
+ *
+ * See also section 16.3.3.5 ServiceEntries in the InfiniBand Architecture
+ * Specification. See also section B.7, table B.8 in the SRP r16a document.
+ */
+static void srpt_get_svc_entries(u64 ioc_guid,
+ u16 slot, u8 hi, u8 lo, struct ib_dm_mad *mad)
+{
+ struct ib_dm_svc_entries *svc_entries;
+
+ WARN_ON(!ioc_guid);
+
+ if (!slot || slot > 16) {
+ mad->mad_hdr.status
+ = __constant_cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
+ return;
+ }
+
+ if (slot > 2 || lo > hi || hi > 1) {
+ mad->mad_hdr.status
+ = __constant_cpu_to_be16(DM_MAD_STATUS_NO_IOC);
+ return;
+ }
+
+ svc_entries = (struct ib_dm_svc_entries *)mad->data;
+ memset(svc_entries, 0, sizeof *svc_entries);
+ svc_entries->service_entries[0].id = cpu_to_be64(ioc_guid);
+ snprintf(svc_entries->service_entries[0].name,
+ sizeof(svc_entries->service_entries[0].name),
+ "%s%016llx",
+ SRP_SERVICE_NAME_PREFIX,
+ ioc_guid);
+
+ mad->mad_hdr.status = 0;
+}
+
+/**
+ * srpt_mgmt_method_get() - Process a received management datagram.
+ * @sp: source port through which the MAD has been received.
+ * @rq_mad: received MAD.
+ * @rsp_mad: response MAD.
+ */
+static void srpt_mgmt_method_get(struct srpt_port *sp, struct ib_mad *rq_mad,
+ struct ib_dm_mad *rsp_mad)
+{
+ u16 attr_id;
+ u32 slot;
+ u8 hi, lo;
+
+ attr_id = be16_to_cpu(rq_mad->mad_hdr.attr_id);
+ switch (attr_id) {
+ case DM_ATTR_CLASS_PORT_INFO:
+ srpt_get_class_port_info(rsp_mad);
+ break;
+ case DM_ATTR_IOU_INFO:
+ srpt_get_iou(rsp_mad);
+ break;
+ case DM_ATTR_IOC_PROFILE:
+ slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod);
+ srpt_get_ioc(sp, slot, rsp_mad);
+ break;
+ case DM_ATTR_SVC_ENTRIES:
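+		/*
+		 * attr_mod layout: bits 31..16 hold the slot, bits 15..8 the
+		 * upper service-entry index (hi) and bits 7..0 the lower one
+		 * (lo).
+		 */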
+ slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod);
+ hi = (u8) ((slot >> 8) & 0xff);
+ lo = (u8) (slot & 0xff);
+ slot = (u16) ((slot >> 16) & 0xffff);
+ srpt_get_svc_entries(srpt_service_guid,
+ slot, hi, lo, rsp_mad);
+ break;
+ default:
+ rsp_mad->mad_hdr.status =
+ __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
+ break;
+ }
+}
+
+/**
+ * srpt_mad_send_handler() - Post MAD-send callback function.
+ */
+static void srpt_mad_send_handler(struct ib_mad_agent *mad_agent,
+ struct ib_mad_send_wc *mad_wc)
+{
+ ib_destroy_ah(mad_wc->send_buf->ah);
+ ib_free_send_mad(mad_wc->send_buf);
+}
+
+/**
+ * srpt_mad_recv_handler() - MAD reception callback function.
+ */
+static void srpt_mad_recv_handler(struct ib_mad_agent *mad_agent,
+ struct ib_mad_recv_wc *mad_wc)
+{
+ struct srpt_port *sport = (struct srpt_port *)mad_agent->context;
+ struct ib_ah *ah;
+ struct ib_mad_send_buf *rsp;
+ struct ib_dm_mad *dm_mad;
+
+ if (!mad_wc || !mad_wc->recv_buf.mad)
+ return;
+
+ ah = ib_create_ah_from_wc(mad_agent->qp->pd, mad_wc->wc,
+ mad_wc->recv_buf.grh, mad_agent->port_num);
+ if (IS_ERR(ah))
+ goto err;
+
+ BUILD_BUG_ON(offsetof(struct ib_dm_mad, data) != IB_MGMT_DEVICE_HDR);
+
+ rsp = ib_create_send_mad(mad_agent, mad_wc->wc->src_qp,
+ mad_wc->wc->pkey_index, 0,
+ IB_MGMT_DEVICE_HDR, IB_MGMT_DEVICE_DATA,
+ GFP_KERNEL);
+ if (IS_ERR(rsp))
+ goto err_rsp;
+
+ rsp->ah = ah;
+
+ dm_mad = rsp->mad;
+ memcpy(dm_mad, mad_wc->recv_buf.mad, sizeof *dm_mad);
+ dm_mad->mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
+ dm_mad->mad_hdr.status = 0;
+
+ switch (mad_wc->recv_buf.mad->mad_hdr.method) {
+ case IB_MGMT_METHOD_GET:
+ srpt_mgmt_method_get(sport, mad_wc->recv_buf.mad, dm_mad);
+ break;
+ case IB_MGMT_METHOD_SET:
+ dm_mad->mad_hdr.status =
+ __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
+ break;
+ default:
+ dm_mad->mad_hdr.status =
+ __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD);
+ break;
+ }
+
+ if (!ib_post_send_mad(rsp, NULL)) {
+ ib_free_recv_mad(mad_wc);
+ /* will destroy_ah & free_send_mad in send completion */
+ return;
+ }
+
+ ib_free_send_mad(rsp);
+
+err_rsp:
+ ib_destroy_ah(ah);
+err:
+ ib_free_recv_mad(mad_wc);
+}
+
+/**
+ * srpt_refresh_port() - Configure a HCA port.
+ *
+ * Enable InfiniBand management datagram processing, update the cached sm_lid,
+ * lid and gid values, and register a callback function for processing MADs
+ * on the specified port.
+ *
+ * Note: It is safe to call this function more than once for the same port.
+ */
+static int srpt_refresh_port(struct srpt_port *sport)
+{
+ struct ib_mad_reg_req reg_req;
+ struct ib_port_modify port_modify;
+ struct ib_port_attr port_attr;
+ int ret;
+
+ memset(&port_modify, 0, sizeof port_modify);
+ port_modify.set_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
+ port_modify.clr_port_cap_mask = 0;
+
+ ret = ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
+ if (ret)
+ goto err_mod_port;
+
+ ret = ib_query_port(sport->sdev->device, sport->port, &port_attr);
+ if (ret)
+ goto err_query_port;
+
+ sport->sm_lid = port_attr.sm_lid;
+ sport->lid = port_attr.lid;
+
+ ret = ib_query_gid(sport->sdev->device, sport->port, 0, &sport->gid);
+ if (ret)
+ goto err_query_port;
+
+ if (!sport->mad_agent) {
+ memset(&reg_req, 0, sizeof reg_req);
+ reg_req.mgmt_class = IB_MGMT_CLASS_DEVICE_MGMT;
+ reg_req.mgmt_class_version = IB_MGMT_BASE_VERSION;
+ set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
+ set_bit(IB_MGMT_METHOD_SET, reg_req.method_mask);
+
+ sport->mad_agent = ib_register_mad_agent(sport->sdev->device,
+ sport->port,
+ IB_QPT_GSI,
+ &reg_req, 0,
+ srpt_mad_send_handler,
+ srpt_mad_recv_handler,
+ sport);
+ if (IS_ERR(sport->mad_agent)) {
+ ret = PTR_ERR(sport->mad_agent);
+ sport->mad_agent = NULL;
+ goto err_query_port;
+ }
+ }
+
+ return 0;
+
+err_query_port:
+
+ port_modify.set_port_cap_mask = 0;
+ port_modify.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
+ ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
+
+err_mod_port:
+
+ return ret;
+}
+
+/**
+ * srpt_unregister_mad_agent() - Unregister MAD callback functions.
+ *
+ * Note: It is safe to call this function more than once for the same device.
+ */
+static void srpt_unregister_mad_agent(struct srpt_device *sdev)
+{
+ struct ib_port_modify port_modify = {
+ .clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP,
+ };
+ struct srpt_port *sport;
+ int i;
+
+ for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
+ sport = &sdev->port[i - 1];
+ WARN_ON(sport->port != i);
+ if (ib_modify_port(sdev->device, i, 0, &port_modify) < 0)
+ printk(KERN_ERR "disabling MAD processing failed.\n");
+ if (sport->mad_agent) {
+ ib_unregister_mad_agent(sport->mad_agent);
+ sport->mad_agent = NULL;
+ }
+ }
+}
+
+/**
+ * srpt_alloc_ioctx() - Allocate an SRPT I/O context structure.
+ */
+static struct srpt_ioctx *srpt_alloc_ioctx(struct srpt_device *sdev,
+ int ioctx_size, int dma_size,
+ enum dma_data_direction dir)
+{
+ struct srpt_ioctx *ioctx;
+
+ ioctx = kmalloc(ioctx_size, GFP_KERNEL);
+ if (!ioctx)
+ goto err;
+
+ ioctx->buf = kmalloc(dma_size, GFP_KERNEL);
+ if (!ioctx->buf)
+ goto err_free_ioctx;
+
+ ioctx->dma = ib_dma_map_single(sdev->device, ioctx->buf, dma_size, dir);
+ if (ib_dma_mapping_error(sdev->device, ioctx->dma))
+ goto err_free_buf;
+
+ return ioctx;
+
+err_free_buf:
+ kfree(ioctx->buf);
+err_free_ioctx:
+ kfree(ioctx);
+err:
+ return NULL;
+}
+
+/**
+ * srpt_free_ioctx() - Free an SRPT I/O context structure.
+ */
+static void srpt_free_ioctx(struct srpt_device *sdev, struct srpt_ioctx *ioctx,
+ int dma_size, enum dma_data_direction dir)
+{
+ if (!ioctx)
+ return;
+
+ ib_dma_unmap_single(sdev->device, ioctx->dma, dma_size, dir);
+ kfree(ioctx->buf);
+ kfree(ioctx);
+}
+
+/**
+ * srpt_alloc_ioctx_ring() - Allocate a ring of SRPT I/O context structures.
+ * @sdev: Device to allocate the I/O context ring for.
+ * @ring_size: Number of elements in the I/O context ring.
+ * @ioctx_size: I/O context size.
+ * @dma_size: DMA buffer size.
+ * @dir: DMA data direction.
+ */
+static struct srpt_ioctx **srpt_alloc_ioctx_ring(struct srpt_device *sdev,
+ int ring_size, int ioctx_size,
+ int dma_size, enum dma_data_direction dir)
+{
+ struct srpt_ioctx **ring;
+ int i;
+
+ WARN_ON(ioctx_size != sizeof(struct srpt_recv_ioctx)
+ && ioctx_size != sizeof(struct srpt_send_ioctx));
+
+ ring = kmalloc(ring_size * sizeof(ring[0]), GFP_KERNEL);
+ if (!ring)
+ goto out;
+ for (i = 0; i < ring_size; ++i) {
+ ring[i] = srpt_alloc_ioctx(sdev, ioctx_size, dma_size, dir);
+ if (!ring[i])
+ goto err;
+ ring[i]->index = i;
+ }
+ goto out;
+
+err:
+ while (--i >= 0)
+ srpt_free_ioctx(sdev, ring[i], dma_size, dir);
+ kfree(ring);
+out:
+ return ring;
+}
+
+/**
+ * srpt_free_ioctx_ring() - Free the ring of SRPT I/O context structures.
+ */
+static void srpt_free_ioctx_ring(struct srpt_ioctx **ioctx_ring,
+ struct srpt_device *sdev, int ring_size,
+ int dma_size, enum dma_data_direction dir)
+{
+ int i;
+
+ for (i = 0; i < ring_size; ++i)
+ srpt_free_ioctx(sdev, ioctx_ring[i], dma_size, dir);
+ kfree(ioctx_ring);
+}
+
+/**
+ * srpt_get_cmd_state() - Get the state of a SCSI command.
+ */
+static enum srpt_command_state srpt_get_cmd_state(struct srpt_send_ioctx *ioctx)
+{
+ enum srpt_command_state state;
+ unsigned long flags;
+
+ BUG_ON(!ioctx);
+
+ spin_lock_irqsave(&ioctx->spinlock, flags);
+ state = ioctx->state;
+ spin_unlock_irqrestore(&ioctx->spinlock, flags);
+ return state;
+}
+
+/**
+ * srpt_set_cmd_state() - Set the state of a SCSI command.
+ *
+ * Does not modify the state of aborted commands. Returns the previous command
+ * state.
+ */
+static enum srpt_command_state srpt_set_cmd_state(struct srpt_send_ioctx *ioctx,
+ enum srpt_command_state new)
+{
+ enum srpt_command_state previous;
+ unsigned long flags;
+
+ BUG_ON(!ioctx);
+
+ spin_lock_irqsave(&ioctx->spinlock, flags);
+ previous = ioctx->state;
+ if (previous != SRPT_STATE_DONE)
+ ioctx->state = new;
+ spin_unlock_irqrestore(&ioctx->spinlock, flags);
+
+ return previous;
+}
+
+/**
+ * srpt_test_and_set_cmd_state() - Test and set the state of a command.
+ *
+ * Returns true if and only if the previous command state was equal to 'old'.
+ */
+static bool srpt_test_and_set_cmd_state(struct srpt_send_ioctx *ioctx,
+ enum srpt_command_state old,
+ enum srpt_command_state new)
+{
+ enum srpt_command_state previous;
+ unsigned long flags;
+
+ WARN_ON(!ioctx);
+ WARN_ON(old == SRPT_STATE_DONE);
+ WARN_ON(new == SRPT_STATE_NEW);
+
+ spin_lock_irqsave(&ioctx->spinlock, flags);
+ previous = ioctx->state;
+ if (previous == old)
+ ioctx->state = new;
+ spin_unlock_irqrestore(&ioctx->spinlock, flags);
+ return previous == old;
+}
+
+/**
+ * srpt_post_recv() - Post an IB receive request.
+ */
+static int srpt_post_recv(struct srpt_device *sdev,
+ struct srpt_recv_ioctx *ioctx)
+{
+ struct ib_sge list;
+ struct ib_recv_wr wr, *bad_wr;
+
+ BUG_ON(!sdev);
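+	/*
+	 * Encode the opcode and the ring index in wr_id so that the
+	 * completion handlers can tell receive completions apart and look up
+	 * the corresponding I/O context.
+	 */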
+ wr.wr_id = encode_wr_id(SRPT_RECV, ioctx->ioctx.index);
+
+ list.addr = ioctx->ioctx.dma;
+ list.length = srp_max_req_size;
+ list.lkey = sdev->mr->lkey;
+
+ wr.next = NULL;
+ wr.sg_list = &list;
+ wr.num_sge = 1;
+
+ return ib_post_srq_recv(sdev->srq, &wr, &bad_wr);
+}
+
+/**
+ * srpt_post_send() - Post an IB send request.
+ *
+ * Returns zero upon success and a non-zero value upon failure.
+ */
+static int srpt_post_send(struct srpt_rdma_ch *ch,
+ struct srpt_send_ioctx *ioctx, int len)
+{
+ struct ib_sge list;
+ struct ib_send_wr wr, *bad_wr;
+ struct srpt_device *sdev = ch->sport->sdev;
+ int ret;
+
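+	/*
+	 * Every response sent to the initiator returns one request credit
+	 * (via the REQUEST LIMIT DELTA field), so account for it in req_lim
+	 * before posting; the error path below rolls the increment back.
+	 */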
+ atomic_inc(&ch->req_lim);
+
+ ret = -ENOMEM;
+ if (unlikely(atomic_dec_return(&ch->sq_wr_avail) < 0)) {
+ printk(KERN_WARNING "IB send queue full (needed 1)\n");
+ goto out;
+ }
+
+ ib_dma_sync_single_for_device(sdev->device, ioctx->ioctx.dma, len,
+ DMA_TO_DEVICE);
+
+ list.addr = ioctx->ioctx.dma;
+ list.length = len;
+ list.lkey = sdev->mr->lkey;
+
+ wr.next = NULL;
+ wr.wr_id = encode_wr_id(SRPT_SEND, ioctx->ioctx.index);
+ wr.sg_list = &list;
+ wr.num_sge = 1;
+ wr.opcode = IB_WR_SEND;
+ wr.send_flags = IB_SEND_SIGNALED;
+
+ ret = ib_post_send(ch->qp, &wr, &bad_wr);
+
+out:
+ if (ret < 0) {
+ atomic_inc(&ch->sq_wr_avail);
+ atomic_dec(&ch->req_lim);
+ }
+ return ret;
+}
+
+/**
+ * srpt_get_desc_tbl() - Parse the data descriptors of an SRP_CMD request.
+ * @ioctx: Pointer to the I/O context associated with the request.
+ * @srp_cmd: Pointer to the SRP_CMD request data.
+ * @dir: Pointer to the variable to which the transfer direction will be
+ * written.
+ * @data_len: Pointer to the variable to which the total data length of all
+ * descriptors in the SRP_CMD request will be written.
+ *
+ * This function initializes ioctx->n_rbuf and ioctx->rbufs.
+ *
+ * Returns -EINVAL when the SRP_CMD request contains inconsistent descriptors;
+ * -ENOMEM when memory allocation fails and zero upon success.
+ */
+static int srpt_get_desc_tbl(struct srpt_send_ioctx *ioctx,
+ struct srp_cmd *srp_cmd,
+ enum dma_data_direction *dir, u64 *data_len)
+{
+ struct srp_indirect_buf *idb;
+ struct srp_direct_buf *db;
+ unsigned add_cdb_offset;
+ int ret;
+
+ /*
+ * The pointer computations below will only be compiled correctly
+ * if srp_cmd::add_data is declared as s8*, u8*, s8[] or u8[], so check
+ * whether srp_cmd::add_data has been declared as a byte pointer.
+ */
+ BUILD_BUG_ON(!__same_type(srp_cmd->add_data[0], (s8)0)
+ && !__same_type(srp_cmd->add_data[0], (u8)0));
+
+ BUG_ON(!dir);
+ BUG_ON(!data_len);
+
+ ret = 0;
+ *data_len = 0;
+
+ /*
+ * The lower four bits of the buffer format field contain the DATA-IN
+ * buffer descriptor format, and the highest four bits contain the
+ * DATA-OUT buffer descriptor format.
+ */
+ *dir = DMA_NONE;
+ if (srp_cmd->buf_fmt & 0xf)
+ /* DATA-IN: transfer data from target to initiator (read). */
+ *dir = DMA_FROM_DEVICE;
+ else if (srp_cmd->buf_fmt >> 4)
+ /* DATA-OUT: transfer data from initiator to target (write). */
+ *dir = DMA_TO_DEVICE;
+
+ /*
+ * According to the SRP spec, the lower two bits of the 'ADDITIONAL
+ * CDB LENGTH' field are reserved and the size in bytes of this field
+ * is four times the value specified in bits 3..7. Hence the "& ~3".
+ */
+ add_cdb_offset = srp_cmd->add_cdb_len & ~3;
+ if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_DIRECT) ||
+ ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_DIRECT)) {
+ ioctx->n_rbuf = 1;
+ ioctx->rbufs = &ioctx->single_rbuf;
+
+ db = (struct srp_direct_buf *)(srp_cmd->add_data
+ + add_cdb_offset);
+ memcpy(ioctx->rbufs, db, sizeof *db);
+ *data_len = be32_to_cpu(db->len);
+ } else if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_INDIRECT) ||
+ ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_INDIRECT)) {
+ idb = (struct srp_indirect_buf *)(srp_cmd->add_data
+ + add_cdb_offset);
+
+ ioctx->n_rbuf = be32_to_cpu(idb->table_desc.len) / sizeof *db;
+
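+		/*
+		 * Only indirect descriptor lists that are contained entirely
+		 * in the SRP_CMD IU are supported.
+		 */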
+ if (ioctx->n_rbuf >
+ (srp_cmd->data_out_desc_cnt + srp_cmd->data_in_desc_cnt)) {
+ printk(KERN_ERR "received unsupported SRP_CMD request"
+ " type (%u out + %u in != %u / %zu)\n",
+ srp_cmd->data_out_desc_cnt,
+ srp_cmd->data_in_desc_cnt,
+ be32_to_cpu(idb->table_desc.len),
+ sizeof(*db));
+ ioctx->n_rbuf = 0;
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (ioctx->n_rbuf == 1)
+ ioctx->rbufs = &ioctx->single_rbuf;
+ else {
+ ioctx->rbufs =
+ kmalloc(ioctx->n_rbuf * sizeof *db, GFP_ATOMIC);
+ if (!ioctx->rbufs) {
+ ioctx->n_rbuf = 0;
+ ret = -ENOMEM;
+ goto out;
+ }
+ }
+
+ db = idb->desc_list;
+ memcpy(ioctx->rbufs, db, ioctx->n_rbuf * sizeof *db);
+ *data_len = be32_to_cpu(idb->len);
+ }
+out:
+ return ret;
+}
+
+/**
+ * srpt_init_ch_qp() - Initialize queue pair attributes.
+ *
+ * Initializes the attributes of queue pair 'qp' by allowing local write,
+ * remote read and remote write. Also transitions 'qp' to state IB_QPS_INIT.
+ */
+static int srpt_init_ch_qp(struct srpt_rdma_ch *ch, struct ib_qp *qp)
+{
+ struct ib_qp_attr *attr;
+ int ret;
+
+ attr = kzalloc(sizeof *attr, GFP_KERNEL);
+ if (!attr)
+ return -ENOMEM;
+
+ attr->qp_state = IB_QPS_INIT;
+ attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ |
+ IB_ACCESS_REMOTE_WRITE;
+ attr->port_num = ch->sport->port;
+ attr->pkey_index = 0;
+
+ ret = ib_modify_qp(qp, attr,
+ IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PORT |
+ IB_QP_PKEY_INDEX);
+
+ kfree(attr);
+ return ret;
+}
+
+/**
+ * srpt_ch_qp_rtr() - Change the state of a channel to 'ready to receive' (RTR).
+ * @ch: channel of the queue pair.
+ * @qp: queue pair to change the state of.
+ *
+ * Returns zero upon success and a negative value upon failure.
+ *
+ * Note: currently a struct ib_qp_attr takes 136 bytes on a 64-bit system.
+ * If this structure ever becomes larger, it might be necessary to allocate
+ * it dynamically instead of on the stack.
+ */
+static int srpt_ch_qp_rtr(struct srpt_rdma_ch *ch, struct ib_qp *qp)
+{
+ struct ib_qp_attr qp_attr;
+ int attr_mask;
+ int ret;
+
+ qp_attr.qp_state = IB_QPS_RTR;
+ ret = ib_cm_init_qp_attr(ch->cm_id, &qp_attr, &attr_mask);
+ if (ret)
+ goto out;
+
+ qp_attr.max_dest_rd_atomic = 4;
+
+ ret = ib_modify_qp(qp, &qp_attr, attr_mask);
+
+out:
+ return ret;
+}
+
+/**
+ * srpt_ch_qp_rts() - Change the state of a channel to 'ready to send' (RTS).
+ * @ch: channel of the queue pair.
+ * @qp: queue pair to change the state of.
+ *
+ * Returns zero upon success and a negative value upon failure.
+ *
+ * Note: currently a struct ib_qp_attr takes 136 bytes on a 64-bit system.
+ * If this structure ever becomes larger, it might be necessary to allocate
+ * it dynamically instead of on the stack.
+ */
+static int srpt_ch_qp_rts(struct srpt_rdma_ch *ch, struct ib_qp *qp)
+{
+ struct ib_qp_attr qp_attr;
+ int attr_mask;
+ int ret;
+
+ qp_attr.qp_state = IB_QPS_RTS;
+ ret = ib_cm_init_qp_attr(ch->cm_id, &qp_attr, &attr_mask);
+ if (ret)
+ goto out;
+
+ qp_attr.max_rd_atomic = 4;
+
+ ret = ib_modify_qp(qp, &qp_attr, attr_mask);
+
+out:
+ return ret;
+}
+
+/**
+ * srpt_ch_qp_err() - Set the channel queue pair state to 'error'.
+ */
+static int srpt_ch_qp_err(struct srpt_rdma_ch *ch)
+{
+ struct ib_qp_attr qp_attr;
+
+ qp_attr.qp_state = IB_QPS_ERR;
+ return ib_modify_qp(ch->qp, &qp_attr, IB_QP_STATE);
+}
+
+/**
+ * srpt_unmap_sg_to_ib_sge() - Unmap an IB SGE list.
+ */
+static void srpt_unmap_sg_to_ib_sge(struct srpt_rdma_ch *ch,
+ struct srpt_send_ioctx *ioctx)
+{
+ struct scatterlist *sg;
+ enum dma_data_direction dir;
+
+ BUG_ON(!ch);
+ BUG_ON(!ioctx);
+ BUG_ON(ioctx->n_rdma && !ioctx->rdma_ius);
+
+ while (ioctx->n_rdma)
+ kfree(ioctx->rdma_ius[--ioctx->n_rdma].sge);
+
+ kfree(ioctx->rdma_ius);
+ ioctx->rdma_ius = NULL;
+
+ if (ioctx->mapped_sg_count) {
+ sg = ioctx->sg;
+ WARN_ON(!sg);
+ dir = ioctx->cmd.data_direction;
+ BUG_ON(dir == DMA_NONE);
+ ib_dma_unmap_sg(ch->sport->sdev->device, sg, ioctx->sg_cnt,
+ opposite_dma_dir(dir));
+ ioctx->mapped_sg_count = 0;
+ }
+}
+
+/**
+ * srpt_map_sg_to_ib_sge() - Map an SG list to an IB SGE list.
+ */
+static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
+ struct srpt_send_ioctx *ioctx)
+{
+ struct se_cmd *cmd;
+ struct scatterlist *sg, *sg_orig;
+ int sg_cnt;
+ enum dma_data_direction dir;
+ struct rdma_iu *riu;
+ struct srp_direct_buf *db;
+ dma_addr_t dma_addr;
+ struct ib_sge *sge;
+ u64 raddr;
+ u32 rsize;
+ u32 tsize;
+ u32 dma_len;
+ int count, nrdma;
+ int i, j, k;
+
+ BUG_ON(!ch);
+ BUG_ON(!ioctx);
+ cmd = &ioctx->cmd;
+ dir = cmd->data_direction;
+ BUG_ON(dir == DMA_NONE);
+
+ transport_do_task_sg_chain(cmd);
+ ioctx->sg = sg = sg_orig = cmd->t_tasks_sg_chained;
+ ioctx->sg_cnt = sg_cnt = cmd->t_tasks_sg_chained_no;
+
+ count = ib_dma_map_sg(ch->sport->sdev->device, sg, sg_cnt,
+ opposite_dma_dir(dir));
+ if (unlikely(!count))
+ return -EAGAIN;
+
+ ioctx->mapped_sg_count = count;
+
+ if (ioctx->rdma_ius && ioctx->n_rdma_ius)
+ nrdma = ioctx->n_rdma_ius;
+ else {
+ nrdma = (count + SRPT_DEF_SG_PER_WQE - 1) / SRPT_DEF_SG_PER_WQE
+ + ioctx->n_rbuf;
+
+ ioctx->rdma_ius = kzalloc(nrdma * sizeof *riu, GFP_KERNEL);
+ if (!ioctx->rdma_ius)
+ goto free_mem;
+
+ ioctx->n_rdma_ius = nrdma;
+ }
+
+ db = ioctx->rbufs;
+ tsize = cmd->data_length;
+ dma_len = sg_dma_len(&sg[0]);
+ riu = ioctx->rdma_ius;
+
+ /*
+	 * First pass: for each remote descriptor, work out how many ib_sge
+	 * entries are needed. If a descriptor fits in at most
+	 * SRPT_DEF_SG_PER_WQE ib_sge entries, a single rdma_iu (one RDMA work
+	 * request) is enough; otherwise additional rdma_iu entries are
+	 * allocated so that the remaining ib_sge entries can be carried by
+	 * further RDMA work requests.
+ */
+ for (i = 0, j = 0;
+ j < count && i < ioctx->n_rbuf && tsize > 0; ++i, ++riu, ++db) {
+ rsize = be32_to_cpu(db->len);
+ raddr = be64_to_cpu(db->va);
+ riu->raddr = raddr;
+ riu->rkey = be32_to_cpu(db->key);
+ riu->sge_cnt = 0;
+
+ /* calculate how many sge required for this remote_buf */
+ while (rsize > 0 && tsize > 0) {
+
+ if (rsize >= dma_len) {
+ tsize -= dma_len;
+ rsize -= dma_len;
+ raddr += dma_len;
+
+ if (tsize > 0) {
+ ++j;
+ if (j < count) {
+ sg = sg_next(sg);
+ dma_len = sg_dma_len(sg);
+ }
+ }
+ } else {
+ tsize -= rsize;
+ dma_len -= rsize;
+ rsize = 0;
+ }
+
+ ++riu->sge_cnt;
+
+ if (rsize > 0 && riu->sge_cnt == SRPT_DEF_SG_PER_WQE) {
+ ++ioctx->n_rdma;
+ riu->sge =
+ kmalloc(riu->sge_cnt * sizeof *riu->sge,
+ GFP_KERNEL);
+ if (!riu->sge)
+ goto free_mem;
+
+ ++riu;
+ riu->sge_cnt = 0;
+ riu->raddr = raddr;
+ riu->rkey = be32_to_cpu(db->key);
+ }
+ }
+
+ ++ioctx->n_rdma;
+ riu->sge = kmalloc(riu->sge_cnt * sizeof *riu->sge,
+ GFP_KERNEL);
+ if (!riu->sge)
+ goto free_mem;
+ }
+
+ db = ioctx->rbufs;
+ tsize = cmd->data_length;
+ riu = ioctx->rdma_ius;
+ sg = sg_orig;
+ dma_len = sg_dma_len(&sg[0]);
+ dma_addr = sg_dma_address(&sg[0]);
+
+	/* Second pass: fill in each rdma_iu's ib_sge entries from the mapped SG list. */
+ for (i = 0, j = 0;
+ j < count && i < ioctx->n_rbuf && tsize > 0; ++i, ++riu, ++db) {
+ rsize = be32_to_cpu(db->len);
+ sge = riu->sge;
+ k = 0;
+
+ while (rsize > 0 && tsize > 0) {
+ sge->addr = dma_addr;
+ sge->lkey = ch->sport->sdev->mr->lkey;
+
+ if (rsize >= dma_len) {
+ sge->length =
+ (tsize < dma_len) ? tsize : dma_len;
+ tsize -= dma_len;
+ rsize -= dma_len;
+
+ if (tsize > 0) {
+ ++j;
+ if (j < count) {
+ sg = sg_next(sg);
+ dma_len = sg_dma_len(sg);
+ dma_addr = sg_dma_address(sg);
+ }
+ }
+ } else {
+ sge->length = (tsize < rsize) ? tsize : rsize;
+ tsize -= rsize;
+ dma_len -= rsize;
+ dma_addr += rsize;
+ rsize = 0;
+ }
+
+ ++k;
+ if (k == riu->sge_cnt && rsize > 0 && tsize > 0) {
+ ++riu;
+ sge = riu->sge;
+ k = 0;
+ } else if (rsize > 0 && tsize > 0)
+ ++sge;
+ }
+ }
+
+ return 0;
+
+free_mem:
+ srpt_unmap_sg_to_ib_sge(ch, ioctx);
+
+ return -ENOMEM;
+}
+
+/**
+ * srpt_get_send_ioctx() - Obtain an I/O context for sending to the initiator.
+ */
+static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
+{
+ struct srpt_send_ioctx *ioctx;
+ unsigned long flags;
+
+ BUG_ON(!ch);
+
+ ioctx = NULL;
+ spin_lock_irqsave(&ch->spinlock, flags);
+ if (!list_empty(&ch->free_list)) {
+ ioctx = list_first_entry(&ch->free_list,
+ struct srpt_send_ioctx, free_list);
+ list_del(&ioctx->free_list);
+ }
+ spin_unlock_irqrestore(&ch->spinlock, flags);
+
+ if (!ioctx)
+ return ioctx;
+
+ BUG_ON(ioctx->ch != ch);
+ kref_init(&ioctx->kref);
+ spin_lock_init(&ioctx->spinlock);
+ ioctx->state = SRPT_STATE_NEW;
+ ioctx->n_rbuf = 0;
+ ioctx->rbufs = NULL;
+ ioctx->n_rdma = 0;
+ ioctx->n_rdma_ius = 0;
+ ioctx->rdma_ius = NULL;
+ ioctx->mapped_sg_count = 0;
+ init_completion(&ioctx->tx_done);
+ ioctx->queue_status_only = false;
+ /*
+ * transport_init_se_cmd() does not initialize all fields, so do it
+ * here.
+ */
+ memset(&ioctx->cmd, 0, sizeof(ioctx->cmd));
+ memset(&ioctx->sense_data, 0, sizeof(ioctx->sense_data));
+
+ return ioctx;
+}
+
+/**
+ * srpt_put_send_ioctx() - Free up resources.
+ */
+static void srpt_put_send_ioctx(struct srpt_send_ioctx *ioctx)
+{
+ struct srpt_rdma_ch *ch;
+ unsigned long flags;
+
+ BUG_ON(!ioctx);
+ ch = ioctx->ch;
+ BUG_ON(!ch);
+
+ WARN_ON(srpt_get_cmd_state(ioctx) != SRPT_STATE_DONE);
+
+ srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
+ transport_generic_free_cmd(&ioctx->cmd, 0);
+
+ if (ioctx->n_rbuf > 1) {
+ kfree(ioctx->rbufs);
+ ioctx->rbufs = NULL;
+ ioctx->n_rbuf = 0;
+ }
+
+ spin_lock_irqsave(&ch->spinlock, flags);
+ list_add(&ioctx->free_list, &ch->free_list);
+ spin_unlock_irqrestore(&ch->spinlock, flags);
+}
+
+static void srpt_put_send_ioctx_kref(struct kref *kref)
+{
+ srpt_put_send_ioctx(container_of(kref, struct srpt_send_ioctx, kref));
+}
+
+/**
+ * srpt_abort_cmd() - Abort a SCSI command.
+ * @ioctx: I/O context associated with the SCSI command.
+ */
+static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
+{
+ enum srpt_command_state state;
+ unsigned long flags;
+
+ BUG_ON(!ioctx);
+
+ /*
+ * If the command is in a state where the target core is waiting for
+ * the ib_srpt driver, change the state to the next state. Changing
+ * the state of the command from SRPT_STATE_NEED_DATA to
+ * SRPT_STATE_DATA_IN ensures that srpt_xmit_response() will call this
+ * function a second time.
+ */
+
+ spin_lock_irqsave(&ioctx->spinlock, flags);
+ state = ioctx->state;
+ switch (state) {
+ case SRPT_STATE_NEED_DATA:
+ ioctx->state = SRPT_STATE_DATA_IN;
+ break;
+ case SRPT_STATE_DATA_IN:
+ case SRPT_STATE_CMD_RSP_SENT:
+ case SRPT_STATE_MGMT_RSP_SENT:
+ ioctx->state = SRPT_STATE_DONE;
+ break;
+ default:
+ break;
+ }
+ spin_unlock_irqrestore(&ioctx->spinlock, flags);
+
+ if (state == SRPT_STATE_DONE)
+ goto out;
+
+ pr_debug("Aborting cmd with state %d and tag %lld\n", state,
+ ioctx->tag);
+
+ switch (state) {
+ case SRPT_STATE_NEW:
+ case SRPT_STATE_DATA_IN:
+ case SRPT_STATE_MGMT:
+ /*
+ * Do nothing - defer abort processing until
+ * srpt_queue_response() is invoked.
+ */
+ WARN_ON(!transport_check_aborted_status(&ioctx->cmd, false));
+ break;
+ case SRPT_STATE_NEED_DATA:
+ /* DMA_TO_DEVICE (write) - RDMA read error. */
+ atomic_set(&ioctx->cmd.transport_lun_stop, 1);
+ transport_generic_handle_data(&ioctx->cmd);
+ break;
+ case SRPT_STATE_CMD_RSP_SENT:
+ /*
+ * SRP_RSP sending failed or the SRP_RSP send completion has
+ * not been received in time.
+ */
+ srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
+ atomic_set(&ioctx->cmd.transport_lun_stop, 1);
+ kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
+ break;
+ case SRPT_STATE_MGMT_RSP_SENT:
+ srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
+ kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
+ break;
+ default:
+ WARN_ON("ERROR: unexpected command state");
+ break;
+ }
+
+out:
+ return state;
+}
+
+/**
+ * srpt_handle_send_err_comp() - Process an IB_WC_SEND error completion.
+ */
+static void srpt_handle_send_err_comp(struct srpt_rdma_ch *ch, u64 wr_id)
+{
+ struct srpt_send_ioctx *ioctx;
+ enum srpt_command_state state;
+ struct se_cmd *cmd;
+ u32 index;
+
+ atomic_inc(&ch->sq_wr_avail);
+
+ index = idx_from_wr_id(wr_id);
+ ioctx = ch->ioctx_ring[index];
+ state = srpt_get_cmd_state(ioctx);
+ cmd = &ioctx->cmd;
+
+ WARN_ON(state != SRPT_STATE_CMD_RSP_SENT
+ && state != SRPT_STATE_MGMT_RSP_SENT
+ && state != SRPT_STATE_NEED_DATA
+ && state != SRPT_STATE_DONE);
+
+ /* If SRP_RSP sending failed, undo the ch->req_lim change. */
+ if (state == SRPT_STATE_CMD_RSP_SENT
+ || state == SRPT_STATE_MGMT_RSP_SENT)
+ atomic_dec(&ch->req_lim);
+
+ srpt_abort_cmd(ioctx);
+}
+
+/**
+ * srpt_handle_send_comp() - Process an IB send completion notification.
+ */
+static void srpt_handle_send_comp(struct srpt_rdma_ch *ch,
+ struct srpt_send_ioctx *ioctx)
+{
+ enum srpt_command_state state;
+
+ atomic_inc(&ch->sq_wr_avail);
+
+ state = srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
+
+ if (WARN_ON(state != SRPT_STATE_CMD_RSP_SENT
+ && state != SRPT_STATE_MGMT_RSP_SENT
+ && state != SRPT_STATE_DONE))
+ pr_debug("state = %d\n", state);
+
+ if (state != SRPT_STATE_DONE)
+ kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
+ else
+ printk(KERN_ERR "IB completion has been received too late for"
+ " wr_id = %u.\n", ioctx->ioctx.index);
+}
+
+/**
+ * srpt_handle_rdma_comp() - Process an IB RDMA completion notification.
+ *
+ * Note: transport_generic_handle_data() is asynchronous so unmapping the
+ * data that has been transferred via IB RDMA must be postponed until the
+ * check_stop_free() callback.
+ */
+static void srpt_handle_rdma_comp(struct srpt_rdma_ch *ch,
+ struct srpt_send_ioctx *ioctx,
+ enum srpt_opcode opcode)
+{
+ WARN_ON(ioctx->n_rdma <= 0);
+ atomic_add(ioctx->n_rdma, &ch->sq_wr_avail);
+
+ if (opcode == SRPT_RDMA_READ_LAST) {
+ if (srpt_test_and_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA,
+ SRPT_STATE_DATA_IN))
+ transport_generic_handle_data(&ioctx->cmd);
+ else
+ printk(KERN_ERR "%s[%d]: wrong state = %d\n", __func__,
+ __LINE__, srpt_get_cmd_state(ioctx));
+ } else if (opcode == SRPT_RDMA_ABORT) {
+ ioctx->rdma_aborted = true;
+ } else {
+ WARN(true, "unexpected opcode %d\n", opcode);
+ }
+}
+
+/**
+ * srpt_handle_rdma_err_comp() - Process an IB RDMA error completion.
+ */
+static void srpt_handle_rdma_err_comp(struct srpt_rdma_ch *ch,
+ struct srpt_send_ioctx *ioctx,
+ enum srpt_opcode opcode)
+{
+ struct se_cmd *cmd;
+ enum srpt_command_state state;
+
+ cmd = &ioctx->cmd;
+ state = srpt_get_cmd_state(ioctx);
+ switch (opcode) {
+ case SRPT_RDMA_READ_LAST:
+ if (ioctx->n_rdma <= 0) {
+ printk(KERN_ERR "Received invalid RDMA read"
+ " error completion with idx %d\n",
+ ioctx->ioctx.index);
+ break;
+ }
+ atomic_add(ioctx->n_rdma, &ch->sq_wr_avail);
+ if (state == SRPT_STATE_NEED_DATA)
+ srpt_abort_cmd(ioctx);
+ else
+ printk(KERN_ERR "%s[%d]: wrong state = %d\n",
+ __func__, __LINE__, state);
+ break;
+ case SRPT_RDMA_WRITE_LAST:
+ atomic_set(&ioctx->cmd.transport_lun_stop, 1);
+ break;
+ default:
+ printk(KERN_ERR "%s[%d]: opcode = %u\n", __func__,
+ __LINE__, opcode);
+ break;
+ }
+}
+
+/**
+ * srpt_build_cmd_rsp() - Build an SRP_RSP response.
+ * @ch: RDMA channel through which the request has been received.
+ * @ioctx: I/O context associated with the SRP_CMD request. The response will
+ * be built in the buffer ioctx->buf points at and hence this function will
+ * overwrite the request data.
+ * @tag: tag of the request for which this response is being generated.
+ * @status: value for the STATUS field of the SRP_RSP information unit.
+ *
+ * Returns the size in bytes of the SRP_RSP response.
+ *
+ * An SRP_RSP response contains a SCSI status or service response. See also
+ * section 6.9 in the SRP r16a document for the format of an SRP_RSP
+ * response. See also SPC-2 for more information about sense data.
+ */
+static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch,
+ struct srpt_send_ioctx *ioctx, u64 tag,
+ int status)
+{
+ struct srp_rsp *srp_rsp;
+ const u8 *sense_data;
+ int sense_data_len, max_sense_len;
+
+ /*
+ * The lowest bit of all SAM-3 status codes is zero (see also
+ * paragraph 5.3 in SAM-3).
+ */
+ WARN_ON(status & 1);
+
+ srp_rsp = ioctx->ioctx.buf;
+ BUG_ON(!srp_rsp);
+
+ sense_data = ioctx->sense_data;
+ sense_data_len = ioctx->cmd.scsi_sense_length;
+ WARN_ON(sense_data_len > sizeof(ioctx->sense_data));
+
+ memset(srp_rsp, 0, sizeof *srp_rsp);
+ srp_rsp->opcode = SRP_RSP;
+ srp_rsp->req_lim_delta =
+ __constant_cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
+ srp_rsp->tag = tag;
+ srp_rsp->status = status;
+
+ if (sense_data_len) {
+ BUILD_BUG_ON(MIN_MAX_RSP_SIZE <= sizeof(*srp_rsp));
+ max_sense_len = ch->max_ti_iu_len - sizeof(*srp_rsp);
+ if (sense_data_len > max_sense_len) {
+ printk(KERN_WARNING "truncated sense data from %d to %d"
+ " bytes\n", sense_data_len, max_sense_len);
+ sense_data_len = max_sense_len;
+ }
+
+ srp_rsp->flags |= SRP_RSP_FLAG_SNSVALID;
+ srp_rsp->sense_data_len = cpu_to_be32(sense_data_len);
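+		/* The sense data follows immediately after the SRP_RSP header. */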
+ memcpy(srp_rsp + 1, sense_data, sense_data_len);
+ }
+
+ return sizeof(*srp_rsp) + sense_data_len;
+}
+
+/**
+ * srpt_build_tskmgmt_rsp() - Build a task management response.
+ * @ch: RDMA channel through which the request has been received.
+ * @ioctx: I/O context in which the SRP_RSP response will be built.
+ * @rsp_code: RSP_CODE that will be stored in the response.
+ * @tag: Tag of the request for which this response is being generated.
+ *
+ * Returns the size in bytes of the SRP_RSP response.
+ *
+ * An SRP_RSP response contains a SCSI status or service response. See also
+ * section 6.9 in the SRP r16a document for the format of an SRP_RSP
+ * response.
+ */
+static int srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch,
+ struct srpt_send_ioctx *ioctx,
+ u8 rsp_code, u64 tag)
+{
+ struct srp_rsp *srp_rsp;
+ int resp_data_len;
+ int resp_len;
+
+ resp_data_len = (rsp_code == SRP_TSK_MGMT_SUCCESS) ? 0 : 4;
+ resp_len = sizeof(*srp_rsp) + resp_data_len;
+
+ srp_rsp = ioctx->ioctx.buf;
+ BUG_ON(!srp_rsp);
+ memset(srp_rsp, 0, sizeof *srp_rsp);
+
+ srp_rsp->opcode = SRP_RSP;
+ srp_rsp->req_lim_delta = __constant_cpu_to_be32(1
+ + atomic_xchg(&ch->req_lim_delta, 0));
+ srp_rsp->tag = tag;
+
+ if (rsp_code != SRP_TSK_MGMT_SUCCESS) {
+ srp_rsp->flags |= SRP_RSP_FLAG_RSPVALID;
+ srp_rsp->resp_data_len = cpu_to_be32(resp_data_len);
+ srp_rsp->data[3] = rsp_code;
+ }
+
+ return resp_len;
+}
+
+#define NO_SUCH_LUN ((uint64_t)-1LL)
+
+/*
+ * SCSI LUN addressing method. See also SAM-2 and the section about
+ * eight byte LUNs.
+ */
+enum scsi_lun_addr_method {
+ SCSI_LUN_ADDR_METHOD_PERIPHERAL = 0,
+ SCSI_LUN_ADDR_METHOD_FLAT = 1,
+ SCSI_LUN_ADDR_METHOD_LUN = 2,
+ SCSI_LUN_ADDR_METHOD_EXTENDED_LUN = 3,
+};
+
+/**
+ * srpt_unpack_lun() - Convert from network LUN to linear LUN.
+ *
+ * Convert a 2-byte, 4-byte, 6-byte or 8-byte LUN structure in network byte
+ * order (big endian) to a linear LUN. Supports three LUN addressing methods:
+ * peripheral, flat and logical unit. See also SAM-2, section 4.9.4 (page 40).
+ */
+static uint64_t srpt_unpack_lun(const uint8_t *lun, int len)
+{
+ uint64_t res = NO_SUCH_LUN;
+ int addressing_method;
+
+ if (unlikely(len < 2)) {
+ printk(KERN_ERR "Illegal LUN length %d, expected 2 bytes or "
+		       "more\n", len);
+ goto out;
+ }
+
+ switch (len) {
+ case 8:
+ if ((*((__be64 *)lun) &
+ __constant_cpu_to_be64(0x0000FFFFFFFFFFFFLL)) != 0)
+ goto out_err;
+ break;
+ case 4:
+ if (*((__be16 *)&lun[2]) != 0)
+ goto out_err;
+ break;
+ case 6:
+ if (*((__be32 *)&lun[2]) != 0)
+ goto out_err;
+ break;
+ case 2:
+ break;
+ default:
+ goto out_err;
+ }
+
+ addressing_method = (*lun) >> 6; /* highest two bits of byte 0 */
+ switch (addressing_method) {
+ case SCSI_LUN_ADDR_METHOD_PERIPHERAL:
+ case SCSI_LUN_ADDR_METHOD_FLAT:
+ case SCSI_LUN_ADDR_METHOD_LUN:
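+		/*
+		 * The LUN consists of the low six bits of byte 0 (high part)
+		 * and all of byte 1 (low part).
+		 */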
+ res = *(lun + 1) | (((*lun) & 0x3f) << 8);
+ break;
+
+ case SCSI_LUN_ADDR_METHOD_EXTENDED_LUN:
+ default:
+		printk(KERN_ERR "Unimplemented LUN addressing method %u\n",
+ addressing_method);
+ break;
+ }
+
+out:
+ return res;
+
+out_err:
+ printk(KERN_ERR "Support for multi-level LUNs has not yet been"
+	       " implemented\n");
+ goto out;
+}
+
+static int srpt_check_stop_free(struct se_cmd *cmd)
+{
+ struct srpt_send_ioctx *ioctx;
+
+ ioctx = container_of(cmd, struct srpt_send_ioctx, cmd);
+ return kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
+}
+
+/**
+ * srpt_handle_cmd() - Process SRP_CMD.
+ */
+static int srpt_handle_cmd(struct srpt_rdma_ch *ch,
+ struct srpt_recv_ioctx *recv_ioctx,
+ struct srpt_send_ioctx *send_ioctx)
+{
+ struct se_cmd *cmd;
+ struct srp_cmd *srp_cmd;
+ uint64_t unpacked_lun;
+ u64 data_len;
+ enum dma_data_direction dir;
+ int ret;
+
+ BUG_ON(!send_ioctx);
+
+ srp_cmd = recv_ioctx->ioctx.buf;
+ kref_get(&send_ioctx->kref);
+ cmd = &send_ioctx->cmd;
+ send_ioctx->tag = srp_cmd->tag;
+
+ switch (srp_cmd->task_attr) {
+ case SRP_CMD_SIMPLE_Q:
+ cmd->sam_task_attr = MSG_SIMPLE_TAG;
+ break;
+ case SRP_CMD_ORDERED_Q:
+ default:
+ cmd->sam_task_attr = MSG_ORDERED_TAG;
+ break;
+ case SRP_CMD_HEAD_OF_Q:
+ cmd->sam_task_attr = MSG_HEAD_TAG;
+ break;
+ case SRP_CMD_ACA:
+ cmd->sam_task_attr = MSG_ACA_TAG;
+ break;
+ }
+
+ ret = srpt_get_desc_tbl(send_ioctx, srp_cmd, &dir, &data_len);
+ if (ret) {
+ printk(KERN_ERR "0x%llx: parsing SRP descriptor table failed.\n",
+ srp_cmd->tag);
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ goto send_sense;
+ }
+
+ cmd->data_length = data_len;
+ cmd->data_direction = dir;
+ unpacked_lun = srpt_unpack_lun((uint8_t *)&srp_cmd->lun,
+ sizeof(srp_cmd->lun));
+ if (transport_lookup_cmd_lun(cmd, unpacked_lun) < 0)
+ goto send_sense;
+ ret = transport_generic_allocate_tasks(cmd, srp_cmd->cdb);
+ if (cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
+ srpt_queue_status(cmd);
+ else if (cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION)
+ goto send_sense;
+ else
+ WARN_ON_ONCE(ret);
+
+ transport_handle_cdb_direct(cmd);
+ return 0;
+
+send_sense:
+ transport_send_check_condition_and_sense(cmd, cmd->scsi_sense_reason,
+ 0);
+ return -1;
+}
+
+/**
+ * srpt_rx_mgmt_fn_tag() - Process a task management function by tag.
+ * @ioctx: I/O context of the SRP task management request.
+ * @tag: Tag of the SCSI command that the task management function applies to.
+ *
+ * Returns zero if the target core will process the task management
+ * request asynchronously.
+ *
+ * Note: It is assumed that the initiator serializes tag-based task management
+ * requests.
+ */
+static int srpt_rx_mgmt_fn_tag(struct srpt_send_ioctx *ioctx, u64 tag)
+{
+ struct srpt_device *sdev;
+ struct srpt_rdma_ch *ch;
+ struct srpt_send_ioctx *target;
+ int ret, i;
+
+ ret = -EINVAL;
+ ch = ioctx->ch;
+ BUG_ON(!ch);
+ BUG_ON(!ch->sport);
+ sdev = ch->sport->sdev;
+ BUG_ON(!sdev);
+ spin_lock_irq(&sdev->spinlock);
+ for (i = 0; i < ch->rq_size; ++i) {
+ target = ch->ioctx_ring[i];
+ if (target->cmd.se_lun == ioctx->cmd.se_lun &&
+ target->tag == tag &&
+ srpt_get_cmd_state(target) != SRPT_STATE_DONE) {
+ ret = 0;
+ /* now let the target core abort &target->cmd; */
+ break;
+ }
+ }
+ spin_unlock_irq(&sdev->spinlock);
+ return ret;
+}
+
+static int srp_tmr_to_tcm(int fn)
+{
+ switch (fn) {
+ case SRP_TSK_ABORT_TASK:
+ return TMR_ABORT_TASK;
+ case SRP_TSK_ABORT_TASK_SET:
+ return TMR_ABORT_TASK_SET;
+ case SRP_TSK_CLEAR_TASK_SET:
+ return TMR_CLEAR_TASK_SET;
+ case SRP_TSK_LUN_RESET:
+ return TMR_LUN_RESET;
+ case SRP_TSK_CLEAR_ACA:
+ return TMR_CLEAR_ACA;
+ default:
+ return -1;
+ }
+}
+
+/**
+ * srpt_handle_tsk_mgmt() - Process an SRP_TSK_MGMT information unit.
+ *
+ * For more information about SRP_TSK_MGMT information units, see also section
+ * 6.7 in the SRP r16a document.
+ */
+static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
+ struct srpt_recv_ioctx *recv_ioctx,
+ struct srpt_send_ioctx *send_ioctx)
+{
+ struct srp_tsk_mgmt *srp_tsk;
+ struct se_cmd *cmd;
+ uint64_t unpacked_lun;
+ int tcm_tmr;
+ int res;
+
+ BUG_ON(!send_ioctx);
+
+ srp_tsk = recv_ioctx->ioctx.buf;
+ cmd = &send_ioctx->cmd;
+
+ pr_debug("recv tsk_mgmt fn %d for task_tag %lld and cmd tag %lld"
+ " cm_id %p sess %p\n", srp_tsk->tsk_mgmt_func,
+ srp_tsk->task_tag, srp_tsk->tag, ch->cm_id, ch->sess);
+
+ srpt_set_cmd_state(send_ioctx, SRPT_STATE_MGMT);
+ send_ioctx->tag = srp_tsk->tag;
+ tcm_tmr = srp_tmr_to_tcm(srp_tsk->tsk_mgmt_func);
+ if (tcm_tmr < 0) {
+ send_ioctx->cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ send_ioctx->cmd.se_tmr_req->response =
+ TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
+ goto process_tmr;
+ }
+ cmd->se_tmr_req = core_tmr_alloc_req(cmd, NULL, tcm_tmr, GFP_KERNEL);
+ if (!cmd->se_tmr_req) {
+ send_ioctx->cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ send_ioctx->cmd.se_tmr_req->response = TMR_FUNCTION_REJECTED;
+ goto process_tmr;
+ }
+
+ unpacked_lun = srpt_unpack_lun((uint8_t *)&srp_tsk->lun,
+ sizeof(srp_tsk->lun));
+ res = transport_lookup_tmr_lun(&send_ioctx->cmd, unpacked_lun);
+ if (res) {
+ pr_debug("rejecting TMR for LUN %lld\n", unpacked_lun);
+ send_ioctx->cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ send_ioctx->cmd.se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
+ goto process_tmr;
+ }
+
+ if (srp_tsk->tsk_mgmt_func == SRP_TSK_ABORT_TASK)
+ srpt_rx_mgmt_fn_tag(send_ioctx, srp_tsk->task_tag);
+
+process_tmr:
+ kref_get(&send_ioctx->kref);
+ if (!(send_ioctx->cmd.se_cmd_flags & SCF_SCSI_CDB_EXCEPTION))
+ transport_generic_handle_tmr(&send_ioctx->cmd);
+ else
+ transport_send_check_condition_and_sense(cmd,
+ cmd->scsi_sense_reason, 0);
+
+}
+
+/**
+ * srpt_handle_new_iu() - Process a newly received information unit.
+ * @ch: RDMA channel through which the information unit has been received.
+ * @ioctx: SRPT I/O context associated with the information unit.
+ */
+static void srpt_handle_new_iu(struct srpt_rdma_ch *ch,
+ struct srpt_recv_ioctx *recv_ioctx,
+ struct srpt_send_ioctx *send_ioctx)
+{
+ struct srp_cmd *srp_cmd;
+ enum rdma_ch_state ch_state;
+
+ BUG_ON(!ch);
+ BUG_ON(!recv_ioctx);
+
+ ib_dma_sync_single_for_cpu(ch->sport->sdev->device,
+ recv_ioctx->ioctx.dma, srp_max_req_size,
+ DMA_FROM_DEVICE);
+
+ ch_state = srpt_get_ch_state(ch);
+ if (unlikely(ch_state == CH_CONNECTING)) {
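+		/* Defer processing of this IU until the channel reaches the live state. */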
+ list_add_tail(&recv_ioctx->wait_list, &ch->cmd_wait_list);
+ goto out;
+ }
+
+ if (unlikely(ch_state != CH_LIVE))
+ goto out;
+
+ srp_cmd = recv_ioctx->ioctx.buf;
+ if (srp_cmd->opcode == SRP_CMD || srp_cmd->opcode == SRP_TSK_MGMT) {
+ if (!send_ioctx)
+ send_ioctx = srpt_get_send_ioctx(ch);
+ if (unlikely(!send_ioctx)) {
+ list_add_tail(&recv_ioctx->wait_list,
+ &ch->cmd_wait_list);
+ goto out;
+ }
+ }
+
+ transport_init_se_cmd(&send_ioctx->cmd, &srpt_target->tf_ops, ch->sess,
+ 0, DMA_NONE, MSG_SIMPLE_TAG,
+ send_ioctx->sense_data);
+
+ switch (srp_cmd->opcode) {
+ case SRP_CMD:
+ srpt_handle_cmd(ch, recv_ioctx, send_ioctx);
+ break;
+ case SRP_TSK_MGMT:
+ srpt_handle_tsk_mgmt(ch, recv_ioctx, send_ioctx);
+ break;
+ case SRP_I_LOGOUT:
+ printk(KERN_ERR "Not yet implemented: SRP_I_LOGOUT\n");
+ break;
+ case SRP_CRED_RSP:
+ pr_debug("received SRP_CRED_RSP\n");
+ break;
+ case SRP_AER_RSP:
+ pr_debug("received SRP_AER_RSP\n");
+ break;
+ case SRP_RSP:
+ printk(KERN_ERR "Received SRP_RSP\n");
+ break;
+ default:
+ printk(KERN_ERR "received IU with unknown opcode 0x%x\n",
+ srp_cmd->opcode);
+ break;
+ }
+
+ srpt_post_recv(ch->sport->sdev, recv_ioctx);
+out:
+ return;
+}
+
+static void srpt_process_rcv_completion(struct ib_cq *cq,
+ struct srpt_rdma_ch *ch,
+ struct ib_wc *wc)
+{
+ struct srpt_device *sdev = ch->sport->sdev;
+ struct srpt_recv_ioctx *ioctx;
+ u32 index;
+
+ index = idx_from_wr_id(wc->wr_id);
+ if (wc->status == IB_WC_SUCCESS) {
+ int req_lim;
+
+ req_lim = atomic_dec_return(&ch->req_lim);
+ if (unlikely(req_lim < 0))
+ printk(KERN_ERR "req_lim = %d < 0\n", req_lim);
+ ioctx = sdev->ioctx_ring[index];
+ srpt_handle_new_iu(ch, ioctx, NULL);
+ } else {
+ printk(KERN_INFO "receiving failed for idx %u with status %d\n",
+ index, wc->status);
+ }
+}
+
+/**
+ * srpt_process_send_completion() - Process an IB send completion.
+ *
+ * Note: Although this has not yet been observed during tests, at least in
+ * theory it is possible that the srpt_get_send_ioctx() call invoked by
+ * srpt_handle_new_iu() fails. This is possible because the req_lim_delta
+ * value in each response is set to one, and it is possible that this response
+ * makes the initiator send a new request before the send completion for that
+ * response has been processed. This could e.g. happen if the call to
+ * srpt_put_send_ioctx() is delayed because of a higher priority interrupt or
+ * if IB retransmission causes generation of the send completion to be
+ * delayed. Incoming information units for which srpt_get_send_ioctx() fails
+ * are queued on cmd_wait_list. The code below processes these delayed
+ * requests one at a time.
+ */
+static void srpt_process_send_completion(struct ib_cq *cq,
+ struct srpt_rdma_ch *ch,
+ struct ib_wc *wc)
+{
+ struct srpt_send_ioctx *send_ioctx;
+ uint32_t index;
+ enum srpt_opcode opcode;
+
+ index = idx_from_wr_id(wc->wr_id);
+ opcode = opcode_from_wr_id(wc->wr_id);
+ send_ioctx = ch->ioctx_ring[index];
+ if (wc->status == IB_WC_SUCCESS) {
+ if (opcode == SRPT_SEND)
+ srpt_handle_send_comp(ch, send_ioctx);
+ else {
+ WARN_ON(opcode != SRPT_RDMA_ABORT &&
+ wc->opcode != IB_WC_RDMA_READ);
+ srpt_handle_rdma_comp(ch, send_ioctx, opcode);
+ }
+ } else {
+ if (opcode == SRPT_SEND) {
+ printk(KERN_INFO "sending response for idx %u failed"
+ " with status %d\n", index, wc->status);
+ srpt_handle_send_err_comp(ch, wc->wr_id);
+ } else if (opcode != SRPT_RDMA_MID) {
+			printk(KERN_INFO "RDMA type %d for idx %u failed with"
+			       " status %d\n", opcode, index, wc->status);
+ srpt_handle_rdma_err_comp(ch, send_ioctx, opcode);
+ }
+ }
+
+ while (unlikely(opcode == SRPT_SEND
+ && !list_empty(&ch->cmd_wait_list)
+ && srpt_get_ch_state(ch) == CH_LIVE
+ && (send_ioctx = srpt_get_send_ioctx(ch)) != NULL)) {
+ struct srpt_recv_ioctx *recv_ioctx;
+
+ recv_ioctx = list_first_entry(&ch->cmd_wait_list,
+ struct srpt_recv_ioctx,
+ wait_list);
+ list_del(&recv_ioctx->wait_list);
+ srpt_handle_new_iu(ch, recv_ioctx, send_ioctx);
+ }
+}
+
+static void srpt_process_completion(struct ib_cq *cq, struct srpt_rdma_ch *ch)
+{
+ struct ib_wc *const wc = ch->wc;
+ int i, n;
+
+ WARN_ON(cq != ch->cq);
+
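+	/*
+	 * Re-arm the completion queue before draining it so that a completion
+	 * that arrives while polling triggers another wake-up instead of
+	 * being missed.
+	 */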
+ ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
+ while ((n = ib_poll_cq(cq, ARRAY_SIZE(ch->wc), wc)) > 0) {
+ for (i = 0; i < n; i++) {
+ if (opcode_from_wr_id(wc[i].wr_id) == SRPT_RECV)
+ srpt_process_rcv_completion(cq, ch, &wc[i]);
+ else
+ srpt_process_send_completion(cq, ch, &wc[i]);
+ }
+ }
+}
+
+/**
+ * srpt_completion() - IB completion queue callback function.
+ *
+ * Notes:
+ * - It is guaranteed that a completion handler will never be invoked
+ * concurrently on two different CPUs for the same completion queue. See also
+ * Documentation/infiniband/core_locking.txt and the implementation of
+ * handle_edge_irq() in kernel/irq/chip.c.
+ * - When threaded IRQs are enabled, completion handlers are invoked in thread
+ * context instead of interrupt context.
+ */
+static void srpt_completion(struct ib_cq *cq, void *ctx)
+{
+ struct srpt_rdma_ch *ch = ctx;
+
+ wake_up_interruptible(&ch->wait_queue);
+}
+
+static int srpt_compl_thread(void *arg)
+{
+ struct srpt_rdma_ch *ch;
+
+ /* Hibernation / freezing of the SRPT kernel thread is not supported. */
+ current->flags |= PF_NOFREEZE;
+
+ ch = arg;
+ BUG_ON(!ch);
+ printk(KERN_INFO "Session %s: kernel thread %s (PID %d) started\n",
+ ch->sess_name, ch->thread->comm, current->pid);
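+	/*
+	 * The wait condition below invokes srpt_process_completion() via the
+	 * comma operator, so completions are drained on every wake-up and the
+	 * loop only terminates once kthread_should_stop() returns true.
+	 */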
+ while (!kthread_should_stop()) {
+ wait_event_interruptible(ch->wait_queue,
+ (srpt_process_completion(ch->cq, ch),
+ kthread_should_stop()));
+ }
+ printk(KERN_INFO "Session %s: kernel thread %s (PID %d) stopped\n",
+ ch->sess_name, ch->thread->comm, current->pid);
+ return 0;
+}
+
+/**
+ * srpt_create_ch_ib() - Create receive and send completion queues.
+ */
+static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
+{
+ struct ib_qp_init_attr *qp_init;
+ struct srpt_port *sport = ch->sport;
+ struct srpt_device *sdev = sport->sdev;
+ u32 srp_sq_size = sport->port_attrib.srp_sq_size;
+ int ret;
+
+ WARN_ON(ch->rq_size < 1);
+
+ ret = -ENOMEM;
+ qp_init = kzalloc(sizeof *qp_init, GFP_KERNEL);
+ if (!qp_init)
+ goto out;
+
+ ch->cq = ib_create_cq(sdev->device, srpt_completion, NULL, ch,
+ ch->rq_size + srp_sq_size, 0);
+ if (IS_ERR(ch->cq)) {
+ ret = PTR_ERR(ch->cq);
+ printk(KERN_ERR "failed to create CQ cqe= %d ret= %d\n",
+ ch->rq_size + srp_sq_size, ret);
+ goto out;
+ }
+
+ qp_init->qp_context = (void *)ch;
+ qp_init->event_handler
+ = (void(*)(struct ib_event *, void*))srpt_qp_event;
+ qp_init->send_cq = ch->cq;
+ qp_init->recv_cq = ch->cq;
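+	/* All channels of an HCA share the same SRQ for receive buffers. */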
+ qp_init->srq = sdev->srq;
+ qp_init->sq_sig_type = IB_SIGNAL_REQ_WR;
+ qp_init->qp_type = IB_QPT_RC;
+ qp_init->cap.max_send_wr = srp_sq_size;
+ qp_init->cap.max_send_sge = SRPT_DEF_SG_PER_WQE;
+
+ ch->qp = ib_create_qp(sdev->pd, qp_init);
+ if (IS_ERR(ch->qp)) {
+ ret = PTR_ERR(ch->qp);
+ printk(KERN_ERR "failed to create_qp ret= %d\n", ret);
+ goto err_destroy_cq;
+ }
+
+ atomic_set(&ch->sq_wr_avail, qp_init->cap.max_send_wr);
+
+ pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d cm_id= %p\n",
+ __func__, ch->cq->cqe, qp_init->cap.max_send_sge,
+ qp_init->cap.max_send_wr, ch->cm_id);
+
+ ret = srpt_init_ch_qp(ch, ch->qp);
+ if (ret)
+ goto err_destroy_qp;
+
+ init_waitqueue_head(&ch->wait_queue);
+
+ pr_debug("creating thread for session %s\n", ch->sess_name);
+
+ ch->thread = kthread_run(srpt_compl_thread, ch, "ib_srpt_compl");
+ if (IS_ERR(ch->thread)) {
+ printk(KERN_ERR "failed to create kernel thread %ld\n",
+ PTR_ERR(ch->thread));
+ ch->thread = NULL;
+ goto err_destroy_qp;
+ }
+
+out:
+ kfree(qp_init);
+ return ret;
+
+err_destroy_qp:
+ ib_destroy_qp(ch->qp);
+err_destroy_cq:
+ ib_destroy_cq(ch->cq);
+ goto out;
+}
+
+static void srpt_destroy_ch_ib(struct srpt_rdma_ch *ch)
+{
+ if (ch->thread)
+ kthread_stop(ch->thread);
+
+ ib_destroy_qp(ch->qp);
+ ib_destroy_cq(ch->cq);
+}
+
+/**
+ * __srpt_close_ch() - Close an RDMA channel by setting the QP error state.
+ *
+ * Reset the QP and make sure all resources associated with the channel will
+ * be deallocated at an appropriate time.
+ *
+ * Note: The caller must hold ch->sport->sdev->spinlock.
+ */
+static void __srpt_close_ch(struct srpt_rdma_ch *ch)
+{
+ struct srpt_device *sdev;
+ enum rdma_ch_state prev_state;
+ unsigned long flags;
+
+ sdev = ch->sport->sdev;
+
+ spin_lock_irqsave(&ch->spinlock, flags);
+ prev_state = ch->state;
+ switch (prev_state) {
+ case CH_CONNECTING:
+ case CH_LIVE:
+ ch->state = CH_DISCONNECTING;
+ break;
+ default:
+ break;
+ }
+ spin_unlock_irqrestore(&ch->spinlock, flags);
+
+ switch (prev_state) {
+ case CH_CONNECTING:
+ ib_send_cm_rej(ch->cm_id, IB_CM_REJ_NO_RESOURCES, NULL, 0,
+ NULL, 0);
+ /* fall through */
+ case CH_LIVE:
+ if (ib_send_cm_dreq(ch->cm_id, NULL, 0) < 0)
+ printk(KERN_ERR "sending CM DREQ failed.\n");
+ break;
+ case CH_DISCONNECTING:
+ break;
+ case CH_DRAINING:
+ case CH_RELEASING:
+ break;
+ }
+}
+
+/**
+ * srpt_close_ch() - Close an RDMA channel.
+ */
+static void srpt_close_ch(struct srpt_rdma_ch *ch)
+{
+ struct srpt_device *sdev;
+
+ sdev = ch->sport->sdev;
+ spin_lock_irq(&sdev->spinlock);
+ __srpt_close_ch(ch);
+ spin_unlock_irq(&sdev->spinlock);
+}
+
+/**
+ * srpt_drain_channel() - Drain a channel by resetting the IB queue pair.
+ * @cm_id: Pointer to the CM ID of the channel to be drained.
+ *
+ * Note: Must be called from inside srpt_cm_handler to avoid a race between
+ * accessing sdev->spinlock and the call to kfree(sdev) in srpt_remove_one()
+ * (the caller of srpt_cm_handler holds the cm_id spinlock; srpt_remove_one()
+ * waits until all target sessions for the associated IB device have been
+ * unregistered and target session registration involves a call to
+ * ib_destroy_cm_id(), which locks the cm_id spinlock and hence waits until
+ * this function has finished).
+ */
+static void srpt_drain_channel(struct ib_cm_id *cm_id)
+{
+ struct srpt_device *sdev;
+ struct srpt_rdma_ch *ch;
+ int ret;
+ bool do_reset = false;
+
+ WARN_ON_ONCE(irqs_disabled());
+
+ sdev = cm_id->context;
+ BUG_ON(!sdev);
+ spin_lock_irq(&sdev->spinlock);
+ list_for_each_entry(ch, &sdev->rch_list, list) {
+ if (ch->cm_id == cm_id) {
+ do_reset = srpt_test_and_set_ch_state(ch,
+ CH_CONNECTING, CH_DRAINING) ||
+ srpt_test_and_set_ch_state(ch,
+ CH_LIVE, CH_DRAINING) ||
+ srpt_test_and_set_ch_state(ch,
+ CH_DISCONNECTING, CH_DRAINING);
+ break;
+ }
+ }
+ spin_unlock_irq(&sdev->spinlock);
+
+ if (do_reset) {
+ ret = srpt_ch_qp_err(ch);
+ if (ret < 0)
+ printk(KERN_ERR "Setting queue pair in error state"
+ " failed: %d\n", ret);
+ }
+}
+
+/**
+ * srpt_find_channel() - Look up an RDMA channel.
+ * @cm_id: Pointer to the CM ID of the channel to be looked up.
+ *
+ * Return NULL if no matching RDMA channel has been found.
+ */
+static struct srpt_rdma_ch *srpt_find_channel(struct srpt_device *sdev,
+ struct ib_cm_id *cm_id)
+{
+ struct srpt_rdma_ch *ch;
+ bool found;
+
+ WARN_ON_ONCE(irqs_disabled());
+ BUG_ON(!sdev);
+
+ found = false;
+ spin_lock_irq(&sdev->spinlock);
+ list_for_each_entry(ch, &sdev->rch_list, list) {
+ if (ch->cm_id == cm_id) {
+ found = true;
+ break;
+ }
+ }
+ spin_unlock_irq(&sdev->spinlock);
+
+ return found ? ch : NULL;
+}
+
+/**
+ * srpt_release_channel() - Release channel resources.
+ *
+ * Schedules the actual release because:
+ * - Calling the ib_destroy_cm_id() call from inside an IB CM callback would
+ * trigger a deadlock.
+ * - It is not safe to call TCM transport_* functions from interrupt context.
+ */
+static void srpt_release_channel(struct srpt_rdma_ch *ch)
+{
+ schedule_work(&ch->release_work);
+}
+
+static void srpt_release_channel_work(struct work_struct *w)
+{
+ struct srpt_rdma_ch *ch;
+ struct srpt_device *sdev;
+
+ ch = container_of(w, struct srpt_rdma_ch, release_work);
+ pr_debug("ch = %p; ch->sess = %p; release_done = %p\n", ch, ch->sess,
+ ch->release_done);
+
+ sdev = ch->sport->sdev;
+ BUG_ON(!sdev);
+
+ transport_deregister_session_configfs(ch->sess);
+ transport_deregister_session(ch->sess);
+ ch->sess = NULL;
+
+ srpt_destroy_ch_ib(ch);
+
+ srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
+ ch->sport->sdev, ch->rq_size,
+ ch->rsp_size, DMA_TO_DEVICE);
+
+ spin_lock_irq(&sdev->spinlock);
+ list_del(&ch->list);
+ spin_unlock_irq(&sdev->spinlock);
+
+ ib_destroy_cm_id(ch->cm_id);
+
+ if (ch->release_done)
+ complete(ch->release_done);
+
+ wake_up(&sdev->ch_releaseQ);
+
+ kfree(ch);
+}
+
+static struct srpt_node_acl *__srpt_lookup_acl(struct srpt_port *sport,
+ u8 i_port_id[16])
+{
+ struct srpt_node_acl *nacl;
+
+ list_for_each_entry(nacl, &sport->port_acl_list, list)
+ if (memcmp(nacl->i_port_id, i_port_id,
+ sizeof(nacl->i_port_id)) == 0)
+ return nacl;
+
+ return NULL;
+}
+
+static struct srpt_node_acl *srpt_lookup_acl(struct srpt_port *sport,
+ u8 i_port_id[16])
+{
+ struct srpt_node_acl *nacl;
+
+ spin_lock_irq(&sport->port_acl_lock);
+ nacl = __srpt_lookup_acl(sport, i_port_id);
+ spin_unlock_irq(&sport->port_acl_lock);
+
+ return nacl;
+}
+
+/**
+ * srpt_cm_req_recv() - Process the event IB_CM_REQ_RECEIVED.
+ *
+ * Ownership of the cm_id is transferred to the target session if this
+ * function returns zero. Otherwise the caller remains the owner of cm_id.
+ */
+static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
+ struct ib_cm_req_event_param *param,
+ void *private_data)
+{
+ struct srpt_device *sdev = cm_id->context;
+ struct srpt_port *sport = &sdev->port[param->port - 1];
+ struct srp_login_req *req;
+ struct srp_login_rsp *rsp;
+ struct srp_login_rej *rej;
+ struct ib_cm_rep_param *rep_param;
+ struct srpt_rdma_ch *ch, *tmp_ch;
+ struct srpt_node_acl *nacl;
+ u32 it_iu_len;
+ int i;
+ int ret = 0;
+
+ WARN_ON_ONCE(irqs_disabled());
+
+ if (WARN_ON(!sdev || !private_data))
+ return -EINVAL;
+
+ req = (struct srp_login_req *)private_data;
+
+ it_iu_len = be32_to_cpu(req->req_it_iu_len);
+
+ printk(KERN_INFO "Received SRP_LOGIN_REQ with i_port_id 0x%llx:0x%llx,"
+ " t_port_id 0x%llx:0x%llx and it_iu_len %d on port %d"
+ " (guid=0x%llx:0x%llx)\n",
+ be64_to_cpu(*(__be64 *)&req->initiator_port_id[0]),
+ be64_to_cpu(*(__be64 *)&req->initiator_port_id[8]),
+ be64_to_cpu(*(__be64 *)&req->target_port_id[0]),
+ be64_to_cpu(*(__be64 *)&req->target_port_id[8]),
+ it_iu_len,
+ param->port,
+ be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[0]),
+ be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[8]));
+
+ rsp = kzalloc(sizeof *rsp, GFP_KERNEL);
+ rej = kzalloc(sizeof *rej, GFP_KERNEL);
+ rep_param = kzalloc(sizeof *rep_param, GFP_KERNEL);
+
+ if (!rsp || !rej || !rep_param) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (it_iu_len > srp_max_req_size || it_iu_len < 64) {
+ rej->reason = __constant_cpu_to_be32(
+ SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE);
+ ret = -EINVAL;
+ printk(KERN_ERR "rejected SRP_LOGIN_REQ because its"
+ " length (%d bytes) is out of range (%d .. %d)\n",
+ it_iu_len, 64, srp_max_req_size);
+ goto reject;
+ }
+
+ if (!sport->enabled) {
+ rej->reason = __constant_cpu_to_be32(
+ SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+ ret = -EINVAL;
+ printk(KERN_ERR "rejected SRP_LOGIN_REQ because the target port"
+ " has not yet been enabled\n");
+ goto reject;
+ }
+
+ if ((req->req_flags & SRP_MTCH_ACTION) == SRP_MULTICHAN_SINGLE) {
+ rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_NO_CHAN;
+
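+		/*
+		 * SRP_MULTICHAN_SINGLE: terminate any existing channel between
+		 * the same initiator and target port before accepting the new
+		 * login.
+		 */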
+ spin_lock_irq(&sdev->spinlock);
+
+ list_for_each_entry_safe(ch, tmp_ch, &sdev->rch_list, list) {
+ if (!memcmp(ch->i_port_id, req->initiator_port_id, 16)
+ && !memcmp(ch->t_port_id, req->target_port_id, 16)
+ && param->port == ch->sport->port
+ && param->listen_id == ch->sport->sdev->cm_id
+ && ch->cm_id) {
+ enum rdma_ch_state ch_state;
+
+ ch_state = srpt_get_ch_state(ch);
+ if (ch_state != CH_CONNECTING
+ && ch_state != CH_LIVE)
+ continue;
+
+ /* found an existing channel */
+ pr_debug("Found existing channel %s"
+ " cm_id= %p state= %d\n",
+ ch->sess_name, ch->cm_id, ch_state);
+
+ __srpt_close_ch(ch);
+
+ rsp->rsp_flags =
+ SRP_LOGIN_RSP_MULTICHAN_TERMINATED;
+ }
+ }
+
+ spin_unlock_irq(&sdev->spinlock);
+
+ } else
+ rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_MAINTAINED;
+
+ if (*(__be64 *)req->target_port_id != cpu_to_be64(srpt_service_guid)
+ || *(__be64 *)(req->target_port_id + 8) !=
+ cpu_to_be64(srpt_service_guid)) {
+ rej->reason = __constant_cpu_to_be32(
+ SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL);
+ ret = -ENOMEM;
+ printk(KERN_ERR "rejected SRP_LOGIN_REQ because it"
+ " has an invalid target port identifier.\n");
+ goto reject;
+ }
+
+ ch = kzalloc(sizeof *ch, GFP_KERNEL);
+ if (!ch) {
+ rej->reason = __constant_cpu_to_be32(
+ SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+		printk(KERN_ERR "rejected SRP_LOGIN_REQ because memory allocation failed.\n");
+ ret = -ENOMEM;
+ goto reject;
+ }
+
+ INIT_WORK(&ch->release_work, srpt_release_channel_work);
+ memcpy(ch->i_port_id, req->initiator_port_id, 16);
+ memcpy(ch->t_port_id, req->target_port_id, 16);
+ ch->sport = &sdev->port[param->port - 1];
+ ch->cm_id = cm_id;
+ /*
+ * Avoid QUEUE_FULL conditions by limiting the number of buffers used
+ * for the SRP protocol to the command queue size.
+ */
+ ch->rq_size = SRPT_RQ_SIZE;
+ spin_lock_init(&ch->spinlock);
+ ch->state = CH_CONNECTING;
+ INIT_LIST_HEAD(&ch->cmd_wait_list);
+ ch->rsp_size = ch->sport->port_attrib.srp_max_rsp_size;
+
+ ch->ioctx_ring = (struct srpt_send_ioctx **)
+ srpt_alloc_ioctx_ring(ch->sport->sdev, ch->rq_size,
+ sizeof(*ch->ioctx_ring[0]),
+ ch->rsp_size, DMA_TO_DEVICE);
+ if (!ch->ioctx_ring)
+ goto free_ch;
+
+ INIT_LIST_HEAD(&ch->free_list);
+ for (i = 0; i < ch->rq_size; i++) {
+ ch->ioctx_ring[i]->ch = ch;
+ list_add_tail(&ch->ioctx_ring[i]->free_list, &ch->free_list);
+ }
+
+ ret = srpt_create_ch_ib(ch);
+ if (ret) {
+ rej->reason = __constant_cpu_to_be32(
+ SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+ printk(KERN_ERR "rejected SRP_LOGIN_REQ because creating"
+ " a new RDMA channel failed.\n");
+ goto free_ring;
+ }
+
+ ret = srpt_ch_qp_rtr(ch, ch->qp);
+ if (ret) {
+ rej->reason = __constant_cpu_to_be32(
+ SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+ printk(KERN_ERR "rejected SRP_LOGIN_REQ because enabling"
+ " RTR failed (error code = %d)\n", ret);
+ goto destroy_ib;
+ }
+ /*
+	 * Use the initiator port identifier as the session name.
+ */
+ snprintf(ch->sess_name, sizeof(ch->sess_name), "0x%016llx%016llx",
+ be64_to_cpu(*(__be64 *)ch->i_port_id),
+ be64_to_cpu(*(__be64 *)(ch->i_port_id + 8)));
+
+ pr_debug("registering session %s\n", ch->sess_name);
+
+ nacl = srpt_lookup_acl(sport, ch->i_port_id);
+ if (!nacl) {
+ printk(KERN_INFO "Rejected login because no ACL has been"
+ " configured yet for initiator %s.\n", ch->sess_name);
+ rej->reason = __constant_cpu_to_be32(
+ SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED);
+ goto destroy_ib;
+ }
+
+ ch->sess = transport_init_session();
+ if (!ch->sess) {
+ rej->reason = __constant_cpu_to_be32(
+ SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+ pr_debug("Failed to create session\n");
+ goto deregister_session;
+ }
+ ch->sess->se_node_acl = &nacl->nacl;
+ transport_register_session(&sport->port_tpg_1, &nacl->nacl, ch->sess, ch);
+
+ pr_debug("Establish connection sess=%p name=%s cm_id=%p\n", ch->sess,
+ ch->sess_name, ch->cm_id);
+
+ /* create srp_login_response */
+ rsp->opcode = SRP_LOGIN_RSP;
+ rsp->tag = req->tag;
+ rsp->max_it_iu_len = req->req_it_iu_len;
+ rsp->max_ti_iu_len = req->req_it_iu_len;
+ ch->max_ti_iu_len = it_iu_len;
+ rsp->buf_fmt = __constant_cpu_to_be16(SRP_BUF_FORMAT_DIRECT
+ | SRP_BUF_FORMAT_INDIRECT);
+ rsp->req_lim_delta = cpu_to_be32(ch->rq_size);
+ atomic_set(&ch->req_lim, ch->rq_size);
+ atomic_set(&ch->req_lim_delta, 0);
+
+ /* create cm reply */
+ rep_param->qp_num = ch->qp->qp_num;
+ rep_param->private_data = (void *)rsp;
+ rep_param->private_data_len = sizeof *rsp;
+ rep_param->rnr_retry_count = 7;
+ rep_param->flow_control = 1;
+ rep_param->failover_accepted = 0;
+ rep_param->srq = 1;
+ rep_param->responder_resources = 4;
+ rep_param->initiator_depth = 4;
+
+ ret = ib_send_cm_rep(cm_id, rep_param);
+ if (ret) {
+ printk(KERN_ERR "sending SRP_LOGIN_REQ response failed"
+ " (error code = %d)\n", ret);
+ goto release_channel;
+ }
+
+ spin_lock_irq(&sdev->spinlock);
+ list_add_tail(&ch->list, &sdev->rch_list);
+ spin_unlock_irq(&sdev->spinlock);
+
+ goto out;
+
+release_channel:
+ srpt_set_ch_state(ch, CH_RELEASING);
+ transport_deregister_session_configfs(ch->sess);
+
+deregister_session:
+ transport_deregister_session(ch->sess);
+ ch->sess = NULL;
+
+destroy_ib:
+ srpt_destroy_ch_ib(ch);
+
+free_ring:
+ srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
+ ch->sport->sdev, ch->rq_size,
+ ch->rsp_size, DMA_TO_DEVICE);
+free_ch:
+ kfree(ch);
+
+reject:
+ rej->opcode = SRP_LOGIN_REJ;
+ rej->tag = req->tag;
+ rej->buf_fmt = __constant_cpu_to_be16(SRP_BUF_FORMAT_DIRECT
+ | SRP_BUF_FORMAT_INDIRECT);
+
+ ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
+ (void *)rej, sizeof *rej);
+
+out:
+ kfree(rep_param);
+ kfree(rsp);
+ kfree(rej);
+
+ return ret;
+}
+
+static void srpt_cm_rej_recv(struct ib_cm_id *cm_id)
+{
+ printk(KERN_INFO "Received IB REJ for cm_id %p.\n", cm_id);
+ srpt_drain_channel(cm_id);
+}
+
+/**
+ * srpt_cm_rtu_recv() - Process an IB_CM_RTU_RECEIVED or USER_ESTABLISHED event.
+ *
+ * An IB_CM_RTU_RECEIVED message indicates that the connection is established
+ * and that the recipient may begin transmitting (RTU = ready to use).
+ */
+static void srpt_cm_rtu_recv(struct ib_cm_id *cm_id)
+{
+ struct srpt_rdma_ch *ch;
+ int ret;
+
+ ch = srpt_find_channel(cm_id->context, cm_id);
+ BUG_ON(!ch);
+
+ if (srpt_test_and_set_ch_state(ch, CH_CONNECTING, CH_LIVE)) {
+ struct srpt_recv_ioctx *ioctx, *ioctx_tmp;
+
+ ret = srpt_ch_qp_rts(ch, ch->qp);
+
+ list_for_each_entry_safe(ioctx, ioctx_tmp, &ch->cmd_wait_list,
+ wait_list) {
+ list_del(&ioctx->wait_list);
+ srpt_handle_new_iu(ch, ioctx, NULL);
+ }
+ if (ret)
+ srpt_close_ch(ch);
+ }
+}
+
+static void srpt_cm_timewait_exit(struct ib_cm_id *cm_id)
+{
+ printk(KERN_INFO "Received IB TimeWait exit for cm_id %p.\n", cm_id);
+ srpt_drain_channel(cm_id);
+}
+
+static void srpt_cm_rep_error(struct ib_cm_id *cm_id)
+{
+ printk(KERN_INFO "Received IB REP error for cm_id %p.\n", cm_id);
+ srpt_drain_channel(cm_id);
+}
+
+/**
+ * srpt_cm_dreq_recv() - Process reception of a DREQ message.
+ */
+static void srpt_cm_dreq_recv(struct ib_cm_id *cm_id)
+{
+ struct srpt_rdma_ch *ch;
+ unsigned long flags;
+ bool send_drep = false;
+
+ ch = srpt_find_channel(cm_id->context, cm_id);
+ BUG_ON(!ch);
+
+ pr_debug("cm_id= %p ch->state= %d\n", cm_id, srpt_get_ch_state(ch));
+
+ spin_lock_irqsave(&ch->spinlock, flags);
+ switch (ch->state) {
+ case CH_CONNECTING:
+ case CH_LIVE:
+ send_drep = true;
+ ch->state = CH_DISCONNECTING;
+ break;
+ case CH_DISCONNECTING:
+ case CH_DRAINING:
+ case CH_RELEASING:
+ WARN(true, "unexpected channel state %d\n", ch->state);
+ break;
+ }
+ spin_unlock_irqrestore(&ch->spinlock, flags);
+
+ if (send_drep) {
+ if (ib_send_cm_drep(ch->cm_id, NULL, 0) < 0)
+ printk(KERN_ERR "Sending IB DREP failed.\n");
+ printk(KERN_INFO "Received DREQ and sent DREP for session %s.\n",
+ ch->sess_name);
+ }
+}
+
+/**
+ * srpt_cm_drep_recv() - Process reception of a DREP message.
+ */
+static void srpt_cm_drep_recv(struct ib_cm_id *cm_id)
+{
+ printk(KERN_INFO "Received InfiniBand DREP message for cm_id %p.\n",
+ cm_id);
+ srpt_drain_channel(cm_id);
+}
+
+/**
+ * srpt_cm_handler() - IB connection manager callback function.
+ *
+ * A non-zero return value will cause the caller to destroy the CM ID.
+ *
+ * Note: srpt_cm_handler() must only return a non-zero value when transferring
+ * ownership of the cm_id to a channel by srpt_cm_req_recv() failed. Returning
+ * a non-zero value in any other case will trigger a race with the
+ * ib_destroy_cm_id() call in srpt_release_channel().
+ */
+static int srpt_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
+{
+ int ret;
+
+ ret = 0;
+ switch (event->event) {
+ case IB_CM_REQ_RECEIVED:
+ ret = srpt_cm_req_recv(cm_id, &event->param.req_rcvd,
+ event->private_data);
+ break;
+ case IB_CM_REJ_RECEIVED:
+ srpt_cm_rej_recv(cm_id);
+ break;
+ case IB_CM_RTU_RECEIVED:
+ case IB_CM_USER_ESTABLISHED:
+ srpt_cm_rtu_recv(cm_id);
+ break;
+ case IB_CM_DREQ_RECEIVED:
+ srpt_cm_dreq_recv(cm_id);
+ break;
+ case IB_CM_DREP_RECEIVED:
+ srpt_cm_drep_recv(cm_id);
+ break;
+ case IB_CM_TIMEWAIT_EXIT:
+ srpt_cm_timewait_exit(cm_id);
+ break;
+ case IB_CM_REP_ERROR:
+ srpt_cm_rep_error(cm_id);
+ break;
+ case IB_CM_DREQ_ERROR:
+ printk(KERN_INFO "Received IB DREQ ERROR event.\n");
+ break;
+ case IB_CM_MRA_RECEIVED:
+ printk(KERN_INFO "Received IB MRA event\n");
+ break;
+ default:
+ printk(KERN_ERR "received unrecognized IB CM event %d\n",
+ event->event);
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * srpt_perform_rdmas() - Perform IB RDMA.
+ *
+ * Returns zero upon success or a negative number upon failure.
+ */
+static int srpt_perform_rdmas(struct srpt_rdma_ch *ch,
+ struct srpt_send_ioctx *ioctx)
+{
+ struct ib_send_wr wr;
+ struct ib_send_wr *bad_wr;
+ struct rdma_iu *riu;
+ int i;
+ int ret;
+ int sq_wr_avail;
+ enum dma_data_direction dir;
+ const int n_rdma = ioctx->n_rdma;
+
+ dir = ioctx->cmd.data_direction;
+ if (dir == DMA_TO_DEVICE) {
+ /* write */
+ ret = -ENOMEM;
+ sq_wr_avail = atomic_sub_return(n_rdma, &ch->sq_wr_avail);
+ if (sq_wr_avail < 0) {
+ printk(KERN_WARNING "IB send queue full (needed %d)\n",
+ n_rdma);
+ goto out;
+ }
+ }
+
+ ioctx->rdma_aborted = false;
+ ret = 0;
+ riu = ioctx->rdma_ius;
+ memset(&wr, 0, sizeof wr);
+
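+ /* Post one RDMA work request per rdma_iu descriptor of this I/O context. */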
+ for (i = 0; i < n_rdma; ++i, ++riu) {
+ if (dir == DMA_FROM_DEVICE) {
+ wr.opcode = IB_WR_RDMA_WRITE;
+ wr.wr_id = encode_wr_id(i == n_rdma - 1 ?
+ SRPT_RDMA_WRITE_LAST :
+ SRPT_RDMA_MID,
+ ioctx->ioctx.index);
+ } else {
+ wr.opcode = IB_WR_RDMA_READ;
+ wr.wr_id = encode_wr_id(i == n_rdma - 1 ?
+ SRPT_RDMA_READ_LAST :
+ SRPT_RDMA_MID,
+ ioctx->ioctx.index);
+ }
+ wr.next = NULL;
+ wr.wr.rdma.remote_addr = riu->raddr;
+ wr.wr.rdma.rkey = riu->rkey;
+ wr.num_sge = riu->sge_cnt;
+ wr.sg_list = riu->sge;
+
+ /* only get completion event for the last rdma write */
+ if (i == (n_rdma - 1) && dir == DMA_TO_DEVICE)
+ wr.send_flags = IB_SEND_SIGNALED;
+
+ ret = ib_post_send(ch->qp, &wr, &bad_wr);
+ if (ret)
+ break;
+ }
+
+ if (ret)
+ printk(KERN_ERR "%s[%d]: ib_post_send() returned %d for %d/%d",
+ __func__, __LINE__, ret, i, n_rdma);
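+ /*
+ * If posting failed after at least one WR had been posted, post a
+ * signaled zero-length abort WR and wait for its completion so that
+ * no RDMA completions arrive after the I/O context has been reused.
+ */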
+ if (ret && i > 0) {
+ wr.num_sge = 0;
+ wr.wr_id = encode_wr_id(SRPT_RDMA_ABORT, ioctx->ioctx.index);
+ wr.send_flags = IB_SEND_SIGNALED;
+ while (ch->state == CH_LIVE &&
+ ib_post_send(ch->qp, &wr, &bad_wr) != 0) {
+ printk(KERN_INFO "Trying to abort failed RDMA transfer [%d]",
+ ioctx->ioctx.index);
+ msleep(1000);
+ }
+ while (ch->state != CH_RELEASING && !ioctx->rdma_aborted) {
+ printk(KERN_INFO "Waiting until RDMA abort finished [%d]",
+ ioctx->ioctx.index);
+ msleep(1000);
+ }
+ }
+out:
+ if (unlikely(dir == DMA_TO_DEVICE && ret < 0))
+ atomic_add(n_rdma, &ch->sq_wr_avail);
+ return ret;
+}
+
+/**
+ * srpt_xfer_data() - Start data transfer from initiator to target.
+ */
+static int srpt_xfer_data(struct srpt_rdma_ch *ch,
+ struct srpt_send_ioctx *ioctx)
+{
+ int ret;
+
+ ret = srpt_map_sg_to_ib_sge(ch, ioctx);
+ if (ret) {
+ printk(KERN_ERR "%s[%d] ret=%d\n", __func__, __LINE__, ret);
+ goto out;
+ }
+
+ ret = srpt_perform_rdmas(ch, ioctx);
+ if (ret) {
+ if (ret == -EAGAIN || ret == -ENOMEM)
+ printk(KERN_INFO "%s[%d] queue full -- ret=%d\n",
+ __func__, __LINE__, ret);
+ else
+ printk(KERN_ERR "%s[%d] fatal error -- ret=%d\n",
+ __func__, __LINE__, ret);
+ goto out_unmap;
+ }
+
+out:
+ return ret;
+out_unmap:
+ srpt_unmap_sg_to_ib_sge(ch, ioctx);
+ goto out;
+}
+
+static int srpt_write_pending_status(struct se_cmd *se_cmd)
+{
+ struct srpt_send_ioctx *ioctx;
+
+ ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
+ return srpt_get_cmd_state(ioctx) == SRPT_STATE_NEED_DATA;
+}
+
+/*
+ * srpt_write_pending() - Start data transfer from initiator to target (write).
+ */
+static int srpt_write_pending(struct se_cmd *se_cmd)
+{
+ struct srpt_rdma_ch *ch;
+ struct srpt_send_ioctx *ioctx;
+ enum srpt_command_state new_state;
+ enum rdma_ch_state ch_state;
+ int ret;
+
+ ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
+
+ new_state = srpt_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA);
+ WARN_ON(new_state == SRPT_STATE_DONE);
+
+ ch = ioctx->ch;
+ BUG_ON(!ch);
+
+ ch_state = srpt_get_ch_state(ch);
+ switch (ch_state) {
+ case CH_CONNECTING:
+ WARN(true, "unexpected channel state %d\n", ch_state);
+ ret = -EINVAL;
+ goto out;
+ case CH_LIVE:
+ break;
+ case CH_DISCONNECTING:
+ case CH_DRAINING:
+ case CH_RELEASING:
+ pr_debug("cmd with tag %lld: channel disconnecting\n",
+ ioctx->tag);
+ srpt_set_cmd_state(ioctx, SRPT_STATE_DATA_IN);
+ ret = -EINVAL;
+ goto out;
+ }
+ ret = srpt_xfer_data(ch, ioctx);
+
+out:
+ return ret;
+}
+
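+/* Map a target core task management status onto an SRP response code (SRP spec, table 24). */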
+static u8 tcm_to_srp_tsk_mgmt_status(const int tcm_mgmt_status)
+{
+ switch (tcm_mgmt_status) {
+ case TMR_FUNCTION_COMPLETE:
+ return SRP_TSK_MGMT_SUCCESS;
+ case TMR_FUNCTION_REJECTED:
+ return SRP_TSK_MGMT_FUNC_NOT_SUPP;
+ }
+ return SRP_TSK_MGMT_FAILED;
+}
+
+/**
+ * srpt_queue_response() - Transmit the response to a SCSI command.
+ *
+ * Callback function called by the TCM core. Must not block since it can be
+ * invoked in the context of the IB completion handler.
+ */
+static int srpt_queue_response(struct se_cmd *cmd)
+{
+ struct srpt_rdma_ch *ch;
+ struct srpt_send_ioctx *ioctx;
+ enum srpt_command_state state;
+ unsigned long flags;
+ int ret;
+ enum dma_data_direction dir;
+ int resp_len;
+ u8 srp_tm_status;
+
+ ret = 0;
+
+ ioctx = container_of(cmd, struct srpt_send_ioctx, cmd);
+ ch = ioctx->ch;
+ BUG_ON(!ch);
+
+ spin_lock_irqsave(&ioctx->spinlock, flags);
+ state = ioctx->state;
+ switch (state) {
+ case SRPT_STATE_NEW:
+ case SRPT_STATE_DATA_IN:
+ ioctx->state = SRPT_STATE_CMD_RSP_SENT;
+ break;
+ case SRPT_STATE_MGMT:
+ ioctx->state = SRPT_STATE_MGMT_RSP_SENT;
+ break;
+ default:
+ WARN(true, "ch %p; cmd %d: unexpected command state %d\n",
+ ch, ioctx->ioctx.index, ioctx->state);
+ break;
+ }
+ spin_unlock_irqrestore(&ioctx->spinlock, flags);
+
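+ /*
+ * If the command has been aborted, give the initiator back a credit
+ * via req_lim_delta and abort the command instead of sending a
+ * response.
+ */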
+ if (unlikely(transport_check_aborted_status(&ioctx->cmd, false)
+ || WARN_ON_ONCE(state == SRPT_STATE_CMD_RSP_SENT))) {
+ atomic_inc(&ch->req_lim_delta);
+ srpt_abort_cmd(ioctx);
+ goto out;
+ }
+
+ dir = ioctx->cmd.data_direction;
+
+ /* For read commands, transfer the data to the initiator. */
+ if (dir == DMA_FROM_DEVICE && ioctx->cmd.data_length &&
+ !ioctx->queue_status_only) {
+ ret = srpt_xfer_data(ch, ioctx);
+ if (ret) {
+ printk(KERN_ERR "xfer_data failed for tag %llu\n",
+ ioctx->tag);
+ goto out;
+ }
+ }
+
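+ /* Build either a command response or a task management response. */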
+ if (state != SRPT_STATE_MGMT)
+ resp_len = srpt_build_cmd_rsp(ch, ioctx, ioctx->tag,
+ cmd->scsi_status);
+ else {
+ srp_tm_status
+ = tcm_to_srp_tsk_mgmt_status(cmd->se_tmr_req->response);
+ resp_len = srpt_build_tskmgmt_rsp(ch, ioctx, srp_tm_status,
+ ioctx->tag);
+ }
+ ret = srpt_post_send(ch, ioctx, resp_len);
+ if (ret) {
+ printk(KERN_ERR "sending cmd response failed for tag %llu\n",
+ ioctx->tag);
+ srpt_unmap_sg_to_ib_sge(ch, ioctx);
+ srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
+ kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
+ }
+
+out:
+ return ret;
+}
+
+static int srpt_queue_status(struct se_cmd *cmd)
+{
+ struct srpt_send_ioctx *ioctx;
+
+ ioctx = container_of(cmd, struct srpt_send_ioctx, cmd);
+ BUG_ON(ioctx->sense_data != cmd->sense_buffer);
+ if (cmd->se_cmd_flags &
+ (SCF_TRANSPORT_TASK_SENSE | SCF_EMULATED_TASK_SENSE))
+ WARN_ON(cmd->scsi_status != SAM_STAT_CHECK_CONDITION);
+ ioctx->queue_status_only = true;
+ return srpt_queue_response(cmd);
+}
+
+static void srpt_refresh_port_work(struct work_struct *work)
+{
+ struct srpt_port *sport = container_of(work, struct srpt_port, work);
+
+ srpt_refresh_port(sport);
+}
+
+static int srpt_ch_list_empty(struct srpt_device *sdev)
+{
+ int res;
+
+ spin_lock_irq(&sdev->spinlock);
+ res = list_empty(&sdev->rch_list);
+ spin_unlock_irq(&sdev->spinlock);
+
+ return res;
+}
+
+/**
+ * srpt_release_sdev() - Free the channel resources associated with a target.
+ */
+static int srpt_release_sdev(struct srpt_device *sdev)
+{
+ struct srpt_rdma_ch *ch, *tmp_ch;
+ int res;
+
+ WARN_ON_ONCE(irqs_disabled());
+
+ BUG_ON(!sdev);
+
+ spin_lock_irq(&sdev->spinlock);
+ list_for_each_entry_safe(ch, tmp_ch, &sdev->rch_list, list)
+ __srpt_close_ch(ch);
+ spin_unlock_irq(&sdev->spinlock);
+
+ res = wait_event_interruptible(sdev->ch_releaseQ,
+ srpt_ch_list_empty(sdev));
+ if (res)
+ printk(KERN_ERR "%s: interrupted.\n", __func__);
+
+ return 0;
+}
+
+static struct srpt_port *__srpt_lookup_port(const char *name)
+{
+ struct ib_device *dev;
+ struct srpt_device *sdev;
+ struct srpt_port *sport;
+ int i;
+
+ list_for_each_entry(sdev, &srpt_dev_list, list) {
+ dev = sdev->device;
+ if (!dev)
+ continue;
+
+ for (i = 0; i < dev->phys_port_cnt; i++) {
+ sport = &sdev->port[i];
+
+ if (!strcmp(sport->port_guid, name))
+ return sport;
+ }
+ }
+
+ return NULL;
+}
+
+static struct srpt_port *srpt_lookup_port(const char *name)
+{
+ struct srpt_port *sport;
+
+ spin_lock(&srpt_dev_lock);
+ sport = __srpt_lookup_port(name);
+ spin_unlock(&srpt_dev_lock);
+
+ return sport;
+}
+
+/**
+ * srpt_add_one() - InfiniBand device addition callback function.
+ */
+static void srpt_add_one(struct ib_device *device)
+{
+ struct srpt_device *sdev;
+ struct srpt_port *sport;
+ struct ib_srq_init_attr srq_attr;
+ int i;
+
+ pr_debug("device = %p, device->dma_ops = %p\n", device,
+ device->dma_ops);
+
+ sdev = kzalloc(sizeof *sdev, GFP_KERNEL);
+ if (!sdev)
+ goto err;
+
+ sdev->device = device;
+ INIT_LIST_HEAD(&sdev->rch_list);
+ init_waitqueue_head(&sdev->ch_releaseQ);
+ spin_lock_init(&sdev->spinlock);
+
+ if (ib_query_device(device, &sdev->dev_attr))
+ goto free_dev;
+
+ sdev->pd = ib_alloc_pd(device);
+ if (IS_ERR(sdev->pd))
+ goto free_dev;
+
+ sdev->mr = ib_get_dma_mr(sdev->pd, IB_ACCESS_LOCAL_WRITE);
+ if (IS_ERR(sdev->mr))
+ goto err_pd;
+
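+ /* A single SRQ is used per HCA; limit its size to what the hardware supports. */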
+ sdev->srq_size = min(srpt_srq_size, sdev->dev_attr.max_srq_wr);
+
+ srq_attr.event_handler = srpt_srq_event;
+ srq_attr.srq_context = (void *)sdev;
+ srq_attr.attr.max_wr = sdev->srq_size;
+ srq_attr.attr.max_sge = 1;
+ srq_attr.attr.srq_limit = 0;
+
+ sdev->srq = ib_create_srq(sdev->pd, &srq_attr);
+ if (IS_ERR(sdev->srq))
+ goto err_mr;
+
+ pr_debug("%s: create SRQ #wr= %d max_allow=%d dev= %s\n",
+ __func__, sdev->srq_size, sdev->dev_attr.max_srq_wr,
+ device->name);
+
+ if (!srpt_service_guid)
+ srpt_service_guid = be64_to_cpu(device->node_guid);
+
+ sdev->cm_id = ib_create_cm_id(device, srpt_cm_handler, sdev);
+ if (IS_ERR(sdev->cm_id))
+ goto err_srq;
+
+ /* print out target login information */
+ pr_debug("Target login info: id_ext=%016llx,ioc_guid=%016llx,"
+ "pkey=ffff,service_id=%016llx\n", srpt_service_guid,
+ srpt_service_guid, srpt_service_guid);
+
+ /*
+ * We do not have a consistent service_id (i.e. also id_ext of target_id)
+ * to identify this target. We currently use the GUID of the first HCA
+ * in the system as service_id; therefore, the target_id will change
+ * if this HCA goes bad and is replaced by a different HCA.
+ */
+ if (ib_cm_listen(sdev->cm_id, cpu_to_be64(srpt_service_guid), 0, NULL))
+ goto err_cm;
+
+ INIT_IB_EVENT_HANDLER(&sdev->event_handler, sdev->device,
+ srpt_event_handler);
+ if (ib_register_event_handler(&sdev->event_handler))
+ goto err_cm;
+
+ sdev->ioctx_ring = (struct srpt_recv_ioctx **)
+ srpt_alloc_ioctx_ring(sdev, sdev->srq_size,
+ sizeof(*sdev->ioctx_ring[0]),
+ srp_max_req_size, DMA_FROM_DEVICE);
+ if (!sdev->ioctx_ring)
+ goto err_event;
+
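+ /* Pre-post one receive request for every SRQ entry. */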
+ for (i = 0; i < sdev->srq_size; ++i)
+ srpt_post_recv(sdev, sdev->ioctx_ring[i]);
+
+ WARN_ON(sdev->device->phys_port_cnt
+ > sizeof(sdev->port)/sizeof(sdev->port[0]));
+
+ for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
+ sport = &sdev->port[i - 1];
+ sport->sdev = sdev;
+ sport->port = i;
+ sport->port_attrib.srp_max_rdma_size = DEFAULT_MAX_RDMA_SIZE;
+ sport->port_attrib.srp_max_rsp_size = DEFAULT_MAX_RSP_SIZE;
+ sport->port_attrib.srp_sq_size = DEF_SRPT_SQ_SIZE;
+ INIT_WORK(&sport->work, srpt_refresh_port_work);
+ INIT_LIST_HEAD(&sport->port_acl_list);
+ spin_lock_init(&sport->port_acl_lock);
+
+ if (srpt_refresh_port(sport)) {
+ printk(KERN_ERR "MAD registration failed for %s-%d.\n",
+ srpt_sdev_name(sdev), i);
+ goto err_ring;
+ }
+ snprintf(sport->port_guid, sizeof(sport->port_guid),
+ "0x%016llx%016llx",
+ be64_to_cpu(sport->gid.global.subnet_prefix),
+ be64_to_cpu(sport->gid.global.interface_id));
+ }
+
+ spin_lock(&srpt_dev_lock);
+ list_add_tail(&sdev->list, &srpt_dev_list);
+ spin_unlock(&srpt_dev_lock);
+
+out:
+ ib_set_client_data(device, &srpt_client, sdev);
+ pr_debug("added %s.\n", device->name);
+ return;
+
+err_ring:
+ srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev,
+ sdev->srq_size, srp_max_req_size,
+ DMA_FROM_DEVICE);
+err_event:
+ ib_unregister_event_handler(&sdev->event_handler);
+err_cm:
+ ib_destroy_cm_id(sdev->cm_id);
+err_srq:
+ ib_destroy_srq(sdev->srq);
+err_mr:
+ ib_dereg_mr(sdev->mr);
+err_pd:
+ ib_dealloc_pd(sdev->pd);
+free_dev:
+ kfree(sdev);
+err:
+ sdev = NULL;
+ printk(KERN_INFO "%s(%s) failed.\n", __func__, device->name);
+ goto out;
+}
+
+/**
+ * srpt_remove_one() - InfiniBand device removal callback function.
+ */
+static void srpt_remove_one(struct ib_device *device)
+{
+ struct srpt_device *sdev;
+ int i;
+
+ sdev = ib_get_client_data(device, &srpt_client);
+ if (!sdev) {
+ printk(KERN_INFO "%s(%s): nothing to do.\n", __func__,
+ device->name);
+ return;
+ }
+
+ srpt_unregister_mad_agent(sdev);
+
+ ib_unregister_event_handler(&sdev->event_handler);
+
+ /* Cancel any work queued by the just unregistered IB event handler. */
+ for (i = 0; i < sdev->device->phys_port_cnt; i++)
+ cancel_work_sync(&sdev->port[i].work);
+
+ ib_destroy_cm_id(sdev->cm_id);
+
+ /*
+ * Unregistering a target must happen after destroying sdev->cm_id
+ * such that no new SRP_LOGIN_REQ information units can arrive while
+ * destroying the target.
+ */
+ spin_lock(&srpt_dev_lock);
+ list_del(&sdev->list);
+ spin_unlock(&srpt_dev_lock);
+ srpt_release_sdev(sdev);
+
+ ib_destroy_srq(sdev->srq);
+ ib_dereg_mr(sdev->mr);
+ ib_dealloc_pd(sdev->pd);
+
+ srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev,
+ sdev->srq_size, srp_max_req_size, DMA_FROM_DEVICE);
+ sdev->ioctx_ring = NULL;
+ kfree(sdev);
+}
+
+static struct ib_client srpt_client = {
+ .name = DRV_NAME,
+ .add = srpt_add_one,
+ .remove = srpt_remove_one
+};
+
+static int srpt_check_true(struct se_portal_group *se_tpg)
+{
+ return 1;
+}
+
+static int srpt_check_false(struct se_portal_group *se_tpg)
+{
+ return 0;
+}
+
+static char *srpt_get_fabric_name(void)
+{
+ return "srpt";
+}
+
+static u8 srpt_get_fabric_proto_ident(struct se_portal_group *se_tpg)
+{
+ return SCSI_TRANSPORTID_PROTOCOLID_SRP;
+}
+
+static char *srpt_get_fabric_wwn(struct se_portal_group *tpg)
+{
+ struct srpt_port *sport = container_of(tpg, struct srpt_port, port_tpg_1);
+
+ return sport->port_guid;
+}
+
+static u16 srpt_get_tag(struct se_portal_group *tpg)
+{
+ return 1;
+}
+
+static u32 srpt_get_default_depth(struct se_portal_group *se_tpg)
+{
+ return 1;
+}
+
+static u32 srpt_get_pr_transport_id(struct se_portal_group *se_tpg,
+ struct se_node_acl *se_nacl,
+ struct t10_pr_registration *pr_reg,
+ int *format_code, unsigned char *buf)
+{
+ struct srpt_node_acl *nacl;
+ struct spc_rdma_transport_id *tr_id;
+
+ nacl = container_of(se_nacl, struct srpt_node_acl, nacl);
+ tr_id = (void *)buf;
+ tr_id->protocol_identifier = SCSI_TRANSPORTID_PROTOCOLID_SRP;
+ memcpy(tr_id->i_port_id, nacl->i_port_id, sizeof(tr_id->i_port_id));
+ return sizeof(*tr_id);
+}
+
+static u32 srpt_get_pr_transport_id_len(struct se_portal_group *se_tpg,
+ struct se_node_acl *se_nacl,
+ struct t10_pr_registration *pr_reg,
+ int *format_code)
+{
+ *format_code = 0;
+ return sizeof(struct spc_rdma_transport_id);
+}
+
+static char *srpt_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
+ const char *buf, u32 *out_tid_len,
+ char **port_nexus_ptr)
+{
+ struct spc_rdma_transport_id *tr_id;
+
+ *port_nexus_ptr = NULL;
+ *out_tid_len = sizeof(struct spc_rdma_transport_id);
+ tr_id = (void *)buf;
+ return (char *)tr_id->i_port_id;
+}
+
+static struct se_node_acl *srpt_alloc_fabric_acl(struct se_portal_group *se_tpg)
+{
+ struct srpt_node_acl *nacl;
+
+ nacl = kzalloc(sizeof(struct srpt_node_acl), GFP_KERNEL);
+ if (!nacl) {
+ printk(KERN_ERR "Unable to alocate struct srpt_node_acl\n");
+ return NULL;
+ }
+
+ return &nacl->nacl;
+}
+
+static void srpt_release_fabric_acl(struct se_portal_group *se_tpg,
+ struct se_node_acl *se_nacl)
+{
+ struct srpt_node_acl *nacl;
+
+ nacl = container_of(se_nacl, struct srpt_node_acl, nacl);
+ kfree(nacl);
+}
+
+static u32 srpt_tpg_get_inst_index(struct se_portal_group *se_tpg)
+{
+ return 1;
+}
+
+static void srpt_release_cmd(struct se_cmd *se_cmd)
+{
+}
+
+/**
+ * srpt_shutdown_session() - Whether or not a session may be shut down.
+ */
+static int srpt_shutdown_session(struct se_session *se_sess)
+{
+ return true;
+}
+
+/**
+ * srpt_close_session() - Forcibly close a session.
+ *
+ * Callback function invoked by the TCM core to clean up sessions associated
+ * with a node ACL when the user invokes
+ * rmdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id
+ */
+static void srpt_close_session(struct se_session *se_sess)
+{
+ DECLARE_COMPLETION_ONSTACK(release_done);
+ struct srpt_rdma_ch *ch;
+ struct srpt_device *sdev;
+ int res;
+
+ ch = se_sess->fabric_sess_ptr;
+ WARN_ON(ch->sess != se_sess);
+
+ pr_debug("ch %p state %d\n", ch, srpt_get_ch_state(ch));
+
+ sdev = ch->sport->sdev;
+ spin_lock_irq(&sdev->spinlock);
+ BUG_ON(ch->release_done);
+ ch->release_done = &release_done;
+ __srpt_close_ch(ch);
+ spin_unlock_irq(&sdev->spinlock);
+
+ res = wait_for_completion_timeout(&release_done, 60 * HZ);
+ WARN_ON(res <= 0);
+}
+
+/**
+ * To do: Find out whether stop_session() has a meaning for transports
+ * other than iSCSI.
+ */
+static void srpt_stop_session(struct se_session *se_sess, int sess_sleep,
+ int conn_sleep)
+{
+}
+
+static void srpt_reset_nexus(struct se_session *sess)
+{
+ printk(KERN_ERR "This is the SRP protocol, not iSCSI\n");
+}
+
+static int srpt_sess_logged_in(struct se_session *se_sess)
+{
+ return true;
+}
+
+/**
+ * srpt_sess_get_index() - Return the value of scsiAttIntrPortIndex (SCSI-MIB).
+ *
+ * A quote from RFC 4455 (SCSI-MIB) about this MIB object:
+ * This object represents an arbitrary integer used to uniquely identify a
+ * particular attached remote initiator port to a particular SCSI target port
+ * within a particular SCSI target device within a particular SCSI instance.
+ */
+static u32 srpt_sess_get_index(struct se_session *se_sess)
+{
+ return 0;
+}
+
+static void srpt_set_default_node_attrs(struct se_node_acl *nacl)
+{
+}
+
+static u32 srpt_get_task_tag(struct se_cmd *se_cmd)
+{
+ struct srpt_send_ioctx *ioctx;
+
+ ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
+ return ioctx->tag;
+}
+
+/* Note: only used from inside debug printk's by the TCM core. */
+static int srpt_get_tcm_cmd_state(struct se_cmd *se_cmd)
+{
+ struct srpt_send_ioctx *ioctx;
+
+ ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
+ return srpt_get_cmd_state(ioctx);
+}
+
+static u16 srpt_set_fabric_sense_len(struct se_cmd *cmd, u32 sense_length)
+{
+ return 0;
+}
+
+static u16 srpt_get_fabric_sense_len(void)
+{
+ return 0;
+}
+
+static int srpt_is_state_remove(struct se_cmd *se_cmd)
+{
+ return 0;
+}
+
+/**
+ * srpt_parse_i_port_id() - Parse an initiator port ID.
+ * @name: ASCII representation of a 128-bit initiator port ID.
+ * @i_port_id: Binary 128-bit port ID.
+ */
+static int srpt_parse_i_port_id(u8 i_port_id[16], const char *name)
+{
+ const char *p;
+ unsigned len, count, leading_zero_bytes;
+ int ret, rc;
+
+ p = name;
+ if (strnicmp(p, "0x", 2) == 0)
+ p += 2;
+ ret = -EINVAL;
+ len = strlen(p);
+ if (len % 2)
+ goto out;
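+ /* Zero-pad the most significant bytes if fewer than 32 hex digits were given. */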
+ count = min(len / 2, 16U);
+ leading_zero_bytes = 16 - count;
+ memset(i_port_id, 0, leading_zero_bytes);
+ rc = hex2bin(i_port_id + leading_zero_bytes, p, count);
+ if (rc < 0)
+ pr_debug("hex2bin failed for srpt_parse_i_port_id: %d\n", rc);
+ ret = 0;
+out:
+ return ret;
+}
+
+/*
+ * configfs callback function invoked for
+ * mkdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id
+ */
+static struct se_node_acl *srpt_make_nodeacl(struct se_portal_group *tpg,
+ struct config_group *group,
+ const char *name)
+{
+ struct srpt_port *sport = container_of(tpg, struct srpt_port, port_tpg_1);
+ struct se_node_acl *se_nacl, *se_nacl_new;
+ struct srpt_node_acl *nacl;
+ int ret = 0;
+ u32 nexus_depth = 1;
+ u8 i_port_id[16];
+
+ if (srpt_parse_i_port_id(i_port_id, name) < 0) {
+ printk(KERN_ERR "invalid initiator port ID %s\n", name);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ se_nacl_new = srpt_alloc_fabric_acl(tpg);
+ if (!se_nacl_new) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ /*
+ * nacl_new may be released by core_tpg_add_initiator_node_acl()
+ * when converting a node ACL from demo mode to explicit
+ */
+ se_nacl = core_tpg_add_initiator_node_acl(tpg, se_nacl_new, name,
+ nexus_depth);
+ if (IS_ERR(se_nacl)) {
+ ret = PTR_ERR(se_nacl);
+ goto err;
+ }
+ /* Locate our struct srpt_node_acl and set sdev and i_port_id. */
+ nacl = container_of(se_nacl, struct srpt_node_acl, nacl);
+ memcpy(&nacl->i_port_id[0], &i_port_id[0], 16);
+ nacl->sport = sport;
+
+ spin_lock_irq(&sport->port_acl_lock);
+ list_add_tail(&nacl->list, &sport->port_acl_list);
+ spin_unlock_irq(&sport->port_acl_lock);
+
+ return se_nacl;
+err:
+ return ERR_PTR(ret);
+}
+
+/*
+ * configfs callback function invoked for
+ * rmdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id
+ */
+static void srpt_drop_nodeacl(struct se_node_acl *se_nacl)
+{
+ struct srpt_node_acl *nacl;
+ struct srpt_device *sdev;
+ struct srpt_port *sport;
+
+ nacl = container_of(se_nacl, struct srpt_node_acl, nacl);
+ sport = nacl->sport;
+ sdev = sport->sdev;
+ spin_lock_irq(&sport->port_acl_lock);
+ list_del(&nacl->list);
+ spin_unlock_irq(&sport->port_acl_lock);
+ core_tpg_del_initiator_node_acl(&sport->port_tpg_1, se_nacl, 1);
+ srpt_release_fabric_acl(NULL, se_nacl);
+}
+
+static ssize_t srpt_tpg_attrib_show_srp_max_rdma_size(
+ struct se_portal_group *se_tpg,
+ char *page)
+{
+ struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
+
+ return sprintf(page, "%u\n", sport->port_attrib.srp_max_rdma_size);
+}
+
+static ssize_t srpt_tpg_attrib_store_srp_max_rdma_size(
+ struct se_portal_group *se_tpg,
+ const char *page,
+ size_t count)
+{
+ struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
+ unsigned long val;
+ int ret;
+
+ ret = strict_strtoul(page, 0, &val);
+ if (ret < 0) {
+ pr_err("strict_strtoul() failed with ret: %d\n", ret);
+ return -EINVAL;
+ }
+ if (val > MAX_SRPT_RDMA_SIZE) {
+ pr_err("val: %lu exceeds MAX_SRPT_RDMA_SIZE: %d\n", val,
+ MAX_SRPT_RDMA_SIZE);
+ return -EINVAL;
+ }
+ if (val < DEFAULT_MAX_RDMA_SIZE) {
+ pr_err("val: %lu smaller than DEFAULT_MAX_RDMA_SIZE: %d\n",
+ val, DEFAULT_MAX_RDMA_SIZE);
+ return -EINVAL;
+ }
+ sport->port_attrib.srp_max_rdma_size = val;
+
+ return count;
+}
+
+TF_TPG_ATTRIB_ATTR(srpt, srp_max_rdma_size, S_IRUGO | S_IWUSR);
+
+static ssize_t srpt_tpg_attrib_show_srp_max_rsp_size(
+ struct se_portal_group *se_tpg,
+ char *page)
+{
+ struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
+
+ return sprintf(page, "%u\n", sport->port_attrib.srp_max_rsp_size);
+}
+
+static ssize_t srpt_tpg_attrib_store_srp_max_rsp_size(
+ struct se_portal_group *se_tpg,
+ const char *page,
+ size_t count)
+{
+ struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
+ unsigned long val;
+ int ret;
+
+ ret = strict_strtoul(page, 0, &val);
+ if (ret < 0) {
+ pr_err("strict_strtoul() failed with ret: %d\n", ret);
+ return -EINVAL;
+ }
+ if (val > MAX_SRPT_RSP_SIZE) {
+ pr_err("val: %lu exceeds MAX_SRPT_RSP_SIZE: %d\n", val,
+ MAX_SRPT_RSP_SIZE);
+ return -EINVAL;
+ }
+ if (val < MIN_MAX_RSP_SIZE) {
+ pr_err("val: %lu smaller than MIN_MAX_RSP_SIZE: %d\n", val,
+ MIN_MAX_RSP_SIZE);
+ return -EINVAL;
+ }
+ sport->port_attrib.srp_max_rsp_size = val;
+
+ return count;
+}
+
+TF_TPG_ATTRIB_ATTR(srpt, srp_max_rsp_size, S_IRUGO | S_IWUSR);
+
+static ssize_t srpt_tpg_attrib_show_srp_sq_size(
+ struct se_portal_group *se_tpg,
+ char *page)
+{
+ struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
+
+ return sprintf(page, "%u\n", sport->port_attrib.srp_sq_size);
+}
+
+static ssize_t srpt_tpg_attrib_store_srp_sq_size(
+ struct se_portal_group *se_tpg,
+ const char *page,
+ size_t count)
+{
+ struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
+ unsigned long val;
+ int ret;
+
+ ret = strict_strtoul(page, 0, &val);
+ if (ret < 0) {
+ pr_err("strict_strtoul() failed with ret: %d\n", ret);
+ return -EINVAL;
+ }
+ if (val > MAX_SRPT_SRQ_SIZE) {
+ pr_err("val: %lu exceeds MAX_SRPT_SRQ_SIZE: %d\n", val,
+ MAX_SRPT_SRQ_SIZE);
+ return -EINVAL;
+ }
+ if (val < MIN_SRPT_SRQ_SIZE) {
+ pr_err("val: %lu smaller than MIN_SRPT_SRQ_SIZE: %d\n", val,
+ MIN_SRPT_SRQ_SIZE);
+ return -EINVAL;
+ }
+ sport->port_attrib.srp_sq_size = val;
+
+ return count;
+}
+
+TF_TPG_ATTRIB_ATTR(srpt, srp_sq_size, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *srpt_tpg_attrib_attrs[] = {
+ &srpt_tpg_attrib_srp_max_rdma_size.attr,
+ &srpt_tpg_attrib_srp_max_rsp_size.attr,
+ &srpt_tpg_attrib_srp_sq_size.attr,
+ NULL,
+};
+
+static ssize_t srpt_tpg_show_enable(
+ struct se_portal_group *se_tpg,
+ char *page)
+{
+ struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
+
+ return snprintf(page, PAGE_SIZE, "%d\n", sport->enabled ? 1 : 0);
+}
+
+static ssize_t srpt_tpg_store_enable(
+ struct se_portal_group *se_tpg,
+ const char *page,
+ size_t count)
+{
+ struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
+ unsigned long tmp;
+ int ret;
+
+ ret = strict_strtoul(page, 0, &tmp);
+ if (ret < 0) {
+ printk(KERN_ERR "Unable to extract srpt_tpg_store_enable\n");
+ return -EINVAL;
+ }
+
+ if ((tmp != 0) && (tmp != 1)) {
+ printk(KERN_ERR "Illegal value for srpt_tpg_store_enable: %lu\n", tmp);
+ return -EINVAL;
+ }
+ if (tmp == 1)
+ sport->enabled = true;
+ else
+ sport->enabled = false;
+
+ return count;
+}
+
+TF_TPG_BASE_ATTR(srpt, enable, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *srpt_tpg_attrs[] = {
+ &srpt_tpg_enable.attr,
+ NULL,
+};
+
+/**
+ * configfs callback invoked for
+ * mkdir /sys/kernel/config/target/$driver/$port/$tpg
+ */
+static struct se_portal_group *srpt_make_tpg(struct se_wwn *wwn,
+ struct config_group *group,
+ const char *name)
+{
+ struct srpt_port *sport = container_of(wwn, struct srpt_port, port_wwn);
+ int res;
+
+ /* Initialize sport->port_wwn and sport->port_tpg_1 */
+ res = core_tpg_register(&srpt_target->tf_ops, &sport->port_wwn,
+ &sport->port_tpg_1, sport, TRANSPORT_TPG_TYPE_NORMAL);
+ if (res)
+ return ERR_PTR(res);
+
+ return &sport->port_tpg_1;
+}
+
+/**
+ * configfs callback invoked for
+ * rmdir /sys/kernel/config/target/$driver/$port/$tpg
+ */
+static void srpt_drop_tpg(struct se_portal_group *tpg)
+{
+ struct srpt_port *sport = container_of(tpg,
+ struct srpt_port, port_tpg_1);
+
+ sport->enabled = false;
+ core_tpg_deregister(&sport->port_tpg_1);
+}
+
+/**
+ * configfs callback invoked for
+ * mkdir /sys/kernel/config/target/$driver/$port
+ */
+static struct se_wwn *srpt_make_tport(struct target_fabric_configfs *tf,
+ struct config_group *group,
+ const char *name)
+{
+ struct srpt_port *sport;
+ int ret;
+
+ sport = srpt_lookup_port(name);
+ pr_debug("make_tport(%s)\n", name);
+ ret = -EINVAL;
+ if (!sport)
+ goto err;
+
+ return &sport->port_wwn;
+
+err:
+ return ERR_PTR(ret);
+}
+
+/**
+ * configfs callback invoked for
+ * rmdir /sys/kernel/config/target/$driver/$port
+ */
+static void srpt_drop_tport(struct se_wwn *wwn)
+{
+ struct srpt_port *sport = container_of(wwn, struct srpt_port, port_wwn);
+
+ pr_debug("drop_tport(%s\n", config_item_name(&sport->port_wwn.wwn_group.cg_item));
+}
+
+static ssize_t srpt_wwn_show_attr_version(struct target_fabric_configfs *tf,
+ char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%s\n", DRV_VERSION);
+}
+
+TF_WWN_ATTR_RO(srpt, version);
+
+static struct configfs_attribute *srpt_wwn_attrs[] = {
+ &srpt_wwn_version.attr,
+ NULL,
+};
+
+static struct target_core_fabric_ops srpt_template = {
+ .get_fabric_name = srpt_get_fabric_name,
+ .get_fabric_proto_ident = srpt_get_fabric_proto_ident,
+ .tpg_get_wwn = srpt_get_fabric_wwn,
+ .tpg_get_tag = srpt_get_tag,
+ .tpg_get_default_depth = srpt_get_default_depth,
+ .tpg_get_pr_transport_id = srpt_get_pr_transport_id,
+ .tpg_get_pr_transport_id_len = srpt_get_pr_transport_id_len,
+ .tpg_parse_pr_out_transport_id = srpt_parse_pr_out_transport_id,
+ .tpg_check_demo_mode = srpt_check_false,
+ .tpg_check_demo_mode_cache = srpt_check_true,
+ .tpg_check_demo_mode_write_protect = srpt_check_true,
+ .tpg_check_prod_mode_write_protect = srpt_check_false,
+ .tpg_alloc_fabric_acl = srpt_alloc_fabric_acl,
+ .tpg_release_fabric_acl = srpt_release_fabric_acl,
+ .tpg_get_inst_index = srpt_tpg_get_inst_index,
+ .release_cmd = srpt_release_cmd,
+ .check_stop_free = srpt_check_stop_free,
+ .shutdown_session = srpt_shutdown_session,
+ .close_session = srpt_close_session,
+ .stop_session = srpt_stop_session,
+ .fall_back_to_erl0 = srpt_reset_nexus,
+ .sess_logged_in = srpt_sess_logged_in,
+ .sess_get_index = srpt_sess_get_index,
+ .sess_get_initiator_sid = NULL,
+ .write_pending = srpt_write_pending,
+ .write_pending_status = srpt_write_pending_status,
+ .set_default_node_attributes = srpt_set_default_node_attrs,
+ .get_task_tag = srpt_get_task_tag,
+ .get_cmd_state = srpt_get_tcm_cmd_state,
+ .queue_data_in = srpt_queue_response,
+ .queue_status = srpt_queue_status,
+ .queue_tm_rsp = srpt_queue_response,
+ .get_fabric_sense_len = srpt_get_fabric_sense_len,
+ .set_fabric_sense_len = srpt_set_fabric_sense_len,
+ .is_state_remove = srpt_is_state_remove,
+ /*
+ * Setup function pointers for generic logic in
+ * target_core_fabric_configfs.c
+ */
+ .fabric_make_wwn = srpt_make_tport,
+ .fabric_drop_wwn = srpt_drop_tport,
+ .fabric_make_tpg = srpt_make_tpg,
+ .fabric_drop_tpg = srpt_drop_tpg,
+ .fabric_post_link = NULL,
+ .fabric_pre_unlink = NULL,
+ .fabric_make_np = NULL,
+ .fabric_drop_np = NULL,
+ .fabric_make_nodeacl = srpt_make_nodeacl,
+ .fabric_drop_nodeacl = srpt_drop_nodeacl,
+};
+
+/**
+ * srpt_init_module() - Kernel module initialization.
+ *
+ * Note: Since ib_register_client() registers callback functions, and since at
+ * least one of these callback functions (srpt_add_one()) calls target core
+ * functions, this driver must be registered with the target core before
+ * ib_register_client() is called.
+ */
+static int __init srpt_init_module(void)
+{
+ int ret;
+
+ ret = -EINVAL;
+ if (srp_max_req_size < MIN_MAX_REQ_SIZE) {
+ printk(KERN_ERR "invalid value %d for kernel module parameter"
+ " srp_max_req_size -- must be at least %d.\n",
+ srp_max_req_size, MIN_MAX_REQ_SIZE);
+ goto out;
+ }
+
+ if (srpt_srq_size < MIN_SRPT_SRQ_SIZE
+ || srpt_srq_size > MAX_SRPT_SRQ_SIZE) {
+ printk(KERN_ERR "invalid value %d for kernel module parameter"
+ " srpt_srq_size -- must be in the range [%d..%d].\n",
+ srpt_srq_size, MIN_SRPT_SRQ_SIZE, MAX_SRPT_SRQ_SIZE);
+ goto out;
+ }
+
+ spin_lock_init(&srpt_dev_lock);
+ INIT_LIST_HEAD(&srpt_dev_list);
+
+ ret = -ENODEV;
+ srpt_target = target_fabric_configfs_init(THIS_MODULE, "srpt");
+ if (!srpt_target) {
+ printk(KERN_ERR "couldn't register\n");
+ goto out;
+ }
+
+ srpt_target->tf_ops = srpt_template;
+
+ /* Enable SG chaining */
+ srpt_target->tf_ops.task_sg_chaining = true;
+
+ /*
+ * Set up default attribute lists.
+ */
+ srpt_target->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = srpt_wwn_attrs;
+ srpt_target->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = srpt_tpg_attrs;
+ srpt_target->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = srpt_tpg_attrib_attrs;
+ srpt_target->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
+ srpt_target->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
+ srpt_target->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;
+ srpt_target->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
+ srpt_target->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
+ srpt_target->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
+
+ ret = target_fabric_configfs_register(srpt_target);
+ if (ret < 0) {
+ printk(KERN_ERR "couldn't register\n");
+ goto out_free_target;
+ }
+
+ ret = ib_register_client(&srpt_client);
+ if (ret) {
+ printk(KERN_ERR "couldn't register IB client\n");
+ goto out_unregister_target;
+ }
+
+ return 0;
+
+out_unregister_target:
+ target_fabric_configfs_deregister(srpt_target);
+ srpt_target = NULL;
+out_free_target:
+ if (srpt_target)
+ target_fabric_configfs_free(srpt_target);
+out:
+ return ret;
+}
+
+static void __exit srpt_cleanup_module(void)
+{
+ ib_unregister_client(&srpt_client);
+ target_fabric_configfs_deregister(srpt_target);
+ srpt_target = NULL;
+}
+
+module_init(srpt_init_module);
+module_exit(srpt_cleanup_module);
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h
new file mode 100644
index 000000000000..b4b4bbcd7f16
--- /dev/null
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.h
@@ -0,0 +1,444 @@
+/*
+ * Copyright (c) 2006 - 2009 Mellanox Technology Inc. All rights reserved.
+ * Copyright (C) 2009 - 2010 Bart Van Assche <bvanassche@acm.org>.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef IB_SRPT_H
+#define IB_SRPT_H
+
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_sa.h>
+#include <rdma/ib_cm.h>
+
+#include <scsi/srp.h>
+
+#include "ib_dm_mad.h"
+
+/*
+ * The prefix the ServiceName field must start with in the device management
+ * ServiceEntries attribute pair. See also the SRP specification.
+ */
+#define SRP_SERVICE_NAME_PREFIX "SRP.T10:"
+
+enum {
+ /*
+ * SRP IOControllerProfile attributes for SRP target ports that have
+ * not been defined in <scsi/srp.h>. Source: section B.7, table B.7
+ * in the SRP specification.
+ */
+ SRP_PROTOCOL = 0x0108,
+ SRP_PROTOCOL_VERSION = 0x0001,
+ SRP_IO_SUBCLASS = 0x609e,
+ SRP_SEND_TO_IOC = 0x01,
+ SRP_SEND_FROM_IOC = 0x02,
+ SRP_RDMA_READ_FROM_IOC = 0x08,
+ SRP_RDMA_WRITE_FROM_IOC = 0x20,
+
+ /*
+ * srp_login_cmd.req_flags bitmasks. See also table 9 in the SRP
+ * specification.
+ */
+ SRP_MTCH_ACTION = 0x03, /* MULTI-CHANNEL ACTION */
+ SRP_LOSOLNT = 0x10, /* logout solicited notification */
+ SRP_CRSOLNT = 0x20, /* credit request solicited notification */
+ SRP_AESOLNT = 0x40, /* asynchronous event solicited notification */
+
+ /*
+ * srp_cmd.sol_nt / srp_tsk_mgmt.sol_not bitmasks. See also tables
+ * 18 and 20 in the SRP specification.
+ */
+ SRP_SCSOLNT = 0x02, /* SCSOLNT = successful solicited notification */
+ SRP_UCSOLNT = 0x04, /* UCSOLNT = unsuccessful solicited notification */
+
+ /*
+ * srp_rsp.sol_not / srp_t_logout.sol_not bitmasks. See also tables
+ * 16 and 22 in the SRP specification.
+ */
+ SRP_SOLNT = 0x01, /* SOLNT = solicited notification */
+
+ /* See also table 24 in the SRP specification. */
+ SRP_TSK_MGMT_SUCCESS = 0x00,
+ SRP_TSK_MGMT_FUNC_NOT_SUPP = 0x04,
+ SRP_TSK_MGMT_FAILED = 0x05,
+
+ /* See also table 21 in the SRP specification. */
+ SRP_CMD_SIMPLE_Q = 0x0,
+ SRP_CMD_HEAD_OF_Q = 0x1,
+ SRP_CMD_ORDERED_Q = 0x2,
+ SRP_CMD_ACA = 0x4,
+
+ SRP_LOGIN_RSP_MULTICHAN_NO_CHAN = 0x0,
+ SRP_LOGIN_RSP_MULTICHAN_TERMINATED = 0x1,
+ SRP_LOGIN_RSP_MULTICHAN_MAINTAINED = 0x2,
+
+ SRPT_DEF_SG_TABLESIZE = 128,
+ SRPT_DEF_SG_PER_WQE = 16,
+
+ MIN_SRPT_SQ_SIZE = 16,
+ DEF_SRPT_SQ_SIZE = 4096,
+ SRPT_RQ_SIZE = 128,
+ MIN_SRPT_SRQ_SIZE = 4,
+ DEFAULT_SRPT_SRQ_SIZE = 4095,
+ MAX_SRPT_SRQ_SIZE = 65535,
+ MAX_SRPT_RDMA_SIZE = 1U << 24,
+ MAX_SRPT_RSP_SIZE = 1024,
+
+ MIN_MAX_REQ_SIZE = 996,
+ DEFAULT_MAX_REQ_SIZE
+ = sizeof(struct srp_cmd)/*48*/
+ + sizeof(struct srp_indirect_buf)/*20*/
+ + 128 * sizeof(struct srp_direct_buf)/*16*/,
+
+ MIN_MAX_RSP_SIZE = sizeof(struct srp_rsp)/*36*/ + 4,
+ DEFAULT_MAX_RSP_SIZE = 256, /* leaves 220 bytes for sense data */
+
+ DEFAULT_MAX_RDMA_SIZE = 65536,
+};
+
+enum srpt_opcode {
+ SRPT_RECV,
+ SRPT_SEND,
+ SRPT_RDMA_MID,
+ SRPT_RDMA_ABORT,
+ SRPT_RDMA_READ_LAST,
+ SRPT_RDMA_WRITE_LAST,
+};
+
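+/*
+ * IB work request IDs are 64 bits wide: encode_wr_id() stores the SRPT opcode
+ * in the upper 32 bits and the I/O context index in the lower 32 bits.
+ */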
+static inline u64 encode_wr_id(u8 opcode, u32 idx)
+{
+ return ((u64)opcode << 32) | idx;
+}
+static inline enum srpt_opcode opcode_from_wr_id(u64 wr_id)
+{
+ return wr_id >> 32;
+}
+static inline u32 idx_from_wr_id(u64 wr_id)
+{
+ return (u32)wr_id;
+}
+
+struct rdma_iu {
+ u64 raddr;
+ u32 rkey;
+ struct ib_sge *sge;
+ u32 sge_cnt;
+ int mem_id;
+};
+
+/**
+ * enum srpt_command_state - SCSI command state managed by SRPT.
+ * @SRPT_STATE_NEW: New command arrived and is being processed.
+ * @SRPT_STATE_NEED_DATA: Processing a write or bidir command and waiting
+ * for data arrival.
+ * @SRPT_STATE_DATA_IN: Data for the write or bidir command arrived and is
+ * being processed.
+ * @SRPT_STATE_CMD_RSP_SENT: SRP_RSP for SRP_CMD has been sent.
+ * @SRPT_STATE_MGMT: Processing a SCSI task management command.
+ * @SRPT_STATE_MGMT_RSP_SENT: SRP_RSP for SRP_TSK_MGMT has been sent.
+ * @SRPT_STATE_DONE: Command processing finished successfully, command
+ * processing has been aborted or command processing
+ * failed.
+ */
+enum srpt_command_state {
+ SRPT_STATE_NEW = 0,
+ SRPT_STATE_NEED_DATA = 1,
+ SRPT_STATE_DATA_IN = 2,
+ SRPT_STATE_CMD_RSP_SENT = 3,
+ SRPT_STATE_MGMT = 4,
+ SRPT_STATE_MGMT_RSP_SENT = 5,
+ SRPT_STATE_DONE = 6,
+};
+
+/**
+ * struct srpt_ioctx - Shared SRPT I/O context information.
+ * @buf: Pointer to the buffer.
+ * @dma: DMA address of the buffer.
+ * @index: Index of the I/O context in its ioctx_ring array.
+ */
+struct srpt_ioctx {
+ void *buf;
+ dma_addr_t dma;
+ uint32_t index;
+};
+
+/**
+ * struct srpt_recv_ioctx - SRPT receive I/O context.
+ * @ioctx: See above.
+ * @wait_list: Node for insertion in srpt_rdma_ch.cmd_wait_list.
+ */
+struct srpt_recv_ioctx {
+ struct srpt_ioctx ioctx;
+ struct list_head wait_list;
+};
+
+/**
+ * struct srpt_send_ioctx - SRPT send I/O context.
+ * @ioctx: See above.
+ * @ch: Channel pointer.
+ * @free_list: Node in srpt_rdma_ch.free_list.
+ * @n_rbuf: Number of data buffers in the received SRP command.
+ * @rbufs: Pointer to SRP data buffer array.
+ * @single_rbuf: SRP data buffer if the command has only a single buffer.
+ * @sg: Pointer to sg-list associated with this I/O context.
+ * @sg_cnt: SG-list size.
+ * @mapped_sg_count: ib_dma_map_sg() return value.
+ * @n_rdma_ius: Number of elements in the rdma_ius array.
+ * @rdma_ius: Array with information about the RDMA mapping.
+ * @tag: Tag of the received SRP information unit.
+ * @spinlock: Protects 'state'.
+ * @state: I/O context state.
+ * @rdma_aborted: If initiating a multipart RDMA transfer failed, whether
+ * the already initiated transfers have finished.
+ * @cmd: Target core command data structure.
+ * @sense_data: SCSI sense data.
+ */
+struct srpt_send_ioctx {
+ struct srpt_ioctx ioctx;
+ struct srpt_rdma_ch *ch;
+ struct kref kref;
+ struct rdma_iu *rdma_ius;
+ struct srp_direct_buf *rbufs;
+ struct srp_direct_buf single_rbuf;
+ struct scatterlist *sg;
+ struct list_head free_list;
+ spinlock_t spinlock;
+ enum srpt_command_state state;
+ bool rdma_aborted;
+ struct se_cmd cmd;
+ struct completion tx_done;
+ u64 tag;
+ int sg_cnt;
+ int mapped_sg_count;
+ u16 n_rdma_ius;
+ u8 n_rdma;
+ u8 n_rbuf;
+ bool queue_status_only;
+ u8 sense_data[SCSI_SENSE_BUFFERSIZE];
+};
+
+/**
+ * enum rdma_ch_state - SRP channel state.
+ * @CH_CONNECTING: QP is in RTR state; waiting for RTU.
+ * @CH_LIVE: QP is in RTS state.
+ * @CH_DISCONNECTING: DREQ has been received or sent; waiting for DREP.
+ * @CH_DRAINING: QP is in ERR state; waiting for last WQE event.
+ * @CH_RELEASING: Last WQE event has been received; releasing resources.
+ */
+enum rdma_ch_state {
+ CH_CONNECTING,
+ CH_LIVE,
+ CH_DISCONNECTING,
+ CH_DRAINING,
+ CH_RELEASING
+};
+
+/**
+ * struct srpt_rdma_ch - RDMA channel.
+ * @wait_queue: Allows the kernel thread to wait for more work.
+ * @thread: Kernel thread that processes the IB queues associated with
+ * the channel.
+ * @cm_id: IB CM ID associated with the channel.
+ * @qp: IB queue pair used for communicating over this channel.
+ * @cq: IB completion queue for this channel.
+ * @rq_size: IB receive queue size.
+ * @rsp_size: IB response message size in bytes.
+ * @sq_wr_avail: number of work requests available in the send queue.
+ * @sport: pointer to the information of the HCA port used by this
+ * channel.
+ * @i_port_id: 128-bit initiator port identifier copied from SRP_LOGIN_REQ.
+ * @t_port_id: 128-bit target port identifier copied from SRP_LOGIN_REQ.
+ * @max_ti_iu_len: maximum target-to-initiator information unit length.
+ * @req_lim: request limit: maximum number of requests that may be sent
+ * by the initiator without having received a response.
+ * @req_lim_delta: Number of credits not yet sent back to the initiator.
+ * @spinlock: Protects free_list and state.
+ * @free_list: Head of list with free send I/O contexts.
+ * @state: channel state. See also enum rdma_ch_state.
+ * @ioctx_ring: Send ring.
+ * @wc: IB work completion array for srpt_process_completion().
+ * @list: Node for insertion in the srpt_device.rch_list list.
+ * @cmd_wait_list: List of SCSI commands that arrived before the RTU event. This
+ * list contains struct srpt_recv_ioctx elements and is protected
+ * against concurrent modification by the cm_id spinlock.
+ * @sess: Session information associated with this SRP channel.
+ * @sess_name: Session name.
+ * @release_work: Allows scheduling of srpt_release_channel().
+ * @release_done: Enables waiting for srpt_release_channel() completion.
+ */
+struct srpt_rdma_ch {
+ wait_queue_head_t wait_queue;
+ struct task_struct *thread;
+ struct ib_cm_id *cm_id;
+ struct ib_qp *qp;
+ struct ib_cq *cq;
+ int rq_size;
+ u32 rsp_size;
+ atomic_t sq_wr_avail;
+ struct srpt_port *sport;
+ u8 i_port_id[16];
+ u8 t_port_id[16];
+ int max_ti_iu_len;
+ atomic_t req_lim;
+ atomic_t req_lim_delta;
+ spinlock_t spinlock;
+ struct list_head free_list;
+ enum rdma_ch_state state;
+ struct srpt_send_ioctx **ioctx_ring;
+ struct ib_wc wc[16];
+ struct list_head list;
+ struct list_head cmd_wait_list;
+ struct se_session *sess;
+ u8 sess_name[36];
+ struct work_struct release_work;
+ struct completion *release_done;
+};
+
+/**
+ * struct srpt_port_attrib - Attributes for SRPT port.
+ * @srp_max_rdma_size: Maximum size of SRP RDMA transfers for new connections.
+ * @srp_max_rsp_size: Maximum size of SRP response messages in bytes.
+ * @srp_sq_size: IB send queue size for new channels.
+ */
+struct srpt_port_attrib {
+ u32 srp_max_rdma_size;
+ u32 srp_max_rsp_size;
+ u32 srp_sq_size;
+};
+
+/**
+ * struct srpt_port - Information associated by SRPT with a single IB port.
+ * @sdev: backpointer to the HCA information.
+ * @mad_agent: per-port management datagram processing information.
+ * @enabled: Whether or not this target port is enabled.
+ * @port_guid: ASCII representation of Port GUID
+ * @port: one-based port number.
+ * @sm_lid: cached value of the port's sm_lid.
+ * @lid: cached value of the port's lid.
+ * @gid: cached value of the port's gid.
+ * @port_acl_lock: Spinlock that protects port_acl_list.
+ * @work: work structure for refreshing the aforementioned cached values.
+ * @port_tpg_1: Target portal group = 1 data.
+ * @port_wwn: Target core WWN data.
+ * @port_acl_list: Head of the list with all node ACLs for this port.
+ */
+struct srpt_port {
+ struct srpt_device *sdev;
+ struct ib_mad_agent *mad_agent;
+ bool enabled;
+ u8 port_guid[64];
+ u8 port;
+ u16 sm_lid;
+ u16 lid;
+ union ib_gid gid;
+ spinlock_t port_acl_lock;
+ struct work_struct work;
+ struct se_portal_group port_tpg_1;
+ struct se_wwn port_wwn;
+ struct list_head port_acl_list;
+ struct srpt_port_attrib port_attrib;
+};
+
+/**
+ * struct srpt_device - Information associated by SRPT with a single HCA.
+ * @device: Backpointer to the struct ib_device managed by the IB core.
+ * @pd: IB protection domain.
+ * @mr: L_Key (local key) with write access to all local memory.
+ * @srq: Per-HCA SRQ (shared receive queue).
+ * @cm_id: Connection identifier.
+ * @dev_attr: Attributes of the InfiniBand device as obtained during the
+ * ib_client.add() callback.
+ * @srq_size: SRQ size.
+ * @ioctx_ring: Receive I/O context ring used with the per-HCA SRQ.
+ * @rch_list: Per-device channel list -- see also srpt_rdma_ch.list.
+ * @ch_releaseQ: Enables waiting for removal from rch_list.
+ * @spinlock: Protects rch_list and tpg.
+ * @port: Information about the ports owned by this HCA.
+ * @event_handler: Per-HCA asynchronous IB event handler.
+ * @list: Node in srpt_dev_list.
+ */
+struct srpt_device {
+ struct ib_device *device;
+ struct ib_pd *pd;
+ struct ib_mr *mr;
+ struct ib_srq *srq;
+ struct ib_cm_id *cm_id;
+ struct ib_device_attr dev_attr;
+ int srq_size;
+ struct srpt_recv_ioctx **ioctx_ring;
+ struct list_head rch_list;
+ wait_queue_head_t ch_releaseQ;
+ spinlock_t spinlock;
+ struct srpt_port port[2];
+ struct ib_event_handler event_handler;
+ struct list_head list;
+};
+
+/**
+ * struct srpt_node_acl - Per-initiator ACL data (managed via configfs).
+ * @i_port_id: 128-bit SRP initiator port ID.
+ * @sport: port information.
+ * @nacl: Target core node ACL information.
+ * @list: Element of the per-HCA ACL list.
+ */
+struct srpt_node_acl {
+ u8 i_port_id[16];
+ struct srpt_port *sport;
+ struct se_node_acl nacl;
+ struct list_head list;
+};
+
+/*
+ * SRP-releated SCSI persistent reservation definitions.
+ *
+ * See also SPC4r28, section 7.6.1 (Protocol specific parameters introduction).
+ * See also SPC4r28, section 7.6.4.5 (TransportID for initiator ports using
+ * SCSI over an RDMA interface).
+ */
+
+enum {
+ SCSI_TRANSPORTID_PROTOCOLID_SRP = 4,
+};
+
+struct spc_rdma_transport_id {
+ uint8_t protocol_identifier;
+ uint8_t reserved[7];
+ uint8_t i_port_id[16];
+};
+
+#endif /* IB_SRPT_H */
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index 4cf25347b015..76457d50bc34 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -369,7 +369,7 @@ static int evdev_fetch_next_event(struct evdev_client *client,
spin_lock_irq(&client->buffer_lock);
- have_event = client->head != client->tail;
+ have_event = client->packet_head != client->tail;
if (have_event) {
*event = client->buffer[client->tail++];
client->tail &= client->bufsize - 1;
@@ -391,14 +391,13 @@ static ssize_t evdev_read(struct file *file, char __user *buffer,
if (count < input_event_size())
return -EINVAL;
- if (client->packet_head == client->tail && evdev->exist &&
- (file->f_flags & O_NONBLOCK))
- return -EAGAIN;
-
- retval = wait_event_interruptible(evdev->wait,
- client->packet_head != client->tail || !evdev->exist);
- if (retval)
- return retval;
+ if (!(file->f_flags & O_NONBLOCK)) {
+ retval = wait_event_interruptible(evdev->wait,
+ client->packet_head != client->tail ||
+ !evdev->exist);
+ if (retval)
+ return retval;
+ }
if (!evdev->exist)
return -ENODEV;
@@ -412,6 +411,9 @@ static ssize_t evdev_read(struct file *file, char __user *buffer,
retval += input_event_size();
}
+ if (retval == 0 && (file->f_flags & O_NONBLOCK))
+ return -EAGAIN;
+
return retval;
}
diff --git a/drivers/input/input-polldev.c b/drivers/input/input-polldev.c
index 7dfe1009fae0..7f161d93203c 100644
--- a/drivers/input/input-polldev.c
+++ b/drivers/input/input-polldev.c
@@ -84,10 +84,12 @@ static ssize_t input_polldev_set_poll(struct device *dev,
{
struct input_polled_dev *polldev = dev_get_drvdata(dev);
struct input_dev *input = polldev->input;
- unsigned long interval;
+ unsigned int interval;
+ int err;
- if (strict_strtoul(buf, 0, &interval))
- return -EINVAL;
+ err = kstrtouint(buf, 0, &interval);
+ if (err)
+ return err;
if (interval < polldev->poll_interval_min)
return -EINVAL;
diff --git a/drivers/input/input.c b/drivers/input/input.c
index da38d97a51b1..1f78c957a75a 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -1624,7 +1624,7 @@ static struct device_type input_dev_type = {
#endif
};
-static char *input_devnode(struct device *dev, mode_t *mode)
+static char *input_devnode(struct device *dev, umode_t *mode)
{
return kasprintf(GFP_KERNEL, "input/%s", dev_name(dev));
}
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 32bbd4c77b7c..fd7a0d5bc94d 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -98,15 +98,15 @@
#define XTYPE_XBOX360W 2
#define XTYPE_UNKNOWN 3
-static int dpad_to_buttons;
+static bool dpad_to_buttons;
module_param(dpad_to_buttons, bool, S_IRUGO);
MODULE_PARM_DESC(dpad_to_buttons, "Map D-PAD to buttons rather than axes for unknown pads");
-static int triggers_to_buttons;
+static bool triggers_to_buttons;
module_param(triggers_to_buttons, bool, S_IRUGO);
MODULE_PARM_DESC(triggers_to_buttons, "Map triggers to buttons rather than axes for unknown pads");
-static int sticks_to_null;
+static bool sticks_to_null;
module_param(sticks_to_null, bool, S_IRUGO);
MODULE_PARM_DESC(sticks_to_null, "Do not map sticks at all for unknown pads");
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 615c21f2a553..cdc385b2cf7d 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -221,6 +221,22 @@ config KEYBOARD_TCA6416
To compile this driver as a module, choose M here: the
module will be called tca6416_keypad.
+config KEYBOARD_TCA8418
+ tristate "TCA8418 Keypad Support"
+ depends on I2C
+ help
+ This driver implements basic keypad functionality
+ for keys connected through the TCA8418 keypad decoder.
+
+ Say Y here if your device has keys connected to
+ a TCA8418 keypad decoder.
+
+ If enabled, the complete TCA8418 device will be managed through
+ this driver.
+
+ To compile this driver as a module, choose M here: the
+ module will be called tca8418_keypad.
+
config KEYBOARD_MATRIX
tristate "GPIO driven matrix keypad support"
depends on GENERIC_GPIO
@@ -425,9 +441,10 @@ config KEYBOARD_PMIC8XXX
config KEYBOARD_SAMSUNG
tristate "Samsung keypad support"
- depends on SAMSUNG_DEV_KEYPAD
+ depends on HAVE_CLK
help
- Say Y here if you want to use the Samsung keypad.
+ Say Y here if you want to use the keypad on your Samsung mobile
+ device.
To compile this driver as a module, choose M here: the
module will be called samsung-keypad.
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index ddde0fd476f7..df7061f12918 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_KEYBOARD_EP93XX) += ep93xx_keypad.o
obj-$(CONFIG_KEYBOARD_GPIO) += gpio_keys.o
obj-$(CONFIG_KEYBOARD_GPIO_POLLED) += gpio_keys_polled.o
obj-$(CONFIG_KEYBOARD_TCA6416) += tca6416-keypad.o
+obj-$(CONFIG_KEYBOARD_TCA8418) += tca8418_keypad.o
obj-$(CONFIG_KEYBOARD_HIL) += hil_kbd.o
obj-$(CONFIG_KEYBOARD_HIL_OLD) += hilkbd.o
obj-$(CONFIG_KEYBOARD_IMX) += imx_keypad.o
diff --git a/drivers/input/keyboard/adp5520-keys.c b/drivers/input/keyboard/adp5520-keys.c
index 3db8006dac3a..e9e8674dfda1 100644
--- a/drivers/input/keyboard/adp5520-keys.c
+++ b/drivers/input/keyboard/adp5520-keys.c
@@ -202,18 +202,7 @@ static struct platform_driver adp5520_keys_driver = {
.probe = adp5520_keys_probe,
.remove = __devexit_p(adp5520_keys_remove),
};
-
-static int __init adp5520_keys_init(void)
-{
- return platform_driver_register(&adp5520_keys_driver);
-}
-module_init(adp5520_keys_init);
-
-static void __exit adp5520_keys_exit(void)
-{
- platform_driver_unregister(&adp5520_keys_driver);
-}
-module_exit(adp5520_keys_exit);
+module_platform_driver(adp5520_keys_driver);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("Keys ADP5520 Driver");
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index 19cfc0cf558c..e05a2e7073c6 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -1305,7 +1305,7 @@ static ssize_t atkbd_show_extra(struct atkbd *atkbd, char *buf)
static ssize_t atkbd_set_extra(struct atkbd *atkbd, const char *buf, size_t count)
{
struct input_dev *old_dev, *new_dev;
- unsigned long value;
+ unsigned int value;
int err;
bool old_extra;
unsigned char old_set;
@@ -1313,7 +1313,11 @@ static ssize_t atkbd_set_extra(struct atkbd *atkbd, const char *buf, size_t coun
if (!atkbd->write)
return -EIO;
- if (strict_strtoul(buf, 10, &value) || value > 1)
+ err = kstrtouint(buf, 10, &value);
+ if (err)
+ return err;
+
+ if (value > 1)
return -EINVAL;
if (atkbd->extra != value) {
@@ -1389,11 +1393,15 @@ static ssize_t atkbd_show_scroll(struct atkbd *atkbd, char *buf)
static ssize_t atkbd_set_scroll(struct atkbd *atkbd, const char *buf, size_t count)
{
struct input_dev *old_dev, *new_dev;
- unsigned long value;
+ unsigned int value;
int err;
bool old_scroll;
- if (strict_strtoul(buf, 10, &value) || value > 1)
+ err = kstrtouint(buf, 10, &value);
+ if (err)
+ return err;
+
+ if (value > 1)
return -EINVAL;
if (atkbd->scroll != value) {
@@ -1433,7 +1441,7 @@ static ssize_t atkbd_show_set(struct atkbd *atkbd, char *buf)
static ssize_t atkbd_set_set(struct atkbd *atkbd, const char *buf, size_t count)
{
struct input_dev *old_dev, *new_dev;
- unsigned long value;
+ unsigned int value;
int err;
unsigned char old_set;
bool old_extra;
@@ -1441,7 +1449,11 @@ static ssize_t atkbd_set_set(struct atkbd *atkbd, const char *buf, size_t count)
if (!atkbd->write)
return -EIO;
- if (strict_strtoul(buf, 10, &value) || (value != 2 && value != 3))
+ err = kstrtouint(buf, 10, &value);
+ if (err)
+ return err;
+
+ if (value != 2 && value != 3)
return -EINVAL;
if (atkbd->set != value) {
@@ -1484,14 +1496,18 @@ static ssize_t atkbd_show_softrepeat(struct atkbd *atkbd, char *buf)
static ssize_t atkbd_set_softrepeat(struct atkbd *atkbd, const char *buf, size_t count)
{
struct input_dev *old_dev, *new_dev;
- unsigned long value;
+ unsigned int value;
int err;
bool old_softrepeat, old_softraw;
if (!atkbd->write)
return -EIO;
- if (strict_strtoul(buf, 10, &value) || value > 1)
+ err = kstrtouint(buf, 10, &value);
+ if (err)
+ return err;
+
+ if (value > 1)
return -EINVAL;
if (atkbd->softrepeat != value) {
@@ -1534,11 +1550,15 @@ static ssize_t atkbd_show_softraw(struct atkbd *atkbd, char *buf)
static ssize_t atkbd_set_softraw(struct atkbd *atkbd, const char *buf, size_t count)
{
struct input_dev *old_dev, *new_dev;
- unsigned long value;
+ unsigned int value;
int err;
bool old_softraw;
- if (strict_strtoul(buf, 10, &value) || value > 1)
+ err = kstrtouint(buf, 10, &value);
+ if (err)
+ return err;
+
+ if (value > 1)
return -EINVAL;
if (atkbd->softraw != value) {
diff --git a/drivers/input/keyboard/bf54x-keys.c b/drivers/input/keyboard/bf54x-keys.c
index 7d989603f875..8eb9116e0a5f 100644
--- a/drivers/input/keyboard/bf54x-keys.c
+++ b/drivers/input/keyboard/bf54x-keys.c
@@ -384,7 +384,7 @@ static int bfin_kpad_resume(struct platform_device *pdev)
# define bfin_kpad_resume NULL
#endif
-struct platform_driver bfin_kpad_device_driver = {
+static struct platform_driver bfin_kpad_device_driver = {
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
@@ -394,19 +394,7 @@ struct platform_driver bfin_kpad_device_driver = {
.suspend = bfin_kpad_suspend,
.resume = bfin_kpad_resume,
};
-
-static int __init bfin_kpad_init(void)
-{
- return platform_driver_register(&bfin_kpad_device_driver);
-}
-
-static void __exit bfin_kpad_exit(void)
-{
- platform_driver_unregister(&bfin_kpad_device_driver);
-}
-
-module_init(bfin_kpad_init);
-module_exit(bfin_kpad_exit);
+module_platform_driver(bfin_kpad_device_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
diff --git a/drivers/input/keyboard/ep93xx_keypad.c b/drivers/input/keyboard/ep93xx_keypad.c
index 4662c5da8018..0ba69f3fcb52 100644
--- a/drivers/input/keyboard/ep93xx_keypad.c
+++ b/drivers/input/keyboard/ep93xx_keypad.c
@@ -390,19 +390,7 @@ static struct platform_driver ep93xx_keypad_driver = {
.suspend = ep93xx_keypad_suspend,
.resume = ep93xx_keypad_resume,
};
-
-static int __init ep93xx_keypad_init(void)
-{
- return platform_driver_register(&ep93xx_keypad_driver);
-}
-
-static void __exit ep93xx_keypad_exit(void)
-{
- platform_driver_unregister(&ep93xx_keypad_driver);
-}
-
-module_init(ep93xx_keypad_init);
-module_exit(ep93xx_keypad_exit);
+module_platform_driver(ep93xx_keypad_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("H Hartley Sweeten <hsweeten@visionengravers.com>");
diff --git a/drivers/input/keyboard/gpio_keys_polled.c b/drivers/input/keyboard/gpio_keys_polled.c
index 4c17aff20657..20c8ab172214 100644
--- a/drivers/input/keyboard/gpio_keys_polled.c
+++ b/drivers/input/keyboard/gpio_keys_polled.c
@@ -241,19 +241,7 @@ static struct platform_driver gpio_keys_polled_driver = {
.owner = THIS_MODULE,
},
};
-
-static int __init gpio_keys_polled_init(void)
-{
- return platform_driver_register(&gpio_keys_polled_driver);
-}
-
-static void __exit gpio_keys_polled_exit(void)
-{
- platform_driver_unregister(&gpio_keys_polled_driver);
-}
-
-module_init(gpio_keys_polled_init);
-module_exit(gpio_keys_polled_exit);
+module_platform_driver(gpio_keys_polled_driver);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>");
diff --git a/drivers/input/keyboard/imx_keypad.c b/drivers/input/keyboard/imx_keypad.c
index ccebd2d09151..fb87b3bcadb9 100644
--- a/drivers/input/keyboard/imx_keypad.c
+++ b/drivers/input/keyboard/imx_keypad.c
@@ -619,19 +619,7 @@ static struct platform_driver imx_keypad_driver = {
.probe = imx_keypad_probe,
.remove = __devexit_p(imx_keypad_remove),
};
-
-static int __init imx_keypad_init(void)
-{
- return platform_driver_register(&imx_keypad_driver);
-}
-
-static void __exit imx_keypad_exit(void)
-{
- platform_driver_unregister(&imx_keypad_driver);
-}
-
-module_init(imx_keypad_init);
-module_exit(imx_keypad_exit);
+module_platform_driver(imx_keypad_driver);
MODULE_AUTHOR("Alberto Panizzo <maramaopercheseimorto@gmail.com>");
MODULE_DESCRIPTION("IMX Keypad Port Driver");
diff --git a/drivers/input/keyboard/jornada680_kbd.c b/drivers/input/keyboard/jornada680_kbd.c
index 7197c5698747..24f3ea01c4d5 100644
--- a/drivers/input/keyboard/jornada680_kbd.c
+++ b/drivers/input/keyboard/jornada680_kbd.c
@@ -260,19 +260,7 @@ static struct platform_driver jornada680kbd_driver = {
.probe = jornada680kbd_probe,
.remove = __devexit_p(jornada680kbd_remove),
};
-
-static int __init jornada680kbd_init(void)
-{
- return platform_driver_register(&jornada680kbd_driver);
-}
-
-static void __exit jornada680kbd_exit(void)
-{
- platform_driver_unregister(&jornada680kbd_driver);
-}
-
-module_init(jornada680kbd_init);
-module_exit(jornada680kbd_exit);
+module_platform_driver(jornada680kbd_driver);
MODULE_AUTHOR("Kristoffer Ericson <kristoffer.ericson@gmail.com>");
MODULE_DESCRIPTION("HP Jornada 620/660/680/690 Keyboard Driver");
diff --git a/drivers/input/keyboard/jornada720_kbd.c b/drivers/input/keyboard/jornada720_kbd.c
index 0aa6740e60d0..eeafc30b207b 100644
--- a/drivers/input/keyboard/jornada720_kbd.c
+++ b/drivers/input/keyboard/jornada720_kbd.c
@@ -174,16 +174,4 @@ static struct platform_driver jornada720_kbd_driver = {
.probe = jornada720_kbd_probe,
.remove = __devexit_p(jornada720_kbd_remove),
};
-
-static int __init jornada720_kbd_init(void)
-{
- return platform_driver_register(&jornada720_kbd_driver);
-}
-
-static void __exit jornada720_kbd_exit(void)
-{
- platform_driver_unregister(&jornada720_kbd_driver);
-}
-
-module_init(jornada720_kbd_init);
-module_exit(jornada720_kbd_exit);
+module_platform_driver(jornada720_kbd_driver);
diff --git a/drivers/input/keyboard/lm8323.c b/drivers/input/keyboard/lm8323.c
index 82d1dc8badd5..21823bfc7911 100644
--- a/drivers/input/keyboard/lm8323.c
+++ b/drivers/input/keyboard/lm8323.c
@@ -545,13 +545,12 @@ static ssize_t lm8323_pwm_store_time(struct device *dev,
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
struct lm8323_pwm *pwm = cdev_to_pwm(led_cdev);
- int ret;
- unsigned long time;
+ int ret, time;
- ret = strict_strtoul(buf, 10, &time);
+ ret = kstrtoint(buf, 10, &time);
/* Numbers only, please. */
if (ret)
- return -EINVAL;
+ return ret;
pwm->fade_time = time;
@@ -613,9 +612,9 @@ static ssize_t lm8323_set_disable(struct device *dev,
{
struct lm8323_chip *lm = dev_get_drvdata(dev);
int ret;
- unsigned long i;
+ unsigned int i;
- ret = strict_strtoul(buf, 10, &i);
+ ret = kstrtouint(buf, 10, &i);
mutex_lock(&lm->lock);
lm->kp_enabled = !i;
diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c
index e2ae657717ea..9b223d73de32 100644
--- a/drivers/input/keyboard/matrix_keypad.c
+++ b/drivers/input/keyboard/matrix_keypad.c
@@ -496,19 +496,7 @@ static struct platform_driver matrix_keypad_driver = {
#endif
},
};
-
-static int __init matrix_keypad_init(void)
-{
- return platform_driver_register(&matrix_keypad_driver);
-}
-
-static void __exit matrix_keypad_exit(void)
-{
- platform_driver_unregister(&matrix_keypad_driver);
-}
-
-module_init(matrix_keypad_init);
-module_exit(matrix_keypad_exit);
+module_platform_driver(matrix_keypad_driver);
MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>");
MODULE_DESCRIPTION("GPIO Driven Matrix Keypad Driver");
diff --git a/drivers/input/keyboard/nomadik-ske-keypad.c b/drivers/input/keyboard/nomadik-ske-keypad.c
index fcdec5e2b297..e35566aa102f 100644
--- a/drivers/input/keyboard/nomadik-ske-keypad.c
+++ b/drivers/input/keyboard/nomadik-ske-keypad.c
@@ -379,7 +379,7 @@ static const struct dev_pm_ops ske_keypad_dev_pm_ops = {
};
#endif
-struct platform_driver ske_keypad_driver = {
+static struct platform_driver ske_keypad_driver = {
.driver = {
.name = "nmk-ske-keypad",
.owner = THIS_MODULE,
diff --git a/drivers/input/keyboard/omap-keypad.c b/drivers/input/keyboard/omap-keypad.c
index 323bcdfff248..6b630d9d3dff 100644
--- a/drivers/input/keyboard/omap-keypad.c
+++ b/drivers/input/keyboard/omap-keypad.c
@@ -473,20 +473,7 @@ static struct platform_driver omap_kp_driver = {
.owner = THIS_MODULE,
},
};
-
-static int __init omap_kp_init(void)
-{
- printk(KERN_INFO "OMAP Keypad Driver\n");
- return platform_driver_register(&omap_kp_driver);
-}
-
-static void __exit omap_kp_exit(void)
-{
- platform_driver_unregister(&omap_kp_driver);
-}
-
-module_init(omap_kp_init);
-module_exit(omap_kp_exit);
+module_platform_driver(omap_kp_driver);
MODULE_AUTHOR("Timo Teräs");
MODULE_DESCRIPTION("OMAP Keypad Driver");
diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c
index c51a3c4a7feb..d5c5d77f4b82 100644
--- a/drivers/input/keyboard/omap4-keypad.c
+++ b/drivers/input/keyboard/omap4-keypad.c
@@ -335,18 +335,7 @@ static struct platform_driver omap4_keypad_driver = {
.owner = THIS_MODULE,
},
};
-
-static int __init omap4_keypad_init(void)
-{
- return platform_driver_register(&omap4_keypad_driver);
-}
-module_init(omap4_keypad_init);
-
-static void __exit omap4_keypad_exit(void)
-{
- platform_driver_unregister(&omap4_keypad_driver);
-}
-module_exit(omap4_keypad_exit);
+module_platform_driver(omap4_keypad_driver);
MODULE_AUTHOR("Texas Instruments");
MODULE_DESCRIPTION("OMAP4 Keypad Driver");
diff --git a/drivers/input/keyboard/opencores-kbd.c b/drivers/input/keyboard/opencores-kbd.c
index 1f1a5563f60a..abe728c7b88e 100644
--- a/drivers/input/keyboard/opencores-kbd.c
+++ b/drivers/input/keyboard/opencores-kbd.c
@@ -163,18 +163,7 @@ static struct platform_driver opencores_kbd_device_driver = {
.name = "opencores-kbd",
},
};
-
-static int __init opencores_kbd_init(void)
-{
- return platform_driver_register(&opencores_kbd_device_driver);
-}
-module_init(opencores_kbd_init);
-
-static void __exit opencores_kbd_exit(void)
-{
- platform_driver_unregister(&opencores_kbd_device_driver);
-}
-module_exit(opencores_kbd_exit);
+module_platform_driver(opencores_kbd_device_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Javier Herrero <jherrero@hvsistemas.es>");
diff --git a/drivers/input/keyboard/pmic8xxx-keypad.c b/drivers/input/keyboard/pmic8xxx-keypad.c
index e7cc51d0fb34..01a1c9f8a383 100644
--- a/drivers/input/keyboard/pmic8xxx-keypad.c
+++ b/drivers/input/keyboard/pmic8xxx-keypad.c
@@ -780,18 +780,7 @@ static struct platform_driver pmic8xxx_kp_driver = {
.pm = &pm8xxx_kp_pm_ops,
},
};
-
-static int __init pmic8xxx_kp_init(void)
-{
- return platform_driver_register(&pmic8xxx_kp_driver);
-}
-module_init(pmic8xxx_kp_init);
-
-static void __exit pmic8xxx_kp_exit(void)
-{
- platform_driver_unregister(&pmic8xxx_kp_driver);
-}
-module_exit(pmic8xxx_kp_exit);
+module_platform_driver(pmic8xxx_kp_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("PMIC8XXX keypad driver");
diff --git a/drivers/input/keyboard/pxa27x_keypad.c b/drivers/input/keyboard/pxa27x_keypad.c
index eca6ae63de14..29fe1b2be1c1 100644
--- a/drivers/input/keyboard/pxa27x_keypad.c
+++ b/drivers/input/keyboard/pxa27x_keypad.c
@@ -602,19 +602,7 @@ static struct platform_driver pxa27x_keypad_driver = {
#endif
},
};
-
-static int __init pxa27x_keypad_init(void)
-{
- return platform_driver_register(&pxa27x_keypad_driver);
-}
-
-static void __exit pxa27x_keypad_exit(void)
-{
- platform_driver_unregister(&pxa27x_keypad_driver);
-}
-
-module_init(pxa27x_keypad_init);
-module_exit(pxa27x_keypad_exit);
+module_platform_driver(pxa27x_keypad_driver);
MODULE_DESCRIPTION("PXA27x Keypad Controller Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/input/keyboard/pxa930_rotary.c b/drivers/input/keyboard/pxa930_rotary.c
index 35451bf780c7..d7f1134b789e 100644
--- a/drivers/input/keyboard/pxa930_rotary.c
+++ b/drivers/input/keyboard/pxa930_rotary.c
@@ -195,18 +195,7 @@ static struct platform_driver pxa930_rotary_driver = {
.probe = pxa930_rotary_probe,
.remove = __devexit_p(pxa930_rotary_remove),
};
-
-static int __init pxa930_rotary_init(void)
-{
- return platform_driver_register(&pxa930_rotary_driver);
-}
-module_init(pxa930_rotary_init);
-
-static void __exit pxa930_rotary_exit(void)
-{
- platform_driver_unregister(&pxa930_rotary_driver);
-}
-module_exit(pxa930_rotary_exit);
+module_platform_driver(pxa930_rotary_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Driver for PXA93x Enhanced Rotary Controller");
diff --git a/drivers/input/keyboard/samsung-keypad.c b/drivers/input/keyboard/samsung-keypad.c
index f689f49e3109..17ba7f9f80f3 100644
--- a/drivers/input/keyboard/samsung-keypad.c
+++ b/drivers/input/keyboard/samsung-keypad.c
@@ -20,9 +20,13 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
#include <linux/sched.h>
-#include <plat/keypad.h>
+#include <linux/input/samsung-keypad.h>
#define SAMSUNG_KEYIFCON 0x00
#define SAMSUNG_KEYIFSTSCLR 0x04
@@ -63,36 +67,33 @@ enum samsung_keypad_type {
struct samsung_keypad {
struct input_dev *input_dev;
+ struct platform_device *pdev;
struct clk *clk;
void __iomem *base;
wait_queue_head_t wait;
bool stopped;
+ bool wake_enabled;
int irq;
+ enum samsung_keypad_type type;
unsigned int row_shift;
unsigned int rows;
unsigned int cols;
unsigned int row_state[SAMSUNG_MAX_COLS];
+#ifdef CONFIG_OF
+ int row_gpios[SAMSUNG_MAX_ROWS];
+ int col_gpios[SAMSUNG_MAX_COLS];
+#endif
unsigned short keycodes[];
};
-static int samsung_keypad_is_s5pv210(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
- enum samsung_keypad_type type =
- platform_get_device_id(pdev)->driver_data;
-
- return type == KEYPAD_TYPE_S5PV210;
-}
-
static void samsung_keypad_scan(struct samsung_keypad *keypad,
unsigned int *row_state)
{
- struct device *dev = keypad->input_dev->dev.parent;
unsigned int col;
unsigned int val;
for (col = 0; col < keypad->cols; col++) {
- if (samsung_keypad_is_s5pv210(dev)) {
+ if (keypad->type == KEYPAD_TYPE_S5PV210) {
val = S5PV210_KEYIFCOLEN_MASK;
val &= ~(1 << col) << 8;
} else {
@@ -158,6 +159,8 @@ static irqreturn_t samsung_keypad_irq(int irq, void *dev_id)
unsigned int val;
bool key_down;
+ pm_runtime_get_sync(&keypad->pdev->dev);
+
do {
val = readl(keypad->base + SAMSUNG_KEYIFSTSCLR);
/* Clear interrupt. */
@@ -172,6 +175,8 @@ static irqreturn_t samsung_keypad_irq(int irq, void *dev_id)
} while (key_down && !keypad->stopped);
+ pm_runtime_put_sync(&keypad->pdev->dev);
+
return IRQ_HANDLED;
}
@@ -179,6 +184,8 @@ static void samsung_keypad_start(struct samsung_keypad *keypad)
{
unsigned int val;
+ pm_runtime_get_sync(&keypad->pdev->dev);
+
/* Tell IRQ thread that it may poll the device. */
keypad->stopped = false;
@@ -191,12 +198,16 @@ static void samsung_keypad_start(struct samsung_keypad *keypad)
/* KEYIFCOL reg clear. */
writel(0, keypad->base + SAMSUNG_KEYIFCOL);
+
+ pm_runtime_put_sync(&keypad->pdev->dev);
}
static void samsung_keypad_stop(struct samsung_keypad *keypad)
{
unsigned int val;
+ pm_runtime_get_sync(&keypad->pdev->dev);
+
/* Signal IRQ thread to stop polling and disable the handler. */
keypad->stopped = true;
wake_up(&keypad->wait);
@@ -217,6 +228,8 @@ static void samsung_keypad_stop(struct samsung_keypad *keypad)
* re-enable the handler.
*/
enable_irq(keypad->irq);
+
+ pm_runtime_put_sync(&keypad->pdev->dev);
}
static int samsung_keypad_open(struct input_dev *input_dev)
@@ -235,6 +248,126 @@ static void samsung_keypad_close(struct input_dev *input_dev)
samsung_keypad_stop(keypad);
}
+#ifdef CONFIG_OF
+static struct samsung_keypad_platdata *samsung_keypad_parse_dt(
+ struct device *dev)
+{
+ struct samsung_keypad_platdata *pdata;
+ struct matrix_keymap_data *keymap_data;
+ uint32_t *keymap, num_rows = 0, num_cols = 0;
+ struct device_node *np = dev->of_node, *key_np;
+ unsigned int key_count = 0;
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ dev_err(dev, "could not allocate memory for platform data\n");
+ return NULL;
+ }
+
+ of_property_read_u32(np, "samsung,keypad-num-rows", &num_rows);
+ of_property_read_u32(np, "samsung,keypad-num-columns", &num_cols);
+ if (!num_rows || !num_cols) {
+ dev_err(dev, "number of keypad rows/columns not specified\n");
+ return NULL;
+ }
+ pdata->rows = num_rows;
+ pdata->cols = num_cols;
+
+ keymap_data = devm_kzalloc(dev, sizeof(*keymap_data), GFP_KERNEL);
+ if (!keymap_data) {
+ dev_err(dev, "could not allocate memory for keymap data\n");
+ return NULL;
+ }
+ pdata->keymap_data = keymap_data;
+
+ for_each_child_of_node(np, key_np)
+ key_count++;
+
+ keymap_data->keymap_size = key_count;
+ keymap = devm_kzalloc(dev, sizeof(uint32_t) * key_count, GFP_KERNEL);
+ if (!keymap) {
+ dev_err(dev, "could not allocate memory for keymap\n");
+ return NULL;
+ }
+ keymap_data->keymap = keymap;
+
+ for_each_child_of_node(np, key_np) {
+ u32 row, col, key_code;
+ of_property_read_u32(key_np, "keypad,row", &row);
+ of_property_read_u32(key_np, "keypad,column", &col);
+ of_property_read_u32(key_np, "linux,code", &key_code);
+ *keymap++ = KEY(row, col, key_code);
+ }
+
+ if (of_get_property(np, "linux,input-no-autorepeat", NULL))
+ pdata->no_autorepeat = true;
+ if (of_get_property(np, "linux,input-wakeup", NULL))
+ pdata->wakeup = true;
+
+ return pdata;
+}
+
+static void samsung_keypad_parse_dt_gpio(struct device *dev,
+ struct samsung_keypad *keypad)
+{
+ struct device_node *np = dev->of_node;
+ int gpio, ret, row, col;
+
+ for (row = 0; row < keypad->rows; row++) {
+ gpio = of_get_named_gpio(np, "row-gpios", row);
+ keypad->row_gpios[row] = gpio;
+ if (!gpio_is_valid(gpio)) {
+ dev_err(dev, "keypad row[%d]: invalid gpio %d\n",
+ row, gpio);
+ continue;
+ }
+
+ ret = gpio_request(gpio, "keypad-row");
+ if (ret)
+ dev_err(dev, "keypad row[%d] gpio request failed\n",
+ row);
+ }
+
+ for (col = 0; col < keypad->cols; col++) {
+ gpio = of_get_named_gpio(np, "col-gpios", col);
+ keypad->col_gpios[col] = gpio;
+ if (!gpio_is_valid(gpio)) {
+ dev_err(dev, "keypad column[%d]: invalid gpio %d\n",
+ col, gpio);
+ continue;
+ }
+
+ ret = gpio_request(gpio, "keypad-col");
+ if (ret)
+ dev_err(dev, "keypad column[%d] gpio request failed\n",
+ col);
+ }
+}
+
+static void samsung_keypad_dt_gpio_free(struct samsung_keypad *keypad)
+{
+ int cnt;
+
+ for (cnt = 0; cnt < keypad->rows; cnt++)
+ if (gpio_is_valid(keypad->row_gpios[cnt]))
+ gpio_free(keypad->row_gpios[cnt]);
+
+ for (cnt = 0; cnt < keypad->cols; cnt++)
+ if (gpio_is_valid(keypad->col_gpios[cnt]))
+ gpio_free(keypad->col_gpios[cnt]);
+}
+#else
+static
+struct samsung_keypad_platdata *samsung_keypad_parse_dt(struct device *dev)
+{
+ return NULL;
+}
+
+static void samsung_keypad_dt_gpio_free(struct samsung_keypad *keypad)
+{
+}
+#endif
+
static int __devinit samsung_keypad_probe(struct platform_device *pdev)
{
const struct samsung_keypad_platdata *pdata;
@@ -246,7 +379,10 @@ static int __devinit samsung_keypad_probe(struct platform_device *pdev)
unsigned int keymap_size;
int error;
- pdata = pdev->dev.platform_data;
+ if (pdev->dev.of_node)
+ pdata = samsung_keypad_parse_dt(&pdev->dev);
+ else
+ pdata = pdev->dev.platform_data;
if (!pdata) {
dev_err(&pdev->dev, "no platform data defined\n");
return -EINVAL;
@@ -298,11 +434,23 @@ static int __devinit samsung_keypad_probe(struct platform_device *pdev)
}
keypad->input_dev = input_dev;
+ keypad->pdev = pdev;
keypad->row_shift = row_shift;
keypad->rows = pdata->rows;
keypad->cols = pdata->cols;
+ keypad->stopped = true;
init_waitqueue_head(&keypad->wait);
+ if (pdev->dev.of_node) {
+#ifdef CONFIG_OF
+ samsung_keypad_parse_dt_gpio(&pdev->dev, keypad);
+ keypad->type = of_device_is_compatible(pdev->dev.of_node,
+ "samsung,s5pv210-keypad");
+#endif
+ } else {
+ keypad->type = platform_get_device_id(pdev)->driver_data;
+ }
+
input_dev->name = pdev->name;
input_dev->id.bustype = BUS_HOST;
input_dev->dev.parent = &pdev->dev;
@@ -337,18 +485,29 @@ static int __devinit samsung_keypad_probe(struct platform_device *pdev)
goto err_put_clk;
}
+ device_init_wakeup(&pdev->dev, pdata->wakeup);
+ platform_set_drvdata(pdev, keypad);
+ pm_runtime_enable(&pdev->dev);
+
error = input_register_device(keypad->input_dev);
if (error)
goto err_free_irq;
- device_init_wakeup(&pdev->dev, pdata->wakeup);
- platform_set_drvdata(pdev, keypad);
+ if (pdev->dev.of_node) {
+ devm_kfree(&pdev->dev, (void *)pdata->keymap_data->keymap);
+ devm_kfree(&pdev->dev, (void *)pdata->keymap_data);
+ devm_kfree(&pdev->dev, (void *)pdata);
+ }
return 0;
err_free_irq:
free_irq(keypad->irq, keypad);
+ pm_runtime_disable(&pdev->dev);
+ device_init_wakeup(&pdev->dev, 0);
+ platform_set_drvdata(pdev, NULL);
err_put_clk:
clk_put(keypad->clk);
+ samsung_keypad_dt_gpio_free(keypad);
err_unmap_base:
iounmap(keypad->base);
err_free_mem:
@@ -362,6 +521,7 @@ static int __devexit samsung_keypad_remove(struct platform_device *pdev)
{
struct samsung_keypad *keypad = platform_get_drvdata(pdev);
+ pm_runtime_disable(&pdev->dev);
device_init_wakeup(&pdev->dev, 0);
platform_set_drvdata(pdev, NULL);
@@ -374,6 +534,7 @@ static int __devexit samsung_keypad_remove(struct platform_device *pdev)
free_irq(keypad->irq, keypad);
clk_put(keypad->clk);
+ samsung_keypad_dt_gpio_free(keypad);
iounmap(keypad->base);
kfree(keypad);
@@ -381,11 +542,57 @@ static int __devexit samsung_keypad_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_RUNTIME
+static int samsung_keypad_runtime_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct samsung_keypad *keypad = platform_get_drvdata(pdev);
+ unsigned int val;
+ int error;
+
+ if (keypad->stopped)
+ return 0;
+
+ /* This may fail on some SoCs due to lack of controller support */
+ error = enable_irq_wake(keypad->irq);
+ if (!error)
+ keypad->wake_enabled = true;
+
+ val = readl(keypad->base + SAMSUNG_KEYIFCON);
+ val |= SAMSUNG_KEYIFCON_WAKEUPEN;
+ writel(val, keypad->base + SAMSUNG_KEYIFCON);
+
+ clk_disable(keypad->clk);
+
+ return 0;
+}
+
+static int samsung_keypad_runtime_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct samsung_keypad *keypad = platform_get_drvdata(pdev);
+ unsigned int val;
+
+ if (keypad->stopped)
+ return 0;
+
+ clk_enable(keypad->clk);
+
+ val = readl(keypad->base + SAMSUNG_KEYIFCON);
+ val &= ~SAMSUNG_KEYIFCON_WAKEUPEN;
+ writel(val, keypad->base + SAMSUNG_KEYIFCON);
+
+ if (keypad->wake_enabled)
+ disable_irq_wake(keypad->irq);
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_SLEEP
static void samsung_keypad_toggle_wakeup(struct samsung_keypad *keypad,
bool enable)
{
- struct device *dev = keypad->input_dev->dev.parent;
unsigned int val;
clk_enable(keypad->clk);
@@ -393,11 +600,11 @@ static void samsung_keypad_toggle_wakeup(struct samsung_keypad *keypad,
val = readl(keypad->base + SAMSUNG_KEYIFCON);
if (enable) {
val |= SAMSUNG_KEYIFCON_WAKEUPEN;
- if (device_may_wakeup(dev))
+ if (device_may_wakeup(&keypad->pdev->dev))
enable_irq_wake(keypad->irq);
} else {
val &= ~SAMSUNG_KEYIFCON_WAKEUPEN;
- if (device_may_wakeup(dev))
+ if (device_may_wakeup(&keypad->pdev->dev))
disable_irq_wake(keypad->irq);
}
writel(val, keypad->base + SAMSUNG_KEYIFCON);
@@ -440,11 +647,23 @@ static int samsung_keypad_resume(struct device *dev)
return 0;
}
+#endif
static const struct dev_pm_ops samsung_keypad_pm_ops = {
- .suspend = samsung_keypad_suspend,
- .resume = samsung_keypad_resume,
+ SET_SYSTEM_SLEEP_PM_OPS(samsung_keypad_suspend, samsung_keypad_resume)
+ SET_RUNTIME_PM_OPS(samsung_keypad_runtime_suspend,
+ samsung_keypad_runtime_resume, NULL)
};
+
+#ifdef CONFIG_OF
+static const struct of_device_id samsung_keypad_dt_match[] = {
+ { .compatible = "samsung,s3c6410-keypad" },
+ { .compatible = "samsung,s5pv210-keypad" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, samsung_keypad_dt_match);
+#else
+#define samsung_keypad_dt_match NULL
#endif
static struct platform_device_id samsung_keypad_driver_ids[] = {
@@ -465,27 +684,14 @@ static struct platform_driver samsung_keypad_driver = {
.driver = {
.name = "samsung-keypad",
.owner = THIS_MODULE,
-#ifdef CONFIG_PM
+ .of_match_table = samsung_keypad_dt_match,
.pm = &samsung_keypad_pm_ops,
-#endif
},
.id_table = samsung_keypad_driver_ids,
};
-
-static int __init samsung_keypad_init(void)
-{
- return platform_driver_register(&samsung_keypad_driver);
-}
-module_init(samsung_keypad_init);
-
-static void __exit samsung_keypad_exit(void)
-{
- platform_driver_unregister(&samsung_keypad_driver);
-}
-module_exit(samsung_keypad_exit);
+module_platform_driver(samsung_keypad_driver);
MODULE_DESCRIPTION("Samsung keypad driver");
MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
MODULE_AUTHOR("Donghwa Lee <dh09.lee@samsung.com>");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:samsung-keypad");
diff --git a/drivers/input/keyboard/sh_keysc.c b/drivers/input/keyboard/sh_keysc.c
index 934aeb583b30..da54ad5db154 100644
--- a/drivers/input/keyboard/sh_keysc.c
+++ b/drivers/input/keyboard/sh_keysc.c
@@ -337,19 +337,7 @@ static struct platform_driver sh_keysc_device_driver = {
.pm = &sh_keysc_dev_pm_ops,
}
};
-
-static int __init sh_keysc_init(void)
-{
- return platform_driver_register(&sh_keysc_device_driver);
-}
-
-static void __exit sh_keysc_exit(void)
-{
- platform_driver_unregister(&sh_keysc_device_driver);
-}
-
-module_init(sh_keysc_init);
-module_exit(sh_keysc_exit);
+module_platform_driver(sh_keysc_device_driver);
MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH KEYSC Keypad Driver");
diff --git a/drivers/input/keyboard/spear-keyboard.c b/drivers/input/keyboard/spear-keyboard.c
index d712dffd2157..c88bd63dc9cc 100644
--- a/drivers/input/keyboard/spear-keyboard.c
+++ b/drivers/input/keyboard/spear-keyboard.c
@@ -326,18 +326,7 @@ static struct platform_driver spear_kbd_driver = {
#endif
},
};
-
-static int __init spear_kbd_init(void)
-{
- return platform_driver_register(&spear_kbd_driver);
-}
-module_init(spear_kbd_init);
-
-static void __exit spear_kbd_exit(void)
-{
- platform_driver_unregister(&spear_kbd_driver);
-}
-module_exit(spear_kbd_exit);
+module_platform_driver(spear_kbd_driver);
MODULE_AUTHOR("Rajeev Kumar");
MODULE_DESCRIPTION("SPEAr Keyboard Driver");
diff --git a/drivers/input/keyboard/stmpe-keypad.c b/drivers/input/keyboard/stmpe-keypad.c
index ab7610ca10eb..9397cf9c625c 100644
--- a/drivers/input/keyboard/stmpe-keypad.c
+++ b/drivers/input/keyboard/stmpe-keypad.c
@@ -368,18 +368,7 @@ static struct platform_driver stmpe_keypad_driver = {
.probe = stmpe_keypad_probe,
.remove = __devexit_p(stmpe_keypad_remove),
};
-
-static int __init stmpe_keypad_init(void)
-{
- return platform_driver_register(&stmpe_keypad_driver);
-}
-module_init(stmpe_keypad_init);
-
-static void __exit stmpe_keypad_exit(void)
-{
- platform_driver_unregister(&stmpe_keypad_driver);
-}
-module_exit(stmpe_keypad_exit);
+module_platform_driver(stmpe_keypad_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("STMPExxxx keypad driver");
diff --git a/drivers/input/keyboard/tc3589x-keypad.c b/drivers/input/keyboard/tc3589x-keypad.c
index f60c9e82f204..2dee3e4e7c6f 100644
--- a/drivers/input/keyboard/tc3589x-keypad.c
+++ b/drivers/input/keyboard/tc3589x-keypad.c
@@ -74,11 +74,13 @@
/**
* struct tc_keypad - data structure used by keypad driver
+ * @tc3589x: pointer to tc35893
* @input: pointer to input device object
* @board: keypad platform device
* @krow: number of rows
 * @kcol: number of columns
* @keymap: matrix scan code table for keycodes
+ * @keypad_stopped: holds keypad status
*/
struct tc_keypad {
struct tc3589x *tc3589x;
@@ -453,18 +455,7 @@ static struct platform_driver tc3589x_keypad_driver = {
.probe = tc3589x_keypad_probe,
.remove = __devexit_p(tc3589x_keypad_remove),
};
-
-static int __init tc3589x_keypad_init(void)
-{
- return platform_driver_register(&tc3589x_keypad_driver);
-}
-module_init(tc3589x_keypad_init);
-
-static void __exit tc3589x_keypad_exit(void)
-{
- return platform_driver_unregister(&tc3589x_keypad_driver);
-}
-module_exit(tc3589x_keypad_exit);
+module_platform_driver(tc3589x_keypad_driver);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jayeeta Banerjee/Sundar Iyer");
diff --git a/drivers/input/keyboard/tca8418_keypad.c b/drivers/input/keyboard/tca8418_keypad.c
new file mode 100644
index 000000000000..958ec107bfbc
--- /dev/null
+++ b/drivers/input/keyboard/tca8418_keypad.c
@@ -0,0 +1,430 @@
+/*
+ * Driver for TCA8418 I2C keyboard
+ *
+ * Copyright (C) 2011 Fuel7, Inc. All rights reserved.
+ *
+ * Author: Kyle Manna <kyle.manna@fuel7.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ *
+ * If you can't comply with GPLv2, alternative licensing terms may be
+ * arranged. Please contact Fuel7, Inc. (http://fuel7.com/) for proprietary
+ * alternative licensing inquiries.
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/input/tca8418_keypad.h>
+
+/* TCA8418 hardware limits */
+#define TCA8418_MAX_ROWS 8
+#define TCA8418_MAX_COLS 10
+
+/* TCA8418 register offsets */
+#define REG_CFG 0x01
+#define REG_INT_STAT 0x02
+#define REG_KEY_LCK_EC 0x03
+#define REG_KEY_EVENT_A 0x04
+#define REG_KEY_EVENT_B 0x05
+#define REG_KEY_EVENT_C 0x06
+#define REG_KEY_EVENT_D 0x07
+#define REG_KEY_EVENT_E 0x08
+#define REG_KEY_EVENT_F 0x09
+#define REG_KEY_EVENT_G 0x0A
+#define REG_KEY_EVENT_H 0x0B
+#define REG_KEY_EVENT_I 0x0C
+#define REG_KEY_EVENT_J 0x0D
+#define REG_KP_LCK_TIMER 0x0E
+#define REG_UNLOCK1 0x0F
+#define REG_UNLOCK2 0x10
+#define REG_GPIO_INT_STAT1 0x11
+#define REG_GPIO_INT_STAT2 0x12
+#define REG_GPIO_INT_STAT3 0x13
+#define REG_GPIO_DAT_STAT1 0x14
+#define REG_GPIO_DAT_STAT2 0x15
+#define REG_GPIO_DAT_STAT3 0x16
+#define REG_GPIO_DAT_OUT1 0x17
+#define REG_GPIO_DAT_OUT2 0x18
+#define REG_GPIO_DAT_OUT3 0x19
+#define REG_GPIO_INT_EN1 0x1A
+#define REG_GPIO_INT_EN2 0x1B
+#define REG_GPIO_INT_EN3 0x1C
+#define REG_KP_GPIO1 0x1D
+#define REG_KP_GPIO2 0x1E
+#define REG_KP_GPIO3 0x1F
+#define REG_GPI_EM1 0x20
+#define REG_GPI_EM2 0x21
+#define REG_GPI_EM3 0x22
+#define REG_GPIO_DIR1 0x23
+#define REG_GPIO_DIR2 0x24
+#define REG_GPIO_DIR3 0x25
+#define REG_GPIO_INT_LVL1 0x26
+#define REG_GPIO_INT_LVL2 0x27
+#define REG_GPIO_INT_LVL3 0x28
+#define REG_DEBOUNCE_DIS1 0x29
+#define REG_DEBOUNCE_DIS2 0x2A
+#define REG_DEBOUNCE_DIS3 0x2B
+#define REG_GPIO_PULL1 0x2C
+#define REG_GPIO_PULL2 0x2D
+#define REG_GPIO_PULL3 0x2E
+
+/* TCA8418 bit definitions */
+#define CFG_AI BIT(7)
+#define CFG_GPI_E_CFG BIT(6)
+#define CFG_OVR_FLOW_M BIT(5)
+#define CFG_INT_CFG BIT(4)
+#define CFG_OVR_FLOW_IEN BIT(3)
+#define CFG_K_LCK_IEN BIT(2)
+#define CFG_GPI_IEN BIT(1)
+#define CFG_KE_IEN BIT(0)
+
+#define INT_STAT_CAD_INT BIT(4)
+#define INT_STAT_OVR_FLOW_INT BIT(3)
+#define INT_STAT_K_LCK_INT BIT(2)
+#define INT_STAT_GPI_INT BIT(1)
+#define INT_STAT_K_INT BIT(0)
+
+/* TCA8418 register masks */
+#define KEY_LCK_EC_KEC 0x7
+#define KEY_EVENT_CODE 0x7f
+#define KEY_EVENT_VALUE 0x80
+
+
+static const struct i2c_device_id tca8418_id[] = {
+ { TCA8418_NAME, 8418, },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tca8418_id);
+
+struct tca8418_keypad {
+ unsigned int rows;
+ unsigned int cols;
+ unsigned int keypad_mask; /* Mask for keypad col/row regs */
+ unsigned int irq;
+ unsigned int row_shift;
+
+ struct i2c_client *client;
+ struct input_dev *input;
+
+ /* Flexible array member, must be at end of struct */
+ unsigned short keymap[];
+};
+
+/*
+ * Write a byte to the TCA8418
+ */
+static int tca8418_write_byte(struct tca8418_keypad *keypad_data,
+ int reg, u8 val)
+{
+ int error;
+
+ error = i2c_smbus_write_byte_data(keypad_data->client, reg, val);
+ if (error < 0) {
+ dev_err(&keypad_data->client->dev,
+ "%s failed, reg: %d, val: %d, error: %d\n",
+ __func__, reg, val, error);
+ return error;
+ }
+
+ return 0;
+}
+
+/*
+ * Read a byte from the TCA8418
+ */
+static int tca8418_read_byte(struct tca8418_keypad *keypad_data,
+ int reg, u8 *val)
+{
+ int error;
+
+ error = i2c_smbus_read_byte_data(keypad_data->client, reg);
+ if (error < 0) {
+ dev_err(&keypad_data->client->dev,
+ "%s failed, reg: %d, error: %d\n",
+ __func__, reg, error);
+ return error;
+ }
+
+ *val = (u8)error;
+
+ return 0;
+}
+
+static void tca8418_read_keypad(struct tca8418_keypad *keypad_data)
+{
+ int error, col, row;
+ u8 reg, state, code;
+
+ /* Initial read of the key event FIFO */
+ error = tca8418_read_byte(keypad_data, REG_KEY_EVENT_A, &reg);
+
+ /* Assume that key code 0 signifies empty FIFO */
+ while (error >= 0 && reg > 0) {
+ state = reg & KEY_EVENT_VALUE;
+ code = reg & KEY_EVENT_CODE;
+
+ row = code / TCA8418_MAX_COLS;
+ col = code % TCA8418_MAX_COLS;
+
+ row = (col) ? row : row - 1;
+ col = (col) ? col - 1 : TCA8418_MAX_COLS - 1;
+
+ code = MATRIX_SCAN_CODE(row, col, keypad_data->row_shift);
+ input_event(keypad_data->input, EV_MSC, MSC_SCAN, code);
+ input_report_key(keypad_data->input,
+ keypad_data->keymap[code], state);
+
+ /* Read for next loop */
+ error = tca8418_read_byte(keypad_data, REG_KEY_EVENT_A, &reg);
+ }
+
+ if (error < 0)
+ dev_err(&keypad_data->client->dev,
+ "unable to read REG_KEY_EVENT_A\n");
+
+ input_sync(keypad_data->input);
+}
+
+/*
+ * Threaded IRQ handler and this can (and will) sleep.
+ */
+static irqreturn_t tca8418_irq_handler(int irq, void *dev_id)
+{
+ struct tca8418_keypad *keypad_data = dev_id;
+ u8 reg;
+ int error;
+
+ error = tca8418_read_byte(keypad_data, REG_INT_STAT, &reg);
+ if (error) {
+ dev_err(&keypad_data->client->dev,
+ "unable to read REG_INT_STAT\n");
+ goto exit;
+ }
+
+ if (reg & INT_STAT_OVR_FLOW_INT)
+ dev_warn(&keypad_data->client->dev, "overflow occurred\n");
+
+ if (reg & INT_STAT_K_INT)
+ tca8418_read_keypad(keypad_data);
+
+exit:
+ /* Clear all interrupts, even IRQs we didn't check (GPI, CAD, LCK) */
+ reg = 0xff;
+ error = tca8418_write_byte(keypad_data, REG_INT_STAT, reg);
+ if (error)
+ dev_err(&keypad_data->client->dev,
+ "unable to clear REG_INT_STAT\n");
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Configure the TCA8418 for keypad operation
+ */
+static int __devinit tca8418_configure(struct tca8418_keypad *keypad_data)
+{
+ int reg, error;
+
+ /* Write config register, if this fails assume device not present */
+ error = tca8418_write_byte(keypad_data, REG_CFG,
+ CFG_INT_CFG | CFG_OVR_FLOW_IEN | CFG_KE_IEN);
+ if (error < 0)
+ return -ENODEV;
+
+
+ /* Assemble a mask for row and column registers */
+ reg = ~(~0 << keypad_data->rows);
+ reg += (~(~0 << keypad_data->cols)) << 8;
+ keypad_data->keypad_mask = reg;
+
+ /* Set registers to keypad mode */
+ error |= tca8418_write_byte(keypad_data, REG_KP_GPIO1, reg);
+ error |= tca8418_write_byte(keypad_data, REG_KP_GPIO2, reg >> 8);
+ error |= tca8418_write_byte(keypad_data, REG_KP_GPIO3, reg >> 16);
+
+ /* Enable column debouncing */
+ error |= tca8418_write_byte(keypad_data, REG_DEBOUNCE_DIS1, reg);
+ error |= tca8418_write_byte(keypad_data, REG_DEBOUNCE_DIS2, reg >> 8);
+ error |= tca8418_write_byte(keypad_data, REG_DEBOUNCE_DIS3, reg >> 16);
+
+ return error;
+}
+
+static int __devinit tca8418_keypad_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ const struct tca8418_keypad_platform_data *pdata =
+ client->dev.platform_data;
+ struct tca8418_keypad *keypad_data;
+ struct input_dev *input;
+ int error, row_shift, max_keys;
+
+ /* Copy the platform data */
+ if (!pdata) {
+ dev_dbg(&client->dev, "no platform data\n");
+ return -EINVAL;
+ }
+
+ if (!pdata->keymap_data) {
+ dev_err(&client->dev, "no keymap data defined\n");
+ return -EINVAL;
+ }
+
+ if (!pdata->rows || pdata->rows > TCA8418_MAX_ROWS) {
+ dev_err(&client->dev, "invalid rows\n");
+ return -EINVAL;
+ }
+
+ if (!pdata->cols || pdata->cols > TCA8418_MAX_COLS) {
+ dev_err(&client->dev, "invalid columns\n");
+ return -EINVAL;
+ }
+
+ /* Check i2c driver capabilities */
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE)) {
+ dev_err(&client->dev, "%s adapter not supported\n",
+ dev_driver_string(&client->adapter->dev));
+ return -ENODEV;
+ }
+
+ row_shift = get_count_order(pdata->cols);
+ max_keys = pdata->rows << row_shift;
+
+ /* Allocate memory for keypad_data, keymap and input device */
+ keypad_data = kzalloc(sizeof(*keypad_data) +
+ max_keys * sizeof(keypad_data->keymap[0]), GFP_KERNEL);
+ if (!keypad_data)
+ return -ENOMEM;
+
+ keypad_data->rows = pdata->rows;
+ keypad_data->cols = pdata->cols;
+ keypad_data->client = client;
+ keypad_data->row_shift = row_shift;
+
+ /* Initialize the chip or fail if chip isn't present */
+ error = tca8418_configure(keypad_data);
+ if (error < 0)
+ goto fail1;
+
+ /* Configure input device */
+ input = input_allocate_device();
+ if (!input) {
+ error = -ENOMEM;
+ goto fail1;
+ }
+ keypad_data->input = input;
+
+ input->name = client->name;
+ input->dev.parent = &client->dev;
+
+ input->id.bustype = BUS_I2C;
+ input->id.vendor = 0x0001;
+ input->id.product = 0x001;
+ input->id.version = 0x0001;
+
+ input->keycode = keypad_data->keymap;
+ input->keycodesize = sizeof(keypad_data->keymap[0]);
+ input->keycodemax = max_keys;
+
+ __set_bit(EV_KEY, input->evbit);
+ if (pdata->rep)
+ __set_bit(EV_REP, input->evbit);
+
+ input_set_capability(input, EV_MSC, MSC_SCAN);
+
+ input_set_drvdata(input, keypad_data);
+
+ matrix_keypad_build_keymap(pdata->keymap_data, row_shift,
+ input->keycode, input->keybit);
+
+ if (pdata->irq_is_gpio)
+ client->irq = gpio_to_irq(client->irq);
+
+ error = request_threaded_irq(client->irq, NULL, tca8418_irq_handler,
+ IRQF_TRIGGER_FALLING,
+ client->name, keypad_data);
+ if (error) {
+ dev_dbg(&client->dev,
+ "Unable to claim irq %d; error %d\n",
+ client->irq, error);
+ goto fail2;
+ }
+
+ error = input_register_device(input);
+ if (error) {
+ dev_dbg(&client->dev,
+ "Unable to register input device, error: %d\n", error);
+ goto fail3;
+ }
+
+ i2c_set_clientdata(client, keypad_data);
+ return 0;
+
+fail3:
+ free_irq(client->irq, keypad_data);
+fail2:
+ input_free_device(input);
+fail1:
+ kfree(keypad_data);
+ return error;
+}
+
+static int __devexit tca8418_keypad_remove(struct i2c_client *client)
+{
+ struct tca8418_keypad *keypad_data = i2c_get_clientdata(client);
+
+ free_irq(keypad_data->client->irq, keypad_data);
+
+ input_unregister_device(keypad_data->input);
+
+ kfree(keypad_data);
+
+ return 0;
+}
+
+
+static struct i2c_driver tca8418_keypad_driver = {
+ .driver = {
+ .name = TCA8418_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = tca8418_keypad_probe,
+ .remove = __devexit_p(tca8418_keypad_remove),
+ .id_table = tca8418_id,
+};
+
+static int __init tca8418_keypad_init(void)
+{
+ return i2c_add_driver(&tca8418_keypad_driver);
+}
+subsys_initcall(tca8418_keypad_init);
+
+static void __exit tca8418_keypad_exit(void)
+{
+ i2c_del_driver(&tca8418_keypad_driver);
+}
+module_exit(tca8418_keypad_exit);
+
+MODULE_AUTHOR("Kyle Manna <kyle.manna@fuel7.com>");
+MODULE_DESCRIPTION("Keypad driver for TCA8418");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c
index cf3228b0ab90..a136e2e832be 100644
--- a/drivers/input/keyboard/tegra-kbc.c
+++ b/drivers/input/keyboard/tegra-kbc.c
@@ -26,6 +26,7 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/interrupt.h>
+#include <linux/of.h>
#include <linux/clk.h>
#include <linux/slab.h>
#include <mach/clk.h>
@@ -52,6 +53,7 @@
/* KBC Interrupt Register */
#define KBC_INT_0 0x4
#define KBC_INT_FIFO_CNT_INT_STATUS (1 << 2)
+#define KBC_INT_KEYPRESS_INT_STATUS (1 << 0)
#define KBC_ROW_CFG0_0 0x8
#define KBC_COL_CFG0_0 0x18
@@ -74,15 +76,17 @@ struct tegra_kbc {
unsigned int cp_to_wkup_dly;
bool use_fn_map;
bool use_ghost_filter;
+ bool keypress_caused_wake;
const struct tegra_kbc_platform_data *pdata;
unsigned short keycode[KBC_MAX_KEY * 2];
unsigned short current_keys[KBC_MAX_KPENT];
unsigned int num_pressed_keys;
+ u32 wakeup_key;
struct timer_list timer;
struct clk *clk;
};
-static const u32 tegra_kbc_default_keymap[] = {
+static const u32 tegra_kbc_default_keymap[] __devinitdata = {
KEY(0, 2, KEY_W),
KEY(0, 3, KEY_S),
KEY(0, 4, KEY_A),
@@ -217,7 +221,8 @@ static const u32 tegra_kbc_default_keymap[] = {
KEY(31, 4, KEY_HELP),
};
-static const struct matrix_keymap_data tegra_kbc_default_keymap_data = {
+static const
+struct matrix_keymap_data tegra_kbc_default_keymap_data __devinitdata = {
.keymap = tegra_kbc_default_keymap,
.keymap_size = ARRAY_SIZE(tegra_kbc_default_keymap),
};
@@ -409,6 +414,9 @@ static irqreturn_t tegra_kbc_isr(int irq, void *args)
*/
tegra_kbc_set_fifo_interrupt(kbc, false);
mod_timer(&kbc->timer, jiffies + kbc->cp_dly_jiffies);
+ } else if (val & KBC_INT_KEYPRESS_INT_STATUS) {
+ /* We can be here only through system resume path */
+ kbc->keypress_caused_wake = true;
}
spin_unlock_irqrestore(&kbc->lock, flags);
@@ -576,6 +584,56 @@ tegra_kbc_check_pin_cfg(const struct tegra_kbc_platform_data *pdata,
return true;
}
+#ifdef CONFIG_OF
+static struct tegra_kbc_platform_data * __devinit
+tegra_kbc_dt_parse_pdata(struct platform_device *pdev)
+{
+ struct tegra_kbc_platform_data *pdata;
+ struct device_node *np = pdev->dev.of_node;
+
+ if (!np)
+ return NULL;
+
+ pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return NULL;
+
+ if (!of_property_read_u32(np, "debounce-delay", &prop))
+ pdata->debounce_cnt = prop;
+
+ if (!of_property_read_u32(np, "repeat-delay", &prop))
+ pdata->repeat_cnt = prop;
+
+ if (of_find_property(np, "needs-ghost-filter", NULL))
+ pdata->use_ghost_filter = true;
+
+ if (of_find_property(np, "wakeup-source", NULL))
+ pdata->wakeup = true;
+
+ /*
+ * All currently known keymaps with device tree support use the same
+ * pin_cfg, so set it up here.
+ */
+ for (i = 0; i < KBC_MAX_ROW; i++) {
+ pdata->pin_cfg[i].num = i;
+ pdata->pin_cfg[i].is_row = true;
+ }
+
+ for (i = 0; i < KBC_MAX_COL; i++) {
+ pdata->pin_cfg[KBC_MAX_ROW + i].num = i;
+ pdata->pin_cfg[KBC_MAX_ROW + i].is_row = false;
+ }
+
+ return pdata;
+}
+#else
+static inline struct tegra_kbc_platform_data *tegra_kbc_dt_parse_pdata(
+ struct platform_device *pdev)
+{
+ return NULL;
+}
+#endif
+
static int __devinit tegra_kbc_probe(struct platform_device *pdev)
{
const struct tegra_kbc_platform_data *pdata = pdev->dev.platform_data;
@@ -590,21 +648,28 @@ static int __devinit tegra_kbc_probe(struct platform_device *pdev)
unsigned int scan_time_rows;
if (!pdata)
- return -EINVAL;
+ pdata = tegra_kbc_dt_parse_pdata(pdev);
- if (!tegra_kbc_check_pin_cfg(pdata, &pdev->dev, &num_rows))
+ if (!pdata)
return -EINVAL;
+ if (!tegra_kbc_check_pin_cfg(pdata, &pdev->dev, &num_rows)) {
+ err = -EINVAL;
+ goto err_free_pdata;
+ }
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(&pdev->dev, "failed to get I/O memory\n");
- return -ENXIO;
+ err = -ENXIO;
+ goto err_free_pdata;
}
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(&pdev->dev, "failed to get keyboard IRQ\n");
- return -ENXIO;
+ err = -ENXIO;
+ goto err_free_pdata;
}
kbc = kzalloc(sizeof(*kbc), GFP_KERNEL);
@@ -674,9 +739,10 @@ static int __devinit tegra_kbc_probe(struct platform_device *pdev)
keymap_data = pdata->keymap_data ?: &tegra_kbc_default_keymap_data;
matrix_keypad_build_keymap(keymap_data, KBC_ROW_SHIFT,
input_dev->keycode, input_dev->keybit);
+ kbc->wakeup_key = pdata->wakeup_key;
- err = request_irq(kbc->irq, tegra_kbc_isr, IRQF_TRIGGER_HIGH,
- pdev->name, kbc);
+ err = request_irq(kbc->irq, tegra_kbc_isr,
+ IRQF_NO_SUSPEND | IRQF_TRIGGER_HIGH, pdev->name, kbc);
if (err) {
dev_err(&pdev->dev, "failed to request keyboard IRQ\n");
goto err_put_clk;
@@ -706,6 +772,9 @@ err_free_mem_region:
err_free_mem:
input_free_device(input_dev);
kfree(kbc);
+err_free_pdata:
+ if (!pdev->dev.platform_data)
+ kfree(pdata);
return err;
}
@@ -715,6 +784,8 @@ static int __devexit tegra_kbc_remove(struct platform_device *pdev)
struct tegra_kbc *kbc = platform_get_drvdata(pdev);
struct resource *res;
+ platform_set_drvdata(pdev, NULL);
+
free_irq(kbc->irq, pdev);
clk_put(kbc->clk);
@@ -723,9 +794,14 @@ static int __devexit tegra_kbc_remove(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(res->start, resource_size(res));
- kfree(kbc);
+ /*
+ * If we do not have platform data attached to the device we
+ * allocated it ourselves and thus need to free it.
+ */
+ if (!pdev->dev.platform_data)
+ kfree(kbc->pdata);
- platform_set_drvdata(pdev, NULL);
+ kfree(kbc);
return 0;
}
@@ -754,6 +830,8 @@ static int tegra_kbc_suspend(struct device *dev)
tegra_kbc_setup_wakekeys(kbc, true);
msleep(30);
+ kbc->keypress_caused_wake = false;
+ enable_irq(kbc->irq);
enable_irq_wake(kbc->irq);
} else {
if (kbc->idev->users)
@@ -780,7 +858,19 @@ static int tegra_kbc_resume(struct device *dev)
tegra_kbc_set_fifo_interrupt(kbc, true);
- enable_irq(kbc->irq);
+ if (kbc->keypress_caused_wake && kbc->wakeup_key) {
+ /*
+ * We can't report events directly from the ISR
+ * because timekeeping is stopped when processing
+ * wakeup request and we get a nasty warning when
+ * we try to call do_gettimeofday() in evdev
+ * handler.
+ */
+ input_report_key(kbc->idev, kbc->wakeup_key, 1);
+ input_sync(kbc->idev);
+ input_report_key(kbc->idev, kbc->wakeup_key, 0);
+ input_sync(kbc->idev);
+ }
} else {
if (kbc->idev->users)
err = tegra_kbc_start(kbc);
@@ -793,6 +883,12 @@ static int tegra_kbc_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(tegra_kbc_pm_ops, tegra_kbc_suspend, tegra_kbc_resume);
+static const struct of_device_id tegra_kbc_of_match[] = {
+ { .compatible = "nvidia,tegra20-kbc", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tegra_kbc_of_match);
+
static struct platform_driver tegra_kbc_driver = {
.probe = tegra_kbc_probe,
.remove = __devexit_p(tegra_kbc_remove),
@@ -800,20 +896,10 @@ static struct platform_driver tegra_kbc_driver = {
.name = "tegra-kbc",
.owner = THIS_MODULE,
.pm = &tegra_kbc_pm_ops,
+ .of_match_table = tegra_kbc_of_match,
},
};
-
-static void __exit tegra_kbc_exit(void)
-{
- platform_driver_unregister(&tegra_kbc_driver);
-}
-module_exit(tegra_kbc_exit);
-
-static int __init tegra_kbc_init(void)
-{
- return platform_driver_register(&tegra_kbc_driver);
-}
-module_init(tegra_kbc_init);
+module_platform_driver(tegra_kbc_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Rakesh Iyer <riyer@nvidia.com>");
diff --git a/drivers/input/keyboard/tnetv107x-keypad.c b/drivers/input/keyboard/tnetv107x-keypad.c
index 66e55e5cfdd6..fb39c94b6fdd 100644
--- a/drivers/input/keyboard/tnetv107x-keypad.c
+++ b/drivers/input/keyboard/tnetv107x-keypad.c
@@ -322,19 +322,7 @@ static struct platform_driver keypad_driver = {
.driver.name = "tnetv107x-keypad",
.driver.owner = THIS_MODULE,
};
-
-static int __init keypad_init(void)
-{
- return platform_driver_register(&keypad_driver);
-}
-
-static void __exit keypad_exit(void)
-{
- platform_driver_unregister(&keypad_driver);
-}
-
-module_init(keypad_init);
-module_exit(keypad_exit);
+module_platform_driver(keypad_driver);
MODULE_AUTHOR("Cyril Chemparathy");
MODULE_DESCRIPTION("TNETV107X Keypad Driver");
diff --git a/drivers/input/keyboard/twl4030_keypad.c b/drivers/input/keyboard/twl4030_keypad.c
index a26922cf0e84..a588578037eb 100644
--- a/drivers/input/keyboard/twl4030_keypad.c
+++ b/drivers/input/keyboard/twl4030_keypad.c
@@ -460,18 +460,7 @@ static struct platform_driver twl4030_kp_driver = {
.owner = THIS_MODULE,
},
};
-
-static int __init twl4030_kp_init(void)
-{
- return platform_driver_register(&twl4030_kp_driver);
-}
-module_init(twl4030_kp_init);
-
-static void __exit twl4030_kp_exit(void)
-{
- platform_driver_unregister(&twl4030_kp_driver);
-}
-module_exit(twl4030_kp_exit);
+module_platform_driver(twl4030_kp_driver);
MODULE_AUTHOR("Texas Instruments");
MODULE_DESCRIPTION("TWL4030 Keypad Driver");
diff --git a/drivers/input/keyboard/w90p910_keypad.c b/drivers/input/keyboard/w90p910_keypad.c
index 318586dadacf..99bbb7e775ae 100644
--- a/drivers/input/keyboard/w90p910_keypad.c
+++ b/drivers/input/keyboard/w90p910_keypad.c
@@ -262,19 +262,7 @@ static struct platform_driver w90p910_keypad_driver = {
.owner = THIS_MODULE,
},
};
-
-static int __init w90p910_keypad_init(void)
-{
- return platform_driver_register(&w90p910_keypad_driver);
-}
-
-static void __exit w90p910_keypad_exit(void)
-{
- platform_driver_unregister(&w90p910_keypad_driver);
-}
-
-module_init(w90p910_keypad_init);
-module_exit(w90p910_keypad_exit);
+module_platform_driver(w90p910_keypad_driver);
MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
MODULE_DESCRIPTION("w90p910 keypad driver");
diff --git a/drivers/input/misc/88pm860x_onkey.c b/drivers/input/misc/88pm860x_onkey.c
index 3dca3c14510e..f2e0cbc5ab64 100644
--- a/drivers/input/misc/88pm860x_onkey.c
+++ b/drivers/input/misc/88pm860x_onkey.c
@@ -137,18 +137,7 @@ static struct platform_driver pm860x_onkey_driver = {
.probe = pm860x_onkey_probe,
.remove = __devexit_p(pm860x_onkey_remove),
};
-
-static int __init pm860x_onkey_init(void)
-{
- return platform_driver_register(&pm860x_onkey_driver);
-}
-module_init(pm860x_onkey_init);
-
-static void __exit pm860x_onkey_exit(void)
-{
- platform_driver_unregister(&pm860x_onkey_driver);
-}
-module_exit(pm860x_onkey_exit);
+module_platform_driver(pm860x_onkey_driver);
MODULE_DESCRIPTION("Marvell 88PM860x ONKEY driver");
MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>");
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 22d875fde53a..7b46781c30c9 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -179,6 +179,31 @@ config INPUT_APANEL
To compile this driver as a module, choose M here: the module will
be called apanel.
+config INPUT_GP2A
+ tristate "Sharp GP2AP002A00F I2C Proximity/Opto sensor driver"
+ depends on I2C
+ depends on GENERIC_GPIO
+ help
+ Say Y here if you have a Sharp GP2AP002A00F proximity/als combo-chip
+ hooked to an I2C bus.
+
+ To compile this driver as a module, choose M here: the
+ module will be called gp2ap002a00f.
+
+config INPUT_GPIO_TILT_POLLED
+ tristate "Polled GPIO tilt switch"
+ depends on GENERIC_GPIO
+ select INPUT_POLLDEV
+ help
+ This driver implements support for tilt switches connected
+ to GPIO pins that are not capable of generating interrupts.
+
+ The list of gpios to use and the mapping of their states
+ to specific angles is done via platform data.
+
+ To compile this driver as a module, choose M here: the
+ module will be called gpio_tilt_polled.
+
config INPUT_IXP4XX_BEEPER
tristate "IXP4XX Beeper support"
depends on ARCH_IXP4XX
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index a244fc6a781c..46671a875b91 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -22,6 +22,8 @@ obj-$(CONFIG_INPUT_CMA3000) += cma3000_d0x.o
obj-$(CONFIG_INPUT_CMA3000_I2C) += cma3000_d0x_i2c.o
obj-$(CONFIG_INPUT_COBALT_BTNS) += cobalt_btns.o
obj-$(CONFIG_INPUT_DM355EVM) += dm355evm_keys.o
+obj-$(CONFIG_INPUT_GP2A) += gp2ap002a00f.o
+obj-$(CONFIG_INPUT_GPIO_TILT_POLLED) += gpio_tilt_polled.o
obj-$(CONFIG_HP_SDC_RTC) += hp_sdc_rtc.o
obj-$(CONFIG_INPUT_IXP4XX_BEEPER) += ixp4xx-beeper.o
obj-$(CONFIG_INPUT_KEYSPAN_REMOTE) += keyspan_remote.o
diff --git a/drivers/input/misc/ab8500-ponkey.c b/drivers/input/misc/ab8500-ponkey.c
index 3d3288a78fdc..350fd0c385d2 100644
--- a/drivers/input/misc/ab8500-ponkey.c
+++ b/drivers/input/misc/ab8500-ponkey.c
@@ -12,7 +12,7 @@
#include <linux/platform_device.h>
#include <linux/input.h>
#include <linux/interrupt.h>
-#include <linux/mfd/ab8500.h>
+#include <linux/mfd/abx500/ab8500.h>
#include <linux/slab.h>
/**
@@ -139,18 +139,7 @@ static struct platform_driver ab8500_ponkey_driver = {
.probe = ab8500_ponkey_probe,
.remove = __devexit_p(ab8500_ponkey_remove),
};
-
-static int __init ab8500_ponkey_init(void)
-{
- return platform_driver_register(&ab8500_ponkey_driver);
-}
-module_init(ab8500_ponkey_init);
-
-static void __exit ab8500_ponkey_exit(void)
-{
- platform_driver_unregister(&ab8500_ponkey_driver);
-}
-module_exit(ab8500_ponkey_exit);
+module_platform_driver(ab8500_ponkey_driver);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Sundar Iyer <sundar.iyer@stericsson.com>");
diff --git a/drivers/input/misc/adxl34x-spi.c b/drivers/input/misc/adxl34x-spi.c
index f29de22fdda0..34d401efd4a1 100644
--- a/drivers/input/misc/adxl34x-spi.c
+++ b/drivers/input/misc/adxl34x-spi.c
@@ -122,7 +122,6 @@ static SIMPLE_DEV_PM_OPS(adxl34x_spi_pm, adxl34x_spi_suspend,
static struct spi_driver adxl34x_driver = {
.driver = {
.name = "adxl34x",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
.pm = &adxl34x_spi_pm,
},
diff --git a/drivers/input/misc/adxl34x.c b/drivers/input/misc/adxl34x.c
index 09244804fb97..1cf72fe513e6 100644
--- a/drivers/input/misc/adxl34x.c
+++ b/drivers/input/misc/adxl34x.c
@@ -452,10 +452,10 @@ static ssize_t adxl34x_disable_store(struct device *dev,
const char *buf, size_t count)
{
struct adxl34x *ac = dev_get_drvdata(dev);
- unsigned long val;
+ unsigned int val;
int error;
- error = strict_strtoul(buf, 10, &val);
+ error = kstrtouint(buf, 10, &val);
if (error)
return error;
@@ -541,10 +541,10 @@ static ssize_t adxl34x_rate_store(struct device *dev,
const char *buf, size_t count)
{
struct adxl34x *ac = dev_get_drvdata(dev);
- unsigned long val;
+ unsigned char val;
int error;
- error = strict_strtoul(buf, 10, &val);
+ error = kstrtou8(buf, 10, &val);
if (error)
return error;
@@ -576,10 +576,10 @@ static ssize_t adxl34x_autosleep_store(struct device *dev,
const char *buf, size_t count)
{
struct adxl34x *ac = dev_get_drvdata(dev);
- unsigned long val;
+ unsigned int val;
int error;
- error = strict_strtoul(buf, 10, &val);
+ error = kstrtouint(buf, 10, &val);
if (error)
return error;
@@ -623,13 +623,13 @@ static ssize_t adxl34x_write_store(struct device *dev,
const char *buf, size_t count)
{
struct adxl34x *ac = dev_get_drvdata(dev);
- unsigned long val;
+ unsigned int val;
int error;
/*
* This allows basic ADXL register write access for debug purposes.
*/
- error = strict_strtoul(buf, 16, &val);
+ error = kstrtouint(buf, 16, &val);
if (error)
return error;
diff --git a/drivers/input/misc/ati_remote2.c b/drivers/input/misc/ati_remote2.c
index 874a51c2fbb2..f63341f20b91 100644
--- a/drivers/input/misc/ati_remote2.c
+++ b/drivers/input/misc/ati_remote2.c
@@ -42,13 +42,13 @@ static int ati_remote2_set_mask(const char *val,
const struct kernel_param *kp,
unsigned int max)
{
- unsigned long mask;
+ unsigned int mask;
int ret;
if (!val)
return -EINVAL;
- ret = strict_strtoul(val, 0, &mask);
+ ret = kstrtouint(val, 0, &mask);
if (ret)
return ret;
@@ -720,11 +720,12 @@ static ssize_t ati_remote2_store_channel_mask(struct device *dev,
struct usb_device *udev = to_usb_device(dev);
struct usb_interface *intf = usb_ifnum_to_if(udev, 0);
struct ati_remote2 *ar2 = usb_get_intfdata(intf);
- unsigned long mask;
+ unsigned int mask;
int r;
- if (strict_strtoul(buf, 0, &mask))
- return -EINVAL;
+ r = kstrtouint(buf, 0, &mask);
+ if (r)
+ return r;
if (mask & ~ATI_REMOTE2_MAX_CHANNEL_MASK)
return -EINVAL;
@@ -769,10 +770,12 @@ static ssize_t ati_remote2_store_mode_mask(struct device *dev,
struct usb_device *udev = to_usb_device(dev);
struct usb_interface *intf = usb_ifnum_to_if(udev, 0);
struct ati_remote2 *ar2 = usb_get_intfdata(intf);
- unsigned long mask;
+ unsigned int mask;
+ int err;
- if (strict_strtoul(buf, 0, &mask))
- return -EINVAL;
+ err = kstrtouint(buf, 0, &mask);
+ if (err)
+ return err;
if (mask & ~ATI_REMOTE2_MAX_MODE_MASK)
return -EINVAL;
diff --git a/drivers/input/misc/bfin_rotary.c b/drivers/input/misc/bfin_rotary.c
index d00edc9f39d1..1c4146fccfdf 100644
--- a/drivers/input/misc/bfin_rotary.c
+++ b/drivers/input/misc/bfin_rotary.c
@@ -264,18 +264,7 @@ static struct platform_driver bfin_rotary_device_driver = {
#endif
},
};
-
-static int __init bfin_rotary_init(void)
-{
- return platform_driver_register(&bfin_rotary_device_driver);
-}
-module_init(bfin_rotary_init);
-
-static void __exit bfin_rotary_exit(void)
-{
- platform_driver_unregister(&bfin_rotary_device_driver);
-}
-module_exit(bfin_rotary_exit);
+module_platform_driver(bfin_rotary_device_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
diff --git a/drivers/input/misc/cma3000_d0x.c b/drivers/input/misc/cma3000_d0x.c
index 80793f1608eb..06517e60e50c 100644
--- a/drivers/input/misc/cma3000_d0x.c
+++ b/drivers/input/misc/cma3000_d0x.c
@@ -115,8 +115,8 @@ static void decode_mg(struct cma3000_accl_data *data, int *datax,
static irqreturn_t cma3000_thread_irq(int irq, void *dev_id)
{
struct cma3000_accl_data *data = dev_id;
- int datax, datay, dataz;
- u8 ctrl, mode, range, intr_status;
+ int datax, datay, dataz, intr_status;
+ u8 ctrl, mode, range;
intr_status = CMA3000_READ(data, CMA3000_INTSTATUS, "interrupt status");
if (intr_status < 0)
diff --git a/drivers/input/misc/cobalt_btns.c b/drivers/input/misc/cobalt_btns.c
index fd8407a29631..53e43d295148 100644
--- a/drivers/input/misc/cobalt_btns.c
+++ b/drivers/input/misc/cobalt_btns.c
@@ -163,16 +163,4 @@ static struct platform_driver cobalt_buttons_driver = {
.owner = THIS_MODULE,
},
};
-
-static int __init cobalt_buttons_init(void)
-{
- return platform_driver_register(&cobalt_buttons_driver);
-}
-
-static void __exit cobalt_buttons_exit(void)
-{
- platform_driver_unregister(&cobalt_buttons_driver);
-}
-
-module_init(cobalt_buttons_init);
-module_exit(cobalt_buttons_exit);
+module_platform_driver(cobalt_buttons_driver);
diff --git a/drivers/input/misc/dm355evm_keys.c b/drivers/input/misc/dm355evm_keys.c
index 7283dd2a1ad3..35083c6836c3 100644
--- a/drivers/input/misc/dm355evm_keys.c
+++ b/drivers/input/misc/dm355evm_keys.c
@@ -267,17 +267,6 @@ static struct platform_driver dm355evm_keys_driver = {
.name = "dm355evm_keys",
},
};
-
-static int __init dm355evm_keys_init(void)
-{
- return platform_driver_register(&dm355evm_keys_driver);
-}
-module_init(dm355evm_keys_init);
-
-static void __exit dm355evm_keys_exit(void)
-{
- platform_driver_unregister(&dm355evm_keys_driver);
-}
-module_exit(dm355evm_keys_exit);
+module_platform_driver(dm355evm_keys_driver);
MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/gp2ap002a00f.c b/drivers/input/misc/gp2ap002a00f.c
new file mode 100644
index 000000000000..71fba8c2fc66
--- /dev/null
+++ b/drivers/input/misc/gp2ap002a00f.c
@@ -0,0 +1,299 @@
+/*
+ * Copyright (C) 2011 Sony Ericsson Mobile Communications Inc.
+ *
+ * Author: Courtney Cavin <courtney.cavin@sonyericsson.com>
+ * Prepared for up-stream by: Oskar Andero <oskar.andero@sonyericsson.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/i2c.h>
+#include <linux/irq.h>
+#include <linux/slab.h>
+#include <linux/input.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/input/gp2ap002a00f.h>
+
+struct gp2a_data {
+ struct input_dev *input;
+ const struct gp2a_platform_data *pdata;
+ struct i2c_client *i2c_client;
+};
+
+enum gp2a_addr {
+ GP2A_ADDR_PROX = 0x0,
+ GP2A_ADDR_GAIN = 0x1,
+ GP2A_ADDR_HYS = 0x2,
+ GP2A_ADDR_CYCLE = 0x3,
+ GP2A_ADDR_OPMOD = 0x4,
+ GP2A_ADDR_CON = 0x6
+};
+
+enum gp2a_controls {
+ /* Software Shutdown control: 0 = shutdown, 1 = normal operation */
+ GP2A_CTRL_SSD = 0x01
+};
+
+static int gp2a_report(struct gp2a_data *dt)
+{
+ int vo = gpio_get_value(dt->pdata->vout_gpio);
+
+ input_report_switch(dt->input, SW_FRONT_PROXIMITY, !vo);
+ input_sync(dt->input);
+
+ return 0;
+}
+
+static irqreturn_t gp2a_irq(int irq, void *handle)
+{
+ struct gp2a_data *dt = handle;
+
+ gp2a_report(dt);
+
+ return IRQ_HANDLED;
+}
+
+static int gp2a_enable(struct gp2a_data *dt)
+{
+ return i2c_smbus_write_byte_data(dt->i2c_client, GP2A_ADDR_OPMOD,
+ GP2A_CTRL_SSD);
+}
+
+static int gp2a_disable(struct gp2a_data *dt)
+{
+ return i2c_smbus_write_byte_data(dt->i2c_client, GP2A_ADDR_OPMOD,
+ 0x00);
+}
+
+static int gp2a_device_open(struct input_dev *dev)
+{
+ struct gp2a_data *dt = input_get_drvdata(dev);
+ int error;
+
+ error = gp2a_enable(dt);
+ if (error < 0) {
+ dev_err(&dt->i2c_client->dev,
+ "unable to activate, err %d\n", error);
+ return error;
+ }
+
+ gp2a_report(dt);
+
+ return 0;
+}
+
+static void gp2a_device_close(struct input_dev *dev)
+{
+ struct gp2a_data *dt = input_get_drvdata(dev);
+ int error;
+
+ error = gp2a_disable(dt);
+ if (error < 0)
+ dev_err(&dt->i2c_client->dev,
+ "unable to deactivate, err %d\n", error);
+}
+
+static int __devinit gp2a_initialize(struct gp2a_data *dt)
+{
+ int error;
+
+ error = i2c_smbus_write_byte_data(dt->i2c_client, GP2A_ADDR_GAIN,
+ 0x08);
+ if (error < 0)
+ return error;
+
+ error = i2c_smbus_write_byte_data(dt->i2c_client, GP2A_ADDR_HYS,
+ 0xc2);
+ if (error < 0)
+ return error;
+
+ error = i2c_smbus_write_byte_data(dt->i2c_client, GP2A_ADDR_CYCLE,
+ 0x04);
+ if (error < 0)
+ return error;
+
+ error = gp2a_disable(dt);
+
+ return error;
+}
+
+static int __devinit gp2a_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ const struct gp2a_platform_data *pdata = client->dev.platform_data;
+ struct gp2a_data *dt;
+ int error;
+
+ if (!pdata)
+ return -EINVAL;
+
+ if (pdata->hw_setup) {
+ error = pdata->hw_setup(client);
+ if (error < 0)
+ return error;
+ }
+
+ error = gpio_request_one(pdata->vout_gpio, GPIOF_IN, GP2A_I2C_NAME);
+ if (error)
+ goto err_hw_shutdown;
+
+ dt = kzalloc(sizeof(struct gp2a_data), GFP_KERNEL);
+ if (!dt) {
+ error = -ENOMEM;
+ goto err_free_gpio;
+ }
+
+ dt->pdata = pdata;
+ dt->i2c_client = client;
+
+ error = gp2a_initialize(dt);
+ if (error < 0)
+ goto err_free_mem;
+
+ dt->input = input_allocate_device();
+ if (!dt->input) {
+ error = -ENOMEM;
+ goto err_free_mem;
+ }
+
+ input_set_drvdata(dt->input, dt);
+
+ dt->input->open = gp2a_device_open;
+ dt->input->close = gp2a_device_close;
+ dt->input->name = GP2A_I2C_NAME;
+ dt->input->id.bustype = BUS_I2C;
+ dt->input->dev.parent = &client->dev;
+
+ input_set_capability(dt->input, EV_SW, SW_FRONT_PROXIMITY);
+
+ error = request_threaded_irq(client->irq, NULL, gp2a_irq,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT,
+ GP2A_I2C_NAME, dt);
+ if (error) {
+ dev_err(&client->dev, "irq request failed\n");
+ goto err_free_input_dev;
+ }
+
+ error = input_register_device(dt->input);
+ if (error) {
+ dev_err(&client->dev, "device registration failed\n");
+ goto err_free_irq;
+ }
+
+ device_init_wakeup(&client->dev, pdata->wakeup);
+ i2c_set_clientdata(client, dt);
+
+ return 0;
+
+err_free_irq:
+ free_irq(client->irq, dt);
+err_free_input_dev:
+ input_free_device(dt->input);
+err_free_mem:
+ kfree(dt);
+err_free_gpio:
+ gpio_free(pdata->vout_gpio);
+err_hw_shutdown:
+ if (pdata->hw_shutdown)
+ pdata->hw_shutdown(client);
+ return error;
+}
+
+static int __devexit gp2a_remove(struct i2c_client *client)
+{
+ struct gp2a_data *dt = i2c_get_clientdata(client);
+ const struct gp2a_platform_data *pdata = dt->pdata;
+
+ device_init_wakeup(&client->dev, false);
+
+ free_irq(client->irq, dt);
+
+ input_unregister_device(dt->input);
+ kfree(dt);
+
+ gpio_free(pdata->vout_gpio);
+
+ if (pdata->hw_shutdown)
+ pdata->hw_shutdown(client);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int gp2a_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct gp2a_data *dt = i2c_get_clientdata(client);
+ int retval = 0;
+
+ if (device_may_wakeup(&client->dev)) {
+ enable_irq_wake(client->irq);
+ } else {
+ mutex_lock(&dt->input->mutex);
+ if (dt->input->users)
+ retval = gp2a_disable(dt);
+ mutex_unlock(&dt->input->mutex);
+ }
+
+ return retval;
+}
+
+static int gp2a_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct gp2a_data *dt = i2c_get_clientdata(client);
+ int retval = 0;
+
+ if (device_may_wakeup(&client->dev)) {
+ disable_irq_wake(client->irq);
+ } else {
+ mutex_lock(&dt->input->mutex);
+ if (dt->input->users)
+ retval = gp2a_enable(dt);
+ mutex_unlock(&dt->input->mutex);
+ }
+
+ return retval;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(gp2a_pm, gp2a_suspend, gp2a_resume);
+
+static const struct i2c_device_id gp2a_i2c_id[] = {
+ { GP2A_I2C_NAME, 0 },
+ { }
+};
+
+static struct i2c_driver gp2a_i2c_driver = {
+ .driver = {
+ .name = GP2A_I2C_NAME,
+ .owner = THIS_MODULE,
+ .pm = &gp2a_pm,
+ },
+ .probe = gp2a_probe,
+ .remove = __devexit_p(gp2a_remove),
+ .id_table = gp2a_i2c_id,
+};
+
+static int __init gp2a_init(void)
+{
+ return i2c_add_driver(&gp2a_i2c_driver);
+}
+
+static void __exit gp2a_exit(void)
+{
+ i2c_del_driver(&gp2a_i2c_driver);
+}
+
+module_init(gp2a_init);
+module_exit(gp2a_exit);
+
+MODULE_AUTHOR("Courtney Cavin <courtney.cavin@sonyericsson.com>");
+MODULE_DESCRIPTION("Sharp GP2AP002A00F I2C Proximity/Opto sensor driver");
+MODULE_LICENSE("GPL v2");
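The new proximity driver is configured through platform data rather than devicetree: probe() needs a struct gp2a_platform_data providing at least the VOUT GPIO, with optional hw_setup()/hw_shutdown() callbacks and a wakeup flag. A hypothetical board-file hookup might look like the following sketch (bus number, slave address, GPIO and IRQ are placeholders, and the device name is assumed to match GP2A_I2C_NAME from the platform header):

static struct gp2a_platform_data example_gp2a_pdata = {
	.vout_gpio = 20,	/* placeholder: GPIO wired to the sensor's VOUT pin */
	.wakeup    = true,	/* allow the sensor to wake the system */
	/* .hw_setup / .hw_shutdown may switch the sensor's supply on and off */
};

static struct i2c_board_info example_gp2a_board_info __initdata = {
	I2C_BOARD_INFO("gp2ap002a00f", 0x44),	/* name assumed equal to GP2A_I2C_NAME */
	.platform_data = &example_gp2a_pdata,
	.irq = 123,				/* placeholder, e.g. gpio_to_irq(20) */
};

/* e.g. i2c_register_board_info(1, &example_gp2a_board_info, 1); */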
diff --git a/drivers/input/misc/gpio_tilt_polled.c b/drivers/input/misc/gpio_tilt_polled.c
new file mode 100644
index 000000000000..277a0574c199
--- /dev/null
+++ b/drivers/input/misc/gpio_tilt_polled.c
@@ -0,0 +1,213 @@
+/*
+ * Driver for tilt switches connected via GPIO lines
+ * not capable of generating interrupts
+ *
+ * Copyright (C) 2011 Heiko Stuebner <heiko@sntech.de>
+ *
+ * based on: drivers/input/keyboard/gpio_keys_polled.c
+ *
+ * Copyright (C) 2007-2010 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2010 Nuno Goncalves <nunojpg@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/input.h>
+#include <linux/input-polldev.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/input/gpio_tilt.h>
+
+#define DRV_NAME "gpio-tilt-polled"
+
+struct gpio_tilt_polled_dev {
+ struct input_polled_dev *poll_dev;
+ struct device *dev;
+ const struct gpio_tilt_platform_data *pdata;
+
+ int last_state;
+
+ int threshold;
+ int count;
+};
+
+static void gpio_tilt_polled_poll(struct input_polled_dev *dev)
+{
+ struct gpio_tilt_polled_dev *tdev = dev->private;
+ const struct gpio_tilt_platform_data *pdata = tdev->pdata;
+ struct input_dev *input = dev->input;
+ struct gpio_tilt_state *tilt_state = NULL;
+ int state, i;
+
+ if (tdev->count < tdev->threshold) {
+ tdev->count++;
+ } else {
+ state = 0;
+ for (i = 0; i < pdata->nr_gpios; i++)
+ state |= (!!gpio_get_value(pdata->gpios[i].gpio) << i);
+
+ if (state != tdev->last_state) {
+ for (i = 0; i < pdata->nr_states; i++)
+ if (pdata->states[i].gpios == state)
+ tilt_state = &pdata->states[i];
+
+ if (tilt_state) {
+ for (i = 0; i < pdata->nr_axes; i++)
+ input_report_abs(input,
+ pdata->axes[i].axis,
+ tilt_state->axes[i]);
+
+ input_sync(input);
+ }
+
+ tdev->count = 0;
+ tdev->last_state = state;
+ }
+ }
+}
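/*
 * Debounce example for the logic above: with pdata->debounce_interval = 100 ms
 * and pdata->poll_interval = 20 ms (placeholder values), the probe code below
 * computes threshold = DIV_ROUND_UP(100, 20) = 5, so after a reported state
 * change the GPIOs are left unsampled for the next five poll cycles (roughly
 * one debounce interval) before a new state can be latched.
 */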
+
+static void gpio_tilt_polled_open(struct input_polled_dev *dev)
+{
+ struct gpio_tilt_polled_dev *tdev = dev->private;
+ const struct gpio_tilt_platform_data *pdata = tdev->pdata;
+
+ if (pdata->enable)
+ pdata->enable(tdev->dev);
+
+ /* report initial state of the axes */
+ tdev->last_state = -1;
+ tdev->count = tdev->threshold;
+ gpio_tilt_polled_poll(tdev->poll_dev);
+}
+
+static void gpio_tilt_polled_close(struct input_polled_dev *dev)
+{
+ struct gpio_tilt_polled_dev *tdev = dev->private;
+ const struct gpio_tilt_platform_data *pdata = tdev->pdata;
+
+ if (pdata->disable)
+ pdata->disable(tdev->dev);
+}
+
+static int __devinit gpio_tilt_polled_probe(struct platform_device *pdev)
+{
+ const struct gpio_tilt_platform_data *pdata = pdev->dev.platform_data;
+ struct device *dev = &pdev->dev;
+ struct gpio_tilt_polled_dev *tdev;
+ struct input_polled_dev *poll_dev;
+ struct input_dev *input;
+ int error, i;
+
+ if (!pdata || !pdata->poll_interval)
+ return -EINVAL;
+
+ tdev = kzalloc(sizeof(struct gpio_tilt_polled_dev), GFP_KERNEL);
+ if (!tdev) {
+ dev_err(dev, "no memory for private data\n");
+ return -ENOMEM;
+ }
+
+ error = gpio_request_array(pdata->gpios, pdata->nr_gpios);
+ if (error) {
+ dev_err(dev,
+ "Could not request tilt GPIOs: %d\n", error);
+ goto err_free_tdev;
+ }
+
+ poll_dev = input_allocate_polled_device();
+ if (!poll_dev) {
+ dev_err(dev, "no memory for polled device\n");
+ error = -ENOMEM;
+ goto err_free_gpios;
+ }
+
+ poll_dev->private = tdev;
+ poll_dev->poll = gpio_tilt_polled_poll;
+ poll_dev->poll_interval = pdata->poll_interval;
+ poll_dev->open = gpio_tilt_polled_open;
+ poll_dev->close = gpio_tilt_polled_close;
+
+ input = poll_dev->input;
+
+ input->name = pdev->name;
+ input->phys = DRV_NAME"/input0";
+ input->dev.parent = &pdev->dev;
+
+ input->id.bustype = BUS_HOST;
+ input->id.vendor = 0x0001;
+ input->id.product = 0x0001;
+ input->id.version = 0x0100;
+
+ __set_bit(EV_ABS, input->evbit);
+ for (i = 0; i < pdata->nr_axes; i++)
+ input_set_abs_params(input, pdata->axes[i].axis,
+ pdata->axes[i].min, pdata->axes[i].max,
+ pdata->axes[i].fuzz, pdata->axes[i].flat);
+
+ tdev->threshold = DIV_ROUND_UP(pdata->debounce_interval,
+ pdata->poll_interval);
+
+ tdev->poll_dev = poll_dev;
+ tdev->dev = dev;
+ tdev->pdata = pdata;
+
+ error = input_register_polled_device(poll_dev);
+ if (error) {
+ dev_err(dev, "unable to register polled device, err=%d\n",
+ error);
+ goto err_free_polldev;
+ }
+
+ platform_set_drvdata(pdev, tdev);
+
+ return 0;
+
+err_free_polldev:
+ input_free_polled_device(poll_dev);
+err_free_gpios:
+ gpio_free_array(pdata->gpios, pdata->nr_gpios);
+err_free_tdev:
+ kfree(tdev);
+
+ return error;
+}
+
+static int __devexit gpio_tilt_polled_remove(struct platform_device *pdev)
+{
+ struct gpio_tilt_polled_dev *tdev = platform_get_drvdata(pdev);
+ const struct gpio_tilt_platform_data *pdata = tdev->pdata;
+
+ platform_set_drvdata(pdev, NULL);
+
+ input_unregister_polled_device(tdev->poll_dev);
+ input_free_polled_device(tdev->poll_dev);
+
+ gpio_free_array(pdata->gpios, pdata->nr_gpios);
+
+ kfree(tdev);
+
+ return 0;
+}
+
+static struct platform_driver gpio_tilt_polled_driver = {
+ .probe = gpio_tilt_polled_probe,
+ .remove = __devexit_p(gpio_tilt_polled_remove),
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+module_platform_driver(gpio_tilt_polled_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>");
+MODULE_DESCRIPTION("Polled GPIO tilt driver");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/input/misc/ixp4xx-beeper.c b/drivers/input/misc/ixp4xx-beeper.c
index 302ab46ce752..50e283068301 100644
--- a/drivers/input/misc/ixp4xx-beeper.c
+++ b/drivers/input/misc/ixp4xx-beeper.c
@@ -168,16 +168,5 @@ static struct platform_driver ixp4xx_spkr_platform_driver = {
.remove = __devexit_p(ixp4xx_spkr_remove),
.shutdown = ixp4xx_spkr_shutdown,
};
+module_platform_driver(ixp4xx_spkr_platform_driver);
-static int __init ixp4xx_spkr_init(void)
-{
- return platform_driver_register(&ixp4xx_spkr_platform_driver);
-}
-
-static void __exit ixp4xx_spkr_exit(void)
-{
- platform_driver_unregister(&ixp4xx_spkr_platform_driver);
-}
-
-module_init(ixp4xx_spkr_init);
-module_exit(ixp4xx_spkr_exit);
diff --git a/drivers/input/misc/max8925_onkey.c b/drivers/input/misc/max8925_onkey.c
index 7de0ded4ccc3..23cf08271049 100644
--- a/drivers/input/misc/max8925_onkey.c
+++ b/drivers/input/misc/max8925_onkey.c
@@ -166,18 +166,7 @@ static struct platform_driver max8925_onkey_driver = {
.probe = max8925_onkey_probe,
.remove = __devexit_p(max8925_onkey_remove),
};
-
-static int __init max8925_onkey_init(void)
-{
- return platform_driver_register(&max8925_onkey_driver);
-}
-module_init(max8925_onkey_init);
-
-static void __exit max8925_onkey_exit(void)
-{
- platform_driver_unregister(&max8925_onkey_driver);
-}
-module_exit(max8925_onkey_exit);
+module_platform_driver(max8925_onkey_driver);
MODULE_DESCRIPTION("Maxim MAX8925 ONKEY driver");
MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>");
diff --git a/drivers/input/misc/mc13783-pwrbutton.c b/drivers/input/misc/mc13783-pwrbutton.c
index 09b052288657..8428f1e8e83e 100644
--- a/drivers/input/misc/mc13783-pwrbutton.c
+++ b/drivers/input/misc/mc13783-pwrbutton.c
@@ -255,7 +255,7 @@ static int __devexit mc13783_pwrbutton_remove(struct platform_device *pdev)
return 0;
}
-struct platform_driver mc13783_pwrbutton_driver = {
+static struct platform_driver mc13783_pwrbutton_driver = {
.probe = mc13783_pwrbutton_probe,
.remove = __devexit_p(mc13783_pwrbutton_remove),
.driver = {
@@ -264,17 +264,7 @@ struct platform_driver mc13783_pwrbutton_driver = {
},
};
-static int __init mc13783_pwrbutton_init(void)
-{
- return platform_driver_register(&mc13783_pwrbutton_driver);
-}
-module_init(mc13783_pwrbutton_init);
-
-static void __exit mc13783_pwrbutton_exit(void)
-{
- platform_driver_unregister(&mc13783_pwrbutton_driver);
-}
-module_exit(mc13783_pwrbutton_exit);
+module_platform_driver(mc13783_pwrbutton_driver);
MODULE_ALIAS("platform:mc13783-pwrbutton");
MODULE_DESCRIPTION("MC13783 Power Button");
diff --git a/drivers/input/misc/mpu3050.c b/drivers/input/misc/mpu3050.c
index f71dc728da58..208d1a1cc7f3 100644
--- a/drivers/input/misc/mpu3050.c
+++ b/drivers/input/misc/mpu3050.c
@@ -41,18 +41,67 @@
#include <linux/slab.h>
#include <linux/pm_runtime.h>
-#define MPU3050_CHIP_ID_REG 0x00
#define MPU3050_CHIP_ID 0x69
-#define MPU3050_XOUT_H 0x1D
-#define MPU3050_PWR_MGM 0x3E
-#define MPU3050_PWR_MGM_POS 6
-#define MPU3050_PWR_MGM_MASK 0x40
#define MPU3050_AUTO_DELAY 1000
#define MPU3050_MIN_VALUE -32768
#define MPU3050_MAX_VALUE 32767
+#define MPU3050_DEFAULT_POLL_INTERVAL 200
+#define MPU3050_DEFAULT_FS_RANGE 3
+
+/* Register map */
+#define MPU3050_CHIP_ID_REG 0x00
+#define MPU3050_SMPLRT_DIV 0x15
+#define MPU3050_DLPF_FS_SYNC 0x16
+#define MPU3050_INT_CFG 0x17
+#define MPU3050_XOUT_H 0x1D
+#define MPU3050_PWR_MGM 0x3E
+#define MPU3050_PWR_MGM_POS 6
+
+/* Register bits */
+
+/* DLPF_FS_SYNC */
+#define MPU3050_EXT_SYNC_NONE 0x00
+#define MPU3050_EXT_SYNC_TEMP 0x20
+#define MPU3050_EXT_SYNC_GYROX 0x40
+#define MPU3050_EXT_SYNC_GYROY 0x60
+#define MPU3050_EXT_SYNC_GYROZ 0x80
+#define MPU3050_EXT_SYNC_ACCELX 0xA0
+#define MPU3050_EXT_SYNC_ACCELY 0xC0
+#define MPU3050_EXT_SYNC_ACCELZ 0xE0
+#define MPU3050_EXT_SYNC_MASK 0xE0
+#define MPU3050_FS_250DPS 0x00
+#define MPU3050_FS_500DPS 0x08
+#define MPU3050_FS_1000DPS 0x10
+#define MPU3050_FS_2000DPS 0x18
+#define MPU3050_FS_MASK 0x18
+#define MPU3050_DLPF_CFG_256HZ_NOLPF2 0x00
+#define MPU3050_DLPF_CFG_188HZ 0x01
+#define MPU3050_DLPF_CFG_98HZ 0x02
+#define MPU3050_DLPF_CFG_42HZ 0x03
+#define MPU3050_DLPF_CFG_20HZ 0x04
+#define MPU3050_DLPF_CFG_10HZ 0x05
+#define MPU3050_DLPF_CFG_5HZ 0x06
+#define MPU3050_DLPF_CFG_2100HZ_NOLPF 0x07
+#define MPU3050_DLPF_CFG_MASK 0x07
+/* INT_CFG */
+#define MPU3050_RAW_RDY_EN 0x01
+#define MPU3050_MPU_RDY_EN 0x02
+#define MPU3050_LATCH_INT_EN 0x04
+/* PWR_MGM */
+#define MPU3050_PWR_MGM_PLL_X 0x01
+#define MPU3050_PWR_MGM_PLL_Y 0x02
+#define MPU3050_PWR_MGM_PLL_Z 0x03
+#define MPU3050_PWR_MGM_CLKSEL 0x07
+#define MPU3050_PWR_MGM_STBY_ZG 0x08
+#define MPU3050_PWR_MGM_STBY_YG 0x10
+#define MPU3050_PWR_MGM_STBY_XG 0x20
+#define MPU3050_PWR_MGM_SLEEP 0x40
+#define MPU3050_PWR_MGM_RESET 0x80
+#define MPU3050_PWR_MGM_MASK 0x40
+
struct axis_data {
s16 x;
s16 y;
@@ -148,9 +197,20 @@ static void mpu3050_set_power_mode(struct i2c_client *client, u8 val)
static int mpu3050_input_open(struct input_dev *input)
{
struct mpu3050_sensor *sensor = input_get_drvdata(input);
+ int error;
pm_runtime_get(sensor->dev);
+ /* Enable interrupts */
+ error = i2c_smbus_write_byte_data(sensor->client, MPU3050_INT_CFG,
+ MPU3050_LATCH_INT_EN |
+ MPU3050_RAW_RDY_EN |
+ MPU3050_MPU_RDY_EN);
+ if (error < 0) {
+ pm_runtime_put(sensor->dev);
+ return error;
+ }
+
return 0;
}
@@ -192,6 +252,51 @@ static irqreturn_t mpu3050_interrupt_thread(int irq, void *data)
}
/**
+ * mpu3050_hw_init - initialize hardware
+ * @sensor: the sensor
+ *
+ * Called during device probe; configures the sampling method.
+ */
+static int __devinit mpu3050_hw_init(struct mpu3050_sensor *sensor)
+{
+ struct i2c_client *client = sensor->client;
+ int ret;
+ u8 reg;
+
+ /* Reset */
+ ret = i2c_smbus_write_byte_data(client, MPU3050_PWR_MGM,
+ MPU3050_PWR_MGM_RESET);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_read_byte_data(client, MPU3050_PWR_MGM);
+ if (ret < 0)
+ return ret;
+
+ ret &= ~MPU3050_PWR_MGM_CLKSEL;
+ ret |= MPU3050_PWR_MGM_PLL_Z;
+ ret = i2c_smbus_write_byte_data(client, MPU3050_PWR_MGM, ret);
+ if (ret < 0)
+ return ret;
+
+	/* Output frequency divider: one sample per poll interval at the 1 kHz internal rate */
+ ret = i2c_smbus_write_byte_data(client, MPU3050_SMPLRT_DIV,
+ MPU3050_DEFAULT_POLL_INTERVAL - 1);
+ if (ret < 0)
+ return ret;
+
+ /* Set low pass filter and full scale */
+ reg = MPU3050_DEFAULT_FS_RANGE;
+ reg |= MPU3050_DLPF_CFG_42HZ << 3;
+ reg |= MPU3050_EXT_SYNC_NONE << 5;
+ ret = i2c_smbus_write_byte_data(client, MPU3050_DLPF_FS_SYNC, reg);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+/**
* mpu3050_probe - device detection callback
* @client: i2c client of found device
* @id: id match information
@@ -256,10 +361,14 @@ static int __devinit mpu3050_probe(struct i2c_client *client,
pm_runtime_set_active(&client->dev);
+ error = mpu3050_hw_init(sensor);
+ if (error)
+ goto err_pm_set_suspended;
+
error = request_threaded_irq(client->irq,
NULL, mpu3050_interrupt_thread,
IRQF_TRIGGER_RISING,
- "mpu_int", sensor);
+ "mpu3050", sensor);
if (error) {
dev_err(&client->dev,
"can't get IRQ %d, error %d\n", client->irq, error);
@@ -348,11 +457,18 @@ static const struct i2c_device_id mpu3050_ids[] = {
};
MODULE_DEVICE_TABLE(i2c, mpu3050_ids);
+static const struct of_device_id mpu3050_of_match[] = {
+ { .compatible = "invn,mpu3050", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, mpu3050_of_match);
+
static struct i2c_driver mpu3050_i2c_driver = {
.driver = {
.name = "mpu3050",
.owner = THIS_MODULE,
.pm = &mpu3050_pm,
+ .of_match_table = mpu3050_of_match,
},
.probe = mpu3050_probe,
.remove = __devexit_p(mpu3050_remove),
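A worked example for the DLPF_FS_SYNC write in mpu3050_hw_init() above, stepping through the default constants:

	reg  = MPU3050_DEFAULT_FS_RANGE;	/* 0x03 */
	reg |= MPU3050_DLPF_CFG_42HZ << 3;	/* |= 0x18, reg = 0x1b */
	reg |= MPU3050_EXT_SYNC_NONE << 5;	/* |= 0x00, reg = 0x1b */

Decoded against the field masks defined in the same hunk, 0x1b selects the 42 Hz digital low-pass filter (DLPF_CFG field, mask 0x07), the +/-2000 deg/s full-scale range (FS field, mask 0x18) and no external sync; both fields happen to hold the value 3, which is why the composition yields the intended byte even though the named constants end up shifted into each other's bit positions.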
diff --git a/drivers/input/misc/pcap_keys.c b/drivers/input/misc/pcap_keys.c
index 99335c286250..e09b4fe81913 100644
--- a/drivers/input/misc/pcap_keys.c
+++ b/drivers/input/misc/pcap_keys.c
@@ -125,19 +125,7 @@ static struct platform_driver pcap_keys_device_driver = {
.owner = THIS_MODULE,
}
};
-
-static int __init pcap_keys_init(void)
-{
- return platform_driver_register(&pcap_keys_device_driver);
-};
-
-static void __exit pcap_keys_exit(void)
-{
- platform_driver_unregister(&pcap_keys_device_driver);
-};
-
-module_init(pcap_keys_init);
-module_exit(pcap_keys_exit);
+module_platform_driver(pcap_keys_device_driver);
MODULE_DESCRIPTION("Motorola PCAP2 input events driver");
MODULE_AUTHOR("Ilya Petrov <ilya.muromec@gmail.com>");
diff --git a/drivers/input/misc/pcf50633-input.c b/drivers/input/misc/pcf50633-input.c
index 95562735728d..53891de80b0e 100644
--- a/drivers/input/misc/pcf50633-input.c
+++ b/drivers/input/misc/pcf50633-input.c
@@ -113,18 +113,7 @@ static struct platform_driver pcf50633_input_driver = {
.probe = pcf50633_input_probe,
.remove = __devexit_p(pcf50633_input_remove),
};
-
-static int __init pcf50633_input_init(void)
-{
- return platform_driver_register(&pcf50633_input_driver);
-}
-module_init(pcf50633_input_init);
-
-static void __exit pcf50633_input_exit(void)
-{
- platform_driver_unregister(&pcf50633_input_driver);
-}
-module_exit(pcf50633_input_exit);
+module_platform_driver(pcf50633_input_driver);
MODULE_AUTHOR("Balaji Rao <balajirrao@openmoko.org>");
MODULE_DESCRIPTION("PCF50633 input driver");
diff --git a/drivers/input/misc/pcspkr.c b/drivers/input/misc/pcspkr.c
index 34f4d2e0f50f..b2484aa07f32 100644
--- a/drivers/input/misc/pcspkr.c
+++ b/drivers/input/misc/pcspkr.c
@@ -134,17 +134,5 @@ static struct platform_driver pcspkr_platform_driver = {
.remove = __devexit_p(pcspkr_remove),
.shutdown = pcspkr_shutdown,
};
+module_platform_driver(pcspkr_platform_driver);
-
-static int __init pcspkr_init(void)
-{
- return platform_driver_register(&pcspkr_platform_driver);
-}
-
-static void __exit pcspkr_exit(void)
-{
- platform_driver_unregister(&pcspkr_platform_driver);
-}
-
-module_init(pcspkr_init);
-module_exit(pcspkr_exit);
diff --git a/drivers/input/misc/pm8xxx-vibrator.c b/drivers/input/misc/pm8xxx-vibrator.c
index 43192930824b..dfbfb463ea5d 100644
--- a/drivers/input/misc/pm8xxx-vibrator.c
+++ b/drivers/input/misc/pm8xxx-vibrator.c
@@ -277,18 +277,7 @@ static struct platform_driver pm8xxx_vib_driver = {
.pm = &pm8xxx_vib_pm_ops,
},
};
-
-static int __init pm8xxx_vib_init(void)
-{
- return platform_driver_register(&pm8xxx_vib_driver);
-}
-module_init(pm8xxx_vib_init);
-
-static void __exit pm8xxx_vib_exit(void)
-{
- platform_driver_unregister(&pm8xxx_vib_driver);
-}
-module_exit(pm8xxx_vib_exit);
+module_platform_driver(pm8xxx_vib_driver);
MODULE_ALIAS("platform:pm8xxx_vib");
MODULE_DESCRIPTION("PMIC8xxx vibrator driver based on ff-memless framework");
diff --git a/drivers/input/misc/pmic8xxx-pwrkey.c b/drivers/input/misc/pmic8xxx-pwrkey.c
index b3cfb9c71e66..0f83d0f1d015 100644
--- a/drivers/input/misc/pmic8xxx-pwrkey.c
+++ b/drivers/input/misc/pmic8xxx-pwrkey.c
@@ -213,18 +213,7 @@ static struct platform_driver pmic8xxx_pwrkey_driver = {
.pm = &pm8xxx_pwr_key_pm_ops,
},
};
-
-static int __init pmic8xxx_pwrkey_init(void)
-{
- return platform_driver_register(&pmic8xxx_pwrkey_driver);
-}
-module_init(pmic8xxx_pwrkey_init);
-
-static void __exit pmic8xxx_pwrkey_exit(void)
-{
- platform_driver_unregister(&pmic8xxx_pwrkey_driver);
-}
-module_exit(pmic8xxx_pwrkey_exit);
+module_platform_driver(pmic8xxx_pwrkey_driver);
MODULE_ALIAS("platform:pmic8xxx_pwrkey");
MODULE_DESCRIPTION("PMIC8XXX Power Key driver");
diff --git a/drivers/input/misc/pwm-beeper.c b/drivers/input/misc/pwm-beeper.c
index 57c294f07198..fc84c8a51147 100644
--- a/drivers/input/misc/pwm-beeper.c
+++ b/drivers/input/misc/pwm-beeper.c
@@ -180,18 +180,7 @@ static struct platform_driver pwm_beeper_driver = {
.pm = PWM_BEEPER_PM_OPS,
},
};
-
-static int __init pwm_beeper_init(void)
-{
- return platform_driver_register(&pwm_beeper_driver);
-}
-module_init(pwm_beeper_init);
-
-static void __exit pwm_beeper_exit(void)
-{
- platform_driver_unregister(&pwm_beeper_driver);
-}
-module_exit(pwm_beeper_exit);
+module_platform_driver(pwm_beeper_driver);
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("PWM beeper driver");
diff --git a/drivers/input/misc/rb532_button.c b/drivers/input/misc/rb532_button.c
index e2c7f622a0b5..aeb02bcf7233 100644
--- a/drivers/input/misc/rb532_button.c
+++ b/drivers/input/misc/rb532_button.c
@@ -100,19 +100,7 @@ static struct platform_driver rb532_button_driver = {
.owner = THIS_MODULE,
},
};
-
-static int __init rb532_button_init(void)
-{
- return platform_driver_register(&rb532_button_driver);
-}
-
-static void __exit rb532_button_exit(void)
-{
- platform_driver_unregister(&rb532_button_driver);
-}
-
-module_init(rb532_button_init);
-module_exit(rb532_button_exit);
+module_platform_driver(rb532_button_driver);
MODULE_AUTHOR("Phil Sutter <n0-1@freewrt.org>");
MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/rotary_encoder.c b/drivers/input/misc/rotary_encoder.c
index 2be21694fac1..f07f784198b9 100644
--- a/drivers/input/misc/rotary_encoder.c
+++ b/drivers/input/misc/rotary_encoder.c
@@ -284,19 +284,7 @@ static struct platform_driver rotary_encoder_driver = {
.owner = THIS_MODULE,
}
};
-
-static int __init rotary_encoder_init(void)
-{
- return platform_driver_register(&rotary_encoder_driver);
-}
-
-static void __exit rotary_encoder_exit(void)
-{
- platform_driver_unregister(&rotary_encoder_driver);
-}
-
-module_init(rotary_encoder_init);
-module_exit(rotary_encoder_exit);
+module_platform_driver(rotary_encoder_driver);
MODULE_ALIAS("platform:" DRV_NAME);
MODULE_DESCRIPTION("GPIO rotary encoder driver");
diff --git a/drivers/input/misc/sgi_btns.c b/drivers/input/misc/sgi_btns.c
index 1a80c0dab83b..5d9fd5571199 100644
--- a/drivers/input/misc/sgi_btns.c
+++ b/drivers/input/misc/sgi_btns.c
@@ -164,17 +164,6 @@ static struct platform_driver sgi_buttons_driver = {
.owner = THIS_MODULE,
},
};
-
-static int __init sgi_buttons_init(void)
-{
- return platform_driver_register(&sgi_buttons_driver);
-}
-
-static void __exit sgi_buttons_exit(void)
-{
- platform_driver_unregister(&sgi_buttons_driver);
-}
+module_platform_driver(sgi_buttons_driver);
MODULE_LICENSE("GPL");
-module_init(sgi_buttons_init);
-module_exit(sgi_buttons_exit);
diff --git a/drivers/input/misc/twl4030-vibra.c b/drivers/input/misc/twl4030-vibra.c
index 3c1a432c14dc..37651373a95b 100644
--- a/drivers/input/misc/twl4030-vibra.c
+++ b/drivers/input/misc/twl4030-vibra.c
@@ -278,21 +278,9 @@ static struct platform_driver twl4030_vibra_driver = {
#endif
},
};
-
-static int __init twl4030_vibra_init(void)
-{
- return platform_driver_register(&twl4030_vibra_driver);
-}
-module_init(twl4030_vibra_init);
-
-static void __exit twl4030_vibra_exit(void)
-{
- platform_driver_unregister(&twl4030_vibra_driver);
-}
-module_exit(twl4030_vibra_exit);
+module_platform_driver(twl4030_vibra_driver);
MODULE_ALIAS("platform:twl4030-vibra");
-
MODULE_DESCRIPTION("TWL4030 Vibra driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Nokia Corporation");
diff --git a/drivers/input/misc/twl6040-vibra.c b/drivers/input/misc/twl6040-vibra.c
index ad153a417eed..45874fed523a 100644
--- a/drivers/input/misc/twl6040-vibra.c
+++ b/drivers/input/misc/twl6040-vibra.c
@@ -410,18 +410,7 @@ static struct platform_driver twl6040_vibra_driver = {
.pm = &twl6040_vibra_pm_ops,
},
};
-
-static int __init twl6040_vibra_init(void)
-{
- return platform_driver_register(&twl6040_vibra_driver);
-}
-module_init(twl6040_vibra_init);
-
-static void __exit twl6040_vibra_exit(void)
-{
- platform_driver_unregister(&twl6040_vibra_driver);
-}
-module_exit(twl6040_vibra_exit);
+module_platform_driver(twl6040_vibra_driver);
MODULE_ALIAS("platform:twl6040-vibra");
MODULE_DESCRIPTION("TWL6040 Vibra driver");
diff --git a/drivers/input/misc/wistron_btns.c b/drivers/input/misc/wistron_btns.c
index 52b419348983..e2bdfd4bea70 100644
--- a/drivers/input/misc/wistron_btns.c
+++ b/drivers/input/misc/wistron_btns.c
@@ -48,7 +48,7 @@ MODULE_DESCRIPTION("Wistron laptop button driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION("0.3");
-static int force; /* = 0; */
+static bool force; /* = 0; */
module_param(force, bool, 0);
MODULE_PARM_DESC(force, "Load even if computer is not in database");
diff --git a/drivers/input/misc/wm831x-on.c b/drivers/input/misc/wm831x-on.c
index c3d7ba5f5b47..47f18d6bce46 100644
--- a/drivers/input/misc/wm831x-on.c
+++ b/drivers/input/misc/wm831x-on.c
@@ -145,18 +145,7 @@ static struct platform_driver wm831x_on_driver = {
.owner = THIS_MODULE,
},
};
-
-static int __init wm831x_on_init(void)
-{
- return platform_driver_register(&wm831x_on_driver);
-}
-module_init(wm831x_on_init);
-
-static void __exit wm831x_on_exit(void)
-{
- platform_driver_unregister(&wm831x_on_driver);
-}
-module_exit(wm831x_on_exit);
+module_platform_driver(wm831x_on_driver);
MODULE_ALIAS("platform:wm831x-on");
MODULE_DESCRIPTION("WM831x ON pin");
diff --git a/drivers/input/misc/xen-kbdfront.c b/drivers/input/misc/xen-kbdfront.c
index ad2e51c04db8..02ca8680ea5b 100644
--- a/drivers/input/misc/xen-kbdfront.c
+++ b/drivers/input/misc/xen-kbdfront.c
@@ -361,15 +361,12 @@ static const struct xenbus_device_id xenkbd_ids[] = {
{ "" }
};
-static struct xenbus_driver xenkbd_driver = {
- .name = "vkbd",
- .owner = THIS_MODULE,
- .ids = xenkbd_ids,
+static DEFINE_XENBUS_DRIVER(xenkbd, ,
.probe = xenkbd_probe,
.remove = xenkbd_remove,
.resume = xenkbd_resume,
.otherend_changed = xenkbd_backend_changed,
-};
+);
static int __init xenkbd_init(void)
{
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index 003587c71f43..bd87380bd879 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -17,13 +17,63 @@
#include <linux/slab.h>
#include <linux/input.h>
+#include <linux/input/mt.h>
#include <linux/serio.h>
#include <linux/libps2.h>
#include "psmouse.h"
#include "alps.h"
-#define ALPS_OLDPROTO 0x01 /* old style input */
+/*
+ * Definitions for ALPS version 3 and 4 command mode protocol
+ */
+#define ALPS_V3_X_MAX 2000
+#define ALPS_V3_Y_MAX 1400
+
+#define ALPS_BITMAP_X_BITS 15
+#define ALPS_BITMAP_Y_BITS 11
+
+#define ALPS_CMD_NIBBLE_10 0x01f2
+
+static const struct alps_nibble_commands alps_v3_nibble_commands[] = {
+ { PSMOUSE_CMD_SETPOLL, 0x00 }, /* 0 */
+ { PSMOUSE_CMD_RESET_DIS, 0x00 }, /* 1 */
+ { PSMOUSE_CMD_SETSCALE21, 0x00 }, /* 2 */
+ { PSMOUSE_CMD_SETRATE, 0x0a }, /* 3 */
+ { PSMOUSE_CMD_SETRATE, 0x14 }, /* 4 */
+ { PSMOUSE_CMD_SETRATE, 0x28 }, /* 5 */
+ { PSMOUSE_CMD_SETRATE, 0x3c }, /* 6 */
+ { PSMOUSE_CMD_SETRATE, 0x50 }, /* 7 */
+ { PSMOUSE_CMD_SETRATE, 0x64 }, /* 8 */
+ { PSMOUSE_CMD_SETRATE, 0xc8 }, /* 9 */
+ { ALPS_CMD_NIBBLE_10, 0x00 }, /* a */
+ { PSMOUSE_CMD_SETRES, 0x00 }, /* b */
+ { PSMOUSE_CMD_SETRES, 0x01 }, /* c */
+ { PSMOUSE_CMD_SETRES, 0x02 }, /* d */
+ { PSMOUSE_CMD_SETRES, 0x03 }, /* e */
+ { PSMOUSE_CMD_SETSCALE11, 0x00 }, /* f */
+};
+
+static const struct alps_nibble_commands alps_v4_nibble_commands[] = {
+ { PSMOUSE_CMD_ENABLE, 0x00 }, /* 0 */
+ { PSMOUSE_CMD_RESET_DIS, 0x00 }, /* 1 */
+ { PSMOUSE_CMD_SETSCALE21, 0x00 }, /* 2 */
+ { PSMOUSE_CMD_SETRATE, 0x0a }, /* 3 */
+ { PSMOUSE_CMD_SETRATE, 0x14 }, /* 4 */
+ { PSMOUSE_CMD_SETRATE, 0x28 }, /* 5 */
+ { PSMOUSE_CMD_SETRATE, 0x3c }, /* 6 */
+ { PSMOUSE_CMD_SETRATE, 0x50 }, /* 7 */
+ { PSMOUSE_CMD_SETRATE, 0x64 }, /* 8 */
+ { PSMOUSE_CMD_SETRATE, 0xc8 }, /* 9 */
+ { ALPS_CMD_NIBBLE_10, 0x00 }, /* a */
+ { PSMOUSE_CMD_SETRES, 0x00 }, /* b */
+ { PSMOUSE_CMD_SETRES, 0x01 }, /* c */
+ { PSMOUSE_CMD_SETRES, 0x02 }, /* d */
+ { PSMOUSE_CMD_SETRES, 0x03 }, /* e */
+ { PSMOUSE_CMD_SETSCALE11, 0x00 }, /* f */
+};
+
+
#define ALPS_DUALPOINT 0x02 /* touchpad has trackstick */
#define ALPS_PASS 0x04 /* device has a pass-through port */
@@ -35,30 +85,33 @@
6-byte ALPS packet */
static const struct alps_model_info alps_model_data[] = {
- { { 0x32, 0x02, 0x14 }, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT }, /* Toshiba Salellite Pro M10 */
- { { 0x33, 0x02, 0x0a }, 0x88, 0xf8, ALPS_OLDPROTO }, /* UMAX-530T */
- { { 0x53, 0x02, 0x0a }, 0xf8, 0xf8, 0 },
- { { 0x53, 0x02, 0x14 }, 0xf8, 0xf8, 0 },
- { { 0x60, 0x03, 0xc8 }, 0xf8, 0xf8, 0 }, /* HP ze1115 */
- { { 0x63, 0x02, 0x0a }, 0xf8, 0xf8, 0 },
- { { 0x63, 0x02, 0x14 }, 0xf8, 0xf8, 0 },
- { { 0x63, 0x02, 0x28 }, 0xf8, 0xf8, ALPS_FW_BK_2 }, /* Fujitsu Siemens S6010 */
- { { 0x63, 0x02, 0x3c }, 0x8f, 0x8f, ALPS_WHEEL }, /* Toshiba Satellite S2400-103 */
- { { 0x63, 0x02, 0x50 }, 0xef, 0xef, ALPS_FW_BK_1 }, /* NEC Versa L320 */
- { { 0x63, 0x02, 0x64 }, 0xf8, 0xf8, 0 },
- { { 0x63, 0x03, 0xc8 }, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT }, /* Dell Latitude D800 */
- { { 0x73, 0x00, 0x0a }, 0xf8, 0xf8, ALPS_DUALPOINT }, /* ThinkPad R61 8918-5QG */
- { { 0x73, 0x02, 0x0a }, 0xf8, 0xf8, 0 },
- { { 0x73, 0x02, 0x14 }, 0xf8, 0xf8, ALPS_FW_BK_2 }, /* Ahtec Laptop */
- { { 0x20, 0x02, 0x0e }, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT }, /* XXX */
- { { 0x22, 0x02, 0x0a }, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT },
- { { 0x22, 0x02, 0x14 }, 0xff, 0xff, ALPS_PASS | ALPS_DUALPOINT }, /* Dell Latitude D600 */
+	{ { 0x32, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT }, /* Toshiba Satellite Pro M10 */
+ { { 0x33, 0x02, 0x0a }, 0x00, ALPS_PROTO_V1, 0x88, 0xf8, 0 }, /* UMAX-530T */
+ { { 0x53, 0x02, 0x0a }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 },
+ { { 0x53, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 },
+ { { 0x60, 0x03, 0xc8 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 }, /* HP ze1115 */
+ { { 0x63, 0x02, 0x0a }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 },
+ { { 0x63, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 },
+ { { 0x63, 0x02, 0x28 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_FW_BK_2 }, /* Fujitsu Siemens S6010 */
+ { { 0x63, 0x02, 0x3c }, 0x00, ALPS_PROTO_V2, 0x8f, 0x8f, ALPS_WHEEL }, /* Toshiba Satellite S2400-103 */
+ { { 0x63, 0x02, 0x50 }, 0x00, ALPS_PROTO_V2, 0xef, 0xef, ALPS_FW_BK_1 }, /* NEC Versa L320 */
+ { { 0x63, 0x02, 0x64 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 },
+ { { 0x63, 0x03, 0xc8 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT }, /* Dell Latitude D800 */
+ { { 0x73, 0x00, 0x0a }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_DUALPOINT }, /* ThinkPad R61 8918-5QG */
+ { { 0x73, 0x02, 0x0a }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 },
+ { { 0x73, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_FW_BK_2 }, /* Ahtec Laptop */
+ { { 0x20, 0x02, 0x0e }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT }, /* XXX */
+ { { 0x22, 0x02, 0x0a }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT },
+ { { 0x22, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xff, 0xff, ALPS_PASS | ALPS_DUALPOINT }, /* Dell Latitude D600 */
/* Dell Latitude E5500, E6400, E6500, Precision M4400 */
- { { 0x62, 0x02, 0x14 }, 0xcf, 0xcf,
+ { { 0x62, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf,
ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED },
- { { 0x73, 0x02, 0x50 }, 0xcf, 0xcf, ALPS_FOUR_BUTTONS }, /* Dell Vostro 1400 */
- { { 0x52, 0x01, 0x14 }, 0xff, 0xff,
- ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, /* Toshiba Tecra A11-11L */
+ { { 0x73, 0x02, 0x50 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf, ALPS_FOUR_BUTTONS }, /* Dell Vostro 1400 */
+ { { 0x52, 0x01, 0x14 }, 0x00, ALPS_PROTO_V2, 0xff, 0xff,
+ ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, /* Toshiba Tecra A11-11L */
+ { { 0x73, 0x02, 0x64 }, 0x9b, ALPS_PROTO_V3, 0x8f, 0x8f, ALPS_DUALPOINT },
+ { { 0x73, 0x02, 0x64 }, 0x9d, ALPS_PROTO_V3, 0x8f, 0x8f, ALPS_DUALPOINT },
+ { { 0x73, 0x02, 0x64 }, 0x8a, ALPS_PROTO_V4, 0x8f, 0x8f, 0 },
};
/*
@@ -67,42 +120,7 @@ static const struct alps_model_info alps_model_data[] = {
* isn't valid per PS/2 spec.
*/
-/*
- * PS/2 packet format
- *
- * byte 0: 0 0 YSGN XSGN 1 M R L
- * byte 1: X7 X6 X5 X4 X3 X2 X1 X0
- * byte 2: Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0
- *
- * Note that the device never signals overflow condition.
- *
- * ALPS absolute Mode - new format
- *
- * byte 0: 1 ? ? ? 1 ? ? ?
- * byte 1: 0 x6 x5 x4 x3 x2 x1 x0
- * byte 2: 0 x10 x9 x8 x7 ? fin ges
- * byte 3: 0 y9 y8 y7 1 M R L
- * byte 4: 0 y6 y5 y4 y3 y2 y1 y0
- * byte 5: 0 z6 z5 z4 z3 z2 z1 z0
- *
- * Dualpoint device -- interleaved packet format
- *
- * byte 0: 1 1 0 0 1 1 1 1
- * byte 1: 0 x6 x5 x4 x3 x2 x1 x0
- * byte 2: 0 x10 x9 x8 x7 0 fin ges
- * byte 3: 0 0 YSGN XSGN 1 1 1 1
- * byte 4: X7 X6 X5 X4 X3 X2 X1 X0
- * byte 5: Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0
- * byte 6: 0 y9 y8 y7 1 m r l
- * byte 7: 0 y6 y5 y4 y3 y2 y1 y0
- * byte 8: 0 z6 z5 z4 z3 z2 z1 z0
- *
- * CAPITALS = stick, miniscules = touchpad
- *
- * ?'s can have different meanings on different models,
- * such as wheel rotation, extra buttons, stick buttons
- * on a dualpoint, etc.
- */
+/* Packet formats are described in Documentation/input/alps.txt */
static bool alps_is_valid_first_byte(const struct alps_model_info *model,
unsigned char data)
@@ -137,7 +155,7 @@ static void alps_report_buttons(struct psmouse *psmouse,
input_sync(dev2);
}
-static void alps_process_packet(struct psmouse *psmouse)
+static void alps_process_packet_v1_v2(struct psmouse *psmouse)
{
struct alps_data *priv = psmouse->private;
const struct alps_model_info *model = priv->i;
@@ -147,7 +165,7 @@ static void alps_process_packet(struct psmouse *psmouse)
int x, y, z, ges, fin, left, right, middle;
int back = 0, forward = 0;
- if (model->flags & ALPS_OLDPROTO) {
+ if (model->proto_version == ALPS_PROTO_V1) {
left = packet[2] & 0x10;
right = packet[2] & 0x08;
middle = 0;
@@ -239,6 +257,403 @@ static void alps_process_packet(struct psmouse *psmouse)
input_sync(dev);
}
+/*
+ * Process bitmap data from v3 and v4 protocols. Returns the number of
+ * fingers detected. A return value of 0 means at least one of the
+ * bitmaps was empty.
+ *
+ * The bitmaps don't have enough data to track fingers, so this function
+ * only generates points representing a bounding box of all contacts.
+ * These points are returned in x1, y1, x2, and y2 when the return value
+ * is greater than 0.
+ */
+static int alps_process_bitmap(unsigned int x_map, unsigned int y_map,
+ int *x1, int *y1, int *x2, int *y2)
+{
+ struct alps_bitmap_point {
+ int start_bit;
+ int num_bits;
+ };
+
+ int fingers_x = 0, fingers_y = 0, fingers;
+ int i, bit, prev_bit;
+ struct alps_bitmap_point x_low = {0,}, x_high = {0,};
+ struct alps_bitmap_point y_low = {0,}, y_high = {0,};
+ struct alps_bitmap_point *point;
+
+ if (!x_map || !y_map)
+ return 0;
+
+ *x1 = *y1 = *x2 = *y2 = 0;
+
+ prev_bit = 0;
+ point = &x_low;
+ for (i = 0; x_map != 0; i++, x_map >>= 1) {
+ bit = x_map & 1;
+ if (bit) {
+ if (!prev_bit) {
+ point->start_bit = i;
+ fingers_x++;
+ }
+ point->num_bits++;
+ } else {
+ if (prev_bit)
+ point = &x_high;
+ else
+ point->num_bits = 0;
+ }
+ prev_bit = bit;
+ }
+
+ /*
+ * y bitmap is reversed for what we need (lower positions are in
+ * higher bits), so we process from the top end.
+ */
+ y_map = y_map << (sizeof(y_map) * BITS_PER_BYTE - ALPS_BITMAP_Y_BITS);
+ prev_bit = 0;
+ point = &y_low;
+ for (i = 0; y_map != 0; i++, y_map <<= 1) {
+ bit = y_map & (1 << (sizeof(y_map) * BITS_PER_BYTE - 1));
+ if (bit) {
+ if (!prev_bit) {
+ point->start_bit = i;
+ fingers_y++;
+ }
+ point->num_bits++;
+ } else {
+ if (prev_bit)
+ point = &y_high;
+ else
+ point->num_bits = 0;
+ }
+ prev_bit = bit;
+ }
+
+ /*
+ * Fingers can overlap, so we use the maximum count of fingers
+ * on either axis as the finger count.
+ */
+ fingers = max(fingers_x, fingers_y);
+
+ /*
+ * If total fingers is > 1 but either axis reports only a single
+ * contact, we have overlapping or adjacent fingers. For the
+ * purposes of creating a bounding box, divide the single contact
+ * (roughly) equally between the two points.
+ */
+ if (fingers > 1) {
+ if (fingers_x == 1) {
+ i = x_low.num_bits / 2;
+ x_low.num_bits = x_low.num_bits - i;
+ x_high.start_bit = x_low.start_bit + i;
+ x_high.num_bits = max(i, 1);
+ } else if (fingers_y == 1) {
+ i = y_low.num_bits / 2;
+ y_low.num_bits = y_low.num_bits - i;
+ y_high.start_bit = y_low.start_bit + i;
+ y_high.num_bits = max(i, 1);
+ }
+ }
+
+ *x1 = (ALPS_V3_X_MAX * (2 * x_low.start_bit + x_low.num_bits - 1)) /
+ (2 * (ALPS_BITMAP_X_BITS - 1));
+ *y1 = (ALPS_V3_Y_MAX * (2 * y_low.start_bit + y_low.num_bits - 1)) /
+ (2 * (ALPS_BITMAP_Y_BITS - 1));
+
+ if (fingers > 1) {
+ *x2 = (ALPS_V3_X_MAX * (2 * x_high.start_bit + x_high.num_bits - 1)) /
+ (2 * (ALPS_BITMAP_X_BITS - 1));
+ *y2 = (ALPS_V3_Y_MAX * (2 * y_high.start_bit + y_high.num_bits - 1)) /
+ (2 * (ALPS_BITMAP_Y_BITS - 1));
+ }
+
+ return fingers;
+}
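/*
 * Worked example of the scaling above (ALPS_V3_X_MAX = 2000,
 * ALPS_BITMAP_X_BITS = 15): for x_map = 0b100000000000110 two x
 * contacts are found,
 *
 *	x_low  = { .start_bit = 1,  .num_bits = 2 } -> x1 = 2000 * 3 / 28 = 214
 *	x_high = { .start_bit = 14, .num_bits = 1 } -> x2 = 2000 * 28 / 28 = 2000
 *
 * and if the y bitmap only yields a single group, that group is split
 * roughly in half (the fingers > 1 branch above) so the bounding box
 * still gets two y corners.
 */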
+
+static void alps_set_slot(struct input_dev *dev, int slot, bool active,
+ int x, int y)
+{
+ input_mt_slot(dev, slot);
+ input_mt_report_slot_state(dev, MT_TOOL_FINGER, active);
+ if (active) {
+ input_report_abs(dev, ABS_MT_POSITION_X, x);
+ input_report_abs(dev, ABS_MT_POSITION_Y, y);
+ }
+}
+
+static void alps_report_semi_mt_data(struct input_dev *dev, int num_fingers,
+ int x1, int y1, int x2, int y2)
+{
+ alps_set_slot(dev, 0, num_fingers != 0, x1, y1);
+ alps_set_slot(dev, 1, num_fingers == 2, x2, y2);
+}
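/*
 * Reporting example: with num_fingers == 2 both semi-MT slots are active
 * and carry the bounding-box corners (x1,y1) and (x2,y2); with a single
 * contact only slot 0 is populated; for three or more contacts slot 1 is
 * again inactive and the actual finger count is conveyed through the
 * BTN_TOOL_TRIPLETAP/QUADTAP keys set by the caller.
 */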
+
+static void alps_process_trackstick_packet_v3(struct psmouse *psmouse)
+{
+ struct alps_data *priv = psmouse->private;
+ unsigned char *packet = psmouse->packet;
+ struct input_dev *dev = priv->dev2;
+ int x, y, z, left, right, middle;
+
+ /* Sanity check packet */
+ if (!(packet[0] & 0x40)) {
+ psmouse_dbg(psmouse, "Bad trackstick packet, discarding\n");
+ return;
+ }
+
+ /*
+ * There's a special packet that seems to indicate the end
+ * of a stream of trackstick data. Filter these out.
+ */
+ if (packet[1] == 0x7f && packet[2] == 0x7f && packet[4] == 0x7f)
+ return;
+
+ x = (s8)(((packet[0] & 0x20) << 2) | (packet[1] & 0x7f));
+ y = (s8)(((packet[0] & 0x10) << 3) | (packet[2] & 0x7f));
+ z = (packet[4] & 0x7c) >> 2;
+
+ /*
+ * The x and y values tend to be quite large, and when used
+ * alone the trackstick is difficult to use. Scale them down
+ * to compensate.
+ */
+ x /= 8;
+ y /= 8;
+
+ input_report_rel(dev, REL_X, x);
+ input_report_rel(dev, REL_Y, -y);
+
+ /*
+ * Most ALPS models report the trackstick buttons in the touchpad
+ * packets, but a few report them here. No reliable way has been
+ * found to differentiate between the models upfront, so we enable
+ * the quirk in response to seeing a button press in the trackstick
+ * packet.
+ */
+ left = packet[3] & 0x01;
+ right = packet[3] & 0x02;
+ middle = packet[3] & 0x04;
+
+ if (!(priv->quirks & ALPS_QUIRK_TRACKSTICK_BUTTONS) &&
+ (left || right || middle))
+ priv->quirks |= ALPS_QUIRK_TRACKSTICK_BUTTONS;
+
+ if (priv->quirks & ALPS_QUIRK_TRACKSTICK_BUTTONS) {
+ input_report_key(dev, BTN_LEFT, left);
+ input_report_key(dev, BTN_RIGHT, right);
+ input_report_key(dev, BTN_MIDDLE, middle);
+ }
+
+ input_sync(dev);
+ return;
+}
+
+static void alps_process_touchpad_packet_v3(struct psmouse *psmouse)
+{
+ struct alps_data *priv = psmouse->private;
+ unsigned char *packet = psmouse->packet;
+ struct input_dev *dev = psmouse->dev;
+ struct input_dev *dev2 = priv->dev2;
+ int x, y, z;
+ int left, right, middle;
+ int x1 = 0, y1 = 0, x2 = 0, y2 = 0;
+ int fingers = 0, bmap_fingers;
+ unsigned int x_bitmap, y_bitmap;
+
+ /*
+ * There's no single feature of touchpad position and bitmap packets
+ * that can be used to distinguish between them. We rely on the fact
+ * that a bitmap packet should always follow a position packet with
+ * bit 6 of packet[4] set.
+ */
+ if (priv->multi_packet) {
+ /*
+ * Sometimes a position packet will indicate a multi-packet
+ * sequence, but then what follows is another position
+ * packet. Check for this, and when it happens process the
+ * position packet as usual.
+ */
+ if (packet[0] & 0x40) {
+ fingers = (packet[5] & 0x3) + 1;
+ x_bitmap = ((packet[4] & 0x7e) << 8) |
+ ((packet[1] & 0x7f) << 2) |
+ ((packet[0] & 0x30) >> 4);
+ y_bitmap = ((packet[3] & 0x70) << 4) |
+ ((packet[2] & 0x7f) << 1) |
+ (packet[4] & 0x01);
+
+ bmap_fingers = alps_process_bitmap(x_bitmap, y_bitmap,
+ &x1, &y1, &x2, &y2);
+
+ /*
+ * We shouldn't report more than one finger if
+ * we don't have two coordinates.
+ */
+ if (fingers > 1 && bmap_fingers < 2)
+ fingers = bmap_fingers;
+
+ /* Now process position packet */
+ packet = priv->multi_data;
+ } else {
+ priv->multi_packet = 0;
+ }
+ }
+
+ /*
+ * Bit 6 of byte 0 is not usually set in position packets. The only
+ * times it seems to be set is in situations where the data is
+ * suspect anyway, e.g. a palm resting flat on the touchpad. Given
+ * this combined with the fact that this bit is useful for filtering
+ * out misidentified bitmap packets, we reject anything with this
+ * bit set.
+ */
+ if (packet[0] & 0x40)
+ return;
+
+ if (!priv->multi_packet && (packet[4] & 0x40)) {
+ priv->multi_packet = 1;
+ memcpy(priv->multi_data, packet, sizeof(priv->multi_data));
+ return;
+ }
+
+ priv->multi_packet = 0;
+
+ left = packet[3] & 0x01;
+ right = packet[3] & 0x02;
+ middle = packet[3] & 0x04;
+
+ x = ((packet[1] & 0x7f) << 4) | ((packet[4] & 0x30) >> 2) |
+ ((packet[0] & 0x30) >> 4);
+ y = ((packet[2] & 0x7f) << 4) | (packet[4] & 0x0f);
+ z = packet[5] & 0x7f;
+
+ /*
+ * Sometimes the hardware sends a single packet with z = 0
+ * in the middle of a stream. Real releases generate packets
+ * with x, y, and z all zero, so these seem to be flukes.
+ * Ignore them.
+ */
+ if (x && y && !z)
+ return;
+
+ /*
+ * If we don't have MT data or the bitmaps were empty, we have
+ * to rely on ST data.
+ */
+ if (!fingers) {
+ x1 = x;
+ y1 = y;
+ fingers = z > 0 ? 1 : 0;
+ }
+
+ if (z >= 64)
+ input_report_key(dev, BTN_TOUCH, 1);
+ else
+ input_report_key(dev, BTN_TOUCH, 0);
+
+ alps_report_semi_mt_data(dev, fingers, x1, y1, x2, y2);
+
+ input_report_key(dev, BTN_TOOL_FINGER, fingers == 1);
+ input_report_key(dev, BTN_TOOL_DOUBLETAP, fingers == 2);
+ input_report_key(dev, BTN_TOOL_TRIPLETAP, fingers == 3);
+ input_report_key(dev, BTN_TOOL_QUADTAP, fingers == 4);
+
+ input_report_key(dev, BTN_LEFT, left);
+ input_report_key(dev, BTN_RIGHT, right);
+ input_report_key(dev, BTN_MIDDLE, middle);
+
+ if (z > 0) {
+ input_report_abs(dev, ABS_X, x);
+ input_report_abs(dev, ABS_Y, y);
+ }
+ input_report_abs(dev, ABS_PRESSURE, z);
+
+ input_sync(dev);
+
+ if (!(priv->quirks & ALPS_QUIRK_TRACKSTICK_BUTTONS)) {
+ left = packet[3] & 0x10;
+ right = packet[3] & 0x20;
+ middle = packet[3] & 0x40;
+
+ input_report_key(dev2, BTN_LEFT, left);
+ input_report_key(dev2, BTN_RIGHT, right);
+ input_report_key(dev2, BTN_MIDDLE, middle);
+ input_sync(dev2);
+ }
+}
+
+static void alps_process_packet_v3(struct psmouse *psmouse)
+{
+ unsigned char *packet = psmouse->packet;
+
+ /*
+ * v3 protocol packets come in three types, two representing
+ * touchpad data and one representing trackstick data.
+ * Trackstick packets seem to be distinguished by always
+ * having 0x3f in the last byte. This value has never been
+ * observed in the last byte of either of the other types
+ * of packets.
+ */
+ if (packet[5] == 0x3f) {
+ alps_process_trackstick_packet_v3(psmouse);
+ return;
+ }
+
+ alps_process_touchpad_packet_v3(psmouse);
+}
+
+static void alps_process_packet_v4(struct psmouse *psmouse)
+{
+ unsigned char *packet = psmouse->packet;
+ struct input_dev *dev = psmouse->dev;
+ int x, y, z;
+ int left, right;
+
+ left = packet[4] & 0x01;
+ right = packet[4] & 0x02;
+
+ x = ((packet[1] & 0x7f) << 4) | ((packet[3] & 0x30) >> 2) |
+ ((packet[0] & 0x30) >> 4);
+ y = ((packet[2] & 0x7f) << 4) | (packet[3] & 0x0f);
+ z = packet[5] & 0x7f;
+
+ if (z >= 64)
+ input_report_key(dev, BTN_TOUCH, 1);
+ else
+ input_report_key(dev, BTN_TOUCH, 0);
+
+ if (z > 0) {
+ input_report_abs(dev, ABS_X, x);
+ input_report_abs(dev, ABS_Y, y);
+ }
+ input_report_abs(dev, ABS_PRESSURE, z);
+
+ input_report_key(dev, BTN_TOOL_FINGER, z > 0);
+ input_report_key(dev, BTN_LEFT, left);
+ input_report_key(dev, BTN_RIGHT, right);
+
+ input_sync(dev);
+}
+
+static void alps_process_packet(struct psmouse *psmouse)
+{
+ struct alps_data *priv = psmouse->private;
+ const struct alps_model_info *model = priv->i;
+
+ switch (model->proto_version) {
+ case ALPS_PROTO_V1:
+ case ALPS_PROTO_V2:
+ alps_process_packet_v1_v2(psmouse);
+ break;
+ case ALPS_PROTO_V3:
+ alps_process_packet_v3(psmouse);
+ break;
+ case ALPS_PROTO_V4:
+ alps_process_packet_v4(psmouse);
+ break;
+ }
+}
+
static void alps_report_bare_ps2_packet(struct psmouse *psmouse,
unsigned char packet[],
bool report_buttons)
@@ -344,7 +759,7 @@ static void alps_flush_packet(unsigned long data)
serio_pause_rx(psmouse->ps2dev.serio);
- if (psmouse->pktcnt == 6) {
+ if (psmouse->pktcnt == psmouse->pktsize) {
/*
* We did not any more data in reasonable amount of time.
@@ -395,8 +810,8 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse)
return PSMOUSE_BAD_DATA;
}
- /* Bytes 2 - 6 should have 0 in the highest bit */
- if (psmouse->pktcnt >= 2 && psmouse->pktcnt <= 6 &&
+ /* Bytes 2 - pktsize should have 0 in the highest bit */
+ if (psmouse->pktcnt >= 2 && psmouse->pktcnt <= psmouse->pktsize &&
(psmouse->packet[psmouse->pktcnt - 1] & 0x80)) {
psmouse_dbg(psmouse, "refusing packet[%i] = %x\n",
psmouse->pktcnt - 1,
@@ -404,7 +819,7 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse)
return PSMOUSE_BAD_DATA;
}
- if (psmouse->pktcnt == 6) {
+ if (psmouse->pktcnt == psmouse->pktsize) {
alps_process_packet(psmouse);
return PSMOUSE_FULL_PACKET;
}
@@ -412,11 +827,127 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse)
return PSMOUSE_GOOD_DATA;
}
+static int alps_command_mode_send_nibble(struct psmouse *psmouse, int nibble)
+{
+ struct ps2dev *ps2dev = &psmouse->ps2dev;
+ struct alps_data *priv = psmouse->private;
+ int command;
+ unsigned char *param;
+ unsigned char dummy[4];
+
+ BUG_ON(nibble > 0xf);
+
+ command = priv->nibble_commands[nibble].command;
+ param = (command & 0x0f00) ?
+ dummy : (unsigned char *)&priv->nibble_commands[nibble].data;
+
+ if (ps2_command(ps2dev, param, command))
+ return -1;
+
+ return 0;
+}
+
+static int alps_command_mode_set_addr(struct psmouse *psmouse, int addr)
+{
+ struct ps2dev *ps2dev = &psmouse->ps2dev;
+ struct alps_data *priv = psmouse->private;
+ int i, nibble;
+
+ if (ps2_command(ps2dev, NULL, priv->addr_command))
+ return -1;
+
+ for (i = 12; i >= 0; i -= 4) {
+ nibble = (addr >> i) & 0xf;
+ if (alps_command_mode_send_nibble(psmouse, nibble))
+ return -1;
+ }
+
+ return 0;
+}
+
+static int __alps_command_mode_read_reg(struct psmouse *psmouse, int addr)
+{
+ struct ps2dev *ps2dev = &psmouse->ps2dev;
+ unsigned char param[4];
+
+ if (ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO))
+ return -1;
+
+ /*
+ * The address being read is returned in the first two bytes
+ * of the result. Check that this address matches the expected
+ * address.
+ */
+ if (addr != ((param[0] << 8) | param[1]))
+ return -1;
+
+ return param[2];
+}
+
+static int alps_command_mode_read_reg(struct psmouse *psmouse, int addr)
+{
+ if (alps_command_mode_set_addr(psmouse, addr))
+ return -1;
+ return __alps_command_mode_read_reg(psmouse, addr);
+}
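/*
 * Example flow on a v3 pad (addr_command = PSMOUSE_CMD_RESET_WRAP, nibble
 * table above): alps_command_mode_read_reg(psmouse, 0x0008) first issues
 * the address command, then the address nibbles 0, 0, 0, 8 (SETPOLL,
 * SETPOLL, SETPOLL, SETRATE 0x64), and finally a GETINFO whose reply
 * echoes the address in param[0..1] and returns the register value in
 * param[2].
 */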
+
+static int __alps_command_mode_write_reg(struct psmouse *psmouse, u8 value)
+{
+ if (alps_command_mode_send_nibble(psmouse, (value >> 4) & 0xf))
+ return -1;
+ if (alps_command_mode_send_nibble(psmouse, value & 0xf))
+ return -1;
+ return 0;
+}
+
+static int alps_command_mode_write_reg(struct psmouse *psmouse, int addr,
+ u8 value)
+{
+ if (alps_command_mode_set_addr(psmouse, addr))
+ return -1;
+ return __alps_command_mode_write_reg(psmouse, value);
+}
+
+static int alps_enter_command_mode(struct psmouse *psmouse,
+ unsigned char *resp)
+{
+ unsigned char param[4];
+ struct ps2dev *ps2dev = &psmouse->ps2dev;
+
+ if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_RESET_WRAP) ||
+ ps2_command(ps2dev, NULL, PSMOUSE_CMD_RESET_WRAP) ||
+ ps2_command(ps2dev, NULL, PSMOUSE_CMD_RESET_WRAP) ||
+ ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO)) {
+ psmouse_err(psmouse, "failed to enter command mode\n");
+ return -1;
+ }
+
+ if (param[0] != 0x88 && param[1] != 0x07) {
+ psmouse_dbg(psmouse,
+ "unknown response while entering command mode: %2.2x %2.2x %2.2x\n",
+ param[0], param[1], param[2]);
+ return -1;
+ }
+
+ if (resp)
+ *resp = param[2];
+ return 0;
+}
+
+static inline int alps_exit_command_mode(struct psmouse *psmouse)
+{
+ struct ps2dev *ps2dev = &psmouse->ps2dev;
+ if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSTREAM))
+ return -1;
+ return 0;
+}
+
static const struct alps_model_info *alps_get_model(struct psmouse *psmouse, int *version)
{
struct ps2dev *ps2dev = &psmouse->ps2dev;
static const unsigned char rates[] = { 0, 10, 20, 40, 60, 80, 100, 200 };
unsigned char param[4];
+ const struct alps_model_info *model = NULL;
int i;
/*
@@ -464,12 +995,41 @@ static const struct alps_model_info *alps_get_model(struct psmouse *psmouse, int
*version = (param[0] << 8) | (param[1] << 4) | i;
}
- for (i = 0; i < ARRAY_SIZE(alps_model_data); i++)
+ for (i = 0; i < ARRAY_SIZE(alps_model_data); i++) {
if (!memcmp(param, alps_model_data[i].signature,
- sizeof(alps_model_data[i].signature)))
- return alps_model_data + i;
+ sizeof(alps_model_data[i].signature))) {
+ model = alps_model_data + i;
+ break;
+ }
+ }
- return NULL;
+ if (model && model->proto_version > ALPS_PROTO_V2) {
+ /*
+ * Need to check command mode response to identify
+ * model
+ */
+ model = NULL;
+ if (alps_enter_command_mode(psmouse, param)) {
+ psmouse_warn(psmouse,
+ "touchpad failed to enter command mode\n");
+ } else {
+ for (i = 0; i < ARRAY_SIZE(alps_model_data); i++) {
+ if (alps_model_data[i].proto_version > ALPS_PROTO_V2 &&
+ alps_model_data[i].command_mode_resp == param[0]) {
+ model = alps_model_data + i;
+ break;
+ }
+ }
+ alps_exit_command_mode(psmouse);
+
+ if (!model)
+ psmouse_dbg(psmouse,
+ "Unknown command mode response %2.2x\n",
+ param[0]);
+ }
+ }
+
+ return model;
}
/*
@@ -477,7 +1037,7 @@ static const struct alps_model_info *alps_get_model(struct psmouse *psmouse, int
* subsequent commands. It looks like glidepad is behind stickpointer,
* I'd thought it would be other way around...
*/
-static int alps_passthrough_mode(struct psmouse *psmouse, bool enable)
+static int alps_passthrough_mode_v2(struct psmouse *psmouse, bool enable)
{
struct ps2dev *ps2dev = &psmouse->ps2dev;
int cmd = enable ? PSMOUSE_CMD_SETSCALE21 : PSMOUSE_CMD_SETSCALE11;
@@ -494,7 +1054,7 @@ static int alps_passthrough_mode(struct psmouse *psmouse, bool enable)
return 0;
}
-static int alps_absolute_mode(struct psmouse *psmouse)
+static int alps_absolute_mode_v1_v2(struct psmouse *psmouse)
{
struct ps2dev *ps2dev = &psmouse->ps2dev;
@@ -565,17 +1125,17 @@ static int alps_tap_mode(struct psmouse *psmouse, int enable)
static int alps_poll(struct psmouse *psmouse)
{
struct alps_data *priv = psmouse->private;
- unsigned char buf[6];
+ unsigned char buf[sizeof(psmouse->packet)];
bool poll_failed;
if (priv->i->flags & ALPS_PASS)
- alps_passthrough_mode(psmouse, true);
+ alps_passthrough_mode_v2(psmouse, true);
poll_failed = ps2_command(&psmouse->ps2dev, buf,
PSMOUSE_CMD_POLL | (psmouse->pktsize << 8)) < 0;
if (priv->i->flags & ALPS_PASS)
- alps_passthrough_mode(psmouse, false);
+ alps_passthrough_mode_v2(psmouse, false);
if (poll_failed || (buf[0] & priv->i->mask0) != priv->i->byte0)
return -1;
@@ -592,13 +1152,13 @@ static int alps_poll(struct psmouse *psmouse)
return 0;
}
-static int alps_hw_init(struct psmouse *psmouse)
+static int alps_hw_init_v1_v2(struct psmouse *psmouse)
{
struct alps_data *priv = psmouse->private;
const struct alps_model_info *model = priv->i;
if ((model->flags & ALPS_PASS) &&
- alps_passthrough_mode(psmouse, true)) {
+ alps_passthrough_mode_v2(psmouse, true)) {
return -1;
}
@@ -607,13 +1167,13 @@ static int alps_hw_init(struct psmouse *psmouse)
return -1;
}
- if (alps_absolute_mode(psmouse)) {
+ if (alps_absolute_mode_v1_v2(psmouse)) {
psmouse_err(psmouse, "Failed to enable absolute mode\n");
return -1;
}
if ((model->flags & ALPS_PASS) &&
- alps_passthrough_mode(psmouse, false)) {
+ alps_passthrough_mode_v2(psmouse, false)) {
return -1;
}
@@ -626,6 +1186,297 @@ static int alps_hw_init(struct psmouse *psmouse)
return 0;
}
+/*
+ * Enable or disable passthrough mode to the trackstick. Must be in
+ * command mode when calling this function.
+ */
+static int alps_passthrough_mode_v3(struct psmouse *psmouse, bool enable)
+{
+ int reg_val;
+
+ reg_val = alps_command_mode_read_reg(psmouse, 0x0008);
+ if (reg_val == -1)
+ return -1;
+
+ if (enable)
+ reg_val |= 0x01;
+ else
+ reg_val &= ~0x01;
+
+ if (__alps_command_mode_write_reg(psmouse, reg_val))
+ return -1;
+
+ return 0;
+}
+
+/* Must be in command mode when calling this function */
+static int alps_absolute_mode_v3(struct psmouse *psmouse)
+{
+ int reg_val;
+
+ reg_val = alps_command_mode_read_reg(psmouse, 0x0004);
+ if (reg_val == -1)
+ return -1;
+
+ reg_val |= 0x06;
+ if (__alps_command_mode_write_reg(psmouse, reg_val))
+ return -1;
+
+ return 0;
+}
+
+static int alps_hw_init_v3(struct psmouse *psmouse)
+{
+ struct alps_data *priv = psmouse->private;
+ struct ps2dev *ps2dev = &psmouse->ps2dev;
+ int reg_val;
+ unsigned char param[4];
+
+ priv->nibble_commands = alps_v3_nibble_commands;
+ priv->addr_command = PSMOUSE_CMD_RESET_WRAP;
+
+ if (alps_enter_command_mode(psmouse, NULL))
+ goto error;
+
+ /* Check for trackstick */
+ reg_val = alps_command_mode_read_reg(psmouse, 0x0008);
+ if (reg_val == -1)
+ goto error;
+ if (reg_val & 0x80) {
+ if (alps_passthrough_mode_v3(psmouse, true))
+ goto error;
+ if (alps_exit_command_mode(psmouse))
+ goto error;
+
+ /*
+ * E7 report for the trackstick
+ *
+	 * There have been reports of failures which seem to trace back
+	 * to the above trackstick check failing. When these occur,
+	 * this E7 report fails, so when that happens we continue
+ * with the assumption that there isn't a trackstick after
+ * all.
+ */
+ param[0] = 0x64;
+ if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE21) ||
+ ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE21) ||
+ ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE21) ||
+ ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO)) {
+ psmouse_warn(psmouse, "trackstick E7 report failed\n");
+ } else {
+ psmouse_dbg(psmouse,
+ "trackstick E7 report: %2.2x %2.2x %2.2x\n",
+ param[0], param[1], param[2]);
+
+ /*
+ * Not sure what this does, but it is absolutely
+ * essential. Without it, the touchpad does not
+ * work at all and the trackstick just emits normal
+ * PS/2 packets.
+ */
+ if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||
+ ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||
+ ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||
+ alps_command_mode_send_nibble(psmouse, 0x9) ||
+ alps_command_mode_send_nibble(psmouse, 0x4)) {
+ psmouse_err(psmouse,
+ "Error sending magic E6 sequence\n");
+ goto error_passthrough;
+ }
+ }
+
+ if (alps_enter_command_mode(psmouse, NULL))
+ goto error_passthrough;
+ if (alps_passthrough_mode_v3(psmouse, false))
+ goto error;
+ }
+
+ if (alps_absolute_mode_v3(psmouse)) {
+ psmouse_err(psmouse, "Failed to enter absolute mode\n");
+ goto error;
+ }
+
+ reg_val = alps_command_mode_read_reg(psmouse, 0x0006);
+ if (reg_val == -1)
+ goto error;
+ if (__alps_command_mode_write_reg(psmouse, reg_val | 0x01))
+ goto error;
+
+ reg_val = alps_command_mode_read_reg(psmouse, 0x0007);
+ if (reg_val == -1)
+ goto error;
+ if (__alps_command_mode_write_reg(psmouse, reg_val | 0x01))
+ goto error;
+
+ if (alps_command_mode_read_reg(psmouse, 0x0144) == -1)
+ goto error;
+ if (__alps_command_mode_write_reg(psmouse, 0x04))
+ goto error;
+
+ if (alps_command_mode_read_reg(psmouse, 0x0159) == -1)
+ goto error;
+ if (__alps_command_mode_write_reg(psmouse, 0x03))
+ goto error;
+
+ if (alps_command_mode_read_reg(psmouse, 0x0163) == -1)
+ goto error;
+ if (alps_command_mode_write_reg(psmouse, 0x0163, 0x03))
+ goto error;
+
+ if (alps_command_mode_read_reg(psmouse, 0x0162) == -1)
+ goto error;
+ if (alps_command_mode_write_reg(psmouse, 0x0162, 0x04))
+ goto error;
+
+ /*
+ * This ensures the trackstick packets are in the format
+ * supported by this driver. If bit 1 isn't set the packet
+ * format is different.
+ */
+ if (alps_command_mode_write_reg(psmouse, 0x0008, 0x82))
+ goto error;
+
+ alps_exit_command_mode(psmouse);
+
+ /* Set rate and enable data reporting */
+ param[0] = 0x64;
+ if (ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE) ||
+ ps2_command(ps2dev, NULL, PSMOUSE_CMD_ENABLE)) {
+ psmouse_err(psmouse, "Failed to enable data reporting\n");
+ return -1;
+ }
+
+ return 0;
+
+error_passthrough:
+ /* Something failed while in passthrough mode, so try to get out */
+ if (!alps_enter_command_mode(psmouse, NULL))
+ alps_passthrough_mode_v3(psmouse, false);
+error:
+ /*
+ * Leaving the touchpad in command mode will essentially render
+ * it unusable until the machine reboots, so exit it here just
+ * to be safe
+ */
+ alps_exit_command_mode(psmouse);
+ return -1;
+}
+
+/* Must be in command mode when calling this function */
+static int alps_absolute_mode_v4(struct psmouse *psmouse)
+{
+ int reg_val;
+
+ reg_val = alps_command_mode_read_reg(psmouse, 0x0004);
+ if (reg_val == -1)
+ return -1;
+
+ reg_val |= 0x02;
+ if (__alps_command_mode_write_reg(psmouse, reg_val))
+ return -1;
+
+ return 0;
+}
+
+static int alps_hw_init_v4(struct psmouse *psmouse)
+{
+ struct alps_data *priv = psmouse->private;
+ struct ps2dev *ps2dev = &psmouse->ps2dev;
+ unsigned char param[4];
+
+ priv->nibble_commands = alps_v4_nibble_commands;
+ priv->addr_command = PSMOUSE_CMD_DISABLE;
+
+ if (alps_enter_command_mode(psmouse, NULL))
+ goto error;
+
+ if (alps_absolute_mode_v4(psmouse)) {
+ psmouse_err(psmouse, "Failed to enter absolute mode\n");
+ goto error;
+ }
+
+ if (alps_command_mode_write_reg(psmouse, 0x0007, 0x8c))
+ goto error;
+
+ if (alps_command_mode_write_reg(psmouse, 0x0149, 0x03))
+ goto error;
+
+ if (alps_command_mode_write_reg(psmouse, 0x0160, 0x03))
+ goto error;
+
+ if (alps_command_mode_write_reg(psmouse, 0x017f, 0x15))
+ goto error;
+
+ if (alps_command_mode_write_reg(psmouse, 0x0151, 0x01))
+ goto error;
+
+ if (alps_command_mode_write_reg(psmouse, 0x0168, 0x03))
+ goto error;
+
+ if (alps_command_mode_write_reg(psmouse, 0x014a, 0x03))
+ goto error;
+
+ if (alps_command_mode_write_reg(psmouse, 0x0161, 0x03))
+ goto error;
+
+ alps_exit_command_mode(psmouse);
+
+ /*
+ * This sequence changes the output from a 9-byte to an
+ * 8-byte format. All the same data seems to be present,
+ * just in a more compact format.
+ */
+ param[0] = 0xc8;
+ param[1] = 0x64;
+ param[2] = 0x50;
+ if (ps2_command(ps2dev, &param[0], PSMOUSE_CMD_SETRATE) ||
+ ps2_command(ps2dev, &param[1], PSMOUSE_CMD_SETRATE) ||
+ ps2_command(ps2dev, &param[2], PSMOUSE_CMD_SETRATE) ||
+ ps2_command(ps2dev, param, PSMOUSE_CMD_GETID))
+ return -1;
+
+ /* Set rate and enable data reporting */
+ param[0] = 0x64;
+ if (ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE) ||
+ ps2_command(ps2dev, NULL, PSMOUSE_CMD_ENABLE)) {
+ psmouse_err(psmouse, "Failed to enable data reporting\n");
+ return -1;
+ }
+
+ return 0;
+
+error:
+ /*
+ * Leaving the touchpad in command mode will essentially render
+ * it unusable until the machine reboots, so exit it here just
+ * to be safe
+ */
+ alps_exit_command_mode(psmouse);
+ return -1;
+}
+
+static int alps_hw_init(struct psmouse *psmouse)
+{
+ struct alps_data *priv = psmouse->private;
+ const struct alps_model_info *model = priv->i;
+ int ret = -1;
+
+ switch (model->proto_version) {
+ case ALPS_PROTO_V1:
+ case ALPS_PROTO_V2:
+ ret = alps_hw_init_v1_v2(psmouse);
+ break;
+ case ALPS_PROTO_V3:
+ ret = alps_hw_init_v3(psmouse);
+ break;
+ case ALPS_PROTO_V4:
+ ret = alps_hw_init_v4(psmouse);
+ break;
+ }
+
+ return ret;
+}
+
static int alps_reconnect(struct psmouse *psmouse)
{
const struct alps_model_info *model;
@@ -666,6 +1517,8 @@ int alps_init(struct psmouse *psmouse)
psmouse->private = priv;
+ psmouse_reset(psmouse);
+
model = alps_get_model(psmouse, &version);
if (!model)
goto init_fail;
@@ -693,8 +1546,29 @@ int alps_init(struct psmouse *psmouse)
BIT_MASK(BTN_LEFT) | BIT_MASK(BTN_RIGHT);
dev1->evbit[BIT_WORD(EV_ABS)] |= BIT_MASK(EV_ABS);
- input_set_abs_params(dev1, ABS_X, 0, 1023, 0, 0);
- input_set_abs_params(dev1, ABS_Y, 0, 767, 0, 0);
+
+ switch (model->proto_version) {
+ case ALPS_PROTO_V1:
+ case ALPS_PROTO_V2:
+ input_set_abs_params(dev1, ABS_X, 0, 1023, 0, 0);
+ input_set_abs_params(dev1, ABS_Y, 0, 767, 0, 0);
+ break;
+ case ALPS_PROTO_V3:
+ set_bit(INPUT_PROP_SEMI_MT, dev1->propbit);
+ input_mt_init_slots(dev1, 2);
+ input_set_abs_params(dev1, ABS_MT_POSITION_X, 0, ALPS_V3_X_MAX, 0, 0);
+ input_set_abs_params(dev1, ABS_MT_POSITION_Y, 0, ALPS_V3_Y_MAX, 0, 0);
+
+ set_bit(BTN_TOOL_DOUBLETAP, dev1->keybit);
+ set_bit(BTN_TOOL_TRIPLETAP, dev1->keybit);
+ set_bit(BTN_TOOL_QUADTAP, dev1->keybit);
+ /* fall through */
+ case ALPS_PROTO_V4:
+ input_set_abs_params(dev1, ABS_X, 0, ALPS_V3_X_MAX, 0, 0);
+ input_set_abs_params(dev1, ABS_Y, 0, ALPS_V3_Y_MAX, 0, 0);
+ break;
+ }
+
input_set_abs_params(dev1, ABS_PRESSURE, 0, 127, 0, 0);
if (model->flags & ALPS_WHEEL) {
@@ -737,7 +1611,7 @@ int alps_init(struct psmouse *psmouse)
psmouse->poll = alps_poll;
psmouse->disconnect = alps_disconnect;
psmouse->reconnect = alps_reconnect;
- psmouse->pktsize = 6;
+ psmouse->pktsize = model->proto_version == ALPS_PROTO_V4 ? 8 : 6;
/* We are having trouble resyncing ALPS touchpads so disable it for now */
psmouse->resync_time = 0;
diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h
index 904ed8b3c8be..a00a4ab92a0f 100644
--- a/drivers/input/mouse/alps.h
+++ b/drivers/input/mouse/alps.h
@@ -12,20 +12,39 @@
#ifndef _ALPS_H
#define _ALPS_H
+#define ALPS_PROTO_V1 0
+#define ALPS_PROTO_V2 1
+#define ALPS_PROTO_V3 2
+#define ALPS_PROTO_V4 3
+
struct alps_model_info {
unsigned char signature[3];
+ unsigned char command_mode_resp; /* v3/v4 only */
+ unsigned char proto_version;
unsigned char byte0, mask0;
unsigned char flags;
};
+struct alps_nibble_commands {
+ int command;
+ unsigned char data;
+};
+
struct alps_data {
struct input_dev *dev2; /* Relative device */
char phys[32]; /* Phys */
const struct alps_model_info *i;/* Info */
+ const struct alps_nibble_commands *nibble_commands;
+ int addr_command; /* Command to set register address */
int prev_fin; /* Finger bit from previous packet */
+ int multi_packet; /* Multi-packet data in progress */
+ unsigned char multi_data[6]; /* Saved multi-packet data */
+ u8 quirks;
struct timer_list timer;
};
+#define ALPS_QUIRK_TRACKSTICK_BUTTONS	1 /* trackstick buttons in trackstick packet */
+
#ifdef CONFIG_MOUSE_PS2_ALPS
int alps_detect(struct psmouse *psmouse, bool set_properties);
int alps_init(struct psmouse *psmouse);
diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
index cf87f8b18e34..927e479c2649 100644
--- a/drivers/input/mouse/bcm5974.c
+++ b/drivers/input/mouse/bcm5974.c
@@ -433,6 +433,9 @@ static void setup_events_to_report(struct input_dev *input_dev,
__set_bit(BTN_TOOL_QUADTAP, input_dev->keybit);
__set_bit(BTN_LEFT, input_dev->keybit);
+ if (cfg->caps & HAS_INTEGRATED_BUTTON)
+ __set_bit(INPUT_PROP_BUTTONPAD, input_dev->propbit);
+
input_set_events_per_packet(input_dev, 60);
}
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index e2a9867c19d5..d2c0db159b18 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -43,6 +43,24 @@ static int synaptics_send_cmd(struct psmouse *psmouse, unsigned char c,
}
/*
+ * V3 and later support this fast command
+ */
+static int elantech_send_cmd(struct psmouse *psmouse, unsigned char c,
+ unsigned char *param)
+{
+ struct ps2dev *ps2dev = &psmouse->ps2dev;
+
+ if (ps2_command(ps2dev, NULL, ETP_PS2_CUSTOM_COMMAND) ||
+ ps2_command(ps2dev, NULL, c) ||
+ ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO)) {
+ psmouse_err(psmouse, "%s query 0x%02x failed.\n", __func__, c);
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
* A retrying version of ps2_command
*/
static int elantech_ps2_command(struct psmouse *psmouse,
@@ -863,13 +881,13 @@ static int elantech_set_range(struct psmouse *psmouse,
i = (etd->fw_version > 0x020800 &&
etd->fw_version < 0x020900) ? 1 : 2;
- if (synaptics_send_cmd(psmouse, ETP_FW_ID_QUERY, param))
+ if (etd->send_cmd(psmouse, ETP_FW_ID_QUERY, param))
return -1;
fixed_dpi = param[1] & 0x10;
if (((etd->fw_version >> 16) == 0x14) && fixed_dpi) {
- if (synaptics_send_cmd(psmouse, ETP_SAMPLE_QUERY, param))
+ if (etd->send_cmd(psmouse, ETP_SAMPLE_QUERY, param))
return -1;
*x_max = (etd->capabilities[1] - i) * param[1] / 2;
@@ -888,7 +906,7 @@ static int elantech_set_range(struct psmouse *psmouse,
break;
case 3:
- if (synaptics_send_cmd(psmouse, ETP_FW_ID_QUERY, param))
+ if (etd->send_cmd(psmouse, ETP_FW_ID_QUERY, param))
return -1;
*x_max = (0x0f & param[0]) << 8 | param[1];
@@ -896,7 +914,7 @@ static int elantech_set_range(struct psmouse *psmouse,
break;
case 4:
- if (synaptics_send_cmd(psmouse, ETP_FW_ID_QUERY, param))
+ if (etd->send_cmd(psmouse, ETP_FW_ID_QUERY, param))
return -1;
*x_max = (0x0f & param[0]) << 8 | param[1];
@@ -913,6 +931,30 @@ static int elantech_set_range(struct psmouse *psmouse,
}
/*
+ * (value from firmware) * 10 + 790 = dpi
+ * we also have to convert dpi to dots/mm (*10/254 to avoid floating point)
+ */
+static unsigned int elantech_convert_res(unsigned int val)
+{
+ return (val * 10 + 790) * 10 / 254;
+}
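
As a quick sanity check of the conversion comment above: a firmware value of 5 becomes (5 * 10 + 790) = 840 dpi, and the *10/254 step then gives 33 dots/mm using only integer arithmetic. A minimal standalone sketch of the same arithmetic (the helper name and sample value are illustrative, not part of the patch):

    #include <stdio.h>

    /* Same math as elantech_convert_res(): firmware value -> dots/mm */
    static unsigned int convert_res(unsigned int val)
    {
            return (val * 10 + 790) * 10 / 254;    /* dpi first, then dpi -> dots/mm */
    }

    int main(void)
    {
            printf("%u dots/mm\n", convert_res(5));    /* prints "33 dots/mm" */
            return 0;
    }
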
+
+static int elantech_get_resolution_v4(struct psmouse *psmouse,
+ unsigned int *x_res,
+ unsigned int *y_res)
+{
+ unsigned char param[3];
+
+ if (elantech_send_cmd(psmouse, ETP_RESOLUTION_QUERY, param))
+ return -1;
+
+ *x_res = elantech_convert_res(param[1] & 0x0f);
+ *y_res = elantech_convert_res((param[1] & 0xf0) >> 4);
+
+ return 0;
+}
+
+/*
* Set the appropriate event bits for the input subsystem
*/
static int elantech_set_input_params(struct psmouse *psmouse)
@@ -920,6 +962,7 @@ static int elantech_set_input_params(struct psmouse *psmouse)
struct input_dev *dev = psmouse->dev;
struct elantech_data *etd = psmouse->private;
unsigned int x_min = 0, y_min = 0, x_max = 0, y_max = 0, width = 0;
+ unsigned int x_res = 0, y_res = 0;
if (elantech_set_range(psmouse, &x_min, &y_min, &x_max, &y_max, &width))
return -1;
@@ -967,10 +1010,20 @@ static int elantech_set_input_params(struct psmouse *psmouse)
break;
case 4:
+ if (elantech_get_resolution_v4(psmouse, &x_res, &y_res)) {
+ /*
+ * if query failed, print a warning and leave the values
+ * zero to resemble synaptics.c behavior.
+ */
+ psmouse_warn(psmouse, "couldn't query resolution data.\n");
+ }
+
__set_bit(BTN_TOOL_QUADTAP, dev->keybit);
/* For X to recognize me as touchpad. */
input_set_abs_params(dev, ABS_X, x_min, x_max, 0, 0);
input_set_abs_params(dev, ABS_Y, y_min, y_max, 0, 0);
+ input_abs_set_res(dev, ABS_X, x_res);
+ input_abs_set_res(dev, ABS_Y, y_res);
/*
* range of pressure and width is the same as v2,
* report ABS_PRESSURE, ABS_TOOL_WIDTH for compatibility.
@@ -983,6 +1036,8 @@ static int elantech_set_input_params(struct psmouse *psmouse)
input_mt_init_slots(dev, ETP_MAX_FINGERS);
input_set_abs_params(dev, ABS_MT_POSITION_X, x_min, x_max, 0, 0);
input_set_abs_params(dev, ABS_MT_POSITION_Y, y_min, y_max, 0, 0);
+ input_abs_set_res(dev, ABS_MT_POSITION_X, x_res);
+ input_abs_set_res(dev, ABS_MT_POSITION_Y, y_res);
input_set_abs_params(dev, ABS_MT_PRESSURE, ETP_PMIN_V2,
ETP_PMAX_V2, 0, 0);
/*
@@ -1031,16 +1086,13 @@ static ssize_t elantech_set_int_attr(struct psmouse *psmouse,
struct elantech_data *etd = psmouse->private;
struct elantech_attr_data *attr = data;
unsigned char *reg = (unsigned char *) etd + attr->field_offset;
- unsigned long value;
+ unsigned char value;
int err;
- err = strict_strtoul(buf, 16, &value);
+ err = kstrtou8(buf, 16, &value);
if (err)
return err;
- if (value > 0xff)
- return -EINVAL;
-
/* Do we need to preserve some bits for version 2 hardware too? */
if (etd->hw_version == 1) {
if (attr->reg == 0x10)
@@ -1233,9 +1285,11 @@ static int elantech_set_properties(struct elantech_data *etd)
}
}
- /*
- * Turn on packet checking by default.
- */
+	/* Decide early which send_cmd we are going to use */
+ etd->send_cmd = etd->hw_version >= 3 ? elantech_send_cmd :
+ synaptics_send_cmd;
+
+ /* Turn on packet checking by default */
etd->paritycheck = 1;
/*
@@ -1291,7 +1345,7 @@ int elantech_init(struct psmouse *psmouse)
"assuming hardware version %d (with firmware version 0x%02x%02x%02x)\n",
etd->hw_version, param[0], param[1], param[2]);
- if (synaptics_send_cmd(psmouse, ETP_CAPABILITIES_QUERY,
+ if (etd->send_cmd(psmouse, ETP_CAPABILITIES_QUERY,
etd->capabilities)) {
psmouse_err(psmouse, "failed to query capabilities.\n");
goto init_fail;
diff --git a/drivers/input/mouse/elantech.h b/drivers/input/mouse/elantech.h
index 9e5f1aabea7e..46db3be45ac9 100644
--- a/drivers/input/mouse/elantech.h
+++ b/drivers/input/mouse/elantech.h
@@ -20,6 +20,7 @@
#define ETP_FW_VERSION_QUERY 0x01
#define ETP_CAPABILITIES_QUERY 0x02
#define ETP_SAMPLE_QUERY 0x03
+#define ETP_RESOLUTION_QUERY 0x04
/*
* Command values for register reading or writing
@@ -135,6 +136,7 @@ struct elantech_data {
unsigned int width;
struct finger_pos mt[ETP_MAX_FINGERS];
unsigned char parity[256];
+ int (*send_cmd)(struct psmouse *psmouse, unsigned char c, unsigned char *param);
};
#ifdef CONFIG_MOUSE_PS2_ELANTECH
diff --git a/drivers/input/mouse/gpio_mouse.c b/drivers/input/mouse/gpio_mouse.c
index 58902fbb9896..a9ad8e1402be 100644
--- a/drivers/input/mouse/gpio_mouse.c
+++ b/drivers/input/mouse/gpio_mouse.c
@@ -178,18 +178,7 @@ static struct platform_driver gpio_mouse_device_driver = {
.owner = THIS_MODULE,
}
};
-
-static int __init gpio_mouse_init(void)
-{
- return platform_driver_register(&gpio_mouse_device_driver);
-}
-module_init(gpio_mouse_init);
-
-static void __exit gpio_mouse_exit(void)
-{
- platform_driver_unregister(&gpio_mouse_device_driver);
-}
-module_exit(gpio_mouse_exit);
+module_platform_driver(gpio_mouse_device_driver);
MODULE_AUTHOR("Hans-Christian Egtvedt <egtvedt@samfundet.no>");
MODULE_DESCRIPTION("GPIO mouse driver");
diff --git a/drivers/input/mouse/hgpk.c b/drivers/input/mouse/hgpk.c
index 0470dd46b566..1c5d521de600 100644
--- a/drivers/input/mouse/hgpk.c
+++ b/drivers/input/mouse/hgpk.c
@@ -789,11 +789,14 @@ static ssize_t hgpk_set_powered(struct psmouse *psmouse, void *data,
const char *buf, size_t count)
{
struct hgpk_data *priv = psmouse->private;
- unsigned long value;
+ unsigned int value;
int err;
- err = strict_strtoul(buf, 10, &value);
- if (err || value > 1)
+ err = kstrtouint(buf, 10, &value);
+ if (err)
+ return err;
+
+ if (value > 1)
return -EINVAL;
if (value != priv->powered) {
@@ -881,11 +884,14 @@ static ssize_t hgpk_trigger_recal(struct psmouse *psmouse, void *data,
const char *buf, size_t count)
{
struct hgpk_data *priv = psmouse->private;
- unsigned long value;
+ unsigned int value;
int err;
- err = strict_strtoul(buf, 10, &value);
- if (err || value != 1)
+ err = kstrtouint(buf, 10, &value);
+ if (err)
+ return err;
+
+ if (value != 1)
return -EINVAL;
/*
diff --git a/drivers/input/mouse/logips2pp.c b/drivers/input/mouse/logips2pp.c
index faac2c3bef74..84de2fc6acc1 100644
--- a/drivers/input/mouse/logips2pp.c
+++ b/drivers/input/mouse/logips2pp.c
@@ -155,9 +155,14 @@ static ssize_t ps2pp_attr_show_smartscroll(struct psmouse *psmouse,
static ssize_t ps2pp_attr_set_smartscroll(struct psmouse *psmouse, void *data,
const char *buf, size_t count)
{
- unsigned long value;
+ unsigned int value;
+ int err;
- if (strict_strtoul(buf, 10, &value) || value > 1)
+ err = kstrtouint(buf, 10, &value);
+ if (err)
+ return err;
+
+ if (value > 1)
return -EINVAL;
ps2pp_set_smartscroll(psmouse, value);
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index 9f352fbd7b4f..e6c9931f02c7 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -60,7 +60,7 @@ static unsigned int psmouse_rate = 100;
module_param_named(rate, psmouse_rate, uint, 0644);
MODULE_PARM_DESC(rate, "Report rate, in reports per second.");
-static unsigned int psmouse_smartscroll = 1;
+static bool psmouse_smartscroll = 1;
module_param_named(smartscroll, psmouse_smartscroll, bool, 0644);
MODULE_PARM_DESC(smartscroll, "Logitech Smartscroll autorepeat, 1 = enabled (default), 0 = disabled.");
@@ -127,7 +127,7 @@ struct psmouse_protocol {
* relevant events to the input module once full packet has arrived.
*/
-static psmouse_ret_t psmouse_process_byte(struct psmouse *psmouse)
+psmouse_ret_t psmouse_process_byte(struct psmouse *psmouse)
{
struct input_dev *dev = psmouse->dev;
unsigned char *packet = psmouse->packet;
@@ -418,6 +418,49 @@ int psmouse_reset(struct psmouse *psmouse)
return 0;
}
+/*
+ * Here we set the mouse resolution.
+ */
+
+void psmouse_set_resolution(struct psmouse *psmouse, unsigned int resolution)
+{
+ static const unsigned char params[] = { 0, 1, 2, 2, 3 };
+ unsigned char p;
+
+ if (resolution == 0 || resolution > 200)
+ resolution = 200;
+
+ p = params[resolution / 50];
+ ps2_command(&psmouse->ps2dev, &p, PSMOUSE_CMD_SETRES);
+ psmouse->resolution = 25 << p;
+}
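
For reference, the params[] table above maps the requested resolution in 50 cpi steps to the SETRES argument, and 25 << p reconstructs the value the hardware ends up using: a request of 100 selects params[2] = 2 and stores 25 << 2 = 100, while anything above 200 is clamped and stored as 25 << 3 = 200. A small standalone sketch of that mapping (illustrative only, not driver code):

    #include <stdio.h>

    /* Mirrors psmouse_set_resolution(): requested cpi -> SETRES argument and stored value */
    static void map_resolution(unsigned int resolution)
    {
            static const unsigned char params[] = { 0, 1, 2, 2, 3 };
            unsigned char p;

            if (resolution == 0 || resolution > 200)
                    resolution = 200;
            p = params[resolution / 50];
            printf("request %u -> SETRES arg %u, stored %u cpi\n", resolution, p, 25 << p);
    }

    int main(void)
    {
            map_resolution(100);    /* request 100 -> SETRES arg 2, stored 100 cpi */
            map_resolution(800);    /* clamped to 200 -> SETRES arg 3, stored 200 cpi */
            return 0;
    }
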
+
+/*
+ * Here we set the mouse report rate.
+ */
+
+static void psmouse_set_rate(struct psmouse *psmouse, unsigned int rate)
+{
+ static const unsigned char rates[] = { 200, 100, 80, 60, 40, 20, 10, 0 };
+ unsigned char r;
+ int i = 0;
+
+ while (rates[i] > rate) i++;
+ r = rates[i];
+ ps2_command(&psmouse->ps2dev, &r, PSMOUSE_CMD_SETRATE);
+ psmouse->rate = r;
+}
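
The rates[] table is scanned for the highest supported rate that does not exceed the request, so asking for e.g. 70 reports/s settles on 60, and anything at or above 200 stays at 200. A tiny sketch of just that selection (standalone, names illustrative):

    #include <stdio.h>

    /* Mirrors the loop in psmouse_set_rate(): pick the largest table entry <= request */
    static unsigned int pick_rate(unsigned int rate)
    {
            static const unsigned char rates[] = { 200, 100, 80, 60, 40, 20, 10, 0 };
            int i = 0;

            while (rates[i] > rate)
                    i++;
            return rates[i];
    }

    int main(void)
    {
            printf("%u %u\n", pick_rate(70), pick_rate(500));    /* prints "60 200" */
            return 0;
    }
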
+
+/*
+ * psmouse_poll() - default poll handler. Everyone except for ALPS uses it.
+ */
+
+static int psmouse_poll(struct psmouse *psmouse)
+{
+ return ps2_command(&psmouse->ps2dev, psmouse->packet,
+ PSMOUSE_CMD_POLL | (psmouse->pktsize << 8));
+}
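
The command word handed to ps2_command() packs the number of bytes to read into the bits above the command byte, which is how PSMOUSE_CMD_POLL (0x00eb, "caller sets number of bytes to receive" per psmouse.h) turns into a full-packet read: with the default 3-byte packet the word is 0x03eb, and with a 6-byte ALPS packet it is 0x06eb. A one-line illustration (standalone, constant value taken from psmouse.h):

    #include <stdio.h>

    #define PSMOUSE_CMD_POLL 0x00eb    /* low byte: command; bytes-to-receive go above it */

    int main(void)
    {
            unsigned int pktsize = 6;    /* e.g. an ALPS absolute-mode packet */
            printf("0x%04x\n", PSMOUSE_CMD_POLL | (pktsize << 8));    /* prints 0x06eb */
            return 0;
    }
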
+
/*
* Genius NetMouse magic init.
@@ -603,6 +646,56 @@ static int cortron_detect(struct psmouse *psmouse, bool set_properties)
}
/*
+ * Apply default settings to the psmouse structure. Most of them will
+ * be overridden by individual protocol initialization routines.
+ */
+
+static void psmouse_apply_defaults(struct psmouse *psmouse)
+{
+ struct input_dev *input_dev = psmouse->dev;
+
+ memset(input_dev->evbit, 0, sizeof(input_dev->evbit));
+ memset(input_dev->keybit, 0, sizeof(input_dev->keybit));
+ memset(input_dev->relbit, 0, sizeof(input_dev->relbit));
+ memset(input_dev->absbit, 0, sizeof(input_dev->absbit));
+ memset(input_dev->mscbit, 0, sizeof(input_dev->mscbit));
+
+ __set_bit(EV_KEY, input_dev->evbit);
+ __set_bit(EV_REL, input_dev->evbit);
+
+ __set_bit(BTN_LEFT, input_dev->keybit);
+ __set_bit(BTN_RIGHT, input_dev->keybit);
+
+ __set_bit(REL_X, input_dev->relbit);
+ __set_bit(REL_Y, input_dev->relbit);
+
+ psmouse->set_rate = psmouse_set_rate;
+ psmouse->set_resolution = psmouse_set_resolution;
+ psmouse->poll = psmouse_poll;
+ psmouse->protocol_handler = psmouse_process_byte;
+ psmouse->pktsize = 3;
+ psmouse->reconnect = NULL;
+ psmouse->disconnect = NULL;
+ psmouse->cleanup = NULL;
+ psmouse->pt_activate = NULL;
+ psmouse->pt_deactivate = NULL;
+}
+
+/*
+ * Apply default settings to the psmouse structure and call specified
+ * protocol detection or initialization routine.
+ */
+static int psmouse_do_detect(int (*detect)(struct psmouse *psmouse,
+ bool set_properties),
+ struct psmouse *psmouse, bool set_properties)
+{
+ if (set_properties)
+ psmouse_apply_defaults(psmouse);
+
+ return detect(psmouse, set_properties);
+}
+
+/*
* psmouse_extensions() probes for any extensions to the basic PS/2 protocol
* the mouse may have.
*/
@@ -616,7 +709,7 @@ static int psmouse_extensions(struct psmouse *psmouse,
* We always check for lifebook because it does not disturb mouse
* (it only checks DMI information).
*/
- if (lifebook_detect(psmouse, set_properties) == 0) {
+ if (psmouse_do_detect(lifebook_detect, psmouse, set_properties) == 0) {
if (max_proto > PSMOUSE_IMEX) {
if (!set_properties || lifebook_init(psmouse) == 0)
return PSMOUSE_LIFEBOOK;
@@ -628,15 +721,18 @@ static int psmouse_extensions(struct psmouse *psmouse,
* upsets the thinkingmouse).
*/
- if (max_proto > PSMOUSE_IMEX && thinking_detect(psmouse, set_properties) == 0)
+ if (max_proto > PSMOUSE_IMEX &&
+ psmouse_do_detect(thinking_detect, psmouse, set_properties) == 0) {
return PSMOUSE_THINKPS;
+ }
/*
* Try Synaptics TouchPad. Note that probing is done even if Synaptics protocol
* support is disabled in config - we need to know if it is synaptics so we
* can reset it properly after probing for intellimouse.
*/
- if (max_proto > PSMOUSE_PS2 && synaptics_detect(psmouse, set_properties) == 0) {
+ if (max_proto > PSMOUSE_PS2 &&
+ psmouse_do_detect(synaptics_detect, psmouse, set_properties) == 0) {
synaptics_hardware = true;
if (max_proto > PSMOUSE_IMEX) {
@@ -667,7 +763,8 @@ static int psmouse_extensions(struct psmouse *psmouse,
*/
if (max_proto > PSMOUSE_IMEX) {
ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_RESET_DIS);
- if (alps_detect(psmouse, set_properties) == 0) {
+ if (psmouse_do_detect(alps_detect,
+ psmouse, set_properties) == 0) {
if (!set_properties || alps_init(psmouse) == 0)
return PSMOUSE_ALPS;
/*
@@ -681,7 +778,7 @@ static int psmouse_extensions(struct psmouse *psmouse,
* Try OLPC HGPK touchpad.
*/
if (max_proto > PSMOUSE_IMEX &&
- hgpk_detect(psmouse, set_properties) == 0) {
+ psmouse_do_detect(hgpk_detect, psmouse, set_properties) == 0) {
if (!set_properties || hgpk_init(psmouse) == 0)
return PSMOUSE_HGPK;
/*
@@ -694,7 +791,7 @@ static int psmouse_extensions(struct psmouse *psmouse,
* Try Elantech touchpad.
*/
if (max_proto > PSMOUSE_IMEX &&
- elantech_detect(psmouse, set_properties) == 0) {
+ psmouse_do_detect(elantech_detect, psmouse, set_properties) == 0) {
if (!set_properties || elantech_init(psmouse) == 0)
return PSMOUSE_ELANTECH;
/*
@@ -703,18 +800,21 @@ static int psmouse_extensions(struct psmouse *psmouse,
max_proto = PSMOUSE_IMEX;
}
-
if (max_proto > PSMOUSE_IMEX) {
- if (genius_detect(psmouse, set_properties) == 0)
+ if (psmouse_do_detect(genius_detect,
+ psmouse, set_properties) == 0)
return PSMOUSE_GENPS;
- if (ps2pp_init(psmouse, set_properties) == 0)
+ if (psmouse_do_detect(ps2pp_init,
+ psmouse, set_properties) == 0)
return PSMOUSE_PS2PP;
- if (trackpoint_detect(psmouse, set_properties) == 0)
+ if (psmouse_do_detect(trackpoint_detect,
+ psmouse, set_properties) == 0)
return PSMOUSE_TRACKPOINT;
- if (touchkit_ps2_detect(psmouse, set_properties) == 0)
+ if (psmouse_do_detect(touchkit_ps2_detect,
+ psmouse, set_properties) == 0)
return PSMOUSE_TOUCHKIT_PS2;
}
@@ -723,7 +823,8 @@ static int psmouse_extensions(struct psmouse *psmouse,
* Trackpoint devices (causing TP_READ_ID command to time out).
*/
if (max_proto > PSMOUSE_IMEX) {
- if (fsp_detect(psmouse, set_properties) == 0) {
+ if (psmouse_do_detect(fsp_detect,
+ psmouse, set_properties) == 0) {
if (!set_properties || fsp_init(psmouse) == 0)
return PSMOUSE_FSP;
/*
@@ -741,17 +842,23 @@ static int psmouse_extensions(struct psmouse *psmouse,
ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_RESET_DIS);
psmouse_reset(psmouse);
- if (max_proto >= PSMOUSE_IMEX && im_explorer_detect(psmouse, set_properties) == 0)
+ if (max_proto >= PSMOUSE_IMEX &&
+ psmouse_do_detect(im_explorer_detect,
+ psmouse, set_properties) == 0) {
return PSMOUSE_IMEX;
+ }
- if (max_proto >= PSMOUSE_IMPS && intellimouse_detect(psmouse, set_properties) == 0)
+ if (max_proto >= PSMOUSE_IMPS &&
+ psmouse_do_detect(intellimouse_detect,
+ psmouse, set_properties) == 0) {
return PSMOUSE_IMPS;
+ }
/*
* Okay, all failed, we have a standard mouse here. The number of the buttons
* is still a question, though. We assume 3.
*/
- ps2bare_detect(psmouse, set_properties);
+ psmouse_do_detect(ps2bare_detect, psmouse, set_properties);
if (synaptics_hardware) {
/*
@@ -819,6 +926,13 @@ static const struct psmouse_protocol psmouse_protocols[] = {
.detect = synaptics_detect,
.init = synaptics_init,
},
+ {
+ .type = PSMOUSE_SYNAPTICS_RELATIVE,
+ .name = "SynRelPS/2",
+ .alias = "synaptics-relative",
+ .detect = synaptics_detect,
+ .init = synaptics_init_relative,
+ },
#endif
#ifdef CONFIG_MOUSE_PS2_ALPS
{
@@ -958,39 +1072,6 @@ static int psmouse_probe(struct psmouse *psmouse)
}
/*
- * Here we set the mouse resolution.
- */
-
-void psmouse_set_resolution(struct psmouse *psmouse, unsigned int resolution)
-{
- static const unsigned char params[] = { 0, 1, 2, 2, 3 };
- unsigned char p;
-
- if (resolution == 0 || resolution > 200)
- resolution = 200;
-
- p = params[resolution / 50];
- ps2_command(&psmouse->ps2dev, &p, PSMOUSE_CMD_SETRES);
- psmouse->resolution = 25 << p;
-}
-
-/*
- * Here we set the mouse report rate.
- */
-
-static void psmouse_set_rate(struct psmouse *psmouse, unsigned int rate)
-{
- static const unsigned char rates[] = { 200, 100, 80, 60, 40, 20, 10, 0 };
- unsigned char r;
- int i = 0;
-
- while (rates[i] > rate) i++;
- r = rates[i];
- ps2_command(&psmouse->ps2dev, &r, PSMOUSE_CMD_SETRATE);
- psmouse->rate = r;
-}
-
-/*
* psmouse_initialize() initializes the mouse to a sane state.
*/
@@ -1035,16 +1116,6 @@ static void psmouse_deactivate(struct psmouse *psmouse)
psmouse_set_state(psmouse, PSMOUSE_CMD_MODE);
}
-/*
- * psmouse_poll() - default poll handler. Everyone except for ALPS uses it.
- */
-
-static int psmouse_poll(struct psmouse *psmouse)
-{
- return ps2_command(&psmouse->ps2dev, psmouse->packet,
- PSMOUSE_CMD_POLL | (psmouse->pktsize << 8));
-}
-
/*
* psmouse_resync() attempts to re-validate current protocol.
@@ -1245,18 +1316,9 @@ static int psmouse_switch_protocol(struct psmouse *psmouse,
input_dev->dev.parent = &psmouse->ps2dev.serio->dev;
- input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL);
- input_dev->keybit[BIT_WORD(BTN_MOUSE)] =
- BIT_MASK(BTN_LEFT) | BIT_MASK(BTN_RIGHT);
- input_dev->relbit[0] = BIT_MASK(REL_X) | BIT_MASK(REL_Y);
-
- psmouse->set_rate = psmouse_set_rate;
- psmouse->set_resolution = psmouse_set_resolution;
- psmouse->poll = psmouse_poll;
- psmouse->protocol_handler = psmouse_process_byte;
- psmouse->pktsize = 3;
-
if (proto && (proto->detect || proto->init)) {
+ psmouse_apply_defaults(psmouse);
+
if (proto->detect && proto->detect(psmouse, true) < 0)
return -1;
@@ -1558,13 +1620,12 @@ static ssize_t psmouse_show_int_attr(struct psmouse *psmouse, void *offset, char
static ssize_t psmouse_set_int_attr(struct psmouse *psmouse, void *offset, const char *buf, size_t count)
{
unsigned int *field = (unsigned int *)((char *)psmouse + (size_t)offset);
- unsigned long value;
-
- if (strict_strtoul(buf, 10, &value))
- return -EINVAL;
+ unsigned int value;
+ int err;
- if ((unsigned int)value != value)
- return -EINVAL;
+ err = kstrtouint(buf, 10, &value);
+ if (err)
+ return err;
*field = value;
@@ -1671,10 +1732,12 @@ static ssize_t psmouse_attr_set_protocol(struct psmouse *psmouse, void *data, co
static ssize_t psmouse_attr_set_rate(struct psmouse *psmouse, void *data, const char *buf, size_t count)
{
- unsigned long value;
+ unsigned int value;
+ int err;
- if (strict_strtoul(buf, 10, &value))
- return -EINVAL;
+ err = kstrtouint(buf, 10, &value);
+ if (err)
+ return err;
psmouse->set_rate(psmouse, value);
return count;
@@ -1682,10 +1745,12 @@ static ssize_t psmouse_attr_set_rate(struct psmouse *psmouse, void *data, const
static ssize_t psmouse_attr_set_resolution(struct psmouse *psmouse, void *data, const char *buf, size_t count)
{
- unsigned long value;
+ unsigned int value;
+ int err;
- if (strict_strtoul(buf, 10, &value))
- return -EINVAL;
+ err = kstrtouint(buf, 10, &value);
+ if (err)
+ return err;
psmouse->set_resolution(psmouse, value);
return count;
diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
index 9b84b0c4e371..6a417092d010 100644
--- a/drivers/input/mouse/psmouse.h
+++ b/drivers/input/mouse/psmouse.h
@@ -8,6 +8,7 @@
#define PSMOUSE_CMD_SETSTREAM 0x00ea
#define PSMOUSE_CMD_SETPOLL 0x00f0
#define PSMOUSE_CMD_POLL 0x00eb /* caller sets number of bytes to receive */
+#define PSMOUSE_CMD_RESET_WRAP 0x00ec
#define PSMOUSE_CMD_GETID 0x02f2
#define PSMOUSE_CMD_SETRATE 0x10f3
#define PSMOUSE_CMD_ENABLE 0x00f4
@@ -93,6 +94,7 @@ enum psmouse_type {
PSMOUSE_HGPK,
PSMOUSE_ELANTECH,
PSMOUSE_FSP,
+ PSMOUSE_SYNAPTICS_RELATIVE,
PSMOUSE_AUTO /* This one should always be last */
};
@@ -102,6 +104,7 @@ int psmouse_sliced_command(struct psmouse *psmouse, unsigned char command);
int psmouse_reset(struct psmouse *psmouse);
void psmouse_set_state(struct psmouse *psmouse, enum psmouse_state new_state);
void psmouse_set_resolution(struct psmouse *psmouse, unsigned int resolution);
+psmouse_ret_t psmouse_process_byte(struct psmouse *psmouse);
struct psmouse_attribute {
struct device_attribute dattr;
diff --git a/drivers/input/mouse/pxa930_trkball.c b/drivers/input/mouse/pxa930_trkball.c
index ee3b0ca9d592..a9e4bfdf31f4 100644
--- a/drivers/input/mouse/pxa930_trkball.c
+++ b/drivers/input/mouse/pxa930_trkball.c
@@ -250,19 +250,7 @@ static struct platform_driver pxa930_trkball_driver = {
.probe = pxa930_trkball_probe,
.remove = __devexit_p(pxa930_trkball_remove),
};
-
-static int __init pxa930_trkball_init(void)
-{
- return platform_driver_register(&pxa930_trkball_driver);
-}
-
-static void __exit pxa930_trkball_exit(void)
-{
- platform_driver_unregister(&pxa930_trkball_driver);
-}
-
-module_init(pxa930_trkball_init);
-module_exit(pxa930_trkball_exit);
+module_platform_driver(pxa930_trkball_driver);
MODULE_AUTHOR("Yong Yao <yaoyong@marvell.com>");
MODULE_DESCRIPTION("PXA930 Trackball Mouse Driver");
diff --git a/drivers/input/mouse/sentelic.c b/drivers/input/mouse/sentelic.c
index c5b12d2e955a..e36847de7617 100644
--- a/drivers/input/mouse/sentelic.c
+++ b/drivers/input/mouse/sentelic.c
@@ -2,7 +2,7 @@
* Finger Sensing Pad PS/2 mouse driver.
*
* Copyright (C) 2005-2007 Asia Vital Components Co., Ltd.
- * Copyright (C) 2005-2010 Tai-hwa Liang, Sentelic Corporation.
+ * Copyright (C) 2005-2011 Tai-hwa Liang, Sentelic Corporation.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -162,7 +162,7 @@ static int fsp_reg_write(struct psmouse *psmouse, int reg_addr, int reg_val)
ps2_sendbyte(ps2dev, v, FSP_CMD_TIMEOUT2);
if (ps2_sendbyte(ps2dev, 0xf3, FSP_CMD_TIMEOUT) < 0)
- return -1;
+ goto out;
if ((v = fsp_test_invert_cmd(reg_val)) != reg_val) {
/* inversion is required */
@@ -261,7 +261,7 @@ static int fsp_page_reg_write(struct psmouse *psmouse, int reg_val)
ps2_sendbyte(ps2dev, 0x88, FSP_CMD_TIMEOUT2);
if (ps2_sendbyte(ps2dev, 0xf3, FSP_CMD_TIMEOUT) < 0)
- return -1;
+ goto out;
if ((v = fsp_test_invert_cmd(reg_val)) != reg_val) {
ps2_sendbyte(ps2dev, 0x47, FSP_CMD_TIMEOUT2);
@@ -309,7 +309,7 @@ static int fsp_get_buttons(struct psmouse *psmouse, int *btn)
};
int val;
- if (fsp_reg_read(psmouse, FSP_REG_TMOD_STATUS1, &val) == -1)
+ if (fsp_reg_read(psmouse, FSP_REG_TMOD_STATUS, &val) == -1)
return -EIO;
*btn = buttons[(val & 0x30) >> 4];
@@ -408,7 +408,7 @@ static int fsp_onpad_hscr(struct psmouse *psmouse, bool enable)
static ssize_t fsp_attr_set_setreg(struct psmouse *psmouse, void *data,
const char *buf, size_t count)
{
- unsigned long reg, val;
+ int reg, val;
char *rest;
ssize_t retval;
@@ -416,7 +416,11 @@ static ssize_t fsp_attr_set_setreg(struct psmouse *psmouse, void *data,
if (rest == buf || *rest != ' ' || reg > 0xff)
return -EINVAL;
- if (strict_strtoul(rest + 1, 16, &val) || val > 0xff)
+ retval = kstrtoint(rest + 1, 16, &val);
+ if (retval)
+ return retval;
+
+ if (val > 0xff)
return -EINVAL;
if (fsp_reg_write_enable(psmouse, true))
@@ -448,10 +452,13 @@ static ssize_t fsp_attr_set_getreg(struct psmouse *psmouse, void *data,
const char *buf, size_t count)
{
struct fsp_data *pad = psmouse->private;
- unsigned long reg;
- int val;
+ int reg, val, err;
+
+ err = kstrtoint(buf, 16, &reg);
+ if (err)
+ return err;
- if (strict_strtoul(buf, 16, &reg) || reg > 0xff)
+ if (reg > 0xff)
return -EINVAL;
if (fsp_reg_read(psmouse, reg, &val))
@@ -480,9 +487,13 @@ static ssize_t fsp_attr_show_pagereg(struct psmouse *psmouse,
static ssize_t fsp_attr_set_pagereg(struct psmouse *psmouse, void *data,
const char *buf, size_t count)
{
- unsigned long val;
+ int val, err;
+
+ err = kstrtoint(buf, 16, &val);
+ if (err)
+ return err;
- if (strict_strtoul(buf, 16, &val) || val > 0xff)
+ if (val > 0xff)
return -EINVAL;
if (fsp_page_reg_write(psmouse, val))
@@ -505,9 +516,14 @@ static ssize_t fsp_attr_show_vscroll(struct psmouse *psmouse,
static ssize_t fsp_attr_set_vscroll(struct psmouse *psmouse, void *data,
const char *buf, size_t count)
{
- unsigned long val;
+ unsigned int val;
+ int err;
- if (strict_strtoul(buf, 10, &val) || val > 1)
+ err = kstrtouint(buf, 10, &val);
+ if (err)
+ return err;
+
+ if (val > 1)
return -EINVAL;
fsp_onpad_vscr(psmouse, val);
@@ -529,9 +545,14 @@ static ssize_t fsp_attr_show_hscroll(struct psmouse *psmouse,
static ssize_t fsp_attr_set_hscroll(struct psmouse *psmouse, void *data,
const char *buf, size_t count)
{
- unsigned long val;
+ unsigned int val;
+ int err;
+
+ err = kstrtouint(buf, 10, &val);
+ if (err)
+ return err;
- if (strict_strtoul(buf, 10, &val) || val > 1)
+ if (val > 1)
return -EINVAL;
fsp_onpad_hscr(psmouse, val);
diff --git a/drivers/input/mouse/sentelic.h b/drivers/input/mouse/sentelic.h
index ed1395ac7b8b..2e4af24f8c15 100644
--- a/drivers/input/mouse/sentelic.h
+++ b/drivers/input/mouse/sentelic.h
@@ -2,7 +2,7 @@
* Finger Sensing Pad PS/2 mouse driver.
*
* Copyright (C) 2005-2007 Asia Vital Components Co., Ltd.
- * Copyright (C) 2005-2009 Tai-hwa Liang, Sentelic Corporation.
+ * Copyright (C) 2005-2011 Tai-hwa Liang, Sentelic Corporation.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -33,6 +33,7 @@
/* Finger-sensing Pad control registers */
#define FSP_REG_SYSCTL1 0x10
#define FSP_BIT_EN_REG_CLK BIT(5)
+#define FSP_REG_TMOD_STATUS 0x20
#define FSP_REG_OPC_QDOWN 0x31
#define FSP_BIT_EN_OPC_TAG BIT(7)
#define FSP_REG_OPTZ_XLO 0x34
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index c080b828e5dc..8081a0a5d602 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -24,6 +24,7 @@
*/
#include <linux/module.h>
+#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/input/mt.h>
#include <linux/serio.h>
@@ -268,19 +269,49 @@ static int synaptics_query_hardware(struct psmouse *psmouse)
return 0;
}
-static int synaptics_set_absolute_mode(struct psmouse *psmouse)
+static int synaptics_set_advanced_gesture_mode(struct psmouse *psmouse)
{
+ static unsigned char param = 0xc8;
struct synaptics_data *priv = psmouse->private;
- priv->mode = SYN_BIT_ABSOLUTE_MODE;
- if (SYN_ID_MAJOR(priv->identity) >= 4)
+ if (!SYN_CAP_ADV_GESTURE(priv->ext_cap_0c))
+ return 0;
+
+ if (psmouse_sliced_command(psmouse, SYN_QUE_MODEL))
+ return -1;
+
+ if (ps2_command(&psmouse->ps2dev, &param, PSMOUSE_CMD_SETRATE))
+ return -1;
+
+ /* Advanced gesture mode also sends multi finger data */
+ priv->capabilities |= BIT(1);
+
+ return 0;
+}
+
+static int synaptics_set_mode(struct psmouse *psmouse)
+{
+ struct synaptics_data *priv = psmouse->private;
+
+ priv->mode = 0;
+ if (priv->absolute_mode)
+ priv->mode |= SYN_BIT_ABSOLUTE_MODE;
+ if (priv->disable_gesture)
priv->mode |= SYN_BIT_DISABLE_GESTURE;
+ if (psmouse->rate >= 80)
+ priv->mode |= SYN_BIT_HIGH_RATE;
if (SYN_CAP_EXTENDED(priv->capabilities))
priv->mode |= SYN_BIT_W_MODE;
if (synaptics_mode_cmd(psmouse, priv->mode))
return -1;
+ if (priv->absolute_mode &&
+ synaptics_set_advanced_gesture_mode(psmouse)) {
+ psmouse_err(psmouse, "Advanced gesture mode init failed.\n");
+ return -1;
+ }
+
return 0;
}
@@ -299,26 +330,6 @@ static void synaptics_set_rate(struct psmouse *psmouse, unsigned int rate)
synaptics_mode_cmd(psmouse, priv->mode);
}
-static int synaptics_set_advanced_gesture_mode(struct psmouse *psmouse)
-{
- static unsigned char param = 0xc8;
- struct synaptics_data *priv = psmouse->private;
-
- if (!(SYN_CAP_ADV_GESTURE(priv->ext_cap_0c) ||
- SYN_CAP_IMAGE_SENSOR(priv->ext_cap_0c)))
- return 0;
-
- if (psmouse_sliced_command(psmouse, SYN_QUE_MODEL))
- return -1;
- if (ps2_command(&psmouse->ps2dev, &param, PSMOUSE_CMD_SETRATE))
- return -1;
-
- /* Advanced gesture mode also sends multi finger data */
- priv->capabilities |= BIT(1);
-
- return 0;
-}
-
/*****************************************************************************
* Synaptics pass-through PS/2 port support
****************************************************************************/
@@ -1142,8 +1153,24 @@ static void set_input_params(struct input_dev *dev, struct synaptics_data *priv)
{
int i;
+ /* Things that apply to both modes */
__set_bit(INPUT_PROP_POINTER, dev->propbit);
+ __set_bit(EV_KEY, dev->evbit);
+ __set_bit(BTN_LEFT, dev->keybit);
+ __set_bit(BTN_RIGHT, dev->keybit);
+
+ if (SYN_CAP_MIDDLE_BUTTON(priv->capabilities))
+ __set_bit(BTN_MIDDLE, dev->keybit);
+ if (!priv->absolute_mode) {
+ /* Relative mode */
+ __set_bit(EV_REL, dev->evbit);
+ __set_bit(REL_X, dev->relbit);
+ __set_bit(REL_Y, dev->relbit);
+ return;
+ }
+
+ /* Absolute mode */
__set_bit(EV_ABS, dev->evbit);
set_abs_position_params(dev, priv, ABS_X, ABS_Y);
input_set_abs_params(dev, ABS_PRESSURE, 0, 255, 0, 0);
@@ -1169,20 +1196,14 @@ static void set_input_params(struct input_dev *dev, struct synaptics_data *priv)
if (SYN_CAP_PALMDETECT(priv->capabilities))
input_set_abs_params(dev, ABS_TOOL_WIDTH, 0, 15, 0, 0);
- __set_bit(EV_KEY, dev->evbit);
__set_bit(BTN_TOUCH, dev->keybit);
__set_bit(BTN_TOOL_FINGER, dev->keybit);
- __set_bit(BTN_LEFT, dev->keybit);
- __set_bit(BTN_RIGHT, dev->keybit);
if (SYN_CAP_MULTIFINGER(priv->capabilities)) {
__set_bit(BTN_TOOL_DOUBLETAP, dev->keybit);
__set_bit(BTN_TOOL_TRIPLETAP, dev->keybit);
}
- if (SYN_CAP_MIDDLE_BUTTON(priv->capabilities))
- __set_bit(BTN_MIDDLE, dev->keybit);
-
if (SYN_CAP_FOUR_BUTTON(priv->capabilities) ||
SYN_CAP_MIDDLE_BUTTON(priv->capabilities)) {
__set_bit(BTN_FORWARD, dev->keybit);
@@ -1204,10 +1225,58 @@ static void set_input_params(struct input_dev *dev, struct synaptics_data *priv)
}
}
+static ssize_t synaptics_show_disable_gesture(struct psmouse *psmouse,
+ void *data, char *buf)
+{
+ struct synaptics_data *priv = psmouse->private;
+
+ return sprintf(buf, "%c\n", priv->disable_gesture ? '1' : '0');
+}
+
+static ssize_t synaptics_set_disable_gesture(struct psmouse *psmouse,
+ void *data, const char *buf,
+ size_t len)
+{
+ struct synaptics_data *priv = psmouse->private;
+ unsigned int value;
+ int err;
+
+ err = kstrtouint(buf, 10, &value);
+ if (err)
+ return err;
+
+ if (value > 1)
+ return -EINVAL;
+
+ if (value == priv->disable_gesture)
+ return len;
+
+ priv->disable_gesture = value;
+ if (value)
+ priv->mode |= SYN_BIT_DISABLE_GESTURE;
+ else
+ priv->mode &= ~SYN_BIT_DISABLE_GESTURE;
+
+ if (synaptics_mode_cmd(psmouse, priv->mode))
+ return -EIO;
+
+ return len;
+}
+
+PSMOUSE_DEFINE_ATTR(disable_gesture, S_IWUSR | S_IRUGO, NULL,
+ synaptics_show_disable_gesture,
+ synaptics_set_disable_gesture);
+
static void synaptics_disconnect(struct psmouse *psmouse)
{
+ struct synaptics_data *priv = psmouse->private;
+
+ if (!priv->absolute_mode && SYN_ID_DISGEST_SUPPORTED(priv->identity))
+ device_remove_file(&psmouse->ps2dev.serio->dev,
+ &psmouse_attr_disable_gesture.dattr);
+
synaptics_reset(psmouse);
- kfree(psmouse->private);
+ kfree(priv);
psmouse->private = NULL;
}
@@ -1220,6 +1289,16 @@ static int synaptics_reconnect(struct psmouse *psmouse)
do {
psmouse_reset(psmouse);
+ if (retry) {
+ /*
+ * On some boxes, right after resuming, the touchpad
+ * needs some time to finish initializing (I assume
+ * it needs time to calibrate) and start responding
+ * to Synaptics-specific queries, so let's wait a
+ * bit.
+ */
+ ssleep(1);
+ }
error = synaptics_detect(psmouse, 0);
} while (error && ++retry < 3);
@@ -1234,17 +1313,11 @@ static int synaptics_reconnect(struct psmouse *psmouse)
return -1;
}
- if (synaptics_set_absolute_mode(psmouse)) {
+ if (synaptics_set_mode(psmouse)) {
psmouse_err(psmouse, "Unable to initialize device.\n");
return -1;
}
- if (synaptics_set_advanced_gesture_mode(psmouse)) {
- psmouse_err(psmouse,
- "Advanced gesture mode reconnect failed.\n");
- return -1;
- }
-
if (old_priv.identity != priv->identity ||
old_priv.model_id != priv->model_id ||
old_priv.capabilities != priv->capabilities ||
@@ -1321,20 +1394,18 @@ void __init synaptics_module_init(void)
broken_olpc_ec = dmi_check_system(olpc_dmi_table);
}
-int synaptics_init(struct psmouse *psmouse)
+static int __synaptics_init(struct psmouse *psmouse, bool absolute_mode)
{
struct synaptics_data *priv;
+ int err = -1;
/*
- * The OLPC XO has issues with Synaptics' absolute mode; similarly to
- * the HGPK, it quickly degrades and the hardware becomes jumpy and
- * overly sensitive. Not only that, but the constant packet spew
- * (even at a lowered 40pps rate) overloads the EC such that key
- * presses on the keyboard are missed. Given all of that, don't
- * even attempt to use Synaptics mode. Relative mode seems to work
- * just fine.
+ * The OLPC XO has issues with Synaptics' absolute mode; the constant
+ * packet spew overloads the EC such that key presses on the keyboard
+ * are missed. Given that, don't even attempt to use Absolute mode.
+ * Relative mode seems to work just fine.
*/
- if (broken_olpc_ec) {
+ if (absolute_mode && broken_olpc_ec) {
psmouse_info(psmouse,
"OLPC XO detected, not enabling Synaptics protocol.\n");
return -ENODEV;
@@ -1351,13 +1422,12 @@ int synaptics_init(struct psmouse *psmouse)
goto init_fail;
}
- if (synaptics_set_absolute_mode(psmouse)) {
- psmouse_err(psmouse, "Unable to initialize device.\n");
- goto init_fail;
- }
+ priv->absolute_mode = absolute_mode;
+ if (SYN_ID_DISGEST_SUPPORTED(priv->identity))
+ priv->disable_gesture = true;
- if (synaptics_set_advanced_gesture_mode(psmouse)) {
- psmouse_err(psmouse, "Advanced gesture mode init failed.\n");
+ if (synaptics_set_mode(psmouse)) {
+ psmouse_err(psmouse, "Unable to initialize device.\n");
goto init_fail;
}
@@ -1382,12 +1452,19 @@ int synaptics_init(struct psmouse *psmouse)
psmouse->model = ((priv->model_id & 0x00ff0000) >> 8) |
(priv->model_id & 0x000000ff);
- psmouse->protocol_handler = synaptics_process_byte;
+ if (absolute_mode) {
+ psmouse->protocol_handler = synaptics_process_byte;
+ psmouse->pktsize = 6;
+ } else {
+ /* Relative mode follows standard PS/2 mouse protocol */
+ psmouse->protocol_handler = psmouse_process_byte;
+ psmouse->pktsize = 3;
+ }
+
psmouse->set_rate = synaptics_set_rate;
psmouse->disconnect = synaptics_disconnect;
psmouse->reconnect = synaptics_reconnect;
psmouse->cleanup = synaptics_reset;
- psmouse->pktsize = 6;
/* Synaptics can usually stay in sync without extra help */
psmouse->resync_time = 0;
@@ -1406,11 +1483,32 @@ int synaptics_init(struct psmouse *psmouse)
psmouse->rate = 40;
}
+ if (!priv->absolute_mode && SYN_ID_DISGEST_SUPPORTED(priv->identity)) {
+ err = device_create_file(&psmouse->ps2dev.serio->dev,
+ &psmouse_attr_disable_gesture.dattr);
+ if (err) {
+ psmouse_err(psmouse,
+ "Failed to create disable_gesture attribute (%d)",
+ err);
+ goto init_fail;
+ }
+ }
+
return 0;
init_fail:
kfree(priv);
- return -1;
+ return err;
+}
+
+int synaptics_init(struct psmouse *psmouse)
+{
+ return __synaptics_init(psmouse, true);
+}
+
+int synaptics_init_relative(struct psmouse *psmouse)
+{
+ return __synaptics_init(psmouse, false);
}
bool synaptics_supported(void)
diff --git a/drivers/input/mouse/synaptics.h b/drivers/input/mouse/synaptics.h
index 622aea8dd7e0..fd26ccca13d7 100644
--- a/drivers/input/mouse/synaptics.h
+++ b/drivers/input/mouse/synaptics.h
@@ -100,6 +100,7 @@
#define SYN_ID_MINOR(i) (((i) >> 16) & 0xff)
#define SYN_ID_FULL(i) ((SYN_ID_MAJOR(i) << 8) | SYN_ID_MINOR(i))
#define SYN_ID_IS_SYNAPTICS(i) ((((i) >> 8) & 0xff) == 0x47)
+#define SYN_ID_DISGEST_SUPPORTED(i) (SYN_ID_MAJOR(i) >= 4)
/* synaptics special commands */
#define SYN_PS_SET_MODE2 0x14
@@ -159,6 +160,9 @@ struct synaptics_data {
unsigned char mode; /* current mode byte */
int scroll;
+ bool absolute_mode; /* run in Absolute mode */
+ bool disable_gesture; /* disable gestures */
+
struct serio *pt_port; /* Pass-through serio port */
struct synaptics_mt_state mt_state; /* Current mt finger state */
@@ -175,6 +179,7 @@ struct synaptics_data {
void synaptics_module_init(void);
int synaptics_detect(struct psmouse *psmouse, bool set_properties);
int synaptics_init(struct psmouse *psmouse);
+int synaptics_init_relative(struct psmouse *psmouse);
void synaptics_reset(struct psmouse *psmouse);
bool synaptics_supported(void);
diff --git a/drivers/input/mouse/synaptics_i2c.c b/drivers/input/mouse/synaptics_i2c.c
index 4b755cb5b38c..1c58aafa523f 100644
--- a/drivers/input/mouse/synaptics_i2c.c
+++ b/drivers/input/mouse/synaptics_i2c.c
@@ -185,17 +185,17 @@
#define NO_DATA_SLEEP_MSECS (MSEC_PER_SEC / 4)
/* Control touchpad's No Deceleration option */
-static int no_decel = 1;
+static bool no_decel = 1;
module_param(no_decel, bool, 0644);
MODULE_PARM_DESC(no_decel, "No Deceleration. Default = 1 (on)");
/* Control touchpad's Reduced Reporting option */
-static int reduce_report;
+static bool reduce_report;
module_param(reduce_report, bool, 0644);
MODULE_PARM_DESC(reduce_report, "Reduced Reporting. Default = 0 (off)");
/* Control touchpad's No Filter option */
-static int no_filter;
+static bool no_filter;
module_param(no_filter, bool, 0644);
MODULE_PARM_DESC(no_filter, "No Filter. Default = 0 (off)");
diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c
index 54b2fa892e19..22b218018137 100644
--- a/drivers/input/mouse/trackpoint.c
+++ b/drivers/input/mouse/trackpoint.c
@@ -89,10 +89,12 @@ static ssize_t trackpoint_set_int_attr(struct psmouse *psmouse, void *data,
struct trackpoint_data *tp = psmouse->private;
struct trackpoint_attr_data *attr = data;
unsigned char *field = (unsigned char *)((char *)tp + attr->field_offset);
- unsigned long value;
+ unsigned char value;
+ int err;
- if (strict_strtoul(buf, 10, &value) || value > 255)
- return -EINVAL;
+ err = kstrtou8(buf, 10, &value);
+ if (err)
+ return err;
*field = value;
trackpoint_write(&psmouse->ps2dev, attr->command, value);
@@ -115,9 +117,14 @@ static ssize_t trackpoint_set_bit_attr(struct psmouse *psmouse, void *data,
struct trackpoint_data *tp = psmouse->private;
struct trackpoint_attr_data *attr = data;
unsigned char *field = (unsigned char *)((char *)tp + attr->field_offset);
- unsigned long value;
+ unsigned int value;
+ int err;
+
+ err = kstrtouint(buf, 10, &value);
+ if (err)
+ return err;
- if (strict_strtoul(buf, 10, &value) || value > 1)
+ if (value > 1)
return -EINVAL;
if (attr->inverted)
diff --git a/drivers/input/serio/altera_ps2.c b/drivers/input/serio/altera_ps2.c
index d363dc4571a3..35864c6130bb 100644
--- a/drivers/input/serio/altera_ps2.c
+++ b/drivers/input/serio/altera_ps2.c
@@ -196,18 +196,7 @@ static struct platform_driver altera_ps2_driver = {
.of_match_table = altera_ps2_match,
},
};
-
-static int __init altera_ps2_init(void)
-{
- return platform_driver_register(&altera_ps2_driver);
-}
-module_init(altera_ps2_init);
-
-static void __exit altera_ps2_exit(void)
-{
- platform_driver_unregister(&altera_ps2_driver);
-}
-module_exit(altera_ps2_exit);
+module_platform_driver(altera_ps2_driver);
MODULE_DESCRIPTION("Altera University Program PS2 controller driver");
MODULE_AUTHOR("Thomas Chou <thomas@wytron.com.tw>");
diff --git a/drivers/input/serio/ambakmi.c b/drivers/input/serio/ambakmi.c
index 12abc50508e5..8407d5b0ced8 100644
--- a/drivers/input/serio/ambakmi.c
+++ b/drivers/input/serio/ambakmi.c
@@ -195,6 +195,8 @@ static struct amba_id amba_kmi_idtable[] = {
{ 0, 0 }
};
+MODULE_DEVICE_TABLE(amba, amba_kmi_idtable);
+
static struct amba_driver ambakmi_driver = {
.drv = {
.name = "kmi-pl050",
diff --git a/drivers/input/serio/hp_sdc.c b/drivers/input/serio/hp_sdc.c
index 979c443bf1ef..be3316073ae7 100644
--- a/drivers/input/serio/hp_sdc.c
+++ b/drivers/input/serio/hp_sdc.c
@@ -105,7 +105,7 @@ EXPORT_SYMBOL(__hp_sdc_enqueue_transaction);
EXPORT_SYMBOL(hp_sdc_enqueue_transaction);
EXPORT_SYMBOL(hp_sdc_dequeue_transaction);
-static unsigned int hp_sdc_disabled;
+static bool hp_sdc_disabled;
module_param_named(no_hpsdc, hp_sdc_disabled, bool, 0);
MODULE_PARM_DESC(no_hpsdc, "Do not enable HP SDC driver.");
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index d37a48e099d0..86564414b75a 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -991,7 +991,7 @@ static int i8042_controller_init(void)
* Reset the controller and reset CRT to the original value set by BIOS.
*/
-static void i8042_controller_reset(void)
+static void i8042_controller_reset(bool force_reset)
{
i8042_flush();
@@ -1016,7 +1016,7 @@ static void i8042_controller_reset(void)
* Reset the controller if requested.
*/
- if (i8042_reset)
+ if (i8042_reset || force_reset)
i8042_controller_selftest();
/*
@@ -1139,9 +1139,9 @@ static int i8042_controller_resume(bool force_reset)
* upsetting it.
*/
-static int i8042_pm_reset(struct device *dev)
+static int i8042_pm_suspend(struct device *dev)
{
- i8042_controller_reset();
+ i8042_controller_reset(true);
return 0;
}
@@ -1163,13 +1163,20 @@ static int i8042_pm_thaw(struct device *dev)
return 0;
}
+static int i8042_pm_reset(struct device *dev)
+{
+ i8042_controller_reset(false);
+
+ return 0;
+}
+
static int i8042_pm_restore(struct device *dev)
{
return i8042_controller_resume(false);
}
static const struct dev_pm_ops i8042_pm_ops = {
- .suspend = i8042_pm_reset,
+ .suspend = i8042_pm_suspend,
.resume = i8042_pm_resume,
.thaw = i8042_pm_thaw,
.poweroff = i8042_pm_reset,
@@ -1185,7 +1192,7 @@ static const struct dev_pm_ops i8042_pm_ops = {
static void i8042_shutdown(struct platform_device *dev)
{
- i8042_controller_reset();
+ i8042_controller_reset(false);
}
static int __init i8042_create_kbd_port(void)
@@ -1424,7 +1431,7 @@ static int __init i8042_probe(struct platform_device *dev)
out_fail:
i8042_free_aux_ports(); /* in case KBD failed but AUX not */
i8042_free_irqs();
- i8042_controller_reset();
+ i8042_controller_reset(false);
i8042_platform_device = NULL;
return error;
@@ -1434,7 +1441,7 @@ static int __devexit i8042_remove(struct platform_device *dev)
{
i8042_unregister_ports();
i8042_free_irqs();
- i8042_controller_reset();
+ i8042_controller_reset(false);
i8042_platform_device = NULL;
return 0;
diff --git a/drivers/input/serio/rpckbd.c b/drivers/input/serio/rpckbd.c
index 7ec3c97dc1b9..8b44ddc8041c 100644
--- a/drivers/input/serio/rpckbd.c
+++ b/drivers/input/serio/rpckbd.c
@@ -143,16 +143,4 @@ static struct platform_driver rpckbd_driver = {
.owner = THIS_MODULE,
},
};
-
-static int __init rpckbd_init(void)
-{
- return platform_driver_register(&rpckbd_driver);
-}
-
-static void __exit rpckbd_exit(void)
-{
- platform_driver_unregister(&rpckbd_driver);
-}
-
-module_init(rpckbd_init);
-module_exit(rpckbd_exit);
+module_platform_driver(rpckbd_driver);
diff --git a/drivers/input/serio/serio_raw.c b/drivers/input/serio/serio_raw.c
index 4d4cd142bbbb..8250299fd64f 100644
--- a/drivers/input/serio/serio_raw.c
+++ b/drivers/input/serio/serio_raw.c
@@ -220,11 +220,11 @@ static ssize_t serio_raw_write(struct file *file, const char __user *buffer,
goto out;
}
written++;
- };
+ }
out:
mutex_unlock(&serio_raw_mutex);
- return written;
+ return written ?: retval;
}
static unsigned int serio_raw_poll(struct file *file, poll_table *wait)
@@ -237,9 +237,9 @@ static unsigned int serio_raw_poll(struct file *file, poll_table *wait)
mask = serio_raw->dead ? POLLHUP | POLLERR : POLLOUT | POLLWRNORM;
if (serio_raw->head != serio_raw->tail)
- return POLLIN | POLLRDNORM;
+ mask |= POLLIN | POLLRDNORM;
- return 0;
+ return mask;
}
static const struct file_operations serio_raw_fops = {
diff --git a/drivers/input/serio/xilinx_ps2.c b/drivers/input/serio/xilinx_ps2.c
index d64c5a43aaad..d96d4c2a76a9 100644
--- a/drivers/input/serio/xilinx_ps2.c
+++ b/drivers/input/serio/xilinx_ps2.c
@@ -253,7 +253,7 @@ static int __devinit xps2_of_probe(struct platform_device *ofdev)
}
/* Get IRQ for the device */
- if (of_irq_to_resource(ofdev->dev.of_node, 0, &r_irq) == NO_IRQ) {
+ if (!of_irq_to_resource(ofdev->dev.of_node, 0, &r_irq)) {
dev_err(dev, "no IRQ found\n");
return -ENODEV;
}
@@ -369,19 +369,7 @@ static struct platform_driver xps2_of_driver = {
.probe = xps2_of_probe,
.remove = __devexit_p(xps2_of_remove),
};
-
-static int __init xps2_init(void)
-{
- return platform_driver_register(&xps2_of_driver);
-}
-
-static void __exit xps2_cleanup(void)
-{
- platform_driver_unregister(&xps2_of_driver);
-}
-
-module_init(xps2_init);
-module_exit(xps2_cleanup);
+module_platform_driver(xps2_of_driver);
MODULE_AUTHOR("Xilinx, Inc.");
MODULE_DESCRIPTION("Xilinx XPS PS/2 driver");
diff --git a/drivers/input/tablet/aiptek.c b/drivers/input/tablet/aiptek.c
index d5ef3debd045..205d16aab441 100644
--- a/drivers/input/tablet/aiptek.c
+++ b/drivers/input/tablet/aiptek.c
@@ -1198,9 +1198,9 @@ static ssize_t
store_tabletXtilt(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
struct aiptek *aiptek = dev_get_drvdata(dev);
- long x;
+ int x;
- if (strict_strtol(buf, 10, &x)) {
+ if (kstrtoint(buf, 10, &x)) {
size_t len = buf[count - 1] == '\n' ? count - 1 : count;
if (strncmp(buf, "disable", len))
@@ -1240,9 +1240,9 @@ static ssize_t
store_tabletYtilt(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
struct aiptek *aiptek = dev_get_drvdata(dev);
- long y;
+ int y;
- if (strict_strtol(buf, 10, &y)) {
+ if (kstrtoint(buf, 10, &y)) {
size_t len = buf[count - 1] == '\n' ? count - 1 : count;
if (strncmp(buf, "disable", len))
@@ -1277,12 +1277,13 @@ static ssize_t
store_tabletJitterDelay(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
struct aiptek *aiptek = dev_get_drvdata(dev);
- long j;
+ int err, j;
- if (strict_strtol(buf, 10, &j))
- return -EINVAL;
+ err = kstrtoint(buf, 10, &j);
+ if (err)
+ return err;
- aiptek->newSetting.jitterDelay = (int)j;
+ aiptek->newSetting.jitterDelay = j;
return count;
}
@@ -1306,12 +1307,13 @@ static ssize_t
store_tabletProgrammableDelay(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
struct aiptek *aiptek = dev_get_drvdata(dev);
- long d;
+ int err, d;
- if (strict_strtol(buf, 10, &d))
- return -EINVAL;
+ err = kstrtoint(buf, 10, &d);
+ if (err)
+ return err;
- aiptek->newSetting.programmableDelay = (int)d;
+ aiptek->newSetting.programmableDelay = d;
return count;
}
@@ -1557,11 +1559,13 @@ static ssize_t
store_tabletWheel(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
struct aiptek *aiptek = dev_get_drvdata(dev);
- long w;
+ int err, w;
- if (strict_strtol(buf, 10, &w)) return -EINVAL;
+ err = kstrtoint(buf, 10, &w);
+ if (err)
+ return err;
- aiptek->newSetting.wheel = (int)w;
+ aiptek->newSetting.wheel = w;
return count;
}
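The strict_strtol()/strict_strtoul() calls converted in this file (and in ad7877, ad7879 and ads7846 further down) forced a long temporary plus a cast back to int; kstrtoint()/kstrtouint() parse straight into the destination type and return 0 or a negative errno that can be passed to the caller unchanged. A minimal sketch of the resulting sysfs store pattern, using a hypothetical attribute:

	static ssize_t store_example(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
	{
		int val, err;

		err = kstrtoint(buf, 10, &val);	/* base 10, result written to an int */
		if (err)
			return err;		/* typically -EINVAL or -ERANGE */

		/* ... apply val to the device here ... */
		return count;
	}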
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
index 8f9cde3e0ec2..2a97b7e76db1 100644
--- a/drivers/input/tablet/wacom_sys.c
+++ b/drivers/input/tablet/wacom_sys.c
@@ -28,7 +28,9 @@
#define HID_USAGE_Y_TILT 0x3e
#define HID_USAGE_FINGER 0x22
#define HID_USAGE_STYLUS 0x20
-#define HID_COLLECTION 0xc0
+#define HID_COLLECTION 0xa1
+#define HID_COLLECTION_LOGICAL 0x02
+#define HID_COLLECTION_END 0xc0
enum {
WCM_UNDEFINED = 0,
@@ -66,7 +68,8 @@ static int wacom_get_report(struct usb_interface *intf, u8 type, u8 id,
do {
retval = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
USB_REQ_GET_REPORT,
- USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+ USB_DIR_IN | USB_TYPE_CLASS |
+ USB_RECIP_INTERFACE,
(type << 8) + id,
intf->altsetting[0].desc.bInterfaceNumber,
buf, size, 100);
@@ -164,7 +167,70 @@ static void wacom_close(struct input_dev *dev)
usb_autopm_put_interface(wacom->intf);
}
-static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hid_desc,
+static int wacom_parse_logical_collection(unsigned char *report,
+ struct wacom_features *features)
+{
+ int length = 0;
+
+ if (features->type == BAMBOO_PT) {
+
+ /* Logical collection is only used by 3rd gen Bamboo Touch */
+ features->pktlen = WACOM_PKGLEN_BBTOUCH3;
+ features->device_type = BTN_TOOL_DOUBLETAP;
+
+ /*
+		 * Stylus and Touch have the same active area,
+		 * so compute the physical size based on the stylus
+		 * data before it is overwritten.
+ */
+ features->x_phy =
+ (features->x_max * features->x_resolution) / 100;
+ features->y_phy =
+ (features->y_max * features->y_resolution) / 100;
+
+ features->x_max = features->y_max =
+ get_unaligned_le16(&report[10]);
+
+ length = 11;
+ }
+ return length;
+}
+
+/*
+ * The Interface Descriptor of Wacom devices can be incomplete and
+ * inconsistent, so the wacom_features table is used to store the stylus
+ * device's packet lengths, various maximum values, and tablet
+ * resolution, keyed by product ID.
+ *
+ * For devices that contain 2 interfaces, wacom_features table is
+ * inaccurate for the touch interface. Since the Interface Descriptor
+ * for touch interfaces has fairly complete data, this function exists
+ * to query the tablet for this missing information instead of hard-coding
+ * an additional table.
+ *
+ * A typical Interface Descriptor for a stylus will contain a
+ * boot mouse application collection that is not of interest and this
+ * function will ignore it.
+ *
+ * It also contains a digitizer application collection that is likewise
+ * not of interest, since any information it contains would duplicate
+ * what is in wacom_features. Usually it defines a report of an array
+ * of bytes that could be used as max length of the stylus packet returned.
+ * If it happens to define a Digitizer-Stylus Physical Collection then
+ * the X and Y logical values contain valid data but it is ignored.
+ *
+ * A typical Interface Descriptor for a touch interface will contain a
+ * Digitizer-Finger Physical Collection which will define both logical
+ * X/Y maximum as well as the physical size of tablet. Since touch
+ * interfaces haven't supported pressure or distance, this is enough
+ * information to override invalid values in the wacom_features table.
+ *
+ * 3rd gen Bamboo Touch devices no longer define a Digitizer-Finger Physical
+ * Collection. Instead they define a Logical Collection with a single
+ * Logical Maximum for both X and Y.
+ */
+static int wacom_parse_hid(struct usb_interface *intf,
+ struct hid_descriptor *hid_desc,
struct wacom_features *features)
{
struct usb_device *dev = interface_to_usbdev(intf);
@@ -244,8 +310,6 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi
/* penabled only accepts exact bytes of data */
if (features->type == TABLETPC2FG)
features->pktlen = WACOM_PKGLEN_GRAPHIRE;
- if (features->type == BAMBOO_PT)
- features->pktlen = WACOM_PKGLEN_BBFUN;
features->device_type = BTN_TOOL_PEN;
features->x_max =
get_unaligned_le16(&report[i + 3]);
@@ -287,8 +351,6 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi
/* penabled only accepts exact bytes of data */
if (features->type == TABLETPC2FG)
features->pktlen = WACOM_PKGLEN_GRAPHIRE;
- if (features->type == BAMBOO_PT)
- features->pktlen = WACOM_PKGLEN_BBFUN;
features->device_type = BTN_TOOL_PEN;
features->y_max =
get_unaligned_le16(&report[i + 3]);
@@ -302,6 +364,11 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi
i++;
break;
+ /*
+ * Requiring Stylus Usage will ignore boot mouse
+ * X/Y values and some cases of invalid Digitizer X/Y
+ * values commonly reported.
+ */
case HID_USAGE_STYLUS:
pen = 1;
i++;
@@ -309,10 +376,20 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi
}
break;
- case HID_COLLECTION:
+ case HID_COLLECTION_END:
/* reset UsagePage and Finger */
finger = usage = 0;
break;
+
+ case HID_COLLECTION:
+ i++;
+ switch (report[i]) {
+ case HID_COLLECTION_LOGICAL:
+ i += wacom_parse_logical_collection(&report[i],
+ features);
+ break;
+ }
+ break;
}
}
@@ -348,7 +425,8 @@ static int wacom_query_tablet_data(struct usb_interface *intf, struct wacom_feat
WAC_HID_FEATURE_REPORT,
report_id, rep_data, 4, 1);
} while ((error < 0 || rep_data[1] != 4) && limit++ < WAC_MSG_RETRIES);
- } else if (features->type != TABLETPC) {
+ } else if (features->type != TABLETPC &&
+ features->device_type == BTN_TOOL_PEN) {
do {
rep_data[0] = 2;
rep_data[1] = 2;
@@ -485,7 +563,8 @@ static int wacom_led_control(struct wacom *wacom)
if (!buf)
return -ENOMEM;
- if (wacom->wacom_wac.features.type == WACOM_21UX2)
+ if (wacom->wacom_wac.features.type == WACOM_21UX2 ||
+ wacom->wacom_wac.features.type == WACOM_24HD)
led = (wacom->led.select[1] << 4) | 0x40;
led |= wacom->led.select[0] | 0x4;
@@ -704,6 +783,7 @@ static int wacom_initialize_leds(struct wacom *wacom)
&intuos4_led_attr_group);
break;
+ case WACOM_24HD:
case WACOM_21UX2:
wacom->led.select[0] = 0;
wacom->led.select[1] = 0;
@@ -738,6 +818,7 @@ static void wacom_destroy_leds(struct wacom *wacom)
&intuos4_led_attr_group);
break;
+ case WACOM_24HD:
case WACOM_21UX2:
sysfs_remove_group(&wacom->intf->dev.kobj,
&cintiq_led_attr_group);
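wacom_parse_logical_collection() above pulls the shared X/Y Logical Maximum out of the report descriptor as a 16-bit little-endian value at offset 10 with get_unaligned_le16(). A toy decode, with invented bytes, just to illustrate the helper:

	#include <linux/types.h>
	#include <asm/unaligned.h>

	static u16 example_logical_max(void)
	{
		/* invented bytes: a Logical Maximum of 4095 stored little-endian */
		unsigned char report[12] = { [10] = 0xff, [11] = 0x0f };

		return get_unaligned_le16(&report[10]);	/* 0x0fff == 4095 */
	}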
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index da0d8761e778..88672ec296c1 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -452,7 +452,7 @@ static void wacom_intuos_general(struct wacom_wac *wacom)
if ((data[1] & 0xb8) == 0xa0) {
t = (data[6] << 2) | ((data[7] >> 6) & 3);
if ((features->type >= INTUOS4S && features->type <= INTUOS4L) ||
- features->type == WACOM_21UX2) {
+ features->type == WACOM_21UX2 || features->type == WACOM_24HD) {
t = (t << 1) | (data[1] & 1);
}
input_report_abs(input, ABS_PRESSURE, t);
@@ -519,6 +519,56 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
input_report_key(input, wacom->tool[1], 0);
input_report_abs(input, ABS_MISC, 0);
}
+ } else if (features->type == WACOM_24HD) {
+ input_report_key(input, BTN_0, (data[6] & 0x01));
+ input_report_key(input, BTN_1, (data[6] & 0x02));
+ input_report_key(input, BTN_2, (data[6] & 0x04));
+ input_report_key(input, BTN_3, (data[6] & 0x08));
+ input_report_key(input, BTN_4, (data[6] & 0x10));
+ input_report_key(input, BTN_5, (data[6] & 0x20));
+ input_report_key(input, BTN_6, (data[6] & 0x40));
+ input_report_key(input, BTN_7, (data[6] & 0x80));
+ input_report_key(input, BTN_8, (data[8] & 0x01));
+ input_report_key(input, BTN_9, (data[8] & 0x02));
+ input_report_key(input, BTN_A, (data[8] & 0x04));
+ input_report_key(input, BTN_B, (data[8] & 0x08));
+ input_report_key(input, BTN_C, (data[8] & 0x10));
+ input_report_key(input, BTN_X, (data[8] & 0x20));
+ input_report_key(input, BTN_Y, (data[8] & 0x40));
+ input_report_key(input, BTN_Z, (data[8] & 0x80));
+
+ /*
+ * Three "buttons" are available on the 24HD which are
+ * physically implemented as a touchstrip. Each button
+ * is approximately 3 bits wide with a 2 bit spacing.
+ * The raw touchstrip bits are stored at:
+	 *   ((data[3] & 0x1f) << 8) | data[4]
+ */
+ input_report_key(input, KEY_PROG1, data[4] & 0x07);
+ input_report_key(input, KEY_PROG2, data[4] & 0xE0);
+ input_report_key(input, KEY_PROG3, data[3] & 0x1C);
+
+ if (data[1] & 0x80) {
+ input_report_abs(input, ABS_WHEEL, (data[1] & 0x7f));
+ } else {
+ /* Out of proximity, clear wheel value. */
+ input_report_abs(input, ABS_WHEEL, 0);
+ }
+
+ if (data[2] & 0x80) {
+ input_report_abs(input, ABS_THROTTLE, (data[2] & 0x7f));
+ } else {
+ /* Out of proximity, clear second wheel value. */
+ input_report_abs(input, ABS_THROTTLE, 0);
+ }
+
+ if (data[1] | data[2] | (data[3] & 0x1f) | data[4] | data[6] | data[8]) {
+ input_report_key(input, wacom->tool[1], 1);
+ input_report_abs(input, ABS_MISC, PAD_DEVICE_ID);
+ } else {
+ input_report_key(input, wacom->tool[1], 0);
+ input_report_abs(input, ABS_MISC, 0);
+ }
} else {
if (features->type == WACOM_21UX2) {
input_report_key(input, BTN_0, (data[5] & 0x01));
@@ -799,6 +849,9 @@ static int wacom_bpt_touch(struct wacom_wac *wacom)
unsigned char *data = wacom->data;
int i;
+ if (data[0] != 0x02)
+ return 0;
+
for (i = 0; i < 2; i++) {
int offset = (data[1] & 0x80) ? (8 * i) : (9 * i);
bool touch = data[offset + 3] & 0x80;
@@ -837,18 +890,77 @@ static int wacom_bpt_touch(struct wacom_wac *wacom)
return 0;
}
+static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data)
+{
+ struct input_dev *input = wacom->input;
+ int slot_id = data[0] - 2; /* data[0] is between 2 and 17 */
+ bool touch = data[1] & 0x80;
+
+ touch = touch && !wacom->shared->stylus_in_proximity;
+
+ input_mt_slot(input, slot_id);
+ input_mt_report_slot_state(input, MT_TOOL_FINGER, touch);
+
+ if (touch) {
+ int x = (data[2] << 4) | (data[4] >> 4);
+ int y = (data[3] << 4) | (data[4] & 0x0f);
+ int w = data[6];
+
+ input_report_abs(input, ABS_MT_POSITION_X, x);
+ input_report_abs(input, ABS_MT_POSITION_Y, y);
+ input_report_abs(input, ABS_MT_TOUCH_MAJOR, w);
+ }
+}
+
+static void wacom_bpt3_button_msg(struct wacom_wac *wacom, unsigned char *data)
+{
+ struct input_dev *input = wacom->input;
+
+ input_report_key(input, BTN_LEFT, (data[1] & 0x08) != 0);
+ input_report_key(input, BTN_FORWARD, (data[1] & 0x04) != 0);
+ input_report_key(input, BTN_BACK, (data[1] & 0x02) != 0);
+ input_report_key(input, BTN_RIGHT, (data[1] & 0x01) != 0);
+}
+
+static int wacom_bpt3_touch(struct wacom_wac *wacom)
+{
+ struct input_dev *input = wacom->input;
+ unsigned char *data = wacom->data;
+ int count = data[1] & 0x03;
+ int i;
+
+ if (data[0] != 0x02)
+ return 0;
+
+	/* data has up to 7 fixed-size 8-byte messages starting at data[2] */
+ for (i = 0; i < count; i++) {
+ int offset = (8 * i) + 2;
+ int msg_id = data[offset];
+
+ if (msg_id >= 2 && msg_id <= 17)
+ wacom_bpt3_touch_msg(wacom, data + offset);
+ else if (msg_id == 128)
+ wacom_bpt3_button_msg(wacom, data + offset);
+
+ }
+
+ input_mt_report_pointer_emulation(input, true);
+
+ input_sync(input);
+
+ return 0;
+}
+
static int wacom_bpt_pen(struct wacom_wac *wacom)
{
struct input_dev *input = wacom->input;
unsigned char *data = wacom->data;
int prox = 0, x = 0, y = 0, p = 0, d = 0, pen = 0, btn1 = 0, btn2 = 0;
- /*
- * Similar to Graphire protocol, data[1] & 0x20 is proximity and
- * data[1] & 0x18 is tool ID. 0x30 is safety check to ignore
- * 2 unused tool ID's.
- */
- prox = (data[1] & 0x30) == 0x30;
+ if (data[0] != 0x02)
+ return 0;
+
+ prox = (data[1] & 0x20) == 0x20;
/*
* All reports shared between PEN and RUBBER tool must be
@@ -912,7 +1024,9 @@ static int wacom_bpt_irq(struct wacom_wac *wacom, size_t len)
{
if (len == WACOM_PKGLEN_BBTOUCH)
return wacom_bpt_touch(wacom);
- else if (len == WACOM_PKGLEN_BBFUN)
+ else if (len == WACOM_PKGLEN_BBTOUCH3)
+ return wacom_bpt3_touch(wacom);
+ else if (len == WACOM_PKGLEN_BBFUN || len == WACOM_PKGLEN_BBPEN)
return wacom_bpt_pen(wacom);
return 0;
@@ -955,6 +1069,7 @@ void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len)
case CINTIQ:
case WACOM_BEE:
case WACOM_21UX2:
+ case WACOM_24HD:
sync = wacom_intuos_irq(wacom_wac);
break;
@@ -1031,9 +1146,9 @@ void wacom_setup_device_quirks(struct wacom_features *features)
features->type == BAMBOO_PT)
features->quirks |= WACOM_QUIRK_MULTI_INPUT;
- /* quirks for bamboo touch */
+ /* quirk for bamboo touch with 2 low res touches */
if (features->type == BAMBOO_PT &&
- features->device_type == BTN_TOOL_DOUBLETAP) {
+ features->pktlen == WACOM_PKGLEN_BBTOUCH) {
features->x_max <<= 5;
features->y_max <<= 5;
features->x_fuzz <<= 5;
@@ -1110,6 +1225,26 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
__set_bit(INPUT_PROP_POINTER, input_dev->propbit);
break;
+ case WACOM_24HD:
+ __set_bit(BTN_A, input_dev->keybit);
+ __set_bit(BTN_B, input_dev->keybit);
+ __set_bit(BTN_C, input_dev->keybit);
+ __set_bit(BTN_X, input_dev->keybit);
+ __set_bit(BTN_Y, input_dev->keybit);
+ __set_bit(BTN_Z, input_dev->keybit);
+
+ for (i = 0; i < 10; i++)
+ __set_bit(BTN_0 + i, input_dev->keybit);
+
+ __set_bit(KEY_PROG1, input_dev->keybit);
+ __set_bit(KEY_PROG2, input_dev->keybit);
+ __set_bit(KEY_PROG3, input_dev->keybit);
+
+ input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
+ input_set_abs_params(input_dev, ABS_THROTTLE, 0, 71, 0, 0);
+ wacom_setup_cintiq(wacom_wac);
+ break;
+
case WACOM_21UX2:
__set_bit(BTN_A, input_dev->keybit);
__set_bit(BTN_B, input_dev->keybit);
@@ -1240,7 +1375,21 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
__set_bit(BTN_TOOL_FINGER, input_dev->keybit);
__set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit);
- input_mt_init_slots(input_dev, 2);
+ if (features->pktlen == WACOM_PKGLEN_BBTOUCH3) {
+ __set_bit(BTN_TOOL_TRIPLETAP,
+ input_dev->keybit);
+ __set_bit(BTN_TOOL_QUADTAP,
+ input_dev->keybit);
+
+ input_mt_init_slots(input_dev, 16);
+
+ input_set_abs_params(input_dev,
+ ABS_MT_TOUCH_MAJOR,
+ 0, 255, 0, 0);
+ } else {
+ input_mt_init_slots(input_dev, 2);
+ }
+
input_set_abs_params(input_dev, ABS_MT_POSITION_X,
0, features->x_max,
features->x_fuzz, 0);
@@ -1425,6 +1574,9 @@ static const struct wacom_features wacom_features_0xBB =
static const struct wacom_features wacom_features_0xBC =
{ "Wacom Intuos4 WL", WACOM_PKGLEN_INTUOS, 40840, 25400, 2047,
63, INTUOS4, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+static const struct wacom_features wacom_features_0xF4 =
+ { "Wacom Cintiq 24HD", WACOM_PKGLEN_INTUOS, 104480, 65600, 2047,
+ 63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
static const struct wacom_features wacom_features_0x3F =
{ "Wacom Cintiq 21UX", WACOM_PKGLEN_INTUOS, 87200, 65600, 1023,
63, CINTIQ, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
@@ -1470,6 +1622,9 @@ static const struct wacom_features wacom_features_0xE3 =
static const struct wacom_features wacom_features_0xE6 =
{ "Wacom ISDv4 E6", WACOM_PKGLEN_TPC2FG, 27760, 15694, 255,
0, TABLETPC2FG, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0xEC =
+ { "Wacom ISDv4 EC", WACOM_PKGLEN_GRAPHIRE, 25710, 14500, 255,
+ 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x47 =
{ "Wacom Intuos2 6x8", WACOM_PKGLEN_INTUOS, 20320, 16240, 1023,
31, INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
@@ -1506,6 +1661,15 @@ static const struct wacom_features wacom_features_0xDA =
static struct wacom_features wacom_features_0xDB =
{ "Wacom Bamboo 2FG 6x8 SE", WACOM_PKGLEN_BBFUN, 21648, 13700, 1023,
31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0xDD =
+ { "Wacom Bamboo Connect", WACOM_PKGLEN_BBPEN, 14720, 9200, 1023,
+ 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0xDE =
+ { "Wacom Bamboo 16FG 4x5", WACOM_PKGLEN_BBPEN, 14720, 9200, 1023,
+ 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0xDF =
+ { "Wacom Bamboo 16FG 6x8", WACOM_PKGLEN_BBPEN, 21648, 13700, 1023,
+ 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x6004 =
{ "ISD-V4", WACOM_PKGLEN_GRAPHIRE, 12800, 8000, 255,
0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
@@ -1601,6 +1765,9 @@ const struct usb_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(0xD8) },
{ USB_DEVICE_WACOM(0xDA) },
{ USB_DEVICE_WACOM(0xDB) },
+ { USB_DEVICE_WACOM(0xDD) },
+ { USB_DEVICE_WACOM(0xDE) },
+ { USB_DEVICE_WACOM(0xDF) },
{ USB_DEVICE_WACOM(0xF0) },
{ USB_DEVICE_WACOM(0xCC) },
{ USB_DEVICE_WACOM(0x90) },
@@ -1611,7 +1778,9 @@ const struct usb_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(0xE2) },
{ USB_DEVICE_WACOM(0xE3) },
{ USB_DEVICE_WACOM(0xE6) },
+ { USB_DEVICE_WACOM(0xEC) },
{ USB_DEVICE_WACOM(0x47) },
+ { USB_DEVICE_WACOM(0xF4) },
{ USB_DEVICE_LENOVO(0x6004) },
{ }
};
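wacom_bpt3_touch_msg() above unpacks 12-bit coordinates that the 3rd-gen Bamboo packs into two and a half bytes per axis pair: data[2] and data[3] carry the high 8 bits of X and Y, and the two nibbles of data[4] carry the low 4 bits of each. A worked example with invented bytes:

	/* invented message bytes: data[2] = 0x12, data[3] = 0x34, data[4] = 0xab */
	int x = (0x12 << 4) | (0xab >> 4);	/* 0x12a == 298 */
	int y = (0x34 << 4) | (0xab & 0x0f);	/* 0x34b == 843 */

The slot index is data[0] - 2 because message ids 2..17 map onto the 16 MT slots that wacom_setup_input_capabilities() above initialises with input_mt_init_slots(input_dev, 16) for WACOM_PKGLEN_BBTOUCH3 devices.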
diff --git a/drivers/input/tablet/wacom_wac.h b/drivers/input/tablet/wacom_wac.h
index 53eb71b68330..050acaefee7d 100644
--- a/drivers/input/tablet/wacom_wac.h
+++ b/drivers/input/tablet/wacom_wac.h
@@ -12,7 +12,7 @@
#include <linux/types.h>
/* maximum packet length for USB devices */
-#define WACOM_PKGLEN_MAX 32
+#define WACOM_PKGLEN_MAX 64
/* packet length for individual models */
#define WACOM_PKGLEN_PENPRTN 7
@@ -22,6 +22,8 @@
#define WACOM_PKGLEN_TPC1FG 5
#define WACOM_PKGLEN_TPC2FG 14
#define WACOM_PKGLEN_BBTOUCH 20
+#define WACOM_PKGLEN_BBTOUCH3 64
+#define WACOM_PKGLEN_BBPEN 10
/* device IDs */
#define STYLUS_DEVICE_ID 0x02
@@ -57,6 +59,7 @@ enum {
INTUOS4S,
INTUOS4,
INTUOS4L,
+ WACOM_24HD,
WACOM_21UX2,
CINTIQ,
WACOM_BEE,
diff --git a/drivers/input/touchscreen/88pm860x-ts.c b/drivers/input/touchscreen/88pm860x-ts.c
index b3aebc2166ba..05f30b73c3c3 100644
--- a/drivers/input/touchscreen/88pm860x-ts.c
+++ b/drivers/input/touchscreen/88pm860x-ts.c
@@ -217,18 +217,7 @@ static struct platform_driver pm860x_touch_driver = {
.probe = pm860x_touch_probe,
.remove = __devexit_p(pm860x_touch_remove),
};
-
-static int __init pm860x_touch_init(void)
-{
- return platform_driver_register(&pm860x_touch_driver);
-}
-module_init(pm860x_touch_init);
-
-static void __exit pm860x_touch_exit(void)
-{
- platform_driver_unregister(&pm860x_touch_driver);
-}
-module_exit(pm860x_touch_exit);
+module_platform_driver(pm860x_touch_driver);
MODULE_DESCRIPTION("Touchscreen driver for Marvell Semiconductor 88PM860x");
MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>");
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 3488ffe1fa0a..4af2a18eb3ba 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -98,6 +98,19 @@ config TOUCHSCREEN_ATMEL_MXT
To compile this driver as a module, choose M here: the
module will be called atmel_mxt_ts.
+config TOUCHSCREEN_AUO_PIXCIR
+ tristate "AUO in-cell touchscreen using Pixcir ICs"
+ depends on I2C
+ depends on GPIOLIB
+ help
+	  Say Y here if you have an AUO display with an in-cell touchscreen
+ using Pixcir ICs.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called auo-pixcir-ts.
+
config TOUCHSCREEN_BITSY
tristate "Compaq iPAQ H3600 (Bitsy) touchscreen"
depends on SA1100_BITSY
@@ -177,6 +190,16 @@ config TOUCHSCREEN_EETI
To compile this driver as a module, choose M here: the
module will be called eeti_ts.
+config TOUCHSCREEN_EGALAX
+ tristate "EETI eGalax multi-touch panel support"
+ depends on I2C
+ help
+ Say Y here to enable support for I2C connected EETI
+ eGalax multi-touch panels.
+
+ To compile this driver as a module, choose M here: the
+ module will be called egalax_ts.
+
config TOUCHSCREEN_FUJITSU
tristate "Fujitsu serial touchscreen"
select SERIO
@@ -435,6 +458,18 @@ config TOUCHSCREEN_UCB1400
To compile this driver as a module, choose M here: the
module will be called ucb1400_ts.
+config TOUCHSCREEN_PIXCIR
+ tristate "PIXCIR I2C touchscreens"
+ depends on I2C
+ help
+	  Say Y here if you have a Pixcir I2C touchscreen
+ controller.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called pixcir_i2c_ts.
+
config TOUCHSCREEN_WM831X
tristate "Support for WM831x touchscreen controllers"
depends on MFD_WM831X
@@ -541,6 +576,7 @@ config TOUCHSCREEN_USB_COMPOSITE
- GoTop Super_Q2/GogoPen/PenPower tablets
- JASTEC USB Touch Controller/DigiTech DTR-02U
- Zytronic controllers
+ - Elo TouchSystems 2700 IntelliTouch
Have a look at <http://linux.chapter7.ch/touchkit/> for
a usage description and the required user-space stuff.
@@ -620,6 +656,11 @@ config TOUCHSCREEN_USB_JASTEC
bool "JASTEC/DigiTech DTR-02U USB touch controller device support" if EXPERT
depends on TOUCHSCREEN_USB_COMPOSITE
+config TOUCHSCREEN_USB_ELO
+ default y
+ bool "Elo TouchSystems 2700 IntelliTouch controller device support" if EXPERT
+ depends on TOUCHSCREEN_USB_COMPOSITE
+
config TOUCHSCREEN_USB_E2I
default y
bool "e2i Touchscreen controller (e.g. from Mimo 740)"
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index f957676035a4..496091e88460 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_TOUCHSCREEN_AD7879_SPI) += ad7879-spi.o
obj-$(CONFIG_TOUCHSCREEN_ADS7846) += ads7846.o
obj-$(CONFIG_TOUCHSCREEN_ATMEL_MXT) += atmel_mxt_ts.o
obj-$(CONFIG_TOUCHSCREEN_ATMEL_TSADCC) += atmel_tsadcc.o
+obj-$(CONFIG_TOUCHSCREEN_AUO_PIXCIR) += auo-pixcir-ts.o
obj-$(CONFIG_TOUCHSCREEN_BITSY) += h3600_ts_input.o
obj-$(CONFIG_TOUCHSCREEN_BU21013) += bu21013_ts.o
obj-$(CONFIG_TOUCHSCREEN_CY8CTMG110) += cy8ctmg110_ts.o
@@ -23,6 +24,7 @@ obj-$(CONFIG_TOUCHSCREEN_HAMPSHIRE) += hampshire.o
obj-$(CONFIG_TOUCHSCREEN_GUNZE) += gunze.o
obj-$(CONFIG_TOUCHSCREEN_EETI) += eeti_ts.o
obj-$(CONFIG_TOUCHSCREEN_ELO) += elo.o
+obj-$(CONFIG_TOUCHSCREEN_EGALAX) += egalax_ts.o
obj-$(CONFIG_TOUCHSCREEN_FUJITSU) += fujitsu_ts.o
obj-$(CONFIG_TOUCHSCREEN_INEXIO) += inexio.o
obj-$(CONFIG_TOUCHSCREEN_INTEL_MID) += intel-mid-touch.o
@@ -39,6 +41,7 @@ obj-$(CONFIG_TOUCHSCREEN_HTCPEN) += htcpen.o
obj-$(CONFIG_TOUCHSCREEN_USB_COMPOSITE) += usbtouchscreen.o
obj-$(CONFIG_TOUCHSCREEN_PCAP) += pcap_ts.o
obj-$(CONFIG_TOUCHSCREEN_PENMOUNT) += penmount.o
+obj-$(CONFIG_TOUCHSCREEN_PIXCIR) += pixcir_i2c_ts.o
obj-$(CONFIG_TOUCHSCREEN_S3C2410) += s3c2410_ts.o
obj-$(CONFIG_TOUCHSCREEN_ST1232) += st1232.o
obj-$(CONFIG_TOUCHSCREEN_STMPE) += stmpe-ts.o
diff --git a/drivers/input/touchscreen/ad7877.c b/drivers/input/touchscreen/ad7877.c
index 400131df677b..49a36df0b752 100644
--- a/drivers/input/touchscreen/ad7877.c
+++ b/drivers/input/touchscreen/ad7877.c
@@ -488,10 +488,10 @@ static ssize_t ad7877_disable_store(struct device *dev,
const char *buf, size_t count)
{
struct ad7877 *ts = dev_get_drvdata(dev);
- unsigned long val;
+ unsigned int val;
int error;
- error = strict_strtoul(buf, 10, &val);
+ error = kstrtouint(buf, 10, &val);
if (error)
return error;
@@ -518,10 +518,10 @@ static ssize_t ad7877_dac_store(struct device *dev,
const char *buf, size_t count)
{
struct ad7877 *ts = dev_get_drvdata(dev);
- unsigned long val;
+ unsigned int val;
int error;
- error = strict_strtoul(buf, 10, &val);
+ error = kstrtouint(buf, 10, &val);
if (error)
return error;
@@ -548,10 +548,10 @@ static ssize_t ad7877_gpio3_store(struct device *dev,
const char *buf, size_t count)
{
struct ad7877 *ts = dev_get_drvdata(dev);
- unsigned long val;
+ unsigned int val;
int error;
- error = strict_strtoul(buf, 10, &val);
+ error = kstrtouint(buf, 10, &val);
if (error)
return error;
@@ -579,10 +579,10 @@ static ssize_t ad7877_gpio4_store(struct device *dev,
const char *buf, size_t count)
{
struct ad7877 *ts = dev_get_drvdata(dev);
- unsigned long val;
+ unsigned int val;
int error;
- error = strict_strtoul(buf, 10, &val);
+ error = kstrtouint(buf, 10, &val);
if (error)
return error;
@@ -612,10 +612,10 @@ static struct attribute *ad7877_attributes[] = {
NULL
};
-static mode_t ad7877_attr_is_visible(struct kobject *kobj,
+static umode_t ad7877_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
- mode_t mode = attr->mode;
+ umode_t mode = attr->mode;
if (attr == &dev_attr_aux3.attr) {
if (gpio3)
@@ -853,7 +853,6 @@ static SIMPLE_DEV_PM_OPS(ad7877_pm, ad7877_suspend, ad7877_resume);
static struct spi_driver ad7877_driver = {
.driver = {
.name = "ad7877",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
.pm = &ad7877_pm,
},
diff --git a/drivers/input/touchscreen/ad7879-i2c.c b/drivers/input/touchscreen/ad7879-i2c.c
index c789b974c795..0dac6712f42b 100644
--- a/drivers/input/touchscreen/ad7879-i2c.c
+++ b/drivers/input/touchscreen/ad7879-i2c.c
@@ -16,30 +16,6 @@
#define AD7879_DEVID 0x79 /* AD7879-1/AD7889-1 */
-#ifdef CONFIG_PM_SLEEP
-static int ad7879_i2c_suspend(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct ad7879 *ts = i2c_get_clientdata(client);
-
- ad7879_suspend(ts);
-
- return 0;
-}
-
-static int ad7879_i2c_resume(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct ad7879 *ts = i2c_get_clientdata(client);
-
- ad7879_resume(ts);
-
- return 0;
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(ad7879_i2c_pm, ad7879_i2c_suspend, ad7879_i2c_resume);
-
/* All registers are word-sized.
* AD7879 uses a high-byte first convention.
*/
@@ -47,7 +23,7 @@ static int ad7879_i2c_read(struct device *dev, u8 reg)
{
struct i2c_client *client = to_i2c_client(dev);
- return swab16(i2c_smbus_read_word_data(client, reg));
+ return i2c_smbus_read_word_swapped(client, reg);
}
static int ad7879_i2c_multi_read(struct device *dev,
@@ -68,7 +44,7 @@ static int ad7879_i2c_write(struct device *dev, u8 reg, u16 val)
{
struct i2c_client *client = to_i2c_client(dev);
- return i2c_smbus_write_word_data(client, reg, swab16(val));
+ return i2c_smbus_write_word_swapped(client, reg, val);
}
static const struct ad7879_bus_ops ad7879_i2c_bus_ops = {
@@ -119,7 +95,7 @@ static struct i2c_driver ad7879_i2c_driver = {
.driver = {
.name = "ad7879",
.owner = THIS_MODULE,
- .pm = &ad7879_i2c_pm,
+ .pm = &ad7879_pm_ops,
},
.probe = ad7879_i2c_probe,
.remove = __devexit_p(ad7879_i2c_remove),
@@ -141,4 +117,3 @@ module_exit(ad7879_i2c_exit);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("AD7879(-1) touchscreen I2C bus driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("i2c:ad7879");
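The AD7879 uses a high-byte-first register convention while the plain i2c_smbus_read_word_data()/i2c_smbus_write_word_data() helpers assume SMBus (low-byte-first) order, hence the old open-coded swab16() wrappers. The _swapped variants fold the byte swap into the accessor, and on the read side they also leave negative error returns untouched instead of byte-swapping them. The two spellings side by side (illustrative fragment, not complete code):

	/* before */
	val = swab16(i2c_smbus_read_word_data(client, reg));
	ret = i2c_smbus_write_word_data(client, reg, swab16(val));

	/* after */
	val = i2c_smbus_read_word_swapped(client, reg);
	ret = i2c_smbus_write_word_swapped(client, reg, val);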
diff --git a/drivers/input/touchscreen/ad7879-spi.c b/drivers/input/touchscreen/ad7879-spi.c
index b1643c8fa7c9..9b2e1c2b1971 100644
--- a/drivers/input/touchscreen/ad7879-spi.c
+++ b/drivers/input/touchscreen/ad7879-spi.c
@@ -22,30 +22,6 @@
#define AD7879_WRITECMD(reg) (AD7879_CMD(reg))
#define AD7879_READCMD(reg) (AD7879_CMD(reg) | AD7879_CMD_READ)
-#ifdef CONFIG_PM_SLEEP
-static int ad7879_spi_suspend(struct device *dev)
-{
- struct spi_device *spi = to_spi_device(dev);
- struct ad7879 *ts = spi_get_drvdata(spi);
-
- ad7879_suspend(ts);
-
- return 0;
-}
-
-static int ad7879_spi_resume(struct device *dev)
-{
- struct spi_device *spi = to_spi_device(dev);
- struct ad7879 *ts = spi_get_drvdata(spi);
-
- ad7879_resume(ts);
-
- return 0;
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(ad7879_spi_pm, ad7879_spi_suspend, ad7879_spi_resume);
-
/*
* ad7879_read/write are only used for initial setup and for sysfs controls.
* The main traffic is done in ad7879_collect().
@@ -174,9 +150,8 @@ static int __devexit ad7879_spi_remove(struct spi_device *spi)
static struct spi_driver ad7879_spi_driver = {
.driver = {
.name = "ad7879",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
- .pm = &ad7879_spi_pm,
+ .pm = &ad7879_pm_ops,
},
.probe = ad7879_spi_probe,
.remove = __devexit_p(ad7879_spi_remove),
diff --git a/drivers/input/touchscreen/ad7879.c b/drivers/input/touchscreen/ad7879.c
index 3b2e9ed2aeec..e2482b40da51 100644
--- a/drivers/input/touchscreen/ad7879.c
+++ b/drivers/input/touchscreen/ad7879.c
@@ -281,8 +281,11 @@ static void ad7879_close(struct input_dev* input)
__ad7879_disable(ts);
}
-void ad7879_suspend(struct ad7879 *ts)
+#ifdef CONFIG_PM_SLEEP
+static int ad7879_suspend(struct device *dev)
{
+ struct ad7879 *ts = dev_get_drvdata(dev);
+
mutex_lock(&ts->input->mutex);
if (!ts->suspended && !ts->disabled && ts->input->users)
@@ -291,11 +294,14 @@ void ad7879_suspend(struct ad7879 *ts)
ts->suspended = true;
mutex_unlock(&ts->input->mutex);
+
+ return 0;
}
-EXPORT_SYMBOL(ad7879_suspend);
-void ad7879_resume(struct ad7879 *ts)
+static int ad7879_resume(struct device *dev)
{
+ struct ad7879 *ts = dev_get_drvdata(dev);
+
mutex_lock(&ts->input->mutex);
if (ts->suspended && !ts->disabled && ts->input->users)
@@ -304,8 +310,13 @@ void ad7879_resume(struct ad7879 *ts)
ts->suspended = false;
mutex_unlock(&ts->input->mutex);
+
+ return 0;
}
-EXPORT_SYMBOL(ad7879_resume);
+#endif
+
+SIMPLE_DEV_PM_OPS(ad7879_pm_ops, ad7879_suspend, ad7879_resume);
+EXPORT_SYMBOL(ad7879_pm_ops);
static void ad7879_toggle(struct ad7879 *ts, bool disable)
{
@@ -340,10 +351,10 @@ static ssize_t ad7879_disable_store(struct device *dev,
const char *buf, size_t count)
{
struct ad7879 *ts = dev_get_drvdata(dev);
- unsigned long val;
+ unsigned int val;
int error;
- error = strict_strtoul(buf, 10, &val);
+ error = kstrtouint(buf, 10, &val);
if (error)
return error;
diff --git a/drivers/input/touchscreen/ad7879.h b/drivers/input/touchscreen/ad7879.h
index 6b45a27236c7..6fd13c48d373 100644
--- a/drivers/input/touchscreen/ad7879.h
+++ b/drivers/input/touchscreen/ad7879.h
@@ -21,8 +21,8 @@ struct ad7879_bus_ops {
int (*write)(struct device *dev, u8 reg, u16 val);
};
-void ad7879_suspend(struct ad7879 *);
-void ad7879_resume(struct ad7879 *);
+extern const struct dev_pm_ops ad7879_pm_ops;
+
struct ad7879 *ad7879_probe(struct device *dev, u8 devid, unsigned irq,
const struct ad7879_bus_ops *bops);
void ad7879_remove(struct ad7879 *);
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index de31ec6fe9e4..23fd90185659 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -602,10 +602,12 @@ static ssize_t ads7846_disable_store(struct device *dev,
const char *buf, size_t count)
{
struct ads7846 *ts = dev_get_drvdata(dev);
- unsigned long i;
+ unsigned int i;
+ int err;
- if (strict_strtoul(buf, 10, &i))
- return -EINVAL;
+ err = kstrtouint(buf, 10, &i);
+ if (err)
+ return err;
if (i)
ads7846_disable(ts);
@@ -1424,7 +1426,6 @@ static int __devexit ads7846_remove(struct spi_device *spi)
static struct spi_driver ads7846_driver = {
.driver = {
.name = "ads7846",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
.pm = &ads7846_pm,
},
diff --git a/drivers/input/touchscreen/atmel_tsadcc.c b/drivers/input/touchscreen/atmel_tsadcc.c
index 122a87883659..201b2d2ec1b3 100644
--- a/drivers/input/touchscreen/atmel_tsadcc.c
+++ b/drivers/input/touchscreen/atmel_tsadcc.c
@@ -351,20 +351,7 @@ static struct platform_driver atmel_tsadcc_driver = {
.name = "atmel_tsadcc",
},
};
-
-static int __init atmel_tsadcc_init(void)
-{
- return platform_driver_register(&atmel_tsadcc_driver);
-}
-
-static void __exit atmel_tsadcc_exit(void)
-{
- platform_driver_unregister(&atmel_tsadcc_driver);
-}
-
-module_init(atmel_tsadcc_init);
-module_exit(atmel_tsadcc_exit);
-
+module_platform_driver(atmel_tsadcc_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Atmel TouchScreen Driver");
diff --git a/drivers/input/touchscreen/auo-pixcir-ts.c b/drivers/input/touchscreen/auo-pixcir-ts.c
new file mode 100644
index 000000000000..94fb9fbb08a9
--- /dev/null
+++ b/drivers/input/touchscreen/auo-pixcir-ts.c
@@ -0,0 +1,652 @@
+/*
+ * Driver for AUO in-cell touchscreens
+ *
+ * Copyright (c) 2011 Heiko Stuebner <heiko@sntech.de>
+ *
+ * loosely based on auo_touch.c from Dell Streak vendor-kernel
+ *
+ * Copyright (c) 2008 QUALCOMM Incorporated.
+ * Copyright (c) 2008 QUALCOMM USA, INC.
+ *
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/input.h>
+#include <linux/jiffies.h>
+#include <linux/i2c.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/input/auo-pixcir-ts.h>
+
+/*
+ * Coordinate calculation:
+ * X1 = X1_LSB + X1_MSB*256
+ * Y1 = Y1_LSB + Y1_MSB*256
+ * X2 = X2_LSB + X2_MSB*256
+ * Y2 = Y2_LSB + Y2_MSB*256
+ */
+#define AUO_PIXCIR_REG_X1_LSB 0x00
+#define AUO_PIXCIR_REG_X1_MSB 0x01
+#define AUO_PIXCIR_REG_Y1_LSB 0x02
+#define AUO_PIXCIR_REG_Y1_MSB 0x03
+#define AUO_PIXCIR_REG_X2_LSB 0x04
+#define AUO_PIXCIR_REG_X2_MSB 0x05
+#define AUO_PIXCIR_REG_Y2_LSB 0x06
+#define AUO_PIXCIR_REG_Y2_MSB 0x07
+
+#define AUO_PIXCIR_REG_STRENGTH 0x0d
+#define AUO_PIXCIR_REG_STRENGTH_X1_LSB 0x0e
+#define AUO_PIXCIR_REG_STRENGTH_X1_MSB 0x0f
+
+#define AUO_PIXCIR_REG_RAW_DATA_X 0x2b
+#define AUO_PIXCIR_REG_RAW_DATA_Y 0x4f
+
+#define AUO_PIXCIR_REG_X_SENSITIVITY 0x6f
+#define AUO_PIXCIR_REG_Y_SENSITIVITY 0x70
+#define AUO_PIXCIR_REG_INT_SETTING 0x71
+#define AUO_PIXCIR_REG_INT_WIDTH 0x72
+#define AUO_PIXCIR_REG_POWER_MODE 0x73
+
+#define AUO_PIXCIR_REG_VERSION 0x77
+#define AUO_PIXCIR_REG_CALIBRATE 0x78
+
+#define AUO_PIXCIR_REG_TOUCHAREA_X1 0x1e
+#define AUO_PIXCIR_REG_TOUCHAREA_Y1 0x1f
+#define AUO_PIXCIR_REG_TOUCHAREA_X2 0x20
+#define AUO_PIXCIR_REG_TOUCHAREA_Y2 0x21
+
+#define AUO_PIXCIR_REG_EEPROM_CALIB_X 0x42
+#define AUO_PIXCIR_REG_EEPROM_CALIB_Y 0xad
+
+#define AUO_PIXCIR_INT_TPNUM_MASK 0xe0
+#define AUO_PIXCIR_INT_TPNUM_SHIFT 5
+#define AUO_PIXCIR_INT_RELEASE (1 << 4)
+#define AUO_PIXCIR_INT_ENABLE (1 << 3)
+#define AUO_PIXCIR_INT_POL_HIGH (1 << 2)
+#define AUO_PIXCIR_INT_MODE_MASK 0x03
+
+/*
+ * Power modes:
+ * active: scan speed 60Hz
+ * sleep: scan speed 10Hz can be auto-activated, wakeup on 1st touch
+ * deep sleep: scan speed 1Hz can only be entered or left manually.
+ */
+#define AUO_PIXCIR_POWER_ACTIVE 0x00
+#define AUO_PIXCIR_POWER_SLEEP 0x01
+#define AUO_PIXCIR_POWER_DEEP_SLEEP 0x02
+#define AUO_PIXCIR_POWER_MASK 0x03
+
+#define AUO_PIXCIR_POWER_ALLOW_SLEEP (1 << 2)
+#define AUO_PIXCIR_POWER_IDLE_TIME(ms) ((ms & 0xf) << 4)
+
+#define AUO_PIXCIR_CALIBRATE 0x03
+
+#define AUO_PIXCIR_EEPROM_CALIB_X_LEN 62
+#define AUO_PIXCIR_EEPROM_CALIB_Y_LEN 36
+
+#define AUO_PIXCIR_RAW_DATA_X_LEN 18
+#define AUO_PIXCIR_RAW_DATA_Y_LEN 11
+
+#define AUO_PIXCIR_STRENGTH_ENABLE (1 << 0)
+
+/* Touchscreen absolute values */
+#define AUO_PIXCIR_REPORT_POINTS 2
+#define AUO_PIXCIR_MAX_AREA 0xff
+#define AUO_PIXCIR_PENUP_TIMEOUT_MS 10
+
+struct auo_pixcir_ts {
+ struct i2c_client *client;
+ struct input_dev *input;
+ char phys[32];
+
+ /* special handling for touch_indicate interupt mode */
+ bool touch_ind_mode;
+	/* special handling for touch_indicate interrupt mode */
+ wait_queue_head_t wait;
+ bool stopped;
+};
+
+struct auo_point_t {
+ int coord_x;
+ int coord_y;
+ int area_major;
+ int area_minor;
+ int orientation;
+};
+
+static int auo_pixcir_collect_data(struct auo_pixcir_ts *ts,
+ struct auo_point_t *point)
+{
+ struct i2c_client *client = ts->client;
+ const struct auo_pixcir_ts_platdata *pdata = client->dev.platform_data;
+ uint8_t raw_coord[8];
+ uint8_t raw_area[4];
+ int i, ret;
+
+ /* touch coordinates */
+ ret = i2c_smbus_read_i2c_block_data(client, AUO_PIXCIR_REG_X1_LSB,
+ 8, raw_coord);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed to read coordinate, %d\n", ret);
+ return ret;
+ }
+
+ /* touch area */
+ ret = i2c_smbus_read_i2c_block_data(client, AUO_PIXCIR_REG_TOUCHAREA_X1,
+ 4, raw_area);
+ if (ret < 0) {
+ dev_err(&client->dev, "could not read touch area, %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < AUO_PIXCIR_REPORT_POINTS; i++) {
+ point[i].coord_x =
+ raw_coord[4 * i + 1] << 8 | raw_coord[4 * i];
+ point[i].coord_y =
+ raw_coord[4 * i + 3] << 8 | raw_coord[4 * i + 2];
+
+ if (point[i].coord_x > pdata->x_max ||
+ point[i].coord_y > pdata->y_max) {
+ dev_warn(&client->dev, "coordinates (%d,%d) invalid\n",
+ point[i].coord_x, point[i].coord_y);
+ point[i].coord_x = point[i].coord_y = 0;
+ }
+
+ /* determine touch major, minor and orientation */
+ point[i].area_major = max(raw_area[2 * i], raw_area[2 * i + 1]);
+ point[i].area_minor = min(raw_area[2 * i], raw_area[2 * i + 1]);
+ point[i].orientation = raw_area[2 * i] > raw_area[2 * i + 1];
+ }
+
+ return 0;
+}
+
+static irqreturn_t auo_pixcir_interrupt(int irq, void *dev_id)
+{
+ struct auo_pixcir_ts *ts = dev_id;
+ struct i2c_client *client = ts->client;
+ const struct auo_pixcir_ts_platdata *pdata = client->dev.platform_data;
+ struct auo_point_t point[AUO_PIXCIR_REPORT_POINTS];
+ int i;
+ int ret;
+ int fingers = 0;
+ int abs = -1;
+
+ while (!ts->stopped) {
+
+		/* check for an up event in touch_ind_mode */
+ if (ts->touch_ind_mode) {
+ if (gpio_get_value(pdata->gpio_int) == 0) {
+ input_mt_sync(ts->input);
+ input_report_key(ts->input, BTN_TOUCH, 0);
+ input_sync(ts->input);
+ break;
+ }
+ }
+
+ ret = auo_pixcir_collect_data(ts, point);
+ if (ret < 0) {
+ /* we want to loop only in touch_ind_mode */
+ if (!ts->touch_ind_mode)
+ break;
+
+ wait_event_timeout(ts->wait, ts->stopped,
+ msecs_to_jiffies(AUO_PIXCIR_PENUP_TIMEOUT_MS));
+ continue;
+ }
+
+ for (i = 0; i < AUO_PIXCIR_REPORT_POINTS; i++) {
+ if (point[i].coord_x > 0 || point[i].coord_y > 0) {
+ input_report_abs(ts->input, ABS_MT_POSITION_X,
+ point[i].coord_x);
+ input_report_abs(ts->input, ABS_MT_POSITION_Y,
+ point[i].coord_y);
+ input_report_abs(ts->input, ABS_MT_TOUCH_MAJOR,
+ point[i].area_major);
+ input_report_abs(ts->input, ABS_MT_TOUCH_MINOR,
+ point[i].area_minor);
+ input_report_abs(ts->input, ABS_MT_ORIENTATION,
+ point[i].orientation);
+ input_mt_sync(ts->input);
+
+ /* use first finger as source for singletouch */
+ if (fingers == 0)
+ abs = i;
+
+ /* number of touch points could also be queried
+ * via i2c but would require an additional call
+ */
+ fingers++;
+ }
+ }
+
+ input_report_key(ts->input, BTN_TOUCH, fingers > 0);
+
+ if (abs > -1) {
+ input_report_abs(ts->input, ABS_X, point[abs].coord_x);
+ input_report_abs(ts->input, ABS_Y, point[abs].coord_y);
+ }
+
+ input_sync(ts->input);
+
+ /* we want to loop only in touch_ind_mode */
+ if (!ts->touch_ind_mode)
+ break;
+
+ wait_event_timeout(ts->wait, ts->stopped,
+ msecs_to_jiffies(AUO_PIXCIR_PENUP_TIMEOUT_MS));
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Set the power mode of the device.
+ * Valid modes are
+ * - AUO_PIXCIR_POWER_ACTIVE
+ * - AUO_PIXCIR_POWER_SLEEP - automatically left on first touch
+ * - AUO_PIXCIR_POWER_DEEP_SLEEP
+ */
+static int auo_pixcir_power_mode(struct auo_pixcir_ts *ts, int mode)
+{
+ struct i2c_client *client = ts->client;
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(client, AUO_PIXCIR_REG_POWER_MODE);
+ if (ret < 0) {
+ dev_err(&client->dev, "unable to read reg %Xh, %d\n",
+ AUO_PIXCIR_REG_POWER_MODE, ret);
+ return ret;
+ }
+
+ ret &= ~AUO_PIXCIR_POWER_MASK;
+ ret |= mode;
+
+ ret = i2c_smbus_write_byte_data(client, AUO_PIXCIR_REG_POWER_MODE, ret);
+ if (ret) {
+ dev_err(&client->dev, "unable to write reg %Xh, %d\n",
+ AUO_PIXCIR_REG_POWER_MODE, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static __devinit int auo_pixcir_int_config(struct auo_pixcir_ts *ts,
+ int int_setting)
+{
+ struct i2c_client *client = ts->client;
+ struct auo_pixcir_ts_platdata *pdata = client->dev.platform_data;
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(client, AUO_PIXCIR_REG_INT_SETTING);
+ if (ret < 0) {
+ dev_err(&client->dev, "unable to read reg %Xh, %d\n",
+ AUO_PIXCIR_REG_INT_SETTING, ret);
+ return ret;
+ }
+
+ ret &= ~AUO_PIXCIR_INT_MODE_MASK;
+ ret |= int_setting;
+ ret |= AUO_PIXCIR_INT_POL_HIGH; /* always use high for interrupts */
+
+ ret = i2c_smbus_write_byte_data(client, AUO_PIXCIR_REG_INT_SETTING,
+ ret);
+ if (ret < 0) {
+ dev_err(&client->dev, "unable to write reg %Xh, %d\n",
+ AUO_PIXCIR_REG_INT_SETTING, ret);
+ return ret;
+ }
+
+ ts->touch_ind_mode = pdata->int_setting == AUO_PIXCIR_INT_TOUCH_IND;
+
+ return 0;
+}
+
+/* control the generation of interrupts on the device side */
+static int auo_pixcir_int_toggle(struct auo_pixcir_ts *ts, bool enable)
+{
+ struct i2c_client *client = ts->client;
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(client, AUO_PIXCIR_REG_INT_SETTING);
+ if (ret < 0) {
+ dev_err(&client->dev, "unable to read reg %Xh, %d\n",
+ AUO_PIXCIR_REG_INT_SETTING, ret);
+ return ret;
+ }
+
+ if (enable)
+ ret |= AUO_PIXCIR_INT_ENABLE;
+ else
+ ret &= ~AUO_PIXCIR_INT_ENABLE;
+
+ ret = i2c_smbus_write_byte_data(client, AUO_PIXCIR_REG_INT_SETTING,
+ ret);
+ if (ret < 0) {
+ dev_err(&client->dev, "unable to write reg %Xh, %d\n",
+ AUO_PIXCIR_REG_INT_SETTING, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int auo_pixcir_start(struct auo_pixcir_ts *ts)
+{
+ struct i2c_client *client = ts->client;
+ int ret;
+
+ ret = auo_pixcir_power_mode(ts, AUO_PIXCIR_POWER_ACTIVE);
+ if (ret < 0) {
+ dev_err(&client->dev, "could not set power mode, %d\n",
+ ret);
+ return ret;
+ }
+
+ ts->stopped = false;
+ mb();
+ enable_irq(client->irq);
+
+ ret = auo_pixcir_int_toggle(ts, 1);
+ if (ret < 0) {
+ dev_err(&client->dev, "could not enable interrupt, %d\n",
+ ret);
+ disable_irq(client->irq);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int auo_pixcir_stop(struct auo_pixcir_ts *ts)
+{
+ struct i2c_client *client = ts->client;
+ int ret;
+
+ ret = auo_pixcir_int_toggle(ts, 0);
+ if (ret < 0) {
+ dev_err(&client->dev, "could not disable interrupt, %d\n",
+ ret);
+ return ret;
+ }
+
+ /* disable receiving of interrupts */
+ disable_irq(client->irq);
+ ts->stopped = true;
+ mb();
+ wake_up(&ts->wait);
+
+ return auo_pixcir_power_mode(ts, AUO_PIXCIR_POWER_DEEP_SLEEP);
+}
+
+static int auo_pixcir_input_open(struct input_dev *dev)
+{
+ struct auo_pixcir_ts *ts = input_get_drvdata(dev);
+ int ret;
+
+ ret = auo_pixcir_start(ts);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void auo_pixcir_input_close(struct input_dev *dev)
+{
+ struct auo_pixcir_ts *ts = input_get_drvdata(dev);
+
+ auo_pixcir_stop(ts);
+
+ return;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int auo_pixcir_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct auo_pixcir_ts *ts = i2c_get_clientdata(client);
+ struct input_dev *input = ts->input;
+ int ret = 0;
+
+ mutex_lock(&input->mutex);
+
+	/* when configured as a wakeup source, the device should always be
+	 * able to wake the system, therefore start it if necessary
+ */
+ if (device_may_wakeup(&client->dev)) {
+ /* need to start device if not open, to be wakeup source */
+ if (!input->users) {
+ ret = auo_pixcir_start(ts);
+ if (ret)
+ goto unlock;
+ }
+
+ enable_irq_wake(client->irq);
+ ret = auo_pixcir_power_mode(ts, AUO_PIXCIR_POWER_SLEEP);
+ } else if (input->users) {
+ ret = auo_pixcir_stop(ts);
+ }
+
+unlock:
+ mutex_unlock(&input->mutex);
+
+ return ret;
+}
+
+static int auo_pixcir_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct auo_pixcir_ts *ts = i2c_get_clientdata(client);
+ struct input_dev *input = ts->input;
+ int ret = 0;
+
+ mutex_lock(&input->mutex);
+
+ if (device_may_wakeup(&client->dev)) {
+ disable_irq_wake(client->irq);
+
+ /* need to stop device if it was not open on suspend */
+ if (!input->users) {
+ ret = auo_pixcir_stop(ts);
+ if (ret)
+ goto unlock;
+ }
+
+ /* device wakes automatically from SLEEP */
+ } else if (input->users) {
+ ret = auo_pixcir_start(ts);
+ }
+
+unlock:
+ mutex_unlock(&input->mutex);
+
+ return ret;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(auo_pixcir_pm_ops, auo_pixcir_suspend,
+ auo_pixcir_resume);
+
+static int __devinit auo_pixcir_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ const struct auo_pixcir_ts_platdata *pdata = client->dev.platform_data;
+ struct auo_pixcir_ts *ts;
+ struct input_dev *input_dev;
+ int ret;
+
+ if (!pdata)
+ return -EINVAL;
+
+ ts = kzalloc(sizeof(struct auo_pixcir_ts), GFP_KERNEL);
+ if (!ts)
+ return -ENOMEM;
+
+ ret = gpio_request(pdata->gpio_int, "auo_pixcir_ts_int");
+ if (ret) {
+ dev_err(&client->dev, "request of gpio %d failed, %d\n",
+ pdata->gpio_int, ret);
+ goto err_gpio_int;
+ }
+
+ if (pdata->init_hw)
+ pdata->init_hw(client);
+
+ ts->client = client;
+ ts->touch_ind_mode = 0;
+ init_waitqueue_head(&ts->wait);
+
+ snprintf(ts->phys, sizeof(ts->phys),
+ "%s/input0", dev_name(&client->dev));
+
+ input_dev = input_allocate_device();
+ if (!input_dev) {
+ dev_err(&client->dev, "could not allocate input device\n");
+ goto err_input_alloc;
+ }
+
+ ts->input = input_dev;
+
+ input_dev->name = "AUO-Pixcir touchscreen";
+ input_dev->phys = ts->phys;
+ input_dev->id.bustype = BUS_I2C;
+ input_dev->dev.parent = &client->dev;
+
+ input_dev->open = auo_pixcir_input_open;
+ input_dev->close = auo_pixcir_input_close;
+
+ __set_bit(EV_ABS, input_dev->evbit);
+ __set_bit(EV_KEY, input_dev->evbit);
+
+ __set_bit(BTN_TOUCH, input_dev->keybit);
+
+ /* For single touch */
+ input_set_abs_params(input_dev, ABS_X, 0, pdata->x_max, 0, 0);
+ input_set_abs_params(input_dev, ABS_Y, 0, pdata->y_max, 0, 0);
+
+ /* For multi touch */
+ input_set_abs_params(input_dev, ABS_MT_POSITION_X, 0,
+ pdata->x_max, 0, 0);
+ input_set_abs_params(input_dev, ABS_MT_POSITION_Y, 0,
+ pdata->y_max, 0, 0);
+ input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 0,
+ AUO_PIXCIR_MAX_AREA, 0, 0);
+ input_set_abs_params(input_dev, ABS_MT_TOUCH_MINOR, 0,
+ AUO_PIXCIR_MAX_AREA, 0, 0);
+ input_set_abs_params(input_dev, ABS_MT_ORIENTATION, 0, 1, 0, 0);
+
+ ret = i2c_smbus_read_byte_data(client, AUO_PIXCIR_REG_VERSION);
+ if (ret < 0)
+ goto err_fw_vers;
+ dev_info(&client->dev, "firmware version 0x%X\n", ret);
+
+ ret = auo_pixcir_int_config(ts, pdata->int_setting);
+ if (ret)
+ goto err_fw_vers;
+
+ input_set_drvdata(ts->input, ts);
+ ts->stopped = true;
+
+ ret = request_threaded_irq(client->irq, NULL, auo_pixcir_interrupt,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ input_dev->name, ts);
+ if (ret) {
+		dev_err(&client->dev, "irq %d request failed\n", client->irq);
+ goto err_fw_vers;
+ }
+
+ /* stop device and put it into deep sleep until it is opened */
+ ret = auo_pixcir_stop(ts);
+ if (ret < 0)
+ goto err_input_register;
+
+ ret = input_register_device(input_dev);
+ if (ret) {
+ dev_err(&client->dev, "could not register input device\n");
+ goto err_input_register;
+ }
+
+ i2c_set_clientdata(client, ts);
+
+ return 0;
+
+err_input_register:
+ free_irq(client->irq, ts);
+err_fw_vers:
+ input_free_device(input_dev);
+err_input_alloc:
+ if (pdata->exit_hw)
+ pdata->exit_hw(client);
+ gpio_free(pdata->gpio_int);
+err_gpio_int:
+ kfree(ts);
+
+ return ret;
+}
+
+static int __devexit auo_pixcir_remove(struct i2c_client *client)
+{
+ struct auo_pixcir_ts *ts = i2c_get_clientdata(client);
+ const struct auo_pixcir_ts_platdata *pdata = client->dev.platform_data;
+
+ free_irq(client->irq, ts);
+
+ input_unregister_device(ts->input);
+
+ if (pdata->exit_hw)
+ pdata->exit_hw(client);
+
+ gpio_free(pdata->gpio_int);
+
+ kfree(ts);
+
+ return 0;
+}
+
+static const struct i2c_device_id auo_pixcir_idtable[] = {
+ { "auo_pixcir_ts", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, auo_pixcir_idtable);
+
+static struct i2c_driver auo_pixcir_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "auo_pixcir_ts",
+ .pm = &auo_pixcir_pm_ops,
+ },
+ .probe = auo_pixcir_probe,
+ .remove = __devexit_p(auo_pixcir_remove),
+ .id_table = auo_pixcir_idtable,
+};
+
+static int __init auo_pixcir_init(void)
+{
+ return i2c_add_driver(&auo_pixcir_driver);
+}
+module_init(auo_pixcir_init);
+
+static void __exit auo_pixcir_exit(void)
+{
+ i2c_del_driver(&auo_pixcir_driver);
+}
+module_exit(auo_pixcir_exit);
+
+MODULE_DESCRIPTION("AUO-PIXCIR touchscreen driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>");
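auo_pixcir_collect_data() above assembles each coordinate from its LSB/MSB register pair exactly as the register-map comment states (X1 = X1_LSB + X1_MSB * 256). A worked example with invented bytes:

	/* invented: raw_coord[0] = 0x34 (X1_LSB), raw_coord[1] = 0x02 (X1_MSB) */
	int coord_x = (0x02 << 8) | 0x34;	/* 0x0234 == 564 */

Coordinates beyond pdata->x_max/y_max are then zeroed with a warning rather than being reported.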
diff --git a/drivers/input/touchscreen/da9034-ts.c b/drivers/input/touchscreen/da9034-ts.c
index 2b72a5923c16..36b65cf10d7f 100644
--- a/drivers/input/touchscreen/da9034-ts.c
+++ b/drivers/input/touchscreen/da9034-ts.c
@@ -379,18 +379,7 @@ static struct platform_driver da9034_touch_driver = {
.probe = da9034_touch_probe,
.remove = __devexit_p(da9034_touch_remove),
};
-
-static int __init da9034_touch_init(void)
-{
- return platform_driver_register(&da9034_touch_driver);
-}
-module_init(da9034_touch_init);
-
-static void __exit da9034_touch_exit(void)
-{
- platform_driver_unregister(&da9034_touch_driver);
-}
-module_exit(da9034_touch_exit);
+module_platform_driver(da9034_touch_driver);
MODULE_DESCRIPTION("Touchscreen driver for Dialog Semiconductor DA9034");
MODULE_AUTHOR("Eric Miao <eric.miao@marvell.com>, Bin Yang <bin.yang@marvell.com>");
diff --git a/drivers/input/touchscreen/eeti_ts.c b/drivers/input/touchscreen/eeti_ts.c
index 7f8f538a9806..1df19bb8534a 100644
--- a/drivers/input/touchscreen/eeti_ts.c
+++ b/drivers/input/touchscreen/eeti_ts.c
@@ -35,11 +35,11 @@
#include <linux/input/eeti_ts.h>
#include <linux/slab.h>
-static int flip_x;
+static bool flip_x;
module_param(flip_x, bool, 0644);
MODULE_PARM_DESC(flip_x, "flip x coordinate");
-static int flip_y;
+static bool flip_y;
module_param(flip_y, bool, 0644);
MODULE_PARM_DESC(flip_y, "flip y coordinate");
diff --git a/drivers/input/touchscreen/egalax_ts.c b/drivers/input/touchscreen/egalax_ts.c
new file mode 100644
index 000000000000..eadcc2e83c77
--- /dev/null
+++ b/drivers/input/touchscreen/egalax_ts.c
@@ -0,0 +1,303 @@
+/*
+ * Driver for EETI eGalax Multiple Touch Controller
+ *
+ * Copyright (C) 2011 Freescale Semiconductor, Inc.
+ *
+ * based on max11801_ts.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* The EETI eGalax serial touch screen controller is an I2C-based multiple
+ * touch screen controller; it supports 5-point multi-touch. */
+
+/* TODO:
+ - auto idle mode support
+*/
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/input.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/input/mt.h>
+
+/*
+ * Mouse Mode: some panels may configure the controller in mouse mode,
+ * which can only report one point at a given time.
+ * This driver will ignore events in this mode.
+ */
+#define REPORT_MODE_MOUSE 0x1
+/*
+ * Vendor Mode: this mode is used to transfer some vendor specific
+ * messages.
+ * This driver will ignore events in this mode.
+ */
+#define REPORT_MODE_VENDOR 0x3
+/* Multiple Touch Mode */
+#define REPORT_MODE_MTTOUCH 0x4
+
+#define MAX_SUPPORT_POINTS 5
+
+#define EVENT_VALID_OFFSET 7
+#define EVENT_VALID_MASK (0x1 << EVENT_VALID_OFFSET)
+#define EVENT_ID_OFFSET 2
+#define EVENT_ID_MASK (0xf << EVENT_ID_OFFSET)
+#define EVENT_IN_RANGE (0x1 << 1)
+#define EVENT_DOWN_UP (0X1 << 0)
+
+#define MAX_I2C_DATA_LEN 10
+
+#define EGALAX_MAX_X 32760
+#define EGALAX_MAX_Y 32760
+#define EGALAX_MAX_TRIES 100
+
+struct egalax_ts {
+ struct i2c_client *client;
+ struct input_dev *input_dev;
+};
+
+static irqreturn_t egalax_ts_interrupt(int irq, void *dev_id)
+{
+ struct egalax_ts *ts = dev_id;
+ struct input_dev *input_dev = ts->input_dev;
+ struct i2c_client *client = ts->client;
+ u8 buf[MAX_I2C_DATA_LEN];
+ int id, ret, x, y, z;
+ int tries = 0;
+ bool down, valid;
+ u8 state;
+
+ do {
+ ret = i2c_master_recv(client, buf, MAX_I2C_DATA_LEN);
+ } while (ret == -EAGAIN && tries++ < EGALAX_MAX_TRIES);
+
+ if (ret < 0)
+ return IRQ_HANDLED;
+
+ if (buf[0] != REPORT_MODE_MTTOUCH) {
+ /* ignore mouse events and vendor events */
+ return IRQ_HANDLED;
+ }
+
+ state = buf[1];
+ x = (buf[3] << 8) | buf[2];
+ y = (buf[5] << 8) | buf[4];
+ z = (buf[7] << 8) | buf[6];
+
+ valid = state & EVENT_VALID_MASK;
+ id = (state & EVENT_ID_MASK) >> EVENT_ID_OFFSET;
+ down = state & EVENT_DOWN_UP;
+
+	if (!valid || id >= MAX_SUPPORT_POINTS) {
+ dev_dbg(&client->dev, "point invalid\n");
+ return IRQ_HANDLED;
+ }
+
+ input_mt_slot(input_dev, id);
+ input_mt_report_slot_state(input_dev, MT_TOOL_FINGER, down);
+
+	dev_dbg(&client->dev, "%s id:%d x:%d y:%d z:%d\n",
+ down ? "down" : "up", id, x, y, z);
+
+ if (down) {
+ input_report_abs(input_dev, ABS_MT_POSITION_X, x);
+ input_report_abs(input_dev, ABS_MT_POSITION_Y, y);
+ input_report_abs(input_dev, ABS_MT_PRESSURE, z);
+ }
+
+ input_mt_report_pointer_emulation(input_dev, true);
+ input_sync(input_dev);
+
+ return IRQ_HANDLED;
+}
+
+/* wake up the controller by a falling edge on the interrupt gpio. */
+static int egalax_wake_up_device(struct i2c_client *client)
+{
+ int gpio = irq_to_gpio(client->irq);
+ int ret;
+
+ ret = gpio_request(gpio, "egalax_irq");
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "request gpio failed, cannot wake up controller: %d\n",
+ ret);
+ return ret;
+ }
+
+	/* wake up the controller via a falling edge on the IRQ gpio. */
+ gpio_direction_output(gpio, 0);
+ gpio_set_value(gpio, 1);
+
+	/* controller should be awake now; restore the gpio to its irq role. */
+ gpio_direction_input(gpio);
+ gpio_free(gpio);
+
+ return 0;
+}
+
+static int __devinit egalax_firmware_version(struct i2c_client *client)
+{
+ static const u8 cmd[MAX_I2C_DATA_LEN] = { 0x03, 0x03, 0xa, 0x01, 0x41 };
+ int ret;
+
+ ret = i2c_master_send(client, cmd, MAX_I2C_DATA_LEN);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int __devinit egalax_ts_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct egalax_ts *ts;
+ struct input_dev *input_dev;
+ int ret;
+ int error;
+
+ ts = kzalloc(sizeof(struct egalax_ts), GFP_KERNEL);
+ if (!ts) {
+ dev_err(&client->dev, "Failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ input_dev = input_allocate_device();
+ if (!input_dev) {
+ dev_err(&client->dev, "Failed to allocate memory\n");
+ error = -ENOMEM;
+ goto err_free_ts;
+ }
+
+ ts->client = client;
+ ts->input_dev = input_dev;
+
+ /* controller may be in sleep, wake it up. */
+ egalax_wake_up_device(client);
+
+ ret = egalax_firmware_version(client);
+ if (ret < 0) {
+ dev_err(&client->dev, "Failed to read firmware version\n");
+ error = -EIO;
+ goto err_free_dev;
+ }
+
+ input_dev->name = "EETI eGalax Touch Screen";
+ input_dev->id.bustype = BUS_I2C;
+ input_dev->dev.parent = &client->dev;
+
+ __set_bit(EV_ABS, input_dev->evbit);
+ __set_bit(EV_KEY, input_dev->evbit);
+ __set_bit(BTN_TOUCH, input_dev->keybit);
+
+ input_set_abs_params(input_dev, ABS_X, 0, EGALAX_MAX_X, 0, 0);
+ input_set_abs_params(input_dev, ABS_Y, 0, EGALAX_MAX_Y, 0, 0);
+ input_set_abs_params(input_dev,
+ ABS_MT_POSITION_X, 0, EGALAX_MAX_X, 0, 0);
+ input_set_abs_params(input_dev,
+			     ABS_MT_POSITION_Y, 0, EGALAX_MAX_Y, 0, 0);
+ input_mt_init_slots(input_dev, MAX_SUPPORT_POINTS);
+
+ input_set_drvdata(input_dev, ts);
+
+ error = request_threaded_irq(client->irq, NULL, egalax_ts_interrupt,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ "egalax_ts", ts);
+ if (error < 0) {
+ dev_err(&client->dev, "Failed to register interrupt\n");
+ goto err_free_dev;
+ }
+
+ error = input_register_device(ts->input_dev);
+ if (error)
+ goto err_free_irq;
+
+ i2c_set_clientdata(client, ts);
+ return 0;
+
+err_free_irq:
+ free_irq(client->irq, ts);
+err_free_dev:
+ input_free_device(input_dev);
+err_free_ts:
+ kfree(ts);
+
+ return error;
+}
+
+static __devexit int egalax_ts_remove(struct i2c_client *client)
+{
+ struct egalax_ts *ts = i2c_get_clientdata(client);
+
+ free_irq(client->irq, ts);
+
+ input_unregister_device(ts->input_dev);
+ kfree(ts);
+
+ return 0;
+}
+
+static const struct i2c_device_id egalax_ts_id[] = {
+ { "egalax_ts", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, egalax_ts_id);
+
+#ifdef CONFIG_PM_SLEEP
+static int egalax_ts_suspend(struct device *dev)
+{
+ static const u8 suspend_cmd[MAX_I2C_DATA_LEN] = {
+ 0x3, 0x6, 0xa, 0x3, 0x36, 0x3f, 0x2, 0, 0, 0
+ };
+ struct i2c_client *client = to_i2c_client(dev);
+ int ret;
+
+ ret = i2c_master_send(client, suspend_cmd, MAX_I2C_DATA_LEN);
+ return ret > 0 ? 0 : ret;
+}
+
+static int egalax_ts_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+
+ return egalax_wake_up_device(client);
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(egalax_ts_pm_ops, egalax_ts_suspend, egalax_ts_resume);
+
+static struct i2c_driver egalax_ts_driver = {
+ .driver = {
+ .name = "egalax_ts",
+ .owner = THIS_MODULE,
+ .pm = &egalax_ts_pm_ops,
+ },
+ .id_table = egalax_ts_id,
+ .probe = egalax_ts_probe,
+ .remove = __devexit_p(egalax_ts_remove),
+};
+
+static int __init egalax_ts_init(void)
+{
+ return i2c_add_driver(&egalax_ts_driver);
+}
+
+static void __exit egalax_ts_exit(void)
+{
+ i2c_del_driver(&egalax_ts_driver);
+}
+
+module_init(egalax_ts_init);
+module_exit(egalax_ts_exit);
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc.");
+MODULE_DESCRIPTION("Touchscreen driver for EETI eGalax touch controller");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/htcpen.c b/drivers/input/touchscreen/htcpen.c
index 62811de6f18f..d13143b68b3e 100644
--- a/drivers/input/touchscreen/htcpen.c
+++ b/drivers/input/touchscreen/htcpen.c
@@ -40,19 +40,13 @@ MODULE_LICENSE("GPL");
#define X_AXIS_MAX 2040
#define Y_AXIS_MAX 2040
-static int invert_x;
+static bool invert_x;
module_param(invert_x, bool, 0644);
MODULE_PARM_DESC(invert_x, "If set, X axis is inverted");
-static int invert_y;
+static bool invert_y;
module_param(invert_y, bool, 0644);
MODULE_PARM_DESC(invert_y, "If set, Y axis is inverted");
-static struct pnp_device_id pnp_ids[] = {
- { .id = "PNP0cc0" },
- { .id = "" }
-};
-MODULE_DEVICE_TABLE(pnp, pnp_ids);
-
static irqreturn_t htcpen_interrupt(int irq, void *handle)
{
struct input_dev *htcpen_dev = handle;
@@ -237,6 +231,7 @@ static struct dmi_system_id __initdata htcshift_dmi_table[] = {
},
{ }
};
+MODULE_DEVICE_TABLE(dmi, htcshift_dmi_table);
static int __init htcpen_isa_init(void)
{
diff --git a/drivers/input/touchscreen/intel-mid-touch.c b/drivers/input/touchscreen/intel-mid-touch.c
index 327695268e06..3cd7a837f82b 100644
--- a/drivers/input/touchscreen/intel-mid-touch.c
+++ b/drivers/input/touchscreen/intel-mid-touch.c
@@ -664,18 +664,7 @@ static struct platform_driver mrstouch_driver = {
.probe = mrstouch_probe,
.remove = __devexit_p(mrstouch_remove),
};
-
-static int __init mrstouch_init(void)
-{
- return platform_driver_register(&mrstouch_driver);
-}
-module_init(mrstouch_init);
-
-static void __exit mrstouch_exit(void)
-{
- platform_driver_unregister(&mrstouch_driver);
-}
-module_exit(mrstouch_exit);
+module_platform_driver(mrstouch_driver);
MODULE_AUTHOR("Sreedhara Murthy. D.S, sreedhara.ds@intel.com");
MODULE_DESCRIPTION("Intel Moorestown Resistive Touch Screen Driver");
diff --git a/drivers/input/touchscreen/jornada720_ts.c b/drivers/input/touchscreen/jornada720_ts.c
index 50076c2d59e2..c3848ad2325b 100644
--- a/drivers/input/touchscreen/jornada720_ts.c
+++ b/drivers/input/touchscreen/jornada720_ts.c
@@ -172,16 +172,4 @@ static struct platform_driver jornada720_ts_driver = {
.owner = THIS_MODULE,
},
};
-
-static int __init jornada720_ts_init(void)
-{
- return platform_driver_register(&jornada720_ts_driver);
-}
-
-static void __exit jornada720_ts_exit(void)
-{
- platform_driver_unregister(&jornada720_ts_driver);
-}
-
-module_init(jornada720_ts_init);
-module_exit(jornada720_ts_exit);
+module_platform_driver(jornada720_ts_driver);
diff --git a/drivers/input/touchscreen/lpc32xx_ts.c b/drivers/input/touchscreen/lpc32xx_ts.c
index 0a484ed5295c..afcd0691ec67 100644
--- a/drivers/input/touchscreen/lpc32xx_ts.c
+++ b/drivers/input/touchscreen/lpc32xx_ts.c
@@ -392,18 +392,7 @@ static struct platform_driver lpc32xx_ts_driver = {
.pm = LPC32XX_TS_PM_OPS,
},
};
-
-static int __init lpc32xx_ts_init(void)
-{
- return platform_driver_register(&lpc32xx_ts_driver);
-}
-module_init(lpc32xx_ts_init);
-
-static void __exit lpc32xx_ts_exit(void)
-{
- platform_driver_unregister(&lpc32xx_ts_driver);
-}
-module_exit(lpc32xx_ts_exit);
+module_platform_driver(lpc32xx_ts_driver);
MODULE_AUTHOR("Kevin Wells <kevin.wells@nxp.com");
MODULE_DESCRIPTION("LPC32XX TSC Driver");
diff --git a/drivers/input/touchscreen/mainstone-wm97xx.c b/drivers/input/touchscreen/mainstone-wm97xx.c
index e966c29ff1bb..7d2b2136e5ad 100644
--- a/drivers/input/touchscreen/mainstone-wm97xx.c
+++ b/drivers/input/touchscreen/mainstone-wm97xx.c
@@ -302,19 +302,7 @@ static struct platform_driver mainstone_wm97xx_driver = {
.name = "wm97xx-touch",
},
};
-
-static int __init mainstone_wm97xx_init(void)
-{
- return platform_driver_register(&mainstone_wm97xx_driver);
-}
-
-static void __exit mainstone_wm97xx_exit(void)
-{
- platform_driver_unregister(&mainstone_wm97xx_driver);
-}
-
-module_init(mainstone_wm97xx_init);
-module_exit(mainstone_wm97xx_exit);
+module_platform_driver(mainstone_wm97xx_driver);
/* Module information */
MODULE_AUTHOR("Liam Girdwood <lrg@slimlogic.co.uk>");
diff --git a/drivers/input/touchscreen/migor_ts.c b/drivers/input/touchscreen/migor_ts.c
index 5803bd0c1cca..5226194aa78e 100644
--- a/drivers/input/touchscreen/migor_ts.c
+++ b/drivers/input/touchscreen/migor_ts.c
@@ -36,7 +36,6 @@
struct migor_ts_priv {
struct i2c_client *client;
struct input_dev *input;
- struct delayed_work work;
int irq;
};
@@ -44,15 +43,24 @@ static const u_int8_t migor_ts_ena_seq[17] = { 0x33, 0x22, 0x11,
0x01, 0x06, 0x07, };
static const u_int8_t migor_ts_dis_seq[17] = { };
-static void migor_ts_poscheck(struct work_struct *work)
+static irqreturn_t migor_ts_isr(int irq, void *dev_id)
{
- struct migor_ts_priv *priv = container_of(work,
- struct migor_ts_priv,
- work.work);
+ struct migor_ts_priv *priv = dev_id;
unsigned short xpos, ypos;
unsigned char event;
u_int8_t buf[16];
+ /*
+ * The touch screen controller chip is hooked up to the CPU
+ * using I2C and a single interrupt line. The interrupt line
+ * is pulled low whenever someone taps the screen. To deassert
+ * the interrupt line we need to acknowledge the interrupt by
+ * communicating with the controller over the slow i2c bus.
+ *
+	 * Since the I2C bus controller may sleep, we use a threaded
+	 * IRQ here.
+ */
+
memset(buf, 0, sizeof(buf));
/* Set Index 0 */
@@ -72,41 +80,25 @@ static void migor_ts_poscheck(struct work_struct *work)
xpos = ((buf[11] & 0x03) << 8 | buf[10]);
event = buf[12];
- if (event == EVENT_PENDOWN || event == EVENT_REPEAT) {
+ switch (event) {
+ case EVENT_PENDOWN:
+ case EVENT_REPEAT:
input_report_key(priv->input, BTN_TOUCH, 1);
input_report_abs(priv->input, ABS_X, ypos); /*X-Y swap*/
input_report_abs(priv->input, ABS_Y, xpos);
input_sync(priv->input);
- } else if (event == EVENT_PENUP) {
+ break;
+
+ case EVENT_PENUP:
input_report_key(priv->input, BTN_TOUCH, 0);
input_sync(priv->input);
+ break;
}
- out:
- enable_irq(priv->irq);
-}
-
-static irqreturn_t migor_ts_isr(int irq, void *dev_id)
-{
- struct migor_ts_priv *priv = dev_id;
-
- /* the touch screen controller chip is hooked up to the cpu
- * using i2c and a single interrupt line. the interrupt line
- * is pulled low whenever someone taps the screen. to deassert
- * the interrupt line we need to acknowledge the interrupt by
- * communicating with the controller over the slow i2c bus.
- *
- * we can't acknowledge from interrupt context since the i2c
- * bus controller may sleep, so we just disable the interrupt
- * here and handle the acknowledge using delayed work.
- */
-
- disable_irq_nosync(irq);
- schedule_delayed_work(&priv->work, HZ / 20);
+ out:
return IRQ_HANDLED;
}
-
static int migor_ts_open(struct input_dev *dev)
{
struct migor_ts_priv *priv = input_get_drvdata(dev);
@@ -131,15 +123,6 @@ static void migor_ts_close(struct input_dev *dev)
disable_irq(priv->irq);
- /* cancel pending work and wait for migor_ts_poscheck() to finish */
- if (cancel_delayed_work_sync(&priv->work)) {
- /*
- * if migor_ts_poscheck was canceled we need to enable IRQ
- * here to balance disable done in migor_ts_isr.
- */
- enable_irq(priv->irq);
- }
-
/* disable controller */
i2c_master_send(client, migor_ts_dis_seq, sizeof(migor_ts_dis_seq));
@@ -154,23 +137,20 @@ static int migor_ts_probe(struct i2c_client *client,
int error;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- dev_err(&client->dev, "failed to allocate driver data\n");
- error = -ENOMEM;
- goto err0;
- }
-
- dev_set_drvdata(&client->dev, priv);
-
input = input_allocate_device();
- if (!input) {
- dev_err(&client->dev, "Failed to allocate input device.\n");
+ if (!priv || !input) {
+ dev_err(&client->dev, "failed to allocate memory\n");
error = -ENOMEM;
- goto err1;
+ goto err_free_mem;
}
+ priv->client = client;
+ priv->input = input;
+ priv->irq = client->irq;
+
input->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
- input->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
+
+ __set_bit(BTN_TOUCH, input->keybit);
input_set_abs_params(input, ABS_X, 95, 955, 0, 0);
input_set_abs_params(input, ABS_Y, 85, 935, 0, 0);
@@ -184,39 +164,34 @@ static int migor_ts_probe(struct i2c_client *client,
input_set_drvdata(input, priv);
- priv->client = client;
- priv->input = input;
- INIT_DELAYED_WORK(&priv->work, migor_ts_poscheck);
- priv->irq = client->irq;
-
- error = input_register_device(input);
- if (error)
- goto err1;
-
- error = request_irq(priv->irq, migor_ts_isr, IRQF_TRIGGER_LOW,
- client->name, priv);
+ error = request_threaded_irq(priv->irq, NULL, migor_ts_isr,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ client->name, priv);
if (error) {
dev_err(&client->dev, "Unable to request touchscreen IRQ.\n");
- goto err2;
+ goto err_free_mem;
}
+ error = input_register_device(input);
+ if (error)
+ goto err_free_irq;
+
+ i2c_set_clientdata(client, priv);
device_init_wakeup(&client->dev, 1);
+
return 0;
- err2:
- input_unregister_device(input);
- input = NULL; /* so we dont try to free it below */
- err1:
+ err_free_irq:
+ free_irq(priv->irq, priv);
+ err_free_mem:
input_free_device(input);
kfree(priv);
- err0:
- dev_set_drvdata(&client->dev, NULL);
return error;
}
static int migor_ts_remove(struct i2c_client *client)
{
- struct migor_ts_priv *priv = dev_get_drvdata(&client->dev);
+ struct migor_ts_priv *priv = i2c_get_clientdata(client);
free_irq(priv->irq, priv);
input_unregister_device(priv->input);
@@ -230,7 +205,7 @@ static int migor_ts_remove(struct i2c_client *client)
static int migor_ts_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
- struct migor_ts_priv *priv = dev_get_drvdata(&client->dev);
+ struct migor_ts_priv *priv = i2c_get_clientdata(client);
if (device_may_wakeup(&client->dev))
enable_irq_wake(priv->irq);
@@ -241,7 +216,7 @@ static int migor_ts_suspend(struct device *dev)
static int migor_ts_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
- struct migor_ts_priv *priv = dev_get_drvdata(&client->dev);
+ struct migor_ts_priv *priv = i2c_get_clientdata(client);
if (device_may_wakeup(&client->dev))
disable_irq_wake(priv->irq);
diff --git a/drivers/input/touchscreen/pcap_ts.c b/drivers/input/touchscreen/pcap_ts.c
index ea6ef16e59b4..f57aeb80f7e3 100644
--- a/drivers/input/touchscreen/pcap_ts.c
+++ b/drivers/input/touchscreen/pcap_ts.c
@@ -252,19 +252,7 @@ static struct platform_driver pcap_ts_driver = {
.pm = PCAP_TS_PM_OPS,
},
};
-
-static int __init pcap_ts_init(void)
-{
- return platform_driver_register(&pcap_ts_driver);
-}
-
-static void __exit pcap_ts_exit(void)
-{
- platform_driver_unregister(&pcap_ts_driver);
-}
-
-module_init(pcap_ts_init);
-module_exit(pcap_ts_exit);
+module_platform_driver(pcap_ts_driver);
MODULE_DESCRIPTION("Motorola PCAP2 touchscreen driver");
MODULE_AUTHOR("Daniel Ribeiro / Harald Welte");
diff --git a/drivers/input/touchscreen/pixcir_i2c_ts.c b/drivers/input/touchscreen/pixcir_i2c_ts.c
new file mode 100644
index 000000000000..d5ac09a1ee56
--- /dev/null
+++ b/drivers/input/touchscreen/pixcir_i2c_ts.c
@@ -0,0 +1,239 @@
+/*
+ * Driver for Pixcir I2C touchscreen controllers.
+ *
+ * Copyright (C) 2010-2011 Pixcir, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/input/pixcir_ts.h>
+
+struct pixcir_i2c_ts_data {
+ struct i2c_client *client;
+ struct input_dev *input;
+ const struct pixcir_ts_platform_data *chip;
+ bool exiting;
+};
+
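+/*
+ * Read the current contact state from the controller and report up to
+ * two contacts using the type-A (anonymous contact) multitouch protocol,
+ * i.e. one input_mt_sync() per reported contact.
+ */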
+static void pixcir_ts_poscheck(struct pixcir_i2c_ts_data *data)
+{
+ struct pixcir_i2c_ts_data *tsdata = data;
+ u8 rdbuf[10], wrbuf[1] = { 0 };
+ u8 touch;
+ int ret;
+
+ ret = i2c_master_send(tsdata->client, wrbuf, sizeof(wrbuf));
+ if (ret != sizeof(wrbuf)) {
+ dev_err(&tsdata->client->dev,
+ "%s: i2c_master_send failed(), ret=%d\n",
+ __func__, ret);
+ return;
+ }
+
+ ret = i2c_master_recv(tsdata->client, rdbuf, sizeof(rdbuf));
+ if (ret != sizeof(rdbuf)) {
+ dev_err(&tsdata->client->dev,
+ "%s: i2c_master_recv failed(), ret=%d\n",
+ __func__, ret);
+ return;
+ }
+
+ touch = rdbuf[0];
+ if (touch) {
+ u16 posx1 = (rdbuf[3] << 8) | rdbuf[2];
+ u16 posy1 = (rdbuf[5] << 8) | rdbuf[4];
+ u16 posx2 = (rdbuf[7] << 8) | rdbuf[6];
+ u16 posy2 = (rdbuf[9] << 8) | rdbuf[8];
+
+ input_report_key(tsdata->input, BTN_TOUCH, 1);
+ input_report_abs(tsdata->input, ABS_X, posx1);
+ input_report_abs(tsdata->input, ABS_Y, posy1);
+
+ input_report_abs(tsdata->input, ABS_MT_POSITION_X, posx1);
+ input_report_abs(tsdata->input, ABS_MT_POSITION_Y, posy1);
+ input_mt_sync(tsdata->input);
+
+ if (touch == 2) {
+ input_report_abs(tsdata->input,
+ ABS_MT_POSITION_X, posx2);
+ input_report_abs(tsdata->input,
+ ABS_MT_POSITION_Y, posy2);
+ input_mt_sync(tsdata->input);
+ }
+ } else {
+ input_report_key(tsdata->input, BTN_TOUCH, 0);
+ }
+
+ input_sync(tsdata->input);
+}
+
+static irqreturn_t pixcir_ts_isr(int irq, void *dev_id)
+{
+ struct pixcir_i2c_ts_data *tsdata = dev_id;
+
+ while (!tsdata->exiting) {
+ pixcir_ts_poscheck(tsdata);
+
+ if (tsdata->chip->attb_read_val())
+ break;
+
+ msleep(20);
+ }
+
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int pixcir_i2c_ts_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+
+ if (device_may_wakeup(&client->dev))
+ enable_irq_wake(client->irq);
+
+ return 0;
+}
+
+static int pixcir_i2c_ts_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+
+ if (device_may_wakeup(&client->dev))
+ disable_irq_wake(client->irq);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(pixcir_dev_pm_ops,
+ pixcir_i2c_ts_suspend, pixcir_i2c_ts_resume);
+
+static int __devinit pixcir_i2c_ts_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ const struct pixcir_ts_platform_data *pdata = client->dev.platform_data;
+ struct pixcir_i2c_ts_data *tsdata;
+ struct input_dev *input;
+ int error;
+
+ if (!pdata) {
+ dev_err(&client->dev, "platform data not defined\n");
+ return -EINVAL;
+ }
+
+ tsdata = kzalloc(sizeof(*tsdata), GFP_KERNEL);
+ input = input_allocate_device();
+ if (!tsdata || !input) {
+ dev_err(&client->dev, "Failed to allocate driver data!\n");
+ error = -ENOMEM;
+ goto err_free_mem;
+ }
+
+ tsdata->client = client;
+ tsdata->input = input;
+ tsdata->chip = pdata;
+
+ input->name = client->name;
+ input->id.bustype = BUS_I2C;
+ input->dev.parent = &client->dev;
+
+ __set_bit(EV_KEY, input->evbit);
+ __set_bit(EV_ABS, input->evbit);
+ __set_bit(BTN_TOUCH, input->keybit);
+ input_set_abs_params(input, ABS_X, 0, pdata->x_max, 0, 0);
+ input_set_abs_params(input, ABS_Y, 0, pdata->y_max, 0, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_X, 0, pdata->x_max, 0, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_Y, 0, pdata->y_max, 0, 0);
+
+ input_set_drvdata(input, tsdata);
+
+ error = request_threaded_irq(client->irq, NULL, pixcir_ts_isr,
+ IRQF_TRIGGER_FALLING,
+ client->name, tsdata);
+ if (error) {
+ dev_err(&client->dev, "Unable to request touchscreen IRQ.\n");
+ goto err_free_mem;
+ }
+
+ error = input_register_device(input);
+ if (error)
+ goto err_free_irq;
+
+ i2c_set_clientdata(client, tsdata);
+ device_init_wakeup(&client->dev, 1);
+
+ return 0;
+
+err_free_irq:
+ free_irq(client->irq, tsdata);
+err_free_mem:
+ input_free_device(input);
+ kfree(tsdata);
+ return error;
+}
+
+static int __devexit pixcir_i2c_ts_remove(struct i2c_client *client)
+{
+ struct pixcir_i2c_ts_data *tsdata = i2c_get_clientdata(client);
+
+ device_init_wakeup(&client->dev, 0);
+
+ tsdata->exiting = true;
+ mb();
+ free_irq(client->irq, tsdata);
+
+ input_unregister_device(tsdata->input);
+ kfree(tsdata);
+
+ return 0;
+}
+
+static const struct i2c_device_id pixcir_i2c_ts_id[] = {
+ { "pixcir_ts", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, pixcir_i2c_ts_id);
+
+static struct i2c_driver pixcir_i2c_ts_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "pixcir_ts",
+ .pm = &pixcir_dev_pm_ops,
+ },
+ .probe = pixcir_i2c_ts_probe,
+ .remove = __devexit_p(pixcir_i2c_ts_remove),
+ .id_table = pixcir_i2c_ts_id,
+};
+
+static int __init pixcir_i2c_ts_init(void)
+{
+ return i2c_add_driver(&pixcir_i2c_ts_driver);
+}
+module_init(pixcir_i2c_ts_init);
+
+static void __exit pixcir_i2c_ts_exit(void)
+{
+ i2c_del_driver(&pixcir_i2c_ts_driver);
+}
+module_exit(pixcir_i2c_ts_exit);
+
+MODULE_AUTHOR("Jianchun Bian <jcbian@pixcir.com.cn>, Dequan Meng <dqmeng@pixcir.com.cn>");
+MODULE_DESCRIPTION("Pixcir I2C Touchscreen Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/s3c2410_ts.c b/drivers/input/touchscreen/s3c2410_ts.c
index 64ce697a3456..bf1a06400067 100644
--- a/drivers/input/touchscreen/s3c2410_ts.c
+++ b/drivers/input/touchscreen/s3c2410_ts.c
@@ -432,19 +432,7 @@ static struct platform_driver s3c_ts_driver = {
.probe = s3c2410ts_probe,
.remove = __devexit_p(s3c2410ts_remove),
};
-
-static int __init s3c2410ts_init(void)
-{
- return platform_driver_register(&s3c_ts_driver);
-}
-
-static void __exit s3c2410ts_exit(void)
-{
- platform_driver_unregister(&s3c_ts_driver);
-}
-
-module_init(s3c2410ts_init);
-module_exit(s3c2410ts_exit);
+module_platform_driver(s3c_ts_driver);
MODULE_AUTHOR("Arnaud Patard <arnaud.patard@rtp-net.org>, "
"Ben Dooks <ben@simtec.co.uk>, "
diff --git a/drivers/input/touchscreen/st1232.c b/drivers/input/touchscreen/st1232.c
index 4ab371358b33..8825fe37d433 100644
--- a/drivers/input/touchscreen/st1232.c
+++ b/drivers/input/touchscreen/st1232.c
@@ -23,6 +23,7 @@
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/module.h>
+#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/types.h>
@@ -46,6 +47,7 @@ struct st1232_ts_data {
struct i2c_client *client;
struct input_dev *input_dev;
struct st1232_ts_finger finger[MAX_FINGERS];
+ struct dev_pm_qos_request low_latency_req;
};
static int st1232_ts_read_data(struct st1232_ts_data *ts)
@@ -118,8 +120,17 @@ static irqreturn_t st1232_ts_irq_handler(int irq, void *dev_id)
}
/* SYN_MT_REPORT only if no contact */
- if (!count)
+ if (!count) {
input_mt_sync(input_dev);
+ if (ts->low_latency_req.dev) {
+ dev_pm_qos_remove_request(&ts->low_latency_req);
+ ts->low_latency_req.dev = NULL;
+ }
+ } else if (!ts->low_latency_req.dev) {
+ /* First contact, request 100 us latency. */
+ dev_pm_qos_add_ancestor_request(&ts->client->dev,
+ &ts->low_latency_req, 100);
+ }
/* SYN_REPORT */
input_sync(input_dev);
diff --git a/drivers/input/touchscreen/stmpe-ts.c b/drivers/input/touchscreen/stmpe-ts.c
index ae88e13c99ff..692b685720ce 100644
--- a/drivers/input/touchscreen/stmpe-ts.c
+++ b/drivers/input/touchscreen/stmpe-ts.c
@@ -379,20 +379,7 @@ static struct platform_driver stmpe_ts_driver = {
.probe = stmpe_input_probe,
.remove = __devexit_p(stmpe_ts_remove),
};
-
-static int __init stmpe_ts_init(void)
-{
- return platform_driver_register(&stmpe_ts_driver);
-}
-
-module_init(stmpe_ts_init);
-
-static void __exit stmpe_ts_exit(void)
-{
- platform_driver_unregister(&stmpe_ts_driver);
-}
-
-module_exit(stmpe_ts_exit);
+module_platform_driver(stmpe_ts_driver);
MODULE_AUTHOR("Luotao Fu <l.fu@pengutronix.de>");
MODULE_DESCRIPTION("STMPEXXX touchscreen driver");
diff --git a/drivers/input/touchscreen/tnetv107x-ts.c b/drivers/input/touchscreen/tnetv107x-ts.c
index 0e8f63e5b36f..7e7488097359 100644
--- a/drivers/input/touchscreen/tnetv107x-ts.c
+++ b/drivers/input/touchscreen/tnetv107x-ts.c
@@ -378,19 +378,7 @@ static struct platform_driver tsc_driver = {
.driver.name = "tnetv107x-ts",
.driver.owner = THIS_MODULE,
};
-
-static int __init tsc_init(void)
-{
- return platform_driver_register(&tsc_driver);
-}
-
-static void __exit tsc_exit(void)
-{
- platform_driver_unregister(&tsc_driver);
-}
-
-module_init(tsc_init);
-module_exit(tsc_exit);
+module_platform_driver(tsc_driver);
MODULE_AUTHOR("Cyril Chemparathy");
MODULE_DESCRIPTION("TNETV107X Touchscreen Driver");
diff --git a/drivers/input/touchscreen/tps6507x-ts.c b/drivers/input/touchscreen/tps6507x-ts.c
index 43031492d733..6c6f6d8ea9b4 100644
--- a/drivers/input/touchscreen/tps6507x-ts.c
+++ b/drivers/input/touchscreen/tps6507x-ts.c
@@ -371,18 +371,7 @@ static struct platform_driver tps6507x_ts_driver = {
.probe = tps6507x_ts_probe,
.remove = __devexit_p(tps6507x_ts_remove),
};
-
-static int __init tps6507x_ts_init(void)
-{
- return platform_driver_register(&tps6507x_ts_driver);
-}
-module_init(tps6507x_ts_init);
-
-static void __exit tps6507x_ts_exit(void)
-{
- platform_driver_unregister(&tps6507x_ts_driver);
-}
-module_exit(tps6507x_ts_exit);
+module_platform_driver(tps6507x_ts_driver);
MODULE_AUTHOR("Todd Fischer <todd.fischer@ridgerun.com>");
MODULE_DESCRIPTION("TPS6507x - TouchScreen driver");
diff --git a/drivers/input/touchscreen/tsc2005.c b/drivers/input/touchscreen/tsc2005.c
index cbf0ff322676..067d95662997 100644
--- a/drivers/input/touchscreen/tsc2005.c
+++ b/drivers/input/touchscreen/tsc2005.c
@@ -450,13 +450,13 @@ static struct attribute *tsc2005_attrs[] = {
NULL
};
-static mode_t tsc2005_attr_is_visible(struct kobject *kobj,
+static umode_t tsc2005_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct spi_device *spi = to_spi_device(dev);
struct tsc2005 *ts = spi_get_drvdata(spi);
- mode_t mode = attr->mode;
+ umode_t mode = attr->mode;
if (attr == &dev_attr_selftest.attr) {
if (!ts->set_reset)
diff --git a/drivers/input/touchscreen/ucb1400_ts.c b/drivers/input/touchscreen/ucb1400_ts.c
index 3b5b5df04dd6..46e83ad53f43 100644
--- a/drivers/input/touchscreen/ucb1400_ts.c
+++ b/drivers/input/touchscreen/ucb1400_ts.c
@@ -20,24 +20,24 @@
#include <linux/module.h>
#include <linux/init.h>
-#include <linux/completion.h>
#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
#include <linux/input.h>
#include <linux/device.h>
#include <linux/interrupt.h>
-#include <linux/suspend.h>
-#include <linux/kthread.h>
-#include <linux/freezer.h>
#include <linux/ucb1400.h>
-static int adcsync;
+#define UCB1400_TS_POLL_PERIOD 10 /* ms */
+
+static bool adcsync;
static int ts_delay = 55; /* us */
static int ts_delay_pressure; /* us */
/* Switch to interrupt mode. */
-static inline void ucb1400_ts_mode_int(struct snd_ac97 *ac97)
+static void ucb1400_ts_mode_int(struct ucb1400_ts *ucb)
{
- ucb1400_reg_write(ac97, UCB_TS_CR,
+ ucb1400_reg_write(ucb->ac97, UCB_TS_CR,
UCB_TS_CR_TSMX_POW | UCB_TS_CR_TSPX_POW |
UCB_TS_CR_TSMY_GND | UCB_TS_CR_TSPY_GND |
UCB_TS_CR_MODE_INT);
@@ -47,13 +47,15 @@ static inline void ucb1400_ts_mode_int(struct snd_ac97 *ac97)
* Switch to pressure mode, and read pressure. We don't need to wait
* here, since both plates are being driven.
*/
-static inline unsigned int ucb1400_ts_read_pressure(struct ucb1400_ts *ucb)
+static unsigned int ucb1400_ts_read_pressure(struct ucb1400_ts *ucb)
{
ucb1400_reg_write(ucb->ac97, UCB_TS_CR,
UCB_TS_CR_TSMX_POW | UCB_TS_CR_TSPX_POW |
UCB_TS_CR_TSMY_GND | UCB_TS_CR_TSPY_GND |
UCB_TS_CR_MODE_PRES | UCB_TS_CR_BIAS_ENA);
+
udelay(ts_delay_pressure);
+
return ucb1400_adc_read(ucb->ac97, UCB_ADC_INP_TSPY, adcsync);
}
@@ -63,7 +65,7 @@ static inline unsigned int ucb1400_ts_read_pressure(struct ucb1400_ts *ucb)
* gives a faster response time. Even so, we need to wait about 55us
* for things to stabilise.
*/
-static inline unsigned int ucb1400_ts_read_xpos(struct ucb1400_ts *ucb)
+static unsigned int ucb1400_ts_read_xpos(struct ucb1400_ts *ucb)
{
ucb1400_reg_write(ucb->ac97, UCB_TS_CR,
UCB_TS_CR_TSMX_GND | UCB_TS_CR_TSPX_POW |
@@ -86,7 +88,7 @@ static inline unsigned int ucb1400_ts_read_xpos(struct ucb1400_ts *ucb)
* gives a faster response time. Even so, we need to wait about 55us
* for things to stabilise.
*/
-static inline unsigned int ucb1400_ts_read_ypos(struct ucb1400_ts *ucb)
+static int ucb1400_ts_read_ypos(struct ucb1400_ts *ucb)
{
ucb1400_reg_write(ucb->ac97, UCB_TS_CR,
UCB_TS_CR_TSMY_GND | UCB_TS_CR_TSPY_POW |
@@ -107,7 +109,7 @@ static inline unsigned int ucb1400_ts_read_ypos(struct ucb1400_ts *ucb)
* Switch to X plate resistance mode. Set MX to ground, PX to
* supply. Measure current.
*/
-static inline unsigned int ucb1400_ts_read_xres(struct ucb1400_ts *ucb)
+static unsigned int ucb1400_ts_read_xres(struct ucb1400_ts *ucb)
{
ucb1400_reg_write(ucb->ac97, UCB_TS_CR,
UCB_TS_CR_TSMX_GND | UCB_TS_CR_TSPX_POW |
@@ -119,7 +121,7 @@ static inline unsigned int ucb1400_ts_read_xres(struct ucb1400_ts *ucb)
* Switch to Y plate resistance mode. Set MY to ground, PY to
* supply. Measure current.
*/
-static inline unsigned int ucb1400_ts_read_yres(struct ucb1400_ts *ucb)
+static unsigned int ucb1400_ts_read_yres(struct ucb1400_ts *ucb)
{
ucb1400_reg_write(ucb->ac97, UCB_TS_CR,
UCB_TS_CR_TSMY_GND | UCB_TS_CR_TSPY_POW |
@@ -127,26 +129,26 @@ static inline unsigned int ucb1400_ts_read_yres(struct ucb1400_ts *ucb)
return ucb1400_adc_read(ucb->ac97, 0, adcsync);
}
-static inline int ucb1400_ts_pen_up(struct snd_ac97 *ac97)
+static int ucb1400_ts_pen_up(struct ucb1400_ts *ucb)
{
- unsigned short val = ucb1400_reg_read(ac97, UCB_TS_CR);
+ unsigned short val = ucb1400_reg_read(ucb->ac97, UCB_TS_CR);
return val & (UCB_TS_CR_TSPX_LOW | UCB_TS_CR_TSMX_LOW);
}
-static inline void ucb1400_ts_irq_enable(struct snd_ac97 *ac97)
+static void ucb1400_ts_irq_enable(struct ucb1400_ts *ucb)
{
- ucb1400_reg_write(ac97, UCB_IE_CLEAR, UCB_IE_TSPX);
- ucb1400_reg_write(ac97, UCB_IE_CLEAR, 0);
- ucb1400_reg_write(ac97, UCB_IE_FAL, UCB_IE_TSPX);
+ ucb1400_reg_write(ucb->ac97, UCB_IE_CLEAR, UCB_IE_TSPX);
+ ucb1400_reg_write(ucb->ac97, UCB_IE_CLEAR, 0);
+ ucb1400_reg_write(ucb->ac97, UCB_IE_FAL, UCB_IE_TSPX);
}
-static inline void ucb1400_ts_irq_disable(struct snd_ac97 *ac97)
+static void ucb1400_ts_irq_disable(struct ucb1400_ts *ucb)
{
- ucb1400_reg_write(ac97, UCB_IE_FAL, 0);
+ ucb1400_reg_write(ucb->ac97, UCB_IE_FAL, 0);
}
-static void ucb1400_ts_evt_add(struct input_dev *idev, u16 pressure, u16 x, u16 y)
+static void ucb1400_ts_report_event(struct input_dev *idev, u16 pressure, u16 x, u16 y)
{
input_report_abs(idev, ABS_X, x);
input_report_abs(idev, ABS_Y, y);
@@ -162,7 +164,7 @@ static void ucb1400_ts_event_release(struct input_dev *idev)
input_sync(idev);
}
-static void ucb1400_handle_pending_irq(struct ucb1400_ts *ucb)
+static void ucb1400_clear_pending_irq(struct ucb1400_ts *ucb)
{
unsigned int isr;
@@ -171,32 +173,34 @@ static void ucb1400_handle_pending_irq(struct ucb1400_ts *ucb)
ucb1400_reg_write(ucb->ac97, UCB_IE_CLEAR, 0);
if (isr & UCB_IE_TSPX)
- ucb1400_ts_irq_disable(ucb->ac97);
+ ucb1400_ts_irq_disable(ucb);
else
- dev_dbg(&ucb->ts_idev->dev, "ucb1400: unexpected IE_STATUS = %#x\n", isr);
- enable_irq(ucb->irq);
+ dev_dbg(&ucb->ts_idev->dev,
+ "ucb1400: unexpected IE_STATUS = %#x\n", isr);
}
-static int ucb1400_ts_thread(void *_ucb)
+/*
+ * A restriction with interrupts exists when using the ucb1400, as
+ * the codec read/write routines may sleep while waiting for codec
+ * access completion and use semaphores for access control to the
+ * AC97 bus. Therefore the driver is forced to use a threaded interrupt
+ * handler.
+ */
+static irqreturn_t ucb1400_irq(int irqnr, void *devid)
{
- struct ucb1400_ts *ucb = _ucb;
- struct task_struct *tsk = current;
- int valid = 0;
- struct sched_param param = { .sched_priority = 1 };
+ struct ucb1400_ts *ucb = devid;
+ unsigned int x, y, p;
+ bool penup;
- sched_setscheduler(tsk, SCHED_FIFO, &param);
+ if (unlikely(irqnr != ucb->irq))
+ return IRQ_NONE;
- set_freezable();
- while (!kthread_should_stop()) {
- unsigned int x, y, p;
- long timeout;
+ ucb1400_clear_pending_irq(ucb);
- ucb->ts_restart = 0;
+ /* Start with a small delay before checking pendown state */
+ msleep(UCB1400_TS_POLL_PERIOD);
- if (ucb->irq_pending) {
- ucb->irq_pending = 0;
- ucb1400_handle_pending_irq(ucb);
- }
+ while (!ucb->stopped && !(penup = ucb1400_ts_pen_up(ucb))) {
ucb1400_adc_enable(ucb->ac97);
x = ucb1400_ts_read_xpos(ucb);
@@ -204,91 +208,62 @@ static int ucb1400_ts_thread(void *_ucb)
p = ucb1400_ts_read_pressure(ucb);
ucb1400_adc_disable(ucb->ac97);
- /* Switch back to interrupt mode. */
- ucb1400_ts_mode_int(ucb->ac97);
-
- msleep(10);
-
- if (ucb1400_ts_pen_up(ucb->ac97)) {
- ucb1400_ts_irq_enable(ucb->ac97);
-
- /*
- * If we spat out a valid sample set last time,
- * spit out a "pen off" sample here.
- */
- if (valid) {
- ucb1400_ts_event_release(ucb->ts_idev);
- valid = 0;
- }
-
- timeout = MAX_SCHEDULE_TIMEOUT;
- } else {
- valid = 1;
- ucb1400_ts_evt_add(ucb->ts_idev, p, x, y);
- timeout = msecs_to_jiffies(10);
- }
+ ucb1400_ts_report_event(ucb->ts_idev, p, x, y);
- wait_event_freezable_timeout(ucb->ts_wait,
- ucb->irq_pending || ucb->ts_restart ||
- kthread_should_stop(), timeout);
+ wait_event_timeout(ucb->ts_wait, ucb->stopped,
+ msecs_to_jiffies(UCB1400_TS_POLL_PERIOD));
}
- /* Send the "pen off" if we are stopping with the pen still active */
- if (valid)
- ucb1400_ts_event_release(ucb->ts_idev);
+ ucb1400_ts_event_release(ucb->ts_idev);
- ucb->ts_task = NULL;
- return 0;
+ if (!ucb->stopped) {
+ /* Switch back to interrupt mode. */
+ ucb1400_ts_mode_int(ucb);
+ ucb1400_ts_irq_enable(ucb);
+ }
+
+ return IRQ_HANDLED;
}
-/*
- * A restriction with interrupts exists when using the ucb1400, as
- * the codec read/write routines may sleep while waiting for codec
- * access completion and uses semaphores for access control to the
- * AC97 bus. A complete codec read cycle could take anywhere from
- * 60 to 100uSec so we *definitely* don't want to spin inside the
- * interrupt handler waiting for codec access. So, we handle the
- * interrupt by scheduling a RT kernel thread to run in process
- * context instead of interrupt context.
- */
-static irqreturn_t ucb1400_hard_irq(int irqnr, void *devid)
+static void ucb1400_ts_stop(struct ucb1400_ts *ucb)
{
- struct ucb1400_ts *ucb = devid;
+ /* Signal IRQ thread to stop polling and disable the handler. */
+ ucb->stopped = true;
+ mb();
+ wake_up(&ucb->ts_wait);
+ disable_irq(ucb->irq);
- if (irqnr == ucb->irq) {
- disable_irq_nosync(ucb->irq);
- ucb->irq_pending = 1;
- wake_up(&ucb->ts_wait);
- return IRQ_HANDLED;
- }
- return IRQ_NONE;
+ ucb1400_ts_irq_disable(ucb);
+ ucb1400_reg_write(ucb->ac97, UCB_TS_CR, 0);
+}
+
+/* Must be called with ts->lock held */
+static void ucb1400_ts_start(struct ucb1400_ts *ucb)
+{
+ /* Tell IRQ thread that it may poll the device. */
+ ucb->stopped = false;
+ mb();
+
+ ucb1400_ts_mode_int(ucb);
+ ucb1400_ts_irq_enable(ucb);
+
+ enable_irq(ucb->irq);
}
static int ucb1400_ts_open(struct input_dev *idev)
{
struct ucb1400_ts *ucb = input_get_drvdata(idev);
- int ret = 0;
- BUG_ON(ucb->ts_task);
+ ucb1400_ts_start(ucb);
- ucb->ts_task = kthread_run(ucb1400_ts_thread, ucb, "UCB1400_ts");
- if (IS_ERR(ucb->ts_task)) {
- ret = PTR_ERR(ucb->ts_task);
- ucb->ts_task = NULL;
- }
-
- return ret;
+ return 0;
}
static void ucb1400_ts_close(struct input_dev *idev)
{
struct ucb1400_ts *ucb = input_get_drvdata(idev);
- if (ucb->ts_task)
- kthread_stop(ucb->ts_task);
-
- ucb1400_ts_irq_disable(ucb->ac97);
- ucb1400_reg_write(ucb->ac97, UCB_TS_CR, 0);
+ ucb1400_ts_stop(ucb);
}
#ifndef NO_IRQ
@@ -299,7 +274,8 @@ static void ucb1400_ts_close(struct input_dev *idev)
* Try to probe our interrupt, rather than relying on lots of
* hard-coded machine dependencies.
*/
-static int ucb1400_ts_detect_irq(struct ucb1400_ts *ucb)
+static int __devinit ucb1400_ts_detect_irq(struct ucb1400_ts *ucb,
+ struct platform_device *pdev)
{
unsigned long mask, timeout;
@@ -321,7 +297,7 @@ static int ucb1400_ts_detect_irq(struct ucb1400_ts *ucb)
UCB_ADC_DAT_VALID)) {
cpu_relax();
if (time_after(jiffies, timeout)) {
- printk(KERN_ERR "ucb1400: timed out in IRQ probe\n");
+ dev_err(&pdev->dev, "timed out in IRQ probe\n");
probe_irq_off(mask);
return -ENODEV;
}
@@ -342,11 +318,11 @@ static int ucb1400_ts_detect_irq(struct ucb1400_ts *ucb)
return 0;
}
-static int ucb1400_ts_probe(struct platform_device *dev)
+static int __devinit ucb1400_ts_probe(struct platform_device *pdev)
{
+ struct ucb1400_ts *ucb = pdev->dev.platform_data;
int error, x_res, y_res;
u16 fcsr;
- struct ucb1400_ts *ucb = dev->dev.platform_data;
ucb->ts_idev = input_allocate_device();
if (!ucb->ts_idev) {
@@ -356,27 +332,19 @@ static int ucb1400_ts_probe(struct platform_device *dev)
/* Only in case the IRQ line wasn't supplied, try detecting it */
if (ucb->irq < 0) {
- error = ucb1400_ts_detect_irq(ucb);
+ error = ucb1400_ts_detect_irq(ucb, pdev);
if (error) {
- printk(KERN_ERR "UCB1400: IRQ probe failed\n");
+ dev_err(&pdev->dev, "IRQ probe failed\n");
goto err_free_devs;
}
}
+ dev_dbg(&pdev->dev, "found IRQ %d\n", ucb->irq);
init_waitqueue_head(&ucb->ts_wait);
- error = request_irq(ucb->irq, ucb1400_hard_irq, IRQF_TRIGGER_RISING,
- "UCB1400", ucb);
- if (error) {
- printk(KERN_ERR "ucb1400: unable to grab irq%d: %d\n",
- ucb->irq, error);
- goto err_free_devs;
- }
- printk(KERN_DEBUG "UCB1400: found IRQ %d\n", ucb->irq);
-
input_set_drvdata(ucb->ts_idev, ucb);
- ucb->ts_idev->dev.parent = &dev->dev;
+ ucb->ts_idev->dev.parent = &pdev->dev;
ucb->ts_idev->name = "UCB1400 touchscreen interface";
ucb->ts_idev->id.vendor = ucb1400_reg_read(ucb->ac97,
AC97_VENDOR_ID1);
@@ -398,12 +366,23 @@ static int ucb1400_ts_probe(struct platform_device *dev)
x_res = ucb1400_ts_read_xres(ucb);
y_res = ucb1400_ts_read_yres(ucb);
ucb1400_adc_disable(ucb->ac97);
- printk(KERN_DEBUG "UCB1400: x/y = %d/%d\n", x_res, y_res);
+ dev_dbg(&pdev->dev, "x/y = %d/%d\n", x_res, y_res);
input_set_abs_params(ucb->ts_idev, ABS_X, 0, x_res, 0, 0);
input_set_abs_params(ucb->ts_idev, ABS_Y, 0, y_res, 0, 0);
input_set_abs_params(ucb->ts_idev, ABS_PRESSURE, 0, 0, 0, 0);
+ ucb1400_ts_stop(ucb);
+
+ error = request_threaded_irq(ucb->irq, NULL, ucb1400_irq,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ "UCB1400", ucb);
+ if (error) {
+ dev_err(&pdev->dev,
+ "unable to grab irq%d: %d\n", ucb->irq, error);
+ goto err_free_devs;
+ }
+
error = input_register_device(ucb->ts_idev);
if (error)
goto err_free_irq;
@@ -416,56 +395,61 @@ err_free_devs:
input_free_device(ucb->ts_idev);
err:
return error;
-
}
-static int ucb1400_ts_remove(struct platform_device *dev)
+static int __devexit ucb1400_ts_remove(struct platform_device *pdev)
{
- struct ucb1400_ts *ucb = dev->dev.platform_data;
+ struct ucb1400_ts *ucb = pdev->dev.platform_data;
free_irq(ucb->irq, ucb);
input_unregister_device(ucb->ts_idev);
+
return 0;
}
-#ifdef CONFIG_PM
-static int ucb1400_ts_resume(struct platform_device *dev)
+#ifdef CONFIG_PM_SLEEP
+static int ucb1400_ts_suspend(struct device *dev)
{
- struct ucb1400_ts *ucb = dev->dev.platform_data;
-
- if (ucb->ts_task) {
- /*
- * Restart the TS thread to ensure the
- * TS interrupt mode is set up again
- * after sleep.
- */
- ucb->ts_restart = 1;
- wake_up(&ucb->ts_wait);
- }
+ struct ucb1400_ts *ucb = dev->platform_data;
+ struct input_dev *idev = ucb->ts_idev;
+
+ mutex_lock(&idev->mutex);
+
+ if (idev->users)
+		ucb1400_ts_stop(ucb);
+
+ mutex_unlock(&idev->mutex);
+ return 0;
+}
+
+static int ucb1400_ts_resume(struct device *dev)
+{
+ struct ucb1400_ts *ucb = dev->platform_data;
+ struct input_dev *idev = ucb->ts_idev;
+
+ mutex_lock(&idev->mutex);
+
+ if (idev->users)
+		ucb1400_ts_start(ucb);
+
+ mutex_unlock(&idev->mutex);
return 0;
}
-#else
-#define ucb1400_ts_resume NULL
#endif
+static SIMPLE_DEV_PM_OPS(ucb1400_ts_pm_ops,
+ ucb1400_ts_suspend, ucb1400_ts_resume);
+
static struct platform_driver ucb1400_ts_driver = {
.probe = ucb1400_ts_probe,
- .remove = ucb1400_ts_remove,
- .resume = ucb1400_ts_resume,
+ .remove = __devexit_p(ucb1400_ts_remove),
.driver = {
.name = "ucb1400_ts",
+ .owner = THIS_MODULE,
+ .pm = &ucb1400_ts_pm_ops,
},
};
-
-static int __init ucb1400_ts_init(void)
-{
- return platform_driver_register(&ucb1400_ts_driver);
-}
-
-static void __exit ucb1400_ts_exit(void)
-{
- platform_driver_unregister(&ucb1400_ts_driver);
-}
+module_platform_driver(ucb1400_ts_driver);
module_param(adcsync, bool, 0444);
MODULE_PARM_DESC(adcsync, "Synchronize touch readings with ADCSYNC pin.");
@@ -479,8 +463,5 @@ MODULE_PARM_DESC(ts_delay_pressure,
"delay between panel setup and pressure read."
" Default = 0us.");
-module_init(ucb1400_ts_init);
-module_exit(ucb1400_ts_exit);
-
MODULE_DESCRIPTION("Philips UCB1400 touchscreen driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
index e539d92cc626..3a5ebf452e81 100644
--- a/drivers/input/touchscreen/usbtouchscreen.c
+++ b/drivers/input/touchscreen/usbtouchscreen.c
@@ -16,6 +16,7 @@
* - JASTEC USB touch controller/DigiTech DTR-02U
* - Zytronic capacitive touchscreen
* - NEXIO/iNexio
+ * - Elo TouchSystems 2700 IntelliTouch
*
* Copyright (C) 2004-2007 by Daniel Ritz <daniel.ritz@gmx.ch>
* Copyright (C) by Todd E. Johnson (mtouchusb.c)
@@ -59,11 +60,11 @@
#define DRIVER_AUTHOR "Daniel Ritz <daniel.ritz@gmx.ch>"
#define DRIVER_DESC "USB Touchscreen Driver"
-static int swap_xy;
+static bool swap_xy;
module_param(swap_xy, bool, 0644);
MODULE_PARM_DESC(swap_xy, "If set X and Y axes are swapped.");
-static int hwcalib_xy;
+static bool hwcalib_xy;
module_param(hwcalib_xy, bool, 0644);
MODULE_PARM_DESC(hwcalib_xy, "If set hw-calibrated X/Y are used if available");
@@ -138,6 +139,7 @@ enum {
DEVTYPE_ZYTRONIC,
DEVTYPE_TC45USB,
DEVTYPE_NEXIO,
+ DEVTYPE_ELO,
};
#define USB_DEVICE_HID_CLASS(vend, prod) \
@@ -239,6 +241,10 @@ static const struct usb_device_id usbtouch_devices[] = {
.driver_info = DEVTYPE_NEXIO},
#endif
+#ifdef CONFIG_TOUCHSCREEN_USB_ELO
+ {USB_DEVICE(0x04e7, 0x0020), .driver_info = DEVTYPE_ELO},
+#endif
+
{}
};
@@ -945,6 +951,24 @@ static int nexio_read_data(struct usbtouch_usb *usbtouch, unsigned char *pkt)
/*****************************************************************************
+ * ELO part
+ */
+
+#ifdef CONFIG_TOUCHSCREEN_USB_ELO
+
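+/*
+ * Each report carries little-endian X in bytes 2-3 and Y in bytes 4-5;
+ * byte 6 holds the pressure, and any non-zero value is treated as touch.
+ */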
+static int elo_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
+{
+ dev->x = (pkt[3] << 8) | pkt[2];
+ dev->y = (pkt[5] << 8) | pkt[4];
+ dev->touch = pkt[6] > 0;
+ dev->press = pkt[6];
+
+ return 1;
+}
+#endif
+
+
+/*****************************************************************************
* the different device descriptors
*/
#ifdef MULTI_PACKET
@@ -953,6 +977,18 @@ static void usbtouch_process_multi(struct usbtouch_usb *usbtouch,
#endif
static struct usbtouch_device_info usbtouch_dev_info[] = {
+#ifdef CONFIG_TOUCHSCREEN_USB_ELO
+ [DEVTYPE_ELO] = {
+ .min_xc = 0x0,
+ .max_xc = 0x0fff,
+ .min_yc = 0x0,
+ .max_yc = 0x0fff,
+ .max_press = 0xff,
+ .rept_size = 8,
+ .read_data = elo_read_data,
+ },
+#endif
+
#ifdef CONFIG_TOUCHSCREEN_USB_EGALAX
[DEVTYPE_EGALAX] = {
.min_xc = 0x0,
diff --git a/drivers/input/touchscreen/w90p910_ts.c b/drivers/input/touchscreen/w90p910_ts.c
index 217aa51135c5..9396b21d0e8f 100644
--- a/drivers/input/touchscreen/w90p910_ts.c
+++ b/drivers/input/touchscreen/w90p910_ts.c
@@ -331,19 +331,7 @@ static struct platform_driver w90x900ts_driver = {
.owner = THIS_MODULE,
},
};
-
-static int __init w90x900ts_init(void)
-{
- return platform_driver_register(&w90x900ts_driver);
-}
-
-static void __exit w90x900ts_exit(void)
-{
- platform_driver_unregister(&w90x900ts_driver);
-}
-
-module_init(w90x900ts_init);
-module_exit(w90x900ts_exit);
+module_platform_driver(w90x900ts_driver);
MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
MODULE_DESCRIPTION("w90p910 touch screen driver!");
diff --git a/drivers/input/touchscreen/wm831x-ts.c b/drivers/input/touchscreen/wm831x-ts.c
index 9175d49d2546..4bc851a9dc3d 100644
--- a/drivers/input/touchscreen/wm831x-ts.c
+++ b/drivers/input/touchscreen/wm831x-ts.c
@@ -401,18 +401,7 @@ static struct platform_driver wm831x_ts_driver = {
.probe = wm831x_ts_probe,
.remove = __devexit_p(wm831x_ts_remove),
};
-
-static int __init wm831x_ts_init(void)
-{
- return platform_driver_register(&wm831x_ts_driver);
-}
-module_init(wm831x_ts_init);
-
-static void __exit wm831x_ts_exit(void)
-{
- platform_driver_unregister(&wm831x_ts_driver);
-}
-module_exit(wm831x_ts_exit);
+module_platform_driver(wm831x_ts_driver);
/* Module information */
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
diff --git a/drivers/input/touchscreen/zylonite-wm97xx.c b/drivers/input/touchscreen/zylonite-wm97xx.c
index f6328c0cded6..bf0869a7a78e 100644
--- a/drivers/input/touchscreen/zylonite-wm97xx.c
+++ b/drivers/input/touchscreen/zylonite-wm97xx.c
@@ -22,6 +22,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
+#include <linux/gpio.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -192,8 +193,8 @@ static int zylonite_wm97xx_probe(struct platform_device *pdev)
else
gpio_touch_irq = mfp_to_gpio(MFP_PIN_GPIO26);
- wm->pen_irq = IRQ_GPIO(gpio_touch_irq);
- irq_set_irq_type(IRQ_GPIO(gpio_touch_irq), IRQ_TYPE_EDGE_BOTH);
+ wm->pen_irq = gpio_to_irq(gpio_touch_irq);
+ irq_set_irq_type(wm->pen_irq, IRQ_TYPE_EDGE_BOTH);
wm97xx_config_gpio(wm, WM97XX_GPIO_13, WM97XX_GPIO_IN,
WM97XX_GPIO_POL_HIGH,
@@ -223,19 +224,7 @@ static struct platform_driver zylonite_wm97xx_driver = {
.name = "wm97xx-touch",
},
};
-
-static int __init zylonite_wm97xx_init(void)
-{
- return platform_driver_register(&zylonite_wm97xx_driver);
-}
-
-static void __exit zylonite_wm97xx_exit(void)
-{
- platform_driver_unregister(&zylonite_wm97xx_driver);
-}
-
-module_init(zylonite_wm97xx_init);
-module_exit(zylonite_wm97xx_exit);
+module_platform_driver(zylonite_wm97xx_driver);
/* Module information */
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 5414253b185a..6bea6962f8ee 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -34,7 +34,9 @@ config AMD_IOMMU
bool "AMD IOMMU support"
select SWIOTLB
select PCI_MSI
- select PCI_IOV
+ select PCI_ATS
+ select PCI_PRI
+ select PCI_PASID
select IOMMU_API
depends on X86_64 && PCI && ACPI
---help---
@@ -58,6 +60,15 @@ config AMD_IOMMU_STATS
information to userspace via debugfs.
If unsure, say N.
+config AMD_IOMMU_V2
+ tristate "AMD IOMMU Version 2 driver (EXPERIMENTAL)"
+ depends on AMD_IOMMU && PROFILING && EXPERIMENTAL
+ select MMU_NOTIFIER
+ ---help---
+ This option enables support for the AMD IOMMUv2 features of the IOMMU
+ hardware. Select this option if you want to use devices that support
+	  the PCI PRI and PASID interfaces.
+
# Intel IOMMU support
config DMAR_TABLE
bool
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 2f4448794bc7..0e36b4934aff 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -1,6 +1,7 @@
obj-$(CONFIG_IOMMU_API) += iommu.o
obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o msm_iommu_dev.o
obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o
+obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o
obj-$(CONFIG_DMAR_TABLE) += dmar.o
obj-$(CONFIG_INTEL_IOMMU) += iova.o intel-iommu.o
obj-$(CONFIG_IRQ_REMAP) += intr_remapping.o
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 4ee277a8521a..cce1f03b8895 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -17,6 +17,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#include <linux/ratelimit.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>
#include <linux/bitmap.h>
@@ -28,6 +29,8 @@
#include <linux/iommu.h>
#include <linux/delay.h>
#include <linux/amd-iommu.h>
+#include <linux/notifier.h>
+#include <linux/export.h>
#include <asm/msidef.h>
#include <asm/proto.h>
#include <asm/iommu.h>
@@ -41,6 +44,24 @@
#define LOOP_TIMEOUT 100000
+/*
+ * This bitmap is used to advertise the page sizes our hardware supports
+ * to the IOMMU core, which will then use this information to split
+ * physically contiguous memory regions it is mapping into page sizes
+ * that we support.
+ *
+ * Traditionally the IOMMU core just handed us the mappings directly,
+ * after making sure the size is a power-of-two multiple of 4KiB and that the
+ * mapping has natural alignment.
+ *
+ * To retain this behavior, we currently advertise that we support
+ * all page sizes that are a power-of-two multiple of 4KiB.
+ *
+ * If at some point we'd like to utilize the IOMMU core's new behavior,
+ * we could change this to advertise the real page sizes we support.
+ */
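+/*
+ * Note that bit n of the bitmap advertises support for a page size of
+ * 2^n bytes, so ~0xFFFUL covers 4KiB and every larger power of two.
+ */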
+#define AMD_IOMMU_PGSIZES (~0xFFFUL)
+
static DEFINE_RWLOCK(amd_iommu_devtable_lock);
/* A list of preallocated protection domains */
@@ -59,6 +80,9 @@ static struct protection_domain *pt_domain;
static struct iommu_ops amd_iommu_ops;
+static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
+int amd_iommu_max_glx_val = -1;
+
/*
* general struct to manage commands send to an IOMMU
*/
@@ -67,6 +91,7 @@ struct iommu_cmd {
};
static void update_domain(struct protection_domain *domain);
+static int __init alloc_passthrough_domain(void);
/****************************************************************************
*
@@ -147,6 +172,33 @@ static struct iommu_dev_data *get_dev_data(struct device *dev)
return dev->archdata.iommu;
}
+static bool pci_iommuv2_capable(struct pci_dev *pdev)
+{
+ static const int caps[] = {
+ PCI_EXT_CAP_ID_ATS,
+ PCI_EXT_CAP_ID_PRI,
+ PCI_EXT_CAP_ID_PASID,
+ };
+ int i, pos;
+
+ for (i = 0; i < 3; ++i) {
+ pos = pci_find_ext_capability(pdev, caps[i]);
+ if (pos == 0)
+ return false;
+ }
+
+ return true;
+}
+
+static bool pdev_pri_erratum(struct pci_dev *pdev, u32 erratum)
+{
+ struct iommu_dev_data *dev_data;
+
+ dev_data = get_dev_data(&pdev->dev);
+
+ return dev_data->errata & (1 << erratum) ? true : false;
+}
+
/*
* In this function the list of preallocated protection domains is traversed to
* find the domain for a specific device
@@ -204,6 +256,7 @@ static bool check_device(struct device *dev)
static int iommu_init_device(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct iommu_dev_data *dev_data;
u16 alias;
@@ -228,6 +281,13 @@ static int iommu_init_device(struct device *dev)
dev_data->alias_data = alias_data;
}
+ if (pci_iommuv2_capable(pdev)) {
+ struct amd_iommu *iommu;
+
+ iommu = amd_iommu_rlookup_table[dev_data->devid];
+ dev_data->iommu_v2 = iommu->is_iommu_v2;
+ }
+
dev->archdata.iommu = dev_data;
return 0;
@@ -317,6 +377,11 @@ DECLARE_STATS_COUNTER(domain_flush_single);
DECLARE_STATS_COUNTER(domain_flush_all);
DECLARE_STATS_COUNTER(alloced_io_mem);
DECLARE_STATS_COUNTER(total_map_requests);
+DECLARE_STATS_COUNTER(complete_ppr);
+DECLARE_STATS_COUNTER(invalidate_iotlb);
+DECLARE_STATS_COUNTER(invalidate_iotlb_all);
+DECLARE_STATS_COUNTER(pri_requests);
+
static struct dentry *stats_dir;
static struct dentry *de_fflush;
@@ -351,6 +416,10 @@ static void amd_iommu_stats_init(void)
amd_iommu_stats_add(&domain_flush_all);
amd_iommu_stats_add(&alloced_io_mem);
amd_iommu_stats_add(&total_map_requests);
+ amd_iommu_stats_add(&complete_ppr);
+ amd_iommu_stats_add(&invalidate_iotlb);
+ amd_iommu_stats_add(&invalidate_iotlb_all);
+ amd_iommu_stats_add(&pri_requests);
}
#endif
@@ -365,8 +434,8 @@ static void dump_dte_entry(u16 devid)
{
int i;
- for (i = 0; i < 8; ++i)
- pr_err("AMD-Vi: DTE[%d]: %08x\n", i,
+ for (i = 0; i < 4; ++i)
+ pr_err("AMD-Vi: DTE[%d]: %016llx\n", i,
amd_iommu_dev_table[devid].data[i]);
}
@@ -461,12 +530,84 @@ static void iommu_poll_events(struct amd_iommu *iommu)
spin_unlock_irqrestore(&iommu->lock, flags);
}
+static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u32 head)
+{
+ struct amd_iommu_fault fault;
+ volatile u64 *raw;
+ int i;
+
+ INC_STATS_COUNTER(pri_requests);
+
+ raw = (u64 *)(iommu->ppr_log + head);
+
+ /*
+ * Hardware bug: Interrupt may arrive before the entry is written to
+ * memory. If this happens we need to wait for the entry to arrive.
+ */
+ for (i = 0; i < LOOP_TIMEOUT; ++i) {
+ if (PPR_REQ_TYPE(raw[0]) != 0)
+ break;
+ udelay(1);
+ }
+
+ if (PPR_REQ_TYPE(raw[0]) != PPR_REQ_FAULT) {
+ pr_err_ratelimited("AMD-Vi: Unknown PPR request received\n");
+ return;
+ }
+
+ fault.address = raw[1];
+ fault.pasid = PPR_PASID(raw[0]);
+ fault.device_id = PPR_DEVID(raw[0]);
+ fault.tag = PPR_TAG(raw[0]);
+ fault.flags = PPR_FLAGS(raw[0]);
+
+ /*
+ * To detect the hardware bug we need to clear the entry
+	 * back to zero.
+ */
+ raw[0] = raw[1] = 0;
+
+ atomic_notifier_call_chain(&ppr_notifier, 0, &fault);
+}
+
+static void iommu_poll_ppr_log(struct amd_iommu *iommu)
+{
+ unsigned long flags;
+ u32 head, tail;
+
+ if (iommu->ppr_log == NULL)
+ return;
+
+ spin_lock_irqsave(&iommu->lock, flags);
+
+ head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
+ tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
+
+ while (head != tail) {
+
+ /* Handle PPR entry */
+ iommu_handle_ppr_entry(iommu, head);
+
+		/* Update and refresh ring-buffer state */
+ head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
+ writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
+ tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
+ }
+
+ /* enable ppr interrupts again */
+ writel(MMIO_STATUS_PPR_INT_MASK, iommu->mmio_base + MMIO_STATUS_OFFSET);
+
+ spin_unlock_irqrestore(&iommu->lock, flags);
+}
+
irqreturn_t amd_iommu_int_thread(int irq, void *data)
{
struct amd_iommu *iommu;
- for_each_iommu(iommu)
+ for_each_iommu(iommu) {
iommu_poll_events(iommu);
+ iommu_poll_ppr_log(iommu);
+ }
return IRQ_HANDLED;
}
@@ -595,6 +736,60 @@ static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep,
cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
}
+static void build_inv_iommu_pasid(struct iommu_cmd *cmd, u16 domid, int pasid,
+ u64 address, bool size)
+{
+ memset(cmd, 0, sizeof(*cmd));
+
+ address &= ~(0xfffULL);
+
+ cmd->data[0] = pasid & PASID_MASK;
+ cmd->data[1] = domid;
+ cmd->data[2] = lower_32_bits(address);
+ cmd->data[3] = upper_32_bits(address);
+ cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
+ cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
+ if (size)
+ cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
+ CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
+}
+
+static void build_inv_iotlb_pasid(struct iommu_cmd *cmd, u16 devid, int pasid,
+ int qdep, u64 address, bool size)
+{
+ memset(cmd, 0, sizeof(*cmd));
+
+ address &= ~(0xfffULL);
+
+ cmd->data[0] = devid;
+ cmd->data[0] |= (pasid & 0xff) << 16;
+ cmd->data[0] |= (qdep & 0xff) << 24;
+ cmd->data[1] = devid;
+ cmd->data[1] |= ((pasid >> 8) & 0xfff) << 16;
+ cmd->data[2] = lower_32_bits(address);
+ cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
+ cmd->data[3] = upper_32_bits(address);
+ if (size)
+ cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
+ CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
+}
+
+static void build_complete_ppr(struct iommu_cmd *cmd, u16 devid, int pasid,
+ int status, int tag, bool gn)
+{
+ memset(cmd, 0, sizeof(*cmd));
+
+ cmd->data[0] = devid;
+ if (gn) {
+ cmd->data[1] = pasid & PASID_MASK;
+ cmd->data[2] = CMD_INV_IOMMU_PAGES_GN_MASK;
+ }
+ cmd->data[3] = tag & 0x1ff;
+ cmd->data[3] |= (status & PPR_STATUS_MASK) << PPR_STATUS_SHIFT;
+
+ CMD_SET_TYPE(cmd, CMD_COMPLETE_PPR);
+}
+
static void build_inv_all(struct iommu_cmd *cmd)
{
memset(cmd, 0, sizeof(*cmd));
@@ -1496,6 +1691,48 @@ static void free_pagetable(struct protection_domain *domain)
domain->pt_root = NULL;
}
+static void free_gcr3_tbl_level1(u64 *tbl)
+{
+ u64 *ptr;
+ int i;
+
+ for (i = 0; i < 512; ++i) {
+ if (!(tbl[i] & GCR3_VALID))
+ continue;
+
+ ptr = __va(tbl[i] & PAGE_MASK);
+
+ free_page((unsigned long)ptr);
+ }
+}
+
+static void free_gcr3_tbl_level2(u64 *tbl)
+{
+ u64 *ptr;
+ int i;
+
+ for (i = 0; i < 512; ++i) {
+ if (!(tbl[i] & GCR3_VALID))
+ continue;
+
+ ptr = __va(tbl[i] & PAGE_MASK);
+
+ free_gcr3_tbl_level1(ptr);
+ }
+}
+
+static void free_gcr3_table(struct protection_domain *domain)
+{
+ if (domain->glx == 2)
+ free_gcr3_tbl_level2(domain->gcr3_tbl);
+ else if (domain->glx == 1)
+ free_gcr3_tbl_level1(domain->gcr3_tbl);
+ else if (domain->glx != 0)
+ BUG();
+
+ free_page((unsigned long)domain->gcr3_tbl);
+}
+
/*
* Free a domain, only used if something went wrong in the
* allocation path and we need to free an already allocated page table
@@ -1582,20 +1819,52 @@ static bool dma_ops_domain(struct protection_domain *domain)
static void set_dte_entry(u16 devid, struct protection_domain *domain, bool ats)
{
- u64 pte_root = virt_to_phys(domain->pt_root);
- u32 flags = 0;
+ u64 pte_root = 0;
+ u64 flags = 0;
+
+ if (domain->mode != PAGE_MODE_NONE)
+ pte_root = virt_to_phys(domain->pt_root);
pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK)
<< DEV_ENTRY_MODE_SHIFT;
pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV;
+ flags = amd_iommu_dev_table[devid].data[1];
+
if (ats)
flags |= DTE_FLAG_IOTLB;
- amd_iommu_dev_table[devid].data[3] |= flags;
- amd_iommu_dev_table[devid].data[2] = domain->id;
- amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root);
- amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root);
+ if (domain->flags & PD_IOMMUV2_MASK) {
+ u64 gcr3 = __pa(domain->gcr3_tbl);
+ u64 glx = domain->glx;
+ u64 tmp;
+
+ pte_root |= DTE_FLAG_GV;
+ pte_root |= (glx & DTE_GLX_MASK) << DTE_GLX_SHIFT;
+
+ /* First mask out possible old values for GCR3 table */
+ tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
+ flags &= ~tmp;
+
+ tmp = DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;
+ flags &= ~tmp;
+
+ /* Encode GCR3 table into DTE */
+ tmp = DTE_GCR3_VAL_A(gcr3) << DTE_GCR3_SHIFT_A;
+ pte_root |= tmp;
+
+ tmp = DTE_GCR3_VAL_B(gcr3) << DTE_GCR3_SHIFT_B;
+ flags |= tmp;
+
+ tmp = DTE_GCR3_VAL_C(gcr3) << DTE_GCR3_SHIFT_C;
+ flags |= tmp;
+ }
+
+ flags &= ~(0xffffUL);
+ flags |= domain->id;
+
+ amd_iommu_dev_table[devid].data[1] = flags;
+ amd_iommu_dev_table[devid].data[0] = pte_root;
}
static void clear_dte_entry(u16 devid)
@@ -1603,7 +1872,6 @@ static void clear_dte_entry(u16 devid)
/* remove entry from the device table seen by the hardware */
amd_iommu_dev_table[devid].data[0] = IOMMU_PTE_P | IOMMU_PTE_TV;
amd_iommu_dev_table[devid].data[1] = 0;
- amd_iommu_dev_table[devid].data[2] = 0;
amd_iommu_apply_erratum_63(devid);
}
@@ -1696,6 +1964,93 @@ out_unlock:
return ret;
}
+
+static void pdev_iommuv2_disable(struct pci_dev *pdev)
+{
+ pci_disable_ats(pdev);
+ pci_disable_pri(pdev);
+ pci_disable_pasid(pdev);
+}
+
+/* FIXME: Change generic reset-function to do the same */
+static int pri_reset_while_enabled(struct pci_dev *pdev)
+{
+ u16 control;
+ int pos;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
+ if (!pos)
+ return -EINVAL;
+
+ pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
+ control |= PCI_PRI_CTRL_RESET;
+ pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control);
+
+ return 0;
+}
+
+static int pdev_iommuv2_enable(struct pci_dev *pdev)
+{
+ bool reset_enable;
+ int reqs, ret;
+
+ /* FIXME: Hardcode number of outstanding requests for now */
+ reqs = 32;
+ if (pdev_pri_erratum(pdev, AMD_PRI_DEV_ERRATUM_LIMIT_REQ_ONE))
+ reqs = 1;
+ reset_enable = pdev_pri_erratum(pdev, AMD_PRI_DEV_ERRATUM_ENABLE_RESET);
+
+ /* Only allow access to user-accessible pages */
+ ret = pci_enable_pasid(pdev, 0);
+ if (ret)
+ goto out_err;
+
+ /* First reset the PRI state of the device */
+ ret = pci_reset_pri(pdev);
+ if (ret)
+ goto out_err;
+
+ /* Enable PRI */
+ ret = pci_enable_pri(pdev, reqs);
+ if (ret)
+ goto out_err;
+
+ if (reset_enable) {
+ ret = pri_reset_while_enabled(pdev);
+ if (ret)
+ goto out_err;
+ }
+
+ ret = pci_enable_ats(pdev, PAGE_SHIFT);
+ if (ret)
+ goto out_err;
+
+ return 0;
+
+out_err:
+ pci_disable_pri(pdev);
+ pci_disable_pasid(pdev);
+
+ return ret;
+}
+
+/* FIXME: Move this to PCI code */
+#define PCI_PRI_TLP_OFF (1 << 2)
+
+bool pci_pri_tlp_required(struct pci_dev *pdev)
+{
+ u16 control;
+ int pos;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
+ if (!pos)
+ return false;
+
+ pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
+
+ return (control & PCI_PRI_TLP_OFF) ? true : false;
+}
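+
+/*
+ * Editorial note, not part of the original patch: pci_pri_tlp_required()
+ * reads the PRI control word and reports whether PPR completions for this
+ * device must carry a PASID TLP prefix.  The result is cached in
+ * dev_data->pri_tlp by attach_device() and handed to build_complete_ppr()
+ * in amd_iommu_complete_ppr().
+ */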
+
/*
* If a device is not yet associated with a domain, this function does
* assigns it visible for the hardware
@@ -1710,7 +2065,18 @@ static int attach_device(struct device *dev,
dev_data = get_dev_data(dev);
- if (amd_iommu_iotlb_sup && pci_enable_ats(pdev, PAGE_SHIFT) == 0) {
+ if (domain->flags & PD_IOMMUV2_MASK) {
+ if (!dev_data->iommu_v2 || !dev_data->passthrough)
+ return -EINVAL;
+
+ if (pdev_iommuv2_enable(pdev) != 0)
+ return -EINVAL;
+
+ dev_data->ats.enabled = true;
+ dev_data->ats.qdep = pci_ats_queue_depth(pdev);
+ dev_data->pri_tlp = pci_pri_tlp_required(pdev);
+ } else if (amd_iommu_iotlb_sup &&
+ pci_enable_ats(pdev, PAGE_SHIFT) == 0) {
dev_data->ats.enabled = true;
dev_data->ats.qdep = pci_ats_queue_depth(pdev);
}
@@ -1760,7 +2126,7 @@ static void __detach_device(struct iommu_dev_data *dev_data)
* passthrough domain if it is detached from any other domain.
* Make sure we can deassign from the pt_domain itself.
*/
- if (iommu_pass_through &&
+ if (dev_data->passthrough &&
(dev_data->domain == NULL && domain != pt_domain))
__attach_device(dev_data, pt_domain);
}
@@ -1770,20 +2136,24 @@ static void __detach_device(struct iommu_dev_data *dev_data)
*/
static void detach_device(struct device *dev)
{
+ struct protection_domain *domain;
struct iommu_dev_data *dev_data;
unsigned long flags;
dev_data = get_dev_data(dev);
+ domain = dev_data->domain;
/* lock device table */
write_lock_irqsave(&amd_iommu_devtable_lock, flags);
__detach_device(dev_data);
write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
- if (dev_data->ats.enabled) {
+ if (domain->flags & PD_IOMMUV2_MASK)
+ pdev_iommuv2_disable(to_pci_dev(dev));
+ else if (dev_data->ats.enabled)
pci_disable_ats(to_pci_dev(dev));
- dev_data->ats.enabled = false;
- }
+
+ dev_data->ats.enabled = false;
}
/*
@@ -1818,18 +2188,20 @@ static struct protection_domain *domain_for_device(struct device *dev)
static int device_change_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
- struct device *dev = data;
- u16 devid;
- struct protection_domain *domain;
struct dma_ops_domain *dma_domain;
+ struct protection_domain *domain;
+ struct iommu_dev_data *dev_data;
+ struct device *dev = data;
struct amd_iommu *iommu;
unsigned long flags;
+ u16 devid;
if (!check_device(dev))
return 0;
- devid = get_device_id(dev);
- iommu = amd_iommu_rlookup_table[devid];
+ devid = get_device_id(dev);
+ iommu = amd_iommu_rlookup_table[devid];
+ dev_data = get_dev_data(dev);
switch (action) {
case BUS_NOTIFY_UNBOUND_DRIVER:
@@ -1838,7 +2210,7 @@ static int device_change_notifier(struct notifier_block *nb,
if (!domain)
goto out;
- if (iommu_pass_through)
+ if (dev_data->passthrough)
break;
detach_device(dev);
break;
@@ -2434,8 +2806,9 @@ static int amd_iommu_dma_supported(struct device *dev, u64 mask)
*/
static void prealloc_protection_domains(void)
{
- struct pci_dev *dev = NULL;
+ struct iommu_dev_data *dev_data;
struct dma_ops_domain *dma_dom;
+ struct pci_dev *dev = NULL;
u16 devid;
for_each_pci_dev(dev) {
@@ -2444,6 +2817,16 @@ static void prealloc_protection_domains(void)
if (!check_device(&dev->dev))
continue;
+ dev_data = get_dev_data(&dev->dev);
+ if (!amd_iommu_force_isolation && dev_data->iommu_v2) {
+ /* Make sure passthrough domain is allocated */
+ alloc_passthrough_domain();
+ dev_data->passthrough = true;
+ attach_device(&dev->dev, pt_domain);
+			pr_info("AMD-Vi: Using passthrough domain for device %s\n",
+ dev_name(&dev->dev));
+ }
+
/* Is there already any domain for it? */
if (domain_for_device(&dev->dev))
continue;
@@ -2474,6 +2857,7 @@ static struct dma_map_ops amd_iommu_dma_ops = {
static unsigned device_dma_ops_init(void)
{
+ struct iommu_dev_data *dev_data;
struct pci_dev *pdev = NULL;
unsigned unhandled = 0;
@@ -2483,7 +2867,12 @@ static unsigned device_dma_ops_init(void)
continue;
}
- pdev->dev.archdata.dma_ops = &amd_iommu_dma_ops;
+ dev_data = get_dev_data(&pdev->dev);
+
+ if (!dev_data->passthrough)
+ pdev->dev.archdata.dma_ops = &amd_iommu_dma_ops;
+ else
+ pdev->dev.archdata.dma_ops = &nommu_dma_ops;
}
return unhandled;
@@ -2610,6 +2999,20 @@ out_err:
return NULL;
}
+static int __init alloc_passthrough_domain(void)
+{
+ if (pt_domain != NULL)
+ return 0;
+
+ /* allocate passthrough domain */
+ pt_domain = protection_domain_alloc();
+ if (!pt_domain)
+ return -ENOMEM;
+
+ pt_domain->mode = PAGE_MODE_NONE;
+
+ return 0;
+}
static int amd_iommu_domain_init(struct iommu_domain *dom)
{
struct protection_domain *domain;
@@ -2623,6 +3026,8 @@ static int amd_iommu_domain_init(struct iommu_domain *dom)
if (!domain->pt_root)
goto out_free;
+ domain->iommu_domain = dom;
+
dom->priv = domain;
return 0;
@@ -2645,7 +3050,11 @@ static void amd_iommu_domain_destroy(struct iommu_domain *dom)
BUG_ON(domain->dev_cnt != 0);
- free_pagetable(domain);
+ if (domain->mode != PAGE_MODE_NONE)
+ free_pagetable(domain);
+
+ if (domain->flags & PD_IOMMUV2_MASK)
+ free_gcr3_table(domain);
protection_domain_free(domain);
@@ -2702,13 +3111,15 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
}
static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
- phys_addr_t paddr, int gfp_order, int iommu_prot)
+ phys_addr_t paddr, size_t page_size, int iommu_prot)
{
- unsigned long page_size = 0x1000UL << gfp_order;
struct protection_domain *domain = dom->priv;
int prot = 0;
int ret;
+ if (domain->mode == PAGE_MODE_NONE)
+ return -EINVAL;
+
if (iommu_prot & IOMMU_READ)
prot |= IOMMU_PROT_IR;
if (iommu_prot & IOMMU_WRITE)
@@ -2721,13 +3132,14 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
return ret;
}
-static int amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
- int gfp_order)
+static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
+ size_t page_size)
{
struct protection_domain *domain = dom->priv;
- unsigned long page_size, unmap_size;
+ size_t unmap_size;
- page_size = 0x1000UL << gfp_order;
+ if (domain->mode == PAGE_MODE_NONE)
+ return -EINVAL;
mutex_lock(&domain->api_lock);
unmap_size = iommu_unmap_page(domain, iova, page_size);
@@ -2735,7 +3147,7 @@ static int amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
domain_flush_tlb_pde(domain);
- return get_order(unmap_size);
+ return unmap_size;
}
static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
@@ -2746,6 +3158,9 @@ static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
phys_addr_t paddr;
u64 *pte, __pte;
+ if (domain->mode == PAGE_MODE_NONE)
+ return iova;
+
pte = fetch_pte(domain, iova);
if (!pte || !IOMMU_PTE_PRESENT(*pte))
@@ -2773,6 +3188,26 @@ static int amd_iommu_domain_has_cap(struct iommu_domain *domain,
return 0;
}
+static int amd_iommu_device_group(struct device *dev, unsigned int *groupid)
+{
+ struct iommu_dev_data *dev_data = dev->archdata.iommu;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ u16 devid;
+
+ if (!dev_data)
+ return -ENODEV;
+
+ if (pdev->is_virtfn || !iommu_group_mf)
+ devid = dev_data->devid;
+ else
+ devid = calc_devid(pdev->bus->number,
+ PCI_DEVFN(PCI_SLOT(pdev->devfn), 0));
+
+ *groupid = amd_iommu_alias_table[devid];
+
+ return 0;
+}
+
static struct iommu_ops amd_iommu_ops = {
.domain_init = amd_iommu_domain_init,
.domain_destroy = amd_iommu_domain_destroy,
@@ -2782,6 +3217,8 @@ static struct iommu_ops amd_iommu_ops = {
.unmap = amd_iommu_unmap,
.iova_to_phys = amd_iommu_iova_to_phys,
.domain_has_cap = amd_iommu_domain_has_cap,
+ .device_group = amd_iommu_device_group,
+ .pgsize_bitmap = AMD_IOMMU_PGSIZES,
};
/*****************************************************************************
@@ -2796,21 +3233,23 @@ static struct iommu_ops amd_iommu_ops = {
int __init amd_iommu_init_passthrough(void)
{
- struct amd_iommu *iommu;
+ struct iommu_dev_data *dev_data;
struct pci_dev *dev = NULL;
+ struct amd_iommu *iommu;
u16 devid;
+ int ret;
- /* allocate passthrough domain */
- pt_domain = protection_domain_alloc();
- if (!pt_domain)
- return -ENOMEM;
-
- pt_domain->mode |= PAGE_MODE_NONE;
+ ret = alloc_passthrough_domain();
+ if (ret)
+ return ret;
for_each_pci_dev(dev) {
if (!check_device(&dev->dev))
continue;
+ dev_data = get_dev_data(&dev->dev);
+ dev_data->passthrough = true;
+
devid = get_device_id(&dev->dev);
iommu = amd_iommu_rlookup_table[devid];
@@ -2820,7 +3259,375 @@ int __init amd_iommu_init_passthrough(void)
attach_device(&dev->dev, pt_domain);
}
+ amd_iommu_stats_init();
+
pr_info("AMD-Vi: Initialized for Passthrough Mode\n");
return 0;
}
+
+/* IOMMUv2 specific functions */
+int amd_iommu_register_ppr_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&ppr_notifier, nb);
+}
+EXPORT_SYMBOL(amd_iommu_register_ppr_notifier);
+
+int amd_iommu_unregister_ppr_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&ppr_notifier, nb);
+}
+EXPORT_SYMBOL(amd_iommu_unregister_ppr_notifier);
+
+void amd_iommu_domain_direct_map(struct iommu_domain *dom)
+{
+ struct protection_domain *domain = dom->priv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&domain->lock, flags);
+
+ /* Update data structure */
+ domain->mode = PAGE_MODE_NONE;
+ domain->updated = true;
+
+ /* Make changes visible to IOMMUs */
+ update_domain(domain);
+
+ /* Page-table is not visible to IOMMU anymore, so free it */
+ free_pagetable(domain);
+
+ spin_unlock_irqrestore(&domain->lock, flags);
+}
+EXPORT_SYMBOL(amd_iommu_domain_direct_map);
+
+int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids)
+{
+ struct protection_domain *domain = dom->priv;
+ unsigned long flags;
+ int levels, ret;
+
+ if (pasids <= 0 || pasids > (PASID_MASK + 1))
+ return -EINVAL;
+
+ /* Number of GCR3 table levels required */
+ for (levels = 0; (pasids - 1) & ~0x1ff; pasids >>= 9)
+ levels += 1;
+
+ if (levels > amd_iommu_max_glx_val)
+ return -EINVAL;
+
+ spin_lock_irqsave(&domain->lock, flags);
+
+ /*
+	 * Save us all the sanity checks of whether devices already in the
+	 * domain support IOMMUv2.  Just require that the domain has no
+	 * devices attached when it is switched into IOMMUv2 mode.
+ */
+ ret = -EBUSY;
+ if (domain->dev_cnt > 0 || domain->flags & PD_IOMMUV2_MASK)
+ goto out;
+
+ ret = -ENOMEM;
+ domain->gcr3_tbl = (void *)get_zeroed_page(GFP_ATOMIC);
+ if (domain->gcr3_tbl == NULL)
+ goto out;
+
+ domain->glx = levels;
+ domain->flags |= PD_IOMMUV2_MASK;
+ domain->updated = true;
+
+ update_domain(domain);
+
+ ret = 0;
+
+out:
+ spin_unlock_irqrestore(&domain->lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(amd_iommu_domain_enable_v2);
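+
+/*
+ * Editorial example, not part of the original patch: each GCR3 level
+ * resolves 9 bits of the PASID, so the loop above yields levels = 0 for
+ * up to 512 PASIDs, levels = 1 for up to 512 * 512, and levels = 2 for
+ * the full 20-bit PASID space; the result is bounded by
+ * amd_iommu_max_glx_val.
+ */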
+
+static int __flush_pasid(struct protection_domain *domain, int pasid,
+ u64 address, bool size)
+{
+ struct iommu_dev_data *dev_data;
+ struct iommu_cmd cmd;
+ int i, ret;
+
+ if (!(domain->flags & PD_IOMMUV2_MASK))
+ return -EINVAL;
+
+ build_inv_iommu_pasid(&cmd, domain->id, pasid, address, size);
+
+ /*
+ * IOMMU TLB needs to be flushed before Device TLB to
+ * prevent device TLB refill from IOMMU TLB
+ */
+ for (i = 0; i < amd_iommus_present; ++i) {
+ if (domain->dev_iommu[i] == 0)
+ continue;
+
+ ret = iommu_queue_command(amd_iommus[i], &cmd);
+ if (ret != 0)
+ goto out;
+ }
+
+ /* Wait until IOMMU TLB flushes are complete */
+ domain_flush_complete(domain);
+
+ /* Now flush device TLBs */
+ list_for_each_entry(dev_data, &domain->dev_list, list) {
+ struct amd_iommu *iommu;
+ int qdep;
+
+ BUG_ON(!dev_data->ats.enabled);
+
+ qdep = dev_data->ats.qdep;
+ iommu = amd_iommu_rlookup_table[dev_data->devid];
+
+ build_inv_iotlb_pasid(&cmd, dev_data->devid, pasid,
+ qdep, address, size);
+
+ ret = iommu_queue_command(iommu, &cmd);
+ if (ret != 0)
+ goto out;
+ }
+
+ /* Wait until all device TLBs are flushed */
+ domain_flush_complete(domain);
+
+ ret = 0;
+
+out:
+
+ return ret;
+}
+
+static int __amd_iommu_flush_page(struct protection_domain *domain, int pasid,
+ u64 address)
+{
+ INC_STATS_COUNTER(invalidate_iotlb);
+
+ return __flush_pasid(domain, pasid, address, false);
+}
+
+int amd_iommu_flush_page(struct iommu_domain *dom, int pasid,
+ u64 address)
+{
+ struct protection_domain *domain = dom->priv;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&domain->lock, flags);
+ ret = __amd_iommu_flush_page(domain, pasid, address);
+ spin_unlock_irqrestore(&domain->lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(amd_iommu_flush_page);
+
+static int __amd_iommu_flush_tlb(struct protection_domain *domain, int pasid)
+{
+ INC_STATS_COUNTER(invalidate_iotlb_all);
+
+ return __flush_pasid(domain, pasid, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
+ true);
+}
+
+int amd_iommu_flush_tlb(struct iommu_domain *dom, int pasid)
+{
+ struct protection_domain *domain = dom->priv;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&domain->lock, flags);
+ ret = __amd_iommu_flush_tlb(domain, pasid);
+ spin_unlock_irqrestore(&domain->lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(amd_iommu_flush_tlb);
+
+static u64 *__get_gcr3_pte(u64 *root, int level, int pasid, bool alloc)
+{
+ int index;
+ u64 *pte;
+
+ while (true) {
+
+ index = (pasid >> (9 * level)) & 0x1ff;
+ pte = &root[index];
+
+ if (level == 0)
+ break;
+
+ if (!(*pte & GCR3_VALID)) {
+ if (!alloc)
+ return NULL;
+
+ root = (void *)get_zeroed_page(GFP_ATOMIC);
+ if (root == NULL)
+ return NULL;
+
+ *pte = __pa(root) | GCR3_VALID;
+ }
+
+ root = __va(*pte & PAGE_MASK);
+
+ level -= 1;
+ }
+
+ return pte;
+}
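+
+/*
+ * Editorial example, not part of the original patch: __get_gcr3_pte()
+ * walks the GCR3 table as a radix tree with 512 entries per level.  With
+ * glx = 1 and pasid = 0x12345 the level-1 index is (0x12345 >> 9) & 0x1ff
+ * = 0x91 and the level-0 index is 0x12345 & 0x1ff = 0x145; missing
+ * intermediate pages are allocated on demand when 'alloc' is true.
+ */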
+
+static int __set_gcr3(struct protection_domain *domain, int pasid,
+ unsigned long cr3)
+{
+ u64 *pte;
+
+ if (domain->mode != PAGE_MODE_NONE)
+ return -EINVAL;
+
+ pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, true);
+ if (pte == NULL)
+ return -ENOMEM;
+
+ *pte = (cr3 & PAGE_MASK) | GCR3_VALID;
+
+ return __amd_iommu_flush_tlb(domain, pasid);
+}
+
+static int __clear_gcr3(struct protection_domain *domain, int pasid)
+{
+ u64 *pte;
+
+ if (domain->mode != PAGE_MODE_NONE)
+ return -EINVAL;
+
+ pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, false);
+ if (pte == NULL)
+ return 0;
+
+ *pte = 0;
+
+ return __amd_iommu_flush_tlb(domain, pasid);
+}
+
+int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, int pasid,
+ unsigned long cr3)
+{
+ struct protection_domain *domain = dom->priv;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&domain->lock, flags);
+ ret = __set_gcr3(domain, pasid, cr3);
+ spin_unlock_irqrestore(&domain->lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(amd_iommu_domain_set_gcr3);
+
+int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, int pasid)
+{
+ struct protection_domain *domain = dom->priv;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&domain->lock, flags);
+ ret = __clear_gcr3(domain, pasid);
+ spin_unlock_irqrestore(&domain->lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(amd_iommu_domain_clear_gcr3);
+
+int amd_iommu_complete_ppr(struct pci_dev *pdev, int pasid,
+ int status, int tag)
+{
+ struct iommu_dev_data *dev_data;
+ struct amd_iommu *iommu;
+ struct iommu_cmd cmd;
+
+ INC_STATS_COUNTER(complete_ppr);
+
+ dev_data = get_dev_data(&pdev->dev);
+ iommu = amd_iommu_rlookup_table[dev_data->devid];
+
+ build_complete_ppr(&cmd, dev_data->devid, pasid, status,
+ tag, dev_data->pri_tlp);
+
+ return iommu_queue_command(iommu, &cmd);
+}
+EXPORT_SYMBOL(amd_iommu_complete_ppr);
+
+struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev)
+{
+ struct protection_domain *domain;
+
+ domain = get_domain(&pdev->dev);
+ if (IS_ERR(domain))
+ return NULL;
+
+ /* Only return IOMMUv2 domains */
+ if (!(domain->flags & PD_IOMMUV2_MASK))
+ return NULL;
+
+ return domain->iommu_domain;
+}
+EXPORT_SYMBOL(amd_iommu_get_v2_domain);
+
+void amd_iommu_enable_device_erratum(struct pci_dev *pdev, u32 erratum)
+{
+ struct iommu_dev_data *dev_data;
+
+ if (!amd_iommu_v2_supported())
+ return;
+
+ dev_data = get_dev_data(&pdev->dev);
+ dev_data->errata |= (1 << erratum);
+}
+EXPORT_SYMBOL(amd_iommu_enable_device_erratum);
+
+int amd_iommu_device_info(struct pci_dev *pdev,
+ struct amd_iommu_device_info *info)
+{
+ int max_pasids;
+ int pos;
+
+ if (pdev == NULL || info == NULL)
+ return -EINVAL;
+
+ if (!amd_iommu_v2_supported())
+ return -EINVAL;
+
+ memset(info, 0, sizeof(*info));
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS);
+ if (pos)
+ info->flags |= AMD_IOMMU_DEVICE_FLAG_ATS_SUP;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
+ if (pos)
+ info->flags |= AMD_IOMMU_DEVICE_FLAG_PRI_SUP;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
+ if (pos) {
+ int features;
+
+ max_pasids = 1 << (9 * (amd_iommu_max_glx_val + 1));
+ max_pasids = min(max_pasids, (1 << 20));
+
+ info->flags |= AMD_IOMMU_DEVICE_FLAG_PASID_SUP;
+ info->max_pasids = min(pci_max_pasids(pdev), max_pasids);
+
+ features = pci_pasid_features(pdev);
+ if (features & PCI_PASID_CAP_EXEC)
+ info->flags |= AMD_IOMMU_DEVICE_FLAG_EXEC_SUP;
+ if (features & PCI_PASID_CAP_PRIV)
+ info->flags |= AMD_IOMMU_DEVICE_FLAG_PRIV_SUP;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(amd_iommu_device_info);
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 82d2410f4205..bdea288dc185 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -25,6 +25,7 @@
#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/amd-iommu.h>
+#include <linux/export.h>
#include <asm/pci-direct.h>
#include <asm/iommu.h>
#include <asm/gart.h>
@@ -141,6 +142,12 @@ int amd_iommus_present;
bool amd_iommu_np_cache __read_mostly;
bool amd_iommu_iotlb_sup __read_mostly = true;
+u32 amd_iommu_max_pasids __read_mostly = ~0;
+
+bool amd_iommu_v2_present __read_mostly;
+
+bool amd_iommu_force_isolation __read_mostly;
+
/*
* The ACPI table parsing functions set this variable on an error
*/
@@ -299,6 +306,16 @@ static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}
+static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout)
+{
+ u32 ctrl;
+
+ ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
+ ctrl &= ~CTRL_INV_TO_MASK;
+ ctrl |= (timeout << CONTROL_INV_TIMEOUT) & CTRL_INV_TO_MASK;
+ writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
+}
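+
+/*
+ * Editorial note, not part of the original patch: the invalidation timeout
+ * is a 3-bit field starting at bit CONTROL_INV_TIMEOUT (5) of the control
+ * register; iommu_init_flags() below programs CTRL_INV_TO_1S (4) so that
+ * pending invalidations time out after roughly one second.
+ */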
+
/* Function to enable the hardware */
static void iommu_enable(struct amd_iommu *iommu)
{
@@ -581,21 +598,69 @@ static void __init free_event_buffer(struct amd_iommu *iommu)
free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
}
+/* allocates the memory where the IOMMU will log peripheral page requests to */
+static u8 * __init alloc_ppr_log(struct amd_iommu *iommu)
+{
+ iommu->ppr_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+ get_order(PPR_LOG_SIZE));
+
+ if (iommu->ppr_log == NULL)
+ return NULL;
+
+ return iommu->ppr_log;
+}
+
+static void iommu_enable_ppr_log(struct amd_iommu *iommu)
+{
+ u64 entry;
+
+ if (iommu->ppr_log == NULL)
+ return;
+
+ entry = (u64)virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;
+
+ memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET,
+ &entry, sizeof(entry));
+
+ /* set head and tail to zero manually */
+ writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
+ writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
+
+ iommu_feature_enable(iommu, CONTROL_PPFLOG_EN);
+ iommu_feature_enable(iommu, CONTROL_PPR_EN);
+}
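+
+/*
+ * Editorial note, not part of the original patch: the value written to the
+ * PPR log base register combines the physical address of the buffer with
+ * its size encoding - PPR_LOG_SIZE_512 (0x9ULL << 56) selects 512 entries
+ * of 16 bytes, matching the 8 KiB PPR_LOG_SIZE allocation above.
+ */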
+
+static void __init free_ppr_log(struct amd_iommu *iommu)
+{
+ if (iommu->ppr_log == NULL)
+ return;
+
+ free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE));
+}
+
+static void iommu_enable_gt(struct amd_iommu *iommu)
+{
+ if (!iommu_feature(iommu, FEATURE_GT))
+ return;
+
+ iommu_feature_enable(iommu, CONTROL_GT_EN);
+}
+
/* sets a specific bit in the device table entry. */
static void set_dev_entry_bit(u16 devid, u8 bit)
{
- int i = (bit >> 5) & 0x07;
- int _bit = bit & 0x1f;
+ int i = (bit >> 6) & 0x03;
+ int _bit = bit & 0x3f;
- amd_iommu_dev_table[devid].data[i] |= (1 << _bit);
+ amd_iommu_dev_table[devid].data[i] |= (1UL << _bit);
}
static int get_dev_entry_bit(u16 devid, u8 bit)
{
- int i = (bit >> 5) & 0x07;
- int _bit = bit & 0x1f;
+ int i = (bit >> 6) & 0x03;
+ int _bit = bit & 0x3f;
- return (amd_iommu_dev_table[devid].data[i] & (1 << _bit)) >> _bit;
+ return (amd_iommu_dev_table[devid].data[i] & (1UL << _bit)) >> _bit;
}
@@ -699,6 +764,32 @@ static void __init init_iommu_from_pci(struct amd_iommu *iommu)
iommu->features = ((u64)high << 32) | low;
+ if (iommu_feature(iommu, FEATURE_GT)) {
+ int glxval;
+ u32 pasids;
+ u64 shift;
+
+ shift = iommu->features & FEATURE_PASID_MASK;
+ shift >>= FEATURE_PASID_SHIFT;
+ pasids = (1 << shift);
+
+ amd_iommu_max_pasids = min(amd_iommu_max_pasids, pasids);
+
+ glxval = iommu->features & FEATURE_GLXVAL_MASK;
+ glxval >>= FEATURE_GLXVAL_SHIFT;
+
+ if (amd_iommu_max_glx_val == -1)
+ amd_iommu_max_glx_val = glxval;
+ else
+ amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
+ }
+
+ if (iommu_feature(iommu, FEATURE_GT) &&
+ iommu_feature(iommu, FEATURE_PPR)) {
+ iommu->is_iommu_v2 = true;
+ amd_iommu_v2_present = true;
+ }
+
if (!is_rd890_iommu(iommu->dev))
return;
@@ -901,6 +992,7 @@ static void __init free_iommu_one(struct amd_iommu *iommu)
{
free_command_buffer(iommu);
free_event_buffer(iommu);
+ free_ppr_log(iommu);
iommu_unmap_mmio_space(iommu);
}
@@ -964,6 +1056,12 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
init_iommu_from_acpi(iommu, h);
init_iommu_devices(iommu);
+ if (iommu_feature(iommu, FEATURE_PPR)) {
+ iommu->ppr_log = alloc_ppr_log(iommu);
+ if (!iommu->ppr_log)
+ return -ENOMEM;
+ }
+
if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
amd_iommu_np_cache = true;
@@ -1050,6 +1148,9 @@ static int iommu_setup_msi(struct amd_iommu *iommu)
iommu->int_enabled = true;
iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
+ if (iommu->ppr_log != NULL)
+ iommu_feature_enable(iommu, CONTROL_PPFINT_EN);
+
return 0;
}
@@ -1209,6 +1310,9 @@ static void iommu_init_flags(struct amd_iommu *iommu)
* make IOMMU memory accesses cache coherent
*/
iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
+
+ /* Set IOTLB invalidation timeout to 1s */
+ iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S);
}
static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
@@ -1274,6 +1378,8 @@ static void enable_iommus(void)
iommu_set_device_table(iommu);
iommu_enable_command_buffer(iommu);
iommu_enable_event_buffer(iommu);
+ iommu_enable_ppr_log(iommu);
+ iommu_enable_gt(iommu);
iommu_set_exclusion_range(iommu);
iommu_init_msi(iommu);
iommu_enable(iommu);
@@ -1303,13 +1409,6 @@ static void amd_iommu_resume(void)
/* re-load the hardware */
enable_iommus();
-
- /*
- * we have to flush after the IOMMUs are enabled because a
- * disabled IOMMU will never execute the commands we send
- */
- for_each_iommu(iommu)
- iommu_flush_all_caches(iommu);
}
static int amd_iommu_suspend(void)
@@ -1560,6 +1659,8 @@ static int __init parse_amd_iommu_options(char *str)
amd_iommu_unmap_flush = true;
if (strncmp(str, "off", 3) == 0)
amd_iommu_disabled = true;
+ if (strncmp(str, "force_isolation", 15) == 0)
+ amd_iommu_force_isolation = true;
}
return 1;
@@ -1572,3 +1673,9 @@ IOMMU_INIT_FINISH(amd_iommu_detect,
gart_iommu_hole_init,
0,
0);
+
+bool amd_iommu_v2_supported(void)
+{
+ return amd_iommu_v2_present;
+}
+EXPORT_SYMBOL(amd_iommu_v2_supported);
diff --git a/drivers/iommu/amd_iommu_proto.h b/drivers/iommu/amd_iommu_proto.h
index 7ffaa64410b0..1a7f41c6cc66 100644
--- a/drivers/iommu/amd_iommu_proto.h
+++ b/drivers/iommu/amd_iommu_proto.h
@@ -31,6 +31,30 @@ extern int amd_iommu_init_devices(void);
extern void amd_iommu_uninit_devices(void);
extern void amd_iommu_init_notifier(void);
extern void amd_iommu_init_api(void);
+
+/* IOMMUv2 specific functions */
+struct iommu_domain;
+
+extern bool amd_iommu_v2_supported(void);
+extern int amd_iommu_register_ppr_notifier(struct notifier_block *nb);
+extern int amd_iommu_unregister_ppr_notifier(struct notifier_block *nb);
+extern void amd_iommu_domain_direct_map(struct iommu_domain *dom);
+extern int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids);
+extern int amd_iommu_flush_page(struct iommu_domain *dom, int pasid,
+ u64 address);
+extern int amd_iommu_flush_tlb(struct iommu_domain *dom, int pasid);
+extern int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, int pasid,
+ unsigned long cr3);
+extern int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, int pasid);
+extern struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev);
+
+#define PPR_SUCCESS 0x0
+#define PPR_INVALID 0x1
+#define PPR_FAILURE 0xf
+
+extern int amd_iommu_complete_ppr(struct pci_dev *pdev, int pasid,
+ int status, int tag);
+
#ifndef CONFIG_AMD_IOMMU_STATS
static inline void amd_iommu_stats_init(void) { }
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index 5b9c5075e81a..2452f3b71736 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -69,11 +69,14 @@
#define MMIO_EXCL_BASE_OFFSET 0x0020
#define MMIO_EXCL_LIMIT_OFFSET 0x0028
#define MMIO_EXT_FEATURES 0x0030
+#define MMIO_PPR_LOG_OFFSET 0x0038
#define MMIO_CMD_HEAD_OFFSET 0x2000
#define MMIO_CMD_TAIL_OFFSET 0x2008
#define MMIO_EVT_HEAD_OFFSET 0x2010
#define MMIO_EVT_TAIL_OFFSET 0x2018
#define MMIO_STATUS_OFFSET 0x2020
+#define MMIO_PPR_HEAD_OFFSET 0x2030
+#define MMIO_PPR_TAIL_OFFSET 0x2038
/* Extended Feature Bits */
@@ -87,8 +90,17 @@
#define FEATURE_HE (1ULL<<8)
#define FEATURE_PC (1ULL<<9)
+#define FEATURE_PASID_SHIFT 32
+#define FEATURE_PASID_MASK (0x1fULL << FEATURE_PASID_SHIFT)
+
+#define FEATURE_GLXVAL_SHIFT 14
+#define FEATURE_GLXVAL_MASK (0x03ULL << FEATURE_GLXVAL_SHIFT)
+
+#define PASID_MASK 0x000fffff
+
/* MMIO status bits */
-#define MMIO_STATUS_COM_WAIT_INT_MASK 0x04
+#define MMIO_STATUS_COM_WAIT_INT_MASK (1 << 2)
+#define MMIO_STATUS_PPR_INT_MASK (1 << 6)
/* event logging constants */
#define EVENT_ENTRY_SIZE 0x10
@@ -115,6 +127,7 @@
#define CONTROL_EVT_LOG_EN 0x02ULL
#define CONTROL_EVT_INT_EN 0x03ULL
#define CONTROL_COMWAIT_EN 0x04ULL
+#define CONTROL_INV_TIMEOUT 0x05ULL
#define CONTROL_PASSPW_EN 0x08ULL
#define CONTROL_RESPASSPW_EN 0x09ULL
#define CONTROL_COHERENT_EN 0x0aULL
@@ -122,18 +135,34 @@
#define CONTROL_CMDBUF_EN 0x0cULL
#define CONTROL_PPFLOG_EN 0x0dULL
#define CONTROL_PPFINT_EN 0x0eULL
+#define CONTROL_PPR_EN 0x0fULL
+#define CONTROL_GT_EN 0x10ULL
+
+#define CTRL_INV_TO_MASK (7 << CONTROL_INV_TIMEOUT)
+#define CTRL_INV_TO_NONE 0
+#define CTRL_INV_TO_1MS 1
+#define CTRL_INV_TO_10MS 2
+#define CTRL_INV_TO_100MS 3
+#define CTRL_INV_TO_1S 4
+#define CTRL_INV_TO_10S 5
+#define CTRL_INV_TO_100S 6
/* command specific defines */
#define CMD_COMPL_WAIT 0x01
#define CMD_INV_DEV_ENTRY 0x02
#define CMD_INV_IOMMU_PAGES 0x03
#define CMD_INV_IOTLB_PAGES 0x04
+#define CMD_COMPLETE_PPR 0x07
#define CMD_INV_ALL 0x08
#define CMD_COMPL_WAIT_STORE_MASK 0x01
#define CMD_COMPL_WAIT_INT_MASK 0x02
#define CMD_INV_IOMMU_PAGES_SIZE_MASK 0x01
#define CMD_INV_IOMMU_PAGES_PDE_MASK 0x02
+#define CMD_INV_IOMMU_PAGES_GN_MASK 0x04
+
+#define PPR_STATUS_MASK 0xf
+#define PPR_STATUS_SHIFT 12
#define CMD_INV_IOMMU_ALL_PAGES_ADDRESS 0x7fffffffffffffffULL
@@ -165,6 +194,23 @@
#define EVT_BUFFER_SIZE 8192 /* 512 entries */
#define EVT_LEN_MASK (0x9ULL << 56)
+/* Constants for PPR Log handling */
+#define PPR_LOG_ENTRIES 512
+#define PPR_LOG_SIZE_SHIFT 56
+#define PPR_LOG_SIZE_512 (0x9ULL << PPR_LOG_SIZE_SHIFT)
+#define PPR_ENTRY_SIZE 16
+#define PPR_LOG_SIZE (PPR_ENTRY_SIZE * PPR_LOG_ENTRIES)
+
+#define PPR_REQ_TYPE(x) (((x) >> 60) & 0xfULL)
+#define PPR_FLAGS(x) (((x) >> 48) & 0xfffULL)
+#define PPR_DEVID(x) ((x) & 0xffffULL)
+#define PPR_TAG(x) (((x) >> 32) & 0x3ffULL)
+#define PPR_PASID1(x) (((x) >> 16) & 0xffffULL)
+#define PPR_PASID2(x) (((x) >> 42) & 0xfULL)
+#define PPR_PASID(x) ((PPR_PASID2(x) << 16) | PPR_PASID1(x))
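+
+/*
+ * Editorial note, not part of the original patch: the 20-bit PASID is
+ * stored in a PPR log entry in two pieces - bits 15:0 at offset 16 and
+ * bits 19:16 at offset 42 - which PPR_PASID() reassembles as
+ * (PPR_PASID2(x) << 16) | PPR_PASID1(x).
+ */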
+
+#define PPR_REQ_FAULT 0x01
+
#define PAGE_MODE_NONE 0x00
#define PAGE_MODE_1_LEVEL 0x01
#define PAGE_MODE_2_LEVEL 0x02
@@ -230,7 +276,24 @@
#define IOMMU_PTE_IR (1ULL << 61)
#define IOMMU_PTE_IW (1ULL << 62)
-#define DTE_FLAG_IOTLB 0x01
+#define DTE_FLAG_IOTLB (0x01UL << 32)
+#define DTE_FLAG_GV (0x01ULL << 55)
+#define DTE_GLX_SHIFT (56)
+#define DTE_GLX_MASK (3)
+
+#define DTE_GCR3_VAL_A(x) (((x) >> 12) & 0x00007ULL)
+#define DTE_GCR3_VAL_B(x) (((x) >> 15) & 0x0ffffULL)
+#define DTE_GCR3_VAL_C(x) (((x) >> 31) & 0xfffffULL)
+
+#define DTE_GCR3_INDEX_A 0
+#define DTE_GCR3_INDEX_B 1
+#define DTE_GCR3_INDEX_C 1
+
+#define DTE_GCR3_SHIFT_A 58
+#define DTE_GCR3_SHIFT_B 16
+#define DTE_GCR3_SHIFT_C 43
+
+#define GCR3_VALID 0x01ULL
#define IOMMU_PAGE_MASK (((1ULL << 52) - 1) & ~0xfffULL)
#define IOMMU_PTE_PRESENT(pte) ((pte) & IOMMU_PTE_P)
@@ -257,6 +320,7 @@
domain for an IOMMU */
#define PD_PASSTHROUGH_MASK (1UL << 2) /* domain has no page
translation */
+#define PD_IOMMUV2_MASK (1UL << 3) /* domain has gcr3 table */
extern bool amd_iommu_dump;
#define DUMP_printk(format, arg...) \
@@ -285,6 +349,29 @@ extern bool amd_iommu_iotlb_sup;
#define APERTURE_RANGE_INDEX(a) ((a) >> APERTURE_RANGE_SHIFT)
#define APERTURE_PAGE_INDEX(a) (((a) >> 21) & 0x3fULL)
+
+/*
+ * This struct is used to pass information about
+ * incoming PPR faults around.
+ */
+struct amd_iommu_fault {
+	u64 address;    /* IO virtual address of the fault */
+ u32 pasid; /* Address space identifier */
+ u16 device_id; /* Originating PCI device id */
+ u16 tag; /* PPR tag */
+ u16 flags; /* Fault flags */
+
+};
+
+#define PPR_FAULT_EXEC (1 << 1)
+#define PPR_FAULT_READ (1 << 2)
+#define PPR_FAULT_WRITE (1 << 5)
+#define PPR_FAULT_USER (1 << 6)
+#define PPR_FAULT_RSVD (1 << 7)
+#define PPR_FAULT_GN (1 << 8)
+
+struct iommu_domain;
+
/*
* This structure contains generic data for IOMMU protection domains
* independent of their use.
@@ -297,11 +384,15 @@ struct protection_domain {
u16 id; /* the domain id written to the device table */
int mode; /* paging mode (0-6 levels) */
u64 *pt_root; /* page table root pointer */
+ int glx; /* Number of levels for GCR3 table */
+ u64 *gcr3_tbl; /* Guest CR3 table */
unsigned long flags; /* flags to find out type of domain */
bool updated; /* complete domain flush required */
unsigned dev_cnt; /* devices assigned to this domain */
unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */
void *priv; /* private data */
+ struct iommu_domain *iommu_domain; /* Pointer to generic
+ domain structure */
};
@@ -315,10 +406,15 @@ struct iommu_dev_data {
struct protection_domain *domain; /* Domain the device is bound to */
 	atomic_t bind;			  /* Domain attach reference count */
u16 devid; /* PCI Device ID */
+ bool iommu_v2; /* Device can make use of IOMMUv2 */
+ bool passthrough; /* Default for device is pt_domain */
struct {
bool enabled;
int qdep;
} ats; /* ATS state */
+	bool pri_tlp;			  /* PASID TLP required for
+ PPR completions */
+ u32 errata; /* Bitmap for errata to apply */
};
/*
@@ -399,6 +495,9 @@ struct amd_iommu {
/* Extended features */
u64 features;
+ /* IOMMUv2 */
+ bool is_iommu_v2;
+
/*
* Capability pointer. There could be more than one IOMMU per PCI
 	 * device function if there is more than one AMD IOMMU capability
@@ -431,6 +530,9 @@ struct amd_iommu {
/* MSI number for event interrupt */
u16 evt_msi_num;
+ /* Base of the PPR log, if present */
+ u8 *ppr_log;
+
/* true if interrupts for this IOMMU are already enabled */
bool int_enabled;
@@ -484,7 +586,7 @@ extern struct list_head amd_iommu_pd_list;
* Structure defining one entry in the device table
*/
struct dev_table_entry {
- u32 data[8];
+ u64 data[4];
};
/*
@@ -549,6 +651,16 @@ extern unsigned long *amd_iommu_pd_alloc_bitmap;
*/
extern bool amd_iommu_unmap_flush;
+/* Smallest number of PASIDs supported by any IOMMU in the system */
+extern u32 amd_iommu_max_pasids;
+
+extern bool amd_iommu_v2_present;
+
+extern bool amd_iommu_force_isolation;
+
+/* Maximum GLX value supported by all IOMMUs in the system */
+extern int amd_iommu_max_glx_val;
+
/* takes bus and device/function and returns the device id
* FIXME: should that be in generic PCI code? */
static inline u16 calc_devid(u8 bus, u8 devfn)
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
new file mode 100644
index 000000000000..8add9f125d3e
--- /dev/null
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -0,0 +1,994 @@
+/*
+ * Copyright (C) 2010-2012 Advanced Micro Devices, Inc.
+ * Author: Joerg Roedel <joerg.roedel@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/mmu_notifier.h>
+#include <linux/amd-iommu.h>
+#include <linux/mm_types.h>
+#include <linux/profile.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/iommu.h>
+#include <linux/wait.h>
+#include <linux/pci.h>
+#include <linux/gfp.h>
+
+#include "amd_iommu_types.h"
+#include "amd_iommu_proto.h"
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Joerg Roedel <joerg.roedel@amd.com>");
+
+#define MAX_DEVICES 0x10000
+#define PRI_QUEUE_SIZE 512
+
+struct pri_queue {
+ atomic_t inflight;
+ bool finish;
+ int status;
+};
+
+struct pasid_state {
+ struct list_head list; /* For global state-list */
+ atomic_t count; /* Reference count */
+ struct task_struct *task; /* Task bound to this PASID */
+ struct mm_struct *mm; /* mm_struct for the faults */
+	struct mmu_notifier mn;                 /* mmu_notifier handle */
+ struct pri_queue pri[PRI_QUEUE_SIZE]; /* PRI tag states */
+ struct device_state *device_state; /* Link to our device_state */
+ int pasid; /* PASID index */
+ spinlock_t lock; /* Protect pri_queues */
+ wait_queue_head_t wq; /* To wait for count == 0 */
+};
+
+struct device_state {
+ atomic_t count;
+ struct pci_dev *pdev;
+ struct pasid_state **states;
+ struct iommu_domain *domain;
+ int pasid_levels;
+ int max_pasids;
+ amd_iommu_invalid_ppr_cb inv_ppr_cb;
+ amd_iommu_invalidate_ctx inv_ctx_cb;
+ spinlock_t lock;
+ wait_queue_head_t wq;
+};
+
+struct fault {
+ struct work_struct work;
+ struct device_state *dev_state;
+ struct pasid_state *state;
+ struct mm_struct *mm;
+ u64 address;
+ u16 devid;
+ u16 pasid;
+ u16 tag;
+ u16 finish;
+ u16 flags;
+};
+
+struct device_state **state_table;
+static spinlock_t state_lock;
+
+/* List and lock for all pasid_states */
+static LIST_HEAD(pasid_state_list);
+static DEFINE_SPINLOCK(ps_lock);
+
+static struct workqueue_struct *iommu_wq;
+
+/*
+ * Empty page table - Used between
+ * mmu_notifier_invalidate_range_start and
+ * mmu_notifier_invalidate_range_end
+ */
+static u64 *empty_page_table;
+
+static void free_pasid_states(struct device_state *dev_state);
+static void unbind_pasid(struct device_state *dev_state, int pasid);
+static int task_exit(struct notifier_block *nb, unsigned long e, void *data);
+
+static u16 device_id(struct pci_dev *pdev)
+{
+ u16 devid;
+
+ devid = pdev->bus->number;
+ devid = (devid << 8) | pdev->devfn;
+
+ return devid;
+}
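+
+/*
+ * Editorial example, not part of the original patch: device_id() packs bus
+ * and devfn into the 16-bit requester ID used to index state_table, e.g.
+ * bus 0x02, devfn 0x10 (slot 2, function 0) yields devid 0x0210.
+ */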
+
+static struct device_state *get_device_state(u16 devid)
+{
+ struct device_state *dev_state;
+ unsigned long flags;
+
+ spin_lock_irqsave(&state_lock, flags);
+ dev_state = state_table[devid];
+ if (dev_state != NULL)
+ atomic_inc(&dev_state->count);
+ spin_unlock_irqrestore(&state_lock, flags);
+
+ return dev_state;
+}
+
+static void free_device_state(struct device_state *dev_state)
+{
+ /*
+ * First detach device from domain - No more PRI requests will arrive
+ * from that device after it is unbound from the IOMMUv2 domain.
+ */
+ iommu_detach_device(dev_state->domain, &dev_state->pdev->dev);
+
+ /* Everything is down now, free the IOMMUv2 domain */
+ iommu_domain_free(dev_state->domain);
+
+ /* Finally get rid of the device-state */
+ kfree(dev_state);
+}
+
+static void put_device_state(struct device_state *dev_state)
+{
+ if (atomic_dec_and_test(&dev_state->count))
+ wake_up(&dev_state->wq);
+}
+
+static void put_device_state_wait(struct device_state *dev_state)
+{
+ DEFINE_WAIT(wait);
+
+ prepare_to_wait(&dev_state->wq, &wait, TASK_UNINTERRUPTIBLE);
+ if (!atomic_dec_and_test(&dev_state->count))
+ schedule();
+ finish_wait(&dev_state->wq, &wait);
+
+ free_device_state(dev_state);
+}
+
+static struct notifier_block profile_nb = {
+ .notifier_call = task_exit,
+};
+
+static void link_pasid_state(struct pasid_state *pasid_state)
+{
+ spin_lock(&ps_lock);
+ list_add_tail(&pasid_state->list, &pasid_state_list);
+ spin_unlock(&ps_lock);
+}
+
+static void __unlink_pasid_state(struct pasid_state *pasid_state)
+{
+ list_del(&pasid_state->list);
+}
+
+static void unlink_pasid_state(struct pasid_state *pasid_state)
+{
+ spin_lock(&ps_lock);
+ __unlink_pasid_state(pasid_state);
+ spin_unlock(&ps_lock);
+}
+
+/* Must be called under dev_state->lock */
+static struct pasid_state **__get_pasid_state_ptr(struct device_state *dev_state,
+ int pasid, bool alloc)
+{
+ struct pasid_state **root, **ptr;
+ int level, index;
+
+ level = dev_state->pasid_levels;
+ root = dev_state->states;
+
+ while (true) {
+
+ index = (pasid >> (9 * level)) & 0x1ff;
+ ptr = &root[index];
+
+ if (level == 0)
+ break;
+
+ if (*ptr == NULL) {
+ if (!alloc)
+ return NULL;
+
+ *ptr = (void *)get_zeroed_page(GFP_ATOMIC);
+ if (*ptr == NULL)
+ return NULL;
+ }
+
+ root = (struct pasid_state **)*ptr;
+ level -= 1;
+ }
+
+ return ptr;
+}
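+
+/*
+ * Editorial note, not part of the original patch: dev_state->states mirrors
+ * the hardware GCR3 layout in software - a one- or two-level table with
+ * 512 pointers per page, indexed by 9-bit chunks of the PASID, with
+ * intermediate pages allocated on demand when 'alloc' is true.
+ */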
+
+static int set_pasid_state(struct device_state *dev_state,
+ struct pasid_state *pasid_state,
+ int pasid)
+{
+ struct pasid_state **ptr;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&dev_state->lock, flags);
+ ptr = __get_pasid_state_ptr(dev_state, pasid, true);
+
+ ret = -ENOMEM;
+ if (ptr == NULL)
+ goto out_unlock;
+
+ ret = -ENOMEM;
+ if (*ptr != NULL)
+ goto out_unlock;
+
+ *ptr = pasid_state;
+
+ ret = 0;
+
+out_unlock:
+ spin_unlock_irqrestore(&dev_state->lock, flags);
+
+ return ret;
+}
+
+static void clear_pasid_state(struct device_state *dev_state, int pasid)
+{
+ struct pasid_state **ptr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_state->lock, flags);
+ ptr = __get_pasid_state_ptr(dev_state, pasid, true);
+
+ if (ptr == NULL)
+ goto out_unlock;
+
+ *ptr = NULL;
+
+out_unlock:
+ spin_unlock_irqrestore(&dev_state->lock, flags);
+}
+
+static struct pasid_state *get_pasid_state(struct device_state *dev_state,
+ int pasid)
+{
+ struct pasid_state **ptr, *ret = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_state->lock, flags);
+ ptr = __get_pasid_state_ptr(dev_state, pasid, false);
+
+ if (ptr == NULL)
+ goto out_unlock;
+
+ ret = *ptr;
+ if (ret)
+ atomic_inc(&ret->count);
+
+out_unlock:
+ spin_unlock_irqrestore(&dev_state->lock, flags);
+
+ return ret;
+}
+
+static void free_pasid_state(struct pasid_state *pasid_state)
+{
+ kfree(pasid_state);
+}
+
+static void put_pasid_state(struct pasid_state *pasid_state)
+{
+ if (atomic_dec_and_test(&pasid_state->count)) {
+ put_device_state(pasid_state->device_state);
+ wake_up(&pasid_state->wq);
+ }
+}
+
+static void put_pasid_state_wait(struct pasid_state *pasid_state)
+{
+ DEFINE_WAIT(wait);
+
+ prepare_to_wait(&pasid_state->wq, &wait, TASK_UNINTERRUPTIBLE);
+
+ if (atomic_dec_and_test(&pasid_state->count))
+ put_device_state(pasid_state->device_state);
+ else
+ schedule();
+
+ finish_wait(&pasid_state->wq, &wait);
+ mmput(pasid_state->mm);
+ free_pasid_state(pasid_state);
+}
+
+static void __unbind_pasid(struct pasid_state *pasid_state)
+{
+ struct iommu_domain *domain;
+
+ domain = pasid_state->device_state->domain;
+
+ amd_iommu_domain_clear_gcr3(domain, pasid_state->pasid);
+ clear_pasid_state(pasid_state->device_state, pasid_state->pasid);
+
+ /* Make sure no more pending faults are in the queue */
+ flush_workqueue(iommu_wq);
+
+ mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);
+
+ put_pasid_state(pasid_state); /* Reference taken in bind() function */
+}
+
+static void unbind_pasid(struct device_state *dev_state, int pasid)
+{
+ struct pasid_state *pasid_state;
+
+ pasid_state = get_pasid_state(dev_state, pasid);
+ if (pasid_state == NULL)
+ return;
+
+ unlink_pasid_state(pasid_state);
+ __unbind_pasid(pasid_state);
+ put_pasid_state_wait(pasid_state); /* Reference taken in this function */
+}
+
+static void free_pasid_states_level1(struct pasid_state **tbl)
+{
+ int i;
+
+ for (i = 0; i < 512; ++i) {
+ if (tbl[i] == NULL)
+ continue;
+
+ free_page((unsigned long)tbl[i]);
+ }
+}
+
+static void free_pasid_states_level2(struct pasid_state **tbl)
+{
+ struct pasid_state **ptr;
+ int i;
+
+ for (i = 0; i < 512; ++i) {
+ if (tbl[i] == NULL)
+ continue;
+
+ ptr = (struct pasid_state **)tbl[i];
+ free_pasid_states_level1(ptr);
+ }
+}
+
+static void free_pasid_states(struct device_state *dev_state)
+{
+ struct pasid_state *pasid_state;
+ int i;
+
+ for (i = 0; i < dev_state->max_pasids; ++i) {
+ pasid_state = get_pasid_state(dev_state, i);
+ if (pasid_state == NULL)
+ continue;
+
+ put_pasid_state(pasid_state);
+ unbind_pasid(dev_state, i);
+ }
+
+ if (dev_state->pasid_levels == 2)
+ free_pasid_states_level2(dev_state->states);
+ else if (dev_state->pasid_levels == 1)
+ free_pasid_states_level1(dev_state->states);
+ else if (dev_state->pasid_levels != 0)
+ BUG();
+
+ free_page((unsigned long)dev_state->states);
+}
+
+static struct pasid_state *mn_to_state(struct mmu_notifier *mn)
+{
+ return container_of(mn, struct pasid_state, mn);
+}
+
+static void __mn_flush_page(struct mmu_notifier *mn,
+ unsigned long address)
+{
+ struct pasid_state *pasid_state;
+ struct device_state *dev_state;
+
+ pasid_state = mn_to_state(mn);
+ dev_state = pasid_state->device_state;
+
+ amd_iommu_flush_page(dev_state->domain, pasid_state->pasid, address);
+}
+
+static int mn_clear_flush_young(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long address)
+{
+ __mn_flush_page(mn, address);
+
+ return 0;
+}
+
+static void mn_change_pte(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long address,
+ pte_t pte)
+{
+ __mn_flush_page(mn, address);
+}
+
+static void mn_invalidate_page(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long address)
+{
+ __mn_flush_page(mn, address);
+}
+
+static void mn_invalidate_range_start(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+ struct pasid_state *pasid_state;
+ struct device_state *dev_state;
+
+ pasid_state = mn_to_state(mn);
+ dev_state = pasid_state->device_state;
+
+ amd_iommu_domain_set_gcr3(dev_state->domain, pasid_state->pasid,
+ __pa(empty_page_table));
+}
+
+static void mn_invalidate_range_end(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+ struct pasid_state *pasid_state;
+ struct device_state *dev_state;
+
+ pasid_state = mn_to_state(mn);
+ dev_state = pasid_state->device_state;
+
+ amd_iommu_domain_set_gcr3(dev_state->domain, pasid_state->pasid,
+ __pa(pasid_state->mm->pgd));
+}
+
+static struct mmu_notifier_ops iommu_mn = {
+ .clear_flush_young = mn_clear_flush_young,
+ .change_pte = mn_change_pte,
+ .invalidate_page = mn_invalidate_page,
+ .invalidate_range_start = mn_invalidate_range_start,
+ .invalidate_range_end = mn_invalidate_range_end,
+};
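+
+/*
+ * Editorial note, not part of the original patch: while a range
+ * invalidation is in progress the PASID's GCR3 points at empty_page_table,
+ * so device accesses fault instead of using stale translations;
+ * mn_invalidate_range_end() switches the GCR3 back to the real mm->pgd.
+ */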
+
+static void set_pri_tag_status(struct pasid_state *pasid_state,
+ u16 tag, int status)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&pasid_state->lock, flags);
+ pasid_state->pri[tag].status = status;
+ spin_unlock_irqrestore(&pasid_state->lock, flags);
+}
+
+static void finish_pri_tag(struct device_state *dev_state,
+ struct pasid_state *pasid_state,
+ u16 tag)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&pasid_state->lock, flags);
+ if (atomic_dec_and_test(&pasid_state->pri[tag].inflight) &&
+ pasid_state->pri[tag].finish) {
+ amd_iommu_complete_ppr(dev_state->pdev, pasid_state->pasid,
+ pasid_state->pri[tag].status, tag);
+ pasid_state->pri[tag].finish = false;
+ pasid_state->pri[tag].status = PPR_SUCCESS;
+ }
+ spin_unlock_irqrestore(&pasid_state->lock, flags);
+}
+
+static void do_fault(struct work_struct *work)
+{
+ struct fault *fault = container_of(work, struct fault, work);
+ int npages, write;
+ struct page *page;
+
+ write = !!(fault->flags & PPR_FAULT_WRITE);
+
+ npages = get_user_pages(fault->state->task, fault->state->mm,
+ fault->address, 1, write, 0, &page, NULL);
+
+ if (npages == 1) {
+ put_page(page);
+ } else if (fault->dev_state->inv_ppr_cb) {
+ int status;
+
+ status = fault->dev_state->inv_ppr_cb(fault->dev_state->pdev,
+ fault->pasid,
+ fault->address,
+ fault->flags);
+ switch (status) {
+ case AMD_IOMMU_INV_PRI_RSP_SUCCESS:
+ set_pri_tag_status(fault->state, fault->tag, PPR_SUCCESS);
+ break;
+ case AMD_IOMMU_INV_PRI_RSP_INVALID:
+ set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
+ break;
+ case AMD_IOMMU_INV_PRI_RSP_FAIL:
+ set_pri_tag_status(fault->state, fault->tag, PPR_FAILURE);
+ break;
+ default:
+ BUG();
+ }
+ } else {
+ set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
+ }
+
+ finish_pri_tag(fault->dev_state, fault->state, fault->tag);
+
+ put_pasid_state(fault->state);
+
+ kfree(fault);
+}
+
+static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data)
+{
+ struct amd_iommu_fault *iommu_fault;
+ struct pasid_state *pasid_state;
+ struct device_state *dev_state;
+ unsigned long flags;
+ struct fault *fault;
+ bool finish;
+ u16 tag;
+ int ret;
+
+ iommu_fault = data;
+ tag = iommu_fault->tag & 0x1ff;
+ finish = (iommu_fault->tag >> 9) & 1;
+
+ ret = NOTIFY_DONE;
+ dev_state = get_device_state(iommu_fault->device_id);
+ if (dev_state == NULL)
+ goto out;
+
+ pasid_state = get_pasid_state(dev_state, iommu_fault->pasid);
+ if (pasid_state == NULL) {
+ /* We know the device but not the PASID -> send INVALID */
+ amd_iommu_complete_ppr(dev_state->pdev, iommu_fault->pasid,
+ PPR_INVALID, tag);
+ goto out_drop_state;
+ }
+
+ spin_lock_irqsave(&pasid_state->lock, flags);
+ atomic_inc(&pasid_state->pri[tag].inflight);
+ if (finish)
+ pasid_state->pri[tag].finish = true;
+ spin_unlock_irqrestore(&pasid_state->lock, flags);
+
+ fault = kzalloc(sizeof(*fault), GFP_ATOMIC);
+ if (fault == NULL) {
+ /* We are OOM - send success and let the device re-fault */
+ finish_pri_tag(dev_state, pasid_state, tag);
+ goto out_drop_state;
+ }
+
+ fault->dev_state = dev_state;
+ fault->address = iommu_fault->address;
+ fault->state = pasid_state;
+ fault->tag = tag;
+ fault->finish = finish;
+ fault->flags = iommu_fault->flags;
+ INIT_WORK(&fault->work, do_fault);
+
+ queue_work(iommu_wq, &fault->work);
+
+ ret = NOTIFY_OK;
+
+out_drop_state:
+ put_device_state(dev_state);
+
+out:
+ return ret;
+}
+
+static struct notifier_block ppr_nb = {
+ .notifier_call = ppr_notifier,
+};
+
+static int task_exit(struct notifier_block *nb, unsigned long e, void *data)
+{
+ struct pasid_state *pasid_state;
+ struct task_struct *task;
+
+ task = data;
+
+ /*
+ * Using this notifier is a hack - but there is no other choice
+ * at the moment. What I really want is a sleeping notifier that
+ * is called when an MM goes down. But such a notifier doesn't
+ * exist yet. The notifier needs to sleep because it has to make
+ * sure that the device does not use the PASID and the address
+ * space anymore before it is destroyed. This includes waiting
+ * for pending PRI requests to pass the workqueue. The
+ * MMU-Notifiers would be a good fit, but they use RCU and so
+	 * they are not allowed to sleep. Let's see how we can solve this
+ * in a more intelligent way in the future.
+ */
+again:
+ spin_lock(&ps_lock);
+ list_for_each_entry(pasid_state, &pasid_state_list, list) {
+ struct device_state *dev_state;
+ int pasid;
+
+ if (pasid_state->task != task)
+ continue;
+
+ /* Drop Lock and unbind */
+ spin_unlock(&ps_lock);
+
+ dev_state = pasid_state->device_state;
+ pasid = pasid_state->pasid;
+
+ if (pasid_state->device_state->inv_ctx_cb)
+ dev_state->inv_ctx_cb(dev_state->pdev, pasid);
+
+ unbind_pasid(dev_state, pasid);
+
+ /* Task may be in the list multiple times */
+ goto again;
+ }
+ spin_unlock(&ps_lock);
+
+ return NOTIFY_OK;
+}
+
+int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,
+ struct task_struct *task)
+{
+ struct pasid_state *pasid_state;
+ struct device_state *dev_state;
+ u16 devid;
+ int ret;
+
+ might_sleep();
+
+ if (!amd_iommu_v2_supported())
+ return -ENODEV;
+
+ devid = device_id(pdev);
+ dev_state = get_device_state(devid);
+
+ if (dev_state == NULL)
+ return -EINVAL;
+
+ ret = -EINVAL;
+ if (pasid < 0 || pasid >= dev_state->max_pasids)
+ goto out;
+
+ ret = -ENOMEM;
+ pasid_state = kzalloc(sizeof(*pasid_state), GFP_KERNEL);
+ if (pasid_state == NULL)
+ goto out;
+
+ atomic_set(&pasid_state->count, 1);
+ init_waitqueue_head(&pasid_state->wq);
+ pasid_state->task = task;
+ pasid_state->mm = get_task_mm(task);
+ pasid_state->device_state = dev_state;
+ pasid_state->pasid = pasid;
+ pasid_state->mn.ops = &iommu_mn;
+
+ if (pasid_state->mm == NULL)
+ goto out_free;
+
+ mmu_notifier_register(&pasid_state->mn, pasid_state->mm);
+
+ ret = set_pasid_state(dev_state, pasid_state, pasid);
+ if (ret)
+ goto out_unregister;
+
+ ret = amd_iommu_domain_set_gcr3(dev_state->domain, pasid,
+ __pa(pasid_state->mm->pgd));
+ if (ret)
+ goto out_clear_state;
+
+ link_pasid_state(pasid_state);
+
+ return 0;
+
+out_clear_state:
+ clear_pasid_state(dev_state, pasid);
+
+out_unregister:
+ mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);
+
+out_free:
+ free_pasid_state(pasid_state);
+
+out:
+ put_device_state(dev_state);
+
+ return ret;
+}
+EXPORT_SYMBOL(amd_iommu_bind_pasid);
+
+void amd_iommu_unbind_pasid(struct pci_dev *pdev, int pasid)
+{
+ struct device_state *dev_state;
+ u16 devid;
+
+ might_sleep();
+
+ if (!amd_iommu_v2_supported())
+ return;
+
+ devid = device_id(pdev);
+ dev_state = get_device_state(devid);
+ if (dev_state == NULL)
+ return;
+
+ if (pasid < 0 || pasid >= dev_state->max_pasids)
+ goto out;
+
+ unbind_pasid(dev_state, pasid);
+
+out:
+ put_device_state(dev_state);
+}
+EXPORT_SYMBOL(amd_iommu_unbind_pasid);
+
+int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
+{
+ struct device_state *dev_state;
+ unsigned long flags;
+ int ret, tmp;
+ u16 devid;
+
+ might_sleep();
+
+ if (!amd_iommu_v2_supported())
+ return -ENODEV;
+
+ if (pasids <= 0 || pasids > (PASID_MASK + 1))
+ return -EINVAL;
+
+ devid = device_id(pdev);
+
+ dev_state = kzalloc(sizeof(*dev_state), GFP_KERNEL);
+ if (dev_state == NULL)
+ return -ENOMEM;
+
+ spin_lock_init(&dev_state->lock);
+ init_waitqueue_head(&dev_state->wq);
+ dev_state->pdev = pdev;
+
+ tmp = pasids;
+ for (dev_state->pasid_levels = 0; (tmp - 1) & ~0x1ff; tmp >>= 9)
+ dev_state->pasid_levels += 1;
+
+ atomic_set(&dev_state->count, 1);
+ dev_state->max_pasids = pasids;
+
+ ret = -ENOMEM;
+ dev_state->states = (void *)get_zeroed_page(GFP_KERNEL);
+ if (dev_state->states == NULL)
+ goto out_free_dev_state;
+
+ dev_state->domain = iommu_domain_alloc(&pci_bus_type);
+ if (dev_state->domain == NULL)
+ goto out_free_states;
+
+ amd_iommu_domain_direct_map(dev_state->domain);
+
+ ret = amd_iommu_domain_enable_v2(dev_state->domain, pasids);
+ if (ret)
+ goto out_free_domain;
+
+ ret = iommu_attach_device(dev_state->domain, &pdev->dev);
+ if (ret != 0)
+ goto out_free_domain;
+
+ spin_lock_irqsave(&state_lock, flags);
+
+ if (state_table[devid] != NULL) {
+ spin_unlock_irqrestore(&state_lock, flags);
+ ret = -EBUSY;
+ goto out_free_domain;
+ }
+
+ state_table[devid] = dev_state;
+
+ spin_unlock_irqrestore(&state_lock, flags);
+
+ return 0;
+
+out_free_domain:
+ iommu_domain_free(dev_state->domain);
+
+out_free_states:
+ free_page((unsigned long)dev_state->states);
+
+out_free_dev_state:
+ kfree(dev_state);
+
+ return ret;
+}
+EXPORT_SYMBOL(amd_iommu_init_device);
+
+void amd_iommu_free_device(struct pci_dev *pdev)
+{
+ struct device_state *dev_state;
+ unsigned long flags;
+ u16 devid;
+
+ if (!amd_iommu_v2_supported())
+ return;
+
+ devid = device_id(pdev);
+
+ spin_lock_irqsave(&state_lock, flags);
+
+ dev_state = state_table[devid];
+ if (dev_state == NULL) {
+ spin_unlock_irqrestore(&state_lock, flags);
+ return;
+ }
+
+ state_table[devid] = NULL;
+
+ spin_unlock_irqrestore(&state_lock, flags);
+
+ /* Get rid of any remaining pasid states */
+ free_pasid_states(dev_state);
+
+ put_device_state_wait(dev_state);
+}
+EXPORT_SYMBOL(amd_iommu_free_device);
+
+int amd_iommu_set_invalid_ppr_cb(struct pci_dev *pdev,
+ amd_iommu_invalid_ppr_cb cb)
+{
+ struct device_state *dev_state;
+ unsigned long flags;
+ u16 devid;
+ int ret;
+
+ if (!amd_iommu_v2_supported())
+ return -ENODEV;
+
+ devid = device_id(pdev);
+
+ spin_lock_irqsave(&state_lock, flags);
+
+ ret = -EINVAL;
+ dev_state = state_table[devid];
+ if (dev_state == NULL)
+ goto out_unlock;
+
+ dev_state->inv_ppr_cb = cb;
+
+ ret = 0;
+
+out_unlock:
+ spin_unlock_irqrestore(&state_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(amd_iommu_set_invalid_ppr_cb);
+
+int amd_iommu_set_invalidate_ctx_cb(struct pci_dev *pdev,
+ amd_iommu_invalidate_ctx cb)
+{
+ struct device_state *dev_state;
+ unsigned long flags;
+ u16 devid;
+ int ret;
+
+ if (!amd_iommu_v2_supported())
+ return -ENODEV;
+
+ devid = device_id(pdev);
+
+ spin_lock_irqsave(&state_lock, flags);
+
+ ret = -EINVAL;
+ dev_state = state_table[devid];
+ if (dev_state == NULL)
+ goto out_unlock;
+
+ dev_state->inv_ctx_cb = cb;
+
+ ret = 0;
+
+out_unlock:
+ spin_unlock_irqrestore(&state_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(amd_iommu_set_invalidate_ctx_cb);
+
+static int __init amd_iommu_v2_init(void)
+{
+ size_t state_table_size;
+ int ret;
+
+	pr_info("AMD IOMMUv2 driver by Joerg Roedel <joerg.roedel@amd.com>\n");
+
+ spin_lock_init(&state_lock);
+
+ state_table_size = MAX_DEVICES * sizeof(struct device_state *);
+ state_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+ get_order(state_table_size));
+ if (state_table == NULL)
+ return -ENOMEM;
+
+ ret = -ENOMEM;
+ iommu_wq = create_workqueue("amd_iommu_v2");
+ if (iommu_wq == NULL)
+ goto out_free;
+
+ ret = -ENOMEM;
+ empty_page_table = (u64 *)get_zeroed_page(GFP_KERNEL);
+ if (empty_page_table == NULL)
+ goto out_destroy_wq;
+
+ amd_iommu_register_ppr_notifier(&ppr_nb);
+ profile_event_register(PROFILE_TASK_EXIT, &profile_nb);
+
+ return 0;
+
+out_destroy_wq:
+ destroy_workqueue(iommu_wq);
+
+out_free:
+ free_pages((unsigned long)state_table, get_order(state_table_size));
+
+ return ret;
+}
+
+static void __exit amd_iommu_v2_exit(void)
+{
+ struct device_state *dev_state;
+ size_t state_table_size;
+ int i;
+
+ profile_event_unregister(PROFILE_TASK_EXIT, &profile_nb);
+ amd_iommu_unregister_ppr_notifier(&ppr_nb);
+
+ flush_workqueue(iommu_wq);
+
+ /*
+ * The loop below might call flush_workqueue(), so call
+ * destroy_workqueue() after it
+ */
+ for (i = 0; i < MAX_DEVICES; ++i) {
+ dev_state = get_device_state(i);
+
+ if (dev_state == NULL)
+ continue;
+
+ WARN_ON_ONCE(1);
+
+ put_device_state(dev_state);
+ amd_iommu_free_device(dev_state->pdev);
+ }
+
+ destroy_workqueue(iommu_wq);
+
+ state_table_size = MAX_DEVICES * sizeof(struct device_state *);
+ free_pages((unsigned long)state_table, get_order(state_table_size));
+
+ free_page((unsigned long)empty_page_table);
+}
+
+module_init(amd_iommu_v2_init);
+module_exit(amd_iommu_v2_exit);
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index bcbd693b351a..c9c6053198d4 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -41,6 +41,7 @@
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pci-ats.h>
+#include <linux/memblock.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>
@@ -78,6 +79,24 @@
#define LEVEL_STRIDE (9)
#define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
+/*
+ * This bitmap is used to advertise the page sizes our hardware supports
+ * to the IOMMU core, which will then use this information to split
+ * physically contiguous memory regions it is mapping into page sizes
+ * that we support.
+ *
+ * Traditionally the IOMMU core just handed us the mappings directly,
+ * after making sure the size is an order of a 4KiB page and that the
+ * mapping has natural alignment.
+ *
+ * To retain this behavior, we currently advertise that we support
+ * all page sizes that are an order of 4KiB.
+ *
+ * If at some point we'd like to utilize the IOMMU core's new behavior,
+ * we could change this to advertise the real page sizes we support.
+ */
+#define INTEL_IOMMU_PGSIZES (~0xFFFUL)
+
static inline int agaw_to_level(int agaw)
{
return agaw + 2;
@@ -405,6 +424,9 @@ int dmar_disabled = 0;
int dmar_disabled = 1;
#endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/
+int intel_iommu_enabled = 0;
+EXPORT_SYMBOL_GPL(intel_iommu_enabled);
+
static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
@@ -3518,7 +3540,7 @@ found:
return 0;
}
-int dmar_parse_rmrr_atsr_dev(void)
+int __init dmar_parse_rmrr_atsr_dev(void)
{
struct dmar_rmrr_unit *rmrr, *rmrr_n;
struct dmar_atsr_unit *atsr, *atsr_n;
@@ -3641,6 +3663,8 @@ int __init intel_iommu_init(void)
bus_register_notifier(&pci_bus_type, &device_nb);
+ intel_iommu_enabled = 1;
+
return 0;
}
@@ -3973,12 +3997,11 @@ static void intel_iommu_detach_device(struct iommu_domain *domain,
static int intel_iommu_map(struct iommu_domain *domain,
unsigned long iova, phys_addr_t hpa,
- int gfp_order, int iommu_prot)
+ size_t size, int iommu_prot)
{
struct dmar_domain *dmar_domain = domain->priv;
u64 max_addr;
int prot = 0;
- size_t size;
int ret;
if (iommu_prot & IOMMU_READ)
@@ -3988,7 +4011,6 @@ static int intel_iommu_map(struct iommu_domain *domain,
if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
prot |= DMA_PTE_SNP;
- size = PAGE_SIZE << gfp_order;
max_addr = iova + size;
if (dmar_domain->max_addr < max_addr) {
u64 end;
@@ -4011,11 +4033,10 @@ static int intel_iommu_map(struct iommu_domain *domain,
return ret;
}
-static int intel_iommu_unmap(struct iommu_domain *domain,
- unsigned long iova, int gfp_order)
+static size_t intel_iommu_unmap(struct iommu_domain *domain,
+ unsigned long iova, size_t size)
{
struct dmar_domain *dmar_domain = domain->priv;
- size_t size = PAGE_SIZE << gfp_order;
int order;
order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
@@ -4024,7 +4045,7 @@ static int intel_iommu_unmap(struct iommu_domain *domain,
if (dmar_domain->max_addr == iova + size)
dmar_domain->max_addr = iova;
- return order;
+ return PAGE_SIZE << order;
}
static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
@@ -4054,6 +4075,54 @@ static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
return 0;
}
+/*
+ * Group numbers are arbitrary. Devices with the same group number
+ * indicate that the iommu cannot differentiate between them. To avoid
+ * tracking used groups we just use the seg|bus|devfn of the lowest
+ * level at which we're able to differentiate devices.
+ */
+static int intel_iommu_device_group(struct device *dev, unsigned int *groupid)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct pci_dev *bridge;
+ union {
+ struct {
+ u8 devfn;
+ u8 bus;
+ u16 segment;
+ } pci;
+ u32 group;
+ } id;
+
+ if (iommu_no_mapping(dev))
+ return -ENODEV;
+
+ id.pci.segment = pci_domain_nr(pdev->bus);
+ id.pci.bus = pdev->bus->number;
+ id.pci.devfn = pdev->devfn;
+
+ if (!device_to_iommu(id.pci.segment, id.pci.bus, id.pci.devfn))
+ return -ENODEV;
+
+ bridge = pci_find_upstream_pcie_bridge(pdev);
+ if (bridge) {
+ if (pci_is_pcie(bridge)) {
+ id.pci.bus = bridge->subordinate->number;
+ id.pci.devfn = 0;
+ } else {
+ id.pci.bus = bridge->bus->number;
+ id.pci.devfn = bridge->devfn;
+ }
+ }
+
+ if (!pdev->is_virtfn && iommu_group_mf)
+ id.pci.devfn = PCI_DEVFN(PCI_SLOT(id.pci.devfn), 0);
+
+ *groupid = id.group;
+
+ return 0;
+}
+
static struct iommu_ops intel_iommu_ops = {
.domain_init = intel_iommu_domain_init,
.domain_destroy = intel_iommu_domain_destroy,
@@ -4063,6 +4132,8 @@ static struct iommu_ops intel_iommu_ops = {
.unmap = intel_iommu_unmap,
.iova_to_phys = intel_iommu_iova_to_phys,
.domain_has_cap = intel_iommu_domain_has_cap,
+ .device_group = intel_iommu_device_group,
+ .pgsize_bitmap = INTEL_IOMMU_PGSIZES,
};
static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
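
The device_group callback above packs segment, bus and devfn into one 32-bit group id through the anonymous union, so devices the IOMMU cannot tell apart end up with the same number. A standalone sketch of the packing, with made-up bus/slot/function values (byte order only changes the numeric value, not the uniqueness the code relies on):

#include <stdio.h>
#include <stdint.h>

#define PCI_DEVFN_EXAMPLE(slot, fn)  ((((slot) & 0x1f) << 3) | ((fn) & 0x07))

int main(void)
{
	union {
		struct {
			uint8_t  devfn;
			uint8_t  bus;
			uint16_t segment;
		} pci;
		uint32_t group;
	} id;

	id.pci.segment = 0;                          /* PCI domain number */
	id.pci.bus     = 0x03;
	id.pci.devfn   = PCI_DEVFN_EXAMPLE(0x1c, 2); /* slot 0x1c, function 2 */

	printf("groupid = 0x%08x\n", (unsigned int)id.group);
	return 0;
}
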
diff --git a/drivers/iommu/intr_remapping.c b/drivers/iommu/intr_remapping.c
index 07c9f189f314..6777ca049471 100644
--- a/drivers/iommu/intr_remapping.c
+++ b/drivers/iommu/intr_remapping.c
@@ -773,7 +773,7 @@ int __init parse_ioapics_under_ir(void)
return ir_supported;
}
-int ir_dev_scope_init(void)
+int __init ir_dev_scope_init(void)
{
if (!intr_remapping_enabled)
return 0;
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 2fb2963df553..2198b2dbbcd3 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -16,6 +16,8 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/bug.h>
@@ -25,8 +27,59 @@
#include <linux/errno.h>
#include <linux/iommu.h>
+static ssize_t show_iommu_group(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned int groupid;
+
+ if (iommu_device_group(dev, &groupid))
+ return 0;
+
+ return sprintf(buf, "%u", groupid);
+}
+static DEVICE_ATTR(iommu_group, S_IRUGO, show_iommu_group, NULL);
+
+static int add_iommu_group(struct device *dev, void *data)
+{
+ unsigned int groupid;
+
+ if (iommu_device_group(dev, &groupid) == 0)
+ return device_create_file(dev, &dev_attr_iommu_group);
+
+ return 0;
+}
+
+static int remove_iommu_group(struct device *dev)
+{
+ unsigned int groupid;
+
+ if (iommu_device_group(dev, &groupid) == 0)
+ device_remove_file(dev, &dev_attr_iommu_group);
+
+ return 0;
+}
+
+static int iommu_device_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct device *dev = data;
+
+ if (action == BUS_NOTIFY_ADD_DEVICE)
+ return add_iommu_group(dev, NULL);
+ else if (action == BUS_NOTIFY_DEL_DEVICE)
+ return remove_iommu_group(dev);
+
+ return 0;
+}
+
+static struct notifier_block iommu_device_nb = {
+ .notifier_call = iommu_device_notifier,
+};
+
static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops)
{
+ bus_register_notifier(bus, &iommu_device_nb);
+ bus_for_each_dev(bus, NULL, NULL, add_iommu_group);
}
/**
@@ -90,7 +143,7 @@ struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
if (bus == NULL || bus->iommu_ops == NULL)
return NULL;
- domain = kmalloc(sizeof(*domain), GFP_KERNEL);
+ domain = kzalloc(sizeof(*domain), GFP_KERNEL);
if (!domain)
return NULL;
@@ -157,32 +210,134 @@ int iommu_domain_has_cap(struct iommu_domain *domain,
EXPORT_SYMBOL_GPL(iommu_domain_has_cap);
int iommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, int gfp_order, int prot)
+ phys_addr_t paddr, size_t size, int prot)
{
- size_t size;
+ unsigned long orig_iova = iova;
+ unsigned int min_pagesz;
+ size_t orig_size = size;
+ int ret = 0;
if (unlikely(domain->ops->map == NULL))
return -ENODEV;
- size = PAGE_SIZE << gfp_order;
+ /* find out the minimum page size supported */
+ min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
+
+ /*
+ * both the virtual address and the physical one, as well as
+ * the size of the mapping, must be aligned (at least) to the
+ * size of the smallest page supported by the hardware
+ */
+ if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
+ pr_err("unaligned: iova 0x%lx pa 0x%lx size 0x%lx min_pagesz "
+ "0x%x\n", iova, (unsigned long)paddr,
+ (unsigned long)size, min_pagesz);
+ return -EINVAL;
+ }
+
+ pr_debug("map: iova 0x%lx pa 0x%lx size 0x%lx\n", iova,
+ (unsigned long)paddr, (unsigned long)size);
+
+ while (size) {
+ unsigned long pgsize, addr_merge = iova | paddr;
+ unsigned int pgsize_idx;
+
+ /* Max page size that still fits into 'size' */
+ pgsize_idx = __fls(size);
+
+ /* need to consider alignment requirements ? */
+ if (likely(addr_merge)) {
+ /* Max page size allowed by both iova and paddr */
+ unsigned int align_pgsize_idx = __ffs(addr_merge);
+
+ pgsize_idx = min(pgsize_idx, align_pgsize_idx);
+ }
+
+ /* build a mask of acceptable page sizes */
+ pgsize = (1UL << (pgsize_idx + 1)) - 1;
- BUG_ON(!IS_ALIGNED(iova | paddr, size));
+ /* throw away page sizes not supported by the hardware */
+ pgsize &= domain->ops->pgsize_bitmap;
- return domain->ops->map(domain, iova, paddr, gfp_order, prot);
+ /* make sure we're still sane */
+ BUG_ON(!pgsize);
+
+ /* pick the biggest page */
+ pgsize_idx = __fls(pgsize);
+ pgsize = 1UL << pgsize_idx;
+
+ pr_debug("mapping: iova 0x%lx pa 0x%lx pgsize %lu\n", iova,
+ (unsigned long)paddr, pgsize);
+
+ ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
+ if (ret)
+ break;
+
+ iova += pgsize;
+ paddr += pgsize;
+ size -= pgsize;
+ }
+
+ /* unroll mapping in case something went wrong */
+ if (ret)
+ iommu_unmap(domain, orig_iova, orig_size - size);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(iommu_map);
-int iommu_unmap(struct iommu_domain *domain, unsigned long iova, int gfp_order)
+size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
{
- size_t size;
+ size_t unmapped_page, unmapped = 0;
+ unsigned int min_pagesz;
if (unlikely(domain->ops->unmap == NULL))
return -ENODEV;
- size = PAGE_SIZE << gfp_order;
+ /* find out the minimum page size supported */
+ min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
+
+ /*
+ * The virtual address, as well as the size of the mapping, must be
+ * aligned (at least) to the size of the smallest page supported
+ * by the hardware
+ */
+ if (!IS_ALIGNED(iova | size, min_pagesz)) {
+ pr_err("unaligned: iova 0x%lx size 0x%lx min_pagesz 0x%x\n",
+ iova, (unsigned long)size, min_pagesz);
+ return -EINVAL;
+ }
+
+ pr_debug("unmap this: iova 0x%lx size 0x%lx\n", iova,
+ (unsigned long)size);
+
+ /*
+ * Keep iterating until we either unmap 'size' bytes (or more)
+ * or we hit an area that isn't mapped.
+ */
+ while (unmapped < size) {
+ size_t left = size - unmapped;
+
+ unmapped_page = domain->ops->unmap(domain, iova, left);
+ if (!unmapped_page)
+ break;
+
+ pr_debug("unmapped: iova 0x%lx size %lx\n", iova,
+ (unsigned long)unmapped_page);
+
+ iova += unmapped_page;
+ unmapped += unmapped_page;
+ }
+
+ return unmapped;
+}
+EXPORT_SYMBOL_GPL(iommu_unmap);
- BUG_ON(!IS_ALIGNED(iova, size));
+int iommu_device_group(struct device *dev, unsigned int *groupid)
+{
+ if (iommu_present(dev->bus) && dev->bus->iommu_ops->device_group)
+ return dev->bus->iommu_ops->device_group(dev, groupid);
- return domain->ops->unmap(domain, iova, gfp_order);
+ return -ENODEV;
}
-EXPORT_SYMBOL_GPL(iommu_unmap);
+EXPORT_SYMBOL_GPL(iommu_device_group);
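
The new iommu_map() above splits a request into the largest pages the driver advertises in pgsize_bitmap, constrained by the bytes left to map and by the alignment of iova and paddr. A standalone sketch of just that selection step, with __fls/__ffs modelled by GCC builtins and the MSM-style bitmap used as an example:

#include <stdio.h>

/* largest supported page that fits the remaining size and current alignment */
static unsigned long pick_pgsize(unsigned long iova, unsigned long paddr,
				 unsigned long size, unsigned long pgsize_bitmap)
{
	unsigned long addr_merge = iova | paddr;
	unsigned int pgsize_idx = 8 * sizeof(unsigned long) - 1 -
				  __builtin_clzl(size);        /* __fls(size) */
	unsigned long pgsize;

	if (addr_merge) {
		unsigned int align_idx = __builtin_ctzl(addr_merge); /* __ffs */

		if (align_idx < pgsize_idx)
			pgsize_idx = align_idx;
	}

	/* mask of acceptable sizes, restricted to what the hardware offers */
	pgsize = ((1UL << (pgsize_idx + 1)) - 1) & pgsize_bitmap;
	if (!pgsize)
		return 0;
	return 1UL << (8 * sizeof(unsigned long) - 1 - __builtin_clzl(pgsize));
}

int main(void)
{
	/* MSM-style bitmap: 4K | 64K | 1M | 16M */
	unsigned long bitmap = 0x1000 | 0x10000 | 0x100000 | 0x1000000;

	printf("0x%lx\n", pick_pgsize(0x100000, 0x200000, 0x300000, bitmap)); /* 1M */
	printf("0x%lx\n", pick_pgsize(0x101000, 0x200000, 0x300000, bitmap)); /* 4K */
	return 0;
}
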
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index 5865dd2e28f9..08a90b88e40d 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -42,6 +42,9 @@ __asm__ __volatile__ ( \
#define RCP15_PRRR(reg) MRC(reg, p15, 0, c10, c2, 0)
#define RCP15_NMRR(reg) MRC(reg, p15, 0, c10, c2, 1)
+/* bitmap of the page sizes currently supported */
+#define MSM_IOMMU_PGSIZES (SZ_4K | SZ_64K | SZ_1M | SZ_16M)
+
static int msm_iommu_tex_class[4];
DEFINE_SPINLOCK(msm_iommu_lock);
@@ -352,7 +355,7 @@ fail:
}
static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
- phys_addr_t pa, int order, int prot)
+ phys_addr_t pa, size_t len, int prot)
{
struct msm_priv *priv;
unsigned long flags;
@@ -363,7 +366,6 @@ static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
unsigned long *sl_pte;
unsigned long sl_offset;
unsigned int pgprot;
- size_t len = 0x1000UL << order;
int ret = 0, tex, sh;
spin_lock_irqsave(&msm_iommu_lock, flags);
@@ -463,8 +465,8 @@ fail:
return ret;
}
-static int msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
- int order)
+static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
+ size_t len)
{
struct msm_priv *priv;
unsigned long flags;
@@ -474,7 +476,6 @@ static int msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
unsigned long *sl_table;
unsigned long *sl_pte;
unsigned long sl_offset;
- size_t len = 0x1000UL << order;
int i, ret = 0;
spin_lock_irqsave(&msm_iommu_lock, flags);
@@ -544,15 +545,12 @@ static int msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
ret = __flush_iotlb(domain);
- /*
- * the IOMMU API requires us to return the order of the unmapped
- * page (on success).
- */
- if (!ret)
- ret = order;
fail:
spin_unlock_irqrestore(&msm_iommu_lock, flags);
- return ret;
+
+ /* the IOMMU API requires us to return how many bytes were unmapped */
+ len = ret ? 0 : len;
+ return len;
}
static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
@@ -684,7 +682,8 @@ static struct iommu_ops msm_iommu_ops = {
.map = msm_iommu_map,
.unmap = msm_iommu_unmap,
.iova_to_phys = msm_iommu_iova_to_phys,
- .domain_has_cap = msm_iommu_domain_has_cap
+ .domain_has_cap = msm_iommu_domain_has_cap,
+ .pgsize_bitmap = MSM_IOMMU_PGSIZES,
};
static int __init get_tex_class(int icp, int ocp, int mt, int nos)
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index 8f32b2bf7587..d8edd979d01b 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -33,6 +33,9 @@
(__i < (n)) && (cr = __iotlb_read_cr((obj), __i), true); \
__i++)
+/* bitmap of the page sizes currently supported */
+#define OMAP_IOMMU_PGSIZES (SZ_4K | SZ_64K | SZ_1M | SZ_16M)
+
/**
* struct omap_iommu_domain - omap iommu domain
* @pgtable: the page table
@@ -86,20 +89,24 @@ EXPORT_SYMBOL_GPL(omap_uninstall_iommu_arch);
/**
* omap_iommu_save_ctx - Save registers for pm off-mode support
- * @obj: target iommu
+ * @dev: client device
**/
-void omap_iommu_save_ctx(struct omap_iommu *obj)
+void omap_iommu_save_ctx(struct device *dev)
{
+ struct omap_iommu *obj = dev_to_omap_iommu(dev);
+
arch_iommu->save_ctx(obj);
}
EXPORT_SYMBOL_GPL(omap_iommu_save_ctx);
/**
* omap_iommu_restore_ctx - Restore registers for pm off-mode support
- * @obj: target iommu
+ * @dev: client device
**/
-void omap_iommu_restore_ctx(struct omap_iommu *obj)
+void omap_iommu_restore_ctx(struct device *dev)
{
+ struct omap_iommu *obj = dev_to_omap_iommu(dev);
+
arch_iommu->restore_ctx(obj);
}
EXPORT_SYMBOL_GPL(omap_iommu_restore_ctx);
@@ -820,35 +827,23 @@ static int device_match_by_alias(struct device *dev, void *data)
}
/**
- * omap_find_iommu_device() - find an omap iommu device by name
- * @name: name of the iommu device
- *
- * The generic iommu API requires the caller to provide the device
- * he wishes to attach to a certain iommu domain.
- *
- * Drivers generally should not bother with this as it should just
- * be taken care of by the DMA-API using dev_archdata.
- *
- * This function is provided as an interim solution until the latter
- * materializes, and omap3isp is fully migrated to the DMA-API.
- */
-struct device *omap_find_iommu_device(const char *name)
-{
- return driver_find_device(&omap_iommu_driver.driver, NULL,
- (void *)name,
- device_match_by_alias);
-}
-EXPORT_SYMBOL_GPL(omap_find_iommu_device);
-
-/**
* omap_iommu_attach() - attach iommu device to an iommu domain
- * @dev: target omap iommu device
+ * @name: name of target omap iommu device
* @iopgd: page table
**/
-static struct omap_iommu *omap_iommu_attach(struct device *dev, u32 *iopgd)
+static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd)
{
int err = -ENOMEM;
- struct omap_iommu *obj = to_iommu(dev);
+ struct device *dev;
+ struct omap_iommu *obj;
+
+ dev = driver_find_device(&omap_iommu_driver.driver, NULL,
+ (void *)name,
+ device_match_by_alias);
+ if (!dev)
+ return NULL;
+
+ obj = to_iommu(dev);
spin_lock(&obj->iommu_lock);
@@ -1019,12 +1014,11 @@ static void iopte_cachep_ctor(void *iopte)
}
static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
- phys_addr_t pa, int order, int prot)
+ phys_addr_t pa, size_t bytes, int prot)
{
struct omap_iommu_domain *omap_domain = domain->priv;
struct omap_iommu *oiommu = omap_domain->iommu_dev;
struct device *dev = oiommu->dev;
- size_t bytes = PAGE_SIZE << order;
struct iotlb_entry e;
int omap_pgsz;
u32 ret, flags;
@@ -1049,19 +1043,16 @@ static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
return ret;
}
-static int omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
- int order)
+static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
+ size_t size)
{
struct omap_iommu_domain *omap_domain = domain->priv;
struct omap_iommu *oiommu = omap_domain->iommu_dev;
struct device *dev = oiommu->dev;
- size_t unmap_size;
-
- dev_dbg(dev, "unmapping da 0x%lx order %d\n", da, order);
- unmap_size = iopgtable_clear_entry(oiommu, da);
+ dev_dbg(dev, "unmapping da 0x%lx size %u\n", da, size);
- return unmap_size ? get_order(unmap_size) : -EINVAL;
+ return iopgtable_clear_entry(oiommu, da);
}
static int
@@ -1069,6 +1060,7 @@ omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
struct omap_iommu_domain *omap_domain = domain->priv;
struct omap_iommu *oiommu;
+ struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
int ret = 0;
spin_lock(&omap_domain->lock);
@@ -1081,14 +1073,14 @@ omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
}
/* get a handle to and enable the omap iommu */
- oiommu = omap_iommu_attach(dev, omap_domain->pgtable);
+ oiommu = omap_iommu_attach(arch_data->name, omap_domain->pgtable);
if (IS_ERR(oiommu)) {
ret = PTR_ERR(oiommu);
dev_err(dev, "can't get omap iommu: %d\n", ret);
goto out;
}
- omap_domain->iommu_dev = oiommu;
+ omap_domain->iommu_dev = arch_data->iommu_dev = oiommu;
oiommu->domain = domain;
out:
@@ -1100,7 +1092,8 @@ static void omap_iommu_detach_dev(struct iommu_domain *domain,
struct device *dev)
{
struct omap_iommu_domain *omap_domain = domain->priv;
- struct omap_iommu *oiommu = to_iommu(dev);
+ struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
+ struct omap_iommu *oiommu = dev_to_omap_iommu(dev);
spin_lock(&omap_domain->lock);
@@ -1114,7 +1107,7 @@ static void omap_iommu_detach_dev(struct iommu_domain *domain,
omap_iommu_detach(oiommu);
- omap_domain->iommu_dev = NULL;
+ omap_domain->iommu_dev = arch_data->iommu_dev = NULL;
out:
spin_unlock(&omap_domain->lock);
@@ -1183,14 +1176,14 @@ static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
else if (iopte_is_large(*pte))
ret = omap_iommu_translate(*pte, da, IOLARGE_MASK);
else
- dev_err(dev, "bogus pte 0x%x", *pte);
+ dev_err(dev, "bogus pte 0x%x, da 0x%lx", *pte, da);
} else {
if (iopgd_is_section(*pgd))
ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK);
else if (iopgd_is_super(*pgd))
ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK);
else
- dev_err(dev, "bogus pgd 0x%x", *pgd);
+ dev_err(dev, "bogus pgd 0x%x, da 0x%lx", *pgd, da);
}
return ret;
@@ -1211,6 +1204,7 @@ static struct iommu_ops omap_iommu_ops = {
.unmap = omap_iommu_unmap,
.iova_to_phys = omap_iommu_iova_to_phys,
.domain_has_cap = omap_iommu_domain_has_cap,
+ .pgsize_bitmap = OMAP_IOMMU_PGSIZES,
};
static int __init omap_iommu_init(void)
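
The OMAP changes drop the exported omap_find_iommu_device() and instead resolve the iommu by the name stored in the client device's archdata at attach time, caching the result so dev_to_omap_iommu() is a plain dereference afterwards. A standalone model of that indirection (all types and names here are stand-ins, not the kernel structures):

#include <stdio.h>
#include <string.h>

struct omap_iommu_example { const char *name; };

struct arch_data_example {
	const char *name;                      /* which iommu to attach to */
	struct omap_iommu_example *iommu_dev;  /* cached at attach time */
};

struct device_example { struct arch_data_example *archdata; };

static struct omap_iommu_example iommus[] = { { "isp" }, { "iva2" } };

/* models driver_find_device(..., device_match_by_alias) */
static struct omap_iommu_example *find_by_name(const char *name)
{
	for (unsigned int i = 0; i < sizeof(iommus) / sizeof(iommus[0]); i++)
		if (!strcmp(iommus[i].name, name))
			return &iommus[i];
	return NULL;
}

/* models dev_to_omap_iommu(): just the cached pointer */
static struct omap_iommu_example *dev_to_iommu(struct device_example *dev)
{
	return dev->archdata->iommu_dev;
}

int main(void)
{
	struct arch_data_example ad = { .name = "isp" };
	struct device_example camera = { .archdata = &ad };

	ad.iommu_dev = find_by_name(ad.name);   /* what attach_dev now does */
	printf("attached to %s\n", dev_to_iommu(&camera)->name);
	return 0;
}
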
diff --git a/drivers/iommu/omap-iovmm.c b/drivers/iommu/omap-iovmm.c
index 46be456fcc00..2e10c3e0a7ae 100644
--- a/drivers/iommu/omap-iovmm.c
+++ b/drivers/iommu/omap-iovmm.c
@@ -231,12 +231,14 @@ static struct iovm_struct *__find_iovm_area(struct omap_iommu *obj,
/**
* omap_find_iovm_area - find iovma which includes @da
+ * @dev: client device
* @da: iommu device virtual address
*
* Find the existing iovma starting at @da
*/
-struct iovm_struct *omap_find_iovm_area(struct omap_iommu *obj, u32 da)
+struct iovm_struct *omap_find_iovm_area(struct device *dev, u32 da)
{
+ struct omap_iommu *obj = dev_to_omap_iommu(dev);
struct iovm_struct *area;
mutex_lock(&obj->mmap_lock);
@@ -343,14 +345,15 @@ static void free_iovm_area(struct omap_iommu *obj, struct iovm_struct *area)
/**
* omap_da_to_va - convert (d) to (v)
- * @obj: objective iommu
+ * @dev: client device
* @da: iommu device virtual address
* @va: mpu virtual address
*
* Returns mpu virtual addr which corresponds to a given device virtual addr
*/
-void *omap_da_to_va(struct omap_iommu *obj, u32 da)
+void *omap_da_to_va(struct device *dev, u32 da)
{
+ struct omap_iommu *obj = dev_to_omap_iommu(dev);
void *va = NULL;
struct iovm_struct *area;
@@ -410,7 +413,6 @@ static int map_iovm_area(struct iommu_domain *domain, struct iovm_struct *new,
unsigned int i, j;
struct scatterlist *sg;
u32 da = new->da_start;
- int order;
if (!domain || !sgt)
return -EINVAL;
@@ -429,12 +431,10 @@ static int map_iovm_area(struct iommu_domain *domain, struct iovm_struct *new,
if (bytes_to_iopgsz(bytes) < 0)
goto err_out;
- order = get_order(bytes);
-
pr_debug("%s: [%d] %08x %08x(%x)\n", __func__,
i, da, pa, bytes);
- err = iommu_map(domain, da, pa, order, flags);
+ err = iommu_map(domain, da, pa, bytes, flags);
if (err)
goto err_out;
@@ -449,10 +449,9 @@ err_out:
size_t bytes;
bytes = sg->length + sg->offset;
- order = get_order(bytes);
/* ignore failures.. we're already handling one */
- iommu_unmap(domain, da, order);
+ iommu_unmap(domain, da, bytes);
da += bytes;
}
@@ -467,7 +466,8 @@ static void unmap_iovm_area(struct iommu_domain *domain, struct omap_iommu *obj,
size_t total = area->da_end - area->da_start;
const struct sg_table *sgt = area->sgt;
struct scatterlist *sg;
- int i, err;
+ int i;
+ size_t unmapped;
BUG_ON(!sgtable_ok(sgt));
BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE));
@@ -475,13 +475,11 @@ static void unmap_iovm_area(struct iommu_domain *domain, struct omap_iommu *obj,
start = area->da_start;
for_each_sg(sgt->sgl, sg, sgt->nents, i) {
size_t bytes;
- int order;
bytes = sg->length + sg->offset;
- order = get_order(bytes);
- err = iommu_unmap(domain, start, order);
- if (err < 0)
+ unmapped = iommu_unmap(domain, start, bytes);
+ if (unmapped < bytes)
break;
dev_dbg(obj->dev, "%s: unmap %08x(%x) %08x\n",
@@ -582,16 +580,18 @@ __iommu_vmap(struct iommu_domain *domain, struct omap_iommu *obj,
/**
* omap_iommu_vmap - (d)-(p)-(v) address mapper
- * @obj: objective iommu
+ * @domain: iommu domain
+ * @dev: client device
* @sgt: address of scatter gather table
* @flags: iovma and page property
*
* Creates 1-n-1 mapping with given @sgt and returns @da.
 * All @sgt elements must be io page size aligned.
*/
-u32 omap_iommu_vmap(struct iommu_domain *domain, struct omap_iommu *obj, u32 da,
+u32 omap_iommu_vmap(struct iommu_domain *domain, struct device *dev, u32 da,
const struct sg_table *sgt, u32 flags)
{
+ struct omap_iommu *obj = dev_to_omap_iommu(dev);
size_t bytes;
void *va = NULL;
@@ -622,15 +622,17 @@ EXPORT_SYMBOL_GPL(omap_iommu_vmap);
/**
* omap_iommu_vunmap - release virtual mapping obtained by 'omap_iommu_vmap()'
- * @obj: objective iommu
+ * @domain: iommu domain
+ * @dev: client device
* @da: iommu device virtual address
*
* Free the iommu virtually contiguous memory area starting at
* @da, which was returned by 'omap_iommu_vmap()'.
*/
struct sg_table *
-omap_iommu_vunmap(struct iommu_domain *domain, struct omap_iommu *obj, u32 da)
+omap_iommu_vunmap(struct iommu_domain *domain, struct device *dev, u32 da)
{
+ struct omap_iommu *obj = dev_to_omap_iommu(dev);
struct sg_table *sgt;
/*
* 'sgt' is allocated before 'omap_iommu_vmalloc()' is called.
@@ -647,7 +649,7 @@ EXPORT_SYMBOL_GPL(omap_iommu_vunmap);
/**
* omap_iommu_vmalloc - (d)-(p)-(v) address allocator and mapper
- * @obj: objective iommu
+ * @dev: client device
* @da: contiguous iommu virtual memory
* @bytes: allocation size
* @flags: iovma and page property
@@ -656,9 +658,10 @@ EXPORT_SYMBOL_GPL(omap_iommu_vunmap);
* @da again, which might be adjusted if 'IOVMF_DA_FIXED' is not set.
*/
u32
-omap_iommu_vmalloc(struct iommu_domain *domain, struct omap_iommu *obj, u32 da,
+omap_iommu_vmalloc(struct iommu_domain *domain, struct device *dev, u32 da,
size_t bytes, u32 flags)
{
+ struct omap_iommu *obj = dev_to_omap_iommu(dev);
void *va;
struct sg_table *sgt;
@@ -698,15 +701,16 @@ EXPORT_SYMBOL_GPL(omap_iommu_vmalloc);
/**
* omap_iommu_vfree - release memory allocated by 'omap_iommu_vmalloc()'
- * @obj: objective iommu
+ * @dev: client device
* @da: iommu device virtual address
*
 * Frees the iommu virtually contiguous memory area starting at
* @da, as obtained from 'omap_iommu_vmalloc()'.
*/
-void omap_iommu_vfree(struct iommu_domain *domain, struct omap_iommu *obj,
+void omap_iommu_vfree(struct iommu_domain *domain, struct device *dev,
const u32 da)
{
+ struct omap_iommu *obj = dev_to_omap_iommu(dev);
struct sg_table *sgt;
sgt = unmap_vm_area(domain, obj, da, vfree,
diff --git a/drivers/isdn/divert/divert_procfs.c b/drivers/isdn/divert/divert_procfs.c
index 33ec9e467772..9021182c4b76 100644
--- a/drivers/isdn/divert/divert_procfs.c
+++ b/drivers/isdn/divert/divert_procfs.c
@@ -242,6 +242,12 @@ static int isdn_divert_ioctl_unlocked(struct file *file, uint cmd, ulong arg)
case IIOCDOCFINT:
if (!divert_if.drv_to_name(dioctl.cf_ctrl.drvid))
return (-EINVAL); /* invalid driver */
+ if (strnlen(dioctl.cf_ctrl.msn, sizeof(dioctl.cf_ctrl.msn)) ==
+ sizeof(dioctl.cf_ctrl.msn))
+ return -EINVAL;
+ if (strnlen(dioctl.cf_ctrl.fwd_nr, sizeof(dioctl.cf_ctrl.fwd_nr)) ==
+ sizeof(dioctl.cf_ctrl.fwd_nr))
+ return -EINVAL;
if ((i = cf_command(dioctl.cf_ctrl.drvid,
(cmd == IIOCDOCFACT) ? 1 : (cmd == IIOCDOCFDIS) ? 0 : 2,
dioctl.cf_ctrl.cfproc,
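
The strnlen() checks added here (and in isdn_net.c below) guard against a user-supplied fixed-size string that lacks a terminating NUL: if strnlen() returns the full buffer size, no NUL was found inside the array and the later strcpy()/drv_to_name() would run off the end. A standalone sketch of the pattern:

#include <stdio.h>
#include <string.h>

/* returns -1 (standing in for -EINVAL) if buf is not NUL-terminated */
static int check_nul_terminated(const char *buf, size_t bufsize)
{
	return strnlen(buf, bufsize) == bufsize ? -1 : 0;
}

int main(void)
{
	char ok[8]  = "isdn0";                               /* NUL inside */
	char bad[8] = { '0', '1', '2', '3', '4', '5', '6', '7' }; /* no NUL */

	printf("%d %d\n", check_nul_terminated(ok, sizeof(ok)),
	       check_nul_terminated(bad, sizeof(bad)));
	return 0;
}
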
diff --git a/drivers/isdn/gigaset/i4l.c b/drivers/isdn/gigaset/i4l.c
index 04231cb2f031..1793ba1b6a89 100644
--- a/drivers/isdn/gigaset/i4l.c
+++ b/drivers/isdn/gigaset/i4l.c
@@ -624,8 +624,6 @@ int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid)
{
isdn_if *iif;
- pr_info("ISDN4Linux interface\n");
-
iif = kmalloc(sizeof *iif, GFP_KERNEL);
if (!iif) {
pr_err("out of memory\n");
@@ -684,6 +682,7 @@ void gigaset_isdn_unregdev(struct cardstate *cs)
*/
void gigaset_isdn_regdrv(void)
{
+ pr_info("ISDN4Linux interface\n");
/* nothing to do */
}
diff --git a/drivers/isdn/hardware/avm/b1dma.c b/drivers/isdn/hardware/avm/b1dma.c
index 9c8d7aa053c5..a0ed668d4d2a 100644
--- a/drivers/isdn/hardware/avm/b1dma.c
+++ b/drivers/isdn/hardware/avm/b1dma.c
@@ -40,7 +40,7 @@ MODULE_DESCRIPTION("CAPI4Linux: DMA support for active AVM cards");
MODULE_AUTHOR("Carsten Paeth");
MODULE_LICENSE("GPL");
-static int suppress_pollack = 0;
+static bool suppress_pollack = 0;
module_param(suppress_pollack, bool, 0);
/* ------------------------------------------------------------- */
diff --git a/drivers/isdn/hardware/avm/c4.c b/drivers/isdn/hardware/avm/c4.c
index d3530f6e8115..9743b24ef9d6 100644
--- a/drivers/isdn/hardware/avm/c4.c
+++ b/drivers/isdn/hardware/avm/c4.c
@@ -40,7 +40,7 @@ static char *revision = "$Revision: 1.1.2.2 $";
/* ------------------------------------------------------------- */
-static int suppress_pollack;
+static bool suppress_pollack;
static struct pci_device_id c4_pci_tbl[] = {
{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21285, PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_C4, 0, 0, (unsigned long)4 },
diff --git a/drivers/isdn/hisax/enternow_pci.c b/drivers/isdn/hisax/enternow_pci.c
index 26264abf1f58..f55d29d60826 100644
--- a/drivers/isdn/hisax/enternow_pci.c
+++ b/drivers/isdn/hisax/enternow_pci.c
@@ -333,7 +333,7 @@ static void __devinit en_cs_init(struct IsdnCard *card,
cs->hw.njet.isac = cs->hw.njet.base + 0xC0; // Fenster zum AMD
/* Reset an */
- cs->hw.njet.ctrl_reg = 0x07; // gendert von 0xff
+ cs->hw.njet.ctrl_reg = 0x07; // geändert von 0xff
outb(cs->hw.njet.ctrl_reg, cs->hw.njet.base + NETJET_CTRL);
/* 20 ms Pause */
mdelay(20);
diff --git a/drivers/isdn/i4l/Kconfig b/drivers/isdn/i4l/Kconfig
index 9c6650ea848e..2302fbe70ac6 100644
--- a/drivers/isdn/i4l/Kconfig
+++ b/drivers/isdn/i4l/Kconfig
@@ -6,7 +6,7 @@ if ISDN_I4L
config ISDN_PPP
bool "Support synchronous PPP"
- depends on INET
+ depends on INET && NETDEVICES
select SLHC
help
Over digital connections such as ISDN, there is no need to
diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
index 1f73d7f7e024..2339d7396b9e 100644
--- a/drivers/isdn/i4l/isdn_net.c
+++ b/drivers/isdn/i4l/isdn_net.c
@@ -2756,6 +2756,9 @@ isdn_net_setcfg(isdn_net_ioctl_cfg * cfg)
char *c,
*e;
+ if (strnlen(cfg->drvid, sizeof(cfg->drvid)) ==
+ sizeof(cfg->drvid))
+ return -EINVAL;
drvidx = -1;
chidx = -1;
strcpy(drvid, cfg->drvid);
diff --git a/drivers/isdn/sc/init.c b/drivers/isdn/sc/init.c
index ca710ab278ec..023de789f250 100644
--- a/drivers/isdn/sc/init.c
+++ b/drivers/isdn/sc/init.c
@@ -30,7 +30,7 @@ static const char *boardname[] = { "DataCommute/BRI", "DataCommute/PRI", "TeleCo
static unsigned int io[] = {0,0,0,0};
static unsigned char irq[] = {0,0,0,0};
static unsigned long ram[] = {0,0,0,0};
-static int do_reset = 0;
+static bool do_reset = 0;
module_param_array(io, int, NULL, 0);
module_param_array(irq, int, NULL, 0);
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index ff203a421863..c957c344233f 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -347,7 +347,8 @@ config LEDS_MC13783
config LEDS_NS2
tristate "LED support for Network Space v2 GPIO LEDs"
depends on LEDS_CLASS
- depends on MACH_NETSPACE_V2 || MACH_INETSPACE_V2 || MACH_NETSPACE_MAX_V2 || D2NET_V2
+ depends on MACH_NETSPACE_V2 || MACH_INETSPACE_V2 || \
+ MACH_NETSPACE_MAX_V2 || MACH_D2NET_V2
default y
help
	  This option enables support for the dual-GPIO LED found on the
@@ -387,6 +388,21 @@ config LEDS_RENESAS_TPU
pin function. The latter to support brightness control.
Brightness control is supported but hardware blinking is not.
+config LEDS_TCA6507
+ tristate "LED Support for TCA6507 I2C chip"
+ depends on LEDS_CLASS && I2C
+ help
+	  This option enables support for LEDs connected to TCA6507
+	  LED driver chips accessed via the I2C bus.
+	  The driver supports brightness control and hardware-assisted blinking.
+
+config LEDS_MAX8997
+ tristate "LED support for MAX8997 PMIC"
+ depends on LEDS_CLASS && MFD_MAX8997
+ help
+ This option enables support for on-chip LED drivers on
+ MAXIM MAX8997 PMIC.
+
config LEDS_TRIGGERS
bool "LED Trigger support"
depends on LEDS_CLASS
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index e4f6bf568880..b8a9723477f0 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -25,6 +25,7 @@ obj-$(CONFIG_LEDS_GPIO) += leds-gpio.o
obj-$(CONFIG_LEDS_LP3944) += leds-lp3944.o
obj-$(CONFIG_LEDS_LP5521) += leds-lp5521.o
obj-$(CONFIG_LEDS_LP5523) += leds-lp5523.o
+obj-$(CONFIG_LEDS_TCA6507) += leds-tca6507.o
obj-$(CONFIG_LEDS_CLEVO_MAIL) += leds-clevo-mail.o
obj-$(CONFIG_LEDS_HP6XX) += leds-hp6xx.o
obj-$(CONFIG_LEDS_FSG) += leds-fsg.o
@@ -43,6 +44,7 @@ obj-$(CONFIG_LEDS_NS2) += leds-ns2.o
obj-$(CONFIG_LEDS_NETXBIG) += leds-netxbig.o
obj-$(CONFIG_LEDS_ASIC3) += leds-asic3.o
obj-$(CONFIG_LEDS_RENESAS_TPU) += leds-renesas-tpu.o
+obj-$(CONFIG_LEDS_MAX8997) += leds-max8997.o
# LED SPI Drivers
obj-$(CONFIG_LEDS_DAC124S085) += leds-dac124s085.o
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 6d5628bb0601..0c8739c448b1 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -15,7 +15,6 @@
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/device.h>
-#include <linux/sysdev.h>
#include <linux/timer.h>
#include <linux/err.h>
#include <linux/ctype.h>
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
index 6f1ff93d7cec..46b4c766335d 100644
--- a/drivers/leds/led-triggers.c
+++ b/drivers/leds/led-triggers.c
@@ -17,7 +17,6 @@
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/device.h>
-#include <linux/sysdev.h>
#include <linux/timer.h>
#include <linux/rwsem.h>
#include <linux/leds.h>
diff --git a/drivers/leds/leds-88pm860x.c b/drivers/leds/leds-88pm860x.c
index 0810604dc701..4ca00624bd18 100644
--- a/drivers/leds/leds-88pm860x.c
+++ b/drivers/leds/leds-88pm860x.c
@@ -238,17 +238,7 @@ static struct platform_driver pm860x_led_driver = {
.remove = pm860x_led_remove,
};
-static int __devinit pm860x_led_init(void)
-{
- return platform_driver_register(&pm860x_led_driver);
-}
-module_init(pm860x_led_init);
-
-static void __devexit pm860x_led_exit(void)
-{
- platform_driver_unregister(&pm860x_led_driver);
-}
-module_exit(pm860x_led_exit);
+module_platform_driver(pm860x_led_driver);
MODULE_DESCRIPTION("LED driver for Marvell PM860x");
MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>");
diff --git a/drivers/leds/leds-adp5520.c b/drivers/leds/leds-adp5520.c
index 7ba4c7b5b97e..b1400db3f839 100644
--- a/drivers/leds/leds-adp5520.c
+++ b/drivers/leds/leds-adp5520.c
@@ -213,17 +213,7 @@ static struct platform_driver adp5520_led_driver = {
.remove = __devexit_p(adp5520_led_remove),
};
-static int __init adp5520_led_init(void)
-{
- return platform_driver_register(&adp5520_led_driver);
-}
-module_init(adp5520_led_init);
-
-static void __exit adp5520_led_exit(void)
-{
- platform_driver_unregister(&adp5520_led_driver);
-}
-module_exit(adp5520_led_exit);
+module_platform_driver(adp5520_led_driver);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("LEDS ADP5520(01) Driver");
diff --git a/drivers/leds/leds-ams-delta.c b/drivers/leds/leds-ams-delta.c
index 8c00937bf7e7..07428357c83f 100644
--- a/drivers/leds/leds-ams-delta.c
+++ b/drivers/leds/leds-ams-delta.c
@@ -118,18 +118,7 @@ static struct platform_driver ams_delta_led_driver = {
},
};
-static int __init ams_delta_led_init(void)
-{
- return platform_driver_register(&ams_delta_led_driver);
-}
-
-static void __exit ams_delta_led_exit(void)
-{
- platform_driver_unregister(&ams_delta_led_driver);
-}
-
-module_init(ams_delta_led_init);
-module_exit(ams_delta_led_exit);
+module_platform_driver(ams_delta_led_driver);
MODULE_AUTHOR("Jonathan McDowell <noodles@earth.li>");
MODULE_DESCRIPTION("Amstrad Delta LED driver");
diff --git a/drivers/leds/leds-asic3.c b/drivers/leds/leds-asic3.c
index 48d9fe61bdfc..525a92492837 100644
--- a/drivers/leds/leds-asic3.c
+++ b/drivers/leds/leds-asic3.c
@@ -179,21 +179,9 @@ static struct platform_driver asic3_led_driver = {
},
};
-MODULE_ALIAS("platform:leds-asic3");
-
-static int __init asic3_led_init(void)
-{
- return platform_driver_register(&asic3_led_driver);
-}
-
-static void __exit asic3_led_exit(void)
-{
- platform_driver_unregister(&asic3_led_driver);
-}
-
-module_init(asic3_led_init);
-module_exit(asic3_led_exit);
+module_platform_driver(asic3_led_driver);
MODULE_AUTHOR("Paul Parsons <lost.distance@yahoo.com>");
MODULE_DESCRIPTION("HTC ASIC3 LED driver");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:leds-asic3");
diff --git a/drivers/leds/leds-atmel-pwm.c b/drivers/leds/leds-atmel-pwm.c
index 109c875ea233..800243b6037e 100644
--- a/drivers/leds/leds-atmel-pwm.c
+++ b/drivers/leds/leds-atmel-pwm.c
@@ -134,29 +134,18 @@ static int __exit pwmled_remove(struct platform_device *pdev)
return 0;
}
-/* work with hotplug and coldplug */
-MODULE_ALIAS("platform:leds-atmel-pwm");
-
static struct platform_driver pwmled_driver = {
.driver = {
.name = "leds-atmel-pwm",
.owner = THIS_MODULE,
},
/* REVISIT add suspend() and resume() methods */
+ .probe = pwmled_probe,
.remove = __exit_p(pwmled_remove),
};
-static int __init modinit(void)
-{
- return platform_driver_probe(&pwmled_driver, pwmled_probe);
-}
-module_init(modinit);
-
-static void __exit modexit(void)
-{
- platform_driver_unregister(&pwmled_driver);
-}
-module_exit(modexit);
+module_platform_driver(pwmled_driver);
MODULE_DESCRIPTION("Driver for LEDs with PWM-controlled brightness");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:leds-atmel-pwm");
diff --git a/drivers/leds/leds-bd2802.c b/drivers/leds/leds-bd2802.c
index ea2185531f82..591cbdf5a046 100644
--- a/drivers/leds/leds-bd2802.c
+++ b/drivers/leds/leds-bd2802.c
@@ -688,8 +688,7 @@ static int __devinit bd2802_probe(struct i2c_client *client,
i2c_set_clientdata(client, led);
/* Configure RESET GPIO (L: RESET, H: RESET cancel) */
- gpio_request(pdata->reset_gpio, "RGB_RESETB");
- gpio_direction_output(pdata->reset_gpio, 1);
+ gpio_request_one(pdata->reset_gpio, GPIOF_OUT_INIT_HIGH, "RGB_RESETB");
/* Tacss = min 0.1ms */
udelay(100);
@@ -813,17 +812,7 @@ static struct i2c_driver bd2802_i2c_driver = {
.id_table = bd2802_id,
};
-static int __init bd2802_init(void)
-{
- return i2c_add_driver(&bd2802_i2c_driver);
-}
-module_init(bd2802_init);
-
-static void __exit bd2802_exit(void)
-{
- i2c_del_driver(&bd2802_i2c_driver);
-}
-module_exit(bd2802_exit);
+module_i2c_driver(bd2802_i2c_driver);
MODULE_AUTHOR("Kim Kyuwon <q1.kim@samsung.com>");
MODULE_DESCRIPTION("BD2802 LED driver");
diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c
index a498135a4e80..1ed1677c916f 100644
--- a/drivers/leds/leds-clevo-mail.c
+++ b/drivers/leds/leds-clevo-mail.c
@@ -18,7 +18,7 @@ MODULE_AUTHOR("Márton Németh <nm127@freemail.hu>");
MODULE_DESCRIPTION("Clevo mail LED driver");
MODULE_LICENSE("GPL");
-static unsigned int __initdata nodetect;
+static bool __initdata nodetect;
module_param_named(nodetect, nodetect, bool, 0);
MODULE_PARM_DESC(nodetect, "Skip DMI hardware detection");
diff --git a/drivers/leds/leds-cobalt-qube.c b/drivers/leds/leds-cobalt-qube.c
index da5fb016b1a5..6a8725cc7b4d 100644
--- a/drivers/leds/leds-cobalt-qube.c
+++ b/drivers/leds/leds-cobalt-qube.c
@@ -75,9 +75,6 @@ static int __devexit cobalt_qube_led_remove(struct platform_device *pdev)
return 0;
}
-/* work with hotplug and coldplug */
-MODULE_ALIAS("platform:cobalt-qube-leds");
-
static struct platform_driver cobalt_qube_led_driver = {
.probe = cobalt_qube_led_probe,
.remove = __devexit_p(cobalt_qube_led_remove),
@@ -87,19 +84,9 @@ static struct platform_driver cobalt_qube_led_driver = {
},
};
-static int __init cobalt_qube_led_init(void)
-{
- return platform_driver_register(&cobalt_qube_led_driver);
-}
-
-static void __exit cobalt_qube_led_exit(void)
-{
- platform_driver_unregister(&cobalt_qube_led_driver);
-}
-
-module_init(cobalt_qube_led_init);
-module_exit(cobalt_qube_led_exit);
+module_platform_driver(cobalt_qube_led_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Front LED support for Cobalt Server");
MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
+MODULE_ALIAS("platform:cobalt-qube-leds");
diff --git a/drivers/leds/leds-da903x.c b/drivers/leds/leds-da903x.c
index f28931cf6781..d9cd73ebd6c4 100644
--- a/drivers/leds/leds-da903x.c
+++ b/drivers/leds/leds-da903x.c
@@ -158,17 +158,7 @@ static struct platform_driver da903x_led_driver = {
.remove = __devexit_p(da903x_led_remove),
};
-static int __init da903x_led_init(void)
-{
- return platform_driver_register(&da903x_led_driver);
-}
-module_init(da903x_led_init);
-
-static void __exit da903x_led_exit(void)
-{
- platform_driver_unregister(&da903x_led_driver);
-}
-module_exit(da903x_led_exit);
+module_platform_driver(da903x_led_driver);
MODULE_DESCRIPTION("LEDs driver for Dialog Semiconductor DA9030/DA9034");
MODULE_AUTHOR("Eric Miao <eric.miao@marvell.com>"
diff --git a/drivers/leds/leds-dac124s085.c b/drivers/leds/leds-dac124s085.c
index 31cf0d60a9a5..d56c14269ff0 100644
--- a/drivers/leds/leds-dac124s085.c
+++ b/drivers/leds/leds-dac124s085.c
@@ -131,18 +131,7 @@ static struct spi_driver dac124s085_driver = {
},
};
-static int __init dac124s085_leds_init(void)
-{
- return spi_register_driver(&dac124s085_driver);
-}
-
-static void __exit dac124s085_leds_exit(void)
-{
- spi_unregister_driver(&dac124s085_driver);
-}
-
-module_init(dac124s085_leds_init);
-module_exit(dac124s085_leds_exit);
+module_spi_driver(dac124s085_driver);
MODULE_AUTHOR("Guennadi Liakhovetski <lg@denx.de>");
MODULE_DESCRIPTION("DAC124S085 LED driver");
diff --git a/drivers/leds/leds-fsg.c b/drivers/leds/leds-fsg.c
index 49aceffaa5b6..b9053fa6e253 100644
--- a/drivers/leds/leds-fsg.c
+++ b/drivers/leds/leds-fsg.c
@@ -224,20 +224,7 @@ static struct platform_driver fsg_led_driver = {
},
};
-
-static int __init fsg_led_init(void)
-{
- return platform_driver_register(&fsg_led_driver);
-}
-
-static void __exit fsg_led_exit(void)
-{
- platform_driver_unregister(&fsg_led_driver);
-}
-
-
-module_init(fsg_led_init);
-module_exit(fsg_led_exit);
+module_platform_driver(fsg_led_driver);
MODULE_AUTHOR("Rod Whitby <rod@whitby.id.au>");
MODULE_DESCRIPTION("Freecom FSG-3 LED driver");
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index 399a86f2013a..7df74cb97e70 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -293,21 +293,9 @@ static struct platform_driver gpio_led_driver = {
},
};
-MODULE_ALIAS("platform:leds-gpio");
-
-static int __init gpio_led_init(void)
-{
- return platform_driver_register(&gpio_led_driver);
-}
-
-static void __exit gpio_led_exit(void)
-{
- platform_driver_unregister(&gpio_led_driver);
-}
-
-module_init(gpio_led_init);
-module_exit(gpio_led_exit);
+module_platform_driver(gpio_led_driver);
MODULE_AUTHOR("Raphael Assenat <raph@8d.com>, Trent Piepho <tpiepho@freescale.com>");
MODULE_DESCRIPTION("GPIO LED driver");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:leds-gpio");
diff --git a/drivers/leds/leds-hp6xx.c b/drivers/leds/leds-hp6xx.c
index bcfbd3a60eab..366b6055e330 100644
--- a/drivers/leds/leds-hp6xx.c
+++ b/drivers/leds/leds-hp6xx.c
@@ -79,9 +79,6 @@ static int hp6xxled_remove(struct platform_device *pdev)
return 0;
}
-/* work with hotplug and coldplug */
-MODULE_ALIAS("platform:hp6xx-led");
-
static struct platform_driver hp6xxled_driver = {
.probe = hp6xxled_probe,
.remove = hp6xxled_remove,
@@ -91,19 +88,9 @@ static struct platform_driver hp6xxled_driver = {
},
};
-static int __init hp6xxled_init(void)
-{
- return platform_driver_register(&hp6xxled_driver);
-}
-
-static void __exit hp6xxled_exit(void)
-{
- platform_driver_unregister(&hp6xxled_driver);
-}
-
-module_init(hp6xxled_init);
-module_exit(hp6xxled_exit);
+module_platform_driver(hp6xxled_driver);
MODULE_AUTHOR("Kristoffer Ericson <kristoffer.ericson@gmail.com>");
MODULE_DESCRIPTION("HP Jornada 6xx LED driver");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:hp6xx-led");
diff --git a/drivers/leds/leds-lm3530.c b/drivers/leds/leds-lm3530.c
index 0630e4f4b286..45e6878d7374 100644
--- a/drivers/leds/leds-lm3530.c
+++ b/drivers/leds/leds-lm3530.c
@@ -457,18 +457,7 @@ static struct i2c_driver lm3530_i2c_driver = {
},
};
-static int __init lm3530_init(void)
-{
- return i2c_add_driver(&lm3530_i2c_driver);
-}
-
-static void __exit lm3530_exit(void)
-{
- i2c_del_driver(&lm3530_i2c_driver);
-}
-
-module_init(lm3530_init);
-module_exit(lm3530_exit);
+module_i2c_driver(lm3530_i2c_driver);
MODULE_DESCRIPTION("Back Light driver for LM3530");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/leds-lp3944.c b/drivers/leds/leds-lp3944.c
index 9010c054615e..b8f9f0a5d431 100644
--- a/drivers/leds/leds-lp3944.c
+++ b/drivers/leds/leds-lp3944.c
@@ -453,18 +453,7 @@ static struct i2c_driver lp3944_driver = {
.id_table = lp3944_id,
};
-static int __init lp3944_module_init(void)
-{
- return i2c_add_driver(&lp3944_driver);
-}
-
-static void __exit lp3944_module_exit(void)
-{
- i2c_del_driver(&lp3944_driver);
-}
-
-module_init(lp3944_module_init);
-module_exit(lp3944_module_exit);
+module_i2c_driver(lp3944_driver);
MODULE_AUTHOR("Antonio Ospite <ospite@studenti.unina.it>");
MODULE_DESCRIPTION("LP3944 Fun Light Chip");
diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c
index cb641f1b3342..d62a7982a5e6 100644
--- a/drivers/leds/leds-lp5521.c
+++ b/drivers/leds/leds-lp5521.c
@@ -797,25 +797,7 @@ static struct i2c_driver lp5521_driver = {
.id_table = lp5521_id,
};
-static int __init lp5521_init(void)
-{
- int ret;
-
- ret = i2c_add_driver(&lp5521_driver);
-
- if (ret < 0)
- printk(KERN_ALERT "Adding lp5521 driver failed\n");
-
- return ret;
-}
-
-static void __exit lp5521_exit(void)
-{
- i2c_del_driver(&lp5521_driver);
-}
-
-module_init(lp5521_init);
-module_exit(lp5521_exit);
+module_i2c_driver(lp5521_driver);
MODULE_AUTHOR("Mathias Nyman, Yuri Zaporozhets, Samu Onkalo");
MODULE_DESCRIPTION("LP5521 LED engine");
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
index 5971e309b234..73e791ae7259 100644
--- a/drivers/leds/leds-lp5523.c
+++ b/drivers/leds/leds-lp5523.c
@@ -870,8 +870,6 @@ static int __devinit lp5523_init_led(struct lp5523_led *led, struct device *dev,
return 0;
}
-static struct i2c_driver lp5523_driver;
-
static int __devinit lp5523_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -1021,25 +1019,7 @@ static struct i2c_driver lp5523_driver = {
.id_table = lp5523_id,
};
-static int __init lp5523_init(void)
-{
- int ret;
-
- ret = i2c_add_driver(&lp5523_driver);
-
- if (ret < 0)
- printk(KERN_ALERT "Adding lp5523 driver failed\n");
-
- return ret;
-}
-
-static void __exit lp5523_exit(void)
-{
- i2c_del_driver(&lp5523_driver);
-}
-
-module_init(lp5523_init);
-module_exit(lp5523_exit);
+module_i2c_driver(lp5523_driver);
MODULE_AUTHOR("Mathias Nyman <mathias.nyman@nokia.com>");
MODULE_DESCRIPTION("LP5523 LED engine");
diff --git a/drivers/leds/leds-lt3593.c b/drivers/leds/leds-lt3593.c
index 53f67b8ce55d..e311a96c4469 100644
--- a/drivers/leds/leds-lt3593.c
+++ b/drivers/leds/leds-lt3593.c
@@ -199,21 +199,9 @@ static struct platform_driver lt3593_led_driver = {
},
};
-MODULE_ALIAS("platform:leds-lt3593");
-
-static int __init lt3593_led_init(void)
-{
- return platform_driver_register(&lt3593_led_driver);
-}
-
-static void __exit lt3593_led_exit(void)
-{
- platform_driver_unregister(&lt3593_led_driver);
-}
-
-module_init(lt3593_led_init);
-module_exit(lt3593_led_exit);
+module_platform_driver(lt3593_led_driver);
MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>");
MODULE_DESCRIPTION("LED driver for LT3593 controllers");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:leds-lt3593");
diff --git a/drivers/leds/leds-max8997.c b/drivers/leds/leds-max8997.c
new file mode 100644
index 000000000000..f4c0e37fad1e
--- /dev/null
+++ b/drivers/leds/leds-max8997.c
@@ -0,0 +1,372 @@
+/*
+ * leds-max8997.c - LED class driver for MAX8997 LEDs.
+ *
+ * Copyright (C) 2011 Samsung Electronics
+ * Donggeun Kim <dg77.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/leds.h>
+#include <linux/mfd/max8997.h>
+#include <linux/mfd/max8997-private.h>
+#include <linux/platform_device.h>
+
+#define MAX8997_LED_FLASH_SHIFT 3
+#define MAX8997_LED_FLASH_CUR_MASK 0xf8
+#define MAX8997_LED_MOVIE_SHIFT 4
+#define MAX8997_LED_MOVIE_CUR_MASK 0xf0
+
+#define MAX8997_LED_FLASH_MAX_BRIGHTNESS 0x1f
+#define MAX8997_LED_MOVIE_MAX_BRIGHTNESS 0xf
+#define MAX8997_LED_NONE_MAX_BRIGHTNESS 0
+
+#define MAX8997_LED0_FLASH_MASK 0x1
+#define MAX8997_LED0_FLASH_PIN_MASK 0x5
+#define MAX8997_LED0_MOVIE_MASK 0x8
+#define MAX8997_LED0_MOVIE_PIN_MASK 0x28
+
+#define MAX8997_LED1_FLASH_MASK 0x2
+#define MAX8997_LED1_FLASH_PIN_MASK 0x6
+#define MAX8997_LED1_MOVIE_MASK 0x10
+#define MAX8997_LED1_MOVIE_PIN_MASK 0x30
+
+#define MAX8997_LED_BOOST_ENABLE_MASK (1 << 6)
+
+struct max8997_led {
+ struct max8997_dev *iodev;
+ struct led_classdev cdev;
+ bool enabled;
+ int id;
+ enum max8997_led_mode led_mode;
+ struct mutex mutex;
+};
+
+static void max8997_led_clear_mode(struct max8997_led *led,
+ enum max8997_led_mode mode)
+{
+ struct i2c_client *client = led->iodev->i2c;
+ u8 val = 0, mask = 0;
+ int ret;
+
+ switch (mode) {
+ case MAX8997_FLASH_MODE:
+ mask = led->id ?
+ MAX8997_LED1_FLASH_MASK : MAX8997_LED0_FLASH_MASK;
+ break;
+ case MAX8997_MOVIE_MODE:
+ mask = led->id ?
+ MAX8997_LED1_MOVIE_MASK : MAX8997_LED0_MOVIE_MASK;
+ break;
+ case MAX8997_FLASH_PIN_CONTROL_MODE:
+ mask = led->id ?
+ MAX8997_LED1_FLASH_PIN_MASK : MAX8997_LED0_FLASH_PIN_MASK;
+ break;
+ case MAX8997_MOVIE_PIN_CONTROL_MODE:
+ mask = led->id ?
+ MAX8997_LED1_MOVIE_PIN_MASK : MAX8997_LED0_MOVIE_PIN_MASK;
+ break;
+ default:
+ break;
+ }
+
+ if (mask) {
+ ret = max8997_update_reg(client,
+ MAX8997_REG_LEN_CNTL, val, mask);
+ if (ret)
+ dev_err(led->iodev->dev,
+ "failed to update register(%d)\n", ret);
+ }
+}
+
+static void max8997_led_set_mode(struct max8997_led *led,
+ enum max8997_led_mode mode)
+{
+ int ret;
+ struct i2c_client *client = led->iodev->i2c;
+ u8 mask = 0;
+
+ /* First, clear the previous mode */
+ max8997_led_clear_mode(led, led->led_mode);
+
+ switch (mode) {
+ case MAX8997_FLASH_MODE:
+ mask = led->id ?
+ MAX8997_LED1_FLASH_MASK : MAX8997_LED0_FLASH_MASK;
+ led->cdev.max_brightness = MAX8997_LED_FLASH_MAX_BRIGHTNESS;
+ break;
+ case MAX8997_MOVIE_MODE:
+ mask = led->id ?
+ MAX8997_LED1_MOVIE_MASK : MAX8997_LED0_MOVIE_MASK;
+ led->cdev.max_brightness = MAX8997_LED_MOVIE_MAX_BRIGHTNESS;
+ break;
+ case MAX8997_FLASH_PIN_CONTROL_MODE:
+ mask = led->id ?
+ MAX8997_LED1_FLASH_PIN_MASK : MAX8997_LED0_FLASH_PIN_MASK;
+ led->cdev.max_brightness = MAX8997_LED_FLASH_MAX_BRIGHTNESS;
+ break;
+ case MAX8997_MOVIE_PIN_CONTROL_MODE:
+ mask = led->id ?
+ MAX8997_LED1_MOVIE_PIN_MASK : MAX8997_LED0_MOVIE_PIN_MASK;
+ led->cdev.max_brightness = MAX8997_LED_MOVIE_MAX_BRIGHTNESS;
+ break;
+ default:
+ led->cdev.max_brightness = MAX8997_LED_NONE_MAX_BRIGHTNESS;
+ break;
+ }
+
+ if (mask) {
+ ret = max8997_update_reg(client,
+ MAX8997_REG_LEN_CNTL, mask, mask);
+ if (ret)
+ dev_err(led->iodev->dev,
+ "failed to update register(%d)\n", ret);
+ }
+
+ led->led_mode = mode;
+}
+
+static void max8997_led_enable(struct max8997_led *led, bool enable)
+{
+ int ret;
+ struct i2c_client *client = led->iodev->i2c;
+ u8 val = 0, mask = MAX8997_LED_BOOST_ENABLE_MASK;
+
+ if (led->enabled == enable)
+ return;
+
+ val = enable ? MAX8997_LED_BOOST_ENABLE_MASK : 0;
+
+ ret = max8997_update_reg(client, MAX8997_REG_BOOST_CNTL, val, mask);
+ if (ret)
+ dev_err(led->iodev->dev,
+ "failed to update register(%d)\n", ret);
+
+ led->enabled = enable;
+}
+
+static void max8997_led_set_current(struct max8997_led *led,
+ enum led_brightness value)
+{
+ int ret;
+ struct i2c_client *client = led->iodev->i2c;
+ u8 val = 0, mask = 0, reg = 0;
+
+ switch (led->led_mode) {
+ case MAX8997_FLASH_MODE:
+ case MAX8997_FLASH_PIN_CONTROL_MODE:
+ val = value << MAX8997_LED_FLASH_SHIFT;
+ mask = MAX8997_LED_FLASH_CUR_MASK;
+ reg = led->id ? MAX8997_REG_FLASH2_CUR : MAX8997_REG_FLASH1_CUR;
+ break;
+ case MAX8997_MOVIE_MODE:
+ case MAX8997_MOVIE_PIN_CONTROL_MODE:
+ val = value << MAX8997_LED_MOVIE_SHIFT;
+ mask = MAX8997_LED_MOVIE_CUR_MASK;
+ reg = MAX8997_REG_MOVIE_CUR;
+ break;
+ default:
+ break;
+ }
+
+ if (mask) {
+ ret = max8997_update_reg(client, reg, val, mask);
+ if (ret)
+ dev_err(led->iodev->dev,
+ "failed to update register(%d)\n", ret);
+ }
+}
+
+static void max8997_led_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct max8997_led *led =
+ container_of(led_cdev, struct max8997_led, cdev);
+
+ if (value) {
+ max8997_led_set_current(led, value);
+ max8997_led_enable(led, true);
+ } else {
+ max8997_led_set_current(led, value);
+ max8997_led_enable(led, false);
+ }
+}
+
+static ssize_t max8997_led_show_mode(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct max8997_led *led =
+ container_of(led_cdev, struct max8997_led, cdev);
+ ssize_t ret = 0;
+
+ mutex_lock(&led->mutex);
+
+ switch (led->led_mode) {
+ case MAX8997_FLASH_MODE:
+ ret += sprintf(buf, "FLASH\n");
+ break;
+ case MAX8997_MOVIE_MODE:
+ ret += sprintf(buf, "MOVIE\n");
+ break;
+ case MAX8997_FLASH_PIN_CONTROL_MODE:
+ ret += sprintf(buf, "FLASH_PIN_CONTROL\n");
+ break;
+ case MAX8997_MOVIE_PIN_CONTROL_MODE:
+ ret += sprintf(buf, "MOVIE_PIN_CONTROL\n");
+ break;
+ default:
+ ret += sprintf(buf, "NONE\n");
+ break;
+ }
+
+ mutex_unlock(&led->mutex);
+
+ return ret;
+}
+
+static ssize_t max8997_led_store_mode(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct max8997_led *led =
+ container_of(led_cdev, struct max8997_led, cdev);
+ enum max8997_led_mode mode;
+
+ mutex_lock(&led->mutex);
+
+ if (!strncmp(buf, "FLASH_PIN_CONTROL", 17))
+ mode = MAX8997_FLASH_PIN_CONTROL_MODE;
+ else if (!strncmp(buf, "MOVIE_PIN_CONTROL", 17))
+ mode = MAX8997_MOVIE_PIN_CONTROL_MODE;
+ else if (!strncmp(buf, "FLASH", 5))
+ mode = MAX8997_FLASH_MODE;
+ else if (!strncmp(buf, "MOVIE", 5))
+ mode = MAX8997_MOVIE_MODE;
+ else
+ mode = MAX8997_NONE;
+
+ max8997_led_set_mode(led, mode);
+
+ mutex_unlock(&led->mutex);
+
+ return size;
+}
+
+static DEVICE_ATTR(mode, 0644, max8997_led_show_mode, max8997_led_store_mode);
+
+static int __devinit max8997_led_probe(struct platform_device *pdev)
+{
+ struct max8997_dev *iodev = dev_get_drvdata(pdev->dev.parent);
+ struct max8997_platform_data *pdata = dev_get_platdata(iodev->dev);
+ struct max8997_led *led;
+ char name[20];
+ int ret = 0;
+
+ if (pdata == NULL) {
+ dev_err(&pdev->dev, "no platform data\n");
+ return -ENODEV;
+ }
+
+ led = kzalloc(sizeof(*led), GFP_KERNEL);
+ if (led == NULL) {
+ ret = -ENOMEM;
+ goto err_mem;
+ }
+
+ led->id = pdev->id;
+ snprintf(name, sizeof(name), "max8997-led%d", pdev->id);
+
+ led->cdev.name = name;
+ led->cdev.brightness_set = max8997_led_brightness_set;
+ led->cdev.flags |= LED_CORE_SUSPENDRESUME;
+ led->cdev.brightness = 0;
+ led->iodev = iodev;
+
+ /* initialize mode and brightness according to platform_data */
+ if (pdata->led_pdata) {
+ u8 mode = 0, brightness = 0;
+
+ mode = pdata->led_pdata->mode[led->id];
+ brightness = pdata->led_pdata->brightness[led->id];
+
+ max8997_led_set_mode(led, pdata->led_pdata->mode[led->id]);
+
+ if (brightness > led->cdev.max_brightness)
+ brightness = led->cdev.max_brightness;
+ max8997_led_set_current(led, brightness);
+ led->cdev.brightness = brightness;
+ } else {
+ max8997_led_set_mode(led, MAX8997_NONE);
+ max8997_led_set_current(led, 0);
+ }
+
+ mutex_init(&led->mutex);
+
+ platform_set_drvdata(pdev, led);
+
+ ret = led_classdev_register(&pdev->dev, &led->cdev);
+ if (ret < 0)
+ goto err_led;
+
+ ret = device_create_file(led->cdev.dev, &dev_attr_mode);
+ if (ret != 0) {
+ dev_err(&pdev->dev,
+ "failed to create file: %d\n", ret);
+ goto err_file;
+ }
+
+ return 0;
+
+err_file:
+ led_classdev_unregister(&led->cdev);
+err_led:
+ kfree(led);
+err_mem:
+ return ret;
+}
+
+static int __devexit max8997_led_remove(struct platform_device *pdev)
+{
+ struct max8997_led *led = platform_get_drvdata(pdev);
+
+ device_remove_file(led->cdev.dev, &dev_attr_mode);
+ led_classdev_unregister(&led->cdev);
+ kfree(led);
+
+ return 0;
+}
+
+static struct platform_driver max8997_led_driver = {
+ .driver = {
+ .name = "max8997-led",
+ .owner = THIS_MODULE,
+ },
+ .probe = max8997_led_probe,
+ .remove = __devexit_p(max8997_led_remove),
+};
+
+static int __init max8997_led_init(void)
+{
+ return platform_driver_register(&max8997_led_driver);
+}
+module_init(max8997_led_init);
+
+static void __exit max8997_led_exit(void)
+{
+ platform_driver_unregister(&max8997_led_driver);
+}
+module_exit(max8997_led_exit);
+
+MODULE_AUTHOR("Donggeun Kim <dg77.kim@samsung.com>");
+MODULE_DESCRIPTION("MAX8997 LED driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:max8997-led");
diff --git a/drivers/leds/leds-mc13783.c b/drivers/leds/leds-mc13783.c
index b3393a9f2139..8bc491541550 100644
--- a/drivers/leds/leds-mc13783.c
+++ b/drivers/leds/leds-mc13783.c
@@ -275,7 +275,7 @@ static int __devinit mc13783_led_probe(struct platform_device *pdev)
return -ENODEV;
}
- if (pdata->num_leds < 1 || pdata->num_leds > MC13783_LED_MAX) {
+ if (pdata->num_leds < 1 || pdata->num_leds > (MC13783_LED_MAX + 1)) {
dev_err(&pdev->dev, "Invalid led count %d\n", pdata->num_leds);
return -EINVAL;
}
@@ -385,17 +385,7 @@ static struct platform_driver mc13783_led_driver = {
.remove = __devexit_p(mc13783_led_remove),
};
-static int __init mc13783_led_init(void)
-{
- return platform_driver_register(&mc13783_led_driver);
-}
-module_init(mc13783_led_init);
-
-static void __exit mc13783_led_exit(void)
-{
- platform_driver_unregister(&mc13783_led_driver);
-}
-module_exit(mc13783_led_exit);
+module_platform_driver(mc13783_led_driver);
MODULE_DESCRIPTION("LEDs driver for Freescale MC13783 PMIC");
MODULE_AUTHOR("Philippe Retornaz <philippe.retornaz@epfl.ch>");
diff --git a/drivers/leds/leds-netxbig.c b/drivers/leds/leds-netxbig.c
index f2e51c134399..d8433f2d53bc 100644
--- a/drivers/leds/leds-netxbig.c
+++ b/drivers/leds/leds-netxbig.c
@@ -81,35 +81,23 @@ static int __devinit gpio_ext_init(struct netxbig_gpio_ext *gpio_ext)
/* Configure address GPIOs. */
for (i = 0; i < gpio_ext->num_addr; i++) {
- err = gpio_request(gpio_ext->addr[i], "GPIO extension addr");
+ err = gpio_request_one(gpio_ext->addr[i], GPIOF_OUT_INIT_LOW,
+ "GPIO extension addr");
if (err)
goto err_free_addr;
- err = gpio_direction_output(gpio_ext->addr[i], 0);
- if (err) {
- gpio_free(gpio_ext->addr[i]);
- goto err_free_addr;
- }
}
/* Configure data GPIOs. */
for (i = 0; i < gpio_ext->num_data; i++) {
- err = gpio_request(gpio_ext->data[i], "GPIO extension data");
+ err = gpio_request_one(gpio_ext->data[i], GPIOF_OUT_INIT_LOW,
+ "GPIO extension data");
if (err)
goto err_free_data;
- err = gpio_direction_output(gpio_ext->data[i], 0);
- if (err) {
- gpio_free(gpio_ext->data[i]);
- goto err_free_data;
- }
}
/* Configure "enable select" GPIO. */
- err = gpio_request(gpio_ext->enable, "GPIO extension enable");
+ err = gpio_request_one(gpio_ext->enable, GPIOF_OUT_INIT_LOW,
+ "GPIO extension enable");
if (err)
goto err_free_data;
- err = gpio_direction_output(gpio_ext->enable, 0);
- if (err) {
- gpio_free(gpio_ext->enable);
- goto err_free_data;
- }
return 0;
@@ -429,21 +417,10 @@ static struct platform_driver netxbig_led_driver = {
.owner = THIS_MODULE,
},
};
-MODULE_ALIAS("platform:leds-netxbig");
-static int __init netxbig_led_init(void)
-{
- return platform_driver_register(&netxbig_led_driver);
-}
-
-static void __exit netxbig_led_exit(void)
-{
- platform_driver_unregister(&netxbig_led_driver);
-}
-
-module_init(netxbig_led_init);
-module_exit(netxbig_led_exit);
+module_platform_driver(netxbig_led_driver);
MODULE_AUTHOR("Simon Guinot <sguinot@lacie.com>");
MODULE_DESCRIPTION("LED driver for LaCie xBig Network boards");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:leds-netxbig");
diff --git a/drivers/leds/leds-ns2.c b/drivers/leds/leds-ns2.c
index 37b7d0cfe586..2f0a14421a73 100644
--- a/drivers/leds/leds-ns2.c
+++ b/drivers/leds/leds-ns2.c
@@ -323,21 +323,10 @@ static struct platform_driver ns2_led_driver = {
.owner = THIS_MODULE,
},
};
-MODULE_ALIAS("platform:leds-ns2");
-
-static int __init ns2_led_init(void)
-{
- return platform_driver_register(&ns2_led_driver);
-}
-static void __exit ns2_led_exit(void)
-{
- platform_driver_unregister(&ns2_led_driver);
-}
-
-module_init(ns2_led_init);
-module_exit(ns2_led_exit);
+module_platform_driver(ns2_led_driver);
MODULE_AUTHOR("Simon Guinot <sguinot@lacie.com>");
MODULE_DESCRIPTION("Network Space v2 LED driver");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:leds-ns2");
diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c
index a2c874623e35..ceccab44b5b8 100644
--- a/drivers/leds/leds-pca9532.c
+++ b/drivers/leds/leds-pca9532.c
@@ -489,20 +489,8 @@ static int pca9532_remove(struct i2c_client *client)
return 0;
}
-static int __init pca9532_init(void)
-{
- return i2c_add_driver(&pca9532_driver);
-}
-
-static void __exit pca9532_exit(void)
-{
- i2c_del_driver(&pca9532_driver);
-}
+module_i2c_driver(pca9532_driver);
MODULE_AUTHOR("Riku Voipio");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("PCA 9532 LED dimmer");
-
-module_init(pca9532_init);
-module_exit(pca9532_exit);
-
diff --git a/drivers/leds/leds-pca955x.c b/drivers/leds/leds-pca955x.c
index 66aa3e8e786f..dcc3bc3d38db 100644
--- a/drivers/leds/leds-pca955x.c
+++ b/drivers/leds/leds-pca955x.c
@@ -371,18 +371,7 @@ static struct i2c_driver pca955x_driver = {
.id_table = pca955x_id,
};
-static int __init pca955x_leds_init(void)
-{
- return i2c_add_driver(&pca955x_driver);
-}
-
-static void __exit pca955x_leds_exit(void)
-{
- i2c_del_driver(&pca955x_driver);
-}
-
-module_init(pca955x_leds_init);
-module_exit(pca955x_leds_exit);
+module_i2c_driver(pca955x_driver);
MODULE_AUTHOR("Nate Case <ncase@xes-inc.com>");
MODULE_DESCRIPTION("PCA955x LED driver");
diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
index 666daf77872e..3ed92f34bd44 100644
--- a/drivers/leds/leds-pwm.c
+++ b/drivers/leds/leds-pwm.c
@@ -135,18 +135,7 @@ static struct platform_driver led_pwm_driver = {
},
};
-static int __init led_pwm_init(void)
-{
- return platform_driver_register(&led_pwm_driver);
-}
-
-static void __exit led_pwm_exit(void)
-{
- platform_driver_unregister(&led_pwm_driver);
-}
-
-module_init(led_pwm_init);
-module_exit(led_pwm_exit);
+module_platform_driver(led_pwm_driver);
MODULE_AUTHOR("Luotao Fu <l.fu@pengutronix.de>");
MODULE_DESCRIPTION("PWM LED driver for PXA");
diff --git a/drivers/leds/leds-rb532.c b/drivers/leds/leds-rb532.c
index c3525f37f73d..a7815b6cd856 100644
--- a/drivers/leds/leds-rb532.c
+++ b/drivers/leds/leds-rb532.c
@@ -57,21 +57,9 @@ static struct platform_driver rb532_led_driver = {
},
};
-static int __init rb532_led_init(void)
-{
- return platform_driver_register(&rb532_led_driver);
-}
-
-static void __exit rb532_led_exit(void)
-{
- platform_driver_unregister(&rb532_led_driver);
-}
-
-module_init(rb532_led_init);
-module_exit(rb532_led_exit);
-
-MODULE_ALIAS("platform:rb532-led");
+module_platform_driver(rb532_led_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("User LED support for Routerboard532");
MODULE_AUTHOR("Phil Sutter <n0-1@freewrt.org>");
+MODULE_ALIAS("platform:rb532-led");
diff --git a/drivers/leds/leds-regulator.c b/drivers/leds/leds-regulator.c
index 8497f56f8e46..df7e963bddd3 100644
--- a/drivers/leds/leds-regulator.c
+++ b/drivers/leds/leds-regulator.c
@@ -229,17 +229,7 @@ static struct platform_driver regulator_led_driver = {
.remove = __devexit_p(regulator_led_remove),
};
-static int __init regulator_led_init(void)
-{
- return platform_driver_register(&regulator_led_driver);
-}
-module_init(regulator_led_init);
-
-static void __exit regulator_led_exit(void)
-{
- platform_driver_unregister(&regulator_led_driver);
-}
-module_exit(regulator_led_exit);
+module_platform_driver(regulator_led_driver);
MODULE_AUTHOR("Antonio Ospite <ospite@studenti.unina.it>");
MODULE_DESCRIPTION("Regulator driven LED driver");
diff --git a/drivers/leds/leds-renesas-tpu.c b/drivers/leds/leds-renesas-tpu.c
index 3ee540eb127e..32fe337d5c68 100644
--- a/drivers/leds/leds-renesas-tpu.c
+++ b/drivers/leds/leds-renesas-tpu.c
@@ -339,18 +339,7 @@ static struct platform_driver r_tpu_device_driver = {
}
};
-static int __init r_tpu_init(void)
-{
- return platform_driver_register(&r_tpu_device_driver);
-}
-
-static void __exit r_tpu_exit(void)
-{
- platform_driver_unregister(&r_tpu_device_driver);
-}
-
-module_init(r_tpu_init);
-module_exit(r_tpu_exit);
+module_platform_driver(r_tpu_device_driver);
MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("Renesas TPU LED Driver");
diff --git a/drivers/leds/leds-s3c24xx.c b/drivers/leds/leds-s3c24xx.c
index 29f8b0f0e2c6..bd0a5ed49c42 100644
--- a/drivers/leds/leds-s3c24xx.c
+++ b/drivers/leds/leds-s3c24xx.c
@@ -121,18 +121,7 @@ static struct platform_driver s3c24xx_led_driver = {
},
};
-static int __init s3c24xx_led_init(void)
-{
- return platform_driver_register(&s3c24xx_led_driver);
-}
-
-static void __exit s3c24xx_led_exit(void)
-{
- platform_driver_unregister(&s3c24xx_led_driver);
-}
-
-module_init(s3c24xx_led_init);
-module_exit(s3c24xx_led_exit);
+module_platform_driver(s3c24xx_led_driver);
MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
MODULE_DESCRIPTION("S3C24XX LED driver");
diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
index 614ebebaaa28..57371e1485ab 100644
--- a/drivers/leds/leds-ss4200.c
+++ b/drivers/leds/leds-ss4200.c
@@ -79,7 +79,7 @@ static int __init ss4200_led_dmi_callback(const struct dmi_system_id *id)
return 1;
}
-static unsigned int __initdata nodetect;
+static bool __initdata nodetect;
module_param_named(nodetect, nodetect, bool, 0);
MODULE_PARM_DESC(nodetect, "Skip DMI-based hardware detection");
diff --git a/drivers/leds/leds-tca6507.c b/drivers/leds/leds-tca6507.c
new file mode 100644
index 000000000000..133f89fb7071
--- /dev/null
+++ b/drivers/leds/leds-tca6507.c
@@ -0,0 +1,779 @@
+/*
+ * leds-tca6507
+ *
+ * The TCA6507 is a programmable LED controller that can drive 7
+ * separate lines either by holding them low, or by pulsing them
+ * with modulated width.
+ * The modulation can be varied in a simple pattern to produce a blink or
+ * double-blink.
+ *
+ * This driver can configure each line either as a 'GPIO' which is out-only
+ * (no pull-up) or as an LED with variable brightness and hardware-assisted
+ * blinking.
+ *
+ * Apart from OFF and ON there are three programmable brightness levels which
+ * can be programmed from 0 to 15 and indicate how many 500usec intervals in
+ * each 8msec the led is 'on'. The levels are named MASTER, BANK0 and
+ * BANK1.
+ *
+ * There are two different blink rates that can be programmed, each with
+ * separate time for rise, on, fall, off and second-off. Thus if 3 or more
+ * different non-trivial rates are required, software must be used for the extra
+ * rates. The two different blink rates must align with the two levels BANK0 and
+ * BANK1.
+ * This driver does not support double-blink so 'second-off' always matches
+ * 'off'.
+ *
+ * Only 16 different times can be programmed in a roughly logarithmic scale from
+ * 64ms to 16320ms. To be precise the possible times are:
+ * 0, 64, 128, 192, 256, 384, 512, 768,
+ * 1024, 1536, 2048, 3072, 4096, 5760, 8128, 16320
+ *
+ * Times that cannot be closely matched with these must be
+ * handled in software. This driver allows 12.5% error in matching.
+ *
+ * This driver does not allow rise/fall rates to be set explicitly. When trying
+ * to match a given 'on' or 'off' period, an appropriate pair of 'change' and
+ * 'hold' times are chosen to get a close match. If the target delay is even,
+ * the 'change' number will be the smaller; if odd, the 'hold' number will be
+ * the smaller.
+ *
+ * Choosing pairs of delays with 12.5% errors allows us to match delays in the
+ * ranges: 56-72, 112-144, 168-216, 224-27504, 28560-36720.
+ * 26% of the achievable sums can be matched by multiple pairings. For example
+ * 1536 == 1536+0, 1024+512, or 768+768. This driver will always choose the
+ * pairing with the least maximum - 768+768 in this case. Other pairings are
+ * not available.
+ *
+ * Access to the 3 levels and 2 blinks is on a first-come, first-served basis.
+ * Access can be shared by multiple leds if they have the same level and
+ * either the same blink rates, or some don't blink.
+ * When a led changes, it relinquishes access and tries again, so it might
+ * lose access to hardware blink.
+ * If a blink engine cannot be allocated, software blink is used.
+ * If the desired brightness cannot be allocated, the closest available non-zero
+ * brightness is used. As 'full' is always available, the worst case would be
+ * to have two different blink rates at '1', with Max at '2'; other leds then
+ * have to choose between '2' and '16'. Hopefully this is not likely.
+ *
+ * Each bank (BANK0 and BANK1) has two usage counts - LEDs using the brightness
+ * and LEDs using the blink. It can only be reprogrammed when the appropriate
+ * counter is zero. The MASTER level has a single usage count.
+ *
+ * Each led has programmable 'on' and 'off' times, in milliseconds. With each
+ * there is a flag saying if it was explicitly requested or defaulted.
+ * Similarly the banks know if each time was explicit or a default. Defaults
+ * are permitted to be changed freely - they are not recognised when matching.
+ *
+ *
+ * An led-tca6507 device must be provided with platform data. This data
+ * lists for each output: the name, default trigger, and whether the signal
+ * is being used as a GPIO rather than an led. 'struct led_platform_data'
+ * is used for this. If 'name' is NULL, the output isn't used. If 'flags'
+ * is TCA6507_MAKE_GPIO, the output is a GPO.
+ * The "struct led_platform_data" can be embedded in a
+ * "struct tca6507_platform_data" which adds a 'gpio_base' for the GPIOs,
+ * and a 'setup' callback which is called once the GPIOs are available.
+ *
+ */
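+
+/*
+ * A minimal sketch of such platform data (the LED names and the trigger
+ * below are made-up examples; the structure and flag names come from the
+ * probe code later in this file and from <linux/leds-tca6507.h> and
+ * <linux/leds.h>):
+ *
+ *	static struct led_info board_tca6507_leds[7] = {
+ *		[0] = { .name = "board:red:status",
+ *			.default_trigger = "heartbeat" },
+ *		[1] = { .name = "wifi-nreset", .flags = TCA6507_MAKE_GPIO },
+ *	};
+ *	static struct tca6507_platform_data board_tca6507_pdata = {
+ *		.leds = { .num_leds = 7, .leds = board_tca6507_leds },
+ *		.gpio_base = -1,
+ *	};
+ *
+ * Outputs 2-6 keep a NULL name and are simply unused, but num_leds must
+ * still be 7 because the probe routine insists on exactly NUM_LEDS entries.
+ */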
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/leds.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/workqueue.h>
+#include <linux/leds-tca6507.h>
+
+/* LED select registers determine the source that drives LED outputs */
+#define TCA6507_LS_LED_OFF 0x0 /* Output HI-Z (off) */
+#define TCA6507_LS_LED_OFF1 0x1 /* Output HI-Z (off) - not used */
+#define TCA6507_LS_LED_PWM0 0x2 /* Output LOW with Bank0 rate */
+#define TCA6507_LS_LED_PWM1 0x3 /* Output LOW with Bank1 rate */
+#define TCA6507_LS_LED_ON 0x4 /* Output LOW (on) */
+#define TCA6507_LS_LED_MIR 0x5 /* Output LOW with Master Intensity */
+#define TCA6507_LS_BLINK0 0x6 /* Blink at Bank0 rate */
+#define TCA6507_LS_BLINK1 0x7 /* Blink at Bank1 rate */
+
+enum {
+ BANK0,
+ BANK1,
+ MASTER,
+};
+static int bank_source[3] = {
+ TCA6507_LS_LED_PWM0,
+ TCA6507_LS_LED_PWM1,
+ TCA6507_LS_LED_MIR,
+};
+static int blink_source[2] = {
+ TCA6507_LS_BLINK0,
+ TCA6507_LS_BLINK1,
+};
+
+/* PWM registers */
+#define TCA6507_REG_CNT 11
+
+/*
+ * 0x00, 0x01, 0x02 encode the TCA6507_LS_* values, each output
+ * owns one bit in each register
+ */
+#define TCA6507_FADE_ON 0x03
+#define TCA6507_FULL_ON 0x04
+#define TCA6507_FADE_OFF 0x05
+#define TCA6507_FIRST_OFF 0x06
+#define TCA6507_SECOND_OFF 0x07
+#define TCA6507_MAX_INTENSITY 0x08
+#define TCA6507_MASTER_INTENSITY 0x09
+#define TCA6507_INITIALIZE 0x0A
+
+#define INIT_CODE 0x8
+
+#define TIMECODES 16
+static int time_codes[TIMECODES] = {
+ 0, 64, 128, 192, 256, 384, 512, 768,
+ 1024, 1536, 2048, 3072, 4096, 5760, 8128, 16320
+};
+
+/* Convert an led.brightness level (0..255) to a TCA6507 level (0..15) */
+static inline int TO_LEVEL(int brightness)
+{
+ return brightness >> 4;
+}
+
+/* ...and convert back */
+static inline int TO_BRIGHT(int level)
+{
+ if (level)
+ return (level << 4) | 0xf;
+ return 0;
+}
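+
+/*
+ * A quick worked example of the two helpers above: a requested brightness
+ * of 100 becomes level 100 >> 4 == 6, and converting back gives
+ * (6 << 4) | 0xf == 111, so the brightness reported back to the LED core
+ * snaps to the nearest value the chip can actually produce.  Level 0 is
+ * the one exception and maps straight back to 0 (off).
+ */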
+
+#define NUM_LEDS 7
+struct tca6507_chip {
+ int reg_set; /* One bit per register where
+ * a '1' means the register
+ * should be written */
+ u8 reg_file[TCA6507_REG_CNT];
+ /* Bank 2 is Master Intensity and doesn't use times */
+ struct bank {
+ int level;
+ int ontime, offtime;
+ int on_dflt, off_dflt;
+ int time_use, level_use;
+ } bank[3];
+ struct i2c_client *client;
+ struct work_struct work;
+ spinlock_t lock;
+
+ struct tca6507_led {
+ struct tca6507_chip *chip;
+ struct led_classdev led_cdev;
+ int num;
+ int ontime, offtime;
+ int on_dflt, off_dflt;
+ int bank; /* Bank used, or -1 */
+ int blink; /* Set if hardware-blinking */
+ } leds[NUM_LEDS];
+#ifdef CONFIG_GPIOLIB
+ struct gpio_chip gpio;
+ const char *gpio_name[NUM_LEDS];
+ int gpio_map[NUM_LEDS];
+#endif
+};
+
+static const struct i2c_device_id tca6507_id[] = {
+ { "tca6507" },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tca6507_id);
+
+static int choose_times(int msec, int *c1p, int *c2p)
+{
+ /*
+ * Choose two timecodes which add to 'msec' as near as possible.
+ * The first returned is the 'on' or 'off' time. The second is to be
+ * used as a 'fade-on' or 'fade-off' time. If 'msec' is even,
+ * the first will not be smaller than the second. If 'msec' is odd,
+ * the first will not be larger than the second.
+ * If we cannot get a sum within 1/8 of 'msec' fail with -EINVAL,
+ * otherwise return the sum that was achieved, plus 1 if the first is
+ * smaller.
+ * If two possibilities are equally good (e.g. 512+0, 256+256), choose
+ * the first pair so there is more change-time visible (i.e. it is
+ * softer).
+ */
+ int c1, c2;
+ int tmax = msec * 9 / 8;
+ int tmin = msec * 7 / 8;
+ int diff = 65536;
+
+ /* We start at '1' to ensure we never even think of choosing a
+ * total time of '0'.
+ */
+ for (c1 = 1; c1 < TIMECODES; c1++) {
+ int t = time_codes[c1];
+ if (t*2 < tmin)
+ continue;
+ if (t > tmax)
+ break;
+ for (c2 = 0; c2 <= c1; c2++) {
+ int tt = t + time_codes[c2];
+ int d;
+ if (tt < tmin)
+ continue;
+ if (tt > tmax)
+ break;
+ /* This works! */
+ d = abs(msec - tt);
+ if (d >= diff)
+ continue;
+ /* Best yet */
+ *c1p = c1;
+ *c2p = c2;
+ diff = d;
+ if (d == 0)
+ return msec;
+ }
+ }
+ if (diff < 65536) {
+ int actual;
+ if (msec & 1) {
+ c1 = *c2p;
+ *c2p = *c1p;
+ *c1p = c1;
+ }
+ actual = time_codes[*c1p] + time_codes[*c2p];
+ if (*c1p < *c2p)
+ return actual + 1;
+ else
+ return actual;
+ }
+ /* No close match */
+ return -EINVAL;
+}
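+
+/*
+ * To illustrate the search above: a request for 1536ms can be met exactly,
+ * and the loop settles on the 768+768 pairing, the one with the smallest
+ * maximum, as described in the comment at the top of this file.  A request
+ * for 100ms falls in the gap between the achievable 56-72ms and 112-144ms
+ * ranges, so no pair sums to within 12.5% and the function returns
+ * -EINVAL, leaving the caller to fall back to software timing.
+ */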
+
+/*
+ * Update the register file with the appropriate 3-bit state for
+ * the given led.
+ */
+static void set_select(struct tca6507_chip *tca, int led, int val)
+{
+ int mask = (1 << led);
+ int bit;
+
+ for (bit = 0; bit < 3; bit++) {
+ int n = tca->reg_file[bit] & ~mask;
+ if (val & (1 << bit))
+ n |= mask;
+ if (tca->reg_file[bit] != n) {
+ tca->reg_file[bit] = n;
+ tca->reg_set |= (1 << bit);
+ }
+ }
+}
+
+/* Update the register file with the appropriate 4-bit code for
+ * one bank or the other. This can be used for timers, for levels, or
+ * for initialisation.
+ */
+static void set_code(struct tca6507_chip *tca, int reg, int bank, int new)
+{
+ int mask = 0xF;
+ int n;
+ if (bank) {
+ mask <<= 4;
+ new <<= 4;
+ }
+ n = tca->reg_file[reg] & ~mask;
+ n |= new;
+ if (tca->reg_file[reg] != n) {
+ tca->reg_file[reg] = n;
+ tca->reg_set |= 1 << reg;
+ }
+}
+
+/* Update brightness level. */
+static void set_level(struct tca6507_chip *tca, int bank, int level)
+{
+ switch (bank) {
+ case BANK0:
+ case BANK1:
+ set_code(tca, TCA6507_MAX_INTENSITY, bank, level);
+ break;
+ case MASTER:
+ set_code(tca, TCA6507_MASTER_INTENSITY, 0, level);
+ break;
+ }
+ tca->bank[bank].level = level;
+}
+
+/* Record all relevant time code for a given bank */
+static void set_times(struct tca6507_chip *tca, int bank)
+{
+ int c1, c2;
+ int result;
+
+ result = choose_times(tca->bank[bank].ontime, &c1, &c2);
+ dev_dbg(&tca->client->dev,
+ "Chose on times %d(%d) %d(%d) for %dms\n", c1, time_codes[c1],
+ c2, time_codes[c2], tca->bank[bank].ontime);
+ set_code(tca, TCA6507_FADE_ON, bank, c2);
+ set_code(tca, TCA6507_FULL_ON, bank, c1);
+ tca->bank[bank].ontime = result;
+
+ result = choose_times(tca->bank[bank].offtime, &c1, &c2);
+ dev_dbg(&tca->client->dev,
+ "Chose off times %d(%d) %d(%d) for %dms\n", c1, time_codes[c1],
+ c2, time_codes[c2], tca->bank[bank].offtime);
+ set_code(tca, TCA6507_FADE_OFF, bank, c2);
+ set_code(tca, TCA6507_FIRST_OFF, bank, c1);
+ set_code(tca, TCA6507_SECOND_OFF, bank, c1);
+ tca->bank[bank].offtime = result;
+
+ set_code(tca, TCA6507_INITIALIZE, bank, INIT_CODE);
+}
+
+/* Write all registers of the tca6507 that need updating */
+
+static void tca6507_work(struct work_struct *work)
+{
+ struct tca6507_chip *tca = container_of(work, struct tca6507_chip,
+ work);
+ struct i2c_client *cl = tca->client;
+ int set;
+ u8 file[TCA6507_REG_CNT];
+ int r;
+
+ spin_lock_irq(&tca->lock);
+ set = tca->reg_set;
+ memcpy(file, tca->reg_file, TCA6507_REG_CNT);
+ tca->reg_set = 0;
+ spin_unlock_irq(&tca->lock);
+
+ for (r = 0; r < TCA6507_REG_CNT; r++)
+ if (set & (1<<r))
+ i2c_smbus_write_byte_data(cl, r, file[r]);
+}
+
+static void led_release(struct tca6507_led *led)
+{
+ /* If led owns any resource, release it. */
+ struct tca6507_chip *tca = led->chip;
+ if (led->bank >= 0) {
+ struct bank *b = tca->bank + led->bank;
+ if (led->blink)
+ b->time_use--;
+ b->level_use--;
+ }
+ led->blink = 0;
+ led->bank = -1;
+}
+
+static int led_prepare(struct tca6507_led *led)
+{
+ /* Assign this led to a bank, configuring that bank if necessary. */
+ int level = TO_LEVEL(led->led_cdev.brightness);
+ struct tca6507_chip *tca = led->chip;
+ int c1, c2;
+ int i;
+ struct bank *b;
+ int need_init = 0;
+
+ led->led_cdev.brightness = TO_BRIGHT(level);
+ if (level == 0) {
+ set_select(tca, led->num, TCA6507_LS_LED_OFF);
+ return 0;
+ }
+
+ if (led->ontime == 0 || led->offtime == 0) {
+ /*
+ * Just set the brightness, choosing first usable bank.
+ * If none perfect, choose best.
+ * Count backwards so we check MASTER bank first
+ * to avoid wasting a timer.
+ */
+ int best = -1;/* full-on */
+ int diff = 15-level;
+
+ if (level == 15) {
+ set_select(tca, led->num, TCA6507_LS_LED_ON);
+ return 0;
+ }
+
+ for (i = MASTER; i >= BANK0; i--) {
+ int d;
+ if (tca->bank[i].level == level ||
+ tca->bank[i].level_use == 0) {
+ best = i;
+ break;
+ }
+ d = abs(level - tca->bank[i].level);
+ if (d < diff) {
+ diff = d;
+ best = i;
+ }
+ }
+ if (best == -1) {
+ /* Best brightness is full-on */
+ set_select(tca, led->num, TCA6507_LS_LED_ON);
+ led->led_cdev.brightness = LED_FULL;
+ return 0;
+ }
+
+ if (!tca->bank[best].level_use)
+ set_level(tca, best, level);
+
+ tca->bank[best].level_use++;
+ led->bank = best;
+ set_select(tca, led->num, bank_source[best]);
+ led->led_cdev.brightness = TO_BRIGHT(tca->bank[best].level);
+ return 0;
+ }
+
+ /*
+ * We have on/off time so we need to try to allocate a timing bank.
+ * First check if times are compatible with hardware and give up if
+ * not.
+ */
+ if (choose_times(led->ontime, &c1, &c2) < 0)
+ return -EINVAL;
+ if (choose_times(led->offtime, &c1, &c2) < 0)
+ return -EINVAL;
+
+ for (i = BANK0; i <= BANK1; i++) {
+ if (tca->bank[i].level_use == 0)
+ /* not in use - it is ours! */
+ break;
+ if (tca->bank[i].level != level)
+ /* Incompatible level - skip */
+			/* FIX: if the timer matches, maybe we should consider
+ * this anyway...
+ */
+ continue;
+
+ if (tca->bank[i].time_use == 0)
+ /* Timer not in use, and level matches - use it */
+ break;
+
+ if (!(tca->bank[i].on_dflt ||
+ led->on_dflt ||
+ tca->bank[i].ontime == led->ontime))
+ /* on time is incompatible */
+ continue;
+
+ if (!(tca->bank[i].off_dflt ||
+ led->off_dflt ||
+ tca->bank[i].offtime == led->offtime))
+ /* off time is incompatible */
+ continue;
+
+ /* looks like a suitable match */
+ break;
+ }
+
+ if (i > BANK1)
+ /* Nothing matches - how sad */
+ return -EINVAL;
+
+ b = &tca->bank[i];
+ if (b->level_use == 0)
+ set_level(tca, i, level);
+ b->level_use++;
+ led->bank = i;
+
+ if (b->on_dflt ||
+ !led->on_dflt ||
+ b->time_use == 0) {
+ b->ontime = led->ontime;
+ b->on_dflt = led->on_dflt;
+ need_init = 1;
+ }
+
+ if (b->off_dflt ||
+ !led->off_dflt ||
+ b->time_use == 0) {
+ b->offtime = led->offtime;
+ b->off_dflt = led->off_dflt;
+ need_init = 1;
+ }
+
+ if (need_init)
+ set_times(tca, i);
+
+ led->ontime = b->ontime;
+ led->offtime = b->offtime;
+
+ b->time_use++;
+ led->blink = 1;
+ led->led_cdev.brightness = TO_BRIGHT(b->level);
+ set_select(tca, led->num, blink_source[i]);
+ return 0;
+}
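+
+/*
+ * As a rough example of how this plays out: two leds that both ask for a
+ * brightness around 100 with the same 512ms/512ms blink end up sharing
+ * BANK0; a third led at the same level but with an explicitly different
+ * on-time takes BANK1; a fourth led with yet another incompatible rate
+ * fails here with -EINVAL, and the LED core is then expected to fall back
+ * to software blinking for it.
+ */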
+
+static int led_assign(struct tca6507_led *led)
+{
+ struct tca6507_chip *tca = led->chip;
+ int err;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tca->lock, flags);
+ led_release(led);
+ err = led_prepare(led);
+ if (err) {
+ /*
+ * Can only fail on timer setup. In that case we need to
+		 * re-establish it as a steady level.
+ */
+ led->ontime = 0;
+ led->offtime = 0;
+ led_prepare(led);
+ }
+ spin_unlock_irqrestore(&tca->lock, flags);
+
+ if (tca->reg_set)
+ schedule_work(&tca->work);
+ return err;
+}
+
+static void tca6507_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct tca6507_led *led = container_of(led_cdev, struct tca6507_led,
+ led_cdev);
+ led->led_cdev.brightness = brightness;
+ led->ontime = 0;
+ led->offtime = 0;
+ led_assign(led);
+}
+
+static int tca6507_blink_set(struct led_classdev *led_cdev,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ struct tca6507_led *led = container_of(led_cdev, struct tca6507_led,
+ led_cdev);
+
+ if (*delay_on == 0)
+ led->on_dflt = 1;
+ else if (delay_on != &led_cdev->blink_delay_on)
+ led->on_dflt = 0;
+ led->ontime = *delay_on;
+
+ if (*delay_off == 0)
+ led->off_dflt = 1;
+ else if (delay_off != &led_cdev->blink_delay_off)
+ led->off_dflt = 0;
+ led->offtime = *delay_off;
+
+ if (led->ontime == 0)
+ led->ontime = 512;
+ if (led->offtime == 0)
+ led->offtime = 512;
+
+ if (led->led_cdev.brightness == LED_OFF)
+ led->led_cdev.brightness = LED_FULL;
+ if (led_assign(led) < 0) {
+ led->ontime = 0;
+ led->offtime = 0;
+ led->led_cdev.brightness = LED_OFF;
+ return -EINVAL;
+ }
+ *delay_on = led->ontime;
+ *delay_off = led->offtime;
+ return 0;
+}
+
+#ifdef CONFIG_GPIOLIB
+static void tca6507_gpio_set_value(struct gpio_chip *gc,
+ unsigned offset, int val)
+{
+ struct tca6507_chip *tca = container_of(gc, struct tca6507_chip, gpio);
+ unsigned long flags;
+
+ spin_lock_irqsave(&tca->lock, flags);
+ /*
+ * 'OFF' is floating high, and 'ON' is pulled down, so it has the
+ * inverse sense of 'val'.
+ */
+ set_select(tca, tca->gpio_map[offset],
+ val ? TCA6507_LS_LED_OFF : TCA6507_LS_LED_ON);
+ spin_unlock_irqrestore(&tca->lock, flags);
+ if (tca->reg_set)
+ schedule_work(&tca->work);
+}
+
+static int tca6507_gpio_direction_output(struct gpio_chip *gc,
+ unsigned offset, int val)
+{
+ tca6507_gpio_set_value(gc, offset, val);
+ return 0;
+}
+
+static int tca6507_probe_gpios(struct i2c_client *client,
+ struct tca6507_chip *tca,
+ struct tca6507_platform_data *pdata)
+{
+ int err;
+ int i = 0;
+ int gpios = 0;
+
+ for (i = 0; i < NUM_LEDS; i++)
+ if (pdata->leds.leds[i].name && pdata->leds.leds[i].flags) {
+ /* Configure as a gpio */
+ tca->gpio_name[gpios] = pdata->leds.leds[i].name;
+ tca->gpio_map[gpios] = i;
+ gpios++;
+ }
+
+ if (!gpios)
+ return 0;
+
+ tca->gpio.label = "gpio-tca6507";
+ tca->gpio.names = tca->gpio_name;
+ tca->gpio.ngpio = gpios;
+ tca->gpio.base = pdata->gpio_base;
+ tca->gpio.owner = THIS_MODULE;
+ tca->gpio.direction_output = tca6507_gpio_direction_output;
+ tca->gpio.set = tca6507_gpio_set_value;
+ tca->gpio.dev = &client->dev;
+ err = gpiochip_add(&tca->gpio);
+ if (err) {
+ tca->gpio.ngpio = 0;
+ return err;
+ }
+ if (pdata->setup)
+ pdata->setup(tca->gpio.base, tca->gpio.ngpio);
+ return 0;
+}
+
+static void tca6507_remove_gpio(struct tca6507_chip *tca)
+{
+ if (tca->gpio.ngpio) {
+		int err = gpiochip_remove(&tca->gpio);
+		if (err)
+			dev_err(&tca->client->dev, "%s failed, %d\n",
+				"gpiochip_remove()", err);
+ }
+}
+#else /* CONFIG_GPIOLIB */
+static int tca6507_probe_gpios(struct i2c_client *client,
+ struct tca6507_chip *tca,
+ struct tca6507_platform_data *pdata)
+{
+ return 0;
+}
+static void tca6507_remove_gpio(struct tca6507_chip *tca)
+{
+}
+#endif /* CONFIG_GPIOLIB */
+
+static int __devinit tca6507_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct tca6507_chip *tca;
+ struct i2c_adapter *adapter;
+ struct tca6507_platform_data *pdata;
+ int err;
+ int i = 0;
+
+ adapter = to_i2c_adapter(client->dev.parent);
+ pdata = client->dev.platform_data;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_I2C))
+ return -EIO;
+
+ if (!pdata || pdata->leds.num_leds != NUM_LEDS) {
+ dev_err(&client->dev, "Need %d entries in platform-data list\n",
+ NUM_LEDS);
+ return -ENODEV;
+ }
+ err = -ENOMEM;
+ tca = kzalloc(sizeof(*tca), GFP_KERNEL);
+ if (!tca)
+ goto exit;
+
+ tca->client = client;
+ INIT_WORK(&tca->work, tca6507_work);
+ spin_lock_init(&tca->lock);
+ i2c_set_clientdata(client, tca);
+
+ for (i = 0; i < NUM_LEDS; i++) {
+ struct tca6507_led *l = tca->leds + i;
+
+ l->chip = tca;
+ l->num = i;
+ if (pdata->leds.leds[i].name && !pdata->leds.leds[i].flags) {
+ l->led_cdev.name = pdata->leds.leds[i].name;
+ l->led_cdev.default_trigger
+ = pdata->leds.leds[i].default_trigger;
+ l->led_cdev.brightness_set = tca6507_brightness_set;
+ l->led_cdev.blink_set = tca6507_blink_set;
+ l->bank = -1;
+ err = led_classdev_register(&client->dev,
+ &l->led_cdev);
+ if (err < 0)
+ goto exit;
+ }
+ }
+ err = tca6507_probe_gpios(client, tca, pdata);
+ if (err)
+ goto exit;
+ /* set all registers to known state - zero */
+ tca->reg_set = 0x7f;
+ schedule_work(&tca->work);
+
+ return 0;
+exit:
+ while (i--)
+ if (tca->leds[i].led_cdev.name)
+ led_classdev_unregister(&tca->leds[i].led_cdev);
+ cancel_work_sync(&tca->work);
+ i2c_set_clientdata(client, NULL);
+ kfree(tca);
+ return err;
+}
+
+static int __devexit tca6507_remove(struct i2c_client *client)
+{
+ int i;
+ struct tca6507_chip *tca = i2c_get_clientdata(client);
+ struct tca6507_led *tca_leds = tca->leds;
+
+ for (i = 0; i < NUM_LEDS; i++) {
+ if (tca_leds[i].led_cdev.name)
+ led_classdev_unregister(&tca_leds[i].led_cdev);
+ }
+ tca6507_remove_gpio(tca);
+ cancel_work_sync(&tca->work);
+ i2c_set_clientdata(client, NULL);
+ kfree(tca);
+
+ return 0;
+}
+
+static struct i2c_driver tca6507_driver = {
+ .driver = {
+ .name = "leds-tca6507",
+ .owner = THIS_MODULE,
+ },
+ .probe = tca6507_probe,
+ .remove = __devexit_p(tca6507_remove),
+ .id_table = tca6507_id,
+};
+
+static int __init tca6507_leds_init(void)
+{
+ return i2c_add_driver(&tca6507_driver);
+}
+
+static void __exit tca6507_leds_exit(void)
+{
+ i2c_del_driver(&tca6507_driver);
+}
+
+module_init(tca6507_leds_init);
+module_exit(tca6507_leds_exit);
+
+MODULE_AUTHOR("NeilBrown <neilb@suse.de>");
+MODULE_DESCRIPTION("TCA6507 LED/GPO driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/leds-wm831x-status.c b/drivers/leds/leds-wm831x-status.c
index b1eb34c3e81f..74a24cf897c3 100644
--- a/drivers/leds/leds-wm831x-status.c
+++ b/drivers/leds/leds-wm831x-status.c
@@ -237,7 +237,8 @@ static int wm831x_status_probe(struct platform_device *pdev)
goto err;
}
- drvdata = kzalloc(sizeof(struct wm831x_status), GFP_KERNEL);
+ drvdata = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_status),
+ GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
dev_set_drvdata(&pdev->dev, drvdata);
@@ -300,7 +301,6 @@ static int wm831x_status_probe(struct platform_device *pdev)
err_led:
led_classdev_unregister(&drvdata->cdev);
- kfree(drvdata);
err:
return ret;
}
@@ -311,7 +311,6 @@ static int wm831x_status_remove(struct platform_device *pdev)
device_remove_file(drvdata->cdev.dev, &dev_attr_src);
led_classdev_unregister(&drvdata->cdev);
- kfree(drvdata);
return 0;
}
@@ -325,17 +324,7 @@ static struct platform_driver wm831x_status_driver = {
.remove = wm831x_status_remove,
};
-static int __devinit wm831x_status_init(void)
-{
- return platform_driver_register(&wm831x_status_driver);
-}
-module_init(wm831x_status_init);
-
-static void wm831x_status_exit(void)
-{
- platform_driver_unregister(&wm831x_status_driver);
-}
-module_exit(wm831x_status_exit);
+module_platform_driver(wm831x_status_driver);
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_DESCRIPTION("WM831x status LED driver");
diff --git a/drivers/leds/leds-wm8350.c b/drivers/leds/leds-wm8350.c
index 4a1276578352..918d4baff1c7 100644
--- a/drivers/leds/leds-wm8350.c
+++ b/drivers/leds/leds-wm8350.c
@@ -227,7 +227,7 @@ static int wm8350_led_probe(struct platform_device *pdev)
goto err_isink;
}
- led = kzalloc(sizeof(*led), GFP_KERNEL);
+ led = devm_kzalloc(&pdev->dev, sizeof(*led), GFP_KERNEL);
if (led == NULL) {
ret = -ENOMEM;
goto err_dcdc;
@@ -259,12 +259,10 @@ static int wm8350_led_probe(struct platform_device *pdev)
ret = led_classdev_register(&pdev->dev, &led->cdev);
if (ret < 0)
- goto err_led;
+ goto err_dcdc;
return 0;
- err_led:
- kfree(led);
err_dcdc:
regulator_put(dcdc);
err_isink:
@@ -281,7 +279,6 @@ static int wm8350_led_remove(struct platform_device *pdev)
wm8350_led_disable(led);
regulator_put(led->dcdc);
regulator_put(led->isink);
- kfree(led);
return 0;
}
@@ -295,17 +292,7 @@ static struct platform_driver wm8350_led_driver = {
.shutdown = wm8350_led_shutdown,
};
-static int __devinit wm8350_led_init(void)
-{
- return platform_driver_register(&wm8350_led_driver);
-}
-module_init(wm8350_led_init);
-
-static void wm8350_led_exit(void)
-{
- platform_driver_unregister(&wm8350_led_driver);
-}
-module_exit(wm8350_led_exit);
+module_platform_driver(wm8350_led_driver);
MODULE_AUTHOR("Mark Brown");
MODULE_DESCRIPTION("WM8350 LED driver");
diff --git a/drivers/lguest/Makefile b/drivers/lguest/Makefile
index 8ac947c7e7c7..c4197503900e 100644
--- a/drivers/lguest/Makefile
+++ b/drivers/lguest/Makefile
@@ -18,7 +18,7 @@ Mastery: PREFIX=M
Beer:
@for f in Preparation Guest Drivers Launcher Host Switcher Mastery; do echo "{==- $$f -==}"; make -s $$f; done; echo "{==-==}"
Preparation Preparation! Guest Drivers Launcher Host Switcher Mastery:
- @sh ../../Documentation/virtual/lguest/extract $(PREFIX) `find ../../* -name '*.[chS]' -wholename '*lguest*'`
+ @sh ../../tools/lguest/extract $(PREFIX) `find ../../* -name '*.[chS]' -wholename '*lguest*'`
Puppy:
@clear
@printf " __ \n (___()'\`;\n /, /\`\n \\\\\\\"--\\\\\\ \n"
diff --git a/drivers/lguest/lguest_device.c b/drivers/lguest/lguest_device.c
index 0dc30ffde5ad..9e8388efd88e 100644
--- a/drivers/lguest/lguest_device.c
+++ b/drivers/lguest/lguest_device.c
@@ -241,7 +241,7 @@ static void lg_notify(struct virtqueue *vq)
}
/* An extern declaration inside a C file is bad form. Don't do it. */
-extern void lguest_setup_irq(unsigned int irq);
+extern int lguest_setup_irq(unsigned int irq);
/*
* This routine finds the Nth virtqueue described in the configuration of
@@ -292,17 +292,21 @@ static struct virtqueue *lg_find_vq(struct virtio_device *vdev,
/*
* OK, tell virtio_ring.c to set up a virtqueue now we know its size
- * and we've got a pointer to its pages.
+ * and we've got a pointer to its pages. Note that we set weak_barriers
+	 * to 'true': the host is just a(nother) SMP CPU, so we only need inter-cpu
+ * barriers.
*/
- vq = vring_new_virtqueue(lvq->config.num, LGUEST_VRING_ALIGN,
- vdev, lvq->pages, lg_notify, callback, name);
+ vq = vring_new_virtqueue(lvq->config.num, LGUEST_VRING_ALIGN, vdev,
+ true, lvq->pages, lg_notify, callback, name);
if (!vq) {
err = -ENOMEM;
goto unmap;
}
/* Make sure the interrupt is allocated. */
- lguest_setup_irq(lvq->config.irq);
+ err = lguest_setup_irq(lvq->config.irq);
+ if (err)
+ goto destroy_vring;
/*
* Tell the interrupt for this virtqueue to go to the virtio_ring
@@ -315,7 +319,7 @@ static struct virtqueue *lg_find_vq(struct virtio_device *vdev,
err = request_irq(lvq->config.irq, vring_interrupt, IRQF_SHARED,
dev_name(&vdev->dev), vq);
if (err)
- goto destroy_vring;
+ goto free_desc;
/*
* Last of all we hook up our 'struct lguest_vq_info" to the
@@ -324,6 +328,8 @@ static struct virtqueue *lg_find_vq(struct virtio_device *vdev,
vq->priv = lvq;
return vq;
+free_desc:
+ irq_free_desc(lvq->config.irq);
destroy_vring:
vring_del_virtqueue(vq);
unmap:
@@ -381,6 +387,11 @@ error:
return PTR_ERR(vqs[i]);
}
+static const char *lg_bus_name(struct virtio_device *vdev)
+{
+ return "";
+}
+
/* The ops structure which hooks everything together. */
static struct virtio_config_ops lguest_config_ops = {
.get_features = lg_get_features,
@@ -392,6 +403,7 @@ static struct virtio_config_ops lguest_config_ops = {
.reset = lg_reset,
.find_vqs = lg_find_vqs,
.del_vqs = lg_del_vqs,
+ .bus_name = lg_bus_name,
};
/*
diff --git a/drivers/lguest/segments.c b/drivers/lguest/segments.c
index ede46581351a..c4fb424dfddb 100644
--- a/drivers/lguest/segments.c
+++ b/drivers/lguest/segments.c
@@ -81,8 +81,8 @@ static void fixup_gdt_table(struct lg_cpu *cpu, unsigned start, unsigned end)
* sometimes careless and leaves this as 0, even though it's
* running at privilege level 1. If so, we fix it here.
*/
- if ((cpu->arch.gdt[i].b & 0x00006000) == 0)
- cpu->arch.gdt[i].b |= (GUEST_PL << 13);
+ if (cpu->arch.gdt[i].dpl == 0)
+ cpu->arch.gdt[i].dpl |= GUEST_PL;
/*
* Each descriptor has an "accessed" bit. If we don't set it
@@ -90,7 +90,7 @@ static void fixup_gdt_table(struct lg_cpu *cpu, unsigned start, unsigned end)
* that entry into a segment register. But the GDT isn't
* writable by the Guest, so bad things can happen.
*/
- cpu->arch.gdt[i].b |= 0x00000100;
+ cpu->arch.gdt[i].type |= 0x1;
}
}
@@ -114,13 +114,19 @@ void setup_default_gdt_entries(struct lguest_ro_state *state)
/*
* The TSS segment refers to the TSS entry for this particular CPU.
- * Forgive the magic flags: the 0x8900 means the entry is Present, it's
- * privilege level 0 Available 386 TSS system segment, and the 0x67
- * means Saturn is eclipsed by Mercury in the twelfth house.
*/
- gdt[GDT_ENTRY_TSS].a = 0x00000067 | (tss << 16);
- gdt[GDT_ENTRY_TSS].b = 0x00008900 | (tss & 0xFF000000)
- | ((tss >> 16) & 0x000000FF);
+ gdt[GDT_ENTRY_TSS].a = 0;
+ gdt[GDT_ENTRY_TSS].b = 0;
+
+ gdt[GDT_ENTRY_TSS].limit0 = 0x67;
+ gdt[GDT_ENTRY_TSS].base0 = tss & 0xFFFF;
+ gdt[GDT_ENTRY_TSS].base1 = (tss >> 16) & 0xFF;
+ gdt[GDT_ENTRY_TSS].base2 = tss >> 24;
+ gdt[GDT_ENTRY_TSS].type = 0x9; /* 32-bit TSS (available) */
+ gdt[GDT_ENTRY_TSS].p = 0x1; /* Entry is present */
+ gdt[GDT_ENTRY_TSS].dpl = 0x0; /* Privilege level 0 */
+ gdt[GDT_ENTRY_TSS].s = 0x0; /* system segment */
+
}
/*
@@ -135,8 +141,8 @@ void setup_guest_gdt(struct lg_cpu *cpu)
*/
cpu->arch.gdt[GDT_ENTRY_KERNEL_CS] = FULL_EXEC_SEGMENT;
cpu->arch.gdt[GDT_ENTRY_KERNEL_DS] = FULL_SEGMENT;
- cpu->arch.gdt[GDT_ENTRY_KERNEL_CS].b |= (GUEST_PL << 13);
- cpu->arch.gdt[GDT_ENTRY_KERNEL_DS].b |= (GUEST_PL << 13);
+ cpu->arch.gdt[GDT_ENTRY_KERNEL_CS].dpl |= GUEST_PL;
+ cpu->arch.gdt[GDT_ENTRY_KERNEL_DS].dpl |= GUEST_PL;
}
/*H:650
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
index 65af42f2d593..39809035320a 100644
--- a/drivers/lguest/x86/core.c
+++ b/drivers/lguest/x86/core.c
@@ -697,7 +697,7 @@ void lguest_arch_setup_regs(struct lg_cpu *cpu, unsigned long start)
* interrupts are enabled. We always leave interrupts enabled while
* running the Guest.
*/
- regs->eflags = X86_EFLAGS_IF | 0x2;
+ regs->eflags = X86_EFLAGS_IF | X86_EFLAGS_BIT1;
/*
* The "Extended Instruction Pointer" register says where the Guest is
diff --git a/drivers/macintosh/ams/ams-core.c b/drivers/macintosh/ams/ams-core.c
index 399beb1638d1..5c6a2d876562 100644
--- a/drivers/macintosh/ams/ams-core.c
+++ b/drivers/macintosh/ams/ams-core.c
@@ -31,7 +31,7 @@
/* There is only one motion sensor per machine */
struct ams ams_info;
-static unsigned int verbose;
+static bool verbose;
module_param(verbose, bool, 0644);
MODULE_PARM_DESC(verbose, "Show free falls and shocks in kernel output");
diff --git a/drivers/macintosh/ams/ams-input.c b/drivers/macintosh/ams/ams-input.c
index 8a712392cd38..b27e530a87a4 100644
--- a/drivers/macintosh/ams/ams-input.c
+++ b/drivers/macintosh/ams/ams-input.c
@@ -19,11 +19,11 @@
#include "ams.h"
-static unsigned int joystick;
+static bool joystick;
module_param(joystick, bool, S_IRUGO);
MODULE_PARM_DESC(joystick, "Enable the input class device on module load");
-static unsigned int invert;
+static bool invert;
module_param(invert, bool, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(invert, "Invert input data on X and Y axis");
diff --git a/drivers/macintosh/rack-meter.c b/drivers/macintosh/rack-meter.c
index 2637c139777b..6dc26b61219b 100644
--- a/drivers/macintosh/rack-meter.c
+++ b/drivers/macintosh/rack-meter.c
@@ -81,13 +81,13 @@ static int rackmeter_ignore_nice;
*/
static inline cputime64_t get_cpu_idle_time(unsigned int cpu)
{
- cputime64_t retval;
+ u64 retval;
- retval = cputime64_add(kstat_cpu(cpu).cpustat.idle,
- kstat_cpu(cpu).cpustat.iowait);
+ retval = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE] +
+ kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT];
if (rackmeter_ignore_nice)
- retval = cputime64_add(retval, kstat_cpu(cpu).cpustat.nice);
+ retval += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
return retval;
}
@@ -220,13 +220,11 @@ static void rackmeter_do_timer(struct work_struct *work)
int i, offset, load, cumm, pause;
cur_jiffies = jiffies64_to_cputime64(get_jiffies_64());
- total_ticks = (unsigned int)cputime64_sub(cur_jiffies,
- rcpu->prev_wall);
+ total_ticks = (unsigned int) (cur_jiffies - rcpu->prev_wall);
rcpu->prev_wall = cur_jiffies;
total_idle_ticks = get_cpu_idle_time(cpu);
- idle_ticks = (unsigned int) cputime64_sub(total_idle_ticks,
- rcpu->prev_idle);
+ idle_ticks = (unsigned int) (total_idle_ticks - rcpu->prev_idle);
rcpu->prev_idle = total_idle_ticks;
/* We do a very dumb calculation to update the LEDs for now,
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index 116a49ce74b2..54ac7ffacb40 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -32,7 +32,6 @@
#include <linux/completion.h>
#include <linux/miscdevice.h>
#include <linux/delay.h>
-#include <linux/sysdev.h>
#include <linux/poll.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
@@ -681,9 +680,6 @@ static struct platform_driver smu_of_platform_driver =
static int __init smu_init_sysfs(void)
{
/*
- * Due to sysfs bogosity, a sysdev is not a real device, so
- * we should in fact create both if we want sysdev semantics
- * for power management.
* For now, we don't power manage machines with an SMU chip,
* I'm a bit too far from figuring out how that works with those
* new chipsets, but that will come back and bite us
diff --git a/drivers/macintosh/therm_adt746x.c b/drivers/macintosh/therm_adt746x.c
index 02367308ff2e..c60d025044ee 100644
--- a/drivers/macintosh/therm_adt746x.c
+++ b/drivers/macintosh/therm_adt746x.c
@@ -52,7 +52,7 @@ static const char *sensor_location[3];
static int limit_adjust;
static int fan_speed = -1;
-static int verbose;
+static bool verbose;
MODULE_AUTHOR("Colin Leroy <colin@colino.net>");
MODULE_DESCRIPTION("Driver for ADT746x thermostat in iBook G4 and "
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 7878712721bf..cdf36b1e9aa6 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1106,10 +1106,12 @@ void bitmap_write_all(struct bitmap *bitmap)
*/
int i;
+ spin_lock_irq(&bitmap->lock);
for (i = 0; i < bitmap->file_pages; i++)
set_page_attr(bitmap, bitmap->filemap[i],
BITMAP_PAGE_NEEDWRITE);
bitmap->allclean = 0;
+ spin_unlock_irq(&bitmap->lock);
}
static void bitmap_count_page(struct bitmap *bitmap, sector_t offset, int inc)
@@ -1147,12 +1149,12 @@ void bitmap_daemon_work(struct mddev *mddev)
return;
}
if (time_before(jiffies, bitmap->daemon_lastrun
- + bitmap->mddev->bitmap_info.daemon_sleep))
+ + mddev->bitmap_info.daemon_sleep))
goto done;
bitmap->daemon_lastrun = jiffies;
if (bitmap->allclean) {
- bitmap->mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
+ mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
goto done;
}
bitmap->allclean = 1;
@@ -1204,7 +1206,7 @@ void bitmap_daemon_work(struct mddev *mddev)
* sure that events_cleared is up-to-date.
*/
if (bitmap->need_sync &&
- bitmap->mddev->bitmap_info.external == 0) {
+ mddev->bitmap_info.external == 0) {
bitmap_super_t *sb;
bitmap->need_sync = 0;
sb = kmap_atomic(bitmap->sb_page, KM_USER0);
@@ -1268,8 +1270,8 @@ void bitmap_daemon_work(struct mddev *mddev)
done:
if (bitmap->allclean == 0)
- bitmap->mddev->thread->timeout =
- bitmap->mddev->bitmap_info.daemon_sleep;
+ mddev->thread->timeout =
+ mddev->bitmap_info.daemon_sleep;
mutex_unlock(&mddev->bitmap_info.mutex);
}
@@ -1391,9 +1393,6 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
atomic_read(&bitmap->behind_writes),
bitmap->mddev->bitmap_info.max_write_behind);
}
- if (bitmap->mddev->degraded)
- /* Never clear bits or update events_cleared when degraded */
- success = 0;
while (sectors) {
sector_t blocks;
@@ -1407,7 +1406,7 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
return;
}
- if (success &&
+ if (success && !bitmap->mddev->degraded &&
bitmap->events_cleared < bitmap->mddev->events) {
bitmap->events_cleared = bitmap->mddev->events;
bitmap->need_sync = 1;
@@ -1588,7 +1587,7 @@ static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int n
}
if (!*bmc) {
struct page *page;
- *bmc = 1 | (needed ? NEEDED_MASK : 0);
+ *bmc = 2 | (needed ? NEEDED_MASK : 0);
bitmap_count_page(bitmap, offset, 1);
page = filemap_get_page(bitmap, offset >> CHUNK_BLOCK_SHIFT(bitmap));
set_page_attr(bitmap, page, BITMAP_PAGE_PENDING);
@@ -1605,7 +1604,9 @@ void bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e)
for (chunk = s; chunk <= e; chunk++) {
sector_t sec = (sector_t)chunk << CHUNK_BLOCK_SHIFT(bitmap);
bitmap_set_memory_bits(bitmap, sec, 1);
+ spin_lock_irq(&bitmap->lock);
bitmap_file_set_bit(bitmap, sec);
+ spin_unlock_irq(&bitmap->lock);
if (sec < bitmap->mddev->recovery_cp)
/* We are asserting that the array is dirty,
* so move the recovery_cp address back so
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index f84c08029b21..9fb18c147825 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -368,8 +368,17 @@ static int flakey_status(struct dm_target *ti, status_type_t type,
static int flakey_ioctl(struct dm_target *ti, unsigned int cmd, unsigned long arg)
{
struct flakey_c *fc = ti->private;
+ struct dm_dev *dev = fc->dev;
+ int r = 0;
- return __blkdev_driver_ioctl(fc->dev->bdev, fc->dev->mode, cmd, arg);
+ /*
+ * Only pass ioctls through if the device sizes match exactly.
+ */
+ if (fc->start ||
+ ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
+ r = scsi_verify_blk_ioctl(NULL, cmd);
+
+ return r ? : __blkdev_driver_ioctl(dev->bdev, dev->mode, cmd, arg);
}
static int flakey_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 3921e3bb43c1..9728839f844a 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -116,7 +116,17 @@ static int linear_ioctl(struct dm_target *ti, unsigned int cmd,
unsigned long arg)
{
struct linear_c *lc = (struct linear_c *) ti->private;
- return __blkdev_driver_ioctl(lc->dev->bdev, lc->dev->mode, cmd, arg);
+ struct dm_dev *dev = lc->dev;
+ int r = 0;
+
+ /*
+ * Only pass ioctls through if the device sizes match exactly.
+ */
+ if (lc->start ||
+ ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
+ r = scsi_verify_blk_ioctl(NULL, cmd);
+
+ return r ? : __blkdev_driver_ioctl(dev->bdev, dev->mode, cmd, arg);
}
static int linear_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 5e0090ef4182..801d92d237cf 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -1520,6 +1520,12 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
spin_unlock_irqrestore(&m->lock, flags);
+ /*
+ * Only pass ioctls through if the device sizes match exactly.
+ */
+ if (!r && ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT)
+ r = scsi_verify_blk_ioctl(NULL, cmd);
+
return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
}
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 8e9132130142..63cc54289aff 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -699,7 +699,7 @@ static int validate_hardware_logical_block_alignment(struct dm_table *table,
while (i < dm_table_get_num_targets(table)) {
ti = dm_table_get_target(table, i++);
- blk_set_default_limits(&ti_limits);
+ blk_set_stacking_limits(&ti_limits);
/* combine all target devices' limits */
if (ti->type->iterate_devices)
@@ -1221,10 +1221,10 @@ int dm_calculate_queue_limits(struct dm_table *table,
struct queue_limits ti_limits;
unsigned i = 0;
- blk_set_default_limits(limits);
+ blk_set_stacking_limits(limits);
while (i < dm_table_get_num_targets(table)) {
- blk_set_default_limits(&ti_limits);
+ blk_set_stacking_limits(&ti_limits);
ti = dm_table_get_target(table, i++);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 4720f68f817e..b89c548ec3f8 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -14,7 +14,6 @@
#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
-#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index c3273efd08cb..627456542fb3 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -230,6 +230,7 @@ static int linear_add(struct mddev *mddev, struct md_rdev *rdev)
return -EINVAL;
rdev->raid_disk = rdev->saved_raid_disk;
+ rdev->saved_raid_disk = -1;
newconf = linear_conf(mddev,mddev->raid_disks+1);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 84acfe7d10e4..9417ae2fa0bb 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -36,8 +36,7 @@
#include <linux/blkdev.h>
#include <linux/sysctl.h>
#include <linux/seq_file.h>
-#include <linux/mutex.h>
-#include <linux/buffer_head.h> /* for invalidate_bdev */
+#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/ctype.h>
#include <linux/string.h>
@@ -570,7 +569,7 @@ static void mddev_put(struct mddev *mddev)
mddev->ctime == 0 && !mddev->hold_active) {
/* Array is not configured at all, and not held active,
* so destroy it */
- list_del(&mddev->all_mddevs);
+ list_del_init(&mddev->all_mddevs);
bs = mddev->bio_set;
mddev->bio_set = NULL;
if (mddev->gendisk) {
@@ -1714,6 +1713,8 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
}
if (sb->devflags & WriteMostly1)
set_bit(WriteMostly, &rdev->flags);
+ if (le32_to_cpu(sb->feature_map) & MD_FEATURE_REPLACEMENT)
+ set_bit(Replacement, &rdev->flags);
} else /* MULTIPATH are always insync */
set_bit(In_sync, &rdev->flags);
@@ -1767,6 +1768,9 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
sb->recovery_offset =
cpu_to_le64(rdev->recovery_offset);
}
+ if (test_bit(Replacement, &rdev->flags))
+ sb->feature_map |=
+ cpu_to_le32(MD_FEATURE_REPLACEMENT);
if (mddev->reshape_position != MaxSector) {
sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
@@ -2546,7 +2550,8 @@ state_show(struct md_rdev *rdev, char *page)
sep = ",";
}
if (test_bit(Blocked, &rdev->flags) ||
- rdev->badblocks.unacked_exist) {
+ (rdev->badblocks.unacked_exist
+ && !test_bit(Faulty, &rdev->flags))) {
len += sprintf(page+len, "%sblocked", sep);
sep = ",";
}
@@ -2559,6 +2564,15 @@ state_show(struct md_rdev *rdev, char *page)
len += sprintf(page+len, "%swrite_error", sep);
sep = ",";
}
+ if (test_bit(WantReplacement, &rdev->flags)) {
+ len += sprintf(page+len, "%swant_replacement", sep);
+ sep = ",";
+ }
+ if (test_bit(Replacement, &rdev->flags)) {
+ len += sprintf(page+len, "%sreplacement", sep);
+ sep = ",";
+ }
+
return len+sprintf(page+len, "\n");
}
@@ -2627,6 +2641,42 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
} else if (cmd_match(buf, "-write_error")) {
clear_bit(WriteErrorSeen, &rdev->flags);
err = 0;
+ } else if (cmd_match(buf, "want_replacement")) {
+ /* Any non-spare device that is not a replacement can
+ * become want_replacement at any time, but we then need to
+ * check if recovery is needed.
+ */
+ if (rdev->raid_disk >= 0 &&
+ !test_bit(Replacement, &rdev->flags))
+ set_bit(WantReplacement, &rdev->flags);
+ set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
+ md_wakeup_thread(rdev->mddev->thread);
+ err = 0;
+ } else if (cmd_match(buf, "-want_replacement")) {
+ /* Clearing 'want_replacement' is always allowed.
+		 * Once replacement starts it is too late though.
+ */
+ err = 0;
+ clear_bit(WantReplacement, &rdev->flags);
+ } else if (cmd_match(buf, "replacement")) {
+ /* Can only set a device as a replacement when array has not
+ * yet been started. Once running, replacement is automatic
+ * from spares, or by assigning 'slot'.
+ */
+ if (rdev->mddev->pers)
+ err = -EBUSY;
+ else {
+ set_bit(Replacement, &rdev->flags);
+ err = 0;
+ }
+ } else if (cmd_match(buf, "-replacement")) {
+ /* Similarly, can only clear Replacement before start */
+ if (rdev->mddev->pers)
+ err = -EBUSY;
+ else {
+ clear_bit(Replacement, &rdev->flags);
+ err = 0;
+ }
}
if (!err)
sysfs_notify_dirent_safe(rdev->sysfs_state);
@@ -2688,7 +2738,7 @@ slot_store(struct md_rdev *rdev, const char *buf, size_t len)
if (rdev->mddev->pers->hot_remove_disk == NULL)
return -EINVAL;
err = rdev->mddev->pers->
- hot_remove_disk(rdev->mddev, rdev->raid_disk);
+ hot_remove_disk(rdev->mddev, rdev);
if (err)
return err;
sysfs_unlink_rdev(rdev->mddev, rdev);
@@ -2696,7 +2746,6 @@ slot_store(struct md_rdev *rdev, const char *buf, size_t len)
set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
md_wakeup_thread(rdev->mddev->thread);
} else if (rdev->mddev->pers) {
- struct md_rdev *rdev2;
/* Activating a spare .. or possibly reactivating
* if we ever get bitmaps working here.
*/
@@ -2710,10 +2759,6 @@ slot_store(struct md_rdev *rdev, const char *buf, size_t len)
if (rdev->mddev->pers->hot_add_disk == NULL)
return -EINVAL;
- list_for_each_entry(rdev2, &rdev->mddev->disks, same_set)
- if (rdev2->raid_disk == slot)
- return -EEXIST;
-
if (slot >= rdev->mddev->raid_disks &&
slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
return -ENOSPC;
@@ -3788,6 +3833,8 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
if (err)
return err;
else {
+ if (mddev->hold_active == UNTIL_IOCTL)
+ mddev->hold_active = 0;
sysfs_notify_dirent_safe(mddev->sysfs_state);
return len;
}
@@ -4487,11 +4534,20 @@ md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
if (!entry->show)
return -EIO;
+ spin_lock(&all_mddevs_lock);
+ if (list_empty(&mddev->all_mddevs)) {
+ spin_unlock(&all_mddevs_lock);
+ return -EBUSY;
+ }
+ mddev_get(mddev);
+ spin_unlock(&all_mddevs_lock);
+
rv = mddev_lock(mddev);
if (!rv) {
rv = entry->show(mddev, page);
mddev_unlock(mddev);
}
+ mddev_put(mddev);
return rv;
}
@@ -4507,13 +4563,19 @@ md_attr_store(struct kobject *kobj, struct attribute *attr,
return -EIO;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
+ spin_lock(&all_mddevs_lock);
+ if (list_empty(&mddev->all_mddevs)) {
+ spin_unlock(&all_mddevs_lock);
+ return -EBUSY;
+ }
+ mddev_get(mddev);
+ spin_unlock(&all_mddevs_lock);
rv = mddev_lock(mddev);
- if (mddev->hold_active == UNTIL_IOCTL)
- mddev->hold_active = 0;
if (!rv) {
rv = entry->store(mddev, page, length);
mddev_unlock(mddev);
}
+ mddev_put(mddev);
return rv;
}
@@ -4604,6 +4666,7 @@ static int md_alloc(dev_t dev, char *name)
mddev->queue->queuedata = mddev;
blk_queue_make_request(mddev->queue, md_make_request);
+ blk_set_stacking_limits(&mddev->queue->limits);
disk = alloc_disk(1 << shift);
if (!disk) {
@@ -6036,8 +6099,15 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
struct mddev *mddev = NULL;
int ro;
- if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
+ switch (cmd) {
+ case RAID_VERSION:
+ case GET_ARRAY_INFO:
+ case GET_DISK_INFO:
+ break;
+ default:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ }
/*
* Commands dealing with the RAID driver but not any
@@ -6697,8 +6767,11 @@ static int md_seq_show(struct seq_file *seq, void *v)
if (test_bit(Faulty, &rdev->flags)) {
seq_printf(seq, "(F)");
continue;
- } else if (rdev->raid_disk < 0)
+ }
+ if (rdev->raid_disk < 0)
seq_printf(seq, "(S)"); /* spare */
+ if (test_bit(Replacement, &rdev->flags))
+ seq_printf(seq, "(R)");
sectors += rdev->sectors;
}
@@ -7310,6 +7383,7 @@ static int remove_and_add_spares(struct mddev *mddev)
{
struct md_rdev *rdev;
int spares = 0;
+ int removed = 0;
mddev->curr_resync_completed = 0;
@@ -7320,30 +7394,32 @@ static int remove_and_add_spares(struct mddev *mddev)
! test_bit(In_sync, &rdev->flags)) &&
atomic_read(&rdev->nr_pending)==0) {
if (mddev->pers->hot_remove_disk(
- mddev, rdev->raid_disk)==0) {
+ mddev, rdev) == 0) {
sysfs_unlink_rdev(mddev, rdev);
rdev->raid_disk = -1;
+ removed++;
}
}
+ if (removed)
+ sysfs_notify(&mddev->kobj, NULL,
+ "degraded");
- if (mddev->degraded) {
- list_for_each_entry(rdev, &mddev->disks, same_set) {
- if (rdev->raid_disk >= 0 &&
- !test_bit(In_sync, &rdev->flags) &&
- !test_bit(Faulty, &rdev->flags))
+
+ list_for_each_entry(rdev, &mddev->disks, same_set) {
+ if (rdev->raid_disk >= 0 &&
+ !test_bit(In_sync, &rdev->flags) &&
+ !test_bit(Faulty, &rdev->flags))
+ spares++;
+ if (rdev->raid_disk < 0
+ && !test_bit(Faulty, &rdev->flags)) {
+ rdev->recovery_offset = 0;
+ if (mddev->pers->
+ hot_add_disk(mddev, rdev) == 0) {
+ if (sysfs_link_rdev(mddev, rdev))
+ /* failure here is OK */;
spares++;
- if (rdev->raid_disk < 0
- && !test_bit(Faulty, &rdev->flags)) {
- rdev->recovery_offset = 0;
- if (mddev->pers->
- hot_add_disk(mddev, rdev) == 0) {
- if (sysfs_link_rdev(mddev, rdev))
- /* failure here is OK */;
- spares++;
- md_new_event(mddev);
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
- } else
- break;
+ md_new_event(mddev);
+ set_bit(MD_CHANGE_DEVS, &mddev->flags);
}
}
}
@@ -7458,7 +7534,7 @@ void md_check_recovery(struct mddev *mddev)
test_bit(Faulty, &rdev->flags) &&
atomic_read(&rdev->nr_pending)==0) {
if (mddev->pers->hot_remove_disk(
- mddev, rdev->raid_disk)==0) {
+ mddev, rdev) == 0) {
sysfs_unlink_rdev(mddev, rdev);
rdev->raid_disk = -1;
}
@@ -7840,6 +7916,7 @@ int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
s + rdev->data_offset, sectors, acknowledged);
if (rv) {
/* Make sure they get written out promptly */
+ sysfs_notify_dirent_safe(rdev->sysfs_state);
set_bit(MD_CHANGE_CLEAN, &rdev->mddev->flags);
md_wakeup_thread(rdev->mddev->thread);
}
diff --git a/drivers/md/md.h b/drivers/md/md.h
index cf742d9306ec..44c63dfeeb2b 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -72,34 +72,7 @@ struct md_rdev {
* This reduces the burden of testing multiple flags in many cases
*/
- unsigned long flags;
-#define Faulty 1 /* device is known to have a fault */
-#define In_sync 2 /* device is in_sync with rest of array */
-#define WriteMostly 4 /* Avoid reading if at all possible */
-#define AutoDetected 7 /* added by auto-detect */
-#define Blocked 8 /* An error occurred but has not yet
- * been acknowledged by the metadata
- * handler, so don't allow writes
- * until it is cleared */
-#define WriteErrorSeen 9 /* A write error has been seen on this
- * device
- */
-#define FaultRecorded 10 /* Intermediate state for clearing
- * Blocked. The Fault is/will-be
- * recorded in the metadata, but that
- * metadata hasn't been stored safely
- * on disk yet.
- */
-#define BlockedBadBlocks 11 /* A writer is blocked because they
- * found an unacknowledged bad-block.
- * This can safely be cleared at any
- * time, and the writer will re-check.
- * It may be set at any time, and at
- * worst the writer will timeout and
- * re-check. So setting it as
- * accurately as possible is good, but
- * not absolutely critical.
- */
+ unsigned long flags; /* bit set of 'enum flag_bits' bits. */
wait_queue_head_t blocked_wait;
int desc_nr; /* descriptor index in the superblock */
@@ -152,6 +125,44 @@ struct md_rdev {
sector_t size; /* in sectors */
} badblocks;
};
+enum flag_bits {
+ Faulty, /* device is known to have a fault */
+ In_sync, /* device is in_sync with rest of array */
+ WriteMostly, /* Avoid reading if at all possible */
+ AutoDetected, /* added by auto-detect */
+ Blocked, /* An error occurred but has not yet
+ * been acknowledged by the metadata
+ * handler, so don't allow writes
+ * until it is cleared */
+ WriteErrorSeen, /* A write error has been seen on this
+ * device
+ */
+ FaultRecorded, /* Intermediate state for clearing
+ * Blocked. The Fault is/will-be
+ * recorded in the metadata, but that
+ * metadata hasn't been stored safely
+ * on disk yet.
+ */
+ BlockedBadBlocks, /* A writer is blocked because they
+ * found an unacknowledged bad-block.
+ * This can safely be cleared at any
+ * time, and the writer will re-check.
+ * It may be set at any time, and at
+ * worst the writer will timeout and
+ * re-check. So setting it as
+ * accurately as possible is good, but
+ * not absolutely critical.
+ */
+ WantReplacement, /* This device is a candidate to be
+ * hot-replaced, either because it has
+ * reported some faults, or because
+ * of explicit request.
+ */
+ Replacement, /* This device is a replacement for
+ * a want_replacement device with same
+ * raid_disk number.
+ */
+};
#define BB_LEN_MASK (0x00000000000001FFULL)
#define BB_OFFSET_MASK (0x7FFFFFFFFFFFFE00ULL)
@@ -428,7 +439,7 @@ struct md_personality
*/
void (*error_handler)(struct mddev *mddev, struct md_rdev *rdev);
int (*hot_add_disk) (struct mddev *mddev, struct md_rdev *rdev);
- int (*hot_remove_disk) (struct mddev *mddev, int number);
+ int (*hot_remove_disk) (struct mddev *mddev, struct md_rdev *rdev);
int (*spare_active) (struct mddev *mddev);
sector_t (*sync_request)(struct mddev *mddev, sector_t sector_nr, int *skipped, int go_faster);
int (*resize) (struct mddev *mddev, sector_t sectors);
@@ -482,15 +493,20 @@ static inline char * mdname (struct mddev * mddev)
static inline int sysfs_link_rdev(struct mddev *mddev, struct md_rdev *rdev)
{
char nm[20];
- sprintf(nm, "rd%d", rdev->raid_disk);
- return sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
+ if (!test_bit(Replacement, &rdev->flags)) {
+ sprintf(nm, "rd%d", rdev->raid_disk);
+ return sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
+ } else
+ return 0;
}
static inline void sysfs_unlink_rdev(struct mddev *mddev, struct md_rdev *rdev)
{
char nm[20];
- sprintf(nm, "rd%d", rdev->raid_disk);
- sysfs_remove_link(&mddev->kobj, nm);
+ if (!test_bit(Replacement, &rdev->flags)) {
+ sprintf(nm, "rd%d", rdev->raid_disk);
+ sysfs_remove_link(&mddev->kobj, nm);
+ }
}
/*
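
The md.h change above folds the old flag #defines into 'enum flag_bits' and introduces the two bits that drive hot replacement: WantReplacement, set when a member has reported faults or replacement is requested explicitly, and Replacement, carried by the standby device shadowing the same raid_disk slot; sysfs_link_rdev()/sysfs_unlink_rdev() now skip the rd%d link for Replacement devices. A rough userspace model of that flag discipline follows (trivial non-atomic bit helpers standing in for the kernel bitops, invented struct and output):

#include <stdio.h>
#include <stdbool.h>

/* Simplified, non-atomic stand-ins for the kernel's test_bit()/set_bit(). */
static bool test_bit(int nr, const unsigned long *addr) { return (*addr >> nr) & 1UL; }
static void set_bit(int nr, unsigned long *addr) { *addr |= 1UL << nr; }
static bool test_and_set_bit(int nr, unsigned long *addr)
{
	bool old = test_bit(nr, addr);
	set_bit(nr, addr);
	return old;
}

enum flag_bits { Faulty, In_sync, WantReplacement, Replacement };	/* subset only */

struct rdev_model { unsigned long flags; int raid_disk; };

int main(void)
{
	struct rdev_model worn = { 0, 2 }, standby = { 0, 2 };

	/* Repeated write errors on the original: ask for recovery exactly once. */
	for (int attempt = 0; attempt < 3; attempt++)
		if (!test_and_set_bit(WantReplacement, &worn.flags))
			printf("rd%d: replacement wanted, recovery kicked\n",
			       worn.raid_disk);

	/* The incoming spare shadows the same slot, flagged as Replacement. */
	set_bit(Replacement, &standby.flags);

	/* Only the non-replacement device owns the rd%d sysfs link. */
	printf("original gets rd%d link: %s\n", worn.raid_disk,
	       test_bit(Replacement, &worn.flags) ? "no" : "yes");
	printf("standby gets rd%d link: %s\n", standby.raid_disk,
	       test_bit(Replacement, &standby.flags) ? "no" : "yes");
	return 0;
}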
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 5899246fa37e..a222f516660e 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -292,17 +292,16 @@ static int multipath_add_disk(struct mddev *mddev, struct md_rdev *rdev)
return err;
}
-static int multipath_remove_disk(struct mddev *mddev, int number)
+static int multipath_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
{
struct mpconf *conf = mddev->private;
int err = 0;
- struct md_rdev *rdev;
+ int number = rdev->raid_disk;
struct multipath_info *p = conf->multipaths + number;
print_multipath_conf(conf);
- rdev = p->rdev;
- if (rdev) {
+ if (rdev == p->rdev) {
if (test_bit(In_sync, &rdev->flags) ||
atomic_read(&rdev->nr_pending)) {
printk(KERN_ERR "hot-remove-disk, slot %d is identified"
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index ede2461e79c5..a368db2431a5 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -135,7 +135,7 @@ out_free_pages:
put_page(r1_bio->bios[j]->bi_io_vec[i].bv_page);
j = -1;
out_free_bio:
- while ( ++j < pi->raid_disks )
+ while (++j < pi->raid_disks)
bio_put(r1_bio->bios[j]);
r1bio_pool_free(r1_bio, data);
return NULL;
@@ -164,7 +164,7 @@ static void put_all_bios(struct r1conf *conf, struct r1bio *r1_bio)
{
int i;
- for (i = 0; i < conf->raid_disks; i++) {
+ for (i = 0; i < conf->raid_disks * 2; i++) {
struct bio **bio = r1_bio->bios + i;
if (!BIO_SPECIAL(*bio))
bio_put(*bio);
@@ -185,7 +185,7 @@ static void put_buf(struct r1bio *r1_bio)
struct r1conf *conf = r1_bio->mddev->private;
int i;
- for (i=0; i<conf->raid_disks; i++) {
+ for (i = 0; i < conf->raid_disks * 2; i++) {
struct bio *bio = r1_bio->bios[i];
if (bio->bi_end_io)
rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);
@@ -277,13 +277,14 @@ static inline void update_head_pos(int disk, struct r1bio *r1_bio)
static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio)
{
int mirror;
- int raid_disks = r1_bio->mddev->raid_disks;
+ struct r1conf *conf = r1_bio->mddev->private;
+ int raid_disks = conf->raid_disks;
- for (mirror = 0; mirror < raid_disks; mirror++)
+ for (mirror = 0; mirror < raid_disks * 2; mirror++)
if (r1_bio->bios[mirror] == bio)
break;
- BUG_ON(mirror == raid_disks);
+ BUG_ON(mirror == raid_disks * 2);
update_head_pos(mirror, r1_bio);
return mirror;
@@ -390,6 +391,11 @@ static void raid1_end_write_request(struct bio *bio, int error)
if (!uptodate) {
set_bit(WriteErrorSeen,
&conf->mirrors[mirror].rdev->flags);
+ if (!test_and_set_bit(WantReplacement,
+ &conf->mirrors[mirror].rdev->flags))
+ set_bit(MD_RECOVERY_NEEDED, &
+ conf->mddev->recovery);
+
set_bit(R1BIO_WriteError, &r1_bio->state);
} else {
/*
@@ -505,7 +511,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
start_disk = conf->last_used;
}
- for (i = 0 ; i < conf->raid_disks ; i++) {
+ for (i = 0 ; i < conf->raid_disks * 2 ; i++) {
sector_t dist;
sector_t first_bad;
int bad_sectors;
@@ -525,8 +531,17 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
if (test_bit(WriteMostly, &rdev->flags)) {
/* Don't balance among write-mostly, just
* use the first as a last resort */
- if (best_disk < 0)
+ if (best_disk < 0) {
+ if (is_badblock(rdev, this_sector, sectors,
+ &first_bad, &bad_sectors)) {
+ if (first_bad < this_sector)
+ /* Cannot use this */
+ continue;
+ best_good_sectors = first_bad - this_sector;
+ } else
+ best_good_sectors = sectors;
best_disk = disk;
+ }
continue;
}
/* This is a reasonable device to use. It might
@@ -609,7 +624,7 @@ int md_raid1_congested(struct mddev *mddev, int bits)
return 1;
rcu_read_lock();
- for (i = 0; i < mddev->raid_disks; i++) {
+ for (i = 0; i < conf->raid_disks; i++) {
struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
if (rdev && !test_bit(Faulty, &rdev->flags)) {
struct request_queue *q = bdev_get_queue(rdev->bdev);
@@ -974,7 +989,7 @@ read_again:
*/
plugged = mddev_check_plugged(mddev);
- disks = conf->raid_disks;
+ disks = conf->raid_disks * 2;
retry_write:
blocked_rdev = NULL;
rcu_read_lock();
@@ -988,7 +1003,8 @@ read_again:
}
r1_bio->bios[i] = NULL;
if (!rdev || test_bit(Faulty, &rdev->flags)) {
- set_bit(R1BIO_Degraded, &r1_bio->state);
+ if (i < conf->raid_disks)
+ set_bit(R1BIO_Degraded, &r1_bio->state);
continue;
}
@@ -1263,6 +1279,25 @@ static int raid1_spare_active(struct mddev *mddev)
*/
for (i = 0; i < conf->raid_disks; i++) {
struct md_rdev *rdev = conf->mirrors[i].rdev;
+ struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
+ if (repl
+ && repl->recovery_offset == MaxSector
+ && !test_bit(Faulty, &repl->flags)
+ && !test_and_set_bit(In_sync, &repl->flags)) {
+ /* replacement has just become active */
+ if (!rdev ||
+ !test_and_clear_bit(In_sync, &rdev->flags))
+ count++;
+ if (rdev) {
+ /* Replaced device not technically
+ * faulty, but we need to be sure
+ * it gets removed and never re-added
+ */
+ set_bit(Faulty, &rdev->flags);
+ sysfs_notify_dirent_safe(
+ rdev->sysfs_state);
+ }
+ }
if (rdev
&& !test_bit(Faulty, &rdev->flags)
&& !test_and_set_bit(In_sync, &rdev->flags)) {
@@ -1286,7 +1321,7 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
int mirror = 0;
struct mirror_info *p;
int first = 0;
- int last = mddev->raid_disks - 1;
+ int last = conf->raid_disks - 1;
if (mddev->recovery_disabled == conf->recovery_disabled)
return -EBUSY;
@@ -1294,8 +1329,9 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
if (rdev->raid_disk >= 0)
first = last = rdev->raid_disk;
- for (mirror = first; mirror <= last; mirror++)
- if ( !(p=conf->mirrors+mirror)->rdev) {
+ for (mirror = first; mirror <= last; mirror++) {
+ p = conf->mirrors+mirror;
+ if (!p->rdev) {
disk_stack_limits(mddev->gendisk, rdev->bdev,
rdev->data_offset << 9);
@@ -1322,21 +1358,35 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
rcu_assign_pointer(p->rdev, rdev);
break;
}
+ if (test_bit(WantReplacement, &p->rdev->flags) &&
+ p[conf->raid_disks].rdev == NULL) {
+ /* Add this device as a replacement */
+ clear_bit(In_sync, &rdev->flags);
+ set_bit(Replacement, &rdev->flags);
+ rdev->raid_disk = mirror;
+ err = 0;
+ conf->fullsync = 1;
+ rcu_assign_pointer(p[conf->raid_disks].rdev, rdev);
+ break;
+ }
+ }
md_integrity_add_rdev(rdev, mddev);
print_conf(conf);
return err;
}
-static int raid1_remove_disk(struct mddev *mddev, int number)
+static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
{
struct r1conf *conf = mddev->private;
int err = 0;
- struct md_rdev *rdev;
+ int number = rdev->raid_disk;
struct mirror_info *p = conf->mirrors+ number;
+ if (rdev != p->rdev)
+ p = conf->mirrors + conf->raid_disks + number;
+
print_conf(conf);
- rdev = p->rdev;
- if (rdev) {
+ if (rdev == p->rdev) {
if (test_bit(In_sync, &rdev->flags) ||
atomic_read(&rdev->nr_pending)) {
err = -EBUSY;
@@ -1358,7 +1408,21 @@ static int raid1_remove_disk(struct mddev *mddev, int number)
err = -EBUSY;
p->rdev = rdev;
goto abort;
- }
+ } else if (conf->mirrors[conf->raid_disks + number].rdev) {
+ /* We just removed a device that is being replaced.
+ * Move down the replacement. We drain all IO before
+ * doing this to avoid confusion.
+ */
+ struct md_rdev *repl =
+ conf->mirrors[conf->raid_disks + number].rdev;
+ raise_barrier(conf);
+ clear_bit(Replacement, &repl->flags);
+ p->rdev = repl;
+ conf->mirrors[conf->raid_disks + number].rdev = NULL;
+ lower_barrier(conf);
+ clear_bit(WantReplacement, &rdev->flags);
+ } else
+ clear_bit(WantReplacement, &rdev->flags);
err = md_integrity_register(mddev);
}
abort:
@@ -1411,6 +1475,10 @@ static void end_sync_write(struct bio *bio, int error)
} while (sectors_to_go > 0);
set_bit(WriteErrorSeen,
&conf->mirrors[mirror].rdev->flags);
+ if (!test_and_set_bit(WantReplacement,
+ &conf->mirrors[mirror].rdev->flags))
+ set_bit(MD_RECOVERY_NEEDED, &
+ mddev->recovery);
set_bit(R1BIO_WriteError, &r1_bio->state);
} else if (is_badblock(conf->mirrors[mirror].rdev,
r1_bio->sector,
@@ -1441,8 +1509,13 @@ static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector,
if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
/* success */
return 1;
- if (rw == WRITE)
+ if (rw == WRITE) {
set_bit(WriteErrorSeen, &rdev->flags);
+ if (!test_and_set_bit(WantReplacement,
+ &rdev->flags))
+ set_bit(MD_RECOVERY_NEEDED, &
+ rdev->mddev->recovery);
+ }
/* need to record an error - either for the block or the device */
if (!rdev_set_badblocks(rdev, sector, sectors, 0))
md_error(rdev->mddev, rdev);
@@ -1493,7 +1566,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
}
}
d++;
- if (d == conf->raid_disks)
+ if (d == conf->raid_disks * 2)
d = 0;
} while (!success && d != r1_bio->read_disk);
@@ -1510,7 +1583,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
mdname(mddev),
bdevname(bio->bi_bdev, b),
(unsigned long long)r1_bio->sector);
- for (d = 0; d < conf->raid_disks; d++) {
+ for (d = 0; d < conf->raid_disks * 2; d++) {
rdev = conf->mirrors[d].rdev;
if (!rdev || test_bit(Faulty, &rdev->flags))
continue;
@@ -1536,7 +1609,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
/* write it back and re-read */
while (d != r1_bio->read_disk) {
if (d == 0)
- d = conf->raid_disks;
+ d = conf->raid_disks * 2;
d--;
if (r1_bio->bios[d]->bi_end_io != end_sync_read)
continue;
@@ -1551,7 +1624,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
d = start;
while (d != r1_bio->read_disk) {
if (d == 0)
- d = conf->raid_disks;
+ d = conf->raid_disks * 2;
d--;
if (r1_bio->bios[d]->bi_end_io != end_sync_read)
continue;
@@ -1584,7 +1657,7 @@ static int process_checks(struct r1bio *r1_bio)
int primary;
int i;
- for (primary = 0; primary < conf->raid_disks; primary++)
+ for (primary = 0; primary < conf->raid_disks * 2; primary++)
if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
test_bit(BIO_UPTODATE, &r1_bio->bios[primary]->bi_flags)) {
r1_bio->bios[primary]->bi_end_io = NULL;
@@ -1592,7 +1665,7 @@ static int process_checks(struct r1bio *r1_bio)
break;
}
r1_bio->read_disk = primary;
- for (i = 0; i < conf->raid_disks; i++) {
+ for (i = 0; i < conf->raid_disks * 2; i++) {
int j;
int vcnt = r1_bio->sectors >> (PAGE_SHIFT- 9);
struct bio *pbio = r1_bio->bios[primary];
@@ -1656,7 +1729,7 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
{
struct r1conf *conf = mddev->private;
int i;
- int disks = conf->raid_disks;
+ int disks = conf->raid_disks * 2;
struct bio *bio, *wbio;
bio = r1_bio->bios[r1_bio->read_disk];
@@ -1737,7 +1810,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
success = 1;
else {
d++;
- if (d == conf->raid_disks)
+ if (d == conf->raid_disks * 2)
d = 0;
}
} while (!success && d != read_disk);
@@ -1753,7 +1826,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
start = d;
while (d != read_disk) {
if (d==0)
- d = conf->raid_disks;
+ d = conf->raid_disks * 2;
d--;
rdev = conf->mirrors[d].rdev;
if (rdev &&
@@ -1765,7 +1838,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
while (d != read_disk) {
char b[BDEVNAME_SIZE];
if (d==0)
- d = conf->raid_disks;
+ d = conf->raid_disks * 2;
d--;
rdev = conf->mirrors[d].rdev;
if (rdev &&
@@ -1887,7 +1960,7 @@ static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio
{
int m;
int s = r1_bio->sectors;
- for (m = 0; m < conf->raid_disks ; m++) {
+ for (m = 0; m < conf->raid_disks * 2 ; m++) {
struct md_rdev *rdev = conf->mirrors[m].rdev;
struct bio *bio = r1_bio->bios[m];
if (bio->bi_end_io == NULL)
@@ -1909,7 +1982,7 @@ static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio
static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
{
int m;
- for (m = 0; m < conf->raid_disks ; m++)
+ for (m = 0; m < conf->raid_disks * 2 ; m++)
if (r1_bio->bios[m] == IO_MADE_GOOD) {
struct md_rdev *rdev = conf->mirrors[m].rdev;
rdev_clear_badblocks(rdev,
@@ -2184,7 +2257,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
r1_bio->state = 0;
set_bit(R1BIO_IsSync, &r1_bio->state);
- for (i=0; i < conf->raid_disks; i++) {
+ for (i = 0; i < conf->raid_disks * 2; i++) {
struct md_rdev *rdev;
bio = r1_bio->bios[i];
@@ -2203,7 +2276,8 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
rdev = rcu_dereference(conf->mirrors[i].rdev);
if (rdev == NULL ||
test_bit(Faulty, &rdev->flags)) {
- still_degraded = 1;
+ if (i < conf->raid_disks)
+ still_degraded = 1;
} else if (!test_bit(In_sync, &rdev->flags)) {
bio->bi_rw = WRITE;
bio->bi_end_io = end_sync_write;
@@ -2254,7 +2328,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
* need to mark them bad on all write targets
*/
int ok = 1;
- for (i = 0 ; i < conf->raid_disks ; i++)
+ for (i = 0 ; i < conf->raid_disks * 2 ; i++)
if (r1_bio->bios[i]->bi_end_io == end_sync_write) {
struct md_rdev *rdev =
rcu_dereference(conf->mirrors[i].rdev);
@@ -2323,7 +2397,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
len = sync_blocks<<9;
}
- for (i=0 ; i < conf->raid_disks; i++) {
+ for (i = 0 ; i < conf->raid_disks * 2; i++) {
bio = r1_bio->bios[i];
if (bio->bi_end_io) {
page = bio->bi_io_vec[bio->bi_vcnt].bv_page;
@@ -2356,7 +2430,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
*/
if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
atomic_set(&r1_bio->remaining, read_targets);
- for (i=0; i<conf->raid_disks; i++) {
+ for (i = 0; i < conf->raid_disks * 2; i++) {
bio = r1_bio->bios[i];
if (bio->bi_end_io == end_sync_read) {
md_sync_acct(bio->bi_bdev, nr_sectors);
@@ -2393,7 +2467,8 @@ static struct r1conf *setup_conf(struct mddev *mddev)
if (!conf)
goto abort;
- conf->mirrors = kzalloc(sizeof(struct mirror_info)*mddev->raid_disks,
+ conf->mirrors = kzalloc(sizeof(struct mirror_info)
+ * mddev->raid_disks * 2,
GFP_KERNEL);
if (!conf->mirrors)
goto abort;
@@ -2405,7 +2480,7 @@ static struct r1conf *setup_conf(struct mddev *mddev)
conf->poolinfo = kzalloc(sizeof(*conf->poolinfo), GFP_KERNEL);
if (!conf->poolinfo)
goto abort;
- conf->poolinfo->raid_disks = mddev->raid_disks;
+ conf->poolinfo->raid_disks = mddev->raid_disks * 2;
conf->r1bio_pool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
r1bio_pool_free,
conf->poolinfo);
@@ -2414,14 +2489,20 @@ static struct r1conf *setup_conf(struct mddev *mddev)
conf->poolinfo->mddev = mddev;
+ err = -EINVAL;
spin_lock_init(&conf->device_lock);
list_for_each_entry(rdev, &mddev->disks, same_set) {
int disk_idx = rdev->raid_disk;
if (disk_idx >= mddev->raid_disks
|| disk_idx < 0)
continue;
- disk = conf->mirrors + disk_idx;
+ if (test_bit(Replacement, &rdev->flags))
+ disk = conf->mirrors + conf->raid_disks + disk_idx;
+ else
+ disk = conf->mirrors + disk_idx;
+ if (disk->rdev)
+ goto abort;
disk->rdev = rdev;
disk->head_position = 0;
@@ -2437,11 +2518,27 @@ static struct r1conf *setup_conf(struct mddev *mddev)
conf->pending_count = 0;
conf->recovery_disabled = mddev->recovery_disabled - 1;
+ err = -EIO;
conf->last_used = -1;
- for (i = 0; i < conf->raid_disks; i++) {
+ for (i = 0; i < conf->raid_disks * 2; i++) {
disk = conf->mirrors + i;
+ if (i < conf->raid_disks &&
+ disk[conf->raid_disks].rdev) {
+ /* This slot has a replacement. */
+ if (!disk->rdev) {
+ /* No original, just make the replacement
+ * a recovering spare
+ */
+ disk->rdev =
+ disk[conf->raid_disks].rdev;
+ disk[conf->raid_disks].rdev = NULL;
+ } else if (!test_bit(In_sync, &disk->rdev->flags))
+ /* Original is not in_sync - bad */
+ goto abort;
+ }
+
if (!disk->rdev ||
!test_bit(In_sync, &disk->rdev->flags)) {
disk->head_position = 0;
@@ -2455,7 +2552,6 @@ static struct r1conf *setup_conf(struct mddev *mddev)
conf->last_used = i;
}
- err = -EIO;
if (conf->last_used < 0) {
printk(KERN_ERR "md/raid1:%s: no operational mirrors\n",
mdname(mddev));
@@ -2665,7 +2761,7 @@ static int raid1_reshape(struct mddev *mddev)
if (!newpoolinfo)
return -ENOMEM;
newpoolinfo->mddev = mddev;
- newpoolinfo->raid_disks = raid_disks;
+ newpoolinfo->raid_disks = raid_disks * 2;
newpool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
r1bio_pool_free, newpoolinfo);
@@ -2673,7 +2769,8 @@ static int raid1_reshape(struct mddev *mddev)
kfree(newpoolinfo);
return -ENOMEM;
}
- newmirrors = kzalloc(sizeof(struct mirror_info) * raid_disks, GFP_KERNEL);
+ newmirrors = kzalloc(sizeof(struct mirror_info) * raid_disks * 2,
+ GFP_KERNEL);
if (!newmirrors) {
kfree(newpoolinfo);
mempool_destroy(newpool);
diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h
index c732b6cce935..80ded139314c 100644
--- a/drivers/md/raid1.h
+++ b/drivers/md/raid1.h
@@ -12,6 +12,9 @@ struct mirror_info {
* pool was allocated for, so they know how much to allocate and free.
* mddev->raid_disks cannot be used, as it can change while a pool is active
* These two datums are stored in a kmalloced struct.
+ * The 'raid_disks' here is twice the raid_disks in r1conf.
+ * This allows space so that each 'real' device can have a replacement in the
+ * second half of the array.
*/
struct pool_info {
@@ -21,7 +24,9 @@ struct pool_info {
struct r1conf {
struct mddev *mddev;
- struct mirror_info *mirrors;
+ struct mirror_info *mirrors; /* twice 'raid_disks' to
+ * allow for replacements.
+ */
int raid_disks;
/* When choose the best device for a read (read_balance())
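
The raid1.h comment above describes the central layout trick: conf->mirrors is allocated with 2 * raid_disks entries, and slot i's replacement, if any, lives at index i + raid_disks. A minimal sketch of that indexing (userspace C with an invented mirror type, not the kernel structs):

#include <stdio.h>
#include <stdlib.h>

struct mirror { const char *rdev; };	/* stand-in for struct mirror_info */

int main(void)
{
	int raid_disks = 2;
	/* Twice raid_disks: originals in [0, raid_disks), replacements above. */
	struct mirror *mirrors = calloc(2 * raid_disks, sizeof(*mirrors));
	if (!mirrors)
		return 1;

	mirrors[0].rdev = "sda";		/* original for slot 0 */
	mirrors[0 + raid_disks].rdev = "sdc";	/* its hot replacement */
	mirrors[1].rdev = "sdb";		/* slot 1 has no replacement */

	for (int i = 0; i < raid_disks; i++)
		printf("slot %d: rdev=%s replacement=%s\n", i,
		       mirrors[i].rdev ? mirrors[i].rdev : "-",
		       mirrors[i + raid_disks].rdev ? mirrors[i + raid_disks].rdev : "-");

	free(mirrors);
	return 0;
}

This is why so many raid1.c loops above now run to conf->raid_disks * 2: the second half must be walked for I/O accounting even though only the first half counts toward degradation.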
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 685ddf325ee4..6e8aa213f0d5 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -73,7 +73,8 @@ static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
struct r10conf *conf = data;
int size = offsetof(struct r10bio, devs[conf->copies]);
- /* allocate a r10bio with room for raid_disks entries in the bios array */
+ /* allocate a r10bio with room for raid_disks entries in the
+ * bios array */
return kzalloc(size, gfp_flags);
}
@@ -123,12 +124,19 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
if (!bio)
goto out_free_bio;
r10_bio->devs[j].bio = bio;
+ if (!conf->have_replacement)
+ continue;
+ bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
+ if (!bio)
+ goto out_free_bio;
+ r10_bio->devs[j].repl_bio = bio;
}
/*
* Allocate RESYNC_PAGES data pages and attach them
* where needed.
*/
for (j = 0 ; j < nalloc; j++) {
+ struct bio *rbio = r10_bio->devs[j].repl_bio;
bio = r10_bio->devs[j].bio;
for (i = 0; i < RESYNC_PAGES; i++) {
if (j == 1 && !test_bit(MD_RECOVERY_SYNC,
@@ -143,6 +151,8 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
goto out_free_pages;
bio->bi_io_vec[i].bv_page = page;
+ if (rbio)
+ rbio->bi_io_vec[i].bv_page = page;
}
}
@@ -156,8 +166,11 @@ out_free_pages:
safe_put_page(r10_bio->devs[j].bio->bi_io_vec[i].bv_page);
j = -1;
out_free_bio:
- while ( ++j < nalloc )
+ while (++j < nalloc) {
bio_put(r10_bio->devs[j].bio);
+ if (r10_bio->devs[j].repl_bio)
+ bio_put(r10_bio->devs[j].repl_bio);
+ }
r10bio_pool_free(r10_bio, conf);
return NULL;
}
@@ -178,6 +191,9 @@ static void r10buf_pool_free(void *__r10_bio, void *data)
}
bio_put(bio);
}
+ bio = r10bio->devs[j].repl_bio;
+ if (bio)
+ bio_put(bio);
}
r10bio_pool_free(r10bio, conf);
}
@@ -191,6 +207,10 @@ static void put_all_bios(struct r10conf *conf, struct r10bio *r10_bio)
if (!BIO_SPECIAL(*bio))
bio_put(*bio);
*bio = NULL;
+ bio = &r10_bio->devs[i].repl_bio;
+ if (r10_bio->read_slot < 0 && !BIO_SPECIAL(*bio))
+ bio_put(*bio);
+ *bio = NULL;
}
}
@@ -275,19 +295,27 @@ static inline void update_head_pos(int slot, struct r10bio *r10_bio)
* Find the disk number which triggered given bio
*/
static int find_bio_disk(struct r10conf *conf, struct r10bio *r10_bio,
- struct bio *bio, int *slotp)
+ struct bio *bio, int *slotp, int *replp)
{
int slot;
+ int repl = 0;
- for (slot = 0; slot < conf->copies; slot++)
+ for (slot = 0; slot < conf->copies; slot++) {
if (r10_bio->devs[slot].bio == bio)
break;
+ if (r10_bio->devs[slot].repl_bio == bio) {
+ repl = 1;
+ break;
+ }
+ }
BUG_ON(slot == conf->copies);
update_head_pos(slot, r10_bio);
if (slotp)
*slotp = slot;
+ if (replp)
+ *replp = repl;
return r10_bio->devs[slot].devnum;
}
@@ -296,11 +324,13 @@ static void raid10_end_read_request(struct bio *bio, int error)
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
struct r10bio *r10_bio = bio->bi_private;
int slot, dev;
+ struct md_rdev *rdev;
struct r10conf *conf = r10_bio->mddev->private;
slot = r10_bio->read_slot;
dev = r10_bio->devs[slot].devnum;
+ rdev = r10_bio->devs[slot].rdev;
/*
* this branch is our 'one mirror IO has finished' event handler:
*/
@@ -318,7 +348,7 @@ static void raid10_end_read_request(struct bio *bio, int error)
*/
set_bit(R10BIO_Uptodate, &r10_bio->state);
raid_end_bio_io(r10_bio);
- rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
+ rdev_dec_pending(rdev, conf->mddev);
} else {
/*
* oops, read error - keep the refcount on the rdev
@@ -327,7 +357,7 @@ static void raid10_end_read_request(struct bio *bio, int error)
printk_ratelimited(KERN_ERR
"md/raid10:%s: %s: rescheduling sector %llu\n",
mdname(conf->mddev),
- bdevname(conf->mirrors[dev].rdev->bdev, b),
+ bdevname(rdev->bdev, b),
(unsigned long long)r10_bio->sector);
set_bit(R10BIO_ReadError, &r10_bio->state);
reschedule_retry(r10_bio);
@@ -366,17 +396,35 @@ static void raid10_end_write_request(struct bio *bio, int error)
int dev;
int dec_rdev = 1;
struct r10conf *conf = r10_bio->mddev->private;
- int slot;
+ int slot, repl;
+ struct md_rdev *rdev = NULL;
- dev = find_bio_disk(conf, r10_bio, bio, &slot);
+ dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
+ if (repl)
+ rdev = conf->mirrors[dev].replacement;
+ if (!rdev) {
+ smp_rmb();
+ repl = 0;
+ rdev = conf->mirrors[dev].rdev;
+ }
/*
* this branch is our 'one mirror IO has finished' event handler:
*/
if (!uptodate) {
- set_bit(WriteErrorSeen, &conf->mirrors[dev].rdev->flags);
- set_bit(R10BIO_WriteError, &r10_bio->state);
- dec_rdev = 0;
+ if (repl)
+ /* Never record new bad blocks to replacement,
+ * just fail it.
+ */
+ md_error(rdev->mddev, rdev);
+ else {
+ set_bit(WriteErrorSeen, &rdev->flags);
+ if (!test_and_set_bit(WantReplacement, &rdev->flags))
+ set_bit(MD_RECOVERY_NEEDED,
+ &rdev->mddev->recovery);
+ set_bit(R10BIO_WriteError, &r10_bio->state);
+ dec_rdev = 0;
+ }
} else {
/*
* Set R10BIO_Uptodate in our master bio, so that
@@ -393,12 +441,15 @@ static void raid10_end_write_request(struct bio *bio, int error)
set_bit(R10BIO_Uptodate, &r10_bio->state);
/* Maybe we can clear some bad blocks. */
- if (is_badblock(conf->mirrors[dev].rdev,
+ if (is_badblock(rdev,
r10_bio->devs[slot].addr,
r10_bio->sectors,
&first_bad, &bad_sectors)) {
bio_put(bio);
- r10_bio->devs[slot].bio = IO_MADE_GOOD;
+ if (repl)
+ r10_bio->devs[slot].repl_bio = IO_MADE_GOOD;
+ else
+ r10_bio->devs[slot].bio = IO_MADE_GOOD;
dec_rdev = 0;
set_bit(R10BIO_MadeGood, &r10_bio->state);
}
@@ -414,7 +465,6 @@ static void raid10_end_write_request(struct bio *bio, int error)
rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
}
-
/*
* RAID10 layout manager
* As well as the chunksize and raid_disks count, there are two
@@ -562,14 +612,16 @@ static int raid10_mergeable_bvec(struct request_queue *q,
* FIXME: possibly should rethink readbalancing and do it differently
* depending on near_copies / far_copies geometry.
*/
-static int read_balance(struct r10conf *conf, struct r10bio *r10_bio, int *max_sectors)
+static struct md_rdev *read_balance(struct r10conf *conf,
+ struct r10bio *r10_bio,
+ int *max_sectors)
{
const sector_t this_sector = r10_bio->sector;
int disk, slot;
int sectors = r10_bio->sectors;
int best_good_sectors;
sector_t new_distance, best_dist;
- struct md_rdev *rdev;
+ struct md_rdev *rdev, *best_rdev;
int do_balance;
int best_slot;
@@ -578,6 +630,7 @@ static int read_balance(struct r10conf *conf, struct r10bio *r10_bio, int *max_s
retry:
sectors = r10_bio->sectors;
best_slot = -1;
+ best_rdev = NULL;
best_dist = MaxSector;
best_good_sectors = 0;
do_balance = 1;
@@ -599,10 +652,16 @@ retry:
if (r10_bio->devs[slot].bio == IO_BLOCKED)
continue;
disk = r10_bio->devs[slot].devnum;
- rdev = rcu_dereference(conf->mirrors[disk].rdev);
+ rdev = rcu_dereference(conf->mirrors[disk].replacement);
+ if (rdev == NULL || test_bit(Faulty, &rdev->flags) ||
+ r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
+ rdev = rcu_dereference(conf->mirrors[disk].rdev);
if (rdev == NULL)
continue;
- if (!test_bit(In_sync, &rdev->flags))
+ if (test_bit(Faulty, &rdev->flags))
+ continue;
+ if (!test_bit(In_sync, &rdev->flags) &&
+ r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
continue;
dev_sector = r10_bio->devs[slot].addr;
@@ -627,6 +686,7 @@ retry:
if (good_sectors > best_good_sectors) {
best_good_sectors = good_sectors;
best_slot = slot;
+ best_rdev = rdev;
}
if (!do_balance)
/* Must read from here */
@@ -655,16 +715,15 @@ retry:
if (new_distance < best_dist) {
best_dist = new_distance;
best_slot = slot;
+ best_rdev = rdev;
}
}
- if (slot == conf->copies)
+ if (slot >= conf->copies) {
slot = best_slot;
+ rdev = best_rdev;
+ }
if (slot >= 0) {
- disk = r10_bio->devs[slot].devnum;
- rdev = rcu_dereference(conf->mirrors[disk].rdev);
- if (!rdev)
- goto retry;
atomic_inc(&rdev->nr_pending);
if (test_bit(Faulty, &rdev->flags)) {
/* Cannot risk returning a device that failed
@@ -675,11 +734,11 @@ retry:
}
r10_bio->read_slot = slot;
} else
- disk = -1;
+ rdev = NULL;
rcu_read_unlock();
*max_sectors = best_good_sectors;
- return disk;
+ return rdev;
}
static int raid10_congested(void *data, int bits)
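
read_balance() above now returns the chosen struct md_rdev * directly and, for each slot, first considers the replacement and only falls back to the original when the replacement is missing, faulty, or not recovered far enough to cover the request. A simplified selection model (plain C, hypothetical fields, no RCU, bad-block handling, or distance balancing):

#include <stdio.h>
#include <limits.h>

struct rdev { const char *name; int faulty; long long recovery_offset; };
struct slot { struct rdev *rdev, *replacement; long long addr; };

/* Pick a device to read 'sectors' sectors at slot->addr, or NULL. */
static struct rdev *pick(struct slot *s, int sectors)
{
	struct rdev *r = s->replacement;

	if (!r || r->faulty || s->addr + sectors > r->recovery_offset)
		r = s->rdev;			/* fall back to the original */
	if (!r || r->faulty)
		return NULL;
	return r;
}

int main(void)
{
	struct rdev orig = { "sda", 0, LLONG_MAX };	/* fully in sync */
	struct rdev repl = { "sdc", 0, 1024 };		/* recovered up to sector 1024 */
	struct slot s = { &orig, &repl, 512 };

	printf("read of 256 at 512 goes to %s\n", pick(&s, 256)->name);   /* sdc */
	printf("read of 1024 at 512 goes to %s\n", pick(&s, 1024)->name); /* sda */
	return 0;
}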
@@ -846,7 +905,6 @@ static void unfreeze_array(struct r10conf *conf)
static void make_request(struct mddev *mddev, struct bio * bio)
{
struct r10conf *conf = mddev->private;
- struct mirror_info *mirror;
struct r10bio *r10_bio;
struct bio *read_bio;
int i;
@@ -945,27 +1003,27 @@ static void make_request(struct mddev *mddev, struct bio * bio)
/*
* read balancing logic:
*/
- int disk;
+ struct md_rdev *rdev;
int slot;
read_again:
- disk = read_balance(conf, r10_bio, &max_sectors);
- slot = r10_bio->read_slot;
- if (disk < 0) {
+ rdev = read_balance(conf, r10_bio, &max_sectors);
+ if (!rdev) {
raid_end_bio_io(r10_bio);
return;
}
- mirror = conf->mirrors + disk;
+ slot = r10_bio->read_slot;
read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
md_trim_bio(read_bio, r10_bio->sector - bio->bi_sector,
max_sectors);
r10_bio->devs[slot].bio = read_bio;
+ r10_bio->devs[slot].rdev = rdev;
read_bio->bi_sector = r10_bio->devs[slot].addr +
- mirror->rdev->data_offset;
- read_bio->bi_bdev = mirror->rdev->bdev;
+ rdev->data_offset;
+ read_bio->bi_bdev = rdev->bdev;
read_bio->bi_end_io = raid10_end_read_request;
read_bio->bi_rw = READ | do_sync;
read_bio->bi_private = r10_bio;
@@ -1025,6 +1083,7 @@ read_again:
*/
plugged = mddev_check_plugged(mddev);
+ r10_bio->read_slot = -1; /* make sure repl_bio gets freed */
raid10_find_phys(conf, r10_bio);
retry_write:
blocked_rdev = NULL;
@@ -1034,12 +1093,25 @@ retry_write:
for (i = 0; i < conf->copies; i++) {
int d = r10_bio->devs[i].devnum;
struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev);
+ struct md_rdev *rrdev = rcu_dereference(
+ conf->mirrors[d].replacement);
+ if (rdev == rrdev)
+ rrdev = NULL;
if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
atomic_inc(&rdev->nr_pending);
blocked_rdev = rdev;
break;
}
+ if (rrdev && unlikely(test_bit(Blocked, &rrdev->flags))) {
+ atomic_inc(&rrdev->nr_pending);
+ blocked_rdev = rrdev;
+ break;
+ }
+ if (rrdev && test_bit(Faulty, &rrdev->flags))
+ rrdev = NULL;
+
r10_bio->devs[i].bio = NULL;
+ r10_bio->devs[i].repl_bio = NULL;
if (!rdev || test_bit(Faulty, &rdev->flags)) {
set_bit(R10BIO_Degraded, &r10_bio->state);
continue;
@@ -1088,6 +1160,10 @@ retry_write:
}
r10_bio->devs[i].bio = bio;
atomic_inc(&rdev->nr_pending);
+ if (rrdev) {
+ r10_bio->devs[i].repl_bio = bio;
+ atomic_inc(&rrdev->nr_pending);
+ }
}
rcu_read_unlock();
@@ -1096,11 +1172,23 @@ retry_write:
int j;
int d;
- for (j = 0; j < i; j++)
+ for (j = 0; j < i; j++) {
if (r10_bio->devs[j].bio) {
d = r10_bio->devs[j].devnum;
rdev_dec_pending(conf->mirrors[d].rdev, mddev);
}
+ if (r10_bio->devs[j].repl_bio) {
+ struct md_rdev *rdev;
+ d = r10_bio->devs[j].devnum;
+ rdev = conf->mirrors[d].replacement;
+ if (!rdev) {
+ /* Race with remove_disk */
+ smp_mb();
+ rdev = conf->mirrors[d].rdev;
+ }
+ rdev_dec_pending(rdev, mddev);
+ }
+ }
allow_barrier(conf);
md_wait_for_blocked_rdev(blocked_rdev, mddev);
wait_barrier(conf);
@@ -1147,6 +1235,31 @@ retry_write:
bio_list_add(&conf->pending_bio_list, mbio);
conf->pending_count++;
spin_unlock_irqrestore(&conf->device_lock, flags);
+
+ if (!r10_bio->devs[i].repl_bio)
+ continue;
+
+ mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
+ md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
+ max_sectors);
+ r10_bio->devs[i].repl_bio = mbio;
+
+ /* We are actively writing to the original device
+ * so it cannot disappear, so the replacement cannot
+ * become NULL here
+ */
+ mbio->bi_sector = (r10_bio->devs[i].addr+
+ conf->mirrors[d].replacement->data_offset);
+ mbio->bi_bdev = conf->mirrors[d].replacement->bdev;
+ mbio->bi_end_io = raid10_end_write_request;
+ mbio->bi_rw = WRITE | do_sync | do_fua;
+ mbio->bi_private = r10_bio;
+
+ atomic_inc(&r10_bio->remaining);
+ spin_lock_irqsave(&conf->device_lock, flags);
+ bio_list_add(&conf->pending_bio_list, mbio);
+ conf->pending_count++;
+ spin_unlock_irqrestore(&conf->device_lock, flags);
}
/* Don't remove the bias on 'remaining' (one_write_done) until
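
In the write path above, each write is now cloned a second time whenever the target slot has a live replacement, and both clones are queued under the same r10_bio so the usual 'remaining' accounting (with the submitter holding a bias) covers them. A toy single-threaded fan-out model of that bookkeeping, with invented names and no real bios:

#include <stdio.h>

struct write_set { int remaining; int errors; };

static void submit(struct write_set *ws, const char *target)
{
	ws->remaining++;			/* one completion owed per clone */
	printf("queued write clone to %s\n", target);
}

static void complete(struct write_set *ws, int ok)
{
	if (!ok)
		ws->errors++;
	if (--ws->remaining == 0)
		printf("all clones done, %d error(s)\n", ws->errors);
}

int main(void)
{
	struct write_set ws = { .remaining = 1 };	/* bias held by the submitter */
	const char *primary = "sdb", *replacement = "sdd";

	submit(&ws, primary);
	submit(&ws, replacement);	/* only when a replacement is active */

	complete(&ws, 1);		/* primary finished */
	complete(&ws, 1);		/* replacement finished */
	complete(&ws, 1);		/* submitter drops its bias last */
	return 0;
}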
@@ -1309,9 +1422,27 @@ static int raid10_spare_active(struct mddev *mddev)
*/
for (i = 0; i < conf->raid_disks; i++) {
tmp = conf->mirrors + i;
- if (tmp->rdev
- && !test_bit(Faulty, &tmp->rdev->flags)
- && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
+ if (tmp->replacement
+ && tmp->replacement->recovery_offset == MaxSector
+ && !test_bit(Faulty, &tmp->replacement->flags)
+ && !test_and_set_bit(In_sync, &tmp->replacement->flags)) {
+ /* Replacement has just become active */
+ if (!tmp->rdev
+ || !test_and_clear_bit(In_sync, &tmp->rdev->flags))
+ count++;
+ if (tmp->rdev) {
+ /* Replaced device not technically faulty,
+ * but we need to be sure it gets removed
+ * and never re-added.
+ */
+ set_bit(Faulty, &tmp->rdev->flags);
+ sysfs_notify_dirent_safe(
+ tmp->rdev->sysfs_state);
+ }
+ sysfs_notify_dirent_safe(tmp->replacement->sysfs_state);
+ } else if (tmp->rdev
+ && !test_bit(Faulty, &tmp->rdev->flags)
+ && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
count++;
sysfs_notify_dirent(tmp->rdev->sysfs_state);
}
@@ -1353,8 +1484,25 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
struct mirror_info *p = &conf->mirrors[mirror];
if (p->recovery_disabled == mddev->recovery_disabled)
continue;
- if (p->rdev)
- continue;
+ if (p->rdev) {
+ if (!test_bit(WantReplacement, &p->rdev->flags) ||
+ p->replacement != NULL)
+ continue;
+ clear_bit(In_sync, &rdev->flags);
+ set_bit(Replacement, &rdev->flags);
+ rdev->raid_disk = mirror;
+ err = 0;
+ disk_stack_limits(mddev->gendisk, rdev->bdev,
+ rdev->data_offset << 9);
+ if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
+ blk_queue_max_segments(mddev->queue, 1);
+ blk_queue_segment_boundary(mddev->queue,
+ PAGE_CACHE_SIZE - 1);
+ }
+ conf->fullsync = 1;
+ rcu_assign_pointer(p->replacement, rdev);
+ break;
+ }
disk_stack_limits(mddev->gendisk, rdev->bdev,
rdev->data_offset << 9);
@@ -1385,40 +1533,61 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
return err;
}
-static int raid10_remove_disk(struct mddev *mddev, int number)
+static int raid10_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
{
struct r10conf *conf = mddev->private;
int err = 0;
- struct md_rdev *rdev;
- struct mirror_info *p = conf->mirrors+ number;
+ int number = rdev->raid_disk;
+ struct md_rdev **rdevp;
+ struct mirror_info *p = conf->mirrors + number;
print_conf(conf);
- rdev = p->rdev;
- if (rdev) {
- if (test_bit(In_sync, &rdev->flags) ||
- atomic_read(&rdev->nr_pending)) {
- err = -EBUSY;
- goto abort;
- }
- /* Only remove faulty devices in recovery
- * is not possible.
- */
- if (!test_bit(Faulty, &rdev->flags) &&
- mddev->recovery_disabled != p->recovery_disabled &&
- enough(conf, -1)) {
- err = -EBUSY;
- goto abort;
- }
- p->rdev = NULL;
- synchronize_rcu();
- if (atomic_read(&rdev->nr_pending)) {
- /* lost the race, try later */
- err = -EBUSY;
- p->rdev = rdev;
- goto abort;
- }
- err = md_integrity_register(mddev);
+ if (rdev == p->rdev)
+ rdevp = &p->rdev;
+ else if (rdev == p->replacement)
+ rdevp = &p->replacement;
+ else
+ return 0;
+
+ if (test_bit(In_sync, &rdev->flags) ||
+ atomic_read(&rdev->nr_pending)) {
+ err = -EBUSY;
+ goto abort;
}
+ /* Only remove faulty devices if recovery
+ * is not possible.
+ */
+ if (!test_bit(Faulty, &rdev->flags) &&
+ mddev->recovery_disabled != p->recovery_disabled &&
+ (!p->replacement || p->replacement == rdev) &&
+ enough(conf, -1)) {
+ err = -EBUSY;
+ goto abort;
+ }
+ *rdevp = NULL;
+ synchronize_rcu();
+ if (atomic_read(&rdev->nr_pending)) {
+ /* lost the race, try later */
+ err = -EBUSY;
+ *rdevp = rdev;
+ goto abort;
+ } else if (p->replacement) {
+ /* We must have just cleared 'rdev' */
+ p->rdev = p->replacement;
+ clear_bit(Replacement, &p->replacement->flags);
+ smp_mb(); /* Make sure other CPUs may see both as identical
+ * but will never see neither -- if they are careful.
+ */
+ p->replacement = NULL;
+ clear_bit(WantReplacement, &rdev->flags);
+ } else
+ /* We might have just remove the Replacement as faulty
+ * Clear the flag just in case
+ */
+ clear_bit(WantReplacement, &rdev->flags);
+
+ err = md_integrity_register(mddev);
+
abort:
print_conf(conf);
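
raid10_remove_disk() above gains the promotion step: when the original rdev is detached and a replacement exists, the replacement is slid into the rdev slot, its Replacement bit is cleared, and only then is the replacement pointer NULLed, with an smp_mb() in between so readers that check both pointers never observe both as empty. A single-threaded sketch of that pointer shuffle (ordinary assignments and invented types here; the kernel version additionally clears the pointer first, waits for RCU, and relies on the barrier for the concurrent case):

#include <stdio.h>

struct rdev { const char *name; int replacement_bit; };
struct mirror { struct rdev *rdev, *replacement; };

static void remove_original(struct mirror *p)
{
	struct rdev *old = p->rdev;

	if (p->replacement) {
		/* Promote: publish the replacement as rdev before clearing
		 * the replacement pointer, so a reader that tries one and
		 * then the other always finds at least one of them. */
		p->rdev = p->replacement;
		p->rdev->replacement_bit = 0;
		/* smp_mb() sits here in the kernel version. */
		p->replacement = NULL;
	} else {
		p->rdev = NULL;
	}
	printf("detached %s, slot now served by %s\n",
	       old->name, p->rdev ? p->rdev->name : "nothing");
}

int main(void)
{
	struct rdev worn = { "sda", 0 }, fresh = { "sdc", 1 };
	struct mirror m = { &worn, &fresh };

	remove_original(&m);
	return 0;
}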
@@ -1432,7 +1601,7 @@ static void end_sync_read(struct bio *bio, int error)
struct r10conf *conf = r10_bio->mddev->private;
int d;
- d = find_bio_disk(conf, r10_bio, bio, NULL);
+ d = find_bio_disk(conf, r10_bio, bio, NULL, NULL);
if (test_bit(BIO_UPTODATE, &bio->bi_flags))
set_bit(R10BIO_Uptodate, &r10_bio->state);
@@ -1493,19 +1662,34 @@ static void end_sync_write(struct bio *bio, int error)
sector_t first_bad;
int bad_sectors;
int slot;
-
- d = find_bio_disk(conf, r10_bio, bio, &slot);
+ int repl;
+ struct md_rdev *rdev = NULL;
+
+ d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
+ if (repl)
+ rdev = conf->mirrors[d].replacement;
+ if (!rdev) {
+ smp_mb();
+ rdev = conf->mirrors[d].rdev;
+ }
if (!uptodate) {
- set_bit(WriteErrorSeen, &conf->mirrors[d].rdev->flags);
- set_bit(R10BIO_WriteError, &r10_bio->state);
- } else if (is_badblock(conf->mirrors[d].rdev,
+ if (repl)
+ md_error(mddev, rdev);
+ else {
+ set_bit(WriteErrorSeen, &rdev->flags);
+ if (!test_and_set_bit(WantReplacement, &rdev->flags))
+ set_bit(MD_RECOVERY_NEEDED,
+ &rdev->mddev->recovery);
+ set_bit(R10BIO_WriteError, &r10_bio->state);
+ }
+ } else if (is_badblock(rdev,
r10_bio->devs[slot].addr,
r10_bio->sectors,
&first_bad, &bad_sectors))
set_bit(R10BIO_MadeGood, &r10_bio->state);
- rdev_dec_pending(conf->mirrors[d].rdev, mddev);
+ rdev_dec_pending(rdev, mddev);
end_sync_request(r10_bio);
}
@@ -1609,6 +1793,29 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
generic_make_request(tbio);
}
+ /* Now write out to any replacement devices
+ * that are active
+ */
+ for (i = 0; i < conf->copies; i++) {
+ int j, d;
+ int vcnt = r10_bio->sectors >> (PAGE_SHIFT-9);
+
+ tbio = r10_bio->devs[i].repl_bio;
+ if (!tbio || !tbio->bi_end_io)
+ continue;
+ if (r10_bio->devs[i].bio->bi_end_io != end_sync_write
+ && r10_bio->devs[i].bio != fbio)
+ for (j = 0; j < vcnt; j++)
+ memcpy(page_address(tbio->bi_io_vec[j].bv_page),
+ page_address(fbio->bi_io_vec[j].bv_page),
+ PAGE_SIZE);
+ d = r10_bio->devs[i].devnum;
+ atomic_inc(&r10_bio->remaining);
+ md_sync_acct(conf->mirrors[d].replacement->bdev,
+ tbio->bi_size >> 9);
+ generic_make_request(tbio);
+ }
+
done:
if (atomic_dec_and_test(&r10_bio->remaining)) {
md_done_sync(mddev, r10_bio->sectors, 1);
@@ -1668,8 +1875,13 @@ static void fix_recovery_read_error(struct r10bio *r10_bio)
s << 9,
bio->bi_io_vec[idx].bv_page,
WRITE, false);
- if (!ok)
+ if (!ok) {
set_bit(WriteErrorSeen, &rdev->flags);
+ if (!test_and_set_bit(WantReplacement,
+ &rdev->flags))
+ set_bit(MD_RECOVERY_NEEDED,
+ &rdev->mddev->recovery);
+ }
}
if (!ok) {
/* We don't worry if we cannot set a bad block -
@@ -1709,7 +1921,7 @@ static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
{
struct r10conf *conf = mddev->private;
int d;
- struct bio *wbio;
+ struct bio *wbio, *wbio2;
if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) {
fix_recovery_read_error(r10_bio);
@@ -1721,12 +1933,20 @@ static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
* share the pages with the first bio
* and submit the write request
*/
- wbio = r10_bio->devs[1].bio;
d = r10_bio->devs[1].devnum;
-
- atomic_inc(&conf->mirrors[d].rdev->nr_pending);
- md_sync_acct(conf->mirrors[d].rdev->bdev, wbio->bi_size >> 9);
- generic_make_request(wbio);
+ wbio = r10_bio->devs[1].bio;
+ wbio2 = r10_bio->devs[1].repl_bio;
+ if (wbio->bi_end_io) {
+ atomic_inc(&conf->mirrors[d].rdev->nr_pending);
+ md_sync_acct(conf->mirrors[d].rdev->bdev, wbio->bi_size >> 9);
+ generic_make_request(wbio);
+ }
+ if (wbio2 && wbio2->bi_end_io) {
+ atomic_inc(&conf->mirrors[d].replacement->nr_pending);
+ md_sync_acct(conf->mirrors[d].replacement->bdev,
+ wbio2->bi_size >> 9);
+ generic_make_request(wbio2);
+ }
}
@@ -1779,8 +1999,12 @@ static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
/* success */
return 1;
- if (rw == WRITE)
+ if (rw == WRITE) {
set_bit(WriteErrorSeen, &rdev->flags);
+ if (!test_and_set_bit(WantReplacement, &rdev->flags))
+ set_bit(MD_RECOVERY_NEEDED,
+ &rdev->mddev->recovery);
+ }
/* need to record an error - either for the block or the device */
if (!rdev_set_badblocks(rdev, sector, sectors, 0))
md_error(rdev->mddev, rdev);
@@ -2060,10 +2284,9 @@ static int narrow_write_error(struct r10bio *r10_bio, int i)
static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
{
int slot = r10_bio->read_slot;
- int mirror = r10_bio->devs[slot].devnum;
struct bio *bio;
struct r10conf *conf = mddev->private;
- struct md_rdev *rdev;
+ struct md_rdev *rdev = r10_bio->devs[slot].rdev;
char b[BDEVNAME_SIZE];
unsigned long do_sync;
int max_sectors;
@@ -2081,15 +2304,15 @@ static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
fix_read_error(conf, mddev, r10_bio);
unfreeze_array(conf);
}
- rdev_dec_pending(conf->mirrors[mirror].rdev, mddev);
+ rdev_dec_pending(rdev, mddev);
bio = r10_bio->devs[slot].bio;
bdevname(bio->bi_bdev, b);
r10_bio->devs[slot].bio =
mddev->ro ? IO_BLOCKED : NULL;
read_more:
- mirror = read_balance(conf, r10_bio, &max_sectors);
- if (mirror == -1) {
+ rdev = read_balance(conf, r10_bio, &max_sectors);
+ if (rdev == NULL) {
printk(KERN_ALERT "md/raid10:%s: %s: unrecoverable I/O"
" read error for block %llu\n",
mdname(mddev), b,
@@ -2103,7 +2326,6 @@ read_more:
if (bio)
bio_put(bio);
slot = r10_bio->read_slot;
- rdev = conf->mirrors[mirror].rdev;
printk_ratelimited(
KERN_ERR
"md/raid10:%s: %s: redirecting"
@@ -2117,6 +2339,7 @@ read_more:
r10_bio->sector - bio->bi_sector,
max_sectors);
r10_bio->devs[slot].bio = bio;
+ r10_bio->devs[slot].rdev = rdev;
bio->bi_sector = r10_bio->devs[slot].addr
+ rdev->data_offset;
bio->bi_bdev = rdev->bdev;
@@ -2187,6 +2410,22 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
r10_bio->sectors, 0))
md_error(conf->mddev, rdev);
}
+ rdev = conf->mirrors[dev].replacement;
+ if (r10_bio->devs[m].repl_bio == NULL)
+ continue;
+ if (test_bit(BIO_UPTODATE,
+ &r10_bio->devs[m].repl_bio->bi_flags)) {
+ rdev_clear_badblocks(
+ rdev,
+ r10_bio->devs[m].addr,
+ r10_bio->sectors);
+ } else {
+ if (!rdev_set_badblocks(
+ rdev,
+ r10_bio->devs[m].addr,
+ r10_bio->sectors, 0))
+ md_error(conf->mddev, rdev);
+ }
}
put_buf(r10_bio);
} else {
@@ -2209,6 +2448,15 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
}
rdev_dec_pending(rdev, conf->mddev);
}
+ bio = r10_bio->devs[m].repl_bio;
+ rdev = conf->mirrors[dev].replacement;
+ if (rdev && bio == IO_MADE_GOOD) {
+ rdev_clear_badblocks(
+ rdev,
+ r10_bio->devs[m].addr,
+ r10_bio->sectors);
+ rdev_dec_pending(rdev, conf->mddev);
+ }
}
if (test_bit(R10BIO_WriteError,
&r10_bio->state))
@@ -2272,9 +2520,14 @@ static void raid10d(struct mddev *mddev)
static int init_resync(struct r10conf *conf)
{
int buffs;
+ int i;
buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
BUG_ON(conf->r10buf_pool);
+ conf->have_replacement = 0;
+ for (i = 0; i < conf->raid_disks; i++)
+ if (conf->mirrors[i].replacement)
+ conf->have_replacement = 1;
conf->r10buf_pool = mempool_create(buffs, r10buf_pool_alloc, r10buf_pool_free, conf);
if (!conf->r10buf_pool)
return -ENOMEM;
@@ -2355,9 +2608,22 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
bitmap_end_sync(mddev->bitmap, sect,
&sync_blocks, 1);
}
- } else /* completed sync */
+ } else {
+ /* completed sync */
+ if ((!mddev->bitmap || conf->fullsync)
+ && conf->have_replacement
+ && test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
+ /* Completed a full sync so the replacements
+ * are now fully recovered.
+ */
+ for (i = 0; i < conf->raid_disks; i++)
+ if (conf->mirrors[i].replacement)
+ conf->mirrors[i].replacement
+ ->recovery_offset
+ = MaxSector;
+ }
conf->fullsync = 0;
-
+ }
bitmap_close_sync(mddev->bitmap);
close_sync(conf);
*skipped = 1;
@@ -2414,23 +2680,30 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
sector_t sect;
int must_sync;
int any_working;
-
- if (conf->mirrors[i].rdev == NULL ||
- test_bit(In_sync, &conf->mirrors[i].rdev->flags))
+ struct mirror_info *mirror = &conf->mirrors[i];
+
+ if ((mirror->rdev == NULL ||
+ test_bit(In_sync, &mirror->rdev->flags))
+ &&
+ (mirror->replacement == NULL ||
+ test_bit(Faulty,
+ &mirror->replacement->flags)))
continue;
still_degraded = 0;
/* want to reconstruct this device */
rb2 = r10_bio;
sect = raid10_find_virt(conf, sector_nr, i);
- /* Unless we are doing a full sync, we only need
- * to recover the block if it is set in the bitmap
+ /* Unless we are doing a full sync, or a replacement
+ * we only need to recover the block if it is set in
+ * the bitmap
*/
must_sync = bitmap_start_sync(mddev->bitmap, sect,
&sync_blocks, 1);
if (sync_blocks < max_sync)
max_sync = sync_blocks;
if (!must_sync &&
+ mirror->replacement == NULL &&
!conf->fullsync) {
/* yep, skip the sync_blocks here, but don't assume
* that there will never be anything to do here
@@ -2500,33 +2773,60 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
bio->bi_end_io = end_sync_read;
bio->bi_rw = READ;
from_addr = r10_bio->devs[j].addr;
- bio->bi_sector = from_addr +
- conf->mirrors[d].rdev->data_offset;
- bio->bi_bdev = conf->mirrors[d].rdev->bdev;
- atomic_inc(&conf->mirrors[d].rdev->nr_pending);
- atomic_inc(&r10_bio->remaining);
- /* and we write to 'i' */
+ bio->bi_sector = from_addr + rdev->data_offset;
+ bio->bi_bdev = rdev->bdev;
+ atomic_inc(&rdev->nr_pending);
+ /* and we write to 'i' (if not in_sync) */
for (k=0; k<conf->copies; k++)
if (r10_bio->devs[k].devnum == i)
break;
BUG_ON(k == conf->copies);
- bio = r10_bio->devs[1].bio;
- bio->bi_next = biolist;
- biolist = bio;
- bio->bi_private = r10_bio;
- bio->bi_end_io = end_sync_write;
- bio->bi_rw = WRITE;
to_addr = r10_bio->devs[k].addr;
- bio->bi_sector = to_addr +
- conf->mirrors[i].rdev->data_offset;
- bio->bi_bdev = conf->mirrors[i].rdev->bdev;
-
r10_bio->devs[0].devnum = d;
r10_bio->devs[0].addr = from_addr;
r10_bio->devs[1].devnum = i;
r10_bio->devs[1].addr = to_addr;
+ rdev = mirror->rdev;
+ if (!test_bit(In_sync, &rdev->flags)) {
+ bio = r10_bio->devs[1].bio;
+ bio->bi_next = biolist;
+ biolist = bio;
+ bio->bi_private = r10_bio;
+ bio->bi_end_io = end_sync_write;
+ bio->bi_rw = WRITE;
+ bio->bi_sector = to_addr
+ + rdev->data_offset;
+ bio->bi_bdev = rdev->bdev;
+ atomic_inc(&r10_bio->remaining);
+ } else
+ r10_bio->devs[1].bio->bi_end_io = NULL;
+
+ /* and maybe write to replacement */
+ bio = r10_bio->devs[1].repl_bio;
+ if (bio)
+ bio->bi_end_io = NULL;
+ rdev = mirror->replacement;
+ /* Note: if rdev != NULL, then bio
+ * cannot be NULL as r10buf_pool_alloc will
+ * have allocated it.
+ * So the second test here is pointless.
+ * But it keeps semantic-checkers happy, and
+ * this comment keeps human reviewers
+ * happy.
+ */
+ if (rdev == NULL || bio == NULL ||
+ test_bit(Faulty, &rdev->flags))
+ break;
+ bio->bi_next = biolist;
+ biolist = bio;
+ bio->bi_private = r10_bio;
+ bio->bi_end_io = end_sync_write;
+ bio->bi_rw = WRITE;
+ bio->bi_sector = to_addr + rdev->data_offset;
+ bio->bi_bdev = rdev->bdev;
+ atomic_inc(&r10_bio->remaining);
break;
}
if (j == conf->copies) {
@@ -2544,8 +2844,16 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
for (k = 0; k < conf->copies; k++)
if (r10_bio->devs[k].devnum == i)
break;
- if (!rdev_set_badblocks(
- conf->mirrors[i].rdev,
+ if (!test_bit(In_sync,
+ &mirror->rdev->flags)
+ && !rdev_set_badblocks(
+ mirror->rdev,
+ r10_bio->devs[k].addr,
+ max_sync, 0))
+ any_working = 0;
+ if (mirror->replacement &&
+ !rdev_set_badblocks(
+ mirror->replacement,
r10_bio->devs[k].addr,
max_sync, 0))
any_working = 0;
@@ -2556,7 +2864,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
printk(KERN_INFO "md/raid10:%s: insufficient "
"working devices for recovery.\n",
mdname(mddev));
- conf->mirrors[i].recovery_disabled
+ mirror->recovery_disabled
= mddev->recovery_disabled;
}
break;
@@ -2605,6 +2913,9 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
sector_t first_bad, sector;
int bad_sectors;
+ if (r10_bio->devs[i].repl_bio)
+ r10_bio->devs[i].repl_bio->bi_end_io = NULL;
+
bio = r10_bio->devs[i].bio;
bio->bi_end_io = NULL;
clear_bit(BIO_UPTODATE, &bio->bi_flags);
@@ -2635,6 +2946,27 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
conf->mirrors[d].rdev->data_offset;
bio->bi_bdev = conf->mirrors[d].rdev->bdev;
count++;
+
+ if (conf->mirrors[d].replacement == NULL ||
+ test_bit(Faulty,
+ &conf->mirrors[d].replacement->flags))
+ continue;
+
+ /* Need to set up for writing to the replacement */
+ bio = r10_bio->devs[i].repl_bio;
+ clear_bit(BIO_UPTODATE, &bio->bi_flags);
+
+ sector = r10_bio->devs[i].addr;
+ atomic_inc(&conf->mirrors[d].rdev->nr_pending);
+ bio->bi_next = biolist;
+ biolist = bio;
+ bio->bi_private = r10_bio;
+ bio->bi_end_io = end_sync_write;
+ bio->bi_rw = WRITE;
+ bio->bi_sector = sector +
+ conf->mirrors[d].replacement->data_offset;
+ bio->bi_bdev = conf->mirrors[d].replacement->bdev;
+ count++;
}
if (count < 2) {
@@ -2643,6 +2975,11 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
if (r10_bio->devs[i].bio->bi_end_io)
rdev_dec_pending(conf->mirrors[d].rdev,
mddev);
+ if (r10_bio->devs[i].repl_bio &&
+ r10_bio->devs[i].repl_bio->bi_end_io)
+ rdev_dec_pending(
+ conf->mirrors[d].replacement,
+ mddev);
}
put_buf(r10_bio);
biolist = NULL;
@@ -2896,6 +3233,16 @@ static int run(struct mddev *mddev)
continue;
disk = conf->mirrors + disk_idx;
+ if (test_bit(Replacement, &rdev->flags)) {
+ if (disk->replacement)
+ goto out_free_conf;
+ disk->replacement = rdev;
+ } else {
+ if (disk->rdev)
+ goto out_free_conf;
+ disk->rdev = rdev;
+ }
+
disk->rdev = rdev;
disk_stack_limits(mddev->gendisk, rdev->bdev,
rdev->data_offset << 9);
@@ -2923,6 +3270,13 @@ static int run(struct mddev *mddev)
disk = conf->mirrors + i;
+ if (!disk->rdev && disk->replacement) {
+ /* The replacement is all we have - use it */
+ disk->rdev = disk->replacement;
+ disk->replacement = NULL;
+ clear_bit(Replacement, &disk->rdev->flags);
+ }
+
if (!disk->rdev ||
!test_bit(In_sync, &disk->rdev->flags)) {
disk->head_position = 0;
diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h
index 7facfdf841f4..7c615613c381 100644
--- a/drivers/md/raid10.h
+++ b/drivers/md/raid10.h
@@ -2,7 +2,7 @@
#define _RAID10_H
struct mirror_info {
- struct md_rdev *rdev;
+ struct md_rdev *rdev, *replacement;
sector_t head_position;
int recovery_disabled; /* matches
* mddev->recovery_disabled
@@ -18,12 +18,13 @@ struct r10conf {
spinlock_t device_lock;
/* geometry */
- int near_copies; /* number of copies laid out raid0 style */
+ int near_copies; /* number of copies laid out
+ * raid0 style */
int far_copies; /* number of copies laid out
* at large strides across drives
*/
- int far_offset; /* far_copies are offset by 1 stripe
- * instead of many
+ int far_offset; /* far_copies are offset by 1
+ * stripe instead of many
*/
int copies; /* near_copies * far_copies.
* must be <= raid_disks
@@ -34,10 +35,11 @@ struct r10conf {
* 1 stripe.
*/
- sector_t dev_sectors; /* temp copy of mddev->dev_sectors */
+ sector_t dev_sectors; /* temp copy of
+ * mddev->dev_sectors */
- int chunk_shift; /* shift from chunks to sectors */
- sector_t chunk_mask;
+ int chunk_shift; /* shift from chunks to sectors */
+ sector_t chunk_mask;
struct list_head retry_list;
/* queue pending writes and submit them on unplug */
@@ -45,20 +47,22 @@ struct r10conf {
int pending_count;
spinlock_t resync_lock;
- int nr_pending;
- int nr_waiting;
- int nr_queued;
- int barrier;
+ int nr_pending;
+ int nr_waiting;
+ int nr_queued;
+ int barrier;
sector_t next_resync;
int fullsync; /* set to 1 if a full sync is needed,
* (fresh device added).
* Cleared when a sync completes.
*/
-
+ int have_replacement; /* There is at least one
+ * replacement device.
+ */
wait_queue_head_t wait_barrier;
- mempool_t *r10bio_pool;
- mempool_t *r10buf_pool;
+ mempool_t *r10bio_pool;
+ mempool_t *r10buf_pool;
struct page *tmppage;
/* When taking over an array from a different personality, we store
@@ -98,11 +102,18 @@ struct r10bio {
* When resyncing we also use one for each copy.
* When reconstructing, we use 2 bios, one for read, one for write.
* We choose the number when they are allocated.
+ * We sometimes need an extra bio to write to the replacement.
*/
struct {
- struct bio *bio;
- sector_t addr;
- int devnum;
+ struct bio *bio;
+ union {
+ struct bio *repl_bio; /* used for resync and
+ * writes */
+ struct md_rdev *rdev; /* used for reads
+ * (read_slot >= 0) */
+ };
+ sector_t addr;
+ int devnum;
} devs[0];
};
@@ -121,17 +132,19 @@ struct r10bio {
#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
/* bits for r10bio.state */
-#define R10BIO_Uptodate 0
-#define R10BIO_IsSync 1
-#define R10BIO_IsRecover 2
-#define R10BIO_Degraded 3
+enum r10bio_state {
+ R10BIO_Uptodate,
+ R10BIO_IsSync,
+ R10BIO_IsRecover,
+ R10BIO_Degraded,
/* Set ReadError on bios that experience a read error
* so that raid10d knows what to do with them.
*/
-#define R10BIO_ReadError 4
+ R10BIO_ReadError,
/* If a write for this request means we can clear some
* known-bad-block records, we set this flag.
*/
-#define R10BIO_MadeGood 5
-#define R10BIO_WriteError 6
+ R10BIO_MadeGood,
+ R10BIO_WriteError,
+};
#endif
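
The raid10.h hunk above turns part of each devs[] entry into a union: repl_bio is the live member for resync and writes, rdev for reads (read_slot >= 0), which is also why make_request() sets read_slot = -1 before a write so put_all_bios() knows which member to free. A tiny model of that tagged-union discipline, with hypothetical types rather than the kernel structs:

#include <stdio.h>

struct bio { const char *what; };
struct rdev { const char *name; };

/* One entry of the r10bio devs[] array, modelled loosely. */
struct dev_slot {
	struct bio *bio;
	union {				/* valid member depends on read_slot */
		struct bio *repl_bio;	/* writes / resync */
		struct rdev *rdev;	/* reads (read_slot >= 0) */
	};
};

struct r10bio_model {
	int read_slot;			/* -1 for writes */
	struct dev_slot dev;
};

static void describe(const struct r10bio_model *r)
{
	if (r->read_slot >= 0)
		printf("read served by %s\n", r->dev.rdev->name);
	else
		printf("replacement write: %s\n",
		       r->dev.repl_bio ? r->dev.repl_bio->what : "none");
}

int main(void)
{
	struct bio wb = { "clone to sdd" };
	struct rdev rd = { "sdc" };

	struct r10bio_model write_req = { .read_slot = -1 };
	write_req.dev.repl_bio = &wb;

	struct r10bio_model read_req = { .read_slot = 0 };
	read_req.dev.rdev = &rd;

	describe(&write_req);
	describe(&read_req);
	return 0;
}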
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 297e26092178..360f2b98f62b 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -370,12 +370,10 @@ static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector,
* of the two sections, and some non-in_sync devices may
* be insync in the section most affected by failed devices.
*/
-static int has_failed(struct r5conf *conf)
+static int calc_degraded(struct r5conf *conf)
{
- int degraded;
+ int degraded, degraded2;
int i;
- if (conf->mddev->reshape_position == MaxSector)
- return conf->mddev->degraded > conf->max_degraded;
rcu_read_lock();
degraded = 0;
@@ -399,14 +397,14 @@ static int has_failed(struct r5conf *conf)
degraded++;
}
rcu_read_unlock();
- if (degraded > conf->max_degraded)
- return 1;
+ if (conf->raid_disks == conf->previous_raid_disks)
+ return degraded;
rcu_read_lock();
- degraded = 0;
+ degraded2 = 0;
for (i = 0; i < conf->raid_disks; i++) {
struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
if (!rdev || test_bit(Faulty, &rdev->flags))
- degraded++;
+ degraded2++;
else if (test_bit(In_sync, &rdev->flags))
;
else
@@ -416,9 +414,22 @@ static int has_failed(struct r5conf *conf)
* almost certainly hasn't.
*/
if (conf->raid_disks <= conf->previous_raid_disks)
- degraded++;
+ degraded2++;
}
rcu_read_unlock();
+ if (degraded2 > degraded)
+ return degraded2;
+ return degraded;
+}
+
+static int has_failed(struct r5conf *conf)
+{
+ int degraded;
+
+ if (conf->mddev->reshape_position == MaxSector)
+ return conf->mddev->degraded > conf->max_degraded;
+
+ degraded = calc_degraded(conf);
if (degraded > conf->max_degraded)
return 1;
return 0;
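
has_failed() above is rebuilt on calc_degraded(), which counts unusable members under both the old and the new geometry during a reshape and reports the worse of the two; has_failed() then simply compares that against max_degraded. A small numeric model of the same decision (plain C with an invented device table; the kernel version is more careful about devices recovering across a reshape, this just counts anything not fully in sync):

#include <stdio.h>

struct disk { int present; int in_sync; };

/* Count members that cannot serve data among the first 'n' slots. */
static int degraded_for(const struct disk *d, int n)
{
	int degraded = 0;
	for (int i = 0; i < n; i++)
		if (!d[i].present)
			degraded++;
		else if (!d[i].in_sync)
			degraded++;	/* recovering counts as degraded here */
	return degraded;
}

int main(void)
{
	/* Mid-reshape from 4 to 5 members; slot 2 missing, slot 4 still syncing. */
	struct disk disks[5] = {
		{ 1, 1 }, { 1, 1 }, { 0, 0 }, { 1, 1 }, { 1, 0 },
	};
	int previous_raid_disks = 4, raid_disks = 5, max_degraded = 1;

	int d1 = degraded_for(disks, previous_raid_disks);
	int d2 = degraded_for(disks, raid_disks);
	int degraded = d2 > d1 ? d2 : d1;

	printf("degraded old=%d new=%d -> %d, has_failed=%d\n",
	       d1, d2, degraded, degraded > max_degraded);
	return 0;
}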
@@ -492,8 +503,9 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
for (i = disks; i--; ) {
int rw;
- struct bio *bi;
- struct md_rdev *rdev;
+ int replace_only = 0;
+ struct bio *bi, *rbi;
+ struct md_rdev *rdev, *rrdev = NULL;
if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags))
rw = WRITE_FUA;
@@ -501,27 +513,57 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
rw = WRITE;
} else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
rw = READ;
- else
+ else if (test_and_clear_bit(R5_WantReplace,
+ &sh->dev[i].flags)) {
+ rw = WRITE;
+ replace_only = 1;
+ } else
continue;
bi = &sh->dev[i].req;
+ rbi = &sh->dev[i].rreq; /* For writing to replacement */
bi->bi_rw = rw;
- if (rw & WRITE)
+ rbi->bi_rw = rw;
+ if (rw & WRITE) {
bi->bi_end_io = raid5_end_write_request;
- else
+ rbi->bi_end_io = raid5_end_write_request;
+ } else
bi->bi_end_io = raid5_end_read_request;
rcu_read_lock();
+ rrdev = rcu_dereference(conf->disks[i].replacement);
+ smp_mb(); /* Ensure that if rrdev is NULL, rdev won't be */
rdev = rcu_dereference(conf->disks[i].rdev);
+ if (!rdev) {
+ rdev = rrdev;
+ rrdev = NULL;
+ }
+ if (rw & WRITE) {
+ if (replace_only)
+ rdev = NULL;
+ if (rdev == rrdev)
+ /* We raced and saw duplicates */
+ rrdev = NULL;
+ } else {
+ if (test_bit(R5_ReadRepl, &sh->dev[i].flags) && rrdev)
+ rdev = rrdev;
+ rrdev = NULL;
+ }
+
if (rdev && test_bit(Faulty, &rdev->flags))
rdev = NULL;
if (rdev)
atomic_inc(&rdev->nr_pending);
+ if (rrdev && test_bit(Faulty, &rrdev->flags))
+ rrdev = NULL;
+ if (rrdev)
+ atomic_inc(&rrdev->nr_pending);
rcu_read_unlock();
/* We have already checked bad blocks for reads. Now
- * need to check for writes.
+ * need to check for writes. We never accept write errors
+ * on the replacement, so we don't need to check rrdev.
*/
while ((rw & WRITE) && rdev &&
test_bit(WriteErrorSeen, &rdev->flags)) {
@@ -551,7 +593,8 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
}
if (rdev) {
- if (s->syncing || s->expanding || s->expanded)
+ if (s->syncing || s->expanding || s->expanded
+ || s->replacing)
md_sync_acct(rdev->bdev, STRIPE_SECTORS);
set_bit(STRIPE_IO_STARTED, &sh->state);
@@ -563,16 +606,38 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
atomic_inc(&sh->count);
bi->bi_sector = sh->sector + rdev->data_offset;
bi->bi_flags = 1 << BIO_UPTODATE;
- bi->bi_vcnt = 1;
- bi->bi_max_vecs = 1;
bi->bi_idx = 0;
- bi->bi_io_vec = &sh->dev[i].vec;
bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
bi->bi_io_vec[0].bv_offset = 0;
bi->bi_size = STRIPE_SIZE;
bi->bi_next = NULL;
+ if (rrdev)
+ set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags);
generic_make_request(bi);
- } else {
+ }
+ if (rrdev) {
+ if (s->syncing || s->expanding || s->expanded
+ || s->replacing)
+ md_sync_acct(rrdev->bdev, STRIPE_SECTORS);
+
+ set_bit(STRIPE_IO_STARTED, &sh->state);
+
+ rbi->bi_bdev = rrdev->bdev;
+ pr_debug("%s: for %llu schedule op %ld on "
+ "replacement disc %d\n",
+ __func__, (unsigned long long)sh->sector,
+ rbi->bi_rw, i);
+ atomic_inc(&sh->count);
+ rbi->bi_sector = sh->sector + rrdev->data_offset;
+ rbi->bi_flags = 1 << BIO_UPTODATE;
+ rbi->bi_idx = 0;
+ rbi->bi_io_vec[0].bv_len = STRIPE_SIZE;
+ rbi->bi_io_vec[0].bv_offset = 0;
+ rbi->bi_size = STRIPE_SIZE;
+ rbi->bi_next = NULL;
+ generic_make_request(rbi);
+ }
+ if (!rdev && !rrdev) {
if (rw & WRITE)
set_bit(STRIPE_DEGRADED, &sh->state);
pr_debug("skip op %ld on disc %d for sector %llu\n",
@@ -1583,7 +1648,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
int disks = sh->disks, i;
int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
char b[BDEVNAME_SIZE];
- struct md_rdev *rdev;
+ struct md_rdev *rdev = NULL;
for (i=0 ; i<disks; i++)
@@ -1597,11 +1662,23 @@ static void raid5_end_read_request(struct bio * bi, int error)
BUG();
return;
}
+ if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
+ /* If replacement finished while this request was outstanding,
+ * 'replacement' might be NULL already.
+ * In that case it moved down to 'rdev'.
+ * rdev is not removed until all requests are finished.
+ */
+ rdev = conf->disks[i].replacement;
+ if (!rdev)
+ rdev = conf->disks[i].rdev;
if (uptodate) {
set_bit(R5_UPTODATE, &sh->dev[i].flags);
if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
- rdev = conf->disks[i].rdev;
+ /* Note that this cannot happen on a
+ * replacement device. We just fail those on
+ * any error
+ */
printk_ratelimited(
KERN_INFO
"md/raid:%s: read error corrected"
@@ -1614,16 +1691,24 @@ static void raid5_end_read_request(struct bio * bi, int error)
clear_bit(R5_ReadError, &sh->dev[i].flags);
clear_bit(R5_ReWrite, &sh->dev[i].flags);
}
- if (atomic_read(&conf->disks[i].rdev->read_errors))
- atomic_set(&conf->disks[i].rdev->read_errors, 0);
+ if (atomic_read(&rdev->read_errors))
+ atomic_set(&rdev->read_errors, 0);
} else {
- const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
+ const char *bdn = bdevname(rdev->bdev, b);
int retry = 0;
- rdev = conf->disks[i].rdev;
clear_bit(R5_UPTODATE, &sh->dev[i].flags);
atomic_inc(&rdev->read_errors);
- if (conf->mddev->degraded >= conf->max_degraded)
+ if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
+ printk_ratelimited(
+ KERN_WARNING
+ "md/raid:%s: read error on replacement device "
+ "(sector %llu on %s).\n",
+ mdname(conf->mddev),
+ (unsigned long long)(sh->sector
+ + rdev->data_offset),
+ bdn);
+ else if (conf->mddev->degraded >= conf->max_degraded)
printk_ratelimited(
KERN_WARNING
"md/raid:%s: read error not correctable "
@@ -1657,7 +1742,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
md_error(conf->mddev, rdev);
}
}
- rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
+ rdev_dec_pending(rdev, conf->mddev);
clear_bit(R5_LOCKED, &sh->dev[i].flags);
set_bit(STRIPE_HANDLE, &sh->state);
release_stripe(sh);
@@ -1668,14 +1753,30 @@ static void raid5_end_write_request(struct bio *bi, int error)
struct stripe_head *sh = bi->bi_private;
struct r5conf *conf = sh->raid_conf;
int disks = sh->disks, i;
+ struct md_rdev *uninitialized_var(rdev);
int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
sector_t first_bad;
int bad_sectors;
+ int replacement = 0;
- for (i=0 ; i<disks; i++)
- if (bi == &sh->dev[i].req)
+ for (i = 0 ; i < disks; i++) {
+ if (bi == &sh->dev[i].req) {
+ rdev = conf->disks[i].rdev;
break;
-
+ }
+ if (bi == &sh->dev[i].rreq) {
+ rdev = conf->disks[i].replacement;
+ if (rdev)
+ replacement = 1;
+ else
+ /* rdev was removed and 'replacement'
+ * replaced it. rdev is not removed
+ * until all requests are finished.
+ */
+ rdev = conf->disks[i].rdev;
+ break;
+ }
+ }
pr_debug("end_write_request %llu/%d, count %d, uptodate: %d.\n",
(unsigned long long)sh->sector, i, atomic_read(&sh->count),
uptodate);
@@ -1684,21 +1785,33 @@ static void raid5_end_write_request(struct bio *bi, int error)
return;
}
- if (!uptodate) {
- set_bit(WriteErrorSeen, &conf->disks[i].rdev->flags);
- set_bit(R5_WriteError, &sh->dev[i].flags);
- } else if (is_badblock(conf->disks[i].rdev, sh->sector, STRIPE_SECTORS,
- &first_bad, &bad_sectors))
- set_bit(R5_MadeGood, &sh->dev[i].flags);
+ if (replacement) {
+ if (!uptodate)
+ md_error(conf->mddev, rdev);
+ else if (is_badblock(rdev, sh->sector,
+ STRIPE_SECTORS,
+ &first_bad, &bad_sectors))
+ set_bit(R5_MadeGoodRepl, &sh->dev[i].flags);
+ } else {
+ if (!uptodate) {
+ set_bit(WriteErrorSeen, &rdev->flags);
+ set_bit(R5_WriteError, &sh->dev[i].flags);
+ if (!test_and_set_bit(WantReplacement, &rdev->flags))
+ set_bit(MD_RECOVERY_NEEDED,
+ &rdev->mddev->recovery);
+ } else if (is_badblock(rdev, sh->sector,
+ STRIPE_SECTORS,
+ &first_bad, &bad_sectors))
+ set_bit(R5_MadeGood, &sh->dev[i].flags);
+ }
+ rdev_dec_pending(rdev, conf->mddev);
- rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
-
- clear_bit(R5_LOCKED, &sh->dev[i].flags);
+ if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags))
+ clear_bit(R5_LOCKED, &sh->dev[i].flags);
set_bit(STRIPE_HANDLE, &sh->state);
release_stripe(sh);
}
-
static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous);
static void raid5_build_block(struct stripe_head *sh, int i, int previous)
@@ -1709,12 +1822,15 @@ static void raid5_build_block(struct stripe_head *sh, int i, int previous)
dev->req.bi_io_vec = &dev->vec;
dev->req.bi_vcnt++;
dev->req.bi_max_vecs++;
+ dev->req.bi_private = sh;
dev->vec.bv_page = dev->page;
- dev->vec.bv_len = STRIPE_SIZE;
- dev->vec.bv_offset = 0;
- dev->req.bi_sector = sh->sector;
- dev->req.bi_private = sh;
+ bio_init(&dev->rreq);
+ dev->rreq.bi_io_vec = &dev->rvec;
+ dev->rreq.bi_vcnt++;
+ dev->rreq.bi_max_vecs++;
+ dev->rreq.bi_private = sh;
+ dev->rvec.bv_page = dev->page;
dev->flags = 0;
dev->sector = compute_blocknr(sh, i, previous);
@@ -1724,18 +1840,15 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
{
char b[BDEVNAME_SIZE];
struct r5conf *conf = mddev->private;
+ unsigned long flags;
pr_debug("raid456: error called\n");
- if (test_and_clear_bit(In_sync, &rdev->flags)) {
- unsigned long flags;
- spin_lock_irqsave(&conf->device_lock, flags);
- mddev->degraded++;
- spin_unlock_irqrestore(&conf->device_lock, flags);
- /*
- * if recovery was running, make sure it aborts.
- */
- set_bit(MD_RECOVERY_INTR, &mddev->recovery);
- }
+ spin_lock_irqsave(&conf->device_lock, flags);
+ clear_bit(In_sync, &rdev->flags);
+ mddev->degraded = calc_degraded(conf);
+ spin_unlock_irqrestore(&conf->device_lock, flags);
+ set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+
set_bit(Blocked, &rdev->flags);
set_bit(Faulty, &rdev->flags);
set_bit(MD_CHANGE_DEVS, &mddev->flags);
@@ -2362,8 +2475,9 @@ handle_failed_sync(struct r5conf *conf, struct stripe_head *sh,
md_done_sync(conf->mddev, STRIPE_SECTORS, 0);
clear_bit(STRIPE_SYNCING, &sh->state);
s->syncing = 0;
+ s->replacing = 0;
/* There is nothing more to do for sync/check/repair.
- * For recover we need to record a bad block on all
+ * For recover/replace we need to record a bad block on all
* non-sync devices, or abort the recovery
*/
if (!test_bit(MD_RECOVERY_RECOVER, &conf->mddev->recovery))
@@ -2373,12 +2487,18 @@ handle_failed_sync(struct r5conf *conf, struct stripe_head *sh,
*/
for (i = 0; i < conf->raid_disks; i++) {
struct md_rdev *rdev = conf->disks[i].rdev;
- if (!rdev
- || test_bit(Faulty, &rdev->flags)
- || test_bit(In_sync, &rdev->flags))
- continue;
- if (!rdev_set_badblocks(rdev, sh->sector,
- STRIPE_SECTORS, 0))
+ if (rdev
+ && !test_bit(Faulty, &rdev->flags)
+ && !test_bit(In_sync, &rdev->flags)
+ && !rdev_set_badblocks(rdev, sh->sector,
+ STRIPE_SECTORS, 0))
+ abort = 1;
+ rdev = conf->disks[i].replacement;
+ if (rdev
+ && !test_bit(Faulty, &rdev->flags)
+ && !test_bit(In_sync, &rdev->flags)
+ && !rdev_set_badblocks(rdev, sh->sector,
+ STRIPE_SECTORS, 0))
abort = 1;
}
if (abort) {
@@ -2387,6 +2507,22 @@ handle_failed_sync(struct r5conf *conf, struct stripe_head *sh,
}
}
+static int want_replace(struct stripe_head *sh, int disk_idx)
+{
+ struct md_rdev *rdev;
+ int rv = 0;
+ /* Doing recovery so rcu locking not required */
+ rdev = sh->raid_conf->disks[disk_idx].replacement;
+ if (rdev
+ && !test_bit(Faulty, &rdev->flags)
+ && !test_bit(In_sync, &rdev->flags)
+ && (rdev->recovery_offset <= sh->sector
+ || rdev->mddev->recovery_cp <= sh->sector))
+ rv = 1;
+
+ return rv;
+}
+
/* fetch_block - checks the given member device to see if its data needs
* to be read or computed to satisfy a request.
*
@@ -2406,6 +2542,7 @@ static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s,
(dev->toread ||
(dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
s->syncing || s->expanding ||
+ (s->replacing && want_replace(sh, disk_idx)) ||
(s->failed >= 1 && fdev[0]->toread) ||
(s->failed >= 2 && fdev[1]->toread) ||
(sh->raid_conf->level <= 5 && s->failed && fdev[0]->towrite &&
@@ -2959,22 +3096,18 @@ static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh)
}
}
-
/*
* handle_stripe - do things to a stripe.
*
- * We lock the stripe and then examine the state of various bits
- * to see what needs to be done.
+ * We lock the stripe by setting STRIPE_ACTIVE and then examine the
+ * state of various bits to see what needs to be done.
* Possible results:
- * return some read request which now have data
- * return some write requests which are safely on disc
+ * return some read requests which now have data
+ * return some write requests which are safely on storage
* schedule a read on some buffers
* schedule a write of some buffers
* return confirmation of parity correctness
*
- * buffers are taken off read_list or write_list, and bh_cache buffers
- * get BH_Lock set before the stripe lock is released.
- *
*/
static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
@@ -2983,10 +3116,10 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
int disks = sh->disks;
struct r5dev *dev;
int i;
+ int do_recovery = 0;
memset(s, 0, sizeof(*s));
- s->syncing = test_bit(STRIPE_SYNCING, &sh->state);
s->expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
s->expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
s->failed_num[0] = -1;
@@ -3004,7 +3137,8 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
dev = &sh->dev[i];
pr_debug("check %d: state 0x%lx read %p write %p written %p\n",
- i, dev->flags, dev->toread, dev->towrite, dev->written);
+ i, dev->flags,
+ dev->toread, dev->towrite, dev->written);
/* maybe we can reply to a read
*
* new wantfill requests are only permitted while
@@ -3035,7 +3169,23 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
}
if (dev->written)
s->written++;
- rdev = rcu_dereference(conf->disks[i].rdev);
+ /* Prefer to use the replacement for reads, but only
+ * if it is recovered enough and has no bad blocks.
+ */
+ rdev = rcu_dereference(conf->disks[i].replacement);
+ if (rdev && !test_bit(Faulty, &rdev->flags) &&
+ rdev->recovery_offset >= sh->sector + STRIPE_SECTORS &&
+ !is_badblock(rdev, sh->sector, STRIPE_SECTORS,
+ &first_bad, &bad_sectors))
+ set_bit(R5_ReadRepl, &dev->flags);
+ else {
+ if (rdev)
+ set_bit(R5_NeedReplace, &dev->flags);
+ rdev = rcu_dereference(conf->disks[i].rdev);
+ clear_bit(R5_ReadRepl, &dev->flags);
+ }
+ if (rdev && test_bit(Faulty, &rdev->flags))
+ rdev = NULL;
if (rdev) {
is_bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS,
&first_bad, &bad_sectors);
@@ -3063,26 +3213,50 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
}
} else if (test_bit(In_sync, &rdev->flags))
set_bit(R5_Insync, &dev->flags);
- else if (!test_bit(Faulty, &rdev->flags)) {
+ else if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
/* in sync if before recovery_offset */
- if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
- set_bit(R5_Insync, &dev->flags);
- }
- if (test_bit(R5_WriteError, &dev->flags)) {
- clear_bit(R5_Insync, &dev->flags);
- if (!test_bit(Faulty, &rdev->flags)) {
+ set_bit(R5_Insync, &dev->flags);
+ else if (test_bit(R5_UPTODATE, &dev->flags) &&
+ test_bit(R5_Expanded, &dev->flags))
+ /* If we've reshaped into here, we assume it is Insync.
+ * We will shortly update recovery_offset to make
+ * it official.
+ */
+ set_bit(R5_Insync, &dev->flags);
+
+ if (rdev && test_bit(R5_WriteError, &dev->flags)) {
+ /* This flag does not apply to '.replacement'
+ * only to .rdev, so make sure to check that */
+ struct md_rdev *rdev2 = rcu_dereference(
+ conf->disks[i].rdev);
+ if (rdev2 == rdev)
+ clear_bit(R5_Insync, &dev->flags);
+ if (rdev2 && !test_bit(Faulty, &rdev2->flags)) {
s->handle_bad_blocks = 1;
- atomic_inc(&rdev->nr_pending);
+ atomic_inc(&rdev2->nr_pending);
} else
clear_bit(R5_WriteError, &dev->flags);
}
- if (test_bit(R5_MadeGood, &dev->flags)) {
- if (!test_bit(Faulty, &rdev->flags)) {
+ if (rdev && test_bit(R5_MadeGood, &dev->flags)) {
+ /* This flag does not apply to '.replacement'
+ * only to .rdev, so make sure to check that */
+ struct md_rdev *rdev2 = rcu_dereference(
+ conf->disks[i].rdev);
+ if (rdev2 && !test_bit(Faulty, &rdev2->flags)) {
s->handle_bad_blocks = 1;
- atomic_inc(&rdev->nr_pending);
+ atomic_inc(&rdev2->nr_pending);
} else
clear_bit(R5_MadeGood, &dev->flags);
}
+ if (test_bit(R5_MadeGoodRepl, &dev->flags)) {
+ struct md_rdev *rdev2 = rcu_dereference(
+ conf->disks[i].replacement);
+ if (rdev2 && !test_bit(Faulty, &rdev2->flags)) {
+ s->handle_bad_blocks = 1;
+ atomic_inc(&rdev2->nr_pending);
+ } else
+ clear_bit(R5_MadeGoodRepl, &dev->flags);
+ }
if (!test_bit(R5_Insync, &dev->flags)) {
/* The ReadError flag will just be confusing now */
clear_bit(R5_ReadError, &dev->flags);
@@ -3094,9 +3268,25 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
if (s->failed < 2)
s->failed_num[s->failed] = i;
s->failed++;
+ if (rdev && !test_bit(Faulty, &rdev->flags))
+ do_recovery = 1;
}
}
spin_unlock_irq(&conf->device_lock);
+ if (test_bit(STRIPE_SYNCING, &sh->state)) {
+ /* If there is a failed device being replaced,
+ * we must be recovering.
+ * Otherwise, if we are after recovery_cp, we must be syncing;
+ * otherwise we can only be replacing.
+ * Sync and recovery both need to read all devices, and so
+ * use the same flag.
+ */
+ if (do_recovery ||
+ sh->sector >= conf->mddev->recovery_cp)
+ s->syncing = 1;
+ else
+ s->replacing = 1;
+ }
rcu_read_unlock();
}
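
The comment in analyse_stripe() above reduces to a small predicate: a stripe marked STRIPE_SYNCING is treated as sync/recovery when a failed device is being replaced or the stripe lies at or beyond recovery_cp, and as a pure replacement pass otherwise. A hedged restatement in standalone C (names are made up for illustration):

	#include <stdbool.h>
	#include <stdint.h>

	/* true  -> handle as sync/recovery (read every device)
	 * false -> handle as replacement only (write out replacement targets) */
	static bool model_treat_as_sync(bool failed_dev_being_replaced,
					uint64_t sector, uint64_t recovery_cp)
	{
		return failed_dev_being_replaced || sector >= recovery_cp;
	}
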
@@ -3138,7 +3328,7 @@ static void handle_stripe(struct stripe_head *sh)
if (unlikely(s.blocked_rdev)) {
if (s.syncing || s.expanding || s.expanded ||
- s.to_write || s.written) {
+ s.replacing || s.to_write || s.written) {
set_bit(STRIPE_HANDLE, &sh->state);
goto finish;
}
@@ -3164,7 +3354,7 @@ static void handle_stripe(struct stripe_head *sh)
sh->reconstruct_state = 0;
if (s.to_read+s.to_write+s.written)
handle_failed_stripe(conf, sh, &s, disks, &s.return_bi);
- if (s.syncing)
+ if (s.syncing + s.replacing)
handle_failed_sync(conf, sh, &s);
}
@@ -3195,7 +3385,9 @@ static void handle_stripe(struct stripe_head *sh)
*/
if (s.to_read || s.non_overwrite
|| (conf->level == 6 && s.to_write && s.failed)
- || (s.syncing && (s.uptodate + s.compute < disks)) || s.expanding)
+ || (s.syncing && (s.uptodate + s.compute < disks))
+ || s.replacing
+ || s.expanding)
handle_stripe_fill(sh, &s, disks);
/* Now we check to see if any write operations have recently
@@ -3257,7 +3449,20 @@ static void handle_stripe(struct stripe_head *sh)
handle_parity_checks5(conf, sh, &s, disks);
}
- if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
+ if (s.replacing && s.locked == 0
+ && !test_bit(STRIPE_INSYNC, &sh->state)) {
+ /* Write out to replacement devices where possible */
+ for (i = 0; i < conf->raid_disks; i++)
+ if (test_bit(R5_UPTODATE, &sh->dev[i].flags) &&
+ test_bit(R5_NeedReplace, &sh->dev[i].flags)) {
+ set_bit(R5_WantReplace, &sh->dev[i].flags);
+ set_bit(R5_LOCKED, &sh->dev[i].flags);
+ s.locked++;
+ }
+ set_bit(STRIPE_INSYNC, &sh->state);
+ }
+ if ((s.syncing || s.replacing) && s.locked == 0 &&
+ test_bit(STRIPE_INSYNC, &sh->state)) {
md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
clear_bit(STRIPE_SYNCING, &sh->state);
}
@@ -3355,6 +3560,15 @@ finish:
STRIPE_SECTORS);
rdev_dec_pending(rdev, conf->mddev);
}
+ if (test_and_clear_bit(R5_MadeGoodRepl, &dev->flags)) {
+ rdev = conf->disks[i].replacement;
+ if (!rdev)
+ /* rdev has been moved down */
+ rdev = conf->disks[i].rdev;
+ rdev_clear_badblocks(rdev, sh->sector,
+ STRIPE_SECTORS);
+ rdev_dec_pending(rdev, conf->mddev);
+ }
}
if (s.ops_request)
@@ -3578,6 +3792,7 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
int dd_idx;
struct bio* align_bi;
struct md_rdev *rdev;
+ sector_t end_sector;
if (!in_chunk_boundary(mddev, raid_bio)) {
pr_debug("chunk_aligned_read : non aligned\n");
@@ -3602,9 +3817,19 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
0,
&dd_idx, NULL);
+ end_sector = align_bi->bi_sector + (align_bi->bi_size >> 9);
rcu_read_lock();
- rdev = rcu_dereference(conf->disks[dd_idx].rdev);
- if (rdev && test_bit(In_sync, &rdev->flags)) {
+ rdev = rcu_dereference(conf->disks[dd_idx].replacement);
+ if (!rdev || test_bit(Faulty, &rdev->flags) ||
+ rdev->recovery_offset < end_sector) {
+ rdev = rcu_dereference(conf->disks[dd_idx].rdev);
+ if (rdev &&
+ (test_bit(Faulty, &rdev->flags) ||
+ !(test_bit(In_sync, &rdev->flags) ||
+ rdev->recovery_offset >= end_sector)))
+ rdev = NULL;
+ }
+ if (rdev) {
sector_t first_bad;
int bad_sectors;
@@ -4129,7 +4354,6 @@ static inline sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int
return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */
}
-
bitmap_cond_end_sync(mddev->bitmap, sector_nr);
sh = get_active_stripe(conf, sector_nr, 0, 1, 0);
@@ -4200,7 +4424,6 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
return handled;
}
- set_bit(R5_ReadError, &sh->dev[dd_idx].flags);
if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) {
release_stripe(sh);
raid5_set_bi_hw_segments(raid_bio, scnt);
@@ -4627,7 +4850,15 @@ static struct r5conf *setup_conf(struct mddev *mddev)
continue;
disk = conf->disks + raid_disk;
- disk->rdev = rdev;
+ if (test_bit(Replacement, &rdev->flags)) {
+ if (disk->replacement)
+ goto abort;
+ disk->replacement = rdev;
+ } else {
+ if (disk->rdev)
+ goto abort;
+ disk->rdev = rdev;
+ }
if (test_bit(In_sync, &rdev->flags)) {
char b[BDEVNAME_SIZE];
@@ -4716,6 +4947,7 @@ static int run(struct mddev *mddev)
int dirty_parity_disks = 0;
struct md_rdev *rdev;
sector_t reshape_offset = 0;
+ int i;
if (mddev->recovery_cp != MaxSector)
printk(KERN_NOTICE "md/raid:%s: not clean"
@@ -4805,12 +5037,25 @@ static int run(struct mddev *mddev)
conf->thread = NULL;
mddev->private = conf;
- /*
- * 0 for a fully functional array, 1 or 2 for a degraded array.
- */
- list_for_each_entry(rdev, &mddev->disks, same_set) {
- if (rdev->raid_disk < 0)
+ for (i = 0; i < conf->raid_disks && conf->previous_raid_disks;
+ i++) {
+ rdev = conf->disks[i].rdev;
+ if (!rdev && conf->disks[i].replacement) {
+ /* The replacement is all we have yet */
+ rdev = conf->disks[i].replacement;
+ conf->disks[i].replacement = NULL;
+ clear_bit(Replacement, &rdev->flags);
+ conf->disks[i].rdev = rdev;
+ }
+ if (!rdev)
continue;
+ if (conf->disks[i].replacement &&
+ conf->reshape_progress != MaxSector) {
+ /* replacements and reshape simply do not mix. */
+ printk(KERN_ERR "md: cannot handle concurrent "
+ "replacement and reshape.\n");
+ goto abort;
+ }
if (test_bit(In_sync, &rdev->flags)) {
working_disks++;
continue;
@@ -4844,8 +5089,10 @@ static int run(struct mddev *mddev)
dirty_parity_disks++;
}
- mddev->degraded = (max(conf->raid_disks, conf->previous_raid_disks)
- - working_disks);
+ /*
+ * 0 for a fully functional array, 1 or 2 for a degraded array.
+ */
+ mddev->degraded = calc_degraded(conf);
if (has_failed(conf)) {
printk(KERN_ERR "md/raid:%s: not enough operational devices"
@@ -5008,7 +5255,25 @@ static int raid5_spare_active(struct mddev *mddev)
for (i = 0; i < conf->raid_disks; i++) {
tmp = conf->disks + i;
- if (tmp->rdev
+ if (tmp->replacement
+ && tmp->replacement->recovery_offset == MaxSector
+ && !test_bit(Faulty, &tmp->replacement->flags)
+ && !test_and_set_bit(In_sync, &tmp->replacement->flags)) {
+ /* Replacement has just become active. */
+ if (!tmp->rdev
+ || !test_and_clear_bit(In_sync, &tmp->rdev->flags))
+ count++;
+ if (tmp->rdev) {
+ /* Replaced device not technically faulty,
+ * but we need to be sure it gets removed
+ * and never re-added.
+ */
+ set_bit(Faulty, &tmp->rdev->flags);
+ sysfs_notify_dirent_safe(
+ tmp->rdev->sysfs_state);
+ }
+ sysfs_notify_dirent_safe(tmp->replacement->sysfs_state);
+ } else if (tmp->rdev
&& tmp->rdev->recovery_offset == MaxSector
&& !test_bit(Faulty, &tmp->rdev->flags)
&& !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
@@ -5017,49 +5282,68 @@ static int raid5_spare_active(struct mddev *mddev)
}
}
spin_lock_irqsave(&conf->device_lock, flags);
- mddev->degraded -= count;
+ mddev->degraded = calc_degraded(conf);
spin_unlock_irqrestore(&conf->device_lock, flags);
print_raid5_conf(conf);
return count;
}
-static int raid5_remove_disk(struct mddev *mddev, int number)
+static int raid5_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
{
struct r5conf *conf = mddev->private;
int err = 0;
- struct md_rdev *rdev;
+ int number = rdev->raid_disk;
+ struct md_rdev **rdevp;
struct disk_info *p = conf->disks + number;
print_raid5_conf(conf);
- rdev = p->rdev;
- if (rdev) {
- if (number >= conf->raid_disks &&
- conf->reshape_progress == MaxSector)
- clear_bit(In_sync, &rdev->flags);
+ if (rdev == p->rdev)
+ rdevp = &p->rdev;
+ else if (rdev == p->replacement)
+ rdevp = &p->replacement;
+ else
+ return 0;
- if (test_bit(In_sync, &rdev->flags) ||
- atomic_read(&rdev->nr_pending)) {
- err = -EBUSY;
- goto abort;
- }
- /* Only remove non-faulty devices if recovery
- * isn't possible.
- */
- if (!test_bit(Faulty, &rdev->flags) &&
- mddev->recovery_disabled != conf->recovery_disabled &&
- !has_failed(conf) &&
- number < conf->raid_disks) {
- err = -EBUSY;
- goto abort;
- }
- p->rdev = NULL;
- synchronize_rcu();
- if (atomic_read(&rdev->nr_pending)) {
- /* lost the race, try later */
- err = -EBUSY;
- p->rdev = rdev;
- }
+ if (number >= conf->raid_disks &&
+ conf->reshape_progress == MaxSector)
+ clear_bit(In_sync, &rdev->flags);
+
+ if (test_bit(In_sync, &rdev->flags) ||
+ atomic_read(&rdev->nr_pending)) {
+ err = -EBUSY;
+ goto abort;
}
+ /* Only remove non-faulty devices if recovery
+ * isn't possible.
+ */
+ if (!test_bit(Faulty, &rdev->flags) &&
+ mddev->recovery_disabled != conf->recovery_disabled &&
+ !has_failed(conf) &&
+ (!p->replacement || p->replacement == rdev) &&
+ number < conf->raid_disks) {
+ err = -EBUSY;
+ goto abort;
+ }
+ *rdevp = NULL;
+ synchronize_rcu();
+ if (atomic_read(&rdev->nr_pending)) {
+ /* lost the race, try later */
+ err = -EBUSY;
+ *rdevp = rdev;
+ } else if (p->replacement) {
+ /* We must have just cleared 'rdev' */
+ p->rdev = p->replacement;
+ clear_bit(Replacement, &p->replacement->flags);
+ smp_mb(); /* Make sure other CPUs may see both as identical
+ * but will never see neither - if they are careful
+ */
+ p->replacement = NULL;
+ clear_bit(WantReplacement, &rdev->flags);
+ } else
+ /* We might have just removed the Replacement as faulty;
+ * clear the bit just in case
+ */
+ clear_bit(WantReplacement, &rdev->flags);
abort:
print_raid5_conf(conf);
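
The promotion of '->replacement' into '->rdev' a few lines up depends on ordering: the writer installs the replacement as the primary, issues smp_mb(), and only then clears the replacement slot, while readers (see the rrdev handling in ops_run_io()) load 'replacement' first, then 'rdev'. A reader may therefore transiently see both pointers naming the same device, but it does not see both NULL during the promotion. A minimal userspace analogy using C11 atomics (an illustration, not the kernel's RCU/smp_mb() code; all names are invented):

	#include <stdatomic.h>
	#include <stddef.h>

	struct dev;

	struct slot {
		_Atomic(struct dev *) rdev;
		_Atomic(struct dev *) replacement;
	};

	/* Writer: make the replacement the primary device. */
	static void model_promote(struct slot *s)
	{
		struct dev *r = atomic_load(&s->replacement);

		atomic_store(&s->rdev, r);		   /* publish as primary first */
		atomic_thread_fence(memory_order_seq_cst); /* plays the role of smp_mb() */
		atomic_store(&s->replacement, NULL);	   /* only then retire the slot */
	}

	/* Reader: prefers the replacement; with the ordering above it will not
	 * see both slots empty while a promotion is in progress. */
	static struct dev *model_pick(struct slot *s)
	{
		struct dev *r = atomic_load(&s->replacement);

		atomic_thread_fence(memory_order_seq_cst); /* pairs with the writer's fence */
		if (!r)
			r = atomic_load(&s->rdev);
		return r;
	}
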
@@ -5095,8 +5379,9 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
disk = rdev->saved_raid_disk;
else
disk = first;
- for ( ; disk <= last ; disk++)
- if ((p=conf->disks + disk)->rdev == NULL) {
+ for ( ; disk <= last ; disk++) {
+ p = conf->disks + disk;
+ if (p->rdev == NULL) {
clear_bit(In_sync, &rdev->flags);
rdev->raid_disk = disk;
err = 0;
@@ -5105,6 +5390,17 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
rcu_assign_pointer(p->rdev, rdev);
break;
}
+ if (test_bit(WantReplacement, &p->rdev->flags) &&
+ p->replacement == NULL) {
+ clear_bit(In_sync, &rdev->flags);
+ set_bit(Replacement, &rdev->flags);
+ rdev->raid_disk = disk;
+ err = 0;
+ conf->fullsync = 1;
+ rcu_assign_pointer(p->replacement, rdev);
+ break;
+ }
+ }
print_raid5_conf(conf);
return err;
}
@@ -5278,8 +5574,7 @@ static int raid5_start_reshape(struct mddev *mddev)
* pre and post number of devices.
*/
spin_lock_irqsave(&conf->device_lock, flags);
- mddev->degraded += (conf->raid_disks - conf->previous_raid_disks)
- - added_devices;
+ mddev->degraded = calc_degraded(conf);
spin_unlock_irqrestore(&conf->device_lock, flags);
}
mddev->raid_disks = conf->raid_disks;
@@ -5348,17 +5643,15 @@ static void raid5_finish_reshape(struct mddev *mddev)
revalidate_disk(mddev->gendisk);
} else {
int d;
- mddev->degraded = conf->raid_disks;
- for (d = 0; d < conf->raid_disks ; d++)
- if (conf->disks[d].rdev &&
- test_bit(In_sync,
- &conf->disks[d].rdev->flags))
- mddev->degraded--;
+ spin_lock_irq(&conf->device_lock);
+ mddev->degraded = calc_degraded(conf);
+ spin_unlock_irq(&conf->device_lock);
for (d = conf->raid_disks ;
d < conf->raid_disks - mddev->delta_disks;
d++) {
struct md_rdev *rdev = conf->disks[d].rdev;
- if (rdev && raid5_remove_disk(mddev, d) == 0) {
+ if (rdev &&
+ raid5_remove_disk(mddev, rdev) == 0) {
sysfs_unlink_rdev(mddev, rdev);
rdev->raid_disk = -1;
}
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index e10c5531f9c5..8d8e13934a48 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -27,7 +27,7 @@
* The possible state transitions are:
*
* Empty -> Want - on read or write to get old data for parity calc
- * Empty -> Dirty - on compute_parity to satisfy write/sync request.(RECONSTRUCT_WRITE)
+ * Empty -> Dirty - on compute_parity to satisfy write/sync request.
* Empty -> Clean - on compute_block when computing a block for failed drive
* Want -> Empty - on failed read
* Want -> Clean - on successful completion of read request
@@ -226,8 +226,11 @@ struct stripe_head {
#endif
} ops;
struct r5dev {
- struct bio req;
- struct bio_vec vec;
+ /* rreq and rvec are used for the replacement device when
+ * writing data to both devices.
+ */
+ struct bio req, rreq;
+ struct bio_vec vec, rvec;
struct page *page;
struct bio *toread, *read, *towrite, *written;
sector_t sector; /* sector of this page */
@@ -239,7 +242,13 @@ struct stripe_head {
* for handle_stripe.
*/
struct stripe_head_state {
- int syncing, expanding, expanded;
+ /* 'syncing' means that we need to read all devices, either
+ * to check/correct parity, or to reconstruct a missing device.
+ * 'replacing' means we are replacing one or more drives and
+ * the source is valid at this point so we don't need to
+ * read all devices, just the replacement targets.
+ */
+ int syncing, expanding, expanded, replacing;
int locked, uptodate, to_read, to_write, failed, written;
int to_fill, compute, req_compute, non_overwrite;
int failed_num[2];
@@ -252,38 +261,41 @@ struct stripe_head_state {
int handle_bad_blocks;
};
-/* Flags */
-#define R5_UPTODATE 0 /* page contains current data */
-#define R5_LOCKED 1 /* IO has been submitted on "req" */
-#define R5_OVERWRITE 2 /* towrite covers whole page */
+/* Flags for struct r5dev.flags */
+enum r5dev_flags {
+ R5_UPTODATE, /* page contains current data */
+ R5_LOCKED, /* IO has been submitted on "req" */
+ R5_DOUBLE_LOCKED,/* Cannot clear R5_LOCKED until 2 writes complete */
+ R5_OVERWRITE, /* towrite covers whole page */
/* and some that are internal to handle_stripe */
-#define R5_Insync 3 /* rdev && rdev->in_sync at start */
-#define R5_Wantread 4 /* want to schedule a read */
-#define R5_Wantwrite 5
-#define R5_Overlap 7 /* There is a pending overlapping request on this block */
-#define R5_ReadError 8 /* seen a read error here recently */
-#define R5_ReWrite 9 /* have tried to over-write the readerror */
+ R5_Insync, /* rdev && rdev->in_sync at start */
+ R5_Wantread, /* want to schedule a read */
+ R5_Wantwrite,
+ R5_Overlap, /* There is a pending overlapping request
+ * on this block */
+ R5_ReadError, /* seen a read error here recently */
+ R5_ReWrite, /* have tried to over-write the readerror */
-#define R5_Expanded 10 /* This block now has post-expand data */
-#define R5_Wantcompute 11 /* compute_block in progress treat as
- * uptodate
- */
-#define R5_Wantfill 12 /* dev->toread contains a bio that needs
- * filling
- */
-#define R5_Wantdrain 13 /* dev->towrite needs to be drained */
-#define R5_WantFUA 14 /* Write should be FUA */
-#define R5_WriteError 15 /* got a write error - need to record it */
-#define R5_MadeGood 16 /* A bad block has been fixed by writing to it*/
-/*
- * Write method
- */
-#define RECONSTRUCT_WRITE 1
-#define READ_MODIFY_WRITE 2
-/* not a write method, but a compute_parity mode */
-#define CHECK_PARITY 3
-/* Additional compute_parity mode -- updates the parity w/o LOCKING */
-#define UPDATE_PARITY 4
+ R5_Expanded, /* This block now has post-expand data */
+ R5_Wantcompute, /* compute_block in progress treat as
+ * uptodate
+ */
+ R5_Wantfill, /* dev->toread contains a bio that needs
+ * filling
+ */
+ R5_Wantdrain, /* dev->towrite needs to be drained */
+ R5_WantFUA, /* Write should be FUA */
+ R5_WriteError, /* got a write error - need to record it */
+ R5_MadeGood, /* A bad block has been fixed by writing to it */
+ R5_ReadRepl, /* Will/did read from replacement rather than orig */
+ R5_MadeGoodRepl,/* A bad block on the replacement device has been
+ * fixed by writing to it */
+ R5_NeedReplace, /* This device has a replacement which is not
+ * up-to-date at this stripe. */
+ R5_WantReplace, /* We need to update the replacement, we have read
+ * data in, and now is a good time to write it out.
+ */
+};
/*
* Stripe state
@@ -311,13 +323,14 @@ enum {
/*
* Operation request flags
*/
-#define STRIPE_OP_BIOFILL 0
-#define STRIPE_OP_COMPUTE_BLK 1
-#define STRIPE_OP_PREXOR 2
-#define STRIPE_OP_BIODRAIN 3
-#define STRIPE_OP_RECONSTRUCT 4
-#define STRIPE_OP_CHECK 5
-
+enum {
+ STRIPE_OP_BIOFILL,
+ STRIPE_OP_COMPUTE_BLK,
+ STRIPE_OP_PREXOR,
+ STRIPE_OP_BIODRAIN,
+ STRIPE_OP_RECONSTRUCT,
+ STRIPE_OP_CHECK,
+};
/*
* Plugging:
*
@@ -344,13 +357,12 @@ enum {
struct disk_info {
- struct md_rdev *rdev;
+ struct md_rdev *rdev, *replacement;
};
struct r5conf {
struct hlist_head *stripe_hashtbl;
struct mddev *mddev;
- struct disk_info *spare;
int chunk_sectors;
int level, algorithm;
int max_degraded;
diff --git a/drivers/media/common/tuners/Kconfig b/drivers/media/common/tuners/Kconfig
index 996302ae210e..4a6d5cef3964 100644
--- a/drivers/media/common/tuners/Kconfig
+++ b/drivers/media/common/tuners/Kconfig
@@ -26,7 +26,7 @@ config MEDIA_TUNER
select MEDIA_TUNER_XC4000 if !MEDIA_TUNER_CUSTOMISE
select MEDIA_TUNER_MT20XX if !MEDIA_TUNER_CUSTOMISE
select MEDIA_TUNER_TDA8290 if !MEDIA_TUNER_CUSTOMISE
- select MEDIA_TUNER_TEA5761 if !MEDIA_TUNER_CUSTOMISE
+ select MEDIA_TUNER_TEA5761 if !MEDIA_TUNER_CUSTOMISE && EXPERIMENTAL
select MEDIA_TUNER_TEA5767 if !MEDIA_TUNER_CUSTOMISE
select MEDIA_TUNER_SIMPLE if !MEDIA_TUNER_CUSTOMISE
select MEDIA_TUNER_TDA9887 if !MEDIA_TUNER_CUSTOMISE
@@ -116,6 +116,13 @@ config MEDIA_TUNER_MT2060
help
A driver for the silicon IF tuner MT2060 from Microtune.
+config MEDIA_TUNER_MT2063
+ tristate "Microtune MT2063 silicon IF tuner"
+ depends on VIDEO_MEDIA && I2C
+ default m if MEDIA_TUNER_CUSTOMISE
+ help
+ A driver for the silicon IF tuner MT2063 from Microtune.
+
config MEDIA_TUNER_MT2266
tristate "Microtune MT2266 silicon tuner"
depends on VIDEO_MEDIA && I2C
diff --git a/drivers/media/common/tuners/Makefile b/drivers/media/common/tuners/Makefile
index 196c12a55f9a..8295854ab94b 100644
--- a/drivers/media/common/tuners/Makefile
+++ b/drivers/media/common/tuners/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_MEDIA_TUNER_TDA18271) += tda18271.o
obj-$(CONFIG_MEDIA_TUNER_XC5000) += xc5000.o
obj-$(CONFIG_MEDIA_TUNER_XC4000) += xc4000.o
obj-$(CONFIG_MEDIA_TUNER_MT2060) += mt2060.o
+obj-$(CONFIG_MEDIA_TUNER_MT2063) += mt2063.o
obj-$(CONFIG_MEDIA_TUNER_MT2266) += mt2266.o
obj-$(CONFIG_MEDIA_TUNER_QT1010) += qt1010.o
obj-$(CONFIG_MEDIA_TUNER_MT2131) += mt2131.o
diff --git a/drivers/media/common/tuners/max2165.c b/drivers/media/common/tuners/max2165.c
index 9883617b7862..cb2c98fbad1b 100644
--- a/drivers/media/common/tuners/max2165.c
+++ b/drivers/media/common/tuners/max2165.c
@@ -151,7 +151,7 @@ static int max2165_set_bandwidth(struct max2165_priv *priv, u32 bw)
{
u8 val;
- if (bw == BANDWIDTH_8_MHZ)
+ if (bw == 8000000)
val = priv->bb_filter_8mhz_cfg;
else
val = priv->bb_filter_7mhz_cfg;
@@ -257,39 +257,28 @@ static void max2165_debug_status(struct max2165_priv *priv)
dprintk("VCO: %d, VCO Sub-band: %d, ADC: %d\n", vco, vco_sub_band, adc);
}
-static int max2165_set_params(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *params)
+static int max2165_set_params(struct dvb_frontend *fe)
{
struct max2165_priv *priv = fe->tuner_priv;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int ret;
- dprintk("%s() frequency=%d (Hz)\n", __func__, params->frequency);
- if (fe->ops.info.type == FE_ATSC) {
- return -EINVAL;
- } else if (fe->ops.info.type == FE_OFDM) {
- dprintk("%s() OFDM\n", __func__);
- switch (params->u.ofdm.bandwidth) {
- case BANDWIDTH_6_MHZ:
- return -EINVAL;
- case BANDWIDTH_7_MHZ:
- case BANDWIDTH_8_MHZ:
- priv->frequency = params->frequency;
- priv->bandwidth = params->u.ofdm.bandwidth;
- break;
- default:
- printk(KERN_ERR "MAX2165 bandwidth not set!\n");
- return -EINVAL;
- }
- } else {
- printk(KERN_ERR "MAX2165 modulation type not supported!\n");
+ switch (c->bandwidth_hz) {
+ case 7000000:
+ case 8000000:
+ priv->frequency = c->frequency;
+ break;
+ default:
+ printk(KERN_INFO "MAX2165: bandwidth %d Hz not supported.\n",
+ c->bandwidth_hz);
return -EINVAL;
}
- dprintk("%s() frequency=%d\n", __func__, priv->frequency);
+ dprintk("%s() frequency=%d\n", __func__, c->frequency);
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
- max2165_set_bandwidth(priv, priv->bandwidth);
+ max2165_set_bandwidth(priv, c->bandwidth_hz);
ret = max2165_set_rf(priv, priv->frequency);
mdelay(50);
max2165_debug_status(priv);
@@ -370,7 +359,7 @@ static int max2165_init(struct dvb_frontend *fe)
max2165_read_rom_table(priv);
- max2165_set_bandwidth(priv, BANDWIDTH_8_MHZ);
+ max2165_set_bandwidth(priv, 8000000);
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 0);
diff --git a/drivers/media/common/tuners/mc44s803.c b/drivers/media/common/tuners/mc44s803.c
index fe5c4b8d83ee..5ddce7e326f7 100644
--- a/drivers/media/common/tuners/mc44s803.c
+++ b/drivers/media/common/tuners/mc44s803.c
@@ -214,22 +214,22 @@ exit:
return err;
}
-static int mc44s803_set_params(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *params)
+static int mc44s803_set_params(struct dvb_frontend *fe)
{
struct mc44s803_priv *priv = fe->tuner_priv;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
u32 r1, r2, n1, n2, lo1, lo2, freq, val;
int err;
- priv->frequency = params->frequency;
+ priv->frequency = c->frequency;
r1 = MC44S803_OSC / 1000000;
r2 = MC44S803_OSC / 100000;
- n1 = (params->frequency + MC44S803_IF1 + 500000) / 1000000;
+ n1 = (c->frequency + MC44S803_IF1 + 500000) / 1000000;
freq = MC44S803_OSC / r1 * n1;
lo1 = ((60 * n1) + (r1 / 2)) / r1;
- freq = freq - params->frequency;
+ freq = freq - c->frequency;
n2 = (freq - MC44S803_IF2 + 50000) / 100000;
lo2 = ((60 * n2) + (r2 / 2)) / r2;
diff --git a/drivers/media/common/tuners/mt2060.c b/drivers/media/common/tuners/mt2060.c
index 2d0e7689c6a2..13381de58a84 100644
--- a/drivers/media/common/tuners/mt2060.c
+++ b/drivers/media/common/tuners/mt2060.c
@@ -153,8 +153,9 @@ static int mt2060_spurcheck(u32 lo1,u32 lo2,u32 if2)
#define IF2 36150 // IF2 frequency = 36.150 MHz
#define FREF 16000 // Quartz oscillator 16 MHz
-static int mt2060_set_params(struct dvb_frontend *fe, struct dvb_frontend_parameters *params)
+static int mt2060_set_params(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct mt2060_priv *priv;
int ret=0;
int i=0;
@@ -176,8 +177,7 @@ static int mt2060_set_params(struct dvb_frontend *fe, struct dvb_frontend_parame
mt2060_writeregs(priv,b,2);
- freq = params->frequency / 1000; // Hz -> kHz
- priv->bandwidth = (fe->ops.info.type == FE_OFDM) ? params->u.ofdm.bandwidth : 0;
+ freq = c->frequency / 1000; /* Hz -> kHz */
f_lo1 = freq + if1 * 1000;
f_lo1 = (f_lo1 / 250) * 250;
@@ -293,10 +293,9 @@ static int mt2060_get_frequency(struct dvb_frontend *fe, u32 *frequency)
return 0;
}
-static int mt2060_get_bandwidth(struct dvb_frontend *fe, u32 *bandwidth)
+static int mt2060_get_if_frequency(struct dvb_frontend *fe, u32 *frequency)
{
- struct mt2060_priv *priv = fe->tuner_priv;
- *bandwidth = priv->bandwidth;
+ *frequency = IF2 * 1000;
return 0;
}
@@ -356,7 +355,7 @@ static const struct dvb_tuner_ops mt2060_tuner_ops = {
.set_params = mt2060_set_params,
.get_frequency = mt2060_get_frequency,
- .get_bandwidth = mt2060_get_bandwidth
+ .get_if_frequency = mt2060_get_if_frequency,
};
/* This functions tries to identify a MT2060 tuner by reading the PART/REV register. This is hasty. */
diff --git a/drivers/media/common/tuners/mt2060_priv.h b/drivers/media/common/tuners/mt2060_priv.h
index 5eaccdefd0b0..2b60de6c707d 100644
--- a/drivers/media/common/tuners/mt2060_priv.h
+++ b/drivers/media/common/tuners/mt2060_priv.h
@@ -97,7 +97,6 @@ struct mt2060_priv {
struct i2c_adapter *i2c;
u32 frequency;
- u32 bandwidth;
u16 if1_freq;
u8 fmfreq;
};
diff --git a/drivers/media/common/tuners/mt2063.c b/drivers/media/common/tuners/mt2063.c
new file mode 100644
index 000000000000..c89af3cd5eba
--- /dev/null
+++ b/drivers/media/common/tuners/mt2063.c
@@ -0,0 +1,2307 @@
+/*
+ * Driver for mt2063 Microtune tuner
+ *
+ * Copyright (c) 2011 Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This driver came from a driver originally written by:
+ * Henry Wang <Henry.wang@AzureWave.com>
+ * Made publicly available by Terratec, at:
+ * http://linux.terratec.de/files/TERRATEC_H7/20110323_TERRATEC_H7_Linux.tar.gz
+ * The original driver's license is GPL, as declared with MODULE_LICENSE()
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation under version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/videodev2.h>
+
+#include "mt2063.h"
+
+static unsigned int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "Set Verbosity level");
+
+#define dprintk(level, fmt, arg...) do { \
+if (debug >= level) \
+ printk(KERN_DEBUG "mt2063 %s: " fmt, __func__, ## arg); \
+} while (0)
+
+
+/* positive error codes used internally */
+
+/* Info: Unavoidable LO-related spur may be present in the output */
+#define MT2063_SPUR_PRESENT_ERR (0x00800000)
+
+/* Info: Mask of bits used for # of LO-related spurs that were avoided during tuning */
+#define MT2063_SPUR_CNT_MASK (0x001f0000)
+#define MT2063_SPUR_SHIFT (16)
+
+/* Info: Upconverter frequency is out of range (may be reason for MT_UPC_UNLOCK) */
+#define MT2063_UPC_RANGE (0x04000000)
+
+/* Info: Downconverter frequency is out of range (may be reason for MT_DPC_UNLOCK) */
+#define MT2063_DNC_RANGE (0x08000000)
+
+/*
+ * Constant defining the version of the following structure
+ * and therefore the API for this code.
+ *
+ * When compiling the tuner driver, the preprocessor will
+ * check against this version number to make sure that
+ * it matches the version that the tuner driver knows about.
+ */
+
+/* DECT Frequency Avoidance */
+#define MT2063_DECT_AVOID_US_FREQS 0x00000001
+
+#define MT2063_DECT_AVOID_EURO_FREQS 0x00000002
+
+#define MT2063_EXCLUDE_US_DECT_FREQUENCIES(s) (((s) & MT2063_DECT_AVOID_US_FREQS) != 0)
+
+#define MT2063_EXCLUDE_EURO_DECT_FREQUENCIES(s) (((s) & MT2063_DECT_AVOID_EURO_FREQS) != 0)
+
+enum MT2063_DECT_Avoid_Type {
+ MT2063_NO_DECT_AVOIDANCE = 0, /* Do not create DECT exclusion zones. */
+ MT2063_AVOID_US_DECT = MT2063_DECT_AVOID_US_FREQS, /* Avoid US DECT frequencies. */
+ MT2063_AVOID_EURO_DECT = MT2063_DECT_AVOID_EURO_FREQS, /* Avoid European DECT frequencies. */
+ MT2063_AVOID_BOTH /* Avoid both regions. Not typically used. */
+};
+
+#define MT2063_MAX_ZONES 48
+
+struct MT2063_ExclZone_t {
+ u32 min_;
+ u32 max_;
+ struct MT2063_ExclZone_t *next_;
+};
+
+/*
+ * Structure of data needed for Spur Avoidance
+ */
+struct MT2063_AvoidSpursData_t {
+ u32 f_ref;
+ u32 f_in;
+ u32 f_LO1;
+ u32 f_if1_Center;
+ u32 f_if1_Request;
+ u32 f_if1_bw;
+ u32 f_LO2;
+ u32 f_out;
+ u32 f_out_bw;
+ u32 f_LO1_Step;
+ u32 f_LO2_Step;
+ u32 f_LO1_FracN_Avoid;
+ u32 f_LO2_FracN_Avoid;
+ u32 f_zif_bw;
+ u32 f_min_LO_Separation;
+ u32 maxH1;
+ u32 maxH2;
+ enum MT2063_DECT_Avoid_Type avoidDECT;
+ u32 bSpurPresent;
+ u32 bSpurAvoided;
+ u32 nSpursFound;
+ u32 nZones;
+ struct MT2063_ExclZone_t *freeZones;
+ struct MT2063_ExclZone_t *usedZones;
+ struct MT2063_ExclZone_t MT2063_ExclZones[MT2063_MAX_ZONES];
+};
+
+/*
+ * Parameter for function MT2063_SetPowerMask that specifies the power down
+ * of various sections of the MT2063.
+ */
+enum MT2063_Mask_Bits {
+ MT2063_REG_SD = 0x0040, /* Shutdown regulator */
+ MT2063_SRO_SD = 0x0020, /* Shutdown SRO */
+ MT2063_AFC_SD = 0x0010, /* Shutdown AFC A/D */
+ MT2063_PD_SD = 0x0002, /* Enable power detector shutdown */
+ MT2063_PDADC_SD = 0x0001, /* Enable power detector A/D shutdown */
+ MT2063_VCO_SD = 0x8000, /* Enable VCO shutdown */
+ MT2063_LTX_SD = 0x4000, /* Enable LTX shutdown */
+ MT2063_LT1_SD = 0x2000, /* Enable LT1 shutdown */
+ MT2063_LNA_SD = 0x1000, /* Enable LNA shutdown */
+ MT2063_UPC_SD = 0x0800, /* Enable upconverter shutdown */
+ MT2063_DNC_SD = 0x0400, /* Enable downconverter shutdown */
+ MT2063_VGA_SD = 0x0200, /* Enable VGA shutdown */
+ MT2063_AMP_SD = 0x0100, /* Enable AMP shutdown */
+ MT2063_ALL_SD = 0xFF73, /* All shutdown bits for this tuner */
+ MT2063_NONE_SD = 0x0000 /* No shutdown bits */
+};
+
+/*
+ * Possible values for MT2063_DNC_OUTPUT
+ */
+enum MT2063_DNC_Output_Enable {
+ MT2063_DNC_NONE = 0,
+ MT2063_DNC_1,
+ MT2063_DNC_2,
+ MT2063_DNC_BOTH
+};
+
+/*
+ * Two-wire serial bus subaddresses of the tuner registers.
+ * Also known as the tuner's register addresses.
+ */
+enum MT2063_Register_Offsets {
+ MT2063_REG_PART_REV = 0, /* 0x00: Part/Rev Code */
+ MT2063_REG_LO1CQ_1, /* 0x01: LO1C Queued Byte 1 */
+ MT2063_REG_LO1CQ_2, /* 0x02: LO1C Queued Byte 2 */
+ MT2063_REG_LO2CQ_1, /* 0x03: LO2C Queued Byte 1 */
+ MT2063_REG_LO2CQ_2, /* 0x04: LO2C Queued Byte 2 */
+ MT2063_REG_LO2CQ_3, /* 0x05: LO2C Queued Byte 3 */
+ MT2063_REG_RSVD_06, /* 0x06: Reserved */
+ MT2063_REG_LO_STATUS, /* 0x07: LO Status */
+ MT2063_REG_FIFFC, /* 0x08: FIFF Center */
+ MT2063_REG_CLEARTUNE, /* 0x09: ClearTune Filter */
+ MT2063_REG_ADC_OUT, /* 0x0A: ADC_OUT */
+ MT2063_REG_LO1C_1, /* 0x0B: LO1C Byte 1 */
+ MT2063_REG_LO1C_2, /* 0x0C: LO1C Byte 2 */
+ MT2063_REG_LO2C_1, /* 0x0D: LO2C Byte 1 */
+ MT2063_REG_LO2C_2, /* 0x0E: LO2C Byte 2 */
+ MT2063_REG_LO2C_3, /* 0x0F: LO2C Byte 3 */
+ MT2063_REG_RSVD_10, /* 0x10: Reserved */
+ MT2063_REG_PWR_1, /* 0x11: PWR Byte 1 */
+ MT2063_REG_PWR_2, /* 0x12: PWR Byte 2 */
+ MT2063_REG_TEMP_STATUS, /* 0x13: Temp Status */
+ MT2063_REG_XO_STATUS, /* 0x14: Crystal Status */
+ MT2063_REG_RF_STATUS, /* 0x15: RF Attn Status */
+ MT2063_REG_FIF_STATUS, /* 0x16: FIF Attn Status */
+ MT2063_REG_LNA_OV, /* 0x17: LNA Attn Override */
+ MT2063_REG_RF_OV, /* 0x18: RF Attn Override */
+ MT2063_REG_FIF_OV, /* 0x19: FIF Attn Override */
+ MT2063_REG_LNA_TGT, /* 0x1A: Reserved */
+ MT2063_REG_PD1_TGT, /* 0x1B: Pwr Det 1 Target */
+ MT2063_REG_PD2_TGT, /* 0x1C: Pwr Det 2 Target */
+ MT2063_REG_RSVD_1D, /* 0x1D: Reserved */
+ MT2063_REG_RSVD_1E, /* 0x1E: Reserved */
+ MT2063_REG_RSVD_1F, /* 0x1F: Reserved */
+ MT2063_REG_RSVD_20, /* 0x20: Reserved */
+ MT2063_REG_BYP_CTRL, /* 0x21: Bypass Control */
+ MT2063_REG_RSVD_22, /* 0x22: Reserved */
+ MT2063_REG_RSVD_23, /* 0x23: Reserved */
+ MT2063_REG_RSVD_24, /* 0x24: Reserved */
+ MT2063_REG_RSVD_25, /* 0x25: Reserved */
+ MT2063_REG_RSVD_26, /* 0x26: Reserved */
+ MT2063_REG_RSVD_27, /* 0x27: Reserved */
+ MT2063_REG_FIFF_CTRL, /* 0x28: FIFF Control */
+ MT2063_REG_FIFF_OFFSET, /* 0x29: FIFF Offset */
+ MT2063_REG_CTUNE_CTRL, /* 0x2A: Reserved */
+ MT2063_REG_CTUNE_OV, /* 0x2B: Reserved */
+ MT2063_REG_CTRL_2C, /* 0x2C: Reserved */
+ MT2063_REG_FIFF_CTRL2, /* 0x2D: Fiff Control */
+ MT2063_REG_RSVD_2E, /* 0x2E: Reserved */
+ MT2063_REG_DNC_GAIN, /* 0x2F: DNC Control */
+ MT2063_REG_VGA_GAIN, /* 0x30: VGA Gain Ctrl */
+ MT2063_REG_RSVD_31, /* 0x31: Reserved */
+ MT2063_REG_TEMP_SEL, /* 0x32: Temperature Selection */
+ MT2063_REG_RSVD_33, /* 0x33: Reserved */
+ MT2063_REG_RSVD_34, /* 0x34: Reserved */
+ MT2063_REG_RSVD_35, /* 0x35: Reserved */
+ MT2063_REG_RSVD_36, /* 0x36: Reserved */
+ MT2063_REG_RSVD_37, /* 0x37: Reserved */
+ MT2063_REG_RSVD_38, /* 0x38: Reserved */
+ MT2063_REG_RSVD_39, /* 0x39: Reserved */
+ MT2063_REG_RSVD_3A, /* 0x3A: Reserved */
+ MT2063_REG_RSVD_3B, /* 0x3B: Reserved */
+ MT2063_REG_RSVD_3C, /* 0x3C: Reserved */
+ MT2063_REG_END_REGS
+};
+
+struct mt2063_state {
+ struct i2c_adapter *i2c;
+
+ bool init;
+
+ const struct mt2063_config *config;
+ struct dvb_tuner_ops ops;
+ struct dvb_frontend *frontend;
+ struct tuner_state status;
+
+ u32 frequency;
+ u32 srate;
+ u32 bandwidth;
+ u32 reference;
+
+ u32 tuner_id;
+ struct MT2063_AvoidSpursData_t AS_Data;
+ u32 f_IF1_actual;
+ u32 rcvr_mode;
+ u32 ctfilt_sw;
+ u32 CTFiltMax[31];
+ u32 num_regs;
+ u8 reg[MT2063_REG_END_REGS];
+};
+
+/*
+ * mt2063_write - Write data into the I2C bus
+ */
+static u32 mt2063_write(struct mt2063_state *state, u8 reg, u8 *data, u32 len)
+{
+ struct dvb_frontend *fe = state->frontend;
+ int ret;
+ u8 buf[60];
+ struct i2c_msg msg = {
+ .addr = state->config->tuner_address,
+ .flags = 0,
+ .buf = buf,
+ .len = len + 1
+ };
+
+ dprintk(2, "\n");
+
+ msg.buf[0] = reg;
+ memcpy(msg.buf + 1, data, len);
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1);
+ ret = i2c_transfer(state->i2c, &msg, 1);
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0);
+
+ if (ret < 0)
+ printk(KERN_ERR "%s error ret=%d\n", __func__, ret);
+
+ return ret;
+}
+
+/*
+ * mt2063_setreg - Write register data into the I2C bus, caching the value
+ */
+static u32 mt2063_setreg(struct mt2063_state *state, u8 reg, u8 val)
+{
+ u32 status;
+
+ dprintk(2, "\n");
+
+ if (reg >= MT2063_REG_END_REGS)
+ return -ERANGE;
+
+ status = mt2063_write(state, reg, &val, 1);
+ if (status < 0)
+ return status;
+
+ state->reg[reg] = val;
+
+ return 0;
+}
+
+/*
+ * mt2063_read - Read data from the I2C bus
+ */
+static u32 mt2063_read(struct mt2063_state *state,
+ u8 subAddress, u8 *pData, u32 cnt)
+{
+ u32 status = 0; /* Status to be returned */
+ struct dvb_frontend *fe = state->frontend;
+ u32 i = 0;
+
+ dprintk(2, "addr 0x%02x, cnt %d\n", subAddress, cnt);
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1);
+
+ for (i = 0; i < cnt; i++) {
+ u8 b0[] = { subAddress + i };
+ struct i2c_msg msg[] = {
+ {
+ .addr = state->config->tuner_address,
+ .flags = 0,
+ .buf = b0,
+ .len = 1
+ }, {
+ .addr = state->config->tuner_address,
+ .flags = I2C_M_RD,
+ .buf = pData + i,
+ .len = 1
+ }
+ };
+
+ status = i2c_transfer(state->i2c, msg, 2);
+ dprintk(2, "addr 0x%02x, ret = %d, val = 0x%02x\n",
+ subAddress + i, status, *(pData + i));
+ if (status < 0)
+ break;
+ }
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0);
+
+ if (status < 0)
+ printk(KERN_ERR "Can't read from address 0x%02x,\n",
+ subAddress + i);
+
+ return status;
+}
+
+/*
+ * FIXME: Is this really needed?
+ */
+static int MT2063_Sleep(struct dvb_frontend *fe)
+{
+ /*
+ * ToDo: Add code here to implement an OS blocking wait
+ */
+ msleep(10);
+
+ return 0;
+}
+
+/*
+ * Microtune spur avoidance
+ */
+
+/* Implement ceiling, floor functions. */
+#define ceil(n, d) (((n) < 0) ? (-((-(n))/(d))) : (n)/(d) + ((n)%(d) != 0))
+#define floor(n, d) (((n) < 0) ? (-((-(n))/(d))) - ((n)%(d) != 0) : (n)/(d))
+
+struct MT2063_FIFZone_t {
+ s32 min_;
+ s32 max_;
+};
+
+static struct MT2063_ExclZone_t *InsertNode(struct MT2063_AvoidSpursData_t
+ *pAS_Info,
+ struct MT2063_ExclZone_t *pPrevNode)
+{
+ struct MT2063_ExclZone_t *pNode;
+
+ dprintk(2, "\n");
+
+ /* Check for a node in the free list */
+ if (pAS_Info->freeZones != NULL) {
+ /* Use one from the free list */
+ pNode = pAS_Info->freeZones;
+ pAS_Info->freeZones = pNode->next_;
+ } else {
+ /* Grab a node from the array */
+ pNode = &pAS_Info->MT2063_ExclZones[pAS_Info->nZones];
+ }
+
+ if (pPrevNode != NULL) {
+ pNode->next_ = pPrevNode->next_;
+ pPrevNode->next_ = pNode;
+ } else { /* insert at the beginning of the list */
+
+ pNode->next_ = pAS_Info->usedZones;
+ pAS_Info->usedZones = pNode;
+ }
+
+ pAS_Info->nZones++;
+ return pNode;
+}
+
+static struct MT2063_ExclZone_t *RemoveNode(struct MT2063_AvoidSpursData_t
+ *pAS_Info,
+ struct MT2063_ExclZone_t *pPrevNode,
+ struct MT2063_ExclZone_t
+ *pNodeToRemove)
+{
+ struct MT2063_ExclZone_t *pNext = pNodeToRemove->next_;
+
+ dprintk(2, "\n");
+
+ /* Make previous node point to the subsequent node */
+ if (pPrevNode != NULL)
+ pPrevNode->next_ = pNext;
+
+ /* Add pNodeToRemove to the beginning of the freeZones */
+ pNodeToRemove->next_ = pAS_Info->freeZones;
+ pAS_Info->freeZones = pNodeToRemove;
+
+ /* Decrement node count */
+ pAS_Info->nZones--;
+
+ return pNext;
+}
+
+/*
+ * MT_AddExclZone()
+ *
+ * Add (and merge) an exclusion zone into the list.
+ * If the range (f_min, f_max) is totally outside the
+ * 1st IF BW, ignore the entry.
+ * If the range (f_min, f_max) is negative, ignore the entry.
+ */
+static void MT2063_AddExclZone(struct MT2063_AvoidSpursData_t *pAS_Info,
+ u32 f_min, u32 f_max)
+{
+ struct MT2063_ExclZone_t *pNode = pAS_Info->usedZones;
+ struct MT2063_ExclZone_t *pPrev = NULL;
+ struct MT2063_ExclZone_t *pNext = NULL;
+
+ dprintk(2, "\n");
+
+ /* Check to see if this overlaps the 1st IF filter */
+ if ((f_max > (pAS_Info->f_if1_Center - (pAS_Info->f_if1_bw / 2)))
+ && (f_min < (pAS_Info->f_if1_Center + (pAS_Info->f_if1_bw / 2)))
+ && (f_min < f_max)) {
+ /*
+ * 1 2 3 4 5 6
+ *
+ * New entry: |---| |--| |--| |-| |---| |--|
+ * or or or or or
+ * Existing: |--| |--| |--| |---| |-| |--|
+ */
+
+ /* Check for our place in the list */
+ while ((pNode != NULL) && (pNode->max_ < f_min)) {
+ pPrev = pNode;
+ pNode = pNode->next_;
+ }
+
+ if ((pNode != NULL) && (pNode->min_ < f_max)) {
+ /* Combine me with pNode */
+ if (f_min < pNode->min_)
+ pNode->min_ = f_min;
+ if (f_max > pNode->max_)
+ pNode->max_ = f_max;
+ } else {
+ pNode = InsertNode(pAS_Info, pPrev);
+ pNode->min_ = f_min;
+ pNode->max_ = f_max;
+ }
+
+ /* Look for merging possibilities */
+ pNext = pNode->next_;
+ while ((pNext != NULL) && (pNext->min_ < pNode->max_)) {
+ if (pNext->max_ > pNode->max_)
+ pNode->max_ = pNext->max_;
+ /* Remove pNext, return ptr to pNext->next */
+ pNext = RemoveNode(pAS_Info, pNode, pNext);
+ }
+ }
+}
+
+/*
+ * Reset all exclusion zones.
+ * Add zones to protect the PLL FracN regions near zero
+ */
+static void MT2063_ResetExclZones(struct MT2063_AvoidSpursData_t *pAS_Info)
+{
+ u32 center;
+
+ dprintk(2, "\n");
+
+ pAS_Info->nZones = 0; /* this clears the used list */
+ pAS_Info->usedZones = NULL; /* reset ptr */
+ pAS_Info->freeZones = NULL; /* reset ptr */
+
+ center =
+ pAS_Info->f_ref *
+ ((pAS_Info->f_if1_Center - pAS_Info->f_if1_bw / 2 +
+ pAS_Info->f_in) / pAS_Info->f_ref) - pAS_Info->f_in;
+ while (center <
+ pAS_Info->f_if1_Center + pAS_Info->f_if1_bw / 2 +
+ pAS_Info->f_LO1_FracN_Avoid) {
+ /* Exclude LO1 FracN */
+ MT2063_AddExclZone(pAS_Info,
+ center - pAS_Info->f_LO1_FracN_Avoid,
+ center - 1);
+ MT2063_AddExclZone(pAS_Info, center + 1,
+ center + pAS_Info->f_LO1_FracN_Avoid);
+ center += pAS_Info->f_ref;
+ }
+
+ center =
+ pAS_Info->f_ref *
+ ((pAS_Info->f_if1_Center - pAS_Info->f_if1_bw / 2 -
+ pAS_Info->f_out) / pAS_Info->f_ref) + pAS_Info->f_out;
+ while (center <
+ pAS_Info->f_if1_Center + pAS_Info->f_if1_bw / 2 +
+ pAS_Info->f_LO2_FracN_Avoid) {
+ /* Exclude LO2 FracN */
+ MT2063_AddExclZone(pAS_Info,
+ center - pAS_Info->f_LO2_FracN_Avoid,
+ center - 1);
+ MT2063_AddExclZone(pAS_Info, center + 1,
+ center + pAS_Info->f_LO2_FracN_Avoid);
+ center += pAS_Info->f_ref;
+ }
+
+ if (MT2063_EXCLUDE_US_DECT_FREQUENCIES(pAS_Info->avoidDECT)) {
+ /* Exclude LO1 values that conflict with DECT channels */
+ MT2063_AddExclZone(pAS_Info, 1920836000 - pAS_Info->f_in, 1922236000 - pAS_Info->f_in); /* Ctr = 1921.536 */
+ MT2063_AddExclZone(pAS_Info, 1922564000 - pAS_Info->f_in, 1923964000 - pAS_Info->f_in); /* Ctr = 1923.264 */
+ MT2063_AddExclZone(pAS_Info, 1924292000 - pAS_Info->f_in, 1925692000 - pAS_Info->f_in); /* Ctr = 1924.992 */
+ MT2063_AddExclZone(pAS_Info, 1926020000 - pAS_Info->f_in, 1927420000 - pAS_Info->f_in); /* Ctr = 1926.720 */
+ MT2063_AddExclZone(pAS_Info, 1927748000 - pAS_Info->f_in, 1929148000 - pAS_Info->f_in); /* Ctr = 1928.448 */
+ }
+
+ if (MT2063_EXCLUDE_EURO_DECT_FREQUENCIES(pAS_Info->avoidDECT)) {
+ MT2063_AddExclZone(pAS_Info, 1896644000 - pAS_Info->f_in, 1898044000 - pAS_Info->f_in); /* Ctr = 1897.344 */
+ MT2063_AddExclZone(pAS_Info, 1894916000 - pAS_Info->f_in, 1896316000 - pAS_Info->f_in); /* Ctr = 1895.616 */
+ MT2063_AddExclZone(pAS_Info, 1893188000 - pAS_Info->f_in, 1894588000 - pAS_Info->f_in); /* Ctr = 1893.888 */
+ MT2063_AddExclZone(pAS_Info, 1891460000 - pAS_Info->f_in, 1892860000 - pAS_Info->f_in); /* Ctr = 1892.16 */
+ MT2063_AddExclZone(pAS_Info, 1889732000 - pAS_Info->f_in, 1891132000 - pAS_Info->f_in); /* Ctr = 1890.432 */
+ MT2063_AddExclZone(pAS_Info, 1888004000 - pAS_Info->f_in, 1889404000 - pAS_Info->f_in); /* Ctr = 1888.704 */
+ MT2063_AddExclZone(pAS_Info, 1886276000 - pAS_Info->f_in, 1887676000 - pAS_Info->f_in); /* Ctr = 1886.976 */
+ MT2063_AddExclZone(pAS_Info, 1884548000 - pAS_Info->f_in, 1885948000 - pAS_Info->f_in); /* Ctr = 1885.248 */
+ MT2063_AddExclZone(pAS_Info, 1882820000 - pAS_Info->f_in, 1884220000 - pAS_Info->f_in); /* Ctr = 1883.52 */
+ MT2063_AddExclZone(pAS_Info, 1881092000 - pAS_Info->f_in, 1882492000 - pAS_Info->f_in); /* Ctr = 1881.792 */
+ }
+}
+
+/*
+ * MT_ChooseFirstIF - Choose the best available 1st IF
+ * If f_Desired is not excluded, choose that first.
+ * Otherwise, return the value closest to f_Center that is
+ * not excluded
+ */
+static u32 MT2063_ChooseFirstIF(struct MT2063_AvoidSpursData_t *pAS_Info)
+{
+ /*
+ * Update "f_Desired" to be the nearest "combinational-multiple" of
+ * "f_LO1_Step".
+ * The resulting number, F_LO1 must be a multiple of f_LO1_Step.
+ * And F_LO1 is the arithmetic sum of f_in + f_Center.
+ * Neither f_in nor f_Center needs to be a multiple of f_LO1_Step.
+ * However, the sum must be.
+ */
+ const u32 f_Desired =
+ pAS_Info->f_LO1_Step *
+ ((pAS_Info->f_if1_Request + pAS_Info->f_in +
+ pAS_Info->f_LO1_Step / 2) / pAS_Info->f_LO1_Step) -
+ pAS_Info->f_in;
+ const u32 f_Step =
+ (pAS_Info->f_LO1_Step >
+ pAS_Info->f_LO2_Step) ? pAS_Info->f_LO1_Step : pAS_Info->
+ f_LO2_Step;
+ u32 f_Center;
+ s32 i;
+ s32 j = 0;
+ u32 bDesiredExcluded = 0;
+ u32 bZeroExcluded = 0;
+ s32 tmpMin, tmpMax;
+ s32 bestDiff;
+ struct MT2063_ExclZone_t *pNode = pAS_Info->usedZones;
+ struct MT2063_FIFZone_t zones[MT2063_MAX_ZONES];
+
+ dprintk(2, "\n");
+
+ if (pAS_Info->nZones == 0)
+ return f_Desired;
+
+ /*
+ * f_Center needs to be an integer multiple of f_Step away
+ * from f_Desired
+ */
+ if (pAS_Info->f_if1_Center > f_Desired)
+ f_Center =
+ f_Desired +
+ f_Step *
+ ((pAS_Info->f_if1_Center - f_Desired +
+ f_Step / 2) / f_Step);
+ else
+ f_Center =
+ f_Desired -
+ f_Step *
+ ((f_Desired - pAS_Info->f_if1_Center +
+ f_Step / 2) / f_Step);
+
+ /*
+ * Take MT_ExclZones, center around f_Center and change the
+ * resolution to f_Step
+ */
+ while (pNode != NULL) {
+ /* floor function */
+ tmpMin =
+ floor((s32) (pNode->min_ - f_Center), (s32) f_Step);
+
+ /* ceil function */
+ tmpMax =
+ ceil((s32) (pNode->max_ - f_Center), (s32) f_Step);
+
+ if ((pNode->min_ < f_Desired) && (pNode->max_ > f_Desired))
+ bDesiredExcluded = 1;
+
+ if ((tmpMin < 0) && (tmpMax > 0))
+ bZeroExcluded = 1;
+
+ /* See if this zone overlaps the previous */
+ if ((j > 0) && (tmpMin < zones[j - 1].max_))
+ zones[j - 1].max_ = tmpMax;
+ else {
+ /* Add new zone */
+ zones[j].min_ = tmpMin;
+ zones[j].max_ = tmpMax;
+ j++;
+ }
+ pNode = pNode->next_;
+ }
+
+ /*
+ * If the desired is okay, return with it
+ */
+ if (bDesiredExcluded == 0)
+ return f_Desired;
+
+ /*
+ * If the desired is excluded and the center is okay, return with it
+ */
+ if (bZeroExcluded == 0)
+ return f_Center;
+
+ /* Find the value closest to 0 (f_Center) */
+ bestDiff = zones[0].min_;
+ for (i = 0; i < j; i++) {
+ if (abs(zones[i].min_) < abs(bestDiff))
+ bestDiff = zones[i].min_;
+ if (abs(zones[i].max_) < abs(bestDiff))
+ bestDiff = zones[i].max_;
+ }
+
+ if (bestDiff < 0)
+ return f_Center - ((u32) (-bestDiff) * f_Step);
+
+ return f_Center + (bestDiff * f_Step);
+}
+
+/**
+ * MT2063_gcd() - Computes the greatest common divisor using Euclid's algorithm
+ *
+ * @u: First unsigned value
+ * @v: Second unsigned value
+ *
+ * Returns the greatest common divisor of u and v.  If either value is 0,
+ * the other value is returned as the result.
+ */
+static u32 MT2063_gcd(u32 u, u32 v)
+{
+ u32 r;
+
+ while (v != 0) {
+ r = u % v;
+ u = v;
+ v = r;
+ }
+
+ return u;
+}
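+
+/*
+ * For example, MT2063_gcd(1550000000, 16000000) = 2000000.  IsSpurInBand()
+ * below uses such GCDs of the LO frequencies and the band edges to pick
+ * scale factors that keep its spur arithmetic within 32 bits without
+ * losing precision.
+ */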
+
+/**
+ * IsSpurInBand() - Checks to see if a spur will be present within the IF's
+ *                 bandwidth. (fIFOut +/- fIFBW/2, -fIFOut +/- fIFBW/2)
+ *
+ * ma mb mc md
+ * <--+-+-+-------------------+-------------------+-+-+-->
+ * | ^ 0 ^ |
+ * ^ b=-fIFOut+fIFBW/2 -b=+fIFOut-fIFBW/2 ^
+ * a=-fIFOut-fIFBW/2 -a=+fIFOut+fIFBW/2
+ *
+ * Note that some equations are doubled to prevent round-off
+ * problems when calculating fIFBW/2
+ *
+ * @pAS_Info: Avoid Spurs information block
+ * @fm:		If a spur is present, the amount f_IF1 must move down (in Hz)
+ * @fp:		If a spur is present, the amount f_IF1 must move up (in Hz)
+ *
+ * Returns 1 if an LO spur would be present, otherwise 0.
+ */
+static u32 IsSpurInBand(struct MT2063_AvoidSpursData_t *pAS_Info,
+			u32 *fm, u32 *fp)
+{
+ /*
+ ** Calculate LO frequency settings.
+ */
+ u32 n, n0;
+ const u32 f_LO1 = pAS_Info->f_LO1;
+ const u32 f_LO2 = pAS_Info->f_LO2;
+ const u32 d = pAS_Info->f_out + pAS_Info->f_out_bw / 2;
+ const u32 c = d - pAS_Info->f_out_bw;
+ const u32 f = pAS_Info->f_zif_bw / 2;
+ const u32 f_Scale = (f_LO1 / (UINT_MAX / 2 / pAS_Info->maxH1)) + 1;
+ s32 f_nsLO1, f_nsLO2;
+ s32 f_Spur;
+ u32 ma, mb, mc, md, me, mf;
+ u32 lo_gcd, gd_Scale, gc_Scale, gf_Scale, hgds, hgfs, hgcs;
+
+ dprintk(2, "\n");
+
+ *fm = 0;
+
+ /*
+ ** For each edge (d, c & f), calculate a scale, based on the gcd
+ ** of f_LO1, f_LO2 and the edge value. Use the larger of this
+ ** gcd-based scale factor or f_Scale.
+ */
+ lo_gcd = MT2063_gcd(f_LO1, f_LO2);
+ gd_Scale = max((u32) MT2063_gcd(lo_gcd, d), f_Scale);
+ hgds = gd_Scale / 2;
+ gc_Scale = max((u32) MT2063_gcd(lo_gcd, c), f_Scale);
+ hgcs = gc_Scale / 2;
+ gf_Scale = max((u32) MT2063_gcd(lo_gcd, f), f_Scale);
+ hgfs = gf_Scale / 2;
+
+ n0 = DIV_ROUND_UP(f_LO2 - d, f_LO1 - f_LO2);
+
+	/* Check out all multiples of LO1 from n0 to pAS_Info->maxH1 */
+ for (n = n0; n <= pAS_Info->maxH1; ++n) {
+ md = (n * ((f_LO1 + hgds) / gd_Scale) -
+ ((d + hgds) / gd_Scale)) / ((f_LO2 + hgds) / gd_Scale);
+
+		/* If # of f_LO2 harmonics > pAS_Info->maxH1, then no spurs present */
+ if (md >= pAS_Info->maxH1)
+ break;
+
+ ma = (n * ((f_LO1 + hgds) / gd_Scale) +
+ ((d + hgds) / gd_Scale)) / ((f_LO2 + hgds) / gd_Scale);
+
+ /* If no spurs between +/- (f_out + f_IFBW/2), then try next harmonic */
+ if (md == ma)
+ continue;
+
+ mc = (n * ((f_LO1 + hgcs) / gc_Scale) -
+ ((c + hgcs) / gc_Scale)) / ((f_LO2 + hgcs) / gc_Scale);
+ if (mc != md) {
+ f_nsLO1 = (s32) (n * (f_LO1 / gc_Scale));
+ f_nsLO2 = (s32) (mc * (f_LO2 / gc_Scale));
+ f_Spur =
+ (gc_Scale * (f_nsLO1 - f_nsLO2)) +
+ n * (f_LO1 % gc_Scale) - mc * (f_LO2 % gc_Scale);
+
+ *fp = ((f_Spur - (s32) c) / (mc - n)) + 1;
+ *fm = (((s32) d - f_Spur) / (mc - n)) + 1;
+ return 1;
+ }
+
+ /* Location of Zero-IF-spur to be checked */
+ me = (n * ((f_LO1 + hgfs) / gf_Scale) +
+ ((f + hgfs) / gf_Scale)) / ((f_LO2 + hgfs) / gf_Scale);
+ mf = (n * ((f_LO1 + hgfs) / gf_Scale) -
+ ((f + hgfs) / gf_Scale)) / ((f_LO2 + hgfs) / gf_Scale);
+ if (me != mf) {
+ f_nsLO1 = n * (f_LO1 / gf_Scale);
+ f_nsLO2 = me * (f_LO2 / gf_Scale);
+ f_Spur =
+ (gf_Scale * (f_nsLO1 - f_nsLO2)) +
+ n * (f_LO1 % gf_Scale) - me * (f_LO2 % gf_Scale);
+
+ *fp = ((f_Spur + (s32) f) / (me - n)) + 1;
+ *fm = (((s32) f - f_Spur) / (me - n)) + 1;
+ return 1;
+ }
+
+ mb = (n * ((f_LO1 + hgcs) / gc_Scale) +
+ ((c + hgcs) / gc_Scale)) / ((f_LO2 + hgcs) / gc_Scale);
+ if (ma != mb) {
+ f_nsLO1 = n * (f_LO1 / gc_Scale);
+ f_nsLO2 = ma * (f_LO2 / gc_Scale);
+ f_Spur =
+ (gc_Scale * (f_nsLO1 - f_nsLO2)) +
+ n * (f_LO1 % gc_Scale) - ma * (f_LO2 % gc_Scale);
+
+ *fp = (((s32) d + f_Spur) / (ma - n)) + 1;
+ *fm = (-(f_Spur + (s32) c) / (ma - n)) + 1;
+ return 1;
+ }
+ }
+
+ /* No spurs found */
+ return 0;
+}
+
+/*
+ * MT2063_AvoidSpurs() - Main entry point to avoid spurs.
+ *                       Checks for existing spurs at the present LO1 and LO2
+ *                       frequencies and, if present, chooses a spur-free
+ *                       LO1/LO2 combination that tunes the same input/output
+ *                       frequencies.
+ */
+static u32 MT2063_AvoidSpurs(struct MT2063_AvoidSpursData_t *pAS_Info)
+{
+ u32 status = 0;
+ u32 fm, fp; /* restricted range on LO's */
+ pAS_Info->bSpurAvoided = 0;
+ pAS_Info->nSpursFound = 0;
+
+ dprintk(2, "\n");
+
+ if (pAS_Info->maxH1 == 0)
+ return 0;
+
+ /*
+ * Avoid LO Generated Spurs
+ *
+	 * Make sure that there are no LO-related spurs within the IF output
+ * bandwidth.
+ *
+ * If there is an LO spur in this band, start at the current IF1 frequency
+ * and work out until we find a spur-free frequency or run up against the
+ * 1st IF SAW band edge. Use temporary copies of fLO1 and fLO2 so that they
+ * will be unchanged if a spur-free setting is not found.
+ */
+ pAS_Info->bSpurPresent = IsSpurInBand(pAS_Info, &fm, &fp);
+ if (pAS_Info->bSpurPresent) {
+ u32 zfIF1 = pAS_Info->f_LO1 - pAS_Info->f_in; /* current attempt at a 1st IF */
+ u32 zfLO1 = pAS_Info->f_LO1; /* current attempt at an LO1 freq */
+ u32 zfLO2 = pAS_Info->f_LO2; /* current attempt at an LO2 freq */
+ u32 delta_IF1;
+ u32 new_IF1;
+
+ /*
+		 ** A spur was found; attempt to find a spur-free 1st IF
+ */
+ do {
+ pAS_Info->nSpursFound++;
+
+			/* Exclude the spur-affected region around the current 1st IF */
+ MT2063_AddExclZone(pAS_Info, zfIF1 - fm, zfIF1 + fp);
+
+ /* Choose next IF1 that is closest to f_IF1_CENTER */
+ new_IF1 = MT2063_ChooseFirstIF(pAS_Info);
+
+ if (new_IF1 > zfIF1) {
+ pAS_Info->f_LO1 += (new_IF1 - zfIF1);
+ pAS_Info->f_LO2 += (new_IF1 - zfIF1);
+ } else {
+ pAS_Info->f_LO1 -= (zfIF1 - new_IF1);
+ pAS_Info->f_LO2 -= (zfIF1 - new_IF1);
+ }
+ zfIF1 = new_IF1;
+
+ if (zfIF1 > pAS_Info->f_if1_Center)
+ delta_IF1 = zfIF1 - pAS_Info->f_if1_Center;
+ else
+ delta_IF1 = pAS_Info->f_if1_Center - zfIF1;
+
+ pAS_Info->bSpurPresent = IsSpurInBand(pAS_Info, &fm, &fp);
+ /*
+ * Continue while the new 1st IF is still within the 1st IF bandwidth
+ * and there is a spur in the band (again)
+ */
+ } while ((2 * delta_IF1 + pAS_Info->f_out_bw <= pAS_Info->f_if1_bw) && pAS_Info->bSpurPresent);
+
+ /*
+ * Use the LO-spur free values found. If the search went all
+ * the way to the 1st IF band edge and always found spurs, just
+ * leave the original choice. It's as "good" as any other.
+ */
+ if (pAS_Info->bSpurPresent == 1) {
+ status |= MT2063_SPUR_PRESENT_ERR;
+ pAS_Info->f_LO1 = zfLO1;
+ pAS_Info->f_LO2 = zfLO2;
+ } else
+ pAS_Info->bSpurAvoided = 1;
+ }
+
+ status |=
+ ((pAS_Info->
+ nSpursFound << MT2063_SPUR_SHIFT) & MT2063_SPUR_CNT_MASK);
+
+ return status;
+}
+
+/*
+ * Constants used by the tuning algorithm
+ */
+#define MT2063_REF_FREQ (16000000UL) /* Reference oscillator Frequency (in Hz) */
+#define MT2063_IF1_BW (22000000UL) /* The IF1 filter bandwidth (in Hz) */
+#define MT2063_TUNE_STEP_SIZE (50000UL) /* Tune in steps of 50 kHz */
+#define MT2063_SPUR_STEP_HZ (250000UL) /* Step size (in Hz) to move IF1 when avoiding spurs */
+#define MT2063_ZIF_BW (2000000UL) /* Zero-IF spur-free bandwidth (in Hz) */
+#define MT2063_MAX_HARMONICS_1 (15UL) /* Highest intra-tuner LO Spur Harmonic to be avoided */
+#define MT2063_MAX_HARMONICS_2 (5UL) /* Highest inter-tuner LO Spur Harmonic to be avoided */
+#define MT2063_MIN_LO_SEP (1000000UL) /* Minimum inter-tuner LO frequency separation */
+#define MT2063_LO1_FRACN_AVOID (0UL) /* LO1 FracN numerator avoid region (in Hz) */
+#define MT2063_LO2_FRACN_AVOID (199999UL) /* LO2 FracN numerator avoid region (in Hz) */
+#define MT2063_MIN_FIN_FREQ (44000000UL) /* Minimum input frequency (in Hz) */
+#define MT2063_MAX_FIN_FREQ (1100000000UL) /* Maximum input frequency (in Hz) */
+#define MT2063_MIN_FOUT_FREQ (36000000UL) /* Minimum output frequency (in Hz) */
+#define MT2063_MAX_FOUT_FREQ (57000000UL) /* Maximum output frequency (in Hz) */
+#define MT2063_MIN_DNC_FREQ (1293000000UL) /* Minimum LO2 frequency (in Hz) */
+#define MT2063_MAX_DNC_FREQ (1614000000UL) /* Maximum LO2 frequency (in Hz) */
+#define MT2063_MIN_UPC_FREQ (1396000000UL) /* Minimum LO1 frequency (in Hz) */
+#define MT2063_MAX_UPC_FREQ (2750000000UL) /* Maximum LO1 frequency (in Hz) */
+
+/*
+ * Define the supported Part/Rev codes for the MT2063
+ */
+#define MT2063_B0 (0x9B)
+#define MT2063_B1 (0x9C)
+#define MT2063_B2 (0x9D)
+#define MT2063_B3 (0x9E)
+
+/**
+ * mt2063_lockStatus - Checks to see if LO1 and LO2 are locked
+ *
+ * @state: struct mt2063_state pointer
+ *
+ * Returns 0 if there is no lock, a positive status value if both LOs are
+ * locked, and a negative value on error.
+ */
+static unsigned int mt2063_lockStatus(struct mt2063_state *state)
+{
+ const u32 nMaxWait = 100; /* wait a maximum of 100 msec */
+ const u32 nPollRate = 2; /* poll status bits every 2 ms */
+ const u32 nMaxLoops = nMaxWait / nPollRate;
+ const u8 LO1LK = 0x80;
+ u8 LO2LK = 0x08;
+ u32 status;
+ u32 nDelays = 0;
+
+ dprintk(2, "\n");
+
+ /* LO2 Lock bit was in a different place for B0 version */
+ if (state->tuner_id == MT2063_B0)
+ LO2LK = 0x40;
+
+ do {
+ status = mt2063_read(state, MT2063_REG_LO_STATUS,
+ &state->reg[MT2063_REG_LO_STATUS], 1);
+
+ if (status < 0)
+ return status;
+
+ if ((state->reg[MT2063_REG_LO_STATUS] & (LO1LK | LO2LK)) ==
+ (LO1LK | LO2LK)) {
+ return TUNER_STATUS_LOCKED | TUNER_STATUS_STEREO;
+ }
+ msleep(nPollRate); /* Wait between retries */
+ } while (++nDelays < nMaxLoops);
+
+ /*
+ * Got no lock or partial lock
+ */
+ return 0;
+}
+
+/*
+ * Constants for setting receiver modes.
+ * (6 modes defined at this time, enumerated by mt2063_delivery_sys)
+ * (DNC1GC & DNC2GC are the values used when the specific DNC output
+ * is selected; the other output is always off)
+ *
+ * enum mt2063_delivery_sys
+ * -------------+----------------------------------------------
+ * Mode 0 : | MT2063_CABLE_QAM
+ * Mode 1 : | MT2063_CABLE_ANALOG
+ * Mode 2 : | MT2063_OFFAIR_COFDM
+ * Mode 3 : | MT2063_OFFAIR_COFDM_SAWLESS
+ * Mode 4 : | MT2063_OFFAIR_ANALOG
+ * Mode 5 : | MT2063_OFFAIR_8VSB
+ * --------------+----------------------------------------------
+ *
+ * |<---------- Mode -------------->|
+ * Reg Field | 0 | 1 | 2 | 3 | 4 | 5 |
+ * ------------+-----+-----+-----+-----+-----+-----+
+ * RFAGCen | OFF | OFF | OFF | OFF | OFF | OFF
+ * LNARin | 0 | 0 | 3 | 3 | 3 | 3
+ * FIFFQen | 1 | 1 | 1 | 1 | 1 | 1
+ * FIFFq | 0 | 0 | 0 | 0 | 0 | 0
+ * DNC1gc | 0 | 0 | 0 | 0 | 0 | 0
+ * DNC2gc | 0 | 0 | 0 | 0 | 0 | 0
+ * GCU Auto | 1 | 1 | 1 | 1 | 1 | 1
+ * LNA max Atn | 31 | 31 | 31 | 31 | 31 | 31
+ * LNA Target | 44 | 43 | 43 | 43 | 43 | 43
+ * ign RF Ovl | 0 | 0 | 0 | 0 | 0 | 0
+ * RF max Atn | 31 | 31 | 31 | 31 | 31 | 31
+ * PD1 Target | 36 | 36 | 38 | 38 | 36 | 38
+ * ign FIF Ovl | 0 | 0 | 0 | 0 | 0 | 0
+ * FIF max Atn | 5 | 5 | 5 | 5 | 5 | 5
+ * PD2 Target | 40 | 33 | 42 | 42 | 33 | 42
+ */
+
+enum mt2063_delivery_sys {
+ MT2063_CABLE_QAM = 0,
+ MT2063_CABLE_ANALOG,
+ MT2063_OFFAIR_COFDM,
+ MT2063_OFFAIR_COFDM_SAWLESS,
+ MT2063_OFFAIR_ANALOG,
+ MT2063_OFFAIR_8VSB,
+ MT2063_NUM_RCVR_MODES
+};
+
+static const char *mt2063_mode_name[] = {
+ [MT2063_CABLE_QAM] = "digital cable",
+ [MT2063_CABLE_ANALOG] = "analog cable",
+ [MT2063_OFFAIR_COFDM] = "digital offair",
+ [MT2063_OFFAIR_COFDM_SAWLESS] = "digital offair without SAW",
+ [MT2063_OFFAIR_ANALOG] = "analog offair",
+ [MT2063_OFFAIR_8VSB] = "analog offair 8vsb",
+};
+
+static const u8 RFAGCEN[] = { 0, 0, 0, 0, 0, 0 };
+static const u8 LNARIN[] = { 0, 0, 3, 3, 3, 3 };
+static const u8 FIFFQEN[] = { 1, 1, 1, 1, 1, 1 };
+static const u8 FIFFQ[] = { 0, 0, 0, 0, 0, 0 };
+static const u8 DNC1GC[] = { 0, 0, 0, 0, 0, 0 };
+static const u8 DNC2GC[] = { 0, 0, 0, 0, 0, 0 };
+static const u8 ACLNAMAX[] = { 31, 31, 31, 31, 31, 31 };
+static const u8 LNATGT[] = { 44, 43, 43, 43, 43, 43 };
+static const u8 RFOVDIS[] = { 0, 0, 0, 0, 0, 0 };
+static const u8 ACRFMAX[] = { 31, 31, 31, 31, 31, 31 };
+static const u8 PD1TGT[] = { 36, 36, 38, 38, 36, 38 };
+static const u8 FIFOVDIS[] = { 0, 0, 0, 0, 0, 0 };
+static const u8 ACFIFMAX[] = { 29, 29, 29, 29, 29, 29 };
+static const u8 PD2TGT[] = { 40, 33, 38, 42, 30, 38 };
+
+/*
+ * mt2063_get_dnc_output_enable()
+ */
+static u32 mt2063_get_dnc_output_enable(struct mt2063_state *state,
+ enum MT2063_DNC_Output_Enable *pValue)
+{
+ dprintk(2, "\n");
+
+ if ((state->reg[MT2063_REG_DNC_GAIN] & 0x03) == 0x03) { /* if DNC1 is off */
+ if ((state->reg[MT2063_REG_VGA_GAIN] & 0x03) == 0x03) /* if DNC2 is off */
+ *pValue = MT2063_DNC_NONE;
+ else
+ *pValue = MT2063_DNC_2;
+ } else { /* DNC1 is on */
+ if ((state->reg[MT2063_REG_VGA_GAIN] & 0x03) == 0x03) /* if DNC2 is off */
+ *pValue = MT2063_DNC_1;
+ else
+ *pValue = MT2063_DNC_BOTH;
+ }
+ return 0;
+}
+
+/*
+ * mt2063_set_dnc_output_enable()
+ */
+static u32 mt2063_set_dnc_output_enable(struct mt2063_state *state,
+ enum MT2063_DNC_Output_Enable nValue)
+{
+ u32 status = 0; /* Status to be returned */
+ u8 val = 0;
+
+ dprintk(2, "\n");
+
+	/* select which DNC output is used */
+ switch (nValue) {
+ case MT2063_DNC_NONE:
+ val = (state->reg[MT2063_REG_DNC_GAIN] & 0xFC) | 0x03; /* Set DNC1GC=3 */
+ if (state->reg[MT2063_REG_DNC_GAIN] !=
+ val)
+ status |=
+ mt2063_setreg(state,
+ MT2063_REG_DNC_GAIN,
+ val);
+
+ val = (state->reg[MT2063_REG_VGA_GAIN] & 0xFC) | 0x03; /* Set DNC2GC=3 */
+ if (state->reg[MT2063_REG_VGA_GAIN] !=
+ val)
+ status |=
+ mt2063_setreg(state,
+ MT2063_REG_VGA_GAIN,
+ val);
+
+ val = (state->reg[MT2063_REG_RSVD_20] & ~0x40); /* Set PD2MUX=0 */
+ if (state->reg[MT2063_REG_RSVD_20] !=
+ val)
+ status |=
+ mt2063_setreg(state,
+ MT2063_REG_RSVD_20,
+ val);
+
+ break;
+ case MT2063_DNC_1:
+ val = (state->reg[MT2063_REG_DNC_GAIN] & 0xFC) | (DNC1GC[state->rcvr_mode] & 0x03); /* Set DNC1GC=x */
+ if (state->reg[MT2063_REG_DNC_GAIN] !=
+ val)
+ status |=
+ mt2063_setreg(state,
+ MT2063_REG_DNC_GAIN,
+ val);
+
+ val = (state->reg[MT2063_REG_VGA_GAIN] & 0xFC) | 0x03; /* Set DNC2GC=3 */
+ if (state->reg[MT2063_REG_VGA_GAIN] !=
+ val)
+ status |=
+ mt2063_setreg(state,
+ MT2063_REG_VGA_GAIN,
+ val);
+
+ val = (state->reg[MT2063_REG_RSVD_20] & ~0x40); /* Set PD2MUX=0 */
+ if (state->reg[MT2063_REG_RSVD_20] !=
+ val)
+ status |=
+ mt2063_setreg(state,
+ MT2063_REG_RSVD_20,
+ val);
+
+ break;
+ case MT2063_DNC_2:
+ val = (state->reg[MT2063_REG_DNC_GAIN] & 0xFC) | 0x03; /* Set DNC1GC=3 */
+ if (state->reg[MT2063_REG_DNC_GAIN] !=
+ val)
+ status |=
+ mt2063_setreg(state,
+ MT2063_REG_DNC_GAIN,
+ val);
+
+ val = (state->reg[MT2063_REG_VGA_GAIN] & 0xFC) | (DNC2GC[state->rcvr_mode] & 0x03); /* Set DNC2GC=x */
+ if (state->reg[MT2063_REG_VGA_GAIN] !=
+ val)
+ status |=
+ mt2063_setreg(state,
+ MT2063_REG_VGA_GAIN,
+ val);
+
+ val = (state->reg[MT2063_REG_RSVD_20] | 0x40); /* Set PD2MUX=1 */
+ if (state->reg[MT2063_REG_RSVD_20] !=
+ val)
+ status |=
+ mt2063_setreg(state,
+ MT2063_REG_RSVD_20,
+ val);
+
+ break;
+ case MT2063_DNC_BOTH:
+ val = (state->reg[MT2063_REG_DNC_GAIN] & 0xFC) | (DNC1GC[state->rcvr_mode] & 0x03); /* Set DNC1GC=x */
+ if (state->reg[MT2063_REG_DNC_GAIN] !=
+ val)
+ status |=
+ mt2063_setreg(state,
+ MT2063_REG_DNC_GAIN,
+ val);
+
+ val = (state->reg[MT2063_REG_VGA_GAIN] & 0xFC) | (DNC2GC[state->rcvr_mode] & 0x03); /* Set DNC2GC=x */
+ if (state->reg[MT2063_REG_VGA_GAIN] !=
+ val)
+ status |=
+ mt2063_setreg(state,
+ MT2063_REG_VGA_GAIN,
+ val);
+
+ val = (state->reg[MT2063_REG_RSVD_20] | 0x40); /* Set PD2MUX=1 */
+ if (state->reg[MT2063_REG_RSVD_20] !=
+ val)
+ status |=
+ mt2063_setreg(state,
+ MT2063_REG_RSVD_20,
+ val);
+
+ break;
+ default:
+ break;
+ }
+
+ return status;
+}
+
+/*
+ * MT2063_SetReceiverMode() - Set the MT2063 receiver mode according to
+ *			      the selected enum mt2063_delivery_sys type.
+ *
+ * (DNC1GC & DNC2GC are the values used when the specific DNC output
+ * is selected; the other output is always off)
+ *
+ * @state: ptr to mt2063_state structure
+ * @Mode:	desired receiver delivery system
+ *
+ * Note: Register cache must be valid for it to work
+ */
+
+static u32 MT2063_SetReceiverMode(struct mt2063_state *state,
+ enum mt2063_delivery_sys Mode)
+{
+ u32 status = 0; /* Status to be returned */
+ u8 val;
+ u32 longval;
+
+ dprintk(2, "\n");
+
+ if (Mode >= MT2063_NUM_RCVR_MODES)
+ status = -ERANGE;
+
+ /* RFAGCen */
+ if (status >= 0) {
+ val =
+ (state->
+ reg[MT2063_REG_PD1_TGT] & (u8) ~0x40) | (RFAGCEN[Mode]
+ ? 0x40 :
+ 0x00);
+ if (state->reg[MT2063_REG_PD1_TGT] != val)
+ status |= mt2063_setreg(state, MT2063_REG_PD1_TGT, val);
+ }
+
+ /* LNARin */
+ if (status >= 0) {
+ u8 val = (state->reg[MT2063_REG_CTRL_2C] & (u8) ~0x03) |
+ (LNARIN[Mode] & 0x03);
+ if (state->reg[MT2063_REG_CTRL_2C] != val)
+ status |= mt2063_setreg(state, MT2063_REG_CTRL_2C, val);
+ }
+
+ /* FIFFQEN and FIFFQ */
+ if (status >= 0) {
+ val =
+ (state->
+ reg[MT2063_REG_FIFF_CTRL2] & (u8) ~0xF0) |
+ (FIFFQEN[Mode] << 7) | (FIFFQ[Mode] << 4);
+ if (state->reg[MT2063_REG_FIFF_CTRL2] != val) {
+ status |=
+ mt2063_setreg(state, MT2063_REG_FIFF_CTRL2, val);
+ /* trigger FIFF calibration, needed after changing FIFFQ */
+ val =
+ (state->reg[MT2063_REG_FIFF_CTRL] | (u8) 0x01);
+ status |=
+ mt2063_setreg(state, MT2063_REG_FIFF_CTRL, val);
+ val =
+ (state->
+ reg[MT2063_REG_FIFF_CTRL] & (u8) ~0x01);
+ status |=
+ mt2063_setreg(state, MT2063_REG_FIFF_CTRL, val);
+ }
+ }
+
+ /* DNC1GC & DNC2GC */
+ status |= mt2063_get_dnc_output_enable(state, &longval);
+ status |= mt2063_set_dnc_output_enable(state, longval);
+
+ /* acLNAmax */
+ if (status >= 0) {
+ u8 val = (state->reg[MT2063_REG_LNA_OV] & (u8) ~0x1F) |
+ (ACLNAMAX[Mode] & 0x1F);
+ if (state->reg[MT2063_REG_LNA_OV] != val)
+ status |= mt2063_setreg(state, MT2063_REG_LNA_OV, val);
+ }
+
+ /* LNATGT */
+ if (status >= 0) {
+ u8 val = (state->reg[MT2063_REG_LNA_TGT] & (u8) ~0x3F) |
+ (LNATGT[Mode] & 0x3F);
+ if (state->reg[MT2063_REG_LNA_TGT] != val)
+ status |= mt2063_setreg(state, MT2063_REG_LNA_TGT, val);
+ }
+
+ /* ACRF */
+ if (status >= 0) {
+ u8 val = (state->reg[MT2063_REG_RF_OV] & (u8) ~0x1F) |
+ (ACRFMAX[Mode] & 0x1F);
+ if (state->reg[MT2063_REG_RF_OV] != val)
+ status |= mt2063_setreg(state, MT2063_REG_RF_OV, val);
+ }
+
+ /* PD1TGT */
+ if (status >= 0) {
+ u8 val = (state->reg[MT2063_REG_PD1_TGT] & (u8) ~0x3F) |
+ (PD1TGT[Mode] & 0x3F);
+ if (state->reg[MT2063_REG_PD1_TGT] != val)
+ status |= mt2063_setreg(state, MT2063_REG_PD1_TGT, val);
+ }
+
+ /* FIFATN */
+ if (status >= 0) {
+ u8 val = ACFIFMAX[Mode];
+ if (state->reg[MT2063_REG_PART_REV] != MT2063_B3 && val > 5)
+ val = 5;
+ val = (state->reg[MT2063_REG_FIF_OV] & (u8) ~0x1F) |
+ (val & 0x1F);
+ if (state->reg[MT2063_REG_FIF_OV] != val)
+ status |= mt2063_setreg(state, MT2063_REG_FIF_OV, val);
+ }
+
+ /* PD2TGT */
+ if (status >= 0) {
+ u8 val = (state->reg[MT2063_REG_PD2_TGT] & (u8) ~0x3F) |
+ (PD2TGT[Mode] & 0x3F);
+ if (state->reg[MT2063_REG_PD2_TGT] != val)
+ status |= mt2063_setreg(state, MT2063_REG_PD2_TGT, val);
+ }
+
+ /* Ignore ATN Overload */
+ if (status >= 0) {
+ val = (state->reg[MT2063_REG_LNA_TGT] & (u8) ~0x80) |
+ (RFOVDIS[Mode] ? 0x80 : 0x00);
+ if (state->reg[MT2063_REG_LNA_TGT] != val)
+ status |= mt2063_setreg(state, MT2063_REG_LNA_TGT, val);
+ }
+
+ /* Ignore FIF Overload */
+ if (status >= 0) {
+ val = (state->reg[MT2063_REG_PD1_TGT] & (u8) ~0x80) |
+ (FIFOVDIS[Mode] ? 0x80 : 0x00);
+ if (state->reg[MT2063_REG_PD1_TGT] != val)
+ status |= mt2063_setreg(state, MT2063_REG_PD1_TGT, val);
+ }
+
+ if (status >= 0) {
+ state->rcvr_mode = Mode;
+ dprintk(1, "mt2063 mode changed to %s\n",
+ mt2063_mode_name[state->rcvr_mode]);
+ }
+
+ return status;
+}
+
+/*
+ * MT2063_ClearPowerMaskBits() - Clears the power-down mask bits for various
+ * sections of the MT2063
+ *
+ * @Bits: Mask bits to be cleared.
+ *
+ * See definition of MT2063_Mask_Bits type for description
+ * of each of the power bits.
+ */
+static u32 MT2063_ClearPowerMaskBits(struct mt2063_state *state,
+ enum MT2063_Mask_Bits Bits)
+{
+ u32 status = 0;
+
+ dprintk(2, "\n");
+ Bits = (enum MT2063_Mask_Bits)(Bits & MT2063_ALL_SD); /* Only valid bits for this tuner */
+ if ((Bits & 0xFF00) != 0) {
+ state->reg[MT2063_REG_PWR_2] &= ~(u8) (Bits >> 8);
+ status |=
+ mt2063_write(state,
+ MT2063_REG_PWR_2,
+ &state->reg[MT2063_REG_PWR_2], 1);
+ }
+ if ((Bits & 0xFF) != 0) {
+ state->reg[MT2063_REG_PWR_1] &= ~(u8) (Bits & 0xFF);
+ status |=
+ mt2063_write(state,
+ MT2063_REG_PWR_1,
+ &state->reg[MT2063_REG_PWR_1], 1);
+ }
+
+ return status;
+}
+
+/*
+ * MT2063_SoftwareShutdown() - Enables or disables software shutdown function.
+ * When Shutdown is 1, any section whose power
+ * mask is set will be shutdown.
+ */
+static u32 MT2063_SoftwareShutdown(struct mt2063_state *state, u8 Shutdown)
+{
+ u32 status;
+
+ dprintk(2, "\n");
+ if (Shutdown == 1)
+ state->reg[MT2063_REG_PWR_1] |= 0x04;
+ else
+ state->reg[MT2063_REG_PWR_1] &= ~0x04;
+
+ status = mt2063_write(state,
+ MT2063_REG_PWR_1,
+ &state->reg[MT2063_REG_PWR_1], 1);
+
+ if (Shutdown != 1) {
+ state->reg[MT2063_REG_BYP_CTRL] =
+ (state->reg[MT2063_REG_BYP_CTRL] & 0x9F) | 0x40;
+ status |=
+ mt2063_write(state,
+ MT2063_REG_BYP_CTRL,
+ &state->reg[MT2063_REG_BYP_CTRL],
+ 1);
+ state->reg[MT2063_REG_BYP_CTRL] =
+ (state->reg[MT2063_REG_BYP_CTRL] & 0x9F);
+ status |=
+ mt2063_write(state,
+ MT2063_REG_BYP_CTRL,
+ &state->reg[MT2063_REG_BYP_CTRL],
+ 1);
+ }
+
+ return status;
+}
+
+static u32 MT2063_Round_fLO(u32 f_LO, u32 f_LO_Step, u32 f_ref)
+{
+ return f_ref * (f_LO / f_ref)
+ + f_LO_Step * (((f_LO % f_ref) + (f_LO_Step / 2)) / f_LO_Step);
+}
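+
+/*
+ * For example, with the driver defaults f_ref = 16 MHz and a 50 kHz
+ * f_LO_Step, an arbitrary request of 1220312345 Hz keeps the whole
+ * 76 * f_ref = 1216 MHz part and rounds the 4312345 Hz remainder to
+ * 4300000 Hz, so MT2063_Round_fLO() returns 1220300000 Hz, the nearest
+ * 50 kHz step.
+ */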
+
+/**
+ * MT2063_fLO_FractionalTerm() - Calculates the portion contributed by
+ *                               FracN / denom.  This function preserves
+ *                               maximum precision without risk of overflow.
+ *                               It accurately calculates f_ref * num / denom
+ *                               to within 1 Hz with fixed-point math.
+ *
+ * @f_ref:	SRO frequency.
+ * @num:	Fractional portion of the multiplier
+ * @denom:	Denominator portion of the ratio
+ *
+ * This calculation handles f_ref as two separate 14-bit fields.
+ * Therefore, a maximum value of 2^28-1 may safely be used for f_ref.
+ * This is the genesis of the magic number "14" and the magic mask value of
+ * 0x03FFF.
+ *
+ * This routine successfully handles denom values up to and including 2^18.
+ * Returns: f_ref * num / denom
+ */
+static u32 MT2063_fLO_FractionalTerm(u32 f_ref, u32 num, u32 denom)
+{
+ u32 t1 = (f_ref >> 14) * num;
+ u32 term1 = t1 / denom;
+ u32 loss = t1 % denom;
+ u32 term2 =
+ (((f_ref & 0x00003FFF) * num + (loss << 14)) + (denom / 2)) / denom;
+ return (term1 << 14) + term2;
+}
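+
+/*
+ * For example, MT2063_fLO_FractionalTerm(16000000, 21, 64) returns exactly
+ * 5250000 (16 MHz * 21 / 64).  The 14-bit split matters because a direct
+ * 32-bit product f_ref * num already overflows for num > 268 with a 16 MHz
+ * reference, while the LO2 numerator can be as large as 8191.
+ */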
+
+/*
+ * MT2063_CalcLO1Mult() - Calculates the integer divider value and the
+ *                        numerator value for a FracN PLL.
+ *
+ * This function assumes that the f_LO and f_Ref are
+ * evenly divisible by f_LO_Step.
+ *
+ * @Div: OUTPUT: Whole number portion of the multiplier
+ * @FracN: OUTPUT: Fractional portion of the multiplier
+ * @f_LO: desired LO frequency.
+ * @f_LO_Step: Minimum step size for the LO (in Hz).
+ * @f_Ref: SRO frequency.
+ *
+ * Returns: Recalculated LO frequency.
+ */
+static u32 MT2063_CalcLO1Mult(u32 *Div,
+ u32 *FracN,
+ u32 f_LO,
+ u32 f_LO_Step, u32 f_Ref)
+{
+ /* Calculate the whole number portion of the divider */
+ *Div = f_LO / f_Ref;
+
+ /* Calculate the numerator value (round to nearest f_LO_Step) */
+ *FracN =
+ (64 * (((f_LO % f_Ref) + (f_LO_Step / 2)) / f_LO_Step) +
+ (f_Ref / f_LO_Step / 2)) / (f_Ref / f_LO_Step);
+
+ return (f_Ref * (*Div)) + MT2063_fLO_FractionalTerm(f_Ref, *FracN, 64);
+}
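+
+/*
+ * For example, requesting f_LO = 1537 MHz with f_Ref = 16 MHz and
+ * f_LO_Step = f_Ref / 64 = 250 kHz yields *Div = 96 and *FracN = 4,
+ * i.e. LO1 = 16 MHz * (96 + 4/64) = 1537 MHz, which is also the
+ * frequency returned.
+ */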
+
+/**
+ * MT2063_CalcLO2Mult() - Calculates the integer divider value and the
+ *                        numerator value for a FracN PLL.
+ *
+ * This function assumes that the f_LO and f_Ref are
+ * evenly divisible by f_LO_Step.
+ *
+ * @Div: OUTPUT: Whole number portion of the multiplier
+ * @FracN: OUTPUT: Fractional portion of the multiplier
+ * @f_LO: desired LO frequency.
+ * @f_LO_Step: Minimum step size for the LO (in Hz).
+ * @f_Ref: SRO frequency.
+ *
+ * Returns: Recalculated LO frequency.
+ */
+static u32 MT2063_CalcLO2Mult(u32 *Div,
+ u32 *FracN,
+ u32 f_LO,
+ u32 f_LO_Step, u32 f_Ref)
+{
+ /* Calculate the whole number portion of the divider */
+ *Div = f_LO / f_Ref;
+
+ /* Calculate the numerator value (round to nearest f_LO_Step) */
+ *FracN =
+ (8191 * (((f_LO % f_Ref) + (f_LO_Step / 2)) / f_LO_Step) +
+ (f_Ref / f_LO_Step / 2)) / (f_Ref / f_LO_Step);
+
+ return (f_Ref * (*Div)) + MT2063_fLO_FractionalTerm(f_Ref, *FracN,
+ 8191);
+}
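+
+/*
+ * For example, the power-up default f_LO2 = 1486249786 Hz (programmed in
+ * mt2063_init() below) with f_Ref = 16 MHz and f_LO_Step = 50 kHz maps to
+ * *Div = 92 and *FracN = 7295, and the recalculated LO2 frequency comes
+ * back as 1486249786 Hz.
+ */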
+
+/*
+ * FindClearTuneFilter() - Calculate the correct ClearTune filter to be
+ * used for a given input frequency.
+ *
+ * @state: ptr to tuner data structure
+ * @f_in: RF input center frequency (in Hz).
+ *
+ * Returns: ClearTune filter number (0-31)
+ */
+static u32 FindClearTuneFilter(struct mt2063_state *state, u32 f_in)
+{
+ u32 RFBand;
+ u32 idx; /* index loop */
+
+ /*
+ ** Find RF Band setting
+ */
+	RFBand = 31;		/* default when f_in is above all crossover points */
+ for (idx = 0; idx < 31; ++idx) {
+ if (state->CTFiltMax[idx] >= f_in) {
+ RFBand = idx;
+ break;
+ }
+ }
+ return RFBand;
+}
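+
+/*
+ * For example, with the nominal cross-over table programmed in
+ * mt2063_init() (before its FCU-oscillator rescale), an input of 200 MHz
+ * falls between CTFiltMax[3] = 177.11 MHz and CTFiltMax[4] = 212.86 MHz,
+ * so filter number 4 is selected.
+ */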
+
+/*
+ * MT2063_Tune() - Change the tuner's tuned frequency to RFin.
+ */
+static u32 MT2063_Tune(struct mt2063_state *state, u32 f_in)
+{ /* RF input center frequency */
+
+ u32 status = 0;
+ u32 LO1; /* 1st LO register value */
+ u32 Num1; /* Numerator for LO1 reg. value */
+ u32 f_IF1; /* 1st IF requested */
+ u32 LO2; /* 2nd LO register value */
+ u32 Num2; /* Numerator for LO2 reg. value */
+ u32 ofLO1, ofLO2; /* last time's LO frequencies */
+ u8 fiffc = 0x80; /* FIFF center freq from tuner */
+ u32 fiffof; /* Offset from FIFF center freq */
+ const u8 LO1LK = 0x80; /* Mask for LO1 Lock bit */
+ u8 LO2LK = 0x08; /* Mask for LO2 Lock bit */
+ u8 val;
+ u32 RFBand;
+
+ dprintk(2, "\n");
+ /* Check the input and output frequency ranges */
+ if ((f_in < MT2063_MIN_FIN_FREQ) || (f_in > MT2063_MAX_FIN_FREQ))
+ return -EINVAL;
+
+ if ((state->AS_Data.f_out < MT2063_MIN_FOUT_FREQ)
+ || (state->AS_Data.f_out > MT2063_MAX_FOUT_FREQ))
+ return -EINVAL;
+
+ /*
+ * Save original LO1 and LO2 register values
+ */
+ ofLO1 = state->AS_Data.f_LO1;
+ ofLO2 = state->AS_Data.f_LO2;
+
+ /*
+ * Find and set RF Band setting
+ */
+ if (state->ctfilt_sw == 1) {
+ val = (state->reg[MT2063_REG_CTUNE_CTRL] | 0x08);
+ if (state->reg[MT2063_REG_CTUNE_CTRL] != val) {
+ status |=
+ mt2063_setreg(state, MT2063_REG_CTUNE_CTRL, val);
+ }
+ val = state->reg[MT2063_REG_CTUNE_OV];
+ RFBand = FindClearTuneFilter(state, f_in);
+ state->reg[MT2063_REG_CTUNE_OV] =
+ (u8) ((state->reg[MT2063_REG_CTUNE_OV] & ~0x1F)
+ | RFBand);
+ if (state->reg[MT2063_REG_CTUNE_OV] != val) {
+ status |=
+ mt2063_setreg(state, MT2063_REG_CTUNE_OV, val);
+ }
+ }
+
+ /*
+ * Read the FIFF Center Frequency from the tuner
+ */
+ if (status >= 0) {
+ status |=
+ mt2063_read(state,
+ MT2063_REG_FIFFC,
+ &state->reg[MT2063_REG_FIFFC], 1);
+ fiffc = state->reg[MT2063_REG_FIFFC];
+ }
+ /*
+	 * Assign the requested values
+ */
+ state->AS_Data.f_in = f_in;
+ /* Request a 1st IF such that LO1 is on a step size */
+ state->AS_Data.f_if1_Request =
+ MT2063_Round_fLO(state->AS_Data.f_if1_Request + f_in,
+ state->AS_Data.f_LO1_Step,
+ state->AS_Data.f_ref) - f_in;
+
+ /*
+ * Calculate frequency settings. f_IF1_FREQ + f_in is the
+ * desired LO1 frequency
+ */
+ MT2063_ResetExclZones(&state->AS_Data);
+
+ f_IF1 = MT2063_ChooseFirstIF(&state->AS_Data);
+
+ state->AS_Data.f_LO1 =
+ MT2063_Round_fLO(f_IF1 + f_in, state->AS_Data.f_LO1_Step,
+ state->AS_Data.f_ref);
+
+ state->AS_Data.f_LO2 =
+ MT2063_Round_fLO(state->AS_Data.f_LO1 - state->AS_Data.f_out - f_in,
+ state->AS_Data.f_LO2_Step, state->AS_Data.f_ref);
+
+ /*
+ * Check for any LO spurs in the output bandwidth and adjust
+ * the LO settings to avoid them if needed
+ */
+ status |= MT2063_AvoidSpurs(&state->AS_Data);
+ /*
+	 * MT2063_AvoidSpurs() may have changed the LO1 & LO2 values.
+ * Recalculate the LO frequencies and the values to be placed
+ * in the tuning registers.
+ */
+ state->AS_Data.f_LO1 =
+ MT2063_CalcLO1Mult(&LO1, &Num1, state->AS_Data.f_LO1,
+ state->AS_Data.f_LO1_Step, state->AS_Data.f_ref);
+ state->AS_Data.f_LO2 =
+ MT2063_Round_fLO(state->AS_Data.f_LO1 - state->AS_Data.f_out - f_in,
+ state->AS_Data.f_LO2_Step, state->AS_Data.f_ref);
+ state->AS_Data.f_LO2 =
+ MT2063_CalcLO2Mult(&LO2, &Num2, state->AS_Data.f_LO2,
+ state->AS_Data.f_LO2_Step, state->AS_Data.f_ref);
+
+ /*
+ * Check the upconverter and downconverter frequency ranges
+ */
+ if ((state->AS_Data.f_LO1 < MT2063_MIN_UPC_FREQ)
+ || (state->AS_Data.f_LO1 > MT2063_MAX_UPC_FREQ))
+ status |= MT2063_UPC_RANGE;
+ if ((state->AS_Data.f_LO2 < MT2063_MIN_DNC_FREQ)
+ || (state->AS_Data.f_LO2 > MT2063_MAX_DNC_FREQ))
+ status |= MT2063_DNC_RANGE;
+ /* LO2 Lock bit was in a different place for B0 version */
+ if (state->tuner_id == MT2063_B0)
+ LO2LK = 0x40;
+
+ /*
+ * If we have the same LO frequencies and we're already locked,
+ * then skip re-programming the LO registers.
+ */
+ if ((ofLO1 != state->AS_Data.f_LO1)
+ || (ofLO2 != state->AS_Data.f_LO2)
+ || ((state->reg[MT2063_REG_LO_STATUS] & (LO1LK | LO2LK)) !=
+ (LO1LK | LO2LK))) {
+ /*
+ * Calculate the FIFFOF register value
+ *
+ * IF1_Actual
+ * FIFFOF = ------------ - 8 * FIFFC - 4992
+ * f_ref/64
+ */
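+			/*
+			 * For example, if the 1st IF lands exactly on the
+			 * FIFF center (1536 MHz for FIFFC = 0x80 with the
+			 * 16 MHz reference), then 1536000000 / 250000 = 6144
+			 * and FIFFOF = 6144 - 8 * 128 - 4992 = 128.
+			 */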
+ fiffof =
+ (state->AS_Data.f_LO1 -
+ f_in) / (state->AS_Data.f_ref / 64) - 8 * (u32) fiffc -
+ 4992;
+ if (fiffof > 0xFF)
+ fiffof = 0xFF;
+
+ /*
+ * Place all of the calculated values into the local tuner
+ * register fields.
+ */
+ if (status >= 0) {
+ state->reg[MT2063_REG_LO1CQ_1] = (u8) (LO1 & 0xFF); /* DIV1q */
+ state->reg[MT2063_REG_LO1CQ_2] = (u8) (Num1 & 0x3F); /* NUM1q */
+ state->reg[MT2063_REG_LO2CQ_1] = (u8) (((LO2 & 0x7F) << 1) /* DIV2q */
+ |(Num2 >> 12)); /* NUM2q (hi) */
+ state->reg[MT2063_REG_LO2CQ_2] = (u8) ((Num2 & 0x0FF0) >> 4); /* NUM2q (mid) */
+ state->reg[MT2063_REG_LO2CQ_3] = (u8) (0xE0 | (Num2 & 0x000F)); /* NUM2q (lo) */
+
+ /*
+ * Now write out the computed register values
+ * IMPORTANT: There is a required order for writing
+ * (0x05 must follow all the others).
+ */
+ status |= mt2063_write(state, MT2063_REG_LO1CQ_1, &state->reg[MT2063_REG_LO1CQ_1], 5); /* 0x01 - 0x05 */
+ if (state->tuner_id == MT2063_B0) {
+ /* Re-write the one-shot bits to trigger the tune operation */
+ status |= mt2063_write(state, MT2063_REG_LO2CQ_3, &state->reg[MT2063_REG_LO2CQ_3], 1); /* 0x05 */
+ }
+ /* Write out the FIFF offset only if it's changing */
+ if (state->reg[MT2063_REG_FIFF_OFFSET] !=
+ (u8) fiffof) {
+ state->reg[MT2063_REG_FIFF_OFFSET] =
+ (u8) fiffof;
+ status |=
+ mt2063_write(state,
+ MT2063_REG_FIFF_OFFSET,
+ &state->
+ reg[MT2063_REG_FIFF_OFFSET],
+ 1);
+ }
+ }
+
+ /*
+ * Check for LO's locking
+ */
+
+ if (status < 0)
+ return status;
+
+ status = mt2063_lockStatus(state);
+ if (status < 0)
+ return status;
+ if (!status)
+ return -EINVAL; /* Couldn't lock */
+
+ /*
+ * If we locked OK, assign calculated data to mt2063_state structure
+ */
+ state->f_IF1_actual = state->AS_Data.f_LO1 - f_in;
+ }
+
+ return status;
+}
+
+static const u8 MT2063B0_defaults[] = {
+ /* Reg, Value */
+ 0x19, 0x05,
+ 0x1B, 0x1D,
+ 0x1C, 0x1F,
+ 0x1D, 0x0F,
+ 0x1E, 0x3F,
+ 0x1F, 0x0F,
+ 0x20, 0x3F,
+ 0x22, 0x21,
+ 0x23, 0x3F,
+ 0x24, 0x20,
+ 0x25, 0x3F,
+ 0x27, 0xEE,
+ 0x2C, 0x27, /* bit at 0x20 is cleared below */
+ 0x30, 0x03,
+ 0x2C, 0x07, /* bit at 0x20 is cleared here */
+ 0x2D, 0x87,
+ 0x2E, 0xAA,
+ 0x28, 0xE1, /* Set the FIFCrst bit here */
+ 0x28, 0xE0, /* Clear the FIFCrst bit here */
+ 0x00
+};
+
+/* writing 0x05 0xf0 sw-resets all registers, so we write only needed changes */
+static const u8 MT2063B1_defaults[] = {
+ /* Reg, Value */
+ 0x05, 0xF0,
+ 0x11, 0x10, /* New Enable AFCsd */
+ 0x19, 0x05,
+ 0x1A, 0x6C,
+ 0x1B, 0x24,
+ 0x1C, 0x28,
+ 0x1D, 0x8F,
+ 0x1E, 0x14,
+ 0x1F, 0x8F,
+ 0x20, 0x57,
+ 0x22, 0x21, /* New - ver 1.03 */
+ 0x23, 0x3C, /* New - ver 1.10 */
+ 0x24, 0x20, /* New - ver 1.03 */
+ 0x2C, 0x24, /* bit at 0x20 is cleared below */
+ 0x2D, 0x87, /* FIFFQ=0 */
+ 0x2F, 0xF3,
+ 0x30, 0x0C, /* New - ver 1.11 */
+ 0x31, 0x1B, /* New - ver 1.11 */
+ 0x2C, 0x04, /* bit at 0x20 is cleared here */
+ 0x28, 0xE1, /* Set the FIFCrst bit here */
+ 0x28, 0xE0, /* Clear the FIFCrst bit here */
+ 0x00
+};
+
+/* writing 0x05 0xf0 sw-resets all registers, so we write only needed changes */
+static const u8 MT2063B3_defaults[] = {
+ /* Reg, Value */
+ 0x05, 0xF0,
+ 0x19, 0x3D,
+ 0x2C, 0x24, /* bit at 0x20 is cleared below */
+ 0x2C, 0x04, /* bit at 0x20 is cleared here */
+ 0x28, 0xE1, /* Set the FIFCrst bit here */
+ 0x28, 0xE0, /* Clear the FIFCrst bit here */
+ 0x00
+};
+
+static int mt2063_init(struct dvb_frontend *fe)
+{
+ u32 status;
+ struct mt2063_state *state = fe->tuner_priv;
+ u8 all_resets = 0xF0; /* reset/load bits */
+ const u8 *def = NULL;
+ char *step;
+ u32 FCRUN;
+ s32 maxReads;
+ u32 fcu_osc;
+ u32 i;
+
+ dprintk(2, "\n");
+
+ state->rcvr_mode = MT2063_CABLE_QAM;
+
+ /* Read the Part/Rev code from the tuner */
+ status = mt2063_read(state, MT2063_REG_PART_REV,
+ &state->reg[MT2063_REG_PART_REV], 1);
+ if (status < 0) {
+ printk(KERN_ERR "Can't read mt2063 part ID\n");
+ return status;
+ }
+
+ /* Check the part/rev code */
+ switch (state->reg[MT2063_REG_PART_REV]) {
+ case MT2063_B0:
+ step = "B0";
+ break;
+ case MT2063_B1:
+ step = "B1";
+ break;
+ case MT2063_B2:
+ step = "B2";
+ break;
+ case MT2063_B3:
+ step = "B3";
+ break;
+ default:
+ printk(KERN_ERR "mt2063: Unknown mt2063 device ID (0x%02x)\n",
+ state->reg[MT2063_REG_PART_REV]);
+ return -ENODEV; /* Wrong tuner Part/Rev code */
+ }
+
+ /* Check the 2nd byte of the Part/Rev code from the tuner */
+ status = mt2063_read(state, MT2063_REG_RSVD_3B,
+ &state->reg[MT2063_REG_RSVD_3B], 1);
+
+ /* b7 != 0 ==> NOT MT2063 */
+ if (status < 0 || ((state->reg[MT2063_REG_RSVD_3B] & 0x80) != 0x00)) {
+ printk(KERN_ERR "mt2063: Unknown part ID (0x%02x%02x)\n",
+ state->reg[MT2063_REG_PART_REV],
+ state->reg[MT2063_REG_RSVD_3B]);
+ return -ENODEV; /* Wrong tuner Part/Rev code */
+ }
+
+ printk(KERN_INFO "mt2063: detected a mt2063 %s\n", step);
+
+ /* Reset the tuner */
+ status = mt2063_write(state, MT2063_REG_LO2CQ_3, &all_resets, 1);
+ if (status < 0)
+ return status;
+
+ /* change all of the default values that vary from the HW reset values */
+ /* def = (state->reg[PART_REV] == MT2063_B0) ? MT2063B0_defaults : MT2063B1_defaults; */
+ switch (state->reg[MT2063_REG_PART_REV]) {
+ case MT2063_B3:
+ def = MT2063B3_defaults;
+ break;
+
+ case MT2063_B1:
+ def = MT2063B1_defaults;
+ break;
+
+ case MT2063_B0:
+ def = MT2063B0_defaults;
+ break;
+
+ default:
+ return -ENODEV;
+ break;
+ }
+
+ while (status >= 0 && *def) {
+ u8 reg = *def++;
+ u8 val = *def++;
+ status = mt2063_write(state, reg, &val, 1);
+ }
+ if (status < 0)
+ return status;
+
+ /* Wait for FIFF location to complete. */
+ FCRUN = 1;
+ maxReads = 10;
+ while (status >= 0 && (FCRUN != 0) && (maxReads-- > 0)) {
+ msleep(2);
+ status = mt2063_read(state,
+ MT2063_REG_XO_STATUS,
+ &state->
+ reg[MT2063_REG_XO_STATUS], 1);
+ FCRUN = (state->reg[MT2063_REG_XO_STATUS] & 0x40) >> 6;
+ }
+
+ if (FCRUN != 0 || status < 0)
+ return -ENODEV;
+
+ status = mt2063_read(state,
+ MT2063_REG_FIFFC,
+ &state->reg[MT2063_REG_FIFFC], 1);
+ if (status < 0)
+ return status;
+
+ /* Read back all the registers from the tuner */
+ status = mt2063_read(state,
+ MT2063_REG_PART_REV,
+ state->reg, MT2063_REG_END_REGS);
+ if (status < 0)
+ return status;
+
+ /* Initialize the tuner state. */
+ state->tuner_id = state->reg[MT2063_REG_PART_REV];
+ state->AS_Data.f_ref = MT2063_REF_FREQ;
+ state->AS_Data.f_if1_Center = (state->AS_Data.f_ref / 8) *
+ ((u32) state->reg[MT2063_REG_FIFFC] + 640);
+ state->AS_Data.f_if1_bw = MT2063_IF1_BW;
+ state->AS_Data.f_out = 43750000UL;
+ state->AS_Data.f_out_bw = 6750000UL;
+ state->AS_Data.f_zif_bw = MT2063_ZIF_BW;
+ state->AS_Data.f_LO1_Step = state->AS_Data.f_ref / 64;
+ state->AS_Data.f_LO2_Step = MT2063_TUNE_STEP_SIZE;
+ state->AS_Data.maxH1 = MT2063_MAX_HARMONICS_1;
+ state->AS_Data.maxH2 = MT2063_MAX_HARMONICS_2;
+ state->AS_Data.f_min_LO_Separation = MT2063_MIN_LO_SEP;
+ state->AS_Data.f_if1_Request = state->AS_Data.f_if1_Center;
+ state->AS_Data.f_LO1 = 2181000000UL;
+ state->AS_Data.f_LO2 = 1486249786UL;
+ state->f_IF1_actual = state->AS_Data.f_if1_Center;
+ state->AS_Data.f_in = state->AS_Data.f_LO1 - state->f_IF1_actual;
+ state->AS_Data.f_LO1_FracN_Avoid = MT2063_LO1_FRACN_AVOID;
+ state->AS_Data.f_LO2_FracN_Avoid = MT2063_LO2_FRACN_AVOID;
+ state->num_regs = MT2063_REG_END_REGS;
+ state->AS_Data.avoidDECT = MT2063_AVOID_BOTH;
+ state->ctfilt_sw = 0;
+
+ state->CTFiltMax[0] = 69230000;
+ state->CTFiltMax[1] = 105770000;
+ state->CTFiltMax[2] = 140350000;
+ state->CTFiltMax[3] = 177110000;
+ state->CTFiltMax[4] = 212860000;
+ state->CTFiltMax[5] = 241130000;
+ state->CTFiltMax[6] = 274370000;
+ state->CTFiltMax[7] = 309820000;
+ state->CTFiltMax[8] = 342450000;
+ state->CTFiltMax[9] = 378870000;
+ state->CTFiltMax[10] = 416210000;
+ state->CTFiltMax[11] = 456500000;
+ state->CTFiltMax[12] = 495790000;
+ state->CTFiltMax[13] = 534530000;
+ state->CTFiltMax[14] = 572610000;
+ state->CTFiltMax[15] = 598970000;
+ state->CTFiltMax[16] = 635910000;
+ state->CTFiltMax[17] = 672130000;
+ state->CTFiltMax[18] = 714840000;
+ state->CTFiltMax[19] = 739660000;
+ state->CTFiltMax[20] = 770410000;
+ state->CTFiltMax[21] = 814660000;
+ state->CTFiltMax[22] = 846950000;
+ state->CTFiltMax[23] = 867820000;
+ state->CTFiltMax[24] = 915980000;
+ state->CTFiltMax[25] = 947450000;
+ state->CTFiltMax[26] = 983110000;
+ state->CTFiltMax[27] = 1021630000;
+ state->CTFiltMax[28] = 1061870000;
+ state->CTFiltMax[29] = 1098330000;
+ state->CTFiltMax[30] = 1138990000;
+
+ /*
+ ** Fetch the FCU osc value and use it and the fRef value to
+ ** scale all of the Band Max values
+ */
+
+ state->reg[MT2063_REG_CTUNE_CTRL] = 0x0A;
+ status = mt2063_write(state, MT2063_REG_CTUNE_CTRL,
+ &state->reg[MT2063_REG_CTUNE_CTRL], 1);
+ if (status < 0)
+ return status;
+
+ /* Read the ClearTune filter calibration value */
+ status = mt2063_read(state, MT2063_REG_FIFFC,
+ &state->reg[MT2063_REG_FIFFC], 1);
+ if (status < 0)
+ return status;
+
+ fcu_osc = state->reg[MT2063_REG_FIFFC];
+
+ state->reg[MT2063_REG_CTUNE_CTRL] = 0x00;
+ status = mt2063_write(state, MT2063_REG_CTUNE_CTRL,
+ &state->reg[MT2063_REG_CTUNE_CTRL], 1);
+ if (status < 0)
+ return status;
+
+ /* Adjust each of the values in the ClearTune filter cross-over table */
+ for (i = 0; i < 31; i++)
+ state->CTFiltMax[i] = (state->CTFiltMax[i] / 768) * (fcu_osc + 640);
+
+ status = MT2063_SoftwareShutdown(state, 1);
+ if (status < 0)
+ return status;
+ status = MT2063_ClearPowerMaskBits(state, MT2063_ALL_SD);
+ if (status < 0)
+ return status;
+
+ state->init = true;
+
+ return 0;
+}
+
+static int mt2063_get_status(struct dvb_frontend *fe, u32 *tuner_status)
+{
+ struct mt2063_state *state = fe->tuner_priv;
+ int status;
+
+ dprintk(2, "\n");
+
+ if (!state->init)
+ return -ENODEV;
+
+ *tuner_status = 0;
+ status = mt2063_lockStatus(state);
+ if (status < 0)
+ return status;
+ if (status)
+ *tuner_status = TUNER_STATUS_LOCKED;
+
+ dprintk(1, "Tuner status: %d", *tuner_status);
+
+ return 0;
+}
+
+static int mt2063_release(struct dvb_frontend *fe)
+{
+ struct mt2063_state *state = fe->tuner_priv;
+
+ dprintk(2, "\n");
+
+ fe->tuner_priv = NULL;
+ kfree(state);
+
+ return 0;
+}
+
+static int mt2063_set_analog_params(struct dvb_frontend *fe,
+ struct analog_parameters *params)
+{
+ struct mt2063_state *state = fe->tuner_priv;
+ s32 pict_car;
+ s32 pict2chanb_vsb;
+ s32 ch_bw;
+ s32 if_mid;
+ s32 rcvr_mode;
+ int status;
+
+ dprintk(2, "\n");
+
+ if (!state->init) {
+ status = mt2063_init(fe);
+ if (status < 0)
+ return status;
+ }
+
+ switch (params->mode) {
+ case V4L2_TUNER_RADIO:
+ pict_car = 38900000;
+ ch_bw = 8000000;
+ pict2chanb_vsb = -(ch_bw / 2);
+ rcvr_mode = MT2063_OFFAIR_ANALOG;
+ break;
+ case V4L2_TUNER_ANALOG_TV:
+ rcvr_mode = MT2063_CABLE_ANALOG;
+ if (params->std & ~V4L2_STD_MN) {
+ pict_car = 38900000;
+ ch_bw = 6000000;
+ pict2chanb_vsb = -1250000;
+ } else if (params->std & V4L2_STD_PAL_G) {
+ pict_car = 38900000;
+ ch_bw = 7000000;
+ pict2chanb_vsb = -1250000;
+ } else { /* PAL/SECAM standards */
+ pict_car = 38900000;
+ ch_bw = 8000000;
+ pict2chanb_vsb = -1250000;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ if_mid = pict_car - (pict2chanb_vsb + (ch_bw / 2));
+
+ state->AS_Data.f_LO2_Step = 125000; /* FIXME: probably 5000 for FM */
+ state->AS_Data.f_out = if_mid;
+ state->AS_Data.f_out_bw = ch_bw + 750000;
+ status = MT2063_SetReceiverMode(state, rcvr_mode);
+ if (status < 0)
+ return status;
+
+ dprintk(1, "Tuning to frequency: %d, bandwidth %d, foffset %d\n",
+ params->frequency, ch_bw, pict2chanb_vsb);
+
+ status = MT2063_Tune(state, (params->frequency + (pict2chanb_vsb + (ch_bw / 2))));
+ if (status < 0)
+ return status;
+
+ state->frequency = params->frequency;
+ return 0;
+}
+
+/*
+ * As defined in EN 300 429, the DVB-C roll-off factor is 0.15.
+ * So, the amount of needed bandwidth is given by:
+ * Bw = Symbol_rate * (1 + 0.15)
+ * As such, the maximum symbol rate supported by 6 MHz is given by:
+ * max_symbol_rate = 6 MHz / 1.15 = 5217391 Bauds
+ */
+#define MAX_SYMBOL_RATE_6MHz 5217391
+
+static int mt2063_set_params(struct dvb_frontend *fe)
+{
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ struct mt2063_state *state = fe->tuner_priv;
+ int status;
+ s32 pict_car;
+ s32 pict2chanb_vsb;
+ s32 ch_bw;
+ s32 if_mid;
+ s32 rcvr_mode;
+
+ if (!state->init) {
+ status = mt2063_init(fe);
+ if (status < 0)
+ return status;
+ }
+
+ dprintk(2, "\n");
+
+ if (c->bandwidth_hz == 0)
+ return -EINVAL;
+ if (c->bandwidth_hz <= 6000000)
+ ch_bw = 6000000;
+ else if (c->bandwidth_hz <= 7000000)
+ ch_bw = 7000000;
+ else
+ ch_bw = 8000000;
+
+ switch (c->delivery_system) {
+ case SYS_DVBT:
+ rcvr_mode = MT2063_OFFAIR_COFDM;
+ pict_car = 36125000;
+ pict2chanb_vsb = -(ch_bw / 2);
+ break;
+ case SYS_DVBC_ANNEX_A:
+ case SYS_DVBC_ANNEX_C:
+ rcvr_mode = MT2063_CABLE_QAM;
+ pict_car = 36125000;
+ pict2chanb_vsb = -(ch_bw / 2);
+ break;
+ default:
+ return -EINVAL;
+ }
+ if_mid = pict_car - (pict2chanb_vsb + (ch_bw / 2));
+
+ state->AS_Data.f_LO2_Step = 125000; /* FIXME: probably 5000 for FM */
+ state->AS_Data.f_out = if_mid;
+ state->AS_Data.f_out_bw = ch_bw + 750000;
+ status = MT2063_SetReceiverMode(state, rcvr_mode);
+ if (status < 0)
+ return status;
+
+ dprintk(1, "Tuning to frequency: %d, bandwidth %d, foffset %d\n",
+ c->frequency, ch_bw, pict2chanb_vsb);
+
+ status = MT2063_Tune(state, (c->frequency + (pict2chanb_vsb + (ch_bw / 2))));
+
+ if (status < 0)
+ return status;
+
+ state->frequency = c->frequency;
+ return 0;
+}
+
+static int mt2063_get_if_frequency(struct dvb_frontend *fe, u32 *freq)
+{
+ struct mt2063_state *state = fe->tuner_priv;
+
+ dprintk(2, "\n");
+
+ if (!state->init)
+ return -ENODEV;
+
+ *freq = state->AS_Data.f_out;
+
+ dprintk(1, "IF frequency: %d\n", *freq);
+
+ return 0;
+}
+
+static int mt2063_get_bandwidth(struct dvb_frontend *fe, u32 *bw)
+{
+ struct mt2063_state *state = fe->tuner_priv;
+
+ dprintk(2, "\n");
+
+ if (!state->init)
+ return -ENODEV;
+
+ *bw = state->AS_Data.f_out_bw - 750000;
+
+ dprintk(1, "bandwidth: %d\n", *bw);
+
+ return 0;
+}
+
+static struct dvb_tuner_ops mt2063_ops = {
+ .info = {
+ .name = "MT2063 Silicon Tuner",
+ .frequency_min = 45000000,
+ .frequency_max = 850000000,
+ .frequency_step = 0,
+ },
+
+ .init = mt2063_init,
+ .sleep = MT2063_Sleep,
+ .get_status = mt2063_get_status,
+ .set_analog_params = mt2063_set_analog_params,
+ .set_params = mt2063_set_params,
+ .get_if_frequency = mt2063_get_if_frequency,
+ .get_bandwidth = mt2063_get_bandwidth,
+ .release = mt2063_release,
+};
+
+struct dvb_frontend *mt2063_attach(struct dvb_frontend *fe,
+ struct mt2063_config *config,
+ struct i2c_adapter *i2c)
+{
+ struct mt2063_state *state = NULL;
+
+ dprintk(2, "\n");
+
+ state = kzalloc(sizeof(struct mt2063_state), GFP_KERNEL);
+ if (state == NULL)
+ goto error;
+
+ state->config = config;
+ state->i2c = i2c;
+ state->frontend = fe;
+ state->reference = config->refclock / 1000; /* kHz */
+ fe->tuner_priv = state;
+ fe->ops.tuner_ops = mt2063_ops;
+
+ printk(KERN_INFO "%s: Attaching MT2063\n", __func__);
+ return fe;
+
+error:
+ kfree(state);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(mt2063_attach);
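+
+/*
+ * Typical attach sequence from a bridge driver, as a sketch only (the
+ * frontend, I2C adapter, reference clock and tuner address below are
+ * placeholders, not values mandated by this driver):
+ *
+ *	static struct mt2063_config mt2063_cfg = {
+ *		.tuner_address	= 0x60,
+ *		.refclock	= 36125000,
+ *	};
+ *
+ *	if (!mt2063_attach(fe, &mt2063_cfg, &dev->i2c_adap))
+ *		return -ENODEV;
+ */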
+
+/*
+ * Ancillary routines visible outside mt2063
+ * FIXME: Remove them in favor of using standard tuner callbacks
+ */
+unsigned int tuner_MT2063_SoftwareShutdown(struct dvb_frontend *fe)
+{
+ struct mt2063_state *state = fe->tuner_priv;
+ int err = 0;
+
+ dprintk(2, "\n");
+
+ err = MT2063_SoftwareShutdown(state, 1);
+ if (err < 0)
+ printk(KERN_ERR "%s: Couldn't shutdown\n", __func__);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(tuner_MT2063_SoftwareShutdown);
+
+unsigned int tuner_MT2063_ClearPowerMaskBits(struct dvb_frontend *fe)
+{
+ struct mt2063_state *state = fe->tuner_priv;
+ int err = 0;
+
+ dprintk(2, "\n");
+
+ err = MT2063_ClearPowerMaskBits(state, MT2063_ALL_SD);
+ if (err < 0)
+ printk(KERN_ERR "%s: Invalid parameter\n", __func__);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(tuner_MT2063_ClearPowerMaskBits);
+
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_DESCRIPTION("MT2063 Silicon tuner");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/common/tuners/mt2063.h b/drivers/media/common/tuners/mt2063.h
new file mode 100644
index 000000000000..62d0e8ec4e99
--- /dev/null
+++ b/drivers/media/common/tuners/mt2063.h
@@ -0,0 +1,36 @@
+#ifndef __MT2063_H__
+#define __MT2063_H__
+
+#include "dvb_frontend.h"
+
+struct mt2063_config {
+ u8 tuner_address;
+ u32 refclock;
+};
+
+#if defined(CONFIG_MEDIA_TUNER_MT2063) || (defined(CONFIG_MEDIA_TUNER_MT2063_MODULE) && defined(MODULE))
+struct dvb_frontend *mt2063_attach(struct dvb_frontend *fe,
+ struct mt2063_config *config,
+ struct i2c_adapter *i2c);
+
+#else
+
+static inline struct dvb_frontend *mt2063_attach(struct dvb_frontend *fe,
+ struct mt2063_config *config,
+ struct i2c_adapter *i2c)
+{
+ printk(KERN_WARNING "%s: Driver disabled by Kconfig\n", __func__);
+ return NULL;
+}
+
+int mt2063_setTune(struct dvb_frontend *fe, u32 f_in,
+ u32 bw_in,
+ enum MTTune_atv_standard tv_type);
+
+/* FIXME: Should use the standard DVB attachment interfaces */
+unsigned int tuner_MT2063_SoftwareShutdown(struct dvb_frontend *fe);
+unsigned int tuner_MT2063_ClearPowerMaskBits(struct dvb_frontend *fe);
+
+#endif /* CONFIG_MEDIA_TUNER_MT2063 */
+
+#endif /* __MT2063_H__ */
diff --git a/drivers/media/common/tuners/mt2131.c b/drivers/media/common/tuners/mt2131.c
index a4f830bb25d1..f83b0c1ea6c8 100644
--- a/drivers/media/common/tuners/mt2131.c
+++ b/drivers/media/common/tuners/mt2131.c
@@ -92,9 +92,9 @@ static int mt2131_writeregs(struct mt2131_priv *priv,u8 *buf, u8 len)
return 0;
}
-static int mt2131_set_params(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *params)
+static int mt2131_set_params(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct mt2131_priv *priv;
int ret=0, i;
u32 freq;
@@ -105,12 +105,8 @@ static int mt2131_set_params(struct dvb_frontend *fe,
u8 lockval = 0;
priv = fe->tuner_priv;
- if (fe->ops.info.type == FE_OFDM)
- priv->bandwidth = params->u.ofdm.bandwidth;
- else
- priv->bandwidth = 0;
- freq = params->frequency / 1000; // Hz -> kHz
+ freq = c->frequency / 1000; /* Hz -> kHz */
dprintk(1, "%s() freq=%d\n", __func__, freq);
f_lo1 = freq + MT2131_IF1 * 1000;
@@ -193,14 +189,6 @@ static int mt2131_get_frequency(struct dvb_frontend *fe, u32 *frequency)
return 0;
}
-static int mt2131_get_bandwidth(struct dvb_frontend *fe, u32 *bandwidth)
-{
- struct mt2131_priv *priv = fe->tuner_priv;
- dprintk(1, "%s()\n", __func__);
- *bandwidth = priv->bandwidth;
- return 0;
-}
-
static int mt2131_get_status(struct dvb_frontend *fe, u32 *status)
{
struct mt2131_priv *priv = fe->tuner_priv;
@@ -263,7 +251,6 @@ static const struct dvb_tuner_ops mt2131_tuner_ops = {
.set_params = mt2131_set_params,
.get_frequency = mt2131_get_frequency,
- .get_bandwidth = mt2131_get_bandwidth,
.get_status = mt2131_get_status
};
@@ -281,7 +268,6 @@ struct dvb_frontend * mt2131_attach(struct dvb_frontend *fe,
return NULL;
priv->cfg = cfg;
- priv->bandwidth = 6000000; /* 6MHz */
priv->i2c = i2c;
if (mt2131_readreg(priv, 0, &id) != 0) {
diff --git a/drivers/media/common/tuners/mt2131_priv.h b/drivers/media/common/tuners/mt2131_priv.h
index 4e05a67e88c1..62aeedf5c550 100644
--- a/drivers/media/common/tuners/mt2131_priv.h
+++ b/drivers/media/common/tuners/mt2131_priv.h
@@ -38,7 +38,6 @@ struct mt2131_priv {
struct i2c_adapter *i2c;
u32 frequency;
- u32 bandwidth;
};
#endif /* __MT2131_PRIV_H__ */
diff --git a/drivers/media/common/tuners/mt2266.c b/drivers/media/common/tuners/mt2266.c
index 25a8ea342c46..bca4d75e42d4 100644
--- a/drivers/media/common/tuners/mt2266.c
+++ b/drivers/media/common/tuners/mt2266.c
@@ -122,8 +122,9 @@ static u8 mt2266_vhf[] = { 0x1d, 0xfe, 0x00, 0x00, 0xb4, 0x03, 0xa5, 0xa5,
#define FREF 30000 // Quartz oscillator 30 MHz
-static int mt2266_set_params(struct dvb_frontend *fe, struct dvb_frontend_parameters *params)
+static int mt2266_set_params(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct mt2266_priv *priv;
int ret=0;
u32 freq;
@@ -135,32 +136,32 @@ static int mt2266_set_params(struct dvb_frontend *fe, struct dvb_frontend_parame
priv = fe->tuner_priv;
- freq = params->frequency / 1000; // Hz -> kHz
+	freq = c->frequency / 1000;	/* Hz -> kHz */
if (freq < 470000 && freq > 230000)
return -EINVAL; /* Gap between VHF and UHF bands */
- priv->bandwidth = (fe->ops.info.type == FE_OFDM) ? params->u.ofdm.bandwidth : 0;
- priv->frequency = freq * 1000;
+ priv->frequency = c->frequency;
tune = 2 * freq * (8192/16) / (FREF/16);
band = (freq < 300000) ? MT2266_VHF : MT2266_UHF;
if (band == MT2266_VHF)
tune *= 2;
- switch (params->u.ofdm.bandwidth) {
- case BANDWIDTH_6_MHZ:
+ switch (c->bandwidth_hz) {
+ case 6000000:
mt2266_writeregs(priv, mt2266_init_6mhz,
sizeof(mt2266_init_6mhz));
break;
- case BANDWIDTH_7_MHZ:
- mt2266_writeregs(priv, mt2266_init_7mhz,
- sizeof(mt2266_init_7mhz));
- break;
- case BANDWIDTH_8_MHZ:
- default:
+ case 8000000:
mt2266_writeregs(priv, mt2266_init_8mhz,
sizeof(mt2266_init_8mhz));
break;
+ case 7000000:
+ default:
+ mt2266_writeregs(priv, mt2266_init_7mhz,
+ sizeof(mt2266_init_7mhz));
+ break;
}
+ priv->bandwidth = c->bandwidth_hz;
if (band == MT2266_VHF && priv->band == MT2266_UHF) {
dprintk("Switch from UHF to VHF");
diff --git a/drivers/media/common/tuners/mxl5005s.c b/drivers/media/common/tuners/mxl5005s.c
index 54be9e6faaaf..6133315fb0e3 100644
--- a/drivers/media/common/tuners/mxl5005s.c
+++ b/drivers/media/common/tuners/mxl5005s.c
@@ -3979,54 +3979,47 @@ static int mxl5005s_AssignTunerMode(struct dvb_frontend *fe, u32 mod_type,
return 0;
}
-static int mxl5005s_set_params(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *params)
+static int mxl5005s_set_params(struct dvb_frontend *fe)
{
struct mxl5005s_state *state = fe->tuner_priv;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ u32 delsys = c->delivery_system;
+ u32 bw = c->bandwidth_hz;
u32 req_mode, req_bw = 0;
int ret;
dprintk(1, "%s()\n", __func__);
- if (fe->ops.info.type == FE_ATSC) {
- switch (params->u.vsb.modulation) {
- case VSB_8:
- req_mode = MXL_ATSC; break;
- default:
- case QAM_64:
- case QAM_256:
- case QAM_AUTO:
- req_mode = MXL_QAM; break;
- }
- } else
+ switch (delsys) {
+ case SYS_ATSC:
+ req_mode = MXL_ATSC;
+ req_bw = MXL5005S_BANDWIDTH_6MHZ;
+ break;
+ case SYS_DVBC_ANNEX_B:
+ req_mode = MXL_QAM;
+ req_bw = MXL5005S_BANDWIDTH_6MHZ;
+ break;
+ default: /* Assume DVB-T */
req_mode = MXL_DVBT;
-
- /* Change tuner for new modulation type if reqd */
- if (req_mode != state->current_mode) {
- switch (req_mode) {
- case MXL_ATSC:
- case MXL_QAM:
- req_bw = MXL5005S_BANDWIDTH_6MHZ;
+ switch (bw) {
+ case 6000000:
+ req_bw = MXL5005S_BANDWIDTH_6MHZ;
+ break;
+ case 7000000:
+ req_bw = MXL5005S_BANDWIDTH_7MHZ;
+ break;
+ case 8000000:
+ case 0:
+ req_bw = MXL5005S_BANDWIDTH_8MHZ;
break;
- case MXL_DVBT:
default:
- /* Assume DVB-T */
- switch (params->u.ofdm.bandwidth) {
- case BANDWIDTH_6_MHZ:
- req_bw = MXL5005S_BANDWIDTH_6MHZ;
- break;
- case BANDWIDTH_7_MHZ:
- req_bw = MXL5005S_BANDWIDTH_7MHZ;
- break;
- case BANDWIDTH_AUTO:
- case BANDWIDTH_8_MHZ:
- req_bw = MXL5005S_BANDWIDTH_8MHZ;
- break;
- default:
- return -EINVAL;
- }
+ return -EINVAL;
}
+ }
+ /* Change tuner for new modulation type if reqd */
+ if (req_mode != state->current_mode ||
+ req_bw != state->Chan_Bandwidth) {
state->current_mode = req_mode;
ret = mxl5005s_reconfigure(fe, req_mode, req_bw);
@@ -4034,8 +4027,8 @@ static int mxl5005s_set_params(struct dvb_frontend *fe,
ret = 0;
if (ret == 0) {
- dprintk(1, "%s() freq=%d\n", __func__, params->frequency);
- ret = mxl5005s_SetRfFreqHz(fe, params->frequency);
+ dprintk(1, "%s() freq=%d\n", __func__, c->frequency);
+ ret = mxl5005s_SetRfFreqHz(fe, c->frequency);
}
return ret;
diff --git a/drivers/media/common/tuners/mxl5007t.c b/drivers/media/common/tuners/mxl5007t.c
index 7eb1bf75cd07..69e453ef0a1a 100644
--- a/drivers/media/common/tuners/mxl5007t.c
+++ b/drivers/media/common/tuners/mxl5007t.c
@@ -165,6 +165,8 @@ struct mxl5007t_state {
struct reg_pair_t tab_init_cable[ARRAY_SIZE(init_tab_cable)];
struct reg_pair_t tab_rftune[ARRAY_SIZE(reg_pair_rftune)];
+ enum mxl5007t_if_freq if_freq;
+
u32 frequency;
u32 bandwidth;
};
@@ -286,6 +288,8 @@ static void mxl5007t_set_if_freq_bits(struct mxl5007t_state *state,
/* set inverted IF or normal IF */
set_reg_bits(state->tab_init, 0x02, 0x10, invert_if ? 0x10 : 0x00);
+ state->if_freq = if_freq;
+
return;
}
@@ -488,9 +492,10 @@ static int mxl5007t_write_regs(struct mxl5007t_state *state,
static int mxl5007t_read_reg(struct mxl5007t_state *state, u8 reg, u8 *val)
{
+ u8 buf[2] = { 0xfb, reg };
struct i2c_msg msg[] = {
{ .addr = state->i2c_props.addr, .flags = 0,
- .buf = &reg, .len = 1 },
+ .buf = buf, .len = 2 },
{ .addr = state->i2c_props.addr, .flags = I2C_M_RD,
.buf = val, .len = 1 },
};
@@ -611,47 +616,43 @@ fail:
/* ------------------------------------------------------------------------- */
-static int mxl5007t_set_params(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *params)
+static int mxl5007t_set_params(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ u32 delsys = c->delivery_system;
struct mxl5007t_state *state = fe->tuner_priv;
enum mxl5007t_bw_mhz bw;
enum mxl5007t_mode mode;
int ret;
- u32 freq = params->frequency;
+ u32 freq = c->frequency;
- if (fe->ops.info.type == FE_ATSC) {
- switch (params->u.vsb.modulation) {
- case VSB_8:
- case VSB_16:
- mode = MxL_MODE_ATSC;
- break;
- case QAM_64:
- case QAM_256:
- mode = MxL_MODE_CABLE;
- break;
- default:
- mxl_err("modulation not set!");
- return -EINVAL;
- }
+ switch (delsys) {
+ case SYS_ATSC:
+ mode = MxL_MODE_ATSC;
+ bw = MxL_BW_6MHz;
+ break;
+ case SYS_DVBC_ANNEX_B:
+ mode = MxL_MODE_CABLE;
bw = MxL_BW_6MHz;
- } else if (fe->ops.info.type == FE_OFDM) {
- switch (params->u.ofdm.bandwidth) {
- case BANDWIDTH_6_MHZ:
+ break;
+ case SYS_DVBT:
+ case SYS_DVBT2:
+ mode = MxL_MODE_DVBT;
+ switch (c->bandwidth_hz) {
+ case 6000000:
bw = MxL_BW_6MHz;
break;
- case BANDWIDTH_7_MHZ:
+ case 7000000:
bw = MxL_BW_7MHz;
break;
- case BANDWIDTH_8_MHZ:
+ case 8000000:
bw = MxL_BW_8MHz;
break;
default:
- mxl_err("bandwidth not set!");
return -EINVAL;
}
- mode = MxL_MODE_DVBT;
- } else {
+ break;
+ default:
mxl_err("modulation type not supported!");
return -EINVAL;
}
@@ -670,8 +671,7 @@ static int mxl5007t_set_params(struct dvb_frontend *fe,
goto fail;
state->frequency = freq;
- state->bandwidth = (fe->ops.info.type == FE_OFDM) ?
- params->u.ofdm.bandwidth : 0;
+ state->bandwidth = c->bandwidth_hz;
fail:
mutex_unlock(&state->lock);
@@ -737,6 +737,50 @@ static int mxl5007t_get_bandwidth(struct dvb_frontend *fe, u32 *bandwidth)
return 0;
}
+static int mxl5007t_get_if_frequency(struct dvb_frontend *fe, u32 *frequency)
+{
+ struct mxl5007t_state *state = fe->tuner_priv;
+
+ *frequency = 0;
+
+ switch (state->if_freq) {
+ case MxL_IF_4_MHZ:
+ *frequency = 4000000;
+ break;
+ case MxL_IF_4_5_MHZ:
+ *frequency = 4500000;
+ break;
+ case MxL_IF_4_57_MHZ:
+ *frequency = 4570000;
+ break;
+ case MxL_IF_5_MHZ:
+ *frequency = 5000000;
+ break;
+ case MxL_IF_5_38_MHZ:
+ *frequency = 5380000;
+ break;
+ case MxL_IF_6_MHZ:
+ *frequency = 6000000;
+ break;
+ case MxL_IF_6_28_MHZ:
+ *frequency = 6280000;
+ break;
+ case MxL_IF_9_1915_MHZ:
+ *frequency = 9191500;
+ break;
+ case MxL_IF_35_25_MHZ:
+ *frequency = 35250000;
+ break;
+ case MxL_IF_36_15_MHZ:
+ *frequency = 36150000;
+ break;
+ case MxL_IF_44_MHZ:
+ *frequency = 44000000;
+ break;
+ }
+ return 0;
+}
+
static int mxl5007t_release(struct dvb_frontend *fe)
{
struct mxl5007t_state *state = fe->tuner_priv;
@@ -766,6 +810,7 @@ static struct dvb_tuner_ops mxl5007t_tuner_ops = {
.get_frequency = mxl5007t_get_frequency,
.get_bandwidth = mxl5007t_get_bandwidth,
.release = mxl5007t_release,
+ .get_if_frequency = mxl5007t_get_if_frequency,
};
static int mxl5007t_get_chip_id(struct mxl5007t_state *state)
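
With get_if_frequency wired into mxl5007t_tuner_ops, a demodulator can ask the tuner which IF it actually programmed instead of hard-coding one. Below is a hedged, standalone sketch of such a caller; the fake_* types and the fallback value are stand-ins invented for the example, and only the shape of the tuner_ops.get_if_frequency hook mirrors the DVB core.

/* Standalone sketch of a demod-side consumer of the new callback.  The
 * structs are minimal stand-ins; in the kernel the hook lives in
 * fe->ops.tuner_ops.get_if_frequency. */
#include <stdio.h>

struct fake_fe;
struct fake_tuner_ops {
	int (*get_if_frequency)(struct fake_fe *fe, unsigned int *frequency);
};
struct fake_fe {
	struct fake_tuner_ops tuner_ops;
};

static int fake_get_if(struct fake_fe *fe, unsigned int *frequency)
{
	(void)fe;
	*frequency = 36150000;		/* e.g. the MxL_IF_36_15_MHZ case above */
	return 0;
}

static unsigned int query_tuner_if(struct fake_fe *fe)
{
	unsigned int if_hz = 0;

	if (fe->tuner_ops.get_if_frequency &&
	    fe->tuner_ops.get_if_frequency(fe, &if_hz) == 0)
		return if_hz;		/* IF in Hz */

	return 36125000;		/* illustrative fallback only */
}

int main(void)
{
	struct fake_fe fe = { .tuner_ops = { .get_if_frequency = fake_get_if } };

	printf("tuner IF = %u Hz\n", query_tuner_if(&fe));
	return 0;
}
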
diff --git a/drivers/media/common/tuners/qt1010.c b/drivers/media/common/tuners/qt1010.c
index 9f5dba244cb8..2d79b1f5d5eb 100644
--- a/drivers/media/common/tuners/qt1010.c
+++ b/drivers/media/common/tuners/qt1010.c
@@ -82,9 +82,9 @@ static void qt1010_dump_regs(struct qt1010_priv *priv)
printk(KERN_CONT "\n");
}
-static int qt1010_set_params(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *params)
+static int qt1010_set_params(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct qt1010_priv *priv;
int err;
u32 freq, div, mod1, mod2;
@@ -144,13 +144,11 @@ static int qt1010_set_params(struct dvb_frontend *fe,
#define FREQ2 4000000 /* 4 MHz Quartz oscillator in the stick? */
priv = fe->tuner_priv;
- freq = params->frequency;
+ freq = c->frequency;
div = (freq + QT1010_OFFSET) / QT1010_STEP;
freq = (div * QT1010_STEP) - QT1010_OFFSET;
mod1 = (freq + QT1010_OFFSET) % FREQ1;
mod2 = (freq + QT1010_OFFSET) % FREQ2;
- priv->bandwidth =
- (fe->ops.info.type == FE_OFDM) ? params->u.ofdm.bandwidth : 0;
priv->frequency = freq;
if (fe->ops.i2c_gate_ctrl)
@@ -320,7 +318,7 @@ static u8 qt1010_init_meas2(struct qt1010_priv *priv,
static int qt1010_init(struct dvb_frontend *fe)
{
struct qt1010_priv *priv = fe->tuner_priv;
- struct dvb_frontend_parameters params;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int err = 0;
u8 i, tmpval, *valptr = NULL;
@@ -397,9 +395,9 @@ static int qt1010_init(struct dvb_frontend *fe)
if ((err = qt1010_init_meas2(priv, i, &tmpval)))
return err;
- params.frequency = 545000000; /* Sigmatek DVB-110 545000000 */
+ c->frequency = 545000000; /* Sigmatek DVB-110 545000000 */
/* MSI Megasky 580 GL861 533000000 */
- return qt1010_set_params(fe, &params);
+ return qt1010_set_params(fe);
}
static int qt1010_release(struct dvb_frontend *fe)
@@ -416,10 +414,9 @@ static int qt1010_get_frequency(struct dvb_frontend *fe, u32 *frequency)
return 0;
}
-static int qt1010_get_bandwidth(struct dvb_frontend *fe, u32 *bandwidth)
+static int qt1010_get_if_frequency(struct dvb_frontend *fe, u32 *frequency)
{
- struct qt1010_priv *priv = fe->tuner_priv;
- *bandwidth = priv->bandwidth;
+ *frequency = 36125000;
return 0;
}
@@ -437,7 +434,7 @@ static const struct dvb_tuner_ops qt1010_tuner_ops = {
.set_params = qt1010_set_params,
.get_frequency = qt1010_get_frequency,
- .get_bandwidth = qt1010_get_bandwidth
+ .get_if_frequency = qt1010_get_if_frequency,
};
struct dvb_frontend * qt1010_attach(struct dvb_frontend *fe,
diff --git a/drivers/media/common/tuners/qt1010_priv.h b/drivers/media/common/tuners/qt1010_priv.h
index 090cf475f099..2c42d3f01636 100644
--- a/drivers/media/common/tuners/qt1010_priv.h
+++ b/drivers/media/common/tuners/qt1010_priv.h
@@ -99,7 +99,6 @@ struct qt1010_priv {
u8 reg25_init_val;
u32 frequency;
- u32 bandwidth;
};
#endif
diff --git a/drivers/media/common/tuners/tda18212.c b/drivers/media/common/tuners/tda18212.c
index e29cc2bc113a..602c2e392b17 100644
--- a/drivers/media/common/tuners/tda18212.c
+++ b/drivers/media/common/tuners/tda18212.c
@@ -25,6 +25,8 @@
struct tda18212_priv {
struct tda18212_config *cfg;
struct i2c_adapter *i2c;
+
+ u32 if_frequency;
};
#define dbg(fmt, arg...) \
@@ -128,20 +130,31 @@ static void tda18212_dump_regs(struct tda18212_priv *priv)
}
#endif
-static int tda18212_set_params(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *p)
+static int tda18212_set_params(struct dvb_frontend *fe)
{
struct tda18212_priv *priv = fe->tuner_priv;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int ret, i;
u32 if_khz;
u8 buf[9];
+ #define DVBT_6 0
+ #define DVBT_7 1
+ #define DVBT_8 2
+ #define DVBT2_6 3
+ #define DVBT2_7 4
+ #define DVBT2_8 5
+ #define DVBC_6 6
+ #define DVBC_8 7
static const u8 bw_params[][3] = {
- /* 0f 13 23 */
- { 0xb3, 0x20, 0x03 }, /* DVB-T 6 MHz */
- { 0xb3, 0x31, 0x01 }, /* DVB-T 7 MHz */
- { 0xb3, 0x22, 0x01 }, /* DVB-T 8 MHz */
- { 0x92, 0x53, 0x03 }, /* DVB-C */
+ /* reg: 0f 13 23 */
+ [DVBT_6] = { 0xb3, 0x20, 0x03 },
+ [DVBT_7] = { 0xb3, 0x31, 0x01 },
+ [DVBT_8] = { 0xb3, 0x22, 0x01 },
+ [DVBT2_6] = { 0xbc, 0x20, 0x03 },
+ [DVBT2_7] = { 0xbc, 0x72, 0x03 },
+ [DVBT2_8] = { 0xbc, 0x22, 0x01 },
+ [DVBC_6] = { 0x92, 0x50, 0x03 },
+ [DVBC_8] = { 0x92, 0x53, 0x03 },
};
dbg("delsys=%d RF=%d BW=%d\n",
@@ -155,24 +168,44 @@ static int tda18212_set_params(struct dvb_frontend *fe,
switch (c->bandwidth_hz) {
case 6000000:
if_khz = priv->cfg->if_dvbt_6;
- i = 0;
+ i = DVBT_6;
break;
case 7000000:
if_khz = priv->cfg->if_dvbt_7;
- i = 1;
+ i = DVBT_7;
break;
case 8000000:
if_khz = priv->cfg->if_dvbt_8;
- i = 2;
+ i = DVBT_8;
break;
default:
ret = -EINVAL;
goto error;
}
break;
- case SYS_DVBC_ANNEX_AC:
+ case SYS_DVBT2:
+ switch (c->bandwidth_hz) {
+ case 6000000:
+ if_khz = priv->cfg->if_dvbt2_6;
+ i = DVBT2_6;
+ break;
+ case 7000000:
+ if_khz = priv->cfg->if_dvbt2_7;
+ i = DVBT2_7;
+ break;
+ case 8000000:
+ if_khz = priv->cfg->if_dvbt2_8;
+ i = DVBT2_8;
+ break;
+ default:
+ ret = -EINVAL;
+ goto error;
+ }
+ break;
+ case SYS_DVBC_ANNEX_A:
+ case SYS_DVBC_ANNEX_C:
if_khz = priv->cfg->if_dvbc;
- i = 3;
+ i = DVBC_8;
break;
default:
ret = -EINVAL;
@@ -194,7 +227,7 @@ static int tda18212_set_params(struct dvb_frontend *fe,
buf[0] = 0x02;
buf[1] = bw_params[i][1];
buf[2] = 0x03; /* default value */
- buf[3] = if_khz / 50;
+ buf[3] = DIV_ROUND_CLOSEST(if_khz, 50);
buf[4] = ((c->frequency / 1000) >> 16) & 0xff;
buf[5] = ((c->frequency / 1000) >> 8) & 0xff;
buf[6] = ((c->frequency / 1000) >> 0) & 0xff;
@@ -204,6 +237,9 @@ static int tda18212_set_params(struct dvb_frontend *fe,
if (ret)
goto error;
+ /* actual IF rounded as it is on register */
+ priv->if_frequency = buf[3] * 50 * 1000;
+
exit:
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 0); /* close I2C-gate */
@@ -215,6 +251,15 @@ error:
goto exit;
}
+static int tda18212_get_if_frequency(struct dvb_frontend *fe, u32 *frequency)
+{
+ struct tda18212_priv *priv = fe->tuner_priv;
+
+ *frequency = priv->if_frequency;
+
+ return 0;
+}
+
static int tda18212_release(struct dvb_frontend *fe)
{
kfree(fe->tuner_priv);
@@ -234,6 +279,7 @@ static const struct dvb_tuner_ops tda18212_tuner_ops = {
.release = tda18212_release,
.set_params = tda18212_set_params,
+ .get_if_frequency = tda18212_get_if_frequency,
};
struct dvb_frontend *tda18212_attach(struct dvb_frontend *fe,
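
A small detail in the tda18212 hunks: the IF register works in 50 kHz steps, so the driver now rounds with DIV_ROUND_CLOSEST() and remembers the IF the hardware will really use (buf[3] * 50 * 1000). The standalone sketch below illustrates that arithmetic with an arbitrary example value; div_round_closest() mirrors what the kernel macro does for unsigned operands.

/* Standalone illustration of the 50 kHz IF rounding. */
#include <stdio.h>

static unsigned div_round_closest(unsigned x, unsigned divisor)
{
	return (x + divisor / 2) / divisor;
}

int main(void)
{
	unsigned if_khz = 3575;				/* arbitrary example IF */
	unsigned reg = div_round_closest(if_khz, 50);	/* 72, where x/50 would truncate to 71 */
	unsigned actual_if_hz = reg * 50 * 1000;	/* 3600000 Hz, the value reported back */

	printf("reg=0x%02x, actual IF=%u Hz\n", reg, actual_if_hz);
	return 0;
}
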
diff --git a/drivers/media/common/tuners/tda18212.h b/drivers/media/common/tuners/tda18212.h
index 83b497f59e1b..9bd5da4aabb7 100644
--- a/drivers/media/common/tuners/tda18212.h
+++ b/drivers/media/common/tuners/tda18212.h
@@ -29,6 +29,10 @@ struct tda18212_config {
u16 if_dvbt_6;
u16 if_dvbt_7;
u16 if_dvbt_8;
+ u16 if_dvbt2_5;
+ u16 if_dvbt2_6;
+ u16 if_dvbt2_7;
+ u16 if_dvbt2_8;
u16 if_dvbc;
};
diff --git a/drivers/media/common/tuners/tda18218.c b/drivers/media/common/tuners/tda18218.c
index aacfe2387e28..dfb3a831df45 100644
--- a/drivers/media/common/tuners/tda18218.c
+++ b/drivers/media/common/tuners/tda18218.c
@@ -109,10 +109,11 @@ static int tda18218_rd_reg(struct tda18218_priv *priv, u8 reg, u8 *val)
return tda18218_rd_regs(priv, reg, val, 1);
}
-static int tda18218_set_params(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *params)
+static int tda18218_set_params(struct dvb_frontend *fe)
{
struct tda18218_priv *priv = fe->tuner_priv;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ u32 bw = c->bandwidth_hz;
int ret;
u8 buf[3], i, BP_Filter, LP_Fc;
u32 LO_Frac;
@@ -138,22 +139,19 @@ static int tda18218_set_params(struct dvb_frontend *fe,
fe->ops.i2c_gate_ctrl(fe, 1); /* open I2C-gate */
/* low-pass filter cut-off frequency */
- switch (params->u.ofdm.bandwidth) {
- case BANDWIDTH_6_MHZ:
+ if (bw <= 6000000) {
LP_Fc = 0;
- LO_Frac = params->frequency + 4000000;
- break;
- case BANDWIDTH_7_MHZ:
+ priv->if_frequency = 3000000;
+ } else if (bw <= 7000000) {
LP_Fc = 1;
- LO_Frac = params->frequency + 3500000;
- break;
- case BANDWIDTH_8_MHZ:
- default:
+ priv->if_frequency = 3500000;
+ } else {
LP_Fc = 2;
- LO_Frac = params->frequency + 4000000;
- break;
+ priv->if_frequency = 4000000;
}
+ LO_Frac = c->frequency + priv->if_frequency;
+
/* band-pass filter */
if (LO_Frac < 188000000)
BP_Filter = 3;
@@ -206,6 +204,14 @@ error:
return ret;
}
+static int tda18218_get_if_frequency(struct dvb_frontend *fe, u32 *frequency)
+{
+ struct tda18218_priv *priv = fe->tuner_priv;
+ *frequency = priv->if_frequency;
+ dbg("%s: if=%d", __func__, *frequency);
+ return 0;
+}
+
static int tda18218_sleep(struct dvb_frontend *fe)
{
struct tda18218_priv *priv = fe->tuner_priv;
@@ -268,6 +274,8 @@ static const struct dvb_tuner_ops tda18218_tuner_ops = {
.sleep = tda18218_sleep,
.set_params = tda18218_set_params,
+
+ .get_if_frequency = tda18218_get_if_frequency,
};
struct dvb_frontend *tda18218_attach(struct dvb_frontend *fe,
diff --git a/drivers/media/common/tuners/tda18218_priv.h b/drivers/media/common/tuners/tda18218_priv.h
index 904e5365c78c..dc52b72e1407 100644
--- a/drivers/media/common/tuners/tda18218_priv.h
+++ b/drivers/media/common/tuners/tda18218_priv.h
@@ -100,6 +100,8 @@ struct tda18218_priv {
struct tda18218_config *cfg;
struct i2c_adapter *i2c;
+ u32 if_frequency;
+
u8 regs[TDA18218_NUM_REGS];
};
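
The tda18218 conversion replaces the exact BANDWIDTH_* cases with threshold tests on bandwidth_hz, records the IF it chose, and derives the LO as RF plus IF. A standalone sketch of that mapping follows; the thresholds and IF values come from the hunk above, while the tuning request itself is an arbitrary example.

/* Standalone sketch of the bandwidth -> (low-pass cut-off, IF) mapping and
 * the LO calculation; only the thresholds and IF values mirror the diff. */
#include <stdio.h>

int main(void)
{
	unsigned bw = 7000000, freq = 177500000;	/* arbitrary example request */
	unsigned lp_fc, if_hz;

	if (bw <= 6000000) {
		lp_fc = 0; if_hz = 3000000;
	} else if (bw <= 7000000) {
		lp_fc = 1; if_hz = 3500000;
	} else {
		lp_fc = 2; if_hz = 4000000;
	}

	printf("LP_Fc=%u IF=%u Hz LO=%u Hz\n", lp_fc, if_hz, freq + if_hz);
	return 0;
}
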
diff --git a/drivers/media/common/tuners/tda18271-fe.c b/drivers/media/common/tuners/tda18271-fe.c
index 63cc4004e211..2e67f4459904 100644
--- a/drivers/media/common/tuners/tda18271-fe.c
+++ b/drivers/media/common/tuners/tda18271-fe.c
@@ -928,59 +928,49 @@ fail:
/* ------------------------------------------------------------------ */
-static int tda18271_set_params(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *params)
+static int tda18271_set_params(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ u32 delsys = c->delivery_system;
+ u32 bw = c->bandwidth_hz;
+ u32 freq = c->frequency;
struct tda18271_priv *priv = fe->tuner_priv;
struct tda18271_std_map *std_map = &priv->std;
struct tda18271_std_map_item *map;
int ret;
- u32 bw, freq = params->frequency;
priv->mode = TDA18271_DIGITAL;
- if (fe->ops.info.type == FE_ATSC) {
- switch (params->u.vsb.modulation) {
- case VSB_8:
- case VSB_16:
- map = &std_map->atsc_6;
- break;
- case QAM_64:
- case QAM_256:
- map = &std_map->qam_6;
- break;
- default:
- tda_warn("modulation not set!\n");
- return -EINVAL;
- }
-#if 0
- /* userspace request is already center adjusted */
- freq += 1750000; /* Adjust to center (+1.75MHZ) */
-#endif
+ switch (delsys) {
+ case SYS_ATSC:
+ map = &std_map->atsc_6;
bw = 6000000;
- } else if (fe->ops.info.type == FE_OFDM) {
- switch (params->u.ofdm.bandwidth) {
- case BANDWIDTH_6_MHZ:
- bw = 6000000;
+ break;
+ case SYS_ISDBT:
+ case SYS_DVBT:
+ case SYS_DVBT2:
+ if (bw <= 6000000) {
map = &std_map->dvbt_6;
- break;
- case BANDWIDTH_7_MHZ:
- bw = 7000000;
+ } else if (bw <= 7000000) {
map = &std_map->dvbt_7;
- break;
- case BANDWIDTH_8_MHZ:
- bw = 8000000;
+ } else {
map = &std_map->dvbt_8;
- break;
- default:
- tda_warn("bandwidth not set!\n");
- return -EINVAL;
}
- } else if (fe->ops.info.type == FE_QAM) {
- /* DVB-C */
- map = &std_map->qam_8;
- bw = 8000000;
- } else {
+ break;
+ case SYS_DVBC_ANNEX_B:
+ bw = 6000000;
+ /* fall through */
+ case SYS_DVBC_ANNEX_A:
+ case SYS_DVBC_ANNEX_C:
+ if (bw <= 6000000) {
+ map = &std_map->qam_6;
+ } else if (bw <= 7000000) {
+ map = &std_map->qam_7;
+ } else {
+ map = &std_map->qam_8;
+ }
+ break;
+ default:
tda_warn("modulation type not supported!\n");
return -EINVAL;
}
@@ -994,9 +984,9 @@ static int tda18271_set_params(struct dvb_frontend *fe,
if (tda_fail(ret))
goto fail;
+ priv->if_freq = map->if_freq;
priv->frequency = freq;
- priv->bandwidth = (fe->ops.info.type == FE_OFDM) ?
- params->u.ofdm.bandwidth : 0;
+ priv->bandwidth = bw;
fail:
return ret;
}
@@ -1050,6 +1040,7 @@ static int tda18271_set_analog_params(struct dvb_frontend *fe,
if (tda_fail(ret))
goto fail;
+ priv->if_freq = map->if_freq;
priv->frequency = freq;
priv->bandwidth = 0;
fail:
@@ -1086,6 +1077,13 @@ static int tda18271_get_bandwidth(struct dvb_frontend *fe, u32 *bandwidth)
return 0;
}
+static int tda18271_get_if_frequency(struct dvb_frontend *fe, u32 *frequency)
+{
+ struct tda18271_priv *priv = fe->tuner_priv;
+ *frequency = (u32)priv->if_freq * 1000;
+ return 0;
+}
+
/* ------------------------------------------------------------------ */
#define tda18271_update_std(std_cfg, name) do { \
@@ -1245,6 +1243,7 @@ static const struct dvb_tuner_ops tda18271_tuner_ops = {
.set_config = tda18271_set_config,
.get_frequency = tda18271_get_frequency,
.get_bandwidth = tda18271_get_bandwidth,
+ .get_if_frequency = tda18271_get_if_frequency,
};
struct dvb_frontend *tda18271_attach(struct dvb_frontend *fe, u8 addr,
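
For the tda18271, delivery system plus bandwidth now pick the standard-map entry: ISDB-T, DVB-T and DVB-T2 share the dvbt_* entries by bandwidth, and SYS_DVBC_ANNEX_B forces 6 MHz before falling through into the common QAM branch (hence the new qam_7 map entries further down). A standalone sketch of that selection follows, with the map reduced to entry names; the enum and the strings are stand-ins for illustration.

/* Standalone sketch of the standard-map selection; enum values and the
 * returned names are stand-ins, only the branching mirrors the diff. */
#include <stdio.h>

enum delsys { SYS_ATSC, SYS_ISDBT, SYS_DVBT, SYS_DVBT2,
	      SYS_DVBC_ANNEX_A, SYS_DVBC_ANNEX_B, SYS_DVBC_ANNEX_C };

static const char *pick_std(enum delsys d, unsigned *bw)
{
	switch (d) {
	case SYS_ATSC:
		*bw = 6000000;
		return "atsc_6";
	case SYS_ISDBT:
	case SYS_DVBT:
	case SYS_DVBT2:
		return *bw <= 6000000 ? "dvbt_6" :
		       *bw <= 7000000 ? "dvbt_7" : "dvbt_8";
	case SYS_DVBC_ANNEX_B:
		*bw = 6000000;
		/* fall through: Annex B is always 6 MHz QAM */
	case SYS_DVBC_ANNEX_A:
	case SYS_DVBC_ANNEX_C:
		return *bw <= 6000000 ? "qam_6" :
		       *bw <= 7000000 ? "qam_7" : "qam_8";
	default:
		return NULL;
	}
}

int main(void)
{
	unsigned bw = 8000000;

	printf("%s\n", pick_std(SYS_DVBC_ANNEX_A, &bw));
	return 0;
}
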
diff --git a/drivers/media/common/tuners/tda18271-maps.c b/drivers/media/common/tuners/tda18271-maps.c
index 3d5b6ab7e332..fb881c667c94 100644
--- a/drivers/media/common/tuners/tda18271-maps.c
+++ b/drivers/media/common/tuners/tda18271-maps.c
@@ -1213,6 +1213,8 @@ static struct tda18271_std_map tda18271c1_std_map = {
.if_lvl = 1, .rfagc_top = 0x37, }, /* EP3[4:0] 0x1e */
.qam_6 = { .if_freq = 4000, .fm_rfn = 0, .agc_mode = 3, .std = 5,
.if_lvl = 1, .rfagc_top = 0x37, }, /* EP3[4:0] 0x1d */
+ .qam_7 = { .if_freq = 4500, .fm_rfn = 0, .agc_mode = 3, .std = 6,
+ .if_lvl = 1, .rfagc_top = 0x37, }, /* EP3[4:0] 0x1e */
.qam_8 = { .if_freq = 5000, .fm_rfn = 0, .agc_mode = 3, .std = 7,
.if_lvl = 1, .rfagc_top = 0x37, }, /* EP3[4:0] 0x1f */
};
@@ -1244,6 +1246,8 @@ static struct tda18271_std_map tda18271c2_std_map = {
.if_lvl = 1, .rfagc_top = 0x37, }, /* EP3[4:0] 0x1d */
.qam_6 = { .if_freq = 4000, .fm_rfn = 0, .agc_mode = 3, .std = 5,
.if_lvl = 1, .rfagc_top = 0x37, }, /* EP3[4:0] 0x1d */
+ .qam_7 = { .if_freq = 4500, .fm_rfn = 0, .agc_mode = 3, .std = 6,
+ .if_lvl = 1, .rfagc_top = 0x37, }, /* EP3[4:0] 0x1e */
.qam_8 = { .if_freq = 5000, .fm_rfn = 0, .agc_mode = 3, .std = 7,
.if_lvl = 1, .rfagc_top = 0x37, }, /* EP3[4:0] 0x1f */
};
diff --git a/drivers/media/common/tuners/tda18271-priv.h b/drivers/media/common/tuners/tda18271-priv.h
index 94340f47562b..454c152ccaa0 100644
--- a/drivers/media/common/tuners/tda18271-priv.h
+++ b/drivers/media/common/tuners/tda18271-priv.h
@@ -122,6 +122,8 @@ struct tda18271_priv {
struct mutex lock;
+ u16 if_freq;
+
u32 frequency;
u32 bandwidth;
};
diff --git a/drivers/media/common/tuners/tda18271.h b/drivers/media/common/tuners/tda18271.h
index 50cfa8cebb93..640bae4e6a5a 100644
--- a/drivers/media/common/tuners/tda18271.h
+++ b/drivers/media/common/tuners/tda18271.h
@@ -53,6 +53,7 @@ struct tda18271_std_map {
struct tda18271_std_map_item dvbt_7;
struct tda18271_std_map_item dvbt_8;
struct tda18271_std_map_item qam_6;
+ struct tda18271_std_map_item qam_7;
struct tda18271_std_map_item qam_8;
};
diff --git a/drivers/media/common/tuners/tda827x.c b/drivers/media/common/tuners/tda827x.c
index e0d5b43772b8..a0d176267470 100644
--- a/drivers/media/common/tuners/tda827x.c
+++ b/drivers/media/common/tuners/tda827x.c
@@ -152,9 +152,9 @@ static int tuner_transfer(struct dvb_frontend *fe,
return rc;
}
-static int tda827xo_set_params(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *params)
+static int tda827xo_set_params(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct tda827x_priv *priv = fe->tuner_priv;
u8 buf[14];
int rc;
@@ -165,18 +165,16 @@ static int tda827xo_set_params(struct dvb_frontend *fe,
u32 N;
dprintk("%s:\n", __func__);
- switch (params->u.ofdm.bandwidth) {
- case BANDWIDTH_6_MHZ:
+ if (c->bandwidth_hz == 0) {
+ if_freq = 5000000;
+ } else if (c->bandwidth_hz <= 6000000) {
if_freq = 4000000;
- break;
- case BANDWIDTH_7_MHZ:
+ } else if (c->bandwidth_hz <= 7000000) {
if_freq = 4500000;
- break;
- default: /* 8 MHz or Auto */
+ } else { /* 8 MHz */
if_freq = 5000000;
- break;
}
- tuner_freq = params->frequency;
+ tuner_freq = c->frequency;
i = 0;
while (tda827x_table[i].lomax < tuner_freq) {
@@ -220,8 +218,8 @@ static int tda827xo_set_params(struct dvb_frontend *fe,
if (rc < 0)
goto err;
- priv->frequency = params->frequency;
- priv->bandwidth = (fe->ops.info.type == FE_OFDM) ? params->u.ofdm.bandwidth : 0;
+ priv->frequency = c->frequency;
+ priv->bandwidth = c->bandwidth_hz;
return 0;
@@ -513,9 +511,9 @@ static void tda827xa_lna_gain(struct dvb_frontend *fe, int high,
}
}
-static int tda827xa_set_params(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *params)
+static int tda827xa_set_params(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct tda827x_priv *priv = fe->tuner_priv;
struct tda827xa_data *frequency_map = tda827xa_dvbt;
u8 buf[11];
@@ -531,22 +529,25 @@ static int tda827xa_set_params(struct dvb_frontend *fe,
tda827xa_lna_gain(fe, 1, NULL);
msleep(20);
- switch (params->u.ofdm.bandwidth) {
- case BANDWIDTH_6_MHZ:
+ if (c->bandwidth_hz == 0) {
+ if_freq = 5000000;
+ } else if (c->bandwidth_hz <= 6000000) {
if_freq = 4000000;
- break;
- case BANDWIDTH_7_MHZ:
+ } else if (c->bandwidth_hz <= 7000000) {
if_freq = 4500000;
- break;
- default: /* 8 MHz or Auto */
+ } else { /* 8 MHz */
if_freq = 5000000;
- break;
}
- tuner_freq = params->frequency;
+ tuner_freq = c->frequency;
- if (fe->ops.info.type == FE_QAM) {
+ switch (c->delivery_system) {
+ case SYS_DVBC_ANNEX_A:
+ case SYS_DVBC_ANNEX_C:
dprintk("%s select tda827xa_dvbc\n", __func__);
frequency_map = tda827xa_dvbc;
+ break;
+ default:
+ break;
}
i = 0;
@@ -645,9 +646,8 @@ static int tda827xa_set_params(struct dvb_frontend *fe,
if (rc < 0)
goto err;
- priv->frequency = params->frequency;
- priv->bandwidth = (fe->ops.info.type == FE_OFDM) ? params->u.ofdm.bandwidth : 0;
-
+ priv->frequency = c->frequency;
+ priv->bandwidth = c->bandwidth_hz;
return 0;
diff --git a/drivers/media/common/tuners/tuner-simple.c b/drivers/media/common/tuners/tuner-simple.c
index f8ee29e6059c..39e7e583c8c0 100644
--- a/drivers/media/common/tuners/tuner-simple.c
+++ b/drivers/media/common/tuners/tuner-simple.c
@@ -751,6 +751,17 @@ static int simple_set_radio_freq(struct dvb_frontend *fe,
if (4 != rc)
tuner_warn("i2c i/o error: rc == %d (should be 4)\n", rc);
+ /* Write AUX byte */
+ switch (priv->type) {
+ case TUNER_PHILIPS_FM1216ME_MK3:
+ buffer[2] = 0x98;
+ buffer[3] = 0x20; /* set TOP AGC */
+ rc = tuner_i2c_xfer_send(&priv->i2c_props, buffer, 4);
+ if (4 != rc)
+ tuner_warn("i2c i/o error: rc == %d (should be 4)\n", rc);
+ break;
+ }
+
return 0;
}
@@ -780,24 +791,26 @@ static int simple_set_params(struct dvb_frontend *fe,
}
static void simple_set_dvb(struct dvb_frontend *fe, u8 *buf,
- const struct dvb_frontend_parameters *params)
+ const u32 delsys,
+ const u32 frequency,
+ const u32 bandwidth)
{
struct tuner_simple_priv *priv = fe->tuner_priv;
switch (priv->type) {
case TUNER_PHILIPS_FMD1216ME_MK3:
case TUNER_PHILIPS_FMD1216MEX_MK3:
- if (params->u.ofdm.bandwidth == BANDWIDTH_8_MHZ &&
- params->frequency >= 158870000)
+ if (bandwidth == 8000000 &&
+ frequency >= 158870000)
buf[3] |= 0x08;
break;
case TUNER_PHILIPS_TD1316:
/* determine band */
- buf[3] |= (params->frequency < 161000000) ? 1 :
- (params->frequency < 444000000) ? 2 : 4;
+ buf[3] |= (frequency < 161000000) ? 1 :
+ (frequency < 444000000) ? 2 : 4;
/* setup PLL filter */
- if (params->u.ofdm.bandwidth == BANDWIDTH_8_MHZ)
+ if (bandwidth == 8000000)
buf[3] |= 1 << 3;
break;
case TUNER_PHILIPS_TUV1236D:
@@ -808,12 +821,11 @@ static void simple_set_dvb(struct dvb_frontend *fe, u8 *buf,
if (dtv_input[priv->nr])
new_rf = dtv_input[priv->nr];
else
- switch (params->u.vsb.modulation) {
- case QAM_64:
- case QAM_256:
+ switch (delsys) {
+ case SYS_DVBC_ANNEX_B:
new_rf = 1;
break;
- case VSB_8:
+ case SYS_ATSC:
default:
new_rf = 0;
break;
@@ -827,7 +839,9 @@ static void simple_set_dvb(struct dvb_frontend *fe, u8 *buf,
}
static u32 simple_dvb_configure(struct dvb_frontend *fe, u8 *buf,
- const struct dvb_frontend_parameters *params)
+ const u32 delsys,
+ const u32 freq,
+ const u32 bw)
{
/* This function returns the tuned frequency on success, 0 on error */
struct tuner_simple_priv *priv = fe->tuner_priv;
@@ -836,7 +850,7 @@ static u32 simple_dvb_configure(struct dvb_frontend *fe, u8 *buf,
u8 config, cb;
u32 div;
int ret;
- unsigned frequency = params->frequency / 62500;
+ u32 frequency = freq / 62500;
if (!tun->stepsize) {
/* tuner-core was loaded before the digital tuner was
@@ -860,7 +874,7 @@ static u32 simple_dvb_configure(struct dvb_frontend *fe, u8 *buf,
buf[2] = config;
buf[3] = cb;
- simple_set_dvb(fe, buf, params);
+ simple_set_dvb(fe, buf, delsys, freq, bw);
tuner_dbg("%s: div=%d | buf=0x%02x,0x%02x,0x%02x,0x%02x\n",
tun->name, div, buf[0], buf[1], buf[2], buf[3]);
@@ -870,32 +884,37 @@ static u32 simple_dvb_configure(struct dvb_frontend *fe, u8 *buf,
}
static int simple_dvb_calc_regs(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *params,
u8 *buf, int buf_len)
{
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ u32 delsys = c->delivery_system;
+ u32 bw = c->bandwidth_hz;
struct tuner_simple_priv *priv = fe->tuner_priv;
u32 frequency;
if (buf_len < 5)
return -EINVAL;
- frequency = simple_dvb_configure(fe, buf+1, params);
+ frequency = simple_dvb_configure(fe, buf+1, delsys, c->frequency, bw);
if (frequency == 0)
return -EINVAL;
buf[0] = priv->i2c_props.addr;
priv->frequency = frequency;
- priv->bandwidth = (fe->ops.info.type == FE_OFDM) ?
- params->u.ofdm.bandwidth : 0;
+ priv->bandwidth = c->bandwidth_hz;
return 5;
}
-static int simple_dvb_set_params(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *params)
+static int simple_dvb_set_params(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ u32 delsys = c->delivery_system;
+ u32 bw = c->bandwidth_hz;
+ u32 freq = c->frequency;
struct tuner_simple_priv *priv = fe->tuner_priv;
+ u32 frequency;
u32 prev_freq, prev_bw;
int ret;
u8 buf[5];
@@ -906,9 +925,14 @@ static int simple_dvb_set_params(struct dvb_frontend *fe,
prev_freq = priv->frequency;
prev_bw = priv->bandwidth;
- ret = simple_dvb_calc_regs(fe, params, buf, 5);
- if (ret != 5)
- goto fail;
+ frequency = simple_dvb_configure(fe, buf+1, delsys, freq, bw);
+ if (frequency == 0)
+ return -EINVAL;
+
+ buf[0] = priv->i2c_props.addr;
+
+ priv->frequency = frequency;
+ priv->bandwidth = bw;
/* put analog demod in standby when tuning digital */
if (fe->ops.analog_ops.standby)
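
In tuner-simple, simple_dvb_set_params now programs the PLL directly from the cached properties instead of going through the old calc_regs path, while simple_dvb_configure keeps working in the tuner's 62.5 kHz channel steps. The standalone sketch below only illustrates that step quantisation; the requested frequency is an arbitrary example.

/* Standalone illustration of the 62.5 kHz step quantisation applied when the
 * divider is programmed. */
#include <stdio.h>

int main(void)
{
	unsigned freq_hz = 505833333;		/* arbitrary request */
	unsigned steps = freq_hz / 62500;	/* frequency in 62.5 kHz units */
	unsigned tuned_hz = steps * 62500;	/* nearest step at or below */

	printf("steps=%u tuned=%u Hz (error %u Hz)\n",
	       steps, tuned_hz, freq_hz - tuned_hz);
	return 0;
}
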
diff --git a/drivers/media/common/tuners/tuner-xc2028.c b/drivers/media/common/tuners/tuner-xc2028.c
index 3acbaa04e1b3..b5ee3ebfcfca 100644
--- a/drivers/media/common/tuners/tuner-xc2028.c
+++ b/drivers/media/common/tuners/tuner-xc2028.c
@@ -24,6 +24,21 @@
#include <linux/dvb/frontend.h>
#include "dvb_frontend.h"
+/* Registers (Write-only) */
+#define XREG_INIT 0x00
+#define XREG_RF_FREQ 0x02
+#define XREG_POWER_DOWN 0x08
+
+/* Registers (Read-only) */
+#define XREG_FREQ_ERROR 0x01
+#define XREG_LOCK 0x02
+#define XREG_VERSION 0x04
+#define XREG_PRODUCT_ID 0x08
+#define XREG_HSYNC_FREQ 0x10
+#define XREG_FRAME_LINES 0x20
+#define XREG_SNR 0x40
+
+#define XREG_ADC_ENV 0x0100
static int debug;
module_param(debug, int, 0644);
@@ -311,7 +326,7 @@ static int load_all_firmwares(struct dvb_frontend *fe)
n_array, fname, name,
priv->firm_version >> 8, priv->firm_version & 0xff);
- priv->firm = kzalloc(sizeof(*priv->firm) * n_array, GFP_KERNEL);
+ priv->firm = kcalloc(n_array, sizeof(*priv->firm), GFP_KERNEL);
if (priv->firm == NULL) {
tuner_err("Not enough memory to load firmware file.\n");
rc = -ENOMEM;
@@ -885,16 +900,16 @@ static int xc2028_signal(struct dvb_frontend *fe, u16 *strength)
mutex_lock(&priv->lock);
/* Sync Lock Indicator */
- rc = xc2028_get_reg(priv, 0x0002, &frq_lock);
+ rc = xc2028_get_reg(priv, XREG_LOCK, &frq_lock);
if (rc < 0)
goto ret;
/* Frequency is locked */
if (frq_lock == 1)
- signal = 32768;
+ signal = 1 << 11;
/* Get SNR of the video signal */
- rc = xc2028_get_reg(priv, 0x0040, &signal);
+ rc = xc2028_get_reg(priv, XREG_SNR, &signal);
if (rc < 0)
goto ret;
@@ -962,14 +977,24 @@ static int generic_set_freq(struct dvb_frontend *fe, u32 freq /* in HZ */,
* For DTV 7/8, the firmware uses BW = 8000, so it needs a
* further adjustment to get the frequency center on VHF
*/
+
+ /*
+ * The DTV78 firmware used to work fine in the UHF band (8 MHz
+ * bandwidth) but not at all in the VHF band (7 MHz bandwidth).
+ * The real problem was the formula used to calculate the center
+ * frequency offset in the VHF band; removing the 500 kHz
+ * adjustment fixed it, which is consistent with what was already
+ * done for the DTV7 firmware.
+ * In the end, now the center frequency is the same for all 3
+ * firmwares (DTV7, DTV8, DTV78) and doesn't depend on channel
+ * bandwidth.
+ */
+
if (priv->cur_fw.type & DTV6)
offset = 1750000;
- else if (priv->cur_fw.type & DTV7)
- offset = 2250000;
- else /* DTV8 or DTV78 */
+ else /* DTV7 or DTV8 or DTV78 */
offset = 2750000;
- if ((priv->cur_fw.type & DTV78) && freq < 470000000)
- offset -= 500000;
/*
* xc3028 additional "magic"
@@ -979,17 +1004,13 @@ static int generic_set_freq(struct dvb_frontend *fe, u32 freq /* in HZ */,
* newer firmwares
*/
-#if 1
/*
* The proper adjustment would be to do it at s-code table.
* However, this didn't work, as reported by
* Robert Lowery <rglowery@exemail.com.au>
*/
- if (priv->cur_fw.type & DTV7)
- offset += 500000;
-
-#else
+#if 0
/*
* Still need tests for XC3028L (firmware 3.2 or upper)
* So, for now, let's just comment the per-firmware
@@ -1013,9 +1034,9 @@ static int generic_set_freq(struct dvb_frontend *fe, u32 freq /* in HZ */,
/* CMD= Set frequency */
if (priv->firm_version < 0x0202)
- rc = send_seq(priv, {0x00, 0x02, 0x00, 0x00});
+ rc = send_seq(priv, {0x00, XREG_RF_FREQ, 0x00, 0x00});
else
- rc = send_seq(priv, {0x80, 0x02, 0x00, 0x00});
+ rc = send_seq(priv, {0x80, XREG_RF_FREQ, 0x00, 0x00});
if (rc < 0)
goto ret;
@@ -1084,68 +1105,28 @@ static int xc2028_set_analog_freq(struct dvb_frontend *fe,
V4L2_TUNER_ANALOG_TV, type, p->std, 0);
}
-static int xc2028_set_params(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *p)
+static int xc2028_set_params(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ u32 delsys = c->delivery_system;
+ u32 bw = c->bandwidth_hz;
struct xc2028_data *priv = fe->tuner_priv;
unsigned int type=0;
- fe_bandwidth_t bw = BANDWIDTH_8_MHZ;
u16 demod = 0;
tuner_dbg("%s called\n", __func__);
- switch(fe->ops.info.type) {
- case FE_OFDM:
- bw = p->u.ofdm.bandwidth;
+ switch (delsys) {
+ case SYS_DVBT:
+ case SYS_DVBT2:
/*
* The only countries with 6MHz seem to be Taiwan/Uruguay.
* Both seem to require QAM firmware for OFDM decoding
* Tested in Taiwan by Terry Wu <terrywu2009@gmail.com>
*/
- if (bw == BANDWIDTH_6_MHZ)
+ if (bw <= 6000000)
type |= QAM;
- break;
- case FE_ATSC:
- bw = BANDWIDTH_6_MHZ;
- /* The only ATSC firmware (at least on v2.7) is D2633 */
- type |= ATSC | D2633;
- break;
- /* DVB-S and pure QAM (FE_QAM) are not supported */
- default:
- return -EINVAL;
- }
-
- switch (bw) {
- case BANDWIDTH_8_MHZ:
- if (p->frequency < 470000000)
- priv->ctrl.vhfbw7 = 0;
- else
- priv->ctrl.uhfbw8 = 1;
- type |= (priv->ctrl.vhfbw7 && priv->ctrl.uhfbw8) ? DTV78 : DTV8;
- type |= F8MHZ;
- break;
- case BANDWIDTH_7_MHZ:
- if (p->frequency < 470000000)
- priv->ctrl.vhfbw7 = 1;
- else
- priv->ctrl.uhfbw8 = 0;
- type |= (priv->ctrl.vhfbw7 && priv->ctrl.uhfbw8) ? DTV78 : DTV7;
- type |= F8MHZ;
- break;
- case BANDWIDTH_6_MHZ:
- type |= DTV6;
- priv->ctrl.vhfbw7 = 0;
- priv->ctrl.uhfbw8 = 0;
- break;
- default:
- tuner_err("error: bandwidth not supported.\n");
- };
- /*
- Selects between D2633 or D2620 firmware.
- It doesn't make sense for ATSC, since it should be D2633 on all cases
- */
- if (fe->ops.info.type != FE_ATSC) {
switch (priv->ctrl.type) {
case XC2028_D2633:
type |= D2633;
@@ -1161,6 +1142,34 @@ static int xc2028_set_params(struct dvb_frontend *fe,
else
type |= D2620;
}
+ break;
+ case SYS_ATSC:
+ /* The only ATSC firmware (at least on v2.7) is D2633 */
+ type |= ATSC | D2633;
+ break;
+ /* DVB-S and pure QAM (FE_QAM) are not supported */
+ default:
+ return -EINVAL;
+ }
+
+ if (bw <= 6000000) {
+ type |= DTV6;
+ priv->ctrl.vhfbw7 = 0;
+ priv->ctrl.uhfbw8 = 0;
+ } else if (bw <= 7000000) {
+ if (c->frequency < 470000000)
+ priv->ctrl.vhfbw7 = 1;
+ else
+ priv->ctrl.uhfbw8 = 0;
+ type |= (priv->ctrl.vhfbw7 && priv->ctrl.uhfbw8) ? DTV78 : DTV7;
+ type |= F8MHZ;
+ } else {
+ if (c->frequency < 470000000)
+ priv->ctrl.vhfbw7 = 0;
+ else
+ priv->ctrl.uhfbw8 = 1;
+ type |= (priv->ctrl.vhfbw7 && priv->ctrl.uhfbw8) ? DTV78 : DTV8;
+ type |= F8MHZ;
}
/* All S-code tables need a 200kHz shift */
@@ -1185,7 +1194,7 @@ static int xc2028_set_params(struct dvb_frontend *fe,
*/
}
- return generic_set_freq(fe, p->frequency,
+ return generic_set_freq(fe, c->frequency,
V4L2_TUNER_DIGITAL_TV, type, 0, demod);
}
@@ -1207,9 +1216,9 @@ static int xc2028_sleep(struct dvb_frontend *fe)
mutex_lock(&priv->lock);
if (priv->firm_version < 0x0202)
- rc = send_seq(priv, {0x00, 0x08, 0x00, 0x00});
+ rc = send_seq(priv, {0x00, XREG_POWER_DOWN, 0x00, 0x00});
else
- rc = send_seq(priv, {0x80, 0x08, 0x00, 0x00});
+ rc = send_seq(priv, {0x80, XREG_POWER_DOWN, 0x00, 0x00});
priv->cur_fw.type = 0; /* need firmware reload */
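
The xc2028 rework folds the old BANDWIDTH_* switch into threshold tests: up to 6 MHz selects DTV6, up to 7 MHz DTV7, anything wider DTV8, with DTV78 chosen when both band hints are set; the vhfbw7/uhfbw8 hints are refreshed from the requested frequency and, in the driver, persist in priv->ctrl between calls. A standalone sketch of that selection follows; the flag bit values are invented for the example and both hints start at zero here.

/* Standalone sketch of the bandwidth/frequency -> firmware-type selection;
 * the bit values of the flags are invented for this example only. */
#include <stdio.h>

#define DTV6	(1 << 0)
#define DTV7	(1 << 1)
#define DTV8	(1 << 2)
#define DTV78	(1 << 3)
#define F8MHZ	(1 << 4)

int main(void)
{
	unsigned bw = 8000000, freq = 474000000;	/* arbitrary example */
	int vhfbw7 = 0, uhfbw8 = 0, type = 0;

	if (bw <= 6000000) {
		type |= DTV6;
		vhfbw7 = 0;
		uhfbw8 = 0;
	} else if (bw <= 7000000) {
		if (freq < 470000000)
			vhfbw7 = 1;
		else
			uhfbw8 = 0;
		type |= (vhfbw7 && uhfbw8) ? DTV78 : DTV7;
		type |= F8MHZ;
	} else {
		if (freq < 470000000)
			vhfbw7 = 0;
		else
			uhfbw8 = 1;
		type |= (vhfbw7 && uhfbw8) ? DTV78 : DTV8;
		type |= F8MHZ;
	}

	printf("firmware type mask = 0x%x\n", type);
	return 0;
}
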
diff --git a/drivers/media/common/tuners/xc4000.c b/drivers/media/common/tuners/xc4000.c
index 634f4d9b6c63..68397110b7d9 100644
--- a/drivers/media/common/tuners/xc4000.c
+++ b/drivers/media/common/tuners/xc4000.c
@@ -154,6 +154,8 @@ struct xc4000_priv {
#define XREG_SNR 0x06
#define XREG_VERSION 0x07
#define XREG_PRODUCT_ID 0x08
+#define XREG_SIGNAL_LEVEL 0x0A
+#define XREG_NOISE_LEVEL 0x0B
/*
Basic firmware description. This will remain with
@@ -486,6 +488,16 @@ static int xc_get_quality(struct xc4000_priv *priv, u16 *quality)
return xc4000_readreg(priv, XREG_QUALITY, quality);
}
+static int xc_get_signal_level(struct xc4000_priv *priv, u16 *signal)
+{
+ return xc4000_readreg(priv, XREG_SIGNAL_LEVEL, signal);
+}
+
+static int xc_get_noise_level(struct xc4000_priv *priv, u16 *noise)
+{
+ return xc4000_readreg(priv, XREG_NOISE_LEVEL, noise);
+}
+
static u16 xc_wait_for_lock(struct xc4000_priv *priv)
{
u16 lock_state = 0;
@@ -758,7 +770,7 @@ static int xc4000_fwupload(struct dvb_frontend *fe)
n_array, fname, name,
priv->firm_version >> 8, priv->firm_version & 0xff);
- priv->firm = kzalloc(sizeof(*priv->firm) * n_array, GFP_KERNEL);
+ priv->firm = kcalloc(n_array, sizeof(*priv->firm), GFP_KERNEL);
if (priv->firm == NULL) {
printk(KERN_ERR "Not enough memory to load firmware file.\n");
rc = -ENOMEM;
@@ -1089,6 +1101,8 @@ static void xc_debug_dump(struct xc4000_priv *priv)
u32 hsync_freq_hz = 0;
u16 frame_lines;
u16 quality;
+ u16 signal = 0;
+ u16 noise = 0;
u8 hw_majorversion = 0, hw_minorversion = 0;
u8 fw_majorversion = 0, fw_minorversion = 0;
@@ -1119,85 +1133,70 @@ static void xc_debug_dump(struct xc4000_priv *priv)
xc_get_quality(priv, &quality);
dprintk(1, "*** Quality (0:<8dB, 7:>56dB) = %d\n", quality);
+
+ xc_get_signal_level(priv, &signal);
+ dprintk(1, "*** Signal level = -%ddB (%d)\n", signal >> 8, signal);
+
+ xc_get_noise_level(priv, &noise);
+ dprintk(1, "*** Noise level = %ddB (%d)\n", noise >> 8, noise);
}
-static int xc4000_set_params(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *params)
+static int xc4000_set_params(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ u32 delsys = c->delivery_system;
+ u32 bw = c->bandwidth_hz;
struct xc4000_priv *priv = fe->tuner_priv;
unsigned int type;
int ret = -EREMOTEIO;
- dprintk(1, "%s() frequency=%d (Hz)\n", __func__, params->frequency);
+ dprintk(1, "%s() frequency=%d (Hz)\n", __func__, c->frequency);
mutex_lock(&priv->lock);
- if (fe->ops.info.type == FE_ATSC) {
- dprintk(1, "%s() ATSC\n", __func__);
- switch (params->u.vsb.modulation) {
- case VSB_8:
- case VSB_16:
- dprintk(1, "%s() VSB modulation\n", __func__);
- priv->rf_mode = XC_RF_MODE_AIR;
- priv->freq_hz = params->frequency - 1750000;
- priv->bandwidth = BANDWIDTH_6_MHZ;
- priv->video_standard = XC4000_DTV6;
- type = DTV6;
- break;
- case QAM_64:
- case QAM_256:
- case QAM_AUTO:
- dprintk(1, "%s() QAM modulation\n", __func__);
- priv->rf_mode = XC_RF_MODE_CABLE;
- priv->freq_hz = params->frequency - 1750000;
- priv->bandwidth = BANDWIDTH_6_MHZ;
- priv->video_standard = XC4000_DTV6;
- type = DTV6;
- break;
- default:
- ret = -EINVAL;
- goto fail;
- }
- } else if (fe->ops.info.type == FE_OFDM) {
+ switch (delsys) {
+ case SYS_ATSC:
+ dprintk(1, "%s() VSB modulation\n", __func__);
+ priv->rf_mode = XC_RF_MODE_AIR;
+ priv->freq_hz = c->frequency - 1750000;
+ priv->video_standard = XC4000_DTV6;
+ type = DTV6;
+ break;
+ case SYS_DVBC_ANNEX_B:
+ dprintk(1, "%s() QAM modulation\n", __func__);
+ priv->rf_mode = XC_RF_MODE_CABLE;
+ priv->freq_hz = c->frequency - 1750000;
+ priv->video_standard = XC4000_DTV6;
+ type = DTV6;
+ break;
+ case SYS_DVBT:
+ case SYS_DVBT2:
dprintk(1, "%s() OFDM\n", __func__);
- switch (params->u.ofdm.bandwidth) {
- case BANDWIDTH_6_MHZ:
- priv->bandwidth = BANDWIDTH_6_MHZ;
+ if (bw == 0) {
+ if (c->frequency < 400000000) {
+ priv->freq_hz = c->frequency - 2250000;
+ } else {
+ priv->freq_hz = c->frequency - 2750000;
+ }
+ priv->video_standard = XC4000_DTV7_8;
+ type = DTV78;
+ } else if (bw <= 6000000) {
priv->video_standard = XC4000_DTV6;
- priv->freq_hz = params->frequency - 1750000;
+ priv->freq_hz = c->frequency - 1750000;
type = DTV6;
- break;
- case BANDWIDTH_7_MHZ:
- priv->bandwidth = BANDWIDTH_7_MHZ;
+ } else if (bw <= 7000000) {
priv->video_standard = XC4000_DTV7;
- priv->freq_hz = params->frequency - 2250000;
+ priv->freq_hz = c->frequency - 2250000;
type = DTV7;
- break;
- case BANDWIDTH_8_MHZ:
- priv->bandwidth = BANDWIDTH_8_MHZ;
+ } else {
priv->video_standard = XC4000_DTV8;
- priv->freq_hz = params->frequency - 2750000;
+ priv->freq_hz = c->frequency - 2750000;
type = DTV8;
- break;
- case BANDWIDTH_AUTO:
- if (params->frequency < 400000000) {
- priv->bandwidth = BANDWIDTH_7_MHZ;
- priv->freq_hz = params->frequency - 2250000;
- } else {
- priv->bandwidth = BANDWIDTH_8_MHZ;
- priv->freq_hz = params->frequency - 2750000;
- }
- priv->video_standard = XC4000_DTV7_8;
- type = DTV78;
- break;
- default:
- printk(KERN_ERR "xc4000 bandwidth not set!\n");
- ret = -EINVAL;
- goto fail;
}
priv->rf_mode = XC_RF_MODE_AIR;
- } else {
- printk(KERN_ERR "xc4000 modulation type not supported!\n");
+ break;
+ default:
+ printk(KERN_ERR "xc4000 delivery system not supported!\n");
ret = -EINVAL;
goto fail;
}
@@ -1209,6 +1208,8 @@ static int xc4000_set_params(struct dvb_frontend *fe,
if (check_firmware(fe, type, 0, priv->if_khz) != 0)
goto fail;
+ priv->bandwidth = c->bandwidth_hz;
+
ret = xc_set_signal_source(priv, priv->rf_mode);
if (ret != 0) {
printk(KERN_ERR "xc4000: xc_set_signal_source(%d) failed\n",
@@ -1451,6 +1452,71 @@ fail:
return ret;
}
+static int xc4000_get_signal(struct dvb_frontend *fe, u16 *strength)
+{
+ struct xc4000_priv *priv = fe->tuner_priv;
+ u16 value = 0;
+ int rc;
+
+ mutex_lock(&priv->lock);
+ rc = xc4000_readreg(priv, XREG_SIGNAL_LEVEL, &value);
+ mutex_unlock(&priv->lock);
+
+ if (rc < 0)
+ goto ret;
+
+ /* Information from real testing of the DVB-T and radio parts:
+ the coefficient for one dB is 0xff.
+ */
+ tuner_dbg("Signal strength: -%ddB (%05d)\n", value >> 8, value);
+
+ /* all known digital modes */
+ if ((priv->video_standard == XC4000_DTV6) ||
+ (priv->video_standard == XC4000_DTV7) ||
+ (priv->video_standard == XC4000_DTV7_8) ||
+ (priv->video_standard == XC4000_DTV8))
+ goto digital;
+
+ /* In analog mode the NOISE LEVEL is what matters: the signal level
+ depends only on the antenna and amplifier gain and says nothing
+ about the real quality of reception.
+ */
+ mutex_lock(&priv->lock);
+ rc = xc4000_readreg(priv, XREG_NOISE_LEVEL, &value);
+ mutex_unlock(&priv->lock);
+
+ tuner_dbg("Noise level: %ddB (%05d)\n", value >> 8, value);
+
+ /* highest noise level: 32dB */
+ if (value >= 0x2000) {
+ value = 0;
+ } else {
+ value = ~value << 3;
+ }
+
+ goto ret;
+
+ /* In digital mode the SIGNAL LEVEL is what matters; the real noise
+ level is stored in the demodulator registers.
+ */
+digital:
+ /* best signal: -50dB */
+ if (value <= 0x3200) {
+ value = 0xffff;
+ /* minimum: -114dB - should be 0x7200 but real zero is 0x713A */
+ } else if (value >= 0x713A) {
+ value = 0;
+ } else {
+ value = ~(value - 0x3200) << 2;
+ }
+
+ret:
+ *strength = value;
+
+ return rc;
+}
+
static int xc4000_get_frequency(struct dvb_frontend *fe, u32 *freq)
{
struct xc4000_priv *priv = fe->tuner_priv;
@@ -1578,6 +1644,7 @@ static const struct dvb_tuner_ops xc4000_tuner_ops = {
.set_params = xc4000_set_params,
.set_analog_params = xc4000_set_analog_params,
.get_frequency = xc4000_get_frequency,
+ .get_rf_strength = xc4000_get_signal,
.get_bandwidth = xc4000_get_bandwidth,
.get_status = xc4000_get_status
};
@@ -1605,7 +1672,7 @@ struct dvb_frontend *xc4000_attach(struct dvb_frontend *fe,
break;
case 1:
/* new tuner instance */
- priv->bandwidth = BANDWIDTH_6_MHZ;
+ priv->bandwidth = 6000000;
/* set default configuration */
priv->if_khz = 4560;
priv->default_pm = 0;
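
xc4000_get_signal converts the XREG_SIGNAL_LEVEL reading, which reports attenuation in roughly 1/256 dB units (the driver prints value >> 8 as dB), into the 16-bit strength the DVB API expects: -50 dB or better saturates at 0xffff, the observed floor 0x713A (about -114 dB) maps to 0, and everything between is offset, inverted and scaled. A standalone sketch of the digital-mode scaling, reusing the same constants; the example register value is arbitrary.

/* Standalone sketch of the digital-mode strength scaling; the constants
 * come from the hunk above, the example register value is arbitrary. */
#include <stdio.h>

static unsigned short scale_digital(unsigned short value)
{
	if (value <= 0x3200)		/* -50 dB or better: best case */
		return 0xffff;
	if (value >= 0x713A)		/* about -114 dB: observed floor */
		return 0;
	return (unsigned short)(~(value - 0x3200) << 2);
}

int main(void)
{
	unsigned short value = 0x4000;	/* arbitrary example: -64 dB */

	printf("-%u dB -> strength %u\n", value >> 8, scale_digital(value));
	return 0;
}
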
diff --git a/drivers/media/common/tuners/xc5000.c b/drivers/media/common/tuners/xc5000.c
index aa1b2e844d32..296df05b8cda 100644
--- a/drivers/media/common/tuners/xc5000.c
+++ b/drivers/media/common/tuners/xc5000.c
@@ -628,20 +628,13 @@ static void xc_debug_dump(struct xc5000_priv *priv)
dprintk(1, "*** Quality (0:<8dB, 7:>56dB) = %d\n", quality);
}
-/*
- * As defined on EN 300 429, the DVB-C roll-off factor is 0.15.
- * So, the amount of the needed bandwith is given by:
- * Bw = Symbol_rate * (1 + 0.15)
- * As such, the maximum symbol rate supported by 6 MHz is given by:
- * max_symbol_rate = 6 MHz / 1.15 = 5217391 Bauds
- */
-#define MAX_SYMBOL_RATE_6MHz 5217391
-
-static int xc5000_set_params(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *params)
+static int xc5000_set_params(struct dvb_frontend *fe)
{
+ int ret, b;
struct xc5000_priv *priv = fe->tuner_priv;
- int ret;
+ u32 bw = fe->dtv_property_cache.bandwidth_hz;
+ u32 freq = fe->dtv_property_cache.frequency;
+ u32 delsys = fe->dtv_property_cache.delivery_system;
if (xc5000_is_firmware_loaded(fe) != XC_RESULT_SUCCESS) {
if (xc_load_fw_and_init_tuner(fe) != XC_RESULT_SUCCESS) {
@@ -650,88 +643,69 @@ static int xc5000_set_params(struct dvb_frontend *fe,
}
}
- dprintk(1, "%s() frequency=%d (Hz)\n", __func__, params->frequency);
-
- if (fe->ops.info.type == FE_ATSC) {
- dprintk(1, "%s() ATSC\n", __func__);
- switch (params->u.vsb.modulation) {
- case VSB_8:
- case VSB_16:
- dprintk(1, "%s() VSB modulation\n", __func__);
- priv->rf_mode = XC_RF_MODE_AIR;
- priv->freq_hz = params->frequency - 1750000;
- priv->bandwidth = BANDWIDTH_6_MHZ;
- priv->video_standard = DTV6;
- break;
- case QAM_64:
- case QAM_256:
- case QAM_AUTO:
- dprintk(1, "%s() QAM modulation\n", __func__);
- priv->rf_mode = XC_RF_MODE_CABLE;
- priv->freq_hz = params->frequency - 1750000;
- priv->bandwidth = BANDWIDTH_6_MHZ;
- priv->video_standard = DTV6;
- break;
- default:
- return -EINVAL;
- }
- } else if (fe->ops.info.type == FE_OFDM) {
+ dprintk(1, "%s() frequency=%d (Hz)\n", __func__, freq);
+
+ switch (delsys) {
+ case SYS_ATSC:
+ dprintk(1, "%s() VSB modulation\n", __func__);
+ priv->rf_mode = XC_RF_MODE_AIR;
+ priv->freq_hz = freq - 1750000;
+ priv->video_standard = DTV6;
+ break;
+ case SYS_DVBC_ANNEX_B:
+ dprintk(1, "%s() QAM modulation\n", __func__);
+ priv->rf_mode = XC_RF_MODE_CABLE;
+ priv->freq_hz = freq - 1750000;
+ priv->video_standard = DTV6;
+ break;
+ case SYS_DVBT:
+ case SYS_DVBT2:
dprintk(1, "%s() OFDM\n", __func__);
- switch (params->u.ofdm.bandwidth) {
- case BANDWIDTH_6_MHZ:
- priv->bandwidth = BANDWIDTH_6_MHZ;
+ switch (bw) {
+ case 6000000:
priv->video_standard = DTV6;
- priv->freq_hz = params->frequency - 1750000;
+ priv->freq_hz = freq - 1750000;
break;
- case BANDWIDTH_7_MHZ:
- printk(KERN_ERR "xc5000 bandwidth 7MHz not supported\n");
- return -EINVAL;
- case BANDWIDTH_8_MHZ:
- priv->bandwidth = BANDWIDTH_8_MHZ;
+ case 7000000:
+ priv->video_standard = DTV7;
+ priv->freq_hz = freq - 2250000;
+ break;
+ case 8000000:
priv->video_standard = DTV8;
- priv->freq_hz = params->frequency - 2750000;
+ priv->freq_hz = freq - 2750000;
break;
default:
printk(KERN_ERR "xc5000 bandwidth not set!\n");
return -EINVAL;
}
priv->rf_mode = XC_RF_MODE_AIR;
- } else if (fe->ops.info.type == FE_QAM) {
- switch (params->u.qam.modulation) {
- case QAM_256:
- case QAM_AUTO:
- case QAM_16:
- case QAM_32:
- case QAM_64:
- case QAM_128:
- dprintk(1, "%s() QAM modulation\n", __func__);
- priv->rf_mode = XC_RF_MODE_CABLE;
- /*
- * Using a 8MHz bandwidth sometimes fail
- * with 6MHz-spaced channels, due to inter-carrier
- * interference. So, use DTV6 firmware
- */
- if (params->u.qam.symbol_rate <= MAX_SYMBOL_RATE_6MHz) {
- priv->bandwidth = BANDWIDTH_6_MHZ;
- priv->video_standard = DTV6;
- priv->freq_hz = params->frequency - 1750000;
- } else {
- priv->bandwidth = BANDWIDTH_8_MHZ;
- priv->video_standard = DTV7_8;
- priv->freq_hz = params->frequency - 2750000;
- }
- break;
- default:
- dprintk(1, "%s() Unsupported QAM type\n", __func__);
- return -EINVAL;
+ case SYS_DVBC_ANNEX_A:
+ case SYS_DVBC_ANNEX_C:
+ dprintk(1, "%s() QAM modulation\n", __func__);
+ priv->rf_mode = XC_RF_MODE_CABLE;
+ if (bw <= 6000000) {
+ priv->video_standard = DTV6;
+ priv->freq_hz = freq - 1750000;
+ b = 6;
+ } else if (bw <= 7000000) {
+ priv->video_standard = DTV7;
+ priv->freq_hz = freq - 2250000;
+ b = 7;
+ } else {
+ priv->video_standard = DTV7_8;
+ priv->freq_hz = freq - 2750000;
+ b = 8;
}
- } else {
- printk(KERN_ERR "xc5000 modulation type not supported!\n");
+ dprintk(1, "%s() Bandwidth %dMHz (%d)\n", __func__,
+ b, bw);
+ break;
+ default:
+ printk(KERN_ERR "xc5000: delivery system is not supported!\n");
return -EINVAL;
}
- dprintk(1, "%s() frequency=%d (compensated)\n",
- __func__, priv->freq_hz);
+ dprintk(1, "%s() frequency=%d (compensated to %d)\n",
+ __func__, freq, priv->freq_hz);
ret = xc_SetSignalSource(priv, priv->rf_mode);
if (ret != XC_RESULT_SUCCESS) {
@@ -763,6 +737,8 @@ static int xc5000_set_params(struct dvb_frontend *fe,
if (debug)
xc_debug_dump(priv);
+ priv->bandwidth = bw;
+
return 0;
}
@@ -968,6 +944,14 @@ static int xc5000_get_frequency(struct dvb_frontend *fe, u32 *freq)
return 0;
}
+static int xc5000_get_if_frequency(struct dvb_frontend *fe, u32 *freq)
+{
+ struct xc5000_priv *priv = fe->tuner_priv;
+ dprintk(1, "%s()\n", __func__);
+ *freq = priv->if_khz * 1000;
+ return 0;
+}
+
static int xc5000_get_bandwidth(struct dvb_frontend *fe, u32 *bw)
{
struct xc5000_priv *priv = fe->tuner_priv;
@@ -1108,6 +1092,7 @@ static const struct dvb_tuner_ops xc5000_tuner_ops = {
.set_params = xc5000_set_params,
.set_analog_params = xc5000_set_analog_params,
.get_frequency = xc5000_get_frequency,
+ .get_if_frequency = xc5000_get_if_frequency,
.get_bandwidth = xc5000_get_bandwidth,
.get_status = xc5000_get_status
};
@@ -1135,7 +1120,7 @@ struct dvb_frontend *xc5000_attach(struct dvb_frontend *fe,
break;
case 1:
/* new tuner instance */
- priv->bandwidth = BANDWIDTH_6_MHZ;
+ priv->bandwidth = 6000000;
fe->tuner_priv = priv;
break;
default:
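
Like the xc4000, the xc5000 now derives its RF programming from bandwidth_hz, subtracting a per-bandwidth offset from the requested centre frequency (1.75 MHz for 6 MHz channels, 2.25 MHz for 7 MHz, 2.75 MHz for 8 MHz), and gains a get_if_frequency hook that reports if_khz * 1000. The standalone sketch below shows only the offset arithmetic, using threshold tests for brevity; the request is an arbitrary example.

/* Standalone sketch of the per-bandwidth centre-frequency compensation; the
 * offsets mirror the hunks above, the request is an arbitrary example. */
#include <stdio.h>

int main(void)
{
	unsigned freq = 658000000, bw = 8000000;	/* arbitrary request */
	unsigned offset = (bw <= 6000000) ? 1750000 :
			  (bw <= 7000000) ? 2250000 : 2750000;

	printf("programmed freq_hz = %u\n", freq - offset);
	return 0;
}
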
diff --git a/drivers/media/dvb/b2c2/flexcop.c b/drivers/media/dvb/b2c2/flexcop.c
index 2df1b0214dcd..b1e8c99f469b 100644
--- a/drivers/media/dvb/b2c2/flexcop.c
+++ b/drivers/media/dvb/b2c2/flexcop.c
@@ -86,7 +86,8 @@ static int flexcop_dvb_init(struct flexcop_device *fc)
fc->demux.stop_feed = flexcop_dvb_stop_feed;
fc->demux.write_to_decoder = NULL;
- if ((ret = dvb_dmx_init(&fc->demux)) < 0) {
+ ret = dvb_dmx_init(&fc->demux);
+ if (ret < 0) {
err("dvb_dmx failed: error %d", ret);
goto err_dmx;
}
@@ -96,32 +97,42 @@ static int flexcop_dvb_init(struct flexcop_device *fc)
fc->dmxdev.filternum = fc->demux.feednum;
fc->dmxdev.demux = &fc->demux.dmx;
fc->dmxdev.capabilities = 0;
- if ((ret = dvb_dmxdev_init(&fc->dmxdev, &fc->dvb_adapter)) < 0) {
+ ret = dvb_dmxdev_init(&fc->dmxdev, &fc->dvb_adapter);
+ if (ret < 0) {
err("dvb_dmxdev_init failed: error %d", ret);
goto err_dmx_dev;
}
- if ((ret = fc->demux.dmx.add_frontend(&fc->demux.dmx, &fc->hw_frontend)) < 0) {
+ ret = fc->demux.dmx.add_frontend(&fc->demux.dmx, &fc->hw_frontend);
+ if (ret < 0) {
err("adding hw_frontend to dmx failed: error %d", ret);
goto err_dmx_add_hw_frontend;
}
fc->mem_frontend.source = DMX_MEMORY_FE;
- if ((ret = fc->demux.dmx.add_frontend(&fc->demux.dmx, &fc->mem_frontend)) < 0) {
+ ret = fc->demux.dmx.add_frontend(&fc->demux.dmx, &fc->mem_frontend);
+ if (ret < 0) {
err("adding mem_frontend to dmx failed: error %d", ret);
goto err_dmx_add_mem_frontend;
}
- if ((ret = fc->demux.dmx.connect_frontend(&fc->demux.dmx, &fc->hw_frontend)) < 0) {
+ ret = fc->demux.dmx.connect_frontend(&fc->demux.dmx, &fc->hw_frontend);
+ if (ret < 0) {
err("connect frontend failed: error %d", ret);
goto err_connect_frontend;
}
- dvb_net_init(&fc->dvb_adapter, &fc->dvbnet, &fc->demux.dmx);
+ ret = dvb_net_init(&fc->dvb_adapter, &fc->dvbnet, &fc->demux.dmx);
+ if (ret < 0) {
+ err("dvb_net_init failed: error %d", ret);
+ goto err_net;
+ }
fc->init_state |= FC_STATE_DVB_INIT;
return 0;
+err_net:
+ fc->demux.dmx.disconnect_frontend(&fc->demux.dmx);
err_connect_frontend:
fc->demux.dmx.remove_frontend(&fc->demux.dmx, &fc->mem_frontend);
err_dmx_add_mem_frontend:
@@ -254,7 +265,8 @@ int flexcop_device_initialize(struct flexcop_device *fc)
flexcop_hw_filter_init(fc);
flexcop_smc_ctrl(fc, 0);
- if ((ret = flexcop_dvb_init(fc)))
+ ret = flexcop_dvb_init(fc);
+ if (ret)
goto error;
/* i2c has to be done before doing EEProm stuff -
@@ -272,7 +284,8 @@ int flexcop_device_initialize(struct flexcop_device *fc)
} else
warn("reading of MAC address failed.\n");
- if ((ret = flexcop_frontend_init(fc)))
+ ret = flexcop_frontend_init(fc);
+ if (ret)
goto error;
flexcop_device_name(fc,"initialization of","complete");
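
The flexcop change is a different kind of cleanup: assignments move out of the if conditions and the return value of dvb_net_init() is finally checked, with a new err_net label unwinding the connected frontend. A minimal standalone sketch of that goto-unwind idiom follows; step_a/step_b/step_c and the undo helpers are hypothetical stand-ins for the real init and teardown calls.

/* Standalone sketch of the goto-based unwinding idiom; step_a/step_b/step_c
 * are hypothetical stand-ins for calls such as dvb_dmx_init()/dvb_net_init(). */
#include <stdio.h>

static int step_a(void) { return 0; }
static int step_b(void) { return 0; }
static int step_c(void) { return -1; }		/* pretend the last step fails */
static void undo_b(void) { puts("undo b"); }
static void undo_a(void) { puts("undo a"); }

static int init_all(void)
{
	int ret;

	ret = step_a();
	if (ret < 0)
		goto err;
	ret = step_b();
	if (ret < 0)
		goto err_a;
	ret = step_c();
	if (ret < 0)
		goto err_b;
	return 0;

err_b:
	undo_b();
err_a:
	undo_a();
err:
	return ret;
}

int main(void)
{
	printf("init_all() = %d\n", init_all());
	return 0;
}
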
diff --git a/drivers/media/dvb/bt8xx/dst.c b/drivers/media/dvb/bt8xx/dst.c
index caa4e18ed1c1..430b3eb11815 100644
--- a/drivers/media/dvb/bt8xx/dst.c
+++ b/drivers/media/dvb/bt8xx/dst.c
@@ -386,7 +386,7 @@ static int dst_set_freq(struct dst_state *state, u32 freq)
return 0;
}
-static int dst_set_bandwidth(struct dst_state *state, fe_bandwidth_t bandwidth)
+static int dst_set_bandwidth(struct dst_state *state, u32 bandwidth)
{
state->bandwidth = bandwidth;
@@ -394,7 +394,7 @@ static int dst_set_bandwidth(struct dst_state *state, fe_bandwidth_t bandwidth)
return -EOPNOTSUPP;
switch (bandwidth) {
- case BANDWIDTH_6_MHZ:
+ case 6000000:
if (state->dst_hw_cap & DST_TYPE_HAS_CA)
state->tx_tuna[7] = 0x06;
else {
@@ -402,7 +402,7 @@ static int dst_set_bandwidth(struct dst_state *state, fe_bandwidth_t bandwidth)
state->tx_tuna[7] = 0x00;
}
break;
- case BANDWIDTH_7_MHZ:
+ case 7000000:
if (state->dst_hw_cap & DST_TYPE_HAS_CA)
state->tx_tuna[7] = 0x07;
else {
@@ -410,7 +410,7 @@ static int dst_set_bandwidth(struct dst_state *state, fe_bandwidth_t bandwidth)
state->tx_tuna[7] = 0x00;
}
break;
- case BANDWIDTH_8_MHZ:
+ case 8000000:
if (state->dst_hw_cap & DST_TYPE_HAS_CA)
state->tx_tuna[7] = 0x08;
else {
@@ -1561,7 +1561,7 @@ static int dst_init(struct dvb_frontend *fe)
state->tone = SEC_TONE_OFF;
state->diseq_flags = 0;
state->k22 = 0x02;
- state->bandwidth = BANDWIDTH_7_MHZ;
+ state->bandwidth = 7000000;
state->cur_jiff = jiffies;
if (state->dst_type == DST_TYPE_IS_SAT)
memcpy(state->tx_tuna, ((state->type_flags & DST_TYPE_HAS_VLF) ? sat_tuna_188 : sat_tuna_204), sizeof (sat_tuna_204));
@@ -1609,8 +1609,9 @@ static int dst_read_snr(struct dvb_frontend *fe, u16 *snr)
return retval;
}
-static int dst_set_frontend(struct dvb_frontend *fe, struct dvb_frontend_parameters *p)
+static int dst_set_frontend(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
int retval = -EINVAL;
struct dst_state *state = fe->demodulator_priv;
@@ -1623,17 +1624,17 @@ static int dst_set_frontend(struct dvb_frontend *fe, struct dvb_frontend_paramet
if (state->dst_type == DST_TYPE_IS_SAT) {
if (state->type_flags & DST_TYPE_HAS_OBS_REGS)
dst_set_inversion(state, p->inversion);
- dst_set_fec(state, p->u.qpsk.fec_inner);
- dst_set_symbolrate(state, p->u.qpsk.symbol_rate);
+ dst_set_fec(state, p->fec_inner);
+ dst_set_symbolrate(state, p->symbol_rate);
dst_set_polarization(state);
- dprintk(verbose, DST_DEBUG, 1, "Set Symbolrate=[%d]", p->u.qpsk.symbol_rate);
+ dprintk(verbose, DST_DEBUG, 1, "Set Symbolrate=[%d]", p->symbol_rate);
} else if (state->dst_type == DST_TYPE_IS_TERR)
- dst_set_bandwidth(state, p->u.ofdm.bandwidth);
+ dst_set_bandwidth(state, p->bandwidth_hz);
else if (state->dst_type == DST_TYPE_IS_CABLE) {
- dst_set_fec(state, p->u.qam.fec_inner);
- dst_set_symbolrate(state, p->u.qam.symbol_rate);
- dst_set_modulation(state, p->u.qam.modulation);
+ dst_set_fec(state, p->fec_inner);
+ dst_set_symbolrate(state, p->symbol_rate);
+ dst_set_modulation(state, p->modulation);
}
retval = dst_write_tuna(fe);
}
@@ -1642,31 +1643,32 @@ static int dst_set_frontend(struct dvb_frontend *fe, struct dvb_frontend_paramet
}
static int dst_tune_frontend(struct dvb_frontend* fe,
- struct dvb_frontend_parameters* p,
+ bool re_tune,
unsigned int mode_flags,
unsigned int *delay,
fe_status_t *status)
{
struct dst_state *state = fe->demodulator_priv;
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
- if (p != NULL) {
+ if (re_tune) {
dst_set_freq(state, p->frequency);
dprintk(verbose, DST_DEBUG, 1, "Set Frequency=[%d]", p->frequency);
if (state->dst_type == DST_TYPE_IS_SAT) {
if (state->type_flags & DST_TYPE_HAS_OBS_REGS)
dst_set_inversion(state, p->inversion);
- dst_set_fec(state, p->u.qpsk.fec_inner);
- dst_set_symbolrate(state, p->u.qpsk.symbol_rate);
+ dst_set_fec(state, p->fec_inner);
+ dst_set_symbolrate(state, p->symbol_rate);
dst_set_polarization(state);
- dprintk(verbose, DST_DEBUG, 1, "Set Symbolrate=[%d]", p->u.qpsk.symbol_rate);
+ dprintk(verbose, DST_DEBUG, 1, "Set Symbolrate=[%d]", p->symbol_rate);
} else if (state->dst_type == DST_TYPE_IS_TERR)
- dst_set_bandwidth(state, p->u.ofdm.bandwidth);
+ dst_set_bandwidth(state, p->bandwidth_hz);
else if (state->dst_type == DST_TYPE_IS_CABLE) {
- dst_set_fec(state, p->u.qam.fec_inner);
- dst_set_symbolrate(state, p->u.qam.symbol_rate);
- dst_set_modulation(state, p->u.qam.modulation);
+ dst_set_fec(state, p->fec_inner);
+ dst_set_symbolrate(state, p->symbol_rate);
+ dst_set_modulation(state, p->modulation);
}
dst_write_tuna(fe);
}
@@ -1683,22 +1685,23 @@ static int dst_get_tuning_algo(struct dvb_frontend *fe)
return dst_algo ? DVBFE_ALGO_HW : DVBFE_ALGO_SW;
}
-static int dst_get_frontend(struct dvb_frontend *fe, struct dvb_frontend_parameters *p)
+static int dst_get_frontend(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct dst_state *state = fe->demodulator_priv;
p->frequency = state->decode_freq;
if (state->dst_type == DST_TYPE_IS_SAT) {
if (state->type_flags & DST_TYPE_HAS_OBS_REGS)
p->inversion = state->inversion;
- p->u.qpsk.symbol_rate = state->symbol_rate;
- p->u.qpsk.fec_inner = dst_get_fec(state);
+ p->symbol_rate = state->symbol_rate;
+ p->fec_inner = dst_get_fec(state);
} else if (state->dst_type == DST_TYPE_IS_TERR) {
- p->u.ofdm.bandwidth = state->bandwidth;
+ p->bandwidth_hz = state->bandwidth;
} else if (state->dst_type == DST_TYPE_IS_CABLE) {
- p->u.qam.symbol_rate = state->symbol_rate;
- p->u.qam.fec_inner = dst_get_fec(state);
- p->u.qam.modulation = dst_get_modulation(state);
+ p->symbol_rate = state->symbol_rate;
+ p->fec_inner = dst_get_fec(state);
+ p->modulation = dst_get_modulation(state);
}
return 0;
@@ -1756,10 +1759,9 @@ struct dst_state *dst_attach(struct dst_state *state, struct dvb_adapter *dvb_ad
EXPORT_SYMBOL(dst_attach);
static struct dvb_frontend_ops dst_dvbt_ops = {
-
+ .delsys = { SYS_DVBT },
.info = {
.name = "DST DVB-T",
- .type = FE_OFDM,
.frequency_min = 137000000,
.frequency_max = 858000000,
.frequency_stepsize = 166667,
@@ -1786,10 +1788,9 @@ static struct dvb_frontend_ops dst_dvbt_ops = {
};
static struct dvb_frontend_ops dst_dvbs_ops = {
-
+ .delsys = { SYS_DVBS },
.info = {
.name = "DST DVB-S",
- .type = FE_QPSK,
.frequency_min = 950000,
.frequency_max = 2150000,
.frequency_stepsize = 1000, /* kHz for QPSK frontends */
@@ -1816,10 +1817,9 @@ static struct dvb_frontend_ops dst_dvbs_ops = {
};
static struct dvb_frontend_ops dst_dvbc_ops = {
-
+ .delsys = { SYS_DVBC_ANNEX_A },
.info = {
.name = "DST DVB-C",
- .type = FE_QAM,
.frequency_stepsize = 62500,
.frequency_min = 51000000,
.frequency_max = 858000000,
@@ -1846,9 +1846,9 @@ static struct dvb_frontend_ops dst_dvbc_ops = {
};
static struct dvb_frontend_ops dst_atsc_ops = {
+ .delsys = { SYS_ATSC },
.info = {
.name = "DST ATSC",
- .type = FE_ATSC,
.frequency_stepsize = 62500,
.frequency_min = 510000000,
.frequency_max = 858000000,
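
Finally, each dst frontend-ops table drops the old .info.type field and declares its delivery systems through the new .delsys array, which is what the DVBv5 core uses to route SYS_* requests. The standalone sketch below shows the shape of such a declaration and a lookup over it; the struct is a trimmed stand-in for struct dvb_frontend_ops and the array length is illustrative.

/* Standalone sketch of an ops table declaring its delivery systems; the
 * struct below is a trimmed stand-in for struct dvb_frontend_ops. */
#include <stdio.h>

enum delsys { SYS_UNDEFINED, SYS_DVBT, SYS_DVBS, SYS_DVBC_ANNEX_A, SYS_ATSC };

struct fake_fe_ops {
	enum delsys delsys[8];		/* fixed-size list, terminated by SYS_UNDEFINED */
	const char *name;
};

static const struct fake_fe_ops dst_dvbt_like_ops = {
	.delsys = { SYS_DVBT },		/* replaces .info.type = FE_OFDM */
	.name	= "DST DVB-T",
};

static int supports(const struct fake_fe_ops *ops, enum delsys d)
{
	int i;

	for (i = 0; i < 8 && ops->delsys[i] != SYS_UNDEFINED; i++)
		if (ops->delsys[i] == d)
			return 1;
	return 0;
}

int main(void)
{
	printf("supports DVB-T: %d\n", supports(&dst_dvbt_like_ops, SYS_DVBT));
	return 0;
}
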
diff --git a/drivers/media/dvb/bt8xx/dst_common.h b/drivers/media/dvb/bt8xx/dst_common.h
index d88cf2add82b..d70d98f1a571 100644
--- a/drivers/media/dvb/bt8xx/dst_common.h
+++ b/drivers/media/dvb/bt8xx/dst_common.h
@@ -124,7 +124,7 @@ struct dst_state {
u16 decode_snr;
unsigned long cur_jiff;
u8 k22;
- fe_bandwidth_t bandwidth;
+ u32 bandwidth;
u32 dst_hw_cap;
u8 dst_fw_version;
fe_sec_mini_cmd_t minicmd;
diff --git a/drivers/media/dvb/bt8xx/dvb-bt8xx.c b/drivers/media/dvb/bt8xx/dvb-bt8xx.c
index 521d69104982..81fab9adc1ca 100644
--- a/drivers/media/dvb/bt8xx/dvb-bt8xx.c
+++ b/drivers/media/dvb/bt8xx/dvb-bt8xx.c
@@ -19,6 +19,8 @@
*
*/
+#define pr_fmt(fmt) "dvb_bt8xx: " fmt
+
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -148,8 +150,9 @@ static int thomson_dtt7579_demod_init(struct dvb_frontend* fe)
return 0;
}
-static int thomson_dtt7579_tuner_calc_regs(struct dvb_frontend* fe, struct dvb_frontend_parameters* params, u8* pllbuf, int buf_len)
+static int thomson_dtt7579_tuner_calc_regs(struct dvb_frontend *fe, u8* pllbuf, int buf_len)
{
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
u32 div;
unsigned char bs = 0;
unsigned char cp = 0;
@@ -157,18 +160,18 @@ static int thomson_dtt7579_tuner_calc_regs(struct dvb_frontend* fe, struct dvb_f
if (buf_len < 5)
return -EINVAL;
- div = (((params->frequency + 83333) * 3) / 500000) + IF_FREQUENCYx6;
+ div = (((c->frequency + 83333) * 3) / 500000) + IF_FREQUENCYx6;
- if (params->frequency < 542000000)
+ if (c->frequency < 542000000)
cp = 0xb4;
- else if (params->frequency < 771000000)
+ else if (c->frequency < 771000000)
cp = 0xbc;
else
cp = 0xf4;
- if (params->frequency == 0)
+ if (c->frequency == 0)
bs = 0x03;
- else if (params->frequency < 443250000)
+ else if (c->frequency < 443250000)
bs = 0x02;
else
bs = 0x08;
@@ -191,13 +194,12 @@ static struct zl10353_config thomson_dtt7579_zl10353_config = {
.demod_address = 0x0f,
};
-static int cx24108_tuner_set_params(struct dvb_frontend* fe, struct dvb_frontend_parameters* params)
+static int cx24108_tuner_set_params(struct dvb_frontend *fe)
{
- u32 freq = params->frequency;
-
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ u32 freq = c->frequency;
int i, a, n, pump;
u32 band, pll;
-
u32 osci[]={950000,1019000,1075000,1178000,1296000,1432000,
1576000,1718000,1856000,2036000,2150000};
u32 bandsel[]={0,0x00020000,0x00040000,0x00100800,0x00101000,
@@ -205,7 +207,7 @@ static int cx24108_tuner_set_params(struct dvb_frontend* fe, struct dvb_frontend
0x00120000,0x00140000};
#define XTAL 1011100 /* Hz, really 1.0111 MHz and a /10 prescaler */
- printk("cx24108 debug: entering SetTunerFreq, freq=%d\n",freq);
+ dprintk("cx24108 debug: entering SetTunerFreq, freq=%d\n", freq);
/* This is really the bit driving the tuner chip cx24108 */
@@ -216,7 +218,7 @@ static int cx24108_tuner_set_params(struct dvb_frontend* fe, struct dvb_frontend
/* decide which VCO to use for the input frequency */
for(i = 1; (i < ARRAY_SIZE(osci) - 1) && (osci[i] < freq); i++);
- printk("cx24108 debug: select vco #%d (f=%d)\n",i,freq);
+ dprintk("cx24108 debug: select vco #%d (f=%d)\n", i, freq);
band=bandsel[i];
/* the gain values must be set by SetSymbolrate */
/* compute the pll divider needed, from Conexant data sheet,
@@ -232,7 +234,7 @@ static int cx24108_tuner_set_params(struct dvb_frontend* fe, struct dvb_frontend
((a&0x1f)<<11);
/* everything is shifted left 11 bits to left-align the bits in the
32bit word. Output to the tuner goes MSB-aligned, after all */
- printk("cx24108 debug: pump=%d, n=%d, a=%d\n",pump,n,a);
+ dprintk("cx24108 debug: pump=%d, n=%d, a=%d\n", pump, n, a);
cx24110_pll_write(fe,band);
/* set vga and vca to their widest-band settings, as a precaution.
SetSymbolrate might not be called to set this up */
@@ -267,31 +269,32 @@ static struct cx24110_config pctvsat_config = {
.demod_address = 0x55,
};
-static int microtune_mt7202dtf_tuner_set_params(struct dvb_frontend* fe, struct dvb_frontend_parameters* params)
+static int microtune_mt7202dtf_tuner_set_params(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct dvb_bt8xx_card *card = (struct dvb_bt8xx_card *) fe->dvb->priv;
u8 cfg, cpump, band_select;
u8 data[4];
u32 div;
struct i2c_msg msg = { .addr = 0x60, .flags = 0, .buf = data, .len = sizeof(data) };
- div = (36000000 + params->frequency + 83333) / 166666;
+ div = (36000000 + c->frequency + 83333) / 166666;
cfg = 0x88;
- if (params->frequency < 175000000)
+ if (c->frequency < 175000000)
cpump = 2;
- else if (params->frequency < 390000000)
+ else if (c->frequency < 390000000)
cpump = 1;
- else if (params->frequency < 470000000)
+ else if (c->frequency < 470000000)
cpump = 2;
- else if (params->frequency < 750000000)
+ else if (c->frequency < 750000000)
cpump = 2;
else
cpump = 3;
- if (params->frequency < 175000000)
+ if (c->frequency < 175000000)
band_select = 0x0e;
- else if (params->frequency < 470000000)
+ else if (c->frequency < 470000000)
band_select = 0x05;
else
band_select = 0x03;
@@ -342,50 +345,51 @@ static int advbt771_samsung_tdtc9251dh0_demod_init(struct dvb_frontend* fe)
return 0;
}
-static int advbt771_samsung_tdtc9251dh0_tuner_calc_regs(struct dvb_frontend* fe, struct dvb_frontend_parameters* params, u8* pllbuf, int buf_len)
+static int advbt771_samsung_tdtc9251dh0_tuner_calc_regs(struct dvb_frontend *fe, u8 *pllbuf, int buf_len)
{
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
u32 div;
unsigned char bs = 0;
unsigned char cp = 0;
if (buf_len < 5) return -EINVAL;
- div = (((params->frequency + 83333) * 3) / 500000) + IF_FREQUENCYx6;
+ div = (((c->frequency + 83333) * 3) / 500000) + IF_FREQUENCYx6;
- if (params->frequency < 150000000)
+ if (c->frequency < 150000000)
cp = 0xB4;
- else if (params->frequency < 173000000)
+ else if (c->frequency < 173000000)
cp = 0xBC;
- else if (params->frequency < 250000000)
+ else if (c->frequency < 250000000)
cp = 0xB4;
- else if (params->frequency < 400000000)
+ else if (c->frequency < 400000000)
cp = 0xBC;
- else if (params->frequency < 420000000)
+ else if (c->frequency < 420000000)
cp = 0xF4;
- else if (params->frequency < 470000000)
+ else if (c->frequency < 470000000)
cp = 0xFC;
- else if (params->frequency < 600000000)
+ else if (c->frequency < 600000000)
cp = 0xBC;
- else if (params->frequency < 730000000)
+ else if (c->frequency < 730000000)
cp = 0xF4;
else
cp = 0xFC;
- if (params->frequency < 150000000)
+ if (c->frequency < 150000000)
bs = 0x01;
- else if (params->frequency < 173000000)
+ else if (c->frequency < 173000000)
bs = 0x01;
- else if (params->frequency < 250000000)
+ else if (c->frequency < 250000000)
bs = 0x02;
- else if (params->frequency < 400000000)
+ else if (c->frequency < 400000000)
bs = 0x02;
- else if (params->frequency < 420000000)
+ else if (c->frequency < 420000000)
bs = 0x02;
- else if (params->frequency < 470000000)
+ else if (c->frequency < 470000000)
bs = 0x02;
- else if (params->frequency < 600000000)
+ else if (c->frequency < 600000000)
bs = 0x08;
- else if (params->frequency < 730000000)
+ else if (c->frequency < 730000000)
bs = 0x08;
else
bs = 0x08;
@@ -461,25 +465,26 @@ static struct or51211_config or51211_config = {
.sleep = or51211_sleep,
};
-static int vp3021_alps_tded4_tuner_set_params(struct dvb_frontend* fe, struct dvb_frontend_parameters* params)
+static int vp3021_alps_tded4_tuner_set_params(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct dvb_bt8xx_card *card = (struct dvb_bt8xx_card *) fe->dvb->priv;
u8 buf[4];
u32 div;
struct i2c_msg msg = { .addr = 0x60, .flags = 0, .buf = buf, .len = sizeof(buf) };
- div = (params->frequency + 36166667) / 166667;
+ div = (c->frequency + 36166667) / 166667;
buf[0] = (div >> 8) & 0x7F;
buf[1] = div & 0xFF;
buf[2] = 0x85;
- if ((params->frequency >= 47000000) && (params->frequency < 153000000))
+ if ((c->frequency >= 47000000) && (c->frequency < 153000000))
buf[3] = 0x01;
- else if ((params->frequency >= 153000000) && (params->frequency < 430000000))
+ else if ((c->frequency >= 153000000) && (c->frequency < 430000000))
buf[3] = 0x02;
- else if ((params->frequency >= 430000000) && (params->frequency < 824000000))
+ else if ((c->frequency >= 430000000) && (c->frequency < 824000000))
buf[3] = 0x0C;
- else if ((params->frequency >= 824000000) && (params->frequency < 863000000))
+ else if ((c->frequency >= 824000000) && (c->frequency < 863000000))
buf[3] = 0x8C;
else
return -EINVAL;
@@ -513,31 +518,31 @@ static int digitv_alps_tded4_demod_init(struct dvb_frontend* fe)
return 0;
}
-static int digitv_alps_tded4_tuner_calc_regs(struct dvb_frontend* fe, struct dvb_frontend_parameters* params, u8* pllbuf, int buf_len)
+static int digitv_alps_tded4_tuner_calc_regs(struct dvb_frontend *fe, u8 *pllbuf, int buf_len)
{
u32 div;
- struct dvb_ofdm_parameters *op = &params->u.ofdm;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
if (buf_len < 5)
return -EINVAL;
- div = (((params->frequency + 83333) * 3) / 500000) + IF_FREQUENCYx6;
+ div = (((c->frequency + 83333) * 3) / 500000) + IF_FREQUENCYx6;
pllbuf[0] = 0x61;
pllbuf[1] = (div >> 8) & 0x7F;
pllbuf[2] = div & 0xFF;
pllbuf[3] = 0x85;
- dprintk("frequency %u, div %u\n", params->frequency, div);
+ dprintk("frequency %u, div %u\n", c->frequency, div);
- if (params->frequency < 470000000)
+ if (c->frequency < 470000000)
pllbuf[4] = 0x02;
- else if (params->frequency > 823000000)
+ else if (c->frequency > 823000000)
pllbuf[4] = 0x88;
else
pllbuf[4] = 0x08;
- if (op->bandwidth == 8)
+ if (c->bandwidth_hz == 8000000)
pllbuf[4] |= 0x04;
return 5;
@@ -663,7 +668,7 @@ static void frontend_init(struct dvb_bt8xx_card *card, u32 type)
/* DST is not a frontend driver !!! */
state = kmalloc(sizeof (struct dst_state), GFP_KERNEL);
if (!state) {
- printk("dvb_bt8xx: No memory\n");
+ pr_err("No memory\n");
break;
}
/* Setup the Card */
@@ -673,7 +678,7 @@ static void frontend_init(struct dvb_bt8xx_card *card, u32 type)
state->dst_ca = NULL;
/* DST is not a frontend, attaching the ASIC */
if (dvb_attach(dst_attach, state, &card->dvb_adapter) == NULL) {
- printk("%s: Could not find a Twinhan DST.\n", __func__);
+ pr_err("%s: Could not find a Twinhan DST\n", __func__);
break;
}
/* Attach other DST peripherals if any */
@@ -702,14 +707,14 @@ static void frontend_init(struct dvb_bt8xx_card *card, u32 type)
}
if (card->fe == NULL)
- printk("dvb-bt8xx: A frontend driver was not found for device [%04x:%04x] subsystem [%04x:%04x]\n",
+ pr_err("A frontend driver was not found for device [%04x:%04x] subsystem [%04x:%04x]\n",
card->bt->dev->vendor,
card->bt->dev->device,
card->bt->dev->subsystem_vendor,
card->bt->dev->subsystem_device);
else
if (dvb_register_frontend(&card->dvb_adapter, card->fe)) {
- printk("dvb-bt8xx: Frontend registration failed!\n");
+ pr_err("Frontend registration failed!\n");
dvb_frontend_detach(card->fe);
card->fe = NULL;
}
@@ -723,7 +728,7 @@ static int __devinit dvb_bt8xx_load_card(struct dvb_bt8xx_card *card, u32 type)
THIS_MODULE, &card->bt->dev->dev,
adapter_nr);
if (result < 0) {
- printk("dvb_bt8xx: dvb_register_adapter failed (errno = %d)\n", result);
+ pr_err("dvb_register_adapter failed (errno = %d)\n", result);
return result;
}
card->dvb_adapter.priv = card;
@@ -741,66 +746,69 @@ static int __devinit dvb_bt8xx_load_card(struct dvb_bt8xx_card *card, u32 type)
card->demux.stop_feed = dvb_bt8xx_stop_feed;
card->demux.write_to_decoder = NULL;
- if ((result = dvb_dmx_init(&card->demux)) < 0) {
- printk("dvb_bt8xx: dvb_dmx_init failed (errno = %d)\n", result);
-
- dvb_unregister_adapter(&card->dvb_adapter);
- return result;
+ result = dvb_dmx_init(&card->demux);
+ if (result < 0) {
+ pr_err("dvb_dmx_init failed (errno = %d)\n", result);
+ goto err_unregister_adaptor;
}
card->dmxdev.filternum = 256;
card->dmxdev.demux = &card->demux.dmx;
card->dmxdev.capabilities = 0;
- if ((result = dvb_dmxdev_init(&card->dmxdev, &card->dvb_adapter)) < 0) {
- printk("dvb_bt8xx: dvb_dmxdev_init failed (errno = %d)\n", result);
-
- dvb_dmx_release(&card->demux);
- dvb_unregister_adapter(&card->dvb_adapter);
- return result;
+ result = dvb_dmxdev_init(&card->dmxdev, &card->dvb_adapter);
+ if (result < 0) {
+ pr_err("dvb_dmxdev_init failed (errno = %d)\n", result);
+ goto err_dmx_release;
}
card->fe_hw.source = DMX_FRONTEND_0;
- if ((result = card->demux.dmx.add_frontend(&card->demux.dmx, &card->fe_hw)) < 0) {
- printk("dvb_bt8xx: dvb_dmx_init failed (errno = %d)\n", result);
-
- dvb_dmxdev_release(&card->dmxdev);
- dvb_dmx_release(&card->demux);
- dvb_unregister_adapter(&card->dvb_adapter);
- return result;
+ result = card->demux.dmx.add_frontend(&card->demux.dmx, &card->fe_hw);
+ if (result < 0) {
+ pr_err("dvb_dmx_init failed (errno = %d)\n", result);
+ goto err_dmxdev_release;
}
card->fe_mem.source = DMX_MEMORY_FE;
- if ((result = card->demux.dmx.add_frontend(&card->demux.dmx, &card->fe_mem)) < 0) {
- printk("dvb_bt8xx: dvb_dmx_init failed (errno = %d)\n", result);
-
- card->demux.dmx.remove_frontend(&card->demux.dmx, &card->fe_hw);
- dvb_dmxdev_release(&card->dmxdev);
- dvb_dmx_release(&card->demux);
- dvb_unregister_adapter(&card->dvb_adapter);
- return result;
+ result = card->demux.dmx.add_frontend(&card->demux.dmx, &card->fe_mem);
+ if (result < 0) {
+ pr_err("dvb_dmx_init failed (errno = %d)\n", result);
+ goto err_remove_hw_frontend;
}
- if ((result = card->demux.dmx.connect_frontend(&card->demux.dmx, &card->fe_hw)) < 0) {
- printk("dvb_bt8xx: dvb_dmx_init failed (errno = %d)\n", result);
-
- card->demux.dmx.remove_frontend(&card->demux.dmx, &card->fe_mem);
- card->demux.dmx.remove_frontend(&card->demux.dmx, &card->fe_hw);
- dvb_dmxdev_release(&card->dmxdev);
- dvb_dmx_release(&card->demux);
- dvb_unregister_adapter(&card->dvb_adapter);
- return result;
+ result = card->demux.dmx.connect_frontend(&card->demux.dmx, &card->fe_hw);
+ if (result < 0) {
+ pr_err("dvb_dmx_init failed (errno = %d)\n", result);
+ goto err_remove_mem_frontend;
}
- dvb_net_init(&card->dvb_adapter, &card->dvbnet, &card->demux.dmx);
+ result = dvb_net_init(&card->dvb_adapter, &card->dvbnet, &card->demux.dmx);
+ if (result < 0) {
+ pr_err("dvb_net_init failed (errno = %d)\n", result);
+ goto err_disconnect_frontend;
+ }
tasklet_init(&card->bt->tasklet, dvb_bt8xx_task, (unsigned long) card);
frontend_init(card, type);
return 0;
+
+err_disconnect_frontend:
+ card->demux.dmx.disconnect_frontend(&card->demux.dmx);
+err_remove_mem_frontend:
+ card->demux.dmx.remove_frontend(&card->demux.dmx, &card->fe_mem);
+err_remove_hw_frontend:
+ card->demux.dmx.remove_frontend(&card->demux.dmx, &card->fe_hw);
+err_dmxdev_release:
+ dvb_dmxdev_release(&card->dmxdev);
+err_dmx_release:
+ dvb_dmx_release(&card->demux);
+err_unregister_adaptor:
+ dvb_unregister_adapter(&card->dvb_adapter);
+ return result;
}
static int __devinit dvb_bt8xx_probe(struct bttv_sub_device *sub)
@@ -881,8 +889,7 @@ static int __devinit dvb_bt8xx_probe(struct bttv_sub_device *sub)
break;
default:
- printk(KERN_WARNING "dvb_bt8xx: Unknown bttv card type: %d.\n",
- sub->core->type);
+ pr_err("Unknown bttv card type: %d\n", sub->core->type);
kfree(card);
return -ENODEV;
}
@@ -890,16 +897,14 @@ static int __devinit dvb_bt8xx_probe(struct bttv_sub_device *sub)
dprintk("dvb_bt8xx: identified card%d as %s\n", card->bttv_nr, card->card_name);
if (!(bttv_pci_dev = bttv_get_pcidev(card->bttv_nr))) {
- printk("dvb_bt8xx: no pci device for card %d\n", card->bttv_nr);
+ pr_err("no pci device for card %d\n", card->bttv_nr);
kfree(card);
return -ENODEV;
}
if (!(card->bt = dvb_bt8xx_878_match(card->bttv_nr, bttv_pci_dev))) {
- printk("dvb_bt8xx: unable to determine DMA core of card %d,\n",
- card->bttv_nr);
- printk("dvb_bt8xx: if you have the ALSA bt87x audio driver "
- "installed, try removing it.\n");
+ pr_err("unable to determine DMA core of card %d,\n", card->bttv_nr);
+ pr_err("if you have the ALSA bt87x audio driver installed, try removing it.\n");
kfree(card);
return -ENODEV;
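Every bt8xx tuner helper above is converted the same way: the struct dvb_frontend_parameters *params argument disappears and the values are read from the DVBv5 property cache instead, with the terrestrial bandwidth now carried in Hz rather than as an enum. A condensed sketch of the new callback shape for a hypothetical tuner (divider constants and the PLL helper are illustrative only):

static int example_tuner_set_params(struct dvb_frontend *fe)
{
	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
	u32 div;

	/* all tuning parameters now come from the per-frontend cache */
	div = (c->frequency + 36166667) / 166667;

	/* bandwidth is expressed in Hz (e.g. 8000000), not BANDWIDTH_8_MHZ */
	if (c->bandwidth_hz == 8000000)
		div++;				/* illustrative adjustment only */

	return example_write_pll(fe, div);	/* hypothetical helper */
}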
diff --git a/drivers/media/dvb/ddbridge/ddbridge-core.c b/drivers/media/dvb/ddbridge/ddbridge-core.c
index ba9a643b9c6a..ce4f85849e7b 100644
--- a/drivers/media/dvb/ddbridge/ddbridge-core.c
+++ b/drivers/media/dvb/ddbridge/ddbridge-core.c
@@ -580,7 +580,7 @@ static int demod_attach_drxk(struct ddb_input *input)
memset(&config, 0, sizeof(config));
config.adr = 0x29 + (input->nr & 1);
- fe = input->fe = dvb_attach(drxk_attach, &config, i2c, &input->fe2);
+ fe = input->fe = dvb_attach(drxk_attach, &config, i2c);
if (!input->fe) {
printk(KERN_ERR "No DRXK found!\n");
return -ENODEV;
@@ -1480,7 +1480,7 @@ static const struct file_operations ddb_fops = {
.open = ddb_open,
};
-static char *ddb_devnode(struct device *device, mode_t *mode)
+static char *ddb_devnode(struct device *device, umode_t *mode)
{
struct ddb *dev = dev_get_drvdata(device);
diff --git a/drivers/media/dvb/dm1105/dm1105.c b/drivers/media/dvb/dm1105/dm1105.c
index 55e6533f15e9..a609b3a9b146 100644
--- a/drivers/media/dvb/dm1105/dm1105.c
+++ b/drivers/media/dvb/dm1105/dm1105.c
@@ -1115,11 +1115,14 @@ static int __devinit dm1105_probe(struct pci_dev *pdev,
if (ret < 0)
goto err_remove_mem_frontend;
- ret = frontend_init(dev);
+ ret = dvb_net_init(dvb_adapter, &dev->dvbnet, dmx);
if (ret < 0)
goto err_disconnect_frontend;
- dvb_net_init(dvb_adapter, &dev->dvbnet, dmx);
+ ret = frontend_init(dev);
+ if (ret < 0)
+ goto err_dvb_net;
+
dm1105_ir_init(dev);
INIT_WORK(&dev->work, dm1105_dmx_buffer);
diff --git a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
index 7ea517b7e186..9be65a3b931f 100644
--- a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
+++ b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
@@ -1306,6 +1306,10 @@ static ssize_t dvb_ca_en50221_io_write(struct file *file,
/* fragment the packets & store in the buffer */
while (fragpos < count) {
fraglen = ca->slot_info[slot].link_buf_size - 2;
+ if (fraglen < 0)
+ break;
+ if (fraglen > HOST_LINK_BUF_SIZE - 2)
+ fraglen = HOST_LINK_BUF_SIZE - 2;
if ((count - fragpos) < fraglen)
fraglen = count - fragpos;
diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.c b/drivers/media/dvb/dvb-core/dvb_frontend.c
index 2c0acdb4d811..fbbe545a74cb 100644
--- a/drivers/media/dvb/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb/dvb-core/dvb_frontend.c
@@ -25,6 +25,9 @@
* Or, point your browser to http://www.gnu.org/copyleft/gpl.html
*/
+/* Enables DVBv3 compatibility bits at the headers */
+#define __DVB_CORE__
+
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/sched.h>
@@ -105,7 +108,6 @@ struct dvb_frontend_private {
/* thread/frontend values */
struct dvb_device *dvbdev;
- struct dvb_frontend_parameters parameters_in;
struct dvb_frontend_parameters parameters_out;
struct dvb_fe_events events;
struct semaphore sem;
@@ -139,6 +141,62 @@ struct dvb_frontend_private {
};
static void dvb_frontend_wakeup(struct dvb_frontend *fe);
+static int dtv_get_frontend(struct dvb_frontend *fe,
+ struct dvb_frontend_parameters *p_out);
+
+static bool has_get_frontend(struct dvb_frontend *fe)
+{
+ return fe->ops.get_frontend;
+}
+
+/*
+ * To support DVBv3 API calls, a delivery system should be mapped into one of
+ * the 4 DVBv3 delivery systems (FE_QPSK, FE_QAM, FE_OFDM or FE_ATSC);
+ * otherwise, a DVBv3 call will fail.
+ */
+enum dvbv3_emulation_type {
+ DVBV3_UNKNOWN,
+ DVBV3_QPSK,
+ DVBV3_QAM,
+ DVBV3_OFDM,
+ DVBV3_ATSC,
+};
+
+static enum dvbv3_emulation_type dvbv3_type(u32 delivery_system)
+{
+ switch (delivery_system) {
+ case SYS_DVBC_ANNEX_A:
+ case SYS_DVBC_ANNEX_C:
+ return DVBV3_QAM;
+ case SYS_DVBS:
+ case SYS_DVBS2:
+ case SYS_TURBO:
+ case SYS_ISDBS:
+ case SYS_DSS:
+ return DVBV3_QPSK;
+ case SYS_DVBT:
+ case SYS_DVBT2:
+ case SYS_ISDBT:
+ case SYS_DMBTH:
+ return DVBV3_OFDM;
+ case SYS_ATSC:
+ case SYS_DVBC_ANNEX_B:
+ return DVBV3_ATSC;
+ case SYS_UNDEFINED:
+ case SYS_ISDBC:
+ case SYS_DVBH:
+ case SYS_DAB:
+ case SYS_ATSCMH:
+ default:
+ /*
+ * We don't know how to emulate those types and/or
+ * there's no frontend driver of this type yet
+ * with emulation code, so we're not sure yet how
+ * to handle them, or they're not compatible with a DVBv3 call.
+ */
+ return DVBV3_UNKNOWN;
+ }
+}
static void dvb_frontend_add_event(struct dvb_frontend *fe, fe_status_t status)
{
@@ -149,8 +207,8 @@ static void dvb_frontend_add_event(struct dvb_frontend *fe, fe_status_t status)
dprintk ("%s\n", __func__);
- if ((status & FE_HAS_LOCK) && fe->ops.get_frontend)
- fe->ops.get_frontend(fe, &fepriv->parameters_out);
+ if ((status & FE_HAS_LOCK) && has_get_frontend(fe))
+ dtv_get_frontend(fe, &fepriv->parameters_out);
mutex_lock(&events->mtx);
@@ -277,12 +335,13 @@ static int dvb_frontend_swzigzag_autotune(struct dvb_frontend *fe, int check_wra
int ready = 0;
int fe_set_err = 0;
struct dvb_frontend_private *fepriv = fe->frontend_priv;
- int original_inversion = fepriv->parameters_in.inversion;
- u32 original_frequency = fepriv->parameters_in.frequency;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache, tmp;
+ int original_inversion = c->inversion;
+ u32 original_frequency = c->frequency;
/* are we using autoinversion? */
autoinversion = ((!(fe->ops.info.caps & FE_CAN_INVERSION_AUTO)) &&
- (fepriv->parameters_in.inversion == INVERSION_AUTO));
+ (c->inversion == INVERSION_AUTO));
/* setup parameters correctly */
while(!ready) {
@@ -348,19 +407,20 @@ static int dvb_frontend_swzigzag_autotune(struct dvb_frontend *fe, int check_wra
fepriv->auto_step, fepriv->auto_sub_step, fepriv->started_auto_step);
/* set the frontend itself */
- fepriv->parameters_in.frequency += fepriv->lnb_drift;
+ c->frequency += fepriv->lnb_drift;
if (autoinversion)
- fepriv->parameters_in.inversion = fepriv->inversion;
+ c->inversion = fepriv->inversion;
+ tmp = *c;
if (fe->ops.set_frontend)
- fe_set_err = fe->ops.set_frontend(fe, &fepriv->parameters_in);
- fepriv->parameters_out = fepriv->parameters_in;
+ fe_set_err = fe->ops.set_frontend(fe);
+ *c = tmp;
if (fe_set_err < 0) {
fepriv->state = FESTATE_ERROR;
return fe_set_err;
}
- fepriv->parameters_in.frequency = original_frequency;
- fepriv->parameters_in.inversion = original_inversion;
+ c->frequency = original_frequency;
+ c->inversion = original_inversion;
fepriv->auto_sub_step++;
return 0;
@@ -371,6 +431,7 @@ static void dvb_frontend_swzigzag(struct dvb_frontend *fe)
fe_status_t s = 0;
int retval = 0;
struct dvb_frontend_private *fepriv = fe->frontend_priv;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache, tmp;
/* if we've got no parameters, just keep idling */
if (fepriv->state & FESTATE_IDLE) {
@@ -382,10 +443,10 @@ static void dvb_frontend_swzigzag(struct dvb_frontend *fe)
/* in SCAN mode, we just set the frontend when asked and leave it alone */
if (fepriv->tune_mode_flags & FE_TUNE_MODE_ONESHOT) {
if (fepriv->state & FESTATE_RETUNE) {
+ tmp = *c;
if (fe->ops.set_frontend)
- retval = fe->ops.set_frontend(fe,
- &fepriv->parameters_in);
- fepriv->parameters_out = fepriv->parameters_in;
+ retval = fe->ops.set_frontend(fe);
+ *c = tmp;
if (retval < 0)
fepriv->state = FESTATE_ERROR;
else
@@ -415,8 +476,8 @@ static void dvb_frontend_swzigzag(struct dvb_frontend *fe)
/* if we're tuned, then we have determined the correct inversion */
if ((!(fe->ops.info.caps & FE_CAN_INVERSION_AUTO)) &&
- (fepriv->parameters_in.inversion == INVERSION_AUTO)) {
- fepriv->parameters_in.inversion = fepriv->inversion;
+ (c->inversion == INVERSION_AUTO)) {
+ c->inversion = fepriv->inversion;
}
return;
}
@@ -507,7 +568,7 @@ static int dvb_frontend_is_exiting(struct dvb_frontend *fe)
return 1;
if (fepriv->dvbdev->writers == 1)
- if (time_after(jiffies, fepriv->release_jiffies +
+ if (time_after_eq(jiffies, fepriv->release_jiffies +
dvb_shutdown_timeout * HZ))
return 1;
@@ -540,7 +601,7 @@ static int dvb_frontend_thread(void *data)
fe_status_t s;
enum dvbfe_algo algo;
- struct dvb_frontend_parameters *params;
+ bool re_tune = false;
dprintk("%s\n", __func__);
@@ -589,18 +650,15 @@ restart:
switch (algo) {
case DVBFE_ALGO_HW:
dprintk("%s: Frontend ALGO = DVBFE_ALGO_HW\n", __func__);
- params = NULL; /* have we been asked to RETUNE ? */
if (fepriv->state & FESTATE_RETUNE) {
dprintk("%s: Retune requested, FESTATE_RETUNE\n", __func__);
- params = &fepriv->parameters_in;
+ re_tune = true;
fepriv->state = FESTATE_TUNED;
}
if (fe->ops.tune)
- fe->ops.tune(fe, params, fepriv->tune_mode_flags, &fepriv->delay, &s);
- if (params)
- fepriv->parameters_out = *params;
+ fe->ops.tune(fe, re_tune, fepriv->tune_mode_flags, &fepriv->delay, &s);
if (s != fepriv->status && !(fepriv->tune_mode_flags & FE_TUNE_MODE_ONESHOT)) {
dprintk("%s: state changed, adding current state\n", __func__);
@@ -624,7 +682,7 @@ restart:
*/
if (fepriv->algo_status & DVBFE_ALGO_SEARCH_AGAIN) {
if (fe->ops.search) {
- fepriv->algo_status = fe->ops.search(fe, &fepriv->parameters_in);
+ fepriv->algo_status = fe->ops.search(fe);
/* We did the search as requested; the flags have now been
* cleared and reflect the result of the search.
*/
@@ -633,14 +691,10 @@ restart:
}
}
/* Track the carrier if the search was successful */
- if (fepriv->algo_status == DVBFE_ALGO_SEARCH_SUCCESS) {
- if (fe->ops.track)
- fe->ops.track(fe, &fepriv->parameters_in);
- } else {
+ if (fepriv->algo_status != DVBFE_ALGO_SEARCH_SUCCESS) {
fepriv->algo_status |= DVBFE_ALGO_SEARCH_AGAIN;
fepriv->delay = HZ / 2;
}
- fepriv->parameters_out = fepriv->parameters_in;
fe->ops.read_status(fe, &s);
if (s != fepriv->status) {
dvb_frontend_add_event(fe, s); /* update event list */
@@ -807,52 +861,40 @@ static void dvb_frontend_get_frequency_limits(struct dvb_frontend *fe,
fe->dvb->num,fe->id);
}
-static int dvb_frontend_check_parameters(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *parms)
+static int dvb_frontend_check_parameters(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
u32 freq_min;
u32 freq_max;
/* range check: frequency */
dvb_frontend_get_frequency_limits(fe, &freq_min, &freq_max);
- if ((freq_min && parms->frequency < freq_min) ||
- (freq_max && parms->frequency > freq_max)) {
+ if ((freq_min && c->frequency < freq_min) ||
+ (freq_max && c->frequency > freq_max)) {
printk(KERN_WARNING "DVB: adapter %i frontend %i frequency %u out of range (%u..%u)\n",
- fe->dvb->num, fe->id, parms->frequency, freq_min, freq_max);
+ fe->dvb->num, fe->id, c->frequency, freq_min, freq_max);
return -EINVAL;
}
/* range check: symbol rate */
- if (fe->ops.info.type == FE_QPSK) {
+ switch (c->delivery_system) {
+ case SYS_DVBS:
+ case SYS_DVBS2:
+ case SYS_TURBO:
+ case SYS_DVBC_ANNEX_A:
+ case SYS_DVBC_ANNEX_C:
if ((fe->ops.info.symbol_rate_min &&
- parms->u.qpsk.symbol_rate < fe->ops.info.symbol_rate_min) ||
+ c->symbol_rate < fe->ops.info.symbol_rate_min) ||
(fe->ops.info.symbol_rate_max &&
- parms->u.qpsk.symbol_rate > fe->ops.info.symbol_rate_max)) {
+ c->symbol_rate > fe->ops.info.symbol_rate_max)) {
printk(KERN_WARNING "DVB: adapter %i frontend %i symbol rate %u out of range (%u..%u)\n",
- fe->dvb->num, fe->id, parms->u.qpsk.symbol_rate,
- fe->ops.info.symbol_rate_min, fe->ops.info.symbol_rate_max);
+ fe->dvb->num, fe->id, c->symbol_rate,
+ fe->ops.info.symbol_rate_min,
+ fe->ops.info.symbol_rate_max);
return -EINVAL;
}
-
- } else if (fe->ops.info.type == FE_QAM) {
- if ((fe->ops.info.symbol_rate_min &&
- parms->u.qam.symbol_rate < fe->ops.info.symbol_rate_min) ||
- (fe->ops.info.symbol_rate_max &&
- parms->u.qam.symbol_rate > fe->ops.info.symbol_rate_max)) {
- printk(KERN_WARNING "DVB: adapter %i frontend %i symbol rate %u out of range (%u..%u)\n",
- fe->dvb->num, fe->id, parms->u.qam.symbol_rate,
- fe->ops.info.symbol_rate_min, fe->ops.info.symbol_rate_max);
- return -EINVAL;
- }
- }
-
- /* check for supported modulation */
- if (fe->ops.info.type == FE_QAM &&
- (parms->u.qam.modulation > QAM_AUTO ||
- !((1 << (parms->u.qam.modulation + 10)) & fe->ops.info.caps))) {
- printk(KERN_WARNING "DVB: adapter %i frontend %i modulation %u not supported\n",
- fe->dvb->num, fe->id, parms->u.qam.modulation);
- return -EINVAL;
+ default:
+ break;
}
return 0;
@@ -862,32 +904,59 @@ static int dvb_frontend_clear_cache(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int i;
+ u32 delsys;
+ delsys = c->delivery_system;
memset(c, 0, sizeof(struct dtv_frontend_properties));
+ c->delivery_system = delsys;
c->state = DTV_CLEAR;
- c->delivery_system = SYS_UNDEFINED;
- c->inversion = INVERSION_AUTO;
- c->fec_inner = FEC_AUTO;
+
+ dprintk("%s() Clearing cache for delivery system %d\n", __func__,
+ c->delivery_system);
+
c->transmission_mode = TRANSMISSION_MODE_AUTO;
- c->bandwidth_hz = BANDWIDTH_AUTO;
+ c->bandwidth_hz = 0; /* AUTO */
c->guard_interval = GUARD_INTERVAL_AUTO;
c->hierarchy = HIERARCHY_AUTO;
- c->symbol_rate = QAM_AUTO;
+ c->symbol_rate = 0;
c->code_rate_HP = FEC_AUTO;
c->code_rate_LP = FEC_AUTO;
-
- c->isdbt_partial_reception = -1;
- c->isdbt_sb_mode = -1;
- c->isdbt_sb_subchannel = -1;
- c->isdbt_sb_segment_idx = -1;
- c->isdbt_sb_segment_count = -1;
- c->isdbt_layer_enabled = 0x7;
+ c->fec_inner = FEC_AUTO;
+ c->rolloff = ROLLOFF_AUTO;
+ c->voltage = SEC_VOLTAGE_OFF;
+ c->sectone = SEC_TONE_OFF;
+ c->pilot = PILOT_AUTO;
+
+ c->isdbt_partial_reception = 0;
+ c->isdbt_sb_mode = 0;
+ c->isdbt_sb_subchannel = 0;
+ c->isdbt_sb_segment_idx = 0;
+ c->isdbt_sb_segment_count = 0;
+ c->isdbt_layer_enabled = 0;
for (i = 0; i < 3; i++) {
c->layer[i].fec = FEC_AUTO;
c->layer[i].modulation = QAM_AUTO;
- c->layer[i].interleaving = -1;
- c->layer[i].segment_count = -1;
+ c->layer[i].interleaving = 0;
+ c->layer[i].segment_count = 0;
+ }
+
+ c->isdbs_ts_id = 0;
+ c->dvbt2_plp_id = 0;
+
+ switch (c->delivery_system) {
+ case SYS_DVBS:
+ case SYS_DVBS2:
+ case SYS_TURBO:
+ c->modulation = QPSK; /* implied for DVB-S in legacy API */
+ c->rolloff = ROLLOFF_35;/* implied for DVB-S */
+ break;
+ case SYS_ATSC:
+ c->modulation = VSB_8;
+ break;
+ default:
+ c->modulation = QAM_AUTO;
+ break;
}
return 0;
@@ -943,25 +1012,6 @@ static struct dtv_cmds_h dtv_cmds[DTV_MAX_COMMAND + 1] = {
_DTV_CMD(DTV_ISDBT_LAYERC_SEGMENT_COUNT, 1, 0),
_DTV_CMD(DTV_ISDBT_LAYERC_TIME_INTERLEAVING, 1, 0),
- _DTV_CMD(DTV_ISDBT_PARTIAL_RECEPTION, 0, 0),
- _DTV_CMD(DTV_ISDBT_SOUND_BROADCASTING, 0, 0),
- _DTV_CMD(DTV_ISDBT_SB_SUBCHANNEL_ID, 0, 0),
- _DTV_CMD(DTV_ISDBT_SB_SEGMENT_IDX, 0, 0),
- _DTV_CMD(DTV_ISDBT_SB_SEGMENT_COUNT, 0, 0),
- _DTV_CMD(DTV_ISDBT_LAYER_ENABLED, 0, 0),
- _DTV_CMD(DTV_ISDBT_LAYERA_FEC, 0, 0),
- _DTV_CMD(DTV_ISDBT_LAYERA_MODULATION, 0, 0),
- _DTV_CMD(DTV_ISDBT_LAYERA_SEGMENT_COUNT, 0, 0),
- _DTV_CMD(DTV_ISDBT_LAYERA_TIME_INTERLEAVING, 0, 0),
- _DTV_CMD(DTV_ISDBT_LAYERB_FEC, 0, 0),
- _DTV_CMD(DTV_ISDBT_LAYERB_MODULATION, 0, 0),
- _DTV_CMD(DTV_ISDBT_LAYERB_SEGMENT_COUNT, 0, 0),
- _DTV_CMD(DTV_ISDBT_LAYERB_TIME_INTERLEAVING, 0, 0),
- _DTV_CMD(DTV_ISDBT_LAYERC_FEC, 0, 0),
- _DTV_CMD(DTV_ISDBT_LAYERC_MODULATION, 0, 0),
- _DTV_CMD(DTV_ISDBT_LAYERC_SEGMENT_COUNT, 0, 0),
- _DTV_CMD(DTV_ISDBT_LAYERC_TIME_INTERLEAVING, 0, 0),
-
_DTV_CMD(DTV_ISDBS_TS_ID, 1, 0),
_DTV_CMD(DTV_DVBT2_PLP_ID, 1, 0),
@@ -973,6 +1023,8 @@ static struct dtv_cmds_h dtv_cmds[DTV_MAX_COMMAND + 1] = {
_DTV_CMD(DTV_GUARD_INTERVAL, 0, 0),
_DTV_CMD(DTV_TRANSMISSION_MODE, 0, 0),
_DTV_CMD(DTV_HIERARCHY, 0, 0),
+
+ _DTV_CMD(DTV_ENUM_DELSYS, 0, 0),
};
static void dtv_property_dump(struct dtv_property *tvp)
@@ -1006,70 +1058,54 @@ static void dtv_property_dump(struct dtv_property *tvp)
dprintk("%s() tvp.u.data = 0x%08x\n", __func__, tvp->u.data);
}
-static int is_legacy_delivery_system(fe_delivery_system_t s)
-{
- if((s == SYS_UNDEFINED) || (s == SYS_DVBC_ANNEX_AC) ||
- (s == SYS_DVBC_ANNEX_B) || (s == SYS_DVBT) || (s == SYS_DVBS) ||
- (s == SYS_ATSC))
- return 1;
-
- return 0;
-}
-
-/* Initialize the cache with some default values derived from the
- * legacy frontend_info structure.
- */
-static void dtv_property_cache_init(struct dvb_frontend *fe,
- struct dtv_frontend_properties *c)
-{
- switch (fe->ops.info.type) {
- case FE_QPSK:
- c->modulation = QPSK; /* implied for DVB-S in legacy API */
- c->rolloff = ROLLOFF_35;/* implied for DVB-S */
- c->delivery_system = SYS_DVBS;
- break;
- case FE_QAM:
- c->delivery_system = SYS_DVBC_ANNEX_AC;
- break;
- case FE_OFDM:
- c->delivery_system = SYS_DVBT;
- break;
- case FE_ATSC:
- break;
- }
-}
-
/* Synchronise the legacy tuning parameters into the cache, so that demodulator
* drivers can use a single set_frontend tuning function, regardless of whether
* it's being used for the legacy or new API, reducing code and complexity.
*/
-static void dtv_property_cache_sync(struct dvb_frontend *fe,
- struct dtv_frontend_properties *c,
- const struct dvb_frontend_parameters *p)
+static int dtv_property_cache_sync(struct dvb_frontend *fe,
+ struct dtv_frontend_properties *c,
+ const struct dvb_frontend_parameters *p)
{
c->frequency = p->frequency;
c->inversion = p->inversion;
- switch (fe->ops.info.type) {
- case FE_QPSK:
+ switch (dvbv3_type(c->delivery_system)) {
+ case DVBV3_QPSK:
+ dprintk("%s() Preparing QPSK req\n", __func__);
c->symbol_rate = p->u.qpsk.symbol_rate;
c->fec_inner = p->u.qpsk.fec_inner;
break;
- case FE_QAM:
+ case DVBV3_QAM:
+ dprintk("%s() Preparing QAM req\n", __func__);
c->symbol_rate = p->u.qam.symbol_rate;
c->fec_inner = p->u.qam.fec_inner;
c->modulation = p->u.qam.modulation;
break;
- case FE_OFDM:
- if (p->u.ofdm.bandwidth == BANDWIDTH_6_MHZ)
- c->bandwidth_hz = 6000000;
- else if (p->u.ofdm.bandwidth == BANDWIDTH_7_MHZ)
- c->bandwidth_hz = 7000000;
- else if (p->u.ofdm.bandwidth == BANDWIDTH_8_MHZ)
+ case DVBV3_OFDM:
+ dprintk("%s() Preparing OFDM req\n", __func__);
+ switch (p->u.ofdm.bandwidth) {
+ case BANDWIDTH_10_MHZ:
+ c->bandwidth_hz = 10000000;
+ break;
+ case BANDWIDTH_8_MHZ:
c->bandwidth_hz = 8000000;
- else
- /* Including BANDWIDTH_AUTO */
+ break;
+ case BANDWIDTH_7_MHZ:
+ c->bandwidth_hz = 7000000;
+ break;
+ case BANDWIDTH_6_MHZ:
+ c->bandwidth_hz = 6000000;
+ break;
+ case BANDWIDTH_5_MHZ:
+ c->bandwidth_hz = 5000000;
+ break;
+ case BANDWIDTH_1_712_MHZ:
+ c->bandwidth_hz = 1712000;
+ break;
+ case BANDWIDTH_AUTO:
c->bandwidth_hz = 0;
+ }
+
c->code_rate_HP = p->u.ofdm.code_rate_HP;
c->code_rate_LP = p->u.ofdm.code_rate_LP;
c->modulation = p->u.ofdm.constellation;
@@ -1077,50 +1113,78 @@ static void dtv_property_cache_sync(struct dvb_frontend *fe,
c->guard_interval = p->u.ofdm.guard_interval;
c->hierarchy = p->u.ofdm.hierarchy_information;
break;
- case FE_ATSC:
+ case DVBV3_ATSC:
+ dprintk("%s() Preparing ATSC req\n", __func__);
c->modulation = p->u.vsb.modulation;
if ((c->modulation == VSB_8) || (c->modulation == VSB_16))
c->delivery_system = SYS_ATSC;
else
c->delivery_system = SYS_DVBC_ANNEX_B;
break;
+ case DVBV3_UNKNOWN:
+ printk(KERN_ERR
+ "%s: doesn't know how to handle a DVBv3 call to delivery system %i\n",
+ __func__, c->delivery_system);
+ return -EINVAL;
}
+
+ return 0;
}
/* Ensure the cached values are set correctly in the frontend
* legacy tuning structures, for the advanced tuning API.
*/
-static void dtv_property_legacy_params_sync(struct dvb_frontend *fe)
+static int dtv_property_legacy_params_sync(struct dvb_frontend *fe,
+ struct dvb_frontend_parameters *p)
{
const struct dtv_frontend_properties *c = &fe->dtv_property_cache;
- struct dvb_frontend_private *fepriv = fe->frontend_priv;
- struct dvb_frontend_parameters *p = &fepriv->parameters_in;
p->frequency = c->frequency;
p->inversion = c->inversion;
- switch (fe->ops.info.type) {
- case FE_QPSK:
+ switch (dvbv3_type(c->delivery_system)) {
+ case DVBV3_UNKNOWN:
+ printk(KERN_ERR
+ "%s: doesn't know how to handle a DVBv3 call to delivery system %i\n",
+ __func__, c->delivery_system);
+ return -EINVAL;
+ case DVBV3_QPSK:
dprintk("%s() Preparing QPSK req\n", __func__);
p->u.qpsk.symbol_rate = c->symbol_rate;
p->u.qpsk.fec_inner = c->fec_inner;
break;
- case FE_QAM:
+ case DVBV3_QAM:
dprintk("%s() Preparing QAM req\n", __func__);
p->u.qam.symbol_rate = c->symbol_rate;
p->u.qam.fec_inner = c->fec_inner;
p->u.qam.modulation = c->modulation;
break;
- case FE_OFDM:
+ case DVBV3_OFDM:
dprintk("%s() Preparing OFDM req\n", __func__);
- if (c->bandwidth_hz == 6000000)
- p->u.ofdm.bandwidth = BANDWIDTH_6_MHZ;
- else if (c->bandwidth_hz == 7000000)
- p->u.ofdm.bandwidth = BANDWIDTH_7_MHZ;
- else if (c->bandwidth_hz == 8000000)
+
+ switch (c->bandwidth_hz) {
+ case 10000000:
+ p->u.ofdm.bandwidth = BANDWIDTH_10_MHZ;
+ break;
+ case 8000000:
p->u.ofdm.bandwidth = BANDWIDTH_8_MHZ;
- else
+ break;
+ case 7000000:
+ p->u.ofdm.bandwidth = BANDWIDTH_7_MHZ;
+ break;
+ case 6000000:
+ p->u.ofdm.bandwidth = BANDWIDTH_6_MHZ;
+ break;
+ case 5000000:
+ p->u.ofdm.bandwidth = BANDWIDTH_5_MHZ;
+ break;
+ case 1712000:
+ p->u.ofdm.bandwidth = BANDWIDTH_1_712_MHZ;
+ break;
+ case 0:
+ default:
p->u.ofdm.bandwidth = BANDWIDTH_AUTO;
+ }
p->u.ofdm.code_rate_HP = c->code_rate_HP;
p->u.ofdm.code_rate_LP = c->code_rate_LP;
p->u.ofdm.constellation = c->modulation;
@@ -1128,78 +1192,40 @@ static void dtv_property_legacy_params_sync(struct dvb_frontend *fe)
p->u.ofdm.guard_interval = c->guard_interval;
p->u.ofdm.hierarchy_information = c->hierarchy;
break;
- case FE_ATSC:
+ case DVBV3_ATSC:
dprintk("%s() Preparing VSB req\n", __func__);
p->u.vsb.modulation = c->modulation;
break;
}
+ return 0;
}
-/* Ensure the cached values are set correctly in the frontend
- * legacy tuning structures, for the legacy tuning API.
+/**
+ * dtv_get_frontend - calls a callback for retrieving DTV parameters
+ * @fe: struct dvb_frontend pointer
+ * @p_out: struct dvb_frontend_parameters pointer (DVBv3 FE struct)
+ *
+ * This routine calls the driver's get_frontend callback, if one is provided,
+ * so that the DVBv5 property cache reflects the detected parameters.
+ * If p_out is not null, it will also update the DVBv3 params pointed to by it.
*/
-static void dtv_property_adv_params_sync(struct dvb_frontend *fe)
+static int dtv_get_frontend(struct dvb_frontend *fe,
+ struct dvb_frontend_parameters *p_out)
{
- const struct dtv_frontend_properties *c = &fe->dtv_property_cache;
- struct dvb_frontend_private *fepriv = fe->frontend_priv;
- struct dvb_frontend_parameters *p = &fepriv->parameters_in;
-
- p->frequency = c->frequency;
- p->inversion = c->inversion;
-
- if (c->delivery_system == SYS_DSS ||
- c->delivery_system == SYS_DVBS ||
- c->delivery_system == SYS_DVBS2 ||
- c->delivery_system == SYS_ISDBS ||
- c->delivery_system == SYS_TURBO) {
- p->u.qpsk.symbol_rate = c->symbol_rate;
- p->u.qpsk.fec_inner = c->fec_inner;
- }
+ int r;
- /* Fake out a generic DVB-T request so we pass validation in the ioctl */
- if ((c->delivery_system == SYS_ISDBT) ||
- (c->delivery_system == SYS_DVBT2)) {
- p->u.ofdm.constellation = QAM_AUTO;
- p->u.ofdm.code_rate_HP = FEC_AUTO;
- p->u.ofdm.code_rate_LP = FEC_AUTO;
- p->u.ofdm.transmission_mode = TRANSMISSION_MODE_AUTO;
- p->u.ofdm.guard_interval = GUARD_INTERVAL_AUTO;
- p->u.ofdm.hierarchy_information = HIERARCHY_AUTO;
- if (c->bandwidth_hz == 8000000)
- p->u.ofdm.bandwidth = BANDWIDTH_8_MHZ;
- else if (c->bandwidth_hz == 7000000)
- p->u.ofdm.bandwidth = BANDWIDTH_7_MHZ;
- else if (c->bandwidth_hz == 6000000)
- p->u.ofdm.bandwidth = BANDWIDTH_6_MHZ;
- else
- p->u.ofdm.bandwidth = BANDWIDTH_AUTO;
+ if (fe->ops.get_frontend) {
+ r = fe->ops.get_frontend(fe);
+ if (unlikely(r < 0))
+ return r;
+ if (p_out)
+ dtv_property_legacy_params_sync(fe, p_out);
+ return 0;
}
-}
-static void dtv_property_cache_submit(struct dvb_frontend *fe)
-{
- const struct dtv_frontend_properties *c = &fe->dtv_property_cache;
-
- /* For legacy delivery systems we don't need the delivery_system to
- * be specified, but we populate the older structures from the cache
- * so we can call set_frontend on older drivers.
- */
- if(is_legacy_delivery_system(c->delivery_system)) {
-
- dprintk("%s() legacy, modulation = %d\n", __func__, c->modulation);
- dtv_property_legacy_params_sync(fe);
-
- } else {
- dprintk("%s() adv, modulation = %d\n", __func__, c->modulation);
-
- /* For advanced delivery systems / modulation types ...
- * we seed the lecacy dvb_frontend_parameters structure
- * so that the sanity checking code later in the IOCTL processing
- * can validate our basic frequency ranges, symbolrates, modulation
- * etc.
- */
- dtv_property_adv_params_sync(fe);
- }
+ /* As everything is in the cache, the get_frontend op is always supported */
+ return 0;
}
static int dvb_frontend_ioctl_legacy(struct file *file,
@@ -1208,25 +1234,21 @@ static int dvb_frontend_ioctl_properties(struct file *file,
unsigned int cmd, void *parg);
static int dtv_property_process_get(struct dvb_frontend *fe,
+ const struct dtv_frontend_properties *c,
struct dtv_property *tvp,
struct file *file)
{
- const struct dtv_frontend_properties *c = &fe->dtv_property_cache;
- struct dvb_frontend_private *fepriv = fe->frontend_priv;
- struct dtv_frontend_properties cdetected;
- int r;
-
- /*
- * If the driver implements a get_frontend function, then convert
- * detected parameters to S2API properties.
- */
- if (fe->ops.get_frontend) {
- cdetected = *c;
- dtv_property_cache_sync(fe, &cdetected, &fepriv->parameters_out);
- c = &cdetected;
- }
+ int r, ncaps;
switch(tvp->cmd) {
+ case DTV_ENUM_DELSYS:
+ ncaps = 0;
+ while (fe->ops.delsys[ncaps] && ncaps < MAX_DELSYS) {
+ tvp->u.buffer.data[ncaps] = fe->ops.delsys[ncaps];
+ ncaps++;
+ }
+ tvp->u.buffer.len = ncaps;
+ break;
case DTV_FREQUENCY:
tvp->u.data = c->frequency;
break;
@@ -1356,14 +1378,168 @@ static int dtv_property_process_get(struct dvb_frontend *fe,
return 0;
}
+static int dtv_set_frontend(struct dvb_frontend *fe);
+
+static bool is_dvbv3_delsys(u32 delsys)
+{
+ bool status;
+
+ status = (delsys == SYS_DVBT) || (delsys == SYS_DVBC_ANNEX_A) ||
+ (delsys == SYS_DVBS) || (delsys == SYS_ATSC);
+
+ return status;
+}
+
+static int set_delivery_system(struct dvb_frontend *fe, u32 desired_system)
+{
+ int ncaps, i;
+ u32 delsys = SYS_UNDEFINED;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ enum dvbv3_emulation_type type;
+
+ /*
+ * It was reported that some old DVBv5 applications were
+ * filling delivery_system with SYS_UNDEFINED. If this happens,
+ * assume that the application wants to use the first supported
+ * delivery system.
+ */
+ if (c->delivery_system == SYS_UNDEFINED)
+ c->delivery_system = fe->ops.delsys[0];
+
+ if (desired_system == SYS_UNDEFINED) {
+ /*
+ * A DVBv3 call doesn't know what the desired system is.
+ * Also, DVBv3 applications don't know that ops.info.type
+ * could be changed, and they simply die when it doesn't
+ * match.
+ * So, don't change the current delivery system, as it
+ * may be trying to do the wrong thing, like setting an
+ * ISDB-T frontend as DVB-T. Instead, find the closest
+ * DVBv3 system that matches the delivery system.
+ */
+ if (is_dvbv3_delsys(c->delivery_system)) {
+ dprintk("%s() Using delivery system to %d\n",
+ __func__, c->delivery_system);
+ return 0;
+ }
+ type = dvbv3_type(c->delivery_system);
+ switch (type) {
+ case DVBV3_QPSK:
+ desired_system = SYS_DVBS;
+ break;
+ case DVBV3_QAM:
+ desired_system = SYS_DVBC_ANNEX_A;
+ break;
+ case DVBV3_ATSC:
+ desired_system = SYS_ATSC;
+ break;
+ case DVBV3_OFDM:
+ desired_system = SYS_DVBT;
+ break;
+ default:
+ dprintk("%s(): This frontend doesn't support DVBv3 calls\n",
+ __func__);
+ return -EINVAL;
+ }
+ } else {
+ /*
+ * This is a DVBv5 call. So, it likely knows the supported
+ * delivery systems.
+ */
+
+ /* Check if the desired delivery system is supported */
+ ncaps = 0;
+ while (fe->ops.delsys[ncaps] && ncaps < MAX_DELSYS) {
+ if (fe->ops.delsys[ncaps] == desired_system) {
+ c->delivery_system = desired_system;
+ dprintk("%s() Changing delivery system to %d\n",
+ __func__, desired_system);
+ return 0;
+ }
+ ncaps++;
+ }
+ type = dvbv3_type(desired_system);
+
+ /*
+ * The delivery system is not supported. See if it can be
+ * emulated.
+ * The emulation only works if the desired system is one of the
+ * DVBv3 delivery systems
+ */
+ if (!is_dvbv3_delsys(desired_system)) {
+ dprintk("%s() can't use a DVBv3 FE_SET_FRONTEND call on this frontend\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /*
+ * Get the last non-DVBv3 delivery system that has the same type
+ * as the desired system.
+ */
+ ncaps = 0;
+ while (fe->ops.delsys[ncaps] && ncaps < MAX_DELSYS) {
+ if ((dvbv3_type(fe->ops.delsys[ncaps]) == type) &&
+ !is_dvbv3_delsys(fe->ops.delsys[ncaps]))
+ delsys = fe->ops.delsys[ncaps];
+ ncaps++;
+ }
+ /* There's nothing compatible with the desired delivery system */
+ if (delsys == SYS_UNDEFINED) {
+ dprintk("%s() Incompatible DVBv3 FE_SET_FRONTEND call for this frontend\n",
+ __func__);
+ return -EINVAL;
+ }
+ c->delivery_system = delsys;
+ }
+
+ /*
+ * The DVBv3 or DVBv5 call is requesting a different system. So,
+ * emulation is needed.
+ *
+ * Emulate newer delivery systems like ISDBT, DVBT and DMBTH
+ * for older DVBv5 applications. The emulation will try to use
+ * the auto mode for most things, and will assume that the desired
+ * delivery system is the last one in the ops.delsys[] array.
+ */
+ dprintk("%s() Using delivery system %d emulated as if it were a %d\n",
+ __func__, delsys, desired_system);
+
+ /*
+ * For now, handle ISDB-T calls. More code may be needed here for the
+ * other emulated standards.
+ */
+ if (type == DVBV3_OFDM) {
+ if (c->delivery_system == SYS_ISDBT) {
+ dprintk("%s() Using defaults for SYS_ISDBT\n",
+ __func__);
+ if (!c->bandwidth_hz)
+ c->bandwidth_hz = 6000000;
+
+ c->isdbt_partial_reception = 0;
+ c->isdbt_sb_mode = 0;
+ c->isdbt_sb_subchannel = 0;
+ c->isdbt_sb_segment_idx = 0;
+ c->isdbt_sb_segment_count = 0;
+ c->isdbt_layer_enabled = 0;
+ for (i = 0; i < 3; i++) {
+ c->layer[i].fec = FEC_AUTO;
+ c->layer[i].modulation = QAM_AUTO;
+ c->layer[i].interleaving = 0;
+ c->layer[i].segment_count = 0;
+ }
+ }
+ }
+ dprintk("change delivery system on cache to %d\n", c->delivery_system);
+
+ return 0;
+}
+
static int dtv_property_process_set(struct dvb_frontend *fe,
struct dtv_property *tvp,
struct file *file)
{
int r = 0;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
- struct dvb_frontend_private *fepriv = fe->frontend_priv;
- dtv_property_dump(tvp);
/* Allow the frontend to validate incoming properties */
if (fe->ops.set_property) {
@@ -1374,11 +1550,11 @@ static int dtv_property_process_set(struct dvb_frontend *fe,
switch(tvp->cmd) {
case DTV_CLEAR:
- /* Reset a cache of data specific to the frontend here. This does
+ /*
+ * Reset a cache of data specific to the frontend here. This does
* not affect hardware.
*/
dvb_frontend_clear_cache(fe);
- dprintk("%s() Flushing property cache\n", __func__);
break;
case DTV_TUNE:
/* interpret the cache of data, build either a traditional frontend
@@ -1387,10 +1563,8 @@ static int dtv_property_process_set(struct dvb_frontend *fe,
*/
c->state = tvp->cmd;
dprintk("%s() Finalised property cache\n", __func__);
- dtv_property_cache_submit(fe);
- r = dvb_frontend_ioctl_legacy(file, FE_SET_FRONTEND,
- &fepriv->parameters_in);
+ r = dtv_set_frontend(fe);
break;
case DTV_FREQUENCY:
c->frequency = tvp->u.data;
@@ -1417,7 +1591,7 @@ static int dtv_property_process_set(struct dvb_frontend *fe,
c->rolloff = tvp->u.data;
break;
case DTV_DELIVERY_SYSTEM:
- c->delivery_system = tvp->u.data;
+ r = set_delivery_system(fe, tvp->u.data);
break;
case DTV_VOLTAGE:
c->voltage = tvp->u.data;
@@ -1551,6 +1725,7 @@ static int dvb_frontend_ioctl_properties(struct file *file,
{
struct dvb_device *dvbdev = file->private_data;
struct dvb_frontend *fe = dvbdev->priv;
+ struct dvb_frontend_private *fepriv = fe->frontend_priv;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int err = 0;
@@ -1594,7 +1769,6 @@ static int dvb_frontend_ioctl_properties(struct file *file,
} else
if(cmd == FE_GET_PROPERTY) {
-
tvps = (struct dtv_properties __user *)parg;
dprintk("%s() properties.num = %d\n", __func__, tvps->num);
@@ -1616,8 +1790,18 @@ static int dvb_frontend_ioctl_properties(struct file *file,
goto out;
}
+ /*
+ * Fill the output properties with the cache contents, plus
+ * the data retrieved from get_frontend, if the frontend
+ * is not idle. Otherwise, return the cached content.
+ */
+ if (fepriv->state != FESTATE_IDLE) {
+ err = dtv_get_frontend(fe, NULL);
+ if (err < 0)
+ goto out;
+ }
for (i = 0; i < tvps->num; i++) {
- err = dtv_property_process_get(fe, tvp + i, file);
+ err = dtv_property_process_get(fe, c, tvp + i, file);
if (err < 0)
goto out;
(tvp + i)->result = err;
@@ -1636,12 +1820,121 @@ out:
return err;
}
+static int dtv_set_frontend(struct dvb_frontend *fe)
+{
+ struct dvb_frontend_private *fepriv = fe->frontend_priv;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ struct dvb_frontend_tune_settings fetunesettings;
+ u32 rolloff = 0;
+
+ if (dvb_frontend_check_parameters(fe) < 0)
+ return -EINVAL;
+
+ /*
+ * Be sure that the bandwidth will be filled for all
+ * non-satellite systems, as tuners need to know what
+ * low pass/Nyquist half filter should be applied, in
+ * order to avoid inter-channel noise.
+ *
+ * ISDB-T and DVB-T/T2 already set the bandwidth.
+ * ATSC and DVB-C don't, so the core should fill it in.
+ *
+ * On DVB-C Annex A and C, the bandwidth is a function of
+ * the roll-off and symbol rate. Annex B defines different
+ * roll-off factors depending on the modulation. Fortunately,
+ * Annex B is only used with 6MHz, so there's no need to
+ * calculate it.
+ *
+ * While not officially supported, a side effect of handling it at
+ * the cache level is that a program could retrieve the bandwidth
+ * via DTV_BANDWIDTH_HZ, which may be useful for test programs.
+ */
+ switch (c->delivery_system) {
+ case SYS_ATSC:
+ case SYS_DVBC_ANNEX_B:
+ c->bandwidth_hz = 6000000;
+ break;
+ case SYS_DVBC_ANNEX_A:
+ rolloff = 115;
+ break;
+ case SYS_DVBC_ANNEX_C:
+ rolloff = 113;
+ break;
+ default:
+ break;
+ }
+ if (rolloff)
+ c->bandwidth_hz = (c->symbol_rate * rolloff) / 100;
+
+ /* force auto frequency inversion if requested */
+ if (dvb_force_auto_inversion)
+ c->inversion = INVERSION_AUTO;
+
+ /*
+ * without hierarchical coding, code_rate_LP is irrelevant,
+ * so we tolerate the otherwise invalid FEC_NONE setting
+ */
+ if (c->hierarchy == HIERARCHY_NONE && c->code_rate_LP == FEC_NONE)
+ c->code_rate_LP = FEC_AUTO;
+
+ /* get frontend-specific tuning settings */
+ memset(&fetunesettings, 0, sizeof(struct dvb_frontend_tune_settings));
+ if (fe->ops.get_tune_settings && (fe->ops.get_tune_settings(fe, &fetunesettings) == 0)) {
+ fepriv->min_delay = (fetunesettings.min_delay_ms * HZ) / 1000;
+ fepriv->max_drift = fetunesettings.max_drift;
+ fepriv->step_size = fetunesettings.step_size;
+ } else {
+ /* default values */
+ switch (c->delivery_system) {
+ case SYS_DVBC_ANNEX_A:
+ case SYS_DVBC_ANNEX_C:
+ fepriv->min_delay = HZ / 20;
+ fepriv->step_size = c->symbol_rate / 16000;
+ fepriv->max_drift = c->symbol_rate / 2000;
+ break;
+ case SYS_DVBT:
+ case SYS_DVBT2:
+ case SYS_ISDBT:
+ case SYS_DMBTH:
+ fepriv->min_delay = HZ / 20;
+ fepriv->step_size = fe->ops.info.frequency_stepsize * 2;
+ fepriv->max_drift = (fe->ops.info.frequency_stepsize * 2) + 1;
+ break;
+ default:
+ /*
+ * FIXME: This sounds wrong! If frequency_stepsize is
+ * defined by the frontend, why not use it?
+ */
+ fepriv->min_delay = HZ / 20;
+ fepriv->step_size = 0; /* no zigzag */
+ fepriv->max_drift = 0;
+ break;
+ }
+ }
+ if (dvb_override_tune_delay > 0)
+ fepriv->min_delay = (dvb_override_tune_delay * HZ) / 1000;
+
+ fepriv->state = FESTATE_RETUNE;
+
+ /* Request the search algorithm to search */
+ fepriv->algo_status |= DVBFE_ALGO_SEARCH_AGAIN;
+
+ dvb_frontend_clear_events(fe);
+ dvb_frontend_add_event(fe, 0);
+ dvb_frontend_wakeup(fe);
+ fepriv->status = 0;
+
+ return 0;
+}
+
+
static int dvb_frontend_ioctl_legacy(struct file *file,
unsigned int cmd, void *parg)
{
struct dvb_device *dvbdev = file->private_data;
struct dvb_frontend *fe = dvbdev->priv;
struct dvb_frontend_private *fepriv = fe->frontend_priv;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int cb_err, err = -EOPNOTSUPP;
if (fe->dvb->fe_ioctl_override) {
@@ -1658,9 +1951,43 @@ static int dvb_frontend_ioctl_legacy(struct file *file,
switch (cmd) {
case FE_GET_INFO: {
struct dvb_frontend_info* info = parg;
+
memcpy(info, &fe->ops.info, sizeof(struct dvb_frontend_info));
dvb_frontend_get_frequency_limits(fe, &info->frequency_min, &info->frequency_max);
+ /*
+ * Associate the 4 delivery systems supported by the DVBv3
+ * API with their DVBv5 counterparts. For the other standards,
+ * use the closest type, assuming that it would hopefully
+ * work with a DVBv3 application.
+ * Note that, on multi-frontend devices with
+ * different types (terrestrial and cable, for example),
+ * a pure DVBv3 application won't be able to use all delivery
+ * systems. Yet, changing the DVBv5 cache to the other delivery
+ * system should be enough to make it work.
+ */
+ switch (dvbv3_type(c->delivery_system)) {
+ case DVBV3_QPSK:
+ info->type = FE_QPSK;
+ break;
+ case DVBV3_ATSC:
+ info->type = FE_ATSC;
+ break;
+ case DVBV3_QAM:
+ info->type = FE_QAM;
+ break;
+ case DVBV3_OFDM:
+ info->type = FE_OFDM;
+ break;
+ default:
+ printk(KERN_ERR
+ "%s: doesn't know how to handle a DVBv3 call to delivery system %i\n",
+ __func__, c->delivery_system);
+ fe->ops.info.type = FE_OFDM;
+ }
+ dprintk("current delivery system on cache: %d, V3 type: %d\n",
+ c->delivery_system, fe->ops.info.type);
+
/* Force the CAN_INVERSION_AUTO bit on. If the frontend doesn't
* do it, it is done for it. */
info->caps |= FE_CAN_INVERSION_AUTO;
@@ -1819,108 +2146,22 @@ static int dvb_frontend_ioctl_legacy(struct file *file,
err = fe->ops.enable_high_lnb_voltage(fe, (long) parg);
break;
- case FE_SET_FRONTEND: {
- struct dtv_frontend_properties *c = &fe->dtv_property_cache;
- struct dvb_frontend_tune_settings fetunesettings;
-
- if (c->state == DTV_TUNE) {
- if (dvb_frontend_check_parameters(fe, &fepriv->parameters_in) < 0) {
- err = -EINVAL;
- break;
- }
- } else {
- if (dvb_frontend_check_parameters(fe, parg) < 0) {
- err = -EINVAL;
- break;
- }
-
- memcpy (&fepriv->parameters_in, parg,
- sizeof (struct dvb_frontend_parameters));
- dtv_property_cache_init(fe, c);
- dtv_property_cache_sync(fe, c, &fepriv->parameters_in);
- }
-
- /*
- * Initialize output parameters to match the values given by
- * the user. FE_SET_FRONTEND triggers an initial frontend event
- * with status = 0, which copies output parameters to userspace.
- */
- fepriv->parameters_out = fepriv->parameters_in;
-
- memset(&fetunesettings, 0, sizeof(struct dvb_frontend_tune_settings));
- memcpy(&fetunesettings.parameters, parg,
- sizeof (struct dvb_frontend_parameters));
-
- /* force auto frequency inversion if requested */
- if (dvb_force_auto_inversion) {
- fepriv->parameters_in.inversion = INVERSION_AUTO;
- fetunesettings.parameters.inversion = INVERSION_AUTO;
- }
- if (fe->ops.info.type == FE_OFDM) {
- /* without hierarchical coding code_rate_LP is irrelevant,
- * so we tolerate the otherwise invalid FEC_NONE setting */
- if (fepriv->parameters_in.u.ofdm.hierarchy_information == HIERARCHY_NONE &&
- fepriv->parameters_in.u.ofdm.code_rate_LP == FEC_NONE)
- fepriv->parameters_in.u.ofdm.code_rate_LP = FEC_AUTO;
- }
-
- /* get frontend-specific tuning settings */
- if (fe->ops.get_tune_settings && (fe->ops.get_tune_settings(fe, &fetunesettings) == 0)) {
- fepriv->min_delay = (fetunesettings.min_delay_ms * HZ) / 1000;
- fepriv->max_drift = fetunesettings.max_drift;
- fepriv->step_size = fetunesettings.step_size;
- } else {
- /* default values */
- switch(fe->ops.info.type) {
- case FE_QPSK:
- fepriv->min_delay = HZ/20;
- fepriv->step_size = fepriv->parameters_in.u.qpsk.symbol_rate / 16000;
- fepriv->max_drift = fepriv->parameters_in.u.qpsk.symbol_rate / 2000;
- break;
-
- case FE_QAM:
- fepriv->min_delay = HZ/20;
- fepriv->step_size = 0; /* no zigzag */
- fepriv->max_drift = 0;
- break;
-
- case FE_OFDM:
- fepriv->min_delay = HZ/20;
- fepriv->step_size = fe->ops.info.frequency_stepsize * 2;
- fepriv->max_drift = (fe->ops.info.frequency_stepsize * 2) + 1;
- break;
- case FE_ATSC:
- fepriv->min_delay = HZ/20;
- fepriv->step_size = 0;
- fepriv->max_drift = 0;
- break;
- }
- }
- if (dvb_override_tune_delay > 0)
- fepriv->min_delay = (dvb_override_tune_delay * HZ) / 1000;
-
- fepriv->state = FESTATE_RETUNE;
-
- /* Request the search algorithm to search */
- fepriv->algo_status |= DVBFE_ALGO_SEARCH_AGAIN;
+ case FE_SET_FRONTEND:
+ err = set_delivery_system(fe, SYS_UNDEFINED);
+ if (err)
+ break;
- dvb_frontend_clear_events(fe);
- dvb_frontend_add_event(fe, 0);
- dvb_frontend_wakeup(fe);
- fepriv->status = 0;
- err = 0;
+ err = dtv_property_cache_sync(fe, c, parg);
+ if (err)
+ break;
+ err = dtv_set_frontend(fe);
break;
- }
-
case FE_GET_EVENT:
err = dvb_frontend_get_event (fe, parg, file->f_flags);
break;
case FE_GET_FRONTEND:
- if (fe->ops.get_frontend) {
- err = fe->ops.get_frontend(fe, &fepriv->parameters_out);
- memcpy(parg, &fepriv->parameters_out, sizeof(struct dvb_frontend_parameters));
- }
+ err = dtv_get_frontend(fe, parg);
break;
case FE_SET_FRONTEND_TUNE_MODE:
@@ -2061,12 +2302,15 @@ static int dvb_frontend_release(struct inode *inode, struct file *file)
dprintk ("%s\n", __func__);
- if ((file->f_flags & O_ACCMODE) != O_RDONLY)
+ if ((file->f_flags & O_ACCMODE) != O_RDONLY) {
fepriv->release_jiffies = jiffies;
+ mb();
+ }
ret = dvb_generic_release (inode, file);
if (dvbdev->users == -1) {
+ wake_up(&fepriv->wait_queue);
if (fepriv->exit != DVB_FE_NO_EXIT) {
fops_put(file->f_op);
file->f_op = NULL;
@@ -2127,6 +2371,14 @@ int dvb_register_frontend(struct dvb_adapter* dvb,
dvb_register_device (fe->dvb, &fepriv->dvbdev, &dvbdev_template,
fe, DVB_DEVICE_FRONTEND);
+ /*
+ * Initialize the cache to the proper values according to the
+ * first supported delivery system (ops->delsys[0]).
+ */
+
+ fe->dtv_property_cache.delivery_system = fe->ops.delsys[0];
+ dvb_frontend_clear_cache(fe);
+
mutex_unlock(&frontend_mutex);
return 0;
}
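Taken together, the dvb_frontend.c changes make dtv_property_cache the single source of tuning state: legacy FE_SET_FRONTEND callers are translated through dtv_property_cache_sync() and set_delivery_system(), while DVBv5 applications can discover the supported standards through the new DTV_ENUM_DELSYS property. A minimal user-space sketch of that discovery call (error handling trimmed; the device path is only an example):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/dvb/frontend.h>

int main(void)
{
	struct dtv_property prop = { .cmd = DTV_ENUM_DELSYS };
	struct dtv_properties props = { .num = 1, .props = &prop };
	int i, fd = open("/dev/dvb/adapter0/frontend0", O_RDONLY);

	if (fd < 0 || ioctl(fd, FE_GET_PROPERTY, &props) < 0)
		return 1;

	/* the kernel returns one fe_delivery_system value per buffer entry */
	for (i = 0; i < prop.u.buffer.len; i++)
		printf("supported delivery system: %d\n", prop.u.buffer.data[i]);
	return 0;
}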
diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.h b/drivers/media/dvb/dvb-core/dvb_frontend.h
index 67bbfa728016..d63a8215fe03 100644
--- a/drivers/media/dvb/dvb-core/dvb_frontend.h
+++ b/drivers/media/dvb/dvb-core/dvb_frontend.h
@@ -42,11 +42,16 @@
#include "dvbdev.h"
+/*
+ * Maximum number of delivery systems per frontend. It
+ * must be less than or equal to 32.
+ */
+#define MAX_DELSYS 8
+
struct dvb_frontend_tune_settings {
int min_delay_ms;
int step_size;
int max_drift;
- struct dvb_frontend_parameters parameters;
};
struct dvb_frontend;
@@ -198,11 +203,11 @@ struct dvb_tuner_ops {
int (*sleep)(struct dvb_frontend *fe);
/** This is for simple PLLs - set all parameters in one go. */
- int (*set_params)(struct dvb_frontend *fe, struct dvb_frontend_parameters *p);
+ int (*set_params)(struct dvb_frontend *fe);
int (*set_analog_params)(struct dvb_frontend *fe, struct analog_parameters *p);
/** This is support for demods like the mt352 - fills out the supplied buffer with what to write. */
- int (*calc_regs)(struct dvb_frontend *fe, struct dvb_frontend_parameters *p, u8 *buf, int buf_len);
+ int (*calc_regs)(struct dvb_frontend *fe, u8 *buf, int buf_len);
/** This is to allow setting tuner-specific configs */
int (*set_config)(struct dvb_frontend *fe, void *priv_cfg);
@@ -250,10 +255,14 @@ struct analog_demod_ops {
int (*set_config)(struct dvb_frontend *fe, void *priv_cfg);
};
+struct dtv_frontend_properties;
+
struct dvb_frontend_ops {
struct dvb_frontend_info info;
+ u8 delsys[MAX_DELSYS];
+
void (*release)(struct dvb_frontend* fe);
void (*release_sec)(struct dvb_frontend* fe);
@@ -264,7 +273,7 @@ struct dvb_frontend_ops {
/* if this is set, it overrides the default swzigzag */
int (*tune)(struct dvb_frontend* fe,
- struct dvb_frontend_parameters* params,
+ bool re_tune,
unsigned int mode_flags,
unsigned int *delay,
fe_status_t *status);
@@ -272,10 +281,10 @@ struct dvb_frontend_ops {
enum dvbfe_algo (*get_frontend_algo)(struct dvb_frontend *fe);
/* these two are only used for the swzigzag code */
- int (*set_frontend)(struct dvb_frontend* fe, struct dvb_frontend_parameters* params);
+ int (*set_frontend)(struct dvb_frontend *fe);
int (*get_tune_settings)(struct dvb_frontend* fe, struct dvb_frontend_tune_settings* settings);
- int (*get_frontend)(struct dvb_frontend* fe, struct dvb_frontend_parameters* params);
+ int (*get_frontend)(struct dvb_frontend *fe);
int (*read_status)(struct dvb_frontend* fe, fe_status_t* status);
int (*read_ber)(struct dvb_frontend* fe, u32* ber);
@@ -297,8 +306,7 @@ struct dvb_frontend_ops {
/* These callbacks are for devices that implement their own
* tuning algorithms, rather than a simple swzigzag
*/
- enum dvbfe_search (*search)(struct dvb_frontend *fe, struct dvb_frontend_parameters *p);
- int (*track)(struct dvb_frontend *fe, struct dvb_frontend_parameters *p);
+ enum dvbfe_search (*search)(struct dvb_frontend *fe);
struct dvb_tuner_ops tuner_ops;
struct analog_demod_ops analog_ops;
@@ -307,6 +315,7 @@ struct dvb_frontend_ops {
int (*get_property)(struct dvb_frontend* fe, struct dtv_property* tvp);
};
+#ifdef __DVB_CORE__
#define MAX_EVENT 8
struct dvb_fe_events {
@@ -317,6 +326,7 @@ struct dvb_fe_events {
wait_queue_head_t wait_queue;
struct mutex mtx;
};
+#endif
struct dtv_frontend_properties {
@@ -374,6 +384,7 @@ struct dvb_frontend {
void *analog_demod_priv;
struct dtv_frontend_properties dtv_property_cache;
#define DVB_FRONTEND_COMPONENT_TUNER 0
+#define DVB_FRONTEND_COMPONENT_DEMOD 1
int (*callback)(void *adapter_priv, int component, int cmd, int arg);
int id;
};
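
The new DVB_FRONTEND_COMPONENT_DEMOD selector complements the existing TUNER one in the bridge callback, so sub-drivers can request demodulator-side services (reset lines, power gating) through the same hook. A hypothetical bridge-side callback, for illustration only (the cmd/arg meanings are device specific and not defined by this header):

/* Hypothetical bridge-driver callback, not part of this patch. */
#include <linux/errno.h>

static int example_frontend_callback(void *adapter_priv, int component,
				     int cmd, int arg)
{
	switch (component) {
	case DVB_FRONTEND_COMPONENT_TUNER:
		/* e.g. toggle a tuner reset/enable GPIO based on cmd/arg */
		return 0;
	case DVB_FRONTEND_COMPONENT_DEMOD:
		/* e.g. gate a demodulator reset or power line */
		return 0;
	default:
		return -EINVAL;
	}
}
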
diff --git a/drivers/media/dvb/dvb-core/dvb_net.c b/drivers/media/dvb/dvb-core/dvb_net.c
index 93d9869e0f15..8766ce8c354d 100644
--- a/drivers/media/dvb/dvb-core/dvb_net.c
+++ b/drivers/media/dvb/dvb-core/dvb_net.c
@@ -1510,9 +1510,7 @@ int dvb_net_init (struct dvb_adapter *adap, struct dvb_net *dvbnet,
for (i=0; i<DVB_NET_DEVICES_MAX; i++)
dvbnet->state[i] = 0;
- dvb_register_device (adap, &dvbnet->dvbdev, &dvbdev_net,
+ return dvb_register_device(adap, &dvbnet->dvbdev, &dvbdev_net,
dvbnet, DVB_DEVICE_NET);
-
- return 0;
}
EXPORT_SYMBOL(dvb_net_init);
diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
index f73287775953..00a67326c193 100644
--- a/drivers/media/dvb/dvb-core/dvbdev.c
+++ b/drivers/media/dvb/dvb-core/dvbdev.c
@@ -450,7 +450,7 @@ static int dvb_uevent(struct device *dev, struct kobj_uevent_env *env)
return 0;
}
-static char *dvb_devnode(struct device *dev, mode_t *mode)
+static char *dvb_devnode(struct device *dev, umode_t *mode)
{
struct dvb_device *dvbdev = dev_get_drvdata(dev);
diff --git a/drivers/media/dvb/dvb-usb/Kconfig b/drivers/media/dvb/dvb-usb/Kconfig
index 58257165761e..9f203c6767a6 100644
--- a/drivers/media/dvb/dvb-usb/Kconfig
+++ b/drivers/media/dvb/dvb-usb/Kconfig
@@ -311,6 +311,7 @@ config DVB_USB_ANYSEE
select DVB_STV0900 if !DVB_FE_CUSTOMISE
select DVB_STV6110 if !DVB_FE_CUSTOMISE
select DVB_ISL6423 if !DVB_FE_CUSTOMISE
+ select DVB_CXD2820R if !DVB_FE_CUSTOMISE
help
Say Y here to support the Anysee E30, Anysee E30 Plus or
Anysee E30 C Plus DVB USB2.0 receiver.
@@ -340,7 +341,7 @@ config DVB_USB_AF9015
config DVB_USB_CE6230
tristate "Intel CE6230 DVB-T USB2.0 support"
- depends on DVB_USB && EXPERIMENTAL
+ depends on DVB_USB
select DVB_ZL10353
select MEDIA_TUNER_MXL5005S if !MEDIA_TUNER_CUSTOMISE
help
@@ -354,7 +355,7 @@ config DVB_USB_FRIIO
config DVB_USB_EC168
tristate "E3C EC168 DVB-T USB2.0 support"
- depends on DVB_USB && EXPERIMENTAL
+ depends on DVB_USB
select DVB_EC100
select MEDIA_TUNER_MXL5005S if !MEDIA_TUNER_CUSTOMISE
help
diff --git a/drivers/media/dvb/dvb-usb/af9005-fe.c b/drivers/media/dvb/dvb-usb/af9005-fe.c
index 3263e9749d09..740f3f496f12 100644
--- a/drivers/media/dvb/dvb-usb/af9005-fe.c
+++ b/drivers/media/dvb/dvb-usb/af9005-fe.c
@@ -303,7 +303,7 @@ static int af9005_get_pre_vit_err_bit_count(struct dvb_frontend *fe,
return -EINVAL;
}
- /* read constellation mode */
+ /* read modulation mode */
ret =
af9005_read_register_bits(state->d, xd_g_reg_tpsd_const,
reg_tpsd_const_pos, reg_tpsd_const_len,
@@ -321,7 +321,7 @@ static int af9005_get_pre_vit_err_bit_count(struct dvb_frontend *fe,
bits = 6;
break;
default:
- err("invalid constellation mode");
+ err("invalid modulation mode");
return -EINVAL;
}
*pre_bit_count = super_frame_count * 68 * 4 * x * bits;
@@ -533,13 +533,13 @@ static int af9005_fe_read_signal_strength(struct dvb_frontend *fe,
static int af9005_fe_read_snr(struct dvb_frontend *fe, u16 * snr)
{
- /* the snr can be derived from the ber and the constellation
+ /* the snr can be derived from the ber and the modulation
but I don't think this kind of complex calculations belong
in the driver. I may be wrong.... */
return -ENOSYS;
}
-static int af9005_fe_program_cfoe(struct dvb_usb_device *d, fe_bandwidth_t bw)
+static int af9005_fe_program_cfoe(struct dvb_usb_device *d, u32 bw)
{
u8 temp0, temp1, temp2, temp3, buf[4];
int ret;
@@ -551,7 +551,7 @@ static int af9005_fe_program_cfoe(struct dvb_usb_device *d, fe_bandwidth_t bw)
u32 NS_coeff2_8k;
switch (bw) {
- case BANDWIDTH_6_MHZ:
+ case 6000000:
NS_coeff1_2048Nu = 0x2ADB6DC;
NS_coeff1_8191Nu = 0xAB7313;
NS_coeff1_8192Nu = 0xAB6DB7;
@@ -560,7 +560,7 @@ static int af9005_fe_program_cfoe(struct dvb_usb_device *d, fe_bandwidth_t bw)
NS_coeff2_8k = 0x55B6DC;
break;
- case BANDWIDTH_7_MHZ:
+ case 7000000:
NS_coeff1_2048Nu = 0x3200001;
NS_coeff1_8191Nu = 0xC80640;
NS_coeff1_8192Nu = 0xC80000;
@@ -569,7 +569,7 @@ static int af9005_fe_program_cfoe(struct dvb_usb_device *d, fe_bandwidth_t bw)
NS_coeff2_8k = 0x640000;
break;
- case BANDWIDTH_8_MHZ:
+ case 8000000:
NS_coeff1_2048Nu = 0x3924926;
NS_coeff1_8191Nu = 0xE4996E;
NS_coeff1_8192Nu = 0xE49249;
@@ -773,17 +773,17 @@ static int af9005_fe_program_cfoe(struct dvb_usb_device *d, fe_bandwidth_t bw)
}
-static int af9005_fe_select_bw(struct dvb_usb_device *d, fe_bandwidth_t bw)
+static int af9005_fe_select_bw(struct dvb_usb_device *d, u32 bw)
{
u8 temp;
switch (bw) {
- case BANDWIDTH_6_MHZ:
+ case 6000000:
temp = 0;
break;
- case BANDWIDTH_7_MHZ:
+ case 7000000:
temp = 1;
break;
- case BANDWIDTH_8_MHZ:
+ case 8000000:
temp = 2;
break;
default:
@@ -930,10 +930,11 @@ static int af9005_fe_init(struct dvb_frontend *fe)
/* init other parameters: program cfoe and select bandwidth */
deb_info("program cfoe\n");
- if ((ret = af9005_fe_program_cfoe(state->d, BANDWIDTH_6_MHZ)))
+ ret = af9005_fe_program_cfoe(state->d, 6000000);
+ if (ret)
return ret;
- /* set read-update bit for constellation */
- deb_info("set read-update bit for constellation\n");
+ /* set read-update bit for modulation */
+ deb_info("set read-update bit for modulation\n");
if ((ret =
af9005_write_register_bits(state->d, xd_p_reg_feq_read_update,
reg_feq_read_update_pos,
@@ -943,8 +944,8 @@ static int af9005_fe_init(struct dvb_frontend *fe)
/* sample code has a set MPEG TS code here
but sniffing reveals that it doesn't do it */
- /* set read-update bit to 1 for DCA constellation */
- deb_info("set read-update bit 1 for DCA constellation\n");
+ /* set read-update bit to 1 for DCA modulation */
+ deb_info("set read-update bit 1 for DCA modulation\n");
if ((ret =
af9005_write_register_bits(state->d, xd_p_reg_dca_read_update,
reg_dca_read_update_pos,
@@ -1099,15 +1100,15 @@ static int af9005_ts_bus_ctrl(struct dvb_frontend *fe, int acquire)
return 0;
}
-static int af9005_fe_set_frontend(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *fep)
+static int af9005_fe_set_frontend(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *fep = &fe->dtv_property_cache;
struct af9005_fe_state *state = fe->demodulator_priv;
int ret;
u8 temp, temp0, temp1, temp2;
deb_info("af9005_fe_set_frontend freq %d bw %d\n", fep->frequency,
- fep->u.ofdm.bandwidth);
+ fep->bandwidth_hz);
if (fe->ops.tuner_ops.release == NULL) {
err("Tuner not attached");
return -ENODEV;
@@ -1167,10 +1168,10 @@ static int af9005_fe_set_frontend(struct dvb_frontend *fe,
/* select bandwidth */
deb_info("select bandwidth");
- ret = af9005_fe_select_bw(state->d, fep->u.ofdm.bandwidth);
+ ret = af9005_fe_select_bw(state->d, fep->bandwidth_hz);
if (ret)
return ret;
- ret = af9005_fe_program_cfoe(state->d, fep->u.ofdm.bandwidth);
+ ret = af9005_fe_program_cfoe(state->d, fep->bandwidth_hz);
if (ret)
return ret;
@@ -1189,7 +1190,7 @@ static int af9005_fe_set_frontend(struct dvb_frontend *fe,
return ret;
/* set tuner */
deb_info("set tuner\n");
- ret = fe->ops.tuner_ops.set_params(fe, fep);
+ ret = fe->ops.tuner_ops.set_params(fe);
if (ret)
return ret;
@@ -1225,9 +1226,9 @@ static int af9005_fe_set_frontend(struct dvb_frontend *fe,
return 0;
}
-static int af9005_fe_get_frontend(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *fep)
+static int af9005_fe_get_frontend(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *fep = &fe->dtv_property_cache;
struct af9005_fe_state *state = fe->demodulator_priv;
int ret;
u8 temp;
@@ -1239,19 +1240,19 @@ static int af9005_fe_get_frontend(struct dvb_frontend *fe,
&temp);
if (ret)
return ret;
- deb_info("===== fe_get_frontend ==============\n");
+ deb_info("===== fe_get_frontend_legacy = =============\n");
deb_info("CONSTELLATION ");
switch (temp) {
case 0:
- fep->u.ofdm.constellation = QPSK;
+ fep->modulation = QPSK;
deb_info("QPSK\n");
break;
case 1:
- fep->u.ofdm.constellation = QAM_16;
+ fep->modulation = QAM_16;
deb_info("QAM_16\n");
break;
case 2:
- fep->u.ofdm.constellation = QAM_64;
+ fep->modulation = QAM_64;
deb_info("QAM_64\n");
break;
}
@@ -1266,19 +1267,19 @@ static int af9005_fe_get_frontend(struct dvb_frontend *fe,
deb_info("HIERARCHY ");
switch (temp) {
case 0:
- fep->u.ofdm.hierarchy_information = HIERARCHY_NONE;
+ fep->hierarchy = HIERARCHY_NONE;
deb_info("NONE\n");
break;
case 1:
- fep->u.ofdm.hierarchy_information = HIERARCHY_1;
+ fep->hierarchy = HIERARCHY_1;
deb_info("1\n");
break;
case 2:
- fep->u.ofdm.hierarchy_information = HIERARCHY_2;
+ fep->hierarchy = HIERARCHY_2;
deb_info("2\n");
break;
case 3:
- fep->u.ofdm.hierarchy_information = HIERARCHY_4;
+ fep->hierarchy = HIERARCHY_4;
deb_info("4\n");
break;
}
@@ -1302,23 +1303,23 @@ static int af9005_fe_get_frontend(struct dvb_frontend *fe,
deb_info("CODERATE HP ");
switch (temp) {
case 0:
- fep->u.ofdm.code_rate_HP = FEC_1_2;
+ fep->code_rate_HP = FEC_1_2;
deb_info("FEC_1_2\n");
break;
case 1:
- fep->u.ofdm.code_rate_HP = FEC_2_3;
+ fep->code_rate_HP = FEC_2_3;
deb_info("FEC_2_3\n");
break;
case 2:
- fep->u.ofdm.code_rate_HP = FEC_3_4;
+ fep->code_rate_HP = FEC_3_4;
deb_info("FEC_3_4\n");
break;
case 3:
- fep->u.ofdm.code_rate_HP = FEC_5_6;
+ fep->code_rate_HP = FEC_5_6;
deb_info("FEC_5_6\n");
break;
case 4:
- fep->u.ofdm.code_rate_HP = FEC_7_8;
+ fep->code_rate_HP = FEC_7_8;
deb_info("FEC_7_8\n");
break;
}
@@ -1333,23 +1334,23 @@ static int af9005_fe_get_frontend(struct dvb_frontend *fe,
deb_info("CODERATE LP ");
switch (temp) {
case 0:
- fep->u.ofdm.code_rate_LP = FEC_1_2;
+ fep->code_rate_LP = FEC_1_2;
deb_info("FEC_1_2\n");
break;
case 1:
- fep->u.ofdm.code_rate_LP = FEC_2_3;
+ fep->code_rate_LP = FEC_2_3;
deb_info("FEC_2_3\n");
break;
case 2:
- fep->u.ofdm.code_rate_LP = FEC_3_4;
+ fep->code_rate_LP = FEC_3_4;
deb_info("FEC_3_4\n");
break;
case 3:
- fep->u.ofdm.code_rate_LP = FEC_5_6;
+ fep->code_rate_LP = FEC_5_6;
deb_info("FEC_5_6\n");
break;
case 4:
- fep->u.ofdm.code_rate_LP = FEC_7_8;
+ fep->code_rate_LP = FEC_7_8;
deb_info("FEC_7_8\n");
break;
}
@@ -1363,19 +1364,19 @@ static int af9005_fe_get_frontend(struct dvb_frontend *fe,
deb_info("GUARD INTERVAL ");
switch (temp) {
case 0:
- fep->u.ofdm.guard_interval = GUARD_INTERVAL_1_32;
+ fep->guard_interval = GUARD_INTERVAL_1_32;
deb_info("1_32\n");
break;
case 1:
- fep->u.ofdm.guard_interval = GUARD_INTERVAL_1_16;
+ fep->guard_interval = GUARD_INTERVAL_1_16;
deb_info("1_16\n");
break;
case 2:
- fep->u.ofdm.guard_interval = GUARD_INTERVAL_1_8;
+ fep->guard_interval = GUARD_INTERVAL_1_8;
deb_info("1_8\n");
break;
case 3:
- fep->u.ofdm.guard_interval = GUARD_INTERVAL_1_4;
+ fep->guard_interval = GUARD_INTERVAL_1_4;
deb_info("1_4\n");
break;
}
@@ -1390,11 +1391,11 @@ static int af9005_fe_get_frontend(struct dvb_frontend *fe,
deb_info("TRANSMISSION MODE ");
switch (temp) {
case 0:
- fep->u.ofdm.transmission_mode = TRANSMISSION_MODE_2K;
+ fep->transmission_mode = TRANSMISSION_MODE_2K;
deb_info("2K\n");
break;
case 1:
- fep->u.ofdm.transmission_mode = TRANSMISSION_MODE_8K;
+ fep->transmission_mode = TRANSMISSION_MODE_8K;
deb_info("8K\n");
break;
}
@@ -1406,15 +1407,15 @@ static int af9005_fe_get_frontend(struct dvb_frontend *fe,
deb_info("BANDWIDTH ");
switch (temp) {
case 0:
- fep->u.ofdm.bandwidth = BANDWIDTH_6_MHZ;
+ fep->bandwidth_hz = 6000000;
deb_info("6\n");
break;
case 1:
- fep->u.ofdm.bandwidth = BANDWIDTH_7_MHZ;
+ fep->bandwidth_hz = 7000000;
deb_info("7\n");
break;
case 2:
- fep->u.ofdm.bandwidth = BANDWIDTH_8_MHZ;
+ fep->bandwidth_hz = 8000000;
deb_info("8\n");
break;
}
@@ -1454,9 +1455,9 @@ struct dvb_frontend *af9005_fe_attach(struct dvb_usb_device *d)
}
static struct dvb_frontend_ops af9005_fe_ops = {
+ .delsys = { SYS_DVBT },
.info = {
.name = "AF9005 USB DVB-T",
- .type = FE_OFDM,
.frequency_min = 44250000,
.frequency_max = 867250000,
.frequency_stepsize = 250000,
diff --git a/drivers/media/dvb/dvb-usb/af9005.c b/drivers/media/dvb/dvb-usb/af9005.c
index bd51a764351b..af176b6ce738 100644
--- a/drivers/media/dvb/dvb-usb/af9005.c
+++ b/drivers/media/dvb/dvb-usb/af9005.c
@@ -30,7 +30,7 @@ MODULE_PARM_DESC(debug,
"set debugging level (1=info,xfer=2,rc=4,reg=8,i2c=16,fw=32 (or-able))."
DVB_USB_DEBUG_STATUS);
/* enable obnoxious led */
-int dvb_usb_af9005_led = 1;
+bool dvb_usb_af9005_led = 1;
module_param_named(led, dvb_usb_af9005_led, bool, 0644);
MODULE_PARM_DESC(led, "enable led (default: 1).");
@@ -977,11 +977,20 @@ static int af9005_usb_probe(struct usb_interface *intf,
THIS_MODULE, NULL, adapter_nr);
}
+enum af9005_usb_table_entry {
+ AFATECH_AF9005,
+ TERRATEC_AF9005,
+ ANSONIC_AF9005,
+};
+
static struct usb_device_id af9005_usb_table[] = {
- {USB_DEVICE(USB_VID_AFATECH, USB_PID_AFATECH_AF9005)},
- {USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_CINERGY_T_USB_XE)},
- {USB_DEVICE(USB_VID_ANSONIC, USB_PID_ANSONIC_DVBT_USB)},
- {0},
+ [AFATECH_AF9005] = {USB_DEVICE(USB_VID_AFATECH,
+ USB_PID_AFATECH_AF9005)},
+ [TERRATEC_AF9005] = {USB_DEVICE(USB_VID_TERRATEC,
+ USB_PID_TERRATEC_CINERGY_T_USB_XE)},
+ [ANSONIC_AF9005] = {USB_DEVICE(USB_VID_ANSONIC,
+ USB_PID_ANSONIC_DVBT_USB)},
+ { }
};
MODULE_DEVICE_TABLE(usb, af9005_usb_table);
@@ -1041,15 +1050,15 @@ static struct dvb_usb_device_properties af9005_properties = {
.num_device_descs = 3,
.devices = {
{.name = "Afatech DVB-T USB1.1 stick",
- .cold_ids = {&af9005_usb_table[0], NULL},
+ .cold_ids = {&af9005_usb_table[AFATECH_AF9005], NULL},
.warm_ids = {NULL},
},
{.name = "TerraTec Cinergy T USB XE",
- .cold_ids = {&af9005_usb_table[1], NULL},
+ .cold_ids = {&af9005_usb_table[TERRATEC_AF9005], NULL},
.warm_ids = {NULL},
},
{.name = "Ansonic DVB-T USB1.1 stick",
- .cold_ids = {&af9005_usb_table[2], NULL},
+ .cold_ids = {&af9005_usb_table[ANSONIC_AF9005], NULL},
.warm_ids = {NULL},
},
{NULL},
diff --git a/drivers/media/dvb/dvb-usb/af9005.h b/drivers/media/dvb/dvb-usb/af9005.h
index c71c77bd7f4b..6a2bf3de8456 100644
--- a/drivers/media/dvb/dvb-usb/af9005.h
+++ b/drivers/media/dvb/dvb-usb/af9005.h
@@ -35,7 +35,7 @@ extern int dvb_usb_af9005_debug;
#define deb_i2c(args...) dprintk(dvb_usb_af9005_debug,0x10,args)
#define deb_fw(args...) dprintk(dvb_usb_af9005_debug,0x20,args)
-extern int dvb_usb_af9005_led;
+extern bool dvb_usb_af9005_led;
/* firmware */
#define FW_BULKOUT_SIZE 250
diff --git a/drivers/media/dvb/dvb-usb/af9015.c b/drivers/media/dvb/dvb-usb/af9015.c
index 56cbd3636c31..282a43d648df 100644
--- a/drivers/media/dvb/dvb-usb/af9015.c
+++ b/drivers/media/dvb/dvb-usb/af9015.c
@@ -50,14 +50,14 @@ static int af9015_properties_count = ARRAY_SIZE(af9015_properties);
static struct af9013_config af9015_af9013_config[] = {
{
- .demod_address = AF9015_I2C_DEMOD,
- .output_mode = AF9013_OUTPUT_MODE_USB,
+ .i2c_addr = AF9015_I2C_DEMOD,
+ .ts_mode = AF9013_TS_USB,
.api_version = { 0, 1, 9, 0 },
.gpio[0] = AF9013_GPIO_HI,
.gpio[3] = AF9013_GPIO_TUNER_ON,
}, {
- .output_mode = AF9013_OUTPUT_MODE_SERIAL,
+ .ts_mode = AF9013_TS_SERIAL,
.api_version = { 0, 1, 9, 0 },
.gpio[0] = AF9013_GPIO_TUNER_ON,
.gpio[1] = AF9013_GPIO_LO,
@@ -216,8 +216,8 @@ static int af9015_write_reg_i2c(struct dvb_usb_device *d, u8 addr, u16 reg,
{
struct req_t req = {WRITE_I2C, addr, reg, 1, 1, 1, &val};
- if (addr == af9015_af9013_config[0].demod_address ||
- addr == af9015_af9013_config[1].demod_address)
+ if (addr == af9015_af9013_config[0].i2c_addr ||
+ addr == af9015_af9013_config[1].i2c_addr)
req.addr_len = 3;
return af9015_ctrl_msg(d, &req);
@@ -228,8 +228,8 @@ static int af9015_read_reg_i2c(struct dvb_usb_device *d, u8 addr, u16 reg,
{
struct req_t req = {READ_I2C, addr, reg, 0, 1, 1, val};
- if (addr == af9015_af9013_config[0].demod_address ||
- addr == af9015_af9013_config[1].demod_address)
+ if (addr == af9015_af9013_config[0].i2c_addr ||
+ addr == af9015_af9013_config[1].i2c_addr)
req.addr_len = 3;
return af9015_ctrl_msg(d, &req);
@@ -271,8 +271,8 @@ Due to that the only way to select correct tuner is use demodulator I2C-gate.
return -EAGAIN;
while (i < num) {
- if (msg[i].addr == af9015_af9013_config[0].demod_address ||
- msg[i].addr == af9015_af9013_config[1].demod_address) {
+ if (msg[i].addr == af9015_af9013_config[0].i2c_addr ||
+ msg[i].addr == af9015_af9013_config[1].i2c_addr) {
addr = msg[i].buf[0] << 8;
addr += msg[i].buf[1];
mbox = msg[i].buf[2];
@@ -288,8 +288,7 @@ Due to that the only way to select correct tuner is use demodulator I2C-gate.
ret = -EOPNOTSUPP;
goto error;
}
- if (msg[i].addr ==
- af9015_af9013_config[0].demod_address)
+ if (msg[i].addr == af9015_af9013_config[0].i2c_addr)
req.cmd = READ_MEMORY;
else
req.cmd = READ_I2C;
@@ -307,7 +306,7 @@ Due to that the only way to select correct tuner is use demodulator I2C-gate.
goto error;
}
if (msg[i].addr ==
- af9015_af9013_config[0].demod_address) {
+ af9015_af9013_config[0].i2c_addr) {
ret = -EINVAL;
goto error;
}
@@ -325,8 +324,7 @@ Due to that the only way to select correct tuner is use demodulator I2C-gate.
ret = -EOPNOTSUPP;
goto error;
}
- if (msg[i].addr ==
- af9015_af9013_config[0].demod_address)
+ if (msg[i].addr == af9015_af9013_config[0].i2c_addr)
req.cmd = WRITE_MEMORY;
else
req.cmd = WRITE_I2C;
@@ -508,7 +506,7 @@ static int af9015_copy_firmware(struct dvb_usb_device *d)
msleep(100);
ret = af9015_read_reg_i2c(d,
- af9015_af9013_config[1].demod_address, 0x98be, &val);
+ af9015_af9013_config[1].i2c_addr, 0x98be, &val);
if (ret)
goto error;
else
@@ -536,7 +534,7 @@ static int af9015_copy_firmware(struct dvb_usb_device *d)
goto error;
/* request boot firmware */
- ret = af9015_write_reg_i2c(d, af9015_af9013_config[1].demod_address,
+ ret = af9015_write_reg_i2c(d, af9015_af9013_config[1].i2c_addr,
0xe205, 1);
deb_info("%s: firmware boot cmd status:%d\n", __func__, ret);
if (ret)
@@ -547,7 +545,7 @@ static int af9015_copy_firmware(struct dvb_usb_device *d)
/* check firmware status */
ret = af9015_read_reg_i2c(d,
- af9015_af9013_config[1].demod_address, 0x98be, &val);
+ af9015_af9013_config[1].i2c_addr, 0x98be, &val);
deb_info("%s: firmware status cmd status:%d fw status:%02x\n",
__func__, ret, val);
if (ret)
@@ -840,7 +838,7 @@ static int af9015_read_config(struct usb_device *udev)
if (ret)
goto error;
- deb_info("%s: IR mode:%d\n", __func__, val);
+ deb_info("%s: IR mode=%d\n", __func__, val);
for (i = 0; i < af9015_properties_count; i++) {
if (val == AF9015_IR_MODE_DISABLED)
af9015_properties[i].rc.core.rc_codes = NULL;
@@ -854,7 +852,7 @@ static int af9015_read_config(struct usb_device *udev)
if (ret)
goto error;
af9015_config.dual_mode = val;
- deb_info("%s: TS mode:%d\n", __func__, af9015_config.dual_mode);
+ deb_info("%s: TS mode=%d\n", __func__, af9015_config.dual_mode);
/* Set adapter0 buffer size according to USB port speed, adapter1 buffer
size can be static because it is enabled only USB2.0 */
@@ -878,7 +876,7 @@ static int af9015_read_config(struct usb_device *udev)
ret = af9015_rw_udev(udev, &req);
if (ret)
goto error;
- af9015_af9013_config[1].demod_address = val;
+ af9015_af9013_config[1].i2c_addr = val;
/* enable 2nd adapter */
for (i = 0; i < af9015_properties_count; i++)
@@ -900,34 +898,38 @@ static int af9015_read_config(struct usb_device *udev)
goto error;
switch (val) {
case 0:
- af9015_af9013_config[i].adc_clock = 28800;
+ af9015_af9013_config[i].clock = 28800000;
break;
case 1:
- af9015_af9013_config[i].adc_clock = 20480;
+ af9015_af9013_config[i].clock = 20480000;
break;
case 2:
- af9015_af9013_config[i].adc_clock = 28000;
+ af9015_af9013_config[i].clock = 28000000;
break;
case 3:
- af9015_af9013_config[i].adc_clock = 25000;
+ af9015_af9013_config[i].clock = 25000000;
break;
};
- deb_info("%s: [%d] xtal:%d set adc_clock:%d\n", __func__, i,
- val, af9015_af9013_config[i].adc_clock);
+ deb_info("%s: [%d] xtal=%d set clock=%d\n", __func__, i,
+ val, af9015_af9013_config[i].clock);
- /* tuner IF */
+ /* IF frequency */
req.addr = AF9015_EEPROM_IF1H + offset;
ret = af9015_rw_udev(udev, &req);
if (ret)
goto error;
- af9015_af9013_config[i].tuner_if = val << 8;
+
+ af9015_af9013_config[i].if_frequency = val << 8;
+
req.addr = AF9015_EEPROM_IF1L + offset;
ret = af9015_rw_udev(udev, &req);
if (ret)
goto error;
- af9015_af9013_config[i].tuner_if += val;
- deb_info("%s: [%d] IF1:%d\n", __func__, i,
- af9015_af9013_config[0].tuner_if);
+
+ af9015_af9013_config[i].if_frequency += val;
+ af9015_af9013_config[i].if_frequency *= 1000;
+ deb_info("%s: [%d] IF frequency=%d\n", __func__, i,
+ af9015_af9013_config[0].if_frequency);
/* MT2060 IF1 */
req.addr = AF9015_EEPROM_MT2060_IF1H + offset;
@@ -940,7 +942,7 @@ static int af9015_read_config(struct usb_device *udev)
if (ret)
goto error;
af9015_config.mt2060_if1[i] += val;
- deb_info("%s: [%d] MT2060 IF1:%d\n", __func__, i,
+ deb_info("%s: [%d] MT2060 IF1=%d\n", __func__, i,
af9015_config.mt2060_if1[i]);
/* tuner */
@@ -957,30 +959,30 @@ static int af9015_read_config(struct usb_device *udev)
case AF9013_TUNER_TDA18271:
case AF9013_TUNER_QT1010A:
case AF9013_TUNER_TDA18218:
- af9015_af9013_config[i].rf_spec_inv = 1;
+ af9015_af9013_config[i].spec_inv = 1;
break;
case AF9013_TUNER_MXL5003D:
case AF9013_TUNER_MXL5005D:
case AF9013_TUNER_MXL5005R:
case AF9013_TUNER_MXL5007T:
- af9015_af9013_config[i].rf_spec_inv = 0;
+ af9015_af9013_config[i].spec_inv = 0;
break;
case AF9013_TUNER_MC44S803:
af9015_af9013_config[i].gpio[1] = AF9013_GPIO_LO;
- af9015_af9013_config[i].rf_spec_inv = 1;
+ af9015_af9013_config[i].spec_inv = 1;
break;
default:
- warn("tuner id:%d not supported, please report!", val);
+ warn("tuner id=%d not supported, please report!", val);
return -ENODEV;
};
af9015_af9013_config[i].tuner = val;
- deb_info("%s: [%d] tuner id:%d\n", __func__, i, val);
+ deb_info("%s: [%d] tuner id=%d\n", __func__, i, val);
}
error:
if (ret)
- err("eeprom read failed:%d", ret);
+ err("eeprom read failed=%d", ret);
/* AverMedia AVerTV Volar Black HD (A850) device has bad EEPROM
   content :-( Override some wrong values here. Ditto for the
@@ -998,7 +1000,7 @@ error:
af9015_properties[i].num_adapters = 1;
/* set correct IF */
- af9015_af9013_config[0].tuner_if = 4570;
+ af9015_af9013_config[0].if_frequency = 4570000;
}
return ret;
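
The EEPROM stores the first IF as two bytes of a kHz value, while the new af9013 config field takes Hz; hence the high byte is shifted up, the low byte added, and the sum scaled by 1000. That is also why the A850 EEPROM override changes from 4570 to 4570000. A short worked example (byte values are illustrative):

/* Illustrative arithmetic only, not code from this patch. */
#include <linux/types.h>

static u32 example_eeprom_if_to_hz(u8 if_h, u8 if_l)
{
	u32 if_khz = (if_h << 8) + if_l;	/* e.g. 0x11, 0xda -> 4570 kHz */

	return if_khz * 1000;			/* -> 4570000 Hz (.if_frequency) */
}
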
@@ -1093,9 +1095,79 @@ error:
return ret;
}
+/* override demod callbacks for resource locking */
+static int af9015_af9013_set_frontend(struct dvb_frontend *fe)
+{
+ int ret;
+ struct dvb_usb_adapter *adap = fe->dvb->priv;
+ struct af9015_state *priv = adap->dev->priv;
+
+ if (mutex_lock_interruptible(&adap->dev->usb_mutex))
+ return -EAGAIN;
+
+ ret = priv->set_frontend[adap->id](fe);
+
+ mutex_unlock(&adap->dev->usb_mutex);
+
+ return ret;
+}
+
+/* override demod callbacks for resource locking */
+static int af9015_af9013_read_status(struct dvb_frontend *fe,
+ fe_status_t *status)
+{
+ int ret;
+ struct dvb_usb_adapter *adap = fe->dvb->priv;
+ struct af9015_state *priv = adap->dev->priv;
+
+ if (mutex_lock_interruptible(&adap->dev->usb_mutex))
+ return -EAGAIN;
+
+ ret = priv->read_status[adap->id](fe, status);
+
+ mutex_unlock(&adap->dev->usb_mutex);
+
+ return ret;
+}
+
+/* override demod callbacks for resource locking */
+static int af9015_af9013_init(struct dvb_frontend *fe)
+{
+ int ret;
+ struct dvb_usb_adapter *adap = fe->dvb->priv;
+ struct af9015_state *priv = adap->dev->priv;
+
+ if (mutex_lock_interruptible(&adap->dev->usb_mutex))
+ return -EAGAIN;
+
+ ret = priv->init[adap->id](fe);
+
+ mutex_unlock(&adap->dev->usb_mutex);
+
+ return ret;
+}
+
+/* override demod callbacks for resource locking */
+static int af9015_af9013_sleep(struct dvb_frontend *fe)
+{
+ int ret;
+ struct dvb_usb_adapter *adap = fe->dvb->priv;
+ struct af9015_state *priv = adap->dev->priv;
+
+ if (mutex_lock_interruptible(&adap->dev->usb_mutex))
+ return -EAGAIN;
+
+ ret = priv->sleep[adap->id](fe);
+
+ mutex_unlock(&adap->dev->usb_mutex);
+
+ return ret;
+}
+
static int af9015_af9013_frontend_attach(struct dvb_usb_adapter *adap)
{
int ret;
+ struct af9015_state *state = adap->dev->priv;
if (adap->id == 1) {
/* copy firmware to 2nd demodulator */
@@ -1116,6 +1188,32 @@ static int af9015_af9013_frontend_attach(struct dvb_usb_adapter *adap)
adap->fe_adap[0].fe = dvb_attach(af9013_attach, &af9015_af9013_config[adap->id],
&adap->dev->i2c_adap);
+ /*
+	 * The AF9015 firmware does not like being interrupted by an I2C adapter
+	 * request during some critical phases. During normal operation the I2C
+	 * adapter is used only by the 2nd demodulator and the tuner on dual-tuner
+	 * devices. Override the demodulator callbacks and use a mutex to limit
+	 * access to those "critical" paths and keep the AF9015 happy.
+	 * Note: we reuse the otherwise unused usb_mutex for this.
+ */
+ if (adap->fe_adap[0].fe) {
+ state->set_frontend[adap->id] =
+ adap->fe_adap[0].fe->ops.set_frontend;
+ adap->fe_adap[0].fe->ops.set_frontend =
+ af9015_af9013_set_frontend;
+
+ state->read_status[adap->id] =
+ adap->fe_adap[0].fe->ops.read_status;
+ adap->fe_adap[0].fe->ops.read_status =
+ af9015_af9013_read_status;
+
+ state->init[adap->id] = adap->fe_adap[0].fe->ops.init;
+ adap->fe_adap[0].fe->ops.init = af9015_af9013_init;
+
+ state->sleep[adap->id] = adap->fe_adap[0].fe->ops.sleep;
+ adap->fe_adap[0].fe->ops.sleep = af9015_af9013_sleep;
+ }
+
return adap->fe_adap[0].fe == NULL ? -ENODEV : 0;
}
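
The attach path above saves each original af9013 operation in af9015_state and installs a wrapper that takes usb_mutex around the forwarded call, so demodulator work cannot interleave with other USB traffic at a bad moment. The same save-and-wrap pattern, reduced to its essentials (hypothetical names; the state lookup via fe->dvb->priv is an assumption for the sketch only):

/* Minimal sketch of the save-and-wrap serialization used above;
 * names are hypothetical and the state lookup is an assumption. */
#include <linux/mutex.h>
#include <linux/errno.h>
#include "dvb_frontend.h"

struct example_state {
	int (*orig_init)(struct dvb_frontend *fe);	/* saved original op */
	struct mutex lock;
};

static int example_locked_init(struct dvb_frontend *fe)
{
	struct example_state *st = fe->dvb->priv;	/* assumed state location */
	int ret;

	if (mutex_lock_interruptible(&st->lock))
		return -EAGAIN;
	ret = st->orig_init(fe);	/* forward to the saved callback */
	mutex_unlock(&st->lock);
	return ret;
}

/* at attach time:
 *	st->orig_init = fe->ops.init;
 *	fe->ops.init  = example_locked_init;
 */
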
@@ -1245,49 +1343,112 @@ static int af9015_tuner_attach(struct dvb_usb_adapter *adap)
return ret;
}
+enum af9015_usb_table_entry {
+ AFATECH_9015,
+ AFATECH_9016,
+ WINFAST_DTV_GOLD,
+ PINNACLE_PCTV_71E,
+ KWORLD_PLUSTV_399U,
+ TINYTWIN,
+ AZUREWAVE_TU700,
+ TERRATEC_AF9015,
+ KWORLD_PLUSTV_PC160,
+ AVERTV_VOLAR_X,
+ XTENSIONS_380U,
+ MSI_DIGIVOX_DUO,
+ AVERTV_VOLAR_X_REV2,
+ TELESTAR_STARSTICK_2,
+ AVERMEDIA_A309_USB,
+ MSI_DIGIVOX_MINI_III,
+ KWORLD_E396,
+ KWORLD_E39B,
+ KWORLD_E395,
+ TREKSTOR_DVBT,
+ AVERTV_A850,
+ AVERTV_A805,
+ CONCEPTRONIC_CTVDIGRCU,
+ KWORLD_MC810,
+ GENIUS_TVGO_DVB_T03,
+ KWORLD_399U_2,
+ KWORLD_PC160_T,
+ SVEON_STV20,
+ TINYTWIN_2,
+ WINFAST_DTV2000DS,
+ KWORLD_UB383_T,
+ KWORLD_E39A,
+ AVERMEDIA_A815M,
+ CINERGY_T_STICK_RC,
+ CINERGY_T_DUAL_RC,
+ AVERTV_A850T,
+ TINYTWIN_3,
+ SVEON_STV22,
+};
+
static struct usb_device_id af9015_usb_table[] = {
-/* 0 */{USB_DEVICE(USB_VID_AFATECH, USB_PID_AFATECH_AF9015_9015)},
- {USB_DEVICE(USB_VID_AFATECH, USB_PID_AFATECH_AF9015_9016)},
- {USB_DEVICE(USB_VID_LEADTEK, USB_PID_WINFAST_DTV_DONGLE_GOLD)},
- {USB_DEVICE(USB_VID_PINNACLE, USB_PID_PINNACLE_PCTV71E)},
- {USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_399U)},
-/* 5 */{USB_DEVICE(USB_VID_VISIONPLUS,
- USB_PID_TINYTWIN)},
- {USB_DEVICE(USB_VID_VISIONPLUS,
- USB_PID_AZUREWAVE_AD_TU700)},
- {USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_CINERGY_T_USB_XE_REV2)},
- {USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_PC160_2T)},
- {USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_VOLAR_X)},
-/* 10 */{USB_DEVICE(USB_VID_XTENSIONS, USB_PID_XTENSIONS_XD_380)},
- {USB_DEVICE(USB_VID_MSI_2, USB_PID_MSI_DIGIVOX_DUO)},
- {USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_VOLAR_X_2)},
- {USB_DEVICE(USB_VID_TELESTAR, USB_PID_TELESTAR_STARSTICK_2)},
- {USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A309)},
-/* 15 */{USB_DEVICE(USB_VID_MSI_2, USB_PID_MSI_DIGI_VOX_MINI_III)},
- {USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_395U)},
- {USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_395U_2)},
- {USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_395U_3)},
- {USB_DEVICE(USB_VID_AFATECH, USB_PID_TREKSTOR_DVBT)},
-/* 20 */{USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A850)},
- {USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A805)},
- {USB_DEVICE(USB_VID_KWORLD_2, USB_PID_CONCEPTRONIC_CTVDIGRCU)},
- {USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_MC810)},
- {USB_DEVICE(USB_VID_KYE, USB_PID_GENIUS_TVGO_DVB_T03)},
-/* 25 */{USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_399U_2)},
- {USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_PC160_T)},
- {USB_DEVICE(USB_VID_KWORLD_2, USB_PID_SVEON_STV20)},
- {USB_DEVICE(USB_VID_KWORLD_2, USB_PID_TINYTWIN_2)},
- {USB_DEVICE(USB_VID_LEADTEK, USB_PID_WINFAST_DTV2000DS)},
-/* 30 */{USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_UB383_T)},
- {USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_395U_4)},
- {USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A815M)},
- {USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_CINERGY_T_STICK_RC)},
- {USB_DEVICE(USB_VID_TERRATEC,
- USB_PID_TERRATEC_CINERGY_T_STICK_DUAL_RC)},
-/* 35 */{USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A850T)},
- {USB_DEVICE(USB_VID_GTEK, USB_PID_TINYTWIN_3)},
- {USB_DEVICE(USB_VID_KWORLD_2, USB_PID_SVEON_STV22)},
- {0},
+ [AFATECH_9015] =
+ {USB_DEVICE(USB_VID_AFATECH, USB_PID_AFATECH_AF9015_9015)},
+ [AFATECH_9016] =
+ {USB_DEVICE(USB_VID_AFATECH, USB_PID_AFATECH_AF9015_9016)},
+ [WINFAST_DTV_GOLD] =
+ {USB_DEVICE(USB_VID_LEADTEK, USB_PID_WINFAST_DTV_DONGLE_GOLD)},
+ [PINNACLE_PCTV_71E] =
+ {USB_DEVICE(USB_VID_PINNACLE, USB_PID_PINNACLE_PCTV71E)},
+ [KWORLD_PLUSTV_399U] =
+ {USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_399U)},
+ [TINYTWIN] = {USB_DEVICE(USB_VID_VISIONPLUS, USB_PID_TINYTWIN)},
+ [AZUREWAVE_TU700] =
+ {USB_DEVICE(USB_VID_VISIONPLUS, USB_PID_AZUREWAVE_AD_TU700)},
+ [TERRATEC_AF9015] = {USB_DEVICE(USB_VID_TERRATEC,
+ USB_PID_TERRATEC_CINERGY_T_USB_XE_REV2)},
+ [KWORLD_PLUSTV_PC160] =
+ {USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_PC160_2T)},
+ [AVERTV_VOLAR_X] =
+ {USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_VOLAR_X)},
+ [XTENSIONS_380U] =
+ {USB_DEVICE(USB_VID_XTENSIONS, USB_PID_XTENSIONS_XD_380)},
+ [MSI_DIGIVOX_DUO] =
+ {USB_DEVICE(USB_VID_MSI_2, USB_PID_MSI_DIGIVOX_DUO)},
+ [AVERTV_VOLAR_X_REV2] =
+ {USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_VOLAR_X_2)},
+ [TELESTAR_STARSTICK_2] =
+ {USB_DEVICE(USB_VID_TELESTAR, USB_PID_TELESTAR_STARSTICK_2)},
+ [AVERMEDIA_A309_USB] =
+ {USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A309)},
+ [MSI_DIGIVOX_MINI_III] =
+ {USB_DEVICE(USB_VID_MSI_2, USB_PID_MSI_DIGI_VOX_MINI_III)},
+ [KWORLD_E396] = {USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_395U)},
+ [KWORLD_E39B] = {USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_395U_2)},
+ [KWORLD_E395] = {USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_395U_3)},
+ [TREKSTOR_DVBT] = {USB_DEVICE(USB_VID_AFATECH, USB_PID_TREKSTOR_DVBT)},
+ [AVERTV_A850] = {USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A850)},
+ [AVERTV_A805] = {USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A805)},
+ [CONCEPTRONIC_CTVDIGRCU] =
+ {USB_DEVICE(USB_VID_KWORLD_2, USB_PID_CONCEPTRONIC_CTVDIGRCU)},
+ [KWORLD_MC810] = {USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_MC810)},
+ [GENIUS_TVGO_DVB_T03] =
+ {USB_DEVICE(USB_VID_KYE, USB_PID_GENIUS_TVGO_DVB_T03)},
+ [KWORLD_399U_2] = {USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_399U_2)},
+ [KWORLD_PC160_T] =
+ {USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_PC160_T)},
+ [SVEON_STV20] = {USB_DEVICE(USB_VID_KWORLD_2, USB_PID_SVEON_STV20)},
+ [TINYTWIN_2] = {USB_DEVICE(USB_VID_KWORLD_2, USB_PID_TINYTWIN_2)},
+ [WINFAST_DTV2000DS] =
+ {USB_DEVICE(USB_VID_LEADTEK, USB_PID_WINFAST_DTV2000DS)},
+ [KWORLD_UB383_T] =
+ {USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_UB383_T)},
+ [KWORLD_E39A] =
+ {USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_395U_4)},
+ [AVERMEDIA_A815M] =
+ {USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A815M)},
+ [CINERGY_T_STICK_RC] = {USB_DEVICE(USB_VID_TERRATEC,
+ USB_PID_TERRATEC_CINERGY_T_STICK_RC)},
+ [CINERGY_T_DUAL_RC] = {USB_DEVICE(USB_VID_TERRATEC,
+ USB_PID_TERRATEC_CINERGY_T_STICK_DUAL_RC)},
+ [AVERTV_A850T] =
+ {USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A850T)},
+ [TINYTWIN_3] = {USB_DEVICE(USB_VID_GTEK, USB_PID_TINYTWIN_3)},
+ [SVEON_STV22] = {USB_DEVICE(USB_VID_KWORLD_2, USB_PID_SVEON_STV22)},
+ { }
};
MODULE_DEVICE_TABLE(usb, af9015_usb_table);
@@ -1362,68 +1523,104 @@ static struct dvb_usb_device_properties af9015_properties[] = {
.devices = {
{
.name = "Afatech AF9015 DVB-T USB2.0 stick",
- .cold_ids = {&af9015_usb_table[0],
- &af9015_usb_table[1], NULL},
+ .cold_ids = {
+ &af9015_usb_table[AFATECH_9015],
+ &af9015_usb_table[AFATECH_9016],
+ NULL
+ },
.warm_ids = {NULL},
},
{
.name = "Leadtek WinFast DTV Dongle Gold",
- .cold_ids = {&af9015_usb_table[2], NULL},
+ .cold_ids = {
+ &af9015_usb_table[WINFAST_DTV_GOLD],
+ NULL
+ },
.warm_ids = {NULL},
},
{
.name = "Pinnacle PCTV 71e",
- .cold_ids = {&af9015_usb_table[3], NULL},
+ .cold_ids = {
+ &af9015_usb_table[PINNACLE_PCTV_71E],
+ NULL
+ },
.warm_ids = {NULL},
},
{
.name = "KWorld PlusTV Dual DVB-T Stick " \
"(DVB-T 399U)",
- .cold_ids = {&af9015_usb_table[4],
- &af9015_usb_table[25], NULL},
+ .cold_ids = {
+ &af9015_usb_table[KWORLD_PLUSTV_399U],
+ &af9015_usb_table[KWORLD_399U_2],
+ NULL
+ },
.warm_ids = {NULL},
},
{
.name = "DigitalNow TinyTwin DVB-T Receiver",
- .cold_ids = {&af9015_usb_table[5],
- &af9015_usb_table[28],
- &af9015_usb_table[36], NULL},
+ .cold_ids = {
+ &af9015_usb_table[TINYTWIN],
+ &af9015_usb_table[TINYTWIN_2],
+ &af9015_usb_table[TINYTWIN_3],
+ NULL
+ },
.warm_ids = {NULL},
},
{
.name = "TwinHan AzureWave AD-TU700(704J)",
- .cold_ids = {&af9015_usb_table[6], NULL},
+ .cold_ids = {
+ &af9015_usb_table[AZUREWAVE_TU700],
+ NULL
+ },
.warm_ids = {NULL},
},
{
.name = "TerraTec Cinergy T USB XE",
- .cold_ids = {&af9015_usb_table[7], NULL},
+ .cold_ids = {
+ &af9015_usb_table[TERRATEC_AF9015],
+ NULL
+ },
.warm_ids = {NULL},
},
{
.name = "KWorld PlusTV Dual DVB-T PCI " \
"(DVB-T PC160-2T)",
- .cold_ids = {&af9015_usb_table[8], NULL},
+ .cold_ids = {
+ &af9015_usb_table[KWORLD_PLUSTV_PC160],
+ NULL
+ },
.warm_ids = {NULL},
},
{
.name = "AVerMedia AVerTV DVB-T Volar X",
- .cold_ids = {&af9015_usb_table[9], NULL},
+ .cold_ids = {
+ &af9015_usb_table[AVERTV_VOLAR_X],
+ NULL
+ },
.warm_ids = {NULL},
},
{
.name = "TerraTec Cinergy T Stick RC",
- .cold_ids = {&af9015_usb_table[33], NULL},
+ .cold_ids = {
+ &af9015_usb_table[CINERGY_T_STICK_RC],
+ NULL
+ },
.warm_ids = {NULL},
},
{
.name = "TerraTec Cinergy T Stick Dual RC",
- .cold_ids = {&af9015_usb_table[34], NULL},
+ .cold_ids = {
+ &af9015_usb_table[CINERGY_T_DUAL_RC],
+ NULL
+ },
.warm_ids = {NULL},
},
{
.name = "AverMedia AVerTV Red HD+ (A850T)",
- .cold_ids = {&af9015_usb_table[35], NULL},
+ .cold_ids = {
+ &af9015_usb_table[AVERTV_A850T],
+ NULL
+ },
.warm_ids = {NULL},
},
}
@@ -1496,57 +1693,87 @@ static struct dvb_usb_device_properties af9015_properties[] = {
.devices = {
{
.name = "Xtensions XD-380",
- .cold_ids = {&af9015_usb_table[10], NULL},
+ .cold_ids = {
+ &af9015_usb_table[XTENSIONS_380U],
+ NULL
+ },
.warm_ids = {NULL},
},
{
.name = "MSI DIGIVOX Duo",
- .cold_ids = {&af9015_usb_table[11], NULL},
+ .cold_ids = {
+ &af9015_usb_table[MSI_DIGIVOX_DUO],
+ NULL
+ },
.warm_ids = {NULL},
},
{
.name = "Fujitsu-Siemens Slim Mobile USB DVB-T",
- .cold_ids = {&af9015_usb_table[12], NULL},
+ .cold_ids = {
+ &af9015_usb_table[AVERTV_VOLAR_X_REV2],
+ NULL
+ },
.warm_ids = {NULL},
},
{
.name = "Telestar Starstick 2",
- .cold_ids = {&af9015_usb_table[13], NULL},
+ .cold_ids = {
+ &af9015_usb_table[TELESTAR_STARSTICK_2],
+ NULL
+ },
.warm_ids = {NULL},
},
{
.name = "AVerMedia A309",
- .cold_ids = {&af9015_usb_table[14], NULL},
+ .cold_ids = {
+ &af9015_usb_table[AVERMEDIA_A309_USB],
+ NULL
+ },
.warm_ids = {NULL},
},
{
.name = "MSI Digi VOX mini III",
- .cold_ids = {&af9015_usb_table[15], NULL},
+ .cold_ids = {
+ &af9015_usb_table[MSI_DIGIVOX_MINI_III],
+ NULL
+ },
.warm_ids = {NULL},
},
{
.name = "KWorld USB DVB-T TV Stick II " \
"(VS-DVB-T 395U)",
- .cold_ids = {&af9015_usb_table[16],
- &af9015_usb_table[17],
- &af9015_usb_table[18],
- &af9015_usb_table[31], NULL},
+ .cold_ids = {
+ &af9015_usb_table[KWORLD_E396],
+ &af9015_usb_table[KWORLD_E39B],
+ &af9015_usb_table[KWORLD_E395],
+ &af9015_usb_table[KWORLD_E39A],
+ NULL
+ },
.warm_ids = {NULL},
},
{
.name = "TrekStor DVB-T USB Stick",
- .cold_ids = {&af9015_usb_table[19], NULL},
+ .cold_ids = {
+ &af9015_usb_table[TREKSTOR_DVBT],
+ NULL
+ },
.warm_ids = {NULL},
},
{
.name = "AverMedia AVerTV Volar Black HD " \
"(A850)",
- .cold_ids = {&af9015_usb_table[20], NULL},
+ .cold_ids = {
+ &af9015_usb_table[AVERTV_A850],
+ NULL
+ },
.warm_ids = {NULL},
},
{
.name = "Sveon STV22 Dual USB DVB-T Tuner HDTV",
- .cold_ids = {&af9015_usb_table[37], NULL},
+ .cold_ids = {
+ &af9015_usb_table[SVEON_STV22],
+ NULL
+ },
.warm_ids = {NULL},
},
}
@@ -1619,50 +1846,77 @@ static struct dvb_usb_device_properties af9015_properties[] = {
.devices = {
{
.name = "AverMedia AVerTV Volar GPS 805 (A805)",
- .cold_ids = {&af9015_usb_table[21], NULL},
+ .cold_ids = {
+ &af9015_usb_table[AVERTV_A805],
+ NULL
+ },
.warm_ids = {NULL},
},
{
.name = "Conceptronic USB2.0 DVB-T CTVDIGRCU " \
"V3.0",
- .cold_ids = {&af9015_usb_table[22], NULL},
+ .cold_ids = {
+ &af9015_usb_table[CONCEPTRONIC_CTVDIGRCU],
+ NULL
+ },
.warm_ids = {NULL},
},
{
.name = "KWorld Digial MC-810",
- .cold_ids = {&af9015_usb_table[23], NULL},
+ .cold_ids = {
+ &af9015_usb_table[KWORLD_MC810],
+ NULL
+ },
.warm_ids = {NULL},
},
{
.name = "Genius TVGo DVB-T03",
- .cold_ids = {&af9015_usb_table[24], NULL},
+ .cold_ids = {
+ &af9015_usb_table[GENIUS_TVGO_DVB_T03],
+ NULL
+ },
.warm_ids = {NULL},
},
{
.name = "KWorld PlusTV DVB-T PCI Pro Card " \
"(DVB-T PC160-T)",
- .cold_ids = {&af9015_usb_table[26], NULL},
+ .cold_ids = {
+ &af9015_usb_table[KWORLD_PC160_T],
+ NULL
+ },
.warm_ids = {NULL},
},
{
.name = "Sveon STV20 Tuner USB DVB-T HDTV",
- .cold_ids = {&af9015_usb_table[27], NULL},
+ .cold_ids = {
+ &af9015_usb_table[SVEON_STV20],
+ NULL
+ },
.warm_ids = {NULL},
},
{
.name = "Leadtek WinFast DTV2000DS",
- .cold_ids = {&af9015_usb_table[29], NULL},
+ .cold_ids = {
+ &af9015_usb_table[WINFAST_DTV2000DS],
+ NULL
+ },
.warm_ids = {NULL},
},
{
.name = "KWorld USB DVB-T Stick Mobile " \
"(UB383-T)",
- .cold_ids = {&af9015_usb_table[30], NULL},
+ .cold_ids = {
+ &af9015_usb_table[KWORLD_UB383_T],
+ NULL
+ },
.warm_ids = {NULL},
},
{
.name = "AverMedia AVerTV Volar M (A815Mac)",
- .cold_ids = {&af9015_usb_table[32], NULL},
+ .cold_ids = {
+ &af9015_usb_table[AVERMEDIA_A815M],
+ NULL
+ },
.warm_ids = {NULL},
},
}
diff --git a/drivers/media/dvb/dvb-usb/af9015.h b/drivers/media/dvb/dvb-usb/af9015.h
index 6252ea6c1904..f619063fa72f 100644
--- a/drivers/media/dvb/dvb-usb/af9015.h
+++ b/drivers/media/dvb/dvb-usb/af9015.h
@@ -102,6 +102,12 @@ struct af9015_state {
u8 rc_repeat;
u32 rc_keycode;
u8 rc_last[4];
+
+ /* for demod callback override */
+ int (*set_frontend[2]) (struct dvb_frontend *fe);
+ int (*read_status[2]) (struct dvb_frontend *fe, fe_status_t *status);
+ int (*init[2]) (struct dvb_frontend *fe);
+ int (*sleep[2]) (struct dvb_frontend *fe);
};
struct af9015_config {
diff --git a/drivers/media/dvb/dvb-usb/anysee.c b/drivers/media/dvb/dvb-usb/anysee.c
index b39f14f85e71..1455e2644ab5 100644
--- a/drivers/media/dvb/dvb-usb/anysee.c
+++ b/drivers/media/dvb/dvb-usb/anysee.c
@@ -41,6 +41,7 @@
#include "stv0900.h"
#include "stv6110.h"
#include "isl6423.h"
+#include "cxd2820r.h"
/* debug */
static int dvb_usb_anysee_debug;
@@ -66,10 +67,12 @@ static int anysee_ctrl_msg(struct dvb_usb_device *d, u8 *sbuf, u8 slen,
if (mutex_lock_interruptible(&anysee_usb_mutex) < 0)
return -EAGAIN;
+ deb_xfer(">>> ");
+ debug_dump(buf, slen, deb_xfer);
+
/* We need receive one message more after dvb_usb_generic_rw due
to weird transaction flow, which is 1 x send + 2 x receive. */
ret = dvb_usb_generic_rw(d, buf, sizeof(buf), buf, sizeof(buf), 0);
-
if (!ret) {
/* receive 2nd answer */
ret = usb_bulk_msg(d->udev, usb_rcvbulkpipe(d->udev,
@@ -79,7 +82,10 @@ static int anysee_ctrl_msg(struct dvb_usb_device *d, u8 *sbuf, u8 slen,
err("%s: recv bulk message failed: %d", __func__, ret);
else {
deb_xfer("<<< ");
- debug_dump(buf, act_len, deb_xfer);
+ debug_dump(buf, rlen, deb_xfer);
+
+ if (buf[63] != 0x4f)
+ deb_info("%s: cmd failed\n", __func__);
}
}
@@ -129,6 +135,29 @@ static int anysee_wr_reg_mask(struct dvb_usb_device *d, u16 reg, u8 val,
return anysee_write_reg(d, reg, val);
}
+/* read single register with mask */
+static int anysee_rd_reg_mask(struct dvb_usb_device *d, u16 reg, u8 *val,
+ u8 mask)
+{
+ int ret, i;
+ u8 tmp;
+
+ ret = anysee_read_reg(d, reg, &tmp);
+ if (ret)
+ return ret;
+
+ tmp &= mask;
+
+ /* find position of the first bit */
+ for (i = 0; i < 8; i++) {
+ if ((mask >> i) & 0x01)
+ break;
+ }
+ *val = tmp >> i;
+
+ return 0;
+}
+
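
anysee_rd_reg_mask() mirrors anysee_wr_reg_mask(): it masks out the uninteresting bits and shifts the field down to bit 0, so callers get a plain field value instead of a raw register byte. The CI code added below uses it to read the card-detect bit IOC[6]; a small illustration of what that call yields (not code from this patch):

/* Illustrative only: reading the card-detect bit (IOC[6]) as 0/1.
 * The CI poll callback below treats 0 as "CAM present". */
static int example_read_cam_detect(struct dvb_usb_device *d)
{
	u8 cd;
	int ret = anysee_rd_reg_mask(d, REG_IOC, &cd, 0x40);

	if (ret)
		return ret;
	/* e.g. raw IOC = 0x4c: 0x4c & 0x40 = 0x40, shifted right by 6 -> 1 */
	return cd;
}
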
static int anysee_get_hw_info(struct dvb_usb_device *d, u8 *id)
{
u8 buf[] = {CMD_GET_HW_INFO};
@@ -156,22 +185,6 @@ static int anysee_ir_ctrl(struct dvb_usb_device *d, u8 onoff)
return anysee_ctrl_msg(d, buf, sizeof(buf), NULL, 0);
}
-static int anysee_init(struct dvb_usb_device *d)
-{
- int ret;
- /* LED light */
- ret = anysee_led_ctrl(d, 0x01, 0x03);
- if (ret)
- return ret;
-
- /* enable IR */
- ret = anysee_ir_ctrl(d, 1);
- if (ret)
- return ret;
-
- return 0;
-}
-
/* I2C */
static int anysee_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msg,
int num)
@@ -297,7 +310,7 @@ static struct tda10023_config anysee_tda10023_tda18212_config = {
.pll_m = 12,
.pll_p = 3,
.pll_n = 1,
- .output_mode = TDA10023_OUTPUT_MODE_PARALLEL_C,
+ .output_mode = TDA10023_OUTPUT_MODE_PARALLEL_B,
.deltaf = 0xba02,
};
@@ -309,6 +322,17 @@ static struct tda18212_config anysee_tda18212_config = {
.if_dvbc = 5000,
};
+static struct tda18212_config anysee_tda18212_config2 = {
+ .i2c_address = 0x60 /* (0xc0 >> 1) */,
+ .if_dvbt_6 = 3550,
+ .if_dvbt_7 = 3700,
+ .if_dvbt_8 = 4150,
+ .if_dvbt2_6 = 3250,
+ .if_dvbt2_7 = 4000,
+ .if_dvbt2_8 = 4000,
+ .if_dvbc = 5000,
+};
+
static struct cx24116_config anysee_cx24116_config = {
.demod_address = (0xaa >> 1),
.mpg_clk_pos_pol = 0x00,
@@ -339,6 +363,11 @@ static struct isl6423_config anysee_isl6423_config = {
.addr = (0x10 >> 1),
};
+static struct cxd2820r_config anysee_cxd2820r_config = {
+ .i2c_address = 0x6d, /* (0xda >> 1) */
+ .ts_mode = 0x38,
+};
+
/*
* New USB device strings: Mfr=1, Product=2, SerialNumber=0
* Manufacturer: AMT.CO.KR
@@ -421,6 +450,14 @@ static struct isl6423_config anysee_isl6423_config = {
* IOA[7] TS 1=enabled
* IOE[5] STV0903 1=enabled
*
+ * E7 T2C VID=1c73 PID=861f HW=20 FW=0.1 AMTCI=0.5 "anysee-E7T2C(LP)"
+ * PCB: 508T2C (rev0.3)
+ * parts: DNOQ44QCH106A(CXD2820R, TDA18212), TDA8024
+ * OEA=80 OEB=00 OEC=03 OED=f7 OEE=ff
+ * IOA=4d IOB=00 IOC=cc IOD=48 IOE=e4
+ * IOA[7] TS 1=enabled
+ * IOE[5] CXD2820R 1=enabled
+ *
* E7 PTC VID=1c73 PID=861f HW=21 FW=0.1 AMTCI=?? "anysee-E7PTC(LP)"
* PCB: 508PTC (rev0.5)
* parts: ZL10353, TDA10023, DNOD44CDH086A(TDA18212)
@@ -437,7 +474,7 @@ static struct isl6423_config anysee_isl6423_config = {
* IOD[6] ZL10353 1=enabled
* IOE[0] IF 0=enabled
*
- * E7 S2 VID=1c73 PID=861f HW=22 FW=0.1 AMTCI=?? "anysee-E7PS2(LP)"
+ * E7 PS2 VID=1c73 PID=861f HW=22 FW=0.1 AMTCI=?? "anysee-E7PS2(LP)"
* PCB: 508PS2 (rev0.4)
* parts: DNBU10512IST(STV0903, STV6110), ISL6423
* OEA=80 OEB=00 OEC=03 OED=f7 OEE=ff
@@ -446,6 +483,16 @@ static struct isl6423_config anysee_isl6423_config = {
* IOE[5] STV0903 1=enabled
*/
+
+/* external I2C gate used for DNOD44CDH086A(TDA18212) tuner module */
+static int anysee_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
+{
+ struct dvb_usb_adapter *adap = fe->dvb->priv;
+
+ /* enable / disable tuner access on IOE[4] */
+ return anysee_wr_reg_mask(adap->dev, REG_IOE, (enable << 4), 0x10);
+}
+
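
Hooking anysee_i2c_gate_ctrl into fe->ops.i2c_gate_ctrl lets the tuner path open and close the gate itself, which is why the explicit IOE[4] writes disappear from anysee_tuner_attach() further down. The usual caller-side pattern, as tuner drivers generally apply it (illustrative, not code from this patch):

/* Illustrative caller-side pattern, not code from this patch. */
static void example_with_tuner_gate(struct dvb_frontend *fe)
{
	if (fe->ops.i2c_gate_ctrl)
		fe->ops.i2c_gate_ctrl(fe, 1);	/* enable tuner access (IOE[4] = 1) */

	/* ... tuner I2C transfers ... */

	if (fe->ops.i2c_gate_ctrl)
		fe->ops.i2c_gate_ctrl(fe, 0);	/* close the gate again */
}
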
static int anysee_frontend_ctrl(struct dvb_frontend *fe, int onoff)
{
struct dvb_usb_adapter *adap = fe->dvb->priv;
@@ -577,7 +624,8 @@ static int anysee_frontend_attach(struct dvb_usb_adapter *adap)
/* detect hardware only once */
if (adap->fe_adap[0].fe == NULL) {
/* Check which hardware we have.
- * We must do this call two times to get reliable values (hw bug).
+ * We must do this call two times to get reliable values
+ * (hw/fw bug).
*/
ret = anysee_get_hw_info(adap->dev, hw_info);
if (ret)
@@ -606,14 +654,14 @@ static int anysee_frontend_attach(struct dvb_usb_adapter *adap)
break;
/* attach demod */
- adap->fe_adap[0].fe = dvb_attach(mt352_attach, &anysee_mt352_config,
- &adap->dev->i2c_adap);
+ adap->fe_adap[0].fe = dvb_attach(mt352_attach,
+ &anysee_mt352_config, &adap->dev->i2c_adap);
if (adap->fe_adap[0].fe)
break;
/* attach demod */
- adap->fe_adap[0].fe = dvb_attach(zl10353_attach, &anysee_zl10353_config,
- &adap->dev->i2c_adap);
+ adap->fe_adap[0].fe = dvb_attach(zl10353_attach,
+ &anysee_zl10353_config, &adap->dev->i2c_adap);
break;
case ANYSEE_HW_507CD: /* 6 */
@@ -665,8 +713,8 @@ static int anysee_frontend_attach(struct dvb_usb_adapter *adap)
goto error;
/* attach demod */
- adap->fe_adap[0].fe = dvb_attach(cx24116_attach, &anysee_cx24116_config,
- &adap->dev->i2c_adap);
+ adap->fe_adap[0].fe = dvb_attach(cx24116_attach,
+ &anysee_cx24116_config, &adap->dev->i2c_adap);
break;
case ANYSEE_HW_507FA: /* 15 */
@@ -747,17 +795,19 @@ static int anysee_frontend_attach(struct dvb_usb_adapter *adap)
}
}
+ /* I2C gate for DNOD44CDH086A(TDA18212) tuner module */
+ if (tmp == 0xc7) {
+ if (adap->fe_adap[state->fe_id].fe)
+ adap->fe_adap[state->fe_id].fe->ops.i2c_gate_ctrl =
+ anysee_i2c_gate_ctrl;
+ }
+
break;
case ANYSEE_HW_508TC: /* 18 */
case ANYSEE_HW_508PTC: /* 21 */
/* E7 TC */
/* E7 PTC */
- /* enable transport stream on IOA[7] */
- ret = anysee_wr_reg_mask(adap->dev, REG_IOA, (1 << 7), 0x80);
- if (ret)
- goto error;
-
if ((state->fe_id ^ dvb_usb_anysee_delsys) == 0) {
/* disable DVB-T demod on IOD[6] */
ret = anysee_wr_reg_mask(adap->dev, REG_IOD, (0 << 6),
@@ -772,7 +822,8 @@ static int anysee_frontend_attach(struct dvb_usb_adapter *adap)
goto error;
/* attach demod */
- adap->fe_adap[state->fe_id].fe = dvb_attach(tda10023_attach,
+ adap->fe_adap[state->fe_id].fe =
+ dvb_attach(tda10023_attach,
&anysee_tda10023_tda18212_config,
&adap->dev->i2c_adap, 0x48);
} else {
@@ -789,11 +840,19 @@ static int anysee_frontend_attach(struct dvb_usb_adapter *adap)
goto error;
/* attach demod */
- adap->fe_adap[state->fe_id].fe = dvb_attach(zl10353_attach,
+ adap->fe_adap[state->fe_id].fe =
+ dvb_attach(zl10353_attach,
&anysee_zl10353_tda18212_config,
&adap->dev->i2c_adap);
}
+ /* I2C gate for DNOD44CDH086A(TDA18212) tuner module */
+ if (adap->fe_adap[state->fe_id].fe)
+ adap->fe_adap[state->fe_id].fe->ops.i2c_gate_ctrl =
+ anysee_i2c_gate_ctrl;
+
+ state->has_ci = true;
+
break;
case ANYSEE_HW_508S2: /* 19 */
case ANYSEE_HW_508PS2: /* 22 */
@@ -803,19 +862,35 @@ static int anysee_frontend_attach(struct dvb_usb_adapter *adap)
if (state->fe_id)
break;
- /* enable transport stream on IOA[7] */
- ret = anysee_wr_reg_mask(adap->dev, REG_IOA, (1 << 7), 0x80);
+ /* enable DVB-S/S2 demod on IOE[5] */
+ ret = anysee_wr_reg_mask(adap->dev, REG_IOE, (1 << 5), 0x20);
if (ret)
goto error;
- /* enable DVB-S/S2 demod on IOE[5] */
+ /* attach demod */
+ adap->fe_adap[0].fe = dvb_attach(stv0900_attach,
+ &anysee_stv0900_config, &adap->dev->i2c_adap, 0);
+
+ state->has_ci = true;
+
+ break;
+ case ANYSEE_HW_508T2C: /* 20 */
+ /* E7 T2C */
+
+ if (state->fe_id)
+ break;
+
+ /* enable DVB-T/T2/C demod on IOE[5] */
ret = anysee_wr_reg_mask(adap->dev, REG_IOE, (1 << 5), 0x20);
if (ret)
goto error;
/* attach demod */
- adap->fe_adap[0].fe = dvb_attach(stv0900_attach, &anysee_stv0900_config,
- &adap->dev->i2c_adap, 0);
+ adap->fe_adap[state->fe_id].fe = dvb_attach(cxd2820r_attach,
+ &anysee_cxd2820r_config, &adap->dev->i2c_adap,
+ NULL);
+
+ state->has_ci = true;
break;
}
@@ -842,24 +917,26 @@ static int anysee_tuner_attach(struct dvb_usb_adapter *adap)
/* E30 */
/* attach tuner */
- fe = dvb_attach(dvb_pll_attach, adap->fe_adap[0].fe, (0xc2 >> 1),
- NULL, DVB_PLL_THOMSON_DTT7579);
+ fe = dvb_attach(dvb_pll_attach, adap->fe_adap[0].fe,
+ (0xc2 >> 1), NULL, DVB_PLL_THOMSON_DTT7579);
break;
case ANYSEE_HW_507CD: /* 6 */
/* E30 Plus */
/* attach tuner */
- fe = dvb_attach(dvb_pll_attach, adap->fe_adap[0].fe, (0xc2 >> 1),
- &adap->dev->i2c_adap, DVB_PLL_THOMSON_DTT7579);
+ fe = dvb_attach(dvb_pll_attach, adap->fe_adap[0].fe,
+ (0xc2 >> 1), &adap->dev->i2c_adap,
+ DVB_PLL_THOMSON_DTT7579);
break;
case ANYSEE_HW_507DC: /* 10 */
/* E30 C Plus */
/* attach tuner */
- fe = dvb_attach(dvb_pll_attach, adap->fe_adap[0].fe, (0xc0 >> 1),
- &adap->dev->i2c_adap, DVB_PLL_SAMSUNG_DTOS403IH102A);
+ fe = dvb_attach(dvb_pll_attach, adap->fe_adap[0].fe,
+ (0xc0 >> 1), &adap->dev->i2c_adap,
+ DVB_PLL_SAMSUNG_DTOS403IH102A);
break;
case ANYSEE_HW_507SI: /* 11 */
@@ -877,22 +954,12 @@ static int anysee_tuner_attach(struct dvb_usb_adapter *adap)
/* Try first attach TDA18212 silicon tuner on IOE[4], if that
* fails attach old simple PLL. */
- /* enable tuner on IOE[4] */
- ret = anysee_wr_reg_mask(adap->dev, REG_IOE, (1 << 4), 0x10);
- if (ret)
- goto error;
-
/* attach tuner */
fe = dvb_attach(tda18212_attach, adap->fe_adap[state->fe_id].fe,
&adap->dev->i2c_adap, &anysee_tda18212_config);
if (fe)
break;
- /* disable tuner on IOE[4] */
- ret = anysee_wr_reg_mask(adap->dev, REG_IOE, (0 << 4), 0x10);
- if (ret)
- goto error;
-
/* attach tuner */
fe = dvb_attach(dvb_pll_attach, adap->fe_adap[state->fe_id].fe,
(0xc0 >> 1), &adap->dev->i2c_adap,
@@ -904,11 +971,6 @@ static int anysee_tuner_attach(struct dvb_usb_adapter *adap)
/* E7 TC */
/* E7 PTC */
- /* enable tuner on IOE[4] */
- ret = anysee_wr_reg_mask(adap->dev, REG_IOE, (1 << 4), 0x10);
- if (ret)
- goto error;
-
/* attach tuner */
fe = dvb_attach(tda18212_attach, adap->fe_adap[state->fe_id].fe,
&adap->dev->i2c_adap, &anysee_tda18212_config);
@@ -930,6 +992,15 @@ static int anysee_tuner_attach(struct dvb_usb_adapter *adap)
}
break;
+
+ case ANYSEE_HW_508T2C: /* 20 */
+ /* E7 T2C */
+
+ /* attach tuner */
+ fe = dvb_attach(tda18212_attach, adap->fe_adap[state->fe_id].fe,
+ &adap->dev->i2c_adap, &anysee_tda18212_config2);
+
+ break;
default:
fe = NULL;
}
@@ -939,7 +1010,6 @@ static int anysee_tuner_attach(struct dvb_usb_adapter *adap)
else
ret = -ENODEV;
-error:
return ret;
}
@@ -969,6 +1039,201 @@ static int anysee_rc_query(struct dvb_usb_device *d)
return 0;
}
+static int anysee_ci_read_attribute_mem(struct dvb_ca_en50221 *ci, int slot,
+ int addr)
+{
+ struct dvb_usb_device *d = ci->data;
+ int ret;
+ u8 buf[] = {CMD_CI, 0x02, 0x40 | addr >> 8, addr & 0xff, 0x00, 1};
+ u8 val;
+
+ ret = anysee_ctrl_msg(d, buf, sizeof(buf), &val, 1);
+ if (ret)
+ return ret;
+
+ return val;
+}
+
+static int anysee_ci_write_attribute_mem(struct dvb_ca_en50221 *ci, int slot,
+ int addr, u8 val)
+{
+ struct dvb_usb_device *d = ci->data;
+ int ret;
+ u8 buf[] = {CMD_CI, 0x03, 0x40 | addr >> 8, addr & 0xff, 0x00, 1, val};
+
+ ret = anysee_ctrl_msg(d, buf, sizeof(buf), NULL, 0);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int anysee_ci_read_cam_control(struct dvb_ca_en50221 *ci, int slot,
+ u8 addr)
+{
+ struct dvb_usb_device *d = ci->data;
+ int ret;
+ u8 buf[] = {CMD_CI, 0x04, 0x40, addr, 0x00, 1};
+ u8 val;
+
+ ret = anysee_ctrl_msg(d, buf, sizeof(buf), &val, 1);
+ if (ret)
+ return ret;
+
+ return val;
+}
+
+static int anysee_ci_write_cam_control(struct dvb_ca_en50221 *ci, int slot,
+ u8 addr, u8 val)
+{
+ struct dvb_usb_device *d = ci->data;
+ int ret;
+ u8 buf[] = {CMD_CI, 0x05, 0x40, addr, 0x00, 1, val};
+
+ ret = anysee_ctrl_msg(d, buf, sizeof(buf), NULL, 0);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int anysee_ci_slot_reset(struct dvb_ca_en50221 *ci, int slot)
+{
+ struct dvb_usb_device *d = ci->data;
+ int ret;
+ struct anysee_state *state = d->priv;
+
+ state->ci_cam_ready = jiffies + msecs_to_jiffies(1000);
+
+ ret = anysee_wr_reg_mask(d, REG_IOA, (0 << 7), 0x80);
+ if (ret)
+ return ret;
+
+ msleep(300);
+
+ ret = anysee_wr_reg_mask(d, REG_IOA, (1 << 7), 0x80);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int anysee_ci_slot_shutdown(struct dvb_ca_en50221 *ci, int slot)
+{
+ struct dvb_usb_device *d = ci->data;
+ int ret;
+
+ ret = anysee_wr_reg_mask(d, REG_IOA, (0 << 7), 0x80);
+ if (ret)
+ return ret;
+
+ msleep(30);
+
+ ret = anysee_wr_reg_mask(d, REG_IOA, (1 << 7), 0x80);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int anysee_ci_slot_ts_enable(struct dvb_ca_en50221 *ci, int slot)
+{
+ struct dvb_usb_device *d = ci->data;
+ int ret;
+
+ ret = anysee_wr_reg_mask(d, REG_IOD, (0 << 1), 0x02);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int anysee_ci_poll_slot_status(struct dvb_ca_en50221 *ci, int slot,
+ int open)
+{
+ struct dvb_usb_device *d = ci->data;
+ struct anysee_state *state = d->priv;
+ int ret;
+ u8 tmp;
+
+ ret = anysee_rd_reg_mask(d, REG_IOC, &tmp, 0x40);
+ if (ret)
+ return ret;
+
+ if (tmp == 0) {
+ ret = DVB_CA_EN50221_POLL_CAM_PRESENT;
+ if (time_after(jiffies, state->ci_cam_ready))
+ ret |= DVB_CA_EN50221_POLL_CAM_READY;
+ }
+
+ return ret;
+}
+
+static int anysee_ci_init(struct dvb_usb_device *d)
+{
+ struct anysee_state *state = d->priv;
+ int ret;
+
+ state->ci.owner = THIS_MODULE;
+ state->ci.read_attribute_mem = anysee_ci_read_attribute_mem;
+ state->ci.write_attribute_mem = anysee_ci_write_attribute_mem;
+ state->ci.read_cam_control = anysee_ci_read_cam_control;
+ state->ci.write_cam_control = anysee_ci_write_cam_control;
+ state->ci.slot_reset = anysee_ci_slot_reset;
+ state->ci.slot_shutdown = anysee_ci_slot_shutdown;
+ state->ci.slot_ts_enable = anysee_ci_slot_ts_enable;
+ state->ci.poll_slot_status = anysee_ci_poll_slot_status;
+ state->ci.data = d;
+
+ ret = anysee_wr_reg_mask(d, REG_IOA, (1 << 7), 0x80);
+ if (ret)
+ return ret;
+
+ ret = dvb_ca_en50221_init(&d->adapter[0].dvb_adap, &state->ci, 0, 1);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void anysee_ci_release(struct dvb_usb_device *d)
+{
+ struct anysee_state *state = d->priv;
+
+ /* detach CI */
+ if (state->has_ci)
+ dvb_ca_en50221_release(&state->ci);
+
+ return;
+}
+
+static int anysee_init(struct dvb_usb_device *d)
+{
+ struct anysee_state *state = d->priv;
+ int ret;
+
+ /* LED light */
+ ret = anysee_led_ctrl(d, 0x01, 0x03);
+ if (ret)
+ return ret;
+
+ /* enable IR */
+ ret = anysee_ir_ctrl(d, 1);
+ if (ret)
+ return ret;
+
+ /* attach CI */
+ if (state->has_ci) {
+ ret = anysee_ci_init(d);
+ if (ret) {
+ state->has_ci = false;
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
/* DVB USB Driver stuff */
static struct dvb_usb_device_properties anysee_properties;
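
Taken together, the CI additions form a small lifecycle: frontend attach marks CI-capable boards, device init registers the EN50221 slot, and disconnect releases it before the rest of the USB teardown. Summarized for reference (descriptive only, drawn from the hunks above and below):

/*
 * CI lifecycle added by this patch (summary, not new code):
 *
 *   anysee_frontend_attach()  sets state->has_ci on 508TC/508PTC/508S2/
 *                             508PS2/508T2C boards
 *   anysee_init()             -> anysee_ci_init() -> dvb_ca_en50221_init()
 *   anysee_disconnect()       -> anysee_ci_release() -> dvb_ca_en50221_release()
 */
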
@@ -1010,6 +1275,16 @@ static int anysee_probe(struct usb_interface *intf,
return anysee_init(d);
}
+static void anysee_disconnect(struct usb_interface *intf)
+{
+ struct dvb_usb_device *d = usb_get_intfdata(intf);
+
+ anysee_ci_release(d);
+ dvb_usb_device_exit(intf);
+
+ return;
+}
+
static struct usb_device_id anysee_table[] = {
{ USB_DEVICE(USB_VID_CYPRESS, USB_PID_ANYSEE) },
{ USB_DEVICE(USB_VID_AMT, USB_PID_ANYSEE) },
@@ -1029,7 +1304,7 @@ static struct dvb_usb_device_properties anysee_properties = {
{
.num_frontends = 2,
.frontend_ctrl = anysee_frontend_ctrl,
- .fe = {{
+ .fe = { {
.streaming_ctrl = anysee_streaming_ctrl,
.frontend_attach = anysee_frontend_attach,
.tuner_attach = anysee_tuner_attach,
@@ -1057,7 +1332,7 @@ static struct dvb_usb_device_properties anysee_properties = {
}
}
},
- }},
+ } },
}
},
@@ -1087,7 +1362,7 @@ static struct dvb_usb_device_properties anysee_properties = {
static struct usb_driver anysee_driver = {
.name = "dvb_usb_anysee",
.probe = anysee_probe,
- .disconnect = dvb_usb_device_exit,
+ .disconnect = anysee_disconnect,
.id_table = anysee_table,
};
diff --git a/drivers/media/dvb/dvb-usb/anysee.h b/drivers/media/dvb/dvb-usb/anysee.h
index 57ee500b8c0e..8ac879431540 100644
--- a/drivers/media/dvb/dvb-usb/anysee.h
+++ b/drivers/media/dvb/dvb-usb/anysee.h
@@ -36,6 +36,7 @@
#define DVB_USB_LOG_PREFIX "anysee"
#include "dvb-usb.h"
+#include "dvb_ca_en50221.h"
#define deb_info(args...) dprintk(dvb_usb_anysee_debug, 0x01, args)
#define deb_xfer(args...) dprintk(dvb_usb_anysee_debug, 0x02, args)
@@ -54,12 +55,16 @@ enum cmd {
CMD_GET_IR_CODE = 0x41,
CMD_GET_HW_INFO = 0x19,
CMD_SMARTCARD = 0x34,
+ CMD_CI = 0x37,
};
struct anysee_state {
u8 hw; /* PCB ID */
u8 seq;
 u8 fe_id:1; /* frontend ID */
+ u8 has_ci:1;
+ struct dvb_ca_en50221 ci;
+ unsigned long ci_cam_ready; /* jiffies */
};
#define ANYSEE_HW_507T 2 /* E30 */
@@ -69,6 +74,7 @@ struct anysee_state {
#define ANYSEE_HW_507FA 15 /* E30 Combo Plus / E30 C Plus */
#define ANYSEE_HW_508TC 18 /* E7 TC */
#define ANYSEE_HW_508S2 19 /* E7 S2 */
+#define ANYSEE_HW_508T2C 20 /* E7 T2C */
#define ANYSEE_HW_508PTC 21 /* E7 PTC Plus */
#define ANYSEE_HW_508PS2 22 /* E7 PS2 Plus */
diff --git a/drivers/media/dvb/dvb-usb/cinergyT2-fe.c b/drivers/media/dvb/dvb-usb/cinergyT2-fe.c
index 9cd51ac12076..8a57ed8272de 100644
--- a/drivers/media/dvb/dvb-usb/cinergyT2-fe.c
+++ b/drivers/media/dvb/dvb-usb/cinergyT2-fe.c
@@ -40,9 +40,8 @@
 * We replace erroneous fields with default TPS fields (the ones with value 0).
*/
-static uint16_t compute_tps(struct dvb_frontend_parameters *p)
+static uint16_t compute_tps(struct dtv_frontend_properties *op)
{
- struct dvb_ofdm_parameters *op = &p->u.ofdm;
uint16_t tps = 0;
switch (op->code_rate_HP) {
@@ -83,7 +82,7 @@ static uint16_t compute_tps(struct dvb_frontend_parameters *p)
/* tps |= (0 << 4) */;
}
- switch (op->constellation) {
+ switch (op->modulation) {
case QAM_16:
tps |= (1 << 13);
break;
@@ -119,7 +118,7 @@ static uint16_t compute_tps(struct dvb_frontend_parameters *p)
/* tps |= (0 << 2) */;
}
- switch (op->hierarchy_information) {
+ switch (op->hierarchy) {
case HIERARCHY_1:
tps |= (1 << 10);
break;
@@ -263,9 +262,9 @@ static int cinergyt2_fe_get_tune_settings(struct dvb_frontend *fe,
return 0;
}
-static int cinergyt2_fe_set_frontend(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *fep)
+static int cinergyt2_fe_set_frontend(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *fep = &fe->dtv_property_cache;
struct cinergyt2_fe_state *state = fe->demodulator_priv;
struct dvbt_set_parameters_msg param;
char result[2];
@@ -274,9 +273,20 @@ static int cinergyt2_fe_set_frontend(struct dvb_frontend *fe,
param.cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
param.tps = cpu_to_le16(compute_tps(fep));
param.freq = cpu_to_le32(fep->frequency / 1000);
- param.bandwidth = 8 - fep->u.ofdm.bandwidth - BANDWIDTH_8_MHZ;
param.flags = 0;
+ switch (fep->bandwidth_hz) {
+ case 8000000:
+ param.bandwidth = 0;
+ break;
+ case 7000000:
+ param.bandwidth = 1;
+ break;
+ case 6000000:
+ param.bandwidth = 2;
+ break;
+ }
+
err = dvb_usb_generic_rw(state->d,
(char *)&param, sizeof(param),
result, sizeof(result), 0);
@@ -286,12 +296,6 @@ static int cinergyt2_fe_set_frontend(struct dvb_frontend *fe,
return (err < 0) ? err : 0;
}
-static int cinergyt2_fe_get_frontend(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *fep)
-{
- return 0;
-}
-
static void cinergyt2_fe_release(struct dvb_frontend *fe)
{
struct cinergyt2_fe_state *state = fe->demodulator_priv;
@@ -316,9 +320,9 @@ struct dvb_frontend *cinergyt2_fe_attach(struct dvb_usb_device *d)
static struct dvb_frontend_ops cinergyt2_fe_ops = {
+ .delsys = { SYS_DVBT },
.info = {
.name = DRIVER_NAME,
- .type = FE_OFDM,
.frequency_min = 174000000,
.frequency_max = 862000000,
.frequency_stepsize = 166667,
@@ -341,7 +345,6 @@ static struct dvb_frontend_ops cinergyt2_fe_ops = {
.sleep = cinergyt2_fe_sleep,
.set_frontend = cinergyt2_fe_set_frontend,
- .get_frontend = cinergyt2_fe_get_frontend,
.get_tune_settings = cinergyt2_fe_get_tune_settings,
.read_status = cinergyt2_fe_read_status,
diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
index 949ea1bc0aae..3940bb0f9ef6 100644
--- a/drivers/media/dvb/dvb-usb/cxusb.c
+++ b/drivers/media/dvb/dvb-usb/cxusb.c
@@ -1067,18 +1067,17 @@ static struct dib0070_config dib7070p_dib0070_config = {
};
struct dib0700_adapter_state {
- int (*set_param_save) (struct dvb_frontend *,
- struct dvb_frontend_parameters *);
+ int (*set_param_save) (struct dvb_frontend *);
};
-static int dib7070_set_param_override(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *fep)
+static int dib7070_set_param_override(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct dvb_usb_adapter *adap = fe->dvb->priv;
struct dib0700_adapter_state *state = adap->priv;
u16 offset;
- u8 band = BAND_OF_FREQUENCY(fep->frequency/1000);
+ u8 band = BAND_OF_FREQUENCY(p->frequency/1000);
switch (band) {
case BAND_VHF: offset = 950; break;
default:
@@ -1087,7 +1086,7 @@ static int dib7070_set_param_override(struct dvb_frontend *fe,
dib7000p_set_wbd_ref(fe, offset + dib0070_wbd_offset(fe));
- return state->set_param_save(fe, fep);
+ return state->set_param_save(fe);
}
static int cxusb_dualdig4_rev2_tuner_attach(struct dvb_usb_adapter *adap)
diff --git a/drivers/media/dvb/dvb-usb/dib0700.h b/drivers/media/dvb/dvb-usb/dib0700.h
index 9bd6d51b3b93..7de125c0b36f 100644
--- a/drivers/media/dvb/dvb-usb/dib0700.h
+++ b/drivers/media/dvb/dvb-usb/dib0700.h
@@ -48,6 +48,8 @@ struct dib0700_state {
u8 disable_streaming_master_mode;
u32 fw_version;
u32 nb_packet_buffer_size;
+ int (*read_status)(struct dvb_frontend *, fe_status_t *);
+ int (*sleep)(struct dvb_frontend *fe);
u8 buf[255];
};
diff --git a/drivers/media/dvb/dvb-usb/dib0700_core.c b/drivers/media/dvb/dvb-usb/dib0700_core.c
index 206999476f02..070e82aa53f5 100644
--- a/drivers/media/dvb/dvb-usb/dib0700_core.c
+++ b/drivers/media/dvb/dvb-usb/dib0700_core.c
@@ -834,6 +834,7 @@ static struct usb_driver dib0700_driver = {
module_usb_driver(dib0700_driver);
+MODULE_FIRMWARE("dvb-usb-dib0700-1.20.fw");
MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>");
MODULE_DESCRIPTION("Driver for devices based on DiBcom DiB0700 - USB bridge");
MODULE_VERSION("1.0");
diff --git a/drivers/media/dvb/dvb-usb/dib0700_devices.c b/drivers/media/dvb/dvb-usb/dib0700_devices.c
index f313182eb9d5..f9e966aa26e7 100644
--- a/drivers/media/dvb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/dvb/dvb-usb/dib0700_devices.c
@@ -30,7 +30,7 @@ MODULE_PARM_DESC(force_lna_activation, "force the activation of Low-Noise-Amplif
"if applicable for the device (default: 0=automatic/off).");
struct dib0700_adapter_state {
- int (*set_param_save) (struct dvb_frontend *, struct dvb_frontend_parameters *);
+ int (*set_param_save) (struct dvb_frontend *);
const struct firmware *frontend_firmware;
};
@@ -804,13 +804,14 @@ static struct dib0070_config dib7770p_dib0070_config = {
.charge_pump = 2,
};
-static int dib7070_set_param_override(struct dvb_frontend *fe, struct dvb_frontend_parameters *fep)
+static int dib7070_set_param_override(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct dvb_usb_adapter *adap = fe->dvb->priv;
struct dib0700_adapter_state *state = adap->priv;
u16 offset;
- u8 band = BAND_OF_FREQUENCY(fep->frequency/1000);
+ u8 band = BAND_OF_FREQUENCY(p->frequency/1000);
switch (band) {
case BAND_VHF: offset = 950; break;
case BAND_UHF:
@@ -818,17 +819,17 @@ static int dib7070_set_param_override(struct dvb_frontend *fe, struct dvb_fronte
}
deb_info("WBD for DiB7000P: %d\n", offset + dib0070_wbd_offset(fe));
dib7000p_set_wbd_ref(fe, offset + dib0070_wbd_offset(fe));
- return state->set_param_save(fe, fep);
+ return state->set_param_save(fe);
}
-static int dib7770_set_param_override(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *fep)
+static int dib7770_set_param_override(struct dvb_frontend *fe)
{
- struct dvb_usb_adapter *adap = fe->dvb->priv;
- struct dib0700_adapter_state *state = adap->priv;
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+ struct dvb_usb_adapter *adap = fe->dvb->priv;
+ struct dib0700_adapter_state *state = adap->priv;
u16 offset;
- u8 band = BAND_OF_FREQUENCY(fep->frequency/1000);
+ u8 band = BAND_OF_FREQUENCY(p->frequency/1000);
switch (band) {
case BAND_VHF:
dib7000p_set_gpio(fe, 0, 0, 1);
@@ -842,7 +843,7 @@ static int dib7770_set_param_override(struct dvb_frontend *fe,
}
deb_info("WBD for DiB7000P: %d\n", offset + dib0070_wbd_offset(fe));
dib7000p_set_wbd_ref(fe, offset + dib0070_wbd_offset(fe));
- return state->set_param_save(fe, fep);
+ return state->set_param_save(fe);
}
static int dib7770p_tuner_attach(struct dvb_usb_adapter *adap)
@@ -1205,14 +1206,14 @@ static struct dib0070_config dib807x_dib0070_config[2] = {
}
};
-static int dib807x_set_param_override(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *fep)
+static int dib807x_set_param_override(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct dvb_usb_adapter *adap = fe->dvb->priv;
struct dib0700_adapter_state *state = adap->priv;
u16 offset = dib0070_wbd_offset(fe);
- u8 band = BAND_OF_FREQUENCY(fep->frequency/1000);
+ u8 band = BAND_OF_FREQUENCY(p->frequency/1000);
switch (band) {
case BAND_VHF:
offset += 750;
@@ -1224,7 +1225,7 @@ static int dib807x_set_param_override(struct dvb_frontend *fe,
deb_info("WBD for DiB8000: %d\n", offset);
dib8000_set_wbd_ref(fe, offset);
- return state->set_param_save(fe, fep);
+ return state->set_param_save(fe);
}
static int dib807x_tuner_attach(struct dvb_usb_adapter *adap)
@@ -1279,7 +1280,7 @@ static int stk807x_frontend_attach(struct dvb_usb_adapter *adap)
dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 1);
dib8000_i2c_enumeration(&adap->dev->i2c_adap, 1, 18,
- 0x80);
+ 0x80, 0);
adap->fe_adap[0].fe = dvb_attach(dib8000_attach, &adap->dev->i2c_adap, 0x80,
&dib807x_dib8000_config[0]);
@@ -1308,7 +1309,7 @@ static int stk807xpvr_frontend_attach0(struct dvb_usb_adapter *adap)
dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 1);
/* initialize IC 0 */
- dib8000_i2c_enumeration(&adap->dev->i2c_adap, 1, 0x22, 0x80);
+ dib8000_i2c_enumeration(&adap->dev->i2c_adap, 1, 0x22, 0x80, 0);
adap->fe_adap[0].fe = dvb_attach(dib8000_attach, &adap->dev->i2c_adap, 0x80,
&dib807x_dib8000_config[0]);
@@ -1319,7 +1320,7 @@ static int stk807xpvr_frontend_attach0(struct dvb_usb_adapter *adap)
static int stk807xpvr_frontend_attach1(struct dvb_usb_adapter *adap)
{
/* initialize IC 1 */
- dib8000_i2c_enumeration(&adap->dev->i2c_adap, 1, 0x12, 0x82);
+ dib8000_i2c_enumeration(&adap->dev->i2c_adap, 1, 0x12, 0x82, 0);
adap->fe_adap[0].fe = dvb_attach(dib8000_attach, &adap->dev->i2c_adap, 0x82,
&dib807x_dib8000_config[1]);
@@ -1328,7 +1329,7 @@ static int stk807xpvr_frontend_attach1(struct dvb_usb_adapter *adap)
}
/* STK8096GP */
-struct dibx000_agc_config dib8090_agc_config[2] = {
+static struct dibx000_agc_config dib8090_agc_config[2] = {
{
BAND_UHF | BAND_VHF | BAND_LBAND | BAND_SBAND,
/* P_agc_use_sd_mod1=0, P_agc_use_sd_mod2=0, P_agc_freq_pwm_div=1,
@@ -1503,22 +1504,22 @@ static struct dib0090_config dib809x_dib0090_config = {
.fref_clock_ratio = 6,
};
-static int dib8096_set_param_override(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *fep)
+static int dib8096_set_param_override(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct dvb_usb_adapter *adap = fe->dvb->priv;
struct dib0700_adapter_state *state = adap->priv;
- u8 band = BAND_OF_FREQUENCY(fep->frequency/1000);
+ u8 band = BAND_OF_FREQUENCY(p->frequency/1000);
u16 target;
int ret = 0;
enum frontend_tune_state tune_state = CT_SHUTDOWN;
u16 ltgain, rf_gain_limit;
- ret = state->set_param_save(fe, fep);
+ ret = state->set_param_save(fe);
if (ret < 0)
return ret;
- target = (dib0090_get_wbd_offset(fe) * 8 * 18 / 33 + 1) / 2;
+ target = (dib0090_get_wbd_target(fe) * 8 * 18 / 33 + 1) / 2;
dib8000_set_wbd_ref(fe, target);
@@ -1578,7 +1579,7 @@ static int stk809x_frontend_attach(struct dvb_usb_adapter *adap)
msleep(10);
dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 1);
- dib8000_i2c_enumeration(&adap->dev->i2c_adap, 1, 18, 0x80);
+ dib8000_i2c_enumeration(&adap->dev->i2c_adap, 1, 18, 0x80, 0);
adap->fe_adap[0].fe = dvb_attach(dib8000_attach, &adap->dev->i2c_adap, 0x80, &dib809x_dib8000_config[0]);
@@ -1629,7 +1630,7 @@ static int nim8096md_frontend_attach(struct dvb_usb_adapter *adap)
msleep(20);
dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 1);
- dib8000_i2c_enumeration(&adap->dev->i2c_adap, 2, 18, 0x80);
+ dib8000_i2c_enumeration(&adap->dev->i2c_adap, 2, 18, 0x80, 0);
adap->fe_adap[0].fe = dvb_attach(dib8000_attach, &adap->dev->i2c_adap, 0x80, &dib809x_dib8000_config[0]);
if (adap->fe_adap[0].fe == NULL)
@@ -1641,6 +1642,261 @@ static int nim8096md_frontend_attach(struct dvb_usb_adapter *adap)
return fe_slave == NULL ? -ENODEV : 0;
}
+/* TFE8096P */
+static struct dibx000_agc_config dib8096p_agc_config[2] = {
+ {
+ .band_caps = BAND_UHF,
+ /* P_agc_use_sd_mod1=0, P_agc_use_sd_mod2=0,
+ P_agc_freq_pwm_div=1, P_agc_inv_pwm1=0,
+ P_agc_inv_pwm2=0, P_agc_inh_dc_rv_est=0,
+ P_agc_time_est=3, P_agc_freeze=0, P_agc_nb_est=5,
+ P_agc_write=0 */
+ .setup = (0 << 15) | (0 << 14) | (5 << 11)
+ | (0 << 10) | (0 << 9) | (0 << 8) | (3 << 5)
+ | (0 << 4) | (5 << 1) | (0 << 0),
+
+ .inv_gain = 684,
+ .time_stabiliz = 10,
+
+ .alpha_level = 0,
+ .thlock = 118,
+
+ .wbd_inv = 0,
+ .wbd_ref = 1200,
+ .wbd_sel = 3,
+ .wbd_alpha = 5,
+
+ .agc1_max = 65535,
+ .agc1_min = 0,
+
+ .agc2_max = 32767,
+ .agc2_min = 0,
+
+ .agc1_pt1 = 0,
+ .agc1_pt2 = 0,
+ .agc1_pt3 = 105,
+ .agc1_slope1 = 0,
+ .agc1_slope2 = 156,
+ .agc2_pt1 = 105,
+ .agc2_pt2 = 255,
+ .agc2_slope1 = 54,
+ .agc2_slope2 = 0,
+
+ .alpha_mant = 28,
+ .alpha_exp = 26,
+ .beta_mant = 31,
+ .beta_exp = 51,
+
+ .perform_agc_softsplit = 0,
+ } , {
+ .band_caps = BAND_FM | BAND_VHF | BAND_CBAND,
+ /* P_agc_use_sd_mod1=0, P_agc_use_sd_mod2=0,
+ P_agc_freq_pwm_div=1, P_agc_inv_pwm1=0,
+ P_agc_inv_pwm2=0, P_agc_inh_dc_rv_est=0,
+ P_agc_time_est=3, P_agc_freeze=0, P_agc_nb_est=5,
+ P_agc_write=0 */
+ .setup = (0 << 15) | (0 << 14) | (5 << 11)
+ | (0 << 10) | (0 << 9) | (0 << 8) | (3 << 5)
+ | (0 << 4) | (5 << 1) | (0 << 0),
+
+ .inv_gain = 732,
+ .time_stabiliz = 10,
+
+ .alpha_level = 0,
+ .thlock = 118,
+
+ .wbd_inv = 0,
+ .wbd_ref = 1200,
+ .wbd_sel = 3,
+ .wbd_alpha = 5,
+
+ .agc1_max = 65535,
+ .agc1_min = 0,
+
+ .agc2_max = 32767,
+ .agc2_min = 0,
+
+ .agc1_pt1 = 0,
+ .agc1_pt2 = 0,
+ .agc1_pt3 = 98,
+ .agc1_slope1 = 0,
+ .agc1_slope2 = 167,
+ .agc2_pt1 = 98,
+ .agc2_pt2 = 255,
+ .agc2_slope1 = 52,
+ .agc2_slope2 = 0,
+
+ .alpha_mant = 28,
+ .alpha_exp = 26,
+ .beta_mant = 31,
+ .beta_exp = 51,
+
+ .perform_agc_softsplit = 0,
+ }
+};
+
+static struct dibx000_bandwidth_config dib8096p_clock_config_12_mhz = {
+ 108000, 13500,
+ 1, 9, 1, 0, 0,
+ 0, 0, 0, 0, 2,
+ (3 << 14) | (1 << 12) | (524 << 0),
+ (0 << 25) | 0,
+ 20199729,
+ 12000000,
+};
+
+static struct dib8000_config tfe8096p_dib8000_config = {
+ .output_mpeg2_in_188_bytes = 1,
+ .hostbus_diversity = 1,
+ .update_lna = NULL,
+
+ .agc_config_count = 2,
+ .agc = dib8096p_agc_config,
+ .pll = &dib8096p_clock_config_12_mhz,
+
+ .gpio_dir = DIB8000_GPIO_DEFAULT_DIRECTIONS,
+ .gpio_val = DIB8000_GPIO_DEFAULT_VALUES,
+ .gpio_pwm_pos = DIB8000_GPIO_DEFAULT_PWM_POS,
+
+ .agc_control = NULL,
+ .diversity_delay = 48,
+ .output_mode = OUTMODE_MPEG2_FIFO,
+ .enMpegOutput = 1,
+};
+
+static struct dib0090_wbd_slope dib8096p_wbd_table[] = {
+ { 380, 81, 850, 64, 540, 4},
+ { 860, 51, 866, 21, 375, 4},
+ {1700, 0, 250, 0, 100, 6},
+ {2600, 0, 250, 0, 100, 6},
+ { 0xFFFF, 0, 0, 0, 0, 0},
+};
+
+static const struct dib0090_config tfe8096p_dib0090_config = {
+ .io.clock_khz = 12000,
+ .io.pll_bypass = 0,
+ .io.pll_range = 0,
+ .io.pll_prediv = 3,
+ .io.pll_loopdiv = 6,
+ .io.adc_clock_ratio = 0,
+ .io.pll_int_loop_filt = 0,
+ .reset = dib8096p_tuner_sleep,
+ .sleep = dib8096p_tuner_sleep,
+
+ .freq_offset_khz_uhf = -143,
+ .freq_offset_khz_vhf = -143,
+
+ .get_adc_power = dib8090_get_adc_power,
+
+ .clkouttobamse = 1,
+ .analog_output = 0,
+
+ .wbd_vhf_offset = 0,
+ .wbd_cband_offset = 0,
+ .use_pwm_agc = 1,
+ .clkoutdrive = 0,
+
+ .fref_clock_ratio = 1,
+
+ .wbd = dib8096p_wbd_table,
+
+ .ls_cfg_pad_drv = 0,
+ .data_tx_drv = 0,
+ .low_if = NULL,
+ .in_soc = 1,
+ .force_cband_input = 0,
+};
+
+struct dibx090p_adc {
+ u32 freq; /* RF frequency, kHz */
+ u32 timf; /* New Timf */
+ u32 pll_loopdiv; /* New loopdiv */
+ u32 pll_prediv; /* New prediv */
+};
+
+static struct dibx090p_adc dib8090p_adc_tab[] = {
+ { 50000, 17043521, 16, 3}, /* 64 MHz */
+ {878000, 20199729, 9, 1}, /* 60 MHz */
+ {0xffffffff, 0, 0, 0}, /* end-of-table marker */
+};
+
+static int dib8096p_agc_startup(struct dvb_frontend *fe)
+{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+ struct dvb_usb_adapter *adap = fe->dvb->priv;
+ struct dib0700_adapter_state *state = adap->priv;
+ struct dibx000_bandwidth_config pll;
+ u16 target;
+ int better_sampling_freq = 0, ret;
+ struct dibx090p_adc *adc_table = &dib8090p_adc_tab[0];
+
+ ret = state->set_param_save(fe);
+ if (ret < 0)
+ return ret;
+ memset(&pll, 0, sizeof(struct dibx000_bandwidth_config));
+
+ dib0090_pwm_gain_reset(fe);
+ /* dib0090_get_wbd_target returns the temperature-compensated
+ wbd target */
+ target = (dib0090_get_wbd_target(fe) * 8 + 1) / 2;
+ dib8000_set_wbd_ref(fe, target);
+
+
+ while (p->frequency / 1000 > adc_table->freq) {
+ better_sampling_freq = 1;
+ adc_table++;
+ }
+
+ if ((adc_table->freq != 0xffffffff) && better_sampling_freq) {
+ pll.pll_ratio = adc_table->pll_loopdiv;
+ pll.pll_prediv = adc_table->pll_prediv;
+ dib8000_update_pll(fe, &pll);
+ dib8000_ctrl_timf(fe, DEMOD_TIMF_SET, adc_table->timf);
+ }
+ return 0;
+}
+
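The AGC startup above walks a frequency-ordered table with an 0xffffffff sentinel to decide whether a better ADC sampling setup exists for the tuned frequency, then programs the matching PLL dividers and timf. A simplified, self-contained sketch of that sentinel-terminated lookup (user-space C, made-up values standing in for dib8090p_adc_tab):

#include <stdio.h>

struct adc_entry {
	unsigned long freq_khz;		/* upper bound of the range covered */
	unsigned long timf;
	unsigned int  pll_loopdiv;
	unsigned int  pll_prediv;
};

static const struct adc_entry adc_tab[] = {
	{  50000, 17043521, 16, 3 },
	{ 878000, 20199729,  9, 1 },
	{ 0xffffffff, 0, 0, 0 },	/* sentinel: end of table */
};

static const struct adc_entry *pick_adc(unsigned long freq_khz)
{
	const struct adc_entry *e = &adc_tab[0];

	/* Advance while the tuned frequency exceeds the entry's bound;
	 * the sentinel guarantees the walk terminates. */
	while (freq_khz > e->freq_khz)
		e++;
	return e->freq_khz != 0xffffffff ? e : NULL;
}

int main(void)
{
	const struct adc_entry *e = pick_adc(474000);	/* 474 MHz in kHz */

	if (e)
		printf("loopdiv=%u prediv=%u timf=%lu\n",
		       e->pll_loopdiv, e->pll_prediv, e->timf);
	return 0;
}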
+static int tfe8096p_frontend_attach(struct dvb_usb_adapter *adap)
+{
+ dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1);
+ msleep(20);
+ dib0700_set_gpio(adap->dev, GPIO9, GPIO_OUT, 1);
+ dib0700_set_gpio(adap->dev, GPIO4, GPIO_OUT, 1);
+ dib0700_set_gpio(adap->dev, GPIO7, GPIO_OUT, 1);
+
+ dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0);
+
+ dib0700_ctrl_clock(adap->dev, 72, 1);
+
+ msleep(20);
+ dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1);
+ msleep(20);
+ dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 1);
+
+ dib8000_i2c_enumeration(&adap->dev->i2c_adap, 1, 0x10, 0x80, 1);
+
+ adap->fe_adap[0].fe = dvb_attach(dib8000_attach,
+ &adap->dev->i2c_adap, 0x80, &tfe8096p_dib8000_config);
+
+ return adap->fe_adap[0].fe == NULL ? -ENODEV : 0;
+}
+
+static int tfe8096p_tuner_attach(struct dvb_usb_adapter *adap)
+{
+ struct dib0700_adapter_state *st = adap->priv;
+ struct i2c_adapter *tun_i2c = dib8096p_get_i2c_tuner(adap->fe_adap[0].fe);
+
+ if (dvb_attach(dib0090_register, adap->fe_adap[0].fe, tun_i2c,
+ &tfe8096p_dib0090_config) == NULL)
+ return -ENODEV;
+
+ dib8000_set_gpio(adap->fe_adap[0].fe, 8, 0, 1);
+
+ st->set_param_save = adap->fe_adap[0].fe->ops.tuner_ops.set_params;
+ adap->fe_adap[0].fe->ops.tuner_ops.set_params = dib8096p_agc_startup;
+ return 0;
+}
+
/* STK9090M */
static int dib90x0_pid_filter(struct dvb_usb_adapter *adapter, int index, u16 pid, int onoff)
{
@@ -1883,7 +2139,7 @@ static int dib9090_tuner_attach(struct dvb_usb_adapter *adap)
i2c = dib9000_get_i2c_master(adap->fe_adap[0].fe, DIBX000_I2C_INTERFACE_GPIO_1_2, 0);
if (dib01x0_pmu_update(i2c, data_dib190, 10) != 0)
return -ENODEV;
- dib0700_set_i2c_speed(adap->dev, 2000);
+ dib0700_set_i2c_speed(adap->dev, 1500);
if (dib9000_firmware_post_pll_init(adap->fe_adap[0].fe) < 0)
return -ENODEV;
release_firmware(state->frontend_firmware);
@@ -1962,7 +2218,8 @@ static int nim9090md_tuner_attach(struct dvb_usb_adapter *adap)
i2c = dib9000_get_i2c_master(adap->fe_adap[0].fe, DIBX000_I2C_INTERFACE_GPIO_1_2, 0);
if (dib01x0_pmu_update(i2c, data_dib190, 10) < 0)
return -ENODEV;
- dib0700_set_i2c_speed(adap->dev, 2000);
+
+ dib0700_set_i2c_speed(adap->dev, 1500);
if (dib9000_firmware_post_pll_init(adap->fe_adap[0].fe) < 0)
return -ENODEV;
@@ -1975,7 +2232,7 @@ static int nim9090md_tuner_attach(struct dvb_usb_adapter *adap)
if (dvb_attach(dib0090_fw_register, fe_slave, i2c, &nim9090md_dib0090_config[1]) == NULL)
return -ENODEV;
fe_slave->dvb = adap->fe_adap[0].fe->dvb;
- dib9000_fw_set_component_bus_speed(adap->fe_adap[0].fe, 2000);
+ dib9000_fw_set_component_bus_speed(adap->fe_adap[0].fe, 1500);
if (dib9000_firmware_post_pll_init(fe_slave) < 0)
return -ENODEV;
}
@@ -2064,7 +2321,7 @@ static int dib7090p_get_best_sampling(struct dvb_frontend *fe , struct dib7090p_
return 0;
}
-static int dib7090_agc_startup(struct dvb_frontend *fe, struct dvb_frontend_parameters *fep)
+static int dib7090_agc_startup(struct dvb_frontend *fe)
{
struct dvb_usb_adapter *adap = fe->dvb->priv;
struct dib0700_adapter_state *state = adap->priv;
@@ -2073,13 +2330,13 @@ static int dib7090_agc_startup(struct dvb_frontend *fe, struct dvb_frontend_para
struct dib7090p_best_adc adc;
int ret;
- ret = state->set_param_save(fe, fep);
+ ret = state->set_param_save(fe);
if (ret < 0)
return ret;
memset(&pll, 0, sizeof(struct dibx000_bandwidth_config));
dib0090_pwm_gain_reset(fe);
- target = (dib0090_get_wbd_offset(fe) * 8 + 1) / 2;
+ target = (dib0090_get_wbd_target(fe) * 8 + 1) / 2;
dib7000p_set_wbd_ref(fe, target);
if (dib7090p_get_best_sampling(fe, &adc) == 0) {
@@ -2092,6 +2349,49 @@ static int dib7090_agc_startup(struct dvb_frontend *fe, struct dvb_frontend_para
return 0;
}
+static int dib7090_agc_restart(struct dvb_frontend *fe, u8 restart)
+{
+ deb_info("AGC restart callback: %d", restart);
+ if (restart == 0) /* before AGC startup */
+ dib0090_set_dc_servo(fe, 1);
+ return 0;
+}
+
+static int dib7090e_update_lna(struct dvb_frontend *fe, u16 agc_global)
+{
+ u16 agc1 = 0, agc2, wbd = 0, wbd_target, wbd_offset, threshold_agc1;
+ s16 wbd_delta;
+
+ if ((fe->dtv_property_cache.frequency) < 400000000)
+ threshold_agc1 = 25000;
+ else
+ threshold_agc1 = 30000;
+
+ wbd_target = (dib0090_get_wbd_target(fe)*8+1)/2;
+ wbd_offset = dib0090_get_wbd_offset(fe);
+ dib7000p_get_agc_values(fe, NULL, &agc1, &agc2, &wbd);
+ wbd_delta = (s16)wbd - (((s16)wbd_offset + 10) * 4);
+
+ deb_info("update lna, agc_global=%d agc1=%d agc2=%d",
+ agc_global, agc1, agc2);
+ deb_info("update lna, wbd=%d wbd target=%d wbd offset=%d wbd delta=%d",
+ wbd, wbd_target, wbd_offset, wbd_delta);
+
+ if ((agc1 < threshold_agc1) && (wbd_delta > 0)) {
+ dib0090_set_switch(fe, 1, 1, 1);
+ dib0090_set_vga(fe, 0);
+ dib0090_update_rframp_7090(fe, 0);
+ dib0090_update_tuning_table_7090(fe, 0);
+ } else {
+ dib0090_set_vga(fe, 1);
+ dib0090_update_rframp_7090(fe, 1);
+ dib0090_update_tuning_table_7090(fe, 1);
+ dib0090_set_switch(fe, 0, 0, 0);
+ }
+
+ return 0;
+}
+
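dib7090e_update_lna above selects between two RF front-end configurations from two measurements: the AGC1 level against a frequency-dependent threshold and the wideband detector reading against its calibrated offset. A self-contained sketch of just that decision (the register writes done by dib0090_set_switch/set_vga/update_rframp_7090/update_tuning_table_7090 are omitted; names and values are illustrative):

#include <stdio.h>

/* Returns 1 for the first configuration programmed by the driver
 * (set_switch(1,1,1), VGA off, ramp/tuning-table variant 0) and 0 for
 * the second. */
static int select_rf_config(unsigned int freq_hz, unsigned int agc1, int wbd_delta)
{
	unsigned int threshold_agc1 = (freq_hz < 400000000) ? 25000 : 30000;

	return (agc1 < threshold_agc1) && (wbd_delta > 0);
}

int main(void)
{
	printf("config %d\n", select_rf_config(474000000, 20000, 40));	/* 1 */
	printf("config %d\n", select_rf_config(474000000, 40000, -5));	/* 0 */
	return 0;
}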
static struct dib0090_wbd_slope dib7090_wbd_table[] = {
{ 380, 81, 850, 64, 540, 4},
{ 860, 51, 866, 21, 375, 4},
@@ -2100,7 +2400,16 @@ static struct dib0090_wbd_slope dib7090_wbd_table[] = {
{ 0xFFFF, 0, 0, 0, 0, 0},
};
-struct dibx000_agc_config dib7090_agc_config[2] = {
+static struct dib0090_wbd_slope dib7090e_wbd_table[] = {
+ { 380, 81, 850, 64, 540, 4},
+ { 700, 51, 866, 21, 320, 4},
+ { 860, 48, 666, 18, 330, 6},
+ {1700, 0, 250, 0, 100, 6},
+ {2600, 0, 250, 0, 100, 6},
+ { 0xFFFF, 0, 0, 0, 0, 0},
+};
+
+static struct dibx000_agc_config dib7090_agc_config[2] = {
{
.band_caps = BAND_UHF,
/* P_agc_use_sd_mod1=0, P_agc_use_sd_mod2=0, P_agc_freq_pwm_div=1, P_agc_inv_pwm1=0, P_agc_inv_pwm2=0,
@@ -2278,6 +2587,34 @@ static struct dib7000p_config tfe7090pvr_dib7000p_config[2] = {
}
};
+static struct dib7000p_config tfe7090e_dib7000p_config = {
+ .output_mpeg2_in_188_bytes = 1,
+ .hostbus_diversity = 1,
+ .tuner_is_baseband = 1,
+ .update_lna = dib7090e_update_lna,
+
+ .agc_config_count = 2,
+ .agc = dib7090_agc_config,
+
+ .bw = &dib7090_clock_config_12_mhz,
+
+ .gpio_dir = DIB7000P_GPIO_DEFAULT_DIRECTIONS,
+ .gpio_val = DIB7000P_GPIO_DEFAULT_VALUES,
+ .gpio_pwm_pos = DIB7000P_GPIO_DEFAULT_PWM_POS,
+
+ .pwm_freq_div = 0,
+
+ .agc_control = dib7090_agc_restart,
+
+ .spur_protect = 0,
+ .disable_sample_and_hold = 0,
+ .enable_current_mirror = 0,
+ .diversity_delay = 0,
+
+ .output_mode = OUTMODE_MPEG2_FIFO,
+ .enMpegOutput = 1,
+};
+
static const struct dib0090_config nim7090_dib0090_config = {
.io.clock_khz = 12000,
.io.pll_bypass = 0,
@@ -2312,6 +2649,107 @@ static const struct dib0090_config nim7090_dib0090_config = {
.in_soc = 1,
};
+static const struct dib0090_config tfe7090e_dib0090_config = {
+ .io.clock_khz = 12000,
+ .io.pll_bypass = 0,
+ .io.pll_range = 0,
+ .io.pll_prediv = 3,
+ .io.pll_loopdiv = 6,
+ .io.adc_clock_ratio = 0,
+ .io.pll_int_loop_filt = 0,
+ .reset = dib7090_tuner_sleep,
+ .sleep = dib7090_tuner_sleep,
+
+ .freq_offset_khz_uhf = 0,
+ .freq_offset_khz_vhf = 0,
+
+ .get_adc_power = dib7090_get_adc_power,
+
+ .clkouttobamse = 1,
+ .analog_output = 0,
+
+ .wbd_vhf_offset = 0,
+ .wbd_cband_offset = 0,
+ .use_pwm_agc = 1,
+ .clkoutdrive = 0,
+
+ .fref_clock_ratio = 0,
+
+ .wbd = dib7090e_wbd_table,
+
+ .ls_cfg_pad_drv = 0,
+ .data_tx_drv = 0,
+ .low_if = NULL,
+ .in_soc = 1,
+ .force_cband_input = 1,
+ .is_dib7090e = 1,
+};
+
+static struct dib7000p_config tfe7790e_dib7000p_config = {
+ .output_mpeg2_in_188_bytes = 1,
+ .hostbus_diversity = 1,
+ .tuner_is_baseband = 1,
+ .update_lna = dib7090e_update_lna,
+
+ .agc_config_count = 2,
+ .agc = dib7090_agc_config,
+
+ .bw = &dib7090_clock_config_12_mhz,
+
+ .gpio_dir = DIB7000P_GPIO_DEFAULT_DIRECTIONS,
+ .gpio_val = DIB7000P_GPIO_DEFAULT_VALUES,
+ .gpio_pwm_pos = DIB7000P_GPIO_DEFAULT_PWM_POS,
+
+ .pwm_freq_div = 0,
+
+ .agc_control = dib7090_agc_restart,
+
+ .spur_protect = 0,
+ .disable_sample_and_hold = 0,
+ .enable_current_mirror = 0,
+ .diversity_delay = 0,
+
+ .output_mode = OUTMODE_MPEG2_PAR_GATED_CLK,
+ .enMpegOutput = 1,
+};
+
+static const struct dib0090_config tfe7790e_dib0090_config = {
+ .io.clock_khz = 12000,
+ .io.pll_bypass = 0,
+ .io.pll_range = 0,
+ .io.pll_prediv = 3,
+ .io.pll_loopdiv = 6,
+ .io.adc_clock_ratio = 0,
+ .io.pll_int_loop_filt = 0,
+ .reset = dib7090_tuner_sleep,
+ .sleep = dib7090_tuner_sleep,
+
+ .freq_offset_khz_uhf = 0,
+ .freq_offset_khz_vhf = 0,
+
+ .get_adc_power = dib7090_get_adc_power,
+
+ .clkouttobamse = 1,
+ .analog_output = 0,
+
+ .wbd_vhf_offset = 0,
+ .wbd_cband_offset = 0,
+ .use_pwm_agc = 1,
+ .clkoutdrive = 0,
+
+ .fref_clock_ratio = 0,
+
+ .wbd = dib7090e_wbd_table,
+
+ .ls_cfg_pad_drv = 0,
+ .data_tx_drv = 0,
+ .low_if = NULL,
+ .in_soc = 1,
+ .force_cband_input = 1,
+ .is_dib7090e = 1,
+ .force_crystal_mode = 1,
+};
+
static const struct dib0090_config tfe7090pvr_dib0090_config[2] = {
{
.io.clock_khz = 12000,
@@ -2504,6 +2942,97 @@ static int tfe7090pvr_tuner1_attach(struct dvb_usb_adapter *adap)
return 0;
}
+static int tfe7090e_frontend_attach(struct dvb_usb_adapter *adap)
+{
+ dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1);
+ msleep(20);
+ dib0700_set_gpio(adap->dev, GPIO9, GPIO_OUT, 1);
+ dib0700_set_gpio(adap->dev, GPIO4, GPIO_OUT, 1);
+ dib0700_set_gpio(adap->dev, GPIO7, GPIO_OUT, 1);
+ dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0);
+
+ msleep(20);
+ dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1);
+ msleep(20);
+ dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 1);
+
+ if (dib7000p_i2c_enumeration(&adap->dev->i2c_adap,
+ 1, 0x10, &tfe7090e_dib7000p_config) != 0) {
+ err("%s: dib7000p_i2c_enumeration failed. Cannot continue\n",
+ __func__);
+ return -ENODEV;
+ }
+ adap->fe_adap[0].fe = dvb_attach(dib7000p_attach, &adap->dev->i2c_adap,
+ 0x80, &tfe7090e_dib7000p_config);
+
+ return adap->fe_adap[0].fe == NULL ? -ENODEV : 0;
+}
+
+static int tfe7790e_frontend_attach(struct dvb_usb_adapter *adap)
+{
+ struct dib0700_state *st = adap->dev->priv;
+
+ /* The TFE7790E requires the dib0700 to not be in master mode */
+ st->disable_streaming_master_mode = 1;
+
+ dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1);
+ msleep(20);
+ dib0700_set_gpio(adap->dev, GPIO9, GPIO_OUT, 1);
+ dib0700_set_gpio(adap->dev, GPIO4, GPIO_OUT, 1);
+ dib0700_set_gpio(adap->dev, GPIO7, GPIO_OUT, 1);
+ dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0);
+ msleep(20);
+ dib0700_ctrl_clock(adap->dev, 72, 1);
+ dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1);
+ msleep(20);
+ dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 1);
+
+ if (dib7000p_i2c_enumeration(&adap->dev->i2c_adap,
+ 1, 0x10, &tfe7790e_dib7000p_config) != 0) {
+ err("%s: dib7000p_i2c_enumeration failed. Cannot continue\n",
+ __func__);
+ return -ENODEV;
+ }
+ adap->fe_adap[0].fe = dvb_attach(dib7000p_attach, &adap->dev->i2c_adap,
+ 0x80, &tfe7790e_dib7000p_config);
+
+ return adap->fe_adap[0].fe == NULL ? -ENODEV : 0;
+}
+
+static int tfe7790e_tuner_attach(struct dvb_usb_adapter *adap)
+{
+ struct dib0700_adapter_state *st = adap->priv;
+ struct i2c_adapter *tun_i2c =
+ dib7090_get_i2c_tuner(adap->fe_adap[0].fe);
+
+ if (dvb_attach(dib0090_register, adap->fe_adap[0].fe, tun_i2c,
+ &tfe7790e_dib0090_config) == NULL)
+ return -ENODEV;
+
+ dib7000p_set_gpio(adap->fe_adap[0].fe, 8, 0, 1);
+
+ st->set_param_save = adap->fe_adap[0].fe->ops.tuner_ops.set_params;
+ adap->fe_adap[0].fe->ops.tuner_ops.set_params = dib7090_agc_startup;
+ return 0;
+}
+
+static int tfe7090e_tuner_attach(struct dvb_usb_adapter *adap)
+{
+ struct dib0700_adapter_state *st = adap->priv;
+ struct i2c_adapter *tun_i2c =
+ dib7090_get_i2c_tuner(adap->fe_adap[0].fe);
+
+ if (dvb_attach(dib0090_register, adap->fe_adap[0].fe, tun_i2c,
+ &tfe7090e_dib0090_config) == NULL)
+ return -ENODEV;
+
+ dib7000p_set_gpio(adap->fe_adap[0].fe, 8, 0, 1);
+
+ st->set_param_save = adap->fe_adap[0].fe->ops.tuner_ops.set_params;
+ adap->fe_adap[0].fe->ops.tuner_ops.set_params = dib7090_agc_startup;
+ return 0;
+}
+
/* STK7070PD */
static struct dib7000p_config stk7070pd_dib7000p_config[2] = {
{
@@ -2537,19 +3066,25 @@ static struct dib7000p_config stk7070pd_dib7000p_config[2] = {
}
};
-static int stk7070pd_frontend_attach0(struct dvb_usb_adapter *adap)
+static void stk7070pd_init(struct dvb_usb_device *dev)
{
- dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1);
+ dib0700_set_gpio(dev, GPIO6, GPIO_OUT, 1);
msleep(10);
- dib0700_set_gpio(adap->dev, GPIO9, GPIO_OUT, 1);
- dib0700_set_gpio(adap->dev, GPIO4, GPIO_OUT, 1);
- dib0700_set_gpio(adap->dev, GPIO7, GPIO_OUT, 1);
- dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0);
+ dib0700_set_gpio(dev, GPIO9, GPIO_OUT, 1);
+ dib0700_set_gpio(dev, GPIO4, GPIO_OUT, 1);
+ dib0700_set_gpio(dev, GPIO7, GPIO_OUT, 1);
+ dib0700_set_gpio(dev, GPIO10, GPIO_OUT, 0);
- dib0700_ctrl_clock(adap->dev, 72, 1);
+ dib0700_ctrl_clock(dev, 72, 1);
msleep(10);
- dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1);
+ dib0700_set_gpio(dev, GPIO10, GPIO_OUT, 1);
+}
+
+static int stk7070pd_frontend_attach0(struct dvb_usb_adapter *adap)
+{
+ stk7070pd_init(adap->dev);
+
msleep(10);
dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 1);
@@ -2570,6 +3105,77 @@ static int stk7070pd_frontend_attach1(struct dvb_usb_adapter *adap)
return adap->fe_adap[0].fe == NULL ? -ENODEV : 0;
}
+static int novatd_read_status_override(struct dvb_frontend *fe,
+ fe_status_t *stat)
+{
+ struct dvb_usb_adapter *adap = fe->dvb->priv;
+ struct dvb_usb_device *dev = adap->dev;
+ struct dib0700_state *state = dev->priv;
+ int ret;
+
+ ret = state->read_status(fe, stat);
+
+ if (!ret)
+ dib0700_set_gpio(dev, adap->id == 0 ? GPIO1 : GPIO0, GPIO_OUT,
+ !!(*stat & FE_HAS_LOCK));
+
+ return ret;
+}
+
+static int novatd_sleep_override(struct dvb_frontend *fe)
+{
+ struct dvb_usb_adapter *adap = fe->dvb->priv;
+ struct dvb_usb_device *dev = adap->dev;
+ struct dib0700_state *state = dev->priv;
+
+ /* turn off LED */
+ dib0700_set_gpio(dev, adap->id == 0 ? GPIO1 : GPIO0, GPIO_OUT, 0);
+
+ return state->sleep(fe);
+}
+
+/**
+ * novatd_frontend_attach - Nova-TD specific attach
+ *
+ * Nova-TD has GPIO0, 1 and 2 for LEDs. So do not fiddle with them except for
+ * information purposes.
+ */
+static int novatd_frontend_attach(struct dvb_usb_adapter *adap)
+{
+ struct dvb_usb_device *dev = adap->dev;
+ struct dib0700_state *st = dev->priv;
+
+ if (adap->id == 0) {
+ stk7070pd_init(dev);
+
+ /* turn the power LED on, the other two off (just in case) */
+ dib0700_set_gpio(dev, GPIO0, GPIO_OUT, 0);
+ dib0700_set_gpio(dev, GPIO1, GPIO_OUT, 0);
+ dib0700_set_gpio(dev, GPIO2, GPIO_OUT, 1);
+
+ if (dib7000p_i2c_enumeration(&dev->i2c_adap, 2, 18,
+ stk7070pd_dib7000p_config) != 0) {
+ err("%s: dib7000p_i2c_enumeration failed. Cannot continue\n",
+ __func__);
+ return -ENODEV;
+ }
+ }
+
+ adap->fe_adap[0].fe = dvb_attach(dib7000p_attach, &dev->i2c_adap,
+ adap->id == 0 ? 0x80 : 0x82,
+ &stk7070pd_dib7000p_config[adap->id]);
+
+ if (adap->fe_adap[0].fe == NULL)
+ return -ENODEV;
+
+ st->read_status = adap->fe_adap[0].fe->ops.read_status;
+ adap->fe_adap[0].fe->ops.read_status = novatd_read_status_override;
+ st->sleep = adap->fe_adap[0].fe->ops.sleep;
+ adap->fe_adap[0].fe->ops.sleep = novatd_sleep_override;
+
+ return 0;
+}
+
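The Nova-TD support above saves the demodulator's original read_status and sleep operations in driver state and installs thin wrappers that drive the per-adapter LED before delegating. A self-contained sketch of that save-and-wrap pattern (toy types; the dvb_frontend_ops machinery plays this role in the driver):

#include <stdio.h>

struct frontend;

struct frontend_ops {
	int (*read_status)(struct frontend *fe, int *locked);
};

struct frontend {
	struct frontend_ops ops;
};

/* Driver state keeps the original pointer, like dib0700_state->read_status. */
static int (*saved_read_status)(struct frontend *fe, int *locked);

static int real_read_status(struct frontend *fe, int *locked)
{
	*locked = 1;
	return 0;
}

/* Wrapper: call the saved op, then act on the result (the LED in the driver). */
static int read_status_override(struct frontend *fe, int *locked)
{
	int ret = saved_read_status(fe, locked);

	if (!ret)
		printf("LED %s\n", *locked ? "on" : "off");
	return ret;
}

int main(void)
{
	struct frontend fe = { .ops = { .read_status = real_read_status } };
	int locked;

	saved_read_status = fe.ops.read_status;		/* save the original */
	fe.ops.read_status = read_status_override;	/* install the wrapper */

	return fe.ops.read_status(&fe, &locked);
}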
/* S5H1411 */
static struct s5h1411_config pinnacle_801e_config = {
.output_mode = S5H1411_PARALLEL_OUTPUT,
@@ -2960,6 +3566,9 @@ struct usb_device_id dib0700_usb_id_table[] = {
/* 75 */{ USB_DEVICE(USB_VID_MEDION, USB_PID_CREATIX_CTX1921) },
{ USB_DEVICE(USB_VID_PINNACLE, USB_PID_PINNACLE_PCTV340E) },
{ USB_DEVICE(USB_VID_PINNACLE, USB_PID_PINNACLE_PCTV340E_SE) },
+ { USB_DEVICE(USB_VID_DIBCOM, USB_PID_DIBCOM_TFE7090E) },
+ { USB_DEVICE(USB_VID_DIBCOM, USB_PID_DIBCOM_TFE7790E) },
+/* 80 */{ USB_DEVICE(USB_VID_DIBCOM, USB_PID_DIBCOM_TFE8096P) },
{ 0 } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, dib0700_usb_id_table);
@@ -3338,6 +3947,57 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.pid_filter_count = 32,
.pid_filter = stk70x0p_pid_filter,
.pid_filter_ctrl = stk70x0p_pid_filter_ctrl,
+ .frontend_attach = novatd_frontend_attach,
+ .tuner_attach = dib7070p_tuner_attach,
+
+ DIB0700_DEFAULT_STREAMING_CONFIG(0x02),
+ }},
+ .size_of_priv = sizeof(struct dib0700_adapter_state),
+ }, {
+ .num_frontends = 1,
+ .fe = {{
+ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
+ .pid_filter_count = 32,
+ .pid_filter = stk70x0p_pid_filter,
+ .pid_filter_ctrl = stk70x0p_pid_filter_ctrl,
+ .frontend_attach = novatd_frontend_attach,
+ .tuner_attach = dib7070p_tuner_attach,
+
+ DIB0700_DEFAULT_STREAMING_CONFIG(0x03),
+ }},
+ .size_of_priv = sizeof(struct dib0700_adapter_state),
+ }
+ },
+
+ .num_device_descs = 1,
+ .devices = {
+ { "Hauppauge Nova-TD Stick (52009)",
+ { &dib0700_usb_id_table[35], NULL },
+ { NULL },
+ },
+ },
+
+ .rc.core = {
+ .rc_interval = DEFAULT_RC_INTERVAL,
+ .rc_codes = RC_MAP_DIB0700_RC5_TABLE,
+ .module_name = "dib0700",
+ .rc_query = dib0700_rc_query_old_firmware,
+ .allowed_protos = RC_TYPE_RC5 |
+ RC_TYPE_RC6 |
+ RC_TYPE_NEC,
+ .change_protocol = dib0700_change_protocol,
+ },
+ }, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
+
+ .num_adapters = 2,
+ .adapter = {
+ {
+ .num_frontends = 1,
+ .fe = {{
+ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
+ .pid_filter_count = 32,
+ .pid_filter = stk70x0p_pid_filter,
+ .pid_filter_ctrl = stk70x0p_pid_filter_ctrl,
.frontend_attach = stk7070pd_frontend_attach0,
.tuner_attach = dib7070p_tuner_attach,
@@ -3360,7 +4020,7 @@ struct dvb_usb_device_properties dib0700_devices[] = {
}
},
- .num_device_descs = 6,
+ .num_device_descs = 5,
.devices = {
{ "DiBcom STK7070PD reference design",
{ &dib0700_usb_id_table[17], NULL },
@@ -3370,10 +4030,6 @@ struct dvb_usb_device_properties dib0700_devices[] = {
{ &dib0700_usb_id_table[18], NULL },
{ NULL },
},
- { "Hauppauge Nova-TD Stick (52009)",
- { &dib0700_usb_id_table[35], NULL },
- { NULL },
- },
{ "Hauppauge Nova-TD-500 (84xxx)",
{ &dib0700_usb_id_table[36], NULL },
{ NULL },
@@ -4025,6 +4681,127 @@ struct dvb_usb_device_properties dib0700_devices[] = {
RC_TYPE_NEC,
.change_protocol = dib0700_change_protocol,
},
+ }, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
+ .num_adapters = 1,
+ .adapter = {
+ {
+ .num_frontends = 1,
+ .fe = {{
+ .caps = DVB_USB_ADAP_HAS_PID_FILTER |
+ DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
+ .pid_filter_count = 32,
+ .pid_filter = stk70x0p_pid_filter,
+ .pid_filter_ctrl = stk70x0p_pid_filter_ctrl,
+ .frontend_attach = tfe7090e_frontend_attach,
+ .tuner_attach = tfe7090e_tuner_attach,
+
+ DIB0700_DEFAULT_STREAMING_CONFIG(0x02),
+ } },
+
+ .size_of_priv =
+ sizeof(struct dib0700_adapter_state),
+ },
+ },
+
+ .num_device_descs = 1,
+ .devices = {
+ { "DiBcom TFE7090E reference design",
+ { &dib0700_usb_id_table[78], NULL },
+ { NULL },
+ },
+ },
+
+ .rc.core = {
+ .rc_interval = DEFAULT_RC_INTERVAL,
+ .rc_codes = RC_MAP_DIB0700_RC5_TABLE,
+ .module_name = "dib0700",
+ .rc_query = dib0700_rc_query_old_firmware,
+ .allowed_protos = RC_TYPE_RC5 |
+ RC_TYPE_RC6 |
+ RC_TYPE_NEC,
+ .change_protocol = dib0700_change_protocol,
+ },
+ }, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
+ .num_adapters = 1,
+ .adapter = {
+ {
+ .num_frontends = 1,
+ .fe = {{
+ .caps = DVB_USB_ADAP_HAS_PID_FILTER |
+ DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
+ .pid_filter_count = 32,
+ .pid_filter = stk70x0p_pid_filter,
+ .pid_filter_ctrl = stk70x0p_pid_filter_ctrl,
+ .frontend_attach = tfe7790e_frontend_attach,
+ .tuner_attach = tfe7790e_tuner_attach,
+
+ DIB0700_DEFAULT_STREAMING_CONFIG(0x03),
+ } },
+
+ .size_of_priv =
+ sizeof(struct dib0700_adapter_state),
+ },
+ },
+
+ .num_device_descs = 1,
+ .devices = {
+ { "DiBcom TFE7790E reference design",
+ { &dib0700_usb_id_table[79], NULL },
+ { NULL },
+ },
+ },
+
+ .rc.core = {
+ .rc_interval = DEFAULT_RC_INTERVAL,
+ .rc_codes = RC_MAP_DIB0700_RC5_TABLE,
+ .module_name = "dib0700",
+ .rc_query = dib0700_rc_query_old_firmware,
+ .allowed_protos = RC_TYPE_RC5 |
+ RC_TYPE_RC6 |
+ RC_TYPE_NEC,
+ .change_protocol = dib0700_change_protocol,
+ },
+ }, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
+ .num_adapters = 1,
+ .adapter = {
+ {
+ .num_frontends = 1,
+ .fe = {{
+ .caps = DVB_USB_ADAP_HAS_PID_FILTER |
+ DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
+ .pid_filter_count = 32,
+ .pid_filter = stk80xx_pid_filter,
+ .pid_filter_ctrl = stk80xx_pid_filter_ctrl,
+ .frontend_attach = tfe8096p_frontend_attach,
+ .tuner_attach = tfe8096p_tuner_attach,
+
+ DIB0700_DEFAULT_STREAMING_CONFIG(0x02),
+
+ } },
+
+ .size_of_priv =
+ sizeof(struct dib0700_adapter_state),
+ },
+ },
+
+ .num_device_descs = 1,
+ .devices = {
+ { "DiBcom TFE8096P reference design",
+ { &dib0700_usb_id_table[80], NULL },
+ { NULL },
+ },
+ },
+
+ .rc.core = {
+ .rc_interval = DEFAULT_RC_INTERVAL,
+ .rc_codes = RC_MAP_DIB0700_RC5_TABLE,
+ .module_name = "dib0700",
+ .rc_query = dib0700_rc_query_old_firmware,
+ .allowed_protos = RC_TYPE_RC5 |
+ RC_TYPE_RC6 |
+ RC_TYPE_NEC,
+ .change_protocol = dib0700_change_protocol,
+ },
},
};
diff --git a/drivers/media/dvb/dvb-usb/digitv.c b/drivers/media/dvb/dvb-usb/digitv.c
index 0a9a79820f26..ff34419a4c88 100644
--- a/drivers/media/dvb/dvb-usb/digitv.c
+++ b/drivers/media/dvb/dvb-usb/digitv.c
@@ -118,12 +118,12 @@ static struct mt352_config digitv_mt352_config = {
.demod_init = digitv_mt352_demod_init,
};
-static int digitv_nxt6000_tuner_set_params(struct dvb_frontend *fe, struct dvb_frontend_parameters *fep)
+static int digitv_nxt6000_tuner_set_params(struct dvb_frontend *fe)
{
struct dvb_usb_adapter *adap = fe->dvb->priv;
u8 b[5];
- fe->ops.tuner_ops.calc_regs(fe, fep, b, sizeof(b));
+ fe->ops.tuner_ops.calc_regs(fe, b, sizeof(b));
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
return digitv_ctrl_msg(adap->dev, USB_WRITE_TUNER, 0, &b[1], 4, NULL, 0);
diff --git a/drivers/media/dvb/dvb-usb/dtt200u-fe.c b/drivers/media/dvb/dvb-usb/dtt200u-fe.c
index 17413adec7a1..3d81daa49172 100644
--- a/drivers/media/dvb/dvb-usb/dtt200u-fe.c
+++ b/drivers/media/dvb/dvb-usb/dtt200u-fe.c
@@ -16,7 +16,7 @@ struct dtt200u_fe_state {
fe_status_t stat;
- struct dvb_frontend_parameters fep;
+ struct dtv_frontend_properties fep;
struct dvb_frontend frontend;
};
@@ -100,22 +100,27 @@ static int dtt200u_fe_get_tune_settings(struct dvb_frontend* fe, struct dvb_fron
return 0;
}
-static int dtt200u_fe_set_frontend(struct dvb_frontend* fe,
- struct dvb_frontend_parameters *fep)
+static int dtt200u_fe_set_frontend(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *fep = &fe->dtv_property_cache;
struct dtt200u_fe_state *state = fe->demodulator_priv;
int i;
fe_status_t st;
u16 freq = fep->frequency / 250000;
u8 bwbuf[2] = { SET_BANDWIDTH, 0 },freqbuf[3] = { SET_RF_FREQ, 0, 0 };
- switch (fep->u.ofdm.bandwidth) {
- case BANDWIDTH_8_MHZ: bwbuf[1] = 8; break;
- case BANDWIDTH_7_MHZ: bwbuf[1] = 7; break;
- case BANDWIDTH_6_MHZ: bwbuf[1] = 6; break;
- case BANDWIDTH_AUTO: return -EOPNOTSUPP;
- default:
- return -EINVAL;
+ switch (fep->bandwidth_hz) {
+ case 8000000:
+ bwbuf[1] = 8;
+ break;
+ case 7000000:
+ bwbuf[1] = 7;
+ break;
+ case 6000000:
+ bwbuf[1] = 6;
+ break;
+ default:
+ return -EINVAL;
}
dvb_usb_generic_write(state->d,bwbuf,2);
@@ -134,11 +139,11 @@ static int dtt200u_fe_set_frontend(struct dvb_frontend* fe,
return 0;
}
-static int dtt200u_fe_get_frontend(struct dvb_frontend* fe,
- struct dvb_frontend_parameters *fep)
+static int dtt200u_fe_get_frontend(struct dvb_frontend* fe)
{
+ struct dtv_frontend_properties *fep = &fe->dtv_property_cache;
struct dtt200u_fe_state *state = fe->demodulator_priv;
- memcpy(fep,&state->fep,sizeof(struct dvb_frontend_parameters));
+ memcpy(fep, &state->fep, sizeof(struct dtv_frontend_properties));
return 0;
}
@@ -172,9 +177,9 @@ error:
}
static struct dvb_frontend_ops dtt200u_fe_ops = {
+ .delsys = { SYS_DVBT },
.info = {
.name = "WideView USB DVB-T",
- .type = FE_OFDM,
.frequency_min = 44250000,
.frequency_max = 867250000,
.frequency_stepsize = 250000,
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-dvb.c b/drivers/media/dvb/dvb-usb/dvb-usb-dvb.c
index ba4a7517354f..ddf282f355b3 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb-dvb.c
+++ b/drivers/media/dvb/dvb-usb/dvb-usb-dvb.c
@@ -141,11 +141,17 @@ int dvb_usb_adapter_dvb_init(struct dvb_usb_adapter *adap, short *adapter_nums)
goto err_dmx_dev;
}
- dvb_net_init(&adap->dvb_adap, &adap->dvb_net, &adap->demux.dmx);
+ if ((ret = dvb_net_init(&adap->dvb_adap, &adap->dvb_net,
+ &adap->demux.dmx)) < 0) {
+ err("dvb_net_init failed: error %d",ret);
+ goto err_net_init;
+ }
adap->state |= DVB_USB_ADAP_STATE_DVB;
return 0;
+err_net_init:
+ dvb_dmxdev_release(&adap->dmxdev);
err_dmx_dev:
dvb_dmx_release(&adap->demux);
err_dmx:
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-ids.h b/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
index 2d08c9b5128a..d390ddaa5a53 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
+++ b/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
@@ -109,10 +109,13 @@
#define USB_PID_DIBCOM_STK807XPVR 0x1f98
#define USB_PID_DIBCOM_STK8096GP 0x1fa0
#define USB_PID_DIBCOM_NIM8096MD 0x1fa8
+#define USB_PID_DIBCOM_TFE8096P 0x1f9C
#define USB_PID_DIBCOM_ANCHOR_2135_COLD 0x2131
#define USB_PID_DIBCOM_STK7770P 0x1e80
#define USB_PID_DIBCOM_NIM7090 0x1bb2
#define USB_PID_DIBCOM_TFE7090PVR 0x1bb4
+#define USB_PID_DIBCOM_TFE7090E 0x1bb7
+#define USB_PID_DIBCOM_TFE7790E 0x1e6e
#define USB_PID_DIBCOM_NIM9090M 0x2383
#define USB_PID_DIBCOM_NIM9090MD 0x2384
#define USB_PID_DPOSH_M9206_COLD 0x9206
@@ -128,6 +131,8 @@
#define USB_PID_GRANDTEC_DVBT_USB_WARM 0x0fa1
#define USB_PID_INTEL_CE9500 0x9500
#define USB_PID_ITETECH_IT9135 0x9135
+#define USB_PID_ITETECH_IT9135_9005 0x9005
+#define USB_PID_ITETECH_IT9135_9006 0x9006
#define USB_PID_KWORLD_399U 0xe399
#define USB_PID_KWORLD_399U_2 0xe400
#define USB_PID_KWORLD_395U 0xe396
@@ -322,6 +327,7 @@
#define USB_PID_TVWAY_PLUS 0x0002
#define USB_PID_SVEON_STV20 0xe39d
#define USB_PID_SVEON_STV22 0xe401
+#define USB_PID_SVEON_STV22_IT9137 0xe411
#define USB_PID_AZUREWAVE_AZ6027 0x3275
#define USB_PID_TERRATEC_DVBS2CI_V1 0x10a4
#define USB_PID_TERRATEC_DVBS2CI_V2 0x10ac
diff --git a/drivers/media/dvb/dvb-usb/dw2102.c b/drivers/media/dvb/dvb-usb/dw2102.c
index ff941d20e6b7..451c5a7adfb2 100644
--- a/drivers/media/dvb/dvb-usb/dw2102.c
+++ b/drivers/media/dvb/dvb-usb/dw2102.c
@@ -1435,22 +1435,40 @@ static int dw2102_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
return 0;
}
+enum dw2102_table_entry {
+ CYPRESS_DW2102,
+ CYPRESS_DW2101,
+ CYPRESS_DW2104,
+ TEVII_S650,
+ TERRATEC_CINERGY_S,
+ CYPRESS_DW3101,
+ TEVII_S630,
+ PROF_1100,
+ TEVII_S660,
+ PROF_7500,
+ GENIATECH_SU3000,
+ TERRATEC_CINERGY_S2,
+ TEVII_S480_1,
+ TEVII_S480_2,
+ X3M_SPC1400HD,
+};
+
static struct usb_device_id dw2102_table[] = {
- {USB_DEVICE(USB_VID_CYPRESS, USB_PID_DW2102)},
- {USB_DEVICE(USB_VID_CYPRESS, 0x2101)},
- {USB_DEVICE(USB_VID_CYPRESS, USB_PID_DW2104)},
- {USB_DEVICE(0x9022, USB_PID_TEVII_S650)},
- {USB_DEVICE(USB_VID_TERRATEC, USB_PID_CINERGY_S)},
- {USB_DEVICE(USB_VID_CYPRESS, USB_PID_DW3101)},
- {USB_DEVICE(0x9022, USB_PID_TEVII_S630)},
- {USB_DEVICE(0x3011, USB_PID_PROF_1100)},
- {USB_DEVICE(0x9022, USB_PID_TEVII_S660)},
- {USB_DEVICE(0x3034, 0x7500)},
- {USB_DEVICE(0x1f4d, 0x3000)},
- {USB_DEVICE(USB_VID_TERRATEC, 0x00a8)},
- {USB_DEVICE(0x9022, USB_PID_TEVII_S480_1)},
- {USB_DEVICE(0x9022, USB_PID_TEVII_S480_2)},
- {USB_DEVICE(0x1f4d, 0x3100)},
+ [CYPRESS_DW2102] = {USB_DEVICE(USB_VID_CYPRESS, USB_PID_DW2102)},
+ [CYPRESS_DW2101] = {USB_DEVICE(USB_VID_CYPRESS, 0x2101)},
+ [CYPRESS_DW2104] = {USB_DEVICE(USB_VID_CYPRESS, USB_PID_DW2104)},
+ [TEVII_S650] = {USB_DEVICE(0x9022, USB_PID_TEVII_S650)},
+ [TERRATEC_CINERGY_S] = {USB_DEVICE(USB_VID_TERRATEC, USB_PID_CINERGY_S)},
+ [CYPRESS_DW3101] = {USB_DEVICE(USB_VID_CYPRESS, USB_PID_DW3101)},
+ [TEVII_S630] = {USB_DEVICE(0x9022, USB_PID_TEVII_S630)},
+ [PROF_1100] = {USB_DEVICE(0x3011, USB_PID_PROF_1100)},
+ [TEVII_S660] = {USB_DEVICE(0x9022, USB_PID_TEVII_S660)},
+ [PROF_7500] = {USB_DEVICE(0x3034, 0x7500)},
+ [GENIATECH_SU3000] = {USB_DEVICE(0x1f4d, 0x3000)},
+ [TERRATEC_CINERGY_S2] = {USB_DEVICE(USB_VID_TERRATEC, 0x00a8)},
+ [TEVII_S480_1] = {USB_DEVICE(0x9022, USB_PID_TEVII_S480_1)},
+ [TEVII_S480_2] = {USB_DEVICE(0x9022, USB_PID_TEVII_S480_2)},
+ [X3M_SPC1400HD] = {USB_DEVICE(0x1f4d, 0x3100)},
{ }
};
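The dw2102 change above replaces magic indexes into the USB ID table with an enum plus designated initializers, so device descriptions reference entries by name and stay correct if the table is reordered. A minimal stand-alone illustration of the idiom (made-up IDs):

#include <stdio.h>

enum board {
	BOARD_A,
	BOARD_B,
};

struct usb_id {
	unsigned short vendor;
	unsigned short product;
};

/* Designated initializers: each slot is tied to its enum name, not to
 * its position in the initializer list. */
static const struct usb_id id_table[] = {
	[BOARD_B] = { 0x9022, 0x0650 },
	[BOARD_A] = { 0x04b4, 0x2102 },
};

int main(void)
{
	/* Entries are looked up by name instead of a hard-coded index. */
	printf("board A: %04x:%04x\n",
	       id_table[BOARD_A].vendor, id_table[BOARD_A].product);
	return 0;
}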
@@ -1610,15 +1628,15 @@ static struct dvb_usb_device_properties dw2102_properties = {
.num_device_descs = 3,
.devices = {
{"DVBWorld DVB-S 2102 USB2.0",
- {&dw2102_table[0], NULL},
+ {&dw2102_table[CYPRESS_DW2102], NULL},
{NULL},
},
{"DVBWorld DVB-S 2101 USB2.0",
- {&dw2102_table[1], NULL},
+ {&dw2102_table[CYPRESS_DW2101], NULL},
{NULL},
},
{"TerraTec Cinergy S USB",
- {&dw2102_table[4], NULL},
+ {&dw2102_table[TERRATEC_CINERGY_S], NULL},
{NULL},
},
}
@@ -1664,11 +1682,11 @@ static struct dvb_usb_device_properties dw2104_properties = {
.num_device_descs = 2,
.devices = {
{ "DVBWorld DW2104 USB2.0",
- {&dw2102_table[2], NULL},
+ {&dw2102_table[CYPRESS_DW2104], NULL},
{NULL},
},
{ "TeVii S650 USB2.0",
- {&dw2102_table[3], NULL},
+ {&dw2102_table[TEVII_S650], NULL},
{NULL},
},
}
@@ -1715,7 +1733,7 @@ static struct dvb_usb_device_properties dw3101_properties = {
.num_device_descs = 1,
.devices = {
{ "DVBWorld DVB-C 3101 USB2.0",
- {&dw2102_table[5], NULL},
+ {&dw2102_table[CYPRESS_DW3101], NULL},
{NULL},
},
}
@@ -1761,7 +1779,7 @@ static struct dvb_usb_device_properties s6x0_properties = {
.num_device_descs = 1,
.devices = {
{"TeVii S630 USB",
- {&dw2102_table[6], NULL},
+ {&dw2102_table[TEVII_S630], NULL},
{NULL},
},
}
@@ -1770,33 +1788,33 @@ static struct dvb_usb_device_properties s6x0_properties = {
struct dvb_usb_device_properties *p1100;
static struct dvb_usb_device_description d1100 = {
"Prof 1100 USB ",
- {&dw2102_table[7], NULL},
+ {&dw2102_table[PROF_1100], NULL},
{NULL},
};
struct dvb_usb_device_properties *s660;
static struct dvb_usb_device_description d660 = {
"TeVii S660 USB",
- {&dw2102_table[8], NULL},
+ {&dw2102_table[TEVII_S660], NULL},
{NULL},
};
static struct dvb_usb_device_description d480_1 = {
"TeVii S480.1 USB",
- {&dw2102_table[12], NULL},
+ {&dw2102_table[TEVII_S480_1], NULL},
{NULL},
};
static struct dvb_usb_device_description d480_2 = {
"TeVii S480.2 USB",
- {&dw2102_table[13], NULL},
+ {&dw2102_table[TEVII_S480_2], NULL},
{NULL},
};
struct dvb_usb_device_properties *p7500;
static struct dvb_usb_device_description d7500 = {
"Prof 7500 USB DVB-S2",
- {&dw2102_table[9], NULL},
+ {&dw2102_table[PROF_7500], NULL},
{NULL},
};
@@ -1842,15 +1860,15 @@ static struct dvb_usb_device_properties su3000_properties = {
.num_device_descs = 3,
.devices = {
{ "SU3000HD DVB-S USB2.0",
- { &dw2102_table[10], NULL },
+ { &dw2102_table[GENIATECH_SU3000], NULL },
{ NULL },
},
{ "Terratec Cinergy S2 USB HD",
- { &dw2102_table[11], NULL },
+ { &dw2102_table[TERRATEC_CINERGY_S2], NULL },
{ NULL },
},
{ "X3M TV SPC1400HD PCI",
- { &dw2102_table[14], NULL },
+ { &dw2102_table[X3M_SPC1400HD], NULL },
{ NULL },
},
}
@@ -1859,12 +1877,11 @@ static struct dvb_usb_device_properties su3000_properties = {
static int dw2102_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
- p1100 = kzalloc(sizeof(struct dvb_usb_device_properties), GFP_KERNEL);
+ p1100 = kmemdup(&s6x0_properties,
+ sizeof(struct dvb_usb_device_properties), GFP_KERNEL);
if (!p1100)
return -ENOMEM;
/* copy default structure */
- memcpy(p1100, &s6x0_properties,
- sizeof(struct dvb_usb_device_properties));
/* fill only different fields */
p1100->firmware = "dvb-usb-p1100.fw";
p1100->devices[0] = d1100;
@@ -1872,13 +1889,12 @@ static int dw2102_probe(struct usb_interface *intf,
p1100->rc.legacy.rc_map_size = ARRAY_SIZE(rc_map_tbs_table);
p1100->adapter->fe[0].frontend_attach = stv0288_frontend_attach;
- s660 = kzalloc(sizeof(struct dvb_usb_device_properties), GFP_KERNEL);
+ s660 = kmemdup(&s6x0_properties,
+ sizeof(struct dvb_usb_device_properties), GFP_KERNEL);
if (!s660) {
kfree(p1100);
return -ENOMEM;
}
- memcpy(s660, &s6x0_properties,
- sizeof(struct dvb_usb_device_properties));
s660->firmware = "dvb-usb-s660.fw";
s660->num_device_descs = 3;
s660->devices[0] = d660;
@@ -1886,14 +1902,13 @@ static int dw2102_probe(struct usb_interface *intf,
s660->devices[2] = d480_2;
s660->adapter->fe[0].frontend_attach = ds3000_frontend_attach;
- p7500 = kzalloc(sizeof(struct dvb_usb_device_properties), GFP_KERNEL);
+ p7500 = kmemdup(&s6x0_properties,
+ sizeof(struct dvb_usb_device_properties), GFP_KERNEL);
if (!p7500) {
kfree(p1100);
kfree(s660);
return -ENOMEM;
}
- memcpy(p7500, &s6x0_properties,
- sizeof(struct dvb_usb_device_properties));
p7500->firmware = "dvb-usb-p7500.fw";
p7500->devices[0] = d7500;
p7500->rc.legacy.rc_map_table = rc_map_tbs_table;
diff --git a/drivers/media/dvb/dvb-usb/friio-fe.c b/drivers/media/dvb/dvb-usb/friio-fe.c
index 015b4e8af1a5..90a70c66a96e 100644
--- a/drivers/media/dvb/dvb-usb/friio-fe.c
+++ b/drivers/media/dvb/dvb-usb/friio-fe.c
@@ -282,23 +282,24 @@ static int jdvbt90502_set_property(struct dvb_frontend *fe,
return r;
}
-static int jdvbt90502_get_frontend(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *p)
+static int jdvbt90502_get_frontend(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
p->inversion = INVERSION_AUTO;
- p->u.ofdm.bandwidth = BANDWIDTH_6_MHZ;
- p->u.ofdm.code_rate_HP = FEC_AUTO;
- p->u.ofdm.code_rate_LP = FEC_AUTO;
- p->u.ofdm.constellation = QAM_64;
- p->u.ofdm.transmission_mode = TRANSMISSION_MODE_AUTO;
- p->u.ofdm.guard_interval = GUARD_INTERVAL_AUTO;
- p->u.ofdm.hierarchy_information = HIERARCHY_AUTO;
+ p->bandwidth_hz = 6000000;
+ p->code_rate_HP = FEC_AUTO;
+ p->code_rate_LP = FEC_AUTO;
+ p->modulation = QAM_64;
+ p->transmission_mode = TRANSMISSION_MODE_AUTO;
+ p->guard_interval = GUARD_INTERVAL_AUTO;
+ p->hierarchy = HIERARCHY_AUTO;
return 0;
}
-static int jdvbt90502_set_frontend(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *p)
+static int jdvbt90502_set_frontend(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+
/**
* NOTE: ignore all the parameters except frequency.
* others should be fixed to the proper value for ISDB-T,
@@ -438,14 +439,12 @@ error:
}
static struct dvb_frontend_ops jdvbt90502_ops = {
-
+ .delsys = { SYS_ISDBT },
.info = {
.name = "Comtech JDVBT90502 ISDB-T",
- .type = FE_OFDM,
.frequency_min = 473000000, /* UHF 13ch, center */
.frequency_max = 767142857, /* UHF 62ch, center */
- .frequency_stepsize = JDVBT90502_PLL_CLK /
- JDVBT90502_PLL_DIVIDER,
+ .frequency_stepsize = JDVBT90502_PLL_CLK / JDVBT90502_PLL_DIVIDER,
.frequency_tolerance = 0,
/* NOTE: this driver ignores all parameters but frequency. */
diff --git a/drivers/media/dvb/dvb-usb/gp8psk-fe.c b/drivers/media/dvb/dvb-usb/gp8psk-fe.c
index 5426267980c7..67957dd99ede 100644
--- a/drivers/media/dvb/dvb-usb/gp8psk-fe.c
+++ b/drivers/media/dvb/dvb-usb/gp8psk-fe.c
@@ -113,28 +113,12 @@ static int gp8psk_fe_get_tune_settings(struct dvb_frontend* fe, struct dvb_front
return 0;
}
-static int gp8psk_fe_set_property(struct dvb_frontend *fe,
- struct dtv_property *tvp)
-{
- deb_fe("%s(..)\n", __func__);
- return 0;
-}
-
-static int gp8psk_fe_get_property(struct dvb_frontend *fe,
- struct dtv_property *tvp)
-{
- deb_fe("%s(..)\n", __func__);
- return 0;
-}
-
-
-static int gp8psk_fe_set_frontend(struct dvb_frontend* fe,
- struct dvb_frontend_parameters *fep)
+static int gp8psk_fe_set_frontend(struct dvb_frontend *fe)
{
struct gp8psk_fe_state *state = fe->demodulator_priv;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
u8 cmd[10];
- u32 freq = fep->frequency * 1000;
+ u32 freq = c->frequency * 1000;
int gp_product_id = le16_to_cpu(state->d->udev->descriptor.idProduct);
deb_fe("%s()\n", __func__);
@@ -342,9 +326,9 @@ success:
static struct dvb_frontend_ops gp8psk_fe_ops = {
+ .delsys = { SYS_DVBS },
.info = {
.name = "Genpix DVB-S",
- .type = FE_QPSK,
.frequency_min = 800000,
.frequency_max = 2250000,
.frequency_stepsize = 100,
@@ -366,8 +350,6 @@ static struct dvb_frontend_ops gp8psk_fe_ops = {
.init = NULL,
.sleep = NULL,
- .set_property = gp8psk_fe_set_property,
- .get_property = gp8psk_fe_get_property,
.set_frontend = gp8psk_fe_set_frontend,
.get_tune_settings = gp8psk_fe_get_tune_settings,
diff --git a/drivers/media/dvb/dvb-usb/it913x.c b/drivers/media/dvb/dvb-usb/it913x.c
index 67094b879bb4..9f01cd7a6e3f 100644
--- a/drivers/media/dvb/dvb-usb/it913x.c
+++ b/drivers/media/dvb/dvb-usb/it913x.c
@@ -52,42 +52,59 @@ static int pid_filter;
module_param_named(pid, pid_filter, int, 0644);
MODULE_PARM_DESC(pid, "set default 0=on 1=off");
+static int dvb_usb_it913x_firmware;
+module_param_named(firmware, dvb_usb_it913x_firmware, int, 0644);
+MODULE_PARM_DESC(firmware, "set firmware 0=auto 1=IT9137 2=IT9135V1");
+
+
int cmd_counter;
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
struct it913x_state {
u8 id;
-};
-
-struct ite_config {
- u8 chip_ver;
- u16 chip_type;
- u32 firmware;
- u8 tuner_id_0;
- u8 tuner_id_1;
- u8 dual_mode;
+ struct ite_config it913x_config;
};
struct ite_config it913x_config;
+#define IT913X_RETRY 10
+#define IT913X_SND_TIMEOUT 100
+#define IT913X_RCV_TIMEOUT 200
+
static int it913x_bulk_write(struct usb_device *dev,
u8 *snd, int len, u8 pipe)
{
- int ret, actual_l;
+ int ret, actual_l, i;
+
+ for (i = 0; i < IT913X_RETRY; i++) {
+ ret = usb_bulk_msg(dev, usb_sndbulkpipe(dev, pipe),
+ snd, len , &actual_l, IT913X_SND_TIMEOUT);
+ if (ret == 0 || (ret != -EBUSY && ret != -ETIMEDOUT))
+ break;
+ }
+
+ if (len != actual_l && ret == 0)
+ ret = -EAGAIN;
- ret = usb_bulk_msg(dev, usb_sndbulkpipe(dev, pipe),
- snd, len , &actual_l, 100);
return ret;
}
static int it913x_bulk_read(struct usb_device *dev,
u8 *rev, int len, u8 pipe)
{
- int ret, actual_l;
+ int ret, actual_l, i;
+
+ for (i = 0; i < IT913X_RETRY; i++) {
+ ret = usb_bulk_msg(dev, usb_rcvbulkpipe(dev, pipe),
+ rev, len , &actual_l, IT913X_RCV_TIMEOUT);
+ if (ret == 0 || (ret != -EBUSY && ret != -ETIMEDOUT))
+ break;
+ }
+
+ if (len != actual_l && ret == 0)
+ ret = -EAGAIN;
- ret = usb_bulk_msg(dev, usb_rcvbulkpipe(dev, pipe),
- rev, len , &actual_l, 200);
return ret;
}
@@ -100,7 +117,7 @@ static u16 check_sum(u8 *p, u8 len)
return ~sum;
}
-static int it913x_io(struct usb_device *udev, u8 mode, u8 pro,
+static int it913x_usb_talk(struct usb_device *udev, u8 mode, u8 pro,
u8 cmd, u32 reg, u8 addr, u8 *data, u8 len)
{
int ret = 0, i, buf_size = 1;
@@ -159,22 +176,41 @@ static int it913x_io(struct usb_device *udev, u8 mode, u8 pro,
buff[buf_size++] = (chk_sum & 0xff);
ret = it913x_bulk_write(udev, buff, buf_size , 0x02);
+ if (ret < 0)
+ goto error;
- ret |= it913x_bulk_read(udev, buff, (mode & 1) ?
+ ret = it913x_bulk_read(udev, buff, (mode & 1) ?
5 : len + 5 , 0x01);
+ if (ret < 0)
+ goto error;
rlen = (mode & 0x1) ? 0x1 : len;
if (mode & 1)
- ret |= buff[2];
+ ret = buff[2];
else
memcpy(data, &buff[3], rlen);
cmd_counter++;
- kfree(buff);
+error: kfree(buff);
- return (ret < 0) ? -ENODEV : 0;
+ return ret;
+}
+
+static int it913x_io(struct usb_device *udev, u8 mode, u8 pro,
+ u8 cmd, u32 reg, u8 addr, u8 *data, u8 len)
+{
+ int ret, i;
+
+ for (i = 0; i < IT913X_RETRY; i++) {
+ ret = it913x_usb_talk(udev, mode, pro,
+ cmd, reg, addr, data, len);
+ if (ret != -EAGAIN)
+ break;
+ }
+
+ return ret;
}
static int it913x_wr_reg(struct usb_device *udev, u8 pro, u32 reg , u8 data)
@@ -223,15 +259,15 @@ static u32 it913x_query(struct usb_device *udev, u8 pro)
static int it913x_pid_filter_ctrl(struct dvb_usb_adapter *adap, int onoff)
{
- int ret = 0;
+ struct usb_device *udev = adap->dev->udev;
+ int ret;
u8 pro = (adap->id == 0) ? DEV_0_DMOD : DEV_1_DMOD;
if (mutex_lock_interruptible(&adap->dev->i2c_mutex) < 0)
return -EAGAIN;
deb_info(1, "PID_C (%02x)", onoff);
- if (!onoff)
- ret = it913x_wr_reg(adap->dev->udev, pro, PID_RST, 0x1);
+ ret = it913x_wr_reg(udev, pro, PID_EN, onoff);
mutex_unlock(&adap->dev->i2c_mutex);
return ret;
@@ -241,27 +277,20 @@ static int it913x_pid_filter(struct dvb_usb_adapter *adap,
int index, u16 pid, int onoff)
{
struct usb_device *udev = adap->dev->udev;
- int ret = 0;
+ int ret;
u8 pro = (adap->id == 0) ? DEV_0_DMOD : DEV_1_DMOD;
- if (pid_filter > 0)
- return 0;
-
if (mutex_lock_interruptible(&adap->dev->i2c_mutex) < 0)
return -EAGAIN;
deb_info(1, "PID_F (%02x)", onoff);
- if (onoff) {
- ret = it913x_wr_reg(udev, pro, PID_EN, 0x1);
-
- ret |= it913x_wr_reg(udev, pro, PID_LSB, (u8)(pid & 0xff));
- ret |= it913x_wr_reg(udev, pro, PID_MSB, (u8)(pid >> 8));
+ ret = it913x_wr_reg(udev, pro, PID_LSB, (u8)(pid & 0xff));
- ret |= it913x_wr_reg(udev, pro, PID_INX_EN, (u8)onoff);
+ ret |= it913x_wr_reg(udev, pro, PID_MSB, (u8)(pid >> 8));
- ret |= it913x_wr_reg(udev, pro, PID_INX, (u8)(index & 0x1f));
+ ret |= it913x_wr_reg(udev, pro, PID_INX_EN, (u8)onoff);
- }
+ ret |= it913x_wr_reg(udev, pro, PID_INX, (u8)(index & 0x1f));
mutex_unlock(&adap->dev->i2c_mutex);
return 0;
@@ -337,15 +366,73 @@ static int it913x_rc_query(struct dvb_usb_device *d)
if ((ibuf[2] + ibuf[3]) == 0xff) {
key = ibuf[2];
- key += ibuf[0] << 8;
- deb_info(1, "INT Key =%08x", key);
+ key += ibuf[0] << 16;
+ key += ibuf[1] << 8;
+ deb_info(1, "NEC Extended Key =%08x", key);
if (d->rc_dev != NULL)
rc_keydown(d->rc_dev, key, 0);
}
+
mutex_unlock(&d->i2c_mutex);
return ret;
}
+
+/* Firmware sets raw */
+const char fw_it9135_v1[] = "dvb-usb-it9135-01.fw";
+const char fw_it9135_v2[] = "dvb-usb-it9135-02.fw";
+const char fw_it9137[] = "dvb-usb-it9137-01.fw";
+
+static int ite_firmware_select(struct usb_device *udev,
+ struct dvb_usb_device_properties *props)
+{
+ int sw;
+ /* auto switch */
+ if (le16_to_cpu(udev->descriptor.idProduct) ==
+ USB_PID_ITETECH_IT9135)
+ sw = IT9135_V1_FW;
+ else if (le16_to_cpu(udev->descriptor.idProduct) ==
+ USB_PID_ITETECH_IT9135_9005)
+ sw = IT9135_V1_FW;
+ else if (le16_to_cpu(udev->descriptor.idProduct) ==
+ USB_PID_ITETECH_IT9135_9006) {
+ sw = IT9135_V2_FW;
+ if (it913x_config.tuner_id_0 == 0)
+ it913x_config.tuner_id_0 = IT9135_60;
+ } else
+ sw = IT9137_FW;
+
+ /* force switch */
+ if (dvb_usb_it913x_firmware != IT9135_AUTO)
+ sw = dvb_usb_it913x_firmware;
+
+ switch (sw) {
+ case IT9135_V1_FW:
+ it913x_config.firmware_ver = 1;
+ it913x_config.adc_x2 = 1;
+ props->firmware = fw_it9135_v1;
+ break;
+ case IT9135_V2_FW:
+ it913x_config.firmware_ver = 1;
+ it913x_config.adc_x2 = 1;
+ props->firmware = fw_it9135_v2;
+ break;
+ case IT9137_FW:
+ default:
+ it913x_config.firmware_ver = 0;
+ it913x_config.adc_x2 = 0;
+ props->firmware = fw_it9137;
+ }
+
+ return 0;
+}
+
+#define TS_MPEG_PKT_SIZE 188
+#define EP_LOW 21
+#define TS_BUFFER_SIZE_PID (EP_LOW*TS_MPEG_PKT_SIZE)
+#define EP_HIGH 348
+#define TS_BUFFER_SIZE_MAX (EP_HIGH*TS_MPEG_PKT_SIZE)
+
static int it913x_identify_state(struct usb_device *udev,
struct dvb_usb_device_properties *props,
struct dvb_usb_device_description **desc,
@@ -359,6 +446,19 @@ static int it913x_identify_state(struct usb_device *udev,
/* check for dual mode */
it913x_config.dual_mode = it913x_read_reg(udev, 0x49c5);
+ if (udev->speed != USB_SPEED_HIGH) {
+ props->adapter[0].fe[0].pid_filter_count = 5;
+ info("USB 1 low speed mode - connect to USB 2 port");
+ if (pid_filter > 0)
+ pid_filter = 0;
+ if (it913x_config.dual_mode) {
+ it913x_config.dual_mode = 0;
+ info("Dual mode not supported in USB 1");
+ }
+ } else /* For replugging */
+ if(props->adapter[0].fe[0].pid_filter_count == 5)
+ props->adapter[0].fe[0].pid_filter_count = 31;
+
/* TODO different remotes */
remote = it913x_read_reg(udev, 0x49ac); /* Remote */
if (remote == 0)
@@ -370,6 +470,28 @@ static int it913x_identify_state(struct usb_device *udev,
info("Dual mode=%x Remote=%x Tuner Type=%x", it913x_config.dual_mode
, remote, it913x_config.tuner_id_0);
+ /* Select stream buffer size and PID filter option */
+ if (pid_filter) {
+ props->adapter[0].fe[0].stream.u.bulk.buffersize =
+ TS_BUFFER_SIZE_MAX;
+ props->adapter[0].fe[0].caps &=
+ ~DVB_USB_ADAP_NEED_PID_FILTERING;
+ } else
+ props->adapter[0].fe[0].stream.u.bulk.buffersize =
+ TS_BUFFER_SIZE_PID;
+
+ if (it913x_config.dual_mode) {
+ props->adapter[1].fe[0].stream.u.bulk.buffersize =
+ props->adapter[0].fe[0].stream.u.bulk.buffersize;
+ props->num_adapters = 2;
+ if (pid_filter)
+ props->adapter[1].fe[0].caps =
+ props->adapter[0].fe[0].caps;
+ } else
+ props->num_adapters = 1;
+
+ ret = ite_firmware_select(udev, props);
+
if (firm_no > 0) {
*cold = 0;
return 0;
@@ -391,18 +513,22 @@ static int it913x_identify_state(struct usb_device *udev,
ret = it913x_wr_reg(udev, DEV_0,
GPIOH1_O, 0x0);
}
- props->num_adapters = 2;
- } else
- props->num_adapters = 1;
+ }
reg = it913x_read_reg(udev, IO_MUX_POWER_CLK);
if (it913x_config.dual_mode) {
ret |= it913x_wr_reg(udev, DEV_0, 0x4bfb, CHIP2_I2C_ADDR);
- ret |= it913x_wr_reg(udev, DEV_0, CLK_O_EN, 0x1);
+ if (it913x_config.firmware_ver == 1)
+ ret |= it913x_wr_reg(udev, DEV_0, 0xcfff, 0x1);
+ else
+ ret |= it913x_wr_reg(udev, DEV_0, CLK_O_EN, 0x1);
} else {
ret |= it913x_wr_reg(udev, DEV_0, 0x4bfb, 0x0);
- ret |= it913x_wr_reg(udev, DEV_0, CLK_O_EN, 0x0);
+ if (it913x_config.firmware_ver == 1)
+ ret |= it913x_wr_reg(udev, DEV_0, 0xcfff, 0x0);
+ else
+ ret |= it913x_wr_reg(udev, DEV_0, CLK_O_EN, 0x0);
}
*cold = 1;
@@ -428,35 +554,45 @@ static int it913x_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
return ret;
}
-
static int it913x_download_firmware(struct usb_device *udev,
const struct firmware *fw)
{
- int ret = 0, i;
- u8 packet_size, dlen;
+ int ret = 0, i = 0, pos = 0;
+ u8 packet_size, min_pkt;
u8 *fw_data;
- packet_size = 0x29;
-
ret = it913x_wr_reg(udev, DEV_0, I2C_CLK, I2C_CLK_100);
info("FRM Starting Firmware Download");
- /* This uses scatter write firmware headers follow */
- /* 03 XX 00 XX = chip number? */
-
- for (i = 0; i < fw->size; i += packet_size) {
- if (i > 0)
- packet_size = 0x39;
- fw_data = (u8 *)(fw->data + i);
- dlen = ((i + packet_size) > fw->size)
- ? (fw->size - i) : packet_size;
- ret |= it913x_io(udev, WRITE_DATA, DEV_0,
- CMD_SCATTER_WRITE, 0, 0, fw_data, dlen);
- udelay(1000);
+
+ /* Multi firmware loader */
+ /* This uses scatter write firmware headers */
+ /* The firmware must start with 03 XX 00 */
+ /* and be the exact firmware length */
+
+ if (it913x_config.chip_ver == 2)
+ min_pkt = 0x11;
+ else
+ min_pkt = 0x19;
+
+ while (i <= fw->size) {
+ if (((fw->data[i] == 0x3) && (fw->data[i + 2] == 0x0))
+ || (i == fw->size)) {
+ packet_size = i - pos;
+ if ((packet_size > min_pkt) || (i == fw->size)) {
+ fw_data = (u8 *)(fw->data + pos);
+ pos += packet_size;
+ if (packet_size > 0)
+ ret |= it913x_io(udev, WRITE_DATA,
+ DEV_0, CMD_SCATTER_WRITE, 0,
+ 0, fw_data, packet_size);
+ udelay(1000);
+ }
+ }
+ i++;
}
- ret |= it913x_io(udev, WRITE_CMD, DEV_0,
- CMD_BOOT, 0, 0, NULL, 0);
+ ret |= it913x_io(udev, WRITE_CMD, DEV_0, CMD_BOOT, 0, 0, NULL, 0);
msleep(100);
@@ -474,12 +610,17 @@ static int it913x_download_firmware(struct usb_device *udev,
/* Tuner function */
if (it913x_config.dual_mode)
ret |= it913x_wr_reg(udev, DEV_0_DMOD , 0xec4c, 0xa0);
-
- ret |= it913x_wr_reg(udev, DEV_0, PADODPU, 0x0);
- ret |= it913x_wr_reg(udev, DEV_0, AGC_O_D, 0x0);
- if (it913x_config.dual_mode) {
- ret |= it913x_wr_reg(udev, DEV_1, PADODPU, 0x0);
- ret |= it913x_wr_reg(udev, DEV_1, AGC_O_D, 0x0);
+ else
+ ret |= it913x_wr_reg(udev, DEV_0_DMOD , 0xec4c, 0x68);
+
+ if ((it913x_config.chip_ver == 1) &&
+ (it913x_config.chip_type == 0x9135)) {
+ ret |= it913x_wr_reg(udev, DEV_0, PADODPU, 0x0);
+ ret |= it913x_wr_reg(udev, DEV_0, AGC_O_D, 0x0);
+ if (it913x_config.dual_mode) {
+ ret |= it913x_wr_reg(udev, DEV_1, PADODPU, 0x0);
+ ret |= it913x_wr_reg(udev, DEV_1, AGC_O_D, 0x0);
+ }
}
return (ret < 0) ? -ENODEV : 0;
@@ -500,32 +641,23 @@ static int it913x_name(struct dvb_usb_adapter *adap)
static int it913x_frontend_attach(struct dvb_usb_adapter *adap)
{
struct usb_device *udev = adap->dev->udev;
+ struct it913x_state *st = adap->dev->priv;
int ret = 0;
- u8 adf = it913x_read_reg(udev, IO_MUX_POWER_CLK);
u8 adap_addr = I2C_BASE_ADDR + (adap->id << 5);
- u16 ep_size = adap->props.fe[0].stream.u.bulk.buffersize;
- u8 tuner_id, tuner_type;
+ u16 ep_size = adap->props.fe[0].stream.u.bulk.buffersize / 4;
+ u8 pkt_size = 0x80;
+
+ if (adap->dev->udev->speed != USB_SPEED_HIGH)
+ pkt_size = 0x10;
+
+ it913x_config.adf = it913x_read_reg(udev, IO_MUX_POWER_CLK);
if (adap->id == 0)
- tuner_id = it913x_config.tuner_id_0;
- else
- tuner_id = it913x_config.tuner_id_1;
-
- /* TODO we always use IT9137 possible references here*/
- /* Documentation suggests don't care */
- switch (tuner_id) {
- case 0x51:
- case 0x52:
- case 0x60:
- case 0x61:
- case 0x62:
- default:
- case 0x38:
- tuner_type = IT9137;
- }
+ memcpy(&st->it913x_config, &it913x_config,
+ sizeof(struct ite_config));
adap->fe_adap[0].fe = dvb_attach(it913x_fe_attach,
- &adap->dev->i2c_adap, adap_addr, adf, tuner_type);
+ &adap->dev->i2c_adap, adap_addr, &st->it913x_config);
if (adap->id == 0 && adap->fe_adap[0].fe) {
ret = it913x_wr_reg(udev, DEV_0_DMOD, MP2_SW_RST, 0x1);
@@ -536,13 +668,13 @@ static int it913x_frontend_attach(struct dvb_usb_adapter *adap)
ret = it913x_wr_reg(udev, DEV_0, EP4_TX_LEN_LSB,
ep_size & 0xff);
ret = it913x_wr_reg(udev, DEV_0, EP4_TX_LEN_MSB, ep_size >> 8);
- ret = it913x_wr_reg(udev, DEV_0, EP4_MAX_PKT, 0x80);
+ ret = it913x_wr_reg(udev, DEV_0, EP4_MAX_PKT, pkt_size);
} else if (adap->id == 1 && adap->fe_adap[0].fe) {
ret = it913x_wr_reg(udev, DEV_0, EP0_TX_EN, 0x6f);
ret = it913x_wr_reg(udev, DEV_0, EP5_TX_LEN_LSB,
ep_size & 0xff);
ret = it913x_wr_reg(udev, DEV_0, EP5_TX_LEN_MSB, ep_size >> 8);
- ret = it913x_wr_reg(udev, DEV_0, EP5_MAX_PKT, 0x80);
+ ret = it913x_wr_reg(udev, DEV_0, EP5_MAX_PKT, pkt_size);
ret = it913x_wr_reg(udev, DEV_0_DMOD, MP2IF2_EN, 0x1);
ret = it913x_wr_reg(udev, DEV_1_DMOD, MP2IF_SERIAL, 0x1);
ret = it913x_wr_reg(udev, DEV_1, TOP_HOSTB_SER_MODE, 0x1);
@@ -582,6 +714,9 @@ static int it913x_probe(struct usb_interface *intf,
static struct usb_device_id it913x_table[] = {
{ USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_UB499_2T_T09) },
{ USB_DEVICE(USB_VID_ITETECH, USB_PID_ITETECH_IT9135) },
+ { USB_DEVICE(USB_VID_KWORLD_2, USB_PID_SVEON_STV22_IT9137) },
+ { USB_DEVICE(USB_VID_ITETECH, USB_PID_ITETECH_IT9135_9005) },
+ { USB_DEVICE(USB_VID_ITETECH, USB_PID_ITETECH_IT9135_9006) },
{} /* Terminating entry */
};
@@ -614,8 +749,8 @@ static struct dvb_usb_device_properties it913x_properties = {
.endpoint = 0x04,
.u = {/* Keep Low if PID filter on */
.bulk = {
- .buffersize = 3584,
-
+ .buffersize =
+ TS_BUFFER_SIZE_PID,
}
}
}
@@ -639,8 +774,8 @@ static struct dvb_usb_device_properties it913x_properties = {
.endpoint = 0x05,
.u = {
.bulk = {
- .buffersize = 3584,
-
+ .buffersize =
+ TS_BUFFER_SIZE_PID,
}
}
}
@@ -654,10 +789,10 @@ static struct dvb_usb_device_properties it913x_properties = {
.rc_query = it913x_rc_query,
.rc_interval = IT913X_POLL,
.allowed_protos = RC_TYPE_NEC,
- .rc_codes = RC_MAP_KWORLD_315U,
+ .rc_codes = RC_MAP_MSI_DIGIVOX_III,
},
.i2c_algo = &it913x_i2c_algo,
- .num_device_descs = 2,
+ .num_device_descs = 5,
.devices = {
{ "Kworld UB499-2T T09(IT9137)",
{ &it913x_table[0], NULL },
@@ -665,6 +800,15 @@ static struct dvb_usb_device_properties it913x_properties = {
{ "ITE 9135 Generic",
{ &it913x_table[1], NULL },
},
+ { "Sveon STV22 Dual DVB-T HDTV(IT9137)",
+ { &it913x_table[2], NULL },
+ },
+ { "ITE 9135(9005) Generic",
+ { &it913x_table[3], NULL },
+ },
+ { "ITE 9135(9006) Generic",
+ { &it913x_table[4], NULL },
+ },
}
};
@@ -679,5 +823,5 @@ module_usb_driver(it913x_driver);
MODULE_AUTHOR("Malcolm Priestley <tvboxspy@gmail.com>");
MODULE_DESCRIPTION("it913x USB 2 Driver");
-MODULE_VERSION("1.07");
+MODULE_VERSION("1.22");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/dvb-usb/lmedm04.c b/drivers/media/dvb/dvb-usb/lmedm04.c
index 1a876a65ed56..b3fe05bbffc9 100644
--- a/drivers/media/dvb/dvb-usb/lmedm04.c
+++ b/drivers/media/dvb/dvb-usb/lmedm04.c
@@ -388,8 +388,7 @@ static int lme2510_pid_filter(struct dvb_usb_adapter *adap, int index, u16 pid,
deb_info(3, "%s PID=%04x Index=%04x onoff=%02x", __func__,
pid, index, onoff);
- if (onoff)
- if (!pid_filter) {
+ if (onoff) {
ret = mutex_lock_interruptible(&adap->dev->i2c_mutex);
if (ret < 0)
return -EAGAIN;
@@ -654,6 +653,9 @@ static int lme2510_identify_state(struct usb_device *udev,
struct dvb_usb_device_description **desc,
int *cold)
{
+ if (pid_filter > 0)
+ props->adapter[0].fe[0].caps &=
+ ~DVB_USB_ADAP_NEED_PID_FILTERING;
*cold = 0;
return 0;
}
@@ -1293,5 +1295,5 @@ module_usb_driver(lme2510_driver);
MODULE_AUTHOR("Malcolm Priestley <tvboxspy@gmail.com>");
MODULE_DESCRIPTION("LME2510(C) DVB-S USB2.0");
-MODULE_VERSION("1.90");
+MODULE_VERSION("1.91");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/dvb-usb/mxl111sf-demod.c b/drivers/media/dvb/dvb-usb/mxl111sf-demod.c
index d1f58371c711..d83df4bb72d3 100644
--- a/drivers/media/dvb/dvb-usb/mxl111sf-demod.c
+++ b/drivers/media/dvb/dvb-usb/mxl111sf-demod.c
@@ -102,8 +102,8 @@ fail:
}
static
-int mxl1x1sf_demod_get_tps_constellation(struct mxl111sf_demod_state *state,
- fe_modulation_t *constellation)
+int mxl1x1sf_demod_get_tps_modulation(struct mxl111sf_demod_state *state,
+ fe_modulation_t *modulation)
{
u8 val;
int ret = mxl111sf_demod_read_reg(state, V6_MODORDER_TPS_REG, &val);
@@ -113,13 +113,13 @@ int mxl1x1sf_demod_get_tps_constellation(struct mxl111sf_demod_state *state,
switch ((val & V6_PARAM_CONSTELLATION_MASK) >> 4) {
case 0:
- *constellation = QPSK;
+ *modulation = QPSK;
break;
case 1:
- *constellation = QAM_16;
+ *modulation = QAM_16;
break;
case 2:
- *constellation = QAM_64;
+ *modulation = QAM_64;
break;
}
fail:
@@ -284,8 +284,7 @@ static int mxl1x1sf_demod_reset_irq_status(struct mxl111sf_demod_state *state)
/* ------------------------------------------------------------------------ */
-static int mxl111sf_demod_set_frontend(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *param)
+static int mxl111sf_demod_set_frontend(struct dvb_frontend *fe)
{
struct mxl111sf_demod_state *state = fe->demodulator_priv;
int ret = 0;
@@ -303,7 +302,7 @@ static int mxl111sf_demod_set_frontend(struct dvb_frontend *fe,
mxl_dbg("()");
if (fe->ops.tuner_ops.set_params) {
- ret = fe->ops.tuner_ops.set_params(fe, param);
+ ret = fe->ops.tuner_ops.set_params(fe);
if (mxl_fail(ret))
goto fail;
msleep(50);
@@ -481,13 +480,13 @@ static int mxl111sf_demod_read_signal_strength(struct dvb_frontend *fe,
u16 *signal_strength)
{
struct mxl111sf_demod_state *state = fe->demodulator_priv;
- fe_modulation_t constellation;
+ fe_modulation_t modulation;
u16 snr;
mxl111sf_demod_calc_snr(state, &snr);
- mxl1x1sf_demod_get_tps_constellation(state, &constellation);
+ mxl1x1sf_demod_get_tps_modulation(state, &modulation);
- switch (constellation) {
+ switch (modulation) {
case QPSK:
*signal_strength = (snr >= 1300) ?
min(65535, snr * 44) : snr * 38;
@@ -508,9 +507,9 @@ static int mxl111sf_demod_read_signal_strength(struct dvb_frontend *fe,
return 0;
}
-static int mxl111sf_demod_get_frontend(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *p)
+static int mxl111sf_demod_get_frontend(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct mxl111sf_demod_state *state = fe->demodulator_priv;
mxl_dbg("()");
@@ -518,18 +517,18 @@ static int mxl111sf_demod_get_frontend(struct dvb_frontend *fe,
p->inversion = /* FIXME */ ? INVERSION_ON : INVERSION_OFF;
#endif
if (fe->ops.tuner_ops.get_bandwidth)
- fe->ops.tuner_ops.get_bandwidth(fe, &p->u.ofdm.bandwidth);
+ fe->ops.tuner_ops.get_bandwidth(fe, &p->bandwidth_hz);
if (fe->ops.tuner_ops.get_frequency)
fe->ops.tuner_ops.get_frequency(fe, &p->frequency);
- mxl1x1sf_demod_get_tps_code_rate(state, &p->u.ofdm.code_rate_HP);
- mxl1x1sf_demod_get_tps_code_rate(state, &p->u.ofdm.code_rate_LP);
- mxl1x1sf_demod_get_tps_constellation(state, &p->u.ofdm.constellation);
+ mxl1x1sf_demod_get_tps_code_rate(state, &p->code_rate_HP);
+ mxl1x1sf_demod_get_tps_code_rate(state, &p->code_rate_LP);
+ mxl1x1sf_demod_get_tps_modulation(state, &p->modulation);
mxl1x1sf_demod_get_tps_guard_fft_mode(state,
- &p->u.ofdm.transmission_mode);
+ &p->transmission_mode);
mxl1x1sf_demod_get_tps_guard_interval(state,
- &p->u.ofdm.guard_interval);
+ &p->guard_interval);
mxl1x1sf_demod_get_tps_hierarchy(state,
- &p->u.ofdm.hierarchy_information);
+ &p->hierarchy);
return 0;
}
@@ -551,10 +550,9 @@ static void mxl111sf_demod_release(struct dvb_frontend *fe)
}
static struct dvb_frontend_ops mxl111sf_demod_ops = {
-
+ .delsys = { SYS_DVBT },
.info = {
.name = "MaxLinear MxL111SF DVB-T demodulator",
- .type = FE_OFDM,
.frequency_min = 177000000,
.frequency_max = 858000000,
.frequency_stepsize = 166666,
diff --git a/drivers/media/dvb/dvb-usb/mxl111sf-tuner.c b/drivers/media/dvb/dvb-usb/mxl111sf-tuner.c
index a6341058c4e7..72db6eef4b9c 100644
--- a/drivers/media/dvb/dvb-usb/mxl111sf-tuner.c
+++ b/drivers/media/dvb/dvb-usb/mxl111sf-tuner.c
@@ -38,6 +38,8 @@ struct mxl111sf_tuner_state {
struct mxl111sf_tuner_config *cfg;
+ enum mxl_if_freq if_freq;
+
u32 frequency;
u32 bandwidth;
};
@@ -186,7 +188,10 @@ static int mxl1x1sf_tuner_set_if_output_freq(struct mxl111sf_tuner_state *state)
ctrl = iffcw & 0x00ff;
#endif
ret = mxl111sf_tuner_write_reg(state, V6_TUNER_IF_FCW_REG, ctrl);
- mxl_fail(ret);
+ if (mxl_fail(ret))
+ goto fail;
+
+ state->if_freq = state->cfg->if_freq;
fail:
return ret;
}
@@ -267,55 +272,49 @@ static int mxl1x1sf_tuner_loop_thru_ctrl(struct mxl111sf_tuner_state *state,
/* ------------------------------------------------------------------------ */
-static int mxl111sf_tuner_set_params(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *params)
+static int mxl111sf_tuner_set_params(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ u32 delsys = c->delivery_system;
struct mxl111sf_tuner_state *state = fe->tuner_priv;
int ret;
u8 bw;
mxl_dbg("()");
- if (fe->ops.info.type == FE_ATSC) {
- switch (params->u.vsb.modulation) {
- case VSB_8:
- case VSB_16:
- bw = 0; /* ATSC */
- break;
- case QAM_64:
- case QAM_256:
- bw = 1; /* US CABLE */
- break;
- default:
- err("%s: modulation not set!", __func__);
- return -EINVAL;
- }
- } else if (fe->ops.info.type == FE_OFDM) {
- switch (params->u.ofdm.bandwidth) {
- case BANDWIDTH_6_MHZ:
+ switch (delsys) {
+ case SYS_ATSC:
+ bw = 0; /* ATSC */
+ break;
+ case SYS_DVBC_ANNEX_B:
+ bw = 1; /* US CABLE */
+ break;
+ case SYS_DVBT:
+ switch (c->bandwidth_hz) {
+ case 6000000:
bw = 6;
break;
- case BANDWIDTH_7_MHZ:
+ case 7000000:
bw = 7;
break;
- case BANDWIDTH_8_MHZ:
+ case 8000000:
bw = 8;
break;
default:
err("%s: bandwidth not set!", __func__);
return -EINVAL;
}
- } else {
+ break;
+ default:
err("%s: modulation type not supported!", __func__);
return -EINVAL;
}
- ret = mxl1x1sf_tune_rf(fe, params->frequency, bw);
+ ret = mxl1x1sf_tune_rf(fe, c->frequency, bw);
if (mxl_fail(ret))
goto fail;
- state->frequency = params->frequency;
- state->bandwidth = (fe->ops.info.type == FE_OFDM) ?
- params->u.ofdm.bandwidth : 0;
+ state->frequency = c->frequency;
+ state->bandwidth = c->bandwidth_hz;
fail:
return ret;
}
@@ -407,6 +406,54 @@ static int mxl111sf_tuner_get_bandwidth(struct dvb_frontend *fe, u32 *bandwidth)
return 0;
}
+static int mxl111sf_tuner_get_if_frequency(struct dvb_frontend *fe,
+ u32 *frequency)
+{
+ struct mxl111sf_tuner_state *state = fe->tuner_priv;
+
+ *frequency = 0;
+
+ switch (state->if_freq) {
+ case MXL_IF_4_0: /* 4.0 MHz */
+ *frequency = 4000000;
+ break;
+ case MXL_IF_4_5: /* 4.5 MHz */
+ *frequency = 4500000;
+ break;
+ case MXL_IF_4_57: /* 4.57 MHz */
+ *frequency = 4570000;
+ break;
+ case MXL_IF_5_0: /* 5.0 MHz */
+ *frequency = 5000000;
+ break;
+ case MXL_IF_5_38: /* 5.38 MHz */
+ *frequency = 5380000;
+ break;
+ case MXL_IF_6_0: /* 6.0 MHz */
+ *frequency = 6000000;
+ break;
+ case MXL_IF_6_28: /* 6.28 MHz */
+ *frequency = 6280000;
+ break;
+ case MXL_IF_7_2: /* 7.2 MHz */
+ *frequency = 7200000;
+ break;
+ case MXL_IF_35_25: /* 35.25 MHz */
+ *frequency = 35250000;
+ break;
+ case MXL_IF_36: /* 36 MHz */
+ *frequency = 36000000;
+ break;
+ case MXL_IF_36_15: /* 36.15 MHz */
+ *frequency = 36150000;
+ break;
+ case MXL_IF_44: /* 44 MHz */
+ *frequency = 44000000;
+ break;
+ }
+ return 0;
+}
+
static int mxl111sf_tuner_release(struct dvb_frontend *fe)
{
struct mxl111sf_tuner_state *state = fe->tuner_priv;
@@ -436,6 +483,7 @@ static struct dvb_tuner_ops mxl111sf_tuner_tuner_ops = {
.get_rf_strength = mxl111sf_get_rf_strength,
.get_frequency = mxl111sf_tuner_get_frequency,
.get_bandwidth = mxl111sf_tuner_get_bandwidth,
+ .get_if_frequency = mxl111sf_tuner_get_if_frequency,
.release = mxl111sf_tuner_release,
};
diff --git a/drivers/media/dvb/dvb-usb/mxl111sf.c b/drivers/media/dvb/dvb-usb/mxl111sf.c
index 825a8b242e09..38ef0253d3b5 100644
--- a/drivers/media/dvb/dvb-usb/mxl111sf.c
+++ b/drivers/media/dvb/dvb-usb/mxl111sf.c
@@ -758,6 +758,7 @@ MODULE_DEVICE_TABLE(usb, mxl111sf_table);
#define MXL111SF_EP4_BULK_STREAMING_CONFIG \
+ .size_of_priv = sizeof(struct mxl111sf_adap_state), \
.streaming_ctrl = mxl111sf_ep4_streaming_ctrl, \
.stream = { \
.type = USB_BULK, \
@@ -772,6 +773,7 @@ MODULE_DEVICE_TABLE(usb, mxl111sf_table);
/* FIXME: works for v6 but not v8 silicon */
#define MXL111SF_EP4_ISOC_STREAMING_CONFIG \
+ .size_of_priv = sizeof(struct mxl111sf_adap_state), \
.streaming_ctrl = mxl111sf_ep4_streaming_ctrl, \
.stream = { \
.type = USB_ISOC, \
@@ -788,6 +790,7 @@ MODULE_DEVICE_TABLE(usb, mxl111sf_table);
}
#define MXL111SF_EP6_BULK_STREAMING_CONFIG \
+ .size_of_priv = sizeof(struct mxl111sf_adap_state), \
.streaming_ctrl = mxl111sf_ep6_streaming_ctrl, \
.stream = { \
.type = USB_BULK, \
@@ -802,6 +805,7 @@ MODULE_DEVICE_TABLE(usb, mxl111sf_table);
/* FIXME */
#define MXL111SF_EP6_ISOC_STREAMING_CONFIG \
+ .size_of_priv = sizeof(struct mxl111sf_adap_state), \
.streaming_ctrl = mxl111sf_ep6_streaming_ctrl, \
.stream = { \
.type = USB_ISOC, \
@@ -839,8 +843,6 @@ static struct dvb_usb_device_properties mxl111sf_dvbt_bulk_properties = {
.fe_ioctl_override = mxl111sf_fe_ioctl_override,
.num_frontends = 1,
.fe = {{
- .size_of_priv = sizeof(struct mxl111sf_adap_state),
-
.frontend_attach = mxl111sf_attach_demod,
.tuner_attach = mxl111sf_attach_tuner,
@@ -883,8 +885,6 @@ static struct dvb_usb_device_properties mxl111sf_dvbt_isoc_properties = {
.fe_ioctl_override = mxl111sf_fe_ioctl_override,
.num_frontends = 1,
.fe = {{
- .size_of_priv = sizeof(struct mxl111sf_adap_state),
-
.frontend_attach = mxl111sf_attach_demod,
.tuner_attach = mxl111sf_attach_tuner,
@@ -927,16 +927,12 @@ static struct dvb_usb_device_properties mxl111sf_atsc_bulk_properties = {
.fe_ioctl_override = mxl111sf_fe_ioctl_override,
.num_frontends = 2,
.fe = {{
- .size_of_priv = sizeof(struct mxl111sf_adap_state),
-
.frontend_attach = mxl111sf_lgdt3305_frontend_attach,
.tuner_attach = mxl111sf_attach_tuner,
MXL111SF_EP6_BULK_STREAMING_CONFIG,
},
{
- .size_of_priv = sizeof(struct mxl111sf_adap_state),
-
.frontend_attach = mxl111sf_attach_demod,
.tuner_attach = mxl111sf_attach_tuner,
@@ -992,16 +988,12 @@ static struct dvb_usb_device_properties mxl111sf_atsc_isoc_properties = {
.fe_ioctl_override = mxl111sf_fe_ioctl_override,
.num_frontends = 2,
.fe = {{
- .size_of_priv = sizeof(struct mxl111sf_adap_state),
-
.frontend_attach = mxl111sf_lgdt3305_frontend_attach,
.tuner_attach = mxl111sf_attach_tuner,
MXL111SF_EP6_ISOC_STREAMING_CONFIG,
},
{
- .size_of_priv = sizeof(struct mxl111sf_adap_state),
-
.frontend_attach = mxl111sf_attach_demod,
.tuner_attach = mxl111sf_attach_tuner,
diff --git a/drivers/media/dvb/dvb-usb/ttusb2.c b/drivers/media/dvb/dvb-usb/ttusb2.c
index 56acf8e55d50..e53a1061cb8e 100644
--- a/drivers/media/dvb/dvb-usb/ttusb2.c
+++ b/drivers/media/dvb/dvb-usb/ttusb2.c
@@ -75,10 +75,18 @@ static int ttusb2_msg(struct dvb_usb_device *d, u8 cmd,
u8 *wbuf, int wlen, u8 *rbuf, int rlen)
{
struct ttusb2_state *st = d->priv;
- u8 s[wlen+4],r[64] = { 0 };
+ u8 *s, *r = NULL;
int ret = 0;
- memset(s,0,wlen+4);
+ s = kzalloc(wlen+4, GFP_KERNEL);
+ if (!s)
+ return -ENOMEM;
+
+ r = kzalloc(64, GFP_KERNEL);
+ if (!r) {
+ kfree(s);
+ return -ENOMEM;
+ }
s[0] = 0xaa;
s[1] = ++st->id;
@@ -94,12 +102,17 @@ static int ttusb2_msg(struct dvb_usb_device *d, u8 cmd,
r[2] != cmd ||
(rlen > 0 && r[3] != rlen)) {
warn("there might have been an error during control message transfer. (rlen = %d, was %d)",rlen,r[3]);
+ kfree(s);
+ kfree(r);
return -EIO;
}
if (rlen > 0)
memcpy(rbuf, &r[4], rlen);
+ kfree(s);
+ kfree(r);
+
return 0;
}
@@ -384,7 +397,7 @@ static int ttusb2_i2c_xfer(struct i2c_adapter *adap,struct i2c_msg msg[],int num
memcpy(&obuf[3], msg[i].buf, msg[i].len);
- if (ttusb2_msg(d, CMD_I2C_XFER, obuf, msg[i].len+3, ibuf, obuf[2] + 3) < 0) {
+ if (ttusb2_msg(d, CMD_I2C_XFER, obuf, obuf[1]+3, ibuf, obuf[2] + 3) < 0) {
err("i2c transfer failed.");
break;
}
diff --git a/drivers/media/dvb/dvb-usb/vp702x-fe.c b/drivers/media/dvb/dvb-usb/vp702x-fe.c
index 2bb8d4cc8d88..5eab468dd904 100644
--- a/drivers/media/dvb/dvb-usb/vp702x-fe.c
+++ b/drivers/media/dvb/dvb-usb/vp702x-fe.c
@@ -135,9 +135,9 @@ static int vp702x_fe_get_tune_settings(struct dvb_frontend* fe, struct dvb_front
return 0;
}
-static int vp702x_fe_set_frontend(struct dvb_frontend* fe,
- struct dvb_frontend_parameters *fep)
+static int vp702x_fe_set_frontend(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *fep = &fe->dtv_property_cache;
struct vp702x_fe_state *st = fe->demodulator_priv;
struct vp702x_device_state *dst = st->d->priv;
u32 freq = fep->frequency/1000;
@@ -155,14 +155,14 @@ static int vp702x_fe_set_frontend(struct dvb_frontend* fe,
cmd[1] = freq & 0xff;
cmd[2] = 1; /* divrate == 4 -> frequencyRef[1] -> 1 here */
- sr = (u64) (fep->u.qpsk.symbol_rate/1000) << 20;
+ sr = (u64) (fep->symbol_rate/1000) << 20;
do_div(sr,88000);
cmd[3] = (sr >> 12) & 0xff;
cmd[4] = (sr >> 4) & 0xff;
cmd[5] = (sr << 4) & 0xf0;
deb_fe("setting frontend to: %u -> %u (%x) LNB-based GHz, symbolrate: %d -> %lu (%lx)\n",
- fep->frequency,freq,freq, fep->u.qpsk.symbol_rate,
+ fep->frequency, freq, freq, fep->symbol_rate,
(unsigned long) sr, (unsigned long) sr);
/* if (fep->inversion == INVERSION_ON)
@@ -171,7 +171,7 @@ static int vp702x_fe_set_frontend(struct dvb_frontend* fe,
if (st->voltage == SEC_VOLTAGE_18)
cmd[6] |= 0x40;
-/* if (fep->u.qpsk.symbol_rate > 8000000)
+/* if (fep->symbol_rate > 8000000)
cmd[6] |= 0x20;
if (fep->frequency < 1531000)
@@ -211,13 +211,6 @@ static int vp702x_fe_sleep(struct dvb_frontend *fe)
return 0;
}
-static int vp702x_fe_get_frontend(struct dvb_frontend* fe,
- struct dvb_frontend_parameters *fep)
-{
- deb_fe("%s\n",__func__);
- return 0;
-}
-
static int vp702x_fe_send_diseqc_msg (struct dvb_frontend* fe,
struct dvb_diseqc_master_cmd *m)
{
@@ -350,9 +343,9 @@ error:
static struct dvb_frontend_ops vp702x_fe_ops = {
+ .delsys = { SYS_DVBS },
.info = {
.name = "Twinhan DST-like frontend (VP7021/VP7020) DVB-S",
- .type = FE_QPSK,
.frequency_min = 950000,
.frequency_max = 2150000,
.frequency_stepsize = 1000, /* kHz for QPSK frontends */
@@ -371,7 +364,6 @@ static struct dvb_frontend_ops vp702x_fe_ops = {
.sleep = vp702x_fe_sleep,
.set_frontend = vp702x_fe_set_frontend,
- .get_frontend = vp702x_fe_get_frontend,
.get_tune_settings = vp702x_fe_get_tune_settings,
.read_status = vp702x_fe_read_status,
diff --git a/drivers/media/dvb/dvb-usb/vp7045-fe.c b/drivers/media/dvb/dvb-usb/vp7045-fe.c
index 8452eef90322..b8825b18c003 100644
--- a/drivers/media/dvb/dvb-usb/vp7045-fe.c
+++ b/drivers/media/dvb/dvb-usb/vp7045-fe.c
@@ -103,9 +103,9 @@ static int vp7045_fe_get_tune_settings(struct dvb_frontend* fe, struct dvb_front
return 0;
}
-static int vp7045_fe_set_frontend(struct dvb_frontend* fe,
- struct dvb_frontend_parameters *fep)
+static int vp7045_fe_set_frontend(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *fep = &fe->dtv_property_cache;
struct vp7045_fe_state *state = fe->demodulator_priv;
u8 buf[5];
u32 freq = fep->frequency / 1000;
@@ -115,25 +115,24 @@ static int vp7045_fe_set_frontend(struct dvb_frontend* fe,
buf[2] = freq & 0xff;
buf[3] = 0;
- switch (fep->u.ofdm.bandwidth) {
- case BANDWIDTH_8_MHZ: buf[4] = 8; break;
- case BANDWIDTH_7_MHZ: buf[4] = 7; break;
- case BANDWIDTH_6_MHZ: buf[4] = 6; break;
- case BANDWIDTH_AUTO: return -EOPNOTSUPP;
- default:
- return -EINVAL;
+ switch (fep->bandwidth_hz) {
+ case 8000000:
+ buf[4] = 8;
+ break;
+ case 7000000:
+ buf[4] = 7;
+ break;
+ case 6000000:
+ buf[4] = 6;
+ break;
+ default:
+ return -EINVAL;
}
vp7045_usb_op(state->d,LOCK_TUNER_COMMAND,buf,5,NULL,0,200);
return 0;
}
-static int vp7045_fe_get_frontend(struct dvb_frontend* fe,
- struct dvb_frontend_parameters *fep)
-{
- return 0;
-}
-
static void vp7045_fe_release(struct dvb_frontend* fe)
{
struct vp7045_fe_state *state = fe->demodulator_priv;
@@ -159,9 +158,9 @@ error:
static struct dvb_frontend_ops vp7045_fe_ops = {
+ .delsys = { SYS_DVBT },
.info = {
.name = "Twinhan VP7045/46 USB DVB-T",
- .type = FE_OFDM,
.frequency_min = 44250000,
.frequency_max = 867250000,
.frequency_stepsize = 1000,
@@ -181,7 +180,6 @@ static struct dvb_frontend_ops vp7045_fe_ops = {
.sleep = vp7045_fe_sleep,
.set_frontend = vp7045_fe_set_frontend,
- .get_frontend = vp7045_fe_get_frontend,
.get_tune_settings = vp7045_fe_get_tune_settings,
.read_status = vp7045_fe_read_status,
diff --git a/drivers/media/dvb/firewire/firedtv-avc.c b/drivers/media/dvb/firewire/firedtv-avc.c
index 489ae8245867..d1a1a1324ef8 100644
--- a/drivers/media/dvb/firewire/firedtv-avc.c
+++ b/drivers/media/dvb/firewire/firedtv-avc.c
@@ -335,7 +335,7 @@ static int add_pid_filter(struct firedtv *fdtv, u8 *operand)
* (not supported by the AVC standard)
*/
static int avc_tuner_tuneqpsk(struct firedtv *fdtv,
- struct dvb_frontend_parameters *params)
+ struct dtv_frontend_properties *p)
{
struct avc_command_frame *c = (void *)fdtv->avc_data;
@@ -349,15 +349,15 @@ static int avc_tuner_tuneqpsk(struct firedtv *fdtv,
else
c->operand[3] = SFE_VENDOR_OPCODE_TUNE_QPSK;
- c->operand[4] = (params->frequency >> 24) & 0xff;
- c->operand[5] = (params->frequency >> 16) & 0xff;
- c->operand[6] = (params->frequency >> 8) & 0xff;
- c->operand[7] = params->frequency & 0xff;
+ c->operand[4] = (p->frequency >> 24) & 0xff;
+ c->operand[5] = (p->frequency >> 16) & 0xff;
+ c->operand[6] = (p->frequency >> 8) & 0xff;
+ c->operand[7] = p->frequency & 0xff;
- c->operand[8] = ((params->u.qpsk.symbol_rate / 1000) >> 8) & 0xff;
- c->operand[9] = (params->u.qpsk.symbol_rate / 1000) & 0xff;
+ c->operand[8] = ((p->symbol_rate / 1000) >> 8) & 0xff;
+ c->operand[9] = (p->symbol_rate / 1000) & 0xff;
- switch (params->u.qpsk.fec_inner) {
+ switch (p->fec_inner) {
case FEC_1_2: c->operand[10] = 0x1; break;
case FEC_2_3: c->operand[10] = 0x2; break;
case FEC_3_4: c->operand[10] = 0x3; break;
@@ -392,10 +392,11 @@ static int avc_tuner_tuneqpsk(struct firedtv *fdtv,
default: c->operand[13] = 0x2; break;
}
switch (fdtv->fe.dtv_property_cache.rolloff) {
- case ROLLOFF_AUTO: c->operand[14] = 0x2; break;
case ROLLOFF_35: c->operand[14] = 0x2; break;
case ROLLOFF_20: c->operand[14] = 0x0; break;
case ROLLOFF_25: c->operand[14] = 0x1; break;
+ case ROLLOFF_AUTO:
+ default: c->operand[14] = 0x2; break;
/* case ROLLOFF_NONE: c->operand[14] = 0xff; break; */
}
switch (fdtv->fe.dtv_property_cache.pilot) {
@@ -415,7 +416,7 @@ static int avc_tuner_tuneqpsk(struct firedtv *fdtv,
}
static int avc_tuner_dsd_dvb_c(struct firedtv *fdtv,
- struct dvb_frontend_parameters *params)
+ struct dtv_frontend_properties *p)
{
struct avc_command_frame *c = (void *)fdtv->avc_data;
@@ -434,8 +435,8 @@ static int avc_tuner_dsd_dvb_c(struct firedtv *fdtv,
| 1 << 4 /* Frequency */
| 1 << 3 /* Symbol_Rate */
| 0 << 2 /* FEC_outer */
- | (params->u.qam.fec_inner != FEC_AUTO ? 1 << 1 : 0)
- | (params->u.qam.modulation != QAM_AUTO ? 1 << 0 : 0);
+ | (p->fec_inner != FEC_AUTO ? 1 << 1 : 0)
+ | (p->modulation != QAM_AUTO ? 1 << 0 : 0);
/* multiplex_valid_flags, low byte */
c->operand[6] = 0 << 7 /* NetworkID */
@@ -446,15 +447,15 @@ static int avc_tuner_dsd_dvb_c(struct firedtv *fdtv,
c->operand[9] = 0x00;
c->operand[10] = 0x00;
- c->operand[11] = (((params->frequency / 4000) >> 16) & 0xff) | (2 << 6);
- c->operand[12] = ((params->frequency / 4000) >> 8) & 0xff;
- c->operand[13] = (params->frequency / 4000) & 0xff;
- c->operand[14] = ((params->u.qpsk.symbol_rate / 1000) >> 12) & 0xff;
- c->operand[15] = ((params->u.qpsk.symbol_rate / 1000) >> 4) & 0xff;
- c->operand[16] = ((params->u.qpsk.symbol_rate / 1000) << 4) & 0xf0;
+ c->operand[11] = (((p->frequency / 4000) >> 16) & 0xff) | (2 << 6);
+ c->operand[12] = ((p->frequency / 4000) >> 8) & 0xff;
+ c->operand[13] = (p->frequency / 4000) & 0xff;
+ c->operand[14] = ((p->symbol_rate / 1000) >> 12) & 0xff;
+ c->operand[15] = ((p->symbol_rate / 1000) >> 4) & 0xff;
+ c->operand[16] = ((p->symbol_rate / 1000) << 4) & 0xf0;
c->operand[17] = 0x00;
- switch (params->u.qpsk.fec_inner) {
+ switch (p->fec_inner) {
case FEC_1_2: c->operand[18] = 0x1; break;
case FEC_2_3: c->operand[18] = 0x2; break;
case FEC_3_4: c->operand[18] = 0x3; break;
@@ -466,7 +467,7 @@ static int avc_tuner_dsd_dvb_c(struct firedtv *fdtv,
default: c->operand[18] = 0x0;
}
- switch (params->u.qam.modulation) {
+ switch (p->modulation) {
case QAM_16: c->operand[19] = 0x08; break;
case QAM_32: c->operand[19] = 0x10; break;
case QAM_64: c->operand[19] = 0x18; break;
@@ -483,9 +484,8 @@ static int avc_tuner_dsd_dvb_c(struct firedtv *fdtv,
}
static int avc_tuner_dsd_dvb_t(struct firedtv *fdtv,
- struct dvb_frontend_parameters *params)
+ struct dtv_frontend_properties *p)
{
- struct dvb_ofdm_parameters *ofdm = &params->u.ofdm;
struct avc_command_frame *c = (void *)fdtv->avc_data;
c->opcode = AVC_OPCODE_DSD;
@@ -500,42 +500,42 @@ static int avc_tuner_dsd_dvb_t(struct firedtv *fdtv,
c->operand[5] =
0 << 7 /* reserved */
| 1 << 6 /* CenterFrequency */
- | (ofdm->bandwidth != BANDWIDTH_AUTO ? 1 << 5 : 0)
- | (ofdm->constellation != QAM_AUTO ? 1 << 4 : 0)
- | (ofdm->hierarchy_information != HIERARCHY_AUTO ? 1 << 3 : 0)
- | (ofdm->code_rate_HP != FEC_AUTO ? 1 << 2 : 0)
- | (ofdm->code_rate_LP != FEC_AUTO ? 1 << 1 : 0)
- | (ofdm->guard_interval != GUARD_INTERVAL_AUTO ? 1 << 0 : 0);
+ | (p->bandwidth_hz != 0 ? 1 << 5 : 0)
+ | (p->modulation != QAM_AUTO ? 1 << 4 : 0)
+ | (p->hierarchy != HIERARCHY_AUTO ? 1 << 3 : 0)
+ | (p->code_rate_HP != FEC_AUTO ? 1 << 2 : 0)
+ | (p->code_rate_LP != FEC_AUTO ? 1 << 1 : 0)
+ | (p->guard_interval != GUARD_INTERVAL_AUTO ? 1 << 0 : 0);
/* multiplex_valid_flags, low byte */
c->operand[6] =
0 << 7 /* NetworkID */
- | (ofdm->transmission_mode != TRANSMISSION_MODE_AUTO ? 1 << 6 : 0)
+ | (p->transmission_mode != TRANSMISSION_MODE_AUTO ? 1 << 6 : 0)
| 0 << 5 /* OtherFrequencyFlag */
| 0 << 0 /* reserved */ ;
c->operand[7] = 0x0;
- c->operand[8] = (params->frequency / 10) >> 24;
- c->operand[9] = ((params->frequency / 10) >> 16) & 0xff;
- c->operand[10] = ((params->frequency / 10) >> 8) & 0xff;
- c->operand[11] = (params->frequency / 10) & 0xff;
-
- switch (ofdm->bandwidth) {
- case BANDWIDTH_7_MHZ: c->operand[12] = 0x20; break;
- case BANDWIDTH_8_MHZ:
- case BANDWIDTH_6_MHZ: /* not defined by AVC spec */
- case BANDWIDTH_AUTO:
+ c->operand[8] = (p->frequency / 10) >> 24;
+ c->operand[9] = ((p->frequency / 10) >> 16) & 0xff;
+ c->operand[10] = ((p->frequency / 10) >> 8) & 0xff;
+ c->operand[11] = (p->frequency / 10) & 0xff;
+
+ switch (p->bandwidth_hz) {
+ case 7000000: c->operand[12] = 0x20; break;
+ case 8000000:
+ case 6000000: /* not defined by AVC spec */
+ case 0:
default: c->operand[12] = 0x00;
}
- switch (ofdm->constellation) {
+ switch (p->modulation) {
case QAM_16: c->operand[13] = 1 << 6; break;
case QAM_64: c->operand[13] = 2 << 6; break;
case QPSK:
default: c->operand[13] = 0x00;
}
- switch (ofdm->hierarchy_information) {
+ switch (p->hierarchy) {
case HIERARCHY_1: c->operand[13] |= 1 << 3; break;
case HIERARCHY_2: c->operand[13] |= 2 << 3; break;
case HIERARCHY_4: c->operand[13] |= 3 << 3; break;
@@ -544,7 +544,7 @@ static int avc_tuner_dsd_dvb_t(struct firedtv *fdtv,
default: break;
}
- switch (ofdm->code_rate_HP) {
+ switch (p->code_rate_HP) {
case FEC_2_3: c->operand[13] |= 1; break;
case FEC_3_4: c->operand[13] |= 2; break;
case FEC_5_6: c->operand[13] |= 3; break;
@@ -553,7 +553,7 @@ static int avc_tuner_dsd_dvb_t(struct firedtv *fdtv,
default: break;
}
- switch (ofdm->code_rate_LP) {
+ switch (p->code_rate_LP) {
case FEC_2_3: c->operand[14] = 1 << 5; break;
case FEC_3_4: c->operand[14] = 2 << 5; break;
case FEC_5_6: c->operand[14] = 3 << 5; break;
@@ -562,7 +562,7 @@ static int avc_tuner_dsd_dvb_t(struct firedtv *fdtv,
default: c->operand[14] = 0x00; break;
}
- switch (ofdm->guard_interval) {
+ switch (p->guard_interval) {
case GUARD_INTERVAL_1_16: c->operand[14] |= 1 << 3; break;
case GUARD_INTERVAL_1_8: c->operand[14] |= 2 << 3; break;
case GUARD_INTERVAL_1_4: c->operand[14] |= 3 << 3; break;
@@ -571,7 +571,7 @@ static int avc_tuner_dsd_dvb_t(struct firedtv *fdtv,
default: break;
}
- switch (ofdm->transmission_mode) {
+ switch (p->transmission_mode) {
case TRANSMISSION_MODE_8K: c->operand[14] |= 1 << 1; break;
case TRANSMISSION_MODE_2K:
case TRANSMISSION_MODE_AUTO:
@@ -585,7 +585,7 @@ static int avc_tuner_dsd_dvb_t(struct firedtv *fdtv,
}
int avc_tuner_dsd(struct firedtv *fdtv,
- struct dvb_frontend_parameters *params)
+ struct dtv_frontend_properties *p)
{
struct avc_command_frame *c = (void *)fdtv->avc_data;
int pos, ret;
@@ -597,9 +597,9 @@ int avc_tuner_dsd(struct firedtv *fdtv,
switch (fdtv->type) {
case FIREDTV_DVB_S:
- case FIREDTV_DVB_S2: pos = avc_tuner_tuneqpsk(fdtv, params); break;
- case FIREDTV_DVB_C: pos = avc_tuner_dsd_dvb_c(fdtv, params); break;
- case FIREDTV_DVB_T: pos = avc_tuner_dsd_dvb_t(fdtv, params); break;
+ case FIREDTV_DVB_S2: pos = avc_tuner_tuneqpsk(fdtv, p); break;
+ case FIREDTV_DVB_C: pos = avc_tuner_dsd_dvb_c(fdtv, p); break;
+ case FIREDTV_DVB_T: pos = avc_tuner_dsd_dvb_t(fdtv, p); break;
default:
BUG();
}
diff --git a/drivers/media/dvb/firewire/firedtv-dvb.c b/drivers/media/dvb/firewire/firedtv-dvb.c
index fd8bbbfa5c59..eb7496eab130 100644
--- a/drivers/media/dvb/firewire/firedtv-dvb.c
+++ b/drivers/media/dvb/firewire/firedtv-dvb.c
@@ -203,7 +203,9 @@ int fdtv_dvb_register(struct firedtv *fdtv, const char *name)
if (err)
goto fail_rem_frontend;
- dvb_net_init(&fdtv->adapter, &fdtv->dvbnet, &fdtv->demux.dmx);
+ err = dvb_net_init(&fdtv->adapter, &fdtv->dvbnet, &fdtv->demux.dmx);
+ if (err)
+ goto fail_disconnect_frontend;
fdtv_frontend_init(fdtv, name);
err = dvb_register_frontend(&fdtv->adapter, &fdtv->fe);
@@ -218,6 +220,7 @@ int fdtv_dvb_register(struct firedtv *fdtv, const char *name)
fail_net_release:
dvb_net_release(&fdtv->dvbnet);
+fail_disconnect_frontend:
fdtv->demux.dmx.close(&fdtv->demux.dmx);
fail_rem_frontend:
fdtv->demux.dmx.remove_frontend(&fdtv->demux.dmx, &fdtv->frontend);
diff --git a/drivers/media/dvb/firewire/firedtv-fe.c b/drivers/media/dvb/firewire/firedtv-fe.c
index 8748a61be73d..6fe9793b98b3 100644
--- a/drivers/media/dvb/firewire/firedtv-fe.c
+++ b/drivers/media/dvb/firewire/firedtv-fe.c
@@ -141,28 +141,12 @@ static int fdtv_read_uncorrected_blocks(struct dvb_frontend *fe, u32 *ucblocks)
return -EOPNOTSUPP;
}
-static int fdtv_set_frontend(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *params)
+static int fdtv_set_frontend(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct firedtv *fdtv = fe->sec_priv;
- return avc_tuner_dsd(fdtv, params);
-}
-
-static int fdtv_get_frontend(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *params)
-{
- return -EOPNOTSUPP;
-}
-
-static int fdtv_get_property(struct dvb_frontend *fe, struct dtv_property *tvp)
-{
- return 0;
-}
-
-static int fdtv_set_property(struct dvb_frontend *fe, struct dtv_property *tvp)
-{
- return 0;
+ return avc_tuner_dsd(fdtv, p);
}
void fdtv_frontend_init(struct firedtv *fdtv, const char *name)
@@ -174,10 +158,6 @@ void fdtv_frontend_init(struct firedtv *fdtv, const char *name)
ops->sleep = fdtv_sleep;
ops->set_frontend = fdtv_set_frontend;
- ops->get_frontend = fdtv_get_frontend;
-
- ops->get_property = fdtv_get_property;
- ops->set_property = fdtv_set_property;
ops->read_status = fdtv_read_status;
ops->read_ber = fdtv_read_ber;
@@ -192,7 +172,7 @@ void fdtv_frontend_init(struct firedtv *fdtv, const char *name)
switch (fdtv->type) {
case FIREDTV_DVB_S:
- fi->type = FE_QPSK;
+ ops->delsys[0] = SYS_DVBS;
fi->frequency_min = 950000;
fi->frequency_max = 2150000;
@@ -211,7 +191,8 @@ void fdtv_frontend_init(struct firedtv *fdtv, const char *name)
break;
case FIREDTV_DVB_S2:
- fi->type = FE_QPSK;
+ ops->delsys[0] = SYS_DVBS;
+ ops->delsys[1] = SYS_DVBS2;
fi->frequency_min = 950000;
fi->frequency_max = 2150000;
@@ -231,7 +212,7 @@ void fdtv_frontend_init(struct firedtv *fdtv, const char *name)
break;
case FIREDTV_DVB_C:
- fi->type = FE_QAM;
+ ops->delsys[0] = SYS_DVBC_ANNEX_A;
fi->frequency_min = 47000000;
fi->frequency_max = 866000000;
@@ -249,7 +230,7 @@ void fdtv_frontend_init(struct firedtv *fdtv, const char *name)
break;
case FIREDTV_DVB_T:
- fi->type = FE_OFDM;
+ ops->delsys[0] = SYS_DVBT;
fi->frequency_min = 49000000;
fi->frequency_max = 861000000;
diff --git a/drivers/media/dvb/firewire/firedtv.h b/drivers/media/dvb/firewire/firedtv.h
index bd00b04e079d..4fdcd8cb7530 100644
--- a/drivers/media/dvb/firewire/firedtv.h
+++ b/drivers/media/dvb/firewire/firedtv.h
@@ -112,8 +112,8 @@ struct firedtv {
/* firedtv-avc.c */
int avc_recv(struct firedtv *fdtv, void *data, size_t length);
int avc_tuner_status(struct firedtv *fdtv, struct firedtv_tuner_status *stat);
-struct dvb_frontend_parameters;
-int avc_tuner_dsd(struct firedtv *fdtv, struct dvb_frontend_parameters *params);
+struct dtv_frontend_properties;
+int avc_tuner_dsd(struct firedtv *fdtv, struct dtv_frontend_properties *params);
int avc_tuner_set_pids(struct firedtv *fdtv, unsigned char pidc, u16 pid[]);
int avc_tuner_get_ts(struct firedtv *fdtv);
int avc_identify_subunit(struct firedtv *fdtv);
diff --git a/drivers/media/dvb/frontends/Kconfig b/drivers/media/dvb/frontends/Kconfig
index 4a2d2e6c91ab..ebb5ed7a7783 100644
--- a/drivers/media/dvb/frontends/Kconfig
+++ b/drivers/media/dvb/frontends/Kconfig
@@ -404,6 +404,13 @@ config DVB_EC100
help
Say Y when you want to support this frontend.
+config DVB_HD29L2
+ tristate "HDIC HD29L2"
+ depends on DVB_CORE && I2C
+ default m if DVB_FE_CUSTOMISE
+ help
+ Say Y when you want to support this frontend.
+
config DVB_STV0367
tristate "ST STV0367 based"
depends on DVB_CORE && I2C
diff --git a/drivers/media/dvb/frontends/Makefile b/drivers/media/dvb/frontends/Makefile
index f639f6781551..00a20636df62 100644
--- a/drivers/media/dvb/frontends/Makefile
+++ b/drivers/media/dvb/frontends/Makefile
@@ -84,6 +84,7 @@ obj-$(CONFIG_DVB_STV090x) += stv090x.o
obj-$(CONFIG_DVB_STV6110x) += stv6110x.o
obj-$(CONFIG_DVB_ISL6423) += isl6423.o
obj-$(CONFIG_DVB_EC100) += ec100.o
+obj-$(CONFIG_DVB_HD29L2) += hd29l2.o
obj-$(CONFIG_DVB_DS3000) += ds3000.o
obj-$(CONFIG_DVB_MB86A16) += mb86a16.o
obj-$(CONFIG_DVB_MB86A20S) += mb86a20s.o
diff --git a/drivers/media/dvb/frontends/af9013.c b/drivers/media/dvb/frontends/af9013.c
index 345311c33383..6bcbcf543b38 100644
--- a/drivers/media/dvb/frontends/af9013.c
+++ b/drivers/media/dvb/frontends/af9013.c
@@ -2,6 +2,7 @@
* Afatech AF9013 demodulator driver
*
* Copyright (C) 2007 Antti Palosaari <crope@iki.fi>
+ * Copyright (C) 2011 Antti Palosaari <crope@iki.fi>
*
* Thanks to Afatech who kindly provided information.
*
@@ -21,25 +22,15 @@
*
*/
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/firmware.h>
-
-#include "dvb_frontend.h"
#include "af9013_priv.h"
-#include "af9013.h"
int af9013_debug;
+module_param_named(debug, af9013_debug, int, 0644);
+MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
struct af9013_state {
struct i2c_adapter *i2c;
- struct dvb_frontend frontend;
-
+ struct dvb_frontend fe;
struct af9013_config config;
/* tuner/demod RF and IF AGC limits used for signal strength calc */
@@ -48,107 +39,178 @@ struct af9013_state {
u32 ber;
u32 ucblocks;
u16 snr;
- u32 frequency;
- unsigned long next_statistics_check;
+ u32 bandwidth_hz;
+ fe_status_t fe_status;
+ unsigned long set_frontend_jiffies;
+ unsigned long read_status_jiffies;
+ bool first_tune;
+ bool i2c_gate_state;
+ unsigned int statistics_step:3;
+ struct delayed_work statistics_work;
};
-static u8 regmask[8] = { 0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f, 0x7f, 0xff };
-
-static int af9013_write_regs(struct af9013_state *state, u8 mbox, u16 reg,
- u8 *val, u8 len)
+/* write multiple registers */
+static int af9013_wr_regs_i2c(struct af9013_state *priv, u8 mbox, u16 reg,
+ const u8 *val, int len)
{
+ int ret;
u8 buf[3+len];
- struct i2c_msg msg = {
- .addr = state->config.demod_address,
- .flags = 0,
- .len = sizeof(buf),
- .buf = buf };
-
- buf[0] = reg >> 8;
- buf[1] = reg & 0xff;
+ struct i2c_msg msg[1] = {
+ {
+ .addr = priv->config.i2c_addr,
+ .flags = 0,
+ .len = sizeof(buf),
+ .buf = buf,
+ }
+ };
+
+ buf[0] = (reg >> 8) & 0xff;
+ buf[1] = (reg >> 0) & 0xff;
buf[2] = mbox;
memcpy(&buf[3], val, len);
- if (i2c_transfer(state->i2c, &msg, 1) != 1) {
- warn("I2C write failed reg:%04x len:%d", reg, len);
- return -EREMOTEIO;
+ ret = i2c_transfer(priv->i2c, msg, 1);
+ if (ret == 1) {
+ ret = 0;
+ } else {
+ warn("i2c wr failed=%d reg=%04x len=%d", ret, reg, len);
+ ret = -EREMOTEIO;
}
- return 0;
+ return ret;
}
-static int af9013_write_ofdm_regs(struct af9013_state *state, u16 reg, u8 *val,
- u8 len)
+/* read multiple registers */
+static int af9013_rd_regs_i2c(struct af9013_state *priv, u8 mbox, u16 reg,
+ u8 *val, int len)
{
- u8 mbox = (1 << 0)|(1 << 1)|((len - 1) << 2)|(0 << 6)|(0 << 7);
- return af9013_write_regs(state, mbox, reg, val, len);
+ int ret;
+ u8 buf[3];
+ struct i2c_msg msg[2] = {
+ {
+ .addr = priv->config.i2c_addr,
+ .flags = 0,
+ .len = 3,
+ .buf = buf,
+ }, {
+ .addr = priv->config.i2c_addr,
+ .flags = I2C_M_RD,
+ .len = len,
+ .buf = val,
+ }
+ };
+
+ buf[0] = (reg >> 8) & 0xff;
+ buf[1] = (reg >> 0) & 0xff;
+ buf[2] = mbox;
+
+ ret = i2c_transfer(priv->i2c, msg, 2);
+ if (ret == 2) {
+ ret = 0;
+ } else {
+ warn("i2c rd failed=%d reg=%04x len=%d", ret, reg, len);
+ ret = -EREMOTEIO;
+ }
+ return ret;
}
-static int af9013_write_ofsm_regs(struct af9013_state *state, u16 reg, u8 *val,
- u8 len)
+/* write multiple registers */
+static int af9013_wr_regs(struct af9013_state *priv, u16 reg, const u8 *val,
+ int len)
+{
+ int ret, i;
+ u8 mbox = (0 << 7)|(0 << 6)|(1 << 1)|(1 << 0);
+
+ if ((priv->config.ts_mode == AF9013_TS_USB) &&
+ ((reg & 0xff00) != 0xff00) && ((reg & 0xff00) != 0xae00)) {
+ mbox |= ((len - 1) << 2);
+ ret = af9013_wr_regs_i2c(priv, mbox, reg, val, len);
+ } else {
+ for (i = 0; i < len; i++) {
+ ret = af9013_wr_regs_i2c(priv, mbox, reg+i, val+i, 1);
+ if (ret)
+ goto err;
+ }
+ }
+
+err:
+ return 0;
+}
+
+/* read multiple registers */
+static int af9013_rd_regs(struct af9013_state *priv, u16 reg, u8 *val, int len)
{
- u8 mbox = (1 << 0)|(1 << 1)|((len - 1) << 2)|(1 << 6)|(1 << 7);
- return af9013_write_regs(state, mbox, reg, val, len);
+ int ret, i;
+ u8 mbox = (0 << 7)|(0 << 6)|(1 << 1)|(0 << 0);
+
+ if ((priv->config.ts_mode == AF9013_TS_USB) &&
+ ((reg & 0xff00) != 0xff00) && ((reg & 0xff00) != 0xae00)) {
+ mbox |= ((len - 1) << 2);
+ ret = af9013_rd_regs_i2c(priv, mbox, reg, val, len);
+ } else {
+ for (i = 0; i < len; i++) {
+ ret = af9013_rd_regs_i2c(priv, mbox, reg+i, val+i, 1);
+ if (ret)
+ goto err;
+ }
+ }
+
+err:
+ return 0;
}
/* write single register */
-static int af9013_write_reg(struct af9013_state *state, u16 reg, u8 val)
+static int af9013_wr_reg(struct af9013_state *priv, u16 reg, u8 val)
{
- return af9013_write_ofdm_regs(state, reg, &val, 1);
+ return af9013_wr_regs(priv, reg, &val, 1);
}
/* read single register */
-static int af9013_read_reg(struct af9013_state *state, u16 reg, u8 *val)
+static int af9013_rd_reg(struct af9013_state *priv, u16 reg, u8 *val)
{
- u8 obuf[3] = { reg >> 8, reg & 0xff, 0 };
- u8 ibuf[1];
- struct i2c_msg msg[2] = {
- {
- .addr = state->config.demod_address,
- .flags = 0,
- .len = sizeof(obuf),
- .buf = obuf
- }, {
- .addr = state->config.demod_address,
- .flags = I2C_M_RD,
- .len = sizeof(ibuf),
- .buf = ibuf
- }
- };
+ return af9013_rd_regs(priv, reg, val, 1);
+}
- if (i2c_transfer(state->i2c, msg, 2) != 2) {
- warn("I2C read failed reg:%04x", reg);
- return -EREMOTEIO;
- }
- *val = ibuf[0];
- return 0;
+static int af9013_write_ofsm_regs(struct af9013_state *state, u16 reg, u8 *val,
+ u8 len)
+{
+ u8 mbox = (1 << 7)|(1 << 6)|((len - 1) << 2)|(1 << 1)|(1 << 0);
+ return af9013_wr_regs_i2c(state, mbox, reg, val, len);
}
-static int af9013_write_reg_bits(struct af9013_state *state, u16 reg, u8 pos,
- u8 len, u8 val)
+static int af9013_wr_reg_bits(struct af9013_state *state, u16 reg, int pos,
+ int len, u8 val)
{
int ret;
u8 tmp, mask;
- ret = af9013_read_reg(state, reg, &tmp);
- if (ret)
- return ret;
+ /* no need for read if whole reg is written */
+ if (len != 8) {
+ ret = af9013_rd_reg(state, reg, &tmp);
+ if (ret)
+ return ret;
- mask = regmask[len - 1] << pos;
- tmp = (tmp & ~mask) | ((val << pos) & mask);
+ mask = (0xff >> (8 - len)) << pos;
+ val <<= pos;
+ tmp &= ~mask;
+ val |= tmp;
+ }
- return af9013_write_reg(state, reg, tmp);
+ return af9013_wr_reg(state, reg, val);
}
-static int af9013_read_reg_bits(struct af9013_state *state, u16 reg, u8 pos,
- u8 len, u8 *val)
+static int af9013_rd_reg_bits(struct af9013_state *state, u16 reg, int pos,
+ int len, u8 *val)
{
int ret;
u8 tmp;
- ret = af9013_read_reg(state, reg, &tmp);
+ ret = af9013_rd_reg(state, reg, &tmp);
if (ret)
return ret;
- *val = (tmp >> pos) & regmask[len - 1];
+
+ *val = (tmp >> pos);
+ *val &= (0xff >> (8 - len));
+
return 0;
}
@@ -157,10 +219,13 @@ static int af9013_set_gpio(struct af9013_state *state, u8 gpio, u8 gpioval)
int ret;
u8 pos;
u16 addr;
- deb_info("%s: gpio:%d gpioval:%02x\n", __func__, gpio, gpioval);
-/* GPIO0 & GPIO1 0xd735
- GPIO2 & GPIO3 0xd736 */
+ dbg("%s: gpio=%d gpioval=%02x", __func__, gpio, gpioval);
+
+ /*
+ * GPIO0 & GPIO1 0xd735
+ * GPIO2 & GPIO3 0xd736
+ */
switch (gpio) {
case 0:
@@ -175,7 +240,7 @@ static int af9013_set_gpio(struct af9013_state *state, u8 gpio, u8 gpioval)
default:
err("invalid gpio:%d\n", gpio);
ret = -EINVAL;
- goto error;
+ goto err;
};
switch (gpio) {
@@ -190,16 +255,21 @@ static int af9013_set_gpio(struct af9013_state *state, u8 gpio, u8 gpioval)
break;
};
- ret = af9013_write_reg_bits(state, addr, pos, 4, gpioval);
+ ret = af9013_wr_reg_bits(state, addr, pos, 4, gpioval);
+ if (ret)
+ goto err;
-error:
+ return ret;
+err:
+ dbg("%s: failed=%d", __func__, ret);
return ret;
}
static u32 af913_div(u32 a, u32 b, u32 x)
{
u32 r = 0, c = 0, i;
- deb_info("%s: a:%d b:%d x:%d\n", __func__, a, b, x);
+
+ dbg("%s: a=%d b=%d x=%d", __func__, a, b, x);
if (a > b) {
c = a / b;
@@ -216,205 +286,407 @@ static u32 af913_div(u32 a, u32 b, u32 x)
}
r = (c << (u32)x) + r;
- deb_info("%s: a:%d b:%d x:%d r:%d r:%x\n", __func__, a, b, x, r, r);
+ dbg("%s: a=%d b=%d x=%d r=%x", __func__, a, b, x, r);
return r;
}
-static int af9013_set_coeff(struct af9013_state *state, fe_bandwidth_t bw)
+static int af9013_power_ctrl(struct af9013_state *state, u8 onoff)
{
- int ret, i, j, found;
- deb_info("%s: adc_clock:%d bw:%d\n", __func__,
- state->config.adc_clock, bw);
-
- /* lookup coeff from table */
- for (i = 0, found = 0; i < ARRAY_SIZE(coeff_table); i++) {
- if (coeff_table[i].adc_clock == state->config.adc_clock &&
- coeff_table[i].bw == bw) {
- found = 1;
- break;
- }
- }
+ int ret, i;
+ u8 tmp;
- if (!found) {
- err("invalid bw or clock");
- ret = -EINVAL;
- goto error;
+ dbg("%s: onoff=%d", __func__, onoff);
+
+ /* enable reset */
+ ret = af9013_wr_reg_bits(state, 0xd417, 4, 1, 1);
+ if (ret)
+ goto err;
+
+ /* start reset mechanism */
+ ret = af9013_wr_reg(state, 0xaeff, 1);
+ if (ret)
+ goto err;
+
+ /* wait for the reset to complete */
+ for (i = 0; i < 150; i++) {
+ ret = af9013_rd_reg_bits(state, 0xd417, 1, 1, &tmp);
+ if (ret)
+ goto err;
+
+ if (tmp)
+ break; /* reset done */
+
+ usleep_range(5000, 25000);
}
- deb_info("%s: coeff: ", __func__);
- debug_dump(coeff_table[i].val, sizeof(coeff_table[i].val), deb_info);
+ if (!tmp)
+ return -ETIMEDOUT;
- /* program */
- for (j = 0; j < sizeof(coeff_table[i].val); j++) {
- ret = af9013_write_reg(state, 0xae00 + j,
- coeff_table[i].val[j]);
+ if (onoff) {
+ /* clear reset */
+ ret = af9013_wr_reg_bits(state, 0xd417, 1, 1, 0);
if (ret)
- break;
+ goto err;
+
+ /* disable reset */
+ ret = af9013_wr_reg_bits(state, 0xd417, 4, 1, 0);
+
+ /* power on */
+ ret = af9013_wr_reg_bits(state, 0xd73a, 3, 1, 0);
+ } else {
+ /* power off */
+ ret = af9013_wr_reg_bits(state, 0xd73a, 3, 1, 1);
}
-error:
+ return ret;
+err:
+ dbg("%s: failed=%d", __func__, ret);
+ return ret;
+}
+
+static int af9013_statistics_ber_unc_start(struct dvb_frontend *fe)
+{
+ struct af9013_state *state = fe->demodulator_priv;
+ int ret;
+
+ dbg("%s", __func__);
+
+ /* reset and start BER counter */
+ ret = af9013_wr_reg_bits(state, 0xd391, 4, 1, 1);
+ if (ret)
+ goto err;
+
+ return ret;
+err:
+ dbg("%s: failed=%d", __func__, ret);
return ret;
}
-static int af9013_set_adc_ctrl(struct af9013_state *state)
+static int af9013_statistics_ber_unc_result(struct dvb_frontend *fe)
{
+ struct af9013_state *state = fe->demodulator_priv;
int ret;
- u8 buf[3], tmp, i;
- u32 adc_cw;
+ u8 buf[5];
- deb_info("%s: adc_clock:%d\n", __func__, state->config.adc_clock);
+ dbg("%s", __func__);
- /* adc frequency type */
- switch (state->config.adc_clock) {
- case 28800: /* 28.800 MHz */
- tmp = 0;
- break;
- case 20480: /* 20.480 MHz */
- tmp = 1;
+ /* check if error bit count is ready */
+ ret = af9013_rd_reg_bits(state, 0xd391, 4, 1, &buf[0]);
+ if (ret)
+ goto err;
+
+ if (!buf[0]) {
+ dbg("%s: not ready", __func__);
+ return 0;
+ }
+
+ ret = af9013_rd_regs(state, 0xd387, buf, 5);
+ if (ret)
+ goto err;
+
+ state->ber = (buf[2] << 16) | (buf[1] << 8) | buf[0];
+ state->ucblocks += (buf[4] << 8) | buf[3];
+
+ return ret;
+err:
+ dbg("%s: failed=%d", __func__, ret);
+ return ret;
+}
+
+static int af9013_statistics_snr_start(struct dvb_frontend *fe)
+{
+ struct af9013_state *state = fe->demodulator_priv;
+ int ret;
+
+ dbg("%s", __func__);
+
+ /* start SNR meas */
+ ret = af9013_wr_reg_bits(state, 0xd2e1, 3, 1, 1);
+ if (ret)
+ goto err;
+
+ return ret;
+err:
+ dbg("%s: failed=%d", __func__, ret);
+ return ret;
+}
+
+static int af9013_statistics_snr_result(struct dvb_frontend *fe)
+{
+ struct af9013_state *state = fe->demodulator_priv;
+ int ret, i, len;
+ u8 buf[3], tmp;
+ u32 snr_val;
+ const struct af9013_snr *uninitialized_var(snr_lut);
+
+ dbg("%s", __func__);
+
+ /* check if SNR ready */
+ ret = af9013_rd_reg_bits(state, 0xd2e1, 3, 1, &tmp);
+ if (ret)
+ goto err;
+
+ if (!tmp) {
+ dbg("%s: not ready", __func__);
+ return 0;
+ }
+
+ /* read value */
+ ret = af9013_rd_regs(state, 0xd2e3, buf, 3);
+ if (ret)
+ goto err;
+
+ snr_val = (buf[2] << 16) | (buf[1] << 8) | buf[0];
+
+ /* read current modulation */
+ ret = af9013_rd_reg(state, 0xd3c1, &tmp);
+ if (ret)
+ goto err;
+
+ switch ((tmp >> 6) & 3) {
+ case 0:
+ len = ARRAY_SIZE(qpsk_snr_lut);
+ snr_lut = qpsk_snr_lut;
break;
- case 28000: /* 28.000 MHz */
- tmp = 2;
+ case 1:
+ len = ARRAY_SIZE(qam16_snr_lut);
+ snr_lut = qam16_snr_lut;
break;
- case 25000: /* 25.000 MHz */
- tmp = 3;
+ case 2:
+ len = ARRAY_SIZE(qam64_snr_lut);
+ snr_lut = qam64_snr_lut;
break;
default:
- err("invalid xtal");
- return -EINVAL;
+ goto err;
+ break;
}
- adc_cw = af913_div(state->config.adc_clock*1000, 1000000ul, 19ul);
+ for (i = 0; i < len; i++) {
+ tmp = snr_lut[i].snr;
- buf[0] = (u8) ((adc_cw & 0x000000ff));
- buf[1] = (u8) ((adc_cw & 0x0000ff00) >> 8);
- buf[2] = (u8) ((adc_cw & 0x00ff0000) >> 16);
+ if (snr_val < snr_lut[i].val)
+ break;
+ }
+ state->snr = tmp * 10; /* dB/10 */
- deb_info("%s: adc_cw:", __func__);
- debug_dump(buf, sizeof(buf), deb_info);
+ return ret;
+err:
+ dbg("%s: failed=%d", __func__, ret);
+ return ret;
+}
+
+static int af9013_statistics_signal_strength(struct dvb_frontend *fe)
+{
+ struct af9013_state *state = fe->demodulator_priv;
+ int ret = 0;
+ u8 buf[2], rf_gain, if_gain;
+ int signal_strength;
+
+ dbg("%s", __func__);
+
+ if (!state->signal_strength_en)
+ return 0;
+
+ ret = af9013_rd_regs(state, 0xd07c, buf, 2);
+ if (ret)
+ goto err;
+
+ rf_gain = buf[0];
+ if_gain = buf[1];
+
+ signal_strength = (0xffff / \
+ (9 * (state->rf_50 + state->if_50) - \
+ 11 * (state->rf_80 + state->if_80))) * \
+ (10 * (rf_gain + if_gain) - \
+ 11 * (state->rf_80 + state->if_80));
+ if (signal_strength < 0)
+ signal_strength = 0;
+ else if (signal_strength > 0xffff)
+ signal_strength = 0xffff;
+
+ state->signal_strength = signal_strength;
- /* program */
- for (i = 0; i < sizeof(buf); i++) {
- ret = af9013_write_reg(state, 0xd180 + i, buf[i]);
- if (ret)
- goto error;
- }
- ret = af9013_write_reg_bits(state, 0x9bd2, 0, 4, tmp);
-error:
+ return ret;
+err:
+ dbg("%s: failed=%d", __func__, ret);
return ret;
}
-static int af9013_set_freq_ctrl(struct af9013_state *state, fe_bandwidth_t bw)
+static void af9013_statistics_work(struct work_struct *work)
{
int ret;
- u16 addr;
- u8 buf[3], i, j;
- u32 adc_freq, freq_cw;
- s8 bfs_spec_inv;
- int if_sample_freq;
-
- for (j = 0; j < 3; j++) {
- if (j == 0) {
- addr = 0xd140; /* fcw normal */
- bfs_spec_inv = state->config.rf_spec_inv ? -1 : 1;
- } else if (j == 1) {
- addr = 0x9be7; /* fcw dummy ram */
- bfs_spec_inv = state->config.rf_spec_inv ? -1 : 1;
- } else {
- addr = 0x9bea; /* fcw inverted */
- bfs_spec_inv = state->config.rf_spec_inv ? 1 : -1;
- }
+ struct af9013_state *state = container_of(work,
+ struct af9013_state, statistics_work.work);
+ unsigned int next_msec;
+
+ /* update only signal strength when demod is not locked */
+ if (!(state->fe_status & FE_HAS_LOCK)) {
+ state->statistics_step = 0;
+ state->ber = 0;
+ state->snr = 0;
+ }
+
+ switch (state->statistics_step) {
+ default:
+ state->statistics_step = 0;
+ case 0:
+ ret = af9013_statistics_signal_strength(&state->fe);
+ state->statistics_step++;
+ next_msec = 300;
+ break;
+ case 1:
+ ret = af9013_statistics_snr_start(&state->fe);
+ state->statistics_step++;
+ next_msec = 200;
+ break;
+ case 2:
+ ret = af9013_statistics_ber_unc_start(&state->fe);
+ state->statistics_step++;
+ next_msec = 1000;
+ break;
+ case 3:
+ ret = af9013_statistics_snr_result(&state->fe);
+ state->statistics_step++;
+ next_msec = 400;
+ break;
+ case 4:
+ ret = af9013_statistics_ber_unc_result(&state->fe);
+ state->statistics_step++;
+ next_msec = 100;
+ break;
+ }
- adc_freq = state->config.adc_clock * 1000;
- if_sample_freq = state->config.tuner_if * 1000;
+ schedule_delayed_work(&state->statistics_work,
+ msecs_to_jiffies(next_msec));
- /* TDA18271 uses different sampling freq for every bw */
- if (state->config.tuner == AF9013_TUNER_TDA18271) {
- switch (bw) {
- case BANDWIDTH_6_MHZ:
- if_sample_freq = 3300000; /* 3.3 MHz */
- break;
- case BANDWIDTH_7_MHZ:
- if_sample_freq = 3500000; /* 3.5 MHz */
- break;
- case BANDWIDTH_8_MHZ:
- default:
- if_sample_freq = 4000000; /* 4.0 MHz */
- break;
- }
- } else if (state->config.tuner == AF9013_TUNER_TDA18218) {
- switch (bw) {
- case BANDWIDTH_6_MHZ:
- if_sample_freq = 3000000; /* 3 MHz */
- break;
- case BANDWIDTH_7_MHZ:
- if_sample_freq = 3500000; /* 3.5 MHz */
- break;
- case BANDWIDTH_8_MHZ:
- default:
- if_sample_freq = 4000000; /* 4 MHz */
+ return;
+}
+
+static int af9013_get_tune_settings(struct dvb_frontend *fe,
+ struct dvb_frontend_tune_settings *fesettings)
+{
+ fesettings->min_delay_ms = 800;
+ fesettings->step_size = 0;
+ fesettings->max_drift = 0;
+
+ return 0;
+}
+
+static int af9013_set_frontend(struct dvb_frontend *fe)
+{
+ struct af9013_state *state = fe->demodulator_priv;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ int ret, i, sampling_freq;
+ bool auto_mode, spec_inv;
+ u8 buf[6];
+ u32 if_frequency, freq_cw;
+
+ dbg("%s: frequency=%d bandwidth_hz=%d", __func__,
+ c->frequency, c->bandwidth_hz);
+
+ /* program tuner */
+ if (fe->ops.tuner_ops.set_params)
+ fe->ops.tuner_ops.set_params(fe);
+
+ /* program CFOE coefficients */
+ if (c->bandwidth_hz != state->bandwidth_hz) {
+ for (i = 0; i < ARRAY_SIZE(coeff_lut); i++) {
+ if (coeff_lut[i].clock == state->config.clock &&
+ coeff_lut[i].bandwidth_hz == c->bandwidth_hz) {
break;
}
}
- while (if_sample_freq > (adc_freq / 2))
- if_sample_freq = if_sample_freq - adc_freq;
+ ret = af9013_wr_regs(state, 0xae00, coeff_lut[i].val,
+ sizeof(coeff_lut[i].val));
+ }
- if (if_sample_freq >= 0)
- bfs_spec_inv = bfs_spec_inv * (-1);
+ /* program frequency control */
+ if (c->bandwidth_hz != state->bandwidth_hz || state->first_tune) {
+ /* get used IF frequency */
+ if (fe->ops.tuner_ops.get_if_frequency)
+ fe->ops.tuner_ops.get_if_frequency(fe, &if_frequency);
else
- if_sample_freq = if_sample_freq * (-1);
+ if_frequency = state->config.if_frequency;
+
+ sampling_freq = if_frequency;
- freq_cw = af913_div(if_sample_freq, adc_freq, 23ul);
+ while (sampling_freq > (state->config.clock / 2))
+ sampling_freq -= state->config.clock;
- if (bfs_spec_inv == -1)
- freq_cw = 0x00800000 - freq_cw;
+ if (sampling_freq < 0) {
+ sampling_freq *= -1;
+ spec_inv = state->config.spec_inv;
+ } else {
+ spec_inv = !state->config.spec_inv;
+ }
- buf[0] = (u8) ((freq_cw & 0x000000ff));
- buf[1] = (u8) ((freq_cw & 0x0000ff00) >> 8);
- buf[2] = (u8) ((freq_cw & 0x007f0000) >> 16);
+ freq_cw = af913_div(sampling_freq, state->config.clock, 23);
+ if (spec_inv)
+ freq_cw = 0x800000 - freq_cw;
- deb_info("%s: freq_cw:", __func__);
- debug_dump(buf, sizeof(buf), deb_info);
+ buf[0] = (freq_cw >> 0) & 0xff;
+ buf[1] = (freq_cw >> 8) & 0xff;
+ buf[2] = (freq_cw >> 16) & 0x7f;
- /* program */
- for (i = 0; i < sizeof(buf); i++) {
- ret = af9013_write_reg(state, addr++, buf[i]);
- if (ret)
- goto error;
- }
+ freq_cw = 0x800000 - freq_cw;
+
+ buf[3] = (freq_cw >> 0) & 0xff;
+ buf[4] = (freq_cw >> 8) & 0xff;
+ buf[5] = (freq_cw >> 16) & 0x7f;
+
+ ret = af9013_wr_regs(state, 0xd140, buf, 3);
+ if (ret)
+ goto err;
+
+ ret = af9013_wr_regs(state, 0x9be7, buf, 6);
+ if (ret)
+ goto err;
}
-error:
- return ret;
-}
-static int af9013_set_ofdm_params(struct af9013_state *state,
- struct dvb_ofdm_parameters *params, u8 *auto_mode)
-{
- int ret;
- u8 i, buf[3] = {0, 0, 0};
- *auto_mode = 0; /* set if parameters are requested to auto set */
+ /* clear TPS lock flag */
+ ret = af9013_wr_reg_bits(state, 0xd330, 3, 1, 1);
+ if (ret)
+ goto err;
+
+ /* clear MPEG2 lock flag */
+ ret = af9013_wr_reg_bits(state, 0xd507, 6, 1, 0);
+ if (ret)
+ goto err;
+
+ /* empty channel function */
+ ret = af9013_wr_reg_bits(state, 0x9bfe, 0, 1, 0);
+ if (ret)
+ goto err;
- /* Try auto-detect transmission parameters in case of AUTO requested or
- garbage parameters given by application for compatibility.
- MPlayer seems to provide garbage parameters currently. */
+ /* empty DVB-T channel function */
+ ret = af9013_wr_reg_bits(state, 0x9bc2, 0, 1, 0);
+ if (ret)
+ goto err;
+
+ /* transmission parameters */
+ auto_mode = false;
+ memset(buf, 0, 3);
- switch (params->transmission_mode) {
+ switch (c->transmission_mode) {
case TRANSMISSION_MODE_AUTO:
- *auto_mode = 1;
+ auto_mode = 1;
+ break;
case TRANSMISSION_MODE_2K:
break;
case TRANSMISSION_MODE_8K:
buf[0] |= (1 << 0);
break;
default:
- deb_info("%s: invalid transmission_mode\n", __func__);
- *auto_mode = 1;
+ dbg("%s: invalid transmission_mode", __func__);
+ auto_mode = 1;
}
- switch (params->guard_interval) {
+ switch (c->guard_interval) {
case GUARD_INTERVAL_AUTO:
- *auto_mode = 1;
+ auto_mode = 1;
+ break;
case GUARD_INTERVAL_1_32:
break;
case GUARD_INTERVAL_1_16:
@@ -427,13 +699,14 @@ static int af9013_set_ofdm_params(struct af9013_state *state,
buf[0] |= (3 << 2);
break;
default:
- deb_info("%s: invalid guard_interval\n", __func__);
- *auto_mode = 1;
+ dbg("%s: invalid guard_interval", __func__);
+ auto_mode = 1;
}
- switch (params->hierarchy_information) {
+ switch (c->hierarchy) {
case HIERARCHY_AUTO:
- *auto_mode = 1;
+ auto_mode = 1;
+ break;
case HIERARCHY_NONE:
break;
case HIERARCHY_1:
@@ -446,13 +719,14 @@ static int af9013_set_ofdm_params(struct af9013_state *state,
buf[0] |= (3 << 4);
break;
default:
- deb_info("%s: invalid hierarchy_information\n", __func__);
- *auto_mode = 1;
+ dbg("%s: invalid hierarchy", __func__);
+ auto_mode = 1;
};
- switch (params->constellation) {
+ switch (c->modulation) {
case QAM_AUTO:
- *auto_mode = 1;
+ auto_mode = 1;
+ break;
case QPSK:
break;
case QAM_16:
@@ -462,16 +736,17 @@ static int af9013_set_ofdm_params(struct af9013_state *state,
buf[1] |= (2 << 6);
break;
default:
- deb_info("%s: invalid constellation\n", __func__);
- *auto_mode = 1;
+ dbg("%s: invalid modulation", __func__);
+ auto_mode = 1;
}
/* Use HP. How and which case we can switch to LP? */
buf[1] |= (1 << 4);
- switch (params->code_rate_HP) {
+ switch (c->code_rate_HP) {
case FEC_AUTO:
- *auto_mode = 1;
+ auto_mode = 1;
+ break;
case FEC_1_2:
break;
case FEC_2_3:
@@ -487,16 +762,14 @@ static int af9013_set_ofdm_params(struct af9013_state *state,
buf[2] |= (4 << 0);
break;
default:
- deb_info("%s: invalid code_rate_HP\n", __func__);
- *auto_mode = 1;
+ dbg("%s: invalid code_rate_HP", __func__);
+ auto_mode = 1;
}
- switch (params->code_rate_LP) {
+ switch (c->code_rate_LP) {
case FEC_AUTO:
- /* if HIERARCHY_NONE and FEC_NONE then LP FEC is set to FEC_AUTO
- by dvb_frontend.c for compatibility */
- if (params->hierarchy_information != HIERARCHY_NONE)
- *auto_mode = 1;
+ auto_mode = 1;
+ break;
case FEC_1_2:
break;
case FEC_2_3:
@@ -512,709 +785,373 @@ static int af9013_set_ofdm_params(struct af9013_state *state,
buf[2] |= (4 << 3);
break;
case FEC_NONE:
- if (params->hierarchy_information == HIERARCHY_AUTO)
- break;
+ break;
default:
- deb_info("%s: invalid code_rate_LP\n", __func__);
- *auto_mode = 1;
+ dbg("%s: invalid code_rate_LP", __func__);
+ auto_mode = 1;
}
- switch (params->bandwidth) {
- case BANDWIDTH_6_MHZ:
+ switch (c->bandwidth_hz) {
+ case 6000000:
break;
- case BANDWIDTH_7_MHZ:
+ case 7000000:
buf[1] |= (1 << 2);
break;
- case BANDWIDTH_8_MHZ:
+ case 8000000:
buf[1] |= (2 << 2);
break;
default:
- deb_info("%s: invalid bandwidth\n", __func__);
- buf[1] |= (2 << 2); /* cannot auto-detect BW, try 8 MHz */
- }
-
- /* program */
- for (i = 0; i < sizeof(buf); i++) {
- ret = af9013_write_reg(state, 0xd3c0 + i, buf[i]);
- if (ret)
- break;
+ dbg("%s: invalid bandwidth_hz", __func__);
+ ret = -EINVAL;
+ goto err;
}
- return ret;
-}
-
-static int af9013_reset(struct af9013_state *state, u8 sleep)
-{
- int ret;
- u8 tmp, i;
- deb_info("%s\n", __func__);
-
- /* enable OFDM reset */
- ret = af9013_write_reg_bits(state, 0xd417, 4, 1, 1);
- if (ret)
- goto error;
-
- /* start reset mechanism */
- ret = af9013_write_reg(state, 0xaeff, 1);
+ ret = af9013_wr_regs(state, 0xd3c0, buf, 3);
if (ret)
- goto error;
+ goto err;
- /* reset is done when bit 1 is set */
- for (i = 0; i < 150; i++) {
- ret = af9013_read_reg_bits(state, 0xd417, 1, 1, &tmp);
- if (ret)
- goto error;
- if (tmp)
- break; /* reset done */
- msleep(10);
- }
- if (!tmp)
- return -ETIMEDOUT;
-
- /* don't clear reset when going to sleep */
- if (!sleep) {
- /* clear OFDM reset */
- ret = af9013_write_reg_bits(state, 0xd417, 1, 1, 0);
+ if (auto_mode) {
+ /* clear easy mode flag */
+ ret = af9013_wr_reg(state, 0xaefd, 0);
if (ret)
- goto error;
-
- /* disable OFDM reset */
- ret = af9013_write_reg_bits(state, 0xd417, 4, 1, 0);
- }
-error:
- return ret;
-}
-
-static int af9013_power_ctrl(struct af9013_state *state, u8 onoff)
-{
- int ret;
- deb_info("%s: onoff:%d\n", __func__, onoff);
+ goto err;
- if (onoff) {
- /* power on */
- ret = af9013_write_reg_bits(state, 0xd73a, 3, 1, 0);
- if (ret)
- goto error;
- ret = af9013_write_reg_bits(state, 0xd417, 1, 1, 0);
- if (ret)
- goto error;
- ret = af9013_write_reg_bits(state, 0xd417, 4, 1, 0);
+ dbg("%s: auto params", __func__);
} else {
- /* power off */
- ret = af9013_reset(state, 1);
+ /* set easy mode flag */
+ ret = af9013_wr_reg(state, 0xaefd, 1);
if (ret)
- goto error;
- ret = af9013_write_reg_bits(state, 0xd73a, 3, 1, 1);
- }
-error:
- return ret;
-}
-
-static int af9013_lock_led(struct af9013_state *state, u8 onoff)
-{
- deb_info("%s: onoff:%d\n", __func__, onoff);
-
- return af9013_write_reg_bits(state, 0xd730, 0, 1, onoff);
-}
-
-static int af9013_set_frontend(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *params)
-{
- struct af9013_state *state = fe->demodulator_priv;
- int ret;
- u8 auto_mode; /* auto set TPS */
+ goto err;
- deb_info("%s: freq:%d bw:%d\n", __func__, params->frequency,
- params->u.ofdm.bandwidth);
-
- state->frequency = params->frequency;
-
- /* program tuner */
- if (fe->ops.tuner_ops.set_params)
- fe->ops.tuner_ops.set_params(fe, params);
-
- /* program CFOE coefficients */
- ret = af9013_set_coeff(state, params->u.ofdm.bandwidth);
- if (ret)
- goto error;
-
- /* program frequency control */
- ret = af9013_set_freq_ctrl(state, params->u.ofdm.bandwidth);
- if (ret)
- goto error;
-
- /* clear TPS lock flag (inverted flag) */
- ret = af9013_write_reg_bits(state, 0xd330, 3, 1, 1);
- if (ret)
- goto error;
-
- /* clear MPEG2 lock flag */
- ret = af9013_write_reg_bits(state, 0xd507, 6, 1, 0);
- if (ret)
- goto error;
-
- /* empty channel function */
- ret = af9013_write_reg_bits(state, 0x9bfe, 0, 1, 0);
- if (ret)
- goto error;
-
- /* empty DVB-T channel function */
- ret = af9013_write_reg_bits(state, 0x9bc2, 0, 1, 0);
- if (ret)
- goto error;
-
- /* program TPS and bandwidth, check if auto mode needed */
- ret = af9013_set_ofdm_params(state, &params->u.ofdm, &auto_mode);
- if (ret)
- goto error;
-
- if (auto_mode) {
- /* clear easy mode flag */
- ret = af9013_write_reg(state, 0xaefd, 0);
- deb_info("%s: auto TPS\n", __func__);
- } else {
- /* set easy mode flag */
- ret = af9013_write_reg(state, 0xaefd, 1);
+ ret = af9013_wr_reg(state, 0xaefe, 0);
if (ret)
- goto error;
- ret = af9013_write_reg(state, 0xaefe, 0);
- deb_info("%s: manual TPS\n", __func__);
+ goto err;
+
+ dbg("%s: manual params", __func__);
}
- if (ret)
- goto error;
- /* everything is set, lets try to receive channel - OFSM GO! */
- ret = af9013_write_reg(state, 0xffff, 0);
+ /* tune */
+ ret = af9013_wr_reg(state, 0xffff, 0);
if (ret)
- goto error;
+ goto err;
+
+ state->bandwidth_hz = c->bandwidth_hz;
+ state->set_frontend_jiffies = jiffies;
+ state->first_tune = false;
-error:
+ return ret;
+err:
+ dbg("%s: failed=%d", __func__, ret);
return ret;
}
-static int af9013_get_frontend(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *p)
+static int af9013_get_frontend(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct af9013_state *state = fe->demodulator_priv;
int ret;
- u8 i, buf[3];
- deb_info("%s\n", __func__);
+ u8 buf[3];
- /* read TPS registers */
- for (i = 0; i < 3; i++) {
- ret = af9013_read_reg(state, 0xd3c0 + i, &buf[i]);
- if (ret)
- goto error;
- }
+ dbg("%s", __func__);
+
+ ret = af9013_rd_regs(state, 0xd3c0, buf, 3);
+ if (ret)
+ goto err;
switch ((buf[1] >> 6) & 3) {
case 0:
- p->u.ofdm.constellation = QPSK;
+ c->modulation = QPSK;
break;
case 1:
- p->u.ofdm.constellation = QAM_16;
+ c->modulation = QAM_16;
break;
case 2:
- p->u.ofdm.constellation = QAM_64;
+ c->modulation = QAM_64;
break;
}
switch ((buf[0] >> 0) & 3) {
case 0:
- p->u.ofdm.transmission_mode = TRANSMISSION_MODE_2K;
+ c->transmission_mode = TRANSMISSION_MODE_2K;
break;
case 1:
- p->u.ofdm.transmission_mode = TRANSMISSION_MODE_8K;
+ c->transmission_mode = TRANSMISSION_MODE_8K;
}
switch ((buf[0] >> 2) & 3) {
case 0:
- p->u.ofdm.guard_interval = GUARD_INTERVAL_1_32;
+ c->guard_interval = GUARD_INTERVAL_1_32;
break;
case 1:
- p->u.ofdm.guard_interval = GUARD_INTERVAL_1_16;
+ c->guard_interval = GUARD_INTERVAL_1_16;
break;
case 2:
- p->u.ofdm.guard_interval = GUARD_INTERVAL_1_8;
+ c->guard_interval = GUARD_INTERVAL_1_8;
break;
case 3:
- p->u.ofdm.guard_interval = GUARD_INTERVAL_1_4;
+ c->guard_interval = GUARD_INTERVAL_1_4;
break;
}
switch ((buf[0] >> 4) & 7) {
case 0:
- p->u.ofdm.hierarchy_information = HIERARCHY_NONE;
+ c->hierarchy = HIERARCHY_NONE;
break;
case 1:
- p->u.ofdm.hierarchy_information = HIERARCHY_1;
+ c->hierarchy = HIERARCHY_1;
break;
case 2:
- p->u.ofdm.hierarchy_information = HIERARCHY_2;
+ c->hierarchy = HIERARCHY_2;
break;
case 3:
- p->u.ofdm.hierarchy_information = HIERARCHY_4;
+ c->hierarchy = HIERARCHY_4;
break;
}
switch ((buf[2] >> 0) & 7) {
case 0:
- p->u.ofdm.code_rate_HP = FEC_1_2;
+ c->code_rate_HP = FEC_1_2;
break;
case 1:
- p->u.ofdm.code_rate_HP = FEC_2_3;
+ c->code_rate_HP = FEC_2_3;
break;
case 2:
- p->u.ofdm.code_rate_HP = FEC_3_4;
+ c->code_rate_HP = FEC_3_4;
break;
case 3:
- p->u.ofdm.code_rate_HP = FEC_5_6;
+ c->code_rate_HP = FEC_5_6;
break;
case 4:
- p->u.ofdm.code_rate_HP = FEC_7_8;
+ c->code_rate_HP = FEC_7_8;
break;
}
switch ((buf[2] >> 3) & 7) {
case 0:
- p->u.ofdm.code_rate_LP = FEC_1_2;
+ c->code_rate_LP = FEC_1_2;
break;
case 1:
- p->u.ofdm.code_rate_LP = FEC_2_3;
+ c->code_rate_LP = FEC_2_3;
break;
case 2:
- p->u.ofdm.code_rate_LP = FEC_3_4;
+ c->code_rate_LP = FEC_3_4;
break;
case 3:
- p->u.ofdm.code_rate_LP = FEC_5_6;
+ c->code_rate_LP = FEC_5_6;
break;
case 4:
- p->u.ofdm.code_rate_LP = FEC_7_8;
+ c->code_rate_LP = FEC_7_8;
break;
}
switch ((buf[1] >> 2) & 3) {
case 0:
- p->u.ofdm.bandwidth = BANDWIDTH_6_MHZ;
+ c->bandwidth_hz = 6000000;
break;
case 1:
- p->u.ofdm.bandwidth = BANDWIDTH_7_MHZ;
+ c->bandwidth_hz = 7000000;
break;
case 2:
- p->u.ofdm.bandwidth = BANDWIDTH_8_MHZ;
+ c->bandwidth_hz = 8000000;
break;
}
- p->inversion = INVERSION_AUTO;
- p->frequency = state->frequency;
-
-error:
return ret;
-}
-
-static int af9013_update_ber_unc(struct dvb_frontend *fe)
-{
- struct af9013_state *state = fe->demodulator_priv;
- int ret;
- u8 buf[3], i;
- u32 error_bit_count = 0;
- u32 total_bit_count = 0;
- u32 abort_packet_count = 0;
-
- state->ber = 0;
-
- /* check if error bit count is ready */
- ret = af9013_read_reg_bits(state, 0xd391, 4, 1, &buf[0]);
- if (ret)
- goto error;
- if (!buf[0])
- goto exit;
-
- /* get RSD packet abort count */
- for (i = 0; i < 2; i++) {
- ret = af9013_read_reg(state, 0xd38a + i, &buf[i]);
- if (ret)
- goto error;
- }
- abort_packet_count = (buf[1] << 8) + buf[0];
-
- /* get error bit count */
- for (i = 0; i < 3; i++) {
- ret = af9013_read_reg(state, 0xd387 + i, &buf[i]);
- if (ret)
- goto error;
- }
- error_bit_count = (buf[2] << 16) + (buf[1] << 8) + buf[0];
- error_bit_count = error_bit_count - abort_packet_count * 8 * 8;
-
- /* get used RSD counting period (10000 RSD packets used) */
- for (i = 0; i < 2; i++) {
- ret = af9013_read_reg(state, 0xd385 + i, &buf[i]);
- if (ret)
- goto error;
- }
- total_bit_count = (buf[1] << 8) + buf[0];
- total_bit_count = total_bit_count - abort_packet_count;
- total_bit_count = total_bit_count * 204 * 8;
-
- if (total_bit_count)
- state->ber = error_bit_count * 1000000000 / total_bit_count;
-
- state->ucblocks += abort_packet_count;
-
- deb_info("%s: err bits:%d total bits:%d abort count:%d\n", __func__,
- error_bit_count, total_bit_count, abort_packet_count);
-
- /* set BER counting range */
- ret = af9013_write_reg(state, 0xd385, 10000 & 0xff);
- if (ret)
- goto error;
- ret = af9013_write_reg(state, 0xd386, 10000 >> 8);
- if (ret)
- goto error;
- /* reset and start BER counter */
- ret = af9013_write_reg_bits(state, 0xd391, 4, 1, 1);
- if (ret)
- goto error;
-
-exit:
-error:
+err:
+ dbg("%s: failed=%d", __func__, ret);
return ret;
}
-static int af9013_update_snr(struct dvb_frontend *fe)
+static int af9013_read_status(struct dvb_frontend *fe, fe_status_t *status)
{
struct af9013_state *state = fe->demodulator_priv;
int ret;
- u8 buf[3], i, len;
- u32 quant = 0;
- struct snr_table *uninitialized_var(snr_table);
-
- /* check if quantizer ready (for snr) */
- ret = af9013_read_reg_bits(state, 0xd2e1, 3, 1, &buf[0]);
- if (ret)
- goto error;
- if (buf[0]) {
- /* quantizer ready - read it */
- for (i = 0; i < 3; i++) {
- ret = af9013_read_reg(state, 0xd2e3 + i, &buf[i]);
- if (ret)
- goto error;
- }
- quant = (buf[2] << 16) + (buf[1] << 8) + buf[0];
-
- /* read current constellation */
- ret = af9013_read_reg(state, 0xd3c1, &buf[0]);
- if (ret)
- goto error;
-
- switch ((buf[0] >> 6) & 3) {
- case 0:
- len = ARRAY_SIZE(qpsk_snr_table);
- snr_table = qpsk_snr_table;
- break;
- case 1:
- len = ARRAY_SIZE(qam16_snr_table);
- snr_table = qam16_snr_table;
- break;
- case 2:
- len = ARRAY_SIZE(qam64_snr_table);
- snr_table = qam64_snr_table;
- break;
- default:
- len = 0;
- break;
- }
-
- if (len) {
- for (i = 0; i < len; i++) {
- if (quant < snr_table[i].val) {
- state->snr = snr_table[i].snr * 10;
- break;
- }
- }
- }
-
- /* set quantizer super frame count */
- ret = af9013_write_reg(state, 0xd2e2, 1);
- if (ret)
- goto error;
-
- /* check quantizer availability */
- for (i = 0; i < 10; i++) {
- msleep(10);
- ret = af9013_read_reg_bits(state, 0xd2e6, 0, 1,
- &buf[0]);
- if (ret)
- goto error;
- if (!buf[0])
- break;
- }
-
- /* reset quantizer */
- ret = af9013_write_reg_bits(state, 0xd2e1, 3, 1, 1);
- if (ret)
- goto error;
- }
-
-error:
- return ret;
-}
-
-static int af9013_update_signal_strength(struct dvb_frontend *fe)
-{
- struct af9013_state *state = fe->demodulator_priv;
- int ret = 0;
- u8 rf_gain, if_gain;
- int signal_strength;
-
- deb_info("%s\n", __func__);
+ u8 tmp;
- if (state->signal_strength_en) {
- ret = af9013_read_reg(state, 0xd07c, &rf_gain);
- if (ret)
- goto error;
- ret = af9013_read_reg(state, 0xd07d, &if_gain);
- if (ret)
- goto error;
- signal_strength = (0xffff / \
- (9 * (state->rf_50 + state->if_50) - \
- 11 * (state->rf_80 + state->if_80))) * \
- (10 * (rf_gain + if_gain) - \
- 11 * (state->rf_80 + state->if_80));
- if (signal_strength < 0)
- signal_strength = 0;
- else if (signal_strength > 0xffff)
- signal_strength = 0xffff;
-
- state->signal_strength = signal_strength;
+ /*
+ * Return status from the cache if it is younger than 2000ms with the
+ * exception of last tune is done during 4000ms.
+ */
+ if (time_is_after_jiffies(
+ state->read_status_jiffies + msecs_to_jiffies(2000)) &&
+ time_is_before_jiffies(
+ state->set_frontend_jiffies + msecs_to_jiffies(4000))
+ ) {
+ *status = state->fe_status;
+ return 0;
} else {
- state->signal_strength = 0;
+ *status = 0;
}
-error:
- return ret;
-}
-
-static int af9013_update_statistics(struct dvb_frontend *fe)
-{
- struct af9013_state *state = fe->demodulator_priv;
- int ret;
-
- if (time_before(jiffies, state->next_statistics_check))
- return 0;
-
- /* set minimum statistic update interval */
- state->next_statistics_check = jiffies + msecs_to_jiffies(1200);
-
- ret = af9013_update_signal_strength(fe);
- if (ret)
- goto error;
- ret = af9013_update_snr(fe);
- if (ret)
- goto error;
- ret = af9013_update_ber_unc(fe);
- if (ret)
- goto error;
-
-error:
- return ret;
-}
-
-static int af9013_get_tune_settings(struct dvb_frontend *fe,
- struct dvb_frontend_tune_settings *fesettings)
-{
- fesettings->min_delay_ms = 800;
- fesettings->step_size = 0;
- fesettings->max_drift = 0;
-
- return 0;
-}
-
-static int af9013_read_status(struct dvb_frontend *fe, fe_status_t *status)
-{
- struct af9013_state *state = fe->demodulator_priv;
- int ret = 0;
- u8 tmp;
- *status = 0;
-
/* MPEG2 lock */
- ret = af9013_read_reg_bits(state, 0xd507, 6, 1, &tmp);
+ ret = af9013_rd_reg_bits(state, 0xd507, 6, 1, &tmp);
if (ret)
- goto error;
+ goto err;
+
if (tmp)
*status |= FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_VITERBI |
FE_HAS_SYNC | FE_HAS_LOCK;
if (!*status) {
/* TPS lock */
- ret = af9013_read_reg_bits(state, 0xd330, 3, 1, &tmp);
+ ret = af9013_rd_reg_bits(state, 0xd330, 3, 1, &tmp);
if (ret)
- goto error;
+ goto err;
+
if (tmp)
*status |= FE_HAS_SIGNAL | FE_HAS_CARRIER |
FE_HAS_VITERBI;
}
- if (!*status) {
- /* CFO lock */
- ret = af9013_read_reg_bits(state, 0xd333, 7, 1, &tmp);
- if (ret)
- goto error;
- if (tmp)
- *status |= FE_HAS_SIGNAL | FE_HAS_CARRIER;
- }
-
- if (!*status) {
- /* SFOE lock */
- ret = af9013_read_reg_bits(state, 0xd334, 6, 1, &tmp);
- if (ret)
- goto error;
- if (tmp)
- *status |= FE_HAS_SIGNAL | FE_HAS_CARRIER;
- }
+ state->fe_status = *status;
+ state->read_status_jiffies = jiffies;
- if (!*status) {
- /* AGC lock */
- ret = af9013_read_reg_bits(state, 0xd1a0, 6, 1, &tmp);
- if (ret)
- goto error;
- if (tmp)
- *status |= FE_HAS_SIGNAL;
- }
-
- ret = af9013_update_statistics(fe);
-
-error:
+ return ret;
+err:
+ dbg("%s: failed=%d", __func__, ret);
return ret;
}
-
-static int af9013_read_ber(struct dvb_frontend *fe, u32 *ber)
+static int af9013_read_snr(struct dvb_frontend *fe, u16 *snr)
{
struct af9013_state *state = fe->demodulator_priv;
- int ret;
- ret = af9013_update_statistics(fe);
- *ber = state->ber;
- return ret;
+ *snr = state->snr;
+ return 0;
}
static int af9013_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
{
struct af9013_state *state = fe->demodulator_priv;
- int ret;
- ret = af9013_update_statistics(fe);
*strength = state->signal_strength;
- return ret;
+ return 0;
}
-static int af9013_read_snr(struct dvb_frontend *fe, u16 *snr)
+static int af9013_read_ber(struct dvb_frontend *fe, u32 *ber)
{
struct af9013_state *state = fe->demodulator_priv;
- int ret;
- ret = af9013_update_statistics(fe);
- *snr = state->snr;
- return ret;
+ *ber = state->ber;
+ return 0;
}
static int af9013_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
{
struct af9013_state *state = fe->demodulator_priv;
- int ret;
- ret = af9013_update_statistics(fe);
*ucblocks = state->ucblocks;
- return ret;
-}
-
-static int af9013_sleep(struct dvb_frontend *fe)
-{
- struct af9013_state *state = fe->demodulator_priv;
- int ret;
- deb_info("%s\n", __func__);
-
- ret = af9013_lock_led(state, 0);
- if (ret)
- goto error;
-
- ret = af9013_power_ctrl(state, 0);
-error:
- return ret;
+ return 0;
}
static int af9013_init(struct dvb_frontend *fe)
{
struct af9013_state *state = fe->demodulator_priv;
int ret, i, len;
- u8 tmp0, tmp1;
- struct regdesc *init;
- deb_info("%s\n", __func__);
+ u8 buf[3], tmp;
+ u32 adc_cw;
+ const struct af9013_reg_bit *init;
- /* reset OFDM */
- ret = af9013_reset(state, 0);
- if (ret)
- goto error;
+ dbg("%s", __func__);
/* power on */
ret = af9013_power_ctrl(state, 1);
if (ret)
- goto error;
+ goto err;
/* enable ADC */
- ret = af9013_write_reg(state, 0xd73a, 0xa4);
+ ret = af9013_wr_reg(state, 0xd73a, 0xa4);
if (ret)
- goto error;
+ goto err;
/* write API version to firmware */
- for (i = 0; i < sizeof(state->config.api_version); i++) {
- ret = af9013_write_reg(state, 0x9bf2 + i,
- state->config.api_version[i]);
- if (ret)
- goto error;
- }
+ ret = af9013_wr_regs(state, 0x9bf2, state->config.api_version, 4);
+ if (ret)
+ goto err;
/* program ADC control */
- ret = af9013_set_adc_ctrl(state);
+ switch (state->config.clock) {
+ case 28800000: /* 28.800 MHz */
+ tmp = 0;
+ break;
+ case 20480000: /* 20.480 MHz */
+ tmp = 1;
+ break;
+ case 28000000: /* 28.000 MHz */
+ tmp = 2;
+ break;
+ case 25000000: /* 25.000 MHz */
+ tmp = 3;
+ break;
+ default:
+ err("invalid clock");
+ return -EINVAL;
+ }
+
+ adc_cw = af913_div(state->config.clock, 1000000ul, 19);
+ buf[0] = (adc_cw >> 0) & 0xff;
+ buf[1] = (adc_cw >> 8) & 0xff;
+ buf[2] = (adc_cw >> 16) & 0xff;
+
+ ret = af9013_wr_regs(state, 0xd180, buf, 3);
+ if (ret)
+ goto err;
+
+ ret = af9013_wr_reg_bits(state, 0x9bd2, 0, 4, tmp);
if (ret)
- goto error;
+ goto err;
/* set I2C master clock */
- ret = af9013_write_reg(state, 0xd416, 0x14);
+ ret = af9013_wr_reg(state, 0xd416, 0x14);
if (ret)
- goto error;
+ goto err;
/* set 16 embx */
- ret = af9013_write_reg_bits(state, 0xd700, 1, 1, 1);
+ ret = af9013_wr_reg_bits(state, 0xd700, 1, 1, 1);
if (ret)
- goto error;
+ goto err;
/* set no trigger */
- ret = af9013_write_reg_bits(state, 0xd700, 2, 1, 0);
+ ret = af9013_wr_reg_bits(state, 0xd700, 2, 1, 0);
if (ret)
- goto error;
+ goto err;
/* set read-update bit for constellation */
- ret = af9013_write_reg_bits(state, 0xd371, 1, 1, 1);
+ ret = af9013_wr_reg_bits(state, 0xd371, 1, 1, 1);
if (ret)
- goto error;
+ goto err;
- /* enable FEC monitor */
- ret = af9013_write_reg_bits(state, 0xd392, 1, 1, 1);
+ /* settings for mp2if */
+ if (state->config.ts_mode == AF9013_TS_USB) {
+ /* AF9015 split PSB to 1.5k + 0.5k */
+ ret = af9013_wr_reg_bits(state, 0xd50b, 2, 1, 1);
+ if (ret)
+ goto err;
+ } else {
+ /* AF9013 change the output bit to data7 */
+ ret = af9013_wr_reg_bits(state, 0xd500, 3, 1, 1);
+ if (ret)
+ goto err;
+
+ /* AF9013 set mpeg to full speed */
+ ret = af9013_wr_reg_bits(state, 0xd502, 4, 1, 1);
+ if (ret)
+ goto err;
+ }
+
+ ret = af9013_wr_reg_bits(state, 0xd520, 4, 1, 1);
if (ret)
- goto error;
+ goto err;
/* load OFSM settings */
- deb_info("%s: load ofsm settings\n", __func__);
+ dbg("%s: load ofsm settings", __func__);
len = ARRAY_SIZE(ofsm_init);
init = ofsm_init;
for (i = 0; i < len; i++) {
- ret = af9013_write_reg_bits(state, init[i].addr, init[i].pos,
+ ret = af9013_wr_reg_bits(state, init[i].addr, init[i].pos,
init[i].len, init[i].val);
if (ret)
- goto error;
+ goto err;
}
/* load tuner specific settings */
- deb_info("%s: load tuner specific settings\n", __func__);
+ dbg("%s: load tuner specific settings", __func__);
switch (state->config.tuner) {
case AF9013_TUNER_MXL5003D:
len = ARRAY_SIZE(tuner_init_mxl5003d);
@@ -1260,65 +1197,133 @@ static int af9013_init(struct dvb_frontend *fe)
}
for (i = 0; i < len; i++) {
- ret = af9013_write_reg_bits(state, init[i].addr, init[i].pos,
+ ret = af9013_wr_reg_bits(state, init[i].addr, init[i].pos,
init[i].len, init[i].val);
if (ret)
- goto error;
+ goto err;
}
- /* set TS mode */
- deb_info("%s: setting ts mode\n", __func__);
- tmp0 = 0; /* parallel mode */
- tmp1 = 0; /* serial mode */
- switch (state->config.output_mode) {
- case AF9013_OUTPUT_MODE_PARALLEL:
- tmp0 = 1;
- break;
- case AF9013_OUTPUT_MODE_SERIAL:
- tmp1 = 1;
- break;
- case AF9013_OUTPUT_MODE_USB:
- /* usb mode for AF9015 */
- default:
- break;
- }
- ret = af9013_write_reg_bits(state, 0xd500, 1, 1, tmp0); /* parallel */
+ /* TS mode */
+ ret = af9013_wr_reg_bits(state, 0xd500, 1, 2, state->config.ts_mode);
if (ret)
- goto error;
- ret = af9013_write_reg_bits(state, 0xd500, 2, 1, tmp1); /* serial */
- if (ret)
- goto error;
+ goto err;
/* enable lock led */
- ret = af9013_lock_led(state, 1);
+ ret = af9013_wr_reg_bits(state, 0xd730, 0, 1, 1);
if (ret)
- goto error;
+ goto err;
- /* read values needed for signal strength calculation */
- ret = af9013_read_reg_bits(state, 0x9bee, 0, 1,
- &state->signal_strength_en);
- if (ret)
- goto error;
+ /* check if we support signal strength */
+ if (!state->signal_strength_en) {
+ ret = af9013_rd_reg_bits(state, 0x9bee, 0, 1,
+ &state->signal_strength_en);
+ if (ret)
+ goto err;
+ }
- if (state->signal_strength_en) {
- ret = af9013_read_reg(state, 0x9bbd, &state->rf_50);
+ /* read values needed for signal strength calculation */
+ if (state->signal_strength_en && !state->rf_50) {
+ ret = af9013_rd_reg(state, 0x9bbd, &state->rf_50);
if (ret)
- goto error;
- ret = af9013_read_reg(state, 0x9bd0, &state->rf_80);
+ goto err;
+
+ ret = af9013_rd_reg(state, 0x9bd0, &state->rf_80);
if (ret)
- goto error;
- ret = af9013_read_reg(state, 0x9be2, &state->if_50);
+ goto err;
+
+ ret = af9013_rd_reg(state, 0x9be2, &state->if_50);
if (ret)
- goto error;
- ret = af9013_read_reg(state, 0x9be4, &state->if_80);
+ goto err;
+
+ ret = af9013_rd_reg(state, 0x9be4, &state->if_80);
if (ret)
- goto error;
+ goto err;
}
-error:
+ /* SNR */
+ ret = af9013_wr_reg(state, 0xd2e2, 1);
+ if (ret)
+ goto err;
+
+ /* BER / UCB */
+ buf[0] = (10000 >> 0) & 0xff;
+ buf[1] = (10000 >> 8) & 0xff;
+ ret = af9013_wr_regs(state, 0xd385, buf, 2);
+ if (ret)
+ goto err;
+
+ /* enable FEC monitor */
+ ret = af9013_wr_reg_bits(state, 0xd392, 1, 1, 1);
+ if (ret)
+ goto err;
+
+ state->first_tune = true;
+ schedule_delayed_work(&state->statistics_work, msecs_to_jiffies(400));
+
+ return ret;
+err:
+ dbg("%s: failed=%d", __func__, ret);
return ret;
}
+static int af9013_sleep(struct dvb_frontend *fe)
+{
+ struct af9013_state *state = fe->demodulator_priv;
+ int ret;
+
+ dbg("%s", __func__);
+
+ /* stop statistics polling */
+ cancel_delayed_work_sync(&state->statistics_work);
+
+ /* disable lock led */
+ ret = af9013_wr_reg_bits(state, 0xd730, 0, 1, 0);
+ if (ret)
+ goto err;
+
+ /* power off */
+ ret = af9013_power_ctrl(state, 0);
+ if (ret)
+ goto err;
+
+ return ret;
+err:
+ dbg("%s: failed=%d", __func__, ret);
+ return ret;
+}
+
+static int af9013_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
+{
+ int ret;
+ struct af9013_state *state = fe->demodulator_priv;
+
+ dbg("%s: enable=%d", __func__, enable);
+
+ /* gate already open or close */
+ if (state->i2c_gate_state == enable)
+ return 0;
+
+ if (state->config.ts_mode == AF9013_TS_USB)
+ ret = af9013_wr_reg_bits(state, 0xd417, 3, 1, enable);
+ else
+ ret = af9013_wr_reg_bits(state, 0xd607, 2, 1, enable);
+ if (ret)
+ goto err;
+
+ state->i2c_gate_state = enable;
+
+ return ret;
+err:
+ dbg("%s: failed=%d", __func__, ret);
+ return ret;
+}
+
+static void af9013_release(struct dvb_frontend *fe)
+{
+ struct af9013_state *state = fe->demodulator_priv;
+ kfree(state);
+}
+
static struct dvb_frontend_ops af9013_ops;
static int af9013_download_firmware(struct af9013_state *state)
@@ -1332,11 +1337,11 @@ static int af9013_download_firmware(struct af9013_state *state)
msleep(100);
/* check whether firmware is already running */
- ret = af9013_read_reg(state, 0x98be, &val);
+ ret = af9013_rd_reg(state, 0x98be, &val);
if (ret)
- goto error;
+ goto err;
else
- deb_info("%s: firmware status:%02x\n", __func__, val);
+ dbg("%s: firmware status=%02x", __func__, val);
if (val == 0x0c) /* fw is running, no need for download */
goto exit;
@@ -1351,7 +1356,7 @@ static int af9013_download_firmware(struct af9013_state *state)
"Please see linux/Documentation/dvb/ for more details" \
" on firmware-problems. (%d)",
fw_file, ret);
- goto error;
+ goto err;
}
info("downloading firmware from file '%s'", fw_file);
@@ -1369,7 +1374,7 @@ static int af9013_download_firmware(struct af9013_state *state)
ret = af9013_write_ofsm_regs(state, 0x50fc,
fw_params, sizeof(fw_params));
if (ret)
- goto error_release;
+ goto err_release;
#define FW_ADDR 0x5100 /* firmware start address */
#define LEN_MAX 16 /* max packet size */
@@ -1383,24 +1388,24 @@ static int af9013_download_firmware(struct af9013_state *state)
(u8 *) &fw->data[fw->size - remaining], len);
if (ret) {
err("firmware download failed:%d", ret);
- goto error_release;
+ goto err_release;
}
}
/* request boot firmware */
- ret = af9013_write_reg(state, 0xe205, 1);
+ ret = af9013_wr_reg(state, 0xe205, 1);
if (ret)
- goto error_release;
+ goto err_release;
for (i = 0; i < 15; i++) {
msleep(100);
/* check firmware status */
- ret = af9013_read_reg(state, 0x98be, &val);
+ ret = af9013_rd_reg(state, 0x98be, &val);
if (ret)
- goto error_release;
+ goto err_release;
- deb_info("%s: firmware status:%02x\n", __func__, val);
+ dbg("%s: firmware status=%02x", __func__, val);
if (val == 0x0c || val == 0x04) /* success or fail */
break;
@@ -1408,43 +1413,21 @@ static int af9013_download_firmware(struct af9013_state *state)
if (val == 0x04) {
err("firmware did not run");
- ret = -1;
+ ret = -ENODEV;
} else if (val != 0x0c) {
err("firmware boot timeout");
- ret = -1;
+ ret = -ENODEV;
}
-error_release:
+err_release:
release_firmware(fw);
-error:
+err:
exit:
if (!ret)
info("found a '%s' in warm state.", af9013_ops.info.name);
return ret;
}
-static int af9013_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
-{
- int ret;
- struct af9013_state *state = fe->demodulator_priv;
- deb_info("%s: enable:%d\n", __func__, enable);
-
- if (state->config.output_mode == AF9013_OUTPUT_MODE_USB)
- ret = af9013_write_reg_bits(state, 0xd417, 3, 1, enable);
- else
- ret = af9013_write_reg_bits(state, 0xd607, 2, 1, enable);
-
- return ret;
-}
-
-static void af9013_release(struct dvb_frontend *fe)
-{
- struct af9013_state *state = fe->demodulator_priv;
- kfree(state);
-}
-
-static struct dvb_frontend_ops af9013_ops;
-
struct dvb_frontend *af9013_attach(const struct af9013_config *config,
struct i2c_adapter *i2c)
{
@@ -1455,91 +1438,65 @@ struct dvb_frontend *af9013_attach(const struct af9013_config *config,
/* allocate memory for the internal state */
state = kzalloc(sizeof(struct af9013_state), GFP_KERNEL);
if (state == NULL)
- goto error;
+ goto err;
/* setup the state */
state->i2c = i2c;
memcpy(&state->config, config, sizeof(struct af9013_config));
/* download firmware */
- if (state->config.output_mode != AF9013_OUTPUT_MODE_USB) {
+ if (state->config.ts_mode != AF9013_TS_USB) {
ret = af9013_download_firmware(state);
if (ret)
- goto error;
+ goto err;
}
/* firmware version */
- for (i = 0; i < 4; i++) {
- ret = af9013_read_reg(state, 0x5103 + i, &buf[i]);
- if (ret)
- goto error;
- }
- info("firmware version:%d.%d.%d.%d", buf[0], buf[1], buf[2], buf[3]);
-
- /* chip version */
- ret = af9013_read_reg_bits(state, 0xd733, 4, 4, &buf[2]);
+ ret = af9013_rd_regs(state, 0x5103, buf, 4);
if (ret)
- goto error;
+ goto err;
- /* ROM version */
- for (i = 0; i < 2; i++) {
- ret = af9013_read_reg(state, 0x116b + i, &buf[i]);
- if (ret)
- goto error;
- }
- deb_info("%s: chip version:%d ROM version:%d.%d\n", __func__,
- buf[2], buf[0], buf[1]);
-
- /* settings for mp2if */
- if (state->config.output_mode == AF9013_OUTPUT_MODE_USB) {
- /* AF9015 split PSB to 1.5k + 0.5k */
- ret = af9013_write_reg_bits(state, 0xd50b, 2, 1, 1);
- } else {
- /* AF9013 change the output bit to data7 */
- ret = af9013_write_reg_bits(state, 0xd500, 3, 1, 1);
- if (ret)
- goto error;
- /* AF9013 set mpeg to full speed */
- ret = af9013_write_reg_bits(state, 0xd502, 4, 1, 1);
- }
- if (ret)
- goto error;
- ret = af9013_write_reg_bits(state, 0xd520, 4, 1, 1);
- if (ret)
- goto error;
+ info("firmware version %d.%d.%d.%d", buf[0], buf[1], buf[2], buf[3]);
/* set GPIOs */
for (i = 0; i < sizeof(state->config.gpio); i++) {
ret = af9013_set_gpio(state, i, state->config.gpio[i]);
if (ret)
- goto error;
+ goto err;
}
/* create dvb_frontend */
- memcpy(&state->frontend.ops, &af9013_ops,
+ memcpy(&state->fe.ops, &af9013_ops,
sizeof(struct dvb_frontend_ops));
- state->frontend.demodulator_priv = state;
+ state->fe.demodulator_priv = state;
+
+ INIT_DELAYED_WORK(&state->statistics_work, af9013_statistics_work);
- return &state->frontend;
-error:
+ return &state->fe;
+err:
kfree(state);
return NULL;
}
EXPORT_SYMBOL(af9013_attach);
static struct dvb_frontend_ops af9013_ops = {
+ .delsys = { SYS_DVBT },
.info = {
- .name = "Afatech AF9013 DVB-T",
- .type = FE_OFDM,
+ .name = "Afatech AF9013",
.frequency_min = 174000000,
.frequency_max = 862000000,
.frequency_stepsize = 250000,
.frequency_tolerance = 0,
- .caps =
- FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
- FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
- FE_CAN_QPSK | FE_CAN_QAM_16 |
- FE_CAN_QAM_64 | FE_CAN_QAM_AUTO |
+ .caps = FE_CAN_FEC_1_2 |
+ FE_CAN_FEC_2_3 |
+ FE_CAN_FEC_3_4 |
+ FE_CAN_FEC_5_6 |
+ FE_CAN_FEC_7_8 |
+ FE_CAN_FEC_AUTO |
+ FE_CAN_QPSK |
+ FE_CAN_QAM_16 |
+ FE_CAN_QAM_64 |
+ FE_CAN_QAM_AUTO |
FE_CAN_TRANSMISSION_MODE_AUTO |
FE_CAN_GUARD_INTERVAL_AUTO |
FE_CAN_HIERARCHY_AUTO |
@@ -1548,24 +1505,22 @@ static struct dvb_frontend_ops af9013_ops = {
},
.release = af9013_release,
+
.init = af9013_init,
.sleep = af9013_sleep,
- .i2c_gate_ctrl = af9013_i2c_gate_ctrl,
+ .get_tune_settings = af9013_get_tune_settings,
.set_frontend = af9013_set_frontend,
.get_frontend = af9013_get_frontend,
- .get_tune_settings = af9013_get_tune_settings,
-
.read_status = af9013_read_status,
- .read_ber = af9013_read_ber,
- .read_signal_strength = af9013_read_signal_strength,
.read_snr = af9013_read_snr,
+ .read_signal_strength = af9013_read_signal_strength,
+ .read_ber = af9013_read_ber,
.read_ucblocks = af9013_read_ucblocks,
-};
-module_param_named(debug, af9013_debug, int, 0644);
-MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
+ .i2c_gate_ctrl = af9013_i2c_gate_ctrl,
+};
MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
MODULE_DESCRIPTION("Afatech AF9013 DVB-T demodulator driver");
diff --git a/drivers/media/dvb/frontends/af9013.h b/drivers/media/dvb/frontends/af9013.h
index e53d873f7555..b973fc5a0384 100644
--- a/drivers/media/dvb/frontends/af9013.h
+++ b/drivers/media/dvb/frontends/af9013.h
@@ -2,6 +2,7 @@
* Afatech AF9013 demodulator driver
*
* Copyright (C) 2007 Antti Palosaari <crope@iki.fi>
+ * Copyright (C) 2011 Antti Palosaari <crope@iki.fi>
*
* Thanks to Afatech who kindly provided information.
*
@@ -21,33 +22,11 @@
*
*/
-#ifndef _AF9013_H_
-#define _AF9013_H_
+#ifndef AF9013_H
+#define AF9013_H
#include <linux/dvb/frontend.h>
-enum af9013_ts_mode {
- AF9013_OUTPUT_MODE_PARALLEL,
- AF9013_OUTPUT_MODE_SERIAL,
- AF9013_OUTPUT_MODE_USB, /* only for AF9015 */
-};
-
-enum af9013_tuner {
- AF9013_TUNER_MXL5003D = 3, /* MaxLinear */
- AF9013_TUNER_MXL5005D = 13, /* MaxLinear */
- AF9013_TUNER_MXL5005R = 30, /* MaxLinear */
- AF9013_TUNER_ENV77H11D5 = 129, /* Panasonic */
- AF9013_TUNER_MT2060 = 130, /* Microtune */
- AF9013_TUNER_MC44S803 = 133, /* Freescale */
- AF9013_TUNER_QT1010 = 134, /* Quantek */
- AF9013_TUNER_UNKNOWN = 140, /* for can tuners ? */
- AF9013_TUNER_MT2060_2 = 147, /* Microtune */
- AF9013_TUNER_TDA18271 = 156, /* NXP */
- AF9013_TUNER_QT1010A = 162, /* Quantek */
- AF9013_TUNER_MXL5007T = 177, /* MaxLinear */
- AF9013_TUNER_TDA18218 = 179, /* NXP */
-};
-
/* AF9013/5 GPIOs (mostly guessed)
demod#1-gpio#0 - set demod#2 i2c-addr for dual devices
demod#1-gpio#1 - xtal setting (?)
@@ -55,44 +34,74 @@ enum af9013_tuner {
demod#2-gpio#0 - tuner#2
demod#2-gpio#1 - xtal setting (?)
*/
+
+struct af9013_config {
+ /*
+ * I2C address
+ */
+ u8 i2c_addr;
+
+ /*
+ * clock
+ * 20480000, 25000000, 28000000, 28800000
+ */
+ u32 clock;
+
+ /*
+ * tuner
+ */
+#define AF9013_TUNER_MXL5003D 3 /* MaxLinear */
+#define AF9013_TUNER_MXL5005D 13 /* MaxLinear */
+#define AF9013_TUNER_MXL5005R 30 /* MaxLinear */
+#define AF9013_TUNER_ENV77H11D5 129 /* Panasonic */
+#define AF9013_TUNER_MT2060 130 /* Microtune */
+#define AF9013_TUNER_MC44S803 133 /* Freescale */
+#define AF9013_TUNER_QT1010 134 /* Quantek */
+#define AF9013_TUNER_UNKNOWN 140 /* for can tuners ? */
+#define AF9013_TUNER_MT2060_2 147 /* Microtune */
+#define AF9013_TUNER_TDA18271 156 /* NXP */
+#define AF9013_TUNER_QT1010A 162 /* Quantek */
+#define AF9013_TUNER_MXL5007T 177 /* MaxLinear */
+#define AF9013_TUNER_TDA18218 179 /* NXP */
+ u8 tuner;
+
+ /*
+ * IF frequency
+ */
+ u32 if_frequency;
+
+ /*
+ * TS settings
+ */
+#define AF9013_TS_USB 0
+#define AF9013_TS_PARALLEL 1
+#define AF9013_TS_SERIAL 2
+ u8 ts_mode:2;
+
+ /*
+ * input spectrum inversion
+ */
+ bool spec_inv;
+
+ /*
+ * firmware API version
+ */
+ u8 api_version[4];
+
+ /*
+ * GPIOs
+ */
#define AF9013_GPIO_ON (1 << 0)
#define AF9013_GPIO_EN (1 << 1)
#define AF9013_GPIO_O (1 << 2)
#define AF9013_GPIO_I (1 << 3)
-
#define AF9013_GPIO_LO (AF9013_GPIO_ON|AF9013_GPIO_EN)
#define AF9013_GPIO_HI (AF9013_GPIO_ON|AF9013_GPIO_EN|AF9013_GPIO_O)
-
#define AF9013_GPIO_TUNER_ON (AF9013_GPIO_ON|AF9013_GPIO_EN)
#define AF9013_GPIO_TUNER_OFF (AF9013_GPIO_ON|AF9013_GPIO_EN|AF9013_GPIO_O)
-
-struct af9013_config {
- /* demodulator's I2C address */
- u8 demod_address;
-
- /* frequencies in kHz */
- u32 adc_clock;
-
- /* tuner ID */
- u8 tuner;
-
- /* tuner IF */
- u16 tuner_if;
-
- /* TS data output mode */
- u8 output_mode:2;
-
- /* RF spectrum inversion */
- u8 rf_spec_inv:1;
-
- /* API version */
- u8 api_version[4];
-
- /* GPIOs */
u8 gpio[4];
};
-
#if defined(CONFIG_DVB_AF9013) || \
(defined(CONFIG_DVB_AF9013_MODULE) && defined(MODULE))
extern struct dvb_frontend *af9013_attach(const struct af9013_config *config,
@@ -106,4 +115,4 @@ const struct af9013_config *config, struct i2c_adapter *i2c)
}
#endif /* CONFIG_DVB_AF9013 */
-#endif /* _AF9013_H_ */
+#endif /* AF9013_H */
diff --git a/drivers/media/dvb/frontends/af9013_priv.h b/drivers/media/dvb/frontends/af9013_priv.h
index e00b2a4a2db6..fa848af6e9b4 100644
--- a/drivers/media/dvb/frontends/af9013_priv.h
+++ b/drivers/media/dvb/frontends/af9013_priv.h
@@ -2,6 +2,7 @@
* Afatech AF9013 demodulator driver
*
* Copyright (C) 2007 Antti Palosaari <crope@iki.fi>
+ * Copyright (C) 2011 Antti Palosaari <crope@iki.fi>
*
* Thanks to Afatech who kindly provided information.
*
@@ -21,24 +22,19 @@
*
*/
-#ifndef _AF9013_PRIV_
-#define _AF9013_PRIV_
+#ifndef AF9013_PRIV_H
+#define AF9013_PRIV_H
-#define LOG_PREFIX "af9013"
-extern int af9013_debug;
-
-#define dprintk(var, level, args...) \
- do { if ((var & level)) printk(args); } while (0)
+#include "dvb_frontend.h"
+#include "af9013.h"
+#include <linux/firmware.h>
-#define debug_dump(b, l, func) {\
- int loop_; \
- for (loop_ = 0; loop_ < l; loop_++) \
- func("%02x ", b[loop_]); \
- func("\n");\
-}
-
-#define deb_info(args...) dprintk(af9013_debug, 0x01, args)
+#define LOG_PREFIX "af9013"
+#undef dbg
+#define dbg(f, arg...) \
+ if (af9013_debug) \
+ printk(KERN_INFO LOG_PREFIX": " f "\n" , ## arg)
#undef err
#define err(f, arg...) printk(KERN_ERR LOG_PREFIX": " f "\n" , ## arg)
#undef info
@@ -48,70 +44,71 @@ extern int af9013_debug;
#define AF9013_DEFAULT_FIRMWARE "dvb-fe-af9013.fw"
-struct regdesc {
+struct af9013_reg_bit {
u16 addr;
u8 pos:4;
u8 len:4;
u8 val;
};
-struct snr_table {
+struct af9013_snr {
u32 val;
u8 snr;
};
-struct coeff {
- u32 adc_clock;
- fe_bandwidth_t bw;
+struct af9013_coeff {
+ u32 clock;
+ u32 bandwidth_hz;
u8 val[24];
};
/* pre-calculated coeff lookup table */
-static struct coeff coeff_table[] = {
+static const struct af9013_coeff coeff_lut[] = {
/* 28.800 MHz */
- { 28800, BANDWIDTH_8_MHZ, { 0x02, 0x8a, 0x28, 0xa3, 0x05, 0x14,
+ { 28800000, 8000000, { 0x02, 0x8a, 0x28, 0xa3, 0x05, 0x14,
0x51, 0x11, 0x00, 0xa2, 0x8f, 0x3d, 0x00, 0xa2, 0x8a,
0x29, 0x00, 0xa2, 0x85, 0x14, 0x01, 0x45, 0x14, 0x14 } },
- { 28800, BANDWIDTH_7_MHZ, { 0x02, 0x38, 0xe3, 0x8e, 0x04, 0x71,
+ { 28800000, 7000000, { 0x02, 0x38, 0xe3, 0x8e, 0x04, 0x71,
0xc7, 0x07, 0x00, 0x8e, 0x3d, 0x55, 0x00, 0x8e, 0x38,
0xe4, 0x00, 0x8e, 0x34, 0x72, 0x01, 0x1c, 0x71, 0x32 } },
- { 28800, BANDWIDTH_6_MHZ, { 0x01, 0xe7, 0x9e, 0x7a, 0x03, 0xcf,
+ { 28800000, 6000000, { 0x01, 0xe7, 0x9e, 0x7a, 0x03, 0xcf,
0x3c, 0x3d, 0x00, 0x79, 0xeb, 0x6e, 0x00, 0x79, 0xe7,
0x9e, 0x00, 0x79, 0xe3, 0xcf, 0x00, 0xf3, 0xcf, 0x0f } },
/* 20.480 MHz */
- { 20480, BANDWIDTH_8_MHZ, { 0x03, 0x92, 0x49, 0x26, 0x07, 0x24,
+ { 20480000, 8000000, { 0x03, 0x92, 0x49, 0x26, 0x07, 0x24,
0x92, 0x13, 0x00, 0xe4, 0x99, 0x6e, 0x00, 0xe4, 0x92,
0x49, 0x00, 0xe4, 0x8b, 0x25, 0x01, 0xc9, 0x24, 0x25 } },
- { 20480, BANDWIDTH_7_MHZ, { 0x03, 0x20, 0x00, 0x01, 0x06, 0x40,
+ { 20480000, 7000000, { 0x03, 0x20, 0x00, 0x01, 0x06, 0x40,
0x00, 0x00, 0x00, 0xc8, 0x06, 0x40, 0x00, 0xc8, 0x00,
0x00, 0x00, 0xc7, 0xf9, 0xc0, 0x01, 0x90, 0x00, 0x00 } },
- { 20480, BANDWIDTH_6_MHZ, { 0x02, 0xad, 0xb6, 0xdc, 0x05, 0x5b,
+ { 20480000, 6000000, { 0x02, 0xad, 0xb6, 0xdc, 0x05, 0x5b,
0x6d, 0x2e, 0x00, 0xab, 0x73, 0x13, 0x00, 0xab, 0x6d,
0xb7, 0x00, 0xab, 0x68, 0x5c, 0x01, 0x56, 0xdb, 0x1c } },
/* 28.000 MHz */
- { 28000, BANDWIDTH_8_MHZ, { 0x02, 0x9c, 0xbc, 0x15, 0x05, 0x39,
+ { 28000000, 8000000, { 0x02, 0x9c, 0xbc, 0x15, 0x05, 0x39,
0x78, 0x0a, 0x00, 0xa7, 0x34, 0x3f, 0x00, 0xa7, 0x2f,
0x05, 0x00, 0xa7, 0x29, 0xcc, 0x01, 0x4e, 0x5e, 0x03 } },
- { 28000, BANDWIDTH_7_MHZ, { 0x02, 0x49, 0x24, 0x92, 0x04, 0x92,
+ { 28000000, 7000000, { 0x02, 0x49, 0x24, 0x92, 0x04, 0x92,
0x49, 0x09, 0x00, 0x92, 0x4d, 0xb7, 0x00, 0x92, 0x49,
0x25, 0x00, 0x92, 0x44, 0x92, 0x01, 0x24, 0x92, 0x12 } },
- { 28000, BANDWIDTH_6_MHZ, { 0x01, 0xf5, 0x8d, 0x10, 0x03, 0xeb,
+ { 28000000, 6000000, { 0x01, 0xf5, 0x8d, 0x10, 0x03, 0xeb,
0x1a, 0x08, 0x00, 0x7d, 0x67, 0x2f, 0x00, 0x7d, 0x63,
0x44, 0x00, 0x7d, 0x5f, 0x59, 0x00, 0xfa, 0xc6, 0x22 } },
/* 25.000 MHz */
- { 25000, BANDWIDTH_8_MHZ, { 0x02, 0xec, 0xfb, 0x9d, 0x05, 0xd9,
+ { 25000000, 8000000, { 0x02, 0xec, 0xfb, 0x9d, 0x05, 0xd9,
0xf7, 0x0e, 0x00, 0xbb, 0x44, 0xc1, 0x00, 0xbb, 0x3e,
0xe7, 0x00, 0xbb, 0x39, 0x0d, 0x01, 0x76, 0x7d, 0x34 } },
- { 25000, BANDWIDTH_7_MHZ, { 0x02, 0x8f, 0x5c, 0x29, 0x05, 0x1e,
+ { 25000000, 7000000, { 0x02, 0x8f, 0x5c, 0x29, 0x05, 0x1e,
0xb8, 0x14, 0x00, 0xa3, 0xdc, 0x29, 0x00, 0xa3, 0xd7,
0x0a, 0x00, 0xa3, 0xd1, 0xec, 0x01, 0x47, 0xae, 0x05 } },
- { 25000, BANDWIDTH_6_MHZ, { 0x02, 0x31, 0xbc, 0xb5, 0x04, 0x63,
+ { 25000000, 6000000, { 0x02, 0x31, 0xbc, 0xb5, 0x04, 0x63,
0x79, 0x1b, 0x00, 0x8c, 0x73, 0x91, 0x00, 0x8c, 0x6f,
0x2d, 0x00, 0x8c, 0x6a, 0xca, 0x01, 0x18, 0xde, 0x17 } },
};
/* QPSK SNR lookup table */
-static struct snr_table qpsk_snr_table[] = {
+static const struct af9013_snr qpsk_snr_lut[] = {
+ { 0x000000, 0 },
{ 0x0b4771, 0 },
{ 0x0c1aed, 1 },
{ 0x0d0d27, 2 },
@@ -131,7 +128,8 @@ static struct snr_table qpsk_snr_table[] = {
};
/* QAM16 SNR lookup table */
-static struct snr_table qam16_snr_table[] = {
+static const struct af9013_snr qam16_snr_lut[] = {
+ { 0x000000, 0 },
{ 0x05eb62, 5 },
{ 0x05fecf, 6 },
{ 0x060b80, 7 },
@@ -151,7 +149,8 @@ static struct snr_table qam16_snr_table[] = {
};
/* QAM64 SNR lookup table */
-static struct snr_table qam64_snr_table[] = {
+static const struct af9013_snr qam64_snr_lut[] = {
+ { 0x000000, 0 },
{ 0x03109b, 12 },
{ 0x0310d4, 13 },
{ 0x031920, 14 },
@@ -170,7 +169,7 @@ static struct snr_table qam64_snr_table[] = {
{ 0xffffff, 27 },
};
-static struct regdesc ofsm_init[] = {
+static const struct af9013_reg_bit ofsm_init[] = {
{ 0xd73a, 0, 8, 0xa1 },
{ 0xd73b, 0, 8, 0x1f },
{ 0xd73c, 4, 4, 0x0a },
@@ -252,7 +251,7 @@ static struct regdesc ofsm_init[] = {
/* Panasonic ENV77H11D5 tuner init
AF9013_TUNER_ENV77H11D5 = 129 */
-static struct regdesc tuner_init_env77h11d5[] = {
+static const struct af9013_reg_bit tuner_init_env77h11d5[] = {
{ 0x9bd5, 0, 8, 0x01 },
{ 0x9bd6, 0, 8, 0x03 },
{ 0x9bbe, 0, 8, 0x01 },
@@ -318,7 +317,7 @@ static struct regdesc tuner_init_env77h11d5[] = {
/* Microtune MT2060 tuner init
AF9013_TUNER_MT2060 = 130 */
-static struct regdesc tuner_init_mt2060[] = {
+static const struct af9013_reg_bit tuner_init_mt2060[] = {
{ 0x9bd5, 0, 8, 0x01 },
{ 0x9bd6, 0, 8, 0x07 },
{ 0xd1a0, 1, 1, 0x01 },
@@ -395,7 +394,7 @@ static struct regdesc tuner_init_mt2060[] = {
/* Microtune MT2060 tuner init
AF9013_TUNER_MT2060_2 = 147 */
-static struct regdesc tuner_init_mt2060_2[] = {
+static const struct af9013_reg_bit tuner_init_mt2060_2[] = {
{ 0x9bd5, 0, 8, 0x01 },
{ 0x9bd6, 0, 8, 0x06 },
{ 0x9bbe, 0, 8, 0x01 },
@@ -462,7 +461,7 @@ static struct regdesc tuner_init_mt2060_2[] = {
/* MaxLinear MXL5003 tuner init
AF9013_TUNER_MXL5003D = 3 */
-static struct regdesc tuner_init_mxl5003d[] = {
+static const struct af9013_reg_bit tuner_init_mxl5003d[] = {
{ 0x9bd5, 0, 8, 0x01 },
{ 0x9bd6, 0, 8, 0x09 },
{ 0xd1a0, 1, 1, 0x01 },
@@ -534,7 +533,7 @@ static struct regdesc tuner_init_mxl5003d[] = {
AF9013_TUNER_MXL5005D = 13
AF9013_TUNER_MXL5005R = 30
AF9013_TUNER_MXL5007T = 177 */
-static struct regdesc tuner_init_mxl5005[] = {
+static const struct af9013_reg_bit tuner_init_mxl5005[] = {
{ 0x9bd5, 0, 8, 0x01 },
{ 0x9bd6, 0, 8, 0x07 },
{ 0xd1a0, 1, 1, 0x01 },
@@ -613,7 +612,7 @@ static struct regdesc tuner_init_mxl5005[] = {
/* Quantek QT1010 tuner init
AF9013_TUNER_QT1010 = 134
AF9013_TUNER_QT1010A = 162 */
-static struct regdesc tuner_init_qt1010[] = {
+static const struct af9013_reg_bit tuner_init_qt1010[] = {
{ 0x9bd5, 0, 8, 0x01 },
{ 0x9bd6, 0, 8, 0x09 },
{ 0xd1a0, 1, 1, 0x01 },
@@ -690,7 +689,7 @@ static struct regdesc tuner_init_qt1010[] = {
/* Freescale MC44S803 tuner init
AF9013_TUNER_MC44S803 = 133 */
-static struct regdesc tuner_init_mc44s803[] = {
+static const struct af9013_reg_bit tuner_init_mc44s803[] = {
{ 0x9bd5, 0, 8, 0x01 },
{ 0x9bd6, 0, 8, 0x06 },
{ 0xd1a0, 1, 1, 0x01 },
@@ -772,7 +771,7 @@ static struct regdesc tuner_init_mc44s803[] = {
/* unknown, probably for tin can tuner, tuner init
AF9013_TUNER_UNKNOWN = 140 */
-static struct regdesc tuner_init_unknown[] = {
+static const struct af9013_reg_bit tuner_init_unknown[] = {
{ 0x9bd5, 0, 8, 0x01 },
{ 0x9bd6, 0, 8, 0x02 },
{ 0xd1a0, 1, 1, 0x01 },
@@ -845,7 +844,7 @@ static struct regdesc tuner_init_unknown[] = {
/* NXP TDA18271 & TDA18218 tuner init
AF9013_TUNER_TDA18271 = 156
AF9013_TUNER_TDA18218 = 179 */
-static struct regdesc tuner_init_tda18271[] = {
+static const struct af9013_reg_bit tuner_init_tda18271[] = {
{ 0x9bd5, 0, 8, 0x01 },
{ 0x9bd6, 0, 8, 0x04 },
{ 0xd1a0, 1, 1, 0x01 },
@@ -920,4 +919,4 @@ static struct regdesc tuner_init_tda18271[] = {
{ 0x9bee, 0, 1, 0x01 },
};
-#endif /* _AF9013_PRIV_ */
+#endif /* AF9013_PRIV_H */
diff --git a/drivers/media/dvb/frontends/atbm8830.c b/drivers/media/dvb/frontends/atbm8830.c
index 1539ea1f81ac..a2261ea2cf82 100644
--- a/drivers/media/dvb/frontends/atbm8830.c
+++ b/drivers/media/dvb/frontends/atbm8830.c
@@ -267,8 +267,7 @@ static void atbm8830_release(struct dvb_frontend *fe)
kfree(state);
}
-static int atbm8830_set_fe(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *fe_params)
+static int atbm8830_set_fe(struct dvb_frontend *fe)
{
struct atbm_state *priv = fe->demodulator_priv;
int i;
@@ -279,7 +278,7 @@ static int atbm8830_set_fe(struct dvb_frontend *fe,
if (fe->ops.tuner_ops.set_params) {
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
- fe->ops.tuner_ops.set_params(fe, fe_params);
+ fe->ops.tuner_ops.set_params(fe);
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 0);
}
@@ -298,31 +297,31 @@ static int atbm8830_set_fe(struct dvb_frontend *fe,
return 0;
}
-static int atbm8830_get_fe(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *fe_params)
+static int atbm8830_get_fe(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
dprintk("%s\n", __func__);
/* TODO: get real readings from device */
/* inversion status */
- fe_params->inversion = INVERSION_OFF;
+ c->inversion = INVERSION_OFF;
/* bandwidth */
- fe_params->u.ofdm.bandwidth = BANDWIDTH_8_MHZ;
+ c->bandwidth_hz = 8000000;
- fe_params->u.ofdm.code_rate_HP = FEC_AUTO;
- fe_params->u.ofdm.code_rate_LP = FEC_AUTO;
+ c->code_rate_HP = FEC_AUTO;
+ c->code_rate_LP = FEC_AUTO;
- fe_params->u.ofdm.constellation = QAM_AUTO;
+ c->modulation = QAM_AUTO;
/* transmission mode */
- fe_params->u.ofdm.transmission_mode = TRANSMISSION_MODE_AUTO;
+ c->transmission_mode = TRANSMISSION_MODE_AUTO;
/* guard interval */
- fe_params->u.ofdm.guard_interval = GUARD_INTERVAL_AUTO;
+ c->guard_interval = GUARD_INTERVAL_AUTO;
/* hierarchy */
- fe_params->u.ofdm.hierarchy_information = HIERARCHY_NONE;
+ c->hierarchy = HIERARCHY_NONE;
return 0;
}
@@ -429,9 +428,9 @@ static int atbm8830_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
}
static struct dvb_frontend_ops atbm8830_ops = {
+ .delsys = { SYS_DMBTH },
.info = {
.name = "AltoBeam ATBM8830/8831 DMB-TH",
- .type = FE_OFDM,
.frequency_min = 474000000,
.frequency_max = 858000000,
.frequency_stepsize = 10000,
diff --git a/drivers/media/dvb/frontends/au8522_dig.c b/drivers/media/dvb/frontends/au8522_dig.c
index 1d572940e243..c688b95df486 100644
--- a/drivers/media/dvb/frontends/au8522_dig.c
+++ b/drivers/media/dvb/frontends/au8522_dig.c
@@ -576,19 +576,19 @@ static int au8522_enable_modulation(struct dvb_frontend *fe,
}
/* Talk to the demod, set the FEC, GUARD, QAM settings etc */
-static int au8522_set_frontend(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *p)
+static int au8522_set_frontend(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct au8522_state *state = fe->demodulator_priv;
int ret = -EINVAL;
- dprintk("%s(frequency=%d)\n", __func__, p->frequency);
+ dprintk("%s(frequency=%d)\n", __func__, c->frequency);
- if ((state->current_frequency == p->frequency) &&
- (state->current_modulation == p->u.vsb.modulation))
+ if ((state->current_frequency == c->frequency) &&
+ (state->current_modulation == c->modulation))
return 0;
- au8522_enable_modulation(fe, p->u.vsb.modulation);
+ au8522_enable_modulation(fe, c->modulation);
/* Allow the demod to settle */
msleep(100);
@@ -596,7 +596,7 @@ static int au8522_set_frontend(struct dvb_frontend *fe,
if (fe->ops.tuner_ops.set_params) {
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
- ret = fe->ops.tuner_ops.set_params(fe, p);
+ ret = fe->ops.tuner_ops.set_params(fe);
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 0);
}
@@ -604,7 +604,7 @@ static int au8522_set_frontend(struct dvb_frontend *fe,
if (ret < 0)
return ret;
- state->current_frequency = p->frequency;
+ state->current_frequency = c->frequency;
return 0;
}
@@ -862,7 +862,36 @@ static int au8522_read_snr(struct dvb_frontend *fe, u16 *snr)
static int au8522_read_signal_strength(struct dvb_frontend *fe,
u16 *signal_strength)
{
- return au8522_read_snr(fe, signal_strength);
+ /* borrowed from lgdt330x.c
+ *
+ * Calculate strength from SNR up to 35dB
+ * Even though the SNR can go higher than 35dB,
+ * there is some comfort factor in having a range of
+ * strong signals that can show at 100%
+ */
+ u16 snr;
+ u32 tmp;
+ int ret = au8522_read_snr(fe, &snr);
+
+ *signal_strength = 0;
+
+ if (0 == ret) {
+ /* The following calculation method was chosen
+ * purely for the sake of code re-use from the
+ * other demod drivers that use this method */
+
+ /* Convert from SNR in dB * 10 to 8.24 fixed-point */
+ tmp = (snr * ((1 << 24) / 10));
+
+ /* Scale the 8.24 fixed-point value so the range
+ * 0 - 35*2^24 maps onto 0 - 65535 */
+ if (tmp >= 8960 * 0x10000)
+ *signal_strength = 0xffff;
+ else
+ *signal_strength = tmp / 8960;
+ }
+
+ return ret;
}
static int au8522_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
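The scaling above maps SNR, reported in dB x 10, onto the 0-65535 strength range by converting to 8.24 fixed point and clamping at 35 dB (8960 * 0x10000 == 35 * 2^24). A minimal standalone sketch of the same arithmetic; snr_to_strength and the sample readings are illustrative only, not part of the driver:

#include <stdint.h>
#include <stdio.h>

/* Same mapping as the hunk above: SNR in dB * 10 -> 0..65535, clamped at 35 dB. */
static uint16_t snr_to_strength(uint16_t snr_db_x10)
{
	uint32_t tmp = snr_db_x10 * ((1 << 24) / 10);	/* 8.24 fixed point */

	if (tmp >= 8960 * 0x10000)			/* 35 * 2^24 */
		return 0xffff;
	return tmp / 8960;				/* 0..35*2^24 -> 0..65535 */
}

int main(void)
{
	printf("%u\n", snr_to_strength(175));	/* hypothetical 17.5 dB -> 32767 */
	printf("%u\n", snr_to_strength(350));	/* hypothetical 35.0 dB -> 65535 */
	return 0;
}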
@@ -882,13 +911,13 @@ static int au8522_read_ber(struct dvb_frontend *fe, u32 *ber)
return au8522_read_ucblocks(fe, ber);
}
-static int au8522_get_frontend(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *p)
+static int au8522_get_frontend(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct au8522_state *state = fe->demodulator_priv;
- p->frequency = state->current_frequency;
- p->u.vsb.modulation = state->current_modulation;
+ c->frequency = state->current_frequency;
+ c->modulation = state->current_modulation;
return 0;
}
@@ -981,10 +1010,9 @@ error:
EXPORT_SYMBOL(au8522_attach);
static struct dvb_frontend_ops au8522_ops = {
-
+ .delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
.info = {
.name = "Auvitek AU8522 QAM/8VSB Frontend",
- .type = FE_ATSC,
.frequency_min = 54000000,
.frequency_max = 858000000,
.frequency_stepsize = 62500,
diff --git a/drivers/media/dvb/frontends/bcm3510.c b/drivers/media/dvb/frontends/bcm3510.c
index 8aff5868a5e1..033cd7ad3ca2 100644
--- a/drivers/media/dvb/frontends/bcm3510.c
+++ b/drivers/media/dvb/frontends/bcm3510.c
@@ -479,16 +479,16 @@ static int bcm3510_set_freq(struct bcm3510_state* st,u32 freq)
return -EINVAL;
}
-static int bcm3510_set_frontend(struct dvb_frontend* fe,
- struct dvb_frontend_parameters *p)
+static int bcm3510_set_frontend(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct bcm3510_state* st = fe->demodulator_priv;
struct bcm3510_hab_cmd_ext_acquire cmd;
struct bcm3510_hab_cmd_bert_control bert;
int ret;
memset(&cmd,0,sizeof(cmd));
- switch (p->u.vsb.modulation) {
+ switch (c->modulation) {
case QAM_256:
cmd.ACQUIRE0.MODE = 0x1;
cmd.ACQUIRE1.SYM_RATE = 0x1;
@@ -499,7 +499,8 @@ static int bcm3510_set_frontend(struct dvb_frontend* fe,
cmd.ACQUIRE1.SYM_RATE = 0x2;
cmd.ACQUIRE1.IF_FREQ = 0x1;
break;
-/* case QAM_256:
+#if 0
+ case QAM_256:
cmd.ACQUIRE0.MODE = 0x3;
break;
case QAM_128:
@@ -513,7 +514,8 @@ static int bcm3510_set_frontend(struct dvb_frontend* fe,
break;
case QAM_16:
cmd.ACQUIRE0.MODE = 0x7;
- break;*/
+ break;
+#endif
case VSB_8:
cmd.ACQUIRE0.MODE = 0x8;
cmd.ACQUIRE1.SYM_RATE = 0x0;
@@ -552,7 +554,8 @@ static int bcm3510_set_frontend(struct dvb_frontend* fe,
bcm3510_bert_reset(st);
- if ((ret = bcm3510_set_freq(st,p->frequency)) < 0)
+ ret = bcm3510_set_freq(st, c->frequency);
+ if (ret < 0)
return ret;
memset(&st->status1,0,sizeof(st->status1));
@@ -819,10 +822,9 @@ error:
EXPORT_SYMBOL(bcm3510_attach);
static struct dvb_frontend_ops bcm3510_ops = {
-
+ .delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
.info = {
.name = "Broadcom BCM3510 VSB/QAM frontend",
- .type = FE_ATSC,
.frequency_min = 54000000,
.frequency_max = 803000000,
/* stepsize is just a guess */
diff --git a/drivers/media/dvb/frontends/bsbe1.h b/drivers/media/dvb/frontends/bsbe1.h
index 5e431ebd089b..53e4d0dbb745 100644
--- a/drivers/media/dvb/frontends/bsbe1.h
+++ b/drivers/media/dvb/frontends/bsbe1.h
@@ -69,18 +69,19 @@ static int alps_bsbe1_set_symbol_rate(struct dvb_frontend* fe, u32 srate, u32 ra
return 0;
}
-static int alps_bsbe1_tuner_set_params(struct dvb_frontend* fe, struct dvb_frontend_parameters *params)
+static int alps_bsbe1_tuner_set_params(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
int ret;
u8 data[4];
u32 div;
struct i2c_msg msg = { .addr = 0x61, .flags = 0, .buf = data, .len = sizeof(data) };
struct i2c_adapter *i2c = fe->tuner_priv;
- if ((params->frequency < 950000) || (params->frequency > 2150000))
+ if ((p->frequency < 950000) || (p->frequency > 2150000))
return -EINVAL;
- div = params->frequency / 1000;
+ div = p->frequency / 1000;
data[0] = (div >> 8) & 0x7f;
data[1] = div & 0xff;
data[2] = 0x80 | ((div & 0x18000) >> 10) | 0x1;
diff --git a/drivers/media/dvb/frontends/bsru6.h b/drivers/media/dvb/frontends/bsru6.h
index c480c839b302..c2a578e1314d 100644
--- a/drivers/media/dvb/frontends/bsru6.h
+++ b/drivers/media/dvb/frontends/bsru6.h
@@ -101,23 +101,24 @@ static int alps_bsru6_set_symbol_rate(struct dvb_frontend *fe, u32 srate, u32 ra
return 0;
}
-static int alps_bsru6_tuner_set_params(struct dvb_frontend *fe, struct dvb_frontend_parameters *params)
+static int alps_bsru6_tuner_set_params(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
u8 buf[4];
u32 div;
struct i2c_msg msg = { .addr = 0x61, .flags = 0, .buf = buf, .len = sizeof(buf) };
struct i2c_adapter *i2c = fe->tuner_priv;
- if ((params->frequency < 950000) || (params->frequency > 2150000))
+ if ((p->frequency < 950000) || (p->frequency > 2150000))
return -EINVAL;
- div = (params->frequency + (125 - 1)) / 125; // round correctly
+ div = (p->frequency + (125 - 1)) / 125; /* round correctly */
buf[0] = (div >> 8) & 0x7f;
buf[1] = div & 0xff;
buf[2] = 0x80 | ((div & 0x18000) >> 10) | 4;
buf[3] = 0xC4;
- if (params->frequency > 1530000)
+ if (p->frequency > 1530000)
buf[3] = 0xc0;
if (fe->ops.i2c_gate_ctrl)
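For reference, the divider packing above for a hypothetical 1,500,000 kHz request (illustrative, not a captured tune) gives div = (1500000 + 124) / 125 = 12000 = 0x2ee0 and buf = 2e e0 84 c4. A standalone sketch of the same packing; bsru6_pack is a made-up helper name, not a driver symbol:

#include <stdint.h>
#include <stdio.h>

/* Pack a BSRU6-style PLL message for a frequency in kHz, as in the hunk above. */
static void bsru6_pack(uint32_t freq_khz, uint8_t buf[4])
{
	uint32_t div = (freq_khz + (125 - 1)) / 125;	/* 125 kHz steps, rounded up */

	buf[0] = (div >> 8) & 0x7f;
	buf[1] = div & 0xff;
	buf[2] = 0x80 | ((div & 0x18000) >> 10) | 4;
	buf[3] = (freq_khz > 1530000) ? 0xc0 : 0xc4;
}

int main(void)
{
	uint8_t buf[4];

	bsru6_pack(1500000, buf);	/* hypothetical 1.5 GHz L-band request */
	printf("%02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]);	/* 2e e0 84 c4 */
	return 0;
}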
diff --git a/drivers/media/dvb/frontends/cx22700.c b/drivers/media/dvb/frontends/cx22700.c
index 0142214b0133..f2a90f990ce3 100644
--- a/drivers/media/dvb/frontends/cx22700.c
+++ b/drivers/media/dvb/frontends/cx22700.c
@@ -121,7 +121,8 @@ static int cx22700_set_inversion (struct cx22700_state* state, int inversion)
}
}
-static int cx22700_set_tps (struct cx22700_state *state, struct dvb_ofdm_parameters *p)
+static int cx22700_set_tps(struct cx22700_state *state,
+ struct dtv_frontend_properties *p)
{
static const u8 qam_tab [4] = { 0, 1, 0, 2 };
static const u8 fec_tab [6] = { 0, 1, 2, 0, 3, 4 };
@@ -146,25 +147,25 @@ static int cx22700_set_tps (struct cx22700_state *state, struct dvb_ofdm_paramet
p->transmission_mode != TRANSMISSION_MODE_8K)
return -EINVAL;
- if (p->constellation != QPSK &&
- p->constellation != QAM_16 &&
- p->constellation != QAM_64)
+ if (p->modulation != QPSK &&
+ p->modulation != QAM_16 &&
+ p->modulation != QAM_64)
return -EINVAL;
- if (p->hierarchy_information < HIERARCHY_NONE ||
- p->hierarchy_information > HIERARCHY_4)
+ if (p->hierarchy < HIERARCHY_NONE ||
+ p->hierarchy > HIERARCHY_4)
return -EINVAL;
- if (p->bandwidth < BANDWIDTH_8_MHZ || p->bandwidth > BANDWIDTH_6_MHZ)
+ if (p->bandwidth_hz > 8000000 || p->bandwidth_hz < 6000000)
return -EINVAL;
- if (p->bandwidth == BANDWIDTH_7_MHZ)
+ if (p->bandwidth_hz == 7000000)
cx22700_writereg (state, 0x09, cx22700_readreg (state, 0x09 | 0x10));
else
cx22700_writereg (state, 0x09, cx22700_readreg (state, 0x09 & ~0x10));
- val = qam_tab[p->constellation - QPSK];
- val |= p->hierarchy_information - HIERARCHY_NONE;
+ val = qam_tab[p->modulation - QPSK];
+ val |= p->hierarchy - HIERARCHY_NONE;
cx22700_writereg (state, 0x04, val);
@@ -184,7 +185,8 @@ static int cx22700_set_tps (struct cx22700_state *state, struct dvb_ofdm_paramet
return 0;
}
-static int cx22700_get_tps (struct cx22700_state* state, struct dvb_ofdm_parameters *p)
+static int cx22700_get_tps(struct cx22700_state *state,
+ struct dtv_frontend_properties *p)
{
static const fe_modulation_t qam_tab [3] = { QPSK, QAM_16, QAM_64 };
static const fe_code_rate_t fec_tab [5] = { FEC_1_2, FEC_2_3, FEC_3_4,
@@ -199,14 +201,14 @@ static int cx22700_get_tps (struct cx22700_state* state, struct dvb_ofdm_paramet
val = cx22700_readreg (state, 0x01);
if ((val & 0x7) > 4)
- p->hierarchy_information = HIERARCHY_AUTO;
+ p->hierarchy = HIERARCHY_AUTO;
else
- p->hierarchy_information = HIERARCHY_NONE + (val & 0x7);
+ p->hierarchy = HIERARCHY_NONE + (val & 0x7);
if (((val >> 3) & 0x3) > 2)
- p->constellation = QAM_AUTO;
+ p->modulation = QAM_AUTO;
else
- p->constellation = qam_tab[(val >> 3) & 0x3];
+ p->modulation = qam_tab[(val >> 3) & 0x3];
val = cx22700_readreg (state, 0x02);
@@ -318,33 +320,35 @@ static int cx22700_read_ucblocks(struct dvb_frontend* fe, u32* ucblocks)
return 0;
}
-static int cx22700_set_frontend(struct dvb_frontend* fe, struct dvb_frontend_parameters *p)
+static int cx22700_set_frontend(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct cx22700_state* state = fe->demodulator_priv;
cx22700_writereg (state, 0x00, 0x02); /* XXX CHECKME: soft reset*/
cx22700_writereg (state, 0x00, 0x00);
if (fe->ops.tuner_ops.set_params) {
- fe->ops.tuner_ops.set_params(fe, p);
+ fe->ops.tuner_ops.set_params(fe);
if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0);
}
- cx22700_set_inversion (state, p->inversion);
- cx22700_set_tps (state, &p->u.ofdm);
+ cx22700_set_inversion(state, c->inversion);
+ cx22700_set_tps(state, c);
cx22700_writereg (state, 0x37, 0x01); /* PAL loop filter off */
cx22700_writereg (state, 0x00, 0x01); /* restart acquire */
return 0;
}
-static int cx22700_get_frontend(struct dvb_frontend* fe, struct dvb_frontend_parameters *p)
+static int cx22700_get_frontend(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct cx22700_state* state = fe->demodulator_priv;
u8 reg09 = cx22700_readreg (state, 0x09);
- p->inversion = reg09 & 0x1 ? INVERSION_ON : INVERSION_OFF;
- return cx22700_get_tps (state, &p->u.ofdm);
+ c->inversion = reg09 & 0x1 ? INVERSION_ON : INVERSION_OFF;
+ return cx22700_get_tps(state, c);
}
static int cx22700_i2c_gate_ctrl(struct dvb_frontend* fe, int enable)
@@ -401,10 +405,9 @@ error:
}
static struct dvb_frontend_ops cx22700_ops = {
-
+ .delsys = { SYS_DVBT },
.info = {
.name = "Conexant CX22700 DVB-T",
- .type = FE_OFDM,
.frequency_min = 470000000,
.frequency_max = 860000000,
.frequency_stepsize = 166667,
diff --git a/drivers/media/dvb/frontends/cx22702.c b/drivers/media/dvb/frontends/cx22702.c
index 3139558148ba..faba82485086 100644
--- a/drivers/media/dvb/frontends/cx22702.c
+++ b/drivers/media/dvb/frontends/cx22702.c
@@ -146,7 +146,7 @@ static int cx22702_set_inversion(struct cx22702_state *state, int inversion)
/* Retrieve the demod settings */
static int cx22702_get_tps(struct cx22702_state *state,
- struct dvb_ofdm_parameters *p)
+ struct dtv_frontend_properties *p)
{
u8 val;
@@ -157,27 +157,27 @@ static int cx22702_get_tps(struct cx22702_state *state,
val = cx22702_readreg(state, 0x01);
switch ((val & 0x18) >> 3) {
case 0:
- p->constellation = QPSK;
+ p->modulation = QPSK;
break;
case 1:
- p->constellation = QAM_16;
+ p->modulation = QAM_16;
break;
case 2:
- p->constellation = QAM_64;
+ p->modulation = QAM_64;
break;
}
switch (val & 0x07) {
case 0:
- p->hierarchy_information = HIERARCHY_NONE;
+ p->hierarchy = HIERARCHY_NONE;
break;
case 1:
- p->hierarchy_information = HIERARCHY_1;
+ p->hierarchy = HIERARCHY_1;
break;
case 2:
- p->hierarchy_information = HIERARCHY_2;
+ p->hierarchy = HIERARCHY_2;
break;
case 3:
- p->hierarchy_information = HIERARCHY_4;
+ p->hierarchy = HIERARCHY_4;
break;
}
@@ -260,14 +260,14 @@ static int cx22702_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
}
/* Talk to the demod, set the FEC, GUARD, QAM settings etc */
-static int cx22702_set_tps(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *p)
+static int cx22702_set_tps(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
u8 val;
struct cx22702_state *state = fe->demodulator_priv;
if (fe->ops.tuner_ops.set_params) {
- fe->ops.tuner_ops.set_params(fe, p);
+ fe->ops.tuner_ops.set_params(fe);
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 0);
}
@@ -277,14 +277,14 @@ static int cx22702_set_tps(struct dvb_frontend *fe,
/* set bandwidth */
val = cx22702_readreg(state, 0x0C) & 0xcf;
- switch (p->u.ofdm.bandwidth) {
- case BANDWIDTH_6_MHZ:
+ switch (p->bandwidth_hz) {
+ case 6000000:
val |= 0x20;
break;
- case BANDWIDTH_7_MHZ:
+ case 7000000:
val |= 0x10;
break;
- case BANDWIDTH_8_MHZ:
+ case 8000000:
break;
default:
dprintk("%s: invalid bandwidth\n", __func__);
@@ -292,15 +292,15 @@ static int cx22702_set_tps(struct dvb_frontend *fe,
}
cx22702_writereg(state, 0x0C, val);
- p->u.ofdm.code_rate_LP = FEC_AUTO; /* temp hack as manual not working */
+ p->code_rate_LP = FEC_AUTO; /* temp hack as manual not working */
/* use auto configuration? */
- if ((p->u.ofdm.hierarchy_information == HIERARCHY_AUTO) ||
- (p->u.ofdm.constellation == QAM_AUTO) ||
- (p->u.ofdm.code_rate_HP == FEC_AUTO) ||
- (p->u.ofdm.code_rate_LP == FEC_AUTO) ||
- (p->u.ofdm.guard_interval == GUARD_INTERVAL_AUTO) ||
- (p->u.ofdm.transmission_mode == TRANSMISSION_MODE_AUTO)) {
+ if ((p->hierarchy == HIERARCHY_AUTO) ||
+ (p->modulation == QAM_AUTO) ||
+ (p->code_rate_HP == FEC_AUTO) ||
+ (p->code_rate_LP == FEC_AUTO) ||
+ (p->guard_interval == GUARD_INTERVAL_AUTO) ||
+ (p->transmission_mode == TRANSMISSION_MODE_AUTO)) {
/* TPS Source - use hardware driven values */
cx22702_writereg(state, 0x06, 0x10);
@@ -316,7 +316,7 @@ static int cx22702_set_tps(struct dvb_frontend *fe,
}
/* manually programmed values */
- switch (p->u.ofdm.constellation) { /* mask 0x18 */
+ switch (p->modulation) { /* mask 0x18 */
case QPSK:
val = 0x00;
break;
@@ -327,10 +327,10 @@ static int cx22702_set_tps(struct dvb_frontend *fe,
val = 0x10;
break;
default:
- dprintk("%s: invalid constellation\n", __func__);
+ dprintk("%s: invalid modulation\n", __func__);
return -EINVAL;
}
- switch (p->u.ofdm.hierarchy_information) { /* mask 0x07 */
+ switch (p->hierarchy) { /* mask 0x07 */
case HIERARCHY_NONE:
break;
case HIERARCHY_1:
@@ -348,7 +348,7 @@ static int cx22702_set_tps(struct dvb_frontend *fe,
}
cx22702_writereg(state, 0x06, val);
- switch (p->u.ofdm.code_rate_HP) { /* mask 0x38 */
+ switch (p->code_rate_HP) { /* mask 0x38 */
case FEC_NONE:
case FEC_1_2:
val = 0x00;
@@ -369,7 +369,7 @@ static int cx22702_set_tps(struct dvb_frontend *fe,
dprintk("%s: invalid code_rate_HP\n", __func__);
return -EINVAL;
}
- switch (p->u.ofdm.code_rate_LP) { /* mask 0x07 */
+ switch (p->code_rate_LP) { /* mask 0x07 */
case FEC_NONE:
case FEC_1_2:
break;
@@ -391,7 +391,7 @@ static int cx22702_set_tps(struct dvb_frontend *fe,
}
cx22702_writereg(state, 0x07, val);
- switch (p->u.ofdm.guard_interval) { /* mask 0x0c */
+ switch (p->guard_interval) { /* mask 0x0c */
case GUARD_INTERVAL_1_32:
val = 0x00;
break;
@@ -408,7 +408,7 @@ static int cx22702_set_tps(struct dvb_frontend *fe,
dprintk("%s: invalid guard_interval\n", __func__);
return -EINVAL;
}
- switch (p->u.ofdm.transmission_mode) { /* mask 0x03 */
+ switch (p->transmission_mode) { /* mask 0x03 */
case TRANSMISSION_MODE_2K:
break;
case TRANSMISSION_MODE_8K:
@@ -546,15 +546,15 @@ static int cx22702_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
return 0;
}
-static int cx22702_get_frontend(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *p)
+static int cx22702_get_frontend(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct cx22702_state *state = fe->demodulator_priv;
u8 reg0C = cx22702_readreg(state, 0x0C);
- p->inversion = reg0C & 0x1 ? INVERSION_ON : INVERSION_OFF;
- return cx22702_get_tps(state, &p->u.ofdm);
+ c->inversion = reg0C & 0x1 ? INVERSION_ON : INVERSION_OFF;
+ return cx22702_get_tps(state, c);
}
static int cx22702_get_tune_settings(struct dvb_frontend *fe,
@@ -603,10 +603,9 @@ error:
EXPORT_SYMBOL(cx22702_attach);
static const struct dvb_frontend_ops cx22702_ops = {
-
+ .delsys = { SYS_DVBT },
.info = {
.name = "Conexant CX22702 DVB-T",
- .type = FE_OFDM,
.frequency_min = 177000000,
.frequency_max = 858000000,
.frequency_stepsize = 166666,
diff --git a/drivers/media/dvb/frontends/cx24110.c b/drivers/media/dvb/frontends/cx24110.c
index bf9c999aa470..5101f10f2d7a 100644
--- a/drivers/media/dvb/frontends/cx24110.c
+++ b/drivers/media/dvb/frontends/cx24110.c
@@ -531,26 +531,27 @@ static int cx24110_read_ucblocks(struct dvb_frontend* fe, u32* ucblocks)
return 0;
}
-static int cx24110_set_frontend(struct dvb_frontend* fe, struct dvb_frontend_parameters *p)
+static int cx24110_set_frontend(struct dvb_frontend *fe)
{
struct cx24110_state *state = fe->demodulator_priv;
-
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
if (fe->ops.tuner_ops.set_params) {
- fe->ops.tuner_ops.set_params(fe, p);
+ fe->ops.tuner_ops.set_params(fe);
if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0);
}
- cx24110_set_inversion (state, p->inversion);
- cx24110_set_fec (state, p->u.qpsk.fec_inner);
- cx24110_set_symbolrate (state, p->u.qpsk.symbol_rate);
+ cx24110_set_inversion(state, p->inversion);
+ cx24110_set_fec(state, p->fec_inner);
+ cx24110_set_symbolrate(state, p->symbol_rate);
cx24110_writereg(state,0x04,0x05); /* start acquisition */
return 0;
}
-static int cx24110_get_frontend(struct dvb_frontend* fe, struct dvb_frontend_parameters *p)
+static int cx24110_get_frontend(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct cx24110_state *state = fe->demodulator_priv;
s32 afc; unsigned sclk;
@@ -571,7 +572,7 @@ static int cx24110_get_frontend(struct dvb_frontend* fe, struct dvb_frontend_par
p->frequency += afc;
p->inversion = (cx24110_readreg (state, 0x22) & 0x10) ?
INVERSION_ON : INVERSION_OFF;
- p->u.qpsk.fec_inner = cx24110_get_fec (state);
+ p->fec_inner = cx24110_get_fec(state);
return 0;
}
@@ -623,10 +624,9 @@ error:
}
static struct dvb_frontend_ops cx24110_ops = {
-
+ .delsys = { SYS_DVBS },
.info = {
.name = "Conexant CX24110 DVB-S",
- .type = FE_QPSK,
.frequency_min = 950000,
.frequency_max = 2150000,
.frequency_stepsize = 1011, /* kHz for QPSK frontends */
diff --git a/drivers/media/dvb/frontends/cx24113.c b/drivers/media/dvb/frontends/cx24113.c
index c341d57d5e81..3883c3b31aef 100644
--- a/drivers/media/dvb/frontends/cx24113.c
+++ b/drivers/media/dvb/frontends/cx24113.c
@@ -476,21 +476,21 @@ static int cx24113_init(struct dvb_frontend *fe)
return ret;
}
-static int cx24113_set_params(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *p)
+static int cx24113_set_params(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct cx24113_state *state = fe->tuner_priv;
/* for a ROLL-OFF factor of 0.35, 0.2: 600, 0.25: 625 */
u32 roll_off = 675;
u32 bw;
- bw = ((p->u.qpsk.symbol_rate/100) * roll_off) / 1000;
+ bw = ((c->symbol_rate/100) * roll_off) / 1000;
bw += (10000000/100) + 5;
bw /= 10;
bw += 1000;
cx24113_set_bandwidth(state, bw);
- cx24113_set_frequency(state, p->frequency);
+ cx24113_set_frequency(state, c->frequency);
msleep(5);
return cx24113_get_status(fe, &bw);
}
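The roll_off constant above appears to encode (1 + roll-off)/2 in thousandths (675 for 0.35; the comment lists 600 and 625 for 0.2 and 0.25), so bw works out to roughly symbol_rate * (1 + roll-off)/2 plus an ~11 MHz margin, in kHz. A worked sketch with a hypothetical 27.5 Msym/s symbol rate; cx24113_bw_khz is an illustrative helper, not a driver function:

#include <stdint.h>
#include <stdio.h>

/* Same bandwidth arithmetic as the hunk above: symbol rate in Sym/s in, kHz out. */
static uint32_t cx24113_bw_khz(uint32_t symbol_rate, uint32_t roll_off)
{
	uint32_t bw = ((symbol_rate / 100) * roll_off) / 1000;

	bw += (10000000 / 100) + 5;	/* ~10 MHz margin plus rounding */
	bw /= 10;
	bw += 1000;			/* plus another 1 MHz */
	return bw;
}

int main(void)
{
	printf("%u kHz\n", cx24113_bw_khz(27500000, 675));	/* 29563 kHz */
	return 0;
}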
@@ -547,11 +547,9 @@ static const struct dvb_tuner_ops cx24113_tuner_ops = {
.release = cx24113_release,
.init = cx24113_init,
- .sleep = NULL,
.set_params = cx24113_set_params,
.get_frequency = cx24113_get_frequency,
- .get_bandwidth = NULL,
.get_status = cx24113_get_status,
};
diff --git a/drivers/media/dvb/frontends/cx24116.c b/drivers/media/dvb/frontends/cx24116.c
index ccd05255d527..b48879186537 100644
--- a/drivers/media/dvb/frontends/cx24116.c
+++ b/drivers/media/dvb/frontends/cx24116.c
@@ -1212,25 +1212,10 @@ static int cx24116_sleep(struct dvb_frontend *fe)
return 0;
}
-static int cx24116_set_property(struct dvb_frontend *fe,
- struct dtv_property *tvp)
-{
- dprintk("%s(..)\n", __func__);
- return 0;
-}
-
-static int cx24116_get_property(struct dvb_frontend *fe,
- struct dtv_property *tvp)
-{
- dprintk("%s(..)\n", __func__);
- return 0;
-}
-
/* dvb-core told us to tune, the tv property cache will be complete,
 * it's safe for us to pull values and use them for tuning purposes.
*/
-static int cx24116_set_frontend(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *p)
+static int cx24116_set_frontend(struct dvb_frontend *fe)
{
struct cx24116_state *state = fe->demodulator_priv;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
@@ -1455,12 +1440,20 @@ tuned: /* Set/Reset B/W */
return cx24116_cmd_execute(fe, &cmd);
}
-static int cx24116_tune(struct dvb_frontend *fe, struct dvb_frontend_parameters *params,
+static int cx24116_tune(struct dvb_frontend *fe, bool re_tune,
unsigned int mode_flags, unsigned int *delay, fe_status_t *status)
{
+ /*
+ * It is safe to discard "params" here, as the DVB core will sync
+ * fe->dtv_property_cache with fepriv->parameters_in, where the
+ * DVBv3 params are stored. Its only practical use is to indicate
+ * that re-tuning is needed, i.e. that (fepriv->state & FESTATE_RETUNE)
+ * is true.
+ */
+
*delay = HZ / 5;
- if (params) {
- int ret = cx24116_set_frontend(fe, params);
+ if (re_tune) {
+ int ret = cx24116_set_frontend(fe);
if (ret)
return ret;
}
@@ -1473,10 +1466,9 @@ static int cx24116_get_algo(struct dvb_frontend *fe)
}
static struct dvb_frontend_ops cx24116_ops = {
-
+ .delsys = { SYS_DVBS, SYS_DVBS2 },
.info = {
.name = "Conexant CX24116/CX24118",
- .type = FE_QPSK,
.frequency_min = 950000,
.frequency_max = 2150000,
.frequency_stepsize = 1011, /* kHz for QPSK frontends */
@@ -1507,8 +1499,6 @@ static struct dvb_frontend_ops cx24116_ops = {
.get_frontend_algo = cx24116_get_algo,
.tune = cx24116_tune,
- .set_property = cx24116_set_property,
- .get_property = cx24116_get_property,
.set_frontend = cx24116_set_frontend,
};
diff --git a/drivers/media/dvb/frontends/cx24123.c b/drivers/media/dvb/frontends/cx24123.c
index b1dd8acc607a..7e28b4ee7d4f 100644
--- a/drivers/media/dvb/frontends/cx24123.c
+++ b/drivers/media/dvb/frontends/cx24123.c
@@ -526,9 +526,9 @@ static int cx24123_set_symbolrate(struct cx24123_state *state, u32 srate)
* to be configured and the correct band selected.
* Calculate those values.
*/
-static int cx24123_pll_calculate(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *p)
+static int cx24123_pll_calculate(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct cx24123_state *state = fe->demodulator_priv;
u32 ndiv = 0, adiv = 0, vco_div = 0;
int i = 0;
@@ -548,8 +548,8 @@ static int cx24123_pll_calculate(struct dvb_frontend *fe,
* FILTUNE programming bits */
for (i = 0; i < ARRAY_SIZE(cx24123_AGC_vals); i++) {
agcv = &cx24123_AGC_vals[i];
- if ((agcv->symbolrate_low <= p->u.qpsk.symbol_rate) &&
- (agcv->symbolrate_high >= p->u.qpsk.symbol_rate)) {
+ if ((agcv->symbolrate_low <= p->symbol_rate) &&
+ (agcv->symbolrate_high >= p->symbol_rate)) {
state->VCAarg = agcv->VCAprogdata;
state->VGAarg = agcv->VGAprogdata;
state->FILTune = agcv->FILTune;
@@ -601,8 +601,7 @@ static int cx24123_pll_calculate(struct dvb_frontend *fe,
* Tuner cx24109 is written through a dedicated 3wire interface
* on the demod chip.
*/
-static int cx24123_pll_writereg(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *p, u32 data)
+static int cx24123_pll_writereg(struct dvb_frontend *fe, u32 data)
{
struct cx24123_state *state = fe->demodulator_priv;
unsigned long timeout;
@@ -659,26 +658,26 @@ static int cx24123_pll_writereg(struct dvb_frontend *fe,
return 0;
}
-static int cx24123_pll_tune(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *p)
+static int cx24123_pll_tune(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct cx24123_state *state = fe->demodulator_priv;
u8 val;
dprintk("frequency=%i\n", p->frequency);
- if (cx24123_pll_calculate(fe, p) != 0) {
+ if (cx24123_pll_calculate(fe) != 0) {
err("%s: cx24123_pll_calculate failed\n", __func__);
return -EINVAL;
}
/* Write the new VCO/VGA */
- cx24123_pll_writereg(fe, p, state->VCAarg);
- cx24123_pll_writereg(fe, p, state->VGAarg);
+ cx24123_pll_writereg(fe, state->VCAarg);
+ cx24123_pll_writereg(fe, state->VGAarg);
/* Write the new bandselect and pll args */
- cx24123_pll_writereg(fe, p, state->bandselectarg);
- cx24123_pll_writereg(fe, p, state->pllarg);
+ cx24123_pll_writereg(fe, state->bandselectarg);
+ cx24123_pll_writereg(fe, state->pllarg);
/* set the FILTUNE voltage */
val = cx24123_readreg(state, 0x28) & ~0x3;
@@ -925,10 +924,10 @@ static int cx24123_read_snr(struct dvb_frontend *fe, u16 *snr)
return 0;
}
-static int cx24123_set_frontend(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *p)
+static int cx24123_set_frontend(struct dvb_frontend *fe)
{
struct cx24123_state *state = fe->demodulator_priv;
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
dprintk("\n");
@@ -936,16 +935,16 @@ static int cx24123_set_frontend(struct dvb_frontend *fe,
state->config->set_ts_params(fe, 0);
state->currentfreq = p->frequency;
- state->currentsymbolrate = p->u.qpsk.symbol_rate;
+ state->currentsymbolrate = p->symbol_rate;
cx24123_set_inversion(state, p->inversion);
- cx24123_set_fec(state, p->u.qpsk.fec_inner);
- cx24123_set_symbolrate(state, p->u.qpsk.symbol_rate);
+ cx24123_set_fec(state, p->fec_inner);
+ cx24123_set_symbolrate(state, p->symbol_rate);
if (!state->config->dont_use_pll)
- cx24123_pll_tune(fe, p);
+ cx24123_pll_tune(fe);
else if (fe->ops.tuner_ops.set_params)
- fe->ops.tuner_ops.set_params(fe, p);
+ fe->ops.tuner_ops.set_params(fe);
else
err("it seems I don't have a tuner...");
@@ -960,9 +959,9 @@ static int cx24123_set_frontend(struct dvb_frontend *fe,
return 0;
}
-static int cx24123_get_frontend(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *p)
+static int cx24123_get_frontend(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct cx24123_state *state = fe->demodulator_priv;
dprintk("\n");
@@ -971,12 +970,12 @@ static int cx24123_get_frontend(struct dvb_frontend *fe,
err("%s: Failed to get inversion status\n", __func__);
return -EREMOTEIO;
}
- if (cx24123_get_fec(state, &p->u.qpsk.fec_inner) != 0) {
+ if (cx24123_get_fec(state, &p->fec_inner) != 0) {
err("%s: Failed to get fec status\n", __func__);
return -EREMOTEIO;
}
p->frequency = state->currentfreq;
- p->u.qpsk.symbol_rate = state->currentsymbolrate;
+ p->symbol_rate = state->currentsymbolrate;
return 0;
}
@@ -1007,15 +1006,15 @@ static int cx24123_set_tone(struct dvb_frontend *fe, fe_sec_tone_mode_t tone)
}
static int cx24123_tune(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *params,
+ bool re_tune,
unsigned int mode_flags,
unsigned int *delay,
fe_status_t *status)
{
int retval = 0;
- if (params != NULL)
- retval = cx24123_set_frontend(fe, params);
+ if (re_tune)
+ retval = cx24123_set_frontend(fe);
if (!(mode_flags & FE_TUNE_MODE_ONESHOT))
cx24123_read_status(fe, status);
@@ -1126,10 +1125,9 @@ error:
EXPORT_SYMBOL(cx24123_attach);
static struct dvb_frontend_ops cx24123_ops = {
-
+ .delsys = { SYS_DVBS },
.info = {
.name = "Conexant CX24123/CX24109",
- .type = FE_QPSK,
.frequency_min = 950000,
.frequency_max = 2150000,
.frequency_stepsize = 1011, /* kHz for QPSK frontends */
diff --git a/drivers/media/dvb/frontends/cxd2820r.h b/drivers/media/dvb/frontends/cxd2820r.h
index 03cab7b547fb..cf0f546aa1d1 100644
--- a/drivers/media/dvb/frontends/cxd2820r.h
+++ b/drivers/media/dvb/frontends/cxd2820r.h
@@ -63,19 +63,6 @@ struct cxd2820r_config {
*/
bool spec_inv;
- /* IFs for all used modes.
- * Default: none, must set
- * Values: <kHz>
- */
- u16 if_dvbt_6;
- u16 if_dvbt_7;
- u16 if_dvbt_8;
- u16 if_dvbt2_5;
- u16 if_dvbt2_6;
- u16 if_dvbt2_7;
- u16 if_dvbt2_8;
- u16 if_dvbc;
-
/* GPIOs for all used modes.
* Default: none, disabled
* Values: <see above>
diff --git a/drivers/media/dvb/frontends/cxd2820r_c.c b/drivers/media/dvb/frontends/cxd2820r_c.c
index b85f5011e344..945404991529 100644
--- a/drivers/media/dvb/frontends/cxd2820r_c.c
+++ b/drivers/media/dvb/frontends/cxd2820r_c.c
@@ -21,13 +21,13 @@
#include "cxd2820r_priv.h"
-int cxd2820r_set_frontend_c(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *params)
+int cxd2820r_set_frontend_c(struct dvb_frontend *fe)
{
struct cxd2820r_priv *priv = fe->demodulator_priv;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int ret, i;
u8 buf[2];
+ u32 if_freq;
u16 if_ctl;
u64 num;
struct reg_val_mask tab[] = {
@@ -56,9 +56,9 @@ int cxd2820r_set_frontend_c(struct dvb_frontend *fe,
/* program tuner */
if (fe->ops.tuner_ops.set_params)
- fe->ops.tuner_ops.set_params(fe, params);
+ fe->ops.tuner_ops.set_params(fe);
- if (priv->delivery_system != SYS_DVBC_ANNEX_AC) {
+ if (priv->delivery_system != SYS_DVBC_ANNEX_A) {
for (i = 0; i < ARRAY_SIZE(tab); i++) {
ret = cxd2820r_wr_reg_mask(priv, tab[i].reg,
tab[i].val, tab[i].mask);
@@ -67,10 +67,20 @@ int cxd2820r_set_frontend_c(struct dvb_frontend *fe,
}
}
- priv->delivery_system = SYS_DVBC_ANNEX_AC;
+ priv->delivery_system = SYS_DVBC_ANNEX_A;
priv->ber_running = 0; /* tune stops BER counter */
- num = priv->cfg.if_dvbc;
+ /* program IF frequency */
+ if (fe->ops.tuner_ops.get_if_frequency) {
+ ret = fe->ops.tuner_ops.get_if_frequency(fe, &if_freq);
+ if (ret)
+ goto error;
+ } else
+ if_freq = 0;
+
+ dbg("%s: if_freq=%d", __func__, if_freq);
+
+ num = if_freq / 1000; /* Hz => kHz */
num *= 0x4000;
if_ctl = cxd2820r_div_u64_round_closest(num, 41000);
buf[0] = (if_ctl >> 8) & 0x3f;
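With the per-mode IF values removed from the config, the IF control word is now derived from whatever the tuner reports: if_ctl = round(if_khz * 0x4000 / 41000), i.e. the IF scaled against the driver's 41000 kHz constant into 14 bits (the DVB-T/T2 paths below do the same with a 24-bit word). A worked sketch with a hypothetical 5 MHz IF; cxd2820r_c_if_ctl is an illustrative helper, not a driver symbol:

#include <stdint.h>
#include <stdio.h>

/* if_ctl = round(if_khz * 0x4000 / 41000), as in the hunk above. */
static uint16_t cxd2820r_c_if_ctl(uint32_t if_freq_hz)
{
	uint64_t num = if_freq_hz / 1000;		/* Hz => kHz */

	num *= 0x4000;
	return (uint16_t)((num + 41000 / 2) / 41000);	/* round to closest */
}

int main(void)
{
	uint16_t if_ctl = cxd2820r_c_if_ctl(5000000);	/* hypothetical 5 MHz IF */

	printf("if_ctl=0x%04x buf={0x%02x, 0x%02x}\n",
	       if_ctl, (if_ctl >> 8) & 0x3f, if_ctl & 0xff);	/* 0x07ce */
	return 0;
}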
@@ -94,8 +104,7 @@ error:
return ret;
}
-int cxd2820r_get_frontend_c(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *p)
+int cxd2820r_get_frontend_c(struct dvb_frontend *fe)
{
struct cxd2820r_priv *priv = fe->demodulator_priv;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
diff --git a/drivers/media/dvb/frontends/cxd2820r_core.c b/drivers/media/dvb/frontends/cxd2820r_core.c
index 036480f967b7..caae7f79c837 100644
--- a/drivers/media/dvb/frontends/cxd2820r_core.c
+++ b/drivers/media/dvb/frontends/cxd2820r_core.c
@@ -240,422 +240,239 @@ error:
return ret;
}
-/* lock FE */
-static int cxd2820r_lock(struct cxd2820r_priv *priv, int active_fe)
-{
- int ret = 0;
- dbg("%s: active_fe=%d", __func__, active_fe);
-
- mutex_lock(&priv->fe_lock);
-
- /* -1=NONE, 0=DVB-T/T2, 1=DVB-C */
- if (priv->active_fe == active_fe)
- ;
- else if (priv->active_fe == -1)
- priv->active_fe = active_fe;
- else
- ret = -EBUSY;
-
- mutex_unlock(&priv->fe_lock);
-
- return ret;
-}
-
-/* unlock FE */
-static void cxd2820r_unlock(struct cxd2820r_priv *priv, int active_fe)
-{
- dbg("%s: active_fe=%d", __func__, active_fe);
-
- mutex_lock(&priv->fe_lock);
-
- /* -1=NONE, 0=DVB-T/T2, 1=DVB-C */
- if (priv->active_fe == active_fe)
- priv->active_fe = -1;
-
- mutex_unlock(&priv->fe_lock);
-
- return;
-}
-
/* 64 bit div with round closest, like DIV_ROUND_CLOSEST but 64 bit */
u32 cxd2820r_div_u64_round_closest(u64 dividend, u32 divisor)
{
return div_u64(dividend + (divisor / 2), divisor);
}
-static int cxd2820r_set_frontend(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *p)
+static int cxd2820r_set_frontend(struct dvb_frontend *fe)
{
- struct cxd2820r_priv *priv = fe->demodulator_priv;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int ret;
- dbg("%s: delsys=%d", __func__, fe->dtv_property_cache.delivery_system);
- if (fe->ops.info.type == FE_OFDM) {
- /* DVB-T/T2 */
- ret = cxd2820r_lock(priv, 0);
- if (ret)
- return ret;
-
- switch (priv->delivery_system) {
- case SYS_UNDEFINED:
- if (c->delivery_system == SYS_DVBT) {
- /* SLEEP => DVB-T */
- ret = cxd2820r_set_frontend_t(fe, p);
- } else {
- /* SLEEP => DVB-T2 */
- ret = cxd2820r_set_frontend_t2(fe, p);
- }
- break;
- case SYS_DVBT:
- if (c->delivery_system == SYS_DVBT) {
- /* DVB-T => DVB-T */
- ret = cxd2820r_set_frontend_t(fe, p);
- } else if (c->delivery_system == SYS_DVBT2) {
- /* DVB-T => DVB-T2 */
- ret = cxd2820r_sleep_t(fe);
- if (ret)
- break;
- ret = cxd2820r_set_frontend_t2(fe, p);
- }
- break;
- case SYS_DVBT2:
- if (c->delivery_system == SYS_DVBT2) {
- /* DVB-T2 => DVB-T2 */
- ret = cxd2820r_set_frontend_t2(fe, p);
- } else if (c->delivery_system == SYS_DVBT) {
- /* DVB-T2 => DVB-T */
- ret = cxd2820r_sleep_t2(fe);
- if (ret)
- break;
- ret = cxd2820r_set_frontend_t(fe, p);
- }
- break;
- default:
- dbg("%s: error state=%d", __func__,
- priv->delivery_system);
- ret = -EINVAL;
- }
- } else {
- /* DVB-C */
- ret = cxd2820r_lock(priv, 1);
- if (ret)
- return ret;
-
- ret = cxd2820r_set_frontend_c(fe, p);
+ dbg("%s: delsys=%d", __func__, fe->dtv_property_cache.delivery_system);
+ switch (c->delivery_system) {
+ case SYS_DVBT:
+ ret = cxd2820r_init_t(fe);
+ if (ret < 0)
+ goto err;
+ ret = cxd2820r_set_frontend_t(fe);
+ if (ret < 0)
+ goto err;
+ break;
+ case SYS_DVBT2:
+ ret = cxd2820r_init_t(fe);
+ if (ret < 0)
+ goto err;
+ ret = cxd2820r_set_frontend_t2(fe);
+ if (ret < 0)
+ goto err;
+ break;
+ case SYS_DVBC_ANNEX_A:
+ ret = cxd2820r_init_c(fe);
+ if (ret < 0)
+ goto err;
+ ret = cxd2820r_set_frontend_c(fe);
+ if (ret < 0)
+ goto err;
+ break;
+ default:
+ dbg("%s: error state=%d", __func__, fe->dtv_property_cache.delivery_system);
+ ret = -EINVAL;
+ break;
}
-
+err:
return ret;
}
-
static int cxd2820r_read_status(struct dvb_frontend *fe, fe_status_t *status)
{
- struct cxd2820r_priv *priv = fe->demodulator_priv;
int ret;
- dbg("%s: delsys=%d", __func__, fe->dtv_property_cache.delivery_system);
-
- if (fe->ops.info.type == FE_OFDM) {
- /* DVB-T/T2 */
- ret = cxd2820r_lock(priv, 0);
- if (ret)
- return ret;
-
- switch (fe->dtv_property_cache.delivery_system) {
- case SYS_DVBT:
- ret = cxd2820r_read_status_t(fe, status);
- break;
- case SYS_DVBT2:
- ret = cxd2820r_read_status_t2(fe, status);
- break;
- default:
- ret = -EINVAL;
- }
- } else {
- /* DVB-C */
- ret = cxd2820r_lock(priv, 1);
- if (ret)
- return ret;
+ dbg("%s: delsys=%d", __func__, fe->dtv_property_cache.delivery_system);
+ switch (fe->dtv_property_cache.delivery_system) {
+ case SYS_DVBT:
+ ret = cxd2820r_read_status_t(fe, status);
+ break;
+ case SYS_DVBT2:
+ ret = cxd2820r_read_status_t2(fe, status);
+ break;
+ case SYS_DVBC_ANNEX_A:
ret = cxd2820r_read_status_c(fe, status);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
}
-
return ret;
}
-static int cxd2820r_get_frontend(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *p)
+static int cxd2820r_get_frontend(struct dvb_frontend *fe)
{
struct cxd2820r_priv *priv = fe->demodulator_priv;
int ret;
- dbg("%s: delsys=%d", __func__, fe->dtv_property_cache.delivery_system);
- if (fe->ops.info.type == FE_OFDM) {
- /* DVB-T/T2 */
- ret = cxd2820r_lock(priv, 0);
- if (ret)
- return ret;
+ dbg("%s: delsys=%d", __func__, fe->dtv_property_cache.delivery_system);
- switch (fe->dtv_property_cache.delivery_system) {
- case SYS_DVBT:
- ret = cxd2820r_get_frontend_t(fe, p);
- break;
- case SYS_DVBT2:
- ret = cxd2820r_get_frontend_t2(fe, p);
- break;
- default:
- ret = -EINVAL;
- }
- } else {
- /* DVB-C */
- ret = cxd2820r_lock(priv, 1);
- if (ret)
- return ret;
+ if (priv->delivery_system == SYS_UNDEFINED)
+ return 0;
- ret = cxd2820r_get_frontend_c(fe, p);
+ switch (fe->dtv_property_cache.delivery_system) {
+ case SYS_DVBT:
+ ret = cxd2820r_get_frontend_t(fe);
+ break;
+ case SYS_DVBT2:
+ ret = cxd2820r_get_frontend_t2(fe);
+ break;
+ case SYS_DVBC_ANNEX_A:
+ ret = cxd2820r_get_frontend_c(fe);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
}
-
return ret;
}
static int cxd2820r_read_ber(struct dvb_frontend *fe, u32 *ber)
{
- struct cxd2820r_priv *priv = fe->demodulator_priv;
int ret;
- dbg("%s: delsys=%d", __func__, fe->dtv_property_cache.delivery_system);
-
- if (fe->ops.info.type == FE_OFDM) {
- /* DVB-T/T2 */
- ret = cxd2820r_lock(priv, 0);
- if (ret)
- return ret;
-
- switch (fe->dtv_property_cache.delivery_system) {
- case SYS_DVBT:
- ret = cxd2820r_read_ber_t(fe, ber);
- break;
- case SYS_DVBT2:
- ret = cxd2820r_read_ber_t2(fe, ber);
- break;
- default:
- ret = -EINVAL;
- }
- } else {
- /* DVB-C */
- ret = cxd2820r_lock(priv, 1);
- if (ret)
- return ret;
+ dbg("%s: delsys=%d", __func__, fe->dtv_property_cache.delivery_system);
+ switch (fe->dtv_property_cache.delivery_system) {
+ case SYS_DVBT:
+ ret = cxd2820r_read_ber_t(fe, ber);
+ break;
+ case SYS_DVBT2:
+ ret = cxd2820r_read_ber_t2(fe, ber);
+ break;
+ case SYS_DVBC_ANNEX_A:
ret = cxd2820r_read_ber_c(fe, ber);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
}
-
return ret;
}
static int cxd2820r_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
{
- struct cxd2820r_priv *priv = fe->demodulator_priv;
int ret;
- dbg("%s: delsys=%d", __func__, fe->dtv_property_cache.delivery_system);
-
- if (fe->ops.info.type == FE_OFDM) {
- /* DVB-T/T2 */
- ret = cxd2820r_lock(priv, 0);
- if (ret)
- return ret;
-
- switch (fe->dtv_property_cache.delivery_system) {
- case SYS_DVBT:
- ret = cxd2820r_read_signal_strength_t(fe, strength);
- break;
- case SYS_DVBT2:
- ret = cxd2820r_read_signal_strength_t2(fe, strength);
- break;
- default:
- ret = -EINVAL;
- }
- } else {
- /* DVB-C */
- ret = cxd2820r_lock(priv, 1);
- if (ret)
- return ret;
+ dbg("%s: delsys=%d", __func__, fe->dtv_property_cache.delivery_system);
+ switch (fe->dtv_property_cache.delivery_system) {
+ case SYS_DVBT:
+ ret = cxd2820r_read_signal_strength_t(fe, strength);
+ break;
+ case SYS_DVBT2:
+ ret = cxd2820r_read_signal_strength_t2(fe, strength);
+ break;
+ case SYS_DVBC_ANNEX_A:
ret = cxd2820r_read_signal_strength_c(fe, strength);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
}
-
return ret;
}
static int cxd2820r_read_snr(struct dvb_frontend *fe, u16 *snr)
{
- struct cxd2820r_priv *priv = fe->demodulator_priv;
int ret;
- dbg("%s: delsys=%d", __func__, fe->dtv_property_cache.delivery_system);
-
- if (fe->ops.info.type == FE_OFDM) {
- /* DVB-T/T2 */
- ret = cxd2820r_lock(priv, 0);
- if (ret)
- return ret;
-
- switch (fe->dtv_property_cache.delivery_system) {
- case SYS_DVBT:
- ret = cxd2820r_read_snr_t(fe, snr);
- break;
- case SYS_DVBT2:
- ret = cxd2820r_read_snr_t2(fe, snr);
- break;
- default:
- ret = -EINVAL;
- }
- } else {
- /* DVB-C */
- ret = cxd2820r_lock(priv, 1);
- if (ret)
- return ret;
+ dbg("%s: delsys=%d", __func__, fe->dtv_property_cache.delivery_system);
+ switch (fe->dtv_property_cache.delivery_system) {
+ case SYS_DVBT:
+ ret = cxd2820r_read_snr_t(fe, snr);
+ break;
+ case SYS_DVBT2:
+ ret = cxd2820r_read_snr_t2(fe, snr);
+ break;
+ case SYS_DVBC_ANNEX_A:
ret = cxd2820r_read_snr_c(fe, snr);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
}
-
return ret;
}
static int cxd2820r_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
{
- struct cxd2820r_priv *priv = fe->demodulator_priv;
int ret;
- dbg("%s: delsys=%d", __func__, fe->dtv_property_cache.delivery_system);
-
- if (fe->ops.info.type == FE_OFDM) {
- /* DVB-T/T2 */
- ret = cxd2820r_lock(priv, 0);
- if (ret)
- return ret;
-
- switch (fe->dtv_property_cache.delivery_system) {
- case SYS_DVBT:
- ret = cxd2820r_read_ucblocks_t(fe, ucblocks);
- break;
- case SYS_DVBT2:
- ret = cxd2820r_read_ucblocks_t2(fe, ucblocks);
- break;
- default:
- ret = -EINVAL;
- }
- } else {
- /* DVB-C */
- ret = cxd2820r_lock(priv, 1);
- if (ret)
- return ret;
+ dbg("%s: delsys=%d", __func__, fe->dtv_property_cache.delivery_system);
+ switch (fe->dtv_property_cache.delivery_system) {
+ case SYS_DVBT:
+ ret = cxd2820r_read_ucblocks_t(fe, ucblocks);
+ break;
+ case SYS_DVBT2:
+ ret = cxd2820r_read_ucblocks_t2(fe, ucblocks);
+ break;
+ case SYS_DVBC_ANNEX_A:
ret = cxd2820r_read_ucblocks_c(fe, ucblocks);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
}
-
return ret;
}
static int cxd2820r_init(struct dvb_frontend *fe)
{
- struct cxd2820r_priv *priv = fe->demodulator_priv;
- int ret;
- dbg("%s: delsys=%d", __func__, fe->dtv_property_cache.delivery_system);
-
- priv->delivery_system = SYS_UNDEFINED;
- /* delivery system is unknown at that (init) phase */
-
- if (fe->ops.info.type == FE_OFDM) {
- /* DVB-T/T2 */
- ret = cxd2820r_lock(priv, 0);
- if (ret)
- return ret;
-
- ret = cxd2820r_init_t(fe);
- } else {
- /* DVB-C */
- ret = cxd2820r_lock(priv, 1);
- if (ret)
- return ret;
-
- ret = cxd2820r_init_c(fe);
- }
-
- return ret;
+ return 0;
}
static int cxd2820r_sleep(struct dvb_frontend *fe)
{
- struct cxd2820r_priv *priv = fe->demodulator_priv;
int ret;
- dbg("%s: delsys=%d", __func__, fe->dtv_property_cache.delivery_system);
-
- if (fe->ops.info.type == FE_OFDM) {
- /* DVB-T/T2 */
- ret = cxd2820r_lock(priv, 0);
- if (ret)
- return ret;
-
- switch (fe->dtv_property_cache.delivery_system) {
- case SYS_DVBT:
- ret = cxd2820r_sleep_t(fe);
- break;
- case SYS_DVBT2:
- ret = cxd2820r_sleep_t2(fe);
- break;
- default:
- ret = -EINVAL;
- }
-
- cxd2820r_unlock(priv, 0);
- } else {
- /* DVB-C */
- ret = cxd2820r_lock(priv, 1);
- if (ret)
- return ret;
+ dbg("%s: delsys=%d", __func__, fe->dtv_property_cache.delivery_system);
+ switch (fe->dtv_property_cache.delivery_system) {
+ case SYS_DVBT:
+ ret = cxd2820r_sleep_t(fe);
+ break;
+ case SYS_DVBT2:
+ ret = cxd2820r_sleep_t2(fe);
+ break;
+ case SYS_DVBC_ANNEX_A:
ret = cxd2820r_sleep_c(fe);
-
- cxd2820r_unlock(priv, 1);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
}
-
return ret;
}
static int cxd2820r_get_tune_settings(struct dvb_frontend *fe,
- struct dvb_frontend_tune_settings *s)
+ struct dvb_frontend_tune_settings *s)
{
- struct cxd2820r_priv *priv = fe->demodulator_priv;
int ret;
- dbg("%s: delsys=%d", __func__, fe->dtv_property_cache.delivery_system);
-
- if (fe->ops.info.type == FE_OFDM) {
- /* DVB-T/T2 */
- ret = cxd2820r_lock(priv, 0);
- if (ret)
- return ret;
-
- switch (fe->dtv_property_cache.delivery_system) {
- case SYS_DVBT:
- ret = cxd2820r_get_tune_settings_t(fe, s);
- break;
- case SYS_DVBT2:
- ret = cxd2820r_get_tune_settings_t2(fe, s);
- break;
- default:
- ret = -EINVAL;
- }
- } else {
- /* DVB-C */
- ret = cxd2820r_lock(priv, 1);
- if (ret)
- return ret;
+ dbg("%s: delsys=%d", __func__, fe->dtv_property_cache.delivery_system);
+ switch (fe->dtv_property_cache.delivery_system) {
+ case SYS_DVBT:
+ ret = cxd2820r_get_tune_settings_t(fe, s);
+ break;
+ case SYS_DVBT2:
+ ret = cxd2820r_get_tune_settings_t2(fe, s);
+ break;
+ case SYS_DVBC_ANNEX_A:
ret = cxd2820r_get_tune_settings_c(fe, s);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
}
-
return ret;
}
-static enum dvbfe_search cxd2820r_search(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *p)
+static enum dvbfe_search cxd2820r_search(struct dvb_frontend *fe)
{
struct cxd2820r_priv *priv = fe->demodulator_priv;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
@@ -667,12 +484,12 @@ static enum dvbfe_search cxd2820r_search(struct dvb_frontend *fe,
if (priv->last_tune_failed) {
if (priv->delivery_system == SYS_DVBT)
c->delivery_system = SYS_DVBT2;
- else
+ else if (priv->delivery_system == SYS_DVBT2)
c->delivery_system = SYS_DVBT;
}
/* set frontend */
- ret = cxd2820r_set_frontend(fe, p);
+ ret = cxd2820r_set_frontend(fe);
if (ret)
goto error;
@@ -680,6 +497,7 @@ static enum dvbfe_search cxd2820r_search(struct dvb_frontend *fe,
/* frontend lock wait loop count */
switch (priv->delivery_system) {
case SYS_DVBT:
+ case SYS_DVBC_ANNEX_A:
i = 20;
break;
case SYS_DVBT2:
@@ -727,9 +545,7 @@ static void cxd2820r_release(struct dvb_frontend *fe)
struct cxd2820r_priv *priv = fe->demodulator_priv;
dbg("%s", __func__);
- if (fe->ops.info.type == FE_OFDM)
- kfree(priv);
-
+ kfree(priv);
return;
}
@@ -742,128 +558,79 @@ static int cxd2820r_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
return cxd2820r_wr_reg_mask(priv, 0xdb, enable ? 1 : 0, 0x1);
}
-static const struct dvb_frontend_ops cxd2820r_ops[2];
+static const struct dvb_frontend_ops cxd2820r_ops = {
+ .delsys = { SYS_DVBT, SYS_DVBT2, SYS_DVBC_ANNEX_A },
+ /* default: DVB-T/T2 */
+ .info = {
+ .name = "Sony CXD2820R (DVB-T/T2)",
+
+ .caps = FE_CAN_FEC_1_2 |
+ FE_CAN_FEC_2_3 |
+ FE_CAN_FEC_3_4 |
+ FE_CAN_FEC_5_6 |
+ FE_CAN_FEC_7_8 |
+ FE_CAN_FEC_AUTO |
+ FE_CAN_QPSK |
+ FE_CAN_QAM_16 |
+ FE_CAN_QAM_64 |
+ FE_CAN_QAM_256 |
+ FE_CAN_QAM_AUTO |
+ FE_CAN_TRANSMISSION_MODE_AUTO |
+ FE_CAN_GUARD_INTERVAL_AUTO |
+ FE_CAN_HIERARCHY_AUTO |
+ FE_CAN_MUTE_TS |
+ FE_CAN_2G_MODULATION
+ },
-struct dvb_frontend *cxd2820r_attach(const struct cxd2820r_config *cfg,
- struct i2c_adapter *i2c, struct dvb_frontend *fe)
-{
- int ret;
- struct cxd2820r_priv *priv = NULL;
- u8 tmp;
+ .release = cxd2820r_release,
+ .init = cxd2820r_init,
+ .sleep = cxd2820r_sleep,
- if (fe == NULL) {
- /* FE0 */
- /* allocate memory for the internal priv */
- priv = kzalloc(sizeof(struct cxd2820r_priv), GFP_KERNEL);
- if (priv == NULL)
- goto error;
+ .get_tune_settings = cxd2820r_get_tune_settings,
+ .i2c_gate_ctrl = cxd2820r_i2c_gate_ctrl,
- /* setup the priv */
- priv->i2c = i2c;
- memcpy(&priv->cfg, cfg, sizeof(struct cxd2820r_config));
- mutex_init(&priv->fe_lock);
+ .get_frontend = cxd2820r_get_frontend,
- priv->active_fe = -1; /* NONE */
+ .get_frontend_algo = cxd2820r_get_frontend_algo,
+ .search = cxd2820r_search,
- /* check if the demod is there */
- priv->bank[0] = priv->bank[1] = 0xff;
- ret = cxd2820r_rd_reg(priv, 0x000fd, &tmp);
- dbg("%s: chip id=%02x", __func__, tmp);
- if (ret || tmp != 0xe1)
- goto error;
+ .read_status = cxd2820r_read_status,
+ .read_snr = cxd2820r_read_snr,
+ .read_ber = cxd2820r_read_ber,
+ .read_ucblocks = cxd2820r_read_ucblocks,
+ .read_signal_strength = cxd2820r_read_signal_strength,
+};
- /* create frontends */
- memcpy(&priv->fe[0].ops, &cxd2820r_ops[0],
- sizeof(struct dvb_frontend_ops));
- memcpy(&priv->fe[1].ops, &cxd2820r_ops[1],
- sizeof(struct dvb_frontend_ops));
+struct dvb_frontend *cxd2820r_attach(const struct cxd2820r_config *cfg,
+ struct i2c_adapter *i2c,
+ struct dvb_frontend *fe)
+{
+ struct cxd2820r_priv *priv = NULL;
+ int ret;
+ u8 tmp;
- priv->fe[0].demodulator_priv = priv;
- priv->fe[1].demodulator_priv = priv;
+ priv = kzalloc(sizeof (struct cxd2820r_priv), GFP_KERNEL);
+ if (!priv)
+ goto error;
- return &priv->fe[0];
+ priv->i2c = i2c;
+ memcpy(&priv->cfg, cfg, sizeof (struct cxd2820r_config));
- } else {
- /* FE1: FE0 given as pointer, just return FE1 we have
- * already created */
- priv = fe->demodulator_priv;
- return &priv->fe[1];
- }
+ priv->bank[0] = priv->bank[1] = 0xff;
+ ret = cxd2820r_rd_reg(priv, 0x000fd, &tmp);
+ dbg("%s: chip id=%02x", __func__, tmp);
+ if (ret || tmp != 0xe1)
+ goto error;
+ memcpy(&priv->fe.ops, &cxd2820r_ops, sizeof (struct dvb_frontend_ops));
+ priv->fe.demodulator_priv = priv;
+ return &priv->fe;
error:
kfree(priv);
return NULL;
}
EXPORT_SYMBOL(cxd2820r_attach);
-static const struct dvb_frontend_ops cxd2820r_ops[2] = {
- {
- /* DVB-T/T2 */
- .info = {
- .name = "Sony CXD2820R (DVB-T/T2)",
- .type = FE_OFDM,
- .caps =
- FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 |
- FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 |
- FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
- FE_CAN_QPSK | FE_CAN_QAM_16 |
- FE_CAN_QAM_64 | FE_CAN_QAM_256 |
- FE_CAN_QAM_AUTO |
- FE_CAN_TRANSMISSION_MODE_AUTO |
- FE_CAN_GUARD_INTERVAL_AUTO |
- FE_CAN_HIERARCHY_AUTO |
- FE_CAN_MUTE_TS |
- FE_CAN_2G_MODULATION
- },
-
- .release = cxd2820r_release,
- .init = cxd2820r_init,
- .sleep = cxd2820r_sleep,
-
- .get_tune_settings = cxd2820r_get_tune_settings,
- .i2c_gate_ctrl = cxd2820r_i2c_gate_ctrl,
-
- .get_frontend = cxd2820r_get_frontend,
-
- .get_frontend_algo = cxd2820r_get_frontend_algo,
- .search = cxd2820r_search,
-
- .read_status = cxd2820r_read_status,
- .read_snr = cxd2820r_read_snr,
- .read_ber = cxd2820r_read_ber,
- .read_ucblocks = cxd2820r_read_ucblocks,
- .read_signal_strength = cxd2820r_read_signal_strength,
- },
- {
- /* DVB-C */
- .info = {
- .name = "Sony CXD2820R (DVB-C)",
- .type = FE_QAM,
- .caps =
- FE_CAN_QAM_16 | FE_CAN_QAM_32 | FE_CAN_QAM_64 |
- FE_CAN_QAM_128 | FE_CAN_QAM_256 |
- FE_CAN_FEC_AUTO
- },
-
- .release = cxd2820r_release,
- .init = cxd2820r_init,
- .sleep = cxd2820r_sleep,
-
- .get_tune_settings = cxd2820r_get_tune_settings,
- .i2c_gate_ctrl = cxd2820r_i2c_gate_ctrl,
-
- .set_frontend = cxd2820r_set_frontend,
- .get_frontend = cxd2820r_get_frontend,
-
- .read_status = cxd2820r_read_status,
- .read_snr = cxd2820r_read_snr,
- .read_ber = cxd2820r_read_ber,
- .read_ucblocks = cxd2820r_read_ucblocks,
- .read_signal_strength = cxd2820r_read_signal_strength,
- },
-};
-
-
MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
MODULE_DESCRIPTION("Sony CXD2820R demodulator driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/frontends/cxd2820r_priv.h b/drivers/media/dvb/frontends/cxd2820r_priv.h
index 95539134efdb..9a9822cad9cd 100644
--- a/drivers/media/dvb/frontends/cxd2820r_priv.h
+++ b/drivers/media/dvb/frontends/cxd2820r_priv.h
@@ -48,12 +48,9 @@ struct reg_val_mask {
struct cxd2820r_priv {
struct i2c_adapter *i2c;
- struct dvb_frontend fe[2];
+ struct dvb_frontend fe;
struct cxd2820r_config cfg;
- struct mutex fe_lock; /* FE lock */
- int active_fe:2; /* FE lock, -1=NONE, 0=DVB-T/T2, 1=DVB-C */
-
bool ber_running;
u8 bank[2];
@@ -89,11 +86,9 @@ int cxd2820r_rd_reg(struct cxd2820r_priv *priv, u32 reg, u8 *val);
/* cxd2820r_c.c */
-int cxd2820r_get_frontend_c(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *p);
+int cxd2820r_get_frontend_c(struct dvb_frontend *fe);
-int cxd2820r_set_frontend_c(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *params);
+int cxd2820r_set_frontend_c(struct dvb_frontend *fe);
int cxd2820r_read_status_c(struct dvb_frontend *fe, fe_status_t *status);
@@ -114,11 +109,9 @@ int cxd2820r_get_tune_settings_c(struct dvb_frontend *fe,
/* cxd2820r_t.c */
-int cxd2820r_get_frontend_t(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *p);
+int cxd2820r_get_frontend_t(struct dvb_frontend *fe);
-int cxd2820r_set_frontend_t(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *params);
+int cxd2820r_set_frontend_t(struct dvb_frontend *fe);
int cxd2820r_read_status_t(struct dvb_frontend *fe, fe_status_t *status);
@@ -139,11 +132,9 @@ int cxd2820r_get_tune_settings_t(struct dvb_frontend *fe,
/* cxd2820r_t2.c */
-int cxd2820r_get_frontend_t2(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *p);
+int cxd2820r_get_frontend_t2(struct dvb_frontend *fe);
-int cxd2820r_set_frontend_t2(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *params);
+int cxd2820r_set_frontend_t2(struct dvb_frontend *fe);
int cxd2820r_read_status_t2(struct dvb_frontend *fe, fe_status_t *status);
diff --git a/drivers/media/dvb/frontends/cxd2820r_t.c b/drivers/media/dvb/frontends/cxd2820r_t.c
index a04f9c810101..1a026239cdcc 100644
--- a/drivers/media/dvb/frontends/cxd2820r_t.c
+++ b/drivers/media/dvb/frontends/cxd2820r_t.c
@@ -21,13 +21,12 @@
#include "cxd2820r_priv.h"
-int cxd2820r_set_frontend_t(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *p)
+int cxd2820r_set_frontend_t(struct dvb_frontend *fe)
{
struct cxd2820r_priv *priv = fe->demodulator_priv;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
- int ret, i;
- u32 if_khz, if_ctl;
+ int ret, i, bw_i;
+ u32 if_freq, if_ctl;
u64 num;
u8 buf[3], bw_param;
u8 bw_params1[][5] = {
@@ -57,6 +56,23 @@ int cxd2820r_set_frontend_t(struct dvb_frontend *fe,
dbg("%s: RF=%d BW=%d", __func__, c->frequency, c->bandwidth_hz);
+ switch (c->bandwidth_hz) {
+ case 6000000:
+ bw_i = 0;
+ bw_param = 2;
+ break;
+ case 7000000:
+ bw_i = 1;
+ bw_param = 1;
+ break;
+ case 8000000:
+ bw_i = 2;
+ bw_param = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
/* update GPIOs */
ret = cxd2820r_gpio(fe);
if (ret)
@@ -64,7 +80,7 @@ int cxd2820r_set_frontend_t(struct dvb_frontend *fe,
/* program tuner */
if (fe->ops.tuner_ops.set_params)
- fe->ops.tuner_ops.set_params(fe, p);
+ fe->ops.tuner_ops.set_params(fe);
if (priv->delivery_system != SYS_DVBT) {
for (i = 0; i < ARRAY_SIZE(tab); i++) {
@@ -78,27 +94,17 @@ int cxd2820r_set_frontend_t(struct dvb_frontend *fe,
priv->delivery_system = SYS_DVBT;
priv->ber_running = 0; /* tune stops BER counter */
- switch (c->bandwidth_hz) {
- case 6000000:
- if_khz = priv->cfg.if_dvbt_6;
- i = 0;
- bw_param = 2;
- break;
- case 7000000:
- if_khz = priv->cfg.if_dvbt_7;
- i = 1;
- bw_param = 1;
- break;
- case 8000000:
- if_khz = priv->cfg.if_dvbt_8;
- i = 2;
- bw_param = 0;
- break;
- default:
- return -EINVAL;
- }
+ /* program IF frequency */
+ if (fe->ops.tuner_ops.get_if_frequency) {
+ ret = fe->ops.tuner_ops.get_if_frequency(fe, &if_freq);
+ if (ret)
+ goto error;
+ } else
+ if_freq = 0;
+
+ dbg("%s: if_freq=%d", __func__, if_freq);
- num = if_khz;
+ num = if_freq / 1000; /* Hz => kHz */
num *= 0x1000000;
if_ctl = cxd2820r_div_u64_round_closest(num, 41000);
buf[0] = ((if_ctl >> 16) & 0xff);
@@ -109,7 +115,7 @@ int cxd2820r_set_frontend_t(struct dvb_frontend *fe,
if (ret)
goto error;
- ret = cxd2820r_wr_regs(priv, 0x0009f, bw_params1[i], 5);
+ ret = cxd2820r_wr_regs(priv, 0x0009f, bw_params1[bw_i], 5);
if (ret)
goto error;
@@ -117,7 +123,7 @@ int cxd2820r_set_frontend_t(struct dvb_frontend *fe,
if (ret)
goto error;
- ret = cxd2820r_wr_regs(priv, 0x000d9, bw_params2[i], 2);
+ ret = cxd2820r_wr_regs(priv, 0x000d9, bw_params2[bw_i], 2);
if (ret)
goto error;
@@ -135,8 +141,7 @@ error:
return ret;
}
-int cxd2820r_get_frontend_t(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *p)
+int cxd2820r_get_frontend_t(struct dvb_frontend *fe)
{
struct cxd2820r_priv *priv = fe->demodulator_priv;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
diff --git a/drivers/media/dvb/frontends/cxd2820r_t2.c b/drivers/media/dvb/frontends/cxd2820r_t2.c
index 6548588309f7..3a5759e0d235 100644
--- a/drivers/media/dvb/frontends/cxd2820r_t2.c
+++ b/drivers/media/dvb/frontends/cxd2820r_t2.c
@@ -21,13 +21,12 @@
#include "cxd2820r_priv.h"
-int cxd2820r_set_frontend_t2(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *params)
+int cxd2820r_set_frontend_t2(struct dvb_frontend *fe)
{
struct cxd2820r_priv *priv = fe->demodulator_priv;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
- int ret, i;
- u32 if_khz, if_ctl;
+ int ret, i, bw_i;
+ u32 if_freq, if_ctl;
u64 num;
u8 buf[3], bw_param;
u8 bw_params1[][5] = {
@@ -71,6 +70,27 @@ int cxd2820r_set_frontend_t2(struct dvb_frontend *fe,
dbg("%s: RF=%d BW=%d", __func__, c->frequency, c->bandwidth_hz);
+ switch (c->bandwidth_hz) {
+ case 5000000:
+ bw_i = 0;
+ bw_param = 3;
+ break;
+ case 6000000:
+ bw_i = 1;
+ bw_param = 2;
+ break;
+ case 7000000:
+ bw_i = 2;
+ bw_param = 1;
+ break;
+ case 8000000:
+ bw_i = 3;
+ bw_param = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
/* update GPIOs */
ret = cxd2820r_gpio(fe);
if (ret)
@@ -78,7 +98,7 @@ int cxd2820r_set_frontend_t2(struct dvb_frontend *fe,
/* program tuner */
if (fe->ops.tuner_ops.set_params)
- fe->ops.tuner_ops.set_params(fe, params);
+ fe->ops.tuner_ops.set_params(fe);
if (priv->delivery_system != SYS_DVBT2) {
for (i = 0; i < ARRAY_SIZE(tab); i++) {
@@ -91,32 +111,17 @@ int cxd2820r_set_frontend_t2(struct dvb_frontend *fe,
priv->delivery_system = SYS_DVBT2;
- switch (c->bandwidth_hz) {
- case 5000000:
- if_khz = priv->cfg.if_dvbt2_5;
- i = 0;
- bw_param = 3;
- break;
- case 6000000:
- if_khz = priv->cfg.if_dvbt2_6;
- i = 1;
- bw_param = 2;
- break;
- case 7000000:
- if_khz = priv->cfg.if_dvbt2_7;
- i = 2;
- bw_param = 1;
- break;
- case 8000000:
- if_khz = priv->cfg.if_dvbt2_8;
- i = 3;
- bw_param = 0;
- break;
- default:
- return -EINVAL;
- }
+ /* program IF frequency */
+ if (fe->ops.tuner_ops.get_if_frequency) {
+ ret = fe->ops.tuner_ops.get_if_frequency(fe, &if_freq);
+ if (ret)
+ goto error;
+ } else
+ if_freq = 0;
+
+ dbg("%s: if_freq=%d", __func__, if_freq);
- num = if_khz;
+ num = if_freq / 1000; /* Hz => kHz */
num *= 0x1000000;
if_ctl = cxd2820r_div_u64_round_closest(num, 41000);
buf[0] = ((if_ctl >> 16) & 0xff);
@@ -127,7 +132,7 @@ int cxd2820r_set_frontend_t2(struct dvb_frontend *fe,
if (ret)
goto error;
- ret = cxd2820r_wr_regs(priv, 0x0209f, bw_params1[i], 5);
+ ret = cxd2820r_wr_regs(priv, 0x0209f, bw_params1[bw_i], 5);
if (ret)
goto error;
@@ -150,8 +155,7 @@ error:
}
-int cxd2820r_get_frontend_t2(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *p)
+int cxd2820r_get_frontend_t2(struct dvb_frontend *fe)
{
struct cxd2820r_priv *priv = fe->demodulator_priv;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
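With the bandwidth switch moved to the top of cxd2820r_set_frontend_t2(), an unsupported bandwidth is now rejected before the GPIOs or the tuner are touched. The same mapping, restated as a lookup table in a hedged sketch (values copied from the switch above; the struct and helper names are invented for illustration):

#include <stddef.h>
#include <errno.h>

struct t2_bw_map { unsigned int bw_hz; int bw_i; unsigned char bw_param; };

static const struct t2_bw_map t2_bw_tab[] = {
        { 5000000, 0, 3 },
        { 6000000, 1, 2 },
        { 7000000, 2, 1 },
        { 8000000, 3, 0 },
};

/* returns 0 and fills bw_i/bw_param, or -EINVAL as the driver does */
static int t2_lookup_bw(unsigned int bw_hz, int *bw_i, unsigned char *bw_param)
{
        size_t i;

        for (i = 0; i < sizeof(t2_bw_tab) / sizeof(t2_bw_tab[0]); i++) {
                if (t2_bw_tab[i].bw_hz == bw_hz) {
                        *bw_i = t2_bw_tab[i].bw_i;
                        *bw_param = t2_bw_tab[i].bw_param;
                        return 0;
                }
        }
        return -EINVAL;
}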
diff --git a/drivers/media/dvb/frontends/dib0070.c b/drivers/media/dvb/frontends/dib0070.c
index dc1cb17a6ea7..3b024bfe980a 100644
--- a/drivers/media/dvb/frontends/dib0070.c
+++ b/drivers/media/dvb/frontends/dib0070.c
@@ -150,7 +150,7 @@ static int dib0070_write_reg(struct dib0070_state *state, u8 reg, u16 val)
} \
} while (0)
-static int dib0070_set_bandwidth(struct dvb_frontend *fe, struct dvb_frontend_parameters *ch)
+static int dib0070_set_bandwidth(struct dvb_frontend *fe)
{
struct dib0070_state *state = fe->tuner_priv;
u16 tmp = dib0070_read_reg(state, 0x02) & 0x3fff;
@@ -335,7 +335,7 @@ static const struct dib0070_lna_match dib0070_lna[] = {
};
#define LPF 100
-static int dib0070_tune_digital(struct dvb_frontend *fe, struct dvb_frontend_parameters *ch)
+static int dib0070_tune_digital(struct dvb_frontend *fe)
{
struct dib0070_state *state = fe->tuner_priv;
@@ -507,7 +507,7 @@ static int dib0070_tune_digital(struct dvb_frontend *fe, struct dvb_frontend_par
*tune_state = CT_TUNER_STEP_5;
} else if (*tune_state == CT_TUNER_STEP_5) {
- dib0070_set_bandwidth(fe, ch);
+ dib0070_set_bandwidth(fe);
*tune_state = CT_TUNER_STOP;
} else {
ret = FE_CALLBACK_TIME_NEVER; /* tuner finished; next callback time is infinite */
@@ -516,7 +516,7 @@ static int dib0070_tune_digital(struct dvb_frontend *fe, struct dvb_frontend_par
}
-static int dib0070_tune(struct dvb_frontend *fe, struct dvb_frontend_parameters *p)
+static int dib0070_tune(struct dvb_frontend *fe)
{
struct dib0070_state *state = fe->tuner_priv;
uint32_t ret;
@@ -524,7 +524,7 @@ static int dib0070_tune(struct dvb_frontend *fe, struct dvb_frontend_parameters
state->tune_state = CT_TUNER_START;
do {
- ret = dib0070_tune_digital(fe, p);
+ ret = dib0070_tune_digital(fe);
if (ret != FE_CALLBACK_TIME_NEVER)
msleep(ret/10);
else
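The dib0070 hunks follow the convention used throughout this series: tuner callbacks lose the dvb_frontend_parameters argument and read the tuning request from the frontend's DTV property cache instead. A simplified sketch of that convention; the structs below are stand-ins, not the real dvb_frontend.h definitions:

/* stand-in types for illustration only */
struct dtv_props_stub { unsigned int frequency; unsigned int bandwidth_hz; };
struct frontend_stub  { struct dtv_props_stub dtv_property_cache; void *tuner_priv; };

int example_tuner_set_params(struct frontend_stub *fe)
{
        unsigned int freq_hz = fe->dtv_property_cache.frequency;
        unsigned int bw_hz   = fe->dtv_property_cache.bandwidth_hz;

        /* program the PLL and filters from freq_hz / bw_hz here */
        (void)freq_hz;
        (void)bw_hz;
        return 0;
}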
diff --git a/drivers/media/dvb/frontends/dib0090.c b/drivers/media/dvb/frontends/dib0090.c
index b174d1c78583..224d81e85091 100644
--- a/drivers/media/dvb/frontends/dib0090.c
+++ b/drivers/media/dvb/frontends/dib0090.c
@@ -717,6 +717,34 @@ static const u16 rf_ramp_pwm_cband_7090[] = {
(0 << 10) | 109, /* RF_RAMP4, LNA 4 */
};
+static const uint16_t rf_ramp_pwm_cband_7090e_sensitivity[] = {
+ 186,
+ 40,
+ 746,
+ (10 << 10) | 345,
+ (0 << 10) | 746,
+ (0 << 10) | 0,
+ (0 << 10) | 0,
+ (28 << 10) | 200,
+ (0 << 10) | 345,
+ (20 << 10) | 0,
+ (0 << 10) | 200,
+};
+
+static const uint16_t rf_ramp_pwm_cband_7090e_aci[] = {
+ 86,
+ 40,
+ 345,
+ (0 << 10) | 0,
+ (0 << 10) | 0,
+ (0 << 10) | 0,
+ (0 << 10) | 0,
+ (28 << 10) | 200,
+ (0 << 10) | 345,
+ (20 << 10) | 0,
+ (0 << 10) | 200,
+};
+
static const u16 rf_ramp_pwm_cband_8090[] = {
345, /* max RF gain in 10th of dB */
29, /* ramp_slope = 1dB of gain -> clock_ticks_per_db = clk_khz / ramp_slope -> RF_RAMP2 */
@@ -1076,8 +1104,16 @@ void dib0090_pwm_gain_reset(struct dvb_frontend *fe)
dib0090_set_bbramp_pwm(state, bb_ramp_pwm_normal_socs);
if (state->identity.version == SOC_8090_P1G_11R1 || state->identity.version == SOC_8090_P1G_21R1)
dib0090_set_rframp_pwm(state, rf_ramp_pwm_cband_8090);
- else if (state->identity.version == SOC_7090_P1G_11R1 || state->identity.version == SOC_7090_P1G_21R1)
- dib0090_set_rframp_pwm(state, rf_ramp_pwm_cband_7090);
+ else if (state->identity.version == SOC_7090_P1G_11R1
+ || state->identity.version == SOC_7090_P1G_21R1) {
+ if (state->config->is_dib7090e) {
+ if (state->rf_ramp == NULL)
+ dib0090_set_rframp_pwm(state, rf_ramp_pwm_cband_7090e_sensitivity);
+ else
+ dib0090_set_rframp_pwm(state, state->rf_ramp);
+ } else
+ dib0090_set_rframp_pwm(state, rf_ramp_pwm_cband_7090);
+ }
} else {
dib0090_set_rframp_pwm(state, rf_ramp_pwm_cband);
dib0090_set_bbramp_pwm(state, bb_ramp_pwm_normal);
@@ -1112,13 +1148,21 @@ void dib0090_pwm_gain_reset(struct dvb_frontend *fe)
else
dib0090_write_reg(state, 0x32, (0 << 11));
- dib0090_write_reg(state, 0x04, 0x01);
+ dib0090_write_reg(state, 0x04, 0x03);
dib0090_write_reg(state, 0x39, (1 << 10));
}
}
EXPORT_SYMBOL(dib0090_pwm_gain_reset);
+void dib0090_set_dc_servo(struct dvb_frontend *fe, u8 DC_servo_cutoff)
+{
+ struct dib0090_state *state = fe->tuner_priv;
+ if (DC_servo_cutoff < 4)
+ dib0090_write_reg(state, 0x04, DC_servo_cutoff);
+}
+EXPORT_SYMBOL(dib0090_set_dc_servo);
+
static u32 dib0090_get_slow_adc_val(struct dib0090_state *state)
{
u16 adc_val = dib0090_read_reg(state, 0x1d);
@@ -1305,7 +1349,7 @@ void dib0090_get_current_gain(struct dvb_frontend *fe, u16 * rf, u16 * bb, u16 *
EXPORT_SYMBOL(dib0090_get_current_gain);
-u16 dib0090_get_wbd_offset(struct dvb_frontend *fe)
+u16 dib0090_get_wbd_target(struct dvb_frontend *fe)
{
struct dib0090_state *state = fe->tuner_priv;
u32 f_MHz = state->fe->dtv_property_cache.frequency / 1000000;
@@ -1342,9 +1386,57 @@ u16 dib0090_get_wbd_offset(struct dvb_frontend *fe)
return state->wbd_offset + wbd_tcold;
}
+EXPORT_SYMBOL(dib0090_get_wbd_target);
+u16 dib0090_get_wbd_offset(struct dvb_frontend *fe)
+{
+ struct dib0090_state *state = fe->tuner_priv;
+ return state->wbd_offset;
+}
EXPORT_SYMBOL(dib0090_get_wbd_offset);
+int dib0090_set_switch(struct dvb_frontend *fe, u8 sw1, u8 sw2, u8 sw3)
+{
+ struct dib0090_state *state = fe->tuner_priv;
+
+ dib0090_write_reg(state, 0x0b, (dib0090_read_reg(state, 0x0b) & 0xfff8)
+ | ((sw3 & 1) << 2) | ((sw2 & 1) << 1) | (sw1 & 1));
+
+ return 0;
+}
+EXPORT_SYMBOL(dib0090_set_switch);
+
+int dib0090_set_vga(struct dvb_frontend *fe, u8 onoff)
+{
+ struct dib0090_state *state = fe->tuner_priv;
+
+ dib0090_write_reg(state, 0x09, (dib0090_read_reg(state, 0x09) & 0x7fff)
+ | ((onoff & 1) << 15));
+ return 0;
+}
+EXPORT_SYMBOL(dib0090_set_vga);
+
+int dib0090_update_rframp_7090(struct dvb_frontend *fe, u8 cfg_sensitivity)
+{
+ struct dib0090_state *state = fe->tuner_priv;
+
+ if ((!state->identity.p1g) || (!state->identity.in_soc)
+ || ((state->identity.version != SOC_7090_P1G_21R1)
+ && (state->identity.version != SOC_7090_P1G_11R1))) {
+ dprintk("%s() function can only be used for dib7090P", __func__);
+ return -ENODEV;
+ }
+
+ if (cfg_sensitivity)
+ state->rf_ramp = (const u16 *)&rf_ramp_pwm_cband_7090e_sensitivity;
+ else
+ state->rf_ramp = (const u16 *)&rf_ramp_pwm_cband_7090e_aci;
+ dib0090_pwm_gain_reset(fe);
+
+ return 0;
+}
+EXPORT_SYMBOL(dib0090_update_rframp_7090);
+
static const u16 dib0090_defaults[] = {
25, 0x01,
@@ -1430,7 +1522,7 @@ static void dib0090_set_default_config(struct dib0090_state *state, const u16 *
#define POLY_MIN (u8) 0
#define POLY_MAX (u8) 8
-void dib0090_set_EFUSE(struct dib0090_state *state)
+static void dib0090_set_EFUSE(struct dib0090_state *state)
{
u8 c, h, n;
u16 e2, e4;
@@ -1505,7 +1597,10 @@ static int dib0090_reset(struct dvb_frontend *fe)
dib0090_set_EFUSE(state);
/* Configure according to the crystal */
- if (state->config->io.clock_khz >= 24000)
+ if (state->config->force_crystal_mode != 0)
+ dib0090_write_reg(state, 0x14,
+ state->config->force_crystal_mode & 3);
+ else if (state->config->io.clock_khz >= 24000)
dib0090_write_reg(state, 0x14, 1);
else
dib0090_write_reg(state, 0x14, 2);
@@ -1951,6 +2046,52 @@ static const struct dib0090_tuning dib0090_tuning_table_cband_7090[] = {
#endif
};
+static const struct dib0090_tuning dib0090_tuning_table_cband_7090e_sensitivity[] = {
+#ifdef CONFIG_BAND_CBAND
+ { 300000, 0 , 3, 0x8105, 0x2c0, 0x2d12, 0xb84e, EN_CAB },
+ { 380000, 0 , 10, 0x810F, 0x2c0, 0x2d12, 0xb84e, EN_CAB },
+ { 600000, 0 , 10, 0x815E, 0x280, 0x2d12, 0xb84e, EN_CAB },
+ { 660000, 0 , 5, 0x85E3, 0x280, 0x2d12, 0xb84e, EN_CAB },
+ { 720000, 0 , 5, 0x852E, 0x280, 0x2d12, 0xb84e, EN_CAB },
+ { 860000, 0 , 4, 0x85E5, 0x280, 0x2d12, 0xb84e, EN_CAB },
+#endif
+};
+
+int dib0090_update_tuning_table_7090(struct dvb_frontend *fe,
+ u8 cfg_sensitivity)
+{
+ struct dib0090_state *state = fe->tuner_priv;
+ const struct dib0090_tuning *tune =
+ dib0090_tuning_table_cband_7090e_sensitivity;
+ const struct dib0090_tuning dib0090_tuning_table_cband_7090e_aci[] = {
+ { 300000, 0 , 3, 0x8165, 0x2c0, 0x2d12, 0xb84e, EN_CAB },
+ { 650000, 0 , 4, 0x815B, 0x280, 0x2d12, 0xb84e, EN_CAB },
+ { 860000, 0 , 5, 0x84EF, 0x280, 0x2d12, 0xb84e, EN_CAB },
+ };
+
+ if ((!state->identity.p1g) || (!state->identity.in_soc)
+ || ((state->identity.version != SOC_7090_P1G_21R1)
+ && (state->identity.version != SOC_7090_P1G_11R1))) {
+ dprintk("%s() function can only be used for dib7090", __func__);
+ return -ENODEV;
+ }
+
+ if (cfg_sensitivity)
+ tune = dib0090_tuning_table_cband_7090e_sensitivity;
+ else
+ tune = dib0090_tuning_table_cband_7090e_aci;
+
+ while (state->rf_request > tune->max_freq)
+ tune++;
+
+ dib0090_write_reg(state, 0x09, (dib0090_read_reg(state, 0x09) & 0x8000)
+ | (tune->lna_bias & 0x7fff));
+ dib0090_write_reg(state, 0x0b, (dib0090_read_reg(state, 0x0b) & 0xf83f)
+ | ((tune->lna_tune << 6) & 0x07c0));
+ return 0;
+}
+EXPORT_SYMBOL(dib0090_update_tuning_table_7090);
+
static int dib0090_captrim_search(struct dib0090_state *state, enum frontend_tune_state *tune_state)
{
int ret = 0;
@@ -2199,12 +2340,18 @@ static int dib0090_tune(struct dvb_frontend *fe)
if (state->current_band & BAND_CBAND || state->current_band & BAND_FM || state->current_band & BAND_VHF
|| state->current_band & BAND_UHF) {
state->current_band = BAND_CBAND;
- tune = dib0090_tuning_table_cband_7090;
+ if (state->config->is_dib7090e)
+ tune = dib0090_tuning_table_cband_7090e_sensitivity;
+ else
+ tune = dib0090_tuning_table_cband_7090;
}
} else { /* Use the CBAND input for all band under UHF */
if (state->current_band & BAND_CBAND || state->current_band & BAND_FM || state->current_band & BAND_VHF) {
state->current_band = BAND_CBAND;
- tune = dib0090_tuning_table_cband_7090;
+ if (state->config->is_dib7090e)
+ tune = dib0090_tuning_table_cband_7090e_sensitivity;
+ else
+ tune = dib0090_tuning_table_cband_7090;
}
}
} else
@@ -2419,7 +2566,7 @@ static int dib0090_get_frequency(struct dvb_frontend *fe, u32 * frequency)
return 0;
}
-static int dib0090_set_params(struct dvb_frontend *fe, struct dvb_frontend_parameters *p)
+static int dib0090_set_params(struct dvb_frontend *fe)
{
struct dib0090_state *state = fe->tuner_priv;
u32 ret;
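Several of the new dib0090 exports (dib0090_set_switch(), dib0090_set_vga()) are plain read-modify-write helpers on single tuner registers. A small sketch of the bit packing used by dib0090_set_switch(), with register access mocked out; only the mask and the shifts are taken from the code above:

#include <stdint.h>

static uint16_t regs[0x100];                      /* mock register file */
static uint16_t rd(uint8_t r)             { return regs[r]; }
static void     wr(uint8_t r, uint16_t v) { regs[r] = v; }

/* the switches land in bits [2:0] of register 0x0b, the rest is preserved */
static void set_switch(uint8_t sw1, uint8_t sw2, uint8_t sw3)
{
        wr(0x0b, (rd(0x0b) & 0xfff8) |
                 ((sw3 & 1) << 2) | ((sw2 & 1) << 1) | (sw1 & 1));
}

int main(void)
{
        regs[0x0b] = 0x1234;
        set_switch(1, 0, 1);    /* 0x1234 & 0xfff8 = 0x1230, OR 0b101 -> 0x1235 */
        return 0;
}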
diff --git a/drivers/media/dvb/frontends/dib0090.h b/drivers/media/dvb/frontends/dib0090.h
index 13d85244ec16..781dc49de45b 100644
--- a/drivers/media/dvb/frontends/dib0090.h
+++ b/drivers/media/dvb/frontends/dib0090.h
@@ -71,6 +71,8 @@ struct dib0090_config {
u8 fref_clock_ratio;
u16 force_cband_input;
struct dib0090_wbd_slope *wbd;
+ u8 is_dib7090e;
+ u8 force_crystal_mode;
};
#if defined(CONFIG_DVB_TUNER_DIB0090) || (defined(CONFIG_DVB_TUNER_DIB0090_MODULE) && defined(MODULE))
@@ -78,13 +80,21 @@ extern struct dvb_frontend *dib0090_register(struct dvb_frontend *fe, struct i2c
extern struct dvb_frontend *dib0090_fw_register(struct dvb_frontend *fe, struct i2c_adapter *i2c, const struct dib0090_config *config);
extern void dib0090_dcc_freq(struct dvb_frontend *fe, u8 fast);
extern void dib0090_pwm_gain_reset(struct dvb_frontend *fe);
-extern u16 dib0090_get_wbd_offset(struct dvb_frontend *tuner);
+extern u16 dib0090_get_wbd_target(struct dvb_frontend *tuner);
+extern u16 dib0090_get_wbd_offset(struct dvb_frontend *fe);
extern int dib0090_gain_control(struct dvb_frontend *fe);
extern enum frontend_tune_state dib0090_get_tune_state(struct dvb_frontend *fe);
extern int dib0090_set_tune_state(struct dvb_frontend *fe, enum frontend_tune_state tune_state);
extern void dib0090_get_current_gain(struct dvb_frontend *fe, u16 * rf, u16 * bb, u16 * rf_gain_limit, u16 * rflt);
+extern void dib0090_set_dc_servo(struct dvb_frontend *fe, u8 DC_servo_cutoff);
+extern int dib0090_set_switch(struct dvb_frontend *fe, u8 sw1, u8 sw2, u8 sw3);
+extern int dib0090_set_vga(struct dvb_frontend *fe, u8 onoff);
+extern int dib0090_update_rframp_7090(struct dvb_frontend *fe,
+ u8 cfg_sensitivity);
+extern int dib0090_update_tuning_table_7090(struct dvb_frontend *fe,
+ u8 cfg_sensitivity);
#else
-static inline struct dvb_frontend *dib0090_register(struct dvb_frontend *fe, struct i2c_adapter *i2c, struct dib0090_config *config)
+static inline struct dvb_frontend *dib0090_register(struct dvb_frontend *fe, struct i2c_adapter *i2c, const struct dib0090_config *config)
{
printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
return NULL;
@@ -106,7 +116,13 @@ static inline void dib0090_pwm_gain_reset(struct dvb_frontend *fe)
printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
}
-static inline u16 dib0090_get_wbd_offset(struct dvb_frontend *tuner)
+static inline u16 dib0090_get_wbd_target(struct dvb_frontend *tuner)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return 0;
+}
+
+static inline u16 dib0090_get_wbd_offset(struct dvb_frontend *fe)
{
printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
return 0;
@@ -134,6 +150,38 @@ static inline void dib0090_get_current_gain(struct dvb_frontend *fe, u16 * rf, u
{
printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
}
+
+static inline void dib0090_set_dc_servo(struct dvb_frontend *fe, u8 DC_servo_cutoff)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+}
+
+static inline int dib0090_set_switch(struct dvb_frontend *fe,
+ u8 sw1, u8 sw2, u8 sw3)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return -ENODEV;
+}
+
+static inline int dib0090_set_vga(struct dvb_frontend *fe, u8 onoff)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return -ENODEV;
+}
+
+static inline int dib0090_update_rframp_7090(struct dvb_frontend *fe,
+ u8 cfg_sensitivity)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return -ENODEV;
+}
+
+static inline int dib0090_update_tuning_table_7090(struct dvb_frontend *fe,
+ u8 cfg_sensitivity)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return -ENODEV;
+}
#endif
#endif
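The new prototypes in dib0090.h extend the header's existing convention: every exported helper also gets a static inline stub for the Kconfig-disabled build, which warns and returns -ENODEV (or 0 for the value getters) so callers still compile and degrade gracefully. A generic user-space sketch of the pattern, with an invented symbol and config name:

#include <stdio.h>
#include <errno.h>

#if defined(CONFIG_EXAMPLE_TUNER)
int example_tuner_do_something(int arg);
#else
/* stub used when the driver is compiled out */
static inline int example_tuner_do_something(int arg)
{
        fprintf(stderr, "%s: driver disabled by Kconfig\n", __func__);
        return -ENODEV;
}
#endif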
diff --git a/drivers/media/dvb/frontends/dib3000mb.c b/drivers/media/dvb/frontends/dib3000mb.c
index 437904cbf3e6..af91e0c92339 100644
--- a/drivers/media/dvb/frontends/dib3000mb.c
+++ b/drivers/media/dvb/frontends/dib3000mb.c
@@ -112,39 +112,37 @@ static u16 dib3000_seq[2][2][2] = /* fft,gua, inv */
}
};
-static int dib3000mb_get_frontend(struct dvb_frontend* fe,
- struct dvb_frontend_parameters *fep);
+static int dib3000mb_get_frontend(struct dvb_frontend* fe);
-static int dib3000mb_set_frontend(struct dvb_frontend* fe,
- struct dvb_frontend_parameters *fep, int tuner)
+static int dib3000mb_set_frontend(struct dvb_frontend *fe, int tuner)
{
struct dib3000_state* state = fe->demodulator_priv;
- struct dvb_ofdm_parameters *ofdm = &fep->u.ofdm;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
fe_code_rate_t fe_cr = FEC_NONE;
int search_state, seq;
if (tuner && fe->ops.tuner_ops.set_params) {
- fe->ops.tuner_ops.set_params(fe, fep);
+ fe->ops.tuner_ops.set_params(fe);
if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0);
deb_setf("bandwidth: ");
- switch (ofdm->bandwidth) {
- case BANDWIDTH_8_MHZ:
+ switch (c->bandwidth_hz) {
+ case 8000000:
deb_setf("8 MHz\n");
wr_foreach(dib3000mb_reg_timing_freq, dib3000mb_timing_freq[2]);
wr_foreach(dib3000mb_reg_bandwidth, dib3000mb_bandwidth_8mhz);
break;
- case BANDWIDTH_7_MHZ:
+ case 7000000:
deb_setf("7 MHz\n");
wr_foreach(dib3000mb_reg_timing_freq, dib3000mb_timing_freq[1]);
wr_foreach(dib3000mb_reg_bandwidth, dib3000mb_bandwidth_7mhz);
break;
- case BANDWIDTH_6_MHZ:
+ case 6000000:
deb_setf("6 MHz\n");
wr_foreach(dib3000mb_reg_timing_freq, dib3000mb_timing_freq[0]);
wr_foreach(dib3000mb_reg_bandwidth, dib3000mb_bandwidth_6mhz);
break;
- case BANDWIDTH_AUTO:
+ case 0:
return -EOPNOTSUPP;
default:
err("unknown bandwidth value.");
@@ -154,7 +152,7 @@ static int dib3000mb_set_frontend(struct dvb_frontend* fe,
wr(DIB3000MB_REG_LOCK1_MASK, DIB3000MB_LOCK1_SEARCH_4);
deb_setf("transmission mode: ");
- switch (ofdm->transmission_mode) {
+ switch (c->transmission_mode) {
case TRANSMISSION_MODE_2K:
deb_setf("2k\n");
wr(DIB3000MB_REG_FFT, DIB3000_TRANSMISSION_MODE_2K);
@@ -171,7 +169,7 @@ static int dib3000mb_set_frontend(struct dvb_frontend* fe,
}
deb_setf("guard: ");
- switch (ofdm->guard_interval) {
+ switch (c->guard_interval) {
case GUARD_INTERVAL_1_32:
deb_setf("1_32\n");
wr(DIB3000MB_REG_GUARD_TIME, DIB3000_GUARD_TIME_1_32);
@@ -196,7 +194,7 @@ static int dib3000mb_set_frontend(struct dvb_frontend* fe,
}
deb_setf("inversion: ");
- switch (fep->inversion) {
+ switch (c->inversion) {
case INVERSION_OFF:
deb_setf("off\n");
wr(DIB3000MB_REG_DDS_INV, DIB3000_DDS_INVERSION_OFF);
@@ -212,8 +210,8 @@ static int dib3000mb_set_frontend(struct dvb_frontend* fe,
return -EINVAL;
}
- deb_setf("constellation: ");
- switch (ofdm->constellation) {
+ deb_setf("modulation: ");
+ switch (c->modulation) {
case QPSK:
deb_setf("qpsk\n");
wr(DIB3000MB_REG_QAM, DIB3000_CONSTELLATION_QPSK);
@@ -232,7 +230,7 @@ static int dib3000mb_set_frontend(struct dvb_frontend* fe,
return -EINVAL;
}
deb_setf("hierarchy: ");
- switch (ofdm->hierarchy_information) {
+ switch (c->hierarchy) {
case HIERARCHY_NONE:
deb_setf("none ");
/* fall through */
@@ -256,16 +254,16 @@ static int dib3000mb_set_frontend(struct dvb_frontend* fe,
}
deb_setf("hierarchy: ");
- if (ofdm->hierarchy_information == HIERARCHY_NONE) {
+ if (c->hierarchy == HIERARCHY_NONE) {
deb_setf("none\n");
wr(DIB3000MB_REG_VIT_HRCH, DIB3000_HRCH_OFF);
wr(DIB3000MB_REG_VIT_HP, DIB3000_SELECT_HP);
- fe_cr = ofdm->code_rate_HP;
- } else if (ofdm->hierarchy_information != HIERARCHY_AUTO) {
+ fe_cr = c->code_rate_HP;
+ } else if (c->hierarchy != HIERARCHY_AUTO) {
deb_setf("on\n");
wr(DIB3000MB_REG_VIT_HRCH, DIB3000_HRCH_ON);
wr(DIB3000MB_REG_VIT_HP, DIB3000_SELECT_LP);
- fe_cr = ofdm->code_rate_LP;
+ fe_cr = c->code_rate_LP;
}
deb_setf("fec: ");
switch (fe_cr) {
@@ -300,9 +298,9 @@ static int dib3000mb_set_frontend(struct dvb_frontend* fe,
}
seq = dib3000_seq
- [ofdm->transmission_mode == TRANSMISSION_MODE_AUTO]
- [ofdm->guard_interval == GUARD_INTERVAL_AUTO]
- [fep->inversion == INVERSION_AUTO];
+ [c->transmission_mode == TRANSMISSION_MODE_AUTO]
+ [c->guard_interval == GUARD_INTERVAL_AUTO]
+ [c->inversion == INVERSION_AUTO];
deb_setf("seq? %d\n", seq);
@@ -310,8 +308,8 @@ static int dib3000mb_set_frontend(struct dvb_frontend* fe,
wr(DIB3000MB_REG_ISI, seq ? DIB3000MB_ISI_INHIBIT : DIB3000MB_ISI_ACTIVATE);
- if (ofdm->transmission_mode == TRANSMISSION_MODE_2K) {
- if (ofdm->guard_interval == GUARD_INTERVAL_1_8) {
+ if (c->transmission_mode == TRANSMISSION_MODE_2K) {
+ if (c->guard_interval == GUARD_INTERVAL_1_8) {
wr(DIB3000MB_REG_SYNC_IMPROVEMENT, DIB3000MB_SYNC_IMPROVE_2K_1_8);
} else {
wr(DIB3000MB_REG_SYNC_IMPROVEMENT, DIB3000MB_SYNC_IMPROVE_DEFAULT);
@@ -339,10 +337,10 @@ static int dib3000mb_set_frontend(struct dvb_frontend* fe,
wr_foreach(dib3000mb_reg_agc_bandwidth, dib3000mb_agc_bandwidth_low);
/* something has to be auto searched */
- if (ofdm->constellation == QAM_AUTO ||
- ofdm->hierarchy_information == HIERARCHY_AUTO ||
+ if (c->modulation == QAM_AUTO ||
+ c->hierarchy == HIERARCHY_AUTO ||
fe_cr == FEC_AUTO ||
- fep->inversion == INVERSION_AUTO) {
+ c->inversion == INVERSION_AUTO) {
int as_count=0;
deb_setf("autosearch enabled.\n");
@@ -361,10 +359,9 @@ static int dib3000mb_set_frontend(struct dvb_frontend* fe,
deb_setf("search_state after autosearch %d after %d checks\n",search_state,as_count);
if (search_state == 1) {
- struct dvb_frontend_parameters feps;
- if (dib3000mb_get_frontend(fe, &feps) == 0) {
+ if (dib3000mb_get_frontend(fe) == 0) {
deb_setf("reading tuning data from frontend succeeded.\n");
- return dib3000mb_set_frontend(fe, &feps, 0);
+ return dib3000mb_set_frontend(fe, 0);
}
}
@@ -453,11 +450,10 @@ static int dib3000mb_fe_init(struct dvb_frontend* fe, int mobile_mode)
return 0;
}
-static int dib3000mb_get_frontend(struct dvb_frontend* fe,
- struct dvb_frontend_parameters *fep)
+static int dib3000mb_get_frontend(struct dvb_frontend* fe)
{
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct dib3000_state* state = fe->demodulator_priv;
- struct dvb_ofdm_parameters *ofdm = &fep->u.ofdm;
fe_code_rate_t *cr;
u16 tps_val;
int inv_test1,inv_test2;
@@ -484,25 +480,25 @@ static int dib3000mb_get_frontend(struct dvb_frontend* fe,
else
inv_test2 = 2;
- fep->inversion =
+ c->inversion =
((inv_test2 == 2) && (inv_test1==1 || inv_test1==0)) ||
((inv_test2 == 0) && (inv_test1==1 || inv_test1==2)) ?
INVERSION_ON : INVERSION_OFF;
- deb_getf("inversion %d %d, %d\n", inv_test2, inv_test1, fep->inversion);
+ deb_getf("inversion %d %d, %d\n", inv_test2, inv_test1, c->inversion);
switch ((tps_val = rd(DIB3000MB_REG_TPS_QAM))) {
case DIB3000_CONSTELLATION_QPSK:
deb_getf("QPSK ");
- ofdm->constellation = QPSK;
+ c->modulation = QPSK;
break;
case DIB3000_CONSTELLATION_16QAM:
deb_getf("QAM16 ");
- ofdm->constellation = QAM_16;
+ c->modulation = QAM_16;
break;
case DIB3000_CONSTELLATION_64QAM:
deb_getf("QAM64 ");
- ofdm->constellation = QAM_64;
+ c->modulation = QAM_64;
break;
default:
err("Unexpected constellation returned by TPS (%d)", tps_val);
@@ -512,24 +508,24 @@ static int dib3000mb_get_frontend(struct dvb_frontend* fe,
if (rd(DIB3000MB_REG_TPS_HRCH)) {
deb_getf("HRCH ON\n");
- cr = &ofdm->code_rate_LP;
- ofdm->code_rate_HP = FEC_NONE;
+ cr = &c->code_rate_LP;
+ c->code_rate_HP = FEC_NONE;
switch ((tps_val = rd(DIB3000MB_REG_TPS_VIT_ALPHA))) {
case DIB3000_ALPHA_0:
deb_getf("HIERARCHY_NONE ");
- ofdm->hierarchy_information = HIERARCHY_NONE;
+ c->hierarchy = HIERARCHY_NONE;
break;
case DIB3000_ALPHA_1:
deb_getf("HIERARCHY_1 ");
- ofdm->hierarchy_information = HIERARCHY_1;
+ c->hierarchy = HIERARCHY_1;
break;
case DIB3000_ALPHA_2:
deb_getf("HIERARCHY_2 ");
- ofdm->hierarchy_information = HIERARCHY_2;
+ c->hierarchy = HIERARCHY_2;
break;
case DIB3000_ALPHA_4:
deb_getf("HIERARCHY_4 ");
- ofdm->hierarchy_information = HIERARCHY_4;
+ c->hierarchy = HIERARCHY_4;
break;
default:
err("Unexpected ALPHA value returned by TPS (%d)", tps_val);
@@ -540,9 +536,9 @@ static int dib3000mb_get_frontend(struct dvb_frontend* fe,
tps_val = rd(DIB3000MB_REG_TPS_CODE_RATE_LP);
} else {
deb_getf("HRCH OFF\n");
- cr = &ofdm->code_rate_HP;
- ofdm->code_rate_LP = FEC_NONE;
- ofdm->hierarchy_information = HIERARCHY_NONE;
+ cr = &c->code_rate_HP;
+ c->code_rate_LP = FEC_NONE;
+ c->hierarchy = HIERARCHY_NONE;
tps_val = rd(DIB3000MB_REG_TPS_CODE_RATE_HP);
}
@@ -577,19 +573,19 @@ static int dib3000mb_get_frontend(struct dvb_frontend* fe,
switch ((tps_val = rd(DIB3000MB_REG_TPS_GUARD_TIME))) {
case DIB3000_GUARD_TIME_1_32:
deb_getf("GUARD_INTERVAL_1_32 ");
- ofdm->guard_interval = GUARD_INTERVAL_1_32;
+ c->guard_interval = GUARD_INTERVAL_1_32;
break;
case DIB3000_GUARD_TIME_1_16:
deb_getf("GUARD_INTERVAL_1_16 ");
- ofdm->guard_interval = GUARD_INTERVAL_1_16;
+ c->guard_interval = GUARD_INTERVAL_1_16;
break;
case DIB3000_GUARD_TIME_1_8:
deb_getf("GUARD_INTERVAL_1_8 ");
- ofdm->guard_interval = GUARD_INTERVAL_1_8;
+ c->guard_interval = GUARD_INTERVAL_1_8;
break;
case DIB3000_GUARD_TIME_1_4:
deb_getf("GUARD_INTERVAL_1_4 ");
- ofdm->guard_interval = GUARD_INTERVAL_1_4;
+ c->guard_interval = GUARD_INTERVAL_1_4;
break;
default:
err("Unexpected Guard Time returned by TPS (%d)", tps_val);
@@ -600,11 +596,11 @@ static int dib3000mb_get_frontend(struct dvb_frontend* fe,
switch ((tps_val = rd(DIB3000MB_REG_TPS_FFT))) {
case DIB3000_TRANSMISSION_MODE_2K:
deb_getf("TRANSMISSION_MODE_2K ");
- ofdm->transmission_mode = TRANSMISSION_MODE_2K;
+ c->transmission_mode = TRANSMISSION_MODE_2K;
break;
case DIB3000_TRANSMISSION_MODE_8K:
deb_getf("TRANSMISSION_MODE_8K ");
- ofdm->transmission_mode = TRANSMISSION_MODE_8K;
+ c->transmission_mode = TRANSMISSION_MODE_8K;
break;
default:
err("unexpected transmission mode return by TPS (%d)", tps_val);
@@ -701,9 +697,9 @@ static int dib3000mb_fe_init_nonmobile(struct dvb_frontend* fe)
return dib3000mb_fe_init(fe, 0);
}
-static int dib3000mb_set_frontend_and_tuner(struct dvb_frontend* fe, struct dvb_frontend_parameters *fep)
+static int dib3000mb_set_frontend_and_tuner(struct dvb_frontend *fe)
{
- return dib3000mb_set_frontend(fe, fep, 1);
+ return dib3000mb_set_frontend(fe, 1);
}
static void dib3000mb_release(struct dvb_frontend* fe)
@@ -794,10 +790,9 @@ error:
}
static struct dvb_frontend_ops dib3000mb_ops = {
-
+ .delsys = { SYS_DVBT },
.info = {
.name = "DiBcom 3000M-B DVB-T",
- .type = FE_OFDM,
.frequency_min = 44250000,
.frequency_max = 867250000,
.frequency_stepsize = 62500,
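dib3000mb is converted from the legacy BANDWIDTH_* enumeration to bandwidth_hz from the property cache, so the driver now matches on raw Hertz values, with 0 standing in for the old BANDWIDTH_AUTO (still rejected). A hedged restatement of the new validation, kept outside the register-write macros:

#include <errno.h>

/* only the Hz figures from the new switch statement are used here */
static int dib3000mb_bandwidth_ok(unsigned int bw_hz)
{
        switch (bw_hz) {
        case 6000000:
        case 7000000:
        case 8000000:
                return 0;               /* programmed via the dib3000mb_bandwidth_*mhz tables */
        case 0:
                return -EOPNOTSUPP;     /* "auto" bandwidth is not supported */
        default:
                return -EINVAL;         /* unknown bandwidth value */
        }
}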
diff --git a/drivers/media/dvb/frontends/dib3000mb_priv.h b/drivers/media/dvb/frontends/dib3000mb_priv.h
index 16c526591f36..9dc235aa44b7 100644
--- a/drivers/media/dvb/frontends/dib3000mb_priv.h
+++ b/drivers/media/dvb/frontends/dib3000mb_priv.h
@@ -98,7 +98,7 @@ struct dib3000_state {
int timing_offset;
int timing_offset_comp_done;
- fe_bandwidth_t last_tuned_bw;
+ u32 last_tuned_bw;
u32 last_tuned_freq;
};
diff --git a/drivers/media/dvb/frontends/dib3000mc.c b/drivers/media/dvb/frontends/dib3000mc.c
index 088e7fadbe3d..ffad181a9692 100644
--- a/drivers/media/dvb/frontends/dib3000mc.c
+++ b/drivers/media/dvb/frontends/dib3000mc.c
@@ -40,7 +40,7 @@ struct dib3000mc_state {
u32 timf;
- fe_bandwidth_t current_bandwidth;
+ u32 current_bandwidth;
u16 dev_id;
@@ -438,11 +438,14 @@ static void dib3000mc_set_adp_cfg(struct dib3000mc_state *state, s16 qam)
dib3000mc_write_word(state, reg, cfg[reg - 129]);
}
-static void dib3000mc_set_channel_cfg(struct dib3000mc_state *state, struct dvb_frontend_parameters *ch, u16 seq)
+static void dib3000mc_set_channel_cfg(struct dib3000mc_state *state,
+ struct dtv_frontend_properties *ch, u16 seq)
{
u16 value;
- dib3000mc_set_bandwidth(state, BANDWIDTH_TO_KHZ(ch->u.ofdm.bandwidth));
- dib3000mc_set_timing(state, ch->u.ofdm.transmission_mode, BANDWIDTH_TO_KHZ(ch->u.ofdm.bandwidth), 0);
+ u32 bw = BANDWIDTH_TO_KHZ(ch->bandwidth_hz);
+
+ dib3000mc_set_bandwidth(state, bw);
+ dib3000mc_set_timing(state, ch->transmission_mode, bw, 0);
// if (boost)
// dib3000mc_write_word(state, 100, (11 << 6) + 6);
@@ -471,22 +474,22 @@ static void dib3000mc_set_channel_cfg(struct dib3000mc_state *state, struct dvb_
dib3000mc_write_word(state, 97,0);
dib3000mc_write_word(state, 98,0);
- dib3000mc_set_impulse_noise(state, 0, ch->u.ofdm.transmission_mode);
+ dib3000mc_set_impulse_noise(state, 0, ch->transmission_mode);
value = 0;
- switch (ch->u.ofdm.transmission_mode) {
+ switch (ch->transmission_mode) {
case TRANSMISSION_MODE_2K: value |= (0 << 7); break;
default:
case TRANSMISSION_MODE_8K: value |= (1 << 7); break;
}
- switch (ch->u.ofdm.guard_interval) {
+ switch (ch->guard_interval) {
case GUARD_INTERVAL_1_32: value |= (0 << 5); break;
case GUARD_INTERVAL_1_16: value |= (1 << 5); break;
case GUARD_INTERVAL_1_4: value |= (3 << 5); break;
default:
case GUARD_INTERVAL_1_8: value |= (2 << 5); break;
}
- switch (ch->u.ofdm.constellation) {
+ switch (ch->modulation) {
case QPSK: value |= (0 << 3); break;
case QAM_16: value |= (1 << 3); break;
default:
@@ -502,11 +505,11 @@ static void dib3000mc_set_channel_cfg(struct dib3000mc_state *state, struct dvb_
dib3000mc_write_word(state, 5, (1 << 8) | ((seq & 0xf) << 4));
value = 0;
- if (ch->u.ofdm.hierarchy_information == 1)
+ if (ch->hierarchy == 1)
value |= (1 << 4);
if (1 == 1)
value |= 1;
- switch ((ch->u.ofdm.hierarchy_information == 0 || 1 == 1) ? ch->u.ofdm.code_rate_HP : ch->u.ofdm.code_rate_LP) {
+ switch ((ch->hierarchy == 0 || 1 == 1) ? ch->code_rate_HP : ch->code_rate_LP) {
case FEC_2_3: value |= (2 << 1); break;
case FEC_3_4: value |= (3 << 1); break;
case FEC_5_6: value |= (5 << 1); break;
@@ -517,12 +520,12 @@ static void dib3000mc_set_channel_cfg(struct dib3000mc_state *state, struct dvb_
dib3000mc_write_word(state, 181, value);
// diversity synchro delay: add a 50% SFN margin
- switch (ch->u.ofdm.transmission_mode) {
+ switch (ch->transmission_mode) {
case TRANSMISSION_MODE_8K: value = 256; break;
case TRANSMISSION_MODE_2K:
default: value = 64; break;
}
- switch (ch->u.ofdm.guard_interval) {
+ switch (ch->guard_interval) {
case GUARD_INTERVAL_1_16: value *= 2; break;
case GUARD_INTERVAL_1_8: value *= 4; break;
case GUARD_INTERVAL_1_4: value *= 8; break;
@@ -540,27 +543,28 @@ static void dib3000mc_set_channel_cfg(struct dib3000mc_state *state, struct dvb_
msleep(30);
- dib3000mc_set_impulse_noise(state, state->cfg->impulse_noise_mode, ch->u.ofdm.transmission_mode);
+ dib3000mc_set_impulse_noise(state, state->cfg->impulse_noise_mode, ch->transmission_mode);
}
-static int dib3000mc_autosearch_start(struct dvb_frontend *demod, struct dvb_frontend_parameters *chan)
+static int dib3000mc_autosearch_start(struct dvb_frontend *demod)
{
+ struct dtv_frontend_properties *chan = &demod->dtv_property_cache;
struct dib3000mc_state *state = demod->demodulator_priv;
u16 reg;
// u32 val;
- struct dvb_frontend_parameters schan;
+ struct dtv_frontend_properties schan;
schan = *chan;
/* TODO what is that ? */
/* a channel for autosearch */
- schan.u.ofdm.transmission_mode = TRANSMISSION_MODE_8K;
- schan.u.ofdm.guard_interval = GUARD_INTERVAL_1_32;
- schan.u.ofdm.constellation = QAM_64;
- schan.u.ofdm.code_rate_HP = FEC_2_3;
- schan.u.ofdm.code_rate_LP = FEC_2_3;
- schan.u.ofdm.hierarchy_information = 0;
+ schan.transmission_mode = TRANSMISSION_MODE_8K;
+ schan.guard_interval = GUARD_INTERVAL_1_32;
+ schan.modulation = QAM_64;
+ schan.code_rate_HP = FEC_2_3;
+ schan.code_rate_LP = FEC_2_3;
+ schan.hierarchy = 0;
dib3000mc_set_channel_cfg(state, &schan, 11);
@@ -586,8 +590,9 @@ static int dib3000mc_autosearch_is_irq(struct dvb_frontend *demod)
return 0; // still pending
}
-static int dib3000mc_tune(struct dvb_frontend *demod, struct dvb_frontend_parameters *ch)
+static int dib3000mc_tune(struct dvb_frontend *demod)
{
+ struct dtv_frontend_properties *ch = &demod->dtv_property_cache;
struct dib3000mc_state *state = demod->demodulator_priv;
// ** configure demod **
@@ -603,8 +608,8 @@ static int dib3000mc_tune(struct dvb_frontend *demod, struct dvb_frontend_parame
dib3000mc_write_word(state, 108, 0x0000); // P_pha3_force_pha_shift
}
- dib3000mc_set_adp_cfg(state, (u8)ch->u.ofdm.constellation);
- if (ch->u.ofdm.transmission_mode == TRANSMISSION_MODE_8K) {
+ dib3000mc_set_adp_cfg(state, (u8)ch->modulation);
+ if (ch->transmission_mode == TRANSMISSION_MODE_8K) {
dib3000mc_write_word(state, 26, 38528);
dib3000mc_write_word(state, 33, 8);
} else {
@@ -613,7 +618,8 @@ static int dib3000mc_tune(struct dvb_frontend *demod, struct dvb_frontend_parame
}
if (dib3000mc_read_word(state, 509) & 0x80)
- dib3000mc_set_timing(state, ch->u.ofdm.transmission_mode, BANDWIDTH_TO_KHZ(ch->u.ofdm.bandwidth), 1);
+ dib3000mc_set_timing(state, ch->transmission_mode,
+ BANDWIDTH_TO_KHZ(ch->bandwidth_hz), 1);
return 0;
}
@@ -626,87 +632,87 @@ struct i2c_adapter * dib3000mc_get_tuner_i2c_master(struct dvb_frontend *demod,
EXPORT_SYMBOL(dib3000mc_get_tuner_i2c_master);
-static int dib3000mc_get_frontend(struct dvb_frontend* fe,
- struct dvb_frontend_parameters *fep)
+static int dib3000mc_get_frontend(struct dvb_frontend* fe)
{
+ struct dtv_frontend_properties *fep = &fe->dtv_property_cache;
struct dib3000mc_state *state = fe->demodulator_priv;
u16 tps = dib3000mc_read_word(state,458);
fep->inversion = INVERSION_AUTO;
- fep->u.ofdm.bandwidth = state->current_bandwidth;
+ fep->bandwidth_hz = state->current_bandwidth;
switch ((tps >> 8) & 0x1) {
- case 0: fep->u.ofdm.transmission_mode = TRANSMISSION_MODE_2K; break;
- case 1: fep->u.ofdm.transmission_mode = TRANSMISSION_MODE_8K; break;
+ case 0: fep->transmission_mode = TRANSMISSION_MODE_2K; break;
+ case 1: fep->transmission_mode = TRANSMISSION_MODE_8K; break;
}
switch (tps & 0x3) {
- case 0: fep->u.ofdm.guard_interval = GUARD_INTERVAL_1_32; break;
- case 1: fep->u.ofdm.guard_interval = GUARD_INTERVAL_1_16; break;
- case 2: fep->u.ofdm.guard_interval = GUARD_INTERVAL_1_8; break;
- case 3: fep->u.ofdm.guard_interval = GUARD_INTERVAL_1_4; break;
+ case 0: fep->guard_interval = GUARD_INTERVAL_1_32; break;
+ case 1: fep->guard_interval = GUARD_INTERVAL_1_16; break;
+ case 2: fep->guard_interval = GUARD_INTERVAL_1_8; break;
+ case 3: fep->guard_interval = GUARD_INTERVAL_1_4; break;
}
switch ((tps >> 13) & 0x3) {
- case 0: fep->u.ofdm.constellation = QPSK; break;
- case 1: fep->u.ofdm.constellation = QAM_16; break;
+ case 0: fep->modulation = QPSK; break;
+ case 1: fep->modulation = QAM_16; break;
case 2:
- default: fep->u.ofdm.constellation = QAM_64; break;
+ default: fep->modulation = QAM_64; break;
}
/* as long as the frontend_param structure is fixed for hierarchical transmission I refuse to use it */
/* (tps >> 12) & 0x1 == hrch is used, (tps >> 9) & 0x7 == alpha */
- fep->u.ofdm.hierarchy_information = HIERARCHY_NONE;
+ fep->hierarchy = HIERARCHY_NONE;
switch ((tps >> 5) & 0x7) {
- case 1: fep->u.ofdm.code_rate_HP = FEC_1_2; break;
- case 2: fep->u.ofdm.code_rate_HP = FEC_2_3; break;
- case 3: fep->u.ofdm.code_rate_HP = FEC_3_4; break;
- case 5: fep->u.ofdm.code_rate_HP = FEC_5_6; break;
+ case 1: fep->code_rate_HP = FEC_1_2; break;
+ case 2: fep->code_rate_HP = FEC_2_3; break;
+ case 3: fep->code_rate_HP = FEC_3_4; break;
+ case 5: fep->code_rate_HP = FEC_5_6; break;
case 7:
- default: fep->u.ofdm.code_rate_HP = FEC_7_8; break;
+ default: fep->code_rate_HP = FEC_7_8; break;
}
switch ((tps >> 2) & 0x7) {
- case 1: fep->u.ofdm.code_rate_LP = FEC_1_2; break;
- case 2: fep->u.ofdm.code_rate_LP = FEC_2_3; break;
- case 3: fep->u.ofdm.code_rate_LP = FEC_3_4; break;
- case 5: fep->u.ofdm.code_rate_LP = FEC_5_6; break;
+ case 1: fep->code_rate_LP = FEC_1_2; break;
+ case 2: fep->code_rate_LP = FEC_2_3; break;
+ case 3: fep->code_rate_LP = FEC_3_4; break;
+ case 5: fep->code_rate_LP = FEC_5_6; break;
case 7:
- default: fep->u.ofdm.code_rate_LP = FEC_7_8; break;
+ default: fep->code_rate_LP = FEC_7_8; break;
}
return 0;
}
-static int dib3000mc_set_frontend(struct dvb_frontend* fe,
- struct dvb_frontend_parameters *fep)
+static int dib3000mc_set_frontend(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *fep = &fe->dtv_property_cache;
struct dib3000mc_state *state = fe->demodulator_priv;
- int ret;
+ int ret;
dib3000mc_set_output_mode(state, OUTMODE_HIGH_Z);
- state->current_bandwidth = fep->u.ofdm.bandwidth;
- dib3000mc_set_bandwidth(state, BANDWIDTH_TO_KHZ(fep->u.ofdm.bandwidth));
+ state->current_bandwidth = fep->bandwidth_hz;
+ dib3000mc_set_bandwidth(state, BANDWIDTH_TO_KHZ(fep->bandwidth_hz));
/* maybe the parameter has been changed */
state->sfn_workaround_active = buggy_sfn_workaround;
if (fe->ops.tuner_ops.set_params) {
- fe->ops.tuner_ops.set_params(fe, fep);
+ fe->ops.tuner_ops.set_params(fe);
msleep(100);
}
- if (fep->u.ofdm.transmission_mode == TRANSMISSION_MODE_AUTO ||
- fep->u.ofdm.guard_interval == GUARD_INTERVAL_AUTO ||
- fep->u.ofdm.constellation == QAM_AUTO ||
- fep->u.ofdm.code_rate_HP == FEC_AUTO) {
+ if (fep->transmission_mode == TRANSMISSION_MODE_AUTO ||
+ fep->guard_interval == GUARD_INTERVAL_AUTO ||
+ fep->modulation == QAM_AUTO ||
+ fep->code_rate_HP == FEC_AUTO) {
int i = 1000, found;
- dib3000mc_autosearch_start(fe, fep);
+ dib3000mc_autosearch_start(fe);
do {
msleep(1);
found = dib3000mc_autosearch_is_irq(fe);
@@ -716,14 +722,14 @@ static int dib3000mc_set_frontend(struct dvb_frontend* fe,
if (found == 0 || found == 1)
return 0; // no channel found
- dib3000mc_get_frontend(fe, fep);
+ dib3000mc_get_frontend(fe);
}
- ret = dib3000mc_tune(fe, fep);
+ ret = dib3000mc_tune(fe);
/* make this a config parameter */
dib3000mc_set_output_mode(state, OUTMODE_MPEG2_FIFO);
- return ret;
+ return ret;
}
static int dib3000mc_read_status(struct dvb_frontend *fe, fe_status_t *stat)
@@ -897,9 +903,9 @@ error:
EXPORT_SYMBOL(dib3000mc_attach);
static struct dvb_frontend_ops dib3000mc_ops = {
+ .delsys = { SYS_DVBT },
.info = {
.name = "DiBcom 3000MC/P",
- .type = FE_OFDM,
.frequency_min = 44250000,
.frequency_max = 867250000,
.frequency_stepsize = 62500,
diff --git a/drivers/media/dvb/frontends/dib7000m.c b/drivers/media/dvb/frontends/dib7000m.c
index dbb76d75c932..148bf79236fb 100644
--- a/drivers/media/dvb/frontends/dib7000m.c
+++ b/drivers/media/dvb/frontends/dib7000m.c
@@ -38,7 +38,7 @@ struct dib7000m_state {
u16 wbd_ref;
u8 current_band;
- fe_bandwidth_t current_bandwidth;
+ u32 current_bandwidth;
struct dibx000_agc_config *current_agc;
u32 timf;
u32 timf_default;
@@ -313,6 +313,9 @@ static int dib7000m_set_bandwidth(struct dib7000m_state *state, u32 bw)
{
u32 timf;
+ if (!bw)
+ bw = 8000;
+
// store the current bandwidth for later use
state->current_bandwidth = bw;
@@ -742,8 +745,9 @@ static void dib7000m_update_timf(struct dib7000m_state *state)
dprintk( "updated timf_frequency: %d (default: %d)",state->timf, state->timf_default);
}
-static int dib7000m_agc_startup(struct dvb_frontend *demod, struct dvb_frontend_parameters *ch)
+static int dib7000m_agc_startup(struct dvb_frontend *demod)
{
+ struct dtv_frontend_properties *ch = &demod->dtv_property_cache;
struct dib7000m_state *state = demod->demodulator_priv;
u16 cfg_72 = dib7000m_read_word(state, 72);
int ret = -1;
@@ -832,28 +836,29 @@ static int dib7000m_agc_startup(struct dvb_frontend *demod, struct dvb_frontend_
return ret;
}
-static void dib7000m_set_channel(struct dib7000m_state *state, struct dvb_frontend_parameters *ch, u8 seq)
+static void dib7000m_set_channel(struct dib7000m_state *state, struct dtv_frontend_properties *ch,
+ u8 seq)
{
u16 value, est[4];
- dib7000m_set_bandwidth(state, BANDWIDTH_TO_KHZ(ch->u.ofdm.bandwidth));
+ dib7000m_set_bandwidth(state, BANDWIDTH_TO_KHZ(ch->bandwidth_hz));
/* nfft, guard, qam, alpha */
value = 0;
- switch (ch->u.ofdm.transmission_mode) {
+ switch (ch->transmission_mode) {
case TRANSMISSION_MODE_2K: value |= (0 << 7); break;
case TRANSMISSION_MODE_4K: value |= (2 << 7); break;
default:
case TRANSMISSION_MODE_8K: value |= (1 << 7); break;
}
- switch (ch->u.ofdm.guard_interval) {
+ switch (ch->guard_interval) {
case GUARD_INTERVAL_1_32: value |= (0 << 5); break;
case GUARD_INTERVAL_1_16: value |= (1 << 5); break;
case GUARD_INTERVAL_1_4: value |= (3 << 5); break;
default:
case GUARD_INTERVAL_1_8: value |= (2 << 5); break;
}
- switch (ch->u.ofdm.constellation) {
+ switch (ch->modulation) {
case QPSK: value |= (0 << 3); break;
case QAM_16: value |= (1 << 3); break;
default:
@@ -872,11 +877,11 @@ static void dib7000m_set_channel(struct dib7000m_state *state, struct dvb_fronte
value = 0;
if (1 != 0)
value |= (1 << 6);
- if (ch->u.ofdm.hierarchy_information == 1)
+ if (ch->hierarchy == 1)
value |= (1 << 4);
if (1 == 1)
value |= 1;
- switch ((ch->u.ofdm.hierarchy_information == 0 || 1 == 1) ? ch->u.ofdm.code_rate_HP : ch->u.ofdm.code_rate_LP) {
+ switch ((ch->hierarchy == 0 || 1 == 1) ? ch->code_rate_HP : ch->code_rate_LP) {
case FEC_2_3: value |= (2 << 1); break;
case FEC_3_4: value |= (3 << 1); break;
case FEC_5_6: value |= (5 << 1); break;
@@ -901,13 +906,13 @@ static void dib7000m_set_channel(struct dib7000m_state *state, struct dvb_fronte
dib7000m_write_word(state, 33, (0 << 4) | 0x5);
/* P_dvsy_sync_wait */
- switch (ch->u.ofdm.transmission_mode) {
+ switch (ch->transmission_mode) {
case TRANSMISSION_MODE_8K: value = 256; break;
case TRANSMISSION_MODE_4K: value = 128; break;
case TRANSMISSION_MODE_2K:
default: value = 64; break;
}
- switch (ch->u.ofdm.guard_interval) {
+ switch (ch->guard_interval) {
case GUARD_INTERVAL_1_16: value *= 2; break;
case GUARD_INTERVAL_1_8: value *= 4; break;
case GUARD_INTERVAL_1_4: value *= 8; break;
@@ -925,7 +930,7 @@ static void dib7000m_set_channel(struct dib7000m_state *state, struct dvb_fronte
dib7000m_set_diversity_in(&state->demod, state->div_state);
/* channel estimation fine configuration */
- switch (ch->u.ofdm.constellation) {
+ switch (ch->modulation) {
case QAM_64:
est[0] = 0x0148; /* P_adp_regul_cnt 0.04 */
est[1] = 0xfff0; /* P_adp_noise_cnt -0.002 */
@@ -952,25 +957,26 @@ static void dib7000m_set_channel(struct dib7000m_state *state, struct dvb_fronte
dib7000m_set_power_mode(state, DIB7000M_POWER_COR4_DINTLV_ICIRM_EQUAL_CFROD);
}
-static int dib7000m_autosearch_start(struct dvb_frontend *demod, struct dvb_frontend_parameters *ch)
+static int dib7000m_autosearch_start(struct dvb_frontend *demod)
{
+ struct dtv_frontend_properties *ch = &demod->dtv_property_cache;
struct dib7000m_state *state = demod->demodulator_priv;
- struct dvb_frontend_parameters schan;
+ struct dtv_frontend_properties schan;
int ret = 0;
u32 value, factor;
schan = *ch;
- schan.u.ofdm.constellation = QAM_64;
- schan.u.ofdm.guard_interval = GUARD_INTERVAL_1_32;
- schan.u.ofdm.transmission_mode = TRANSMISSION_MODE_8K;
- schan.u.ofdm.code_rate_HP = FEC_2_3;
- schan.u.ofdm.code_rate_LP = FEC_3_4;
- schan.u.ofdm.hierarchy_information = 0;
+ schan.modulation = QAM_64;
+ schan.guard_interval = GUARD_INTERVAL_1_32;
+ schan.transmission_mode = TRANSMISSION_MODE_8K;
+ schan.code_rate_HP = FEC_2_3;
+ schan.code_rate_LP = FEC_3_4;
+ schan.hierarchy = 0;
dib7000m_set_channel(state, &schan, 7);
- factor = BANDWIDTH_TO_KHZ(ch->u.ofdm.bandwidth);
+ factor = BANDWIDTH_TO_KHZ(schan.bandwidth_hz);
if (factor >= 5000)
factor = 1;
else
@@ -1027,8 +1033,9 @@ static int dib7000m_autosearch_is_irq(struct dvb_frontend *demod)
return dib7000m_autosearch_irq(state, 537);
}
-static int dib7000m_tune(struct dvb_frontend *demod, struct dvb_frontend_parameters *ch)
+static int dib7000m_tune(struct dvb_frontend *demod)
{
+ struct dtv_frontend_properties *ch = &demod->dtv_property_cache;
struct dib7000m_state *state = demod->demodulator_priv;
int ret = 0;
u16 value;
@@ -1055,7 +1062,7 @@ static int dib7000m_tune(struct dvb_frontend *demod, struct dvb_frontend_paramet
//dump_reg(state);
/* P_timf_alpha, P_corm_alpha=6, P_corm_thres=0x80 */
value = (6 << 8) | 0x80;
- switch (ch->u.ofdm.transmission_mode) {
+ switch (ch->transmission_mode) {
case TRANSMISSION_MODE_2K: value |= (7 << 12); break;
case TRANSMISSION_MODE_4K: value |= (8 << 12); break;
default:
@@ -1065,7 +1072,7 @@ static int dib7000m_tune(struct dvb_frontend *demod, struct dvb_frontend_paramet
/* P_ctrl_freeze_pha_shift=0, P_ctrl_pha_off_max */
value = (0 << 4);
- switch (ch->u.ofdm.transmission_mode) {
+ switch (ch->transmission_mode) {
case TRANSMISSION_MODE_2K: value |= 0x6; break;
case TRANSMISSION_MODE_4K: value |= 0x7; break;
default:
@@ -1075,7 +1082,7 @@ static int dib7000m_tune(struct dvb_frontend *demod, struct dvb_frontend_paramet
/* P_ctrl_sfreq_inh=0, P_ctrl_sfreq_step */
value = (0 << 4);
- switch (ch->u.ofdm.transmission_mode) {
+ switch (ch->transmission_mode) {
case TRANSMISSION_MODE_2K: value |= 0x6; break;
case TRANSMISSION_MODE_4K: value |= 0x7; break;
default:
@@ -1087,7 +1094,7 @@ static int dib7000m_tune(struct dvb_frontend *demod, struct dvb_frontend_paramet
if ((dib7000m_read_word(state, 535) >> 6) & 0x1)
dib7000m_update_timf(state);
- dib7000m_set_bandwidth(state, BANDWIDTH_TO_KHZ(ch->u.ofdm.bandwidth));
+ dib7000m_set_bandwidth(state, BANDWIDTH_TO_KHZ(ch->bandwidth_hz));
return ret;
}
@@ -1147,57 +1154,57 @@ static int dib7000m_identify(struct dib7000m_state *state)
}
-static int dib7000m_get_frontend(struct dvb_frontend* fe,
- struct dvb_frontend_parameters *fep)
+static int dib7000m_get_frontend(struct dvb_frontend* fe)
{
+ struct dtv_frontend_properties *fep = &fe->dtv_property_cache;
struct dib7000m_state *state = fe->demodulator_priv;
u16 tps = dib7000m_read_word(state,480);
fep->inversion = INVERSION_AUTO;
- fep->u.ofdm.bandwidth = state->current_bandwidth;
+ fep->bandwidth_hz = BANDWIDTH_TO_HZ(state->current_bandwidth);
switch ((tps >> 8) & 0x3) {
- case 0: fep->u.ofdm.transmission_mode = TRANSMISSION_MODE_2K; break;
- case 1: fep->u.ofdm.transmission_mode = TRANSMISSION_MODE_8K; break;
- /* case 2: fep->u.ofdm.transmission_mode = TRANSMISSION_MODE_4K; break; */
+ case 0: fep->transmission_mode = TRANSMISSION_MODE_2K; break;
+ case 1: fep->transmission_mode = TRANSMISSION_MODE_8K; break;
+ /* case 2: fep->transmission_mode = TRANSMISSION_MODE_4K; break; */
}
switch (tps & 0x3) {
- case 0: fep->u.ofdm.guard_interval = GUARD_INTERVAL_1_32; break;
- case 1: fep->u.ofdm.guard_interval = GUARD_INTERVAL_1_16; break;
- case 2: fep->u.ofdm.guard_interval = GUARD_INTERVAL_1_8; break;
- case 3: fep->u.ofdm.guard_interval = GUARD_INTERVAL_1_4; break;
+ case 0: fep->guard_interval = GUARD_INTERVAL_1_32; break;
+ case 1: fep->guard_interval = GUARD_INTERVAL_1_16; break;
+ case 2: fep->guard_interval = GUARD_INTERVAL_1_8; break;
+ case 3: fep->guard_interval = GUARD_INTERVAL_1_4; break;
}
switch ((tps >> 14) & 0x3) {
- case 0: fep->u.ofdm.constellation = QPSK; break;
- case 1: fep->u.ofdm.constellation = QAM_16; break;
+ case 0: fep->modulation = QPSK; break;
+ case 1: fep->modulation = QAM_16; break;
case 2:
- default: fep->u.ofdm.constellation = QAM_64; break;
+ default: fep->modulation = QAM_64; break;
}
/* as long as the frontend_param structure is fixed for hierarchical transmission I refuse to use it */
/* (tps >> 13) & 0x1 == hrch is used, (tps >> 10) & 0x7 == alpha */
- fep->u.ofdm.hierarchy_information = HIERARCHY_NONE;
+ fep->hierarchy = HIERARCHY_NONE;
switch ((tps >> 5) & 0x7) {
- case 1: fep->u.ofdm.code_rate_HP = FEC_1_2; break;
- case 2: fep->u.ofdm.code_rate_HP = FEC_2_3; break;
- case 3: fep->u.ofdm.code_rate_HP = FEC_3_4; break;
- case 5: fep->u.ofdm.code_rate_HP = FEC_5_6; break;
+ case 1: fep->code_rate_HP = FEC_1_2; break;
+ case 2: fep->code_rate_HP = FEC_2_3; break;
+ case 3: fep->code_rate_HP = FEC_3_4; break;
+ case 5: fep->code_rate_HP = FEC_5_6; break;
case 7:
- default: fep->u.ofdm.code_rate_HP = FEC_7_8; break;
+ default: fep->code_rate_HP = FEC_7_8; break;
}
switch ((tps >> 2) & 0x7) {
- case 1: fep->u.ofdm.code_rate_LP = FEC_1_2; break;
- case 2: fep->u.ofdm.code_rate_LP = FEC_2_3; break;
- case 3: fep->u.ofdm.code_rate_LP = FEC_3_4; break;
- case 5: fep->u.ofdm.code_rate_LP = FEC_5_6; break;
+ case 1: fep->code_rate_LP = FEC_1_2; break;
+ case 2: fep->code_rate_LP = FEC_2_3; break;
+ case 3: fep->code_rate_LP = FEC_3_4; break;
+ case 5: fep->code_rate_LP = FEC_5_6; break;
case 7:
- default: fep->u.ofdm.code_rate_LP = FEC_7_8; break;
+ default: fep->code_rate_LP = FEC_7_8; break;
}
/* native interleaver: (dib7000m_read_word(state, 481) >> 5) & 0x1 */
@@ -1205,35 +1212,34 @@ static int dib7000m_get_frontend(struct dvb_frontend* fe,
return 0;
}
-static int dib7000m_set_frontend(struct dvb_frontend* fe,
- struct dvb_frontend_parameters *fep)
+static int dib7000m_set_frontend(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *fep = &fe->dtv_property_cache;
struct dib7000m_state *state = fe->demodulator_priv;
int time, ret;
- dib7000m_set_output_mode(state, OUTMODE_HIGH_Z);
+ dib7000m_set_output_mode(state, OUTMODE_HIGH_Z);
- state->current_bandwidth = fep->u.ofdm.bandwidth;
- dib7000m_set_bandwidth(state, BANDWIDTH_TO_KHZ(fep->u.ofdm.bandwidth));
+ dib7000m_set_bandwidth(state, BANDWIDTH_TO_KHZ(fep->bandwidth_hz));
if (fe->ops.tuner_ops.set_params)
- fe->ops.tuner_ops.set_params(fe, fep);
+ fe->ops.tuner_ops.set_params(fe);
/* start up the AGC */
state->agc_state = 0;
do {
- time = dib7000m_agc_startup(fe, fep);
+ time = dib7000m_agc_startup(fe);
if (time != -1)
msleep(time);
} while (time != -1);
- if (fep->u.ofdm.transmission_mode == TRANSMISSION_MODE_AUTO ||
- fep->u.ofdm.guard_interval == GUARD_INTERVAL_AUTO ||
- fep->u.ofdm.constellation == QAM_AUTO ||
- fep->u.ofdm.code_rate_HP == FEC_AUTO) {
+ if (fep->transmission_mode == TRANSMISSION_MODE_AUTO ||
+ fep->guard_interval == GUARD_INTERVAL_AUTO ||
+ fep->modulation == QAM_AUTO ||
+ fep->code_rate_HP == FEC_AUTO) {
int i = 800, found;
- dib7000m_autosearch_start(fe, fep);
+ dib7000m_autosearch_start(fe);
do {
msleep(1);
found = dib7000m_autosearch_is_irq(fe);
@@ -1243,10 +1249,10 @@ static int dib7000m_set_frontend(struct dvb_frontend* fe,
if (found == 0 || found == 1)
return 0; // no channel found
- dib7000m_get_frontend(fe, fep);
+ dib7000m_get_frontend(fe);
}
- ret = dib7000m_tune(fe, fep);
+ ret = dib7000m_tune(fe);
/* make this a config parameter */
dib7000m_set_output_mode(state, OUTMODE_MPEG2_FIFO);
@@ -1430,9 +1436,9 @@ error:
EXPORT_SYMBOL(dib7000m_attach);
static struct dvb_frontend_ops dib7000m_ops = {
+ .delsys = { SYS_DVBT },
.info = {
.name = "DiBcom 7000MA/MB/PA/PB/MC",
- .type = FE_OFDM,
.frequency_min = 44250000,
.frequency_max = 867250000,
.frequency_stepsize = 62500,
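dib7000m (like dib3000mc above) keeps current_bandwidth in kHz but exchanges Hz with the property cache, going through BANDWIDTH_TO_KHZ()/BANDWIDTH_TO_HZ(). Their definitions are not part of this diff; the sketch below assumes they are plain unit conversions, with the 0 ("auto") case covered by the explicit "if (!bw) bw = 8000;" default added to dib7000m_set_bandwidth():

/* assumption: straight Hz <-> kHz conversions, as used by this driver */
static unsigned int bandwidth_to_khz(unsigned int hz)  { return hz / 1000; }
static unsigned int bandwidth_to_hz(unsigned int khz)  { return khz * 1000; }

/* e.g. an 8 MHz request: 8000000 Hz -> 8000 kHz for the demod tables,
 * and the stored 8000 kHz -> 8000000 Hz when get_frontend() fills the
 * property cache back in. */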
diff --git a/drivers/media/dvb/frontends/dib7000p.c b/drivers/media/dvb/frontends/dib7000p.c
index ce8534ff142e..5ceadc285b3a 100644
--- a/drivers/media/dvb/frontends/dib7000p.c
+++ b/drivers/media/dvb/frontends/dib7000p.c
@@ -70,6 +70,8 @@ struct dib7000p_state {
u8 i2c_write_buffer[4];
u8 i2c_read_buffer[2];
struct mutex i2c_buffer_lock;
+
+ u8 input_mode_mpeg;
};
enum dib7000p_power_mode {
@@ -78,8 +80,11 @@ enum dib7000p_power_mode {
DIB7000P_POWER_INTERFACE_ONLY,
};
+/* dib7090 specific functions */
static int dib7090_set_output_mode(struct dvb_frontend *fe, int mode);
static int dib7090_set_diversity_in(struct dvb_frontend *fe, int onoff);
+static void dib7090_setDibTxMux(struct dib7000p_state *state, int mode);
+static void dib7090_setHostBusMux(struct dib7000p_state *state, int mode);
static u16 dib7000p_read_word(struct dib7000p_state *state, u16 reg)
{
@@ -276,17 +281,23 @@ static int dib7000p_set_power_mode(struct dib7000p_state *state, enum dib7000p_p
dib7000p_write_word(state, 774, reg_774);
dib7000p_write_word(state, 775, reg_775);
dib7000p_write_word(state, 776, reg_776);
- dib7000p_write_word(state, 899, reg_899);
dib7000p_write_word(state, 1280, reg_1280);
+ if (state->version != SOC7090)
+ dib7000p_write_word(state, 899, reg_899);
return 0;
}
static void dib7000p_set_adc_state(struct dib7000p_state *state, enum dibx000_adc_states no)
{
- u16 reg_908 = dib7000p_read_word(state, 908), reg_909 = dib7000p_read_word(state, 909);
+ u16 reg_908 = 0, reg_909 = 0;
u16 reg;
+ if (state->version != SOC7090) {
+ reg_908 = dib7000p_read_word(state, 908);
+ reg_909 = dib7000p_read_word(state, 909);
+ }
+
switch (no) {
case DIBX000_SLOW_ADC_ON:
if (state->version == SOC7090) {
@@ -342,8 +353,10 @@ static void dib7000p_set_adc_state(struct dib7000p_state *state, enum dibx000_ad
reg_909 |= (state->cfg.disable_sample_and_hold & 1) << 4;
reg_908 |= (state->cfg.enable_current_mirror & 1) << 7;
- dib7000p_write_word(state, 908, reg_908);
- dib7000p_write_word(state, 909, reg_909);
+ if (state->version != SOC7090) {
+ dib7000p_write_word(state, 908, reg_908);
+ dib7000p_write_word(state, 909, reg_909);
+ }
}
static int dib7000p_set_bandwidth(struct dib7000p_state *state, u32 bw)
@@ -398,6 +411,24 @@ int dib7000p_set_wbd_ref(struct dvb_frontend *demod, u16 value)
}
EXPORT_SYMBOL(dib7000p_set_wbd_ref);
+int dib7000p_get_agc_values(struct dvb_frontend *fe,
+ u16 *agc_global, u16 *agc1, u16 *agc2, u16 *wbd)
+{
+ struct dib7000p_state *state = fe->demodulator_priv;
+
+ if (agc_global != NULL)
+ *agc_global = dib7000p_read_word(state, 394);
+ if (agc1 != NULL)
+ *agc1 = dib7000p_read_word(state, 392);
+ if (agc2 != NULL)
+ *agc2 = dib7000p_read_word(state, 393);
+ if (wbd != NULL)
+ *wbd = dib7000p_read_word(state, 397);
+
+ return 0;
+}
+EXPORT_SYMBOL(dib7000p_get_agc_values);
+
static void dib7000p_reset_pll(struct dib7000p_state *state)
{
struct dibx000_bandwidth_config *bw = &state->cfg.bw[0];
@@ -519,7 +550,7 @@ static u16 dib7000p_defaults[] = {
// auto search configuration
3, 2,
0x0004,
- 0x1000,
+ (1<<3)|(1<<11)|(1<<12)|(1<<13),
0x0814, /* Equal Lock */
12, 6,
@@ -595,13 +626,6 @@ static u16 dib7000p_defaults[] = {
1, 235,
0x0062,
- 2, 901,
- 0x0006,
- (3 << 10) | (1 << 6),
-
- 1, 905,
- 0x2c8e,
-
0,
};
@@ -618,15 +642,18 @@ static int dib7000p_demod_reset(struct dib7000p_state *state)
dib7000p_write_word(state, 770, 0xffff);
dib7000p_write_word(state, 771, 0xffff);
dib7000p_write_word(state, 772, 0x001f);
- dib7000p_write_word(state, 898, 0x0003);
dib7000p_write_word(state, 1280, 0x001f - ((1 << 4) | (1 << 3)));
dib7000p_write_word(state, 770, 0);
dib7000p_write_word(state, 771, 0);
dib7000p_write_word(state, 772, 0);
- dib7000p_write_word(state, 898, 0);
dib7000p_write_word(state, 1280, 0);
+ if (state->version != SOC7090) {
+ dib7000p_write_word(state, 898, 0x0003);
+ dib7000p_write_word(state, 898, 0);
+ }
+
/* default */
dib7000p_reset_pll(state);
@@ -640,7 +667,7 @@ static int dib7000p_demod_reset(struct dib7000p_state *state)
dib7000p_write_word(state, 42, (1<<5) | 3); /* P_iqc_thsat_ipc = 1 ; P_iqc_win2 = 3 */
dib7000p_write_word(state, 43, 0x2d4); /*-300 fag P_iqc_dect_min = -280 */
dib7000p_write_word(state, 44, 300); /* 300 fag P_iqc_dect_min = +280 */
- dib7000p_write_word(state, 273, (1<<6) | 30);
+ dib7000p_write_word(state, 273, (0<<6) | 30);
}
if (dib7000p_set_output_mode(state, OUTMODE_HIGH_Z) != 0)
dprintk("OUTPUT_MODE could not be reset.");
@@ -655,7 +682,7 @@ static int dib7000p_demod_reset(struct dib7000p_state *state)
dib7000p_set_bandwidth(state, 8000);
if (state->version == SOC7090) {
- dib7000p_write_word(state, 36, 0x5755);/* P_iqc_impnc_on =1 & P_iqc_corr_inh = 1 for impulsive noise */
+ dib7000p_write_word(state, 36, 0x0755);/* P_iqc_impnc_on =1 & P_iqc_corr_inh = 1 for impulsive noise */
} else {
if (state->cfg.tuner_is_baseband)
dib7000p_write_word(state, 36, 0x0755);
@@ -664,6 +691,11 @@ static int dib7000p_demod_reset(struct dib7000p_state *state)
}
dib7000p_write_tab(state, dib7000p_defaults);
+ if (state->version != SOC7090) {
+ dib7000p_write_word(state, 901, 0x0006);
+ dib7000p_write_word(state, 902, (3 << 10) | (1 << 6));
+ dib7000p_write_word(state, 905, 0x2c8e);
+ }
dib7000p_set_power_mode(state, DIB7000P_POWER_INTERFACE_ONLY);
@@ -780,8 +812,9 @@ static void dib7000p_set_dds(struct dib7000p_state *state, s32 offset_khz)
}
}
-static int dib7000p_agc_startup(struct dvb_frontend *demod, struct dvb_frontend_parameters *ch)
+static int dib7000p_agc_startup(struct dvb_frontend *demod)
{
+ struct dtv_frontend_properties *ch = &demod->dtv_property_cache;
struct dib7000p_state *state = demod->demodulator_priv;
int ret = -1;
u8 *agc_state = &state->agc_state;
@@ -904,15 +937,16 @@ u32 dib7000p_ctrl_timf(struct dvb_frontend *fe, u8 op, u32 timf)
}
EXPORT_SYMBOL(dib7000p_ctrl_timf);
-static void dib7000p_set_channel(struct dib7000p_state *state, struct dvb_frontend_parameters *ch, u8 seq)
+static void dib7000p_set_channel(struct dib7000p_state *state,
+ struct dtv_frontend_properties *ch, u8 seq)
{
u16 value, est[4];
- dib7000p_set_bandwidth(state, BANDWIDTH_TO_KHZ(ch->u.ofdm.bandwidth));
+ dib7000p_set_bandwidth(state, BANDWIDTH_TO_KHZ(ch->bandwidth_hz));
/* nfft, guard, qam, alpha */
value = 0;
- switch (ch->u.ofdm.transmission_mode) {
+ switch (ch->transmission_mode) {
case TRANSMISSION_MODE_2K:
value |= (0 << 7);
break;
@@ -924,7 +958,7 @@ static void dib7000p_set_channel(struct dib7000p_state *state, struct dvb_fronte
value |= (1 << 7);
break;
}
- switch (ch->u.ofdm.guard_interval) {
+ switch (ch->guard_interval) {
case GUARD_INTERVAL_1_32:
value |= (0 << 5);
break;
@@ -939,7 +973,7 @@ static void dib7000p_set_channel(struct dib7000p_state *state, struct dvb_fronte
value |= (2 << 5);
break;
}
- switch (ch->u.ofdm.constellation) {
+ switch (ch->modulation) {
case QPSK:
value |= (0 << 3);
break;
@@ -970,11 +1004,11 @@ static void dib7000p_set_channel(struct dib7000p_state *state, struct dvb_fronte
value = 0;
if (1 != 0)
value |= (1 << 6);
- if (ch->u.ofdm.hierarchy_information == 1)
+ if (ch->hierarchy == 1)
value |= (1 << 4);
if (1 == 1)
value |= 1;
- switch ((ch->u.ofdm.hierarchy_information == 0 || 1 == 1) ? ch->u.ofdm.code_rate_HP : ch->u.ofdm.code_rate_LP) {
+ switch ((ch->hierarchy == 0 || 1 == 1) ? ch->code_rate_HP : ch->code_rate_LP) {
case FEC_2_3:
value |= (2 << 1);
break;
@@ -1001,7 +1035,7 @@ static void dib7000p_set_channel(struct dib7000p_state *state, struct dvb_fronte
dib7000p_write_word(state, 33, 0x0005);
/* P_dvsy_sync_wait */
- switch (ch->u.ofdm.transmission_mode) {
+ switch (ch->transmission_mode) {
case TRANSMISSION_MODE_8K:
value = 256;
break;
@@ -1013,7 +1047,7 @@ static void dib7000p_set_channel(struct dib7000p_state *state, struct dvb_fronte
value = 64;
break;
}
- switch (ch->u.ofdm.guard_interval) {
+ switch (ch->guard_interval) {
case GUARD_INTERVAL_1_16:
value *= 2;
break;
@@ -1034,11 +1068,11 @@ static void dib7000p_set_channel(struct dib7000p_state *state, struct dvb_fronte
state->div_sync_wait = (value * 3) / 2 + state->cfg.diversity_delay;
/* deactive the possibility of diversity reception if extended interleaver */
- state->div_force_off = !1 && ch->u.ofdm.transmission_mode != TRANSMISSION_MODE_8K;
+ state->div_force_off = !1 && ch->transmission_mode != TRANSMISSION_MODE_8K;
dib7000p_set_diversity_in(&state->demod, state->div_state);
/* channel estimation fine configuration */
- switch (ch->u.ofdm.constellation) {
+ switch (ch->modulation) {
case QAM_64:
est[0] = 0x0148; /* P_adp_regul_cnt 0.04 */
est[1] = 0xfff0; /* P_adp_noise_cnt -0.002 */
@@ -1062,27 +1096,31 @@ static void dib7000p_set_channel(struct dib7000p_state *state, struct dvb_fronte
dib7000p_write_word(state, 187 + value, est[value]);
}
-static int dib7000p_autosearch_start(struct dvb_frontend *demod, struct dvb_frontend_parameters *ch)
+static int dib7000p_autosearch_start(struct dvb_frontend *demod)
{
+ struct dtv_frontend_properties *ch = &demod->dtv_property_cache;
struct dib7000p_state *state = demod->demodulator_priv;
- struct dvb_frontend_parameters schan;
+ struct dtv_frontend_properties schan;
u32 value, factor;
u32 internal = dib7000p_get_internal_freq(state);
schan = *ch;
- schan.u.ofdm.constellation = QAM_64;
- schan.u.ofdm.guard_interval = GUARD_INTERVAL_1_32;
- schan.u.ofdm.transmission_mode = TRANSMISSION_MODE_8K;
- schan.u.ofdm.code_rate_HP = FEC_2_3;
- schan.u.ofdm.code_rate_LP = FEC_3_4;
- schan.u.ofdm.hierarchy_information = 0;
+ schan.modulation = QAM_64;
+ schan.guard_interval = GUARD_INTERVAL_1_32;
+ schan.transmission_mode = TRANSMISSION_MODE_8K;
+ schan.code_rate_HP = FEC_2_3;
+ schan.code_rate_LP = FEC_3_4;
+ schan.hierarchy = 0;
dib7000p_set_channel(state, &schan, 7);
- factor = BANDWIDTH_TO_KHZ(ch->u.ofdm.bandwidth);
- if (factor >= 5000)
- factor = 1;
- else
+ factor = BANDWIDTH_TO_KHZ(ch->bandwidth_hz);
+ if (factor >= 5000) {
+ if (state->version == SOC7090)
+ factor = 2;
+ else
+ factor = 1;
+ } else
factor = 6;
value = 30 * internal * factor;
@@ -1205,8 +1243,9 @@ static void dib7000p_spur_protect(struct dib7000p_state *state, u32 rf_khz, u32
dib7000p_write_word(state, 143, 0);
}
-static int dib7000p_tune(struct dvb_frontend *demod, struct dvb_frontend_parameters *ch)
+static int dib7000p_tune(struct dvb_frontend *demod)
{
+ struct dtv_frontend_properties *ch = &demod->dtv_property_cache;
struct dib7000p_state *state = demod->demodulator_priv;
u16 tmp = 0;
@@ -1239,7 +1278,7 @@ static int dib7000p_tune(struct dvb_frontend *demod, struct dvb_frontend_paramet
/* P_timf_alpha, P_corm_alpha=6, P_corm_thres=0x80 */
tmp = (6 << 8) | 0x80;
- switch (ch->u.ofdm.transmission_mode) {
+ switch (ch->transmission_mode) {
case TRANSMISSION_MODE_2K:
tmp |= (2 << 12);
break;
@@ -1255,7 +1294,7 @@ static int dib7000p_tune(struct dvb_frontend *demod, struct dvb_frontend_paramet
/* P_ctrl_freeze_pha_shift=0, P_ctrl_pha_off_max */
tmp = (0 << 4);
- switch (ch->u.ofdm.transmission_mode) {
+ switch (ch->transmission_mode) {
case TRANSMISSION_MODE_2K:
tmp |= 0x6;
break;
@@ -1271,7 +1310,7 @@ static int dib7000p_tune(struct dvb_frontend *demod, struct dvb_frontend_paramet
/* P_ctrl_sfreq_inh=0, P_ctrl_sfreq_step */
tmp = (0 << 4);
- switch (ch->u.ofdm.transmission_mode) {
+ switch (ch->transmission_mode) {
case TRANSMISSION_MODE_2K:
tmp |= 0x6;
break;
@@ -1303,9 +1342,9 @@ static int dib7000p_tune(struct dvb_frontend *demod, struct dvb_frontend_paramet
}
if (state->cfg.spur_protect)
- dib7000p_spur_protect(state, ch->frequency / 1000, BANDWIDTH_TO_KHZ(ch->u.ofdm.bandwidth));
+ dib7000p_spur_protect(state, ch->frequency / 1000, BANDWIDTH_TO_KHZ(ch->bandwidth_hz));
- dib7000p_set_bandwidth(state, BANDWIDTH_TO_KHZ(ch->u.ofdm.bandwidth));
+ dib7000p_set_bandwidth(state, BANDWIDTH_TO_KHZ(ch->bandwidth_hz));
return 0;
}
@@ -1323,7 +1362,7 @@ static int dib7000p_sleep(struct dvb_frontend *demod)
{
struct dib7000p_state *state = demod->demodulator_priv;
if (state->version == SOC7090)
- return dib7090_set_output_mode(demod, OUTMODE_HIGH_Z) | dib7000p_set_power_mode(state, DIB7000P_POWER_INTERFACE_ONLY);
+ return dib7000p_set_power_mode(state, DIB7000P_POWER_INTERFACE_ONLY);
return dib7000p_set_output_mode(state, OUTMODE_HIGH_Z) | dib7000p_set_power_mode(state, DIB7000P_POWER_INTERFACE_ONLY);
}
@@ -1345,93 +1384,94 @@ static int dib7000p_identify(struct dib7000p_state *st)
return 0;
}
-static int dib7000p_get_frontend(struct dvb_frontend *fe, struct dvb_frontend_parameters *fep)
+static int dib7000p_get_frontend(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *fep = &fe->dtv_property_cache;
struct dib7000p_state *state = fe->demodulator_priv;
u16 tps = dib7000p_read_word(state, 463);
fep->inversion = INVERSION_AUTO;
- fep->u.ofdm.bandwidth = BANDWIDTH_TO_INDEX(state->current_bandwidth);
+ fep->bandwidth_hz = BANDWIDTH_TO_HZ(state->current_bandwidth);
switch ((tps >> 8) & 0x3) {
case 0:
- fep->u.ofdm.transmission_mode = TRANSMISSION_MODE_2K;
+ fep->transmission_mode = TRANSMISSION_MODE_2K;
break;
case 1:
- fep->u.ofdm.transmission_mode = TRANSMISSION_MODE_8K;
+ fep->transmission_mode = TRANSMISSION_MODE_8K;
break;
- /* case 2: fep->u.ofdm.transmission_mode = TRANSMISSION_MODE_4K; break; */
+ /* case 2: fep->transmission_mode = TRANSMISSION_MODE_4K; break; */
}
switch (tps & 0x3) {
case 0:
- fep->u.ofdm.guard_interval = GUARD_INTERVAL_1_32;
+ fep->guard_interval = GUARD_INTERVAL_1_32;
break;
case 1:
- fep->u.ofdm.guard_interval = GUARD_INTERVAL_1_16;
+ fep->guard_interval = GUARD_INTERVAL_1_16;
break;
case 2:
- fep->u.ofdm.guard_interval = GUARD_INTERVAL_1_8;
+ fep->guard_interval = GUARD_INTERVAL_1_8;
break;
case 3:
- fep->u.ofdm.guard_interval = GUARD_INTERVAL_1_4;
+ fep->guard_interval = GUARD_INTERVAL_1_4;
break;
}
switch ((tps >> 14) & 0x3) {
case 0:
- fep->u.ofdm.constellation = QPSK;
+ fep->modulation = QPSK;
break;
case 1:
- fep->u.ofdm.constellation = QAM_16;
+ fep->modulation = QAM_16;
break;
case 2:
default:
- fep->u.ofdm.constellation = QAM_64;
+ fep->modulation = QAM_64;
break;
}
/* as long as the frontend_param structure is fixed for hierarchical transmission I refuse to use it */
/* (tps >> 13) & 0x1 == hrch is used, (tps >> 10) & 0x7 == alpha */
- fep->u.ofdm.hierarchy_information = HIERARCHY_NONE;
+ fep->hierarchy = HIERARCHY_NONE;
switch ((tps >> 5) & 0x7) {
case 1:
- fep->u.ofdm.code_rate_HP = FEC_1_2;
+ fep->code_rate_HP = FEC_1_2;
break;
case 2:
- fep->u.ofdm.code_rate_HP = FEC_2_3;
+ fep->code_rate_HP = FEC_2_3;
break;
case 3:
- fep->u.ofdm.code_rate_HP = FEC_3_4;
+ fep->code_rate_HP = FEC_3_4;
break;
case 5:
- fep->u.ofdm.code_rate_HP = FEC_5_6;
+ fep->code_rate_HP = FEC_5_6;
break;
case 7:
default:
- fep->u.ofdm.code_rate_HP = FEC_7_8;
+ fep->code_rate_HP = FEC_7_8;
break;
}
switch ((tps >> 2) & 0x7) {
case 1:
- fep->u.ofdm.code_rate_LP = FEC_1_2;
+ fep->code_rate_LP = FEC_1_2;
break;
case 2:
- fep->u.ofdm.code_rate_LP = FEC_2_3;
+ fep->code_rate_LP = FEC_2_3;
break;
case 3:
- fep->u.ofdm.code_rate_LP = FEC_3_4;
+ fep->code_rate_LP = FEC_3_4;
break;
case 5:
- fep->u.ofdm.code_rate_LP = FEC_5_6;
+ fep->code_rate_LP = FEC_5_6;
break;
case 7:
default:
- fep->u.ofdm.code_rate_LP = FEC_7_8;
+ fep->code_rate_LP = FEC_7_8;
break;
}
@@ -1440,36 +1480,36 @@ static int dib7000p_get_frontend(struct dvb_frontend *fe, struct dvb_frontend_pa
return 0;
}
-static int dib7000p_set_frontend(struct dvb_frontend *fe, struct dvb_frontend_parameters *fep)
+static int dib7000p_set_frontend(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *fep = &fe->dtv_property_cache;
struct dib7000p_state *state = fe->demodulator_priv;
int time, ret;
- if (state->version == SOC7090) {
+ if (state->version == SOC7090)
dib7090_set_diversity_in(fe, 0);
- dib7090_set_output_mode(fe, OUTMODE_HIGH_Z);
- } else
+ else
dib7000p_set_output_mode(state, OUTMODE_HIGH_Z);
/* maybe the parameter has been changed */
state->sfn_workaround_active = buggy_sfn_workaround;
if (fe->ops.tuner_ops.set_params)
- fe->ops.tuner_ops.set_params(fe, fep);
+ fe->ops.tuner_ops.set_params(fe);
/* start up the AGC */
state->agc_state = 0;
do {
- time = dib7000p_agc_startup(fe, fep);
+ time = dib7000p_agc_startup(fe);
if (time != -1)
msleep(time);
} while (time != -1);
- if (fep->u.ofdm.transmission_mode == TRANSMISSION_MODE_AUTO ||
- fep->u.ofdm.guard_interval == GUARD_INTERVAL_AUTO || fep->u.ofdm.constellation == QAM_AUTO || fep->u.ofdm.code_rate_HP == FEC_AUTO) {
+ if (fep->transmission_mode == TRANSMISSION_MODE_AUTO ||
+ fep->guard_interval == GUARD_INTERVAL_AUTO || fep->modulation == QAM_AUTO || fep->code_rate_HP == FEC_AUTO) {
int i = 800, found;
- dib7000p_autosearch_start(fe, fep);
+ dib7000p_autosearch_start(fe);
do {
msleep(1);
found = dib7000p_autosearch_is_irq(fe);
@@ -1479,15 +1519,19 @@ static int dib7000p_set_frontend(struct dvb_frontend *fe, struct dvb_frontend_pa
if (found == 0 || found == 1)
return 0;
- dib7000p_get_frontend(fe, fep);
+ dib7000p_get_frontend(fe);
}
- ret = dib7000p_tune(fe, fep);
+ ret = dib7000p_tune(fe);
/* make this a config parameter */
- if (state->version == SOC7090)
+ if (state->version == SOC7090) {
dib7090_set_output_mode(fe, state->cfg.output_mode);
- else
+ if (state->cfg.enMpegOutput == 0) {
+ dib7090_setDibTxMux(state, MPEG_ON_DIBTX);
+ dib7090_setHostBusMux(state, DIBTX_ON_HOSTBUS);
+ }
+ } else
dib7000p_set_output_mode(state, state->cfg.output_mode);
return ret;
@@ -1831,7 +1875,8 @@ static int w7090p_tuner_rw_serpar(struct i2c_adapter *i2c_adap, struct i2c_msg m
return num;
}
-int dib7090p_rw_on_apb(struct i2c_adapter *i2c_adap, struct i2c_msg msg[], int num, u16 apb_address)
+static int dib7090p_rw_on_apb(struct i2c_adapter *i2c_adap,
+ struct i2c_msg msg[], int num, u16 apb_address)
{
struct dib7000p_state *state = i2c_get_adapdata(i2c_adap);
u16 word;
@@ -1933,10 +1978,10 @@ static int dib7090_tuner_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg msg[]
apb_address = 915;
break;
case 0x27:
- apb_address = 916;
+ apb_address = 917;
break;
case 0x28:
- apb_address = 917;
+ apb_address = 916;
break;
case 0x1d:
i = ((dib7000p_read_word(state, 72) >> 12) & 0x3);
@@ -2031,12 +2076,7 @@ static u32 dib7090_calcSyncFreq(u32 P_Kin, u32 P_Kout, u32 insertExtSynchro, u32
static int dib7090_cfg_DibTx(struct dib7000p_state *state, u32 P_Kin, u32 P_Kout, u32 insertExtSynchro, u32 synchroMode, u32 syncWord, u32 syncSize)
{
- u8 index_buf;
- u16 rx_copy_buf[22];
-
dprintk("Configure DibStream Tx");
- for (index_buf = 0; index_buf < 22; index_buf++)
- rx_copy_buf[index_buf] = dib7000p_read_word(state, 1536+index_buf);
dib7000p_write_word(state, 1615, 1);
dib7000p_write_word(state, 1603, P_Kin);
@@ -2048,9 +2088,6 @@ static int dib7090_cfg_DibTx(struct dib7000p_state *state, u32 P_Kin, u32 P_Kout
dib7000p_write_word(state, 1612, syncSize);
dib7000p_write_word(state, 1615, 0);
- for (index_buf = 0; index_buf < 22; index_buf++)
- dib7000p_write_word(state, 1536+index_buf, rx_copy_buf[index_buf]);
-
return 0;
}
@@ -2077,109 +2114,121 @@ static int dib7090_cfg_DibRx(struct dib7000p_state *state, u32 P_Kin, u32 P_Kout
return 0;
}
-static int dib7090_enDivOnHostBus(struct dib7000p_state *state)
-{
- u16 reg;
-
- dprintk("Enable Diversity on host bus");
- reg = (1 << 8) | (1 << 5);
- dib7000p_write_word(state, 1288, reg);
-
- return dib7090_cfg_DibTx(state, 5, 5, 0, 0, 0, 0);
-}
-
-static int dib7090_enAdcOnHostBus(struct dib7000p_state *state)
-{
- u16 reg;
-
- dprintk("Enable ADC on host bus");
- reg = (1 << 7) | (1 << 5);
- dib7000p_write_word(state, 1288, reg);
-
- return dib7090_cfg_DibTx(state, 20, 5, 10, 0, 0, 0);
-}
-
-static int dib7090_enMpegOnHostBus(struct dib7000p_state *state)
+static void dib7090_enMpegMux(struct dib7000p_state *state, int onoff)
{
- u16 reg;
-
- dprintk("Enable Mpeg on host bus");
- reg = (1 << 9) | (1 << 5);
- dib7000p_write_word(state, 1288, reg);
+ u16 reg_1287 = dib7000p_read_word(state, 1287);
- return dib7090_cfg_DibTx(state, 8, 5, 0, 0, 0, 0);
-}
+ switch (onoff) {
+ case 1:
+ reg_1287 &= ~(1<<7);
+ break;
+ case 0:
+ reg_1287 |= (1<<7);
+ break;
+ }
-static int dib7090_enMpegInput(struct dib7000p_state *state)
-{
- dprintk("Enable Mpeg input");
- return dib7090_cfg_DibRx(state, 8, 5, 0, 0, 0, 8, 0); /*outputRate = 8 */
+ dib7000p_write_word(state, 1287, reg_1287);
}
-static int dib7090_enMpegMux(struct dib7000p_state *state, u16 pulseWidth, u16 enSerialMode, u16 enSerialClkDiv2)
+static void dib7090_configMpegMux(struct dib7000p_state *state,
+ u16 pulseWidth, u16 enSerialMode, u16 enSerialClkDiv2)
{
- u16 reg = (1 << 7) | ((pulseWidth & 0x1f) << 2) | ((enSerialMode & 0x1) << 1) | (enSerialClkDiv2 & 0x1);
-
dprintk("Enable Mpeg mux");
- dib7000p_write_word(state, 1287, reg);
- reg &= ~(1 << 7);
- dib7000p_write_word(state, 1287, reg);
+ dib7090_enMpegMux(state, 0);
- reg = (1 << 4);
- dib7000p_write_word(state, 1288, reg);
+ /* If the input mode is MPEG do not divide the serial clock */
+ if ((enSerialMode == 1) && (state->input_mode_mpeg == 1))
+ enSerialClkDiv2 = 0;
- return 0;
+ dib7000p_write_word(state, 1287, ((pulseWidth & 0x1f) << 2)
+ | ((enSerialMode & 0x1) << 1)
+ | (enSerialClkDiv2 & 0x1));
+
+ dib7090_enMpegMux(state, 1);
}
-static int dib7090_disableMpegMux(struct dib7000p_state *state)
+static void dib7090_setDibTxMux(struct dib7000p_state *state, int mode)
{
- u16 reg;
-
- dprintk("Disable Mpeg mux");
- dib7000p_write_word(state, 1288, 0);
-
- reg = dib7000p_read_word(state, 1287);
- reg &= ~(1 << 7);
- dib7000p_write_word(state, 1287, reg);
+ u16 reg_1288 = dib7000p_read_word(state, 1288) & ~(0x7 << 7);
- return 0;
+ switch (mode) {
+ case MPEG_ON_DIBTX:
+ dprintk("SET MPEG ON DIBSTREAM TX");
+ dib7090_cfg_DibTx(state, 8, 5, 0, 0, 0, 0);
+ reg_1288 |= (1<<9);
+ break;
+ case DIV_ON_DIBTX:
+ dprintk("SET DIV_OUT ON DIBSTREAM TX");
+ dib7090_cfg_DibTx(state, 5, 5, 0, 0, 0, 0);
+ reg_1288 |= (1<<8);
+ break;
+ case ADC_ON_DIBTX:
+ dprintk("SET ADC_OUT ON DIBSTREAM TX");
+ dib7090_cfg_DibTx(state, 20, 5, 10, 0, 0, 0);
+ reg_1288 |= (1<<7);
+ break;
+ default:
+ break;
+ }
+ dib7000p_write_word(state, 1288, reg_1288);
}
-static int dib7090_set_input_mode(struct dvb_frontend *fe, int mode)
+static void dib7090_setHostBusMux(struct dib7000p_state *state, int mode)
{
- struct dib7000p_state *state = fe->demodulator_priv;
+ u16 reg_1288 = dib7000p_read_word(state, 1288) & ~(0x7 << 4);
switch (mode) {
- case INPUT_MODE_DIVERSITY:
- dprintk("Enable diversity INPUT");
- dib7090_cfg_DibRx(state, 5, 5, 0, 0, 0, 0, 0);
+ case DEMOUT_ON_HOSTBUS:
+ dprintk("SET DEM OUT OLD INTERF ON HOST BUS");
+ dib7090_enMpegMux(state, 0);
+ reg_1288 |= (1<<6);
+ break;
+ case DIBTX_ON_HOSTBUS:
+ dprintk("SET DIBSTREAM TX ON HOST BUS");
+ dib7090_enMpegMux(state, 0);
+ reg_1288 |= (1<<5);
break;
- case INPUT_MODE_MPEG:
- dprintk("Enable Mpeg INPUT");
- dib7090_cfg_DibRx(state, 8, 5, 0, 0, 0, 8, 0); /*outputRate = 8 */
+ case MPEG_ON_HOSTBUS:
+ dprintk("SET MPEG MUX ON HOST BUS");
+ reg_1288 |= (1<<4);
break;
- case INPUT_MODE_OFF:
default:
- dprintk("Disable INPUT");
- dib7090_cfg_DibRx(state, 0, 0, 0, 0, 0, 0, 0);
break;
}
- return 0;
+ dib7000p_write_word(state, 1288, reg_1288);
}
-static int dib7090_set_diversity_in(struct dvb_frontend *fe, int onoff)
+int dib7090_set_diversity_in(struct dvb_frontend *fe, int onoff)
{
+ struct dib7000p_state *state = fe->demodulator_priv;
+ u16 reg_1287;
+
switch (onoff) {
- case 0: /* only use the internal way - not the diversity input */
- dib7090_set_input_mode(fe, INPUT_MODE_MPEG);
- break;
- case 1: /* both ways */
- case 2: /* only the diversity input */
- dib7090_set_input_mode(fe, INPUT_MODE_DIVERSITY);
- break;
+ case 0: /* only use the internal way - not the diversity input */
+ dprintk("%s mode OFF : by default Enable Mpeg INPUT", __func__);
+ dib7090_cfg_DibRx(state, 8, 5, 0, 0, 0, 8, 0);
+
+ /* Do not divide the serial clock of MPEG MUX */
+ /* in SERIAL MODE in case input mode MPEG is used */
+ reg_1287 = dib7000p_read_word(state, 1287);
+ /* enSerialClkDiv2 == 1 ? */
+ if ((reg_1287 & 0x1) == 1) {
+ /* force enSerialClkDiv2 = 0 */
+ reg_1287 &= ~0x1;
+ dib7000p_write_word(state, 1287, reg_1287);
+ }
+ state->input_mode_mpeg = 1;
+ break;
+ case 1: /* both ways */
+ case 2: /* only the diversity input */
+ dprintk("%s ON : Enable diversity INPUT", __func__);
+ dib7090_cfg_DibRx(state, 5, 5, 0, 0, 0, 0, 0);
+ state->input_mode_mpeg = 0;
+ break;
}
+ dib7000p_set_diversity_in(&state->demod, onoff);
return 0;
}
@@ -2204,69 +2253,63 @@ static int dib7090_set_output_mode(struct dvb_frontend *fe, int mode)
case OUTMODE_MPEG2_SERIAL:
if (prefer_mpeg_mux_use) {
- dprintk("Sip 7090P setting output mode TS_SERIAL using Mpeg Mux");
- dib7090_enMpegOnHostBus(state);
- dib7090_enMpegInput(state);
- if (state->cfg.enMpegOutput == 1)
- dib7090_enMpegMux(state, 3, 1, 1);
-
- } else { /* Use Smooth block */
- dprintk("Sip 7090P setting output mode TS_SERIAL using Smooth bloc");
- dib7090_disableMpegMux(state);
- dib7000p_write_word(state, 1288, (1 << 6));
- outreg |= (2 << 6) | (0 << 1);
+ dprintk("setting output mode TS_SERIAL using Mpeg Mux");
+ dib7090_configMpegMux(state, 3, 1, 1);
+ dib7090_setHostBusMux(state, MPEG_ON_HOSTBUS);
+ } else {/* Use Smooth block */
+ dprintk("setting output mode TS_SERIAL using Smooth bloc");
+ dib7090_setHostBusMux(state, DEMOUT_ON_HOSTBUS);
+ outreg |= (2<<6) | (0 << 1);
}
break;
case OUTMODE_MPEG2_PAR_GATED_CLK:
if (prefer_mpeg_mux_use) {
- dprintk("Sip 7090P setting output mode TS_PARALLEL_GATED using Mpeg Mux");
- dib7090_enMpegOnHostBus(state);
- dib7090_enMpegInput(state);
- if (state->cfg.enMpegOutput == 1)
- dib7090_enMpegMux(state, 2, 0, 0);
- } else { /* Use Smooth block */
- dprintk("Sip 7090P setting output mode TS_PARALLEL_GATED using Smooth block");
- dib7090_disableMpegMux(state);
- dib7000p_write_word(state, 1288, (1 << 6));
- outreg |= (0 << 6);
+ dprintk("setting output mode TS_PARALLEL_GATED using Mpeg Mux");
+ dib7090_configMpegMux(state, 2, 0, 0);
+ dib7090_setHostBusMux(state, MPEG_ON_HOSTBUS);
+ } else { /* Use Smooth block */
+ dprintk("setting output mode TS_PARALLEL_GATED using Smooth block");
+ dib7090_setHostBusMux(state, DEMOUT_ON_HOSTBUS);
+ outreg |= (0<<6);
}
break;
case OUTMODE_MPEG2_PAR_CONT_CLK: /* Using Smooth block only */
- dprintk("Sip 7090P setting output mode TS_PARALLEL_CONT using Smooth block");
- dib7090_disableMpegMux(state);
- dib7000p_write_word(state, 1288, (1 << 6));
- outreg |= (1 << 6);
+ dprintk("setting output mode TS_PARALLEL_CONT using Smooth block");
+ dib7090_setHostBusMux(state, DEMOUT_ON_HOSTBUS);
+ outreg |= (1<<6);
break;
case OUTMODE_MPEG2_FIFO: /* Using Smooth block because not supported by new Mpeg Mux bloc */
- dprintk("Sip 7090P setting output mode TS_FIFO using Smooth block");
- dib7090_disableMpegMux(state);
- dib7000p_write_word(state, 1288, (1 << 6));
- outreg |= (5 << 6);
+ dprintk("setting output mode TS_FIFO using Smooth block");
+ dib7090_setHostBusMux(state, DEMOUT_ON_HOSTBUS);
+ outreg |= (5<<6);
smo_mode |= (3 << 1);
fifo_threshold = 512;
break;
case OUTMODE_DIVERSITY:
- dprintk("Sip 7090P setting output mode MODE_DIVERSITY");
- dib7090_disableMpegMux(state);
- dib7090_enDivOnHostBus(state);
+ dprintk("setting output mode MODE_DIVERSITY");
+ dib7090_setDibTxMux(state, DIV_ON_DIBTX);
+ dib7090_setHostBusMux(state, DIBTX_ON_HOSTBUS);
break;
case OUTMODE_ANALOG_ADC:
- dprintk("Sip 7090P setting output mode MODE_ANALOG_ADC");
- dib7090_enAdcOnHostBus(state);
+ dprintk("setting output mode MODE_ANALOG_ADC");
+ dib7090_setDibTxMux(state, ADC_ON_DIBTX);
+ dib7090_setHostBusMux(state, DIBTX_ON_HOSTBUS);
break;
}
+ if (mode != OUTMODE_HIGH_Z)
+ outreg |= (1 << 10);
if (state->cfg.output_mpeg2_in_188_bytes)
smo_mode |= (1 << 5);
ret |= dib7000p_write_word(state, 235, smo_mode);
ret |= dib7000p_write_word(state, 236, fifo_threshold); /* synchronous fread */
- ret |= dib7000p_write_word(state, 1286, outreg | (1 << 10)); /* allways set Dout active = 1 !!! */
+ ret |= dib7000p_write_word(state, 1286, outreg);
return ret;
}
@@ -2296,13 +2339,6 @@ int dib7090_tuner_sleep(struct dvb_frontend *fe, int onoff)
}
EXPORT_SYMBOL(dib7090_tuner_sleep);
-int dib7090_agc_restart(struct dvb_frontend *fe, u8 restart)
-{
- dprintk("AGC restart callback: %d", restart);
- return 0;
-}
-EXPORT_SYMBOL(dib7090_agc_restart);
-
int dib7090_get_adc_power(struct dvb_frontend *fe)
{
return dib7000p_get_adc_power(fe);
@@ -2391,9 +2427,9 @@ error:
EXPORT_SYMBOL(dib7000p_attach);
static struct dvb_frontend_ops dib7000p_ops = {
+ .delsys = { SYS_DVBT },
.info = {
.name = "DiBcom 7000PC",
- .type = FE_OFDM,
.frequency_min = 44250000,
.frequency_max = 867250000,
.frequency_stepsize = 62500,
diff --git a/drivers/media/dvb/frontends/dib7000p.h b/drivers/media/dvb/frontends/dib7000p.h
index 0179f9474bac..b61b03a6e1ed 100644
--- a/drivers/media/dvb/frontends/dib7000p.h
+++ b/drivers/media/dvb/frontends/dib7000p.h
@@ -56,11 +56,12 @@ extern int dib7000p_pid_filter(struct dvb_frontend *, u8 id, u16 pid, u8 onoff);
extern int dib7000p_pid_filter_ctrl(struct dvb_frontend *fe, u8 onoff);
extern int dib7000p_update_pll(struct dvb_frontend *fe, struct dibx000_bandwidth_config *bw);
extern u32 dib7000p_ctrl_timf(struct dvb_frontend *fe, u8 op, u32 timf);
-extern int dib7090_agc_restart(struct dvb_frontend *fe, u8 restart);
extern int dib7090_tuner_sleep(struct dvb_frontend *fe, int onoff);
extern int dib7090_get_adc_power(struct dvb_frontend *fe);
extern struct i2c_adapter *dib7090_get_i2c_tuner(struct dvb_frontend *fe);
extern int dib7090_slave_reset(struct dvb_frontend *fe);
+extern int dib7000p_get_agc_values(struct dvb_frontend *fe,
+ u16 *agc_global, u16 *agc1, u16 *agc2, u16 *wbd);
#else
static inline struct dvb_frontend *dib7000p_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib7000p_config *cfg)
{
@@ -122,12 +123,6 @@ static inline u32 dib7000p_ctrl_timf(struct dvb_frontend *fe, u8 op, u32 timf)
return 0;
}
-static inline int dib7090_agc_restart(struct dvb_frontend *fe, u8 restart)
-{
- printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
- return -ENODEV;
-}
-
static inline int dib7090_tuner_sleep(struct dvb_frontend *fe, int onoff)
{
printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
@@ -151,6 +146,13 @@ static inline int dib7090_slave_reset(struct dvb_frontend *fe)
printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
return -ENODEV;
}
+
+static inline int dib7000p_get_agc_values(struct dvb_frontend *fe,
+ u16 *agc_global, u16 *agc1, u16 *agc2, u16 *wbd)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return -ENODEV;
+}
#endif
#endif
diff --git a/drivers/media/dvb/frontends/dib8000.c b/drivers/media/dvb/frontends/dib8000.c
index fe284d5292f5..9ca34f495009 100644
--- a/drivers/media/dvb/frontends/dib8000.c
+++ b/drivers/media/dvb/frontends/dib8000.c
@@ -81,11 +81,15 @@ struct dib8000_state {
u8 i2c_write_buffer[4];
u8 i2c_read_buffer[2];
struct mutex i2c_buffer_lock;
+ u8 input_mode_mpeg;
+
+ u16 tuner_enable;
+ struct i2c_adapter dib8096p_tuner_adap;
};
enum dib8000_power_mode {
- DIB8000M_POWER_ALL = 0,
- DIB8000M_POWER_INTERFACE_ONLY,
+ DIB8000_POWER_ALL = 0,
+ DIB8000_POWER_INTERFACE_ONLY,
};
static u16 dib8000_i2c_read16(struct i2c_device *i2c, u16 reg)
@@ -428,20 +432,31 @@ static void dib8000_set_power_mode(struct dib8000_state *state, enum dib8000_pow
/* by default everything is going to be powered off */
u16 reg_774 = 0x3fff, reg_775 = 0xffff, reg_776 = 0xffff,
reg_900 = (dib8000_read_word(state, 900) & 0xfffc) | 0x3,
+ reg_1280;
+
+ if (state->revision != 0x8090)
reg_1280 = (dib8000_read_word(state, 1280) & 0x00ff) | 0xff00;
+ else
+ reg_1280 = (dib8000_read_word(state, 1280) & 0x707f) | 0x8f80;
/* now, depending on the requested mode, we power on */
switch (mode) {
/* power up everything in the demod */
- case DIB8000M_POWER_ALL:
+ case DIB8000_POWER_ALL:
reg_774 = 0x0000;
reg_775 = 0x0000;
reg_776 = 0x0000;
reg_900 &= 0xfffc;
- reg_1280 &= 0x00ff;
+ if (state->revision != 0x8090)
+ reg_1280 &= 0x00ff;
+ else
+ reg_1280 &= 0x707f;
break;
- case DIB8000M_POWER_INTERFACE_ONLY:
- reg_1280 &= 0x00ff;
+ case DIB8000_POWER_INTERFACE_ONLY:
+ if (state->revision != 0x8090)
+ reg_1280 &= 0x00ff;
+ else
+ reg_1280 &= 0xfa7b;
break;
}
@@ -453,19 +468,67 @@ static void dib8000_set_power_mode(struct dib8000_state *state, enum dib8000_pow
dib8000_write_word(state, 1280, reg_1280);
}
+static int dib8000_init_sdram(struct dib8000_state *state)
+{
+ u16 reg = 0;
+ dprintk("Init sdram");
+
+ reg = dib8000_read_word(state, 274)&0xfff0;
+ /* P_dintlv_delay_ram = 7 because of MobileSdram */
+ dib8000_write_word(state, 274, reg | 0x7);
+
+ dib8000_write_word(state, 1803, (7<<2));
+
+ reg = dib8000_read_word(state, 1280);
+ /* force restart P_restart_sdram */
+ dib8000_write_word(state, 1280, reg | (1<<2));
+
+ /* release restart P_restart_sdram */
+ dib8000_write_word(state, 1280, reg);
+
+ return 0;
+}
+
static int dib8000_set_adc_state(struct dib8000_state *state, enum dibx000_adc_states no)
{
int ret = 0;
- u16 reg_907 = dib8000_read_word(state, 907), reg_908 = dib8000_read_word(state, 908);
+ u16 reg, reg_907 = dib8000_read_word(state, 907);
+ u16 reg_908 = dib8000_read_word(state, 908);
switch (no) {
case DIBX000_SLOW_ADC_ON:
- reg_908 |= (1 << 1) | (1 << 0);
- ret |= dib8000_write_word(state, 908, reg_908);
- reg_908 &= ~(1 << 1);
+ if (state->revision != 0x8090) {
+ reg_908 |= (1 << 1) | (1 << 0);
+ ret |= dib8000_write_word(state, 908, reg_908);
+ reg_908 &= ~(1 << 1);
+ } else {
+ reg = dib8000_read_word(state, 1925);
+ /* en_slowAdc = 1 & reset_sladc = 1 */
+ dib8000_write_word(state, 1925, reg |
+ (1<<4) | (1<<2));
+
+ /* read access to make it work... strange ... */
+ reg = dib8000_read_word(state, 1925);
+ msleep(20);
+ /* en_slowAdc = 1 & reset_sladc = 0 */
+ dib8000_write_word(state, 1925, reg & ~(1<<4));
+
+ reg = dib8000_read_word(state, 921) & ~((0x3 << 14)
+ | (0x3 << 12));
+ /* ref = Vin1 => Vbg ; sel = Vin0 or Vin3 ;
+ (Vin2 = Vcm) */
+ dib8000_write_word(state, 921, reg | (1 << 14)
+ | (3 << 12));
+ }
break;
case DIBX000_SLOW_ADC_OFF:
+ if (state->revision == 0x8090) {
+ reg = dib8000_read_word(state, 1925);
+ /* reset_sladc = 1 en_slowAdc = 0 */
+ dib8000_write_word(state, 1925,
+ (reg & ~(1<<2)) | (1<<4));
+ }
reg_908 |= (1 << 1) | (1 << 0);
break;
@@ -521,7 +584,12 @@ static int dib8000_set_bandwidth(struct dvb_frontend *fe, u32 bw)
static int dib8000_sad_calib(struct dib8000_state *state)
{
-/* internal */
+ if (state->revision == 0x8090) {
+ dprintk("%s: the sad calibration is not needed for the dib8096P",
+ __func__);
+ return 0;
+ }
+ /* internal */
dib8000_write_word(state, 923, (0 << 1) | (0 << 0));
dib8000_write_word(state, 924, 776); // 0.625*3.3 / 4096
@@ -546,48 +614,129 @@ EXPORT_SYMBOL(dib8000_set_wbd_ref);
static void dib8000_reset_pll_common(struct dib8000_state *state, const struct dibx000_bandwidth_config *bw)
{
dprintk("ifreq: %d %x, inversion: %d", bw->ifreq, bw->ifreq, bw->ifreq >> 25);
- dib8000_write_word(state, 23, (u16) (((bw->internal * 1000) >> 16) & 0xffff)); /* P_sec_len */
- dib8000_write_word(state, 24, (u16) ((bw->internal * 1000) & 0xffff));
+ if (state->revision != 0x8090) {
+ dib8000_write_word(state, 23,
+ (u16) (((bw->internal * 1000) >> 16) & 0xffff));
+ dib8000_write_word(state, 24,
+ (u16) ((bw->internal * 1000) & 0xffff));
+ } else {
+ dib8000_write_word(state, 23, (u16) (((bw->internal / 2 * 1000) >> 16) & 0xffff));
+ dib8000_write_word(state, 24,
+ (u16) ((bw->internal / 2 * 1000) & 0xffff));
+ }
dib8000_write_word(state, 27, (u16) ((bw->ifreq >> 16) & 0x01ff));
dib8000_write_word(state, 28, (u16) (bw->ifreq & 0xffff));
dib8000_write_word(state, 26, (u16) ((bw->ifreq >> 25) & 0x0003));
- dib8000_write_word(state, 922, bw->sad_cfg);
+ if (state->revision != 0x8090)
+ dib8000_write_word(state, 922, bw->sad_cfg);
}
static void dib8000_reset_pll(struct dib8000_state *state)
{
const struct dibx000_bandwidth_config *pll = state->cfg.pll;
- u16 clk_cfg1;
-
- // clk_cfg0
- dib8000_write_word(state, 901, (pll->pll_prediv << 8) | (pll->pll_ratio << 0));
-
- // clk_cfg1
- clk_cfg1 = (1 << 10) | (0 << 9) | (pll->IO_CLK_en_core << 8) |
- (pll->bypclk_div << 5) | (pll->enable_refdiv << 4) | (1 << 3) |
- (pll->pll_range << 1) | (pll->pll_reset << 0);
-
- dib8000_write_word(state, 902, clk_cfg1);
- clk_cfg1 = (clk_cfg1 & 0xfff7) | (pll->pll_bypass << 3);
- dib8000_write_word(state, 902, clk_cfg1);
-
- dprintk("clk_cfg1: 0x%04x", clk_cfg1); /* 0x507 1 0 1 000 0 0 11 1 */
-
- /* smpl_cfg: P_refclksel=2, P_ensmplsel=1 nodivsmpl=1 */
- if (state->cfg.pll->ADClkSrc == 0)
- dib8000_write_word(state, 904, (0 << 15) | (0 << 12) | (0 << 10) |
- (pll->modulo << 8) | (pll->ADClkSrc << 7) | (0 << 1));
- else if (state->cfg.refclksel != 0)
- dib8000_write_word(state, 904, (0 << 15) | (1 << 12) |
- ((state->cfg.refclksel & 0x3) << 10) | (pll->modulo << 8) |
- (pll->ADClkSrc << 7) | (0 << 1));
- else
- dib8000_write_word(state, 904, (0 << 15) | (1 << 12) | (3 << 10) | (pll->modulo << 8) | (pll->ADClkSrc << 7) | (0 << 1));
+ u16 clk_cfg1, reg;
+
+ if (state->revision != 0x8090) {
+ dib8000_write_word(state, 901,
+ (pll->pll_prediv << 8) | (pll->pll_ratio << 0));
+
+ clk_cfg1 = (1 << 10) | (0 << 9) | (pll->IO_CLK_en_core << 8) |
+ (pll->bypclk_div << 5) | (pll->enable_refdiv << 4) |
+ (1 << 3) | (pll->pll_range << 1) |
+ (pll->pll_reset << 0);
+
+ dib8000_write_word(state, 902, clk_cfg1);
+ clk_cfg1 = (clk_cfg1 & 0xfff7) | (pll->pll_bypass << 3);
+ dib8000_write_word(state, 902, clk_cfg1);
+
+ dprintk("clk_cfg1: 0x%04x", clk_cfg1);
+
+ /* smpl_cfg: P_refclksel=2, P_ensmplsel=1 nodivsmpl=1 */
+ if (state->cfg.pll->ADClkSrc == 0)
+ dib8000_write_word(state, 904,
+ (0 << 15) | (0 << 12) | (0 << 10) |
+ (pll->modulo << 8) |
+ (pll->ADClkSrc << 7) | (0 << 1));
+ else if (state->cfg.refclksel != 0)
+ dib8000_write_word(state, 904, (0 << 15) | (1 << 12) |
+ ((state->cfg.refclksel & 0x3) << 10) |
+ (pll->modulo << 8) |
+ (pll->ADClkSrc << 7) | (0 << 1));
+ else
+ dib8000_write_word(state, 904, (0 << 15) | (1 << 12) |
+ (3 << 10) | (pll->modulo << 8) |
+ (pll->ADClkSrc << 7) | (0 << 1));
+ } else {
+ dib8000_write_word(state, 1856, (!pll->pll_reset<<13) |
+ (pll->pll_range<<12) | (pll->pll_ratio<<6) |
+ (pll->pll_prediv));
+
+ reg = dib8000_read_word(state, 1857);
+ dib8000_write_word(state, 1857, reg|(!pll->pll_bypass<<15));
+
+ reg = dib8000_read_word(state, 1858); /* Force clk out pll /2 */
+ dib8000_write_word(state, 1858, reg | 1);
+
+ dib8000_write_word(state, 904, (pll->modulo << 8));
+ }
dib8000_reset_pll_common(state, pll);
}
+int dib8000_update_pll(struct dvb_frontend *fe,
+ struct dibx000_bandwidth_config *pll)
+{
+ struct dib8000_state *state = fe->demodulator_priv;
+ u16 reg_1857, reg_1856 = dib8000_read_word(state, 1856);
+ u8 loopdiv, prediv;
+ u32 internal, xtal;
+
+ /* get back old values */
+ prediv = reg_1856 & 0x3f;
+ loopdiv = (reg_1856 >> 6) & 0x3f;
+
+ if ((pll != NULL) && (pll->pll_prediv != prediv ||
+ pll->pll_ratio != loopdiv)) {
+ dprintk("Updating pll (prediv: old = %d new = %d ; loopdiv : old = %d new = %d)", prediv, pll->pll_prediv, loopdiv, pll->pll_ratio);
+ reg_1856 &= 0xf000;
+ reg_1857 = dib8000_read_word(state, 1857);
+ /* disable PLL */
+ dib8000_write_word(state, 1857, reg_1857 & ~(1 << 15));
+
+ dib8000_write_word(state, 1856, reg_1856 |
+ ((pll->pll_ratio & 0x3f) << 6) |
+ (pll->pll_prediv & 0x3f));
+
+ /* write new system clk into P_sec_len */
+ internal = dib8000_read32(state, 23) / 1000;
+ dprintk("Old Internal = %d", internal);
+ xtal = 2 * (internal / loopdiv) * prediv;
+ internal = 1000 * (xtal/pll->pll_prediv) * pll->pll_ratio;
+ dprintk("Xtal = %d , New Fmem = %d New Fdemod = %d, New Fsampling = %d", xtal, internal/1000, internal/2000, internal/8000);
+ dprintk("New Internal = %d", internal);
+
+ dib8000_write_word(state, 23,
+ (u16) (((internal / 2) >> 16) & 0xffff));
+ dib8000_write_word(state, 24, (u16) ((internal / 2) & 0xffff));
+ /* enable PLL */
+ dib8000_write_word(state, 1857, reg_1857 | (1 << 15));
+
+ while (((dib8000_read_word(state, 1856)>>15)&0x1) != 1)
+ dprintk("Waiting for PLL to lock");
+
+ /* verify */
+ reg_1856 = dib8000_read_word(state, 1856);
+ dprintk("PLL Updated with prediv = %d and loopdiv = %d",
+ reg_1856&0x3f, (reg_1856>>6)&0x3f);
+
+ return 0;
+ }
+ return -EINVAL;
+}
+EXPORT_SYMBOL(dib8000_update_pll);
+
+
static int dib8000_reset_gpio(struct dib8000_state *st)
{
/* reset the GPIOs */
@@ -721,9 +870,6 @@ static const u16 dib8000_defaults[] = {
(3 << 5) | /* P_ctrl_pre_freq_step=3 */
(1 << 0), /* P_pre_freq_win_len=1 */
- 1, 903,
- (0 << 4) | 2, // P_divclksel=0 P_divbitsel=2 (was clk=3,bit=1 for MPW)
-
0,
};
@@ -740,7 +886,8 @@ static u16 dib8000_identify(struct i2c_device *client)
}
value = dib8000_i2c_read16(client, 897);
- if (value != 0x8000 && value != 0x8001 && value != 0x8002) {
+ if (value != 0x8000 && value != 0x8001 &&
+ value != 0x8002 && value != 0x8090) {
dprintk("wrong Device ID (%x)", value);
return 0;
}
@@ -755,6 +902,9 @@ static u16 dib8000_identify(struct i2c_device *client)
case 0x8002:
dprintk("found DiB8000C");
break;
+ case 0x8090:
+ dprintk("found DiB8096P");
+ break;
}
return value;
}
@@ -763,17 +913,19 @@ static int dib8000_reset(struct dvb_frontend *fe)
{
struct dib8000_state *state = fe->demodulator_priv;
- dib8000_write_word(state, 1287, 0x0003); /* sram lead in, rdy */
-
if ((state->revision = dib8000_identify(&state->i2c)) == 0)
return -EINVAL;
+ /* sram lead in, rdy */
+ if (state->revision != 0x8090)
+ dib8000_write_word(state, 1287, 0x0003);
+
if (state->revision == 0x8000)
dprintk("error : dib8000 MA not supported");
dibx000_reset_i2c_master(&state->i2c_master);
- dib8000_set_power_mode(state, DIB8000M_POWER_ALL);
+ dib8000_set_power_mode(state, DIB8000_POWER_ALL);
/* always leave the VBG voltage on - it consumes almost nothing but takes a long time to start */
dib8000_set_adc_state(state, DIBX000_VBG_ENABLE);
@@ -782,8 +934,10 @@ static int dib8000_reset(struct dvb_frontend *fe)
dib8000_write_word(state, 770, 0xffff);
dib8000_write_word(state, 771, 0xffff);
dib8000_write_word(state, 772, 0xfffc);
- dib8000_write_word(state, 898, 0x000c); // sad
- dib8000_write_word(state, 1280, 0x004d);
+ if (state->revision == 0x8090)
+ dib8000_write_word(state, 1280, 0x0045);
+ else
+ dib8000_write_word(state, 1280, 0x004d);
dib8000_write_word(state, 1281, 0x000c);
dib8000_write_word(state, 770, 0x0000);
@@ -794,19 +948,25 @@ static int dib8000_reset(struct dvb_frontend *fe)
dib8000_write_word(state, 1281, 0x0000);
/* drives */
- if (state->cfg.drives)
- dib8000_write_word(state, 906, state->cfg.drives);
- else {
- dprintk("using standard PAD-drive-settings, please adjust settings in config-struct to be optimal.");
- dib8000_write_word(state, 906, 0x2d98); // min drive SDRAM - not optimal - adjust
+ if (state->revision != 0x8090) {
+ if (state->cfg.drives)
+ dib8000_write_word(state, 906, state->cfg.drives);
+ else {
+ dprintk("using standard PAD-drive-settings, please adjust settings in config-struct to be optimal.");
+ /* min drive SDRAM - not optimal - adjust */
+ dib8000_write_word(state, 906, 0x2d98);
+ }
}
dib8000_reset_pll(state);
+ if (state->revision != 0x8090)
+ dib8000_write_word(state, 898, 0x0004);
if (dib8000_reset_gpio(state) != 0)
dprintk("GPIO reset was not successful.");
- if (dib8000_set_output_mode(fe, OUTMODE_HIGH_Z) != 0)
+ if ((state->revision != 0x8090) &&
+ (dib8000_set_output_mode(fe, OUTMODE_HIGH_Z) != 0))
dprintk("OUTPUT_MODE could not be resetted.");
state->current_agc = NULL;
@@ -832,6 +992,8 @@ static int dib8000_reset(struct dvb_frontend *fe)
l = *n++;
}
}
+ if (state->revision != 0x8090)
+ dib8000_write_word(state, 903, (0 << 4) | 2);
state->isdbt_cfg_loaded = 0;
//div_cfg override for special configs
@@ -844,10 +1006,12 @@ static int dib8000_reset(struct dvb_frontend *fe)
dib8000_set_bandwidth(fe, 6000);
dib8000_set_adc_state(state, DIBX000_SLOW_ADC_ON);
- dib8000_sad_calib(state);
- dib8000_set_adc_state(state, DIBX000_SLOW_ADC_OFF);
+ if (state->revision != 0x8090) {
+ dib8000_sad_calib(state);
+ dib8000_set_adc_state(state, DIBX000_SLOW_ADC_OFF);
+ }
- dib8000_set_power_mode(state, DIB8000M_POWER_INTERFACE_ONLY);
+ dib8000_set_power_mode(state, DIB8000_POWER_INTERFACE_ONLY);
return 0;
}
@@ -879,6 +1043,8 @@ static int dib8000_set_agc_config(struct dib8000_state *state, u8 band)
{
struct dibx000_agc_config *agc = NULL;
int i;
+ u16 reg;
+
if (state->current_band == band && state->current_agc != NULL)
return 0;
state->current_band = band;
@@ -914,6 +1080,12 @@ static int dib8000_set_agc_config(struct dib8000_state *state, u8 band)
dib8000_write_word(state, 106, state->wbd_ref);
else // use default
dib8000_write_word(state, 106, agc->wbd_ref);
+
+ if (state->revision == 0x8090) {
+ reg = dib8000_read_word(state, 922) & (0x3 << 2);
+ dib8000_write_word(state, 922, reg | (agc->wbd_sel << 2));
+ }
+
dib8000_write_word(state, 107, (agc->wbd_alpha << 9) | (agc->perform_agc_softsplit << 8));
dib8000_write_word(state, 108, agc->agc1_max);
dib8000_write_word(state, 109, agc->agc1_min);
@@ -925,7 +1097,10 @@ static int dib8000_set_agc_config(struct dib8000_state *state, u8 band)
dib8000_write_word(state, 115, (agc->agc2_slope1 << 8) | agc->agc2_slope2);
dib8000_write_word(state, 75, agc->agc1_pt3);
- dib8000_write_word(state, 923, (dib8000_read_word(state, 923) & 0xffe3) | (agc->wbd_inv << 4) | (agc->wbd_sel << 2)); /*LB : 929 -> 923 */
+ if (state->revision != 0x8090)
+ dib8000_write_word(state, 923,
+ (dib8000_read_word(state, 923) & 0xffe3) |
+ (agc->wbd_inv << 4) | (agc->wbd_sel << 2));
return 0;
}
@@ -968,14 +1143,30 @@ static int dib8000_agc_startup(struct dvb_frontend *fe)
{
struct dib8000_state *state = fe->demodulator_priv;
enum frontend_tune_state *tune_state = &state->tune_state;
-
int ret = 0;
+ u16 reg, upd_demod_gain_period = 0x8000;
switch (*tune_state) {
case CT_AGC_START:
// set power-up level: interf+analog+AGC
- dib8000_set_adc_state(state, DIBX000_ADC_ON);
+ if (state->revision != 0x8090)
+ dib8000_set_adc_state(state, DIBX000_ADC_ON);
+ else {
+ dib8000_set_power_mode(state, DIB8000_POWER_ALL);
+
+ reg = dib8000_read_word(state, 1947)&0xff00;
+ dib8000_write_word(state, 1946,
+ upd_demod_gain_period & 0xFFFF);
+ /* bit 14 = enDemodGain */
+ dib8000_write_word(state, 1947, reg | (1<<14) |
+ ((upd_demod_gain_period >> 16) & 0xFF));
+
+ /* enable adc i & q */
+ reg = dib8000_read_word(state, 1920);
+ dib8000_write_word(state, 1920, (reg | 0x3) &
+ (~(1 << 7)));
+ }
if (dib8000_set_agc_config(state, (unsigned char)(BAND_OF_FREQUENCY(fe->dtv_property_cache.frequency / 1000))) != 0) {
*tune_state = CT_AGC_STOP;
@@ -1026,6 +1217,579 @@ static int dib8000_agc_startup(struct dvb_frontend *fe)
}
+static void dib8096p_host_bus_drive(struct dib8000_state *state, u8 drive)
+{
+ u16 reg;
+
+ drive &= 0x7;
+
+ /* drive host bus 2, 3, 4 */
+ reg = dib8000_read_word(state, 1798) &
+ ~(0x7 | (0x7 << 6) | (0x7 << 12));
+ reg |= (drive<<12) | (drive<<6) | drive;
+ dib8000_write_word(state, 1798, reg);
+
+ /* drive host bus 5,6 */
+ reg = dib8000_read_word(state, 1799) & ~((0x7 << 2) | (0x7 << 8));
+ reg |= (drive<<8) | (drive<<2);
+ dib8000_write_word(state, 1799, reg);
+
+ /* drive host bus 7, 8, 9 */
+ reg = dib8000_read_word(state, 1800) &
+ ~(0x7 | (0x7 << 6) | (0x7 << 12));
+ reg |= (drive<<12) | (drive<<6) | drive;
+ dib8000_write_word(state, 1800, reg);
+
+ /* drive host bus 10, 11 */
+ reg = dib8000_read_word(state, 1801) & ~((0x7 << 2) | (0x7 << 8));
+ reg |= (drive<<8) | (drive<<2);
+ dib8000_write_word(state, 1801, reg);
+
+ /* drive host bus 12, 13, 14 */
+ reg = dib8000_read_word(state, 1802) &
+ ~(0x7 | (0x7 << 6) | (0x7 << 12));
+ reg |= (drive<<12) | (drive<<6) | drive;
+ dib8000_write_word(state, 1802, reg);
+}
+
+static u32 dib8096p_calcSyncFreq(u32 P_Kin, u32 P_Kout,
+ u32 insertExtSynchro, u32 syncSize)
+{
+ u32 quantif = 3;
+ u32 nom = (insertExtSynchro * P_Kin+syncSize);
+ u32 denom = P_Kout;
+ u32 syncFreq = ((nom << quantif) / denom);
+
+ if ((syncFreq & ((1 << quantif) - 1)) != 0)
+ syncFreq = (syncFreq >> quantif) + 1;
+ else
+ syncFreq = (syncFreq >> quantif);
+
+ if (syncFreq != 0)
+ syncFreq = syncFreq - 1;
+
+ return syncFreq;
+}
+
+static void dib8096p_cfg_DibTx(struct dib8000_state *state, u32 P_Kin,
+ u32 P_Kout, u32 insertExtSynchro, u32 synchroMode,
+ u32 syncWord, u32 syncSize)
+{
+ dprintk("Configure DibStream Tx");
+
+ dib8000_write_word(state, 1615, 1);
+ dib8000_write_word(state, 1603, P_Kin);
+ dib8000_write_word(state, 1605, P_Kout);
+ dib8000_write_word(state, 1606, insertExtSynchro);
+ dib8000_write_word(state, 1608, synchroMode);
+ dib8000_write_word(state, 1609, (syncWord >> 16) & 0xffff);
+ dib8000_write_word(state, 1610, syncWord & 0xffff);
+ dib8000_write_word(state, 1612, syncSize);
+ dib8000_write_word(state, 1615, 0);
+}
+
+static void dib8096p_cfg_DibRx(struct dib8000_state *state, u32 P_Kin,
+ u32 P_Kout, u32 synchroMode, u32 insertExtSynchro,
+ u32 syncWord, u32 syncSize, u32 dataOutRate)
+{
+ u32 syncFreq;
+
+ dprintk("Configure DibStream Rx synchroMode = %d", synchroMode);
+
+ if ((P_Kin != 0) && (P_Kout != 0)) {
+ syncFreq = dib8096p_calcSyncFreq(P_Kin, P_Kout,
+ insertExtSynchro, syncSize);
+ dib8000_write_word(state, 1542, syncFreq);
+ }
+
+ dib8000_write_word(state, 1554, 1);
+ dib8000_write_word(state, 1536, P_Kin);
+ dib8000_write_word(state, 1537, P_Kout);
+ dib8000_write_word(state, 1539, synchroMode);
+ dib8000_write_word(state, 1540, (syncWord >> 16) & 0xffff);
+ dib8000_write_word(state, 1541, syncWord & 0xffff);
+ dib8000_write_word(state, 1543, syncSize);
+ dib8000_write_word(state, 1544, dataOutRate);
+ dib8000_write_word(state, 1554, 0);
+}
+
+static void dib8096p_enMpegMux(struct dib8000_state *state, int onoff)
+{
+ u16 reg_1287;
+
+ reg_1287 = dib8000_read_word(state, 1287);
+
+ switch (onoff) {
+ case 1:
+ reg_1287 &= ~(1 << 8);
+ break;
+ case 0:
+ reg_1287 |= (1 << 8);
+ break;
+ }
+
+ dib8000_write_word(state, 1287, reg_1287);
+}
+
+static void dib8096p_configMpegMux(struct dib8000_state *state,
+ u16 pulseWidth, u16 enSerialMode, u16 enSerialClkDiv2)
+{
+ u16 reg_1287;
+
+ dprintk("Enable Mpeg mux");
+
+ dib8096p_enMpegMux(state, 0);
+
+ /* If the input mode is MPEG do not divide the serial clock */
+ if ((enSerialMode == 1) && (state->input_mode_mpeg == 1))
+ enSerialClkDiv2 = 0;
+
+ reg_1287 = ((pulseWidth & 0x1f) << 3) |
+ ((enSerialMode & 0x1) << 2) | (enSerialClkDiv2 & 0x1);
+ dib8000_write_word(state, 1287, reg_1287);
+
+ dib8096p_enMpegMux(state, 1);
+}
+
+static void dib8096p_setDibTxMux(struct dib8000_state *state, int mode)
+{
+ u16 reg_1288 = dib8000_read_word(state, 1288) & ~(0x7 << 7);
+
+ switch (mode) {
+ case MPEG_ON_DIBTX:
+ dprintk("SET MPEG ON DIBSTREAM TX");
+ dib8096p_cfg_DibTx(state, 8, 5, 0, 0, 0, 0);
+ reg_1288 |= (1 << 9); break;
+ case DIV_ON_DIBTX:
+ dprintk("SET DIV_OUT ON DIBSTREAM TX");
+ dib8096p_cfg_DibTx(state, 5, 5, 0, 0, 0, 0);
+ reg_1288 |= (1 << 8); break;
+ case ADC_ON_DIBTX:
+ dprintk("SET ADC_OUT ON DIBSTREAM TX");
+ dib8096p_cfg_DibTx(state, 20, 5, 10, 0, 0, 0);
+ reg_1288 |= (1 << 7); break;
+ default:
+ break;
+ }
+ dib8000_write_word(state, 1288, reg_1288);
+}
+
+static void dib8096p_setHostBusMux(struct dib8000_state *state, int mode)
+{
+ u16 reg_1288 = dib8000_read_word(state, 1288) & ~(0x7 << 4);
+
+ switch (mode) {
+ case DEMOUT_ON_HOSTBUS:
+ dprintk("SET DEM OUT OLD INTERF ON HOST BUS");
+ dib8096p_enMpegMux(state, 0);
+ reg_1288 |= (1 << 6);
+ break;
+ case DIBTX_ON_HOSTBUS:
+ dprintk("SET DIBSTREAM TX ON HOST BUS");
+ dib8096p_enMpegMux(state, 0);
+ reg_1288 |= (1 << 5);
+ break;
+ case MPEG_ON_HOSTBUS:
+ dprintk("SET MPEG MUX ON HOST BUS");
+ reg_1288 |= (1 << 4);
+ break;
+ default:
+ break;
+ }
+ dib8000_write_word(state, 1288, reg_1288);
+}
+
+static int dib8096p_set_diversity_in(struct dvb_frontend *fe, int onoff)
+{
+ struct dib8000_state *state = fe->demodulator_priv;
+ u16 reg_1287;
+
+ switch (onoff) {
+ case 0: /* only use the internal way - not the diversity input */
+ dprintk("%s mode OFF : by default Enable Mpeg INPUT",
+ __func__);
+ /* outputRate = 8 */
+ dib8096p_cfg_DibRx(state, 8, 5, 0, 0, 0, 8, 0);
+
+ /* Do not divide the serial clock of MPEG MUX in
+ SERIAL MODE in case input mode MPEG is used */
+ reg_1287 = dib8000_read_word(state, 1287);
+ /* enSerialClkDiv2 == 1 ? */
+ if ((reg_1287 & 0x1) == 1) {
+ /* force enSerialClkDiv2 = 0 */
+ reg_1287 &= ~0x1;
+ dib8000_write_word(state, 1287, reg_1287);
+ }
+ state->input_mode_mpeg = 1;
+ break;
+ case 1: /* both ways */
+ case 2: /* only the diversity input */
+ dprintk("%s ON : Enable diversity INPUT", __func__);
+ dib8096p_cfg_DibRx(state, 5, 5, 0, 0, 0, 0, 0);
+ state->input_mode_mpeg = 0;
+ break;
+ }
+
+ dib8000_set_diversity_in(state->fe[0], onoff);
+ return 0;
+}
+
+static int dib8096p_set_output_mode(struct dvb_frontend *fe, int mode)
+{
+ struct dib8000_state *state = fe->demodulator_priv;
+ u16 outreg, smo_mode, fifo_threshold;
+ u8 prefer_mpeg_mux_use = 1;
+ int ret = 0;
+
+ dib8096p_host_bus_drive(state, 1);
+
+ fifo_threshold = 1792;
+ smo_mode = (dib8000_read_word(state, 299) & 0x0050) | (1 << 1);
+ outreg = dib8000_read_word(state, 1286) &
+ ~((1 << 10) | (0x7 << 6) | (1 << 1));
+
+ switch (mode) {
+ case OUTMODE_HIGH_Z:
+ outreg = 0;
+ break;
+
+ case OUTMODE_MPEG2_SERIAL:
+ if (prefer_mpeg_mux_use) {
+ dprintk("dib8096P setting output mode TS_SERIAL using Mpeg Mux");
+ dib8096p_configMpegMux(state, 3, 1, 1);
+ dib8096p_setHostBusMux(state, MPEG_ON_HOSTBUS);
+ } else {/* Use Smooth block */
+ dprintk("dib8096P setting output mode TS_SERIAL using Smooth bloc");
+ dib8096p_setHostBusMux(state,
+ DEMOUT_ON_HOSTBUS);
+ outreg |= (2 << 6) | (0 << 1);
+ }
+ break;
+
+ case OUTMODE_MPEG2_PAR_GATED_CLK:
+ if (prefer_mpeg_mux_use) {
+ dprintk("dib8096P setting output mode TS_PARALLEL_GATED using Mpeg Mux");
+ dib8096p_configMpegMux(state, 2, 0, 0);
+ dib8096p_setHostBusMux(state, MPEG_ON_HOSTBUS);
+ } else { /* Use Smooth block */
+ dprintk("dib8096P setting output mode TS_PARALLEL_GATED using Smooth block");
+ dib8096p_setHostBusMux(state,
+ DEMOUT_ON_HOSTBUS);
+ outreg |= (0 << 6);
+ }
+ break;
+
+ case OUTMODE_MPEG2_PAR_CONT_CLK: /* Using Smooth block only */
+ dprintk("dib8096P setting output mode TS_PARALLEL_CONT using Smooth block");
+ dib8096p_setHostBusMux(state, DEMOUT_ON_HOSTBUS);
+ outreg |= (1 << 6);
+ break;
+
+ case OUTMODE_MPEG2_FIFO:
+ /* Using Smooth block because not supported
+ by the new Mpeg Mux block */
+ dprintk("dib8096P setting output mode TS_FIFO using Smooth block");
+ dib8096p_setHostBusMux(state, DEMOUT_ON_HOSTBUS);
+ outreg |= (5 << 6);
+ smo_mode |= (3 << 1);
+ fifo_threshold = 512;
+ break;
+
+ case OUTMODE_DIVERSITY:
+ dprintk("dib8096P setting output mode MODE_DIVERSITY");
+ dib8096p_setDibTxMux(state, DIV_ON_DIBTX);
+ dib8096p_setHostBusMux(state, DIBTX_ON_HOSTBUS);
+ break;
+
+ case OUTMODE_ANALOG_ADC:
+ dprintk("dib8096P setting output mode MODE_ANALOG_ADC");
+ dib8096p_setDibTxMux(state, ADC_ON_DIBTX);
+ dib8096p_setHostBusMux(state, DIBTX_ON_HOSTBUS);
+ break;
+ }
+
+ if (mode != OUTMODE_HIGH_Z)
+ outreg |= (1<<10);
+
+ dprintk("output_mpeg2_in_188_bytes = %d",
+ state->cfg.output_mpeg2_in_188_bytes);
+ if (state->cfg.output_mpeg2_in_188_bytes)
+ smo_mode |= (1 << 5);
+
+ ret |= dib8000_write_word(state, 299, smo_mode);
+ /* synchronous fread */
+ ret |= dib8000_write_word(state, 299 + 1, fifo_threshold);
+ ret |= dib8000_write_word(state, 1286, outreg);
+
+ return ret;
+}
+
+static int map_addr_to_serpar_number(struct i2c_msg *msg)
+{
+ if (msg->buf[0] <= 15)
+ msg->buf[0] -= 1;
+ else if (msg->buf[0] == 17)
+ msg->buf[0] = 15;
+ else if (msg->buf[0] == 16)
+ msg->buf[0] = 17;
+ else if (msg->buf[0] == 19)
+ msg->buf[0] = 16;
+ else if (msg->buf[0] >= 21 && msg->buf[0] <= 25)
+ msg->buf[0] -= 3;
+ else if (msg->buf[0] == 28)
+ msg->buf[0] = 23;
+ else if (msg->buf[0] == 99)
+ msg->buf[0] = 99;
+ else
+ return -EINVAL;
+ return 0;
+}
+
+static int dib8096p_tuner_write_serpar(struct i2c_adapter *i2c_adap,
+ struct i2c_msg msg[], int num)
+{
+ struct dib8000_state *state = i2c_get_adapdata(i2c_adap);
+ u8 n_overflow = 1;
+ u16 i = 1000;
+ u16 serpar_num = msg[0].buf[0];
+
+ while (n_overflow == 1 && i) {
+ n_overflow = (dib8000_read_word(state, 1984) >> 1) & 0x1;
+ i--;
+ if (i == 0)
+ dprintk("Tuner ITF: write busy (overflow)");
+ }
+ dib8000_write_word(state, 1985, (1 << 6) | (serpar_num & 0x3f));
+ dib8000_write_word(state, 1986, (msg[0].buf[1] << 8) | msg[0].buf[2]);
+
+ return num;
+}
+
+static int dib8096p_tuner_read_serpar(struct i2c_adapter *i2c_adap,
+ struct i2c_msg msg[], int num)
+{
+ struct dib8000_state *state = i2c_get_adapdata(i2c_adap);
+ u8 n_overflow = 1, n_empty = 1;
+ u16 i = 1000;
+ u16 serpar_num = msg[0].buf[0];
+ u16 read_word;
+
+ while (n_overflow == 1 && i) {
+ n_overflow = (dib8000_read_word(state, 1984) >> 1) & 0x1;
+ i--;
+ if (i == 0)
+ dprintk("TunerITF: read busy (overflow)");
+ }
+ dib8000_write_word(state, 1985, (0<<6) | (serpar_num&0x3f));
+
+ i = 1000;
+ while (n_empty == 1 && i) {
+ n_empty = dib8000_read_word(state, 1984)&0x1;
+ i--;
+ if (i == 0)
+ dprintk("TunerITF: read busy (empty)");
+ }
+
+ read_word = dib8000_read_word(state, 1987);
+ msg[1].buf[0] = (read_word >> 8) & 0xff;
+ msg[1].buf[1] = (read_word) & 0xff;
+
+ return num;
+}
+
+static int dib8096p_tuner_rw_serpar(struct i2c_adapter *i2c_adap,
+ struct i2c_msg msg[], int num)
+{
+ if (map_addr_to_serpar_number(&msg[0]) == 0) {
+ if (num == 1) /* write */
+ return dib8096p_tuner_write_serpar(i2c_adap, msg, 1);
+ else /* read */
+ return dib8096p_tuner_read_serpar(i2c_adap, msg, 2);
+ }
+ return num;
+}
+
+static int dib8096p_rw_on_apb(struct i2c_adapter *i2c_adap,
+ struct i2c_msg msg[], int num, u16 apb_address)
+{
+ struct dib8000_state *state = i2c_get_adapdata(i2c_adap);
+ u16 word;
+
+ if (num == 1) { /* write */
+ dib8000_write_word(state, apb_address,
+ ((msg[0].buf[1] << 8) | (msg[0].buf[2])));
+ } else {
+ word = dib8000_read_word(state, apb_address);
+ msg[1].buf[0] = (word >> 8) & 0xff;
+ msg[1].buf[1] = (word) & 0xff;
+ }
+ return num;
+}
+
+static int dib8096p_tuner_xfer(struct i2c_adapter *i2c_adap,
+ struct i2c_msg msg[], int num)
+{
+ struct dib8000_state *state = i2c_get_adapdata(i2c_adap);
+ u16 apb_address = 0, word;
+ int i = 0;
+
+ switch (msg[0].buf[0]) {
+ case 0x12:
+ apb_address = 1920;
+ break;
+ case 0x14:
+ apb_address = 1921;
+ break;
+ case 0x24:
+ apb_address = 1922;
+ break;
+ case 0x1a:
+ apb_address = 1923;
+ break;
+ case 0x22:
+ apb_address = 1924;
+ break;
+ case 0x33:
+ apb_address = 1926;
+ break;
+ case 0x34:
+ apb_address = 1927;
+ break;
+ case 0x35:
+ apb_address = 1928;
+ break;
+ case 0x36:
+ apb_address = 1929;
+ break;
+ case 0x37:
+ apb_address = 1930;
+ break;
+ case 0x38:
+ apb_address = 1931;
+ break;
+ case 0x39:
+ apb_address = 1932;
+ break;
+ case 0x2a:
+ apb_address = 1935;
+ break;
+ case 0x2b:
+ apb_address = 1936;
+ break;
+ case 0x2c:
+ apb_address = 1937;
+ break;
+ case 0x2d:
+ apb_address = 1938;
+ break;
+ case 0x2e:
+ apb_address = 1939;
+ break;
+ case 0x2f:
+ apb_address = 1940;
+ break;
+ case 0x30:
+ apb_address = 1941;
+ break;
+ case 0x31:
+ apb_address = 1942;
+ break;
+ case 0x32:
+ apb_address = 1943;
+ break;
+ case 0x3e:
+ apb_address = 1944;
+ break;
+ case 0x3f:
+ apb_address = 1945;
+ break;
+ case 0x40:
+ apb_address = 1948;
+ break;
+ case 0x25:
+ apb_address = 936;
+ break;
+ case 0x26:
+ apb_address = 937;
+ break;
+ case 0x27:
+ apb_address = 938;
+ break;
+ case 0x28:
+ apb_address = 939;
+ break;
+ case 0x1d:
+ /* get sad sel request */
+ i = ((dib8000_read_word(state, 921) >> 12)&0x3);
+ word = dib8000_read_word(state, 924+i);
+ msg[1].buf[0] = (word >> 8) & 0xff;
+ msg[1].buf[1] = (word) & 0xff;
+ return num;
+ case 0x1f:
+ if (num == 1) { /* write */
+ word = (u16) ((msg[0].buf[1] << 8) |
+ msg[0].buf[2]);
+ /* in VGAMODE, the Sel bits are located at bits 0/1 */
+ word &= 0x3;
+ word = (dib8000_read_word(state, 921) &
+ ~(3<<12)) | (word<<12);
+ /* Set the proper input */
+ dib8000_write_word(state, 921, word);
+ return num;
+ }
+ }
+
+ if (apb_address != 0) /* R/W access via APB */
+ return dib8096p_rw_on_apb(i2c_adap, msg, num, apb_address);
+ else /* R/W access via SERPAR */
+ return dib8096p_tuner_rw_serpar(i2c_adap, msg, num);
+
+ return 0;
+}
+
+static u32 dib8096p_i2c_func(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_I2C;
+}
+
+static struct i2c_algorithm dib8096p_tuner_xfer_algo = {
+ .master_xfer = dib8096p_tuner_xfer,
+ .functionality = dib8096p_i2c_func,
+};
+
+struct i2c_adapter *dib8096p_get_i2c_tuner(struct dvb_frontend *fe)
+{
+ struct dib8000_state *st = fe->demodulator_priv;
+ return &st->dib8096p_tuner_adap;
+}
+EXPORT_SYMBOL(dib8096p_get_i2c_tuner);
+
+int dib8096p_tuner_sleep(struct dvb_frontend *fe, int onoff)
+{
+ struct dib8000_state *state = fe->demodulator_priv;
+ u16 en_cur_state;
+
+ dprintk("sleep dib8096p: %d", onoff);
+
+ en_cur_state = dib8000_read_word(state, 1922);
+
+ /* LNAs and MIX are ON and therefore it is a valid configuration */
+ if (en_cur_state > 0xff)
+ state->tuner_enable = en_cur_state;
+
+ if (onoff)
+ en_cur_state &= 0x00ff;
+ else {
+ if (state->tuner_enable != 0)
+ en_cur_state = state->tuner_enable;
+ }
+
+ dib8000_write_word(state, 1922, en_cur_state);
+
+ return 0;
+}
+EXPORT_SYMBOL(dib8096p_tuner_sleep);
+
static const s32 lut_1000ln_mant[] =
{
908, 7003, 7090, 7170, 7244, 7313, 7377, 7438, 7495, 7549, 7600
@@ -1051,6 +1815,26 @@ s32 dib8000_get_adc_power(struct dvb_frontend *fe, u8 mode)
}
EXPORT_SYMBOL(dib8000_get_adc_power);
+int dib8090p_get_dc_power(struct dvb_frontend *fe, u8 IQ)
+{
+ struct dib8000_state *state = fe->demodulator_priv;
+ int val = 0;
+
+ switch (IQ) {
+ case 1:
+ val = dib8000_read_word(state, 403);
+ break;
+ case 0:
+ val = dib8000_read_word(state, 404);
+ break;
+ }
+ if (val & 0x200)
+ val -= 1024;
+
+ return val;
+}
+EXPORT_SYMBOL(dib8090p_get_dc_power);
+
static void dib8000_update_timf(struct dib8000_state *state)
{
u32 timf = state->timf = dib8000_read32(state, 435);
@@ -1060,6 +1844,26 @@ static void dib8000_update_timf(struct dib8000_state *state)
dprintk("Updated timing frequency: %d (default: %d)", state->timf, state->timf_default);
}
+u32 dib8000_ctrl_timf(struct dvb_frontend *fe, uint8_t op, uint32_t timf)
+{
+ struct dib8000_state *state = fe->demodulator_priv;
+
+ switch (op) {
+ case DEMOD_TIMF_SET:
+ state->timf = timf;
+ break;
+ case DEMOD_TIMF_UPDATE:
+ dib8000_update_timf(state);
+ break;
+ case DEMOD_TIMF_GET:
+ break;
+ }
+ dib8000_set_bandwidth(state->fe[0], 6000);
+
+ return state->timf;
+}
+EXPORT_SYMBOL(dib8000_ctrl_timf);
+
static const u16 adc_target_16dB[11] = {
(1 << 13) - 825 - 117,
(1 << 13) - 837 - 117,
@@ -1086,6 +1890,9 @@ static void dib8000_set_channel(struct dib8000_state *state, u8 seq, u8 autosear
u16 init_prbs = 0xfff;
u16 ana_gain = 0;
+ if (state->revision == 0x8090)
+ dib8000_init_sdram(state);
+
if (state->ber_monitored_layer != LAYER_ALL)
dib8000_write_word(state, 285, (dib8000_read_word(state, 285) & 0x60) | state->ber_monitored_layer);
else
@@ -1418,7 +2225,10 @@ static void dib8000_set_channel(struct dib8000_state *state, u8 seq, u8 autosear
dprintk("nbseg_diff = %X (%d)", seg_diff_mask, seg_diff_mask);
state->differential_constellation = (seg_diff_mask != 0);
- dib8000_set_diversity_in(state->fe[0], state->diversity_onoff);
+ if (state->revision != 0x8090)
+ dib8000_set_diversity_in(state->fe[0], state->diversity_onoff);
+ else
+ dib8096p_set_diversity_in(state->fe[0], state->diversity_onoff);
if (state->fe[0]->dtv_property_cache.isdbt_sb_mode == 1) {
if (state->fe[0]->dtv_property_cache.isdbt_partial_reception == 1)
@@ -1870,7 +2680,7 @@ static int dib8000_tune(struct dvb_frontend *fe)
{
struct dib8000_state *state = fe->demodulator_priv;
int ret = 0;
- u16 value, mode = fft_to_mode(state);
+ u16 lock, value, mode = fft_to_mode(state);
// we are already tuned - just resuming from suspend
if (state == NULL)
@@ -1924,7 +2734,11 @@ static int dib8000_tune(struct dvb_frontend *fe)
}
// we achieved a coff_cpil_lock - it's time to update the timf
- if ((dib8000_read_word(state, 568) >> 11) & 0x1)
+ if (state->revision != 0x8090)
+ lock = dib8000_read_word(state, 568);
+ else
+ lock = dib8000_read_word(state, 570);
+ if ((lock >> 11) & 0x1)
dib8000_update_timf(state);
//now that tune is finished, lock0 should lock on fec_mpeg to output this lock on MP_LOCK. It's changed in autosearch start
@@ -1946,11 +2760,14 @@ static int dib8000_wakeup(struct dvb_frontend *fe)
u8 index_frontend;
int ret;
- dib8000_set_power_mode(state, DIB8000M_POWER_ALL);
+ dib8000_set_power_mode(state, DIB8000_POWER_ALL);
dib8000_set_adc_state(state, DIBX000_ADC_ON);
if (dib8000_set_adc_state(state, DIBX000_SLOW_ADC_ON) != 0)
dprintk("could not start Slow ADC");
+ if (state->revision != 0x8090)
+ dib8000_sad_calib(state);
+
for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) {
ret = state->fe[index_frontend]->ops.init(state->fe[index_frontend]);
if (ret < 0)
@@ -1972,8 +2789,9 @@ static int dib8000_sleep(struct dvb_frontend *fe)
return ret;
}
- dib8000_set_output_mode(fe, OUTMODE_HIGH_Z);
- dib8000_set_power_mode(state, DIB8000M_POWER_INTERFACE_ONLY);
+ if (state->revision != 0x8090)
+ dib8000_set_output_mode(fe, OUTMODE_HIGH_Z);
+ dib8000_set_power_mode(state, DIB8000_POWER_INTERFACE_ONLY);
return dib8000_set_adc_state(state, DIBX000_SLOW_ADC_OFF) | dib8000_set_adc_state(state, DIBX000_ADC_OFF);
}
@@ -1992,7 +2810,7 @@ int dib8000_set_tune_state(struct dvb_frontend *fe, enum frontend_tune_state tun
}
EXPORT_SYMBOL(dib8000_set_tune_state);
-static int dib8000_get_frontend(struct dvb_frontend *fe, struct dvb_frontend_parameters *fep)
+static int dib8000_get_frontend(struct dvb_frontend *fe)
{
struct dib8000_state *state = fe->demodulator_priv;
u16 i, val = 0;
@@ -2006,7 +2824,7 @@ static int dib8000_get_frontend(struct dvb_frontend *fe, struct dvb_frontend_par
if (stat&FE_HAS_SYNC) {
dprintk("TMCC lock on the slave%i", index_frontend);
/* synchronize the cache with the other frontends */
- state->fe[index_frontend]->ops.get_frontend(state->fe[index_frontend], fep);
+ state->fe[index_frontend]->ops.get_frontend(state->fe[index_frontend]);
for (sub_index_frontend = 0; (sub_index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[sub_index_frontend] != NULL); sub_index_frontend++) {
if (sub_index_frontend != index_frontend) {
state->fe[sub_index_frontend]->dtv_property_cache.isdbt_sb_mode = state->fe[index_frontend]->dtv_property_cache.isdbt_sb_mode;
@@ -2028,7 +2846,10 @@ static int dib8000_get_frontend(struct dvb_frontend *fe, struct dvb_frontend_par
fe->dtv_property_cache.isdbt_sb_mode = dib8000_read_word(state, 508) & 0x1;
- val = dib8000_read_word(state, 570);
+ if (state->revision == 0x8090)
+ val = dib8000_read_word(state, 572);
+ else
+ val = dib8000_read_word(state, 570);
fe->dtv_property_cache.inversion = (val & 0x40) >> 6;
switch ((val & 0x30) >> 4) {
case 1:
@@ -2135,7 +2956,7 @@ static int dib8000_get_frontend(struct dvb_frontend *fe, struct dvb_frontend_par
return 0;
}
-static int dib8000_set_frontend(struct dvb_frontend *fe, struct dvb_frontend_parameters *fep)
+static int dib8000_set_frontend(struct dvb_frontend *fe)
{
struct dib8000_state *state = fe->demodulator_priv;
u8 nbr_pending, exit_condition, index_frontend;
@@ -2158,9 +2979,14 @@ static int dib8000_set_frontend(struct dvb_frontend *fe, struct dvb_frontend_par
state->fe[index_frontend]->dtv_property_cache.delivery_system = SYS_ISDBT;
memcpy(&state->fe[index_frontend]->dtv_property_cache, &fe->dtv_property_cache, sizeof(struct dtv_frontend_properties));
- dib8000_set_output_mode(state->fe[index_frontend], OUTMODE_HIGH_Z);
+ if (state->revision != 0x8090)
+ dib8000_set_output_mode(state->fe[index_frontend],
+ OUTMODE_HIGH_Z);
+ else
+ dib8096p_set_output_mode(state->fe[index_frontend],
+ OUTMODE_HIGH_Z);
if (state->fe[index_frontend]->ops.tuner_ops.set_params)
- state->fe[index_frontend]->ops.tuner_ops.set_params(state->fe[index_frontend], fep);
+ state->fe[index_frontend]->ops.tuner_ops.set_params(state->fe[index_frontend]);
dib8000_set_tune_state(state->fe[index_frontend], CT_AGC_START);
}
@@ -2215,7 +3041,7 @@ static int dib8000_set_frontend(struct dvb_frontend *fe, struct dvb_frontend_par
((state->fe[0]->dtv_property_cache.layer[1].segment_count == 0) ||
((state->fe[0]->dtv_property_cache.isdbt_layer_enabled & (2 << 0)) == 0)) &&
((state->fe[0]->dtv_property_cache.layer[2].segment_count == 0) || ((state->fe[0]->dtv_property_cache.isdbt_layer_enabled & (3 << 0)) == 0)))) {
- int i = 80000;
+ int i = 100;
u8 found = 0;
u8 tune_failed = 0;
@@ -2243,6 +3069,7 @@ static int dib8000_set_frontend(struct dvb_frontend *fe, struct dvb_frontend_par
default:
dprintk("unhandled autosearch result");
case 1:
+ tune_failed |= (1 << index_frontend);
dprintk("autosearch failed for the frontend%i", index_frontend);
break;
}
@@ -2261,21 +3088,44 @@ static int dib8000_set_frontend(struct dvb_frontend *fe, struct dvb_frontend_par
dprintk("tune success on frontend%i", index_frontend_success);
- dib8000_get_frontend(fe, fep);
+ dib8000_get_frontend(fe);
}
for (index_frontend = 0, ret = 0; (ret >= 0) && (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++)
ret = dib8000_tune(state->fe[index_frontend]);
/* set output mode and diversity input */
- dib8000_set_output_mode(state->fe[0], state->cfg.output_mode);
- for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) {
- dib8000_set_output_mode(state->fe[index_frontend], OUTMODE_DIVERSITY);
- dib8000_set_diversity_in(state->fe[index_frontend-1], 1);
- }
+ if (state->revision != 0x8090) {
+ dib8000_set_output_mode(state->fe[0], state->cfg.output_mode);
+ for (index_frontend = 1;
+ (index_frontend < MAX_NUMBER_OF_FRONTENDS) &&
+ (state->fe[index_frontend] != NULL);
+ index_frontend++) {
+ dib8000_set_output_mode(state->fe[index_frontend],
+ OUTMODE_DIVERSITY);
+ dib8000_set_diversity_in(state->fe[index_frontend-1], 1);
+ }
- /* turn off the diversity of the last chip */
- dib8000_set_diversity_in(state->fe[index_frontend-1], 0);
+ /* turn off the diversity of the last chip */
+ dib8000_set_diversity_in(state->fe[index_frontend-1], 0);
+ } else {
+ dib8096p_set_output_mode(state->fe[0], state->cfg.output_mode);
+ if (state->cfg.enMpegOutput == 0) {
+ dib8096p_setDibTxMux(state, MPEG_ON_DIBTX);
+ dib8096p_setHostBusMux(state, DIBTX_ON_HOSTBUS);
+ }
+ for (index_frontend = 1;
+ (index_frontend < MAX_NUMBER_OF_FRONTENDS) &&
+ (state->fe[index_frontend] != NULL);
+ index_frontend++) {
+ dib8096p_set_output_mode(state->fe[index_frontend],
+ OUTMODE_DIVERSITY);
+ dib8096p_set_diversity_in(state->fe[index_frontend-1], 1);
+ }
+
+ /* turn off the diversity of the last chip */
+ dib8096p_set_diversity_in(state->fe[index_frontend-1], 0);
+ }
return ret;
}
@@ -2284,15 +3134,22 @@ static u16 dib8000_read_lock(struct dvb_frontend *fe)
{
struct dib8000_state *state = fe->demodulator_priv;
+ if (state->revision == 0x8090)
+ return dib8000_read_word(state, 570);
return dib8000_read_word(state, 568);
}
static int dib8000_read_status(struct dvb_frontend *fe, fe_status_t * stat)
{
struct dib8000_state *state = fe->demodulator_priv;
- u16 lock_slave = 0, lock = dib8000_read_word(state, 568);
+ u16 lock_slave = 0, lock;
u8 index_frontend;
+ if (state->revision == 0x8090)
+ lock = dib8000_read_word(state, 570);
+ else
+ lock = dib8000_read_word(state, 568);
+
for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++)
lock_slave |= dib8000_read_lock(state->fe[index_frontend]);
@@ -2330,14 +3187,26 @@ static int dib8000_read_status(struct dvb_frontend *fe, fe_status_t * stat)
static int dib8000_read_ber(struct dvb_frontend *fe, u32 * ber)
{
struct dib8000_state *state = fe->demodulator_priv;
- *ber = (dib8000_read_word(state, 560) << 16) | dib8000_read_word(state, 561); // 13 segments
+
+ /* 13 segments */
+ if (state->revision == 0x8090)
+ *ber = (dib8000_read_word(state, 562) << 16) |
+ dib8000_read_word(state, 563);
+ else
+ *ber = (dib8000_read_word(state, 560) << 16) |
+ dib8000_read_word(state, 561);
return 0;
}
static int dib8000_read_unc_blocks(struct dvb_frontend *fe, u32 * unc)
{
struct dib8000_state *state = fe->demodulator_priv;
- *unc = dib8000_read_word(state, 565); // packet error on 13 seg
+
+ /* packet error on 13 seg */
+ if (state->revision == 0x8090)
+ *unc = dib8000_read_word(state, 567);
+ else
+ *unc = dib8000_read_word(state, 565);
return 0;
}
@@ -2370,14 +3239,20 @@ static u32 dib8000_get_snr(struct dvb_frontend *fe)
u32 n, s, exp;
u16 val;
- val = dib8000_read_word(state, 542);
+ if (state->revision != 0x8090)
+ val = dib8000_read_word(state, 542);
+ else
+ val = dib8000_read_word(state, 544);
n = (val >> 6) & 0xff;
exp = (val & 0x3f);
if ((exp & 0x20) != 0)
exp -= 0x40;
n <<= exp+16;
- val = dib8000_read_word(state, 543);
+ if (state->revision != 0x8090)
+ val = dib8000_read_word(state, 543);
+ else
+ val = dib8000_read_word(state, 545);
s = (val >> 6) & 0xff;
exp = (val & 0x3f);
if ((exp & 0x20) != 0)
@@ -2401,7 +3276,7 @@ static int dib8000_read_snr(struct dvb_frontend *fe, u16 * snr)
for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++)
snr_master += dib8000_get_snr(state->fe[index_frontend]);
- if (snr_master != 0) {
+ if ((snr_master >> 16) != 0) {
snr_master = 10*intlog10(snr_master>>16);
*snr = snr_master / ((1 << 24) / 10);
}
@@ -2458,7 +3333,8 @@ struct dvb_frontend *dib8000_get_slave_frontend(struct dvb_frontend *fe, int sla
EXPORT_SYMBOL(dib8000_get_slave_frontend);
-int dib8000_i2c_enumeration(struct i2c_adapter *host, int no_of_demods, u8 default_addr, u8 first_addr)
+int dib8000_i2c_enumeration(struct i2c_adapter *host, int no_of_demods,
+ u8 default_addr, u8 first_addr, u8 is_dib8096p)
{
int k = 0, ret = 0;
u8 new_addr = 0;
@@ -2488,9 +3364,12 @@ int dib8000_i2c_enumeration(struct i2c_adapter *host, int no_of_demods, u8 defau
new_addr = first_addr + (k << 1);
client.addr = new_addr;
- dib8000_i2c_write16(&client, 1287, 0x0003); /* sram lead in, rdy */
- if (dib8000_identify(&client) == 0) {
+ if (!is_dib8096p)
+ dib8000_i2c_write16(&client, 1287, 0x0003); /* sram lead in, rdy */
+ if (dib8000_identify(&client) == 0) {
+ /* sram lead in, rdy */
+ if (!is_dib8096p)
+ dib8000_i2c_write16(&client, 1287, 0x0003);
client.addr = default_addr;
if (dib8000_identify(&client) == 0) {
dprintk("#%d: not identified", k);
@@ -2549,6 +3428,7 @@ static void dib8000_release(struct dvb_frontend *fe)
dvb_frontend_detach(st->fe[index_frontend]);
dibx000_exit_i2c_master(&st->i2c_master);
+ i2c_del_adapter(&st->dib8096p_tuner_adap);
kfree(st->fe[0]);
kfree(st);
}
@@ -2581,9 +3461,9 @@ int dib8000_pid_filter(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff)
EXPORT_SYMBOL(dib8000_pid_filter);
static const struct dvb_frontend_ops dib8000_ops = {
+ .delsys = { SYS_ISDBT },
.info = {
.name = "DiBcom 8000 ISDB-T",
- .type = FE_OFDM,
.frequency_min = 44250000,
.frequency_max = 867250000,
.frequency_stepsize = 62500,
@@ -2651,6 +3531,15 @@ struct dvb_frontend *dib8000_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr, s
dibx000_init_i2c_master(&state->i2c_master, DIB8000, state->i2c.adap, state->i2c.addr);
+ /* init 8096p tuner adapter */
+ strncpy(state->dib8096p_tuner_adap.name, "DiB8096P tuner interface",
+ sizeof(state->dib8096p_tuner_adap.name));
+ state->dib8096p_tuner_adap.algo = &dib8096p_tuner_xfer_algo;
+ state->dib8096p_tuner_adap.algo_data = NULL;
+ state->dib8096p_tuner_adap.dev.parent = state->i2c.adap->dev.parent;
+ i2c_set_adapdata(&state->dib8096p_tuner_adap, state);
+ i2c_add_adapter(&state->dib8096p_tuner_adap);
+
dib8000_reset(fe);
dib8000_write_word(state, 285, (dib8000_read_word(state, 285) & ~0x60) | (3 << 5)); /* ber_rs_len = 3 */
diff --git a/drivers/media/dvb/frontends/dib8000.h b/drivers/media/dvb/frontends/dib8000.h
index 617f9eba3a09..39591bb172c1 100644
--- a/drivers/media/dvb/frontends/dib8000.h
+++ b/drivers/media/dvb/frontends/dib8000.h
@@ -32,6 +32,7 @@ struct dib8000_config {
u8 div_cfg;
u8 output_mode;
u8 refclksel;
+ u8 enMpegOutput:1;
};
#define DEFAULT_DIB8000_I2C_ADDRESS 18
@@ -40,7 +41,8 @@ struct dib8000_config {
extern struct dvb_frontend *dib8000_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib8000_config *cfg);
extern struct i2c_adapter *dib8000_get_i2c_master(struct dvb_frontend *, enum dibx000_i2c_interface, int);
-extern int dib8000_i2c_enumeration(struct i2c_adapter *host, int no_of_demods, u8 default_addr, u8 first_addr);
+extern int dib8000_i2c_enumeration(struct i2c_adapter *host, int no_of_demods,
+ u8 default_addr, u8 first_addr, u8 is_dib8096p);
extern int dib8000_set_gpio(struct dvb_frontend *, u8 num, u8 dir, u8 val);
extern int dib8000_set_wbd_ref(struct dvb_frontend *, u16 value);
@@ -50,6 +52,13 @@ extern int dib8000_set_tune_state(struct dvb_frontend *fe, enum frontend_tune_st
extern enum frontend_tune_state dib8000_get_tune_state(struct dvb_frontend *fe);
extern void dib8000_pwm_agc_reset(struct dvb_frontend *fe);
extern s32 dib8000_get_adc_power(struct dvb_frontend *fe, u8 mode);
+extern struct i2c_adapter *dib8096p_get_i2c_tuner(struct dvb_frontend *fe);
+extern int dib8096p_tuner_sleep(struct dvb_frontend *fe, int onoff);
+extern int dib8090p_get_dc_power(struct dvb_frontend *fe, u8 IQ);
+extern u32 dib8000_ctrl_timf(struct dvb_frontend *fe,
+ uint8_t op, uint32_t timf);
+extern int dib8000_update_pll(struct dvb_frontend *fe,
+ struct dibx000_bandwidth_config *pll);
extern int dib8000_set_slave_frontend(struct dvb_frontend *fe, struct dvb_frontend *fe_slave);
extern int dib8000_remove_slave_frontend(struct dvb_frontend *fe);
extern struct dvb_frontend *dib8000_get_slave_frontend(struct dvb_frontend *fe, int slave_index);
@@ -66,7 +75,9 @@ static inline struct i2c_adapter *dib8000_get_i2c_master(struct dvb_frontend *fe
return NULL;
}
-static inline int dib8000_i2c_enumeration(struct i2c_adapter *host, int no_of_demods, u8 default_addr, u8 first_addr)
+static inline int dib8000_i2c_enumeration(struct i2c_adapter *host,
+ int no_of_demods, u8 default_addr, u8 first_addr,
+ u8 is_dib8096p)
{
printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
return -ENODEV;
@@ -109,11 +120,38 @@ static inline void dib8000_pwm_agc_reset(struct dvb_frontend *fe)
{
printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
}
+static inline struct i2c_adapter *dib8096p_get_i2c_tuner(struct dvb_frontend *fe)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return NULL;
+}
+static inline int dib8096p_tuner_sleep(struct dvb_frontend *fe, int onoff)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return 0;
+}
static inline s32 dib8000_get_adc_power(struct dvb_frontend *fe, u8 mode)
{
printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
return 0;
}
+static inline int dib8090p_get_dc_power(struct dvb_frontend *fe, u8 IQ)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return 0;
+}
+static inline u32 dib8000_ctrl_timf(struct dvb_frontend *fe,
+ uint8_t op, uint32_t timf)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return 0;
+}
+static inline int dib8000_update_pll(struct dvb_frontend *fe,
+ struct dibx000_bandwidth_config *pll)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return -ENODEV;
+}
static inline int dib8000_set_slave_frontend(struct dvb_frontend *fe, struct dvb_frontend *fe_slave)
{
printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
diff --git a/drivers/media/dvb/frontends/dib9000.c b/drivers/media/dvb/frontends/dib9000.c
index 660f80661ed4..863ef3cfab9f 100644
--- a/drivers/media/dvb/frontends/dib9000.c
+++ b/drivers/media/dvb/frontends/dib9000.c
@@ -1136,7 +1136,7 @@ static int dib9000_fw_init(struct dib9000_state *state)
return 0;
}
-static void dib9000_fw_set_channel_head(struct dib9000_state *state, struct dvb_frontend_parameters *ch)
+static void dib9000_fw_set_channel_head(struct dib9000_state *state)
{
u8 b[9];
u32 freq = state->fe[0]->dtv_property_cache.frequency / 1000;
@@ -1157,7 +1157,7 @@ static void dib9000_fw_set_channel_head(struct dib9000_state *state, struct dvb_
dib9000_risc_mem_write(state, FE_MM_W_CHANNEL_HEAD, b);
}
-static int dib9000_fw_get_channel(struct dvb_frontend *fe, struct dvb_frontend_parameters *channel)
+static int dib9000_fw_get_channel(struct dvb_frontend *fe)
{
struct dib9000_state *state = fe->demodulator_priv;
struct dibDVBTChannel {
@@ -1309,7 +1309,7 @@ error:
return ret;
}
-static int dib9000_fw_set_channel_union(struct dvb_frontend *fe, struct dvb_frontend_parameters *channel)
+static int dib9000_fw_set_channel_union(struct dvb_frontend *fe)
{
struct dib9000_state *state = fe->demodulator_priv;
struct dibDVBTChannel {
@@ -1454,7 +1454,7 @@ static int dib9000_fw_set_channel_union(struct dvb_frontend *fe, struct dvb_fron
return 0;
}
-static int dib9000_fw_tune(struct dvb_frontend *fe, struct dvb_frontend_parameters *ch)
+static int dib9000_fw_tune(struct dvb_frontend *fe)
{
struct dib9000_state *state = fe->demodulator_priv;
int ret = 10, search = state->channel_status.status == CHANNEL_STATUS_PARAMETERS_UNKNOWN;
@@ -1462,7 +1462,7 @@ static int dib9000_fw_tune(struct dvb_frontend *fe, struct dvb_frontend_paramete
switch (state->tune_state) {
case CT_DEMOD_START:
- dib9000_fw_set_channel_head(state, ch);
+ dib9000_fw_set_channel_head(state);
/* write the channel context - a channel is initialized to 0, so it is OK */
dib9000_risc_mem_write(state, FE_MM_W_CHANNEL_CONTEXT, (u8 *) fe_info);
@@ -1471,7 +1471,7 @@ static int dib9000_fw_tune(struct dvb_frontend *fe, struct dvb_frontend_paramete
if (search)
dib9000_mbx_send(state, OUT_MSG_FE_CHANNEL_SEARCH, NULL, 0);
else {
- dib9000_fw_set_channel_union(fe, ch);
+ dib9000_fw_set_channel_union(fe);
dib9000_mbx_send(state, OUT_MSG_FE_CHANNEL_TUNE, NULL, 0);
}
state->tune_state = CT_DEMOD_STEP_1;
@@ -1867,7 +1867,7 @@ static int dib9000_fe_get_tune_settings(struct dvb_frontend *fe, struct dvb_fron
return 0;
}
-static int dib9000_get_frontend(struct dvb_frontend *fe, struct dvb_frontend_parameters *fep)
+static int dib9000_get_frontend(struct dvb_frontend *fe)
{
struct dib9000_state *state = fe->demodulator_priv;
u8 index_frontend, sub_index_frontend;
@@ -1883,7 +1883,7 @@ static int dib9000_get_frontend(struct dvb_frontend *fe, struct dvb_frontend_par
dprintk("TPS lock on the slave%i", index_frontend);
/* synchronize the cache with the other frontends */
- state->fe[index_frontend]->ops.get_frontend(state->fe[index_frontend], fep);
+ state->fe[index_frontend]->ops.get_frontend(state->fe[index_frontend]);
for (sub_index_frontend = 0; (sub_index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[sub_index_frontend] != NULL);
sub_index_frontend++) {
if (sub_index_frontend != index_frontend) {
@@ -1911,7 +1911,7 @@ static int dib9000_get_frontend(struct dvb_frontend *fe, struct dvb_frontend_par
}
/* get the channel from master chip */
- ret = dib9000_fw_get_channel(fe, fep);
+ ret = dib9000_fw_get_channel(fe);
if (ret != 0)
goto return_value;
@@ -1958,7 +1958,7 @@ static int dib9000_set_channel_status(struct dvb_frontend *fe, struct dvb_fronte
return 0;
}
-static int dib9000_set_frontend(struct dvb_frontend *fe, struct dvb_frontend_parameters *fep)
+static int dib9000_set_frontend(struct dvb_frontend *fe)
{
struct dib9000_state *state = fe->demodulator_priv;
int sleep_time, sleep_time_slave;
@@ -1983,8 +1983,10 @@ static int dib9000_set_frontend(struct dvb_frontend *fe, struct dvb_frontend_par
fe->dtv_property_cache.delivery_system = SYS_DVBT;
/* set the master status */
- if (fep->u.ofdm.transmission_mode == TRANSMISSION_MODE_AUTO ||
- fep->u.ofdm.guard_interval == GUARD_INTERVAL_AUTO || fep->u.ofdm.constellation == QAM_AUTO || fep->u.ofdm.code_rate_HP == FEC_AUTO) {
+ if (state->fe[0]->dtv_property_cache.transmission_mode == TRANSMISSION_MODE_AUTO ||
+ state->fe[0]->dtv_property_cache.guard_interval == GUARD_INTERVAL_AUTO ||
+ state->fe[0]->dtv_property_cache.modulation == QAM_AUTO ||
+ state->fe[0]->dtv_property_cache.code_rate_HP == FEC_AUTO) {
/* no channel specified, autosearch the channel */
state->channel_status.status = CHANNEL_STATUS_PARAMETERS_UNKNOWN;
} else
@@ -2008,9 +2010,9 @@ static int dib9000_set_frontend(struct dvb_frontend *fe, struct dvb_frontend_par
exit_condition = 0; /* 0: tune pending; 1: tune failed; 2:tune success */
index_frontend_success = 0;
do {
- sleep_time = dib9000_fw_tune(state->fe[0], NULL);
+ sleep_time = dib9000_fw_tune(state->fe[0]);
for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) {
- sleep_time_slave = dib9000_fw_tune(state->fe[index_frontend], NULL);
+ sleep_time_slave = dib9000_fw_tune(state->fe[index_frontend]);
if (sleep_time == FE_CALLBACK_TIME_NEVER)
sleep_time = sleep_time_slave;
else if ((sleep_time_slave != FE_CALLBACK_TIME_NEVER) && (sleep_time_slave > sleep_time))
@@ -2052,7 +2054,7 @@ static int dib9000_set_frontend(struct dvb_frontend *fe, struct dvb_frontend_par
/* synchronize all the channel cache */
state->get_frontend_internal = 1;
- dib9000_get_frontend(state->fe[0], fep);
+ dib9000_get_frontend(state->fe[0]);
state->get_frontend_internal = 0;
/* retune the other frontends with the found channel */
@@ -2068,7 +2070,7 @@ static int dib9000_set_frontend(struct dvb_frontend *fe, struct dvb_frontend_par
sleep_time = FE_CALLBACK_TIME_NEVER;
for (index_frontend = 0; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) {
if (index_frontend != index_frontend_success) {
- sleep_time_slave = dib9000_fw_tune(state->fe[index_frontend], NULL);
+ sleep_time_slave = dib9000_fw_tune(state->fe[index_frontend]);
if (sleep_time == FE_CALLBACK_TIME_NEVER)
sleep_time = sleep_time_slave;
else if ((sleep_time_slave != FE_CALLBACK_TIME_NEVER) && (sleep_time_slave > sleep_time))
@@ -2495,9 +2497,9 @@ error:
EXPORT_SYMBOL(dib9000_attach);
static struct dvb_frontend_ops dib9000_ops = {
+ .delsys = { SYS_DVBT },
.info = {
.name = "DiBcom 9000",
- .type = FE_OFDM,
.frequency_min = 44250000,
.frequency_max = 867250000,
.frequency_stepsize = 62500,
diff --git a/drivers/media/dvb/frontends/dibx000_common.h b/drivers/media/dvb/frontends/dibx000_common.h
index 5e011474be43..5f484881d7b1 100644
--- a/drivers/media/dvb/frontends/dibx000_common.h
+++ b/drivers/media/dvb/frontends/dibx000_common.h
@@ -146,14 +146,8 @@ enum dibx000_adc_states {
DIBX000_VBG_DISABLE,
};
-#define BANDWIDTH_TO_KHZ(v) ((v) == BANDWIDTH_8_MHZ ? 8000 : \
- (v) == BANDWIDTH_7_MHZ ? 7000 : \
- (v) == BANDWIDTH_6_MHZ ? 6000 : 8000)
-
-#define BANDWIDTH_TO_INDEX(v) ( \
- (v) == 8000 ? BANDWIDTH_8_MHZ : \
- (v) == 7000 ? BANDWIDTH_7_MHZ : \
- (v) == 6000 ? BANDWIDTH_6_MHZ : BANDWIDTH_8_MHZ )
+#define BANDWIDTH_TO_KHZ(v) ((v) / 1000)
+#define BANDWIDTH_TO_HZ(v) ((v) * 1000)
/* Chip output mode. */
#define OUTMODE_HIGH_Z 0
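With the DVBv5 property cache carrying bandwidth_hz directly, the old enum lookup collapses into a plain unit conversion; a trivial illustration of the two new macros:

	u32 bw_khz = BANDWIDTH_TO_KHZ(8000000);	/* 8000 kHz   */
	u32 bw_hz  = BANDWIDTH_TO_HZ(6000);	/* 6000000 Hz */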
@@ -276,4 +270,11 @@ struct dibSubbandSelection {
#define DEMOD_TIMF_GET 0x01
#define DEMOD_TIMF_UPDATE 0x02
+#define MPEG_ON_DIBTX 1
+#define DIV_ON_DIBTX 2
+#define ADC_ON_DIBTX 3
+#define DEMOUT_ON_HOSTBUS 4
+#define DIBTX_ON_HOSTBUS 5
+#define MPEG_ON_HOSTBUS 6
+
#endif
diff --git a/drivers/media/dvb/frontends/drxd.h b/drivers/media/dvb/frontends/drxd.h
index 7113535844f2..34398738f9bc 100644
--- a/drivers/media/dvb/frontends/drxd.h
+++ b/drivers/media/dvb/frontends/drxd.h
@@ -48,8 +48,6 @@ struct drxd_config {
u8 disable_i2c_gate_ctrl;
u32 IF;
- int (*pll_set) (void *priv, void *priv_params,
- u8 pll_addr, u8 demoda_addr, s32 *off);
s16(*osc_deviation) (void *priv, s16 dev, int flag);
};
diff --git a/drivers/media/dvb/frontends/drxd_hard.c b/drivers/media/dvb/frontends/drxd_hard.c
index 88e46f4cdbb2..7bf39cda83c5 100644
--- a/drivers/media/dvb/frontends/drxd_hard.c
+++ b/drivers/media/dvb/frontends/drxd_hard.c
@@ -120,7 +120,7 @@ enum EIFFilter {
struct drxd_state {
struct dvb_frontend frontend;
struct dvb_frontend_ops ops;
- struct dvb_frontend_parameters param;
+ struct dtv_frontend_properties props;
const struct firmware *fw;
struct device *dev;
@@ -914,14 +914,13 @@ static int load_firmware(struct drxd_state *state, const char *fw_name)
return -EIO;
}
- state->microcode = kmalloc(fw->size, GFP_KERNEL);
+ state->microcode = kmemdup(fw->data, fw->size, GFP_KERNEL);
if (state->microcode == NULL) {
release_firmware(fw);
printk(KERN_ERR "drxd: firmware load failure: no memory\n");
return -ENOMEM;
}
- memcpy(state->microcode, fw->data, fw->size);
state->microcode_length = fw->size;
release_firmware(fw);
return 0;
@@ -1622,14 +1621,14 @@ static int CorrectSysClockDeviation(struct drxd_state *state)
break;
}
- switch (state->param.u.ofdm.bandwidth) {
- case BANDWIDTH_8_MHZ:
+ switch (state->props.bandwidth_hz) {
+ case 8000000:
bandwidth = DRXD_BANDWIDTH_8MHZ_IN_HZ;
break;
- case BANDWIDTH_7_MHZ:
+ case 7000000:
bandwidth = DRXD_BANDWIDTH_7MHZ_IN_HZ;
break;
- case BANDWIDTH_6_MHZ:
+ case 6000000:
bandwidth = DRXD_BANDWIDTH_6MHZ_IN_HZ;
break;
default:
@@ -1804,7 +1803,7 @@ static int StartDiversity(struct drxd_state *state)
status = WriteTable(state, state->m_StartDiversityEnd);
if (status < 0)
break;
- if (state->param.u.ofdm.bandwidth == BANDWIDTH_8_MHZ) {
+ if (state->props.bandwidth_hz == 8000000) {
status = WriteTable(state, state->m_DiversityDelay8MHZ);
if (status < 0)
break;
@@ -1906,7 +1905,7 @@ static int SetCfgNoiseCalibration(struct drxd_state *state,
static int DRX_Start(struct drxd_state *state, s32 off)
{
- struct dvb_ofdm_parameters *p = &state->param.u.ofdm;
+ struct dtv_frontend_properties *p = &state->props;
int status;
u16 transmissionParams = 0;
@@ -1971,7 +1970,7 @@ static int DRX_Start(struct drxd_state *state, s32 off)
if (status < 0)
break;
- mirrorFreqSpect = (state->param.inversion == INVERSION_ON);
+ mirrorFreqSpect = (state->props.inversion == INVERSION_ON);
switch (p->transmission_mode) {
default: /* Not set, detect it automatically */
@@ -2021,7 +2020,7 @@ static int DRX_Start(struct drxd_state *state, s32 off)
break;
}
- switch (p->hierarchy_information) {
+ switch (p->hierarchy) {
case HIERARCHY_1:
transmissionParams |= SC_RA_RAM_OP_PARAM_HIER_A1;
if (state->type_A) {
@@ -2147,7 +2146,7 @@ static int DRX_Start(struct drxd_state *state, s32 off)
if (status < 0)
break;
- switch (p->constellation) {
+ switch (p->modulation) {
default:
operationMode |= SC_RA_RAM_OP_AUTO_CONST__M;
/* fall through, try first guess
@@ -2331,9 +2330,11 @@ static int DRX_Start(struct drxd_state *state, s32 off)
by SC for fix for some 8K,1/8 guard but is restored by
InitEC and ResetEC
functions */
- switch (p->bandwidth) {
- case BANDWIDTH_AUTO:
- case BANDWIDTH_8_MHZ:
+ switch (p->bandwidth_hz) {
+ case 0:
+ p->bandwidth_hz = 8000000;
+ /* fall through */
+ case 8000000:
/* (64/7)*(8/8)*1000000 */
bandwidth = DRXD_BANDWIDTH_8MHZ_IN_HZ;
@@ -2341,14 +2342,14 @@ static int DRX_Start(struct drxd_state *state, s32 off)
status = Write16(state,
FE_AG_REG_IND_DEL__A, 50, 0x0000);
break;
- case BANDWIDTH_7_MHZ:
+ case 7000000:
/* (64/7)*(7/8)*1000000 */
bandwidth = DRXD_BANDWIDTH_7MHZ_IN_HZ;
bandwidthParam = 0x4807; /*binary:0100 1000 0000 0111 */
status = Write16(state,
FE_AG_REG_IND_DEL__A, 59, 0x0000);
break;
- case BANDWIDTH_6_MHZ:
+ case 6000000:
/* (64/7)*(6/8)*1000000 */
bandwidth = DRXD_BANDWIDTH_6MHZ_IN_HZ;
bandwidthParam = 0x0F07; /*binary: 0000 1111 0000 0111 */
@@ -2887,41 +2888,26 @@ static int drxd_sleep(struct dvb_frontend *fe)
return 0;
}
-static int drxd_get_frontend(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *param)
-{
- return 0;
-}
-
static int drxd_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
{
return drxd_config_i2c(fe, enable);
}
-static int drxd_set_frontend(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *param)
+static int drxd_set_frontend(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct drxd_state *state = fe->demodulator_priv;
s32 off = 0;
- state->param = *param;
+ state->props = *p;
DRX_Stop(state);
if (fe->ops.tuner_ops.set_params) {
- fe->ops.tuner_ops.set_params(fe, param);
+ fe->ops.tuner_ops.set_params(fe);
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 0);
}
- /* FIXME: move PLL drivers */
- if (state->config.pll_set &&
- state->config.pll_set(state->priv, param,
- state->config.pll_address,
- state->config.demoda_address, &off) < 0) {
- printk(KERN_ERR "Error in pll_set\n");
- return -1;
- }
-
msleep(200);
return DRX_Start(state, off);
@@ -2935,10 +2921,9 @@ static void drxd_release(struct dvb_frontend *fe)
}
static struct dvb_frontend_ops drxd_ops = {
-
+ .delsys = { SYS_DVBT},
.info = {
.name = "Micronas DRXD DVB-T",
- .type = FE_OFDM,
.frequency_min = 47125000,
.frequency_max = 855250000,
.frequency_stepsize = 166667,
@@ -2958,7 +2943,6 @@ static struct dvb_frontend_ops drxd_ops = {
.i2c_gate_ctrl = drxd_i2c_gate_ctrl,
.set_frontend = drxd_set_frontend,
- .get_frontend = drxd_get_frontend,
.get_tune_settings = drxd_get_tune_settings,
.read_status = drxd_read_status,
diff --git a/drivers/media/dvb/frontends/drxk.h b/drivers/media/dvb/frontends/drxk.h
index 58baf419560c..020981844a86 100644
--- a/drivers/media/dvb/frontends/drxk.h
+++ b/drivers/media/dvb/frontends/drxk.h
@@ -8,6 +8,8 @@
* struct drxk_config - Configure the initial parameters for DRX-K
*
* adr: I2C Address of the DRX-K
+ * parallel_ts: true means that the device uses parallel TS,
+ * serial otherwise.
* single_master: Device is on the single master mode
* no_i2c_bridge: Don't switch the I2C bridge to talk with tuner
* antenna_gpio: GPIO bit used to control the antenna
@@ -22,22 +24,23 @@ struct drxk_config {
u8 adr;
bool single_master;
bool no_i2c_bridge;
+ bool parallel_ts;
bool antenna_dvbt;
u16 antenna_gpio;
+ int chunk_size;
+
const char *microcode_name;
};
#if defined(CONFIG_DVB_DRXK) || (defined(CONFIG_DVB_DRXK_MODULE) \
&& defined(MODULE))
extern struct dvb_frontend *drxk_attach(const struct drxk_config *config,
- struct i2c_adapter *i2c,
- struct dvb_frontend **fe_t);
+ struct i2c_adapter *i2c);
#else
static inline struct dvb_frontend *drxk_attach(const struct drxk_config *config,
- struct i2c_adapter *i2c,
- struct dvb_frontend **fe_t)
+ struct i2c_adapter *i2c)
{
printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
return NULL;
diff --git a/drivers/media/dvb/frontends/drxk_hard.c b/drivers/media/dvb/frontends/drxk_hard.c
index f6431ef827dc..6980ed7b8786 100644
--- a/drivers/media/dvb/frontends/drxk_hard.c
+++ b/drivers/media/dvb/frontends/drxk_hard.c
@@ -368,10 +368,10 @@ static int i2c_read(struct i2c_adapter *adap,
}
if (debug > 2) {
int i;
- dprintk(2, ": read from ");
+ dprintk(2, ": read from");
for (i = 0; i < len; i++)
printk(KERN_CONT " %02x", msg[i]);
- printk(KERN_CONT "Value = ");
+ printk(KERN_CONT ", value = ");
for (i = 0; i < alen; i++)
printk(KERN_CONT " %02x", answ[i]);
printk(KERN_CONT "\n");
@@ -660,7 +660,6 @@ static int init_state(struct drxk_state *state)
/* io_pad_cfg_mode output mode is drive always */
/* io_pad_cfg_drive is set to power 2 (23 mA) */
u32 ulGPIOCfg = 0x0113;
- u32 ulSerialMode = 1;
u32 ulInvertTSClock = 0;
u32 ulTSDataStrength = DRXK_MPEG_SERIAL_OUTPUT_PIN_DRIVE_STRENGTH;
u32 ulTSClockkStrength = DRXK_MPEG_OUTPUT_CLK_DRIVE_STRENGTH;
@@ -681,7 +680,8 @@ static int init_state(struct drxk_state *state)
state->m_hasOOB = false;
state->m_hasAudio = false;
- state->m_ChunkSize = 124;
+ if (!state->m_ChunkSize)
+ state->m_ChunkSize = 124;
state->m_oscClockFreq = 0;
state->m_smartAntInverted = false;
@@ -810,8 +810,6 @@ static int init_state(struct drxk_state *state)
/* MPEG output configuration */
state->m_enableMPEGOutput = true; /* If TRUE; enable MPEG output */
state->m_insertRSByte = false; /* If TRUE; insert RS byte */
- state->m_enableParallel = true; /* If TRUE;
- parallel out otherwise serial */
state->m_invertDATA = false; /* If TRUE; invert DATA signals */
state->m_invertERR = false; /* If TRUE; invert ERR signal */
state->m_invertSTR = false; /* If TRUE; invert STR signals */
@@ -856,8 +854,6 @@ static int init_state(struct drxk_state *state)
state->m_bPowerDown = false;
state->m_currentPowerMode = DRX_POWER_DOWN;
- state->m_enableParallel = (ulSerialMode == 0);
-
state->m_rfmirror = (ulRfMirror == 0);
state->m_IfAgcPol = false;
return 0;
@@ -946,6 +942,9 @@ static int GetDeviceCapabilities(struct drxk_state *state)
status = read32(state, SIO_TOP_JTAGID_LO__A, &sioTopJtagidLo);
if (status < 0)
goto error;
+
+printk(KERN_ERR "drxk: status = 0x%08x\n", sioTopJtagidLo);
+
/* driver 0.9.0 */
switch ((sioTopJtagidLo >> 29) & 0xF) {
case 0:
@@ -963,7 +962,8 @@ static int GetDeviceCapabilities(struct drxk_state *state)
default:
state->m_deviceSpin = DRXK_SPIN_UNKNOWN;
status = -EINVAL;
- printk(KERN_ERR "drxk: Spin unknown\n");
+ printk(KERN_ERR "drxk: Spin %d unknown\n",
+ (sioTopJtagidLo >> 29) & 0xF);
goto error2;
}
switch ((sioTopJtagidLo >> 12) & 0xFF) {
@@ -1190,7 +1190,9 @@ static int MPEGTSConfigurePins(struct drxk_state *state, bool mpegEnable)
u16 sioPdrMclkCfg = 0;
u16 sioPdrMdxCfg = 0;
- dprintk(1, "\n");
+ dprintk(1, ": mpeg %s, %s mode\n",
+ mpegEnable ? "enable" : "disable",
+ state->m_enableParallel ? "parallel" : "serial");
/* stop lock indicator process */
status = write16(state, SCU_RAM_GPIO__A, SCU_RAM_GPIO_HW_LOCK_IND_DISABLE);
@@ -1846,6 +1848,7 @@ static int SetOperationMode(struct drxk_state *state,
*/
switch (oMode) {
case OM_DVBT:
+ dprintk(1, ": DVB-T\n");
state->m_OperationMode = oMode;
status = SetDVBTStandard(state, oMode);
if (status < 0)
@@ -1853,6 +1856,8 @@ static int SetOperationMode(struct drxk_state *state,
break;
case OM_QAM_ITU_A: /* fallthrough */
case OM_QAM_ITU_C:
+ dprintk(1, ": DVB-C Annex %c\n",
+ (oMode == OM_QAM_ITU_A) ? 'A' : 'C');
state->m_OperationMode = oMode;
status = SetQAMStandard(state, oMode);
if (status < 0)
@@ -1881,7 +1886,7 @@ static int Start(struct drxk_state *state, s32 offsetFreq,
state->m_DrxkState != DRXK_DTV_STARTED)
goto error;
- state->m_bMirrorFreqSpect = (state->param.inversion == INVERSION_ON);
+ state->m_bMirrorFreqSpect = (state->props.inversion == INVERSION_ON);
if (IntermediateFrequency < 0) {
state->m_bMirrorFreqSpect = !state->m_bMirrorFreqSpect;
@@ -2503,7 +2508,7 @@ static int GetQAMSignalToNoise(struct drxk_state *state,
u16 qamSlErrPower = 0; /* accum. error between
raw and sliced symbols */
u32 qamSlSigPower = 0; /* used for MER, depends of
- QAM constellation */
+ QAM modulation */
u32 qamSlMer = 0; /* QAM MER */
dprintk(1, "\n");
@@ -2517,7 +2522,7 @@ static int GetQAMSignalToNoise(struct drxk_state *state,
return -EINVAL;
}
- switch (state->param.u.qam.modulation) {
+ switch (state->props.modulation) {
case QAM_16:
qamSlSigPower = DRXK_QAM_SL_SIG_POWER_QAM16 << 2;
break;
@@ -2748,7 +2753,7 @@ static int GetDVBCQuality(struct drxk_state *state, s32 *pQuality)
if (status < 0)
break;
- switch (state->param.u.qam.modulation) {
+ switch (state->props.modulation) {
case QAM_16:
SignalToNoiseRel = SignalToNoise - 200;
break;
@@ -3813,7 +3818,7 @@ static int SetDVBT(struct drxk_state *state, u16 IntermediateFreqkHz,
/*== Write channel settings to device =====================================*/
/* mode */
- switch (state->param.u.ofdm.transmission_mode) {
+ switch (state->props.transmission_mode) {
case TRANSMISSION_MODE_AUTO:
default:
operationMode |= OFDM_SC_RA_RAM_OP_AUTO_MODE__M;
@@ -3827,7 +3832,7 @@ static int SetDVBT(struct drxk_state *state, u16 IntermediateFreqkHz,
}
/* guard */
- switch (state->param.u.ofdm.guard_interval) {
+ switch (state->props.guard_interval) {
default:
case GUARD_INTERVAL_AUTO:
operationMode |= OFDM_SC_RA_RAM_OP_AUTO_GUARD__M;
@@ -3847,7 +3852,7 @@ static int SetDVBT(struct drxk_state *state, u16 IntermediateFreqkHz,
}
/* hierarchy */
- switch (state->param.u.ofdm.hierarchy_information) {
+ switch (state->props.hierarchy) {
case HIERARCHY_AUTO:
case HIERARCHY_NONE:
default:
@@ -3867,8 +3872,8 @@ static int SetDVBT(struct drxk_state *state, u16 IntermediateFreqkHz,
}
- /* constellation */
- switch (state->param.u.ofdm.constellation) {
+ /* modulation */
+ switch (state->props.modulation) {
case QAM_AUTO:
default:
operationMode |= OFDM_SC_RA_RAM_OP_AUTO_CONST__M;
@@ -3911,7 +3916,7 @@ static int SetDVBT(struct drxk_state *state, u16 IntermediateFreqkHz,
#endif
/* coderate */
- switch (state->param.u.ofdm.code_rate_HP) {
+ switch (state->props.code_rate_HP) {
case FEC_AUTO:
default:
operationMode |= OFDM_SC_RA_RAM_OP_AUTO_RATE__M;
@@ -3940,9 +3945,11 @@ static int SetDVBT(struct drxk_state *state, u16 IntermediateFreqkHz,
/* Also set parameters for EC_OC fix, note EC_OC_REG_TMD_HIL_MAR is changed
by SC for fix for some 8K,1/8 guard but is restored by InitEC and ResetEC
functions */
- switch (state->param.u.ofdm.bandwidth) {
- case BANDWIDTH_AUTO:
- case BANDWIDTH_8_MHZ:
+ switch (state->props.bandwidth_hz) {
+ case 0:
+ state->props.bandwidth_hz = 8000000;
+ /* fall through */
+ case 8000000:
bandwidth = DRXK_BANDWIDTH_8MHZ_IN_HZ;
status = write16(state, OFDM_SC_RA_RAM_SRMM_FIX_FACT_8K__A, 3052);
if (status < 0)
@@ -3961,7 +3968,7 @@ static int SetDVBT(struct drxk_state *state, u16 IntermediateFreqkHz,
if (status < 0)
goto error;
break;
- case BANDWIDTH_7_MHZ:
+ case 7000000:
bandwidth = DRXK_BANDWIDTH_7MHZ_IN_HZ;
status = write16(state, OFDM_SC_RA_RAM_SRMM_FIX_FACT_8K__A, 3491);
if (status < 0)
@@ -3980,7 +3987,7 @@ static int SetDVBT(struct drxk_state *state, u16 IntermediateFreqkHz,
if (status < 0)
goto error;
break;
- case BANDWIDTH_6_MHZ:
+ case 6000000:
bandwidth = DRXK_BANDWIDTH_6MHZ_IN_HZ;
status = write16(state, OFDM_SC_RA_RAM_SRMM_FIX_FACT_8K__A, 4073);
if (status < 0)
@@ -4187,7 +4194,7 @@ error:
/**
* \brief Setup of the QAM Measurement intervals for signal quality
* \param demod instance of demod.
-* \param constellation current constellation.
+* \param modulation current modulation.
* \return DRXStatus_t.
*
* NOTE:
@@ -4196,7 +4203,7 @@ error:
*
*/
static int SetQAMMeasurement(struct drxk_state *state,
- enum EDrxkConstellation constellation,
+ enum EDrxkConstellation modulation,
u32 symbolRate)
{
u32 fecBitsDesired = 0; /* BER accounting period */
@@ -4210,11 +4217,11 @@ static int SetQAMMeasurement(struct drxk_state *state,
fecRsPrescale = 1;
/* fecBitsDesired = symbolRate [kHz] *
FrameLength [ms] *
- (constellation + 1) *
+ (modulation + 1) *
SyncLoss (== 1) *
ViterbiLoss (==1)
*/
- switch (constellation) {
+ switch (modulation) {
case DRX_CONSTELLATION_QAM16:
fecBitsDesired = 4 * symbolRate;
break;
@@ -5281,12 +5288,12 @@ static int QAMSetSymbolrate(struct drxk_state *state)
/* Select & calculate correct IQM rate */
adcFrequency = (state->m_sysClockFreq * 1000) / 3;
ratesel = 0;
- /* printk(KERN_DEBUG "drxk: SR %d\n", state->param.u.qam.symbol_rate); */
- if (state->param.u.qam.symbol_rate <= 1188750)
+ /* printk(KERN_DEBUG "drxk: SR %d\n", state->props.symbol_rate); */
+ if (state->props.symbol_rate <= 1188750)
ratesel = 3;
- else if (state->param.u.qam.symbol_rate <= 2377500)
+ else if (state->props.symbol_rate <= 2377500)
ratesel = 2;
- else if (state->param.u.qam.symbol_rate <= 4755000)
+ else if (state->props.symbol_rate <= 4755000)
ratesel = 1;
status = write16(state, IQM_FD_RATESEL__A, ratesel);
if (status < 0)
@@ -5295,7 +5302,7 @@ static int QAMSetSymbolrate(struct drxk_state *state)
/*
IqmRcRate = ((Fadc / (symbolrate * (4<<ratesel))) - 1) * (1<<23)
*/
- symbFreq = state->param.u.qam.symbol_rate * (1 << ratesel);
+ symbFreq = state->props.symbol_rate * (1 << ratesel);
if (symbFreq == 0) {
/* Divide by zero */
status = -EINVAL;
@@ -5311,7 +5318,7 @@ static int QAMSetSymbolrate(struct drxk_state *state)
/*
LcSymbFreq = round (.125 * symbolrate / adcFreq * (1<<15))
*/
- symbFreq = state->param.u.qam.symbol_rate;
+ symbFreq = state->props.symbol_rate;
if (adcFrequency == 0) {
/* Divide by zero */
status = -EINVAL;
@@ -5412,7 +5419,7 @@ static int SetQAM(struct drxk_state *state, u16 IntermediateFreqkHz,
goto error;
/* Set params */
- switch (state->param.u.qam.modulation) {
+ switch (state->props.modulation) {
case QAM_256:
state->m_Constellation = DRX_CONSTELLATION_QAM256;
break;
@@ -5435,7 +5442,7 @@ static int SetQAM(struct drxk_state *state, u16 IntermediateFreqkHz,
}
if (status < 0)
goto error;
- setParamParameters[0] = state->m_Constellation; /* constellation */
+ setParamParameters[0] = state->m_Constellation; /* modulation */
setParamParameters[1] = DRXK_QAM_I12_J17; /* interleave mode */
if (state->m_OperationMode == OM_QAM_ITU_C)
setParamParameters[2] = QAM_TOP_ANNEX_C;
@@ -5457,7 +5464,7 @@ static int SetQAM(struct drxk_state *state, u16 IntermediateFreqkHz,
if (status < 0)
goto error;
- setParamParameters[0] = state->m_Constellation; /* constellation */
+ setParamParameters[0] = state->m_Constellation; /* modulation */
setParamParameters[1] = DRXK_QAM_I12_J17; /* interleave mode */
status = scu_command(state, SCU_RAM_COMMAND_STANDARD_QAM | SCU_RAM_COMMAND_CMD_DEMOD_SET_PARAM, 2, setParamParameters, 1, &cmdResult);
}
@@ -5466,7 +5473,7 @@ static int SetQAM(struct drxk_state *state, u16 IntermediateFreqkHz,
/*
* STEP 3: enable the system in a mode where the ADC provides valid
- * signal setup constellation independent registers
+ * signal setup modulation independent registers
*/
#if 0
status = SetFrequency(channel, tunerFreqOffset));
@@ -5478,7 +5485,7 @@ static int SetQAM(struct drxk_state *state, u16 IntermediateFreqkHz,
goto error;
/* Setup BER measurement */
- status = SetQAMMeasurement(state, state->m_Constellation, state->param.u. qam.symbol_rate);
+ status = SetQAMMeasurement(state, state->m_Constellation, state->props.symbol_rate);
if (status < 0)
goto error;
@@ -5560,8 +5567,8 @@ static int SetQAM(struct drxk_state *state, u16 IntermediateFreqkHz,
if (status < 0)
goto error;
- /* STEP 4: constellation specific setup */
- switch (state->param.u.qam.modulation) {
+ /* STEP 4: modulation specific setup */
+ switch (state->props.modulation) {
case QAM_16:
status = SetQAM16(state);
break;
@@ -5591,7 +5598,7 @@ static int SetQAM(struct drxk_state *state, u16 IntermediateFreqkHz,
goto error;
/* Re-configure MPEG output, requires knowledge of channel bitrate */
- /* extAttr->currentChannel.constellation = channel->constellation; */
+ /* extAttr->currentChannel.modulation = channel->modulation; */
/* extAttr->currentChannel.symbolrate = channel->symbolrate; */
status = MPEGTSDtoSetup(state, state->m_OperationMode);
if (status < 0)
@@ -6167,7 +6174,7 @@ error:
return status;
}
-static void drxk_c_release(struct dvb_frontend *fe)
+static void drxk_release(struct dvb_frontend *fe)
{
struct drxk_state *state = fe->demodulator_priv;
@@ -6175,24 +6182,12 @@ static void drxk_c_release(struct dvb_frontend *fe)
kfree(state);
}
-static int drxk_c_init(struct dvb_frontend *fe)
-{
- struct drxk_state *state = fe->demodulator_priv;
-
- dprintk(1, "\n");
- if (mutex_trylock(&state->ctlock) == 0)
- return -EBUSY;
- SetOperationMode(state, OM_QAM_ITU_A);
- return 0;
-}
-
-static int drxk_c_sleep(struct dvb_frontend *fe)
+static int drxk_sleep(struct dvb_frontend *fe)
{
struct drxk_state *state = fe->demodulator_priv;
dprintk(1, "\n");
ShutDown(state);
- mutex_unlock(&state->ctlock);
return 0;
}
@@ -6204,9 +6199,10 @@ static int drxk_gate_ctrl(struct dvb_frontend *fe, int enable)
return ConfigureI2CBridge(state, enable ? true : false);
}
-static int drxk_set_parameters(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *p)
+static int drxk_set_parameters(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+ u32 delsys = p->delivery_system, old_delsys;
struct drxk_state *state = fe->demodulator_priv;
u32 IF;
@@ -6218,14 +6214,39 @@ static int drxk_set_parameters(struct dvb_frontend *fe,
return -EINVAL;
}
-
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
if (fe->ops.tuner_ops.set_params)
- fe->ops.tuner_ops.set_params(fe, p);
+ fe->ops.tuner_ops.set_params(fe);
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 0);
- state->param = *p;
+
+ old_delsys = state->props.delivery_system;
+ state->props = *p;
+
+ if (old_delsys != delsys) {
+ ShutDown(state);
+ switch (delsys) {
+ case SYS_DVBC_ANNEX_A:
+ case SYS_DVBC_ANNEX_C:
+ if (!state->m_hasDVBC)
+ return -EINVAL;
+ state->m_itut_annex_c = (delsys == SYS_DVBC_ANNEX_C) ? true : false;
+ if (state->m_itut_annex_c)
+ SetOperationMode(state, OM_QAM_ITU_C);
+ else
+ SetOperationMode(state, OM_QAM_ITU_A);
+ break;
+ case SYS_DVBT:
+ if (!state->m_hasDVBT)
+ return -EINVAL;
+ SetOperationMode(state, OM_DVBT);
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
fe->ops.tuner_ops.get_if_frequency(fe, &IF);
Start(state, 0, IF);
@@ -6234,13 +6255,6 @@ static int drxk_set_parameters(struct dvb_frontend *fe,
return 0;
}
-static int drxk_c_get_frontend(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *p)
-{
- dprintk(1, "\n");
- return 0;
-}
-
static int drxk_read_status(struct dvb_frontend *fe, fe_status_t *status)
{
struct drxk_state *state = fe->demodulator_priv;
@@ -6300,102 +6314,54 @@ static int drxk_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
return 0;
}
-static int drxk_c_get_tune_settings(struct dvb_frontend *fe, struct dvb_frontend_tune_settings
+static int drxk_get_tune_settings(struct dvb_frontend *fe, struct dvb_frontend_tune_settings
*sets)
{
- dprintk(1, "\n");
- sets->min_delay_ms = 3000;
- sets->max_drift = 0;
- sets->step_size = 0;
- return 0;
-}
-
-static void drxk_t_release(struct dvb_frontend *fe)
-{
- /*
- * There's nothing to release here, as the state struct
- * is already freed by drxk_c_release.
- */
-}
-
-static int drxk_t_init(struct dvb_frontend *fe)
-{
- struct drxk_state *state = fe->demodulator_priv;
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
dprintk(1, "\n");
- if (mutex_trylock(&state->ctlock) == 0)
- return -EBUSY;
- SetOperationMode(state, OM_DVBT);
- return 0;
-}
-
-static int drxk_t_sleep(struct dvb_frontend *fe)
-{
- struct drxk_state *state = fe->demodulator_priv;
-
- dprintk(1, "\n");
- mutex_unlock(&state->ctlock);
- return 0;
-}
-
-static int drxk_t_get_frontend(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *p)
-{
- dprintk(1, "\n");
-
- return 0;
+ switch (p->delivery_system) {
+ case SYS_DVBC_ANNEX_A:
+ case SYS_DVBC_ANNEX_C:
+ sets->min_delay_ms = 3000;
+ sets->max_drift = 0;
+ sets->step_size = 0;
+ return 0;
+ default:
+ /*
+ * For DVB-T, let it use the default DVB core way, that is:
+ * fepriv->step_size = fe->ops.info.frequency_stepsize * 2
+ */
+ return -EINVAL;
+ }
}
-static struct dvb_frontend_ops drxk_c_ops = {
+static struct dvb_frontend_ops drxk_ops = {
+ /* .delsys will be filled dynamically */
.info = {
- .name = "DRXK DVB-C",
- .type = FE_QAM,
- .frequency_stepsize = 62500,
- .frequency_min = 47000000,
- .frequency_max = 862000000,
- .symbol_rate_min = 870000,
- .symbol_rate_max = 11700000,
- .caps = FE_CAN_QAM_16 | FE_CAN_QAM_32 | FE_CAN_QAM_64 |
- FE_CAN_QAM_128 | FE_CAN_QAM_256 | FE_CAN_FEC_AUTO},
- .release = drxk_c_release,
- .init = drxk_c_init,
- .sleep = drxk_c_sleep,
+ .name = "DRXK",
+ .frequency_min = 47000000,
+ .frequency_max = 865000000,
+ /* For DVB-C */
+ .symbol_rate_min = 870000,
+ .symbol_rate_max = 11700000,
+ /* For DVB-T */
+ .frequency_stepsize = 166667,
+
+ .caps = FE_CAN_QAM_16 | FE_CAN_QAM_32 | FE_CAN_QAM_64 |
+ FE_CAN_QAM_128 | FE_CAN_QAM_256 | FE_CAN_FEC_AUTO |
+ FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
+ FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_MUTE_TS |
+ FE_CAN_TRANSMISSION_MODE_AUTO | FE_CAN_RECOVER |
+ FE_CAN_GUARD_INTERVAL_AUTO | FE_CAN_HIERARCHY_AUTO
+ },
+
+ .release = drxk_release,
+ .sleep = drxk_sleep,
.i2c_gate_ctrl = drxk_gate_ctrl,
.set_frontend = drxk_set_parameters,
- .get_frontend = drxk_c_get_frontend,
- .get_tune_settings = drxk_c_get_tune_settings,
-
- .read_status = drxk_read_status,
- .read_ber = drxk_read_ber,
- .read_signal_strength = drxk_read_signal_strength,
- .read_snr = drxk_read_snr,
- .read_ucblocks = drxk_read_ucblocks,
-};
-
-static struct dvb_frontend_ops drxk_t_ops = {
- .info = {
- .name = "DRXK DVB-T",
- .type = FE_OFDM,
- .frequency_min = 47125000,
- .frequency_max = 865000000,
- .frequency_stepsize = 166667,
- .frequency_tolerance = 0,
- .caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 |
- FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 |
- FE_CAN_FEC_AUTO |
- FE_CAN_QAM_16 | FE_CAN_QAM_64 |
- FE_CAN_QAM_AUTO |
- FE_CAN_TRANSMISSION_MODE_AUTO |
- FE_CAN_GUARD_INTERVAL_AUTO |
- FE_CAN_HIERARCHY_AUTO | FE_CAN_RECOVER | FE_CAN_MUTE_TS},
- .release = drxk_t_release,
- .init = drxk_t_init,
- .sleep = drxk_t_sleep,
- .i2c_gate_ctrl = drxk_gate_ctrl,
-
- .set_frontend = drxk_set_parameters,
- .get_frontend = drxk_t_get_frontend,
+ .get_tune_settings = drxk_get_tune_settings,
.read_status = drxk_read_status,
.read_ber = drxk_read_ber,
@@ -6405,9 +6371,10 @@ static struct dvb_frontend_ops drxk_t_ops = {
};
struct dvb_frontend *drxk_attach(const struct drxk_config *config,
- struct i2c_adapter *i2c,
- struct dvb_frontend **fe_t)
+ struct i2c_adapter *i2c)
{
+ int n;
+
struct drxk_state *state = NULL;
u8 adr = config->adr;
@@ -6423,6 +6390,12 @@ struct dvb_frontend *drxk_attach(const struct drxk_config *config,
state->no_i2c_bridge = config->no_i2c_bridge;
state->antenna_gpio = config->antenna_gpio;
state->antenna_dvbt = config->antenna_dvbt;
+ state->m_ChunkSize = config->chunk_size;
+
+ if (config->parallel_ts)
+ state->m_enableParallel = true;
+ else
+ state->m_enableParallel = false;
/* NOTE: as more UIO bits will be used, add them to the mask */
state->UIO_mask = config->antenna_gpio;
@@ -6434,21 +6407,30 @@ struct dvb_frontend *drxk_attach(const struct drxk_config *config,
state->m_GPIO &= ~state->antenna_gpio;
mutex_init(&state->mutex);
- mutex_init(&state->ctlock);
- memcpy(&state->c_frontend.ops, &drxk_c_ops,
- sizeof(struct dvb_frontend_ops));
- memcpy(&state->t_frontend.ops, &drxk_t_ops,
- sizeof(struct dvb_frontend_ops));
- state->c_frontend.demodulator_priv = state;
- state->t_frontend.demodulator_priv = state;
+ memcpy(&state->frontend.ops, &drxk_ops, sizeof(drxk_ops));
+ state->frontend.demodulator_priv = state;
init_state(state);
if (init_drxk(state) < 0)
goto error;
- *fe_t = &state->t_frontend;
- return &state->c_frontend;
+ /* Initialize the supported delivery systems */
+ n = 0;
+ if (state->m_hasDVBC) {
+ state->frontend.ops.delsys[n++] = SYS_DVBC_ANNEX_A;
+ state->frontend.ops.delsys[n++] = SYS_DVBC_ANNEX_C;
+ strlcat(state->frontend.ops.info.name, " DVB-C",
+ sizeof(state->frontend.ops.info.name));
+ }
+ if (state->m_hasDVBT) {
+ state->frontend.ops.delsys[n++] = SYS_DVBT;
+ strlcat(state->frontend.ops.info.name, " DVB-T",
+ sizeof(state->frontend.ops.info.name));
+ }
+
+ printk(KERN_INFO "drxk: frontend initialized.\n");
+ return &state->frontend;
error:
printk(KERN_ERR "drxk: not found\n");
diff --git a/drivers/media/dvb/frontends/drxk_hard.h b/drivers/media/dvb/frontends/drxk_hard.h
index a05c32eecdcc..3a58b73eb9b9 100644
--- a/drivers/media/dvb/frontends/drxk_hard.h
+++ b/drivers/media/dvb/frontends/drxk_hard.h
@@ -195,9 +195,8 @@ struct DRXKOfdmScCmd_t {
};
struct drxk_state {
- struct dvb_frontend c_frontend;
- struct dvb_frontend t_frontend;
- struct dvb_frontend_parameters param;
+ struct dvb_frontend frontend;
+ struct dtv_frontend_properties props;
struct device *dev;
struct i2c_adapter *i2c;
@@ -205,7 +204,6 @@ struct drxk_state {
void *priv;
struct mutex mutex;
- struct mutex ctlock;
u32 m_Instance; /**< Channel 1,2,3 or 4 */
@@ -263,6 +261,8 @@ struct drxk_state {
u8 m_TSDataStrength;
u8 m_TSClockkStrength;
+ bool m_itut_annex_c; /* If true, uses ITU-T DVB-C Annex C, instead of Annex A */
+
enum DRXMPEGStrWidth_t m_widthSTR; /**< MPEG start width */
u32 m_mpegTsStaticBitrate; /**< Maximum bitrate in b/s in case
static clockrate is selected */
diff --git a/drivers/media/dvb/frontends/ds3000.c b/drivers/media/dvb/frontends/ds3000.c
index 90bf573308b0..af65d013db11 100644
--- a/drivers/media/dvb/frontends/ds3000.c
+++ b/drivers/media/dvb/frontends/ds3000.c
@@ -934,20 +934,6 @@ error2:
}
EXPORT_SYMBOL(ds3000_attach);
-static int ds3000_set_property(struct dvb_frontend *fe,
- struct dtv_property *tvp)
-{
- dprintk("%s(..)\n", __func__);
- return 0;
-}
-
-static int ds3000_get_property(struct dvb_frontend *fe,
- struct dtv_property *tvp)
-{
- dprintk("%s(..)\n", __func__);
- return 0;
-}
-
static int ds3000_set_carrier_offset(struct dvb_frontend *fe,
s32 carrier_offset_khz)
{
@@ -967,8 +953,7 @@ static int ds3000_set_carrier_offset(struct dvb_frontend *fe,
return 0;
}
-static int ds3000_set_frontend(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *p)
+static int ds3000_set_frontend(struct dvb_frontend *fe)
{
struct ds3000_state *state = fe->demodulator_priv;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
@@ -994,15 +979,15 @@ static int ds3000_set_frontend(struct dvb_frontend *fe,
div4 = 0;
/* calculate and set freq divider */
- if (p->frequency < 1146000) {
+ if (c->frequency < 1146000) {
ds3000_tuner_writereg(state, 0x10, 0x11);
div4 = 1;
- ndiv = ((p->frequency * (6 + 8) * 4) +
+ ndiv = ((c->frequency * (6 + 8) * 4) +
(DS3000_XTAL_FREQ / 2)) /
DS3000_XTAL_FREQ - 1024;
} else {
ds3000_tuner_writereg(state, 0x10, 0x01);
- ndiv = ((p->frequency * (6 + 8) * 2) +
+ ndiv = ((c->frequency * (6 + 8) * 2) +
(DS3000_XTAL_FREQ / 2)) /
DS3000_XTAL_FREQ - 1024;
}
@@ -1101,7 +1086,7 @@ static int ds3000_set_frontend(struct dvb_frontend *fe,
msleep(60);
offset_khz = (ndiv - ndiv % 2 + 1024) * DS3000_XTAL_FREQ
- / (6 + 8) / (div4 + 1) / 2 - p->frequency;
+ / (6 + 8) / (div4 + 1) / 2 - c->frequency;
/* ds3000 global reset */
ds3000_writereg(state, 0x07, 0x80);
@@ -1210,7 +1195,7 @@ static int ds3000_set_frontend(struct dvb_frontend *fe,
for (i = 0; i < 30 ; i++) {
ds3000_read_status(fe, &status);
- if (status && FE_HAS_LOCK)
+ if (status & FE_HAS_LOCK)
break;
msleep(10);
@@ -1220,13 +1205,13 @@ static int ds3000_set_frontend(struct dvb_frontend *fe,
}
static int ds3000_tune(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *p,
+ bool re_tune,
unsigned int mode_flags,
unsigned int *delay,
fe_status_t *status)
{
- if (p) {
- int ret = ds3000_set_frontend(fe, p);
+ if (re_tune) {
+ int ret = ds3000_set_frontend(fe);
if (ret)
return ret;
}
@@ -1279,10 +1264,9 @@ static int ds3000_sleep(struct dvb_frontend *fe)
}
static struct dvb_frontend_ops ds3000_ops = {
-
+ .delsys = { SYS_DVBS, SYS_DVBS2},
.info = {
.name = "Montage Technology DS3000/TS2020",
- .type = FE_QPSK,
.frequency_min = 950000,
.frequency_max = 2150000,
.frequency_stepsize = 1011, /* kHz for QPSK frontends */
@@ -1312,8 +1296,6 @@ static struct dvb_frontend_ops ds3000_ops = {
.diseqc_send_burst = ds3000_diseqc_send_burst,
.get_frontend_algo = ds3000_get_algo,
- .set_property = ds3000_set_property,
- .get_property = ds3000_get_property,
.set_frontend = ds3000_set_frontend,
.tune = ds3000_tune,
};
diff --git a/drivers/media/dvb/frontends/dvb-pll.c b/drivers/media/dvb/frontends/dvb-pll.c
index 62a65efdf8d6..1ab34838221c 100644
--- a/drivers/media/dvb/frontends/dvb-pll.c
+++ b/drivers/media/dvb/frontends/dvb-pll.c
@@ -61,8 +61,7 @@ struct dvb_pll_desc {
u32 min;
u32 max;
u32 iffreq;
- void (*set)(struct dvb_frontend *fe, u8 *buf,
- const struct dvb_frontend_parameters *params);
+ void (*set)(struct dvb_frontend *fe, u8 *buf);
u8 *initdata;
u8 *initdata2;
u8 *sleepdata;
@@ -93,10 +92,10 @@ static struct dvb_pll_desc dvb_pll_thomson_dtt7579 = {
},
};
-static void thomson_dtt759x_bw(struct dvb_frontend *fe, u8 *buf,
- const struct dvb_frontend_parameters *params)
+static void thomson_dtt759x_bw(struct dvb_frontend *fe, u8 *buf)
{
- if (BANDWIDTH_7_MHZ == params->u.ofdm.bandwidth)
+ u32 bw = fe->dtv_property_cache.bandwidth_hz;
+ if (bw == 7000000)
buf[3] |= 0x10;
}
@@ -186,10 +185,10 @@ static struct dvb_pll_desc dvb_pll_env57h1xd5 = {
/* Philips TDA6650/TDA6651
* used in Panasonic ENV77H11D5
*/
-static void tda665x_bw(struct dvb_frontend *fe, u8 *buf,
- const struct dvb_frontend_parameters *params)
+static void tda665x_bw(struct dvb_frontend *fe, u8 *buf)
{
- if (params->u.ofdm.bandwidth == BANDWIDTH_8_MHZ)
+ u32 bw = fe->dtv_property_cache.bandwidth_hz;
+ if (bw == 8000000)
buf[3] |= 0x08;
}
@@ -220,10 +219,10 @@ static struct dvb_pll_desc dvb_pll_tda665x = {
/* Infineon TUA6034
* used in LG TDTP E102P
*/
-static void tua6034_bw(struct dvb_frontend *fe, u8 *buf,
- const struct dvb_frontend_parameters *params)
+static void tua6034_bw(struct dvb_frontend *fe, u8 *buf)
{
- if (BANDWIDTH_7_MHZ != params->u.ofdm.bandwidth)
+ u32 bw = fe->dtv_property_cache.bandwidth_hz;
+ if (bw != 7000000)
buf[3] |= 0x08;
}
@@ -244,10 +243,10 @@ static struct dvb_pll_desc dvb_pll_tua6034 = {
/* ALPS TDED4
* used in Nebula-Cards and USB boxes
*/
-static void tded4_bw(struct dvb_frontend *fe, u8 *buf,
- const struct dvb_frontend_parameters *params)
+static void tded4_bw(struct dvb_frontend *fe, u8 *buf)
{
- if (params->u.ofdm.bandwidth == BANDWIDTH_8_MHZ)
+ u32 bw = fe->dtv_property_cache.bandwidth_hz;
+ if (bw == 8000000)
buf[3] |= 0x04;
}
@@ -319,11 +318,11 @@ static struct dvb_pll_desc dvb_pll_philips_sd1878_tda8261 = {
},
};
-static void opera1_bw(struct dvb_frontend *fe, u8 *buf,
- const struct dvb_frontend_parameters *params)
+static void opera1_bw(struct dvb_frontend *fe, u8 *buf)
{
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct dvb_pll_priv *priv = fe->tuner_priv;
- u32 b_w = (params->u.qpsk.symbol_rate * 27) / 32000;
+ u32 b_w = (c->symbol_rate * 27) / 32000;
struct i2c_msg msg = {
.addr = priv->pll_i2c_address,
.flags = 0,
@@ -392,8 +391,7 @@ static struct dvb_pll_desc dvb_pll_opera1 = {
}
};
-static void samsung_dtos403ih102a_set(struct dvb_frontend *fe, u8 *buf,
- const struct dvb_frontend_parameters *params)
+static void samsung_dtos403ih102a_set(struct dvb_frontend *fe, u8 *buf)
{
struct dvb_pll_priv *priv = fe->tuner_priv;
struct i2c_msg msg = {
@@ -537,30 +535,29 @@ static struct dvb_pll_desc *pll_list[] = {
/* code */
static int dvb_pll_configure(struct dvb_frontend *fe, u8 *buf,
- const struct dvb_frontend_parameters *params)
+ const u32 frequency)
{
struct dvb_pll_priv *priv = fe->tuner_priv;
struct dvb_pll_desc *desc = priv->pll_desc;
u32 div;
int i;
- if (params->frequency != 0 && (params->frequency < desc->min ||
- params->frequency > desc->max))
+ if (frequency && (frequency < desc->min || frequency > desc->max))
return -EINVAL;
for (i = 0; i < desc->count; i++) {
- if (params->frequency > desc->entries[i].limit)
+ if (frequency > desc->entries[i].limit)
continue;
break;
}
if (debug)
printk("pll: %s: freq=%d | i=%d/%d\n", desc->name,
- params->frequency, i, desc->count);
+ frequency, i, desc->count);
if (i == desc->count)
return -EINVAL;
- div = (params->frequency + desc->iffreq +
+ div = (frequency + desc->iffreq +
desc->entries[i].stepsize/2) / desc->entries[i].stepsize;
buf[0] = div >> 8;
buf[1] = div & 0xff;
@@ -568,7 +565,7 @@ static int dvb_pll_configure(struct dvb_frontend *fe, u8 *buf,
buf[3] = desc->entries[i].cb;
if (desc->set)
- desc->set(fe, buf, params);
+ desc->set(fe, buf);
if (debug)
printk("pll: %s: div=%d | buf=0x%02x,0x%02x,0x%02x,0x%02x\n",
@@ -611,9 +608,9 @@ static int dvb_pll_sleep(struct dvb_frontend *fe)
return -EINVAL;
}
-static int dvb_pll_set_params(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *params)
+static int dvb_pll_set_params(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct dvb_pll_priv *priv = fe->tuner_priv;
u8 buf[4];
struct i2c_msg msg =
@@ -625,7 +622,8 @@ static int dvb_pll_set_params(struct dvb_frontend *fe,
if (priv->i2c == NULL)
return -EINVAL;
- if ((result = dvb_pll_configure(fe, buf, params)) < 0)
+ result = dvb_pll_configure(fe, buf, c->frequency);
+ if (result < 0)
return result;
else
frequency = result;
@@ -637,15 +635,15 @@ static int dvb_pll_set_params(struct dvb_frontend *fe,
}
priv->frequency = frequency;
- priv->bandwidth = (fe->ops.info.type == FE_OFDM) ? params->u.ofdm.bandwidth : 0;
+ priv->bandwidth = c->bandwidth_hz;
return 0;
}
static int dvb_pll_calc_regs(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *params,
u8 *buf, int buf_len)
{
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct dvb_pll_priv *priv = fe->tuner_priv;
int result;
u32 frequency = 0;
@@ -653,7 +651,8 @@ static int dvb_pll_calc_regs(struct dvb_frontend *fe,
if (buf_len < 5)
return -EINVAL;
- if ((result = dvb_pll_configure(fe, buf+1, params)) < 0)
+ result = dvb_pll_configure(fe, buf + 1, c->frequency);
+ if (result < 0)
return result;
else
frequency = result;
@@ -661,7 +660,7 @@ static int dvb_pll_calc_regs(struct dvb_frontend *fe,
buf[0] = priv->pll_i2c_address;
priv->frequency = frequency;
- priv->bandwidth = (fe->ops.info.type == FE_OFDM) ? params->u.ofdm.bandwidth : 0;
+ priv->bandwidth = c->bandwidth_hz;
return 5;
}
diff --git a/drivers/media/dvb/frontends/dvb_dummy_fe.c b/drivers/media/dvb/frontends/dvb_dummy_fe.c
index a7fc7e53a551..dcfc902c8678 100644
--- a/drivers/media/dvb/frontends/dvb_dummy_fe.c
+++ b/drivers/media/dvb/frontends/dvb_dummy_fe.c
@@ -68,15 +68,18 @@ static int dvb_dummy_fe_read_ucblocks(struct dvb_frontend* fe, u32* ucblocks)
return 0;
}
-static int dvb_dummy_fe_get_frontend(struct dvb_frontend* fe, struct dvb_frontend_parameters *p)
+/*
+ * Only needed if it actually reads something from the hardware
+ */
+static int dvb_dummy_fe_get_frontend(struct dvb_frontend *fe)
{
return 0;
}
-static int dvb_dummy_fe_set_frontend(struct dvb_frontend* fe, struct dvb_frontend_parameters *p)
+static int dvb_dummy_fe_set_frontend(struct dvb_frontend *fe)
{
if (fe->ops.tuner_ops.set_params) {
- fe->ops.tuner_ops.set_params(fe, p);
+ fe->ops.tuner_ops.set_params(fe);
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 0);
}
@@ -171,10 +174,9 @@ error:
}
static struct dvb_frontend_ops dvb_dummy_fe_ofdm_ops = {
-
+ .delsys = { SYS_DVBT },
.info = {
.name = "Dummy DVB-T",
- .type = FE_OFDM,
.frequency_min = 0,
.frequency_max = 863250000,
.frequency_stepsize = 62500,
@@ -203,10 +205,9 @@ static struct dvb_frontend_ops dvb_dummy_fe_ofdm_ops = {
};
static struct dvb_frontend_ops dvb_dummy_fe_qam_ops = {
-
+ .delsys = { SYS_DVBC_ANNEX_A },
.info = {
.name = "Dummy DVB-C",
- .type = FE_QAM,
.frequency_stepsize = 62500,
.frequency_min = 51000000,
.frequency_max = 858000000,
@@ -233,10 +234,9 @@ static struct dvb_frontend_ops dvb_dummy_fe_qam_ops = {
};
static struct dvb_frontend_ops dvb_dummy_fe_qpsk_ops = {
-
+ .delsys = { SYS_DVBS },
.info = {
.name = "Dummy DVB-S",
- .type = FE_QPSK,
.frequency_min = 950000,
.frequency_max = 2150000,
.frequency_stepsize = 250, /* kHz for QPSK frontends */
diff --git a/drivers/media/dvb/frontends/ec100.c b/drivers/media/dvb/frontends/ec100.c
index 2414dc6ee5d9..c56fddbf53b7 100644
--- a/drivers/media/dvb/frontends/ec100.c
+++ b/drivers/media/dvb/frontends/ec100.c
@@ -76,19 +76,19 @@ static int ec100_read_reg(struct ec100_state *state, u8 reg, u8 *val)
return 0;
}
-static int ec100_set_frontend(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *params)
+static int ec100_set_frontend(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct ec100_state *state = fe->demodulator_priv;
int ret;
u8 tmp, tmp2;
- deb_info("%s: freq:%d bw:%d\n", __func__, params->frequency,
- params->u.ofdm.bandwidth);
+ deb_info("%s: freq:%d bw:%d\n", __func__, c->frequency,
+ c->bandwidth_hz);
/* program tuner */
if (fe->ops.tuner_ops.set_params)
- fe->ops.tuner_ops.set_params(fe, params);
+ fe->ops.tuner_ops.set_params(fe);
ret = ec100_write_reg(state, 0x04, 0x06);
if (ret)
@@ -108,16 +108,16 @@ static int ec100_set_frontend(struct dvb_frontend *fe,
B 0x1b | 0xb7 | 0x00 | 0x49
B 0x1c | 0x55 | 0x64 | 0x72 */
- switch (params->u.ofdm.bandwidth) {
- case BANDWIDTH_6_MHZ:
+ switch (c->bandwidth_hz) {
+ case 6000000:
tmp = 0xb7;
tmp2 = 0x55;
break;
- case BANDWIDTH_7_MHZ:
+ case 7000000:
tmp = 0x00;
tmp2 = 0x64;
break;
- case BANDWIDTH_8_MHZ:
+ case 8000000:
default:
tmp = 0x49;
tmp2 = 0x72;
@@ -306,9 +306,9 @@ error:
EXPORT_SYMBOL(ec100_attach);
static struct dvb_frontend_ops ec100_ops = {
+ .delsys = { SYS_DVBT },
.info = {
.name = "E3C EC100 DVB-T",
- .type = FE_OFDM,
.caps =
FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
diff --git a/drivers/media/dvb/frontends/hd29l2.c b/drivers/media/dvb/frontends/hd29l2.c
new file mode 100644
index 000000000000..a00318190837
--- /dev/null
+++ b/drivers/media/dvb/frontends/hd29l2.c
@@ -0,0 +1,861 @@
+/*
+ * HDIC HD29L2 DMB-TH demodulator driver
+ *
+ * Copyright (C) 2011 Metropolia University of Applied Sciences, Electria R&D
+ *
+ * Author: Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include "hd29l2_priv.h"
+
+int hd29l2_debug;
+module_param_named(debug, hd29l2_debug, int, 0644);
+MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
+
+/* write multiple registers */
+static int hd29l2_wr_regs(struct hd29l2_priv *priv, u8 reg, u8 *val, int len)
+{
+ int ret;
+ u8 buf[2 + len];
+ struct i2c_msg msg[1] = {
+ {
+ .addr = priv->cfg.i2c_addr,
+ .flags = 0,
+ .len = sizeof(buf),
+ .buf = buf,
+ }
+ };
+
+ buf[0] = 0x00;
+ buf[1] = reg;
+ memcpy(&buf[2], val, len);
+
+ ret = i2c_transfer(priv->i2c, msg, 1);
+ if (ret == 1) {
+ ret = 0;
+ } else {
+ warn("i2c wr failed=%d reg=%02x len=%d", ret, reg, len);
+ ret = -EREMOTEIO;
+ }
+
+ return ret;
+}
+
+/* read multiple registers */
+static int hd29l2_rd_regs(struct hd29l2_priv *priv, u8 reg, u8 *val, int len)
+{
+ int ret;
+ u8 buf[2] = { 0x00, reg };
+ struct i2c_msg msg[2] = {
+ {
+ .addr = priv->cfg.i2c_addr,
+ .flags = 0,
+ .len = 2,
+ .buf = buf,
+ }, {
+ .addr = priv->cfg.i2c_addr,
+ .flags = I2C_M_RD,
+ .len = len,
+ .buf = val,
+ }
+ };
+
+ ret = i2c_transfer(priv->i2c, msg, 2);
+ if (ret == 2) {
+ ret = 0;
+ } else {
+ warn("i2c rd failed=%d reg=%02x len=%d", ret, reg, len);
+ ret = -EREMOTEIO;
+ }
+
+ return ret;
+}
+
+/* write single register */
+static int hd29l2_wr_reg(struct hd29l2_priv *priv, u8 reg, u8 val)
+{
+ return hd29l2_wr_regs(priv, reg, &val, 1);
+}
+
+/* read single register */
+static int hd29l2_rd_reg(struct hd29l2_priv *priv, u8 reg, u8 *val)
+{
+ return hd29l2_rd_regs(priv, reg, val, 1);
+}
+
+/* write single register with mask */
+static int hd29l2_wr_reg_mask(struct hd29l2_priv *priv, u8 reg, u8 val, u8 mask)
+{
+ int ret;
+ u8 tmp;
+
+ /* no need for read if whole reg is written */
+ if (mask != 0xff) {
+ ret = hd29l2_rd_regs(priv, reg, &tmp, 1);
+ if (ret)
+ return ret;
+
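+ /* read-modify-write: keep register bits outside the mask, replace bits inside it */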
+ val &= mask;
+ tmp &= ~mask;
+ val |= tmp;
+ }
+
+ return hd29l2_wr_regs(priv, reg, &val, 1);
+}
+
+/* read single register with mask */
+int hd29l2_rd_reg_mask(struct hd29l2_priv *priv, u8 reg, u8 *val, u8 mask)
+{
+ int ret, i;
+ u8 tmp;
+
+ ret = hd29l2_rd_regs(priv, reg, &tmp, 1);
+ if (ret)
+ return ret;
+
+ tmp &= mask;
+
+ /* find position of the first bit */
+ for (i = 0; i < 8; i++) {
+ if ((mask >> i) & 0x01)
+ break;
+ }
+ *val = tmp >> i;
+
+ return 0;
+}
+
+static int hd29l2_soft_reset(struct hd29l2_priv *priv)
+{
+ int ret;
+ u8 tmp;
+
+ ret = hd29l2_rd_reg(priv, 0x26, &tmp);
+ if (ret)
+ goto err;
+
+ ret = hd29l2_wr_reg(priv, 0x26, 0x0d);
+ if (ret)
+ goto err;
+
+ usleep_range(10000, 20000);
+
+ ret = hd29l2_wr_reg(priv, 0x26, tmp);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ dbg("%s: failed=%d", __func__, ret);
+ return ret;
+}
+
+static int hd29l2_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
+{
+ int ret, i;
+ struct hd29l2_priv *priv = fe->demodulator_priv;
+ u8 tmp;
+
+ dbg("%s: enable=%d", __func__, enable);
+
+ /* set tuner address for demod */
+ if (!priv->tuner_i2c_addr_programmed && enable) {
+ /* no need to set tuner address every time, once is enough */
+ ret = hd29l2_wr_reg(priv, 0x9d, priv->cfg.tuner_i2c_addr << 1);
+ if (ret)
+ goto err;
+
+ priv->tuner_i2c_addr_programmed = true;
+ }
+
+ /* open / close gate */
+ ret = hd29l2_wr_reg(priv, 0x9f, enable);
+ if (ret)
+ goto err;
+
+ /* wait until the demod reports the requested gate state */
+ for (i = 10; i; i--) {
+ ret = hd29l2_rd_reg(priv, 0x9e, &tmp);
+ if (ret)
+ goto err;
+
+ if (tmp == enable)
+ break;
+
+ usleep_range(5000, 10000);
+ }
+
+ dbg("%s: loop=%d", __func__, i);
+
+ return ret;
+err:
+ dbg("%s: failed=%d", __func__, ret);
+ return ret;
+}
+
+static int hd29l2_read_status(struct dvb_frontend *fe, fe_status_t *status)
+{
+ int ret;
+ struct hd29l2_priv *priv = fe->demodulator_priv;
+ u8 buf[2];
+
+ *status = 0;
+
+ ret = hd29l2_rd_reg(priv, 0x05, &buf[0]);
+ if (ret)
+ goto err;
+
+ if (buf[0] & 0x01) {
+ /* full lock */
+ *status |= FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_VITERBI |
+ FE_HAS_SYNC | FE_HAS_LOCK;
+ } else {
+ ret = hd29l2_rd_reg(priv, 0x0d, &buf[1]);
+ if (ret)
+ goto err;
+
+ if ((buf[1] & 0xfe) == 0x78)
+ /* partial lock */
+ *status |= FE_HAS_SIGNAL | FE_HAS_CARRIER |
+ FE_HAS_VITERBI | FE_HAS_SYNC;
+ }
+
+ priv->fe_status = *status;
+
+ return 0;
+err:
+ dbg("%s: failed=%d", __func__, ret);
+ return ret;
+}
+
+static int hd29l2_read_snr(struct dvb_frontend *fe, u16 *snr)
+{
+ int ret;
+ struct hd29l2_priv *priv = fe->demodulator_priv;
+ u8 buf[2];
+ u16 tmp;
+
+ if (!(priv->fe_status & FE_HAS_LOCK)) {
+ *snr = 0;
+ ret = 0;
+ goto err;
+ }
+
+ ret = hd29l2_rd_regs(priv, 0x0b, buf, 2);
+ if (ret)
+ goto err;
+
+ tmp = (buf[0] << 8) | buf[1];
+
+ /* report SNR in dB * 10 */
+ #define LOG10_20736_24 72422627 /* log10(20736) << 24 */
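+ /* intlog10() returns log10(x) << 24, so this yields 100 * log10(20736 / tmp), i.e. dB * 10 */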
+ if (tmp)
+ *snr = (LOG10_20736_24 - intlog10(tmp)) / ((1 << 24) / 100);
+ else
+ *snr = 0;
+
+ return 0;
+err:
+ dbg("%s: failed=%d", __func__, ret);
+ return ret;
+}
+
+static int hd29l2_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
+{
+ int ret;
+ struct hd29l2_priv *priv = fe->demodulator_priv;
+ u8 buf[2];
+ u16 tmp;
+
+ *strength = 0;
+
+ ret = hd29l2_rd_regs(priv, 0xd5, buf, 2);
+ if (ret)
+ goto err;
+
+ tmp = buf[0] << 8 | buf[1];
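+ /* invert the 12-bit raw value (the register appears to report inverse strength) */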
+ tmp = ~tmp & 0x0fff;
+
+ /* scale value to 0x0000-0xffff from 0x0000-0x0fff */
+ *strength = tmp * 0xffff / 0x0fff;
+
+ return 0;
+err:
+ dbg("%s: failed=%d", __func__, ret);
+ return ret;
+}
+
+static int hd29l2_read_ber(struct dvb_frontend *fe, u32 *ber)
+{
+ int ret;
+ struct hd29l2_priv *priv = fe->demodulator_priv;
+ u8 buf[2];
+
+ if (!(priv->fe_status & FE_HAS_SYNC)) {
+ *ber = 0;
+ ret = 0;
+ goto err;
+ }
+
+ ret = hd29l2_rd_regs(priv, 0xd9, buf, 2);
+ if (ret) {
+ *ber = 0;
+ goto err;
+ }
+
+ /* LDPC BER */
+ *ber = ((buf[0] & 0x0f) << 8) | buf[1];
+
+ return 0;
+err:
+ dbg("%s: failed=%d", __func__, ret);
+ return ret;
+}
+
+static int hd29l2_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
+{
+ /* no way to read? */
+ *ucblocks = 0;
+ return 0;
+}
+
+static enum dvbfe_search hd29l2_search(struct dvb_frontend *fe)
+{
+ int ret, i;
+ struct hd29l2_priv *priv = fe->demodulator_priv;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ u8 tmp, buf[3];
+ u8 modulation, carrier, guard_interval, interleave, code_rate;
+ u64 num64;
+ u32 if_freq, if_ctl;
+ bool auto_mode;
+
+ dbg("%s: delivery_system=%d frequency=%d bandwidth_hz=%d " \
+ "modulation=%d inversion=%d fec_inner=%d guard_interval=%d",
+ __func__,
+ c->delivery_system, c->frequency, c->bandwidth_hz,
+ c->modulation, c->inversion, c->fec_inner, c->guard_interval);
+
+ /* for now we always detect the params automatically */
+ auto_mode = true;
+
+ /* program tuner */
+ if (fe->ops.tuner_ops.set_params)
+ fe->ops.tuner_ops.set_params(fe);
+
+ /* get and program IF */
+ if (fe->ops.tuner_ops.get_if_frequency)
+ fe->ops.tuner_ops.get_if_frequency(fe, &if_freq);
+ else
+ if_freq = 0;
+
+ if (if_freq) {
+ /* normal IF */
+
+ /* calc IF control value */
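+ /* if_ctl = if_freq * 2^23 / HD29L2_XTAL - 2^23 (the NCO word, see get_frontend) */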
+ num64 = if_freq;
+ num64 *= 0x800000;
+ num64 = div_u64(num64, HD29L2_XTAL);
+ num64 -= 0x800000;
+ if_ctl = num64;
+
+ tmp = 0xfc; /* tuner type normal */
+ } else {
+ /* zero IF */
+ if_ctl = 0;
+ tmp = 0xfe; /* tuner type Zero-IF */
+ }
+
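+ /* pack the 24-bit NCO word LSB first for the 3-byte write starting at reg 0x14 */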
+ buf[0] = ((if_ctl >> 0) & 0xff);
+ buf[1] = ((if_ctl >> 8) & 0xff);
+ buf[2] = ((if_ctl >> 16) & 0xff);
+
+ /* program IF control */
+ ret = hd29l2_wr_regs(priv, 0x14, buf, 3);
+ if (ret)
+ goto err;
+
+ /* program tuner type */
+ ret = hd29l2_wr_reg(priv, 0xab, tmp);
+ if (ret)
+ goto err;
+
+ dbg("%s: if_freq=%d if_ctl=%x", __func__, if_freq, if_ctl);
+
+ if (auto_mode) {
+ /*
+ * use auto mode
+ */
+
+ /* disable quick mode */
+ ret = hd29l2_wr_reg_mask(priv, 0xac, 0 << 7, 0x80);
+ if (ret)
+ goto err;
+
+ ret = hd29l2_wr_reg_mask(priv, 0x82, 1 << 1, 0x02);
+ if (ret)
+ goto err;
+
+ /* enable auto mode */
+ ret = hd29l2_wr_reg_mask(priv, 0x7d, 1 << 6, 0x40);
+ if (ret)
+ goto err;
+
+ ret = hd29l2_wr_reg_mask(priv, 0x81, 1 << 3, 0x08);
+ if (ret)
+ goto err;
+
+ /* soft reset */
+ ret = hd29l2_soft_reset(priv);
+ if (ret)
+ goto err;
+
+ /* detect modulation */
+ for (i = 30; i; i--) {
+ msleep(100);
+
+ ret = hd29l2_rd_reg(priv, 0x0d, &tmp);
+ if (ret)
+ goto err;
+
+ if ((((tmp & 0xf0) >= 0x10) &&
+ ((tmp & 0x0f) == 0x08)) || (tmp >= 0x2c))
+ break;
+ }
+
+ dbg("%s: loop=%d", __func__, i);
+
+ if (i == 0)
+ /* detection failed */
+ return DVBFE_ALGO_SEARCH_FAILED;
+
+ /* read modulation */
+ ret = hd29l2_rd_reg_mask(priv, 0x7d, &modulation, 0x07);
+ if (ret)
+ goto err;
+ } else {
+ /*
+ * use manual mode
+ */
+
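+ /* note: this branch is currently unreachable since auto_mode is always true above */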
+ modulation = HD29L2_QAM64;
+ carrier = HD29L2_CARRIER_MULTI;
+ guard_interval = HD29L2_PN945;
+ interleave = HD29L2_INTERLEAVER_420;
+ code_rate = HD29L2_CODE_RATE_08;
+
+ tmp = (code_rate << 3) | modulation;
+ ret = hd29l2_wr_reg_mask(priv, 0x7d, tmp, 0x5f);
+ if (ret)
+ goto err;
+
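+ /* reg 0x81: carrier mode in bit 2, frame header (guard) in bits [1:0] (cf. get_frontend) */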
+ tmp = (carrier << 2) | guard_interval;
+ ret = hd29l2_wr_reg_mask(priv, 0x81, tmp, 0x0f);
+ if (ret)
+ goto err;
+
+ tmp = interleave;
+ ret = hd29l2_wr_reg_mask(priv, 0x82, tmp, 0x03);
+ if (ret)
+ goto err;
+ }
+
+ /* ensure modulation validity */
+ /* 0=QAM4_NR, 1=QAM4, 2=QAM16, 3=QAM32, 4=QAM64 */
+ if (modulation > (ARRAY_SIZE(reg_mod_vals_tab[0].val) - 1)) {
+ dbg("%s: modulation=%d not valid", __func__, modulation);
+ goto err;
+ }
+
+ /* program registers according to modulation */
+ for (i = 0; i < ARRAY_SIZE(reg_mod_vals_tab); i++) {
+ ret = hd29l2_wr_reg(priv, reg_mod_vals_tab[i].reg,
+ reg_mod_vals_tab[i].val[modulation]);
+ if (ret)
+ goto err;
+ }
+
+ /* read guard interval */
+ ret = hd29l2_rd_reg_mask(priv, 0x81, &guard_interval, 0x03);
+ if (ret)
+ goto err;
+
+ /* read carrier mode */
+ ret = hd29l2_rd_reg_mask(priv, 0x81, &carrier, 0x04);
+ if (ret)
+ goto err;
+
+ dbg("%s: modulation=%d guard_interval=%d carrier=%d",
+ __func__, modulation, guard_interval, carrier);
+
+ if ((carrier == HD29L2_CARRIER_MULTI) && (modulation == HD29L2_QAM64) &&
+ (guard_interval == HD29L2_PN945)) {
+ dbg("%s: C=3780 && QAM64 && PN945", __func__);
+
+ ret = hd29l2_wr_reg(priv, 0x42, 0x33);
+ if (ret)
+ goto err;
+
+ ret = hd29l2_wr_reg(priv, 0xdd, 0x01);
+ if (ret)
+ goto err;
+ }
+
+ usleep_range(10000, 20000);
+
+ /* soft reset */
+ ret = hd29l2_soft_reset(priv);
+ if (ret)
+ goto err;
+
+ /* wait for demod lock */
+ for (i = 30; i; i--) {
+ msleep(100);
+
+ /* read lock bit */
+ ret = hd29l2_rd_reg_mask(priv, 0x05, &tmp, 0x01);
+ if (ret)
+ goto err;
+
+ if (tmp)
+ break;
+ }
+
+ dbg("%s: loop=%d", __func__, i);
+
+ if (i == 0)
+ return DVBFE_ALGO_SEARCH_AGAIN;
+
+ return DVBFE_ALGO_SEARCH_SUCCESS;
+err:
+ dbg("%s: failed=%d", __func__, ret);
+ return DVBFE_ALGO_SEARCH_ERROR;
+}
+
+static int hd29l2_get_frontend_algo(struct dvb_frontend *fe)
+{
+ return DVBFE_ALGO_CUSTOM;
+}
+
+static int hd29l2_get_frontend(struct dvb_frontend *fe)
+{
+ int ret;
+ struct hd29l2_priv *priv = fe->demodulator_priv;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ u8 buf[3];
+ u32 if_ctl;
+ char *str_constellation, *str_code_rate, *str_constellation_code_rate,
+ *str_guard_interval, *str_carrier, *str_guard_interval_carrier,
+ *str_interleave, *str_interleave_;
+
+ ret = hd29l2_rd_reg(priv, 0x7d, &buf[0]);
+ if (ret)
+ goto err;
+
+ ret = hd29l2_rd_regs(priv, 0x81, &buf[1], 2);
+ if (ret)
+ goto err;
+
+ /* constellation, 0x7d[2:0] */
+ switch ((buf[0] >> 0) & 0x07) {
+ case 0: /* QAM4NR */
+ str_constellation = "QAM4NR";
+ c->modulation = QAM_AUTO; /* FIXME */
+ break;
+ case 1: /* QAM4 */
+ str_constellation = "QAM4";
+ c->modulation = QPSK; /* FIXME */
+ break;
+ case 2:
+ str_constellation = "QAM16";
+ c->modulation = QAM_16;
+ break;
+ case 3:
+ str_constellation = "QAM32";
+ c->modulation = QAM_32;
+ break;
+ case 4:
+ str_constellation = "QAM64";
+ c->modulation = QAM_64;
+ break;
+ default:
+ str_constellation = "?";
+ }
+
+ /* LDPC code rate, 0x7d[4:3] */
+ switch ((buf[0] >> 3) & 0x03) {
+ case 0: /* 0.4 */
+ str_code_rate = "0.4";
+ c->fec_inner = FEC_AUTO; /* FIXME */
+ break;
+ case 1: /* 0.6 */
+ str_code_rate = "0.6";
+ c->fec_inner = FEC_3_5;
+ break;
+ case 2: /* 0.8 */
+ str_code_rate = "0.8";
+ c->fec_inner = FEC_4_5;
+ break;
+ default:
+ str_code_rate = "?";
+ }
+
+ /* constellation & code rate set, 0x7d[6] */
+ switch ((buf[0] >> 6) & 0x01) {
+ case 0:
+ str_constellation_code_rate = "manual";
+ break;
+ case 1:
+ str_constellation_code_rate = "auto";
+ break;
+ default:
+ str_constellation_code_rate = "?";
+ }
+
+ /* frame header, 0x81[1:0] */
+ switch ((buf[1] >> 0) & 0x03) {
+ case 0: /* PN945 */
+ str_guard_interval = "PN945";
+ c->guard_interval = GUARD_INTERVAL_AUTO; /* FIXME */
+ break;
+ case 1: /* PN595 */
+ str_guard_interval = "PN595";
+ c->guard_interval = GUARD_INTERVAL_AUTO; /* FIXME */
+ break;
+ case 2: /* PN420 */
+ str_guard_interval = "PN420";
+ c->guard_interval = GUARD_INTERVAL_AUTO; /* FIXME */
+ break;
+ default:
+ str_guard_interval = "?";
+ }
+
+ /* carrier, 0x81[2] */
+ switch ((buf[1] >> 2) & 0x01) {
+ case 0:
+ str_carrier = "C=1";
+ break;
+ case 1:
+ str_carrier = "C=3780";
+ break;
+ default:
+ str_carrier = "?";
+ }
+
+ /* frame header & carrier set, 0x81[3] */
+ switch ((buf[1] >> 3) & 0x01) {
+ case 0:
+ str_guard_interval_carrier = "manual";
+ break;
+ case 1:
+ str_guard_interval_carrier = "auto";
+ break;
+ default:
+ str_guard_interval_carrier = "?";
+ }
+
+ /* interleave, 0x82[0] */
+ switch ((buf[2] >> 0) & 0x01) {
+ case 0:
+ str_interleave = "M=720";
+ break;
+ case 1:
+ str_interleave = "M=240";
+ break;
+ default:
+ str_interleave = "?";
+ }
+
+ /* interleave set, 0x82[1] */
+ switch ((buf[2] >> 1) & 0x01) {
+ case 0:
+ str_interleave_ = "manual";
+ break;
+ case 1:
+ str_interleave_ = "auto";
+ break;
+ default:
+ str_interleave_ = "?";
+ }
+
+ /*
+ * We could read out the currently detected NCO and use that value next
+ * time instead of calculating a new value from the target IF.
+ * It should not affect receiver sensitivity, but gaining lock
+ * after a tune could be easier...
+ */
+ ret = hd29l2_rd_regs(priv, 0xb1, &buf[0], 3);
+ if (ret)
+ goto err;
+
+ if_ctl = (buf[0] << 16) | ((buf[1] - 7) << 8) | buf[2];
+
+ dbg("%s: %s %s %s | %s %s %s | %s %s | NCO=%06x", __func__,
+ str_constellation, str_code_rate, str_constellation_code_rate,
+ str_guard_interval, str_carrier, str_guard_interval_carrier,
+ str_interleave, str_interleave_, if_ctl);
+
+ return 0;
+err:
+ dbg("%s: failed=%d", __func__, ret);
+ return ret;
+}
+
+static int hd29l2_init(struct dvb_frontend *fe)
+{
+ int ret, i;
+ struct hd29l2_priv *priv = fe->demodulator_priv;
+ u8 tmp;
+ static const struct reg_val tab[] = {
+ { 0x3a, 0x06 },
+ { 0x3b, 0x03 },
+ { 0x3c, 0x04 },
+ { 0xaf, 0x06 },
+ { 0xb0, 0x1b },
+ { 0x80, 0x64 },
+ { 0x10, 0x38 },
+ };
+
+ dbg("%s:", __func__);
+
+ /* reset demod */
+ /* it is recommended to HW-reset the chip using the RST_N pin */
+ if (fe->callback) {
+ ret = fe->callback(fe, DVB_FRONTEND_COMPONENT_DEMOD, 0, 0);
+ if (ret)
+ goto err;
+
+ /* reprogramming needed because HW reset clears registers */
+ priv->tuner_i2c_addr_programmed = false;
+ }
+
+ /* init */
+ for (i = 0; i < ARRAY_SIZE(tab); i++) {
+ ret = hd29l2_wr_reg(priv, tab[i].reg, tab[i].val);
+ if (ret)
+ goto err;
+ }
+
+ /* TS params */
+ ret = hd29l2_rd_reg(priv, 0x36, &tmp);
+ if (ret)
+ goto err;
+
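+ /* keep only bits 0x1b of reg 0x36, then merge in the configured TS mode flags */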
+ tmp &= 0x1b;
+ tmp |= priv->cfg.ts_mode;
+ ret = hd29l2_wr_reg(priv, 0x36, tmp);
+ if (ret)
+ goto err;
+
+ ret = hd29l2_rd_reg(priv, 0x31, &tmp);
+ if (ret)
+ goto err;
+
+ tmp &= 0xef;
+
+ if (!(priv->cfg.ts_mode >> 7))
+ /* set b4 for serial TS */
+ tmp |= 0x10;
+
+ ret = hd29l2_wr_reg(priv, 0x31, tmp);
+ if (ret)
+ goto err;
+
+ return ret;
+err:
+ dbg("%s: failed=%d", __func__, ret);
+ return ret;
+}
+
+static void hd29l2_release(struct dvb_frontend *fe)
+{
+ struct hd29l2_priv *priv = fe->demodulator_priv;
+ kfree(priv);
+}
+
+static struct dvb_frontend_ops hd29l2_ops;
+
+struct dvb_frontend *hd29l2_attach(const struct hd29l2_config *config,
+ struct i2c_adapter *i2c)
+{
+ int ret;
+ struct hd29l2_priv *priv = NULL;
+ u8 tmp;
+
+ /* allocate memory for the internal state */
+ priv = kzalloc(sizeof(struct hd29l2_priv), GFP_KERNEL);
+ if (priv == NULL)
+ goto err;
+
+ /* setup the state */
+ priv->i2c = i2c;
+ memcpy(&priv->cfg, config, sizeof(struct hd29l2_config));
+
+ /* check if the demod is there */
+ ret = hd29l2_rd_reg(priv, 0x00, &tmp);
+ if (ret)
+ goto err;
+
+ /* create dvb_frontend */
+ memcpy(&priv->fe.ops, &hd29l2_ops, sizeof(struct dvb_frontend_ops));
+ priv->fe.demodulator_priv = priv;
+
+ return &priv->fe;
+err:
+ kfree(priv);
+ return NULL;
+}
+EXPORT_SYMBOL(hd29l2_attach);
+
+static struct dvb_frontend_ops hd29l2_ops = {
+ .delsys = { SYS_DVBT },
+ .info = {
+ .name = "HDIC HD29L2 DMB-TH",
+ .frequency_min = 474000000,
+ .frequency_max = 858000000,
+ .frequency_stepsize = 10000,
+ .caps = FE_CAN_FEC_AUTO |
+ FE_CAN_QPSK |
+ FE_CAN_QAM_16 |
+ FE_CAN_QAM_32 |
+ FE_CAN_QAM_64 |
+ FE_CAN_QAM_AUTO |
+ FE_CAN_TRANSMISSION_MODE_AUTO |
+ FE_CAN_BANDWIDTH_AUTO |
+ FE_CAN_GUARD_INTERVAL_AUTO |
+ FE_CAN_HIERARCHY_AUTO |
+ FE_CAN_RECOVER
+ },
+
+ .release = hd29l2_release,
+
+ .init = hd29l2_init,
+
+ .get_frontend_algo = hd29l2_get_frontend_algo,
+ .search = hd29l2_search,
+ .get_frontend = hd29l2_get_frontend,
+
+ .read_status = hd29l2_read_status,
+ .read_snr = hd29l2_read_snr,
+ .read_signal_strength = hd29l2_read_signal_strength,
+ .read_ber = hd29l2_read_ber,
+ .read_ucblocks = hd29l2_read_ucblocks,
+
+ .i2c_gate_ctrl = hd29l2_i2c_gate_ctrl,
+};
+
+MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
+MODULE_DESCRIPTION("HDIC HD29L2 DMB-TH demodulator driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/frontends/hd29l2.h b/drivers/media/dvb/frontends/hd29l2.h
new file mode 100644
index 000000000000..a7a64431364d
--- /dev/null
+++ b/drivers/media/dvb/frontends/hd29l2.h
@@ -0,0 +1,66 @@
+/*
+ * HDIC HD29L2 DMB-TH demodulator driver
+ *
+ * Copyright (C) 2011 Metropolia University of Applied Sciences, Electria R&D
+ *
+ * Author: Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef HD29L2_H
+#define HD29L2_H
+
+#include <linux/dvb/frontend.h>
+
+struct hd29l2_config {
+ /*
+ * demodulator I2C address
+ */
+ u8 i2c_addr;
+
+ /*
+ * tuner I2C address
+ * only needed when tuner is behind demod I2C-gate
+ */
+ u8 tuner_i2c_addr;
+
+ /*
+ * TS settings
+ */
+#define HD29L2_TS_SERIAL 0x00
+#define HD29L2_TS_PARALLEL 0x80
+#define HD29L2_TS_CLK_NORMAL 0x40
+#define HD29L2_TS_CLK_INVERTED 0x00
+#define HD29L2_TS_CLK_GATED 0x20
+#define HD29L2_TS_CLK_FREE 0x00
+ u8 ts_mode;
+};
+
+
+#if defined(CONFIG_DVB_HD29L2) || \
+ (defined(CONFIG_DVB_HD29L2_MODULE) && defined(MODULE))
+extern struct dvb_frontend *hd29l2_attach(const struct hd29l2_config *config,
+ struct i2c_adapter *i2c);
+#else
+static inline struct dvb_frontend *hd29l2_attach(
+const struct hd29l2_config *config, struct i2c_adapter *i2c)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return NULL;
+}
+#endif
+
+#endif /* HD29L2_H */
diff --git a/drivers/media/dvb/frontends/hd29l2_priv.h b/drivers/media/dvb/frontends/hd29l2_priv.h
new file mode 100644
index 000000000000..ba16dc3ec2bd
--- /dev/null
+++ b/drivers/media/dvb/frontends/hd29l2_priv.h
@@ -0,0 +1,314 @@
+/*
+ * HDIC HD29L2 DMB-TH demodulator driver
+ *
+ * Copyright (C) 2011 Metropolia University of Applied Sciences, Electria R&D
+ *
+ * Author: Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef HD29L2_PRIV
+#define HD29L2_PRIV
+
+#include <linux/dvb/version.h>
+#include "dvb_frontend.h"
+#include "dvb_math.h"
+#include "hd29l2.h"
+
+#define LOG_PREFIX "hd29l2"
+
+#undef dbg
+#define dbg(f, arg...) \
+ if (hd29l2_debug) \
+ printk(KERN_INFO LOG_PREFIX": " f "\n" , ## arg)
+#undef err
+#define err(f, arg...) printk(KERN_ERR LOG_PREFIX": " f "\n" , ## arg)
+#undef info
+#define info(f, arg...) printk(KERN_INFO LOG_PREFIX": " f "\n" , ## arg)
+#undef warn
+#define warn(f, arg...) printk(KERN_WARNING LOG_PREFIX": " f "\n" , ## arg)
+
+#define HD29L2_XTAL 30400000 /* Hz */
+
+
+#define HD29L2_QAM4NR 0x00
+#define HD29L2_QAM4 0x01
+#define HD29L2_QAM16 0x02
+#define HD29L2_QAM32 0x03
+#define HD29L2_QAM64 0x04
+
+#define HD29L2_CODE_RATE_04 0x00
+#define HD29L2_CODE_RATE_06 0x08
+#define HD29L2_CODE_RATE_08 0x10
+
+#define HD29L2_PN945 0x00
+#define HD29L2_PN595 0x01
+#define HD29L2_PN420 0x02
+
+#define HD29L2_CARRIER_SINGLE 0x00
+#define HD29L2_CARRIER_MULTI 0x01
+
+#define HD29L2_INTERLEAVER_720 0x00
+#define HD29L2_INTERLEAVER_420 0x01
+
+struct reg_val {
+ u8 reg;
+ u8 val;
+};
+
+struct reg_mod_vals {
+ u8 reg;
+ u8 val[5];
+};
+
+struct hd29l2_priv {
+ struct i2c_adapter *i2c;
+ struct dvb_frontend fe;
+ struct hd29l2_config cfg;
+ u8 tuner_i2c_addr_programmed:1;
+
+ fe_status_t fe_status;
+};
+
+static const struct reg_mod_vals reg_mod_vals_tab[] = {
+ /* REG, QAM4NR, QAM4, QAM16, QAM32, QAM64 */
+ { 0x01, { 0x10, 0x10, 0x10, 0x10, 0x10 } },
+ { 0x02, { 0x07, 0x07, 0x07, 0x07, 0x07 } },
+ { 0x03, { 0x10, 0x10, 0x10, 0x10, 0x10 } },
+ { 0x04, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
+ { 0x05, { 0x61, 0x60, 0x60, 0x61, 0x60 } },
+ { 0x06, { 0xff, 0xff, 0xff, 0xff, 0xff } },
+ { 0x07, { 0xff, 0xff, 0xff, 0xff, 0xff } },
+ { 0x08, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
+ { 0x09, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
+ { 0x0a, { 0x15, 0x15, 0x03, 0x03, 0x03 } },
+ { 0x0d, { 0x78, 0x78, 0x88, 0x78, 0x78 } },
+ { 0x0e, { 0xa0, 0x90, 0xa0, 0xa0, 0xa0 } },
+ { 0x0f, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
+ { 0x10, { 0xa0, 0xa0, 0x58, 0x38, 0x38 } },
+ { 0x11, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
+ { 0x12, { 0x5a, 0x5a, 0x5a, 0x5a, 0x5a } },
+ { 0x13, { 0xa2, 0xa2, 0xa2, 0xa2, 0xa2 } },
+ { 0x17, { 0x40, 0x40, 0x40, 0x40, 0x40 } },
+ { 0x18, { 0x21, 0x21, 0x42, 0x52, 0x42 } },
+ { 0x19, { 0x21, 0x21, 0x62, 0x72, 0x62 } },
+ { 0x1a, { 0x32, 0x43, 0xa9, 0xb9, 0xa9 } },
+ { 0x1b, { 0x32, 0x43, 0xb9, 0xd8, 0xb9 } },
+ { 0x1c, { 0x02, 0x02, 0x03, 0x02, 0x03 } },
+ { 0x1d, { 0x0c, 0x0c, 0x01, 0x02, 0x02 } },
+ { 0x1e, { 0x02, 0x02, 0x02, 0x01, 0x02 } },
+ { 0x1f, { 0x02, 0x02, 0x01, 0x02, 0x04 } },
+ { 0x20, { 0x01, 0x02, 0x01, 0x01, 0x01 } },
+ { 0x21, { 0x08, 0x08, 0x0a, 0x0a, 0x0a } },
+ { 0x22, { 0x06, 0x06, 0x04, 0x05, 0x05 } },
+ { 0x23, { 0x06, 0x06, 0x05, 0x03, 0x05 } },
+ { 0x24, { 0x08, 0x08, 0x05, 0x07, 0x07 } },
+ { 0x25, { 0x16, 0x10, 0x10, 0x0a, 0x10 } },
+ { 0x26, { 0x14, 0x14, 0x04, 0x04, 0x04 } },
+ { 0x27, { 0x58, 0x58, 0x58, 0x5c, 0x58 } },
+ { 0x28, { 0x0a, 0x0a, 0x0a, 0x0a, 0x0a } },
+ { 0x29, { 0x0a, 0x0a, 0x0a, 0x0a, 0x0a } },
+ { 0x2a, { 0x08, 0x0a, 0x08, 0x08, 0x08 } },
+ { 0x2b, { 0x08, 0x08, 0x08, 0x08, 0x08 } },
+ { 0x2c, { 0x06, 0x06, 0x06, 0x06, 0x06 } },
+ { 0x2d, { 0x05, 0x06, 0x06, 0x06, 0x06 } },
+ { 0x2e, { 0x21, 0x21, 0x21, 0x21, 0x21 } },
+ { 0x2f, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
+ { 0x30, { 0x14, 0x14, 0x14, 0x14, 0x14 } },
+ { 0x33, { 0xb7, 0xb7, 0xb7, 0xb7, 0xb7 } },
+ { 0x34, { 0x81, 0x81, 0x81, 0x81, 0x81 } },
+ { 0x35, { 0x80, 0x80, 0x80, 0x80, 0x80 } },
+ { 0x37, { 0x70, 0x70, 0x70, 0x70, 0x70 } },
+ { 0x38, { 0x04, 0x04, 0x02, 0x02, 0x02 } },
+ { 0x39, { 0x07, 0x07, 0x05, 0x05, 0x05 } },
+ { 0x3a, { 0x06, 0x06, 0x06, 0x06, 0x06 } },
+ { 0x3b, { 0x03, 0x03, 0x03, 0x03, 0x03 } },
+ { 0x3c, { 0x07, 0x06, 0x04, 0x04, 0x04 } },
+ { 0x3d, { 0xf0, 0xf0, 0xf0, 0xf0, 0x80 } },
+ { 0x3e, { 0x60, 0x60, 0x60, 0x60, 0xff } },
+ { 0x3f, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
+ { 0x40, { 0x5b, 0x5b, 0x5b, 0x57, 0x50 } },
+ { 0x41, { 0x30, 0x30, 0x30, 0x30, 0x18 } },
+ { 0x42, { 0x20, 0x20, 0x20, 0x00, 0x30 } },
+ { 0x43, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
+ { 0x44, { 0x3f, 0x3f, 0x3f, 0x3f, 0x3f } },
+ { 0x45, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
+ { 0x46, { 0x0a, 0x0a, 0x0a, 0x0a, 0x0a } },
+ { 0x47, { 0x00, 0x00, 0x95, 0x00, 0x95 } },
+ { 0x48, { 0xc0, 0xc0, 0xc0, 0xc0, 0xc0 } },
+ { 0x49, { 0xc0, 0xc0, 0xc0, 0xc0, 0xc0 } },
+ { 0x4a, { 0x40, 0x40, 0x33, 0x11, 0x11 } },
+ { 0x4b, { 0x40, 0x40, 0x00, 0x00, 0x00 } },
+ { 0x4c, { 0x40, 0x40, 0x99, 0x11, 0x11 } },
+ { 0x4d, { 0x40, 0x40, 0x00, 0x00, 0x00 } },
+ { 0x4e, { 0x40, 0x40, 0x66, 0x77, 0x77 } },
+ { 0x4f, { 0x40, 0x40, 0x00, 0x00, 0x00 } },
+ { 0x50, { 0x40, 0x40, 0x88, 0x33, 0x11 } },
+ { 0x51, { 0x40, 0x40, 0x00, 0x00, 0x00 } },
+ { 0x52, { 0x40, 0x40, 0x88, 0x02, 0x02 } },
+ { 0x53, { 0x40, 0x40, 0x00, 0x02, 0x02 } },
+ { 0x54, { 0x00, 0x00, 0x88, 0x33, 0x33 } },
+ { 0x55, { 0x40, 0x40, 0x00, 0x00, 0x00 } },
+ { 0x56, { 0x00, 0x00, 0x00, 0x0b, 0x00 } },
+ { 0x57, { 0x40, 0x40, 0x0a, 0x0b, 0x0a } },
+ { 0x58, { 0xaa, 0x00, 0x00, 0x00, 0x00 } },
+ { 0x59, { 0x7a, 0x40, 0x02, 0x02, 0x02 } },
+ { 0x5a, { 0x18, 0x18, 0x01, 0x01, 0x01 } },
+ { 0x5b, { 0x18, 0x18, 0x01, 0x01, 0x01 } },
+ { 0x5c, { 0x18, 0x18, 0x01, 0x01, 0x01 } },
+ { 0x5d, { 0x18, 0x18, 0x01, 0x01, 0x01 } },
+ { 0x5e, { 0xc0, 0xc0, 0xc0, 0xff, 0xc0 } },
+ { 0x5f, { 0xc0, 0xc0, 0xc0, 0xff, 0xc0 } },
+ { 0x60, { 0x40, 0x40, 0x00, 0x30, 0x30 } },
+ { 0x61, { 0x40, 0x40, 0x10, 0x30, 0x30 } },
+ { 0x62, { 0x40, 0x40, 0x00, 0x30, 0x30 } },
+ { 0x63, { 0x40, 0x40, 0x05, 0x30, 0x30 } },
+ { 0x64, { 0x40, 0x40, 0x06, 0x00, 0x30 } },
+ { 0x65, { 0x40, 0x40, 0x06, 0x08, 0x30 } },
+ { 0x66, { 0x40, 0x40, 0x00, 0x00, 0x20 } },
+ { 0x67, { 0x40, 0x40, 0x01, 0x04, 0x20 } },
+ { 0x68, { 0x00, 0x00, 0x30, 0x00, 0x20 } },
+ { 0x69, { 0xa0, 0xa0, 0x00, 0x08, 0x20 } },
+ { 0x6a, { 0x00, 0x00, 0x30, 0x00, 0x25 } },
+ { 0x6b, { 0xa0, 0xa0, 0x00, 0x06, 0x25 } },
+ { 0x6c, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
+ { 0x6d, { 0xa0, 0x60, 0x0c, 0x03, 0x0c } },
+ { 0x6e, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
+ { 0x6f, { 0xa0, 0x60, 0x04, 0x01, 0x04 } },
+ { 0x70, { 0x58, 0x58, 0xaa, 0xaa, 0xaa } },
+ { 0x71, { 0x58, 0x58, 0xaa, 0xaa, 0xaa } },
+ { 0x72, { 0x58, 0x58, 0xff, 0xff, 0xff } },
+ { 0x73, { 0x58, 0x58, 0xff, 0xff, 0xff } },
+ { 0x74, { 0x06, 0x06, 0x09, 0x05, 0x05 } },
+ { 0x75, { 0x06, 0x06, 0x0a, 0x10, 0x10 } },
+ { 0x76, { 0x10, 0x10, 0x06, 0x0a, 0x0a } },
+ { 0x77, { 0x12, 0x18, 0x28, 0x10, 0x28 } },
+ { 0x78, { 0xf8, 0xf8, 0xf8, 0xf8, 0xf8 } },
+ { 0x79, { 0x15, 0x15, 0x03, 0x03, 0x03 } },
+ { 0x7a, { 0x02, 0x02, 0x01, 0x04, 0x03 } },
+ { 0x7b, { 0x01, 0x02, 0x03, 0x03, 0x03 } },
+ { 0x7c, { 0x28, 0x28, 0x28, 0x28, 0x28 } },
+ { 0x7f, { 0x25, 0x92, 0x5f, 0x17, 0x2d } },
+ { 0x80, { 0x64, 0x64, 0x64, 0x74, 0x64 } },
+ { 0x83, { 0x06, 0x03, 0x04, 0x04, 0x04 } },
+ { 0x84, { 0xff, 0xff, 0xff, 0xff, 0xff } },
+ { 0x85, { 0x05, 0x05, 0x05, 0x05, 0x05 } },
+ { 0x86, { 0x00, 0x00, 0x11, 0x11, 0x11 } },
+ { 0x87, { 0x03, 0x03, 0x03, 0x03, 0x03 } },
+ { 0x88, { 0x09, 0x09, 0x09, 0x09, 0x09 } },
+ { 0x89, { 0x20, 0x20, 0x30, 0x20, 0x20 } },
+ { 0x8a, { 0x03, 0x03, 0x02, 0x03, 0x02 } },
+ { 0x8b, { 0x00, 0x07, 0x09, 0x00, 0x09 } },
+ { 0x8c, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
+ { 0x8d, { 0x4f, 0x4f, 0x4f, 0x3f, 0x4f } },
+ { 0x8e, { 0xf0, 0xf0, 0x60, 0xf0, 0xa0 } },
+ { 0x8f, { 0xe8, 0xe8, 0xe8, 0xe8, 0xe8 } },
+ { 0x90, { 0x10, 0x10, 0x10, 0x10, 0x10 } },
+ { 0x91, { 0x40, 0x40, 0x70, 0x70, 0x10 } },
+ { 0x92, { 0x00, 0x00, 0x00, 0x00, 0x04 } },
+ { 0x93, { 0x60, 0x60, 0x60, 0x60, 0x60 } },
+ { 0x94, { 0x00, 0x00, 0x00, 0x00, 0x03 } },
+ { 0x95, { 0x09, 0x09, 0x47, 0x47, 0x47 } },
+ { 0x96, { 0x80, 0xa0, 0xa0, 0x40, 0xa0 } },
+ { 0x97, { 0x60, 0x60, 0x60, 0x60, 0x60 } },
+ { 0x98, { 0x50, 0x50, 0x50, 0x30, 0x50 } },
+ { 0x99, { 0x10, 0x10, 0x10, 0x10, 0x10 } },
+ { 0x9a, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
+ { 0x9b, { 0x40, 0x40, 0x40, 0x30, 0x40 } },
+ { 0x9c, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
+ { 0xa0, { 0xf0, 0xf0, 0xf0, 0xf0, 0xf0 } },
+ { 0xa1, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
+ { 0xa2, { 0x30, 0x30, 0x00, 0x30, 0x00 } },
+ { 0xa3, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
+ { 0xa4, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
+ { 0xa5, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
+ { 0xa6, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
+ { 0xa7, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
+ { 0xa8, { 0x77, 0x77, 0x77, 0x77, 0x77 } },
+ { 0xa9, { 0x02, 0x02, 0x02, 0x02, 0x02 } },
+ { 0xaa, { 0x40, 0x40, 0x40, 0x40, 0x40 } },
+ { 0xac, { 0x1f, 0x1f, 0x1f, 0x1f, 0x1f } },
+ { 0xad, { 0x14, 0x14, 0x14, 0x14, 0x14 } },
+ { 0xae, { 0x78, 0x78, 0x78, 0x78, 0x78 } },
+ { 0xaf, { 0x06, 0x06, 0x06, 0x06, 0x07 } },
+ { 0xb0, { 0x1b, 0x1b, 0x1b, 0x19, 0x1b } },
+ { 0xb1, { 0x18, 0x17, 0x17, 0x18, 0x17 } },
+ { 0xb2, { 0x35, 0x82, 0x82, 0x38, 0x82 } },
+ { 0xb3, { 0xb6, 0xce, 0xc7, 0x5c, 0xb0 } },
+ { 0xb4, { 0x3f, 0x3e, 0x3e, 0x3f, 0x3e } },
+ { 0xb5, { 0x70, 0x58, 0x50, 0x68, 0x50 } },
+ { 0xb6, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
+ { 0xb7, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
+ { 0xb8, { 0x03, 0x03, 0x01, 0x01, 0x01 } },
+ { 0xb9, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
+ { 0xba, { 0x06, 0x06, 0x0a, 0x05, 0x0a } },
+ { 0xbb, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
+ { 0xbc, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
+ { 0xbd, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
+ { 0xbe, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
+ { 0xbf, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
+ { 0xc0, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
+ { 0xc1, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
+ { 0xc2, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
+ { 0xc3, { 0x00, 0x00, 0x88, 0x66, 0x88 } },
+ { 0xc4, { 0x10, 0x10, 0x00, 0x00, 0x00 } },
+ { 0xc5, { 0x00, 0x00, 0x44, 0x60, 0x44 } },
+ { 0xc6, { 0x10, 0x0a, 0x00, 0x00, 0x00 } },
+ { 0xc7, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
+ { 0xc8, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
+ { 0xc9, { 0x90, 0x04, 0x00, 0x00, 0x00 } },
+ { 0xca, { 0x90, 0x08, 0x01, 0x01, 0x01 } },
+ { 0xcb, { 0xa0, 0x04, 0x00, 0x44, 0x00 } },
+ { 0xcc, { 0xa0, 0x10, 0x03, 0x00, 0x03 } },
+ { 0xcd, { 0x06, 0x06, 0x06, 0x05, 0x06 } },
+ { 0xce, { 0x05, 0x05, 0x01, 0x01, 0x01 } },
+ { 0xcf, { 0x40, 0x20, 0x18, 0x18, 0x18 } },
+ { 0xd0, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
+ { 0xd1, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
+ { 0xd2, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
+ { 0xd3, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
+ { 0xd4, { 0x05, 0x05, 0x05, 0x05, 0x05 } },
+ { 0xd5, { 0x05, 0x05, 0x05, 0x03, 0x05 } },
+ { 0xd6, { 0xac, 0x22, 0xca, 0x8f, 0xca } },
+ { 0xd7, { 0x20, 0x20, 0x20, 0x20, 0x20 } },
+ { 0xd8, { 0x01, 0x01, 0x01, 0x01, 0x01 } },
+ { 0xd9, { 0x00, 0x00, 0x0f, 0x00, 0x0f } },
+ { 0xda, { 0x00, 0xff, 0xff, 0x0e, 0xff } },
+ { 0xdb, { 0x0a, 0x0a, 0x0a, 0x0a, 0x0a } },
+ { 0xdc, { 0x0a, 0x0a, 0x0a, 0x0a, 0x0a } },
+ { 0xdd, { 0x05, 0x05, 0x05, 0x05, 0x05 } },
+ { 0xde, { 0x0a, 0x0a, 0x0a, 0x0a, 0x0a } },
+ { 0xdf, { 0x42, 0x42, 0x44, 0x44, 0x04 } },
+ { 0xe0, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
+ { 0xe1, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
+ { 0xe2, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
+ { 0xe3, { 0x00, 0x00, 0x26, 0x06, 0x26 } },
+ { 0xe4, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
+ { 0xe5, { 0x01, 0x0a, 0x01, 0x01, 0x01 } },
+ { 0xe6, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
+ { 0xe7, { 0x08, 0x08, 0x08, 0x08, 0x08 } },
+ { 0xe8, { 0x63, 0x63, 0x63, 0x63, 0x63 } },
+ { 0xe9, { 0x59, 0x59, 0x59, 0x59, 0x59 } },
+ { 0xea, { 0x80, 0x80, 0x20, 0x80, 0x80 } },
+ { 0xeb, { 0x37, 0x37, 0x78, 0x37, 0x77 } },
+ { 0xec, { 0x1f, 0x1f, 0x25, 0x25, 0x25 } },
+ { 0xed, { 0x0a, 0x0a, 0x0a, 0x0a, 0x0a } },
+ { 0xee, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
+ { 0xef, { 0x70, 0x70, 0x58, 0x38, 0x58 } },
+ { 0xf0, { 0x00, 0x00, 0x00, 0x00, 0x00 } },
+};
+
+#endif /* HD29L2_PRIV */
diff --git a/drivers/media/dvb/frontends/it913x-fe-priv.h b/drivers/media/dvb/frontends/it913x-fe-priv.h
index 1c6fb4b66255..93b086ea7e1c 100644
--- a/drivers/media/dvb/frontends/it913x-fe-priv.h
+++ b/drivers/media/dvb/frontends/it913x-fe-priv.h
@@ -22,126 +22,126 @@ struct adctable { u32 adcFrequency;
/* clock and coeff tables only table 3 is used with IT9137*/
/* TODO other tables relate AF9035 may be removed */
static struct adctable tab1[] = {
- { 20156250, BANDWIDTH_6_MHZ,
+ { 20156250, 6000000,
0x02b8ba6e, 0x015c5d37, 0x00ae340d, 0x00ae2e9b, 0x00ae292a,
0x015c5d37, 0x00ae2e9b, 0x0057174e, 0x02f1, 0x015c },
- { 20156250, BANDWIDTH_7_MHZ,
+ { 20156250, 7000000,
0x032cd980, 0x01966cc0, 0x00cb3cba, 0x00cb3660, 0x00cb3007,
0x01966cc0, 0x00cb3660, 0x00659b30, 0x0285, 0x0196 },
- { 20156250, BANDWIDTH_8_MHZ,
+ { 20156250, 8000000,
0x03a0f893, 0x01d07c49, 0x00e84567, 0x00e83e25, 0x00e836e3,
0x01d07c49, 0x00e83e25, 0x00741f12, 0x0234, 0x01d0 },
- { 20156250, BANDWIDTH_5_MHZ,
+ { 20156250, 5000000,
0x02449b5c, 0x01224dae, 0x00912b60, 0x009126d7, 0x0091224e,
0x01224dae, 0x009126d7, 0x0048936b, 0x0387, 0x0122 }
};
static struct adctable tab2[] = {
- { 20187500, BANDWIDTH_6_MHZ,
+ { 20187500, 6000000,
0x02b7a654, 0x015bd32a, 0x00adef04, 0x00ade995, 0x00ade426,
0x015bd32a, 0x00ade995, 0x0056f4ca, 0x02f2, 0x015c },
- { 20187500, BANDWIDTH_7_MHZ,
+ { 20187500, 7000000,
0x032b9761, 0x0195cbb1, 0x00caec30, 0x00cae5d8, 0x00cadf81,
0x0195cbb1, 0x00cae5d8, 0x006572ec, 0x0286, 0x0196 },
- { 20187500, BANDWIDTH_8_MHZ,
+ { 20187500, 8000000,
0x039f886f, 0x01cfc438, 0x00e7e95b, 0x00e7e21c, 0x00e7dadd,
0x01cfc438, 0x00e7e21c, 0x0073f10e, 0x0235, 0x01d0 },
- { 20187500, BANDWIDTH_5_MHZ,
+ { 20187500, 5000000,
0x0243b546, 0x0121daa3, 0x0090f1d9, 0x0090ed51, 0x0090e8ca,
0x0121daa3, 0x0090ed51, 0x004876a9, 0x0388, 0x0122 }
};
static struct adctable tab3[] = {
- { 20250000, BANDWIDTH_6_MHZ,
+ { 20250000, 6000000,
0x02b580ad, 0x015ac057, 0x00ad6597, 0x00ad602b, 0x00ad5ac1,
0x015ac057, 0x00ad602b, 0x0056b016, 0x02f4, 0x015b },
- { 20250000, BANDWIDTH_7_MHZ,
+ { 20250000, 7000000,
0x03291620, 0x01948b10, 0x00ca4bda, 0x00ca4588, 0x00ca3f36,
0x01948b10, 0x00ca4588, 0x006522c4, 0x0288, 0x0195 },
- { 20250000, BANDWIDTH_8_MHZ,
+ { 20250000, 8000000,
0x039cab92, 0x01ce55c9, 0x00e7321e, 0x00e72ae4, 0x00e723ab,
0x01ce55c9, 0x00e72ae4, 0x00739572, 0x0237, 0x01ce },
- { 20250000, BANDWIDTH_5_MHZ,
+ { 20250000, 5000000,
0x0241eb3b, 0x0120f59e, 0x00907f53, 0x00907acf, 0x0090764b,
0x0120f59e, 0x00907acf, 0x00483d67, 0x038b, 0x0121 }
};
static struct adctable tab4[] = {
- { 20583333, BANDWIDTH_6_MHZ,
+ { 20583333, 6000000,
0x02aa4598, 0x015522cc, 0x00aa96bb, 0x00aa9166, 0x00aa8c12,
0x015522cc, 0x00aa9166, 0x005548b3, 0x0300, 0x0155 },
- { 20583333, BANDWIDTH_7_MHZ,
+ { 20583333, 7000000,
0x031bfbdc, 0x018dfdee, 0x00c7052f, 0x00c6fef7, 0x00c6f8bf,
0x018dfdee, 0x00c6fef7, 0x00637f7b, 0x0293, 0x018e },
- { 20583333, BANDWIDTH_8_MHZ,
+ { 20583333, 8000000,
0x038db21f, 0x01c6d910, 0x00e373a3, 0x00e36c88, 0x00e3656d,
0x01c6d910, 0x00e36c88, 0x0071b644, 0x0240, 0x01c7 },
- { 20583333, BANDWIDTH_5_MHZ,
+ { 20583333, 5000000,
0x02388f54, 0x011c47aa, 0x008e2846, 0x008e23d5, 0x008e1f64,
0x011c47aa, 0x008e23d5, 0x004711ea, 0x039a, 0x011c }
};
static struct adctable tab5[] = {
- { 20416667, BANDWIDTH_6_MHZ,
+ { 20416667, 6000000,
0x02afd765, 0x0157ebb3, 0x00abfb39, 0x00abf5d9, 0x00abf07a,
0x0157ebb3, 0x00abf5d9, 0x0055faed, 0x02fa, 0x0158 },
- { 20416667, BANDWIDTH_7_MHZ,
+ { 20416667, 7000000,
0x03227b4b, 0x01913da6, 0x00c8a518, 0x00c89ed3, 0x00c8988e,
0x01913da6, 0x00c89ed3, 0x00644f69, 0x028d, 0x0191 },
- { 20416667, BANDWIDTH_8_MHZ,
+ { 20416667, 8000000,
0x03951f32, 0x01ca8f99, 0x00e54ef7, 0x00e547cc, 0x00e540a2,
0x01ca8f99, 0x00e547cc, 0x0072a3e6, 0x023c, 0x01cb },
- { 20416667, BANDWIDTH_5_MHZ,
+ { 20416667, 5000000,
0x023d337f, 0x011e99c0, 0x008f515a, 0x008f4ce0, 0x008f4865,
0x011e99c0, 0x008f4ce0, 0x0047a670, 0x0393, 0x011f }
};
static struct adctable tab6[] = {
- { 20480000, BANDWIDTH_6_MHZ,
+ { 20480000, 6000000,
0x02adb6db, 0x0156db6e, 0x00ab7312, 0x00ab6db7, 0x00ab685c,
0x0156db6e, 0x00ab6db7, 0x0055b6db, 0x02fd, 0x0157 },
- { 20480000, BANDWIDTH_7_MHZ,
+ { 20480000, 7000000,
0x03200000, 0x01900000, 0x00c80640, 0x00c80000, 0x00c7f9c0,
0x01900000, 0x00c80000, 0x00640000, 0x028f, 0x0190 },
- { 20480000, BANDWIDTH_8_MHZ,
+ { 20480000, 8000000,
0x03924925, 0x01c92492, 0x00e4996e, 0x00e49249, 0x00e48b25,
0x01c92492, 0x00e49249, 0x00724925, 0x023d, 0x01c9 },
- { 20480000, BANDWIDTH_5_MHZ,
+ { 20480000, 5000000,
0x023b6db7, 0x011db6db, 0x008edfe5, 0x008edb6e, 0x008ed6f7,
0x011db6db, 0x008edb6e, 0x00476db7, 0x0396, 0x011e }
};
static struct adctable tab7[] = {
- { 20500000, BANDWIDTH_6_MHZ,
+ { 20500000, 6000000,
0x02ad0b99, 0x015685cc, 0x00ab4840, 0x00ab42e6, 0x00ab3d8c,
0x015685cc, 0x00ab42e6, 0x0055a173, 0x02fd, 0x0157 },
- { 20500000, BANDWIDTH_7_MHZ,
+ { 20500000, 7000000,
0x031f3832, 0x018f9c19, 0x00c7d44b, 0x00c7ce0c, 0x00c7c7ce,
0x018f9c19, 0x00c7ce0c, 0x0063e706, 0x0290, 0x0190 },
- { 20500000, BANDWIDTH_8_MHZ,
+ { 20500000, 8000000,
0x039164cb, 0x01c8b266, 0x00e46056, 0x00e45933, 0x00e45210,
0x01c8b266, 0x00e45933, 0x00722c99, 0x023e, 0x01c9 },
- { 20500000, BANDWIDTH_5_MHZ,
+ { 20500000, 5000000,
0x023adeff, 0x011d6f80, 0x008ebc36, 0x008eb7c0, 0x008eb34a,
0x011d6f80, 0x008eb7c0, 0x00475be0, 0x0396, 0x011d }
};
static struct adctable tab8[] = {
- { 20625000, BANDWIDTH_6_MHZ,
+ { 20625000, 6000000,
0x02a8e4bd, 0x0154725e, 0x00aa3e81, 0x00aa392f, 0x00aa33de,
0x0154725e, 0x00aa392f, 0x00551c98, 0x0302, 0x0154 },
- { 20625000, BANDWIDTH_7_MHZ,
+ { 20625000, 7000000,
0x031a6032, 0x018d3019, 0x00c69e41, 0x00c6980c, 0x00c691d8,
0x018d3019, 0x00c6980c, 0x00634c06, 0x0294, 0x018d },
- { 20625000, BANDWIDTH_8_MHZ,
+ { 20625000, 8000000,
0x038bdba6, 0x01c5edd3, 0x00e2fe02, 0x00e2f6ea, 0x00e2efd2,
0x01c5edd3, 0x00e2f6ea, 0x00717b75, 0x0242, 0x01c6 },
- { 20625000, BANDWIDTH_5_MHZ,
+ { 20625000, 5000000,
0x02376948, 0x011bb4a4, 0x008ddec1, 0x008dda52, 0x008dd5e3,
0x011bb4a4, 0x008dda52, 0x0046ed29, 0x039c, 0x011c }
@@ -153,8 +153,7 @@ struct table {
};
static struct table fe_clockTable[] = {
- {12000000, tab3}, /* FPGA */
- {16384000, tab6}, /* 16.38MHz */
+ {12000000, tab3}, /* 12.00MHz */
{20480000, tab6}, /* 20.48MHz */
{36000000, tab3}, /* 36.00MHz */
{30000000, tab1}, /* 30.00MHz */
@@ -164,7 +163,6 @@ static struct table fe_clockTable[] = {
{34000000, tab2}, /* 34.00MHz */
{24000000, tab1}, /* 24.00MHz */
{22000000, tab8}, /* 22.00MHz */
- {12000000, tab3} /* 12.00MHz */
};
/* fe get */
@@ -205,6 +203,10 @@ fe_modulation_t fe_con[] = {
/* Standard demodulator functions */
static struct it913xset set_solo_fe[] = {
+ {PRO_LINK, GPIOH5_EN, {0x01}, 0x01},
+ {PRO_LINK, GPIOH5_ON, {0x01}, 0x01},
+ {PRO_LINK, GPIOH5_O, {0x00}, 0x01},
+ {PRO_LINK, GPIOH5_O, {0x01}, 0x01},
{PRO_LINK, DVBT_INTEN, {0x04}, 0x01},
{PRO_LINK, DVBT_ENABLE, {0x05}, 0x01},
{PRO_DMOD, MP2IF_MPEG_PAR_MODE, {0x00}, 0x01},
@@ -228,13 +230,127 @@ static struct it913xset init_1[] = {
{PRO_LINK, LOCK3_OUT, {0x01}, 0x01},
{PRO_LINK, PADMISCDRSR, {0x01}, 0x01},
{PRO_LINK, PADMISCDR2, {0x00}, 0x01},
+ {PRO_DMOD, 0xec57, {0x00, 0x00}, 0x02},
{PRO_LINK, PADMISCDR4, {0x00}, 0x01}, /* Power up */
{PRO_LINK, PADMISCDR8, {0x00}, 0x01},
{0xff, 0x0000, {0x00}, 0x00} /* Terminating Entry */
};
-/* ---------IT9137 0x38 tuner init---------- */
-static struct it913xset it9137_set[] = {
+
+/* Version 1 types */
+static struct it913xset it9135_v1[] = {
+ {PRO_DMOD, 0x0051, {0x01}, 0x01},
+ {PRO_DMOD, 0x0070, {0x0a}, 0x01},
+ {PRO_DMOD, 0x007e, {0x04}, 0x01},
+ {PRO_DMOD, 0x0081, {0x0a}, 0x01},
+ {PRO_DMOD, 0x008a, {0x01}, 0x01},
+ {PRO_DMOD, 0x008e, {0x01}, 0x01},
+ {PRO_DMOD, 0x0092, {0x06}, 0x01},
+ {PRO_DMOD, 0x0099, {0x01}, 0x01},
+ {PRO_DMOD, 0x009f, {0xe1}, 0x01},
+ {PRO_DMOD, 0x00a0, {0xcf}, 0x01},
+ {PRO_DMOD, 0x00a3, {0x01}, 0x01},
+ {PRO_DMOD, 0x00a5, {0x01}, 0x01},
+ {PRO_DMOD, 0x00a6, {0x01}, 0x01},
+ {PRO_DMOD, 0x00a9, {0x00}, 0x01},
+ {PRO_DMOD, 0x00aa, {0x01}, 0x01},
+ {PRO_DMOD, 0x00b0, {0x01}, 0x01},
+ {PRO_DMOD, 0x00c2, {0x05}, 0x01},
+ {PRO_DMOD, 0x00c6, {0x19}, 0x01},
+ {PRO_DMOD, 0xf000, {0x0f}, 0x01},
+ {PRO_DMOD, 0xf016, {0x10}, 0x01},
+ {PRO_DMOD, 0xf017, {0x04}, 0x01},
+ {PRO_DMOD, 0xf018, {0x05}, 0x01},
+ {PRO_DMOD, 0xf019, {0x04}, 0x01},
+ {PRO_DMOD, 0xf01a, {0x05}, 0x01},
+ {PRO_DMOD, 0xf021, {0x03}, 0x01},
+ {PRO_DMOD, 0xf022, {0x0a}, 0x01},
+ {PRO_DMOD, 0xf023, {0x0a}, 0x01},
+ {PRO_DMOD, 0xf02b, {0x00}, 0x01},
+ {PRO_DMOD, 0xf02c, {0x01}, 0x01},
+ {PRO_DMOD, 0xf064, {0x03}, 0x01},
+ {PRO_DMOD, 0xf065, {0xf9}, 0x01},
+ {PRO_DMOD, 0xf066, {0x03}, 0x01},
+ {PRO_DMOD, 0xf067, {0x01}, 0x01},
+ {PRO_DMOD, 0xf06f, {0xe0}, 0x01},
+ {PRO_DMOD, 0xf070, {0x03}, 0x01},
+ {PRO_DMOD, 0xf072, {0x0f}, 0x01},
+ {PRO_DMOD, 0xf073, {0x03}, 0x01},
+ {PRO_DMOD, 0xf078, {0x00}, 0x01},
+ {PRO_DMOD, 0xf087, {0x00}, 0x01},
+ {PRO_DMOD, 0xf09b, {0x3f}, 0x01},
+ {PRO_DMOD, 0xf09c, {0x00}, 0x01},
+ {PRO_DMOD, 0xf09d, {0x20}, 0x01},
+ {PRO_DMOD, 0xf09e, {0x00}, 0x01},
+ {PRO_DMOD, 0xf09f, {0x0c}, 0x01},
+ {PRO_DMOD, 0xf0a0, {0x00}, 0x01},
+ {PRO_DMOD, 0xf130, {0x04}, 0x01},
+ {PRO_DMOD, 0xf132, {0x04}, 0x01},
+ {PRO_DMOD, 0xf144, {0x1a}, 0x01},
+ {PRO_DMOD, 0xf146, {0x00}, 0x01},
+ {PRO_DMOD, 0xf14a, {0x01}, 0x01},
+ {PRO_DMOD, 0xf14c, {0x00}, 0x01},
+ {PRO_DMOD, 0xf14d, {0x00}, 0x01},
+ {PRO_DMOD, 0xf14f, {0x04}, 0x01},
+ {PRO_DMOD, 0xf158, {0x7f}, 0x01},
+ {PRO_DMOD, 0xf15a, {0x00}, 0x01},
+ {PRO_DMOD, 0xf15b, {0x08}, 0x01},
+ {PRO_DMOD, 0xf15d, {0x03}, 0x01},
+ {PRO_DMOD, 0xf15e, {0x05}, 0x01},
+ {PRO_DMOD, 0xf163, {0x05}, 0x01},
+ {PRO_DMOD, 0xf166, {0x01}, 0x01},
+ {PRO_DMOD, 0xf167, {0x40}, 0x01},
+ {PRO_DMOD, 0xf168, {0x0f}, 0x01},
+ {PRO_DMOD, 0xf17a, {0x00}, 0x01},
+ {PRO_DMOD, 0xf17b, {0x00}, 0x01},
+ {PRO_DMOD, 0xf183, {0x01}, 0x01},
+ {PRO_DMOD, 0xf19d, {0x40}, 0x01},
+ {PRO_DMOD, 0xf1bc, {0x36}, 0x01},
+ {PRO_DMOD, 0xf1bd, {0x00}, 0x01},
+ {PRO_DMOD, 0xf1cb, {0xa0}, 0x01},
+ {PRO_DMOD, 0xf1cc, {0x01}, 0x01},
+ {PRO_DMOD, 0xf204, {0x10}, 0x01},
+ {PRO_DMOD, 0xf214, {0x00}, 0x01},
+ {PRO_DMOD, 0xf40e, {0x0a}, 0x01},
+ {PRO_DMOD, 0xf40f, {0x40}, 0x01},
+ {PRO_DMOD, 0xf410, {0x08}, 0x01},
+ {PRO_DMOD, 0xf55f, {0x0a}, 0x01},
+ {PRO_DMOD, 0xf561, {0x15}, 0x01},
+ {PRO_DMOD, 0xf562, {0x20}, 0x01},
+ {PRO_DMOD, 0xf5df, {0xfb}, 0x01},
+ {PRO_DMOD, 0xf5e0, {0x00}, 0x01},
+ {PRO_DMOD, 0xf5e3, {0x09}, 0x01},
+ {PRO_DMOD, 0xf5e4, {0x01}, 0x01},
+ {PRO_DMOD, 0xf5e5, {0x01}, 0x01},
+ {PRO_DMOD, 0xf5f8, {0x01}, 0x01},
+ {PRO_DMOD, 0xf5fd, {0x01}, 0x01},
+ {PRO_DMOD, 0xf600, {0x05}, 0x01},
+ {PRO_DMOD, 0xf601, {0x08}, 0x01},
+ {PRO_DMOD, 0xf602, {0x0b}, 0x01},
+ {PRO_DMOD, 0xf603, {0x0e}, 0x01},
+ {PRO_DMOD, 0xf604, {0x11}, 0x01},
+ {PRO_DMOD, 0xf605, {0x14}, 0x01},
+ {PRO_DMOD, 0xf606, {0x17}, 0x01},
+ {PRO_DMOD, 0xf607, {0x1f}, 0x01},
+ {PRO_DMOD, 0xf60e, {0x00}, 0x01},
+ {PRO_DMOD, 0xf60f, {0x04}, 0x01},
+ {PRO_DMOD, 0xf610, {0x32}, 0x01},
+ {PRO_DMOD, 0xf611, {0x10}, 0x01},
+ {PRO_DMOD, 0xf707, {0xfc}, 0x01},
+ {PRO_DMOD, 0xf708, {0x00}, 0x01},
+ {PRO_DMOD, 0xf709, {0x37}, 0x01},
+ {PRO_DMOD, 0xf70a, {0x00}, 0x01},
+ {PRO_DMOD, 0xf78b, {0x01}, 0x01},
+ {PRO_DMOD, 0xf80f, {0x40}, 0x01},
+ {PRO_DMOD, 0xf810, {0x54}, 0x01},
+ {PRO_DMOD, 0xf811, {0x5a}, 0x01},
+ {PRO_DMOD, 0xf905, {0x01}, 0x01},
+ {PRO_DMOD, 0xfb06, {0x03}, 0x01},
+ {PRO_DMOD, 0xfd8b, {0x00}, 0x01},
+ {0xff, 0x0000, {0x00}, 0x00} /* Terminating Entry */
+};
+
+static struct it913xset it9135_38[] = {
{PRO_DMOD, 0x0043, {0x00}, 0x01},
{PRO_DMOD, 0x0046, {0x38}, 0x01},
{PRO_DMOD, 0x0051, {0x01}, 0x01},
@@ -244,7 +360,7 @@ static struct it913xset it9137_set[] = {
{PRO_DMOD, 0x0075, {0x8c, 0x8c, 0x8c, 0xc8, 0x01}, 0x05},
{PRO_DMOD, 0x007e, {0x04, 0x00}, 0x02},
{PRO_DMOD, 0x0081, { 0x0a, 0x12, 0x02, 0x0a, 0x03, 0xc8, 0xb8,
- 0xd0, 0xc3, 0x01 }, 0x0a},
+ 0xd0, 0xc3, 0x01}, 0x0a},
{PRO_DMOD, 0x008e, {0x01}, 0x01},
{PRO_DMOD, 0x0092, {0x06, 0x00, 0x00, 0x00, 0x00}, 0x05},
{PRO_DMOD, 0x0099, {0x01}, 0x01},
@@ -262,15 +378,25 @@ static struct it913xset it9137_set[] = {
{PRO_DMOD, 0x00f3, {0x05, 0x8c, 0x8c}, 0x03},
{PRO_DMOD, 0x00f8, {0x03, 0x06, 0x06}, 0x03},
{PRO_DMOD, 0x00fc, { 0x02, 0x02, 0x02, 0x09, 0x50, 0x7b, 0x77,
- 0x00, 0x02, 0xc8, 0x05, 0x7b }, 0x0c},
+ 0x00, 0x02, 0xc8, 0x05, 0x7b}, 0x0c},
{PRO_DMOD, 0x0109, {0x02}, 0x01},
- {PRO_DMOD, 0x0115, {0x0a, 0x03}, 0x02},
- {PRO_DMOD, 0x011a, {0xc8, 0x7b, 0xbc, 0xa0}, 0x04},
+ {PRO_DMOD, 0x0115, {0x0a, 0x03, 0x02, 0x80}, 0x04},
+ {PRO_DMOD, 0x011a, {0xc8, 0x7b, 0x8a, 0xa0}, 0x04},
{PRO_DMOD, 0x0122, {0x02, 0x18, 0xc3}, 0x03},
{PRO_DMOD, 0x0127, {0x00, 0x07}, 0x02},
{PRO_DMOD, 0x012a, {0x53, 0x51, 0x4e, 0x43}, 0x04},
{PRO_DMOD, 0x0137, {0x01, 0x00, 0x07, 0x00, 0x06}, 0x05},
- {PRO_DMOD, 0x013d, {0x00, 0x01, 0x5b, 0xc8}, 0x04},
+ {PRO_DMOD, 0x013d, {0x00, 0x01, 0x5b, 0xc8, 0x59}, 0x05},
+ {PRO_DMOD, 0xf000, {0x0f}, 0x01},
+ {PRO_DMOD, 0xf016, {0x10, 0x04, 0x05, 0x04, 0x05}, 0x05},
+ {PRO_DMOD, 0xf01f, {0x8c, 0x00, 0x03, 0x0a, 0x0a}, 0x05},
+ {PRO_DMOD, 0xf029, {0x8c, 0x00, 0x00, 0x01}, 0x04},
+ {PRO_DMOD, 0xf064, {0x03, 0xf9, 0x03, 0x01}, 0x04},
+ {PRO_DMOD, 0xf06f, {0xe0, 0x03}, 0x02},
+ {PRO_DMOD, 0xf072, {0x0f, 0x03}, 0x02},
+ {PRO_DMOD, 0xf077, {0x01, 0x00}, 0x02},
+ {PRO_DMOD, 0xf085, {0x00, 0x02, 0x00}, 0x03},
+ {PRO_DMOD, 0xf09b, {0x3f, 0x00, 0x20, 0x00, 0x0c, 0x00}, 0x06},
{PRO_DMOD, 0xf130, {0x04}, 0x01},
{PRO_DMOD, 0xf132, {0x04}, 0x01},
{PRO_DMOD, 0xf144, {0x1a}, 0x01},
@@ -301,7 +427,7 @@ static struct it913xset it9137_set[] = {
{PRO_DMOD, 0xf5f8, {0x01}, 0x01},
{PRO_DMOD, 0xf5fd, {0x01}, 0x01},
{PRO_DMOD, 0xf600, { 0x05, 0x08, 0x0b, 0x0e, 0x11, 0x14, 0x17,
- 0x1f }, 0x08},
+ 0x1f}, 0x08},
{PRO_DMOD, 0xf60e, {0x00, 0x04, 0x32, 0x10}, 0x04},
{PRO_DMOD, 0xf707, {0xfc, 0x00, 0x37, 0x00}, 0x04},
{PRO_DMOD, 0xf78b, {0x01}, 0x01},
@@ -309,21 +435,605 @@ static struct it913xset it9137_set[] = {
{PRO_DMOD, 0xf905, {0x01}, 0x01},
{PRO_DMOD, 0xfb06, {0x03}, 0x01},
{PRO_DMOD, 0xfd8b, {0x00}, 0x01},
- {PRO_LINK, GPIOH5_EN, {0x01}, 0x01},
- {PRO_LINK, GPIOH5_ON, {0x01}, 0x01},
- {PRO_LINK, GPIOH5_O, {0x00}, 0x01},
- {PRO_LINK, GPIOH5_O, {0x01}, 0x01},
- {0xff, 0x0000, {0x00}, 0x00}, /* Terminating Entry */
+ {0xff, 0x0000, {0x00}, 0x00} /* Terminating Entry */
+};
+
+static struct it913xset it9135_51[] = {
+ {PRO_DMOD, 0x0043, {0x00}, 0x01},
+ {PRO_DMOD, 0x0046, {0x51}, 0x01},
+ {PRO_DMOD, 0x0051, {0x01}, 0x01},
+ {PRO_DMOD, 0x005f, {0x00, 0x00}, 0x02},
+ {PRO_DMOD, 0x0068, {0x0a}, 0x01},
+ {PRO_DMOD, 0x0070, {0x0a, 0x06, 0x02}, 0x03},
+ {PRO_DMOD, 0x0075, {0x8c, 0x8c, 0x8c, 0xc8, 0x01}, 0x05},
+ {PRO_DMOD, 0x007e, {0x04, 0x00}, 0x02},
+ {PRO_DMOD, 0x0081, { 0x0a, 0x12, 0x02, 0x0a, 0x03, 0xc0, 0x96,
+ 0xcf, 0xc3, 0x01}, 0x0a},
+ {PRO_DMOD, 0x008e, {0x01}, 0x01},
+ {PRO_DMOD, 0x0092, {0x06, 0x00, 0x00, 0x00, 0x00}, 0x05},
+ {PRO_DMOD, 0x0099, {0x01}, 0x01},
+ {PRO_DMOD, 0x009b, {0x3c, 0x28}, 0x02},
+ {PRO_DMOD, 0x009f, {0xe1, 0xcf}, 0x02},
+ {PRO_DMOD, 0x00a3, {0x01, 0x5a, 0x01, 0x01}, 0x04},
+ {PRO_DMOD, 0x00a9, {0x00, 0x01}, 0x02},
+ {PRO_DMOD, 0x00b0, {0x01}, 0x01},
+ {PRO_DMOD, 0x00b3, {0x02, 0x3c}, 0x02},
+ {PRO_DMOD, 0x00b6, {0x14}, 0x01},
+ {PRO_DMOD, 0x00c0, {0x11, 0x00, 0x05}, 0x03},
+ {PRO_DMOD, 0x00c4, {0x00}, 0x01},
+ {PRO_DMOD, 0x00c6, {0x19, 0x00}, 0x02},
+ {PRO_DMOD, 0x00cc, {0x2e, 0x51, 0x33}, 0x03},
+ {PRO_DMOD, 0x00f3, {0x05, 0x8c, 0x8c}, 0x03},
+ {PRO_DMOD, 0x00f8, {0x03, 0x06, 0x06}, 0x03},
+ {PRO_DMOD, 0x00fc, { 0x03, 0x02, 0x02, 0x09, 0x50, 0x7a, 0x77,
+ 0x01, 0x02, 0xb0, 0x02, 0x7a}, 0x0c},
+ {PRO_DMOD, 0x0109, {0x02}, 0x01},
+ {PRO_DMOD, 0x0115, {0x0a, 0x03, 0x02, 0x80}, 0x04},
+ {PRO_DMOD, 0x011a, {0xc0, 0x7a, 0xac, 0x8c}, 0x04},
+ {PRO_DMOD, 0x0122, {0x02, 0x70, 0xa4}, 0x03},
+ {PRO_DMOD, 0x0127, {0x00, 0x07}, 0x02},
+ {PRO_DMOD, 0x012a, {0x53, 0x51, 0x4e, 0x43}, 0x04},
+ {PRO_DMOD, 0x0137, {0x01, 0x00, 0x07, 0x00, 0x06}, 0x05},
+ {PRO_DMOD, 0x013d, {0x00, 0x01, 0x5b, 0xc0, 0x59}, 0x05},
+ {PRO_DMOD, 0xf000, {0x0f}, 0x01},
+ {PRO_DMOD, 0xf016, {0x10, 0x04, 0x05, 0x04, 0x05}, 0x05},
+ {PRO_DMOD, 0xf01f, {0x8c, 0x00, 0x03, 0x0a, 0x0a}, 0x05},
+ {PRO_DMOD, 0xf029, {0x8c, 0x00, 0x00, 0x01}, 0x04},
+ {PRO_DMOD, 0xf064, {0x03, 0xf9, 0x03, 0x01}, 0x04},
+ {PRO_DMOD, 0xf06f, {0xe0, 0x03}, 0x02},
+ {PRO_DMOD, 0xf072, {0x0f, 0x03}, 0x02},
+ {PRO_DMOD, 0xf077, {0x01, 0x00}, 0x02},
+ {PRO_DMOD, 0xf085, {0xc0, 0x01, 0x00}, 0x03},
+ {PRO_DMOD, 0xf09b, {0x3f, 0x00, 0x20, 0x00, 0x0c, 0x00}, 0x06},
+ {PRO_DMOD, 0xf130, {0x04}, 0x01},
+ {PRO_DMOD, 0xf132, {0x04}, 0x01},
+ {PRO_DMOD, 0xf144, {0x1a}, 0x01},
+ {PRO_DMOD, 0xf146, {0x00}, 0x01},
+ {PRO_DMOD, 0xf14a, {0x01}, 0x01},
+ {PRO_DMOD, 0xf14c, {0x00, 0x00}, 0x02},
+ {PRO_DMOD, 0xf14f, {0x04}, 0x01},
+ {PRO_DMOD, 0xf158, {0x7f}, 0x01},
+ {PRO_DMOD, 0xf15a, {0x00, 0x08}, 0x02},
+ {PRO_DMOD, 0xf15d, {0x03, 0x05}, 0x02},
+ {PRO_DMOD, 0xf163, {0x05}, 0x01},
+ {PRO_DMOD, 0xf166, {0x01, 0x40, 0x0f}, 0x03},
+ {PRO_DMOD, 0xf17a, {0x00, 0x00}, 0x02},
+ {PRO_DMOD, 0xf183, {0x01}, 0x01},
+ {PRO_DMOD, 0xf19d, {0x40}, 0x01},
+ {PRO_DMOD, 0xf1bc, {0x36, 0x00}, 0x02},
+ {PRO_DMOD, 0xf1cb, {0xa0, 0x01}, 0x02},
+ {PRO_DMOD, 0xf204, {0x10}, 0x01},
+ {PRO_DMOD, 0xf214, {0x00}, 0x01},
+ {PRO_DMOD, 0xf24c, {0x88, 0x95, 0x9a, 0x90}, 0x04},
+ {PRO_DMOD, 0xf25a, {0x07, 0xe8, 0x03, 0xb0, 0x04}, 0x05},
+ {PRO_DMOD, 0xf270, {0x01, 0x02, 0x01, 0x02}, 0x04},
+ {PRO_DMOD, 0xf40e, {0x0a, 0x40, 0x08}, 0x03},
+ {PRO_DMOD, 0xf55f, {0x0a}, 0x01},
+ {PRO_DMOD, 0xf561, {0x15, 0x20}, 0x02},
+ {PRO_DMOD, 0xf5df, {0xfb, 0x00}, 0x02},
+ {PRO_DMOD, 0xf5e3, {0x09, 0x01, 0x01}, 0x03},
+ {PRO_DMOD, 0xf5f8, {0x01}, 0x01},
+ {PRO_DMOD, 0xf5fd, {0x01}, 0x01},
+ {PRO_DMOD, 0xf600, { 0x05, 0x08, 0x0b, 0x0e, 0x11, 0x14, 0x17,
+ 0x1f}, 0x08},
+ {PRO_DMOD, 0xf60e, {0x00, 0x04, 0x32, 0x10}, 0x04},
+ {PRO_DMOD, 0xf707, {0xfc, 0x00, 0x37, 0x00}, 0x04},
+ {PRO_DMOD, 0xf78b, {0x01}, 0x01},
+ {PRO_DMOD, 0xf80f, {0x40, 0x54, 0x5a}, 0x03},
+ {PRO_DMOD, 0xf905, {0x01}, 0x01},
+ {PRO_DMOD, 0xfb06, {0x03}, 0x01},
+ {PRO_DMOD, 0xfd8b, {0x00}, 0x01},
+ {0xff, 0x0000, {0x00}, 0x00} /* Terminating Entry */
+};
+
+static struct it913xset it9135_52[] = {
+ {PRO_DMOD, 0x0043, {0x00}, 0x01},
+ {PRO_DMOD, 0x0046, {0x52}, 0x01},
+ {PRO_DMOD, 0x0051, {0x01}, 0x01},
+ {PRO_DMOD, 0x005f, {0x00, 0x00}, 0x02},
+ {PRO_DMOD, 0x0068, {0x10}, 0x01},
+ {PRO_DMOD, 0x0070, {0x0a, 0x05, 0x02}, 0x03},
+ {PRO_DMOD, 0x0075, {0x8c, 0x8c, 0x8c, 0xa0, 0x01}, 0x05},
+ {PRO_DMOD, 0x007e, {0x04, 0x00}, 0x02},
+ {PRO_DMOD, 0x0081, { 0x0a, 0x12, 0x03, 0x0a, 0x03, 0xb3, 0x97,
+ 0xc0, 0x9e, 0x01}, 0x0a},
+ {PRO_DMOD, 0x008e, {0x01}, 0x01},
+ {PRO_DMOD, 0x0092, {0x06, 0x00, 0x00, 0x00, 0x00}, 0x05},
+ {PRO_DMOD, 0x0099, {0x01}, 0x01},
+ {PRO_DMOD, 0x009b, {0x3c, 0x28}, 0x02},
+ {PRO_DMOD, 0x009f, {0xe1, 0xcf}, 0x02},
+ {PRO_DMOD, 0x00a3, {0x01, 0x5c, 0x01, 0x01}, 0x04},
+ {PRO_DMOD, 0x00a9, {0x00, 0x01}, 0x02},
+ {PRO_DMOD, 0x00b0, {0x01}, 0x01},
+ {PRO_DMOD, 0x00b3, {0x02, 0x3c}, 0x02},
+ {PRO_DMOD, 0x00b6, {0x14}, 0x01},
+ {PRO_DMOD, 0x00c0, {0x11, 0x00, 0x05}, 0x03},
+ {PRO_DMOD, 0x00c4, {0x00}, 0x01},
+ {PRO_DMOD, 0x00c6, {0x19, 0x00}, 0x02},
+ {PRO_DMOD, 0x00cc, {0x2e, 0x51, 0x33}, 0x03},
+ {PRO_DMOD, 0x00f3, {0x05, 0x91, 0x8c}, 0x03},
+ {PRO_DMOD, 0x00f8, {0x03, 0x06, 0x06}, 0x03},
+ {PRO_DMOD, 0x00fc, { 0x03, 0x02, 0x02, 0x09, 0x50, 0x74, 0x77,
+ 0x02, 0x02, 0xae, 0x02, 0x6e}, 0x0c},
+ {PRO_DMOD, 0x0109, {0x02}, 0x01},
+ {PRO_DMOD, 0x0115, {0x0a, 0x03, 0x02, 0x80}, 0x04},
+ {PRO_DMOD, 0x011a, {0xcd, 0x62, 0xa4, 0x8c}, 0x04},
+ {PRO_DMOD, 0x0122, {0x03, 0x18, 0x9e}, 0x03},
+ {PRO_DMOD, 0x0127, {0x00, 0x07}, 0x02},
+ {PRO_DMOD, 0x012a, {0x53, 0x51, 0x4e, 0x43}, 0x04},
+ {PRO_DMOD, 0x0137, {0x00, 0x00, 0x07, 0x00, 0x06}, 0x05},
+ {PRO_DMOD, 0x013d, {0x00, 0x01, 0x5b, 0xb6, 0x59}, 0x05},
+ {PRO_DMOD, 0xf000, {0x0f}, 0x01},
+ {PRO_DMOD, 0xf016, {0x10, 0x04, 0x05, 0x04, 0x05}, 0x05},
+ {PRO_DMOD, 0xf01f, {0x8c, 0x00, 0x03, 0x0a, 0x0a}, 0x05},
+ {PRO_DMOD, 0xf029, {0x8c, 0x00, 0x00, 0x01}, 0x04},
+ {PRO_DMOD, 0xf064, {0x03, 0xf9, 0x03, 0x01}, 0x04},
+ {PRO_DMOD, 0xf06f, {0xe0, 0x03}, 0x02},
+ {PRO_DMOD, 0xf072, {0x0f, 0x03}, 0x02},
+ {PRO_DMOD, 0xf077, {0x01, 0x00}, 0x02},
+ {PRO_DMOD, 0xf085, {0xc0, 0x01, 0x00}, 0x03},
+ {PRO_DMOD, 0xf09b, {0x3f, 0x00, 0x20, 0x00, 0x0c, 0x00}, 0x06},
+ {PRO_DMOD, 0xf130, {0x04}, 0x01},
+ {PRO_DMOD, 0xf132, {0x04}, 0x01},
+ {PRO_DMOD, 0xf144, {0x1a}, 0x01},
+ {PRO_DMOD, 0xf146, {0x00}, 0x01},
+ {PRO_DMOD, 0xf14a, {0x01}, 0x01},
+ {PRO_DMOD, 0xf14c, {0x00, 0x00}, 0x02},
+ {PRO_DMOD, 0xf14f, {0x04}, 0x01},
+ {PRO_DMOD, 0xf158, {0x7f}, 0x01},
+ {PRO_DMOD, 0xf15a, {0x00, 0x08}, 0x02},
+ {PRO_DMOD, 0xf15d, {0x03, 0x05}, 0x02},
+ {PRO_DMOD, 0xf163, {0x05}, 0x01},
+ {PRO_DMOD, 0xf166, {0x01, 0x40, 0x0f}, 0x03},
+ {PRO_DMOD, 0xf17a, {0x00, 0x00}, 0x02},
+ {PRO_DMOD, 0xf183, {0x01}, 0x01},
+ {PRO_DMOD, 0xf19d, {0x40}, 0x01},
+ {PRO_DMOD, 0xf1bc, {0x36, 0x00}, 0x02},
+ {PRO_DMOD, 0xf1cb, {0xa0, 0x01}, 0x02},
+ {PRO_DMOD, 0xf204, {0x10}, 0x01},
+ {PRO_DMOD, 0xf214, {0x00}, 0x01},
+ {PRO_DMOD, 0xf24c, {0x88, 0x95, 0x9a, 0x90}, 0x04},
+ {PRO_DMOD, 0xf25a, {0x07, 0xe8, 0x03, 0xb0, 0x04}, 0x05},
+ {PRO_DMOD, 0xf270, {0x01, 0x02, 0x01, 0x02}, 0x04},
+ {PRO_DMOD, 0xf40e, {0x0a, 0x40, 0x08}, 0x03},
+ {PRO_DMOD, 0xf55f, {0x0a}, 0x01},
+ {PRO_DMOD, 0xf561, {0x15, 0x20}, 0x02},
+ {PRO_DMOD, 0xf5df, {0xfb, 0x00}, 0x02},
+ {PRO_DMOD, 0xf5e3, {0x09, 0x01, 0x01}, 0x03},
+ {PRO_DMOD, 0xf5f8, {0x01}, 0x01},
+ {PRO_DMOD, 0xf5fd, {0x01}, 0x01},
+ {PRO_DMOD, 0xf600, {0x05, 0x08, 0x0b, 0x0e, 0x11, 0x14, 0x17,
+ 0x1f}, 0x08},
+ {PRO_DMOD, 0xf60e, {0x00, 0x04, 0x32, 0x10}, 0x04},
+ {PRO_DMOD, 0xf707, {0xfc, 0x00, 0x37, 0x00}, 0x04},
+ {PRO_DMOD, 0xf78b, {0x01}, 0x01},
+ {PRO_DMOD, 0xf80f, {0x40, 0x54, 0x5a}, 0x03},
+ {PRO_DMOD, 0xf905, {0x01}, 0x01},
+ {PRO_DMOD, 0xfb06, {0x03}, 0x01},
+ {PRO_DMOD, 0xfd8b, {0x00}, 0x01},
+ {0xff, 0x0000, {0x00}, 0x00} /* Terminating Entry */
};
+/* Version 2 types */
+static struct it913xset it9135_v2[] = {
+ {PRO_DMOD, 0x0051, {0x01}, 0x01},
+ {PRO_DMOD, 0x0070, {0x0a}, 0x01},
+ {PRO_DMOD, 0x007e, {0x04}, 0x01},
+ {PRO_DMOD, 0x0081, {0x0a}, 0x01},
+ {PRO_DMOD, 0x008a, {0x01}, 0x01},
+ {PRO_DMOD, 0x008e, {0x01}, 0x01},
+ {PRO_DMOD, 0x0092, {0x06}, 0x01},
+ {PRO_DMOD, 0x0099, {0x01}, 0x01},
+ {PRO_DMOD, 0x009f, {0xe1}, 0x01},
+ {PRO_DMOD, 0x00a0, {0xcf}, 0x01},
+ {PRO_DMOD, 0x00a3, {0x01}, 0x01},
+ {PRO_DMOD, 0x00a5, {0x01}, 0x01},
+ {PRO_DMOD, 0x00a6, {0x01}, 0x01},
+ {PRO_DMOD, 0x00a9, {0x00}, 0x01},
+ {PRO_DMOD, 0x00aa, {0x01}, 0x01},
+ {PRO_DMOD, 0x00b0, {0x01}, 0x01},
+ {PRO_DMOD, 0x00c2, {0x05}, 0x01},
+ {PRO_DMOD, 0x00c6, {0x19}, 0x01},
+ {PRO_DMOD, 0xf000, {0x0f}, 0x01},
+ {PRO_DMOD, 0xf02b, {0x00}, 0x01},
+ {PRO_DMOD, 0xf064, {0x03}, 0x01},
+ {PRO_DMOD, 0xf065, {0xf9}, 0x01},
+ {PRO_DMOD, 0xf066, {0x03}, 0x01},
+ {PRO_DMOD, 0xf067, {0x01}, 0x01},
+ {PRO_DMOD, 0xf06f, {0xe0}, 0x01},
+ {PRO_DMOD, 0xf070, {0x03}, 0x01},
+ {PRO_DMOD, 0xf072, {0x0f}, 0x01},
+ {PRO_DMOD, 0xf073, {0x03}, 0x01},
+ {PRO_DMOD, 0xf078, {0x00}, 0x01},
+ {PRO_DMOD, 0xf087, {0x00}, 0x01},
+ {PRO_DMOD, 0xf09b, {0x3f}, 0x01},
+ {PRO_DMOD, 0xf09c, {0x00}, 0x01},
+ {PRO_DMOD, 0xf09d, {0x20}, 0x01},
+ {PRO_DMOD, 0xf09e, {0x00}, 0x01},
+ {PRO_DMOD, 0xf09f, {0x0c}, 0x01},
+ {PRO_DMOD, 0xf0a0, {0x00}, 0x01},
+ {PRO_DMOD, 0xf130, {0x04}, 0x01},
+ {PRO_DMOD, 0xf132, {0x04}, 0x01},
+ {PRO_DMOD, 0xf144, {0x1a}, 0x01},
+ {PRO_DMOD, 0xf146, {0x00}, 0x01},
+ {PRO_DMOD, 0xf14a, {0x01}, 0x01},
+ {PRO_DMOD, 0xf14c, {0x00}, 0x01},
+ {PRO_DMOD, 0xf14d, {0x00}, 0x01},
+ {PRO_DMOD, 0xf14f, {0x04}, 0x01},
+ {PRO_DMOD, 0xf158, {0x7f}, 0x01},
+ {PRO_DMOD, 0xf15a, {0x00}, 0x01},
+ {PRO_DMOD, 0xf15b, {0x08}, 0x01},
+ {PRO_DMOD, 0xf15d, {0x03}, 0x01},
+ {PRO_DMOD, 0xf15e, {0x05}, 0x01},
+ {PRO_DMOD, 0xf163, {0x05}, 0x01},
+ {PRO_DMOD, 0xf166, {0x01}, 0x01},
+ {PRO_DMOD, 0xf167, {0x40}, 0x01},
+ {PRO_DMOD, 0xf168, {0x0f}, 0x01},
+ {PRO_DMOD, 0xf17a, {0x00}, 0x01},
+ {PRO_DMOD, 0xf17b, {0x00}, 0x01},
+ {PRO_DMOD, 0xf183, {0x01}, 0x01},
+ {PRO_DMOD, 0xf19d, {0x40}, 0x01},
+ {PRO_DMOD, 0xf1bc, {0x36}, 0x01},
+ {PRO_DMOD, 0xf1bd, {0x00}, 0x01},
+ {PRO_DMOD, 0xf1cb, {0xa0}, 0x01},
+ {PRO_DMOD, 0xf1cc, {0x01}, 0x01},
+ {PRO_DMOD, 0xf204, {0x10}, 0x01},
+ {PRO_DMOD, 0xf214, {0x00}, 0x01},
+ {PRO_DMOD, 0xf40e, {0x0a}, 0x01},
+ {PRO_DMOD, 0xf40f, {0x40}, 0x01},
+ {PRO_DMOD, 0xf410, {0x08}, 0x01},
+ {PRO_DMOD, 0xf55f, {0x0a}, 0x01},
+ {PRO_DMOD, 0xf561, {0x15}, 0x01},
+ {PRO_DMOD, 0xf562, {0x20}, 0x01},
+ {PRO_DMOD, 0xf5e3, {0x09}, 0x01},
+ {PRO_DMOD, 0xf5e4, {0x01}, 0x01},
+ {PRO_DMOD, 0xf5e5, {0x01}, 0x01},
+ {PRO_DMOD, 0xf600, {0x05}, 0x01},
+ {PRO_DMOD, 0xf601, {0x08}, 0x01},
+ {PRO_DMOD, 0xf602, {0x0b}, 0x01},
+ {PRO_DMOD, 0xf603, {0x0e}, 0x01},
+ {PRO_DMOD, 0xf604, {0x11}, 0x01},
+ {PRO_DMOD, 0xf605, {0x14}, 0x01},
+ {PRO_DMOD, 0xf606, {0x17}, 0x01},
+ {PRO_DMOD, 0xf607, {0x1f}, 0x01},
+ {PRO_DMOD, 0xf60e, {0x00}, 0x01},
+ {PRO_DMOD, 0xf60f, {0x04}, 0x01},
+ {PRO_DMOD, 0xf610, {0x32}, 0x01},
+ {PRO_DMOD, 0xf611, {0x10}, 0x01},
+ {PRO_DMOD, 0xf707, {0xfc}, 0x01},
+ {PRO_DMOD, 0xf708, {0x00}, 0x01},
+ {PRO_DMOD, 0xf709, {0x37}, 0x01},
+ {PRO_DMOD, 0xf70a, {0x00}, 0x01},
+ {PRO_DMOD, 0xf78b, {0x01}, 0x01},
+ {PRO_DMOD, 0xf80f, {0x40}, 0x01},
+ {PRO_DMOD, 0xf810, {0x54}, 0x01},
+ {PRO_DMOD, 0xf811, {0x5a}, 0x01},
+ {PRO_DMOD, 0xf905, {0x01}, 0x01},
+ {PRO_DMOD, 0xfb06, {0x03}, 0x01},
+ {PRO_DMOD, 0xfd8b, {0x00}, 0x01},
+ {0xff, 0x0000, {0x00}, 0x00} /* Terminating Entry */
+};
+
+static struct it913xset it9135_60[] = {
+ {PRO_DMOD, 0x0043, {0x00}, 0x01},
+ {PRO_DMOD, 0x0046, {0x60}, 0x01},
+ {PRO_DMOD, 0x0051, {0x01}, 0x01},
+ {PRO_DMOD, 0x005f, {0x00, 0x00}, 0x02},
+ {PRO_DMOD, 0x0068, {0x0a}, 0x01},
+ {PRO_DMOD, 0x006a, {0x03}, 0x01},
+ {PRO_DMOD, 0x0070, {0x0a, 0x05, 0x02}, 0x03},
+ {PRO_DMOD, 0x0075, {0x8c, 0x8c, 0x8c, 0x8c, 0x01}, 0x05},
+ {PRO_DMOD, 0x007e, {0x04}, 0x01},
+ {PRO_DMOD, 0x0081, {0x0a, 0x12}, 0x02},
+ {PRO_DMOD, 0x0084, {0x0a, 0x33, 0xbe, 0xa0, 0xc6, 0xb6, 0x01}, 0x07},
+ {PRO_DMOD, 0x008e, {0x01}, 0x01},
+ {PRO_DMOD, 0x0092, {0x06, 0x00, 0x00, 0x00, 0x00}, 0x05},
+ {PRO_DMOD, 0x0099, {0x01}, 0x01},
+ {PRO_DMOD, 0x009b, {0x3c, 0x28}, 0x02},
+ {PRO_DMOD, 0x009f, {0xe1, 0xcf}, 0x02},
+ {PRO_DMOD, 0x00a3, {0x01, 0x5a, 0x01, 0x01}, 0x04},
+ {PRO_DMOD, 0x00a9, {0x00, 0x01}, 0x02},
+ {PRO_DMOD, 0x00b0, {0x01}, 0x01},
+ {PRO_DMOD, 0x00b3, {0x02, 0x3a}, 0x02},
+ {PRO_DMOD, 0x00b6, {0x14}, 0x01},
+ {PRO_DMOD, 0x00c0, {0x11, 0x00, 0x05, 0x01, 0x00}, 0x05},
+ {PRO_DMOD, 0x00c6, {0x19, 0x00}, 0x02},
+ {PRO_DMOD, 0x00cb, {0x32, 0x2c, 0x4f, 0x30}, 0x04},
+ {PRO_DMOD, 0x00f3, {0x05, 0xa0, 0x8c}, 0x03},
+ {PRO_DMOD, 0x00f8, {0x03, 0x06, 0x06}, 0x03},
+ {PRO_DMOD, 0x00fc, { 0x03, 0x03, 0x02, 0x0a, 0x50, 0x7b, 0x8c,
+ 0x00, 0x02, 0xbe, 0x00}, 0x0b},
+ {PRO_DMOD, 0x0109, {0x02}, 0x01},
+ {PRO_DMOD, 0x0115, {0x0a, 0x03}, 0x02},
+ {PRO_DMOD, 0x011a, {0xbe}, 0x01},
+ {PRO_DMOD, 0x0124, {0xae}, 0x01},
+ {PRO_DMOD, 0x0127, {0x00}, 0x01},
+ {PRO_DMOD, 0x012a, {0x56, 0x50, 0x47, 0x42}, 0x04},
+ {PRO_DMOD, 0x0137, {0x00}, 0x01},
+ {PRO_DMOD, 0x013b, {0x08}, 0x01},
+ {PRO_DMOD, 0x013f, {0x5b}, 0x01},
+ {PRO_DMOD, 0x0141, { 0x59, 0xf9, 0x19, 0x19, 0x8c, 0x8c, 0x8c,
+ 0x6e, 0x8c, 0x50, 0x8c, 0x8c, 0xac, 0xc6,
+ 0x33}, 0x0f},
+ {PRO_DMOD, 0x0151, {0x28}, 0x01},
+ {PRO_DMOD, 0x0153, {0xbc}, 0x01},
+ {PRO_DMOD, 0x0178, {0x09}, 0x01},
+ {PRO_DMOD, 0x0181, {0x94, 0x6e}, 0x02},
+ {PRO_DMOD, 0x0185, {0x24}, 0x01},
+ {PRO_DMOD, 0x0187, {0x00, 0x00, 0xbe, 0x02, 0x80}, 0x05},
+ {PRO_DMOD, 0xed02, {0xff}, 0x01},
+ {PRO_DMOD, 0xee42, {0xff}, 0x01},
+ {PRO_DMOD, 0xee82, {0xff}, 0x01},
+ {PRO_DMOD, 0xf000, {0x0f}, 0x01},
+ {PRO_DMOD, 0xf01f, {0x8c, 0x00}, 0x02},
+ {PRO_DMOD, 0xf029, {0x8c, 0x00, 0x00}, 0x03},
+ {PRO_DMOD, 0xf064, {0x03, 0xf9, 0x03, 0x01}, 0x04},
+ {PRO_DMOD, 0xf06f, {0xe0, 0x03}, 0x02},
+ {PRO_DMOD, 0xf072, {0x0f, 0x03}, 0x02},
+ {PRO_DMOD, 0xf077, {0x01, 0x00}, 0x02},
+ {PRO_DMOD, 0xf087, {0x00}, 0x01},
+ {PRO_DMOD, 0xf09b, {0x3f, 0x00, 0x20, 0x00, 0x0c, 0x00}, 0x06},
+ {PRO_DMOD, 0xf130, {0x04}, 0x01},
+ {PRO_DMOD, 0xf132, {0x04}, 0x01},
+ {PRO_DMOD, 0xf144, {0x1a}, 0x01},
+ {PRO_DMOD, 0xf146, {0x00}, 0x01},
+ {PRO_DMOD, 0xf14a, {0x01}, 0x01},
+ {PRO_DMOD, 0xf14c, {0x00, 0x00}, 0x02},
+ {PRO_DMOD, 0xf14f, {0x04}, 0x01},
+ {PRO_DMOD, 0xf158, {0x7f}, 0x01},
+ {PRO_DMOD, 0xf15a, {0x00, 0x08}, 0x02},
+ {PRO_DMOD, 0xf15d, {0x03, 0x05}, 0x02},
+ {PRO_DMOD, 0xf163, {0x05}, 0x01},
+ {PRO_DMOD, 0xf166, {0x01, 0x40, 0x0f}, 0x03},
+ {PRO_DMOD, 0xf17a, {0x00, 0x00}, 0x02},
+ {PRO_DMOD, 0xf183, {0x01}, 0x01},
+ {PRO_DMOD, 0xf19d, {0x40}, 0x01},
+ {PRO_DMOD, 0xf1bc, {0x36, 0x00}, 0x02},
+ {PRO_DMOD, 0xf1cb, {0xa0, 0x01}, 0x02},
+ {PRO_DMOD, 0xf204, {0x10}, 0x01},
+ {PRO_DMOD, 0xf214, {0x00}, 0x01},
+ {PRO_DMOD, 0xf24c, {0x88, 0x95, 0x9a, 0x90}, 0x04},
+ {PRO_DMOD, 0xf25a, {0x07, 0xe8, 0x03, 0xb0, 0x04}, 0x05},
+ {PRO_DMOD, 0xf270, {0x01, 0x02, 0x01, 0x02}, 0x04},
+ {PRO_DMOD, 0xf40e, {0x0a, 0x40, 0x08}, 0x03},
+ {PRO_DMOD, 0xf55f, {0x0a}, 0x01},
+ {PRO_DMOD, 0xf561, {0x15, 0x20}, 0x02},
+ {PRO_DMOD, 0xf5e3, {0x09, 0x01, 0x01}, 0x03},
+ {PRO_DMOD, 0xf600, { 0x05, 0x08, 0x0b, 0x0e, 0x11, 0x14, 0x17,
+ 0x1f}, 0x08},
+ {PRO_DMOD, 0xf60e, {0x00, 0x04, 0x32, 0x10}, 0x04},
+ {PRO_DMOD, 0xf707, {0xfc, 0x00, 0x37, 0x00}, 0x04},
+ {PRO_DMOD, 0xf78b, {0x01}, 0x01},
+ {PRO_DMOD, 0xf80f, {0x40, 0x54, 0x5a}, 0x03},
+ {PRO_DMOD, 0xf905, {0x01}, 0x01},
+ {PRO_DMOD, 0xfb06, {0x03}, 0x01},
+ {PRO_DMOD, 0xfd8b, {0x00}, 0x01},
+ {0xff, 0x0000, {0x00}, 0x00} /* Terminating Entry */
+};
+
+static struct it913xset it9135_61[] = {
+ {PRO_DMOD, 0x0043, {0x00}, 0x01},
+ {PRO_DMOD, 0x0046, {0x61}, 0x01},
+ {PRO_DMOD, 0x0051, {0x01}, 0x01},
+ {PRO_DMOD, 0x005f, {0x00, 0x00}, 0x02},
+ {PRO_DMOD, 0x0068, {0x06}, 0x01},
+ {PRO_DMOD, 0x006a, {0x03}, 0x01},
+ {PRO_DMOD, 0x0070, {0x0a, 0x05, 0x02}, 0x03},
+ {PRO_DMOD, 0x0075, {0x8c, 0x8c, 0x8c, 0x90, 0x01}, 0x05},
+ {PRO_DMOD, 0x007e, {0x04}, 0x01},
+ {PRO_DMOD, 0x0081, {0x0a, 0x12}, 0x02},
+ {PRO_DMOD, 0x0084, {0x0a, 0x33, 0xbc, 0x9c, 0xcc, 0xa8, 0x01}, 0x07},
+ {PRO_DMOD, 0x008e, {0x01}, 0x01},
+ {PRO_DMOD, 0x0092, {0x06, 0x00, 0x00, 0x00, 0x00}, 0x05},
+ {PRO_DMOD, 0x0099, {0x01}, 0x01},
+ {PRO_DMOD, 0x009b, {0x3c, 0x28}, 0x02},
+ {PRO_DMOD, 0x009f, {0xe1, 0xcf}, 0x02},
+ {PRO_DMOD, 0x00a3, {0x01, 0x5c, 0x01, 0x01}, 0x04},
+ {PRO_DMOD, 0x00a9, {0x00, 0x01}, 0x02},
+ {PRO_DMOD, 0x00b0, {0x01}, 0x01},
+ {PRO_DMOD, 0x00b3, {0x02, 0x3a}, 0x02},
+ {PRO_DMOD, 0x00b6, {0x14}, 0x01},
+ {PRO_DMOD, 0x00c0, {0x11, 0x00, 0x05, 0x01, 0x00}, 0x05},
+ {PRO_DMOD, 0x00c6, {0x19, 0x00}, 0x02},
+ {PRO_DMOD, 0x00cb, {0x32, 0x2c, 0x4f, 0x30}, 0x04},
+ {PRO_DMOD, 0x00f3, {0x05, 0xa0, 0x8c}, 0x03},
+ {PRO_DMOD, 0x00f8, {0x03, 0x06, 0x06}, 0x03},
+ {PRO_DMOD, 0x00fc, { 0x03, 0x03, 0x02, 0x08, 0x50, 0x7b, 0x8c,
+ 0x01, 0x02, 0xc8, 0x00}, 0x0b},
+ {PRO_DMOD, 0x0109, {0x02}, 0x01},
+ {PRO_DMOD, 0x0115, {0x0a, 0x03}, 0x02},
+ {PRO_DMOD, 0x011a, {0xc6}, 0x01},
+ {PRO_DMOD, 0x0124, {0xa8}, 0x01},
+ {PRO_DMOD, 0x0127, {0x00}, 0x01},
+ {PRO_DMOD, 0x012a, {0x59, 0x50, 0x47, 0x42}, 0x04},
+ {PRO_DMOD, 0x0137, {0x00}, 0x01},
+ {PRO_DMOD, 0x013b, {0x05}, 0x01},
+ {PRO_DMOD, 0x013f, {0x5b}, 0x01},
+ {PRO_DMOD, 0x0141, { 0x59, 0xf9, 0x59, 0x59, 0x8c, 0x8c, 0x8c,
+ 0x7b, 0x8c, 0x50, 0x8c, 0x8c, 0xa8, 0xc6,
+ 0x33}, 0x0f},
+ {PRO_DMOD, 0x0151, {0x28}, 0x01},
+ {PRO_DMOD, 0x0153, {0xcc}, 0x01},
+ {PRO_DMOD, 0x0178, {0x09}, 0x01},
+ {PRO_DMOD, 0x0181, {0x9c, 0x76}, 0x02},
+ {PRO_DMOD, 0x0185, {0x28}, 0x01},
+ {PRO_DMOD, 0x0187, {0x01, 0x00, 0xaa, 0x02, 0x80}, 0x05},
+ {PRO_DMOD, 0xed02, {0xff}, 0x01},
+ {PRO_DMOD, 0xee42, {0xff}, 0x01},
+ {PRO_DMOD, 0xee82, {0xff}, 0x01},
+ {PRO_DMOD, 0xf000, {0x0f}, 0x01},
+ {PRO_DMOD, 0xf01f, {0x8c, 0x00}, 0x02},
+ {PRO_DMOD, 0xf029, {0x8c, 0x00, 0x00}, 0x03},
+ {PRO_DMOD, 0xf064, {0x03, 0xf9, 0x03, 0x01}, 0x04},
+ {PRO_DMOD, 0xf06f, {0xe0, 0x03}, 0x02},
+ {PRO_DMOD, 0xf072, {0x0f, 0x03}, 0x02},
+ {PRO_DMOD, 0xf077, {0x01, 0x00}, 0x02},
+ {PRO_DMOD, 0xf087, {0x00}, 0x01},
+ {PRO_DMOD, 0xf09b, {0x3f, 0x00, 0x20, 0x00, 0x0c, 0x00}, 0x06},
+ {PRO_DMOD, 0xf130, {0x04}, 0x01},
+ {PRO_DMOD, 0xf132, {0x04}, 0x01},
+ {PRO_DMOD, 0xf144, {0x1a}, 0x01},
+ {PRO_DMOD, 0xf146, {0x00}, 0x01},
+ {PRO_DMOD, 0xf14a, {0x01}, 0x01},
+ {PRO_DMOD, 0xf14c, {0x00, 0x00}, 0x02},
+ {PRO_DMOD, 0xf14f, {0x04}, 0x01},
+ {PRO_DMOD, 0xf158, {0x7f}, 0x01},
+ {PRO_DMOD, 0xf15a, {0x00, 0x08}, 0x02},
+ {PRO_DMOD, 0xf15d, {0x03, 0x05}, 0x02},
+ {PRO_DMOD, 0xf163, {0x05}, 0x01},
+ {PRO_DMOD, 0xf166, {0x01, 0x40, 0x0f}, 0x03},
+ {PRO_DMOD, 0xf17a, {0x00, 0x00}, 0x02},
+ {PRO_DMOD, 0xf183, {0x01}, 0x01},
+ {PRO_DMOD, 0xf19d, {0x40}, 0x01},
+ {PRO_DMOD, 0xf1bc, {0x36, 0x00}, 0x02},
+ {PRO_DMOD, 0xf1cb, {0xa0, 0x01}, 0x02},
+ {PRO_DMOD, 0xf204, {0x10}, 0x01},
+ {PRO_DMOD, 0xf214, {0x00}, 0x01},
+ {PRO_DMOD, 0xf24c, {0x88, 0x95, 0x9a, 0x90}, 0x04},
+ {PRO_DMOD, 0xf25a, {0x07, 0xe8, 0x03, 0xb0, 0x04}, 0x05},
+ {PRO_DMOD, 0xf270, {0x01, 0x02, 0x01, 0x02}, 0x04},
+ {PRO_DMOD, 0xf40e, {0x0a, 0x40, 0x08}, 0x03},
+ {PRO_DMOD, 0xf55f, {0x0a}, 0x01},
+ {PRO_DMOD, 0xf561, {0x15, 0x20}, 0x02},
+ {PRO_DMOD, 0xf5e3, {0x09, 0x01, 0x01}, 0x03},
+ {PRO_DMOD, 0xf600, { 0x05, 0x08, 0x0b, 0x0e, 0x11, 0x14, 0x17,
+ 0x1f}, 0x08},
+ {PRO_DMOD, 0xf60e, {0x00, 0x04, 0x32, 0x10}, 0x04},
+ {PRO_DMOD, 0xf707, {0xfc, 0x00, 0x37, 0x00}, 0x04},
+ {PRO_DMOD, 0xf78b, {0x01}, 0x01},
+ {PRO_DMOD, 0xf80f, {0x40, 0x54, 0x5a}, 0x03},
+ {PRO_DMOD, 0xf905, {0x01}, 0x01},
+ {PRO_DMOD, 0xfb06, {0x03}, 0x01},
+ {PRO_DMOD, 0xfd8b, {0x00}, 0x01},
+ {0xff, 0x0000, {0x00}, 0x00} /* Terminating Entry */
+};
+
+static struct it913xset it9135_62[] = {
+ {PRO_DMOD, 0x0043, {0x00}, 0x01},
+ {PRO_DMOD, 0x0046, {0x62}, 0x01},
+ {PRO_DMOD, 0x0051, {0x01}, 0x01},
+ {PRO_DMOD, 0x005f, {0x00, 0x00}, 0x02},
+ {PRO_DMOD, 0x0068, {0x0a}, 0x01},
+ {PRO_DMOD, 0x006a, {0x03}, 0x01},
+ {PRO_DMOD, 0x0070, {0x0a, 0x05, 0x02}, 0x03},
+ {PRO_DMOD, 0x0075, {0x8c, 0x8c, 0x8c, 0x8c, 0x01}, 0x05},
+ {PRO_DMOD, 0x007e, {0x04}, 0x01},
+ {PRO_DMOD, 0x0081, {0x0a, 0x12}, 0x02},
+ {PRO_DMOD, 0x0084, { 0x0a, 0x33, 0xb8, 0x9c, 0xb2, 0xa6, 0x01},
+ 0x07},
+ {PRO_DMOD, 0x008e, {0x01}, 0x01},
+ {PRO_DMOD, 0x0092, {0x06, 0x00, 0x00, 0x00, 0x00}, 0x05},
+ {PRO_DMOD, 0x0099, {0x01}, 0x01},
+ {PRO_DMOD, 0x009b, {0x3c, 0x28}, 0x02},
+ {PRO_DMOD, 0x009f, {0xe1, 0xcf}, 0x02},
+ {PRO_DMOD, 0x00a3, {0x01, 0x5a, 0x01, 0x01}, 0x04},
+ {PRO_DMOD, 0x00a9, {0x00, 0x01}, 0x02},
+ {PRO_DMOD, 0x00b0, {0x01}, 0x01},
+ {PRO_DMOD, 0x00b3, {0x02, 0x3a}, 0x02},
+ {PRO_DMOD, 0x00b6, {0x14}, 0x01},
+ {PRO_DMOD, 0x00c0, {0x11, 0x00, 0x05, 0x01, 0x00}, 0x05},
+ {PRO_DMOD, 0x00c6, {0x19, 0x00}, 0x02},
+ {PRO_DMOD, 0x00cb, {0x32, 0x2c, 0x4f, 0x30}, 0x04},
+ {PRO_DMOD, 0x00f3, {0x05, 0x8c, 0x8c}, 0x03},
+ {PRO_DMOD, 0x00f8, {0x03, 0x06, 0x06}, 0x03},
+ {PRO_DMOD, 0x00fc, { 0x02, 0x03, 0x02, 0x09, 0x50, 0x6e, 0x8c,
+ 0x02, 0x02, 0xc2, 0x00}, 0x0b},
+ {PRO_DMOD, 0x0109, {0x02}, 0x01},
+ {PRO_DMOD, 0x0115, {0x0a, 0x03}, 0x02},
+ {PRO_DMOD, 0x011a, {0xb8}, 0x01},
+ {PRO_DMOD, 0x0124, {0xa8}, 0x01},
+ {PRO_DMOD, 0x0127, {0x00}, 0x01},
+ {PRO_DMOD, 0x012a, {0x53, 0x51, 0x4e, 0x43}, 0x04},
+ {PRO_DMOD, 0x0137, {0x00}, 0x01},
+ {PRO_DMOD, 0x013b, {0x05}, 0x01},
+ {PRO_DMOD, 0x013f, {0x5b}, 0x01},
+ {PRO_DMOD, 0x0141, { 0x59, 0xf9, 0x59, 0x19, 0x8c, 0x8c, 0x8c,
+ 0x7b, 0x8c, 0x50, 0x70, 0x8c, 0x96, 0xd0,
+ 0x33}, 0x0f},
+ {PRO_DMOD, 0x0151, {0x28}, 0x01},
+ {PRO_DMOD, 0x0153, {0xb2}, 0x01},
+ {PRO_DMOD, 0x0178, {0x09}, 0x01},
+ {PRO_DMOD, 0x0181, {0x9c, 0x6e}, 0x02},
+ {PRO_DMOD, 0x0185, {0x24}, 0x01},
+ {PRO_DMOD, 0x0187, {0x00, 0x00, 0xb8, 0x02, 0x80}, 0x05},
+ {PRO_DMOD, 0xed02, {0xff}, 0x01},
+ {PRO_DMOD, 0xee42, {0xff}, 0x01},
+ {PRO_DMOD, 0xee82, {0xff}, 0x01},
+ {PRO_DMOD, 0xf000, {0x0f}, 0x01},
+ {PRO_DMOD, 0xf01f, {0x8c, 0x00}, 0x02},
+ {PRO_DMOD, 0xf029, {0x8c, 0x00, 0x00}, 0x03},
+ {PRO_DMOD, 0xf064, {0x03, 0xf9, 0x03, 0x01}, 0x04},
+ {PRO_DMOD, 0xf06f, {0xe0, 0x03}, 0x02},
+ {PRO_DMOD, 0xf072, {0x0f, 0x03}, 0x02},
+ {PRO_DMOD, 0xf077, {0x01, 0x00}, 0x02},
+ {PRO_DMOD, 0xf087, {0x00}, 0x01},
+ {PRO_DMOD, 0xf09b, {0x3f, 0x00, 0x20, 0x00, 0x0c, 0x00}, 0x06},
+ {PRO_DMOD, 0xf130, {0x04}, 0x01},
+ {PRO_DMOD, 0xf132, {0x04}, 0x01},
+ {PRO_DMOD, 0xf144, {0x1a}, 0x01},
+ {PRO_DMOD, 0xf146, {0x00}, 0x01},
+ {PRO_DMOD, 0xf14a, {0x01}, 0x01},
+ {PRO_DMOD, 0xf14c, {0x00, 0x00}, 0x02},
+ {PRO_DMOD, 0xf14f, {0x04}, 0x01},
+ {PRO_DMOD, 0xf158, {0x7f}, 0x01},
+ {PRO_DMOD, 0xf15a, {0x00, 0x08}, 0x02},
+ {PRO_DMOD, 0xf15d, {0x03, 0x05}, 0x02},
+ {PRO_DMOD, 0xf163, {0x05}, 0x01},
+ {PRO_DMOD, 0xf166, {0x01, 0x40, 0x0f}, 0x03},
+ {PRO_DMOD, 0xf17a, {0x00, 0x00}, 0x02},
+ {PRO_DMOD, 0xf183, {0x01}, 0x01},
+ {PRO_DMOD, 0xf19d, {0x40}, 0x01},
+ {PRO_DMOD, 0xf1bc, {0x36, 0x00}, 0x02},
+ {PRO_DMOD, 0xf1cb, {0xa0, 0x01}, 0x02},
+ {PRO_DMOD, 0xf204, {0x10}, 0x01},
+ {PRO_DMOD, 0xf214, {0x00}, 0x01},
+ {PRO_DMOD, 0xf24c, {0x88, 0x95, 0x9a, 0x90}, 0x04},
+ {PRO_DMOD, 0xf25a, {0x07, 0xe8, 0x03, 0xb0, 0x04}, 0x05},
+ {PRO_DMOD, 0xf270, {0x01, 0x02, 0x01, 0x02}, 0x04},
+ {PRO_DMOD, 0xf40e, {0x0a, 0x40, 0x08}, 0x03},
+ {PRO_DMOD, 0xf55f, {0x0a}, 0x01},
+ {PRO_DMOD, 0xf561, {0x15, 0x20}, 0x02},
+ {PRO_DMOD, 0xf5e3, {0x09, 0x01, 0x01}, 0x03},
+ {PRO_DMOD, 0xf600, { 0x05, 0x08, 0x0b, 0x0e, 0x11, 0x14, 0x17,
+ 0x1f}, 0x08},
+ {PRO_DMOD, 0xf60e, {0x00, 0x04, 0x32, 0x10}, 0x04},
+ {PRO_DMOD, 0xf707, {0xfc, 0x00, 0x37, 0x00}, 0x04},
+ {PRO_DMOD, 0xf78b, {0x01}, 0x01},
+ {PRO_DMOD, 0xf80f, {0x40, 0x54, 0x5a}, 0x03},
+ {PRO_DMOD, 0xf905, {0x01}, 0x01},
+ {PRO_DMOD, 0xfb06, {0x03}, 0x01},
+ {PRO_DMOD, 0xfd8b, {0x00}, 0x01},
+ {0xff, 0x0000, {0x00}, 0x00} /* Terminating Entry */
+};
+
+/* Tuner setting scripts (still keeping it9137) */
static struct it913xset it9137_tuner_off[] = {
{PRO_DMOD, 0xfba8, {0x01}, 0x01}, /* Tuner Clock Off */
{PRO_DMOD, 0xec40, {0x00}, 0x01}, /* Power Down Tuner */
{PRO_DMOD, 0xec02, {0x3f, 0x1f, 0x3f, 0x3f}, 0x04},
+ {PRO_DMOD, 0xec06, {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00}, 0x0c},
+ {PRO_DMOD, 0xec12, {0x00, 0x00, 0x00, 0x00}, 0x04},
+ {PRO_DMOD, 0xec17, {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00}, 0x09},
+ {PRO_DMOD, 0xec22, {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00}, 0x0a},
+ {PRO_DMOD, 0xec20, {0x00}, 0x01},
{PRO_DMOD, 0xec3f, {0x01}, 0x01},
{0xff, 0x0000, {0x00}, 0x00}, /* Terminating Entry */
};
+static struct it913xset set_it9135_template[] = {
+ {PRO_DMOD, 0xee06, {0x00}, 0x01},
+ {PRO_DMOD, 0xec56, {0x00}, 0x01},
+ {PRO_DMOD, 0xec4c, {0x00}, 0x01},
+ {PRO_DMOD, 0xec4d, {0x00}, 0x01},
+ {PRO_DMOD, 0xec4e, {0x00}, 0x01},
+ {PRO_DMOD, 0x011e, {0x00}, 0x01}, /* Older Devices */
+ {PRO_DMOD, 0x011f, {0x00}, 0x01},
+ {0xff, 0x0000, {0x00}, 0x00}, /* Terminating Entry */
+};
+
static struct it913xset set_it9137_template[] = {
{PRO_DMOD, 0xee06, {0x00}, 0x01},
{PRO_DMOD, 0xec56, {0x00}, 0x01},
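Every one of these it913xset tables follows the same row shape seen in the initialisers above: a processor/bus selector (PRO_LINK or PRO_DMOD), a register address, up to fifteen data bytes, and a byte count, with each table closed by the 0xff "Terminating Entry". A loader such as it913x_fe_script_loader() (used later in this patch) presumably walks the rows and issues one bulk write per row until it reaches that sentinel. The sketch below is illustrative only; the field names, the reg[] size, and the loop are assumptions rather than the driver's actual definitions, and only it913x_write() appears in the patch itself.

struct it913xset {			/* assumed layout, inferred from the initialisers */
	u32 pro;			/* PRO_LINK or PRO_DMOD */
	u32 address;			/* target register */
	u8 reg[15];			/* data bytes (longest row above uses 15) */
	u8 count;			/* number of valid bytes in reg[] */
};

static int example_script_loader(struct it913x_fe_state *state,
					struct it913xset *set)
{
	int ret = 0;

	for (; set->pro != 0xff; set++) {	/* 0xff marks the terminating entry */
		ret = it913x_write(state, set->pro, set->address,
					set->reg, set->count);
		if (ret < 0)
			break;
	}
	return ret;
}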
diff --git a/drivers/media/dvb/frontends/it913x-fe.c b/drivers/media/dvb/frontends/it913x-fe.c
index d4bd24eb4700..ccc36bf2deb4 100644
--- a/drivers/media/dvb/frontends/it913x-fe.c
+++ b/drivers/media/dvb/frontends/it913x-fe.c
@@ -46,13 +46,17 @@ MODULE_PARM_DESC(debug, "set debugging level (1=info (or-able)).");
dprintk(level, name" (%02x%02x%02x%02x%02x%02x%02x%02x)", \
*p, *(p+1), *(p+2), *(p+3), *(p+4), \
*(p+5), *(p+6), *(p+7));
+#define info(format, arg...) \
+ printk(KERN_INFO "it913x-fe: " format "\n" , ## arg)
struct it913x_fe_state {
struct dvb_frontend frontend;
struct i2c_adapter *i2c_adap;
+ struct ite_config *config;
u8 i2c_addr;
u32 frequency;
- u8 adf;
+ fe_modulation_t constellation;
+ fe_transmit_mode_t transmission_mode;
u32 crystalFrequency;
u32 adcFrequency;
u8 tuner_type;
@@ -62,6 +66,7 @@ struct it913x_fe_state {
u8 tun_fdiv;
u8 tun_clk_mode;
u32 tun_fn_min;
+ u32 ucblocks;
};
static int it913x_read_reg(struct it913x_fe_state *state,
@@ -211,20 +216,24 @@ static int it913x_init_tuner(struct it913x_fe_state *state)
state->tun_fn_min /= (state->tun_fdiv * nv_val);
deb_info("Tuner fn_min %d", state->tun_fn_min);
- for (i = 0; i < 50; i++) {
- reg = it913x_read_reg_u8(state, 0xec82);
- if (reg > 0)
- break;
- if (reg < 0)
- return -ENODEV;
- udelay(2000);
+ if (state->config->chip_ver > 1)
+ msleep(50);
+ else {
+ for (i = 0; i < 50; i++) {
+ reg = it913x_read_reg_u8(state, 0xec82);
+ if (reg > 0)
+ break;
+ if (reg < 0)
+ return -ENODEV;
+ udelay(2000);
+ }
}
return it913x_write_reg(state, PRO_DMOD, 0xed81, val);
}
static int it9137_set_tuner(struct it913x_fe_state *state,
- enum fe_bandwidth bandwidth, u32 frequency_m)
+ u32 bandwidth, u32 frequency_m)
{
struct it913xset *set_tuner = set_it9137_template;
int ret, reg;
@@ -237,6 +246,11 @@ static int it9137_set_tuner(struct it913x_fe_state *state,
u8 lna_band;
u8 bw;
+ if (state->config->firmware_ver == 1)
+ set_tuner = set_it9135_template;
+ else
+ set_tuner = set_it9137_template;
+
deb_info("Tuner Frequency %d Bandwidth %d", frequency, bandwidth);
if (frequency >= 51000 && frequency <= 440000) {
@@ -273,16 +287,21 @@ static int it9137_set_tuner(struct it913x_fe_state *state,
return -EINVAL;
set_tuner[0].reg[0] = lna_band;
- if (bandwidth == BANDWIDTH_5_MHZ)
+ switch (bandwidth) {
+ case 5000000:
bw = 0;
- else if (bandwidth == BANDWIDTH_6_MHZ)
+ break;
+ case 6000000:
bw = 2;
- else if (bandwidth == BANDWIDTH_7_MHZ)
+ break;
+ case 7000000:
bw = 4;
- else if (bandwidth == BANDWIDTH_8_MHZ)
- bw = 6;
- else
+ break;
+ default:
+ case 8000000:
bw = 6;
+ break;
+ }
set_tuner[1].reg[0] = bw;
set_tuner[2].reg[0] = 0xa0 | (l_band << 3);
@@ -361,7 +380,7 @@ static int it9137_set_tuner(struct it913x_fe_state *state,
}
static int it913x_fe_select_bw(struct it913x_fe_state *state,
- enum fe_bandwidth bandwidth, u32 adcFrequency)
+ u32 bandwidth, u32 adcFrequency)
{
int ret, i;
u8 buffer[256];
@@ -374,17 +393,21 @@ static int it913x_fe_select_bw(struct it913x_fe_state *state,
deb_info("Bandwidth %d Adc %d", bandwidth, adcFrequency);
- if (bandwidth == BANDWIDTH_5_MHZ)
+ switch (bandwidth) {
+ case 5000000:
bw = 3;
- else if (bandwidth == BANDWIDTH_6_MHZ)
+ break;
+ case 6000000:
bw = 0;
- else if (bandwidth == BANDWIDTH_7_MHZ)
+ break;
+ case 7000000:
bw = 1;
- else if (bandwidth == BANDWIDTH_8_MHZ)
- bw = 2;
- else
+ break;
+ default:
+ case 8000000:
bw = 2;
-
+ break;
+ }
ret = it913x_write_reg(state, PRO_DMOD, REG_BW, bw);
if (state->table == NULL)
@@ -492,31 +515,79 @@ static int it913x_fe_read_signal_strength(struct dvb_frontend *fe,
return 0;
}
-static int it913x_fe_read_snr(struct dvb_frontend *fe, u16* snr)
+static int it913x_fe_read_snr(struct dvb_frontend *fe, u16 *snr)
{
struct it913x_fe_state *state = fe->demodulator_priv;
- int ret = it913x_read_reg_u8(state, SIGNAL_QUALITY);
- ret = (ret * 0xff) / 0x64;
- ret |= (ret << 0x8);
- *snr = ~ret;
- return 0;
+ int ret;
+ u8 reg[3];
+ u32 snr_val, snr_min, snr_max;
+ u32 temp;
+
+ ret = it913x_read_reg(state, 0x2c, reg, sizeof(reg));
+
+ snr_val = (u32)(reg[2] << 16) | (reg[1] << 8) | reg[0];
+
+ ret |= it913x_read_reg(state, 0xf78b, reg, 1);
+ if (reg[0])
+ snr_val /= reg[0];
+
+ if (state->transmission_mode == TRANSMISSION_MODE_2K)
+ snr_val *= 4;
+ else if (state->transmission_mode == TRANSMISSION_MODE_4K)
+ snr_val *= 2;
+
+ if (state->constellation == QPSK) {
+ snr_min = 0xb4711;
+ snr_max = 0x191451;
+ } else if (state->constellation == QAM_16) {
+ snr_min = 0x4f0d5;
+ snr_max = 0xc7925;
+ } else if (state->constellation == QAM_64) {
+ snr_min = 0x256d0;
+ snr_max = 0x626be;
+ } else
+ return -EINVAL;
+
+ if (snr_val < snr_min)
+ *snr = 0;
+ else if (snr_val < snr_max) {
+ temp = (snr_val - snr_min) >> 5;
+ temp *= 0xffff;
+ temp /= (snr_max - snr_min) >> 5;
+ *snr = (u16)temp;
+ } else
+ *snr = 0xffff;
+
+ return (ret < 0) ? -ENODEV : 0;
}
static int it913x_fe_read_ber(struct dvb_frontend *fe, u32 *ber)
{
- *ber = 0;
+ struct it913x_fe_state *state = fe->demodulator_priv;
+ int ret;
+ u8 reg[5];
+ /* Read aborted packet count and pre-Viterbi error rate (5 bytes) */
+ ret = it913x_read_reg(state, RSD_ABORT_PKT_LSB, reg, sizeof(reg));
+ state->ucblocks += (u32)(reg[1] << 8) | reg[0];
+ *ber = (u32)(reg[4] << 16) | (reg[3] << 8) | reg[2];
return 0;
}
static int it913x_fe_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
{
- *ucblocks = 0;
- return 0;
+ struct it913x_fe_state *state = fe->demodulator_priv;
+ int ret;
+ u8 reg[2];
+ /* Aborted Packets */
+ ret = it913x_read_reg(state, RSD_ABORT_PKT_LSB, reg, sizeof(reg));
+ state->ucblocks += (u32)(reg[1] << 8) | reg[0];
+ *ucblocks = state->ucblocks;
+ return ret;
}
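Note the asymmetry between the two calls: read_ber returns the raw 24-bit pre-Viterbi counter from the latest 5-byte read, while read_ucblocks keeps a running total in state->ucblocks, and since both start their read at RSD_ABORT_PKT_LSB a BER poll also advances that total. From userspace the values surface through the classic DVB v3 ioctls; a minimal, hedged example follows (the device path is illustrative).

#include <fcntl.h>
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/dvb/frontend.h>

int main(void)
{
	uint32_t ber, ucb;
	int fd = open("/dev/dvb/adapter0/frontend0", O_RDONLY);

	if (fd < 0)
		return 1;
	if (!ioctl(fd, FE_READ_BER, &ber) &&
	    !ioctl(fd, FE_READ_UNCORRECTED_BLOCKS, &ucb))
		printf("ber(raw)=%u ucblocks(total)=%u\n", ber, ucb);
	close(fd);
	return 0;
}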
-static int it913x_fe_get_frontend(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *p)
+static int it913x_fe_get_frontend(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct it913x_fe_state *state = fe->demodulator_priv;
int ret;
u8 reg[8];
@@ -524,26 +595,30 @@ static int it913x_fe_get_frontend(struct dvb_frontend *fe,
ret = it913x_read_reg(state, REG_TPSD_TX_MODE, reg, sizeof(reg));
if (reg[3] < 3)
- p->u.ofdm.constellation = fe_con[reg[3]];
+ p->modulation = fe_con[reg[3]];
if (reg[0] < 3)
- p->u.ofdm.transmission_mode = fe_mode[reg[0]];
+ p->transmission_mode = fe_mode[reg[0]];
if (reg[1] < 4)
- p->u.ofdm.guard_interval = fe_gi[reg[1]];
+ p->guard_interval = fe_gi[reg[1]];
if (reg[2] < 4)
- p->u.ofdm.hierarchy_information = fe_hi[reg[2]];
+ p->hierarchy = fe_hi[reg[2]];
+
+ p->code_rate_HP = (reg[6] < 6) ? fe_code[reg[6]] : FEC_NONE;
+ p->code_rate_LP = (reg[7] < 6) ? fe_code[reg[7]] : FEC_NONE;
- p->u.ofdm.code_rate_HP = (reg[6] < 6) ? fe_code[reg[6]] : FEC_NONE;
- p->u.ofdm.code_rate_LP = (reg[7] < 6) ? fe_code[reg[7]] : FEC_NONE;
+ /* Update internal state to reflect the autodetected props */
+ state->constellation = p->modulation;
+ state->transmission_mode = p->transmission_mode;
return 0;
}
-static int it913x_fe_set_frontend(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *p)
+static int it913x_fe_set_frontend(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct it913x_fe_state *state = fe->demodulator_priv;
int ret, i;
u8 empty_ch, last_ch;
@@ -551,7 +626,7 @@ static int it913x_fe_set_frontend(struct dvb_frontend *fe,
state->it913x_status = 0;
/* Set bw*/
- ret = it913x_fe_select_bw(state, p->u.ofdm.bandwidth,
+ ret = it913x_fe_select_bw(state, p->bandwidth_hz,
state->adcFrequency);
/* Training Mode Off */
@@ -571,20 +646,25 @@ static int it913x_fe_set_frontend(struct dvb_frontend *fe,
i = 1;
else if ((p->frequency >= 1450000000) && (p->frequency <= 1680000000))
i = 2;
- else
- return -EOPNOTSUPP;
+ else
+ return -EOPNOTSUPP;
ret = it913x_write_reg(state, PRO_DMOD, FREE_BAND, i);
deb_info("Frontend Set Tuner Type %02x", state->tuner_type);
switch (state->tuner_type) {
- case IT9137: /* Tuner type 0x38 */
+ case IT9135_38:
+ case IT9135_51:
+ case IT9135_52:
+ case IT9135_60:
+ case IT9135_61:
+ case IT9135_62:
ret = it9137_set_tuner(state,
- p->u.ofdm.bandwidth, p->frequency);
+ p->bandwidth_hz, p->frequency);
break;
default:
if (fe->ops.tuner_ops.set_params) {
- fe->ops.tuner_ops.set_params(fe, p);
+ fe->ops.tuner_ops.set_params(fe);
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 0);
}
@@ -678,16 +758,19 @@ static u32 compute_div(u32 a, u32 b, u32 x)
static int it913x_fe_start(struct it913x_fe_state *state)
{
- struct it913xset *set_fe;
+ struct it913xset *set_lna;
struct it913xset *set_mode;
int ret;
- u8 adf = (state->adf & 0xf);
+ u8 adf = (state->config->adf & 0xf);
u32 adc, xtal;
u8 b[4];
- ret = it913x_init_tuner(state);
+ if (state->config->chip_ver == 1)
+ ret = it913x_init_tuner(state);
+
+ info("ADF table value :%02x", adf);
- if (adf < 12) {
+ if (adf < 10) {
state->crystalFrequency = fe_clockTable[adf].xtal ;
state->table = fe_clockTable[adf].table;
state->adcFrequency = state->table->adcFrequency;
@@ -698,9 +781,6 @@ static int it913x_fe_start(struct it913x_fe_state *state)
} else
return -EINVAL;
- deb_info("Xtal Freq :%d Adc Freq :%d Adc %08x Xtal %08x",
- state->crystalFrequency, state->adcFrequency, adc, xtal);
-
/* Set LED indicator on GPIOH3 */
ret = it913x_write_reg(state, PRO_LINK, GPIOH3_EN, 0x1);
ret |= it913x_write_reg(state, PRO_LINK, GPIOH3_ON, 0x1);
@@ -721,22 +801,71 @@ static int it913x_fe_start(struct it913x_fe_state *state)
b[2] = (adc >> 16) & 0xff;
ret |= it913x_write(state, PRO_DMOD, ADC_FREQ, b, 3);
+ if (state->config->adc_x2)
+ ret |= it913x_write_reg(state, PRO_DMOD, ADC_X_2, 0x01);
+ b[0] = 0;
+ b[1] = 0;
+ b[2] = 0;
+ ret |= it913x_write(state, PRO_DMOD, 0x0029, b, 3);
+
+ info("Crystal Frequency :%d Adc Frequency :%d ADC X2: %02x",
+ state->crystalFrequency, state->adcFrequency,
+ state->config->adc_x2);
+ deb_info("Xtal value :%04x Adc value :%04x", xtal, adc);
+
+ if (ret < 0)
+ return -ENODEV;
+
+ /* v1 or v2 tuner script */
+ if (state->config->chip_ver > 1)
+ ret = it913x_fe_script_loader(state, it9135_v2);
+ else
+ ret = it913x_fe_script_loader(state, it9135_v1);
+ if (ret < 0)
+ return ret;
+
+ /* LNA Scripts */
switch (state->tuner_type) {
- case IT9137: /* Tuner type 0x38 */
- set_fe = it9137_set;
+ case IT9135_51:
+ set_lna = it9135_51;
+ break;
+ case IT9135_52:
+ set_lna = it9135_52;
+ break;
+ case IT9135_60:
+ set_lna = it9135_60;
+ break;
+ case IT9135_61:
+ set_lna = it9135_61;
break;
+ case IT9135_62:
+ set_lna = it9135_62;
+ break;
+ case IT9135_38:
default:
- return -EINVAL;
+ set_lna = it9135_38;
}
+ info("Tuner LNA type :%02x", state->tuner_type);
+
+ ret = it913x_fe_script_loader(state, set_lna);
+ if (ret < 0)
+ return ret;
+
+ if (state->config->chip_ver == 2) {
+ ret = it913x_write_reg(state, PRO_DMOD, TRIGGER_OFSM, 0x1);
+ ret |= it913x_write_reg(state, PRO_LINK, PADODPU, 0x0);
+ ret |= it913x_write_reg(state, PRO_LINK, AGC_O_D, 0x0);
+ ret |= it913x_init_tuner(state);
+ }
+ if (ret < 0)
+ return -ENODEV;
- /* set the demod */
- ret = it913x_fe_script_loader(state, set_fe);
/* Always solo frontend */
set_mode = set_solo_fe;
ret |= it913x_fe_script_loader(state, set_mode);
ret |= it913x_fe_suspend(state);
- return 0;
+ return (ret < 0) ? -ENODEV : 0;
}
static int it913x_fe_init(struct dvb_frontend *fe)
@@ -746,17 +875,11 @@ static int it913x_fe_init(struct dvb_frontend *fe)
/* Power Up Tuner - common all versions */
ret = it913x_write_reg(state, PRO_DMOD, 0xec40, 0x1);
- ret |= it913x_write_reg(state, PRO_DMOD, AFE_MEM0, 0x0);
-
ret |= it913x_fe_script_loader(state, init_1);
- switch (state->tuner_type) {
- case IT9137:
- ret |= it913x_write_reg(state, PRO_DMOD, 0xfba8, 0x0);
- break;
- default:
- return -EINVAL;
- }
+ ret |= it913x_write_reg(state, PRO_DMOD, AFE_MEM0, 0x0);
+
+ ret |= it913x_write_reg(state, PRO_DMOD, 0xfba8, 0x0);
return (ret < 0) ? -ENODEV : 0;
}
@@ -770,19 +893,34 @@ static void it913x_fe_release(struct dvb_frontend *fe)
static struct dvb_frontend_ops it913x_fe_ofdm_ops;
struct dvb_frontend *it913x_fe_attach(struct i2c_adapter *i2c_adap,
- u8 i2c_addr, u8 adf, u8 type)
+ u8 i2c_addr, struct ite_config *config)
{
struct it913x_fe_state *state = NULL;
int ret;
+
/* allocate memory for the internal state */
state = kzalloc(sizeof(struct it913x_fe_state), GFP_KERNEL);
if (state == NULL)
+ return NULL;
+ if (config == NULL)
goto error;
state->i2c_adap = i2c_adap;
state->i2c_addr = i2c_addr;
- state->adf = adf;
- state->tuner_type = type;
+ state->config = config;
+
+ switch (state->config->tuner_id_0) {
+ case IT9135_51:
+ case IT9135_52:
+ case IT9135_60:
+ case IT9135_61:
+ case IT9135_62:
+ state->tuner_type = state->config->tuner_id_0;
+ break;
+ default:
+ case IT9135_38:
+ state->tuner_type = IT9135_38;
+ }
ret = it913x_fe_start(state);
if (ret < 0)
@@ -802,10 +940,9 @@ error:
EXPORT_SYMBOL(it913x_fe_attach);
static struct dvb_frontend_ops it913x_fe_ofdm_ops = {
-
+ .delsys = { SYS_DVBT },
.info = {
.name = "it913x-fe DVB-T",
- .type = FE_OFDM,
.frequency_min = 51000000,
.frequency_max = 1680000000,
.frequency_stepsize = 62500,
@@ -835,5 +972,5 @@ static struct dvb_frontend_ops it913x_fe_ofdm_ops = {
MODULE_DESCRIPTION("it913x Frontend and it9137 tuner");
MODULE_AUTHOR("Malcolm Priestley tvboxspy@gmail.com");
-MODULE_VERSION("1.07");
+MODULE_VERSION("1.13");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/frontends/it913x-fe.h b/drivers/media/dvb/frontends/it913x-fe.h
index 9d97f32e690b..c4a908e354e0 100644
--- a/drivers/media/dvb/frontends/it913x-fe.h
+++ b/drivers/media/dvb/frontends/it913x-fe.h
@@ -23,13 +23,27 @@
#include <linux/dvb/frontend.h>
#include "dvb_frontend.h"
+
+struct ite_config {
+ u8 chip_ver;
+ u16 chip_type;
+ u32 firmware;
+ u8 firmware_ver;
+ u8 adc_x2;
+ u8 tuner_id_0;
+ u8 tuner_id_1;
+ u8 dual_mode;
+ u8 adf;
+};
+
#if defined(CONFIG_DVB_IT913X_FE) || (defined(CONFIG_DVB_IT913X_FE_MODULE) && \
defined(MODULE))
extern struct dvb_frontend *it913x_fe_attach(struct i2c_adapter *i2c_adap,
- u8 i2c_addr, u8 adf, u8 type);
+ u8 i2c_addr, struct ite_config *config);
#else
static inline struct dvb_frontend *it913x_fe_attach(
- struct i2c_adapter *i2c_adap, u8 i2c_addr, u8 adf, u8 type)
+ struct i2c_adapter *i2c_adap,
+ u8 i2c_addr, struct ite_config *config)
{
printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
return NULL;
@@ -134,6 +148,16 @@ static inline struct dvb_frontend *it913x_fe_attach(
#define COEFF_1_2048 0x0001
#define XTAL_CLK 0x0025
#define BFS_FCW 0x0029
+
+/* Error Regs */
+#define RSD_ABORT_PKT_LSB 0x0032
+#define RSD_ABORT_PKT_MSB 0x0033
+#define RSD_BIT_ERR_0_7 0x0034
+#define RSD_BIT_ERR_8_15 0x0035
+#define RSD_BIT_ERR_23_16 0x0036
+#define RSD_BIT_COUNT_LSB 0x0037
+#define RSD_BIT_COUNT_MSB 0x0038
+
#define TPSD_LOCK 0x003c
#define TRAINING_MODE 0x0040
#define ADC_X_2 0x0045
@@ -144,8 +168,14 @@ static inline struct dvb_frontend *it913x_fe_attach(
#define EST_SIGNAL_LEVEL 0x004a
#define FREE_BAND 0x004b
#define SUSPEND_FLAG 0x004c
-/* Build in tuners */
+/* Built-in tuner types */
#define IT9137 0x38
+#define IT9135_38 0x38
+#define IT9135_51 0x51
+#define IT9135_52 0x52
+#define IT9135_60 0x60
+#define IT9135_61 0x61
+#define IT9135_62 0x62
enum {
CMD_DEMOD_READ = 0,
@@ -193,4 +223,11 @@ enum {
WRITE_CMD,
};
+enum {
+ IT9135_AUTO = 0,
+ IT9137_FW,
+ IT9135_V1_FW,
+ IT9135_V2_FW,
+};
+
#endif /* IT913X_FE_H */
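With this header change a bridge driver no longer passes loose adf/type bytes; it fills a struct ite_config describing the chip and tuner variant and hands a pointer to it913x_fe_attach(). The fragment below shows the new calling convention only; every value, the demod I2C address, and the surrounding bridge code are placeholders, not taken from this patch.

static struct ite_config hypothetical_cfg = {
	.chip_ver	= 2,		/* > 1 selects the v2 script/init path in it913x_fe_start() */
	.firmware_ver	= 1,		/* selects set_it9135_template in it9137_set_tuner() */
	.adc_x2		= 1,		/* ADC_X_2 gets written during start when set */
	.tuner_id_0	= IT9135_60,	/* picks the it9135_60 LNA script */
	.dual_mode	= 0,
	.adf		= 0x07,		/* index into fe_clockTable[]; must be below 10 */
};

/* inside a hypothetical bridge frontend_attach callback: */
struct dvb_frontend *fe = it913x_fe_attach(i2c_adap, demod_addr, &hypothetical_cfg);
if (fe == NULL)
	return -ENODEV;	/* allocation failed or config was NULL */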
diff --git a/drivers/media/dvb/frontends/itd1000.c b/drivers/media/dvb/frontends/itd1000.c
index aa9ccb821fa5..316457584fe7 100644
--- a/drivers/media/dvb/frontends/itd1000.c
+++ b/drivers/media/dvb/frontends/itd1000.c
@@ -250,13 +250,14 @@ static void itd1000_set_lo(struct itd1000_state *state, u32 freq_khz)
itd1000_set_vco(state, freq_khz);
}
-static int itd1000_set_parameters(struct dvb_frontend *fe, struct dvb_frontend_parameters *p)
+static int itd1000_set_parameters(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct itd1000_state *state = fe->tuner_priv;
u8 pllcon1;
- itd1000_set_lo(state, p->frequency);
- itd1000_set_lpf_bw(state, p->u.qpsk.symbol_rate);
+ itd1000_set_lo(state, c->frequency);
+ itd1000_set_lpf_bw(state, c->symbol_rate);
pllcon1 = itd1000_read_reg(state, PLLCON1) & 0x7f;
itd1000_write_reg(state, PLLCON1, pllcon1 | (1 << 7));
diff --git a/drivers/media/dvb/frontends/ix2505v.c b/drivers/media/dvb/frontends/ix2505v.c
index 9a517a4bf96d..bc5a82082aaa 100644
--- a/drivers/media/dvb/frontends/ix2505v.c
+++ b/drivers/media/dvb/frontends/ix2505v.c
@@ -129,12 +129,12 @@ static int ix2505v_release(struct dvb_frontend *fe)
* 1 -> 8 -> 6
*/
-static int ix2505v_set_params(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *params)
+static int ix2505v_set_params(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct ix2505v_state *state = fe->tuner_priv;
- u32 frequency = params->frequency;
- u32 b_w = (params->u.qpsk.symbol_rate * 27) / 32000;
+ u32 frequency = c->frequency;
+ u32 b_w = (c->symbol_rate * 27) / 32000;
u32 div_factor, N , A, x;
int ret = 0, len;
u8 gain, cc, ref, psc, local_osc, lpf;
diff --git a/drivers/media/dvb/frontends/l64781.c b/drivers/media/dvb/frontends/l64781.c
index 445fa1068064..36fcf559e361 100644
--- a/drivers/media/dvb/frontends/l64781.c
+++ b/drivers/media/dvb/frontends/l64781.c
@@ -117,18 +117,17 @@ static int reset_and_configure (struct l64781_state* state)
return (i2c_transfer(state->i2c, &msg, 1) == 1) ? 0 : -ENODEV;
}
-static int apply_frontend_param (struct dvb_frontend* fe, struct dvb_frontend_parameters *param)
+static int apply_frontend_param(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct l64781_state* state = fe->demodulator_priv;
/* The coderates for FEC_NONE, FEC_4_5 and FEC_FEC_6_7 are arbitrary */
static const u8 fec_tab[] = { 7, 0, 1, 2, 9, 3, 10, 4 };
/* QPSK, QAM_16, QAM_64 */
static const u8 qam_tab [] = { 2, 4, 0, 6 };
- static const u8 bw_tab [] = { 8, 7, 6 }; /* 8Mhz, 7MHz, 6MHz */
static const u8 guard_tab [] = { 1, 2, 4, 8 };
/* The Grundig 29504-401.04 Tuner comes with 18.432MHz crystal. */
static const u32 ppm = 8000;
- struct dvb_ofdm_parameters *p = &param->u.ofdm;
u32 ddfs_offset_fixed;
/* u32 ddfs_offset_variable = 0x6000-((1000000UL+ppm)/ */
/* bw_tab[p->bandWidth]<<10)/15625; */
@@ -137,18 +136,29 @@ static int apply_frontend_param (struct dvb_frontend* fe, struct dvb_frontend_pa
u8 val0x04;
u8 val0x05;
u8 val0x06;
- int bw = p->bandwidth - BANDWIDTH_8_MHZ;
+ int bw;
+
+ switch (p->bandwidth_hz) {
+ case 8000000:
+ bw = 8;
+ break;
+ case 7000000:
+ bw = 7;
+ break;
+ case 6000000:
+ bw = 6;
+ break;
+ default:
+ return -EINVAL;
+ }
if (fe->ops.tuner_ops.set_params) {
- fe->ops.tuner_ops.set_params(fe, param);
+ fe->ops.tuner_ops.set_params(fe);
if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0);
}
- if (param->inversion != INVERSION_ON &&
- param->inversion != INVERSION_OFF)
- return -EINVAL;
-
- if (bw < 0 || bw > 2)
+ if (p->inversion != INVERSION_ON &&
+ p->inversion != INVERSION_OFF)
return -EINVAL;
if (p->code_rate_HP != FEC_1_2 && p->code_rate_HP != FEC_2_3 &&
@@ -156,14 +166,14 @@ static int apply_frontend_param (struct dvb_frontend* fe, struct dvb_frontend_pa
p->code_rate_HP != FEC_7_8)
return -EINVAL;
- if (p->hierarchy_information != HIERARCHY_NONE &&
+ if (p->hierarchy != HIERARCHY_NONE &&
(p->code_rate_LP != FEC_1_2 && p->code_rate_LP != FEC_2_3 &&
p->code_rate_LP != FEC_3_4 && p->code_rate_LP != FEC_5_6 &&
p->code_rate_LP != FEC_7_8))
return -EINVAL;
- if (p->constellation != QPSK && p->constellation != QAM_16 &&
- p->constellation != QAM_64)
+ if (p->modulation != QPSK && p->modulation != QAM_16 &&
+ p->modulation != QAM_64)
return -EINVAL;
if (p->transmission_mode != TRANSMISSION_MODE_2K &&
@@ -174,22 +184,22 @@ static int apply_frontend_param (struct dvb_frontend* fe, struct dvb_frontend_pa
p->guard_interval > GUARD_INTERVAL_1_4)
return -EINVAL;
- if (p->hierarchy_information < HIERARCHY_NONE ||
- p->hierarchy_information > HIERARCHY_4)
+ if (p->hierarchy < HIERARCHY_NONE ||
+ p->hierarchy > HIERARCHY_4)
return -EINVAL;
- ddfs_offset_fixed = 0x4000-(ppm<<16)/bw_tab[p->bandwidth]/1000000;
+ ddfs_offset_fixed = 0x4000-(ppm<<16)/bw/1000000;
/* This works up to 20000 ppm, it overflows if too large ppm! */
init_freq = (((8UL<<25) + (8UL<<19) / 25*ppm / (15625/25)) /
- bw_tab[p->bandwidth] & 0xFFFFFF);
+ bw & 0xFFFFFF);
/* SPI bias calculation is slightly modified to fit in 32bit */
/* will work for high ppm only... */
spi_bias = 378 * (1 << 10);
spi_bias *= 16;
- spi_bias *= bw_tab[p->bandwidth];
- spi_bias *= qam_tab[p->constellation];
+ spi_bias *= bw;
+ spi_bias *= qam_tab[p->modulation];
spi_bias /= p->code_rate_HP + 1;
spi_bias /= (guard_tab[p->guard_interval] + 32);
spi_bias *= 1000;
@@ -199,10 +209,10 @@ static int apply_frontend_param (struct dvb_frontend* fe, struct dvb_frontend_pa
val0x04 = (p->transmission_mode << 2) | p->guard_interval;
val0x05 = fec_tab[p->code_rate_HP];
- if (p->hierarchy_information != HIERARCHY_NONE)
+ if (p->hierarchy != HIERARCHY_NONE)
val0x05 |= (p->code_rate_LP - FEC_1_2) << 3;
- val0x06 = (p->hierarchy_information << 2) | p->constellation;
+ val0x06 = (p->hierarchy << 2) | p->modulation;
l64781_writereg (state, 0x04, val0x04);
l64781_writereg (state, 0x05, val0x05);
@@ -220,7 +230,7 @@ static int apply_frontend_param (struct dvb_frontend* fe, struct dvb_frontend_pa
l64781_writereg (state, 0x1b, spi_bias & 0xff);
l64781_writereg (state, 0x1c, (spi_bias >> 8) & 0xff);
l64781_writereg (state, 0x1d, ((spi_bias >> 16) & 0x7f) |
- (param->inversion == INVERSION_ON ? 0x80 : 0x00));
+ (p->inversion == INVERSION_ON ? 0x80 : 0x00));
l64781_writereg (state, 0x22, ddfs_offset_fixed & 0xff);
l64781_writereg (state, 0x23, (ddfs_offset_fixed >> 8) & 0x3f);
@@ -233,8 +243,9 @@ static int apply_frontend_param (struct dvb_frontend* fe, struct dvb_frontend_pa
return 0;
}
-static int get_frontend(struct dvb_frontend* fe, struct dvb_frontend_parameters* param)
+static int get_frontend(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct l64781_state* state = fe->demodulator_priv;
int tmp;
@@ -242,98 +253,95 @@ static int get_frontend(struct dvb_frontend* fe, struct dvb_frontend_parameters*
tmp = l64781_readreg(state, 0x04);
switch(tmp & 3) {
case 0:
- param->u.ofdm.guard_interval = GUARD_INTERVAL_1_32;
+ p->guard_interval = GUARD_INTERVAL_1_32;
break;
case 1:
- param->u.ofdm.guard_interval = GUARD_INTERVAL_1_16;
+ p->guard_interval = GUARD_INTERVAL_1_16;
break;
case 2:
- param->u.ofdm.guard_interval = GUARD_INTERVAL_1_8;
+ p->guard_interval = GUARD_INTERVAL_1_8;
break;
case 3:
- param->u.ofdm.guard_interval = GUARD_INTERVAL_1_4;
+ p->guard_interval = GUARD_INTERVAL_1_4;
break;
}
switch((tmp >> 2) & 3) {
case 0:
- param->u.ofdm.transmission_mode = TRANSMISSION_MODE_2K;
+ p->transmission_mode = TRANSMISSION_MODE_2K;
break;
case 1:
- param->u.ofdm.transmission_mode = TRANSMISSION_MODE_8K;
+ p->transmission_mode = TRANSMISSION_MODE_8K;
break;
default:
- printk("Unexpected value for transmission_mode\n");
+ printk(KERN_WARNING "Unexpected value for transmission_mode\n");
}
-
-
tmp = l64781_readreg(state, 0x05);
switch(tmp & 7) {
case 0:
- param->u.ofdm.code_rate_HP = FEC_1_2;
+ p->code_rate_HP = FEC_1_2;
break;
case 1:
- param->u.ofdm.code_rate_HP = FEC_2_3;
+ p->code_rate_HP = FEC_2_3;
break;
case 2:
- param->u.ofdm.code_rate_HP = FEC_3_4;
+ p->code_rate_HP = FEC_3_4;
break;
case 3:
- param->u.ofdm.code_rate_HP = FEC_5_6;
+ p->code_rate_HP = FEC_5_6;
break;
case 4:
- param->u.ofdm.code_rate_HP = FEC_7_8;
+ p->code_rate_HP = FEC_7_8;
break;
default:
printk("Unexpected value for code_rate_HP\n");
}
switch((tmp >> 3) & 7) {
case 0:
- param->u.ofdm.code_rate_LP = FEC_1_2;
+ p->code_rate_LP = FEC_1_2;
break;
case 1:
- param->u.ofdm.code_rate_LP = FEC_2_3;
+ p->code_rate_LP = FEC_2_3;
break;
case 2:
- param->u.ofdm.code_rate_LP = FEC_3_4;
+ p->code_rate_LP = FEC_3_4;
break;
case 3:
- param->u.ofdm.code_rate_LP = FEC_5_6;
+ p->code_rate_LP = FEC_5_6;
break;
case 4:
- param->u.ofdm.code_rate_LP = FEC_7_8;
+ p->code_rate_LP = FEC_7_8;
break;
default:
printk("Unexpected value for code_rate_LP\n");
}
-
tmp = l64781_readreg(state, 0x06);
switch(tmp & 3) {
case 0:
- param->u.ofdm.constellation = QPSK;
+ p->modulation = QPSK;
break;
case 1:
- param->u.ofdm.constellation = QAM_16;
+ p->modulation = QAM_16;
break;
case 2:
- param->u.ofdm.constellation = QAM_64;
+ p->modulation = QAM_64;
break;
default:
- printk("Unexpected value for constellation\n");
+ printk(KERN_WARNING "Unexpected value for modulation\n");
}
switch((tmp >> 2) & 7) {
case 0:
- param->u.ofdm.hierarchy_information = HIERARCHY_NONE;
+ p->hierarchy = HIERARCHY_NONE;
break;
case 1:
- param->u.ofdm.hierarchy_information = HIERARCHY_1;
+ p->hierarchy = HIERARCHY_1;
break;
case 2:
- param->u.ofdm.hierarchy_information = HIERARCHY_2;
+ p->hierarchy = HIERARCHY_2;
break;
case 3:
- param->u.ofdm.hierarchy_information = HIERARCHY_4;
+ p->hierarchy = HIERARCHY_4;
break;
default:
printk("Unexpected value for hierarchy\n");
@@ -341,12 +349,12 @@ static int get_frontend(struct dvb_frontend* fe, struct dvb_frontend_parameters*
tmp = l64781_readreg (state, 0x1d);
- param->inversion = (tmp & 0x80) ? INVERSION_ON : INVERSION_OFF;
+ p->inversion = (tmp & 0x80) ? INVERSION_ON : INVERSION_OFF;
tmp = (int) (l64781_readreg (state, 0x08) |
(l64781_readreg (state, 0x09) << 8) |
(l64781_readreg (state, 0x0a) << 16));
- param->frequency += tmp;
+ p->frequency += tmp;
return 0;
}
@@ -564,10 +572,9 @@ error:
}
static struct dvb_frontend_ops l64781_ops = {
-
+ .delsys = { SYS_DVBT },
.info = {
.name = "LSI L64781 DVB-T",
- .type = FE_OFDM,
/* .frequency_min = ???,*/
/* .frequency_max = ???,*/
.frequency_stepsize = 166666,
diff --git a/drivers/media/dvb/frontends/lgdt3305.c b/drivers/media/dvb/frontends/lgdt3305.c
index 3272881cb112..1d2c47378cf8 100644
--- a/drivers/media/dvb/frontends/lgdt3305.c
+++ b/drivers/media/dvb/frontends/lgdt3305.c
@@ -266,7 +266,7 @@ fail:
}
static int lgdt3305_set_modulation(struct lgdt3305_state *state,
- struct dvb_frontend_parameters *param)
+ struct dtv_frontend_properties *p)
{
u8 opermode;
int ret;
@@ -279,7 +279,7 @@ static int lgdt3305_set_modulation(struct lgdt3305_state *state,
opermode &= ~0x03;
- switch (param->u.vsb.modulation) {
+ switch (p->modulation) {
case VSB_8:
opermode |= 0x03;
break;
@@ -298,11 +298,11 @@ fail:
}
static int lgdt3305_set_filter_extension(struct lgdt3305_state *state,
- struct dvb_frontend_parameters *param)
+ struct dtv_frontend_properties *p)
{
int val;
- switch (param->u.vsb.modulation) {
+ switch (p->modulation) {
case VSB_8:
val = 0;
break;
@@ -321,11 +321,11 @@ static int lgdt3305_set_filter_extension(struct lgdt3305_state *state,
/* ------------------------------------------------------------------------ */
static int lgdt3305_passband_digital_agc(struct lgdt3305_state *state,
- struct dvb_frontend_parameters *param)
+ struct dtv_frontend_properties *p)
{
u16 agc_ref;
- switch (param->u.vsb.modulation) {
+ switch (p->modulation) {
case VSB_8:
agc_ref = 0x32c4;
break;
@@ -348,11 +348,11 @@ static int lgdt3305_passband_digital_agc(struct lgdt3305_state *state,
}
static int lgdt3305_rfagc_loop(struct lgdt3305_state *state,
- struct dvb_frontend_parameters *param)
+ struct dtv_frontend_properties *p)
{
u16 ifbw, rfbw, agcdelay;
- switch (param->u.vsb.modulation) {
+ switch (p->modulation) {
case VSB_8:
agcdelay = 0x04c0;
rfbw = 0x8000;
@@ -398,11 +398,11 @@ static int lgdt3305_rfagc_loop(struct lgdt3305_state *state,
}
static int lgdt3305_agc_setup(struct lgdt3305_state *state,
- struct dvb_frontend_parameters *param)
+ struct dtv_frontend_properties *p)
{
int lockdten, acqen;
- switch (param->u.vsb.modulation) {
+ switch (p->modulation) {
case VSB_8:
lockdten = 0;
acqen = 0;
@@ -432,15 +432,15 @@ static int lgdt3305_agc_setup(struct lgdt3305_state *state,
return -EINVAL;
}
- return lgdt3305_rfagc_loop(state, param);
+ return lgdt3305_rfagc_loop(state, p);
}
static int lgdt3305_set_agc_power_ref(struct lgdt3305_state *state,
- struct dvb_frontend_parameters *param)
+ struct dtv_frontend_properties *p)
{
u16 usref = 0;
- switch (param->u.vsb.modulation) {
+ switch (p->modulation) {
case VSB_8:
if (state->cfg->usref_8vsb)
usref = state->cfg->usref_8vsb;
@@ -473,14 +473,14 @@ static int lgdt3305_set_agc_power_ref(struct lgdt3305_state *state,
/* ------------------------------------------------------------------------ */
static int lgdt3305_spectral_inversion(struct lgdt3305_state *state,
- struct dvb_frontend_parameters *param,
+ struct dtv_frontend_properties *p,
int inversion)
{
int ret;
lg_dbg("(%d)\n", inversion);
- switch (param->u.vsb.modulation) {
+ switch (p->modulation) {
case VSB_8:
ret = lgdt3305_write_reg(state, LGDT3305_CR_CTRL_7,
inversion ? 0xf9 : 0x79);
@@ -497,13 +497,13 @@ static int lgdt3305_spectral_inversion(struct lgdt3305_state *state,
}
static int lgdt3305_set_if(struct lgdt3305_state *state,
- struct dvb_frontend_parameters *param)
+ struct dtv_frontend_properties *p)
{
u16 if_freq_khz;
u8 nco1, nco2, nco3, nco4;
u64 nco;
- switch (param->u.vsb.modulation) {
+ switch (p->modulation) {
case VSB_8:
if_freq_khz = state->cfg->vsb_if_khz;
break;
@@ -517,7 +517,7 @@ static int lgdt3305_set_if(struct lgdt3305_state *state,
nco = if_freq_khz / 10;
- switch (param->u.vsb.modulation) {
+ switch (p->modulation) {
case VSB_8:
nco <<= 24;
do_div(nco, 625);
@@ -677,37 +677,37 @@ fail:
return ret;
}
-static int lgdt3304_set_parameters(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *param)
+static int lgdt3304_set_parameters(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct lgdt3305_state *state = fe->demodulator_priv;
int ret;
- lg_dbg("(%d, %d)\n", param->frequency, param->u.vsb.modulation);
+ lg_dbg("(%d, %d)\n", p->frequency, p->modulation);
if (fe->ops.tuner_ops.set_params) {
- ret = fe->ops.tuner_ops.set_params(fe, param);
+ ret = fe->ops.tuner_ops.set_params(fe);
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 0);
if (lg_fail(ret))
goto fail;
- state->current_frequency = param->frequency;
+ state->current_frequency = p->frequency;
}
- ret = lgdt3305_set_modulation(state, param);
+ ret = lgdt3305_set_modulation(state, p);
if (lg_fail(ret))
goto fail;
- ret = lgdt3305_passband_digital_agc(state, param);
+ ret = lgdt3305_passband_digital_agc(state, p);
if (lg_fail(ret))
goto fail;
- ret = lgdt3305_agc_setup(state, param);
+ ret = lgdt3305_agc_setup(state, p);
if (lg_fail(ret))
goto fail;
/* reg 0x030d is 3304-only... seen in vsb and qam usbsnoops... */
- switch (param->u.vsb.modulation) {
+ switch (p->modulation) {
case VSB_8:
lgdt3305_write_reg(state, 0x030d, 0x00);
lgdt3305_write_reg(state, LGDT3305_CR_CTR_FREQ_1, 0x4f);
@@ -718,7 +718,7 @@ static int lgdt3304_set_parameters(struct dvb_frontend *fe,
case QAM_64:
case QAM_256:
lgdt3305_write_reg(state, 0x030d, 0x14);
- ret = lgdt3305_set_if(state, param);
+ ret = lgdt3305_set_if(state, p);
if (lg_fail(ret))
goto fail;
break;
@@ -727,13 +727,13 @@ static int lgdt3304_set_parameters(struct dvb_frontend *fe,
}
- ret = lgdt3305_spectral_inversion(state, param,
+ ret = lgdt3305_spectral_inversion(state, p,
state->cfg->spectral_inversion
? 1 : 0);
if (lg_fail(ret))
goto fail;
- state->current_modulation = param->u.vsb.modulation;
+ state->current_modulation = p->modulation;
ret = lgdt3305_mpeg_mode(state, state->cfg->mpeg_mode);
if (lg_fail(ret))
@@ -747,34 +747,34 @@ fail:
return ret;
}
-static int lgdt3305_set_parameters(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *param)
+static int lgdt3305_set_parameters(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct lgdt3305_state *state = fe->demodulator_priv;
int ret;
- lg_dbg("(%d, %d)\n", param->frequency, param->u.vsb.modulation);
+ lg_dbg("(%d, %d)\n", p->frequency, p->modulation);
if (fe->ops.tuner_ops.set_params) {
- ret = fe->ops.tuner_ops.set_params(fe, param);
+ ret = fe->ops.tuner_ops.set_params(fe);
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 0);
if (lg_fail(ret))
goto fail;
- state->current_frequency = param->frequency;
+ state->current_frequency = p->frequency;
}
- ret = lgdt3305_set_modulation(state, param);
+ ret = lgdt3305_set_modulation(state, p);
if (lg_fail(ret))
goto fail;
- ret = lgdt3305_passband_digital_agc(state, param);
+ ret = lgdt3305_passband_digital_agc(state, p);
if (lg_fail(ret))
goto fail;
- ret = lgdt3305_set_agc_power_ref(state, param);
+ ret = lgdt3305_set_agc_power_ref(state, p);
if (lg_fail(ret))
goto fail;
- ret = lgdt3305_agc_setup(state, param);
+ ret = lgdt3305_agc_setup(state, p);
if (lg_fail(ret))
goto fail;
@@ -786,20 +786,20 @@ static int lgdt3305_set_parameters(struct dvb_frontend *fe,
if (lg_fail(ret))
goto fail;
- ret = lgdt3305_set_if(state, param);
+ ret = lgdt3305_set_if(state, p);
if (lg_fail(ret))
goto fail;
- ret = lgdt3305_spectral_inversion(state, param,
+ ret = lgdt3305_spectral_inversion(state, p,
state->cfg->spectral_inversion
? 1 : 0);
if (lg_fail(ret))
goto fail;
- ret = lgdt3305_set_filter_extension(state, param);
+ ret = lgdt3305_set_filter_extension(state, p);
if (lg_fail(ret))
goto fail;
- state->current_modulation = param->u.vsb.modulation;
+ state->current_modulation = p->modulation;
ret = lgdt3305_mpeg_mode(state, state->cfg->mpeg_mode);
if (lg_fail(ret))
@@ -813,15 +813,15 @@ fail:
return ret;
}
-static int lgdt3305_get_frontend(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *param)
+static int lgdt3305_get_frontend(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct lgdt3305_state *state = fe->demodulator_priv;
lg_dbg("\n");
- param->u.vsb.modulation = state->current_modulation;
- param->frequency = state->current_frequency;
+ p->modulation = state->current_modulation;
+ p->frequency = state->current_frequency;
return 0;
}
@@ -1166,9 +1166,9 @@ fail:
EXPORT_SYMBOL(lgdt3305_attach);
static struct dvb_frontend_ops lgdt3304_ops = {
+ .delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
.info = {
.name = "LG Electronics LGDT3304 VSB/QAM Frontend",
- .type = FE_ATSC,
.frequency_min = 54000000,
.frequency_max = 858000000,
.frequency_stepsize = 62500,
@@ -1188,9 +1188,9 @@ static struct dvb_frontend_ops lgdt3304_ops = {
};
static struct dvb_frontend_ops lgdt3305_ops = {
+ .delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
.info = {
.name = "LG Electronics LGDT3305 VSB/QAM Frontend",
- .type = FE_ATSC,
.frequency_min = 54000000,
.frequency_max = 858000000,
.frequency_stepsize = 62500,
diff --git a/drivers/media/dvb/frontends/lgdt330x.c b/drivers/media/dvb/frontends/lgdt330x.c
index 43971e63baa7..c990d35a13dc 100644
--- a/drivers/media/dvb/frontends/lgdt330x.c
+++ b/drivers/media/dvb/frontends/lgdt330x.c
@@ -288,6 +288,8 @@ static int lgdt330x_read_ucblocks(struct dvb_frontend* fe, u32* ucblocks)
int err;
u8 buf[2];
+ *ucblocks = 0;
+
switch (state->config->demod_chip) {
case LGDT3302:
err = i2c_read_demod_bytes(state, LGDT3302_PACKET_ERR_COUNTER1,
@@ -302,14 +304,16 @@ static int lgdt330x_read_ucblocks(struct dvb_frontend* fe, u32* ucblocks)
"Only LGDT3302 and LGDT3303 are supported chips.\n");
err = -ENODEV;
}
+ if (err < 0)
+ return err;
*ucblocks = (buf[0] << 8) | buf[1];
return 0;
}
-static int lgdt330x_set_parameters(struct dvb_frontend* fe,
- struct dvb_frontend_parameters *param)
+static int lgdt330x_set_parameters(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
/*
* Array of byte pairs <address, value>
* to initialize 8VSB for lgdt3303 chip 50 MHz IF
@@ -343,10 +347,10 @@ static int lgdt330x_set_parameters(struct dvb_frontend* fe,
static u8 top_ctrl_cfg[] = { TOP_CONTROL, 0x03 };
- int err;
+ int err = 0;
/* Change only if we are actually changing the modulation */
- if (state->current_modulation != param->u.vsb.modulation) {
- switch(param->u.vsb.modulation) {
+ if (state->current_modulation != p->modulation) {
+ switch (p->modulation) {
case VSB_8:
dprintk("%s: VSB_8 MODE\n", __func__);
@@ -395,9 +399,14 @@ static int lgdt330x_set_parameters(struct dvb_frontend* fe,
}
break;
default:
- printk(KERN_WARNING "lgdt330x: %s: Modulation type(%d) UNSUPPORTED\n", __func__, param->u.vsb.modulation);
+ printk(KERN_WARNING "lgdt330x: %s: Modulation type(%d) UNSUPPORTED\n", __func__, p->modulation);
return -1;
}
+ if (err < 0)
+ printk(KERN_WARNING "lgdt330x: %s: error blasting "
+ "bytes to lgdt3303 for modulation type(%d)\n",
+ __func__, p->modulation);
+
/*
* select serial or parallel MPEG harware interface
* Serial: 0x04 for LGDT3302 or 0x40 for LGDT3303
@@ -410,29 +419,29 @@ static int lgdt330x_set_parameters(struct dvb_frontend* fe,
sizeof(top_ctrl_cfg));
if (state->config->set_ts_params)
state->config->set_ts_params(fe, 0);
- state->current_modulation = param->u.vsb.modulation;
+ state->current_modulation = p->modulation;
}
/* Tune to the specified frequency */
if (fe->ops.tuner_ops.set_params) {
- fe->ops.tuner_ops.set_params(fe, param);
+ fe->ops.tuner_ops.set_params(fe);
if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0);
}
/* Keep track of the new frequency */
/* FIXME this is the wrong way to do this... */
/* The tuner is shared with the video4linux analog API */
- state->current_frequency = param->frequency;
+ state->current_frequency = p->frequency;
lgdt330x_SwReset(state);
return 0;
}
-static int lgdt330x_get_frontend(struct dvb_frontend* fe,
- struct dvb_frontend_parameters* param)
+static int lgdt330x_get_frontend(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct lgdt330x_state *state = fe->demodulator_priv;
- param->frequency = state->current_frequency;
+ p->frequency = state->current_frequency;
return 0;
}
@@ -762,9 +771,9 @@ error:
}
static struct dvb_frontend_ops lgdt3302_ops = {
+ .delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
.info = {
.name= "LG Electronics LGDT3302 VSB/QAM Frontend",
- .type = FE_ATSC,
.frequency_min= 54000000,
.frequency_max= 858000000,
.frequency_stepsize= 62500,
@@ -785,9 +794,9 @@ static struct dvb_frontend_ops lgdt3302_ops = {
};
static struct dvb_frontend_ops lgdt3303_ops = {
+ .delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
.info = {
.name= "LG Electronics LGDT3303 VSB/QAM Frontend",
- .type = FE_ATSC,
.frequency_min= 54000000,
.frequency_max= 858000000,
.frequency_stepsize= 62500,
diff --git a/drivers/media/dvb/frontends/lgs8gl5.c b/drivers/media/dvb/frontends/lgs8gl5.c
index bb37ed289a05..2cec8041a106 100644
--- a/drivers/media/dvb/frontends/lgs8gl5.c
+++ b/drivers/media/dvb/frontends/lgs8gl5.c
@@ -311,18 +311,18 @@ lgs8gl5_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
static int
-lgs8gl5_set_frontend(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *p)
+lgs8gl5_set_frontend(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct lgs8gl5_state *state = fe->demodulator_priv;
dprintk("%s\n", __func__);
- if (p->u.ofdm.bandwidth != BANDWIDTH_8_MHZ)
+ if (p->bandwidth_hz != 8000000)
return -EINVAL;
if (fe->ops.tuner_ops.set_params) {
- fe->ops.tuner_ops.set_params(fe, p);
+ fe->ops.tuner_ops.set_params(fe);
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 0);
}
@@ -336,22 +336,21 @@ lgs8gl5_set_frontend(struct dvb_frontend *fe,
static int
-lgs8gl5_get_frontend(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *p)
+lgs8gl5_get_frontend(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct lgs8gl5_state *state = fe->demodulator_priv;
u8 inv = lgs8gl5_read_reg(state, REG_INVERSION);
- struct dvb_ofdm_parameters *o = &p->u.ofdm;
p->inversion = (inv & REG_INVERSION_ON) ? INVERSION_ON : INVERSION_OFF;
- o->code_rate_HP = FEC_1_2;
- o->code_rate_LP = FEC_7_8;
- o->guard_interval = GUARD_INTERVAL_1_32;
- o->transmission_mode = TRANSMISSION_MODE_2K;
- o->constellation = QAM_64;
- o->hierarchy_information = HIERARCHY_NONE;
- o->bandwidth = BANDWIDTH_8_MHZ;
+ p->code_rate_HP = FEC_1_2;
+ p->code_rate_LP = FEC_7_8;
+ p->guard_interval = GUARD_INTERVAL_1_32;
+ p->transmission_mode = TRANSMISSION_MODE_2K;
+ p->modulation = QAM_64;
+ p->hierarchy = HIERARCHY_NONE;
+ p->bandwidth_hz = 8000000;
return 0;
}
@@ -413,9 +412,9 @@ EXPORT_SYMBOL(lgs8gl5_attach);
static struct dvb_frontend_ops lgs8gl5_ops = {
+ .delsys = { SYS_DMBTH },
.info = {
.name = "Legend Silicon LGS-8GL5 DMB-TH",
- .type = FE_OFDM,
.frequency_min = 474000000,
.frequency_max = 858000000,
.frequency_stepsize = 10000,
diff --git a/drivers/media/dvb/frontends/lgs8gxx.c b/drivers/media/dvb/frontends/lgs8gxx.c
index 1172b54689f8..4de1d3520cd2 100644
--- a/drivers/media/dvb/frontends/lgs8gxx.c
+++ b/drivers/media/dvb/frontends/lgs8gxx.c
@@ -669,16 +669,16 @@ static int lgs8gxx_write(struct dvb_frontend *fe, const u8 buf[], int len)
return lgs8gxx_write_reg(priv, buf[0], buf[1]);
}
-static int lgs8gxx_set_fe(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *fe_params)
+static int lgs8gxx_set_fe(struct dvb_frontend *fe)
{
+
struct lgs8gxx_state *priv = fe->demodulator_priv;
dprintk("%s\n", __func__);
/* set frequency */
if (fe->ops.tuner_ops.set_params) {
- fe->ops.tuner_ops.set_params(fe, fe_params);
+ fe->ops.tuner_ops.set_params(fe);
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 0);
}
@@ -691,9 +691,9 @@ static int lgs8gxx_set_fe(struct dvb_frontend *fe,
return 0;
}
-static int lgs8gxx_get_fe(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *fe_params)
+static int lgs8gxx_get_fe(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *fe_params = &fe->dtv_property_cache;
dprintk("%s\n", __func__);
/* TODO: get real readings from device */
@@ -701,21 +701,21 @@ static int lgs8gxx_get_fe(struct dvb_frontend *fe,
fe_params->inversion = INVERSION_OFF;
/* bandwidth */
- fe_params->u.ofdm.bandwidth = BANDWIDTH_8_MHZ;
+ fe_params->bandwidth_hz = 8000000;
- fe_params->u.ofdm.code_rate_HP = FEC_AUTO;
- fe_params->u.ofdm.code_rate_LP = FEC_AUTO;
+ fe_params->code_rate_HP = FEC_AUTO;
+ fe_params->code_rate_LP = FEC_AUTO;
- fe_params->u.ofdm.constellation = QAM_AUTO;
+ fe_params->modulation = QAM_AUTO;
/* transmission mode */
- fe_params->u.ofdm.transmission_mode = TRANSMISSION_MODE_AUTO;
+ fe_params->transmission_mode = TRANSMISSION_MODE_AUTO;
/* guard interval */
- fe_params->u.ofdm.guard_interval = GUARD_INTERVAL_AUTO;
+ fe_params->guard_interval = GUARD_INTERVAL_AUTO;
/* hierarchy */
- fe_params->u.ofdm.hierarchy_information = HIERARCHY_NONE;
+ fe_params->hierarchy = HIERARCHY_NONE;
return 0;
}
@@ -994,9 +994,9 @@ static int lgs8gxx_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
}
static struct dvb_frontend_ops lgs8gxx_ops = {
+ .delsys = { SYS_DMBTH },
.info = {
.name = "Legend Silicon LGS8913/LGS8GXX DMB-TH",
- .type = FE_OFDM,
.frequency_min = 474000000,
.frequency_max = 858000000,
.frequency_stepsize = 10000,
diff --git a/drivers/media/dvb/frontends/mb86a16.c b/drivers/media/dvb/frontends/mb86a16.c
index c283112051b1..9ae40abfd71a 100644
--- a/drivers/media/dvb/frontends/mb86a16.c
+++ b/drivers/media/dvb/frontends/mb86a16.c
@@ -1621,13 +1621,13 @@ err:
return -EREMOTEIO;
}
-static enum dvbfe_search mb86a16_search(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *p)
+static enum dvbfe_search mb86a16_search(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct mb86a16_state *state = fe->demodulator_priv;
state->frequency = p->frequency / 1000;
- state->srate = p->u.qpsk.symbol_rate / 1000;
+ state->srate = p->symbol_rate / 1000;
if (!mb86a16_set_fe(state)) {
dprintk(verbose, MB86A16_ERROR, 1, "Successfully acquired LOCK");
@@ -1814,9 +1814,9 @@ static enum dvbfe_algo mb86a16_frontend_algo(struct dvb_frontend *fe)
}
static struct dvb_frontend_ops mb86a16_ops = {
+ .delsys = { SYS_DVBS },
.info = {
.name = "Fujitsu MB86A16 DVB-S",
- .type = FE_QPSK,
.frequency_min = 950000,
.frequency_max = 2150000,
.frequency_stepsize = 3000,
diff --git a/drivers/media/dvb/frontends/mb86a20s.c b/drivers/media/dvb/frontends/mb86a20s.c
index 0f867a5055fb..fade566927c3 100644
--- a/drivers/media/dvb/frontends/mb86a20s.c
+++ b/drivers/media/dvb/frontends/mb86a20s.c
@@ -61,244 +61,111 @@ static struct regdata mb86a20s_init[] = {
{ 0x70, 0xff },
{ 0x08, 0x01 },
{ 0x09, 0x3e },
- { 0x50, 0xd1 },
- { 0x51, 0x22 },
+ { 0x50, 0xd1 }, { 0x51, 0x22 },
{ 0x39, 0x01 },
{ 0x71, 0x00 },
- { 0x28, 0x2a },
- { 0x29, 0x00 },
- { 0x2a, 0xff },
- { 0x2b, 0x80 },
- { 0x28, 0x20 },
- { 0x29, 0x33 },
- { 0x2a, 0xdf },
- { 0x2b, 0xa9 },
+ { 0x28, 0x2a }, { 0x29, 0x00 }, { 0x2a, 0xff }, { 0x2b, 0x80 },
+ { 0x28, 0x20 }, { 0x29, 0x33 }, { 0x2a, 0xdf }, { 0x2b, 0xa9 },
+ { 0x28, 0x22 }, { 0x29, 0x00 }, { 0x2a, 0x1f }, { 0x2b, 0xf0 },
{ 0x3b, 0x21 },
{ 0x3c, 0x3a },
{ 0x01, 0x0d },
- { 0x04, 0x08 },
- { 0x05, 0x05 },
- { 0x04, 0x0e },
- { 0x05, 0x00 },
- { 0x04, 0x0f },
- { 0x05, 0x14 },
- { 0x04, 0x0b },
- { 0x05, 0x8c },
- { 0x04, 0x00 },
- { 0x05, 0x00 },
- { 0x04, 0x01 },
- { 0x05, 0x07 },
- { 0x04, 0x02 },
- { 0x05, 0x0f },
- { 0x04, 0x03 },
- { 0x05, 0xa0 },
- { 0x04, 0x09 },
- { 0x05, 0x00 },
- { 0x04, 0x0a },
- { 0x05, 0xff },
- { 0x04, 0x27 },
- { 0x05, 0x64 },
- { 0x04, 0x28 },
- { 0x05, 0x00 },
- { 0x04, 0x1e },
- { 0x05, 0xff },
- { 0x04, 0x29 },
- { 0x05, 0x0a },
- { 0x04, 0x32 },
- { 0x05, 0x0a },
- { 0x04, 0x14 },
- { 0x05, 0x02 },
- { 0x04, 0x04 },
- { 0x05, 0x00 },
- { 0x04, 0x05 },
- { 0x05, 0x22 },
- { 0x04, 0x06 },
- { 0x05, 0x0e },
- { 0x04, 0x07 },
- { 0x05, 0xd8 },
- { 0x04, 0x12 },
- { 0x05, 0x00 },
- { 0x04, 0x13 },
- { 0x05, 0xff },
+ { 0x04, 0x08 }, { 0x05, 0x05 },
+ { 0x04, 0x0e }, { 0x05, 0x00 },
+ { 0x04, 0x0f }, { 0x05, 0x14 },
+ { 0x04, 0x0b }, { 0x05, 0x8c },
+ { 0x04, 0x00 }, { 0x05, 0x00 },
+ { 0x04, 0x01 }, { 0x05, 0x07 },
+ { 0x04, 0x02 }, { 0x05, 0x0f },
+ { 0x04, 0x03 }, { 0x05, 0xa0 },
+ { 0x04, 0x09 }, { 0x05, 0x00 },
+ { 0x04, 0x0a }, { 0x05, 0xff },
+ { 0x04, 0x27 }, { 0x05, 0x64 },
+ { 0x04, 0x28 }, { 0x05, 0x00 },
+ { 0x04, 0x1e }, { 0x05, 0xff },
+ { 0x04, 0x29 }, { 0x05, 0x0a },
+ { 0x04, 0x32 }, { 0x05, 0x0a },
+ { 0x04, 0x14 }, { 0x05, 0x02 },
+ { 0x04, 0x04 }, { 0x05, 0x00 },
+ { 0x04, 0x05 }, { 0x05, 0x22 },
+ { 0x04, 0x06 }, { 0x05, 0x0e },
+ { 0x04, 0x07 }, { 0x05, 0xd8 },
+ { 0x04, 0x12 }, { 0x05, 0x00 },
+ { 0x04, 0x13 }, { 0x05, 0xff },
+ { 0x04, 0x15 }, { 0x05, 0x4e },
+ { 0x04, 0x16 }, { 0x05, 0x20 },
{ 0x52, 0x01 },
- { 0x50, 0xa7 },
- { 0x51, 0x00 },
- { 0x50, 0xa8 },
- { 0x51, 0xff },
- { 0x50, 0xa9 },
- { 0x51, 0xff },
- { 0x50, 0xaa },
- { 0x51, 0x00 },
- { 0x50, 0xab },
- { 0x51, 0xff },
- { 0x50, 0xac },
- { 0x51, 0xff },
- { 0x50, 0xad },
- { 0x51, 0x00 },
- { 0x50, 0xae },
- { 0x51, 0xff },
- { 0x50, 0xaf },
- { 0x51, 0xff },
+ { 0x50, 0xa7 }, { 0x51, 0xff },
+ { 0x50, 0xa8 }, { 0x51, 0xff },
+ { 0x50, 0xa9 }, { 0x51, 0xff },
+ { 0x50, 0xaa }, { 0x51, 0xff },
+ { 0x50, 0xab }, { 0x51, 0xff },
+ { 0x50, 0xac }, { 0x51, 0xff },
+ { 0x50, 0xad }, { 0x51, 0xff },
+ { 0x50, 0xae }, { 0x51, 0xff },
+ { 0x50, 0xaf }, { 0x51, 0xff },
{ 0x5e, 0x07 },
- { 0x50, 0xdc },
- { 0x51, 0x01 },
- { 0x50, 0xdd },
- { 0x51, 0xf4 },
- { 0x50, 0xde },
- { 0x51, 0x01 },
- { 0x50, 0xdf },
- { 0x51, 0xf4 },
- { 0x50, 0xe0 },
- { 0x51, 0x01 },
- { 0x50, 0xe1 },
- { 0x51, 0xf4 },
- { 0x50, 0xb0 },
- { 0x51, 0x07 },
- { 0x50, 0xb2 },
- { 0x51, 0xff },
- { 0x50, 0xb3 },
- { 0x51, 0xff },
- { 0x50, 0xb4 },
- { 0x51, 0xff },
- { 0x50, 0xb5 },
- { 0x51, 0xff },
- { 0x50, 0xb6 },
- { 0x51, 0xff },
- { 0x50, 0xb7 },
- { 0x51, 0xff },
- { 0x50, 0x50 },
- { 0x51, 0x02 },
- { 0x50, 0x51 },
- { 0x51, 0x04 },
+ { 0x50, 0xdc }, { 0x51, 0x01 },
+ { 0x50, 0xdd }, { 0x51, 0xf4 },
+ { 0x50, 0xde }, { 0x51, 0x01 },
+ { 0x50, 0xdf }, { 0x51, 0xf4 },
+ { 0x50, 0xe0 }, { 0x51, 0x01 },
+ { 0x50, 0xe1 }, { 0x51, 0xf4 },
+ { 0x50, 0xb0 }, { 0x51, 0x07 },
+ { 0x50, 0xb2 }, { 0x51, 0xff },
+ { 0x50, 0xb3 }, { 0x51, 0xff },
+ { 0x50, 0xb4 }, { 0x51, 0xff },
+ { 0x50, 0xb5 }, { 0x51, 0xff },
+ { 0x50, 0xb6 }, { 0x51, 0xff },
+ { 0x50, 0xb7 }, { 0x51, 0xff },
+ { 0x50, 0x50 }, { 0x51, 0x02 },
+ { 0x50, 0x51 }, { 0x51, 0x04 },
{ 0x45, 0x04 },
{ 0x48, 0x04 },
- { 0x50, 0xd5 },
- { 0x51, 0x01 }, /* Serial */
- { 0x50, 0xd6 },
- { 0x51, 0x1f },
- { 0x50, 0xd2 },
- { 0x51, 0x03 },
- { 0x50, 0xd7 },
- { 0x51, 0x3f },
+ { 0x50, 0xd5 }, { 0x51, 0x01 }, /* Serial */
+ { 0x50, 0xd6 }, { 0x51, 0x1f },
+ { 0x50, 0xd2 }, { 0x51, 0x03 },
+ { 0x50, 0xd7 }, { 0x51, 0x3f },
+ { 0x28, 0x74 }, { 0x29, 0x00 }, { 0x28, 0x74 }, { 0x29, 0x40 },
+ { 0x28, 0x46 }, { 0x29, 0x2c }, { 0x28, 0x46 }, { 0x29, 0x0c },
+ { 0x04, 0x40 }, { 0x05, 0x01 },
+ { 0x28, 0x00 }, { 0x29, 0x10 },
+ { 0x28, 0x05 }, { 0x29, 0x02 },
{ 0x1c, 0x01 },
- { 0x28, 0x06 },
- { 0x29, 0x00 },
- { 0x2a, 0x00 },
- { 0x2b, 0x03 },
- { 0x28, 0x07 },
- { 0x29, 0x00 },
- { 0x2a, 0x00 },
- { 0x2b, 0x0d },
- { 0x28, 0x08 },
- { 0x29, 0x00 },
- { 0x2a, 0x00 },
- { 0x2b, 0x02 },
- { 0x28, 0x09 },
- { 0x29, 0x00 },
- { 0x2a, 0x00 },
- { 0x2b, 0x01 },
- { 0x28, 0x0a },
- { 0x29, 0x00 },
- { 0x2a, 0x00 },
- { 0x2b, 0x21 },
- { 0x28, 0x0b },
- { 0x29, 0x00 },
- { 0x2a, 0x00 },
- { 0x2b, 0x29 },
- { 0x28, 0x0c },
- { 0x29, 0x00 },
- { 0x2a, 0x00 },
- { 0x2b, 0x16 },
- { 0x28, 0x0d },
- { 0x29, 0x00 },
- { 0x2a, 0x00 },
- { 0x2b, 0x31 },
- { 0x28, 0x0e },
- { 0x29, 0x00 },
- { 0x2a, 0x00 },
- { 0x2b, 0x0e },
- { 0x28, 0x0f },
- { 0x29, 0x00 },
- { 0x2a, 0x00 },
- { 0x2b, 0x4e },
- { 0x28, 0x10 },
- { 0x29, 0x00 },
- { 0x2a, 0x00 },
- { 0x2b, 0x46 },
- { 0x28, 0x11 },
- { 0x29, 0x00 },
- { 0x2a, 0x00 },
- { 0x2b, 0x0f },
- { 0x28, 0x12 },
- { 0x29, 0x00 },
- { 0x2a, 0x00 },
- { 0x2b, 0x56 },
- { 0x28, 0x13 },
- { 0x29, 0x00 },
- { 0x2a, 0x00 },
- { 0x2b, 0x35 },
- { 0x28, 0x14 },
- { 0x29, 0x00 },
- { 0x2a, 0x01 },
- { 0x2b, 0xbe },
- { 0x28, 0x15 },
- { 0x29, 0x00 },
- { 0x2a, 0x01 },
- { 0x2b, 0x84 },
- { 0x28, 0x16 },
- { 0x29, 0x00 },
- { 0x2a, 0x03 },
- { 0x2b, 0xee },
- { 0x28, 0x17 },
- { 0x29, 0x00 },
- { 0x2a, 0x00 },
- { 0x2b, 0x98 },
- { 0x28, 0x18 },
- { 0x29, 0x00 },
- { 0x2a, 0x00 },
- { 0x2b, 0x9f },
- { 0x28, 0x19 },
- { 0x29, 0x00 },
- { 0x2a, 0x07 },
- { 0x2b, 0xb2 },
- { 0x28, 0x1a },
- { 0x29, 0x00 },
- { 0x2a, 0x06 },
- { 0x2b, 0xc2 },
- { 0x28, 0x1b },
- { 0x29, 0x00 },
- { 0x2a, 0x07 },
- { 0x2b, 0x4a },
- { 0x28, 0x1c },
- { 0x29, 0x00 },
- { 0x2a, 0x01 },
- { 0x2b, 0xbc },
- { 0x28, 0x1d },
- { 0x29, 0x00 },
- { 0x2a, 0x04 },
- { 0x2b, 0xba },
- { 0x28, 0x1e },
- { 0x29, 0x00 },
- { 0x2a, 0x06 },
- { 0x2b, 0x14 },
- { 0x50, 0x1e },
- { 0x51, 0x5d },
- { 0x50, 0x22 },
- { 0x51, 0x00 },
- { 0x50, 0x23 },
- { 0x51, 0xc8 },
- { 0x50, 0x24 },
- { 0x51, 0x00 },
- { 0x50, 0x25 },
- { 0x51, 0xf0 },
- { 0x50, 0x26 },
- { 0x51, 0x00 },
- { 0x50, 0x27 },
- { 0x51, 0xc3 },
- { 0x50, 0x39 },
- { 0x51, 0x02 },
- { 0x50, 0xd5 },
- { 0x51, 0x01 },
+ { 0x28, 0x06 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x03 },
+ { 0x28, 0x07 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x0d },
+ { 0x28, 0x08 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x02 },
+ { 0x28, 0x09 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x01 },
+ { 0x28, 0x0a }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x21 },
+ { 0x28, 0x0b }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x29 },
+ { 0x28, 0x0c }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x16 },
+ { 0x28, 0x0d }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x31 },
+ { 0x28, 0x0e }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x0e },
+ { 0x28, 0x0f }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x4e },
+ { 0x28, 0x10 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x46 },
+ { 0x28, 0x11 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x0f },
+ { 0x28, 0x12 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x56 },
+ { 0x28, 0x13 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x35 },
+ { 0x28, 0x14 }, { 0x29, 0x00 }, { 0x2a, 0x01 }, { 0x2b, 0xbe },
+ { 0x28, 0x15 }, { 0x29, 0x00 }, { 0x2a, 0x01 }, { 0x2b, 0x84 },
+ { 0x28, 0x16 }, { 0x29, 0x00 }, { 0x2a, 0x03 }, { 0x2b, 0xee },
+ { 0x28, 0x17 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x98 },
+ { 0x28, 0x18 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x9f },
+ { 0x28, 0x19 }, { 0x29, 0x00 }, { 0x2a, 0x07 }, { 0x2b, 0xb2 },
+ { 0x28, 0x1a }, { 0x29, 0x00 }, { 0x2a, 0x06 }, { 0x2b, 0xc2 },
+ { 0x28, 0x1b }, { 0x29, 0x00 }, { 0x2a, 0x07 }, { 0x2b, 0x4a },
+ { 0x28, 0x1c }, { 0x29, 0x00 }, { 0x2a, 0x01 }, { 0x2b, 0xbc },
+ { 0x28, 0x1d }, { 0x29, 0x00 }, { 0x2a, 0x04 }, { 0x2b, 0xba },
+ { 0x28, 0x1e }, { 0x29, 0x00 }, { 0x2a, 0x06 }, { 0x2b, 0x14 },
+ { 0x50, 0x1e }, { 0x51, 0x5d },
+ { 0x50, 0x22 }, { 0x51, 0x00 },
+ { 0x50, 0x23 }, { 0x51, 0xc8 },
+ { 0x50, 0x24 }, { 0x51, 0x00 },
+ { 0x50, 0x25 }, { 0x51, 0xf0 },
+ { 0x50, 0x26 }, { 0x51, 0x00 },
+ { 0x50, 0x27 }, { 0x51, 0xc3 },
+ { 0x50, 0x39 }, { 0x51, 0x02 },
+ { 0x28, 0x6a }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x00 },
{ 0xd0, 0x00 },
};
@@ -485,18 +352,23 @@ static int mb86a20s_read_status(struct dvb_frontend *fe, fe_status_t *status)
return 0;
}
-static int mb86a20s_set_frontend(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *p)
+static int mb86a20s_set_frontend(struct dvb_frontend *fe)
{
struct mb86a20s_state *state = fe->demodulator_priv;
int rc;
+#if 0
+ /*
+ * FIXME: Properly implement the set frontend properties
+ */
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+#endif
dprintk("\n");
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
dprintk("Calling tuner set parameters\n");
- fe->ops.tuner_ops.set_params(fe, p);
+ fe->ops.tuner_ops.set_params(fe);
/*
* Make it more reliable: if, for some reason, the initial
@@ -520,22 +392,212 @@ static int mb86a20s_set_frontend(struct dvb_frontend *fe,
return rc;
}
-static int mb86a20s_get_frontend(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *p)
+static int mb86a20s_get_modulation(struct mb86a20s_state *state,
+ unsigned layer)
+{
+ int rc;
+ static unsigned char reg[] = {
+ [0] = 0x86, /* Layer A */
+ [1] = 0x8a, /* Layer B */
+ [2] = 0x8e, /* Layer C */
+ };
+
+ if (layer >= ARRAY_SIZE(reg))
+ return -EINVAL;
+ rc = mb86a20s_writereg(state, 0x6d, reg[layer]);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_readreg(state, 0x6e);
+ if (rc < 0)
+ return rc;
+ switch ((rc & 0x70) >> 4) {
+ case 0:
+ return DQPSK;
+ case 1:
+ return QPSK;
+ case 2:
+ return QAM_16;
+ case 3:
+ return QAM_64;
+ default:
+ return QAM_AUTO;
+ }
+}
+
+static int mb86a20s_get_fec(struct mb86a20s_state *state,
+ unsigned layer)
{
+ int rc;
- /* FIXME: For now, it does nothing */
+ static unsigned char reg[] = {
+ [0] = 0x87, /* Layer A */
+ [1] = 0x8b, /* Layer B */
+ [2] = 0x8f, /* Layer C */
+ };
- fe->dtv_property_cache.bandwidth_hz = 6000000;
- fe->dtv_property_cache.transmission_mode = TRANSMISSION_MODE_AUTO;
- fe->dtv_property_cache.guard_interval = GUARD_INTERVAL_AUTO;
- fe->dtv_property_cache.isdbt_partial_reception = 0;
+ if (layer >= ARRAY_SIZE(reg))
+ return -EINVAL;
+ rc = mb86a20s_writereg(state, 0x6d, reg[layer]);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_readreg(state, 0x6e);
+ if (rc < 0)
+ return rc;
+ switch (rc) {
+ case 0:
+ return FEC_1_2;
+ case 1:
+ return FEC_2_3;
+ case 2:
+ return FEC_3_4;
+ case 3:
+ return FEC_5_6;
+ case 4:
+ return FEC_7_8;
+ default:
+ return FEC_AUTO;
+ }
+}
+
+static int mb86a20s_get_interleaving(struct mb86a20s_state *state,
+ unsigned layer)
+{
+ int rc;
+
+ static unsigned char reg[] = {
+ [0] = 0x88, /* Layer A */
+ [1] = 0x8c, /* Layer B */
+ [2] = 0x90, /* Layer C */
+ };
+
+ if (layer >= ARRAY_SIZE(reg))
+ return -EINVAL;
+ rc = mb86a20s_writereg(state, 0x6d, reg[layer]);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_readreg(state, 0x6e);
+ if (rc < 0)
+ return rc;
+ if (rc > 3)
+ return -EINVAL; /* Not used */
+ return rc;
+}
+
+static int mb86a20s_get_segment_count(struct mb86a20s_state *state,
+ unsigned layer)
+{
+ int rc, count;
+
+ static unsigned char reg[] = {
+ [0] = 0x89, /* Layer A */
+ [1] = 0x8d, /* Layer B */
+ [2] = 0x91, /* Layer C */
+ };
+
+ if (layer >= ARRAY_SIZE(reg))
+ return -EINVAL;
+ rc = mb86a20s_writereg(state, 0x6d, reg[layer]);
+ if (rc < 0)
+ return rc;
+ rc = mb86a20s_readreg(state, 0x6e);
+ if (rc < 0)
+ return rc;
+ count = (rc >> 4) & 0x0f;
+
+ return count;
+}
+
+static int mb86a20s_get_frontend(struct dvb_frontend *fe)
+{
+ struct mb86a20s_state *state = fe->demodulator_priv;
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+ int i, rc;
+
+ /* Fixed parameters */
+ p->delivery_system = SYS_ISDBT;
+ p->bandwidth_hz = 6000000;
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0);
+
+ /* Check for partial reception */
+ rc = mb86a20s_writereg(state, 0x6d, 0x85);
+ if (rc >= 0)
+ rc = mb86a20s_readreg(state, 0x6e);
+ if (rc >= 0)
+ p->isdbt_partial_reception = (rc & 0x10) ? 1 : 0;
+
+ /* Get per-layer data */
+ p->isdbt_layer_enabled = 0;
+ for (i = 0; i < 3; i++) {
+ rc = mb86a20s_get_segment_count(state, i);
+ if (rc >= 0 && rc < 14)
+ p->layer[i].segment_count = rc;
+ if (rc == 0x0f)
+ continue;
+ p->isdbt_layer_enabled |= 1 << i;
+ rc = mb86a20s_get_modulation(state, i);
+ if (rc >= 0)
+ p->layer[i].modulation = rc;
+ rc = mb86a20s_get_fec(state, i);
+ if (rc >= 0)
+ p->layer[i].fec = rc;
+ rc = mb86a20s_get_interleaving(state, i);
+ if (rc >= 0)
+ p->layer[i].interleaving = rc;
+ }
+
+ p->isdbt_sb_mode = 0;
+ rc = mb86a20s_writereg(state, 0x6d, 0x84);
+ if ((rc >= 0) && ((rc & 0x60) == 0x20)) {
+ p->isdbt_sb_mode = 1;
+ /* At least, one segment should exist */
+ if (!p->isdbt_sb_segment_count)
+ p->isdbt_sb_segment_count = 1;
+ } else
+ p->isdbt_sb_segment_count = 0;
+
+ /* Get transmission mode and guard interval */
+ p->transmission_mode = TRANSMISSION_MODE_AUTO;
+ p->guard_interval = GUARD_INTERVAL_AUTO;
+ rc = mb86a20s_readreg(state, 0x07);
+ if (rc >= 0) {
+ if ((rc & 0x60) == 0x20) {
+ switch (rc & 0x0c >> 2) {
+ case 0:
+ p->transmission_mode = TRANSMISSION_MODE_2K;
+ break;
+ case 1:
+ p->transmission_mode = TRANSMISSION_MODE_4K;
+ break;
+ case 2:
+ p->transmission_mode = TRANSMISSION_MODE_8K;
+ break;
+ }
+ }
+ if (!(rc & 0x10)) {
+ switch (rc & 0x3) {
+ case 0:
+ p->guard_interval = GUARD_INTERVAL_1_4;
+ break;
+ case 1:
+ p->guard_interval = GUARD_INTERVAL_1_8;
+ break;
+ case 2:
+ p->guard_interval = GUARD_INTERVAL_1_16;
+ break;
+ }
+ }
+ }
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1);
return 0;
}
static int mb86a20s_tune(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *params,
+ bool re_tune,
unsigned int mode_flags,
unsigned int *delay,
fe_status_t *status)
@@ -544,8 +606,8 @@ static int mb86a20s_tune(struct dvb_frontend *fe,
dprintk("\n");
- if (params != NULL)
- rc = mb86a20s_set_frontend(fe, params);
+ if (re_tune)
+ rc = mb86a20s_set_frontend(fe);
if (!(mode_flags & FE_TUNE_MODE_ONESHOT))
mb86a20s_read_status(fe, status);
@@ -608,10 +670,10 @@ error:
EXPORT_SYMBOL(mb86a20s_attach);
static struct dvb_frontend_ops mb86a20s_ops = {
+ .delsys = { SYS_ISDBT },
/* Use dib8000 values per default */
.info = {
.name = "Fujitsu mb86A20s",
- .type = FE_OFDM,
.caps = FE_CAN_INVERSION_AUTO | FE_CAN_RECOVER |
FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
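The new mb86a20s_get_frontend() above fills the DVBv5 ISDB-T fields (isdbt_partial_reception, isdbt_layer_enabled, per-layer modulation/fec/interleaving) straight into the property cache. One detail worth spelling out in the transmission-mode decode: in C the shift operator binds tighter than bitwise AND, so rc & 0x0c >> 2 parses as rc & (0x0c >> 2), i.e. rc & 3. The stand-alone sketch below contrasts that with an explicitly parenthesised extraction of bits 3:2.

#include <stdio.h>

/*
 * Stand-alone illustration, not driver code.  ">>" binds tighter than
 * "&", so "rc & 0x0c >> 2" parses as "rc & (0x0c >> 2)", which is
 * "rc & 3"; extracting bits 3:2 needs the parentheses shown here.
 */
static unsigned int bits_3_2(unsigned int rc)
{
	return (rc & 0x0c) >> 2;
}

int main(void)
{
	unsigned int rc = 0x28;				/* sample register value */

	printf("(rc & 0x0c) >> 2 = %u\n", bits_3_2(rc));	/* 2 */
	printf("rc & (0x0c >> 2) = %u\n", rc & (0x0c >> 2));	/* 0: how the unparenthesised form parses */
	return 0;
}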
diff --git a/drivers/media/dvb/frontends/mt312.c b/drivers/media/dvb/frontends/mt312.c
index 83e6f1a1b700..e20bf13aa860 100644
--- a/drivers/media/dvb/frontends/mt312.c
+++ b/drivers/media/dvb/frontends/mt312.c
@@ -531,9 +531,9 @@ static int mt312_read_ucblocks(struct dvb_frontend *fe, u32 *ubc)
return 0;
}
-static int mt312_set_frontend(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *p)
+static int mt312_set_frontend(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct mt312_state *state = fe->demodulator_priv;
int ret;
u8 buf[5], config_val;
@@ -553,16 +553,16 @@ static int mt312_set_frontend(struct dvb_frontend *fe,
|| (p->inversion > INVERSION_ON))
return -EINVAL;
- if ((p->u.qpsk.symbol_rate < fe->ops.info.symbol_rate_min)
- || (p->u.qpsk.symbol_rate > fe->ops.info.symbol_rate_max))
+ if ((p->symbol_rate < fe->ops.info.symbol_rate_min)
+ || (p->symbol_rate > fe->ops.info.symbol_rate_max))
return -EINVAL;
- if ((p->u.qpsk.fec_inner < FEC_NONE)
- || (p->u.qpsk.fec_inner > FEC_AUTO))
+ if ((p->fec_inner < FEC_NONE)
+ || (p->fec_inner > FEC_AUTO))
return -EINVAL;
- if ((p->u.qpsk.fec_inner == FEC_4_5)
- || (p->u.qpsk.fec_inner == FEC_8_9))
+ if ((p->fec_inner == FEC_4_5)
+ || (p->fec_inner == FEC_8_9))
return -EINVAL;
switch (state->id) {
@@ -574,7 +574,7 @@ static int mt312_set_frontend(struct dvb_frontend *fe,
ret = mt312_readreg(state, CONFIG, &config_val);
if (ret < 0)
return ret;
- if (p->u.qpsk.symbol_rate >= 30000000) {
+ if (p->symbol_rate >= 30000000) {
/* Note that 30MS/s should use 90MHz */
if (state->freq_mult == 6) {
/* We are running 60MHz */
@@ -603,25 +603,25 @@ static int mt312_set_frontend(struct dvb_frontend *fe,
}
if (fe->ops.tuner_ops.set_params) {
- fe->ops.tuner_ops.set_params(fe, p);
+ fe->ops.tuner_ops.set_params(fe);
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 0);
}
/* sr = (u16)(sr * 256.0 / 1000000.0) */
- sr = mt312_div(p->u.qpsk.symbol_rate * 4, 15625);
+ sr = mt312_div(p->symbol_rate * 4, 15625);
/* SYM_RATE */
buf[0] = (sr >> 8) & 0x3f;
buf[1] = (sr >> 0) & 0xff;
/* VIT_MODE */
- buf[2] = inv_tab[p->inversion] | fec_tab[p->u.qpsk.fec_inner];
+ buf[2] = inv_tab[p->inversion] | fec_tab[p->fec_inner];
/* QPSK_CTRL */
buf[3] = 0x40; /* swap I and Q before QPSK demodulation */
- if (p->u.qpsk.symbol_rate < 10000000)
+ if (p->symbol_rate < 10000000)
buf[3] |= 0x04; /* use afc mode */
/* GO */
@@ -636,9 +636,9 @@ static int mt312_set_frontend(struct dvb_frontend *fe,
return 0;
}
-static int mt312_get_frontend(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *p)
+static int mt312_get_frontend(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct mt312_state *state = fe->demodulator_priv;
int ret;
@@ -646,11 +646,11 @@ static int mt312_get_frontend(struct dvb_frontend *fe,
if (ret < 0)
return ret;
- ret = mt312_get_symbol_rate(state, &p->u.qpsk.symbol_rate);
+ ret = mt312_get_symbol_rate(state, &p->symbol_rate);
if (ret < 0)
return ret;
- ret = mt312_get_code_rate(state, &p->u.qpsk.fec_inner);
+ ret = mt312_get_code_rate(state, &p->fec_inner);
if (ret < 0)
return ret;
@@ -738,10 +738,9 @@ static void mt312_release(struct dvb_frontend *fe)
#define MT312_SYS_CLK 90000000UL /* 90 MHz */
static struct dvb_frontend_ops mt312_ops = {
-
+ .delsys = { SYS_DVBS },
.info = {
.name = "Zarlink ???? DVB-S",
- .type = FE_QPSK,
.frequency_min = 950000,
.frequency_max = 2150000,
/* FIXME: adjust freq to real used xtal */
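The mt312 symbol-rate register write above keeps the old comment sr = (u16)(sr * 256.0 / 1000000.0) while computing p->symbol_rate * 4 / 15625; the two are the same ratio, and the reduced form keeps the 32-bit intermediate from overflowing at DVB-S rates. A small, self-contained check of that arithmetic follows; mt312_sr() here is a simplified stand-in for the driver's mt312_div() call.

#include <stdio.h>
#include <stdint.h>

/*
 * Stand-alone check of the SYM_RATE arithmetic.  4/15625 and
 * 256/1000000 are the same ratio, and the reduced form keeps the
 * 32-bit intermediate (symbol_rate * 4) from overflowing.
 */
static uint32_t mt312_sr(uint32_t symbol_rate)
{
	return symbol_rate * 4 / 15625;		/* == symbol_rate * 256 / 1000000 */
}

int main(void)
{
	uint32_t symbol_rate = 27500000;	/* 27.5 MS/s, a common DVB-S rate */

	printf("SYM_RATE = %u\n", mt312_sr(symbol_rate));	/* 7040 */
	return 0;
}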
diff --git a/drivers/media/dvb/frontends/mt352.c b/drivers/media/dvb/frontends/mt352.c
index 319672f8e1a7..2c3b50e828d7 100644
--- a/drivers/media/dvb/frontends/mt352.c
+++ b/drivers/media/dvb/frontends/mt352.c
@@ -111,20 +111,20 @@ static int mt352_sleep(struct dvb_frontend* fe)
}
static void mt352_calc_nominal_rate(struct mt352_state* state,
- enum fe_bandwidth bandwidth,
+ u32 bandwidth,
unsigned char *buf)
{
u32 adc_clock = 20480; /* 20.340 MHz */
u32 bw,value;
switch (bandwidth) {
- case BANDWIDTH_6_MHZ:
+ case 6000000:
bw = 6;
break;
- case BANDWIDTH_7_MHZ:
+ case 7000000:
bw = 7;
break;
- case BANDWIDTH_8_MHZ:
+ case 8000000:
default:
bw = 8;
break;
@@ -166,15 +166,14 @@ static void mt352_calc_input_freq(struct mt352_state* state,
buf[1] = lsb(value);
}
-static int mt352_set_parameters(struct dvb_frontend* fe,
- struct dvb_frontend_parameters *param)
+static int mt352_set_parameters(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *op = &fe->dtv_property_cache;
struct mt352_state* state = fe->demodulator_priv;
unsigned char buf[13];
static unsigned char tuner_go[] = { 0x5d, 0x01 };
static unsigned char fsm_go[] = { 0x5e, 0x01 };
unsigned int tps = 0;
- struct dvb_ofdm_parameters *op = &param->u.ofdm;
switch (op->code_rate_HP) {
case FEC_2_3:
@@ -213,14 +212,14 @@ static int mt352_set_parameters(struct dvb_frontend* fe,
case FEC_AUTO:
break;
case FEC_NONE:
- if (op->hierarchy_information == HIERARCHY_AUTO ||
- op->hierarchy_information == HIERARCHY_NONE)
+ if (op->hierarchy == HIERARCHY_AUTO ||
+ op->hierarchy == HIERARCHY_NONE)
break;
default:
return -EINVAL;
}
- switch (op->constellation) {
+ switch (op->modulation) {
case QPSK:
break;
case QAM_AUTO:
@@ -262,7 +261,7 @@ static int mt352_set_parameters(struct dvb_frontend* fe,
return -EINVAL;
}
- switch (op->hierarchy_information) {
+ switch (op->hierarchy) {
case HIERARCHY_AUTO:
case HIERARCHY_NONE:
break;
@@ -288,12 +287,12 @@ static int mt352_set_parameters(struct dvb_frontend* fe,
buf[3] = 0x50; // old
// buf[3] = 0xf4; // pinnacle
- mt352_calc_nominal_rate(state, op->bandwidth, buf+4);
+ mt352_calc_nominal_rate(state, op->bandwidth_hz, buf+4);
mt352_calc_input_freq(state, buf+6);
if (state->config.no_tuner) {
if (fe->ops.tuner_ops.set_params) {
- fe->ops.tuner_ops.set_params(fe, param);
+ fe->ops.tuner_ops.set_params(fe);
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 0);
}
@@ -302,7 +301,7 @@ static int mt352_set_parameters(struct dvb_frontend* fe,
_mt352_write(fe, fsm_go, 2);
} else {
if (fe->ops.tuner_ops.calc_regs) {
- fe->ops.tuner_ops.calc_regs(fe, param, buf+8, 5);
+ fe->ops.tuner_ops.calc_regs(fe, buf+8, 5);
buf[8] <<= 1;
_mt352_write(fe, buf, sizeof(buf));
_mt352_write(fe, tuner_go, 2);
@@ -312,14 +311,13 @@ static int mt352_set_parameters(struct dvb_frontend* fe,
return 0;
}
-static int mt352_get_parameters(struct dvb_frontend* fe,
- struct dvb_frontend_parameters *param)
+static int mt352_get_parameters(struct dvb_frontend* fe)
{
+ struct dtv_frontend_properties *op = &fe->dtv_property_cache;
struct mt352_state* state = fe->demodulator_priv;
u16 tps;
u16 div;
u8 trl;
- struct dvb_ofdm_parameters *op = &param->u.ofdm;
static const u8 tps_fec_to_api[8] =
{
FEC_1_2,
@@ -348,16 +346,16 @@ static int mt352_get_parameters(struct dvb_frontend* fe,
switch ( (tps >> 13) & 3)
{
case 0:
- op->constellation = QPSK;
+ op->modulation = QPSK;
break;
case 1:
- op->constellation = QAM_16;
+ op->modulation = QAM_16;
break;
case 2:
- op->constellation = QAM_64;
+ op->modulation = QAM_64;
break;
default:
- op->constellation = QAM_AUTO;
+ op->modulation = QAM_AUTO;
break;
}
@@ -385,36 +383,36 @@ static int mt352_get_parameters(struct dvb_frontend* fe,
switch ( (tps >> 10) & 7)
{
case 0:
- op->hierarchy_information = HIERARCHY_NONE;
+ op->hierarchy = HIERARCHY_NONE;
break;
case 1:
- op->hierarchy_information = HIERARCHY_1;
+ op->hierarchy = HIERARCHY_1;
break;
case 2:
- op->hierarchy_information = HIERARCHY_2;
+ op->hierarchy = HIERARCHY_2;
break;
case 3:
- op->hierarchy_information = HIERARCHY_4;
+ op->hierarchy = HIERARCHY_4;
break;
default:
- op->hierarchy_information = HIERARCHY_AUTO;
+ op->hierarchy = HIERARCHY_AUTO;
break;
}
- param->frequency = ( 500 * (div - IF_FREQUENCYx6) ) / 3 * 1000;
+ op->frequency = (500 * (div - IF_FREQUENCYx6)) / 3 * 1000;
if (trl == 0x72)
- op->bandwidth = BANDWIDTH_8_MHZ;
+ op->bandwidth_hz = 8000000;
else if (trl == 0x64)
- op->bandwidth = BANDWIDTH_7_MHZ;
+ op->bandwidth_hz = 7000000;
else
- op->bandwidth = BANDWIDTH_6_MHZ;
+ op->bandwidth_hz = 6000000;
if (mt352_read_register(state, STATUS_2) & 0x02)
- param->inversion = INVERSION_OFF;
+ op->inversion = INVERSION_OFF;
else
- param->inversion = INVERSION_ON;
+ op->inversion = INVERSION_ON;
return 0;
}
@@ -569,10 +567,9 @@ error:
}
static struct dvb_frontend_ops mt352_ops = {
-
+ .delsys = { SYS_DVBT },
.info = {
.name = "Zarlink MT352 DVB-T",
- .type = FE_OFDM,
.frequency_min = 174000000,
.frequency_max = 862000000,
.frequency_stepsize = 166667,
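With the property cache, bandwidth reaches the driver as p->bandwidth_hz in plain Hz rather than the old BANDWIDTH_* enum, which is why mt352_calc_nominal_rate() above (and nxt6000_set_bandwidth() further down) now switch on 6000000/7000000/8000000. A minimal stand-alone sketch of that mapping, with bw_mhz() as a hypothetical helper name:

#include <stdio.h>
#include <stdint.h>

/*
 * Simplified stand-in for the bandwidth handling above.  DVBv5 hands
 * the driver a value in Hz, so the switch is on plain numbers,
 * defaulting to 8 MHz as mt352 does.
 */
static unsigned int bw_mhz(uint32_t bandwidth_hz)
{
	switch (bandwidth_hz) {
	case 6000000:
		return 6;
	case 7000000:
		return 7;
	case 8000000:
	default:
		return 8;
	}
}

int main(void)
{
	printf("%u MHz\n", bw_mhz(7000000));	/* 7 */
	return 0;
}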
diff --git a/drivers/media/dvb/frontends/nxt200x.c b/drivers/media/dvb/frontends/nxt200x.c
index eac20650499f..49ca78d883b1 100644
--- a/drivers/media/dvb/frontends/nxt200x.c
+++ b/drivers/media/dvb/frontends/nxt200x.c
@@ -528,9 +528,9 @@ static int nxt2004_load_firmware (struct dvb_frontend* fe, const struct firmware
return 0;
};
-static int nxt200x_setup_frontend_parameters (struct dvb_frontend* fe,
- struct dvb_frontend_parameters *p)
+static int nxt200x_setup_frontend_parameters(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct nxt200x_state* state = fe->demodulator_priv;
u8 buf[5];
@@ -546,7 +546,7 @@ static int nxt200x_setup_frontend_parameters (struct dvb_frontend* fe,
}
/* set additional params */
- switch (p->u.vsb.modulation) {
+ switch (p->modulation) {
case QAM_64:
case QAM_256:
/* Set punctured clock for QAM */
@@ -566,7 +566,7 @@ static int nxt200x_setup_frontend_parameters (struct dvb_frontend* fe,
if (fe->ops.tuner_ops.calc_regs) {
/* get tuning information */
- fe->ops.tuner_ops.calc_regs(fe, p, buf, 5);
+ fe->ops.tuner_ops.calc_regs(fe, buf, 5);
/* write frequency information */
nxt200x_writetuner(state, buf);
@@ -576,7 +576,7 @@ static int nxt200x_setup_frontend_parameters (struct dvb_frontend* fe,
nxt200x_agc_reset(state);
/* set target power level */
- switch (p->u.vsb.modulation) {
+ switch (p->modulation) {
case QAM_64:
case QAM_256:
buf[0] = 0x74;
@@ -620,7 +620,7 @@ static int nxt200x_setup_frontend_parameters (struct dvb_frontend* fe,
}
/* write sdmx input */
- switch (p->u.vsb.modulation) {
+ switch (p->modulation) {
case QAM_64:
buf[0] = 0x68;
break;
@@ -714,7 +714,7 @@ static int nxt200x_setup_frontend_parameters (struct dvb_frontend* fe,
}
/* write agc ucgp0 */
- switch (p->u.vsb.modulation) {
+ switch (p->modulation) {
case QAM_64:
buf[0] = 0x02;
break;
@@ -1203,10 +1203,9 @@ error:
}
static struct dvb_frontend_ops nxt200x_ops = {
-
+ .delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
.info = {
.name = "Nextwave NXT200X VSB/QAM frontend",
- .type = FE_ATSC,
.frequency_min = 54000000,
.frequency_max = 860000000,
.frequency_stepsize = 166666, /* stepsize is just a guess */
diff --git a/drivers/media/dvb/frontends/nxt6000.c b/drivers/media/dvb/frontends/nxt6000.c
index 6599b8fea9e9..90ae6c72c0e3 100644
--- a/drivers/media/dvb/frontends/nxt6000.c
+++ b/drivers/media/dvb/frontends/nxt6000.c
@@ -81,22 +81,21 @@ static void nxt6000_reset(struct nxt6000_state* state)
nxt6000_writereg(state, OFDM_COR_CTL, val | COREACT);
}
-static int nxt6000_set_bandwidth(struct nxt6000_state* state, fe_bandwidth_t bandwidth)
+static int nxt6000_set_bandwidth(struct nxt6000_state *state, u32 bandwidth)
{
u16 nominal_rate;
int result;
switch (bandwidth) {
-
- case BANDWIDTH_6_MHZ:
+ case 6000000:
nominal_rate = 0x55B7;
break;
- case BANDWIDTH_7_MHZ:
+ case 7000000:
nominal_rate = 0x6400;
break;
- case BANDWIDTH_8_MHZ:
+ case 8000000:
nominal_rate = 0x7249;
break;
@@ -457,23 +456,31 @@ static int nxt6000_init(struct dvb_frontend* fe)
return 0;
}
-static int nxt6000_set_frontend(struct dvb_frontend* fe, struct dvb_frontend_parameters *param)
+static int nxt6000_set_frontend(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct nxt6000_state* state = fe->demodulator_priv;
int result;
if (fe->ops.tuner_ops.set_params) {
- fe->ops.tuner_ops.set_params(fe, param);
+ fe->ops.tuner_ops.set_params(fe);
if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0);
}
- if ((result = nxt6000_set_bandwidth(state, param->u.ofdm.bandwidth)) < 0)
+ result = nxt6000_set_bandwidth(state, p->bandwidth_hz);
+ if (result < 0)
return result;
- if ((result = nxt6000_set_guard_interval(state, param->u.ofdm.guard_interval)) < 0)
+
+ result = nxt6000_set_guard_interval(state, p->guard_interval);
+ if (result < 0)
return result;
- if ((result = nxt6000_set_transmission_mode(state, param->u.ofdm.transmission_mode)) < 0)
+
+ result = nxt6000_set_transmission_mode(state, p->transmission_mode);
+ if (result < 0)
return result;
- if ((result = nxt6000_set_inversion(state, param->inversion)) < 0)
+
+ result = nxt6000_set_inversion(state, p->inversion);
+ if (result < 0)
return result;
msleep(500);
@@ -566,10 +573,9 @@ error:
}
static struct dvb_frontend_ops nxt6000_ops = {
-
+ .delsys = { SYS_DVBT },
.info = {
.name = "NxtWave NXT6000 DVB-T",
- .type = FE_OFDM,
.frequency_min = 0,
.frequency_max = 863250000,
.frequency_stepsize = 62500,
diff --git a/drivers/media/dvb/frontends/or51132.c b/drivers/media/dvb/frontends/or51132.c
index 38e67accb8c3..5ef921823c15 100644
--- a/drivers/media/dvb/frontends/or51132.c
+++ b/drivers/media/dvb/frontends/or51132.c
@@ -306,9 +306,9 @@ static int modulation_fw_class(fe_modulation_t modulation)
}
}
-static int or51132_set_parameters(struct dvb_frontend* fe,
- struct dvb_frontend_parameters *param)
+static int or51132_set_parameters(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
int ret;
struct or51132_state* state = fe->demodulator_priv;
const struct firmware *fw;
@@ -317,8 +317,8 @@ static int or51132_set_parameters(struct dvb_frontend* fe,
/* Upload new firmware only if we need a different one */
if (modulation_fw_class(state->current_modulation) !=
- modulation_fw_class(param->u.vsb.modulation)) {
- switch(modulation_fw_class(param->u.vsb.modulation)) {
+ modulation_fw_class(p->modulation)) {
+ switch (modulation_fw_class(p->modulation)) {
case MOD_FWCLASS_VSB:
dprintk("set_parameters VSB MODE\n");
fwname = OR51132_VSB_FIRMWARE;
@@ -335,7 +335,7 @@ static int or51132_set_parameters(struct dvb_frontend* fe,
break;
default:
printk("or51132: Modulation type(%d) UNSUPPORTED\n",
- param->u.vsb.modulation);
+ p->modulation);
return -1;
}
printk("or51132: Waiting for firmware upload(%s)...\n",
@@ -357,13 +357,13 @@ static int or51132_set_parameters(struct dvb_frontend* fe,
state->config->set_ts_params(fe, clock_mode);
}
/* Change only if we are actually changing the modulation */
- if (state->current_modulation != param->u.vsb.modulation) {
- state->current_modulation = param->u.vsb.modulation;
+ if (state->current_modulation != p->modulation) {
+ state->current_modulation = p->modulation;
or51132_setmode(fe);
}
if (fe->ops.tuner_ops.set_params) {
- fe->ops.tuner_ops.set_params(fe, param);
+ fe->ops.tuner_ops.set_params(fe);
if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0);
}
@@ -371,13 +371,13 @@ static int or51132_set_parameters(struct dvb_frontend* fe,
or51132_setmode(fe);
/* Update current frequency */
- state->current_frequency = param->frequency;
+ state->current_frequency = p->frequency;
return 0;
}
-static int or51132_get_parameters(struct dvb_frontend* fe,
- struct dvb_frontend_parameters *param)
+static int or51132_get_parameters(struct dvb_frontend* fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct or51132_state* state = fe->demodulator_priv;
int status;
int retry = 1;
@@ -389,21 +389,28 @@ start:
return -EREMOTEIO;
}
switch(status&0xff) {
- case 0x06: param->u.vsb.modulation = VSB_8; break;
- case 0x43: param->u.vsb.modulation = QAM_64; break;
- case 0x45: param->u.vsb.modulation = QAM_256; break;
- default:
- if (retry--) goto start;
- printk(KERN_WARNING "or51132: unknown status 0x%02x\n",
- status&0xff);
- return -EREMOTEIO;
+ case 0x06:
+ p->modulation = VSB_8;
+ break;
+ case 0x43:
+ p->modulation = QAM_64;
+ break;
+ case 0x45:
+ p->modulation = QAM_256;
+ break;
+ default:
+ if (retry--)
+ goto start;
+ printk(KERN_WARNING "or51132: unknown status 0x%02x\n",
+ status&0xff);
+ return -EREMOTEIO;
}
/* FIXME: Read frequency from frontend, take AFC into account */
- param->frequency = state->current_frequency;
+ p->frequency = state->current_frequency;
/* FIXME: How to read inversion setting? Receiver 6 register? */
- param->inversion = INVERSION_AUTO;
+ p->inversion = INVERSION_AUTO;
return 0;
}
@@ -579,10 +586,9 @@ struct dvb_frontend* or51132_attach(const struct or51132_config* config,
}
static struct dvb_frontend_ops or51132_ops = {
-
+ .delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
.info = {
.name = "Oren OR51132 VSB/QAM Frontend",
- .type = FE_ATSC,
.frequency_min = 44000000,
.frequency_max = 958000000,
.frequency_stepsize = 166666,
diff --git a/drivers/media/dvb/frontends/or51211.c b/drivers/media/dvb/frontends/or51211.c
index c709ce6771c8..c625b57b4333 100644
--- a/drivers/media/dvb/frontends/or51211.c
+++ b/drivers/media/dvb/frontends/or51211.c
@@ -218,15 +218,15 @@ static int or51211_setmode(struct dvb_frontend* fe, int mode)
return 0;
}
-static int or51211_set_parameters(struct dvb_frontend* fe,
- struct dvb_frontend_parameters *param)
+static int or51211_set_parameters(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct or51211_state* state = fe->demodulator_priv;
/* Change only if we are actually changing the channel */
- if (state->current_frequency != param->frequency) {
+ if (state->current_frequency != p->frequency) {
if (fe->ops.tuner_ops.set_params) {
- fe->ops.tuner_ops.set_params(fe, param);
+ fe->ops.tuner_ops.set_params(fe);
if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0);
}
@@ -234,7 +234,7 @@ static int or51211_set_parameters(struct dvb_frontend* fe,
or51211_setmode(fe,0);
/* Update current frequency */
- state->current_frequency = param->frequency;
+ state->current_frequency = p->frequency;
}
return 0;
}
@@ -544,10 +544,9 @@ struct dvb_frontend* or51211_attach(const struct or51211_config* config,
}
static struct dvb_frontend_ops or51211_ops = {
-
+ .delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
.info = {
.name = "Oren OR51211 VSB Frontend",
- .type = FE_ATSC,
.frequency_min = 44000000,
.frequency_max = 958000000,
.frequency_stepsize = 166666,
diff --git a/drivers/media/dvb/frontends/s5h1409.c b/drivers/media/dvb/frontends/s5h1409.c
index 0e2f61a8978f..f71b06221e14 100644
--- a/drivers/media/dvb/frontends/s5h1409.c
+++ b/drivers/media/dvb/frontends/s5h1409.c
@@ -631,9 +631,9 @@ static void s5h1409_set_qam_interleave_mode_legacy(struct dvb_frontend *fe)
}
/* Talk to the demod, set the FEC, GUARD, QAM settings etc */
-static int s5h1409_set_frontend(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *p)
+static int s5h1409_set_frontend(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct s5h1409_state *state = fe->demodulator_priv;
dprintk("%s(frequency=%d)\n", __func__, p->frequency);
@@ -642,12 +642,12 @@ static int s5h1409_set_frontend(struct dvb_frontend *fe,
state->current_frequency = p->frequency;
- s5h1409_enable_modulation(fe, p->u.vsb.modulation);
+ s5h1409_enable_modulation(fe, p->modulation);
if (fe->ops.tuner_ops.set_params) {
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
- fe->ops.tuner_ops.set_params(fe, p);
+ fe->ops.tuner_ops.set_params(fe);
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 0);
}
@@ -879,7 +879,36 @@ static int s5h1409_read_snr(struct dvb_frontend *fe, u16 *snr)
static int s5h1409_read_signal_strength(struct dvb_frontend *fe,
u16 *signal_strength)
{
- return s5h1409_read_snr(fe, signal_strength);
+ /* borrowed from lgdt330x.c
+ *
+ * Calculate strength from SNR up to 35dB
+ * Even though the SNR can go higher than 35dB,
+ * there is some comfort factor in having a range of
+ * strong signals that can show at 100%
+ */
+ u16 snr;
+ u32 tmp;
+ int ret = s5h1409_read_snr(fe, &snr);
+
+ *signal_strength = 0;
+
+ if (0 == ret) {
+ /* The following calculation method was chosen
+ * purely for the sake of code re-use from the
+ * other demod drivers that use this method */
+
+ /* Convert from SNR in dB * 10 to 8.24 fixed-point */
+ tmp = (snr * ((1 << 24) / 10));
+
+ /* Convert from 8.24 fixed-point to
+ * scale the range 0 - 35*2^24 into 0 - 65535*/
+ if (tmp >= 8960 * 0x10000)
+ *signal_strength = 0xffff;
+ else
+ *signal_strength = tmp / 8960;
+ }
+
+ return ret;
}
static int s5h1409_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
@@ -896,13 +925,13 @@ static int s5h1409_read_ber(struct dvb_frontend *fe, u32 *ber)
return s5h1409_read_ucblocks(fe, ber);
}
-static int s5h1409_get_frontend(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *p)
+static int s5h1409_get_frontend(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct s5h1409_state *state = fe->demodulator_priv;
p->frequency = state->current_frequency;
- p->u.vsb.modulation = state->current_modulation;
+ p->modulation = state->current_modulation;
return 0;
}
@@ -967,10 +996,9 @@ error:
EXPORT_SYMBOL(s5h1409_attach);
static struct dvb_frontend_ops s5h1409_ops = {
-
+ .delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
.info = {
.name = "Samsung S5H1409 QAM/8VSB Frontend",
- .type = FE_ATSC,
.frequency_min = 54000000,
.frequency_max = 858000000,
.frequency_stepsize = 62500,
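The s5h1409 read_signal_strength() above (and the identical s5h1411 change below) now derives strength from SNR the way lgdt330x does: SNR arrives in units of 0.1 dB, is scaled to 8.24 fixed point, and the 0..35 dB range is stretched over 0..65535, with 8960 = 35 * 256 as the divisor, so strength is roughly snr_dB * 65536 / 35. A small worked example of that arithmetic; snr_to_strength() is a hypothetical stand-alone helper, assuming realistic SNR inputs (about 0..350, i.e. 0..35.0 dB).

#include <stdio.h>
#include <stdint.h>

/*
 * Worked example of the SNR-to-strength conversion only, not driver
 * code; inputs are expected in the 0..350 range (0.1 dB units).
 */
static uint16_t snr_to_strength(uint16_t snr_db_x10)
{
	/* 0.1 dB units -> 8.24 fixed point */
	uint32_t tmp = snr_db_x10 * ((1u << 24) / 10);

	/* scale 0..35 dB onto 0..65535; 8960 == 35 * 256 */
	if (tmp >= 8960u * 0x10000u)
		return 0xffff;
	return tmp / 8960;
}

int main(void)
{
	printf("17.5 dB -> %u\n", snr_to_strength(175));	/* 32767 */
	printf("35.0 dB -> %u\n", snr_to_strength(350));	/* 65535 */
	return 0;
}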
diff --git a/drivers/media/dvb/frontends/s5h1411.c b/drivers/media/dvb/frontends/s5h1411.c
index d8adf1e32019..6cc4b7a9dd60 100644
--- a/drivers/media/dvb/frontends/s5h1411.c
+++ b/drivers/media/dvb/frontends/s5h1411.c
@@ -585,9 +585,9 @@ static int s5h1411_register_reset(struct dvb_frontend *fe)
}
/* Talk to the demod, set the FEC, GUARD, QAM settings etc */
-static int s5h1411_set_frontend(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *p)
+static int s5h1411_set_frontend(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct s5h1411_state *state = fe->demodulator_priv;
dprintk("%s(frequency=%d)\n", __func__, p->frequency);
@@ -596,13 +596,13 @@ static int s5h1411_set_frontend(struct dvb_frontend *fe,
state->current_frequency = p->frequency;
- s5h1411_enable_modulation(fe, p->u.vsb.modulation);
+ s5h1411_enable_modulation(fe, p->modulation);
if (fe->ops.tuner_ops.set_params) {
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
- fe->ops.tuner_ops.set_params(fe, p);
+ fe->ops.tuner_ops.set_params(fe);
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 0);
@@ -794,7 +794,36 @@ static int s5h1411_read_snr(struct dvb_frontend *fe, u16 *snr)
static int s5h1411_read_signal_strength(struct dvb_frontend *fe,
u16 *signal_strength)
{
- return s5h1411_read_snr(fe, signal_strength);
+ /* borrowed from lgdt330x.c
+ *
+ * Calculate strength from SNR up to 35dB
+ * Even though the SNR can go higher than 35dB,
+ * there is some comfort factor in having a range of
+ * strong signals that can show at 100%
+ */
+ u16 snr;
+ u32 tmp;
+ int ret = s5h1411_read_snr(fe, &snr);
+
+ *signal_strength = 0;
+
+ if (0 == ret) {
+ /* The following calculation method was chosen
+ * purely for the sake of code re-use from the
+ * other demod drivers that use this method */
+
+ /* Convert from SNR in dB * 10 to 8.24 fixed-point */
+ tmp = (snr * ((1 << 24) / 10));
+
+ /* Convert from 8.24 fixed-point to
+ * scale the range 0 - 35*2^24 into 0 - 65535*/
+ if (tmp >= 8960 * 0x10000)
+ *signal_strength = 0xffff;
+ else
+ *signal_strength = tmp / 8960;
+ }
+
+ return ret;
}
static int s5h1411_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
@@ -811,13 +840,13 @@ static int s5h1411_read_ber(struct dvb_frontend *fe, u32 *ber)
return s5h1411_read_ucblocks(fe, ber);
}
-static int s5h1411_get_frontend(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *p)
+static int s5h1411_get_frontend(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct s5h1411_state *state = fe->demodulator_priv;
p->frequency = state->current_frequency;
- p->u.vsb.modulation = state->current_modulation;
+ p->modulation = state->current_modulation;
return 0;
}
@@ -886,10 +915,9 @@ error:
EXPORT_SYMBOL(s5h1411_attach);
static struct dvb_frontend_ops s5h1411_ops = {
-
+ .delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
.info = {
.name = "Samsung S5H1411 QAM/8VSB Frontend",
- .type = FE_ATSC,
.frequency_min = 54000000,
.frequency_max = 858000000,
.frequency_stepsize = 62500,
diff --git a/drivers/media/dvb/frontends/s5h1420.c b/drivers/media/dvb/frontends/s5h1420.c
index 3879d2e378aa..2322257c69ae 100644
--- a/drivers/media/dvb/frontends/s5h1420.c
+++ b/drivers/media/dvb/frontends/s5h1420.c
@@ -472,15 +472,15 @@ static void s5h1420_reset(struct s5h1420_state* state)
}
static void s5h1420_setsymbolrate(struct s5h1420_state* state,
- struct dvb_frontend_parameters *p)
+ struct dtv_frontend_properties *p)
{
u8 v;
u64 val;
dprintk("enter %s\n", __func__);
- val = ((u64) p->u.qpsk.symbol_rate / 1000ULL) * (1ULL<<24);
- if (p->u.qpsk.symbol_rate < 29000000)
+ val = ((u64) p->symbol_rate / 1000ULL) * (1ULL<<24);
+ if (p->symbol_rate < 29000000)
val *= 2;
do_div(val, (state->fclk / 1000));
@@ -543,7 +543,7 @@ static int s5h1420_getfreqoffset(struct s5h1420_state* state)
}
static void s5h1420_setfec_inversion(struct s5h1420_state* state,
- struct dvb_frontend_parameters *p)
+ struct dtv_frontend_properties *p)
{
u8 inversion = 0;
u8 vit08, vit09;
@@ -555,11 +555,11 @@ static void s5h1420_setfec_inversion(struct s5h1420_state* state,
else if (p->inversion == INVERSION_ON)
inversion = state->config->invert ? 0 : 0x08;
- if ((p->u.qpsk.fec_inner == FEC_AUTO) || (p->inversion == INVERSION_AUTO)) {
+ if ((p->fec_inner == FEC_AUTO) || (p->inversion == INVERSION_AUTO)) {
vit08 = 0x3f;
vit09 = 0;
} else {
- switch(p->u.qpsk.fec_inner) {
+ switch (p->fec_inner) {
case FEC_1_2:
vit08 = 0x01; vit09 = 0x10;
break;
@@ -628,9 +628,9 @@ static fe_spectral_inversion_t s5h1420_getinversion(struct s5h1420_state* state)
return INVERSION_OFF;
}
-static int s5h1420_set_frontend(struct dvb_frontend* fe,
- struct dvb_frontend_parameters *p)
+static int s5h1420_set_frontend(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct s5h1420_state* state = fe->demodulator_priv;
int frequency_delta;
struct dvb_frontend_tune_settings fesettings;
@@ -639,17 +639,16 @@ static int s5h1420_set_frontend(struct dvb_frontend* fe,
dprintk("enter %s\n", __func__);
/* check if we should do a fast-tune */
- memcpy(&fesettings.parameters, p, sizeof(struct dvb_frontend_parameters));
s5h1420_get_tune_settings(fe, &fesettings);
frequency_delta = p->frequency - state->tunedfreq;
if ((frequency_delta > -fesettings.max_drift) &&
(frequency_delta < fesettings.max_drift) &&
(frequency_delta != 0) &&
- (state->fec_inner == p->u.qpsk.fec_inner) &&
- (state->symbol_rate == p->u.qpsk.symbol_rate)) {
+ (state->fec_inner == p->fec_inner) &&
+ (state->symbol_rate == p->symbol_rate)) {
if (fe->ops.tuner_ops.set_params) {
- fe->ops.tuner_ops.set_params(fe, p);
+ fe->ops.tuner_ops.set_params(fe);
if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0);
}
if (fe->ops.tuner_ops.get_frequency) {
@@ -669,13 +668,13 @@ static int s5h1420_set_frontend(struct dvb_frontend* fe,
s5h1420_reset(state);
/* set s5h1420 fclk PLL according to desired symbol rate */
- if (p->u.qpsk.symbol_rate > 33000000)
+ if (p->symbol_rate > 33000000)
state->fclk = 80000000;
- else if (p->u.qpsk.symbol_rate > 28500000)
+ else if (p->symbol_rate > 28500000)
state->fclk = 59000000;
- else if (p->u.qpsk.symbol_rate > 25000000)
+ else if (p->symbol_rate > 25000000)
state->fclk = 86000000;
- else if (p->u.qpsk.symbol_rate > 1900000)
+ else if (p->symbol_rate > 1900000)
state->fclk = 88000000;
else
state->fclk = 44000000;
@@ -705,7 +704,7 @@ static int s5h1420_set_frontend(struct dvb_frontend* fe,
s5h1420_writereg(state, DiS01, (state->fclk + (TONE_FREQ * 32) - 1) / (TONE_FREQ * 32));
/* TODO DC offset removal, config parameter ? */
- if (p->u.qpsk.symbol_rate > 29000000)
+ if (p->symbol_rate > 29000000)
s5h1420_writereg(state, QPSK01, 0xae | 0x10);
else
s5h1420_writereg(state, QPSK01, 0xac | 0x10);
@@ -718,15 +717,15 @@ static int s5h1420_set_frontend(struct dvb_frontend* fe,
s5h1420_writereg(state, Loop01, 0xF0);
s5h1420_writereg(state, Loop02, 0x2a); /* e7 for s5h1420 */
s5h1420_writereg(state, Loop03, 0x79); /* 78 for s5h1420 */
- if (p->u.qpsk.symbol_rate > 20000000)
+ if (p->symbol_rate > 20000000)
s5h1420_writereg(state, Loop04, 0x79);
else
s5h1420_writereg(state, Loop04, 0x58);
s5h1420_writereg(state, Loop05, 0x6b);
- if (p->u.qpsk.symbol_rate >= 8000000)
+ if (p->symbol_rate >= 8000000)
s5h1420_writereg(state, Post01, (0 << 6) | 0x10);
- else if (p->u.qpsk.symbol_rate >= 4000000)
+ else if (p->symbol_rate >= 4000000)
s5h1420_writereg(state, Post01, (1 << 6) | 0x10);
else
s5h1420_writereg(state, Post01, (3 << 6) | 0x10);
@@ -744,7 +743,7 @@ static int s5h1420_set_frontend(struct dvb_frontend* fe,
/* set tuner PLL */
if (fe->ops.tuner_ops.set_params) {
- fe->ops.tuner_ops.set_params(fe, p);
+ fe->ops.tuner_ops.set_params(fe);
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 0);
s5h1420_setfreqoffset(state, 0);
@@ -757,8 +756,8 @@ static int s5h1420_set_frontend(struct dvb_frontend* fe,
/* start QPSK */
s5h1420_writereg(state, QPSK01, s5h1420_readreg(state, QPSK01) | 1);
- state->fec_inner = p->u.qpsk.fec_inner;
- state->symbol_rate = p->u.qpsk.symbol_rate;
+ state->fec_inner = p->fec_inner;
+ state->symbol_rate = p->symbol_rate;
state->postlocked = 0;
state->tunedfreq = p->frequency;
@@ -766,15 +765,15 @@ static int s5h1420_set_frontend(struct dvb_frontend* fe,
return 0;
}
-static int s5h1420_get_frontend(struct dvb_frontend* fe,
- struct dvb_frontend_parameters *p)
+static int s5h1420_get_frontend(struct dvb_frontend* fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct s5h1420_state* state = fe->demodulator_priv;
p->frequency = state->tunedfreq + s5h1420_getfreqoffset(state);
p->inversion = s5h1420_getinversion(state);
- p->u.qpsk.symbol_rate = s5h1420_getsymbolrate(state);
- p->u.qpsk.fec_inner = s5h1420_getfec(state);
+ p->symbol_rate = s5h1420_getsymbolrate(state);
+ p->fec_inner = s5h1420_getfec(state);
return 0;
}
@@ -782,29 +781,30 @@ static int s5h1420_get_frontend(struct dvb_frontend* fe,
static int s5h1420_get_tune_settings(struct dvb_frontend* fe,
struct dvb_frontend_tune_settings* fesettings)
{
- if (fesettings->parameters.u.qpsk.symbol_rate > 20000000) {
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+ if (p->symbol_rate > 20000000) {
fesettings->min_delay_ms = 50;
fesettings->step_size = 2000;
fesettings->max_drift = 8000;
- } else if (fesettings->parameters.u.qpsk.symbol_rate > 12000000) {
+ } else if (p->symbol_rate > 12000000) {
fesettings->min_delay_ms = 100;
fesettings->step_size = 1500;
fesettings->max_drift = 9000;
- } else if (fesettings->parameters.u.qpsk.symbol_rate > 8000000) {
+ } else if (p->symbol_rate > 8000000) {
fesettings->min_delay_ms = 100;
fesettings->step_size = 1000;
fesettings->max_drift = 8000;
- } else if (fesettings->parameters.u.qpsk.symbol_rate > 4000000) {
+ } else if (p->symbol_rate > 4000000) {
fesettings->min_delay_ms = 100;
fesettings->step_size = 500;
fesettings->max_drift = 7000;
- } else if (fesettings->parameters.u.qpsk.symbol_rate > 2000000) {
+ } else if (p->symbol_rate > 2000000) {
fesettings->min_delay_ms = 200;
- fesettings->step_size = (fesettings->parameters.u.qpsk.symbol_rate / 8000);
+ fesettings->step_size = (p->symbol_rate / 8000);
fesettings->max_drift = 14 * fesettings->step_size;
} else {
fesettings->min_delay_ms = 200;
- fesettings->step_size = (fesettings->parameters.u.qpsk.symbol_rate / 8000);
+ fesettings->step_size = (p->symbol_rate / 8000);
fesettings->max_drift = 18 * fesettings->step_size;
}
@@ -937,10 +937,9 @@ error:
EXPORT_SYMBOL(s5h1420_attach);
static struct dvb_frontend_ops s5h1420_ops = {
-
+ .delsys = { SYS_DVBS },
.info = {
.name = "Samsung S5H1420/PnpNetwork PN1010 DVB-S",
- .type = FE_QPSK,
.frequency_min = 950000,
.frequency_max = 2150000,
.frequency_stepsize = 125, /* kHz for QPSK frontends */
diff --git a/drivers/media/dvb/frontends/s5h1432.c b/drivers/media/dvb/frontends/s5h1432.c
index 0c6dcb90d168..8352ce1c9556 100644
--- a/drivers/media/dvb/frontends/s5h1432.c
+++ b/drivers/media/dvb/frontends/s5h1432.c
@@ -178,9 +178,9 @@ static int s5h1432_set_IF(struct dvb_frontend *fe, u32 ifFreqHz)
}
/* Talk to the demod, set the FEC, GUARD, QAM settings etc */
-static int s5h1432_set_frontend(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *p)
+static int s5h1432_set_frontend(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
u32 dvb_bandwidth = 8;
struct s5h1432_state *state = fe->demodulator_priv;
@@ -188,26 +188,26 @@ static int s5h1432_set_frontend(struct dvb_frontend *fe,
/*current_frequency = p->frequency; */
/*state->current_frequency = p->frequency; */
} else {
- fe->ops.tuner_ops.set_params(fe, p);
+ fe->ops.tuner_ops.set_params(fe);
msleep(300);
s5h1432_set_channel_bandwidth(fe, dvb_bandwidth);
- switch (p->u.ofdm.bandwidth) {
- case BANDWIDTH_6_MHZ:
+ switch (p->bandwidth_hz) {
+ case 6000000:
dvb_bandwidth = 6;
s5h1432_set_IF(fe, IF_FREQ_4_MHZ);
break;
- case BANDWIDTH_7_MHZ:
+ case 7000000:
dvb_bandwidth = 7;
s5h1432_set_IF(fe, IF_FREQ_4_MHZ);
break;
- case BANDWIDTH_8_MHZ:
+ case 8000000:
dvb_bandwidth = 8;
s5h1432_set_IF(fe, IF_FREQ_4_MHZ);
break;
default:
return 0;
}
- /*fe->ops.tuner_ops.set_params(fe, p); */
+ /*fe->ops.tuner_ops.set_params(fe); */
/*Soft Reset chip*/
msleep(30);
s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0x09, 0x1a);
@@ -215,23 +215,23 @@ static int s5h1432_set_frontend(struct dvb_frontend *fe,
s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0x09, 0x1b);
s5h1432_set_channel_bandwidth(fe, dvb_bandwidth);
- switch (p->u.ofdm.bandwidth) {
- case BANDWIDTH_6_MHZ:
+ switch (p->bandwidth_hz) {
+ case 6000000:
dvb_bandwidth = 6;
s5h1432_set_IF(fe, IF_FREQ_4_MHZ);
break;
- case BANDWIDTH_7_MHZ:
+ case 7000000:
dvb_bandwidth = 7;
s5h1432_set_IF(fe, IF_FREQ_4_MHZ);
break;
- case BANDWIDTH_8_MHZ:
+ case 8000000:
dvb_bandwidth = 8;
s5h1432_set_IF(fe, IF_FREQ_4_MHZ);
break;
default:
return 0;
}
- /*fe->ops.tuner_ops.set_params(fe,p); */
+ /*fe->ops.tuner_ops.set_params(fe); */
/*Soft Reset chip*/
msleep(30);
s5h1432_writereg(state, S5H1432_I2C_TOP_ADDR, 0x09, 0x1a);
@@ -329,12 +329,6 @@ static int s5h1432_read_ber(struct dvb_frontend *fe, u32 *ber)
return 0;
}
-static int s5h1432_get_frontend(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *p)
-{
- return 0;
-}
-
static int s5h1432_get_tune_settings(struct dvb_frontend *fe,
struct dvb_frontend_tune_settings *tune)
{
@@ -381,10 +375,9 @@ error:
EXPORT_SYMBOL(s5h1432_attach);
static struct dvb_frontend_ops s5h1432_ops = {
-
+ .delsys = { SYS_DVBT },
.info = {
.name = "Samsung s5h1432 DVB-T Frontend",
- .type = FE_OFDM,
.frequency_min = 177000000,
.frequency_max = 858000000,
.frequency_stepsize = 166666,
@@ -397,7 +390,6 @@ static struct dvb_frontend_ops s5h1432_ops = {
.init = s5h1432_init,
.sleep = s5h1432_sleep,
.set_frontend = s5h1432_set_frontend,
- .get_frontend = s5h1432_get_frontend,
.get_tune_settings = s5h1432_get_tune_settings,
.read_status = s5h1432_read_status,
.read_ber = s5h1432_read_ber,
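With dvb_frontend_parameters gone, the OFDM bandwidth travels in the cache as bandwidth_hz (in Hz) rather than as enum fe_bandwidth, which is why the BANDWIDTH_6/7/8_MHZ cases above become 6000000/7000000/8000000. A driver that still needs the legacy enum internally can map back roughly as follows (sketch only; the helper name is made up):

	static enum fe_bandwidth bw_hz_to_legacy(u32 bandwidth_hz)
	{
		switch (bandwidth_hz) {
		case 6000000:
			return BANDWIDTH_6_MHZ;
		case 7000000:
			return BANDWIDTH_7_MHZ;
		case 8000000:
		default:
			return BANDWIDTH_8_MHZ;
		}
	}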
diff --git a/drivers/media/dvb/frontends/s921.c b/drivers/media/dvb/frontends/s921.c
index ca0103d5f148..cd2288c07147 100644
--- a/drivers/media/dvb/frontends/s921.c
+++ b/drivers/media/dvb/frontends/s921.c
@@ -262,9 +262,9 @@ static int s921_i2c_readreg(struct s921_state *state, u8 i2c_addr, u8 reg)
s921_i2c_writeregdata(state, state->config->demod_address, \
regdata, ARRAY_SIZE(regdata))
-static int s921_pll_tune(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *p)
+static int s921_pll_tune(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct s921_state *state = fe->demodulator_priv;
int band, rc, i;
unsigned long f_offset;
@@ -414,9 +414,9 @@ static int s921_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
return 0;
}
-static int s921_set_frontend(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *p)
+static int s921_set_frontend(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct s921_state *state = fe->demodulator_priv;
int rc;
@@ -424,7 +424,7 @@ static int s921_set_frontend(struct dvb_frontend *fe,
/* FIXME: We don't know how to use non-auto mode */
- rc = s921_pll_tune(fe, p);
+ rc = s921_pll_tune(fe);
if (rc < 0)
return rc;
@@ -433,19 +433,20 @@ static int s921_set_frontend(struct dvb_frontend *fe,
return 0;
}
-static int s921_get_frontend(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *p)
+static int s921_get_frontend(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct s921_state *state = fe->demodulator_priv;
/* FIXME: Probably it is possible to get it from regs f1 and f2 */
p->frequency = state->currentfreq;
+ p->delivery_system = SYS_ISDBT;
return 0;
}
static int s921_tune(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *params,
+ bool re_tune,
unsigned int mode_flags,
unsigned int *delay,
fe_status_t *status)
@@ -454,8 +455,8 @@ static int s921_tune(struct dvb_frontend *fe,
dprintk("\n");
- if (params != NULL)
- rc = s921_set_frontend(fe, params);
+ if (re_tune)
+ rc = s921_set_frontend(fe);
if (!(mode_flags & FE_TUNE_MODE_ONESHOT))
s921_read_status(fe, status);
@@ -510,10 +511,10 @@ rcor:
EXPORT_SYMBOL(s921_attach);
static struct dvb_frontend_ops s921_ops = {
+ .delsys = { SYS_ISDBT },
/* Use dib8000 values per default */
.info = {
.name = "Sharp S921",
- .type = FE_OFDM,
.frequency_min = 470000000,
/*
* Max should be 770MHz instead, according with Sharp docs,
diff --git a/drivers/media/dvb/frontends/si21xx.c b/drivers/media/dvb/frontends/si21xx.c
index 4b0c99a08a85..a68a64800df7 100644
--- a/drivers/media/dvb/frontends/si21xx.c
+++ b/drivers/media/dvb/frontends/si21xx.c
@@ -690,20 +690,7 @@ static int si21xx_setacquire(struct dvb_frontend *fe, int symbrate,
return status;
}
-static int si21xx_set_property(struct dvb_frontend *fe, struct dtv_property *p)
-{
- dprintk("%s(..)\n", __func__);
- return 0;
-}
-
-static int si21xx_get_property(struct dvb_frontend *fe, struct dtv_property *p)
-{
- dprintk("%s(..)\n", __func__);
- return 0;
-}
-
-static int si21xx_set_frontend(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *dfp)
+static int si21xx_set_frontend(struct dvb_frontend *fe)
{
struct si21xx_state *state = fe->demodulator_priv;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
@@ -877,10 +864,9 @@ static void si21xx_release(struct dvb_frontend *fe)
}
static struct dvb_frontend_ops si21xx_ops = {
-
+ .delsys = { SYS_DVBS },
.info = {
.name = "SL SI21XX DVB-S",
- .type = FE_QPSK,
.frequency_min = 950000,
.frequency_max = 2150000,
.frequency_stepsize = 125, /* kHz for QPSK frontends */
@@ -908,8 +894,6 @@ static struct dvb_frontend_ops si21xx_ops = {
.set_tone = si21xx_set_tone,
.set_voltage = si21xx_set_voltage,
- .set_property = si21xx_set_property,
- .get_property = si21xx_get_property,
.set_frontend = si21xx_set_frontend,
};
diff --git a/drivers/media/dvb/frontends/sp8870.c b/drivers/media/dvb/frontends/sp8870.c
index b85eb60a893e..e37274c8f14e 100644
--- a/drivers/media/dvb/frontends/sp8870.c
+++ b/drivers/media/dvb/frontends/sp8870.c
@@ -168,13 +168,13 @@ static int sp8870_read_data_valid_signal(struct sp8870_state* state)
return (sp8870_readreg(state, 0x0D02) > 0);
}
-static int configure_reg0xc05 (struct dvb_frontend_parameters *p, u16 *reg0xc05)
+static int configure_reg0xc05 (struct dtv_frontend_properties *p, u16 *reg0xc05)
{
int known_parameters = 1;
*reg0xc05 = 0x000;
- switch (p->u.ofdm.constellation) {
+ switch (p->modulation) {
case QPSK:
break;
case QAM_16:
@@ -190,7 +190,7 @@ static int configure_reg0xc05 (struct dvb_frontend_parameters *p, u16 *reg0xc05)
return -EINVAL;
};
- switch (p->u.ofdm.hierarchy_information) {
+ switch (p->hierarchy) {
case HIERARCHY_NONE:
break;
case HIERARCHY_1:
@@ -209,7 +209,7 @@ static int configure_reg0xc05 (struct dvb_frontend_parameters *p, u16 *reg0xc05)
return -EINVAL;
};
- switch (p->u.ofdm.code_rate_HP) {
+ switch (p->code_rate_HP) {
case FEC_1_2:
break;
case FEC_2_3:
@@ -245,9 +245,9 @@ static int sp8870_wake_up(struct sp8870_state* state)
return sp8870_writereg(state, 0xC18, 0x00D);
}
-static int sp8870_set_frontend_parameters (struct dvb_frontend* fe,
- struct dvb_frontend_parameters *p)
+static int sp8870_set_frontend_parameters(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct sp8870_state* state = fe->demodulator_priv;
int err;
u16 reg0xc05;
@@ -260,7 +260,7 @@ static int sp8870_set_frontend_parameters (struct dvb_frontend* fe,
// set tuner parameters
if (fe->ops.tuner_ops.set_params) {
- fe->ops.tuner_ops.set_params(fe, p);
+ fe->ops.tuner_ops.set_params(fe);
if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0);
}
@@ -277,15 +277,15 @@ static int sp8870_set_frontend_parameters (struct dvb_frontend* fe,
sp8870_writereg(state, 0x030A, 0x0000);
// filter for 6/7/8 MHz channel
- if (p->u.ofdm.bandwidth == BANDWIDTH_6_MHZ)
+ if (p->bandwidth_hz == 6000000)
sp8870_writereg(state, 0x0311, 0x0002);
- else if (p->u.ofdm.bandwidth == BANDWIDTH_7_MHZ)
+ else if (p->bandwidth_hz == 7000000)
sp8870_writereg(state, 0x0311, 0x0001);
else
sp8870_writereg(state, 0x0311, 0x0000);
// scan order: 2k first = 0x0000, 8k first = 0x0001
- if (p->u.ofdm.transmission_mode == TRANSMISSION_MODE_2K)
+ if (p->transmission_mode == TRANSMISSION_MODE_2K)
sp8870_writereg(state, 0x0338, 0x0000);
else
sp8870_writereg(state, 0x0338, 0x0001);
@@ -459,8 +459,9 @@ static int lockups;
/* only for debugging: counter for channel switches */
static int switches;
-static int sp8870_set_frontend (struct dvb_frontend* fe, struct dvb_frontend_parameters *p)
+static int sp8870_set_frontend(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct sp8870_state* state = fe->demodulator_priv;
/*
@@ -479,7 +480,8 @@ static int sp8870_set_frontend (struct dvb_frontend* fe, struct dvb_frontend_par
for (trials = 1; trials <= MAXTRIALS; trials++) {
- if ((err = sp8870_set_frontend_parameters(fe, p)))
+ err = sp8870_set_frontend_parameters(fe);
+ if (err)
return err;
for (check_count = 0; check_count < MAXCHECKS; check_count++) {
@@ -579,10 +581,9 @@ error:
}
static struct dvb_frontend_ops sp8870_ops = {
-
+ .delsys = { SYS_DVBT },
.info = {
.name = "Spase SP8870 DVB-T",
- .type = FE_OFDM,
.frequency_min = 470000000,
.frequency_max = 860000000,
.frequency_stepsize = 166666,
diff --git a/drivers/media/dvb/frontends/sp887x.c b/drivers/media/dvb/frontends/sp887x.c
index 4a7c3d842608..f4096ccb226e 100644
--- a/drivers/media/dvb/frontends/sp887x.c
+++ b/drivers/media/dvb/frontends/sp887x.c
@@ -209,13 +209,13 @@ static int sp887x_initial_setup (struct dvb_frontend* fe, const struct firmware
return 0;
};
-static int configure_reg0xc05 (struct dvb_frontend_parameters *p, u16 *reg0xc05)
+static int configure_reg0xc05(struct dtv_frontend_properties *p, u16 *reg0xc05)
{
int known_parameters = 1;
*reg0xc05 = 0x000;
- switch (p->u.ofdm.constellation) {
+ switch (p->modulation) {
case QPSK:
break;
case QAM_16:
@@ -231,7 +231,7 @@ static int configure_reg0xc05 (struct dvb_frontend_parameters *p, u16 *reg0xc05)
return -EINVAL;
};
- switch (p->u.ofdm.hierarchy_information) {
+ switch (p->hierarchy) {
case HIERARCHY_NONE:
break;
case HIERARCHY_1:
@@ -250,7 +250,7 @@ static int configure_reg0xc05 (struct dvb_frontend_parameters *p, u16 *reg0xc05)
return -EINVAL;
};
- switch (p->u.ofdm.code_rate_HP) {
+ switch (p->code_rate_HP) {
case FEC_1_2:
break;
case FEC_2_3:
@@ -303,17 +303,30 @@ static void divide (int n, int d, int *quotient_i, int *quotient_f)
}
static void sp887x_correct_offsets (struct sp887x_state* state,
- struct dvb_frontend_parameters *p,
+ struct dtv_frontend_properties *p,
int actual_freq)
{
static const u32 srate_correction [] = { 1879617, 4544878, 8098561 };
- int bw_index = p->u.ofdm.bandwidth - BANDWIDTH_8_MHZ;
+ int bw_index;
int freq_offset = actual_freq - p->frequency;
int sysclock = 61003; //[kHz]
int ifreq = 36000000;
int freq;
int frequency_shift;
+ switch (p->bandwidth_hz) {
+ default:
+ case 8000000:
+ bw_index = 0;
+ break;
+ case 7000000:
+ bw_index = 1;
+ break;
+ case 6000000:
+ bw_index = 2;
+ break;
+ }
+
if (p->inversion == INVERSION_ON)
freq = ifreq - freq_offset;
else
@@ -333,17 +346,17 @@ static void sp887x_correct_offsets (struct sp887x_state* state,
sp887x_writereg(state, 0x30a, frequency_shift & 0xfff);
}
-static int sp887x_setup_frontend_parameters (struct dvb_frontend* fe,
- struct dvb_frontend_parameters *p)
+static int sp887x_setup_frontend_parameters(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct sp887x_state* state = fe->demodulator_priv;
unsigned actual_freq;
int err;
u16 val, reg0xc05;
- if (p->u.ofdm.bandwidth != BANDWIDTH_8_MHZ &&
- p->u.ofdm.bandwidth != BANDWIDTH_7_MHZ &&
- p->u.ofdm.bandwidth != BANDWIDTH_6_MHZ)
+ if (p->bandwidth_hz != 8000000 &&
+ p->bandwidth_hz != 7000000 &&
+ p->bandwidth_hz != 6000000)
return -EINVAL;
if ((err = configure_reg0xc05(p, &reg0xc05)))
@@ -353,7 +366,7 @@ static int sp887x_setup_frontend_parameters (struct dvb_frontend* fe,
/* setup the PLL */
if (fe->ops.tuner_ops.set_params) {
- fe->ops.tuner_ops.set_params(fe, p);
+ fe->ops.tuner_ops.set_params(fe);
if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0);
}
if (fe->ops.tuner_ops.get_frequency) {
@@ -369,9 +382,9 @@ static int sp887x_setup_frontend_parameters (struct dvb_frontend* fe,
sp887x_correct_offsets(state, p, actual_freq);
/* filter for 6/7/8 MHz channel */
- if (p->u.ofdm.bandwidth == BANDWIDTH_6_MHZ)
+ if (p->bandwidth_hz == 6000000)
val = 2;
- else if (p->u.ofdm.bandwidth == BANDWIDTH_7_MHZ)
+ else if (p->bandwidth_hz == 7000000)
val = 1;
else
val = 0;
@@ -379,16 +392,16 @@ static int sp887x_setup_frontend_parameters (struct dvb_frontend* fe,
sp887x_writereg(state, 0x311, val);
/* scan order: 2k first = 0, 8k first = 1 */
- if (p->u.ofdm.transmission_mode == TRANSMISSION_MODE_2K)
+ if (p->transmission_mode == TRANSMISSION_MODE_2K)
sp887x_writereg(state, 0x338, 0x000);
else
sp887x_writereg(state, 0x338, 0x001);
sp887x_writereg(state, 0xc05, reg0xc05);
- if (p->u.ofdm.bandwidth == BANDWIDTH_6_MHZ)
+ if (p->bandwidth_hz == 6000000)
val = 2 << 3;
- else if (p->u.ofdm.bandwidth == BANDWIDTH_7_MHZ)
+ else if (p->bandwidth_hz == 7000000)
val = 3 << 3;
else
val = 0 << 3;
@@ -579,10 +592,9 @@ error:
}
static struct dvb_frontend_ops sp887x_ops = {
-
+ .delsys = { SYS_DVBT },
.info = {
.name = "Spase SP887x DVB-T",
- .type = FE_OFDM,
.frequency_min = 50500000,
.frequency_max = 858000000,
.frequency_stepsize = 166666,
diff --git a/drivers/media/dvb/frontends/stb0899_drv.c b/drivers/media/dvb/frontends/stb0899_drv.c
index 8408ef877b4b..38565beafe23 100644
--- a/drivers/media/dvb/frontends/stb0899_drv.c
+++ b/drivers/media/dvb/frontends/stb0899_drv.c
@@ -1431,7 +1431,7 @@ static void stb0899_set_iterations(struct stb0899_state *state)
stb0899_write_s2reg(state, STB0899_S2FEC, STB0899_BASE_MAX_ITER, STB0899_OFF0_MAX_ITER, reg);
}
-static enum dvbfe_search stb0899_search(struct dvb_frontend *fe, struct dvb_frontend_parameters *p)
+static enum dvbfe_search stb0899_search(struct dvb_frontend *fe)
{
struct stb0899_state *state = fe->demodulator_priv;
struct stb0899_params *i_params = &state->params;
@@ -1441,8 +1441,8 @@ static enum dvbfe_search stb0899_search(struct dvb_frontend *fe, struct dvb_fron
u32 SearchRange, gain;
- i_params->freq = p->frequency;
- i_params->srate = p->u.qpsk.symbol_rate;
+ i_params->freq = props->frequency;
+ i_params->srate = props->symbol_rate;
state->delsys = props->delivery_system;
dprintk(state->verbose, FE_DEBUG, 1, "delivery system=%d", state->delsys);
@@ -1568,34 +1568,15 @@ static enum dvbfe_search stb0899_search(struct dvb_frontend *fe, struct dvb_fron
return DVBFE_ALGO_SEARCH_ERROR;
}
-/*
- * stb0899_track
- * periodically check the signal level against a specified
- * threshold level and perform derotator centering.
- * called once we have a lock from a successful search
- * event.
- *
- * Will be called periodically called to maintain the
- * lock.
- *
- * Will be used to get parameters as well as info from
- * the decoded baseband header
- *
- * Once a new lock has established, the internal state
- * frequency (internal->freq) is updated
- */
-static int stb0899_track(struct dvb_frontend *fe, struct dvb_frontend_parameters *p)
-{
- return 0;
-}
-static int stb0899_get_frontend(struct dvb_frontend *fe, struct dvb_frontend_parameters *p)
+static int stb0899_get_frontend(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct stb0899_state *state = fe->demodulator_priv;
struct stb0899_internal *internal = &state->internal;
dprintk(state->verbose, FE_DEBUG, 1, "Get params");
- p->u.qpsk.symbol_rate = internal->srate;
+ p->symbol_rate = internal->srate;
return 0;
}
@@ -1606,10 +1587,9 @@ static enum dvbfe_algo stb0899_frontend_algo(struct dvb_frontend *fe)
}
static struct dvb_frontend_ops stb0899_ops = {
-
+ .delsys = { SYS_DVBS, SYS_DVBS2, SYS_DSS },
.info = {
.name = "STB0899 Multistandard",
- .type = FE_QPSK,
.frequency_min = 950000,
.frequency_max = 2150000,
.frequency_stepsize = 0,
@@ -1632,8 +1612,7 @@ static struct dvb_frontend_ops stb0899_ops = {
.get_frontend_algo = stb0899_frontend_algo,
.search = stb0899_search,
- .track = stb0899_track,
- .get_frontend = stb0899_get_frontend,
+ .get_frontend = stb0899_get_frontend,
.read_status = stb0899_read_status,
diff --git a/drivers/media/dvb/frontends/stb6000.c b/drivers/media/dvb/frontends/stb6000.c
index ed699647050e..a0c3c526b132 100644
--- a/drivers/media/dvb/frontends/stb6000.c
+++ b/drivers/media/dvb/frontends/stb6000.c
@@ -75,9 +75,9 @@ static int stb6000_sleep(struct dvb_frontend *fe)
return (ret == 1) ? 0 : ret;
}
-static int stb6000_set_params(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *params)
+static int stb6000_set_params(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct stb6000_priv *priv = fe->tuner_priv;
unsigned int n, m;
int ret;
@@ -93,8 +93,8 @@ static int stb6000_set_params(struct dvb_frontend *fe,
dprintk("%s:\n", __func__);
- freq_mhz = params->frequency / 1000;
- bandwidth = params->u.qpsk.symbol_rate / 1000000;
+ freq_mhz = p->frequency / 1000;
+ bandwidth = p->symbol_rate / 1000000;
if (bandwidth > 31)
bandwidth = 31;
diff --git a/drivers/media/dvb/frontends/stb6100.c b/drivers/media/dvb/frontends/stb6100.c
index bc1a8af4f6e1..def88abb30bf 100644
--- a/drivers/media/dvb/frontends/stb6100.c
+++ b/drivers/media/dvb/frontends/stb6100.c
@@ -327,7 +327,7 @@ static int stb6100_set_frequency(struct dvb_frontend *fe, u32 frequency)
int rc;
const struct stb6100_lkup *ptr;
struct stb6100_state *state = fe->tuner_priv;
- struct dvb_frontend_parameters p;
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
u32 srate = 0, fvco, nint, nfrac;
u8 regs[STB6100_NUMREGS];
@@ -337,9 +337,9 @@ static int stb6100_set_frequency(struct dvb_frontend *fe, u32 frequency)
if (fe->ops.get_frontend) {
dprintk(verbose, FE_DEBUG, 1, "Get frontend parameters");
- fe->ops.get_frontend(fe, &p);
+ fe->ops.get_frontend(fe);
}
- srate = p.u.qpsk.symbol_rate;
+ srate = p->symbol_rate;
/* Set up tuner cleanly, LPF calibration on */
rc = stb6100_write_reg(state, STB6100_FCCK, 0x4d | STB6100_FCCK_FCCK);
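Since tuners no longer receive a parameters struct either, stb6100_set_frequency asks the demod to refresh the shared cache via fe->ops.get_frontend(fe) and then reads the symbol rate from it. The calling convention, roughly (a sketch; the function name and elided PLL code are illustrative):

	static int example_tuner_set_frequency(struct dvb_frontend *fe, u32 frequency)
	{
		struct dtv_frontend_properties *p = &fe->dtv_property_cache;
		u32 srate = 0;

		if (fe->ops.get_frontend)
			fe->ops.get_frontend(fe);	/* demod fills the cache */
		srate = p->symbol_rate;

		/* ... program the PLL from frequency and srate ... */
		return 0;
	}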
diff --git a/drivers/media/dvb/frontends/stv0288.c b/drivers/media/dvb/frontends/stv0288.c
index 0aa3962ff18b..fb5548a82208 100644
--- a/drivers/media/dvb/frontends/stv0288.c
+++ b/drivers/media/dvb/frontends/stv0288.c
@@ -452,14 +452,7 @@ static int stv0288_set_property(struct dvb_frontend *fe, struct dtv_property *p)
return 0;
}
-static int stv0288_get_property(struct dvb_frontend *fe, struct dtv_property *p)
-{
- dprintk("%s(..)\n", __func__);
- return 0;
-}
-
-static int stv0288_set_frontend(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *dfp)
+static int stv0288_set_frontend(struct dvb_frontend *fe)
{
struct stv0288_state *state = fe->demodulator_priv;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
@@ -481,10 +474,8 @@ static int stv0288_set_frontend(struct dvb_frontend *fe,
state->config->set_ts_params(fe, 0);
/* only frequency & symbol_rate are used for tuner*/
- dfp->frequency = c->frequency;
- dfp->u.qpsk.symbol_rate = c->symbol_rate;
if (fe->ops.tuner_ops.set_params) {
- fe->ops.tuner_ops.set_params(fe, dfp);
+ fe->ops.tuner_ops.set_params(fe);
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 0);
}
@@ -545,10 +536,9 @@ static void stv0288_release(struct dvb_frontend *fe)
}
static struct dvb_frontend_ops stv0288_ops = {
-
+ .delsys = { SYS_DVBS },
.info = {
.name = "ST STV0288 DVB-S",
- .type = FE_QPSK,
.frequency_min = 950000,
.frequency_max = 2150000,
.frequency_stepsize = 1000, /* kHz for QPSK frontends */
@@ -578,7 +568,6 @@ static struct dvb_frontend_ops stv0288_ops = {
.set_voltage = stv0288_set_voltage,
.set_property = stv0288_set_property,
- .get_property = stv0288_get_property,
.set_frontend = stv0288_set_frontend,
};
diff --git a/drivers/media/dvb/frontends/stv0297.c b/drivers/media/dvb/frontends/stv0297.c
index 84d88f33275e..85c157a1fe5e 100644
--- a/drivers/media/dvb/frontends/stv0297.c
+++ b/drivers/media/dvb/frontends/stv0297.c
@@ -404,8 +404,9 @@ static int stv0297_read_ucblocks(struct dvb_frontend *fe, u32 * ucblocks)
return 0;
}
-static int stv0297_set_frontend(struct dvb_frontend *fe, struct dvb_frontend_parameters *p)
+static int stv0297_set_frontend(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct stv0297_state *state = fe->demodulator_priv;
int u_threshold;
int initial_u;
@@ -417,7 +418,7 @@ static int stv0297_set_frontend(struct dvb_frontend *fe, struct dvb_frontend_par
unsigned long timeout;
fe_spectral_inversion_t inversion;
- switch (p->u.qam.modulation) {
+ switch (p->modulation) {
case QAM_16:
case QAM_32:
case QAM_64:
@@ -455,7 +456,7 @@ static int stv0297_set_frontend(struct dvb_frontend *fe, struct dvb_frontend_par
stv0297_init(fe);
if (fe->ops.tuner_ops.set_params) {
- fe->ops.tuner_ops.set_params(fe, p);
+ fe->ops.tuner_ops.set_params(fe);
if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0);
}
@@ -519,16 +520,16 @@ static int stv0297_set_frontend(struct dvb_frontend *fe, struct dvb_frontend_par
stv0297_writereg_mask(state, 0x69, 0x0f, 0x00);
/* set parameters */
- stv0297_set_qam(state, p->u.qam.modulation);
- stv0297_set_symbolrate(state, p->u.qam.symbol_rate / 1000);
- stv0297_set_sweeprate(state, sweeprate, p->u.qam.symbol_rate / 1000);
+ stv0297_set_qam(state, p->modulation);
+ stv0297_set_symbolrate(state, p->symbol_rate / 1000);
+ stv0297_set_sweeprate(state, sweeprate, p->symbol_rate / 1000);
stv0297_set_carrieroffset(state, carrieroffset);
stv0297_set_inversion(state, inversion);
/* kick off lock */
/* Disable corner detection for higher QAMs */
- if (p->u.qam.modulation == QAM_128 ||
- p->u.qam.modulation == QAM_256)
+ if (p->modulation == QAM_128 ||
+ p->modulation == QAM_256)
stv0297_writereg_mask(state, 0x88, 0x08, 0x00);
else
stv0297_writereg_mask(state, 0x88, 0x08, 0x08);
@@ -613,8 +614,9 @@ timeout:
return 0;
}
-static int stv0297_get_frontend(struct dvb_frontend *fe, struct dvb_frontend_parameters *p)
+static int stv0297_get_frontend(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct stv0297_state *state = fe->demodulator_priv;
int reg_00, reg_83;
@@ -625,24 +627,24 @@ static int stv0297_get_frontend(struct dvb_frontend *fe, struct dvb_frontend_par
p->inversion = (reg_83 & 0x08) ? INVERSION_ON : INVERSION_OFF;
if (state->config->invert)
p->inversion = (p->inversion == INVERSION_ON) ? INVERSION_OFF : INVERSION_ON;
- p->u.qam.symbol_rate = stv0297_get_symbolrate(state) * 1000;
- p->u.qam.fec_inner = FEC_NONE;
+ p->symbol_rate = stv0297_get_symbolrate(state) * 1000;
+ p->fec_inner = FEC_NONE;
switch ((reg_00 >> 4) & 0x7) {
case 0:
- p->u.qam.modulation = QAM_16;
+ p->modulation = QAM_16;
break;
case 1:
- p->u.qam.modulation = QAM_32;
+ p->modulation = QAM_32;
break;
case 2:
- p->u.qam.modulation = QAM_128;
+ p->modulation = QAM_128;
break;
case 3:
- p->u.qam.modulation = QAM_256;
+ p->modulation = QAM_256;
break;
case 4:
- p->u.qam.modulation = QAM_64;
+ p->modulation = QAM_64;
break;
}
@@ -688,10 +690,9 @@ error:
}
static struct dvb_frontend_ops stv0297_ops = {
-
+ .delsys = { SYS_DVBC_ANNEX_A },
.info = {
.name = "ST STV0297 DVB-C",
- .type = FE_QAM,
.frequency_min = 47000000,
.frequency_max = 862000000,
.frequency_stepsize = 62500,
diff --git a/drivers/media/dvb/frontends/stv0299.c b/drivers/media/dvb/frontends/stv0299.c
index 42684bec8883..057b5f8effc0 100644
--- a/drivers/media/dvb/frontends/stv0299.c
+++ b/drivers/media/dvb/frontends/stv0299.c
@@ -559,8 +559,9 @@ static int stv0299_read_ucblocks(struct dvb_frontend* fe, u32* ucblocks)
return 0;
}
-static int stv0299_set_frontend(struct dvb_frontend* fe, struct dvb_frontend_parameters * p)
+static int stv0299_set_frontend(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct stv0299_state* state = fe->demodulator_priv;
int invval = 0;
@@ -579,24 +580,25 @@ static int stv0299_set_frontend(struct dvb_frontend* fe, struct dvb_frontend_par
stv0299_writeregI(state, 0x0c, (stv0299_readreg(state, 0x0c) & 0xfe) | invval);
if (fe->ops.tuner_ops.set_params) {
- fe->ops.tuner_ops.set_params(fe, p);
+ fe->ops.tuner_ops.set_params(fe);
if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0);
}
- stv0299_set_FEC (state, p->u.qpsk.fec_inner);
- stv0299_set_symbolrate (fe, p->u.qpsk.symbol_rate);
+ stv0299_set_FEC(state, p->fec_inner);
+ stv0299_set_symbolrate(fe, p->symbol_rate);
stv0299_writeregI(state, 0x22, 0x00);
stv0299_writeregI(state, 0x23, 0x00);
state->tuner_frequency = p->frequency;
- state->fec_inner = p->u.qpsk.fec_inner;
- state->symbol_rate = p->u.qpsk.symbol_rate;
+ state->fec_inner = p->fec_inner;
+ state->symbol_rate = p->symbol_rate;
return 0;
}
-static int stv0299_get_frontend(struct dvb_frontend* fe, struct dvb_frontend_parameters * p)
+static int stv0299_get_frontend(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct stv0299_state* state = fe->demodulator_priv;
s32 derot_freq;
int invval;
@@ -614,8 +616,8 @@ static int stv0299_get_frontend(struct dvb_frontend* fe, struct dvb_frontend_par
if (state->config->invert) invval = (~invval) & 1;
p->inversion = invval ? INVERSION_ON : INVERSION_OFF;
- p->u.qpsk.fec_inner = stv0299_get_fec (state);
- p->u.qpsk.symbol_rate = stv0299_get_symbolrate (state);
+ p->fec_inner = stv0299_get_fec(state);
+ p->symbol_rate = stv0299_get_symbolrate(state);
return 0;
}
@@ -646,14 +648,15 @@ static int stv0299_i2c_gate_ctrl(struct dvb_frontend* fe, int enable)
static int stv0299_get_tune_settings(struct dvb_frontend* fe, struct dvb_frontend_tune_settings* fesettings)
{
struct stv0299_state* state = fe->demodulator_priv;
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
fesettings->min_delay_ms = state->config->min_delay_ms;
- if (fesettings->parameters.u.qpsk.symbol_rate < 10000000) {
- fesettings->step_size = fesettings->parameters.u.qpsk.symbol_rate / 32000;
+ if (p->symbol_rate < 10000000) {
+ fesettings->step_size = p->symbol_rate / 32000;
fesettings->max_drift = 5000;
} else {
- fesettings->step_size = fesettings->parameters.u.qpsk.symbol_rate / 16000;
- fesettings->max_drift = fesettings->parameters.u.qpsk.symbol_rate / 2000;
+ fesettings->step_size = p->symbol_rate / 16000;
+ fesettings->max_drift = p->symbol_rate / 2000;
}
return 0;
}
@@ -705,10 +708,9 @@ error:
}
static struct dvb_frontend_ops stv0299_ops = {
-
+ .delsys = { SYS_DVBS },
.info = {
.name = "ST STV0299 DVB-S",
- .type = FE_QPSK,
.frequency_min = 950000,
.frequency_max = 2150000,
.frequency_stepsize = 125, /* kHz for QPSK frontends */
diff --git a/drivers/media/dvb/frontends/stv0367.c b/drivers/media/dvb/frontends/stv0367.c
index e57ab53e2e27..fdd20c7737b5 100644
--- a/drivers/media/dvb/frontends/stv0367.c
+++ b/drivers/media/dvb/frontends/stv0367.c
@@ -1577,9 +1577,9 @@ int stv0367ter_init(struct dvb_frontend *fe)
return 0;
}
-static int stv0367ter_algo(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *param)
+static int stv0367ter_algo(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct stv0367_state *state = fe->demodulator_priv;
struct stv0367ter_state *ter_state = state->ter_state;
int offset = 0, tempo = 0;
@@ -1591,7 +1591,7 @@ static int stv0367ter_algo(struct dvb_frontend *fe,
dprintk("%s:\n", __func__);
- ter_state->frequency = param->frequency;
+ ter_state->frequency = p->frequency;
ter_state->force = FE_TER_FORCENONE
+ stv0367_readbits(state, F367TER_FORCE) * 2;
ter_state->if_iq_mode = state->config->if_iq_mode;
@@ -1620,7 +1620,7 @@ static int stv0367ter_algo(struct dvb_frontend *fe,
usleep_range(5000, 7000);
- switch (param->inversion) {
+ switch (p->inversion) {
case INVERSION_AUTO:
default:
dprintk("%s: inversion AUTO\n", __func__);
@@ -1636,10 +1636,10 @@ static int stv0367ter_algo(struct dvb_frontend *fe,
case INVERSION_OFF:
if (ter_state->if_iq_mode == FE_TER_IQ_TUNER)
stv0367_writebits(state, F367TER_IQ_INVERT,
- param->inversion);
+ p->inversion);
else
stv0367_writebits(state, F367TER_INV_SPECTR,
- param->inversion);
+ p->inversion);
break;
}
@@ -1806,10 +1806,9 @@ static int stv0367ter_algo(struct dvb_frontend *fe,
return 0;
}
-static int stv0367ter_set_frontend(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *param)
+static int stv0367ter_set_frontend(struct dvb_frontend *fe)
{
- struct dvb_ofdm_parameters *op = &param->u.ofdm;
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct stv0367_state *state = fe->demodulator_priv;
struct stv0367ter_state *ter_state = state->ter_state;
@@ -1822,12 +1821,12 @@ static int stv0367ter_set_frontend(struct dvb_frontend *fe,
if (fe->ops.tuner_ops.set_params) {
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
- fe->ops.tuner_ops.set_params(fe, param);
+ fe->ops.tuner_ops.set_params(fe);
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 0);
}
- switch (op->transmission_mode) {
+ switch (p->transmission_mode) {
default:
case TRANSMISSION_MODE_AUTO:
case TRANSMISSION_MODE_2K:
@@ -1841,34 +1840,34 @@ static int stv0367ter_set_frontend(struct dvb_frontend *fe,
break;
}
- switch (op->guard_interval) {
+ switch (p->guard_interval) {
default:
case GUARD_INTERVAL_1_32:
case GUARD_INTERVAL_1_16:
case GUARD_INTERVAL_1_8:
case GUARD_INTERVAL_1_4:
- ter_state->guard = op->guard_interval;
+ ter_state->guard = p->guard_interval;
break;
case GUARD_INTERVAL_AUTO:
ter_state->guard = GUARD_INTERVAL_1_32;
break;
}
- switch (op->bandwidth) {
- case BANDWIDTH_6_MHZ:
+ switch (p->bandwidth_hz) {
+ case 6000000:
ter_state->bw = FE_TER_CHAN_BW_6M;
break;
- case BANDWIDTH_7_MHZ:
+ case 7000000:
ter_state->bw = FE_TER_CHAN_BW_7M;
break;
- case BANDWIDTH_8_MHZ:
+ case 8000000:
default:
ter_state->bw = FE_TER_CHAN_BW_8M;
}
ter_state->hierarchy = FE_TER_HIER_NONE;
- switch (param->inversion) {
+ switch (p->inversion) {
case INVERSION_OFF:
case INVERSION_ON:
num_trials = 1;
@@ -1885,14 +1884,14 @@ static int stv0367ter_set_frontend(struct dvb_frontend *fe,
while (((index) < num_trials) && (ter_state->state != FE_TER_LOCKOK)) {
if (!ter_state->first_lock) {
- if (param->inversion == INVERSION_AUTO)
+ if (p->inversion == INVERSION_AUTO)
ter_state->sense = SenseTrials[index];
}
- stv0367ter_algo(fe,/* &pLook, result,*/ param);
+ stv0367ter_algo(fe);
if ((ter_state->state == FE_TER_LOCKOK) &&
- (param->inversion == INVERSION_AUTO) &&
+ (p->inversion == INVERSION_AUTO) &&
(index == 1)) {
/* invert spectrum sense */
SenseTrials[index] = SenseTrials[0];
@@ -1927,50 +1926,48 @@ static int stv0367ter_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
return 0;
}
-static int stv0367ter_get_frontend(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *param)
+static int stv0367ter_get_frontend(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct stv0367_state *state = fe->demodulator_priv;
struct stv0367ter_state *ter_state = state->ter_state;
- struct dvb_ofdm_parameters *op = &param->u.ofdm;
- struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int error = 0;
enum stv0367_ter_mode mode;
int constell = 0,/* snr = 0,*/ Data = 0;
- param->frequency = stv0367_get_tuner_freq(fe);
- if ((int)param->frequency < 0)
- param->frequency = c->frequency;
+ p->frequency = stv0367_get_tuner_freq(fe);
+ if ((int)p->frequency < 0)
+ p->frequency = -p->frequency;
constell = stv0367_readbits(state, F367TER_TPS_CONST);
if (constell == 0)
- op->constellation = QPSK;
+ p->modulation = QPSK;
else if (constell == 1)
- op->constellation = QAM_16;
+ p->modulation = QAM_16;
else
- op->constellation = QAM_64;
+ p->modulation = QAM_64;
- param->inversion = stv0367_readbits(state, F367TER_INV_SPECTR);
+ p->inversion = stv0367_readbits(state, F367TER_INV_SPECTR);
/* Get the Hierarchical mode */
Data = stv0367_readbits(state, F367TER_TPS_HIERMODE);
switch (Data) {
case 0:
- op->hierarchy_information = HIERARCHY_NONE;
+ p->hierarchy = HIERARCHY_NONE;
break;
case 1:
- op->hierarchy_information = HIERARCHY_1;
+ p->hierarchy = HIERARCHY_1;
break;
case 2:
- op->hierarchy_information = HIERARCHY_2;
+ p->hierarchy = HIERARCHY_2;
break;
case 3:
- op->hierarchy_information = HIERARCHY_4;
+ p->hierarchy = HIERARCHY_4;
break;
default:
- op->hierarchy_information = HIERARCHY_AUTO;
+ p->hierarchy = HIERARCHY_AUTO;
break; /* error */
}
@@ -1982,22 +1979,22 @@ static int stv0367ter_get_frontend(struct dvb_frontend *fe,
switch (Data) {
case 0:
- op->code_rate_HP = FEC_1_2;
+ p->code_rate_HP = FEC_1_2;
break;
case 1:
- op->code_rate_HP = FEC_2_3;
+ p->code_rate_HP = FEC_2_3;
break;
case 2:
- op->code_rate_HP = FEC_3_4;
+ p->code_rate_HP = FEC_3_4;
break;
case 3:
- op->code_rate_HP = FEC_5_6;
+ p->code_rate_HP = FEC_5_6;
break;
case 4:
- op->code_rate_HP = FEC_7_8;
+ p->code_rate_HP = FEC_7_8;
break;
default:
- op->code_rate_HP = FEC_AUTO;
+ p->code_rate_HP = FEC_AUTO;
break; /* error */
}
@@ -2005,19 +2002,19 @@ static int stv0367ter_get_frontend(struct dvb_frontend *fe,
switch (mode) {
case FE_TER_MODE_2K:
- op->transmission_mode = TRANSMISSION_MODE_2K;
+ p->transmission_mode = TRANSMISSION_MODE_2K;
break;
/* case FE_TER_MODE_4K:
- op->transmission_mode = TRANSMISSION_MODE_4K;
+ p->transmission_mode = TRANSMISSION_MODE_4K;
break;*/
case FE_TER_MODE_8K:
- op->transmission_mode = TRANSMISSION_MODE_8K;
+ p->transmission_mode = TRANSMISSION_MODE_8K;
break;
default:
- op->transmission_mode = TRANSMISSION_MODE_AUTO;
+ p->transmission_mode = TRANSMISSION_MODE_AUTO;
}
- op->guard_interval = stv0367_readbits(state, F367TER_SYR_GUARD);
+ p->guard_interval = stv0367_readbits(state, F367TER_SYR_GUARD);
return error;
}
@@ -2265,9 +2262,9 @@ static void stv0367_release(struct dvb_frontend *fe)
}
static struct dvb_frontend_ops stv0367ter_ops = {
+ .delsys = { SYS_DVBT },
.info = {
.name = "ST STV0367 DVB-T",
- .type = FE_OFDM,
.frequency_min = 47000000,
.frequency_max = 862000000,
.frequency_stepsize = 15625,
@@ -2822,9 +2819,8 @@ int stv0367cab_init(struct dvb_frontend *fe)
}
static
enum stv0367_cab_signal_type stv0367cab_algo(struct stv0367_state *state,
- struct dvb_frontend_parameters *param)
+ struct dtv_frontend_properties *p)
{
- struct dvb_qam_parameters *op = &param->u.qam;
struct stv0367cab_state *cab_state = state->cab_state;
enum stv0367_cab_signal_type signalType = FE_CAB_NOAGC;
u32 QAMFEC_Lock, QAM_Lock, u32_tmp,
@@ -2839,7 +2835,7 @@ enum stv0367_cab_signal_type stv0367cab_algo(struct stv0367_state *state,
/* A max lock time of 25 ms is allowed for delayed AGC */
AGCTimeOut = 25;
/* 100000 symbols needed by the TRL as a maximum value */
- TRLTimeOut = 100000000 / op->symbol_rate;
+ TRLTimeOut = 100000000 / p->symbol_rate;
/* CRLSymbols is the needed number of symbols to achieve a lock
within [-4%, +4%] of the symbol rate.
CRL timeout is calculated
@@ -2849,7 +2845,7 @@ enum stv0367_cab_signal_type stv0367cab_algo(struct stv0367_state *state,
A characterization must be performed
with these echoes to get new timeout values.
*/
- switch (op->modulation) {
+ switch (p->modulation) {
case QAM_16:
CRLSymbols = 150000;
EQLTimeOut = 100;
@@ -2883,9 +2879,9 @@ enum stv0367_cab_signal_type stv0367cab_algo(struct stv0367_state *state,
} else
#endif
CRLTimeOut = (25 * CRLSymbols * (cab_state->search_range / 1000)) /
- (op->symbol_rate / 1000);
+ (p->symbol_rate / 1000);
- CRLTimeOut = (1000 * CRLTimeOut) / op->symbol_rate;
+ CRLTimeOut = (1000 * CRLTimeOut) / p->symbol_rate;
/* Timeouts below 50ms are coerced */
if (CRLTimeOut < 50)
CRLTimeOut = 50;
@@ -2915,7 +2911,7 @@ enum stv0367_cab_signal_type stv0367cab_algo(struct stv0367_state *state,
stv0367cab_set_derot_freq(state, cab_state->adc_clk,
(1000 * (s32)state->config->if_khz + cab_state->derot_offset));
/* Disable the Allpass Filter when the symbol rate is out of range */
- if ((op->symbol_rate > 10800000) | (op->symbol_rate < 1800000)) {
+ if ((p->symbol_rate > 10800000) | (p->symbol_rate < 1800000)) {
stv0367_writebits(state, F367CAB_ADJ_EN, 0);
stv0367_writebits(state, F367CAB_ALLPASSFILT_EN, 0);
}
@@ -2999,7 +2995,7 @@ enum stv0367_cab_signal_type stv0367cab_algo(struct stv0367_state *state,
if (QAMFEC_Lock) {
signalType = FE_CAB_DATAOK;
- cab_state->modulation = op->modulation;
+ cab_state->modulation = p->modulation;
cab_state->spect_inv = stv0367_readbits(state,
F367CAB_QUAD_INV);
#if 0
@@ -3081,20 +3077,19 @@ enum stv0367_cab_signal_type stv0367cab_algo(struct stv0367_state *state,
return signalType;
}
-static int stv0367cab_set_frontend(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *param)
+static int stv0367cab_set_frontend(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct stv0367_state *state = fe->demodulator_priv;
struct stv0367cab_state *cab_state = state->cab_state;
- struct dvb_qam_parameters *op = &param->u.qam;
enum stv0367cab_mod QAMSize = 0;
dprintk("%s: freq = %d, srate = %d\n", __func__,
- param->frequency, op->symbol_rate);
+ p->frequency, p->symbol_rate);
cab_state->derot_offset = 0;
- switch (op->modulation) {
+ switch (p->modulation) {
case QAM_16:
QAMSize = FE_CAB_MOD_QAM16;
break;
@@ -3120,77 +3115,76 @@ static int stv0367cab_set_frontend(struct dvb_frontend *fe,
if (fe->ops.tuner_ops.set_params) {
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
- fe->ops.tuner_ops.set_params(fe, param);
+ fe->ops.tuner_ops.set_params(fe);
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 0);
}
stv0367cab_SetQamSize(
state,
- op->symbol_rate,
+ p->symbol_rate,
QAMSize);
stv0367cab_set_srate(state,
cab_state->adc_clk,
cab_state->mclk,
- op->symbol_rate,
+ p->symbol_rate,
QAMSize);
/* Search algorithm launch, [-1.1*RangeOffset, +1.1*RangeOffset] scan */
- cab_state->state = stv0367cab_algo(state, param);
+ cab_state->state = stv0367cab_algo(state, p);
return 0;
}
-static int stv0367cab_get_frontend(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *param)
+static int stv0367cab_get_frontend(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct stv0367_state *state = fe->demodulator_priv;
struct stv0367cab_state *cab_state = state->cab_state;
- struct dvb_qam_parameters *op = &param->u.qam;
enum stv0367cab_mod QAMSize;
dprintk("%s:\n", __func__);
- op->symbol_rate = stv0367cab_GetSymbolRate(state, cab_state->mclk);
+ p->symbol_rate = stv0367cab_GetSymbolRate(state, cab_state->mclk);
QAMSize = stv0367_readbits(state, F367CAB_QAM_MODE);
switch (QAMSize) {
case FE_CAB_MOD_QAM16:
- op->modulation = QAM_16;
+ p->modulation = QAM_16;
break;
case FE_CAB_MOD_QAM32:
- op->modulation = QAM_32;
+ p->modulation = QAM_32;
break;
case FE_CAB_MOD_QAM64:
- op->modulation = QAM_64;
+ p->modulation = QAM_64;
break;
case FE_CAB_MOD_QAM128:
- op->modulation = QAM_128;
+ p->modulation = QAM_128;
break;
case QAM_256:
- op->modulation = QAM_256;
+ p->modulation = QAM_256;
break;
default:
break;
}
- param->frequency = stv0367_get_tuner_freq(fe);
+ p->frequency = stv0367_get_tuner_freq(fe);
- dprintk("%s: tuner frequency = %d\n", __func__, param->frequency);
+ dprintk("%s: tuner frequency = %d\n", __func__, p->frequency);
if (state->config->if_khz == 0) {
- param->frequency +=
+ p->frequency +=
(stv0367cab_get_derot_freq(state, cab_state->adc_clk) -
cab_state->adc_clk / 4000);
return 0;
}
if (state->config->if_khz > cab_state->adc_clk / 1000)
- param->frequency += (state->config->if_khz
+ p->frequency += (state->config->if_khz
- stv0367cab_get_derot_freq(state, cab_state->adc_clk)
- cab_state->adc_clk / 1000);
else
- param->frequency += (state->config->if_khz
+ p->frequency += (state->config->if_khz
- stv0367cab_get_derot_freq(state, cab_state->adc_clk));
return 0;
@@ -3386,9 +3380,9 @@ static int stv0367cab_read_ucblcks(struct dvb_frontend *fe, u32 *ucblocks)
};
static struct dvb_frontend_ops stv0367cab_ops = {
+ .delsys = { SYS_DVBC_ANNEX_A },
.info = {
.name = "ST STV0367 DVB-C",
- .type = FE_QAM,
.frequency_min = 47000000,
.frequency_max = 862000000,
.frequency_stepsize = 62500,
diff --git a/drivers/media/dvb/frontends/stv0900_core.c b/drivers/media/dvb/frontends/stv0900_core.c
index 0ca316d6fffa..7f1badaf0d03 100644
--- a/drivers/media/dvb/frontends/stv0900_core.c
+++ b/drivers/media/dvb/frontends/stv0900_core.c
@@ -973,22 +973,6 @@ static enum dvbfe_algo stv0900_frontend_algo(struct dvb_frontend *fe)
return DVBFE_ALGO_CUSTOM;
}
-static int stb0900_set_property(struct dvb_frontend *fe,
- struct dtv_property *tvp)
-{
- dprintk("%s(..)\n", __func__);
-
- return 0;
-}
-
-static int stb0900_get_property(struct dvb_frontend *fe,
- struct dtv_property *tvp)
-{
- dprintk("%s(..)\n", __func__);
-
- return 0;
-}
-
void stv0900_start_search(struct stv0900_internal *intp,
enum fe_stv0900_demod_num demod)
{
@@ -1574,8 +1558,7 @@ static int stv0900_status(struct stv0900_internal *intp,
return locked;
}
-static enum dvbfe_search stv0900_search(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *params)
+static enum dvbfe_search stv0900_search(struct dvb_frontend *fe)
{
struct stv0900_state *state = fe->demodulator_priv;
struct stv0900_internal *intp = state->internal;
@@ -1675,12 +1658,6 @@ static int stv0900_read_status(struct dvb_frontend *fe, enum fe_status *status)
return 0;
}
-static int stv0900_track(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *p)
-{
- return 0;
-}
-
static int stv0900_stop_ts(struct dvb_frontend *fe, int stop_ts)
{
@@ -1866,24 +1843,23 @@ static int stv0900_sleep(struct dvb_frontend *fe)
return 0;
}
-static int stv0900_get_frontend(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *p)
+static int stv0900_get_frontend(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct stv0900_state *state = fe->demodulator_priv;
struct stv0900_internal *intp = state->internal;
enum fe_stv0900_demod_num demod = state->demod;
struct stv0900_signal_info p_result = intp->result[demod];
p->frequency = p_result.locked ? p_result.frequency : 0;
- p->u.qpsk.symbol_rate = p_result.locked ? p_result.symbol_rate : 0;
+ p->symbol_rate = p_result.locked ? p_result.symbol_rate : 0;
return 0;
}
static struct dvb_frontend_ops stv0900_ops = {
-
+ .delsys = { SYS_DVBS, SYS_DVBS2, SYS_DSS },
.info = {
.name = "STV0900 frontend",
- .type = FE_QPSK,
.frequency_min = 950000,
.frequency_max = 2150000,
.frequency_stepsize = 125,
@@ -1907,10 +1883,7 @@ static struct dvb_frontend_ops stv0900_ops = {
.diseqc_send_burst = stv0900_send_burst,
.diseqc_recv_slave_reply = stv0900_recv_slave_reply,
.set_tone = stv0900_set_tone,
- .set_property = stb0900_set_property,
- .get_property = stb0900_get_property,
.search = stv0900_search,
- .track = stv0900_track,
.read_status = stv0900_read_status,
.read_ber = stv0900_read_ber,
.read_signal_strength = stv0900_read_signal_strength,
diff --git a/drivers/media/dvb/frontends/stv090x.c b/drivers/media/dvb/frontends/stv090x.c
index ebda41936b90..4aef1877ed42 100644
--- a/drivers/media/dvb/frontends/stv090x.c
+++ b/drivers/media/dvb/frontends/stv090x.c
@@ -3427,17 +3427,17 @@ err:
return -1;
}
-static enum dvbfe_search stv090x_search(struct dvb_frontend *fe, struct dvb_frontend_parameters *p)
+static enum dvbfe_search stv090x_search(struct dvb_frontend *fe)
{
struct stv090x_state *state = fe->demodulator_priv;
struct dtv_frontend_properties *props = &fe->dtv_property_cache;
- if (p->frequency == 0)
+ if (props->frequency == 0)
return DVBFE_ALGO_SEARCH_INVALID;
state->delsys = props->delivery_system;
- state->frequency = p->frequency;
- state->srate = p->u.qpsk.symbol_rate;
+ state->frequency = props->frequency;
+ state->srate = props->symbol_rate;
state->search_mode = STV090x_SEARCH_AUTO;
state->algo = STV090x_COLD_SEARCH;
state->fec = STV090x_PRERR;
@@ -4712,10 +4712,9 @@ int stv090x_set_gpio(struct dvb_frontend *fe, u8 gpio, u8 dir, u8 value,
EXPORT_SYMBOL(stv090x_set_gpio);
static struct dvb_frontend_ops stv090x_ops = {
-
+ .delsys = { SYS_DVBS, SYS_DVBS2, SYS_DSS },
.info = {
.name = "STV090x Multistandard",
- .type = FE_QPSK,
.frequency_min = 950000,
.frequency_max = 2150000,
.frequency_stepsize = 0,
@@ -4743,7 +4742,7 @@ static struct dvb_frontend_ops stv090x_ops = {
.read_status = stv090x_read_status,
.read_ber = stv090x_read_per,
.read_signal_strength = stv090x_read_signal_strength,
- .read_snr = stv090x_read_cnr
+ .read_snr = stv090x_read_cnr,
};
diff --git a/drivers/media/dvb/frontends/stv6110.c b/drivers/media/dvb/frontends/stv6110.c
index 2dca7c8e5148..20b5fa92c53e 100644
--- a/drivers/media/dvb/frontends/stv6110.c
+++ b/drivers/media/dvb/frontends/stv6110.c
@@ -347,8 +347,7 @@ static int stv6110_set_frequency(struct dvb_frontend *fe, u32 frequency)
return 0;
}
-static int stv6110_set_params(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *params)
+static int stv6110_set_params(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
u32 bandwidth = carrier_width(c->symbol_rate, c->rolloff);
diff --git a/drivers/media/dvb/frontends/tda10021.c b/drivers/media/dvb/frontends/tda10021.c
index 6ca533ea0f0e..1bff7f457e19 100644
--- a/drivers/media/dvb/frontends/tda10021.c
+++ b/drivers/media/dvb/frontends/tda10021.c
@@ -224,47 +224,86 @@ static int tda10021_init (struct dvb_frontend *fe)
return 0;
}
-static int tda10021_set_parameters (struct dvb_frontend *fe,
- struct dvb_frontend_parameters *p)
+struct qam_params {
+ u8 conf, agcref, lthr, mseth, aref;
+};
+
+static int tda10021_set_parameters(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ u32 delsys = c->delivery_system;
+ unsigned qam = c->modulation;
+ bool is_annex_c;
+ u32 reg0x3d;
struct tda10021_state* state = fe->demodulator_priv;
+ static const struct qam_params qam_params[] = {
+ /* Modulation Conf AGCref LTHR MSETH AREF */
+ [QPSK] = { 0x14, 0x78, 0x78, 0x8c, 0x96 },
+ [QAM_16] = { 0x00, 0x8c, 0x87, 0xa2, 0x91 },
+ [QAM_32] = { 0x04, 0x8c, 0x64, 0x74, 0x96 },
+ [QAM_64] = { 0x08, 0x6a, 0x46, 0x43, 0x6a },
+ [QAM_128] = { 0x0c, 0x78, 0x36, 0x34, 0x7e },
+ [QAM_256] = { 0x10, 0x5c, 0x26, 0x23, 0x6b },
+ };
+
+ switch (delsys) {
+ case SYS_DVBC_ANNEX_A:
+ is_annex_c = false;
+ break;
+ case SYS_DVBC_ANNEX_C:
+ is_annex_c = true;
+ break;
+ default:
+ return -EINVAL;
+ }
- //table for QAM4-QAM256 ready QAM4 QAM16 QAM32 QAM64 QAM128 QAM256
- //CONF
- static const u8 reg0x00 [] = { 0x14, 0x00, 0x04, 0x08, 0x0c, 0x10 };
- //AGCREF value
- static const u8 reg0x01 [] = { 0x78, 0x8c, 0x8c, 0x6a, 0x78, 0x5c };
- //LTHR value
- static const u8 reg0x05 [] = { 0x78, 0x87, 0x64, 0x46, 0x36, 0x26 };
- //MSETH
- static const u8 reg0x08 [] = { 0x8c, 0xa2, 0x74, 0x43, 0x34, 0x23 };
- //AREF
- static const u8 reg0x09 [] = { 0x96, 0x91, 0x96, 0x6a, 0x7e, 0x6b };
-
- int qam = p->u.qam.modulation;
-
- if (qam < 0 || qam > 5)
+ /*
+ * gcc optimizes the code below to the same object code as
+ * "if (qam > 5) return -EINVAL;"
+ * Yet, the switch is clearer, as it shows which QAM standards are
+ * supported by the driver, and it avoids the use of magic
+ * numbers.
+ */
+ switch (qam) {
+ case QPSK:
+ case QAM_16:
+ case QAM_32:
+ case QAM_64:
+ case QAM_128:
+ case QAM_256:
+ break;
+ default:
return -EINVAL;
+ }
- if (p->inversion != INVERSION_ON && p->inversion != INVERSION_OFF)
+ if (c->inversion != INVERSION_ON && c->inversion != INVERSION_OFF)
return -EINVAL;
- //printk("tda10021: set frequency to %d qam=%d symrate=%d\n", p->frequency,qam,p->u.qam.symbol_rate);
+ /*printk("tda10021: set frequency to %d qam=%d symrate=%d\n", p->frequency,qam,p->symbol_rate);*/
if (fe->ops.tuner_ops.set_params) {
- fe->ops.tuner_ops.set_params(fe, p);
+ fe->ops.tuner_ops.set_params(fe);
if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0);
}
- tda10021_set_symbolrate (state, p->u.qam.symbol_rate);
- _tda10021_writereg (state, 0x34, state->pwm);
-
- _tda10021_writereg (state, 0x01, reg0x01[qam]);
- _tda10021_writereg (state, 0x05, reg0x05[qam]);
- _tda10021_writereg (state, 0x08, reg0x08[qam]);
- _tda10021_writereg (state, 0x09, reg0x09[qam]);
-
- tda10021_setup_reg0 (state, reg0x00[qam], p->inversion);
+ tda10021_set_symbolrate(state, c->symbol_rate);
+ _tda10021_writereg(state, 0x34, state->pwm);
+
+ _tda10021_writereg(state, 0x01, qam_params[qam].agcref);
+ _tda10021_writereg(state, 0x05, qam_params[qam].lthr);
+ _tda10021_writereg(state, 0x08, qam_params[qam].mseth);
+ _tda10021_writereg(state, 0x09, qam_params[qam].aref);
+
+ /*
+ * Bit 0 == 0 means roll-off = 0.15 (Annex A)
+ * == 1 means roll-off = 0.13 (Annex C)
+ */
+ reg0x3d = tda10021_readreg (state, 0x3d);
+ if (is_annex_c)
+ _tda10021_writereg (state, 0x3d, 0x01 | reg0x3d);
+ else
+ _tda10021_writereg (state, 0x3d, 0xfe & reg0x3d);
+ tda10021_setup_reg0(state, qam_params[qam].conf, c->inversion);
return 0;
}
@@ -347,8 +386,9 @@ static int tda10021_read_ucblocks(struct dvb_frontend* fe, u32* ucblocks)
return 0;
}
-static int tda10021_get_frontend(struct dvb_frontend* fe, struct dvb_frontend_parameters *p)
+static int tda10021_get_frontend(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct tda10021_state* state = fe->demodulator_priv;
int sync;
s8 afc = 0;
@@ -360,17 +400,17 @@ static int tda10021_get_frontend(struct dvb_frontend* fe, struct dvb_frontend_pa
printk(sync & 2 ? "DVB: TDA10021(%d): AFC (%d) %dHz\n" :
"DVB: TDA10021(%d): [AFC (%d) %dHz]\n",
state->frontend.dvb->num, afc,
- -((s32)p->u.qam.symbol_rate * afc) >> 10);
+ -((s32)p->symbol_rate * afc) >> 10);
}
p->inversion = ((state->reg0 & 0x20) == 0x20) ^ (state->config->invert != 0) ? INVERSION_ON : INVERSION_OFF;
- p->u.qam.modulation = ((state->reg0 >> 2) & 7) + QAM_16;
+ p->modulation = ((state->reg0 >> 2) & 7) + QAM_16;
- p->u.qam.fec_inner = FEC_NONE;
+ p->fec_inner = FEC_NONE;
p->frequency = ((p->frequency + 31250) / 62500) * 62500;
if (sync & 2)
- p->frequency -= ((s32)p->u.qam.symbol_rate * afc) >> 10;
+ p->frequency -= ((s32)p->symbol_rate * afc) >> 10;
return 0;
}
@@ -444,10 +484,9 @@ error:
}
static struct dvb_frontend_ops tda10021_ops = {
-
+ .delsys = { SYS_DVBC_ANNEX_A, SYS_DVBC_ANNEX_C },
.info = {
.name = "Philips TDA10021 DVB-C",
- .type = FE_QAM,
.frequency_stepsize = 62500,
.frequency_min = 47000000,
.frequency_max = 862000000,
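The per-modulation register values become a designated-initializer table indexed directly by the modulation value; this works because QPSK through QAM_256 are the consecutive values 0..5 in enum fe_modulation, so the guarding switch is equivalent to the old "qam < 0 || qam > 5" range check. Past that switch the lookup is simply, as in the hunk above:

	_tda10021_writereg(state, 0x01, qam_params[qam].agcref);
	_tda10021_writereg(state, 0x05, qam_params[qam].lthr);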
diff --git a/drivers/media/dvb/frontends/tda10023.c b/drivers/media/dvb/frontends/tda10023.c
index a3c34eecdee9..ca1e0d54b69a 100644
--- a/drivers/media/dvb/frontends/tda10023.c
+++ b/drivers/media/dvb/frontends/tda10023.c
@@ -298,42 +298,80 @@ static int tda10023_init (struct dvb_frontend *fe)
return 0;
}
-static int tda10023_set_parameters (struct dvb_frontend *fe,
- struct dvb_frontend_parameters *p)
+struct qam_params {
+ u8 qam, lockthr, mseth, aref, agcrefnyq, eragnyq_thd;
+};
+
+static int tda10023_set_parameters(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ u32 delsys = c->delivery_system;
+ unsigned qam = c->modulation;
+ bool is_annex_c;
struct tda10023_state* state = fe->demodulator_priv;
-
- static int qamvals[6][6] = {
- // QAM LOCKTHR MSETH AREF AGCREFNYQ ERAGCNYQ_THD
- { (5<<2), 0x78, 0x8c, 0x96, 0x78, 0x4c }, // 4 QAM
- { (0<<2), 0x87, 0xa2, 0x91, 0x8c, 0x57 }, // 16 QAM
- { (1<<2), 0x64, 0x74, 0x96, 0x8c, 0x57 }, // 32 QAM
- { (2<<2), 0x46, 0x43, 0x6a, 0x6a, 0x44 }, // 64 QAM
- { (3<<2), 0x36, 0x34, 0x7e, 0x78, 0x4c }, // 128 QAM
- { (4<<2), 0x26, 0x23, 0x6c, 0x5c, 0x3c }, // 256 QAM
+ static const struct qam_params qam_params[] = {
+ /* Modulation QAM LOCKTHR MSETH AREF AGCREFNYQ ERAGCNYQ_THD */
+ [QPSK] = { (5<<2), 0x78, 0x8c, 0x96, 0x78, 0x4c },
+ [QAM_16] = { (0<<2), 0x87, 0xa2, 0x91, 0x8c, 0x57 },
+ [QAM_32] = { (1<<2), 0x64, 0x74, 0x96, 0x8c, 0x57 },
+ [QAM_64] = { (2<<2), 0x46, 0x43, 0x6a, 0x6a, 0x44 },
+ [QAM_128] = { (3<<2), 0x36, 0x34, 0x7e, 0x78, 0x4c },
+ [QAM_256] = { (4<<2), 0x26, 0x23, 0x6c, 0x5c, 0x3c },
};
- int qam = p->u.qam.modulation;
+ switch (delsys) {
+ case SYS_DVBC_ANNEX_A:
+ is_annex_c = false;
+ break;
+ case SYS_DVBC_ANNEX_C:
+ is_annex_c = true;
+ break;
+ default:
+ return -EINVAL;
+ }
- if (qam < 0 || qam > 5)
+ /*
+ * gcc optimizes the code below to the same object code as
+ * "if (qam > 5) return -EINVAL;"
+ * Yet, the switch is clearer, as it shows which QAM standards are
+ * supported by the driver, and it avoids the use of magic
+ * numbers.
+ */
+ switch (qam) {
+ case QPSK:
+ case QAM_16:
+ case QAM_32:
+ case QAM_64:
+ case QAM_128:
+ case QAM_256:
+ break;
+ default:
return -EINVAL;
+ }
if (fe->ops.tuner_ops.set_params) {
- fe->ops.tuner_ops.set_params(fe, p);
+ fe->ops.tuner_ops.set_params(fe);
if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0);
}
- tda10023_set_symbolrate (state, p->u.qam.symbol_rate);
- tda10023_writereg (state, 0x05, qamvals[qam][1]);
- tda10023_writereg (state, 0x08, qamvals[qam][2]);
- tda10023_writereg (state, 0x09, qamvals[qam][3]);
- tda10023_writereg (state, 0xb4, qamvals[qam][4]);
- tda10023_writereg (state, 0xb6, qamvals[qam][5]);
-
-// tda10023_writereg (state, 0x04, (p->inversion?0x12:0x32));
-// tda10023_writebit (state, 0x04, 0x60, (p->inversion?0:0x20));
- tda10023_writebit (state, 0x04, 0x40, 0x40);
- tda10023_setup_reg0 (state, qamvals[qam][0]);
+ tda10023_set_symbolrate(state, c->symbol_rate);
+ tda10023_writereg(state, 0x05, qam_params[qam].lockthr);
+ tda10023_writereg(state, 0x08, qam_params[qam].mseth);
+ tda10023_writereg(state, 0x09, qam_params[qam].aref);
+ tda10023_writereg(state, 0xb4, qam_params[qam].agcrefnyq);
+ tda10023_writereg(state, 0xb6, qam_params[qam].eragnyq_thd);
+#if 0
+ tda10023_writereg(state, 0x04, (c->inversion ? 0x12 : 0x32));
+ tda10023_writebit(state, 0x04, 0x60, (c->inversion ? 0 : 0x20));
+#endif
+ tda10023_writebit(state, 0x04, 0x40, 0x40);
+
+ if (is_annex_c)
+ tda10023_writebit(state, 0x3d, 0xfc, 0x03);
+ else
+ tda10023_writebit(state, 0x3d, 0xfc, 0x02);
+
+ tda10023_setup_reg0(state, qam_params[qam].qam);
return 0;
}
@@ -418,8 +456,9 @@ static int tda10023_read_ucblocks(struct dvb_frontend* fe, u32* ucblocks)
return 0;
}
-static int tda10023_get_frontend(struct dvb_frontend* fe, struct dvb_frontend_parameters *p)
+static int tda10023_get_frontend(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct tda10023_state* state = fe->demodulator_priv;
int sync,inv;
s8 afc = 0;
@@ -433,17 +472,17 @@ static int tda10023_get_frontend(struct dvb_frontend* fe, struct dvb_frontend_pa
printk(sync & 2 ? "DVB: TDA10023(%d): AFC (%d) %dHz\n" :
"DVB: TDA10023(%d): [AFC (%d) %dHz]\n",
state->frontend.dvb->num, afc,
- -((s32)p->u.qam.symbol_rate * afc) >> 10);
+ -((s32)p->symbol_rate * afc) >> 10);
}
p->inversion = (inv&0x20?0:1);
- p->u.qam.modulation = ((state->reg0 >> 2) & 7) + QAM_16;
+ p->modulation = ((state->reg0 >> 2) & 7) + QAM_16;
- p->u.qam.fec_inner = FEC_NONE;
+ p->fec_inner = FEC_NONE;
p->frequency = ((p->frequency + 31250) / 62500) * 62500;
if (sync & 2)
- p->frequency -= ((s32)p->u.qam.symbol_rate * afc) >> 10;
+ p->frequency -= ((s32)p->symbol_rate * afc) >> 10;
return 0;
}
@@ -534,10 +573,9 @@ error:
}
static struct dvb_frontend_ops tda10023_ops = {
-
+ .delsys = { SYS_DVBC_ANNEX_A, SYS_DVBC_ANNEX_C },
.info = {
.name = "Philips TDA10023 DVB-C",
- .type = FE_QAM,
.frequency_stepsize = 62500,
.frequency_min = 47000000,
.frequency_max = 862000000,
@@ -557,7 +595,6 @@ static struct dvb_frontend_ops tda10023_ops = {
.set_frontend = tda10023_set_parameters,
.get_frontend = tda10023_get_frontend,
-
.read_status = tda10023_read_status,
.read_ber = tda10023_read_ber,
.read_signal_strength = tda10023_read_signal_strength,
diff --git a/drivers/media/dvb/frontends/tda10048.c b/drivers/media/dvb/frontends/tda10048.c
index 7f105946a434..71fb63299de7 100644
--- a/drivers/media/dvb/frontends/tda10048.c
+++ b/drivers/media/dvb/frontends/tda10048.c
@@ -153,7 +153,7 @@ struct tda10048_state {
u32 pll_pfactor;
u32 sample_freq;
- enum fe_bandwidth bandwidth;
+ u32 bandwidth;
};
static struct init_tab {
@@ -341,21 +341,14 @@ static int tda10048_set_wref(struct dvb_frontend *fe, u32 sample_freq_hz,
{
struct tda10048_state *state = fe->demodulator_priv;
u64 t, z;
- u32 b = 8000000;
dprintk(1, "%s()\n", __func__);
if (sample_freq_hz == 0)
return -EINVAL;
- if (bw == BANDWIDTH_6_MHZ)
- b = 6000000;
- else
- if (bw == BANDWIDTH_7_MHZ)
- b = 7000000;
-
/* WREF = (B / (7 * fs)) * 2^31 */
- t = b * 10;
+ t = bw * 10;
/* avoid warning: this decimal constant is unsigned only in ISO C90 */
/* t *= 2147483648 on 32bit platforms */
t *= (2048 * 1024);
@@ -378,25 +371,18 @@ static int tda10048_set_invwref(struct dvb_frontend *fe, u32 sample_freq_hz,
{
struct tda10048_state *state = fe->demodulator_priv;
u64 t;
- u32 b = 8000000;
dprintk(1, "%s()\n", __func__);
if (sample_freq_hz == 0)
return -EINVAL;
- if (bw == BANDWIDTH_6_MHZ)
- b = 6000000;
- else
- if (bw == BANDWIDTH_7_MHZ)
- b = 7000000;
-
/* INVWREF = ((7 * fs) / B) * 2^5 */
t = sample_freq_hz;
t *= 7;
t *= 32;
t *= 10;
- do_div(t, b);
+ do_div(t, bw);
t += 5;
do_div(t, 10);
@@ -407,16 +393,16 @@ static int tda10048_set_invwref(struct dvb_frontend *fe, u32 sample_freq_hz,
}
static int tda10048_set_bandwidth(struct dvb_frontend *fe,
- enum fe_bandwidth bw)
+ u32 bw)
{
struct tda10048_state *state = fe->demodulator_priv;
dprintk(1, "%s(bw=%d)\n", __func__, bw);
/* Bandwidth setting may need to be adjusted */
switch (bw) {
- case BANDWIDTH_6_MHZ:
- case BANDWIDTH_7_MHZ:
- case BANDWIDTH_8_MHZ:
+ case 6000000:
+ case 7000000:
+ case 8000000:
tda10048_set_wref(fe, state->sample_freq, bw);
tda10048_set_invwref(fe, state->sample_freq, bw);
break;
@@ -430,7 +416,7 @@ static int tda10048_set_bandwidth(struct dvb_frontend *fe,
return 0;
}
-static int tda10048_set_if(struct dvb_frontend *fe, enum fe_bandwidth bw)
+static int tda10048_set_if(struct dvb_frontend *fe, u32 bw)
{
struct tda10048_state *state = fe->demodulator_priv;
struct tda10048_config *config = &state->config;
@@ -441,13 +427,13 @@ static int tda10048_set_if(struct dvb_frontend *fe, enum fe_bandwidth bw)
/* based on target bandwidth and clk we calculate pll factors */
switch (bw) {
- case BANDWIDTH_6_MHZ:
+ case 6000000:
if_freq_khz = config->dtv6_if_freq_khz;
break;
- case BANDWIDTH_7_MHZ:
+ case 7000000:
if_freq_khz = config->dtv7_if_freq_khz;
break;
- case BANDWIDTH_8_MHZ:
+ case 8000000:
if_freq_khz = config->dtv8_if_freq_khz;
break;
default:
@@ -601,7 +587,7 @@ static int tda10048_set_inversion(struct dvb_frontend *fe, int inversion)
/* Retrieve the demod settings */
static int tda10048_get_tps(struct tda10048_state *state,
- struct dvb_ofdm_parameters *p)
+ struct dtv_frontend_properties *p)
{
u8 val;
@@ -612,27 +598,27 @@ static int tda10048_get_tps(struct tda10048_state *state,
val = tda10048_readreg(state, TDA10048_OUT_CONF2);
switch ((val & 0x60) >> 5) {
case 0:
- p->constellation = QPSK;
+ p->modulation = QPSK;
break;
case 1:
- p->constellation = QAM_16;
+ p->modulation = QAM_16;
break;
case 2:
- p->constellation = QAM_64;
+ p->modulation = QAM_64;
break;
}
switch ((val & 0x18) >> 3) {
case 0:
- p->hierarchy_information = HIERARCHY_NONE;
+ p->hierarchy = HIERARCHY_NONE;
break;
case 1:
- p->hierarchy_information = HIERARCHY_1;
+ p->hierarchy = HIERARCHY_1;
break;
case 2:
- p->hierarchy_information = HIERARCHY_2;
+ p->hierarchy = HIERARCHY_2;
break;
case 3:
- p->hierarchy_information = HIERARCHY_4;
+ p->hierarchy = HIERARCHY_4;
break;
}
switch (val & 0x07) {
@@ -738,17 +724,17 @@ static int tda10048_output_mode(struct dvb_frontend *fe, int serial)
/* Talk to the demod, set the FEC, GUARD, QAM settings etc */
/* TODO: Support manual tuning with specific params */
-static int tda10048_set_frontend(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *p)
+static int tda10048_set_frontend(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct tda10048_state *state = fe->demodulator_priv;
dprintk(1, "%s(frequency=%d)\n", __func__, p->frequency);
/* Update the I/F pll's if the bandwidth changes */
- if (p->u.ofdm.bandwidth != state->bandwidth) {
- tda10048_set_if(fe, p->u.ofdm.bandwidth);
- tda10048_set_bandwidth(fe, p->u.ofdm.bandwidth);
+ if (p->bandwidth_hz != state->bandwidth) {
+ tda10048_set_if(fe, p->bandwidth_hz);
+ tda10048_set_bandwidth(fe, p->bandwidth_hz);
}
if (fe->ops.tuner_ops.set_params) {
@@ -756,7 +742,7 @@ static int tda10048_set_frontend(struct dvb_frontend *fe,
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
- fe->ops.tuner_ops.set_params(fe, p);
+ fe->ops.tuner_ops.set_params(fe);
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 0);
@@ -797,8 +783,8 @@ static int tda10048_init(struct dvb_frontend *fe)
tda10048_set_inversion(fe, config->inversion);
/* Establish default RF values */
- tda10048_set_if(fe, BANDWIDTH_8_MHZ);
- tda10048_set_bandwidth(fe, BANDWIDTH_8_MHZ);
+ tda10048_set_if(fe, 8000000);
+ tda10048_set_bandwidth(fe, 8000000);
/* Ensure we leave the gate closed */
tda10048_i2c_gate_ctrl(fe, 0);
@@ -1042,9 +1028,9 @@ static int tda10048_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
return 0;
}
-static int tda10048_get_frontend(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *p)
+static int tda10048_get_frontend(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct tda10048_state *state = fe->demodulator_priv;
dprintk(1, "%s()\n", __func__);
@@ -1052,7 +1038,7 @@ static int tda10048_get_frontend(struct dvb_frontend *fe,
p->inversion = tda10048_readreg(state, TDA10048_CONF_C1_1)
& 0x20 ? INVERSION_ON : INVERSION_OFF;
- return tda10048_get_tps(state, &p->u.ofdm);
+ return tda10048_get_tps(state, p);
}
static int tda10048_get_tune_settings(struct dvb_frontend *fe,
@@ -1126,7 +1112,7 @@ struct dvb_frontend *tda10048_attach(const struct tda10048_config *config,
memcpy(&state->config, config, sizeof(*config));
state->i2c = i2c;
state->fwloaded = config->no_firmware;
- state->bandwidth = BANDWIDTH_8_MHZ;
+ state->bandwidth = 8000000;
/* check if the demod is present */
if (tda10048_readreg(state, TDA10048_IDENTITY) != 0x048)
@@ -1152,11 +1138,11 @@ struct dvb_frontend *tda10048_attach(const struct tda10048_config *config,
tda10048_establish_defaults(&state->frontend);
/* Set the xtal and freq defaults */
- if (tda10048_set_if(&state->frontend, BANDWIDTH_8_MHZ) != 0)
+ if (tda10048_set_if(&state->frontend, 8000000) != 0)
goto error;
/* Default bandwidth */
- if (tda10048_set_bandwidth(&state->frontend, BANDWIDTH_8_MHZ) != 0)
+ if (tda10048_set_bandwidth(&state->frontend, 8000000) != 0)
goto error;
/* Leave the gate closed */
@@ -1171,10 +1157,9 @@ error:
EXPORT_SYMBOL(tda10048_attach);
static struct dvb_frontend_ops tda10048_ops = {
-
+ .delsys = { SYS_DVBT },
.info = {
.name = "NXP TDA10048HN DVB-T",
- .type = FE_OFDM,
.frequency_min = 177000000,
.frequency_max = 858000000,
.frequency_stepsize = 166666,
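The tda10023/tda10048 hunks above show the core of this conversion: a DVBv5 .set_frontend callback takes only the frontend pointer and reads its tuning parameters from fe->dtv_property_cache, and bandwidth travels as a plain value in Hz (bandwidth_hz) rather than the old BANDWIDTH_* enum. A minimal sketch of that callback shape, with mydemod_state/mydemod_set_bandwidth() as hypothetical stand-ins for the per-driver helpers:

static int mydemod_set_frontend(struct dvb_frontend *fe)
{
	/* DVBv5: parameters come from the property cache, not a separate argument */
	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
	struct mydemod_state *state = fe->demodulator_priv;	/* hypothetical state */
	int ret;

	/* bandwidth is now a plain Hz value: 6000000, 7000000 or 8000000 */
	if (p->bandwidth_hz != state->bandwidth) {
		ret = mydemod_set_bandwidth(fe, p->bandwidth_hz);	/* hypothetical helper */
		if (ret)
			return ret;
		state->bandwidth = p->bandwidth_hz;
	}

	/* tuner callbacks lost their parameter argument in the same way */
	if (fe->ops.tuner_ops.set_params) {
		if (fe->ops.i2c_gate_ctrl)
			fe->ops.i2c_gate_ctrl(fe, 1);
		fe->ops.tuner_ops.set_params(fe);
		if (fe->ops.i2c_gate_ctrl)
			fe->ops.i2c_gate_ctrl(fe, 0);
	}

	return 0;
}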
diff --git a/drivers/media/dvb/frontends/tda1004x.c b/drivers/media/dvb/frontends/tda1004x.c
index ea485d923550..ae6f22aae677 100644
--- a/drivers/media/dvb/frontends/tda1004x.c
+++ b/drivers/media/dvb/frontends/tda1004x.c
@@ -224,22 +224,22 @@ static int tda1004x_disable_tuner_i2c(struct tda1004x_state *state)
}
static int tda10045h_set_bandwidth(struct tda1004x_state *state,
- fe_bandwidth_t bandwidth)
+ u32 bandwidth)
{
static u8 bandwidth_6mhz[] = { 0x02, 0x00, 0x3d, 0x00, 0x60, 0x1e, 0xa7, 0x45, 0x4f };
static u8 bandwidth_7mhz[] = { 0x02, 0x00, 0x37, 0x00, 0x4a, 0x2f, 0x6d, 0x76, 0xdb };
static u8 bandwidth_8mhz[] = { 0x02, 0x00, 0x3d, 0x00, 0x48, 0x17, 0x89, 0xc7, 0x14 };
switch (bandwidth) {
- case BANDWIDTH_6_MHZ:
+ case 6000000:
tda1004x_write_buf(state, TDA10045H_CONFPLL_P, bandwidth_6mhz, sizeof(bandwidth_6mhz));
break;
- case BANDWIDTH_7_MHZ:
+ case 7000000:
tda1004x_write_buf(state, TDA10045H_CONFPLL_P, bandwidth_7mhz, sizeof(bandwidth_7mhz));
break;
- case BANDWIDTH_8_MHZ:
+ case 8000000:
tda1004x_write_buf(state, TDA10045H_CONFPLL_P, bandwidth_8mhz, sizeof(bandwidth_8mhz));
break;
@@ -253,7 +253,7 @@ static int tda10045h_set_bandwidth(struct tda1004x_state *state,
}
static int tda10046h_set_bandwidth(struct tda1004x_state *state,
- fe_bandwidth_t bandwidth)
+ u32 bandwidth)
{
static u8 bandwidth_6mhz_53M[] = { 0x7b, 0x2e, 0x11, 0xf0, 0xd2 };
static u8 bandwidth_7mhz_53M[] = { 0x6a, 0x02, 0x6a, 0x43, 0x9f };
@@ -270,7 +270,7 @@ static int tda10046h_set_bandwidth(struct tda1004x_state *state,
else
tda10046_clk53m = 1;
switch (bandwidth) {
- case BANDWIDTH_6_MHZ:
+ case 6000000:
if (tda10046_clk53m)
tda1004x_write_buf(state, TDA10046H_TIME_WREF1, bandwidth_6mhz_53M,
sizeof(bandwidth_6mhz_53M));
@@ -283,7 +283,7 @@ static int tda10046h_set_bandwidth(struct tda1004x_state *state,
}
break;
- case BANDWIDTH_7_MHZ:
+ case 7000000:
if (tda10046_clk53m)
tda1004x_write_buf(state, TDA10046H_TIME_WREF1, bandwidth_7mhz_53M,
sizeof(bandwidth_7mhz_53M));
@@ -296,7 +296,7 @@ static int tda10046h_set_bandwidth(struct tda1004x_state *state,
}
break;
- case BANDWIDTH_8_MHZ:
+ case 8000000:
if (tda10046_clk53m)
tda1004x_write_buf(state, TDA10046H_TIME_WREF1, bandwidth_8mhz_53M,
sizeof(bandwidth_8mhz_53M));
@@ -409,7 +409,7 @@ static int tda10045_fwupload(struct dvb_frontend* fe)
msleep(10);
/* set parameters */
- tda10045h_set_bandwidth(state, BANDWIDTH_8_MHZ);
+ tda10045h_set_bandwidth(state, 8000000);
ret = tda1004x_do_upload(state, fw->data, fw->size, TDA10045H_FWPAGE, TDA10045H_CODE_IN);
release_firmware(fw);
@@ -473,7 +473,7 @@ static void tda10046_init_plls(struct dvb_frontend* fe)
tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_LSB, 0x3f);
break;
}
- tda10046h_set_bandwidth(state, BANDWIDTH_8_MHZ); // default bandwidth 8 MHz
+ tda10046h_set_bandwidth(state, 8000000); /* default bandwidth 8 MHz */
/* let the PLLs settle */
msleep(120);
}
@@ -697,9 +697,9 @@ static int tda10046_init(struct dvb_frontend* fe)
return 0;
}
-static int tda1004x_set_fe(struct dvb_frontend* fe,
- struct dvb_frontend_parameters *fe_params)
+static int tda1004x_set_fe(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *fe_params = &fe->dtv_property_cache;
struct tda1004x_state* state = fe->demodulator_priv;
int tmp;
int inversion;
@@ -718,7 +718,7 @@ static int tda1004x_set_fe(struct dvb_frontend* fe,
// set frequency
if (fe->ops.tuner_ops.set_params) {
- fe->ops.tuner_ops.set_params(fe, fe_params);
+ fe->ops.tuner_ops.set_params(fe);
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 0);
}
@@ -726,37 +726,37 @@ static int tda1004x_set_fe(struct dvb_frontend* fe,
// Hardcoded to use auto as much as possible on the TDA10045 as it
// is very unreliable if AUTO mode is _not_ used.
if (state->demod_type == TDA1004X_DEMOD_TDA10045) {
- fe_params->u.ofdm.code_rate_HP = FEC_AUTO;
- fe_params->u.ofdm.guard_interval = GUARD_INTERVAL_AUTO;
- fe_params->u.ofdm.transmission_mode = TRANSMISSION_MODE_AUTO;
+ fe_params->code_rate_HP = FEC_AUTO;
+ fe_params->guard_interval = GUARD_INTERVAL_AUTO;
+ fe_params->transmission_mode = TRANSMISSION_MODE_AUTO;
}
// Set standard params.. or put them to auto
- if ((fe_params->u.ofdm.code_rate_HP == FEC_AUTO) ||
- (fe_params->u.ofdm.code_rate_LP == FEC_AUTO) ||
- (fe_params->u.ofdm.constellation == QAM_AUTO) ||
- (fe_params->u.ofdm.hierarchy_information == HIERARCHY_AUTO)) {
+ if ((fe_params->code_rate_HP == FEC_AUTO) ||
+ (fe_params->code_rate_LP == FEC_AUTO) ||
+ (fe_params->modulation == QAM_AUTO) ||
+ (fe_params->hierarchy == HIERARCHY_AUTO)) {
tda1004x_write_mask(state, TDA1004X_AUTO, 1, 1); // enable auto
- tda1004x_write_mask(state, TDA1004X_IN_CONF1, 0x03, 0); // turn off constellation bits
+ tda1004x_write_mask(state, TDA1004X_IN_CONF1, 0x03, 0); /* turn off modulation bits */
tda1004x_write_mask(state, TDA1004X_IN_CONF1, 0x60, 0); // turn off hierarchy bits
tda1004x_write_mask(state, TDA1004X_IN_CONF2, 0x3f, 0); // turn off FEC bits
} else {
tda1004x_write_mask(state, TDA1004X_AUTO, 1, 0); // disable auto
// set HP FEC
- tmp = tda1004x_encode_fec(fe_params->u.ofdm.code_rate_HP);
+ tmp = tda1004x_encode_fec(fe_params->code_rate_HP);
if (tmp < 0)
return tmp;
tda1004x_write_mask(state, TDA1004X_IN_CONF2, 7, tmp);
// set LP FEC
- tmp = tda1004x_encode_fec(fe_params->u.ofdm.code_rate_LP);
+ tmp = tda1004x_encode_fec(fe_params->code_rate_LP);
if (tmp < 0)
return tmp;
tda1004x_write_mask(state, TDA1004X_IN_CONF2, 0x38, tmp << 3);
- // set constellation
- switch (fe_params->u.ofdm.constellation) {
+ /* set modulation */
+ switch (fe_params->modulation) {
case QPSK:
tda1004x_write_mask(state, TDA1004X_IN_CONF1, 3, 0);
break;
@@ -774,7 +774,7 @@ static int tda1004x_set_fe(struct dvb_frontend* fe,
}
// set hierarchy
- switch (fe_params->u.ofdm.hierarchy_information) {
+ switch (fe_params->hierarchy) {
case HIERARCHY_NONE:
tda1004x_write_mask(state, TDA1004X_IN_CONF1, 0x60, 0 << 5);
break;
@@ -799,11 +799,11 @@ static int tda1004x_set_fe(struct dvb_frontend* fe,
// set bandwidth
switch (state->demod_type) {
case TDA1004X_DEMOD_TDA10045:
- tda10045h_set_bandwidth(state, fe_params->u.ofdm.bandwidth);
+ tda10045h_set_bandwidth(state, fe_params->bandwidth_hz);
break;
case TDA1004X_DEMOD_TDA10046:
- tda10046h_set_bandwidth(state, fe_params->u.ofdm.bandwidth);
+ tda10046h_set_bandwidth(state, fe_params->bandwidth_hz);
break;
}
@@ -825,7 +825,7 @@ static int tda1004x_set_fe(struct dvb_frontend* fe,
}
// set guard interval
- switch (fe_params->u.ofdm.guard_interval) {
+ switch (fe_params->guard_interval) {
case GUARD_INTERVAL_1_32:
tda1004x_write_mask(state, TDA1004X_AUTO, 2, 0);
tda1004x_write_mask(state, TDA1004X_IN_CONF1, 0x0c, 0 << 2);
@@ -856,7 +856,7 @@ static int tda1004x_set_fe(struct dvb_frontend* fe,
}
// set transmission mode
- switch (fe_params->u.ofdm.transmission_mode) {
+ switch (fe_params->transmission_mode) {
case TRANSMISSION_MODE_2K:
tda1004x_write_mask(state, TDA1004X_AUTO, 4, 0);
tda1004x_write_mask(state, TDA1004X_IN_CONF1, 0x10, 0 << 4);
@@ -895,8 +895,9 @@ static int tda1004x_set_fe(struct dvb_frontend* fe,
return 0;
}
-static int tda1004x_get_fe(struct dvb_frontend* fe, struct dvb_frontend_parameters *fe_params)
+static int tda1004x_get_fe(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *fe_params = &fe->dtv_property_cache;
struct tda1004x_state* state = fe->demodulator_priv;
dprintk("%s\n", __func__);
@@ -913,13 +914,13 @@ static int tda1004x_get_fe(struct dvb_frontend* fe, struct dvb_frontend_paramete
case TDA1004X_DEMOD_TDA10045:
switch (tda1004x_read_byte(state, TDA10045H_WREF_LSB)) {
case 0x14:
- fe_params->u.ofdm.bandwidth = BANDWIDTH_8_MHZ;
+ fe_params->bandwidth_hz = 8000000;
break;
case 0xdb:
- fe_params->u.ofdm.bandwidth = BANDWIDTH_7_MHZ;
+ fe_params->bandwidth_hz = 7000000;
break;
case 0x4f:
- fe_params->u.ofdm.bandwidth = BANDWIDTH_6_MHZ;
+ fe_params->bandwidth_hz = 6000000;
break;
}
break;
@@ -927,73 +928,73 @@ static int tda1004x_get_fe(struct dvb_frontend* fe, struct dvb_frontend_paramete
switch (tda1004x_read_byte(state, TDA10046H_TIME_WREF1)) {
case 0x5c:
case 0x54:
- fe_params->u.ofdm.bandwidth = BANDWIDTH_8_MHZ;
+ fe_params->bandwidth_hz = 8000000;
break;
case 0x6a:
case 0x60:
- fe_params->u.ofdm.bandwidth = BANDWIDTH_7_MHZ;
+ fe_params->bandwidth_hz = 7000000;
break;
case 0x7b:
case 0x70:
- fe_params->u.ofdm.bandwidth = BANDWIDTH_6_MHZ;
+ fe_params->bandwidth_hz = 6000000;
break;
}
break;
}
// FEC
- fe_params->u.ofdm.code_rate_HP =
+ fe_params->code_rate_HP =
tda1004x_decode_fec(tda1004x_read_byte(state, TDA1004X_OUT_CONF2) & 7);
- fe_params->u.ofdm.code_rate_LP =
+ fe_params->code_rate_LP =
tda1004x_decode_fec((tda1004x_read_byte(state, TDA1004X_OUT_CONF2) >> 3) & 7);
- // constellation
+ /* modulation */
switch (tda1004x_read_byte(state, TDA1004X_OUT_CONF1) & 3) {
case 0:
- fe_params->u.ofdm.constellation = QPSK;
+ fe_params->modulation = QPSK;
break;
case 1:
- fe_params->u.ofdm.constellation = QAM_16;
+ fe_params->modulation = QAM_16;
break;
case 2:
- fe_params->u.ofdm.constellation = QAM_64;
+ fe_params->modulation = QAM_64;
break;
}
// transmission mode
- fe_params->u.ofdm.transmission_mode = TRANSMISSION_MODE_2K;
+ fe_params->transmission_mode = TRANSMISSION_MODE_2K;
if (tda1004x_read_byte(state, TDA1004X_OUT_CONF1) & 0x10)
- fe_params->u.ofdm.transmission_mode = TRANSMISSION_MODE_8K;
+ fe_params->transmission_mode = TRANSMISSION_MODE_8K;
// guard interval
switch ((tda1004x_read_byte(state, TDA1004X_OUT_CONF1) & 0x0c) >> 2) {
case 0:
- fe_params->u.ofdm.guard_interval = GUARD_INTERVAL_1_32;
+ fe_params->guard_interval = GUARD_INTERVAL_1_32;
break;
case 1:
- fe_params->u.ofdm.guard_interval = GUARD_INTERVAL_1_16;
+ fe_params->guard_interval = GUARD_INTERVAL_1_16;
break;
case 2:
- fe_params->u.ofdm.guard_interval = GUARD_INTERVAL_1_8;
+ fe_params->guard_interval = GUARD_INTERVAL_1_8;
break;
case 3:
- fe_params->u.ofdm.guard_interval = GUARD_INTERVAL_1_4;
+ fe_params->guard_interval = GUARD_INTERVAL_1_4;
break;
}
// hierarchy
switch ((tda1004x_read_byte(state, TDA1004X_OUT_CONF1) & 0x60) >> 5) {
case 0:
- fe_params->u.ofdm.hierarchy_information = HIERARCHY_NONE;
+ fe_params->hierarchy = HIERARCHY_NONE;
break;
case 1:
- fe_params->u.ofdm.hierarchy_information = HIERARCHY_1;
+ fe_params->hierarchy = HIERARCHY_1;
break;
case 2:
- fe_params->u.ofdm.hierarchy_information = HIERARCHY_2;
+ fe_params->hierarchy = HIERARCHY_2;
break;
case 3:
- fe_params->u.ofdm.hierarchy_information = HIERARCHY_4;
+ fe_params->hierarchy = HIERARCHY_4;
break;
}
@@ -1231,9 +1232,9 @@ static void tda1004x_release(struct dvb_frontend* fe)
}
static struct dvb_frontend_ops tda10045_ops = {
+ .delsys = { SYS_DVBT },
.info = {
.name = "Philips TDA10045H DVB-T",
- .type = FE_OFDM,
.frequency_min = 51000000,
.frequency_max = 858000000,
.frequency_stepsize = 166667,
@@ -1301,9 +1302,9 @@ struct dvb_frontend* tda10045_attach(const struct tda1004x_config* config,
}
static struct dvb_frontend_ops tda10046_ops = {
+ .delsys = { SYS_DVBT },
.info = {
.name = "Philips TDA10046H DVB-T",
- .type = FE_OFDM,
.frequency_min = 51000000,
.frequency_max = 858000000,
.frequency_stepsize = 166667,
diff --git a/drivers/media/dvb/frontends/tda10071.c b/drivers/media/dvb/frontends/tda10071.c
index 0c37434d19e2..a99205026751 100644
--- a/drivers/media/dvb/frontends/tda10071.c
+++ b/drivers/media/dvb/frontends/tda10071.c
@@ -636,8 +636,7 @@ error:
return ret;
}
-static int tda10071_set_frontend(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *params)
+static int tda10071_set_frontend(struct dvb_frontend *fe)
{
struct tda10071_priv *priv = fe->demodulator_priv;
struct tda10071_cmd cmd;
@@ -777,8 +776,7 @@ error:
return ret;
}
-static int tda10071_get_frontend(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *p)
+static int tda10071_get_frontend(struct dvb_frontend *fe)
{
struct tda10071_priv *priv = fe->demodulator_priv;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
@@ -1217,9 +1215,9 @@ error:
EXPORT_SYMBOL(tda10071_attach);
static struct dvb_frontend_ops tda10071_ops = {
+	.delsys = { SYS_DVBS, SYS_DVBS2 },
.info = {
.name = "NXP TDA10071",
- .type = FE_QPSK,
.frequency_min = 950000,
.frequency_max = 2150000,
.frequency_tolerance = 5000,
diff --git a/drivers/media/dvb/frontends/tda10086.c b/drivers/media/dvb/frontends/tda10086.c
index f2c8faac6f36..fcfe2e080cb0 100644
--- a/drivers/media/dvb/frontends/tda10086.c
+++ b/drivers/media/dvb/frontends/tda10086.c
@@ -267,7 +267,7 @@ static int tda10086_send_burst (struct dvb_frontend* fe, fe_sec_mini_cmd_t minic
}
static int tda10086_set_inversion(struct tda10086_state *state,
- struct dvb_frontend_parameters *fe_params)
+ struct dtv_frontend_properties *fe_params)
{
u8 invval = 0x80;
@@ -292,7 +292,7 @@ static int tda10086_set_inversion(struct tda10086_state *state,
}
static int tda10086_set_symbol_rate(struct tda10086_state *state,
- struct dvb_frontend_parameters *fe_params)
+ struct dtv_frontend_properties *fe_params)
{
u8 dfn = 0;
u8 afs = 0;
@@ -303,7 +303,7 @@ static int tda10086_set_symbol_rate(struct tda10086_state *state,
u32 tmp;
u32 bdr;
u32 bdri;
- u32 symbol_rate = fe_params->u.qpsk.symbol_rate;
+ u32 symbol_rate = fe_params->symbol_rate;
dprintk ("%s %i\n", __func__, symbol_rate);
@@ -367,13 +367,13 @@ static int tda10086_set_symbol_rate(struct tda10086_state *state,
}
static int tda10086_set_fec(struct tda10086_state *state,
- struct dvb_frontend_parameters *fe_params)
+ struct dtv_frontend_properties *fe_params)
{
u8 fecval;
- dprintk ("%s %i\n", __func__, fe_params->u.qpsk.fec_inner);
+ dprintk("%s %i\n", __func__, fe_params->fec_inner);
- switch(fe_params->u.qpsk.fec_inner) {
+ switch (fe_params->fec_inner) {
case FEC_1_2:
fecval = 0x00;
break;
@@ -409,9 +409,9 @@ static int tda10086_set_fec(struct tda10086_state *state,
return 0;
}
-static int tda10086_set_frontend(struct dvb_frontend* fe,
- struct dvb_frontend_parameters *fe_params)
+static int tda10086_set_frontend(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *fe_params = &fe->dtv_property_cache;
struct tda10086_state *state = fe->demodulator_priv;
int ret;
u32 freq = 0;
@@ -425,7 +425,7 @@ static int tda10086_set_frontend(struct dvb_frontend* fe,
/* set params */
if (fe->ops.tuner_ops.set_params) {
- fe->ops.tuner_ops.set_params(fe, fe_params);
+ fe->ops.tuner_ops.set_params(fe);
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 0);
@@ -452,13 +452,14 @@ static int tda10086_set_frontend(struct dvb_frontend* fe,
tda10086_write_mask(state, 0x10, 0x40, 0x40);
tda10086_write_mask(state, 0x00, 0x01, 0x00);
- state->symbol_rate = fe_params->u.qpsk.symbol_rate;
+ state->symbol_rate = fe_params->symbol_rate;
state->frequency = fe_params->frequency;
return 0;
}
-static int tda10086_get_frontend(struct dvb_frontend* fe, struct dvb_frontend_parameters *fe_params)
+static int tda10086_get_frontend(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *fe_params = &fe->dtv_property_cache;
struct tda10086_state* state = fe->demodulator_priv;
u8 val;
int tmp;
@@ -467,7 +468,7 @@ static int tda10086_get_frontend(struct dvb_frontend* fe, struct dvb_frontend_pa
dprintk ("%s\n", __func__);
/* check for invalid symbol rate */
- if (fe_params->u.qpsk.symbol_rate < 500000)
+ if (fe_params->symbol_rate < 500000)
return -EINVAL;
/* calculate the updated frequency (note: we convert from Hz->kHz) */
@@ -516,34 +517,34 @@ static int tda10086_get_frontend(struct dvb_frontend* fe, struct dvb_frontend_pa
tmp |= 0xffffff00;
tmp = (tmp * 480 * (1<<1)) / 128;
tmp = ((state->symbol_rate/1000) * tmp) / (1000000/1000);
- fe_params->u.qpsk.symbol_rate = state->symbol_rate + tmp;
+ fe_params->symbol_rate = state->symbol_rate + tmp;
/* the FEC */
val = (tda10086_read_byte(state, 0x0d) & 0x70) >> 4;
switch(val) {
case 0x00:
- fe_params->u.qpsk.fec_inner = FEC_1_2;
+ fe_params->fec_inner = FEC_1_2;
break;
case 0x01:
- fe_params->u.qpsk.fec_inner = FEC_2_3;
+ fe_params->fec_inner = FEC_2_3;
break;
case 0x02:
- fe_params->u.qpsk.fec_inner = FEC_3_4;
+ fe_params->fec_inner = FEC_3_4;
break;
case 0x03:
- fe_params->u.qpsk.fec_inner = FEC_4_5;
+ fe_params->fec_inner = FEC_4_5;
break;
case 0x04:
- fe_params->u.qpsk.fec_inner = FEC_5_6;
+ fe_params->fec_inner = FEC_5_6;
break;
case 0x05:
- fe_params->u.qpsk.fec_inner = FEC_6_7;
+ fe_params->fec_inner = FEC_6_7;
break;
case 0x06:
- fe_params->u.qpsk.fec_inner = FEC_7_8;
+ fe_params->fec_inner = FEC_7_8;
break;
case 0x07:
- fe_params->u.qpsk.fec_inner = FEC_8_9;
+ fe_params->fec_inner = FEC_8_9;
break;
}
@@ -664,29 +665,31 @@ static int tda10086_i2c_gate_ctrl(struct dvb_frontend* fe, int enable)
static int tda10086_get_tune_settings(struct dvb_frontend* fe, struct dvb_frontend_tune_settings* fesettings)
{
- if (fesettings->parameters.u.qpsk.symbol_rate > 20000000) {
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+
+ if (p->symbol_rate > 20000000) {
fesettings->min_delay_ms = 50;
fesettings->step_size = 2000;
fesettings->max_drift = 8000;
- } else if (fesettings->parameters.u.qpsk.symbol_rate > 12000000) {
+ } else if (p->symbol_rate > 12000000) {
fesettings->min_delay_ms = 100;
fesettings->step_size = 1500;
fesettings->max_drift = 9000;
- } else if (fesettings->parameters.u.qpsk.symbol_rate > 8000000) {
+ } else if (p->symbol_rate > 8000000) {
fesettings->min_delay_ms = 100;
fesettings->step_size = 1000;
fesettings->max_drift = 8000;
- } else if (fesettings->parameters.u.qpsk.symbol_rate > 4000000) {
+ } else if (p->symbol_rate > 4000000) {
fesettings->min_delay_ms = 100;
fesettings->step_size = 500;
fesettings->max_drift = 7000;
- } else if (fesettings->parameters.u.qpsk.symbol_rate > 2000000) {
+ } else if (p->symbol_rate > 2000000) {
fesettings->min_delay_ms = 200;
- fesettings->step_size = (fesettings->parameters.u.qpsk.symbol_rate / 8000);
+ fesettings->step_size = p->symbol_rate / 8000;
fesettings->max_drift = 14 * fesettings->step_size;
} else {
fesettings->min_delay_ms = 200;
- fesettings->step_size = (fesettings->parameters.u.qpsk.symbol_rate / 8000);
+ fesettings->step_size = p->symbol_rate / 8000;
fesettings->max_drift = 18 * fesettings->step_size;
}
@@ -701,10 +704,9 @@ static void tda10086_release(struct dvb_frontend* fe)
}
static struct dvb_frontend_ops tda10086_ops = {
-
+ .delsys = { SYS_DVBS },
.info = {
.name = "Philips TDA10086 DVB-S",
- .type = FE_QPSK,
.frequency_min = 950000,
.frequency_max = 2150000,
.frequency_stepsize = 125, /* kHz for QPSK frontends */
diff --git a/drivers/media/dvb/frontends/tda18271c2dd.c b/drivers/media/dvb/frontends/tda18271c2dd.c
index 1b1bf200c55c..ad7c72e8f517 100644
--- a/drivers/media/dvb/frontends/tda18271c2dd.c
+++ b/drivers/media/dvb/frontends/tda18271c2dd.c
@@ -29,7 +29,6 @@
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/i2c.h>
-#include <linux/version.h>
#include <asm/div64.h>
#include "dvb_frontend.h"
@@ -1123,55 +1122,51 @@ static int release(struct dvb_frontend *fe)
return 0;
}
-/*
- * As defined on EN 300 429 Annex A and on ITU-T J.83 annex A, the DVB-C
- * roll-off factor is 0.15.
- * According with the specs, the amount of the needed bandwith is given by:
- * Bw = Symbol_rate * (1 + 0.15)
- * As such, the maximum symbol rate supported by 6 MHz is
- * max_symbol_rate = 6 MHz / 1.15 = 5217391 Bauds
- *NOTE: For ITU-T J.83 Annex C, the roll-off factor is 0.13. So:
- * max_symbol_rate = 6 MHz / 1.13 = 5309735 Baud
- * That means that an adjustment is needed for Japan,
- * but, as currently DRX-K is hardcoded to Annex A, let's stick
- * with 0.15 roll-off factor.
- */
-#define MAX_SYMBOL_RATE_6MHz 5217391
-static int set_params(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *params)
+static int set_params(struct dvb_frontend *fe)
{
struct tda_state *state = fe->tuner_priv;
int status = 0;
int Standard;
+ u32 bw = fe->dtv_property_cache.bandwidth_hz;
+ u32 delsys = fe->dtv_property_cache.delivery_system;
- state->m_Frequency = params->frequency;
+ state->m_Frequency = fe->dtv_property_cache.frequency;
- if (fe->ops.info.type == FE_OFDM)
- switch (params->u.ofdm.bandwidth) {
- case BANDWIDTH_6_MHZ:
+ switch (delsys) {
+ case SYS_DVBT:
+ case SYS_DVBT2:
+ switch (bw) {
+ case 6000000:
Standard = HF_DVBT_6MHZ;
break;
- case BANDWIDTH_7_MHZ:
+ case 7000000:
Standard = HF_DVBT_7MHZ;
break;
- default:
- case BANDWIDTH_8_MHZ:
+ case 8000000:
Standard = HF_DVBT_8MHZ;
break;
+ default:
+ return -EINVAL;
}
- else if (fe->ops.info.type == FE_QAM) {
- if (params->u.qam.symbol_rate <= MAX_SYMBOL_RATE_6MHz)
+ case SYS_DVBC_ANNEX_A:
+ case SYS_DVBC_ANNEX_C:
+ if (bw <= 6000000)
Standard = HF_DVBC_6MHZ;
+ else if (bw <= 7000000)
+ Standard = HF_DVBC_7MHZ;
else
Standard = HF_DVBC_8MHZ;
- } else
+ break;
+ default:
return -EINVAL;
+ }
do {
- status = RFTrackingFiltersCorrection(state, params->frequency);
+ status = RFTrackingFiltersCorrection(state, state->m_Frequency);
if (status < 0)
break;
- status = ChannelConfiguration(state, params->frequency, Standard);
+ status = ChannelConfiguration(state, state->m_Frequency,
+ Standard);
if (status < 0)
break;
diff --git a/drivers/media/dvb/frontends/tda8083.c b/drivers/media/dvb/frontends/tda8083.c
index 9369f7442f27..15912c96926a 100644
--- a/drivers/media/dvb/frontends/tda8083.c
+++ b/drivers/media/dvb/frontends/tda8083.c
@@ -315,18 +315,19 @@ static int tda8083_read_ucblocks(struct dvb_frontend* fe, u32* ucblocks)
return 0;
}
-static int tda8083_set_frontend(struct dvb_frontend* fe, struct dvb_frontend_parameters *p)
+static int tda8083_set_frontend(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct tda8083_state* state = fe->demodulator_priv;
if (fe->ops.tuner_ops.set_params) {
- fe->ops.tuner_ops.set_params(fe, p);
+ fe->ops.tuner_ops.set_params(fe);
if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0);
}
tda8083_set_inversion (state, p->inversion);
- tda8083_set_fec (state, p->u.qpsk.fec_inner);
- tda8083_set_symbolrate (state, p->u.qpsk.symbol_rate);
+ tda8083_set_fec(state, p->fec_inner);
+ tda8083_set_symbolrate(state, p->symbol_rate);
tda8083_writereg (state, 0x00, 0x3c);
tda8083_writereg (state, 0x00, 0x04);
@@ -334,16 +335,17 @@ static int tda8083_set_frontend(struct dvb_frontend* fe, struct dvb_frontend_par
return 0;
}
-static int tda8083_get_frontend(struct dvb_frontend* fe, struct dvb_frontend_parameters *p)
+static int tda8083_get_frontend(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct tda8083_state* state = fe->demodulator_priv;
/* FIXME: get symbolrate & frequency offset...*/
/*p->frequency = ???;*/
p->inversion = (tda8083_readreg (state, 0x0e) & 0x80) ?
INVERSION_ON : INVERSION_OFF;
- p->u.qpsk.fec_inner = tda8083_get_fec (state);
- /*p->u.qpsk.symbol_rate = tda8083_get_symbolrate (state);*/
+ p->fec_inner = tda8083_get_fec(state);
+ /*p->symbol_rate = tda8083_get_symbolrate (state);*/
return 0;
}
@@ -438,10 +440,9 @@ error:
}
static struct dvb_frontend_ops tda8083_ops = {
-
+ .delsys = { SYS_DVBS },
.info = {
.name = "Philips TDA8083 DVB-S",
- .type = FE_QPSK,
.frequency_min = 920000, /* TDA8060 */
.frequency_max = 2200000, /* TDA8060 */
.frequency_stepsize = 125, /* kHz for QPSK frontends */
diff --git a/drivers/media/dvb/frontends/tda826x.c b/drivers/media/dvb/frontends/tda826x.c
index 06c94800b940..04bbcc24de0a 100644
--- a/drivers/media/dvb/frontends/tda826x.c
+++ b/drivers/media/dvb/frontends/tda826x.c
@@ -71,8 +71,9 @@ static int tda826x_sleep(struct dvb_frontend *fe)
return (ret == 1) ? 0 : ret;
}
-static int tda826x_set_params(struct dvb_frontend *fe, struct dvb_frontend_parameters *params)
+static int tda826x_set_params(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct tda826x_priv *priv = fe->tuner_priv;
int ret;
u32 div;
@@ -83,11 +84,11 @@ static int tda826x_set_params(struct dvb_frontend *fe, struct dvb_frontend_param
dprintk("%s:\n", __func__);
- div = (params->frequency + (1000-1)) / 1000;
+ div = (p->frequency + (1000-1)) / 1000;
/* BW = ((1 + RO) * SR/2 + 5) * 1.3 [SR in MSPS, BW in MHz] */
/* with R0 = 0.35 and some transformations: */
- ksyms = params->u.qpsk.symbol_rate / 1000;
+ ksyms = p->symbol_rate / 1000;
bandwidth = (878 * ksyms + 6500000) / 1000000 + 1;
if (bandwidth < 5)
bandwidth = 5;
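The tuner side follows the same outline: tda826x_set_params() above no longer receives a dvb_frontend_parameters pointer and instead pulls frequency and symbol rate out of the property cache, with p->symbol_rate replacing the old p->u.qpsk.symbol_rate union member. A minimal sketch under that assumption, with mytuner_program() standing in for the driver's real I2C write-out:

static int mytuner_set_params(struct dvb_frontend *fe)
{
	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
	u32 div, fbw;

	/* frequency stays in kHz for satellite tuners, as in the old API */
	div = (p->frequency + 999) / 1000;

	/* p->symbol_rate replaces the former p->u.qpsk.symbol_rate */
	fbw = (27 * p->symbol_rate) / 32;

	return mytuner_program(fe->tuner_priv, div, fbw);	/* hypothetical helper */
}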
diff --git a/drivers/media/dvb/frontends/tdhd1.h b/drivers/media/dvb/frontends/tdhd1.h
index 51f170678650..17750985db0c 100644
--- a/drivers/media/dvb/frontends/tdhd1.h
+++ b/drivers/media/dvb/frontends/tdhd1.h
@@ -40,24 +40,25 @@ static struct tda1004x_config alps_tdhd1_204a_config = {
.request_firmware = alps_tdhd1_204_request_firmware
};
-static int alps_tdhd1_204a_tuner_set_params(struct dvb_frontend *fe, struct dvb_frontend_parameters *params)
+static int alps_tdhd1_204a_tuner_set_params(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct i2c_adapter *i2c = fe->tuner_priv;
u8 data[4];
struct i2c_msg msg = { .addr = 0x61, .flags = 0, .buf = data, .len = sizeof(data) };
u32 div;
- div = (params->frequency + 36166666) / 166666;
+ div = (p->frequency + 36166666) / 166666;
data[0] = (div >> 8) & 0x7f;
data[1] = div & 0xff;
data[2] = 0x85;
- if (params->frequency >= 174000000 && params->frequency <= 230000000)
+ if (p->frequency >= 174000000 && p->frequency <= 230000000)
data[3] = 0x02;
- else if (params->frequency >= 470000000 && params->frequency <= 823000000)
+ else if (p->frequency >= 470000000 && p->frequency <= 823000000)
data[3] = 0x0C;
- else if (params->frequency > 823000000 && params->frequency <= 862000000)
+ else if (p->frequency > 823000000 && p->frequency <= 862000000)
data[3] = 0x8C;
else
return -EINVAL;
diff --git a/drivers/media/dvb/frontends/tua6100.c b/drivers/media/dvb/frontends/tua6100.c
index bcb95c2ef296..029384d1fddd 100644
--- a/drivers/media/dvb/frontends/tua6100.c
+++ b/drivers/media/dvb/frontends/tua6100.c
@@ -67,9 +67,9 @@ static int tua6100_sleep(struct dvb_frontend *fe)
return (ret == 1) ? 0 : ret;
}
-static int tua6100_set_params(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *params)
+static int tua6100_set_params(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct tua6100_priv *priv = fe->tuner_priv;
u32 div;
u32 prediv;
@@ -85,36 +85,37 @@ static int tua6100_set_params(struct dvb_frontend *fe,
#define _ri 4000000
// setup register 0
- if (params->frequency < 2000000) {
+ if (c->frequency < 2000000)
reg0[1] = 0x03;
- } else {
+ else
reg0[1] = 0x07;
- }
// setup register 1
- if (params->frequency < 1630000) {
+ if (c->frequency < 1630000)
reg1[1] = 0x2c;
- } else {
+ else
reg1[1] = 0x0c;
- }
+
if (_P == 64)
reg1[1] |= 0x40;
- if (params->frequency >= 1525000)
+ if (c->frequency >= 1525000)
reg1[1] |= 0x80;
// register 2
reg2[1] = (_R >> 8) & 0x03;
reg2[2] = _R;
- if (params->frequency < 1455000) {
+ if (c->frequency < 1455000)
reg2[1] |= 0x1c;
- } else if (params->frequency < 1630000) {
+ else if (c->frequency < 1630000)
reg2[1] |= 0x0c;
- } else {
+ else
reg2[1] |= 0x1c;
- }
- // The N divisor ratio (note: params->frequency is in kHz, but we need it in Hz)
- prediv = (params->frequency * _R) / (_ri / 1000);
+ /*
+ * The N divisor ratio (note: c->frequency is in kHz, but we
+ * need it in Hz)
+ */
+ prediv = (c->frequency * _R) / (_ri / 1000);
div = prediv / _P;
reg1[1] |= (div >> 9) & 0x03;
reg1[2] = div >> 1;
diff --git a/drivers/media/dvb/frontends/ves1820.c b/drivers/media/dvb/frontends/ves1820.c
index 550a07a8a997..bb42b563c42d 100644
--- a/drivers/media/dvb/frontends/ves1820.c
+++ b/drivers/media/dvb/frontends/ves1820.c
@@ -205,25 +205,26 @@ static int ves1820_init(struct dvb_frontend* fe)
return 0;
}
-static int ves1820_set_parameters(struct dvb_frontend* fe, struct dvb_frontend_parameters *p)
+static int ves1820_set_parameters(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct ves1820_state* state = fe->demodulator_priv;
static const u8 reg0x00[] = { 0x00, 0x04, 0x08, 0x0c, 0x10 };
static const u8 reg0x01[] = { 140, 140, 106, 100, 92 };
static const u8 reg0x05[] = { 135, 100, 70, 54, 38 };
static const u8 reg0x08[] = { 162, 116, 67, 52, 35 };
static const u8 reg0x09[] = { 145, 150, 106, 126, 107 };
- int real_qam = p->u.qam.modulation - QAM_16;
+ int real_qam = p->modulation - QAM_16;
if (real_qam < 0 || real_qam > 4)
return -EINVAL;
if (fe->ops.tuner_ops.set_params) {
- fe->ops.tuner_ops.set_params(fe, p);
+ fe->ops.tuner_ops.set_params(fe);
if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0);
}
- ves1820_set_symbolrate(state, p->u.qam.symbol_rate);
+ ves1820_set_symbolrate(state, p->symbol_rate);
ves1820_writereg(state, 0x34, state->pwm);
ves1820_writereg(state, 0x01, reg0x01[real_qam]);
@@ -309,8 +310,9 @@ static int ves1820_read_ucblocks(struct dvb_frontend* fe, u32* ucblocks)
return 0;
}
-static int ves1820_get_frontend(struct dvb_frontend* fe, struct dvb_frontend_parameters *p)
+static int ves1820_get_frontend(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct ves1820_state* state = fe->demodulator_priv;
int sync;
s8 afc = 0;
@@ -320,7 +322,7 @@ static int ves1820_get_frontend(struct dvb_frontend* fe, struct dvb_frontend_par
if (verbose) {
/* AFC only valid when carrier has been recovered */
printk(sync & 2 ? "ves1820: AFC (%d) %dHz\n" :
- "ves1820: [AFC (%d) %dHz]\n", afc, -((s32) p->u.qam.symbol_rate * afc) >> 10);
+ "ves1820: [AFC (%d) %dHz]\n", afc, -((s32) p->symbol_rate * afc) >> 10);
}
if (!state->config->invert) {
@@ -329,13 +331,13 @@ static int ves1820_get_frontend(struct dvb_frontend* fe, struct dvb_frontend_par
p->inversion = (!(state->reg0 & 0x20)) ? INVERSION_ON : INVERSION_OFF;
}
- p->u.qam.modulation = ((state->reg0 >> 2) & 7) + QAM_16;
+ p->modulation = ((state->reg0 >> 2) & 7) + QAM_16;
- p->u.qam.fec_inner = FEC_NONE;
+ p->fec_inner = FEC_NONE;
p->frequency = ((p->frequency + 31250) / 62500) * 62500;
if (sync & 2)
- p->frequency -= ((s32) p->u.qam.symbol_rate * afc) >> 10;
+ p->frequency -= ((s32) p->symbol_rate * afc) >> 10;
return 0;
}
@@ -405,10 +407,9 @@ error:
}
static struct dvb_frontend_ops ves1820_ops = {
-
+ .delsys = { SYS_DVBC_ANNEX_A },
.info = {
.name = "VLSI VES1820 DVB-C",
- .type = FE_QAM,
.frequency_stepsize = 62500,
.frequency_min = 47000000,
.frequency_max = 862000000,
diff --git a/drivers/media/dvb/frontends/ves1x93.c b/drivers/media/dvb/frontends/ves1x93.c
index 8d7854c2fb0c..9c17eacaec24 100644
--- a/drivers/media/dvb/frontends/ves1x93.c
+++ b/drivers/media/dvb/frontends/ves1x93.c
@@ -46,6 +46,7 @@ struct ves1x93_state {
u8 *init_1x93_wtab;
u8 tab_size;
u8 demod_type;
+ u32 frequency;
};
static int debug;
@@ -384,31 +385,34 @@ static int ves1x93_read_ucblocks(struct dvb_frontend* fe, u32* ucblocks)
return 0;
}
-static int ves1x93_set_frontend(struct dvb_frontend* fe, struct dvb_frontend_parameters *p)
+static int ves1x93_set_frontend(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct ves1x93_state* state = fe->demodulator_priv;
if (fe->ops.tuner_ops.set_params) {
- fe->ops.tuner_ops.set_params(fe, p);
+ fe->ops.tuner_ops.set_params(fe);
if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0);
}
ves1x93_set_inversion (state, p->inversion);
- ves1x93_set_fec (state, p->u.qpsk.fec_inner);
- ves1x93_set_symbolrate (state, p->u.qpsk.symbol_rate);
+ ves1x93_set_fec(state, p->fec_inner);
+ ves1x93_set_symbolrate(state, p->symbol_rate);
state->inversion = p->inversion;
+ state->frequency = p->frequency;
return 0;
}
-static int ves1x93_get_frontend(struct dvb_frontend* fe, struct dvb_frontend_parameters *p)
+static int ves1x93_get_frontend(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct ves1x93_state* state = fe->demodulator_priv;
int afc;
afc = ((int)((char)(ves1x93_readreg (state, 0x0a) << 1)))/2;
- afc = (afc * (int)(p->u.qpsk.symbol_rate/1000/8))/16;
+ afc = (afc * (int)(p->symbol_rate/1000/8))/16;
- p->frequency -= afc;
+ p->frequency = state->frequency - afc;
/*
* inversion indicator is only valid
@@ -417,7 +421,7 @@ static int ves1x93_get_frontend(struct dvb_frontend* fe, struct dvb_frontend_par
if (state->inversion == INVERSION_AUTO)
p->inversion = (ves1x93_readreg (state, 0x0f) & 2) ?
INVERSION_OFF : INVERSION_ON;
- p->u.qpsk.fec_inner = ves1x93_get_fec (state);
+ p->fec_inner = ves1x93_get_fec(state);
/* XXX FIXME: timing offset !! */
return 0;
@@ -506,10 +510,9 @@ error:
}
static struct dvb_frontend_ops ves1x93_ops = {
-
+ .delsys = { SYS_DVBS },
.info = {
.name = "VLSI VES1x93 DVB-S",
- .type = FE_QPSK,
.frequency_min = 950000,
.frequency_max = 2150000,
.frequency_stepsize = 125, /* kHz for QPSK frontends */
diff --git a/drivers/media/dvb/frontends/zl10036.c b/drivers/media/dvb/frontends/zl10036.c
index 81aa984c551f..0903d461b8fa 100644
--- a/drivers/media/dvb/frontends/zl10036.c
+++ b/drivers/media/dvb/frontends/zl10036.c
@@ -305,12 +305,12 @@ static int zl10036_set_gain_params(struct zl10036_state *state,
return zl10036_write(state, buf, sizeof(buf));
}
-static int zl10036_set_params(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *params)
+static int zl10036_set_params(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct zl10036_state *state = fe->tuner_priv;
int ret = 0;
- u32 frequency = params->frequency;
+ u32 frequency = p->frequency;
u32 fbw;
int i;
u8 c;
@@ -326,7 +326,7 @@ static int zl10036_set_params(struct dvb_frontend *fe,
* fBW = (alpha*symbolrate)/(2*0.8)
* 1.35 / (2*0.8) = 27 / 32
*/
- fbw = (27 * params->u.qpsk.symbol_rate) / 32;
+ fbw = (27 * p->symbol_rate) / 32;
/* scale to kHz */
fbw /= 1000;
@@ -353,7 +353,7 @@ static int zl10036_set_params(struct dvb_frontend *fe,
if (ret < 0)
goto error;
- ret = zl10036_set_frequency(state, params->frequency);
+ ret = zl10036_set_frequency(state, p->frequency);
if (ret < 0)
goto error;
diff --git a/drivers/media/dvb/frontends/zl10039.c b/drivers/media/dvb/frontends/zl10039.c
index c085e58a94bf..eff9c5fde50a 100644
--- a/drivers/media/dvb/frontends/zl10039.c
+++ b/drivers/media/dvb/frontends/zl10039.c
@@ -176,9 +176,9 @@ static int zl10039_sleep(struct dvb_frontend *fe)
return 0;
}
-static int zl10039_set_params(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *params)
+static int zl10039_set_params(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct zl10039_state *state = fe->tuner_priv;
u8 buf[6];
u8 bf;
@@ -188,12 +188,12 @@ static int zl10039_set_params(struct dvb_frontend *fe,
dprintk("%s\n", __func__);
dprintk("Set frequency = %d, symbol rate = %d\n",
- params->frequency, params->u.qpsk.symbol_rate);
+ c->frequency, c->symbol_rate);
/* Assumed 10.111 MHz crystal oscillator */
/* Cancelled num/den 80 to prevent overflow */
- div = (params->frequency * 1000) / 126387;
- fbw = (params->u.qpsk.symbol_rate * 27) / 32000;
+ div = (c->frequency * 1000) / 126387;
+ fbw = (c->symbol_rate * 27) / 32000;
/* Cancelled num/den 10 to prevent overflow */
bf = ((fbw * 5088) / 1011100) - 1;
diff --git a/drivers/media/dvb/frontends/zl10353.c b/drivers/media/dvb/frontends/zl10353.c
index adbbf6d3d044..ac7237891374 100644
--- a/drivers/media/dvb/frontends/zl10353.c
+++ b/drivers/media/dvb/frontends/zl10353.c
@@ -37,9 +37,9 @@ struct zl10353_state {
struct zl10353_config config;
- enum fe_bandwidth bandwidth;
- u32 ucblocks;
- u32 frequency;
+ u32 bandwidth;
+ u32 ucblocks;
+ u32 frequency;
};
static int debug;
@@ -122,30 +122,17 @@ static void zl10353_dump_regs(struct dvb_frontend *fe)
}
static void zl10353_calc_nominal_rate(struct dvb_frontend *fe,
- enum fe_bandwidth bandwidth,
+ u32 bandwidth,
u16 *nominal_rate)
{
struct zl10353_state *state = fe->demodulator_priv;
u32 adc_clock = 450560; /* 45.056 MHz */
u64 value;
- u8 bw;
+ u8 bw = bandwidth / 1000000;
if (state->config.adc_clock)
adc_clock = state->config.adc_clock;
- switch (bandwidth) {
- case BANDWIDTH_6_MHZ:
- bw = 6;
- break;
- case BANDWIDTH_7_MHZ:
- bw = 7;
- break;
- case BANDWIDTH_8_MHZ:
- default:
- bw = 8;
- break;
- }
-
value = (u64)10 * (1 << 23) / 7 * 125;
value = (bw * value) + adc_clock / 2;
do_div(value, adc_clock);
@@ -192,16 +179,15 @@ static int zl10353_sleep(struct dvb_frontend *fe)
return 0;
}
-static int zl10353_set_parameters(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *param)
+static int zl10353_set_parameters(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct zl10353_state *state = fe->demodulator_priv;
u16 nominal_rate, input_freq;
u8 pllbuf[6] = { 0x67 }, acq_ctl = 0;
u16 tps = 0;
- struct dvb_ofdm_parameters *op = &param->u.ofdm;
- state->frequency = param->frequency;
+ state->frequency = c->frequency;
zl10353_single_write(fe, RESET, 0x80);
udelay(200);
@@ -211,42 +197,44 @@ static int zl10353_set_parameters(struct dvb_frontend *fe,
zl10353_single_write(fe, AGC_TARGET, 0x28);
- if (op->transmission_mode != TRANSMISSION_MODE_AUTO)
+ if (c->transmission_mode != TRANSMISSION_MODE_AUTO)
acq_ctl |= (1 << 0);
- if (op->guard_interval != GUARD_INTERVAL_AUTO)
+ if (c->guard_interval != GUARD_INTERVAL_AUTO)
acq_ctl |= (1 << 1);
zl10353_single_write(fe, ACQ_CTL, acq_ctl);
- switch (op->bandwidth) {
- case BANDWIDTH_6_MHZ:
+ switch (c->bandwidth_hz) {
+ case 6000000:
/* These are extrapolated from the 7 and 8MHz values */
zl10353_single_write(fe, MCLK_RATIO, 0x97);
zl10353_single_write(fe, 0x64, 0x34);
zl10353_single_write(fe, 0xcc, 0xdd);
break;
- case BANDWIDTH_7_MHZ:
+ case 7000000:
zl10353_single_write(fe, MCLK_RATIO, 0x86);
zl10353_single_write(fe, 0x64, 0x35);
zl10353_single_write(fe, 0xcc, 0x73);
break;
- case BANDWIDTH_8_MHZ:
default:
+ c->bandwidth_hz = 8000000;
+		/* fall through */
+ case 8000000:
zl10353_single_write(fe, MCLK_RATIO, 0x75);
zl10353_single_write(fe, 0x64, 0x36);
zl10353_single_write(fe, 0xcc, 0x73);
}
- zl10353_calc_nominal_rate(fe, op->bandwidth, &nominal_rate);
+ zl10353_calc_nominal_rate(fe, c->bandwidth_hz, &nominal_rate);
zl10353_single_write(fe, TRL_NOMINAL_RATE_1, msb(nominal_rate));
zl10353_single_write(fe, TRL_NOMINAL_RATE_0, lsb(nominal_rate));
- state->bandwidth = op->bandwidth;
+ state->bandwidth = c->bandwidth_hz;
zl10353_calc_input_freq(fe, &input_freq);
zl10353_single_write(fe, INPUT_FREQ_1, msb(input_freq));
zl10353_single_write(fe, INPUT_FREQ_0, lsb(input_freq));
/* Hint at TPS settings */
- switch (op->code_rate_HP) {
+ switch (c->code_rate_HP) {
case FEC_2_3:
tps |= (1 << 7);
break;
@@ -266,7 +254,7 @@ static int zl10353_set_parameters(struct dvb_frontend *fe,
return -EINVAL;
}
- switch (op->code_rate_LP) {
+ switch (c->code_rate_LP) {
case FEC_2_3:
tps |= (1 << 4);
break;
@@ -283,14 +271,14 @@ static int zl10353_set_parameters(struct dvb_frontend *fe,
case FEC_AUTO:
break;
case FEC_NONE:
- if (op->hierarchy_information == HIERARCHY_AUTO ||
- op->hierarchy_information == HIERARCHY_NONE)
+ if (c->hierarchy == HIERARCHY_AUTO ||
+ c->hierarchy == HIERARCHY_NONE)
break;
default:
return -EINVAL;
}
- switch (op->constellation) {
+ switch (c->modulation) {
case QPSK:
break;
case QAM_AUTO:
@@ -304,7 +292,7 @@ static int zl10353_set_parameters(struct dvb_frontend *fe,
return -EINVAL;
}
- switch (op->transmission_mode) {
+ switch (c->transmission_mode) {
case TRANSMISSION_MODE_2K:
case TRANSMISSION_MODE_AUTO:
break;
@@ -315,7 +303,7 @@ static int zl10353_set_parameters(struct dvb_frontend *fe,
return -EINVAL;
}
- switch (op->guard_interval) {
+ switch (c->guard_interval) {
case GUARD_INTERVAL_1_32:
case GUARD_INTERVAL_AUTO:
break;
@@ -332,7 +320,7 @@ static int zl10353_set_parameters(struct dvb_frontend *fe,
return -EINVAL;
}
- switch (op->hierarchy_information) {
+ switch (c->hierarchy) {
case HIERARCHY_AUTO:
case HIERARCHY_NONE:
break;
@@ -362,12 +350,12 @@ static int zl10353_set_parameters(struct dvb_frontend *fe,
*/
if (state->config.no_tuner) {
if (fe->ops.tuner_ops.set_params) {
- fe->ops.tuner_ops.set_params(fe, param);
+ fe->ops.tuner_ops.set_params(fe);
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 0);
}
} else if (fe->ops.tuner_ops.calc_regs) {
- fe->ops.tuner_ops.calc_regs(fe, param, pllbuf + 1, 5);
+ fe->ops.tuner_ops.calc_regs(fe, pllbuf + 1, 5);
pllbuf[1] <<= 1;
zl10353_write(fe, pllbuf, sizeof(pllbuf));
}
@@ -383,11 +371,10 @@ static int zl10353_set_parameters(struct dvb_frontend *fe,
return 0;
}
-static int zl10353_get_parameters(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *param)
+static int zl10353_get_parameters(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct zl10353_state *state = fe->demodulator_priv;
- struct dvb_ofdm_parameters *op = &param->u.ofdm;
int s6, s9;
u16 tps;
static const u8 tps_fec_to_api[8] = {
@@ -411,66 +398,66 @@ static int zl10353_get_parameters(struct dvb_frontend *fe,
tps = zl10353_read_register(state, TPS_RECEIVED_1) << 8 |
zl10353_read_register(state, TPS_RECEIVED_0);
- op->code_rate_HP = tps_fec_to_api[(tps >> 7) & 7];
- op->code_rate_LP = tps_fec_to_api[(tps >> 4) & 7];
+ c->code_rate_HP = tps_fec_to_api[(tps >> 7) & 7];
+ c->code_rate_LP = tps_fec_to_api[(tps >> 4) & 7];
switch ((tps >> 13) & 3) {
case 0:
- op->constellation = QPSK;
+ c->modulation = QPSK;
break;
case 1:
- op->constellation = QAM_16;
+ c->modulation = QAM_16;
break;
case 2:
- op->constellation = QAM_64;
+ c->modulation = QAM_64;
break;
default:
- op->constellation = QAM_AUTO;
+ c->modulation = QAM_AUTO;
break;
}
- op->transmission_mode = (tps & 0x01) ? TRANSMISSION_MODE_8K :
+ c->transmission_mode = (tps & 0x01) ? TRANSMISSION_MODE_8K :
TRANSMISSION_MODE_2K;
switch ((tps >> 2) & 3) {
case 0:
- op->guard_interval = GUARD_INTERVAL_1_32;
+ c->guard_interval = GUARD_INTERVAL_1_32;
break;
case 1:
- op->guard_interval = GUARD_INTERVAL_1_16;
+ c->guard_interval = GUARD_INTERVAL_1_16;
break;
case 2:
- op->guard_interval = GUARD_INTERVAL_1_8;
+ c->guard_interval = GUARD_INTERVAL_1_8;
break;
case 3:
- op->guard_interval = GUARD_INTERVAL_1_4;
+ c->guard_interval = GUARD_INTERVAL_1_4;
break;
default:
- op->guard_interval = GUARD_INTERVAL_AUTO;
+ c->guard_interval = GUARD_INTERVAL_AUTO;
break;
}
switch ((tps >> 10) & 7) {
case 0:
- op->hierarchy_information = HIERARCHY_NONE;
+ c->hierarchy = HIERARCHY_NONE;
break;
case 1:
- op->hierarchy_information = HIERARCHY_1;
+ c->hierarchy = HIERARCHY_1;
break;
case 2:
- op->hierarchy_information = HIERARCHY_2;
+ c->hierarchy = HIERARCHY_2;
break;
case 3:
- op->hierarchy_information = HIERARCHY_4;
+ c->hierarchy = HIERARCHY_4;
break;
default:
- op->hierarchy_information = HIERARCHY_AUTO;
+ c->hierarchy = HIERARCHY_AUTO;
break;
}
- param->frequency = state->frequency;
- op->bandwidth = state->bandwidth;
- param->inversion = INVERSION_AUTO;
+ c->frequency = state->frequency;
+ c->bandwidth_hz = state->bandwidth;
+ c->inversion = INVERSION_AUTO;
return 0;
}
@@ -651,10 +638,9 @@ error:
}
static struct dvb_frontend_ops zl10353_ops = {
-
+ .delsys = { SYS_DVBT },
.info = {
.name = "Zarlink ZL10353 DVB-T",
- .type = FE_OFDM,
.frequency_min = 174000000,
.frequency_max = 862000000,
.frequency_stepsize = 166667,
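The zl10353 hunks above also show the read-back direction: .get_frontend no longer fills a caller-supplied dvb_frontend_parameters but writes the decoded TPS values straight into fe->dtv_property_cache, using the generic field names (modulation instead of constellation, hierarchy instead of hierarchy_information). A small sketch of that shape, with mydemod_readreg() as a hypothetical register accessor:

static int mydemod_get_frontend(struct dvb_frontend *fe)
{
	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
	u8 val = mydemod_readreg(fe->demodulator_priv, 0x01);	/* hypothetical read */

	/* the former u.ofdm.constellation becomes the generic c->modulation */
	switch (val & 3) {
	case 0:
		c->modulation = QPSK;
		break;
	case 1:
		c->modulation = QAM_16;
		break;
	default:
		c->modulation = QAM_64;
		break;
	}

	c->inversion = INVERSION_AUTO;
	return 0;
}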
diff --git a/drivers/media/dvb/mantis/mantis_vp1033.c b/drivers/media/dvb/mantis/mantis_vp1033.c
index 2ae0afa7756b..ad013e93ed11 100644
--- a/drivers/media/dvb/mantis/mantis_vp1033.c
+++ b/drivers/media/dvb/mantis/mantis_vp1033.c
@@ -83,9 +83,9 @@ u8 lgtdqcs001f_inittab[] = {
#define MANTIS_MODEL_NAME "VP-1033"
#define MANTIS_DEV_TYPE "DVB-S/DSS"
-int lgtdqcs001f_tuner_set(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *params)
+int lgtdqcs001f_tuner_set(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct mantis_pci *mantis = fe->dvb->priv;
struct i2c_adapter *adapter = &mantis->adapter;
@@ -95,14 +95,14 @@ int lgtdqcs001f_tuner_set(struct dvb_frontend *fe,
struct i2c_msg msg = {.addr = 0x61, .flags = 0, .buf = buf, .len = sizeof(buf)};
- div = params->frequency / 250;
+ div = p->frequency / 250;
buf[0] = (div >> 8) & 0x7f;
buf[1] = div & 0xff;
buf[2] = 0x83;
buf[3] = 0xc0;
- if (params->frequency < 1531000)
+ if (p->frequency < 1531000)
buf[3] |= 0x04;
else
buf[3] &= ~0x04;
diff --git a/drivers/media/dvb/mantis/mantis_vp2033.c b/drivers/media/dvb/mantis/mantis_vp2033.c
index 06da0ddf05a7..1ca6837fbe46 100644
--- a/drivers/media/dvb/mantis/mantis_vp2033.c
+++ b/drivers/media/dvb/mantis/mantis_vp2033.c
@@ -65,8 +65,9 @@ static u8 read_pwm(struct mantis_pci *mantis)
return pwm;
}
-static int tda1002x_cu1216_tuner_set(struct dvb_frontend *fe, struct dvb_frontend_parameters *params)
+static int tda1002x_cu1216_tuner_set(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct mantis_pci *mantis = fe->dvb->priv;
struct i2c_adapter *adapter = &mantis->adapter;
@@ -77,13 +78,13 @@ static int tda1002x_cu1216_tuner_set(struct dvb_frontend *fe, struct dvb_fronten
#define CU1216_IF 36125000
#define TUNER_MUL 62500
- u32 div = (params->frequency + CU1216_IF + TUNER_MUL / 2) / TUNER_MUL;
+ u32 div = (p->frequency + CU1216_IF + TUNER_MUL / 2) / TUNER_MUL;
buf[0] = (div >> 8) & 0x7f;
buf[1] = div & 0xff;
buf[2] = 0xce;
- buf[3] = (params->frequency < 150000000 ? 0x01 :
- params->frequency < 445000000 ? 0x02 : 0x04);
+ buf[3] = (p->frequency < 150000000 ? 0x01 :
+ p->frequency < 445000000 ? 0x02 : 0x04);
buf[4] = 0xde;
buf[5] = 0x20;
diff --git a/drivers/media/dvb/mantis/mantis_vp2040.c b/drivers/media/dvb/mantis/mantis_vp2040.c
index f72b137b7652..d480741afd78 100644
--- a/drivers/media/dvb/mantis/mantis_vp2040.c
+++ b/drivers/media/dvb/mantis/mantis_vp2040.c
@@ -47,8 +47,9 @@ struct tda10023_config vp2040_tda10023_cu1216_config = {
.invert = 1,
};
-static int tda1002x_cu1216_tuner_set(struct dvb_frontend *fe, struct dvb_frontend_parameters *params)
+static int tda1002x_cu1216_tuner_set(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct mantis_pci *mantis = fe->dvb->priv;
struct i2c_adapter *adapter = &mantis->adapter;
@@ -59,13 +60,13 @@ static int tda1002x_cu1216_tuner_set(struct dvb_frontend *fe, struct dvb_fronten
#define CU1216_IF 36125000
#define TUNER_MUL 62500
- u32 div = (params->frequency + CU1216_IF + TUNER_MUL / 2) / TUNER_MUL;
+ u32 div = (p->frequency + CU1216_IF + TUNER_MUL / 2) / TUNER_MUL;
buf[0] = (div >> 8) & 0x7f;
buf[1] = div & 0xff;
buf[2] = 0xce;
- buf[3] = (params->frequency < 150000000 ? 0x01 :
- params->frequency < 445000000 ? 0x02 : 0x04);
+ buf[3] = (p->frequency < 150000000 ? 0x01 :
+ p->frequency < 445000000 ? 0x02 : 0x04);
buf[4] = 0xde;
buf[5] = 0x20;
diff --git a/drivers/media/dvb/ngene/ngene-cards.c b/drivers/media/dvb/ngene/ngene-cards.c
index 056419228363..8418c02bcefe 100644
--- a/drivers/media/dvb/ngene/ngene-cards.c
+++ b/drivers/media/dvb/ngene/ngene-cards.c
@@ -218,7 +218,7 @@ static int demod_attach_drxk(struct ngene_channel *chan,
memset(&config, 0, sizeof(config));
config.adr = 0x29 + (chan->number ^ 2);
- chan->fe = dvb_attach(drxk_attach, &config, i2c, &chan->fe2);
+ chan->fe = dvb_attach(drxk_attach, &config, i2c);
if (!chan->fe) {
printk(KERN_ERR "No DRXK found!\n");
return -ENODEV;
diff --git a/drivers/media/dvb/pluto2/pluto2.c b/drivers/media/dvb/pluto2/pluto2.c
index 80fb51004461..e1f20c236989 100644
--- a/drivers/media/dvb/pluto2/pluto2.c
+++ b/drivers/media/dvb/pluto2/pluto2.c
@@ -445,9 +445,9 @@ static inline u32 divide(u32 numerator, u32 denominator)
}
/* LG Innotek TDTE-E001P (Infineon TUA6034) */
-static int lg_tdtpe001p_tuner_set_params(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *p)
+static int lg_tdtpe001p_tuner_set_params(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct pluto *pluto = frontend_to_pluto(fe);
struct i2c_msg msg;
int ret;
@@ -478,7 +478,7 @@ static int lg_tdtpe001p_tuner_set_params(struct dvb_frontend *fe,
else
buf[3] = 0x04;
- if (p->u.ofdm.bandwidth == BANDWIDTH_8_MHZ)
+ if (p->bandwidth_hz == 8000000)
buf[3] |= 0x08;
if (sizeof(buf) == 6) {
diff --git a/drivers/media/dvb/pt1/va1j5jf8007s.c b/drivers/media/dvb/pt1/va1j5jf8007s.c
index 451641c0c1d2..d980dfb21e5e 100644
--- a/drivers/media/dvb/pt1/va1j5jf8007s.c
+++ b/drivers/media/dvb/pt1/va1j5jf8007s.c
@@ -385,7 +385,7 @@ va1j5jf8007s_check_ts_id(struct va1j5jf8007s_state *state, int *lock)
static int
va1j5jf8007s_tune(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *params,
+ bool re_tune,
unsigned int mode_flags, unsigned int *delay,
fe_status_t *status)
{
@@ -395,7 +395,7 @@ va1j5jf8007s_tune(struct dvb_frontend *fe,
state = fe->demodulator_priv;
- if (params != NULL)
+ if (re_tune)
state->tune_state = VA1J5JF8007S_SET_FREQUENCY_1;
switch (state->tune_state) {
@@ -579,9 +579,9 @@ static void va1j5jf8007s_release(struct dvb_frontend *fe)
}
static struct dvb_frontend_ops va1j5jf8007s_ops = {
+ .delsys = { SYS_ISDBS },
.info = {
.name = "VA1J5JF8007/VA1J5JF8011 ISDB-S",
- .type = FE_QPSK,
.frequency_min = 950000,
.frequency_max = 2150000,
.frequency_stepsize = 1000,
diff --git a/drivers/media/dvb/pt1/va1j5jf8007t.c b/drivers/media/dvb/pt1/va1j5jf8007t.c
index 0f085c3e571b..2db15159d514 100644
--- a/drivers/media/dvb/pt1/va1j5jf8007t.c
+++ b/drivers/media/dvb/pt1/va1j5jf8007t.c
@@ -264,7 +264,7 @@ static int va1j5jf8007t_check_modulation(struct va1j5jf8007t_state *state,
static int
va1j5jf8007t_tune(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *params,
+ bool re_tune,
unsigned int mode_flags, unsigned int *delay,
fe_status_t *status)
{
@@ -274,7 +274,7 @@ va1j5jf8007t_tune(struct dvb_frontend *fe,
state = fe->demodulator_priv;
- if (params != NULL)
+ if (re_tune)
state->tune_state = VA1J5JF8007T_SET_FREQUENCY;
switch (state->tune_state) {
@@ -428,9 +428,9 @@ static void va1j5jf8007t_release(struct dvb_frontend *fe)
}
static struct dvb_frontend_ops va1j5jf8007t_ops = {
+ .delsys = { SYS_ISDBT },
.info = {
.name = "VA1J5JF8007/VA1J5JF8011 ISDB-T",
- .type = FE_OFDM,
.frequency_min = 90000000,
.frequency_max = 770000000,
.frequency_stepsize = 142857,
diff --git a/drivers/media/dvb/siano/smsdvb.c b/drivers/media/dvb/siano/smsdvb.c
index 37c594f82782..654685c9303e 100644
--- a/drivers/media/dvb/siano/smsdvb.c
+++ b/drivers/media/dvb/siano/smsdvb.c
@@ -50,7 +50,7 @@ struct smsdvb_client_t {
struct completion tune_done;
/* todo: save freq/band instead whole struct */
- struct dvb_frontend_parameters fe_params;
+ struct dtv_frontend_properties fe_params;
struct SMSHOSTLIB_STATISTICS_DVB_S sms_stat_dvb;
int event_fe_state;
@@ -591,8 +591,7 @@ static int smsdvb_get_tune_settings(struct dvb_frontend *fe,
return 0;
}
-static int smsdvb_dvbt_set_frontend(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *p)
+static int smsdvb_dvbt_set_frontend(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct smsdvb_client_t *client =
@@ -658,8 +657,7 @@ static int smsdvb_dvbt_set_frontend(struct dvb_frontend *fe,
&client->tune_done);
}
-static int smsdvb_isdbt_set_frontend(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *p)
+static int smsdvb_isdbt_set_frontend(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct smsdvb_client_t *client =
@@ -723,8 +721,7 @@ static int smsdvb_isdbt_set_frontend(struct dvb_frontend *fe,
&client->tune_done);
}
-static int smsdvb_set_frontend(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *fep)
+static int smsdvb_set_frontend(struct dvb_frontend *fe)
{
struct smsdvb_client_t *client =
container_of(fe, struct smsdvb_client_t, frontend);
@@ -733,18 +730,18 @@ static int smsdvb_set_frontend(struct dvb_frontend *fe,
switch (smscore_get_device_mode(coredev)) {
case DEVICE_MODE_DVBT:
case DEVICE_MODE_DVBT_BDA:
- return smsdvb_dvbt_set_frontend(fe, fep);
+ return smsdvb_dvbt_set_frontend(fe);
case DEVICE_MODE_ISDBT:
case DEVICE_MODE_ISDBT_BDA:
- return smsdvb_isdbt_set_frontend(fe, fep);
+ return smsdvb_isdbt_set_frontend(fe);
default:
return -EINVAL;
}
}
-static int smsdvb_get_frontend(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *fep)
+static int smsdvb_get_frontend(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *fep = &fe->dtv_property_cache;
struct smsdvb_client_t *client =
container_of(fe, struct smsdvb_client_t, frontend);
@@ -752,7 +749,7 @@ static int smsdvb_get_frontend(struct dvb_frontend *fe,
/* todo: */
memcpy(fep, &client->fe_params,
- sizeof(struct dvb_frontend_parameters));
+ sizeof(struct dtv_frontend_properties));
return 0;
}
@@ -789,7 +786,6 @@ static void smsdvb_release(struct dvb_frontend *fe)
static struct dvb_frontend_ops smsdvb_fe_ops = {
.info = {
.name = "Siano Mobile Digital MDTV Receiver",
- .type = FE_OFDM,
.frequency_min = 44250000,
.frequency_max = 867250000,
.frequency_stepsize = 250000,
@@ -873,6 +869,17 @@ static int smsdvb_hotplug(struct smscore_device_t *coredev,
memcpy(&client->frontend.ops, &smsdvb_fe_ops,
sizeof(struct dvb_frontend_ops));
+ switch (smscore_get_device_mode(coredev)) {
+ case DEVICE_MODE_DVBT:
+ case DEVICE_MODE_DVBT_BDA:
+ smsdvb_fe_ops.delsys[0] = SYS_DVBT;
+ break;
+ case DEVICE_MODE_ISDBT:
+ case DEVICE_MODE_ISDBT_BDA:
+ smsdvb_fe_ops.delsys[0] = SYS_ISDBT;
+ break;
+ }
+
rc = dvb_register_frontend(&client->adapter, &client->frontend);
if (rc < 0) {
sms_err("frontend registration failed %d", rc);
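Because the Siano frontend can serve either DVB-T or ISDB-T depending on the device mode, the hunk above fills in delsys[0] at hotplug time rather than in the static ops. A sketch of that idiom (hypothetical names; unlike the patch, which assigns to the global template after the memcpy, this version patches the per-device copy before registering):

struct example_device {
	struct dvb_adapter adapter;
	struct dvb_frontend frontend;
};

static struct dvb_frontend_ops example_fe_ops = {
	.info = { .name = "example multi-standard demod" },
	/* .delsys intentionally left empty; chosen per device below */
};

static int example_hotplug(struct example_device *dev, bool is_isdbt)
{
	/* copy the template, then pick the delivery system for this unit */
	memcpy(&dev->frontend.ops, &example_fe_ops, sizeof(example_fe_ops));
	dev->frontend.ops.delsys[0] = is_isdbt ? SYS_ISDBT : SYS_DVBT;

	return dvb_register_frontend(&dev->adapter, &dev->frontend);
}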
diff --git a/drivers/media/dvb/ttpci/av7110.c b/drivers/media/dvb/ttpci/av7110.c
index 3d20719fce1a..6ecbcf614878 100644
--- a/drivers/media/dvb/ttpci/av7110.c
+++ b/drivers/media/dvb/ttpci/av7110.c
@@ -991,7 +991,7 @@ static int av7110_start_feed(struct dvb_demux_feed *feed)
if (feed->type == DMX_TYPE_TS) {
if ((feed->ts_type & TS_DECODER) &&
- (feed->pes_type < DMX_TS_PES_OTHER)) {
+ (feed->pes_type <= DMX_TS_PES_PCR)) {
switch (demux->dmx.frontend->source) {
case DMX_MEMORY_FE:
if (feed->ts_type & TS_DECODER)
@@ -1568,20 +1568,27 @@ static int get_firmware(struct av7110* av7110)
return ret;
}
-static int alps_bsrv2_tuner_set_params(struct dvb_frontend* fe, struct dvb_frontend_parameters *params)
+static int alps_bsrv2_tuner_set_params(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct av7110* av7110 = fe->dvb->priv;
u8 pwr = 0;
u8 buf[4];
struct i2c_msg msg = { .addr = 0x61, .flags = 0, .buf = buf, .len = sizeof(buf) };
- u32 div = (params->frequency + 479500) / 125;
+ u32 div = (p->frequency + 479500) / 125;
- if (params->frequency > 2000000) pwr = 3;
- else if (params->frequency > 1800000) pwr = 2;
- else if (params->frequency > 1600000) pwr = 1;
- else if (params->frequency > 1200000) pwr = 0;
- else if (params->frequency >= 1100000) pwr = 1;
- else pwr = 2;
+ if (p->frequency > 2000000)
+ pwr = 3;
+ else if (p->frequency > 1800000)
+ pwr = 2;
+ else if (p->frequency > 1600000)
+ pwr = 1;
+ else if (p->frequency > 1200000)
+ pwr = 0;
+ else if (p->frequency >= 1100000)
+ pwr = 1;
+ else
+ pwr = 2;
buf[0] = (div >> 8) & 0x7f;
buf[1] = div & 0xff;
@@ -1604,19 +1611,20 @@ static struct ves1x93_config alps_bsrv2_config = {
.invert_pwm = 0,
};
-static int alps_tdbe2_tuner_set_params(struct dvb_frontend* fe, struct dvb_frontend_parameters *params)
+static int alps_tdbe2_tuner_set_params(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct av7110* av7110 = fe->dvb->priv;
u32 div;
u8 data[4];
struct i2c_msg msg = { .addr = 0x62, .flags = 0, .buf = data, .len = sizeof(data) };
- div = (params->frequency + 35937500 + 31250) / 62500;
+ div = (p->frequency + 35937500 + 31250) / 62500;
data[0] = (div >> 8) & 0x7f;
data[1] = div & 0xff;
data[2] = 0x85 | ((div >> 10) & 0x60);
- data[3] = (params->frequency < 174000000 ? 0x88 : params->frequency < 470000000 ? 0x84 : 0x81);
+ data[3] = (p->frequency < 174000000 ? 0x88 : p->frequency < 470000000 ? 0x84 : 0x81);
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
@@ -1635,14 +1643,15 @@ static struct ves1820_config alps_tdbe2_config = {
-static int grundig_29504_451_tuner_set_params(struct dvb_frontend* fe, struct dvb_frontend_parameters *params)
+static int grundig_29504_451_tuner_set_params(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct av7110* av7110 = fe->dvb->priv;
u32 div;
u8 data[4];
struct i2c_msg msg = { .addr = 0x61, .flags = 0, .buf = data, .len = sizeof(data) };
- div = params->frequency / 125;
+ div = p->frequency / 125;
data[0] = (div >> 8) & 0x7f;
data[1] = div & 0xff;
data[2] = 0x8e;
@@ -1661,11 +1670,12 @@ static struct tda8083_config grundig_29504_451_config = {
-static int philips_cd1516_tuner_set_params(struct dvb_frontend* fe, struct dvb_frontend_parameters *params)
+static int philips_cd1516_tuner_set_params(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct av7110* av7110 = fe->dvb->priv;
u32 div;
- u32 f = params->frequency;
+ u32 f = p->frequency;
u8 data[4];
struct i2c_msg msg = { .addr = 0x61, .flags = 0, .buf = data, .len = sizeof(data) };
@@ -1692,16 +1702,17 @@ static struct ves1820_config philips_cd1516_config = {
-static int alps_tdlb7_tuner_set_params(struct dvb_frontend* fe, struct dvb_frontend_parameters *params)
+static int alps_tdlb7_tuner_set_params(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct av7110* av7110 = fe->dvb->priv;
u32 div, pwr;
u8 data[4];
struct i2c_msg msg = { .addr = 0x60, .flags = 0, .buf = data, .len = sizeof(data) };
- div = (params->frequency + 36200000) / 166666;
+ div = (p->frequency + 36200000) / 166666;
- if (params->frequency <= 782000000)
+ if (p->frequency <= 782000000)
pwr = 1;
else
pwr = 2;
@@ -1829,8 +1840,9 @@ static u8 nexusca_stv0297_inittab[] = {
0xff, 0xff,
};
-static int nexusca_stv0297_tuner_set_params(struct dvb_frontend* fe, struct dvb_frontend_parameters *params)
+static int nexusca_stv0297_tuner_set_params(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct av7110* av7110 = fe->dvb->priv;
u32 div;
u8 data[4];
@@ -1838,19 +1850,19 @@ static int nexusca_stv0297_tuner_set_params(struct dvb_frontend* fe, struct dvb_
struct i2c_msg readmsg = { .addr = 0x63, .flags = I2C_M_RD, .buf = data, .len = 1 };
int i;
- div = (params->frequency + 36150000 + 31250) / 62500;
+ div = (p->frequency + 36150000 + 31250) / 62500;
data[0] = (div >> 8) & 0x7f;
data[1] = div & 0xff;
data[2] = 0xce;
- if (params->frequency < 45000000)
+ if (p->frequency < 45000000)
return -EINVAL;
- else if (params->frequency < 137000000)
+ else if (p->frequency < 137000000)
data[3] = 0x01;
- else if (params->frequency < 403000000)
+ else if (p->frequency < 403000000)
data[3] = 0x02;
- else if (params->frequency < 860000000)
+ else if (p->frequency < 860000000)
data[3] = 0x04;
else
return -EINVAL;
@@ -1884,27 +1896,36 @@ static struct stv0297_config nexusca_stv0297_config = {
-static int grundig_29504_401_tuner_set_params(struct dvb_frontend* fe, struct dvb_frontend_parameters *params)
+static int grundig_29504_401_tuner_set_params(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct av7110* av7110 = fe->dvb->priv;
u32 div;
u8 cfg, cpump, band_select;
u8 data[4];
struct i2c_msg msg = { .addr = 0x61, .flags = 0, .buf = data, .len = sizeof(data) };
- div = (36125000 + params->frequency) / 166666;
+ div = (36125000 + p->frequency) / 166666;
cfg = 0x88;
- if (params->frequency < 175000000) cpump = 2;
- else if (params->frequency < 390000000) cpump = 1;
- else if (params->frequency < 470000000) cpump = 2;
- else if (params->frequency < 750000000) cpump = 1;
- else cpump = 3;
+ if (p->frequency < 175000000)
+ cpump = 2;
+ else if (p->frequency < 390000000)
+ cpump = 1;
+ else if (p->frequency < 470000000)
+ cpump = 2;
+ else if (p->frequency < 750000000)
+ cpump = 1;
+ else
+ cpump = 3;
- if (params->frequency < 175000000) band_select = 0x0e;
- else if (params->frequency < 470000000) band_select = 0x05;
- else band_select = 0x03;
+ if (p->frequency < 175000000)
+ band_select = 0x0e;
+ else if (p->frequency < 470000000)
+ band_select = 0x05;
+ else
+ band_select = 0x03;
data[0] = (div >> 8) & 0x7f;
data[1] = div & 0xff;
@@ -1964,15 +1985,14 @@ static int av7110_fe_lock_fix(struct av7110* av7110, fe_status_t status)
return ret;
}
-static int av7110_fe_set_frontend(struct dvb_frontend* fe, struct dvb_frontend_parameters* params)
+static int av7110_fe_set_frontend(struct dvb_frontend *fe)
{
struct av7110* av7110 = fe->dvb->priv;
int ret = av7110_fe_lock_fix(av7110, 0);
- if (!ret) {
- av7110->saved_fe_params = *params;
- ret = av7110->fe_set_frontend(fe, params);
- }
+ if (!ret)
+ ret = av7110->fe_set_frontend(fe);
+
return ret;
}
@@ -2081,7 +2101,7 @@ static void dvb_s_recover(struct av7110* av7110)
msleep(20);
av7110_fe_set_tone(av7110->fe, av7110->saved_tone);
- av7110_fe_set_frontend(av7110->fe, &av7110->saved_fe_params);
+ av7110_fe_set_frontend(av7110->fe);
}
static u8 read_pwm(struct av7110* av7110)
diff --git a/drivers/media/dvb/ttpci/av7110.h b/drivers/media/dvb/ttpci/av7110.h
index d85b8512ac30..88b3b2d6cc0e 100644
--- a/drivers/media/dvb/ttpci/av7110.h
+++ b/drivers/media/dvb/ttpci/av7110.h
@@ -272,7 +272,6 @@ struct av7110 {
/* crash recovery */
void (*recover)(struct av7110* av7110);
- struct dvb_frontend_parameters saved_fe_params;
fe_sec_voltage_t saved_voltage;
fe_sec_tone_mode_t saved_tone;
struct dvb_diseqc_master_cmd saved_master_cmd;
@@ -286,7 +285,7 @@ struct av7110 {
int (*fe_set_tone)(struct dvb_frontend* fe, fe_sec_tone_mode_t tone);
int (*fe_set_voltage)(struct dvb_frontend* fe, fe_sec_voltage_t voltage);
int (*fe_dishnetwork_send_legacy_command)(struct dvb_frontend* fe, unsigned long cmd);
- int (*fe_set_frontend)(struct dvb_frontend* fe, struct dvb_frontend_parameters* params);
+ int (*fe_set_frontend)(struct dvb_frontend *fe);
};
diff --git a/drivers/media/dvb/ttpci/budget-av.c b/drivers/media/dvb/ttpci/budget-av.c
index 78d32f7e49fc..8b32e282bf5d 100644
--- a/drivers/media/dvb/ttpci/budget-av.c
+++ b/drivers/media/dvb/ttpci/budget-av.c
@@ -502,33 +502,33 @@ static int philips_su1278_ty_ci_set_symbol_rate(struct dvb_frontend *fe, u32 sra
return 0;
}
-static int philips_su1278_ty_ci_tuner_set_params(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *params)
+static int philips_su1278_ty_ci_tuner_set_params(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
u32 div;
u8 buf[4];
struct budget *budget = (struct budget *) fe->dvb->priv;
struct i2c_msg msg = {.addr = 0x61,.flags = 0,.buf = buf,.len = sizeof(buf) };
- if ((params->frequency < 950000) || (params->frequency > 2150000))
+ if ((c->frequency < 950000) || (c->frequency > 2150000))
return -EINVAL;
- div = (params->frequency + (125 - 1)) / 125; // round correctly
+ div = (c->frequency + (125 - 1)) / 125; /* round correctly */
buf[0] = (div >> 8) & 0x7f;
buf[1] = div & 0xff;
buf[2] = 0x80 | ((div & 0x18000) >> 10) | 4;
buf[3] = 0x20;
- if (params->u.qpsk.symbol_rate < 4000000)
+ if (c->symbol_rate < 4000000)
buf[3] |= 1;
- if (params->frequency < 1250000)
+ if (c->frequency < 1250000)
buf[3] |= 0;
- else if (params->frequency < 1550000)
+ else if (c->frequency < 1550000)
buf[3] |= 0x40;
- else if (params->frequency < 2050000)
+ else if (c->frequency < 2050000)
buf[3] |= 0x80;
- else if (params->frequency < 2150000)
+ else if (c->frequency < 2150000)
buf[3] |= 0xC0;
if (fe->ops.i2c_gate_ctrl)
@@ -617,8 +617,9 @@ static struct stv0299_config cinergy_1200s_1894_0010_config = {
.set_symbol_rate = philips_su1278_ty_ci_set_symbol_rate,
};
-static int philips_cu1216_tuner_set_params(struct dvb_frontend *fe, struct dvb_frontend_parameters *params)
+static int philips_cu1216_tuner_set_params(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct budget *budget = (struct budget *) fe->dvb->priv;
u8 buf[6];
struct i2c_msg msg = {.addr = 0x60,.flags = 0,.buf = buf,.len = sizeof(buf) };
@@ -627,13 +628,13 @@ static int philips_cu1216_tuner_set_params(struct dvb_frontend *fe, struct dvb_f
#define CU1216_IF 36125000
#define TUNER_MUL 62500
- u32 div = (params->frequency + CU1216_IF + TUNER_MUL / 2) / TUNER_MUL;
+ u32 div = (c->frequency + CU1216_IF + TUNER_MUL / 2) / TUNER_MUL;
buf[0] = (div >> 8) & 0x7f;
buf[1] = div & 0xff;
buf[2] = 0xce;
- buf[3] = (params->frequency < 150000000 ? 0x01 :
- params->frequency < 445000000 ? 0x02 : 0x04);
+ buf[3] = (c->frequency < 150000000 ? 0x01 :
+ c->frequency < 445000000 ? 0x02 : 0x04);
buf[4] = 0xde;
buf[5] = 0x20;
@@ -697,8 +698,9 @@ static int philips_tu1216_tuner_init(struct dvb_frontend *fe)
return 0;
}
-static int philips_tu1216_tuner_set_params(struct dvb_frontend *fe, struct dvb_frontend_parameters *params)
+static int philips_tu1216_tuner_set_params(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct budget *budget = (struct budget *) fe->dvb->priv;
u8 tuner_buf[4];
struct i2c_msg tuner_msg = {.addr = 0x60,.flags = 0,.buf = tuner_buf,.len =
@@ -707,7 +709,7 @@ static int philips_tu1216_tuner_set_params(struct dvb_frontend *fe, struct dvb_f
u8 band, cp, filter;
// determine charge pump
- tuner_frequency = params->frequency + 36166000;
+ tuner_frequency = c->frequency + 36166000;
if (tuner_frequency < 87000000)
return -EINVAL;
else if (tuner_frequency < 130000000)
@@ -732,28 +734,28 @@ static int philips_tu1216_tuner_set_params(struct dvb_frontend *fe, struct dvb_f
return -EINVAL;
// determine band
- if (params->frequency < 49000000)
+ if (c->frequency < 49000000)
return -EINVAL;
- else if (params->frequency < 161000000)
+ else if (c->frequency < 161000000)
band = 1;
- else if (params->frequency < 444000000)
+ else if (c->frequency < 444000000)
band = 2;
- else if (params->frequency < 861000000)
+ else if (c->frequency < 861000000)
band = 4;
else
return -EINVAL;
// setup PLL filter
- switch (params->u.ofdm.bandwidth) {
- case BANDWIDTH_6_MHZ:
+ switch (c->bandwidth_hz) {
+ case 6000000:
filter = 0;
break;
- case BANDWIDTH_7_MHZ:
+ case 7000000:
filter = 0;
break;
- case BANDWIDTH_8_MHZ:
+ case 8000000:
filter = 1;
break;
@@ -763,7 +765,7 @@ static int philips_tu1216_tuner_set_params(struct dvb_frontend *fe, struct dvb_f
// calculate divisor
// ((36166000+((1000000/6)/2)) + Finput)/(1000000/6)
- tuner_frequency = (((params->frequency / 1000) * 6) + 217496) / 1000;
+ tuner_frequency = (((c->frequency / 1000) * 6) + 217496) / 1000;
// setup tuner buffer
tuner_buf[0] = (tuner_frequency >> 8) & 0x7f;
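Bandwidth handling changes the same way throughout these drivers: the BANDWIDTH_6/7/8_MHZ enum from the old parameter union is replaced by bandwidth_hz in plain Hz. A small illustrative helper (not in the patch) showing the equivalent mapping used by the tu1216/tdm1316l tuners:

/* map bandwidth in Hz to the PLL filter bit used by these tuners */
static int example_bandwidth_to_filter(u32 bandwidth_hz, u8 *filter)
{
	switch (bandwidth_hz) {
	case 6000000:
	case 7000000:
		*filter = 0;
		return 0;
	case 8000000:
		*filter = 1;
		return 0;
	default:
		return -EINVAL;		/* unsupported bandwidth */
	}
}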
diff --git a/drivers/media/dvb/ttpci/budget-ci.c b/drivers/media/dvb/ttpci/budget-ci.c
index ca02e9722172..98e524178765 100644
--- a/drivers/media/dvb/ttpci/budget-ci.c
+++ b/drivers/media/dvb/ttpci/budget-ci.c
@@ -193,7 +193,6 @@ static int msp430_ir_init(struct budget_ci *budget_ci)
dev->input_phys = budget_ci->ir.phys;
dev->input_id.bustype = BUS_PCI;
dev->input_id.version = 1;
- dev->scanmask = 0xff;
if (saa->pci->subsystem_vendor) {
dev->input_id.vendor = saa->pci->subsystem_vendor;
dev->input_id.product = saa->pci->subsystem_device;
@@ -234,6 +233,8 @@ static int msp430_ir_init(struct budget_ci *budget_ci)
dev->map_name = RC_MAP_BUDGET_CI_OLD;
break;
}
+ if (!budget_ci->ir.full_rc5)
+ dev->scanmask = 0xff;
error = rc_register_device(dev);
if (error) {
@@ -659,33 +660,33 @@ static int philips_su1278_tt_set_symbol_rate(struct dvb_frontend *fe, u32 srate,
return 0;
}
-static int philips_su1278_tt_tuner_set_params(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *params)
+static int philips_su1278_tt_tuner_set_params(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct budget_ci *budget_ci = (struct budget_ci *) fe->dvb->priv;
u32 div;
u8 buf[4];
struct i2c_msg msg = {.addr = 0x60,.flags = 0,.buf = buf,.len = sizeof(buf) };
- if ((params->frequency < 950000) || (params->frequency > 2150000))
+ if ((p->frequency < 950000) || (p->frequency > 2150000))
return -EINVAL;
- div = (params->frequency + (500 - 1)) / 500; // round correctly
+ div = (p->frequency + (500 - 1)) / 500; /* round correctly */
buf[0] = (div >> 8) & 0x7f;
buf[1] = div & 0xff;
buf[2] = 0x80 | ((div & 0x18000) >> 10) | 2;
buf[3] = 0x20;
- if (params->u.qpsk.symbol_rate < 4000000)
+ if (p->symbol_rate < 4000000)
buf[3] |= 1;
- if (params->frequency < 1250000)
+ if (p->frequency < 1250000)
buf[3] |= 0;
- else if (params->frequency < 1550000)
+ else if (p->frequency < 1550000)
buf[3] |= 0x40;
- else if (params->frequency < 2050000)
+ else if (p->frequency < 2050000)
buf[3] |= 0x80;
- else if (params->frequency < 2150000)
+ else if (p->frequency < 2150000)
buf[3] |= 0xC0;
if (fe->ops.i2c_gate_ctrl)
@@ -740,8 +741,9 @@ static int philips_tdm1316l_tuner_init(struct dvb_frontend *fe)
return 0;
}
-static int philips_tdm1316l_tuner_set_params(struct dvb_frontend *fe, struct dvb_frontend_parameters *params)
+static int philips_tdm1316l_tuner_set_params(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct budget_ci *budget_ci = (struct budget_ci *) fe->dvb->priv;
u8 tuner_buf[4];
struct i2c_msg tuner_msg = {.addr = budget_ci->tuner_pll_address,.flags = 0,.buf = tuner_buf,.len = sizeof(tuner_buf) };
@@ -749,7 +751,7 @@ static int philips_tdm1316l_tuner_set_params(struct dvb_frontend *fe, struct dvb
u8 band, cp, filter;
// determine charge pump
- tuner_frequency = params->frequency + 36130000;
+ tuner_frequency = p->frequency + 36130000;
if (tuner_frequency < 87000000)
return -EINVAL;
else if (tuner_frequency < 130000000)
@@ -774,30 +776,30 @@ static int philips_tdm1316l_tuner_set_params(struct dvb_frontend *fe, struct dvb
return -EINVAL;
// determine band
- if (params->frequency < 49000000)
+ if (p->frequency < 49000000)
return -EINVAL;
- else if (params->frequency < 159000000)
+ else if (p->frequency < 159000000)
band = 1;
- else if (params->frequency < 444000000)
+ else if (p->frequency < 444000000)
band = 2;
- else if (params->frequency < 861000000)
+ else if (p->frequency < 861000000)
band = 4;
else
return -EINVAL;
// setup PLL filter and TDA9889
- switch (params->u.ofdm.bandwidth) {
- case BANDWIDTH_6_MHZ:
+ switch (p->bandwidth_hz) {
+ case 6000000:
tda1004x_writereg(fe, 0x0C, 0x14);
filter = 0;
break;
- case BANDWIDTH_7_MHZ:
+ case 7000000:
tda1004x_writereg(fe, 0x0C, 0x80);
filter = 0;
break;
- case BANDWIDTH_8_MHZ:
+ case 8000000:
tda1004x_writereg(fe, 0x0C, 0x14);
filter = 1;
break;
@@ -808,7 +810,7 @@ static int philips_tdm1316l_tuner_set_params(struct dvb_frontend *fe, struct dvb
// calculate divisor
// ((36130000+((1000000/6)/2)) + Finput)/(1000000/6)
- tuner_frequency = (((params->frequency / 1000) * 6) + 217280) / 1000;
+ tuner_frequency = (((p->frequency / 1000) * 6) + 217280) / 1000;
// setup tuner buffer
tuner_buf[0] = tuner_frequency >> 8;
@@ -855,8 +857,9 @@ static struct tda1004x_config philips_tdm1316l_config_invert = {
.request_firmware = philips_tdm1316l_request_firmware,
};
-static int dvbc_philips_tdm1316l_tuner_set_params(struct dvb_frontend *fe, struct dvb_frontend_parameters *params)
+static int dvbc_philips_tdm1316l_tuner_set_params(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct budget_ci *budget_ci = (struct budget_ci *) fe->dvb->priv;
u8 tuner_buf[5];
struct i2c_msg tuner_msg = {.addr = budget_ci->tuner_pll_address,
@@ -867,7 +870,7 @@ static int dvbc_philips_tdm1316l_tuner_set_params(struct dvb_frontend *fe, struc
u8 band, cp, filter;
// determine charge pump
- tuner_frequency = params->frequency + 36125000;
+ tuner_frequency = p->frequency + 36125000;
if (tuner_frequency < 87000000)
return -EINVAL;
else if (tuner_frequency < 130000000) {
@@ -904,7 +907,7 @@ static int dvbc_philips_tdm1316l_tuner_set_params(struct dvb_frontend *fe, struc
filter = 1;
// calculate divisor
- tuner_frequency = (params->frequency + 36125000 + (62500/2)) / 62500;
+ tuner_frequency = (p->frequency + 36125000 + (62500/2)) / 62500;
// setup tuner buffer
tuner_buf[0] = tuner_frequency >> 8;
diff --git a/drivers/media/dvb/ttpci/budget-patch.c b/drivers/media/dvb/ttpci/budget-patch.c
index 3395d1a90516..2cb35c23d2ac 100644
--- a/drivers/media/dvb/ttpci/budget-patch.c
+++ b/drivers/media/dvb/ttpci/budget-patch.c
@@ -261,19 +261,25 @@ static int budget_patch_diseqc_send_burst(struct dvb_frontend* fe, fe_sec_mini_c
return 0;
}
-static int alps_bsrv2_tuner_set_params(struct dvb_frontend* fe, struct dvb_frontend_parameters* params)
+static int alps_bsrv2_tuner_set_params(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct budget_patch* budget = (struct budget_patch*) fe->dvb->priv;
u8 pwr = 0;
u8 buf[4];
struct i2c_msg msg = { .addr = 0x61, .flags = 0, .buf = buf, .len = sizeof(buf) };
- u32 div = (params->frequency + 479500) / 125;
-
- if (params->frequency > 2000000) pwr = 3;
- else if (params->frequency > 1800000) pwr = 2;
- else if (params->frequency > 1600000) pwr = 1;
- else if (params->frequency > 1200000) pwr = 0;
- else if (params->frequency >= 1100000) pwr = 1;
+ u32 div = (p->frequency + 479500) / 125;
+
+ if (p->frequency > 2000000)
+ pwr = 3;
+ else if (p->frequency > 1800000)
+ pwr = 2;
+ else if (p->frequency > 1600000)
+ pwr = 1;
+ else if (p->frequency > 1200000)
+ pwr = 0;
+ else if (p->frequency >= 1100000)
+ pwr = 1;
else pwr = 2;
buf[0] = (div >> 8) & 0x7f;
@@ -297,14 +303,15 @@ static struct ves1x93_config alps_bsrv2_config = {
.invert_pwm = 0,
};
-static int grundig_29504_451_tuner_set_params(struct dvb_frontend* fe, struct dvb_frontend_parameters* params)
+static int grundig_29504_451_tuner_set_params(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct budget_patch* budget = (struct budget_patch*) fe->dvb->priv;
u32 div;
u8 data[4];
struct i2c_msg msg = { .addr = 0x61, .flags = 0, .buf = data, .len = sizeof(data) };
- div = params->frequency / 125;
+ div = p->frequency / 125;
data[0] = (div >> 8) & 0x7f;
data[1] = div & 0xff;
data[2] = 0x8e;
diff --git a/drivers/media/dvb/ttpci/budget.c b/drivers/media/dvb/ttpci/budget.c
index d238fb9371a7..b21bcce66708 100644
--- a/drivers/media/dvb/ttpci/budget.c
+++ b/drivers/media/dvb/ttpci/budget.c
@@ -200,19 +200,25 @@ static int budget_diseqc_send_burst(struct dvb_frontend* fe, fe_sec_mini_cmd_t m
return 0;
}
-static int alps_bsrv2_tuner_set_params(struct dvb_frontend* fe, struct dvb_frontend_parameters* params)
+static int alps_bsrv2_tuner_set_params(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct budget* budget = (struct budget*) fe->dvb->priv;
u8 pwr = 0;
u8 buf[4];
struct i2c_msg msg = { .addr = 0x61, .flags = 0, .buf = buf, .len = sizeof(buf) };
- u32 div = (params->frequency + 479500) / 125;
-
- if (params->frequency > 2000000) pwr = 3;
- else if (params->frequency > 1800000) pwr = 2;
- else if (params->frequency > 1600000) pwr = 1;
- else if (params->frequency > 1200000) pwr = 0;
- else if (params->frequency >= 1100000) pwr = 1;
+ u32 div = (c->frequency + 479500) / 125;
+
+ if (c->frequency > 2000000)
+ pwr = 3;
+ else if (c->frequency > 1800000)
+ pwr = 2;
+ else if (c->frequency > 1600000)
+ pwr = 1;
+ else if (c->frequency > 1200000)
+ pwr = 0;
+ else if (c->frequency >= 1100000)
+ pwr = 1;
else pwr = 2;
buf[0] = (div >> 8) & 0x7f;
@@ -236,19 +242,20 @@ static struct ves1x93_config alps_bsrv2_config =
.invert_pwm = 0,
};
-static int alps_tdbe2_tuner_set_params(struct dvb_frontend* fe, struct dvb_frontend_parameters* params)
+static int alps_tdbe2_tuner_set_params(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct budget* budget = (struct budget*) fe->dvb->priv;
u32 div;
u8 data[4];
struct i2c_msg msg = { .addr = 0x62, .flags = 0, .buf = data, .len = sizeof(data) };
- div = (params->frequency + 35937500 + 31250) / 62500;
+ div = (c->frequency + 35937500 + 31250) / 62500;
data[0] = (div >> 8) & 0x7f;
data[1] = div & 0xff;
data[2] = 0x85 | ((div >> 10) & 0x60);
- data[3] = (params->frequency < 174000000 ? 0x88 : params->frequency < 470000000 ? 0x84 : 0x81);
+ data[3] = (c->frequency < 174000000 ? 0x88 : c->frequency < 470000000 ? 0x84 : 0x81);
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
@@ -263,8 +270,9 @@ static struct ves1820_config alps_tdbe2_config = {
.selagc = VES1820_SELAGC_SIGNAMPERR,
};
-static int grundig_29504_401_tuner_set_params(struct dvb_frontend* fe, struct dvb_frontend_parameters* params)
+static int grundig_29504_401_tuner_set_params(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct budget *budget = fe->dvb->priv;
u8 *tuner_addr = fe->tuner_priv;
u32 div;
@@ -277,19 +285,27 @@ static int grundig_29504_401_tuner_set_params(struct dvb_frontend* fe, struct dv
else
msg.addr = 0x61;
- div = (36125000 + params->frequency) / 166666;
+ div = (36125000 + c->frequency) / 166666;
cfg = 0x88;
- if (params->frequency < 175000000) cpump = 2;
- else if (params->frequency < 390000000) cpump = 1;
- else if (params->frequency < 470000000) cpump = 2;
- else if (params->frequency < 750000000) cpump = 1;
- else cpump = 3;
+ if (c->frequency < 175000000)
+ cpump = 2;
+ else if (c->frequency < 390000000)
+ cpump = 1;
+ else if (c->frequency < 470000000)
+ cpump = 2;
+ else if (c->frequency < 750000000)
+ cpump = 1;
+ else
+ cpump = 3;
- if (params->frequency < 175000000) band_select = 0x0e;
- else if (params->frequency < 470000000) band_select = 0x05;
- else band_select = 0x03;
+ if (c->frequency < 175000000)
+ band_select = 0x0e;
+ else if (c->frequency < 470000000)
+ band_select = 0x05;
+ else
+ band_select = 0x03;
data[0] = (div >> 8) & 0x7f;
data[1] = div & 0xff;
@@ -312,14 +328,15 @@ static struct l64781_config grundig_29504_401_config_activy = {
static u8 tuner_address_grundig_29504_401_activy = 0x60;
-static int grundig_29504_451_tuner_set_params(struct dvb_frontend* fe, struct dvb_frontend_parameters* params)
+static int grundig_29504_451_tuner_set_params(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct budget* budget = (struct budget*) fe->dvb->priv;
u32 div;
u8 data[4];
struct i2c_msg msg = { .addr = 0x61, .flags = 0, .buf = data, .len = sizeof(data) };
- div = params->frequency / 125;
+ div = c->frequency / 125;
data[0] = (div >> 8) & 0x7f;
data[1] = div & 0xff;
data[2] = 0x8e;
@@ -335,14 +352,15 @@ static struct tda8083_config grundig_29504_451_config = {
.demod_address = 0x68,
};
-static int s5h1420_tuner_set_params(struct dvb_frontend* fe, struct dvb_frontend_parameters* params)
+static int s5h1420_tuner_set_params(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct budget* budget = (struct budget*) fe->dvb->priv;
u32 div;
u8 data[4];
struct i2c_msg msg = { .addr = 0x61, .flags = 0, .buf = data, .len = sizeof(data) };
- div = params->frequency / 1000;
+ div = c->frequency / 1000;
data[0] = (div >> 8) & 0x7f;
data[1] = div & 0xff;
data[2] = 0xc2;
diff --git a/drivers/media/dvb/ttusb-budget/dvb-ttusb-budget.c b/drivers/media/dvb/ttusb-budget/dvb-ttusb-budget.c
index e90192fdde11..5b682cc4c814 100644
--- a/drivers/media/dvb/ttusb-budget/dvb-ttusb-budget.c
+++ b/drivers/media/dvb/ttusb-budget/dvb-ttusb-budget.c
@@ -1017,19 +1017,20 @@ static u32 functionality(struct i2c_adapter *adapter)
-static int alps_tdmb7_tuner_set_params(struct dvb_frontend* fe, struct dvb_frontend_parameters* params)
+static int alps_tdmb7_tuner_set_params(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct ttusb* ttusb = (struct ttusb*) fe->dvb->priv;
u8 data[4];
struct i2c_msg msg = {.addr=0x61, .flags=0, .buf=data, .len=sizeof(data) };
u32 div;
- div = (params->frequency + 36166667) / 166667;
+ div = (p->frequency + 36166667) / 166667;
data[0] = (div >> 8) & 0x7f;
data[1] = div & 0xff;
data[2] = ((div >> 10) & 0x60) | 0x85;
- data[3] = params->frequency < 592000000 ? 0x40 : 0x80;
+ data[3] = p->frequency < 592000000 ? 0x40 : 0x80;
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
@@ -1071,8 +1072,9 @@ static int philips_tdm1316l_tuner_init(struct dvb_frontend* fe)
return 0;
}
-static int philips_tdm1316l_tuner_set_params(struct dvb_frontend* fe, struct dvb_frontend_parameters* params)
+static int philips_tdm1316l_tuner_set_params(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct ttusb* ttusb = (struct ttusb*) fe->dvb->priv;
u8 tuner_buf[4];
struct i2c_msg tuner_msg = {.addr=0x60, .flags=0, .buf=tuner_buf, .len=sizeof(tuner_buf) };
@@ -1080,7 +1082,7 @@ static int philips_tdm1316l_tuner_set_params(struct dvb_frontend* fe, struct dvb
u8 band, cp, filter;
// determine charge pump
- tuner_frequency = params->frequency + 36130000;
+ tuner_frequency = p->frequency + 36130000;
if (tuner_frequency < 87000000) return -EINVAL;
else if (tuner_frequency < 130000000) cp = 3;
else if (tuner_frequency < 160000000) cp = 5;
@@ -1094,25 +1096,29 @@ static int philips_tdm1316l_tuner_set_params(struct dvb_frontend* fe, struct dvb
else return -EINVAL;
// determine band
- if (params->frequency < 49000000) return -EINVAL;
- else if (params->frequency < 159000000) band = 1;
- else if (params->frequency < 444000000) band = 2;
- else if (params->frequency < 861000000) band = 4;
+ if (p->frequency < 49000000)
+ return -EINVAL;
+ else if (p->frequency < 159000000)
+ band = 1;
+ else if (p->frequency < 444000000)
+ band = 2;
+ else if (p->frequency < 861000000)
+ band = 4;
else return -EINVAL;
// setup PLL filter
- switch (params->u.ofdm.bandwidth) {
- case BANDWIDTH_6_MHZ:
+ switch (p->bandwidth_hz) {
+ case 6000000:
tda1004x_writereg(fe, 0x0C, 0);
filter = 0;
break;
- case BANDWIDTH_7_MHZ:
+ case 7000000:
tda1004x_writereg(fe, 0x0C, 0);
filter = 0;
break;
- case BANDWIDTH_8_MHZ:
+ case 8000000:
tda1004x_writereg(fe, 0x0C, 0xFF);
filter = 1;
break;
@@ -1123,7 +1129,7 @@ static int philips_tdm1316l_tuner_set_params(struct dvb_frontend* fe, struct dvb
// calculate divisor
// ((36130000+((1000000/6)/2)) + Finput)/(1000000/6)
- tuner_frequency = (((params->frequency / 1000) * 6) + 217280) / 1000;
+ tuner_frequency = (((p->frequency / 1000) * 6) + 217280) / 1000;
// setup tuner buffer
tuner_buf[0] = tuner_frequency >> 8;
@@ -1273,23 +1279,24 @@ static int alps_stv0299_set_symbol_rate(struct dvb_frontend *fe, u32 srate, u32
return 0;
}
-static int philips_tsa5059_tuner_set_params(struct dvb_frontend *fe, struct dvb_frontend_parameters *params)
+static int philips_tsa5059_tuner_set_params(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct ttusb* ttusb = (struct ttusb*) fe->dvb->priv;
u8 buf[4];
u32 div;
struct i2c_msg msg = {.addr = 0x61,.flags = 0,.buf = buf,.len = sizeof(buf) };
- if ((params->frequency < 950000) || (params->frequency > 2150000))
+ if ((p->frequency < 950000) || (p->frequency > 2150000))
return -EINVAL;
- div = (params->frequency + (125 - 1)) / 125; // round correctly
+ div = (p->frequency + (125 - 1)) / 125; /* round correctly */
buf[0] = (div >> 8) & 0x7f;
buf[1] = div & 0xff;
buf[2] = 0x80 | ((div & 0x18000) >> 10) | 4;
buf[3] = 0xC4;
- if (params->frequency > 1530000)
+ if (p->frequency > 1530000)
buf[3] = 0xC0;
/* BSBE1 wants XCE bit set */
@@ -1316,14 +1323,15 @@ static struct stv0299_config alps_stv0299_config = {
.set_symbol_rate = alps_stv0299_set_symbol_rate,
};
-static int ttusb_novas_grundig_29504_491_tuner_set_params(struct dvb_frontend *fe, struct dvb_frontend_parameters *params)
+static int ttusb_novas_grundig_29504_491_tuner_set_params(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct ttusb* ttusb = (struct ttusb*) fe->dvb->priv;
u8 buf[4];
u32 div;
struct i2c_msg msg = {.addr = 0x61,.flags = 0,.buf = buf,.len = sizeof(buf) };
- div = params->frequency / 125;
+ div = p->frequency / 125;
buf[0] = (div >> 8) & 0x7f;
buf[1] = div & 0xff;
@@ -1343,19 +1351,20 @@ static struct tda8083_config ttusb_novas_grundig_29504_491_config = {
.demod_address = 0x68,
};
-static int alps_tdbe2_tuner_set_params(struct dvb_frontend* fe, struct dvb_frontend_parameters* params)
+static int alps_tdbe2_tuner_set_params(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct ttusb* ttusb = fe->dvb->priv;
u32 div;
u8 data[4];
struct i2c_msg msg = { .addr = 0x62, .flags = 0, .buf = data, .len = sizeof(data) };
- div = (params->frequency + 35937500 + 31250) / 62500;
+ div = (p->frequency + 35937500 + 31250) / 62500;
data[0] = (div >> 8) & 0x7f;
data[1] = div & 0xff;
data[2] = 0x85 | ((div >> 10) & 0x60);
- data[3] = (params->frequency < 174000000 ? 0x88 : params->frequency < 470000000 ? 0x84 : 0x81);
+ data[3] = (p->frequency < 174000000 ? 0x88 : p->frequency < 470000000 ? 0x84 : 0x81);
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
@@ -1387,8 +1396,9 @@ static u8 read_pwm(struct ttusb* ttusb)
}
-static int dvbc_philips_tdm1316l_tuner_set_params(struct dvb_frontend *fe, struct dvb_frontend_parameters *params)
+static int dvbc_philips_tdm1316l_tuner_set_params(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct ttusb *ttusb = (struct ttusb *) fe->dvb->priv;
u8 tuner_buf[5];
struct i2c_msg tuner_msg = {.addr = 0x60,
@@ -1399,7 +1409,7 @@ static int dvbc_philips_tdm1316l_tuner_set_params(struct dvb_frontend *fe, struc
u8 band, cp, filter;
// determine charge pump
- tuner_frequency = params->frequency;
+ tuner_frequency = p->frequency;
if (tuner_frequency < 87000000) {return -EINVAL;}
else if (tuner_frequency < 130000000) {cp = 3; band = 1;}
else if (tuner_frequency < 160000000) {cp = 5; band = 1;}
@@ -1417,7 +1427,7 @@ static int dvbc_philips_tdm1316l_tuner_set_params(struct dvb_frontend *fe, struc
// calculate divisor
// (Finput + Fif)/Fref; Fif = 36125000 Hz, Fref = 62500 Hz
- tuner_frequency = ((params->frequency + 36125000) / 62500);
+ tuner_frequency = ((p->frequency + 36125000) / 62500);
// setup tuner buffer
tuner_buf[0] = tuner_frequency >> 8;
@@ -1694,10 +1704,8 @@ static int ttusb_probe(struct usb_interface *intf, const struct usb_device_id *i
ttusb->i2c_adap.dev.parent = &udev->dev;
result = i2c_add_adapter(&ttusb->i2c_adap);
- if (result) {
- dvb_unregister_adapter (&ttusb->adapter);
- return result;
- }
+ if (result)
+ goto err_unregister_adapter;
memset(&ttusb->dvb_demux, 0, sizeof(ttusb->dvb_demux));
@@ -1714,33 +1722,29 @@ static int ttusb_probe(struct usb_interface *intf, const struct usb_device_id *i
ttusb->dvb_demux.stop_feed = ttusb_stop_feed;
ttusb->dvb_demux.write_to_decoder = NULL;
- if ((result = dvb_dmx_init(&ttusb->dvb_demux)) < 0) {
+ result = dvb_dmx_init(&ttusb->dvb_demux);
+ if (result < 0) {
printk("ttusb_dvb: dvb_dmx_init failed (errno = %d)\n", result);
- i2c_del_adapter(&ttusb->i2c_adap);
- dvb_unregister_adapter (&ttusb->adapter);
- return -ENODEV;
+ result = -ENODEV;
+ goto err_i2c_del_adapter;
}
//FIXME dmxdev (nur WAS?)
ttusb->dmxdev.filternum = ttusb->dvb_demux.filternum;
ttusb->dmxdev.demux = &ttusb->dvb_demux.dmx;
ttusb->dmxdev.capabilities = 0;
- if ((result = dvb_dmxdev_init(&ttusb->dmxdev, &ttusb->adapter)) < 0) {
+ result = dvb_dmxdev_init(&ttusb->dmxdev, &ttusb->adapter);
+ if (result < 0) {
printk("ttusb_dvb: dvb_dmxdev_init failed (errno = %d)\n",
result);
- dvb_dmx_release(&ttusb->dvb_demux);
- i2c_del_adapter(&ttusb->i2c_adap);
- dvb_unregister_adapter (&ttusb->adapter);
- return -ENODEV;
+ result = -ENODEV;
+ goto err_release_dmx;
}
if (dvb_net_init(&ttusb->adapter, &ttusb->dvbnet, &ttusb->dvb_demux.dmx)) {
printk("ttusb_dvb: dvb_net_init failed!\n");
- dvb_dmxdev_release(&ttusb->dmxdev);
- dvb_dmx_release(&ttusb->dvb_demux);
- i2c_del_adapter(&ttusb->i2c_adap);
- dvb_unregister_adapter (&ttusb->adapter);
- return -ENODEV;
+ result = -ENODEV;
+ goto err_release_dmxdev;
}
usb_set_intfdata(intf, (void *) ttusb);
@@ -1748,6 +1752,16 @@ static int ttusb_probe(struct usb_interface *intf, const struct usb_device_id *i
frontend_init(ttusb);
return 0;
+
+err_release_dmxdev:
+ dvb_dmxdev_release(&ttusb->dmxdev);
+err_release_dmx:
+ dvb_dmx_release(&ttusb->dvb_demux);
+err_i2c_del_adapter:
+ i2c_del_adapter(&ttusb->i2c_adap);
+err_unregister_adapter:
+ dvb_unregister_adapter (&ttusb->adapter);
+ return result;
}
static void ttusb_disconnect(struct usb_interface *intf)
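The ttusb_probe() rework above replaces four copies of the teardown sequence with a single goto ladder that unwinds in reverse order of setup. A condensed sketch of the pattern; setup_*/undo_* are hypothetical helpers standing in for i2c_add_adapter(), dvb_dmx_init() and friends:

static int example_probe(void)
{
	int result;

	result = setup_a();		/* e.g. register the adapter */
	if (result)
		return result;

	result = setup_b();		/* e.g. add the I2C adapter */
	if (result)
		goto err_undo_a;

	result = setup_c();		/* e.g. init the demux */
	if (result)
		goto err_undo_b;

	return 0;

err_undo_b:				/* unwind strictly in reverse order */
	undo_b();
err_undo_a:
	undo_a();
	return result;
}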
diff --git a/drivers/media/dvb/ttusb-dec/ttusbdecfe.c b/drivers/media/dvb/ttusb-dec/ttusbdecfe.c
index 21260aad1e54..5c45c9d0712d 100644
--- a/drivers/media/dvb/ttusb-dec/ttusbdecfe.c
+++ b/drivers/media/dvb/ttusb-dec/ttusbdecfe.c
@@ -87,8 +87,9 @@ static int ttusbdecfe_dvbt_read_status(struct dvb_frontend *fe,
return 0;
}
-static int ttusbdecfe_dvbt_set_frontend(struct dvb_frontend* fe, struct dvb_frontend_parameters *p)
+static int ttusbdecfe_dvbt_set_frontend(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct ttusbdecfe_state* state = (struct ttusbdecfe_state*) fe->demodulator_priv;
u8 b[] = { 0x00, 0x00, 0x00, 0x03,
0x00, 0x00, 0x00, 0x00,
@@ -113,8 +114,9 @@ static int ttusbdecfe_dvbt_get_tune_settings(struct dvb_frontend* fe,
return 0;
}
-static int ttusbdecfe_dvbs_set_frontend(struct dvb_frontend* fe, struct dvb_frontend_parameters *p)
+static int ttusbdecfe_dvbs_set_frontend(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct ttusbdecfe_state* state = (struct ttusbdecfe_state*) fe->demodulator_priv;
u8 b[] = { 0x00, 0x00, 0x00, 0x01,
@@ -135,7 +137,7 @@ static int ttusbdecfe_dvbs_set_frontend(struct dvb_frontend* fe, struct dvb_fron
freq = htonl(p->frequency +
(state->hi_band ? LOF_HI : LOF_LO));
memcpy(&b[4], &freq, sizeof(u32));
- sym_rate = htonl(p->u.qam.symbol_rate);
+ sym_rate = htonl(p->symbol_rate);
memcpy(&b[12], &sym_rate, sizeof(u32));
band = htonl(state->hi_band ? LOF_HI : LOF_LO);
memcpy(&b[24], &band, sizeof(u32));
@@ -241,10 +243,9 @@ struct dvb_frontend* ttusbdecfe_dvbs_attach(const struct ttusbdecfe_config* conf
}
static struct dvb_frontend_ops ttusbdecfe_dvbt_ops = {
-
+ .delsys = { SYS_DVBT },
.info = {
.name = "TechnoTrend/Hauppauge DEC2000-t Frontend",
- .type = FE_OFDM,
.frequency_min = 51000000,
.frequency_max = 858000000,
.frequency_stepsize = 62500,
@@ -265,10 +266,9 @@ static struct dvb_frontend_ops ttusbdecfe_dvbt_ops = {
};
static struct dvb_frontend_ops ttusbdecfe_dvbs_ops = {
-
+ .delsys = { SYS_DVBS },
.info = {
.name = "TechnoTrend/Hauppauge DEC3000-s Frontend",
- .type = FE_QPSK,
.frequency_min = 950000,
.frequency_max = 2150000,
.frequency_stepsize = 125,
diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c
index 6edc9ba81203..6f9eb94e85b3 100644
--- a/drivers/media/media-device.c
+++ b/drivers/media/media-device.c
@@ -108,8 +108,7 @@ static long media_device_enum_entities(struct media_device *mdev,
u_ent.group_id = ent->group_id;
u_ent.pads = ent->num_pads;
u_ent.links = ent->num_links - ent->num_backlinks;
- u_ent.v4l.major = ent->v4l.major;
- u_ent.v4l.minor = ent->v4l.minor;
+ memcpy(&u_ent.raw, &ent->info, sizeof(ent->info));
if (copy_to_user(uent, &u_ent, sizeof(u_ent)))
return -EFAULT;
return 0;
diff --git a/drivers/media/radio/Kconfig b/drivers/media/radio/Kconfig
index ccd5f0d8a012..e954781c90bf 100644
--- a/drivers/media/radio/Kconfig
+++ b/drivers/media/radio/Kconfig
@@ -11,6 +11,162 @@ menuconfig RADIO_ADAPTERS
if RADIO_ADAPTERS && VIDEO_V4L2
+config RADIO_SI470X
+ bool "Silicon Labs Si470x FM Radio Receiver support"
+ depends on VIDEO_V4L2
+
+source "drivers/media/radio/si470x/Kconfig"
+
+config USB_MR800
+ tristate "AverMedia MR 800 USB FM radio support"
+ depends on USB && VIDEO_V4L2
+ ---help---
+ Say Y here if you want to connect this type of radio to your
+ computer's USB port. Note that the audio is not digital, and
+ you must connect the line out connector to a sound card or a
+ set of speakers.
+
+ To compile this driver as a module, choose M here: the
+ module will be called radio-mr800.
+
+config USB_DSBR
+ tristate "D-Link/GemTek USB FM radio support"
+ depends on USB && VIDEO_V4L2
+ ---help---
+ Say Y here if you want to connect this type of radio to your
+ computer's USB port. Note that the audio is not digital, and
+ you must connect the line out connector to a sound card or a
+ set of speakers.
+
+ To compile this driver as a module, choose M here: the
+ module will be called dsbr100.
+
+config RADIO_MAXIRADIO
+ tristate "Guillemot MAXI Radio FM 2000 radio"
+ depends on VIDEO_V4L2 && PCI
+ ---help---
+ Choose Y here if you have this radio card. This card may also be
+ found as Gemtek PCI FM.
+
+ In order to control your radio card, you will need to use programs
+ that are compatible with the Video For Linux API. Information on
+ this API and pointers to "v4l" programs may be found at
+ <file:Documentation/video4linux/API.html>.
+
+ To compile this driver as a module, choose M here: the
+ module will be called radio-maxiradio.
+
+
+config I2C_SI4713
+ tristate "I2C driver for Silicon Labs Si4713 device"
+ depends on I2C && VIDEO_V4L2
+ ---help---
+ Say Y here if you want support to Si4713 I2C device.
+ This device driver supports only i2c bus.
+
+ To compile this driver as a module, choose M here: the
+ module will be called si4713.
+
+config RADIO_SI4713
+ tristate "Silicon Labs Si4713 FM Radio Transmitter support"
+ depends on I2C && VIDEO_V4L2
+ select I2C_SI4713
+ ---help---
+ Say Y here if you want support to Si4713 FM Radio Transmitter.
+ This device can transmit audio through FM. It can transmit
+ RDS and RBDS signals as well. This module is the v4l2 radio
+ interface for the i2c driver of this device.
+
+ To compile this driver as a module, choose M here: the
+ module will be called radio-si4713.
+
+config RADIO_TEA5764
+ tristate "TEA5764 I2C FM radio support"
+ depends on I2C && VIDEO_V4L2
+ ---help---
+ Say Y here if you want to use the TEA5764 FM chip found in
+ EZX phones. This FM chip is present in EZX phones from Motorola,
+ connected to internal pxa I2C bus.
+
+ To compile this driver as a module, choose M here: the
+ module will be called radio-tea5764.
+
+config RADIO_TEA5764_XTAL
+ bool "TEA5764 crystal reference"
+ depends on RADIO_TEA5764=y
+ default y
+ help
+ Say Y here if TEA5764 have a 32768 Hz crystal in circuit, say N
+ here if TEA5764 reference frequency is connected in FREQIN.
+
+config RADIO_SAA7706H
+ tristate "SAA7706H Car Radio DSP"
+ depends on I2C && VIDEO_V4L2
+ ---help---
+ Say Y here if you want to use the SAA7706H Car radio Digital
+ Signal Processor, found for instance on the Russellville development
+ board. On the russellville the device is connected to internal
+ timberdale I2C bus.
+
+ To compile this driver as a module, choose M here: the
+ module will be called SAA7706H.
+
+config RADIO_TEF6862
+ tristate "TEF6862 Car Radio Enhanced Selectivity Tuner"
+ depends on I2C && VIDEO_V4L2
+ ---help---
+ Say Y here if you want to use the TEF6862 Car Radio Enhanced
+ Selectivity Tuner, found for instance on the Russellville development
+ board. On the russellville the device is connected to internal
+ timberdale I2C bus.
+
+ To compile this driver as a module, choose M here: the
+ module will be called TEF6862.
+
+config RADIO_TIMBERDALE
+ tristate "Enable the Timberdale radio driver"
+ depends on MFD_TIMBERDALE && VIDEO_V4L2
+ depends on I2C # for RADIO_SAA7706H
+ select RADIO_TEF6862
+ select RADIO_SAA7706H
+ ---help---
+ This is a kind of umbrella driver for the Radio Tuner and DSP
+ found behind the Timberdale FPGA on the Russellville board.
+ Enabling this driver will automatically select the DSP and tuner.
+
+config RADIO_WL1273
+ tristate "Texas Instruments WL1273 I2C FM Radio"
+ depends on I2C && VIDEO_V4L2
+ select MFD_CORE
+ select MFD_WL1273_CORE
+ select FW_LOADER
+ ---help---
+ Choose Y here if you have this FM radio chip.
+
+ In order to control your radio card, you will need to use programs
+ that are compatible with the Video For Linux 2 API. Information on
+ this API and pointers to "v4l2" programs may be found at
+ <file:Documentation/video4linux/API.html>.
+
+ To compile this driver as a module, choose M here: the
+ module will be called radio-wl1273.
+
+# TI's ST based wl128x FM radio
+source "drivers/media/radio/wl128x/Kconfig"
+
+#
+# ISA drivers configuration
+#
+
+menuconfig V4L_RADIO_ISA_DRIVERS
+ bool "ISA radio devices"
+ depends on ISA
+ default n
+ ---help---
+ Say Y here to enable support for these ISA drivers.
+
+if V4L_RADIO_ISA_DRIVERS
+
config RADIO_CADET
tristate "ADS Cadet AM/FM Tuner"
depends on ISA && VIDEO_V4L2
@@ -151,21 +307,6 @@ config RADIO_GEMTEK_PROBE
following ports will be probed: 0x20c, 0x30c, 0x24c, 0x34c, 0x248 and
0x28c.
-config RADIO_MAXIRADIO
- tristate "Guillemot MAXI Radio FM 2000 radio"
- depends on VIDEO_V4L2 && PCI
- ---help---
- Choose Y here if you have this radio card. This card may also be
- found as Gemtek PCI FM.
-
- In order to control your radio card, you will need to use programs
- that are compatible with the Video For Linux API. Information on
- this API and pointers to "v4l" programs may be found at
- <file:Documentation/video4linux/API.html>.
-
- To compile this driver as a module, choose M here: the
- module will be called radio-maxiradio.
-
config RADIO_MIROPCM20
tristate "miroSOUND PCM20 radio"
depends on ISA && ISA_DMA_API && VIDEO_V4L2 && SND
@@ -316,130 +457,6 @@ config RADIO_ZOLTRIX_PORT
help
Enter the I/O port of your Zoltrix radio card.
-config I2C_SI4713
- tristate "I2C driver for Silicon Labs Si4713 device"
- depends on I2C && VIDEO_V4L2
- ---help---
- Say Y here if you want support to Si4713 I2C device.
- This device driver supports only i2c bus.
-
- To compile this driver as a module, choose M here: the
- module will be called si4713.
-
-config RADIO_SI4713
- tristate "Silicon Labs Si4713 FM Radio Transmitter support"
- depends on I2C && VIDEO_V4L2
- select I2C_SI4713
- ---help---
- Say Y here if you want support to Si4713 FM Radio Transmitter.
- This device can transmit audio through FM. It can transmit
- RDS and RBDS signals as well. This module is the v4l2 radio
- interface for the i2c driver of this device.
-
- To compile this driver as a module, choose M here: the
- module will be called radio-si4713.
-
-config USB_DSBR
- tristate "D-Link/GemTek USB FM radio support"
- depends on USB && VIDEO_V4L2
- ---help---
- Say Y here if you want to connect this type of radio to your
- computer's USB port. Note that the audio is not digital, and
- you must connect the line out connector to a sound card or a
- set of speakers.
-
- To compile this driver as a module, choose M here: the
- module will be called dsbr100.
-
-config RADIO_SI470X
- bool "Silicon Labs Si470x FM Radio Receiver support"
- depends on VIDEO_V4L2
-
-source "drivers/media/radio/si470x/Kconfig"
-
-config USB_MR800
- tristate "AverMedia MR 800 USB FM radio support"
- depends on USB && VIDEO_V4L2
- ---help---
- Say Y here if you want to connect this type of radio to your
- computer's USB port. Note that the audio is not digital, and
- you must connect the line out connector to a sound card or a
- set of speakers.
-
- To compile this driver as a module, choose M here: the
- module will be called radio-mr800.
-
-config RADIO_TEA5764
- tristate "TEA5764 I2C FM radio support"
- depends on I2C && VIDEO_V4L2
- ---help---
- Say Y here if you want to use the TEA5764 FM chip found in
- EZX phones. This FM chip is present in EZX phones from Motorola,
- connected to internal pxa I2C bus.
-
- To compile this driver as a module, choose M here: the
- module will be called radio-tea5764.
-
-config RADIO_TEA5764_XTAL
- bool "TEA5764 crystal reference"
- depends on RADIO_TEA5764=y
- default y
- help
- Say Y here if TEA5764 have a 32768 Hz crystal in circuit, say N
- here if TEA5764 reference frequency is connected in FREQIN.
-
-config RADIO_SAA7706H
- tristate "SAA7706H Car Radio DSP"
- depends on I2C && VIDEO_V4L2
- ---help---
- Say Y here if you want to use the SAA7706H Car radio Digital
- Signal Processor, found for instance on the Russellville development
- board. On the russellville the device is connected to internal
- timberdale I2C bus.
-
- To compile this driver as a module, choose M here: the
- module will be called SAA7706H.
-
-config RADIO_TEF6862
- tristate "TEF6862 Car Radio Enhanced Selectivity Tuner"
- depends on I2C && VIDEO_V4L2
- ---help---
- Say Y here if you want to use the TEF6862 Car Radio Enhanced
- Selectivity Tuner, found for instance on the Russellville development
- board. On the russellville the device is connected to internal
- timberdale I2C bus.
-
- To compile this driver as a module, choose M here: the
- module will be called TEF6862.
-
-config RADIO_TIMBERDALE
- tristate "Enable the Timberdale radio driver"
- depends on MFD_TIMBERDALE && VIDEO_V4L2
- depends on I2C # for RADIO_SAA7706H
- select RADIO_TEF6862
- select RADIO_SAA7706H
- ---help---
- This is a kind of umbrella driver for the Radio Tuner and DSP
- found behind the Timberdale FPGA on the Russellville board.
- Enabling this driver will automatically select the DSP and tuner.
-
-config RADIO_WL1273
- tristate "Texas Instruments WL1273 I2C FM Radio"
- depends on I2C && VIDEO_V4L2
- select MFD_WL1273_CORE
- select FW_LOADER
- ---help---
- Choose Y here if you have this FM radio chip.
-
- In order to control your radio card, you will need to use programs
- that are compatible with the Video For Linux 2 API. Information on
- this API and pointers to "v4l2" programs may be found at
- <file:Documentation/video4linux/API.html>.
-
- To compile this driver as a module, choose M here: the
- module will be called radio-wl1273.
-
-# TI's ST based wl128x FM radio
-source "drivers/media/radio/wl128x/Kconfig"
+endif # V4L_RADIO_ISA_DRIVERS
endif # RADIO_ADAPTERS
diff --git a/drivers/media/radio/radio-gemtek.c b/drivers/media/radio/radio-gemtek.c
index edadc8449a3d..36ce0611c037 100644
--- a/drivers/media/radio/radio-gemtek.c
+++ b/drivers/media/radio/radio-gemtek.c
@@ -47,11 +47,11 @@ MODULE_VERSION("0.0.4");
#endif
static int io = CONFIG_RADIO_GEMTEK_PORT;
-static int probe = CONFIG_RADIO_GEMTEK_PROBE;
-static int hardmute;
-static int shutdown = 1;
-static int keepmuted = 1;
-static int initmute = 1;
+static bool probe = CONFIG_RADIO_GEMTEK_PROBE;
+static bool hardmute;
+static bool shutdown = 1;
+static bool keepmuted = 1;
+static bool initmute = 1;
static int radio_nr = -1;
module_param(io, int, 0444);
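The radio-gemtek and radio-miropcm20 changes appear to follow the module_param rule tightened around this kernel release: a parameter declared with the "bool" type must be backed by a C bool variable rather than an int. Minimal sketch (parameter name and description are made up):

static bool initmute = true;		/* was: static int initmute = 1; */
module_param(initmute, bool, 0444);
MODULE_PARM_DESC(initmute, "Mute the card when the driver loads.");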
diff --git a/drivers/media/radio/radio-miropcm20.c b/drivers/media/radio/radio-miropcm20.c
index 3fb76e3834c9..87c1ee13b058 100644
--- a/drivers/media/radio/radio-miropcm20.c
+++ b/drivers/media/radio/radio-miropcm20.c
@@ -23,7 +23,7 @@ static int radio_nr = -1;
module_param(radio_nr, int, 0);
MODULE_PARM_DESC(radio_nr, "Set radio device number (/dev/radioX). Default: -1 (autodetect)");
-static int mono;
+static bool mono;
module_param(mono, bool, 0);
MODULE_PARM_DESC(mono, "Force tuner into mono mode.");
diff --git a/drivers/media/radio/radio-si4713.c b/drivers/media/radio/radio-si4713.c
index d1fab5885061..c54210c7fef9 100644
--- a/drivers/media/radio/radio-si4713.c
+++ b/drivers/media/radio/radio-si4713.c
@@ -355,17 +355,4 @@ static struct platform_driver radio_si4713_pdriver = {
.remove = __exit_p(radio_si4713_pdriver_remove),
};
-/* Module Interface */
-static int __init radio_si4713_module_init(void)
-{
- return platform_driver_register(&radio_si4713_pdriver);
-}
-
-static void __exit radio_si4713_module_exit(void)
-{
- platform_driver_unregister(&radio_si4713_pdriver);
-}
-
-module_init(radio_si4713_module_init);
-module_exit(radio_si4713_module_exit);
-
+module_platform_driver(radio_si4713_pdriver);
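radio-si4713, radio-timb and radio-wl1273 all drop their hand-rolled __init/__exit pairs in favour of the module_platform_driver() helper, which generates exactly that register/unregister boilerplate. Sketch of the resulting file tail (driver name and callbacks are hypothetical):

static struct platform_driver example_pdriver = {
	.driver = {
		.name = "example-radio",
		.owner = THIS_MODULE,
	},
	.probe  = example_probe,
	.remove = example_remove,
};

/* replaces the open-coded module_init()/module_exit() registration functions */
module_platform_driver(example_pdriver);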
diff --git a/drivers/media/radio/radio-timb.c b/drivers/media/radio/radio-timb.c
index 3e9209f84e09..5d9a90ac3a1c 100644
--- a/drivers/media/radio/radio-timb.c
+++ b/drivers/media/radio/radio-timb.c
@@ -226,20 +226,7 @@ static struct platform_driver timbradio_platform_driver = {
.remove = timbradio_remove,
};
-/*--------------------------------------------------------------------------*/
-
-static int __init timbradio_init(void)
-{
- return platform_driver_register(&timbradio_platform_driver);
-}
-
-static void __exit timbradio_exit(void)
-{
- platform_driver_unregister(&timbradio_platform_driver);
-}
-
-module_init(timbradio_init);
-module_exit(timbradio_exit);
+module_platform_driver(timbradio_platform_driver);
MODULE_DESCRIPTION("Timberdale Radio driver");
MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
diff --git a/drivers/media/radio/radio-wl1273.c b/drivers/media/radio/radio-wl1273.c
index 8aa4968d57bc..f1b607099b6c 100644
--- a/drivers/media/radio/radio-wl1273.c
+++ b/drivers/media/radio/radio-wl1273.c
@@ -2148,8 +2148,6 @@ pdata_err:
return r;
}
-MODULE_ALIAS("platform:wl1273_fm_radio");
-
static struct platform_driver wl1273_fm_radio_driver = {
.probe = wl1273_fm_radio_probe,
.remove = __devexit_p(wl1273_fm_radio_remove),
@@ -2159,20 +2157,9 @@ static struct platform_driver wl1273_fm_radio_driver = {
},
};
-static int __init wl1273_fm_module_init(void)
-{
- pr_info("%s\n", __func__);
- return platform_driver_register(&wl1273_fm_radio_driver);
-}
-module_init(wl1273_fm_module_init);
-
-static void __exit wl1273_fm_module_exit(void)
-{
- platform_driver_unregister(&wl1273_fm_radio_driver);
- pr_info(DRIVER_DESC ", Exiting.\n");
-}
-module_exit(wl1273_fm_module_exit);
+module_platform_driver(wl1273_fm_radio_driver);
MODULE_AUTHOR("Matti Aaltonen <matti.j.aaltonen@nokia.com>");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:wl1273_fm_radio");
diff --git a/drivers/media/radio/tef6862.c b/drivers/media/radio/tef6862.c
index 0991e1973678..3408685b690c 100644
--- a/drivers/media/radio/tef6862.c
+++ b/drivers/media/radio/tef6862.c
@@ -118,9 +118,11 @@ static int tef6862_s_frequency(struct v4l2_subdev *sd, struct v4l2_frequency *f)
i2cmsg[2] = pll & 0xff;
err = i2c_master_send(client, i2cmsg, sizeof(i2cmsg));
- if (!err)
- state->freq = f->frequency;
- return err;
+ if (err != sizeof(i2cmsg))
+ return err < 0 ? err : -EIO;
+
+ state->freq = f->frequency;
+ return 0;
}
static int tef6862_g_frequency(struct v4l2_subdev *sd, struct v4l2_frequency *f)
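The tef6862 change above illustrates a common I2C pitfall: i2c_master_send() returns the number of bytes written on success, so a bare "if (!err)" never triggers on a good transfer and the positive byte count leaks out as the return value. The corrected idiom, as a standalone sketch with a hypothetical helper:

/* send a fixed-size I2C message, normalising the return code for callers */
static int example_i2c_send_all(struct i2c_client *client,
				const char *buf, int len)
{
	int ret = i2c_master_send(client, buf, len);

	if (ret == len)
		return 0;			/* whole message went out */
	return ret < 0 ? ret : -EIO;		/* short write maps to -EIO */
}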
diff --git a/drivers/media/radio/wl128x/Kconfig b/drivers/media/radio/wl128x/Kconfig
index 749f67b192e7..86b28579f0c7 100644
--- a/drivers/media/radio/wl128x/Kconfig
+++ b/drivers/media/radio/wl128x/Kconfig
@@ -5,7 +5,7 @@ menu "Texas Instruments WL128x FM driver (ST based)"
config RADIO_WL128X
tristate "Texas Instruments WL128x FM Radio"
depends on VIDEO_V4L2 && RFKILL
- select TI_ST
+ select TI_ST if NET && GPIOLIB
help
Choose Y here if you have this FM radio chip.
diff --git a/drivers/media/radio/wl128x/fmdrv_common.c b/drivers/media/radio/wl128x/fmdrv_common.c
index 5991ab60303d..bf867a6b5ea0 100644
--- a/drivers/media/radio/wl128x/fmdrv_common.c
+++ b/drivers/media/radio/wl128x/fmdrv_common.c
@@ -387,7 +387,7 @@ static void send_tasklet(unsigned long arg)
* Queues FM Channel-8 packet to FM TX queue and schedules FM TX tasklet for
* transmission
*/
-static u32 fm_send_cmd(struct fmdev *fmdev, u8 fm_op, u16 type, void *payload,
+static int fm_send_cmd(struct fmdev *fmdev, u8 fm_op, u16 type, void *payload,
int payload_len, struct completion *wait_completion)
{
struct sk_buff *skb;
@@ -456,13 +456,13 @@ static u32 fm_send_cmd(struct fmdev *fmdev, u8 fm_op, u16 type, void *payload,
}
/* Sends FM Channel-8 command to the chip and waits for the response */
-u32 fmc_send_cmd(struct fmdev *fmdev, u8 fm_op, u16 type, void *payload,
+int fmc_send_cmd(struct fmdev *fmdev, u8 fm_op, u16 type, void *payload,
unsigned int payload_len, void *response, int *response_len)
{
struct sk_buff *skb;
struct fm_event_msg_hdr *evt_hdr;
unsigned long flags;
- u32 ret;
+ int ret;
init_completion(&fmdev->maintask_comp);
ret = fm_send_cmd(fmdev, fm_op, type, payload, payload_len,
@@ -470,8 +470,8 @@ u32 fmc_send_cmd(struct fmdev *fmdev, u8 fm_op, u16 type, void *payload,
if (ret)
return ret;
- ret = wait_for_completion_timeout(&fmdev->maintask_comp, FM_DRV_TX_TIMEOUT);
- if (!ret) {
+ if (!wait_for_completion_timeout(&fmdev->maintask_comp,
+ FM_DRV_TX_TIMEOUT)) {
fmerr("Timeout(%d sec),didn't get reg"
"completion signal from RX tasklet\n",
jiffies_to_msecs(FM_DRV_TX_TIMEOUT) / 1000);
@@ -508,7 +508,7 @@ u32 fmc_send_cmd(struct fmdev *fmdev, u8 fm_op, u16 type, void *payload,
}
/* --- Helper functions used in FM interrupt handlers ---*/
-static inline u32 check_cmdresp_status(struct fmdev *fmdev,
+static inline int check_cmdresp_status(struct fmdev *fmdev,
struct sk_buff **skb)
{
struct fm_event_msg_hdr *fm_evt_hdr;
@@ -1058,7 +1058,7 @@ static void fm_irq_handle_intmsk_cmd_resp(struct fmdev *fmdev)
}
/* Returns availability of RDS data in internel buffer */
-u32 fmc_is_rds_data_available(struct fmdev *fmdev, struct file *file,
+int fmc_is_rds_data_available(struct fmdev *fmdev, struct file *file,
struct poll_table_struct *pts)
{
poll_wait(file, &fmdev->rx.rds.read_queue, pts);
@@ -1069,7 +1069,7 @@ u32 fmc_is_rds_data_available(struct fmdev *fmdev, struct file *file,
}
/* Copies RDS data from internal buffer to user buffer */
-u32 fmc_transfer_rds_from_internal_buff(struct fmdev *fmdev, struct file *file,
+int fmc_transfer_rds_from_internal_buff(struct fmdev *fmdev, struct file *file,
u8 __user *buf, size_t count)
{
u32 block_count;
@@ -1113,7 +1113,7 @@ u32 fmc_transfer_rds_from_internal_buff(struct fmdev *fmdev, struct file *file,
return ret;
}
-u32 fmc_set_freq(struct fmdev *fmdev, u32 freq_to_set)
+int fmc_set_freq(struct fmdev *fmdev, u32 freq_to_set)
{
switch (fmdev->curr_fmmode) {
case FM_MODE_RX:
@@ -1127,7 +1127,7 @@ u32 fmc_set_freq(struct fmdev *fmdev, u32 freq_to_set)
}
}
-u32 fmc_get_freq(struct fmdev *fmdev, u32 *cur_tuned_frq)
+int fmc_get_freq(struct fmdev *fmdev, u32 *cur_tuned_frq)
{
if (fmdev->rx.freq == FM_UNDEFINED_FREQ) {
fmerr("RX frequency is not set\n");
@@ -1153,7 +1153,7 @@ u32 fmc_get_freq(struct fmdev *fmdev, u32 *cur_tuned_frq)
}
-u32 fmc_set_region(struct fmdev *fmdev, u8 region_to_set)
+int fmc_set_region(struct fmdev *fmdev, u8 region_to_set)
{
switch (fmdev->curr_fmmode) {
case FM_MODE_RX:
@@ -1167,7 +1167,7 @@ u32 fmc_set_region(struct fmdev *fmdev, u8 region_to_set)
}
}
-u32 fmc_set_mute_mode(struct fmdev *fmdev, u8 mute_mode_toset)
+int fmc_set_mute_mode(struct fmdev *fmdev, u8 mute_mode_toset)
{
switch (fmdev->curr_fmmode) {
case FM_MODE_RX:
@@ -1181,7 +1181,7 @@ u32 fmc_set_mute_mode(struct fmdev *fmdev, u8 mute_mode_toset)
}
}
-u32 fmc_set_stereo_mono(struct fmdev *fmdev, u16 mode)
+int fmc_set_stereo_mono(struct fmdev *fmdev, u16 mode)
{
switch (fmdev->curr_fmmode) {
case FM_MODE_RX:
@@ -1195,7 +1195,7 @@ u32 fmc_set_stereo_mono(struct fmdev *fmdev, u16 mode)
}
}
-u32 fmc_set_rds_mode(struct fmdev *fmdev, u8 rds_en_dis)
+int fmc_set_rds_mode(struct fmdev *fmdev, u8 rds_en_dis)
{
switch (fmdev->curr_fmmode) {
case FM_MODE_RX:
@@ -1210,10 +1210,10 @@ u32 fmc_set_rds_mode(struct fmdev *fmdev, u8 rds_en_dis)
}
/* Sends power off command to the chip */
-static u32 fm_power_down(struct fmdev *fmdev)
+static int fm_power_down(struct fmdev *fmdev)
{
u16 payload;
- u32 ret;
+ int ret;
if (!test_bit(FM_CORE_READY, &fmdev->flag)) {
fmerr("FM core is not ready\n");
@@ -1234,7 +1234,7 @@ static u32 fm_power_down(struct fmdev *fmdev)
}
/* Reads init command from FM firmware file and loads to the chip */
-static u32 fm_download_firmware(struct fmdev *fmdev, const u8 *fw_name)
+static int fm_download_firmware(struct fmdev *fmdev, const u8 *fw_name)
{
const struct firmware *fw_entry;
struct bts_header *fw_header;
@@ -1299,7 +1299,7 @@ rel_fw:
}
/* Loads default RX configuration to the chip */
-static u32 load_default_rx_configuration(struct fmdev *fmdev)
+static int load_default_rx_configuration(struct fmdev *fmdev)
{
int ret;
@@ -1311,7 +1311,7 @@ static u32 load_default_rx_configuration(struct fmdev *fmdev)
}
/* Does FM power on sequence */
-static u32 fm_power_up(struct fmdev *fmdev, u8 mode)
+static int fm_power_up(struct fmdev *fmdev, u8 mode)
{
u16 payload, asic_id, asic_ver;
int resp_len, ret;
@@ -1374,7 +1374,7 @@ rel:
}
/* Set FM Modes(TX, RX, OFF) */
-u32 fmc_set_mode(struct fmdev *fmdev, u8 fm_mode)
+int fmc_set_mode(struct fmdev *fmdev, u8 fm_mode)
{
int ret = 0;
@@ -1427,7 +1427,7 @@ u32 fmc_set_mode(struct fmdev *fmdev, u8 fm_mode)
}
/* Returns current FM mode (TX, RX, OFF) */
-u32 fmc_get_mode(struct fmdev *fmdev, u8 *fmmode)
+int fmc_get_mode(struct fmdev *fmdev, u8 *fmmode)
{
if (!test_bit(FM_CORE_READY, &fmdev->flag)) {
fmerr("FM core is not ready\n");
@@ -1483,10 +1483,10 @@ static void fm_st_reg_comp_cb(void *arg, char data)
* This function will be called from FM V4L2 open function.
* Register with ST driver and initialize driver data.
*/
-u32 fmc_prepare(struct fmdev *fmdev)
+int fmc_prepare(struct fmdev *fmdev)
{
static struct st_proto_s fm_st_proto;
- u32 ret;
+ int ret;
if (test_bit(FM_CORE_READY, &fmdev->flag)) {
fmdbg("FM Core is already up\n");
@@ -1512,10 +1512,8 @@ u32 fmc_prepare(struct fmdev *fmdev)
fmdev->streg_cbdata = -EINPROGRESS;
fmdbg("%s waiting for ST reg completion signal\n", __func__);
- ret = wait_for_completion_timeout(&wait_for_fmdrv_reg_comp,
- FM_ST_REG_TIMEOUT);
-
- if (!ret) {
+ if (!wait_for_completion_timeout(&wait_for_fmdrv_reg_comp,
+ FM_ST_REG_TIMEOUT)) {
fmerr("Timeout(%d sec), didn't get reg "
"completion signal from ST\n",
jiffies_to_msecs(FM_ST_REG_TIMEOUT) / 1000);
@@ -1589,10 +1587,10 @@ u32 fmc_prepare(struct fmdev *fmdev)
* This function will be called from FM V4L2 release function.
* Unregister from ST driver.
*/
-u32 fmc_release(struct fmdev *fmdev)
+int fmc_release(struct fmdev *fmdev)
{
static struct st_proto_s fm_st_proto;
- u32 ret;
+ int ret;
if (!test_bit(FM_CORE_READY, &fmdev->flag)) {
fmdbg("FM Core is already down\n");
@@ -1631,7 +1629,7 @@ u32 fmc_release(struct fmdev *fmdev)
static int __init fm_drv_init(void)
{
struct fmdev *fmdev = NULL;
- u32 ret = -ENOMEM;
+ int ret = -ENOMEM;
fmdbg("FM driver version %s\n", FM_DRV_VERSION);
diff --git a/drivers/media/radio/wl128x/fmdrv_common.h b/drivers/media/radio/wl128x/fmdrv_common.h
index aee243bb6630..d9b9c6cf83b4 100644
--- a/drivers/media/radio/wl128x/fmdrv_common.h
+++ b/drivers/media/radio/wl128x/fmdrv_common.h
@@ -368,27 +368,27 @@ struct fm_event_msg_hdr {
#define FM_TX_ANT_IMP_500 2
/* Functions exported by FM common sub-module */
-u32 fmc_prepare(struct fmdev *);
-u32 fmc_release(struct fmdev *);
+int fmc_prepare(struct fmdev *);
+int fmc_release(struct fmdev *);
void fmc_update_region_info(struct fmdev *, u8);
-u32 fmc_send_cmd(struct fmdev *, u8, u16,
+int fmc_send_cmd(struct fmdev *, u8, u16,
void *, unsigned int, void *, int *);
-u32 fmc_is_rds_data_available(struct fmdev *, struct file *,
+int fmc_is_rds_data_available(struct fmdev *, struct file *,
struct poll_table_struct *);
-u32 fmc_transfer_rds_from_internal_buff(struct fmdev *, struct file *,
+int fmc_transfer_rds_from_internal_buff(struct fmdev *, struct file *,
u8 __user *, size_t);
-u32 fmc_set_freq(struct fmdev *, u32);
-u32 fmc_set_mode(struct fmdev *, u8);
-u32 fmc_set_region(struct fmdev *, u8);
-u32 fmc_set_mute_mode(struct fmdev *, u8);
-u32 fmc_set_stereo_mono(struct fmdev *, u16);
-u32 fmc_set_rds_mode(struct fmdev *, u8);
+int fmc_set_freq(struct fmdev *, u32);
+int fmc_set_mode(struct fmdev *, u8);
+int fmc_set_region(struct fmdev *, u8);
+int fmc_set_mute_mode(struct fmdev *, u8);
+int fmc_set_stereo_mono(struct fmdev *, u16);
+int fmc_set_rds_mode(struct fmdev *, u8);
-u32 fmc_get_freq(struct fmdev *, u32 *);
-u32 fmc_get_region(struct fmdev *, u8 *);
-u32 fmc_get_mode(struct fmdev *, u8 *);
+int fmc_get_freq(struct fmdev *, u32 *);
+int fmc_get_region(struct fmdev *, u8 *);
+int fmc_get_mode(struct fmdev *, u8 *);
/*
* channel spacing
diff --git a/drivers/media/radio/wl128x/fmdrv_rx.c b/drivers/media/radio/wl128x/fmdrv_rx.c
index ec529b55b040..43fb72291bea 100644
--- a/drivers/media/radio/wl128x/fmdrv_rx.c
+++ b/drivers/media/radio/wl128x/fmdrv_rx.c
@@ -43,12 +43,13 @@ void fm_rx_reset_station_info(struct fmdev *fmdev)
fmdev->rx.stat_info.af_list_max = 0;
}
-u32 fm_rx_set_freq(struct fmdev *fmdev, u32 freq)
+int fm_rx_set_freq(struct fmdev *fmdev, u32 freq)
{
unsigned long timeleft;
u16 payload, curr_frq, intr_flag;
u32 curr_frq_in_khz;
- u32 ret, resp_len;
+ u32 resp_len;
+ int ret;
if (freq < fmdev->rx.region.bot_freq || freq > fmdev->rx.region.top_freq) {
fmerr("Invalid frequency %d\n", freq);
@@ -141,10 +142,10 @@ exit:
return ret;
}
-static u32 fm_rx_set_channel_spacing(struct fmdev *fmdev, u32 spacing)
+static int fm_rx_set_channel_spacing(struct fmdev *fmdev, u32 spacing)
{
u16 payload;
- u32 ret;
+ int ret;
if (spacing > 0 && spacing <= 50000)
spacing = FM_CHANNEL_SPACING_50KHZ;
@@ -165,7 +166,7 @@ static u32 fm_rx_set_channel_spacing(struct fmdev *fmdev, u32 spacing)
return ret;
}
-u32 fm_rx_seek(struct fmdev *fmdev, u32 seek_upward,
+int fm_rx_seek(struct fmdev *fmdev, u32 seek_upward,
u32 wrap_around, u32 spacing)
{
u32 resp_len;
@@ -173,7 +174,7 @@ u32 fm_rx_seek(struct fmdev *fmdev, u32 seek_upward,
u16 payload, int_reason, intr_flag;
u16 offset, space_idx;
unsigned long timeleft;
- u32 ret;
+ int ret;
/* Set channel spacing */
ret = fm_rx_set_channel_spacing(fmdev, spacing);
@@ -296,10 +297,10 @@ again:
return ret;
}
-u32 fm_rx_set_volume(struct fmdev *fmdev, u16 vol_to_set)
+int fm_rx_set_volume(struct fmdev *fmdev, u16 vol_to_set)
{
u16 payload;
- u32 ret;
+ int ret;
if (fmdev->curr_fmmode != FM_MODE_RX)
return -EPERM;
@@ -322,7 +323,7 @@ u32 fm_rx_set_volume(struct fmdev *fmdev, u16 vol_to_set)
}
/* Get volume */
-u32 fm_rx_get_volume(struct fmdev *fmdev, u16 *curr_vol)
+int fm_rx_get_volume(struct fmdev *fmdev, u16 *curr_vol)
{
if (fmdev->curr_fmmode != FM_MODE_RX)
return -EPERM;
@@ -338,7 +339,7 @@ u32 fm_rx_get_volume(struct fmdev *fmdev, u16 *curr_vol)
}
/* To get current band's bottom and top frequency */
-u32 fm_rx_get_band_freq_range(struct fmdev *fmdev, u32 *bot_freq, u32 *top_freq)
+int fm_rx_get_band_freq_range(struct fmdev *fmdev, u32 *bot_freq, u32 *top_freq)
{
if (bot_freq != NULL)
*bot_freq = fmdev->rx.region.bot_freq;
@@ -356,11 +357,11 @@ void fm_rx_get_region(struct fmdev *fmdev, u8 *region)
}
/* Sets band (0-Europe/US; 1-Japan) */
-u32 fm_rx_set_region(struct fmdev *fmdev, u8 region_to_set)
+int fm_rx_set_region(struct fmdev *fmdev, u8 region_to_set)
{
u16 payload;
u32 new_frq = 0;
- u32 ret;
+ int ret;
if (region_to_set != FM_BAND_EUROPE_US &&
region_to_set != FM_BAND_JAPAN) {
@@ -399,7 +400,7 @@ u32 fm_rx_set_region(struct fmdev *fmdev, u8 region_to_set)
}
/* Reads current mute mode (Mute Off/On/Attenuate)*/
-u32 fm_rx_get_mute_mode(struct fmdev *fmdev, u8 *curr_mute_mode)
+int fm_rx_get_mute_mode(struct fmdev *fmdev, u8 *curr_mute_mode)
{
if (fmdev->curr_fmmode != FM_MODE_RX)
return -EPERM;
@@ -414,10 +415,10 @@ u32 fm_rx_get_mute_mode(struct fmdev *fmdev, u8 *curr_mute_mode)
return 0;
}
-static u32 fm_config_rx_mute_reg(struct fmdev *fmdev)
+static int fm_config_rx_mute_reg(struct fmdev *fmdev)
{
u16 payload, muteval;
- u32 ret;
+ int ret;
muteval = 0;
switch (fmdev->rx.mute_mode) {
@@ -448,10 +449,10 @@ static u32 fm_config_rx_mute_reg(struct fmdev *fmdev)
}
/* Configures mute mode (Mute Off/On/Attenuate) */
-u32 fm_rx_set_mute_mode(struct fmdev *fmdev, u8 mute_mode_toset)
+int fm_rx_set_mute_mode(struct fmdev *fmdev, u8 mute_mode_toset)
{
u8 org_state;
- u32 ret;
+ int ret;
if (fmdev->rx.mute_mode == mute_mode_toset)
return 0;
@@ -469,7 +470,7 @@ u32 fm_rx_set_mute_mode(struct fmdev *fmdev, u8 mute_mode_toset)
}
/* Gets RF dependent soft mute mode enable/disable status */
-u32 fm_rx_get_rfdepend_softmute(struct fmdev *fmdev, u8 *curr_mute_mode)
+int fm_rx_get_rfdepend_softmute(struct fmdev *fmdev, u8 *curr_mute_mode)
{
if (fmdev->curr_fmmode != FM_MODE_RX)
return -EPERM;
@@ -485,10 +486,10 @@ u32 fm_rx_get_rfdepend_softmute(struct fmdev *fmdev, u8 *curr_mute_mode)
}
/* Sets RF dependent soft mute mode */
-u32 fm_rx_set_rfdepend_softmute(struct fmdev *fmdev, u8 rfdepend_mute)
+int fm_rx_set_rfdepend_softmute(struct fmdev *fmdev, u8 rfdepend_mute)
{
u8 org_state;
- u32 ret;
+ int ret;
if (fmdev->curr_fmmode != FM_MODE_RX)
return -EPERM;
@@ -514,11 +515,11 @@ u32 fm_rx_set_rfdepend_softmute(struct fmdev *fmdev, u8 rfdepend_mute)
}
/* Returns the signal strength level of current channel */
-u32 fm_rx_get_rssi_level(struct fmdev *fmdev, u16 *rssilvl)
+int fm_rx_get_rssi_level(struct fmdev *fmdev, u16 *rssilvl)
{
u16 curr_rssi_lel;
u32 resp_len;
- u32 ret;
+ int ret;
if (rssilvl == NULL) {
fmerr("Invalid memory\n");
@@ -539,10 +540,10 @@ u32 fm_rx_get_rssi_level(struct fmdev *fmdev, u16 *rssilvl)
* Sets the signal strength level that once reached
* will stop the auto search process
*/
-u32 fm_rx_set_rssi_threshold(struct fmdev *fmdev, short rssi_lvl_toset)
+int fm_rx_set_rssi_threshold(struct fmdev *fmdev, short rssi_lvl_toset)
{
u16 payload;
- u32 ret;
+ int ret;
if (rssi_lvl_toset < FM_RX_RSSI_THRESHOLD_MIN ||
rssi_lvl_toset > FM_RX_RSSI_THRESHOLD_MAX) {
@@ -561,7 +562,7 @@ u32 fm_rx_set_rssi_threshold(struct fmdev *fmdev, short rssi_lvl_toset)
}
/* Returns current RX RSSI threshold value */
-u32 fm_rx_get_rssi_threshold(struct fmdev *fmdev, short *curr_rssi_lvl)
+int fm_rx_get_rssi_threshold(struct fmdev *fmdev, short *curr_rssi_lvl)
{
if (fmdev->curr_fmmode != FM_MODE_RX)
return -EPERM;
@@ -577,10 +578,10 @@ u32 fm_rx_get_rssi_threshold(struct fmdev *fmdev, short *curr_rssi_lvl)
}
/* Sets RX stereo/mono modes */
-u32 fm_rx_set_stereo_mono(struct fmdev *fmdev, u16 mode)
+int fm_rx_set_stereo_mono(struct fmdev *fmdev, u16 mode)
{
u16 payload;
- u32 ret;
+ int ret;
if (mode != FM_STEREO_MODE && mode != FM_MONO_MODE) {
fmerr("Invalid mode\n");
@@ -605,10 +606,11 @@ u32 fm_rx_set_stereo_mono(struct fmdev *fmdev, u16 mode)
}
/* Gets current RX stereo/mono mode */
-u32 fm_rx_get_stereo_mono(struct fmdev *fmdev, u16 *mode)
+int fm_rx_get_stereo_mono(struct fmdev *fmdev, u16 *mode)
{
u16 curr_mode;
- u32 ret, resp_len;
+ u32 resp_len;
+ int ret;
if (mode == NULL) {
fmerr("Invalid memory\n");
@@ -626,10 +628,10 @@ u32 fm_rx_get_stereo_mono(struct fmdev *fmdev, u16 *mode)
}
/* Choose RX de-emphasis filter mode (50us/75us) */
-u32 fm_rx_set_deemphasis_mode(struct fmdev *fmdev, u16 mode)
+int fm_rx_set_deemphasis_mode(struct fmdev *fmdev, u16 mode)
{
u16 payload;
- u32 ret;
+ int ret;
if (fmdev->curr_fmmode != FM_MODE_RX)
return -EPERM;
@@ -652,7 +654,7 @@ u32 fm_rx_set_deemphasis_mode(struct fmdev *fmdev, u16 mode)
}
/* Gets current RX de-emphasis filter mode */
-u32 fm_rx_get_deemph_mode(struct fmdev *fmdev, u16 *curr_deemphasis_mode)
+int fm_rx_get_deemph_mode(struct fmdev *fmdev, u16 *curr_deemphasis_mode)
{
if (fmdev->curr_fmmode != FM_MODE_RX)
return -EPERM;
@@ -668,10 +670,10 @@ u32 fm_rx_get_deemph_mode(struct fmdev *fmdev, u16 *curr_deemphasis_mode)
}
/* Enable/Disable RX RDS */
-u32 fm_rx_set_rds_mode(struct fmdev *fmdev, u8 rds_en_dis)
+int fm_rx_set_rds_mode(struct fmdev *fmdev, u8 rds_en_dis)
{
u16 payload;
- u32 ret;
+ int ret;
if (rds_en_dis != FM_RDS_ENABLE && rds_en_dis != FM_RDS_DISABLE) {
fmerr("Invalid rds option\n");
@@ -743,7 +745,7 @@ u32 fm_rx_set_rds_mode(struct fmdev *fmdev, u8 rds_en_dis)
}
/* Returns current RX RDS enable/disable status */
-u32 fm_rx_get_rds_mode(struct fmdev *fmdev, u8 *curr_rds_en_dis)
+int fm_rx_get_rds_mode(struct fmdev *fmdev, u8 *curr_rds_en_dis)
{
if (fmdev->curr_fmmode != FM_MODE_RX)
return -EPERM;
@@ -759,10 +761,10 @@ u32 fm_rx_get_rds_mode(struct fmdev *fmdev, u8 *curr_rds_en_dis)
}
/* Sets RDS operation mode (RDS/RDBS) */
-u32 fm_rx_set_rds_system(struct fmdev *fmdev, u8 rds_mode)
+int fm_rx_set_rds_system(struct fmdev *fmdev, u8 rds_mode)
{
u16 payload;
- u32 ret;
+ int ret;
if (fmdev->curr_fmmode != FM_MODE_RX)
return -EPERM;
@@ -784,7 +786,7 @@ u32 fm_rx_set_rds_system(struct fmdev *fmdev, u8 rds_mode)
}
/* Returns current RDS operation mode */
-u32 fm_rx_get_rds_system(struct fmdev *fmdev, u8 *rds_mode)
+int fm_rx_get_rds_system(struct fmdev *fmdev, u8 *rds_mode)
{
if (fmdev->curr_fmmode != FM_MODE_RX)
return -EPERM;
@@ -800,10 +802,10 @@ u32 fm_rx_get_rds_system(struct fmdev *fmdev, u8 *rds_mode)
}
/* Configures Alternate Frequency switch mode */
-u32 fm_rx_set_af_switch(struct fmdev *fmdev, u8 af_mode)
+int fm_rx_set_af_switch(struct fmdev *fmdev, u8 af_mode)
{
u16 payload;
- u32 ret;
+ int ret;
if (fmdev->curr_fmmode != FM_MODE_RX)
return -EPERM;
@@ -831,7 +833,7 @@ u32 fm_rx_set_af_switch(struct fmdev *fmdev, u8 af_mode)
}
/* Returns Alternate Frequency switch status */
-u32 fm_rx_get_af_switch(struct fmdev *fmdev, u8 *af_mode)
+int fm_rx_get_af_switch(struct fmdev *fmdev, u8 *af_mode)
{
if (fmdev->curr_fmmode != FM_MODE_RX)
return -EPERM;
diff --git a/drivers/media/radio/wl128x/fmdrv_rx.h b/drivers/media/radio/wl128x/fmdrv_rx.h
index 329e62f6be76..32add81f8d87 100644
--- a/drivers/media/radio/wl128x/fmdrv_rx.h
+++ b/drivers/media/radio/wl128x/fmdrv_rx.h
@@ -22,38 +22,38 @@
#ifndef _FMDRV_RX_H
#define _FMDRV_RX_H
-u32 fm_rx_set_freq(struct fmdev *, u32);
-u32 fm_rx_set_mute_mode(struct fmdev *, u8);
-u32 fm_rx_set_stereo_mono(struct fmdev *, u16);
-u32 fm_rx_set_rds_mode(struct fmdev *, u8);
-u32 fm_rx_set_rds_system(struct fmdev *, u8);
-u32 fm_rx_set_volume(struct fmdev *, u16);
-u32 fm_rx_set_rssi_threshold(struct fmdev *, short);
-u32 fm_rx_set_region(struct fmdev *, u8);
-u32 fm_rx_set_rfdepend_softmute(struct fmdev *, u8);
-u32 fm_rx_set_deemphasis_mode(struct fmdev *, u16);
-u32 fm_rx_set_af_switch(struct fmdev *, u8);
+int fm_rx_set_freq(struct fmdev *, u32);
+int fm_rx_set_mute_mode(struct fmdev *, u8);
+int fm_rx_set_stereo_mono(struct fmdev *, u16);
+int fm_rx_set_rds_mode(struct fmdev *, u8);
+int fm_rx_set_rds_system(struct fmdev *, u8);
+int fm_rx_set_volume(struct fmdev *, u16);
+int fm_rx_set_rssi_threshold(struct fmdev *, short);
+int fm_rx_set_region(struct fmdev *, u8);
+int fm_rx_set_rfdepend_softmute(struct fmdev *, u8);
+int fm_rx_set_deemphasis_mode(struct fmdev *, u16);
+int fm_rx_set_af_switch(struct fmdev *, u8);
void fm_rx_reset_rds_cache(struct fmdev *);
void fm_rx_reset_station_info(struct fmdev *);
-u32 fm_rx_seek(struct fmdev *, u32, u32, u32);
+int fm_rx_seek(struct fmdev *, u32, u32, u32);
-u32 fm_rx_get_rds_mode(struct fmdev *, u8 *);
-u32 fm_rx_get_rds_system(struct fmdev *, u8 *);
-u32 fm_rx_get_mute_mode(struct fmdev *, u8 *);
-u32 fm_rx_get_volume(struct fmdev *, u16 *);
-u32 fm_rx_get_band_freq_range(struct fmdev *,
+int fm_rx_get_rds_mode(struct fmdev *, u8 *);
+int fm_rx_get_rds_system(struct fmdev *, u8 *);
+int fm_rx_get_mute_mode(struct fmdev *, u8 *);
+int fm_rx_get_volume(struct fmdev *, u16 *);
+int fm_rx_get_band_freq_range(struct fmdev *,
u32 *, u32 *);
-u32 fm_rx_get_stereo_mono(struct fmdev *, u16 *);
-u32 fm_rx_get_rssi_level(struct fmdev *, u16 *);
-u32 fm_rx_get_rssi_threshold(struct fmdev *, short *);
-u32 fm_rx_get_rfdepend_softmute(struct fmdev *, u8 *);
-u32 fm_rx_get_deemph_mode(struct fmdev *, u16 *);
-u32 fm_rx_get_af_switch(struct fmdev *, u8 *);
+int fm_rx_get_stereo_mono(struct fmdev *, u16 *);
+int fm_rx_get_rssi_level(struct fmdev *, u16 *);
+int fm_rx_get_rssi_threshold(struct fmdev *, short *);
+int fm_rx_get_rfdepend_softmute(struct fmdev *, u8 *);
+int fm_rx_get_deemph_mode(struct fmdev *, u16 *);
+int fm_rx_get_af_switch(struct fmdev *, u8 *);
void fm_rx_get_region(struct fmdev *, u8 *);
-u32 fm_rx_set_chanl_spacing(struct fmdev *, u8);
-u32 fm_rx_get_chanl_spacing(struct fmdev *, u8 *);
+int fm_rx_set_chanl_spacing(struct fmdev *, u8);
+int fm_rx_get_chanl_spacing(struct fmdev *, u8 *);
#endif
diff --git a/drivers/media/radio/wl128x/fmdrv_tx.c b/drivers/media/radio/wl128x/fmdrv_tx.c
index be54068b56a8..6ea33e09d63b 100644
--- a/drivers/media/radio/wl128x/fmdrv_tx.c
+++ b/drivers/media/radio/wl128x/fmdrv_tx.c
@@ -24,10 +24,10 @@
#include "fmdrv_common.h"
#include "fmdrv_tx.h"
-u32 fm_tx_set_stereo_mono(struct fmdev *fmdev, u16 mode)
+int fm_tx_set_stereo_mono(struct fmdev *fmdev, u16 mode)
{
u16 payload;
- u32 ret;
+ int ret;
if (fmdev->tx_data.aud_mode == mode)
return 0;
@@ -46,10 +46,10 @@ u32 fm_tx_set_stereo_mono(struct fmdev *fmdev, u16 mode)
return ret;
}
-static u32 set_rds_text(struct fmdev *fmdev, u8 *rds_text)
+static int set_rds_text(struct fmdev *fmdev, u8 *rds_text)
{
u16 payload;
- u32 ret;
+ int ret;
ret = fmc_send_cmd(fmdev, RDS_DATA_SET, REG_WR, rds_text,
strlen(rds_text), NULL, NULL);
@@ -66,10 +66,10 @@ static u32 set_rds_text(struct fmdev *fmdev, u8 *rds_text)
return 0;
}
-static u32 set_rds_data_mode(struct fmdev *fmdev, u8 mode)
+static int set_rds_data_mode(struct fmdev *fmdev, u8 mode)
{
u16 payload;
- u32 ret;
+ int ret;
/* Setting unique PI TODO: how unique? */
payload = (u16)0xcafe;
@@ -89,10 +89,10 @@ static u32 set_rds_data_mode(struct fmdev *fmdev, u8 mode)
return 0;
}
-static u32 set_rds_len(struct fmdev *fmdev, u8 type, u16 len)
+static int set_rds_len(struct fmdev *fmdev, u8 type, u16 len)
{
u16 payload;
- u32 ret;
+ int ret;
len |= type << 8;
payload = len;
@@ -105,10 +105,10 @@ static u32 set_rds_len(struct fmdev *fmdev, u8 type, u16 len)
return 0;
}
-u32 fm_tx_set_rds_mode(struct fmdev *fmdev, u8 rds_en_dis)
+int fm_tx_set_rds_mode(struct fmdev *fmdev, u8 rds_en_dis)
{
u16 payload;
- u32 ret;
+ int ret;
u8 rds_text[] = "Zoom2\n";
fmdbg("rds_en_dis:%d(E:%d, D:%d)\n", rds_en_dis,
@@ -148,10 +148,10 @@ u32 fm_tx_set_rds_mode(struct fmdev *fmdev, u8 rds_en_dis)
return 0;
}
-u32 fm_tx_set_radio_text(struct fmdev *fmdev, u8 *rds_text, u8 rds_type)
+int fm_tx_set_radio_text(struct fmdev *fmdev, u8 *rds_text, u8 rds_type)
{
u16 payload;
- u32 ret;
+ int ret;
if (fmdev->curr_fmmode != FM_MODE_TX)
return -EPERM;
@@ -176,10 +176,10 @@ u32 fm_tx_set_radio_text(struct fmdev *fmdev, u8 *rds_text, u8 rds_type)
return 0;
}
-u32 fm_tx_set_af(struct fmdev *fmdev, u32 af)
+int fm_tx_set_af(struct fmdev *fmdev, u32 af)
{
u16 payload;
- u32 ret;
+ int ret;
if (fmdev->curr_fmmode != FM_MODE_TX)
return -EPERM;
@@ -196,10 +196,10 @@ u32 fm_tx_set_af(struct fmdev *fmdev, u32 af)
return 0;
}
-u32 fm_tx_set_region(struct fmdev *fmdev, u8 region)
+int fm_tx_set_region(struct fmdev *fmdev, u8 region)
{
u16 payload;
- u32 ret;
+ int ret;
if (region != FM_BAND_EUROPE_US && region != FM_BAND_JAPAN) {
fmerr("Invalid band\n");
@@ -216,10 +216,10 @@ u32 fm_tx_set_region(struct fmdev *fmdev, u8 region)
return 0;
}
-u32 fm_tx_set_mute_mode(struct fmdev *fmdev, u8 mute_mode_toset)
+int fm_tx_set_mute_mode(struct fmdev *fmdev, u8 mute_mode_toset)
{
u16 payload;
- u32 ret;
+ int ret;
fmdbg("tx: mute mode %d\n", mute_mode_toset);
@@ -233,11 +233,11 @@ u32 fm_tx_set_mute_mode(struct fmdev *fmdev, u8 mute_mode_toset)
}
/* Set TX Audio I/O */
-static u32 set_audio_io(struct fmdev *fmdev)
+static int set_audio_io(struct fmdev *fmdev)
{
struct fmtx_data *tx = &fmdev->tx_data;
u16 payload;
- u32 ret;
+ int ret;
/* Set Audio I/O Enable */
payload = tx->audio_io;
@@ -251,12 +251,12 @@ static u32 set_audio_io(struct fmdev *fmdev)
}
/* Start TX Transmission */
-static u32 enable_xmit(struct fmdev *fmdev, u8 new_xmit_state)
+static int enable_xmit(struct fmdev *fmdev, u8 new_xmit_state)
{
struct fmtx_data *tx = &fmdev->tx_data;
unsigned long timeleft;
u16 payload;
- u32 ret;
+ int ret;
/* Enable POWER_ENB interrupts */
payload = FM_POW_ENB_EVENT;
@@ -289,11 +289,11 @@ static u32 enable_xmit(struct fmdev *fmdev, u8 new_xmit_state)
}
/* Set TX power level */
-u32 fm_tx_set_pwr_lvl(struct fmdev *fmdev, u8 new_pwr_lvl)
+int fm_tx_set_pwr_lvl(struct fmdev *fmdev, u8 new_pwr_lvl)
{
u16 payload;
struct fmtx_data *tx = &fmdev->tx_data;
- u32 ret;
+ int ret;
if (fmdev->curr_fmmode != FM_MODE_TX)
return -EPERM;
@@ -328,11 +328,11 @@ u32 fm_tx_set_pwr_lvl(struct fmdev *fmdev, u8 new_pwr_lvl)
* Sets FM TX pre-emphasis filter value (OFF, 50us, or 75us)
* Convert V4L2 specified filter values to chip specific filter values.
*/
-u32 fm_tx_set_preemph_filter(struct fmdev *fmdev, u32 preemphasis)
+int fm_tx_set_preemph_filter(struct fmdev *fmdev, u32 preemphasis)
{
struct fmtx_data *tx = &fmdev->tx_data;
u16 payload;
- u32 ret;
+ int ret;
if (fmdev->curr_fmmode != FM_MODE_TX)
return -EPERM;
@@ -360,10 +360,11 @@ u32 fm_tx_set_preemph_filter(struct fmdev *fmdev, u32 preemphasis)
}
/* Get the TX tuning capacitor value.*/
-u32 fm_tx_get_tune_cap_val(struct fmdev *fmdev)
+int fm_tx_get_tune_cap_val(struct fmdev *fmdev)
{
u16 curr_val;
- u32 ret, resp_len;
+ u32 resp_len;
+ int ret;
if (fmdev->curr_fmmode != FM_MODE_TX)
return -EPERM;
@@ -379,11 +380,11 @@ u32 fm_tx_get_tune_cap_val(struct fmdev *fmdev)
}
/* Set TX Frequency */
-u32 fm_tx_set_freq(struct fmdev *fmdev, u32 freq_to_set)
+int fm_tx_set_freq(struct fmdev *fmdev, u32 freq_to_set)
{
struct fmtx_data *tx = &fmdev->tx_data;
u16 payload, chanl_index;
- u32 ret;
+ int ret;
if (test_bit(FM_CORE_TX_XMITING, &fmdev->flag)) {
enable_xmit(fmdev, 0);
diff --git a/drivers/media/radio/wl128x/fmdrv_tx.h b/drivers/media/radio/wl128x/fmdrv_tx.h
index e393a2bdd49e..11ae2e4c2d03 100644
--- a/drivers/media/radio/wl128x/fmdrv_tx.h
+++ b/drivers/media/radio/wl128x/fmdrv_tx.h
@@ -22,16 +22,16 @@
#ifndef _FMDRV_TX_H
#define _FMDRV_TX_H
-u32 fm_tx_set_freq(struct fmdev *, u32);
-u32 fm_tx_set_pwr_lvl(struct fmdev *, u8);
-u32 fm_tx_set_region(struct fmdev *, u8);
-u32 fm_tx_set_mute_mode(struct fmdev *, u8);
-u32 fm_tx_set_stereo_mono(struct fmdev *, u16);
-u32 fm_tx_set_rds_mode(struct fmdev *, u8);
-u32 fm_tx_set_radio_text(struct fmdev *, u8 *, u8);
-u32 fm_tx_set_af(struct fmdev *, u32);
-u32 fm_tx_set_preemph_filter(struct fmdev *, u32);
-u32 fm_tx_get_tune_cap_val(struct fmdev *);
+int fm_tx_set_freq(struct fmdev *, u32);
+int fm_tx_set_pwr_lvl(struct fmdev *, u8);
+int fm_tx_set_region(struct fmdev *, u8);
+int fm_tx_set_mute_mode(struct fmdev *, u8);
+int fm_tx_set_stereo_mono(struct fmdev *, u16);
+int fm_tx_set_rds_mode(struct fmdev *, u8);
+int fm_tx_set_radio_text(struct fmdev *, u8 *, u8);
+int fm_tx_set_af(struct fmdev *, u32);
+int fm_tx_set_preemph_filter(struct fmdev *, u32);
+int fm_tx_get_tune_cap_val(struct fmdev *);
#endif
diff --git a/drivers/media/radio/wl128x/fmdrv_v4l2.c b/drivers/media/radio/wl128x/fmdrv_v4l2.c
index 4f5c43d2566c..077d369a0173 100644
--- a/drivers/media/radio/wl128x/fmdrv_v4l2.c
+++ b/drivers/media/radio/wl128x/fmdrv_v4l2.c
@@ -84,6 +84,7 @@ static ssize_t fm_v4l2_fops_write(struct file *file, const char __user * buf,
struct fmdev *fmdev;
ret = copy_from_user(&rds, buf, sizeof(rds));
+ rds.text[sizeof(rds.text) - 1] = '\0';
fmdbg("(%d)type: %d, text %s, af %d\n",
ret, rds.text_type, rds.text, rds.af_freq);
if (ret)
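The single added line above NUL-terminates rds.text after copy_from_user(), since user space may pass an unterminated buffer and the fmdbg("%s", ...) that follows would otherwise read past the array. The pattern in isolation, with an illustrative struct layout:

	struct example_rds {		/* illustrative layout, not the driver's */
		u8 text_type;
		u8 text[64];
		u32 af_freq;
	};

	static int example_copy(struct example_rds *rds, const char __user *buf)
	{
		if (copy_from_user(rds, buf, sizeof(*rds)))
			return -EFAULT;
		rds->text[sizeof(rds->text) - 1] = '\0';	/* force termination */
		return 0;
	}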
diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig
index aeb7f43dfb65..4df4affeea5f 100644
--- a/drivers/media/rc/Kconfig
+++ b/drivers/media/rc/Kconfig
@@ -87,6 +87,16 @@ config IR_RC5_SZ_DECODER
uses an IR protocol that is almost standard RC-5, but not quite,
as it uses an additional bit).
+config IR_SANYO_DECODER
+ tristate "Enable IR raw decoder for the Sanyo protocol"
+ depends on RC_CORE
+ default y
+
+ ---help---
+ Enable this option if you have an infrared remote control which
+ uses the Sanyo protocol (Sanyo, Aiwa, Chinon remotes),
+ and you need software decoding support.
+
config IR_MCE_KBD_DECODER
tristate "Enable IR raw decoder for the MCE keyboard/mouse protocol"
depends on RC_CORE
diff --git a/drivers/media/rc/Makefile b/drivers/media/rc/Makefile
index 2156e786b557..fb3dee2dd845 100644
--- a/drivers/media/rc/Makefile
+++ b/drivers/media/rc/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_IR_RC6_DECODER) += ir-rc6-decoder.o
obj-$(CONFIG_IR_JVC_DECODER) += ir-jvc-decoder.o
obj-$(CONFIG_IR_SONY_DECODER) += ir-sony-decoder.o
obj-$(CONFIG_IR_RC5_SZ_DECODER) += ir-rc5-sz-decoder.o
+obj-$(CONFIG_IR_SANYO_DECODER) += ir-sanyo-decoder.o
obj-$(CONFIG_IR_MCE_KBD_DECODER) += ir-mce_kbd-decoder.o
obj-$(CONFIG_IR_LIRC_CODEC) += ir-lirc-codec.o
diff --git a/drivers/media/rc/ati_remote.c b/drivers/media/rc/ati_remote.c
index 4363bc38aab0..baf907b3ce76 100644
--- a/drivers/media/rc/ati_remote.c
+++ b/drivers/media/rc/ati_remote.c
@@ -189,7 +189,7 @@ struct ati_remote {
dma_addr_t inbuf_dma;
dma_addr_t outbuf_dma;
- unsigned char old_data[2]; /* Detect duplicate events */
+ unsigned char old_data; /* Detect duplicate events */
unsigned long old_jiffies;
unsigned long acc_jiffies; /* handle acceleration */
unsigned long first_jiffies;
@@ -221,35 +221,35 @@ struct ati_remote {
/* Translation table from hardware messages to input events. */
static const struct {
short kind;
- unsigned char data1, data2;
+ unsigned char data;
int type;
unsigned int code;
int value;
} ati_remote_tbl[] = {
/* Directional control pad axes */
- {KIND_ACCEL, 0x35, 0x70, EV_REL, REL_X, -1}, /* left */
- {KIND_ACCEL, 0x36, 0x71, EV_REL, REL_X, 1}, /* right */
- {KIND_ACCEL, 0x37, 0x72, EV_REL, REL_Y, -1}, /* up */
- {KIND_ACCEL, 0x38, 0x73, EV_REL, REL_Y, 1}, /* down */
+ {KIND_ACCEL, 0x70, EV_REL, REL_X, -1}, /* left */
+ {KIND_ACCEL, 0x71, EV_REL, REL_X, 1}, /* right */
+ {KIND_ACCEL, 0x72, EV_REL, REL_Y, -1}, /* up */
+ {KIND_ACCEL, 0x73, EV_REL, REL_Y, 1}, /* down */
/* Directional control pad diagonals */
- {KIND_LU, 0x39, 0x74, EV_REL, 0, 0}, /* left up */
- {KIND_RU, 0x3a, 0x75, EV_REL, 0, 0}, /* right up */
- {KIND_LD, 0x3c, 0x77, EV_REL, 0, 0}, /* left down */
- {KIND_RD, 0x3b, 0x76, EV_REL, 0, 0}, /* right down */
+ {KIND_LU, 0x74, EV_REL, 0, 0}, /* left up */
+ {KIND_RU, 0x75, EV_REL, 0, 0}, /* right up */
+ {KIND_LD, 0x77, EV_REL, 0, 0}, /* left down */
+ {KIND_RD, 0x76, EV_REL, 0, 0}, /* right down */
/* "Mouse button" buttons */
- {KIND_LITERAL, 0x3d, 0x78, EV_KEY, BTN_LEFT, 1}, /* left btn down */
- {KIND_LITERAL, 0x3e, 0x79, EV_KEY, BTN_LEFT, 0}, /* left btn up */
- {KIND_LITERAL, 0x41, 0x7c, EV_KEY, BTN_RIGHT, 1},/* right btn down */
- {KIND_LITERAL, 0x42, 0x7d, EV_KEY, BTN_RIGHT, 0},/* right btn up */
+ {KIND_LITERAL, 0x78, EV_KEY, BTN_LEFT, 1}, /* left btn down */
+ {KIND_LITERAL, 0x79, EV_KEY, BTN_LEFT, 0}, /* left btn up */
+ {KIND_LITERAL, 0x7c, EV_KEY, BTN_RIGHT, 1},/* right btn down */
+ {KIND_LITERAL, 0x7d, EV_KEY, BTN_RIGHT, 0},/* right btn up */
/* Artificial "doubleclick" events are generated by the hardware.
* They are mapped to the "side" and "extra" mouse buttons here. */
- {KIND_FILTERED, 0x3f, 0x7a, EV_KEY, BTN_SIDE, 1}, /* left dblclick */
- {KIND_FILTERED, 0x43, 0x7e, EV_KEY, BTN_EXTRA, 1},/* right dblclick */
+ {KIND_FILTERED, 0x7a, EV_KEY, BTN_SIDE, 1}, /* left dblclick */
+ {KIND_FILTERED, 0x7e, EV_KEY, BTN_EXTRA, 1},/* right dblclick */
/* Non-mouse events are handled by rc-core */
- {KIND_END, 0x00, 0x00, EV_MAX + 1, 0, 0}
+ {KIND_END, 0x00, EV_MAX + 1, 0, 0}
};
/* Local function prototypes */
@@ -397,25 +397,6 @@ static int ati_remote_sendpacket(struct ati_remote *ati_remote, u16 cmd, unsigne
}
/*
- * ati_remote_event_lookup
- */
-static int ati_remote_event_lookup(int rem, unsigned char d1, unsigned char d2)
-{
- int i;
-
- for (i = 0; ati_remote_tbl[i].kind != KIND_END; i++) {
- /*
- * Decide if the table entry matches the remote input.
- */
- if (ati_remote_tbl[i].data1 == d1 &&
- ati_remote_tbl[i].data2 == d2)
- return i;
-
- }
- return -1;
-}
-
-/*
* ati_remote_compute_accel
*
* Implements acceleration curve for directional control pad
@@ -463,7 +444,15 @@ static void ati_remote_input_report(struct urb *urb)
int index = -1;
int acc;
int remote_num;
- unsigned char scancode[2];
+ unsigned char scancode;
+ int i;
+
+ /*
+ * data[0] = 0x14
+ * data[1] = data[2] + data[3] + 0xd5 (a checksum byte)
+ * data[2] = the key code (with toggle bit in MSB with some models)
+ * data[3] = channel << 4 (the low 4 bits must be zero)
+ */
/* Deal with strange looking inputs */
if ( (urb->actual_length != 4) || (data[0] != 0x14) ||
@@ -472,6 +461,13 @@ static void ati_remote_input_report(struct urb *urb)
return;
}
+ if (data[1] != ((data[2] + data[3] + 0xd5) & 0xff)) {
+ dbginfo(&ati_remote->interface->dev,
+ "wrong checksum in input: %02x %02x %02x %02x\n",
+ data[0], data[1], data[2], data[3]);
+ return;
+ }
+
/* Mask unwanted remote channels. */
/* note: remote_num is 0-based, channel 1 on remote == 0 here */
remote_num = (data[3] >> 4) & 0x0f;
@@ -482,31 +478,30 @@ static void ati_remote_input_report(struct urb *urb)
return;
}
- scancode[0] = (((data[1] - ((remote_num + 1) << 4)) & 0xf0) | (data[1] & 0x0f));
-
/*
- * Some devices (e.g. SnapStream Firefly) use 8080 as toggle code,
- * so we have to clear them. The first bit is a bit tricky as the
- * "non-toggled" state depends on remote_num, so we xor it with the
- * second bit which is only used for toggle.
+ * MSB is a toggle code, though only used by some devices
+ * (e.g. SnapStream Firefly)
*/
- scancode[0] ^= (data[2] & 0x80);
-
- scancode[1] = data[2] & ~0x80;
+ scancode = data[2] & 0x7f;
- /* Look up event code index in mouse translation table. */
- index = ati_remote_event_lookup(remote_num, scancode[0], scancode[1]);
+ /* Look up event code index in the mouse translation table. */
+ for (i = 0; ati_remote_tbl[i].kind != KIND_END; i++) {
+ if (scancode == ati_remote_tbl[i].data) {
+ index = i;
+ break;
+ }
+ }
if (index >= 0) {
dbginfo(&ati_remote->interface->dev,
- "channel 0x%02x; mouse data %02x,%02x; index %d; keycode %d\n",
- remote_num, data[1], data[2], index, ati_remote_tbl[index].code);
+ "channel 0x%02x; mouse data %02x; index %d; keycode %d\n",
+ remote_num, data[2], index, ati_remote_tbl[index].code);
if (!dev)
return; /* no mouse device */
} else
dbginfo(&ati_remote->interface->dev,
- "channel 0x%02x; key data %02x,%02x, scancode %02x,%02x\n",
- remote_num, data[1], data[2], scancode[0], scancode[1]);
+ "channel 0x%02x; key data %02x, scancode %02x\n",
+ remote_num, data[2], scancode);
if (index >= 0 && ati_remote_tbl[index].kind == KIND_LITERAL) {
@@ -523,8 +518,7 @@ static void ati_remote_input_report(struct urb *urb)
unsigned long now = jiffies;
/* Filter duplicate events which happen "too close" together. */
- if (ati_remote->old_data[0] == data[1] &&
- ati_remote->old_data[1] == data[2] &&
+ if (ati_remote->old_data == data[2] &&
time_before(now, ati_remote->old_jiffies +
msecs_to_jiffies(repeat_filter))) {
ati_remote->repeat_count++;
@@ -533,8 +527,7 @@ static void ati_remote_input_report(struct urb *urb)
ati_remote->first_jiffies = now;
}
- ati_remote->old_data[0] = data[1];
- ati_remote->old_data[1] = data[2];
+ ati_remote->old_data = data[2];
ati_remote->old_jiffies = now;
/* Ensure we skip at least the 4 first duplicate events (generated
@@ -549,14 +542,13 @@ static void ati_remote_input_report(struct urb *urb)
if (index < 0) {
/* Not a mouse event, hand it to rc-core. */
- u32 rc_code = (scancode[0] << 8) | scancode[1];
/*
* We don't use the rc-core repeat handling yet as
* it would cause ghost repeats which would be a
* regression for this driver.
*/
- rc_keydown_notimeout(ati_remote->rdev, rc_code,
+ rc_keydown_notimeout(ati_remote->rdev, scancode,
data[2]);
rc_keyup(ati_remote->rdev);
return;
@@ -607,8 +599,7 @@ static void ati_remote_input_report(struct urb *urb)
input_sync(dev);
ati_remote->old_jiffies = jiffies;
- ati_remote->old_data[0] = data[1];
- ati_remote->old_data[1] = data[2];
+ ati_remote->old_data = data[2];
}
}
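The rewritten ati_remote report handler documents the 4-byte packet layout and now drops packets whose checksum byte does not match. A standalone sketch of that validation, following the layout comment in the hunk above:

	/* data[0] must be 0x14, data[1] is a checksum over the key code and
	 * channel bytes, data[2] is the key code, data[3] is channel << 4. */
	static bool ati_packet_ok(const unsigned char data[4])
	{
		if (data[0] != 0x14)
			return false;
		if (data[3] & 0x0f)	/* low nibble of channel byte must be 0 */
			return false;
		return data[1] == ((data[2] + data[3] + 0xd5) & 0xff);
	}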
diff --git a/drivers/media/rc/ene_ir.c b/drivers/media/rc/ene_ir.c
index cf10ecf5acec..860c112e0fd2 100644
--- a/drivers/media/rc/ene_ir.c
+++ b/drivers/media/rc/ene_ir.c
@@ -324,7 +324,7 @@ static int ene_rx_get_sample_reg(struct ene_device *dev)
return dev->extra_buf2_address + r_pointer;
}
- dbg("attempt to read beyong ring bufer end");
+ dbg("attempt to read beyond ring buffer end");
return 0;
}
diff --git a/drivers/media/rc/ene_ir.h b/drivers/media/rc/ene_ir.h
index fd108d90f750..6f978e85db8c 100644
--- a/drivers/media/rc/ene_ir.h
+++ b/drivers/media/rc/ene_ir.h
@@ -227,7 +227,7 @@ struct ene_device {
/* TX buffer */
unsigned *tx_buffer; /* input samples buffer*/
- int tx_pos; /* position in that bufer */
+ int tx_pos; /* position in that buffer */
int tx_len; /* current len of tx buffer */
int tx_done; /* done transmitting */
/* one more sample pending*/
diff --git a/drivers/media/rc/ir-nec-decoder.c b/drivers/media/rc/ir-nec-decoder.c
index 17f8db00435a..3c9431a9f62d 100644
--- a/drivers/media/rc/ir-nec-decoder.c
+++ b/drivers/media/rc/ir-nec-decoder.c
@@ -194,8 +194,8 @@ static int ir_nec_decode(struct rc_dev *dev, struct ir_raw_event ev)
return 0;
}
- IR_dprintk(1, "NEC decode failed at state %d (%uus %s)\n",
- data->state, TO_US(ev.duration), TO_STR(ev.pulse));
+ IR_dprintk(1, "NEC decode failed at count %d state %d (%uus %s)\n",
+ data->count, data->state, TO_US(ev.duration), TO_STR(ev.pulse));
data->state = STATE_INACTIVE;
return -EINVAL;
}
diff --git a/drivers/media/rc/ir-raw.c b/drivers/media/rc/ir-raw.c
index 2e5cd3100b64..95e630998aaf 100644
--- a/drivers/media/rc/ir-raw.c
+++ b/drivers/media/rc/ir-raw.c
@@ -357,6 +357,7 @@ static void init_decoders(struct work_struct *work)
load_rc6_decode();
load_jvc_decode();
load_sony_decode();
+ load_sanyo_decode();
load_mce_kbd_decode();
load_lirc_codec();
diff --git a/drivers/media/rc/ir-rc6-decoder.c b/drivers/media/rc/ir-rc6-decoder.c
index 140fb67e2f89..4cfdd7fa4bbd 100644
--- a/drivers/media/rc/ir-rc6-decoder.c
+++ b/drivers/media/rc/ir-rc6-decoder.c
@@ -18,24 +18,31 @@
/*
* This decoder currently supports:
* RC6-0-16 (standard toggle bit in header)
+ * RC6-6A-20 (no toggle bit)
* RC6-6A-24 (no toggle bit)
* RC6-6A-32 (MCE version with toggle bit in body)
*/
-#define RC6_UNIT 444444 /* us */
+#define RC6_UNIT 444444 /* nanosecs */
#define RC6_HEADER_NBITS 4 /* not including toggle bit */
#define RC6_0_NBITS 16
-#define RC6_6A_SMALL_NBITS 24
-#define RC6_6A_LARGE_NBITS 32
+#define RC6_6A_32_NBITS 32
+#define RC6_6A_NBITS 128 /* Variable 8..128 */
#define RC6_PREFIX_PULSE (6 * RC6_UNIT)
#define RC6_PREFIX_SPACE (2 * RC6_UNIT)
#define RC6_BIT_START (1 * RC6_UNIT)
#define RC6_BIT_END (1 * RC6_UNIT)
#define RC6_TOGGLE_START (2 * RC6_UNIT)
#define RC6_TOGGLE_END (2 * RC6_UNIT)
+#define RC6_SUFFIX_SPACE (6 * RC6_UNIT)
#define RC6_MODE_MASK 0x07 /* for the header bits */
#define RC6_STARTBIT_MASK 0x08 /* for the header bits */
#define RC6_6A_MCE_TOGGLE_MASK 0x8000 /* for the body bits */
+#define RC6_6A_LCC_MASK 0xffff0000 /* RC6-6A-32 long customer code mask */
+#define RC6_6A_MCE_CC 0x800f0000 /* MCE customer code */
+#ifndef CHAR_BIT
+#define CHAR_BIT 8 /* Normally in <limits.h> */
+#endif
enum rc6_mode {
RC6_MODE_0,
@@ -125,6 +132,7 @@ again:
break;
data->state = STATE_HEADER_BIT_START;
+ data->header = 0;
return 0;
case STATE_HEADER_BIT_START:
@@ -171,20 +179,14 @@ again:
data->state = STATE_BODY_BIT_START;
decrease_duration(&ev, RC6_TOGGLE_END);
data->count = 0;
+ data->body = 0;
switch (rc6_mode(data)) {
case RC6_MODE_0:
data->wanted_bits = RC6_0_NBITS;
break;
case RC6_MODE_6A:
- /* This might look weird, but we basically
- check the value of the first body bit to
- determine the number of bits in mode 6A */
- if ((!ev.pulse && !geq_margin(ev.duration, RC6_UNIT, RC6_UNIT / 2)) ||
- geq_margin(ev.duration, RC6_UNIT, RC6_UNIT / 2))
- data->wanted_bits = RC6_6A_LARGE_NBITS;
- else
- data->wanted_bits = RC6_6A_SMALL_NBITS;
+ data->wanted_bits = RC6_6A_NBITS;
break;
default:
IR_dprintk(1, "RC6 unknown mode\n");
@@ -193,15 +195,21 @@ again:
goto again;
case STATE_BODY_BIT_START:
- if (!eq_margin(ev.duration, RC6_BIT_START, RC6_UNIT / 2))
- break;
-
- data->body <<= 1;
- if (ev.pulse)
- data->body |= 1;
- data->count++;
- data->state = STATE_BODY_BIT_END;
- return 0;
+ if (eq_margin(ev.duration, RC6_BIT_START, RC6_UNIT / 2)) {
+ /* Discard LSB's that won't fit in data->body */
+ if (data->count++ < CHAR_BIT * sizeof data->body) {
+ data->body <<= 1;
+ if (ev.pulse)
+ data->body |= 1;
+ }
+ data->state = STATE_BODY_BIT_END;
+ return 0;
+ } else if (RC6_MODE_6A == rc6_mode(data) && !ev.pulse &&
+ geq_margin(ev.duration, RC6_SUFFIX_SPACE, RC6_UNIT / 2)) {
+ data->state = STATE_FINISHED;
+ goto again;
+ }
+ break;
case STATE_BODY_BIT_END:
if (!is_transition(&ev, &dev->raw->prev_ev))
@@ -221,20 +229,27 @@ again:
switch (rc6_mode(data)) {
case RC6_MODE_0:
- scancode = data->body & 0xffff;
+ scancode = data->body;
toggle = data->toggle;
IR_dprintk(1, "RC6(0) scancode 0x%04x (toggle: %u)\n",
scancode, toggle);
break;
case RC6_MODE_6A:
- if (data->wanted_bits == RC6_6A_LARGE_NBITS) {
- toggle = data->body & RC6_6A_MCE_TOGGLE_MASK ? 1 : 0;
- scancode = data->body & ~RC6_6A_MCE_TOGGLE_MASK;
+ if (data->count > CHAR_BIT * sizeof data->body) {
+ IR_dprintk(1, "RC6 too many (%u) data bits\n",
+ data->count);
+ goto out;
+ }
+
+ scancode = data->body;
+ if (data->count == RC6_6A_32_NBITS &&
+ (scancode & RC6_6A_LCC_MASK) == RC6_6A_MCE_CC) {
+ /* MCE RC */
+ toggle = (scancode & RC6_6A_MCE_TOGGLE_MASK) ? 1 : 0;
+ scancode &= ~RC6_6A_MCE_TOGGLE_MASK;
} else {
toggle = 0;
- scancode = data->body & 0xffffff;
}
-
IR_dprintk(1, "RC6(6A) scancode 0x%08x (toggle: %u)\n",
scancode, toggle);
break;
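With the fixed-length guess gone, the mode-6A path above accumulates up to 32 body bits and only treats the result as an MCE scancode when it is exactly 32 bits long and carries the 0x800f long customer code in the upper half, in which case bit 15 is the toggle. A sketch of that final classification step, using an illustrative helper:

	static void rc6_6a_classify(u32 body, unsigned int nbits,
				    u32 *scancode, u8 *toggle)
	{
		*scancode = body;
		*toggle = 0;

		if (nbits == 32 && (body & 0xffff0000) == 0x800f0000) {
			*toggle = (body & 0x8000) ? 1 : 0;	/* MCE toggle bit */
			*scancode = body & ~0x8000;
		}
	}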
diff --git a/drivers/media/rc/ir-sanyo-decoder.c b/drivers/media/rc/ir-sanyo-decoder.c
new file mode 100644
index 000000000000..d38fbdd0b25a
--- /dev/null
+++ b/drivers/media/rc/ir-sanyo-decoder.c
@@ -0,0 +1,205 @@
+/* ir-sanyo-decoder.c - handle SANYO IR Pulse/Space protocol
+ *
+ * Copyright (C) 2011 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This protocol uses the NEC protocol timings. However, data is formatted as:
+ * 13 bits Custom Code
+ * 13 bits NOT(Custom Code)
+ * 8 bits Key data
+ * 8 bits NOT(Key data)
+ *
+ * According to LIRC, this protocol is used on Sanyo, Aiwa and Chinon remotes.
+ * Information for this protocol is available in the Sanyo LC7461 datasheet.
+ */
+
+#include <linux/module.h>
+#include <linux/bitrev.h>
+#include "rc-core-priv.h"
+
+#define SANYO_NBITS (13+13+8+8)
+#define SANYO_UNIT 562500 /* ns */
+#define SANYO_HEADER_PULSE (16 * SANYO_UNIT)
+#define SANYO_HEADER_SPACE (8 * SANYO_UNIT)
+#define SANYO_BIT_PULSE (1 * SANYO_UNIT)
+#define SANYO_BIT_0_SPACE (1 * SANYO_UNIT)
+#define SANYO_BIT_1_SPACE (3 * SANYO_UNIT)
+#define SANYO_REPEAT_SPACE (150 * SANYO_UNIT)
+#define SANYO_TRAILER_PULSE (1 * SANYO_UNIT)
+#define SANYO_TRAILER_SPACE (10 * SANYO_UNIT) /* in fact, 42 */
+
+enum sanyo_state {
+ STATE_INACTIVE,
+ STATE_HEADER_SPACE,
+ STATE_BIT_PULSE,
+ STATE_BIT_SPACE,
+ STATE_TRAILER_PULSE,
+ STATE_TRAILER_SPACE,
+};
+
+/**
+ * ir_sanyo_decode() - Decode one SANYO pulse or space
+ * @dev: the struct rc_dev descriptor of the device
+ * @duration: the struct ir_raw_event descriptor of the pulse/space
+ *
+ * This function returns -EINVAL if the pulse violates the state machine
+ */
+static int ir_sanyo_decode(struct rc_dev *dev, struct ir_raw_event ev)
+{
+ struct sanyo_dec *data = &dev->raw->sanyo;
+ u32 scancode;
+ u8 address, not_address, command, not_command;
+
+ if (!(dev->raw->enabled_protocols & RC_TYPE_SANYO))
+ return 0;
+
+ if (!is_timing_event(ev)) {
+ if (ev.reset) {
+ IR_dprintk(1, "SANYO event reset received. reset to state 0\n");
+ data->state = STATE_INACTIVE;
+ }
+ return 0;
+ }
+
+ IR_dprintk(2, "SANYO decode started at state %d (%uus %s)\n",
+ data->state, TO_US(ev.duration), TO_STR(ev.pulse));
+
+ switch (data->state) {
+
+ case STATE_INACTIVE:
+ if (!ev.pulse)
+ break;
+
+ if (eq_margin(ev.duration, SANYO_HEADER_PULSE, SANYO_UNIT / 2)) {
+ data->count = 0;
+ data->state = STATE_HEADER_SPACE;
+ return 0;
+ }
+ break;
+
+
+ case STATE_HEADER_SPACE:
+ if (ev.pulse)
+ break;
+
+ if (eq_margin(ev.duration, SANYO_HEADER_SPACE, SANYO_UNIT / 2)) {
+ data->state = STATE_BIT_PULSE;
+ return 0;
+ }
+
+ break;
+
+ case STATE_BIT_PULSE:
+ if (!ev.pulse)
+ break;
+
+ if (!eq_margin(ev.duration, SANYO_BIT_PULSE, SANYO_UNIT / 2))
+ break;
+
+ data->state = STATE_BIT_SPACE;
+ return 0;
+
+ case STATE_BIT_SPACE:
+ if (ev.pulse)
+ break;
+
+ if (!data->count && geq_margin(ev.duration, SANYO_REPEAT_SPACE, SANYO_UNIT / 2)) {
+ if (!dev->keypressed) {
+ IR_dprintk(1, "SANYO discarding last key repeat: event after key up\n");
+ } else {
+ rc_repeat(dev);
+ IR_dprintk(1, "SANYO repeat last key\n");
+ data->state = STATE_INACTIVE;
+ }
+ return 0;
+ }
+
+ data->bits <<= 1;
+ if (eq_margin(ev.duration, SANYO_BIT_1_SPACE, SANYO_UNIT / 2))
+ data->bits |= 1;
+ else if (!eq_margin(ev.duration, SANYO_BIT_0_SPACE, SANYO_UNIT / 2))
+ break;
+ data->count++;
+
+ if (data->count == SANYO_NBITS)
+ data->state = STATE_TRAILER_PULSE;
+ else
+ data->state = STATE_BIT_PULSE;
+
+ return 0;
+
+ case STATE_TRAILER_PULSE:
+ if (!ev.pulse)
+ break;
+
+ if (!eq_margin(ev.duration, SANYO_TRAILER_PULSE, SANYO_UNIT / 2))
+ break;
+
+ data->state = STATE_TRAILER_SPACE;
+ return 0;
+
+ case STATE_TRAILER_SPACE:
+ if (ev.pulse)
+ break;
+
+ if (!geq_margin(ev.duration, SANYO_TRAILER_SPACE, SANYO_UNIT / 2))
+ break;
+
+ address = bitrev16((data->bits >> 29) & 0x1fff) >> 3;
+ not_address = bitrev16((data->bits >> 16) & 0x1fff) >> 3;
+ command = bitrev8((data->bits >> 8) & 0xff);
+ not_command = bitrev8((data->bits >> 0) & 0xff);
+
+ if ((command ^ not_command) != 0xff) {
+ IR_dprintk(1, "SANYO checksum error: received 0x%08Lx\n",
+ data->bits);
+ data->state = STATE_INACTIVE;
+ return 0;
+ }
+
+ scancode = address << 8 | command;
+ IR_dprintk(1, "SANYO scancode: 0x%06x\n", scancode);
+ rc_keydown(dev, scancode, 0);
+ data->state = STATE_INACTIVE;
+ return 0;
+ }
+
+ IR_dprintk(1, "SANYO decode failed at count %d state %d (%uus %s)\n",
+ data->count, data->state, TO_US(ev.duration), TO_STR(ev.pulse));
+ data->state = STATE_INACTIVE;
+ return -EINVAL;
+}
+
+static struct ir_raw_handler sanyo_handler = {
+ .protocols = RC_TYPE_SANYO,
+ .decode = ir_sanyo_decode,
+};
+
+static int __init ir_sanyo_decode_init(void)
+{
+ ir_raw_handler_register(&sanyo_handler);
+
+ printk(KERN_INFO "IR SANYO protocol handler initialized\n");
+ return 0;
+}
+
+static void __exit ir_sanyo_decode_exit(void)
+{
+ ir_raw_handler_unregister(&sanyo_handler);
+}
+
+module_init(ir_sanyo_decode_init);
+module_exit(ir_sanyo_decode_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
+MODULE_DESCRIPTION("SANYO IR protocol decoder");
diff --git a/drivers/media/rc/keymaps/rc-ati-x10.c b/drivers/media/rc/keymaps/rc-ati-x10.c
index e1b8b2605c48..81506440eded 100644
--- a/drivers/media/rc/keymaps/rc-ati-x10.c
+++ b/drivers/media/rc/keymaps/rc-ati-x10.c
@@ -27,55 +27,55 @@
#include <media/rc-map.h>
static struct rc_map_table ati_x10[] = {
- { 0xd20d, KEY_1 },
- { 0xd30e, KEY_2 },
- { 0xd40f, KEY_3 },
- { 0xd510, KEY_4 },
- { 0xd611, KEY_5 },
- { 0xd712, KEY_6 },
- { 0xd813, KEY_7 },
- { 0xd914, KEY_8 },
- { 0xda15, KEY_9 },
- { 0xdc17, KEY_0 },
- { 0xc500, KEY_A },
- { 0xc601, KEY_B },
- { 0xde19, KEY_C },
- { 0xe01b, KEY_D },
- { 0xe621, KEY_E },
- { 0xe823, KEY_F },
+ { 0x0d, KEY_1 },
+ { 0x0e, KEY_2 },
+ { 0x0f, KEY_3 },
+ { 0x10, KEY_4 },
+ { 0x11, KEY_5 },
+ { 0x12, KEY_6 },
+ { 0x13, KEY_7 },
+ { 0x14, KEY_8 },
+ { 0x15, KEY_9 },
+ { 0x17, KEY_0 },
+ { 0x00, KEY_A },
+ { 0x01, KEY_B },
+ { 0x19, KEY_C },
+ { 0x1b, KEY_D },
+ { 0x21, KEY_E },
+ { 0x23, KEY_F },
- { 0xdd18, KEY_KPENTER }, /* "check" */
- { 0xdb16, KEY_MENU }, /* "menu" */
- { 0xc702, KEY_POWER }, /* Power */
- { 0xc803, KEY_TV }, /* TV */
- { 0xc904, KEY_DVD }, /* DVD */
- { 0xca05, KEY_WWW }, /* WEB */
- { 0xcb06, KEY_BOOKMARKS }, /* "book" */
- { 0xcc07, KEY_EDIT }, /* "hand" */
- { 0xe11c, KEY_COFFEE }, /* "timer" */
- { 0xe520, KEY_FRONT }, /* "max" */
- { 0xe21d, KEY_LEFT }, /* left */
- { 0xe41f, KEY_RIGHT }, /* right */
- { 0xe722, KEY_DOWN }, /* down */
- { 0xdf1a, KEY_UP }, /* up */
- { 0xe31e, KEY_OK }, /* "OK" */
- { 0xce09, KEY_VOLUMEDOWN }, /* VOL + */
- { 0xcd08, KEY_VOLUMEUP }, /* VOL - */
- { 0xcf0a, KEY_MUTE }, /* MUTE */
- { 0xd00b, KEY_CHANNELUP }, /* CH + */
- { 0xd10c, KEY_CHANNELDOWN },/* CH - */
- { 0xec27, KEY_RECORD }, /* ( o) red */
- { 0xea25, KEY_PLAY }, /* ( >) */
- { 0xe924, KEY_REWIND }, /* (<<) */
- { 0xeb26, KEY_FORWARD }, /* (>>) */
- { 0xed28, KEY_STOP }, /* ([]) */
- { 0xee29, KEY_PAUSE }, /* ('') */
- { 0xf02b, KEY_PREVIOUS }, /* (<-) */
- { 0xef2a, KEY_NEXT }, /* (>+) */
- { 0xf22d, KEY_INFO }, /* PLAYING */
- { 0xf32e, KEY_HOME }, /* TOP */
- { 0xf42f, KEY_END }, /* END */
- { 0xf530, KEY_SELECT }, /* SELECT */
+ { 0x18, KEY_KPENTER }, /* "check" */
+ { 0x16, KEY_MENU }, /* "menu" */
+ { 0x02, KEY_POWER }, /* Power */
+ { 0x03, KEY_TV }, /* TV */
+ { 0x04, KEY_DVD }, /* DVD */
+ { 0x05, KEY_WWW }, /* WEB */
+ { 0x06, KEY_BOOKMARKS }, /* "book" */
+ { 0x07, KEY_EDIT }, /* "hand" */
+ { 0x1c, KEY_COFFEE }, /* "timer" */
+ { 0x20, KEY_FRONT }, /* "max" */
+ { 0x1d, KEY_LEFT }, /* left */
+ { 0x1f, KEY_RIGHT }, /* right */
+ { 0x22, KEY_DOWN }, /* down */
+ { 0x1a, KEY_UP }, /* up */
+ { 0x1e, KEY_OK }, /* "OK" */
+ { 0x09, KEY_VOLUMEDOWN }, /* VOL + */
+ { 0x08, KEY_VOLUMEUP }, /* VOL - */
+ { 0x0a, KEY_MUTE }, /* MUTE */
+ { 0x0b, KEY_CHANNELUP }, /* CH + */
+ { 0x0c, KEY_CHANNELDOWN },/* CH - */
+ { 0x27, KEY_RECORD }, /* ( o) red */
+ { 0x25, KEY_PLAY }, /* ( >) */
+ { 0x24, KEY_REWIND }, /* (<<) */
+ { 0x26, KEY_FORWARD }, /* (>>) */
+ { 0x28, KEY_STOP }, /* ([]) */
+ { 0x29, KEY_PAUSE }, /* ('') */
+ { 0x2b, KEY_PREVIOUS }, /* (<-) */
+ { 0x2a, KEY_NEXT }, /* (>+) */
+ { 0x2d, KEY_INFO }, /* PLAYING */
+ { 0x2e, KEY_HOME }, /* TOP */
+ { 0x2f, KEY_END }, /* END */
+ { 0x30, KEY_SELECT }, /* SELECT */
};
static struct rc_map_list ati_x10_map = {
diff --git a/drivers/media/rc/keymaps/rc-hauppauge.c b/drivers/media/rc/keymaps/rc-hauppauge.c
index e51c6163378b..929bbbc16393 100644
--- a/drivers/media/rc/keymaps/rc-hauppauge.c
+++ b/drivers/media/rc/keymaps/rc-hauppauge.c
@@ -183,6 +183,57 @@ static struct rc_map_table rc5_hauppauge_new[] = {
{ 0x1d3f, KEY_HOME },
/*
+ * Keycodes for PT# R-005 remote bundled with Hauppauge HVR-930C
+ * Keycodes start with address = 0x1c
+ */
+ { 0x1c3b, KEY_GOTO },
+ { 0x1c3d, KEY_POWER },
+
+ { 0x1c14, KEY_UP },
+ { 0x1c15, KEY_DOWN },
+ { 0x1c16, KEY_LEFT },
+ { 0x1c17, KEY_RIGHT },
+ { 0x1c25, KEY_OK },
+
+ { 0x1c00, KEY_0 },
+ { 0x1c01, KEY_1 },
+ { 0x1c02, KEY_2 },
+ { 0x1c03, KEY_3 },
+ { 0x1c04, KEY_4 },
+ { 0x1c05, KEY_5 },
+ { 0x1c06, KEY_6 },
+ { 0x1c07, KEY_7 },
+ { 0x1c08, KEY_8 },
+ { 0x1c09, KEY_9 },
+
+ { 0x1c1f, KEY_EXIT }, /* BACK */
+ { 0x1c0d, KEY_MENU },
+ { 0x1c1c, KEY_TV },
+
+ { 0x1c10, KEY_VOLUMEUP },
+ { 0x1c11, KEY_VOLUMEDOWN },
+
+ { 0x1c20, KEY_CHANNELUP },
+ { 0x1c21, KEY_CHANNELDOWN },
+
+ { 0x1c0f, KEY_MUTE },
+ { 0x1c12, KEY_PREVIOUS }, /* Prev */
+
+ { 0x1c36, KEY_STOP },
+ { 0x1c37, KEY_RECORD },
+
+ { 0x1c24, KEY_LAST }, /* <| */
+ { 0x1c1e, KEY_NEXT }, /* >| */
+
+ { 0x1c0a, KEY_TEXT },
+ { 0x1c0e, KEY_SUBTITLE }, /* CC */
+
+ { 0x1c32, KEY_REWIND },
+ { 0x1c30, KEY_PAUSE },
+ { 0x1c35, KEY_PLAY },
+ { 0x1c34, KEY_FASTFORWARD },
+
+ /*
* Keycodes for the old Black Remote Controller
* This one also uses RC-5 protocol
* Keycodes start with address = 0x00
diff --git a/drivers/media/rc/keymaps/rc-medion-x10.c b/drivers/media/rc/keymaps/rc-medion-x10.c
index 09e2cc01d110..479cdb897810 100644
--- a/drivers/media/rc/keymaps/rc-medion-x10.c
+++ b/drivers/media/rc/keymaps/rc-medion-x10.c
@@ -25,70 +25,70 @@
#include <media/rc-map.h>
static struct rc_map_table medion_x10[] = {
- { 0xf12c, KEY_TV }, /* TV */
- { 0xf22d, KEY_VCR }, /* VCR */
- { 0xc904, KEY_DVD }, /* DVD */
- { 0xcb06, KEY_AUDIO }, /* MUSIC */
-
- { 0xf32e, KEY_RADIO }, /* RADIO */
- { 0xca05, KEY_DIRECTORY }, /* PHOTO */
- { 0xf42f, KEY_INFO }, /* TV-PREVIEW */
- { 0xf530, KEY_LIST }, /* CHANNEL-LST */
-
- { 0xe01b, KEY_SETUP }, /* SETUP */
- { 0xf631, KEY_VIDEO }, /* VIDEO DESKTOP */
-
- { 0xcd08, KEY_VOLUMEDOWN }, /* VOL - */
- { 0xce09, KEY_VOLUMEUP }, /* VOL + */
- { 0xd00b, KEY_CHANNELUP }, /* CHAN + */
- { 0xd10c, KEY_CHANNELDOWN }, /* CHAN - */
- { 0xc500, KEY_MUTE }, /* MUTE */
-
- { 0xf732, KEY_RED }, /* red */
- { 0xf833, KEY_GREEN }, /* green */
- { 0xf934, KEY_YELLOW }, /* yellow */
- { 0xfa35, KEY_BLUE }, /* blue */
- { 0xdb16, KEY_TEXT }, /* TXT */
-
- { 0xd20d, KEY_1 },
- { 0xd30e, KEY_2 },
- { 0xd40f, KEY_3 },
- { 0xd510, KEY_4 },
- { 0xd611, KEY_5 },
- { 0xd712, KEY_6 },
- { 0xd813, KEY_7 },
- { 0xd914, KEY_8 },
- { 0xda15, KEY_9 },
- { 0xdc17, KEY_0 },
- { 0xe11c, KEY_SEARCH }, /* TV/RAD, CH SRC */
- { 0xe520, KEY_DELETE }, /* DELETE */
-
- { 0xfb36, KEY_KEYBOARD }, /* RENAME */
- { 0xdd18, KEY_SCREEN }, /* SNAPSHOT */
-
- { 0xdf1a, KEY_UP }, /* up */
- { 0xe722, KEY_DOWN }, /* down */
- { 0xe21d, KEY_LEFT }, /* left */
- { 0xe41f, KEY_RIGHT }, /* right */
- { 0xe31e, KEY_OK }, /* OK */
-
- { 0xfc37, KEY_SELECT }, /* ACQUIRE IMAGE */
- { 0xfd38, KEY_EDIT }, /* EDIT IMAGE */
-
- { 0xe924, KEY_REWIND }, /* rewind (<<) */
- { 0xea25, KEY_PLAY }, /* play ( >) */
- { 0xeb26, KEY_FORWARD }, /* forward (>>) */
- { 0xec27, KEY_RECORD }, /* record ( o) */
- { 0xed28, KEY_STOP }, /* stop ([]) */
- { 0xee29, KEY_PAUSE }, /* pause ('') */
-
- { 0xe621, KEY_PREVIOUS }, /* prev */
- { 0xfe39, KEY_SWITCHVIDEOMODE }, /* F SCR */
- { 0xe823, KEY_NEXT }, /* next */
- { 0xde19, KEY_MENU }, /* MENU */
- { 0xff3a, KEY_LANGUAGE }, /* AUDIO */
-
- { 0xc702, KEY_POWER }, /* POWER */
+ { 0x2c, KEY_TV }, /* TV */
+ { 0x2d, KEY_VCR }, /* VCR */
+ { 0x04, KEY_DVD }, /* DVD */
+ { 0x06, KEY_AUDIO }, /* MUSIC */
+
+ { 0x2e, KEY_RADIO }, /* RADIO */
+ { 0x05, KEY_DIRECTORY }, /* PHOTO */
+ { 0x2f, KEY_INFO }, /* TV-PREVIEW */
+ { 0x30, KEY_LIST }, /* CHANNEL-LST */
+
+ { 0x1b, KEY_SETUP }, /* SETUP */
+ { 0x31, KEY_VIDEO }, /* VIDEO DESKTOP */
+
+ { 0x08, KEY_VOLUMEDOWN }, /* VOL - */
+ { 0x09, KEY_VOLUMEUP }, /* VOL + */
+ { 0x0b, KEY_CHANNELUP }, /* CHAN + */
+ { 0x0c, KEY_CHANNELDOWN }, /* CHAN - */
+ { 0x00, KEY_MUTE }, /* MUTE */
+
+ { 0x32, KEY_RED }, /* red */
+ { 0x33, KEY_GREEN }, /* green */
+ { 0x34, KEY_YELLOW }, /* yellow */
+ { 0x35, KEY_BLUE }, /* blue */
+ { 0x16, KEY_TEXT }, /* TXT */
+
+ { 0x0d, KEY_1 },
+ { 0x0e, KEY_2 },
+ { 0x0f, KEY_3 },
+ { 0x10, KEY_4 },
+ { 0x11, KEY_5 },
+ { 0x12, KEY_6 },
+ { 0x13, KEY_7 },
+ { 0x14, KEY_8 },
+ { 0x15, KEY_9 },
+ { 0x17, KEY_0 },
+ { 0x1c, KEY_SEARCH }, /* TV/RAD, CH SRC */
+ { 0x20, KEY_DELETE }, /* DELETE */
+
+ { 0x36, KEY_KEYBOARD }, /* RENAME */
+ { 0x18, KEY_SCREEN }, /* SNAPSHOT */
+
+ { 0x1a, KEY_UP }, /* up */
+ { 0x22, KEY_DOWN }, /* down */
+ { 0x1d, KEY_LEFT }, /* left */
+ { 0x1f, KEY_RIGHT }, /* right */
+ { 0x1e, KEY_OK }, /* OK */
+
+ { 0x37, KEY_SELECT }, /* ACQUIRE IMAGE */
+ { 0x38, KEY_EDIT }, /* EDIT IMAGE */
+
+ { 0x24, KEY_REWIND }, /* rewind (<<) */
+ { 0x25, KEY_PLAY }, /* play ( >) */
+ { 0x26, KEY_FORWARD }, /* forward (>>) */
+ { 0x27, KEY_RECORD }, /* record ( o) */
+ { 0x28, KEY_STOP }, /* stop ([]) */
+ { 0x29, KEY_PAUSE }, /* pause ('') */
+
+ { 0x21, KEY_PREVIOUS }, /* prev */
+ { 0x39, KEY_SWITCHVIDEOMODE }, /* F SCR */
+ { 0x23, KEY_NEXT }, /* next */
+ { 0x19, KEY_MENU }, /* MENU */
+ { 0x3a, KEY_LANGUAGE }, /* AUDIO */
+
+ { 0x02, KEY_POWER }, /* POWER */
};
static struct rc_map_list medion_x10_map = {
diff --git a/drivers/media/rc/keymaps/rc-snapstream-firefly.c b/drivers/media/rc/keymaps/rc-snapstream-firefly.c
index ef146520931c..c7f33ec719b4 100644
--- a/drivers/media/rc/keymaps/rc-snapstream-firefly.c
+++ b/drivers/media/rc/keymaps/rc-snapstream-firefly.c
@@ -22,63 +22,63 @@
#include <media/rc-map.h>
static struct rc_map_table snapstream_firefly[] = {
- { 0xf12c, KEY_ZOOM }, /* Maximize */
- { 0xc702, KEY_CLOSE },
-
- { 0xd20d, KEY_1 },
- { 0xd30e, KEY_2 },
- { 0xd40f, KEY_3 },
- { 0xd510, KEY_4 },
- { 0xd611, KEY_5 },
- { 0xd712, KEY_6 },
- { 0xd813, KEY_7 },
- { 0xd914, KEY_8 },
- { 0xda15, KEY_9 },
- { 0xdc17, KEY_0 },
- { 0xdb16, KEY_BACK },
- { 0xdd18, KEY_KPENTER }, /* ent */
-
- { 0xce09, KEY_VOLUMEUP },
- { 0xcd08, KEY_VOLUMEDOWN },
- { 0xcf0a, KEY_MUTE },
- { 0xd00b, KEY_CHANNELUP },
- { 0xd10c, KEY_CHANNELDOWN },
- { 0xc500, KEY_VENDOR }, /* firefly */
-
- { 0xf32e, KEY_INFO },
- { 0xf42f, KEY_OPTION },
-
- { 0xe21d, KEY_LEFT },
- { 0xe41f, KEY_RIGHT },
- { 0xe722, KEY_DOWN },
- { 0xdf1a, KEY_UP },
- { 0xe31e, KEY_OK },
-
- { 0xe11c, KEY_MENU },
- { 0xe520, KEY_EXIT },
-
- { 0xec27, KEY_RECORD },
- { 0xea25, KEY_PLAY },
- { 0xed28, KEY_STOP },
- { 0xe924, KEY_REWIND },
- { 0xeb26, KEY_FORWARD },
- { 0xee29, KEY_PAUSE },
- { 0xf02b, KEY_PREVIOUS },
- { 0xef2a, KEY_NEXT },
-
- { 0xcb06, KEY_AUDIO }, /* Music */
- { 0xca05, KEY_IMAGES }, /* Photos */
- { 0xc904, KEY_DVD },
- { 0xc803, KEY_TV },
- { 0xcc07, KEY_VIDEO },
-
- { 0xc601, KEY_HELP },
- { 0xf22d, KEY_MODE }, /* Mouse */
-
- { 0xde19, KEY_A },
- { 0xe01b, KEY_B },
- { 0xe621, KEY_C },
- { 0xe823, KEY_D },
+ { 0x2c, KEY_ZOOM }, /* Maximize */
+ { 0x02, KEY_CLOSE },
+
+ { 0x0d, KEY_1 },
+ { 0x0e, KEY_2 },
+ { 0x0f, KEY_3 },
+ { 0x10, KEY_4 },
+ { 0x11, KEY_5 },
+ { 0x12, KEY_6 },
+ { 0x13, KEY_7 },
+ { 0x14, KEY_8 },
+ { 0x15, KEY_9 },
+ { 0x17, KEY_0 },
+ { 0x16, KEY_BACK },
+ { 0x18, KEY_KPENTER }, /* ent */
+
+ { 0x09, KEY_VOLUMEUP },
+ { 0x08, KEY_VOLUMEDOWN },
+ { 0x0a, KEY_MUTE },
+ { 0x0b, KEY_CHANNELUP },
+ { 0x0c, KEY_CHANNELDOWN },
+ { 0x00, KEY_VENDOR }, /* firefly */
+
+ { 0x2e, KEY_INFO },
+ { 0x2f, KEY_OPTION },
+
+ { 0x1d, KEY_LEFT },
+ { 0x1f, KEY_RIGHT },
+ { 0x22, KEY_DOWN },
+ { 0x1a, KEY_UP },
+ { 0x1e, KEY_OK },
+
+ { 0x1c, KEY_MENU },
+ { 0x20, KEY_EXIT },
+
+ { 0x27, KEY_RECORD },
+ { 0x25, KEY_PLAY },
+ { 0x28, KEY_STOP },
+ { 0x24, KEY_REWIND },
+ { 0x26, KEY_FORWARD },
+ { 0x29, KEY_PAUSE },
+ { 0x2b, KEY_PREVIOUS },
+ { 0x2a, KEY_NEXT },
+
+ { 0x06, KEY_AUDIO }, /* Music */
+ { 0x05, KEY_IMAGES }, /* Photos */
+ { 0x04, KEY_DVD },
+ { 0x03, KEY_TV },
+ { 0x07, KEY_VIDEO },
+
+ { 0x01, KEY_HELP },
+ { 0x2d, KEY_MODE }, /* Mouse */
+
+ { 0x19, KEY_A },
+ { 0x1b, KEY_B },
+ { 0x21, KEY_C },
+ { 0x23, KEY_D },
};
static struct rc_map_list snapstream_firefly_map = {
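In the two keymap hunks above, every new scancode appears to be simply the low byte of the former 16-bit value (0xd00b -> 0x0b, 0xf12c -> 0x2c, 0xc500 -> 0x00, and so on), apparently matching the driver-side move to single-byte scancodes. A minimal sketch of that relationship, for illustration only (the helper name is invented):

	#include <linux/types.h>

	/* illustration only: how the new table entries relate to the old ones */
	static inline u8 old_to_new_scancode(u16 old)
	{
		return old & 0x00ff;	/* e.g. 0xd00b -> 0x0b, 0xc500 -> 0x00 */
	}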
diff --git a/drivers/media/rc/keymaps/rc-videomate-m1f.c b/drivers/media/rc/keymaps/rc-videomate-m1f.c
index 3bd1de1f585c..23ee05e53949 100644
--- a/drivers/media/rc/keymaps/rc-videomate-m1f.c
+++ b/drivers/media/rc/keymaps/rc-videomate-m1f.c
@@ -1,4 +1,4 @@
-/* videomate-m1f.h - Keytable for videomate_m1f Remote Controller
+/* videomate-k100.h - Keytable for videomate_k100 Remote Controller
*
* keymap imported from ir-keymaps.c
*
@@ -13,7 +13,7 @@
#include <media/rc-map.h>
#include <linux/module.h>
-static struct rc_map_table videomate_m1f[] = {
+static struct rc_map_table videomate_k100[] = {
{ 0x01, KEY_POWER },
{ 0x31, KEY_TUNER },
{ 0x33, KEY_VIDEO },
@@ -67,27 +67,27 @@ static struct rc_map_table videomate_m1f[] = {
{ 0x18, KEY_TEXT },
};
-static struct rc_map_list videomate_m1f_map = {
+static struct rc_map_list videomate_k100_map = {
.map = {
- .scan = videomate_m1f,
- .size = ARRAY_SIZE(videomate_m1f),
+ .scan = videomate_k100,
+ .size = ARRAY_SIZE(videomate_k100),
.rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */
- .name = RC_MAP_VIDEOMATE_M1F,
+ .name = RC_MAP_VIDEOMATE_K100,
}
};
-static int __init init_rc_map_videomate_m1f(void)
+static int __init init_rc_map_videomate_k100(void)
{
- return rc_map_register(&videomate_m1f_map);
+ return rc_map_register(&videomate_k100_map);
}
-static void __exit exit_rc_map_videomate_m1f(void)
+static void __exit exit_rc_map_videomate_k100(void)
{
- rc_map_unregister(&videomate_m1f_map);
+ rc_map_unregister(&videomate_k100_map);
}
-module_init(init_rc_map_videomate_m1f)
-module_exit(exit_rc_map_videomate_m1f)
+module_init(init_rc_map_videomate_k100)
+module_exit(exit_rc_map_videomate_k100)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pavel Osnova <pvosnova@gmail.com>");
diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c
index 27997a9ceb0d..ca12d3289bfe 100644
--- a/drivers/media/rc/lirc_dev.c
+++ b/drivers/media/rc/lirc_dev.c
@@ -38,7 +38,7 @@
#include <media/lirc.h>
#include <media/lirc_dev.h>
-static int debug;
+static bool debug;
#define IRCTL_DEV_NAME "BaseRemoteCtl"
#define NOPLUG -1
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index 20bb12d6fbbe..21105bf9594d 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -156,9 +156,9 @@
/* module parameters */
#ifdef CONFIG_USB_DEBUG
-static int debug = 1;
+static bool debug = 1;
#else
-static int debug;
+static bool debug;
#endif
#define mce_dbg(dev, fmt, ...) \
diff --git a/drivers/media/rc/rc-core-priv.h b/drivers/media/rc/rc-core-priv.h
index c6ca870e8b7e..b72f8580e317 100644
--- a/drivers/media/rc/rc-core-priv.h
+++ b/drivers/media/rc/rc-core-priv.h
@@ -84,6 +84,11 @@ struct ir_raw_event_ctrl {
unsigned count;
unsigned wanted_bits;
} rc5_sz;
+ struct sanyo_dec {
+ int state;
+ unsigned count;
+ u64 bits;
+ } sanyo;
struct mce_kbd_dec {
struct input_dev *idev;
struct timer_list rx_timeout;
@@ -193,6 +198,13 @@ static inline void load_jvc_decode(void) { }
static inline void load_sony_decode(void) { }
#endif
+/* from ir-sanyo-decoder.c */
+#ifdef CONFIG_IR_SANYO_DECODER_MODULE
+#define load_sanyo_decode() request_module("ir-sanyo-decoder")
+#else
+static inline void load_sanyo_decode(void) { }
+#endif
+
/* from ir-mce_kbd-decoder.c */
#ifdef CONFIG_IR_MCE_KBD_DECODER_MODULE
#define load_mce_kbd_decode() request_module("ir-mce_kbd-decoder")
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index 29f900065d8a..f6a930b70c69 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -715,7 +715,7 @@ static void ir_close(struct input_dev *idev)
}
/* class for /sys/class/rc */
-static char *ir_devnode(struct device *dev, mode_t *mode)
+static char *ir_devnode(struct device *dev, umode_t *mode)
{
return kasprintf(GFP_KERNEL, "rc/%s", dev_name(dev));
}
@@ -736,6 +736,7 @@ static struct {
{ RC_TYPE_JVC, "jvc" },
{ RC_TYPE_SONY, "sony" },
{ RC_TYPE_RC5_SZ, "rc-5-sz" },
+ { RC_TYPE_SANYO, "sanyo" },
{ RC_TYPE_MCE_KBD, "mce_kbd" },
{ RC_TYPE_LIRC, "lirc" },
{ RC_TYPE_OTHER, "other" },
diff --git a/drivers/media/rc/redrat3.c b/drivers/media/rc/redrat3.c
index 07322fb75eff..ad95c67a4dba 100644
--- a/drivers/media/rc/redrat3.c
+++ b/drivers/media/rc/redrat3.c
@@ -286,12 +286,6 @@ static void redrat3_issue_async(struct redrat3_dev *rr3)
rr3_ftr(rr3->dev, "Entering %s\n", __func__);
- if (!rr3->det_enabled) {
- dev_warn(rr3->dev, "not issuing async read, "
- "detector not enabled\n");
- return;
- }
-
memset(rr3->bulk_in_buf, 0, rr3->ep_in->wMaxPacketSize);
res = usb_submit_urb(rr3->read_urb, GFP_ATOMIC);
if (res)
@@ -827,6 +821,7 @@ out:
static void redrat3_handle_async(struct urb *urb, struct pt_regs *regs)
{
struct redrat3_dev *rr3;
+ int ret;
if (!urb)
return;
@@ -840,15 +835,13 @@ static void redrat3_handle_async(struct urb *urb, struct pt_regs *regs)
rr3_ftr(rr3->dev, "Entering %s\n", __func__);
- if (!rr3->det_enabled) {
- rr3_dbg(rr3->dev, "received a read callback but detector "
- "disabled - ignoring\n");
- return;
- }
-
switch (urb->status) {
case 0:
- redrat3_get_ir_data(rr3, urb->actual_length);
+ ret = redrat3_get_ir_data(rr3, urb->actual_length);
+ if (!ret) {
+ /* no error, prepare to read more */
+ redrat3_issue_async(rr3);
+ }
break;
case -ECONNRESET:
@@ -865,11 +858,6 @@ static void redrat3_handle_async(struct urb *urb, struct pt_regs *regs)
rr3->pkttype = 0;
break;
}
-
- if (!rr3->transmitting)
- redrat3_issue_async(rr3);
- else
- rr3_dbg(rr3->dev, "IR transmit in progress\n");
}
static void redrat3_write_bulk_callback(struct urb *urb, struct pt_regs *regs)
@@ -896,21 +884,24 @@ static u16 mod_freq_to_val(unsigned int mod_freq)
return (u16)(65536 - (mult / mod_freq));
}
-static int redrat3_set_tx_carrier(struct rc_dev *dev, u32 carrier)
+static int redrat3_set_tx_carrier(struct rc_dev *rcdev, u32 carrier)
{
- struct redrat3_dev *rr3 = dev->priv;
+ struct redrat3_dev *rr3 = rcdev->priv;
+ struct device *dev = rr3->dev;
+ rr3_dbg(dev, "Setting modulation frequency to %u", carrier);
rr3->carrier = carrier;
return carrier;
}
-static int redrat3_transmit_ir(struct rc_dev *rcdev, int *txbuf, u32 n)
+static int redrat3_transmit_ir(struct rc_dev *rcdev, unsigned *txbuf,
+ unsigned count)
{
struct redrat3_dev *rr3 = rcdev->priv;
struct device *dev = rr3->dev;
struct redrat3_signal_header header;
- int i, j, count, ret, ret_len, offset;
+ int i, j, ret, ret_len, offset;
int lencheck, cur_sample_len, pipe;
char *buffer = NULL, *sigdata = NULL;
int *sample_lens = NULL;
@@ -928,20 +919,13 @@ static int redrat3_transmit_ir(struct rc_dev *rcdev, int *txbuf, u32 n)
return -EAGAIN;
}
- count = n / sizeof(int);
if (count > (RR3_DRIVER_MAXLENS * 2))
return -EINVAL;
+ /* rr3 will disable rc detector on transmit */
+ rr3->det_enabled = false;
rr3->transmitting = true;
- redrat3_disable_detector(rr3);
-
- if (rr3->det_enabled) {
- dev_err(dev, "%s: cannot tx while rx is enabled\n", __func__);
- ret = -EIO;
- goto out;
- }
-
sample_lens = kzalloc(sizeof(int) * RR3_DRIVER_MAXLENS, GFP_KERNEL);
if (!sample_lens) {
ret = -ENOMEM;
@@ -1055,7 +1039,7 @@ static int redrat3_transmit_ir(struct rc_dev *rcdev, int *txbuf, u32 n)
if (ret < 0)
dev_err(dev, "Error: control msg send failed, rc %d\n", ret);
else
- ret = n;
+ ret = count;
out:
kfree(sample_lens);
@@ -1063,8 +1047,8 @@ out:
kfree(sigdata);
rr3->transmitting = false;
-
- redrat3_enable_detector(rr3);
+ /* rr3 re-enables rc detector because it was enabled before */
+ rr3->det_enabled = true;
return ret;
}
diff --git a/drivers/media/rc/streamzap.c b/drivers/media/rc/streamzap.c
index b1d29d09eeae..d6f4bfe09391 100644
--- a/drivers/media/rc/streamzap.c
+++ b/drivers/media/rc/streamzap.c
@@ -43,9 +43,9 @@
#define DRIVER_DESC "Streamzap Remote Control driver"
#ifdef CONFIG_USB_DEBUG
-static int debug = 1;
+static bool debug = 1;
#else
-static int debug;
+static bool debug;
#endif
#define USB_STREAMZAP_VENDOR_ID 0x0e9c
diff --git a/drivers/media/rc/winbond-cir.c b/drivers/media/rc/winbond-cir.c
index 13f54b51194a..b09c5fae489b 100644
--- a/drivers/media/rc/winbond-cir.c
+++ b/drivers/media/rc/winbond-cir.c
@@ -226,11 +226,11 @@ module_param(protocol, uint, 0444);
MODULE_PARM_DESC(protocol, "IR protocol to use for the power-on command "
"(0 = RC5, 1 = NEC, 2 = RC6A, default)");
-static int invert; /* default = 0 */
+static bool invert; /* default = 0 */
module_param(invert, bool, 0444);
MODULE_PARM_DESC(invert, "Invert the signal from the IR receiver");
-static int txandrx; /* default = 0 */
+static bool txandrx; /* default = 0 */
module_param(txandrx, bool, 0444);
MODULE_PARM_DESC(invert, "Allow simultaneous TX and RX");
@@ -1176,6 +1176,6 @@ wbcir_exit(void)
module_init(wbcir_init);
module_exit(wbcir_exit);
-MODULE_AUTHOR("David Hrdeman <david@hardeman.nu>");
+MODULE_AUTHOR("David Härdeman <david@hardeman.nu>");
MODULE_DESCRIPTION("Winbond SuperI/O Consumer IR Driver");
MODULE_LICENSE("GPL");
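The int-to-bool conversions in lirc_dev, mceusb, streamzap and winbond-cir above bring each variable's type in line with the "bool" type named in the corresponding module_param() call, which newer moduleparam code expects to match. A minimal stand-alone sketch, assuming a hypothetical module:

	#include <linux/module.h>
	#include <linux/moduleparam.h>

	static bool debug;			/* was "static int debug;" */
	module_param(debug, bool, 0644);	/* declared type now matches the "bool" parameter type */
	MODULE_PARM_DESC(debug, "Enable debugging output");
	MODULE_LICENSE("GPL");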
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index b303a3f8a9f8..9adada0d7447 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -533,6 +533,13 @@ config VIDEO_ADP1653
This is a driver for the ADP1653 flash controller. It is used for
example in Nokia N900.
+config VIDEO_AS3645A
+ tristate "AS3645A flash driver support"
+ depends on I2C && VIDEO_V4L2 && MEDIA_CONTROLLER
+ ---help---
+ This is a driver for the AS3645A and LM3555 flash controllers. It has
+	  built-in control for flash, torch and indicator LEDs.
+
comment "Video improvement chips"
config VIDEO_UPD64031A
@@ -580,25 +587,6 @@ config VIDEO_M52790
endmenu # encoder / decoder chips
-config VIDEO_SH_VOU
- tristate "SuperH VOU video output driver"
- depends on VIDEO_DEV && ARCH_SHMOBILE
- select VIDEOBUF_DMA_CONTIG
- help
- Support for the Video Output Unit (VOU) on SuperH SoCs.
-
-config VIDEO_VIU
- tristate "Freescale VIU Video Driver"
- depends on VIDEO_V4L2 && PPC_MPC512x
- select VIDEOBUF_DMA_CONTIG
- default y
- ---help---
- Support for Freescale VIU video driver. This device captures
- video data, or overlays video on DIU frame buffer.
-
- Say Y here if you want to enable VIU device on MPC5121e Rev2+.
- In doubt, say N.
-
config VIDEO_VIVI
tristate "Virtual Video Driver"
depends on VIDEO_DEV && VIDEO_V4L2 && !SPARC32 && !SPARC64
@@ -613,66 +601,130 @@ config VIDEO_VIVI
Say Y here if you want to test video apps or debug V4L devices.
In doubt, say N.
-source "drivers/media/video/davinci/Kconfig"
+#
+# USB Multimedia device configuration
+#
-source "drivers/media/video/omap/Kconfig"
+menuconfig V4L_USB_DRIVERS
+ bool "V4L USB devices"
+ depends on USB
+ default y
-source "drivers/media/video/bt8xx/Kconfig"
+if V4L_USB_DRIVERS
-config VIDEO_PMS
- tristate "Mediavision Pro Movie Studio Video For Linux"
- depends on ISA && VIDEO_V4L2
- help
- Say Y if you have such a thing.
+source "drivers/media/video/uvc/Kconfig"
+
+source "drivers/media/video/gspca/Kconfig"
+
+source "drivers/media/video/pvrusb2/Kconfig"
+
+source "drivers/media/video/hdpvr/Kconfig"
+
+source "drivers/media/video/em28xx/Kconfig"
+
+source "drivers/media/video/tlg2300/Kconfig"
+
+source "drivers/media/video/cx231xx/Kconfig"
+
+source "drivers/media/video/tm6000/Kconfig"
+
+source "drivers/media/video/usbvision/Kconfig"
+
+source "drivers/media/video/et61x251/Kconfig"
+
+source "drivers/media/video/sn9c102/Kconfig"
+
+source "drivers/media/video/pwc/Kconfig"
+
+source "drivers/media/video/cpia2/Kconfig"
+
+config USB_ZR364XX
+ tristate "USB ZR364XX Camera support"
+ depends on VIDEO_V4L2
+ select VIDEOBUF_GEN
+ select VIDEOBUF_VMALLOC
+ ---help---
+ Say Y here if you want to connect this type of camera to your
+ computer's USB port.
+ See <file:Documentation/video4linux/zr364xx.txt> for more info
+ and list of supported cameras.
To compile this driver as a module, choose M here: the
- module will be called pms.
+ module will be called zr364xx.
-config VIDEO_BWQCAM
- tristate "Quickcam BW Video For Linux"
- depends on PARPORT && VIDEO_V4L2
- help
- Say Y have if you the black and white version of the QuickCam
- camera. See the next option for the color version.
+config USB_STKWEBCAM
+ tristate "USB Syntek DC1125 Camera support"
+ depends on VIDEO_V4L2 && EXPERIMENTAL
+ ---help---
+ Say Y here if you want to use this type of camera.
+ Supported devices are typically found in some Asus laptops,
+ with USB id 174f:a311 and 05e1:0501. Other Syntek cameras
+ may be supported by the stk11xx driver, from which this is
+ derived, see <http://sourceforge.net/projects/syntekdriver/>
To compile this driver as a module, choose M here: the
- module will be called bw-qcam.
+ module will be called stkwebcam.
-config VIDEO_CQCAM
- tristate "QuickCam Colour Video For Linux (EXPERIMENTAL)"
- depends on EXPERIMENTAL && PARPORT && VIDEO_V4L2
+config USB_S2255
+ tristate "USB Sensoray 2255 video capture device"
+ depends on VIDEO_V4L2
+ select VIDEOBUF_VMALLOC
+ default n
help
- This is the video4linux driver for the colour version of the
- Connectix QuickCam. If you have one of these cameras, say Y here,
- otherwise say N. This driver does not work with the original
- monochrome QuickCam, QuickCam VC or QuickClip. It is also available
- as a module (c-qcam).
- Read <file:Documentation/video4linux/CQcam.txt> for more information.
+ Say Y here if you want support for the Sensoray 2255 USB device.
+ This driver can be compiled as a module, called s2255drv.
-config VIDEO_W9966
- tristate "W9966CF Webcam (FlyCam Supra and others) Video For Linux"
- depends on PARPORT_1284 && PARPORT && VIDEO_V4L2
- help
- Video4linux driver for Winbond's w9966 based Webcams.
- Currently tested with the LifeView FlyCam Supra.
- If you have one of these cameras, say Y here
- otherwise say N.
- This driver is also available as a module (w9966).
+endif # V4L_USB_DRIVERS
- Check out <file:Documentation/video4linux/w9966.txt> for more
- information.
+#
+# PCI drivers configuration
+#
-source "drivers/media/video/cpia2/Kconfig"
+menuconfig V4L_PCI_DRIVERS
+ bool "V4L PCI(e) devices"
+ depends on PCI
+ default y
+ ---help---
+ Say Y here to enable support for these PCI(e) drivers.
-config VIDEO_VINO
- tristate "SGI Vino Video For Linux (EXPERIMENTAL)"
- depends on I2C && SGI_IP22 && EXPERIMENTAL && VIDEO_V4L2
- select VIDEO_SAA7191 if VIDEO_HELPER_CHIPS_AUTO
- help
- Say Y here to build in support for the Vino video input system found
- on SGI Indy machines.
+if V4L_PCI_DRIVERS
-source "drivers/media/video/zoran/Kconfig"
+source "drivers/media/video/au0828/Kconfig"
+
+source "drivers/media/video/bt8xx/Kconfig"
+
+source "drivers/media/video/cx18/Kconfig"
+
+source "drivers/media/video/cx23885/Kconfig"
+
+source "drivers/media/video/cx25821/Kconfig"
+
+source "drivers/media/video/cx88/Kconfig"
+
+config VIDEO_HEXIUM_GEMINI
+ tristate "Hexium Gemini frame grabber"
+ depends on PCI && VIDEO_V4L2 && I2C
+ select VIDEO_SAA7146_VV
+ ---help---
+ This is a video4linux driver for the Hexium Gemini frame
+ grabber card by Hexium. Please note that the Gemini Dual
+ card is *not* fully supported.
+
+ To compile this driver as a module, choose M here: the
+ module will be called hexium_gemini.
+
+config VIDEO_HEXIUM_ORION
+ tristate "Hexium HV-PCI6 and Orion frame grabber"
+ depends on PCI && VIDEO_V4L2 && I2C
+ select VIDEO_SAA7146_VV
+ ---help---
+ This is a video4linux driver for the Hexium HV-PCI6 and
+ Orion frame grabber cards by Hexium.
+
+ To compile this driver as a module, choose M here: the
+ module will be called hexium_orion.
+
+source "drivers/media/video/ivtv/Kconfig"
config VIDEO_MEYE
tristate "Sony Vaio Picturebook Motion Eye Video For Linux"
@@ -688,8 +740,6 @@ config VIDEO_MEYE
To compile this driver as a module, choose M here: the
module will be called meye.
-source "drivers/media/video/saa7134/Kconfig"
-
config VIDEO_MXB
tristate "Siemens-Nixdorf 'Multimedia eXtension Board'"
depends on PCI && VIDEO_V4L2 && I2C
@@ -706,28 +756,119 @@ config VIDEO_MXB
To compile this driver as a module, choose M here: the
module will be called mxb.
-config VIDEO_HEXIUM_ORION
- tristate "Hexium HV-PCI6 and Orion frame grabber"
- depends on PCI && VIDEO_V4L2 && I2C
- select VIDEO_SAA7146_VV
+source "drivers/media/video/saa7134/Kconfig"
+
+source "drivers/media/video/saa7164/Kconfig"
+
+source "drivers/media/video/zoran/Kconfig"
+
+endif # V4L_PCI_DRIVERS
+
+#
+# ISA & parallel port drivers configuration
+#
+
+menuconfig V4L_ISA_PARPORT_DRIVERS
+ bool "V4L ISA and parallel port devices"
+ depends on ISA || PARPORT
+ default n
---help---
- This is a video4linux driver for the Hexium HV-PCI6 and
- Orion frame grabber cards by Hexium.
+ Say Y here to enable support for these ISA and parallel port drivers.
+
+if V4L_ISA_PARPORT_DRIVERS
+
+config VIDEO_BWQCAM
+ tristate "Quickcam BW Video For Linux"
+ depends on PARPORT && VIDEO_V4L2
+ help
+	  Say Y if you have the black and white version of the QuickCam
+ camera. See the next option for the color version.
To compile this driver as a module, choose M here: the
- module will be called hexium_orion.
+ module will be called bw-qcam.
-config VIDEO_HEXIUM_GEMINI
- tristate "Hexium Gemini frame grabber"
- depends on PCI && VIDEO_V4L2 && I2C
- select VIDEO_SAA7146_VV
- ---help---
- This is a video4linux driver for the Hexium Gemini frame
- grabber card by Hexium. Please note that the Gemini Dual
- card is *not* fully supported.
+config VIDEO_CQCAM
+ tristate "QuickCam Colour Video For Linux"
+ depends on PARPORT && VIDEO_V4L2
+ help
+ This is the video4linux driver for the colour version of the
+ Connectix QuickCam. If you have one of these cameras, say Y here,
+ otherwise say N. This driver does not work with the original
+ monochrome QuickCam, QuickCam VC or QuickClip. It is also available
+ as a module (c-qcam).
+ Read <file:Documentation/video4linux/CQcam.txt> for more information.
+
+config VIDEO_PMS
+ tristate "Mediavision Pro Movie Studio Video For Linux"
+ depends on ISA && VIDEO_V4L2
+ help
+ Say Y if you have the ISA Mediavision Pro Movie Studio
+ capture card.
To compile this driver as a module, choose M here: the
- module will be called hexium_gemini.
+ module will be called pms.
+
+config VIDEO_W9966
+ tristate "W9966CF Webcam (FlyCam Supra and others) Video For Linux"
+ depends on PARPORT_1284 && PARPORT && VIDEO_V4L2
+ help
+ Video4linux driver for Winbond's w9966 based Webcams.
+ Currently tested with the LifeView FlyCam Supra.
+ If you have one of these cameras, say Y here
+ otherwise say N.
+ This driver is also available as a module (w9966).
+
+ Check out <file:Documentation/video4linux/w9966.txt> for more
+ information.
+
+endif # V4L_ISA_PARPORT_DRIVERS
+
+menuconfig V4L_PLATFORM_DRIVERS
+ bool "V4L platform devices"
+ default n
+ ---help---
+ Say Y here to enable support for platform-specific V4L drivers.
+
+if V4L_PLATFORM_DRIVERS
+
+source "drivers/media/video/marvell-ccic/Kconfig"
+
+config VIDEO_VIA_CAMERA
+ tristate "VIAFB camera controller support"
+ depends on FB_VIA
+ select VIDEOBUF_DMA_SG
+ select VIDEO_OV7670
+ help
+ Driver support for the integrated camera controller in VIA
+ Chrome9 chipsets. Currently only tested on OLPC xo-1.5 systems
+ with ov7670 sensors.
+
+#
+# Platform multimedia device configuration
+#
+
+source "drivers/media/video/davinci/Kconfig"
+
+source "drivers/media/video/omap/Kconfig"
+
+config VIDEO_SH_VOU
+ tristate "SuperH VOU video output driver"
+ depends on VIDEO_DEV && ARCH_SHMOBILE
+ select VIDEOBUF_DMA_CONTIG
+ help
+ Support for the Video Output Unit (VOU) on SuperH SoCs.
+
+config VIDEO_VIU
+ tristate "Freescale VIU Video Driver"
+ depends on VIDEO_V4L2 && PPC_MPC512x
+ select VIDEOBUF_DMA_CONTIG
+ default y
+ ---help---
+ Support for Freescale VIU video driver. This device captures
+ video data, or overlays video on DIU frame buffer.
+
+ Say Y here if you want to enable VIU device on MPC5121e Rev2+.
+ In doubt, say N.
config VIDEO_TIMBERDALE
tristate "Support for timberdale Video In/LogiWIN"
@@ -739,21 +880,13 @@ config VIDEO_TIMBERDALE
---help---
Add support for the Video In peripherial of the timberdale FPGA.
-source "drivers/media/video/cx88/Kconfig"
-
-source "drivers/media/video/cx23885/Kconfig"
-
-source "drivers/media/video/cx25821/Kconfig"
-
-source "drivers/media/video/au0828/Kconfig"
-
-source "drivers/media/video/ivtv/Kconfig"
-
-source "drivers/media/video/cx18/Kconfig"
-
-source "drivers/media/video/saa7164/Kconfig"
-
-source "drivers/media/video/marvell-ccic/Kconfig"
+config VIDEO_VINO
+ tristate "SGI Vino Video For Linux"
+ depends on I2C && SGI_IP22 && VIDEO_V4L2
+ select VIDEO_SAA7191 if VIDEO_HELPER_CHIPS_AUTO
+ help
+ Say Y here to build in support for the Vino video input system found
+ on SGI Indy machines.
config VIDEO_M32R_AR
tristate "AR devices"
@@ -774,16 +907,6 @@ config VIDEO_M32R_AR_M64278
To compile this driver as a module, choose M here: the
module will be called arv.
-config VIDEO_VIA_CAMERA
- tristate "VIAFB camera controller support"
- depends on FB_VIA
- select VIDEOBUF_DMA_SG
- select VIDEO_OV7670
- help
- Driver support for the integrated camera controller in VIA
- Chrome9 chipsets. Currently only tested on OLPC xo-1.5 systems
- with ov7670 sensors.
-
config VIDEO_OMAP3
tristate "OMAP 3 Camera support (EXPERIMENTAL)"
depends on OMAP_IOVMM && VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API && ARCH_OMAP3 && EXPERIMENTAL
@@ -1002,78 +1125,7 @@ config VIDEO_S5P_MIPI_CSIS
source "drivers/media/video/s5p-tv/Kconfig"
-#
-# USB Multimedia device configuration
-#
-
-menuconfig V4L_USB_DRIVERS
- bool "V4L USB devices"
- depends on USB
- default y
-
-if V4L_USB_DRIVERS && USB
-
-source "drivers/media/video/uvc/Kconfig"
-
-source "drivers/media/video/gspca/Kconfig"
-
-source "drivers/media/video/pvrusb2/Kconfig"
-
-source "drivers/media/video/hdpvr/Kconfig"
-
-source "drivers/media/video/em28xx/Kconfig"
-
-source "drivers/media/video/tlg2300/Kconfig"
-
-source "drivers/media/video/cx231xx/Kconfig"
-
-source "drivers/media/video/tm6000/Kconfig"
-
-source "drivers/media/video/usbvision/Kconfig"
-
-source "drivers/media/video/et61x251/Kconfig"
-
-source "drivers/media/video/sn9c102/Kconfig"
-
-source "drivers/media/video/pwc/Kconfig"
-
-config USB_ZR364XX
- tristate "USB ZR364XX Camera support"
- depends on VIDEO_V4L2
- select VIDEOBUF_GEN
- select VIDEOBUF_VMALLOC
- ---help---
- Say Y here if you want to connect this type of camera to your
- computer's USB port.
- See <file:Documentation/video4linux/zr364xx.txt> for more info
- and list of supported cameras.
-
- To compile this driver as a module, choose M here: the
- module will be called zr364xx.
-
-config USB_STKWEBCAM
- tristate "USB Syntek DC1125 Camera support"
- depends on VIDEO_V4L2 && EXPERIMENTAL
- ---help---
- Say Y here if you want to use this type of camera.
- Supported devices are typically found in some Asus laptops,
- with USB id 174f:a311 and 05e1:0501. Other Syntek cameras
- may be supported by the stk11xx driver, from which this is
- derived, see <http://sourceforge.net/projects/syntekdriver/>
-
- To compile this driver as a module, choose M here: the
- module will be called stkwebcam.
-
-config USB_S2255
- tristate "USB Sensoray 2255 video capture device"
- depends on VIDEO_V4L2
- select VIDEOBUF_VMALLOC
- default n
- help
- Say Y here if you want support for the Sensoray 2255 USB device.
- This driver can be compiled as a module, called s2255drv.
-
-endif # V4L_USB_DRIVERS
+endif # V4L_PLATFORM_DRIVERS
endif # VIDEO_CAPTURE_DRIVERS
menuconfig V4L_MEM2MEM_DRIVERS
@@ -1098,6 +1150,23 @@ config VIDEO_MEM2MEM_TESTDEV
This is a virtual test device for the memory-to-memory driver
framework.
+config VIDEO_SAMSUNG_S5P_G2D
+ tristate "Samsung S5P and EXYNOS4 G2D 2d graphics accelerator driver"
+ depends on VIDEO_DEV && VIDEO_V4L2 && PLAT_S5P
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ default n
+ ---help---
+ This is a v4l2 driver for Samsung S5P and EXYNOS4 G2D
+ 2d graphics accelerator.
+
+config VIDEO_SAMSUNG_S5P_JPEG
+ tristate "Samsung S5P/Exynos4 JPEG codec driver (EXPERIMENTAL)"
+ depends on VIDEO_DEV && VIDEO_V4L2 && PLAT_S5P && EXPERIMENTAL
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ ---help---
+ This is a v4l2 driver for Samsung S5P and EXYNOS4 JPEG codec
config VIDEO_SAMSUNG_S5P_MFC
tristate "Samsung S5P MFC 5.1 Video Codec"
diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile
index 117f9c4b4cb9..354138804cda 100644
--- a/drivers/media/video/Makefile
+++ b/drivers/media/video/Makefile
@@ -74,6 +74,7 @@ obj-$(CONFIG_VIDEO_NOON010PC30) += noon010pc30.o
obj-$(CONFIG_VIDEO_M5MOLS) += m5mols/
obj-$(CONFIG_VIDEO_S5K6AA) += s5k6aa.o
obj-$(CONFIG_VIDEO_ADP1653) += adp1653.o
+obj-$(CONFIG_VIDEO_AS3645A) += as3645a.o
obj-$(CONFIG_SOC_CAMERA_IMX074) += imx074.o
obj-$(CONFIG_SOC_CAMERA_MT9M001) += mt9m001.o
@@ -177,9 +178,12 @@ obj-$(CONFIG_VIDEO_OMAP1) += omap1_camera.o
obj-$(CONFIG_VIDEO_ATMEL_ISI) += atmel-isi.o
obj-$(CONFIG_VIDEO_SAMSUNG_S5P_FIMC) += s5p-fimc/
+obj-$(CONFIG_VIDEO_SAMSUNG_S5P_JPEG) += s5p-jpeg/
obj-$(CONFIG_VIDEO_SAMSUNG_S5P_MFC) += s5p-mfc/
obj-$(CONFIG_VIDEO_SAMSUNG_S5P_TV) += s5p-tv/
+obj-$(CONFIG_VIDEO_SAMSUNG_S5P_G2D) += s5p-g2d/
+
obj-$(CONFIG_ARCH_DAVINCI) += davinci/
obj-$(CONFIG_VIDEO_SH_VOU) += sh_vou.o
diff --git a/drivers/media/video/adv7170.c b/drivers/media/video/adv7170.c
index 23ba5c37c3e4..879f1d839760 100644
--- a/drivers/media/video/adv7170.c
+++ b/drivers/media/video/adv7170.c
@@ -64,6 +64,11 @@ static inline struct adv7170 *to_adv7170(struct v4l2_subdev *sd)
static char *inputs[] = { "pass_through", "play_back" };
+static enum v4l2_mbus_pixelcode adv7170_codes[] = {
+ V4L2_MBUS_FMT_UYVY8_2X8,
+ V4L2_MBUS_FMT_UYVY8_1X16,
+};
+
/* ----------------------------------------------------------------------- */
static inline int adv7170_write(struct v4l2_subdev *sd, u8 reg, u8 value)
@@ -258,6 +263,60 @@ static int adv7170_s_routing(struct v4l2_subdev *sd,
return 0;
}
+static int adv7170_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
+ enum v4l2_mbus_pixelcode *code)
+{
+ if (index >= ARRAY_SIZE(adv7170_codes))
+ return -EINVAL;
+
+ *code = adv7170_codes[index];
+ return 0;
+}
+
+static int adv7170_g_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
+{
+ u8 val = adv7170_read(sd, 0x7);
+
+ if ((val & 0x40) == (1 << 6))
+ mf->code = V4L2_MBUS_FMT_UYVY8_1X16;
+ else
+ mf->code = V4L2_MBUS_FMT_UYVY8_2X8;
+
+ mf->colorspace = V4L2_COLORSPACE_SMPTE170M;
+ mf->width = 0;
+ mf->height = 0;
+ mf->field = V4L2_FIELD_ANY;
+
+ return 0;
+}
+
+static int adv7170_s_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
+{
+ u8 val = adv7170_read(sd, 0x7);
+ int ret;
+
+ switch (mf->code) {
+ case V4L2_MBUS_FMT_UYVY8_2X8:
+ val &= ~0x40;
+ break;
+
+ case V4L2_MBUS_FMT_UYVY8_1X16:
+ val |= 0x40;
+ break;
+
+ default:
+ v4l2_dbg(1, debug, sd,
+ "illegal v4l2_mbus_framefmt code: %d\n", mf->code);
+ return -EINVAL;
+ }
+
+ ret = adv7170_write(sd, 0x7, val);
+
+ return ret;
+}
+
static int adv7170_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *chip)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
@@ -274,6 +333,9 @@ static const struct v4l2_subdev_core_ops adv7170_core_ops = {
static const struct v4l2_subdev_video_ops adv7170_video_ops = {
.s_std_output = adv7170_s_std_output,
.s_routing = adv7170_s_routing,
+ .s_mbus_fmt = adv7170_s_fmt,
+ .g_mbus_fmt = adv7170_g_fmt,
+ .enum_mbus_fmt = adv7170_enum_fmt,
};
static const struct v4l2_subdev_ops adv7170_ops = {
diff --git a/drivers/media/video/as3645a.c b/drivers/media/video/as3645a.c
new file mode 100644
index 000000000000..f241702a0f36
--- /dev/null
+++ b/drivers/media/video/as3645a.c
@@ -0,0 +1,905 @@
+/*
+ * drivers/media/video/as3645a.c - AS3645A and LM3555 flash controllers driver
+ *
+ * Copyright (C) 2008-2011 Nokia Corporation
+ * Copyright (c) 2011, Intel Corporation.
+ *
+ * Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * TODO:
+ * - Check hardware FSTROBE control when the sensor driver adds support for this
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+
+#include <media/as3645a.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+
+#define AS_TIMER_MS_TO_CODE(t) (((t) - 100) / 50)
+#define AS_TIMER_CODE_TO_MS(c) (50 * (c) + 100)
+
+/* Register definitions */
+
+/* Read-only Design info register: Reset state: xxxx 0001 */
+#define AS_DESIGN_INFO_REG 0x00
+#define AS_DESIGN_INFO_FACTORY(x) (((x) >> 4))
+#define AS_DESIGN_INFO_MODEL(x) ((x) & 0x0f)
+
+/* Read-only Version control register: Reset state: 0000 0000
+ * for first engineering samples
+ */
+#define AS_VERSION_CONTROL_REG 0x01
+#define AS_VERSION_CONTROL_RFU(x) (((x) >> 4))
+#define AS_VERSION_CONTROL_VERSION(x) ((x) & 0x0f)
+
+/* Read / Write (Indicator and timer register): Reset state: 0000 1111 */
+#define AS_INDICATOR_AND_TIMER_REG 0x02
+#define AS_INDICATOR_AND_TIMER_TIMEOUT_SHIFT 0
+#define AS_INDICATOR_AND_TIMER_VREF_SHIFT 4
+#define AS_INDICATOR_AND_TIMER_INDICATOR_SHIFT 6
+
+/* Read / Write (Current set register): Reset state: 0110 1001 */
+#define AS_CURRENT_SET_REG 0x03
+#define AS_CURRENT_ASSIST_LIGHT_SHIFT 0
+#define AS_CURRENT_LED_DET_ON (1 << 3)
+#define AS_CURRENT_FLASH_CURRENT_SHIFT 4
+
+/* Read / Write (Control register): Reset state: 1011 0100 */
+#define AS_CONTROL_REG 0x04
+#define AS_CONTROL_MODE_SETTING_SHIFT 0
+#define AS_CONTROL_STROBE_ON (1 << 2)
+#define AS_CONTROL_OUT_ON (1 << 3)
+#define AS_CONTROL_EXT_TORCH_ON (1 << 4)
+#define AS_CONTROL_STROBE_TYPE_EDGE (0 << 5)
+#define AS_CONTROL_STROBE_TYPE_LEVEL (1 << 5)
+#define AS_CONTROL_COIL_PEAK_SHIFT 6
+
+/* Read only (D3 is read / write) (Fault and info): Reset state: 0000 x000 */
+#define AS_FAULT_INFO_REG 0x05
+#define AS_FAULT_INFO_INDUCTOR_PEAK_LIMIT (1 << 1)
+#define AS_FAULT_INFO_INDICATOR_LED (1 << 2)
+#define AS_FAULT_INFO_LED_AMOUNT (1 << 3)
+#define AS_FAULT_INFO_TIMEOUT (1 << 4)
+#define AS_FAULT_INFO_OVER_TEMPERATURE (1 << 5)
+#define AS_FAULT_INFO_SHORT_CIRCUIT (1 << 6)
+#define AS_FAULT_INFO_OVER_VOLTAGE (1 << 7)
+
+/* Boost register */
+#define AS_BOOST_REG 0x0d
+#define AS_BOOST_CURRENT_DISABLE (0 << 0)
+#define AS_BOOST_CURRENT_ENABLE (1 << 0)
+
+/* Password register is used to unlock boost register writing */
+#define AS_PASSWORD_REG 0x0f
+#define AS_PASSWORD_UNLOCK_VALUE 0x55
+
+enum as_mode {
+ AS_MODE_EXT_TORCH = 0 << AS_CONTROL_MODE_SETTING_SHIFT,
+ AS_MODE_INDICATOR = 1 << AS_CONTROL_MODE_SETTING_SHIFT,
+ AS_MODE_ASSIST = 2 << AS_CONTROL_MODE_SETTING_SHIFT,
+ AS_MODE_FLASH = 3 << AS_CONTROL_MODE_SETTING_SHIFT,
+};
+
+/*
+ * struct as3645a
+ *
+ * @subdev: V4L2 subdev
+ * @pdata: Flash platform data
+ * @power_lock: Protects power_count
+ * @power_count: Power reference count
+ * @led_mode: V4L2 flash LED mode
+ * @timeout: Flash timeout in microseconds
+ * @flash_current: Flash current (0=200mA ... 15=500mA). Maximum
+ * values are 400mA for two LEDs and 500mA for one LED.
+ * @assist_current: Torch/Assist light current (0=20mA, 1=40mA ... 7=160mA)
+ * @indicator_current: Indicator LED current (0=0mA, 1=2.5mA ... 4=10mA)
+ * @strobe_source: Flash strobe source (software or external)
+ */
+struct as3645a {
+ struct v4l2_subdev subdev;
+ const struct as3645a_platform_data *pdata;
+
+ struct mutex power_lock;
+ int power_count;
+
+ /* Controls */
+ struct v4l2_ctrl_handler ctrls;
+
+ enum v4l2_flash_led_mode led_mode;
+ unsigned int timeout;
+ u8 flash_current;
+ u8 assist_current;
+ u8 indicator_current;
+ enum v4l2_flash_strobe_source strobe_source;
+};
+
+#define to_as3645a(sd) container_of(sd, struct as3645a, subdev)
+
+/* Return negative errno else zero on success */
+static int as3645a_write(struct as3645a *flash, u8 addr, u8 val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&flash->subdev);
+ int rval;
+
+ rval = i2c_smbus_write_byte_data(client, addr, val);
+
+ dev_dbg(&client->dev, "Write Addr:%02X Val:%02X %s\n", addr, val,
+ rval < 0 ? "fail" : "ok");
+
+ return rval;
+}
+
+/* Return negative errno else a data byte received from the device. */
+static int as3645a_read(struct as3645a *flash, u8 addr)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&flash->subdev);
+ int rval;
+
+ rval = i2c_smbus_read_byte_data(client, addr);
+
+ dev_dbg(&client->dev, "Read Addr:%02X Val:%02X %s\n", addr, rval,
+ rval < 0 ? "fail" : "ok");
+
+ return rval;
+}
+
+/* -----------------------------------------------------------------------------
+ * Hardware configuration and trigger
+ */
+
+/*
+ * as3645a_set_config - Set flash configuration registers
+ * @flash: The flash
+ *
+ * Configure the hardware with flash, assist and indicator currents, as well as
+ * flash timeout.
+ *
+ * Return 0 on success, or a negative error code if an I2C communication error
+ * occurred.
+ */
+static int as3645a_set_config(struct as3645a *flash)
+{
+ int ret;
+ u8 val;
+
+ val = (flash->flash_current << AS_CURRENT_FLASH_CURRENT_SHIFT)
+ | (flash->assist_current << AS_CURRENT_ASSIST_LIGHT_SHIFT)
+ | AS_CURRENT_LED_DET_ON;
+
+ ret = as3645a_write(flash, AS_CURRENT_SET_REG, val);
+ if (ret < 0)
+ return ret;
+
+ val = AS_TIMER_MS_TO_CODE(flash->timeout / 1000)
+ << AS_INDICATOR_AND_TIMER_TIMEOUT_SHIFT;
+
+ val |= (flash->pdata->vref << AS_INDICATOR_AND_TIMER_VREF_SHIFT)
+ | ((flash->indicator_current ? flash->indicator_current - 1 : 0)
+ << AS_INDICATOR_AND_TIMER_INDICATOR_SHIFT);
+
+ return as3645a_write(flash, AS_INDICATOR_AND_TIMER_REG, val);
+}
+
+/*
+ * as3645a_set_control - Set flash control register
+ * @flash: The flash
+ * @mode: Desired output mode
+ * @on: Desired output state
+ *
+ * Configure the hardware with output mode and state.
+ *
+ * Return 0 on success, or a negative error code if an I2C communication error
+ * occurred.
+ */
+static int
+as3645a_set_control(struct as3645a *flash, enum as_mode mode, bool on)
+{
+ u8 reg;
+
+ /* Configure output parameters and operation mode. */
+ reg = (flash->pdata->peak << AS_CONTROL_COIL_PEAK_SHIFT)
+ | (on ? AS_CONTROL_OUT_ON : 0)
+ | mode;
+
+ if (flash->led_mode == V4L2_FLASH_LED_MODE_FLASH &&
+ flash->strobe_source == V4L2_FLASH_STROBE_SOURCE_EXTERNAL) {
+ reg |= AS_CONTROL_STROBE_TYPE_LEVEL
+ | AS_CONTROL_STROBE_ON;
+ }
+
+ return as3645a_write(flash, AS_CONTROL_REG, reg);
+}
+
+/*
+ * as3645a_set_output - Configure output and operation mode
+ * @flash: Flash controller
+ * @strobe: Strobe the flash (only valid in flash mode)
+ *
+ * Turn the LEDs output on/off and set the operation mode based on the current
+ * parameters.
+ *
+ * The AS3645A can't control the indicator LED independently of the flash/torch
+ * LED. If the flash controller is in V4L2_FLASH_LED_MODE_NONE mode, set the
+ * chip to indicator mode. Otherwise set it to assist light (torch) or flash
+ * mode.
+ *
+ * In indicator and assist modes, turn the output on/off based on the indicator
+ * and torch currents. In software strobe flash mode, turn the output on/off
+ * based on the strobe parameter.
+ */
+static int as3645a_set_output(struct as3645a *flash, bool strobe)
+{
+ enum as_mode mode;
+ bool on;
+
+ switch (flash->led_mode) {
+ case V4L2_FLASH_LED_MODE_NONE:
+ on = flash->indicator_current != 0;
+ mode = AS_MODE_INDICATOR;
+ break;
+ case V4L2_FLASH_LED_MODE_TORCH:
+ on = true;
+ mode = AS_MODE_ASSIST;
+ break;
+ case V4L2_FLASH_LED_MODE_FLASH:
+ on = strobe;
+ mode = AS_MODE_FLASH;
+ break;
+ default:
+ BUG();
+ }
+
+ /* Configure output parameters and operation mode. */
+ return as3645a_set_control(flash, mode, on);
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 controls
+ */
+
+static int as3645a_is_active(struct as3645a *flash)
+{
+ int ret;
+
+ ret = as3645a_read(flash, AS_CONTROL_REG);
+ return ret < 0 ? ret : !!(ret & AS_CONTROL_OUT_ON);
+}
+
+static int as3645a_read_fault(struct as3645a *flash)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&flash->subdev);
+ int rval;
+
+	/* NOTE: reading the register clears the fault status */
+ rval = as3645a_read(flash, AS_FAULT_INFO_REG);
+ if (rval < 0)
+ return rval;
+
+ if (rval & AS_FAULT_INFO_INDUCTOR_PEAK_LIMIT)
+ dev_dbg(&client->dev, "Inductor Peak limit fault\n");
+
+ if (rval & AS_FAULT_INFO_INDICATOR_LED)
+ dev_dbg(&client->dev, "Indicator LED fault: "
+ "Short circuit or open loop\n");
+
+ dev_dbg(&client->dev, "%u connected LEDs\n",
+ rval & AS_FAULT_INFO_LED_AMOUNT ? 2 : 1);
+
+ if (rval & AS_FAULT_INFO_TIMEOUT)
+ dev_dbg(&client->dev, "Timeout fault\n");
+
+ if (rval & AS_FAULT_INFO_OVER_TEMPERATURE)
+ dev_dbg(&client->dev, "Over temperature fault\n");
+
+ if (rval & AS_FAULT_INFO_SHORT_CIRCUIT)
+ dev_dbg(&client->dev, "Short circuit fault\n");
+
+ if (rval & AS_FAULT_INFO_OVER_VOLTAGE)
+ dev_dbg(&client->dev, "Over voltage fault: "
+ "Indicates missing capacitor or open connection\n");
+
+ return rval;
+}
+
+static int as3645a_get_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct as3645a *flash =
+ container_of(ctrl->handler, struct as3645a, ctrls);
+ struct i2c_client *client = v4l2_get_subdevdata(&flash->subdev);
+ int value;
+
+ switch (ctrl->id) {
+ case V4L2_CID_FLASH_FAULT:
+ value = as3645a_read_fault(flash);
+ if (value < 0)
+ return value;
+
+ ctrl->cur.val = 0;
+ if (value & AS_FAULT_INFO_SHORT_CIRCUIT)
+ ctrl->cur.val |= V4L2_FLASH_FAULT_SHORT_CIRCUIT;
+ if (value & AS_FAULT_INFO_OVER_TEMPERATURE)
+ ctrl->cur.val |= V4L2_FLASH_FAULT_OVER_TEMPERATURE;
+ if (value & AS_FAULT_INFO_TIMEOUT)
+ ctrl->cur.val |= V4L2_FLASH_FAULT_TIMEOUT;
+ if (value & AS_FAULT_INFO_OVER_VOLTAGE)
+ ctrl->cur.val |= V4L2_FLASH_FAULT_OVER_VOLTAGE;
+ if (value & AS_FAULT_INFO_INDUCTOR_PEAK_LIMIT)
+ ctrl->cur.val |= V4L2_FLASH_FAULT_OVER_CURRENT;
+ if (value & AS_FAULT_INFO_INDICATOR_LED)
+ ctrl->cur.val |= V4L2_FLASH_FAULT_INDICATOR;
+ break;
+
+ case V4L2_CID_FLASH_STROBE_STATUS:
+ if (flash->led_mode != V4L2_FLASH_LED_MODE_FLASH) {
+ ctrl->cur.val = 0;
+ break;
+ }
+
+ value = as3645a_is_active(flash);
+ if (value < 0)
+ return value;
+
+ ctrl->cur.val = value;
+ break;
+ }
+
+ dev_dbg(&client->dev, "G_CTRL %08x:%d\n", ctrl->id, ctrl->cur.val);
+
+ return 0;
+}
+
+static int as3645a_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct as3645a *flash =
+ container_of(ctrl->handler, struct as3645a, ctrls);
+ struct i2c_client *client = v4l2_get_subdevdata(&flash->subdev);
+ int ret;
+
+ dev_dbg(&client->dev, "S_CTRL %08x:%d\n", ctrl->id, ctrl->val);
+
+ /* If a control that doesn't apply to the current mode is modified,
+ * we store the value and return immediately. The setting will be
+ * applied when the LED mode is changed. Otherwise we apply the setting
+ * immediately.
+ */
+
+ switch (ctrl->id) {
+ case V4L2_CID_FLASH_LED_MODE:
+ if (flash->indicator_current)
+ return -EBUSY;
+
+ ret = as3645a_set_config(flash);
+ if (ret < 0)
+ return ret;
+
+ flash->led_mode = ctrl->val;
+ return as3645a_set_output(flash, false);
+
+ case V4L2_CID_FLASH_STROBE_SOURCE:
+ flash->strobe_source = ctrl->val;
+
+ /* Applies to flash mode only. */
+ if (flash->led_mode != V4L2_FLASH_LED_MODE_FLASH)
+ break;
+
+ return as3645a_set_output(flash, false);
+
+ case V4L2_CID_FLASH_STROBE:
+ if (flash->led_mode != V4L2_FLASH_LED_MODE_FLASH)
+ return -EBUSY;
+
+ return as3645a_set_output(flash, true);
+
+ case V4L2_CID_FLASH_STROBE_STOP:
+ if (flash->led_mode != V4L2_FLASH_LED_MODE_FLASH)
+ return -EBUSY;
+
+ return as3645a_set_output(flash, false);
+
+ case V4L2_CID_FLASH_TIMEOUT:
+ flash->timeout = ctrl->val;
+
+ /* Applies to flash mode only. */
+ if (flash->led_mode != V4L2_FLASH_LED_MODE_FLASH)
+ break;
+
+ return as3645a_set_config(flash);
+
+ case V4L2_CID_FLASH_INTENSITY:
+ flash->flash_current = (ctrl->val - AS3645A_FLASH_INTENSITY_MIN)
+ / AS3645A_FLASH_INTENSITY_STEP;
+
+ /* Applies to flash mode only. */
+ if (flash->led_mode != V4L2_FLASH_LED_MODE_FLASH)
+ break;
+
+ return as3645a_set_config(flash);
+
+ case V4L2_CID_FLASH_TORCH_INTENSITY:
+ flash->assist_current =
+ (ctrl->val - AS3645A_TORCH_INTENSITY_MIN)
+ / AS3645A_TORCH_INTENSITY_STEP;
+
+ /* Applies to torch mode only. */
+ if (flash->led_mode != V4L2_FLASH_LED_MODE_TORCH)
+ break;
+
+ return as3645a_set_config(flash);
+
+ case V4L2_CID_FLASH_INDICATOR_INTENSITY:
+ if (flash->led_mode != V4L2_FLASH_LED_MODE_NONE)
+ return -EBUSY;
+
+ flash->indicator_current =
+ (ctrl->val - AS3645A_INDICATOR_INTENSITY_MIN)
+ / AS3645A_INDICATOR_INTENSITY_STEP;
+
+ ret = as3645a_set_config(flash);
+ if (ret < 0)
+ return ret;
+
+ if ((ctrl->val == 0) == (ctrl->cur.val == 0))
+ break;
+
+ return as3645a_set_output(flash, false);
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops as3645a_ctrl_ops = {
+ .g_volatile_ctrl = as3645a_get_ctrl,
+ .s_ctrl = as3645a_set_ctrl,
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 subdev core operations
+ */
+
+/* Put device into known state. */
+static int as3645a_setup(struct as3645a *flash)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&flash->subdev);
+ int ret;
+
+ /* clear errors */
+ ret = as3645a_read(flash, AS_FAULT_INFO_REG);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(&client->dev, "Fault info: %02x\n", ret);
+
+ ret = as3645a_set_config(flash);
+ if (ret < 0)
+ return ret;
+
+ ret = as3645a_set_output(flash, false);
+ if (ret < 0)
+ return ret;
+
+ /* read status */
+ ret = as3645a_read_fault(flash);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(&client->dev, "AS_INDICATOR_AND_TIMER_REG: %02x\n",
+ as3645a_read(flash, AS_INDICATOR_AND_TIMER_REG));
+ dev_dbg(&client->dev, "AS_CURRENT_SET_REG: %02x\n",
+ as3645a_read(flash, AS_CURRENT_SET_REG));
+ dev_dbg(&client->dev, "AS_CONTROL_REG: %02x\n",
+ as3645a_read(flash, AS_CONTROL_REG));
+
+ return ret & ~AS_FAULT_INFO_LED_AMOUNT ? -EIO : 0;
+}
+
+static int __as3645a_set_power(struct as3645a *flash, int on)
+{
+ int ret;
+
+ if (!on)
+ as3645a_set_control(flash, AS_MODE_EXT_TORCH, false);
+
+ if (flash->pdata->set_power) {
+ ret = flash->pdata->set_power(&flash->subdev, on);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (!on)
+ return 0;
+
+ ret = as3645a_setup(flash);
+ if (ret < 0) {
+ if (flash->pdata->set_power)
+ flash->pdata->set_power(&flash->subdev, 0);
+ }
+
+ return ret;
+}
+
+static int as3645a_set_power(struct v4l2_subdev *sd, int on)
+{
+ struct as3645a *flash = to_as3645a(sd);
+ int ret = 0;
+
+ mutex_lock(&flash->power_lock);
+
+ if (flash->power_count == !on) {
+ ret = __as3645a_set_power(flash, !!on);
+ if (ret < 0)
+ goto done;
+ }
+
+ flash->power_count += on ? 1 : -1;
+ WARN_ON(flash->power_count < 0);
+
+done:
+ mutex_unlock(&flash->power_lock);
+ return ret;
+}
+
+static int as3645a_registered(struct v4l2_subdev *sd)
+{
+ struct as3645a *flash = to_as3645a(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int rval, man, model, rfu, version;
+ const char *vendor;
+
+ /* Power up the flash driver and read manufacturer ID, model ID, RFU
+ * and version.
+ */
+ rval = as3645a_set_power(&flash->subdev, 1);
+ if (rval < 0)
+ return rval;
+
+ rval = as3645a_read(flash, AS_DESIGN_INFO_REG);
+ if (rval < 0)
+ goto power_off;
+
+ man = AS_DESIGN_INFO_FACTORY(rval);
+ model = AS_DESIGN_INFO_MODEL(rval);
+
+ rval = as3645a_read(flash, AS_VERSION_CONTROL_REG);
+ if (rval < 0)
+ goto power_off;
+
+ rfu = AS_VERSION_CONTROL_RFU(rval);
+ version = AS_VERSION_CONTROL_VERSION(rval);
+
+ /* Verify the chip model and version. */
+ if (model != 0x01 || rfu != 0x00) {
+ dev_err(&client->dev, "AS3645A not detected "
+ "(model %d rfu %d)\n", model, rfu);
+ rval = -ENODEV;
+ goto power_off;
+ }
+
+ switch (man) {
+ case 1:
+ vendor = "AMS, Austria Micro Systems";
+ break;
+ case 2:
+ vendor = "ADI, Analog Devices Inc.";
+ break;
+ case 3:
+ vendor = "NSC, National Semiconductor";
+ break;
+ case 4:
+ vendor = "NXP";
+ break;
+ case 5:
+ vendor = "TI, Texas Instrument";
+ break;
+ default:
+ vendor = "Unknown";
+ }
+
+ dev_info(&client->dev, "Chip vendor: %s (%d) Version: %d\n", vendor,
+ man, version);
+
+ rval = as3645a_write(flash, AS_PASSWORD_REG, AS_PASSWORD_UNLOCK_VALUE);
+ if (rval < 0)
+ goto power_off;
+
+ rval = as3645a_write(flash, AS_BOOST_REG, AS_BOOST_CURRENT_DISABLE);
+ if (rval < 0)
+ goto power_off;
+
+ /* Setup default values. This makes sure that the chip is in a known
+ * state, in case the power rail can't be controlled.
+ */
+ rval = as3645a_setup(flash);
+
+power_off:
+ as3645a_set_power(&flash->subdev, 0);
+
+ return rval;
+}
+
+static int as3645a_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ return as3645a_set_power(sd, 1);
+}
+
+static int as3645a_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ return as3645a_set_power(sd, 0);
+}
+
+static const struct v4l2_subdev_core_ops as3645a_core_ops = {
+ .s_power = as3645a_set_power,
+};
+
+static const struct v4l2_subdev_ops as3645a_ops = {
+ .core = &as3645a_core_ops,
+};
+
+static const struct v4l2_subdev_internal_ops as3645a_internal_ops = {
+ .registered = as3645a_registered,
+ .open = as3645a_open,
+ .close = as3645a_close,
+};
+
+/* -----------------------------------------------------------------------------
+ * I2C driver
+ */
+#ifdef CONFIG_PM
+
+static int as3645a_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *subdev = i2c_get_clientdata(client);
+ struct as3645a *flash = to_as3645a(subdev);
+ int rval;
+
+ if (flash->power_count == 0)
+ return 0;
+
+ rval = __as3645a_set_power(flash, 0);
+
+ dev_dbg(&client->dev, "Suspend %s\n", rval < 0 ? "failed" : "ok");
+
+ return rval;
+}
+
+static int as3645a_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *subdev = i2c_get_clientdata(client);
+ struct as3645a *flash = to_as3645a(subdev);
+ int rval;
+
+ if (flash->power_count == 0)
+ return 0;
+
+ rval = __as3645a_set_power(flash, 1);
+
+ dev_dbg(&client->dev, "Resume %s\n", rval < 0 ? "fail" : "ok");
+
+ return rval;
+}
+
+#else
+
+#define as3645a_suspend NULL
+#define as3645a_resume NULL
+
+#endif /* CONFIG_PM */
+
+/*
+ * as3645a_init_controls - Create controls
+ * @flash: The flash
+ *
+ * The number of LEDs reported in platform data is used to compute default
+ * limits. Parameters passed through platform data can override those limits.
+ */
+static int as3645a_init_controls(struct as3645a *flash)
+{
+ const struct as3645a_platform_data *pdata = flash->pdata;
+ struct v4l2_ctrl *ctrl;
+ int maximum;
+
+ v4l2_ctrl_handler_init(&flash->ctrls, 10);
+
+ /* V4L2_CID_FLASH_LED_MODE */
+ v4l2_ctrl_new_std_menu(&flash->ctrls, &as3645a_ctrl_ops,
+ V4L2_CID_FLASH_LED_MODE, 2, ~7,
+ V4L2_FLASH_LED_MODE_NONE);
+
+ /* V4L2_CID_FLASH_STROBE_SOURCE */
+ v4l2_ctrl_new_std_menu(&flash->ctrls, &as3645a_ctrl_ops,
+ V4L2_CID_FLASH_STROBE_SOURCE,
+ pdata->ext_strobe ? 1 : 0,
+ pdata->ext_strobe ? ~3 : ~1,
+ V4L2_FLASH_STROBE_SOURCE_SOFTWARE);
+
+ flash->strobe_source = V4L2_FLASH_STROBE_SOURCE_SOFTWARE;
+
+ /* V4L2_CID_FLASH_STROBE */
+ v4l2_ctrl_new_std(&flash->ctrls, &as3645a_ctrl_ops,
+ V4L2_CID_FLASH_STROBE, 0, 0, 0, 0);
+
+ /* V4L2_CID_FLASH_STROBE_STOP */
+ v4l2_ctrl_new_std(&flash->ctrls, &as3645a_ctrl_ops,
+ V4L2_CID_FLASH_STROBE_STOP, 0, 0, 0, 0);
+
+ /* V4L2_CID_FLASH_STROBE_STATUS */
+ ctrl = v4l2_ctrl_new_std(&flash->ctrls, &as3645a_ctrl_ops,
+ V4L2_CID_FLASH_STROBE_STATUS, 0, 1, 1, 1);
+ if (ctrl != NULL)
+ ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
+
+ /* V4L2_CID_FLASH_TIMEOUT */
+ maximum = pdata->timeout_max;
+
+ v4l2_ctrl_new_std(&flash->ctrls, &as3645a_ctrl_ops,
+ V4L2_CID_FLASH_TIMEOUT, AS3645A_FLASH_TIMEOUT_MIN,
+ maximum, AS3645A_FLASH_TIMEOUT_STEP, maximum);
+
+ flash->timeout = maximum;
+
+ /* V4L2_CID_FLASH_INTENSITY */
+ maximum = pdata->flash_max_current;
+
+ v4l2_ctrl_new_std(&flash->ctrls, &as3645a_ctrl_ops,
+ V4L2_CID_FLASH_INTENSITY, AS3645A_FLASH_INTENSITY_MIN,
+ maximum, AS3645A_FLASH_INTENSITY_STEP, maximum);
+
+ flash->flash_current = (maximum - AS3645A_FLASH_INTENSITY_MIN)
+ / AS3645A_FLASH_INTENSITY_STEP;
+
+ /* V4L2_CID_FLASH_TORCH_INTENSITY */
+ maximum = pdata->torch_max_current;
+
+ v4l2_ctrl_new_std(&flash->ctrls, &as3645a_ctrl_ops,
+ V4L2_CID_FLASH_TORCH_INTENSITY,
+ AS3645A_TORCH_INTENSITY_MIN, maximum,
+ AS3645A_TORCH_INTENSITY_STEP,
+ AS3645A_TORCH_INTENSITY_MIN);
+
+ flash->assist_current = 0;
+
+ /* V4L2_CID_FLASH_INDICATOR_INTENSITY */
+ v4l2_ctrl_new_std(&flash->ctrls, &as3645a_ctrl_ops,
+ V4L2_CID_FLASH_INDICATOR_INTENSITY,
+ AS3645A_INDICATOR_INTENSITY_MIN,
+ AS3645A_INDICATOR_INTENSITY_MAX,
+ AS3645A_INDICATOR_INTENSITY_STEP,
+ AS3645A_INDICATOR_INTENSITY_MIN);
+
+ flash->indicator_current = 0;
+
+ /* V4L2_CID_FLASH_FAULT */
+ ctrl = v4l2_ctrl_new_std(&flash->ctrls, &as3645a_ctrl_ops,
+ V4L2_CID_FLASH_FAULT, 0,
+ V4L2_FLASH_FAULT_OVER_VOLTAGE |
+ V4L2_FLASH_FAULT_TIMEOUT |
+ V4L2_FLASH_FAULT_OVER_TEMPERATURE |
+ V4L2_FLASH_FAULT_SHORT_CIRCUIT, 0, 0);
+ if (ctrl != NULL)
+ ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
+
+ flash->subdev.ctrl_handler = &flash->ctrls;
+
+ return flash->ctrls.error;
+}
+
+static int as3645a_probe(struct i2c_client *client,
+ const struct i2c_device_id *devid)
+{
+ struct as3645a *flash;
+ int ret;
+
+ if (client->dev.platform_data == NULL)
+ return -ENODEV;
+
+ flash = kzalloc(sizeof(*flash), GFP_KERNEL);
+ if (flash == NULL)
+ return -ENOMEM;
+
+ flash->pdata = client->dev.platform_data;
+
+ v4l2_i2c_subdev_init(&flash->subdev, client, &as3645a_ops);
+ flash->subdev.internal_ops = &as3645a_internal_ops;
+ flash->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ ret = as3645a_init_controls(flash);
+ if (ret < 0)
+ goto done;
+
+ ret = media_entity_init(&flash->subdev.entity, 0, NULL, 0);
+ if (ret < 0)
+ goto done;
+
+ flash->subdev.entity.type = MEDIA_ENT_T_V4L2_SUBDEV_FLASH;
+
+ mutex_init(&flash->power_lock);
+
+ flash->led_mode = V4L2_FLASH_LED_MODE_NONE;
+
+done:
+ if (ret < 0) {
+ v4l2_ctrl_handler_free(&flash->ctrls);
+ kfree(flash);
+ }
+
+ return ret;
+}
+
+static int __exit as3645a_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *subdev = i2c_get_clientdata(client);
+ struct as3645a *flash = to_as3645a(subdev);
+
+ v4l2_device_unregister_subdev(subdev);
+ v4l2_ctrl_handler_free(&flash->ctrls);
+ media_entity_cleanup(&flash->subdev.entity);
+ mutex_destroy(&flash->power_lock);
+ kfree(flash);
+
+ return 0;
+}
+
+static const struct i2c_device_id as3645a_id_table[] = {
+ { AS3645A_NAME, 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, as3645a_id_table);
+
+static const struct dev_pm_ops as3645a_pm_ops = {
+ .suspend = as3645a_suspend,
+ .resume = as3645a_resume,
+};
+
+static struct i2c_driver as3645a_i2c_driver = {
+ .driver = {
+ .name = AS3645A_NAME,
+ .pm = &as3645a_pm_ops,
+ },
+ .probe = as3645a_probe,
+ .remove = __exit_p(as3645a_remove),
+ .id_table = as3645a_id_table,
+};
+
+static int __init as3645a_init(void)
+{
+ int rval;
+
+ rval = i2c_add_driver(&as3645a_i2c_driver);
+ if (rval)
+ pr_err("%s: Failed to register the driver\n", AS3645A_NAME);
+
+ return rval;
+}
+
+static void __exit as3645a_exit(void)
+{
+ i2c_del_driver(&as3645a_i2c_driver);
+}
+
+module_init(as3645a_init);
+module_exit(as3645a_exit);
+
+MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
+MODULE_DESCRIPTION("LED flash driver for AS3645A, LM3555 and their clones");
+MODULE_LICENSE("GPL");
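The AS_TIMER_MS_TO_CODE()/AS_TIMER_CODE_TO_MS() macros near the top of the new file encode the flash timeout as 100 ms plus 50 ms per code step, and as3645a_set_config() divides the microsecond timeout by 1000 before encoding it. A small worked example of the round trip, using the macros as defined above (variable names are illustrative only):

	unsigned int timeout_us = 150000;			/* e.g. from V4L2_CID_FLASH_TIMEOUT */
	u8 code = AS_TIMER_MS_TO_CODE(timeout_us / 1000);	/* (150 - 100) / 50 = 1 */
	unsigned int back_ms = AS_TIMER_CODE_TO_MS(code);	/* 50 * 1 + 100 = 150 ms */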
diff --git a/drivers/media/video/atmel-isi.c b/drivers/media/video/atmel-isi.c
index 8c775c59e120..9fe4519176a4 100644
--- a/drivers/media/video/atmel-isi.c
+++ b/drivers/media/video/atmel-isi.c
@@ -90,7 +90,10 @@ struct atmel_isi {
struct isi_dma_desc dma_desc[MAX_BUFFER_NUM];
struct completion complete;
+	/* ISI peripheral clock */
struct clk *pclk;
+	/* ISI_MCK, fed to the camera sensor to generate the pixel clock */
+ struct clk *mck;
unsigned int irq;
struct isi_platform_data *pdata;
@@ -766,6 +769,12 @@ static int isi_camera_add_device(struct soc_camera_device *icd)
if (ret)
return ret;
+ ret = clk_enable(isi->mck);
+ if (ret) {
+ clk_disable(isi->pclk);
+ return ret;
+ }
+
isi->icd = icd;
dev_dbg(icd->parent, "Atmel ISI Camera driver attached to camera %d\n",
icd->devnum);
@@ -779,6 +788,7 @@ static void isi_camera_remove_device(struct soc_camera_device *icd)
BUG_ON(icd != isi->icd);
+ clk_disable(isi->mck);
clk_disable(isi->pclk);
isi->icd = NULL;
@@ -803,7 +813,7 @@ static int isi_camera_querycap(struct soc_camera_host *ici,
return 0;
}
-static int isi_camera_set_bus_param(struct soc_camera_device *icd, u32 pixfmt)
+static int isi_camera_set_bus_param(struct soc_camera_device *icd)
{
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
@@ -874,7 +884,7 @@ static int isi_camera_set_bus_param(struct soc_camera_device *icd, u32 pixfmt)
if (isi->pdata->has_emb_sync)
cfg1 |= ISI_CFG1_EMB_SYNC;
- if (isi->pdata->isi_full_mode)
+ if (isi->pdata->full_mode)
cfg1 |= ISI_CFG1_FULL_MODE;
isi_writel(isi, ISI_CTRL, ISI_CTRL_DIS);
@@ -912,6 +922,7 @@ static int __devexit atmel_isi_remove(struct platform_device *pdev)
isi->fb_descriptors_phys);
iounmap(isi->regs);
+ clk_put(isi->mck);
clk_put(isi->pclk);
kfree(isi);
@@ -930,7 +941,7 @@ static int __devinit atmel_isi_probe(struct platform_device *pdev)
struct isi_platform_data *pdata;
pdata = dev->platform_data;
- if (!pdata || !pdata->data_width_flags) {
+ if (!pdata || !pdata->data_width_flags || !pdata->mck_hz) {
dev_err(&pdev->dev,
"No config available for Atmel ISI\n");
return -EINVAL;
@@ -959,6 +970,19 @@ static int __devinit atmel_isi_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&isi->video_buffer_list);
INIT_LIST_HEAD(&isi->dma_desc_head);
+ /* Get ISI_MCK, provided by programmable clock or external clock */
+ isi->mck = clk_get(dev, "isi_mck");
+ if (IS_ERR(isi->mck)) {
+ dev_err(dev, "Failed to get isi_mck\n");
+ ret = PTR_ERR(isi->mck);
+ goto err_clk_get;
+ }
+
+ /* Set ISI_MCK's frequency, it should be faster than pixel clock */
+ ret = clk_set_rate(isi->mck, pdata->mck_hz);
+ if (ret < 0)
+ goto err_set_mck_rate;
+
isi->p_fb_descriptors = dma_alloc_coherent(&pdev->dev,
sizeof(struct fbd) * MAX_BUFFER_NUM,
&isi->fb_descriptors_phys,
@@ -1034,9 +1058,12 @@ err_alloc_ctx:
isi->p_fb_descriptors,
isi->fb_descriptors_phys);
err_alloc_descriptors:
+err_set_mck_rate:
+ clk_put(isi->mck);
+err_clk_get:
kfree(isi);
err_alloc_isi:
- clk_put(isi->pclk);
+ clk_put(pclk);
return ret;
}
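
The error path added to atmel_isi_probe() above unwinds resources in the reverse order of their acquisition, one goto label per step. A minimal sketch of that pattern, using hypothetical names rather than the actual ISI code:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct clk *pclk, *mck;
	int ret;

	pclk = clk_get(&pdev->dev, "pclk");	/* first resource */
	if (IS_ERR(pclk))
		return PTR_ERR(pclk);

	mck = clk_get(&pdev->dev, "mck");	/* second resource */
	if (IS_ERR(mck)) {
		ret = PTR_ERR(mck);
		goto err_mck_get;
	}

	ret = clk_set_rate(mck, 25000000);	/* configuration step */
	if (ret < 0)
		goto err_set_rate;

	/* a real driver would stash pclk and mck in its state here */
	return 0;

err_set_rate:
	clk_put(mck);		/* undo the second acquisition */
err_mck_get:
	clk_put(pclk);		/* undo the first acquisition */
	return ret;
}

Each new resource only adds one label, and the unwind order never needs to be revisited when later steps change.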
diff --git a/drivers/media/video/au0828/Kconfig b/drivers/media/video/au0828/Kconfig
index 0c3a5ba0e857..81ba9d9d1b52 100644
--- a/drivers/media/video/au0828/Kconfig
+++ b/drivers/media/video/au0828/Kconfig
@@ -2,6 +2,7 @@
config VIDEO_AU0828
tristate "Auvitek AU0828 support"
depends on I2C && INPUT && DVB_CORE && USB && VIDEO_V4L2
+ depends on DVB_CAPTURE_DRIVERS
select I2C_ALGOBIT
select VIDEO_TVEEPROM
select VIDEOBUF_VMALLOC
diff --git a/drivers/media/video/au0828/au0828-cards.c b/drivers/media/video/au0828/au0828-cards.c
index 39fc923fc46b..1c6015a04f96 100644
--- a/drivers/media/video/au0828/au0828-cards.c
+++ b/drivers/media/video/au0828/au0828-cards.c
@@ -162,11 +162,14 @@ static void hauppauge_eeprom(struct au0828_dev *dev, u8 *eeprom_data)
switch (tv.model) {
case 72000: /* WinTV-HVR950q (Retail, IR, ATSC/QAM */
case 72001: /* WinTV-HVR950q (Retail, IR, ATSC/QAM and analog video */
+ case 72101: /* WinTV-HVR950q (Retail, IR, ATSC/QAM and analog video */
+ case 72201: /* WinTV-HVR950q (OEM, IR, ATSC/QAM and analog video */
case 72211: /* WinTV-HVR950q (OEM, IR, ATSC/QAM and analog video */
case 72221: /* WinTV-HVR950q (OEM, IR, ATSC/QAM and analog video */
case 72231: /* WinTV-HVR950q (OEM, IR, ATSC/QAM and analog video */
case 72241: /* WinTV-HVR950q (OEM, No IR, ATSC/QAM and analog video */
case 72251: /* WinTV-HVR950q (Retail, IR, ATSC/QAM and analog video */
+ case 72261: /* WinTV-HVR950q (OEM, IR, ATSC/QAM and analog video */
case 72301: /* WinTV-HVR850 (Retail, IR, ATSC and analog video */
case 72500: /* WinTV-HVR950q (OEM, No IR, ATSC/QAM */
break;
@@ -324,6 +327,10 @@ struct usb_device_id au0828_usb_id_table[] = {
.driver_info = AU0828_BOARD_HAUPPAUGE_HVR950Q_MXL },
{ USB_DEVICE(0x2040, 0x8200),
.driver_info = AU0828_BOARD_HAUPPAUGE_WOODBURY },
+ { USB_DEVICE(0x2040, 0x7260),
+ .driver_info = AU0828_BOARD_HAUPPAUGE_HVR950Q },
+ { USB_DEVICE(0x2040, 0x7213),
+ .driver_info = AU0828_BOARD_HAUPPAUGE_HVR950Q },
{ },
};
diff --git a/drivers/media/video/au0828/au0828-i2c.c b/drivers/media/video/au0828/au0828-i2c.c
index cbdb65c34f21..05c299fa5d79 100644
--- a/drivers/media/video/au0828/au0828-i2c.c
+++ b/drivers/media/video/au0828/au0828-i2c.c
@@ -348,7 +348,7 @@ static void do_i2c_scan(char *name, struct i2c_client *c)
}
}
-/* init + register i2c algo-bit adapter */
+/* init + register i2c adapter */
int au0828_i2c_register(struct au0828_dev *dev)
{
dprintk(1, "%s()\n", __func__);
diff --git a/drivers/media/video/bt8xx/bt848.h b/drivers/media/video/bt8xx/bt848.h
index 0bcd95303bb0..c37e6acffded 100644
--- a/drivers/media/video/bt8xx/bt848.h
+++ b/drivers/media/video/bt8xx/bt848.h
@@ -30,6 +30,10 @@
#ifndef PCI_DEVICE_ID_BT849
#define PCI_DEVICE_ID_BT849 0x351
#endif
+#ifndef PCI_DEVICE_ID_FUSION879
+#define PCI_DEVICE_ID_FUSION879 0x36c
+#endif
+
#ifndef PCI_DEVICE_ID_BT878
#define PCI_DEVICE_ID_BT878 0x36e
#endif
@@ -37,7 +41,6 @@
#define PCI_DEVICE_ID_BT879 0x36f
#endif
-
/* Brooktree 848 registers */
#define BT848_DSTATUS 0x000
diff --git a/drivers/media/video/bt8xx/bttv-cards.c b/drivers/media/video/bt8xx/bttv-cards.c
index 5939021d8eba..ff2933ab705f 100644
--- a/drivers/media/video/bt8xx/bttv-cards.c
+++ b/drivers/media/video/bt8xx/bttv-cards.c
@@ -80,6 +80,8 @@ static void phytec_muxsel(struct bttv *btv, unsigned int input);
static void gv800s_muxsel(struct bttv *btv, unsigned int input);
static void gv800s_init(struct bttv *btv);
+static void td3116_muxsel(struct bttv *btv, unsigned int input);
+
static int terratec_active_radio_upgrade(struct bttv *btv);
static int tea5757_read(struct bttv *btv);
static int tea5757_write(struct bttv *btv, int value);
@@ -284,7 +286,8 @@ static struct CARD {
{ 0x10b42636, BTTV_BOARD_HAUPPAUGE878, "STB ???" },
{ 0x217d6606, BTTV_BOARD_WINFAST2000, "Leadtek WinFast TV 2000" },
{ 0xfff6f6ff, BTTV_BOARD_WINFAST2000, "Leadtek WinFast TV 2000" },
- { 0x03116000, BTTV_BOARD_SENSORAY311, "Sensoray 311" },
+ { 0x03116000, BTTV_BOARD_SENSORAY311_611, "Sensoray 311" },
+ { 0x06116000, BTTV_BOARD_SENSORAY311_611, "Sensoray 611" },
{ 0x00790e11, BTTV_BOARD_WINDVR, "Canopus WinDVR PCI" },
{ 0xa0fca1a0, BTTV_BOARD_ZOLTRIX, "Face to Face Tvmax" },
{ 0x82b2aa6a, BTTV_BOARD_SIMUS_GVC1100, "SIMUS GVC1100" },
@@ -341,6 +344,7 @@ static struct CARD {
{ 0x15401835, BTTV_BOARD_PV183, "Provideo PV183-6" },
{ 0x15401836, BTTV_BOARD_PV183, "Provideo PV183-7" },
{ 0x15401837, BTTV_BOARD_PV183, "Provideo PV183-8" },
+ { 0x3116f200, BTTV_BOARD_TVT_TD3116, "Tongwei Video Technology TD-3116" },
{ 0, -1, NULL }
};
@@ -1526,10 +1530,10 @@ struct tvcard bttv_tvcards[] = {
GPIO20,22,23: R30,R29,R28
*/
},
- [BTTV_BOARD_SENSORAY311] = {
+ [BTTV_BOARD_SENSORAY311_611] = {
/* Clay Kunz <ckunz@mail.arc.nasa.gov> */
- /* you must jumper JP5 for the card to work */
- .name = "Sensoray 311",
+ /* you must jumper JP5 for the 311 card (PC/104+) to work */
+ .name = "Sensoray 311/611",
.video_inputs = 5,
/* .audio_inputs= 0, */
.svhs = 4,
@@ -2879,6 +2883,16 @@ struct tvcard bttv_tvcards[] = {
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
},
+ [BTTV_BOARD_TVT_TD3116] = {
+ .name = "Tongwei Video Technology TD-3116",
+ .video_inputs = 16,
+ .gpiomask = 0xc00ff,
+ .muxsel = MUXSEL(2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2),
+ .muxsel_hook = td3116_muxsel,
+ .svhs = NO_SVHS,
+ .pll = PLL_28,
+ .tuner_type = TUNER_ABSENT,
+ },
};
static const unsigned int bttv_num_tvcards = ARRAY_SIZE(bttv_tvcards);
@@ -3228,6 +3242,42 @@ static void geovision_muxsel(struct bttv *btv, unsigned int input)
gpio_bits(0xf, inmux);
}
+/*
+ * The TD3116 has 2 74HC4051 muxes wired to the MUX0 input of a bt878.
+ * The first 74HC4051 has the lower 8 inputs, the second one the higher 8.
+ * The muxes are controlled via a 74HC373 latch which is connected to
+ * GPIOs 0-7. GPIO 18 is connected to the LE signal of the latch.
+ * Q0 of the latch is connected to the Enable (~E) input of the first
+ * 74HC4051. Q1 - Q3 are connected to S0 - S2 of the same 74HC4051.
+ * Q4 - Q7 are connected to the second 74HC4051 in the same way.
+ */
+
+static void td3116_latch_value(struct bttv *btv, u32 value)
+{
+ gpio_bits((1<<18) | 0xff, value);
+ gpio_bits((1<<18) | 0xff, (1<<18) | value);
+ udelay(1);
+ gpio_bits((1<<18) | 0xff, value);
+}
+
+static void td3116_muxsel(struct bttv *btv, unsigned int input)
+{
+ u32 value;
+ u32 highbit;
+
+ highbit = (input & 0x8) >> 3;
+
+ /* Disable outputs and set value in the mux */
+ value = 0x11; /* Disable outputs */
+ value |= ((input & 0x7) << 1) << (4 * highbit);
+ td3116_latch_value(btv, value);
+
+ /* Enable the correct output */
+ value &= ~0x11;
+ value |= ((highbit ^ 0x1) << 4) | highbit;
+ td3116_latch_value(btv, value);
+}
+
/* ----------------------------------------------------------------------- */
static void bttv_reset_audio(struct bttv *btv)
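
Worked example for the TD-3116 mux selection above: for input 10, highbit = (10 & 0x8) >> 3 = 1 and input & 0x7 = 2, so the first latch write is 0x11 | ((2 << 1) << 4) = 0x51 (both 74HC4051s disabled, select lines preloaded). The second write clears both enable bits and ORs in ((1 ^ 1) << 4) | 1 = 0x01, giving 0x41: bit 0 keeps the first mux disabled, bit 4 = 0 enables the second mux, and bits 5-7 = 0b010 select its channel 2, routing overall input 8 + 2 = 10 to MUX0.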
diff --git a/drivers/media/video/bt8xx/bttv-driver.c b/drivers/media/video/bt8xx/bttv-driver.c
index 3dd06607aec2..76c301f05095 100644
--- a/drivers/media/video/bt8xx/bttv-driver.c
+++ b/drivers/media/video/bt8xx/bttv-driver.c
@@ -4572,6 +4572,7 @@ static struct pci_device_id bttv_pci_tbl[] = {
{PCI_VDEVICE(BROOKTREE, PCI_DEVICE_ID_BT849), 0},
{PCI_VDEVICE(BROOKTREE, PCI_DEVICE_ID_BT878), 0},
{PCI_VDEVICE(BROOKTREE, PCI_DEVICE_ID_BT879), 0},
+ {PCI_VDEVICE(BROOKTREE, PCI_DEVICE_ID_FUSION879), 0},
{0,}
};
diff --git a/drivers/media/video/bt8xx/bttv-i2c.c b/drivers/media/video/bt8xx/bttv-i2c.c
index e3952af7e56e..580c8e682392 100644
--- a/drivers/media/video/bt8xx/bttv-i2c.c
+++ b/drivers/media/video/bt8xx/bttv-i2c.c
@@ -346,7 +346,7 @@ static void do_i2c_scan(char *name, struct i2c_client *c)
}
}
-/* init + register i2c algo-bit adapter */
+/* init + register i2c adapter */
int __devinit init_bttv_i2c(struct bttv *btv)
{
strlcpy(btv->i2c_client.name, "bttv internal", I2C_NAME_SIZE);
diff --git a/drivers/media/video/bt8xx/bttv.h b/drivers/media/video/bt8xx/bttv.h
index c6333595c6b9..c5171619ac79 100644
--- a/drivers/media/video/bt8xx/bttv.h
+++ b/drivers/media/video/bt8xx/bttv.h
@@ -96,7 +96,7 @@
#define BTTV_BOARD_PV_BT878P_PLUS 0x46
#define BTTV_BOARD_FLYVIDEO98EZ 0x47
#define BTTV_BOARD_PV_BT878P_9B 0x48
-#define BTTV_BOARD_SENSORAY311 0x49
+#define BTTV_BOARD_SENSORAY311_611 0x49
#define BTTV_BOARD_RV605 0x4a
#define BTTV_BOARD_POWERCLR_MTV878 0x4b
#define BTTV_BOARD_WINDVR 0x4c
@@ -183,6 +183,7 @@
#define BTTV_BOARD_GEOVISION_GV800S 0x9d
#define BTTV_BOARD_GEOVISION_GV800S_SL 0x9e
#define BTTV_BOARD_PV183 0x9f
+#define BTTV_BOARD_TVT_TD3116 0xa0
/* more card-specific defines */
diff --git a/drivers/media/video/c-qcam.c b/drivers/media/video/c-qcam.c
index cd8ff0473184..fda32f52554a 100644
--- a/drivers/media/video/c-qcam.c
+++ b/drivers/media/video/c-qcam.c
@@ -72,7 +72,7 @@ struct qcam {
static int parport[MAX_CAMS] = { [1 ... MAX_CAMS-1] = -1 };
static int probe = 2;
-static int force_rgb;
+static bool force_rgb;
static int video_nr = -1;
/* FIXME: parport=auto would never have worked, surely? --RR */
diff --git a/drivers/media/video/cs5345.c b/drivers/media/video/cs5345.c
index 5909f2557ab4..1d64af9adf71 100644
--- a/drivers/media/video/cs5345.c
+++ b/drivers/media/video/cs5345.c
@@ -31,7 +31,7 @@ MODULE_DESCRIPTION("i2c device driver for cs5345 Audio ADC");
MODULE_AUTHOR("Hans Verkuil");
MODULE_LICENSE("GPL");
-static int debug;
+static bool debug;
module_param(debug, bool, 0644);
diff --git a/drivers/media/video/cs53l32a.c b/drivers/media/video/cs53l32a.c
index d93e5ab45fd3..51c5b9ad67d8 100644
--- a/drivers/media/video/cs53l32a.c
+++ b/drivers/media/video/cs53l32a.c
@@ -35,7 +35,7 @@ MODULE_DESCRIPTION("i2c device driver for cs53l32a Audio ADC");
MODULE_AUTHOR("Martin Vaughan");
MODULE_LICENSE("GPL");
-static int debug;
+static bool debug;
module_param(debug, bool, 0644);
diff --git a/drivers/media/video/cx18/cx18-driver.c b/drivers/media/video/cx18/cx18-driver.c
index c6ff32a6137c..349bd9c2aff5 100644
--- a/drivers/media/video/cx18/cx18-driver.c
+++ b/drivers/media/video/cx18/cx18-driver.c
@@ -75,7 +75,7 @@ static int radio[CX18_MAX_CARDS] = { -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1 };
static unsigned cardtype_c = 1;
static unsigned tuner_c = 1;
-static unsigned radio_c = 1;
+static bool radio_c = 1;
static char pal[] = "--";
static char secam[] = "--";
static char ntsc[] = "-";
diff --git a/drivers/media/video/cx18/cx18-fileops.c b/drivers/media/video/cx18/cx18-fileops.c
index 14cb961c22bd..4bfd865a4106 100644
--- a/drivers/media/video/cx18/cx18-fileops.c
+++ b/drivers/media/video/cx18/cx18-fileops.c
@@ -751,20 +751,10 @@ int cx18_v4l2_close(struct file *filp)
CX18_DEBUG_IOCTL("close() of %s\n", s->name);
- v4l2_fh_del(fh);
- v4l2_fh_exit(fh);
-
- /* Easy case first: this stream was never claimed by us */
- if (s->id != id->open_id) {
- kfree(id);
- return 0;
- }
-
- /* 'Unclaim' this stream */
-
- /* Stop radio */
mutex_lock(&cx->serialize_lock);
- if (id->type == CX18_ENC_STREAM_TYPE_RAD) {
+ /* Stop radio */
+ if (id->type == CX18_ENC_STREAM_TYPE_RAD &&
+ v4l2_fh_is_singular_file(filp)) {
/* Closing radio device, return to TV mode */
cx18_mute(cx);
/* Mark that the radio is no longer in use */
@@ -781,10 +771,14 @@ int cx18_v4l2_close(struct file *filp)
}
/* Done! Unmute and continue. */
cx18_unmute(cx);
- cx18_release_stream(s);
- } else {
- cx18_stop_capture(id, 0);
}
+
+ v4l2_fh_del(fh);
+ v4l2_fh_exit(fh);
+
+ /* 'Unclaim' this stream */
+ if (s->id == id->open_id)
+ cx18_stop_capture(id, 0);
kfree(id);
mutex_unlock(&cx->serialize_lock);
return 0;
@@ -810,21 +804,15 @@ static int cx18_serialized_open(struct cx18_stream *s, struct file *filp)
item->open_id = cx->open_id++;
filp->private_data = &item->fh;
+ v4l2_fh_add(&item->fh);
- if (item->type == CX18_ENC_STREAM_TYPE_RAD) {
- /* Try to claim this stream */
- if (cx18_claim_stream(item, item->type)) {
- /* No, it's already in use */
- v4l2_fh_exit(&item->fh);
- kfree(item);
- return -EBUSY;
- }
-
+ if (item->type == CX18_ENC_STREAM_TYPE_RAD &&
+ v4l2_fh_is_singular_file(filp)) {
if (!test_bit(CX18_F_I_RADIO_USER, &cx->i_flags)) {
if (atomic_read(&cx->ana_capturing) > 0) {
/* switching to radio while capture is
in progress is not polite */
- cx18_release_stream(s);
+ v4l2_fh_del(&item->fh);
v4l2_fh_exit(&item->fh);
kfree(item);
return -EBUSY;
@@ -842,7 +830,6 @@ static int cx18_serialized_open(struct cx18_stream *s, struct file *filp)
/* Done! Unmute and continue. */
cx18_unmute(cx);
}
- v4l2_fh_add(&item->fh);
return 0;
}
diff --git a/drivers/media/video/cx18/cx18-i2c.c b/drivers/media/video/cx18/cx18-i2c.c
index 040aaa87579d..51609d5c88ce 100644
--- a/drivers/media/video/cx18/cx18-i2c.c
+++ b/drivers/media/video/cx18/cx18-i2c.c
@@ -232,7 +232,7 @@ static struct i2c_algo_bit_data cx18_i2c_algo_template = {
.timeout = CX18_ALGO_BIT_TIMEOUT*HZ /* jiffies */
};
-/* init + register i2c algo-bit adapter */
+/* init + register i2c adapter */
int init_cx18_i2c(struct cx18 *cx)
{
int i, err;
diff --git a/drivers/media/video/cx18/cx18-i2c.h b/drivers/media/video/cx18/cx18-i2c.h
index bdfd1921e300..1180fdc8d983 100644
--- a/drivers/media/video/cx18/cx18-i2c.h
+++ b/drivers/media/video/cx18/cx18-i2c.h
@@ -24,6 +24,6 @@
int cx18_i2c_register(struct cx18 *cx, unsigned idx);
struct v4l2_subdev *cx18_find_hw(struct cx18 *cx, u32 hw);
-/* init + register i2c algo-bit adapter */
+/* init + register i2c adapter */
int init_cx18_i2c(struct cx18 *cx);
void exit_cx18_i2c(struct cx18 *cx);
diff --git a/drivers/media/video/cx231xx/Kconfig b/drivers/media/video/cx231xx/Kconfig
index ae85a7a7bd73..446f692aabb7 100644
--- a/drivers/media/video/cx231xx/Kconfig
+++ b/drivers/media/video/cx231xx/Kconfig
@@ -40,10 +40,10 @@ config VIDEO_CX231XX_ALSA
config VIDEO_CX231XX_DVB
tristate "DVB/ATSC Support for Cx231xx based TV cards"
- depends on VIDEO_CX231XX && DVB_CORE
+ depends on VIDEO_CX231XX && DVB_CORE && DVB_CAPTURE_DRIVERS
select VIDEOBUF_DVB
- select MEDIA_TUNER_XC5000 if !DVB_FE_CUSTOMISE
- select MEDIA_TUNER_NXP18271 if !DVB_FE_CUSTOMISE
+ select MEDIA_TUNER_XC5000 if !MEDIA_TUNER_CUSTOMISE
+ select MEDIA_TUNER_TDA18271 if !MEDIA_TUNER_CUSTOMISE
select DVB_MB86A20S if !DVB_FE_CUSTOMISE
---help---
diff --git a/drivers/media/video/cx231xx/cx231xx-audio.c b/drivers/media/video/cx231xx/cx231xx-audio.c
index 30d13c15739a..a2c2b7d343ec 100644
--- a/drivers/media/video/cx231xx/cx231xx-audio.c
+++ b/drivers/media/video/cx231xx/cx231xx-audio.c
@@ -111,6 +111,9 @@ static void cx231xx_audio_isocirq(struct urb *urb)
struct snd_pcm_substream *substream;
struct snd_pcm_runtime *runtime;
+ if (dev->state & DEV_DISCONNECTED)
+ return;
+
switch (urb->status) {
case 0: /* success */
case -ETIMEDOUT: /* NAK */
@@ -196,6 +199,9 @@ static void cx231xx_audio_bulkirq(struct urb *urb)
struct snd_pcm_substream *substream;
struct snd_pcm_runtime *runtime;
+ if (dev->state & DEV_DISCONNECTED)
+ return;
+
switch (urb->status) {
case 0: /* success */
case -ETIMEDOUT: /* NAK */
@@ -273,6 +279,9 @@ static int cx231xx_init_audio_isoc(struct cx231xx *dev)
cx231xx_info("%s: Starting ISO AUDIO transfers\n", __func__);
+ if (dev->state & DEV_DISCONNECTED)
+ return -ENODEV;
+
sb_size = CX231XX_ISO_NUM_AUDIO_PACKETS * dev->adev.max_pkt_size;
for (i = 0; i < CX231XX_AUDIO_BUFS; i++) {
@@ -298,7 +307,7 @@ static int cx231xx_init_audio_isoc(struct cx231xx *dev)
urb->context = dev;
urb->pipe = usb_rcvisocpipe(dev->udev,
dev->adev.end_point_addr);
- urb->transfer_flags = URB_ISO_ASAP;
+ urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
urb->transfer_buffer = dev->adev.transfer_buffer[i];
urb->interval = 1;
urb->complete = cx231xx_audio_isocirq;
@@ -331,6 +340,9 @@ static int cx231xx_init_audio_bulk(struct cx231xx *dev)
cx231xx_info("%s: Starting BULK AUDIO transfers\n", __func__);
+ if (dev->state & DEV_DISCONNECTED)
+ return -ENODEV;
+
sb_size = CX231XX_NUM_AUDIO_PACKETS * dev->adev.max_pkt_size;
for (i = 0; i < CX231XX_AUDIO_BUFS; i++) {
@@ -356,7 +368,7 @@ static int cx231xx_init_audio_bulk(struct cx231xx *dev)
urb->context = dev;
urb->pipe = usb_rcvbulkpipe(dev->udev,
dev->adev.end_point_addr);
- urb->transfer_flags = 0;
+ urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
urb->transfer_buffer = dev->adev.transfer_buffer[i];
urb->complete = cx231xx_audio_bulkirq;
urb->transfer_buffer_length = sb_size;
@@ -432,6 +444,11 @@ static int snd_cx231xx_capture_open(struct snd_pcm_substream *substream)
return -ENODEV;
}
+ if (dev->state & DEV_DISCONNECTED) {
+ cx231xx_errdev("Can't open. the device was removed.\n");
+ return -ENODEV;
+ }
+
/* Sets volume, mute, etc */
dev->mute = 0;
@@ -571,6 +588,9 @@ static int snd_cx231xx_capture_trigger(struct snd_pcm_substream *substream,
struct cx231xx *dev = snd_pcm_substream_chip(substream);
int retval;
+ if (dev->state & DEV_DISCONNECTED)
+ return -ENODEV;
+
spin_lock(&dev->adev.slock);
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
diff --git a/drivers/media/video/cx231xx/cx231xx-cards.c b/drivers/media/video/cx231xx/cx231xx-cards.c
index 60b021e79864..875a7ce94736 100644
--- a/drivers/media/video/cx231xx/cx231xx-cards.c
+++ b/drivers/media/video/cx231xx/cx231xx-cards.c
@@ -843,25 +843,34 @@ void cx231xx_release_resources(struct cx231xx *dev)
cx231xx_remove_from_devlist(dev);
+ cx231xx_ir_exit(dev);
+
/* Release I2C buses */
cx231xx_dev_uninit(dev);
- cx231xx_ir_exit(dev);
+ /* delete v4l2 device */
+ v4l2_device_unregister(&dev->v4l2_dev);
usb_put_dev(dev->udev);
/* Mark device as unused */
- cx231xx_devused &= ~(1 << dev->devno);
+ clear_bit(dev->devno, &cx231xx_devused);
+
+ kfree(dev->video_mode.alt_max_pkt_size);
+ kfree(dev->vbi_mode.alt_max_pkt_size);
+ kfree(dev->sliced_cc_mode.alt_max_pkt_size);
+ kfree(dev->ts1_mode.alt_max_pkt_size);
+ kfree(dev);
+ dev = NULL;
}
/*
* cx231xx_init_dev()
* allocates and inits the device structs, registers i2c bus and v4l device
*/
-static int cx231xx_init_dev(struct cx231xx **devhandle, struct usb_device *udev,
+static int cx231xx_init_dev(struct cx231xx *dev, struct usb_device *udev,
int minor)
{
- struct cx231xx *dev = *devhandle;
int retval = -ENOMEM;
int errCode;
unsigned int maxh, maxw;
@@ -1016,7 +1025,6 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
int i, isoc_pipe = 0;
char *speed;
char descr[255] = "";
- struct usb_interface *lif = NULL;
struct usb_interface_assoc_descriptor *assoc_desc;
udev = usb_get_dev(interface_to_usbdev(interface));
@@ -1030,21 +1038,21 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
return -ENODEV;
/* Check to see next free device and mark as used */
- nr = find_first_zero_bit(&cx231xx_devused, CX231XX_MAXBOARDS);
- cx231xx_devused |= 1 << nr;
-
- if (nr >= CX231XX_MAXBOARDS) {
- cx231xx_err(DRIVER_NAME
- ": Supports only %i cx231xx boards.\n", CX231XX_MAXBOARDS);
- cx231xx_devused &= ~(1 << nr);
- return -ENOMEM;
- }
+ do {
+ nr = find_first_zero_bit(&cx231xx_devused, CX231XX_MAXBOARDS);
+ if (nr >= CX231XX_MAXBOARDS) {
+ /* No free device slots */
+ cx231xx_err(DRIVER_NAME ": Supports only %i devices.\n",
+ CX231XX_MAXBOARDS);
+ return -ENOMEM;
+ }
+ } while (test_and_set_bit(nr, &cx231xx_devused));
/* allocate memory for our device state and initialize it */
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (dev == NULL) {
cx231xx_err(DRIVER_NAME ": out of memory!\n");
- cx231xx_devused &= ~(1 << nr);
+ clear_bit(nr, &cx231xx_devused);
return -ENOMEM;
}
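
The probe path above replaces an unlocked read-modify-write of cx231xx_devused with a loop around test_and_set_bit(), so two devices probing at once can no longer claim the same slot index. A self-contained sketch of the pattern, using hypothetical names rather than the driver's actual symbols:

#include <linux/bitops.h>
#include <linux/errno.h>

#define EXAMPLE_MAX_BOARDS 8

static unsigned long example_used;	/* bitmap of claimed device slots */

static int example_claim_slot(void)
{
	int nr;

	do {
		nr = find_first_zero_bit(&example_used, EXAMPLE_MAX_BOARDS);
		if (nr >= EXAMPLE_MAX_BOARDS)
			return -ENOMEM;	/* every slot is taken */
		/* retry if another probe set the bit between the search
		 * and the atomic test_and_set_bit() below */
	} while (test_and_set_bit(nr, &example_used));

	return nr;			/* slot nr is now reserved */
}

static void example_release_slot(int nr)
{
	clear_bit(nr, &example_used);	/* mirrors clear_bit() on release */
}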
@@ -1071,9 +1079,6 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
/* init CIR module TBD */
- /* store the current interface */
- lif = interface;
-
/*mode_tv: digital=1 or analog=0*/
dev->mode_tv = 0;
@@ -1113,9 +1118,6 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
le16_to_cpu(udev->descriptor.idProduct),
dev->max_iad_interface_count);
- /* store the interface 0 back */
- lif = udev->actconfig->interface[0];
-
/* increment interface count */
dev->interface_count++;
@@ -1126,7 +1128,7 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
if (assoc_desc->bFirstInterface != ifnum) {
cx231xx_err(DRIVER_NAME ": Not found "
"matching IAD interface\n");
- cx231xx_devused &= ~(1 << nr);
+ clear_bit(dev->devno, &cx231xx_devused);
kfree(dev);
dev = NULL;
return -ENODEV;
@@ -1135,7 +1137,7 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
cx231xx_info("registering interface %d\n", ifnum);
/* save our data pointer in this interface device */
- usb_set_intfdata(lif, dev);
+ usb_set_intfdata(interface, dev);
/*
* AV device initialization - only done at the last interface
@@ -1145,19 +1147,19 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
retval = v4l2_device_register(&interface->dev, &dev->v4l2_dev);
if (retval) {
cx231xx_errdev("v4l2_device_register failed\n");
- cx231xx_devused &= ~(1 << nr);
+ clear_bit(dev->devno, &cx231xx_devused);
kfree(dev);
dev = NULL;
return -EIO;
}
/* allocate device struct */
- retval = cx231xx_init_dev(&dev, udev, nr);
+ retval = cx231xx_init_dev(dev, udev, nr);
if (retval) {
- cx231xx_devused &= ~(1 << dev->devno);
+ clear_bit(dev->devno, &cx231xx_devused);
v4l2_device_unregister(&dev->v4l2_dev);
kfree(dev);
dev = NULL;
- usb_set_intfdata(lif, NULL);
+ usb_set_intfdata(interface, NULL);
return retval;
}
@@ -1178,7 +1180,7 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
if (dev->video_mode.alt_max_pkt_size == NULL) {
cx231xx_errdev("out of memory!\n");
- cx231xx_devused &= ~(1 << nr);
+ clear_bit(dev->devno, &cx231xx_devused);
v4l2_device_unregister(&dev->v4l2_dev);
kfree(dev);
dev = NULL;
@@ -1212,7 +1214,7 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
if (dev->vbi_mode.alt_max_pkt_size == NULL) {
cx231xx_errdev("out of memory!\n");
- cx231xx_devused &= ~(1 << nr);
+ clear_bit(dev->devno, &cx231xx_devused);
v4l2_device_unregister(&dev->v4l2_dev);
kfree(dev);
dev = NULL;
@@ -1247,7 +1249,7 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
if (dev->sliced_cc_mode.alt_max_pkt_size == NULL) {
cx231xx_errdev("out of memory!\n");
- cx231xx_devused &= ~(1 << nr);
+ clear_bit(dev->devno, &cx231xx_devused);
v4l2_device_unregister(&dev->v4l2_dev);
kfree(dev);
dev = NULL;
@@ -1283,7 +1285,7 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
if (dev->ts1_mode.alt_max_pkt_size == NULL) {
cx231xx_errdev("out of memory!\n");
- cx231xx_devused &= ~(1 << nr);
+ clear_bit(dev->devno, &cx231xx_devused);
v4l2_device_unregister(&dev->v4l2_dev);
kfree(dev);
dev = NULL;
@@ -1334,10 +1336,9 @@ static void cx231xx_usb_disconnect(struct usb_interface *interface)
if (!dev->udev)
return;
- flush_request_modules(dev);
+ dev->state |= DEV_DISCONNECTED;
- /* delete v4l2 device */
- v4l2_device_unregister(&dev->v4l2_dev);
+ flush_request_modules(dev);
/* wait until all current v4l2 io is finished then deallocate
resources */
@@ -1351,31 +1352,24 @@ static void cx231xx_usb_disconnect(struct usb_interface *interface)
"deallocation are deferred on close.\n",
video_device_node_name(dev->vdev));
- dev->state |= DEV_MISCONFIGURED;
+ /* Even with users attached, it is safe to remove the RC i2c driver */
+ cx231xx_ir_exit(dev);
+
if (dev->USE_ISO)
cx231xx_uninit_isoc(dev);
else
cx231xx_uninit_bulk(dev);
- dev->state |= DEV_DISCONNECTED;
wake_up_interruptible(&dev->wait_frame);
wake_up_interruptible(&dev->wait_stream);
} else {
- dev->state |= DEV_DISCONNECTED;
- cx231xx_release_resources(dev);
}
cx231xx_close_extension(dev);
mutex_unlock(&dev->lock);
- if (!dev->users) {
- kfree(dev->video_mode.alt_max_pkt_size);
- kfree(dev->vbi_mode.alt_max_pkt_size);
- kfree(dev->sliced_cc_mode.alt_max_pkt_size);
- kfree(dev->ts1_mode.alt_max_pkt_size);
- kfree(dev);
- dev = NULL;
- }
+ if (!dev->users)
+ cx231xx_release_resources(dev);
}
static struct usb_driver cx231xx_usb_driver = {
diff --git a/drivers/media/video/cx231xx/cx231xx-core.c b/drivers/media/video/cx231xx/cx231xx-core.c
index d4457f9488ee..08dd930f882a 100644
--- a/drivers/media/video/cx231xx/cx231xx-core.c
+++ b/drivers/media/video/cx231xx/cx231xx-core.c
@@ -166,6 +166,9 @@ int cx231xx_send_usb_command(struct cx231xx_i2c *i2c_bus,
u8 _i2c_nostop = 0;
u8 _i2c_reserve = 0;
+ if (dev->state & DEV_DISCONNECTED)
+ return -ENODEV;
+
/* Get the I2C period, nostop and reserve parameters */
_i2c_period = i2c_bus->i2c_period;
_i2c_nostop = i2c_bus->i2c_nostop;
@@ -1071,7 +1074,7 @@ int cx231xx_init_isoc(struct cx231xx *dev, int max_packets,
sb_size, cx231xx_isoc_irq_callback, dma_q, 1);
urb->number_of_packets = max_packets;
- urb->transfer_flags = URB_ISO_ASAP;
+ urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
k = 0;
for (j = 0; j < max_packets; j++) {
@@ -1182,7 +1185,7 @@ int cx231xx_init_bulk(struct cx231xx *dev, int max_packets,
return -ENOMEM;
}
dev->video_mode.bulk_ctl.urb[i] = urb;
- urb->transfer_flags = 0;
+ urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
dev->video_mode.bulk_ctl.transfer_buffer[i] =
usb_alloc_coherent(dev->udev, sb_size, GFP_KERNEL,
diff --git a/drivers/media/video/cx231xx/cx231xx-dvb.c b/drivers/media/video/cx231xx/cx231xx-dvb.c
index da9a4a0aab79..7c4e360ba9bc 100644
--- a/drivers/media/video/cx231xx/cx231xx-dvb.c
+++ b/drivers/media/video/cx231xx/cx231xx-dvb.c
@@ -196,7 +196,7 @@ static inline int dvb_isoc_copy(struct cx231xx *dev, struct urb *urb)
if (!dev)
return 0;
- if ((dev->state & DEV_DISCONNECTED) || (dev->state & DEV_MISCONFIGURED))
+ if (dev->state & DEV_DISCONNECTED)
return 0;
if (urb->status < 0) {
@@ -228,7 +228,7 @@ static inline int dvb_bulk_copy(struct cx231xx *dev, struct urb *urb)
if (!dev)
return 0;
- if ((dev->state & DEV_DISCONNECTED) || (dev->state & DEV_MISCONFIGURED))
+ if (dev->state & DEV_DISCONNECTED)
return 0;
if (urb->status < 0) {
diff --git a/drivers/media/video/cx231xx/cx231xx-input.c b/drivers/media/video/cx231xx/cx231xx-input.c
index 45e14cac4622..96176e9db5a2 100644
--- a/drivers/media/video/cx231xx/cx231xx-input.c
+++ b/drivers/media/video/cx231xx/cx231xx-input.c
@@ -27,12 +27,16 @@
static int get_key_isdbt(struct IR_i2c *ir, u32 *ir_key,
u32 *ir_raw)
{
+ int rc;
u8 cmd, scancode;
dev_dbg(&ir->rc->input_dev->dev, "%s\n", __func__);
/* poll IR chip */
- if (1 != i2c_master_recv(ir->c, &cmd, 1))
+ rc = i2c_master_recv(ir->c, &cmd, 1);
+ if (rc < 0)
+ return rc;
+ if (rc != 1)
return -EIO;
/* it seems that 0xFE indicates that a button is still hold
@@ -102,11 +106,14 @@ int cx231xx_ir_init(struct cx231xx *dev)
ir_i2c_bus = cx231xx_boards[dev->model].ir_i2c_master;
dev_dbg(&dev->udev->dev, "Trying to bind ir at bus %d, addr 0x%02x\n",
ir_i2c_bus, info.addr);
- i2c_new_device(&dev->i2c_bus[ir_i2c_bus].i2c_adap, &info);
+ dev->ir_i2c_client = i2c_new_device(&dev->i2c_bus[ir_i2c_bus].i2c_adap, &info);
return 0;
}
void cx231xx_ir_exit(struct cx231xx *dev)
{
+ if (dev->ir_i2c_client)
+ i2c_unregister_device(dev->ir_i2c_client);
+ dev->ir_i2c_client = NULL;
}
diff --git a/drivers/media/video/cx231xx/cx231xx-vbi.c b/drivers/media/video/cx231xx/cx231xx-vbi.c
index 1c7a4daafecf..8cdee5f78f13 100644
--- a/drivers/media/video/cx231xx/cx231xx-vbi.c
+++ b/drivers/media/video/cx231xx/cx231xx-vbi.c
@@ -93,7 +93,7 @@ static inline int cx231xx_isoc_vbi_copy(struct cx231xx *dev, struct urb *urb)
if (!dev)
return 0;
- if ((dev->state & DEV_DISCONNECTED) || (dev->state & DEV_MISCONFIGURED))
+ if (dev->state & DEV_DISCONNECTED)
return 0;
if (urb->status < 0) {
@@ -452,7 +452,7 @@ int cx231xx_init_vbi_isoc(struct cx231xx *dev, int max_packets,
return -ENOMEM;
}
dev->vbi_mode.bulk_ctl.urb[i] = urb;
- urb->transfer_flags = 0;
+ urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
dev->vbi_mode.bulk_ctl.transfer_buffer[i] =
kzalloc(sb_size, GFP_KERNEL);
diff --git a/drivers/media/video/cx231xx/cx231xx-video.c b/drivers/media/video/cx231xx/cx231xx-video.c
index 6e81f970dc7d..829a41b0c9ef 100644
--- a/drivers/media/video/cx231xx/cx231xx-video.c
+++ b/drivers/media/video/cx231xx/cx231xx-video.c
@@ -337,7 +337,7 @@ static inline int cx231xx_isoc_copy(struct cx231xx *dev, struct urb *urb)
if (!dev)
return 0;
- if ((dev->state & DEV_DISCONNECTED) || (dev->state & DEV_MISCONFIGURED))
+ if (dev->state & DEV_DISCONNECTED)
return 0;
if (urb->status < 0) {
@@ -440,7 +440,7 @@ static inline int cx231xx_bulk_copy(struct cx231xx *dev, struct urb *urb)
if (!dev)
return 0;
- if ((dev->state & DEV_DISCONNECTED) || (dev->state & DEV_MISCONFIGURED))
+ if (dev->state & DEV_DISCONNECTED)
return 0;
if (urb->status < 0) {
@@ -1000,12 +1000,6 @@ static int check_dev(struct cx231xx *dev)
cx231xx_errdev("v4l2 ioctl: device not present\n");
return -ENODEV;
}
-
- if (dev->state & DEV_MISCONFIGURED) {
- cx231xx_errdev("v4l2 ioctl: device is misconfigured; "
- "close and open it again\n");
- return -EIO;
- }
return 0;
}
@@ -2347,7 +2341,8 @@ static int cx231xx_v4l2_close(struct file *filp)
return 0;
}
- if (dev->users == 1) {
+ dev->users--;
+ if (!dev->users) {
videobuf_stop(&fh->vb_vidq);
videobuf_mmap_free(&fh->vb_vidq);
@@ -2374,7 +2369,6 @@ static int cx231xx_v4l2_close(struct file *filp)
cx231xx_set_alt_setting(dev, INDEX_VIDEO, 0);
}
kfree(fh);
- dev->users--;
wake_up_interruptible_nr(&dev->open, 1);
return 0;
}
diff --git a/drivers/media/video/cx231xx/cx231xx.h b/drivers/media/video/cx231xx/cx231xx.h
index 2000bc64c497..e17447554a0d 100644
--- a/drivers/media/video/cx231xx/cx231xx.h
+++ b/drivers/media/video/cx231xx/cx231xx.h
@@ -377,7 +377,6 @@ struct cx231xx_board {
enum cx231xx_dev_state {
DEV_INITIALIZED = 0x01,
DEV_DISCONNECTED = 0x02,
- DEV_MISCONFIGURED = 0x04,
};
enum AFE_MODE {
@@ -621,6 +620,7 @@ struct cx231xx {
/* For I2C IR support */
struct IR_i2c_init_data init_data;
+ struct i2c_client *ir_i2c_client;
unsigned int stream_on:1; /* Locks streams */
unsigned int vbi_stream_on:1; /* Locks streams for VBI */
diff --git a/drivers/media/video/cx23885/cx23885-417.c b/drivers/media/video/cx23885/cx23885-417.c
index 67c4a59bd882..f5c79e53e5a1 100644
--- a/drivers/media/video/cx23885/cx23885-417.c
+++ b/drivers/media/video/cx23885/cx23885-417.c
@@ -900,6 +900,7 @@ static int cx23885_load_firmware(struct cx23885_dev *dev)
int i, retval = 0;
u32 value = 0;
u32 gpio_output = 0;
+ u32 gpio_value;
u32 checksum = 0;
u32 *dataptr;
@@ -907,7 +908,7 @@ static int cx23885_load_firmware(struct cx23885_dev *dev)
/* Save GPIO settings before reset of APU */
retval |= mc417_memory_read(dev, 0x9020, &gpio_output);
- retval |= mc417_memory_read(dev, 0x900C, &value);
+ retval |= mc417_memory_read(dev, 0x900C, &gpio_value);
retval = mc417_register_write(dev,
IVTV_REG_VPU, 0xFFFFFFED);
@@ -991,11 +992,18 @@ static int cx23885_load_firmware(struct cx23885_dev *dev)
/* F/W power up disturbs the GPIOs, restore state */
retval |= mc417_register_write(dev, 0x9020, gpio_output);
- retval |= mc417_register_write(dev, 0x900C, value);
+ retval |= mc417_register_write(dev, 0x900C, gpio_value);
retval |= mc417_register_read(dev, IVTV_REG_VPU, &value);
retval |= mc417_register_write(dev, IVTV_REG_VPU, value & 0xFFFFFFE8);
+ /* Hardcoded GPIOs here */
+ retval |= mc417_register_write(dev, 0x9020, 0x4000);
+ retval |= mc417_register_write(dev, 0x900C, 0x4000);
+
+ mc417_register_read(dev, 0x9020, &gpio_output);
+ mc417_register_read(dev, 0x900C, &gpio_value);
+
if (retval < 0)
printk(KERN_ERR "%s: Error with mc417_register_write\n",
__func__);
@@ -1015,6 +1023,12 @@ static void cx23885_codec_settings(struct cx23885_dev *dev)
{
dprintk(1, "%s()\n", __func__);
+ /* Dynamically change the height based on video standard */
+ if (dev->encodernorm.id & V4L2_STD_525_60)
+ dev->ts1.height = 480;
+ else
+ dev->ts1.height = 576;
+
/* assign frame size */
cx23885_api_cmd(dev, CX2341X_ENC_SET_FRAME_SIZE, 2, 0,
dev->ts1.height, dev->ts1.width);
@@ -1030,7 +1044,7 @@ static void cx23885_codec_settings(struct cx23885_dev *dev)
cx23885_api_cmd(dev, CX2341X_ENC_MISC, 2, 0, 4, 1);
}
-static int cx23885_initialize_codec(struct cx23885_dev *dev)
+static int cx23885_initialize_codec(struct cx23885_dev *dev, int startencoder)
{
int version;
int retval;
@@ -1112,9 +1126,11 @@ static int cx23885_initialize_codec(struct cx23885_dev *dev)
mc417_memory_write(dev, 2120, 0x00000080);
/* start capturing to the host interface */
- cx23885_api_cmd(dev, CX2341X_ENC_START_CAPTURE, 2, 0,
- CX23885_MPEG_CAPTURE, CX23885_RAW_BITS_NONE);
- msleep(10);
+ if (startencoder) {
+ cx23885_api_cmd(dev, CX2341X_ENC_START_CAPTURE, 2, 0,
+ CX23885_MPEG_CAPTURE, CX23885_RAW_BITS_NONE);
+ msleep(10);
+ }
return 0;
}
@@ -1196,6 +1212,16 @@ static int cx23885_querymenu(struct cx23885_dev *dev,
cx2341x_ctrl_get_menu(&dev->mpeg_params, qmenu->id));
}
+static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *id)
+{
+ struct cx23885_fh *fh = file->private_data;
+ struct cx23885_dev *dev = fh->dev;
+
+ call_all(dev, core, g_std, id);
+
+ return 0;
+}
+
static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id *id)
{
struct cx23885_fh *fh = file->private_data;
@@ -1208,55 +1234,31 @@ static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id *id)
if (i == ARRAY_SIZE(cx23885_tvnorms))
return -EINVAL;
dev->encodernorm = cx23885_tvnorms[i];
+
+ /* Have the driver core notify the subdevices */
+ mutex_lock(&dev->lock);
+ cx23885_set_tvnorm(dev, *id);
+ mutex_unlock(&dev->lock);
+
return 0;
}
static int vidioc_enum_input(struct file *file, void *priv,
- struct v4l2_input *i)
+ struct v4l2_input *i)
{
- struct cx23885_fh *fh = file->private_data;
- struct cx23885_dev *dev = fh->dev;
- struct cx23885_input *input;
- int n;
-
- if (i->index >= 4)
- return -EINVAL;
-
- input = &cx23885_boards[dev->board].input[i->index];
-
- if (input->type == 0)
- return -EINVAL;
-
- /* FIXME
- * strcpy(i->name, input->name); */
- strcpy(i->name, "unset");
-
- if (input->type == CX23885_VMUX_TELEVISION ||
- input->type == CX23885_VMUX_CABLE)
- i->type = V4L2_INPUT_TYPE_TUNER;
- else
- i->type = V4L2_INPUT_TYPE_CAMERA;
-
- for (n = 0; n < ARRAY_SIZE(cx23885_tvnorms); n++)
- i->std |= cx23885_tvnorms[n].id;
- return 0;
+ struct cx23885_dev *dev = ((struct cx23885_fh *)priv)->dev;
+ dprintk(1, "%s()\n", __func__);
+ return cx23885_enum_input(dev, i);
}
static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
{
- struct cx23885_fh *fh = file->private_data;
- struct cx23885_dev *dev = fh->dev;
-
- *i = dev->input;
- return 0;
+ return cx23885_get_input(file, priv, i);
}
static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
{
- if (i >= 4)
- return -EINVAL;
-
- return 0;
+ return cx23885_set_input(file, priv, i);
}
static int vidioc_g_tuner(struct file *file, void *priv,
@@ -1309,43 +1311,25 @@ static int vidioc_g_frequency(struct file *file, void *priv,
}
static int vidioc_s_frequency(struct file *file, void *priv,
- struct v4l2_frequency *f)
+ struct v4l2_frequency *f)
{
- struct cx23885_fh *fh = file->private_data;
- struct cx23885_dev *dev = fh->dev;
-
- cx23885_api_cmd(fh->dev, CX2341X_ENC_STOP_CAPTURE, 3, 0,
- CX23885_END_NOW, CX23885_MPEG_CAPTURE,
- CX23885_RAW_BITS_NONE);
-
- dprintk(1, "VIDIOC_S_FREQUENCY: dev type %d, f\n",
- dev->tuner_type);
- dprintk(1, "VIDIOC_S_FREQUENCY: f tuner %d, f type %d\n",
- f->tuner, f->type);
- if (UNSET == dev->tuner_type)
- return -EINVAL;
- if (f->tuner != 0)
- return -EINVAL;
- if (f->type != V4L2_TUNER_ANALOG_TV)
- return -EINVAL;
- dev->freq = f->frequency;
-
- call_all(dev, tuner, s_frequency, f);
+ return cx23885_set_frequency(file, priv, f);
+}
- cx23885_initialize_codec(dev);
+static int vidioc_g_ctrl(struct file *file, void *priv,
+ struct v4l2_control *ctl)
+{
+ struct cx23885_dev *dev = ((struct cx23885_fh *)priv)->dev;
- return 0;
+ return cx23885_get_control(dev, ctl);
}
static int vidioc_s_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctl)
+ struct v4l2_control *ctl)
{
- struct cx23885_fh *fh = file->private_data;
- struct cx23885_dev *dev = fh->dev;
+ struct cx23885_dev *dev = ((struct cx23885_fh *)priv)->dev;
- /* Update the A/V core */
- call_all(dev, core, s_ctrl, ctl);
- return 0;
+ return cx23885_set_control(dev, ctl);
}
static int vidioc_querycap(struct file *file, void *priv,
@@ -1636,7 +1620,7 @@ static ssize_t mpeg_read(struct file *file, char __user *data,
/* Start mpeg encoder on first read. */
if (atomic_cmpxchg(&fh->v4l_reading, 0, 1) == 0) {
if (atomic_inc_return(&dev->v4l_reader_count) == 1) {
- if (cx23885_initialize_codec(dev) < 0)
+ if (cx23885_initialize_codec(dev, 1) < 0)
return -EINVAL;
}
}
@@ -1677,6 +1661,8 @@ static struct v4l2_file_operations mpeg_fops = {
};
static const struct v4l2_ioctl_ops mpeg_ioctl_ops = {
+ .vidioc_querystd = vidioc_g_std,
+ .vidioc_g_std = vidioc_g_std,
.vidioc_s_std = vidioc_s_std,
.vidioc_enum_input = vidioc_enum_input,
.vidioc_g_input = vidioc_g_input,
@@ -1686,6 +1672,7 @@ static const struct v4l2_ioctl_ops mpeg_ioctl_ops = {
.vidioc_g_frequency = vidioc_g_frequency,
.vidioc_s_frequency = vidioc_s_frequency,
.vidioc_s_ctrl = vidioc_s_ctrl,
+ .vidioc_g_ctrl = vidioc_g_ctrl,
.vidioc_querycap = vidioc_querycap,
.vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
.vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
@@ -1746,8 +1733,8 @@ static struct video_device *cx23885_video_dev_alloc(
if (NULL == vfd)
return NULL;
*vfd = *template;
- snprintf(vfd->name, sizeof(vfd->name), "%s %s (%s)", dev->name,
- type, cx23885_boards[tsport->dev->board].name);
+ snprintf(vfd->name, sizeof(vfd->name), "%s (%s)",
+ cx23885_boards[tsport->dev->board].name, type);
vfd->parent = &pci->dev;
vfd->release = video_device_release;
return vfd;
@@ -1791,5 +1778,11 @@ int cx23885_417_register(struct cx23885_dev *dev)
printk(KERN_INFO "%s: registered device %s [mpeg]\n",
dev->name, video_device_node_name(dev->v4l_device));
+ /* ST: Configure the encoder parameters, but don't begin
+ * encoding; this resolves an issue where the first time the
+ * encoder is started, video can be choppy.
+ */
+ cx23885_initialize_codec(dev, 0);
+
return 0;
}
diff --git a/drivers/media/video/cx23885/cx23885-cards.c b/drivers/media/video/cx23885/cx23885-cards.c
index c3cf08945e4c..19b5499d2624 100644
--- a/drivers/media/video/cx23885/cx23885-cards.c
+++ b/drivers/media/video/cx23885/cx23885-cards.c
@@ -213,8 +213,8 @@ struct cx23885_board cx23885_boards[] = {
.portc = CX23885_MPEG_DVB,
.tuner_type = TUNER_XC4000,
.tuner_addr = 0x61,
- .radio_type = TUNER_XC4000,
- .radio_addr = 0x61,
+ .radio_type = UNSET,
+ .radio_addr = ADDR_UNSET,
.input = {{
.type = CX23885_VMUX_TELEVISION,
.vmux = CX25840_VIN2_CH1 |
@@ -335,8 +335,33 @@ struct cx23885_board cx23885_boards[] = {
},
[CX23885_BOARD_HAUPPAUGE_HVR1850] = {
.name = "Hauppauge WinTV-HVR1850",
+ .porta = CX23885_ANALOG_VIDEO,
.portb = CX23885_MPEG_ENCODER,
.portc = CX23885_MPEG_DVB,
+ .tuner_type = TUNER_ABSENT,
+ .tuner_addr = 0x42, /* 0x84 >> 1 */
+ .force_bff = 1,
+ .input = {{
+ .type = CX23885_VMUX_TELEVISION,
+ .vmux = CX25840_VIN7_CH3 |
+ CX25840_VIN5_CH2 |
+ CX25840_VIN2_CH1 |
+ CX25840_DIF_ON,
+ .amux = CX25840_AUDIO8,
+ }, {
+ .type = CX23885_VMUX_COMPOSITE1,
+ .vmux = CX25840_VIN7_CH3 |
+ CX25840_VIN4_CH2 |
+ CX25840_VIN6_CH1,
+ .amux = CX25840_AUDIO7,
+ }, {
+ .type = CX23885_VMUX_SVIDEO,
+ .vmux = CX25840_VIN7_CH3 |
+ CX25840_VIN4_CH2 |
+ CX25840_VIN8_CH1 |
+ CX25840_SVIDEO_ON,
+ .amux = CX25840_AUDIO7,
+ } },
},
[CX23885_BOARD_COMPRO_VIDEOMATE_E800] = {
.name = "Compro VideoMate E800",
@@ -438,6 +463,41 @@ struct cx23885_board cx23885_boards[] = {
.gpio0 = 0,
} },
},
+ [CX23885_BOARD_MYGICA_X8507] = {
+ .name = "Mygica X8507",
+ .tuner_type = TUNER_XC5000,
+ .tuner_addr = 0x61,
+ .tuner_bus = 1,
+ .porta = CX23885_ANALOG_VIDEO,
+ .input = {
+ {
+ .type = CX23885_VMUX_TELEVISION,
+ .vmux = CX25840_COMPOSITE2,
+ .amux = CX25840_AUDIO8,
+ },
+ {
+ .type = CX23885_VMUX_COMPOSITE1,
+ .vmux = CX25840_COMPOSITE8,
+ },
+ {
+ .type = CX23885_VMUX_SVIDEO,
+ .vmux = CX25840_SVIDEO_LUMA3 |
+ CX25840_SVIDEO_CHROMA4,
+ },
+ {
+ .type = CX23885_VMUX_COMPONENT,
+ .vmux = CX25840_COMPONENT_ON |
+ CX25840_VIN1_CH1 |
+ CX25840_VIN6_CH2 |
+ CX25840_VIN7_CH3,
+ },
+ },
+ },
+ [CX23885_BOARD_TERRATEC_CINERGY_T_PCIE_DUAL] = {
+ .name = "TerraTec Cinergy T PCIe Dual",
+ .portb = CX23885_MPEG_DVB,
+ .portc = CX23885_MPEG_DVB,
+ }
};
const unsigned int cx23885_bcount = ARRAY_SIZE(cx23885_boards);
@@ -637,6 +697,14 @@ struct cx23885_subid cx23885_subids[] = {
.subvendor = 0x1b55,
.subdevice = 0xe2e4,
.card = CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF,
+ }, {
+ .subvendor = 0x14f1,
+ .subdevice = 0x8502,
+ .card = CX23885_BOARD_MYGICA_X8507,
+ }, {
+ .subvendor = 0x153b,
+ .subdevice = 0x117e,
+ .card = CX23885_BOARD_TERRATEC_CINERGY_T_PCIE_DUAL,
},
};
const unsigned int cx23885_idcount = ARRAY_SIZE(cx23885_subids);
@@ -1068,6 +1136,7 @@ void cx23885_gpio_setup(struct cx23885_dev *dev)
break;
case CX23885_BOARD_MYGICA_X8506:
case CX23885_BOARD_MAGICPRO_PROHDTVE2:
+ case CX23885_BOARD_MYGICA_X8507:
/* GPIO-0 (0)Analog / (1)Digital TV */
/* GPIO-1 reset XC5000 */
/* GPIO-2 reset LGS8GL5 / LGS8G75 */
@@ -1367,6 +1436,7 @@ void cx23885_card_setup(struct cx23885_dev *dev)
ts1->ts_clk_en_val = 0x1; /* Enable TS_CLK */
ts1->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
break;
+ case CX23885_BOARD_HAUPPAUGE_HVR1850:
case CX23885_BOARD_HAUPPAUGE_HVR1800:
/* Defaults for VID B - Analog encoder */
/* DREQ_POL, SMODE, PUNC_CLK, MCLK_POL Serial bus + punc clk */
@@ -1377,6 +1447,7 @@ void cx23885_card_setup(struct cx23885_dev *dev)
/* APB_TSVALERR_POL (active low)*/
ts1->vld_misc_val = 0x2000;
ts1->hw_sop_ctrl_val = (0x47 << 16 | 188 << 4 | 0xc);
+ cx_write(0x130184, 0xc);
/* Defaults for VID C */
ts2->gen_ctrl_val = 0xc; /* Serial bus + punctured clock */
@@ -1396,6 +1467,7 @@ void cx23885_card_setup(struct cx23885_dev *dev)
break;
case CX23885_BOARD_NETUP_DUAL_DVBS2_CI:
case CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF:
+ case CX23885_BOARD_TERRATEC_CINERGY_T_PCIE_DUAL:
ts1->gen_ctrl_val = 0xc; /* Serial bus + punctured clock */
ts1->ts_clk_en_val = 0x1; /* Enable TS_CLK */
ts1->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
@@ -1431,7 +1503,6 @@ void cx23885_card_setup(struct cx23885_dev *dev)
case CX23885_BOARD_HAUPPAUGE_HVR1275:
case CX23885_BOARD_HAUPPAUGE_HVR1255:
case CX23885_BOARD_HAUPPAUGE_HVR1210:
- case CX23885_BOARD_HAUPPAUGE_HVR1850:
case CX23885_BOARD_COMPRO_VIDEOMATE_E800:
case CX23885_BOARD_HAUPPAUGE_HVR1290:
case CX23885_BOARD_GOTVIEW_X5_3D_HYBRID:
@@ -1468,6 +1539,8 @@ void cx23885_card_setup(struct cx23885_dev *dev)
case CX23885_BOARD_GOTVIEW_X5_3D_HYBRID:
case CX23885_BOARD_HAUPPAUGE_HVR1500:
case CX23885_BOARD_MPX885:
+ case CX23885_BOARD_MYGICA_X8507:
+ case CX23885_BOARD_TERRATEC_CINERGY_T_PCIE_DUAL:
dev->sd_cx25840 = v4l2_i2c_new_subdev(&dev->v4l2_dev,
&dev->i2c_bus[2].i2c_adap,
"cx25840", 0x88 >> 1, NULL);
diff --git a/drivers/media/video/cx23885/cx23885-core.c b/drivers/media/video/cx23885/cx23885-core.c
index 40e68b22015e..6ad227029a0f 100644
--- a/drivers/media/video/cx23885/cx23885-core.c
+++ b/drivers/media/video/cx23885/cx23885-core.c
@@ -206,12 +206,12 @@ static struct sram_channel cx23887_sram_channels[] = {
.cnt2_reg = DMA1_CNT2,
},
[SRAM_CH02] = {
- .name = "ch2",
- .cmds_start = 0x0,
- .ctrl_start = 0x0,
- .cdt = 0x0,
- .fifo_start = 0x0,
- .fifo_size = 0x0,
+ .name = "VID A (VBI)",
+ .cmds_start = 0x10050,
+ .ctrl_start = 0x105F0,
+ .cdt = 0x10810,
+ .fifo_start = 0x3000,
+ .fifo_size = 0x1000,
.ptr1_reg = DMA2_PTR1,
.ptr2_reg = DMA2_PTR2,
.cnt1_reg = DMA2_CNT1,
@@ -266,12 +266,12 @@ static struct sram_channel cx23887_sram_channels[] = {
.cnt2_reg = DMA5_CNT2,
},
[SRAM_CH07] = {
- .name = "ch7",
- .cmds_start = 0x0,
- .ctrl_start = 0x0,
- .cdt = 0x0,
- .fifo_start = 0x0,
- .fifo_size = 0x0,
+ .name = "TV Audio",
+ .cmds_start = 0x10190,
+ .ctrl_start = 0x106B0,
+ .cdt = 0x10930,
+ .fifo_start = 0x7000,
+ .fifo_size = 0x1000,
.ptr1_reg = DMA6_PTR1,
.ptr2_reg = DMA6_PTR2,
.cnt1_reg = DMA6_CNT1,
diff --git a/drivers/media/video/cx23885/cx23885-dvb.c b/drivers/media/video/cx23885/cx23885-dvb.c
index bcb45be44bb2..6835eb1fc093 100644
--- a/drivers/media/video/cx23885/cx23885-dvb.c
+++ b/drivers/media/video/cx23885/cx23885-dvb.c
@@ -61,6 +61,8 @@
#include "cx23885-f300.h"
#include "altera-ci.h"
#include "stv0367.h"
+#include "drxk.h"
+#include "mt2063.h"
static unsigned int debug;
@@ -111,6 +113,8 @@ static void dvb_buf_release(struct videobuf_queue *q,
cx23885_free_buffer(q, (struct cx23885_buffer *)vb);
}
+static int cx23885_dvb_set_frontend(struct dvb_frontend *fe);
+
static void cx23885_dvb_gate_ctrl(struct cx23885_tsport *port, int open)
{
struct videobuf_dvb_frontends *f;
@@ -125,6 +129,12 @@ static void cx23885_dvb_gate_ctrl(struct cx23885_tsport *port, int open)
if (fe && fe->dvb.frontend && fe->dvb.frontend->ops.i2c_gate_ctrl)
fe->dvb.frontend->ops.i2c_gate_ctrl(fe->dvb.frontend, open);
+
+ /*
+ * FIXME: Improve this path to avoid calling the
+ * cx23885_dvb_set_frontend() every time it passes here.
+ */
+ cx23885_dvb_set_frontend(fe->dvb.frontend);
}
static struct videobuf_queue_ops dvb_qops = {
@@ -479,15 +489,15 @@ static struct xc5000_config mygica_x8506_xc5000_config = {
.if_khz = 5380,
};
-static int cx23885_dvb_set_frontend(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *param)
+static int cx23885_dvb_set_frontend(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct cx23885_tsport *port = fe->dvb->priv;
struct cx23885_dev *dev = port->dev;
switch (dev->board) {
case CX23885_BOARD_HAUPPAUGE_HVR1275:
- switch (param->u.vsb.modulation) {
+ switch (p->modulation) {
case VSB_8:
cx23885_gpio_clear(dev, GPIO_5);
break;
@@ -507,31 +517,6 @@ static int cx23885_dvb_set_frontend(struct dvb_frontend *fe,
return 0;
}
-static int cx23885_dvb_fe_ioctl_override(struct dvb_frontend *fe,
- unsigned int cmd, void *parg,
- unsigned int stage)
-{
- int err = 0;
-
- switch (stage) {
- case DVB_FE_IOCTL_PRE:
-
- switch (cmd) {
- case FE_SET_FRONTEND:
- err = cx23885_dvb_set_frontend(fe,
- (struct dvb_frontend_parameters *) parg);
- break;
- }
- break;
-
- case DVB_FE_IOCTL_POST:
- /* no post-ioctl handling required */
- break;
- }
- return err;
-};
-
-
static struct lgs8gxx_config magicpro_prohdtve2_lgs8g75_config = {
.prod = LGS8GXX_PROD_LGS8G75,
.demod_address = 0x19,
@@ -617,6 +602,24 @@ static struct xc5000_config netup_xc5000_config[] = {
},
};
+static struct drxk_config terratec_drxk_config[] = {
+ {
+ .adr = 0x29,
+ .no_i2c_bridge = 1,
+ }, {
+ .adr = 0x2a,
+ .no_i2c_bridge = 1,
+ },
+};
+
+static struct mt2063_config terratec_mt2063_config[] = {
+ {
+ .tuner_address = 0x60,
+ }, {
+ .tuner_address = 0x67,
+ },
+};
+
int netup_altera_fpga_rw(void *device, int flag, int data, int read)
{
struct cx23885_dev *dev = (struct cx23885_dev *)device;
@@ -940,6 +943,11 @@ static int dvb_register(struct cx23885_tsport *port)
fe = dvb_attach(xc4000_attach, fe0->dvb.frontend,
&dev->i2c_bus[1].i2c_adap, &cfg);
+ if (!fe) {
+ printk(KERN_ERR "%s/2: xc4000 attach failed\n",
+ dev->name);
+ goto frontend_detach;
+ }
}
break;
case CX23885_BOARD_TBS_6920:
@@ -1043,6 +1051,20 @@ static int dvb_register(struct cx23885_tsport *port)
}
break;
case CX23885_BOARD_HAUPPAUGE_HVR1850:
+ i2c_bus = &dev->i2c_bus[0];
+ fe0->dvb.frontend = dvb_attach(s5h1411_attach,
+ &hcw_s5h1411_config,
+ &i2c_bus->i2c_adap);
+ if (fe0->dvb.frontend != NULL)
+ dvb_attach(tda18271_attach, fe0->dvb.frontend,
+ 0x60, &dev->i2c_bus[0].i2c_adap,
+ &hauppauge_tda18271_config);
+
+ tda18271_attach(&dev->ts1.analog_fe,
+ 0x60, &dev->i2c_bus[1].i2c_adap,
+ &hauppauge_tda18271_config);
+
+ break;
case CX23885_BOARD_HAUPPAUGE_HVR1290:
i2c_bus = &dev->i2c_bus[0];
fe0->dvb.frontend = dvb_attach(s5h1411_attach,
@@ -1118,6 +1140,39 @@ static int dvb_register(struct cx23885_tsport *port)
goto frontend_detach;
}
break;
+ case CX23885_BOARD_TERRATEC_CINERGY_T_PCIE_DUAL:
+ i2c_bus = &dev->i2c_bus[0];
+ i2c_bus2 = &dev->i2c_bus[1];
+
+ switch (port->nr) {
+ /* port b */
+ case 1:
+ fe0->dvb.frontend = dvb_attach(drxk_attach,
+ &terratec_drxk_config[0],
+ &i2c_bus->i2c_adap);
+ if (fe0->dvb.frontend != NULL) {
+ if (!dvb_attach(mt2063_attach,
+ fe0->dvb.frontend,
+ &terratec_mt2063_config[0],
+ &i2c_bus2->i2c_adap))
+ goto frontend_detach;
+ }
+ break;
+ /* port c */
+ case 2:
+ fe0->dvb.frontend = dvb_attach(drxk_attach,
+ &terratec_drxk_config[1],
+ &i2c_bus->i2c_adap);
+ if (fe0->dvb.frontend != NULL) {
+ if (!dvb_attach(mt2063_attach,
+ fe0->dvb.frontend,
+ &terratec_mt2063_config[1],
+ &i2c_bus2->i2c_adap))
+ goto frontend_detach;
+ }
+ break;
+ }
+ break;
default:
printk(KERN_INFO "%s: The frontend of your DVB/ATSC card "
" isn't supported yet\n",
@@ -1151,7 +1206,7 @@ static int dvb_register(struct cx23885_tsport *port)
/* register everything */
ret = videobuf_dvb_register_bus(&port->frontends, THIS_MODULE, port,
&dev->pci->dev, adapter_nr, mfe_shared,
- cx23885_dvb_fe_ioctl_override);
+ NULL);
if (ret)
goto frontend_detach;
diff --git a/drivers/media/video/cx23885/cx23885-i2c.c b/drivers/media/video/cx23885/cx23885-i2c.c
index 0ff7a9e98f3e..be1e21d8295c 100644
--- a/drivers/media/video/cx23885/cx23885-i2c.c
+++ b/drivers/media/video/cx23885/cx23885-i2c.c
@@ -309,7 +309,7 @@ static void do_i2c_scan(char *name, struct i2c_client *c)
}
}
-/* init + register i2c algo-bit adapter */
+/* init + register i2c adapter */
int cx23885_i2c_register(struct cx23885_i2c *bus)
{
struct cx23885_dev *dev = bus->dev;
diff --git a/drivers/media/video/cx23885/cx23885-video.c b/drivers/media/video/cx23885/cx23885-video.c
index e730b9263016..c654bdc7ccb2 100644
--- a/drivers/media/video/cx23885/cx23885-video.c
+++ b/drivers/media/video/cx23885/cx23885-video.c
@@ -253,9 +253,9 @@ static struct cx23885_ctrl cx23885_ctls[] = {
.id = V4L2_CID_AUDIO_VOLUME,
.name = "Volume",
.minimum = 0,
- .maximum = 0x3f,
- .step = 1,
- .default_value = 0x3f,
+ .maximum = 65535,
+ .step = 65535 / 100,
+ .default_value = 65535,
.type = V4L2_CTRL_TYPE_INTEGER,
},
.reg = PATH1_VOL_CTL,
@@ -316,7 +316,7 @@ void cx23885_video_wakeup(struct cx23885_dev *dev,
__func__, bc);
}
-static int cx23885_set_tvnorm(struct cx23885_dev *dev, v4l2_std_id norm)
+int cx23885_set_tvnorm(struct cx23885_dev *dev, v4l2_std_id norm)
{
dprintk(1, "%s(norm = 0x%08x) name: [%s]\n",
__func__,
@@ -344,8 +344,8 @@ static struct video_device *cx23885_vdev_init(struct cx23885_dev *dev,
*vfd = *template;
vfd->v4l2_dev = &dev->v4l2_dev;
vfd->release = video_device_release;
- snprintf(vfd->name, sizeof(vfd->name), "%s %s (%s)",
- dev->name, type, cx23885_boards[dev->board].name);
+ snprintf(vfd->name, sizeof(vfd->name), "%s (%s)",
+ cx23885_boards[dev->board].name, type);
video_set_drvdata(vfd, dev);
return vfd;
}
@@ -492,7 +492,8 @@ static int cx23885_video_mux(struct cx23885_dev *dev, unsigned int input)
dev->input = input;
if (dev->board == CX23885_BOARD_MYGICA_X8506 ||
- dev->board == CX23885_BOARD_MAGICPRO_PROHDTVE2) {
+ dev->board == CX23885_BOARD_MAGICPRO_PROHDTVE2 ||
+ dev->board == CX23885_BOARD_MYGICA_X8507) {
/* Select Analog TV */
if (INPUT(input)->type == CX23885_VMUX_TELEVISION)
cx23885_gpio_clear(dev, GPIO_0);
@@ -503,7 +504,8 @@ static int cx23885_video_mux(struct cx23885_dev *dev, unsigned int input)
INPUT(input)->vmux, 0, 0);
if ((dev->board == CX23885_BOARD_HAUPPAUGE_HVR1800) ||
- (dev->board == CX23885_BOARD_MPX885)) {
+ (dev->board == CX23885_BOARD_MPX885) ||
+ (dev->board == CX23885_BOARD_HAUPPAUGE_HVR1850)) {
/* Configure audio routing */
v4l2_subdev_call(dev->sd_cx25840, audio, s_routing,
INPUT(input)->amux, 0, 0);
@@ -649,6 +651,7 @@ static int buffer_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb,
int rc, init_buffer = 0;
u32 line0_offset, line1_offset;
struct videobuf_dmabuf *dma = videobuf_to_dma(&buf->vb);
+ int field_tff;
BUG_ON(NULL == fh->fmt);
if (fh->width < 48 || fh->width > norm_maxw(dev->tvnorm) ||
@@ -690,15 +693,25 @@ static int buffer_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb,
buf->bpl, 0, buf->vb.height);
break;
case V4L2_FIELD_INTERLACED:
- if (dev->tvnorm & V4L2_STD_NTSC) {
+ if (dev->tvnorm & V4L2_STD_NTSC)
+ /* NTSC or */
+ field_tff = 1;
+ else
+ field_tff = 0;
+
+ if (cx23885_boards[dev->board].force_bff)
+ /* PAL / SECAM OR 888 in NTSC MODE */
+ field_tff = 0;
+
+ if (field_tff) {
/* cx25840 transmits NTSC bottom field first */
- dprintk(1, "%s() Creating NTSC risc\n",
+ dprintk(1, "%s() Creating TFF/NTSC risc\n",
__func__);
line0_offset = buf->bpl;
line1_offset = 0;
} else {
/* All other formats are top field first */
- dprintk(1, "%s() Creating PAL/SECAM risc\n",
+ dprintk(1, "%s() Creating BFF/PAL/SECAM risc\n",
__func__);
line0_offset = 0;
line1_offset = buf->bpl;
@@ -981,6 +994,8 @@ static int video_release(struct file *file)
}
videobuf_mmap_free(&fh->vidq);
+ videobuf_mmap_free(&fh->vbiq);
+
file->private_data = NULL;
kfree(fh);
@@ -1002,7 +1017,7 @@ static int video_mmap(struct file *file, struct vm_area_struct *vma)
/* ------------------------------------------------------------------ */
/* VIDEO CTRL IOCTLS */
-static int cx23885_get_control(struct cx23885_dev *dev,
+int cx23885_get_control(struct cx23885_dev *dev,
struct v4l2_control *ctl)
{
dprintk(1, "%s() calling cx25840(VIDIOC_G_CTRL)\n", __func__);
@@ -1010,7 +1025,7 @@ static int cx23885_get_control(struct cx23885_dev *dev,
return 0;
}
-static int cx23885_set_control(struct cx23885_dev *dev,
+int cx23885_set_control(struct cx23885_dev *dev,
struct v4l2_control *ctl)
{
dprintk(1, "%s() calling cx25840(VIDIOC_S_CTRL)\n", __func__);
@@ -1229,6 +1244,16 @@ static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
return 0;
}
+static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *id)
+{
+ struct cx23885_dev *dev = ((struct cx23885_fh *)priv)->dev;
+ dprintk(1, "%s()\n", __func__);
+
+ call_all(dev, core, g_std, id);
+
+ return 0;
+}
+
static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id *tvnorms)
{
struct cx23885_dev *dev = ((struct cx23885_fh *)priv)->dev;
@@ -1241,7 +1266,7 @@ static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id *tvnorms)
return 0;
}
-static int cx23885_enum_input(struct cx23885_dev *dev, struct v4l2_input *i)
+int cx23885_enum_input(struct cx23885_dev *dev, struct v4l2_input *i)
{
static const char *iname[] = {
[CX23885_VMUX_COMPOSITE1] = "Composite1",
@@ -1278,6 +1303,15 @@ static int cx23885_enum_input(struct cx23885_dev *dev, struct v4l2_input *i)
if (INPUT(n)->type != CX23885_VMUX_TELEVISION)
i->audioset = 0x3;
+ if (dev->input == n) {
+ /* enum'd input matches our configured input.
+ * Ask the video decoder to process the call
+ * and give it an opportunity to update the
+ * status field.
+ */
+ call_all(dev, video, g_input_status, &i->status);
+ }
+
return 0;
}
@@ -1289,7 +1323,7 @@ static int vidioc_enum_input(struct file *file, void *priv,
return cx23885_enum_input(dev, i);
}
-static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
+int cx23885_get_input(struct file *file, void *priv, unsigned int *i)
{
struct cx23885_dev *dev = ((struct cx23885_fh *)priv)->dev;
@@ -1298,7 +1332,12 @@ static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
return 0;
}
-static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
+static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
+{
+ return cx23885_get_input(file, priv, i);
+}
+
+int cx23885_set_input(struct file *file, void *priv, unsigned int i)
{
struct cx23885_dev *dev = ((struct cx23885_fh *)priv)->dev;
@@ -1322,6 +1361,11 @@ static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
return 0;
}
+static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
+{
+ return cx23885_set_input(file, priv, i);
+}
+
static int vidioc_log_status(struct file *file, void *priv)
{
struct cx23885_fh *fh = priv;
@@ -1329,11 +1373,11 @@ static int vidioc_log_status(struct file *file, void *priv)
printk(KERN_INFO
"%s/0: ============ START LOG STATUS ============\n",
- dev->name);
+ dev->name);
call_all(dev, core, log_status);
printk(KERN_INFO
"%s/0: ============= END LOG STATUS =============\n",
- dev->name);
+ dev->name);
return 0;
}
@@ -1471,6 +1515,8 @@ static int vidioc_g_frequency(struct file *file, void *priv,
static int cx23885_set_freq(struct cx23885_dev *dev, struct v4l2_frequency *f)
{
+ struct v4l2_control ctrl;
+
if (unlikely(UNSET == dev->tuner_type))
return -EINVAL;
if (unlikely(f->tuner != 0))
@@ -1479,29 +1525,103 @@ static int cx23885_set_freq(struct cx23885_dev *dev, struct v4l2_frequency *f)
mutex_lock(&dev->lock);
dev->freq = f->frequency;
+ /* Mute audio while the frequency is changed */
+ ctrl.id = V4L2_CID_AUDIO_MUTE;
+ ctrl.value = 1;
+ cx23885_set_control(dev, &ctrl);
+
call_all(dev, tuner, s_frequency, f);
/* When changing channels it is required to reset TVAUDIO */
- msleep(10);
+ msleep(100);
+
+ /* Unmute audio once the new frequency is set */
+ ctrl.value = 0;
+ cx23885_set_control(dev, &ctrl);
mutex_unlock(&dev->lock);
return 0;
}
-static int vidioc_s_frequency(struct file *file, void *priv,
- struct v4l2_frequency *f)
+static int cx23885_set_freq_via_ops(struct cx23885_dev *dev,
+ struct v4l2_frequency *f)
+{
+ struct v4l2_control ctrl;
+ struct videobuf_dvb_frontend *vfe;
+ struct dvb_frontend *fe;
+
+ struct analog_parameters params = {
+ .mode = V4L2_TUNER_ANALOG_TV,
+ .audmode = V4L2_TUNER_MODE_STEREO,
+ .std = dev->tvnorm,
+ .frequency = f->frequency
+ };
+
+ mutex_lock(&dev->lock);
+ dev->freq = f->frequency;
+
+ /* Mute audio while the frequency is changed */
+ ctrl.id = V4L2_CID_AUDIO_MUTE;
+ ctrl.value = 1;
+ cx23885_set_control(dev, &ctrl);
+
+ /* If HVR1850 */
+ dprintk(1, "%s() frequency=%d tuner=%d std=0x%llx\n", __func__,
+ params.frequency, f->tuner, params.std);
+
+ vfe = videobuf_dvb_get_frontend(&dev->ts2.frontends, 1);
+ if (!vfe) {
+ mutex_unlock(&dev->lock);
+ return -EINVAL;
+ }
+
+ fe = vfe->dvb.frontend;
+
+ if (dev->board == CX23885_BOARD_HAUPPAUGE_HVR1850)
+ fe = &dev->ts1.analog_fe;
+
+ if (fe && fe->ops.tuner_ops.set_analog_params) {
+ call_all(dev, core, s_std, dev->tvnorm);
+ fe->ops.tuner_ops.set_analog_params(fe, &params);
+ } else
+ printk(KERN_ERR "%s() No analog tuner, aborting\n", __func__);
+
+ /* When changing channels it is required to reset TVAUDIO */
+ msleep(100);
+
+ /* Unmute audio once the new frequency is set */
+ ctrl.value = 0;
+ cx23885_set_control(dev, &ctrl);
+
+ mutex_unlock(&dev->lock);
+
+ return 0;
+}
+
+int cx23885_set_frequency(struct file *file, void *priv,
+ struct v4l2_frequency *f)
{
struct cx23885_fh *fh = priv;
struct cx23885_dev *dev = fh->dev;
+ int ret;
- if (unlikely(0 == fh->radio && f->type != V4L2_TUNER_ANALOG_TV))
- return -EINVAL;
- if (unlikely(1 == fh->radio && f->type != V4L2_TUNER_RADIO))
- return -EINVAL;
+ switch (dev->board) {
+ case CX23885_BOARD_HAUPPAUGE_HVR1850:
+ ret = cx23885_set_freq_via_ops(dev, f);
+ break;
+ default:
+ ret = cx23885_set_freq(dev, f);
+ }
- return
- cx23885_set_freq(dev, f);
+ return ret;
+}
+
+static int vidioc_s_frequency(struct file *file, void *priv,
+ struct v4l2_frequency *f)
+{
+ return cx23885_set_frequency(file, priv, f);
}
/* ----------------------------------------------------------- */
@@ -1613,6 +1733,8 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
.vidioc_qbuf = vidioc_qbuf,
.vidioc_dqbuf = vidioc_dqbuf,
.vidioc_s_std = vidioc_s_std,
+ .vidioc_g_std = vidioc_g_std,
+ .vidioc_querystd = vidioc_g_std,
.vidioc_enum_input = vidioc_enum_input,
.vidioc_g_input = vidioc_g_input,
.vidioc_s_input = vidioc_s_input,
diff --git a/drivers/media/video/cx23885/cx23885.h b/drivers/media/video/cx23885/cx23885.h
index b49036fe3ffd..f020f0568df4 100644
--- a/drivers/media/video/cx23885/cx23885.h
+++ b/drivers/media/video/cx23885/cx23885.h
@@ -87,6 +87,8 @@
#define CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF 30
#define CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H_XC4000 31
#define CX23885_BOARD_MPX885 32
+#define CX23885_BOARD_MYGICA_X8507 33
+#define CX23885_BOARD_TERRATEC_CINERGY_T_PCIE_DUAL 34
#define GPIO_0 0x00000001
#define GPIO_1 0x00000002
@@ -226,6 +228,8 @@ struct cx23885_board {
u32 clk_freq;
struct cx23885_input input[MAX_CX23885_INPUT];
int ci_type; /* for NetUP */
+ /* Force bottom field first during DMA (888 workaround) */
+ u32 force_bff;
};
struct cx23885_subid {
@@ -310,6 +314,9 @@ struct cx23885_tsport {
u32 num_frontends;
void (*gate_ctrl)(struct cx23885_tsport *port, int open);
void *port_priv;
+
+ /* Workaround: a temporary dvb_frontend that the tuner can be attached to */
+ struct dvb_frontend analog_fe;
};
struct cx23885_kernel_ir {
@@ -574,6 +581,13 @@ extern void cx23885_video_unregister(struct cx23885_dev *dev);
extern int cx23885_video_irq(struct cx23885_dev *dev, u32 status);
extern void cx23885_video_wakeup(struct cx23885_dev *dev,
struct cx23885_dmaqueue *q, u32 count);
+int cx23885_enum_input(struct cx23885_dev *dev, struct v4l2_input *i);
+int cx23885_set_input(struct file *file, void *priv, unsigned int i);
+int cx23885_get_input(struct file *file, void *priv, unsigned int *i);
+int cx23885_set_frequency(struct file *file, void *priv, struct v4l2_frequency *f);
+int cx23885_set_control(struct cx23885_dev *dev, struct v4l2_control *ctl);
+int cx23885_get_control(struct cx23885_dev *dev, struct v4l2_control *ctl);
+int cx23885_set_tvnorm(struct cx23885_dev *dev, v4l2_std_id norm);
/* ----------------------------------------------------------- */
/* cx23885-vbi.c */
diff --git a/drivers/media/video/cx25821/cx25821-alsa.c b/drivers/media/video/cx25821/cx25821-alsa.c
index 09e99de5fd21..03cfac476b03 100644
--- a/drivers/media/video/cx25821/cx25821-alsa.c
+++ b/drivers/media/video/cx25821/cx25821-alsa.c
@@ -102,7 +102,7 @@ struct cx25821_audio_dev {
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */
static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
-static int enable[SNDRV_CARDS] = { 1, [1 ... (SNDRV_CARDS - 1)] = 1 };
+static bool enable[SNDRV_CARDS] = { 1, [1 ... (SNDRV_CARDS - 1)] = 1 };
module_param_array(enable, bool, NULL, 0444);
MODULE_PARM_DESC(enable, "Enable cx25821 soundcard. default enabled.");
@@ -176,8 +176,7 @@ static int _cx25821_start_audio_dma(struct cx25821_audio_dev *chip)
/* Set the input mode to 16-bit */
tmp = cx_read(AUD_A_CFG);
- cx_write(AUD_A_CFG,
- tmp | FLD_AUD_DST_PK_MODE | FLD_AUD_DST_ENABLE |
+ cx_write(AUD_A_CFG, tmp | FLD_AUD_DST_PK_MODE | FLD_AUD_DST_ENABLE |
FLD_AUD_CLK_ENABLE);
/*
@@ -188,9 +187,8 @@ static int _cx25821_start_audio_dma(struct cx25821_audio_dev *chip)
*/
/* Enables corresponding bits at AUD_INT_STAT */
- cx_write(AUD_A_INT_MSK,
- FLD_AUD_DST_RISCI1 | FLD_AUD_DST_OF | FLD_AUD_DST_SYNC |
- FLD_AUD_DST_OPC_ERR);
+ cx_write(AUD_A_INT_MSK, FLD_AUD_DST_RISCI1 | FLD_AUD_DST_OF |
+ FLD_AUD_DST_SYNC | FLD_AUD_DST_OPC_ERR);
/* Clean any pending interrupt bits already set */
cx_write(AUD_A_INT_STAT, ~0);
@@ -200,8 +198,8 @@ static int _cx25821_start_audio_dma(struct cx25821_audio_dev *chip)
/* Turn on audio downstream fifo and risc enable 0x101 */
tmp = cx_read(AUD_INT_DMA_CTL);
- cx_set(AUD_INT_DMA_CTL,
- tmp | (FLD_AUD_DST_A_RISC_EN | FLD_AUD_DST_A_FIFO_EN));
+ cx_set(AUD_INT_DMA_CTL, tmp |
+ (FLD_AUD_DST_A_RISC_EN | FLD_AUD_DST_A_FIFO_EN));
mdelay(100);
return 0;
@@ -220,9 +218,8 @@ static int _cx25821_stop_audio_dma(struct cx25821_audio_dev *chip)
/* disable irqs */
cx_clear(PCI_INT_MSK, PCI_MSK_AUD_INT);
- cx_clear(AUD_A_INT_MSK,
- AUD_INT_OPC_ERR | AUD_INT_DN_SYNC | AUD_INT_DN_RISCI2 |
- AUD_INT_DN_RISCI1);
+ cx_clear(AUD_A_INT_MSK, AUD_INT_OPC_ERR | AUD_INT_DN_SYNC |
+ AUD_INT_DN_RISCI2 | AUD_INT_DN_RISCI1);
return 0;
}
@@ -234,15 +231,15 @@ static int _cx25821_stop_audio_dma(struct cx25821_audio_dev *chip)
*/
static char *cx25821_aud_irqs[32] = {
"dn_risci1", "up_risci1", "rds_dn_risc1", /* 0-2 */
- NULL, /* reserved */
+ NULL, /* reserved */
"dn_risci2", "up_risci2", "rds_dn_risc2", /* 4-6 */
- NULL, /* reserved */
- "dnf_of", "upf_uf", "rds_dnf_uf", /* 8-10 */
- NULL, /* reserved */
- "dn_sync", "up_sync", "rds_dn_sync", /* 12-14 */
- NULL, /* reserved */
- "opc_err", "par_err", "rip_err", /* 16-18 */
- "pci_abort", "ber_irq", "mchg_irq" /* 19-21 */
+ NULL, /* reserved */
+ "dnf_of", "upf_uf", "rds_dnf_uf", /* 8-10 */
+ NULL, /* reserved */
+ "dn_sync", "up_sync", "rds_dn_sync", /* 12-14 */
+ NULL, /* reserved */
+ "opc_err", "par_err", "rip_err", /* 16-18 */
+ "pci_abort", "ber_irq", "mchg_irq" /* 19-21 */
};
/*
@@ -258,10 +255,8 @@ static void cx25821_aud_irq(struct cx25821_audio_dev *chip, u32 status,
cx_write(AUD_A_INT_STAT, status);
if (debug > 1 || (status & mask & ~0xff))
- cx25821_print_irqbits(dev->name, "irq aud",
- cx25821_aud_irqs,
- ARRAY_SIZE(cx25821_aud_irqs), status,
- mask);
+ cx25821_print_irqbits(dev->name, "irq aud", cx25821_aud_irqs,
+ ARRAY_SIZE(cx25821_aud_irqs), status, mask);
/* risc op code error */
if (status & AUD_INT_OPC_ERR) {
@@ -270,8 +265,7 @@ static void cx25821_aud_irq(struct cx25821_audio_dev *chip, u32 status,
cx_clear(AUD_INT_DMA_CTL,
FLD_AUD_DST_A_RISC_EN | FLD_AUD_DST_A_FIFO_EN);
cx25821_sram_channel_dump_audio(dev,
- &cx25821_sram_channels
- [AUDIO_SRAM_CHANNEL]);
+ &cx25821_sram_channels[AUDIO_SRAM_CHANNEL]);
}
if (status & AUD_INT_DN_SYNC) {
pr_warn("WARNING %s: Downstream sync error!\n", dev->name);
@@ -317,8 +311,9 @@ static irqreturn_t cx25821_irq(int irq, void *dev_id)
cx25821_aud_irq(chip, audint_status,
audint_mask);
break;
- } else
+ } else {
goto out;
+ }
}
handled = 1;
@@ -361,9 +356,8 @@ static int dsp_buffer_free(struct cx25821_audio_dev *chip)
*/
#define DEFAULT_FIFO_SIZE 384
static struct snd_pcm_hardware snd_cx25821_digital_hw = {
- .info = SNDRV_PCM_INFO_MMAP |
- SNDRV_PCM_INFO_INTERLEAVED |
- SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP_VALID,
+ .info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP_VALID,
.formats = SNDRV_PCM_FMTBIT_S16_LE,
.rates = SNDRV_PCM_RATE_48000,
@@ -396,8 +390,8 @@ static int snd_cx25821_pcm_open(struct snd_pcm_substream *substream)
return -ENODEV;
}
- err =
- snd_pcm_hw_constraint_pow2(runtime, 0, SNDRV_PCM_HW_PARAM_PERIODS);
+ err = snd_pcm_hw_constraint_pow2(runtime, 0,
+ SNDRV_PCM_HW_PARAM_PERIODS);
if (err < 0)
goto _error;
@@ -468,8 +462,7 @@ static int snd_cx25821_hw_params(struct snd_pcm_substream *substream,
dma = &buf->dma;
videobuf_dma_init(dma);
ret = videobuf_dma_init_kernel(dma, PCI_DMA_FROMDEVICE,
- (PAGE_ALIGN(chip->dma_size) >>
- PAGE_SHIFT));
+ (PAGE_ALIGN(chip->dma_size) >> PAGE_SHIFT));
if (ret < 0)
goto error;
@@ -477,10 +470,8 @@ static int snd_cx25821_hw_params(struct snd_pcm_substream *substream,
if (ret < 0)
goto error;
- ret =
- cx25821_risc_databuffer_audio(chip->pci, &buf->risc, dma->sglist,
- chip->period_size, chip->num_periods,
- 1);
+ ret = cx25821_risc_databuffer_audio(chip->pci, &buf->risc, dma->sglist,
+ chip->period_size, chip->num_periods, 1);
if (ret < 0) {
pr_info("DEBUG: ERROR after cx25821_risc_databuffer_audio()\n");
goto error;
@@ -686,7 +677,7 @@ static int cx25821_audio_initdev(struct cx25821_dev *dev)
}
err = snd_card_create(index[devno], id[devno], THIS_MODULE,
- sizeof(struct cx25821_audio_dev), &card);
+ sizeof(struct cx25821_audio_dev), &card);
if (err < 0) {
pr_info("DEBUG ERROR: cannot create snd_card_new in %s\n",
__func__);
@@ -711,8 +702,8 @@ static int cx25821_audio_initdev(struct cx25821_dev *dev)
IRQF_SHARED, chip->dev->name, chip);
if (err < 0) {
- pr_err("ERROR %s: can't get IRQ %d for ALSA\n",
- chip->dev->name, dev->pci->irq);
+ pr_err("ERROR %s: can't get IRQ %d for ALSA\n", chip->dev->name,
+ dev->pci->irq);
goto error;
}
@@ -730,8 +721,8 @@ static int cx25821_audio_initdev(struct cx25821_dev *dev)
chip->iobase, chip->irq);
strcpy(card->mixername, "CX25821");
- pr_info("%s/%i: ALSA support for cx25821 boards\n",
- card->driver, devno);
+ pr_info("%s/%i: ALSA support for cx25821 boards\n", card->driver,
+ devno);
err = snd_card_register(card);
if (err < 0) {
diff --git a/drivers/media/video/cx25821/cx25821-audio-upstream.c b/drivers/media/video/cx25821/cx25821-audio-upstream.c
index c20d6dece154..20c7ca3351a8 100644
--- a/drivers/media/video/cx25821/cx25821-audio-upstream.c
+++ b/drivers/media/video/cx25821/cx25821-audio-upstream.c
@@ -107,7 +107,7 @@ static __le32 *cx25821_risc_field_upstream_audio(struct cx25821_dev *dev,
{
unsigned int line;
struct sram_channel *sram_ch =
- dev->channels[dev->_audio_upstream_channel].sram_channels;
+ dev->channels[dev->_audio_upstream_channel].sram_channels;
int offset = 0;
/* scan lines */
@@ -175,10 +175,8 @@ int cx25821_risc_buffer_upstream_audio(struct cx25821_dev *dev,
}
rp = cx25821_risc_field_upstream_audio(dev, rp,
- dev->
- _audiodata_buf_phys_addr
- + databuf_offset, bpl,
- fifo_enable);
+ dev->_audiodata_buf_phys_addr + databuf_offset,
+ bpl, fifo_enable);
if (USE_RISC_NOOP_AUDIO) {
for (i = 0; i < NUM_NO_OPS; i++)
@@ -193,7 +191,7 @@ int cx25821_risc_buffer_upstream_audio(struct cx25821_dev *dev,
/* Recalculate virtual address based on frame index */
rp = dev->_risc_virt_addr + RISC_SYNC_INSTRUCTION_SIZE / 4 +
- (AUDIO_RISC_DMA_BUF_SIZE * (frame + 1) / 4);
+ (AUDIO_RISC_DMA_BUF_SIZE * (frame + 1) / 4);
}
return 0;
@@ -218,7 +216,7 @@ void cx25821_free_memory_audio(struct cx25821_dev *dev)
void cx25821_stop_upstream_audio(struct cx25821_dev *dev)
{
struct sram_channel *sram_ch =
- dev->channels[AUDIO_UPSTREAM_SRAM_CHANNEL_B].sram_channels;
+ dev->channels[AUDIO_UPSTREAM_SRAM_CHANNEL_B].sram_channels;
u32 tmp = 0;
if (!dev->_audio_is_running) {
@@ -286,14 +284,14 @@ int cx25821_get_audio_data(struct cx25821_dev *dev,
} else {
if (!(myfile->f_op)) {
pr_err("%s(): File has no file operations registered!\n",
- __func__);
+ __func__);
filp_close(myfile, NULL);
return -EIO;
}
if (!myfile->f_op->read) {
pr_err("%s(): File has no READ operations registered!\n",
- __func__);
+ __func__);
filp_close(myfile, NULL);
return -EIO;
}
@@ -305,14 +303,14 @@ int cx25821_get_audio_data(struct cx25821_dev *dev,
for (i = 0; i < dev->_audio_lines_count; i++) {
pos = file_offset;
- vfs_read_retval =
- vfs_read(myfile, mybuf, line_size, &pos);
+ vfs_read_retval = vfs_read(myfile, mybuf, line_size,
+ &pos);
if (vfs_read_retval > 0 && vfs_read_retval == line_size
&& dev->_audiodata_buf_virt_addr != NULL) {
memcpy((void *)(dev->_audiodata_buf_virt_addr +
frame_offset / 4), mybuf,
- vfs_read_retval);
+ vfs_read_retval);
}
file_offset += vfs_read_retval;
@@ -328,8 +326,8 @@ int cx25821_get_audio_data(struct cx25821_dev *dev,
if (i > 0)
dev->_audioframe_count++;
- dev->_audiofile_status =
- (vfs_read_retval == line_size) ? IN_PROGRESS : END_OF_FILE;
+ dev->_audiofile_status = (vfs_read_retval == line_size) ?
+ IN_PROGRESS : END_OF_FILE;
set_fs(old_fs);
filp_close(myfile, NULL);
@@ -340,12 +338,12 @@ int cx25821_get_audio_data(struct cx25821_dev *dev,
static void cx25821_audioups_handler(struct work_struct *work)
{
- struct cx25821_dev *dev =
- container_of(work, struct cx25821_dev, _audio_work_entry);
+ struct cx25821_dev *dev = container_of(work, struct cx25821_dev,
+ _audio_work_entry);
if (!dev) {
pr_err("ERROR %s(): since container_of(work_struct) FAILED!\n",
- __func__);
+ __func__);
return;
}
@@ -370,19 +368,19 @@ int cx25821_openfile_audio(struct cx25821_dev *dev,
if (IS_ERR(myfile)) {
const int open_errno = -PTR_ERR(myfile);
pr_err("%s(): ERROR opening file(%s) with errno = %d!\n",
- __func__, dev->_audiofilename, open_errno);
+ __func__, dev->_audiofilename, open_errno);
return PTR_ERR(myfile);
} else {
if (!(myfile->f_op)) {
pr_err("%s(): File has no file operations registered!\n",
- __func__);
+ __func__);
filp_close(myfile, NULL);
return -EIO;
}
if (!myfile->f_op->read) {
pr_err("%s(): File has no READ operations registered!\n",
- __func__);
+ __func__);
filp_close(myfile, NULL);
return -EIO;
}
@@ -395,12 +393,12 @@ int cx25821_openfile_audio(struct cx25821_dev *dev,
for (i = 0; i < dev->_audio_lines_count; i++) {
pos = offset;
- vfs_read_retval =
- vfs_read(myfile, mybuf, line_size, &pos);
+ vfs_read_retval = vfs_read(myfile, mybuf,
+ line_size, &pos);
- if (vfs_read_retval > 0
- && vfs_read_retval == line_size
- && dev->_audiodata_buf_virt_addr != NULL) {
+ if (vfs_read_retval > 0 &&
+ vfs_read_retval == line_size &&
+ dev->_audiodata_buf_virt_addr != NULL) {
memcpy((void *)(dev->
_audiodata_buf_virt_addr
+ offset / 4), mybuf,
@@ -423,8 +421,8 @@ int cx25821_openfile_audio(struct cx25821_dev *dev,
break;
}
- dev->_audiofile_status =
- (vfs_read_retval == line_size) ? IN_PROGRESS : END_OF_FILE;
+ dev->_audiofile_status = (vfs_read_retval == line_size) ?
+ IN_PROGRESS : END_OF_FILE;
set_fs(old_fs);
myfile->f_pos = 0;
@@ -444,9 +442,8 @@ static int cx25821_audio_upstream_buffer_prepare(struct cx25821_dev *dev,
cx25821_free_memory_audio(dev);
- dev->_risc_virt_addr =
- pci_alloc_consistent(dev->pci, dev->audio_upstream_riscbuf_size,
- &dma_addr);
+ dev->_risc_virt_addr = pci_alloc_consistent(dev->pci,
+ dev->audio_upstream_riscbuf_size, &dma_addr);
dev->_risc_virt_start_addr = dev->_risc_virt_addr;
dev->_risc_phys_start_addr = dma_addr;
dev->_risc_phys_addr = dma_addr;
@@ -454,22 +451,21 @@ static int cx25821_audio_upstream_buffer_prepare(struct cx25821_dev *dev,
if (!dev->_risc_virt_addr) {
printk(KERN_DEBUG
- pr_fmt("ERROR: pci_alloc_consistent() FAILED to allocate memory for RISC program! Returning\n"));
+ pr_fmt("ERROR: pci_alloc_consistent() FAILED to allocate memory for RISC program! Returning\n"));
return -ENOMEM;
}
/* Clear out memory at address */
memset(dev->_risc_virt_addr, 0, dev->_audiorisc_size);
/* For Audio Data buffer allocation */
- dev->_audiodata_buf_virt_addr =
- pci_alloc_consistent(dev->pci, dev->audio_upstream_databuf_size,
- &data_dma_addr);
+ dev->_audiodata_buf_virt_addr = pci_alloc_consistent(dev->pci,
+ dev->audio_upstream_databuf_size, &data_dma_addr);
dev->_audiodata_buf_phys_addr = data_dma_addr;
dev->_audiodata_buf_size = dev->audio_upstream_databuf_size;
if (!dev->_audiodata_buf_virt_addr) {
printk(KERN_DEBUG
- pr_fmt("ERROR: pci_alloc_consistent() FAILED to allocate memory for data buffer! Returning\n"));
+ pr_fmt("ERROR: pci_alloc_consistent() FAILED to allocate memory for data buffer! Returning\n"));
return -ENOMEM;
}
/* Clear out memory at address */
@@ -480,12 +476,11 @@ static int cx25821_audio_upstream_buffer_prepare(struct cx25821_dev *dev,
return ret;
/* Creating RISC programs */
- ret =
- cx25821_risc_buffer_upstream_audio(dev, dev->pci, bpl,
- dev->_audio_lines_count);
+ ret = cx25821_risc_buffer_upstream_audio(dev, dev->pci, bpl,
+ dev->_audio_lines_count);
if (ret < 0) {
printk(KERN_DEBUG
- pr_fmt("ERROR creating audio upstream RISC programs!\n"));
+ pr_fmt("ERROR creating audio upstream RISC programs!\n"));
goto error;
}
@@ -533,9 +528,9 @@ int cx25821_audio_upstream_irq(struct cx25821_dev *dev, int chan_num,
if (dev->_risc_virt_start_addr != NULL) {
risc_phys_jump_addr =
- dev->_risc_phys_start_addr +
- RISC_SYNC_INSTRUCTION_SIZE +
- AUDIO_RISC_DMA_BUF_SIZE;
+ dev->_risc_phys_start_addr +
+ RISC_SYNC_INSTRUCTION_SIZE +
+ AUDIO_RISC_DMA_BUF_SIZE;
rp = cx25821_risc_field_upstream_audio(dev,
dev->_risc_virt_start_addr + 1,
@@ -632,7 +627,7 @@ static void cx25821_wait_fifo_enable(struct cx25821_dev *dev,
/* 10 millisecond timeout */
if (count++ > 1000) {
pr_err("ERROR: %s() fifo is NOT turned on. Timeout!\n",
- __func__);
+ __func__);
return;
}
@@ -661,9 +656,9 @@ int cx25821_start_audio_dma_upstream(struct cx25821_dev *dev,
/* Set the input mode to 16-bit */
tmp = cx_read(sram_ch->aud_cfg);
- tmp |=
- FLD_AUD_SRC_ENABLE | FLD_AUD_DST_PK_MODE | FLD_AUD_CLK_ENABLE |
- FLD_AUD_MASTER_MODE | FLD_AUD_CLK_SELECT_PLL_D | FLD_AUD_SONY_MODE;
+ tmp |= FLD_AUD_SRC_ENABLE | FLD_AUD_DST_PK_MODE | FLD_AUD_CLK_ENABLE |
+ FLD_AUD_MASTER_MODE | FLD_AUD_CLK_SELECT_PLL_D |
+ FLD_AUD_SONY_MODE;
cx_write(sram_ch->aud_cfg, tmp);
/* Read and write back the interrupt status register to clear it */
@@ -678,12 +673,11 @@ int cx25821_start_audio_dma_upstream(struct cx25821_dev *dev,
tmp = cx_read(sram_ch->int_msk);
cx_write(sram_ch->int_msk, tmp |= _intr_msk);
- err =
- request_irq(dev->pci->irq, cx25821_upstream_irq_audio,
+ err = request_irq(dev->pci->irq, cx25821_upstream_irq_audio,
IRQF_SHARED, dev->name, dev);
if (err < 0) {
- pr_err("%s: can't get upstream IRQ %d\n",
- dev->name, dev->pci->irq);
+ pr_err("%s: can't get upstream IRQ %d\n", dev->name,
+ dev->pci->irq);
goto fail_irq;
}
@@ -726,7 +720,7 @@ int cx25821_audio_upstream_init(struct cx25821_dev *dev, int channel_select)
if (!dev->_irq_audio_queues) {
printk(KERN_DEBUG
- pr_fmt("ERROR: create_singlethread_workqueue() for Audio FAILED!\n"));
+ pr_fmt("ERROR: create_singlethread_workqueue() for Audio FAILED!\n"));
return -ENOMEM;
}
@@ -739,33 +733,30 @@ int cx25821_audio_upstream_init(struct cx25821_dev *dev, int channel_select)
if (dev->input_audiofilename) {
str_length = strlen(dev->input_audiofilename);
- dev->_audiofilename = kmalloc(str_length + 1, GFP_KERNEL);
+ dev->_audiofilename = kmemdup(dev->input_audiofilename,
+ str_length + 1, GFP_KERNEL);
if (!dev->_audiofilename)
goto error;
- memcpy(dev->_audiofilename, dev->input_audiofilename,
- str_length + 1);
-
/* Default if filename is empty string */
if (strcmp(dev->input_audiofilename, "") == 0)
dev->_audiofilename = "/root/audioGOOD.wav";
} else {
str_length = strlen(_defaultAudioName);
- dev->_audiofilename = kmalloc(str_length + 1, GFP_KERNEL);
+ dev->_audiofilename = kmemdup(_defaultAudioName,
+ str_length + 1, GFP_KERNEL);
if (!dev->_audiofilename)
goto error;
-
- memcpy(dev->_audiofilename, _defaultAudioName, str_length + 1);
}
retval = cx25821_sram_channel_setup_upstream_audio(dev, sram_ch,
_line_size, 0);
dev->audio_upstream_riscbuf_size =
- AUDIO_RISC_DMA_BUF_SIZE * NUM_AUDIO_PROGS +
- RISC_SYNC_INSTRUCTION_SIZE;
+ AUDIO_RISC_DMA_BUF_SIZE * NUM_AUDIO_PROGS +
+ RISC_SYNC_INSTRUCTION_SIZE;
dev->audio_upstream_databuf_size = AUDIO_DATA_BUF_SZ * NUM_AUDIO_PROGS;
/* Allocating buffers and prepare RISC program */
@@ -773,7 +764,7 @@ int cx25821_audio_upstream_init(struct cx25821_dev *dev, int channel_select)
_line_size);
if (retval < 0) {
pr_err("%s: Failed to set up Audio upstream buffers!\n",
- dev->name);
+ dev->name);
goto error;
}
/* Start RISC engine */
diff --git a/drivers/media/video/cx25821/cx25821-audio.h b/drivers/media/video/cx25821/cx25821-audio.h
index 8eb55b7b88cb..1fc2d24f5110 100644
--- a/drivers/media/video/cx25821/cx25821-audio.h
+++ b/drivers/media/video/cx25821/cx25821-audio.h
@@ -23,39 +23,40 @@
#ifndef __CX25821_AUDIO_H__
#define __CX25821_AUDIO_H__
-#define USE_RISC_NOOP 1
-#define LINES_PER_BUFFER 15
-#define AUDIO_LINE_SIZE 128
+#define USE_RISC_NOOP 1
+#define LINES_PER_BUFFER 15
+#define AUDIO_LINE_SIZE 128
/* Number of buffer programs to use at once. */
-#define NUMBER_OF_PROGRAMS 8
+#define NUMBER_OF_PROGRAMS 8
/*
* Max size of the RISC program for a buffer. - worst case is 2 writes per line
* Space is also added for the 4 no-op instructions added on the end.
*/
#ifndef USE_RISC_NOOP
-#define MAX_BUFFER_PROGRAM_SIZE \
- (2 * LINES_PER_BUFFER * RISC_WRITE_INSTRUCTION_SIZE + \
- RISC_WRITECR_INSTRUCTION_SIZE * 4)
+#define MAX_BUFFER_PROGRAM_SIZE \
+ (2 * LINES_PER_BUFFER * RISC_WRITE_INSTRUCTION_SIZE + \
+ RISC_WRITECR_INSTRUCTION_SIZE * 4)
#endif
/* MAE 12 July 2005 Try to use NOOP RISC instruction instead */
#ifdef USE_RISC_NOOP
-#define MAX_BUFFER_PROGRAM_SIZE \
- (2 * LINES_PER_BUFFER * RISC_WRITE_INSTRUCTION_SIZE + \
- RISC_NOOP_INSTRUCTION_SIZE * 4)
+#define MAX_BUFFER_PROGRAM_SIZE \
+ (2 * LINES_PER_BUFFER * RISC_WRITE_INSTRUCTION_SIZE + \
+ RISC_NOOP_INSTRUCTION_SIZE * 4)
#endif
/* Sizes of various instructions in bytes. Used when adding instructions. */
-#define RISC_WRITE_INSTRUCTION_SIZE 12
-#define RISC_JUMP_INSTRUCTION_SIZE 12
-#define RISC_SKIP_INSTRUCTION_SIZE 4
-#define RISC_SYNC_INSTRUCTION_SIZE 4
-#define RISC_WRITECR_INSTRUCTION_SIZE 16
-#define RISC_NOOP_INSTRUCTION_SIZE 4
-
-#define MAX_AUDIO_DMA_BUFFER_SIZE \
-(MAX_BUFFER_PROGRAM_SIZE * NUMBER_OF_PROGRAMS + RISC_SYNC_INSTRUCTION_SIZE)
+#define RISC_WRITE_INSTRUCTION_SIZE 12
+#define RISC_JUMP_INSTRUCTION_SIZE 12
+#define RISC_SKIP_INSTRUCTION_SIZE 4
+#define RISC_SYNC_INSTRUCTION_SIZE 4
+#define RISC_WRITECR_INSTRUCTION_SIZE 16
+#define RISC_NOOP_INSTRUCTION_SIZE 4
+
+#define MAX_AUDIO_DMA_BUFFER_SIZE \
+ (MAX_BUFFER_PROGRAM_SIZE * NUMBER_OF_PROGRAMS + \
+ RISC_SYNC_INSTRUCTION_SIZE)
#endif
diff --git a/drivers/media/video/cx25821/cx25821-cards.c b/drivers/media/video/cx25821/cx25821-cards.c
index 6ace60313b49..99988c988095 100644
--- a/drivers/media/video/cx25821/cx25821-cards.c
+++ b/drivers/media/video/cx25821/cx25821-cards.c
@@ -67,6 +67,6 @@ void cx25821_card_setup(struct cx25821_dev *dev)
if (dev->i2c_bus[0].i2c_rc == 0) {
dev->i2c_bus[0].i2c_client.addr = 0xa0 >> 1;
tveeprom_read(&dev->i2c_bus[0].i2c_client, eeprom,
- sizeof(eeprom));
+ sizeof(eeprom));
}
}
diff --git a/drivers/media/video/cx25821/cx25821-core.c b/drivers/media/video/cx25821/cx25821-core.c
index a7fa38f9594e..f617474f9073 100644
--- a/drivers/media/video/cx25821/cx25821-core.c
+++ b/drivers/media/video/cx25821/cx25821-core.c
@@ -804,8 +804,8 @@ void cx25821_set_pixel_format(struct cx25821_dev *dev, int channel_select,
u32 format)
{
if (channel_select <= 7 && channel_select >= 0) {
- cx_write(dev->channels[channel_select].
- sram_channels->pix_frmt, format);
+ cx_write(dev->channels[channel_select].sram_channels->pix_frmt,
+ format);
dev->channels[channel_select].pixel_formats = format;
}
}
@@ -855,21 +855,19 @@ static void cx25821_initialize(struct cx25821_dev *dev)
}
cx25821_sram_channel_setup_audio(dev,
- dev->channels[SRAM_CH08].sram_channels,
- 128, 0);
+ dev->channels[SRAM_CH08].sram_channels, 128, 0);
cx25821_gpio_init(dev);
}
static int cx25821_get_resources(struct cx25821_dev *dev)
{
- if (request_mem_region
- (pci_resource_start(dev->pci, 0), pci_resource_len(dev->pci, 0),
- dev->name))
+ if (request_mem_region(pci_resource_start(dev->pci, 0),
+ pci_resource_len(dev->pci, 0), dev->name))
return 0;
pr_err("%s: can't get MMIO memory @ 0x%llx\n",
- dev->name, (unsigned long long)pci_resource_start(dev->pci, 0));
+ dev->name, (unsigned long long)pci_resource_start(dev->pci, 0));
return -EBUSY;
}
@@ -972,8 +970,7 @@ static int cx25821_dev_setup(struct cx25821_dev *dev)
dev->lmmio = ioremap(dev->base_io_addr, pci_resource_len(dev->pci, 0));
if (!dev->lmmio) {
- CX25821_ERR
- ("ioremap failed, maybe increasing __VMALLOC_RESERVE in page.h\n");
+ CX25821_ERR("ioremap failed, maybe increasing __VMALLOC_RESERVE in page.h\n");
cx25821_iounmap(dev);
return -ENOMEM;
}
@@ -994,7 +991,7 @@ static int cx25821_dev_setup(struct cx25821_dev *dev)
* cx25821_i2c_register(&dev->i2c_bus[2]); */
CX25821_INFO("i2c register! bus->i2c_rc = %d\n",
- dev->i2c_bus[0].i2c_rc);
+ dev->i2c_bus[0].i2c_rc);
cx25821_card_setup(dev);
@@ -1004,9 +1001,8 @@ static int cx25821_dev_setup(struct cx25821_dev *dev)
cx25821_video_register(dev);
/* register IOCTL device */
- dev->ioctl_dev =
- cx25821_vdev_init(dev, dev->pci, &cx25821_videoioctl_template,
- "video");
+ dev->ioctl_dev = cx25821_vdev_init(dev, dev->pci,
+ &cx25821_videoioctl_template, "video");
if (video_register_device
(dev->ioctl_dev, VFL_TYPE_GRABBER, VIDEO_IOCTL_CH) < 0) {
@@ -1103,16 +1099,15 @@ static __le32 *cx25821_risc_field(__le32 * rp, struct scatterlist *sglist,
}
if (bpl <= sg_dma_len(sg) - offset) {
/* fits into current chunk */
- *(rp++) =
- cpu_to_le32(RISC_WRITE | RISC_SOL | RISC_EOL | bpl);
+ *(rp++) = cpu_to_le32(RISC_WRITE | RISC_SOL | RISC_EOL |
+ bpl);
*(rp++) = cpu_to_le32(sg_dma_address(sg) + offset);
*(rp++) = cpu_to_le32(0); /* bits 63-32 */
offset += bpl;
} else {
/* scanline needs to be split */
todo = bpl;
- *(rp++) =
- cpu_to_le32(RISC_WRITE | RISC_SOL |
+ *(rp++) = cpu_to_le32(RISC_WRITE | RISC_SOL |
(sg_dma_len(sg) - offset));
*(rp++) = cpu_to_le32(sg_dma_address(sg) + offset);
*(rp++) = cpu_to_le32(0); /* bits 63-32 */
@@ -1120,8 +1115,8 @@ static __le32 *cx25821_risc_field(__le32 * rp, struct scatterlist *sglist,
offset = 0;
sg++;
while (todo > sg_dma_len(sg)) {
- *(rp++) =
- cpu_to_le32(RISC_WRITE | sg_dma_len(sg));
+ *(rp++) = cpu_to_le32(RISC_WRITE |
+ sg_dma_len(sg));
*(rp++) = cpu_to_le32(sg_dma_address(sg));
*(rp++) = cpu_to_le32(0); /* bits 63-32 */
todo -= sg_dma_len(sg);
@@ -1160,8 +1155,8 @@ int cx25821_risc_buffer(struct pci_dev *pci, struct btcx_riscmem *risc,
can cause next bpl to start close to a page border. First DMA
region may be smaller than PAGE_SIZE */
/* write and jump need and extra dword */
- instructions =
- fields * (1 + ((bpl + padding) * lines) / PAGE_SIZE + lines);
+ instructions = fields * (1 + ((bpl + padding) * lines) / PAGE_SIZE +
+ lines);
instructions += 2;
rc = btcx_riscmem_alloc(pci, risc, instructions * 12);
@@ -1215,8 +1210,8 @@ static __le32 *cx25821_risc_field_audio(__le32 * rp, struct scatterlist *sglist,
if (bpl <= sg_dma_len(sg) - offset) {
/* fits into current chunk */
- *(rp++) =
- cpu_to_le32(RISC_WRITE | sol | RISC_EOL | bpl);
+ *(rp++) = cpu_to_le32(RISC_WRITE | sol | RISC_EOL |
+ bpl);
*(rp++) = cpu_to_le32(sg_dma_address(sg) + offset);
*(rp++) = cpu_to_le32(0); /* bits 63-32 */
offset += bpl;
@@ -1224,7 +1219,7 @@ static __le32 *cx25821_risc_field_audio(__le32 * rp, struct scatterlist *sglist,
/* scanline needs to be split */
todo = bpl;
*(rp++) = cpu_to_le32(RISC_WRITE | sol |
- (sg_dma_len(sg) - offset));
+ (sg_dma_len(sg) - offset));
*(rp++) = cpu_to_le32(sg_dma_address(sg) + offset);
*(rp++) = cpu_to_le32(0); /* bits 63-32 */
todo -= (sg_dma_len(sg) - offset);
@@ -1232,7 +1227,7 @@ static __le32 *cx25821_risc_field_audio(__le32 * rp, struct scatterlist *sglist,
sg++;
while (todo > sg_dma_len(sg)) {
*(rp++) = cpu_to_le32(RISC_WRITE |
- sg_dma_len(sg));
+ sg_dma_len(sg));
*(rp++) = cpu_to_le32(sg_dma_address(sg));
*(rp++) = cpu_to_le32(0); /* bits 63-32 */
todo -= sg_dma_len(sg);
@@ -1339,8 +1334,8 @@ static irqreturn_t cx25821_irq(int irq, void *dev_id)
sram_channels->int_stat);
if (vid_status)
- handled +=
- cx25821_video_irq(dev, i, vid_status);
+ handled += cx25821_video_irq(dev, i,
+ vid_status);
cx_write(PCI_INT_STAT, mask[i]);
}
@@ -1427,9 +1422,8 @@ static int __devinit cx25821_initdev(struct pci_dev *pci_dev,
goto fail_irq;
}
- err =
- request_irq(pci_dev->irq, cx25821_irq, IRQF_SHARED,
- dev->name, dev);
+ err = request_irq(pci_dev->irq, cx25821_irq,
+ IRQF_SHARED, dev->name, dev);
if (err < 0) {
pr_err("%s: can't get IRQ %d\n", dev->name, pci_dev->irq);
@@ -1512,6 +1506,5 @@ static void __exit cx25821_fini(void)
pci_unregister_driver(&cx25821_pci_driver);
}
-
module_init(cx25821_init);
module_exit(cx25821_fini);
diff --git a/drivers/media/video/cx25821/cx25821-i2c.c b/drivers/media/video/cx25821/cx25821-i2c.c
index 4d3d0ce40785..12d7300fa1e9 100644
--- a/drivers/media/video/cx25821/cx25821-i2c.c
+++ b/drivers/media/video/cx25821/cx25821-i2c.c
@@ -252,8 +252,8 @@ static int i2c_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs, int num)
} else if (i + 1 < num && (msgs[i + 1].flags & I2C_M_RD) &&
msgs[i].addr == msgs[i + 1].addr) {
/* write then read from same address */
- retval =
- i2c_sendbytes(i2c_adap, &msgs[i], msgs[i + 1].len);
+ retval = i2c_sendbytes(i2c_adap, &msgs[i],
+ msgs[i + 1].len);
if (retval < 0)
goto err;
@@ -276,10 +276,8 @@ err:
static u32 cx25821_functionality(struct i2c_adapter *adap)
{
- return I2C_FUNC_SMBUS_EMUL |
- I2C_FUNC_I2C |
- I2C_FUNC_SMBUS_WORD_DATA |
- I2C_FUNC_SMBUS_READ_WORD_DATA | I2C_FUNC_SMBUS_WRITE_WORD_DATA;
+ return I2C_FUNC_SMBUS_EMUL | I2C_FUNC_I2C | I2C_FUNC_SMBUS_WORD_DATA |
+ I2C_FUNC_SMBUS_READ_WORD_DATA | I2C_FUNC_SMBUS_WRITE_WORD_DATA;
}
static struct i2c_algorithm cx25821_i2c_algo_template = {
@@ -300,7 +298,7 @@ static struct i2c_client cx25821_i2c_client_template = {
.name = "cx25821 internal",
};
-/* init + register i2c algo-bit adapter */
+/* init + register i2c adapter */
int cx25821_i2c_register(struct cx25821_i2c *bus)
{
struct cx25821_dev *dev = bus->dev;
diff --git a/drivers/media/video/cx25821/cx25821-medusa-defines.h b/drivers/media/video/cx25821/cx25821-medusa-defines.h
index 60d197f57556..7a9e6470ba22 100644
--- a/drivers/media/video/cx25821/cx25821-medusa-defines.h
+++ b/drivers/media/video/cx25821/cx25821-medusa-defines.h
@@ -23,7 +23,7 @@
#ifndef _MEDUSA_DEF_H_
#define _MEDUSA_DEF_H_
-/* Video deocder that we supported */
+/* Video decoders that we support */
#define VDEC_A 0
#define VDEC_B 1
#define VDEC_C 2
@@ -34,9 +34,9 @@
#define VDEC_H 7
/* end of display sequence */
-#define END_OF_SEQ 0xF;
+#define END_OF_SEQ 0xF;
/* registry string size */
-#define MAX_REGISTRY_SZ 40;
+#define MAX_REGISTRY_SZ 40;
#endif
diff --git a/drivers/media/video/cx25821/cx25821-medusa-reg.h b/drivers/media/video/cx25821/cx25821-medusa-reg.h
index 1c1c228352d1..c98ac946b277 100644
--- a/drivers/media/video/cx25821/cx25821-medusa-reg.h
+++ b/drivers/media/video/cx25821/cx25821-medusa-reg.h
@@ -28,22 +28,22 @@
#define HOST_REGISTER2 0x0001
/* Chip Configuration Registers */
-#define CHIP_CTRL 0x0100
-#define AFE_AB_CTRL 0x0104
-#define AFE_CD_CTRL 0x0108
-#define AFE_EF_CTRL 0x010C
-#define AFE_GH_CTRL 0x0110
+#define CHIP_CTRL 0x0100
+#define AFE_AB_CTRL 0x0104
+#define AFE_CD_CTRL 0x0108
+#define AFE_EF_CTRL 0x010C
+#define AFE_GH_CTRL 0x0110
#define DENC_AB_CTRL 0x0114
-#define BYP_AB_CTRL 0x0118
-#define MON_A_CTRL 0x011C
-#define DISP_SEQ_A 0x0120
-#define DISP_SEQ_B 0x0124
-#define DISP_AB_CNT 0x0128
-#define DISP_CD_CNT 0x012C
-#define DISP_EF_CNT 0x0130
-#define DISP_GH_CNT 0x0134
-#define DISP_IJ_CNT 0x0138
-#define PIN_OE_CTRL 0x013C
+#define BYP_AB_CTRL 0x0118
+#define MON_A_CTRL 0x011C
+#define DISP_SEQ_A 0x0120
+#define DISP_SEQ_B 0x0124
+#define DISP_AB_CNT 0x0128
+#define DISP_CD_CNT 0x012C
+#define DISP_EF_CNT 0x0130
+#define DISP_GH_CNT 0x0134
+#define DISP_IJ_CNT 0x0138
+#define PIN_OE_CTRL 0x013C
#define PIN_SPD_CTRL 0x0140
#define PIN_SPD_CTRL2 0x0144
#define IRQ_STAT_CTRL 0x0148
@@ -51,8 +51,8 @@
#define POWER_CTRL_CD 0x0150
#define POWER_CTRL_EF 0x0154
#define POWER_CTRL_GH 0x0158
-#define TUNE_CTRL 0x015C
-#define BIAS_CTRL 0x0160
+#define TUNE_CTRL 0x015C
+#define BIAS_CTRL 0x0160
#define AFE_AB_DIAG_CTRL 0x0164
#define AFE_CD_DIAG_CTRL 0x0168
#define AFE_EF_DIAG_CTRL 0x016C
@@ -61,17 +61,17 @@
#define PLL_CD_DIAG_CTRL 0x0178
#define PLL_EF_DIAG_CTRL 0x017C
#define PLL_GH_DIAG_CTRL 0x0180
-#define TEST_CTRL 0x0184
-#define BIST_STAT 0x0188
-#define BIST_STAT2 0x018C
-#define BIST_VID_PLL_AB_STAT 0x0190
-#define BIST_VID_PLL_CD_STAT 0x0194
-#define BIST_VID_PLL_EF_STAT 0x0198
-#define BIST_VID_PLL_GH_STAT 0x019C
+#define TEST_CTRL 0x0184
+#define BIST_STAT 0x0188
+#define BIST_STAT2 0x018C
+#define BIST_VID_PLL_AB_STAT 0x0190
+#define BIST_VID_PLL_CD_STAT 0x0194
+#define BIST_VID_PLL_EF_STAT 0x0198
+#define BIST_VID_PLL_GH_STAT 0x019C
#define DLL_DIAG_CTRL 0x01A0
#define DEV_CH_ID_CTRL 0x01A4
#define ABIST_CTRL_STATUS 0x01A8
-#define ABIST_FREQ 0x01AC
+#define ABIST_FREQ 0x01AC
#define ABIST_GOERT_SHIFT 0x01B0
#define ABIST_COEF12 0x01B4
#define ABIST_COEF34 0x01B8
@@ -92,357 +92,357 @@
#define ABIST_CLAMP_E 0x01F4
#define ABIST_CLAMP_F 0x01F8
-/* Digital Video Encoder A Registers */
-#define DENC_A_REG_1 0x0200
-#define DENC_A_REG_2 0x0204
-#define DENC_A_REG_3 0x0208
-#define DENC_A_REG_4 0x020C
-#define DENC_A_REG_5 0x0210
-#define DENC_A_REG_6 0x0214
-#define DENC_A_REG_7 0x0218
-#define DENC_A_REG_8 0x021C
+/* Digital Video Encoder A Registers */
+#define DENC_A_REG_1 0x0200
+#define DENC_A_REG_2 0x0204
+#define DENC_A_REG_3 0x0208
+#define DENC_A_REG_4 0x020C
+#define DENC_A_REG_5 0x0210
+#define DENC_A_REG_6 0x0214
+#define DENC_A_REG_7 0x0218
+#define DENC_A_REG_8 0x021C
-/* Digital Video Encoder B Registers */
-#define DENC_B_REG_1 0x0300
-#define DENC_B_REG_2 0x0304
-#define DENC_B_REG_3 0x0308
-#define DENC_B_REG_4 0x030C
-#define DENC_B_REG_5 0x0310
-#define DENC_B_REG_6 0x0314
-#define DENC_B_REG_7 0x0318
-#define DENC_B_REG_8 0x031C
+/* Digital Video Encoder B Registers */
+#define DENC_B_REG_1 0x0300
+#define DENC_B_REG_2 0x0304
+#define DENC_B_REG_3 0x0308
+#define DENC_B_REG_4 0x030C
+#define DENC_B_REG_5 0x0310
+#define DENC_B_REG_6 0x0314
+#define DENC_B_REG_7 0x0318
+#define DENC_B_REG_8 0x031C
-/* Video Decoder A Registers */
-#define MODE_CTRL 0x1000
-#define OUT_CTRL1 0x1004
-#define OUT_CTRL_NS 0x1008
-#define GEN_STAT 0x100C
-#define INT_STAT_MASK 0x1010
-#define LUMA_CTRL 0x1014
-#define CHROMA_CTRL 0x1018
-#define CRUSH_CTRL 0x101C
-#define HORIZ_TIM_CTRL 0x1020
-#define VERT_TIM_CTRL 0x1024
-#define MISC_TIM_CTRL 0x1028
-#define FIELD_COUNT 0x102C
-#define HSCALE_CTRL 0x1030
-#define VSCALE_CTRL 0x1034
-#define MAN_VGA_CTRL 0x1038
-#define MAN_AGC_CTRL 0x103C
-#define DFE_CTRL1 0x1040
-#define DFE_CTRL2 0x1044
-#define DFE_CTRL3 0x1048
-#define PLL_CTRL 0x104C
-#define PLL_CTRL_FAST 0x1050
-#define HTL_CTRL 0x1054
-#define SRC_CFG 0x1058
-#define SC_STEP_SIZE 0x105C
-#define SC_CONVERGE_CTRL 0x1060
-#define SC_LOOP_CTRL 0x1064
-#define COMB_2D_HFS_CFG 0x1068
-#define COMB_2D_HFD_CFG 0x106C
-#define COMB_2D_LF_CFG 0x1070
-#define COMB_2D_BLEND 0x1074
-#define COMB_MISC_CTRL 0x1078
+/* Video Decoder A Registers */
+#define MODE_CTRL 0x1000
+#define OUT_CTRL1 0x1004
+#define OUT_CTRL_NS 0x1008
+#define GEN_STAT 0x100C
+#define INT_STAT_MASK 0x1010
+#define LUMA_CTRL 0x1014
+#define CHROMA_CTRL 0x1018
+#define CRUSH_CTRL 0x101C
+#define HORIZ_TIM_CTRL 0x1020
+#define VERT_TIM_CTRL 0x1024
+#define MISC_TIM_CTRL 0x1028
+#define FIELD_COUNT 0x102C
+#define HSCALE_CTRL 0x1030
+#define VSCALE_CTRL 0x1034
+#define MAN_VGA_CTRL 0x1038
+#define MAN_AGC_CTRL 0x103C
+#define DFE_CTRL1 0x1040
+#define DFE_CTRL2 0x1044
+#define DFE_CTRL3 0x1048
+#define PLL_CTRL 0x104C
+#define PLL_CTRL_FAST 0x1050
+#define HTL_CTRL 0x1054
+#define SRC_CFG 0x1058
+#define SC_STEP_SIZE 0x105C
+#define SC_CONVERGE_CTRL 0x1060
+#define SC_LOOP_CTRL 0x1064
+#define COMB_2D_HFS_CFG 0x1068
+#define COMB_2D_HFD_CFG 0x106C
+#define COMB_2D_LF_CFG 0x1070
+#define COMB_2D_BLEND 0x1074
+#define COMB_MISC_CTRL 0x1078
#define COMB_FLAT_THRESH_CTRL 0x107C
-#define COMB_TEST 0x1080
-#define BP_MISC_CTRL 0x1084
-#define VCR_DET_CTRL 0x1088
-#define NOISE_DET_CTRL 0x108C
+#define COMB_TEST 0x1080
+#define BP_MISC_CTRL 0x1084
+#define VCR_DET_CTRL 0x1088
+#define NOISE_DET_CTRL 0x108C
#define COMB_FLAT_NOISE_CTRL 0x1090
-#define VERSION 0x11F8
-#define SOFT_RST_CTRL 0x11FC
+#define VERSION 0x11F8
+#define SOFT_RST_CTRL 0x11FC
-/* Video Decoder B Registers */
-#define VDEC_B_MODE_CTRL 0x1200
-#define VDEC_B_OUT_CTRL1 0x1204
-#define VDEC_B_OUT_CTRL_NS 0x1208
-#define VDEC_B_GEN_STAT 0x120C
+/* Video Decoder B Registers */
+#define VDEC_B_MODE_CTRL 0x1200
+#define VDEC_B_OUT_CTRL1 0x1204
+#define VDEC_B_OUT_CTRL_NS 0x1208
+#define VDEC_B_GEN_STAT 0x120C
#define VDEC_B_INT_STAT_MASK 0x1210
-#define VDEC_B_LUMA_CTRL 0x1214
-#define VDEC_B_CHROMA_CTRL 0x1218
-#define VDEC_B_CRUSH_CTRL 0x121C
+#define VDEC_B_LUMA_CTRL 0x1214
+#define VDEC_B_CHROMA_CTRL 0x1218
+#define VDEC_B_CRUSH_CTRL 0x121C
#define VDEC_B_HORIZ_TIM_CTRL 0x1220
#define VDEC_B_VERT_TIM_CTRL 0x1224
#define VDEC_B_MISC_TIM_CTRL 0x1228
-#define VDEC_B_FIELD_COUNT 0x122C
-#define VDEC_B_HSCALE_CTRL 0x1230
-#define VDEC_B_VSCALE_CTRL 0x1234
-#define VDEC_B_MAN_VGA_CTRL 0x1238
-#define VDEC_B_MAN_AGC_CTRL 0x123C
-#define VDEC_B_DFE_CTRL1 0x1240
-#define VDEC_B_DFE_CTRL2 0x1244
-#define VDEC_B_DFE_CTRL3 0x1248
-#define VDEC_B_PLL_CTRL 0x124C
+#define VDEC_B_FIELD_COUNT 0x122C
+#define VDEC_B_HSCALE_CTRL 0x1230
+#define VDEC_B_VSCALE_CTRL 0x1234
+#define VDEC_B_MAN_VGA_CTRL 0x1238
+#define VDEC_B_MAN_AGC_CTRL 0x123C
+#define VDEC_B_DFE_CTRL1 0x1240
+#define VDEC_B_DFE_CTRL2 0x1244
+#define VDEC_B_DFE_CTRL3 0x1248
+#define VDEC_B_PLL_CTRL 0x124C
#define VDEC_B_PLL_CTRL_FAST 0x1250
-#define VDEC_B_HTL_CTRL 0x1254
-#define VDEC_B_SRC_CFG 0x1258
-#define VDEC_B_SC_STEP_SIZE 0x125C
+#define VDEC_B_HTL_CTRL 0x1254
+#define VDEC_B_SRC_CFG 0x1258
+#define VDEC_B_SC_STEP_SIZE 0x125C
#define VDEC_B_SC_CONVERGE_CTRL 0x1260
-#define VDEC_B_SC_LOOP_CTRL 0x1264
+#define VDEC_B_SC_LOOP_CTRL 0x1264
#define VDEC_B_COMB_2D_HFS_CFG 0x1268
#define VDEC_B_COMB_2D_HFD_CFG 0x126C
#define VDEC_B_COMB_2D_LF_CFG 0x1270
#define VDEC_B_COMB_2D_BLEND 0x1274
#define VDEC_B_COMB_MISC_CTRL 0x1278
-#define VDEC_B_COMB_FLAT_THRESH_CTRL 0x127C
-#define VDEC_B_COMB_TEST 0x1280
-#define VDEC_B_BP_MISC_CTRL 0x1284
-#define VDEC_B_VCR_DET_CTRL 0x1288
+#define VDEC_B_COMB_FLAT_THRESH_CTRL 0x127C
+#define VDEC_B_COMB_TEST 0x1280
+#define VDEC_B_BP_MISC_CTRL 0x1284
+#define VDEC_B_VCR_DET_CTRL 0x1288
#define VDEC_B_NOISE_DET_CTRL 0x128C
#define VDEC_B_COMB_FLAT_NOISE_CTRL 0x1290
-#define VDEC_B_VERSION 0x13F8
+#define VDEC_B_VERSION 0x13F8
#define VDEC_B_SOFT_RST_CTRL 0x13FC
/* Video Decoder C Registers */
-#define VDEC_C_MODE_CTRL 0x1400
-#define VDEC_C_OUT_CTRL1 0x1404
-#define VDEC_C_OUT_CTRL_NS 0x1408
-#define VDEC_C_GEN_STAT 0x140C
+#define VDEC_C_MODE_CTRL 0x1400
+#define VDEC_C_OUT_CTRL1 0x1404
+#define VDEC_C_OUT_CTRL_NS 0x1408
+#define VDEC_C_GEN_STAT 0x140C
#define VDEC_C_INT_STAT_MASK 0x1410
-#define VDEC_C_LUMA_CTRL 0x1414
-#define VDEC_C_CHROMA_CTRL 0x1418
-#define VDEC_C_CRUSH_CTRL 0x141C
+#define VDEC_C_LUMA_CTRL 0x1414
+#define VDEC_C_CHROMA_CTRL 0x1418
+#define VDEC_C_CRUSH_CTRL 0x141C
#define VDEC_C_HORIZ_TIM_CTRL 0x1420
#define VDEC_C_VERT_TIM_CTRL 0x1424
#define VDEC_C_MISC_TIM_CTRL 0x1428
-#define VDEC_C_FIELD_COUNT 0x142C
-#define VDEC_C_HSCALE_CTRL 0x1430
-#define VDEC_C_VSCALE_CTRL 0x1434
-#define VDEC_C_MAN_VGA_CTRL 0x1438
-#define VDEC_C_MAN_AGC_CTRL 0x143C
-#define VDEC_C_DFE_CTRL1 0x1440
-#define VDEC_C_DFE_CTRL2 0x1444
-#define VDEC_C_DFE_CTRL3 0x1448
-#define VDEC_C_PLL_CTRL 0x144C
+#define VDEC_C_FIELD_COUNT 0x142C
+#define VDEC_C_HSCALE_CTRL 0x1430
+#define VDEC_C_VSCALE_CTRL 0x1434
+#define VDEC_C_MAN_VGA_CTRL 0x1438
+#define VDEC_C_MAN_AGC_CTRL 0x143C
+#define VDEC_C_DFE_CTRL1 0x1440
+#define VDEC_C_DFE_CTRL2 0x1444
+#define VDEC_C_DFE_CTRL3 0x1448
+#define VDEC_C_PLL_CTRL 0x144C
#define VDEC_C_PLL_CTRL_FAST 0x1450
-#define VDEC_C_HTL_CTRL 0x1454
-#define VDEC_C_SRC_CFG 0x1458
-#define VDEC_C_SC_STEP_SIZE 0x145C
+#define VDEC_C_HTL_CTRL 0x1454
+#define VDEC_C_SRC_CFG 0x1458
+#define VDEC_C_SC_STEP_SIZE 0x145C
#define VDEC_C_SC_CONVERGE_CTRL 0x1460
-#define VDEC_C_SC_LOOP_CTRL 0x1464
+#define VDEC_C_SC_LOOP_CTRL 0x1464
#define VDEC_C_COMB_2D_HFS_CFG 0x1468
#define VDEC_C_COMB_2D_HFD_CFG 0x146C
#define VDEC_C_COMB_2D_LF_CFG 0x1470
#define VDEC_C_COMB_2D_BLEND 0x1474
#define VDEC_C_COMB_MISC_CTRL 0x1478
-#define VDEC_C_COMB_FLAT_THRESH_CTRL 0x147C
-#define VDEC_C_COMB_TEST 0x1480
-#define VDEC_C_BP_MISC_CTRL 0x1484
-#define VDEC_C_VCR_DET_CTRL 0x1488
+#define VDEC_C_COMB_FLAT_THRESH_CTRL 0x147C
+#define VDEC_C_COMB_TEST 0x1480
+#define VDEC_C_BP_MISC_CTRL 0x1484
+#define VDEC_C_VCR_DET_CTRL 0x1488
#define VDEC_C_NOISE_DET_CTRL 0x148C
#define VDEC_C_COMB_FLAT_NOISE_CTRL 0x1490
-#define VDEC_C_VERSION 0x15F8
+#define VDEC_C_VERSION 0x15F8
#define VDEC_C_SOFT_RST_CTRL 0x15FC
/* Video Decoder D Registers */
-#define VDEC_D_MODE_CTRL 0x1600
-#define VDEC_D_OUT_CTRL1 0x1604
-#define VDEC_D_OUT_CTRL_NS 0x1608
-#define VDEC_D_GEN_STAT 0x160C
+#define VDEC_D_MODE_CTRL 0x1600
+#define VDEC_D_OUT_CTRL1 0x1604
+#define VDEC_D_OUT_CTRL_NS 0x1608
+#define VDEC_D_GEN_STAT 0x160C
#define VDEC_D_INT_STAT_MASK 0x1610
-#define VDEC_D_LUMA_CTRL 0x1614
-#define VDEC_D_CHROMA_CTRL 0x1618
-#define VDEC_D_CRUSH_CTRL 0x161C
+#define VDEC_D_LUMA_CTRL 0x1614
+#define VDEC_D_CHROMA_CTRL 0x1618
+#define VDEC_D_CRUSH_CTRL 0x161C
#define VDEC_D_HORIZ_TIM_CTRL 0x1620
#define VDEC_D_VERT_TIM_CTRL 0x1624
#define VDEC_D_MISC_TIM_CTRL 0x1628
-#define VDEC_D_FIELD_COUNT 0x162C
-#define VDEC_D_HSCALE_CTRL 0x1630
-#define VDEC_D_VSCALE_CTRL 0x1634
-#define VDEC_D_MAN_VGA_CTRL 0x1638
-#define VDEC_D_MAN_AGC_CTRL 0x163C
-#define VDEC_D_DFE_CTRL1 0x1640
-#define VDEC_D_DFE_CTRL2 0x1644
-#define VDEC_D_DFE_CTRL3 0x1648
-#define VDEC_D_PLL_CTRL 0x164C
+#define VDEC_D_FIELD_COUNT 0x162C
+#define VDEC_D_HSCALE_CTRL 0x1630
+#define VDEC_D_VSCALE_CTRL 0x1634
+#define VDEC_D_MAN_VGA_CTRL 0x1638
+#define VDEC_D_MAN_AGC_CTRL 0x163C
+#define VDEC_D_DFE_CTRL1 0x1640
+#define VDEC_D_DFE_CTRL2 0x1644
+#define VDEC_D_DFE_CTRL3 0x1648
+#define VDEC_D_PLL_CTRL 0x164C
#define VDEC_D_PLL_CTRL_FAST 0x1650
-#define VDEC_D_HTL_CTRL 0x1654
-#define VDEC_D_SRC_CFG 0x1658
-#define VDEC_D_SC_STEP_SIZE 0x165C
+#define VDEC_D_HTL_CTRL 0x1654
+#define VDEC_D_SRC_CFG 0x1658
+#define VDEC_D_SC_STEP_SIZE 0x165C
#define VDEC_D_SC_CONVERGE_CTRL 0x1660
-#define VDEC_D_SC_LOOP_CTRL 0x1664
+#define VDEC_D_SC_LOOP_CTRL 0x1664
#define VDEC_D_COMB_2D_HFS_CFG 0x1668
#define VDEC_D_COMB_2D_HFD_CFG 0x166C
#define VDEC_D_COMB_2D_LF_CFG 0x1670
#define VDEC_D_COMB_2D_BLEND 0x1674
#define VDEC_D_COMB_MISC_CTRL 0x1678
-#define VDEC_D_COMB_FLAT_THRESH_CTRL 0x167C
-#define VDEC_D_COMB_TEST 0x1680
-#define VDEC_D_BP_MISC_CTRL 0x1684
-#define VDEC_D_VCR_DET_CTRL 0x1688
+#define VDEC_D_COMB_FLAT_THRESH_CTRL 0x167C
+#define VDEC_D_COMB_TEST 0x1680
+#define VDEC_D_BP_MISC_CTRL 0x1684
+#define VDEC_D_VCR_DET_CTRL 0x1688
#define VDEC_D_NOISE_DET_CTRL 0x168C
#define VDEC_D_COMB_FLAT_NOISE_CTRL 0x1690
-#define VDEC_D_VERSION 0x17F8
+#define VDEC_D_VERSION 0x17F8
#define VDEC_D_SOFT_RST_CTRL 0x17FC
/* Video Decoder E Registers */
-#define VDEC_E_MODE_CTRL 0x1800
-#define VDEC_E_OUT_CTRL1 0x1804
-#define VDEC_E_OUT_CTRL_NS 0x1808
-#define VDEC_E_GEN_STAT 0x180C
+#define VDEC_E_MODE_CTRL 0x1800
+#define VDEC_E_OUT_CTRL1 0x1804
+#define VDEC_E_OUT_CTRL_NS 0x1808
+#define VDEC_E_GEN_STAT 0x180C
#define VDEC_E_INT_STAT_MASK 0x1810
-#define VDEC_E_LUMA_CTRL 0x1814
-#define VDEC_E_CHROMA_CTRL 0x1818
-#define VDEC_E_CRUSH_CTRL 0x181C
+#define VDEC_E_LUMA_CTRL 0x1814
+#define VDEC_E_CHROMA_CTRL 0x1818
+#define VDEC_E_CRUSH_CTRL 0x181C
#define VDEC_E_HORIZ_TIM_CTRL 0x1820
#define VDEC_E_VERT_TIM_CTRL 0x1824
#define VDEC_E_MISC_TIM_CTRL 0x1828
-#define VDEC_E_FIELD_COUNT 0x182C
-#define VDEC_E_HSCALE_CTRL 0x1830
-#define VDEC_E_VSCALE_CTRL 0x1834
-#define VDEC_E_MAN_VGA_CTRL 0x1838
-#define VDEC_E_MAN_AGC_CTRL 0x183C
-#define VDEC_E_DFE_CTRL1 0x1840
-#define VDEC_E_DFE_CTRL2 0x1844
-#define VDEC_E_DFE_CTRL3 0x1848
-#define VDEC_E_PLL_CTRL 0x184C
+#define VDEC_E_FIELD_COUNT 0x182C
+#define VDEC_E_HSCALE_CTRL 0x1830
+#define VDEC_E_VSCALE_CTRL 0x1834
+#define VDEC_E_MAN_VGA_CTRL 0x1838
+#define VDEC_E_MAN_AGC_CTRL 0x183C
+#define VDEC_E_DFE_CTRL1 0x1840
+#define VDEC_E_DFE_CTRL2 0x1844
+#define VDEC_E_DFE_CTRL3 0x1848
+#define VDEC_E_PLL_CTRL 0x184C
#define VDEC_E_PLL_CTRL_FAST 0x1850
-#define VDEC_E_HTL_CTRL 0x1854
-#define VDEC_E_SRC_CFG 0x1858
-#define VDEC_E_SC_STEP_SIZE 0x185C
+#define VDEC_E_HTL_CTRL 0x1854
+#define VDEC_E_SRC_CFG 0x1858
+#define VDEC_E_SC_STEP_SIZE 0x185C
#define VDEC_E_SC_CONVERGE_CTRL 0x1860
-#define VDEC_E_SC_LOOP_CTRL 0x1864
+#define VDEC_E_SC_LOOP_CTRL 0x1864
#define VDEC_E_COMB_2D_HFS_CFG 0x1868
#define VDEC_E_COMB_2D_HFD_CFG 0x186C
#define VDEC_E_COMB_2D_LF_CFG 0x1870
#define VDEC_E_COMB_2D_BLEND 0x1874
#define VDEC_E_COMB_MISC_CTRL 0x1878
-#define VDEC_E_COMB_FLAT_THRESH_CTRL 0x187C
-#define VDEC_E_COMB_TEST 0x1880
-#define VDEC_E_BP_MISC_CTRL 0x1884
-#define VDEC_E_VCR_DET_CTRL 0x1888
+#define VDEC_E_COMB_FLAT_THRESH_CTRL 0x187C
+#define VDEC_E_COMB_TEST 0x1880
+#define VDEC_E_BP_MISC_CTRL 0x1884
+#define VDEC_E_VCR_DET_CTRL 0x1888
#define VDEC_E_NOISE_DET_CTRL 0x188C
#define VDEC_E_COMB_FLAT_NOISE_CTRL 0x1890
-#define VDEC_E_VERSION 0x19F8
+#define VDEC_E_VERSION 0x19F8
#define VDEC_E_SOFT_RST_CTRL 0x19FC
/* Video Decoder F Registers */
-#define VDEC_F_MODE_CTRL 0x1A00
-#define VDEC_F_OUT_CTRL1 0x1A04
-#define VDEC_F_OUT_CTRL_NS 0x1A08
-#define VDEC_F_GEN_STAT 0x1A0C
+#define VDEC_F_MODE_CTRL 0x1A00
+#define VDEC_F_OUT_CTRL1 0x1A04
+#define VDEC_F_OUT_CTRL_NS 0x1A08
+#define VDEC_F_GEN_STAT 0x1A0C
#define VDEC_F_INT_STAT_MASK 0x1A10
-#define VDEC_F_LUMA_CTRL 0x1A14
-#define VDEC_F_CHROMA_CTRL 0x1A18
-#define VDEC_F_CRUSH_CTRL 0x1A1C
+#define VDEC_F_LUMA_CTRL 0x1A14
+#define VDEC_F_CHROMA_CTRL 0x1A18
+#define VDEC_F_CRUSH_CTRL 0x1A1C
#define VDEC_F_HORIZ_TIM_CTRL 0x1A20
#define VDEC_F_VERT_TIM_CTRL 0x1A24
#define VDEC_F_MISC_TIM_CTRL 0x1A28
-#define VDEC_F_FIELD_COUNT 0x1A2C
-#define VDEC_F_HSCALE_CTRL 0x1A30
-#define VDEC_F_VSCALE_CTRL 0x1A34
-#define VDEC_F_MAN_VGA_CTRL 0x1A38
-#define VDEC_F_MAN_AGC_CTRL 0x1A3C
-#define VDEC_F_DFE_CTRL1 0x1A40
-#define VDEC_F_DFE_CTRL2 0x1A44
-#define VDEC_F_DFE_CTRL3 0x1A48
-#define VDEC_F_PLL_CTRL 0x1A4C
+#define VDEC_F_FIELD_COUNT 0x1A2C
+#define VDEC_F_HSCALE_CTRL 0x1A30
+#define VDEC_F_VSCALE_CTRL 0x1A34
+#define VDEC_F_MAN_VGA_CTRL 0x1A38
+#define VDEC_F_MAN_AGC_CTRL 0x1A3C
+#define VDEC_F_DFE_CTRL1 0x1A40
+#define VDEC_F_DFE_CTRL2 0x1A44
+#define VDEC_F_DFE_CTRL3 0x1A48
+#define VDEC_F_PLL_CTRL 0x1A4C
#define VDEC_F_PLL_CTRL_FAST 0x1A50
-#define VDEC_F_HTL_CTRL 0x1A54
-#define VDEC_F_SRC_CFG 0x1A58
-#define VDEC_F_SC_STEP_SIZE 0x1A5C
+#define VDEC_F_HTL_CTRL 0x1A54
+#define VDEC_F_SRC_CFG 0x1A58
+#define VDEC_F_SC_STEP_SIZE 0x1A5C
#define VDEC_F_SC_CONVERGE_CTRL 0x1A60
-#define VDEC_F_SC_LOOP_CTRL 0x1A64
+#define VDEC_F_SC_LOOP_CTRL 0x1A64
#define VDEC_F_COMB_2D_HFS_CFG 0x1A68
#define VDEC_F_COMB_2D_HFD_CFG 0x1A6C
#define VDEC_F_COMB_2D_LF_CFG 0x1A70
#define VDEC_F_COMB_2D_BLEND 0x1A74
#define VDEC_F_COMB_MISC_CTRL 0x1A78
-#define VDEC_F_COMB_FLAT_THRESH_CTRL 0x1A7C
-#define VDEC_F_COMB_TEST 0x1A80
-#define VDEC_F_BP_MISC_CTRL 0x1A84
-#define VDEC_F_VCR_DET_CTRL 0x1A88
+#define VDEC_F_COMB_FLAT_THRESH_CTRL 0x1A7C
+#define VDEC_F_COMB_TEST 0x1A80
+#define VDEC_F_BP_MISC_CTRL 0x1A84
+#define VDEC_F_VCR_DET_CTRL 0x1A88
#define VDEC_F_NOISE_DET_CTRL 0x1A8C
#define VDEC_F_COMB_FLAT_NOISE_CTRL 0x1A90
-#define VDEC_F_VERSION 0x1BF8
+#define VDEC_F_VERSION 0x1BF8
#define VDEC_F_SOFT_RST_CTRL 0x1BFC
/* Video Decoder G Registers */
-#define VDEC_G_MODE_CTRL 0x1C00
-#define VDEC_G_OUT_CTRL1 0x1C04
-#define VDEC_G_OUT_CTRL_NS 0x1C08
-#define VDEC_G_GEN_STAT 0x1C0C
+#define VDEC_G_MODE_CTRL 0x1C00
+#define VDEC_G_OUT_CTRL1 0x1C04
+#define VDEC_G_OUT_CTRL_NS 0x1C08
+#define VDEC_G_GEN_STAT 0x1C0C
#define VDEC_G_INT_STAT_MASK 0x1C10
-#define VDEC_G_LUMA_CTRL 0x1C14
-#define VDEC_G_CHROMA_CTRL 0x1C18
-#define VDEC_G_CRUSH_CTRL 0x1C1C
+#define VDEC_G_LUMA_CTRL 0x1C14
+#define VDEC_G_CHROMA_CTRL 0x1C18
+#define VDEC_G_CRUSH_CTRL 0x1C1C
#define VDEC_G_HORIZ_TIM_CTRL 0x1C20
#define VDEC_G_VERT_TIM_CTRL 0x1C24
#define VDEC_G_MISC_TIM_CTRL 0x1C28
-#define VDEC_G_FIELD_COUNT 0x1C2C
-#define VDEC_G_HSCALE_CTRL 0x1C30
-#define VDEC_G_VSCALE_CTRL 0x1C34
-#define VDEC_G_MAN_VGA_CTRL 0x1C38
-#define VDEC_G_MAN_AGC_CTRL 0x1C3C
-#define VDEC_G_DFE_CTRL1 0x1C40
-#define VDEC_G_DFE_CTRL2 0x1C44
-#define VDEC_G_DFE_CTRL3 0x1C48
-#define VDEC_G_PLL_CTRL 0x1C4C
+#define VDEC_G_FIELD_COUNT 0x1C2C
+#define VDEC_G_HSCALE_CTRL 0x1C30
+#define VDEC_G_VSCALE_CTRL 0x1C34
+#define VDEC_G_MAN_VGA_CTRL 0x1C38
+#define VDEC_G_MAN_AGC_CTRL 0x1C3C
+#define VDEC_G_DFE_CTRL1 0x1C40
+#define VDEC_G_DFE_CTRL2 0x1C44
+#define VDEC_G_DFE_CTRL3 0x1C48
+#define VDEC_G_PLL_CTRL 0x1C4C
#define VDEC_G_PLL_CTRL_FAST 0x1C50
-#define VDEC_G_HTL_CTRL 0x1C54
-#define VDEC_G_SRC_CFG 0x1C58
-#define VDEC_G_SC_STEP_SIZE 0x1C5C
+#define VDEC_G_HTL_CTRL 0x1C54
+#define VDEC_G_SRC_CFG 0x1C58
+#define VDEC_G_SC_STEP_SIZE 0x1C5C
#define VDEC_G_SC_CONVERGE_CTRL 0x1C60
-#define VDEC_G_SC_LOOP_CTRL 0x1C64
+#define VDEC_G_SC_LOOP_CTRL 0x1C64
#define VDEC_G_COMB_2D_HFS_CFG 0x1C68
#define VDEC_G_COMB_2D_HFD_CFG 0x1C6C
#define VDEC_G_COMB_2D_LF_CFG 0x1C70
#define VDEC_G_COMB_2D_BLEND 0x1C74
#define VDEC_G_COMB_MISC_CTRL 0x1C78
-#define VDEC_G_COMB_FLAT_THRESH_CTRL 0x1C7C
-#define VDEC_G_COMB_TEST 0x1C80
-#define VDEC_G_BP_MISC_CTRL 0x1C84
-#define VDEC_G_VCR_DET_CTRL 0x1C88
+#define VDEC_G_COMB_FLAT_THRESH_CTRL 0x1C7C
+#define VDEC_G_COMB_TEST 0x1C80
+#define VDEC_G_BP_MISC_CTRL 0x1C84
+#define VDEC_G_VCR_DET_CTRL 0x1C88
#define VDEC_G_NOISE_DET_CTRL 0x1C8C
#define VDEC_G_COMB_FLAT_NOISE_CTRL 0x1C90
-#define VDEC_G_VERSION 0x1DF8
+#define VDEC_G_VERSION 0x1DF8
#define VDEC_G_SOFT_RST_CTRL 0x1DFC
-/* Video Decoder H Registers */
-#define VDEC_H_MODE_CTRL 0x1E00
-#define VDEC_H_OUT_CTRL1 0x1E04
-#define VDEC_H_OUT_CTRL_NS 0x1E08
-#define VDEC_H_GEN_STAT 0x1E0C
+/* Video Decoder H Registers */
+#define VDEC_H_MODE_CTRL 0x1E00
+#define VDEC_H_OUT_CTRL1 0x1E04
+#define VDEC_H_OUT_CTRL_NS 0x1E08
+#define VDEC_H_GEN_STAT 0x1E0C
#define VDEC_H_INT_STAT_MASK 0x1E1E
-#define VDEC_H_LUMA_CTRL 0x1E14
-#define VDEC_H_CHROMA_CTRL 0x1E18
-#define VDEC_H_CRUSH_CTRL 0x1E1C
+#define VDEC_H_LUMA_CTRL 0x1E14
+#define VDEC_H_CHROMA_CTRL 0x1E18
+#define VDEC_H_CRUSH_CTRL 0x1E1C
#define VDEC_H_HORIZ_TIM_CTRL 0x1E20
#define VDEC_H_VERT_TIM_CTRL 0x1E24
#define VDEC_H_MISC_TIM_CTRL 0x1E28
-#define VDEC_H_FIELD_COUNT 0x1E2C
-#define VDEC_H_HSCALE_CTRL 0x1E30
-#define VDEC_H_VSCALE_CTRL 0x1E34
-#define VDEC_H_MAN_VGA_CTRL 0x1E38
-#define VDEC_H_MAN_AGC_CTRL 0x1E3C
-#define VDEC_H_DFE_CTRL1 0x1E40
-#define VDEC_H_DFE_CTRL2 0x1E44
-#define VDEC_H_DFE_CTRL3 0x1E48
-#define VDEC_H_PLL_CTRL 0x1E4C
+#define VDEC_H_FIELD_COUNT 0x1E2C
+#define VDEC_H_HSCALE_CTRL 0x1E30
+#define VDEC_H_VSCALE_CTRL 0x1E34
+#define VDEC_H_MAN_VGA_CTRL 0x1E38
+#define VDEC_H_MAN_AGC_CTRL 0x1E3C
+#define VDEC_H_DFE_CTRL1 0x1E40
+#define VDEC_H_DFE_CTRL2 0x1E44
+#define VDEC_H_DFE_CTRL3 0x1E48
+#define VDEC_H_PLL_CTRL 0x1E4C
#define VDEC_H_PLL_CTRL_FAST 0x1E50
-#define VDEC_H_HTL_CTRL 0x1E54
-#define VDEC_H_SRC_CFG 0x1E58
-#define VDEC_H_SC_STEP_SIZE 0x1E5C
+#define VDEC_H_HTL_CTRL 0x1E54
+#define VDEC_H_SRC_CFG 0x1E58
+#define VDEC_H_SC_STEP_SIZE 0x1E5C
#define VDEC_H_SC_CONVERGE_CTRL 0x1E60
-#define VDEC_H_SC_LOOP_CTRL 0x1E64
+#define VDEC_H_SC_LOOP_CTRL 0x1E64
#define VDEC_H_COMB_2D_HFS_CFG 0x1E68
#define VDEC_H_COMB_2D_HFD_CFG 0x1E6C
#define VDEC_H_COMB_2D_LF_CFG 0x1E70
#define VDEC_H_COMB_2D_BLEND 0x1E74
#define VDEC_H_COMB_MISC_CTRL 0x1E78
-#define VDEC_H_COMB_FLAT_THRESH_CTRL 0x1E7C
-#define VDEC_H_COMB_TEST 0x1E80
-#define VDEC_H_BP_MISC_CTRL 0x1E84
-#define VDEC_H_VCR_DET_CTRL 0x1E88
+#define VDEC_H_COMB_FLAT_THRESH_CTRL 0x1E7C
+#define VDEC_H_COMB_TEST 0x1E80
+#define VDEC_H_BP_MISC_CTRL 0x1E84
+#define VDEC_H_VCR_DET_CTRL 0x1E88
#define VDEC_H_NOISE_DET_CTRL 0x1E8C
#define VDEC_H_COMB_FLAT_NOISE_CTRL 0x1E90
-#define VDEC_H_VERSION 0x1FF8
+#define VDEC_H_VERSION 0x1FF8
#define VDEC_H_SOFT_RST_CTRL 0x1FFC
/*****************************************************************************/
/* LUMA_CTRL register fields */
-#define VDEC_A_BRITE_CTRL 0x1014
+#define VDEC_A_BRITE_CTRL 0x1014
#define VDEC_A_CNTRST_CTRL 0x1015
#define VDEC_A_PEAK_SEL 0x1016
diff --git a/drivers/media/video/cx25821/cx25821-medusa-video.c b/drivers/media/video/cx25821/cx25821-medusa-video.c
index fc780d0908dc..298a68d98c2f 100644
--- a/drivers/media/video/cx25821/cx25821-medusa-video.c
+++ b/drivers/media/video/cx25821/cx25821-medusa-video.c
@@ -99,82 +99,67 @@ static int medusa_initialize_ntsc(struct cx25821_dev *dev)
for (i = 0; i < MAX_DECODERS; i++) {
/* set video format NTSC-M */
- value =
- cx25821_i2c_read(&dev->i2c_bus[0], MODE_CTRL + (0x200 * i),
- &tmp);
+ value = cx25821_i2c_read(&dev->i2c_bus[0],
+ MODE_CTRL + (0x200 * i), &tmp);
value &= 0xFFFFFFF0;
/* enable the fast locking mode bit[16] */
value |= 0x10001;
- ret_val =
- cx25821_i2c_write(&dev->i2c_bus[0], MODE_CTRL + (0x200 * i),
- value);
+ ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
+ MODE_CTRL + (0x200 * i), value);
/* resolution NTSC 720x480 */
- value =
- cx25821_i2c_read(&dev->i2c_bus[0],
- HORIZ_TIM_CTRL + (0x200 * i), &tmp);
+ value = cx25821_i2c_read(&dev->i2c_bus[0],
+ HORIZ_TIM_CTRL + (0x200 * i), &tmp);
value &= 0x00C00C00;
value |= 0x612D0074;
- ret_val =
- cx25821_i2c_write(&dev->i2c_bus[0],
- HORIZ_TIM_CTRL + (0x200 * i), value);
+ ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
+ HORIZ_TIM_CTRL + (0x200 * i), value);
- value =
- cx25821_i2c_read(&dev->i2c_bus[0],
- VERT_TIM_CTRL + (0x200 * i), &tmp);
+ value = cx25821_i2c_read(&dev->i2c_bus[0],
+ VERT_TIM_CTRL + (0x200 * i), &tmp);
value &= 0x00C00C00;
value |= 0x1C1E001A; /* vblank_cnt + 2 to get camera ID */
- ret_val =
- cx25821_i2c_write(&dev->i2c_bus[0],
- VERT_TIM_CTRL + (0x200 * i), value);
+ ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
+ VERT_TIM_CTRL + (0x200 * i), value);
/* chroma subcarrier step size */
- ret_val =
- cx25821_i2c_write(&dev->i2c_bus[0],
- SC_STEP_SIZE + (0x200 * i), 0x43E00000);
+ ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
+ SC_STEP_SIZE + (0x200 * i), 0x43E00000);
/* enable VIP optional active */
- value =
- cx25821_i2c_read(&dev->i2c_bus[0],
- OUT_CTRL_NS + (0x200 * i), &tmp);
+ value = cx25821_i2c_read(&dev->i2c_bus[0],
+ OUT_CTRL_NS + (0x200 * i), &tmp);
value &= 0xFFFBFFFF;
value |= 0x00040000;
- ret_val =
- cx25821_i2c_write(&dev->i2c_bus[0],
- OUT_CTRL_NS + (0x200 * i), value);
+ ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
+ OUT_CTRL_NS + (0x200 * i), value);
/* enable VIP optional active (VIP_OPT_AL) for direct output. */
- value =
- cx25821_i2c_read(&dev->i2c_bus[0], OUT_CTRL1 + (0x200 * i),
- &tmp);
+ value = cx25821_i2c_read(&dev->i2c_bus[0],
+ OUT_CTRL1 + (0x200 * i), &tmp);
value &= 0xFFFBFFFF;
value |= 0x00040000;
- ret_val =
- cx25821_i2c_write(&dev->i2c_bus[0], OUT_CTRL1 + (0x200 * i),
- value);
+ ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
+ OUT_CTRL1 + (0x200 * i), value);
/*
* clear VPRES_VERT_EN bit, fixes the chroma run away problem
* when the input switching rate < 16 fields
*/
- value =
- cx25821_i2c_read(&dev->i2c_bus[0],
- MISC_TIM_CTRL + (0x200 * i), &tmp);
+ value = cx25821_i2c_read(&dev->i2c_bus[0],
+ MISC_TIM_CTRL + (0x200 * i), &tmp);
/* disable special play detection */
value = setBitAtPos(value, 14);
value = clearBitAtPos(value, 15);
- ret_val =
- cx25821_i2c_write(&dev->i2c_bus[0],
- MISC_TIM_CTRL + (0x200 * i), value);
+ ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
+ MISC_TIM_CTRL + (0x200 * i), value);
/* set vbi_gate_en to 0 */
- value =
- cx25821_i2c_read(&dev->i2c_bus[0], DFE_CTRL1 + (0x200 * i),
- &tmp);
+ value = cx25821_i2c_read(&dev->i2c_bus[0],
+ DFE_CTRL1 + (0x200 * i), &tmp);
value = clearBitAtPos(value, 29);
- ret_val =
- cx25821_i2c_write(&dev->i2c_bus[0], DFE_CTRL1 + (0x200 * i),
- value);
+ ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
+ DFE_CTRL1 + (0x200 * i), value);
/* Enable the generation of blue field output if no video */
medusa_enable_bluefield_output(dev, i, 1);
@@ -182,61 +167,49 @@ static int medusa_initialize_ntsc(struct cx25821_dev *dev)
for (i = 0; i < MAX_ENCODERS; i++) {
/* NTSC hclock */
- value =
- cx25821_i2c_read(&dev->i2c_bus[0],
- DENC_A_REG_1 + (0x100 * i), &tmp);
+ value = cx25821_i2c_read(&dev->i2c_bus[0],
+ DENC_A_REG_1 + (0x100 * i), &tmp);
value &= 0xF000FC00;
value |= 0x06B402D0;
- ret_val =
- cx25821_i2c_write(&dev->i2c_bus[0],
- DENC_A_REG_1 + (0x100 * i), value);
+ ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
+ DENC_A_REG_1 + (0x100 * i), value);
/* burst begin and burst end */
- value =
- cx25821_i2c_read(&dev->i2c_bus[0],
- DENC_A_REG_2 + (0x100 * i), &tmp);
+ value = cx25821_i2c_read(&dev->i2c_bus[0],
+ DENC_A_REG_2 + (0x100 * i), &tmp);
value &= 0xFF000000;
value |= 0x007E9054;
- ret_val =
- cx25821_i2c_write(&dev->i2c_bus[0],
- DENC_A_REG_2 + (0x100 * i), value);
+ ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
+ DENC_A_REG_2 + (0x100 * i), value);
- value =
- cx25821_i2c_read(&dev->i2c_bus[0],
- DENC_A_REG_3 + (0x100 * i), &tmp);
+ value = cx25821_i2c_read(&dev->i2c_bus[0],
+ DENC_A_REG_3 + (0x100 * i), &tmp);
value &= 0xFC00FE00;
value |= 0x00EC00F0;
- ret_val =
- cx25821_i2c_write(&dev->i2c_bus[0],
- DENC_A_REG_3 + (0x100 * i), value);
+ ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
+ DENC_A_REG_3 + (0x100 * i), value);
/* set NTSC vblank, no phase alternation, 7.5 IRE pedestal */
- value =
- cx25821_i2c_read(&dev->i2c_bus[0],
- DENC_A_REG_4 + (0x100 * i), &tmp);
+ value = cx25821_i2c_read(&dev->i2c_bus[0],
+ DENC_A_REG_4 + (0x100 * i), &tmp);
value &= 0x00FCFFFF;
value |= 0x13020000;
- ret_val =
- cx25821_i2c_write(&dev->i2c_bus[0],
- DENC_A_REG_4 + (0x100 * i), value);
+ ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
+ DENC_A_REG_4 + (0x100 * i), value);
- value =
- cx25821_i2c_read(&dev->i2c_bus[0],
- DENC_A_REG_5 + (0x100 * i), &tmp);
+ value = cx25821_i2c_read(&dev->i2c_bus[0],
+ DENC_A_REG_5 + (0x100 * i), &tmp);
value &= 0xFFFF0000;
value |= 0x0000E575;
- ret_val =
- cx25821_i2c_write(&dev->i2c_bus[0],
- DENC_A_REG_5 + (0x100 * i), value);
+ ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
+ DENC_A_REG_5 + (0x100 * i), value);
- ret_val =
- cx25821_i2c_write(&dev->i2c_bus[0],
- DENC_A_REG_6 + (0x100 * i), 0x009A89C1);
+ ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
+ DENC_A_REG_6 + (0x100 * i), 0x009A89C1);
/* Subcarrier Increment */
- ret_val =
- cx25821_i2c_write(&dev->i2c_bus[0],
- DENC_A_REG_7 + (0x100 * i), 0x21F07C1F);
+ ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
+ DENC_A_REG_7 + (0x100 * i), 0x21F07C1F);
}
/* set picture resolutions */
@@ -261,34 +234,27 @@ static int medusa_PALCombInit(struct cx25821_dev *dev, int dec)
u32 value = 0, tmp = 0;
/* Setup for 2D threshold */
- ret_val =
- cx25821_i2c_write(&dev->i2c_bus[0], COMB_2D_HFS_CFG + (0x200 * dec),
- 0x20002861);
- ret_val =
- cx25821_i2c_write(&dev->i2c_bus[0], COMB_2D_HFD_CFG + (0x200 * dec),
- 0x20002861);
- ret_val =
- cx25821_i2c_write(&dev->i2c_bus[0], COMB_2D_LF_CFG + (0x200 * dec),
- 0x200A1023);
+ ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
+ COMB_2D_HFS_CFG + (0x200 * dec), 0x20002861);
+ ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
+ COMB_2D_HFD_CFG + (0x200 * dec), 0x20002861);
+ ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
+ COMB_2D_LF_CFG + (0x200 * dec), 0x200A1023);
/* Setup flat chroma and luma thresholds */
- value =
- cx25821_i2c_read(&dev->i2c_bus[0],
- COMB_FLAT_THRESH_CTRL + (0x200 * dec), &tmp);
+ value = cx25821_i2c_read(&dev->i2c_bus[0],
+ COMB_FLAT_THRESH_CTRL + (0x200 * dec), &tmp);
value &= 0x06230000;
- ret_val =
- cx25821_i2c_write(&dev->i2c_bus[0],
- COMB_FLAT_THRESH_CTRL + (0x200 * dec), value);
+ ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
+ COMB_FLAT_THRESH_CTRL + (0x200 * dec), value);
/* set comb 2D blend */
- ret_val =
- cx25821_i2c_write(&dev->i2c_bus[0], COMB_2D_BLEND + (0x200 * dec),
- 0x210F0F0F);
+ ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
+ COMB_2D_BLEND + (0x200 * dec), 0x210F0F0F);
/* COMB MISC CONTROL */
- ret_val =
- cx25821_i2c_write(&dev->i2c_bus[0], COMB_MISC_CTRL + (0x200 * dec),
- 0x41120A7F);
+ ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
+ COMB_MISC_CTRL + (0x200 * dec), 0x41120A7F);
return ret_val;
}
@@ -304,83 +270,68 @@ static int medusa_initialize_pal(struct cx25821_dev *dev)
for (i = 0; i < MAX_DECODERS; i++) {
/* set video format PAL-BDGHI */
- value =
- cx25821_i2c_read(&dev->i2c_bus[0], MODE_CTRL + (0x200 * i),
- &tmp);
+ value = cx25821_i2c_read(&dev->i2c_bus[0],
+ MODE_CTRL + (0x200 * i), &tmp);
value &= 0xFFFFFFF0;
/* enable the fast locking mode bit[16] */
value |= 0x10004;
- ret_val =
- cx25821_i2c_write(&dev->i2c_bus[0], MODE_CTRL + (0x200 * i),
- value);
+ ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
+ MODE_CTRL + (0x200 * i), value);
/* resolution PAL 720x576 */
- value =
- cx25821_i2c_read(&dev->i2c_bus[0],
- HORIZ_TIM_CTRL + (0x200 * i), &tmp);
+ value = cx25821_i2c_read(&dev->i2c_bus[0],
+ HORIZ_TIM_CTRL + (0x200 * i), &tmp);
value &= 0x00C00C00;
value |= 0x632D007D;
- ret_val =
- cx25821_i2c_write(&dev->i2c_bus[0],
- HORIZ_TIM_CTRL + (0x200 * i), value);
+ ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
+ HORIZ_TIM_CTRL + (0x200 * i), value);
/* vblank656_cnt=x26, vactive_cnt=240h, vblank_cnt=x24 */
- value =
- cx25821_i2c_read(&dev->i2c_bus[0],
- VERT_TIM_CTRL + (0x200 * i), &tmp);
+ value = cx25821_i2c_read(&dev->i2c_bus[0],
+ VERT_TIM_CTRL + (0x200 * i), &tmp);
value &= 0x00C00C00;
value |= 0x28240026; /* vblank_cnt + 2 to get camera ID */
- ret_val =
- cx25821_i2c_write(&dev->i2c_bus[0],
- VERT_TIM_CTRL + (0x200 * i), value);
+ ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
+ VERT_TIM_CTRL + (0x200 * i), value);
/* chroma subcarrier step size */
- ret_val =
- cx25821_i2c_write(&dev->i2c_bus[0],
- SC_STEP_SIZE + (0x200 * i), 0x5411E2D0);
+ ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
+ SC_STEP_SIZE + (0x200 * i), 0x5411E2D0);
/* enable VIP optional active */
- value =
- cx25821_i2c_read(&dev->i2c_bus[0],
- OUT_CTRL_NS + (0x200 * i), &tmp);
+ value = cx25821_i2c_read(&dev->i2c_bus[0],
+ OUT_CTRL_NS + (0x200 * i), &tmp);
value &= 0xFFFBFFFF;
value |= 0x00040000;
- ret_val =
- cx25821_i2c_write(&dev->i2c_bus[0],
- OUT_CTRL_NS + (0x200 * i), value);
+ ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
+ OUT_CTRL_NS + (0x200 * i), value);
/* enable VIP optional active (VIP_OPT_AL) for direct output. */
- value =
- cx25821_i2c_read(&dev->i2c_bus[0], OUT_CTRL1 + (0x200 * i),
- &tmp);
+ value = cx25821_i2c_read(&dev->i2c_bus[0],
+ OUT_CTRL1 + (0x200 * i), &tmp);
value &= 0xFFFBFFFF;
value |= 0x00040000;
- ret_val =
- cx25821_i2c_write(&dev->i2c_bus[0], OUT_CTRL1 + (0x200 * i),
- value);
+ ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
+ OUT_CTRL1 + (0x200 * i), value);
/*
* clear VPRES_VERT_EN bit, fixes the chroma run away problem
* when the input switching rate < 16 fields
*/
- value =
- cx25821_i2c_read(&dev->i2c_bus[0],
- MISC_TIM_CTRL + (0x200 * i), &tmp);
+ value = cx25821_i2c_read(&dev->i2c_bus[0],
+ MISC_TIM_CTRL + (0x200 * i), &tmp);
/* disable special play detection */
value = setBitAtPos(value, 14);
value = clearBitAtPos(value, 15);
- ret_val =
- cx25821_i2c_write(&dev->i2c_bus[0],
- MISC_TIM_CTRL + (0x200 * i), value);
+ ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
+ MISC_TIM_CTRL + (0x200 * i), value);
/* set vbi_gate_en to 0 */
- value =
- cx25821_i2c_read(&dev->i2c_bus[0], DFE_CTRL1 + (0x200 * i),
- &tmp);
+ value = cx25821_i2c_read(&dev->i2c_bus[0],
+ DFE_CTRL1 + (0x200 * i), &tmp);
value = clearBitAtPos(value, 29);
- ret_val =
- cx25821_i2c_write(&dev->i2c_bus[0], DFE_CTRL1 + (0x200 * i),
- value);
+ ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
+ DFE_CTRL1 + (0x200 * i), value);
medusa_PALCombInit(dev, i);
@@ -390,62 +341,50 @@ static int medusa_initialize_pal(struct cx25821_dev *dev)
for (i = 0; i < MAX_ENCODERS; i++) {
/* PAL hclock */
- value =
- cx25821_i2c_read(&dev->i2c_bus[0],
- DENC_A_REG_1 + (0x100 * i), &tmp);
+ value = cx25821_i2c_read(&dev->i2c_bus[0],
+ DENC_A_REG_1 + (0x100 * i), &tmp);
value &= 0xF000FC00;
value |= 0x06C002D0;
- ret_val =
- cx25821_i2c_write(&dev->i2c_bus[0],
- DENC_A_REG_1 + (0x100 * i), value);
+ ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
+ DENC_A_REG_1 + (0x100 * i), value);
/* burst begin and burst end */
- value =
- cx25821_i2c_read(&dev->i2c_bus[0],
- DENC_A_REG_2 + (0x100 * i), &tmp);
+ value = cx25821_i2c_read(&dev->i2c_bus[0],
+ DENC_A_REG_2 + (0x100 * i), &tmp);
value &= 0xFF000000;
value |= 0x007E9754;
- ret_val =
- cx25821_i2c_write(&dev->i2c_bus[0],
- DENC_A_REG_2 + (0x100 * i), value);
+ ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
+ DENC_A_REG_2 + (0x100 * i), value);
/* hblank and vactive */
- value =
- cx25821_i2c_read(&dev->i2c_bus[0],
- DENC_A_REG_3 + (0x100 * i), &tmp);
+ value = cx25821_i2c_read(&dev->i2c_bus[0],
+ DENC_A_REG_3 + (0x100 * i), &tmp);
value &= 0xFC00FE00;
value |= 0x00FC0120;
- ret_val =
- cx25821_i2c_write(&dev->i2c_bus[0],
- DENC_A_REG_3 + (0x100 * i), value);
+ ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
+ DENC_A_REG_3 + (0x100 * i), value);
/* set PAL vblank, phase alternation, 0 IRE pedestal */
- value =
- cx25821_i2c_read(&dev->i2c_bus[0],
- DENC_A_REG_4 + (0x100 * i), &tmp);
+ value = cx25821_i2c_read(&dev->i2c_bus[0],
+ DENC_A_REG_4 + (0x100 * i), &tmp);
value &= 0x00FCFFFF;
value |= 0x14010000;
- ret_val =
- cx25821_i2c_write(&dev->i2c_bus[0],
- DENC_A_REG_4 + (0x100 * i), value);
+ ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
+ DENC_A_REG_4 + (0x100 * i), value);
- value =
- cx25821_i2c_read(&dev->i2c_bus[0],
- DENC_A_REG_5 + (0x100 * i), &tmp);
+ value = cx25821_i2c_read(&dev->i2c_bus[0],
+ DENC_A_REG_5 + (0x100 * i), &tmp);
value &= 0xFFFF0000;
value |= 0x0000F078;
- ret_val =
- cx25821_i2c_write(&dev->i2c_bus[0],
- DENC_A_REG_5 + (0x100 * i), value);
+ ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
+ DENC_A_REG_5 + (0x100 * i), value);
- ret_val =
- cx25821_i2c_write(&dev->i2c_bus[0],
- DENC_A_REG_6 + (0x100 * i), 0x00A493CF);
+ ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
+ DENC_A_REG_6 + (0x100 * i), 0x00A493CF);
/* Subcarrier Increment */
- ret_val =
- cx25821_i2c_write(&dev->i2c_bus[0],
- DENC_A_REG_7 + (0x100 * i), 0x2A098ACB);
+ ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
+ DENC_A_REG_7 + (0x100 * i), 0x2A098ACB);
}
/* set picture resolutions */
@@ -499,7 +438,7 @@ void medusa_set_resolution(struct cx25821_dev *dev, int width,
mutex_lock(&dev->lock);
- /* validate the width - cannot be negative */
+ /* validate the width */
if (width > MAX_WIDTH) {
pr_info("%s(): width %d > MAX_WIDTH %d ! resetting to MAX_WIDTH\n",
__func__, width, MAX_WIDTH);
@@ -543,12 +482,10 @@ void medusa_set_resolution(struct cx25821_dev *dev, int width,
for (; decoder < decoder_count; decoder++) {
/* write scaling values for each decoder */
- ret_val =
- cx25821_i2c_write(&dev->i2c_bus[0],
- HSCALE_CTRL + (0x200 * decoder), hscale);
- ret_val =
- cx25821_i2c_write(&dev->i2c_bus[0],
- VSCALE_CTRL + (0x200 * decoder), vscale);
+ ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
+ HSCALE_CTRL + (0x200 * decoder), hscale);
+ ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
+ VSCALE_CTRL + (0x200 * decoder), vscale);
}
mutex_unlock(&dev->lock);
@@ -606,8 +543,8 @@ static void medusa_set_decoderduration(struct cx25821_dev *dev, int decoder,
}
/* Map to Medusa register setting */
-static int mapM(int srcMin,
- int srcMax, int srcVal, int dstMin, int dstMax, int *dstVal)
+static int mapM(int srcMin, int srcMax, int srcVal, int dstMin, int dstMax,
+ int *dstVal)
{
int numerator;
int denominator;
@@ -654,23 +591,19 @@ int medusa_set_brightness(struct cx25821_dev *dev, int brightness, int decoder)
u32 val = 0, tmp = 0;
mutex_lock(&dev->lock);
- if ((brightness > VIDEO_PROCAMP_MAX)
- || (brightness < VIDEO_PROCAMP_MIN)) {
+ if ((brightness > VIDEO_PROCAMP_MAX) ||
+ (brightness < VIDEO_PROCAMP_MIN)) {
mutex_unlock(&dev->lock);
return -1;
}
- ret_val =
- mapM(VIDEO_PROCAMP_MIN, VIDEO_PROCAMP_MAX, brightness,
- SIGNED_BYTE_MIN, SIGNED_BYTE_MAX, &value);
+ ret_val = mapM(VIDEO_PROCAMP_MIN, VIDEO_PROCAMP_MAX, brightness,
+ SIGNED_BYTE_MIN, SIGNED_BYTE_MAX, &value);
value = convert_to_twos(value, 8);
- val =
- cx25821_i2c_read(&dev->i2c_bus[0],
- VDEC_A_BRITE_CTRL + (0x200 * decoder), &tmp);
+ val = cx25821_i2c_read(&dev->i2c_bus[0],
+ VDEC_A_BRITE_CTRL + (0x200 * decoder), &tmp);
val &= 0xFFFFFF00;
- ret_val |=
- cx25821_i2c_write(&dev->i2c_bus[0],
- VDEC_A_BRITE_CTRL + (0x200 * decoder),
- val | value);
+ ret_val |= cx25821_i2c_write(&dev->i2c_bus[0],
+ VDEC_A_BRITE_CTRL + (0x200 * decoder), val | value);
mutex_unlock(&dev->lock);
return ret_val;
}
@@ -688,17 +621,13 @@ int medusa_set_contrast(struct cx25821_dev *dev, int contrast, int decoder)
return -1;
}
- ret_val =
- mapM(VIDEO_PROCAMP_MIN, VIDEO_PROCAMP_MAX, contrast,
- UNSIGNED_BYTE_MIN, UNSIGNED_BYTE_MAX, &value);
- val =
- cx25821_i2c_read(&dev->i2c_bus[0],
- VDEC_A_CNTRST_CTRL + (0x200 * decoder), &tmp);
+ ret_val = mapM(VIDEO_PROCAMP_MIN, VIDEO_PROCAMP_MAX, contrast,
+ UNSIGNED_BYTE_MIN, UNSIGNED_BYTE_MAX, &value);
+ val = cx25821_i2c_read(&dev->i2c_bus[0],
+ VDEC_A_CNTRST_CTRL + (0x200 * decoder), &tmp);
val &= 0xFFFFFF00;
- ret_val |=
- cx25821_i2c_write(&dev->i2c_bus[0],
- VDEC_A_CNTRST_CTRL + (0x200 * decoder),
- val | value);
+ ret_val |= cx25821_i2c_write(&dev->i2c_bus[0],
+ VDEC_A_CNTRST_CTRL + (0x200 * decoder), val | value);
mutex_unlock(&dev->lock);
return ret_val;
@@ -717,19 +646,16 @@ int medusa_set_hue(struct cx25821_dev *dev, int hue, int decoder)
return -1;
}
- ret_val =
- mapM(VIDEO_PROCAMP_MIN, VIDEO_PROCAMP_MAX, hue, SIGNED_BYTE_MIN,
- SIGNED_BYTE_MAX, &value);
+ ret_val = mapM(VIDEO_PROCAMP_MIN, VIDEO_PROCAMP_MAX, hue,
+ SIGNED_BYTE_MIN, SIGNED_BYTE_MAX, &value);
value = convert_to_twos(value, 8);
- val =
- cx25821_i2c_read(&dev->i2c_bus[0],
- VDEC_A_HUE_CTRL + (0x200 * decoder), &tmp);
+ val = cx25821_i2c_read(&dev->i2c_bus[0],
+ VDEC_A_HUE_CTRL + (0x200 * decoder), &tmp);
val &= 0xFFFFFF00;
- ret_val |=
- cx25821_i2c_write(&dev->i2c_bus[0],
- VDEC_A_HUE_CTRL + (0x200 * decoder), val | value);
+ ret_val |= cx25821_i2c_write(&dev->i2c_bus[0],
+ VDEC_A_HUE_CTRL + (0x200 * decoder), val | value);
mutex_unlock(&dev->lock);
return ret_val;
@@ -743,33 +669,26 @@ int medusa_set_saturation(struct cx25821_dev *dev, int saturation, int decoder)
mutex_lock(&dev->lock);
- if ((saturation > VIDEO_PROCAMP_MAX)
- || (saturation < VIDEO_PROCAMP_MIN)) {
+ if ((saturation > VIDEO_PROCAMP_MAX) ||
+ (saturation < VIDEO_PROCAMP_MIN)) {
mutex_unlock(&dev->lock);
return -1;
}
- ret_val =
- mapM(VIDEO_PROCAMP_MIN, VIDEO_PROCAMP_MAX, saturation,
- UNSIGNED_BYTE_MIN, UNSIGNED_BYTE_MAX, &value);
+ ret_val = mapM(VIDEO_PROCAMP_MIN, VIDEO_PROCAMP_MAX, saturation,
+ UNSIGNED_BYTE_MIN, UNSIGNED_BYTE_MAX, &value);
- val =
- cx25821_i2c_read(&dev->i2c_bus[0],
- VDEC_A_USAT_CTRL + (0x200 * decoder), &tmp);
+ val = cx25821_i2c_read(&dev->i2c_bus[0],
+ VDEC_A_USAT_CTRL + (0x200 * decoder), &tmp);
val &= 0xFFFFFF00;
- ret_val |=
- cx25821_i2c_write(&dev->i2c_bus[0],
- VDEC_A_USAT_CTRL + (0x200 * decoder),
- val | value);
-
- val =
- cx25821_i2c_read(&dev->i2c_bus[0],
- VDEC_A_VSAT_CTRL + (0x200 * decoder), &tmp);
+ ret_val |= cx25821_i2c_write(&dev->i2c_bus[0],
+ VDEC_A_USAT_CTRL + (0x200 * decoder), val | value);
+
+ val = cx25821_i2c_read(&dev->i2c_bus[0],
+ VDEC_A_VSAT_CTRL + (0x200 * decoder), &tmp);
val &= 0xFFFFFF00;
- ret_val |=
- cx25821_i2c_write(&dev->i2c_bus[0],
- VDEC_A_VSAT_CTRL + (0x200 * decoder),
- val | value);
+ ret_val |= cx25821_i2c_write(&dev->i2c_bus[0],
+ VDEC_A_VSAT_CTRL + (0x200 * decoder), val | value);
mutex_unlock(&dev->lock);
return ret_val;
@@ -830,9 +749,8 @@ int medusa_video_init(struct cx25821_dev *dev)
/* select AFE clock to output mode */
value = cx25821_i2c_read(&dev->i2c_bus[0], AFE_AB_DIAG_CTRL, &tmp);
value &= 0x83FFFFFF;
- ret_val =
- cx25821_i2c_write(&dev->i2c_bus[0], AFE_AB_DIAG_CTRL,
- value | 0x10000000);
+ ret_val = cx25821_i2c_write(&dev->i2c_bus[0], AFE_AB_DIAG_CTRL,
+ value | 0x10000000);
if (ret_val < 0)
goto error;
diff --git a/drivers/media/video/cx25821/cx25821-video-upstream-ch2.c b/drivers/media/video/cx25821/cx25821-video-upstream-ch2.c
index 2a724ddfa53f..5a157cf4a95e 100644
--- a/drivers/media/video/cx25821/cx25821-video-upstream-ch2.c
+++ b/drivers/media/video/cx25821/cx25821-video-upstream-ch2.c
@@ -65,9 +65,8 @@ static __le32 *cx25821_update_riscprogram_ch2(struct cx25821_dev *dev,
*(rp++) = cpu_to_le32(dev->_data_buf_phys_addr_ch2 + offset);
*(rp++) = cpu_to_le32(0); /* bits 63-32 */
- if ((lines <= NTSC_FIELD_HEIGHT)
- || (line < (NTSC_FIELD_HEIGHT - 1))
- || !(dev->_isNTSC_ch2)) {
+ if ((lines <= NTSC_FIELD_HEIGHT) ||
+ (line < (NTSC_FIELD_HEIGHT - 1)) || !(dev->_isNTSC_ch2)) {
offset += dist_betwn_starts;
}
}
@@ -85,7 +84,7 @@ static __le32 *cx25821_risc_field_upstream_ch2(struct cx25821_dev *dev,
{
unsigned int line, i;
struct sram_channel *sram_ch =
- dev->channels[dev->_channel2_upstream_select].sram_channels;
+ dev->channels[dev->_channel2_upstream_select].sram_channels;
int dist_betwn_starts = bpl * 2;
/* sync instruction */
@@ -103,9 +102,8 @@ static __le32 *cx25821_risc_field_upstream_ch2(struct cx25821_dev *dev,
*(rp++) = cpu_to_le32(databuf_phys_addr + offset);
*(rp++) = cpu_to_le32(0); /* bits 63-32 */
- if ((lines <= NTSC_FIELD_HEIGHT)
- || (line < (NTSC_FIELD_HEIGHT - 1))
- || !(dev->_isNTSC_ch2)) {
+ if ((lines <= NTSC_FIELD_HEIGHT) ||
+ (line < (NTSC_FIELD_HEIGHT - 1)) || !(dev->_isNTSC_ch2)) {
offset += dist_betwn_starts;
}
@@ -173,7 +171,7 @@ int cx25821_risc_buffer_upstream_ch2(struct cx25821_dev *dev,
fifo_enable = FIFO_DISABLE;
- /* Even field */
+ /* Even field */
rp = cx25821_risc_field_upstream_ch2(dev, rp,
dev->_data_buf_phys_addr_ch2 + databuf_offset,
bottom_offset, 0x200, bpl, singlefield_lines,
@@ -189,9 +187,9 @@ int cx25821_risc_buffer_upstream_ch2(struct cx25821_dev *dev,
}
/*
- Loop to 2ndFrameRISC or to Start of
- Risc program & generate IRQ
- */
+ * Loop to 2ndFrameRISC or to Start of
+ * Risc program & generate IRQ
+ */
*(rp++) = cpu_to_le32(RISC_JUMP | RISC_IRQ1 | risc_flag);
*(rp++) = cpu_to_le32(risc_phys_jump_addr);
*(rp++) = cpu_to_le32(0);
@@ -203,7 +201,7 @@ int cx25821_risc_buffer_upstream_ch2(struct cx25821_dev *dev,
void cx25821_stop_upstream_video_ch2(struct cx25821_dev *dev)
{
struct sram_channel *sram_ch =
- dev->channels[VID_UPSTREAM_SRAM_CHANNEL_J].sram_channels;
+ dev->channels[VID_UPSTREAM_SRAM_CHANNEL_J].sram_channels;
u32 tmp = 0;
if (!dev->_is_running_ch2) {
@@ -262,9 +260,8 @@ int cx25821_get_frame_ch2(struct cx25821_dev *dev, struct sram_channel *sram_ch)
struct file *myfile;
int frame_index_temp = dev->_frame_index_ch2;
int i = 0;
- int line_size =
- (dev->_pixel_format_ch2 ==
- PIXEL_FRMT_411) ? Y411_LINE_SZ : Y422_LINE_SZ;
+ int line_size = (dev->_pixel_format_ch2 == PIXEL_FRMT_411) ?
+ Y411_LINE_SZ : Y422_LINE_SZ;
int frame_size = 0;
int frame_offset = 0;
ssize_t vfs_read_retval = 0;
@@ -277,14 +274,11 @@ int cx25821_get_frame_ch2(struct cx25821_dev *dev, struct sram_channel *sram_ch)
return 0;
if (dev->_isNTSC_ch2) {
- frame_size =
- (line_size ==
- Y411_LINE_SZ) ? FRAME_SIZE_NTSC_Y411 :
- FRAME_SIZE_NTSC_Y422;
+ frame_size = (line_size == Y411_LINE_SZ) ?
+ FRAME_SIZE_NTSC_Y411 : FRAME_SIZE_NTSC_Y422;
} else {
- frame_size =
- (line_size ==
- Y411_LINE_SZ) ? FRAME_SIZE_PAL_Y411 : FRAME_SIZE_PAL_Y422;
+ frame_size = (line_size == Y411_LINE_SZ) ?
+ FRAME_SIZE_PAL_Y411 : FRAME_SIZE_PAL_Y422;
}
frame_offset = (frame_index_temp > 0) ? frame_size : 0;
@@ -318,14 +312,14 @@ int cx25821_get_frame_ch2(struct cx25821_dev *dev, struct sram_channel *sram_ch)
for (i = 0; i < dev->_lines_count_ch2; i++) {
pos = file_offset;
- vfs_read_retval =
- vfs_read(myfile, mybuf, line_size, &pos);
+ vfs_read_retval = vfs_read(myfile, mybuf, line_size,
+ &pos);
if (vfs_read_retval > 0 && vfs_read_retval == line_size
&& dev->_data_buf_virt_addr_ch2 != NULL) {
memcpy((void *)(dev->_data_buf_virt_addr_ch2 +
frame_offset / 4), mybuf,
- vfs_read_retval);
+ vfs_read_retval);
}
file_offset += vfs_read_retval;
@@ -341,8 +335,8 @@ int cx25821_get_frame_ch2(struct cx25821_dev *dev, struct sram_channel *sram_ch)
if (i > 0)
dev->_frame_count_ch2++;
- dev->_file_status_ch2 =
- (vfs_read_retval == line_size) ? IN_PROGRESS : END_OF_FILE;
+ dev->_file_status_ch2 = (vfs_read_retval == line_size) ?
+ IN_PROGRESS : END_OF_FILE;
set_fs(old_fs);
filp_close(myfile, NULL);
@@ -353,8 +347,8 @@ int cx25821_get_frame_ch2(struct cx25821_dev *dev, struct sram_channel *sram_ch)
static void cx25821_vidups_handler_ch2(struct work_struct *work)
{
- struct cx25821_dev *dev =
- container_of(work, struct cx25821_dev, _irq_work_entry_ch2);
+ struct cx25821_dev *dev = container_of(work, struct cx25821_dev,
+ _irq_work_entry_ch2);
if (!dev) {
pr_err("ERROR %s(): since container_of(work_struct) FAILED!\n",
@@ -362,18 +356,16 @@ static void cx25821_vidups_handler_ch2(struct work_struct *work)
return;
}
- cx25821_get_frame_ch2(dev,
- dev->channels[dev->
- _channel2_upstream_select].sram_channels);
+ cx25821_get_frame_ch2(dev, dev->channels[dev->
+ _channel2_upstream_select].sram_channels);
}
int cx25821_openfile_ch2(struct cx25821_dev *dev, struct sram_channel *sram_ch)
{
struct file *myfile;
int i = 0, j = 0;
- int line_size =
- (dev->_pixel_format_ch2 ==
- PIXEL_FRMT_411) ? Y411_LINE_SZ : Y422_LINE_SZ;
+ int line_size = (dev->_pixel_format_ch2 == PIXEL_FRMT_411) ?
+ Y411_LINE_SZ : Y422_LINE_SZ;
ssize_t vfs_read_retval = 0;
char mybuf[line_size];
loff_t pos;
@@ -410,16 +402,16 @@ int cx25821_openfile_ch2(struct cx25821_dev *dev, struct sram_channel *sram_ch)
for (i = 0; i < dev->_lines_count_ch2; i++) {
pos = offset;
- vfs_read_retval =
- vfs_read(myfile, mybuf, line_size, &pos);
+ vfs_read_retval = vfs_read(myfile, mybuf,
+ line_size, &pos);
- if (vfs_read_retval > 0
- && vfs_read_retval == line_size
- && dev->_data_buf_virt_addr_ch2 != NULL) {
+ if (vfs_read_retval > 0 &&
+ vfs_read_retval == line_size &&
+ dev->_data_buf_virt_addr_ch2 != NULL) {
memcpy((void *)(dev->
_data_buf_virt_addr_ch2
+ offset / 4), mybuf,
- vfs_read_retval);
+ vfs_read_retval);
}
offset += vfs_read_retval;
@@ -438,8 +430,8 @@ int cx25821_openfile_ch2(struct cx25821_dev *dev, struct sram_channel *sram_ch)
break;
}
- dev->_file_status_ch2 =
- (vfs_read_retval == line_size) ? IN_PROGRESS : END_OF_FILE;
+ dev->_file_status_ch2 = (vfs_read_retval == line_size) ?
+ IN_PROGRESS : END_OF_FILE;
set_fs(old_fs);
myfile->f_pos = 0;
@@ -463,9 +455,8 @@ static int cx25821_upstream_buffer_prepare_ch2(struct cx25821_dev *dev,
dev->_dma_phys_addr_ch2);
}
- dev->_dma_virt_addr_ch2 =
- pci_alloc_consistent(dev->pci, dev->upstream_riscbuf_size_ch2,
- &dma_addr);
+ dev->_dma_virt_addr_ch2 = pci_alloc_consistent(dev->pci,
+ dev->upstream_riscbuf_size_ch2, &dma_addr);
dev->_dma_virt_start_addr_ch2 = dev->_dma_virt_addr_ch2;
dev->_dma_phys_start_addr_ch2 = dma_addr;
dev->_dma_phys_addr_ch2 = dma_addr;
@@ -485,9 +476,8 @@ static int cx25821_upstream_buffer_prepare_ch2(struct cx25821_dev *dev,
dev->_data_buf_phys_addr_ch2);
}
/* For Video Data buffer allocation */
- dev->_data_buf_virt_addr_ch2 =
- pci_alloc_consistent(dev->pci, dev->upstream_databuf_size_ch2,
- &data_dma_addr);
+ dev->_data_buf_virt_addr_ch2 = pci_alloc_consistent(dev->pci,
+ dev->upstream_databuf_size_ch2, &data_dma_addr);
dev->_data_buf_phys_addr_ch2 = data_dma_addr;
dev->_data_buf_size_ch2 = dev->upstream_databuf_size_ch2;
@@ -563,8 +553,8 @@ int cx25821_video_upstream_irq_ch2(struct cx25821_dev *dev, int chan_num,
else
line_size_in_bytes = Y422_LINE_SZ;
risc_phys_jump_addr =
- dev->_dma_phys_start_addr_ch2 +
- odd_risc_prog_size;
+ dev->_dma_phys_start_addr_ch2 +
+ odd_risc_prog_size;
rp = cx25821_update_riscprogram_ch2(dev,
dev->_dma_virt_start_addr_ch2,
@@ -612,11 +602,9 @@ static irqreturn_t cx25821_upstream_irq_ch2(int irq, void *dev_id)
vid_status = cx_read(sram_ch->int_stat);
/* Only deal with our interrupt */
- if (vid_status) {
- handled =
- cx25821_video_upstream_irq_ch2(dev, channel_num,
- vid_status);
- }
+ if (vid_status)
+ handled = cx25821_video_upstream_irq_ch2(dev, channel_num,
+ vid_status);
if (handled < 0)
cx25821_stop_upstream_video_ch2(dev);
@@ -691,8 +679,7 @@ int cx25821_start_video_dma_upstream_ch2(struct cx25821_dev *dev,
tmp = cx_read(sram_ch->int_msk);
cx_write(sram_ch->int_msk, tmp |= _intr_msk);
- err =
- request_irq(dev->pci->irq, cx25821_upstream_irq_ch2,
+ err = request_irq(dev->pci->irq, cx25821_upstream_irq_ch2,
IRQF_SHARED, dev->name, dev);
if (err < 0) {
pr_err("%s: can't get upstream IRQ %d\n",
@@ -752,45 +739,38 @@ int cx25821_vidupstream_init_ch2(struct cx25821_dev *dev, int channel_select,
dev->_file_status_ch2 = RESET_STATUS;
dev->_lines_count_ch2 = dev->_isNTSC_ch2 ? 480 : 576;
dev->_pixel_format_ch2 = pixel_format;
- dev->_line_size_ch2 =
- (dev->_pixel_format_ch2 ==
- PIXEL_FRMT_422) ? (WIDTH_D1 * 2) : (WIDTH_D1 * 3) / 2;
+ dev->_line_size_ch2 = (dev->_pixel_format_ch2 == PIXEL_FRMT_422) ?
+ (WIDTH_D1 * 2) : (WIDTH_D1 * 3) / 2;
data_frame_size = dev->_isNTSC_ch2 ? NTSC_DATA_BUF_SZ : PAL_DATA_BUF_SZ;
- risc_buffer_size =
- dev->_isNTSC_ch2 ? NTSC_RISC_BUF_SIZE : PAL_RISC_BUF_SIZE;
+ risc_buffer_size = dev->_isNTSC_ch2 ?
+ NTSC_RISC_BUF_SIZE : PAL_RISC_BUF_SIZE;
if (dev->input_filename_ch2) {
str_length = strlen(dev->input_filename_ch2);
- dev->_filename_ch2 = kmalloc(str_length + 1, GFP_KERNEL);
+ dev->_filename_ch2 = kmemdup(dev->input_filename_ch2,
+ str_length + 1, GFP_KERNEL);
if (!dev->_filename_ch2)
goto error;
-
- memcpy(dev->_filename_ch2, dev->input_filename_ch2,
- str_length + 1);
} else {
str_length = strlen(dev->_defaultname_ch2);
- dev->_filename_ch2 = kmalloc(str_length + 1, GFP_KERNEL);
+ dev->_filename_ch2 = kmemdup(dev->_defaultname_ch2,
+ str_length + 1, GFP_KERNEL);
if (!dev->_filename_ch2)
goto error;
-
- memcpy(dev->_filename_ch2, dev->_defaultname_ch2,
- str_length + 1);
}
/* Default if filename is empty string */
if (strcmp(dev->input_filename_ch2, "") == 0) {
if (dev->_isNTSC_ch2) {
- dev->_filename_ch2 =
- (dev->_pixel_format_ch2 ==
- PIXEL_FRMT_411) ? "/root/vid411.yuv" :
- "/root/vidtest.yuv";
+ dev->_filename_ch2 = (dev->_pixel_format_ch2 ==
+ PIXEL_FRMT_411) ? "/root/vid411.yuv" :
+ "/root/vidtest.yuv";
} else {
- dev->_filename_ch2 =
- (dev->_pixel_format_ch2 ==
- PIXEL_FRMT_411) ? "/root/pal411.yuv" :
- "/root/pal422.yuv";
+ dev->_filename_ch2 = (dev->_pixel_format_ch2 ==
+ PIXEL_FRMT_411) ? "/root/pal411.yuv" :
+ "/root/pal422.yuv";
}
}
diff --git a/drivers/media/video/cx25821/cx25821-video-upstream.c b/drivers/media/video/cx25821/cx25821-video-upstream.c
index c0b80068f468..21e7d657f049 100644
--- a/drivers/media/video/cx25821/cx25821-video-upstream.c
+++ b/drivers/media/video/cx25821/cx25821-video-upstream.c
@@ -136,7 +136,7 @@ static __le32 *cx25821_risc_field_upstream(struct cx25821_dev *dev, __le32 * rp,
{
unsigned int line, i;
struct sram_channel *sram_ch =
- dev->channels[dev->_channel_upstream_select].sram_channels;
+ dev->channels[dev->_channel_upstream_select].sram_channels;
int dist_betwn_starts = bpl * 2;
/* sync instruction */
@@ -194,15 +194,12 @@ int cx25821_risc_buffer_upstream(struct cx25821_dev *dev,
if (dev->_isNTSC) {
odd_num_lines = singlefield_lines + 1;
risc_program_size = FRAME1_VID_PROG_SIZE;
- frame_size =
- (bpl ==
- Y411_LINE_SZ) ? FRAME_SIZE_NTSC_Y411 :
- FRAME_SIZE_NTSC_Y422;
+ frame_size = (bpl == Y411_LINE_SZ) ?
+ FRAME_SIZE_NTSC_Y411 : FRAME_SIZE_NTSC_Y422;
} else {
risc_program_size = PAL_VID_PROG_SIZE;
- frame_size =
- (bpl ==
- Y411_LINE_SZ) ? FRAME_SIZE_PAL_Y411 : FRAME_SIZE_PAL_Y422;
+ frame_size = (bpl == Y411_LINE_SZ) ?
+ FRAME_SIZE_PAL_Y411 : FRAME_SIZE_PAL_Y422;
}
/* Virtual address of Risc buffer program */
@@ -214,13 +211,9 @@ int cx25821_risc_buffer_upstream(struct cx25821_dev *dev,
if (UNSET != top_offset) {
fifo_enable = (frame == 0) ? FIFO_ENABLE : FIFO_DISABLE;
rp = cx25821_risc_field_upstream(dev, rp,
- dev->
- _data_buf_phys_addr +
- databuf_offset,
- top_offset, 0, bpl,
- odd_num_lines,
- fifo_enable,
- ODD_FIELD);
+ dev->_data_buf_phys_addr +
+ databuf_offset, top_offset, 0, bpl,
+ odd_num_lines, fifo_enable, ODD_FIELD);
}
fifo_enable = FIFO_DISABLE;
@@ -234,8 +227,8 @@ int cx25821_risc_buffer_upstream(struct cx25821_dev *dev,
if (frame == 0) {
risc_flag = RISC_CNT_RESET;
- risc_phys_jump_addr =
- dev->_dma_phys_start_addr + risc_program_size;
+ risc_phys_jump_addr = dev->_dma_phys_start_addr +
+ risc_program_size;
} else {
risc_phys_jump_addr = dev->_dma_phys_start_addr;
risc_flag = RISC_CNT_INC;
@@ -255,7 +248,7 @@ int cx25821_risc_buffer_upstream(struct cx25821_dev *dev,
void cx25821_stop_upstream_video_ch1(struct cx25821_dev *dev)
{
struct sram_channel *sram_ch =
- dev->channels[VID_UPSTREAM_SRAM_CHANNEL_I].sram_channels;
+ dev->channels[VID_UPSTREAM_SRAM_CHANNEL_I].sram_channels;
u32 tmp = 0;
if (!dev->_is_running) {
@@ -312,9 +305,8 @@ int cx25821_get_frame(struct cx25821_dev *dev, struct sram_channel *sram_ch)
struct file *myfile;
int frame_index_temp = dev->_frame_index;
int i = 0;
- int line_size =
- (dev->_pixel_format ==
- PIXEL_FRMT_411) ? Y411_LINE_SZ : Y422_LINE_SZ;
+ int line_size = (dev->_pixel_format == PIXEL_FRMT_411) ?
+ Y411_LINE_SZ : Y422_LINE_SZ;
int frame_size = 0;
int frame_offset = 0;
ssize_t vfs_read_retval = 0;
@@ -326,16 +318,12 @@ int cx25821_get_frame(struct cx25821_dev *dev, struct sram_channel *sram_ch)
if (dev->_file_status == END_OF_FILE)
return 0;
- if (dev->_isNTSC) {
- frame_size =
- (line_size ==
- Y411_LINE_SZ) ? FRAME_SIZE_NTSC_Y411 :
- FRAME_SIZE_NTSC_Y422;
- } else {
- frame_size =
- (line_size ==
- Y411_LINE_SZ) ? FRAME_SIZE_PAL_Y411 : FRAME_SIZE_PAL_Y422;
- }
+ if (dev->_isNTSC)
+ frame_size = (line_size == Y411_LINE_SZ) ?
+ FRAME_SIZE_NTSC_Y411 : FRAME_SIZE_NTSC_Y422;
+ else
+ frame_size = (line_size == Y411_LINE_SZ) ?
+ FRAME_SIZE_PAL_Y411 : FRAME_SIZE_PAL_Y422;
frame_offset = (frame_index_temp > 0) ? frame_size : 0;
file_offset = dev->_frame_count * frame_size;
@@ -369,8 +357,8 @@ int cx25821_get_frame(struct cx25821_dev *dev, struct sram_channel *sram_ch)
for (i = 0; i < dev->_lines_count; i++) {
pos = file_offset;
- vfs_read_retval =
- vfs_read(myfile, mybuf, line_size, &pos);
+ vfs_read_retval = vfs_read(myfile, mybuf, line_size,
+ &pos);
if (vfs_read_retval > 0 && vfs_read_retval == line_size
&& dev->_data_buf_virt_addr != NULL) {
@@ -392,8 +380,8 @@ int cx25821_get_frame(struct cx25821_dev *dev, struct sram_channel *sram_ch)
if (i > 0)
dev->_frame_count++;
- dev->_file_status =
- (vfs_read_retval == line_size) ? IN_PROGRESS : END_OF_FILE;
+ dev->_file_status = (vfs_read_retval == line_size) ?
+ IN_PROGRESS : END_OF_FILE;
set_fs(old_fs);
filp_close(myfile, NULL);
@@ -404,8 +392,8 @@ int cx25821_get_frame(struct cx25821_dev *dev, struct sram_channel *sram_ch)
static void cx25821_vidups_handler(struct work_struct *work)
{
- struct cx25821_dev *dev =
- container_of(work, struct cx25821_dev, _irq_work_entry);
+ struct cx25821_dev *dev = container_of(work, struct cx25821_dev,
+ _irq_work_entry);
if (!dev) {
pr_err("ERROR %s(): since container_of(work_struct) FAILED!\n",
@@ -413,18 +401,16 @@ static void cx25821_vidups_handler(struct work_struct *work)
return;
}
- cx25821_get_frame(dev,
- dev->channels[dev->_channel_upstream_select].
- sram_channels);
+ cx25821_get_frame(dev, dev->channels[dev->_channel_upstream_select].
+ sram_channels);
}
int cx25821_openfile(struct cx25821_dev *dev, struct sram_channel *sram_ch)
{
struct file *myfile;
int i = 0, j = 0;
- int line_size =
- (dev->_pixel_format ==
- PIXEL_FRMT_411) ? Y411_LINE_SZ : Y422_LINE_SZ;
+ int line_size = (dev->_pixel_format == PIXEL_FRMT_411) ?
+ Y411_LINE_SZ : Y422_LINE_SZ;
ssize_t vfs_read_retval = 0;
char mybuf[line_size];
loff_t pos;
@@ -461,8 +447,8 @@ int cx25821_openfile(struct cx25821_dev *dev, struct sram_channel *sram_ch)
for (i = 0; i < dev->_lines_count; i++) {
pos = offset;
- vfs_read_retval =
- vfs_read(myfile, mybuf, line_size, &pos);
+ vfs_read_retval = vfs_read(myfile, mybuf,
+ line_size, &pos);
if (vfs_read_retval > 0
&& vfs_read_retval == line_size
@@ -489,8 +475,8 @@ int cx25821_openfile(struct cx25821_dev *dev, struct sram_channel *sram_ch)
break;
}
- dev->_file_status =
- (vfs_read_retval == line_size) ? IN_PROGRESS : END_OF_FILE;
+ dev->_file_status = (vfs_read_retval == line_size) ?
+ IN_PROGRESS : END_OF_FILE;
set_fs(old_fs);
myfile->f_pos = 0;
@@ -507,14 +493,12 @@ int cx25821_upstream_buffer_prepare(struct cx25821_dev *dev,
dma_addr_t dma_addr;
dma_addr_t data_dma_addr;
- if (dev->_dma_virt_addr != NULL) {
+ if (dev->_dma_virt_addr != NULL)
pci_free_consistent(dev->pci, dev->upstream_riscbuf_size,
- dev->_dma_virt_addr, dev->_dma_phys_addr);
- }
+ dev->_dma_virt_addr, dev->_dma_phys_addr);
- dev->_dma_virt_addr =
- pci_alloc_consistent(dev->pci, dev->upstream_riscbuf_size,
- &dma_addr);
+ dev->_dma_virt_addr = pci_alloc_consistent(dev->pci,
+ dev->upstream_riscbuf_size, &dma_addr);
dev->_dma_virt_start_addr = dev->_dma_virt_addr;
dev->_dma_phys_start_addr = dma_addr;
dev->_dma_phys_addr = dma_addr;
@@ -528,15 +512,13 @@ int cx25821_upstream_buffer_prepare(struct cx25821_dev *dev,
/* Clear memory at address */
memset(dev->_dma_virt_addr, 0, dev->_risc_size);
- if (dev->_data_buf_virt_addr != NULL) {
+ if (dev->_data_buf_virt_addr != NULL)
pci_free_consistent(dev->pci, dev->upstream_databuf_size,
- dev->_data_buf_virt_addr,
- dev->_data_buf_phys_addr);
- }
+ dev->_data_buf_virt_addr,
+ dev->_data_buf_phys_addr);
/* For Video Data buffer allocation */
- dev->_data_buf_virt_addr =
- pci_alloc_consistent(dev->pci, dev->upstream_databuf_size,
- &data_dma_addr);
+ dev->_data_buf_virt_addr = pci_alloc_consistent(dev->pci,
+ dev->upstream_databuf_size, &data_dma_addr);
dev->_data_buf_phys_addr = data_dma_addr;
dev->_data_buf_size = dev->upstream_databuf_size;
@@ -553,9 +535,8 @@ int cx25821_upstream_buffer_prepare(struct cx25821_dev *dev,
return ret;
/* Create RISC programs */
- ret =
- cx25821_risc_buffer_upstream(dev, dev->pci, 0, bpl,
- dev->_lines_count);
+ ret = cx25821_risc_buffer_upstream(dev, dev->pci, 0, bpl,
+ dev->_lines_count);
if (ret < 0) {
pr_info("Failed creating Video Upstream Risc programs!\n");
goto error;
@@ -672,10 +653,9 @@ static irqreturn_t cx25821_upstream_irq(int irq, void *dev_id)
vid_status = cx_read(sram_ch->int_stat);
/* Only deal with our interrupt */
- if (vid_status) {
- handled =
- cx25821_video_upstream_irq(dev, channel_num, vid_status);
- }
+ if (vid_status)
+ handled = cx25821_video_upstream_irq(dev, channel_num,
+ vid_status);
if (handled < 0)
cx25821_stop_upstream_video_ch1(dev);
@@ -747,8 +727,7 @@ int cx25821_start_video_dma_upstream(struct cx25821_dev *dev,
tmp = cx_read(sram_ch->int_msk);
cx_write(sram_ch->int_msk, tmp |= _intr_msk);
- err =
- request_irq(dev->pci->irq, cx25821_upstream_irq,
+ err = request_irq(dev->pci->irq, cx25821_upstream_irq,
IRQF_SHARED, dev->name, dev);
if (err < 0) {
pr_err("%s: can't get upstream IRQ %d\n",
@@ -807,43 +786,38 @@ int cx25821_vidupstream_init_ch1(struct cx25821_dev *dev, int channel_select,
dev->_file_status = RESET_STATUS;
dev->_lines_count = dev->_isNTSC ? 480 : 576;
dev->_pixel_format = pixel_format;
- dev->_line_size =
- (dev->_pixel_format ==
- PIXEL_FRMT_422) ? (WIDTH_D1 * 2) : (WIDTH_D1 * 3) / 2;
+ dev->_line_size = (dev->_pixel_format == PIXEL_FRMT_422) ?
+ (WIDTH_D1 * 2) : (WIDTH_D1 * 3) / 2;
data_frame_size = dev->_isNTSC ? NTSC_DATA_BUF_SZ : PAL_DATA_BUF_SZ;
- risc_buffer_size =
- dev->_isNTSC ? NTSC_RISC_BUF_SIZE : PAL_RISC_BUF_SIZE;
+ risc_buffer_size = dev->_isNTSC ?
+ NTSC_RISC_BUF_SIZE : PAL_RISC_BUF_SIZE;
if (dev->input_filename) {
str_length = strlen(dev->input_filename);
- dev->_filename = kmalloc(str_length + 1, GFP_KERNEL);
+ dev->_filename = kmemdup(dev->input_filename, str_length + 1,
+ GFP_KERNEL);
if (!dev->_filename)
goto error;
-
- memcpy(dev->_filename, dev->input_filename, str_length + 1);
} else {
str_length = strlen(dev->_defaultname);
- dev->_filename = kmalloc(str_length + 1, GFP_KERNEL);
+ dev->_filename = kmemdup(dev->_defaultname, str_length + 1,
+ GFP_KERNEL);
if (!dev->_filename)
goto error;
-
- memcpy(dev->_filename, dev->_defaultname, str_length + 1);
}
/* Default if filename is empty string */
if (strcmp(dev->input_filename, "") == 0) {
if (dev->_isNTSC) {
dev->_filename =
- (dev->_pixel_format ==
- PIXEL_FRMT_411) ? "/root/vid411.yuv" :
- "/root/vidtest.yuv";
+ (dev->_pixel_format == PIXEL_FRMT_411) ?
+ "/root/vid411.yuv" : "/root/vidtest.yuv";
} else {
dev->_filename =
- (dev->_pixel_format ==
- PIXEL_FRMT_411) ? "/root/pal411.yuv" :
- "/root/pal422.yuv";
+ (dev->_pixel_format == PIXEL_FRMT_411) ?
+ "/root/pal411.yuv" : "/root/pal422.yuv";
}
}
@@ -852,13 +826,11 @@ int cx25821_vidupstream_init_ch1(struct cx25821_dev *dev, int channel_select,
dev->_file_status = RESET_STATUS;
dev->_lines_count = dev->_isNTSC ? 480 : 576;
dev->_pixel_format = pixel_format;
- dev->_line_size =
- (dev->_pixel_format ==
- PIXEL_FRMT_422) ? (WIDTH_D1 * 2) : (WIDTH_D1 * 3) / 2;
+ dev->_line_size = (dev->_pixel_format == PIXEL_FRMT_422) ?
+ (WIDTH_D1 * 2) : (WIDTH_D1 * 3) / 2;
- retval =
- cx25821_sram_channel_setup_upstream(dev, sram_ch, dev->_line_size,
- 0);
+ retval = cx25821_sram_channel_setup_upstream(dev, sram_ch,
+ dev->_line_size, 0);
/* setup fifo + format */
cx25821_set_pixelengine(dev, sram_ch, dev->_pixel_format);
diff --git a/drivers/media/video/cx25821/cx25821-video.c b/drivers/media/video/cx25821/cx25821-video.c
index 4d6907cda75b..ffd8bc79c02e 100644
--- a/drivers/media/video/cx25821/cx25821-video.c
+++ b/drivers/media/video/cx25821/cx25821-video.c
@@ -118,12 +118,12 @@ void cx25821_dump_video_queue(struct cx25821_dev *dev,
if (!list_empty(&q->active)) {
list_for_each(item, &q->active)
- buf = list_entry(item, struct cx25821_buffer, vb.queue);
+ buf = list_entry(item, struct cx25821_buffer, vb.queue);
}
if (!list_empty(&q->queued)) {
list_for_each(item, &q->queued)
- buf = list_entry(item, struct cx25821_buffer, vb.queue);
+ buf = list_entry(item, struct cx25821_buffer, vb.queue);
}
}
@@ -140,8 +140,8 @@ void cx25821_video_wakeup(struct cx25821_dev *dev, struct cx25821_dmaqueue *q,
break;
}
- buf =
- list_entry(q->active.next, struct cx25821_buffer, vb.queue);
+ buf = list_entry(q->active.next, struct cx25821_buffer,
+ vb.queue);
/* count comes from the hw and it is 16bit wide --
* this trick handles wrap-arounds correctly for
@@ -318,8 +318,8 @@ int cx25821_restart_video_queue(struct cx25821_dev *dev,
struct list_head *item;
if (!list_empty(&q->active)) {
- buf =
- list_entry(q->active.next, struct cx25821_buffer, vb.queue);
+ buf = list_entry(q->active.next, struct cx25821_buffer,
+ vb.queue);
cx25821_start_video_dma(dev, q, buf, channel);
@@ -337,8 +337,8 @@ int cx25821_restart_video_queue(struct cx25821_dev *dev,
if (list_empty(&q->queued))
return 0;
- buf =
- list_entry(q->queued.next, struct cx25821_buffer, vb.queue);
+ buf = list_entry(q->queued.next, struct cx25821_buffer,
+ vb.queue);
if (NULL == prev) {
list_move_tail(&buf->vb.queue, &q->active);
@@ -375,8 +375,8 @@ void cx25821_vid_timeout(unsigned long data)
spin_lock_irqsave(&dev->slock, flags);
while (!list_empty(&q->active)) {
- buf =
- list_entry(q->active.next, struct cx25821_buffer, vb.queue);
+ buf = list_entry(q->active.next, struct cx25821_buffer,
+ vb.queue);
list_del(&buf->vb.queue);
buf->vb.state = VIDEOBUF_ERROR;
@@ -484,8 +484,7 @@ int cx25821_video_register(struct cx25821_dev *dev)
cx25821_init_controls(dev, i);
cx25821_risc_stopper(dev->pci, &dev->channels[i].vidq.stopper,
- dev->channels[i].sram_channels->dma_ctl,
- 0x11, 0);
+ dev->channels[i].sram_channels->dma_ctl, 0x11, 0);
dev->channels[i].sram_channels = &cx25821_sram_channels[i];
dev->channels[i].video_dev = NULL;
@@ -499,15 +498,14 @@ int cx25821_video_register(struct cx25821_dev *dev)
dev->channels[i].timeout_data.dev = dev;
dev->channels[i].timeout_data.channel =
&cx25821_sram_channels[i];
- dev->channels[i].vidq.timeout.function =
- cx25821_vid_timeout;
+ dev->channels[i].vidq.timeout.function = cx25821_vid_timeout;
dev->channels[i].vidq.timeout.data =
(unsigned long)&dev->channels[i].timeout_data;
init_timer(&dev->channels[i].vidq.timeout);
/* register v4l devices */
- dev->channels[i].video_dev = cx25821_vdev_init(dev,
- dev->pci, &cx25821_video_device, "video");
+ dev->channels[i].video_dev = cx25821_vdev_init(dev, dev->pci,
+ &cx25821_video_device, "video");
err = video_register_device(dev->channels[i].video_dev,
VFL_TYPE_GRABBER, video_nr[dev->nr]);
@@ -528,7 +526,6 @@ int cx25821_video_register(struct cx25821_dev *dev)
#endif
mutex_unlock(&dev->lock);
-
return 0;
fail_unreg:
@@ -558,7 +555,7 @@ int cx25821_buffer_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb,
struct cx25821_fh *fh = q->priv_data;
struct cx25821_dev *dev = fh->dev;
struct cx25821_buffer *buf =
- container_of(vb, struct cx25821_buffer, vb);
+ container_of(vb, struct cx25821_buffer, vb);
int rc, init_buffer = 0;
u32 line0_offset, line1_offset;
struct videobuf_dmabuf *dma = videobuf_to_dma(&buf->vb);
@@ -617,14 +614,13 @@ int cx25821_buffer_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb,
if (channel_opened >= 0 && channel_opened <= 7) {
if (dev->channels[channel_opened]
.use_cif_resolution) {
- if (dev->tvnorm & V4L2_STD_PAL_BG
- || dev->tvnorm & V4L2_STD_PAL_DK)
+ if (dev->tvnorm & V4L2_STD_PAL_BG ||
+ dev->tvnorm & V4L2_STD_PAL_DK)
bpl_local = 352 << 1;
else
- bpl_local =
- dev->channels[channel_opened].
- cif_width <<
- 1;
+ bpl_local = dev->channels[
+ channel_opened].
+ cif_width << 1;
}
}
}
@@ -685,7 +681,7 @@ void cx25821_buffer_release(struct videobuf_queue *q,
struct videobuf_buffer *vb)
{
struct cx25821_buffer *buf =
- container_of(vb, struct cx25821_buffer, vb);
+ container_of(vb, struct cx25821_buffer, vb);
cx25821_free_buffer(q, buf);
}
@@ -723,7 +719,7 @@ int cx25821_video_mmap(struct file *file, struct vm_area_struct *vma)
static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
{
struct cx25821_buffer *buf =
- container_of(vb, struct cx25821_buffer, vb);
+ container_of(vb, struct cx25821_buffer, vb);
struct cx25821_buffer *prev;
struct cx25821_fh *fh = vq->priv_data;
struct cx25821_dev *dev = fh->dev;
@@ -814,7 +810,7 @@ static int video_open(struct file *file)
for (i = 0; i < MAX_VID_CHANNEL_NUM; i++) {
if (h->channels[i].video_dev &&
- h->channels[i].video_dev->minor == minor) {
+ h->channels[i].video_dev->minor == minor) {
dev = h;
ch_id = i;
type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
@@ -848,11 +844,10 @@ static int video_open(struct file *file)
v4l2_prio_open(&dev->channels[ch_id].prio, &fh->prio);
- videobuf_queue_sg_init(&fh->vidq, &cx25821_video_qops,
- &dev->pci->dev, &dev->slock,
- V4L2_BUF_TYPE_VIDEO_CAPTURE,
- V4L2_FIELD_INTERLACED,
- sizeof(struct cx25821_buffer), fh, NULL);
+ videobuf_queue_sg_init(&fh->vidq, &cx25821_video_qops, &dev->pci->dev,
+ &dev->slock, V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ V4L2_FIELD_INTERLACED, sizeof(struct cx25821_buffer),
+ fh, NULL);
dprintk(1, "post videobuf_queue_init()\n");
mutex_unlock(&cx25821_devlist_mutex);
@@ -1168,8 +1163,8 @@ int cx25821_vidioc_querycap(struct file *file, void *priv,
strlcpy(cap->card, cx25821_boards[dev->board].name, sizeof(cap->card));
sprintf(cap->bus_info, "PCIe:%s", pci_name(dev->pci));
cap->version = CX25821_VERSION_CODE;
- cap->capabilities =
- V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE | V4L2_CAP_STREAMING;
+ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
+ V4L2_CAP_STREAMING;
if (UNSET != dev->tuner_type)
cap->capabilities |= V4L2_CAP_TUNER;
return 0;
@@ -1454,38 +1449,38 @@ static const struct v4l2_queryctrl no_ctl = {
static struct v4l2_queryctrl cx25821_ctls[] = {
/* --- video --- */
{
- .id = V4L2_CID_BRIGHTNESS,
- .name = "Brightness",
- .minimum = 0,
- .maximum = 10000,
- .step = 1,
- .default_value = 6200,
- .type = V4L2_CTRL_TYPE_INTEGER,
- }, {
- .id = V4L2_CID_CONTRAST,
- .name = "Contrast",
- .minimum = 0,
- .maximum = 10000,
- .step = 1,
- .default_value = 5000,
- .type = V4L2_CTRL_TYPE_INTEGER,
- }, {
- .id = V4L2_CID_SATURATION,
- .name = "Saturation",
- .minimum = 0,
- .maximum = 10000,
- .step = 1,
- .default_value = 5000,
- .type = V4L2_CTRL_TYPE_INTEGER,
- }, {
- .id = V4L2_CID_HUE,
- .name = "Hue",
- .minimum = 0,
- .maximum = 10000,
- .step = 1,
- .default_value = 5000,
- .type = V4L2_CTRL_TYPE_INTEGER,
- }
+ .id = V4L2_CID_BRIGHTNESS,
+ .name = "Brightness",
+ .minimum = 0,
+ .maximum = 10000,
+ .step = 1,
+ .default_value = 6200,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ }, {
+ .id = V4L2_CID_CONTRAST,
+ .name = "Contrast",
+ .minimum = 0,
+ .maximum = 10000,
+ .step = 1,
+ .default_value = 5000,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ }, {
+ .id = V4L2_CID_SATURATION,
+ .name = "Saturation",
+ .minimum = 0,
+ .maximum = 10000,
+ .step = 1,
+ .default_value = 5000,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ }, {
+ .id = V4L2_CID_HUE,
+ .name = "Hue",
+ .minimum = 0,
+ .maximum = 10000,
+ .step = 1,
+ .default_value = 5000,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ }
};
static const int CX25821_CTLS = ARRAY_SIZE(cx25821_ctls);
@@ -1623,7 +1618,8 @@ int cx25821_vidioc_cropcap(struct file *file, void *priv,
if (cropcap->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
- cropcap->bounds.top = cropcap->bounds.left = 0;
+ cropcap->bounds.top = 0;
+ cropcap->bounds.left = 0;
cropcap->bounds.width = 720;
cropcap->bounds.height = dev->tvnorm == V4L2_STD_PAL_BG ? 576 : 480;
cropcap->pixelaspect.numerator =
@@ -1829,8 +1825,11 @@ static long video_ioctl_set(struct file *file, unsigned int cmd,
struct downstream_user_struct *data_from_user;
int command;
int width = 720;
- int selected_channel = 0, pix_format = 0, i = 0;
- int cif_enable = 0, cif_width = 0;
+ int selected_channel = 0;
+ int pix_format = 0;
+ int i = 0;
+ int cif_enable = 0;
+ int cif_width = 0;
u32 value = 0;
data_from_user = (struct downstream_user_struct *)arg;
@@ -1895,8 +1894,8 @@ static long video_ioctl_set(struct file *file, unsigned int cmd,
}
if (selected_channel <= 7 && selected_channel >= 0) {
- dev->channels[selected_channel].
- use_cif_resolution = cif_enable;
+ dev->channels[selected_channel].use_cif_resolution =
+ cif_enable;
dev->channels[selected_channel].cif_width = width;
} else {
for (i = 0; i < VID_CHANNEL_NUM; i++) {
@@ -1932,9 +1931,9 @@ static long video_ioctl_set(struct file *file, unsigned int cmd,
static long cx25821_video_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
- int ret = 0;
+ int ret = 0;
- struct cx25821_fh *fh = file->private_data;
+ struct cx25821_fh *fh = file->private_data;
/* check to see if it's the video upstream */
if (fh->channel_id == SRAM_CH09) {
diff --git a/drivers/media/video/cx25821/cx25821.h b/drivers/media/video/cx25821/cx25821.h
index 2d2d00932823..b9aa801b00a7 100644
--- a/drivers/media/video/cx25821/cx25821.h
+++ b/drivers/media/video/cx25821/cx25821.h
@@ -67,7 +67,7 @@
#define MAX_CAMERAS 16
/* Max number of inputs by card */
-#define MAX_CX25821_INPUT 8
+#define MAX_CX25821_INPUT 8
#define INPUT(nr) (&cx25821_boards[dev->board].input[nr])
#define RESOURCE_VIDEO0 1
#define RESOURCE_VIDEO1 2
@@ -85,7 +85,7 @@
#define BUFFER_TIMEOUT (HZ) /* 0.5 seconds */
-#define UNKNOWN_BOARD 0
+#define UNKNOWN_BOARD 0
#define CX25821_BOARD 1
/* Currently supported by the driver */
diff --git a/drivers/media/video/cx25840/cx25840-audio.c b/drivers/media/video/cx25840/cx25840-audio.c
index 005f11093642..34b96c7cfd62 100644
--- a/drivers/media/video/cx25840/cx25840-audio.c
+++ b/drivers/media/video/cx25840/cx25840-audio.c
@@ -480,7 +480,6 @@ void cx25840_audio_set_path(struct i2c_client *client)
static void set_volume(struct i2c_client *client, int volume)
{
- struct cx25840_state *state = to_state(i2c_get_clientdata(client));
int vol;
/* Convert the volume to msp3400 values (0-127) */
@@ -496,14 +495,7 @@ static void set_volume(struct i2c_client *client, int volume)
}
/* PATH1_VOLUME */
- if (is_cx2388x(state)) {
- /* for cx23885 volume doesn't work,
- * the calculation always results in
- * e4 regardless.
- */
- cx25840_write(client, 0x8d4, volume);
- } else
- cx25840_write(client, 0x8d4, 228 - (vol * 2));
+ cx25840_write(client, 0x8d4, 228 - (vol * 2));
}
static void set_balance(struct i2c_client *client, int balance)
diff --git a/drivers/media/video/cx25840/cx25840-core.c b/drivers/media/video/cx25840/cx25840-core.c
index cd9976408ab3..05247d4c340a 100644
--- a/drivers/media/video/cx25840/cx25840-core.c
+++ b/drivers/media/video/cx25840/cx25840-core.c
@@ -18,6 +18,9 @@
* CX2388[578] IRQ handling, IO Pin mux configuration and other small fixes are
* Copyright (C) 2010 Andy Walls <awalls@md.metrocast.net>
*
+ * CX23888 DIF support for the HVR1850
+ * Copyright (C) 2011 Steven Toth <stoth@kernellabs.com>
+ *
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
@@ -40,6 +43,7 @@
#include <linux/videodev2.h>
#include <linux/i2c.h>
#include <linux/delay.h>
+#include <linux/math64.h>
#include <media/v4l2-common.h>
#include <media/v4l2-chip-ident.h>
#include <media/cx25840.h>
@@ -80,6 +84,7 @@ MODULE_PARM_DESC(debug, "Debugging messages [0=Off (default) 1=On]");
/* ----------------------------------------------------------------------- */
+static void cx23885_std_setup(struct i2c_client *client);
int cx25840_write(struct i2c_client *client, u16 addr, u8 value)
{
@@ -498,8 +503,13 @@ static void cx23885_initialize(struct i2c_client *client)
* 50.0 MHz * (0xb + 0xe8ba26/0x2000000)/4 = 5 * 28.636363 MHz
* 572.73 MHz before post divide
*/
- cx25840_write4(client, 0x11c, 0x00e8ba26);
- cx25840_write4(client, 0x118, 0x0000040b);
+ /* HVR1850 or 50MHz xtal */
+ cx25840_write(client, 0x2, 0x71);
+ cx25840_write4(client, 0x11c, 0x01d1744c);
+ cx25840_write4(client, 0x118, 0x00000416);
+ cx25840_write4(client, 0x404, 0x0010253e);
+ cx25840_write4(client, 0x42c, 0x42600000);
+ cx25840_write4(client, 0x44c, 0x161f1000);
break;
case V4L2_IDENT_CX23887_AV:
/*
@@ -533,8 +543,18 @@ static void cx23885_initialize(struct i2c_client *client)
* 28.636363 MHz * (0xf + 0x02be2c9/0x2000000)/4 = 8 * 13.5 MHz
* 432.0 MHz before post divide
*/
- cx25840_write4(client, 0x10c, 0x002be2c9);
- cx25840_write4(client, 0x108, 0x0000040f);
+
+ /* HVR1850 */
+ switch (state->id) {
+ case V4L2_IDENT_CX23888_AV:
+ /* 888/HVR1250 specific */
+ cx25840_write4(client, 0x10c, 0x13333333);
+ cx25840_write4(client, 0x108, 0x00000515);
+ break;
+ default:
+ cx25840_write4(client, 0x10c, 0x002be2c9);
+ cx25840_write4(client, 0x108, 0x0000040f);
+ }
/* Luma */
cx25840_write4(client, 0x414, 0x00107d12);
@@ -556,8 +576,9 @@ static void cx23885_initialize(struct i2c_client *client)
* 368.64 MHz before post divide
* 122.88 MHz / 0xa = 12.288 MHz
*/
- cx25840_write4(client, 0x114, 0x00bedfa4);
- cx25840_write4(client, 0x110, 0x000a0307);
+ /* HVR1850 or 50MHz xtal */
+ cx25840_write4(client, 0x114, 0x017dbf48);
+ cx25840_write4(client, 0x110, 0x000a030e);
break;
case V4L2_IDENT_CX23887_AV:
/*
@@ -617,7 +638,10 @@ static void cx23885_initialize(struct i2c_client *client)
finish_wait(&state->fw_wait, &wait);
destroy_workqueue(q);
- cx25840_std_setup(client);
+ /* Call the cx23885 specific std setup func, we no longer rely on
+	 * the generic cx25840 func.
+ */
+ cx23885_std_setup(client);
/* (re)set input */
set_input(client, state->vid_input, state->aud_input);
@@ -631,6 +655,37 @@ static void cx23885_initialize(struct i2c_client *client)
/* Disable and clear audio interrupts - we don't use them */
cx25840_write(client, CX25840_AUD_INT_CTRL_REG, 0xff);
cx25840_write(client, CX25840_AUD_INT_STAT_REG, 0xff);
+
+ /* CC raw enable */
+ /* - VIP 1.1 control codes - 10bit, blue field enable.
+ * - enable raw data during vertical blanking.
+ * - enable ancillary Data insertion for 656 or VIP.
+ */
+ cx25840_write4(client, 0x404, 0x0010253e);
+
+ /* CC on - Undocumented Register */
+ cx25840_write(client, 0x42f, 0x66);
+
+ /* HVR-1250 / HVR1850 DIF related */
+ /* Power everything up */
+ cx25840_write4(client, 0x130, 0x0);
+
+ /* Undocumented */
+ cx25840_write4(client, 0x478, 0x6628021F);
+
+ /* AFE_CLK_OUT_CTRL - Select the clock output source as output */
+ cx25840_write4(client, 0x144, 0x5);
+
+ /* I2C_OUT_CTL - I2S output configuration as
+ * Master, Sony, Left justified, left sample on WS=1
+ */
+ cx25840_write4(client, 0x918, 0x1a0);
+
+ /* AFE_DIAG_CTRL1 */
+ cx25840_write4(client, 0x134, 0x000a1800);
+
+ /* AFE_DIAG_CTRL3 - Inverted Polarity for Audio and Video */
+ cx25840_write4(client, 0x13c, 0x00310000);
}
/* ----------------------------------------------------------------------- */
@@ -945,9 +1000,14 @@ static int set_input(struct i2c_client *client, enum cx25840_video_input vid_inp
vid_input <= CX25840_COMPOSITE8);
u8 is_component = (vid_input & CX25840_COMPONENT_ON) ==
CX25840_COMPONENT_ON;
+ u8 is_dif = (vid_input & CX25840_DIF_ON) ==
+ CX25840_DIF_ON;
+ u8 is_svideo = (vid_input & CX25840_SVIDEO_ON) ==
+ CX25840_SVIDEO_ON;
int luma = vid_input & 0xf0;
int chroma = vid_input & 0xf00;
u8 reg;
+ u32 val;
v4l_dbg(1, cx25840_debug, client,
"decoder set video input %d, audio input %d\n",
@@ -1012,6 +1072,66 @@ static int set_input(struct i2c_client *client, enum cx25840_video_input vid_inp
else
cx25840_and_or(client, 0x401, ~0x6, is_composite ? 0 : 0x02);
+ if (is_cx2388x(state)) {
+
+ /* Enable or disable the DIF for tuner use */
+ if (is_dif) {
+ cx25840_and_or(client, 0x102, ~0x80, 0x80);
+
+ /* Set of defaults for NTSC and PAL */
+ cx25840_write4(client, 0x31c, 0xc2262600);
+ cx25840_write4(client, 0x320, 0xc2262600);
+
+ /* 18271 IF - Nobody else yet uses a different
+ * tuner with the DIF, so these are reasonable
+ * assumptions (HVR1250 and HVR1850 specific).
+ */
+ cx25840_write4(client, 0x318, 0xda262600);
+ cx25840_write4(client, 0x33c, 0x2a24c800);
+ cx25840_write4(client, 0x104, 0x0704dd00);
+ } else {
+ cx25840_write4(client, 0x300, 0x015c28f5);
+
+ cx25840_and_or(client, 0x102, ~0x80, 0);
+ cx25840_write4(client, 0x340, 0xdf7df83);
+ cx25840_write4(client, 0x104, 0x0704dd80);
+ cx25840_write4(client, 0x314, 0x22400600);
+ cx25840_write4(client, 0x318, 0x40002600);
+ cx25840_write4(client, 0x324, 0x40002600);
+ cx25840_write4(client, 0x32c, 0x0250e620);
+ cx25840_write4(client, 0x39c, 0x01FF0B00);
+
+ cx25840_write4(client, 0x410, 0xffff0dbf);
+ cx25840_write4(client, 0x414, 0x00137d03);
+ cx25840_write4(client, 0x418, 0x01008080);
+ cx25840_write4(client, 0x41c, 0x00000000);
+ cx25840_write4(client, 0x420, 0x001c3e0f);
+ cx25840_write4(client, 0x42c, 0x42600000);
+ cx25840_write4(client, 0x430, 0x0000039b);
+ cx25840_write4(client, 0x438, 0x00000000);
+
+ cx25840_write4(client, 0x440, 0xF8E3E824);
+ cx25840_write4(client, 0x444, 0x401040dc);
+ cx25840_write4(client, 0x448, 0xcd3f02a0);
+ cx25840_write4(client, 0x44c, 0x161f1000);
+ cx25840_write4(client, 0x450, 0x00000802);
+
+ cx25840_write4(client, 0x91c, 0x01000000);
+ cx25840_write4(client, 0x8e0, 0x03063870);
+ cx25840_write4(client, 0x8d4, 0x7FFF0024);
+ cx25840_write4(client, 0x8d0, 0x00063073);
+
+ cx25840_write4(client, 0x8c8, 0x00010000);
+ cx25840_write4(client, 0x8cc, 0x00080023);
+
+ /* DIF BYPASS */
+ cx25840_write4(client, 0x33c, 0x2a04c800);
+ }
+
+ /* Reset the DIF */
+ cx25840_write4(client, 0x398, 0);
+ }
+
if (!is_cx2388x(state) && !is_cx231xx(state)) {
/* Set CH_SEL_ADC2 to 1 if input comes from CH3 */
cx25840_and_or(client, 0x102, ~0x2, (reg & 0x80) == 0 ? 2 : 0);
@@ -1036,6 +1156,33 @@ static int set_input(struct i2c_client *client, enum cx25840_video_input vid_inp
cx25840_and_or(client, 0x102, ~0x2, 0);
}
}
+
+ /* cx23885 / SVIDEO */
+ if (is_cx2388x(state) && is_svideo) {
+#define AFE_CTRL (0x104)
+#define MODE_CTRL (0x400)
+ cx25840_and_or(client, 0x102, ~0x2, 0x2);
+
+ val = cx25840_read4(client, MODE_CTRL);
+ val &= 0xFFFFF9FF;
+
+ /* YC */
+ val |= 0x00000200;
+ val &= ~0x2000;
+ cx25840_write4(client, MODE_CTRL, val);
+
+ val = cx25840_read4(client, AFE_CTRL);
+
+ /* Chroma in select */
+ val |= 0x00001000;
+ val &= 0xfffffe7f;
+ /* Clear VGA_SEL_CH2 and VGA_SEL_CH3 (bits 7 and 8).
+ * This sets them to use video rather than audio.
+ * Only one of the two will be in use.
+ */
+ cx25840_write4(client, AFE_CTRL, val);
+ } else
+ cx25840_and_or(client, 0x102, ~0x2, 0);
}
state->vid_input = vid_input;
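
The literal masks in the S-Video path above are plain bit masks: 0xFFFFF9FF clears bits 9 and 10 of MODE_CTRL before bit 9 (0x200) selects YC, and 0xfffffe7f clears bits 7 and 8 of AFE_CTRL (VGA_SEL_CH2/CH3, per the in-line comment) while bit 12 (0x1000) selects the chroma input. A short standalone check of those constants (nothing below is driver code):

	#include <stdio.h>
	#include <stdint.h>

	/* Confirm the hard-coded masks used above are ~(bit 9 | bit 10) and
	 * ~(bit 7 | bit 8) respectively. */
	int main(void)
	{
		uint32_t mode_ctrl_clear = ~((1u << 9) | (1u << 10));	/* MODE_CTRL */
		uint32_t afe_ctrl_clear  = ~((1u << 7) | (1u << 8));	/* AFE_CTRL  */

		printf("0x%08X\n", mode_ctrl_clear);	/* 0xFFFFF9FF, as above */
		printf("0x%08X\n", afe_ctrl_clear);	/* 0xFFFFFE7F, as above */
		return 0;
	}
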
@@ -1086,6 +1233,23 @@ static int set_input(struct i2c_client *client, enum cx25840_video_input vid_inp
cx25840_write4(client, 0x8d0, 0x1f063870);
}
+ if (is_cx2388x(state)) {
+ /* HVR1850 */
+ /* AUD_IO_CTRL - I2S Input, Parallel1*/
+ /* - Channel 1 src - Parallel1 (Merlin out) */
+ /* - Channel 2 src - Parallel2 (Merlin out) */
+ /* - Channel 3 src - Parallel3 (Merlin AC97 out) */
+ /* - I2S source and dir - Merlin, output */
+ cx25840_write4(client, 0x124, 0x100);
+
+ if (!is_dif) {
+ /* Stop microcontroller if we don't need it
+ * to avoid audio popping on svideo/composite use.
+ */
+ cx25840_and_or(client, 0x803, ~0x10, 0x00);
+ }
+ }
+
return 0;
}
@@ -1134,7 +1298,10 @@ static int set_v4lstd(struct i2c_client *client)
}
cx25840_and_or(client, 0x400, ~0xf, fmt);
cx25840_and_or(client, 0x403, ~0x3, pal_m);
- cx25840_std_setup(client);
+ if (is_cx2388x(state))
+ cx23885_std_setup(client);
+ else
+ cx25840_std_setup(client);
if (!is_cx2583x(state))
input_change(client);
return 0;
@@ -1539,6 +1706,56 @@ static int cx25840_s_stream(struct v4l2_subdev *sd, int enable)
return 0;
}
+/* Query the current detected video format */
+static int cx25840_g_std(struct v4l2_subdev *sd, v4l2_std_id *std)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ v4l2_std_id stds[] = {
+ /* 0000 */ V4L2_STD_UNKNOWN,
+
+ /* 0001 */ V4L2_STD_NTSC_M,
+ /* 0010 */ V4L2_STD_NTSC_M_JP,
+ /* 0011 */ V4L2_STD_NTSC_443,
+ /* 0100 */ V4L2_STD_PAL,
+ /* 0101 */ V4L2_STD_PAL_M,
+ /* 0110 */ V4L2_STD_PAL_N,
+ /* 0111 */ V4L2_STD_PAL_Nc,
+ /* 1000 */ V4L2_STD_PAL_60,
+
+ /* 1001 */ V4L2_STD_UNKNOWN,
+ /* 1010 */ V4L2_STD_UNKNOWN,
+ /* 1011 */ V4L2_STD_UNKNOWN,
+ /* 1100 */ V4L2_STD_UNKNOWN,
+ /* 1101 */ V4L2_STD_UNKNOWN,
+ /* 1110 */ V4L2_STD_UNKNOWN,
+ /* 1111 */ V4L2_STD_UNKNOWN
+ };
+
+ u32 fmt = (cx25840_read4(client, 0x40c) >> 8) & 0xf;
+ *std = stds[fmt];
+
+ v4l_dbg(1, cx25840_debug, client, "g_std fmt = %x, v4l2_std_id = 0x%x\n",
+ fmt, (unsigned int)stds[fmt]);
+
+ return 0;
+}
+
+static int cx25840_g_input_status(struct v4l2_subdev *sd, u32 *status)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ /* A limited function that checks for signal status and returns
+ * the state.
+ */
+
+ /* Check for status of Horizontal lock (SRC lock isn't reliable) */
+ if ((cx25840_read4(client, 0x40c) & 0x00010000) == 0)
+ *status |= V4L2_IN_ST_NO_SIGNAL;
+
+ return 0;
+}
+
static int cx25840_s_std(struct v4l2_subdev *sd, v4l2_std_id std)
{
struct cx25840_state *state = to_state(sd);
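
Both new callbacks above decode the status register at 0x40c: cx25840_g_std() uses bits 11:8 as an index into the detected-standard table, and cx25840_g_input_status() treats bit 16 as horizontal lock. A worked decode of a hypothetical readback value (0x00010400 is illustrative only, not a documented register value):

	#include <stdio.h>
	#include <stdint.h>

	/* Decode a hypothetical 0x40c readback the same way the two
	 * functions above do. */
	int main(void)
	{
		uint32_t reg = 0x00010400;		/* illustrative value only */
		uint32_t fmt = (reg >> 8) & 0xf;	/* detected standard index */
		int hlock = (reg & 0x00010000) != 0;	/* horizontal lock bit */

		printf("fmt = %u (index 4 is V4L2_STD_PAL in the table above)\n", fmt);
		printf("%s\n", hlock ? "signal present"
				     : "no signal (V4L2_IN_ST_NO_SIGNAL)");
		return 0;
	}
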
@@ -1565,6 +1782,9 @@ static int cx25840_s_video_routing(struct v4l2_subdev *sd,
struct cx25840_state *state = to_state(sd);
struct i2c_client *client = v4l2_get_subdevdata(sd);
+ if (is_cx2388x(state))
+ cx23885_std_setup(client);
+
return set_input(client, input, state->aud_input);
}
@@ -1574,6 +1794,8 @@ static int cx25840_s_audio_routing(struct v4l2_subdev *sd,
struct cx25840_state *state = to_state(sd);
struct i2c_client *client = v4l2_get_subdevdata(sd);
+ if (is_cx2388x(state))
+ cx23885_std_setup(client);
return set_input(client, state->vid_input, input);
}
@@ -1786,6 +2008,3007 @@ static int cx25840_irq_handler(struct v4l2_subdev *sd, u32 status,
/* ----------------------------------------------------------------------- */
+#define DIF_PLL_FREQ_WORD (0x300)
+#define DIF_BPF_COEFF01 (0x348)
+#define DIF_BPF_COEFF23 (0x34c)
+#define DIF_BPF_COEFF45 (0x350)
+#define DIF_BPF_COEFF67 (0x354)
+#define DIF_BPF_COEFF89 (0x358)
+#define DIF_BPF_COEFF1011 (0x35c)
+#define DIF_BPF_COEFF1213 (0x360)
+#define DIF_BPF_COEFF1415 (0x364)
+#define DIF_BPF_COEFF1617 (0x368)
+#define DIF_BPF_COEFF1819 (0x36c)
+#define DIF_BPF_COEFF2021 (0x370)
+#define DIF_BPF_COEFF2223 (0x374)
+#define DIF_BPF_COEFF2425 (0x378)
+#define DIF_BPF_COEFF2627 (0x37c)
+#define DIF_BPF_COEFF2829 (0x380)
+#define DIF_BPF_COEFF3031 (0x384)
+#define DIF_BPF_COEFF3233 (0x388)
+#define DIF_BPF_COEFF3435 (0x38c)
+#define DIF_BPF_COEFF36 (0x390)
+
+void cx23885_dif_setup(struct i2c_client *client, u32 ifHz)
+{
+ u64 pll_freq;
+ u32 pll_freq_word;
+
+ v4l_dbg(1, cx25840_debug, client, "%s(%d)\n", __func__, ifHz);
+
+ /* Assuming TV */
+ /* Calculate the PLL frequency word based on the adjusted ifHz */
+ pll_freq = div_u64((u64)ifHz * 268435456, 50000000);
+ pll_freq_word = (u32)pll_freq;
+
+ cx25840_write4(client, DIF_PLL_FREQ_WORD, pll_freq_word);
+
+ /* Round down to the nearest 100KHz */
+ ifHz = (ifHz / 100000) * 100000;
+
+ if (ifHz < 3000000)
+ ifHz = 3000000;
+
+ if (ifHz > 16000000)
+ ifHz = 16000000;
+
+ v4l_dbg(1, cx25840_debug, client, "%s(%d) again\n", __func__, ifHz);
+
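
The PLL frequency word above is the IF in Hz scaled by 2^28 and divided by the 50 MHz reference, truncated to 32 bits; the IF is then rounded down to the nearest 100 kHz and clamped to the 3..16 MHz range before one of the coefficient tables below is selected. A quick standalone check of the frequency-word arithmetic (the 5.4 MHz IF is only an example input):

	#include <stdio.h>
	#include <stdint.h>

	/* pll_freq_word = ifHz * 2^28 / 50 MHz, truncating, mirroring
	 * div_u64((u64)ifHz * 268435456, 50000000) above. */
	int main(void)
	{
		uint32_t ifHz = 5400000;	/* example IF */
		uint64_t word = (uint64_t)ifHz * 268435456ULL / 50000000ULL;

		printf("0x%08x\n", (uint32_t)word);	/* prints 0x01ba5e35 */
		return 0;
	}
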
+ switch (ifHz) {
+ case 3000000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x00000002);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0x00080012);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0x001e0024);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0x001bfff8);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0xffb4ff50);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0xfed8fe68);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe24fe34);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0xfebaffc7);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0x014d031f);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0x04f0065d);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0x07010688);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0x04c901d6);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0xfe00f9d3);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0xf600f342);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0xf235f337);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xf64efb22);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0x0105070f);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0x0c460fce);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 3100000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x00000001);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0x00070012);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0x00220032);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0x00370026);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0xfff0ff91);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0xff0efe7c);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe01fdcc);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0xfe0afedb);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0x00440224);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0x0434060c);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0x0738074e);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0x06090361);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0xff99fb39);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0xf6fef3b6);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0xf21af2a5);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xf573fa33);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0x0034067d);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0x0bfb0fb9);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 3200000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x00000000);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0x0004000e);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0x00200038);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0x004c004f);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0x002fffdf);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0xff5cfeb6);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe0dfd92);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd7ffe03);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0xff36010a);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0x03410575);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0x072607d2);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0x071804d5);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0x0134fcb7);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0xf81ff451);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0xf223f22e);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xf4a7f94b);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0xff6405e8);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0x0bae0fa4);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 3300000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x0000ffff);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0x00000008);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0x001a0036);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0x0056006d);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0x00670030);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0xffbdff10);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe46fd8d);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd25fd4f);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0xfe35ffe0);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0x0224049f);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0x06c9080e);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0x07ef0627);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0x02c9fe45);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0xf961f513);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0xf250f1d2);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xf3ecf869);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0xfe930552);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0x0b5f0f8f);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 3400000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0xfffffffe);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0xfffd0001);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0x000f002c);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0x0054007d);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0x0093007c);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0x0024ff82);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0xfea6fdbb);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd03fcca);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0xfd51feb9);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0x00eb0392);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0x06270802);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0x08880750);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0x044dffdb);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0xfabdf5f8);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0xf2a0f193);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xf342f78f);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0xfdc404b9);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0x0b0e0f78);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 3500000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0xfffffffd);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0xfffafff9);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0x0002001b);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0x0046007d);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0x00ad00ba);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0x00870000);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0xff26fe1a);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd1bfc7e);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc99fda4);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0xffa5025c);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0x054507ad);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0x08dd0847);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0x05b80172);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0xfc2ef6ff);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0xf313f170);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xf2abf6bd);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0xfcf6041f);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0x0abc0f61);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 3600000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0xfffffffd);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0xfff8fff3);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0xfff50006);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0x002f006c);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0x00b200e3);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0x00dc007e);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0xffb9fea0);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd6bfc71);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc17fcb1);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0xfe65010b);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0x042d0713);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0x08ec0906);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0x07020302);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0xfdaff823);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0xf3a7f16a);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xf228f5f5);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0xfc2a0384);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0x0a670f4a);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 3700000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0xfff7ffef);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0xffe9fff1);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0x0010004d);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0x00a100f2);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0x011a00f0);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0x0053ff44);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0xfdedfca2);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0xfbd3fbef);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0xfd39ffae);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0x02ea0638);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0x08b50987);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0x08230483);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0xff39f960);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0xf45bf180);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xf1b8f537);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0xfb6102e7);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0x0a110f32);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 3800000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffe);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0xfff9ffee);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0xffe1ffdd);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0xfff00024);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0x007c00e5);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0x013a014a);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0x00e6fff8);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0xfe98fd0f);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0xfbd3fb67);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0xfc32fe54);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0x01880525);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0x083909c7);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0x091505ee);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0x00c7fab3);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0xf52df1b4);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xf15df484);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0xfa9b0249);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0x09ba0f19);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 3900000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x00000000);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0xfffbfff0);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0xffdeffcf);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0xffd1fff6);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0x004800be);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0x01390184);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0x016300ac);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0xff5efdb1);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc17fb23);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0xfb5cfd0d);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0x001703e4);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0x077b09c4);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0x09d2073c);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0x0251fc18);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0xf61cf203);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xf118f3dc);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0xf9d801aa);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0x09600eff);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 4000000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x00000001);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0xfffefff4);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0xffe1ffc8);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0xffbaffca);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0x000b0082);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0x01170198);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0x01c10152);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0x0030fe7b);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc99fb24);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0xfac3fbe9);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0xfea5027f);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0x0683097f);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0x0a560867);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0x03d2fd89);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0xf723f26f);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xf0e8f341);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0xf919010a);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0x09060ee5);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 4100000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x00010002);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0x0002fffb);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0xffe8ffca);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0xffacffa4);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0xffcd0036);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0x00d70184);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0x01f601dc);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0x00ffff60);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0xfd51fb6d);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0xfa6efaf5);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0xfd410103);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0x055708f9);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0x0a9e0969);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0x0543ff02);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0xf842f2f5);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xf0cef2b2);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0xf85e006b);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0x08aa0ecb);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 4200000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x00010003);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0x00050003);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0xfff3ffd3);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0xffaaff8b);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0xff95ffe5);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0x0080014a);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0x01fe023f);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0x01ba0050);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0xfe35fbf8);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0xfa62fa3b);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0xfbf9ff7e);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0x04010836);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0x0aa90a3d);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0x069f007f);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0xf975f395);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xf0cbf231);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0xf7a9ffcb);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0x084c0eaf);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 4300000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x00010003);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0x0008000a);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0x0000ffe4);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0xffb4ff81);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0xff6aff96);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0x001c00f0);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0x01d70271);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0x0254013b);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0xff36fcbd);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0xfa9ff9c5);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0xfadbfdfe);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0x028c073b);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0x0a750adf);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0x07e101fa);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0xfab8f44e);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xf0ddf1be);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0xf6f9ff2b);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0x07ed0e94);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 4400000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0x0009000f);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0x000efff8);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0xffc9ff87);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0xff52ff54);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0xffb5007e);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0x01860270);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0x02c00210);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0x0044fdb2);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0xfb22f997);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0xf9f2fc90);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0x0102060f);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0x0a050b4c);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0x0902036e);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0xfc0af51e);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xf106f15a);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0xf64efe8b);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0x078d0e77);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 4500000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x00000002);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0x00080012);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0x0019000e);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0xffe5ff9e);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0xff4fff25);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0xff560000);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0x0112023b);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0x02f702c0);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0x014dfec8);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0xfbe5f9b3);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0xf947fb41);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0xff7004b9);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0x095a0b81);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0x0a0004d8);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0xfd65f603);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xf144f104);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0xf5aafdec);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0x072b0e5a);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 4600000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x00000001);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0x00060012);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0x00200022);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0x0005ffc1);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0xff61ff10);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0xff09ff82);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0x008601d7);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0x02f50340);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0x0241fff0);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0xfcddfa19);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0xf8e2fa1e);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0xfde30343);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0x08790b7f);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0x0ad50631);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0xfec7f6fc);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xf198f0bd);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0xf50dfd4e);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0x06c90e3d);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 4700000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x0000ffff);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0x0003000f);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0x00220030);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0x0025ffed);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0xff87ff15);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0xfed6ff10);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0xffed014c);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0x02b90386);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0x03110119);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0xfdfefac4);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0xf8c6f92f);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0xfc6701b7);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0x07670b44);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0x0b7e0776);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0x002df807);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xf200f086);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0xf477fcb1);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0x06650e1e);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 4800000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0xfffffffe);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0xffff0009);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0x001e0038);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0x003f001b);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0xffbcff36);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0xfec2feb6);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0xff5600a5);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0x0248038d);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0x03b00232);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0xff39fbab);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0xf8f4f87f);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0xfb060020);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0x062a0ad2);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0x0bf908a3);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0x0192f922);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xf27df05e);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0xf3e8fc14);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0x06000e00);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 4900000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0xfffffffd);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0xfffc0002);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0x00160037);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0x00510046);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0xfff9ff6d);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0xfed0fe7c);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0xfecefff0);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0x01aa0356);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0x0413032b);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0x007ffcc5);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0xf96cf812);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0xf9cefe87);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0x04c90a2c);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0x0c4309b4);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0x02f3fa4a);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xf30ef046);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0xf361fb7a);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0x059b0de0);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 5000000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0xfffffffd);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0xfff9fffa);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0x000a002d);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0x00570067);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0x0037ffb5);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0xfefffe68);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe62ff3d);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0x00ec02e3);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0x043503f6);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0x01befe05);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0xfa27f7ee);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0xf8c6fcf8);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0x034c0954);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0x0c5c0aa4);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0x044cfb7e);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xf3b1f03f);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0xf2e2fae1);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0x05340dc0);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 5100000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0xfff8fff4);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0xfffd001e);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0x0051007b);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0x006e0006);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0xff48fe7c);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe1bfe9a);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0x001d023e);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0x04130488);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0x02e6ff5b);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0xfb1ef812);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0xf7f7fb7f);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0x01bc084e);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0x0c430b72);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0x059afcba);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xf467f046);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0xf26cfa4a);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0x04cd0da0);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 5200000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffe);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0xfff8ffef);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0xfff00009);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0x003f007f);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0x00980056);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0xffa5feb6);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe00fe15);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0xff4b0170);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0x03b004d7);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0x03e800b9);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0xfc48f87f);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0xf768fa23);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0x0022071f);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0x0bf90c1b);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0x06dafdfd);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xf52df05e);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0xf1fef9b5);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0x04640d7f);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 5300000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x0000ffff);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0xfff9ffee);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0xffe6fff3);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0x00250072);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0x00af009c);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0x000cff10);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe13fdb8);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0xfe870089);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0x031104e1);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0x04b8020f);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0xfd98f92f);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0xf71df8f0);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0xfe8805ce);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0x0b7e0c9c);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0x0808ff44);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xf603f086);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0xf19af922);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0x03fb0d5e);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 5400000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x00000001);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0xfffcffef);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0xffe0ffe0);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0x00050056);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0x00b000d1);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0x0071ff82);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe53fd8c);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0xfddfff99);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0x024104a3);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0x054a034d);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0xff01fa1e);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0xf717f7ed);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0xfcf50461);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0x0ad50cf4);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0x0921008d);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xf6e7f0bd);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0xf13ff891);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0x03920d3b);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 5500000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x00010002);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0xfffffff3);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0xffdeffd1);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0xffe5002f);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0x009c00ed);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0x00cb0000);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0xfebafd94);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd61feb0);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0x014d0422);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0x05970464);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0x0074fb41);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0xf759f721);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0xfb7502de);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0x0a000d21);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0x0a2201d4);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xf7d9f104);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0xf0edf804);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0x03280d19);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 5600000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x00010003);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0x0003fffa);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0xffe3ffc9);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0xffc90002);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0x007500ef);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0x010e007e);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0xff3dfdcf);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd16fddd);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0x00440365);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0x059b0548);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0x01e3fc90);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0xf7dff691);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0xfa0f014d);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0x09020d23);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0x0b0a0318);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xf8d7f15a);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0xf0a5f779);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0x02bd0cf6);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 5700000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x00010003);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0x00060001);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0xffecffc9);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0xffb4ffd4);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0x004000d5);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0x013600f0);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0xffd3fe39);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd04fd31);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0xff360277);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0x055605ef);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0x033efdfe);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0xf8a5f642);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0xf8cbffb6);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0x07e10cfb);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0x0bd50456);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xf9dff1be);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0xf067f6f2);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0x02520cd2);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 5800000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0x00080009);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0xfff8ffd2);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0xffaaffac);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0x000200a3);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0x013c014a);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0x006dfec9);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd2bfcb7);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0xfe350165);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0x04cb0651);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0x0477ff7e);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0xf9a5f635);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0xf7b1fe20);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0x069f0ca8);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0x0c81058b);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xfaf0f231);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0xf033f66d);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0x01e60cae);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 5900000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x00000002);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0x0009000e);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0x0005ffe1);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0xffacff90);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0xffc5005f);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0x01210184);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0x00fcff72);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd8afc77);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0xfd51003f);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0x04020669);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0x05830103);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0xfad7f66b);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0xf6c8fc93);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0x05430c2b);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0x0d0d06b5);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xfc08f2b2);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0xf00af5ec);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0x017b0c89);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 6000000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x00000001);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0x00070012);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0x0012fff5);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0xffbaff82);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0xff8e000f);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0x00e80198);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0x01750028);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0xfe18fc75);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc99ff15);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0x03050636);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0x0656027f);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0xfc32f6e2);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0xf614fb17);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0x03d20b87);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0x0d7707d2);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xfd26f341);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0xefeaf56f);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0x010f0c64);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 6100000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0xffff0000);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0x00050012);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0x001c000b);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0xffd1ff84);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0xff66ffbe);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0x00960184);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0x01cd00da);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0xfeccfcb2);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc17fdf9);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0x01e005bc);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0x06e703e4);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0xfdabf798);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0xf599f9b3);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0x02510abd);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0x0dbf08df);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xfe48f3dc);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0xefd5f4f6);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0x00a20c3e);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 6200000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0xfffffffe);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0x0002000f);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0x0021001f);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0xfff0ff97);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0xff50ff74);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0x0034014a);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0x01fa0179);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0xff97fd2a);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0xfbd3fcfa);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0x00a304fe);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0x07310525);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0xff37f886);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0xf55cf86e);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0x00c709d0);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0x0de209db);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xff6df484);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0xefcbf481);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0x00360c18);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 6300000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0xfffffffd);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0xfffe000a);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0x0021002f);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0x0010ffb8);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0xff50ff3b);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0xffcc00f0);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0x01fa01fa);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0x0069fdd4);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0xfbd3fc26);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0xff5d0407);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0x07310638);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0x00c9f9a8);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0xf55cf74e);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0xff3908c3);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0x0de20ac3);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0x0093f537);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0xefcbf410);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xffca0bf2);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 6400000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0xfffffffd);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0xfffb0003);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0x001c0037);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0x002fffe2);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0xff66ff17);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0xff6a007e);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0x01cd0251);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0x0134fea5);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc17fb8b);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0xfe2002e0);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0x06e70713);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0x0255faf5);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0xf599f658);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0xfdaf0799);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0x0dbf0b96);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0x01b8f5f5);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0xefd5f3a3);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xff5e0bca);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 6500000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0xfff9fffb);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0x00120037);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0x00460010);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0xff8eff0f);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0xff180000);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0x01750276);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0x01e8ff8d);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc99fb31);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0xfcfb0198);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0x065607ad);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0x03cefc64);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0xf614f592);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0xfc2e0656);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0x0d770c52);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0x02daf6bd);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0xefeaf33b);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xfef10ba3);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 6600000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffe);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0xfff7fff5);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0x0005002f);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0x0054003c);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0xffc5ff22);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0xfedfff82);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0x00fc0267);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0x0276007e);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0xfd51fb1c);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0xfbfe003e);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0x05830802);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0x0529fdec);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0xf6c8f4fe);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0xfabd04ff);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0x0d0d0cf6);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0x03f8f78f);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0xf00af2d7);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xfe850b7b);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 6700000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x0000ffff);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0xfff8fff0);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0xfff80020);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0x00560060);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0x0002ff4e);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0xfec4ff10);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0x006d0225);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0x02d50166);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0xfe35fb4e);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0xfb35fee1);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0x0477080e);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0x065bff82);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0xf7b1f4a0);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0xf9610397);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0x0c810d80);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0x0510f869);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0xf033f278);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xfe1a0b52);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 6800000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x00010000);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0xfffaffee);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0xffec000c);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0x004c0078);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0x0040ff8e);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0xfecafeb6);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0xffd301b6);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0x02fc0235);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0xff36fbc5);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0xfaaafd90);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0x033e07d2);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0x075b011b);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0xf8cbf47a);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0xf81f0224);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0x0bd50def);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0x0621f94b);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0xf067f21e);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xfdae0b29);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 6900000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x00010001);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0xfffdffef);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0xffe3fff6);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0x0037007f);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0x0075ffdc);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0xfef2fe7c);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0xff3d0122);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0x02ea02dd);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0x0044fc79);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0xfa65fc5d);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0x01e3074e);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0x082102ad);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0xfa0ff48c);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0xf6fe00a9);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0x0b0a0e43);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0x0729fa33);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0xf0a5f1c9);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xfd430b00);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 7000000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x00010002);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0x0001fff3);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0xffdeffe2);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0x001b0076);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0x009c002d);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0xff35fe68);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0xfeba0076);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0x029f0352);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0x014dfd60);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0xfa69fb53);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0x00740688);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0x08a7042d);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0xfb75f4d6);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0xf600ff2d);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0x0a220e7a);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0x0827fb22);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0xf0edf17a);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xfcd80ad6);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 7100000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0x0004fff9);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0xffe0ffd2);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0xfffb005e);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0x00b0007a);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0xff8ffe7c);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe53ffc1);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0x0221038c);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0x0241fe6e);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0xfab6fa80);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0xff010587);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0x08e90590);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0xfcf5f556);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0xf52bfdb3);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0x09210e95);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0x0919fc15);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0xf13ff12f);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xfc6e0aab);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 7200000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0x00070000);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0xffe6ffc9);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0xffdb0039);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0x00af00b8);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0xfff4feb6);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe13ff10);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0x01790388);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0x0311ff92);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0xfb48f9ed);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0xfd980453);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0x08e306cd);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0xfe88f60a);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0xf482fc40);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0x08080e93);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0x09fdfd0c);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0xf19af0ea);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xfc050a81);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 7300000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x00000002);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0x00080008);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0xfff0ffc9);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0xffc1000d);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0x009800e2);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0x005bff10);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe00fe74);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0x00b50345);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0x03b000bc);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0xfc18f9a1);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0xfc4802f9);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0x089807dc);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0x0022f6f0);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0xf407fada);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0x06da0e74);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0x0ad3fe06);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0xf1fef0ab);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xfb9c0a55);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 7400000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x00000001);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0x0008000e);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0xfffdffd0);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0xffafffdf);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0x006e00f2);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0x00b8ff82);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe1bfdf8);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0xffe302c8);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0x041301dc);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0xfd1af99e);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0xfb1e0183);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0x080908b5);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0x01bcf801);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0xf3bdf985);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0x059a0e38);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0x0b99ff03);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0xf26cf071);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xfb330a2a);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 7500000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0xffff0000);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0x00070011);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0x000affdf);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0xffa9ffb5);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0x003700e6);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0x01010000);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe62fda8);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0xff140219);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0x043502e1);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0xfe42f9e6);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0xfa270000);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0x073a0953);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0x034cf939);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0xf3a4f845);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0x044c0de1);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0x0c4f0000);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0xf2e2f03c);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xfacc09fe);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 7600000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0xffffffff);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0x00040012);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0x0016fff3);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0xffafff95);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0xfff900c0);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0x0130007e);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0xfecefd89);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0xfe560146);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0x041303bc);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0xff81fa76);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0xf96cfe7d);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0x063209b1);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0x04c9fa93);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0xf3bdf71e);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0x02f30d6e);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0x0cf200fd);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0xf361f00e);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xfa6509d1);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 7700000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0xfffffffe);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0x00010010);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0x001e0008);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0xffc1ff84);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0xffbc0084);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0x013e00f0);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0xff56fd9f);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0xfdb8005c);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0x03b00460);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0x00c7fb45);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0xf8f4fd07);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0x04fa09ce);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0x062afc07);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0xf407f614);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0x01920ce0);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0x0d8301fa);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0xf3e8efe5);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xfa0009a4);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 7800000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0xfffd000b);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0x0022001d);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0xffdbff82);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0xff870039);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0x012a014a);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0xffedfde7);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd47ff6b);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0x031104c6);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0x0202fc4c);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0xf8c6fbad);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0x039909a7);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0x0767fd8e);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0xf482f52b);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0x002d0c39);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0x0e0002f4);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0xf477efc2);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xf99b0977);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 7900000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0xfffa0004);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0x0020002d);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0xfffbff91);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0xff61ffe8);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0x00f70184);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0x0086fe5c);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd0bfe85);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0x024104e5);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0x0323fd7d);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0xf8e2fa79);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0x021d093f);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0x0879ff22);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0xf52bf465);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0xfec70b79);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0x0e6803eb);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0xf50defa5);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xf937094a);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 8000000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffe);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0xfff8fffd);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0x00190036);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0x001bffaf);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0xff4fff99);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0x00aa0198);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0x0112fef3);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd09fdb9);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0x014d04be);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0x041bfecc);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0xf947f978);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0x00900897);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0x095a00b9);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0xf600f3c5);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0xfd650aa3);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0x0ebc04de);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0xf5aaef8e);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xf8d5091c);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 8100000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x0000ffff);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0xfff7fff6);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0x000e0038);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0x0037ffd7);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0xff52ff56);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0x004b0184);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0x0186ffa1);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd40fd16);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0x00440452);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0x04de0029);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0xf9f2f8b2);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0xfefe07b5);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0x0a05024d);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0xf6fef34d);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0xfc0a09b8);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0x0efa05cd);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0xf64eef7d);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xf87308ed);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 8200000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x00010000);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0xfff8fff0);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0x00000031);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0x004c0005);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0xff6aff27);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0xffe4014a);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0x01d70057);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0xfdacfca6);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0xff3603a7);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0x05610184);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0xfadbf82e);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0xfd74069f);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0x0a7503d6);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0xf81ff2ff);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0xfab808b9);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0x0f2306b5);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0xf6f9ef72);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xf81308bf);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 8300000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x00010001);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0xfffbffee);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0xfff30022);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0x00560032);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0xff95ff10);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0xff8000f0);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0x01fe0106);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0xfe46fc71);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0xfe3502c7);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0x059e02ce);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0xfbf9f7f2);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0xfbff055b);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0x0aa9054c);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0xf961f2db);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0xf97507aa);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0x0f350797);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0xf7a9ef6d);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xf7b40890);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 8400000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x00010002);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0xfffeffee);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0xffe8000f);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0x00540058);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0xffcdff14);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0xff29007e);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0x01f6019e);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0xff01fc7c);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0xfd5101bf);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0x059203f6);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0xfd41f7fe);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0xfaa903f3);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0x0a9e06a9);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0xfabdf2e2);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0xf842068b);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0x0f320871);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0xf85eef6e);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xf7560860);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 8500000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0x0002fff2);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0xffe1fff9);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0x00460073);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0x000bff34);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0xfee90000);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0x01c10215);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0xffd0fcc5);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc99009d);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0x053d04f1);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0xfea5f853);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0xf97d0270);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0x0a5607e4);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0xfc2ef314);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0xf723055f);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0x0f180943);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0xf919ef75);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xf6fa0830);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 8600000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0x0005fff8);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0xffdeffe4);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0x002f007f);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0x0048ff6b);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0xfec7ff82);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0x0163025f);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0x00a2fd47);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc17ff73);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0x04a405b2);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0x0017f8ed);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0xf88500dc);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0x09d208f9);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0xfdaff370);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0xf61c0429);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0x0ee80a0b);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0xf9d8ef82);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xf6a00800);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 8700000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0x0007ffff);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0xffe1ffd4);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0x0010007a);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0x007cffb2);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0xfec6ff10);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0x00e60277);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0x0168fdf9);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0xfbd3fe50);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0x03ce0631);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0x0188f9c8);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0xf7c7ff43);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0x091509e3);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0xff39f3f6);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0xf52d02ea);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0x0ea30ac9);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0xfa9bef95);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xf64607d0);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 8800000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x00000002);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0x00090007);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0xffe9ffca);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0xfff00065);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0x00a10003);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0xfee6feb6);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0x0053025b);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0x0213fed0);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0xfbd3fd46);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0x02c70668);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0x02eafadb);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0xf74bfdae);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0x08230a9c);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0x00c7f4a3);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0xf45b01a6);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0x0e480b7c);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0xfb61efae);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xf5ef079f);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 8900000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0xffff0000);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0x0008000d);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0xfff5ffc8);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0xffd10043);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0x00b20053);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0xff24fe7c);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0xffb9020c);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0x0295ffbb);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc17fc64);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0x019b0654);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0x042dfc1c);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0xf714fc2a);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0x07020b21);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0x0251f575);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0xf3a7005e);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0x0dd80c24);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0xfc2aefcd);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xf599076e);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 9000000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0xffffffff);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0x00060011);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0x0002ffcf);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0xffba0018);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0x00ad009a);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0xff79fe68);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0xff260192);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0x02e500ab);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc99fbb6);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0x005b05f7);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0x0545fd81);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0xf723fabf);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0x05b80b70);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0x03d2f669);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0xf313ff15);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0x0d550cbf);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0xfcf6eff2);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xf544073d);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 9100000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0xfffffffe);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0x00030012);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0x000fffdd);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0xffacffea);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0x009300cf);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0xffdcfe7c);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0xfea600f7);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0x02fd0190);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0xfd51fb46);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0xff150554);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0x0627fefd);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0xf778f978);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0x044d0b87);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0x0543f77d);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0xf2a0fdcf);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0x0cbe0d4e);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0xfdc4f01d);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xf4f2070b);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 9200000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0x00000010);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0x001afff0);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0xffaaffbf);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0x006700ed);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0x0043feb6);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe460047);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0x02db0258);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0xfe35fb1b);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0xfddc0473);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0x06c90082);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0xf811f85e);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0x02c90b66);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0x069ff8ad);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0xf250fc8d);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0x0c140dcf);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0xfe93f04d);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xf4a106d9);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 9300000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0xfffc000c);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0x00200006);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0xffb4ff9c);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0x002f00ef);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0x00a4ff10);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe0dff92);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0x028102f7);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0xff36fb37);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0xfcbf035e);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0x07260202);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0xf8e8f778);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0x01340b0d);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0x07e1f9f4);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0xf223fb51);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0x0b590e42);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0xff64f083);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xf45206a7);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 9400000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0xfff90005);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0x0022001a);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0xffc9ff86);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0xfff000d7);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0x00f2ff82);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe01fee5);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0x01f60362);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0x0044fb99);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0xfbcc0222);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0x07380370);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0xf9f7f6cc);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0xff990a7e);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0x0902fb50);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0xf21afa1f);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0x0a8d0ea6);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0x0034f0bf);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xf4050675);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 9500000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffe);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0xfff8fffe);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0x001e002b);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0xffe5ff81);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0xffb400a5);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0x01280000);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe24fe50);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0x01460390);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0x014dfc3a);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0xfb1000ce);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0x070104bf);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0xfb37f65f);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0xfe0009bc);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0x0a00fcbb);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0xf235f8f8);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0x09b20efc);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0x0105f101);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xf3ba0642);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 9600000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x0001ffff);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0xfff8fff7);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0x00150036);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0x0005ff8c);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0xff810061);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0x013d007e);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe71fddf);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0x007c0380);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0x0241fd13);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0xfa94ff70);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0x068005e2);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0xfc9bf633);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0xfc7308ca);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0x0ad5fe30);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0xf274f7e0);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0x08c90f43);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0x01d4f147);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xf371060f);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 9700000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x00010001);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0xfff9fff1);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0x00090038);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0x0025ffa7);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0xff5e0012);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0x013200f0);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0xfee3fd9b);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0xffaa0331);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0x0311fe15);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0xfa60fe18);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0x05bd06d1);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0xfe1bf64a);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0xfafa07ae);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0x0b7effab);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0xf2d5f6d7);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0x07d30f7a);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0x02a3f194);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xf32905dc);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 9800000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x00010002);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0xfffcffee);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0xfffb0032);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0x003fffcd);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0xff4effc1);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0x0106014a);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0xff6efd8a);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0xfedd02aa);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0x03b0ff34);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0xfa74fcd7);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0x04bf0781);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0xffaaf6a3);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0xf99e066b);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0x0bf90128);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0xf359f5e1);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0x06d20fa2);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0x0370f1e5);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xf2e405a8);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 9900000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0xffffffee);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0xffef0024);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0x0051fffa);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0xff54ff77);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0x00be0184);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0x0006fdad);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0xfe2701f3);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0x0413005e);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0xfad1fbba);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0x039007ee);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0x013bf73d);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0xf868050a);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0x0c4302a1);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0xf3fdf4fe);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0x05c70fba);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0x043bf23c);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xf2a10575);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 10000000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0x0003fff1);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0xffe50011);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0x00570027);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0xff70ff3c);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0x00620198);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0x009efe01);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd95011a);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0x04350183);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0xfb71fad0);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0x023c0812);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0x02c3f811);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0xf75e0390);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0x0c5c0411);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0xf4c1f432);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0x04b30fc1);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0x0503f297);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xf2610541);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 10100000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0x0006fff7);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0xffdffffc);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0x00510050);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0xff9dff18);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0xfffc0184);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0x0128fe80);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd32002e);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0x04130292);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0xfc4dfa21);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0x00d107ee);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0x0435f91c);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0xf6850205);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0x0c430573);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0xf5a1f37d);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0x03990fba);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0x05c7f2f8);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xf222050d);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 10200000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x00000002);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0x0008fffe);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0xffdfffe7);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0x003f006e);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0xffd6ff0f);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0xff96014a);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0x0197ff1f);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd05ff3e);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0x03b0037c);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0xfd59f9b7);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0xff5d0781);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0x0585fa56);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0xf5e4006f);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0x0bf906c4);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0xf69df2e0);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0x02790fa2);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0x0688f35d);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xf1e604d8);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 10300000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0xffff0001);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0x00090005);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0xffe4ffd6);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0x0025007e);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0x0014ff20);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0xff3c00f0);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0x01e1ffd0);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd12fe5c);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0x03110433);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0xfe88f996);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0xfdf106d1);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0x06aafbb7);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0xf57efed8);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0x0b7e07ff);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0xf7b0f25e);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0x01560f7a);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0x0745f3c7);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xf1ac04a4);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 10400000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0xffffffff);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0x0008000c);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0xffedffcb);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0x0005007d);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0x0050ff4c);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0xfef6007e);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0x01ff0086);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd58fd97);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0x024104ad);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0xffcaf9c0);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0xfc9905e2);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0x079afd35);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0xf555fd46);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0x0ad50920);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0xf8d9f1f6);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0x00310f43);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0x07fdf435);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xf174046f);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 10500000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0xfffffffe);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0x00050011);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0xfffaffc8);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0xffe5006b);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0x0082ff8c);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0xfecc0000);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0x01f00130);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0xfdd2fcfc);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0x014d04e3);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0x010efa32);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0xfb6404bf);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0x084efec5);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0xf569fbc2);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0x0a000a23);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0xfa15f1ab);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xff0b0efc);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0x08b0f4a7);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xf13f043a);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 10600000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0x00020012);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0x0007ffcd);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0xffc9004c);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0x00a4ffd9);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0xfec3ff82);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0x01b401c1);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0xfe76fc97);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0x004404d2);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0x0245fae8);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0xfa5f0370);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0x08c1005f);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0xf5bcfa52);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0x09020b04);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0xfb60f17b);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xfde70ea6);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0x095df51e);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xf10c0405);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 10700000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0xffff0011);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0x0014ffdb);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0xffb40023);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0x00b2002a);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0xfedbff10);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0x0150022d);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0xff38fc6f);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0xff36047b);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0x035efbda);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0xf9940202);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0x08ee01f5);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0xf649f8fe);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0x07e10bc2);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0xfcb6f169);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xfcc60e42);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0x0a04f599);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xf0db03d0);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 10800000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0xfffb000d);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0x001dffed);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0xffaafff5);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0x00aa0077);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0xff13feb6);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0x00ce026b);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0x000afc85);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0xfe3503e3);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0x044cfcfb);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0xf90c0082);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0x08d5037f);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0xf710f7cc);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0x069f0c59);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0xfe16f173);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xfbaa0dcf);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0x0aa5f617);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xf0ad039b);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 10900000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffe);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0xfff90006);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0x00210003);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0xffacffc8);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0x008e00b6);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0xff63fe7c);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0x003a0275);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0x00dafcda);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0xfd510313);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0x0501fe40);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0xf8cbfefd);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0x087604f0);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0xf80af6c2);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0x05430cc8);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0xff7af19a);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xfa940d4e);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0x0b3ff699);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xf0810365);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 11000000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x0001ffff);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0xfff8ffff);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0x00210018);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0xffbaffa3);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0x006000e1);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0xffc4fe68);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0xffa0024b);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0x019afd66);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc990216);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0x0575ff99);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0xf8d4fd81);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0x07d40640);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0xf932f5e6);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0x03d20d0d);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0x00dff1de);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xf9860cbf);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0x0bd1f71e);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xf058032f);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 11100000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x00010000);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0xfff8fff8);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0x001b0029);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0xffd1ff8a);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0x002600f2);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0x002cfe7c);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0xff0f01f0);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0x023bfe20);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc1700fa);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0x05a200f7);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0xf927fc1c);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0x06f40765);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0xfa82f53b);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0x02510d27);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0x0243f23d);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xf8810c24);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0x0c5cf7a7);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xf03102fa);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 11200000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x00010002);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0xfffafff2);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0x00110035);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0xfff0ff81);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0xffe700e7);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0x008ffeb6);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe94016d);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0x02b0fefb);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0xfbd3ffd1);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0x05850249);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0xf9c1fadb);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0x05de0858);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0xfbf2f4c4);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0x00c70d17);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0x03a0f2b8);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xf7870b7c);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0x0cdff833);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xf00d02c4);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 11300000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0xfffdffee);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0x00040038);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0x0010ff88);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0xffac00c2);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0x00e2ff10);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe3900cb);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0x02f1ffe9);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0xfbd3feaa);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0x05210381);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0xfa9cf9c8);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0x04990912);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0xfd7af484);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0xff390cdb);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0x04f4f34d);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xf69a0ac9);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0x0d5af8c1);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xefec028e);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 11400000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0x0000ffee);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0xfff60033);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0x002fff9f);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0xff7b0087);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0x011eff82);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe080018);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0x02f900d8);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc17fd96);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0x04790490);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0xfbadf8ed);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0x032f098e);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0xff10f47d);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0xfdaf0c75);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0x063cf3fc);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xf5ba0a0b);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0x0dccf952);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xefcd0258);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 11500000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0x0004fff1);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0xffea0026);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0x0046ffc3);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0xff5a003c);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0x013b0000);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe04ff63);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0x02c801b8);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc99fca6);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0x0397056a);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0xfcecf853);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0x01ad09c9);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0x00acf4ad);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0xfc2e0be7);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0x0773f4c2);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xf4e90943);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0x0e35f9e6);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xefb10221);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 11600000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x00000002);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0x0007fff6);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0xffe20014);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0x0054ffee);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0xff4effeb);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0x0137007e);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe2efebb);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0x0260027a);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0xfd51fbe6);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0x02870605);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0xfe4af7fe);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0x001d09c1);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0x0243f515);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0xfabd0b32);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0x0897f59e);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xf4280871);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0x0e95fa7c);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xef9701eb);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 11700000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0xffff0001);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0x0008fffd);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0xffdeffff);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0x0056001d);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0xff57ff9c);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0x011300f0);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe82fe2e);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0x01ca0310);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0xfe35fb62);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0x0155065a);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0xffbaf7f2);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0xfe8c0977);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0x03cef5b2);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0xf9610a58);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0x09a5f68f);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xf3790797);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0x0eebfb14);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xef8001b5);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 11800000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0xffff0000);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0x00080004);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0xffe0ffe9);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0x004c0047);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0xff75ff58);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0x00d1014a);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0xfef9fdc8);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0x0111036f);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0xff36fb21);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0x00120665);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0x012df82e);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0xfd0708ec);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0x0542f682);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0xf81f095c);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0x0a9af792);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xf2db06b5);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0x0f38fbad);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xef6c017e);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 11900000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0xffffffff);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0x0007000b);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0xffe7ffd8);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0x00370068);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0xffa4ff28);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0x00790184);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0xff87fd91);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0x00430392);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0x0044fb26);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0xfece0626);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0x0294f8b2);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0xfb990825);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0x0698f77f);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0xf6fe0842);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0x0b73f8a7);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xf25105cd);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0x0f7bfc48);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xef5a0148);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 12000000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffe);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0x00050010);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0xfff2ffcc);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0x001b007b);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0xffdfff10);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0x00140198);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0x0020fd8e);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0xff710375);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0x014dfb73);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0xfd9a059f);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0x03e0f978);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0xfa4e0726);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0x07c8f8a7);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0xf600070c);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0x0c2ff9c9);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xf1db04de);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0x0fb4fce5);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xef4b0111);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 12100000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0x00010012);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0xffffffc8);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0xfffb007e);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0x001dff14);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0xffad0184);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0x00b7fdbe);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0xfea9031b);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0x0241fc01);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0xfc8504d6);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0x0504fa79);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0xf93005f6);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0x08caf9f2);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0xf52b05c0);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0x0ccbfaf9);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xf17903eb);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0x0fe3fd83);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xef3f00db);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 12200000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0xfffe0011);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0x000cffcc);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0xffdb0071);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0x0058ff32);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0xff4f014a);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0x013cfe1f);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0xfdfb028a);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0x0311fcc9);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0xfb9d03d6);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0x05f4fbad);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0xf848049d);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0x0999fb5b);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0xf4820461);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0x0d46fc32);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xf12d02f4);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0x1007fe21);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xef3600a4);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 12300000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffe);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0xfffa000e);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0x0017ffd9);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0xffc10055);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0x0088ff68);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0xff0400f0);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0x01a6fea7);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd7501cc);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0x03b0fdc0);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0xfaef02a8);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0x06a7fd07);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0xf79d0326);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0x0a31fcda);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0xf40702f3);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0x0d9ffd72);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xf0f601fa);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0x1021fec0);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xef2f006d);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 12400000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x0001ffff);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0xfff80007);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0x001fffeb);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0xffaf002d);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0x00a8ffb0);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0xfed3007e);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0x01e9ff4c);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd2000ee);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0x0413fed8);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0xfa82015c);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0x0715fe7d);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0xf7340198);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0x0a8dfe69);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0xf3bd017c);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0x0dd5feb8);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xf0d500fd);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0x1031ff60);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xef2b0037);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 12500000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x00010000);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0xfff70000);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0x00220000);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0xffa90000);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0x00b30000);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0xfec20000);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0x02000000);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd030000);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0x04350000);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0xfa5e0000);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0x073b0000);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0xf7110000);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0x0aac0000);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0xf3a40000);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0x0de70000);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xf0c90000);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0x10360000);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xef290000);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 12600000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x00010001);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0xfff8fff9);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0x001f0015);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0xffafffd3);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0x00a80050);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0xfed3ff82);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0x01e900b4);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd20ff12);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0x04130128);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0xfa82fea4);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0x07150183);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0xf734fe68);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0x0a8d0197);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0xf3bdfe84);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0x0dd50148);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xf0d5ff03);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0x103100a0);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xef2bffc9);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 12700000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x00000002);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0xfffafff2);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0x00170027);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0xffc1ffab);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0x00880098);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0xff04ff10);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0x01a60159);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd75fe34);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0x03b00240);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0xfaeffd58);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0x06a702f9);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0xf79dfcda);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0x0a310326);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0xf407fd0d);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0x0d9f028e);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xf0f6fe06);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0x10210140);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xef2fff93);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 12800000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0xfffeffef);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0x000c0034);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0xffdbff8f);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0x005800ce);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0xff4ffeb6);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0x013c01e1);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0xfdfbfd76);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0x03110337);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0xfb9dfc2a);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0x05f40453);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0xf848fb63);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0x099904a5);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0xf482fb9f);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0x0d4603ce);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xf12dfd0c);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0x100701df);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xef36ff5c);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 12900000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0x0001ffee);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0xffff0038);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0xfffbff82);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0x001d00ec);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0xffadfe7c);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0x00b70242);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0xfea9fce5);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0x024103ff);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0xfc85fb2a);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0x05040587);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0xf930fa0a);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0x08ca060e);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0xf52bfa40);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0x0ccb0507);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xf179fc15);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0x0fe3027d);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xef3fff25);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 13000000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x00000002);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0x0005fff0);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0xfff20034);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0x001bff85);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0xffdf00f0);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0x0014fe68);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0x00200272);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0xff71fc8b);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0x014d048d);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0xfd9afa61);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0x03e00688);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0xfa4ef8da);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0x07c80759);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0xf600f8f4);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0x0c2f0637);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xf1dbfb22);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0x0fb4031b);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xef4bfeef);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 13100000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0xffff0001);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0x0007fff5);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0xffe70028);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0x0037ff98);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0xffa400d8);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0x0079fe7c);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0xff87026f);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0x0043fc6e);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0x004404da);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0xfecef9da);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0x0294074e);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0xfb99f7db);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0x06980881);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0xf6fef7be);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0x0b730759);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xf251fa33);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0x0f7b03b8);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xef5afeb8);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 13200000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0xffff0000);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0x0008fffc);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0xffe00017);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0x004cffb9);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0xff7500a8);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0x00d1feb6);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0xfef90238);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0x0111fc91);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0xff3604df);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0x0012f99b);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0x012d07d2);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0xfd07f714);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0x0542097e);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0xf81ff6a4);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0x0a9a086e);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xf2dbf94b);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0x0f380453);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xef6cfe82);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 13300000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0xffffffff);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0x00080003);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0xffde0001);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0x0056ffe3);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0xff570064);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0x0113ff10);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe8201d2);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0x01cafcf0);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0xfe35049e);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0x0155f9a6);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0xffba080e);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0xfe8cf689);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0x03ce0a4e);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0xf961f5a8);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0x09a50971);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xf379f869);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0x0eeb04ec);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xef80fe4b);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 13400000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffe);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0x0007000a);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0xffe2ffec);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0x00540012);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0xff4e0015);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0x0137ff82);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe2e0145);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0x0260fd86);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0xfd51041a);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0x0287f9fb);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0xfe4a0802);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0x001df63f);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0x02430aeb);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0xfabdf4ce);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0x08970a62);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xf428f78f);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0x0e950584);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xef97fe15);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 13500000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0x0004000f);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0xffeaffda);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0x0046003d);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0xff5affc4);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0x013b0000);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe04009d);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0x02c8fe48);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc99035a);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0x0397fa96);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0xfcec07ad);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0x01adf637);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0x00ac0b53);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0xfc2ef419);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0x07730b3e);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xf4e9f6bd);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0x0e35061a);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xefb1fddf);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 13600000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0x00000012);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0xfff6ffcd);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0x002f0061);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0xff7bff79);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0x011e007e);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe08ffe8);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0x02f9ff28);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc17026a);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0x0479fb70);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0xfbad0713);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0x032ff672);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0xff100b83);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0xfdaff38b);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0x063c0c04);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xf5baf5f5);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0x0dcc06ae);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xefcdfda8);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 13700000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0xfffd0012);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0x0004ffc8);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0x00100078);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0xffacff3e);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0x00e200f0);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe39ff35);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0x02f10017);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0xfbd30156);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0x0521fc7f);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0xfa9c0638);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0x0499f6ee);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0xfd7a0b7c);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0xff39f325);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0x04f40cb3);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xf69af537);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0x0d5a073f);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xefecfd72);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 13800000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x0001fffe);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0xfffa000e);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0x0011ffcb);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0xfff0007f);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0xffe7ff19);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0x008f014a);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe94fe93);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0x02b00105);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0xfbd3002f);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0x0585fdb7);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0xf9c10525);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0x05def7a8);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0xfbf20b3c);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0x00c7f2e9);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0x03a00d48);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xf787f484);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0x0cdf07cd);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xf00dfd3c);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 13900000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x00010000);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0xfff80008);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0x001bffd7);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0xffd10076);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0x0026ff0e);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0x002c0184);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0xff0ffe10);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0x023b01e0);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc17ff06);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0x05a2ff09);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0xf92703e4);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0x06f4f89b);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0xfa820ac5);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0x0251f2d9);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0x02430dc3);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xf881f3dc);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0x0c5c0859);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xf031fd06);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 14000000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x00010001);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0xfff80001);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0x0021ffe8);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0xffba005d);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0x0060ff1f);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0xffc40198);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0xffa0fdb5);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0x019a029a);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc99fdea);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0x05750067);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0xf8d4027f);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0x07d4f9c0);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0xf9320a1a);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0x03d2f2f3);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0x00df0e22);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xf986f341);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0x0bd108e2);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xf058fcd1);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 14100000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x00000002);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0xfff9fffa);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0x0021fffd);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0xffac0038);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0x008eff4a);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0xff630184);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0x003afd8b);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0x00da0326);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0xfd51fced);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0x050101c0);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0xf8cb0103);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0x0876fb10);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0xf80a093e);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0x0543f338);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0xff7a0e66);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xfa94f2b2);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0x0b3f0967);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xf081fc9b);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 14200000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0xfffbfff3);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0x001d0013);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0xffaa000b);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0x00aaff89);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0xff13014a);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0x00cefd95);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0x000a037b);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0xfe35fc1d);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0x044c0305);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0xf90cff7e);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0x08d5fc81);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0xf7100834);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0x069ff3a7);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0xfe160e8d);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xfbaaf231);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0x0aa509e9);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xf0adfc65);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 14300000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0xffffffef);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0x00140025);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0xffb4ffdd);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0x00b2ffd6);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0xfedb00f0);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0x0150fdd3);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0xff380391);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0xff36fb85);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0x035e0426);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0xf994fdfe);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0x08eefe0b);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0xf6490702);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0x07e1f43e);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0xfcb60e97);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xfcc6f1be);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0x0a040a67);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xf0dbfc30);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 14400000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0x0002ffee);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0x00070033);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0xffc9ffb4);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0x00a40027);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0xfec3007e);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0x01b4fe3f);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0xfe760369);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0x0044fb2e);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0x02450518);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0xfa5ffc90);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0x08c1ffa1);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0xf5bc05ae);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0x0902f4fc);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0xfb600e85);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xfde7f15a);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0x095d0ae2);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xf10cfbfb);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 14500000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0xffff0002);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0x0005ffef);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0xfffa0038);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0xffe5ff95);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0x00820074);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0xfecc0000);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0x01f0fed0);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0xfdd20304);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0x014dfb1d);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0x010e05ce);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0xfb64fb41);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0x084e013b);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0xf569043e);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0x0a00f5dd);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0xfa150e55);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0xff0bf104);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0x08b00b59);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xf13ffbc6);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 14600000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0xffff0001);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0x0008fff4);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0xffed0035);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0x0005ff83);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0x005000b4);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0xfef6ff82);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0x01ffff7a);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd580269);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0x0241fb53);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0xffca0640);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0xfc99fa1e);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0x079a02cb);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0xf55502ba);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0x0ad5f6e0);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0xf8d90e0a);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0x0031f0bd);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0x07fd0bcb);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xf174fb91);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 14700000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0xffffffff);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0x0009fffb);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0xffe4002a);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0x0025ff82);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0x001400e0);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0xff3cff10);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0x01e10030);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd1201a4);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0x0311fbcd);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0xfe88066a);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0xfdf1f92f);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0x06aa0449);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0xf57e0128);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0x0b7ef801);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0xf7b00da2);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0x0156f086);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0x07450c39);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xf1acfb5c);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 14800000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffe);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0x00080002);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0xffdf0019);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0x003fff92);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0xffd600f1);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0xff96feb6);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0x019700e1);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd0500c2);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0x03b0fc84);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0xfd590649);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0xff5df87f);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0x058505aa);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0xf5e4ff91);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0x0bf9f93c);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0xf69d0d20);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0x0279f05e);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0x06880ca3);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xf1e6fb28);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 14900000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0x00060009);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0xffdf0004);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0x0051ffb0);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0xff9d00e8);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0xfffcfe7c);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0x01280180);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd32ffd2);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0x0413fd6e);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0xfc4d05df);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0x00d1f812);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0x043506e4);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0xf685fdfb);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0x0c43fa8d);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0xf5a10c83);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0x0399f046);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0x05c70d08);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xf222faf3);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 15000000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0x0003000f);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0xffe5ffef);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0x0057ffd9);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0xff7000c4);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0x0062fe68);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0x009e01ff);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd95fee6);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0x0435fe7d);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0xfb710530);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0x023cf7ee);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0x02c307ef);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0xf75efc70);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0x0c5cfbef);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0xf4c10bce);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0x04b3f03f);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0x05030d69);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xf261fabf);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 15100000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0xffff0012);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0xffefffdc);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0x00510006);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0xff540089);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0x00befe7c);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0x00060253);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0xfe27fe0d);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0x0413ffa2);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0xfad10446);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0x0390f812);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0x013b08c3);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0xf868faf6);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0x0c43fd5f);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0xf3fd0b02);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0x05c7f046);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0x043b0dc4);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xf2a1fa8b);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 15200000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x0001fffe);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0xfffc0012);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0xfffbffce);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0x003f0033);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0xff4e003f);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0x0106feb6);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0xff6e0276);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0xfeddfd56);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0x03b000cc);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0xfa740329);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0x04bff87f);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0xffaa095d);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0xf99ef995);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0x0bf9fed8);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0xf3590a1f);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0x06d2f05e);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0x03700e1b);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xf2e4fa58);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 15300000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x0001ffff);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0xfff9000f);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0x0009ffc8);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0x00250059);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0xff5effee);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0x0132ff10);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0xfee30265);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0xffaafccf);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0x031101eb);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0xfa6001e8);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0x05bdf92f);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0xfe1b09b6);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0xfafaf852);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0x0b7e0055);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0xf2d50929);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0x07d3f086);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0x02a30e6c);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xf329fa24);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 15400000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x00010001);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0xfff80009);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0x0015ffca);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0x00050074);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0xff81ff9f);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0x013dff82);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe710221);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0x007cfc80);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0x024102ed);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0xfa940090);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0x0680fa1e);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0xfc9b09cd);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0xfc73f736);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0x0ad501d0);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0xf2740820);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0x08c9f0bd);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0x01d40eb9);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xf371f9f1);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 15500000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x00000002);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0xfff80002);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0x001effd5);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0xffe5007f);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0xffb4ff5b);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0x01280000);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe2401b0);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0x0146fc70);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0x014d03c6);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0xfb10ff32);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0x0701fb41);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0xfb3709a1);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0xfe00f644);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0x0a000345);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0xf2350708);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0x09b2f104);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0x01050eff);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xf3baf9be);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 15600000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0xfff9fffb);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0x0022ffe6);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0xffc9007a);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0xfff0ff29);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0x00f2007e);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe01011b);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0x01f6fc9e);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0x00440467);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0xfbccfdde);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0x0738fc90);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0xf9f70934);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0xff99f582);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0x090204b0);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0xf21a05e1);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0x0a8df15a);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0x00340f41);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xf405f98b);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 15700000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0xfffcfff4);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0x0020fffa);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0xffb40064);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0x002fff11);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0x00a400f0);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe0d006e);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0x0281fd09);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0xff3604c9);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0xfcbffca2);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0x0726fdfe);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0xf8e80888);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0x0134f4f3);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0x07e1060c);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0xf22304af);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0x0b59f1be);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0xff640f7d);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xf452f959);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 15800000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0x0000fff0);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0x001a0010);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0xffaa0041);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0x0067ff13);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0x0043014a);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe46ffb9);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0x02dbfda8);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0xfe3504e5);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0xfddcfb8d);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0x06c9ff7e);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0xf81107a2);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0x02c9f49a);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0x069f0753);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0xf2500373);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0x0c14f231);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0xfe930fb3);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xf4a1f927);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 15900000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0xffff0002);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0x0003ffee);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0x000f0023);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0xffac0016);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0x0093ff31);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0xffdc0184);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0xfea6ff09);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0x02fdfe70);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0xfd5104ba);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0xff15faac);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0x06270103);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0xf7780688);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0x044df479);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0x05430883);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0xf2a00231);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0x0cbef2b2);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0xfdc40fe3);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xf4f2f8f5);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+
+ case 16000000:
+ cx25840_write4(client, DIF_BPF_COEFF01, 0xffff0001);
+ cx25840_write4(client, DIF_BPF_COEFF23, 0x0006ffef);
+ cx25840_write4(client, DIF_BPF_COEFF45, 0x00020031);
+ cx25840_write4(client, DIF_BPF_COEFF67, 0xffbaffe8);
+ cx25840_write4(client, DIF_BPF_COEFF89, 0x00adff66);
+ cx25840_write4(client, DIF_BPF_COEFF1011, 0xff790198);
+ cx25840_write4(client, DIF_BPF_COEFF1213, 0xff26fe6e);
+ cx25840_write4(client, DIF_BPF_COEFF1415, 0x02e5ff55);
+ cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc99044a);
+ cx25840_write4(client, DIF_BPF_COEFF1819, 0x005bfa09);
+ cx25840_write4(client, DIF_BPF_COEFF2021, 0x0545027f);
+ cx25840_write4(client, DIF_BPF_COEFF2223, 0xf7230541);
+ cx25840_write4(client, DIF_BPF_COEFF2425, 0x05b8f490);
+ cx25840_write4(client, DIF_BPF_COEFF2627, 0x03d20997);
+ cx25840_write4(client, DIF_BPF_COEFF2829, 0xf31300eb);
+ cx25840_write4(client, DIF_BPF_COEFF3031, 0x0d55f341);
+ cx25840_write4(client, DIF_BPF_COEFF3233, 0xfcf6100e);
+ cx25840_write4(client, DIF_BPF_COEFF3435, 0xf544f8c3);
+ cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
+ break;
+ }
+}
+
+static void cx23885_std_setup(struct i2c_client *client)
+{
+ struct cx25840_state *state = to_state(i2c_get_clientdata(client));
+ v4l2_std_id std = state->std;
+ u32 ifHz;
+
+ cx25840_write4(client, 0x478, 0x6628021F);
+ cx25840_write4(client, 0x400, 0x0);
+ cx25840_write4(client, 0x4b4, 0x20524030);
+ cx25840_write4(client, 0x47c, 0x010a8263);
+
+ if (std & V4L2_STD_NTSC) {
+ v4l_dbg(1, cx25840_debug, client, "%s() Selecting NTSC",
+ __func__);
+
+ /* Horiz / vert timing */
+ cx25840_write4(client, 0x428, 0x1e1e601a);
+ cx25840_write4(client, 0x424, 0x5b2d007a);
+
+ /* DIF NTSC */
+ cx25840_write4(client, 0x304, 0x6503bc0c);
+ cx25840_write4(client, 0x308, 0xbd038c85);
+ cx25840_write4(client, 0x30c, 0x1db4640a);
+ cx25840_write4(client, 0x310, 0x00008800);
+ cx25840_write4(client, 0x314, 0x44400400);
+ cx25840_write4(client, 0x32c, 0x0c800800);
+ cx25840_write4(client, 0x330, 0x27000100);
+ cx25840_write4(client, 0x334, 0x1f296e1f);
+ cx25840_write4(client, 0x338, 0x009f50c1);
+ cx25840_write4(client, 0x340, 0x1befbf06);
+ cx25840_write4(client, 0x344, 0x000035e8);
+
+ /* DIF I/F */
+ ifHz = 5400000;
+
+ } else {
+ v4l_dbg(1, cx25840_debug, client, "%s() Selecting PAL-BG",
+ __func__);
+
+ /* Horiz / vert timing */
+ cx25840_write4(client, 0x428, 0x28244024);
+ cx25840_write4(client, 0x424, 0x5d2d0084);
+
+ /* DIF */
+ cx25840_write4(client, 0x304, 0x6503bc0c);
+ cx25840_write4(client, 0x308, 0xbd038c85);
+ cx25840_write4(client, 0x30c, 0x1db4640a);
+ cx25840_write4(client, 0x310, 0x00008800);
+ cx25840_write4(client, 0x314, 0x44400600);
+ cx25840_write4(client, 0x32c, 0x0c800800);
+ cx25840_write4(client, 0x330, 0x27000100);
+ cx25840_write4(client, 0x334, 0x213530ec);
+ cx25840_write4(client, 0x338, 0x00a65ba8);
+ cx25840_write4(client, 0x340, 0x1befbf06);
+ cx25840_write4(client, 0x344, 0x000035e8);
+
+ /* DIF I/F */
+ ifHz = 6000000;
+ }
+
+ cx23885_dif_setup(client, ifHz);
+
+ /* Explicitly ensure the inputs are reconfigured after
+ * a standard change.
+ */
+ set_input(client, state->vid_input, state->aud_input);
+}
+
+/* ----------------------------------------------------------------------- */
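Judging by the register names in the coefficient tables above (DIF_BPF_COEFF01 through DIF_BPF_COEFF36) and the values written to them, each 32-bit write appears to carry two signed 16-bit band-pass filter taps: the even-numbered tap in the high half-word and the odd-numbered tap in the low half-word, with the final register holding tap 36 alone. A minimal sketch of that assumed packing follows; the helper name is illustrative and not part of the driver:

	#include <linux/types.h>

	/* Assumed layout: even-numbered tap in bits 31:16, odd-numbered tap
	 * in bits 15:0 of each DIF_BPF_COEFFxy register. */
	static inline u32 dif_bpf_pack(s16 even_tap, s16 odd_tap)
	{
		return ((u32)(u16)even_tap << 16) | (u16)odd_tap;
	}

	/* Under that assumption, dif_bpf_pack(0x0017, 0x0027) yields
	 * 0x00170027, the DIF_BPF_COEFF45 value written for the
	 * 12700000 Hz case above. */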
+
static const struct v4l2_ctrl_ops cx25840_ctrl_ops = {
.s_ctrl = cx25840_s_ctrl,
};
@@ -1801,6 +5024,7 @@ static const struct v4l2_subdev_core_ops cx25840_core_ops = {
.queryctrl = v4l2_subdev_queryctrl,
.querymenu = v4l2_subdev_querymenu,
.s_std = cx25840_s_std,
+ .g_std = cx25840_g_std,
.reset = cx25840_reset,
.load_fw = cx25840_load_fw,
.s_io_pin_config = common_s_io_pin_config,
@@ -1828,6 +5052,7 @@ static const struct v4l2_subdev_video_ops cx25840_video_ops = {
.s_routing = cx25840_s_video_routing,
.s_mbus_fmt = cx25840_s_mbus_fmt,
.s_stream = cx25840_s_stream,
+ .g_input_status = cx25840_g_input_status,
};
static const struct v4l2_subdev_vbi_ops cx25840_vbi_ops = {
diff --git a/drivers/media/video/cx88/Kconfig b/drivers/media/video/cx88/Kconfig
index 5c42abdf422f..3598dc087b08 100644
--- a/drivers/media/video/cx88/Kconfig
+++ b/drivers/media/video/cx88/Kconfig
@@ -70,11 +70,6 @@ config VIDEO_CX88_DVB
To compile this driver as a module, choose M here: the
module will be called cx88-dvb.
-config VIDEO_CX88_MPEG
- tristate
- depends on VIDEO_CX88_DVB || VIDEO_CX88_BLACKBIRD
- default y
-
config VIDEO_CX88_VP3054
tristate "VP-3054 Secondary I2C Bus Support"
default m
@@ -84,3 +79,8 @@ config VIDEO_CX88_VP3054
Conexant 2388x chip and the MT352 demodulator,
which also require support for the VP-3054
 Secondary I2C bus, such as DNTV Live! DVB-T Pro.
+
+config VIDEO_CX88_MPEG
+ tristate
+ depends on VIDEO_CX88_DVB || VIDEO_CX88_BLACKBIRD
+ default y
diff --git a/drivers/media/video/cx88/cx88-alsa.c b/drivers/media/video/cx88/cx88-alsa.c
index 68d1240f493c..04bf6627d362 100644
--- a/drivers/media/video/cx88/cx88-alsa.c
+++ b/drivers/media/video/cx88/cx88-alsa.c
@@ -96,7 +96,7 @@ typedef struct cx88_audio_dev snd_cx88_card_t;
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */
static const char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
-static int enable[SNDRV_CARDS] = {1, [1 ... (SNDRV_CARDS - 1)] = 1};
+static bool enable[SNDRV_CARDS] = {1, [1 ... (SNDRV_CARDS - 1)] = 1};
module_param_array(enable, bool, NULL, 0444);
MODULE_PARM_DESC(enable, "Enable cx88x soundcard. default enabled.");
diff --git a/drivers/media/video/cx88/cx88-cards.c b/drivers/media/video/cx88/cx88-cards.c
index 0d719faafd8a..cbd5d119a2c6 100644
--- a/drivers/media/video/cx88/cx88-cards.c
+++ b/drivers/media/video/cx88/cx88-cards.c
@@ -1306,7 +1306,7 @@ static const struct cx88_board cx88_boards[] = {
},
[CX88_BOARD_WINFAST_DTV2000H_J] = {
.name = "WinFast DTV2000 H rev. J",
- .tuner_type = TUNER_PHILIPS_FMD1216ME_MK3,
+ .tuner_type = TUNER_PHILIPS_FMD1216MEX_MK3,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
@@ -1573,8 +1573,8 @@ static const struct cx88_board cx88_boards[] = {
.name = "Pinnacle Hybrid PCTV",
.tuner_type = TUNER_XC2028,
.tuner_addr = 0x61,
- .radio_type = TUNER_XC2028,
- .radio_addr = 0x61,
+ .radio_type = UNSET,
+ .radio_addr = ADDR_UNSET,
.input = { {
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
@@ -1611,8 +1611,8 @@ static const struct cx88_board cx88_boards[] = {
.name = "Leadtek TV2000 XP Global",
.tuner_type = TUNER_XC2028,
.tuner_addr = 0x61,
- .radio_type = TUNER_XC2028,
- .radio_addr = 0x61,
+ .radio_type = UNSET,
+ .radio_addr = ADDR_UNSET,
.input = { {
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
@@ -1643,6 +1643,78 @@ static const struct cx88_board cx88_boards[] = {
.gpio3 = 0x0000,
},
},
+ [CX88_BOARD_WINFAST_TV2000_XP_GLOBAL_6F36] = {
+ .name = "Leadtek TV2000 XP Global (SC4100)",
+ .tuner_type = TUNER_XC4000,
+ .tuner_addr = 0x61,
+ .radio_type = UNSET,
+ .radio_addr = ADDR_UNSET,
+ .input = { {
+ .type = CX88_VMUX_TELEVISION,
+ .vmux = 0,
+ .gpio0 = 0x0400, /* pin 2 = 0 */
+ .gpio1 = 0x0000,
+ .gpio2 = 0x0C04, /* pin 18 = 1, pin 19 = 0 */
+ .gpio3 = 0x0000,
+ }, {
+ .type = CX88_VMUX_COMPOSITE1,
+ .vmux = 1,
+ .gpio0 = 0x0400, /* pin 2 = 0 */
+ .gpio1 = 0x0000,
+ .gpio2 = 0x0C0C, /* pin 18 = 1, pin 19 = 1 */
+ .gpio3 = 0x0000,
+ }, {
+ .type = CX88_VMUX_SVIDEO,
+ .vmux = 2,
+ .gpio0 = 0x0400, /* pin 2 = 0 */
+ .gpio1 = 0x0000,
+ .gpio2 = 0x0C0C, /* pin 18 = 1, pin 19 = 1 */
+ .gpio3 = 0x0000,
+ } },
+ .radio = {
+ .type = CX88_RADIO,
+ .gpio0 = 0x0400, /* pin 2 = 0 */
+ .gpio1 = 0x0000,
+ .gpio2 = 0x0C00, /* pin 18 = 0, pin 19 = 0 */
+ .gpio3 = 0x0000,
+ },
+ },
+ [CX88_BOARD_WINFAST_TV2000_XP_GLOBAL_6F43] = {
+ .name = "Leadtek TV2000 XP Global (XC4100)",
+ .tuner_type = TUNER_XC4000,
+ .tuner_addr = 0x61,
+ .radio_type = UNSET,
+ .radio_addr = ADDR_UNSET,
+ .input = { {
+ .type = CX88_VMUX_TELEVISION,
+ .vmux = 0,
+ .gpio0 = 0x0400, /* pin 2 = 0 */
+ .gpio1 = 0x6040, /* pin 14 = 1, pin 13 = 0 */
+ .gpio2 = 0x0000,
+ .gpio3 = 0x0000,
+ }, {
+ .type = CX88_VMUX_COMPOSITE1,
+ .vmux = 1,
+ .gpio0 = 0x0400, /* pin 2 = 0 */
+ .gpio1 = 0x6060, /* pin 14 = 1, pin 13 = 1 */
+ .gpio2 = 0x0000,
+ .gpio3 = 0x0000,
+ }, {
+ .type = CX88_VMUX_SVIDEO,
+ .vmux = 2,
+ .gpio0 = 0x0400, /* pin 2 = 0 */
+ .gpio1 = 0x6060, /* pin 14 = 1, pin 13 = 1 */
+ .gpio2 = 0x0000,
+ .gpio3 = 0x0000,
+ } },
+ .radio = {
+ .type = CX88_RADIO,
+ .gpio0 = 0x0400, /* pin 2 = 0 */
+ .gpio1 = 0x6000, /* pin 14 = 1, pin 13 = 0 */
+ .gpio2 = 0x0000,
+ .gpio3 = 0x0000,
+ },
+ },
[CX88_BOARD_POWERCOLOR_REAL_ANGEL] = {
.name = "PowerColor RA330", /* Long names may confuse LIRC. */
.tuner_type = TUNER_XC2028,
@@ -2043,8 +2115,8 @@ static const struct cx88_board cx88_boards[] = {
.name = "Terratec Cinergy HT PCI MKII",
.tuner_type = TUNER_XC2028,
.tuner_addr = 0x61,
- .radio_type = TUNER_XC2028,
- .radio_addr = 0x61,
+ .radio_type = UNSET,
+ .radio_addr = ADDR_UNSET,
.input = { {
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
@@ -2082,9 +2154,9 @@ static const struct cx88_board cx88_boards[] = {
[CX88_BOARD_WINFAST_DTV1800H] = {
.name = "Leadtek WinFast DTV1800 Hybrid",
.tuner_type = TUNER_XC2028,
- .radio_type = TUNER_XC2028,
+ .radio_type = UNSET,
.tuner_addr = 0x61,
- .radio_addr = 0x61,
+ .radio_addr = ADDR_UNSET,
/*
* GPIO setting
*
@@ -2123,9 +2195,9 @@ static const struct cx88_board cx88_boards[] = {
[CX88_BOARD_WINFAST_DTV1800H_XC4000] = {
.name = "Leadtek WinFast DTV1800 H (XC4000)",
.tuner_type = TUNER_XC4000,
- .radio_type = TUNER_XC4000,
+ .radio_type = UNSET,
.tuner_addr = 0x61,
- .radio_addr = 0x61,
+ .radio_addr = ADDR_UNSET,
/*
* GPIO setting
*
@@ -2164,9 +2236,9 @@ static const struct cx88_board cx88_boards[] = {
[CX88_BOARD_WINFAST_DTV2000H_PLUS] = {
.name = "Leadtek WinFast DTV2000 H PLUS",
.tuner_type = TUNER_XC4000,
- .radio_type = TUNER_XC4000,
+ .radio_type = UNSET,
.tuner_addr = 0x61,
- .radio_addr = 0x61,
+ .radio_addr = ADDR_UNSET,
/*
* GPIO
* 2: 1: mute audio
@@ -2719,6 +2791,21 @@ static const struct cx88_subid cx88_subids[] = {
.subdevice = 0x6618,
.card = CX88_BOARD_WINFAST_TV2000_XP_GLOBAL,
}, {
+ /* TV2000 XP Global [107d:6618] */
+ .subvendor = 0x107d,
+ .subdevice = 0x6619,
+ .card = CX88_BOARD_WINFAST_TV2000_XP_GLOBAL,
+ }, {
+ /* WinFast TV2000 XP Global with XC4000 tuner */
+ .subvendor = 0x107d,
+ .subdevice = 0x6f36,
+ .card = CX88_BOARD_WINFAST_TV2000_XP_GLOBAL_6F36,
+ }, {
+ /* WinFast TV2000 XP Global with XC4000 tuner and different GPIOs */
+ .subvendor = 0x107d,
+ .subdevice = 0x6f43,
+ .card = CX88_BOARD_WINFAST_TV2000_XP_GLOBAL_6F43,
+ }, {
.subvendor = 0xb034,
.subdevice = 0x3034,
.card = CX88_BOARD_PROF_7301,
@@ -3075,6 +3162,8 @@ static int cx88_xc4000_tuner_callback(struct cx88_core *core,
switch (core->boardnr) {
case CX88_BOARD_WINFAST_DTV1800H_XC4000:
case CX88_BOARD_WINFAST_DTV2000H_PLUS:
+ case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL_6F36:
+ case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL_6F43:
return cx88_xc4000_winfast2000h_plus_callback(core,
command, arg);
}
@@ -3232,6 +3321,7 @@ static void cx88_card_setup_pre_i2c(struct cx88_core *core)
cx_set(MO_GP0_IO, 0x00001010);
break;
+ case CX88_BOARD_WINFAST_DTV2000H_J:
case CX88_BOARD_HAUPPAUGE_HVR3000:
case CX88_BOARD_HAUPPAUGE_HVR4000:
/* Init GPIO */
@@ -3250,6 +3340,8 @@ static void cx88_card_setup_pre_i2c(struct cx88_core *core)
case CX88_BOARD_WINFAST_DTV1800H_XC4000:
case CX88_BOARD_WINFAST_DTV2000H_PLUS:
+ case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL_6F36:
+ case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL_6F43:
cx88_xc4000_winfast2000h_plus_callback(core,
XC4000_TUNER_RESET, 0);
break;
diff --git a/drivers/media/video/cx88/cx88-dvb.c b/drivers/media/video/cx88/cx88-dvb.c
index cf3d33ab541b..003937cd72f5 100644
--- a/drivers/media/video/cx88/cx88-dvb.c
+++ b/drivers/media/video/cx88/cx88-dvb.c
@@ -815,9 +815,9 @@ static const u8 samsung_smt_7020_inittab[] = {
};
-static int samsung_smt_7020_tuner_set_params(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *params)
+static int samsung_smt_7020_tuner_set_params(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct cx8802_dev *dev = fe->dvb->priv;
u8 buf[4];
u32 div;
@@ -827,14 +827,14 @@ static int samsung_smt_7020_tuner_set_params(struct dvb_frontend *fe,
.buf = buf,
.len = sizeof(buf) };
- div = params->frequency / 125;
+ div = c->frequency / 125;
buf[0] = (div >> 8) & 0x7f;
buf[1] = div & 0xff;
buf[2] = 0x84; /* 0xC4 */
buf[3] = 0x00;
- if (params->frequency < 1500000)
+ if (c->frequency < 1500000)
buf[3] |= 0x10;
if (fe->ops.i2c_gate_ctrl)
@@ -954,6 +954,7 @@ static int dvb_register(struct cx8802_dev *dev)
struct cx88_core *core = dev->core;
struct videobuf_dvb_frontend *fe0, *fe1 = NULL;
int mfe_shared = 0; /* bus not shared by default */
+ int res = -EINVAL;
if (0 != core->i2c_rc) {
printk(KERN_ERR "%s/2: no i2c-bus available, cannot attach dvb drivers\n", core->name);
@@ -999,7 +1000,6 @@ static int dvb_register(struct cx8802_dev *dev)
}
break;
case CX88_BOARD_WINFAST_DTV2000H:
- case CX88_BOARD_WINFAST_DTV2000H_J:
case CX88_BOARD_HAUPPAUGE_HVR1100:
case CX88_BOARD_HAUPPAUGE_HVR1100LP:
case CX88_BOARD_HAUPPAUGE_HVR1300:
@@ -1013,6 +1013,17 @@ static int dvb_register(struct cx8802_dev *dev)
goto frontend_detach;
}
break;
+ case CX88_BOARD_WINFAST_DTV2000H_J:
+ fe0->dvb.frontend = dvb_attach(cx22702_attach,
+ &hauppauge_hvr_config,
+ &core->i2c_adap);
+ if (fe0->dvb.frontend != NULL) {
+ if (!dvb_attach(simple_tuner_attach, fe0->dvb.frontend,
+ &core->i2c_adap, 0x61,
+ TUNER_PHILIPS_FMD1216MEX_MK3))
+ goto frontend_detach;
+ }
+ break;
case CX88_BOARD_HAUPPAUGE_HVR3000:
/* MFE frontend 1 */
mfe_shared = 1;
@@ -1566,13 +1577,16 @@ static int dvb_register(struct cx8802_dev *dev)
call_all(core, core, s_power, 0);
/* register everything */
- return videobuf_dvb_register_bus(&dev->frontends, THIS_MODULE, dev,
- &dev->pci->dev, adapter_nr, mfe_shared, NULL);
+ res = videobuf_dvb_register_bus(&dev->frontends, THIS_MODULE, dev,
+ &dev->pci->dev, adapter_nr, mfe_shared, NULL);
+ if (res)
+ goto frontend_detach;
+ return res;
frontend_detach:
core->gate_ctrl = NULL;
videobuf_dvb_dealloc_frontends(&dev->frontends);
- return -EINVAL;
+ return res;
}
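The dvb_register() change above makes a failure from videobuf_dvb_register_bus() unwind through the same frontend_detach label as the earlier attach failures, instead of returning the raw result and leaking the allocated frontends. Reduced to its essentials, the pattern looks like the sketch below; all names are illustrative, not the driver's actual symbols:

	static int example_register(struct example_dev *dev)
	{
		int res = -EINVAL;

		if (!attach_frontend(dev))	/* attach failures...          */
			goto detach;		/* ...unwind immediately       */

		res = register_bus(dev);	/* the final step can also fail */
		if (res)
			goto detach;		/* unwind instead of leaking   */
		return res;

	detach:
		dealloc_frontends(dev);		/* single cleanup path         */
		return res;
	}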
/* ----------------------------------------------------------- */
diff --git a/drivers/media/video/cx88/cx88-i2c.c b/drivers/media/video/cx88/cx88-i2c.c
index a1fe0abb6e43..de0f1af74e41 100644
--- a/drivers/media/video/cx88/cx88-i2c.c
+++ b/drivers/media/video/cx88/cx88-i2c.c
@@ -132,7 +132,7 @@ static void do_i2c_scan(const char *name, struct i2c_client *c)
}
}
-/* init + register i2c algo-bit adapter */
+/* init + register i2c adapter */
int cx88_i2c_init(struct cx88_core *core, struct pci_dev *pci)
{
/* Prevents usage of invalid delay values */
diff --git a/drivers/media/video/cx88/cx88-input.c b/drivers/media/video/cx88/cx88-input.c
index e614201b5ed3..ebf448c48ca3 100644
--- a/drivers/media/video/cx88/cx88-input.c
+++ b/drivers/media/video/cx88/cx88-input.c
@@ -103,6 +103,8 @@ static void cx88_ir_handle_key(struct cx88_IR *ir)
case CX88_BOARD_WINFAST_DTV1800H_XC4000:
case CX88_BOARD_WINFAST_DTV2000H_PLUS:
case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL:
+ case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL_6F36:
+ case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL_6F43:
gpio = (gpio & 0x6ff) | ((cx_read(MO_GP1_IO) << 8) & 0x900);
auxgpio = gpio;
break;
@@ -302,6 +304,8 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
case CX88_BOARD_WINFAST2000XP_EXPERT:
case CX88_BOARD_WINFAST_DTV1000:
case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL:
+ case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL_6F36:
+ case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL_6F43:
ir_codes = RC_MAP_WINFAST;
ir->gpio_addr = MO_GP0_IO;
ir->mask_keycode = 0x8f8;
diff --git a/drivers/media/video/cx88/cx88.h b/drivers/media/video/cx88/cx88.h
index fa8d307e1a3d..c9659def2a78 100644
--- a/drivers/media/video/cx88/cx88.h
+++ b/drivers/media/video/cx88/cx88.h
@@ -244,6 +244,8 @@ extern const struct sram_channel const cx88_sram_channels[];
#define CX88_BOARD_TEVII_S464 86
#define CX88_BOARD_WINFAST_DTV2000H_PLUS 87
#define CX88_BOARD_WINFAST_DTV1800H_XC4000 88
+#define CX88_BOARD_WINFAST_TV2000_XP_GLOBAL_6F36 89
+#define CX88_BOARD_WINFAST_TV2000_XP_GLOBAL_6F43 90
enum cx88_itype {
CX88_VMUX_COMPOSITE1 = 1,
diff --git a/drivers/media/video/davinci/dm355_ccdc.c b/drivers/media/video/davinci/dm355_ccdc.c
index bd443ee76fff..f83baf3a52b0 100644
--- a/drivers/media/video/davinci/dm355_ccdc.c
+++ b/drivers/media/video/davinci/dm355_ccdc.c
@@ -1069,15 +1069,4 @@ static struct platform_driver dm355_ccdc_driver = {
.probe = dm355_ccdc_probe,
};
-static int __init dm355_ccdc_init(void)
-{
- return platform_driver_register(&dm355_ccdc_driver);
-}
-
-static void __exit dm355_ccdc_exit(void)
-{
- platform_driver_unregister(&dm355_ccdc_driver);
-}
-
-module_init(dm355_ccdc_init);
-module_exit(dm355_ccdc_exit);
+module_platform_driver(dm355_ccdc_driver);
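This dm355_ccdc conversion, like the dm644x_ccdc, isif, vpbe and vpbe_display conversions that follow, only removes boilerplate: module_platform_driver() generates an equivalent init/exit pair itself. Roughly, the macro expands to something like the following (an illustrative expansion, not the literal kernel macro text):

	static int __init dm355_ccdc_driver_init(void)
	{
		return platform_driver_register(&dm355_ccdc_driver);
	}
	module_init(dm355_ccdc_driver_init);

	static void __exit dm355_ccdc_driver_exit(void)
	{
		platform_driver_unregister(&dm355_ccdc_driver);
	}
	module_exit(dm355_ccdc_driver_exit);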
diff --git a/drivers/media/video/davinci/dm644x_ccdc.c b/drivers/media/video/davinci/dm644x_ccdc.c
index 8051c2956478..9303fe553b07 100644
--- a/drivers/media/video/davinci/dm644x_ccdc.c
+++ b/drivers/media/video/davinci/dm644x_ccdc.c
@@ -1078,15 +1078,4 @@ static struct platform_driver dm644x_ccdc_driver = {
.probe = dm644x_ccdc_probe,
};
-static int __init dm644x_ccdc_init(void)
-{
- return platform_driver_register(&dm644x_ccdc_driver);
-}
-
-static void __exit dm644x_ccdc_exit(void)
-{
- platform_driver_unregister(&dm644x_ccdc_driver);
-}
-
-module_init(dm644x_ccdc_init);
-module_exit(dm644x_ccdc_exit);
+module_platform_driver(dm644x_ccdc_driver);
diff --git a/drivers/media/video/davinci/isif.c b/drivers/media/video/davinci/isif.c
index 29c29c668596..1e63852374be 100644
--- a/drivers/media/video/davinci/isif.c
+++ b/drivers/media/video/davinci/isif.c
@@ -1156,17 +1156,6 @@ static struct platform_driver isif_driver = {
.probe = isif_probe,
};
-static int __init isif_init(void)
-{
- return platform_driver_register(&isif_driver);
-}
-
-static void isif_exit(void)
-{
- platform_driver_unregister(&isif_driver);
-}
-
-module_init(isif_init);
-module_exit(isif_exit);
+module_platform_driver(isif_driver);
MODULE_LICENSE("GPL");
diff --git a/drivers/media/video/davinci/vpbe.c b/drivers/media/video/davinci/vpbe.c
index d773d30de221..c4a82a1a8a97 100644
--- a/drivers/media/video/davinci/vpbe.c
+++ b/drivers/media/video/davinci/vpbe.c
@@ -141,11 +141,12 @@ static int vpbe_enum_outputs(struct vpbe_device *vpbe_dev,
return 0;
}
-static int vpbe_get_mode_info(struct vpbe_device *vpbe_dev, char *mode)
+static int vpbe_get_mode_info(struct vpbe_device *vpbe_dev, char *mode,
+ int output_index)
{
struct vpbe_config *cfg = vpbe_dev->cfg;
struct vpbe_enc_mode_info var;
- int curr_output = vpbe_dev->current_out_index;
+ int curr_output = output_index;
int i;
if (NULL == mode)
@@ -245,6 +246,8 @@ static int vpbe_set_output(struct vpbe_device *vpbe_dev, int index)
struct encoder_config_info *curr_enc_info =
vpbe_current_encoder_info(vpbe_dev);
struct vpbe_config *cfg = vpbe_dev->cfg;
+ struct venc_platform_data *venc_device = vpbe_dev->venc_device;
+ enum v4l2_mbus_pixelcode if_params;
int enc_out_index;
int sd_index;
int ret = 0;
@@ -274,6 +277,8 @@ static int vpbe_set_output(struct vpbe_device *vpbe_dev, int index)
goto out;
}
+ if_params = cfg->outputs[index].if_params;
+ venc_device->setup_if_config(if_params);
if (ret)
goto out;
}
@@ -293,7 +298,7 @@ static int vpbe_set_output(struct vpbe_device *vpbe_dev, int index)
* encoder.
*/
ret = vpbe_get_mode_info(vpbe_dev,
- cfg->outputs[index].default_mode);
+ cfg->outputs[index].default_mode, index);
if (!ret) {
struct osd_state *osd_device = vpbe_dev->osd_device;
@@ -367,6 +372,11 @@ static int vpbe_s_dv_preset(struct vpbe_device *vpbe_dev,
ret = v4l2_subdev_call(vpbe_dev->encoders[sd_index], video,
s_dv_preset, dv_preset);
+ if (!ret && (vpbe_dev->amp != NULL)) {
+ /* Call amplifier subdevice */
+ ret = v4l2_subdev_call(vpbe_dev->amp, video,
+ s_dv_preset, dv_preset);
+ }
/* set the lcd controller output for the given mode */
if (!ret) {
struct osd_state *osd_device = vpbe_dev->osd_device;
@@ -566,6 +576,8 @@ static int platform_device_get(struct device *dev, void *data)
if (strcmp("vpbe-osd", pdev->name) == 0)
vpbe_dev->osd_device = platform_get_drvdata(pdev);
+ if (strcmp("vpbe-venc", pdev->name) == 0)
+ vpbe_dev->venc_device = dev_get_platdata(&pdev->dev);
return 0;
}
@@ -584,6 +596,7 @@ static int platform_device_get(struct device *dev, void *data)
static int vpbe_initialize(struct device *dev, struct vpbe_device *vpbe_dev)
{
struct encoder_config_info *enc_info;
+ struct amp_config_info *amp_info;
struct v4l2_subdev **enc_subdev;
struct osd_state *osd_device;
struct i2c_adapter *i2c_adap;
@@ -704,6 +717,32 @@ static int vpbe_initialize(struct device *dev, struct vpbe_device *vpbe_dev)
v4l2_warn(&vpbe_dev->v4l2_dev, "non-i2c encoders"
" currently not supported");
}
+ /* Add amplifier subdevice for dm365 */
+ if ((strcmp(vpbe_dev->cfg->module_name, "dm365-vpbe-display") == 0) &&
+ vpbe_dev->cfg->amp != NULL) {
+ amp_info = vpbe_dev->cfg->amp;
+ if (amp_info->is_i2c) {
+ vpbe_dev->amp = v4l2_i2c_new_subdev_board(
+ &vpbe_dev->v4l2_dev, i2c_adap,
+ &amp_info->board_info, NULL);
+ if (!vpbe_dev->amp) {
+ v4l2_err(&vpbe_dev->v4l2_dev,
+ "amplifier %s failed to register",
+ amp_info->module_name);
+ ret = -ENODEV;
+ goto vpbe_fail_amp_register;
+ }
+ v4l2_info(&vpbe_dev->v4l2_dev,
+ "v4l2 sub device %s registered\n",
+ amp_info->module_name);
+ } else {
+ vpbe_dev->amp = NULL;
+ v4l2_warn(&vpbe_dev->v4l2_dev, "non-i2c amplifiers"
+ " currently not supported");
+ }
+ } else {
+ vpbe_dev->amp = NULL;
+ }
/* set the current encoder and output to that of venc by default */
vpbe_dev->current_sd_index = 0;
@@ -731,6 +770,8 @@ static int vpbe_initialize(struct device *dev, struct vpbe_device *vpbe_dev)
/* TBD handling of bootargs for default output and mode */
return 0;
+vpbe_fail_amp_register:
+ kfree(vpbe_dev->amp);
vpbe_fail_sd_register:
kfree(vpbe_dev->encoders);
vpbe_fail_v4l2_device:
@@ -757,6 +798,7 @@ static void vpbe_deinitialize(struct device *dev, struct vpbe_device *vpbe_dev)
if (strcmp(vpbe_dev->cfg->module_name, "dm644x-vpbe-display") != 0)
clk_put(vpbe_dev->dac_clk);
+ kfree(vpbe_dev->amp);
kfree(vpbe_dev->encoders);
vpbe_dev->initialized = 0;
/* disable vpss clocks */
@@ -811,8 +853,10 @@ static __devinit int vpbe_probe(struct platform_device *pdev)
if (cfg->outputs->num_modes > 0)
vpbe_dev->current_timings = vpbe_dev->cfg->outputs[0].modes[0];
- else
+ else {
+ kfree(vpbe_dev);
return -ENODEV;
+ }
/* set the driver data in platform device */
platform_set_drvdata(pdev, vpbe_dev);
@@ -839,26 +883,4 @@ static struct platform_driver vpbe_driver = {
.remove = vpbe_remove,
};
-/**
- * vpbe_init: initialize the vpbe driver
- *
- * This function registers device and driver to the kernel
- */
-static __init int vpbe_init(void)
-{
- return platform_driver_register(&vpbe_driver);
-}
-
-/**
- * vpbe_cleanup : cleanup function for vpbe driver
- *
- * This will un-registers the device and driver to the kernel
- */
-static void vpbe_cleanup(void)
-{
- platform_driver_unregister(&vpbe_driver);
-}
-
-/* Function for module initialization and cleanup */
-module_init(vpbe_init);
-module_exit(vpbe_cleanup);
+module_platform_driver(vpbe_driver);
diff --git a/drivers/media/video/davinci/vpbe_display.c b/drivers/media/video/davinci/vpbe_display.c
index 8588a86d9b45..1f3b1c729252 100644
--- a/drivers/media/video/davinci/vpbe_display.c
+++ b/drivers/media/video/davinci/vpbe_display.c
@@ -1746,15 +1746,16 @@ static __devinit int vpbe_display_probe(struct platform_device *pdev)
for (i = 0; i < VPBE_DISPLAY_MAX_DEVICES; i++) {
if (register_device(disp_dev->dev[i], disp_dev, pdev)) {
err = -ENODEV;
- goto probe_out;
+ goto probe_out_irq;
}
}
printk(KERN_DEBUG "Successfully completed the probing of vpbe v4l2 device\n");
return 0;
-probe_out:
+probe_out_irq:
free_irq(res->start, disp_dev);
+probe_out:
for (k = 0; k < VPBE_DISPLAY_MAX_DEVICES; k++) {
/* Get the pointer to the layer object */
vpbe_display_layer = disp_dev->dev[k];
@@ -1816,43 +1817,7 @@ static struct platform_driver vpbe_display_driver = {
.remove = __devexit_p(vpbe_display_remove),
};
-/*
- * vpbe_display_init()
- * This function registers device and driver to the kernel, requests irq
- * handler and allocates memory for layer objects
- */
-static __devinit int vpbe_display_init(void)
-{
- int err;
-
- printk(KERN_DEBUG "vpbe_display_init\n");
-
- /* Register driver to the kernel */
- err = platform_driver_register(&vpbe_display_driver);
- if (0 != err)
- return err;
-
- printk(KERN_DEBUG "vpbe_display_init:"
- "VPBE V4L2 Display Driver V1.0 loaded\n");
- return 0;
-}
-
-/*
- * vpbe_display_cleanup()
- * This function un-registers device and driver to the kernel, frees requested
- * irq handler and de-allocates memory allocated for layer objects.
- */
-static void vpbe_display_cleanup(void)
-{
- printk(KERN_DEBUG "vpbe_display_cleanup\n");
-
- /* platform driver unregister */
- platform_driver_unregister(&vpbe_display_driver);
-}
-
-/* Function for module initialization and cleanup */
-module_init(vpbe_display_init);
-module_exit(vpbe_display_cleanup);
+module_platform_driver(vpbe_display_driver);
MODULE_DESCRIPTION("TI DM644x/DM355/DM365 VPBE Display controller");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/video/davinci/vpbe_osd.c b/drivers/media/video/davinci/vpbe_osd.c
index ceccf4302518..d6488b79ae3b 100644
--- a/drivers/media/video/davinci/vpbe_osd.c
+++ b/drivers/media/video/davinci/vpbe_osd.c
@@ -248,11 +248,29 @@ static void _osd_set_rec601_attenuation(struct osd_state *sd,
osd_modify(sd, OSD_OSDWIN0MD_ATN0E,
enable ? OSD_OSDWIN0MD_ATN0E : 0,
OSD_OSDWIN0MD);
+ if (sd->vpbe_type == VPBE_VERSION_1)
+ osd_modify(sd, OSD_OSDWIN0MD_ATN0E,
+ enable ? OSD_OSDWIN0MD_ATN0E : 0,
+ OSD_OSDWIN0MD);
+ else if ((sd->vpbe_type == VPBE_VERSION_3) ||
+ (sd->vpbe_type == VPBE_VERSION_2))
+ osd_modify(sd, OSD_EXTMODE_ATNOSD0EN,
+ enable ? OSD_EXTMODE_ATNOSD0EN : 0,
+ OSD_EXTMODE);
break;
case OSDWIN_OSD1:
osd_modify(sd, OSD_OSDWIN1MD_ATN1E,
enable ? OSD_OSDWIN1MD_ATN1E : 0,
OSD_OSDWIN1MD);
+ if (sd->vpbe_type == VPBE_VERSION_1)
+ osd_modify(sd, OSD_OSDWIN1MD_ATN1E,
+ enable ? OSD_OSDWIN1MD_ATN1E : 0,
+ OSD_OSDWIN1MD);
+ else if ((sd->vpbe_type == VPBE_VERSION_3) ||
+ (sd->vpbe_type == VPBE_VERSION_2))
+ osd_modify(sd, OSD_EXTMODE_ATNOSD1EN,
+ enable ? OSD_EXTMODE_ATNOSD1EN : 0,
+ OSD_EXTMODE);
break;
}
}
@@ -273,15 +291,71 @@ static void _osd_set_blending_factor(struct osd_state *sd,
}
}
+static void _osd_enable_rgb888_pixblend(struct osd_state *sd,
+ enum osd_win_layer osdwin)
+{
+
+ osd_modify(sd, OSD_MISCCTL_BLDSEL, 0, OSD_MISCCTL);
+ switch (osdwin) {
+ case OSDWIN_OSD0:
+ osd_modify(sd, OSD_EXTMODE_OSD0BLDCHR,
+ OSD_EXTMODE_OSD0BLDCHR, OSD_EXTMODE);
+ break;
+ case OSDWIN_OSD1:
+ osd_modify(sd, OSD_EXTMODE_OSD1BLDCHR,
+ OSD_EXTMODE_OSD1BLDCHR, OSD_EXTMODE);
+ break;
+ }
+}
+
static void _osd_enable_color_key(struct osd_state *sd,
enum osd_win_layer osdwin,
unsigned colorkey,
enum osd_pix_format pixfmt)
{
switch (pixfmt) {
+ case PIXFMT_1BPP:
+ case PIXFMT_2BPP:
+ case PIXFMT_4BPP:
+ case PIXFMT_8BPP:
+ if (sd->vpbe_type == VPBE_VERSION_3) {
+ switch (osdwin) {
+ case OSDWIN_OSD0:
+ osd_modify(sd, OSD_TRANSPBMPIDX_BMP0,
+ colorkey <<
+ OSD_TRANSPBMPIDX_BMP0_SHIFT,
+ OSD_TRANSPBMPIDX);
+ break;
+ case OSDWIN_OSD1:
+ osd_modify(sd, OSD_TRANSPBMPIDX_BMP1,
+ colorkey <<
+ OSD_TRANSPBMPIDX_BMP1_SHIFT,
+ OSD_TRANSPBMPIDX);
+ break;
+ }
+ }
+ break;
case PIXFMT_RGB565:
- osd_write(sd, colorkey & OSD_TRANSPVAL_RGBTRANS,
- OSD_TRANSPVAL);
+ if (sd->vpbe_type == VPBE_VERSION_1)
+ osd_write(sd, colorkey & OSD_TRANSPVAL_RGBTRANS,
+ OSD_TRANSPVAL);
+ else if (sd->vpbe_type == VPBE_VERSION_3)
+ osd_write(sd, colorkey & OSD_TRANSPVALL_RGBL,
+ OSD_TRANSPVALL);
+ break;
+ case PIXFMT_YCbCrI:
+ case PIXFMT_YCrCbI:
+ if (sd->vpbe_type == VPBE_VERSION_3)
+ osd_modify(sd, OSD_TRANSPVALU_Y, colorkey,
+ OSD_TRANSPVALU);
+ break;
+ case PIXFMT_RGB888:
+ if (sd->vpbe_type == VPBE_VERSION_3) {
+ osd_write(sd, colorkey & OSD_TRANSPVALL_RGBL,
+ OSD_TRANSPVALL);
+ osd_modify(sd, OSD_TRANSPVALU_RGBU, colorkey >> 16,
+ OSD_TRANSPVALU);
+ }
break;
default:
break;
@@ -470,23 +544,188 @@ static int osd_enable_layer(struct osd_state *sd, enum osd_layer layer,
return 0;
}
+#define OSD_SRC_ADDR_HIGH4 0x7800000
+#define OSD_SRC_ADDR_HIGH7 0x7F0000
+#define OSD_SRCADD_OFSET_SFT 23
+#define OSD_SRCADD_ADD_SFT 16
+#define OSD_WINADL_MASK 0xFFFF
+#define OSD_WINOFST_MASK 0x1000
+#define VPBE_REG_BASE 0x80000000
+
static void _osd_start_layer(struct osd_state *sd, enum osd_layer layer,
unsigned long fb_base_phys,
unsigned long cbcr_ofst)
{
- switch (layer) {
- case WIN_OSD0:
- osd_write(sd, fb_base_phys & ~0x1F, OSD_OSDWIN0ADR);
- break;
- case WIN_VID0:
- osd_write(sd, fb_base_phys & ~0x1F, OSD_VIDWIN0ADR);
- break;
- case WIN_OSD1:
- osd_write(sd, fb_base_phys & ~0x1F, OSD_OSDWIN1ADR);
- break;
- case WIN_VID1:
- osd_write(sd, fb_base_phys & ~0x1F, OSD_VIDWIN1ADR);
- break;
+
+ if (sd->vpbe_type == VPBE_VERSION_1) {
+ switch (layer) {
+ case WIN_OSD0:
+ osd_write(sd, fb_base_phys & ~0x1F, OSD_OSDWIN0ADR);
+ break;
+ case WIN_VID0:
+ osd_write(sd, fb_base_phys & ~0x1F, OSD_VIDWIN0ADR);
+ break;
+ case WIN_OSD1:
+ osd_write(sd, fb_base_phys & ~0x1F, OSD_OSDWIN1ADR);
+ break;
+ case WIN_VID1:
+ osd_write(sd, fb_base_phys & ~0x1F, OSD_VIDWIN1ADR);
+ break;
+ }
+ } else if (sd->vpbe_type == VPBE_VERSION_3) {
+ unsigned long fb_offset_32 =
+ (fb_base_phys - VPBE_REG_BASE) >> 5;
+
+ switch (layer) {
+ case WIN_OSD0:
+ osd_modify(sd, OSD_OSDWINADH_O0AH,
+ fb_offset_32 >> (OSD_SRCADD_ADD_SFT -
+ OSD_OSDWINADH_O0AH_SHIFT),
+ OSD_OSDWINADH);
+ osd_write(sd, fb_offset_32 & OSD_OSDWIN0ADL_O0AL,
+ OSD_OSDWIN0ADL);
+ break;
+ case WIN_VID0:
+ osd_modify(sd, OSD_VIDWINADH_V0AH,
+ fb_offset_32 >> (OSD_SRCADD_ADD_SFT -
+ OSD_VIDWINADH_V0AH_SHIFT),
+ OSD_VIDWINADH);
+ osd_write(sd, fb_offset_32 & OSD_VIDWIN0ADL_V0AL,
+ OSD_VIDWIN0ADL);
+ break;
+ case WIN_OSD1:
+ osd_modify(sd, OSD_OSDWINADH_O1AH,
+ fb_offset_32 >> (OSD_SRCADD_ADD_SFT -
+ OSD_OSDWINADH_O1AH_SHIFT),
+ OSD_OSDWINADH);
+ osd_write(sd, fb_offset_32 & OSD_OSDWIN1ADL_O1AL,
+ OSD_OSDWIN1ADL);
+ break;
+ case WIN_VID1:
+ osd_modify(sd, OSD_VIDWINADH_V1AH,
+ fb_offset_32 >> (OSD_SRCADD_ADD_SFT -
+ OSD_VIDWINADH_V1AH_SHIFT),
+ OSD_VIDWINADH);
+ osd_write(sd, fb_offset_32 & OSD_VIDWIN1ADL_V1AL,
+ OSD_VIDWIN1ADL);
+ break;
+ }
+ } else if (sd->vpbe_type == VPBE_VERSION_2) {
+ struct osd_window_state *win = &sd->win[layer];
+ unsigned long fb_offset_32, cbcr_offset_32;
+
+ fb_offset_32 = fb_base_phys - VPBE_REG_BASE;
+ if (cbcr_ofst)
+ cbcr_offset_32 = cbcr_ofst;
+ else
+ cbcr_offset_32 = win->lconfig.line_length *
+ win->lconfig.ysize;
+ cbcr_offset_32 += fb_offset_32;
+ fb_offset_32 = fb_offset_32 >> 5;
+ cbcr_offset_32 = cbcr_offset_32 >> 5;
+ /*
+ * DM365: the start address is a 27-bit long address; b26 - b23 go
+ * into offset register bits b12 - b9, and bit 26 has to be '1'
+ */
+ if (win->lconfig.pixfmt == PIXFMT_NV12) {
+ switch (layer) {
+ case WIN_VID0:
+ case WIN_VID1:
+ /* Y is in VID0 */
+ osd_modify(sd, OSD_VIDWIN0OFST_V0AH,
+ ((fb_offset_32 & OSD_SRC_ADDR_HIGH4) >>
+ (OSD_SRCADD_OFSET_SFT -
+ OSD_WINOFST_AH_SHIFT)) |
+ OSD_WINOFST_MASK, OSD_VIDWIN0OFST);
+ osd_modify(sd, OSD_VIDWINADH_V0AH,
+ (fb_offset_32 & OSD_SRC_ADDR_HIGH7) >>
+ (OSD_SRCADD_ADD_SFT -
+ OSD_VIDWINADH_V0AH_SHIFT),
+ OSD_VIDWINADH);
+ osd_write(sd, fb_offset_32 & OSD_WINADL_MASK,
+ OSD_VIDWIN0ADL);
+ /* CbCr is in VID1 */
+ osd_modify(sd, OSD_VIDWIN1OFST_V1AH,
+ ((cbcr_offset_32 &
+ OSD_SRC_ADDR_HIGH4) >>
+ (OSD_SRCADD_OFSET_SFT -
+ OSD_WINOFST_AH_SHIFT)) |
+ OSD_WINOFST_MASK, OSD_VIDWIN1OFST);
+ osd_modify(sd, OSD_VIDWINADH_V1AH,
+ (cbcr_offset_32 &
+ OSD_SRC_ADDR_HIGH7) >>
+ (OSD_SRCADD_ADD_SFT -
+ OSD_VIDWINADH_V1AH_SHIFT),
+ OSD_VIDWINADH);
+ osd_write(sd, cbcr_offset_32 & OSD_WINADL_MASK,
+ OSD_VIDWIN1ADL);
+ break;
+ default:
+ break;
+ }
+ }
+
+ switch (layer) {
+ case WIN_OSD0:
+ osd_modify(sd, OSD_OSDWIN0OFST_O0AH,
+ ((fb_offset_32 & OSD_SRC_ADDR_HIGH4) >>
+ (OSD_SRCADD_OFSET_SFT -
+ OSD_WINOFST_AH_SHIFT)) | OSD_WINOFST_MASK,
+ OSD_OSDWIN0OFST);
+ osd_modify(sd, OSD_OSDWINADH_O0AH,
+ (fb_offset_32 & OSD_SRC_ADDR_HIGH7) >>
+ (OSD_SRCADD_ADD_SFT -
+ OSD_OSDWINADH_O0AH_SHIFT), OSD_OSDWINADH);
+ osd_write(sd, fb_offset_32 & OSD_WINADL_MASK,
+ OSD_OSDWIN0ADL);
+ break;
+ case WIN_VID0:
+ if (win->lconfig.pixfmt != PIXFMT_NV12) {
+ osd_modify(sd, OSD_VIDWIN0OFST_V0AH,
+ ((fb_offset_32 & OSD_SRC_ADDR_HIGH4) >>
+ (OSD_SRCADD_OFSET_SFT -
+ OSD_WINOFST_AH_SHIFT)) |
+ OSD_WINOFST_MASK, OSD_VIDWIN0OFST);
+ osd_modify(sd, OSD_VIDWINADH_V0AH,
+ (fb_offset_32 & OSD_SRC_ADDR_HIGH7) >>
+ (OSD_SRCADD_ADD_SFT -
+ OSD_VIDWINADH_V0AH_SHIFT),
+ OSD_VIDWINADH);
+ osd_write(sd, fb_offset_32 & OSD_WINADL_MASK,
+ OSD_VIDWIN0ADL);
+ }
+ break;
+ case WIN_OSD1:
+ osd_modify(sd, OSD_OSDWIN1OFST_O1AH,
+ ((fb_offset_32 & OSD_SRC_ADDR_HIGH4) >>
+ (OSD_SRCADD_OFSET_SFT -
+ OSD_WINOFST_AH_SHIFT)) | OSD_WINOFST_MASK,
+ OSD_OSDWIN1OFST);
+ osd_modify(sd, OSD_OSDWINADH_O1AH,
+ (fb_offset_32 & OSD_SRC_ADDR_HIGH7) >>
+ (OSD_SRCADD_ADD_SFT -
+ OSD_OSDWINADH_O1AH_SHIFT),
+ OSD_OSDWINADH);
+ osd_write(sd, fb_offset_32 & OSD_WINADL_MASK,
+ OSD_OSDWIN1ADL);
+ break;
+ case WIN_VID1:
+ if (win->lconfig.pixfmt != PIXFMT_NV12) {
+ osd_modify(sd, OSD_VIDWIN1OFST_V1AH,
+ ((fb_offset_32 & OSD_SRC_ADDR_HIGH4) >>
+ (OSD_SRCADD_OFSET_SFT -
+ OSD_WINOFST_AH_SHIFT)) |
+ OSD_WINOFST_MASK, OSD_VIDWIN1OFST);
+ osd_modify(sd, OSD_VIDWINADH_V1AH,
+ (fb_offset_32 & OSD_SRC_ADDR_HIGH7) >>
+ (OSD_SRCADD_ADD_SFT -
+ OSD_VIDWINADH_V1AH_SHIFT),
+ OSD_VIDWINADH);
+ osd_write(sd, fb_offset_32 & OSD_WINADL_MASK,
+ OSD_VIDWIN1ADL);
+ }
+ break;
+ }
}
}
@@ -545,7 +784,7 @@ static int try_layer_config(struct osd_state *sd, enum osd_layer layer,
{
struct osd_state *osd = sd;
struct osd_window_state *win = &osd->win[layer];
- int bad_config;
+ int bad_config = 0;
/* verify that the pixel format is compatible with the layer */
switch (lconfig->pixfmt) {
@@ -554,17 +793,25 @@ static int try_layer_config(struct osd_state *sd, enum osd_layer layer,
case PIXFMT_4BPP:
case PIXFMT_8BPP:
case PIXFMT_RGB565:
- bad_config = !is_osd_win(layer);
+ if (osd->vpbe_type == VPBE_VERSION_1)
+ bad_config = !is_vid_win(layer);
break;
case PIXFMT_YCbCrI:
case PIXFMT_YCrCbI:
bad_config = !is_vid_win(layer);
break;
case PIXFMT_RGB888:
- bad_config = !is_vid_win(layer);
+ if (osd->vpbe_type == VPBE_VERSION_1)
+ bad_config = !is_vid_win(layer);
+ else if ((osd->vpbe_type == VPBE_VERSION_3) ||
+ (osd->vpbe_type == VPBE_VERSION_2))
+ bad_config = !is_osd_win(layer);
break;
case PIXFMT_NV12:
- bad_config = 1;
+ if (osd->vpbe_type != VPBE_VERSION_2)
+ bad_config = 1;
+ else
+ bad_config = is_osd_win(layer);
break;
case PIXFMT_OSD_ATTR:
bad_config = (layer != WIN_OSD1);
@@ -584,7 +831,8 @@ static int try_layer_config(struct osd_state *sd, enum osd_layer layer,
/* DM6446: */
/* only one OSD window at a time can use RGB pixel formats */
- if (is_osd_win(layer) && is_rgb_pixfmt(lconfig->pixfmt)) {
+ if ((osd->vpbe_type == VPBE_VERSION_1) &&
+ is_osd_win(layer) && is_rgb_pixfmt(lconfig->pixfmt)) {
enum osd_pix_format pixfmt;
if (layer == WIN_OSD0)
pixfmt = osd->win[WIN_OSD1].lconfig.pixfmt;
@@ -602,7 +850,8 @@ static int try_layer_config(struct osd_state *sd, enum osd_layer layer,
}
/* DM6446: only one video window at a time can use RGB888 */
- if (is_vid_win(layer) && lconfig->pixfmt == PIXFMT_RGB888) {
+ if ((osd->vpbe_type == VPBE_VERSION_1) && is_vid_win(layer) &&
+ lconfig->pixfmt == PIXFMT_RGB888) {
enum osd_pix_format pixfmt;
if (layer == WIN_VID0)
@@ -652,7 +901,8 @@ static void _osd_disable_vid_rgb888(struct osd_state *sd)
* The caller must ensure that neither video window is currently
* configured for RGB888 pixel format.
*/
- osd_clear(sd, OSD_MISCCTL_RGBEN, OSD_MISCCTL);
+ if (sd->vpbe_type == VPBE_VERSION_1)
+ osd_clear(sd, OSD_MISCCTL_RGBEN, OSD_MISCCTL);
}
static void _osd_enable_vid_rgb888(struct osd_state *sd,
@@ -665,13 +915,14 @@ static void _osd_enable_vid_rgb888(struct osd_state *sd,
* currently configured for RGB888 pixel format, as this routine will
* disable RGB888 pixel format for the other window.
*/
- if (layer == WIN_VID0) {
- osd_modify(sd, OSD_MISCCTL_RGBEN | OSD_MISCCTL_RGBWIN,
- OSD_MISCCTL_RGBEN, OSD_MISCCTL);
- } else if (layer == WIN_VID1) {
- osd_modify(sd, OSD_MISCCTL_RGBEN | OSD_MISCCTL_RGBWIN,
- OSD_MISCCTL_RGBEN | OSD_MISCCTL_RGBWIN,
- OSD_MISCCTL);
+ if (sd->vpbe_type == VPBE_VERSION_1) {
+ if (layer == WIN_VID0)
+ osd_modify(sd, OSD_MISCCTL_RGBEN | OSD_MISCCTL_RGBWIN,
+ OSD_MISCCTL_RGBEN, OSD_MISCCTL);
+ else if (layer == WIN_VID1)
+ osd_modify(sd, OSD_MISCCTL_RGBEN | OSD_MISCCTL_RGBWIN,
+ OSD_MISCCTL_RGBEN | OSD_MISCCTL_RGBWIN,
+ OSD_MISCCTL);
}
}
@@ -697,9 +948,30 @@ static void _osd_set_layer_config(struct osd_state *sd, enum osd_layer layer,
switch (layer) {
case WIN_OSD0:
- winmd_mask |= OSD_OSDWIN0MD_RGB0E;
- if (lconfig->pixfmt == PIXFMT_RGB565)
- winmd |= OSD_OSDWIN0MD_RGB0E;
+ if (sd->vpbe_type == VPBE_VERSION_1) {
+ winmd_mask |= OSD_OSDWIN0MD_RGB0E;
+ if (lconfig->pixfmt == PIXFMT_RGB565)
+ winmd |= OSD_OSDWIN0MD_RGB0E;
+ } else if ((sd->vpbe_type == VPBE_VERSION_3) ||
+ (sd->vpbe_type == VPBE_VERSION_2)) {
+ winmd_mask |= OSD_OSDWIN0MD_BMP0MD;
+ switch (lconfig->pixfmt) {
+ case PIXFMT_RGB565:
+ winmd |= (1 <<
+ OSD_OSDWIN0MD_BMP0MD_SHIFT);
+ break;
+ case PIXFMT_RGB888:
+ winmd |= (2 << OSD_OSDWIN0MD_BMP0MD_SHIFT);
+ _osd_enable_rgb888_pixblend(sd, OSDWIN_OSD0);
+ break;
+ case PIXFMT_YCbCrI:
+ case PIXFMT_YCrCbI:
+ winmd |= (3 << OSD_OSDWIN0MD_BMP0MD_SHIFT);
+ break;
+ default:
+ break;
+ }
+ }
winmd_mask |= OSD_OSDWIN0MD_BMW0 | OSD_OSDWIN0MD_OFF0;
@@ -749,12 +1021,59 @@ static void _osd_set_layer_config(struct osd_state *sd, enum osd_layer layer,
* For YUV420P format the register contents are
* duplicated in both VID registers
*/
+ if ((sd->vpbe_type == VPBE_VERSION_2) &&
+ (lconfig->pixfmt == PIXFMT_NV12)) {
+ /* other window also */
+ if (lconfig->interlaced) {
+ winmd_mask |= OSD_VIDWINMD_VFF1;
+ winmd |= OSD_VIDWINMD_VFF1;
+ osd_modify(sd, winmd_mask, winmd,
+ OSD_VIDWINMD);
+ }
+
+ osd_modify(sd, OSD_MISCCTL_S420D,
+ OSD_MISCCTL_S420D, OSD_MISCCTL);
+ osd_write(sd, lconfig->line_length >> 5,
+ OSD_VIDWIN1OFST);
+ osd_write(sd, lconfig->xpos, OSD_VIDWIN1XP);
+ osd_write(sd, lconfig->xsize, OSD_VIDWIN1XL);
+ /*
+ * if the NV12 pixfmt line length is not 32B
+ * aligned (e.g. NTSC), the window X pixel
+ * size needs to be 32B aligned as well
+ */
+ if (lconfig->xsize % 32) {
+ osd_write(sd,
+ ((lconfig->xsize + 31) & ~31),
+ OSD_VIDWIN1XL);
+ osd_write(sd,
+ ((lconfig->xsize + 31) & ~31),
+ OSD_VIDWIN0XL);
+ }
+ } else if ((sd->vpbe_type == VPBE_VERSION_2) &&
+ (lconfig->pixfmt != PIXFMT_NV12)) {
+ osd_modify(sd, OSD_MISCCTL_S420D, ~OSD_MISCCTL_S420D,
+ OSD_MISCCTL);
+ }
+
if (lconfig->interlaced) {
osd_write(sd, lconfig->ypos >> 1, OSD_VIDWIN0YP);
osd_write(sd, lconfig->ysize >> 1, OSD_VIDWIN0YL);
+ if ((sd->vpbe_type == VPBE_VERSION_2) &&
+ lconfig->pixfmt == PIXFMT_NV12) {
+ osd_write(sd, lconfig->ypos >> 1,
+ OSD_VIDWIN1YP);
+ osd_write(sd, lconfig->ysize >> 1,
+ OSD_VIDWIN1YL);
+ }
} else {
osd_write(sd, lconfig->ypos, OSD_VIDWIN0YP);
osd_write(sd, lconfig->ysize, OSD_VIDWIN0YL);
+ if ((sd->vpbe_type == VPBE_VERSION_2) &&
+ lconfig->pixfmt == PIXFMT_NV12) {
+ osd_write(sd, lconfig->ypos, OSD_VIDWIN1YP);
+ osd_write(sd, lconfig->ysize, OSD_VIDWIN1YL);
+ }
}
break;
case WIN_OSD1:
@@ -764,14 +1083,43 @@ static void _osd_set_layer_config(struct osd_state *sd, enum osd_layer layer,
* attribute mode to a normal mode.
*/
if (lconfig->pixfmt == PIXFMT_OSD_ATTR) {
- winmd_mask |=
- OSD_OSDWIN1MD_ATN1E | OSD_OSDWIN1MD_RGB1E |
- OSD_OSDWIN1MD_CLUTS1 |
- OSD_OSDWIN1MD_BLND1 | OSD_OSDWIN1MD_TE1;
+ if (sd->vpbe_type == VPBE_VERSION_1) {
+ winmd_mask |= OSD_OSDWIN1MD_ATN1E |
+ OSD_OSDWIN1MD_RGB1E | OSD_OSDWIN1MD_CLUTS1 |
+ OSD_OSDWIN1MD_BLND1 | OSD_OSDWIN1MD_TE1;
+ } else {
+ winmd_mask |= OSD_OSDWIN1MD_BMP1MD |
+ OSD_OSDWIN1MD_CLUTS1 | OSD_OSDWIN1MD_BLND1 |
+ OSD_OSDWIN1MD_TE1;
+ }
} else {
- winmd_mask |= OSD_OSDWIN1MD_RGB1E;
- if (lconfig->pixfmt == PIXFMT_RGB565)
- winmd |= OSD_OSDWIN1MD_RGB1E;
+ if (sd->vpbe_type == VPBE_VERSION_1) {
+ winmd_mask |= OSD_OSDWIN1MD_RGB1E;
+ if (lconfig->pixfmt == PIXFMT_RGB565)
+ winmd |= OSD_OSDWIN1MD_RGB1E;
+ } else if ((sd->vpbe_type == VPBE_VERSION_3)
+ || (sd->vpbe_type == VPBE_VERSION_2)) {
+ winmd_mask |= OSD_OSDWIN1MD_BMP1MD;
+ switch (lconfig->pixfmt) {
+ case PIXFMT_RGB565:
+ winmd |=
+ (1 << OSD_OSDWIN1MD_BMP1MD_SHIFT);
+ break;
+ case PIXFMT_RGB888:
+ winmd |=
+ (2 << OSD_OSDWIN1MD_BMP1MD_SHIFT);
+ _osd_enable_rgb888_pixblend(sd,
+ OSDWIN_OSD1);
+ break;
+ case PIXFMT_YCbCrI:
+ case PIXFMT_YCrCbI:
+ winmd |=
+ (3 << OSD_OSDWIN1MD_BMP1MD_SHIFT);
+ break;
+ default:
+ break;
+ }
+ }
winmd_mask |= OSD_OSDWIN1MD_BMW1;
switch (lconfig->pixfmt) {
@@ -822,15 +1170,45 @@ static void _osd_set_layer_config(struct osd_state *sd, enum osd_layer layer,
* For YUV420P format the register contents are
* duplicated in both VID registers
*/
- osd_modify(sd, OSD_MISCCTL_S420D, ~OSD_MISCCTL_S420D,
- OSD_MISCCTL);
+ if (sd->vpbe_type == VPBE_VERSION_2) {
+ if (lconfig->pixfmt == PIXFMT_NV12) {
+ /* other window also */
+ if (lconfig->interlaced) {
+ winmd_mask |= OSD_VIDWINMD_VFF0;
+ winmd |= OSD_VIDWINMD_VFF0;
+ osd_modify(sd, winmd_mask, winmd,
+ OSD_VIDWINMD);
+ }
+ osd_modify(sd, OSD_MISCCTL_S420D,
+ OSD_MISCCTL_S420D, OSD_MISCCTL);
+ osd_write(sd, lconfig->line_length >> 5,
+ OSD_VIDWIN0OFST);
+ osd_write(sd, lconfig->xpos, OSD_VIDWIN0XP);
+ osd_write(sd, lconfig->xsize, OSD_VIDWIN0XL);
+ } else {
+ osd_modify(sd, OSD_MISCCTL_S420D,
+ ~OSD_MISCCTL_S420D, OSD_MISCCTL);
+ }
+ }
if (lconfig->interlaced) {
osd_write(sd, lconfig->ypos >> 1, OSD_VIDWIN1YP);
osd_write(sd, lconfig->ysize >> 1, OSD_VIDWIN1YL);
+ if ((sd->vpbe_type == VPBE_VERSION_2) &&
+ lconfig->pixfmt == PIXFMT_NV12) {
+ osd_write(sd, lconfig->ypos >> 1,
+ OSD_VIDWIN0YP);
+ osd_write(sd, lconfig->ysize >> 1,
+ OSD_VIDWIN0YL);
+ }
} else {
osd_write(sd, lconfig->ypos, OSD_VIDWIN1YP);
osd_write(sd, lconfig->ysize, OSD_VIDWIN1YL);
+ if ((sd->vpbe_type == VPBE_VERSION_2) &&
+ lconfig->pixfmt == PIXFMT_NV12) {
+ osd_write(sd, lconfig->ypos, OSD_VIDWIN0YP);
+ osd_write(sd, lconfig->ysize, OSD_VIDWIN0YL);
+ }
}
break;
}
@@ -1089,6 +1467,11 @@ static void _osd_init(struct osd_state *sd)
osd_write(sd, 0, OSD_OSDWIN1MD);
osd_write(sd, 0, OSD_RECTCUR);
osd_write(sd, 0, OSD_MISCCTL);
+ if (sd->vpbe_type == VPBE_VERSION_3) {
+ osd_write(sd, 0, OSD_VBNDRY);
+ osd_write(sd, 0, OSD_EXTMODE);
+ osd_write(sd, OSD_MISCCTL_DMANG, OSD_MISCCTL);
+ }
}
static void osd_set_left_margin(struct osd_state *sd, u32 val)
@@ -1110,6 +1493,14 @@ static int osd_initialize(struct osd_state *osd)
/* set default Cb/Cr order */
osd->yc_pixfmt = PIXFMT_YCbCrI;
+ if (osd->vpbe_type == VPBE_VERSION_3) {
+ /*
+ * ROM CLUT1 on the DM355 is similar (identical?) to ROM CLUT0
+ * on the DM6446, so make ROM_CLUT1 the default on the DM355.
+ */
+ osd->rom_clut = ROM_CLUT1;
+ }
+
_osd_set_field_inversion(osd, osd->field_inversion);
_osd_set_rom_clut(osd, osd->rom_clut);
@@ -1208,23 +1599,7 @@ static struct platform_driver osd_driver = {
},
};
-static int osd_init(void)
-{
- if (platform_driver_register(&osd_driver)) {
- printk(KERN_ERR "Unable to register davinci osd driver\n");
- return -ENODEV;
- }
-
- return 0;
-}
-
-static void osd_exit(void)
-{
- platform_driver_unregister(&osd_driver);
-}
-
-module_init(osd_init);
-module_exit(osd_exit);
+module_platform_driver(osd_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("DaVinci OSD Manager Driver");
diff --git a/drivers/media/video/davinci/vpbe_venc.c b/drivers/media/video/davinci/vpbe_venc.c
index 03a3e5c65ee7..00e80f59d5d5 100644
--- a/drivers/media/video/davinci/vpbe_venc.c
+++ b/drivers/media/video/davinci/vpbe_venc.c
@@ -99,6 +99,8 @@ static inline u32 vdaccfg_write(struct v4l2_subdev *sd, u32 val)
return val;
}
+#define VDAC_COMPONENT 0x543
+#define VDAC_S_VIDEO 0x210
/* This function sets the dac of the VPBE for various outputs
*/
static int venc_set_dac(struct v4l2_subdev *sd, u32 out_index)
@@ -109,11 +111,12 @@ static int venc_set_dac(struct v4l2_subdev *sd, u32 out_index)
venc_write(sd, VENC_DACSEL, 0);
break;
case 1:
- v4l2_dbg(debug, 1, sd, "Setting output to S-Video\n");
- venc_write(sd, VENC_DACSEL, 0x210);
+ v4l2_dbg(debug, 1, sd, "Setting output to Component\n");
+ venc_write(sd, VENC_DACSEL, VDAC_COMPONENT);
break;
- case 2:
- venc_write(sd, VENC_DACSEL, 0x543);
+ case 2:
+ v4l2_dbg(debug, 1, sd, "Setting output to S-video\n");
+ venc_write(sd, VENC_DACSEL, VDAC_S_VIDEO);
break;
default:
return -EINVAL;
@@ -124,6 +127,8 @@ static int venc_set_dac(struct v4l2_subdev *sd, u32 out_index)
static void venc_enabledigitaloutput(struct v4l2_subdev *sd, int benable)
{
+ struct venc_state *venc = to_state(sd);
+ struct venc_platform_data *pdata = venc->pdata;
v4l2_dbg(debug, 2, sd, "venc_enabledigitaloutput\n");
if (benable) {
@@ -155,7 +160,8 @@ static void venc_enabledigitaloutput(struct v4l2_subdev *sd, int benable)
/* Disable LCD output control (accepting default polarity) */
venc_write(sd, VENC_LCDOUT, 0);
- venc_write(sd, VENC_CMPNT, 0x100);
+ if (pdata->venc_type != VPBE_VERSION_3)
+ venc_write(sd, VENC_CMPNT, 0x100);
venc_write(sd, VENC_HSPLS, 0);
venc_write(sd, VENC_HINT, 0);
venc_write(sd, VENC_HSTART, 0);
@@ -178,11 +184,14 @@ static void venc_enabledigitaloutput(struct v4l2_subdev *sd, int benable)
}
}
+#define VDAC_CONFIG_SD_V3 0x0E21A6B6
+#define VDAC_CONFIG_SD_V2 0x081141CF
/*
* setting NTSC mode
*/
static int venc_set_ntsc(struct v4l2_subdev *sd)
{
+ u32 val;
struct venc_state *venc = to_state(sd);
struct venc_platform_data *pdata = venc->pdata;
@@ -195,12 +204,22 @@ static int venc_set_ntsc(struct v4l2_subdev *sd)
venc_enabledigitaloutput(sd, 0);
- /* to set VENC CLK DIV to 1 - final clock is 54 MHz */
- venc_modify(sd, VENC_VIDCTL, 0, 1 << 1);
- /* Set REC656 Mode */
- venc_write(sd, VENC_YCCCTL, 0x1);
- venc_modify(sd, VENC_VDPRO, 0, VENC_VDPRO_DAFRQ);
- venc_modify(sd, VENC_VDPRO, 0, VENC_VDPRO_DAUPS);
+ if (pdata->venc_type == VPBE_VERSION_3) {
+ venc_write(sd, VENC_CLKCTL, 0x01);
+ venc_write(sd, VENC_VIDCTL, 0);
+ val = vdaccfg_write(sd, VDAC_CONFIG_SD_V3);
+ } else if (pdata->venc_type == VPBE_VERSION_2) {
+ venc_write(sd, VENC_CLKCTL, 0x01);
+ venc_write(sd, VENC_VIDCTL, 0);
+ vdaccfg_write(sd, VDAC_CONFIG_SD_V2);
+ } else {
+ /* to set VENC CLK DIV to 1 - final clock is 54 MHz */
+ venc_modify(sd, VENC_VIDCTL, 0, 1 << 1);
+ /* Set REC656 Mode */
+ venc_write(sd, VENC_YCCCTL, 0x1);
+ venc_modify(sd, VENC_VDPRO, 0, VENC_VDPRO_DAFRQ);
+ venc_modify(sd, VENC_VDPRO, 0, VENC_VDPRO_DAUPS);
+ }
venc_write(sd, VENC_VMOD, 0);
venc_modify(sd, VENC_VMOD, (1 << VENC_VMOD_VIE_SHIFT),
@@ -220,6 +239,7 @@ static int venc_set_ntsc(struct v4l2_subdev *sd)
static int venc_set_pal(struct v4l2_subdev *sd)
{
struct venc_state *venc = to_state(sd);
+ struct venc_platform_data *pdata = venc->pdata;
v4l2_dbg(debug, 2, sd, "venc_set_pal\n");
@@ -230,10 +250,20 @@ static int venc_set_pal(struct v4l2_subdev *sd)
venc_enabledigitaloutput(sd, 0);
- /* to set VENC CLK DIV to 1 - final clock is 54 MHz */
- venc_modify(sd, VENC_VIDCTL, 0, 1 << 1);
- /* Set REC656 Mode */
- venc_write(sd, VENC_YCCCTL, 0x1);
+ if (pdata->venc_type == VPBE_VERSION_3) {
+ venc_write(sd, VENC_CLKCTL, 0x1);
+ venc_write(sd, VENC_VIDCTL, 0);
+ vdaccfg_write(sd, VDAC_CONFIG_SD_V3);
+ } else if (pdata->venc_type == VPBE_VERSION_2) {
+ venc_write(sd, VENC_CLKCTL, 0x1);
+ venc_write(sd, VENC_VIDCTL, 0);
+ vdaccfg_write(sd, VDAC_CONFIG_SD_V2);
+ } else {
+ /* to set VENC CLK DIV to 1 - final clock is 54 MHz */
+ venc_modify(sd, VENC_VIDCTL, 0, 1 << 1);
+ /* Set REC656 Mode */
+ venc_write(sd, VENC_YCCCTL, 0x1);
+ }
venc_modify(sd, VENC_SYNCCTL, 1 << VENC_SYNCCTL_OVD_SHIFT,
VENC_SYNCCTL_OVD);
@@ -252,6 +282,7 @@ static int venc_set_pal(struct v4l2_subdev *sd)
return 0;
}
+#define VDAC_CONFIG_HD_V2 0x081141EF
/*
* venc_set_480p59_94
*
@@ -263,6 +294,9 @@ static int venc_set_480p59_94(struct v4l2_subdev *sd)
struct venc_platform_data *pdata = venc->pdata;
v4l2_dbg(debug, 2, sd, "venc_set_480p59_94\n");
+ if ((pdata->venc_type != VPBE_VERSION_1) &&
+ (pdata->venc_type != VPBE_VERSION_2))
+ return -EINVAL;
/* Setup clock at VPSS & VENC for SD */
if (pdata->setup_clock(VPBE_ENC_DV_PRESET, V4L2_DV_480P59_94) < 0)
@@ -270,12 +304,18 @@ static int venc_set_480p59_94(struct v4l2_subdev *sd)
venc_enabledigitaloutput(sd, 0);
+ if (pdata->venc_type == VPBE_VERSION_2)
+ vdaccfg_write(sd, VDAC_CONFIG_HD_V2);
venc_write(sd, VENC_OSDCLK0, 0);
venc_write(sd, VENC_OSDCLK1, 1);
- venc_modify(sd, VENC_VDPRO, VENC_VDPRO_DAFRQ,
- VENC_VDPRO_DAFRQ);
- venc_modify(sd, VENC_VDPRO, VENC_VDPRO_DAUPS,
- VENC_VDPRO_DAUPS);
+
+ if (pdata->venc_type == VPBE_VERSION_1) {
+ venc_modify(sd, VENC_VDPRO, VENC_VDPRO_DAFRQ,
+ VENC_VDPRO_DAFRQ);
+ venc_modify(sd, VENC_VDPRO, VENC_VDPRO_DAUPS,
+ VENC_VDPRO_DAUPS);
+ }
+
venc_write(sd, VENC_VMOD, 0);
venc_modify(sd, VENC_VMOD, (1 << VENC_VMOD_VIE_SHIFT),
VENC_VMOD_VIE);
@@ -302,19 +342,27 @@ static int venc_set_576p50(struct v4l2_subdev *sd)
v4l2_dbg(debug, 2, sd, "venc_set_576p50\n");
+ if ((pdata->venc_type != VPBE_VERSION_1) &&
+ (pdata->venc_type != VPBE_VERSION_2))
+ return -EINVAL;
/* Setup clock at VPSS & VENC for SD */
if (pdata->setup_clock(VPBE_ENC_DV_PRESET, V4L2_DV_576P50) < 0)
return -EINVAL;
venc_enabledigitaloutput(sd, 0);
+ if (pdata->venc_type == VPBE_VERSION_2)
+ vdaccfg_write(sd, VDAC_CONFIG_HD_V2);
+
venc_write(sd, VENC_OSDCLK0, 0);
venc_write(sd, VENC_OSDCLK1, 1);
- venc_modify(sd, VENC_VDPRO, VENC_VDPRO_DAFRQ,
- VENC_VDPRO_DAFRQ);
- venc_modify(sd, VENC_VDPRO, VENC_VDPRO_DAUPS,
- VENC_VDPRO_DAUPS);
+ if (pdata->venc_type == VPBE_VERSION_1) {
+ venc_modify(sd, VENC_VDPRO, VENC_VDPRO_DAFRQ,
+ VENC_VDPRO_DAFRQ);
+ venc_modify(sd, VENC_VDPRO, VENC_VDPRO_DAUPS,
+ VENC_VDPRO_DAUPS);
+ }
venc_write(sd, VENC_VMOD, 0);
venc_modify(sd, VENC_VMOD, (1 << VENC_VMOD_VIE_SHIFT),
@@ -330,6 +378,63 @@ static int venc_set_576p50(struct v4l2_subdev *sd)
return 0;
}
+/*
+ * venc_set_720p60_internal - Setup 720p60 in venc for dm365 only
+ */
+static int venc_set_720p60_internal(struct v4l2_subdev *sd)
+{
+ struct venc_state *venc = to_state(sd);
+ struct venc_platform_data *pdata = venc->pdata;
+
+ if (pdata->setup_clock(VPBE_ENC_DV_PRESET, V4L2_DV_720P60) < 0)
+ return -EINVAL;
+
+ venc_enabledigitaloutput(sd, 0);
+
+ venc_write(sd, VENC_OSDCLK0, 0);
+ venc_write(sd, VENC_OSDCLK1, 1);
+
+ venc_write(sd, VENC_VMOD, 0);
+ /* DM365 component HD mode */
+ venc_modify(sd, VENC_VMOD, (1 << VENC_VMOD_VIE_SHIFT),
+ VENC_VMOD_VIE);
+ venc_modify(sd, VENC_VMOD, VENC_VMOD_HDMD, VENC_VMOD_HDMD);
+ venc_modify(sd, VENC_VMOD, (HDTV_720P << VENC_VMOD_TVTYP_SHIFT),
+ VENC_VMOD_TVTYP);
+ venc_modify(sd, VENC_VMOD, VENC_VMOD_VENC, VENC_VMOD_VENC);
+ venc_write(sd, VENC_XHINTVL, 0);
+ return 0;
+}
+
+/*
+ * venc_set_1080i30_internal - Setup 1080i30 in venc for dm365 only
+ */
+static int venc_set_1080i30_internal(struct v4l2_subdev *sd)
+{
+ struct venc_state *venc = to_state(sd);
+ struct venc_platform_data *pdata = venc->pdata;
+
+ if (pdata->setup_clock(VPBE_ENC_DV_PRESET, V4L2_DV_1080P30) < 0)
+ return -EINVAL;
+
+ venc_enabledigitaloutput(sd, 0);
+
+ venc_write(sd, VENC_OSDCLK0, 0);
+ venc_write(sd, VENC_OSDCLK1, 1);
+
+
+ venc_write(sd, VENC_VMOD, 0);
+ /* DM365 component HD mode */
+ venc_modify(sd, VENC_VMOD, (1 << VENC_VMOD_VIE_SHIFT),
+ VENC_VMOD_VIE);
+ venc_modify(sd, VENC_VMOD, VENC_VMOD_HDMD, VENC_VMOD_HDMD);
+ venc_modify(sd, VENC_VMOD, (HDTV_1080I << VENC_VMOD_TVTYP_SHIFT),
+ VENC_VMOD_TVTYP);
+ venc_modify(sd, VENC_VMOD, VENC_VMOD_VENC, VENC_VMOD_VENC);
+ venc_write(sd, VENC_XHINTVL, 0);
+ return 0;
+}
+
static int venc_s_std_output(struct v4l2_subdev *sd, v4l2_std_id norm)
{
v4l2_dbg(debug, 1, sd, "venc_s_std_output\n");
@@ -345,13 +450,30 @@ static int venc_s_std_output(struct v4l2_subdev *sd, v4l2_std_id norm)
static int venc_s_dv_preset(struct v4l2_subdev *sd,
struct v4l2_dv_preset *dv_preset)
{
+ struct venc_state *venc = to_state(sd);
+ int ret;
+
v4l2_dbg(debug, 1, sd, "venc_s_dv_preset\n");
if (dv_preset->preset == V4L2_DV_576P50)
return venc_set_576p50(sd);
else if (dv_preset->preset == V4L2_DV_480P59_94)
return venc_set_480p59_94(sd);
-
+ else if ((dv_preset->preset == V4L2_DV_720P60) &&
+ (venc->pdata->venc_type == VPBE_VERSION_2)) {
+ /* TBD setup internal 720p mode here */
+ ret = venc_set_720p60_internal(sd);
+ /* for DM365 VPBE, there is DAC inside */
+ vdaccfg_write(sd, VDAC_CONFIG_HD_V2);
+ return ret;
+ } else if ((dv_preset->preset == V4L2_DV_1080I30) &&
+ (venc->pdata->venc_type == VPBE_VERSION_2)) {
+ /* TBD setup internal 1080i mode here */
+ ret = venc_set_1080i30_internal(sd);
+ /* for DM365 VPBE, there is DAC inside */
+ vdaccfg_write(sd, VDAC_CONFIG_HD_V2);
+ return ret;
+ }
return -EINVAL;
}
@@ -508,11 +630,41 @@ static int venc_probe(struct platform_device *pdev)
goto release_venc_mem_region;
}
+ if (venc->pdata->venc_type != VPBE_VERSION_1) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res) {
+ dev_err(venc->pdev,
+ "Unable to get VDAC_CONFIG address map\n");
+ ret = -ENODEV;
+ goto unmap_venc_io;
+ }
+
+ if (!request_mem_region(res->start,
+ resource_size(res), "venc")) {
+ dev_err(venc->pdev,
+ "Unable to reserve VDAC_CONFIG MMIO region\n");
+ ret = -ENODEV;
+ goto unmap_venc_io;
+ }
+
+ venc->vdaccfg_reg = ioremap_nocache(res->start,
+ resource_size(res));
+ if (!venc->vdaccfg_reg) {
+ dev_err(venc->pdev,
+ "Unable to map VDAC_CONFIG IO space\n");
+ ret = -ENODEV;
+ goto release_vdaccfg_mem_region;
+ }
+ }
spin_lock_init(&venc->lock);
platform_set_drvdata(pdev, venc);
dev_notice(venc->pdev, "VENC sub device probe success\n");
return 0;
+release_vdaccfg_mem_region:
+ release_mem_region(res->start, resource_size(res));
+unmap_venc_io:
+ iounmap(venc->venc_base);
release_venc_mem_region:
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(res->start, resource_size(res));
@@ -529,6 +681,11 @@ static int venc_remove(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
iounmap((void *)venc->venc_base);
release_mem_region(res->start, resource_size(res));
+ if (venc->pdata->venc_type != VPBE_VERSION_1) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ iounmap((void *)venc->vdaccfg_reg);
+ release_mem_region(res->start, resource_size(res));
+ }
kfree(venc);
return 0;
@@ -543,23 +700,7 @@ static struct platform_driver venc_driver = {
},
};
-static int venc_init(void)
-{
- if (platform_driver_register(&venc_driver)) {
- printk(KERN_ERR "Unable to register venc driver\n");
- return -ENODEV;
- }
- return 0;
-}
-
-static void venc_exit(void)
-{
- platform_driver_unregister(&venc_driver);
- return;
-}
-
-module_init(venc_init);
-module_exit(venc_exit);
+module_platform_driver(venc_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("VPBE VENC Driver");
diff --git a/drivers/media/video/davinci/vpfe_capture.c b/drivers/media/video/davinci/vpfe_capture.c
index 5b38fc93ff28..20cf271a774b 100644
--- a/drivers/media/video/davinci/vpfe_capture.c
+++ b/drivers/media/video/davinci/vpfe_capture.c
@@ -2076,20 +2076,4 @@ static struct platform_driver vpfe_driver = {
.remove = __devexit_p(vpfe_remove),
};
-static __init int vpfe_init(void)
-{
- printk(KERN_NOTICE "vpfe_init\n");
- /* Register driver to the kernel */
- return platform_driver_register(&vpfe_driver);
-}
-
-/*
- * vpfe_cleanup : This function un-registers device driver
- */
-static void vpfe_cleanup(void)
-{
- platform_driver_unregister(&vpfe_driver);
-}
-
-module_init(vpfe_init);
-module_exit(vpfe_cleanup);
+module_platform_driver(vpfe_driver);
diff --git a/drivers/media/video/davinci/vpif.h b/drivers/media/video/davinci/vpif.h
index 10550bd93b06..25036cb11bea 100644
--- a/drivers/media/video/davinci/vpif.h
+++ b/drivers/media/video/davinci/vpif.h
@@ -20,6 +20,7 @@
#include <linux/videodev2.h>
#include <mach/hardware.h>
#include <mach/dm646x.h>
+#include <media/davinci/vpif_types.h>
/* Maximum channel allowed */
#define VPIF_NUM_CHANNELS (4)
diff --git a/drivers/media/video/davinci/vpif_capture.c b/drivers/media/video/davinci/vpif_capture.c
index 49e4deb50043..6504e40a31dd 100644
--- a/drivers/media/video/davinci/vpif_capture.c
+++ b/drivers/media/video/davinci/vpif_capture.c
@@ -2177,6 +2177,12 @@ static __init int vpif_probe(struct platform_device *pdev)
return err;
}
+ err = v4l2_device_register(vpif_dev, &vpif_obj.v4l2_dev);
+ if (err) {
+ v4l2_err(vpif_dev->driver, "Error registering v4l2 device\n");
+ return err;
+ }
+
k = 0;
while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, k))) {
for (i = res->start; i <= res->end; i++) {
@@ -2246,12 +2252,6 @@ static __init int vpif_probe(struct platform_device *pdev)
goto probe_out;
}
- err = v4l2_device_register(vpif_dev, &vpif_obj.v4l2_dev);
- if (err) {
- v4l2_err(vpif_dev->driver, "Error registering v4l2 device\n");
- goto probe_subdev_out;
- }
-
for (i = 0; i < subdev_count; i++) {
subdevdata = &config->subdev_info[i];
vpif_obj.sd[i] =
@@ -2281,7 +2281,6 @@ probe_subdev_out:
j = VPIF_CAPTURE_MAX_DEVICES;
probe_out:
- v4l2_device_unregister(&vpif_obj.v4l2_dev);
for (k = 0; k < j; k++) {
/* Get the pointer to the channel object */
ch = vpif_obj.dev[k];
@@ -2303,6 +2302,7 @@ vpif_int_err:
if (res)
i = res->end;
}
+ v4l2_device_unregister(&vpif_obj.v4l2_dev);
return err;
}
diff --git a/drivers/media/video/davinci/vpif_capture.h b/drivers/media/video/davinci/vpif_capture.h
index 064550f5ce4a..a693d4ebda55 100644
--- a/drivers/media/video/davinci/vpif_capture.h
+++ b/drivers/media/video/davinci/vpif_capture.h
@@ -27,7 +27,7 @@
#include <media/v4l2-device.h>
#include <media/videobuf-core.h>
#include <media/videobuf-dma-contig.h>
-#include <mach/dm646x.h>
+#include <media/davinci/vpif_types.h>
#include "vpif.h"
diff --git a/drivers/media/video/davinci/vpif_display.h b/drivers/media/video/davinci/vpif_display.h
index 5d1936dafed2..56879d1a0684 100644
--- a/drivers/media/video/davinci/vpif_display.h
+++ b/drivers/media/video/davinci/vpif_display.h
@@ -22,6 +22,7 @@
#include <media/v4l2-device.h>
#include <media/videobuf-core.h>
#include <media/videobuf-dma-contig.h>
+#include <media/davinci/vpif_types.h>
#include "vpif.h"
diff --git a/drivers/media/video/em28xx/em28xx-audio.c b/drivers/media/video/em28xx/em28xx-audio.c
index cff0768afbf5..e2a7b77c39c7 100644
--- a/drivers/media/video/em28xx/em28xx-audio.c
+++ b/drivers/media/video/em28xx/em28xx-audio.c
@@ -193,7 +193,7 @@ static int em28xx_init_audio_isoc(struct em28xx *dev)
urb->dev = dev->udev;
urb->context = dev;
- urb->pipe = usb_rcvisocpipe(dev->udev, 0x83);
+ urb->pipe = usb_rcvisocpipe(dev->udev, EM28XX_EP_AUDIO);
urb->transfer_flags = URB_ISO_ASAP;
urb->transfer_buffer = dev->adev.transfer_buffer[i];
urb->interval = 1;
diff --git a/drivers/media/video/em28xx/em28xx-cards.c b/drivers/media/video/em28xx/em28xx-cards.c
index 93807dcf944e..4561cd89938d 100644
--- a/drivers/media/video/em28xx/em28xx-cards.c
+++ b/drivers/media/video/em28xx/em28xx-cards.c
@@ -336,6 +336,23 @@ static struct em28xx_reg_seq pctv_460e[] = {
{ -1, -1, -1, -1},
};
+#if 0
+static struct em28xx_reg_seq hauppauge_930c_gpio[] = {
+ {EM2874_R80_GPIO, 0x6f, 0xff, 10},
+ {EM2874_R80_GPIO, 0x4f, 0xff, 10}, /* xc5000 reset */
+ {EM2874_R80_GPIO, 0x6f, 0xff, 10},
+ {EM2874_R80_GPIO, 0x4f, 0xff, 10},
+ { -1, -1, -1, -1},
+};
+
+static struct em28xx_reg_seq hauppauge_930c_digital[] = {
+ {EM2874_R80_GPIO, 0xf6, 0xff, 10},
+ {EM2874_R80_GPIO, 0xe6, 0xff, 100},
+ {EM2874_R80_GPIO, 0xa6, 0xff, 10},
+ { -1, -1, -1, -1},
+};
+#endif
+
/*
* Board definitions
*/
@@ -839,6 +856,10 @@ struct em28xx_board em28xx_boards[] = {
[EM2870_BOARD_KWORLD_355U] = {
.name = "Kworld 355 U DVB-T",
.valid = EM28XX_BOARD_NOT_VALIDATED,
+ .tuner_type = TUNER_ABSENT,
+ .tuner_gpio = default_tuner_gpio,
+ .has_dvb = 1,
+ .dvb_gpio = default_digital,
},
[EM2870_BOARD_PINNACLE_PCTV_DVB] = {
.name = "Pinnacle PCTV DVB-T",
@@ -887,6 +908,37 @@ struct em28xx_board em28xx_boards[] = {
.tuner_addr = 0x41,
.dvb_gpio = terratec_h5_digital, /* FIXME: probably wrong */
.tuner_gpio = terratec_h5_gpio,
+#else
+ .tuner_type = TUNER_ABSENT,
+#endif
+ .i2c_speed = EM2874_I2C_SECONDARY_BUS_SELECT |
+ EM28XX_I2C_CLK_WAIT_ENABLE |
+ EM28XX_I2C_FREQ_400_KHZ,
+ },
+ [EM2884_BOARD_HAUPPAUGE_WINTV_HVR_930C] = {
+ .name = "Hauppauge WinTV HVR 930C",
+ .has_dvb = 1,
+#if 0 /* FIXME: Add analog support */
+ .tuner_type = TUNER_XC5000,
+ .tuner_addr = 0x41,
+ .dvb_gpio = hauppauge_930c_digital,
+ .tuner_gpio = hauppauge_930c_gpio,
+#else
+ .tuner_type = TUNER_ABSENT,
+#endif
+ .ir_codes = RC_MAP_HAUPPAUGE,
+ .i2c_speed = EM2874_I2C_SECONDARY_BUS_SELECT |
+ EM28XX_I2C_CLK_WAIT_ENABLE |
+ EM28XX_I2C_FREQ_400_KHZ,
+ },
+ [EM2884_BOARD_CINERGY_HTC_STICK] = {
+ .name = "Terratec Cinergy HTC Stick",
+ .has_dvb = 1,
+#if 0
+ .tuner_type = TUNER_PHILIPS_TDA8290,
+ .tuner_addr = 0x41,
+ .dvb_gpio = terratec_h5_digital, /* FIXME: probably wrong */
+ .tuner_gpio = terratec_h5_gpio,
#endif
.i2c_speed = EM2874_I2C_SECONDARY_BUS_SELECT |
EM28XX_I2C_CLK_WAIT_ENABLE |
@@ -1127,7 +1179,7 @@ struct em28xx_board em28xx_boards[] = {
.name = "Terratec Cinergy 200 USB",
.is_em2800 = 1,
.has_ir_i2c = 1,
- .tuner_type = TUNER_LG_PAL_NEW_TAPC,
+ .tuner_type = TUNER_LG_TALN,
.tda9887_conf = TDA9887_PRESENT,
.decoder = EM28XX_SAA711X,
.input = { {
@@ -1218,7 +1270,7 @@ struct em28xx_board em28xx_boards[] = {
},
[EM2820_BOARD_PINNACLE_DVC_90] = {
.name = "Pinnacle Dazzle DVC 90/100/101/107 / Kaiser Baas Video to DVD maker "
- "/ Kworld DVD Maker 2",
+ "/ Kworld DVD Maker 2 / Plextor ConvertX PX-AV100U",
.tuner_type = TUNER_ABSENT, /* capture only board */
.decoder = EM28XX_SAA711X,
.input = { {
@@ -1840,6 +1892,22 @@ struct em28xx_board em28xx_boards[] = {
.has_dvb = 1,
.ir_codes = RC_MAP_PINNACLE_PCTV_HD,
},
+ /* eb1a:5006 Honestech VIDBOX NW03
+ * Empia EM2860, Philips SAA7113, Empia EMP202, No Tuner */
+ [EM2860_BOARD_HT_VIDBOX_NW03] = {
+ .name = "Honestech Vidbox NW03",
+ .tuner_type = TUNER_ABSENT,
+ .decoder = EM28XX_SAA711X,
+ .input = { {
+ .type = EM28XX_VMUX_COMPOSITE1,
+ .vmux = SAA7115_COMPOSITE0,
+ .amux = EM28XX_AMUX_LINE_IN,
+ }, {
+ .type = EM28XX_VMUX_SVIDEO,
+ .vmux = SAA7115_SVIDEO3, /* S-VIDEO needs confirming */
+ .amux = EM28XX_AMUX_LINE_IN,
+ } },
+ },
};
const unsigned int em28xx_bcount = ARRAY_SIZE(em28xx_boards);
@@ -1899,6 +1967,8 @@ struct usb_device_id em28xx_id_table[] = {
.driver_info = EM2800_BOARD_GRABBEEX_USB2800 },
{ USB_DEVICE(0xeb1a, 0xe357),
.driver_info = EM2870_BOARD_KWORLD_355U },
+ { USB_DEVICE(0xeb1a, 0xe359),
+ .driver_info = EM2870_BOARD_KWORLD_355U },
{ USB_DEVICE(0x1b80, 0xe302),
.driver_info = EM2820_BOARD_PINNACLE_DVC_90 }, /* Kaiser Baas Video to DVD maker */
{ USB_DEVICE(0x1b80, 0xe304),
@@ -1914,17 +1984,23 @@ struct usb_device_id em28xx_id_table[] = {
{ USB_DEVICE(0x0ccd, 0x0042),
.driver_info = EM2882_BOARD_TERRATEC_HYBRID_XS },
{ USB_DEVICE(0x0ccd, 0x0043),
+ .driver_info = EM2870_BOARD_TERRATEC_XS },
+ { USB_DEVICE(0x0ccd, 0x008e), /* Cinergy HTC USB XS Rev. 1 */
+ .driver_info = EM2884_BOARD_TERRATEC_H5 },
+ { USB_DEVICE(0x0ccd, 0x00ac), /* Cinergy HTC USB XS Rev. 2 */
+ .driver_info = EM2884_BOARD_TERRATEC_H5 },
+ { USB_DEVICE(0x0ccd, 0x10a2), /* H5 Rev. 1 */
.driver_info = EM2884_BOARD_TERRATEC_H5 },
- { USB_DEVICE(0x0ccd, 0x10a2), /* Rev. 1 */
+ { USB_DEVICE(0x0ccd, 0x10ad), /* H5 Rev. 2 */
.driver_info = EM2884_BOARD_TERRATEC_H5 },
- { USB_DEVICE(0x0ccd, 0x10ad), /* Rev. 2 */
- .driver_info = EM2880_BOARD_TERRATEC_PRODIGY_XS },
{ USB_DEVICE(0x0ccd, 0x0084),
.driver_info = EM2860_BOARD_TERRATEC_AV350 },
{ USB_DEVICE(0x0ccd, 0x0096),
.driver_info = EM2860_BOARD_TERRATEC_GRABBY },
{ USB_DEVICE(0x0ccd, 0x10AF),
.driver_info = EM2860_BOARD_TERRATEC_GRABBY },
+ { USB_DEVICE(0x0ccd, 0x00b2),
+ .driver_info = EM2884_BOARD_CINERGY_HTC_STICK },
{ USB_DEVICE(0x0fd9, 0x0033),
.driver_info = EM2860_BOARD_ELGATO_VIDEO_CAPTURE},
{ USB_DEVICE(0x185b, 0x2870),
@@ -1963,6 +2039,8 @@ struct usb_device_id em28xx_id_table[] = {
.driver_info = EM2880_BOARD_PINNACLE_PCTV_HD_PRO },
{ USB_DEVICE(0x0413, 0x6023),
.driver_info = EM2800_BOARD_LEADTEK_WINFAST_USBII },
+ { USB_DEVICE(0x093b, 0xa003),
+ .driver_info = EM2820_BOARD_PINNACLE_DVC_90 },
{ USB_DEVICE(0x093b, 0xa005),
.driver_info = EM2861_BOARD_PLEXTOR_PX_TV100U },
{ USB_DEVICE(0x04bb, 0x0515),
@@ -1975,6 +2053,12 @@ struct usb_device_id em28xx_id_table[] = {
.driver_info = EM28174_BOARD_PCTV_290E },
{ USB_DEVICE(0x2013, 0x024c),
.driver_info = EM28174_BOARD_PCTV_460E },
+ { USB_DEVICE(0x2040, 0x1605),
+ .driver_info = EM2884_BOARD_HAUPPAUGE_WINTV_HVR_930C },
+ { USB_DEVICE(0xeb1a, 0x5006),
+ .driver_info = EM2860_BOARD_HT_VIDBOX_NW03 },
+ { USB_DEVICE(0x1b80, 0xe309), /* Sveon STV40 */
+ .driver_info = EM2860_BOARD_EASYCAP },
{ },
};
MODULE_DEVICE_TABLE(usb, em28xx_id_table);
@@ -2028,10 +2112,10 @@ int em28xx_tuner_callback(void *ptr, int component, int command, int arg)
int rc = 0;
struct em28xx *dev = ptr;
- if (dev->tuner_type != TUNER_XC2028)
+ if (dev->tuner_type != TUNER_XC2028 && dev->tuner_type != TUNER_XC5000)
return 0;
- if (command != XC2028_TUNER_RESET)
+ if (command != XC2028_TUNER_RESET && command != XC5000_TUNER_RESET)
return 0;
rc = em28xx_gpio_set(dev, dev->board.tuner_gpio);
@@ -2203,7 +2287,8 @@ void em28xx_pre_card_setup(struct em28xx *dev)
/* Set the initial XCLK and I2C clock values based on the board
definition */
em28xx_write_reg(dev, EM28XX_R0F_XCLK, dev->board.xclk & 0x7f);
- em28xx_write_reg(dev, EM28XX_R06_I2C_CLK, dev->board.i2c_speed);
+ if (!dev->board.is_em2800)
+ em28xx_write_reg(dev, EM28XX_R06_I2C_CLK, dev->board.i2c_speed);
msleep(50);
/* request some modules */
@@ -2832,11 +2917,10 @@ void em28xx_release_resources(struct em28xx *dev)
* em28xx_init_dev()
* allocates and inits the device structs, registers i2c bus and v4l device
*/
-static int em28xx_init_dev(struct em28xx **devhandle, struct usb_device *udev,
+static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev,
struct usb_interface *interface,
int minor)
{
- struct em28xx *dev = *devhandle;
int retval;
dev->udev = udev;
@@ -2931,7 +3015,7 @@ static int em28xx_init_dev(struct em28xx **devhandle, struct usb_device *udev,
if (!dev->board.is_em2800) {
/* Resets I2C speed */
- em28xx_write_reg(dev, EM28XX_R06_I2C_CLK, dev->board.i2c_speed);
+ retval = em28xx_write_reg(dev, EM28XX_R06_I2C_CLK, dev->board.i2c_speed);
if (retval < 0) {
em28xx_errdev("%s: em28xx_write_reg failed!"
" retval [%d]\n",
@@ -3031,12 +3115,11 @@ unregister_dev:
static int em28xx_usb_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
- const struct usb_endpoint_descriptor *endpoint;
struct usb_device *udev;
struct em28xx *dev = NULL;
int retval;
- bool is_audio_only = false, has_audio = false;
- int i, nr, isoc_pipe;
+ bool has_audio = false, has_video = false, has_dvb = false;
+ int i, nr;
const int ifnum = interface->altsetting[0].desc.bInterfaceNumber;
char *speed;
char descr[255] = "";
@@ -3068,54 +3151,65 @@ static int em28xx_usb_probe(struct usb_interface *interface,
goto err;
}
+ /* allocate memory for our device state and initialize it */
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (dev == NULL) {
+ em28xx_err(DRIVER_NAME ": out of memory!\n");
+ retval = -ENOMEM;
+ goto err;
+ }
+
+ /* compute alternate max packet sizes */
+ dev->alt_max_pkt_size = kmalloc(sizeof(dev->alt_max_pkt_size[0]) *
+ interface->num_altsetting, GFP_KERNEL);
+ if (dev->alt_max_pkt_size == NULL) {
+ em28xx_errdev("out of memory!\n");
+ kfree(dev);
+ retval = -ENOMEM;
+ goto err;
+ }
+
/* Get endpoints */
for (i = 0; i < interface->num_altsetting; i++) {
int ep;
for (ep = 0; ep < interface->altsetting[i].desc.bNumEndpoints; ep++) {
- struct usb_host_endpoint *e;
- e = &interface->altsetting[i].endpoint[ep];
-
- if (e->desc.bEndpointAddress == 0x83)
- has_audio = true;
+ const struct usb_endpoint_descriptor *e;
+ int sizedescr, size;
+
+ e = &interface->altsetting[i].endpoint[ep].desc;
+
+ sizedescr = le16_to_cpu(e->wMaxPacketSize);
+ size = sizedescr & 0x7ff;
+
+ if (udev->speed == USB_SPEED_HIGH)
+ size = size * hb_mult(sizedescr);
+
+ if (usb_endpoint_xfer_isoc(e) &&
+ usb_endpoint_dir_in(e)) {
+ switch (e->bEndpointAddress) {
+ case EM28XX_EP_AUDIO:
+ has_audio = true;
+ break;
+ case EM28XX_EP_ANALOG:
+ has_video = true;
+ dev->alt_max_pkt_size[i] = size;
+ break;
+ case EM28XX_EP_DIGITAL:
+ has_dvb = true;
+ if (size > dev->dvb_max_pkt_size) {
+ dev->dvb_max_pkt_size = size;
+ dev->dvb_alt = i;
+ }
+ break;
+ }
+ }
}
}
- endpoint = &interface->cur_altsetting->endpoint[0].desc;
-
- /* check if the device has the iso in endpoint at the correct place */
- if (usb_endpoint_xfer_isoc(endpoint)
- &&
- (interface->altsetting[1].endpoint[0].desc.wMaxPacketSize == 940)) {
- /* It's a newer em2874/em2875 device */
- isoc_pipe = 0;
- } else {
- int check_interface = 1;
- isoc_pipe = 1;
- endpoint = &interface->cur_altsetting->endpoint[1].desc;
- if (!usb_endpoint_xfer_isoc(endpoint))
- check_interface = 0;
-
- if (usb_endpoint_dir_out(endpoint))
- check_interface = 0;
-
- if (!check_interface) {
- if (has_audio) {
- is_audio_only = true;
- } else {
- em28xx_err(DRIVER_NAME " video device (%04x:%04x): "
- "interface %i, class %i found.\n",
- le16_to_cpu(udev->descriptor.idVendor),
- le16_to_cpu(udev->descriptor.idProduct),
- ifnum,
- interface->altsetting[0].desc.bInterfaceClass);
- em28xx_err(DRIVER_NAME " This is an anciliary "
- "interface not used by the driver\n");
-
- retval = -ENODEV;
- goto err;
- }
- }
+ if (!(has_audio || has_video || has_dvb)) {
+ retval = -ENODEV;
+ goto err_free;
}
switch (udev->speed) {
@@ -3141,6 +3235,7 @@ static int em28xx_usb_probe(struct usb_interface *interface,
strlcat(descr, " ", sizeof(descr));
strlcat(descr, udev->product, sizeof(descr));
}
+
if (*descr)
strlcat(descr, " ", sizeof(descr));
@@ -3157,6 +3252,14 @@ static int em28xx_usb_probe(struct usb_interface *interface,
printk(KERN_INFO DRIVER_NAME
": Audio Vendor Class interface %i found\n",
ifnum);
+ if (has_video)
+ printk(KERN_INFO DRIVER_NAME
+ ": Video interface %i found\n",
+ ifnum);
+ if (has_dvb)
+ printk(KERN_INFO DRIVER_NAME
+ ": DVB interface %i found\n",
+ ifnum);
/*
* Make sure we have 480 Mbps of bandwidth, otherwise things like
@@ -3168,22 +3271,14 @@ static int em28xx_usb_probe(struct usb_interface *interface,
printk(DRIVER_NAME ": Device must be connected to a high-speed"
" USB 2.0 port.\n");
retval = -ENODEV;
- goto err;
- }
-
- /* allocate memory for our device state and initialize it */
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (dev == NULL) {
- em28xx_err(DRIVER_NAME ": out of memory!\n");
- retval = -ENOMEM;
- goto err;
+ goto err_free;
}
snprintf(dev->name, sizeof(dev->name), "em28xx #%d", nr);
dev->devno = nr;
dev->model = id->driver_info;
dev->alt = -1;
- dev->is_audio_only = is_audio_only;
+ dev->is_audio_only = has_audio && !(has_video || has_dvb);
dev->has_alsa_audio = has_audio;
dev->audio_ifnum = ifnum;
@@ -3196,26 +3291,7 @@ static int em28xx_usb_probe(struct usb_interface *interface,
}
}
- /* compute alternate max packet sizes */
dev->num_alt = interface->num_altsetting;
- dev->alt_max_pkt_size = kmalloc(32 * dev->num_alt, GFP_KERNEL);
-
- if (dev->alt_max_pkt_size == NULL) {
- em28xx_errdev("out of memory!\n");
- kfree(dev);
- retval = -ENOMEM;
- goto err;
- }
-
- for (i = 0; i < dev->num_alt ; i++) {
- u16 tmp = le16_to_cpu(interface->altsetting[i].endpoint[isoc_pipe].desc.wMaxPacketSize);
- unsigned int size = tmp & 0x7ff;
-
- if (udev->speed == USB_SPEED_HIGH)
- size = size * hb_mult(tmp);
-
- dev->alt_max_pkt_size[i] = size;
- }
if ((card[nr] >= 0) && (card[nr] < em28xx_bcount))
dev->model = card[nr];
@@ -3226,12 +3302,9 @@ static int em28xx_usb_probe(struct usb_interface *interface,
/* allocate device struct */
mutex_init(&dev->lock);
mutex_lock(&dev->lock);
- retval = em28xx_init_dev(&dev, udev, interface, nr);
+ retval = em28xx_init_dev(dev, udev, interface, nr);
if (retval) {
- mutex_unlock(&dev->lock);
- kfree(dev->alt_max_pkt_size);
- kfree(dev);
- goto err;
+ goto unlock_and_free;
}
request_modules(dev);
@@ -3250,6 +3323,13 @@ static int em28xx_usb_probe(struct usb_interface *interface,
return 0;
+unlock_and_free:
+ mutex_unlock(&dev->lock);
+
+err_free:
+ kfree(dev->alt_max_pkt_size);
+ kfree(dev);
+
err:
clear_bit(nr, &em28xx_devused);
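The rewritten endpoint scan above derives per-altsetting packet sizes directly from each endpoint descriptor instead of hard-coding endpoint 0x82/0x83/0x84 layouts. A small standalone sketch of the wMaxPacketSize arithmetic it relies on; hb_mult() in the driver is assumed to apply the same USB 2.0 high-bandwidth multiplier (bits 12..11 encode extra transactions per microframe):

#include <stdint.h>
#include <stdio.h>

static unsigned payload_per_uframe(uint16_t wMaxPacketSize, int high_speed)
{
	unsigned size = wMaxPacketSize & 0x7ff;		/* base packet size */

	if (high_speed)
		/* 0..2 additional transactions per microframe */
		size *= 1 + ((wMaxPacketSize >> 11) & 0x3);
	return size;
}

int main(void)
{
	/* 0x0c00: 1024 bytes plus one extra transaction -> 2048 bytes */
	printf("%u\n", payload_per_uframe(0x0c00, 1));
	/* 0x03ac: 940 bytes, no additional transactions */
	printf("%u\n", payload_per_uframe(0x03ac, 1));
	return 0;
}

The probe loop keeps the largest digital-endpoint size per altsetting in dev->dvb_max_pkt_size, which is what em28xx-dvb.c now uses instead of the removed em28xx_isoc_dvb_max_packetsize() table.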
diff --git a/drivers/media/video/em28xx/em28xx-core.c b/drivers/media/video/em28xx/em28xx-core.c
index 804a4ab47ac6..0aacc96f9a23 100644
--- a/drivers/media/video/em28xx/em28xx-core.c
+++ b/drivers/media/video/em28xx/em28xx-core.c
@@ -568,7 +568,7 @@ int em28xx_audio_setup(struct em28xx *dev)
em28xx_warn("AC97 features = 0x%04x\n", feat);
/* Try to identify what audio processor we have */
- if ((vid == 0xffffffff) && (feat == 0x6a90))
+ if (((vid == 0xffffffff) || (vid == 0x83847650)) && (feat == 0x6a90))
dev->audio_mode.ac97 = EM28XX_AC97_EM202;
else if ((vid >> 8) == 0x838476)
dev->audio_mode.ac97 = EM28XX_AC97_SIGMATEL;
@@ -1070,7 +1070,8 @@ int em28xx_init_isoc(struct em28xx *dev, int max_packets,
should also be using 'desc.bInterval'
*/
pipe = usb_rcvisocpipe(dev->udev,
- dev->mode == EM28XX_ANALOG_MODE ? 0x82 : 0x84);
+ dev->mode == EM28XX_ANALOG_MODE ?
+ EM28XX_EP_ANALOG : EM28XX_EP_DIGITAL);
usb_fill_int_urb(urb, dev->udev, pipe,
dev->isoc_ctl.transfer_buffer[i], sb_size,
@@ -1108,62 +1109,6 @@ int em28xx_init_isoc(struct em28xx *dev, int max_packets,
}
EXPORT_SYMBOL_GPL(em28xx_init_isoc);
-/* Determine the packet size for the DVB stream for the given device
- (underlying value programmed into the eeprom) */
-int em28xx_isoc_dvb_max_packetsize(struct em28xx *dev)
-{
- unsigned int chip_cfg2;
- unsigned int packet_size;
-
- switch (dev->chip_id) {
- case CHIP_ID_EM2710:
- case CHIP_ID_EM2750:
- case CHIP_ID_EM2800:
- case CHIP_ID_EM2820:
- case CHIP_ID_EM2840:
- case CHIP_ID_EM2860:
- /* No DVB support */
- return -EINVAL;
- case CHIP_ID_EM2870:
- case CHIP_ID_EM2883:
- /* TS max packet size stored in bits 1-0 of R01 */
- chip_cfg2 = em28xx_read_reg(dev, EM28XX_R01_CHIPCFG2);
- switch (chip_cfg2 & EM28XX_CHIPCFG2_TS_PACKETSIZE_MASK) {
- case EM28XX_CHIPCFG2_TS_PACKETSIZE_188:
- packet_size = 188;
- break;
- case EM28XX_CHIPCFG2_TS_PACKETSIZE_376:
- packet_size = 376;
- break;
- case EM28XX_CHIPCFG2_TS_PACKETSIZE_564:
- packet_size = 564;
- break;
- case EM28XX_CHIPCFG2_TS_PACKETSIZE_752:
- packet_size = 752;
- break;
- }
- break;
- case CHIP_ID_EM2874:
- /*
- * FIXME: for now assumes 564 like it was before, but the
- * em2874 code should be added to return the proper value
- */
- packet_size = 564;
- break;
- case CHIP_ID_EM2884:
- case CHIP_ID_EM28174:
- default:
- /*
- * FIXME: same as em2874. 564 was enough for 22 Mbit DVB-T
- * but not enough for 44 Mbit DVB-C.
- */
- packet_size = 752;
- }
-
- return packet_size;
-}
-EXPORT_SYMBOL_GPL(em28xx_isoc_dvb_max_packetsize);
-
/*
* em28xx_wake_i2c()
* configure i2c attached devices
diff --git a/drivers/media/video/em28xx/em28xx-dvb.c b/drivers/media/video/em28xx/em28xx-dvb.c
index cef7a2d409cb..9449423098e0 100644
--- a/drivers/media/video/em28xx/em28xx-dvb.c
+++ b/drivers/media/video/em28xx/em28xx-dvb.c
@@ -44,6 +44,7 @@
#include "drxk.h"
#include "tda10071.h"
#include "a8293.h"
+#include "qt1010.h"
MODULE_DESCRIPTION("driver for em28xx based DVB cards");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
@@ -163,12 +164,12 @@ static int em28xx_start_streaming(struct em28xx_dvb *dvb)
struct em28xx *dev = dvb->adapter.priv;
int max_dvb_packet_size;
- usb_set_interface(dev->udev, 0, 1);
+ usb_set_interface(dev->udev, 0, dev->dvb_alt);
rc = em28xx_set_mode(dev, EM28XX_DIGITAL_MODE);
if (rc < 0)
return rc;
- max_dvb_packet_size = em28xx_isoc_dvb_max_packetsize(dev);
+ max_dvb_packet_size = dev->dvb_max_pkt_size;
if (max_dvb_packet_size < 0)
return max_dvb_packet_size;
dprintk(1, "Using %d buffers each with %d bytes\n",
@@ -302,10 +303,12 @@ static struct zl10353_config em28xx_zl10353_xc3028_no_i2c_gate = {
};
static struct drxd_config em28xx_drxd = {
- .index = 0, .demod_address = 0x70, .demod_revision = 0xa2,
- .demoda_address = 0x00, .pll_address = 0x00,
- .pll_type = DRXD_PLL_NONE, .clock = 12000, .insert_rs_byte = 1,
- .pll_set = NULL, .osc_deviation = NULL, .IF = 42800000,
+ .demod_address = 0x70,
+ .demod_revision = 0xa2,
+ .pll_type = DRXD_PLL_NONE,
+ .clock = 12000,
+ .insert_rs_byte = 1,
+ .IF = 42800000,
.disable_i2c_gate_ctrl = 1,
};
@@ -316,6 +319,14 @@ struct drxk_config terratec_h5_drxk = {
.microcode_name = "dvb-usb-terratec-h5-drxk.fw",
};
+struct drxk_config hauppauge_930c_drxk = {
+ .adr = 0x29,
+ .single_master = 1,
+ .no_i2c_bridge = 1,
+ .microcode_name = "dvb-usb-hauppauge-hvr930c-drxk.fw",
+ .chunk_size = 56,
+};
+
static int drxk_gate_ctrl(struct dvb_frontend *fe, int enable)
{
struct em28xx_dvb *dvb = fe->sec_priv;
@@ -334,6 +345,73 @@ static int drxk_gate_ctrl(struct dvb_frontend *fe, int enable)
return status;
}
+static void hauppauge_hvr930c_init(struct em28xx *dev)
+{
+ int i;
+
+ struct em28xx_reg_seq hauppauge_hvr930c_init[] = {
+ {EM2874_R80_GPIO, 0xff, 0xff, 0x65},
+ {EM2874_R80_GPIO, 0xfb, 0xff, 0x32},
+ {EM2874_R80_GPIO, 0xff, 0xff, 0xb8},
+ { -1, -1, -1, -1},
+ };
+ struct em28xx_reg_seq hauppauge_hvr930c_end[] = {
+ {EM2874_R80_GPIO, 0xef, 0xff, 0x01},
+ {EM2874_R80_GPIO, 0xaf, 0xff, 0x65},
+ {EM2874_R80_GPIO, 0xef, 0xff, 0x76},
+ {EM2874_R80_GPIO, 0xef, 0xff, 0x01},
+ {EM2874_R80_GPIO, 0xcf, 0xff, 0x0b},
+ {EM2874_R80_GPIO, 0xef, 0xff, 0x40},
+
+ {EM2874_R80_GPIO, 0xcf, 0xff, 0x65},
+ {EM2874_R80_GPIO, 0xef, 0xff, 0x65},
+ {EM2874_R80_GPIO, 0xcf, 0xff, 0x0b},
+ {EM2874_R80_GPIO, 0xef, 0xff, 0x65},
+
+ { -1, -1, -1, -1},
+ };
+
+ struct {
+ unsigned char r[4];
+ int len;
+ } regs[] = {
+ {{ 0x06, 0x02, 0x00, 0x31 }, 4},
+ {{ 0x01, 0x02 }, 2},
+ {{ 0x01, 0x02, 0x00, 0xc6 }, 4},
+ {{ 0x01, 0x00 }, 2},
+ {{ 0x01, 0x00, 0xff, 0xaf }, 4},
+ {{ 0x01, 0x00, 0x03, 0xa0 }, 4},
+ {{ 0x01, 0x00 }, 2},
+ {{ 0x01, 0x00, 0x73, 0xaf }, 4},
+ {{ 0x04, 0x00 }, 2},
+ {{ 0x00, 0x04 }, 2},
+ {{ 0x00, 0x04, 0x00, 0x0a }, 4},
+ {{ 0x04, 0x14 }, 2},
+ {{ 0x04, 0x14, 0x00, 0x00 }, 4},
+ };
+
+ em28xx_gpio_set(dev, hauppauge_hvr930c_init);
+ em28xx_write_reg(dev, EM28XX_R06_I2C_CLK, 0x40);
+ msleep(10);
+ em28xx_write_reg(dev, EM28XX_R06_I2C_CLK, 0x44);
+ msleep(10);
+
+ dev->i2c_client.addr = 0x82 >> 1;
+
+ for (i = 0; i < ARRAY_SIZE(regs); i++)
+ i2c_master_send(&dev->i2c_client, regs[i].r, regs[i].len);
+ em28xx_gpio_set(dev, hauppauge_hvr930c_end);
+
+ msleep(100);
+
+ em28xx_write_reg(dev, EM28XX_R06_I2C_CLK, 0x44);
+ msleep(30);
+
+ em28xx_write_reg(dev, EM28XX_R06_I2C_CLK, 0x45);
+ msleep(10);
+
+}
+
static void terratec_h5_init(struct em28xx *dev)
{
int i;
@@ -425,13 +503,6 @@ static struct tda10023_config em28xx_tda10023_config = {
static struct cxd2820r_config em28xx_cxd2820r_config = {
.i2c_address = (0xd8 >> 1),
.ts_mode = CXD2820R_TS_SERIAL,
- .if_dvbt_6 = 3300,
- .if_dvbt_7 = 3500,
- .if_dvbt_8 = 4000,
- .if_dvbt2_6 = 3300,
- .if_dvbt2_7 = 3500,
- .if_dvbt2_8 = 4000,
- .if_dvbc = 5000,
/* enable LNA for DVB-T2 and DVB-C */
.gpio_dvbt2[0] = CXD2820R_GPIO_E | CXD2820R_GPIO_O | CXD2820R_GPIO_L,
@@ -456,6 +527,17 @@ static const struct a8293_config em28xx_a8293_config = {
.i2c_addr = 0x08, /* (0x10 >> 1) */
};
+static struct zl10353_config em28xx_zl10353_no_i2c_gate_dev = {
+ .demod_address = (0x1e >> 1),
+ .disable_i2c_gate_ctrl = 1,
+ .no_tuner = 1,
+ .parallel_ts = 1,
+};
+static struct qt1010_config em28xx_qt1010_config = {
+ .i2c_address = 0x62
+
+};
+
/* ------------------------------------------------------------------ */
static int em28xx_attach_xc3028(u8 addr, struct em28xx *dev)
@@ -708,6 +790,14 @@ static int em28xx_dvb_init(struct em28xx *dev)
goto out_free;
}
break;
+ case EM2870_BOARD_KWORLD_355U:
+ dvb->fe[0] = dvb_attach(zl10353_attach,
+ &em28xx_zl10353_no_i2c_gate_dev,
+ &dev->i2c_adap);
+ if (dvb->fe[0] != NULL)
+ dvb_attach(qt1010_attach, dvb->fe[0],
+ &dev->i2c_adap, &em28xx_qt1010_config);
+ break;
case EM2883_BOARD_KWORLD_HYBRID_330U:
case EM2882_BOARD_EVGA_INDTUBE:
dvb->fe[0] = dvb_attach(s5h1409_attach,
@@ -761,50 +851,72 @@ static int em28xx_dvb_init(struct em28xx *dev)
&dev->i2c_adap, &kworld_a340_config);
break;
case EM28174_BOARD_PCTV_290E:
- /* MFE
- * FE 0 = DVB-T/T2 + FE 1 = DVB-C, both sharing same tuner. */
- /* FE 0 */
dvb->fe[0] = dvb_attach(cxd2820r_attach,
- &em28xx_cxd2820r_config, &dev->i2c_adap, NULL);
+ &em28xx_cxd2820r_config,
+ &dev->i2c_adap,
+ NULL);
if (dvb->fe[0]) {
/* FE 0 attach tuner */
- if (!dvb_attach(tda18271_attach, dvb->fe[0], 0x60,
- &dev->i2c_adap, &em28xx_cxd2820r_tda18271_config)) {
+ if (!dvb_attach(tda18271_attach,
+ dvb->fe[0],
+ 0x60,
+ &dev->i2c_adap,
+ &em28xx_cxd2820r_tda18271_config)) {
+
dvb_frontend_detach(dvb->fe[0]);
result = -EINVAL;
goto out_free;
}
- /* FE 1. This dvb_attach() cannot fail. */
- dvb->fe[1] = dvb_attach(cxd2820r_attach, NULL, NULL,
- dvb->fe[0]);
- dvb->fe[1]->id = 1;
- /* FE 1 attach tuner */
- if (!dvb_attach(tda18271_attach, dvb->fe[1], 0x60,
- &dev->i2c_adap, &em28xx_cxd2820r_tda18271_config)) {
- dvb_frontend_detach(dvb->fe[1]);
- /* leave FE 0 still active */
- }
+ }
+ break;
+ case EM2884_BOARD_HAUPPAUGE_WINTV_HVR_930C:
+ {
+ struct xc5000_config cfg;
+ hauppauge_hvr930c_init(dev);
+
+ dvb->fe[0] = dvb_attach(drxk_attach,
+ &hauppauge_930c_drxk, &dev->i2c_adap);
+ if (!dvb->fe[0]) {
+ result = -EINVAL;
+ goto out_free;
+ }
+ /* FIXME: do we need a pll semaphore? */
+ dvb->fe[0]->sec_priv = dvb;
+ sema_init(&dvb->pll_mutex, 1);
+ dvb->gate_ctrl = dvb->fe[0]->ops.i2c_gate_ctrl;
+ dvb->fe[0]->ops.i2c_gate_ctrl = drxk_gate_ctrl;
- mfe_shared = 1;
+ /* Attach xc5000 */
+ memset(&cfg, 0, sizeof(cfg));
+ cfg.i2c_address = 0x61;
+ cfg.if_khz = 4000;
+
+ if (dvb->fe[0]->ops.i2c_gate_ctrl)
+ dvb->fe[0]->ops.i2c_gate_ctrl(dvb->fe[0], 1);
+ if (!dvb_attach(xc5000_attach, dvb->fe[0], &dev->i2c_adap,
+ &cfg)) {
+ result = -EINVAL;
+ goto out_free;
}
+ if (dvb->fe[0]->ops.i2c_gate_ctrl)
+ dvb->fe[0]->ops.i2c_gate_ctrl(dvb->fe[0], 0);
+
break;
+ }
case EM2884_BOARD_TERRATEC_H5:
+ case EM2884_BOARD_CINERGY_HTC_STICK:
terratec_h5_init(dev);
- dvb->dont_attach_fe1 = 1;
-
- dvb->fe[0] = dvb_attach(drxk_attach, &terratec_h5_drxk, &dev->i2c_adap, &dvb->fe[1]);
+ dvb->fe[0] = dvb_attach(drxk_attach, &terratec_h5_drxk, &dev->i2c_adap);
if (!dvb->fe[0]) {
result = -EINVAL;
goto out_free;
}
-
/* FIXME: do we need a pll semaphore? */
dvb->fe[0]->sec_priv = dvb;
sema_init(&dvb->pll_mutex, 1);
dvb->gate_ctrl = dvb->fe[0]->ops.i2c_gate_ctrl;
dvb->fe[0]->ops.i2c_gate_ctrl = drxk_gate_ctrl;
- dvb->fe[1]->id = 1;
/* Attach tda18271 to DVB-C frontend */
if (dvb->fe[0]->ops.i2c_gate_ctrl)
@@ -816,12 +928,6 @@ static int em28xx_dvb_init(struct em28xx *dev)
if (dvb->fe[0]->ops.i2c_gate_ctrl)
dvb->fe[0]->ops.i2c_gate_ctrl(dvb->fe[0], 0);
- /* Hack - needed by drxk/tda18271c2dd */
- dvb->fe[1]->tuner_priv = dvb->fe[0]->tuner_priv;
- memcpy(&dvb->fe[1]->ops.tuner_ops,
- &dvb->fe[0]->ops.tuner_ops,
- sizeof(dvb->fe[0]->ops.tuner_ops));
-
break;
case EM28174_BOARD_PCTV_460E:
/* attach demod */
@@ -845,6 +951,8 @@ static int em28xx_dvb_init(struct em28xx *dev)
}
/* define general-purpose callback pointer */
dvb->fe[0]->callback = em28xx_tuner_callback;
+ if (dvb->fe[1])
+ dvb->fe[1]->callback = em28xx_tuner_callback;
/* register everything */
result = em28xx_register_dvb(dvb, THIS_MODULE, dev, &dev->udev->dev);
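
Both the HVR-930C and Terratec H5 paths above reroute the frontend's i2c_gate_ctrl through drxk_gate_ctrl() while saving the original op in dvb->gate_ctrl and initializing dvb->pll_mutex. A minimal sketch of that wrapping, assuming the pll_mutex semaphore is what serializes tuner access (illustration only, not part of the patch; the real drxk_gate_ctrl() body is only partly visible in this diff):

static int drxk_gate_ctrl_sketch(struct dvb_frontend *fe, int enable)
{
	struct em28xx_dvb *dvb = fe->sec_priv;
	int status;

	if (enable) {
		/* serialize tuner access while the I2C gate is open */
		down(&dvb->pll_mutex);
		status = dvb->gate_ctrl(fe, 1);
		if (status)
			up(&dvb->pll_mutex);	/* don't hold the lock on failure */
	} else {
		status = dvb->gate_ctrl(fe, 0);
		up(&dvb->pll_mutex);
	}
	return status;
}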
diff --git a/drivers/media/video/em28xx/em28xx-input.c b/drivers/media/video/em28xx/em28xx-input.c
index 679da4804281..2630b265b0e8 100644
--- a/drivers/media/video/em28xx/em28xx-input.c
+++ b/drivers/media/video/em28xx/em28xx-input.c
@@ -306,7 +306,8 @@ static void em28xx_ir_handle_key(struct em28xx_IR *ir)
poll_result.rc_data[0],
poll_result.toggle_bit);
- if (ir->dev->chip_id == CHIP_ID_EM2874)
+ if (ir->dev->chip_id == CHIP_ID_EM2874 ||
+ ir->dev->chip_id == CHIP_ID_EM2884)
/* The em2874 clears the readcount field every time the
register is read. The em2860/2880 datasheet says that it
is supposed to clear the readcount, but it doesn't. So with
@@ -371,13 +372,15 @@ int em28xx_ir_change_protocol(struct rc_dev *rc_dev, u64 rc_type)
case CHIP_ID_EM2883:
ir->get_key = default_polling_getkey;
break;
+ case CHIP_ID_EM2884:
case CHIP_ID_EM2874:
case CHIP_ID_EM28174:
ir->get_key = em2874_polling_getkey;
em28xx_write_regs(dev, EM2874_R50_IR_CONFIG, &ir_config, 1);
break;
default:
- printk("Unrecognized em28xx chip id: IR not supported\n");
+ printk("Unrecognized em28xx chip id 0x%02x: IR not supported\n",
+ dev->chip_id);
rc = -EINVAL;
}
diff --git a/drivers/media/video/em28xx/em28xx-reg.h b/drivers/media/video/em28xx/em28xx-reg.h
index 66f792361b97..2f6268505726 100644
--- a/drivers/media/video/em28xx/em28xx-reg.h
+++ b/drivers/media/video/em28xx/em28xx-reg.h
@@ -12,6 +12,11 @@
#define EM_GPO_2 (1 << 2)
#define EM_GPO_3 (1 << 3)
+/* em28xx endpoints */
+#define EM28XX_EP_ANALOG 0x82
+#define EM28XX_EP_AUDIO 0x83
+#define EM28XX_EP_DIGITAL 0x84
+
/* em2800 registers */
#define EM2800_R08_AUDIOSRC 0x08
diff --git a/drivers/media/video/em28xx/em28xx-video.c b/drivers/media/video/em28xx/em28xx-video.c
index 9b4557a2f6d0..613300b51a9e 100644
--- a/drivers/media/video/em28xx/em28xx-video.c
+++ b/drivers/media/video/em28xx/em28xx-video.c
@@ -1070,6 +1070,10 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
/* the em2800 can only scale down to 50% */
height = height > (3 * maxh / 4) ? maxh : maxh / 2;
width = width > (3 * maxw / 4) ? maxw : maxw / 2;
+ /* MaxPacketSize for em2800 is too small to capture at full resolution;
+ * use half of maxw, as the scaler can only scale down to 50% */
+ if (width == maxw && height == maxh)
+ width /= 2;
} else {
/* width must be even because of the YUYV format
height must be even because of interlacing */
@@ -2503,6 +2507,7 @@ int em28xx_register_analog_devices(struct em28xx *dev)
{
u8 val;
int ret;
+ unsigned int maxw;
printk(KERN_INFO "%s: v4l2 driver version %s\n",
dev->name, EM28XX_VERSION);
@@ -2515,8 +2520,15 @@ int em28xx_register_analog_devices(struct em28xx *dev)
/* Analog specific initialization */
dev->format = &format[0];
+
+ maxw = norm_maxw(dev);
+ /* MaxPacketSize for em2800 is too small to capture at full resolution;
+ * use half of maxw, as the scaler can only scale down to 50% */
+ if (dev->board.is_em2800)
+ maxw /= 2;
+
em28xx_set_video_format(dev, format[0].fourcc,
- norm_maxw(dev), norm_maxh(dev));
+ maxw, norm_maxh(dev));
video_mux(dev, dev->ctl_input);
diff --git a/drivers/media/video/em28xx/em28xx.h b/drivers/media/video/em28xx/em28xx.h
index 2a2cb7ed0014..22e252bcc41e 100644
--- a/drivers/media/video/em28xx/em28xx.h
+++ b/drivers/media/video/em28xx/em28xx.h
@@ -38,6 +38,7 @@
#include <media/videobuf-dvb.h>
#endif
#include "tuner-xc2028.h"
+#include "xc5000.h"
#include "em28xx-reg.h"
/* Boards supported by driver */
@@ -121,6 +122,9 @@
#define EM28174_BOARD_PCTV_290E 78
#define EM2884_BOARD_TERRATEC_H5 79
#define EM28174_BOARD_PCTV_460E 80
+#define EM2884_BOARD_HAUPPAUGE_WINTV_HVR_930C 81
+#define EM2884_BOARD_CINERGY_HTC_STICK 82
+#define EM2860_BOARD_HT_VIDBOX_NW03 83
/* Limits minimum and default number of buffers */
#define EM28XX_MIN_BUF 4
@@ -594,6 +598,8 @@ struct em28xx {
int max_pkt_size; /* max packet size of isoc transaction */
int num_alt; /* Number of alternative settings */
unsigned int *alt_max_pkt_size; /* array of wMaxPacketSize */
+ int dvb_alt; /* alternate for DVB */
+ unsigned int dvb_max_pkt_size; /* wMaxPacketSize for DVB */
struct urb *urb[EM28XX_NUM_BUFS]; /* urb for isoc transfers */
char *transfer_buffer[EM28XX_NUM_BUFS]; /* transfer buffers for isoc
transfer */
@@ -825,7 +831,7 @@ static inline unsigned int norm_maxw(struct em28xx *dev)
if (dev->board.is_webcam)
return dev->sensor_xres;
- if (dev->board.max_range_640_480 || dev->board.is_em2800)
+ if (dev->board.max_range_640_480)
return 640;
return 720;
diff --git a/drivers/media/video/et61x251/et61x251_core.c b/drivers/media/video/et61x251/et61x251_core.c
index 40f214ab924f..5539f09440ac 100644
--- a/drivers/media/video/et61x251/et61x251_core.c
+++ b/drivers/media/video/et61x251/et61x251_core.c
@@ -76,8 +76,8 @@ MODULE_PARM_DESC(video_nr,
"\none and for every other camera."
"\n");
-static short force_munmap[] = {[0 ... ET61X251_MAX_DEVICES-1] =
- ET61X251_FORCE_MUNMAP};
+static bool force_munmap[] = {[0 ... ET61X251_MAX_DEVICES-1] =
+ ET61X251_FORCE_MUNMAP};
module_param_array(force_munmap, bool, NULL, 0444);
MODULE_PARM_DESC(force_munmap,
"\n<0|1[,...]> Force the application to unmap previously"
diff --git a/drivers/media/video/fsl-viu.c b/drivers/media/video/fsl-viu.c
index 27cb197d0bd6..27e3e0c0b219 100644
--- a/drivers/media/video/fsl-viu.c
+++ b/drivers/media/video/fsl-viu.c
@@ -1661,18 +1661,7 @@ static struct platform_driver viu_of_platform_driver = {
},
};
-static int __init viu_init(void)
-{
- return platform_driver_register(&viu_of_platform_driver);
-}
-
-static void __exit viu_exit(void)
-{
- platform_driver_unregister(&viu_of_platform_driver);
-}
-
-module_init(viu_init);
-module_exit(viu_exit);
+module_platform_driver(viu_of_platform_driver);
MODULE_DESCRIPTION("Freescale Video-In(VIU)");
MODULE_AUTHOR("Hongjun Chen");
diff --git a/drivers/media/video/gspca/Kconfig b/drivers/media/video/gspca/Kconfig
index 103af3fe5aa0..dfe268bfa4f8 100644
--- a/drivers/media/video/gspca/Kconfig
+++ b/drivers/media/video/gspca/Kconfig
@@ -77,6 +77,16 @@ config USB_GSPCA_JEILINJ
To compile this driver as a module, choose M here: the
module will be called gspca_jeilinj.
+config USB_GSPCA_JL2005BCD
+ tristate "JL2005B/C/D USB V4L2 driver"
+ depends on VIDEO_V4L2 && USB_GSPCA
+ help
+ Say Y here if you want support for cameras based on the
+ JL2005B, JL2005C, or JL2005D chip.
+
+ To compile this driver as a module, choose M here: the
+ module will be called gspca_jl2005bcd.
+
config USB_GSPCA_KINECT
tristate "Kinect sensor device USB Camera Driver"
depends on VIDEO_V4L2 && USB_GSPCA
diff --git a/drivers/media/video/gspca/Makefile b/drivers/media/video/gspca/Makefile
index f345f494d0f3..79ebe46e1ad7 100644
--- a/drivers/media/video/gspca/Makefile
+++ b/drivers/media/video/gspca/Makefile
@@ -5,6 +5,7 @@ obj-$(CONFIG_USB_GSPCA_CPIA1) += gspca_cpia1.o
obj-$(CONFIG_USB_GSPCA_ETOMS) += gspca_etoms.o
obj-$(CONFIG_USB_GSPCA_FINEPIX) += gspca_finepix.o
obj-$(CONFIG_USB_GSPCA_JEILINJ) += gspca_jeilinj.o
+obj-$(CONFIG_USB_GSPCA_JL2005BCD) += gspca_jl2005bcd.o
obj-$(CONFIG_USB_GSPCA_KINECT) += gspca_kinect.o
obj-$(CONFIG_USB_GSPCA_KONICA) += gspca_konica.o
obj-$(CONFIG_USB_GSPCA_MARS) += gspca_mars.o
@@ -49,6 +50,7 @@ gspca_cpia1-objs := cpia1.o
gspca_etoms-objs := etoms.o
gspca_finepix-objs := finepix.o
gspca_jeilinj-objs := jeilinj.o
+gspca_jl2005bcd-objs := jl2005bcd.o
gspca_kinect-objs := kinect.o
gspca_konica-objs := konica.o
gspca_mars-objs := mars.o
diff --git a/drivers/media/video/gspca/benq.c b/drivers/media/video/gspca/benq.c
index 636627b57dc9..9769f17915c0 100644
--- a/drivers/media/video/gspca/benq.c
+++ b/drivers/media/video/gspca/benq.c
@@ -76,7 +76,6 @@ static int sd_config(struct gspca_dev *gspca_dev,
gspca_dev->cam.cam_mode = vga_mode;
gspca_dev->cam.nmodes = ARRAY_SIZE(vga_mode);
gspca_dev->cam.no_urb_create = 1;
- gspca_dev->cam.reverse_alts = 1;
return 0;
}
@@ -135,13 +134,17 @@ static int sd_start(struct gspca_dev *gspca_dev)
static void sd_stopN(struct gspca_dev *gspca_dev)
{
+ struct usb_interface *intf;
+
reg_w(gspca_dev, 0x003c, 0x0003);
reg_w(gspca_dev, 0x003c, 0x0004);
reg_w(gspca_dev, 0x003c, 0x0005);
reg_w(gspca_dev, 0x003c, 0x0006);
reg_w(gspca_dev, 0x003c, 0x0007);
+
+ intf = usb_ifnum_to_if(gspca_dev->dev, gspca_dev->iface);
usb_set_interface(gspca_dev->dev, gspca_dev->iface,
- gspca_dev->nbalt - 1);
+ intf->num_altsetting - 1);
}
static void sd_pkt_scan(struct gspca_dev *gspca_dev,
diff --git a/drivers/media/video/gspca/gl860/gl860.c b/drivers/media/video/gspca/gl860/gl860.c
index a8f54c20e585..c84e26006fc3 100644
--- a/drivers/media/video/gspca/gl860/gl860.c
+++ b/drivers/media/video/gspca/gl860/gl860.c
@@ -337,7 +337,6 @@ static int sd_config(struct gspca_dev *gspca_dev,
return -1;
cam = &gspca_dev->cam;
- gspca_dev->nbalt = 4;
switch (sd->sensor) {
case ID_MI1320:
diff --git a/drivers/media/video/gspca/gspca.c b/drivers/media/video/gspca/gspca.c
index 881e04c7ffe6..ca5a2b139d0b 100644
--- a/drivers/media/video/gspca/gspca.c
+++ b/drivers/media/video/gspca/gspca.c
@@ -633,23 +633,32 @@ static u32 which_bandwidth(struct gspca_dev *gspca_dev)
u32 bandwidth;
int i;
+ /* get the (max) image size */
i = gspca_dev->curr_mode;
bandwidth = gspca_dev->cam.cam_mode[i].sizeimage;
- /* if the image is compressed, estimate the mean image size */
- if (bandwidth < gspca_dev->cam.cam_mode[i].width *
+ /* if the image is compressed, estimate its mean size */
+ if (!gspca_dev->cam.needs_full_bandwidth &&
+ bandwidth < gspca_dev->cam.cam_mode[i].width *
gspca_dev->cam.cam_mode[i].height)
- bandwidth /= 3;
+ bandwidth = bandwidth * 3 / 8; /* 0.375 */
/* estimate the frame rate */
if (gspca_dev->sd_desc->get_streamparm) {
struct v4l2_streamparm parm;
- parm.parm.capture.timeperframe.denominator = 15;
gspca_dev->sd_desc->get_streamparm(gspca_dev, &parm);
bandwidth *= parm.parm.capture.timeperframe.denominator;
+ bandwidth /= parm.parm.capture.timeperframe.numerator;
} else {
- bandwidth *= 15; /* 15 fps */
+
+ /* don't expect more than 15 fps with USB 1.1 and
+ * image resolution >= 640x480 */
+ if (gspca_dev->width >= 640
+ && gspca_dev->dev->speed == USB_SPEED_FULL)
+ bandwidth *= 15; /* 15 fps */
+ else
+ bandwidth *= 30; /* 30 fps */
}
PDEBUG(D_STREAM, "min bandwidth: %d", bandwidth);
@@ -667,9 +676,8 @@ struct ep_tb_s {
* build the table of the endpoints
* and compute the minimum bandwidth for the image transfer
*/
-static int build_ep_tb(struct gspca_dev *gspca_dev,
+static int build_isoc_ep_tb(struct gspca_dev *gspca_dev,
struct usb_interface *intf,
- int xfer,
struct ep_tb_s *ep_tb)
{
struct usb_host_endpoint *ep;
@@ -687,17 +695,21 @@ static int build_ep_tb(struct gspca_dev *gspca_dev,
ep_tb->bandwidth = 2000 * 2000 * 120;
found = 0;
for (j = 0; j < nbalt; j++) {
- ep = alt_xfer(&intf->altsetting[j], xfer);
+ ep = alt_xfer(&intf->altsetting[j],
+ USB_ENDPOINT_XFER_ISOC);
if (ep == NULL)
continue;
+ if (ep->desc.bInterval == 0) {
+ pr_err("alt %d iso endp with 0 interval\n", j);
+ continue;
+ }
psize = le16_to_cpu(ep->desc.wMaxPacketSize);
- if (!gspca_dev->cam.bulk) /* isoc */
- psize = (psize & 0x07ff) *
- (1 + ((psize >> 11) & 3));
- bandwidth = psize * ep->desc.bInterval * 1000;
+ psize = (psize & 0x07ff) * (1 + ((psize >> 11) & 3));
+ bandwidth = psize * 1000;
if (gspca_dev->dev->speed == USB_SPEED_HIGH
|| gspca_dev->dev->speed == USB_SPEED_SUPER)
bandwidth *= 8;
+ bandwidth /= 1 << (ep->desc.bInterval - 1);
if (bandwidth <= last_bw)
continue;
if (bandwidth < ep_tb->bandwidth) {
@@ -715,6 +727,23 @@ static int build_ep_tb(struct gspca_dev *gspca_dev,
ep_tb++;
}
+ /*
+ * If the camera:
+ * has a usb audio class interface (a built in usb mic); and
+ * is a usb 1 full speed device; and
+ * uses the max full speed iso bandwidth; and
+ * has more than 1 alt setting
+ * then skip the highest alt setting to spare bandwidth for the mic
+ */
+ if (gspca_dev->audio &&
+ gspca_dev->dev->speed == USB_SPEED_FULL &&
+ last_bw >= 1000000 &&
+ i > 1) {
+ PDEBUG(D_STREAM, "dev has usb audio, skipping highest alt");
+ i--;
+ ep_tb--;
+ }
+
/* get the requested bandwidth and start at the highest altsetting */
bandwidth = which_bandwidth(gspca_dev);
ep_tb--;
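
For reference, the per-altsetting figure computed in build_isoc_ep_tb() above reduces to the arithmetic below (illustration only, not part of the patch; the helper name and standalone form are assumptions):

static u32 isoc_ep_bandwidth(u16 max_packet_size, u8 bInterval, bool high_speed)
{
	/* bits 10:0 = packet size, bits 12:11 = extra transactions per (micro)frame */
	u32 psize = (max_packet_size & 0x07ff) * (1 + ((max_packet_size >> 11) & 3));
	u32 bw = psize * 1000;			/* 1000 frames per second */

	if (high_speed)
		bw *= 8;			/* 8 microframes per frame */
	return bw / (1 << (bInterval - 1));	/* account for the polling interval */
}

For example, a high-speed alt setting advertising wMaxPacketSize 0x1400 (3 x 1024 bytes) with bInterval 1 yields 3072 * 8000 = 24576000 bytes/s.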
@@ -790,10 +819,7 @@ static int create_urbs(struct gspca_dev *gspca_dev,
ep->desc.bEndpointAddress);
urb->transfer_flags = URB_ISO_ASAP
| URB_NO_TRANSFER_DMA_MAP;
- if (gspca_dev->dev->speed == USB_SPEED_LOW)
- urb->interval = ep->desc.bInterval;
- else
- urb->interval = 1 << (ep->desc.bInterval - 1);
+ urb->interval = 1 << (ep->desc.bInterval - 1);
urb->complete = isoc_irq;
urb->number_of_packets = npkt;
for (i = 0; i < npkt; i++) {
@@ -838,17 +864,17 @@ static int gspca_init_transfer(struct gspca_dev *gspca_dev)
gspca_dev->usb_err = 0;
/* do the specific subdriver stuff before endpoint selection */
- gspca_dev->alt = 0;
+ intf = usb_ifnum_to_if(gspca_dev->dev, gspca_dev->iface);
+ gspca_dev->alt = gspca_dev->cam.bulk ? intf->num_altsetting : 0;
if (gspca_dev->sd_desc->isoc_init) {
ret = gspca_dev->sd_desc->isoc_init(gspca_dev);
if (ret < 0)
goto unlock;
}
- intf = usb_ifnum_to_if(gspca_dev->dev, gspca_dev->iface);
xfer = gspca_dev->cam.bulk ? USB_ENDPOINT_XFER_BULK
: USB_ENDPOINT_XFER_ISOC;
- /* if the subdriver forced an altsetting, get the endpoint */
+ /* if bulk or the subdriver forced an altsetting, get the endpoint */
if (gspca_dev->alt != 0) {
gspca_dev->alt--; /* (previous version compatibility) */
ep = alt_xfer(&intf->altsetting[gspca_dev->alt], xfer);
@@ -863,7 +889,7 @@ static int gspca_init_transfer(struct gspca_dev *gspca_dev)
/* else, compute the minimum bandwidth
* and build the endpoint table */
- alt_idx = build_ep_tb(gspca_dev, intf, xfer, ep_tb);
+ alt_idx = build_isoc_ep_tb(gspca_dev, intf, ep_tb);
if (alt_idx <= 0) {
pr_err("no transfer endpoint found\n");
ret = -EIO;
@@ -880,7 +906,7 @@ static int gspca_init_transfer(struct gspca_dev *gspca_dev)
for (;;) {
if (alt != gspca_dev->alt) {
alt = gspca_dev->alt;
- if (gspca_dev->nbalt > 1) {
+ if (intf->num_altsetting > 1) {
ret = usb_set_interface(gspca_dev->dev,
gspca_dev->iface,
alt);
@@ -957,7 +983,7 @@ retry:
ret = -EIO;
goto out;
}
- alt = ep_tb[--alt_idx].alt;
+ gspca_dev->alt = ep_tb[--alt_idx].alt;
}
}
out:
@@ -2300,15 +2326,14 @@ int gspca_dev_probe2(struct usb_interface *intf,
}
gspca_dev->dev = dev;
gspca_dev->iface = intf->cur_altsetting->desc.bInterfaceNumber;
- gspca_dev->nbalt = intf->num_altsetting;
/* check if any audio device */
- if (dev->config->desc.bNumInterfaces != 1) {
+ if (dev->actconfig->desc.bNumInterfaces != 1) {
int i;
struct usb_interface *intf2;
- for (i = 0; i < dev->config->desc.bNumInterfaces; i++) {
- intf2 = dev->config->interface[i];
+ for (i = 0; i < dev->actconfig->desc.bNumInterfaces; i++) {
+ intf2 = dev->actconfig->interface[i];
if (intf2 != NULL
&& intf2->altsetting != NULL
&& intf2->altsetting->desc.bInterfaceClass ==
@@ -2389,7 +2414,7 @@ int gspca_dev_probe(struct usb_interface *intf,
}
/* the USB video interface must be the first one */
- if (dev->config->desc.bNumInterfaces != 1
+ if (dev->actconfig->desc.bNumInterfaces != 1
&& intf->cur_altsetting->desc.bInterfaceNumber != 0)
return -ENODEV;
diff --git a/drivers/media/video/gspca/gspca.h b/drivers/media/video/gspca/gspca.h
index e444f16e1497..589009f4496f 100644
--- a/drivers/media/video/gspca/gspca.h
+++ b/drivers/media/video/gspca/gspca.h
@@ -69,7 +69,9 @@ struct cam {
u8 bulk; /* image transfer by 0:isoc / 1:bulk */
u8 npkt; /* number of packets in an ISOC message
* 0 is the default value: 32 packets */
- u8 reverse_alts; /* Alt settings are in high to low order */
+ u8 needs_full_bandwidth;/* Set this flag to notify the bandwidth calc.
+ * code that the cam fills all image buffers to
+ * the max, even when using compression. */
};
struct gspca_dev;
@@ -208,7 +210,6 @@ struct gspca_dev {
char memory; /* memory type (V4L2_MEMORY_xxx) */
__u8 iface; /* USB interface number */
__u8 alt; /* USB alternate setting */
- __u8 nbalt; /* number of USB alternate settings */
u8 audio; /* presence of audio device */
};
diff --git a/drivers/media/video/gspca/jl2005bcd.c b/drivers/media/video/gspca/jl2005bcd.c
new file mode 100644
index 000000000000..53f58ef367cf
--- /dev/null
+++ b/drivers/media/video/gspca/jl2005bcd.c
@@ -0,0 +1,554 @@
+/*
+ * Jeilin JL2005B/C/D library
+ *
+ * Copyright (C) 2011 Theodore Kilgore <kilgota@auburn.edu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#define MODULE_NAME "jl2005bcd"
+
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+#include "gspca.h"
+
+
+MODULE_AUTHOR("Theodore Kilgore <kilgota@auburn.edu>");
+MODULE_DESCRIPTION("JL2005B/C/D USB Camera Driver");
+MODULE_LICENSE("GPL");
+
+/* Default timeouts, in ms */
+#define JL2005C_CMD_TIMEOUT 500
+#define JL2005C_DATA_TIMEOUT 1000
+
+/* Maximum transfer size to use. */
+#define JL2005C_MAX_TRANSFER 0x200
+#define FRAME_HEADER_LEN 16
+
+
+/* specific webcam descriptor */
+struct sd {
+ struct gspca_dev gspca_dev; /* !! must be the first item */
+ unsigned char firmware_id[6];
+ const struct v4l2_pix_format *cap_mode;
+ /* Driver stuff */
+ struct work_struct work_struct;
+ struct workqueue_struct *work_thread;
+ u8 frame_brightness;
+ int block_size; /* block size of camera */
+ int vga; /* 1 if vga cam, 0 if cif cam */
+};
+
+
+/* Camera has two resolution settings. What they are depends on model. */
+static const struct v4l2_pix_format cif_mode[] = {
+ {176, 144, V4L2_PIX_FMT_JL2005BCD, V4L2_FIELD_NONE,
+ .bytesperline = 176,
+ .sizeimage = 176 * 144,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .priv = 0},
+ {352, 288, V4L2_PIX_FMT_JL2005BCD, V4L2_FIELD_NONE,
+ .bytesperline = 352,
+ .sizeimage = 352 * 288,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .priv = 0},
+};
+
+static const struct v4l2_pix_format vga_mode[] = {
+ {320, 240, V4L2_PIX_FMT_JL2005BCD, V4L2_FIELD_NONE,
+ .bytesperline = 320,
+ .sizeimage = 320 * 240,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .priv = 0},
+ {640, 480, V4L2_PIX_FMT_JL2005BCD, V4L2_FIELD_NONE,
+ .bytesperline = 640,
+ .sizeimage = 640 * 480,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .priv = 0},
+};
+
+/*
+ * cam uses endpoint 0x03 to send commands, 0x84 for read commands,
+ * and 0x82 for bulk data transfer.
+ */
+
+/* All commands are two bytes only */
+static int jl2005c_write2(struct gspca_dev *gspca_dev, unsigned char *command)
+{
+ int retval;
+
+ memcpy(gspca_dev->usb_buf, command, 2);
+ retval = usb_bulk_msg(gspca_dev->dev,
+ usb_sndbulkpipe(gspca_dev->dev, 3),
+ gspca_dev->usb_buf, 2, NULL, 500);
+ if (retval < 0)
+ pr_err("command write [%02x] error %d\n",
+ gspca_dev->usb_buf[0], retval);
+ return retval;
+}
+
+/* Response to a command is one byte in usb_buf[0], only if requested. */
+static int jl2005c_read1(struct gspca_dev *gspca_dev)
+{
+ int retval;
+
+ retval = usb_bulk_msg(gspca_dev->dev,
+ usb_rcvbulkpipe(gspca_dev->dev, 0x84),
+ gspca_dev->usb_buf, 1, NULL, 500);
+ if (retval < 0)
+ pr_err("read command [0x%02x] error %d\n",
+ gspca_dev->usb_buf[0], retval);
+ return retval;
+}
+
+/* Response appears in gspca_dev->usb_buf[0] */
+static int jl2005c_read_reg(struct gspca_dev *gspca_dev, unsigned char reg)
+{
+ int retval;
+
+ static u8 instruction[2] = {0x95, 0x00};
+ /* put register to read in byte 1 */
+ instruction[1] = reg;
+ /* Send the read request */
+ retval = jl2005c_write2(gspca_dev, instruction);
+ if (retval < 0)
+ return retval;
+ retval = jl2005c_read1(gspca_dev);
+
+ return retval;
+}
+
+static int jl2005c_start_new_frame(struct gspca_dev *gspca_dev)
+{
+ int i;
+ int retval;
+ int frame_brightness = 0;
+
+ static u8 instruction[2] = {0x7f, 0x01};
+
+ retval = jl2005c_write2(gspca_dev, instruction);
+ if (retval < 0)
+ return retval;
+
+ i = 0;
+ while (i < 20 && !frame_brightness) {
+ /* If we tried 20 times, give up. */
+ retval = jl2005c_read_reg(gspca_dev, 0x7e);
+ if (retval < 0)
+ return retval;
+ frame_brightness = gspca_dev->usb_buf[0];
+ retval = jl2005c_read_reg(gspca_dev, 0x7d);
+ if (retval < 0)
+ return retval;
+ i++;
+ }
+ PDEBUG(D_FRAM, "frame_brightness is 0x%02x", frame_brightness);
+ return retval;
+}
+
+static int jl2005c_write_reg(struct gspca_dev *gspca_dev, unsigned char reg,
+ unsigned char value)
+{
+ int retval;
+ u8 instruction[2];
+
+ instruction[0] = reg;
+ instruction[1] = value;
+
+ retval = jl2005c_write2(gspca_dev, instruction);
+ if (retval < 0)
+ return retval;
+
+ return retval;
+}
+
+static int jl2005c_get_firmware_id(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *)gspca_dev;
+ int i = 0;
+ int retval = -1;
+ unsigned char regs_to_read[] = {0x57, 0x02, 0x03, 0x5d, 0x5e, 0x5f};
+
+ PDEBUG(D_PROBE, "Running jl2005c_get_firmware_id");
+ /* Read the first ID byte once for warmup */
+ retval = jl2005c_read_reg(gspca_dev, regs_to_read[0]);
+ PDEBUG(D_PROBE, "response is %02x", gspca_dev->usb_buf[0]);
+ if (retval < 0)
+ return retval;
+ /* Now actually get the ID string */
+ for (i = 0; i < 6; i++) {
+ retval = jl2005c_read_reg(gspca_dev, regs_to_read[i]);
+ if (retval < 0)
+ return retval;
+ sd->firmware_id[i] = gspca_dev->usb_buf[0];
+ }
+ PDEBUG(D_PROBE, "firmware ID is %02x%02x%02x%02x%02x%02x",
+ sd->firmware_id[0],
+ sd->firmware_id[1],
+ sd->firmware_id[2],
+ sd->firmware_id[3],
+ sd->firmware_id[4],
+ sd->firmware_id[5]);
+ return 0;
+}
+
+static int jl2005c_stream_start_vga_lg(struct gspca_dev *gspca_dev)
+{
+ int i;
+ int retval = -1;
+ static u8 instruction[][2] = {
+ {0x05, 0x00},
+ {0x7c, 0x00},
+ {0x7d, 0x18},
+ {0x02, 0x00},
+ {0x01, 0x00},
+ {0x04, 0x52},
+ };
+
+ for (i = 0; i < ARRAY_SIZE(instruction); i++) {
+ msleep(60);
+ retval = jl2005c_write2(gspca_dev, instruction[i]);
+ if (retval < 0)
+ return retval;
+ }
+ msleep(60);
+ return retval;
+}
+
+static int jl2005c_stream_start_vga_small(struct gspca_dev *gspca_dev)
+{
+ int i;
+ int retval = -1;
+ static u8 instruction[][2] = {
+ {0x06, 0x00},
+ {0x7c, 0x00},
+ {0x7d, 0x1a},
+ {0x02, 0x00},
+ {0x01, 0x00},
+ {0x04, 0x52},
+ };
+
+ for (i = 0; i < ARRAY_SIZE(instruction); i++) {
+ msleep(60);
+ retval = jl2005c_write2(gspca_dev, instruction[i]);
+ if (retval < 0)
+ return retval;
+ }
+ msleep(60);
+ return retval;
+}
+
+static int jl2005c_stream_start_cif_lg(struct gspca_dev *gspca_dev)
+{
+ int i;
+ int retval = -1;
+ static u8 instruction[][2] = {
+ {0x05, 0x00},
+ {0x7c, 0x00},
+ {0x7d, 0x30},
+ {0x02, 0x00},
+ {0x01, 0x00},
+ {0x04, 0x42},
+ };
+
+ for (i = 0; i < ARRAY_SIZE(instruction); i++) {
+ msleep(60);
+ retval = jl2005c_write2(gspca_dev, instruction[i]);
+ if (retval < 0)
+ return retval;
+ }
+ msleep(60);
+ return retval;
+}
+
+static int jl2005c_stream_start_cif_small(struct gspca_dev *gspca_dev)
+{
+ int i;
+ int retval = -1;
+ static u8 instruction[][2] = {
+ {0x06, 0x00},
+ {0x7c, 0x00},
+ {0x7d, 0x32},
+ {0x02, 0x00},
+ {0x01, 0x00},
+ {0x04, 0x42},
+ };
+
+ for (i = 0; i < ARRAY_SIZE(instruction); i++) {
+ msleep(60);
+ retval = jl2005c_write2(gspca_dev, instruction[i]);
+ if (retval < 0)
+ return retval;
+ }
+ msleep(60);
+ return retval;
+}
+
+
+static int jl2005c_stop(struct gspca_dev *gspca_dev)
+{
+ int retval;
+
+ retval = jl2005c_write_reg(gspca_dev, 0x07, 0x00);
+ return retval;
+}
+
+/* This function is called as a workqueue function and runs whenever the camera
+ * is streaming data. Because it is a workqueue function it is allowed to sleep
+ * so we can use synchronous USB calls. To avoid possible collisions with other
+ * threads attempting to use the camera's USB interface the gspca usb_lock is
+ * used when performing the one USB control operation inside the workqueue,
+ * which tells the camera to close the stream. In practice the only thing
+ * which needs to be protected against is the usb_set_interface call that
+ * gspca makes during stream_off. Otherwise the camera doesn't provide any
+ * controls that the user could try to change.
+ */
+static void jl2005c_dostream(struct work_struct *work)
+{
+ struct sd *dev = container_of(work, struct sd, work_struct);
+ struct gspca_dev *gspca_dev = &dev->gspca_dev;
+ int bytes_left = 0; /* bytes remaining in current frame. */
+ int data_len; /* size to use for the next read. */
+ int header_read = 0;
+ unsigned char header_sig[2] = {0x4a, 0x4c};
+ int act_len;
+ int packet_type;
+ int ret;
+ u8 *buffer;
+
+ buffer = kmalloc(JL2005C_MAX_TRANSFER, GFP_KERNEL | GFP_DMA);
+ if (!buffer) {
+ pr_err("Couldn't allocate USB buffer\n");
+ goto quit_stream;
+ }
+
+ while (gspca_dev->present && gspca_dev->streaming) {
+ /* Check if this is a new frame. If so, start the frame first */
+ if (!header_read) {
+ mutex_lock(&gspca_dev->usb_lock);
+ ret = jl2005c_start_new_frame(gspca_dev);
+ mutex_unlock(&gspca_dev->usb_lock);
+ if (ret < 0)
+ goto quit_stream;
+ ret = usb_bulk_msg(gspca_dev->dev,
+ usb_rcvbulkpipe(gspca_dev->dev, 0x82),
+ buffer, JL2005C_MAX_TRANSFER, &act_len,
+ JL2005C_DATA_TIMEOUT);
+ PDEBUG(D_PACK,
+ "Got %d bytes out of %d for header",
+ act_len, JL2005C_MAX_TRANSFER);
+ if (ret < 0 || act_len < JL2005C_MAX_TRANSFER)
+ goto quit_stream;
+ /* Check whether we actually got the first block */
+ if (memcmp(header_sig, buffer, 2) != 0) {
+ pr_err("First block is not the first block\n");
+ goto quit_stream;
+ }
+ /* total size to fetch is byte 7 times blocksize,
+ * of which we already got act_len */
+ bytes_left = buffer[0x07] * dev->block_size - act_len;
+ PDEBUG(D_PACK, "bytes_left = 0x%x", bytes_left);
+ /* We keep the header. It has other information, too.*/
+ packet_type = FIRST_PACKET;
+ gspca_frame_add(gspca_dev, packet_type,
+ buffer, act_len);
+ header_read = 1;
+ }
+ while (bytes_left > 0 && gspca_dev->present) {
+ data_len = bytes_left > JL2005C_MAX_TRANSFER ?
+ JL2005C_MAX_TRANSFER : bytes_left;
+ ret = usb_bulk_msg(gspca_dev->dev,
+ usb_rcvbulkpipe(gspca_dev->dev, 0x82),
+ buffer, data_len, &act_len,
+ JL2005C_DATA_TIMEOUT);
+ if (ret < 0 || act_len < data_len)
+ goto quit_stream;
+ PDEBUG(D_PACK,
+ "Got %d bytes out of %d for frame",
+ data_len, bytes_left);
+ bytes_left -= data_len;
+ if (bytes_left == 0) {
+ packet_type = LAST_PACKET;
+ header_read = 0;
+ } else
+ packet_type = INTER_PACKET;
+ gspca_frame_add(gspca_dev, packet_type,
+ buffer, data_len);
+ }
+ }
+quit_stream:
+ if (gspca_dev->present) {
+ mutex_lock(&gspca_dev->usb_lock);
+ jl2005c_stop(gspca_dev);
+ mutex_unlock(&gspca_dev->usb_lock);
+ }
+ kfree(buffer);
+}
+
+/* This function is called at probe time */
+static int sd_config(struct gspca_dev *gspca_dev,
+ const struct usb_device_id *id)
+{
+ struct cam *cam;
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ cam = &gspca_dev->cam;
+ /* We don't use the buffer gspca allocates so make it small. */
+ cam->bulk_size = 64;
+ cam->bulk = 1;
+ /* For the rest, the camera needs to be detected */
+ jl2005c_get_firmware_id(gspca_dev);
+ /* Here are some known firmware IDs
+ * First some JL2005B cameras
+ * {0x41, 0x07, 0x04, 0x2c, 0xe8, 0xf2} Sakar KidzCam
+ * {0x45, 0x02, 0x08, 0xb9, 0x00, 0xd2} No-name JL2005B
+ * JL2005C cameras
+ * {0x01, 0x0c, 0x16, 0x10, 0xf8, 0xc8} Argus DC-1512
+ * {0x12, 0x04, 0x03, 0xc0, 0x00, 0xd8} ICarly
+ * {0x86, 0x08, 0x05, 0x02, 0x00, 0xd4} Jazz
+ *
+ * Based upon this scanty evidence, we can detect a CIF camera by
+ * testing byte 0 for 0x4x.
+ */
+ if ((sd->firmware_id[0] & 0xf0) == 0x40) {
+ cam->cam_mode = cif_mode;
+ cam->nmodes = ARRAY_SIZE(cif_mode);
+ sd->block_size = 0x80;
+ } else {
+ cam->cam_mode = vga_mode;
+ cam->nmodes = ARRAY_SIZE(vga_mode);
+ sd->block_size = 0x200;
+ }
+
+ INIT_WORK(&sd->work_struct, jl2005c_dostream);
+
+ return 0;
+}
+
+/* this function is called at probe and resume time */
+static int sd_init(struct gspca_dev *gspca_dev)
+{
+ return 0;
+}
+
+static int sd_start(struct gspca_dev *gspca_dev)
+{
+
+ struct sd *sd = (struct sd *) gspca_dev;
+ sd->cap_mode = gspca_dev->cam.cam_mode;
+
+ switch (gspca_dev->width) {
+ case 640:
+ PDEBUG(D_STREAM, "Start streaming at vga resolution");
+ jl2005c_stream_start_vga_lg(gspca_dev);
+ break;
+ case 320:
+ PDEBUG(D_STREAM, "Start streaming at qvga resolution");
+ jl2005c_stream_start_vga_small(gspca_dev);
+ break;
+ case 352:
+ PDEBUG(D_STREAM, "Start streaming at cif resolution");
+ jl2005c_stream_start_cif_lg(gspca_dev);
+ break;
+ case 176:
+ PDEBUG(D_STREAM, "Start streaming at qcif resolution");
+ jl2005c_stream_start_cif_small(gspca_dev);
+ break;
+ default:
+ pr_err("Unknown resolution specified\n");
+ return -1;
+ }
+
+ /* Start the workqueue function to do the streaming */
+ sd->work_thread = create_singlethread_workqueue(MODULE_NAME);
+ queue_work(sd->work_thread, &sd->work_struct);
+
+ return 0;
+}
+
+/* called on streamoff with alt==0 and on disconnect */
+/* the usb_lock is held at entry - restore on exit */
+static void sd_stop0(struct gspca_dev *gspca_dev)
+{
+ struct sd *dev = (struct sd *) gspca_dev;
+
+ /* wait for the work queue to terminate */
+ mutex_unlock(&gspca_dev->usb_lock);
+ /* This waits for jl2005c_dostream to finish */
+ destroy_workqueue(dev->work_thread);
+ dev->work_thread = NULL;
+ mutex_lock(&gspca_dev->usb_lock);
+}
+
+
+
+/* sub-driver description */
+static const struct sd_desc sd_desc = {
+ .name = MODULE_NAME,
+ /* .ctrls = none have been detected */
+ /* .nctrls = ARRAY_SIZE(sd_ctrls), */
+ .config = sd_config,
+ .init = sd_init,
+ .start = sd_start,
+ .stop0 = sd_stop0,
+};
+
+/* -- module initialisation -- */
+static const __devinitdata struct usb_device_id device_table[] = {
+ {USB_DEVICE(0x0979, 0x0227)},
+ {}
+};
+MODULE_DEVICE_TABLE(usb, device_table);
+
+/* -- device connect -- */
+static int sd_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd),
+ THIS_MODULE);
+}
+
+static struct usb_driver sd_driver = {
+ .name = MODULE_NAME,
+ .id_table = device_table,
+ .probe = sd_probe,
+ .disconnect = gspca_disconnect,
+#ifdef CONFIG_PM
+ .suspend = gspca_suspend,
+ .resume = gspca_resume,
+#endif
+};
+
+/* -- module insert / remove -- */
+static int __init sd_mod_init(void)
+{
+ int ret;
+
+ ret = usb_register(&sd_driver);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+static void __exit sd_mod_exit(void)
+{
+ usb_deregister(&sd_driver);
+}
+
+module_init(sd_mod_init);
+module_exit(sd_mod_exit);
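
As a concrete illustration of the frame-size arithmetic in jl2005c_dostream() above (the numbers are hypothetical, not taken from a real capture): a VGA-class camera uses a block_size of 0x200, so a header whose byte 7 reads 0x40 announces a 0x40 * 0x200 = 32768-byte frame; after the 512-byte header read, bytes_left is 32256, which is then fetched in JL2005C_MAX_TRANSFER (512-byte) chunks and passed to gspca_frame_add() as INTER_PACKET data until the LAST_PACKET closes the frame.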
diff --git a/drivers/media/video/gspca/konica.c b/drivers/media/video/gspca/konica.c
index b1da7f4096c8..f0c0d74dfe92 100644
--- a/drivers/media/video/gspca/konica.c
+++ b/drivers/media/video/gspca/konica.c
@@ -247,9 +247,6 @@ static int sd_config(struct gspca_dev *gspca_dev,
gspca_dev->cam.cam_mode = vga_mode;
gspca_dev->cam.nmodes = ARRAY_SIZE(vga_mode);
gspca_dev->cam.no_urb_create = 1;
- /* The highest alt setting has an isoc packetsize of 0, so we
- don't want to use it */
- gspca_dev->nbalt--;
sd->brightness = BRIGHTNESS_DEFAULT;
sd->contrast = CONTRAST_DEFAULT;
diff --git a/drivers/media/video/gspca/m5602/m5602_core.c b/drivers/media/video/gspca/m5602/m5602_core.c
index 9fe3816b2aa0..0c4493675438 100644
--- a/drivers/media/video/gspca/m5602/m5602_core.c
+++ b/drivers/media/video/gspca/m5602/m5602_core.c
@@ -27,8 +27,8 @@
/* Kernel module parameters */
int force_sensor;
-static int dump_bridge;
-int dump_sensor;
+static bool dump_bridge;
+bool dump_sensor;
static const struct usb_device_id m5602_table[] = {
{USB_DEVICE(0x0402, 0x5602)},
diff --git a/drivers/media/video/gspca/m5602/m5602_mt9m111.h b/drivers/media/video/gspca/m5602/m5602_mt9m111.h
index b1f0c492036a..8c672b5c8c6a 100644
--- a/drivers/media/video/gspca/m5602/m5602_mt9m111.h
+++ b/drivers/media/video/gspca/m5602/m5602_mt9m111.h
@@ -106,7 +106,7 @@
/* Kernel module parameters */
extern int force_sensor;
-extern int dump_sensor;
+extern bool dump_sensor;
int mt9m111_probe(struct sd *sd);
int mt9m111_init(struct sd *sd);
diff --git a/drivers/media/video/gspca/m5602/m5602_ov7660.h b/drivers/media/video/gspca/m5602/m5602_ov7660.h
index 2efd607987ec..2b6a13b508f7 100644
--- a/drivers/media/video/gspca/m5602/m5602_ov7660.h
+++ b/drivers/media/video/gspca/m5602/m5602_ov7660.h
@@ -86,7 +86,7 @@
/* Kernel module parameters */
extern int force_sensor;
-extern int dump_sensor;
+extern bool dump_sensor;
int ov7660_probe(struct sd *sd);
int ov7660_init(struct sd *sd);
diff --git a/drivers/media/video/gspca/m5602/m5602_ov9650.h b/drivers/media/video/gspca/m5602/m5602_ov9650.h
index da9a129b739d..f7aa5bf68983 100644
--- a/drivers/media/video/gspca/m5602/m5602_ov9650.h
+++ b/drivers/media/video/gspca/m5602/m5602_ov9650.h
@@ -135,7 +135,7 @@
/* Kernel module parameters */
extern int force_sensor;
-extern int dump_sensor;
+extern bool dump_sensor;
int ov9650_probe(struct sd *sd);
int ov9650_init(struct sd *sd);
diff --git a/drivers/media/video/gspca/m5602/m5602_po1030.h b/drivers/media/video/gspca/m5602/m5602_po1030.h
index 338359596398..81a2bcb88fe3 100644
--- a/drivers/media/video/gspca/m5602/m5602_po1030.h
+++ b/drivers/media/video/gspca/m5602/m5602_po1030.h
@@ -147,7 +147,7 @@
/* Kernel module parameters */
extern int force_sensor;
-extern int dump_sensor;
+extern bool dump_sensor;
int po1030_probe(struct sd *sd);
int po1030_init(struct sd *sd);
diff --git a/drivers/media/video/gspca/m5602/m5602_s5k4aa.h b/drivers/media/video/gspca/m5602/m5602_s5k4aa.h
index 8cc7a3f6da72..8e0035e731c7 100644
--- a/drivers/media/video/gspca/m5602/m5602_s5k4aa.h
+++ b/drivers/media/video/gspca/m5602/m5602_s5k4aa.h
@@ -65,7 +65,7 @@
/* Kernel module parameters */
extern int force_sensor;
-extern int dump_sensor;
+extern bool dump_sensor;
int s5k4aa_probe(struct sd *sd);
int s5k4aa_init(struct sd *sd);
diff --git a/drivers/media/video/gspca/m5602/m5602_s5k83a.h b/drivers/media/video/gspca/m5602/m5602_s5k83a.h
index 80a63a236e24..79952247b534 100644
--- a/drivers/media/video/gspca/m5602/m5602_s5k83a.h
+++ b/drivers/media/video/gspca/m5602/m5602_s5k83a.h
@@ -41,7 +41,7 @@
/* Kernel module parameters */
extern int force_sensor;
-extern int dump_sensor;
+extern bool dump_sensor;
int s5k83a_probe(struct sd *sd);
int s5k83a_init(struct sd *sd);
diff --git a/drivers/media/video/gspca/mars.c b/drivers/media/video/gspca/mars.c
index 5c2ea05c46b4..b0231465afae 100644
--- a/drivers/media/video/gspca/mars.c
+++ b/drivers/media/video/gspca/mars.c
@@ -263,7 +263,6 @@ static int sd_config(struct gspca_dev *gspca_dev,
cam->nmodes = ARRAY_SIZE(vga_mode);
cam->ctrls = sd->ctrls;
sd->quality = QUALITY_DEF;
- gspca_dev->nbalt = 9; /* use the altsetting 08 */
return 0;
}
diff --git a/drivers/media/video/gspca/nw80x.c b/drivers/media/video/gspca/nw80x.c
index d4bec9321771..7167cac7359c 100644
--- a/drivers/media/video/gspca/nw80x.c
+++ b/drivers/media/video/gspca/nw80x.c
@@ -1763,8 +1763,8 @@ static int sd_config(struct gspca_dev *gspca_dev,
if ((unsigned) webcam >= NWEBCAMS)
webcam = 0;
sd->webcam = webcam;
- gspca_dev->cam.reverse_alts = 1;
gspca_dev->cam.ctrls = sd->ctrls;
+ gspca_dev->cam.needs_full_bandwidth = 1;
sd->ag_cnt = -1;
/*
diff --git a/drivers/media/video/gspca/ov519.c b/drivers/media/video/gspca/ov519.c
index 08b8ce1dee18..739e8a2a2d30 100644
--- a/drivers/media/video/gspca/ov519.c
+++ b/drivers/media/video/gspca/ov519.c
@@ -3348,7 +3348,6 @@ static int sd_config(struct gspca_dev *gspca_dev,
case BRIDGE_W9968CF:
cam->cam_mode = w9968cf_vga_mode;
cam->nmodes = ARRAY_SIZE(w9968cf_vga_mode);
- cam->reverse_alts = 1;
break;
}
@@ -3684,8 +3683,8 @@ static void ov511_mode_init_regs(struct sd *sd)
/* Check if we have enough bandwidth to disable compression */
fps = (interlaced ? 60 : 30) / (sd->clockdiv + 1) + 1;
needed = fps * sd->gspca_dev.width * sd->gspca_dev.height * 3 / 2;
- /* 1400 is a conservative estimate of the max nr of isoc packets/sec */
- if (needed > 1400 * packet_size) {
+ /* 1000 isoc packets/sec */
+ if (needed > 1000 * packet_size) {
/* Enable Y and UV quantization and compression */
reg_w(sd, R511_COMP_EN, 0x07);
reg_w(sd, R511_COMP_LUT_EN, 0x03);
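
To put the new threshold in perspective (figures are illustrative, not measured): an uncompressed 640x480 YUV420 stream at the 31 fps the code computes for clockdiv 0 needs 31 * 640 * 480 * 3 / 2 = 14284800 bytes/s, while even a maximal 1023-byte USB 1.1 isoc packet gives only about 1000 * 1023 = 1023000 bytes/s, so compression ends up enabled for essentially every full-resolution mode.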
diff --git a/drivers/media/video/gspca/ov534_9.c b/drivers/media/video/gspca/ov534_9.c
index f30060d50633..fbfa02affa13 100644
--- a/drivers/media/video/gspca/ov534_9.c
+++ b/drivers/media/video/gspca/ov534_9.c
@@ -71,6 +71,7 @@ struct sd {
enum sensors {
SENSOR_OV965x, /* ov9657 */
SENSOR_OV971x, /* ov9712 */
+ SENSOR_OV562x, /* ov5621 */
NSENSORS
};
@@ -207,6 +208,14 @@ static const struct v4l2_pix_format ov971x_mode[] = {
}
};
+static const struct v4l2_pix_format ov562x_mode[] = {
+ {2592, 1680, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
+ .bytesperline = 2592,
+ .sizeimage = 2592 * 1680,
+ .colorspace = V4L2_COLORSPACE_SRGB
+ }
+};
+
static const u8 bridge_init[][2] = {
{0x88, 0xf8},
{0x89, 0xff},
@@ -830,6 +839,124 @@ static const u8 ov965x_start_2_sxga[][2] = {
{0xa3, 0x41}, /* bd60 */
};
+static const u8 ov562x_init[][2] = {
+ {0x88, 0x20},
+ {0x89, 0x0a},
+ {0x8a, 0x90},
+ {0x8b, 0x06},
+ {0x8c, 0x01},
+ {0x8d, 0x10},
+ {0x1c, 0x00},
+ {0x1d, 0x48},
+ {0x1d, 0x00},
+ {0x1d, 0xff},
+ {0x1c, 0x0a},
+ {0x1d, 0x2e},
+ {0x1d, 0x1e},
+};
+
+static const u8 ov562x_init_2[][2] = {
+ {0x12, 0x80},
+ {0x11, 0x41},
+ {0x13, 0x00},
+ {0x10, 0x1e},
+ {0x3b, 0x07},
+ {0x5b, 0x40},
+ {0x39, 0x07},
+ {0x53, 0x02},
+ {0x54, 0x60},
+ {0x04, 0x20},
+ {0x27, 0x04},
+ {0x3d, 0x40},
+ {0x36, 0x00},
+ {0xc5, 0x04},
+ {0x4e, 0x00},
+ {0x4f, 0x93},
+ {0x50, 0x7b},
+ {0xca, 0x0c},
+ {0xcb, 0x0f},
+ {0x39, 0x07},
+ {0x4a, 0x10},
+ {0x3e, 0x0a},
+ {0x3d, 0x00},
+ {0x0c, 0x38},
+ {0x38, 0x90},
+ {0x46, 0x30},
+ {0x4f, 0x93},
+ {0x50, 0x7b},
+ {0xab, 0x00},
+ {0xca, 0x0c},
+ {0xcb, 0x0f},
+ {0x37, 0x02},
+ {0x44, 0x48},
+ {0x8d, 0x44},
+ {0x2a, 0x00},
+ {0x2b, 0x00},
+ {0x32, 0x00},
+ {0x38, 0x90},
+ {0x53, 0x02},
+ {0x54, 0x60},
+ {0x12, 0x00},
+ {0x17, 0x12},
+ {0x18, 0xb4},
+ {0x19, 0x0c},
+ {0x1a, 0xf4},
+ {0x03, 0x4a},
+ {0x89, 0x20},
+ {0x83, 0x80},
+ {0xb7, 0x9d},
+ {0xb6, 0x11},
+ {0xb5, 0x55},
+ {0xb4, 0x00},
+ {0xa9, 0xf0},
+ {0xa8, 0x0a},
+ {0xb8, 0xf0},
+ {0xb9, 0xf0},
+ {0xba, 0xf0},
+ {0x81, 0x07},
+ {0x63, 0x44},
+ {0x13, 0xc7},
+ {0x14, 0x60},
+ {0x33, 0x75},
+ {0x2c, 0x00},
+ {0x09, 0x00},
+ {0x35, 0x30},
+ {0x27, 0x04},
+ {0x3c, 0x07},
+ {0x3a, 0x0a},
+ {0x3b, 0x07},
+ {0x01, 0x40},
+ {0x02, 0x40},
+ {0x16, 0x40},
+ {0x52, 0xb0},
+ {0x51, 0x83},
+ {0x21, 0xbb},
+ {0x22, 0x10},
+ {0x23, 0x03},
+ {0x35, 0x38},
+ {0x20, 0x90},
+ {0x28, 0x30},
+ {0x73, 0xe1},
+ {0x6c, 0x00},
+ {0x6d, 0x80},
+ {0x6e, 0x00},
+ {0x70, 0x04},
+ {0x71, 0x00},
+ {0x8d, 0x04},
+ {0x64, 0x00},
+ {0x65, 0x00},
+ {0x66, 0x00},
+ {0x67, 0x00},
+ {0x68, 0x00},
+ {0x69, 0x00},
+ {0x6a, 0x00},
+ {0x6b, 0x00},
+ {0x71, 0x94},
+ {0x74, 0x20},
+ {0x80, 0x09},
+ {0x85, 0xc0},
+};
+
static void reg_w_i(struct gspca_dev *gspca_dev, u16 reg, u8 val)
{
struct usb_device *udev = gspca_dev->dev;
@@ -1210,6 +1337,17 @@ static int sd_init(struct gspca_dev *gspca_dev)
reg_w(gspca_dev, 0x56, 0x1f);
else
reg_w(gspca_dev, 0x56, 0x17);
+ } else if ((sensor_id & 0xfff0) == 0x5620) {
+ sd->sensor = SENSOR_OV562x;
+
+ gspca_dev->cam.cam_mode = ov562x_mode;
+ gspca_dev->cam.nmodes = ARRAY_SIZE(ov562x_mode);
+
+ reg_w_array(gspca_dev, ov562x_init,
+ ARRAY_SIZE(ov562x_init));
+ sccb_w_array(gspca_dev, ov562x_init_2,
+ ARRAY_SIZE(ov562x_init_2));
+ reg_w(gspca_dev, 0xe0, 0x00);
} else {
err("Unknown sensor %04x", sensor_id);
return -EINVAL;
@@ -1222,7 +1360,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- if (sd->sensor == SENSOR_OV971x)
+ if (sd->sensor == SENSOR_OV971x || sd->sensor == SENSOR_OV562x)
return gspca_dev->usb_err;
switch (gspca_dev->curr_mode) {
case QVGA_MODE: /* 320x240 */
@@ -1409,6 +1547,7 @@ static const struct sd_desc sd_desc = {
static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x05a9, 0x8065)},
{USB_DEVICE(0x06f8, 0x3003)},
+ {USB_DEVICE(0x05a9, 0x1550)},
{}
};
diff --git a/drivers/media/video/gspca/pac207.c b/drivers/media/video/gspca/pac207.c
index ece8b1e82a13..3844c49f269c 100644
--- a/drivers/media/video/gspca/pac207.c
+++ b/drivers/media/video/gspca/pac207.c
@@ -41,14 +41,14 @@ MODULE_LICENSE("GPL");
#define PAC207_BRIGHTNESS_DEFAULT 46
#define PAC207_EXPOSURE_MIN 3
-#define PAC207_EXPOSURE_MAX 26
+#define PAC207_EXPOSURE_MAX 90 /* 1 sec expo time / 1 fps */
#define PAC207_EXPOSURE_DEFAULT 5 /* power on default: 3 */
-#define PAC207_EXPOSURE_KNEE 8 /* 4 = 30 fps, 11 = 8, 15 = 6 */
+#define PAC207_EXPOSURE_KNEE 9 /* fps: 90 / exposure -> 9: 10 fps */
#define PAC207_GAIN_MIN 0
#define PAC207_GAIN_MAX 31
-#define PAC207_GAIN_DEFAULT 9 /* power on default: 9 */
-#define PAC207_GAIN_KNEE 31
+#define PAC207_GAIN_DEFAULT 7 /* power on default: 9 */
+#define PAC207_GAIN_KNEE 15
#define PAC207_AUTOGAIN_DEADZONE 30
@@ -332,7 +332,7 @@ static void pac207_do_auto_gain(struct gspca_dev *gspca_dev)
if (sd->autogain_ignore_frames > 0)
sd->autogain_ignore_frames--;
else if (gspca_auto_gain_n_exposure(gspca_dev, avg_lum,
- 100, PAC207_AUTOGAIN_DEADZONE,
+ 90, PAC207_AUTOGAIN_DEADZONE,
PAC207_GAIN_KNEE, PAC207_EXPOSURE_KNEE))
sd->autogain_ignore_frames = PAC_AUTOGAIN_IGNORE_FRAMES;
}
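
Reading the new comments above, the pac207's frame rate scales as fps = 90 / exposure: the knee value of 9 gives 90 / 9 = 10 fps, and the new maximum of 90 gives 90 / 90 = 1 fps, i.e. roughly a one-second exposure time.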
diff --git a/drivers/media/video/gspca/pac7302.c b/drivers/media/video/gspca/pac7302.c
index 2811195258c4..9db2b34d172c 100644
--- a/drivers/media/video/gspca/pac7302.c
+++ b/drivers/media/video/gspca/pac7302.c
@@ -1197,6 +1197,7 @@ static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x093a, 0x2629), .driver_info = FL_VFLIP},
{USB_DEVICE(0x093a, 0x262a)},
{USB_DEVICE(0x093a, 0x262c)},
+ {USB_DEVICE(0x145f, 0x013c)},
{}
};
MODULE_DEVICE_TABLE(usb, device_table);
diff --git a/drivers/media/video/gspca/se401.c b/drivers/media/video/gspca/se401.c
index 1494e1829d36..bb70092c2229 100644
--- a/drivers/media/video/gspca/se401.c
+++ b/drivers/media/video/gspca/se401.c
@@ -376,7 +376,6 @@ static int sd_config(struct gspca_dev *gspca_dev,
cam->bulk_size = BULK_SIZE;
cam->bulk_nurbs = 4;
cam->ctrls = sd->ctrls;
- gspca_dev->nbalt = 1; /* Ignore the bogus isoc alt settings */
sd->resetlevel = 0x2d; /* Set initial resetlevel */
/* See if the camera supports brightness */
@@ -395,6 +394,14 @@ static int sd_init(struct gspca_dev *gspca_dev)
return 0;
}
+/* function called at start time before URB creation */
+static int sd_isoc_init(struct gspca_dev *gspca_dev)
+{
+ gspca_dev->alt = 1; /* Ignore the bogus isoc alt settings */
+
+ return gspca_dev->usb_err;
+}
+
/* -- start the camera -- */
static int sd_start(struct gspca_dev *gspca_dev)
{
@@ -714,6 +721,7 @@ static const struct sd_desc sd_desc = {
.nctrls = ARRAY_SIZE(sd_ctrls),
.config = sd_config,
.init = sd_init,
+ .isoc_init = sd_isoc_init,
.start = sd_start,
.stopN = sd_stopN,
.dq_callback = sd_dq_callback,
diff --git a/drivers/media/video/gspca/sn9c20x.c b/drivers/media/video/gspca/sn9c20x.c
index 33cabc342dcf..9e198b45c3c8 100644
--- a/drivers/media/video/gspca/sn9c20x.c
+++ b/drivers/media/video/gspca/sn9c20x.c
@@ -2048,6 +2048,7 @@ static int sd_config(struct gspca_dev *gspca_dev,
struct cam *cam;
cam = &gspca_dev->cam;
+ cam->needs_full_bandwidth = 1;
sd->sensor = (id->driver_info >> 8) & 0xff;
sd->i2c_addr = id->driver_info & 0xff;
@@ -2233,6 +2234,42 @@ static void configure_sensor_output(struct gspca_dev *gspca_dev, int mode)
}
}
+static int sd_isoc_init(struct gspca_dev *gspca_dev)
+{
+ struct usb_interface *intf;
+ u32 flags = gspca_dev->cam.cam_mode[(int)gspca_dev->curr_mode].priv;
+
+ /*
+ * When using the SN9C20X_I420 fmt the sn9c20x needs more bandwidth
+ * than our regular bandwidth calculations reserve, so we force the
+ * use of a specific altsetting in that case.
+ */
+ if (!(flags & (MODE_RAW | MODE_JPEG))) {
+ intf = usb_ifnum_to_if(gspca_dev->dev, gspca_dev->iface);
+
+ if (intf->num_altsetting != 9) {
+ pr_warn("sn9c20x camera with unknown number of alt "
+ "settings (%d), please report!\n",
+ intf->num_altsetting);
+ gspca_dev->alt = intf->num_altsetting;
+ return 0;
+ }
+
+ switch (gspca_dev->width) {
+ case 160: /* 160x120 */
+ gspca_dev->alt = 2;
+ break;
+ case 320: /* 320x240 */
+ gspca_dev->alt = 6;
+ break;
+ default: /* >= 640x480 */
+ gspca_dev->alt = 9;
+ }
+ }
+
+ return 0;
+}
+
#define HW_WIN(mode, hstart, vstart) \
((const u8 []){hstart, 0, vstart, 0, \
(mode & MODE_SXGA ? 1280 >> 4 : 640 >> 4), \
@@ -2473,6 +2510,7 @@ static const struct sd_desc sd_desc = {
.nctrls = ARRAY_SIZE(sd_ctrls),
.config = sd_config,
.init = sd_init,
+ .isoc_init = sd_isoc_init,
.start = sd_start,
.stopN = sd_stopN,
.pkt_scan = sd_pkt_scan,
diff --git a/drivers/media/video/gspca/sonixb.c b/drivers/media/video/gspca/sonixb.c
index ddb392dc4f2d..6a1148d7fe92 100644
--- a/drivers/media/video/gspca/sonixb.c
+++ b/drivers/media/video/gspca/sonixb.c
@@ -1079,20 +1079,23 @@ static int sd_config(struct gspca_dev *gspca_dev,
}
cam->npkt = 36; /* 36 packets per ISOC message */
- if (sensor_data[sd->sensor].flags & F_COARSE_EXPO) {
- sd->ctrls[EXPOSURE].min = COARSE_EXPOSURE_MIN;
- sd->ctrls[EXPOSURE].max = COARSE_EXPOSURE_MAX;
- sd->ctrls[EXPOSURE].def = COARSE_EXPOSURE_DEF;
- }
-
return 0;
}
/* this function is called at probe and resume time */
static int sd_init(struct gspca_dev *gspca_dev)
{
+ struct sd *sd = (struct sd *) gspca_dev;
const __u8 stop = 0x09; /* Disable stream turn of LED */
+ if (sensor_data[sd->sensor].flags & F_COARSE_EXPO) {
+ sd->ctrls[EXPOSURE].min = COARSE_EXPOSURE_MIN;
+ sd->ctrls[EXPOSURE].max = COARSE_EXPOSURE_MAX;
+ sd->ctrls[EXPOSURE].def = COARSE_EXPOSURE_DEF;
+ if (sd->ctrls[EXPOSURE].val > COARSE_EXPOSURE_MAX)
+ sd->ctrls[EXPOSURE].val = COARSE_EXPOSURE_DEF;
+ }
+
reg_w(gspca_dev, 0x01, &stop, 1);
return 0;
diff --git a/drivers/media/video/gspca/sonixj.c b/drivers/media/video/gspca/sonixj.c
index afa3186b8038..0c9e6ddabd2c 100644
--- a/drivers/media/video/gspca/sonixj.c
+++ b/drivers/media/video/gspca/sonixj.c
@@ -1235,7 +1235,7 @@ static const u8 po2030n_sensor_param1[][8] = {
{DELAY, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 8ms */
{0xa1, 0x6e, 0x1b, 0xf4, 0x00, 0x00, 0x00, 0x10},
{0xa1, 0x6e, 0x15, 0x04, 0x00, 0x00, 0x00, 0x10},
- {0xd1, 0x6e, 0x16, 0x50, 0x40, 0x49, 0x40, 0x10},
+ {0xd1, 0x6e, 0x16, 0x40, 0x40, 0x40, 0x40, 0x10}, /* RGBG gains */
/*param2*/
{0xa1, 0x6e, 0x1d, 0x00, 0x00, 0x00, 0x00, 0x10},
{0xa1, 0x6e, 0x04, 0x03, 0x00, 0x00, 0x00, 0x10},
@@ -1779,10 +1779,6 @@ static int sd_config(struct gspca_dev *gspca_dev,
sd->ag_cnt = -1;
sd->quality = QUALITY_DEF;
- /* if USB 1.1, let some bandwidth for the audio device */
- if (gspca_dev->audio && gspca_dev->dev->speed < USB_SPEED_HIGH)
- gspca_dev->nbalt--;
-
INIT_WORK(&sd->work, qual_upd);
return 0;
@@ -2063,6 +2059,16 @@ static void setredblue(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
+ if (sd->sensor == SENSOR_PO2030N) {
+ u8 rg1b[] = /* red green1 blue (no g2) */
+ {0xc1, 0x6e, 0x16, 0x00, 0x40, 0x00, 0x00, 0x10};
+
+ /* 0x40 = normal value = gain x 1 */
+ rg1b[3] = sd->ctrls[RED].val * 2;
+ rg1b[5] = sd->ctrls[BLUE].val * 2;
+ i2c_w8(gspca_dev, rg1b);
+ return;
+ }
reg_w1(gspca_dev, 0x05, sd->ctrls[RED].val);
/* reg_w1(gspca_dev, 0x07, 32); */
reg_w1(gspca_dev, 0x06, sd->ctrls[BLUE].val);
@@ -2397,7 +2403,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
reg_w1(gspca_dev, 0x17, reg17);
reg01 &= ~S_PWR_DN; /* sensor power on */
reg_w1(gspca_dev, 0x01, reg01);
- reg01 &= ~SYS_SEL_48M;
+ reg01 &= ~SCL_SEL_OD; /* remove open-drain mode */
reg_w1(gspca_dev, 0x01, reg01);
switch (sd->sensor) {
diff --git a/drivers/media/video/gspca/spca561.c b/drivers/media/video/gspca/spca561.c
index 259a0c73c664..4a5f209ce719 100644
--- a/drivers/media/video/gspca/spca561.c
+++ b/drivers/media/video/gspca/spca561.c
@@ -451,7 +451,7 @@ static int sd_config(struct gspca_dev *gspca_dev,
}
cam = &gspca_dev->cam;
- gspca_dev->nbalt = 7 + 1; /* choose alternate 7 first */
+ cam->needs_full_bandwidth = 1;
sd->chip_revision = id->driver_info;
if (sd->chip_revision == Rev012A) {
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx.c b/drivers/media/video/gspca/stv06xx/stv06xx.c
index 0ab425fbea9a..91d99b4cc57b 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx.c
+++ b/drivers/media/video/gspca/stv06xx/stv06xx.c
@@ -36,8 +36,8 @@ MODULE_AUTHOR("Erik Andrén");
MODULE_DESCRIPTION("STV06XX USB Camera Driver");
MODULE_LICENSE("GPL");
-static int dump_bridge;
-static int dump_sensor;
+static bool dump_bridge;
+static bool dump_sensor;
int stv06xx_write_bridge(struct sd *sd, u16 address, u16 i2c_data)
{
@@ -304,7 +304,7 @@ static int stv06xx_isoc_init(struct gspca_dev *gspca_dev)
struct sd *sd = (struct sd *) gspca_dev;
/* Start isoc bandwidth "negotiation" at max isoc bandwidth */
- alt = &gspca_dev->dev->config->intf_cache[0]->altsetting[1];
+ alt = &gspca_dev->dev->actconfig->intf_cache[0]->altsetting[1];
alt->endpoint[0].desc.wMaxPacketSize =
cpu_to_le16(sd->sensor->max_packet_size[gspca_dev->curr_mode]);
@@ -317,7 +317,7 @@ static int stv06xx_isoc_nego(struct gspca_dev *gspca_dev)
struct usb_host_interface *alt;
struct sd *sd = (struct sd *) gspca_dev;
- alt = &gspca_dev->dev->config->intf_cache[0]->altsetting[1];
+ alt = &gspca_dev->dev->actconfig->intf_cache[0]->altsetting[1];
packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize);
min_packet_size = sd->sensor->min_packet_size[gspca_dev->curr_mode];
if (packet_size <= min_packet_size)
diff --git a/drivers/media/video/gspca/t613.c b/drivers/media/video/gspca/t613.c
index ea44deb66af4..9b9f85a8e60e 100644
--- a/drivers/media/video/gspca/t613.c
+++ b/drivers/media/video/gspca/t613.c
@@ -30,6 +30,7 @@
#define MODULE_NAME "t613"
+#include <linux/input.h>
#include <linux/slab.h>
#include "gspca.h"
@@ -57,6 +58,7 @@ struct sd {
u8 effect;
u8 sensor;
+ u8 button_pressed;
};
enum sensors {
SENSOR_OM6802,
@@ -1095,15 +1097,35 @@ static void sd_stopN(struct gspca_dev *gspca_dev)
msleep(20);
reg_w(gspca_dev, 0x0309);
}
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+ /* If the last button state is pressed, release it now! */
+ if (sd->button_pressed) {
+ input_report_key(gspca_dev->input_dev, KEY_CAMERA, 0);
+ input_sync(gspca_dev->input_dev);
+ sd->button_pressed = 0;
+ }
+#endif
}
static void sd_pkt_scan(struct gspca_dev *gspca_dev,
u8 *data, /* isoc packet */
int len) /* iso packet length */
{
+ struct sd *sd = (struct sd *) gspca_dev;
int pkt_type;
if (data[0] == 0x5a) {
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+ if (len > 20) {
+ u8 state = (data[20] & 0x80) ? 1 : 0;
+ if (sd->button_pressed != state) {
+ input_report_key(gspca_dev->input_dev,
+ KEY_CAMERA, state);
+ input_sync(gspca_dev->input_dev);
+ sd->button_pressed = state;
+ }
+ }
+#endif
/* Control Packet, after this came the header again,
* but extra bytes came in the packet before this,
* sometimes an EOF arrives, sometimes not... */
@@ -1410,6 +1432,9 @@ static const struct sd_desc sd_desc = {
.stopN = sd_stopN,
.pkt_scan = sd_pkt_scan,
.querymenu = sd_querymenu,
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+ .other_input = 1,
+#endif
};
/* -- module initialisation -- */
diff --git a/drivers/media/video/gspca/topro.c b/drivers/media/video/gspca/topro.c
index b2695b1dc603..444d3c5b9079 100644
--- a/drivers/media/video/gspca/topro.c
+++ b/drivers/media/video/gspca/topro.c
@@ -3946,7 +3946,7 @@ static int get_fr_idx(struct gspca_dev *gspca_dev)
/* 640x480 * 30 fps does not work */
if (i == 6 /* if 30 fps */
&& gspca_dev->width == 640)
- i = 0x86; /* 15 fps */
+ i = 0x05; /* 15 fps */
} else {
for (i = 0; i < ARRAY_SIZE(rates_6810) - 1; i++) {
if (sd->framerate >= rates_6810[i])
diff --git a/drivers/media/video/gspca/vicam.c b/drivers/media/video/gspca/vicam.c
index d12ea1518ace..911152e169d6 100644
--- a/drivers/media/video/gspca/vicam.c
+++ b/drivers/media/video/gspca/vicam.c
@@ -324,7 +324,8 @@ static void sd_stop0(struct gspca_dev *gspca_dev)
dev->work_thread = NULL;
mutex_lock(&gspca_dev->usb_lock);
- vicam_set_camera_power(gspca_dev, 0);
+ if (gspca_dev->present)
+ vicam_set_camera_power(gspca_dev, 0);
}
/* Table of supported USB devices */
diff --git a/drivers/media/video/gspca/xirlink_cit.c b/drivers/media/video/gspca/xirlink_cit.c
index fbb6ed25ec31..ecada178bceb 100644
--- a/drivers/media/video/gspca/xirlink_cit.c
+++ b/drivers/media/video/gspca/xirlink_cit.c
@@ -995,14 +995,12 @@ static int sd_config(struct gspca_dev *gspca_dev,
case CIT_MODEL0:
cam->cam_mode = model0_mode;
cam->nmodes = ARRAY_SIZE(model0_mode);
- cam->reverse_alts = 1;
gspca_dev->ctrl_dis = ~((1 << SD_CONTRAST) | (1 << SD_HFLIP));
sd->sof_len = 4;
break;
case CIT_MODEL1:
cam->cam_mode = cif_yuv_mode;
cam->nmodes = ARRAY_SIZE(cif_yuv_mode);
- cam->reverse_alts = 1;
gspca_dev->ctrl_dis = (1 << SD_HUE) | (1 << SD_HFLIP);
sd->sof_len = 4;
break;
@@ -2791,7 +2789,7 @@ static int sd_isoc_init(struct gspca_dev *gspca_dev)
}
/* Start isoc bandwidth "negotiation" at max isoc bandwidth */
- alt = &gspca_dev->dev->config->intf_cache[0]->altsetting[1];
+ alt = &gspca_dev->dev->actconfig->intf_cache[0]->altsetting[1];
alt->endpoint[0].desc.wMaxPacketSize = cpu_to_le16(max_packet_size);
return 0;
@@ -2814,7 +2812,7 @@ static int sd_isoc_nego(struct gspca_dev *gspca_dev)
break;
}
- alt = &gspca_dev->dev->config->intf_cache[0]->altsetting[1];
+ alt = &gspca_dev->dev->actconfig->intf_cache[0]->altsetting[1];
packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize);
if (packet_size <= min_packet_size)
return -EIO;
diff --git a/drivers/media/video/gspca/zc3xx.c b/drivers/media/video/gspca/zc3xx.c
index 0202fead6b97..b9e15bb0328b 100644
--- a/drivers/media/video/gspca/zc3xx.c
+++ b/drivers/media/video/gspca/zc3xx.c
@@ -5381,12 +5381,12 @@ static const struct usb_action tas5130c_NoFlikerScale[] = {
{}
};
-static const struct usb_action gc0303_InitialScale[] = {
+/* from usbvm305.inf 0ac8:305b 07/06/15 (3 - tas5130c) */
+static const struct usb_action gc0303_Initial[] = {
{0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL}, /* 00,00,01,cc, */
{0xa0, 0x02, ZC3XX_R008_CLOCKSETTING}, /* 00,08,02,cc, */
{0xa0, 0x01, ZC3XX_R010_CMOSSENSORSELECT}, /* 00,10,01,cc, */
- {0xa0, 0x10, ZC3XX_R002_CLOCKSELECT}, /* 00,02,00,cc,
- * 0<->10 */
+ {0xa0, 0x00, ZC3XX_R002_CLOCKSELECT},
{0xa0, 0x02, ZC3XX_R003_FRAMEWIDTHHIGH}, /* 00,03,02,cc, */
{0xa0, 0x80, ZC3XX_R004_FRAMEWIDTHLOW}, /* 00,04,80,cc, */
{0xa0, 0x01, ZC3XX_R005_FRAMEHEIGHTHIGH}, /* 00,05,01,cc, */
@@ -5405,29 +5405,22 @@ static const struct usb_action gc0303_InitialScale[] = {
* 6<->8 */
{0xa0, 0x10, ZC3XX_R087_EXPTIMEMID}, /* 00,87,10,cc, */
{0xa0, 0x98, ZC3XX_R08B_I2CDEVICEADDR}, /* 00,8b,98,cc, */
- {0xaa, 0x1b, 0x0024}, /* 00,1b,24,aa, */
- {0xdd, 0x00, 0x0080}, /* 00,00,80,dd, */
- {0xaa, 0x1b, 0x0000}, /* 00,1b,00,aa, */
- {0xaa, 0x13, 0x0002}, /* 00,13,02,aa, */
- {0xaa, 0x15, 0x0004}, /* 00,15,04,aa */
-/*?? {0xaa, 0x01, 0x0000}, */
{0xaa, 0x01, 0x0000},
{0xaa, 0x1a, 0x0000}, /* 00,1a,00,aa, */
{0xaa, 0x1c, 0x0017}, /* 00,1c,17,aa, */
+ {0xaa, 0x1b, 0x0000},
{0xa0, 0x82, ZC3XX_R086_EXPTIMEHIGH}, /* 00,86,82,cc, */
{0xa0, 0x83, ZC3XX_R087_EXPTIMEMID}, /* 00,87,83,cc, */
{0xa0, 0x84, ZC3XX_R088_EXPTIMELOW}, /* 00,88,84,cc, */
{0xaa, 0x05, 0x0010}, /* 00,05,10,aa, */
- {0xaa, 0x0a, 0x0000}, /* 00,0a,00,aa, */
- {0xaa, 0x0b, 0x00a0}, /* 00,0b,a0,aa, */
- {0xaa, 0x0c, 0x0000}, /* 00,0c,00,aa, */
- {0xaa, 0x0d, 0x00a0}, /* 00,0d,a0,aa, */
- {0xaa, 0x0e, 0x0000}, /* 00,0e,00,aa, */
- {0xaa, 0x0f, 0x00a0}, /* 00,0f,a0,aa, */
- {0xaa, 0x10, 0x0000}, /* 00,10,00,aa, */
- {0xaa, 0x11, 0x00a0}, /* 00,11,a0,aa, */
-/*?? {0xa0, 0x00, 0x0039},
- {0xa1, 0x01, 0x0037}, */
+ {0xaa, 0x0a, 0x0002},
+ {0xaa, 0x0b, 0x0000},
+ {0xaa, 0x0c, 0x0002},
+ {0xaa, 0x0d, 0x0000},
+ {0xaa, 0x0e, 0x0002},
+ {0xaa, 0x0f, 0x0000},
+ {0xaa, 0x10, 0x0002},
+ {0xaa, 0x11, 0x0000},
{0xaa, 0x16, 0x0001}, /* 00,16,01,aa, */
{0xaa, 0x17, 0x00e8}, /* 00,17,e6,aa, (e6 -> e8) */
{0xaa, 0x18, 0x0002}, /* 00,18,02,aa, */
@@ -5442,17 +5435,18 @@ static const struct usb_action gc0303_InitialScale[] = {
{0xa0, 0x13, ZC3XX_R1CB_SHARPNESS05}, /* 01,cb,13,cc, */
{0xa0, 0x08, ZC3XX_R250_DEADPIXELSMODE}, /* 02,50,08,cc, */
{0xa0, 0x08, ZC3XX_R301_EEPROMACCESS}, /* 03,01,08,cc, */
- {0xa0, 0x60, ZC3XX_R1A8_DIGITALGAIN}, /* 01,a8,60,cc, */
+ {0xa0, 0x58, ZC3XX_R1A8_DIGITALGAIN},
{0xa0, 0x61, ZC3XX_R116_RGAIN}, /* 01,16,61,cc, */
{0xa0, 0x65, ZC3XX_R118_BGAIN}, /* 01,18,65,cc */
+ {0xaa, 0x1b, 0x0000},
{}
};
-static const struct usb_action gc0303_Initial[] = {
+static const struct usb_action gc0303_InitialScale[] = {
{0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL}, /* 00,00,01,cc, */
{0xa0, 0x02, ZC3XX_R008_CLOCKSETTING}, /* 00,08,02,cc, */
{0xa0, 0x01, ZC3XX_R010_CMOSSENSORSELECT}, /* 00,10,01,cc, */
- {0xa0, 0x00, ZC3XX_R002_CLOCKSELECT}, /* 00,02,10,cc, */
+ {0xa0, 0x10, ZC3XX_R002_CLOCKSELECT},
{0xa0, 0x02, ZC3XX_R003_FRAMEWIDTHHIGH}, /* 00,03,02,cc, */
{0xa0, 0x80, ZC3XX_R004_FRAMEWIDTHLOW}, /* 00,04,80,cc, */
{0xa0, 0x01, ZC3XX_R005_FRAMEHEIGHTHIGH}, /* 00,05,01,cc, */
@@ -5471,34 +5465,26 @@ static const struct usb_action gc0303_Initial[] = {
* 8<->6 */
{0xa0, 0x10, ZC3XX_R087_EXPTIMEMID}, /* 00,87,10,cc, */
{0xa0, 0x98, ZC3XX_R08B_I2CDEVICEADDR}, /* 00,8b,98,cc, */
- {0xaa, 0x1b, 0x0024}, /* 00,1b,24,aa, */
- {0xdd, 0x00, 0x0080}, /* 00,00,80,dd, */
- {0xaa, 0x1b, 0x0000}, /* 00,1b,00,aa, */
- {0xaa, 0x13, 0x0002}, /* 00,13,02,aa, */
- {0xaa, 0x15, 0x0004}, /* 00,15,04,aa */
-/*?? {0xaa, 0x01, 0x0000}, */
{0xaa, 0x01, 0x0000},
{0xaa, 0x1a, 0x0000}, /* 00,1a,00,aa, */
{0xaa, 0x1c, 0x0017}, /* 00,1c,17,aa, */
+ {0xaa, 0x1b, 0x0000},
{0xa0, 0x82, ZC3XX_R086_EXPTIMEHIGH}, /* 00,86,82,cc, */
{0xa0, 0x83, ZC3XX_R087_EXPTIMEMID}, /* 00,87,83,cc, */
{0xa0, 0x84, ZC3XX_R088_EXPTIMELOW}, /* 00,88,84,cc, */
{0xaa, 0x05, 0x0010}, /* 00,05,10,aa, */
- {0xaa, 0x0a, 0x0000}, /* 00,0a,00,aa, */
- {0xaa, 0x0b, 0x00a0}, /* 00,0b,a0,aa, */
- {0xaa, 0x0c, 0x0000}, /* 00,0c,00,aa, */
- {0xaa, 0x0d, 0x00a0}, /* 00,0d,a0,aa, */
- {0xaa, 0x0e, 0x0000}, /* 00,0e,00,aa, */
- {0xaa, 0x0f, 0x00a0}, /* 00,0f,a0,aa, */
- {0xaa, 0x10, 0x0000}, /* 00,10,00,aa, */
- {0xaa, 0x11, 0x00a0}, /* 00,11,a0,aa, */
-/*?? {0xa0, 0x00, 0x0039},
- {0xa1, 0x01, 0x0037}, */
+ {0xaa, 0x0a, 0x0001},
+ {0xaa, 0x0b, 0x0000},
+ {0xaa, 0x0c, 0x0001},
+ {0xaa, 0x0d, 0x0000},
+ {0xaa, 0x0e, 0x0001},
+ {0xaa, 0x0f, 0x0000},
+ {0xaa, 0x10, 0x0001},
+ {0xaa, 0x11, 0x0000},
{0xaa, 0x16, 0x0001}, /* 00,16,01,aa, */
{0xaa, 0x17, 0x00e8}, /* 00,17,e6,aa (e6 -> e8) */
{0xaa, 0x18, 0x0002}, /* 00,18,02,aa, */
{0xaa, 0x19, 0x0088}, /* 00,19,88,aa, */
- {0xaa, 0x20, 0x0020}, /* 00,20,20,aa, */
{0xa0, 0xb7, ZC3XX_R101_SENSORCORRECTION}, /* 01,01,b7,cc, */
{0xa0, 0x05, ZC3XX_R012_VIDEOCONTROLFUNC}, /* 00,12,05,cc, */
{0xa0, 0x0d, ZC3XX_R100_OPERATIONMODE}, /* 01,00,0d,cc, */
@@ -5508,36 +5494,37 @@ static const struct usb_action gc0303_Initial[] = {
{0xa0, 0x13, ZC3XX_R1CB_SHARPNESS05}, /* 01,cb,13,cc, */
{0xa0, 0x08, ZC3XX_R250_DEADPIXELSMODE}, /* 02,50,08,cc, */
{0xa0, 0x08, ZC3XX_R301_EEPROMACCESS}, /* 03,01,08,cc, */
- {0xa0, 0x60, ZC3XX_R1A8_DIGITALGAIN}, /* 01,a8,60,cc, */
+ {0xa0, 0x58, ZC3XX_R1A8_DIGITALGAIN},
{0xa0, 0x61, ZC3XX_R116_RGAIN}, /* 01,16,61,cc, */
{0xa0, 0x65, ZC3XX_R118_BGAIN}, /* 01,18,65,cc */
+ {0xaa, 0x1b, 0x0000},
{}
};
-static const struct usb_action gc0303_50HZScale[] = {
+static const struct usb_action gc0303_50HZ[] = {
{0xaa, 0x82, 0x0000}, /* 00,82,00,aa */
{0xaa, 0x83, 0x0001}, /* 00,83,01,aa */
- {0xaa, 0x84, 0x00aa}, /* 00,84,aa,aa */
+ {0xaa, 0x84, 0x0063},
{0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH}, /* 01,90,00,cc, */
{0xa0, 0x06, ZC3XX_R191_EXPOSURELIMITMID}, /* 01,91,0d,cc, */
{0xa0, 0xa8, ZC3XX_R192_EXPOSURELIMITLOW}, /* 01,92,50,cc, */
{0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, /* 01,95,00,cc, */
{0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID}, /* 01,96,00,cc, */
- {0xa0, 0x8e, ZC3XX_R197_ANTIFLICKERLOW}, /* 01,97,47,cc, */
+ {0xa0, 0x47, ZC3XX_R197_ANTIFLICKERLOW}, /* 01,97,47,cc, */
{0xa0, 0x0e, ZC3XX_R18C_AEFREEZE}, /* 01,8c,0e,cc, */
{0xa0, 0x15, ZC3XX_R18F_AEUNFREEZE}, /* 01,8f,15,cc, */
{0xa0, 0x10, ZC3XX_R1A9_DIGITALLIMITDIFF}, /* 01,a9,10,cc, */
- {0xa0, 0x24, ZC3XX_R1AA_DIGITALGAINSTEP}, /* 01,aa,24,cc, */
+ {0xa0, 0x48, ZC3XX_R1AA_DIGITALGAINSTEP},
{0xa0, 0x62, ZC3XX_R01D_HSYNC_0}, /* 00,1d,62,cc, */
{0xa0, 0x90, ZC3XX_R01E_HSYNC_1}, /* 00,1e,90,cc, */
{0xa0, 0xc8, ZC3XX_R01F_HSYNC_2}, /* 00,1f,c8,cc, */
{0xa0, 0xff, ZC3XX_R020_HSYNC_3}, /* 00,20,ff,cc, */
{0xa0, 0x58, ZC3XX_R11D_GLOBALGAIN}, /* 01,1d,58,cc, */
{0xa0, 0x42, ZC3XX_R180_AUTOCORRECTENABLE}, /* 01,80,42,cc, */
- {0xa0, 0x78, ZC3XX_R18D_YTARGET}, /* 01,8d,78,cc */
+ {0xa0, 0x7f, ZC3XX_R18D_YTARGET},
{}
};
-static const struct usb_action gc0303_50HZ[] = {
+static const struct usb_action gc0303_50HZScale[] = {
{0xaa, 0x82, 0x0000}, /* 00,82,00,aa */
{0xaa, 0x83, 0x0003}, /* 00,83,03,aa */
{0xaa, 0x84, 0x0054}, /* 00,84,54,aa */
@@ -5550,21 +5537,21 @@ static const struct usb_action gc0303_50HZ[] = {
{0xa0, 0x0e, ZC3XX_R18C_AEFREEZE}, /* 01,8c,0e,cc, */
{0xa0, 0x15, ZC3XX_R18F_AEUNFREEZE}, /* 01,8f,15,cc, */
{0xa0, 0x10, ZC3XX_R1A9_DIGITALLIMITDIFF}, /* 01,a9,10,cc, */
- {0xa0, 0x24, ZC3XX_R1AA_DIGITALGAINSTEP}, /* 01,aa,24,cc, */
+ {0xa0, 0x48, ZC3XX_R1AA_DIGITALGAINSTEP}, /* 01,aa,24,cc, */
{0xa0, 0x62, ZC3XX_R01D_HSYNC_0}, /* 00,1d,62,cc, */
{0xa0, 0x90, ZC3XX_R01E_HSYNC_1}, /* 00,1e,90,cc, */
{0xa0, 0xc8, ZC3XX_R01F_HSYNC_2}, /* 00,1f,c8,cc, */
{0xa0, 0xff, ZC3XX_R020_HSYNC_3}, /* 00,20,ff,cc, */
{0xa0, 0x58, ZC3XX_R11D_GLOBALGAIN}, /* 01,1d,58,cc, */
{0xa0, 0x42, ZC3XX_R180_AUTOCORRECTENABLE}, /* 01,80,42,cc, */
- {0xa0, 0x78, ZC3XX_R18D_YTARGET}, /* 01,8d,78,cc */
+ {0xa0, 0x7f, ZC3XX_R18D_YTARGET},
{}
};
-static const struct usb_action gc0303_60HZScale[] = {
+static const struct usb_action gc0303_60HZ[] = {
{0xaa, 0x82, 0x0000}, /* 00,82,00,aa */
- {0xaa, 0x83, 0x0001}, /* 00,83,01,aa */
- {0xaa, 0x84, 0x0062}, /* 00,84,62,aa */
+ {0xaa, 0x83, 0x0000},
+ {0xaa, 0x84, 0x003b},
{0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH}, /* 01,90,00,cc, */
{0xa0, 0x05, ZC3XX_R191_EXPOSURELIMITMID}, /* 01,91,05,cc, */
{0xa0, 0x88, ZC3XX_R192_EXPOSURELIMITLOW}, /* 01,92,88,cc, */
@@ -5581,14 +5568,14 @@ static const struct usb_action gc0303_60HZScale[] = {
{0xa0, 0xff, ZC3XX_R020_HSYNC_3}, /* 00,20,ff,cc, */
{0xa0, 0x58, ZC3XX_R11D_GLOBALGAIN}, /* 01,1d,58,cc, */
{0xa0, 0x42, ZC3XX_R180_AUTOCORRECTENABLE}, /* 01,80,42,cc, */
- {0xa0, 0x78, ZC3XX_R18D_YTARGET}, /* 01,8d,78,cc */
+ {0xa0, 0x80, ZC3XX_R18D_YTARGET},
{}
};
-static const struct usb_action gc0303_60HZ[] = {
+static const struct usb_action gc0303_60HZScale[] = {
{0xaa, 0x82, 0x0000}, /* 00,82,00,aa */
- {0xaa, 0x83, 0x0002}, /* 00,83,02,aa */
- {0xaa, 0x84, 0x00c4}, /* 00,84,c4,aa */
+ {0xaa, 0x83, 0x0000},
+ {0xaa, 0x84, 0x0076},
{0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH}, /* 01,90,00,cc, */
{0xa0, 0x0b, ZC3XX_R191_EXPOSURELIMITMID}, /* 01,1,0b,cc, */
{0xa0, 0x10, ZC3XX_R192_EXPOSURELIMITLOW}, /* 01,2,10,cc, */
@@ -5605,18 +5592,18 @@ static const struct usb_action gc0303_60HZ[] = {
{0xa0, 0xff, ZC3XX_R020_HSYNC_3}, /* 00,0,ff,cc, */
{0xa0, 0x58, ZC3XX_R11D_GLOBALGAIN}, /* 01,d,58,cc, */
{0xa0, 0x42, ZC3XX_R180_AUTOCORRECTENABLE}, /* 01,80,42,cc, */
- {0xa0, 0x78, ZC3XX_R18D_YTARGET}, /* 01,d,78,cc */
+ {0xa0, 0x80, ZC3XX_R18D_YTARGET},
{}
};
-static const struct usb_action gc0303_NoFlikerScale[] = {
+static const struct usb_action gc0303_NoFliker[] = {
{0xa0, 0x0c, ZC3XX_R100_OPERATIONMODE}, /* 01,00,0c,cc, */
{0xaa, 0x82, 0x0000}, /* 00,82,00,aa */
{0xaa, 0x83, 0x0000}, /* 00,83,00,aa */
{0xaa, 0x84, 0x0020}, /* 00,84,20,aa */
{0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH}, /* 01,0,00,cc, */
- {0xa0, 0x05, ZC3XX_R191_EXPOSURELIMITMID}, /* 01,91,05,cc, */
- {0xa0, 0x88, ZC3XX_R192_EXPOSURELIMITLOW}, /* 01,92,88,cc, */
+ {0xa0, 0x00, ZC3XX_R191_EXPOSURELIMITMID},
+ {0xa0, 0x48, ZC3XX_R192_EXPOSURELIMITLOW},
{0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, /* 01,95,00,cc, */
{0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID}, /* 01,96,00,cc, */
{0xa0, 0x10, ZC3XX_R197_ANTIFLICKERLOW}, /* 01,97,10,cc, */
@@ -5631,14 +5618,14 @@ static const struct usb_action gc0303_NoFlikerScale[] = {
{}
};
-static const struct usb_action gc0303_NoFliker[] = {
+static const struct usb_action gc0303_NoFlikerScale[] = {
{0xa0, 0x0c, ZC3XX_R100_OPERATIONMODE}, /* 01,00,0c,cc, */
{0xaa, 0x82, 0x0000}, /* 00,82,00,aa */
{0xaa, 0x83, 0x0000}, /* 00,83,00,aa */
{0xaa, 0x84, 0x0020}, /* 00,84,20,aa */
{0xa0, 0x00, ZC3XX_R190_EXPOSURELIMITHIGH}, /* 01,90,00,cc, */
- {0xa0, 0x0b, ZC3XX_R191_EXPOSURELIMITMID}, /* 01,91,0b,cc, */
- {0xa0, 0x10, ZC3XX_R192_EXPOSURELIMITLOW}, /* 01,92,10,cc, */
+ {0xa0, 0x00, ZC3XX_R191_EXPOSURELIMITMID},
+ {0xa0, 0x48, ZC3XX_R192_EXPOSURELIMITLOW},
{0xa0, 0x00, ZC3XX_R195_ANTIFLICKERHIGH}, /* 01,95,00,cc, */
{0xa0, 0x00, ZC3XX_R196_ANTIFLICKERMID}, /* 01,96,00,cc, */
{0xa0, 0x10, ZC3XX_R197_ANTIFLICKERLOW}, /* 01,97,10,cc, */
@@ -5809,7 +5796,7 @@ static void setmatrix(struct gspca_dev *gspca_dev)
static const u8 tas5130c_matrix[9] =
{0x68, 0xec, 0xec, 0xec, 0x68, 0xec, 0xec, 0xec, 0x68};
static const u8 gc0303_matrix[9] =
- {0x7b, 0xea, 0xea, 0xea, 0x7b, 0xea, 0xea, 0xea, 0x7b};
+ {0x6c, 0xea, 0xea, 0xea, 0x6c, 0xea, 0xea, 0xea, 0x6c};
static const u8 *matrix_tb[SENSOR_MAX] = {
[SENSOR_ADCM2700] = adcm2700_matrix,
[SENSOR_CS2102] = ov7620_matrix,
@@ -6426,10 +6413,6 @@ static int sd_config(struct gspca_dev *gspca_dev,
gspca_dev->cam.ctrls = sd->ctrls;
sd->quality = QUALITY_DEF;
- /* if USB 1.1, let some bandwidth for the audio device */
- if (gspca_dev->audio && gspca_dev->dev->speed < USB_SPEED_HIGH)
- gspca_dev->nbalt--;
-
return 0;
}
diff --git a/drivers/media/video/hdpvr/hdpvr-core.c b/drivers/media/video/hdpvr/hdpvr-core.c
index 3f1a5b1beeba..e5eb56a5b618 100644
--- a/drivers/media/video/hdpvr/hdpvr-core.c
+++ b/drivers/media/video/hdpvr/hdpvr-core.c
@@ -49,7 +49,7 @@ module_param(default_audio_input, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(default_audio_input, "default audio input: 0=RCA back / "
"1=RCA front / 2=S/PDIF");
-static int boost_audio;
+static bool boost_audio;
module_param(boost_audio, bool, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(boost_audio, "boost the audio signal");
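The boost_audio change above is part of the tree-wide switch to real bool variables backing boolean module parameters; a minimal sketch of the expected declaration style (parameter name is an example only, not from this patch):

#include <linux/module.h>
#include <linux/moduleparam.h>

static bool example_flag;				/* must be bool, not int */
module_param(example_flag, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(example_flag, "example boolean parameter");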
diff --git a/drivers/media/video/ir-kbd-i2c.c b/drivers/media/video/ir-kbd-i2c.c
index 3ab875d036e1..a7c41d32f414 100644
--- a/drivers/media/video/ir-kbd-i2c.c
+++ b/drivers/media/video/ir-kbd-i2c.c
@@ -244,7 +244,7 @@ static int get_key_avermedia_cardbus(struct IR_i2c *ir,
/* ----------------------------------------------------------------------- */
-static void ir_key_poll(struct IR_i2c *ir)
+static int ir_key_poll(struct IR_i2c *ir)
{
static u32 ir_key, ir_raw;
int rc;
@@ -253,20 +253,28 @@ static void ir_key_poll(struct IR_i2c *ir)
rc = ir->get_key(ir, &ir_key, &ir_raw);
if (rc < 0) {
dprintk(2,"error\n");
- return;
+ return rc;
}
if (rc) {
dprintk(1, "%s: keycode = 0x%04x\n", __func__, ir_key);
rc_keydown(ir->rc, ir_key, 0);
}
+ return 0;
}
static void ir_work(struct work_struct *work)
{
+ int rc;
struct IR_i2c *ir = container_of(work, struct IR_i2c, work.work);
- ir_key_poll(ir);
+ rc = ir_key_poll(ir);
+ if (rc == -ENODEV) {
+ rc_unregister_device(ir->rc);
+ ir->rc = NULL;
+ return;
+ }
+
schedule_delayed_work(&ir->work, msecs_to_jiffies(ir->polling_interval));
}
@@ -446,7 +454,8 @@ static int ir_remove(struct i2c_client *client)
cancel_delayed_work_sync(&ir->work);
/* unregister device */
- rc_unregister_device(ir->rc);
+ if (ir->rc)
+ rc_unregister_device(ir->rc);
/* free memory */
kfree(ir);
@@ -489,11 +498,3 @@ static void __exit ir_fini(void)
module_init(ir_init);
module_exit(ir_fini);
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
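The ir-kbd-i2c hunks above make the poll routine propagate errors so the delayed work stops rearming itself once the device disappears; a rough standalone sketch of that shape, with all names hypothetical:

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

struct example_ir {
	struct delayed_work work;
	unsigned int polling_interval;		/* in ms */
};

static int example_poll_once(struct example_ir *ir);	/* may return -ENODEV */
static void example_teardown(struct example_ir *ir);	/* unregister the rc device */

static void example_poll_work(struct work_struct *work)
{
	struct example_ir *ir = container_of(work, struct example_ir, work.work);

	if (example_poll_once(ir) == -ENODEV) {
		example_teardown(ir);		/* device gone: do not rearm */
		return;
	}
	schedule_delayed_work(&ir->work, msecs_to_jiffies(ir->polling_interval));
}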
diff --git a/drivers/media/video/ivtv/ivtv-driver.c b/drivers/media/video/ivtv/ivtv-driver.c
index 41108a9a195e..3949b7dc2368 100644
--- a/drivers/media/video/ivtv/ivtv-driver.c
+++ b/drivers/media/video/ivtv/ivtv-driver.c
@@ -99,7 +99,7 @@ static int i2c_clock_period[IVTV_MAX_CARDS] = { -1, -1, -1, -1, -1, -1, -1, -1,
static unsigned int cardtype_c = 1;
static unsigned int tuner_c = 1;
-static unsigned int radio_c = 1;
+static bool radio_c = 1;
static unsigned int i2c_clock_period_c = 1;
static char pal[] = "---";
static char secam[] = "--";
@@ -731,9 +731,6 @@ static int __devinit ivtv_init_struct1(struct ivtv *itv)
init_kthread_work(&itv->irq_work, ivtv_irq_work_handler);
- /* start counting open_id at 1 */
- itv->open_id = 1;
-
/* Initial settings */
itv->cxhdl.port = CX2341X_PORT_MEMORY;
itv->cxhdl.capabilities = CX2341X_CAP_HAS_SLICED_VBI;
diff --git a/drivers/media/video/ivtv/ivtv-driver.h b/drivers/media/video/ivtv/ivtv-driver.h
index 8f9cc17b518e..06f3d78389bf 100644
--- a/drivers/media/video/ivtv/ivtv-driver.h
+++ b/drivers/media/video/ivtv/ivtv-driver.h
@@ -332,7 +332,7 @@ struct ivtv_stream {
const char *name; /* name of the stream */
int type; /* stream type */
- u32 id;
+ struct v4l2_fh *fh; /* pointer to the streaming filehandle */
spinlock_t qlock; /* locks access to the queues */
unsigned long s_flags; /* status flags, see above */
int dma; /* can be PCI_DMA_TODEVICE, PCI_DMA_FROMDEVICE or PCI_DMA_NONE */
@@ -379,7 +379,6 @@ struct ivtv_stream {
struct ivtv_open_id {
struct v4l2_fh fh;
- u32 open_id; /* unique ID for this file descriptor */
int type; /* stream type */
int yuv_frames; /* 1: started OUT_UDMA_YUV output mode */
struct ivtv *itv;
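The ivtv changes replace the numeric open_id with a pointer to the claiming v4l2_fh, so ownership checks become simple pointer comparisons and NULL means "unclaimed"; a tiny sketch of the idea (hypothetical struct, not the driver's actual type):

#include <linux/types.h>
#include <media/v4l2-fh.h>

struct example_stream {
	struct v4l2_fh *fh;			/* NULL when not claimed */
};

static bool example_stream_claimed_by(struct example_stream *s,
				      struct v4l2_fh *fh)
{
	return s->fh == fh;
}

static void example_stream_release(struct example_stream *s)
{
	s->fh = NULL;				/* mark as unclaimed */
}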
diff --git a/drivers/media/video/ivtv/ivtv-fileops.c b/drivers/media/video/ivtv/ivtv-fileops.c
index 38f052257f46..2cd6c89b7d91 100644
--- a/drivers/media/video/ivtv/ivtv-fileops.c
+++ b/drivers/media/video/ivtv/ivtv-fileops.c
@@ -50,16 +50,16 @@ static int ivtv_claim_stream(struct ivtv_open_id *id, int type)
if (test_and_set_bit(IVTV_F_S_CLAIMED, &s->s_flags)) {
/* someone already claimed this stream */
- if (s->id == id->open_id) {
+ if (s->fh == &id->fh) {
/* yes, this file descriptor did. So that's OK. */
return 0;
}
- if (s->id == -1 && (type == IVTV_DEC_STREAM_TYPE_VBI ||
+ if (s->fh == NULL && (type == IVTV_DEC_STREAM_TYPE_VBI ||
type == IVTV_ENC_STREAM_TYPE_VBI)) {
/* VBI is handled already internally, now also assign
the file descriptor to this stream for external
reading of the stream. */
- s->id = id->open_id;
+ s->fh = &id->fh;
IVTV_DEBUG_INFO("Start Read VBI\n");
return 0;
}
@@ -67,7 +67,7 @@ static int ivtv_claim_stream(struct ivtv_open_id *id, int type)
IVTV_DEBUG_INFO("Stream %d is busy\n", type);
return -EBUSY;
}
- s->id = id->open_id;
+ s->fh = &id->fh;
if (type == IVTV_DEC_STREAM_TYPE_VBI) {
/* Enable reinsertion interrupt */
ivtv_clear_irq_mask(itv, IVTV_IRQ_DEC_VBI_RE_INSERT);
@@ -104,7 +104,7 @@ void ivtv_release_stream(struct ivtv_stream *s)
struct ivtv *itv = s->itv;
struct ivtv_stream *s_vbi;
- s->id = -1;
+ s->fh = NULL;
if ((s->type == IVTV_DEC_STREAM_TYPE_VBI || s->type == IVTV_ENC_STREAM_TYPE_VBI) &&
test_bit(IVTV_F_S_INTERNAL_USE, &s->s_flags)) {
/* this stream is still in use internally */
@@ -136,7 +136,7 @@ void ivtv_release_stream(struct ivtv_stream *s)
/* was already cleared */
return;
}
- if (s_vbi->id != -1) {
+ if (s_vbi->fh) {
/* VBI stream still claimed by a file descriptor */
return;
}
@@ -268,11 +268,13 @@ static struct ivtv_buffer *ivtv_get_buffer(struct ivtv_stream *s, int non_block,
}
/* wait for more data to arrive */
+ mutex_unlock(&itv->serialize_lock);
prepare_to_wait(&s->waitq, &wait, TASK_INTERRUPTIBLE);
/* New buffers might have become available before we were added to the waitqueue */
if (!s->q_full.buffers)
schedule();
finish_wait(&s->waitq, &wait);
+ mutex_lock(&itv->serialize_lock);
if (signal_pending(current)) {
/* return if a signal was received */
IVTV_DEBUG_INFO("User stopped %s\n", s->name);
@@ -357,7 +359,7 @@ static ssize_t ivtv_read(struct ivtv_stream *s, char __user *ubuf, size_t tot_co
size_t tot_written = 0;
int single_frame = 0;
- if (atomic_read(&itv->capturing) == 0 && s->id == -1) {
+ if (atomic_read(&itv->capturing) == 0 && s->fh == NULL) {
/* shouldn't happen */
IVTV_DEBUG_WARN("Stream %s not initialized before read\n", s->name);
return -EIO;
@@ -507,9 +509,7 @@ ssize_t ivtv_v4l2_read(struct file * filp, char __user *buf, size_t count, loff_
IVTV_DEBUG_HI_FILE("read %zd bytes from %s\n", count, s->name);
- mutex_lock(&itv->serialize_lock);
rc = ivtv_start_capture(id);
- mutex_unlock(&itv->serialize_lock);
if (rc)
return rc;
return ivtv_read_pos(s, buf, count, pos, filp->f_flags & O_NONBLOCK);
@@ -584,9 +584,7 @@ ssize_t ivtv_v4l2_write(struct file *filp, const char __user *user_buf, size_t c
set_bit(IVTV_F_S_APPL_IO, &s->s_flags);
/* Start decoder (returns 0 if already started) */
- mutex_lock(&itv->serialize_lock);
rc = ivtv_start_decoding(id, itv->speed);
- mutex_unlock(&itv->serialize_lock);
if (rc) {
IVTV_DEBUG_WARN("Failed start decode stream %s\n", s->name);
@@ -627,11 +625,13 @@ retry:
break;
if (filp->f_flags & O_NONBLOCK)
return -EAGAIN;
+ mutex_unlock(&itv->serialize_lock);
prepare_to_wait(&s->waitq, &wait, TASK_INTERRUPTIBLE);
/* New buffers might have become free before we were added to the waitqueue */
if (!s->q_free.buffers)
schedule();
finish_wait(&s->waitq, &wait);
+ mutex_lock(&itv->serialize_lock);
if (signal_pending(current)) {
IVTV_DEBUG_INFO("User stopped %s\n", s->name);
return -EINTR;
@@ -686,12 +686,14 @@ retry:
if (mode == OUT_YUV)
ivtv_yuv_setup_stream_frame(itv);
+ mutex_unlock(&itv->serialize_lock);
prepare_to_wait(&itv->dma_waitq, &wait, TASK_INTERRUPTIBLE);
while (!(got_sig = signal_pending(current)) &&
test_bit(IVTV_F_S_DMA_PENDING, &s->s_flags)) {
schedule();
}
finish_wait(&itv->dma_waitq, &wait);
+ mutex_lock(&itv->serialize_lock);
if (got_sig) {
IVTV_DEBUG_INFO("User interrupted %s\n", s->name);
return -EINTR;
@@ -756,9 +758,7 @@ unsigned int ivtv_v4l2_enc_poll(struct file *filp, poll_table * wait)
if (!eof && !test_bit(IVTV_F_S_STREAMING, &s->s_flags)) {
int rc;
- mutex_lock(&itv->serialize_lock);
rc = ivtv_start_capture(id);
- mutex_unlock(&itv->serialize_lock);
if (rc) {
IVTV_DEBUG_INFO("Could not start capture for %s (%d)\n",
s->name, rc);
@@ -808,7 +808,7 @@ void ivtv_stop_capture(struct ivtv_open_id *id, int gop_end)
id->type == IVTV_ENC_STREAM_TYPE_VBI) &&
test_bit(IVTV_F_S_INTERNAL_USE, &s->s_flags)) {
/* Also used internally, don't stop capturing */
- s->id = -1;
+ s->fh = NULL;
}
else {
ivtv_stop_v4l2_encode_stream(s, gop_end);
@@ -861,20 +861,9 @@ int ivtv_v4l2_close(struct file *filp)
IVTV_DEBUG_FILE("close %s\n", s->name);
- v4l2_fh_del(fh);
- v4l2_fh_exit(fh);
-
- /* Easy case first: this stream was never claimed by us */
- if (s->id != id->open_id) {
- kfree(id);
- return 0;
- }
-
- /* 'Unclaim' this stream */
-
/* Stop radio */
- mutex_lock(&itv->serialize_lock);
- if (id->type == IVTV_ENC_STREAM_TYPE_RAD) {
+ if (id->type == IVTV_ENC_STREAM_TYPE_RAD &&
+ v4l2_fh_is_singular_file(filp)) {
/* Closing radio device, return to TV mode */
ivtv_mute(itv);
/* Mark that the radio is no longer in use */
@@ -890,13 +879,25 @@ int ivtv_v4l2_close(struct file *filp)
if (atomic_read(&itv->capturing) > 0) {
/* Undo video mute */
ivtv_vapi(itv, CX2341X_ENC_MUTE_VIDEO, 1,
- v4l2_ctrl_g_ctrl(itv->cxhdl.video_mute) |
- (v4l2_ctrl_g_ctrl(itv->cxhdl.video_mute_yuv) << 8));
+ v4l2_ctrl_g_ctrl(itv->cxhdl.video_mute) |
+ (v4l2_ctrl_g_ctrl(itv->cxhdl.video_mute_yuv) << 8));
}
/* Done! Unmute and continue. */
ivtv_unmute(itv);
- ivtv_release_stream(s);
- } else if (s->type >= IVTV_DEC_STREAM_TYPE_MPG) {
+ }
+
+ v4l2_fh_del(fh);
+ v4l2_fh_exit(fh);
+
+ /* Easy case first: this stream was never claimed by us */
+ if (s->fh != &id->fh) {
+ kfree(id);
+ return 0;
+ }
+
+ /* 'Unclaim' this stream */
+
+ if (s->type >= IVTV_DEC_STREAM_TYPE_MPG) {
struct ivtv_stream *s_vout = &itv->streams[IVTV_DEC_STREAM_TYPE_VOUT];
ivtv_stop_decoding(id, VIDEO_CMD_STOP_TO_BLACK | VIDEO_CMD_STOP_IMMEDIATELY, 0);
@@ -911,21 +912,25 @@ int ivtv_v4l2_close(struct file *filp)
ivtv_stop_capture(id, 0);
}
kfree(id);
- mutex_unlock(&itv->serialize_lock);
return 0;
}
-static int ivtv_serialized_open(struct ivtv_stream *s, struct file *filp)
+int ivtv_v4l2_open(struct file *filp)
{
-#ifdef CONFIG_VIDEO_ADV_DEBUG
struct video_device *vdev = video_devdata(filp);
-#endif
+ struct ivtv_stream *s = video_get_drvdata(vdev);
struct ivtv *itv = s->itv;
struct ivtv_open_id *item;
int res = 0;
IVTV_DEBUG_FILE("open %s\n", s->name);
+ if (ivtv_init_on_first_open(itv)) {
+ IVTV_ERR("Failed to initialize on device %s\n",
+ video_device_node_name(vdev));
+ return -ENXIO;
+ }
+
#ifdef CONFIG_VIDEO_ADV_DEBUG
/* Unless ivtv_fw_debug is set, error out if firmware dead. */
if (ivtv_fw_debug) {
@@ -966,31 +971,19 @@ static int ivtv_serialized_open(struct ivtv_stream *s, struct file *filp)
return -ENOMEM;
}
v4l2_fh_init(&item->fh, s->vdev);
- if (res < 0) {
- v4l2_fh_exit(&item->fh);
- kfree(item);
- return res;
- }
item->itv = itv;
item->type = s->type;
- item->open_id = itv->open_id++;
filp->private_data = &item->fh;
+ v4l2_fh_add(&item->fh);
- if (item->type == IVTV_ENC_STREAM_TYPE_RAD) {
- /* Try to claim this stream */
- if (ivtv_claim_stream(item, item->type)) {
- /* No, it's already in use */
- v4l2_fh_exit(&item->fh);
- kfree(item);
- return -EBUSY;
- }
-
+ if (item->type == IVTV_ENC_STREAM_TYPE_RAD &&
+ v4l2_fh_is_singular_file(filp)) {
if (!test_bit(IVTV_F_I_RADIO_USER, &itv->i_flags)) {
if (atomic_read(&itv->capturing) > 0) {
/* switching to radio while capture is
in progress is not polite */
- ivtv_release_stream(s);
+ v4l2_fh_del(&item->fh);
v4l2_fh_exit(&item->fh);
kfree(item);
return -EBUSY;
@@ -1022,32 +1015,9 @@ static int ivtv_serialized_open(struct ivtv_stream *s, struct file *filp)
1080 * ((itv->yuv_info.v4l2_src_h + 31) & ~31);
itv->yuv_info.stream_size = 0;
}
- v4l2_fh_add(&item->fh);
return 0;
}
-int ivtv_v4l2_open(struct file *filp)
-{
- int res;
- struct ivtv *itv = NULL;
- struct ivtv_stream *s = NULL;
- struct video_device *vdev = video_devdata(filp);
-
- s = video_get_drvdata(vdev);
- itv = s->itv;
-
- mutex_lock(&itv->serialize_lock);
- if (ivtv_init_on_first_open(itv)) {
- IVTV_ERR("Failed to initialize on device %s\n",
- video_device_node_name(vdev));
- mutex_unlock(&itv->serialize_lock);
- return -ENXIO;
- }
- res = ivtv_serialized_open(s, filp);
- mutex_unlock(&itv->serialize_lock);
- return res;
-}
-
void ivtv_mute(struct ivtv *itv)
{
if (atomic_read(&itv->capturing))
diff --git a/drivers/media/video/ivtv/ivtv-i2c.h b/drivers/media/video/ivtv/ivtv-i2c.h
index 9332920ca4ff..7b9ec1cfeb80 100644
--- a/drivers/media/video/ivtv/ivtv-i2c.h
+++ b/drivers/media/video/ivtv/ivtv-i2c.h
@@ -25,7 +25,7 @@ struct i2c_client *ivtv_i2c_new_ir_legacy(struct ivtv *itv);
int ivtv_i2c_register(struct ivtv *itv, unsigned idx);
struct v4l2_subdev *ivtv_find_hw(struct ivtv *itv, u32 hw);
-/* init + register i2c algo-bit adapter */
+/* init + register i2c adapter */
int init_ivtv_i2c(struct ivtv *itv);
void exit_ivtv_i2c(struct ivtv *itv);
diff --git a/drivers/media/video/ivtv/ivtv-ioctl.c b/drivers/media/video/ivtv/ivtv-ioctl.c
index ecafa697326e..c4bc48143098 100644
--- a/drivers/media/video/ivtv/ivtv-ioctl.c
+++ b/drivers/media/video/ivtv/ivtv-ioctl.c
@@ -179,6 +179,7 @@ int ivtv_set_speed(struct ivtv *itv, int speed)
ivtv_vapi(itv, CX2341X_DEC_PAUSE_PLAYBACK, 1, 0);
/* Wait for any DMA to finish */
+ mutex_unlock(&itv->serialize_lock);
prepare_to_wait(&itv->dma_waitq, &wait, TASK_INTERRUPTIBLE);
while (test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
got_sig = signal_pending(current);
@@ -188,6 +189,7 @@ int ivtv_set_speed(struct ivtv *itv, int speed)
schedule();
}
finish_wait(&itv->dma_waitq, &wait);
+ mutex_lock(&itv->serialize_lock);
if (got_sig)
return -EINTR;
@@ -1107,6 +1109,7 @@ void ivtv_s_std_dec(struct ivtv *itv, v4l2_std_id *std)
* happens within the first 100 lines of the top field.
* Make 4 attempts to sync to the decoder before giving up.
*/
+ mutex_unlock(&itv->serialize_lock);
for (f = 0; f < 4; f++) {
prepare_to_wait(&itv->vsync_waitq, &wait,
TASK_UNINTERRUPTIBLE);
@@ -1115,6 +1118,7 @@ void ivtv_s_std_dec(struct ivtv *itv, v4l2_std_id *std)
schedule_timeout(msecs_to_jiffies(25));
}
finish_wait(&itv->vsync_waitq, &wait);
+ mutex_lock(&itv->serialize_lock);
if (f == 4)
IVTV_WARN("Mode change failed to sync to decoder\n");
@@ -1842,8 +1846,7 @@ static long ivtv_default(struct file *file, void *fh, bool valid_prio,
return 0;
}
-static long ivtv_serialized_ioctl(struct ivtv *itv, struct file *filp,
- unsigned int cmd, unsigned long arg)
+long ivtv_v4l2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct video_device *vfd = video_devdata(filp);
long ret;
@@ -1855,21 +1858,6 @@ static long ivtv_serialized_ioctl(struct ivtv *itv, struct file *filp,
return ret;
}
-long ivtv_v4l2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
- struct ivtv_open_id *id = fh2id(filp->private_data);
- struct ivtv *itv = id->itv;
- long res;
-
- /* DQEVENT can block, so this should not run with the serialize lock */
- if (cmd == VIDIOC_DQEVENT)
- return ivtv_serialized_ioctl(itv, filp, cmd, arg);
- mutex_lock(&itv->serialize_lock);
- res = ivtv_serialized_ioctl(itv, filp, cmd, arg);
- mutex_unlock(&itv->serialize_lock);
- return res;
-}
-
static const struct v4l2_ioctl_ops ivtv_ioctl_ops = {
.vidioc_querycap = ivtv_querycap,
.vidioc_s_audio = ivtv_s_audio,
diff --git a/drivers/media/video/ivtv/ivtv-irq.c b/drivers/media/video/ivtv/ivtv-irq.c
index 9c29e964d400..1b3b9578bf47 100644
--- a/drivers/media/video/ivtv/ivtv-irq.c
+++ b/drivers/media/video/ivtv/ivtv-irq.c
@@ -288,13 +288,13 @@ static void dma_post(struct ivtv_stream *s)
ivtv_process_vbi_data(itv, buf, 0, s->type);
s->q_dma.bytesused += buf->bytesused;
}
- if (s->id == -1) {
+ if (s->fh == NULL) {
ivtv_queue_move(s, &s->q_dma, NULL, &s->q_free, 0);
return;
}
}
ivtv_queue_move(s, &s->q_dma, NULL, &s->q_full, s->q_dma.bytesused);
- if (s->id != -1)
+ if (s->fh)
wake_up(&s->waitq);
}
diff --git a/drivers/media/video/ivtv/ivtv-streams.c b/drivers/media/video/ivtv/ivtv-streams.c
index e7794dc1330e..c6e28b4ebbed 100644
--- a/drivers/media/video/ivtv/ivtv-streams.c
+++ b/drivers/media/video/ivtv/ivtv-streams.c
@@ -159,7 +159,6 @@ static void ivtv_stream_init(struct ivtv *itv, int type)
s->buffers = (itv->options.kilobytes[type] * 1024 + s->buf_size - 1) / s->buf_size;
spin_lock_init(&s->qlock);
init_waitqueue_head(&s->waitq);
- s->id = -1;
s->sg_handle = IVTV_DMA_UNMAPPED;
ivtv_queue_init(&s->q_free);
ivtv_queue_init(&s->q_full);
@@ -214,6 +213,7 @@ static int ivtv_prep_dev(struct ivtv *itv, int type)
s->vdev->fops = ivtv_stream_info[type].fops;
s->vdev->release = video_device_release;
s->vdev->tvnorms = V4L2_STD_ALL;
+ s->vdev->lock = &itv->serialize_lock;
set_bit(V4L2_FL_USE_FH_PRIO, &s->vdev->flags);
ivtv_set_funcs(s->vdev);
return 0;
diff --git a/drivers/media/video/ivtv/ivtv-yuv.c b/drivers/media/video/ivtv/ivtv-yuv.c
index dcbab6ad4c26..2ad65eb29832 100644
--- a/drivers/media/video/ivtv/ivtv-yuv.c
+++ b/drivers/media/video/ivtv/ivtv-yuv.c
@@ -1149,23 +1149,37 @@ int ivtv_yuv_udma_stream_frame(struct ivtv *itv, void __user *src)
{
struct yuv_playback_info *yi = &itv->yuv_info;
struct ivtv_dma_frame dma_args;
+ int res;
ivtv_yuv_setup_stream_frame(itv);
/* We only need to supply source addresses for this */
dma_args.y_source = src;
dma_args.uv_source = src + 720 * ((yi->v4l2_src_h + 31) & ~31);
- return ivtv_yuv_udma_frame(itv, &dma_args);
+ /* Wait for frame DMA. Note that serialize_lock is locked,
+ so to allow other processes to access the driver while
+ we are waiting unlock first and later lock again. */
+ mutex_unlock(&itv->serialize_lock);
+ res = ivtv_yuv_udma_frame(itv, &dma_args);
+ mutex_lock(&itv->serialize_lock);
+ return res;
}
/* IVTV_IOC_DMA_FRAME ioctl handler */
int ivtv_yuv_prep_frame(struct ivtv *itv, struct ivtv_dma_frame *args)
{
-/* IVTV_DEBUG_INFO("yuv_prep_frame\n"); */
+ int res;
+/* IVTV_DEBUG_INFO("yuv_prep_frame\n"); */
ivtv_yuv_next_free(itv);
ivtv_yuv_setup_frame(itv, args);
- return ivtv_yuv_udma_frame(itv, args);
+ /* Wait for frame DMA. Note that serialize_lock is locked,
+ so to allow other processes to access the driver while
+ we are waiting unlock first and later lock again. */
+ mutex_unlock(&itv->serialize_lock);
+ res = ivtv_yuv_udma_frame(itv, args);
+ mutex_lock(&itv->serialize_lock);
+ return res;
}
void ivtv_yuv_close(struct ivtv *itv)
@@ -1174,7 +1188,9 @@ void ivtv_yuv_close(struct ivtv *itv)
int h_filter, v_filter_1, v_filter_2;
IVTV_DEBUG_YUV("ivtv_yuv_close\n");
+ mutex_unlock(&itv->serialize_lock);
ivtv_waitq(&itv->vsync_waitq);
+ mutex_lock(&itv->serialize_lock);
yi->running = 0;
atomic_set(&yi->next_dma_frame, -1);
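Several ivtv hunks above wrap sleeping waits in an unlock/lock pair now that serialize_lock is held by the v4l2 core across file operations; a condensed sketch of that wait pattern under those assumptions (hypothetical device struct, not the driver's own code):

#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/wait.h>

struct example_dev {
	struct mutex serialize_lock;		/* held by the caller */
	wait_queue_head_t waitq;
	int data_ready;				/* updated from the IRQ/DMA path */
};

static int example_wait_for_data(struct example_dev *dev)
{
	DEFINE_WAIT(wait);

	mutex_unlock(&dev->serialize_lock);	/* let other fops run while we sleep */
	prepare_to_wait(&dev->waitq, &wait, TASK_INTERRUPTIBLE);
	if (!dev->data_ready)			/* condition may already have become true */
		schedule();
	finish_wait(&dev->waitq, &wait);
	mutex_lock(&dev->serialize_lock);

	return signal_pending(current) ? -EINTR : 0;
}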
diff --git a/drivers/media/video/ivtv/ivtvfb.c b/drivers/media/video/ivtv/ivtvfb.c
index 6b7c9c823330..d0fbfcf7133d 100644
--- a/drivers/media/video/ivtv/ivtvfb.c
+++ b/drivers/media/video/ivtv/ivtvfb.c
@@ -58,7 +58,7 @@
/* card parameters */
static int ivtvfb_card_id = -1;
static int ivtvfb_debug = 0;
-static int osd_laced;
+static bool osd_laced;
static int osd_depth;
static int osd_upper;
static int osd_left;
diff --git a/drivers/media/video/m5mols/m5mols.h b/drivers/media/video/m5mols/m5mols.h
index 89d09a8914f8..4b021e1ee5f2 100644
--- a/drivers/media/video/m5mols/m5mols.h
+++ b/drivers/media/video/m5mols/m5mols.h
@@ -162,9 +162,7 @@ struct m5mols_version {
* @pad: media pad
* @ffmt: current fmt according to resolution type
* @res_type: current resolution type
- * @code: current code
* @irq_waitq: waitqueue for the capture
- * @work_irq: workqueue for the IRQ
* @flags: state variable for the interrupt handler
* @handle: control handler
* @autoexposure: Auto Exposure control
@@ -176,14 +174,12 @@ struct m5mols_version {
* @ver: information of the version
* @cap: the capture mode attributes
* @power: current sensor's power status
- * @ctrl_sync: true means all controls of the sensor are initialized
- * @int_capture: true means the capture interrupt is issued once
+ * @isp_ready: 1 when the ISP controller has completed booting
+ * @ctrl_sync: 1 when the control handler state is restored in H/W
* @lock_ae: true means the Auto Exposure is locked
 * @lock_awb: true means the Auto White Balance is locked

* @resolution: register value for current resolution
- * @interrupt: register value for current interrupt status
* @mode: register value for current operation mode
- * @mode_save: register value for current operation mode for saving
* @set_power: optional power callback to the board code
*/
struct m5mols_info {
@@ -192,17 +188,16 @@ struct m5mols_info {
struct media_pad pad;
struct v4l2_mbus_framefmt ffmt[M5MOLS_RESTYPE_MAX];
int res_type;
- enum v4l2_mbus_pixelcode code;
+
wait_queue_head_t irq_waitq;
- struct work_struct work_irq;
- unsigned long flags;
+ atomic_t irq_done;
struct v4l2_ctrl_handler handle;
+
/* Autoexposure/exposure control cluster */
- struct {
- struct v4l2_ctrl *autoexposure;
- struct v4l2_ctrl *exposure;
- };
+ struct v4l2_ctrl *autoexposure;
+ struct v4l2_ctrl *exposure;
+
struct v4l2_ctrl *autowb;
struct v4l2_ctrl *colorfx;
struct v4l2_ctrl *saturation;
@@ -210,21 +205,19 @@ struct m5mols_info {
struct m5mols_version ver;
struct m5mols_capture cap;
- bool power;
- bool ctrl_sync;
+
+ unsigned int isp_ready:1;
+ unsigned int power:1;
+ unsigned int ctrl_sync:1;
+
bool lock_ae;
bool lock_awb;
u8 resolution;
- u8 interrupt;
u8 mode;
- u8 mode_save;
+
int (*set_power)(struct device *dev, int on);
};
-#define ST_CAPT_IRQ 0
-
-#define is_powered(__info) (__info->power)
-#define is_ctrl_synced(__info) (__info->ctrl_sync)
#define is_available_af(__info) (__info->ver.af)
#define is_code(__code, __type) (__code == m5mols_default_ffmt[__type].code)
#define is_manufacturer(__info, __manufacturer) \
@@ -259,7 +252,15 @@ int m5mols_read_u8(struct v4l2_subdev *sd, u32 reg_comb, u8 *val);
int m5mols_read_u16(struct v4l2_subdev *sd, u32 reg_comb, u16 *val);
int m5mols_read_u32(struct v4l2_subdev *sd, u32 reg_comb, u32 *val);
int m5mols_write(struct v4l2_subdev *sd, u32 reg_comb, u32 val);
-int m5mols_busy(struct v4l2_subdev *sd, u8 category, u8 cmd, u8 value);
+
+int m5mols_busy_wait(struct v4l2_subdev *sd, u32 reg, u32 value, u32 mask,
+ int timeout);
+
+/* Mask value for busy waiting until M-5MOLS I2C interface is initialized */
+#define M5MOLS_I2C_RDY_WAIT_FL (1 << 16)
+/* ISP state transition timeout, in ms */
+#define M5MOLS_MODE_CHANGE_TIMEOUT 200
+#define M5MOLS_BUSY_WAIT_DEF_TIMEOUT 250
/*
* Mode operation of the M-5MOLS
@@ -284,7 +285,8 @@ int m5mols_busy(struct v4l2_subdev *sd, u8 category, u8 cmd, u8 value);
int m5mols_mode(struct m5mols_info *info, u8 mode);
int m5mols_enable_interrupt(struct v4l2_subdev *sd, u8 reg);
-int m5mols_sync_controls(struct m5mols_info *info);
+int m5mols_wait_interrupt(struct v4l2_subdev *sd, u8 condition, u32 timeout);
+int m5mols_restore_controls(struct m5mols_info *info);
int m5mols_start_capture(struct m5mols_info *info);
int m5mols_do_scenemode(struct m5mols_info *info, u8 mode);
int m5mols_lock_3a(struct m5mols_info *info, bool lock);
diff --git a/drivers/media/video/m5mols/m5mols_capture.c b/drivers/media/video/m5mols/m5mols_capture.c
index 3248ac805711..ba25e8e2ba4c 100644
--- a/drivers/media/video/m5mols/m5mols_capture.c
+++ b/drivers/media/video/m5mols/m5mols_capture.c
@@ -1,3 +1,4 @@
+
/*
* The Capture code for Fujitsu M-5MOLS ISP
*
@@ -25,26 +26,11 @@
#include <media/v4l2-device.h>
#include <media/v4l2-subdev.h>
#include <media/m5mols.h>
+#include <media/s5p_fimc.h>
#include "m5mols.h"
#include "m5mols_reg.h"
-static int m5mols_capture_error_handler(struct m5mols_info *info,
- int timeout)
-{
- int ret;
-
- /* Disable all interrupts and clear relevant interrupt staus bits */
- ret = m5mols_write(&info->sd, SYSTEM_INT_ENABLE,
- info->interrupt & ~(REG_INT_CAPTURE));
- if (ret)
- return ret;
-
- if (timeout == 0)
- return -ETIMEDOUT;
-
- return 0;
-}
/**
* m5mols_read_rational - I2C read of a rational number
*
@@ -121,69 +107,54 @@ int m5mols_start_capture(struct m5mols_info *info)
{
struct v4l2_subdev *sd = &info->sd;
u8 resolution = info->resolution;
- int timeout;
int ret;
/*
- * Preparing capture. Setting control & interrupt before entering
- * capture mode
- *
- * 1) change to MONITOR mode for operating control & interrupt
- * 2) set controls (considering v4l2_control value & lock 3A)
- * 3) set interrupt
- * 4) change to CAPTURE mode
+ * Synchronize the controls, set the capture frame resolution and color
+ * format. The frame capture is initiated during switching from Monitor
+ * to Capture mode.
*/
ret = m5mols_mode(info, REG_MONITOR);
if (!ret)
- ret = m5mols_sync_controls(info);
+ ret = m5mols_restore_controls(info);
if (!ret)
- ret = m5mols_lock_3a(info, true);
+ ret = m5mols_write(sd, CAPP_YUVOUT_MAIN, REG_JPEG);
+ if (!ret)
+ ret = m5mols_write(sd, CAPP_MAIN_IMAGE_SIZE, resolution);
if (!ret)
- ret = m5mols_enable_interrupt(sd, REG_INT_CAPTURE);
+ ret = m5mols_lock_3a(info, true);
if (!ret)
ret = m5mols_mode(info, REG_CAPTURE);
- if (!ret) {
- /* Wait for capture interrupt, after changing capture mode */
- timeout = wait_event_interruptible_timeout(info->irq_waitq,
- test_bit(ST_CAPT_IRQ, &info->flags),
- msecs_to_jiffies(2000));
- if (test_and_clear_bit(ST_CAPT_IRQ, &info->flags))
- ret = m5mols_capture_error_handler(info, timeout);
- }
+ if (!ret)
+ /* Wait until a frame is captured to ISP internal memory */
+ ret = m5mols_wait_interrupt(sd, REG_INT_CAPTURE, 2000);
if (!ret)
ret = m5mols_lock_3a(info, false);
if (ret)
return ret;
+
/*
- * Starting capture. Setting capture frame count and resolution and
- * the format(available format: JPEG, Bayer RAW, YUV).
- *
- * 1) select single or multi(enable to 25), format, size
- * 2) set interrupt
- * 3) start capture(for main image, now)
- * 4) get information
- * 5) notify file size to v4l2 device(e.g, to s5p-fimc v4l2 device)
+ * Initiate the captured data transfer to a MIPI-CSI receiver.
*/
ret = m5mols_write(sd, CAPC_SEL_FRAME, 1);
if (!ret)
- ret = m5mols_write(sd, CAPP_YUVOUT_MAIN, REG_JPEG);
- if (!ret)
- ret = m5mols_write(sd, CAPP_MAIN_IMAGE_SIZE, resolution);
- if (!ret)
- ret = m5mols_enable_interrupt(sd, REG_INT_CAPTURE);
- if (!ret)
ret = m5mols_write(sd, CAPC_START, REG_CAP_START_MAIN);
if (!ret) {
+ bool captured = false;
+ unsigned int size;
+
/* Wait for the capture completion interrupt */
- timeout = wait_event_interruptible_timeout(info->irq_waitq,
- test_bit(ST_CAPT_IRQ, &info->flags),
- msecs_to_jiffies(2000));
- if (test_and_clear_bit(ST_CAPT_IRQ, &info->flags)) {
+ ret = m5mols_wait_interrupt(sd, REG_INT_CAPTURE, 2000);
+ if (!ret) {
+ captured = true;
ret = m5mols_capture_info(info);
- if (!ret)
- v4l2_subdev_notify(sd, 0, &info->cap.total);
}
+ size = captured ? info->cap.main : 0;
+ v4l2_dbg(1, m5mols_debug, sd, "%s: size: %d, thumb.: %d B\n",
+ __func__, size, info->cap.thumb);
+
+ v4l2_subdev_notify(sd, S5P_FIMC_TX_END_NOTIFY, &size);
}
- return m5mols_capture_error_handler(info, timeout);
+ return ret;
}
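The capture path above now blocks on m5mols_wait_interrupt() instead of an open-coded wait: the ISR latches a flag and wakes a waitqueue, and the waiter consumes the flag with a timeout. A self-contained sketch of that mechanism, with hypothetical names and simplified relative to the driver:

#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/wait.h>

struct example_isp {
	atomic_t irq_done;
	wait_queue_head_t irq_waitq;
};

static irqreturn_t example_isp_irq(int irq, void *priv)
{
	struct example_isp *isp = priv;

	atomic_set(&isp->irq_done, 1);		/* latch the event */
	wake_up_interruptible(&isp->irq_waitq);
	return IRQ_HANDLED;
}

static int example_wait_irq(struct example_isp *isp, unsigned int timeout_ms)
{
	int ret = wait_event_interruptible_timeout(isp->irq_waitq,
			atomic_add_unless(&isp->irq_done, -1, 0),  /* consume it */
			msecs_to_jiffies(timeout_ms));

	if (ret < 0)
		return ret;			/* interrupted by a signal */
	return ret ? 0 : -ETIMEDOUT;
}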
diff --git a/drivers/media/video/m5mols/m5mols_core.c b/drivers/media/video/m5mols/m5mols_core.c
index 05ab3700647e..93d768db9f33 100644
--- a/drivers/media/video/m5mols/m5mols_core.c
+++ b/drivers/media/video/m5mols/m5mols_core.c
@@ -135,10 +135,13 @@ static u32 m5mols_swap_byte(u8 *data, u8 length)
* @reg: combination of size, category and command for the I2C packet
* @size: desired size of I2C packet
* @val: read value
+ *
+ * Returns 0 on success, or else negative errno.
*/
static int m5mols_read(struct v4l2_subdev *sd, u32 size, u32 reg, u32 *val)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct m5mols_info *info = to_m5mols(sd);
u8 rbuf[M5MOLS_I2C_MAX_SIZE + 1];
u8 category = I2C_CATEGORY(reg);
u8 cmd = I2C_COMMAND(reg);
@@ -168,15 +171,17 @@ static int m5mols_read(struct v4l2_subdev *sd, u32 size, u32 reg, u32 *val)
usleep_range(200, 200);
ret = i2c_transfer(client->adapter, msg, 2);
- if (ret < 0) {
- v4l2_err(sd, "read failed: size:%d cat:%02x cmd:%02x. %d\n",
- size, category, cmd, ret);
- return ret;
+
+ if (ret == 2) {
+ *val = m5mols_swap_byte(&rbuf[1], size);
+ return 0;
}
- *val = m5mols_swap_byte(&rbuf[1], size);
+ if (info->isp_ready)
+ v4l2_err(sd, "read failed: size:%d cat:%02x cmd:%02x. %d\n",
+ size, category, cmd, ret);
- return 0;
+ return ret < 0 ? ret : -EIO;
}
int m5mols_read_u8(struct v4l2_subdev *sd, u32 reg, u8 *val)
@@ -229,10 +234,13 @@ int m5mols_read_u32(struct v4l2_subdev *sd, u32 reg, u32 *val)
* m5mols_write - I2C command write function
* @reg: combination of size, category and command for the I2C packet
* @val: value to write
+ *
+ * Returns 0 on success, or else negative errno.
*/
int m5mols_write(struct v4l2_subdev *sd, u32 reg, u32 val)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct m5mols_info *info = to_m5mols(sd);
u8 wbuf[M5MOLS_I2C_MAX_SIZE + 4];
u8 category = I2C_CATEGORY(reg);
u8 cmd = I2C_COMMAND(reg);
@@ -263,28 +271,45 @@ int m5mols_write(struct v4l2_subdev *sd, u32 reg, u32 val)
usleep_range(200, 200);
ret = i2c_transfer(client->adapter, msg, 1);
- if (ret < 0) {
- v4l2_err(sd, "write failed: size:%d cat:%02x cmd:%02x. %d\n",
- size, category, cmd, ret);
- return ret;
- }
+ if (ret == 1)
+ return 0;
- return 0;
+ if (info->isp_ready)
+ v4l2_err(sd, "write failed: cat:%02x cmd:%02x ret:%d\n",
+ category, cmd, ret);
+
+ return ret < 0 ? ret : -EIO;
}
-int m5mols_busy(struct v4l2_subdev *sd, u8 category, u8 cmd, u8 mask)
+/**
+ * m5mols_busy_wait - Busy waiting with I2C register polling
+ * @reg: the I2C_REG() address of an 8-bit status register to check
+ * @value: expected status register value
+ * @mask: bit mask for the read status register value
+ * @timeout: timeout in milliseconds, or -1 for default timeout
+ *
+ * The @reg register value is ORed with @mask before comparing with @value.
+ *
+ * Return: 0 if the requested condition became true within less than
+ * @timeout ms, or else negative errno.
+ */
+int m5mols_busy_wait(struct v4l2_subdev *sd, u32 reg, u32 value, u32 mask,
+ int timeout)
{
- u8 busy;
- int i;
- int ret;
+ int ms = timeout < 0 ? M5MOLS_BUSY_WAIT_DEF_TIMEOUT : timeout;
+ unsigned long end = jiffies + msecs_to_jiffies(ms);
+ u8 status;
- for (i = 0; i < M5MOLS_I2C_CHECK_RETRY; i++) {
- ret = m5mols_read_u8(sd, I2C_REG(category, cmd, 1), &busy);
- if (ret < 0)
+ do {
+ int ret = m5mols_read_u8(sd, reg, &status);
+
+ if (ret < 0 && !(mask & M5MOLS_I2C_RDY_WAIT_FL))
return ret;
- if ((busy & mask) == mask)
+ if (!ret && (status & mask & 0xff) == (value & 0xff))
return 0;
- }
+ usleep_range(100, 250);
+ } while (ms > 0 && time_is_after_jiffies(end));
+
return -EBUSY;
}
@@ -307,6 +332,20 @@ int m5mols_enable_interrupt(struct v4l2_subdev *sd, u8 reg)
return ret;
}
+int m5mols_wait_interrupt(struct v4l2_subdev *sd, u8 irq_mask, u32 timeout)
+{
+ struct m5mols_info *info = to_m5mols(sd);
+
+ int ret = wait_event_interruptible_timeout(info->irq_waitq,
+ atomic_add_unless(&info->irq_done, -1, 0),
+ msecs_to_jiffies(timeout));
+ if (ret <= 0)
+ return ret ? ret : -ETIMEDOUT;
+
+ return m5mols_busy_wait(sd, SYSTEM_INT_FACTOR, irq_mask,
+ M5MOLS_I2C_RDY_WAIT_FL | irq_mask, -1);
+}
+
/**
* m5mols_reg_mode - Write the mode and check busy status
*
@@ -316,8 +355,10 @@ int m5mols_enable_interrupt(struct v4l2_subdev *sd, u8 reg)
static int m5mols_reg_mode(struct v4l2_subdev *sd, u8 mode)
{
int ret = m5mols_write(sd, SYSTEM_SYSMODE, mode);
-
- return ret ? ret : m5mols_busy(sd, CAT_SYSTEM, CAT0_SYSMODE, mode);
+ if (ret < 0)
+ return ret;
+ return m5mols_busy_wait(sd, SYSTEM_SYSMODE, mode, 0xff,
+ M5MOLS_MODE_CHANGE_TIMEOUT);
}
/**
@@ -334,17 +375,17 @@ int m5mols_mode(struct m5mols_info *info, u8 mode)
int ret = -EINVAL;
u8 reg;
- if (mode < REG_PARAMETER && mode > REG_CAPTURE)
+ if (mode < REG_PARAMETER || mode > REG_CAPTURE)
return ret;
ret = m5mols_read_u8(sd, SYSTEM_SYSMODE, &reg);
- if ((!ret && reg == mode) || ret)
+ if (ret || reg == mode)
return ret;
switch (reg) {
case REG_PARAMETER:
ret = m5mols_reg_mode(sd, REG_MONITOR);
- if (!ret && mode == REG_MONITOR)
+ if (mode == REG_MONITOR)
break;
if (!ret)
ret = m5mols_reg_mode(sd, REG_CAPTURE);
@@ -361,7 +402,7 @@ int m5mols_mode(struct m5mols_info *info, u8 mode)
case REG_CAPTURE:
ret = m5mols_reg_mode(sd, REG_MONITOR);
- if (!ret && mode == REG_MONITOR)
+ if (mode == REG_MONITOR)
break;
if (!ret)
ret = m5mols_reg_mode(sd, REG_PARAMETER);
@@ -511,9 +552,6 @@ static int m5mols_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
struct m5mols_info *info = to_m5mols(sd);
struct v4l2_mbus_framefmt *format;
- if (fmt->pad != 0)
- return -EINVAL;
-
format = __find_format(info, fh, fmt->which, info->res_type);
if (!format)
return -EINVAL;
@@ -532,9 +570,6 @@ static int m5mols_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
u32 resolution = 0;
int ret;
- if (fmt->pad != 0)
- return -EINVAL;
-
ret = __find_resolution(sd, format, &type, &resolution);
if (ret < 0)
return ret;
@@ -543,13 +578,14 @@ static int m5mols_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
if (!sfmt)
return 0;
- *sfmt = m5mols_default_ffmt[type];
- sfmt->width = format->width;
- sfmt->height = format->height;
+
+ format->code = m5mols_default_ffmt[type].code;
+ format->colorspace = V4L2_COLORSPACE_JPEG;
+ format->field = V4L2_FIELD_NONE;
if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+ *sfmt = *format;
info->resolution = resolution;
- info->code = format->code;
info->res_type = type;
}
@@ -575,26 +611,25 @@ static struct v4l2_subdev_pad_ops m5mols_pad_ops = {
};
/**
- * m5mols_sync_controls - Apply default scene mode and the current controls
+ * m5mols_restore_controls - Apply current control values to the registers
*
- * This is used only streaming for syncing between v4l2_ctrl framework and
- * m5mols's controls. First, do the scenemode to the sensor, then call
- * v4l2_ctrl_handler_setup. It can be same between some commands and
- * the scenemode's in the default v4l2_ctrls. But, such commands of control
- * should be prior to the scenemode's one.
+ * m5mols_do_scenemode() handles all parameters for which there is yet no
+ * individual control. It should be replaced at some point by setting each
+ * control individually, in required register set up order.
*/
-int m5mols_sync_controls(struct m5mols_info *info)
+int m5mols_restore_controls(struct m5mols_info *info)
{
- int ret = -EINVAL;
+ int ret;
- if (!is_ctrl_synced(info)) {
- ret = m5mols_do_scenemode(info, REG_SCENE_NORMAL);
- if (ret)
- return ret;
+ if (info->ctrl_sync)
+ return 0;
- v4l2_ctrl_handler_setup(&info->handle);
- info->ctrl_sync = true;
- }
+ ret = m5mols_do_scenemode(info, REG_SCENE_NORMAL);
+ if (ret)
+ return ret;
+
+ ret = v4l2_ctrl_handler_setup(&info->handle);
+ info->ctrl_sync = !ret;
return ret;
}
@@ -618,7 +653,7 @@ static int m5mols_start_monitor(struct m5mols_info *info)
if (!ret)
ret = m5mols_mode(info, REG_MONITOR);
if (!ret)
- ret = m5mols_sync_controls(info);
+ ret = m5mols_restore_controls(info);
return ret;
}
@@ -626,13 +661,14 @@ static int m5mols_start_monitor(struct m5mols_info *info)
static int m5mols_s_stream(struct v4l2_subdev *sd, int enable)
{
struct m5mols_info *info = to_m5mols(sd);
+ u32 code = info->ffmt[info->res_type].code;
if (enable) {
int ret = -EINVAL;
- if (is_code(info->code, M5MOLS_RESTYPE_MONITOR))
+ if (is_code(code, M5MOLS_RESTYPE_MONITOR))
ret = m5mols_start_monitor(info);
- if (is_code(info->code, M5MOLS_RESTYPE_CAPTURE))
+ if (is_code(code, M5MOLS_RESTYPE_CAPTURE))
ret = m5mols_start_capture(info);
return ret;
@@ -649,17 +685,25 @@ static int m5mols_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct v4l2_subdev *sd = to_sd(ctrl);
struct m5mols_info *info = to_m5mols(sd);
+ int ispstate = info->mode;
int ret;
- info->mode_save = info->mode;
+ /*
+ * If needed, defer restoring the controls until
+ * the device is fully initialized.
+ */
+ if (!info->isp_ready) {
+ info->ctrl_sync = 0;
+ return 0;
+ }
ret = m5mols_mode(info, REG_PARAMETER);
- if (!ret)
- ret = m5mols_set_ctrl(ctrl);
- if (!ret)
- ret = m5mols_mode(info, info->mode_save);
-
- return ret;
+ if (ret < 0)
+ return ret;
+ ret = m5mols_set_ctrl(ctrl);
+ if (ret < 0)
+ return ret;
+ return m5mols_mode(info, ispstate);
}
static const struct v4l2_ctrl_ops m5mols_ctrl_ops = {
@@ -673,10 +717,10 @@ static int m5mols_sensor_power(struct m5mols_info *info, bool enable)
const struct m5mols_platform_data *pdata = info->pdata;
int ret;
- if (enable) {
- if (is_powered(info))
- return 0;
+ if (info->power == enable)
+ return 0;
+ if (enable) {
if (info->set_power) {
ret = info->set_power(&client->dev, 1);
if (ret)
@@ -690,15 +734,11 @@ static int m5mols_sensor_power(struct m5mols_info *info, bool enable)
}
gpio_set_value(pdata->gpio_reset, !pdata->reset_polarity);
- usleep_range(1000, 1000);
- info->power = true;
+ info->power = 1;
return ret;
}
- if (!is_powered(info))
- return 0;
-
ret = regulator_bulk_disable(ARRAY_SIZE(supplies), supplies);
if (ret)
return ret;
@@ -707,8 +747,9 @@ static int m5mols_sensor_power(struct m5mols_info *info, bool enable)
info->set_power(&client->dev, 0);
gpio_set_value(pdata->gpio_reset, pdata->reset_polarity);
- usleep_range(1000, 1000);
- info->power = false;
+
+ info->isp_ready = 0;
+ info->power = 0;
return ret;
}
@@ -721,21 +762,29 @@ int __attribute__ ((weak)) m5mols_update_fw(struct v4l2_subdev *sd,
}
/**
- * m5mols_sensor_armboot - Booting M-5MOLS internal ARM core.
+ * m5mols_fw_start - M-5MOLS internal ARM controller initialization
*
- * Booting internal ARM core makes the M-5MOLS is ready for getting commands
- * with I2C. It's the first thing to be done after it powered up. It must wait
- * at least 520ms recommended by M-5MOLS datasheet, after executing arm booting.
+ * Execute the M-5MOLS internal ARM controller initialization sequence.
+ * This function should be called after the supply voltage has been
+ * applied and before any requests to the device are made.
*/
-static int m5mols_sensor_armboot(struct v4l2_subdev *sd)
+static int m5mols_fw_start(struct v4l2_subdev *sd)
{
+ struct m5mols_info *info = to_m5mols(sd);
int ret;
- ret = m5mols_write(sd, FLASH_CAM_START, REG_START_ARM_BOOT);
+ atomic_set(&info->irq_done, 0);
+ /* Wait until I2C slave is initialized in Flash Writer mode */
+ ret = m5mols_busy_wait(sd, FLASH_CAM_START, REG_IN_FLASH_MODE,
+ M5MOLS_I2C_RDY_WAIT_FL | 0xff, -1);
+ if (!ret)
+ ret = m5mols_write(sd, FLASH_CAM_START, REG_START_ARM_BOOT);
+ if (!ret)
+ ret = m5mols_wait_interrupt(sd, REG_INT_MODE, 2000);
if (ret < 0)
return ret;
- msleep(520);
+ info->isp_ready = 1;
ret = m5mols_get_version(sd);
if (!ret)
@@ -747,7 +796,8 @@ static int m5mols_sensor_armboot(struct v4l2_subdev *sd)
ret = m5mols_write(sd, PARM_INTERFACE, REG_INTERFACE_MIPI);
if (!ret)
- ret = m5mols_enable_interrupt(sd, REG_INT_AF);
+ ret = m5mols_enable_interrupt(sd,
+ REG_INT_AF | REG_INT_CAPTURE);
return ret;
}
@@ -784,7 +834,7 @@ static int m5mols_init_controls(struct m5mols_info *info)
4, (1 << V4L2_COLORFX_BW), V4L2_COLORFX_NONE);
info->autoexposure = v4l2_ctrl_new_std_menu(&info->handle,
&m5mols_ctrl_ops, V4L2_CID_EXPOSURE_AUTO,
- 1, 0, V4L2_EXPOSURE_MANUAL);
+ 1, 0, V4L2_EXPOSURE_AUTO);
sd->ctrl_handler = &info->handle;
if (info->handle.error) {
@@ -813,16 +863,7 @@ static int m5mols_s_power(struct v4l2_subdev *sd, int on)
if (on) {
ret = m5mols_sensor_power(info, true);
if (!ret)
- ret = m5mols_sensor_armboot(sd);
- if (!ret)
- ret = m5mols_init_controls(info);
- if (ret)
- return ret;
-
- info->ffmt[M5MOLS_RESTYPE_MONITOR] =
- m5mols_default_ffmt[M5MOLS_RESTYPE_MONITOR];
- info->ffmt[M5MOLS_RESTYPE_CAPTURE] =
- m5mols_default_ffmt[M5MOLS_RESTYPE_CAPTURE];
+ ret = m5mols_fw_start(sd);
return ret;
}
@@ -833,17 +874,14 @@ static int m5mols_s_power(struct v4l2_subdev *sd, int on)
if (!ret)
ret = m5mols_write(sd, AF_MODE, REG_AF_POWEROFF);
if (!ret)
- ret = m5mols_busy(sd, CAT_SYSTEM, CAT0_STATUS,
- REG_AF_IDLE);
- if (!ret)
- v4l2_info(sd, "Success soft-landing lens\n");
+ ret = m5mols_busy_wait(sd, SYSTEM_STATUS, REG_AF_IDLE,
+ 0xff, -1);
+ if (ret < 0)
+ v4l2_warn(sd, "Soft landing lens failed\n");
}
ret = m5mols_sensor_power(info, false);
- if (!ret) {
- v4l2_ctrl_handler_free(&info->handle);
- info->ctrl_sync = false;
- }
+ info->ctrl_sync = 0;
return ret;
}
@@ -869,52 +907,33 @@ static const struct v4l2_subdev_core_ops m5mols_core_ops = {
.log_status = m5mols_log_status,
};
+/*
+ * V4L2 subdev internal operations
+ */
+static int m5mols_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct v4l2_mbus_framefmt *format = v4l2_subdev_get_try_format(fh, 0);
+
+ *format = m5mols_default_ffmt[0];
+ return 0;
+}
+
+static const struct v4l2_subdev_internal_ops m5mols_subdev_internal_ops = {
+ .open = m5mols_open,
+};
+
static const struct v4l2_subdev_ops m5mols_ops = {
.core = &m5mols_core_ops,
.pad = &m5mols_pad_ops,
.video = &m5mols_video_ops,
};
-static void m5mols_irq_work(struct work_struct *work)
-{
- struct m5mols_info *info =
- container_of(work, struct m5mols_info, work_irq);
- struct v4l2_subdev *sd = &info->sd;
- u8 reg;
- int ret;
-
- if (!is_powered(info) ||
- m5mols_read_u8(sd, SYSTEM_INT_FACTOR, &info->interrupt))
- return;
-
- switch (info->interrupt & REG_INT_MASK) {
- case REG_INT_AF:
- if (!is_available_af(info))
- break;
- ret = m5mols_read_u8(sd, AF_STATUS, &reg);
- v4l2_dbg(2, m5mols_debug, sd, "AF %s\n",
- reg == REG_AF_FAIL ? "Failed" :
- reg == REG_AF_SUCCESS ? "Success" :
- reg == REG_AF_IDLE ? "Idle" : "Busy");
- break;
- case REG_INT_CAPTURE:
- if (!test_and_set_bit(ST_CAPT_IRQ, &info->flags))
- wake_up_interruptible(&info->irq_waitq);
-
- v4l2_dbg(2, m5mols_debug, sd, "CAPTURE\n");
- break;
- default:
- v4l2_dbg(2, m5mols_debug, sd, "Undefined: %02x\n", reg);
- break;
- };
-}
-
static irqreturn_t m5mols_irq_handler(int irq, void *data)
{
- struct v4l2_subdev *sd = data;
- struct m5mols_info *info = to_m5mols(sd);
+ struct m5mols_info *info = to_m5mols(data);
- schedule_work(&info->work_irq);
+ atomic_set(&info->irq_done, 1);
+ wake_up_interruptible(&info->irq_waitq);
return IRQ_HANDLED;
}
@@ -965,7 +984,9 @@ static int __devinit m5mols_probe(struct i2c_client *client,
sd = &info->sd;
strlcpy(sd->name, MODULE_NAME, sizeof(sd->name));
v4l2_i2c_subdev_init(sd, client, &m5mols_ops);
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ sd->internal_ops = &m5mols_subdev_internal_ops;
info->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_init(&sd->entity, 1, &info->pad, 0);
if (ret < 0)
@@ -973,7 +994,6 @@ static int __devinit m5mols_probe(struct i2c_client *client,
sd->entity.type = MEDIA_ENT_T_V4L2_SUBDEV_SENSOR;
init_waitqueue_head(&info->irq_waitq);
- INIT_WORK(&info->work_irq, m5mols_irq_work);
ret = request_irq(client->irq, m5mols_irq_handler,
IRQF_TRIGGER_RISING, MODULE_NAME, sd);
if (ret) {
@@ -981,7 +1001,20 @@ static int __devinit m5mols_probe(struct i2c_client *client,
goto out_me;
}
info->res_type = M5MOLS_RESTYPE_MONITOR;
- return 0;
+ info->ffmt[0] = m5mols_default_ffmt[0];
+ info->ffmt[1] = m5mols_default_ffmt[1];
+
+ ret = m5mols_sensor_power(info, true);
+ if (ret)
+ goto out_me;
+
+ ret = m5mols_fw_start(sd);
+ if (!ret)
+ ret = m5mols_init_controls(info);
+
+ m5mols_sensor_power(info, false);
+ if (!ret)
+ return 0;
out_me:
media_entity_cleanup(&sd->entity);
out_reg:
@@ -999,6 +1032,7 @@ static int __devexit m5mols_remove(struct i2c_client *client)
struct m5mols_info *info = to_m5mols(sd);
v4l2_device_unregister_subdev(sd);
+ v4l2_ctrl_handler_free(sd->ctrl_handler);
free_irq(client->irq, sd);
regulator_bulk_free(ARRAY_SIZE(supplies), supplies);
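m5mols_busy_wait() above replaces the fixed retry count with deadline-based register polling; a simplified sketch of such a helper (hypothetical read callback and device handle, and it omits the I2C-not-yet-ready special case handled via M5MOLS_I2C_RDY_WAIT_FL):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/types.h>

struct example_sensor;					/* opaque device handle */
static int example_read_u8(struct example_sensor *s, u32 reg, u8 *val);

static int example_busy_wait(struct example_sensor *s, u32 reg,
			     u8 value, u8 mask, unsigned int timeout_ms)
{
	unsigned long end = jiffies + msecs_to_jiffies(timeout_ms);
	u8 status;

	do {
		int ret = example_read_u8(s, reg, &status);

		if (ret < 0)
			return ret;
		if ((status & mask) == (value & mask))
			return 0;		/* condition met */
		usleep_range(100, 250);
	} while (time_is_after_jiffies(end));

	return -EBUSY;
}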
diff --git a/drivers/media/video/m5mols/m5mols_reg.h b/drivers/media/video/m5mols/m5mols_reg.h
index c755bd6edfe9..ae4aced0f9b2 100644
--- a/drivers/media/video/m5mols/m5mols_reg.h
+++ b/drivers/media/video/m5mols/m5mols_reg.h
@@ -55,39 +55,31 @@
 * There are many registers between the customer version address and the awb
 * one. For more specific contents, see the definitions in the file m5mols.h.
*/
-#define CAT0_VER_CUSTOMER 0x00 /* customer version */
-#define CAT0_VER_PROJECT 0x01 /* project version */
-#define CAT0_VER_FIRMWARE 0x02 /* Firmware version */
-#define CAT0_VER_HARDWARE 0x04 /* Hardware version */
-#define CAT0_VER_PARAMETER 0x06 /* Parameter version */
-#define CAT0_VER_AWB 0x08 /* Auto WB version */
-#define CAT0_VER_STRING 0x0a /* string including M-5MOLS */
-#define CAT0_SYSMODE 0x0b /* SYSTEM mode register */
-#define CAT0_STATUS 0x0c /* SYSTEM mode status register */
-#define CAT0_INT_FACTOR 0x10 /* interrupt pending register */
-#define CAT0_INT_ENABLE 0x11 /* interrupt enable register */
-
-#define SYSTEM_VER_CUSTOMER I2C_REG(CAT_SYSTEM, CAT0_VER_CUSTOMER, 1)
-#define SYSTEM_VER_PROJECT I2C_REG(CAT_SYSTEM, CAT0_VER_PROJECT, 1)
-#define SYSTEM_VER_FIRMWARE I2C_REG(CAT_SYSTEM, CAT0_VER_FIRMWARE, 2)
-#define SYSTEM_VER_HARDWARE I2C_REG(CAT_SYSTEM, CAT0_VER_HARDWARE, 2)
-#define SYSTEM_VER_PARAMETER I2C_REG(CAT_SYSTEM, CAT0_VER_PARAMETER, 2)
-#define SYSTEM_VER_AWB I2C_REG(CAT_SYSTEM, CAT0_VER_AWB, 2)
-
-#define SYSTEM_SYSMODE I2C_REG(CAT_SYSTEM, CAT0_SYSMODE, 1)
+#define SYSTEM_VER_CUSTOMER I2C_REG(CAT_SYSTEM, 0x00, 1)
+#define SYSTEM_VER_PROJECT I2C_REG(CAT_SYSTEM, 0x01, 1)
+#define SYSTEM_VER_FIRMWARE I2C_REG(CAT_SYSTEM, 0x02, 2)
+#define SYSTEM_VER_HARDWARE I2C_REG(CAT_SYSTEM, 0x04, 2)
+#define SYSTEM_VER_PARAMETER I2C_REG(CAT_SYSTEM, 0x06, 2)
+#define SYSTEM_VER_AWB I2C_REG(CAT_SYSTEM, 0x08, 2)
+
+#define SYSTEM_SYSMODE I2C_REG(CAT_SYSTEM, 0x0b, 1)
#define REG_SYSINIT 0x00 /* SYSTEM mode */
#define REG_PARAMETER 0x01 /* PARAMETER mode */
#define REG_MONITOR 0x02 /* MONITOR mode */
#define REG_CAPTURE 0x03 /* CAPTURE mode */
#define SYSTEM_CMD(__cmd) I2C_REG(CAT_SYSTEM, __cmd, 1)
-#define SYSTEM_VER_STRING I2C_REG(CAT_SYSTEM, CAT0_VER_STRING, 1)
+#define SYSTEM_VER_STRING I2C_REG(CAT_SYSTEM, 0x0a, 1)
#define REG_SAMSUNG_ELECTRO "SE" /* Samsung Electro-Mechanics */
#define REG_SAMSUNG_OPTICS "OP" /* Samsung Fiber-Optics */
#define REG_SAMSUNG_TECHWIN "TB" /* Samsung Techwin */
+/* SYSTEM mode status */
+#define SYSTEM_STATUS I2C_REG(CAT_SYSTEM, 0x0c, 1)
-#define SYSTEM_INT_FACTOR I2C_REG(CAT_SYSTEM, CAT0_INT_FACTOR, 1)
-#define SYSTEM_INT_ENABLE I2C_REG(CAT_SYSTEM, CAT0_INT_ENABLE, 1)
+/* Interrupt pending register */
+#define SYSTEM_INT_FACTOR I2C_REG(CAT_SYSTEM, 0x10, 1)
+/* interrupt enable register */
+#define SYSTEM_INT_ENABLE I2C_REG(CAT_SYSTEM, 0x11, 1)
#define REG_INT_MODE (1 << 0)
#define REG_INT_AF (1 << 1)
#define REG_INT_ZOOM (1 << 2)
@@ -105,20 +97,20 @@
* can handle: preview (MONITOR) resolution size / frames per second / the interface
* between the sensor and the Application Processor / even the image effect.
*/
-#define CAT1_DATA_INTERFACE 0x00 /* interface between sensor and AP */
-#define CAT1_MONITOR_SIZE 0x01 /* resolution at the MONITOR mode */
-#define CAT1_MONITOR_FPS 0x02 /* frame per second at this mode */
-#define CAT1_EFFECT 0x0b /* image effects */
-#define PARM_MON_SIZE I2C_REG(CAT_PARAM, CAT1_MONITOR_SIZE, 1)
+/* Resolution in the MONITOR mode */
+#define PARM_MON_SIZE I2C_REG(CAT_PARAM, 0x01, 1)
-#define PARM_MON_FPS I2C_REG(CAT_PARAM, CAT1_MONITOR_FPS, 1)
+/* Frame rate */
+#define PARM_MON_FPS I2C_REG(CAT_PARAM, 0x02, 1)
#define REG_FPS_30 0x02
-#define PARM_INTERFACE I2C_REG(CAT_PARAM, CAT1_DATA_INTERFACE, 1)
+/* Video bus between the sensor and a host processor */
+#define PARM_INTERFACE I2C_REG(CAT_PARAM, 0x00, 1)
#define REG_INTERFACE_MIPI 0x02
-#define PARM_EFFECT I2C_REG(CAT_PARAM, CAT1_EFFECT, 1)
+/* Image effects */
+#define PARM_EFFECT I2C_REG(CAT_PARAM, 0x0b, 1)
#define REG_EFFECT_OFF 0x00
#define REG_EFFECT_NEGA 0x01
#define REG_EFFECT_EMBOSS 0x06
@@ -135,39 +127,37 @@
* other options like zoom / color effect (different from the effect in PARAMETER
* mode) / anti hand-shake algorithm.
*/
-#define CAT2_ZOOM 0x01 /* set the zoom position & execute */
-#define CAT2_ZOOM_STEP 0x03 /* set the zoom step */
-#define CAT2_CFIXB 0x09 /* CB value for color effect */
-#define CAT2_CFIXR 0x0a /* CR value for color effect */
-#define CAT2_COLOR_EFFECT 0x0b /* set on/off of color effect */
-#define CAT2_CHROMA_LVL 0x0f /* set chroma level */
-#define CAT2_CHROMA_EN 0x10 /* set on/off of choroma */
-#define CAT2_EDGE_LVL 0x11 /* set sharpness level */
-#define CAT2_EDGE_EN 0x12 /* set on/off sharpness */
-#define CAT2_TONE_CTL 0x25 /* set tone color(contrast) */
-
-#define MON_ZOOM I2C_REG(CAT_MONITOR, CAT2_ZOOM, 1)
-
-#define MON_CFIXR I2C_REG(CAT_MONITOR, CAT2_CFIXR, 1)
-#define MON_CFIXB I2C_REG(CAT_MONITOR, CAT2_CFIXB, 1)
+
+/* Target digital zoom position */
+#define MON_ZOOM I2C_REG(CAT_MONITOR, 0x01, 1)
+
+/* CR value for color effect */
+#define MON_CFIXR I2C_REG(CAT_MONITOR, 0x0a, 1)
+/* CB value for color effect */
+#define MON_CFIXB I2C_REG(CAT_MONITOR, 0x09, 1)
#define REG_CFIXB_SEPIA 0xd8
#define REG_CFIXR_SEPIA 0x18
-#define MON_EFFECT I2C_REG(CAT_MONITOR, CAT2_COLOR_EFFECT, 1)
+#define MON_EFFECT I2C_REG(CAT_MONITOR, 0x0b, 1)
#define REG_COLOR_EFFECT_OFF 0x00
#define REG_COLOR_EFFECT_ON 0x01
-#define MON_CHROMA_EN I2C_REG(CAT_MONITOR, CAT2_CHROMA_EN, 1)
-#define MON_CHROMA_LVL I2C_REG(CAT_MONITOR, CAT2_CHROMA_LVL, 1)
+/* Chroma enable */
+#define MON_CHROMA_EN I2C_REG(CAT_MONITOR, 0x10, 1)
+/* Chroma level */
+#define MON_CHROMA_LVL I2C_REG(CAT_MONITOR, 0x0f, 1)
#define REG_CHROMA_OFF 0x00
#define REG_CHROMA_ON 0x01
-#define MON_EDGE_EN I2C_REG(CAT_MONITOR, CAT2_EDGE_EN, 1)
-#define MON_EDGE_LVL I2C_REG(CAT_MONITOR, CAT2_EDGE_LVL, 1)
+/* Sharpness on/off */
+#define MON_EDGE_EN I2C_REG(CAT_MONITOR, 0x12, 1)
+/* Sharpness level */
+#define MON_EDGE_LVL I2C_REG(CAT_MONITOR, 0x11, 1)
#define REG_EDGE_OFF 0x00
#define REG_EDGE_ON 0x01
-#define MON_TONE_CTL I2C_REG(CAT_MONITOR, CAT2_TONE_CTL, 1)
+/* Set color tone (contrast) */
+#define MON_TONE_CTL I2C_REG(CAT_MONITOR, 0x25, 1)
/*
* Category 3 - Auto Exposure
@@ -179,27 +169,20 @@
* different. So, this category also provides the max/min values. And each of
* the MONITOR and CAPTURE modes has its own gain/shutter/max exposure values.
*/
-#define CAT3_AE_LOCK 0x00 /* locking Auto exposure */
-#define CAT3_AE_MODE 0x01 /* set AE mode, mode means range */
-#define CAT3_ISO 0x05 /* set ISO */
-#define CAT3_EV_PRESET_MONITOR 0x0a /* EV(scenemode) preset for MONITOR */
-#define CAT3_EV_PRESET_CAPTURE 0x0b /* EV(scenemode) preset for CAPTURE */
-#define CAT3_MANUAL_GAIN_MON 0x12 /* meteoring value for the MONITOR */
-#define CAT3_MAX_GAIN_MON 0x1a /* max gain value for the MONITOR */
-#define CAT3_MANUAL_GAIN_CAP 0x26 /* meteoring value for the CAPTURE */
-#define CAT3_AE_INDEX 0x38 /* AE index */
-
-#define AE_LOCK I2C_REG(CAT_AE, CAT3_AE_LOCK, 1)
+
+/* Auto Exposure locking */
+#define AE_LOCK I2C_REG(CAT_AE, 0x00, 1)
#define REG_AE_UNLOCK 0x00
#define REG_AE_LOCK 0x01
-#define AE_MODE I2C_REG(CAT_AE, CAT3_AE_MODE, 1)
+/* Auto Exposure algorithm mode */
+#define AE_MODE I2C_REG(CAT_AE, 0x01, 1)
#define REG_AE_OFF 0x00 /* AE off */
#define REG_AE_ALL 0x01 /* calc AE in all block integral */
#define REG_AE_CENTER 0x03 /* calc AE in center weighted */
#define REG_AE_SPOT 0x06 /* calc AE in specific spot */
-#define AE_ISO I2C_REG(CAT_AE, CAT3_ISO, 1)
+#define AE_ISO I2C_REG(CAT_AE, 0x05, 1)
#define REG_ISO_AUTO 0x00
#define REG_ISO_50 0x01
#define REG_ISO_100 0x02
@@ -207,8 +190,10 @@
#define REG_ISO_400 0x04
#define REG_ISO_800 0x05
-#define AE_EV_PRESET_MONITOR I2C_REG(CAT_AE, CAT3_EV_PRESET_MONITOR, 1)
-#define AE_EV_PRESET_CAPTURE I2C_REG(CAT_AE, CAT3_EV_PRESET_CAPTURE, 1)
+/* EV (scenemode) preset for MONITOR */
+#define AE_EV_PRESET_MONITOR I2C_REG(CAT_AE, 0x0a, 1)
+/* EV (scenemode) preset for CAPTURE */
+#define AE_EV_PRESET_CAPTURE I2C_REG(CAT_AE, 0x0b, 1)
#define REG_SCENE_NORMAL 0x00
#define REG_SCENE_PORTRAIT 0x01
#define REG_SCENE_LANDSCAPE 0x02
@@ -224,11 +209,14 @@
#define REG_SCENE_TEXT 0x0c
#define REG_SCENE_CANDLE 0x0d
-#define AE_MAN_GAIN_MON I2C_REG(CAT_AE, CAT3_MANUAL_GAIN_MON, 2)
-#define AE_MAX_GAIN_MON I2C_REG(CAT_AE, CAT3_MAX_GAIN_MON, 2)
-#define AE_MAN_GAIN_CAP I2C_REG(CAT_AE, CAT3_MANUAL_GAIN_CAP, 2)
+/* Manual gain in MONITOR mode */
+#define AE_MAN_GAIN_MON I2C_REG(CAT_AE, 0x12, 2)
+/* Maximum gain in MONITOR mode */
+#define AE_MAX_GAIN_MON I2C_REG(CAT_AE, 0x1a, 2)
+/* Manual gain in CAPTURE mode */
+#define AE_MAN_GAIN_CAP I2C_REG(CAT_AE, 0x26, 2)
-#define AE_INDEX I2C_REG(CAT_AE, CAT3_AE_INDEX, 1)
+#define AE_INDEX I2C_REG(CAT_AE, 0x38, 1)
#define REG_AE_INDEX_20_NEG 0x00
#define REG_AE_INDEX_15_NEG 0x01
#define REG_AE_INDEX_10_NEG 0x02
@@ -241,22 +229,19 @@
/*
* Category 6 - White Balance
- *
- * This category provide AWB locking/mode/preset/speed/gain bias, etc.
*/
-#define CAT6_AWB_LOCK 0x00 /* locking Auto Whitebalance */
-#define CAT6_AWB_MODE 0x02 /* set Auto or Manual */
-#define CAT6_AWB_MANUAL 0x03 /* set Manual(preset) value */
-#define AWB_LOCK I2C_REG(CAT_WB, CAT6_AWB_LOCK, 1)
+/* Auto Whitebalance locking */
+#define AWB_LOCK I2C_REG(CAT_WB, 0x00, 1)
#define REG_AWB_UNLOCK 0x00
#define REG_AWB_LOCK 0x01
-#define AWB_MODE I2C_REG(CAT_WB, CAT6_AWB_MODE, 1)
+#define AWB_MODE I2C_REG(CAT_WB, 0x02, 1)
#define REG_AWB_AUTO 0x01 /* AWB off */
#define REG_AWB_PRESET 0x02 /* AWB preset */
-#define AWB_MANUAL I2C_REG(CAT_WB, CAT6_AWB_MANUAL, 1)
+/* Manual WB (preset) */
+#define AWB_MANUAL I2C_REG(CAT_WB, 0x03, 1)
#define REG_AWB_INCANDESCENT 0x01
#define REG_AWB_FLUORESCENT_1 0x02
#define REG_AWB_FLUORESCENT_2 0x03
@@ -269,42 +254,25 @@
/*
* Category 7 - EXIF information
*/
-#define CAT7_INFO_EXPTIME_NU 0x00
-#define CAT7_INFO_EXPTIME_DE 0x04
-#define CAT7_INFO_TV_NU 0x08
-#define CAT7_INFO_TV_DE 0x0c
-#define CAT7_INFO_AV_NU 0x10
-#define CAT7_INFO_AV_DE 0x14
-#define CAT7_INFO_BV_NU 0x18
-#define CAT7_INFO_BV_DE 0x1c
-#define CAT7_INFO_EBV_NU 0x20
-#define CAT7_INFO_EBV_DE 0x24
-#define CAT7_INFO_ISO 0x28
-#define CAT7_INFO_FLASH 0x2a
-#define CAT7_INFO_SDR 0x2c
-#define CAT7_INFO_QVAL 0x2e
-
-#define EXIF_INFO_EXPTIME_NU I2C_REG(CAT_EXIF, CAT7_INFO_EXPTIME_NU, 4)
-#define EXIF_INFO_EXPTIME_DE I2C_REG(CAT_EXIF, CAT7_INFO_EXPTIME_DE, 4)
-#define EXIF_INFO_TV_NU I2C_REG(CAT_EXIF, CAT7_INFO_TV_NU, 4)
-#define EXIF_INFO_TV_DE I2C_REG(CAT_EXIF, CAT7_INFO_TV_DE, 4)
-#define EXIF_INFO_AV_NU I2C_REG(CAT_EXIF, CAT7_INFO_AV_NU, 4)
-#define EXIF_INFO_AV_DE I2C_REG(CAT_EXIF, CAT7_INFO_AV_DE, 4)
-#define EXIF_INFO_BV_NU I2C_REG(CAT_EXIF, CAT7_INFO_BV_NU, 4)
-#define EXIF_INFO_BV_DE I2C_REG(CAT_EXIF, CAT7_INFO_BV_DE, 4)
-#define EXIF_INFO_EBV_NU I2C_REG(CAT_EXIF, CAT7_INFO_EBV_NU, 4)
-#define EXIF_INFO_EBV_DE I2C_REG(CAT_EXIF, CAT7_INFO_EBV_DE, 4)
-#define EXIF_INFO_ISO I2C_REG(CAT_EXIF, CAT7_INFO_ISO, 2)
-#define EXIF_INFO_FLASH I2C_REG(CAT_EXIF, CAT7_INFO_FLASH, 2)
-#define EXIF_INFO_SDR I2C_REG(CAT_EXIF, CAT7_INFO_SDR, 2)
-#define EXIF_INFO_QVAL I2C_REG(CAT_EXIF, CAT7_INFO_QVAL, 2)
+#define EXIF_INFO_EXPTIME_NU I2C_REG(CAT_EXIF, 0x00, 4)
+#define EXIF_INFO_EXPTIME_DE I2C_REG(CAT_EXIF, 0x04, 4)
+#define EXIF_INFO_TV_NU I2C_REG(CAT_EXIF, 0x08, 4)
+#define EXIF_INFO_TV_DE I2C_REG(CAT_EXIF, 0x0c, 4)
+#define EXIF_INFO_AV_NU I2C_REG(CAT_EXIF, 0x10, 4)
+#define EXIF_INFO_AV_DE I2C_REG(CAT_EXIF, 0x14, 4)
+#define EXIF_INFO_BV_NU I2C_REG(CAT_EXIF, 0x18, 4)
+#define EXIF_INFO_BV_DE I2C_REG(CAT_EXIF, 0x1c, 4)
+#define EXIF_INFO_EBV_NU I2C_REG(CAT_EXIF, 0x20, 4)
+#define EXIF_INFO_EBV_DE I2C_REG(CAT_EXIF, 0x24, 4)
+#define EXIF_INFO_ISO I2C_REG(CAT_EXIF, 0x28, 2)
+#define EXIF_INFO_FLASH I2C_REG(CAT_EXIF, 0x2a, 2)
+#define EXIF_INFO_SDR I2C_REG(CAT_EXIF, 0x2c, 2)
+#define EXIF_INFO_QVAL I2C_REG(CAT_EXIF, 0x2e, 2)
/*
* Category 9 - Face Detection
*/
-#define CAT9_FD_CTL 0x00
-
-#define FD_CTL I2C_REG(CAT_FD, CAT9_FD_CTL, 1)
+#define FD_CTL I2C_REG(CAT_FD, 0x00, 1)
#define BIT_FD_EN 0
#define BIT_FD_DRAW_FACE_FRAME 4
#define BIT_FD_DRAW_SMILE_LVL 6
@@ -314,62 +282,50 @@
/*
* Category A - Lens Parameter
*/
-#define CATA_AF_MODE 0x01
-#define CATA_AF_EXECUTE 0x02
-#define CATA_AF_STATUS 0x03
-#define CATA_AF_VERSION 0x0a
-
-#define AF_MODE I2C_REG(CAT_LENS, CATA_AF_MODE, 1)
+#define AF_MODE I2C_REG(CAT_LENS, 0x01, 1)
#define REG_AF_NORMAL 0x00 /* Normal AF, one time */
#define REG_AF_MACRO 0x01 /* Macro AF, one time */
#define REG_AF_POWEROFF 0x07
-#define AF_EXECUTE I2C_REG(CAT_LENS, CATA_AF_EXECUTE, 1)
+#define AF_EXECUTE I2C_REG(CAT_LENS, 0x02, 1)
#define REG_AF_STOP 0x00
#define REG_AF_EXE_AUTO 0x01
#define REG_AF_EXE_CAF 0x02
-#define AF_STATUS I2C_REG(CAT_LENS, CATA_AF_STATUS, 1)
+#define AF_STATUS I2C_REG(CAT_LENS, 0x03, 1)
#define REG_AF_FAIL 0x00
#define REG_AF_SUCCESS 0x02
#define REG_AF_IDLE 0x04
#define REG_AF_BUSY 0x05
-#define AF_VERSION I2C_REG(CAT_LENS, CATA_AF_VERSION, 1)
+#define AF_VERSION I2C_REG(CAT_LENS, 0x0a, 1)
/*
* Category B - CAPTURE Parameter
*/
-#define CATB_YUVOUT_MAIN 0x00
-#define CATB_MAIN_IMAGE_SIZE 0x01
-#define CATB_MCC_MODE 0x1d
-#define CATB_WDR_EN 0x2c
-#define CATB_LIGHT_CTRL 0x40
-#define CATB_FLASH_CTRL 0x41
-
-#define CAPP_YUVOUT_MAIN I2C_REG(CAT_CAPT_PARM, CATB_YUVOUT_MAIN, 1)
+#define CAPP_YUVOUT_MAIN I2C_REG(CAT_CAPT_PARM, 0x00, 1)
#define REG_YUV422 0x00
#define REG_BAYER10 0x05
#define REG_BAYER8 0x06
#define REG_JPEG 0x10
-#define CAPP_MAIN_IMAGE_SIZE I2C_REG(CAT_CAPT_PARM, CATB_MAIN_IMAGE_SIZE, 1)
+#define CAPP_MAIN_IMAGE_SIZE I2C_REG(CAT_CAPT_PARM, 0x01, 1)
-#define CAPP_MCC_MODE I2C_REG(CAT_CAPT_PARM, CATB_MCC_MODE, 1)
+#define CAPP_MCC_MODE I2C_REG(CAT_CAPT_PARM, 0x1d, 1)
#define REG_MCC_OFF 0x00
#define REG_MCC_NORMAL 0x01
-#define CAPP_WDR_EN I2C_REG(CAT_CAPT_PARM, CATB_WDR_EN, 1)
+#define CAPP_WDR_EN I2C_REG(CAT_CAPT_PARM, 0x2c, 1)
#define REG_WDR_OFF 0x00
#define REG_WDR_ON 0x01
#define REG_WDR_AUTO 0x02
-#define CAPP_LIGHT_CTRL I2C_REG(CAT_CAPT_PARM, CATB_LIGHT_CTRL, 1)
+#define CAPP_LIGHT_CTRL I2C_REG(CAT_CAPT_PARM, 0x40, 1)
#define REG_LIGHT_OFF 0x00
#define REG_LIGHT_ON 0x01
#define REG_LIGHT_AUTO 0x02
-#define CAPP_FLASH_CTRL I2C_REG(CAT_CAPT_PARM, CATB_FLASH_CTRL, 1)
+#define CAPP_FLASH_CTRL I2C_REG(CAT_CAPT_PARM, 0x41, 1)
#define REG_FLASH_OFF 0x00
#define REG_FLASH_ON 0x01
#define REG_FLASH_AUTO 0x02
@@ -377,34 +333,29 @@
/*
* Category C - CAPTURE Control
*/
-#define CATC_CAP_MODE 0x00
-#define CATC_CAP_SEL_FRAME 0x06 /* It determines Single or Multi */
-#define CATC_CAP_START 0x09
-#define CATC_CAP_IMAGE_SIZE 0x0d
-#define CATC_CAP_THUMB_SIZE 0x11
-
-#define CAPC_MODE I2C_REG(CAT_CAPT_CTRL, CATC_CAP_MODE, 1)
+#define CAPC_MODE I2C_REG(CAT_CAPT_CTRL, 0x00, 1)
#define REG_CAP_NONE 0x00
#define REG_CAP_ANTI_SHAKE 0x02
-#define CAPC_SEL_FRAME I2C_REG(CAT_CAPT_CTRL, CATC_CAP_SEL_FRAME, 1)
+/* Select single- or multi-shot capture */
+#define CAPC_SEL_FRAME I2C_REG(CAT_CAPT_CTRL, 0x06, 1)
-#define CAPC_START I2C_REG(CAT_CAPT_CTRL, CATC_CAP_START, 1)
+#define CAPC_START I2C_REG(CAT_CAPT_CTRL, 0x09, 1)
#define REG_CAP_START_MAIN 0x01
#define REG_CAP_START_THUMB 0x03
-#define CAPC_IMAGE_SIZE I2C_REG(CAT_CAPT_CTRL, CATC_CAP_IMAGE_SIZE, 4)
-#define CAPC_THUMB_SIZE I2C_REG(CAT_CAPT_CTRL, CATC_CAP_THUMB_SIZE, 4)
+#define CAPC_IMAGE_SIZE I2C_REG(CAT_CAPT_CTRL, 0x0d, 4)
+#define CAPC_THUMB_SIZE I2C_REG(CAT_CAPT_CTRL, 0x11, 4)
/*
* Category F - Flash
*
* This mode provides functions for the internal flash memory and system startup.
*/
-#define CATF_CAM_START 0x12 /* It starts internal ARM core booting
- * after power-up */
-#define FLASH_CAM_START I2C_REG(CAT_FLASH, CATF_CAM_START, 1)
-#define REG_START_ARM_BOOT 0x01
+/* Starts internal ARM core booting after power-up */
+#define FLASH_CAM_START I2C_REG(CAT_FLASH, 0x12, 1)
+#define REG_START_ARM_BOOT 0x01 /* write value */
+#define REG_IN_FLASH_MODE 0x00 /* read value */
#endif /* M5MOLS_REG_H */
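All register names kept in this header encode three things through I2C_REG(): the category, the offset inside that category, and the access width in bytes. The macro itself is defined in m5mols.h, which is not part of this hunk; the lines below are a purely hypothetical illustration of such a packing (note the EX_ prefix), showing how a composite value like SYSTEM_SYSMODE could later be split apart again.

/* Hypothetical packing, for illustration only - not the driver's real macros. */
#define EX_I2C_REG(cat, off, size)	(((cat) << 16) | ((off) << 8) | (size))
#define EX_I2C_CATEGORY(reg)		(((reg) >> 16) & 0xff)	/* e.g. CAT_SYSTEM */
#define EX_I2C_OFFSET(reg)		(((reg) >> 8) & 0xff)	/* e.g. 0x0b for SYSMODE */
#define EX_I2C_SIZE(reg)		((reg) & 0xff)		/* 1, 2 or 4 bytes */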
diff --git a/drivers/media/video/marvell-ccic/mcam-core.c b/drivers/media/video/marvell-ccic/mcam-core.c
index 80ec64d2d6d8..37d20e73908a 100644
--- a/drivers/media/video/marvell-ccic/mcam-core.c
+++ b/drivers/media/video/marvell-ccic/mcam-core.c
@@ -51,7 +51,7 @@ static int delivered;
* sense.
*/
-static int alloc_bufs_at_read;
+static bool alloc_bufs_at_read;
module_param(alloc_bufs_at_read, bool, 0444);
MODULE_PARM_DESC(alloc_bufs_at_read,
"Non-zero value causes DMA buffers to be allocated when the "
@@ -73,11 +73,11 @@ MODULE_PARM_DESC(dma_buf_size,
"parameters require larger buffers, an attempt to reallocate "
"will be made.");
#else /* MCAM_MODE_VMALLOC */
-static const int alloc_bufs_at_read = 0;
+static const bool alloc_bufs_at_read = 0;
static const int n_dma_bufs = 3; /* Used by S/G_PARM */
#endif /* MCAM_MODE_VMALLOC */
-static int flip;
+static bool flip;
module_param(flip, bool, 0444);
MODULE_PARM_DESC(flip,
"If set, the sensor will be instructed to flip the image "
@@ -522,6 +522,15 @@ static void mcam_sg_next_buffer(struct mcam_camera *cam)
*/
static void mcam_ctlr_dma_sg(struct mcam_camera *cam)
{
+ /*
+ * The list-empty condition can hit us at resume time
+ * if the buffer list was empty when the system was suspended.
+ */
+ if (list_empty(&cam->buffers)) {
+ set_bit(CF_SG_RESTART, &cam->flags);
+ return;
+ }
+
mcam_reg_clear_bit(cam, REG_CTRL1, C1_DESC_3WORD);
mcam_sg_next_buffer(cam);
mcam_reg_set_bit(cam, REG_CTRL1, C1_DESC_ENA);
@@ -566,6 +575,7 @@ static void mcam_dma_sg_done(struct mcam_camera *cam, int frame)
} else {
set_bit(CF_SG_RESTART, &cam->flags);
singles++;
+ cam->vb_bufs[0] = NULL;
}
/*
* Now we can give the completed frame back to user space.
@@ -661,10 +671,10 @@ static int mcam_ctlr_configure(struct mcam_camera *cam)
unsigned long flags;
spin_lock_irqsave(&cam->dev_lock, flags);
+ clear_bit(CF_SG_RESTART, &cam->flags);
cam->dma_setup(cam);
mcam_ctlr_image(cam);
mcam_set_config_needed(cam, 0);
- clear_bit(CF_SG_RESTART, &cam->flags);
spin_unlock_irqrestore(&cam->dev_lock, flags);
return 0;
}
@@ -873,7 +883,8 @@ static int mcam_read_setup(struct mcam_camera *cam)
mcam_reset_buffers(cam);
mcam_ctlr_irq_enable(cam);
cam->state = S_STREAMING;
- mcam_ctlr_start(cam);
+ if (!test_bit(CF_SG_RESTART, &cam->flags))
+ mcam_ctlr_start(cam);
spin_unlock_irqrestore(&cam->dev_lock, flags);
return 0;
}
@@ -1818,11 +1829,15 @@ void mccic_shutdown(struct mcam_camera *cam)
void mccic_suspend(struct mcam_camera *cam)
{
- enum mcam_state cstate = cam->state;
+ mutex_lock(&cam->s_mutex);
+ if (cam->users > 0) {
+ enum mcam_state cstate = cam->state;
- mcam_ctlr_stop_dma(cam);
- mcam_ctlr_power_down(cam);
- cam->state = cstate;
+ mcam_ctlr_stop_dma(cam);
+ mcam_ctlr_power_down(cam);
+ cam->state = cstate;
+ }
+ mutex_unlock(&cam->s_mutex);
}
int mccic_resume(struct mcam_camera *cam)
@@ -1839,8 +1854,15 @@ int mccic_resume(struct mcam_camera *cam)
mutex_unlock(&cam->s_mutex);
set_bit(CF_CONFIG_NEEDED, &cam->flags);
- if (cam->state == S_STREAMING)
+ if (cam->state == S_STREAMING) {
+ /*
+ * If there was a buffer in the DMA engine at suspend
+ * time, put it back on the queue or we'll forget about it.
+ */
+ if (cam->buffer_mode == B_DMA_sg && cam->vb_bufs[0])
+ list_add(&cam->vb_bufs[0]->queue, &cam->buffers);
ret = mcam_read_setup(cam);
+ }
return ret;
}
#endif /* CONFIG_PM */
diff --git a/drivers/media/video/marvell-ccic/mmp-driver.c b/drivers/media/video/marvell-ccic/mmp-driver.c
index fb0b124b35f3..0d64e2d7474a 100644
--- a/drivers/media/video/marvell-ccic/mmp-driver.c
+++ b/drivers/media/video/marvell-ccic/mmp-driver.c
@@ -26,6 +26,7 @@
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/list.h>
+#include <linux/pm.h>
#include "mcam-core.h"
@@ -310,10 +311,44 @@ static int mmpcam_platform_remove(struct platform_device *pdev)
return mmpcam_remove(cam);
}
+/*
+ * Suspend/resume support.
+ */
+#ifdef CONFIG_PM
+
+static int mmpcam_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct mmp_camera *cam = mmpcam_find_device(pdev);
+
+ if (state.event != PM_EVENT_SUSPEND)
+ return 0;
+ mccic_suspend(&cam->mcam);
+ return 0;
+}
+
+static int mmpcam_resume(struct platform_device *pdev)
+{
+ struct mmp_camera *cam = mmpcam_find_device(pdev);
+
+ /*
+ * Power up unconditionally just in case the core tries to
+ * touch a register even if nothing was active before; trust
+ * me, it's better this way.
+ */
+ mmpcam_power_up(&cam->mcam);
+ return mccic_resume(&cam->mcam);
+}
+
+#endif
+
static struct platform_driver mmpcam_driver = {
.probe = mmpcam_probe,
.remove = mmpcam_platform_remove,
+#ifdef CONFIG_PM
+ .suspend = mmpcam_suspend,
+ .resume = mmpcam_resume,
+#endif
.driver = {
.name = "mmp-camera",
.owner = THIS_MODULE
diff --git a/drivers/media/video/msp3400-driver.c b/drivers/media/video/msp3400-driver.c
index d0f538857285..d7cd0f633f63 100644
--- a/drivers/media/video/msp3400-driver.c
+++ b/drivers/media/video/msp3400-driver.c
@@ -69,12 +69,12 @@ MODULE_LICENSE("GPL");
/* module parameters */
static int opmode = OPMODE_AUTO;
int msp_debug; /* msp_debug output */
-int msp_once; /* no continuous stereo monitoring */
-int msp_amsound; /* hard-wire AM sound at 6.5 Hz (france),
+bool msp_once; /* no continuous stereo monitoring */
+bool msp_amsound; /* hard-wire AM sound at 6.5 MHz (France),
 the autoscan seems to work well only with FM... */
int msp_standard = 1; /* Override auto detect of audio msp_standard,
if needed. */
-int msp_dolby;
+bool msp_dolby;
int msp_stereo_thresh = 0x190; /* a2 threshold for stereo/bilingual
(msp34xxg only) 0x00a0-0x03c0 */
diff --git a/drivers/media/video/msp3400-driver.h b/drivers/media/video/msp3400-driver.h
index 831e8db4368c..fbe5e0715f93 100644
--- a/drivers/media/video/msp3400-driver.h
+++ b/drivers/media/video/msp3400-driver.h
@@ -44,10 +44,10 @@
/* module parameters */
extern int msp_debug;
-extern int msp_once;
-extern int msp_amsound;
+extern bool msp_once;
+extern bool msp_amsound;
extern int msp_standard;
-extern int msp_dolby;
+extern bool msp_dolby;
extern int msp_stereo_thresh;
struct msp_state {
diff --git a/drivers/media/video/mt9m001.c b/drivers/media/video/mt9m001.c
index e2b1029b16cd..097c9d3d04a8 100644
--- a/drivers/media/video/mt9m001.c
+++ b/drivers/media/video/mt9m001.c
@@ -109,14 +109,13 @@ static struct mt9m001 *to_mt9m001(const struct i2c_client *client)
static int reg_read(struct i2c_client *client, const u8 reg)
{
- s32 data = i2c_smbus_read_word_data(client, reg);
- return data < 0 ? data : swab16(data);
+ return i2c_smbus_read_word_swapped(client, reg);
}
static int reg_write(struct i2c_client *client, const u8 reg,
const u16 data)
{
- return i2c_smbus_write_word_data(client, reg, swab16(data));
+ return i2c_smbus_write_word_swapped(client, reg, data);
}
static int reg_set(struct i2c_client *client, const u8 reg,
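The same one-line substitution recurs in several of the sensor drivers touched below (mt9m111, mt9p031, mt9t001, mt9t031, mt9v022, mt9v032): open-coded swab16()/cpu_to_be16() byte swapping around i2c_smbus_{read,write}_word_data() gives way to the i2c_smbus_{read,write}_word_swapped() helpers. The sketch below is not part of any of these patches; it only spells out the equivalence being relied on for the read side.

/* SMBus transfers the low byte first, while these sensors send the high
 * byte first, so a successful read must be byte-swapped.  Error codes are
 * negative and must be returned untouched - exactly what the helper does. */
static int reg_read_open_coded(struct i2c_client *client, u8 reg)
{
	s32 data = i2c_smbus_read_word_data(client, reg);

	return data < 0 ? data : swab16(data);
}

static int reg_read_helper(struct i2c_client *client, u8 reg)
{
	return i2c_smbus_read_word_swapped(client, reg);	/* same result */
}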
diff --git a/drivers/media/video/mt9m111.c b/drivers/media/video/mt9m111.c
index cf2c0fb95f2f..bee65bff46e8 100644
--- a/drivers/media/video/mt9m111.c
+++ b/drivers/media/video/mt9m111.c
@@ -139,25 +139,52 @@
#define MT9M111_MAX_HEIGHT 1024
#define MT9M111_MAX_WIDTH 1280
+struct mt9m111_context {
+ u16 read_mode;
+ u16 blanking_h;
+ u16 blanking_v;
+ u16 reducer_xzoom;
+ u16 reducer_yzoom;
+ u16 reducer_xsize;
+ u16 reducer_ysize;
+ u16 output_fmt_ctrl2;
+ u16 control;
+};
+
+static struct mt9m111_context context_a = {
+ .read_mode = MT9M111_READ_MODE_A,
+ .blanking_h = MT9M111_HORIZONTAL_BLANKING_A,
+ .blanking_v = MT9M111_VERTICAL_BLANKING_A,
+ .reducer_xzoom = MT9M111_REDUCER_XZOOM_A,
+ .reducer_yzoom = MT9M111_REDUCER_YZOOM_A,
+ .reducer_xsize = MT9M111_REDUCER_XSIZE_A,
+ .reducer_ysize = MT9M111_REDUCER_YSIZE_A,
+ .output_fmt_ctrl2 = MT9M111_OUTPUT_FORMAT_CTRL2_A,
+ .control = MT9M111_CTXT_CTRL_RESTART,
+};
+
+static struct mt9m111_context context_b = {
+ .read_mode = MT9M111_READ_MODE_B,
+ .blanking_h = MT9M111_HORIZONTAL_BLANKING_B,
+ .blanking_v = MT9M111_VERTICAL_BLANKING_B,
+ .reducer_xzoom = MT9M111_REDUCER_XZOOM_B,
+ .reducer_yzoom = MT9M111_REDUCER_YZOOM_B,
+ .reducer_xsize = MT9M111_REDUCER_XSIZE_B,
+ .reducer_ysize = MT9M111_REDUCER_YSIZE_B,
+ .output_fmt_ctrl2 = MT9M111_OUTPUT_FORMAT_CTRL2_B,
+ .control = MT9M111_CTXT_CTRL_RESTART |
+ MT9M111_CTXT_CTRL_DEFECTCOR_B | MT9M111_CTXT_CTRL_RESIZE_B |
+ MT9M111_CTXT_CTRL_CTRL2_B | MT9M111_CTXT_CTRL_GAMMA_B |
+ MT9M111_CTXT_CTRL_READ_MODE_B | MT9M111_CTXT_CTRL_VBLANK_SEL_B |
+ MT9M111_CTXT_CTRL_HBLANK_SEL_B,
+};
+
/* MT9M111 has only one fixed colorspace per pixelcode */
struct mt9m111_datafmt {
enum v4l2_mbus_pixelcode code;
enum v4l2_colorspace colorspace;
};
-/* Find a data format by a pixel code in an array */
-static const struct mt9m111_datafmt *mt9m111_find_datafmt(
- enum v4l2_mbus_pixelcode code, const struct mt9m111_datafmt *fmt,
- int n)
-{
- int i;
- for (i = 0; i < n; i++)
- if (fmt[i].code == code)
- return fmt + i;
-
- return NULL;
-}
-
static const struct mt9m111_datafmt mt9m111_colour_fmts[] = {
{V4L2_MBUS_FMT_YUYV8_2X8, V4L2_COLORSPACE_JPEG},
{V4L2_MBUS_FMT_YVYU8_2X8, V4L2_COLORSPACE_JPEG},
@@ -173,27 +200,35 @@ static const struct mt9m111_datafmt mt9m111_colour_fmts[] = {
{V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE, V4L2_COLORSPACE_SRGB},
};
-enum mt9m111_context {
- HIGHPOWER = 0,
- LOWPOWER,
-};
-
struct mt9m111 {
struct v4l2_subdev subdev;
struct v4l2_ctrl_handler hdl;
struct v4l2_ctrl *gain;
int model; /* V4L2_IDENT_MT9M111 or V4L2_IDENT_MT9M112 code
* from v4l2-chip-ident.h */
- enum mt9m111_context context;
- struct v4l2_rect rect;
+ struct mt9m111_context *ctx;
+ struct v4l2_rect rect; /* cropping rectangle */
+ int width; /* output */
+ int height; /* sizes */
struct mutex power_lock; /* lock to protect power_count */
int power_count;
const struct mt9m111_datafmt *fmt;
int lastpage; /* PageMap cache value */
unsigned char datawidth;
- unsigned int powered:1;
};
+/* Find a data format by a pixel code */
+static const struct mt9m111_datafmt *mt9m111_find_datafmt(struct mt9m111 *mt9m111,
+ enum v4l2_mbus_pixelcode code)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(mt9m111_colour_fmts); i++)
+ if (mt9m111_colour_fmts[i].code == code)
+ return mt9m111_colour_fmts + i;
+
+ return mt9m111->fmt;
+}
+
static struct mt9m111 *to_mt9m111(const struct i2c_client *client)
{
return container_of(i2c_get_clientdata(client), struct mt9m111, subdev);
@@ -211,7 +246,7 @@ static int reg_page_map_set(struct i2c_client *client, const u16 reg)
if (page > 2)
return -EINVAL;
- ret = i2c_smbus_write_word_data(client, MT9M111_PAGE_MAP, swab16(page));
+ ret = i2c_smbus_write_word_swapped(client, MT9M111_PAGE_MAP, page);
if (!ret)
mt9m111->lastpage = page;
return ret;
@@ -223,7 +258,7 @@ static int mt9m111_reg_read(struct i2c_client *client, const u16 reg)
ret = reg_page_map_set(client, reg);
if (!ret)
- ret = swab16(i2c_smbus_read_word_data(client, reg & 0xff));
+ ret = i2c_smbus_read_word_swapped(client, reg & 0xff);
dev_dbg(&client->dev, "read reg.%03x -> %04x\n", reg, ret);
return ret;
@@ -236,8 +271,7 @@ static int mt9m111_reg_write(struct i2c_client *client, const u16 reg,
ret = reg_page_map_set(client, reg);
if (!ret)
- ret = i2c_smbus_write_word_data(client, reg & 0xff,
- swab16(data));
+ ret = i2c_smbus_write_word_swapped(client, reg & 0xff, data);
dev_dbg(&client->dev, "write reg.%03x = %04x -> %d\n", reg, data, ret);
return ret;
}
@@ -276,76 +310,63 @@ static int mt9m111_reg_mask(struct i2c_client *client, const u16 reg,
}
static int mt9m111_set_context(struct mt9m111 *mt9m111,
- enum mt9m111_context ctxt)
+ struct mt9m111_context *ctx)
{
struct i2c_client *client = v4l2_get_subdevdata(&mt9m111->subdev);
- int valB = MT9M111_CTXT_CTRL_RESTART | MT9M111_CTXT_CTRL_DEFECTCOR_B
- | MT9M111_CTXT_CTRL_RESIZE_B | MT9M111_CTXT_CTRL_CTRL2_B
- | MT9M111_CTXT_CTRL_GAMMA_B | MT9M111_CTXT_CTRL_READ_MODE_B
- | MT9M111_CTXT_CTRL_VBLANK_SEL_B
- | MT9M111_CTXT_CTRL_HBLANK_SEL_B;
- int valA = MT9M111_CTXT_CTRL_RESTART;
-
- if (ctxt == HIGHPOWER)
- return reg_write(CONTEXT_CONTROL, valB);
- else
- return reg_write(CONTEXT_CONTROL, valA);
+ return reg_write(CONTEXT_CONTROL, ctx->control);
}
-static int mt9m111_setup_rect(struct mt9m111 *mt9m111,
- struct v4l2_rect *rect)
+static int mt9m111_setup_rect_ctx(struct mt9m111 *mt9m111,
+ struct mt9m111_context *ctx, struct v4l2_rect *rect,
+ unsigned int width, unsigned int height)
{
struct i2c_client *client = v4l2_get_subdevdata(&mt9m111->subdev);
- int ret, is_raw_format;
- int width = rect->width;
- int height = rect->height;
+ int ret = mt9m111_reg_write(client, ctx->reducer_xzoom, rect->width);
+ if (!ret)
+ ret = mt9m111_reg_write(client, ctx->reducer_yzoom, rect->height);
+ if (!ret)
+ ret = mt9m111_reg_write(client, ctx->reducer_xsize, width);
+ if (!ret)
+ ret = mt9m111_reg_write(client, ctx->reducer_ysize, height);
+ return ret;
+}
- if (mt9m111->fmt->code == V4L2_MBUS_FMT_SBGGR8_1X8 ||
- mt9m111->fmt->code == V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE)
- is_raw_format = 1;
- else
- is_raw_format = 0;
+static int mt9m111_setup_geometry(struct mt9m111 *mt9m111, struct v4l2_rect *rect,
+ int width, int height, enum v4l2_mbus_pixelcode code)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&mt9m111->subdev);
+ int ret;
ret = reg_write(COLUMN_START, rect->left);
if (!ret)
ret = reg_write(ROW_START, rect->top);
- if (is_raw_format) {
- if (!ret)
- ret = reg_write(WINDOW_WIDTH, width);
- if (!ret)
- ret = reg_write(WINDOW_HEIGHT, height);
- } else {
- if (!ret)
- ret = reg_write(REDUCER_XZOOM_B, MT9M111_MAX_WIDTH);
- if (!ret)
- ret = reg_write(REDUCER_YZOOM_B, MT9M111_MAX_HEIGHT);
- if (!ret)
- ret = reg_write(REDUCER_XSIZE_B, width);
- if (!ret)
- ret = reg_write(REDUCER_YSIZE_B, height);
- if (!ret)
- ret = reg_write(REDUCER_XZOOM_A, MT9M111_MAX_WIDTH);
- if (!ret)
- ret = reg_write(REDUCER_YZOOM_A, MT9M111_MAX_HEIGHT);
+ if (!ret)
+ ret = reg_write(WINDOW_WIDTH, rect->width);
+ if (!ret)
+ ret = reg_write(WINDOW_HEIGHT, rect->height);
+
+ if (code != V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE) {
+ /* IFP in use, down-scaling possible */
if (!ret)
- ret = reg_write(REDUCER_XSIZE_A, width);
+ ret = mt9m111_setup_rect_ctx(mt9m111, &context_b,
+ rect, width, height);
if (!ret)
- ret = reg_write(REDUCER_YSIZE_A, height);
+ ret = mt9m111_setup_rect_ctx(mt9m111, &context_a,
+ rect, width, height);
}
+ dev_dbg(&client->dev, "%s(%x): %ux%u@%u:%u -> %ux%u = %d\n",
+ __func__, code, rect->width, rect->height, rect->left, rect->top,
+ width, height, ret);
+
return ret;
}
static int mt9m111_enable(struct mt9m111 *mt9m111)
{
struct i2c_client *client = v4l2_get_subdevdata(&mt9m111->subdev);
- int ret;
-
- ret = reg_set(RESET, MT9M111_RESET_CHIP_ENABLE);
- if (!ret)
- mt9m111->powered = 1;
- return ret;
+ return reg_write(RESET, MT9M111_RESET_CHIP_ENABLE);
}
static int mt9m111_reset(struct mt9m111 *mt9m111)
@@ -363,43 +384,41 @@ static int mt9m111_reset(struct mt9m111 *mt9m111)
return ret;
}
-static int mt9m111_make_rect(struct mt9m111 *mt9m111,
- struct v4l2_rect *rect)
+static int mt9m111_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
{
+ struct v4l2_rect rect = a->c;
+ struct mt9m111 *mt9m111 = container_of(sd, struct mt9m111, subdev);
+ int width, height;
+ int ret;
+
+ if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
if (mt9m111->fmt->code == V4L2_MBUS_FMT_SBGGR8_1X8 ||
mt9m111->fmt->code == V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE) {
/* Bayer format - even size lengths */
- rect->width = ALIGN(rect->width, 2);
- rect->height = ALIGN(rect->height, 2);
+ rect.width = ALIGN(rect.width, 2);
+ rect.height = ALIGN(rect.height, 2);
/* Let the user play with the starting pixel */
}
/* FIXME: the datasheet doesn't specify minimum sizes */
- soc_camera_limit_side(&rect->left, &rect->width,
+ soc_camera_limit_side(&rect.left, &rect.width,
MT9M111_MIN_DARK_COLS, 2, MT9M111_MAX_WIDTH);
- soc_camera_limit_side(&rect->top, &rect->height,
+ soc_camera_limit_side(&rect.top, &rect.height,
MT9M111_MIN_DARK_ROWS, 2, MT9M111_MAX_HEIGHT);
- return mt9m111_setup_rect(mt9m111, rect);
-}
-
-static int mt9m111_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
-{
- struct v4l2_rect rect = a->c;
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct mt9m111 *mt9m111 = container_of(sd, struct mt9m111, subdev);
- int ret;
-
- dev_dbg(&client->dev, "%s left=%d, top=%d, width=%d, height=%d\n",
- __func__, rect.left, rect.top, rect.width, rect.height);
+ width = min(mt9m111->width, rect.width);
+ height = min(mt9m111->height, rect.height);
- if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return -EINVAL;
-
- ret = mt9m111_make_rect(mt9m111, &rect);
- if (!ret)
+ ret = mt9m111_setup_geometry(mt9m111, &rect, width, height, mt9m111->fmt->code);
+ if (!ret) {
mt9m111->rect = rect;
+ mt9m111->width = width;
+ mt9m111->height = height;
+ }
+
return ret;
}
@@ -434,8 +453,8 @@ static int mt9m111_g_fmt(struct v4l2_subdev *sd,
{
struct mt9m111 *mt9m111 = container_of(sd, struct mt9m111, subdev);
- mf->width = mt9m111->rect.width;
- mf->height = mt9m111->rect.height;
+ mf->width = mt9m111->width;
+ mf->height = mt9m111->height;
mf->code = mt9m111->fmt->code;
mf->colorspace = mt9m111->fmt->colorspace;
mf->field = V4L2_FIELD_NONE;
@@ -504,46 +523,11 @@ static int mt9m111_set_pixfmt(struct mt9m111 *mt9m111,
return -EINVAL;
}
- ret = reg_mask(OUTPUT_FORMAT_CTRL2_A, data_outfmt2,
- mask_outfmt2);
+ ret = mt9m111_reg_mask(client, context_a.output_fmt_ctrl2,
+ data_outfmt2, mask_outfmt2);
if (!ret)
- ret = reg_mask(OUTPUT_FORMAT_CTRL2_B, data_outfmt2,
- mask_outfmt2);
-
- return ret;
-}
-
-static int mt9m111_s_fmt(struct v4l2_subdev *sd,
- struct v4l2_mbus_framefmt *mf)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- const struct mt9m111_datafmt *fmt;
- struct mt9m111 *mt9m111 = container_of(sd, struct mt9m111, subdev);
- struct v4l2_rect rect = {
- .left = mt9m111->rect.left,
- .top = mt9m111->rect.top,
- .width = mf->width,
- .height = mf->height,
- };
- int ret;
-
- fmt = mt9m111_find_datafmt(mf->code, mt9m111_colour_fmts,
- ARRAY_SIZE(mt9m111_colour_fmts));
- if (!fmt)
- return -EINVAL;
-
- dev_dbg(&client->dev,
- "%s code=%x left=%d, top=%d, width=%d, height=%d\n", __func__,
- mf->code, rect.left, rect.top, rect.width, rect.height);
-
- ret = mt9m111_make_rect(mt9m111, &rect);
- if (!ret)
- ret = mt9m111_set_pixfmt(mt9m111, mf->code);
- if (!ret) {
- mt9m111->rect = rect;
- mt9m111->fmt = fmt;
- mf->colorspace = fmt->colorspace;
- }
+ ret = mt9m111_reg_mask(client, context_b.output_fmt_ctrl2,
+ data_outfmt2, mask_outfmt2);
return ret;
}
@@ -551,42 +535,71 @@ static int mt9m111_s_fmt(struct v4l2_subdev *sd,
static int mt9m111_try_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9m111 *mt9m111 = container_of(sd, struct mt9m111, subdev);
const struct mt9m111_datafmt *fmt;
- bool bayer = mf->code == V4L2_MBUS_FMT_SBGGR8_1X8 ||
- mf->code == V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE;
-
- fmt = mt9m111_find_datafmt(mf->code, mt9m111_colour_fmts,
- ARRAY_SIZE(mt9m111_colour_fmts));
- if (!fmt) {
- fmt = mt9m111->fmt;
- mf->code = fmt->code;
- }
+ struct v4l2_rect *rect = &mt9m111->rect;
+ bool bayer;
+
+ fmt = mt9m111_find_datafmt(mt9m111, mf->code);
+
+ bayer = fmt->code == V4L2_MBUS_FMT_SBGGR8_1X8 ||
+ fmt->code == V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE;
/*
* With Bayer format enforce even side lengths, but let the user play
* with the starting pixel
*/
+ if (bayer) {
+ rect->width = ALIGN(rect->width, 2);
+ rect->height = ALIGN(rect->height, 2);
+ }
- if (mf->height > MT9M111_MAX_HEIGHT)
- mf->height = MT9M111_MAX_HEIGHT;
- else if (mf->height < 2)
- mf->height = 2;
- else if (bayer)
- mf->height = ALIGN(mf->height, 2);
+ if (fmt->code == V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE) {
+ /* IFP bypass mode, no scaling */
+ mf->width = rect->width;
+ mf->height = rect->height;
+ } else {
+ /* No upscaling */
+ if (mf->width > rect->width)
+ mf->width = rect->width;
+ if (mf->height > rect->height)
+ mf->height = rect->height;
+ }
- if (mf->width > MT9M111_MAX_WIDTH)
- mf->width = MT9M111_MAX_WIDTH;
- else if (mf->width < 2)
- mf->width = 2;
- else if (bayer)
- mf->width = ALIGN(mf->width, 2);
+ dev_dbg(&client->dev, "%s(): %ux%u, code=%x\n", __func__,
+ mf->width, mf->height, fmt->code);
+ mf->code = fmt->code;
mf->colorspace = fmt->colorspace;
return 0;
}
+static int mt9m111_s_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
+{
+ const struct mt9m111_datafmt *fmt;
+ struct mt9m111 *mt9m111 = container_of(sd, struct mt9m111, subdev);
+ struct v4l2_rect *rect = &mt9m111->rect;
+ int ret;
+
+ mt9m111_try_fmt(sd, mf);
+ fmt = mt9m111_find_datafmt(mt9m111, mf->code);
+ /* try_fmt() guarantees fmt != NULL && fmt->code == mf->code */
+
+ ret = mt9m111_setup_geometry(mt9m111, rect, mf->width, mf->height, mf->code);
+ if (!ret)
+ ret = mt9m111_set_pixfmt(mt9m111, mf->code);
+ if (!ret) {
+ mt9m111->width = mf->width;
+ mt9m111->height = mf->height;
+ mt9m111->fmt = fmt;
+ }
+
+ return ret;
+}
+
static int mt9m111_g_chip_ident(struct v4l2_subdev *sd,
struct v4l2_dbg_chip_ident *id)
{
@@ -650,17 +663,10 @@ static int mt9m111_set_flip(struct mt9m111 *mt9m111, int flip, int mask)
struct i2c_client *client = v4l2_get_subdevdata(&mt9m111->subdev);
int ret;
- if (mt9m111->context == HIGHPOWER) {
- if (flip)
- ret = reg_set(READ_MODE_B, mask);
- else
- ret = reg_clear(READ_MODE_B, mask);
- } else {
- if (flip)
- ret = reg_set(READ_MODE_A, mask);
- else
- ret = reg_clear(READ_MODE_A, mask);
- }
+ if (flip)
+ ret = mt9m111_reg_set(client, mt9m111->ctx->read_mode, mask);
+ else
+ ret = mt9m111_reg_clear(client, mt9m111->ctx->read_mode, mask);
return ret;
}
@@ -738,30 +744,39 @@ static int mt9m111_s_ctrl(struct v4l2_ctrl *ctrl)
static int mt9m111_suspend(struct mt9m111 *mt9m111)
{
+ struct i2c_client *client = v4l2_get_subdevdata(&mt9m111->subdev);
+ int ret;
+
v4l2_ctrl_s_ctrl(mt9m111->gain, mt9m111_get_global_gain(mt9m111));
- return 0;
+ ret = reg_set(RESET, MT9M111_RESET_RESET_MODE);
+ if (!ret)
+ ret = reg_set(RESET, MT9M111_RESET_RESET_SOC |
+ MT9M111_RESET_OUTPUT_DISABLE |
+ MT9M111_RESET_ANALOG_STANDBY);
+ if (!ret)
+ ret = reg_clear(RESET, MT9M111_RESET_CHIP_ENABLE);
+
+ return ret;
}
static void mt9m111_restore_state(struct mt9m111 *mt9m111)
{
- mt9m111_set_context(mt9m111, mt9m111->context);
+ mt9m111_set_context(mt9m111, mt9m111->ctx);
mt9m111_set_pixfmt(mt9m111, mt9m111->fmt->code);
- mt9m111_setup_rect(mt9m111, &mt9m111->rect);
+ mt9m111_setup_geometry(mt9m111, &mt9m111->rect,
+ mt9m111->width, mt9m111->height, mt9m111->fmt->code);
v4l2_ctrl_handler_setup(&mt9m111->hdl);
}
static int mt9m111_resume(struct mt9m111 *mt9m111)
{
- int ret = 0;
+ int ret = mt9m111_enable(mt9m111);
+ if (!ret)
+ ret = mt9m111_reset(mt9m111);
+ if (!ret)
+ mt9m111_restore_state(mt9m111);
- if (mt9m111->powered) {
- ret = mt9m111_enable(mt9m111);
- if (!ret)
- ret = mt9m111_reset(mt9m111);
- if (!ret)
- mt9m111_restore_state(mt9m111);
- }
return ret;
}
@@ -770,12 +785,13 @@ static int mt9m111_init(struct mt9m111 *mt9m111)
struct i2c_client *client = v4l2_get_subdevdata(&mt9m111->subdev);
int ret;
- mt9m111->context = HIGHPOWER;
+ /* Default HIGHPOWER context */
+ mt9m111->ctx = &context_b;
ret = mt9m111_enable(mt9m111);
if (!ret)
ret = mt9m111_reset(mt9m111);
if (!ret)
- ret = mt9m111_set_context(mt9m111, mt9m111->context);
+ ret = mt9m111_set_context(mt9m111, mt9m111->ctx);
if (ret)
dev_err(&client->dev, "mt9m111 init failed: %d\n", ret);
return ret;
@@ -955,6 +971,7 @@ static int mt9m111_probe(struct i2c_client *client,
mt9m111->rect.height = MT9M111_MAX_HEIGHT;
mt9m111->fmt = &mt9m111_colour_fmts[0];
mt9m111->lastpage = -1;
+ mutex_init(&mt9m111->power_lock);
ret = mt9m111_video_probe(client);
if (ret) {
diff --git a/drivers/media/video/mt9p031.c b/drivers/media/video/mt9p031.c
index 73c068993f05..93c3ec7426e8 100644
--- a/drivers/media/video/mt9p031.c
+++ b/drivers/media/video/mt9p031.c
@@ -132,13 +132,12 @@ static struct mt9p031 *to_mt9p031(struct v4l2_subdev *sd)
static int mt9p031_read(struct i2c_client *client, u8 reg)
{
- s32 data = i2c_smbus_read_word_data(client, reg);
- return data < 0 ? data : be16_to_cpu(data);
+ return i2c_smbus_read_word_swapped(client, reg);
}
static int mt9p031_write(struct i2c_client *client, u8 reg, u16 data)
{
- return i2c_smbus_write_word_data(client, reg, cpu_to_be16(data));
+ return i2c_smbus_write_word_swapped(client, reg, data);
}
static int mt9p031_set_output_control(struct mt9p031 *mt9p031, u16 clear,
diff --git a/drivers/media/video/mt9t001.c b/drivers/media/video/mt9t001.c
index 08074b8a2736..cd81d04a529e 100644
--- a/drivers/media/video/mt9t001.c
+++ b/drivers/media/video/mt9t001.c
@@ -133,13 +133,12 @@ static inline struct mt9t001 *to_mt9t001(struct v4l2_subdev *sd)
static int mt9t001_read(struct i2c_client *client, u8 reg)
{
- s32 data = i2c_smbus_read_word_data(client, reg);
- return data < 0 ? data : be16_to_cpu(data);
+ return i2c_smbus_read_word_swapped(client, reg);
}
static int mt9t001_write(struct i2c_client *client, u8 reg, u16 data)
{
- return i2c_smbus_write_word_data(client, reg, cpu_to_be16(data));
+ return i2c_smbus_write_word_swapped(client, reg, data);
}
static int mt9t001_set_output_control(struct mt9t001 *mt9t001, u16 clear,
diff --git a/drivers/media/video/mt9t031.c b/drivers/media/video/mt9t031.c
index 0e78477452ff..84add1aef139 100644
--- a/drivers/media/video/mt9t031.c
+++ b/drivers/media/video/mt9t031.c
@@ -90,14 +90,13 @@ static struct mt9t031 *to_mt9t031(const struct i2c_client *client)
static int reg_read(struct i2c_client *client, const u8 reg)
{
- s32 data = i2c_smbus_read_word_data(client, reg);
- return data < 0 ? data : swab16(data);
+ return i2c_smbus_read_word_swapped(client, reg);
}
static int reg_write(struct i2c_client *client, const u8 reg,
const u16 data)
{
- return i2c_smbus_write_word_data(client, reg, swab16(data));
+ return i2c_smbus_write_word_swapped(client, reg, data);
}
static int reg_set(struct i2c_client *client, const u8 reg,
diff --git a/drivers/media/video/mt9t112.c b/drivers/media/video/mt9t112.c
index 32114a3c0ca7..7b34b11daf24 100644
--- a/drivers/media/video/mt9t112.c
+++ b/drivers/media/video/mt9t112.c
@@ -1083,8 +1083,10 @@ static int mt9t112_probe(struct i2c_client *client,
v4l2_i2c_subdev_init(&priv->subdev, client, &mt9t112_subdev_ops);
ret = mt9t112_camera_probe(client);
- if (ret)
+ if (ret) {
kfree(priv);
+ return ret;
+ }
/* Cannot fail: using the default supported pixel code */
mt9t112_set_params(priv, &rect, V4L2_MBUS_FMT_UYVY8_2X8);
diff --git a/drivers/media/video/mt9v022.c b/drivers/media/video/mt9v022.c
index 690ee0d42eeb..944940758fa3 100644
--- a/drivers/media/video/mt9v022.c
+++ b/drivers/media/video/mt9v022.c
@@ -130,14 +130,13 @@ static struct mt9v022 *to_mt9v022(const struct i2c_client *client)
static int reg_read(struct i2c_client *client, const u8 reg)
{
- s32 data = i2c_smbus_read_word_data(client, reg);
- return data < 0 ? data : swab16(data);
+ return i2c_smbus_read_word_swapped(client, reg);
}
static int reg_write(struct i2c_client *client, const u8 reg,
const u16 data)
{
- return i2c_smbus_write_word_data(client, reg, swab16(data));
+ return i2c_smbus_write_word_swapped(client, reg, data);
}
static int reg_set(struct i2c_client *client, const u8 reg,
diff --git a/drivers/media/video/mt9v032.c b/drivers/media/video/mt9v032.c
index f080c162123f..d90b982cc218 100644
--- a/drivers/media/video/mt9v032.c
+++ b/drivers/media/video/mt9v032.c
@@ -139,10 +139,10 @@ static struct mt9v032 *to_mt9v032(struct v4l2_subdev *sd)
static int mt9v032_read(struct i2c_client *client, const u8 reg)
{
- s32 data = i2c_smbus_read_word_data(client, reg);
+ s32 data = i2c_smbus_read_word_swapped(client, reg);
dev_dbg(&client->dev, "%s: read 0x%04x from 0x%02x\n", __func__,
- swab16(data), reg);
- return data < 0 ? data : swab16(data);
+ data, reg);
+ return data;
}
static int mt9v032_write(struct i2c_client *client, const u8 reg,
@@ -150,7 +150,7 @@ static int mt9v032_write(struct i2c_client *client, const u8 reg,
{
dev_dbg(&client->dev, "%s: writing 0x%04x to 0x%02x\n", __func__,
data, reg);
- return i2c_smbus_write_word_data(client, reg, swab16(data));
+ return i2c_smbus_write_word_swapped(client, reg, data);
}
static int mt9v032_set_chip_control(struct mt9v032 *mt9v032, u16 clear, u16 set)
diff --git a/drivers/media/video/mx1_camera.c b/drivers/media/video/mx1_camera.c
index 18e94c7d2be8..055d11ddb038 100644
--- a/drivers/media/video/mx1_camera.c
+++ b/drivers/media/video/mx1_camera.c
@@ -487,7 +487,7 @@ static int mx1_camera_set_crop(struct soc_camera_device *icd,
return v4l2_subdev_call(sd, video, s_crop, a);
}
-static int mx1_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt)
+static int mx1_camera_set_bus_param(struct soc_camera_device *icd)
{
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
diff --git a/drivers/media/video/mx2_camera.c b/drivers/media/video/mx2_camera.c
index a803d9ea8fd6..04aab0c538aa 100644
--- a/drivers/media/video/mx2_camera.c
+++ b/drivers/media/video/mx2_camera.c
@@ -210,6 +210,22 @@
#define MAX_VIDEO_MEM 16
+struct mx2_prp_cfg {
+ int channel;
+ u32 in_fmt;
+ u32 out_fmt;
+ u32 src_pixel;
+ u32 ch1_pixel;
+ u32 irq_flags;
+};
+
+/* prp configuration for a client-host fmt pair */
+struct mx2_fmt_cfg {
+ enum v4l2_mbus_pixelcode in_fmt;
+ u32 out_fmt;
+ struct mx2_prp_cfg cfg;
+};
+
struct mx2_camera_dev {
struct device *dev;
struct soc_camera_host soc_host;
@@ -241,6 +257,8 @@ struct mx2_camera_dev {
void *discard_buffer;
dma_addr_t discard_buffer_dma;
size_t discard_size;
+ struct mx2_fmt_cfg *emma_prp;
+ u32 frame_count;
};
/* buffer for one video frame */
@@ -253,6 +271,59 @@ struct mx2_buffer {
int bufnum;
};
+static struct mx2_fmt_cfg mx27_emma_prp_table[] = {
+ /*
+ * This is a generic configuration which is valid for most
+ * prp input-output format combinations.
+	 * We set the incoming and outgoing pixel format to a
+	 * 16-bit wide format and adjust the bytesperline
+	 * accordingly. With this configuration the input data
+	 * will not be changed by the emma and could be any type
+	 * of 16-bit pixel format.
+ */
+ {
+ .in_fmt = 0,
+ .out_fmt = 0,
+ .cfg = {
+ .channel = 1,
+ .in_fmt = PRP_CNTL_DATA_IN_RGB16,
+ .out_fmt = PRP_CNTL_CH1_OUT_RGB16,
+ .src_pixel = 0x2ca00565, /* RGB565 */
+ .ch1_pixel = 0x2ca00565, /* RGB565 */
+ .irq_flags = PRP_INTR_RDERR | PRP_INTR_CH1WERR |
+ PRP_INTR_CH1FC | PRP_INTR_LBOVF,
+ }
+ },
+ {
+ .in_fmt = V4L2_MBUS_FMT_YUYV8_2X8,
+ .out_fmt = V4L2_PIX_FMT_YUV420,
+ .cfg = {
+ .channel = 2,
+ .in_fmt = PRP_CNTL_DATA_IN_YUV422,
+ .out_fmt = PRP_CNTL_CH2_OUT_YUV420,
+ .src_pixel = 0x22000888, /* YUV422 (YUYV) */
+ .irq_flags = PRP_INTR_RDERR | PRP_INTR_CH2WERR |
+ PRP_INTR_CH2FC | PRP_INTR_LBOVF |
+ PRP_INTR_CH2OVF,
+ }
+ },
+};
+
+static struct mx2_fmt_cfg *mx27_emma_prp_get_format(
+ enum v4l2_mbus_pixelcode in_fmt,
+ u32 out_fmt)
+{
+ int i;
+
+ for (i = 1; i < ARRAY_SIZE(mx27_emma_prp_table); i++)
+ if ((mx27_emma_prp_table[i].in_fmt == in_fmt) &&
+ (mx27_emma_prp_table[i].out_fmt == out_fmt)) {
+ return &mx27_emma_prp_table[i];
+ }
+ /* If no match return the most generic configuration */
+ return &mx27_emma_prp_table[0];
+};
+
static void mx2_camera_deactivate(struct mx2_camera_dev *pcdev)
{
unsigned long flags;
@@ -301,6 +372,7 @@ static int mx2_camera_add_device(struct soc_camera_device *icd)
writel(pcdev->csicr1, pcdev->base_csi + CSICR1);
pcdev->icd = icd;
+ pcdev->frame_count = 0;
dev_info(icd->parent, "Camera driver attached to camera %d\n",
icd->devnum);
@@ -719,55 +791,77 @@ static void mx27_camera_emma_buf_init(struct soc_camera_device *icd,
struct soc_camera_host *ici =
to_soc_camera_host(icd->parent);
struct mx2_camera_dev *pcdev = ici->priv;
+ struct mx2_fmt_cfg *prp = pcdev->emma_prp;
+ u32 imgsize = pcdev->icd->user_height * pcdev->icd->user_width;
+
+ if (prp->cfg.channel == 1) {
+ writel(pcdev->discard_buffer_dma,
+ pcdev->base_emma + PRP_DEST_RGB1_PTR);
+ writel(pcdev->discard_buffer_dma,
+ pcdev->base_emma + PRP_DEST_RGB2_PTR);
+
+ writel(PRP_CNTL_CH1EN |
+ PRP_CNTL_CSIEN |
+ prp->cfg.in_fmt |
+ prp->cfg.out_fmt |
+ PRP_CNTL_CH1_LEN |
+ PRP_CNTL_CH1BYP |
+ PRP_CNTL_CH1_TSKIP(0) |
+ PRP_CNTL_IN_TSKIP(0),
+ pcdev->base_emma + PRP_CNTL);
+
+ writel((icd->user_width << 16) | icd->user_height,
+ pcdev->base_emma + PRP_SRC_FRAME_SIZE);
+ writel((icd->user_width << 16) | icd->user_height,
+ pcdev->base_emma + PRP_CH1_OUT_IMAGE_SIZE);
+ writel(bytesperline,
+ pcdev->base_emma + PRP_DEST_CH1_LINE_STRIDE);
+ writel(prp->cfg.src_pixel,
+ pcdev->base_emma + PRP_SRC_PIXEL_FORMAT_CNTL);
+ writel(prp->cfg.ch1_pixel,
+ pcdev->base_emma + PRP_CH1_PIXEL_FORMAT_CNTL);
+ } else { /* channel 2 */
+ writel(pcdev->discard_buffer_dma,
+ pcdev->base_emma + PRP_DEST_Y_PTR);
+ writel(pcdev->discard_buffer_dma,
+ pcdev->base_emma + PRP_SOURCE_Y_PTR);
+
+ if (prp->cfg.out_fmt == PRP_CNTL_CH2_OUT_YUV420) {
+ writel(pcdev->discard_buffer_dma + imgsize,
+ pcdev->base_emma + PRP_DEST_CB_PTR);
+ writel(pcdev->discard_buffer_dma + ((5 * imgsize) / 4),
+ pcdev->base_emma + PRP_DEST_CR_PTR);
+ writel(pcdev->discard_buffer_dma + imgsize,
+ pcdev->base_emma + PRP_SOURCE_CB_PTR);
+ writel(pcdev->discard_buffer_dma + ((5 * imgsize) / 4),
+ pcdev->base_emma + PRP_SOURCE_CR_PTR);
+ }
- writel(pcdev->discard_buffer_dma,
- pcdev->base_emma + PRP_DEST_RGB1_PTR);
- writel(pcdev->discard_buffer_dma,
- pcdev->base_emma + PRP_DEST_RGB2_PTR);
-
- /*
- * We only use the EMMA engine to get rid of the broken
- * DMA Engine. No color space consversion at the moment.
- * We set the incomming and outgoing pixelformat to an
- * 16 Bit wide format and adjust the bytesperline
- * accordingly. With this configuration the inputdata
- * will not be changed by the emma and could be any type
- * of 16 Bit Pixelformat.
- */
- writel(PRP_CNTL_CH1EN |
+ writel(PRP_CNTL_CH2EN |
PRP_CNTL_CSIEN |
- PRP_CNTL_DATA_IN_RGB16 |
- PRP_CNTL_CH1_OUT_RGB16 |
- PRP_CNTL_CH1_LEN |
- PRP_CNTL_CH1BYP |
- PRP_CNTL_CH1_TSKIP(0) |
+ prp->cfg.in_fmt |
+ prp->cfg.out_fmt |
+ PRP_CNTL_CH2_LEN |
+ PRP_CNTL_CH2_TSKIP(0) |
PRP_CNTL_IN_TSKIP(0),
pcdev->base_emma + PRP_CNTL);
- writel(((bytesperline >> 1) << 16) | icd->user_height,
+ writel((icd->user_width << 16) | icd->user_height,
pcdev->base_emma + PRP_SRC_FRAME_SIZE);
- writel(((bytesperline >> 1) << 16) | icd->user_height,
- pcdev->base_emma + PRP_CH1_OUT_IMAGE_SIZE);
- writel(bytesperline,
- pcdev->base_emma + PRP_DEST_CH1_LINE_STRIDE);
- writel(0x2ca00565, /* RGB565 */
+
+ writel((icd->user_width << 16) | icd->user_height,
+ pcdev->base_emma + PRP_CH2_OUT_IMAGE_SIZE);
+
+ writel(prp->cfg.src_pixel,
pcdev->base_emma + PRP_SRC_PIXEL_FORMAT_CNTL);
- writel(0x2ca00565, /* RGB565 */
- pcdev->base_emma + PRP_CH1_PIXEL_FORMAT_CNTL);
+
+ }
/* Enable interrupts */
- writel(PRP_INTR_RDERR |
- PRP_INTR_CH1WERR |
- PRP_INTR_CH2WERR |
- PRP_INTR_CH1FC |
- PRP_INTR_CH2FC |
- PRP_INTR_LBOVF |
- PRP_INTR_CH2OVF,
- pcdev->base_emma + PRP_INTR_CNTL);
+ writel(prp->cfg.irq_flags, pcdev->base_emma + PRP_INTR_CNTL);
}
-static int mx2_camera_set_bus_param(struct soc_camera_device *icd,
- __u32 pixfmt)
+static int mx2_camera_set_bus_param(struct soc_camera_device *icd)
{
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
@@ -911,9 +1005,58 @@ static int mx2_camera_set_crop(struct soc_camera_device *icd,
return ret;
}
+static int mx2_camera_get_formats(struct soc_camera_device *icd,
+ unsigned int idx,
+ struct soc_camera_format_xlate *xlate)
+{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ const struct soc_mbus_pixelfmt *fmt;
+ struct device *dev = icd->parent;
+ enum v4l2_mbus_pixelcode code;
+ int ret, formats = 0;
+
+ ret = v4l2_subdev_call(sd, video, enum_mbus_fmt, idx, &code);
+ if (ret < 0)
+ /* no more formats */
+ return 0;
+
+ fmt = soc_mbus_get_fmtdesc(code);
+ if (!fmt) {
+ dev_err(dev, "Invalid format code #%u: %d\n", idx, code);
+ return 0;
+ }
+
+ if (code == V4L2_MBUS_FMT_YUYV8_2X8) {
+ formats++;
+ if (xlate) {
+ /*
+ * CH2 can output YUV420 which is a standard format in
+ * soc_mediabus.c
+ */
+ xlate->host_fmt =
+ soc_mbus_get_fmtdesc(V4L2_MBUS_FMT_YUYV8_1_5X8);
+ xlate->code = code;
+ dev_dbg(dev, "Providing host format %s for sensor code %d\n",
+ xlate->host_fmt->name, code);
+ xlate++;
+ }
+ }
+
+	/* Generic pass-through */
+ formats++;
+ if (xlate) {
+ xlate->host_fmt = fmt;
+ xlate->code = code;
+ xlate++;
+ }
+ return formats;
+}
+
static int mx2_camera_set_fmt(struct soc_camera_device *icd,
struct v4l2_format *f)
{
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct mx2_camera_dev *pcdev = ici->priv;
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
const struct soc_camera_format_xlate *xlate;
struct v4l2_pix_format *pix = &f->fmt.pix;
@@ -946,6 +1089,10 @@ static int mx2_camera_set_fmt(struct soc_camera_device *icd,
pix->colorspace = mf.colorspace;
icd->current_fmt = xlate;
+ if (mx27_camera_emma(pcdev))
+ pcdev->emma_prp = mx27_emma_prp_get_format(xlate->code,
+ xlate->host_fmt->fourcc);
+
return 0;
}
@@ -1011,7 +1158,12 @@ static int mx2_camera_try_fmt(struct soc_camera_device *icd,
if (mf.field == V4L2_FIELD_ANY)
mf.field = V4L2_FIELD_NONE;
- if (mf.field != V4L2_FIELD_NONE) {
+ /*
+ * Driver supports interlaced images provided they have
+ * both fields so that they can be processed as if they
+ * were progressive.
+ */
+ if (mf.field != V4L2_FIELD_NONE && !V4L2_FIELD_HAS_BOTH(mf.field)) {
dev_err(icd->parent, "Field type %d unsupported.\n",
mf.field);
return -EINVAL;
@@ -1173,6 +1325,7 @@ static struct soc_camera_host_ops mx2_soc_camera_host_ops = {
.remove = mx2_camera_remove_device,
.set_fmt = mx2_camera_set_fmt,
.set_crop = mx2_camera_set_crop,
+ .get_formats = mx2_camera_get_formats,
.try_fmt = mx2_camera_try_fmt,
.init_videobuf = mx2_camera_init_videobuf,
.reqbufs = mx2_camera_reqbufs,
@@ -1184,6 +1337,8 @@ static struct soc_camera_host_ops mx2_soc_camera_host_ops = {
static void mx27_camera_frame_done_emma(struct mx2_camera_dev *pcdev,
int bufnum, int state)
{
+ u32 imgsize = pcdev->icd->user_height * pcdev->icd->user_width;
+ struct mx2_fmt_cfg *prp = pcdev->emma_prp;
struct mx2_buffer *buf;
struct videobuf_buffer *vb;
unsigned long phys;
@@ -1197,12 +1352,22 @@ static void mx27_camera_frame_done_emma(struct mx2_camera_dev *pcdev,
vb = &buf->vb;
#ifdef DEBUG
phys = videobuf_to_dma_contig(vb);
- if (readl(pcdev->base_emma + PRP_DEST_RGB1_PTR + 4 * bufnum)
- != phys) {
- dev_err(pcdev->dev, "%p != %p\n", phys,
- readl(pcdev->base_emma +
- PRP_DEST_RGB1_PTR +
- 4 * bufnum));
+ if (prp->cfg.channel == 1) {
+ if (readl(pcdev->base_emma + PRP_DEST_RGB1_PTR +
+ 4 * bufnum) != phys) {
+ dev_err(pcdev->dev, "%p != %p\n", phys,
+ readl(pcdev->base_emma +
+ PRP_DEST_RGB1_PTR +
+ 4 * bufnum));
+ }
+ } else {
+ if (readl(pcdev->base_emma + PRP_DEST_Y_PTR -
+ 0x14 * bufnum) != phys) {
+ dev_err(pcdev->dev, "%p != %p\n", phys,
+ readl(pcdev->base_emma +
+ PRP_DEST_Y_PTR -
+ 0x14 * bufnum));
+ }
}
#endif
dev_dbg(pcdev->dev, "%s (vb=0x%p) 0x%08lx %d\n", __func__, vb,
@@ -1211,14 +1376,29 @@ static void mx27_camera_frame_done_emma(struct mx2_camera_dev *pcdev,
list_del(&vb->queue);
vb->state = state;
do_gettimeofday(&vb->ts);
- vb->field_count++;
+ vb->field_count = pcdev->frame_count * 2;
+ pcdev->frame_count++;
wake_up(&vb->done);
}
if (list_empty(&pcdev->capture)) {
- writel(pcdev->discard_buffer_dma, pcdev->base_emma +
- PRP_DEST_RGB1_PTR + 4 * bufnum);
+ if (prp->cfg.channel == 1) {
+ writel(pcdev->discard_buffer_dma, pcdev->base_emma +
+ PRP_DEST_RGB1_PTR + 4 * bufnum);
+ } else {
+ writel(pcdev->discard_buffer_dma, pcdev->base_emma +
+ PRP_DEST_Y_PTR -
+ 0x14 * bufnum);
+ if (prp->out_fmt == V4L2_PIX_FMT_YUV420) {
+ writel(pcdev->discard_buffer_dma + imgsize,
+ pcdev->base_emma + PRP_DEST_CB_PTR -
+ 0x14 * bufnum);
+ writel(pcdev->discard_buffer_dma +
+ ((5 * imgsize) / 4), pcdev->base_emma +
+ PRP_DEST_CR_PTR - 0x14 * bufnum);
+ }
+ }
return;
}
@@ -1233,7 +1413,18 @@ static void mx27_camera_frame_done_emma(struct mx2_camera_dev *pcdev,
vb->state = VIDEOBUF_ACTIVE;
phys = videobuf_to_dma_contig(vb);
- writel(phys, pcdev->base_emma + PRP_DEST_RGB1_PTR + 4 * bufnum);
+ if (prp->cfg.channel == 1) {
+ writel(phys, pcdev->base_emma + PRP_DEST_RGB1_PTR + 4 * bufnum);
+ } else {
+ writel(phys, pcdev->base_emma +
+ PRP_DEST_Y_PTR - 0x14 * bufnum);
+ if (prp->cfg.out_fmt == PRP_CNTL_CH2_OUT_YUV420) {
+ writel(phys + imgsize, pcdev->base_emma +
+ PRP_DEST_CB_PTR - 0x14 * bufnum);
+ writel(phys + ((5 * imgsize) / 4), pcdev->base_emma +
+ PRP_DEST_CR_PTR - 0x14 * bufnum);
+ }
+ }
}
static irqreturn_t mx27_camera_emma_irq(int irq_emma, void *data)
@@ -1253,10 +1444,12 @@ static irqreturn_t mx27_camera_emma_irq(int irq_emma, void *data)
* the next one.
*/
cntl = readl(pcdev->base_emma + PRP_CNTL);
- writel(cntl & ~PRP_CNTL_CH1EN, pcdev->base_emma + PRP_CNTL);
+ writel(cntl & ~(PRP_CNTL_CH1EN | PRP_CNTL_CH2EN),
+ pcdev->base_emma + PRP_CNTL);
writel(cntl, pcdev->base_emma + PRP_CNTL);
}
- if ((status & (3 << 5)) == (3 << 5)
+ if ((((status & (3 << 5)) == (3 << 5)) ||
+ ((status & (3 << 3)) == (3 << 3)))
&& !list_empty(&pcdev->active_bufs)) {
/*
* Both buffers have triggered, process the one we're expecting
@@ -1267,9 +1460,9 @@ static irqreturn_t mx27_camera_emma_irq(int irq_emma, void *data)
mx27_camera_frame_done_emma(pcdev, buf->bufnum, VIDEOBUF_DONE);
status &= ~(1 << (6 - buf->bufnum)); /* mark processed */
}
- if (status & (1 << 6))
+ if ((status & (1 << 6)) || (status & (1 << 4)))
mx27_camera_frame_done_emma(pcdev, 0, VIDEOBUF_DONE);
- if (status & (1 << 5))
+ if ((status & (1 << 5)) || (status & (1 << 3)))
mx27_camera_frame_done_emma(pcdev, 1, VIDEOBUF_DONE);
writel(status, pcdev->base_emma + PRP_INTRSTATUS);
diff --git a/drivers/media/video/mx3_camera.c b/drivers/media/video/mx3_camera.c
index f96f92f00f92..74522773e934 100644
--- a/drivers/media/video/mx3_camera.c
+++ b/drivers/media/video/mx3_camera.c
@@ -287,7 +287,7 @@ static void mx3_videobuf_queue(struct vb2_buffer *vb)
sg_dma_len(sg) = new_size;
txd = ichan->dma_chan.device->device_prep_slave_sg(
- &ichan->dma_chan, sg, 1, DMA_FROM_DEVICE,
+ &ichan->dma_chan, sg, 1, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT);
if (!txd)
goto error;
@@ -982,12 +982,13 @@ static int mx3_camera_querycap(struct soc_camera_host *ici,
return 0;
}
-static int mx3_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt)
+static int mx3_camera_set_bus_param(struct soc_camera_device *icd)
{
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct mx3_camera_dev *mx3_cam = ici->priv;
struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,};
+ u32 pixfmt = icd->current_fmt->host_fmt->fourcc;
unsigned long bus_flags, common_flags;
u32 dw, sens_conf;
const struct soc_mbus_pixelfmt *fmt;
@@ -1285,19 +1286,7 @@ static struct platform_driver mx3_camera_driver = {
.remove = __devexit_p(mx3_camera_remove),
};
-
-static int __init mx3_camera_init(void)
-{
- return platform_driver_register(&mx3_camera_driver);
-}
-
-static void __exit mx3_camera_exit(void)
-{
- platform_driver_unregister(&mx3_camera_driver);
-}
-
-module_init(mx3_camera_init);
-module_exit(mx3_camera_exit);
+module_platform_driver(mx3_camera_driver);
MODULE_DESCRIPTION("i.MX3x SoC Camera Host driver");
MODULE_AUTHOR("Guennadi Liakhovetski <lg@denx.de>");
diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c
index 9c5c19f142de..1fb7d5bd5ec2 100644
--- a/drivers/media/video/omap/omap_vout.c
+++ b/drivers/media/video/omap/omap_vout.c
@@ -38,6 +38,7 @@
#include <linux/irq.h>
#include <linux/videodev2.h>
#include <linux/dma-mapping.h>
+#include <linux/slab.h>
#include <media/videobuf-dma-contig.h>
#include <media/v4l2-device.h>
@@ -69,9 +70,9 @@ static u32 video1_numbuffers = 3;
static u32 video2_numbuffers = 3;
static u32 video1_bufsize = OMAP_VOUT_MAX_BUF_SIZE;
static u32 video2_bufsize = OMAP_VOUT_MAX_BUF_SIZE;
-static u32 vid1_static_vrfb_alloc;
-static u32 vid2_static_vrfb_alloc;
-static int debug;
+static bool vid1_static_vrfb_alloc;
+static bool vid2_static_vrfb_alloc;
+static bool debug;
/* Module parameters */
module_param(video1_numbuffers, uint, S_IRUGO);
@@ -423,7 +424,7 @@ static int omapvid_setup_overlay(struct omap_vout_device *vout,
"%s enable=%d addr=%x width=%d\n height=%d color_mode=%d\n"
"rotation=%d mirror=%d posx=%d posy=%d out_width = %d \n"
"out_height=%d rotation_type=%d screen_width=%d\n",
- __func__, info.enabled, info.paddr, info.width, info.height,
+ __func__, ovl->is_enabled(ovl), info.paddr, info.width, info.height,
info.color_mode, info.rotation, info.mirror, info.pos_x,
info.pos_y, info.out_width, info.out_height, info.rotation_type,
info.screen_width);
@@ -523,10 +524,50 @@ static int omapvid_apply_changes(struct omap_vout_device *vout)
return 0;
}
+static int omapvid_handle_interlace_display(struct omap_vout_device *vout,
+ unsigned int irqstatus, struct timeval timevalue)
+{
+ u32 fid;
+
+ if (vout->first_int) {
+ vout->first_int = 0;
+ goto err;
+ }
+
+ if (irqstatus & DISPC_IRQ_EVSYNC_ODD)
+ fid = 1;
+ else if (irqstatus & DISPC_IRQ_EVSYNC_EVEN)
+ fid = 0;
+ else
+ goto err;
+
+ vout->field_id ^= 1;
+ if (fid != vout->field_id) {
+ if (fid == 0)
+ vout->field_id = fid;
+ } else if (0 == fid) {
+ if (vout->cur_frm == vout->next_frm)
+ goto err;
+
+ vout->cur_frm->ts = timevalue;
+ vout->cur_frm->state = VIDEOBUF_DONE;
+ wake_up_interruptible(&vout->cur_frm->done);
+ vout->cur_frm = vout->next_frm;
+ } else {
+ if (list_empty(&vout->dma_queue) ||
+ (vout->cur_frm != vout->next_frm))
+ goto err;
+ }
+
+ return vout->field_id;
+err:
+ return 0;
+}
+
static void omap_vout_isr(void *arg, unsigned int irqstatus)
{
- int ret;
- u32 addr, fid;
+ int ret, fid, mgr_id;
+ u32 addr, irq;
struct omap_overlay *ovl;
struct timeval timevalue;
struct omapvideo_info *ovid;
@@ -542,112 +583,73 @@ static void omap_vout_isr(void *arg, unsigned int irqstatus)
if (!ovl->manager || !ovl->manager->device)
return;
+ mgr_id = ovl->manager->id;
cur_display = ovl->manager->device;
spin_lock(&vout->vbq_lock);
do_gettimeofday(&timevalue);
- if (cur_display->type != OMAP_DISPLAY_TYPE_VENC) {
- switch (cur_display->type) {
- case OMAP_DISPLAY_TYPE_DPI:
- if (!(irqstatus & (DISPC_IRQ_VSYNC | DISPC_IRQ_VSYNC2)))
- goto vout_isr_err;
- break;
- case OMAP_DISPLAY_TYPE_HDMI:
- if (!(irqstatus & DISPC_IRQ_EVSYNC_EVEN))
- goto vout_isr_err;
- break;
- default:
- goto vout_isr_err;
- }
- if (!vout->first_int && (vout->cur_frm != vout->next_frm)) {
- vout->cur_frm->ts = timevalue;
- vout->cur_frm->state = VIDEOBUF_DONE;
- wake_up_interruptible(&vout->cur_frm->done);
- vout->cur_frm = vout->next_frm;
- }
- vout->first_int = 0;
- if (list_empty(&vout->dma_queue))
+ switch (cur_display->type) {
+ case OMAP_DISPLAY_TYPE_DSI:
+ case OMAP_DISPLAY_TYPE_DPI:
+ if (mgr_id == OMAP_DSS_CHANNEL_LCD)
+ irq = DISPC_IRQ_VSYNC;
+ else if (mgr_id == OMAP_DSS_CHANNEL_LCD2)
+ irq = DISPC_IRQ_VSYNC2;
+ else
goto vout_isr_err;
- vout->next_frm = list_entry(vout->dma_queue.next,
- struct videobuf_buffer, queue);
- list_del(&vout->next_frm->queue);
-
- vout->next_frm->state = VIDEOBUF_ACTIVE;
+ if (!(irqstatus & irq))
+ goto vout_isr_err;
+ break;
+ case OMAP_DISPLAY_TYPE_VENC:
+ fid = omapvid_handle_interlace_display(vout, irqstatus,
+ timevalue);
+ if (!fid)
+ goto vout_isr_err;
+ break;
+ case OMAP_DISPLAY_TYPE_HDMI:
+ if (!(irqstatus & DISPC_IRQ_EVSYNC_EVEN))
+ goto vout_isr_err;
+ break;
+ default:
+ goto vout_isr_err;
+ }
- addr = (unsigned long) vout->queued_buf_addr[vout->next_frm->i]
- + vout->cropped_offset;
+ if (!vout->first_int && (vout->cur_frm != vout->next_frm)) {
+ vout->cur_frm->ts = timevalue;
+ vout->cur_frm->state = VIDEOBUF_DONE;
+ wake_up_interruptible(&vout->cur_frm->done);
+ vout->cur_frm = vout->next_frm;
+ }
- /* First save the configuration in ovelray structure */
- ret = omapvid_init(vout, addr);
- if (ret)
- printk(KERN_ERR VOUT_NAME
- "failed to set overlay info\n");
- /* Enable the pipeline and set the Go bit */
- ret = omapvid_apply_changes(vout);
- if (ret)
- printk(KERN_ERR VOUT_NAME "failed to change mode\n");
- } else {
+ vout->first_int = 0;
+ if (list_empty(&vout->dma_queue))
+ goto vout_isr_err;
- if (vout->first_int) {
- vout->first_int = 0;
- goto vout_isr_err;
- }
- if (irqstatus & DISPC_IRQ_EVSYNC_ODD)
- fid = 1;
- else if (irqstatus & DISPC_IRQ_EVSYNC_EVEN)
- fid = 0;
- else
- goto vout_isr_err;
+ vout->next_frm = list_entry(vout->dma_queue.next,
+ struct videobuf_buffer, queue);
+ list_del(&vout->next_frm->queue);
- vout->field_id ^= 1;
- if (fid != vout->field_id) {
- if (0 == fid)
- vout->field_id = fid;
+ vout->next_frm->state = VIDEOBUF_ACTIVE;
- goto vout_isr_err;
- }
- if (0 == fid) {
- if (vout->cur_frm == vout->next_frm)
- goto vout_isr_err;
-
- vout->cur_frm->ts = timevalue;
- vout->cur_frm->state = VIDEOBUF_DONE;
- wake_up_interruptible(&vout->cur_frm->done);
- vout->cur_frm = vout->next_frm;
- } else if (1 == fid) {
- if (list_empty(&vout->dma_queue) ||
- (vout->cur_frm != vout->next_frm))
- goto vout_isr_err;
-
- vout->next_frm = list_entry(vout->dma_queue.next,
- struct videobuf_buffer, queue);
- list_del(&vout->next_frm->queue);
-
- vout->next_frm->state = VIDEOBUF_ACTIVE;
- addr = (unsigned long)
- vout->queued_buf_addr[vout->next_frm->i] +
- vout->cropped_offset;
- /* First save the configuration in ovelray structure */
- ret = omapvid_init(vout, addr);
- if (ret)
- printk(KERN_ERR VOUT_NAME
- "failed to set overlay info\n");
- /* Enable the pipeline and set the Go bit */
- ret = omapvid_apply_changes(vout);
- if (ret)
- printk(KERN_ERR VOUT_NAME
- "failed to change mode\n");
- }
+ addr = (unsigned long) vout->queued_buf_addr[vout->next_frm->i]
+ + vout->cropped_offset;
- }
+ /* First save the configuration in overlay structure */
+ ret = omapvid_init(vout, addr);
+ if (ret)
+ printk(KERN_ERR VOUT_NAME
+ "failed to set overlay info\n");
+ /* Enable the pipeline and set the Go bit */
+ ret = omapvid_apply_changes(vout);
+ if (ret)
+ printk(KERN_ERR VOUT_NAME "failed to change mode\n");
vout_isr_err:
spin_unlock(&vout->vbq_lock);
}
-
/* Video buffer call backs */
/*
@@ -663,10 +665,14 @@ static int omap_vout_buffer_setup(struct videobuf_queue *q, unsigned int *count,
u32 phy_addr = 0, virt_addr = 0;
struct omap_vout_device *vout = q->priv_data;
struct omapvideo_info *ovid = &vout->vid_info;
+ int vid_max_buf_size;
if (!vout)
return -EINVAL;
+ vid_max_buf_size = vout->vid == OMAP_VIDEO1 ? video1_bufsize :
+ video2_bufsize;
+
if (V4L2_BUF_TYPE_VIDEO_OUTPUT != q->type)
return -EINVAL;
@@ -689,7 +695,7 @@ static int omap_vout_buffer_setup(struct videobuf_queue *q, unsigned int *count,
video1_numbuffers : video2_numbuffers;
/* Check the size of the buffer */
- if (*size > vout->buffer_size) {
+ if (*size > vid_max_buf_size) {
v4l2_err(&vout->vid_dev->v4l2_dev,
"buffer allocation mismatch [%u] [%u]\n",
*size, vout->buffer_size);
@@ -942,12 +948,8 @@ static int omap_vout_release(struct file *file)
/* Disable all the overlay managers connected with this interface */
for (i = 0; i < ovid->num_overlays; i++) {
struct omap_overlay *ovl = ovid->overlays[i];
- if (ovl->manager && ovl->manager->device) {
- struct omap_overlay_info info;
- ovl->get_overlay_info(ovl, &info);
- info.enabled = 0;
- ovl->set_overlay_info(ovl, &info);
- }
+ if (ovl->manager && ovl->manager->device)
+ ovl->disable(ovl);
}
/* Turn off the pipeline */
ret = omapvid_apply_changes(vout);
@@ -1040,7 +1042,8 @@ static int vidioc_querycap(struct file *file, void *fh,
strlcpy(cap->driver, VOUT_NAME, sizeof(cap->driver));
strlcpy(cap->card, vout->vfd->name, sizeof(cap->card));
cap->bus_info[0] = '\0';
- cap->capabilities = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_OUTPUT;
+ cap->capabilities = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_OUTPUT |
+ V4L2_CAP_VIDEO_OUTPUT_OVERLAY;
return 0;
}
@@ -1667,7 +1670,6 @@ static int vidioc_streamon(struct file *file, void *fh, enum v4l2_buf_type i)
if (ovl->manager && ovl->manager->device) {
struct omap_overlay_info info;
ovl->get_overlay_info(ovl, &info);
- info.enabled = 1;
info.paddr = addr;
if (ovl->set_overlay_info(ovl, &info)) {
ret = -EINVAL;
@@ -1686,6 +1688,16 @@ static int vidioc_streamon(struct file *file, void *fh, enum v4l2_buf_type i)
if (ret)
v4l2_err(&vout->vid_dev->v4l2_dev, "failed to change mode\n");
+ for (j = 0; j < ovid->num_overlays; j++) {
+ struct omap_overlay *ovl = ovid->overlays[j];
+
+ if (ovl->manager && ovl->manager->device) {
+ ret = ovl->enable(ovl);
+ if (ret)
+ goto streamon_err1;
+ }
+ }
+
ret = 0;
streamon_err1:
@@ -1715,16 +1727,8 @@ static int vidioc_streamoff(struct file *file, void *fh, enum v4l2_buf_type i)
for (j = 0; j < ovid->num_overlays; j++) {
struct omap_overlay *ovl = ovid->overlays[j];
- if (ovl->manager && ovl->manager->device) {
- struct omap_overlay_info info;
-
- ovl->get_overlay_info(ovl, &info);
- info.enabled = 0;
- ret = ovl->set_overlay_info(ovl, &info);
- if (ret)
- v4l2_err(&vout->vid_dev->v4l2_dev,
- "failed to update overlay info in streamoff\n");
- }
+ if (ovl->manager && ovl->manager->device)
+ ovl->disable(ovl);
}
/* Turn of the pipeline */
@@ -1822,7 +1826,9 @@ static int vidioc_g_fbuf(struct file *file, void *fh,
ovid = &vout->vid_info;
ovl = ovid->overlays[0];
- a->flags = 0x0;
+ /* The video overlay must stay within the framebuffer and can't be
+ positioned independently. */
+ a->flags = V4L2_FBUF_FLAG_OVERLAY;
a->capability = V4L2_FBUF_CAP_LOCAL_ALPHA | V4L2_FBUF_CAP_CHROMAKEY
| V4L2_FBUF_CAP_SRC_CHROMAKEY;
@@ -2169,6 +2175,14 @@ static int __init omap_vout_probe(struct platform_device *pdev)
vid_dev->num_displays = 0;
for_each_dss_dev(dssdev) {
omap_dss_get_device(dssdev);
+
+ if (!dssdev->driver) {
+ dev_warn(&pdev->dev, "no driver for display: %s\n",
+ dssdev->name);
+ omap_dss_put_device(dssdev);
+ continue;
+ }
+
vid_dev->displays[vid_dev->num_displays++] = dssdev;
}
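Across the omap_vout hunks above, overlays are no longer switched on and off by toggling info.enabled through get_overlay_info()/set_overlay_info(); the driver now calls the overlay's enable()/disable() operations directly (and queries state with is_enabled()). A condensed before/after sketch, using a hypothetical helper:

static void vout_set_overlay_enabled(struct omap_overlay *ovl, bool enable)
{
	/*
	 * Old style (removed above):
	 *	struct omap_overlay_info info;
	 *	ovl->get_overlay_info(ovl, &info);
	 *	info.enabled = enable;
	 *	ovl->set_overlay_info(ovl, &info);
	 */
	if (enable)
		ovl->enable(ovl);
	else
		ovl->disable(ovl);
}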
diff --git a/drivers/media/video/omap/omap_vout_vrfb.c b/drivers/media/video/omap/omap_vout_vrfb.c
index ebebcac49225..4be26abf6cea 100644
--- a/drivers/media/video/omap/omap_vout_vrfb.c
+++ b/drivers/media/video/omap/omap_vout_vrfb.c
@@ -84,7 +84,7 @@ void omap_vout_free_vrfb_buffers(struct omap_vout_device *vout)
}
int omap_vout_setup_vrfb_bufs(struct platform_device *pdev, int vid_num,
- u32 static_vrfb_allocation)
+ bool static_vrfb_allocation)
{
int ret = 0, i, j;
struct omap_vout_device *vout;
diff --git a/drivers/media/video/omap/omap_voutdef.h b/drivers/media/video/omap/omap_voutdef.h
index d793501cafcc..27a95d23b913 100644
--- a/drivers/media/video/omap/omap_voutdef.h
+++ b/drivers/media/video/omap/omap_voutdef.h
@@ -25,7 +25,7 @@
#define MAC_VRFB_CTXS 4
#define MAX_VOUT_DEV 2
#define MAX_OVLS 3
-#define MAX_DISPLAYS 3
+#define MAX_DISPLAYS 10
#define MAX_MANAGERS 3
#define QQVGA_WIDTH 160
diff --git a/drivers/media/video/omap1_camera.c b/drivers/media/video/omap1_camera.c
index e87ae2f634b2..c20f5ecd6790 100644
--- a/drivers/media/video/omap1_camera.c
+++ b/drivers/media/video/omap1_camera.c
@@ -24,6 +24,7 @@
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -1435,13 +1436,13 @@ static int omap1_cam_querycap(struct soc_camera_host *ici,
return 0;
}
-static int omap1_cam_set_bus_param(struct soc_camera_device *icd,
- __u32 pixfmt)
+static int omap1_cam_set_bus_param(struct soc_camera_device *icd)
{
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
struct device *dev = icd->parent;
struct soc_camera_host *ici = to_soc_camera_host(dev);
struct omap1_cam_dev *pcdev = ici->priv;
+ u32 pixfmt = icd->current_fmt->host_fmt->fourcc;
const struct soc_camera_format_xlate *xlate;
const struct soc_mbus_pixelfmt *fmt;
struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,};
@@ -1712,17 +1713,7 @@ static struct platform_driver omap1_cam_driver = {
.remove = __exit_p(omap1_cam_remove),
};
-static int __init omap1_cam_init(void)
-{
- return platform_driver_register(&omap1_cam_driver);
-}
-module_init(omap1_cam_init);
-
-static void __exit omap1_cam_exit(void)
-{
- platform_driver_unregister(&omap1_cam_driver);
-}
-module_exit(omap1_cam_exit);
+module_platform_driver(omap1_cam_driver);
module_param(sg_mode, bool, 0644);
MODULE_PARM_DESC(sg_mode, "videobuf mode, 0: dma-contig (default), 1: dma-sg");
diff --git a/drivers/media/video/omap24xxcam-dma.c b/drivers/media/video/omap24xxcam-dma.c
index 1d54b86c936b..3ea38a8def8e 100644
--- a/drivers/media/video/omap24xxcam-dma.c
+++ b/drivers/media/video/omap24xxcam-dma.c
@@ -506,7 +506,7 @@ int omap24xxcam_sgdma_queue(struct omap24xxcam_sgdma *sgdma,
unsigned long flags;
struct sgdma_state *sg_state;
- if ((sglen < 0) || ((sglen > 0) & !sglist))
+ if ((sglen < 0) || ((sglen > 0) && !sglist))
return -EINVAL;
spin_lock_irqsave(&sgdma->lock, flags);
diff --git a/drivers/media/video/omap24xxcam.c b/drivers/media/video/omap24xxcam.c
index 45522e603185..7d3864144368 100644
--- a/drivers/media/video/omap24xxcam.c
+++ b/drivers/media/video/omap24xxcam.c
@@ -1868,21 +1868,7 @@ static struct platform_driver omap24xxcam_driver = {
},
};
-/*
- *
- * Module initialisation and deinitialisation
- *
- */
-
-static int __init omap24xxcam_init(void)
-{
- return platform_driver_register(&omap24xxcam_driver);
-}
-
-static void __exit omap24xxcam_cleanup(void)
-{
- platform_driver_unregister(&omap24xxcam_driver);
-}
+module_platform_driver(omap24xxcam_driver);
MODULE_AUTHOR("Sakari Ailus <sakari.ailus@nokia.com>");
MODULE_DESCRIPTION("OMAP24xx Video for Linux camera driver");
@@ -1894,6 +1880,3 @@ MODULE_PARM_DESC(video_nr,
module_param(capture_mem, int, 0);
MODULE_PARM_DESC(capture_mem, "Maximum amount of memory for capture "
"buffers (default 4800kiB)");
-
-module_init(omap24xxcam_init);
-module_exit(omap24xxcam_cleanup);
diff --git a/drivers/media/video/omap3isp/isp.c b/drivers/media/video/omap3isp/isp.c
index b818cacf420f..12d5f923e1d0 100644
--- a/drivers/media/video/omap3isp/isp.c
+++ b/drivers/media/video/omap3isp/isp.c
@@ -80,13 +80,6 @@
#include "isph3a.h"
#include "isphist.h"
-/*
- * this is provided as an interim solution until omap3isp doesn't need
- * any omap-specific iommu API
- */
-#define to_iommu(dev) \
- (struct omap_iommu *)platform_get_drvdata(to_platform_device(dev))
-
static unsigned int autoidle;
module_param(autoidle, int, 0444);
MODULE_PARM_DESC(autoidle, "Enable OMAP3ISP AUTOIDLE support");
@@ -410,6 +403,7 @@ static inline void isp_isr_dbg(struct isp_device *isp, u32 irqstatus)
static void isp_isr_sbl(struct isp_device *isp)
{
struct device *dev = isp->dev;
+ struct isp_pipeline *pipe;
u32 sbl_pcr;
/*
@@ -423,27 +417,38 @@ static void isp_isr_sbl(struct isp_device *isp)
if (sbl_pcr)
dev_dbg(dev, "SBL overflow (PCR = 0x%08x)\n", sbl_pcr);
- if (sbl_pcr & (ISPSBL_PCR_CCDC_WBL_OVF | ISPSBL_PCR_CSIA_WBL_OVF
- | ISPSBL_PCR_CSIB_WBL_OVF)) {
- isp->isp_ccdc.error = 1;
- if (isp->isp_ccdc.output & CCDC_OUTPUT_PREVIEW)
- isp->isp_prev.error = 1;
- if (isp->isp_ccdc.output & CCDC_OUTPUT_RESIZER)
- isp->isp_res.error = 1;
+ if (sbl_pcr & ISPSBL_PCR_CSIB_WBL_OVF) {
+ pipe = to_isp_pipeline(&isp->isp_ccp2.subdev.entity);
+ if (pipe != NULL)
+ pipe->error = true;
+ }
+
+ if (sbl_pcr & ISPSBL_PCR_CSIA_WBL_OVF) {
+ pipe = to_isp_pipeline(&isp->isp_csi2a.subdev.entity);
+ if (pipe != NULL)
+ pipe->error = true;
+ }
+
+ if (sbl_pcr & ISPSBL_PCR_CCDC_WBL_OVF) {
+ pipe = to_isp_pipeline(&isp->isp_ccdc.subdev.entity);
+ if (pipe != NULL)
+ pipe->error = true;
}
if (sbl_pcr & ISPSBL_PCR_PRV_WBL_OVF) {
- isp->isp_prev.error = 1;
- if (isp->isp_res.input == RESIZER_INPUT_VP &&
- !(isp->isp_ccdc.output & CCDC_OUTPUT_RESIZER))
- isp->isp_res.error = 1;
+ pipe = to_isp_pipeline(&isp->isp_prev.subdev.entity);
+ if (pipe != NULL)
+ pipe->error = true;
}
if (sbl_pcr & (ISPSBL_PCR_RSZ1_WBL_OVF
| ISPSBL_PCR_RSZ2_WBL_OVF
| ISPSBL_PCR_RSZ3_WBL_OVF
- | ISPSBL_PCR_RSZ4_WBL_OVF))
- isp->isp_res.error = 1;
+ | ISPSBL_PCR_RSZ4_WBL_OVF)) {
+ pipe = to_isp_pipeline(&isp->isp_res.subdev.entity);
+ if (pipe != NULL)
+ pipe->error = true;
+ }
if (sbl_pcr & ISPSBL_PCR_H3A_AF_WBL_OVF)
omap3isp_stat_sbl_overflow(&isp->isp_af);
@@ -471,24 +476,17 @@ static irqreturn_t isp_isr(int irq, void *_isp)
IRQ0STATUS_HS_VS_IRQ;
struct isp_device *isp = _isp;
u32 irqstatus;
- int ret;
irqstatus = isp_reg_readl(isp, OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0STATUS);
isp_reg_writel(isp, irqstatus, OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0STATUS);
isp_isr_sbl(isp);
- if (irqstatus & IRQ0STATUS_CSIA_IRQ) {
- ret = omap3isp_csi2_isr(&isp->isp_csi2a);
- if (ret)
- isp->isp_ccdc.error = 1;
- }
+ if (irqstatus & IRQ0STATUS_CSIA_IRQ)
+ omap3isp_csi2_isr(&isp->isp_csi2a);
- if (irqstatus & IRQ0STATUS_CSIB_IRQ) {
- ret = omap3isp_ccp2_isr(&isp->isp_ccp2);
- if (ret)
- isp->isp_ccdc.error = 1;
- }
+ if (irqstatus & IRQ0STATUS_CSIB_IRQ)
+ omap3isp_ccp2_isr(&isp->isp_ccp2);
if (irqstatus & IRQ0STATUS_CCDC_VD0_IRQ) {
if (isp->isp_ccdc.output & CCDC_OUTPUT_PREVIEW)
@@ -1114,8 +1112,7 @@ isp_restore_context(struct isp_device *isp, struct isp_reg *reg_list)
static void isp_save_ctx(struct isp_device *isp)
{
isp_save_context(isp, isp_reg_list);
- if (isp->iommu)
- omap_iommu_save_ctx(isp->iommu);
+ omap_iommu_save_ctx(isp->dev);
}
/*
@@ -1128,8 +1125,7 @@ static void isp_save_ctx(struct isp_device *isp)
static void isp_restore_ctx(struct isp_device *isp)
{
isp_restore_context(isp, isp_reg_list);
- if (isp->iommu)
- omap_iommu_restore_ctx(isp->iommu);
+ omap_iommu_restore_ctx(isp->dev);
omap3isp_ccdc_restore_context(isp);
omap3isp_preview_restore_context(isp);
}
@@ -1983,7 +1979,7 @@ static int isp_remove(struct platform_device *pdev)
isp_cleanup_modules(isp);
omap3isp_get(isp);
- iommu_detach_device(isp->domain, isp->iommu_dev);
+ iommu_detach_device(isp->domain, &pdev->dev);
iommu_domain_free(isp->domain);
omap3isp_put(isp);
@@ -2131,17 +2127,6 @@ static int isp_probe(struct platform_device *pdev)
}
}
- /* IOMMU */
- isp->iommu_dev = omap_find_iommu_device("isp");
- if (!isp->iommu_dev) {
- dev_err(isp->dev, "omap_find_iommu_device failed\n");
- ret = -ENODEV;
- goto error_isp;
- }
-
- /* to be removed once iommu migration is complete */
- isp->iommu = to_iommu(isp->iommu_dev);
-
isp->domain = iommu_domain_alloc(pdev->dev.bus);
if (!isp->domain) {
dev_err(isp->dev, "can't alloc iommu domain\n");
@@ -2149,7 +2134,7 @@ static int isp_probe(struct platform_device *pdev)
goto error_isp;
}
- ret = iommu_attach_device(isp->domain, isp->iommu_dev);
+ ret = iommu_attach_device(isp->domain, &pdev->dev);
if (ret) {
dev_err(&pdev->dev, "can't attach iommu device: %d\n", ret);
goto free_domain;
@@ -2188,7 +2173,7 @@ error_modules:
error_irq:
free_irq(isp->irq_num, isp);
detach_dev:
- iommu_detach_device(isp->domain, isp->iommu_dev);
+ iommu_detach_device(isp->domain, &pdev->dev);
free_domain:
iommu_domain_free(isp->domain);
error_isp:
@@ -2242,24 +2227,7 @@ static struct platform_driver omap3isp_driver = {
},
};
-/*
- * isp_init - ISP module initialization.
- */
-static int __init isp_init(void)
-{
- return platform_driver_register(&omap3isp_driver);
-}
-
-/*
- * isp_cleanup - ISP module cleanup.
- */
-static void __exit isp_cleanup(void)
-{
- platform_driver_unregister(&omap3isp_driver);
-}
-
-module_init(isp_init);
-module_exit(isp_cleanup);
+module_platform_driver(omap3isp_driver);
MODULE_AUTHOR("Nokia Corporation");
MODULE_DESCRIPTION("TI OMAP3 ISP driver");
diff --git a/drivers/media/video/omap3isp/isp.h b/drivers/media/video/omap3isp/isp.h
index 705946ef4d60..d96603eb0d17 100644
--- a/drivers/media/video/omap3isp/isp.h
+++ b/drivers/media/video/omap3isp/isp.h
@@ -212,9 +212,7 @@ struct isp_device {
unsigned int sbl_resources;
unsigned int subclk_resources;
- struct omap_iommu *iommu;
struct iommu_domain *domain;
- struct device *iommu_dev;
struct isp_platform_callback platform_cb;
};
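Throughout the omap3isp hunks that follow, the omap_iommu_*() helpers and iommu_attach_device()/iommu_detach_device() are passed the ISP's struct device (isp->dev or &pdev->dev) instead of the cached struct omap_iommu handle, which is why the iommu and iommu_dev fields can be dropped from struct isp_device. Condensed from the isp_probe() hunk, the attach path now looks roughly like this (the wrapper name is hypothetical):

static int isp_attach_iommu(struct isp_device *isp, struct platform_device *pdev)
{
	int ret;

	isp->domain = iommu_domain_alloc(pdev->dev.bus);
	if (!isp->domain)
		return -ENOMEM;

	ret = iommu_attach_device(isp->domain, &pdev->dev);
	if (ret)
		iommu_domain_free(isp->domain);
	return ret;
}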
diff --git a/drivers/media/video/omap3isp/ispccdc.c b/drivers/media/video/omap3isp/ispccdc.c
index b0b0fa5a3572..a74a79701d34 100644
--- a/drivers/media/video/omap3isp/ispccdc.c
+++ b/drivers/media/video/omap3isp/ispccdc.c
@@ -366,7 +366,7 @@ static void ccdc_lsc_free_request(struct isp_ccdc_device *ccdc,
dma_unmap_sg(isp->dev, req->iovm->sgt->sgl,
req->iovm->sgt->nents, DMA_TO_DEVICE);
if (req->table)
- omap_iommu_vfree(isp->domain, isp->iommu, req->table);
+ omap_iommu_vfree(isp->domain, isp->dev, req->table);
kfree(req);
}
@@ -438,7 +438,7 @@ static int ccdc_lsc_config(struct isp_ccdc_device *ccdc,
req->enable = 1;
- req->table = omap_iommu_vmalloc(isp->domain, isp->iommu, 0,
+ req->table = omap_iommu_vmalloc(isp->domain, isp->dev, 0,
req->config.size, IOMMU_FLAG);
if (IS_ERR_VALUE(req->table)) {
req->table = 0;
@@ -446,7 +446,7 @@ static int ccdc_lsc_config(struct isp_ccdc_device *ccdc,
goto done;
}
- req->iovm = omap_find_iovm_area(isp->iommu, req->table);
+ req->iovm = omap_find_iovm_area(isp->dev, req->table);
if (req->iovm == NULL) {
ret = -ENOMEM;
goto done;
@@ -462,7 +462,7 @@ static int ccdc_lsc_config(struct isp_ccdc_device *ccdc,
dma_sync_sg_for_cpu(isp->dev, req->iovm->sgt->sgl,
req->iovm->sgt->nents, DMA_TO_DEVICE);
- table = omap_da_to_va(isp->iommu, req->table);
+ table = omap_da_to_va(isp->dev, req->table);
if (copy_from_user(table, config->lsc, req->config.size)) {
ret = -EFAULT;
goto done;
@@ -734,15 +734,15 @@ static int ccdc_config(struct isp_ccdc_device *ccdc,
* already done by omap_iommu_vmalloc().
*/
size = ccdc->fpc.fpnum * 4;
- table_new = omap_iommu_vmalloc(isp->domain, isp->iommu,
+ table_new = omap_iommu_vmalloc(isp->domain, isp->dev,
0, size, IOMMU_FLAG);
if (IS_ERR_VALUE(table_new))
return -ENOMEM;
- if (copy_from_user(omap_da_to_va(isp->iommu, table_new),
+ if (copy_from_user(omap_da_to_va(isp->dev, table_new),
(__force void __user *)
ccdc->fpc.fpcaddr, size)) {
- omap_iommu_vfree(isp->domain, isp->iommu,
+ omap_iommu_vfree(isp->domain, isp->dev,
table_new);
return -EFAULT;
}
@@ -753,7 +753,7 @@ static int ccdc_config(struct isp_ccdc_device *ccdc,
ccdc_configure_fpc(ccdc);
if (table_old != 0)
- omap_iommu_vfree(isp->domain, isp->iommu, table_old);
+ omap_iommu_vfree(isp->domain, isp->dev, table_old);
}
return ccdc_lsc_config(ccdc, ccdc_struct);
@@ -1406,8 +1406,7 @@ static int __ccdc_handle_stopping(struct isp_ccdc_device *ccdc, u32 event)
static void ccdc_hs_vs_isr(struct isp_ccdc_device *ccdc)
{
- struct isp_pipeline *pipe =
- to_isp_pipeline(&ccdc->video_out.video.entity);
+ struct isp_pipeline *pipe = to_isp_pipeline(&ccdc->subdev.entity);
struct video_device *vdev = &ccdc->subdev.devnode;
struct v4l2_event event;
@@ -1428,8 +1427,11 @@ static void ccdc_lsc_isr(struct isp_ccdc_device *ccdc, u32 events)
unsigned long flags;
if (events & IRQ0STATUS_CCDC_LSC_PREF_ERR_IRQ) {
+ struct isp_pipeline *pipe =
+ to_isp_pipeline(&ccdc->subdev.entity);
+
ccdc_lsc_error_handler(ccdc);
- ccdc->error = 1;
+ pipe->error = true;
dev_dbg(to_device(ccdc), "lsc prefetch error\n");
}
@@ -1504,7 +1506,7 @@ static int ccdc_isr_buffer(struct isp_ccdc_device *ccdc)
goto done;
}
- buffer = omap3isp_video_buffer_next(&ccdc->video_out, ccdc->error);
+ buffer = omap3isp_video_buffer_next(&ccdc->video_out);
if (buffer != NULL) {
ccdc_set_outaddr(ccdc, buffer->isp_addr);
restart = 1;
@@ -1518,7 +1520,6 @@ static int ccdc_isr_buffer(struct isp_ccdc_device *ccdc)
ISP_PIPELINE_STREAM_SINGLESHOT);
done:
- ccdc->error = 0;
return restart;
}
@@ -1744,7 +1745,6 @@ static int ccdc_set_stream(struct v4l2_subdev *sd, int enable)
*/
ccdc_config_vp(ccdc);
ccdc_enable_vp(ccdc, 1);
- ccdc->error = 0;
ccdc_print_status(ccdc);
}
@@ -2309,7 +2309,7 @@ void omap3isp_ccdc_cleanup(struct isp_device *isp)
ccdc_lsc_free_queue(ccdc, &ccdc->lsc.free_queue);
if (ccdc->fpc.fpcaddr != 0)
- omap_iommu_vfree(isp->domain, isp->iommu, ccdc->fpc.fpcaddr);
+ omap_iommu_vfree(isp->domain, isp->dev, ccdc->fpc.fpcaddr);
mutex_destroy(&ccdc->ioctl_lock);
}
diff --git a/drivers/media/video/omap3isp/ispccdc.h b/drivers/media/video/omap3isp/ispccdc.h
index 483a19cac1ad..6d0264bab75b 100644
--- a/drivers/media/video/omap3isp/ispccdc.h
+++ b/drivers/media/video/omap3isp/ispccdc.h
@@ -150,7 +150,6 @@ struct ispccdc_lsc {
* @input: Active input
* @output: Active outputs
* @video_out: Output video node
- * @error: A hardware error occurred during capture
* @alaw: A-law compression enabled (1) or disabled (0)
* @lpf: Low pass filter enabled (1) or disabled (0)
* @obclamp: Optical-black clamp enabled (1) or disabled (0)
@@ -178,7 +177,6 @@ struct isp_ccdc_device {
enum ccdc_input_entity input;
unsigned int output;
struct isp_video video_out;
- unsigned int error;
unsigned int alaw:1,
lpf:1,
diff --git a/drivers/media/video/omap3isp/ispccp2.c b/drivers/media/video/omap3isp/ispccp2.c
index 904ca8c8b17f..70ddbf35b223 100644
--- a/drivers/media/video/omap3isp/ispccp2.c
+++ b/drivers/media/video/omap3isp/ispccp2.c
@@ -556,7 +556,7 @@ static void ccp2_isr_buffer(struct isp_ccp2_device *ccp2)
struct isp_pipeline *pipe = to_isp_pipeline(&ccp2->subdev.entity);
struct isp_buffer *buffer;
- buffer = omap3isp_video_buffer_next(&ccp2->video_in, ccp2->error);
+ buffer = omap3isp_video_buffer_next(&ccp2->video_in);
if (buffer != NULL)
ccp2_set_inaddr(ccp2, buffer->isp_addr);
@@ -567,8 +567,6 @@ static void ccp2_isr_buffer(struct isp_ccp2_device *ccp2)
omap3isp_pipeline_set_stream(pipe,
ISP_PIPELINE_STREAM_SINGLESHOT);
}
-
- ccp2->error = 0;
}
/*
@@ -576,13 +574,11 @@ static void ccp2_isr_buffer(struct isp_ccp2_device *ccp2)
* @ccp2: Pointer to ISP CCP2 device
*
* This will handle the CCP2 interrupts
- *
- * Returns -EIO in case of error, or 0 on success.
*/
-int omap3isp_ccp2_isr(struct isp_ccp2_device *ccp2)
+void omap3isp_ccp2_isr(struct isp_ccp2_device *ccp2)
{
+ struct isp_pipeline *pipe = to_isp_pipeline(&ccp2->subdev.entity);
struct isp_device *isp = to_isp_device(ccp2);
- int ret = 0;
static const u32 ISPCCP2_LC01_ERROR =
ISPCCP2_LC01_IRQSTATUS_LC0_FIFO_OVF_IRQ |
ISPCCP2_LC01_IRQSTATUS_LC0_CRC_IRQ |
@@ -604,19 +600,18 @@ int omap3isp_ccp2_isr(struct isp_ccp2_device *ccp2)
ISPCCP2_LCM_IRQSTATUS);
/* Errors */
if (lcx_irqstatus & ISPCCP2_LC01_ERROR) {
- ccp2->error = 1;
+ pipe->error = true;
dev_dbg(isp->dev, "CCP2 err:%x\n", lcx_irqstatus);
- return -EIO;
+ return;
}
if (lcm_irqstatus & ISPCCP2_LCM_IRQSTATUS_OCPERROR_IRQ) {
- ccp2->error = 1;
+ pipe->error = true;
dev_dbg(isp->dev, "CCP2 OCP err:%x\n", lcm_irqstatus);
- ret = -EIO;
}
if (omap3isp_module_sync_is_stopping(&ccp2->wait, &ccp2->stopping))
- return 0;
+ return;
/* Frame number propagation */
if (lcx_irqstatus & ISPCCP2_LC01_IRQSTATUS_LC0_FS_IRQ) {
@@ -629,8 +624,6 @@ int omap3isp_ccp2_isr(struct isp_ccp2_device *ccp2)
/* Handle queued buffers on frame end interrupts */
if (lcm_irqstatus & ISPCCP2_LCM_IRQSTATUS_EOF_IRQ)
ccp2_isr_buffer(ccp2);
-
- return ret;
}
/* -----------------------------------------------------------------------------
@@ -867,7 +860,6 @@ static int ccp2_s_stream(struct v4l2_subdev *sd, int enable)
if (enable == ISP_PIPELINE_STREAM_STOPPED)
return 0;
atomic_set(&ccp2->stopping, 0);
- ccp2->error = 0;
}
switch (enable) {
diff --git a/drivers/media/video/omap3isp/ispccp2.h b/drivers/media/video/omap3isp/ispccp2.h
index 6674e9de2cd7..76d65f4576ef 100644
--- a/drivers/media/video/omap3isp/ispccp2.h
+++ b/drivers/media/video/omap3isp/ispccp2.h
@@ -82,7 +82,6 @@ struct isp_ccp2_device {
struct isp_video video_in;
struct isp_csiphy *phy;
struct regulator *vdds_csib;
- unsigned int error;
enum isp_pipeline_stream_state state;
wait_queue_head_t wait;
atomic_t stopping;
@@ -94,6 +93,6 @@ void omap3isp_ccp2_cleanup(struct isp_device *isp);
int omap3isp_ccp2_register_entities(struct isp_ccp2_device *ccp2,
struct v4l2_device *vdev);
void omap3isp_ccp2_unregister_entities(struct isp_ccp2_device *ccp2);
-int omap3isp_ccp2_isr(struct isp_ccp2_device *ccp2);
+void omap3isp_ccp2_isr(struct isp_ccp2_device *ccp2);
#endif /* OMAP3_ISP_CCP2_H */
diff --git a/drivers/media/video/omap3isp/ispcsi2.c b/drivers/media/video/omap3isp/ispcsi2.c
index 0c5f1cb9d99d..fcb5168996a7 100644
--- a/drivers/media/video/omap3isp/ispcsi2.c
+++ b/drivers/media/video/omap3isp/ispcsi2.c
@@ -667,7 +667,7 @@ static void csi2_isr_buffer(struct isp_csi2_device *csi2)
csi2_ctx_enable(isp, csi2, 0, 0);
- buffer = omap3isp_video_buffer_next(&csi2->video_out, 0);
+ buffer = omap3isp_video_buffer_next(&csi2->video_out);
/*
* Let video queue operation restart engine if there is an underrun
@@ -727,17 +727,15 @@ static void csi2_isr_ctx(struct isp_csi2_device *csi2,
/*
* omap3isp_csi2_isr - CSI2 interrupt handling.
- *
- * Return -EIO on Transmission error
*/
-int omap3isp_csi2_isr(struct isp_csi2_device *csi2)
+void omap3isp_csi2_isr(struct isp_csi2_device *csi2)
{
+ struct isp_pipeline *pipe = to_isp_pipeline(&csi2->subdev.entity);
u32 csi2_irqstatus, cpxio1_irqstatus;
struct isp_device *isp = csi2->isp;
- int retval = 0;
if (!csi2->available)
- return -ENODEV;
+ return;
csi2_irqstatus = isp_reg_readl(isp, csi2->regs1, ISPCSI2_IRQSTATUS);
isp_reg_writel(isp, csi2_irqstatus, csi2->regs1, ISPCSI2_IRQSTATUS);
@@ -750,7 +748,7 @@ int omap3isp_csi2_isr(struct isp_csi2_device *csi2)
csi2->regs1, ISPCSI2_PHY_IRQSTATUS);
dev_dbg(isp->dev, "CSI2: ComplexIO Error IRQ "
"%x\n", cpxio1_irqstatus);
- retval = -EIO;
+ pipe->error = true;
}
if (csi2_irqstatus & (ISPCSI2_IRQSTATUS_OCP_ERR_IRQ |
@@ -775,11 +773,11 @@ int omap3isp_csi2_isr(struct isp_csi2_device *csi2)
ISPCSI2_IRQSTATUS_COMPLEXIO2_ERR_IRQ) ? 1 : 0,
(csi2_irqstatus &
ISPCSI2_IRQSTATUS_FIFO_OVF_IRQ) ? 1 : 0);
- retval = -EIO;
+ pipe->error = true;
}
if (omap3isp_module_sync_is_stopping(&csi2->wait, &csi2->stopping))
- return 0;
+ return;
/* Successful cases */
if (csi2_irqstatus & ISPCSI2_IRQSTATUS_CONTEXT(0))
@@ -787,8 +785,6 @@ int omap3isp_csi2_isr(struct isp_csi2_device *csi2)
if (csi2_irqstatus & ISPCSI2_IRQSTATUS_ECC_CORRECTION_IRQ)
dev_dbg(isp->dev, "CSI2: ECC correction done\n");
-
- return retval;
}
/* -----------------------------------------------------------------------------
diff --git a/drivers/media/video/omap3isp/ispcsi2.h b/drivers/media/video/omap3isp/ispcsi2.h
index 456fb7fb8a0f..885ad79a7678 100644
--- a/drivers/media/video/omap3isp/ispcsi2.h
+++ b/drivers/media/video/omap3isp/ispcsi2.h
@@ -156,7 +156,7 @@ struct isp_csi2_device {
atomic_t stopping;
};
-int omap3isp_csi2_isr(struct isp_csi2_device *csi2);
+void omap3isp_csi2_isr(struct isp_csi2_device *csi2);
int omap3isp_csi2_reset(struct isp_csi2_device *csi2);
int omap3isp_csi2_init(struct isp_device *isp);
void omap3isp_csi2_cleanup(struct isp_device *isp);
diff --git a/drivers/media/video/omap3isp/isppreview.c b/drivers/media/video/omap3isp/isppreview.c
index ccb876fe023f..6d0fb2c8c26d 100644
--- a/drivers/media/video/omap3isp/isppreview.c
+++ b/drivers/media/video/omap3isp/isppreview.c
@@ -116,11 +116,11 @@ static struct omap3isp_prev_csc flr_prev_csc = {
#define PREV_MIN_IN_HEIGHT 8
#define PREV_MAX_IN_HEIGHT 16384
-#define PREV_MIN_OUT_WIDTH 0
-#define PREV_MIN_OUT_HEIGHT 0
-#define PREV_MAX_OUT_WIDTH 1280
-#define PREV_MAX_OUT_WIDTH_ES2 3300
-#define PREV_MAX_OUT_WIDTH_3630 4096
+#define PREV_MIN_OUT_WIDTH 0
+#define PREV_MIN_OUT_HEIGHT 0
+#define PREV_MAX_OUT_WIDTH_REV_1 1280
+#define PREV_MAX_OUT_WIDTH_REV_2 3300
+#define PREV_MAX_OUT_WIDTH_REV_15 4096
/*
* Coeficient Tables for the submodules in Preview.
@@ -1306,14 +1306,14 @@ static unsigned int preview_max_out_width(struct isp_prev_device *prev)
switch (isp->revision) {
case ISP_REVISION_1_0:
- return PREV_MAX_OUT_WIDTH;
+ return PREV_MAX_OUT_WIDTH_REV_1;
case ISP_REVISION_2_0:
default:
- return PREV_MAX_OUT_WIDTH_ES2;
+ return PREV_MAX_OUT_WIDTH_REV_2;
case ISP_REVISION_15_0:
- return PREV_MAX_OUT_WIDTH_3630;
+ return PREV_MAX_OUT_WIDTH_REV_15;
}
}
@@ -1404,16 +1404,14 @@ static void preview_isr_buffer(struct isp_prev_device *prev)
int restart = 0;
if (prev->input == PREVIEW_INPUT_MEMORY) {
- buffer = omap3isp_video_buffer_next(&prev->video_in,
- prev->error);
+ buffer = omap3isp_video_buffer_next(&prev->video_in);
if (buffer != NULL)
preview_set_inaddr(prev, buffer->isp_addr);
pipe->state |= ISP_PIPELINE_IDLE_INPUT;
}
if (prev->output & PREVIEW_OUTPUT_MEMORY) {
- buffer = omap3isp_video_buffer_next(&prev->video_out,
- prev->error);
+ buffer = omap3isp_video_buffer_next(&prev->video_out);
if (buffer != NULL) {
preview_set_outaddr(prev, buffer->isp_addr);
restart = 1;
@@ -1440,8 +1438,6 @@ static void preview_isr_buffer(struct isp_prev_device *prev)
default:
return;
}
-
- prev->error = 0;
}
/*
@@ -1565,7 +1561,6 @@ static int preview_set_stream(struct v4l2_subdev *sd, int enable)
omap3isp_subclk_enable(isp, OMAP3_ISP_SUBCLK_PREVIEW);
preview_configure(prev);
atomic_set(&prev->stopping, 0);
- prev->error = 0;
preview_print_status(prev);
}
diff --git a/drivers/media/video/omap3isp/isppreview.h b/drivers/media/video/omap3isp/isppreview.h
index f54e775c2df4..09686607973c 100644
--- a/drivers/media/video/omap3isp/isppreview.h
+++ b/drivers/media/video/omap3isp/isppreview.h
@@ -157,7 +157,6 @@ struct isptables_update {
* @output: Bitmask of the active output
* @video_in: Input video entity
* @video_out: Output video entity
- * @error: A hardware error occurred during capture
* @params: Module configuration data
* @shadow_update: If set, update the hardware configured in the next interrupt
* @underrun: Whether the preview entity has queued buffers on the output
@@ -179,7 +178,6 @@ struct isp_prev_device {
unsigned int output;
struct isp_video video_in;
struct isp_video video_out;
- unsigned int error;
struct prev_params params;
unsigned int shadow_update:1;
diff --git a/drivers/media/video/omap3isp/ispresizer.c b/drivers/media/video/omap3isp/ispresizer.c
index 50e593bfcfaf..6958a9e3dc22 100644
--- a/drivers/media/video/omap3isp/ispresizer.c
+++ b/drivers/media/video/omap3isp/ispresizer.c
@@ -1038,7 +1038,7 @@ static void resizer_isr_buffer(struct isp_res_device *res)
/* Complete the output buffer and, if reading from memory, the input
* buffer.
*/
- buffer = omap3isp_video_buffer_next(&res->video_out, res->error);
+ buffer = omap3isp_video_buffer_next(&res->video_out);
if (buffer != NULL) {
resizer_set_outaddr(res, buffer->isp_addr);
restart = 1;
@@ -1047,7 +1047,7 @@ static void resizer_isr_buffer(struct isp_res_device *res)
pipe->state |= ISP_PIPELINE_IDLE_OUTPUT;
if (res->input == RESIZER_INPUT_MEMORY) {
- buffer = omap3isp_video_buffer_next(&res->video_in, 0);
+ buffer = omap3isp_video_buffer_next(&res->video_in);
if (buffer != NULL)
resizer_set_inaddr(res, buffer->isp_addr);
pipe->state |= ISP_PIPELINE_IDLE_INPUT;
@@ -1064,8 +1064,6 @@ static void resizer_isr_buffer(struct isp_res_device *res)
if (restart)
resizer_enable_oneshot(res);
}
-
- res->error = 0;
}
/*
@@ -1154,7 +1152,6 @@ static int resizer_set_stream(struct v4l2_subdev *sd, int enable)
omap3isp_subclk_enable(isp, OMAP3_ISP_SUBCLK_RESIZER);
resizer_configure(res);
- res->error = 0;
resizer_print_status(res);
}
diff --git a/drivers/media/video/omap3isp/ispresizer.h b/drivers/media/video/omap3isp/ispresizer.h
index 76abc2e42126..70c1c0e1bbdf 100644
--- a/drivers/media/video/omap3isp/ispresizer.h
+++ b/drivers/media/video/omap3isp/ispresizer.h
@@ -107,7 +107,6 @@ struct isp_res_device {
enum resizer_input_entity input;
struct isp_video video_in;
struct isp_video video_out;
- unsigned int error;
u32 addr_base; /* stored source buffer address in memory mode */
u32 crop_offset; /* additional offset for crop in memory mode */
diff --git a/drivers/media/video/omap3isp/ispstat.c b/drivers/media/video/omap3isp/ispstat.c
index 68d539456c55..11871ecc6d25 100644
--- a/drivers/media/video/omap3isp/ispstat.c
+++ b/drivers/media/video/omap3isp/ispstat.c
@@ -366,7 +366,7 @@ static void isp_stat_bufs_free(struct ispstat *stat)
dma_unmap_sg(isp->dev, buf->iovm->sgt->sgl,
buf->iovm->sgt->nents,
DMA_FROM_DEVICE);
- omap_iommu_vfree(isp->domain, isp->iommu,
+ omap_iommu_vfree(isp->domain, isp->dev,
buf->iommu_addr);
} else {
if (!buf->virt_addr)
@@ -400,7 +400,7 @@ static int isp_stat_bufs_alloc_iommu(struct ispstat *stat, unsigned int size)
struct iovm_struct *iovm;
WARN_ON(buf->dma_addr);
- buf->iommu_addr = omap_iommu_vmalloc(isp->domain, isp->iommu, 0,
+ buf->iommu_addr = omap_iommu_vmalloc(isp->domain, isp->dev, 0,
size, IOMMU_FLAG);
if (IS_ERR((void *)buf->iommu_addr)) {
dev_err(stat->isp->dev,
@@ -410,7 +410,7 @@ static int isp_stat_bufs_alloc_iommu(struct ispstat *stat, unsigned int size)
return -ENOMEM;
}
- iovm = omap_find_iovm_area(isp->iommu, buf->iommu_addr);
+ iovm = omap_find_iovm_area(isp->dev, buf->iommu_addr);
if (!iovm ||
!dma_map_sg(isp->dev, iovm->sgt->sgl, iovm->sgt->nents,
DMA_FROM_DEVICE)) {
@@ -419,7 +419,7 @@ static int isp_stat_bufs_alloc_iommu(struct ispstat *stat, unsigned int size)
}
buf->iovm = iovm;
- buf->virt_addr = omap_da_to_va(stat->isp->iommu,
+ buf->virt_addr = omap_da_to_va(stat->isp->dev,
(u32)buf->iommu_addr);
buf->empty = 1;
dev_dbg(stat->isp->dev, "%s: buffer[%d] allocated."
@@ -496,7 +496,7 @@ static int isp_stat_bufs_alloc(struct ispstat *stat, u32 size)
static void isp_stat_queue_event(struct ispstat *stat, int err)
{
- struct video_device *vdev = &stat->subdev.devnode;
+ struct video_device *vdev = stat->subdev.devnode;
struct v4l2_event event;
struct omap3isp_stat_event_status *status = (void *)event.u.data;
diff --git a/drivers/media/video/omap3isp/ispvideo.c b/drivers/media/video/omap3isp/ispvideo.c
index d1000723c5ae..b02070057724 100644
--- a/drivers/media/video/omap3isp/ispvideo.c
+++ b/drivers/media/video/omap3isp/ispvideo.c
@@ -26,6 +26,7 @@
#include <asm/cacheflush.h>
#include <linux/clk.h>
#include <linux/mm.h>
+#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
@@ -210,14 +211,14 @@ static void isp_video_pix_to_mbus(const struct v4l2_pix_format *pix,
mbus->width = pix->width;
mbus->height = pix->height;
- for (i = 0; i < ARRAY_SIZE(formats); ++i) {
+ /* Skip the last format in the loop so that it will be selected if no
+ * match is found.
+ */
+ for (i = 0; i < ARRAY_SIZE(formats) - 1; ++i) {
if (formats[i].pixelformat == pix->pixelformat)
break;
}
- if (WARN_ON(i == ARRAY_SIZE(formats)))
- return;
-
mbus->code = formats[i].code;
mbus->colorspace = pix->colorspace;
mbus->field = pix->field;
@@ -452,7 +453,7 @@ ispmmu_vmap(struct isp_device *isp, const struct scatterlist *sglist, int sglen)
sgt->nents = sglen;
sgt->orig_nents = sglen;
- da = omap_iommu_vmap(isp->domain, isp->iommu, 0, sgt, IOMMU_FLAG);
+ da = omap_iommu_vmap(isp->domain, isp->dev, 0, sgt, IOMMU_FLAG);
if (IS_ERR_VALUE(da))
kfree(sgt);
@@ -468,7 +469,7 @@ static void ispmmu_vunmap(struct isp_device *isp, dma_addr_t da)
{
struct sg_table *sgt;
- sgt = omap_iommu_vunmap(isp->domain, isp->iommu, (u32)da);
+ sgt = omap_iommu_vunmap(isp->domain, isp->dev, (u32)da);
kfree(sgt);
}
@@ -580,21 +581,20 @@ static const struct isp_video_queue_operations isp_video_queue_ops = {
/*
* omap3isp_video_buffer_next - Complete the current buffer and return the next
* @video: ISP video object
- * @error: Whether an error occurred during capture
*
* Remove the current video buffer from the DMA queue and fill its timestamp,
* field count and state fields before waking up its completion handler.
*
- * The buffer state is set to VIDEOBUF_DONE if no error occurred (@error is 0)
- * or VIDEOBUF_ERROR otherwise (@error is non-zero).
+ * For capture video nodes the buffer state is set to ISP_BUF_STATE_DONE if no
+ * error has been flagged in the pipeline, or to ISP_BUF_STATE_ERROR otherwise.
+ * For video output nodes the buffer state is always set to ISP_BUF_STATE_DONE.
*
* The DMA queue is expected to contain at least one buffer.
*
* Return a pointer to the next buffer in the DMA queue, or NULL if the queue is
* empty.
*/
-struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video,
- unsigned int error)
+struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video)
{
struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
struct isp_video_queue *queue = video->queue;
@@ -629,7 +629,13 @@ struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video,
else
buf->vbuf.sequence = atomic_read(&pipe->frame_number);
- buf->state = error ? ISP_BUF_STATE_ERROR : ISP_BUF_STATE_DONE;
+ /* Report pipeline errors to userspace on the capture device side. */
+ if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->error) {
+ buf->state = ISP_BUF_STATE_ERROR;
+ pipe->error = false;
+ } else {
+ buf->state = ISP_BUF_STATE_DONE;
+ }
wake_up(&buf->wait);
@@ -1015,6 +1021,8 @@ isp_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
if (ret < 0)
goto error;
+ pipe->error = false;
+
spin_lock_irqsave(&pipe->lock, flags);
pipe->state &= ~ISP_PIPELINE_STREAM;
pipe->state |= state;
diff --git a/drivers/media/video/omap3isp/ispvideo.h b/drivers/media/video/omap3isp/ispvideo.h
index 08cbfa144e6e..d91bdb919be0 100644
--- a/drivers/media/video/omap3isp/ispvideo.h
+++ b/drivers/media/video/omap3isp/ispvideo.h
@@ -85,6 +85,10 @@ enum isp_pipeline_state {
ISP_PIPELINE_STREAM = 64,
};
+/*
+ * struct isp_pipeline - An ISP hardware pipeline
+ * @error: A hardware error occurred during capture
+ */
struct isp_pipeline {
struct media_pipeline pipe;
spinlock_t lock; /* Pipeline state and queue flags */
@@ -96,6 +100,7 @@ struct isp_pipeline {
unsigned int max_rate;
atomic_t frame_number;
bool do_propagation; /* of frame number */
+ bool error;
struct v4l2_fract max_timeperframe;
};
@@ -194,8 +199,7 @@ void omap3isp_video_cleanup(struct isp_video *video);
int omap3isp_video_register(struct isp_video *video,
struct v4l2_device *vdev);
void omap3isp_video_unregister(struct isp_video *video);
-struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video,
- unsigned int error);
+struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video);
void omap3isp_video_resume(struct isp_video *video, int continuous);
struct media_pad *omap3isp_video_remote_pad(struct isp_video *video);
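With the new pipe->error flag, the per-module error fields in ccdc, ccp2, csi2, preview and resizer disappear: each ISR marks the shared pipeline instead, and omap3isp_video_buffer_next() reports the error once when it completes a capture buffer. Condensed from the ispvideo.c hunk above:

	/* Report pipeline errors to userspace on the capture device side. */
	if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->error) {
		buf->state = ISP_BUF_STATE_ERROR;
		pipe->error = false;	/* report once, then clear */
	} else {
		buf->state = ISP_BUF_STATE_DONE;
	}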
diff --git a/drivers/media/video/ov6650.c b/drivers/media/video/ov6650.c
index 9f2d26b1d4cb..6806345ec2f0 100644
--- a/drivers/media/video/ov6650.c
+++ b/drivers/media/video/ov6650.c
@@ -540,7 +540,7 @@ static u8 to_clkrc(struct v4l2_fract *timeperframe,
static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_device *icd = (struct soc_camera_device *)sd->grp_id;
+ struct soc_camera_device *icd = v4l2_get_subdev_hostdata(sd);
struct soc_camera_sense *sense = icd->sense;
struct ov6650 *priv = to_ov6650(client);
bool half_scale = !is_unscaled_ok(mf->width, mf->height, &priv->rect);
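The ov6650 change above stops smuggling the soc_camera_device pointer through the subdev's grp_id field and retrieves it with v4l2_get_subdev_hostdata(), the accessor paired with v4l2_set_subdev_hostdata(); the soc-camera host is assumed to have stored the pointer there when it attached the subdev. The pairing, in sketch form:

	/* host side (assumed done by the soc-camera core at attach time) */
	v4l2_set_subdev_hostdata(sd, icd);

	/* subdev side (as in the hunk above) */
	struct soc_camera_device *icd = v4l2_get_subdev_hostdata(sd);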
diff --git a/drivers/media/video/ov7670.c b/drivers/media/video/ov7670.c
index 8aa058531280..6a564964853a 100644
--- a/drivers/media/video/ov7670.c
+++ b/drivers/media/video/ov7670.c
@@ -25,7 +25,7 @@ MODULE_AUTHOR("Jonathan Corbet <corbet@lwn.net>");
MODULE_DESCRIPTION("A low-level driver for OmniVision ov7670 sensors");
MODULE_LICENSE("GPL");
-static int debug;
+static bool debug;
module_param(debug, bool, 0644);
MODULE_PARM_DESC(debug, "Debug level (0-1)");
diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw.c b/drivers/media/video/pvrusb2/pvrusb2-hdw.c
index 122b45760f0d..ebc2c7e39233 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-hdw.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw.c
@@ -2546,8 +2546,9 @@ struct pvr2_hdw *pvr2_hdw_create(struct usb_interface *intf,
}
/* Define and configure additional controls from cx2341x module. */
- hdw->mpeg_ctrl_info = kzalloc(
- sizeof(*(hdw->mpeg_ctrl_info)) * MPEGDEF_COUNT, GFP_KERNEL);
+ hdw->mpeg_ctrl_info = kcalloc(MPEGDEF_COUNT,
+ sizeof(*(hdw->mpeg_ctrl_info)),
+ GFP_KERNEL);
if (!hdw->mpeg_ctrl_info) goto fail;
for (idx = 0; idx < MPEGDEF_COUNT; idx++) {
cptr = hdw->controls + idx + CTRLDEF_COUNT;
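kcalloc() zeroes the allocation just like kzalloc(), but additionally checks the element-count multiplication for overflow, which is why it is preferred for array allocations such as this one:

	/* old: open-coded multiplication, no overflow check */
	hdw->mpeg_ctrl_info = kzalloc(
		sizeof(*(hdw->mpeg_ctrl_info)) * MPEGDEF_COUNT, GFP_KERNEL);

	/* new: kcalloc(n, size, flags) guards against n * size overflowing */
	hdw->mpeg_ctrl_info = kcalloc(MPEGDEF_COUNT,
				      sizeof(*(hdw->mpeg_ctrl_info)),
				      GFP_KERNEL);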
diff --git a/drivers/media/video/pwc/pwc-ctrl.c b/drivers/media/video/pwc/pwc-ctrl.c
index 3977addf3ba8..1f506fde97d0 100644
--- a/drivers/media/video/pwc/pwc-ctrl.c
+++ b/drivers/media/video/pwc/pwc-ctrl.c
@@ -102,52 +102,18 @@ static struct Nala_table_entry Nala_table[PSZ_MAX][PWC_FPS_MAX_NALA] =
#include "pwc-nala.h"
};
-static void pwc_set_image_buffer_size(struct pwc_device *pdev);
-
/****************************************************************************/
-static int _send_control_msg(struct pwc_device *pdev,
- u8 request, u16 value, int index, void *buf, int buflen)
-{
- int rc;
- void *kbuf = NULL;
-
- if (buflen) {
- kbuf = kmalloc(buflen, GFP_KERNEL); /* not allowed on stack */
- if (kbuf == NULL)
- return -ENOMEM;
- memcpy(kbuf, buf, buflen);
- }
-
- rc = usb_control_msg(pdev->udev, usb_sndctrlpipe(pdev->udev, 0),
- request,
- USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- value,
- index,
- kbuf, buflen, USB_CTRL_SET_TIMEOUT);
-
- kfree(kbuf);
- return rc;
-}
-
static int recv_control_msg(struct pwc_device *pdev,
- u8 request, u16 value, void *buf, int buflen)
+ u8 request, u16 value, int recv_count)
{
int rc;
- void *kbuf = kmalloc(buflen, GFP_KERNEL); /* not allowed on stack */
-
- if (kbuf == NULL)
- return -ENOMEM;
rc = usb_control_msg(pdev->udev, usb_rcvctrlpipe(pdev->udev, 0),
request,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- value,
- pdev->vcinterface,
- kbuf, buflen, USB_CTRL_GET_TIMEOUT);
- memcpy(buf, kbuf, buflen);
- kfree(kbuf);
-
+ value, pdev->vcinterface,
+ pdev->ctrl_buf, recv_count, USB_CTRL_GET_TIMEOUT);
if (rc < 0)
PWC_ERROR("recv_control_msg error %d req %02x val %04x\n",
rc, request, value);
@@ -155,26 +121,39 @@ static int recv_control_msg(struct pwc_device *pdev,
}
static inline int send_video_command(struct pwc_device *pdev,
- int index, void *buf, int buflen)
+ int index, const unsigned char *buf, int buflen)
{
- return _send_control_msg(pdev,
- SET_EP_STREAM_CTL,
- VIDEO_OUTPUT_CONTROL_FORMATTER,
- index,
- buf, buflen);
+ int rc;
+
+ memcpy(pdev->ctrl_buf, buf, buflen);
+
+ rc = usb_control_msg(pdev->udev, usb_sndctrlpipe(pdev->udev, 0),
+ SET_EP_STREAM_CTL,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ VIDEO_OUTPUT_CONTROL_FORMATTER, index,
+ pdev->ctrl_buf, buflen, USB_CTRL_SET_TIMEOUT);
+ if (rc >= 0)
+ memcpy(pdev->cmd_buf, buf, buflen);
+ else
+ PWC_ERROR("send_video_command error %d\n", rc);
+
+ return rc;
}
int send_control_msg(struct pwc_device *pdev,
u8 request, u16 value, void *buf, int buflen)
{
- return _send_control_msg(pdev,
- request, value, pdev->vcinterface, buf, buflen);
+ return usb_control_msg(pdev->udev, usb_sndctrlpipe(pdev->udev, 0),
+ request,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ value, pdev->vcinterface,
+ buf, buflen, USB_CTRL_SET_TIMEOUT);
}
-static int set_video_mode_Nala(struct pwc_device *pdev, int size, int frames)
+static int set_video_mode_Nala(struct pwc_device *pdev, int size, int pixfmt,
+ int frames, int *compression, int send_to_cam)
{
- unsigned char buf[3];
- int ret, fps;
+ int fps, ret = 0;
struct Nala_table_entry *pEntry;
int frames2frames[31] =
{ /* closest match of framerate */
@@ -196,35 +175,34 @@ static int set_video_mode_Nala(struct pwc_device *pdev, int size, int frames)
7 /* 30 */
};
- if (size < 0 || size > PSZ_CIF || frames < 4 || frames > 25)
+ if (size < 0 || size > PSZ_CIF)
return -EINVAL;
+ if (frames < 4)
+ frames = 4;
+ else if (frames > 25)
+ frames = 25;
frames = frames2frames[frames];
fps = frames2table[frames];
pEntry = &Nala_table[size][fps];
if (pEntry->alternate == 0)
return -EINVAL;
- memcpy(buf, pEntry->mode, 3);
- ret = send_video_command(pdev, pdev->vendpoint, buf, 3);
- if (ret < 0) {
- PWC_DEBUG_MODULE("Failed to send video command... %d\n", ret);
+ if (send_to_cam)
+ ret = send_video_command(pdev, pdev->vendpoint,
+ pEntry->mode, 3);
+ if (ret < 0)
return ret;
- }
- if (pEntry->compressed && pdev->pixfmt == V4L2_PIX_FMT_YUV420) {
- ret = pwc_dec1_init(pdev, pdev->type, pdev->release, buf);
- if (ret < 0)
- return ret;
- }
- pdev->cmd_len = 3;
- memcpy(pdev->cmd_buf, buf, 3);
+ if (pEntry->compressed && pixfmt == V4L2_PIX_FMT_YUV420)
+ pwc_dec1_init(pdev, pEntry->mode);
/* Set various parameters */
+ pdev->pixfmt = pixfmt;
pdev->vframes = frames;
- pdev->vsize = size;
pdev->valternate = pEntry->alternate;
- pdev->image = pwc_image_sizes[size];
- pdev->frame_size = (pdev->image.x * pdev->image.y * 3) / 2;
+ pdev->width = pwc_image_sizes[size][0];
+ pdev->height = pwc_image_sizes[size][1];
+ pdev->frame_size = (pdev->width * pdev->height * 3) / 2;
if (pEntry->compressed) {
if (pdev->release < 5) { /* 4 fold compression */
pdev->vbandlength = 528;
@@ -237,183 +215,142 @@ static int set_video_mode_Nala(struct pwc_device *pdev, int size, int frames)
}
else
pdev->vbandlength = 0;
+
+ /* Let pwc-if.c:isoc_init know we don't support higher compression */
+ *compression = 3;
+
return 0;
}
-static int set_video_mode_Timon(struct pwc_device *pdev, int size, int frames, int compression, int snapshot)
+static int set_video_mode_Timon(struct pwc_device *pdev, int size, int pixfmt,
+ int frames, int *compression, int send_to_cam)
{
- unsigned char buf[13];
const struct Timon_table_entry *pChoose;
- int ret, fps;
+ int fps, ret = 0;
- if (size >= PSZ_MAX || frames < 5 || frames > 30 || compression < 0 || compression > 3)
- return -EINVAL;
- if (size == PSZ_VGA && frames > 15)
+ if (size >= PSZ_MAX || *compression < 0 || *compression > 3)
return -EINVAL;
+ if (frames < 5)
+ frames = 5;
+ else if (size == PSZ_VGA && frames > 15)
+ frames = 15;
+ else if (frames > 30)
+ frames = 30;
fps = (frames / 5) - 1;
- /* Find a supported framerate with progressively higher compression ratios
- if the preferred ratio is not available.
- */
+ /* Find a supported framerate with progressively higher compression */
pChoose = NULL;
- while (compression <= 3) {
- pChoose = &Timon_table[size][fps][compression];
- if (pChoose->alternate != 0)
- break;
- compression++;
+ while (*compression <= 3) {
+ pChoose = &Timon_table[size][fps][*compression];
+ if (pChoose->alternate != 0)
+ break;
+ (*compression)++;
}
if (pChoose == NULL || pChoose->alternate == 0)
return -ENOENT; /* Not supported. */
- memcpy(buf, pChoose->mode, 13);
- if (snapshot)
- buf[0] |= 0x80;
- ret = send_video_command(pdev, pdev->vendpoint, buf, 13);
+ if (send_to_cam)
+ ret = send_video_command(pdev, pdev->vendpoint,
+ pChoose->mode, 13);
if (ret < 0)
return ret;
- if (pChoose->bandlength > 0 && pdev->pixfmt == V4L2_PIX_FMT_YUV420) {
- ret = pwc_dec23_init(pdev, pdev->type, buf);
- if (ret < 0)
- return ret;
- }
-
- pdev->cmd_len = 13;
- memcpy(pdev->cmd_buf, buf, 13);
+ if (pChoose->bandlength > 0 && pixfmt == V4L2_PIX_FMT_YUV420)
+ pwc_dec23_init(pdev, pChoose->mode);
/* Set various parameters */
- pdev->vframes = frames;
- pdev->vsize = size;
- pdev->vsnapshot = snapshot;
+ pdev->pixfmt = pixfmt;
+ pdev->vframes = (fps + 1) * 5;
pdev->valternate = pChoose->alternate;
- pdev->image = pwc_image_sizes[size];
+ pdev->width = pwc_image_sizes[size][0];
+ pdev->height = pwc_image_sizes[size][1];
pdev->vbandlength = pChoose->bandlength;
if (pChoose->bandlength > 0)
- pdev->frame_size = (pChoose->bandlength * pdev->image.y) / 4;
+ pdev->frame_size = (pChoose->bandlength * pdev->height) / 4;
else
- pdev->frame_size = (pdev->image.x * pdev->image.y * 12) / 8;
+ pdev->frame_size = (pdev->width * pdev->height * 12) / 8;
return 0;
}
-static int set_video_mode_Kiara(struct pwc_device *pdev, int size, int frames, int compression, int snapshot)
+static int set_video_mode_Kiara(struct pwc_device *pdev, int size, int pixfmt,
+ int frames, int *compression, int send_to_cam)
{
const struct Kiara_table_entry *pChoose = NULL;
- int fps, ret;
- unsigned char buf[12];
- struct Kiara_table_entry RawEntry = {6, 773, 1272, {0xAD, 0xF4, 0x10, 0x27, 0xB6, 0x24, 0x96, 0x02, 0x30, 0x05, 0x03, 0x80}};
+ int fps, ret = 0;
- if (size >= PSZ_MAX || frames < 5 || frames > 30 || compression < 0 || compression > 3)
- return -EINVAL;
- if (size == PSZ_VGA && frames > 15)
+ if (size >= PSZ_MAX || *compression < 0 || *compression > 3)
return -EINVAL;
+ if (frames < 5)
+ frames = 5;
+ else if (size == PSZ_VGA && frames > 15)
+ frames = 15;
+ else if (frames > 30)
+ frames = 30;
fps = (frames / 5) - 1;
- /* special case: VGA @ 5 fps and snapshot is raw bayer mode */
- if (size == PSZ_VGA && frames == 5 && snapshot && pdev->pixfmt != V4L2_PIX_FMT_YUV420)
- {
- /* Only available in case the raw palette is selected or
- we have the decompressor available. This mode is
- only available in compressed form
- */
- PWC_DEBUG_SIZE("Choosing VGA/5 BAYER mode.\n");
- pChoose = &RawEntry;
- }
- else
- {
- /* Find a supported framerate with progressively higher compression ratios
- if the preferred ratio is not available.
- Skip this step when using RAW modes.
- */
- snapshot = 0;
- while (compression <= 3) {
- pChoose = &Kiara_table[size][fps][compression];
- if (pChoose->alternate != 0)
- break;
- compression++;
- }
+ /* Find a supported framerate with progressively higher compression */
+ while (*compression <= 3) {
+ pChoose = &Kiara_table[size][fps][*compression];
+ if (pChoose->alternate != 0)
+ break;
+ (*compression)++;
}
if (pChoose == NULL || pChoose->alternate == 0)
return -ENOENT; /* Not supported. */
- PWC_TRACE("Using alternate setting %d.\n", pChoose->alternate);
-
- /* usb_control_msg won't take staticly allocated arrays as argument?? */
- memcpy(buf, pChoose->mode, 12);
- if (snapshot)
- buf[0] |= 0x80;
-
/* Firmware bug: video endpoint is 5, but commands are sent to endpoint 4 */
- ret = send_video_command(pdev, 4 /* pdev->vendpoint */, buf, 12);
+ if (send_to_cam)
+ ret = send_video_command(pdev, 4, pChoose->mode, 12);
if (ret < 0)
return ret;
- if (pChoose->bandlength > 0 && pdev->pixfmt == V4L2_PIX_FMT_YUV420) {
- ret = pwc_dec23_init(pdev, pdev->type, buf);
- if (ret < 0)
- return ret;
- }
+ if (pChoose->bandlength > 0 && pixfmt == V4L2_PIX_FMT_YUV420)
+ pwc_dec23_init(pdev, pChoose->mode);
- pdev->cmd_len = 12;
- memcpy(pdev->cmd_buf, buf, 12);
/* All set and go */
- pdev->vframes = frames;
- pdev->vsize = size;
- pdev->vsnapshot = snapshot;
+ pdev->pixfmt = pixfmt;
+ pdev->vframes = (fps + 1) * 5;
pdev->valternate = pChoose->alternate;
- pdev->image = pwc_image_sizes[size];
+ pdev->width = pwc_image_sizes[size][0];
+ pdev->height = pwc_image_sizes[size][1];
pdev->vbandlength = pChoose->bandlength;
if (pdev->vbandlength > 0)
- pdev->frame_size = (pdev->vbandlength * pdev->image.y) / 4;
+ pdev->frame_size = (pdev->vbandlength * pdev->height) / 4;
else
- pdev->frame_size = (pdev->image.x * pdev->image.y * 12) / 8;
- PWC_TRACE("frame_size=%d, vframes=%d, vsize=%d, vsnapshot=%d, vbandlength=%d\n",
- pdev->frame_size,pdev->vframes,pdev->vsize,pdev->vsnapshot,pdev->vbandlength);
+ pdev->frame_size = (pdev->width * pdev->height * 12) / 8;
+ PWC_TRACE("frame_size=%d, vframes=%d, vsize=%d, vbandlength=%d\n",
+ pdev->frame_size, pdev->vframes, size, pdev->vbandlength);
return 0;
}
-
-
-/**
- @pdev: device structure
- @width: viewport width
- @height: viewport height
- @frame: framerate, in fps
- @compression: preferred compression ratio
- @snapshot: snapshot mode or streaming
- */
-int pwc_set_video_mode(struct pwc_device *pdev, int width, int height, int frames, int compression, int snapshot)
+int pwc_set_video_mode(struct pwc_device *pdev, int width, int height,
+ int pixfmt, int frames, int *compression, int send_to_cam)
{
int ret, size;
- PWC_DEBUG_FLOW("set_video_mode(%dx%d @ %d, pixfmt %08x).\n", width, height, frames, pdev->pixfmt);
- size = pwc_decode_size(pdev, width, height);
- if (size < 0) {
- PWC_DEBUG_MODULE("Could not find suitable size.\n");
- return -ERANGE;
- }
+ PWC_DEBUG_FLOW("set_video_mode(%dx%d @ %d, pixfmt %08x).\n",
+ width, height, frames, pixfmt);
+ size = pwc_get_size(pdev, width, height);
PWC_TRACE("decode_size = %d.\n", size);
if (DEVICE_USE_CODEC1(pdev->type)) {
- ret = set_video_mode_Nala(pdev, size, frames);
-
+ ret = set_video_mode_Nala(pdev, size, pixfmt, frames,
+ compression, send_to_cam);
} else if (DEVICE_USE_CODEC3(pdev->type)) {
- ret = set_video_mode_Kiara(pdev, size, frames, compression, snapshot);
-
+ ret = set_video_mode_Kiara(pdev, size, pixfmt, frames,
+ compression, send_to_cam);
} else {
- ret = set_video_mode_Timon(pdev, size, frames, compression, snapshot);
+ ret = set_video_mode_Timon(pdev, size, pixfmt, frames,
+ compression, send_to_cam);
}
if (ret < 0) {
PWC_ERROR("Failed to set video mode %s@%d fps; return code = %d\n", size2name[size], frames, ret);
return ret;
}
- pdev->view.x = width;
- pdev->view.y = height;
- pdev->vcompression = compression;
pdev->frame_total_size = pdev->frame_size + pdev->frame_header_size + pdev->frame_trailer_size;
- pwc_set_image_buffer_size(pdev);
- PWC_DEBUG_SIZE("Set viewport to %dx%d, image size is %dx%d.\n", width, height, pwc_image_sizes[size].x, pwc_image_sizes[size].y);
+ PWC_DEBUG_SIZE("Set resolution to %dx%d\n", pdev->width, pdev->height);
return 0;
}
@@ -470,44 +407,15 @@ unsigned int pwc_get_fps(struct pwc_device *pdev, unsigned int index, unsigned i
return ret;
}
-static void pwc_set_image_buffer_size(struct pwc_device *pdev)
-{
- int factor = 0;
-
- /* for V4L2_PIX_FMT_YUV420 */
- switch (pdev->pixfmt) {
- case V4L2_PIX_FMT_YUV420:
- factor = 6;
- break;
- case V4L2_PIX_FMT_PWC1:
- case V4L2_PIX_FMT_PWC2:
- factor = 6; /* can be uncompressed YUV420P */
- break;
- }
-
- /* Set sizes in bytes */
- pdev->image.size = pdev->image.x * pdev->image.y * factor / 4;
- pdev->view.size = pdev->view.x * pdev->view.y * factor / 4;
-
- /* Align offset, or you'll get some very weird results in
- YUV420 mode... x must be multiple of 4 (to get the Y's in
- place), and y even (or you'll mixup U & V). This is less of a
- problem for YUV420P.
- */
- pdev->offset.x = ((pdev->view.x - pdev->image.x) / 2) & 0xFFFC;
- pdev->offset.y = ((pdev->view.y - pdev->image.y) / 2) & 0xFFFE;
-}
-
int pwc_get_u8_ctrl(struct pwc_device *pdev, u8 request, u16 value, int *data)
{
int ret;
- u8 buf;
- ret = recv_control_msg(pdev, request, value, &buf, sizeof(buf));
+ ret = recv_control_msg(pdev, request, value, 1);
if (ret < 0)
return ret;
- *data = buf;
+ *data = pdev->ctrl_buf[0];
return 0;
}
@@ -515,7 +423,8 @@ int pwc_set_u8_ctrl(struct pwc_device *pdev, u8 request, u16 value, u8 data)
{
int ret;
- ret = send_control_msg(pdev, request, value, &data, sizeof(data));
+ pdev->ctrl_buf[0] = data;
+ ret = send_control_msg(pdev, request, value, pdev->ctrl_buf, 1);
if (ret < 0)
return ret;
@@ -525,37 +434,34 @@ int pwc_set_u8_ctrl(struct pwc_device *pdev, u8 request, u16 value, u8 data)
int pwc_get_s8_ctrl(struct pwc_device *pdev, u8 request, u16 value, int *data)
{
int ret;
- s8 buf;
- ret = recv_control_msg(pdev, request, value, &buf, sizeof(buf));
+ ret = recv_control_msg(pdev, request, value, 1);
if (ret < 0)
return ret;
- *data = buf;
+ *data = ((s8 *)pdev->ctrl_buf)[0];
return 0;
}
int pwc_get_u16_ctrl(struct pwc_device *pdev, u8 request, u16 value, int *data)
{
int ret;
- u8 buf[2];
- ret = recv_control_msg(pdev, request, value, buf, sizeof(buf));
+ ret = recv_control_msg(pdev, request, value, 2);
if (ret < 0)
return ret;
- *data = (buf[1] << 8) | buf[0];
+ *data = (pdev->ctrl_buf[1] << 8) | pdev->ctrl_buf[0];
return 0;
}
int pwc_set_u16_ctrl(struct pwc_device *pdev, u8 request, u16 value, u16 data)
{
int ret;
- u8 buf[2];
- buf[0] = data & 0xff;
- buf[1] = data >> 8;
- ret = send_control_msg(pdev, request, value, buf, sizeof(buf));
+ pdev->ctrl_buf[0] = data & 0xff;
+ pdev->ctrl_buf[1] = data >> 8;
+ ret = send_control_msg(pdev, request, value, pdev->ctrl_buf, 2);
if (ret < 0)
return ret;
@@ -576,7 +482,6 @@ int pwc_button_ctrl(struct pwc_device *pdev, u16 value)
/* POWER */
void pwc_camera_power(struct pwc_device *pdev, int power)
{
- char buf;
int r;
if (!pdev->power_save)
@@ -586,69 +491,18 @@ void pwc_camera_power(struct pwc_device *pdev, int power)
return; /* Not supported by Nala or Timon < release 6 */
if (power)
- buf = 0x00; /* active */
+ pdev->ctrl_buf[0] = 0x00; /* active */
else
- buf = 0xFF; /* power save */
- r = send_control_msg(pdev,
- SET_STATUS_CTL, SET_POWER_SAVE_MODE_FORMATTER,
- &buf, sizeof(buf));
-
+ pdev->ctrl_buf[0] = 0xFF; /* power save */
+ r = send_control_msg(pdev, SET_STATUS_CTL,
+ SET_POWER_SAVE_MODE_FORMATTER, pdev->ctrl_buf, 1);
if (r < 0)
PWC_ERROR("Failed to power %s camera (%d)\n",
power ? "on" : "off", r);
}
-static int pwc_set_wb_speed(struct pwc_device *pdev, int speed)
-{
- unsigned char buf;
-
- /* useful range is 0x01..0x20 */
- buf = speed / 0x7f0;
- return send_control_msg(pdev,
- SET_CHROM_CTL, AWB_CONTROL_SPEED_FORMATTER, &buf, sizeof(buf));
-}
-
-static int pwc_get_wb_speed(struct pwc_device *pdev, int *value)
-{
- unsigned char buf;
- int ret;
-
- ret = recv_control_msg(pdev,
- GET_CHROM_CTL, AWB_CONTROL_SPEED_FORMATTER, &buf, sizeof(buf));
- if (ret < 0)
- return ret;
- *value = buf * 0x7f0;
- return 0;
-}
-
-
-static int pwc_set_wb_delay(struct pwc_device *pdev, int delay)
-{
- unsigned char buf;
-
- /* useful range is 0x01..0x3F */
- buf = (delay >> 10);
- return send_control_msg(pdev,
- SET_CHROM_CTL, AWB_CONTROL_DELAY_FORMATTER, &buf, sizeof(buf));
-}
-
-static int pwc_get_wb_delay(struct pwc_device *pdev, int *value)
-{
- unsigned char buf;
- int ret;
-
- ret = recv_control_msg(pdev,
- GET_CHROM_CTL, AWB_CONTROL_DELAY_FORMATTER, &buf, sizeof(buf));
- if (ret < 0)
- return ret;
- *value = buf << 10;
- return 0;
-}
-
-
int pwc_set_leds(struct pwc_device *pdev, int on_value, int off_value)
{
- unsigned char buf[2];
int r;
if (pdev->type < 730)
@@ -664,123 +518,20 @@ int pwc_set_leds(struct pwc_device *pdev, int on_value, int off_value)
if (off_value > 0xff)
off_value = 0xff;
- buf[0] = on_value;
- buf[1] = off_value;
+ pdev->ctrl_buf[0] = on_value;
+ pdev->ctrl_buf[1] = off_value;
r = send_control_msg(pdev,
- SET_STATUS_CTL, LED_FORMATTER, &buf, sizeof(buf));
+ SET_STATUS_CTL, LED_FORMATTER, pdev->ctrl_buf, 2);
if (r < 0)
PWC_ERROR("Failed to set LED on/off time (%d)\n", r);
return r;
}
-static int pwc_get_leds(struct pwc_device *pdev, int *on_value, int *off_value)
-{
- unsigned char buf[2];
- int ret;
-
- if (pdev->type < 730) {
- *on_value = -1;
- *off_value = -1;
- return 0;
- }
-
- ret = recv_control_msg(pdev,
- GET_STATUS_CTL, LED_FORMATTER, &buf, sizeof(buf));
- if (ret < 0)
- return ret;
- *on_value = buf[0] * 100;
- *off_value = buf[1] * 100;
- return 0;
-}
-
-static int _pwc_mpt_reset(struct pwc_device *pdev, int flags)
-{
- unsigned char buf;
-
- buf = flags & 0x03; // only lower two bits are currently used
- return send_control_msg(pdev,
- SET_MPT_CTL, PT_RESET_CONTROL_FORMATTER, &buf, sizeof(buf));
-}
-
-int pwc_mpt_reset(struct pwc_device *pdev, int flags)
-{
- int ret;
- ret = _pwc_mpt_reset(pdev, flags);
- if (ret >= 0) {
- pdev->pan_angle = 0;
- pdev->tilt_angle = 0;
- }
- return ret;
-}
-
-static int _pwc_mpt_set_angle(struct pwc_device *pdev, int pan, int tilt)
-{
- unsigned char buf[4];
-
- /* set new relative angle; angles are expressed in degrees * 100,
- but cam as .5 degree resolution, hence divide by 200. Also
- the angle must be multiplied by 64 before it's send to
- the cam (??)
- */
- pan = 64 * pan / 100;
- tilt = -64 * tilt / 100; /* positive tilt is down, which is not what the user would expect */
- buf[0] = pan & 0xFF;
- buf[1] = (pan >> 8) & 0xFF;
- buf[2] = tilt & 0xFF;
- buf[3] = (tilt >> 8) & 0xFF;
- return send_control_msg(pdev,
- SET_MPT_CTL, PT_RELATIVE_CONTROL_FORMATTER, &buf, sizeof(buf));
-}
-
-int pwc_mpt_set_angle(struct pwc_device *pdev, int pan, int tilt)
-{
- int ret;
-
- /* check absolute ranges */
- if (pan < pdev->angle_range.pan_min ||
- pan > pdev->angle_range.pan_max ||
- tilt < pdev->angle_range.tilt_min ||
- tilt > pdev->angle_range.tilt_max)
- return -ERANGE;
-
- /* go to relative range, check again */
- pan -= pdev->pan_angle;
- tilt -= pdev->tilt_angle;
- /* angles are specified in degrees * 100, thus the limit = 36000 */
- if (pan < -36000 || pan > 36000 || tilt < -36000 || tilt > 36000)
- return -ERANGE;
-
- ret = _pwc_mpt_set_angle(pdev, pan, tilt);
- if (ret >= 0) {
- pdev->pan_angle += pan;
- pdev->tilt_angle += tilt;
- }
- if (ret == -EPIPE) /* stall -> out of range */
- ret = -ERANGE;
- return ret;
-}
-
-static int pwc_mpt_get_status(struct pwc_device *pdev, struct pwc_mpt_status *status)
-{
- int ret;
- unsigned char buf[5];
-
- ret = recv_control_msg(pdev,
- GET_MPT_CTL, PT_STATUS_FORMATTER, &buf, sizeof(buf));
- if (ret < 0)
- return ret;
- status->status = buf[0] & 0x7; // 3 bits are used for reporting
- status->time_pan = (buf[1] << 8) + buf[2];
- status->time_tilt = (buf[3] << 8) + buf[4];
- return 0;
-}
-
#ifdef CONFIG_USB_PWC_DEBUG
int pwc_get_cmos_sensor(struct pwc_device *pdev, int *sensor)
{
- unsigned char buf;
int ret = -1, request;
if (pdev->type < 675)
@@ -790,431 +541,13 @@ int pwc_get_cmos_sensor(struct pwc_device *pdev, int *sensor)
else
request = SENSOR_TYPE_FORMATTER2;
- ret = recv_control_msg(pdev,
- GET_STATUS_CTL, request, &buf, sizeof(buf));
+ ret = recv_control_msg(pdev, GET_STATUS_CTL, request, 1);
if (ret < 0)
return ret;
if (pdev->type < 675)
- *sensor = buf | 0x100;
+ *sensor = pdev->ctrl_buf[0] | 0x100;
else
- *sensor = buf;
+ *sensor = pdev->ctrl_buf[0];
return 0;
}
#endif
-
- /* End of Add-Ons */
- /* ************************************************* */
-
-/* Linux 2.5.something and 2.6 pass direct pointers to arguments of
- ioctl() calls. With 2.4, you have to do tedious copy_from_user()
- and copy_to_user() calls. With these macros we circumvent this,
- and let me maintain only one source file. The functionality is
- exactly the same otherwise.
- */
-
-/* define local variable for arg */
-#define ARG_DEF(ARG_type, ARG_name)\
- ARG_type *ARG_name = arg;
-/* copy arg to local variable */
-#define ARG_IN(ARG_name) /* nothing */
-/* argument itself (referenced) */
-#define ARGR(ARG_name) (*ARG_name)
-/* argument address */
-#define ARGA(ARG_name) ARG_name
-/* copy local variable to arg */
-#define ARG_OUT(ARG_name) /* nothing */
-
-/*
- * Our ctrls use native values, but the old custom pwc ioctl interface expects
- * values from 0 - 65535, define 2 helper functions to scale things. */
-static int pwc_ioctl_g_ctrl(struct v4l2_ctrl *ctrl)
-{
- return v4l2_ctrl_g_ctrl(ctrl) * 65535 / ctrl->maximum;
-}
-
-static int pwc_ioctl_s_ctrl(struct v4l2_ctrl *ctrl, int val)
-{
- return v4l2_ctrl_s_ctrl(ctrl, val * ctrl->maximum / 65535);
-}
-
-long pwc_ioctl(struct pwc_device *pdev, unsigned int cmd, void *arg)
-{
- long ret = 0;
-
- switch(cmd) {
- case VIDIOCPWCRUSER:
- ret = pwc_button_ctrl(pdev, RESTORE_USER_DEFAULTS_FORMATTER);
- break;
-
- case VIDIOCPWCSUSER:
- ret = pwc_button_ctrl(pdev, SAVE_USER_DEFAULTS_FORMATTER);
- break;
-
- case VIDIOCPWCFACTORY:
- ret = pwc_button_ctrl(pdev, RESTORE_FACTORY_DEFAULTS_FORMATTER);
- break;
-
- case VIDIOCPWCSCQUAL:
- {
- ARG_DEF(int, qual)
-
- if (vb2_is_streaming(&pdev->vb_queue)) {
- ret = -EBUSY;
- break;
- }
-
- ARG_IN(qual)
- if (ARGR(qual) < 0 || ARGR(qual) > 3)
- ret = -EINVAL;
- else
- ret = pwc_set_video_mode(pdev, pdev->view.x, pdev->view.y, pdev->vframes, ARGR(qual), pdev->vsnapshot);
- break;
- }
-
- case VIDIOCPWCGCQUAL:
- {
- ARG_DEF(int, qual)
-
- ARGR(qual) = pdev->vcompression;
- ARG_OUT(qual)
- break;
- }
-
- case VIDIOCPWCPROBE:
- {
- ARG_DEF(struct pwc_probe, probe)
-
- strcpy(ARGR(probe).name, pdev->vdev.name);
- ARGR(probe).type = pdev->type;
- ARG_OUT(probe)
- break;
- }
-
- case VIDIOCPWCGSERIAL:
- {
- ARG_DEF(struct pwc_serial, serial)
-
- strcpy(ARGR(serial).serial, pdev->serial);
- ARG_OUT(serial)
- break;
- }
-
- case VIDIOCPWCSAGC:
- {
- ARG_DEF(int, agc)
- ARG_IN(agc)
- ret = v4l2_ctrl_s_ctrl(pdev->autogain, ARGR(agc) < 0);
- if (ret == 0 && ARGR(agc) >= 0)
- ret = pwc_ioctl_s_ctrl(pdev->gain, ARGR(agc));
- break;
- }
-
- case VIDIOCPWCGAGC:
- {
- ARG_DEF(int, agc)
- if (v4l2_ctrl_g_ctrl(pdev->autogain))
- ARGR(agc) = -1;
- else
- ARGR(agc) = pwc_ioctl_g_ctrl(pdev->gain);
- ARG_OUT(agc)
- break;
- }
-
- case VIDIOCPWCSSHUTTER:
- {
- ARG_DEF(int, shutter)
- ARG_IN(shutter)
- ret = v4l2_ctrl_s_ctrl(pdev->exposure_auto,
- /* Menu idx 0 = auto, idx 1 = manual */
- ARGR(shutter) >= 0);
- if (ret == 0 && ARGR(shutter) >= 0)
- ret = pwc_ioctl_s_ctrl(pdev->exposure, ARGR(shutter));
- break;
- }
-
- case VIDIOCPWCSAWB:
- {
- ARG_DEF(struct pwc_whitebalance, wb)
- ARG_IN(wb)
- ret = v4l2_ctrl_s_ctrl(pdev->auto_white_balance,
- ARGR(wb).mode);
- if (ret == 0 && ARGR(wb).mode == PWC_WB_MANUAL)
- ret = pwc_ioctl_s_ctrl(pdev->red_balance,
- ARGR(wb).manual_red);
- if (ret == 0 && ARGR(wb).mode == PWC_WB_MANUAL)
- ret = pwc_ioctl_s_ctrl(pdev->blue_balance,
- ARGR(wb).manual_blue);
- break;
- }
-
- case VIDIOCPWCGAWB:
- {
- ARG_DEF(struct pwc_whitebalance, wb)
- ARGR(wb).mode = v4l2_ctrl_g_ctrl(pdev->auto_white_balance);
- ARGR(wb).manual_red = ARGR(wb).read_red =
- pwc_ioctl_g_ctrl(pdev->red_balance);
- ARGR(wb).manual_blue = ARGR(wb).read_blue =
- pwc_ioctl_g_ctrl(pdev->blue_balance);
- ARG_OUT(wb)
- break;
- }
-
- case VIDIOCPWCSAWBSPEED:
- {
- ARG_DEF(struct pwc_wb_speed, wbs)
-
- if (ARGR(wbs).control_speed > 0) {
- ret = pwc_set_wb_speed(pdev, ARGR(wbs).control_speed);
- }
- if (ARGR(wbs).control_delay > 0) {
- ret = pwc_set_wb_delay(pdev, ARGR(wbs).control_delay);
- }
- break;
- }
-
- case VIDIOCPWCGAWBSPEED:
- {
- ARG_DEF(struct pwc_wb_speed, wbs)
-
- ret = pwc_get_wb_speed(pdev, &ARGR(wbs).control_speed);
- if (ret < 0)
- break;
- ret = pwc_get_wb_delay(pdev, &ARGR(wbs).control_delay);
- if (ret < 0)
- break;
- ARG_OUT(wbs)
- break;
- }
-
- case VIDIOCPWCSLED:
- {
- ARG_DEF(struct pwc_leds, leds)
-
- ARG_IN(leds)
- ret = pwc_set_leds(pdev, ARGR(leds).led_on, ARGR(leds).led_off);
- break;
- }
-
-
- case VIDIOCPWCGLED:
- {
- ARG_DEF(struct pwc_leds, leds)
-
- ret = pwc_get_leds(pdev, &ARGR(leds).led_on, &ARGR(leds).led_off);
- ARG_OUT(leds)
- break;
- }
-
- case VIDIOCPWCSCONTOUR:
- {
- ARG_DEF(int, contour)
- ARG_IN(contour)
- ret = v4l2_ctrl_s_ctrl(pdev->autocontour, ARGR(contour) < 0);
- if (ret == 0 && ARGR(contour) >= 0)
- ret = pwc_ioctl_s_ctrl(pdev->contour, ARGR(contour));
- break;
- }
-
- case VIDIOCPWCGCONTOUR:
- {
- ARG_DEF(int, contour)
- if (v4l2_ctrl_g_ctrl(pdev->autocontour))
- ARGR(contour) = -1;
- else
- ARGR(contour) = pwc_ioctl_g_ctrl(pdev->contour);
- ARG_OUT(contour)
- break;
- }
-
- case VIDIOCPWCSBACKLIGHT:
- {
- ARG_DEF(int, backlight)
- ARG_IN(backlight)
- ret = v4l2_ctrl_s_ctrl(pdev->backlight, ARGR(backlight));
- break;
- }
-
- case VIDIOCPWCGBACKLIGHT:
- {
- ARG_DEF(int, backlight)
- ARGR(backlight) = v4l2_ctrl_g_ctrl(pdev->backlight);
- ARG_OUT(backlight)
- break;
- }
-
- case VIDIOCPWCSFLICKER:
- {
- ARG_DEF(int, flicker)
- ARG_IN(flicker)
- ret = v4l2_ctrl_s_ctrl(pdev->flicker, ARGR(flicker));
- break;
- }
-
- case VIDIOCPWCGFLICKER:
- {
- ARG_DEF(int, flicker)
- ARGR(flicker) = v4l2_ctrl_g_ctrl(pdev->flicker);
- ARG_OUT(flicker)
- break;
- }
-
- case VIDIOCPWCSDYNNOISE:
- {
- ARG_DEF(int, dynnoise)
- ARG_IN(dynnoise)
- ret = v4l2_ctrl_s_ctrl(pdev->noise_reduction, ARGR(dynnoise));
- break;
- }
-
- case VIDIOCPWCGDYNNOISE:
- {
- ARG_DEF(int, dynnoise)
- ARGR(dynnoise) = v4l2_ctrl_g_ctrl(pdev->noise_reduction);
- ARG_OUT(dynnoise);
- break;
- }
-
- case VIDIOCPWCGREALSIZE:
- {
- ARG_DEF(struct pwc_imagesize, size)
-
- ARGR(size).width = pdev->image.x;
- ARGR(size).height = pdev->image.y;
- ARG_OUT(size)
- break;
- }
-
- case VIDIOCPWCMPTRESET:
- {
- if (pdev->features & FEATURE_MOTOR_PANTILT)
- {
- ARG_DEF(int, flags)
-
- ARG_IN(flags)
- ret = pwc_mpt_reset(pdev, ARGR(flags));
- }
- else
- {
- ret = -ENXIO;
- }
- break;
- }
-
- case VIDIOCPWCMPTGRANGE:
- {
- if (pdev->features & FEATURE_MOTOR_PANTILT)
- {
- ARG_DEF(struct pwc_mpt_range, range)
-
- ARGR(range) = pdev->angle_range;
- ARG_OUT(range)
- }
- else
- {
- ret = -ENXIO;
- }
- break;
- }
-
- case VIDIOCPWCMPTSANGLE:
- {
- int new_pan, new_tilt;
-
- if (pdev->features & FEATURE_MOTOR_PANTILT)
- {
- ARG_DEF(struct pwc_mpt_angles, angles)
-
- ARG_IN(angles)
- /* The camera can only set relative angles, so
- do some calculations when getting an absolute angle .
- */
- if (ARGR(angles).absolute)
- {
- new_pan = ARGR(angles).pan;
- new_tilt = ARGR(angles).tilt;
- }
- else
- {
- new_pan = pdev->pan_angle + ARGR(angles).pan;
- new_tilt = pdev->tilt_angle + ARGR(angles).tilt;
- }
- ret = pwc_mpt_set_angle(pdev, new_pan, new_tilt);
- }
- else
- {
- ret = -ENXIO;
- }
- break;
- }
-
- case VIDIOCPWCMPTGANGLE:
- {
-
- if (pdev->features & FEATURE_MOTOR_PANTILT)
- {
- ARG_DEF(struct pwc_mpt_angles, angles)
-
- ARGR(angles).absolute = 1;
- ARGR(angles).pan = pdev->pan_angle;
- ARGR(angles).tilt = pdev->tilt_angle;
- ARG_OUT(angles)
- }
- else
- {
- ret = -ENXIO;
- }
- break;
- }
-
- case VIDIOCPWCMPTSTATUS:
- {
- if (pdev->features & FEATURE_MOTOR_PANTILT)
- {
- ARG_DEF(struct pwc_mpt_status, status)
-
- ret = pwc_mpt_get_status(pdev, ARGA(status));
- ARG_OUT(status)
- }
- else
- {
- ret = -ENXIO;
- }
- break;
- }
-
- case VIDIOCPWCGVIDCMD:
- {
- ARG_DEF(struct pwc_video_command, vcmd);
-
- ARGR(vcmd).type = pdev->type;
- ARGR(vcmd).release = pdev->release;
- ARGR(vcmd).command_len = pdev->cmd_len;
- memcpy(&ARGR(vcmd).command_buf, pdev->cmd_buf, pdev->cmd_len);
- ARGR(vcmd).bandlength = pdev->vbandlength;
- ARGR(vcmd).frame_size = pdev->frame_size;
- ARG_OUT(vcmd)
- break;
- }
- /*
- case VIDIOCPWCGVIDTABLE:
- {
- ARG_DEF(struct pwc_table_init_buffer, table);
- ARGR(table).len = pdev->cmd_len;
- memcpy(&ARGR(table).buffer, pdev->decompress_data, pdev->decompressor->table_size);
- ARG_OUT(table)
- break;
- }
- */
-
- default:
- ret = -ENOIOCTLCMD;
- break;
- }
-
- if (ret > 0)
- return 0;
- return ret;
-}
-
-
-/* vim: set cinoptions= formatoptions=croql cindent shiftwidth=8 tabstop=8: */
diff --git a/drivers/media/video/pwc/pwc-dec1.c b/drivers/media/video/pwc/pwc-dec1.c
index be0e02cb487f..e899036aadf4 100644
--- a/drivers/media/video/pwc/pwc-dec1.c
+++ b/drivers/media/video/pwc/pwc-dec1.c
@@ -22,19 +22,11 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#include "pwc-dec1.h"
+#include "pwc.h"
-int pwc_dec1_init(struct pwc_device *pwc, int type, int release, void *buffer)
+void pwc_dec1_init(struct pwc_device *pdev, const unsigned char *cmd)
{
- struct pwc_dec1_private *pdec;
+ struct pwc_dec1_private *pdec = &pdev->dec1;
- if (pwc->decompress_data == NULL) {
- pdec = kmalloc(sizeof(struct pwc_dec1_private), GFP_KERNEL);
- if (pdec == NULL)
- return -ENOMEM;
- pwc->decompress_data = pdec;
- }
- pdec = pwc->decompress_data;
-
- return 0;
+ pdec->version = pdev->release;
}
diff --git a/drivers/media/video/pwc/pwc-dec1.h b/drivers/media/video/pwc/pwc-dec1.h
index a57d8601080b..c565ef8f52fb 100644
--- a/drivers/media/video/pwc/pwc-dec1.h
+++ b/drivers/media/video/pwc/pwc-dec1.h
@@ -25,13 +25,15 @@
#ifndef PWC_DEC1_H
#define PWC_DEC1_H
-#include "pwc.h"
+#include <linux/mutex.h>
+
+struct pwc_device;
struct pwc_dec1_private
{
int version;
};
-int pwc_dec1_init(struct pwc_device *pwc, int type, int release, void *buffer);
+void pwc_dec1_init(struct pwc_device *pdev, const unsigned char *cmd);
#endif
diff --git a/drivers/media/video/pwc/pwc-dec23.c b/drivers/media/video/pwc/pwc-dec23.c
index 06a4e877ba40..3792fedff951 100644
--- a/drivers/media/video/pwc/pwc-dec23.c
+++ b/drivers/media/video/pwc/pwc-dec23.c
@@ -27,7 +27,6 @@
#include "pwc-timon.h"
#include "pwc-kiara.h"
#include "pwc-dec23.h"
-#include <media/pwc-ioctl.h>
#include <linux/string.h>
#include <linux/slab.h>
@@ -51,13 +50,6 @@
# define USE_LOOKUP_TABLE_TO_CLAMP 1
#endif
-/*
- * ENABLE_BAYER_DECODER
- * 0: bayer decoder is not build (save some space)
- * 1: bayer decoder is build and can be used
- */
-#define ENABLE_BAYER_DECODER 0
-
static void build_subblock_pattern(struct pwc_dec23_private *pdec)
{
static const unsigned int initial_values[12] = {
@@ -302,20 +294,17 @@ static unsigned char pwc_crop_table[256 + 2*MAX_OUTER_CROP_VALUE];
/* If the type or the command change, we rebuild the lookup table */
-int pwc_dec23_init(struct pwc_device *pwc, int type, unsigned char *cmd)
+void pwc_dec23_init(struct pwc_device *pdev, const unsigned char *cmd)
{
int flags, version, shift, i;
- struct pwc_dec23_private *pdec;
+ struct pwc_dec23_private *pdec = &pdev->dec23;
- if (pwc->decompress_data == NULL) {
- pdec = kmalloc(sizeof(struct pwc_dec23_private), GFP_KERNEL);
- if (pdec == NULL)
- return -ENOMEM;
- pwc->decompress_data = pdec;
- }
- pdec = pwc->decompress_data;
+ mutex_init(&pdec->lock);
- if (DEVICE_USE_CODEC3(type)) {
+ if (pdec->last_cmd_valid && pdec->last_cmd == cmd[2])
+ return;
+
+ if (DEVICE_USE_CODEC3(pdev->type)) {
flags = cmd[2] & 0x18;
if (flags == 8)
pdec->nbits = 7; /* More bits, mean more bits to encode the stream, but better quality */
@@ -362,7 +351,8 @@ int pwc_dec23_init(struct pwc_device *pwc, int type, unsigned char *cmd)
pwc_crop_table[MAX_OUTER_CROP_VALUE+256+i] = 255;
#endif
- return 0;
+ pdec->last_cmd = cmd[2];
+ pdec->last_cmd_valid = 1;
}
/*
@@ -467,123 +457,6 @@ static void copy_image_block_CrCb(const int *src, unsigned char *dst, unsigned i
#endif
}
-#if ENABLE_BAYER_DECODER
-/*
- * Format: 8x2 pixels
- * . G . G . G . G . G . G . G
- * . . . . . . . . . . . . . .
- * . G . G . G . G . G . G . G
- * . . . . . . . . . . . . . .
- * or
- * . . . . . . . . . . . . . .
- * G . G . G . G . G . G . G .
- * . . . . . . . . . . . . . .
- * G . G . G . G . G . G . G .
-*/
-static void copy_image_block_Green(const int *src, unsigned char *dst, unsigned int bytes_per_line, unsigned int scalebits)
-{
-#if UNROLL_LOOP_FOR_COPY
- /* Unroll all loops */
- const unsigned char *cm = pwc_crop_table+MAX_OUTER_CROP_VALUE;
- unsigned char *d = dst;
- const int *c = src;
-
- d[0] = cm[c[0] >> scalebits];
- d[2] = cm[c[1] >> scalebits];
- d[4] = cm[c[2] >> scalebits];
- d[6] = cm[c[3] >> scalebits];
- d[8] = cm[c[4] >> scalebits];
- d[10] = cm[c[5] >> scalebits];
- d[12] = cm[c[6] >> scalebits];
- d[14] = cm[c[7] >> scalebits];
-
- d = dst + bytes_per_line;
- d[0] = cm[c[8] >> scalebits];
- d[2] = cm[c[9] >> scalebits];
- d[4] = cm[c[10] >> scalebits];
- d[6] = cm[c[11] >> scalebits];
- d[8] = cm[c[12] >> scalebits];
- d[10] = cm[c[13] >> scalebits];
- d[12] = cm[c[14] >> scalebits];
- d[14] = cm[c[15] >> scalebits];
-#else
- int i;
- unsigned char *d;
- const int *c = src;
-
- d = dst;
- for (i = 0; i < 8; i++, c++)
- d[i*2] = CLAMP((*c) >> scalebits);
-
- d = dst + bytes_per_line;
- for (i = 0; i < 8; i++, c++)
- d[i*2] = CLAMP((*c) >> scalebits);
-#endif
-}
-#endif
-
-#if ENABLE_BAYER_DECODER
-/*
- * Format: 4x4 pixels
- * R . R . R . R
- * . B . B . B .
- * R . R . R . R
- * . B . B . B .
- */
-static void copy_image_block_RedBlue(const int *src, unsigned char *dst, unsigned int bytes_per_line, unsigned int scalebits)
-{
-#if UNROLL_LOOP_FOR_COPY
- /* Unroll all loops */
- const unsigned char *cm = pwc_crop_table+MAX_OUTER_CROP_VALUE;
- unsigned char *d = dst;
- const int *c = src;
-
- d[0] = cm[c[0] >> scalebits];
- d[2] = cm[c[1] >> scalebits];
- d[4] = cm[c[2] >> scalebits];
- d[6] = cm[c[3] >> scalebits];
-
- d = dst + bytes_per_line;
- d[1] = cm[c[4] >> scalebits];
- d[3] = cm[c[5] >> scalebits];
- d[5] = cm[c[6] >> scalebits];
- d[7] = cm[c[7] >> scalebits];
-
- d = dst + bytes_per_line*2;
- d[0] = cm[c[8] >> scalebits];
- d[2] = cm[c[9] >> scalebits];
- d[4] = cm[c[10] >> scalebits];
- d[6] = cm[c[11] >> scalebits];
-
- d = dst + bytes_per_line*3;
- d[1] = cm[c[12] >> scalebits];
- d[3] = cm[c[13] >> scalebits];
- d[5] = cm[c[14] >> scalebits];
- d[7] = cm[c[15] >> scalebits];
-#else
- int i;
- unsigned char *d;
- const int *c = src;
-
- d = dst;
- for (i = 0; i < 4; i++, c++)
- d[i*2] = CLAMP((*c) >> scalebits);
-
- d = dst + bytes_per_line;
- for (i = 0; i < 4; i++, c++)
- d[i*2+1] = CLAMP((*c) >> scalebits);
-
- d = dst + bytes_per_line*2;
- for (i = 0; i < 4; i++, c++)
- d[i*2] = CLAMP((*c) >> scalebits);
-
- d = dst + bytes_per_line*3;
- for (i = 0; i < 4; i++, c++)
- d[i*2+1] = CLAMP((*c) >> scalebits);
-#endif
-}
-#endif
-
/*
* To manage the stream, we keep bits in a 32 bits register.
* fill_nbits(n): fill the reservoir with at least n bits
@@ -775,146 +648,44 @@ static void DecompressBand23(struct pwc_dec23_private *pdec,
}
-#if ENABLE_BAYER_DECODER
-/*
- * Size need to be a multiple of 8 in width
- *
- * Return a block of four line encoded like this:
- *
- * G R G R G R G R G R G R G R G R
- * B G B G B G B G B G B G B G B G
- * G R G R G R G R G R G R G R G R
- * B G B G B G B G B G B G B G B G
- *
- */
-static void DecompressBandBayer(struct pwc_dec23_private *pdec,
- const unsigned char *rawyuv,
- unsigned char *rgbbayer,
- unsigned int compressed_image_width,
- unsigned int real_image_width)
-{
- int compression_index, nblocks;
- const unsigned char *ptable0004;
- const unsigned char *ptable8004;
- unsigned char *dest;
-
- pdec->reservoir = 0;
- pdec->nbits_in_reservoir = 0;
- pdec->stream = rawyuv + 1; /* The first byte of the stream is skipped */
-
- get_nbits(pdec, 4, compression_index);
-
- /* pass 1: uncompress RB component */
- nblocks = compressed_image_width / 4;
-
- ptable0004 = pdec->table_0004_pass1[compression_index];
- ptable8004 = pdec->table_8004_pass1[compression_index];
- dest = rgbbayer;
-
- /* Each block decode a square of 4x4 */
- while (nblocks) {
- decode_block(pdec, ptable0004, ptable8004);
- copy_image_block_RedBlue(pdec->temp_colors, rgbbayer, real_image_width, pdec->scalebits);
- dest += 8;
- nblocks--;
- }
-
- /* pass 2: uncompress G component */
- nblocks = compressed_image_width / 8;
-
- ptable0004 = pdec->table_0004_pass2[compression_index];
- ptable8004 = pdec->table_8004_pass2[compression_index];
-
- /* Each block decode a square of 4x4 */
- while (nblocks) {
- decode_block(pdec, ptable0004, ptable8004);
- copy_image_block_Green(pdec->temp_colors, rgbbayer+1, real_image_width, pdec->scalebits);
-
- decode_block(pdec, ptable0004, ptable8004);
- copy_image_block_Green(pdec->temp_colors, rgbbayer+real_image_width, real_image_width, pdec->scalebits);
-
- rgbbayer += 16;
- nblocks -= 2;
- }
-}
-#endif
-
-
/**
*
* Uncompress a pwc23 buffer.
*
- * pwc.view: size of the image wanted
- * pwc.image: size of the image returned by the camera
- * pwc.offset: (x,y) to displayer image in the view
- *
* src: raw data
* dst: image output
- * flags: PWCX_FLAG_PLANAR or PWCX_FLAG_BAYER
*/
-void pwc_dec23_decompress(const struct pwc_device *pwc,
+void pwc_dec23_decompress(struct pwc_device *pdev,
const void *src,
- void *dst,
- int flags)
+ void *dst)
{
- int bandlines_left, stride, bytes_per_block;
-
- bandlines_left = pwc->image.y / 4;
- bytes_per_block = pwc->view.x * 4;
-
- if (flags & PWCX_FLAG_BAYER) {
-#if ENABLE_BAYER_DECODER
- /* RGB Bayer format */
- unsigned char *rgbout;
-
- stride = pwc->view.x * pwc->offset.y;
- rgbout = dst + stride + pwc->offset.x;
-
-
- while (bandlines_left--) {
-
- DecompressBandBayer(pwc->decompress_data,
- src,
- rgbout,
- pwc->image.x, pwc->view.x);
-
- src += pwc->vbandlength;
- rgbout += bytes_per_block;
-
- }
-#else
- memset(dst, 0, pwc->view.x * pwc->view.y);
-#endif
-
- } else {
- /* YUV420P image format */
- unsigned char *pout_planar_y;
- unsigned char *pout_planar_u;
- unsigned char *pout_planar_v;
- unsigned int plane_size;
-
- plane_size = pwc->view.x * pwc->view.y;
-
- /* offset in Y plane */
- stride = pwc->view.x * pwc->offset.y;
- pout_planar_y = dst + stride + pwc->offset.x;
-
- /* offsets in U/V planes */
- stride = (pwc->view.x * pwc->offset.y) / 4 + pwc->offset.x / 2;
- pout_planar_u = dst + plane_size + stride;
- pout_planar_v = dst + plane_size + plane_size / 4 + stride;
-
- while (bandlines_left--) {
-
- DecompressBand23(pwc->decompress_data,
- src,
- pout_planar_y, pout_planar_u, pout_planar_v,
- pwc->image.x, pwc->view.x);
- src += pwc->vbandlength;
- pout_planar_y += bytes_per_block;
- pout_planar_u += pwc->view.x;
- pout_planar_v += pwc->view.x;
-
- }
+ int bandlines_left, bytes_per_block;
+ struct pwc_dec23_private *pdec = &pdev->dec23;
+
+ /* YUV420P image format */
+ unsigned char *pout_planar_y;
+ unsigned char *pout_planar_u;
+ unsigned char *pout_planar_v;
+ unsigned int plane_size;
+
+ mutex_lock(&pdec->lock);
+
+ bandlines_left = pdev->height / 4;
+ bytes_per_block = pdev->width * 4;
+ plane_size = pdev->height * pdev->width;
+
+ pout_planar_y = dst;
+ pout_planar_u = dst + plane_size;
+ pout_planar_v = dst + plane_size + plane_size / 4;
+
+ while (bandlines_left--) {
+ DecompressBand23(pdec, src,
+ pout_planar_y, pout_planar_u, pout_planar_v,
+ pdev->width, pdev->width);
+ src += pdev->vbandlength;
+ pout_planar_y += bytes_per_block;
+ pout_planar_u += pdev->width;
+ pout_planar_v += pdev->width;
}
+ mutex_unlock(&pdec->lock);
}
diff --git a/drivers/media/video/pwc/pwc-dec23.h b/drivers/media/video/pwc/pwc-dec23.h
index a0ac4f3dff81..c655b1c1e6a9 100644
--- a/drivers/media/video/pwc/pwc-dec23.h
+++ b/drivers/media/video/pwc/pwc-dec23.h
@@ -25,15 +25,20 @@
#ifndef PWC_DEC23_H
#define PWC_DEC23_H
-#include "pwc.h"
+struct pwc_device;
struct pwc_dec23_private
{
+ struct mutex lock;
+
+ unsigned char last_cmd, last_cmd_valid;
+
unsigned int scalebits;
unsigned int nbitsmask, nbits; /* Number of bits of a color in the compressed stream */
unsigned int reservoir;
unsigned int nbits_in_reservoir;
+
const unsigned char *stream;
int temp_colors[16];
@@ -49,9 +54,8 @@ struct pwc_dec23_private
};
-int pwc_dec23_init(struct pwc_device *pwc, int type, unsigned char *cmd);
-void pwc_dec23_decompress(const struct pwc_device *pwc,
+void pwc_dec23_init(struct pwc_device *pdev, const unsigned char *cmd);
+void pwc_dec23_decompress(struct pwc_device *pdev,
const void *src,
- void *dst,
- int flags);
+ void *dst);
#endif
diff --git a/drivers/media/video/pwc/pwc-if.c b/drivers/media/video/pwc/pwc-if.c
index 01ff643e682d..122fbd0081eb 100644
--- a/drivers/media/video/pwc/pwc-if.c
+++ b/drivers/media/video/pwc/pwc-if.c
@@ -128,33 +128,23 @@ static struct usb_driver pwc_driver = {
#define MAX_DEV_HINTS 20
#define MAX_ISOC_ERRORS 20
-static int default_fps = 10;
#ifdef CONFIG_USB_PWC_DEBUG
int pwc_trace = PWC_DEBUG_LEVEL;
#endif
static int power_save = -1;
-static int led_on = 100, led_off; /* defaults to LED that is on while in use */
-static int pwc_preferred_compression = 1; /* 0..3 = uncompressed..high */
-static struct {
- int type;
- char serial_number[30];
- int device_node;
- struct pwc_device *pdev;
-} device_hint[MAX_DEV_HINTS];
+static int leds[2] = { 100, 0 };
/***/
-static int pwc_video_open(struct file *file);
static int pwc_video_close(struct file *file);
static ssize_t pwc_video_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos);
static unsigned int pwc_video_poll(struct file *file, poll_table *wait);
static int pwc_video_mmap(struct file *file, struct vm_area_struct *vma);
-static void pwc_video_release(struct video_device *vfd);
static const struct v4l2_file_operations pwc_fops = {
.owner = THIS_MODULE,
- .open = pwc_video_open,
+ .open = v4l2_fh_open,
.release = pwc_video_close,
.read = pwc_video_read,
.poll = pwc_video_poll,
@@ -163,7 +153,7 @@ static const struct v4l2_file_operations pwc_fops = {
};
static struct video_device pwc_template = {
.name = "Philips Webcam", /* Filled in later */
- .release = pwc_video_release,
+ .release = video_device_release_empty,
.fops = &pwc_fops,
.ioctl_ops = &pwc_ioctl_ops,
};
@@ -191,7 +181,6 @@ static void pwc_snapshot_button(struct pwc_device *pdev, int down)
{
if (down) {
PWC_TRACE("Snapshot button pressed.\n");
- pdev->snapshot_button_status = 1;
} else {
PWC_TRACE("Snapshot button released.\n");
}
@@ -375,6 +364,7 @@ static int pwc_isoc_init(struct pwc_device *pdev)
int i, j, ret;
struct usb_interface *intf;
struct usb_host_interface *idesc = NULL;
+ int compression = 0; /* 0..3 = uncompressed..high */
if (pdev->iso_init)
return 0;
@@ -386,6 +376,12 @@ static int pwc_isoc_init(struct pwc_device *pdev)
pdev->visoc_errors = 0;
udev = pdev->udev;
+retry:
+ /* We first try with low compression and then retry with a higher
+ compression setting if there is not enough bandwidth. */
+ ret = pwc_set_video_mode(pdev, pdev->width, pdev->height, pdev->pixfmt,
+ pdev->vframes, &compression, 1);
+
/* Get the current alternate interface, adjust packet size */
intf = usb_ifnum_to_if(udev, 0);
if (intf)
@@ -408,9 +404,12 @@ static int pwc_isoc_init(struct pwc_device *pdev)
}
/* Set alternate interface */
- ret = 0;
PWC_DEBUG_OPEN("Setting alternate interface %d\n", pdev->valternate);
ret = usb_set_interface(pdev->udev, 0, pdev->valternate);
+ if (ret == -ENOSPC && compression < 3) {
+ compression++;
+ goto retry;
+ }
if (ret < 0)
return ret;
@@ -454,6 +453,12 @@ static int pwc_isoc_init(struct pwc_device *pdev)
/* link */
for (i = 0; i < MAX_ISO_BUFS; i++) {
ret = usb_submit_urb(pdev->urbs[i], GFP_KERNEL);
+ if (ret == -ENOSPC && compression < 3) {
+ compression++;
+ pdev->iso_init = 1;
+ pwc_isoc_cleanup(pdev);
+ goto retry;
+ }
if (ret) {
PWC_ERROR("isoc_init() submit_urb %d failed with error %d\n", i, ret);
pdev->iso_init = 1;
@@ -517,12 +522,11 @@ static void pwc_isoc_cleanup(struct pwc_device *pdev)
PWC_DEBUG_OPEN("<< pwc_isoc_cleanup()\n");
}
-/*
- * Release all queued buffers, no need to take queued_bufs_lock, since all
- * iso urbs have been killed when we're called so pwc_isoc_handler won't run.
- */
static void pwc_cleanup_queued_bufs(struct pwc_device *pdev)
{
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&pdev->queued_bufs_lock, flags);
while (!list_empty(&pdev->queued_bufs)) {
struct pwc_frame_buf *buf;
@@ -531,84 +535,7 @@ static void pwc_cleanup_queued_bufs(struct pwc_device *pdev)
list_del(&buf->list);
vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
}
-}
-
-/*********
- * sysfs
- *********/
-static struct pwc_device *cd_to_pwc(struct device *cd)
-{
- struct video_device *vdev = to_video_device(cd);
- return video_get_drvdata(vdev);
-}
-
-static ssize_t show_pan_tilt(struct device *class_dev,
- struct device_attribute *attr, char *buf)
-{
- struct pwc_device *pdev = cd_to_pwc(class_dev);
- return sprintf(buf, "%d %d\n", pdev->pan_angle, pdev->tilt_angle);
-}
-
-static ssize_t store_pan_tilt(struct device *class_dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct pwc_device *pdev = cd_to_pwc(class_dev);
- int pan, tilt;
- int ret = -EINVAL;
-
- if (strncmp(buf, "reset", 5) == 0)
- ret = pwc_mpt_reset(pdev, 0x3);
-
- else if (sscanf(buf, "%d %d", &pan, &tilt) > 0)
- ret = pwc_mpt_set_angle(pdev, pan, tilt);
-
- if (ret < 0)
- return ret;
- return strlen(buf);
-}
-static DEVICE_ATTR(pan_tilt, S_IRUGO | S_IWUSR, show_pan_tilt,
- store_pan_tilt);
-
-static ssize_t show_snapshot_button_status(struct device *class_dev,
- struct device_attribute *attr, char *buf)
-{
- struct pwc_device *pdev = cd_to_pwc(class_dev);
- int status = pdev->snapshot_button_status;
- pdev->snapshot_button_status = 0;
- return sprintf(buf, "%d\n", status);
-}
-
-static DEVICE_ATTR(button, S_IRUGO | S_IWUSR, show_snapshot_button_status,
- NULL);
-
-static int pwc_create_sysfs_files(struct pwc_device *pdev)
-{
- int rc;
-
- rc = device_create_file(&pdev->vdev.dev, &dev_attr_button);
- if (rc)
- goto err;
- if (pdev->features & FEATURE_MOTOR_PANTILT) {
- rc = device_create_file(&pdev->vdev.dev, &dev_attr_pan_tilt);
- if (rc)
- goto err_button;
- }
-
- return 0;
-
-err_button:
- device_remove_file(&pdev->vdev.dev, &dev_attr_button);
-err:
- PWC_ERROR("Could not create sysfs files.\n");
- return rc;
-}
-
-static void pwc_remove_sysfs_files(struct pwc_device *pdev)
-{
- if (pdev->features & FEATURE_MOTOR_PANTILT)
- device_remove_file(&pdev->vdev.dev, &dev_attr_pan_tilt);
- device_remove_file(&pdev->vdev.dev, &dev_attr_button);
+ spin_unlock_irqrestore(&pdev->queued_bufs_lock, flags);
}
#ifdef CONFIG_USB_PWC_DEBUG
@@ -644,85 +571,60 @@ static const char *pwc_sensor_type_to_string(unsigned int sensor_type)
/***************************************************************************/
/* Video4Linux functions */
-static int pwc_video_open(struct file *file)
+int pwc_test_n_set_capt_file(struct pwc_device *pdev, struct file *file)
{
- struct video_device *vdev = video_devdata(file);
- struct pwc_device *pdev;
-
- PWC_DEBUG_OPEN(">> video_open called(vdev = 0x%p).\n", vdev);
-
- pdev = video_get_drvdata(vdev);
- if (!pdev->udev)
- return -ENODEV;
+ int r = 0;
- file->private_data = vdev;
- PWC_DEBUG_OPEN("<< video_open() returns 0.\n");
- return 0;
+ mutex_lock(&pdev->capt_file_lock);
+ if (pdev->capt_file != NULL &&
+ pdev->capt_file != file) {
+ r = -EBUSY;
+ goto leave;
+ }
+ pdev->capt_file = file;
+leave:
+ mutex_unlock(&pdev->capt_file_lock);
+ return r;
}
-static void pwc_video_release(struct video_device *vfd)
+static void pwc_video_release(struct v4l2_device *v)
{
- struct pwc_device *pdev = container_of(vfd, struct pwc_device, vdev);
- int hint;
-
- /* search device_hint[] table if we occupy a slot, by any chance */
- for (hint = 0; hint < MAX_DEV_HINTS; hint++)
- if (device_hint[hint].pdev == pdev)
- device_hint[hint].pdev = NULL;
-
- /* Free intermediate decompression buffer & tables */
- if (pdev->decompress_data != NULL) {
- PWC_DEBUG_MEMORY("Freeing decompression buffer at %p.\n",
- pdev->decompress_data);
- kfree(pdev->decompress_data);
- pdev->decompress_data = NULL;
- }
+ struct pwc_device *pdev = container_of(v, struct pwc_device, v4l2_dev);
v4l2_ctrl_handler_free(&pdev->ctrl_handler);
-
+ kfree(pdev->ctrl_buf);
kfree(pdev);
}
static int pwc_video_close(struct file *file)
{
- struct video_device *vdev = file->private_data;
- struct pwc_device *pdev;
-
- PWC_DEBUG_OPEN(">> video_close called(vdev = 0x%p).\n", vdev);
+ struct pwc_device *pdev = video_drvdata(file);
- pdev = video_get_drvdata(vdev);
if (pdev->capt_file == file) {
vb2_queue_release(&pdev->vb_queue);
pdev->capt_file = NULL;
}
-
- PWC_DEBUG_OPEN("<< video_close()\n");
- return 0;
+ return v4l2_fh_release(file);
}
static ssize_t pwc_video_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
- struct video_device *vdev = file->private_data;
- struct pwc_device *pdev = video_get_drvdata(vdev);
+ struct pwc_device *pdev = video_drvdata(file);
if (!pdev->udev)
return -ENODEV;
- if (pdev->capt_file != NULL &&
- pdev->capt_file != file)
+ if (pwc_test_n_set_capt_file(pdev, file))
return -EBUSY;
- pdev->capt_file = file;
-
return vb2_read(&pdev->vb_queue, buf, count, ppos,
file->f_flags & O_NONBLOCK);
}
static unsigned int pwc_video_poll(struct file *file, poll_table *wait)
{
- struct video_device *vdev = file->private_data;
- struct pwc_device *pdev = video_get_drvdata(vdev);
+ struct pwc_device *pdev = video_drvdata(file);
if (!pdev->udev)
return POLL_ERR;
@@ -732,8 +634,7 @@ static unsigned int pwc_video_poll(struct file *file, poll_table *wait)
static int pwc_video_mmap(struct file *file, struct vm_area_struct *vma)
{
- struct video_device *vdev = file->private_data;
- struct pwc_device *pdev = video_get_drvdata(vdev);
+ struct pwc_device *pdev = video_drvdata(file);
if (pdev->capt_file != file)
return -EBUSY;
@@ -749,6 +650,7 @@ static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
unsigned int sizes[], void *alloc_ctxs[])
{
struct pwc_device *pdev = vb2_get_drv_priv(vq);
+ int size;
if (*nbuffers < MIN_FRAMES)
*nbuffers = MIN_FRAMES;
@@ -757,7 +659,9 @@ static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
*nplanes = 1;
- sizes[0] = PAGE_ALIGN((pdev->abs_max.x * pdev->abs_max.y * 3) / 2);
+ size = pwc_get_size(pdev, MAX_WIDTH, MAX_HEIGHT);
+ sizes[0] = PAGE_ALIGN(pwc_image_sizes[size][0] *
+ pwc_image_sizes[size][1] * 3 / 2);
return 0;
}
@@ -812,56 +716,59 @@ static void buffer_queue(struct vb2_buffer *vb)
unsigned long flags = 0;
spin_lock_irqsave(&pdev->queued_bufs_lock, flags);
- list_add_tail(&buf->list, &pdev->queued_bufs);
+ /* Check that the device has not disconnected between prep and queuing */
+ if (pdev->udev)
+ list_add_tail(&buf->list, &pdev->queued_bufs);
+ else
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
spin_unlock_irqrestore(&pdev->queued_bufs_lock, flags);
}
static int start_streaming(struct vb2_queue *vq, unsigned int count)
{
struct pwc_device *pdev = vb2_get_drv_priv(vq);
+ int r;
- if (!pdev->udev)
- return -ENODEV;
+ mutex_lock(&pdev->udevlock);
+ if (!pdev->udev) {
+ r = -ENODEV;
+ goto leave;
+ }
/* Turn on camera and set LEDS on */
pwc_camera_power(pdev, 1);
- if (pdev->power_save) {
- /* Restore video mode */
- pwc_set_video_mode(pdev, pdev->view.x, pdev->view.y,
- pdev->vframes, pdev->vcompression,
- pdev->vsnapshot);
- }
- pwc_set_leds(pdev, led_on, led_off);
+ pwc_set_leds(pdev, leds[0], leds[1]);
- return pwc_isoc_init(pdev);
+ r = pwc_isoc_init(pdev);
+ if (r) {
+ /* If we failed turn camera and LEDS back off */
+ pwc_set_leds(pdev, 0, 0);
+ pwc_camera_power(pdev, 0);
+ /* And cleanup any queued bufs!! */
+ pwc_cleanup_queued_bufs(pdev);
+ }
+leave:
+ mutex_unlock(&pdev->udevlock);
+ return r;
}
static int stop_streaming(struct vb2_queue *vq)
{
struct pwc_device *pdev = vb2_get_drv_priv(vq);
+ mutex_lock(&pdev->udevlock);
if (pdev->udev) {
pwc_set_leds(pdev, 0, 0);
pwc_camera_power(pdev, 0);
pwc_isoc_cleanup(pdev);
}
+ mutex_unlock(&pdev->udevlock);
+
pwc_cleanup_queued_bufs(pdev);
return 0;
}
-static void pwc_lock(struct vb2_queue *vq)
-{
- struct pwc_device *pdev = vb2_get_drv_priv(vq);
- mutex_lock(&pdev->modlock);
-}
-
-static void pwc_unlock(struct vb2_queue *vq)
-{
- struct pwc_device *pdev = vb2_get_drv_priv(vq);
- mutex_unlock(&pdev->modlock);
-}
-
static struct vb2_ops pwc_vb_queue_ops = {
.queue_setup = queue_setup,
.buf_init = buffer_init,
@@ -871,8 +778,6 @@ static struct vb2_ops pwc_vb_queue_ops = {
.buf_queue = buffer_queue,
.start_streaming = start_streaming,
.stop_streaming = stop_streaming,
- .wait_prepare = pwc_unlock,
- .wait_finish = pwc_lock,
};
/***************************************************************************/
@@ -887,9 +792,9 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
struct usb_device *udev = interface_to_usbdev(intf);
struct pwc_device *pdev = NULL;
int vendor_id, product_id, type_id;
- int hint, rc;
+ int rc;
int features = 0;
- int video_nr = -1; /* default: use next available device */
+ int compression = 0;
int my_power_save = power_save;
char serial_number[30], *name;
@@ -1149,28 +1054,15 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
return -ENOMEM;
}
pdev->type = type_id;
- pdev->vframes = default_fps;
- strcpy(pdev->serial, serial_number);
pdev->features = features;
- if (vendor_id == 0x046D && product_id == 0x08B5) {
- /* Logitech QuickCam Orbit
- The ranges have been determined experimentally; they may differ from cam to cam.
- Also, the exact ranges left-right and up-down are different for my cam
- */
- pdev->angle_range.pan_min = -7000;
- pdev->angle_range.pan_max = 7000;
- pdev->angle_range.tilt_min = -3000;
- pdev->angle_range.tilt_max = 2500;
- }
pwc_construct(pdev); /* set min/max sizes correct */
- mutex_init(&pdev->modlock);
+ mutex_init(&pdev->capt_file_lock);
mutex_init(&pdev->udevlock);
spin_lock_init(&pdev->queued_bufs_lock);
INIT_LIST_HEAD(&pdev->queued_bufs);
pdev->udev = udev;
- pdev->vcompression = pwc_preferred_compression;
pdev->power_save = my_power_save;
/* Init videobuf2 queue structure */
@@ -1185,35 +1077,21 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
/* Init video_device structure */
memcpy(&pdev->vdev, &pwc_template, sizeof(pwc_template));
- pdev->vdev.parent = &intf->dev;
- pdev->vdev.lock = &pdev->modlock;
strcpy(pdev->vdev.name, name);
+ set_bit(V4L2_FL_USE_FH_PRIO, &pdev->vdev.flags);
video_set_drvdata(&pdev->vdev, pdev);
pdev->release = le16_to_cpu(udev->descriptor.bcdDevice);
PWC_DEBUG_PROBE("Release: %04x\n", pdev->release);
- /* Now search device_hint[] table for a match, so we can hint a node number. */
- for (hint = 0; hint < MAX_DEV_HINTS; hint++) {
- if (((device_hint[hint].type == -1) || (device_hint[hint].type == pdev->type)) &&
- (device_hint[hint].pdev == NULL)) {
- /* so far, so good... try serial number */
- if ((device_hint[hint].serial_number[0] == '*') || !strcmp(device_hint[hint].serial_number, serial_number)) {
- /* match! */
- video_nr = device_hint[hint].device_node;
- PWC_DEBUG_PROBE("Found hint, will try to register as /dev/video%d\n", video_nr);
- break;
- }
- }
+ /* Allocate USB command buffers */
+ pdev->ctrl_buf = kmalloc(sizeof(pdev->cmd_buf), GFP_KERNEL);
+ if (!pdev->ctrl_buf) {
+ PWC_ERROR("Oops, could not allocate memory for pwc_device.\n");
+ rc = -ENOMEM;
+ goto err_free_mem;
}
- /* occupy slot */
- if (hint < MAX_DEV_HINTS)
- device_hint[hint].pdev = pdev;
-
- PWC_DEBUG_PROBE("probe() function returning struct at 0x%p.\n", pdev);
- usb_set_intfdata(intf, pdev);
-
#ifdef CONFIG_USB_PWC_DEBUG
/* Query sensor type */
if (pwc_get_cmos_sensor(pdev, &rc) >= 0) {
@@ -1227,8 +1105,8 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
pwc_set_leds(pdev, 0, 0);
/* Setup intial videomode */
- rc = pwc_set_video_mode(pdev, pdev->view_max.x, pdev->view_max.y,
- pdev->vframes, pdev->vcompression, 0);
+ rc = pwc_set_video_mode(pdev, MAX_WIDTH, MAX_HEIGHT,
+ V4L2_PIX_FMT_YUV420, 30, &compression, 1);
if (rc)
goto err_free_mem;
@@ -1239,20 +1117,25 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
goto err_free_mem;
}
- pdev->vdev.ctrl_handler = &pdev->ctrl_handler;
-
/* And powerdown the camera until streaming starts */
pwc_camera_power(pdev, 0);
- rc = video_register_device(&pdev->vdev, VFL_TYPE_GRABBER, video_nr);
- if (rc < 0) {
- PWC_ERROR("Failed to register as video device (%d).\n", rc);
+ /* Register the v4l2_device structure */
+ pdev->v4l2_dev.release = pwc_video_release;
+ rc = v4l2_device_register(&intf->dev, &pdev->v4l2_dev);
+ if (rc) {
+ PWC_ERROR("Failed to register v4l2-device (%d).\n", rc);
goto err_free_controls;
}
- rc = pwc_create_sysfs_files(pdev);
- if (rc)
- goto err_video_unreg;
+ pdev->v4l2_dev.ctrl_handler = &pdev->ctrl_handler;
+ pdev->vdev.v4l2_dev = &pdev->v4l2_dev;
+
+ rc = video_register_device(&pdev->vdev, VFL_TYPE_GRABBER, -1);
+ if (rc < 0) {
+ PWC_ERROR("Failed to register as video device (%d).\n", rc);
+ goto err_unregister_v4l2_dev;
+ }
PWC_INFO("Registered as %s.\n", video_device_node_name(&pdev->vdev));
#ifdef CONFIG_USB_PWC_INPUT_EVDEV
@@ -1261,7 +1144,6 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
if (!pdev->button_dev) {
PWC_ERROR("Err, insufficient memory for webcam snapshot button device.");
rc = -ENOMEM;
- pwc_remove_sysfs_files(pdev);
goto err_video_unreg;
}
@@ -1279,7 +1161,6 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
if (rc) {
input_free_device(pdev->button_dev);
pdev->button_dev = NULL;
- pwc_remove_sysfs_files(pdev);
goto err_video_unreg;
}
#endif
@@ -1287,13 +1168,13 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
return 0;
err_video_unreg:
- if (hint < MAX_DEV_HINTS)
- device_hint[hint].pdev = NULL;
video_unregister_device(&pdev->vdev);
+err_unregister_v4l2_dev:
+ v4l2_device_unregister(&pdev->v4l2_dev);
err_free_controls:
v4l2_ctrl_handler_free(&pdev->ctrl_handler);
err_free_mem:
- usb_set_intfdata(intf, NULL);
+ kfree(pdev->ctrl_buf);
kfree(pdev);
return rc;
}
@@ -1301,27 +1182,26 @@ err_free_mem:
/* The user yanked out the cable... */
static void usb_pwc_disconnect(struct usb_interface *intf)
{
- struct pwc_device *pdev = usb_get_intfdata(intf);
+ struct v4l2_device *v = usb_get_intfdata(intf);
+ struct pwc_device *pdev = container_of(v, struct pwc_device, v4l2_dev);
mutex_lock(&pdev->udevlock);
- mutex_lock(&pdev->modlock);
-
- usb_set_intfdata(intf, NULL);
/* No need to keep the urbs around after disconnection */
pwc_isoc_cleanup(pdev);
- pwc_cleanup_queued_bufs(pdev);
pdev->udev = NULL;
-
- mutex_unlock(&pdev->modlock);
mutex_unlock(&pdev->udevlock);
- pwc_remove_sysfs_files(pdev);
+ pwc_cleanup_queued_bufs(pdev);
+
video_unregister_device(&pdev->vdev);
+ v4l2_device_unregister(&pdev->v4l2_dev);
#ifdef CONFIG_USB_PWC_INPUT_EVDEV
if (pdev->button_dev)
input_unregister_device(pdev->button_dev);
#endif
+
+ v4l2_device_put(&pdev->v4l2_dev);
}
@@ -1329,30 +1209,19 @@ static void usb_pwc_disconnect(struct usb_interface *intf)
* Initialization code & module stuff
*/
-static int fps;
-static int compression = -1;
-static int leds[2] = { -1, -1 };
static unsigned int leds_nargs;
-static char *dev_hint[MAX_DEV_HINTS];
-static unsigned int dev_hint_nargs;
-module_param(fps, int, 0444);
#ifdef CONFIG_USB_PWC_DEBUG
module_param_named(trace, pwc_trace, int, 0644);
#endif
module_param(power_save, int, 0644);
-module_param(compression, int, 0444);
module_param_array(leds, int, &leds_nargs, 0444);
-module_param_array(dev_hint, charp, &dev_hint_nargs, 0444);
-MODULE_PARM_DESC(fps, "Initial frames per second. Varies with model, useful range 5-30");
#ifdef CONFIG_USB_PWC_DEBUG
MODULE_PARM_DESC(trace, "For debugging purposes");
#endif
MODULE_PARM_DESC(power_save, "Turn power saving for new cameras on or off");
-MODULE_PARM_DESC(compression, "Preferred compression quality. Range 0 (uncompressed) to 3 (high compression)");
MODULE_PARM_DESC(leds, "LED on,off time in milliseconds");
-MODULE_PARM_DESC(dev_hint, "Device node hints");
MODULE_DESCRIPTION("Philips & OEM USB webcam driver");
MODULE_AUTHOR("Luc Saillard <luc@saillard.org>");
@@ -1362,122 +1231,13 @@ MODULE_VERSION( PWC_VERSION );
static int __init usb_pwc_init(void)
{
- int i;
-
-#ifdef CONFIG_USB_PWC_DEBUG
- PWC_INFO("Philips webcam module version " PWC_VERSION " loaded.\n");
- PWC_INFO("Supports Philips PCA645/646, PCVC675/680/690, PCVC720[40]/730/740/750 & PCVC830/840.\n");
- PWC_INFO("Also supports the Askey VC010, various Logitech Quickcams, Samsung MPC-C10 and MPC-C30,\n");
- PWC_INFO("the Creative WebCam 5 & Pro Ex, SOTEC Afina Eye and Visionite VCS-UC300 and VCS-UM100.\n");
-
- if (pwc_trace >= 0) {
- PWC_DEBUG_MODULE("Trace options: 0x%04x\n", pwc_trace);
- }
-#endif
-
- if (fps) {
- if (fps < 4 || fps > 30) {
- PWC_ERROR("Framerate out of bounds (4-30).\n");
- return -EINVAL;
- }
- default_fps = fps;
- PWC_DEBUG_MODULE("Default framerate set to %d.\n", default_fps);
- }
-
- if (compression >= 0) {
- if (compression > 3) {
- PWC_ERROR("Invalid compression setting; use a number between 0 (uncompressed) and 3 (high).\n");
- return -EINVAL;
- }
- pwc_preferred_compression = compression;
- PWC_DEBUG_MODULE("Preferred compression set to %d.\n", pwc_preferred_compression);
- }
- if (leds[0] >= 0)
- led_on = leds[0];
- if (leds[1] >= 0)
- led_off = leds[1];
-
- /* Big device node whoopla. Basically, it allows you to assign a
- device node (/dev/videoX) to a camera, based on its type
- & serial number. The format is [type[.serialnumber]:]node.
-
- Any camera that isn't matched by these rules gets the next
- available free device node.
- */
- for (i = 0; i < MAX_DEV_HINTS; i++) {
- char *s, *colon, *dot;
-
- /* This loop also initializes the array */
- device_hint[i].pdev = NULL;
- s = dev_hint[i];
- if (s != NULL && *s != '\0') {
- device_hint[i].type = -1; /* wildcard */
- strcpy(device_hint[i].serial_number, "*");
-
- /* parse string: chop at ':' & '/' */
- colon = dot = s;
- while (*colon != '\0' && *colon != ':')
- colon++;
- while (*dot != '\0' && *dot != '.')
- dot++;
- /* Few sanity checks */
- if (*dot != '\0' && dot > colon) {
- PWC_ERROR("Malformed camera hint: the colon must be after the dot.\n");
- return -EINVAL;
- }
-
- if (*colon == '\0') {
- /* No colon */
- if (*dot != '\0') {
- PWC_ERROR("Malformed camera hint: no colon + device node given.\n");
- return -EINVAL;
- }
- else {
- /* No type or serial number specified, just a number. */
- device_hint[i].device_node =
- simple_strtol(s, NULL, 10);
- }
- }
- else {
- /* There's a colon, so we have at least a type and a device node */
- device_hint[i].type =
- simple_strtol(s, NULL, 10);
- device_hint[i].device_node =
- simple_strtol(colon + 1, NULL, 10);
- if (*dot != '\0') {
- /* There's a serial number as well */
- int k;
-
- dot++;
- k = 0;
- while (*dot != ':' && k < 29) {
- device_hint[i].serial_number[k++] = *dot;
- dot++;
- }
- device_hint[i].serial_number[k] = '\0';
- }
- }
- PWC_TRACE("device_hint[%d]:\n", i);
- PWC_TRACE(" type : %d\n", device_hint[i].type);
- PWC_TRACE(" serial# : %s\n", device_hint[i].serial_number);
- PWC_TRACE(" node : %d\n", device_hint[i].device_node);
- }
- else
- device_hint[i].type = 0; /* not filled */
- } /* ..for MAX_DEV_HINTS */
-
- PWC_DEBUG_PROBE("Registering driver at address 0x%p.\n", &pwc_driver);
return usb_register(&pwc_driver);
}
static void __exit usb_pwc_exit(void)
{
- PWC_DEBUG_MODULE("Deregistering driver.\n");
usb_deregister(&pwc_driver);
- PWC_INFO("Philips webcam module removed.\n");
}
module_init(usb_pwc_init);
module_exit(usb_pwc_exit);
-
-/* vim: set cino= formatoptions=croql cindent shiftwidth=8 tabstop=8: */
diff --git a/drivers/media/video/pwc/pwc-kiara.h b/drivers/media/video/pwc/pwc-kiara.h
index 047dad8c15f7..8e02b7ac2139 100644
--- a/drivers/media/video/pwc/pwc-kiara.h
+++ b/drivers/media/video/pwc/pwc-kiara.h
@@ -27,7 +27,7 @@
#ifndef PWC_KIARA_H
#define PWC_KIARA_H
-#include <media/pwc-ioctl.h>
+#include "pwc.h"
#define PWC_FPS_MAX_KIARA 6
diff --git a/drivers/media/video/pwc/pwc-misc.c b/drivers/media/video/pwc/pwc-misc.c
index 0b031336eab8..9be5adffa874 100644
--- a/drivers/media/video/pwc/pwc-misc.c
+++ b/drivers/media/video/pwc/pwc-misc.c
@@ -27,67 +27,47 @@
#include "pwc.h"
-const struct pwc_coord pwc_image_sizes[PSZ_MAX] =
+const int pwc_image_sizes[PSZ_MAX][2] =
{
- { 128, 96, 0 }, /* sqcif */
- { 160, 120, 0 }, /* qsif */
- { 176, 144, 0 }, /* qcif */
- { 320, 240, 0 }, /* sif */
- { 352, 288, 0 }, /* cif */
- { 640, 480, 0 }, /* vga */
+ { 128, 96 }, /* sqcif */
+ { 160, 120 }, /* qsif */
+ { 176, 144 }, /* qcif */
+ { 320, 240 }, /* sif */
+ { 352, 288 }, /* cif */
+ { 640, 480 }, /* vga */
};
/* x,y -> PSZ_ */
-int pwc_decode_size(struct pwc_device *pdev, int width, int height)
+int pwc_get_size(struct pwc_device *pdev, int width, int height)
{
- int i, find;
-
- /* Make sure we don't go beyond our max size.
- NB: we have different limits for RAW and normal modes. In case
- you don't have the decompressor loaded or use RAW mode,
- the maximum viewable size is smaller.
- */
- if (pdev->pixfmt != V4L2_PIX_FMT_YUV420)
- {
- if (width > pdev->abs_max.x || height > pdev->abs_max.y)
- {
- PWC_DEBUG_SIZE("VIDEO_PALETTE_RAW: going beyond abs_max.\n");
- return -1;
- }
- }
- else
- {
- if (width > pdev->view_max.x || height > pdev->view_max.y)
- {
- PWC_DEBUG_SIZE("VIDEO_PALETTE_not RAW: going beyond view_max.\n");
- return -1;
- }
- }
+ int i;
/* Find the largest size supported by the camera that fits into the
- requested size.
- */
- find = -1;
+ requested size. */
+ for (i = PSZ_MAX - 1; i >= 0; i--) {
+ if (!(pdev->image_mask & (1 << i)))
+ continue;
+
+ if (pwc_image_sizes[i][0] <= width &&
+ pwc_image_sizes[i][1] <= height)
+ return i;
+ }
+
+ /* No mode found, return the smallest mode we have */
for (i = 0; i < PSZ_MAX; i++) {
- if (pdev->image_mask & (1 << i)) {
- if (pwc_image_sizes[i].x <= width && pwc_image_sizes[i].y <= height)
- find = i;
- }
+ if (pdev->image_mask & (1 << i))
+ return i;
}
- return find;
+
+ /* Never reached, there is always at least one supported mode */
+ return 0;
}
-/* initialize variables depending on type and decompressor*/
+/* initialize variables depending on type and decompressor */
void pwc_construct(struct pwc_device *pdev)
{
if (DEVICE_USE_CODEC1(pdev->type)) {
- pdev->view_min.x = 128;
- pdev->view_min.y = 96;
- pdev->view_max.x = 352;
- pdev->view_max.y = 288;
- pdev->abs_max.x = 352;
- pdev->abs_max.y = 288;
pdev->image_mask = 1 << PSZ_SQCIF | 1 << PSZ_QCIF | 1 << PSZ_CIF;
pdev->vcinterface = 2;
pdev->vendpoint = 4;
@@ -96,13 +76,7 @@ void pwc_construct(struct pwc_device *pdev)
} else if (DEVICE_USE_CODEC3(pdev->type)) {
- pdev->view_min.x = 160;
- pdev->view_min.y = 120;
- pdev->view_max.x = 640;
- pdev->view_max.y = 480;
pdev->image_mask = 1 << PSZ_QSIF | 1 << PSZ_SIF | 1 << PSZ_VGA;
- pdev->abs_max.x = 640;
- pdev->abs_max.y = 480;
pdev->vcinterface = 3;
pdev->vendpoint = 5;
pdev->frame_header_size = TOUCAM_HEADER_SIZE;
@@ -110,20 +84,10 @@ void pwc_construct(struct pwc_device *pdev)
} else /* if (DEVICE_USE_CODEC2(pdev->type)) */ {
- pdev->view_min.x = 128;
- pdev->view_min.y = 96;
- /* Anthill bug #38: PWC always reports max size, even without PWCX */
- pdev->view_max.x = 640;
- pdev->view_max.y = 480;
pdev->image_mask = 1 << PSZ_SQCIF | 1 << PSZ_QSIF | 1 << PSZ_QCIF | 1 << PSZ_SIF | 1 << PSZ_CIF | 1 << PSZ_VGA;
- pdev->abs_max.x = 640;
- pdev->abs_max.y = 480;
pdev->vcinterface = 3;
pdev->vendpoint = 4;
pdev->frame_header_size = 0;
pdev->frame_trailer_size = 0;
}
- pdev->pixfmt = V4L2_PIX_FMT_YUV420; /* default */
- pdev->view_min.size = pdev->view_min.x * pdev->view_min.y;
- pdev->view_max.size = pdev->view_max.x * pdev->view_max.y;
}
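
The rewritten pwc_get_size() above scans the size table from the largest entry down and returns the first mode that is both enabled in the device's image_mask and fits inside the requested dimensions, falling back to the smallest enabled mode. A minimal userspace sketch of that selection logic (the table holds the real pwc sizes; the mask value in main() is only an example):

#include <stdio.h>

#define PSZ_MAX 6

static const int image_sizes[PSZ_MAX][2] = {
	{ 128,  96 }, { 160, 120 }, { 176, 144 },
	{ 320, 240 }, { 352, 288 }, { 640, 480 },
};

/* Largest enabled mode that fits in width x height, else smallest enabled. */
static int get_size(unsigned int mask, int width, int height)
{
	int i;

	for (i = PSZ_MAX - 1; i >= 0; i--) {
		if (!(mask & (1 << i)))
			continue;
		if (image_sizes[i][0] <= width && image_sizes[i][1] <= height)
			return i;
	}
	for (i = 0; i < PSZ_MAX; i++)
		if (mask & (1 << i))
			return i;
	return 0;
}

int main(void)
{
	unsigned int mask = 1 << 0 | 1 << 2 | 1 << 4;	/* sqcif, qcif, cif */
	int i = get_size(mask, 400, 300);

	printf("%dx%d\n", image_sizes[i][0], image_sizes[i][1]);	/* 352x288 */
	return 0;
}
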
diff --git a/drivers/media/video/pwc/pwc-timon.h b/drivers/media/video/pwc/pwc-timon.h
index a6e22224c95f..270c5b9010f6 100644
--- a/drivers/media/video/pwc/pwc-timon.h
+++ b/drivers/media/video/pwc/pwc-timon.h
@@ -42,7 +42,7 @@
#ifndef PWC_TIMON_H
#define PWC_TIMON_H
-#include <media/pwc-ioctl.h>
+#include "pwc.h"
#define PWC_FPS_MAX_TIMON 6
diff --git a/drivers/media/video/pwc/pwc-uncompress.c b/drivers/media/video/pwc/pwc-uncompress.c
index 51265092bd31..b65903fbcf0d 100644
--- a/drivers/media/video/pwc/pwc-uncompress.c
+++ b/drivers/media/video/pwc/pwc-uncompress.c
@@ -35,7 +35,7 @@
int pwc_decompress(struct pwc_device *pdev, struct pwc_frame_buf *fbuf)
{
- int n, line, col, stride;
+ int n, line, col;
void *yuv, *image;
u16 *src;
u16 *dsty, *dstu, *dstv;
@@ -60,35 +60,23 @@ int pwc_decompress(struct pwc_device *pdev, struct pwc_frame_buf *fbuf)
return 0;
}
- vb2_set_plane_payload(&fbuf->vb, 0, pdev->view.size);
+ vb2_set_plane_payload(&fbuf->vb, 0,
+ pdev->width * pdev->height * 3 / 2);
if (pdev->vbandlength == 0) {
/* Uncompressed mode.
- * We copy the data into the output buffer, using the viewport
- * size (which may be larger than the image size).
- * Unfortunately we have to do a bit of byte stuffing to get
- * the desired output format/size.
*
* We do some byte shuffling here to go from the
* native format to YUV420P.
*/
src = (u16 *)yuv;
- n = pdev->view.x * pdev->view.y;
+ n = pdev->width * pdev->height;
+ dsty = (u16 *)(image);
+ dstu = (u16 *)(image + n);
+ dstv = (u16 *)(image + n + n / 4);
- /* offset in Y plane */
- stride = pdev->view.x * pdev->offset.y + pdev->offset.x;
- dsty = (u16 *)(image + stride);
-
- /* offsets in U/V planes */
- stride = pdev->view.x * pdev->offset.y / 4 + pdev->offset.x / 2;
- dstu = (u16 *)(image + n + stride);
- dstv = (u16 *)(image + n + n / 4 + stride);
-
- /* increment after each line */
- stride = (pdev->view.x - pdev->image.x) / 2; /* u16 is 2 bytes */
-
- for (line = 0; line < pdev->image.y; line++) {
- for (col = 0; col < pdev->image.x; col += 4) {
+ for (line = 0; line < pdev->height; line++) {
+ for (col = 0; col < pdev->width; col += 4) {
*dsty++ = *src++;
*dsty++ = *src++;
if (line & 1)
@@ -96,11 +84,6 @@ int pwc_decompress(struct pwc_device *pdev, struct pwc_frame_buf *fbuf)
else
*dstu++ = *src++;
}
- dsty += stride;
- if (line & 1)
- dstv += (stride >> 1);
- else
- dstu += (stride >> 1);
}
return 0;
@@ -111,12 +94,6 @@ int pwc_decompress(struct pwc_device *pdev, struct pwc_frame_buf *fbuf)
* the decompressor routines will write the data in planar format
* immediately.
*/
- if (pdev->vsize == PSZ_VGA && pdev->vframes == 5 && pdev->vsnapshot) {
- PWC_ERROR("Mode Bayer is not supported for now\n");
- /* flags |= PWCX_FLAG_BAYER; */
- return -ENXIO; /* No such device or address: missing decompressor */
- }
-
if (DEVICE_USE_CODEC1(pdev->type)) {
/* TODO & FIXME */
@@ -124,10 +101,7 @@ int pwc_decompress(struct pwc_device *pdev, struct pwc_frame_buf *fbuf)
return -ENXIO; /* No such device or address: missing decompressor */
} else {
- pwc_dec23_decompress(pdev, yuv, image, PWCX_FLAG_PLANAR);
+ pwc_dec23_decompress(pdev, yuv, image);
}
return 0;
}
-
-
-/* vim: set cino= formatoptions=croql cindent shiftwidth=8 tabstop=8: */
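
With the viewport/offset handling gone, pwc_decompress() emits a plain YUV420P image: the Y plane is width*height bytes and the U and V planes are a quarter of that each, so the vb2 payload is width*height*3/2 and the plane offsets follow directly from the image base. A small standalone sketch of that layout arithmetic (no device access; 640x480 is only an example):

#include <stddef.h>
#include <stdio.h>

struct yuv420p_layout {
	size_t y_off, u_off, v_off, total;
};

static struct yuv420p_layout yuv420p(size_t width, size_t height)
{
	size_t n = width * height;		/* luma samples */
	struct yuv420p_layout l = {
		.y_off = 0,
		.u_off = n,			/* U plane follows Y */
		.v_off = n + n / 4,		/* V plane follows U */
		.total = n * 3 / 2,		/* payload handed to vb2 */
	};
	return l;
}

int main(void)
{
	struct yuv420p_layout l = yuv420p(640, 480);

	printf("U at %zu, V at %zu, total %zu\n", l.u_off, l.v_off, l.total);
	return 0;
}
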
diff --git a/drivers/media/video/pwc/pwc-v4l.c b/drivers/media/video/pwc/pwc-v4l.c
index a10ff6b64acf..f495eeb5403a 100644
--- a/drivers/media/video/pwc/pwc-v4l.c
+++ b/drivers/media/video/pwc/pwc-v4l.c
@@ -49,6 +49,7 @@ static const struct v4l2_ctrl_ops pwc_ctrl_ops = {
enum { awb_indoor, awb_outdoor, awb_fl, awb_manual, awb_auto };
enum { custom_autocontour, custom_contour, custom_noise_reduction,
+ custom_awb_speed, custom_awb_delay,
custom_save_user, custom_restore_user, custom_restore_factory };
const char * const pwc_auto_whitebal_qmenu[] = {
@@ -138,6 +139,26 @@ static const struct v4l2_ctrl_config pwc_restore_factory_cfg = {
.name = "Restore Factory Settings",
};
+static const struct v4l2_ctrl_config pwc_awb_speed_cfg = {
+ .ops = &pwc_ctrl_ops,
+ .id = PWC_CID_CUSTOM(awb_speed),
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Auto White Balance Speed",
+ .min = 1,
+ .max = 32,
+ .step = 1,
+};
+
+static const struct v4l2_ctrl_config pwc_awb_delay_cfg = {
+ .ops = &pwc_ctrl_ops,
+ .id = PWC_CID_CUSTOM(awb_delay),
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Auto White Balance Delay",
+ .min = 0,
+ .max = 63,
+ .step = 1,
+};
+
int pwc_init_controls(struct pwc_device *pdev)
{
struct v4l2_ctrl_handler *hdl;
@@ -338,6 +359,23 @@ int pwc_init_controls(struct pwc_device *pdev)
if (pdev->restore_factory)
pdev->restore_factory->flags |= V4L2_CTRL_FLAG_UPDATE;
+ /* Auto White Balance speed & delay */
+ r = pwc_get_u8_ctrl(pdev, GET_CHROM_CTL,
+ AWB_CONTROL_SPEED_FORMATTER, &def);
+ if (r || def < 1 || def > 32)
+ def = 1;
+ cfg = pwc_awb_speed_cfg;
+ cfg.def = def;
+ pdev->awb_speed = v4l2_ctrl_new_custom(hdl, &cfg, NULL);
+
+ r = pwc_get_u8_ctrl(pdev, GET_CHROM_CTL,
+ AWB_CONTROL_DELAY_FORMATTER, &def);
+ if (r || def > 63)
+ def = 0;
+ cfg = pwc_awb_delay_cfg;
+ cfg.def = def;
+ pdev->awb_delay = v4l2_ctrl_new_custom(hdl, &cfg, NULL);
+
if (!(pdev->features & FEATURE_MOTOR_PANTILT))
return hdl->error;
@@ -357,25 +395,16 @@ int pwc_init_controls(struct pwc_device *pdev)
return hdl->error;
}
-static void pwc_vidioc_fill_fmt(const struct pwc_device *pdev, struct v4l2_format *f)
+static void pwc_vidioc_fill_fmt(struct v4l2_format *f,
+ int width, int height, u32 pixfmt)
{
memset(&f->fmt.pix, 0, sizeof(struct v4l2_pix_format));
- f->fmt.pix.width = pdev->view.x;
- f->fmt.pix.height = pdev->view.y;
+ f->fmt.pix.width = width;
+ f->fmt.pix.height = height;
f->fmt.pix.field = V4L2_FIELD_NONE;
- if (pdev->pixfmt == V4L2_PIX_FMT_YUV420) {
- f->fmt.pix.pixelformat = V4L2_PIX_FMT_YUV420;
- f->fmt.pix.bytesperline = (f->fmt.pix.width * 3)/2;
- f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
- } else {
- /* vbandlength contains 4 lines ... */
- f->fmt.pix.bytesperline = pdev->vbandlength/4;
- f->fmt.pix.sizeimage = pdev->frame_size + sizeof(struct pwc_raw_frame);
- if (DEVICE_USE_CODEC1(pdev->type))
- f->fmt.pix.pixelformat = V4L2_PIX_FMT_PWC1;
- else
- f->fmt.pix.pixelformat = V4L2_PIX_FMT_PWC2;
- }
+ f->fmt.pix.pixelformat = pixfmt;
+ f->fmt.pix.bytesperline = f->fmt.pix.width;
+ f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.width * 3 / 2;
PWC_DEBUG_IOCTL("pwc_vidioc_fill_fmt() "
"width=%d, height=%d, bytesperline=%d, sizeimage=%d, pixelformat=%c%c%c%c\n",
f->fmt.pix.width,
@@ -391,6 +420,8 @@ static void pwc_vidioc_fill_fmt(const struct pwc_device *pdev, struct v4l2_forma
/* ioctl(VIDIOC_TRY_FMT) */
static int pwc_vidioc_try_fmt(struct pwc_device *pdev, struct v4l2_format *f)
{
+ int size;
+
if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
PWC_DEBUG_IOCTL("Bad video type must be V4L2_BUF_TYPE_VIDEO_CAPTURE\n");
return -EINVAL;
@@ -417,15 +448,11 @@ static int pwc_vidioc_try_fmt(struct pwc_device *pdev, struct v4l2_format *f)
}
- if (f->fmt.pix.width > pdev->view_max.x)
- f->fmt.pix.width = pdev->view_max.x;
- else if (f->fmt.pix.width < pdev->view_min.x)
- f->fmt.pix.width = pdev->view_min.x;
-
- if (f->fmt.pix.height > pdev->view_max.y)
- f->fmt.pix.height = pdev->view_max.y;
- else if (f->fmt.pix.height < pdev->view_min.y)
- f->fmt.pix.height = pdev->view_min.y;
+ size = pwc_get_size(pdev, f->fmt.pix.width, f->fmt.pix.height);
+ pwc_vidioc_fill_fmt(f,
+ pwc_image_sizes[size][0],
+ pwc_image_sizes[size][1],
+ f->fmt.pix.pixelformat);
return 0;
}
@@ -435,68 +462,45 @@ static int pwc_vidioc_try_fmt(struct pwc_device *pdev, struct v4l2_format *f)
static int pwc_s_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *f)
{
struct pwc_device *pdev = video_drvdata(file);
- int ret, fps, snapshot, compression, pixelformat;
-
- if (!pdev->udev)
- return -ENODEV;
+ int ret, pixelformat, compression = 0;
- if (pdev->capt_file != NULL &&
- pdev->capt_file != file)
+ if (pwc_test_n_set_capt_file(pdev, file))
return -EBUSY;
- pdev->capt_file = file;
-
ret = pwc_vidioc_try_fmt(pdev, f);
- if (ret<0)
+ if (ret < 0)
return ret;
pixelformat = f->fmt.pix.pixelformat;
- compression = pdev->vcompression;
- snapshot = 0;
- fps = pdev->vframes;
- if (f->fmt.pix.priv) {
- compression = (f->fmt.pix.priv & PWC_QLT_MASK) >> PWC_QLT_SHIFT;
- snapshot = !!(f->fmt.pix.priv & PWC_FPS_SNAPSHOT);
- fps = (f->fmt.pix.priv & PWC_FPS_FRMASK) >> PWC_FPS_SHIFT;
- if (fps == 0)
- fps = pdev->vframes;
- }
- if (pixelformat != V4L2_PIX_FMT_YUV420 &&
- pixelformat != V4L2_PIX_FMT_PWC1 &&
- pixelformat != V4L2_PIX_FMT_PWC2)
- return -EINVAL;
+ mutex_lock(&pdev->udevlock);
+ if (!pdev->udev) {
+ ret = -ENODEV;
+ goto leave;
+ }
- if (vb2_is_streaming(&pdev->vb_queue))
- return -EBUSY;
+ if (pdev->iso_init) {
+ ret = -EBUSY;
+ goto leave;
+ }
PWC_DEBUG_IOCTL("Trying to set format to: width=%d height=%d fps=%d "
- "compression=%d snapshot=%d format=%c%c%c%c\n",
- f->fmt.pix.width, f->fmt.pix.height, fps,
- compression, snapshot,
+ "format=%c%c%c%c\n",
+ f->fmt.pix.width, f->fmt.pix.height, pdev->vframes,
(pixelformat)&255,
(pixelformat>>8)&255,
(pixelformat>>16)&255,
(pixelformat>>24)&255);
- ret = pwc_set_video_mode(pdev,
- f->fmt.pix.width,
- f->fmt.pix.height,
- fps,
- compression,
- snapshot);
+ ret = pwc_set_video_mode(pdev, f->fmt.pix.width, f->fmt.pix.height,
+ pixelformat, 30, &compression, 0);
PWC_DEBUG_IOCTL("pwc_set_video_mode(), return=%d\n", ret);
- if (ret)
- return ret;
-
- pdev->pixfmt = pixelformat;
-
- pwc_vidioc_fill_fmt(pdev, f);
-
- return 0;
-
+ pwc_vidioc_fill_fmt(f, pdev->width, pdev->height, pdev->pixfmt);
+leave:
+ mutex_unlock(&pdev->udevlock);
+ return ret;
}
static int pwc_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
@@ -536,30 +540,14 @@ static int pwc_s_input(struct file *file, void *fh, unsigned int i)
return i ? -EINVAL : 0;
}
-static int pwc_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+static int pwc_g_volatile_ctrl_unlocked(struct v4l2_ctrl *ctrl)
{
struct pwc_device *pdev =
container_of(ctrl->handler, struct pwc_device, ctrl_handler);
int ret = 0;
- /*
- * Sometimes it can take quite long for the pwc to complete usb control
- * transfers, so release the modlock to give streaming by another
- * process / thread the chance to continue with a dqbuf.
- */
- mutex_unlock(&pdev->modlock);
-
- /*
- * Take the udev-lock to protect against the disconnect handler
- * completing and setting dev->udev to NULL underneath us. Other code
- * does not need to do this since it is protected by the modlock.
- */
- mutex_lock(&pdev->udevlock);
-
- if (!pdev->udev) {
- ret = -ENODEV;
- goto leave;
- }
+ if (!pdev->udev)
+ return -ENODEV;
switch (ctrl->id) {
case V4L2_CID_AUTO_WHITE_BALANCE:
@@ -624,9 +612,18 @@ static int pwc_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
if (ret)
PWC_ERROR("g_ctrl %s error %d\n", ctrl->name, ret);
-leave:
+ return ret;
+}
+
+static int pwc_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct pwc_device *pdev =
+ container_of(ctrl->handler, struct pwc_device, ctrl_handler);
+ int ret;
+
+ mutex_lock(&pdev->udevlock);
+ ret = pwc_g_volatile_ctrl_unlocked(ctrl);
mutex_unlock(&pdev->udevlock);
- mutex_lock(&pdev->modlock);
return ret;
}
@@ -643,6 +640,15 @@ static int pwc_set_awb(struct pwc_device *pdev)
if (pdev->auto_white_balance->val != awb_manual)
pdev->color_bal_valid = false; /* Force cache update */
+
+ /*
+ * If this is a preset, update our red / blue balance values
+ * so that events get generated for the new preset values
+ */
+ if (pdev->auto_white_balance->val == awb_indoor ||
+ pdev->auto_white_balance->val == awb_outdoor ||
+ pdev->auto_white_balance->val == awb_fl)
+ pwc_g_volatile_ctrl_unlocked(pdev->auto_white_balance);
}
if (pdev->auto_white_balance->val != awb_manual)
return 0;
@@ -766,33 +772,33 @@ static int pwc_set_autogain_expo(struct pwc_device *pdev)
static int pwc_set_motor(struct pwc_device *pdev)
{
int ret;
- u8 buf[4];
- buf[0] = 0;
+ pdev->ctrl_buf[0] = 0;
if (pdev->motor_pan_reset->is_new)
- buf[0] |= 0x01;
+ pdev->ctrl_buf[0] |= 0x01;
if (pdev->motor_tilt_reset->is_new)
- buf[0] |= 0x02;
+ pdev->ctrl_buf[0] |= 0x02;
if (pdev->motor_pan_reset->is_new || pdev->motor_tilt_reset->is_new) {
ret = send_control_msg(pdev, SET_MPT_CTL,
- PT_RESET_CONTROL_FORMATTER, buf, 1);
+ PT_RESET_CONTROL_FORMATTER,
+ pdev->ctrl_buf, 1);
if (ret < 0)
return ret;
}
- memset(buf, 0, sizeof(buf));
+ memset(pdev->ctrl_buf, 0, 4);
if (pdev->motor_pan->is_new) {
- buf[0] = pdev->motor_pan->val & 0xFF;
- buf[1] = (pdev->motor_pan->val >> 8);
+ pdev->ctrl_buf[0] = pdev->motor_pan->val & 0xFF;
+ pdev->ctrl_buf[1] = (pdev->motor_pan->val >> 8);
}
if (pdev->motor_tilt->is_new) {
- buf[2] = pdev->motor_tilt->val & 0xFF;
- buf[3] = (pdev->motor_tilt->val >> 8);
+ pdev->ctrl_buf[2] = pdev->motor_tilt->val & 0xFF;
+ pdev->ctrl_buf[3] = (pdev->motor_tilt->val >> 8);
}
if (pdev->motor_pan->is_new || pdev->motor_tilt->is_new) {
ret = send_control_msg(pdev, SET_MPT_CTL,
PT_RELATIVE_CONTROL_FORMATTER,
- buf, sizeof(buf));
+ pdev->ctrl_buf, 4);
if (ret < 0)
return ret;
}
@@ -806,8 +812,6 @@ static int pwc_s_ctrl(struct v4l2_ctrl *ctrl)
container_of(ctrl->handler, struct pwc_device, ctrl_handler);
int ret = 0;
- /* See the comments on locking in pwc_g_volatile_ctrl */
- mutex_unlock(&pdev->modlock);
mutex_lock(&pdev->udevlock);
if (!pdev->udev) {
@@ -891,6 +895,16 @@ static int pwc_s_ctrl(struct v4l2_ctrl *ctrl)
ret = pwc_button_ctrl(pdev,
RESTORE_FACTORY_DEFAULTS_FORMATTER);
break;
+ case PWC_CID_CUSTOM(awb_speed):
+ ret = pwc_set_u8_ctrl(pdev, SET_CHROM_CTL,
+ AWB_CONTROL_SPEED_FORMATTER,
+ ctrl->val);
+ break;
+ case PWC_CID_CUSTOM(awb_delay):
+ ret = pwc_set_u8_ctrl(pdev, SET_CHROM_CTL,
+ AWB_CONTROL_DELAY_FORMATTER,
+ ctrl->val);
+ break;
case V4L2_CID_PAN_RELATIVE:
ret = pwc_set_motor(pdev);
break;
@@ -903,7 +917,6 @@ static int pwc_s_ctrl(struct v4l2_ctrl *ctrl)
leave:
mutex_unlock(&pdev->udevlock);
- mutex_lock(&pdev->modlock);
return ret;
}
@@ -933,9 +946,14 @@ static int pwc_g_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *f)
{
struct pwc_device *pdev = video_drvdata(file);
+ if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ mutex_lock(&pdev->udevlock); /* To avoid race with s_fmt */
PWC_DEBUG_IOCTL("ioctl(VIDIOC_G_FMT) return size %dx%d\n",
- pdev->image.x, pdev->image.y);
- pwc_vidioc_fill_fmt(pdev, f);
+ pdev->width, pdev->height);
+ pwc_vidioc_fill_fmt(f, pdev->width, pdev->height, pdev->pixfmt);
+ mutex_unlock(&pdev->udevlock);
return 0;
}
@@ -951,12 +969,9 @@ static int pwc_reqbufs(struct file *file, void *fh,
{
struct pwc_device *pdev = video_drvdata(file);
- if (pdev->capt_file != NULL &&
- pdev->capt_file != file)
+ if (pwc_test_n_set_capt_file(pdev, file))
return -EBUSY;
- pdev->capt_file = file;
-
return vb2_reqbufs(&pdev->vb_queue, rb);
}
@@ -1025,25 +1040,21 @@ static int pwc_enum_framesizes(struct file *file, void *fh,
struct pwc_device *pdev = video_drvdata(file);
unsigned int i = 0, index = fsize->index;
- if (fsize->pixel_format == V4L2_PIX_FMT_YUV420) {
+ if (fsize->pixel_format == V4L2_PIX_FMT_YUV420 ||
+ (fsize->pixel_format == V4L2_PIX_FMT_PWC1 &&
+ DEVICE_USE_CODEC1(pdev->type)) ||
+ (fsize->pixel_format == V4L2_PIX_FMT_PWC2 &&
+ DEVICE_USE_CODEC23(pdev->type))) {
for (i = 0; i < PSZ_MAX; i++) {
- if (pdev->image_mask & (1UL << i)) {
- if (!index--) {
- fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
- fsize->discrete.width = pwc_image_sizes[i].x;
- fsize->discrete.height = pwc_image_sizes[i].y;
- return 0;
- }
+ if (!(pdev->image_mask & (1UL << i)))
+ continue;
+ if (!index--) {
+ fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
+ fsize->discrete.width = pwc_image_sizes[i][0];
+ fsize->discrete.height = pwc_image_sizes[i][1];
+ return 0;
}
}
- } else if (fsize->index == 0 &&
- ((fsize->pixel_format == V4L2_PIX_FMT_PWC1 && DEVICE_USE_CODEC1(pdev->type)) ||
- (fsize->pixel_format == V4L2_PIX_FMT_PWC2 && DEVICE_USE_CODEC23(pdev->type)))) {
-
- fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
- fsize->discrete.width = pdev->abs_max.x;
- fsize->discrete.height = pdev->abs_max.y;
- return 0;
}
return -EINVAL;
}
@@ -1056,8 +1067,8 @@ static int pwc_enum_frameintervals(struct file *file, void *fh,
unsigned int i;
for (i = 0; i < PSZ_MAX; i++) {
- if (pwc_image_sizes[i].x == fival->width &&
- pwc_image_sizes[i].y == fival->height) {
+ if (pwc_image_sizes[i][0] == fival->width &&
+ pwc_image_sizes[i][1] == fival->height) {
size = i;
break;
}
@@ -1078,20 +1089,69 @@ static int pwc_enum_frameintervals(struct file *file, void *fh,
return 0;
}
-static int pwc_log_status(struct file *file, void *priv)
+static int pwc_g_parm(struct file *file, void *fh,
+ struct v4l2_streamparm *parm)
{
struct pwc_device *pdev = video_drvdata(file);
- v4l2_ctrl_handler_log_status(&pdev->ctrl_handler, PWC_NAME);
+ if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ memset(parm, 0, sizeof(*parm));
+
+ parm->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ parm->parm.capture.readbuffers = MIN_FRAMES;
+ parm->parm.capture.capability |= V4L2_CAP_TIMEPERFRAME;
+ parm->parm.capture.timeperframe.denominator = pdev->vframes;
+ parm->parm.capture.timeperframe.numerator = 1;
+
return 0;
}
-static long pwc_default(struct file *file, void *fh, bool valid_prio,
- int cmd, void *arg)
+static int pwc_s_parm(struct file *file, void *fh,
+ struct v4l2_streamparm *parm)
{
struct pwc_device *pdev = video_drvdata(file);
+ int compression = 0;
+ int ret, fps;
- return pwc_ioctl(pdev, cmd, arg);
+ if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
+ parm->parm.capture.timeperframe.numerator == 0)
+ return -EINVAL;
+
+ if (pwc_test_n_set_capt_file(pdev, file))
+ return -EBUSY;
+
+ fps = parm->parm.capture.timeperframe.denominator /
+ parm->parm.capture.timeperframe.numerator;
+
+ mutex_lock(&pdev->udevlock);
+ if (!pdev->udev) {
+ ret = -ENODEV;
+ goto leave;
+ }
+
+ if (pdev->iso_init) {
+ ret = -EBUSY;
+ goto leave;
+ }
+
+ ret = pwc_set_video_mode(pdev, pdev->width, pdev->height, pdev->pixfmt,
+ fps, &compression, 0);
+
+ pwc_g_parm(file, fh, parm);
+
+leave:
+ mutex_unlock(&pdev->udevlock);
+ return ret;
+}
+
+static int pwc_log_status(struct file *file, void *priv)
+{
+ struct pwc_device *pdev = video_drvdata(file);
+
+ v4l2_ctrl_handler_log_status(&pdev->ctrl_handler, PWC_NAME);
+ return 0;
}
const struct v4l2_ioctl_ops pwc_ioctl_ops = {
@@ -1112,8 +1172,6 @@ const struct v4l2_ioctl_ops pwc_ioctl_ops = {
.vidioc_log_status = pwc_log_status,
.vidioc_enum_framesizes = pwc_enum_framesizes,
.vidioc_enum_frameintervals = pwc_enum_frameintervals,
- .vidioc_default = pwc_default,
+ .vidioc_g_parm = pwc_g_parm,
+ .vidioc_s_parm = pwc_s_parm,
};
-
-
-/* vim: set cino= formatoptions=croql cindent shiftwidth=8 tabstop=8: */
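
The new VIDIOC_S_PARM handler derives the frame rate from the requested time-per-frame fraction (denominator / numerator), and pwc_g_parm() reports the result back as 1/fps. A self-contained sketch of that round trip, assuming only the fraction arithmetic and none of the actual driver state:

#include <stdio.h>

struct fract { unsigned int numerator, denominator; };

/* S_PARM: requested time per frame -> integer frames per second */
static unsigned int fps_from_timeperframe(struct fract tpf)
{
	if (tpf.numerator == 0)
		return 0;			/* rejected with -EINVAL in the driver */
	return tpf.denominator / tpf.numerator;
}

/* G_PARM: frames per second -> reported time per frame */
static struct fract timeperframe_from_fps(unsigned int fps)
{
	struct fract tpf = { .numerator = 1, .denominator = fps };
	return tpf;
}

int main(void)
{
	struct fract req = { 1, 15 };		/* ask for 15 fps */
	unsigned int fps = fps_from_timeperframe(req);
	struct fract rep = timeperframe_from_fps(fps);

	printf("fps=%u, reported %u/%u\n", fps, rep.numerator, rep.denominator);
	return 0;
}
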
diff --git a/drivers/media/video/pwc/pwc.h b/drivers/media/video/pwc/pwc.h
index 0e4e2d7b7872..e4d4d711dd1f 100644
--- a/drivers/media/video/pwc/pwc.h
+++ b/drivers/media/video/pwc/pwc.h
@@ -35,14 +35,17 @@
#include <asm/errno.h>
#include <linux/videodev2.h>
#include <media/v4l2-common.h>
+#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-ctrls.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-event.h>
#include <media/videobuf2-vmalloc.h>
#ifdef CONFIG_USB_PWC_INPUT_EVDEV
#include <linux/input.h>
#endif
-
-#include <media/pwc-ioctl.h>
+#include "pwc-dec1.h"
+#include "pwc-dec23.h"
/* Version block */
#define PWC_VERSION "10.0.15"
@@ -106,6 +109,9 @@
#define FEATURE_CODEC1 0x0002
#define FEATURE_CODEC2 0x0004
+#define MAX_WIDTH 640
+#define MAX_HEIGHT 480
+
/* Ignore errors in the first N frames, to allow for startup delays */
#define FRAME_LOWMARK 5
@@ -128,9 +134,6 @@
#define DEVICE_USE_CODEC3(x) ((x)>=700)
#define DEVICE_USE_CODEC23(x) ((x)>=675)
-/* from pwc-dec.h */
-#define PWCX_FLAG_PLANAR 0x0001
-
/* Request types: video */
#define SET_LUM_CTL 0x01
#define GET_LUM_CTL 0x02
@@ -186,6 +189,24 @@
#define PT_RESET_CONTROL_FORMATTER 0x02
#define PT_STATUS_FORMATTER 0x03
+/* Enumeration of image sizes */
+#define PSZ_SQCIF 0x00
+#define PSZ_QSIF 0x01
+#define PSZ_QCIF 0x02
+#define PSZ_SIF 0x03
+#define PSZ_CIF 0x04
+#define PSZ_VGA 0x05
+#define PSZ_MAX 6
+
+struct pwc_raw_frame {
+ __le16 type; /* type of the webcam */
+ __le16 vbandlength; /* Size of 4 lines compressed (used by the
+ decompressor) */
+ __u8 cmd[4]; /* the four bytes of the command (in case of
+ nala, only the first 3 bytes are filled) */
+ __u8 rawframe[0]; /* frame_size = H / 4 * vbandlength */
+} __packed;
+
/* intermediate buffers with raw data from the USB cam */
struct pwc_frame_buf
{
@@ -198,39 +219,36 @@ struct pwc_frame_buf
struct pwc_device
{
struct video_device vdev;
- struct mutex modlock;
+ struct v4l2_device v4l2_dev;
/* Pointer to our usb_device, may be NULL after unplug */
struct usb_device *udev;
- /* Protects the setting of udev to NULL by our disconnect handler */
struct mutex udevlock;
/* type of cam (645, 646, 675, 680, 690, 720, 730, 740, 750) */
int type;
int release; /* release number */
int features; /* feature bits */
- char serial[30]; /* serial number (string) */
/*** Video data ***/
struct file *capt_file; /* file doing video capture */
+ struct mutex capt_file_lock;
int vendpoint; /* video isoc endpoint */
int vcinterface; /* video control interface */
int valternate; /* alternate interface needed */
- int vframes, vsize; /* frames-per-second & size (see PSZ_*) */
+ int vframes; /* frames-per-second */
int pixfmt; /* pixelformat: V4L2_PIX_FMT_YUV420 or _PWCX */
int vframe_count; /* received frames */
int vmax_packet_size; /* USB maxpacket size */
int vlast_packet_size; /* for frame synchronisation */
int visoc_errors; /* number of contiguous ISOC errors */
- int vcompression; /* desired compression factor */
int vbandlength; /* compressed band length; 0 is uncompressed */
- char vsnapshot; /* snapshot mode */
char vsync; /* used by isoc handler */
char vmirror; /* for ToUCaM series */
char power_save; /* Do powersaving for this cam */
- int cmd_len;
unsigned char cmd_buf[13];
+ unsigned char *ctrl_buf;
struct urb *urbs[MAX_ISO_BUFS];
char iso_init;
@@ -253,7 +271,10 @@ struct pwc_device
int frame_total_size; /* including header & trailer */
int drop_frames;
- void *decompress_data; /* private data for decompression engine */
+ union { /* private data for decompression engine */
+ struct pwc_dec1_private dec1;
+ struct pwc_dec23_private dec23;
+ };
/*
* We have an 'image' and a 'view', where 'image' is the fixed-size img
@@ -262,21 +283,8 @@ struct pwc_device
* a gray or black border. view_min <= image <= view <= view_max;
*/
int image_mask; /* supported sizes */
- struct pwc_coord view_min, view_max; /* minimum and maximum view */
- struct pwc_coord abs_max; /* maximum supported size */
- struct pwc_coord image, view; /* image and viewport size */
- struct pwc_coord offset; /* offset of the viewport */
+ int width, height; /* current resolution */
- /*** motorized pan/tilt feature */
- struct pwc_mpt_range angle_range;
- int pan_angle; /* in degrees * 100 */
- int tilt_angle; /* absolute angle; 0,0 is home */
-
- /*
- * Set to 1 when the user push the button, reset to 0
- * when this value is read from sysfs.
- */
- int snapshot_button_status;
#ifdef CONFIG_USB_PWC_INPUT_EVDEV
struct input_dev *button_dev; /* webcam snapshot button input */
char button_phys[64];
@@ -328,6 +336,8 @@ struct pwc_device
struct v4l2_ctrl *save_user;
struct v4l2_ctrl *restore_user;
struct v4l2_ctrl *restore_factory;
+ struct v4l2_ctrl *awb_speed;
+ struct v4l2_ctrl *awb_delay;
struct {
/* motor control cluster */
struct v4l2_ctrl *motor_pan;
@@ -344,19 +354,20 @@ struct pwc_device
extern int pwc_trace;
#endif
+int pwc_test_n_set_capt_file(struct pwc_device *pdev, struct file *file);
+
/** Functions in pwc-misc.c */
/* sizes in pixels */
-extern const struct pwc_coord pwc_image_sizes[PSZ_MAX];
+extern const int pwc_image_sizes[PSZ_MAX][2];
-int pwc_decode_size(struct pwc_device *pdev, int width, int height);
+int pwc_get_size(struct pwc_device *pdev, int width, int height);
void pwc_construct(struct pwc_device *pdev);
/** Functions in pwc-ctrl.c */
/* Request a certain video mode. Returns < 0 if not possible */
-extern int pwc_set_video_mode(struct pwc_device *pdev, int width, int height, int frames, int compression, int snapshot);
+extern int pwc_set_video_mode(struct pwc_device *pdev, int width, int height,
+ int pixfmt, int frames, int *compression, int send_to_cam);
extern unsigned int pwc_get_fps(struct pwc_device *pdev, unsigned int index, unsigned int size);
-extern int pwc_mpt_reset(struct pwc_device *pdev, int flags);
-extern int pwc_mpt_set_angle(struct pwc_device *pdev, int pan, int tilt);
extern int pwc_set_leds(struct pwc_device *pdev, int on_value, int off_value);
extern int pwc_get_cmos_sensor(struct pwc_device *pdev, int *sensor);
extern int send_control_msg(struct pwc_device *pdev,
@@ -375,9 +386,6 @@ int pwc_init_controls(struct pwc_device *pdev);
/* Power down or up the camera; not supported by all models */
extern void pwc_camera_power(struct pwc_device *pdev, int power);
-/* Private ioctl()s; see pwc-ioctl.h */
-extern long pwc_ioctl(struct pwc_device *pdev, unsigned int cmd, void *arg);
-
extern const struct v4l2_ioctl_ops pwc_ioctl_ops;
/** pwc-uncompress.c */
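
struct pwc_device now embeds the decompressor state through an anonymous union instead of the old separately allocated void *decompress_data, so the storage for whichever codec is in use lives inside the device structure itself. A small sketch of that pattern, using made-up member types in place of pwc_dec1_private/pwc_dec23_private:

#include <stdio.h>
#include <string.h>

struct dec1_state  { int version; };		/* stand-in for pwc_dec1_private */
struct dec23_state { int table[4]; };		/* stand-in for pwc_dec23_private */

struct device_state {
	int type;
	union {					/* only one decompressor is active at a time */
		struct dec1_state  dec1;
		struct dec23_state dec23;
	};
};

int main(void)
{
	struct device_state dev;

	memset(&dev, 0, sizeof(dev));
	dev.type = 730;				/* a codec-3 style device */
	dev.dec23.table[0] = 42;		/* use the dec23 member of the union */
	printf("%d\n", dev.dec23.table[0]);
	return 0;
}
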
diff --git a/drivers/media/video/pxa_camera.c b/drivers/media/video/pxa_camera.c
index 79fb22c89ae9..0bd7da26d018 100644
--- a/drivers/media/video/pxa_camera.c
+++ b/drivers/media/video/pxa_camera.c
@@ -1133,12 +1133,13 @@ static void pxa_camera_setup_cicr(struct soc_camera_device *icd,
__raw_writel(cicr0, pcdev->base + CICR0);
}
-static int pxa_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt)
+static int pxa_camera_set_bus_param(struct soc_camera_device *icd)
{
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct pxa_camera_dev *pcdev = ici->priv;
struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,};
+ u32 pixfmt = icd->current_fmt->host_fmt->fourcc;
unsigned long bus_flags, common_flags;
int ret;
struct pxa_cam *cam = icd->host_priv;
@@ -1851,19 +1852,7 @@ static struct platform_driver pxa_camera_driver = {
.remove = __devexit_p(pxa_camera_remove),
};
-
-static int __init pxa_camera_init(void)
-{
- return platform_driver_register(&pxa_camera_driver);
-}
-
-static void __exit pxa_camera_exit(void)
-{
- platform_driver_unregister(&pxa_camera_driver);
-}
-
-module_init(pxa_camera_init);
-module_exit(pxa_camera_exit);
+module_platform_driver(pxa_camera_driver);
MODULE_DESCRIPTION("PXA27x SoC Camera Host driver");
MODULE_AUTHOR("Guennadi Liakhovetski <kernel@pengutronix.de>");
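
module_platform_driver() is the helper that generates exactly the kind of trivial init/exit pair deleted above: it registers the platform driver when the module loads and unregisters it on unload. Roughly (a sketch of the expansion's intent, not the literal macro text):

/* Roughly what module_platform_driver(pxa_camera_driver) stands in for: */
static int __init pxa_camera_driver_init(void)
{
	return platform_driver_register(&pxa_camera_driver);
}
module_init(pxa_camera_driver_init);

static void __exit pxa_camera_driver_exit(void)
{
	platform_driver_unregister(&pxa_camera_driver);
}
module_exit(pxa_camera_driver_exit);
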
diff --git a/drivers/media/video/s5p-fimc/fimc-capture.c b/drivers/media/video/s5p-fimc/fimc-capture.c
index c8d91b0cd9bd..a9e9653beeb4 100644
--- a/drivers/media/video/s5p-fimc/fimc-capture.c
+++ b/drivers/media/video/s5p-fimc/fimc-capture.c
@@ -63,6 +63,8 @@ static int fimc_init_capture(struct fimc_dev *fimc)
fimc_hw_set_effect(ctx, false);
fimc_hw_set_output_path(ctx);
fimc_hw_set_out_dma(ctx);
+ if (fimc->variant->has_alpha)
+ fimc_hw_set_rgb_alpha(ctx);
clear_bit(ST_CAPT_APPLY_CFG, &fimc->state);
}
spin_unlock_irqrestore(&fimc->slock, flags);
@@ -98,6 +100,10 @@ static int fimc_capture_state_cleanup(struct fimc_dev *fimc, bool suspend)
vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
}
set_bit(ST_CAPT_SUSPENDED, &fimc->state);
+
+ fimc_hw_reset(fimc);
+ cap->buf_index = 0;
+
spin_unlock_irqrestore(&fimc->slock, flags);
if (streaming)
@@ -137,7 +143,7 @@ int fimc_capture_config_update(struct fimc_ctx *ctx)
struct fimc_dev *fimc = ctx->fimc_dev;
int ret;
- if (test_bit(ST_CAPT_APPLY_CFG, &fimc->state))
+ if (!test_bit(ST_CAPT_APPLY_CFG, &fimc->state))
return 0;
spin_lock(&ctx->slock);
@@ -150,7 +156,9 @@ int fimc_capture_config_update(struct fimc_ctx *ctx)
fimc_hw_set_rotation(ctx);
fimc_prepare_dma_offset(ctx, &ctx->d_frame);
fimc_hw_set_out_dma(ctx);
- set_bit(ST_CAPT_APPLY_CFG, &fimc->state);
+ if (fimc->variant->has_alpha)
+ fimc_hw_set_rgb_alpha(ctx);
+ clear_bit(ST_CAPT_APPLY_CFG, &fimc->state);
}
spin_unlock(&ctx->slock);
return ret;
@@ -164,7 +172,6 @@ static int start_streaming(struct vb2_queue *q, unsigned int count)
int min_bufs;
int ret;
- fimc_hw_reset(fimc);
vid_cap->frame_count = 0;
ret = fimc_init_capture(fimc);
@@ -523,7 +530,7 @@ static struct fimc_fmt *fimc_capture_try_format(struct fimc_ctx *ctx,
max_w = rotation ? pl->out_rot_en_w : pl->out_rot_dis_w;
min_w = ctx->state & FIMC_DST_CROP ? dst->width : var->min_out_pixsize;
min_h = ctx->state & FIMC_DST_CROP ? dst->height : var->min_out_pixsize;
- if (fimc->id == 1 && var->pix_hoff)
+ if (var->min_vsize_align == 1 && !rotation)
align_h = fimc_fmt_is_rgb(ffmt->color) ? 0 : 1;
depth = fimc_get_format_depth(ffmt);
@@ -686,7 +693,7 @@ static int fimc_pipeline_try_format(struct fimc_ctx *ctx,
mf->code = 0;
continue;
}
- if (mf->width != tfmt->width || mf->width != tfmt->width) {
+ if (mf->width != tfmt->width || mf->height != tfmt->height) {
u32 fcc = ffmt->fourcc;
tfmt->width = mf->width;
tfmt->height = mf->height;
@@ -695,7 +702,8 @@ static int fimc_pipeline_try_format(struct fimc_ctx *ctx,
NULL, &fcc, FIMC_SD_PAD_SOURCE);
if (ffmt && ffmt->mbus_code)
mf->code = ffmt->mbus_code;
- if (mf->width != tfmt->width || mf->width != tfmt->width)
+ if (mf->width != tfmt->width ||
+ mf->height != tfmt->height)
continue;
tfmt->code = mf->code;
}
@@ -703,7 +711,7 @@ static int fimc_pipeline_try_format(struct fimc_ctx *ctx,
ret = v4l2_subdev_call(csis, pad, set_fmt, NULL, &sfmt);
if (mf->code == tfmt->code &&
- mf->width == tfmt->width && mf->width == tfmt->width)
+ mf->width == tfmt->width && mf->height == tfmt->height)
break;
}
@@ -809,6 +817,10 @@ static int fimc_capture_set_format(struct fimc_dev *fimc, struct v4l2_format *f)
FIMC_SD_PAD_SOURCE);
if (!ff->fmt)
return -EINVAL;
+
+ /* Update RGB Alpha control state and value range */
+ fimc_alpha_ctrl_update(ctx);
+
/* Try to match format at the host and the sensor */
if (!fimc->vid_cap.user_subdev_api) {
mf->code = ff->fmt->mbus_code;
@@ -1232,6 +1244,9 @@ static int fimc_subdev_set_fmt(struct v4l2_subdev *sd,
*mf = fmt->format;
return 0;
}
+ /* Update RGB Alpha control state and value range */
+ fimc_alpha_ctrl_update(ctx);
+
fimc_capture_mark_jpeg_xfer(ctx, fimc_fmt_is_jpeg(ffmt->color));
ff = fmt->pad == FIMC_SD_PAD_SINK ?
@@ -1239,6 +1254,7 @@ static int fimc_subdev_set_fmt(struct v4l2_subdev *sd,
mutex_lock(&fimc->lock);
set_frame_bounds(ff, mf->width, mf->height);
+ fimc->vid_cap.mf = *mf;
ff->fmt = ffmt;
/* Reset the crop rectangle if required. */
@@ -1375,7 +1391,7 @@ static void fimc_destroy_capture_subdev(struct fimc_dev *fimc)
media_entity_cleanup(&sd->entity);
v4l2_device_unregister_subdev(sd);
kfree(sd);
- sd = NULL;
+ fimc->vid_cap.subdev = NULL;
}
/* Set default format at the sensor and host interface */
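
Note the inverted test in fimc_capture_config_update(): the function now returns early when ST_CAPT_APPLY_CFG is not set, and clears the bit once the configuration has been applied instead of re-setting it. A standalone sketch of that check-then-clear pattern (the bit position is illustrative):

#include <stdio.h>

#define ST_CAPT_APPLY_CFG  (1u << 4)		/* illustrative bit position */

/* Apply a queued configuration only when one is actually pending,
 * and clear the pending bit once it has been consumed. */
static void config_update(unsigned long *state)
{
	if (!(*state & ST_CAPT_APPLY_CFG))
		return;				/* nothing queued, bail out early */

	/* ... program the hardware here ... */
	*state &= ~ST_CAPT_APPLY_CFG;		/* consumed: clear, don't re-set */
}

int main(void)
{
	unsigned long state = ST_CAPT_APPLY_CFG;

	config_update(&state);
	printf("pending after update: %d\n", !!(state & ST_CAPT_APPLY_CFG));
	return 0;
}
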
diff --git a/drivers/media/video/s5p-fimc/fimc-core.c b/drivers/media/video/s5p-fimc/fimc-core.c
index 19ca6db38b2f..81bcbb9492ea 100644
--- a/drivers/media/video/s5p-fimc/fimc-core.c
+++ b/drivers/media/video/s5p-fimc/fimc-core.c
@@ -37,7 +37,7 @@ static char *fimc_clocks[MAX_FIMC_CLOCKS] = {
static struct fimc_fmt fimc_formats[] = {
{
.name = "RGB565",
- .fourcc = V4L2_PIX_FMT_RGB565X,
+ .fourcc = V4L2_PIX_FMT_RGB565,
.depth = { 16 },
.color = S5P_FIMC_RGB565,
.memplanes = 1,
@@ -52,13 +52,29 @@ static struct fimc_fmt fimc_formats[] = {
.colplanes = 1,
.flags = FMT_FLAGS_M2M,
}, {
- .name = "XRGB-8-8-8-8, 32 bpp",
+ .name = "ARGB8888, 32 bpp",
.fourcc = V4L2_PIX_FMT_RGB32,
.depth = { 32 },
.color = S5P_FIMC_RGB888,
.memplanes = 1,
.colplanes = 1,
- .flags = FMT_FLAGS_M2M,
+ .flags = FMT_FLAGS_M2M | FMT_HAS_ALPHA,
+ }, {
+ .name = "ARGB1555",
+ .fourcc = V4L2_PIX_FMT_RGB555,
+ .depth = { 16 },
+ .color = S5P_FIMC_RGB555,
+ .memplanes = 1,
+ .colplanes = 1,
+ .flags = FMT_FLAGS_M2M_OUT | FMT_HAS_ALPHA,
+ }, {
+ .name = "ARGB4444",
+ .fourcc = V4L2_PIX_FMT_RGB444,
+ .depth = { 16 },
+ .color = S5P_FIMC_RGB444,
+ .memplanes = 1,
+ .colplanes = 1,
+ .flags = FMT_FLAGS_M2M_OUT | FMT_HAS_ALPHA,
}, {
.name = "YUV 4:2:2 packed, YCbYCr",
.fourcc = V4L2_PIX_FMT_YUYV,
@@ -171,6 +187,14 @@ static struct fimc_fmt fimc_formats[] = {
},
};
+static unsigned int get_m2m_fmt_flags(unsigned int stream_type)
+{
+ if (stream_type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ return FMT_FLAGS_M2M_IN;
+ else
+ return FMT_FLAGS_M2M_OUT;
+}
+
int fimc_check_scaler_ratio(struct fimc_ctx *ctx, int sw, int sh,
int dw, int dh, int rotation)
{
@@ -652,8 +676,11 @@ static void fimc_dma_run(void *priv)
if (ctx->state & (FIMC_DST_ADDR | FIMC_PARAMS))
fimc_hw_set_output_addr(fimc, &ctx->d_frame.paddr, -1);
- if (ctx->state & FIMC_PARAMS)
+ if (ctx->state & FIMC_PARAMS) {
fimc_hw_set_out_dma(ctx);
+ if (fimc->variant->has_alpha)
+ fimc_hw_set_rgb_alpha(ctx);
+ }
fimc_activate_capture(ctx);
@@ -750,12 +777,11 @@ static struct vb2_ops fimc_qops = {
#define ctrl_to_ctx(__ctrl) \
container_of((__ctrl)->handler, struct fimc_ctx, ctrl_handler)
-static int fimc_s_ctrl(struct v4l2_ctrl *ctrl)
+static int __fimc_s_ctrl(struct fimc_ctx *ctx, struct v4l2_ctrl *ctrl)
{
- struct fimc_ctx *ctx = ctrl_to_ctx(ctrl);
struct fimc_dev *fimc = ctx->fimc_dev;
struct samsung_fimc_variant *variant = fimc->variant;
- unsigned long flags;
+ unsigned int flags = FIMC_DST_FMT | FIMC_SRC_FMT;
int ret = 0;
if (ctrl->flags & V4L2_CTRL_FLAG_INACTIVE)
@@ -763,59 +789,77 @@ static int fimc_s_ctrl(struct v4l2_ctrl *ctrl)
switch (ctrl->id) {
case V4L2_CID_HFLIP:
- spin_lock_irqsave(&ctx->slock, flags);
ctx->hflip = ctrl->val;
break;
case V4L2_CID_VFLIP:
- spin_lock_irqsave(&ctx->slock, flags);
ctx->vflip = ctrl->val;
break;
case V4L2_CID_ROTATE:
if (fimc_capture_pending(fimc) ||
- fimc_ctx_state_is_set(FIMC_DST_FMT | FIMC_SRC_FMT, ctx)) {
+ (ctx->state & flags) == flags) {
ret = fimc_check_scaler_ratio(ctx, ctx->s_frame.width,
ctx->s_frame.height, ctx->d_frame.width,
ctx->d_frame.height, ctrl->val);
- }
- if (ret) {
- v4l2_err(fimc->m2m.vfd, "Out of scaler range\n");
- return -EINVAL;
+ if (ret)
+ return -EINVAL;
}
if ((ctrl->val == 90 || ctrl->val == 270) &&
!variant->has_out_rot)
return -EINVAL;
- spin_lock_irqsave(&ctx->slock, flags);
+
ctx->rotation = ctrl->val;
break;
- default:
- v4l2_err(fimc->v4l2_dev, "Invalid control: 0x%X\n", ctrl->id);
- return -EINVAL;
+ case V4L2_CID_ALPHA_COMPONENT:
+ ctx->d_frame.alpha = ctrl->val;
+ break;
}
ctx->state |= FIMC_PARAMS;
set_bit(ST_CAPT_APPLY_CFG, &fimc->state);
- spin_unlock_irqrestore(&ctx->slock, flags);
return 0;
}
+static int fimc_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct fimc_ctx *ctx = ctrl_to_ctx(ctrl);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&ctx->slock, flags);
+ ret = __fimc_s_ctrl(ctx, ctrl);
+ spin_unlock_irqrestore(&ctx->slock, flags);
+
+ return ret;
+}
+
static const struct v4l2_ctrl_ops fimc_ctrl_ops = {
.s_ctrl = fimc_s_ctrl,
};
int fimc_ctrls_create(struct fimc_ctx *ctx)
{
+ struct samsung_fimc_variant *variant = ctx->fimc_dev->variant;
+ unsigned int max_alpha = fimc_get_alpha_mask(ctx->d_frame.fmt);
+
if (ctx->ctrls_rdy)
return 0;
- v4l2_ctrl_handler_init(&ctx->ctrl_handler, 3);
+ v4l2_ctrl_handler_init(&ctx->ctrl_handler, 4);
ctx->ctrl_rotate = v4l2_ctrl_new_std(&ctx->ctrl_handler, &fimc_ctrl_ops,
- V4L2_CID_HFLIP, 0, 1, 1, 0);
+ V4L2_CID_ROTATE, 0, 270, 90, 0);
ctx->ctrl_hflip = v4l2_ctrl_new_std(&ctx->ctrl_handler, &fimc_ctrl_ops,
- V4L2_CID_VFLIP, 0, 1, 1, 0);
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
ctx->ctrl_vflip = v4l2_ctrl_new_std(&ctx->ctrl_handler, &fimc_ctrl_ops,
- V4L2_CID_ROTATE, 0, 270, 90, 0);
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+ if (variant->has_alpha)
+ ctx->ctrl_alpha = v4l2_ctrl_new_std(&ctx->ctrl_handler,
+ &fimc_ctrl_ops, V4L2_CID_ALPHA_COMPONENT,
+ 0, max_alpha, 1, 0);
+ else
+ ctx->ctrl_alpha = NULL;
+
ctx->ctrls_rdy = ctx->ctrl_handler.error == 0;
return ctx->ctrl_handler.error;
@@ -826,11 +870,14 @@ void fimc_ctrls_delete(struct fimc_ctx *ctx)
if (ctx->ctrls_rdy) {
v4l2_ctrl_handler_free(&ctx->ctrl_handler);
ctx->ctrls_rdy = false;
+ ctx->ctrl_alpha = NULL;
}
}
void fimc_ctrls_activate(struct fimc_ctx *ctx, bool active)
{
+ unsigned int has_alpha = ctx->d_frame.fmt->flags & FMT_HAS_ALPHA;
+
if (!ctx->ctrls_rdy)
return;
@@ -838,6 +885,8 @@ void fimc_ctrls_activate(struct fimc_ctx *ctx, bool active)
v4l2_ctrl_activate(ctx->ctrl_rotate, active);
v4l2_ctrl_activate(ctx->ctrl_hflip, active);
v4l2_ctrl_activate(ctx->ctrl_vflip, active);
+ if (ctx->ctrl_alpha)
+ v4l2_ctrl_activate(ctx->ctrl_alpha, active && has_alpha);
if (active) {
ctx->rotation = ctx->ctrl_rotate->val;
@@ -851,6 +900,24 @@ void fimc_ctrls_activate(struct fimc_ctx *ctx, bool active)
mutex_unlock(&ctx->ctrl_handler.lock);
}
+/* Update maximum value of the alpha color control */
+void fimc_alpha_ctrl_update(struct fimc_ctx *ctx)
+{
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ struct v4l2_ctrl *ctrl = ctx->ctrl_alpha;
+
+ if (ctrl == NULL || !fimc->variant->has_alpha)
+ return;
+
+ v4l2_ctrl_lock(ctrl);
+ ctrl->maximum = fimc_get_alpha_mask(ctx->d_frame.fmt);
+
+ if (ctrl->cur.val > ctrl->maximum)
+ ctrl->cur.val = ctrl->maximum;
+
+ v4l2_ctrl_unlock(ctrl);
+}
+
/*
* V4L2 ioctl handlers
*/
@@ -874,7 +941,8 @@ static int fimc_m2m_enum_fmt_mplane(struct file *file, void *priv,
{
struct fimc_fmt *fmt;
- fmt = fimc_find_format(NULL, NULL, FMT_FLAGS_M2M, f->index);
+ fmt = fimc_find_format(NULL, NULL, get_m2m_fmt_flags(f->type),
+ f->index);
if (!fmt)
return -EINVAL;
@@ -938,6 +1006,7 @@ void fimc_adjust_mplane_format(struct fimc_fmt *fmt, u32 width, u32 height,
pix->colorspace = V4L2_COLORSPACE_JPEG;
pix->field = V4L2_FIELD_NONE;
pix->num_planes = fmt->memplanes;
+ pix->pixelformat = fmt->fourcc;
pix->height = height;
pix->width = width;
@@ -1017,7 +1086,8 @@ static int fimc_try_fmt_mplane(struct fimc_ctx *ctx, struct v4l2_format *f)
dbg("w: %d, h: %d", pix->width, pix->height);
- fmt = fimc_find_format(&pix->pixelformat, NULL, FMT_FLAGS_M2M, 0);
+ fmt = fimc_find_format(&pix->pixelformat, NULL,
+ get_m2m_fmt_flags(f->type), 0);
if (WARN(fmt == NULL, "Pixel format lookup failed"))
return -EINVAL;
@@ -1038,12 +1108,11 @@ static int fimc_try_fmt_mplane(struct fimc_ctx *ctx, struct v4l2_format *f)
mod_x = 6; /* 64 x 32 pixels tile */
mod_y = 5;
} else {
- if (fimc->id == 1 && variant->pix_hoff)
+ if (variant->min_vsize_align == 1)
mod_y = fimc_fmt_is_rgb(fmt->color) ? 0 : 1;
else
- mod_y = mod_x;
+ mod_y = ffs(variant->min_vsize_align) - 1;
}
- dbg("mod_x: %d, mod_y: %d, max_w: %d", mod_x, mod_y, max_w);
v4l_bound_align_image(&pix->width, 16, max_w, mod_x,
&pix->height, 8, variant->pix_limit->scaler_dis_w, mod_y, 0);
@@ -1088,10 +1157,13 @@ static int fimc_m2m_s_fmt_mplane(struct file *file, void *fh,
pix = &f->fmt.pix_mp;
frame->fmt = fimc_find_format(&pix->pixelformat, NULL,
- FMT_FLAGS_M2M, 0);
+ get_m2m_fmt_flags(f->type), 0);
if (!frame->fmt)
return -EINVAL;
+ /* Update RGB Alpha control state and value range */
+ fimc_alpha_ctrl_update(ctx);
+
for (i = 0; i < frame->fmt->colplanes; i++) {
frame->payload[i] =
(pix->width * pix->height * frame->fmt->depth[i]) / 8;
@@ -1226,10 +1298,10 @@ static int fimc_m2m_try_crop(struct fimc_ctx *ctx, struct v4l2_crop *cr)
fimc->variant->min_inp_pixsize : fimc->variant->min_out_pixsize;
/* Get pixel alignment constraints. */
- if (fimc->id == 1 && fimc->variant->pix_hoff)
+ if (fimc->variant->min_vsize_align == 1)
halign = fimc_fmt_is_rgb(f->fmt->color) ? 0 : 1;
else
- halign = ffs(min_size) - 1;
+ halign = ffs(fimc->variant->min_vsize_align) - 1;
for (i = 0; i < f->fmt->colplanes; i++)
depth += f->fmt->depth[i];
@@ -1375,6 +1447,12 @@ static int fimc_m2m_open(struct file *file)
if (!ctx)
return -ENOMEM;
v4l2_fh_init(&ctx->fh, fimc->m2m.vfd);
+ ctx->fimc_dev = fimc;
+
+ /* Default color format */
+ ctx->s_frame.fmt = &fimc_formats[0];
+ ctx->d_frame.fmt = &fimc_formats[0];
+
ret = fimc_ctrls_create(ctx);
if (ret)
goto error_fh;
@@ -1384,10 +1462,6 @@ static int fimc_m2m_open(struct file *file)
file->private_data = &ctx->fh;
v4l2_fh_add(&ctx->fh);
- ctx->fimc_dev = fimc;
- /* Default color format */
- ctx->s_frame.fmt = &fimc_formats[0];
- ctx->d_frame.fmt = &fimc_formats[0];
/* Setup the device context for memory-to-memory mode */
ctx->state = FIMC_CTX_M2M;
ctx->flags = 0;
@@ -1615,7 +1689,6 @@ static int fimc_probe(struct platform_device *pdev)
pdata = pdev->dev.platform_data;
fimc->pdata = pdata;
- set_bit(ST_LPM, &fimc->state);
init_waitqueue_head(&fimc->irq_queue);
spin_lock_init(&fimc->slock);
@@ -1707,15 +1780,12 @@ static int fimc_runtime_resume(struct device *dev)
/* Enable clocks and perform basic initialization */
clk_enable(fimc->clock[CLK_GATE]);
fimc_hw_reset(fimc);
- if (fimc->variant->out_buf_count > 4)
- fimc_hw_set_dma_seq(fimc, 0xF);
/* Resume the capture or mem-to-mem device */
if (fimc_capture_busy(fimc))
return fimc_capture_resume(fimc);
- else if (fimc_m2m_pending(fimc))
- return fimc_m2m_resume(fimc);
- return 0;
+
+ return fimc_m2m_resume(fimc);
}
static int fimc_runtime_suspend(struct device *dev)
@@ -1750,8 +1820,6 @@ static int fimc_resume(struct device *dev)
return 0;
}
fimc_hw_reset(fimc);
- if (fimc->variant->out_buf_count > 4)
- fimc_hw_set_dma_seq(fimc, 0xF);
spin_unlock_irqrestore(&fimc->slock, flags);
if (fimc_capture_busy(fimc))
@@ -1780,7 +1848,6 @@ static int __devexit fimc_remove(struct platform_device *pdev)
struct fimc_dev *fimc = platform_get_drvdata(pdev);
pm_runtime_disable(&pdev->dev);
- fimc_runtime_suspend(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
vb2_dma_contig_cleanup_ctx(fimc->alloc_ctx);
@@ -1840,6 +1907,7 @@ static struct samsung_fimc_variant fimc0_variant_s5p = {
.min_inp_pixsize = 16,
.min_out_pixsize = 16,
.hor_offs_align = 8,
+ .min_vsize_align = 16,
.out_buf_count = 4,
.pix_limit = &s5p_pix_limit[0],
};
@@ -1849,6 +1917,7 @@ static struct samsung_fimc_variant fimc2_variant_s5p = {
.min_inp_pixsize = 16,
.min_out_pixsize = 16,
.hor_offs_align = 8,
+ .min_vsize_align = 16,
.out_buf_count = 4,
.pix_limit = &s5p_pix_limit[1],
};
@@ -1861,6 +1930,7 @@ static struct samsung_fimc_variant fimc0_variant_s5pv210 = {
.min_inp_pixsize = 16,
.min_out_pixsize = 16,
.hor_offs_align = 8,
+ .min_vsize_align = 16,
.out_buf_count = 4,
.pix_limit = &s5p_pix_limit[1],
};
@@ -1874,6 +1944,7 @@ static struct samsung_fimc_variant fimc1_variant_s5pv210 = {
.min_inp_pixsize = 16,
.min_out_pixsize = 16,
.hor_offs_align = 1,
+ .min_vsize_align = 1,
.out_buf_count = 4,
.pix_limit = &s5p_pix_limit[2],
};
@@ -1884,6 +1955,7 @@ static struct samsung_fimc_variant fimc2_variant_s5pv210 = {
.min_inp_pixsize = 16,
.min_out_pixsize = 16,
.hor_offs_align = 8,
+ .min_vsize_align = 16,
.out_buf_count = 4,
.pix_limit = &s5p_pix_limit[2],
};
@@ -1895,9 +1967,11 @@ static struct samsung_fimc_variant fimc0_variant_exynos4 = {
.has_cam_if = 1,
.has_cistatus2 = 1,
.has_mainscaler_ext = 1,
+ .has_alpha = 1,
.min_inp_pixsize = 16,
.min_out_pixsize = 16,
.hor_offs_align = 2,
+ .min_vsize_align = 1,
.out_buf_count = 32,
.pix_limit = &s5p_pix_limit[1],
};
@@ -1907,9 +1981,11 @@ static struct samsung_fimc_variant fimc3_variant_exynos4 = {
.has_cam_if = 1,
.has_cistatus2 = 1,
.has_mainscaler_ext = 1,
+ .has_alpha = 1,
.min_inp_pixsize = 16,
.min_out_pixsize = 16,
.hor_offs_align = 2,
+ .min_vsize_align = 1,
.out_buf_count = 32,
.pix_limit = &s5p_pix_limit[3],
};
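
get_m2m_fmt_flags() maps the queue direction to the new per-direction format flags, so format enumeration and try/s_fmt only offer formats valid for that side of the mem-to-mem device; FMT_FLAGS_M2M is now simply the OR of the two. A small sketch, using stand-in names for the V4L2 buffer types:

#include <stdio.h>

#define FMT_FLAGS_CAM		(1 << 0)
#define FMT_FLAGS_M2M_IN	(1 << 1)	/* usable on the OUTPUT (source) queue */
#define FMT_FLAGS_M2M_OUT	(1 << 2)	/* usable on the CAPTURE (destination) queue */
#define FMT_FLAGS_M2M		(FMT_FLAGS_M2M_IN | FMT_FLAGS_M2M_OUT)

enum buf_type { BUF_OUTPUT_MPLANE, BUF_CAPTURE_MPLANE };	/* stand-ins for V4L2 types */

static unsigned int m2m_fmt_flags(enum buf_type type)
{
	return type == BUF_OUTPUT_MPLANE ? FMT_FLAGS_M2M_IN : FMT_FLAGS_M2M_OUT;
}

int main(void)
{
	unsigned int argb1555_flags = FMT_FLAGS_M2M_OUT;	/* output-only format */

	/* ARGB1555 can be produced on the capture queue but not fed in. */
	printf("as source: %d, as destination: %d\n",
	       !!(argb1555_flags & m2m_fmt_flags(BUF_OUTPUT_MPLANE)),
	       !!(argb1555_flags & m2m_fmt_flags(BUF_CAPTURE_MPLANE)));
	return 0;
}
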
diff --git a/drivers/media/video/s5p-fimc/fimc-core.h b/drivers/media/video/s5p-fimc/fimc-core.h
index a6936dad5b10..4e20560c73d4 100644
--- a/drivers/media/video/s5p-fimc/fimc-core.h
+++ b/drivers/media/video/s5p-fimc/fimc-core.h
@@ -85,7 +85,9 @@ enum fimc_datapath {
};
enum fimc_color_fmt {
- S5P_FIMC_RGB565 = 0x10,
+ S5P_FIMC_RGB444 = 0x10,
+ S5P_FIMC_RGB555,
+ S5P_FIMC_RGB565,
S5P_FIMC_RGB666,
S5P_FIMC_RGB888,
S5P_FIMC_RGB30_LOCAL,
@@ -160,8 +162,11 @@ struct fimc_fmt {
u16 colplanes;
u8 depth[VIDEO_MAX_PLANES];
u16 flags;
-#define FMT_FLAGS_CAM (1 << 0)
-#define FMT_FLAGS_M2M (1 << 1)
+#define FMT_FLAGS_CAM (1 << 0)
+#define FMT_FLAGS_M2M_IN (1 << 1)
+#define FMT_FLAGS_M2M_OUT (1 << 2)
+#define FMT_FLAGS_M2M (1 << 1 | 1 << 2)
+#define FMT_HAS_ALPHA (1 << 3)
};
/**
@@ -283,6 +288,7 @@ struct fimc_frame {
struct fimc_addr paddr;
struct fimc_dma_offset dma_offset;
struct fimc_fmt *fmt;
+ u8 alpha;
};
/**
@@ -377,6 +383,7 @@ struct fimc_pix_limit {
* @min_inp_pixsize: minimum input pixel size
* @min_out_pixsize: minimum output pixel size
* @hor_offs_align: horizontal pixel offset alignment
+ * @min_vsize_align: minimum vertical pixel size alignment
* @out_buf_count: the number of buffers in output DMA sequence
*/
struct samsung_fimc_variant {
@@ -386,10 +393,12 @@ struct samsung_fimc_variant {
unsigned int has_cistatus2:1;
unsigned int has_mainscaler_ext:1;
unsigned int has_cam_if:1;
+ unsigned int has_alpha:1;
struct fimc_pix_limit *pix_limit;
u16 min_inp_pixsize;
u16 min_out_pixsize;
u16 hor_offs_align;
+ u16 min_vsize_align;
u16 out_buf_count;
};
@@ -480,7 +489,8 @@ struct fimc_dev {
* @ctrl_handler: v4l2 controls handler
* @ctrl_rotate image rotation control
* @ctrl_hflip horizontal flip control
- * @ctrl_vflip vartical flip control
+ * @ctrl_vflip vertical flip control
+ * @ctrl_alpha RGB alpha control
* @ctrls_rdy: true if the control handler is initialized
*/
struct fimc_ctx {
@@ -507,6 +517,7 @@ struct fimc_ctx {
struct v4l2_ctrl *ctrl_rotate;
struct v4l2_ctrl *ctrl_hflip;
struct v4l2_ctrl *ctrl_vflip;
+ struct v4l2_ctrl *ctrl_alpha;
bool ctrls_rdy;
};
@@ -576,6 +587,17 @@ static inline int tiled_fmt(struct fimc_fmt *fmt)
return fmt->fourcc == V4L2_PIX_FMT_NV12MT;
}
+/* Return the alpha component bit mask */
+static inline int fimc_get_alpha_mask(struct fimc_fmt *fmt)
+{
+ switch (fmt->color) {
+ case S5P_FIMC_RGB444: return 0x0f;
+ case S5P_FIMC_RGB555: return 0x01;
+ case S5P_FIMC_RGB888: return 0xff;
+ default: return 0;
+ };
+}
+
static inline void fimc_hw_clear_irq(struct fimc_dev *dev)
{
u32 cfg = readl(dev->regs + S5P_CIGCTRL);
@@ -672,6 +694,7 @@ void fimc_hw_set_prescaler(struct fimc_ctx *ctx);
void fimc_hw_set_mainscaler(struct fimc_ctx *ctx);
void fimc_hw_en_capture(struct fimc_ctx *ctx);
void fimc_hw_set_effect(struct fimc_ctx *ctx, bool active);
+void fimc_hw_set_rgb_alpha(struct fimc_ctx *ctx);
void fimc_hw_set_in_dma(struct fimc_ctx *ctx);
void fimc_hw_set_input_path(struct fimc_ctx *ctx);
void fimc_hw_set_output_path(struct fimc_ctx *ctx);
@@ -693,6 +716,7 @@ int fimc_vidioc_enum_fmt_mplane(struct file *file, void *priv,
int fimc_ctrls_create(struct fimc_ctx *ctx);
void fimc_ctrls_delete(struct fimc_ctx *ctx);
void fimc_ctrls_activate(struct fimc_ctx *ctx, bool active);
+void fimc_alpha_ctrl_update(struct fimc_ctx *ctx);
int fimc_fill_format(struct fimc_frame *frame, struct v4l2_format *f);
void fimc_adjust_mplane_format(struct fimc_fmt *fmt, u32 width, u32 height,
struct v4l2_pix_format_mplane *pix);
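
fimc_get_alpha_mask() gives the widest alpha value each destination format can carry, and fimc_alpha_ctrl_update() uses it as the new maximum of the V4L2_CID_ALPHA_COMPONENT control, clamping the current value when a narrower format is selected. A userspace sketch of that behaviour, with stand-in enum names:

#include <stdio.h>

enum color_fmt { RGB444, RGB555, RGB565, RGB888 };	/* stand-ins for S5P_FIMC_* */

/* Widest alpha value a destination format can carry (0 = no alpha). */
static unsigned int alpha_mask(enum color_fmt c)
{
	switch (c) {
	case RGB444: return 0x0f;	/* 4-bit alpha */
	case RGB555: return 0x01;	/* 1-bit alpha */
	case RGB888: return 0xff;	/* 8-bit alpha */
	default:     return 0;
	}
}

int main(void)
{
	unsigned int cur = 200;			/* previously set control value */
	unsigned int max = alpha_mask(RGB444);

	if (cur > max)
		cur = max;			/* clamp when switching to a narrower format */
	printf("max=%u cur=%u\n", max, cur);
	return 0;
}
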
diff --git a/drivers/media/video/s5p-fimc/fimc-mdevice.c b/drivers/media/video/s5p-fimc/fimc-mdevice.c
index cc337b1de913..8ea4ee116e46 100644
--- a/drivers/media/video/s5p-fimc/fimc-mdevice.c
+++ b/drivers/media/video/s5p-fimc/fimc-mdevice.c
@@ -21,7 +21,6 @@
#include <linux/pm_runtime.h>
#include <linux/types.h>
#include <linux/slab.h>
-#include <linux/version.h>
#include <media/v4l2-ctrls.h>
#include <media/media-device.h>
@@ -220,6 +219,7 @@ static struct v4l2_subdev *fimc_md_register_sensor(struct fimc_md *fmd,
sd = v4l2_i2c_new_subdev_board(&fmd->v4l2_dev, adapter,
s_info->pdata->board_info, NULL);
if (IS_ERR_OR_NULL(sd)) {
+ i2c_put_adapter(adapter);
v4l2_err(&fmd->v4l2_dev, "Failed to acquire subdev\n");
return NULL;
}
@@ -234,12 +234,15 @@ static struct v4l2_subdev *fimc_md_register_sensor(struct fimc_md *fmd,
static void fimc_md_unregister_sensor(struct v4l2_subdev *sd)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct i2c_adapter *adapter;
if (!client)
return;
v4l2_device_unregister_subdev(sd);
+ adapter = client->adapter;
i2c_unregister_device(client);
- i2c_put_adapter(client->adapter);
+ if (adapter)
+ i2c_put_adapter(adapter);
}
static int fimc_md_register_sensor_entities(struct fimc_md *fmd)
@@ -381,20 +384,28 @@ static void fimc_md_unregister_entities(struct fimc_md *fmd)
static int fimc_md_register_video_nodes(struct fimc_md *fmd)
{
+ struct video_device *vdev;
int i, ret = 0;
for (i = 0; i < FIMC_MAX_DEVS && !ret; i++) {
if (!fmd->fimc[i])
continue;
- if (fmd->fimc[i]->m2m.vfd)
- ret = video_register_device(fmd->fimc[i]->m2m.vfd,
- VFL_TYPE_GRABBER, -1);
- if (ret)
- break;
- if (fmd->fimc[i]->vid_cap.vfd)
- ret = video_register_device(fmd->fimc[i]->vid_cap.vfd,
- VFL_TYPE_GRABBER, -1);
+ vdev = fmd->fimc[i]->m2m.vfd;
+ if (vdev) {
+ ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ if (ret)
+ break;
+ v4l2_info(&fmd->v4l2_dev, "Registered %s as /dev/%s\n",
+ vdev->name, video_device_node_name(vdev));
+ }
+
+ vdev = fmd->fimc[i]->vid_cap.vfd;
+ if (vdev == NULL)
+ continue;
+ ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ v4l2_info(&fmd->v4l2_dev, "Registered %s as /dev/%s\n",
+ vdev->name, video_device_node_name(vdev));
}
return ret;
@@ -502,7 +513,7 @@ static int fimc_md_create_links(struct fimc_md *fmd)
if (WARN(csis == NULL,
"MIPI-CSI interface specified "
"but s5p-csis module is not loaded!\n"))
- continue;
+ return -EINVAL;
ret = media_entity_create_link(&sensor->entity, 0,
&csis->entity, CSIS_PAD_SINK,
@@ -742,9 +753,6 @@ static int __devinit fimc_md_probe(struct platform_device *pdev)
struct fimc_md *fmd;
int ret;
- if (WARN(!pdev->dev.platform_data, "Platform data not specified!\n"))
- return -EINVAL;
-
fmd = kzalloc(sizeof(struct fimc_md), GFP_KERNEL);
if (!fmd)
return -ENOMEM;
@@ -782,9 +790,11 @@ static int __devinit fimc_md_probe(struct platform_device *pdev)
if (ret)
goto err3;
- ret = fimc_md_register_sensor_entities(fmd);
- if (ret)
- goto err3;
+ if (pdev->dev.platform_data) {
+ ret = fimc_md_register_sensor_entities(fmd);
+ if (ret)
+ goto err3;
+ }
ret = fimc_md_create_links(fmd);
if (ret)
goto err3;
diff --git a/drivers/media/video/s5p-fimc/fimc-reg.c b/drivers/media/video/s5p-fimc/fimc-reg.c
index 20e664e34163..15466d0529c1 100644
--- a/drivers/media/video/s5p-fimc/fimc-reg.c
+++ b/drivers/media/video/s5p-fimc/fimc-reg.c
@@ -35,6 +35,9 @@ void fimc_hw_reset(struct fimc_dev *dev)
cfg = readl(dev->regs + S5P_CIGCTRL);
cfg &= ~S5P_CIGCTRL_SWRST;
writel(cfg, dev->regs + S5P_CIGCTRL);
+
+ if (dev->variant->out_buf_count > 4)
+ fimc_hw_set_dma_seq(dev, 0xF);
}
static u32 fimc_hw_get_in_flip(struct fimc_ctx *ctx)
@@ -114,7 +117,7 @@ void fimc_hw_set_target_format(struct fimc_ctx *ctx)
S5P_CITRGFMT_VSIZE_MASK);
switch (frame->fmt->color) {
- case S5P_FIMC_RGB565...S5P_FIMC_RGB888:
+ case S5P_FIMC_RGB444...S5P_FIMC_RGB888:
cfg |= S5P_CITRGFMT_RGB;
break;
case S5P_FIMC_YCBCR420:
@@ -172,6 +175,7 @@ void fimc_hw_set_out_dma(struct fimc_ctx *ctx)
struct fimc_dev *dev = ctx->fimc_dev;
struct fimc_frame *frame = &ctx->d_frame;
struct fimc_dma_offset *offset = &frame->dma_offset;
+ struct fimc_fmt *fmt = frame->fmt;
/* Set the input dma offsets. */
cfg = 0;
@@ -195,15 +199,22 @@ void fimc_hw_set_out_dma(struct fimc_ctx *ctx)
cfg = readl(dev->regs + S5P_CIOCTRL);
cfg &= ~(S5P_CIOCTRL_ORDER2P_MASK | S5P_CIOCTRL_ORDER422_MASK |
- S5P_CIOCTRL_YCBCR_PLANE_MASK);
+ S5P_CIOCTRL_YCBCR_PLANE_MASK | S5P_CIOCTRL_RGB16FMT_MASK);
- if (frame->fmt->colplanes == 1)
+ if (fmt->colplanes == 1)
cfg |= ctx->out_order_1p;
- else if (frame->fmt->colplanes == 2)
+ else if (fmt->colplanes == 2)
cfg |= ctx->out_order_2p | S5P_CIOCTRL_YCBCR_2PLANE;
- else if (frame->fmt->colplanes == 3)
+ else if (fmt->colplanes == 3)
cfg |= S5P_CIOCTRL_YCBCR_3PLANE;
+ if (fmt->color == S5P_FIMC_RGB565)
+ cfg |= S5P_CIOCTRL_RGB565;
+ else if (fmt->color == S5P_FIMC_RGB555)
+ cfg |= S5P_CIOCTRL_ARGB1555;
+ else if (fmt->color == S5P_FIMC_RGB444)
+ cfg |= S5P_CIOCTRL_ARGB4444;
+
writel(cfg, dev->regs + S5P_CIOCTRL);
}
@@ -251,7 +262,14 @@ static void fimc_hw_set_scaler(struct fimc_ctx *ctx)
struct fimc_scaler *sc = &ctx->scaler;
struct fimc_frame *src_frame = &ctx->s_frame;
struct fimc_frame *dst_frame = &ctx->d_frame;
- u32 cfg = 0;
+
+ u32 cfg = readl(dev->regs + S5P_CISCCTRL);
+
+ cfg &= ~(S5P_CISCCTRL_CSCR2Y_WIDE | S5P_CISCCTRL_CSCY2R_WIDE |
+ S5P_CISCCTRL_SCALEUP_H | S5P_CISCCTRL_SCALEUP_V |
+ S5P_CISCCTRL_SCALERBYPASS | S5P_CISCCTRL_ONE2ONE |
+ S5P_CISCCTRL_INRGB_FMT_MASK | S5P_CISCCTRL_OUTRGB_FMT_MASK |
+ S5P_CISCCTRL_INTERLACE | S5P_CISCCTRL_RGB_EXT);
if (!(ctx->flags & FIMC_COLOR_RANGE_NARROW))
cfg |= (S5P_CISCCTRL_CSCR2Y_WIDE | S5P_CISCCTRL_CSCY2R_WIDE);
@@ -268,22 +286,28 @@ static void fimc_hw_set_scaler(struct fimc_ctx *ctx)
if (sc->copy_mode)
cfg |= S5P_CISCCTRL_ONE2ONE;
-
if (ctx->in_path == FIMC_DMA) {
- if (src_frame->fmt->color == S5P_FIMC_RGB565)
+ switch (src_frame->fmt->color) {
+ case S5P_FIMC_RGB565:
cfg |= S5P_CISCCTRL_INRGB_FMT_RGB565;
- else if (src_frame->fmt->color == S5P_FIMC_RGB666)
+ break;
+ case S5P_FIMC_RGB666:
cfg |= S5P_CISCCTRL_INRGB_FMT_RGB666;
- else if (src_frame->fmt->color == S5P_FIMC_RGB888)
+ break;
+ case S5P_FIMC_RGB888:
cfg |= S5P_CISCCTRL_INRGB_FMT_RGB888;
+ break;
+ }
}
if (ctx->out_path == FIMC_DMA) {
- if (dst_frame->fmt->color == S5P_FIMC_RGB565)
+ u32 color = dst_frame->fmt->color;
+
+ if (color >= S5P_FIMC_RGB444 && color <= S5P_FIMC_RGB565)
cfg |= S5P_CISCCTRL_OUTRGB_FMT_RGB565;
- else if (dst_frame->fmt->color == S5P_FIMC_RGB666)
+ else if (color == S5P_FIMC_RGB666)
cfg |= S5P_CISCCTRL_OUTRGB_FMT_RGB666;
- else if (dst_frame->fmt->color == S5P_FIMC_RGB888)
+ else if (color == S5P_FIMC_RGB888)
cfg |= S5P_CISCCTRL_OUTRGB_FMT_RGB888;
} else {
cfg |= S5P_CISCCTRL_OUTRGB_FMT_RGB888;
@@ -308,9 +332,9 @@ void fimc_hw_set_mainscaler(struct fimc_ctx *ctx)
fimc_hw_set_scaler(ctx);
cfg = readl(dev->regs + S5P_CISCCTRL);
+ cfg &= ~(S5P_CISCCTRL_MHRATIO_MASK | S5P_CISCCTRL_MVRATIO_MASK);
if (variant->has_mainscaler_ext) {
- cfg &= ~(S5P_CISCCTRL_MHRATIO_MASK | S5P_CISCCTRL_MVRATIO_MASK);
cfg |= S5P_CISCCTRL_MHRATIO_EXT(sc->main_hratio);
cfg |= S5P_CISCCTRL_MVRATIO_EXT(sc->main_vratio);
writel(cfg, dev->regs + S5P_CISCCTRL);
@@ -323,7 +347,6 @@ void fimc_hw_set_mainscaler(struct fimc_ctx *ctx)
cfg |= S5P_CIEXTEN_MVRATIO_EXT(sc->main_vratio);
writel(cfg, dev->regs + S5P_CIEXTEN);
} else {
- cfg &= ~(S5P_CISCCTRL_MHRATIO_MASK | S5P_CISCCTRL_MVRATIO_MASK);
cfg |= S5P_CISCCTRL_MHRATIO(sc->main_hratio);
cfg |= S5P_CISCCTRL_MVRATIO(sc->main_vratio);
writel(cfg, dev->regs + S5P_CISCCTRL);
@@ -370,6 +393,21 @@ void fimc_hw_set_effect(struct fimc_ctx *ctx, bool active)
writel(cfg, dev->regs + S5P_CIIMGEFF);
}
+void fimc_hw_set_rgb_alpha(struct fimc_ctx *ctx)
+{
+ struct fimc_dev *dev = ctx->fimc_dev;
+ struct fimc_frame *frame = &ctx->d_frame;
+ u32 cfg;
+
+ if (!(frame->fmt->flags & FMT_HAS_ALPHA))
+ return;
+
+ cfg = readl(dev->regs + S5P_CIOCTRL);
+ cfg &= ~S5P_CIOCTRL_ALPHA_OUT_MASK;
+ cfg |= (frame->alpha << 4);
+ writel(cfg, dev->regs + S5P_CIOCTRL);
+}
+
static void fimc_hw_set_in_dma_size(struct fimc_ctx *ctx)
{
struct fimc_dev *dev = ctx->fimc_dev;
diff --git a/drivers/media/video/s5p-fimc/mipi-csis.c b/drivers/media/video/s5p-fimc/mipi-csis.c
index 59d79bc2f58a..130335cf62fd 100644
--- a/drivers/media/video/s5p-fimc/mipi-csis.c
+++ b/drivers/media/video/s5p-fimc/mipi-csis.c
@@ -427,6 +427,23 @@ static int s5pcsis_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
return 0;
}
+static int s5pcsis_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct v4l2_mbus_framefmt *format = v4l2_subdev_get_try_format(fh, 0);
+
+ format->colorspace = V4L2_COLORSPACE_JPEG;
+ format->code = s5pcsis_formats[0].code;
+ format->width = S5PCSIS_DEF_PIX_WIDTH;
+ format->height = S5PCSIS_DEF_PIX_HEIGHT;
+ format->field = V4L2_FIELD_NONE;
+
+ return 0;
+}
+
+static const struct v4l2_subdev_internal_ops s5pcsis_sd_internal_ops = {
+ .open = s5pcsis_open,
+};
+
static struct v4l2_subdev_core_ops s5pcsis_core_ops = {
.s_power = s5pcsis_s_power,
};
@@ -544,8 +561,13 @@ static int __devinit s5pcsis_probe(struct platform_device *pdev)
v4l2_subdev_init(&state->sd, &s5pcsis_subdev_ops);
state->sd.owner = THIS_MODULE;
strlcpy(state->sd.name, dev_name(&pdev->dev), sizeof(state->sd.name));
+ state->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
state->csis_fmt = &s5pcsis_formats[0];
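+ /* Set the initial active format: first supported media bus code at the default frame size */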
+ state->format.code = s5pcsis_formats[0].code;
+ state->format.width = S5PCSIS_DEF_PIX_WIDTH;
+ state->format.height = S5PCSIS_DEF_PIX_HEIGHT;
+
state->pads[CSIS_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
state->pads[CSIS_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_init(&state->sd.entity,
diff --git a/drivers/media/video/s5p-fimc/mipi-csis.h b/drivers/media/video/s5p-fimc/mipi-csis.h
index f5691336dd5c..2709286396e1 100644
--- a/drivers/media/video/s5p-fimc/mipi-csis.h
+++ b/drivers/media/video/s5p-fimc/mipi-csis.h
@@ -19,4 +19,7 @@
#define CSIS_PAD_SOURCE 1
#define CSIS_PADS_NUM 2
+#define S5PCSIS_DEF_PIX_WIDTH 640
+#define S5PCSIS_DEF_PIX_HEIGHT 480
+
#endif
diff --git a/drivers/media/video/s5p-fimc/regs-fimc.h b/drivers/media/video/s5p-fimc/regs-fimc.h
index c8e3b94bd91d..c7a5bc51d571 100644
--- a/drivers/media/video/s5p-fimc/regs-fimc.h
+++ b/drivers/media/video/s5p-fimc/regs-fimc.h
@@ -107,6 +107,11 @@
#define S5P_CIOCTRL_YCBCR_3PLANE (0 << 3)
#define S5P_CIOCTRL_YCBCR_2PLANE (1 << 3)
#define S5P_CIOCTRL_YCBCR_PLANE_MASK (1 << 3)
+#define S5P_CIOCTRL_ALPHA_OUT_MASK (0xff << 4)
+#define S5P_CIOCTRL_RGB16FMT_MASK (3 << 16)
+#define S5P_CIOCTRL_RGB565 (0 << 16)
+#define S5P_CIOCTRL_ARGB1555 (1 << 16)
+#define S5P_CIOCTRL_ARGB4444 (2 << 16)
#define S5P_CIOCTRL_ORDER2P_SHIFT (24)
#define S5P_CIOCTRL_ORDER2P_MASK (3 << 24)
#define S5P_CIOCTRL_ORDER422_2P_LSB_CRCB (0 << 24)
diff --git a/drivers/media/video/s5p-g2d/Makefile b/drivers/media/video/s5p-g2d/Makefile
new file mode 100644
index 000000000000..2c48c416a804
--- /dev/null
+++ b/drivers/media/video/s5p-g2d/Makefile
@@ -0,0 +1,3 @@
+s5p-g2d-objs := g2d.o g2d-hw.o
+
+obj-$(CONFIG_VIDEO_SAMSUNG_S5P_G2D) += s5p-g2d.o
diff --git a/drivers/media/video/s5p-g2d/g2d-hw.c b/drivers/media/video/s5p-g2d/g2d-hw.c
new file mode 100644
index 000000000000..39937cf03c88
--- /dev/null
+++ b/drivers/media/video/s5p-g2d/g2d-hw.c
@@ -0,0 +1,104 @@
+/*
+ * Samsung S5P G2D - 2D Graphics Accelerator Driver
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Kamil Debski, <k.debski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version
+ */
+
+#include <linux/io.h>
+
+#include "g2d.h"
+#include "g2d-regs.h"
+
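+/* Register access shorthands; 'd' must be a struct g2d_dev pointer in scope at the call site */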
+#define w(x, a) writel((x), d->regs + (a))
+#define r(a) readl(d->regs + (a))
+
+/* g2d_reset triggers a software reset of the G2D block */
+void g2d_reset(struct g2d_dev *d)
+{
+ w(1, SOFT_RESET_REG);
+}
+
+void g2d_set_src_size(struct g2d_dev *d, struct g2d_frame *f)
+{
+ u32 n;
+
+ w(f->stride & 0xFFFF, SRC_STRIDE_REG);
+
+ n = f->o_height & 0xFFF;
+ n <<= 16;
+ n |= f->o_width & 0xFFF;
+ w(n, SRC_LEFT_TOP_REG);
+
+ n = f->bottom & 0xFFF;
+ n <<= 16;
+ n |= f->right & 0xFFF;
+ w(n, SRC_RIGHT_BOTTOM_REG);
+
+ w(f->fmt->hw, SRC_COLOR_MODE_REG);
+}
+
+void g2d_set_src_addr(struct g2d_dev *d, dma_addr_t a)
+{
+ w(a, SRC_BASE_ADDR_REG);
+}
+
+void g2d_set_dst_size(struct g2d_dev *d, struct g2d_frame *f)
+{
+ u32 n;
+
+ w(f->stride & 0xFFFF, DST_STRIDE_REG);
+
+ n = f->o_height & 0xFFF;
+ n <<= 16;
+ n |= f->o_width & 0xFFF;
+ w(n, DST_LEFT_TOP_REG);
+
+ n = f->bottom & 0xFFF;
+ n <<= 16;
+ n |= f->right & 0xFFF;
+ w(n, DST_RIGHT_BOTTOM_REG);
+
+ w(f->fmt->hw, DST_COLOR_MODE_REG);
+}
+
+void g2d_set_dst_addr(struct g2d_dev *d, dma_addr_t a)
+{
+ w(a, DST_BASE_ADDR_REG);
+}
+
+void g2d_set_rop4(struct g2d_dev *d, u32 r)
+{
+ w(r, ROP4_REG);
+}
+
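+/* Return the BitBLT command stretch-enable bit (bit 4) when 'e' is non-zero */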
+u32 g2d_cmd_stretch(u32 e)
+{
+ e &= 1;
+ return e << 4;
+}
+
+void g2d_set_cmd(struct g2d_dev *d, u32 c)
+{
+ w(c, BITBLT_COMMAND_REG);
+}
+
+void g2d_start(struct g2d_dev *d)
+{
+ /* Clear cache */
+ w(0x7, CACHECTL_REG);
+ /* Enable interrupt */
+ w(1, INTEN_REG);
+ /* Start G2D engine */
+ w(1, BITBLT_START_REG);
+}
+
+void g2d_clear_int(struct g2d_dev *d)
+{
+ w(1, INTC_PEND_REG);
+}
diff --git a/drivers/media/video/s5p-g2d/g2d-regs.h b/drivers/media/video/s5p-g2d/g2d-regs.h
new file mode 100644
index 000000000000..02e1cf50da4e
--- /dev/null
+++ b/drivers/media/video/s5p-g2d/g2d-regs.h
@@ -0,0 +1,115 @@
+/*
+ * Samsung S5P G2D - 2D Graphics Accelerator Driver
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Kamil Debski, <k.debski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version
+ */
+
+/* General Registers */
+#define SOFT_RESET_REG 0x0000 /* Software reset reg */
+#define INTEN_REG 0x0004 /* Interrupt Enable reg */
+#define INTC_PEND_REG 0x000C /* Interrupt Control Pending reg */
+#define FIFO_STAT_REG 0x0010 /* Command FIFO Status reg */
+#define AXI_ID_MODE_REG 0x0014 /* AXI Read ID Mode reg */
+#define CACHECTL_REG 0x0018 /* Cache & Buffer clear reg */
+#define AXI_MODE_REG 0x001C /* AXI Mode reg */
+
+/* Command Registers */
+#define BITBLT_START_REG 0x0100 /* BitBLT Start reg */
+#define BITBLT_COMMAND_REG 0x0104 /* Command reg for BitBLT */
+
+/* Parameter Setting Registers (Rotate & Direction) */
+#define ROTATE_REG 0x0200 /* Rotation reg */
+#define SRC_MSK_DIRECT_REG 0x0204 /* Src and Mask Direction reg */
+#define DST_PAT_DIRECT_REG 0x0208 /* Dest and Pattern Direction reg */
+
+/* Parameter Setting Registers (Src) */
+#define SRC_SELECT_REG 0x0300 /* Src Image Selection reg */
+#define SRC_BASE_ADDR_REG 0x0304 /* Src Image Base Address reg */
+#define SRC_STRIDE_REG 0x0308 /* Src Stride reg */
+#define SRC_COLOR_MODE_REG 0x030C /* Src Image Color Mode reg */
+#define SRC_LEFT_TOP_REG 0x0310 /* Src Left Top Coordinate reg */
+#define SRC_RIGHT_BOTTOM_REG 0x0314 /* Src Right Bottom Coordinate reg */
+
+/* Parameter Setting Registers (Dest) */
+#define DST_SELECT_REG 0x0400 /* Dest Image Selection reg */
+#define DST_BASE_ADDR_REG 0x0404 /* Dest Image Base Address reg */
+#define DST_STRIDE_REG 0x0408 /* Dest Stride reg */
+#define DST_COLOR_MODE_REG 0x040C /* Dest Image Color Mode reg */
+#define DST_LEFT_TOP_REG 0x0410 /* Dest Left Top Coordinate reg */
+#define DST_RIGHT_BOTTOM_REG 0x0414 /* Dest Right Bottom Coordinate reg */
+
+/* Parameter Setting Registers (Pattern) */
+#define PAT_BASE_ADDR_REG 0x0500 /* Pattern Image Base Address reg */
+#define PAT_SIZE_REG 0x0504 /* Pattern Image Size reg */
+#define PAT_COLOR_MODE_REG 0x0508 /* Pattern Image Color Mode reg */
+#define PAT_OFFSET_REG 0x050C /* Pattern Left Top Coordinate reg */
+#define PAT_STRIDE_REG 0x0510 /* Pattern Stride reg */
+
+/* Parameter Setting Registers (Mask) */
+#define MASK_BASE_ADDR_REG 0x0520 /* Mask Base Address reg */
+#define MASK_STRIDE_REG 0x0524 /* Mask Stride reg */
+
+/* Parameter Setting Registers (Clipping Window) */
+#define CW_LT_REG 0x0600 /* LeftTop coordinates of Clip Window */
+#define CW_RB_REG 0x0604 /* RightBottom coordinates of Clip
+ Window */
+
+/* Parameter Setting Registers (ROP & Alpha Setting) */
+#define THIRD_OPERAND_REG 0x0610 /* Third Operand Selection reg */
+#define ROP4_REG 0x0614 /* Raster Operation reg */
+#define ALPHA_REG 0x0618 /* Alpha value, Fading offset value */
+
+/* Parameter Setting Registers (Color) */
+#define FG_COLOR_REG 0x0700 /* Foreground Color reg */
+#define BG_COLOR_REG 0x0704 /* Background Color reg */
+#define BS_COLOR_REG 0x0708 /* Blue Screen Color reg */
+
+/* Parameter Setting Registers (Color Key) */
+#define SRC_COLORKEY_CTRL_REG 0x0710 /* Src Colorkey control reg */
+#define SRC_COLORKEY_DR_MIN_REG 0x0714 /* Src Colorkey Decision Reference
+ Min reg */
+#define SRC_COLORKEY_DR_MAX_REG 0x0718 /* Src Colorkey Decision Reference
+ Max reg */
+#define DST_COLORKEY_CTRL_REG 0x071C /* Dest Colorkey control reg */
+#define DST_COLORKEY_DR_MIN_REG 0x0720 /* Dest Colorkey Decision Reference
+ Min reg */
+#define DST_COLORKEY_DR_MAX_REG 0x0724 /* Dest Colorkey Decision Reference
+ Max reg */
+
+/* Color mode values */
+
+#define ORDER_XRGB 0
+#define ORDER_RGBX 1
+#define ORDER_XBGR 2
+#define ORDER_BGRX 3
+
+#define MODE_XRGB_8888 0
+#define MODE_ARGB_8888 1
+#define MODE_RGB_565 2
+#define MODE_XRGB_1555 3
+#define MODE_ARGB_1555 4
+#define MODE_XRGB_4444 5
+#define MODE_ARGB_4444 6
+#define MODE_PACKED_RGB_888 7
+
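+/* Compose a color mode value: channel order in the high nibble, pixel mode in the low nibble */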
+#define COLOR_MODE(o, m) (((o) << 4) | (m))
+
+/* ROP4 operation values */
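+/* 0xCCCC copies the source unchanged, 0x3333 writes its bitwise inverse */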
+#define ROP4_COPY 0xCCCC
+#define ROP4_INVERT 0x3333
+
+/* Hardware limits */
+#define MAX_WIDTH 8000
+#define MAX_HEIGHT 8000
+
+#define G2D_TIMEOUT 500
+
+#define DEFAULT_WIDTH 100
+#define DEFAULT_HEIGHT 100
+
diff --git a/drivers/media/video/s5p-g2d/g2d.c b/drivers/media/video/s5p-g2d/g2d.c
new file mode 100644
index 000000000000..febaa673d363
--- /dev/null
+++ b/drivers/media/video/s5p-g2d/g2d.c
@@ -0,0 +1,811 @@
+/*
+ * Samsung S5P G2D - 2D Graphics Accelerator Driver
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Kamil Debski, <k.debski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version
+ */
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/version.h>
+#include <linux/timer.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+
+#include <linux/platform_device.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "g2d.h"
+#include "g2d-regs.h"
+
+#define fh2ctx(__fh) container_of(__fh, struct g2d_ctx, fh)
+
+static struct g2d_fmt formats[] = {
+ {
+ .name = "XRGB_8888",
+ .fourcc = V4L2_PIX_FMT_RGB32,
+ .depth = 32,
+ .hw = COLOR_MODE(ORDER_XRGB, MODE_XRGB_8888),
+ },
+ {
+ .name = "RGB_565",
+ .fourcc = V4L2_PIX_FMT_RGB565X,
+ .depth = 16,
+ .hw = COLOR_MODE(ORDER_XRGB, MODE_RGB_565),
+ },
+ {
+ .name = "XRGB_1555",
+ .fourcc = V4L2_PIX_FMT_RGB555X,
+ .depth = 16,
+ .hw = COLOR_MODE(ORDER_XRGB, MODE_XRGB_1555),
+ },
+ {
+ .name = "XRGB_4444",
+ .fourcc = V4L2_PIX_FMT_RGB444,
+ .depth = 16,
+ .hw = COLOR_MODE(ORDER_XRGB, MODE_XRGB_4444),
+ },
+ {
+ .name = "PACKED_RGB_888",
+ .fourcc = V4L2_PIX_FMT_RGB24,
+ .depth = 24,
+ .hw = COLOR_MODE(ORDER_XRGB, MODE_PACKED_RGB_888),
+ },
+};
+#define NUM_FORMATS ARRAY_SIZE(formats)
+
+struct g2d_frame def_frame = {
+ .width = DEFAULT_WIDTH,
+ .height = DEFAULT_HEIGHT,
+ .c_width = DEFAULT_WIDTH,
+ .c_height = DEFAULT_HEIGHT,
+ .o_width = 0,
+ .o_height = 0,
+ .fmt = &formats[0],
+ .right = DEFAULT_WIDTH,
+ .bottom = DEFAULT_HEIGHT,
+};
+
+struct g2d_fmt *find_fmt(struct v4l2_format *f)
+{
+ unsigned int i;
+ for (i = 0; i < NUM_FORMATS; i++) {
+ if (formats[i].fourcc == f->fmt.pix.pixelformat)
+ return &formats[i];
+ }
+ return NULL;
+}
+
+
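+/* Map a V4L2 buffer type to the input or output frame of this context */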
+static struct g2d_frame *get_frame(struct g2d_ctx *ctx,
+ enum v4l2_buf_type type)
+{
+ switch (type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ return &ctx->in;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ return &ctx->out;
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+}
+
+static int g2d_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], void *alloc_ctxs[])
+{
+ struct g2d_ctx *ctx = vb2_get_drv_priv(vq);
+ struct g2d_frame *f = get_frame(ctx, vq->type);
+
+ if (IS_ERR(f))
+ return PTR_ERR(f);
+
+ sizes[0] = f->size;
+ *nplanes = 1;
+ alloc_ctxs[0] = ctx->dev->alloc_ctx;
+
+ if (*nbuffers == 0)
+ *nbuffers = 1;
+
+ return 0;
+}
+
+static int g2d_buf_prepare(struct vb2_buffer *vb)
+{
+ struct g2d_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct g2d_frame *f = get_frame(ctx, vb->vb2_queue->type);
+
+ if (IS_ERR(f))
+ return PTR_ERR(f);
+ vb2_set_plane_payload(vb, 0, f->size);
+ return 0;
+}
+
+static void g2d_buf_queue(struct vb2_buffer *vb)
+{
+ struct g2d_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
+}
+
+
+static struct vb2_ops g2d_qops = {
+ .queue_setup = g2d_queue_setup,
+ .buf_prepare = g2d_buf_prepare,
+ .buf_queue = g2d_buf_queue,
+};
+
+static int queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct g2d_ctx *ctx = priv;
+ int ret;
+
+ memset(src_vq, 0, sizeof(*src_vq));
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ src_vq->io_modes = VB2_MMAP | VB2_USERPTR;
+ src_vq->drv_priv = ctx;
+ src_vq->ops = &g2d_qops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ memset(dst_vq, 0, sizeof(*dst_vq));
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ dst_vq->io_modes = VB2_MMAP | VB2_USERPTR;
+ dst_vq->drv_priv = ctx;
+ dst_vq->ops = &g2d_qops;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+
+ return vb2_queue_init(dst_vq);
+}
+
+static int g2d_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct g2d_ctx *ctx = container_of(ctrl->handler, struct g2d_ctx,
+ ctrl_handler);
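+ /* The NEGATIVE color effect is implemented with the invert raster operation */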
+ switch (ctrl->id) {
+ case V4L2_CID_COLORFX:
+ if (ctrl->val == V4L2_COLORFX_NEGATIVE)
+ ctx->rop = ROP4_INVERT;
+ else
+ ctx->rop = ROP4_COPY;
+ break;
+ default:
+ v4l2_err(&ctx->dev->v4l2_dev, "unknown control\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops g2d_ctrl_ops = {
+ .s_ctrl = g2d_s_ctrl,
+};
+
+int g2d_setup_ctrls(struct g2d_ctx *ctx)
+{
+ struct g2d_dev *dev = ctx->dev;
+
+ v4l2_ctrl_handler_init(&ctx->ctrl_handler, 1);
+ if (ctx->ctrl_handler.error) {
+ v4l2_err(&dev->v4l2_dev, "v4l2_ctrl_handler_init failed\n");
+ return ctx->ctrl_handler.error;
+ }
+
+ v4l2_ctrl_new_std_menu(
+ &ctx->ctrl_handler,
+ &g2d_ctrl_ops,
+ V4L2_CID_COLORFX,
+ V4L2_COLORFX_NEGATIVE,
+ ~((1 << V4L2_COLORFX_NONE) | (1 << V4L2_COLORFX_NEGATIVE)),
+ V4L2_COLORFX_NONE);
+
+ if (ctx->ctrl_handler.error) {
+ v4l2_err(&dev->v4l2_dev, "v4l2_ctrl_new_std_menu failed\n");
+ return ctx->ctrl_handler.error;
+ }
+
+ return 0;
+}
+
+static int g2d_open(struct file *file)
+{
+ struct g2d_dev *dev = video_drvdata(file);
+ struct g2d_ctx *ctx = NULL;
+ int ret = 0;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+ ctx->dev = dev;
+ /* Set default formats */
+ ctx->in = def_frame;
+ ctx->out = def_frame;
+
+ ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &queue_init);
+ if (IS_ERR(ctx->m2m_ctx)) {
+ ret = PTR_ERR(ctx->m2m_ctx);
+ kfree(ctx);
+ return ret;
+ }
+ v4l2_fh_init(&ctx->fh, video_devdata(file));
+ file->private_data = &ctx->fh;
+ v4l2_fh_add(&ctx->fh);
+
+ g2d_setup_ctrls(ctx);
+
+ /* Write the default values to the ctx struct */
+ v4l2_ctrl_handler_setup(&ctx->ctrl_handler);
+
+ ctx->fh.ctrl_handler = &ctx->ctrl_handler;
+
+ v4l2_info(&dev->v4l2_dev, "instance opened\n");
+ return 0;
+}
+
+static int g2d_release(struct file *file)
+{
+ struct g2d_dev *dev = video_drvdata(file);
+ struct g2d_ctx *ctx = fh2ctx(file->private_data);
+
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ kfree(ctx);
+ v4l2_info(&dev->v4l2_dev, "instance closed\n");
+ return 0;
+}
+
+
+static int vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ strncpy(cap->driver, G2D_NAME, sizeof(cap->driver) - 1);
+ strncpy(cap->card, G2D_NAME, sizeof(cap->card) - 1);
+ cap->bus_info[0] = 0;
+ cap->version = KERNEL_VERSION(1, 0, 0);
+ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT
+ | V4L2_CAP_STREAMING;
+ return 0;
+}
+
+static int vidioc_enum_fmt(struct file *file, void *prv, struct v4l2_fmtdesc *f)
+{
+ struct g2d_fmt *fmt;
+ if (f->index >= NUM_FORMATS)
+ return -EINVAL;
+ fmt = &formats[f->index];
+ f->pixelformat = fmt->fourcc;
+ strncpy(f->description, fmt->name, sizeof(f->description) - 1);
+ return 0;
+}
+
+static int vidioc_g_fmt(struct file *file, void *prv, struct v4l2_format *f)
+{
+ struct g2d_ctx *ctx = prv;
+ struct vb2_queue *vq;
+ struct g2d_frame *frm;
+
+ vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+ frm = get_frame(ctx, f->type);
+ if (IS_ERR(frm))
+ return PTR_ERR(frm);
+
+ f->fmt.pix.width = frm->width;
+ f->fmt.pix.height = frm->height;
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+ f->fmt.pix.pixelformat = frm->fmt->fourcc;
+ f->fmt.pix.bytesperline = (frm->width * frm->fmt->depth) >> 3;
+ f->fmt.pix.sizeimage = frm->size;
+ return 0;
+}
+
+static int vidioc_try_fmt(struct file *file, void *prv, struct v4l2_format *f)
+{
+ struct g2d_fmt *fmt;
+ enum v4l2_field *field;
+
+ fmt = find_fmt(f);
+ if (!fmt)
+ return -EINVAL;
+
+ field = &f->fmt.pix.field;
+ if (*field == V4L2_FIELD_ANY)
+ *field = V4L2_FIELD_NONE;
+ else if (*field != V4L2_FIELD_NONE)
+ return -EINVAL;
+
+ if (f->fmt.pix.width > MAX_WIDTH)
+ f->fmt.pix.width = MAX_WIDTH;
+ if (f->fmt.pix.height > MAX_HEIGHT)
+ f->fmt.pix.height = MAX_HEIGHT;
+
+ if (f->fmt.pix.width < 1)
+ f->fmt.pix.width = 1;
+ if (f->fmt.pix.height < 1)
+ f->fmt.pix.height = 1;
+
+ f->fmt.pix.bytesperline = (f->fmt.pix.width * fmt->depth) >> 3;
+ f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
+ return 0;
+}
+
+static int vidioc_s_fmt(struct file *file, void *prv, struct v4l2_format *f)
+{
+ struct g2d_ctx *ctx = prv;
+ struct g2d_dev *dev = ctx->dev;
+ struct vb2_queue *vq;
+ struct g2d_frame *frm;
+ struct g2d_fmt *fmt;
+ int ret = 0;
+
+ /* Adjust all values according to the hardware capabilities
+ * and the chosen format. */
+ ret = vidioc_try_fmt(file, prv, f);
+ if (ret)
+ return ret;
+ vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+ if (vb2_is_busy(vq)) {
+ v4l2_err(&dev->v4l2_dev, "queue (%d) busy\n", f->type);
+ return -EBUSY;
+ }
+ frm = get_frame(ctx, f->type);
+ if (IS_ERR(frm))
+ return PTR_ERR(frm);
+ fmt = find_fmt(f);
+ if (!fmt)
+ return -EINVAL;
+ frm->width = f->fmt.pix.width;
+ frm->height = f->fmt.pix.height;
+ frm->size = f->fmt.pix.sizeimage;
+ /* Reset crop settings */
+ frm->o_width = 0;
+ frm->o_height = 0;
+ frm->c_width = frm->width;
+ frm->c_height = frm->height;
+ frm->right = frm->width;
+ frm->bottom = frm->height;
+ frm->fmt = fmt;
+ frm->stride = f->fmt.pix.bytesperline;
+ return 0;
+}
+
+static unsigned int g2d_poll(struct file *file, struct poll_table_struct *wait)
+{
+ struct g2d_ctx *ctx = fh2ctx(file->private_data);
+ return v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
+}
+
+static int g2d_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct g2d_ctx *ctx = fh2ctx(file->private_data);
+ return v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
+}
+
+static int vidioc_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *reqbufs)
+{
+ struct g2d_ctx *ctx = priv;
+ return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
+}
+
+static int vidioc_querybuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct g2d_ctx *ctx = priv;
+ return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf);
+}
+
+static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+{
+ struct g2d_ctx *ctx = priv;
+ return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
+}
+
+static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+{
+ struct g2d_ctx *ctx = priv;
+ return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
+}
+
+
+static int vidioc_streamon(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct g2d_ctx *ctx = priv;
+ return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
+}
+
+static int vidioc_streamoff(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct g2d_ctx *ctx = priv;
+ return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
+}
+
+static int vidioc_cropcap(struct file *file, void *priv,
+ struct v4l2_cropcap *cr)
+{
+ struct g2d_ctx *ctx = priv;
+ struct g2d_frame *f;
+
+ f = get_frame(ctx, cr->type);
+ if (IS_ERR(f))
+ return PTR_ERR(f);
+
+ cr->bounds.left = 0;
+ cr->bounds.top = 0;
+ cr->bounds.width = f->width;
+ cr->bounds.height = f->height;
+ cr->defrect = cr->bounds;
+ return 0;
+}
+
+static int vidioc_g_crop(struct file *file, void *prv, struct v4l2_crop *cr)
+{
+ struct g2d_ctx *ctx = prv;
+ struct g2d_frame *f;
+
+ f = get_frame(ctx, cr->type);
+ if (IS_ERR(f))
+ return PTR_ERR(f);
+
+ cr->c.left = f->o_width;
+ cr->c.top = f->o_height;
+ cr->c.width = f->c_width;
+ cr->c.height = f->c_height;
+ return 0;
+}
+
+static int vidioc_try_crop(struct file *file, void *prv, struct v4l2_crop *cr)
+{
+ struct g2d_ctx *ctx = prv;
+ struct g2d_dev *dev = ctx->dev;
+ struct g2d_frame *f;
+
+ f = get_frame(ctx, cr->type);
+ if (IS_ERR(f))
+ return PTR_ERR(f);
+
+ if (cr->c.top < 0 || cr->c.left < 0) {
+ v4l2_err(&dev->v4l2_dev,
+ "doesn't support negative values for top & left\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int vidioc_s_crop(struct file *file, void *prv, struct v4l2_crop *cr)
+{
+ struct g2d_ctx *ctx = prv;
+ struct g2d_frame *f;
+ int ret;
+
+ ret = vidioc_try_crop(file, prv, cr);
+ if (ret)
+ return ret;
+ f = get_frame(ctx, cr->type);
+ if (IS_ERR(f))
+ return PTR_ERR(f);
+
+ f->c_width = cr->c.width;
+ f->c_height = cr->c.height;
+ f->o_width = cr->c.left;
+ f->o_height = cr->c.top;
+ f->bottom = f->o_height + f->c_height;
+ f->right = f->o_width + f->c_width;
+ return 0;
+}
+
+static void g2d_lock(void *prv)
+{
+ struct g2d_ctx *ctx = prv;
+ struct g2d_dev *dev = ctx->dev;
+ mutex_lock(&dev->mutex);
+}
+
+static void g2d_unlock(void *prv)
+{
+ struct g2d_ctx *ctx = prv;
+ struct g2d_dev *dev = ctx->dev;
+ mutex_unlock(&dev->mutex);
+}
+
+static void job_abort(void *prv)
+{
+ struct g2d_ctx *ctx = prv;
+ struct g2d_dev *dev = ctx->dev;
+ int ret;
+
+ if (dev->curr == NULL) /* No job currently running */
+ return;
+
+ ret = wait_event_timeout(dev->irq_queue,
+ dev->curr == NULL,
+ msecs_to_jiffies(G2D_TIMEOUT));
+}
+
+static void device_run(void *prv)
+{
+ struct g2d_ctx *ctx = prv;
+ struct g2d_dev *dev = ctx->dev;
+ struct vb2_buffer *src, *dst;
+ u32 cmd = 0;
+
+ dev->curr = ctx;
+
+ src = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
+ dst = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+
+ clk_enable(dev->gate);
+ g2d_reset(dev);
+
+ g2d_set_src_size(dev, &ctx->in);
+ g2d_set_src_addr(dev, vb2_dma_contig_plane_dma_addr(src, 0));
+
+ g2d_set_dst_size(dev, &ctx->out);
+ g2d_set_dst_addr(dev, vb2_dma_contig_plane_dma_addr(dst, 0));
+
+ g2d_set_rop4(dev, ctx->rop);
+ if (ctx->in.c_width != ctx->out.c_width ||
+ ctx->in.c_height != ctx->out.c_height)
+ cmd |= g2d_cmd_stretch(1);
+ g2d_set_cmd(dev, cmd);
+ g2d_start(dev);
+}
+
+static irqreturn_t g2d_isr(int irq, void *prv)
+{
+ struct g2d_dev *dev = prv;
+ struct g2d_ctx *ctx = dev->curr;
+ struct vb2_buffer *src, *dst;
+
+ g2d_clear_int(dev);
+ clk_disable(dev->gate);
+
+ BUG_ON(ctx == NULL);
+
+ src = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+ dst = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
+
+ BUG_ON(src == NULL);
+ BUG_ON(dst == NULL);
+
+ v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
+ v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
+ v4l2_m2m_job_finish(dev->m2m_dev, ctx->m2m_ctx);
+
+ dev->curr = NULL;
+ wake_up(&dev->irq_queue);
+ return IRQ_HANDLED;
+}
+
+static const struct v4l2_file_operations g2d_fops = {
+ .owner = THIS_MODULE,
+ .open = g2d_open,
+ .release = g2d_release,
+ .poll = g2d_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = g2d_mmap,
+};
+
+static const struct v4l2_ioctl_ops g2d_ioctl_ops = {
+ .vidioc_querycap = vidioc_querycap,
+
+ .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt,
+ .vidioc_g_fmt_vid_cap = vidioc_g_fmt,
+ .vidioc_try_fmt_vid_cap = vidioc_try_fmt,
+ .vidioc_s_fmt_vid_cap = vidioc_s_fmt,
+
+ .vidioc_enum_fmt_vid_out = vidioc_enum_fmt,
+ .vidioc_g_fmt_vid_out = vidioc_g_fmt,
+ .vidioc_try_fmt_vid_out = vidioc_try_fmt,
+ .vidioc_s_fmt_vid_out = vidioc_s_fmt,
+
+ .vidioc_reqbufs = vidioc_reqbufs,
+ .vidioc_querybuf = vidioc_querybuf,
+
+ .vidioc_qbuf = vidioc_qbuf,
+ .vidioc_dqbuf = vidioc_dqbuf,
+
+ .vidioc_streamon = vidioc_streamon,
+ .vidioc_streamoff = vidioc_streamoff,
+
+ .vidioc_g_crop = vidioc_g_crop,
+ .vidioc_s_crop = vidioc_s_crop,
+ .vidioc_cropcap = vidioc_cropcap,
+};
+
+static struct video_device g2d_videodev = {
+ .name = G2D_NAME,
+ .fops = &g2d_fops,
+ .ioctl_ops = &g2d_ioctl_ops,
+ .minor = -1,
+ .release = video_device_release,
+};
+
+static struct v4l2_m2m_ops g2d_m2m_ops = {
+ .device_run = device_run,
+ .job_abort = job_abort,
+ .lock = g2d_lock,
+ .unlock = g2d_unlock,
+};
+
+static int g2d_probe(struct platform_device *pdev)
+{
+ struct g2d_dev *dev;
+ struct video_device *vfd;
+ struct resource *res;
+ int ret = 0;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+ spin_lock_init(&dev->irqlock);
+ mutex_init(&dev->mutex);
+ atomic_set(&dev->num_inst, 0);
+ init_waitqueue_head(&dev->irq_queue);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "failed to find registers\n");
+ ret = -ENOENT;
+ goto free_dev;
+ }
+
+ dev->res_regs = request_mem_region(res->start, resource_size(res),
+ dev_name(&pdev->dev));
+
+ if (!dev->res_regs) {
+ dev_err(&pdev->dev, "failed to obtain register region\n");
+ ret = -ENOENT;
+ goto free_dev;
+ }
+
+ dev->regs = ioremap(res->start, resource_size(res));
+ if (!dev->regs) {
+ dev_err(&pdev->dev, "failed to map registers\n");
+ ret = -ENOENT;
+ goto rel_res_regs;
+ }
+
+ dev->clk = clk_get(&pdev->dev, "sclk_fimg2d");
+ if (IS_ERR_OR_NULL(dev->clk)) {
+ dev_err(&pdev->dev, "failed to get g2d clock\n");
+ ret = -ENXIO;
+ goto unmap_regs;
+ }
+
+ dev->gate = clk_get(&pdev->dev, "fimg2d");
+ if (IS_ERR_OR_NULL(dev->gate)) {
+ dev_err(&pdev->dev, "failed to get g2d clock gate\n");
+ ret = -ENXIO;
+ goto put_clk;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "failed to find IRQ\n");
+ ret = -ENXIO;
+ goto put_clk_gate;
+ }
+
+ dev->irq = res->start;
+
+ ret = request_irq(dev->irq, g2d_isr, 0, pdev->name, dev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to install IRQ\n");
+ goto put_clk_gate;
+ }
+
+ dev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
+ if (IS_ERR(dev->alloc_ctx)) {
+ ret = PTR_ERR(dev->alloc_ctx);
+ goto rel_irq;
+ }
+
+ ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
+ if (ret)
+ goto alloc_ctx_cleanup;
+ vfd = video_device_alloc();
+ if (!vfd) {
+ v4l2_err(&dev->v4l2_dev, "Failed to allocate video device\n");
+ ret = -ENOMEM;
+ goto unreg_v4l2_dev;
+ }
+ *vfd = g2d_videodev;
+ vfd->lock = &dev->mutex;
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
+ goto rel_vdev;
+ }
+ video_set_drvdata(vfd, dev);
+ snprintf(vfd->name, sizeof(vfd->name), "%s", g2d_videodev.name);
+ dev->vfd = vfd;
+ v4l2_info(&dev->v4l2_dev, "device registered as /dev/video%d\n",
+ vfd->num);
+ platform_set_drvdata(pdev, dev);
+ dev->m2m_dev = v4l2_m2m_init(&g2d_m2m_ops);
+ if (IS_ERR(dev->m2m_dev)) {
+ v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem device\n");
+ ret = PTR_ERR(dev->m2m_dev);
+ goto unreg_video_dev;
+ }
+
+ def_frame.stride = (def_frame.width * def_frame.fmt->depth) >> 3;
+
+ return 0;
+
+unreg_video_dev:
+ video_unregister_device(dev->vfd);
+rel_vdev:
+ video_device_release(vfd);
+unreg_v4l2_dev:
+ v4l2_device_unregister(&dev->v4l2_dev);
+alloc_ctx_cleanup:
+ vb2_dma_contig_cleanup_ctx(dev->alloc_ctx);
+rel_irq:
+ free_irq(dev->irq, dev);
+put_clk_gate:
+ clk_put(dev->gate);
+put_clk:
+ clk_put(dev->clk);
+unmap_regs:
+ iounmap(dev->regs);
+rel_res_regs:
+ release_resource(dev->res_regs);
+free_dev:
+ kfree(dev);
+ return ret;
+}
+
+static int g2d_remove(struct platform_device *pdev)
+{
+ struct g2d_dev *dev = platform_get_drvdata(pdev);
+
+ v4l2_info(&dev->v4l2_dev, "Removing " G2D_NAME "\n");
+ v4l2_m2m_release(dev->m2m_dev);
+ video_unregister_device(dev->vfd);
+ v4l2_device_unregister(&dev->v4l2_dev);
+ vb2_dma_contig_cleanup_ctx(dev->alloc_ctx);
+ free_irq(dev->irq, dev);
+ clk_put(dev->gate);
+ clk_put(dev->clk);
+ iounmap(dev->regs);
+ release_resource(dev->res_regs);
+ kfree(dev);
+ return 0;
+}
+
+static struct platform_driver g2d_pdrv = {
+ .probe = g2d_probe,
+ .remove = g2d_remove,
+ .driver = {
+ .name = G2D_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+module_platform_driver(g2d_pdrv);
+
+MODULE_AUTHOR("Kamil Debski <k.debski@samsung.com>");
+MODULE_DESCRIPTION("S5P G2D 2d graphics accelerator driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/video/s5p-g2d/g2d.h b/drivers/media/video/s5p-g2d/g2d.h
new file mode 100644
index 000000000000..5eae90107bf8
--- /dev/null
+++ b/drivers/media/video/s5p-g2d/g2d.h
@@ -0,0 +1,83 @@
+/*
+ * Samsung S5P G2D - 2D Graphics Accelerator Driver
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Kamil Debski, <k.debski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version
+ */
+
+#include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
+
+#define G2D_NAME "s5p-g2d"
+
+struct g2d_dev {
+ struct v4l2_device v4l2_dev;
+ struct v4l2_m2m_dev *m2m_dev;
+ struct video_device *vfd;
+ struct mutex mutex;
+ spinlock_t irqlock;
+ atomic_t num_inst;
+ struct vb2_alloc_ctx *alloc_ctx;
+ struct resource *res_regs;
+ void __iomem *regs;
+ struct clk *clk;
+ struct clk *gate;
+ struct g2d_ctx *curr;
+ int irq;
+ wait_queue_head_t irq_queue;
+};
+
+struct g2d_frame {
+ /* Original dimensions */
+ u32 width;
+ u32 height;
+ /* Crop size */
+ u32 c_width;
+ u32 c_height;
+ /* Offset */
+ u32 o_width;
+ u32 o_height;
+ /* Image format */
+ struct g2d_fmt *fmt;
+ /* Variables that can be calculated once and reused */
+ u32 stride;
+ u32 bottom;
+ u32 right;
+ u32 size;
+};
+
+struct g2d_ctx {
+ struct v4l2_fh fh;
+ struct g2d_dev *dev;
+ struct v4l2_m2m_ctx *m2m_ctx;
+ struct g2d_frame in;
+ struct g2d_frame out;
+ struct v4l2_ctrl_handler ctrl_handler;
+ u32 rop;
+};
+
+struct g2d_fmt {
+ char *name;
+ u32 fourcc;
+ int depth;
+ u32 hw;
+};
+
+
+void g2d_reset(struct g2d_dev *d);
+void g2d_set_src_size(struct g2d_dev *d, struct g2d_frame *f);
+void g2d_set_src_addr(struct g2d_dev *d, dma_addr_t a);
+void g2d_set_dst_size(struct g2d_dev *d, struct g2d_frame *f);
+void g2d_set_dst_addr(struct g2d_dev *d, dma_addr_t a);
+void g2d_start(struct g2d_dev *d);
+void g2d_clear_int(struct g2d_dev *d);
+void g2d_set_rop4(struct g2d_dev *d, u32 r);
+u32 g2d_cmd_stretch(u32 e);
+void g2d_set_cmd(struct g2d_dev *d, u32 c);
+
+
diff --git a/drivers/media/video/s5p-jpeg/Makefile b/drivers/media/video/s5p-jpeg/Makefile
new file mode 100644
index 000000000000..ddc2900d88a2
--- /dev/null
+++ b/drivers/media/video/s5p-jpeg/Makefile
@@ -0,0 +1,2 @@
+s5p-jpeg-objs := jpeg-core.o
+obj-$(CONFIG_VIDEO_SAMSUNG_S5P_JPEG) := s5p-jpeg.o
diff --git a/drivers/media/video/s5p-jpeg/jpeg-core.c b/drivers/media/video/s5p-jpeg/jpeg-core.c
new file mode 100644
index 000000000000..1105a8749c8b
--- /dev/null
+++ b/drivers/media/video/s5p-jpeg/jpeg-core.c
@@ -0,0 +1,1482 @@
+/* linux/drivers/media/video/s5p-jpeg/jpeg-core.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/gfp.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "jpeg-core.h"
+#include "jpeg-hw.h"
+
+static struct s5p_jpeg_fmt formats_enc[] = {
+ {
+ .name = "YUV 4:2:0 planar, YCbCr",
+ .fourcc = V4L2_PIX_FMT_YUV420,
+ .depth = 12,
+ .colplanes = 3,
+ .types = MEM2MEM_CAPTURE,
+ },
+ {
+ .name = "YUV 4:2:2 packed, YCbYCr",
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .depth = 16,
+ .colplanes = 1,
+ .types = MEM2MEM_CAPTURE | MEM2MEM_OUTPUT,
+ },
+ {
+ .name = "RGB565",
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .depth = 16,
+ .colplanes = 1,
+ .types = MEM2MEM_OUTPUT,
+ },
+};
+#define NUM_FORMATS_ENC ARRAY_SIZE(formats_enc)
+
+static struct s5p_jpeg_fmt formats_dec[] = {
+ {
+ .name = "YUV 4:2:0 planar, YCbCr",
+ .fourcc = V4L2_PIX_FMT_YUV420,
+ .depth = 12,
+ .colplanes = 3,
+ .h_align = 4,
+ .v_align = 4,
+ .types = MEM2MEM_CAPTURE,
+ },
+ {
+ .name = "YUV 4:2:2 packed, YCbYCr",
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .depth = 16,
+ .colplanes = 1,
+ .h_align = 4,
+ .v_align = 3,
+ .types = MEM2MEM_CAPTURE,
+ },
+ {
+ .name = "JPEG JFIF",
+ .fourcc = V4L2_PIX_FMT_JPEG,
+ .colplanes = 1,
+ .types = MEM2MEM_OUTPUT,
+ },
+};
+#define NUM_FORMATS_DEC ARRAY_SIZE(formats_dec)
+
+static const unsigned char qtbl_luminance[4][64] = {
+ {/* level 1 - high quality */
+ 8, 6, 6, 8, 12, 14, 16, 17,
+ 6, 6, 6, 8, 10, 13, 12, 15,
+ 6, 6, 7, 8, 13, 14, 18, 24,
+ 8, 8, 8, 14, 13, 19, 24, 35,
+ 12, 10, 13, 13, 20, 26, 34, 39,
+ 14, 13, 14, 19, 26, 34, 39, 39,
+ 16, 12, 18, 24, 34, 39, 39, 39,
+ 17, 15, 24, 35, 39, 39, 39, 39
+ },
+ {/* level 2 */
+ 12, 8, 8, 12, 17, 21, 24, 23,
+ 8, 9, 9, 11, 15, 19, 18, 23,
+ 8, 9, 10, 12, 19, 20, 27, 36,
+ 12, 11, 12, 21, 20, 28, 36, 53,
+ 17, 15, 19, 20, 30, 39, 51, 59,
+ 21, 19, 20, 28, 39, 51, 59, 59,
+ 24, 18, 27, 36, 51, 59, 59, 59,
+ 23, 23, 36, 53, 59, 59, 59, 59
+ },
+ {/* level 3 */
+ 16, 11, 11, 16, 23, 27, 31, 30,
+ 11, 12, 12, 15, 20, 23, 23, 30,
+ 11, 12, 13, 16, 23, 26, 35, 47,
+ 16, 15, 16, 23, 26, 37, 47, 64,
+ 23, 20, 23, 26, 39, 51, 64, 64,
+ 27, 23, 26, 37, 51, 64, 64, 64,
+ 31, 23, 35, 47, 64, 64, 64, 64,
+ 30, 30, 47, 64, 64, 64, 64, 64
+ },
+ {/* level 4 - low quality */
+ 20, 16, 25, 39, 50, 46, 62, 68,
+ 16, 18, 23, 38, 38, 53, 65, 68,
+ 25, 23, 31, 38, 53, 65, 68, 68,
+ 39, 38, 38, 53, 65, 68, 68, 68,
+ 50, 38, 53, 65, 68, 68, 68, 68,
+ 46, 53, 65, 68, 68, 68, 68, 68,
+ 62, 65, 68, 68, 68, 68, 68, 68,
+ 68, 68, 68, 68, 68, 68, 68, 68
+ }
+};
+
+static const unsigned char qtbl_chrominance[4][64] = {
+ {/* level 1 - high quality */
+ 9, 8, 9, 11, 14, 17, 19, 24,
+ 8, 10, 9, 11, 14, 13, 17, 22,
+ 9, 9, 13, 14, 13, 15, 23, 26,
+ 11, 11, 14, 14, 15, 20, 26, 33,
+ 14, 14, 13, 15, 20, 24, 33, 39,
+ 17, 13, 15, 20, 24, 32, 39, 39,
+ 19, 17, 23, 26, 33, 39, 39, 39,
+ 24, 22, 26, 33, 39, 39, 39, 39
+ },
+ {/* level 2 */
+ 13, 11, 13, 16, 20, 20, 29, 37,
+ 11, 14, 14, 14, 16, 20, 26, 32,
+ 13, 14, 15, 17, 20, 23, 35, 40,
+ 16, 14, 17, 21, 23, 30, 40, 50,
+ 20, 16, 20, 23, 30, 37, 50, 59,
+ 20, 20, 23, 30, 37, 48, 59, 59,
+ 29, 26, 35, 40, 50, 59, 59, 59,
+ 37, 32, 40, 50, 59, 59, 59, 59
+ },
+ {/* level 3 */
+ 17, 15, 17, 21, 20, 26, 38, 48,
+ 15, 19, 18, 17, 20, 26, 35, 43,
+ 17, 18, 20, 22, 26, 30, 46, 53,
+ 21, 17, 22, 28, 30, 39, 53, 64,
+ 20, 20, 26, 30, 39, 48, 64, 64,
+ 26, 26, 30, 39, 48, 63, 64, 64,
+ 38, 35, 46, 53, 64, 64, 64, 64,
+ 48, 43, 53, 64, 64, 64, 64, 64
+ },
+ {/* level 4 - low quality */
+ 21, 25, 32, 38, 54, 68, 68, 68,
+ 25, 28, 24, 38, 54, 68, 68, 68,
+ 32, 24, 32, 43, 66, 68, 68, 68,
+ 38, 38, 43, 53, 68, 68, 68, 68,
+ 54, 54, 66, 68, 68, 68, 68, 68,
+ 68, 68, 68, 68, 68, 68, 68, 68,
+ 68, 68, 68, 68, 68, 68, 68, 68,
+ 68, 68, 68, 68, 68, 68, 68, 68
+ }
+};
+
+static const unsigned char hdctbl0[16] = {
+ 0, 1, 5, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0
+};
+
+static const unsigned char hdctblg0[12] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0xa, 0xb
+};
+static const unsigned char hactbl0[16] = {
+ 0, 2, 1, 3, 3, 2, 4, 3, 5, 5, 4, 4, 0, 0, 1, 0x7d
+};
+static const unsigned char hactblg0[162] = {
+ 0x01, 0x02, 0x03, 0x00, 0x04, 0x11, 0x05, 0x12,
+ 0x21, 0x31, 0x41, 0x06, 0x13, 0x51, 0x61, 0x07,
+ 0x22, 0x71, 0x14, 0x32, 0x81, 0x91, 0xa1, 0x08,
+ 0x23, 0x42, 0xb1, 0xc1, 0x15, 0x52, 0xd1, 0xf0,
+ 0x24, 0x33, 0x62, 0x72, 0x82, 0x09, 0x0a, 0x16,
+ 0x17, 0x18, 0x19, 0x1a, 0x25, 0x26, 0x27, 0x28,
+ 0x29, 0x2a, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39,
+ 0x3a, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49,
+ 0x4a, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59,
+ 0x5a, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69,
+ 0x6a, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79,
+ 0x7a, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89,
+ 0x8a, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98,
+ 0x99, 0x9a, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
+ 0xa8, 0xa9, 0xaa, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6,
+ 0xb7, 0xb8, 0xb9, 0xba, 0xc2, 0xc3, 0xc4, 0xc5,
+ 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xd2, 0xd3, 0xd4,
+ 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xe1, 0xe2,
+ 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea,
+ 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8,
+ 0xf9, 0xfa
+};
+
+static inline void jpeg_set_qtbl(void __iomem *regs, const unsigned char *qtbl,
+ unsigned long tab, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ writel((unsigned int)qtbl[i], regs + tab + (i * 0x04));
+}
+
+static inline void jpeg_set_qtbl_lum(void __iomem *regs, int quality)
+{
+ /* this driver fills quantisation table 0 with data for luma */
+ jpeg_set_qtbl(regs, qtbl_luminance[quality], S5P_JPG_QTBL_CONTENT(0),
+ ARRAY_SIZE(qtbl_luminance[quality]));
+}
+
+static inline void jpeg_set_qtbl_chr(void __iomem *regs, int quality)
+{
+ /* this driver fills quantisation table 1 with data for chroma */
+ jpeg_set_qtbl(regs, qtbl_chrominance[quality], S5P_JPG_QTBL_CONTENT(1),
+ ARRAY_SIZE(qtbl_chrominance[quality]));
+}
+
+static inline void jpeg_set_htbl(void __iomem *regs, const unsigned char *htbl,
+ unsigned long tab, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ writel((unsigned int)htbl[i], regs + tab + (i * 0x04));
+}
+
+static inline void jpeg_set_hdctbl(void __iomem *regs)
+{
+ /* this driver fills table 0 for this component */
+ jpeg_set_htbl(regs, hdctbl0, S5P_JPG_HDCTBL(0), ARRAY_SIZE(hdctbl0));
+}
+
+static inline void jpeg_set_hdctblg(void __iomem *regs)
+{
+ /* this driver fills table 0 for this component */
+ jpeg_set_htbl(regs, hdctblg0, S5P_JPG_HDCTBLG(0), ARRAY_SIZE(hdctblg0));
+}
+
+static inline void jpeg_set_hactbl(void __iomem *regs)
+{
+ /* this driver fills table 0 for this component */
+ jpeg_set_htbl(regs, hactbl0, S5P_JPG_HACTBL(0), ARRAY_SIZE(hactbl0));
+}
+
+static inline void jpeg_set_hactblg(void __iomem *regs)
+{
+ /* this driver fills table 0 for this component */
+ jpeg_set_htbl(regs, hactblg0, S5P_JPG_HACTBLG(0), ARRAY_SIZE(hactblg0));
+}
+
+/*
+ * ============================================================================
+ * Device file operations
+ * ============================================================================
+ */
+
+static int queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq);
+static struct s5p_jpeg_fmt *s5p_jpeg_find_format(unsigned int mode,
+ __u32 pixelformat);
+
+static int s5p_jpeg_open(struct file *file)
+{
+ struct s5p_jpeg *jpeg = video_drvdata(file);
+ struct video_device *vfd = video_devdata(file);
+ struct s5p_jpeg_ctx *ctx;
+ struct s5p_jpeg_fmt *out_fmt;
+
+ ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ file->private_data = ctx;
+ ctx->jpeg = jpeg;
+ if (vfd == jpeg->vfd_encoder) {
+ ctx->mode = S5P_JPEG_ENCODE;
+ out_fmt = s5p_jpeg_find_format(ctx->mode, V4L2_PIX_FMT_RGB565);
+ } else {
+ ctx->mode = S5P_JPEG_DECODE;
+ out_fmt = s5p_jpeg_find_format(ctx->mode, V4L2_PIX_FMT_JPEG);
+ }
+
+ ctx->m2m_ctx = v4l2_m2m_ctx_init(jpeg->m2m_dev, ctx, queue_init);
+ if (IS_ERR(ctx->m2m_ctx)) {
+ int err = PTR_ERR(ctx->m2m_ctx);
+ kfree(ctx);
+ return err;
+ }
+
+ ctx->out_q.fmt = out_fmt;
+ ctx->cap_q.fmt = s5p_jpeg_find_format(ctx->mode, V4L2_PIX_FMT_YUYV);
+
+ return 0;
+}
+
+static int s5p_jpeg_release(struct file *file)
+{
+ struct s5p_jpeg_ctx *ctx = file->private_data;
+
+ v4l2_m2m_ctx_release(ctx->m2m_ctx);
+ kfree(ctx);
+
+ return 0;
+}
+
+static unsigned int s5p_jpeg_poll(struct file *file,
+ struct poll_table_struct *wait)
+{
+ struct s5p_jpeg_ctx *ctx = file->private_data;
+
+ return v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
+}
+
+static int s5p_jpeg_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct s5p_jpeg_ctx *ctx = file->private_data;
+
+ return v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
+}
+
+static const struct v4l2_file_operations s5p_jpeg_fops = {
+ .owner = THIS_MODULE,
+ .open = s5p_jpeg_open,
+ .release = s5p_jpeg_release,
+ .poll = s5p_jpeg_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = s5p_jpeg_mmap,
+};
+
+/*
+ * ============================================================================
+ * video ioctl operations
+ * ============================================================================
+ */
+
+static int get_byte(struct s5p_jpeg_buffer *buf)
+{
+ if (buf->curr >= buf->size)
+ return -1;
+
+ return ((unsigned char *)buf->data)[buf->curr++];
+}
+
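+/* Read a big-endian 16-bit word; returns 0 on success, -1 if the buffer ran out of data */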
+static int get_word_be(struct s5p_jpeg_buffer *buf, unsigned int *word)
+{
+ unsigned int temp;
+ int byte;
+
+ byte = get_byte(buf);
+ if (byte == -1)
+ return -1;
+ temp = byte << 8;
+ byte = get_byte(buf);
+ if (byte == -1)
+ return -1;
+ *word = (unsigned int)byte | temp;
+ return 0;
+}
+
+static void skip(struct s5p_jpeg_buffer *buf, long len)
+{
+ if (len <= 0)
+ return;
+
+ while (len--)
+ get_byte(buf);
+}
+
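+/* Walk the JPEG markers looking for SOF0 and extract image width, height and component count */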
+static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result,
+ unsigned long buffer, unsigned long size)
+{
+ int c, components, notfound;
+ unsigned int height, width, word;
+ long length;
+ struct s5p_jpeg_buffer jpeg_buffer;
+
+ jpeg_buffer.size = size;
+ jpeg_buffer.data = buffer;
+ jpeg_buffer.curr = 0;
+
+ notfound = 1;
+ while (notfound) {
+ c = get_byte(&jpeg_buffer);
+ if (c == -1)
+ break;
+ if (c != 0xff)
+ continue;
+ do
+ c = get_byte(&jpeg_buffer);
+ while (c == 0xff);
+ if (c == -1)
+ break;
+ if (c == 0)
+ continue;
+ length = 0;
+ switch (c) {
+ /* SOF0: baseline JPEG */
+ case SOF0:
+ if (get_word_be(&jpeg_buffer, &word))
+ break;
+ if (get_byte(&jpeg_buffer) == -1)
+ break;
+ if (get_word_be(&jpeg_buffer, &height))
+ break;
+ if (get_word_be(&jpeg_buffer, &width))
+ break;
+ components = get_byte(&jpeg_buffer);
+ if (components == -1)
+ break;
+ notfound = 0;
+
+ skip(&jpeg_buffer, components * 3);
+ break;
+
+ /* skip payload-less markers */
+ case RST ... RST + 7:
+ case SOI:
+ case EOI:
+ case TEM:
+ break;
+
+ /* skip uninteresting payload markers */
+ default:
+ if (get_word_be(&jpeg_buffer, &word))
+ break;
+ length = (long)word - 2;
+ skip(&jpeg_buffer, length);
+ break;
+ }
+ }
+ result->w = width;
+ result->h = height;
+ result->size = components;
+ return !notfound;
+}
+
+static int s5p_jpeg_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct s5p_jpeg_ctx *ctx = priv;
+
+ if (ctx->mode == S5P_JPEG_ENCODE) {
+ strlcpy(cap->driver, S5P_JPEG_M2M_NAME " encoder",
+ sizeof(cap->driver));
+ strlcpy(cap->card, S5P_JPEG_M2M_NAME " encoder",
+ sizeof(cap->card));
+ } else {
+ strlcpy(cap->driver, S5P_JPEG_M2M_NAME " decoder",
+ sizeof(cap->driver));
+ strlcpy(cap->card, S5P_JPEG_M2M_NAME " decoder",
+ sizeof(cap->card));
+ }
+ cap->bus_info[0] = 0;
+ cap->capabilities = V4L2_CAP_STREAMING |
+ V4L2_CAP_VIDEO_CAPTURE |
+ V4L2_CAP_VIDEO_OUTPUT;
+ return 0;
+}
+
+static int enum_fmt(struct s5p_jpeg_fmt *formats, int n,
+ struct v4l2_fmtdesc *f, u32 type)
+{
+ int i, num = 0;
+
+ for (i = 0; i < n; ++i) {
+ if (formats[i].types & type) {
+ /* index-th format of the requested type found? */
+ if (num == f->index)
+ break;
+ /* Correct type but haven't reached our index yet,
+ * just increment per-type index */
+ ++num;
+ }
+ }
+
+ /* Format not found */
+ if (i >= n)
+ return -EINVAL;
+
+ strlcpy(f->description, formats[i].name, sizeof(f->description));
+ f->pixelformat = formats[i].fourcc;
+
+ return 0;
+}
+
+static int s5p_jpeg_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct s5p_jpeg_ctx *ctx;
+
+ ctx = priv;
+
+ if (ctx->mode == S5P_JPEG_ENCODE)
+ return enum_fmt(formats_enc, NUM_FORMATS_ENC, f,
+ MEM2MEM_CAPTURE);
+
+ return enum_fmt(formats_dec, NUM_FORMATS_DEC, f, MEM2MEM_CAPTURE);
+}
+
+static int s5p_jpeg_enum_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct s5p_jpeg_ctx *ctx;
+
+ ctx = priv;
+
+ if (ctx->mode == S5P_JPEG_ENCODE)
+ return enum_fmt(formats_enc, NUM_FORMATS_ENC, f,
+ MEM2MEM_OUTPUT);
+
+ return enum_fmt(formats_dec, NUM_FORMATS_DEC, f, MEM2MEM_OUTPUT);
+}
+
+static struct s5p_jpeg_q_data *get_q_data(struct s5p_jpeg_ctx *ctx,
+ enum v4l2_buf_type type)
+{
+ if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return &ctx->out_q;
+ if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return &ctx->cap_q;
+
+ return NULL;
+}
+
+static int s5p_jpeg_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct vb2_queue *vq;
+ struct s5p_jpeg_q_data *q_data = NULL;
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct s5p_jpeg_ctx *ct = priv;
+
+ vq = v4l2_m2m_get_vq(ct->m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE &&
+ ct->mode == S5P_JPEG_DECODE && !ct->hdr_parsed)
+ return -EINVAL;
+ q_data = get_q_data(ct, f->type);
+ BUG_ON(q_data == NULL);
+
+ pix->width = q_data->w;
+ pix->height = q_data->h;
+ pix->field = V4L2_FIELD_NONE;
+ pix->pixelformat = q_data->fmt->fourcc;
+ pix->bytesperline = 0;
+ if (q_data->fmt->fourcc != V4L2_PIX_FMT_JPEG) {
+ u32 bpl = q_data->w;
+ if (q_data->fmt->colplanes == 1)
+ bpl = (bpl * q_data->fmt->depth) >> 3;
+ pix->bytesperline = bpl;
+ }
+ pix->sizeimage = q_data->size;
+
+ return 0;
+}
+
+static struct s5p_jpeg_fmt *s5p_jpeg_find_format(unsigned int mode,
+ u32 pixelformat)
+{
+ unsigned int k;
+ struct s5p_jpeg_fmt *formats;
+ int n;
+
+ if (mode == S5P_JPEG_ENCODE) {
+ formats = formats_enc;
+ n = NUM_FORMATS_ENC;
+ } else {
+ formats = formats_dec;
+ n = NUM_FORMATS_DEC;
+ }
+
+ for (k = 0; k < n; k++) {
+ struct s5p_jpeg_fmt *fmt = &formats[k];
+ if (fmt->fourcc == pixelformat)
+ return fmt;
+ }
+
+ return NULL;
+
+}
+
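+/* Like v4l_bound_align_image(), but round the aligned dimensions up when this stays within the maximum */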
+static void jpeg_bound_align_image(u32 *w, unsigned int wmin, unsigned int wmax,
+ unsigned int walign,
+ u32 *h, unsigned int hmin, unsigned int hmax,
+ unsigned int halign)
+{
+ int width, height, w_step, h_step;
+
+ width = *w;
+ height = *h;
+
+ w_step = 1 << walign;
+ h_step = 1 << halign;
+ v4l_bound_align_image(w, wmin, wmax, walign, h, hmin, hmax, halign, 0);
+
+ if (*w < width && (*w + w_step) < wmax)
+ *w += w_step;
+ if (*h < height && (*h + h_step) < hmax)
+ *h += h_step;
+
+}
+
+static int vidioc_try_fmt(struct v4l2_format *f, struct s5p_jpeg_fmt *fmt,
+ struct s5p_jpeg_ctx *ctx, int q_type)
+{
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+
+ if (pix->field == V4L2_FIELD_ANY)
+ pix->field = V4L2_FIELD_NONE;
+ else if (pix->field != V4L2_FIELD_NONE)
+ return -EINVAL;
+
+ /* V4L2 specification suggests the driver corrects the format struct
+ * if any of the dimensions is unsupported */
+ if (q_type == MEM2MEM_OUTPUT)
+ jpeg_bound_align_image(&pix->width, S5P_JPEG_MIN_WIDTH,
+ S5P_JPEG_MAX_WIDTH, 0,
+ &pix->height, S5P_JPEG_MIN_HEIGHT,
+ S5P_JPEG_MAX_HEIGHT, 0);
+ else
+ jpeg_bound_align_image(&pix->width, S5P_JPEG_MIN_WIDTH,
+ S5P_JPEG_MAX_WIDTH, fmt->h_align,
+ &pix->height, S5P_JPEG_MIN_HEIGHT,
+ S5P_JPEG_MAX_HEIGHT, fmt->v_align);
+
+ if (fmt->fourcc == V4L2_PIX_FMT_JPEG) {
+ if (pix->sizeimage <= 0)
+ pix->sizeimage = PAGE_SIZE;
+ pix->bytesperline = 0;
+ } else {
+ u32 bpl = pix->bytesperline;
+
+ if (fmt->colplanes > 1 && bpl < pix->width)
+ bpl = pix->width; /* planar */
+
+ if (fmt->colplanes == 1 && /* packed */
+ (bpl << 3) / fmt->depth < pix->width)
+ bpl = (pix->width * fmt->depth) >> 3;
+
+ pix->bytesperline = bpl;
+ pix->sizeimage = (pix->width * pix->height * fmt->depth) >> 3;
+ }
+
+ return 0;
+}
+
+static int s5p_jpeg_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct s5p_jpeg_fmt *fmt;
+ struct s5p_jpeg_ctx *ctx = priv;
+
+ fmt = s5p_jpeg_find_format(ctx->mode, f->fmt.pix.pixelformat);
+ if (!fmt || !(fmt->types & MEM2MEM_CAPTURE)) {
+ v4l2_err(&ctx->jpeg->v4l2_dev,
+ "Fourcc format (0x%08x) invalid.\n",
+ f->fmt.pix.pixelformat);
+ return -EINVAL;
+ }
+
+ return vidioc_try_fmt(f, fmt, ctx, MEM2MEM_CAPTURE);
+}
+
+static int s5p_jpeg_try_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct s5p_jpeg_fmt *fmt;
+ struct s5p_jpeg_ctx *ctx = priv;
+
+ fmt = s5p_jpeg_find_format(ctx->mode, f->fmt.pix.pixelformat);
+ if (!fmt || !(fmt->types & MEM2MEM_OUTPUT)) {
+ v4l2_err(&ctx->jpeg->v4l2_dev,
+ "Fourcc format (0x%08x) invalid.\n",
+ f->fmt.pix.pixelformat);
+ return -EINVAL;
+ }
+
+ return vidioc_try_fmt(f, fmt, ctx, MEM2MEM_OUTPUT);
+}
+
+static int s5p_jpeg_s_fmt(struct s5p_jpeg_ctx *ct, struct v4l2_format *f)
+{
+ struct vb2_queue *vq;
+ struct s5p_jpeg_q_data *q_data = NULL;
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+
+ vq = v4l2_m2m_get_vq(ct->m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+
+ q_data = get_q_data(ct, f->type);
+ BUG_ON(q_data == NULL);
+
+ if (vb2_is_busy(vq)) {
+ v4l2_err(&ct->jpeg->v4l2_dev, "%s queue busy\n", __func__);
+ return -EBUSY;
+ }
+
+ q_data->fmt = s5p_jpeg_find_format(ct->mode, pix->pixelformat);
+ q_data->w = pix->width;
+ q_data->h = pix->height;
+ if (q_data->fmt->fourcc != V4L2_PIX_FMT_JPEG)
+ q_data->size = q_data->w * q_data->h * q_data->fmt->depth >> 3;
+ else
+ q_data->size = pix->sizeimage;
+
+ return 0;
+}
+
+static int s5p_jpeg_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ int ret;
+
+ ret = s5p_jpeg_try_fmt_vid_cap(file, priv, f);
+ if (ret)
+ return ret;
+
+ return s5p_jpeg_s_fmt(priv, f);
+}
+
+static int s5p_jpeg_s_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ int ret;
+
+ ret = s5p_jpeg_try_fmt_vid_out(file, priv, f);
+ if (ret)
+ return ret;
+
+ return s5p_jpeg_s_fmt(priv, f);
+}
+
+static int s5p_jpeg_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *reqbufs)
+{
+ struct s5p_jpeg_ctx *ctx = priv;
+
+ return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
+}
+
+static int s5p_jpeg_querybuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct s5p_jpeg_ctx *ctx = priv;
+
+ return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf);
+}
+
+static int s5p_jpeg_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+{
+ struct s5p_jpeg_ctx *ctx = priv;
+
+ return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
+}
+
+static int s5p_jpeg_dqbuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct s5p_jpeg_ctx *ctx = priv;
+
+ return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
+}
+
+static int s5p_jpeg_streamon(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct s5p_jpeg_ctx *ctx = priv;
+
+ return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
+}
+
+static int s5p_jpeg_streamoff(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct s5p_jpeg_ctx *ctx = priv;
+
+ return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
+}
+
+int s5p_jpeg_g_selection(struct file *file, void *priv,
+ struct v4l2_selection *s)
+{
+ struct s5p_jpeg_ctx *ctx = priv;
+
+ if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
+ s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ /* For JPEG blob active == default == bounds */
+ switch (s->target) {
+ case V4L2_SEL_TGT_CROP_ACTIVE:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_COMPOSE_ACTIVE:
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ s->r.width = ctx->out_q.w;
+ s->r.height = ctx->out_q.h;
+ break;
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ case V4L2_SEL_TGT_COMPOSE_PADDED:
+ s->r.width = ctx->cap_q.w;
+ s->r.height = ctx->cap_q.h;
+ break;
+ default:
+ return -EINVAL;
+ }
+ s->r.left = 0;
+ s->r.top = 0;
+ return 0;
+}
+
+static int s5p_jpeg_g_jpegcomp(struct file *file, void *priv,
+ struct v4l2_jpegcompression *compr)
+{
+ struct s5p_jpeg_ctx *ctx = priv;
+
+ if (ctx->mode == S5P_JPEG_DECODE)
+ return -ENOTTY;
+
+ memset(compr, 0, sizeof(*compr));
+ compr->quality = ctx->compr_quality;
+
+ return 0;
+}
+
+static int s5p_jpeg_s_jpegcomp(struct file *file, void *priv,
+ struct v4l2_jpegcompression *compr)
+{
+ struct s5p_jpeg_ctx *ctx = priv;
+
+ if (ctx->mode == S5P_JPEG_DECODE)
+ return -ENOTTY;
+
+ compr->quality = clamp(compr->quality, S5P_JPEG_COMPR_QUAL_BEST,
+ S5P_JPEG_COMPR_QUAL_WORST);
+
+ ctx->compr_quality = S5P_JPEG_COMPR_QUAL_WORST - compr->quality;
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops s5p_jpeg_ioctl_ops = {
+ .vidioc_querycap = s5p_jpeg_querycap,
+
+ .vidioc_enum_fmt_vid_cap = s5p_jpeg_enum_fmt_vid_cap,
+ .vidioc_enum_fmt_vid_out = s5p_jpeg_enum_fmt_vid_out,
+
+ .vidioc_g_fmt_vid_cap = s5p_jpeg_g_fmt,
+ .vidioc_g_fmt_vid_out = s5p_jpeg_g_fmt,
+
+ .vidioc_try_fmt_vid_cap = s5p_jpeg_try_fmt_vid_cap,
+ .vidioc_try_fmt_vid_out = s5p_jpeg_try_fmt_vid_out,
+
+ .vidioc_s_fmt_vid_cap = s5p_jpeg_s_fmt_vid_cap,
+ .vidioc_s_fmt_vid_out = s5p_jpeg_s_fmt_vid_out,
+
+ .vidioc_reqbufs = s5p_jpeg_reqbufs,
+ .vidioc_querybuf = s5p_jpeg_querybuf,
+
+ .vidioc_qbuf = s5p_jpeg_qbuf,
+ .vidioc_dqbuf = s5p_jpeg_dqbuf,
+
+ .vidioc_streamon = s5p_jpeg_streamon,
+ .vidioc_streamoff = s5p_jpeg_streamoff,
+
+ .vidioc_g_selection = s5p_jpeg_g_selection,
+
+ .vidioc_g_jpegcomp = s5p_jpeg_g_jpegcomp,
+ .vidioc_s_jpegcomp = s5p_jpeg_s_jpegcomp,
+};
+
+/*
+ * ============================================================================
+ * mem2mem callbacks
+ * ============================================================================
+ */
+
+static void s5p_jpeg_device_run(void *priv)
+{
+ struct s5p_jpeg_ctx *ctx = priv;
+ struct s5p_jpeg *jpeg = ctx->jpeg;
+ struct vb2_buffer *src_buf, *dst_buf;
+ unsigned long src_addr, dst_addr;
+
+ src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
+ dst_buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+ src_addr = vb2_dma_contig_plane_dma_addr(src_buf, 0);
+ dst_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
+
+ jpeg_reset(jpeg->regs);
+ jpeg_poweron(jpeg->regs);
+ jpeg_proc_mode(jpeg->regs, ctx->mode);
+ if (ctx->mode == S5P_JPEG_ENCODE) {
+ if (ctx->out_q.fmt->fourcc == V4L2_PIX_FMT_RGB565)
+ jpeg_input_raw_mode(jpeg->regs, S5P_JPEG_RAW_IN_565);
+ else
+ jpeg_input_raw_mode(jpeg->regs, S5P_JPEG_RAW_IN_422);
+ if (ctx->cap_q.fmt->fourcc == V4L2_PIX_FMT_YUYV)
+ jpeg_subsampling_mode(jpeg->regs,
+ S5P_JPEG_SUBSAMPLING_422);
+ else
+ jpeg_subsampling_mode(jpeg->regs,
+ S5P_JPEG_SUBSAMPLING_420);
+ jpeg_dri(jpeg->regs, 0);
+ jpeg_x(jpeg->regs, ctx->out_q.w);
+ jpeg_y(jpeg->regs, ctx->out_q.h);
+ jpeg_imgadr(jpeg->regs, src_addr);
+ jpeg_jpgadr(jpeg->regs, dst_addr);
+
+ /* ultimately comes from sizeimage from userspace */
+ jpeg_enc_stream_int(jpeg->regs, ctx->cap_q.size);
+
+ /* JPEG RGB to YCbCr conversion matrix */
+ jpeg_coef(jpeg->regs, 1, 1, S5P_JPEG_COEF11);
+ jpeg_coef(jpeg->regs, 1, 2, S5P_JPEG_COEF12);
+ jpeg_coef(jpeg->regs, 1, 3, S5P_JPEG_COEF13);
+ jpeg_coef(jpeg->regs, 2, 1, S5P_JPEG_COEF21);
+ jpeg_coef(jpeg->regs, 2, 2, S5P_JPEG_COEF22);
+ jpeg_coef(jpeg->regs, 2, 3, S5P_JPEG_COEF23);
+ jpeg_coef(jpeg->regs, 3, 1, S5P_JPEG_COEF31);
+ jpeg_coef(jpeg->regs, 3, 2, S5P_JPEG_COEF32);
+ jpeg_coef(jpeg->regs, 3, 3, S5P_JPEG_COEF33);
+
+ /*
+ * JPEG IP allows storing 4 quantization tables
+ * We fill table 0 for luma and table 1 for chroma
+ */
+ jpeg_set_qtbl_lum(jpeg->regs, ctx->compr_quality);
+ jpeg_set_qtbl_chr(jpeg->regs, ctx->compr_quality);
+ /* use table 0 for Y */
+ jpeg_qtbl(jpeg->regs, 1, 0);
+ /* use table 1 for Cb and Cr */
+ jpeg_qtbl(jpeg->regs, 2, 1);
+ jpeg_qtbl(jpeg->regs, 3, 1);
+
+ /* Y, Cb, Cr use Huffman table 0 */
+ jpeg_htbl_ac(jpeg->regs, 1);
+ jpeg_htbl_dc(jpeg->regs, 1);
+ jpeg_htbl_ac(jpeg->regs, 2);
+ jpeg_htbl_dc(jpeg->regs, 2);
+ jpeg_htbl_ac(jpeg->regs, 3);
+ jpeg_htbl_dc(jpeg->regs, 3);
+ } else {
+ jpeg_rst_int_enable(jpeg->regs, true);
+ jpeg_data_num_int_enable(jpeg->regs, true);
+ jpeg_final_mcu_num_int_enable(jpeg->regs, true);
+ jpeg_outform_raw(jpeg->regs, S5P_JPEG_RAW_OUT_422);
+ jpeg_jpgadr(jpeg->regs, src_addr);
+ jpeg_imgadr(jpeg->regs, dst_addr);
+ }
+ jpeg_start(jpeg->regs);
+}
+
+static int s5p_jpeg_job_ready(void *priv)
+{
+ struct s5p_jpeg_ctx *ctx = priv;
+
+ if (ctx->mode == S5P_JPEG_DECODE)
+ return ctx->hdr_parsed;
+ return 1;
+}
+
+static void s5p_jpeg_job_abort(void *priv)
+{
+}
+
+static struct v4l2_m2m_ops s5p_jpeg_m2m_ops = {
+ .device_run = s5p_jpeg_device_run,
+ .job_ready = s5p_jpeg_job_ready,
+ .job_abort = s5p_jpeg_job_abort,
+};
+
+/*
+ * ============================================================================
+ * Queue operations
+ * ============================================================================
+ */
+
+static int s5p_jpeg_queue_setup(struct vb2_queue *vq,
+ const struct v4l2_format *fmt,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], void *alloc_ctxs[])
+{
+ struct s5p_jpeg_ctx *ctx = vb2_get_drv_priv(vq);
+ struct s5p_jpeg_q_data *q_data = NULL;
+ unsigned int size, count = *nbuffers;
+
+ q_data = get_q_data(ctx, vq->type);
+ BUG_ON(q_data == NULL);
+
+ size = q_data->size;
+
+ /*
+	 * the header is parsed during decoding and the parsed information is
+	 * stored in the context, so we do not allow another buffer to overwrite it
+ */
+ if (ctx->mode == S5P_JPEG_DECODE)
+ count = 1;
+
+ *nbuffers = count;
+ *nplanes = 1;
+ sizes[0] = size;
+ alloc_ctxs[0] = ctx->jpeg->alloc_ctx;
+
+ return 0;
+}
+
+static int s5p_jpeg_buf_prepare(struct vb2_buffer *vb)
+{
+ struct s5p_jpeg_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct s5p_jpeg_q_data *q_data = NULL;
+
+ q_data = get_q_data(ctx, vb->vb2_queue->type);
+ BUG_ON(q_data == NULL);
+
+ if (vb2_plane_size(vb, 0) < q_data->size) {
+ pr_err("%s data will not fit into plane (%lu < %lu)\n",
+ __func__, vb2_plane_size(vb, 0),
+			(unsigned long)q_data->size);
+ return -EINVAL;
+ }
+
+ vb2_set_plane_payload(vb, 0, q_data->size);
+
+ return 0;
+}
+
+static void s5p_jpeg_buf_queue(struct vb2_buffer *vb)
+{
+ struct s5p_jpeg_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ if (ctx->mode == S5P_JPEG_DECODE &&
+ vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ struct s5p_jpeg_q_data tmp, *q_data;
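+		/*
+		 * for decoding, parse the JPEG header here so that the image
+		 * dimensions are known before the job is scheduled
+		 */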
+ ctx->hdr_parsed = s5p_jpeg_parse_hdr(&tmp,
+ (unsigned long)vb2_plane_vaddr(vb, 0),
+ min((unsigned long)ctx->out_q.size,
+ vb2_get_plane_payload(vb, 0)));
+ if (!ctx->hdr_parsed) {
+ vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
+ return;
+ }
+
+ q_data = &ctx->out_q;
+ q_data->w = tmp.w;
+ q_data->h = tmp.h;
+
+ q_data = &ctx->cap_q;
+ q_data->w = tmp.w;
+ q_data->h = tmp.h;
+
+ jpeg_bound_align_image(&q_data->w, S5P_JPEG_MIN_WIDTH,
+ S5P_JPEG_MAX_WIDTH, q_data->fmt->h_align,
+ &q_data->h, S5P_JPEG_MIN_HEIGHT,
+ S5P_JPEG_MAX_HEIGHT, q_data->fmt->v_align
+ );
+ q_data->size = q_data->w * q_data->h * q_data->fmt->depth >> 3;
+ }
+ if (ctx->m2m_ctx)
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
+}
+
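+/*
+ * vb2 calls wait_prepare/wait_finish around blocking waits; release the
+ * driver lock there so that other contexts can make progress meanwhile
+ */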
+static void s5p_jpeg_wait_prepare(struct vb2_queue *vq)
+{
+ struct s5p_jpeg_ctx *ctx = vb2_get_drv_priv(vq);
+
+ mutex_unlock(&ctx->jpeg->lock);
+}
+
+static void s5p_jpeg_wait_finish(struct vb2_queue *vq)
+{
+ struct s5p_jpeg_ctx *ctx = vb2_get_drv_priv(vq);
+
+ mutex_lock(&ctx->jpeg->lock);
+}
+
+static int s5p_jpeg_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct s5p_jpeg_ctx *ctx = vb2_get_drv_priv(q);
+ int ret;
+
+ ret = pm_runtime_get_sync(ctx->jpeg->dev);
+
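+	/*
+	 * pm_runtime_get_sync() returns 1 if the device was already active,
+	 * so map any positive value to success
+	 */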
+ return ret > 0 ? 0 : ret;
+}
+
+static int s5p_jpeg_stop_streaming(struct vb2_queue *q)
+{
+ struct s5p_jpeg_ctx *ctx = vb2_get_drv_priv(q);
+
+ pm_runtime_put(ctx->jpeg->dev);
+
+ return 0;
+}
+
+static struct vb2_ops s5p_jpeg_qops = {
+ .queue_setup = s5p_jpeg_queue_setup,
+ .buf_prepare = s5p_jpeg_buf_prepare,
+ .buf_queue = s5p_jpeg_buf_queue,
+ .wait_prepare = s5p_jpeg_wait_prepare,
+ .wait_finish = s5p_jpeg_wait_finish,
+ .start_streaming = s5p_jpeg_start_streaming,
+ .stop_streaming = s5p_jpeg_stop_streaming,
+};
+
+static int queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct s5p_jpeg_ctx *ctx = priv;
+ int ret;
+
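+	/*
+	 * the source queue carries data supplied by userspace (OUTPUT),
+	 * the destination queue carries the results (CAPTURE)
+	 */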
+ memset(src_vq, 0, sizeof(*src_vq));
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ src_vq->io_modes = VB2_MMAP | VB2_USERPTR;
+ src_vq->drv_priv = ctx;
+ src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ src_vq->ops = &s5p_jpeg_qops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ memset(dst_vq, 0, sizeof(*dst_vq));
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ dst_vq->io_modes = VB2_MMAP | VB2_USERPTR;
+ dst_vq->drv_priv = ctx;
+ dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ dst_vq->ops = &s5p_jpeg_qops;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+
+ return vb2_queue_init(dst_vq);
+}
+
+/*
+ * ============================================================================
+ * ISR
+ * ============================================================================
+ */
+
+static irqreturn_t s5p_jpeg_irq(int irq, void *dev_id)
+{
+ struct s5p_jpeg *jpeg = dev_id;
+ struct s5p_jpeg_ctx *curr_ctx;
+ struct vb2_buffer *src_buf, *dst_buf;
+ unsigned long payload_size = 0;
+ enum vb2_buffer_state state = VB2_BUF_STATE_DONE;
+ bool enc_jpeg_too_large = false;
+ bool timer_elapsed = false;
+ bool op_completed = false;
+
+ curr_ctx = v4l2_m2m_get_curr_priv(jpeg->m2m_dev);
+
+ src_buf = v4l2_m2m_src_buf_remove(curr_ctx->m2m_ctx);
+ dst_buf = v4l2_m2m_dst_buf_remove(curr_ctx->m2m_ctx);
+
+ if (curr_ctx->mode == S5P_JPEG_ENCODE)
+ enc_jpeg_too_large = jpeg_enc_stream_stat(jpeg->regs);
+ timer_elapsed = jpeg_timer_stat(jpeg->regs);
+ op_completed = jpeg_result_stat_ok(jpeg->regs);
+ if (curr_ctx->mode == S5P_JPEG_DECODE)
+ op_completed = op_completed && jpeg_stream_stat_ok(jpeg->regs);
+
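+	/*
+	 * error conditions are checked in priority order: encoded stream
+	 * overflow, watchdog timeout, then the general result status
+	 */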
+ if (enc_jpeg_too_large) {
+ state = VB2_BUF_STATE_ERROR;
+ jpeg_clear_enc_stream_stat(jpeg->regs);
+ } else if (timer_elapsed) {
+ state = VB2_BUF_STATE_ERROR;
+ jpeg_clear_timer_stat(jpeg->regs);
+ } else if (!op_completed) {
+ state = VB2_BUF_STATE_ERROR;
+ } else {
+ payload_size = jpeg_compressed_size(jpeg->regs);
+ }
+
+ v4l2_m2m_buf_done(src_buf, state);
+ if (curr_ctx->mode == S5P_JPEG_ENCODE)
+ vb2_set_plane_payload(dst_buf, 0, payload_size);
+ v4l2_m2m_buf_done(dst_buf, state);
+ v4l2_m2m_job_finish(jpeg->m2m_dev, curr_ctx->m2m_ctx);
+
+ jpeg_clear_int(jpeg->regs);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * ============================================================================
+ * Driver basic infrastructure
+ * ============================================================================
+ */
+
+static int s5p_jpeg_probe(struct platform_device *pdev)
+{
+ struct s5p_jpeg *jpeg;
+ struct resource *res;
+ int ret;
+
+ /* JPEG IP abstraction struct */
+ jpeg = kzalloc(sizeof(struct s5p_jpeg), GFP_KERNEL);
+ if (!jpeg)
+ return -ENOMEM;
+
+ mutex_init(&jpeg->lock);
+ jpeg->dev = &pdev->dev;
+
+ /* memory-mapped registers */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "cannot find IO resource\n");
+ ret = -ENOENT;
+ goto jpeg_alloc_rollback;
+ }
+
+ jpeg->ioarea = request_mem_region(res->start, resource_size(res),
+ pdev->name);
+ if (!jpeg->ioarea) {
+ dev_err(&pdev->dev, "cannot request IO\n");
+ ret = -ENXIO;
+ goto jpeg_alloc_rollback;
+ }
+
+ jpeg->regs = ioremap(res->start, resource_size(res));
+ if (!jpeg->regs) {
+ dev_err(&pdev->dev, "cannot map IO\n");
+ ret = -ENXIO;
+ goto mem_region_rollback;
+ }
+
+ dev_dbg(&pdev->dev, "registers %p (%p, %p)\n",
+ jpeg->regs, jpeg->ioarea, res);
+
+ /* interrupt service routine registration */
+ jpeg->irq = ret = platform_get_irq(pdev, 0);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "cannot find IRQ\n");
+ goto ioremap_rollback;
+ }
+
+ ret = request_irq(jpeg->irq, s5p_jpeg_irq, 0,
+ dev_name(&pdev->dev), jpeg);
+
+ if (ret) {
+ dev_err(&pdev->dev, "cannot claim IRQ %d\n", jpeg->irq);
+ goto ioremap_rollback;
+ }
+
+ /* clocks */
+ jpeg->clk = clk_get(&pdev->dev, "jpeg");
+ if (IS_ERR(jpeg->clk)) {
+ dev_err(&pdev->dev, "cannot get clock\n");
+ ret = PTR_ERR(jpeg->clk);
+ goto request_irq_rollback;
+ }
+ dev_dbg(&pdev->dev, "clock source %p\n", jpeg->clk);
+ clk_enable(jpeg->clk);
+
+ /* v4l2 device */
+ ret = v4l2_device_register(&pdev->dev, &jpeg->v4l2_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register v4l2 device\n");
+ goto clk_get_rollback;
+ }
+
+ /* mem2mem device */
+ jpeg->m2m_dev = v4l2_m2m_init(&s5p_jpeg_m2m_ops);
+ if (IS_ERR(jpeg->m2m_dev)) {
+ v4l2_err(&jpeg->v4l2_dev, "Failed to init mem2mem device\n");
+ ret = PTR_ERR(jpeg->m2m_dev);
+ goto device_register_rollback;
+ }
+
+ jpeg->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
+ if (IS_ERR(jpeg->alloc_ctx)) {
+ v4l2_err(&jpeg->v4l2_dev, "Failed to init memory allocator\n");
+ ret = PTR_ERR(jpeg->alloc_ctx);
+ goto m2m_init_rollback;
+ }
+
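+	/*
+	 * the encoder and decoder are exposed as two separate video nodes
+	 * sharing the same fops, ioctl ops and mem2mem device
+	 */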
+ /* JPEG encoder /dev/videoX node */
+ jpeg->vfd_encoder = video_device_alloc();
+ if (!jpeg->vfd_encoder) {
+ v4l2_err(&jpeg->v4l2_dev, "Failed to allocate video device\n");
+ ret = -ENOMEM;
+ goto vb2_allocator_rollback;
+ }
+ strlcpy(jpeg->vfd_encoder->name, S5P_JPEG_M2M_NAME,
+ sizeof(jpeg->vfd_encoder->name));
+ jpeg->vfd_encoder->fops = &s5p_jpeg_fops;
+ jpeg->vfd_encoder->ioctl_ops = &s5p_jpeg_ioctl_ops;
+ jpeg->vfd_encoder->minor = -1;
+ jpeg->vfd_encoder->release = video_device_release;
+ jpeg->vfd_encoder->lock = &jpeg->lock;
+ jpeg->vfd_encoder->v4l2_dev = &jpeg->v4l2_dev;
+
+ ret = video_register_device(jpeg->vfd_encoder, VFL_TYPE_GRABBER, -1);
+ if (ret) {
+ v4l2_err(&jpeg->v4l2_dev, "Failed to register video device\n");
+ goto enc_vdev_alloc_rollback;
+ }
+
+ video_set_drvdata(jpeg->vfd_encoder, jpeg);
+ v4l2_info(&jpeg->v4l2_dev,
+ "encoder device registered as /dev/video%d\n",
+ jpeg->vfd_encoder->num);
+
+ /* JPEG decoder /dev/videoX node */
+ jpeg->vfd_decoder = video_device_alloc();
+ if (!jpeg->vfd_decoder) {
+ v4l2_err(&jpeg->v4l2_dev, "Failed to allocate video device\n");
+ ret = -ENOMEM;
+ goto enc_vdev_register_rollback;
+ }
+ strlcpy(jpeg->vfd_decoder->name, S5P_JPEG_M2M_NAME,
+ sizeof(jpeg->vfd_decoder->name));
+ jpeg->vfd_decoder->fops = &s5p_jpeg_fops;
+ jpeg->vfd_decoder->ioctl_ops = &s5p_jpeg_ioctl_ops;
+ jpeg->vfd_decoder->minor = -1;
+ jpeg->vfd_decoder->release = video_device_release;
+ jpeg->vfd_decoder->lock = &jpeg->lock;
+ jpeg->vfd_decoder->v4l2_dev = &jpeg->v4l2_dev;
+
+ ret = video_register_device(jpeg->vfd_decoder, VFL_TYPE_GRABBER, -1);
+ if (ret) {
+ v4l2_err(&jpeg->v4l2_dev, "Failed to register video device\n");
+ goto dec_vdev_alloc_rollback;
+ }
+
+ video_set_drvdata(jpeg->vfd_decoder, jpeg);
+ v4l2_info(&jpeg->v4l2_dev,
+ "decoder device registered as /dev/video%d\n",
+ jpeg->vfd_decoder->num);
+
+ /* final statements & power management */
+ platform_set_drvdata(pdev, jpeg);
+
+ pm_runtime_enable(&pdev->dev);
+
+ v4l2_info(&jpeg->v4l2_dev, "Samsung S5P JPEG codec\n");
+
+ return 0;
+
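+	/* error path: undo the completed probe steps in reverse order */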
+dec_vdev_alloc_rollback:
+ video_device_release(jpeg->vfd_decoder);
+
+enc_vdev_register_rollback:
+ video_unregister_device(jpeg->vfd_encoder);
+
+enc_vdev_alloc_rollback:
+ video_device_release(jpeg->vfd_encoder);
+
+vb2_allocator_rollback:
+ vb2_dma_contig_cleanup_ctx(jpeg->alloc_ctx);
+
+m2m_init_rollback:
+ v4l2_m2m_release(jpeg->m2m_dev);
+
+device_register_rollback:
+ v4l2_device_unregister(&jpeg->v4l2_dev);
+
+clk_get_rollback:
+ clk_disable(jpeg->clk);
+ clk_put(jpeg->clk);
+
+request_irq_rollback:
+ free_irq(jpeg->irq, jpeg);
+
+ioremap_rollback:
+ iounmap(jpeg->regs);
+
+mem_region_rollback:
+ release_resource(jpeg->ioarea);
+ release_mem_region(jpeg->ioarea->start, resource_size(jpeg->ioarea));
+
+jpeg_alloc_rollback:
+ kfree(jpeg);
+ return ret;
+}
+
+static int s5p_jpeg_remove(struct platform_device *pdev)
+{
+ struct s5p_jpeg *jpeg = platform_get_drvdata(pdev);
+
+ pm_runtime_disable(jpeg->dev);
+
+ video_unregister_device(jpeg->vfd_decoder);
+ video_device_release(jpeg->vfd_decoder);
+ video_unregister_device(jpeg->vfd_encoder);
+ video_device_release(jpeg->vfd_encoder);
+ vb2_dma_contig_cleanup_ctx(jpeg->alloc_ctx);
+ v4l2_m2m_release(jpeg->m2m_dev);
+ v4l2_device_unregister(&jpeg->v4l2_dev);
+
+ clk_disable(jpeg->clk);
+ clk_put(jpeg->clk);
+
+ free_irq(jpeg->irq, jpeg);
+
+ iounmap(jpeg->regs);
+
+ release_resource(jpeg->ioarea);
+ release_mem_region(jpeg->ioarea->start, resource_size(jpeg->ioarea));
+ kfree(jpeg);
+
+ return 0;
+}
+
+static int s5p_jpeg_runtime_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static int s5p_jpeg_runtime_resume(struct device *dev)
+{
+ struct s5p_jpeg *jpeg = dev_get_drvdata(dev);
+ /*
+ * JPEG IP allows storing two Huffman tables for each component
+ * We fill table 0 for each component
+ */
+ jpeg_set_hdctbl(jpeg->regs);
+ jpeg_set_hdctblg(jpeg->regs);
+ jpeg_set_hactbl(jpeg->regs);
+ jpeg_set_hactblg(jpeg->regs);
+ return 0;
+}
+
+static const struct dev_pm_ops s5p_jpeg_pm_ops = {
+ .runtime_suspend = s5p_jpeg_runtime_suspend,
+ .runtime_resume = s5p_jpeg_runtime_resume,
+};
+
+static struct platform_driver s5p_jpeg_driver = {
+ .probe = s5p_jpeg_probe,
+ .remove = s5p_jpeg_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = S5P_JPEG_M2M_NAME,
+ .pm = &s5p_jpeg_pm_ops,
+ },
+};
+
+static int __init
+s5p_jpeg_register(void)
+{
+ int ret;
+
+ pr_info("S5P JPEG V4L2 Driver, (c) 2011 Samsung Electronics\n");
+
+ ret = platform_driver_register(&s5p_jpeg_driver);
+
+ if (ret)
+ pr_err("%s: failed to register jpeg driver\n", __func__);
+
+ return ret;
+}
+
+static void __exit
+s5p_jpeg_unregister(void)
+{
+ platform_driver_unregister(&s5p_jpeg_driver);
+}
+
+module_init(s5p_jpeg_register);
+module_exit(s5p_jpeg_unregister);
+
+MODULE_AUTHOR("Andrzej Pietrasiewicz <andrzej.p@samsung.com>");
+MODULE_DESCRIPTION("Samsung JPEG codec driver");
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/media/video/s5p-jpeg/jpeg-core.h b/drivers/media/video/s5p-jpeg/jpeg-core.h
new file mode 100644
index 000000000000..facad6114f5e
--- /dev/null
+++ b/drivers/media/video/s5p-jpeg/jpeg-core.h
@@ -0,0 +1,143 @@
+/* linux/drivers/media/video/s5p-jpeg/jpeg-core.h
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef JPEG_CORE_H_
+#define JPEG_CORE_H_
+
+#include <media/v4l2-device.h>
+
+#define S5P_JPEG_M2M_NAME "s5p-jpeg"
+
+/* JPEG compression quality setting */
+#define S5P_JPEG_COMPR_QUAL_BEST 0
+#define S5P_JPEG_COMPR_QUAL_WORST 3
+
+/* JPEG RGB to YCbCr conversion matrix coefficients */
+#define S5P_JPEG_COEF11 0x4d
+#define S5P_JPEG_COEF12 0x97
+#define S5P_JPEG_COEF13 0x1e
+#define S5P_JPEG_COEF21 0x2c
+#define S5P_JPEG_COEF22 0x57
+#define S5P_JPEG_COEF23 0x83
+#define S5P_JPEG_COEF31 0x83
+#define S5P_JPEG_COEF32 0x6e
+#define S5P_JPEG_COEF33 0x13
+
+/* a selection of JPEG markers */
+#define TEM 0x01
+#define SOF0 0xc0
+#define RST 0xd0
+#define SOI 0xd8
+#define EOI 0xd9
+#define DHP 0xde
+
+/* Flags that indicate a format can be used for capture/output */
+#define MEM2MEM_CAPTURE (1 << 0)
+#define MEM2MEM_OUTPUT (1 << 1)
+
+/**
+ * struct s5p_jpeg - JPEG IP abstraction
+ * @lock: the mutex protecting this structure
+ * @v4l2_dev: v4l2 device for mem2mem mode
+ * @vfd_encoder: video device node for encoder mem2mem mode
+ * @vfd_decoder: video device node for decoder mem2mem mode
+ * @m2m_dev: v4l2 mem2mem device data
+ * @ioarea: JPEG IP memory region
+ * @regs: JPEG IP registers mapping
+ * @irq: JPEG IP irq
+ * @clk: JPEG IP clock
+ * @dev: JPEG IP struct device
+ * @alloc_ctx: videobuf2 memory allocator's context
+ */
+struct s5p_jpeg {
+ struct mutex lock;
+
+ struct v4l2_device v4l2_dev;
+ struct video_device *vfd_encoder;
+ struct video_device *vfd_decoder;
+ struct v4l2_m2m_dev *m2m_dev;
+
+ struct resource *ioarea;
+ void __iomem *regs;
+ unsigned int irq;
+ struct clk *clk;
+ struct device *dev;
+ void *alloc_ctx;
+};
+
+/**
+ * struct jpeg_fmt - driver's internal color format data
+ * @name: format descritpion
+ * @fourcc: the fourcc code, 0 if not applicable
+ * @depth: number of bits per pixel
+ * @colplanes: number of color planes (1 for packed formats)
+ * @h_align: horizontal alignment order (align to 2^h_align)
+ * @v_align: vertical alignment order (align to 2^v_align)
+ * @types: types of queue this format is applicable to
+ */
+struct s5p_jpeg_fmt {
+ char *name;
+ u32 fourcc;
+ int depth;
+ int colplanes;
+ int h_align;
+ int v_align;
+ u32 types;
+};
+
+/**
+ * s5p_jpeg_q_data - parameters of one queue
+ * @fmt: driver-specific format of this queue
+ * @w: image width
+ * @h: image height
+ * @size: image buffer size in bytes
+ */
+struct s5p_jpeg_q_data {
+ struct s5p_jpeg_fmt *fmt;
+ u32 w;
+ u32 h;
+ u32 size;
+};
+
+/**
+ * s5p_jpeg_ctx - the device context data
+ * @jpeg: JPEG IP device for this context
+ * @mode: compression (encode) operation or decompression (decode)
+ * @compr_quality: destination image quality in compression (encode) mode
+ * @m2m_ctx: mem2mem device context
+ * @out_q: source (output) queue information
+ * @cap_q:		destination (capture) queue information
+ * @hdr_parsed: set if header has been parsed during decompression
+ */
+struct s5p_jpeg_ctx {
+ struct s5p_jpeg *jpeg;
+ unsigned int mode;
+ unsigned int compr_quality;
+ struct v4l2_m2m_ctx *m2m_ctx;
+ struct s5p_jpeg_q_data out_q;
+ struct s5p_jpeg_q_data cap_q;
+ bool hdr_parsed;
+};
+
+/**
+ * s5p_jpeg_buffer - description of memory containing input JPEG data
+ * @size: buffer size
+ * @curr: current position in the buffer
+ * @data: pointer to the data
+ */
+struct s5p_jpeg_buffer {
+ unsigned long size;
+ unsigned long curr;
+ unsigned long data;
+};
+
+#endif /* JPEG_CORE_H_ */
diff --git a/drivers/media/video/s5p-jpeg/jpeg-hw.h b/drivers/media/video/s5p-jpeg/jpeg-hw.h
new file mode 100644
index 000000000000..e10c744e9f23
--- /dev/null
+++ b/drivers/media/video/s5p-jpeg/jpeg-hw.h
@@ -0,0 +1,353 @@
+/* linux/drivers/media/video/s5p-jpeg/jpeg-hw.h
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef JPEG_HW_H_
+#define JPEG_HW_H_
+
+#include <linux/io.h>
+
+#include "jpeg-hw.h"
+#include "jpeg-regs.h"
+
+#define S5P_JPEG_MIN_WIDTH 32
+#define S5P_JPEG_MIN_HEIGHT 32
+#define S5P_JPEG_MAX_WIDTH 8192
+#define S5P_JPEG_MAX_HEIGHT 8192
+#define S5P_JPEG_ENCODE 0
+#define S5P_JPEG_DECODE 1
+#define S5P_JPEG_RAW_IN_565 0
+#define S5P_JPEG_RAW_IN_422 1
+#define S5P_JPEG_SUBSAMPLING_422 0
+#define S5P_JPEG_SUBSAMPLING_420 1
+#define S5P_JPEG_RAW_OUT_422 0
+#define S5P_JPEG_RAW_OUT_420 1
+
+static inline void jpeg_reset(void __iomem *regs)
+{
+ unsigned long reg;
+
+ writel(1, regs + S5P_JPG_SW_RESET);
+ reg = readl(regs + S5P_JPG_SW_RESET);
+ /* no other way but polling for when JPEG IP becomes operational */
+ while (reg != 0) {
+ cpu_relax();
+ reg = readl(regs + S5P_JPG_SW_RESET);
+ }
+}
+
+static inline void jpeg_poweron(void __iomem *regs)
+{
+ writel(S5P_POWER_ON, regs + S5P_JPGCLKCON);
+}
+
+static inline void jpeg_input_raw_mode(void __iomem *regs, unsigned long mode)
+{
+ unsigned long reg, m;
+
+ m = S5P_MOD_SEL_565;
+ if (mode == S5P_JPEG_RAW_IN_565)
+ m = S5P_MOD_SEL_565;
+ else if (mode == S5P_JPEG_RAW_IN_422)
+ m = S5P_MOD_SEL_422;
+
+ reg = readl(regs + S5P_JPGCMOD);
+ reg &= ~S5P_MOD_SEL_MASK;
+ reg |= m;
+ writel(reg, regs + S5P_JPGCMOD);
+}
+
+static inline void jpeg_input_raw_y16(void __iomem *regs, bool y16)
+{
+ unsigned long reg;
+
+ reg = readl(regs + S5P_JPGCMOD);
+ if (y16)
+ reg |= S5P_MODE_Y16;
+ else
+ reg &= ~S5P_MODE_Y16_MASK;
+ writel(reg, regs + S5P_JPGCMOD);
+}
+
+static inline void jpeg_proc_mode(void __iomem *regs, unsigned long mode)
+{
+ unsigned long reg, m;
+
+ m = S5P_PROC_MODE_DECOMPR;
+ if (mode == S5P_JPEG_ENCODE)
+ m = S5P_PROC_MODE_COMPR;
+ else
+ m = S5P_PROC_MODE_DECOMPR;
+ reg = readl(regs + S5P_JPGMOD);
+ reg &= ~S5P_PROC_MODE_MASK;
+ reg |= m;
+ writel(reg, regs + S5P_JPGMOD);
+}
+
+static inline void jpeg_subsampling_mode(void __iomem *regs, unsigned long mode)
+{
+ unsigned long reg, m;
+
+ m = S5P_SUBSAMPLING_MODE_422;
+ if (mode == S5P_JPEG_SUBSAMPLING_422)
+ m = S5P_SUBSAMPLING_MODE_422;
+ else if (mode == S5P_JPEG_SUBSAMPLING_420)
+ m = S5P_SUBSAMPLING_MODE_420;
+ reg = readl(regs + S5P_JPGMOD);
+ reg &= ~S5P_SUBSAMPLING_MODE_MASK;
+ reg |= m;
+ writel(reg, regs + S5P_JPGMOD);
+}
+
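+/* the 16-bit restart interval is programmed as separate upper and lower bytes */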
+static inline void jpeg_dri(void __iomem *regs, unsigned int dri)
+{
+ unsigned long reg;
+
+ reg = readl(regs + S5P_JPGDRI_U);
+ reg &= ~0xff;
+ reg |= (dri >> 8) & 0xff;
+ writel(reg, regs + S5P_JPGDRI_U);
+
+ reg = readl(regs + S5P_JPGDRI_L);
+ reg &= ~0xff;
+ reg |= dri & 0xff;
+ writel(reg, regs + S5P_JPGDRI_L);
+}
+
+static inline void jpeg_qtbl(void __iomem *regs, unsigned int t, unsigned int n)
+{
+ unsigned long reg;
+
+ reg = readl(regs + S5P_JPG_QTBL);
+ reg &= ~S5P_QT_NUMt_MASK(t);
+ reg |= (n << S5P_QT_NUMt_SHIFT(t)) & S5P_QT_NUMt_MASK(t);
+ writel(reg, regs + S5P_JPG_QTBL);
+}
+
+static inline void jpeg_htbl_ac(void __iomem *regs, unsigned int t)
+{
+ unsigned long reg;
+
+ reg = readl(regs + S5P_JPG_HTBL);
+ reg &= ~S5P_HT_NUMt_AC_MASK(t);
+ /* this driver uses table 0 for all color components */
+ reg |= (0 << S5P_HT_NUMt_AC_SHIFT(t)) & S5P_HT_NUMt_AC_MASK(t);
+ writel(reg, regs + S5P_JPG_HTBL);
+}
+
+static inline void jpeg_htbl_dc(void __iomem *regs, unsigned int t)
+{
+ unsigned long reg;
+
+ reg = readl(regs + S5P_JPG_HTBL);
+ reg &= ~S5P_HT_NUMt_DC_MASK(t);
+ /* this driver uses table 0 for all color components */
+ reg |= (0 << S5P_HT_NUMt_DC_SHIFT(t)) & S5P_HT_NUMt_DC_MASK(t);
+ writel(reg, regs + S5P_JPG_HTBL);
+}
+
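+/* image dimensions are 16-bit values split across upper and lower byte registers */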
+static inline void jpeg_y(void __iomem *regs, unsigned int y)
+{
+ unsigned long reg;
+
+ reg = readl(regs + S5P_JPGY_U);
+ reg &= ~0xff;
+ reg |= (y >> 8) & 0xff;
+ writel(reg, regs + S5P_JPGY_U);
+
+ reg = readl(regs + S5P_JPGY_L);
+ reg &= ~0xff;
+ reg |= y & 0xff;
+ writel(reg, regs + S5P_JPGY_L);
+}
+
+static inline void jpeg_x(void __iomem *regs, unsigned int x)
+{
+ unsigned long reg;
+
+ reg = readl(regs + S5P_JPGX_U);
+ reg &= ~0xff;
+ reg |= (x >> 8) & 0xff;
+ writel(reg, regs + S5P_JPGX_U);
+
+ reg = readl(regs + S5P_JPGX_L);
+ reg &= ~0xff;
+ reg |= x & 0xff;
+ writel(reg, regs + S5P_JPGX_L);
+}
+
+static inline void jpeg_rst_int_enable(void __iomem *regs, bool enable)
+{
+ unsigned long reg;
+
+ reg = readl(regs + S5P_JPGINTSE);
+ reg &= ~S5P_RSTm_INT_EN_MASK;
+ if (enable)
+ reg |= S5P_RSTm_INT_EN;
+ writel(reg, regs + S5P_JPGINTSE);
+}
+
+static inline void jpeg_data_num_int_enable(void __iomem *regs, bool enable)
+{
+ unsigned long reg;
+
+ reg = readl(regs + S5P_JPGINTSE);
+ reg &= ~S5P_DATA_NUM_INT_EN_MASK;
+ if (enable)
+ reg |= S5P_DATA_NUM_INT_EN;
+ writel(reg, regs + S5P_JPGINTSE);
+}
+
+static inline void jpeg_final_mcu_num_int_enable(void __iomem *regs, bool enbl)
+{
+ unsigned long reg;
+
+ reg = readl(regs + S5P_JPGINTSE);
+ reg &= ~S5P_FINAL_MCU_NUM_INT_EN_MASK;
+ if (enbl)
+ reg |= S5P_FINAL_MCU_NUM_INT_EN;
+ writel(reg, regs + S5P_JPGINTSE);
+}
+
+static inline void jpeg_timer_enable(void __iomem *regs, unsigned long val)
+{
+ unsigned long reg;
+
+ reg = readl(regs + S5P_JPG_TIMER_SE);
+ reg |= S5P_TIMER_INT_EN;
+ reg &= ~S5P_TIMER_INIT_MASK;
+ reg |= val & S5P_TIMER_INIT_MASK;
+ writel(reg, regs + S5P_JPG_TIMER_SE);
+}
+
+static inline void jpeg_timer_disable(void __iomem *regs)
+{
+ unsigned long reg;
+
+ reg = readl(regs + S5P_JPG_TIMER_SE);
+ reg &= ~S5P_TIMER_INT_EN_MASK;
+ writel(reg, regs + S5P_JPG_TIMER_SE);
+}
+
+static inline int jpeg_timer_stat(void __iomem *regs)
+{
+ return (int)((readl(regs + S5P_JPG_TIMER_ST) & S5P_TIMER_INT_STAT_MASK)
+ >> S5P_TIMER_INT_STAT_SHIFT);
+}
+
+static inline void jpeg_clear_timer_stat(void __iomem *regs)
+{
+ unsigned long reg;
+
+ reg = readl(regs + S5P_JPG_TIMER_SE);
+ reg &= ~S5P_TIMER_INT_STAT_MASK;
+ writel(reg, regs + S5P_JPG_TIMER_SE);
+}
+
+static inline void jpeg_enc_stream_int(void __iomem *regs, unsigned long size)
+{
+ unsigned long reg;
+
+ reg = readl(regs + S5P_JPG_ENC_STREAM_INTSE);
+ reg &= ~S5P_ENC_STREAM_BOUND_MASK;
+ reg |= S5P_ENC_STREAM_INT_EN;
+ reg |= size & S5P_ENC_STREAM_BOUND_MASK;
+ writel(reg, regs + S5P_JPG_ENC_STREAM_INTSE);
+}
+
+static inline int jpeg_enc_stream_stat(void __iomem *regs)
+{
+ return (int)(readl(regs + S5P_JPG_ENC_STREAM_INTST) &
+ S5P_ENC_STREAM_INT_STAT_MASK);
+}
+
+static inline void jpeg_clear_enc_stream_stat(void __iomem *regs)
+{
+ unsigned long reg;
+
+ reg = readl(regs + S5P_JPG_ENC_STREAM_INTSE);
+ reg &= ~S5P_ENC_STREAM_INT_MASK;
+ writel(reg, regs + S5P_JPG_ENC_STREAM_INTSE);
+}
+
+static inline void jpeg_outform_raw(void __iomem *regs, unsigned long format)
+{
+ unsigned long reg, f;
+
+ f = S5P_DEC_OUT_FORMAT_422;
+ if (format == S5P_JPEG_RAW_OUT_422)
+ f = S5P_DEC_OUT_FORMAT_422;
+ else if (format == S5P_JPEG_RAW_OUT_420)
+ f = S5P_DEC_OUT_FORMAT_420;
+ reg = readl(regs + S5P_JPG_OUTFORM);
+ reg &= ~S5P_DEC_OUT_FORMAT_MASK;
+ reg |= f;
+ writel(reg, regs + S5P_JPG_OUTFORM);
+}
+
+static inline void jpeg_jpgadr(void __iomem *regs, unsigned long addr)
+{
+ writel(addr, regs + S5P_JPG_JPGADR);
+}
+
+static inline void jpeg_imgadr(void __iomem *regs, unsigned long addr)
+{
+ writel(addr, regs + S5P_JPG_IMGADR);
+}
+
+static inline void jpeg_coef(void __iomem *regs, unsigned int i,
+ unsigned int j, unsigned int coef)
+{
+ unsigned long reg;
+
+ reg = readl(regs + S5P_JPG_COEF(i));
+ reg &= ~S5P_COEFn_MASK(j);
+ reg |= (coef << S5P_COEFn_SHIFT(j)) & S5P_COEFn_MASK(j);
+ writel(reg, regs + S5P_JPG_COEF(i));
+}
+
+static inline void jpeg_start(void __iomem *regs)
+{
+ writel(1, regs + S5P_JSTART);
+}
+
+static inline int jpeg_result_stat_ok(void __iomem *regs)
+{
+ return (int)((readl(regs + S5P_JPGINTST) & S5P_RESULT_STAT_MASK)
+ >> S5P_RESULT_STAT_SHIFT);
+}
+
+static inline int jpeg_stream_stat_ok(void __iomem *regs)
+{
+ return !(int)((readl(regs + S5P_JPGINTST) & S5P_STREAM_STAT_MASK)
+ >> S5P_STREAM_STAT_SHIFT);
+}
+
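+/* acknowledge the pending interrupt by reading the status registers and issuing INT_RELEASE */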
+static inline void jpeg_clear_int(void __iomem *regs)
+{
+ unsigned long reg;
+
+ reg = readl(regs + S5P_JPGINTST);
+ writel(S5P_INT_RELEASE, regs + S5P_JPGCOM);
+ reg = readl(regs + S5P_JPGOPR);
+}
+
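+/* the compressed stream size is a 24-bit value assembled from three byte-wide counter registers */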
+static inline unsigned int jpeg_compressed_size(void __iomem *regs)
+{
+ unsigned long jpeg_size = 0;
+
+ jpeg_size |= (readl(regs + S5P_JPGCNT_U) & 0xff) << 16;
+ jpeg_size |= (readl(regs + S5P_JPGCNT_M) & 0xff) << 8;
+ jpeg_size |= (readl(regs + S5P_JPGCNT_L) & 0xff);
+
+ return (unsigned int)jpeg_size;
+}
+
+#endif /* JPEG_HW_H_ */
diff --git a/drivers/media/video/s5p-jpeg/jpeg-regs.h b/drivers/media/video/s5p-jpeg/jpeg-regs.h
new file mode 100644
index 000000000000..91f4dd5f86dd
--- /dev/null
+++ b/drivers/media/video/s5p-jpeg/jpeg-regs.h
@@ -0,0 +1,170 @@
+/* linux/drivers/media/video/s5p-jpeg/jpeg-regs.h
+ *
+ * Register definition file for Samsung JPEG codec driver
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef JPEG_REGS_H_
+#define JPEG_REGS_H_
+
+/* JPEG mode register */
+#define S5P_JPGMOD 0x00
+#define S5P_PROC_MODE_MASK (0x1 << 3)
+#define S5P_PROC_MODE_DECOMPR (0x1 << 3)
+#define S5P_PROC_MODE_COMPR (0x0 << 3)
+#define S5P_SUBSAMPLING_MODE_MASK 0x7
+#define S5P_SUBSAMPLING_MODE_444 (0x0 << 0)
+#define S5P_SUBSAMPLING_MODE_422 (0x1 << 0)
+#define S5P_SUBSAMPLING_MODE_420 (0x2 << 0)
+#define S5P_SUBSAMPLING_MODE_GRAY (0x3 << 0)
+
+/* JPEG operation status register */
+#define S5P_JPGOPR 0x04
+
+/* Quantization tables */
+#define S5P_JPG_QTBL 0x08
+#define S5P_QT_NUMt_SHIFT(t) (((t) - 1) << 1)
+#define S5P_QT_NUMt_MASK(t) (0x3 << S5P_QT_NUMt_SHIFT(t))
+
+/* Huffman tables */
+#define S5P_JPG_HTBL 0x0c
+#define S5P_HT_NUMt_AC_SHIFT(t) (((t) << 1) - 1)
+#define S5P_HT_NUMt_AC_MASK(t) (0x1 << S5P_HT_NUMt_AC_SHIFT(t))
+
+#define S5P_HT_NUMt_DC_SHIFT(t) (((t) - 1) << 1)
+#define S5P_HT_NUMt_DC_MASK(t) (0x1 << S5P_HT_NUMt_DC_SHIFT(t))
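+/* in the table-selection macros above, (t) is the 1-based colour component index used by the driver: 1 = Y, 2 = Cb, 3 = Cr */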
+
+/* JPEG restart interval register upper byte */
+#define S5P_JPGDRI_U 0x10
+
+/* JPEG restart interval register lower byte */
+#define S5P_JPGDRI_L 0x14
+
+/* JPEG vertical resolution register upper byte */
+#define S5P_JPGY_U 0x18
+
+/* JPEG vertical resolution register lower byte */
+#define S5P_JPGY_L 0x1c
+
+/* JPEG horizontal resolution register upper byte */
+#define S5P_JPGX_U 0x20
+
+/* JPEG horizontal resolution register lower byte */
+#define S5P_JPGX_L 0x24
+
+/* JPEG byte count register upper byte */
+#define S5P_JPGCNT_U 0x28
+
+/* JPEG byte count register middle byte */
+#define S5P_JPGCNT_M 0x2c
+
+/* JPEG byte count register lower byte */
+#define S5P_JPGCNT_L 0x30
+
+/* JPEG interrupt setting register */
+#define S5P_JPGINTSE 0x34
+#define S5P_RSTm_INT_EN_MASK (0x1 << 7)
+#define S5P_RSTm_INT_EN (0x1 << 7)
+#define S5P_DATA_NUM_INT_EN_MASK (0x1 << 6)
+#define S5P_DATA_NUM_INT_EN (0x1 << 6)
+#define S5P_FINAL_MCU_NUM_INT_EN_MASK (0x1 << 5)
+#define S5P_FINAL_MCU_NUM_INT_EN (0x1 << 5)
+
+/* JPEG interrupt status register */
+#define S5P_JPGINTST 0x38
+#define S5P_RESULT_STAT_SHIFT 6
+#define S5P_RESULT_STAT_MASK (0x1 << S5P_RESULT_STAT_SHIFT)
+#define S5P_STREAM_STAT_SHIFT 5
+#define S5P_STREAM_STAT_MASK (0x1 << S5P_STREAM_STAT_SHIFT)
+
+/* JPEG command register */
+#define S5P_JPGCOM 0x4c
+#define S5P_INT_RELEASE (0x1 << 2)
+
+/* Raw image data r/w address register */
+#define S5P_JPG_IMGADR 0x50
+
+/* JPEG file r/w address register */
+#define S5P_JPG_JPGADR 0x58
+
+/* Coefficient for RGB-to-YCbCr converter register */
+#define S5P_JPG_COEF(n) (0x5c + (((n) - 1) << 2))
+#define S5P_COEFn_SHIFT(j) ((3 - (j)) << 3)
+#define S5P_COEFn_MASK(j) (0xff << S5P_COEFn_SHIFT(j))
+
+/* JPEG color mode register */
+#define S5P_JPGCMOD 0x68
+#define S5P_MOD_SEL_MASK (0x7 << 5)
+#define S5P_MOD_SEL_422 (0x1 << 5)
+#define S5P_MOD_SEL_565 (0x2 << 5)
+#define S5P_MODE_Y16_MASK (0x1 << 1)
+#define S5P_MODE_Y16 (0x1 << 1)
+
+/* JPEG clock control register */
+#define S5P_JPGCLKCON 0x6c
+#define S5P_CLK_DOWN_READY (0x1 << 1)
+#define S5P_POWER_ON (0x1 << 0)
+
+/* JPEG start register */
+#define S5P_JSTART 0x70
+
+/* JPEG SW reset register */
+#define S5P_JPG_SW_RESET 0x78
+
+/* JPEG timer setting register */
+#define S5P_JPG_TIMER_SE 0x7c
+#define S5P_TIMER_INT_EN_MASK (0x1 << 31)
+#define S5P_TIMER_INT_EN (0x1 << 31)
+#define S5P_TIMER_INIT_MASK 0x7fffffff
+
+/* JPEG timer status register */
+#define S5P_JPG_TIMER_ST 0x80
+#define S5P_TIMER_INT_STAT_SHIFT 31
+#define S5P_TIMER_INT_STAT_MASK (0x1 << S5P_TIMER_INT_STAT_SHIFT)
+#define S5P_TIMER_CNT_SHIFT 0
+#define S5P_TIMER_CNT_MASK 0x7fffffff
+
+/* JPEG decompression output format register */
+#define S5P_JPG_OUTFORM 0x88
+#define S5P_DEC_OUT_FORMAT_MASK (0x1 << 0)
+#define S5P_DEC_OUT_FORMAT_422 (0x0 << 0)
+#define S5P_DEC_OUT_FORMAT_420 (0x1 << 0)
+
+/* JPEG version register */
+#define S5P_JPG_VERSION 0x8c
+
+/* JPEG compressed stream size interrupt setting register */
+#define S5P_JPG_ENC_STREAM_INTSE 0x98
+#define S5P_ENC_STREAM_INT_MASK (0x1 << 24)
+#define S5P_ENC_STREAM_INT_EN (0x1 << 24)
+#define S5P_ENC_STREAM_BOUND_MASK 0xffffff
+
+/* JPEG compressed stream size interrupt status register */
+#define S5P_JPG_ENC_STREAM_INTST 0x9c
+#define S5P_ENC_STREAM_INT_STAT_MASK 0x1
+
+/* JPEG quantizer table register */
+#define S5P_JPG_QTBL_CONTENT(n) (0x400 + (n) * 0x100)
+
+/* JPEG DC Huffman table register */
+#define S5P_JPG_HDCTBL(n) (0x800 + (n) * 0x400)
+
+/* JPEG DC Huffman table register */
+#define S5P_JPG_HDCTBLG(n) (0x840 + (n) * 0x400)
+
+/* JPEG AC Huffman table register */
+#define S5P_JPG_HACTBL(n) (0x880 + (n) * 0x400)
+
+/* JPEG AC Huffman table register */
+#define S5P_JPG_HACTBLG(n) (0x8c0 + (n) * 0x400)
+
+#endif /* JPEG_REGS_H_ */
+
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc.c b/drivers/media/video/s5p-mfc/s5p_mfc.c
index 8be8b54eb749..83fe461af263 100644
--- a/drivers/media/video/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/video/s5p-mfc/s5p_mfc.c
@@ -18,7 +18,6 @@
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
-#include <linux/version.h>
#include <linux/videodev2.h>
#include <linux/workqueue.h>
#include <media/videobuf2-core.h>
@@ -475,7 +474,7 @@ static void s5p_mfc_handle_seq_done(struct s5p_mfc_ctx *ctx,
ctx->mv_size = 0;
}
ctx->dpb_count = s5p_mfc_get_dpb_count();
- if (ctx->img_width == 0 || ctx->img_width == 0)
+ if (ctx->img_width == 0 || ctx->img_height == 0)
ctx->state = MFCINST_ERROR;
else
ctx->state = MFCINST_HEAD_PARSED;
@@ -1245,27 +1244,7 @@ static struct platform_driver s5p_mfc_driver = {
},
};
-static char banner[] __initdata =
- "S5P MFC V4L2 Driver, (C) 2011 Samsung Electronics\n";
-
-static int __init s5p_mfc_init(void)
-{
- int ret;
-
- pr_info("%s", banner);
- ret = platform_driver_register(&s5p_mfc_driver);
- if (ret)
- pr_err("Platform device registration failed.\n");
- return ret;
-}
-
-static void __exit s5p_mfc_exit(void)
-{
- platform_driver_unregister(&s5p_mfc_driver);
-}
-
-module_init(s5p_mfc_init);
-module_exit(s5p_mfc_exit);
+module_platform_driver(s5p_mfc_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kamil Debski <k.debski@samsung.com>");
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_dec.c b/drivers/media/video/s5p-mfc/s5p_mfc_dec.c
index 844a4d7797bc..c25ec022d267 100644
--- a/drivers/media/video/s5p-mfc/s5p_mfc_dec.c
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_dec.c
@@ -165,7 +165,7 @@ static struct mfc_control controls[] = {
.maximum = 32,
.step = 1,
.default_value = 1,
- .flags = V4L2_CTRL_FLAG_VOLATILE,
+ .is_volatile = 1,
},
};
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_enc.c b/drivers/media/video/s5p-mfc/s5p_mfc_enc.c
index 1e8cdb77d4b8..dff9dc798795 100644
--- a/drivers/media/video/s5p-mfc/s5p_mfc_enc.c
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_enc.c
@@ -61,7 +61,7 @@ static struct s5p_mfc_fmt formats[] = {
.num_planes = 1,
},
{
- .name = "H264 Encoded Stream",
+ .name = "H263 Encoded Stream",
.fourcc = V4L2_PIX_FMT_H263,
.codec_mode = S5P_FIMV_CODEC_H263_ENC,
.type = MFC_FMT_ENC,
diff --git a/drivers/media/video/s5p-tv/hdmi_drv.c b/drivers/media/video/s5p-tv/hdmi_drv.c
index 0279e6e89feb..8b41a0410ab1 100644
--- a/drivers/media/video/s5p-tv/hdmi_drv.c
+++ b/drivers/media/video/s5p-tv/hdmi_drv.c
@@ -838,8 +838,8 @@ static int hdmi_resources_init(struct hdmi_device *hdev)
dev_err(dev, "failed to get clock 'hdmiphy'\n");
goto fail;
}
- res->regul_bulk = kzalloc(ARRAY_SIZE(supply) *
- sizeof res->regul_bulk[0], GFP_KERNEL);
+ res->regul_bulk = kcalloc(ARRAY_SIZE(supply),
+ sizeof(res->regul_bulk[0]), GFP_KERNEL);
if (!res->regul_bulk) {
dev_err(dev, "failed to get memory for regulators\n");
goto fail;
@@ -1016,28 +1016,4 @@ static struct platform_driver hdmi_driver __refdata = {
}
};
-/* D R I V E R I N I T I A L I Z A T I O N */
-
-static int __init hdmi_init(void)
-{
- int ret;
- static const char banner[] __initdata = KERN_INFO \
- "Samsung HDMI output driver, "
- "(c) 2010-2011 Samsung Electronics Co., Ltd.\n";
- printk(banner);
-
- ret = platform_driver_register(&hdmi_driver);
- if (ret)
- printk(KERN_ERR "HDMI platform driver register failed\n");
-
- return ret;
-}
-module_init(hdmi_init);
-
-static void __exit hdmi_exit(void)
-{
- platform_driver_unregister(&hdmi_driver);
-}
-module_exit(hdmi_exit);
-
-
+module_platform_driver(hdmi_driver);
diff --git a/drivers/media/video/s5p-tv/mixer.h b/drivers/media/video/s5p-tv/mixer.h
index 51ad59b30358..1597078c4a50 100644
--- a/drivers/media/video/s5p-tv/mixer.h
+++ b/drivers/media/video/s5p-tv/mixer.h
@@ -86,6 +86,17 @@ struct mxr_crop {
unsigned int field;
};
+/** stages of geometry operations */
+enum mxr_geometry_stage {
+ MXR_GEOMETRY_SINK,
+ MXR_GEOMETRY_COMPOSE,
+ MXR_GEOMETRY_CROP,
+ MXR_GEOMETRY_SOURCE,
+};
+
+/* flag indicating that offset should be 0 */
+#define MXR_NO_OFFSET 0x80000000
+
/** description of transformation from source to destination image */
struct mxr_geometry {
/** cropping for source image */
@@ -133,7 +144,8 @@ struct mxr_layer_ops {
/** streaming stop/start */
void (*stream_set)(struct mxr_layer *, int);
/** adjusting geometry */
- void (*fix_geometry)(struct mxr_layer *);
+ void (*fix_geometry)(struct mxr_layer *,
+ enum mxr_geometry_stage, unsigned long);
};
/** layer instance, a single window and content displayed on output */
diff --git a/drivers/media/video/s5p-tv/mixer_grp_layer.c b/drivers/media/video/s5p-tv/mixer_grp_layer.c
index de8270c2b6e7..b93a21f5aa13 100644
--- a/drivers/media/video/s5p-tv/mixer_grp_layer.c
+++ b/drivers/media/video/s5p-tv/mixer_grp_layer.c
@@ -101,47 +101,132 @@ static void mxr_graph_format_set(struct mxr_layer *layer)
layer->fmt, &layer->geo);
}
-static void mxr_graph_fix_geometry(struct mxr_layer *layer)
+static inline unsigned int closest(unsigned int x, unsigned int a,
+ unsigned int b, unsigned long flags)
+{
+ unsigned int mid = (a + b) / 2;
+
+ /* choosing closest value with constraints according to table:
+ * -------------+-----+-----+-----+-------+
+ * flags | 0 | LE | GE | LE|GE |
+ * -------------+-----+-----+-----+-------+
+ * x <= a | a | a | a | a |
+ * a < x <= mid | a | a | b | a |
+ * mid < x < b | b | a | b | b |
+ * b <= x | b | b | b | b |
+ * -------------+-----+-----+-----+-------+
+ */
+
+ /* remove all non-constraint flags */
+ flags &= V4L2_SEL_FLAG_LE | V4L2_SEL_FLAG_GE;
+
+ if (x <= a)
+ return a;
+ if (x >= b)
+ return b;
+ if (flags == V4L2_SEL_FLAG_LE)
+ return a;
+ if (flags == V4L2_SEL_FLAG_GE)
+ return b;
+ if (x <= mid)
+ return a;
+ return b;
+}
+
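+/*
+ * center an area of the given size around 'center', clamped so that it
+ * fits within [0, upper]; the MXR_NO_OFFSET flag forces a zero offset
+ */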
+static inline unsigned int do_center(unsigned int center,
+ unsigned int size, unsigned int upper, unsigned int flags)
+{
+ unsigned int lower;
+
+ if (flags & MXR_NO_OFFSET)
+ return 0;
+
+ lower = center - min(center, size / 2);
+ return min(lower, upper - size);
+}
+
+static void mxr_graph_fix_geometry(struct mxr_layer *layer,
+ enum mxr_geometry_stage stage, unsigned long flags)
{
struct mxr_geometry *geo = &layer->geo;
+ struct mxr_crop *src = &geo->src;
+ struct mxr_crop *dst = &geo->dst;
+ unsigned int x_center, y_center;
- /* limit to boundary size */
- geo->src.full_width = clamp_val(geo->src.full_width, 1, 32767);
- geo->src.full_height = clamp_val(geo->src.full_height, 1, 2047);
- geo->src.width = clamp_val(geo->src.width, 1, geo->src.full_width);
- geo->src.width = min(geo->src.width, 2047U);
- /* not possible to crop of Y axis */
- geo->src.y_offset = min(geo->src.y_offset, geo->src.full_height - 1);
- geo->src.height = geo->src.full_height - geo->src.y_offset;
- /* limitting offset */
- geo->src.x_offset = min(geo->src.x_offset,
- geo->src.full_width - geo->src.width);
-
- /* setting position in output */
- geo->dst.width = min(geo->dst.width, geo->dst.full_width);
- geo->dst.height = min(geo->dst.height, geo->dst.full_height);
-
- /* Mixer supports only 1x and 2x scaling */
- if (geo->dst.width >= 2 * geo->src.width) {
- geo->x_ratio = 1;
- geo->dst.width = 2 * geo->src.width;
- } else {
- geo->x_ratio = 0;
- geo->dst.width = geo->src.width;
- }
+ switch (stage) {
- if (geo->dst.height >= 2 * geo->src.height) {
- geo->y_ratio = 1;
- geo->dst.height = 2 * geo->src.height;
- } else {
- geo->y_ratio = 0;
- geo->dst.height = geo->src.height;
- }
+ case MXR_GEOMETRY_SINK: /* nothing to be fixed here */
+ flags = 0;
+ /* fall through */
+
+ case MXR_GEOMETRY_COMPOSE:
+ /* remember center of the area */
+ x_center = dst->x_offset + dst->width / 2;
+ y_center = dst->y_offset + dst->height / 2;
+ /* round up/down to 2 multiple depending on flags */
+ if (flags & V4L2_SEL_FLAG_LE) {
+ dst->width = round_down(dst->width, 2);
+ dst->height = round_down(dst->height, 2);
+ } else {
+ dst->width = round_up(dst->width, 2);
+ dst->height = round_up(dst->height, 2);
+ }
+ /* assure that compose rect is inside display area */
+ dst->width = min(dst->width, dst->full_width);
+ dst->height = min(dst->height, dst->full_height);
+
+ /* ensure that compose is reachable using 2x scaling */
+ dst->width = min(dst->width, 2 * src->full_width);
+ dst->height = min(dst->height, 2 * src->full_height);
+
+ /* setup offsets */
+ dst->x_offset = do_center(x_center, dst->width,
+ dst->full_width, flags);
+ dst->y_offset = do_center(y_center, dst->height,
+ dst->full_height, flags);
+ flags = 0;
+ /* fall through */
- geo->dst.x_offset = min(geo->dst.x_offset,
- geo->dst.full_width - geo->dst.width);
- geo->dst.y_offset = min(geo->dst.y_offset,
- geo->dst.full_height - geo->dst.height);
+ case MXR_GEOMETRY_CROP:
+ /* remember center of the area */
+ x_center = src->x_offset + src->width / 2;
+ y_center = src->y_offset + src->height / 2;
+ /* ensure that cropping area lies inside the buffer */
+ if (src->full_width < dst->width)
+ src->width = dst->width / 2;
+ else
+ src->width = closest(src->width, dst->width / 2,
+ dst->width, flags);
+
+ if (src->width == dst->width)
+ geo->x_ratio = 0;
+ else
+ geo->x_ratio = 1;
+
+ if (src->full_height < dst->height)
+ src->height = dst->height / 2;
+ else
+ src->height = closest(src->height, dst->height / 2,
+ dst->height, flags);
+
+ if (src->height == dst->height)
+ geo->y_ratio = 0;
+ else
+ geo->y_ratio = 1;
+
+ /* setup offsets */
+ src->x_offset = do_center(x_center, src->width,
+ src->full_width, flags);
+ src->y_offset = do_center(y_center, src->height,
+ src->full_height, flags);
+ flags = 0;
+ /* fall through */
+ case MXR_GEOMETRY_SOURCE:
+ src->full_width = clamp_val(src->full_width,
+ src->width + src->x_offset, 32767);
+ src->full_height = clamp_val(src->full_height,
+ src->height + src->y_offset, 2047);
+	}
}
/* PUBLIC API */
diff --git a/drivers/media/video/s5p-tv/mixer_video.c b/drivers/media/video/s5p-tv/mixer_video.c
index e16d3a4bc1dc..7884baeff76a 100644
--- a/drivers/media/video/s5p-tv/mixer_video.c
+++ b/drivers/media/video/s5p-tv/mixer_video.c
@@ -16,6 +16,7 @@
#include <media/v4l2-ioctl.h>
#include <linux/videodev2.h>
#include <linux/mm.h>
+#include <linux/module.h>
#include <linux/version.h>
#include <linux/timer.h>
#include <media/videobuf2-dma-contig.h>
@@ -169,18 +170,22 @@ static int mxr_querycap(struct file *file, void *priv,
return 0;
}
-/* Geometry handling */
-static void mxr_layer_geo_fix(struct mxr_layer *layer)
+static void mxr_geometry_dump(struct mxr_device *mdev, struct mxr_geometry *geo)
{
- struct mxr_device *mdev = layer->mdev;
- struct v4l2_mbus_framefmt mbus_fmt;
-
- /* TODO: add some dirty flag to avoid unnecessary adjustments */
- mxr_get_mbus_fmt(mdev, &mbus_fmt);
- layer->geo.dst.full_width = mbus_fmt.width;
- layer->geo.dst.full_height = mbus_fmt.height;
- layer->geo.dst.field = mbus_fmt.field;
- layer->ops.fix_geometry(layer);
+ mxr_dbg(mdev, "src.full_size = (%u, %u)\n",
+ geo->src.full_width, geo->src.full_height);
+ mxr_dbg(mdev, "src.size = (%u, %u)\n",
+ geo->src.width, geo->src.height);
+ mxr_dbg(mdev, "src.offset = (%u, %u)\n",
+ geo->src.x_offset, geo->src.y_offset);
+ mxr_dbg(mdev, "dst.full_size = (%u, %u)\n",
+ geo->dst.full_width, geo->dst.full_height);
+ mxr_dbg(mdev, "dst.size = (%u, %u)\n",
+ geo->dst.width, geo->dst.height);
+ mxr_dbg(mdev, "dst.offset = (%u, %u)\n",
+ geo->dst.x_offset, geo->dst.y_offset);
+ mxr_dbg(mdev, "ratio = (%u, %u)\n",
+ geo->x_ratio, geo->y_ratio);
}
static void mxr_layer_default_geo(struct mxr_layer *layer)
@@ -203,27 +208,29 @@ static void mxr_layer_default_geo(struct mxr_layer *layer)
layer->geo.src.width = layer->geo.src.full_width;
layer->geo.src.height = layer->geo.src.full_height;
- layer->ops.fix_geometry(layer);
+ mxr_geometry_dump(mdev, &layer->geo);
+ layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
+ mxr_geometry_dump(mdev, &layer->geo);
}
-static void mxr_geometry_dump(struct mxr_device *mdev, struct mxr_geometry *geo)
+static void mxr_layer_update_output(struct mxr_layer *layer)
{
- mxr_dbg(mdev, "src.full_size = (%u, %u)\n",
- geo->src.full_width, geo->src.full_height);
- mxr_dbg(mdev, "src.size = (%u, %u)\n",
- geo->src.width, geo->src.height);
- mxr_dbg(mdev, "src.offset = (%u, %u)\n",
- geo->src.x_offset, geo->src.y_offset);
- mxr_dbg(mdev, "dst.full_size = (%u, %u)\n",
- geo->dst.full_width, geo->dst.full_height);
- mxr_dbg(mdev, "dst.size = (%u, %u)\n",
- geo->dst.width, geo->dst.height);
- mxr_dbg(mdev, "dst.offset = (%u, %u)\n",
- geo->dst.x_offset, geo->dst.y_offset);
- mxr_dbg(mdev, "ratio = (%u, %u)\n",
- geo->x_ratio, geo->y_ratio);
-}
+ struct mxr_device *mdev = layer->mdev;
+ struct v4l2_mbus_framefmt mbus_fmt;
+
+ mxr_get_mbus_fmt(mdev, &mbus_fmt);
+ /* checking if update is needed */
+ if (layer->geo.dst.full_width == mbus_fmt.width &&
+		layer->geo.dst.full_height == mbus_fmt.height)
+ return;
+ layer->geo.dst.full_width = mbus_fmt.width;
+ layer->geo.dst.full_height = mbus_fmt.height;
+ layer->geo.dst.field = mbus_fmt.field;
+ layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
+
+ mxr_geometry_dump(mdev, &layer->geo);
+}
static const struct mxr_format *find_format_by_fourcc(
struct mxr_layer *layer, unsigned long fourcc);
@@ -248,37 +255,6 @@ static int mxr_enum_fmt(struct file *file, void *priv,
return 0;
}
-static int mxr_s_fmt(struct file *file, void *priv,
- struct v4l2_format *f)
-{
- struct mxr_layer *layer = video_drvdata(file);
- const struct mxr_format *fmt;
- struct v4l2_pix_format_mplane *pix;
- struct mxr_device *mdev = layer->mdev;
- struct mxr_geometry *geo = &layer->geo;
-
- mxr_dbg(mdev, "%s:%d\n", __func__, __LINE__);
-
- pix = &f->fmt.pix_mp;
- fmt = find_format_by_fourcc(layer, pix->pixelformat);
- if (fmt == NULL) {
- mxr_warn(mdev, "not recognized fourcc: %08x\n",
- pix->pixelformat);
- return -EINVAL;
- }
- layer->fmt = fmt;
- geo->src.full_width = pix->width;
- geo->src.width = pix->width;
- geo->src.full_height = pix->height;
- geo->src.height = pix->height;
- /* assure consistency of geometry */
- mxr_layer_geo_fix(layer);
- mxr_dbg(mdev, "width=%u height=%u span=%u\n",
- geo->src.width, geo->src.height, geo->src.full_width);
-
- return 0;
-}
-
static unsigned int divup(unsigned int divident, unsigned int divisor)
{
return (divident + divisor - 1) / divisor;
@@ -298,6 +274,10 @@ static void mxr_mplane_fill(struct v4l2_plane_pix_format *planes,
{
int i;
+ /* checking if nothing to fill */
+ if (!planes)
+ return;
+
memset(planes, 0, sizeof(*planes) * fmt->num_subframes);
for (i = 0; i < fmt->num_planes; ++i) {
struct v4l2_plane_pix_format *plane = planes
@@ -331,73 +311,194 @@ static int mxr_g_fmt(struct file *file, void *priv,
return 0;
}
-static inline struct mxr_crop *choose_crop_by_type(struct mxr_geometry *geo,
- enum v4l2_buf_type type)
-{
- switch (type) {
- case V4L2_BUF_TYPE_VIDEO_OUTPUT:
- case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
- return &geo->dst;
- case V4L2_BUF_TYPE_VIDEO_OVERLAY:
- return &geo->src;
- default:
- return NULL;
- }
-}
-
-static int mxr_g_crop(struct file *file, void *fh, struct v4l2_crop *a)
+static int mxr_s_fmt(struct file *file, void *priv,
+ struct v4l2_format *f)
{
struct mxr_layer *layer = video_drvdata(file);
- struct mxr_crop *crop;
+ const struct mxr_format *fmt;
+ struct v4l2_pix_format_mplane *pix;
+ struct mxr_device *mdev = layer->mdev;
+ struct mxr_geometry *geo = &layer->geo;
- mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
- crop = choose_crop_by_type(&layer->geo, a->type);
- if (crop == NULL)
+ mxr_dbg(mdev, "%s:%d\n", __func__, __LINE__);
+
+ pix = &f->fmt.pix_mp;
+ fmt = find_format_by_fourcc(layer, pix->pixelformat);
+ if (fmt == NULL) {
+ mxr_warn(mdev, "not recognized fourcc: %08x\n",
+ pix->pixelformat);
return -EINVAL;
- mxr_layer_geo_fix(layer);
- a->c.left = crop->x_offset;
- a->c.top = crop->y_offset;
- a->c.width = crop->width;
- a->c.height = crop->height;
+ }
+ layer->fmt = fmt;
+ /* set source size to highest accepted value */
+ geo->src.full_width = max(geo->dst.full_width, pix->width);
+ geo->src.full_height = max(geo->dst.full_height, pix->height);
+ layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
+ mxr_geometry_dump(mdev, &layer->geo);
+ /* set cropping to total visible screen */
+ geo->src.width = pix->width;
+ geo->src.height = pix->height;
+ geo->src.x_offset = 0;
+ geo->src.y_offset = 0;
+ /* assure consistency of geometry */
+ layer->ops.fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
+ mxr_geometry_dump(mdev, &layer->geo);
+ /* set full size to lowest possible value */
+ geo->src.full_width = 0;
+ geo->src.full_height = 0;
+ layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
+ mxr_geometry_dump(mdev, &layer->geo);
+
+ /* returning results */
+ mxr_g_fmt(file, priv, f);
+
return 0;
}
-static int mxr_s_crop(struct file *file, void *fh, struct v4l2_crop *a)
+static int mxr_g_selection(struct file *file, void *fh,
+ struct v4l2_selection *s)
{
struct mxr_layer *layer = video_drvdata(file);
- struct mxr_crop *crop;
+ struct mxr_geometry *geo = &layer->geo;
mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
- crop = choose_crop_by_type(&layer->geo, a->type);
- if (crop == NULL)
+
+ if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
+ s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ return -EINVAL;
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_CROP_ACTIVE:
+ s->r.left = geo->src.x_offset;
+ s->r.top = geo->src.y_offset;
+ s->r.width = geo->src.width;
+ s->r.height = geo->src.height;
+ break;
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = geo->src.full_width;
+ s->r.height = geo->src.full_height;
+ break;
+ case V4L2_SEL_TGT_COMPOSE_ACTIVE:
+ case V4L2_SEL_TGT_COMPOSE_PADDED:
+ s->r.left = geo->dst.x_offset;
+ s->r.top = geo->dst.y_offset;
+ s->r.width = geo->dst.width;
+ s->r.height = geo->dst.height;
+ break;
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = geo->dst.full_width;
+ s->r.height = geo->dst.full_height;
+ break;
+ default:
return -EINVAL;
- crop->x_offset = a->c.left;
- crop->y_offset = a->c.top;
- crop->width = a->c.width;
- crop->height = a->c.height;
- mxr_layer_geo_fix(layer);
+ }
+
return 0;
}
-static int mxr_cropcap(struct file *file, void *fh, struct v4l2_cropcap *a)
+/* returns 1 if rectangle 'a' is inside 'b' */
+static int mxr_is_rect_inside(struct v4l2_rect *a, struct v4l2_rect *b)
+{
+ if (a->left < b->left)
+ return 0;
+ if (a->top < b->top)
+ return 0;
+ if (a->left + a->width > b->left + b->width)
+ return 0;
+ if (a->top + a->height > b->top + b->height)
+ return 0;
+ return 1;
+}
+
+static int mxr_s_selection(struct file *file, void *fh,
+ struct v4l2_selection *s)
{
struct mxr_layer *layer = video_drvdata(file);
- struct mxr_crop *crop;
+ struct mxr_geometry *geo = &layer->geo;
+ struct mxr_crop *target = NULL;
+ enum mxr_geometry_stage stage;
+ struct mxr_geometry tmp;
+ struct v4l2_rect res;
- mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
- crop = choose_crop_by_type(&layer->geo, a->type);
- if (crop == NULL)
+ memset(&res, 0, sizeof res);
+
+ mxr_dbg(layer->mdev, "%s: rect: %dx%d@%d,%d\n", __func__,
+ s->r.width, s->r.height, s->r.left, s->r.top);
+
+ if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
+ s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
return -EINVAL;
- mxr_layer_geo_fix(layer);
- a->bounds.left = 0;
- a->bounds.top = 0;
- a->bounds.width = crop->full_width;
- a->bounds.top = crop->full_height;
- a->defrect = a->bounds;
- /* setting pixel aspect to 1/1 */
- a->pixelaspect.numerator = 1;
- a->pixelaspect.denominator = 1;
+
+ switch (s->target) {
+ /* ignore read-only targets */
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ res.width = geo->src.full_width;
+ res.height = geo->src.full_height;
+ break;
+
+ /* ignore read-only targets */
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ res.width = geo->dst.full_width;
+ res.height = geo->dst.full_height;
+ break;
+
+ case V4L2_SEL_TGT_CROP_ACTIVE:
+ target = &geo->src;
+ stage = MXR_GEOMETRY_CROP;
+ break;
+ case V4L2_SEL_TGT_COMPOSE_ACTIVE:
+ case V4L2_SEL_TGT_COMPOSE_PADDED:
+ target = &geo->dst;
+ stage = MXR_GEOMETRY_COMPOSE;
+ break;
+ default:
+ return -EINVAL;
+ }
+ /* apply change and update geometry if needed */
+ if (target) {
+ /* backup current geometry if setup fails */
+ memcpy(&tmp, geo, sizeof tmp);
+
+ /* apply requested selection */
+ target->x_offset = s->r.left;
+ target->y_offset = s->r.top;
+ target->width = s->r.width;
+ target->height = s->r.height;
+
+ layer->ops.fix_geometry(layer, stage, s->flags);
+
+		/* retrieve the updated selection rectangle */
+ res.left = target->x_offset;
+ res.top = target->y_offset;
+ res.width = target->width;
+ res.height = target->height;
+
+ mxr_geometry_dump(layer->mdev, &layer->geo);
+ }
+
+ /* checking if the rectangle satisfies constraints */
+ if ((s->flags & V4L2_SEL_FLAG_LE) && !mxr_is_rect_inside(&res, &s->r))
+ goto fail;
+ if ((s->flags & V4L2_SEL_FLAG_GE) && !mxr_is_rect_inside(&s->r, &res))
+ goto fail;
+
+ /* return result rectangle */
+ s->r = res;
+
return 0;
+fail:
+ /* restore old geometry, which is not touched if target is NULL */
+ if (target)
+ memcpy(geo, &tmp, sizeof tmp);
+ return -ERANGE;
}
static int mxr_enum_dv_presets(struct file *file, void *fh,
@@ -437,6 +538,8 @@ static int mxr_s_dv_preset(struct file *file, void *fh,
mutex_unlock(&mdev->mutex);
+ mxr_layer_update_output(layer);
+
/* any failure should return EINVAL according to V4L2 doc */
return ret ? -EINVAL : 0;
}
@@ -477,6 +580,8 @@ static int mxr_s_std(struct file *file, void *fh, v4l2_std_id *norm)
mutex_unlock(&mdev->mutex);
+ mxr_layer_update_output(layer);
+
return ret ? -EINVAL : 0;
}
@@ -525,25 +630,27 @@ static int mxr_s_output(struct file *file, void *fh, unsigned int i)
struct video_device *vfd = video_devdata(file);
struct mxr_layer *layer = video_drvdata(file);
struct mxr_device *mdev = layer->mdev;
- int ret = 0;
if (i >= mdev->output_cnt || mdev->output[i] == NULL)
return -EINVAL;
mutex_lock(&mdev->mutex);
if (mdev->n_output > 0) {
- ret = -EBUSY;
- goto done;
+ mutex_unlock(&mdev->mutex);
+ return -EBUSY;
}
mdev->current_output = i;
vfd->tvnorms = 0;
v4l2_subdev_call(to_outsd(mdev), video, g_tvnorms_output,
&vfd->tvnorms);
+ mutex_unlock(&mdev->mutex);
+
+ /* update layers geometry */
+ mxr_layer_update_output(layer);
+
mxr_dbg(mdev, "tvnorms = %08llx\n", vfd->tvnorms);
-done:
- mutex_unlock(&mdev->mutex);
- return ret;
+ return 0;
}
static int mxr_g_output(struct file *file, void *fh, unsigned int *p)
@@ -632,10 +739,9 @@ static const struct v4l2_ioctl_ops mxr_ioctl_ops = {
.vidioc_enum_output = mxr_enum_output,
.vidioc_s_output = mxr_s_output,
.vidioc_g_output = mxr_g_output,
- /* Crop ioctls */
- .vidioc_g_crop = mxr_g_crop,
- .vidioc_s_crop = mxr_s_crop,
- .vidioc_cropcap = mxr_cropcap,
+ /* selection ioctls */
+ .vidioc_g_selection = mxr_g_selection,
+ .vidioc_s_selection = mxr_s_selection,
};
static int mxr_video_open(struct file *file)
@@ -804,10 +910,7 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
/* block any changes in output configuration */
mxr_output_get(mdev);
- /* update layers geometry */
- mxr_layer_geo_fix(layer);
- mxr_geometry_dump(mdev, &layer->geo);
-
+ mxr_layer_update_output(layer);
layer->ops.format_set(layer);
/* enabling layer in hardware */
spin_lock_irqsave(&layer->enq_slock, flags);
diff --git a/drivers/media/video/s5p-tv/mixer_vp_layer.c b/drivers/media/video/s5p-tv/mixer_vp_layer.c
index f3bb2e34cb51..3d13a636877b 100644
--- a/drivers/media/video/s5p-tv/mixer_vp_layer.c
+++ b/drivers/media/video/s5p-tv/mixer_vp_layer.c
@@ -127,47 +127,77 @@ static void mxr_vp_format_set(struct mxr_layer *layer)
mxr_reg_vp_format(layer->mdev, layer->fmt, &layer->geo);
}
-static void mxr_vp_fix_geometry(struct mxr_layer *layer)
+static inline unsigned int do_center(unsigned int center,
+ unsigned int size, unsigned int upper, unsigned int flags)
{
- struct mxr_geometry *geo = &layer->geo;
+ unsigned int lower;
+
+ if (flags & MXR_NO_OFFSET)
+ return 0;
+
+ lower = center - min(center, size / 2);
+ return min(lower, upper - size);
+}
- /* align horizontal size to 8 pixels */
- geo->src.full_width = ALIGN(geo->src.full_width, 8);
- /* limit to boundary size */
- geo->src.full_width = clamp_val(geo->src.full_width, 8, 8192);
- geo->src.full_height = clamp_val(geo->src.full_height, 1, 8192);
- geo->src.width = clamp_val(geo->src.width, 32, geo->src.full_width);
- geo->src.width = min(geo->src.width, 2047U);
- geo->src.height = clamp_val(geo->src.height, 4, geo->src.full_height);
- geo->src.height = min(geo->src.height, 2047U);
-
- /* setting size of output window */
- geo->dst.width = clamp_val(geo->dst.width, 8, geo->dst.full_width);
- geo->dst.height = clamp_val(geo->dst.height, 1, geo->dst.full_height);
-
- /* ensure that scaling is in range 1/4x to 16x */
- if (geo->src.width >= 4 * geo->dst.width)
- geo->src.width = 4 * geo->dst.width;
- if (geo->dst.width >= 16 * geo->src.width)
- geo->dst.width = 16 * geo->src.width;
- if (geo->src.height >= 4 * geo->dst.height)
- geo->src.height = 4 * geo->dst.height;
- if (geo->dst.height >= 16 * geo->src.height)
- geo->dst.height = 16 * geo->src.height;
-
- /* setting scaling ratio */
- geo->x_ratio = (geo->src.width << 16) / geo->dst.width;
- geo->y_ratio = (geo->src.height << 16) / geo->dst.height;
-
- /* adjust offsets */
- geo->src.x_offset = min(geo->src.x_offset,
- geo->src.full_width - geo->src.width);
- geo->src.y_offset = min(geo->src.y_offset,
- geo->src.full_height - geo->src.height);
- geo->dst.x_offset = min(geo->dst.x_offset,
- geo->dst.full_width - geo->dst.width);
- geo->dst.y_offset = min(geo->dst.y_offset,
- geo->dst.full_height - geo->dst.height);
+static void mxr_vp_fix_geometry(struct mxr_layer *layer,
+ enum mxr_geometry_stage stage, unsigned long flags)
+{
+ struct mxr_geometry *geo = &layer->geo;
+ struct mxr_crop *src = &geo->src;
+ struct mxr_crop *dst = &geo->dst;
+ unsigned long x_center, y_center;
+
+ switch (stage) {
+
+ case MXR_GEOMETRY_SINK: /* nothing to be fixed here */
+ case MXR_GEOMETRY_COMPOSE:
+ /* remember center of the area */
+ x_center = dst->x_offset + dst->width / 2;
+ y_center = dst->y_offset + dst->height / 2;
+
+ /* ensure that compose is reachable using 16x scaling */
+ dst->width = clamp(dst->width, 8U, 16 * src->full_width);
+ dst->height = clamp(dst->height, 1U, 16 * src->full_height);
+
+ /* setup offsets */
+ dst->x_offset = do_center(x_center, dst->width,
+ dst->full_width, flags);
+ dst->y_offset = do_center(y_center, dst->height,
+ dst->full_height, flags);
+ flags = 0; /* remove possible MXR_NO_OFFSET flag */
+ /* fall through */
+ case MXR_GEOMETRY_CROP:
+ /* remember center of the area */
+ x_center = src->x_offset + src->width / 2;
+ y_center = src->y_offset + src->height / 2;
+
+ /* ensure scaling is between 0.25x .. 16x */
+ src->width = clamp(src->width, round_up(dst->width / 16, 4),
+ dst->width * 4);
+ src->height = clamp(src->height, round_up(dst->height / 16, 4),
+ dst->height * 4);
+
+ /* hardware limits */
+ src->width = clamp(src->width, 32U, 2047U);
+ src->height = clamp(src->height, 4U, 2047U);
+
+ /* setup offsets */
+ src->x_offset = do_center(x_center, src->width,
+ src->full_width, flags);
+ src->y_offset = do_center(y_center, src->height,
+ src->full_height, flags);
+
+ /* setting scaling ratio */
+ geo->x_ratio = (src->width << 16) / dst->width;
+ geo->y_ratio = (src->height << 16) / dst->height;
+ /* fall through */
+
+ case MXR_GEOMETRY_SOURCE:
+ src->full_width = clamp(src->full_width,
+ ALIGN(src->width + src->x_offset, 8), 8192U);
+ src->full_height = clamp(src->full_height,
+ src->height + src->y_offset, 8192U);
+ };
}
/* PUBLIC API */
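The do_center() helper added above keeps a crop/compose window centered on the point the user chose while pushing it back inside the frame when it would spill over an edge. A minimal standalone sketch of the same arithmetic (the main() harness and the sample numbers are illustrative only, and the MXR_NO_OFFSET handling is omitted):

#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

/* Re-center a window of 'size' around 'center', clamped to [0, upper - size]. */
static unsigned int do_center(unsigned int center, unsigned int size,
			      unsigned int upper)
{
	unsigned int lower = center - min_u(center, size / 2);

	return min_u(lower, upper - size);
}

int main(void)
{
	/* A 720-pixel crop centered at x = 1000 in a 1920-pixel frame: offset 640. */
	printf("offset = %u\n", do_center(1000, 720, 1920));
	/* Near the right edge the offset is pulled back so the window still fits. */
	printf("offset = %u\n", do_center(1900, 720, 1920));	/* 1200 */
	return 0;
}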
diff --git a/drivers/media/video/s5p-tv/sdo_drv.c b/drivers/media/video/s5p-tv/sdo_drv.c
index 8cec67ef48c9..059e7749ce95 100644
--- a/drivers/media/video/s5p-tv/sdo_drv.c
+++ b/drivers/media/video/s5p-tv/sdo_drv.c
@@ -457,24 +457,4 @@ static struct platform_driver sdo_driver __refdata = {
}
};
-static int __init sdo_init(void)
-{
- int ret;
- static const char banner[] __initdata = KERN_INFO \
- "Samsung Standard Definition Output (SDO) driver, "
- "(c) 2010-2011 Samsung Electronics Co., Ltd.\n";
- printk(banner);
-
- ret = platform_driver_register(&sdo_driver);
- if (ret)
- printk(KERN_ERR "SDO platform driver register failed\n");
-
- return ret;
-}
-module_init(sdo_init);
-
-static void __exit sdo_exit(void)
-{
- platform_driver_unregister(&sdo_driver);
-}
-module_exit(sdo_exit);
+module_platform_driver(sdo_driver);
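module_platform_driver() removes the open-coded init/exit pair (and, in this case, the banner printk). Roughly what the macro generates for this driver, sketched under the assumption of the generic module_driver() expansion:

static int __init sdo_driver_init(void)
{
	return platform_driver_register(&sdo_driver);
}
module_init(sdo_driver_init);

static void __exit sdo_driver_exit(void)
{
	platform_driver_unregister(&sdo_driver);
}
module_exit(sdo_driver_exit);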
diff --git a/drivers/media/video/saa7115.c b/drivers/media/video/saa7115.c
index 5cfdbc78b918..0ef5484696b6 100644
--- a/drivers/media/video/saa7115.c
+++ b/drivers/media/video/saa7115.c
@@ -57,7 +57,7 @@ MODULE_AUTHOR( "Maxim Yevtyushkin, Kevin Thayer, Chris Kennedy, "
"Hans Verkuil, Mauro Carvalho Chehab");
MODULE_LICENSE("GPL");
-static int debug;
+static bool debug;
module_param(debug, bool, 0644);
MODULE_PARM_DESC(debug, "Debug level (0-1)");
diff --git a/drivers/media/video/saa7134/saa7134-cards.c b/drivers/media/video/saa7134/saa7134-cards.c
index 0f9fb99adeb4..065d0f6be4a0 100644
--- a/drivers/media/video/saa7134/saa7134-cards.c
+++ b/drivers/media/video/saa7134/saa7134-cards.c
@@ -5691,6 +5691,27 @@ struct saa7134_board saa7134_boards[] = {
.amux = LINE1,
},
},
+ [SAA7134_BOARD_SENSORAY811_911] = {
+ .name = "Sensoray 811/911",
+ .audio_clock = 0x00200000,
+ .tuner_type = TUNER_ABSENT,
+ .radio_type = UNSET,
+ .tuner_addr = ADDR_UNSET,
+ .radio_addr = ADDR_UNSET,
+ .inputs = {{
+ .name = name_comp1,
+ .vmux = 0,
+ .amux = LINE1,
+ }, {
+ .name = name_comp3,
+ .vmux = 2,
+ .amux = LINE1,
+ }, {
+ .name = name_svideo,
+ .vmux = 8,
+ .amux = LINE1,
+ } },
+ },
};
@@ -6914,6 +6935,18 @@ struct pci_device_id saa7134_pci_tbl[] = {
.subdevice = 0xd136,
.driver_data = SAA7134_BOARD_MAGICPRO_PROHDTV_PRO2,
}, {
+ .vendor = PCI_VENDOR_ID_PHILIPS,
+ .device = PCI_DEVICE_ID_PHILIPS_SAA7133,
+ .subvendor = 0x6000,
+ .subdevice = 0x0811,
+ .driver_data = SAA7134_BOARD_SENSORAY811_911,
+ }, {
+ .vendor = PCI_VENDOR_ID_PHILIPS,
+ .device = PCI_DEVICE_ID_PHILIPS_SAA7133,
+ .subvendor = 0x6000,
+ .subdevice = 0x0911,
+ .driver_data = SAA7134_BOARD_SENSORAY811_911,
+ }, {
/* --- boards without eeprom + subsystem ID --- */
.vendor = PCI_VENDOR_ID_PHILIPS,
.device = PCI_DEVICE_ID_PHILIPS_SAA7134,
diff --git a/drivers/media/video/saa7134/saa7134-core.c b/drivers/media/video/saa7134/saa7134-core.c
index ca65cda3e101..5fbb4e49495c 100644
--- a/drivers/media/video/saa7134/saa7134-core.c
+++ b/drivers/media/video/saa7134/saa7134-core.c
@@ -1263,7 +1263,6 @@ static int saa7134_resume(struct pci_dev *pci_dev)
saa7134_tvaudio_setmute(dev);
saa7134_tvaudio_setvolume(dev, dev->ctl_volume);
saa7134_tvaudio_init(dev);
- saa7134_tvaudio_do_scan(dev);
saa7134_enable_i2s(dev);
saa7134_hw_enable2(dev);
diff --git a/drivers/media/video/saa7134/saa7134-dvb.c b/drivers/media/video/saa7134/saa7134-dvb.c
index 1e4ef1669887..089fa0fb5c94 100644
--- a/drivers/media/video/saa7134/saa7134-dvb.c
+++ b/drivers/media/video/saa7134/saa7134-dvb.c
@@ -183,9 +183,9 @@ static int mt352_avermedia_xc3028_init(struct dvb_frontend *fe)
return 0;
}
-static int mt352_pinnacle_tuner_set_params(struct dvb_frontend* fe,
- struct dvb_frontend_parameters* params)
+static int mt352_pinnacle_tuner_set_params(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
u8 off[] = { 0x00, 0xf1};
u8 on[] = { 0x00, 0x71};
struct i2c_msg msg = {.addr=0x43, .flags=0, .buf=off, .len = sizeof(off)};
@@ -196,7 +196,7 @@ static int mt352_pinnacle_tuner_set_params(struct dvb_frontend* fe,
/* set frequency (mt2050) */
f.tuner = 0;
f.type = V4L2_TUNER_DIGITAL_TV;
- f.frequency = params->frequency / 1000 * 16 / 1000;
+ f.frequency = c->frequency / 1000 * 16 / 1000;
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
i2c_transfer(&dev->i2c_adap, &msg, 1);
@@ -287,8 +287,9 @@ static int philips_tda1004x_request_firmware(struct dvb_frontend *fe,
* these tuners are tu1216, td1316(a)
*/
-static int philips_tda6651_pll_set(struct dvb_frontend *fe, struct dvb_frontend_parameters *params)
+static int philips_tda6651_pll_set(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct saa7134_dev *dev = fe->dvb->priv;
struct tda1004x_state *state = fe->demodulator_priv;
u8 addr = state->config->tuner_address;
@@ -299,7 +300,7 @@ static int philips_tda6651_pll_set(struct dvb_frontend *fe, struct dvb_frontend_
u8 band, cp, filter;
/* determine charge pump */
- tuner_frequency = params->frequency + 36166000;
+ tuner_frequency = c->frequency + 36166000;
if (tuner_frequency < 87000000)
return -EINVAL;
else if (tuner_frequency < 130000000)
@@ -324,28 +325,28 @@ static int philips_tda6651_pll_set(struct dvb_frontend *fe, struct dvb_frontend_
return -EINVAL;
/* determine band */
- if (params->frequency < 49000000)
+ if (c->frequency < 49000000)
return -EINVAL;
- else if (params->frequency < 161000000)
+ else if (c->frequency < 161000000)
band = 1;
- else if (params->frequency < 444000000)
+ else if (c->frequency < 444000000)
band = 2;
- else if (params->frequency < 861000000)
+ else if (c->frequency < 861000000)
band = 4;
else
return -EINVAL;
/* setup PLL filter */
- switch (params->u.ofdm.bandwidth) {
- case BANDWIDTH_6_MHZ:
+ switch (c->bandwidth_hz) {
+ case 6000000:
filter = 0;
break;
- case BANDWIDTH_7_MHZ:
+ case 7000000:
filter = 0;
break;
- case BANDWIDTH_8_MHZ:
+ case 8000000:
filter = 1;
break;
@@ -356,7 +357,7 @@ static int philips_tda6651_pll_set(struct dvb_frontend *fe, struct dvb_frontend_
/* calculate divisor
* ((36166000+((1000000/6)/2)) + Finput)/(1000000/6)
*/
- tuner_frequency = (((params->frequency / 1000) * 6) + 217496) / 1000;
+ tuner_frequency = (((c->frequency / 1000) * 6) + 217496) / 1000;
/* setup tuner buffer */
tuner_buf[0] = (tuner_frequency >> 8) & 0x7f;
@@ -436,9 +437,9 @@ static int philips_td1316_tuner_init(struct dvb_frontend *fe)
return 0;
}
-static int philips_td1316_tuner_set_params(struct dvb_frontend *fe, struct dvb_frontend_parameters *params)
+static int philips_td1316_tuner_set_params(struct dvb_frontend *fe)
{
- return philips_tda6651_pll_set(fe, params);
+ return philips_tda6651_pll_set(fe);
}
static int philips_td1316_tuner_sleep(struct dvb_frontend *fe)
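The saa7134-dvb hunks follow the DVBv5 conversion pattern: tuner callbacks lose the dvb_frontend_parameters argument and read the frontend's property cache instead, with bandwidth carried in Hz rather than the BANDWIDTH_* enum. A sketch of that callback shape (example_tuner_set_params is an illustrative name, not part of this driver):

static int example_tuner_set_params(struct dvb_frontend *fe)
{
	struct dtv_frontend_properties *c = &fe->dtv_property_cache;

	/* Bandwidth is now plain Hz; reject anything the hardware can't do. */
	if (c->bandwidth_hz != 6000000 && c->bandwidth_hz != 7000000 &&
	    c->bandwidth_hz != 8000000)
		return -EINVAL;

	/* ... program the PLL from c->frequency (Hz) and c->bandwidth_hz ... */
	return 0;
}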
diff --git a/drivers/media/video/saa7134/saa7134-input.c b/drivers/media/video/saa7134/saa7134-input.c
index d4ee24bf6928..22ecd7297d2d 100644
--- a/drivers/media/video/saa7134/saa7134-input.c
+++ b/drivers/media/video/saa7134/saa7134-input.c
@@ -235,22 +235,25 @@ static int get_key_purpletv(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
static int get_key_hvr1110(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
{
- unsigned char buf[5], cod4, code3, code4;
+ unsigned char buf[5];
/* poll IR chip */
if (5 != i2c_master_recv(ir->c, buf, 5))
return -EIO;
- cod4 = buf[4];
- code4 = (cod4 >> 2);
- code3 = buf[3];
- if (code3 == 0)
- /* no key pressed */
+ /* Check if some key was pressed */
+ if (!(buf[0] & 0x80))
return 0;
- /* return key */
- *ir_key = code4;
- *ir_raw = code4;
+ /*
+ * buf[3] & 0x80 is always high.
+ * buf[3] & 0x40 is a parity bit. A repeat event is marked
+ * by the parity bit keeping its value across two readings.
+ * buf[4] bits 0 and 1, and buf[1] and buf[2] are always
+ * zero.
+ */
+ *ir_key = 0x1fff & ((buf[3] << 8) | (buf[4] >> 2));
+ *ir_raw = *ir_key;
return 1;
}
@@ -752,7 +755,7 @@ int saa7134_input_init1(struct saa7134_dev *dev)
polling = 50; /* ms */
break;
case SAA7134_BOARD_VIDEOMATE_M1F:
- ir_codes = RC_MAP_VIDEOMATE_M1F;
+ ir_codes = RC_MAP_VIDEOMATE_K100;
mask_keycode = 0x0ff00;
mask_keyup = 0x040000;
break;
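For reference, the scancode packing introduced in get_key_hvr1110() can be exercised in isolation; a standalone sketch with invented sample bytes:

#include <stdio.h>

int main(void)
{
	unsigned char buf[5] = { 0x80, 0x00, 0x00, 0xc5, 0x48 };	/* sample poll data */
	unsigned int scancode;

	if (!(buf[0] & 0x80))		/* no key pressed */
		return 0;

	/* buf[3] supplies the high bits, buf[4] the low bits shifted by two;
	 * the 0x1fff mask strips buf[3]'s always-set top bit and the parity bit. */
	scancode = 0x1fff & ((buf[3] << 8) | (buf[4] >> 2));
	printf("scancode = 0x%04x\n", scancode);	/* 0x0512 for this sample */
	return 0;
}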
diff --git a/drivers/media/video/saa7134/saa7134-tvaudio.c b/drivers/media/video/saa7134/saa7134-tvaudio.c
index 57e646bb48b3..b7a99bee2f98 100644
--- a/drivers/media/video/saa7134/saa7134-tvaudio.c
+++ b/drivers/media/video/saa7134/saa7134-tvaudio.c
@@ -332,6 +332,13 @@ static int tvaudio_checkcarrier(struct saa7134_dev *dev, struct mainscan *scan)
{
__s32 left,right,value;
+ if (!(dev->tvnorm->id & scan->std)) {
+ value = 0;
+ dprintk("skipping %d.%03d MHz [%4s]\n",
+ scan->carr / 1000, scan->carr % 1000, scan->name);
+ return 0;
+ }
+
if (audio_debug > 1) {
int i;
dprintk("debug %d:",scan->carr);
@@ -348,30 +355,25 @@ static int tvaudio_checkcarrier(struct saa7134_dev *dev, struct mainscan *scan)
}
printk("\n");
}
- if (dev->tvnorm->id & scan->std) {
- tvaudio_setcarrier(dev,scan->carr-90,scan->carr-90);
- saa_readl(SAA7134_LEVEL_READOUT1 >> 2);
- if (tvaudio_sleep(dev,SCAN_SAMPLE_DELAY))
- return -1;
- left = saa_readl(SAA7134_LEVEL_READOUT1 >> 2);
-
- tvaudio_setcarrier(dev,scan->carr+90,scan->carr+90);
- saa_readl(SAA7134_LEVEL_READOUT1 >> 2);
- if (tvaudio_sleep(dev,SCAN_SAMPLE_DELAY))
- return -1;
- right = saa_readl(SAA7134_LEVEL_READOUT1 >> 2);
-
- left >>= 16;
- right >>= 16;
- value = left > right ? left - right : right - left;
- dprintk("scanning %d.%03d MHz [%4s] => dc is %5d [%d/%d]\n",
- scan->carr / 1000, scan->carr % 1000,
- scan->name, value, left, right);
- } else {
- value = 0;
- dprintk("skipping %d.%03d MHz [%4s]\n",
- scan->carr / 1000, scan->carr % 1000, scan->name);
- }
+
+ tvaudio_setcarrier(dev,scan->carr-90,scan->carr-90);
+ saa_readl(SAA7134_LEVEL_READOUT1 >> 2);
+ if (tvaudio_sleep(dev,SCAN_SAMPLE_DELAY))
+ return -1;
+ left = saa_readl(SAA7134_LEVEL_READOUT1 >> 2);
+
+ tvaudio_setcarrier(dev,scan->carr+90,scan->carr+90);
+ saa_readl(SAA7134_LEVEL_READOUT1 >> 2);
+ if (tvaudio_sleep(dev,SCAN_SAMPLE_DELAY))
+ return -1;
+ right = saa_readl(SAA7134_LEVEL_READOUT1 >> 2);
+
+ left >>= 16;
+ right >>= 16;
+ value = left > right ? left - right : right - left;
+ dprintk("scanning %d.%03d MHz [%4s] => dc is %5d [%d/%d]\n",
+ scan->carr / 1000, scan->carr % 1000,
+ scan->name, value, left, right);
return value;
}
@@ -546,6 +548,7 @@ static int tvaudio_thread(void *data)
dev->tvnorm->name, carrier/1000, carrier%1000,
max1, max2);
dev->last_carrier = carrier;
+ dev->automute = 0;
} else if (0 != dev->last_carrier) {
/* no carrier -- try last detected one as fallback */
@@ -553,6 +556,7 @@ static int tvaudio_thread(void *data)
dprintk("audio carrier scan failed, "
"using %d.%03d MHz [last detected]\n",
carrier/1000, carrier%1000);
+ dev->automute = 1;
} else {
/* no carrier + no fallback -- use default */
@@ -560,9 +564,9 @@ static int tvaudio_thread(void *data)
dprintk("audio carrier scan failed, "
"using %d.%03d MHz [default]\n",
carrier/1000, carrier%1000);
+ dev->automute = 1;
}
tvaudio_setcarrier(dev,carrier,carrier);
- dev->automute = 0;
saa_andorb(SAA7134_STEREO_DAC_OUTPUT_SELECT, 0x30, 0x00);
saa7134_tvaudio_setmute(dev);
/* find the exact tv audio norm */
@@ -601,7 +605,7 @@ static int tvaudio_thread(void *data)
if (kthread_should_stop())
break;
if (UNSET == dev->thread.mode) {
- rx = tvaudio_getstereo(dev,&tvaudio[i]);
+ rx = tvaudio_getstereo(dev, &tvaudio[audio]);
mode = saa7134_tvaudio_rx2mode(rx);
} else {
mode = dev->thread.mode;
@@ -1020,6 +1024,7 @@ int saa7134_tvaudio_init2(struct saa7134_dev *dev)
}
dev->thread.thread = NULL;
+ dev->thread.scan1 = dev->thread.scan2 = 0;
if (my_thread) {
saa7134_tvaudio_init(dev);
/* start tvaudio thread */
@@ -1029,13 +1034,19 @@ int saa7134_tvaudio_init2(struct saa7134_dev *dev)
dev->name);
/* XXX: missing error handling here */
}
- saa7134_tvaudio_do_scan(dev);
}
saa7134_enable_i2s(dev);
return 0;
}
+int saa7134_tvaudio_close(struct saa7134_dev *dev)
+{
+ dev->automute = 1;
+ /* anything else to undo? */
+ return 0;
+}
+
int saa7134_tvaudio_fini(struct saa7134_dev *dev)
{
/* shutdown tvaudio thread */
diff --git a/drivers/media/video/saa7134/saa7134-video.c b/drivers/media/video/saa7134/saa7134-video.c
index 9cf7914f6f90..417034eb6ad2 100644
--- a/drivers/media/video/saa7134/saa7134-video.c
+++ b/drivers/media/video/saa7134/saa7134-video.c
@@ -1462,6 +1462,8 @@ static int video_release(struct file *file)
struct saa6588_command cmd;
unsigned long flags;
+ saa7134_tvaudio_close(dev);
+
/* turn off overlay */
if (res_check(fh, RESOURCE_OVERLAY)) {
spin_lock_irqsave(&dev->slock,flags);
diff --git a/drivers/media/video/saa7134/saa7134.h b/drivers/media/video/saa7134/saa7134.h
index 9b550687213a..42fba4f93c72 100644
--- a/drivers/media/video/saa7134/saa7134.h
+++ b/drivers/media/video/saa7134/saa7134.h
@@ -330,6 +330,7 @@ struct saa7134_card_ir {
#define SAA7134_BOARD_MAGICPRO_PROHDTV_PRO2 185
#define SAA7134_BOARD_BEHOLD_501 186
#define SAA7134_BOARD_BEHOLD_503FM 187
+#define SAA7134_BOARD_SENSORAY811_911 188
#define SAA7134_MAXBOARDS 32
#define SAA7134_INPUT_MAX 8
@@ -817,6 +818,7 @@ void saa7134_tvaudio_init(struct saa7134_dev *dev);
int saa7134_tvaudio_init2(struct saa7134_dev *dev);
int saa7134_tvaudio_fini(struct saa7134_dev *dev);
int saa7134_tvaudio_do_scan(struct saa7134_dev *dev);
+int saa7134_tvaudio_close(struct saa7134_dev *dev);
int saa_dsp_writel(struct saa7134_dev *dev, int reg, u32 value);
diff --git a/drivers/media/video/saa7164/saa7164-bus.c b/drivers/media/video/saa7164/saa7164-bus.c
index 466e1b02f91f..a7f58a998752 100644
--- a/drivers/media/video/saa7164/saa7164-bus.c
+++ b/drivers/media/video/saa7164/saa7164-bus.c
@@ -149,7 +149,7 @@ int saa7164_bus_set(struct saa7164_dev *dev, struct tmComResInfo* msg,
saa7164_bus_verify(dev);
msg->size = cpu_to_le16(msg->size);
- msg->command = cpu_to_le16(msg->command);
+ msg->command = cpu_to_le32(msg->command);
msg->controlselector = cpu_to_le16(msg->controlselector);
if (msg->size > dev->bus.m_wMaxReqSize) {
@@ -464,7 +464,7 @@ int saa7164_bus_get(struct saa7164_dev *dev, struct tmComResInfo* msg,
peekout:
msg->size = le16_to_cpu(msg->size);
- msg->command = le16_to_cpu(msg->command);
+ msg->command = le32_to_cpu(msg->command);
msg->controlselector = le16_to_cpu(msg->controlselector);
ret = SAA_OK;
out:
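The saa7164-bus change only shows up on big-endian hosts: the message command field is 32 bits wide, so it must round-trip through the 32-bit byte-swap helpers or the upper half is mangled. A sketch of the intended packing (example_msg and example_pack are illustrative, not the driver's real types):

struct example_msg {
	__le16 size;
	__le32 command;
	__le16 controlselector;
};

static void example_pack(struct example_msg *m, u16 size, u32 cmd, u16 sel)
{
	m->size = cpu_to_le16(size);
	m->command = cpu_to_le32(cmd);		/* 32-bit field, 32-bit helper */
	m->controlselector = cpu_to_le16(sel);
}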
diff --git a/drivers/media/video/saa7164/saa7164-cards.c b/drivers/media/video/saa7164/saa7164-cards.c
index 971591d6450f..5b72da5ce418 100644
--- a/drivers/media/video/saa7164/saa7164-cards.c
+++ b/drivers/media/video/saa7164/saa7164-cards.c
@@ -269,8 +269,6 @@ struct saa7164_board saa7164_boards[] = {
.portb = SAA7164_MPEG_DVB,
.portc = SAA7164_MPEG_ENCODER,
.portd = SAA7164_MPEG_ENCODER,
- .portc = SAA7164_MPEG_ENCODER,
- .portd = SAA7164_MPEG_ENCODER,
.porte = SAA7164_MPEG_VBI,
.portf = SAA7164_MPEG_VBI,
.chiprev = SAA7164_CHIP_REV3,
@@ -333,8 +331,6 @@ struct saa7164_board saa7164_boards[] = {
.portd = SAA7164_MPEG_ENCODER,
.porte = SAA7164_MPEG_VBI,
.portf = SAA7164_MPEG_VBI,
- .porte = SAA7164_MPEG_VBI,
- .portf = SAA7164_MPEG_VBI,
.chiprev = SAA7164_CHIP_REV3,
.unit = {{
.id = 0x28,
diff --git a/drivers/media/video/sh_mobile_ceu_camera.c b/drivers/media/video/sh_mobile_ceu_camera.c
index f390682629cf..f854d85a387c 100644
--- a/drivers/media/video/sh_mobile_ceu_camera.c
+++ b/drivers/media/video/sh_mobile_ceu_camera.c
@@ -566,8 +566,10 @@ static int sh_mobile_ceu_add_device(struct soc_camera_device *icd)
ret = sh_mobile_ceu_soft_reset(pcdev);
csi2_sd = find_csi2(pcdev);
- if (csi2_sd)
- csi2_sd->grp_id = (long)icd;
+ if (csi2_sd) {
+ csi2_sd->grp_id = soc_camera_grp_id(icd);
+ v4l2_set_subdev_hostdata(csi2_sd, icd);
+ }
ret = v4l2_subdev_call(csi2_sd, core, s_power, 1);
if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV) {
@@ -768,7 +770,7 @@ static struct v4l2_subdev *find_bus_subdev(struct sh_mobile_ceu_dev *pcdev,
{
if (pcdev->csi2_pdev) {
struct v4l2_subdev *csi2_sd = find_csi2(pcdev);
- if (csi2_sd && csi2_sd->grp_id == (u32)icd)
+ if (csi2_sd && csi2_sd->grp_id == soc_camera_grp_id(icd))
return csi2_sd;
}
@@ -784,8 +786,7 @@ static struct v4l2_subdev *find_bus_subdev(struct sh_mobile_ceu_dev *pcdev,
V4L2_MBUS_DATA_ACTIVE_HIGH)
/* Capture is not running, no interrupts, no locking needed */
-static int sh_mobile_ceu_set_bus_param(struct soc_camera_device *icd,
- __u32 pixfmt)
+static int sh_mobile_ceu_set_bus_param(struct soc_camera_device *icd)
{
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct sh_mobile_ceu_dev *pcdev = ici->priv;
@@ -923,11 +924,6 @@ static int sh_mobile_ceu_set_bus_param(struct soc_camera_device *icd,
ceu_write(pcdev, CDOCR, value);
ceu_write(pcdev, CFWCR, 0); /* keep "datafetch firewall" disabled */
- dev_dbg(icd->parent, "S_FMT successful for %c%c%c%c %ux%u\n",
- pixfmt & 0xff, (pixfmt >> 8) & 0xff,
- (pixfmt >> 16) & 0xff, (pixfmt >> 24) & 0xff,
- icd->user_width, icd->user_height);
-
capture_restore(pcdev, capsr);
/* not in bundle mode: skip CBDSR, CDAYR2, CDACR2, CDBYR2, CDBCR2 */
@@ -1089,8 +1085,9 @@ static int sh_mobile_ceu_get_formats(struct soc_camera_device *icd, unsigned int
/* Try 2560x1920, 1280x960, 640x480, 320x240 */
mf.width = 2560 >> shift;
mf.height = 1920 >> shift;
- ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video,
- s_mbus_fmt, &mf);
+ ret = v4l2_device_call_until_err(sd->v4l2_dev,
+ soc_camera_grp_id(icd), video,
+ s_mbus_fmt, &mf);
if (ret < 0)
return ret;
shift++;
@@ -1389,7 +1386,8 @@ static int client_s_fmt(struct soc_camera_device *icd,
bool ceu_1to1;
int ret;
- ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video,
+ ret = v4l2_device_call_until_err(sd->v4l2_dev,
+ soc_camera_grp_id(icd), video,
s_mbus_fmt, mf);
if (ret < 0)
return ret;
@@ -1426,8 +1424,9 @@ static int client_s_fmt(struct soc_camera_device *icd,
tmp_h = min(2 * tmp_h, max_height);
mf->width = tmp_w;
mf->height = tmp_h;
- ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video,
- s_mbus_fmt, mf);
+ ret = v4l2_device_call_until_err(sd->v4l2_dev,
+ soc_camera_grp_id(icd), video,
+ s_mbus_fmt, mf);
dev_geo(dev, "Camera scaled to %ux%u\n",
mf->width, mf->height);
if (ret < 0) {
@@ -1580,8 +1579,9 @@ static int sh_mobile_ceu_set_crop(struct soc_camera_device *icd,
}
if (interm_width < icd->user_width || interm_height < icd->user_height) {
- ret = v4l2_device_call_until_err(sd->v4l2_dev, (int)icd, video,
- s_mbus_fmt, &mf);
+ ret = v4l2_device_call_until_err(sd->v4l2_dev,
+ soc_camera_grp_id(icd), video,
+ s_mbus_fmt, &mf);
if (ret < 0)
return ret;
@@ -1867,7 +1867,8 @@ static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd,
mf.code = xlate->code;
mf.colorspace = pix->colorspace;
- ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video, try_mbus_fmt, &mf);
+ ret = v4l2_device_call_until_err(sd->v4l2_dev, soc_camera_grp_id(icd),
+ video, try_mbus_fmt, &mf);
if (ret < 0)
return ret;
@@ -1891,8 +1892,9 @@ static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd,
*/
mf.width = 2560;
mf.height = 1920;
- ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video,
- try_mbus_fmt, &mf);
+ ret = v4l2_device_call_until_err(sd->v4l2_dev,
+ soc_camera_grp_id(icd), video,
+ try_mbus_fmt, &mf);
if (ret < 0) {
/* Shouldn't actually happen... */
dev_err(icd->parent,
@@ -1958,8 +1960,7 @@ static int sh_mobile_ceu_set_livecrop(struct soc_camera_device *icd,
if (!ret) {
icd->user_width = out_width & ~3;
icd->user_height = out_height & ~3;
- ret = sh_mobile_ceu_set_bus_param(icd,
- icd->current_fmt->host_fmt->fourcc);
+ ret = sh_mobile_ceu_set_bus_param(icd);
}
}
diff --git a/drivers/media/video/sh_mobile_csi2.c b/drivers/media/video/sh_mobile_csi2.c
index ea4f0473ed3b..05286500b4d4 100644
--- a/drivers/media/video/sh_mobile_csi2.c
+++ b/drivers/media/video/sh_mobile_csi2.c
@@ -143,7 +143,7 @@ static int sh_csi2_s_mbus_config(struct v4l2_subdev *sd,
const struct v4l2_mbus_config *cfg)
{
struct sh_csi2 *priv = container_of(sd, struct sh_csi2, subdev);
- struct soc_camera_device *icd = (struct soc_camera_device *)sd->grp_id;
+ struct soc_camera_device *icd = v4l2_get_subdev_hostdata(sd);
struct v4l2_subdev *client_sd = soc_camera_to_subdev(icd);
struct v4l2_mbus_config client_cfg = {.type = V4L2_MBUS_CSI2,
.flags = priv->mipi_flags};
@@ -202,7 +202,7 @@ static void sh_csi2_hwinit(struct sh_csi2 *priv)
static int sh_csi2_client_connect(struct sh_csi2 *priv)
{
struct sh_csi2_pdata *pdata = priv->pdev->dev.platform_data;
- struct soc_camera_device *icd = (struct soc_camera_device *)priv->subdev.grp_id;
+ struct soc_camera_device *icd = v4l2_get_subdev_hostdata(&priv->subdev);
struct v4l2_subdev *client_sd = soc_camera_to_subdev(icd);
struct device *dev = v4l2_get_subdevdata(&priv->subdev);
struct v4l2_mbus_config cfg;
@@ -390,18 +390,7 @@ static struct platform_driver __refdata sh_csi2_pdrv = {
},
};
-static int __init sh_csi2_init(void)
-{
- return platform_driver_register(&sh_csi2_pdrv);
-}
-
-static void __exit sh_csi2_exit(void)
-{
- platform_driver_unregister(&sh_csi2_pdrv);
-}
-
-module_init(sh_csi2_init);
-module_exit(sh_csi2_exit);
+module_platform_driver(sh_csi2_pdrv);
MODULE_DESCRIPTION("SH-Mobile MIPI CSI-2 driver");
MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
diff --git a/drivers/media/video/sn9c102/sn9c102_core.c b/drivers/media/video/sn9c102/sn9c102_core.c
index 7025be129286..c2882fa5be85 100644
--- a/drivers/media/video/sn9c102/sn9c102_core.c
+++ b/drivers/media/video/sn9c102/sn9c102_core.c
@@ -75,8 +75,8 @@ MODULE_PARM_DESC(video_nr,
"\none and for every other camera."
"\n");
-static short force_munmap[] = {[0 ... SN9C102_MAX_DEVICES-1] =
- SN9C102_FORCE_MUNMAP};
+static bool force_munmap[] = {[0 ... SN9C102_MAX_DEVICES-1] =
+ SN9C102_FORCE_MUNMAP};
module_param_array(force_munmap, bool, NULL, 0444);
MODULE_PARM_DESC(force_munmap,
" <0|1[,...]>"
diff --git a/drivers/media/video/soc_camera.c b/drivers/media/video/soc_camera.c
index b72580c38957..b82710745ba8 100644
--- a/drivers/media/video/soc_camera.c
+++ b/drivers/media/video/soc_camera.c
@@ -487,7 +487,7 @@ static int soc_camera_set_fmt(struct soc_camera_device *icd,
icd->user_width, icd->user_height);
/* set physical bus parameters */
- return ici->ops->set_bus_param(icd, pix->pixelformat);
+ return ici->ops->set_bus_param(icd);
}
static int soc_camera_open(struct file *file)
@@ -600,9 +600,9 @@ static int soc_camera_close(struct file *file)
pm_runtime_suspend(&icd->vdev->dev);
pm_runtime_disable(&icd->vdev->dev);
- ici->ops->remove(icd);
if (ici->ops->init_videobuf2)
vb2_queue_release(&icd->vb2_vidq);
+ ici->ops->remove(icd);
soc_camera_power_off(icd, icl);
}
@@ -1103,7 +1103,8 @@ static int soc_camera_probe(struct soc_camera_device *icd)
}
sd = soc_camera_to_subdev(icd);
- sd->grp_id = (long)icd;
+ sd->grp_id = soc_camera_grp_id(icd);
+ v4l2_set_subdev_hostdata(sd, icd);
if (v4l2_ctrl_add_handler(&icd->ctrl_handler, sd->ctrl_handler))
goto ectrl;
diff --git a/drivers/media/video/soc_camera_platform.c b/drivers/media/video/soc_camera_platform.c
index 4402a8a74f7a..f59ccade07c8 100644
--- a/drivers/media/video/soc_camera_platform.c
+++ b/drivers/media/video/soc_camera_platform.c
@@ -189,18 +189,7 @@ static struct platform_driver soc_camera_platform_driver = {
.remove = soc_camera_platform_remove,
};
-static int __init soc_camera_platform_module_init(void)
-{
- return platform_driver_register(&soc_camera_platform_driver);
-}
-
-static void __exit soc_camera_platform_module_exit(void)
-{
- platform_driver_unregister(&soc_camera_platform_driver);
-}
-
-module_init(soc_camera_platform_module_init);
-module_exit(soc_camera_platform_module_exit);
+module_platform_driver(soc_camera_platform_driver);
MODULE_DESCRIPTION("SoC Camera Platform driver");
MODULE_AUTHOR("Magnus Damm");
diff --git a/drivers/media/video/stk-webcam.c b/drivers/media/video/stk-webcam.c
index b7fb5a5cad7e..d427f8436c70 100644
--- a/drivers/media/video/stk-webcam.c
+++ b/drivers/media/video/stk-webcam.c
@@ -38,11 +38,11 @@
#include "stk-webcam.h"
-static int hflip = 1;
+static bool hflip = 1;
module_param(hflip, bool, 0444);
MODULE_PARM_DESC(hflip, "Horizontal image flip (mirror). Defaults to 1");
-static int vflip = 1;
+static bool vflip = 1;
module_param(vflip, bool, 0444);
MODULE_PARM_DESC(vflip, "Vertical image flip. Defaults to 1");
@@ -377,8 +377,8 @@ static int stk_prepare_iso(struct stk_camera *dev)
if (dev->isobufs)
STK_ERROR("isobufs already allocated. Bad\n");
else
- dev->isobufs = kzalloc(MAX_ISO_BUFS * sizeof(*dev->isobufs),
- GFP_KERNEL);
+ dev->isobufs = kcalloc(MAX_ISO_BUFS, sizeof(*dev->isobufs),
+ GFP_KERNEL);
if (dev->isobufs == NULL) {
STK_ERROR("Unable to allocate iso buffers\n");
return -ENOMEM;
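The stk-webcam change swaps an open-coded kzalloc(n * size) for kcalloc(), whose multiplication is overflow-checked. A minimal sketch (struct item is a placeholder type):

static struct item *alloc_items(size_t n)
{
	/* Returns NULL if n * sizeof(struct item) would overflow,
	 * instead of silently allocating a short, zeroed buffer. */
	return kcalloc(n, sizeof(struct item), GFP_KERNEL);
}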
diff --git a/drivers/media/video/timblogiw.c b/drivers/media/video/timblogiw.c
index a0895bf07487..4ed1c7c28ae7 100644
--- a/drivers/media/video/timblogiw.c
+++ b/drivers/media/video/timblogiw.c
@@ -565,7 +565,7 @@ static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
spin_unlock_irq(&fh->queue_lock);
desc = fh->chan->device->device_prep_slave_sg(fh->chan,
- buf->sg, sg_elems, DMA_FROM_DEVICE,
+ buf->sg, sg_elems, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
if (!desc) {
spin_lock_irq(&fh->queue_lock);
@@ -872,20 +872,7 @@ static struct platform_driver timblogiw_platform_driver = {
.remove = __devexit_p(timblogiw_remove),
};
-/* Module functions */
-
-static int __init timblogiw_init(void)
-{
- return platform_driver_register(&timblogiw_platform_driver);
-}
-
-static void __exit timblogiw_exit(void)
-{
- platform_driver_unregister(&timblogiw_platform_driver);
-}
-
-module_init(timblogiw_init);
-module_exit(timblogiw_exit);
+module_platform_driver(timblogiw_platform_driver);
MODULE_DESCRIPTION(TIMBLOGIWIN_NAME);
MODULE_AUTHOR("Pelagicore AB <info@pelagicore.com>");
diff --git a/drivers/media/video/tlg2300/pd-common.h b/drivers/media/video/tlg2300/pd-common.h
index 56564e6aaac2..5dd73b7857d1 100644
--- a/drivers/media/video/tlg2300/pd-common.h
+++ b/drivers/media/video/tlg2300/pd-common.h
@@ -140,7 +140,7 @@ struct pd_dvb_adapter {
u8 reserved[3];
/* data for power resume*/
- struct dvb_frontend_parameters fe_param;
+ struct dtv_frontend_properties fe_param;
/* for channel scanning */
int prev_freq;
diff --git a/drivers/media/video/tlg2300/pd-dvb.c b/drivers/media/video/tlg2300/pd-dvb.c
index d0da11ae19df..30fcb117e898 100644
--- a/drivers/media/video/tlg2300/pd-dvb.c
+++ b/drivers/media/video/tlg2300/pd-dvb.c
@@ -12,9 +12,9 @@
static void dvb_urb_cleanup(struct pd_dvb_adapter *pd_dvb);
static int dvb_bandwidth[][2] = {
- { TLG_BW_8, BANDWIDTH_8_MHZ },
- { TLG_BW_7, BANDWIDTH_7_MHZ },
- { TLG_BW_6, BANDWIDTH_6_MHZ }
+ { TLG_BW_8, 8000000 },
+ { TLG_BW_7, 7000000 },
+ { TLG_BW_6, 6000000 }
};
static int dvb_bandwidth_length = ARRAY_SIZE(dvb_bandwidth);
@@ -146,9 +146,9 @@ static int fw_delay_overflow(struct pd_dvb_adapter *adapter)
return msec > 800 ? true : false;
}
-static int poseidon_set_fe(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *fep)
+static int poseidon_set_fe(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *fep = &fe->dtv_property_cache;
s32 ret = 0, cmd_status = 0;
s32 i, bandwidth = -1;
struct poseidon *pd = fe->demodulator_priv;
@@ -159,7 +159,7 @@ static int poseidon_set_fe(struct dvb_frontend *fe,
mutex_lock(&pd->lock);
for (i = 0; i < dvb_bandwidth_length; i++)
- if (fep->u.ofdm.bandwidth == dvb_bandwidth[i][1])
+ if (fep->bandwidth_hz == dvb_bandwidth[i][1])
bandwidth = dvb_bandwidth[i][0];
if (check_scan_ok(fep->frequency, bandwidth, pd_dvb)) {
@@ -210,7 +210,7 @@ static int pm_dvb_resume(struct poseidon *pd)
poseidon_check_mode_dvbt(pd);
msleep(300);
- poseidon_set_fe(&pd_dvb->dvb_fe, &pd_dvb->fe_param);
+ poseidon_set_fe(&pd_dvb->dvb_fe);
dvb_start_streaming(pd_dvb);
return 0;
@@ -227,13 +227,13 @@ static s32 poseidon_fe_init(struct dvb_frontend *fe)
pd->pm_resume = pm_dvb_resume;
#endif
memset(&pd_dvb->fe_param, 0,
- sizeof(struct dvb_frontend_parameters));
+ sizeof(struct dtv_frontend_properties));
return 0;
}
-static int poseidon_get_fe(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *fep)
+static int poseidon_get_fe(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *fep = &fe->dtv_property_cache;
struct poseidon *pd = fe->demodulator_priv;
struct pd_dvb_adapter *pd_dvb = &pd->dvb_data;
@@ -332,9 +332,9 @@ static int poseidon_read_unc_blocks(struct dvb_frontend *fe, u32 *unc)
}
static struct dvb_frontend_ops poseidon_frontend_ops = {
+ .delsys = { SYS_DVBT },
.info = {
.name = "Poseidon DVB-T",
- .type = FE_OFDM,
.frequency_min = 174000000,
.frequency_max = 862000000,
.frequency_stepsize = 62500,/* FIXME */
diff --git a/drivers/media/video/tlg2300/pd-main.c b/drivers/media/video/tlg2300/pd-main.c
index 129f135d5a5f..c096b3f74200 100644
--- a/drivers/media/video/tlg2300/pd-main.c
+++ b/drivers/media/video/tlg2300/pd-main.c
@@ -374,7 +374,7 @@ static inline void set_map_flags(struct poseidon *pd, struct usb_device *udev)
}
#endif
-static bool check_firmware(struct usb_device *udev, int *down_firmware)
+static int check_firmware(struct usb_device *udev, int *down_firmware)
{
void *buf;
int ret;
@@ -398,7 +398,7 @@ static bool check_firmware(struct usb_device *udev, int *down_firmware)
*down_firmware = 1;
return firmware_download(udev);
}
- return ret;
+ return 0;
}
static int poseidon_probe(struct usb_interface *interface,
diff --git a/drivers/media/video/tm6000/Kconfig b/drivers/media/video/tm6000/Kconfig
index 114eec8a630a..a43b77abd931 100644
--- a/drivers/media/video/tm6000/Kconfig
+++ b/drivers/media/video/tm6000/Kconfig
@@ -1,6 +1,6 @@
config VIDEO_TM6000
tristate "TV Master TM5600/6000/6010 driver"
- depends on VIDEO_DEV && I2C && INPUT && RC_CORE && USB && EXPERIMENTAL
+ depends on VIDEO_DEV && I2C && INPUT && RC_CORE && USB
select VIDEO_TUNER
select MEDIA_TUNER_XC2028
select MEDIA_TUNER_XC5000
@@ -16,7 +16,7 @@ config VIDEO_TM6000
config VIDEO_TM6000_ALSA
tristate "TV Master TM5600/6000/6010 audio support"
- depends on VIDEO_TM6000 && SND && EXPERIMENTAL
+ depends on VIDEO_TM6000 && SND
select SND_PCM
---help---
This is a video4linux driver for direct (DMA) audio for
@@ -27,7 +27,7 @@ config VIDEO_TM6000_ALSA
config VIDEO_TM6000_DVB
tristate "DVB Support for tm6000 based TV cards"
- depends on VIDEO_TM6000 && DVB_CORE && USB && EXPERIMENTAL
+ depends on VIDEO_TM6000 && DVB_CORE && USB
select DVB_ZL10353
---help---
This adds support for DVB cards based on the tm5600/tm6000 chip.
diff --git a/drivers/media/video/tm6000/tm6000-alsa.c b/drivers/media/video/tm6000/tm6000-alsa.c
index 7d675c72fd47..bd07ec707956 100644
--- a/drivers/media/video/tm6000/tm6000-alsa.c
+++ b/drivers/media/video/tm6000/tm6000-alsa.c
@@ -42,7 +42,7 @@
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */
-static int enable[SNDRV_CARDS] = {1, [1 ... (SNDRV_CARDS - 1)] = 1};
+static bool enable[SNDRV_CARDS] = {1, [1 ... (SNDRV_CARDS - 1)] = 1};
module_param_array(enable, bool, NULL, 0444);
MODULE_PARM_DESC(enable, "Enable tm6000x soundcard. default enabled.");
@@ -146,20 +146,21 @@ static int dsp_buffer_alloc(struct snd_pcm_substream *substream, int size)
#define DEFAULT_FIFO_SIZE 4096
static struct snd_pcm_hardware snd_tm6000_digital_hw = {
- .info = SNDRV_PCM_INFO_MMAP |
+ .info = SNDRV_PCM_INFO_BATCH |
+ SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_BLOCK_TRANSFER |
SNDRV_PCM_INFO_MMAP_VALID,
.formats = SNDRV_PCM_FMTBIT_S16_LE,
- .rates = SNDRV_PCM_RATE_CONTINUOUS,
+ .rates = SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_KNOT,
.rate_min = 48000,
.rate_max = 48000,
.channels_min = 2,
.channels_max = 2,
.period_bytes_min = 64,
.period_bytes_max = 12544,
- .periods_min = 1,
+ .periods_min = 2,
.periods_max = 98,
.buffer_bytes_max = 62720 * 8,
};
@@ -181,6 +182,7 @@ static int snd_tm6000_pcm_open(struct snd_pcm_substream *substream)
chip->substream = substream;
runtime->hw = snd_tm6000_digital_hw;
+ snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
return 0;
_error:
@@ -347,9 +349,13 @@ static int snd_tm6000_card_trigger(struct snd_pcm_substream *substream, int cmd)
int err = 0;
switch (cmd) {
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: /* fall through */
+ case SNDRV_PCM_TRIGGER_RESUME: /* fall through */
case SNDRV_PCM_TRIGGER_START:
atomic_set(&core->stream_started, 1);
break;
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH: /* fall through */
+ case SNDRV_PCM_TRIGGER_SUSPEND: /* fall through */
case SNDRV_PCM_TRIGGER_STOP:
atomic_set(&core->stream_started, 0);
break;
@@ -371,6 +377,14 @@ static snd_pcm_uframes_t snd_tm6000_pointer(struct snd_pcm_substream *substream)
return chip->buf_pos;
}
+static struct page *snd_pcm_get_vmalloc_page(struct snd_pcm_substream *subs,
+ unsigned long offset)
+{
+ void *pageptr = subs->runtime->dma_area + offset;
+
+ return vmalloc_to_page(pageptr);
+}
+
/*
* operators
*/
@@ -383,6 +397,7 @@ static struct snd_pcm_ops snd_tm6000_pcm_ops = {
.prepare = snd_tm6000_prepare,
.trigger = snd_tm6000_card_trigger,
.pointer = snd_tm6000_pointer,
+ .page = snd_pcm_get_vmalloc_page,
};
/*
diff --git a/drivers/media/video/tm6000/tm6000-cards.c b/drivers/media/video/tm6000/tm6000-cards.c
index ff939bc0e0b9..034659b13174 100644
--- a/drivers/media/video/tm6000/tm6000-cards.c
+++ b/drivers/media/video/tm6000/tm6000-cards.c
@@ -351,6 +351,7 @@ static struct tm6000_board tm6000_boards[] = {
.tuner_addr = 0xc2 >> 1,
.demod_addr = 0x1e >> 1,
.type = TM6010,
+ .ir_codes = RC_MAP_HAUPPAUGE,
.caps = {
.has_tuner = 1,
.has_dvb = 1,
@@ -639,6 +640,7 @@ static struct usb_device_id tm6000_id_table[] = {
{ USB_DEVICE(0x6000, 0xdec3), .driver_info = TM6010_BOARD_BEHOLD_VOYAGER_LITE },
{ }
};
+MODULE_DEVICE_TABLE(usb, tm6000_id_table);
/* Control power led for show some activity */
void tm6000_flash_led(struct tm6000_core *dev, u8 state)
@@ -941,6 +943,7 @@ static void tm6000_config_tuner(struct tm6000_core *dev)
case TM6010_BOARD_HAUPPAUGE_900H:
case TM6010_BOARD_TERRATEC_CINERGY_HYBRID_XE:
case TM6010_BOARD_TWINHAN_TU501:
+ ctl.max_len = 80;
ctl.fname = "xc3028L-v36.fw";
break;
default:
@@ -1002,6 +1005,7 @@ static int fill_board_specific_data(struct tm6000_core *dev)
/* setup per-model quirks */
switch (dev->model) {
case TM6010_BOARD_TERRATEC_CINERGY_HYBRID_XE:
+ case TM6010_BOARD_HAUPPAUGE_900H:
dev->quirks |= TM6000_QUIRK_NO_USB_DELAY;
break;
@@ -1050,6 +1054,33 @@ static void use_alternative_detection_method(struct tm6000_core *dev)
tm6000_boards[model].name, model);
}
+#if defined(CONFIG_MODULES) && defined(MODULE)
+static void request_module_async(struct work_struct *work)
+{
+ struct tm6000_core *dev = container_of(work, struct tm6000_core,
+ request_module_wk);
+
+ request_module("tm6000-alsa");
+
+ if (dev->caps.has_dvb)
+ request_module("tm6000-dvb");
+}
+
+static void request_modules(struct tm6000_core *dev)
+{
+ INIT_WORK(&dev->request_module_wk, request_module_async);
+ schedule_work(&dev->request_module_wk);
+}
+
+static void flush_request_modules(struct tm6000_core *dev)
+{
+ flush_work_sync(&dev->request_module_wk);
+}
+#else
+#define request_modules(dev)
+#define flush_request_modules(dev)
+#endif /* CONFIG_MODULES */
+
static int tm6000_init_dev(struct tm6000_core *dev)
{
struct v4l2_frequency f;
@@ -1112,6 +1143,8 @@ static int tm6000_init_dev(struct tm6000_core *dev)
tm6000_ir_init(dev);
+ request_modules(dev);
+
mutex_unlock(&dev->lock);
return 0;
@@ -1324,6 +1357,8 @@ static void tm6000_usb_disconnect(struct usb_interface *interface)
printk(KERN_INFO "tm6000: disconnecting %s\n", dev->name);
+ flush_request_modules(dev);
+
tm6000_ir_fini(dev);
if (dev->gpio.power_led) {
diff --git a/drivers/media/video/tm6000/tm6000-core.c b/drivers/media/video/tm6000/tm6000-core.c
index 9783616a0da2..22cc0116deb6 100644
--- a/drivers/media/video/tm6000/tm6000-core.c
+++ b/drivers/media/video/tm6000/tm6000-core.c
@@ -38,6 +38,7 @@ int tm6000_read_write_usb(struct tm6000_core *dev, u8 req_type, u8 req,
int ret, i;
unsigned int pipe;
u8 *data = NULL;
+ int delay = 5000;
mutex_lock(&dev->usb_lock);
@@ -88,7 +89,20 @@ int tm6000_read_write_usb(struct tm6000_core *dev, u8 req_type, u8 req,
}
kfree(data);
- msleep(5);
+
+ if (dev->quirks & TM6000_QUIRK_NO_USB_DELAY)
+ delay = 0;
+
+ if (req == REQ_16_SET_GET_I2C_WR1_RDN && !(req_type & USB_DIR_IN)) {
+ unsigned int tsleep;
+ /* Calculate delay time: 200 us per byte, plus 200 us */
+ tsleep = (len * 200) + 200;
+ if (tsleep < delay)
+ tsleep = delay;
+ usleep_range(tsleep, tsleep + 1000);
+ }
+ else if (delay)
+ usleep_range(delay, delay + 1000);
mutex_unlock(&dev->usb_lock);
return ret;
@@ -125,14 +139,14 @@ int tm6000_set_reg_mask(struct tm6000_core *dev, u8 req, u16 value,
u8 new_index;
rc = tm6000_read_write_usb(dev, USB_DIR_IN | USB_TYPE_VENDOR, req,
- value, index, buf, 1);
+ value, 0, buf, 1);
if (rc < 0)
return rc;
new_index = (buf[0] & ~mask) | (index & mask);
- if (new_index == index)
+ if (new_index == buf[0])
return 0;
return tm6000_read_write_usb(dev, USB_DIR_OUT | USB_TYPE_VENDOR,
@@ -536,16 +550,16 @@ static struct reg_init tm6010_init_tab[] = {
{ TM6010_REQ05_R18_IMASK7, 0x00 },
- { TM6010_REQ07_RD8_IR_LEADER1, 0xaa },
- { TM6010_REQ07_RD8_IR_LEADER0, 0x30 },
- { TM6010_REQ07_RD8_IR_PULSE_CNT1, 0x20 },
- { TM6010_REQ07_RD8_IR_PULSE_CNT0, 0xd0 },
+ { TM6010_REQ07_RDC_IR_LEADER1, 0xaa },
+ { TM6010_REQ07_RDD_IR_LEADER0, 0x30 },
+ { TM6010_REQ07_RDE_IR_PULSE_CNT1, 0x20 },
+ { TM6010_REQ07_RDF_IR_PULSE_CNT0, 0xd0 },
{ REQ_04_EN_DISABLE_MCU_INT, 0x02, 0x00 },
- { TM6010_REQ07_RD8_IR, 0x2f },
+ { TM6010_REQ07_RD8_IR, 0x0f },
/* set remote wakeup key:any key wakeup */
{ TM6010_REQ07_RE5_REMOTE_WAKEUP, 0xfe },
- { TM6010_REQ07_RD8_IR_WAKEUP_SEL, 0xff },
+ { TM6010_REQ07_RDA_IR_WAKEUP_SEL, 0xff },
};
int tm6000_init(struct tm6000_core *dev)
@@ -599,55 +613,6 @@ int tm6000_init(struct tm6000_core *dev)
return rc;
}
-int tm6000_reset(struct tm6000_core *dev)
-{
- int pipe;
- int err;
-
- msleep(500);
-
- err = usb_set_interface(dev->udev, dev->isoc_in.bInterfaceNumber, 0);
- if (err < 0) {
- tm6000_err("failed to select interface %d, alt. setting 0\n",
- dev->isoc_in.bInterfaceNumber);
- return err;
- }
-
- err = usb_reset_configuration(dev->udev);
- if (err < 0) {
- tm6000_err("failed to reset configuration\n");
- return err;
- }
-
- if ((dev->quirks & TM6000_QUIRK_NO_USB_DELAY) == 0)
- msleep(5);
-
- /*
- * Not all devices have int_in defined
- */
- if (!dev->int_in.endp)
- return 0;
-
- err = usb_set_interface(dev->udev, dev->isoc_in.bInterfaceNumber, 2);
- if (err < 0) {
- tm6000_err("failed to select interface %d, alt. setting 2\n",
- dev->isoc_in.bInterfaceNumber);
- return err;
- }
-
- msleep(5);
-
- pipe = usb_rcvintpipe(dev->udev,
- dev->int_in.endp->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
-
- err = usb_clear_halt(dev->udev, pipe);
- if (err < 0) {
- tm6000_err("usb_clear_halt failed: %d\n", err);
- return err;
- }
-
- return 0;
-}
int tm6000_set_audio_bitrate(struct tm6000_core *dev, int bitrate)
{
@@ -696,11 +661,13 @@ int tm6000_set_audio_rinput(struct tm6000_core *dev)
if (dev->dev_type == TM6010) {
/* Audio crossbar setting, default SIF1 */
u8 areg_f0;
+ u8 areg_07 = 0x10;
switch (dev->rinput.amux) {
case TM6000_AMUX_SIF1:
case TM6000_AMUX_SIF2:
areg_f0 = 0x03;
+ areg_07 = 0x30;
break;
case TM6000_AMUX_ADC1:
areg_f0 = 0x00;
@@ -720,6 +687,9 @@ int tm6000_set_audio_rinput(struct tm6000_core *dev)
/* Set audio input crossbar */
tm6000_set_reg_mask(dev, TM6010_REQ08_RF0_DAUDIO_INPUT_CONFIG,
areg_f0, 0x0f);
+ /* Mux overflow workaround */
+ tm6000_set_reg_mask(dev, TM6010_REQ07_R07_OUTPUT_CONTROL,
+ areg_07, 0xf0);
} else {
u8 areg_eb;
/* Audio setting, default LINE1 */
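The tm6000-core hunk replaces the blanket msleep(5) with a per-transfer delay: for I2C write requests roughly 200 us per byte plus 200 us, never below the per-device minimum, issued with usleep_range() since msleep() is too coarse at this scale. That rule in isolation (example_i2c_write_delay is an illustrative name):

static void example_i2c_write_delay(size_t len, unsigned int min_delay_us)
{
	unsigned int tsleep = len * 200 + 200;	/* ~200 us per byte + setup */

	if (tsleep < min_delay_us)
		tsleep = min_delay_us;
	usleep_range(tsleep, tsleep + 1000);
}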
diff --git a/drivers/media/video/tm6000/tm6000-dvb.c b/drivers/media/video/tm6000/tm6000-dvb.c
index 5e6c129a4beb..e1f3f66e1e63 100644
--- a/drivers/media/video/tm6000/tm6000-dvb.c
+++ b/drivers/media/video/tm6000/tm6000-dvb.c
@@ -89,9 +89,19 @@ static void tm6000_urb_received(struct urb *urb)
int ret;
struct tm6000_core *dev = urb->context;
- if (urb->status != 0)
+ switch (urb->status) {
+ case 0:
+ case -ETIMEDOUT:
+ break;
+ case -ENOENT:
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ return;
+ default:
print_err_status(dev, 0, urb->status);
- else if (urb->actual_length > 0)
+ }
+
+ if (urb->actual_length > 0)
dvb_dmx_swfilter(&dev->dvb->demux, urb->transfer_buffer,
urb->actual_length);
@@ -151,7 +161,7 @@ static int tm6000_start_stream(struct tm6000_core *dev)
printk(KERN_ERR "tm6000: pipe resetted\n");
/* mutex_lock(&tm6000_driver.open_close_mutex); */
- ret = usb_submit_urb(dvb->bulk_urb, GFP_KERNEL);
+ ret = usb_submit_urb(dvb->bulk_urb, GFP_ATOMIC);
/* mutex_unlock(&tm6000_driver.open_close_mutex); */
if (ret) {
@@ -396,6 +406,11 @@ static int dvb_init(struct tm6000_core *dev)
if (!dev->caps.has_dvb)
return 0;
+ if (dev->udev->speed == USB_SPEED_FULL) {
+ printk(KERN_INFO "This USB2.0 device cannot be run on a USB1.1 port. (it lacks a hardware PID filter)\n");
+ return 0;
+ }
+
dvb = kzalloc(sizeof(struct tm6000_dvb), GFP_KERNEL);
if (!dvb) {
printk(KERN_INFO "Cannot allocate memory\n");
diff --git a/drivers/media/video/tm6000/tm6000-i2c.c b/drivers/media/video/tm6000/tm6000-i2c.c
index 0290bbf00c3e..c7e23e3dd75e 100644
--- a/drivers/media/video/tm6000/tm6000-i2c.c
+++ b/drivers/media/video/tm6000/tm6000-i2c.c
@@ -46,11 +46,10 @@ static int tm6000_i2c_send_regs(struct tm6000_core *dev, unsigned char addr,
__u8 reg, char *buf, int len)
{
int rc;
- unsigned int tsleep;
unsigned int i2c_packet_limit = 16;
if (dev->dev_type == TM6010)
- i2c_packet_limit = 64;
+ i2c_packet_limit = 80;
if (!buf)
return -1;
@@ -71,10 +70,6 @@ static int tm6000_i2c_send_regs(struct tm6000_core *dev, unsigned char addr,
return rc;
}
- /* Calculate delay time, 14000us for 64 bytes */
- tsleep = ((len * 200) + 200 + 1000) / 1000;
- msleep(tsleep);
-
/* release mutex */
return rc;
}
@@ -145,7 +140,6 @@ static int tm6000_i2c_recv_regs16(struct tm6000_core *dev, unsigned char addr,
return rc;
}
- msleep(1400 / 1000);
rc = tm6000_read_write_usb(dev, USB_DIR_IN | USB_TYPE_VENDOR |
USB_RECIP_DEVICE, REQ_35_AFTEK_TUNER_READ,
reg, 0, buf, len);
diff --git a/drivers/media/video/tm6000/tm6000-input.c b/drivers/media/video/tm6000/tm6000-input.c
index 405d12729d05..7844607dd45a 100644
--- a/drivers/media/video/tm6000/tm6000-input.c
+++ b/drivers/media/video/tm6000/tm6000-input.c
@@ -31,22 +31,25 @@
static unsigned int ir_debug;
module_param(ir_debug, int, 0644);
-MODULE_PARM_DESC(ir_debug, "enable debug message [IR]");
+MODULE_PARM_DESC(ir_debug, "debug message level");
static unsigned int enable_ir = 1;
module_param(enable_ir, int, 0644);
MODULE_PARM_DESC(enable_ir, "enable ir (default is enable)");
-/* number of 50ms for ON-OFF-ON power led */
-/* show IR activity */
-#define PWLED_OFF 2
+static unsigned int ir_clock_mhz = 12;
+module_param(ir_clock_mhz, int, 0644);
+MODULE_PARM_DESC(enable_ir, "ir clock, in MHz");
+
+#define URB_SUBMIT_DELAY 100 /* ms - Delay to submit an URB request on retrial and init */
+#define URB_INT_LED_DELAY 100 /* ms - Delay to turn led on again on int mode */
#undef dprintk
-#define dprintk(fmt, arg...) \
- if (ir_debug) { \
+#define dprintk(level, fmt, arg...) do {\
+ if (ir_debug >= level) \
printk(KERN_DEBUG "%s/ir: " fmt, ir->name , ## arg); \
- }
+ } while (0)
struct tm6000_ir_poll_result {
u16 rc_data;
@@ -62,20 +65,15 @@ struct tm6000_IR {
int polling;
struct delayed_work work;
u8 wait:1;
- u8 key:1;
- u8 pwled:1;
- u8 pwledcnt;
+ u8 pwled:2;
+ u8 submit_urb:1;
u16 key_addr;
struct urb *int_urb;
- u8 *urb_data;
-
- int (*get_key) (struct tm6000_IR *, struct tm6000_ir_poll_result *);
/* IR device properties */
u64 rc_type;
};
-
void tm6000_ir_wait(struct tm6000_core *dev, u8 state)
{
struct tm6000_IR *ir = dev->ir;
@@ -83,62 +81,84 @@ void tm6000_ir_wait(struct tm6000_core *dev, u8 state)
if (!dev->ir)
return;
+ dprintk(2, "%s: %i\n",__func__, ir->wait);
+
if (state)
ir->wait = 1;
else
ir->wait = 0;
}
-
static int tm6000_ir_config(struct tm6000_IR *ir)
{
struct tm6000_core *dev = ir->dev;
- u8 buf[10];
- int rc;
+ u32 pulse = 0, leader = 0;
+
+ dprintk(2, "%s\n",__func__);
+
+ /*
+ * The IR decoder supports RC-5 or NEC, with configurable timing.
+ * The timing configuration there is not that accurate, as it uses
+ * approximate values. The NEC spec mentions a 562.5 us unit period,
+ * and RC-5 uses an 888.8 us period.
+ * Currently, the driver assumes a clock provided by a 12 MHz XTAL, but
+ * a modprobe parameter can adjust it.
+ * Adjustments are required for other timings.
+ * It seems that the 900 us timing for NEC is used to detect an RC-5
+ * IR, in order to discard such decoding.
+ */
switch (ir->rc_type) {
case RC_TYPE_NEC:
- /* Setup IR decoder for NEC standard 12MHz system clock */
- /* IR_LEADER_CNT = 0.9ms */
- tm6000_set_reg(dev, TM6010_REQ07_RD8_IR_LEADER1, 0xaa);
- tm6000_set_reg(dev, TM6010_REQ07_RD8_IR_LEADER0, 0x30);
- /* IR_PULSE_CNT = 0.7ms */
- tm6000_set_reg(dev, TM6010_REQ07_RD8_IR_PULSE_CNT1, 0x20);
- tm6000_set_reg(dev, TM6010_REQ07_RD8_IR_PULSE_CNT0, 0xd0);
- /* Remote WAKEUP = enable */
- tm6000_set_reg(dev, TM6010_REQ07_RE5_REMOTE_WAKEUP, 0xfe);
- /* IR_WKUP_SEL = Low byte in decoded IR data */
- tm6000_set_reg(dev, TM6010_REQ07_RD8_IR_WAKEUP_SEL, 0xff);
- /* IR_WKU_ADD code */
- tm6000_set_reg(dev, TM6010_REQ07_RD8_IR_WAKEUP_ADD, 0xff);
- tm6000_flash_led(dev, 0);
- msleep(100);
- tm6000_flash_led(dev, 1);
+ leader = 900; /* us */
+ pulse = 700; /* us - the actual value would be 562 */
break;
default:
- /* hack */
- buf[0] = 0xff;
- buf[1] = 0xff;
- buf[2] = 0xf2;
- buf[3] = 0x2b;
- buf[4] = 0x20;
- buf[5] = 0x35;
- buf[6] = 0x60;
- buf[7] = 0x04;
- buf[8] = 0xc0;
- buf[9] = 0x08;
-
- rc = tm6000_read_write_usb(dev, USB_DIR_OUT | USB_TYPE_VENDOR |
- USB_RECIP_DEVICE, REQ_00_SET_IR_VALUE, 0, 0, buf, 0x0a);
- msleep(100);
-
- if (rc < 0) {
- printk(KERN_INFO "IR configuration failed");
- return rc;
- }
+ case RC_TYPE_RC5:
+ leader = 900; /* us - from the NEC decoding */
+ pulse = 1780; /* us - the actual value would be 1776 */
break;
}
+ pulse = ir_clock_mhz * pulse;
+ leader = ir_clock_mhz * leader;
+ if (ir->rc_type == RC_TYPE_NEC)
+ leader = leader | 0x8000;
+
+ dprintk(2, "%s: %s, %d MHz, leader = 0x%04x, pulse = 0x%06x \n",
+ __func__,
+ (ir->rc_type == RC_TYPE_NEC) ? "NEC" : "RC-5",
+ ir_clock_mhz, leader, pulse);
+
+ /* Remote WAKEUP = enable, normal mode, from IR decoder output */
+ tm6000_set_reg(dev, TM6010_REQ07_RE5_REMOTE_WAKEUP, 0xfe);
+
+ /* Enable IR reception in non-burst mode */
+ tm6000_set_reg(dev, TM6010_REQ07_RD8_IR, 0x2f);
+
+ /* IR_WKUP_SEL = Low byte in decoded IR data */
+ tm6000_set_reg(dev, TM6010_REQ07_RDA_IR_WAKEUP_SEL, 0xff);
+ /* IR_WKU_ADD code */
+ tm6000_set_reg(dev, TM6010_REQ07_RDB_IR_WAKEUP_ADD, 0xff);
+
+ tm6000_set_reg(dev, TM6010_REQ07_RDC_IR_LEADER1, leader >> 8);
+ tm6000_set_reg(dev, TM6010_REQ07_RDD_IR_LEADER0, leader);
+
+ tm6000_set_reg(dev, TM6010_REQ07_RDE_IR_PULSE_CNT1, pulse >> 8);
+ tm6000_set_reg(dev, TM6010_REQ07_RDF_IR_PULSE_CNT0, pulse);
+
+ if (!ir->polling)
+ tm6000_set_reg(dev, REQ_04_EN_DISABLE_MCU_INT, 2, 0);
+ else
+ tm6000_set_reg(dev, REQ_04_EN_DISABLE_MCU_INT, 2, 1);
+ msleep(10);
+
+ /* Shows that IR is working via the LED */
+ tm6000_flash_led(dev, 0);
+ msleep(100);
+ tm6000_flash_led(dev, 1);
+ ir->pwled = 1;
+
return 0;
}
@@ -146,132 +166,124 @@ static void tm6000_ir_urb_received(struct urb *urb)
{
struct tm6000_core *dev = urb->context;
struct tm6000_IR *ir = dev->ir;
+ struct tm6000_ir_poll_result poll_result;
+ char *buf;
int rc;
- if (urb->status != 0)
- printk(KERN_INFO "not ready\n");
- else if (urb->actual_length > 0) {
- memcpy(ir->urb_data, urb->transfer_buffer, urb->actual_length);
+ dprintk(2, "%s\n",__func__);
+ if (urb->status < 0 || urb->actual_length <= 0) {
+ printk(KERN_INFO "tm6000: IR URB failure: status: %i, length %i\n",
+ urb->status, urb->actual_length);
+ ir->submit_urb = 1;
+ schedule_delayed_work(&ir->work, msecs_to_jiffies(URB_SUBMIT_DELAY));
+ return;
+ }
+ buf = urb->transfer_buffer;
- dprintk("data %02x %02x %02x %02x\n", ir->urb_data[0],
- ir->urb_data[1], ir->urb_data[2], ir->urb_data[3]);
+ if (ir_debug)
+ print_hex_dump(KERN_DEBUG, "tm6000: IR data: ",
+ DUMP_PREFIX_OFFSET,16, 1,
+ buf, urb->actual_length, false);
- ir->key = 1;
- }
+ poll_result.rc_data = buf[0];
+ if (urb->actual_length > 1)
+ poll_result.rc_data |= buf[1] << 8;
+
+ dprintk(1, "%s, scancode: 0x%04x\n",__func__, poll_result.rc_data);
+ rc_keydown(ir->rc, poll_result.rc_data, 0);
rc = usb_submit_urb(urb, GFP_ATOMIC);
+ /*
+ * Flash the led. We can't do it here, as it is running on IRQ context.
+ * So, use the scheduler to do it, in a few ms.
+ */
+ ir->pwled = 2;
+ schedule_delayed_work(&ir->work, msecs_to_jiffies(10));
}
-static int default_polling_getkey(struct tm6000_IR *ir,
- struct tm6000_ir_poll_result *poll_result)
+static void tm6000_ir_handle_key(struct work_struct *work)
{
+ struct tm6000_IR *ir = container_of(work, struct tm6000_IR, work.work);
struct tm6000_core *dev = ir->dev;
+ struct tm6000_ir_poll_result poll_result;
int rc;
u8 buf[2];
- if (ir->wait && !&dev->int_in)
- return 0;
-
- if (&dev->int_in) {
- switch (ir->rc_type) {
- case RC_TYPE_RC5:
- poll_result->rc_data = ir->urb_data[0];
- break;
- case RC_TYPE_NEC:
- if (ir->urb_data[1] == ((ir->key_addr >> 8) & 0xff)) {
- poll_result->rc_data = ir->urb_data[0]
- | ir->urb_data[1] << 8;
- }
- break;
- default:
- poll_result->rc_data = ir->urb_data[0]
- | ir->urb_data[1] << 8;
- break;
- }
- } else {
- tm6000_set_reg(dev, REQ_04_EN_DISABLE_MCU_INT, 2, 0);
- msleep(10);
- tm6000_set_reg(dev, REQ_04_EN_DISABLE_MCU_INT, 2, 1);
- msleep(10);
-
- if (ir->rc_type == RC_TYPE_RC5) {
- rc = tm6000_read_write_usb(dev, USB_DIR_IN |
- USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- REQ_02_GET_IR_CODE, 0, 0, buf, 1);
-
- msleep(10);
-
- dprintk("read data=%02x\n", buf[0]);
- if (rc < 0)
- return rc;
+ if (ir->wait)
+ return;
- poll_result->rc_data = buf[0];
- } else {
- rc = tm6000_read_write_usb(dev, USB_DIR_IN |
- USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- REQ_02_GET_IR_CODE, 0, 0, buf, 2);
+ dprintk(3, "%s\n",__func__);
- msleep(10);
+ rc = tm6000_read_write_usb(dev, USB_DIR_IN |
+ USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ REQ_02_GET_IR_CODE, 0, 0, buf, 2);
+ if (rc < 0)
+ return;
- dprintk("read data=%04x\n", buf[0] | buf[1] << 8);
- if (rc < 0)
- return rc;
+ if (rc > 1)
+ poll_result.rc_data = buf[0] | buf[1] << 8;
+ else
+ poll_result.rc_data = buf[0];
- poll_result->rc_data = buf[0] | buf[1] << 8;
+ /* Check if something was read */
+ if ((poll_result.rc_data & 0xff) == 0xff) {
+ if (!ir->pwled) {
+ tm6000_flash_led(dev, 1);
+ ir->pwled = 1;
}
- if ((poll_result->rc_data & 0x00ff) != 0xff)
- ir->key = 1;
+ return;
}
- return 0;
+
+ dprintk(1, "%s, scancode: 0x%04x\n",__func__, poll_result.rc_data);
+ rc_keydown(ir->rc, poll_result.rc_data, 0);
+ tm6000_flash_led(dev, 0);
+ ir->pwled = 0;
+
+ /* Re-schedule polling */
+ schedule_delayed_work(&ir->work, msecs_to_jiffies(ir->polling));
}
-static void tm6000_ir_handle_key(struct tm6000_IR *ir)
+static void tm6000_ir_int_work(struct work_struct *work)
{
+ struct tm6000_IR *ir = container_of(work, struct tm6000_IR, work.work);
struct tm6000_core *dev = ir->dev;
- int result;
- struct tm6000_ir_poll_result poll_result;
+ int rc;
- /* read the registers containing the IR status */
- result = ir->get_key(ir, &poll_result);
- if (result < 0) {
- printk(KERN_INFO "ir->get_key() failed %d\n", result);
- return;
- }
+ dprintk(3, "%s, submit_urb = %d, pwled = %d\n",__func__, ir->submit_urb,
+ ir->pwled);
- dprintk("ir->get_key result data=%04x\n", poll_result.rc_data);
+ if (ir->submit_urb) {
+ dprintk(3, "Resubmit urb\n");
+ tm6000_set_reg(dev, REQ_04_EN_DISABLE_MCU_INT, 2, 0);
- if (ir->pwled) {
- if (ir->pwledcnt >= PWLED_OFF) {
- ir->pwled = 0;
- ir->pwledcnt = 0;
- tm6000_flash_led(dev, 1);
- } else
- ir->pwledcnt += 1;
+ rc = usb_submit_urb(ir->int_urb, GFP_ATOMIC);
+ if (rc < 0) {
+ printk(KERN_ERR "tm6000: Can't submit an IR interrupt. Error %i\n",
+ rc);
+ /* Retry in 100 ms */
+ schedule_delayed_work(&ir->work, msecs_to_jiffies(URB_SUBMIT_DELAY));
+ return;
+ }
+ ir->submit_urb = 0;
}
- if (ir->key) {
- rc_keydown(ir->rc, poll_result.rc_data, 0);
- ir->key = 0;
- ir->pwled = 1;
- ir->pwledcnt = 0;
+ /* Led is enabled only if USB submit doesn't fail */
+ if (ir->pwled == 2) {
tm6000_flash_led(dev, 0);
+ ir->pwled = 0;
+ schedule_delayed_work(&ir->work, msecs_to_jiffies(URB_INT_LED_DELAY));
+ } else if (!ir->pwled) {
+ tm6000_flash_led(dev, 1);
+ ir->pwled = 1;
}
- return;
-}
-
-static void tm6000_ir_work(struct work_struct *work)
-{
- struct tm6000_IR *ir = container_of(work, struct tm6000_IR, work.work);
-
- tm6000_ir_handle_key(ir);
- schedule_delayed_work(&ir->work, msecs_to_jiffies(ir->polling));
}
static int tm6000_ir_start(struct rc_dev *rc)
{
struct tm6000_IR *ir = rc->priv;
- INIT_DELAYED_WORK(&ir->work, tm6000_ir_work);
+ dprintk(2, "%s\n",__func__);
+
schedule_delayed_work(&ir->work, 0);
return 0;
@@ -281,6 +293,8 @@ static void tm6000_ir_stop(struct rc_dev *rc)
{
struct tm6000_IR *ir = rc->priv;
+ dprintk(2, "%s\n",__func__);
+
cancel_delayed_work_sync(&ir->work);
}
@@ -291,10 +305,11 @@ static int tm6000_ir_change_protocol(struct rc_dev *rc, u64 rc_type)
if (!ir)
return 0;
+ dprintk(2, "%s\n",__func__);
+
if ((rc->rc_map.scan) && (rc_type == RC_TYPE_NEC))
ir->key_addr = ((rc->rc_map.scan[0].scancode >> 8) & 0xffff);
- ir->get_key = default_polling_getkey;
ir->rc_type = rc_type;
tm6000_ir_config(ir);
@@ -302,17 +317,19 @@ static int tm6000_ir_change_protocol(struct rc_dev *rc, u64 rc_type)
return 0;
}
-int tm6000_ir_int_start(struct tm6000_core *dev)
+static int __tm6000_ir_int_start(struct rc_dev *rc)
{
- struct tm6000_IR *ir = dev->ir;
+ struct tm6000_IR *ir = rc->priv;
+ struct tm6000_core *dev = ir->dev;
int pipe, size;
int err = -ENOMEM;
-
if (!ir)
return -ENODEV;
- ir->int_urb = usb_alloc_urb(0, GFP_KERNEL);
+ dprintk(2, "%s\n",__func__);
+
+ ir->int_urb = usb_alloc_urb(0, GFP_ATOMIC);
if (!ir->int_urb)
return -ENOMEM;
@@ -321,42 +338,59 @@ int tm6000_ir_int_start(struct tm6000_core *dev)
& USB_ENDPOINT_NUMBER_MASK);
size = usb_maxpacket(dev->udev, pipe, usb_pipeout(pipe));
- dprintk("IR max size: %d\n", size);
+ dprintk(1, "IR max size: %d\n", size);
- ir->int_urb->transfer_buffer = kzalloc(size, GFP_KERNEL);
+ ir->int_urb->transfer_buffer = kzalloc(size, GFP_ATOMIC);
if (ir->int_urb->transfer_buffer == NULL) {
usb_free_urb(ir->int_urb);
return err;
}
- dprintk("int interval: %d\n", dev->int_in.endp->desc.bInterval);
+ dprintk(1, "int interval: %d\n", dev->int_in.endp->desc.bInterval);
+
usb_fill_int_urb(ir->int_urb, dev->udev, pipe,
ir->int_urb->transfer_buffer, size,
tm6000_ir_urb_received, dev,
dev->int_in.endp->desc.bInterval);
- err = usb_submit_urb(ir->int_urb, GFP_KERNEL);
- if (err) {
- kfree(ir->int_urb->transfer_buffer);
- usb_free_urb(ir->int_urb);
- return err;
- }
- ir->urb_data = kzalloc(size, GFP_KERNEL);
+
+ ir->submit_urb = 1;
+ schedule_delayed_work(&ir->work, msecs_to_jiffies(URB_SUBMIT_DELAY));
return 0;
}
-void tm6000_ir_int_stop(struct tm6000_core *dev)
+static void __tm6000_ir_int_stop(struct rc_dev *rc)
{
- struct tm6000_IR *ir = dev->ir;
+ struct tm6000_IR *ir = rc->priv;
- if (!ir)
+ if (!ir || !ir->int_urb)
return;
+ dprintk(2, "%s\n",__func__);
+
usb_kill_urb(ir->int_urb);
kfree(ir->int_urb->transfer_buffer);
usb_free_urb(ir->int_urb);
ir->int_urb = NULL;
- kfree(ir->urb_data);
- ir->urb_data = NULL;
+}
+
+int tm6000_ir_int_start(struct tm6000_core *dev)
+{
+ struct tm6000_IR *ir = dev->ir;
+
+ if (!ir)
+ return 0;
+
+ return __tm6000_ir_int_start(ir->rc);
+}
+
+void tm6000_ir_int_stop(struct tm6000_core *dev)
+{
+ struct tm6000_IR *ir = dev->ir;
+
+ if (!ir || !ir->rc)
+ return;
+
+ __tm6000_ir_int_stop(ir->rc);
}
int tm6000_ir_init(struct tm6000_core *dev)
@@ -374,29 +408,36 @@ int tm6000_ir_init(struct tm6000_core *dev)
if (!dev->ir_codes)
return 0;
- ir = kzalloc(sizeof(*ir), GFP_KERNEL);
+ ir = kzalloc(sizeof(*ir), GFP_ATOMIC);
rc = rc_allocate_device();
if (!ir || !rc)
goto out;
+ dprintk(2, "%s\n", __func__);
+
/* record handles to ourself */
ir->dev = dev;
dev->ir = ir;
ir->rc = rc;
- /* input einrichten */
+ /* input setup */
rc->allowed_protos = RC_TYPE_RC5 | RC_TYPE_NEC;
+ /* Needed, in order to support NEC remotes with 24 or 32 bits */
+ rc->scanmask = 0xffff;
rc->priv = ir;
rc->change_protocol = tm6000_ir_change_protocol;
- rc->open = tm6000_ir_start;
- rc->close = tm6000_ir_stop;
+ if (dev->int_in.endp) {
+ rc->open = __tm6000_ir_int_start;
+ rc->close = __tm6000_ir_int_stop;
+ INIT_DELAYED_WORK(&ir->work, tm6000_ir_int_work);
+ } else {
+ rc->open = tm6000_ir_start;
+ rc->close = tm6000_ir_stop;
+ ir->polling = 50;
+ INIT_DELAYED_WORK(&ir->work, tm6000_ir_handle_key);
+ }
rc->driver_type = RC_DRIVER_SCANCODE;
- ir->polling = 50;
- ir->pwled = 0;
- ir->pwledcnt = 0;
-
-
snprintf(ir->name, sizeof(ir->name), "tm5600/60x0 IR (%s)",
dev->name);
@@ -415,15 +456,6 @@ int tm6000_ir_init(struct tm6000_core *dev)
rc->driver_name = "tm6000";
rc->dev.parent = &dev->udev->dev;
- if (&dev->int_in) {
- dprintk("IR over int\n");
-
- err = tm6000_ir_int_start(dev);
-
- if (err)
- goto out;
- }
-
/* ir register */
err = rc_register_device(rc);
if (err)
@@ -447,10 +479,19 @@ int tm6000_ir_fini(struct tm6000_core *dev)
if (!ir)
return 0;
+ dprintk(2, "%s\n",__func__);
+
rc_unregister_device(ir->rc);
- if (ir->int_urb)
- tm6000_ir_int_stop(dev);
+ if (!ir->polling)
+ __tm6000_ir_int_stop(ir->rc);
+
+ tm6000_ir_stop(ir->rc);
+
+ /* Turn off the led */
+ tm6000_flash_led(dev, 0);
+ ir->pwled = 0;
+
kfree(ir);
dev->ir = NULL;
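
The tm6000-input.c changes above split IR handling into a polled path and an interrupt path, both driven by a self-rescheduling delayed work: the handler re-arms itself, and the open/close callbacks only schedule or cancel it. A minimal sketch of that pattern, with illustrative names rather than the driver's own code:

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct my_ir {
	struct delayed_work work;
	unsigned int polling_ms;
};

static void my_ir_poll(struct work_struct *work)
{
	struct my_ir *ir = container_of(work, struct my_ir, work.work);

	/* ... read the IR registers and report the scancode here ... */

	/* The handler re-arms its own polling. */
	schedule_delayed_work(&ir->work, msecs_to_jiffies(ir->polling_ms));
}

static void my_ir_setup(struct my_ir *ir)
{
	ir->polling_ms = 50;
	INIT_DELAYED_WORK(&ir->work, my_ir_poll);
}

static void my_ir_open(struct my_ir *ir)
{
	schedule_delayed_work(&ir->work, 0);	/* start polling immediately */
}

static void my_ir_close(struct my_ir *ir)
{
	cancel_delayed_work_sync(&ir->work);	/* stop and wait for the handler */
}
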
diff --git a/drivers/media/video/tm6000/tm6000-regs.h b/drivers/media/video/tm6000/tm6000-regs.h
index 7f491b6de933..a38c251ed57b 100644
--- a/drivers/media/video/tm6000/tm6000-regs.h
+++ b/drivers/media/video/tm6000/tm6000-regs.h
@@ -284,19 +284,19 @@ enum {
/* ONLY for TM6010 */
#define TM6010_REQ07_RD8_IR 0x07, 0xd8
/* ONLY for TM6010 */
-#define TM6010_REQ07_RD8_IR_BSIZE 0x07, 0xd9
+#define TM6010_REQ07_RD9_IR_BSIZE 0x07, 0xd9
/* ONLY for TM6010 */
-#define TM6010_REQ07_RD8_IR_WAKEUP_SEL 0x07, 0xda
+#define TM6010_REQ07_RDA_IR_WAKEUP_SEL 0x07, 0xda
/* ONLY for TM6010 */
-#define TM6010_REQ07_RD8_IR_WAKEUP_ADD 0x07, 0xdb
+#define TM6010_REQ07_RDB_IR_WAKEUP_ADD 0x07, 0xdb
/* ONLY for TM6010 */
-#define TM6010_REQ07_RD8_IR_LEADER1 0x07, 0xdc
+#define TM6010_REQ07_RDC_IR_LEADER1 0x07, 0xdc
/* ONLY for TM6010 */
-#define TM6010_REQ07_RD8_IR_LEADER0 0x07, 0xdd
+#define TM6010_REQ07_RDD_IR_LEADER0 0x07, 0xdd
/* ONLY for TM6010 */
-#define TM6010_REQ07_RD8_IR_PULSE_CNT1 0x07, 0xde
+#define TM6010_REQ07_RDE_IR_PULSE_CNT1 0x07, 0xde
/* ONLY for TM6010 */
-#define TM6010_REQ07_RD8_IR_PULSE_CNT0 0x07, 0xdf
+#define TM6010_REQ07_RDF_IR_PULSE_CNT0 0x07, 0xdf
/* ONLY for TM6010 */
#define TM6010_REQ07_RE0_DVIDEO_SOURCE 0x07, 0xe0
/* ONLY for TM6010 */
diff --git a/drivers/media/video/tm6000/tm6000-stds.c b/drivers/media/video/tm6000/tm6000-stds.c
index 9a4145dc3d87..9dc0831d813f 100644
--- a/drivers/media/video/tm6000/tm6000-stds.c
+++ b/drivers/media/video/tm6000/tm6000-stds.c
@@ -361,82 +361,51 @@ static int tm6000_set_audio_std(struct tm6000_core *dev)
return 0;
}
- switch (tm6010_a_mode) {
+ /*
+ * STD/MN shouldn't be affected by tm6010_a_mode, as there's just one
+ * audio standard for each V4L2_STD type.
+ */
+ if ((dev->norm & V4L2_STD_NTSC) == V4L2_STD_NTSC_M_KR) {
+ areg_05 |= 0x04;
+ } else if ((dev->norm & V4L2_STD_NTSC) == V4L2_STD_NTSC_M_JP) {
+ areg_05 |= 0x43;
+ } else if (dev->norm & V4L2_STD_MN) {
+ areg_05 |= 0x22;
+ } else switch (tm6010_a_mode) {
/* auto */
case 0:
- switch (dev->norm) {
- case V4L2_STD_NTSC_M_KR:
+ if ((dev->norm & V4L2_STD_SECAM) == V4L2_STD_SECAM_L)
areg_05 |= 0x00;
- break;
- case V4L2_STD_NTSC_M_JP:
- areg_05 |= 0x40;
- break;
- case V4L2_STD_NTSC_M:
- case V4L2_STD_PAL_M:
- case V4L2_STD_PAL_N:
- areg_05 |= 0x20;
- break;
- case V4L2_STD_PAL_Nc:
- areg_05 |= 0x60;
- break;
- case V4L2_STD_SECAM_L:
- areg_05 |= 0x00;
- break;
- case V4L2_STD_DK:
+ else /* Other PAL/SECAM standards */
areg_05 |= 0x10;
- break;
- }
break;
/* A2 */
case 1:
- switch (dev->norm) {
- case V4L2_STD_B:
- case V4L2_STD_GH:
- areg_05 = 0x05;
- break;
- case V4L2_STD_DK:
+ if (dev->norm & V4L2_STD_DK)
areg_05 = 0x09;
- break;
- }
+ else
+ areg_05 = 0x05;
break;
/* NICAM */
case 2:
- switch (dev->norm) {
- case V4L2_STD_B:
- case V4L2_STD_GH:
- areg_05 = 0x07;
- break;
- case V4L2_STD_DK:
+ if (dev->norm & V4L2_STD_DK) {
areg_05 = 0x06;
- break;
- case V4L2_STD_PAL_I:
+ } else if (dev->norm & V4L2_STD_PAL_I) {
areg_05 = 0x08;
- break;
- case V4L2_STD_SECAM_L:
+ } else if (dev->norm & V4L2_STD_SECAM_L) {
areg_05 = 0x0a;
areg_02 = 0x02;
- break;
+ } else {
+ areg_05 = 0x07;
}
nicam_flag = 1;
break;
/* other */
case 3:
- switch (dev->norm) {
- /* DK3_A2 */
- case V4L2_STD_DK:
+ if (dev->norm & V4L2_STD_DK) {
areg_05 = 0x0b;
- break;
- /* Korea */
- case V4L2_STD_NTSC_M_KR:
- areg_05 = 0x04;
- break;
- /* EIAJ */
- case V4L2_STD_NTSC_M_JP:
- areg_05 = 0x03;
- break;
- default:
+ } else {
areg_05 = 0x02;
- break;
}
break;
}
@@ -557,10 +526,16 @@ int tm6000_set_standard(struct tm6000_core *dev)
case TM6000_AMUX_ADC1:
tm6000_set_reg_mask(dev, TM6010_REQ08_RF0_DAUDIO_INPUT_CONFIG,
0x00, 0x0f);
+ /* Mux overflow workaround */
+ tm6000_set_reg_mask(dev, TM6010_REQ07_R07_OUTPUT_CONTROL,
+ 0x10, 0xf0);
break;
case TM6000_AMUX_ADC2:
tm6000_set_reg_mask(dev, TM6010_REQ08_RF0_DAUDIO_INPUT_CONFIG,
0x08, 0x0f);
+ /* Mux overflow workaround */
+ tm6000_set_reg_mask(dev, TM6010_REQ07_R07_OUTPUT_CONTROL,
+ 0x10, 0xf0);
break;
case TM6000_AMUX_SIF1:
reg_08_e2 |= 0x02;
@@ -570,6 +545,9 @@ int tm6000_set_standard(struct tm6000_core *dev)
tm6000_set_reg(dev, TM6010_REQ08_RE4_ADC_IN2_SEL, 0xf3);
tm6000_set_reg_mask(dev, TM6010_REQ08_RF0_DAUDIO_INPUT_CONFIG,
0x02, 0x0f);
+ /* Mux overflow workaround */
+ tm6000_set_reg_mask(dev, TM6010_REQ07_R07_OUTPUT_CONTROL,
+ 0x30, 0xf0);
break;
case TM6000_AMUX_SIF2:
reg_08_e2 |= 0x02;
@@ -579,6 +557,9 @@ int tm6000_set_standard(struct tm6000_core *dev)
tm6000_set_reg(dev, TM6010_REQ08_RE4_ADC_IN2_SEL, 0xf7);
tm6000_set_reg_mask(dev, TM6010_REQ08_RF0_DAUDIO_INPUT_CONFIG,
0x02, 0x0f);
+ /* Mux overflow workaround */
+ tm6000_set_reg_mask(dev, TM6010_REQ07_R07_OUTPUT_CONTROL,
+ 0x30, 0xf0);
break;
default:
break;
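
The tm6000-stds.c rewrite above works because v4l2_std_id is a bitmask: switching on exact values misses the combined standards userspace may pass, while masking the family bits and comparing still matches. A small sketch of the test, assuming nothing beyond the standard V4L2 macros:

#include <linux/types.h>
#include <linux/videodev2.h>

static bool is_ntsc_m_kr(v4l2_std_id norm)
{
	/*
	 * V4L2_STD_NTSC covers the whole NTSC family; the comparison only
	 * succeeds when the KR variant is the one selected, regardless of
	 * any bits set outside the NTSC family.
	 */
	return (norm & V4L2_STD_NTSC) == V4L2_STD_NTSC_M_KR;
}
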
diff --git a/drivers/media/video/tm6000/tm6000-video.c b/drivers/media/video/tm6000/tm6000-video.c
index 1e5ace0b5d10..bc13db736e24 100644
--- a/drivers/media/video/tm6000/tm6000-video.c
+++ b/drivers/media/video/tm6000/tm6000-video.c
@@ -1605,16 +1605,25 @@ static int tm6000_release(struct file *file)
res_free(dev, fh);
if (!dev->users) {
- int err;
-
tm6000_uninit_isoc(dev);
+ /* Stop interrupt USB pipe */
+ tm6000_ir_int_stop(dev);
+
+ usb_reset_configuration(dev->udev);
+
+ if (dev->int_in.endp)
+ usb_set_interface(dev->udev,
+ dev->isoc_in.bInterfaceNumber, 2);
+ else
+ usb_set_interface(dev->udev,
+ dev->isoc_in.bInterfaceNumber, 0);
+
+ /* Start interrupt USB pipe */
+ tm6000_ir_int_start(dev);
+
if (!fh->radio)
videobuf_mmap_free(&fh->vb_vidq);
-
- err = tm6000_reset(dev);
- if (err < 0)
- dev_err(&vdev->dev, "reset failed: %d\n", err);
}
kfree(fh);
diff --git a/drivers/media/video/tm6000/tm6000.h b/drivers/media/video/tm6000/tm6000.h
index 2777e514eff2..27ba659cfa85 100644
--- a/drivers/media/video/tm6000/tm6000.h
+++ b/drivers/media/video/tm6000/tm6000.h
@@ -188,6 +188,9 @@ struct tm6000_core {
/* Device Capabilities*/
struct tm6000_capabilities caps;
+ /* Used to load alsa/dvb */
+ struct work_struct request_module_wk;
+
/* Tuner configuration */
int tuner_type; /* type of the tuner */
int tuner_addr; /* tuner address */
diff --git a/drivers/media/video/tuner-core.c b/drivers/media/video/tuner-core.c
index 11cc980b0cd5..4059ea178c2d 100644
--- a/drivers/media/video/tuner-core.c
+++ b/drivers/media/video/tuner-core.c
@@ -326,6 +326,7 @@ static void set_type(struct i2c_client *c, unsigned int type,
t->mode_mask = T_RADIO;
break;
case TUNER_PHILIPS_FMD1216ME_MK3:
+ case TUNER_PHILIPS_FMD1216MEX_MK3:
buffer[0] = 0x0b;
buffer[1] = 0xdc;
buffer[2] = 0x9c;
diff --git a/drivers/media/video/tvp514x.c b/drivers/media/video/tvp514x.c
index 926f03931156..dd26cacd0556 100644
--- a/drivers/media/video/tvp514x.c
+++ b/drivers/media/video/tvp514x.c
@@ -52,7 +52,7 @@
#define LOCK_RETRY_DELAY (200)
/* Debug functions */
-static int debug;
+static bool debug;
module_param(debug, bool, 0644);
MODULE_PARM_DESC(debug, "Debug level (0-1)");
diff --git a/drivers/media/video/tvp5150.c b/drivers/media/video/tvp5150.c
index 6abaa16ae136..6be9910a6e24 100644
--- a/drivers/media/video/tvp5150.c
+++ b/drivers/media/video/tvp5150.c
@@ -703,21 +703,21 @@ static int tvp5150_set_std(struct v4l2_subdev *sd, v4l2_std_id std)
/* First tests should be against specific std */
if (std == V4L2_STD_ALL) {
- fmt = 0; /* Autodetect mode */
+ fmt = VIDEO_STD_AUTO_SWITCH_BIT; /* Autodetect mode */
} else if (std & V4L2_STD_NTSC_443) {
- fmt = 0xa;
+ fmt = VIDEO_STD_NTSC_4_43_BIT;
} else if (std & V4L2_STD_PAL_M) {
- fmt = 0x6;
+ fmt = VIDEO_STD_PAL_M_BIT;
} else if (std & (V4L2_STD_PAL_N | V4L2_STD_PAL_Nc)) {
- fmt = 0x8;
+ fmt = VIDEO_STD_PAL_COMBINATION_N_BIT;
} else {
/* Then, test against generic ones */
if (std & V4L2_STD_NTSC)
- fmt = 0x2;
+ fmt = VIDEO_STD_NTSC_MJ_BIT;
else if (std & V4L2_STD_PAL)
- fmt = 0x4;
+ fmt = VIDEO_STD_PAL_BDGHIN_BIT;
else if (std & V4L2_STD_SECAM)
- fmt = 0xc;
+ fmt = VIDEO_STD_SECAM_BIT;
}
v4l2_dbg(1, debug, sd, "Set video std register to %d.\n", fmt);
@@ -779,6 +779,70 @@ static int tvp5150_s_ctrl(struct v4l2_ctrl *ctrl)
return -EINVAL;
}
+static v4l2_std_id tvp5150_read_std(struct v4l2_subdev *sd)
+{
+ int val = tvp5150_read(sd, TVP5150_STATUS_REG_5);
+
+ switch (val & 0x0F) {
+ case 0x01:
+ return V4L2_STD_NTSC;
+ case 0x03:
+ return V4L2_STD_PAL;
+ case 0x05:
+ return V4L2_STD_PAL_M;
+ case 0x07:
+ return V4L2_STD_PAL_N | V4L2_STD_PAL_Nc;
+ case 0x09:
+ return V4L2_STD_NTSC_443;
+ case 0xb:
+ return V4L2_STD_SECAM;
+ default:
+ return V4L2_STD_UNKNOWN;
+ }
+}
+
+static int tvp5150_enum_mbus_fmt(struct v4l2_subdev *sd, unsigned index,
+ enum v4l2_mbus_pixelcode *code)
+{
+ if (index)
+ return -EINVAL;
+
+ *code = V4L2_MBUS_FMT_YUYV8_2X8;
+ return 0;
+}
+
+static int tvp5150_mbus_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *f)
+{
+ struct tvp5150 *decoder = to_tvp5150(sd);
+ v4l2_std_id std;
+
+ if (f == NULL)
+ return -EINVAL;
+
+ tvp5150_reset(sd, 0);
+
+ /* Calculate height and width based on current standard */
+ if (decoder->norm == V4L2_STD_ALL)
+ std = tvp5150_read_std(sd);
+ else
+ std = decoder->norm;
+
+ f->width = 720;
+ if (std & V4L2_STD_525_60)
+ f->height = 480;
+ else
+ f->height = 576;
+
+ f->code = V4L2_MBUS_FMT_YUYV8_2X8;
+ f->field = V4L2_FIELD_SEQ_TB;
+ f->colorspace = V4L2_COLORSPACE_SMPTE170M;
+
+ v4l2_dbg(1, debug, sd, "width = %d, height = %d\n", f->width,
+ f->height);
+ return 0;
+}
+
/****************************************************************************
I2C Command
****************************************************************************/
@@ -931,6 +995,9 @@ static const struct v4l2_subdev_tuner_ops tvp5150_tuner_ops = {
static const struct v4l2_subdev_video_ops tvp5150_video_ops = {
.s_routing = tvp5150_s_routing,
+ .enum_mbus_fmt = tvp5150_enum_mbus_fmt,
+ .s_mbus_fmt = tvp5150_mbus_fmt,
+ .try_mbus_fmt = tvp5150_mbus_fmt,
};
static const struct v4l2_subdev_vbi_ops tvp5150_vbi_ops = {
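
The tvp5150 hunks add mbus-format subdev operations, which a bridge driver reaches through v4l2_subdev_call(). A hedged usage sketch; the 'sd' pointer and the negotiate_format() helper are illustrative, not part of the patch:

#include <media/v4l2-subdev.h>
#include <media/v4l2-mediabus.h>

static int negotiate_format(struct v4l2_subdev *sd)
{
	struct v4l2_mbus_framefmt fmt = {
		.code = V4L2_MBUS_FMT_YUYV8_2X8,
	};

	/* The decoder fills in width/height from the detected standard. */
	return v4l2_subdev_call(sd, video, s_mbus_fmt, &fmt);
}
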
diff --git a/drivers/media/video/tvp7002.c b/drivers/media/video/tvp7002.c
index 7875e80cb2ff..236c559d5f51 100644
--- a/drivers/media/video/tvp7002.c
+++ b/drivers/media/video/tvp7002.c
@@ -63,7 +63,7 @@ MODULE_LICENSE("GPL");
#define TVP7002_CL_MASK 0x0f
/* Debug functions */
-static int debug;
+static bool debug;
module_param(debug, bool, 0644);
MODULE_PARM_DESC(debug, "Debug level (0-2)");
diff --git a/drivers/media/video/upd64083.c b/drivers/media/video/upd64083.c
index 9bbe61700fd5..65d065aa6091 100644
--- a/drivers/media/video/upd64083.c
+++ b/drivers/media/video/upd64083.c
@@ -34,7 +34,7 @@ MODULE_DESCRIPTION("uPD64083 driver");
MODULE_AUTHOR("T. Adachi, Takeru KOMORIYA, Hans Verkuil");
MODULE_LICENSE("GPL");
-static int debug;
+static bool debug;
module_param(debug, bool, 0644);
MODULE_PARM_DESC(debug, "Debug level (0-1)");
diff --git a/drivers/media/video/usbvision/usbvision-i2c.c b/drivers/media/video/usbvision/usbvision-i2c.c
index d7f97513b289..89fec029e924 100644
--- a/drivers/media/video/usbvision/usbvision-i2c.c
+++ b/drivers/media/video/usbvision/usbvision-i2c.c
@@ -110,42 +110,20 @@ static inline int usb_find_address(struct i2c_adapter *i2c_adap,
unsigned char addr;
int ret;
- if ((flags & I2C_M_TEN)) {
- /* a ten bit address */
- addr = 0xf0 | ((msg->addr >> 7) & 0x03);
- /* try extended address code... */
- ret = try_write_address(i2c_adap, addr, retries);
- if (ret != 1) {
- dev_err(&i2c_adap->dev,
- "died at extended address code, while writing\n");
- return -EREMOTEIO;
- }
- add[0] = addr;
- if (flags & I2C_M_RD) {
- /* okay, now switch into reading mode */
- addr |= 0x01;
- ret = try_read_address(i2c_adap, addr, retries);
- if (ret != 1) {
- dev_err(&i2c_adap->dev,
- "died at extended address code, while reading\n");
- return -EREMOTEIO;
- }
- }
- } else { /* normal 7bit address */
- addr = (msg->addr << 1);
- if (flags & I2C_M_RD)
- addr |= 1;
+ addr = (msg->addr << 1);
+ if (flags & I2C_M_RD)
+ addr |= 1;
- add[0] = addr;
- if (flags & I2C_M_RD)
- ret = try_read_address(i2c_adap, addr, retries);
- else
- ret = try_write_address(i2c_adap, addr, retries);
+ add[0] = addr;
+ if (flags & I2C_M_RD)
+ ret = try_read_address(i2c_adap, addr, retries);
+ else
+ ret = try_write_address(i2c_adap, addr, retries);
+
+ if (ret != 1)
+ return -EREMOTEIO;
- if (ret != 1)
- return -EREMOTEIO;
- }
return 0;
}
@@ -184,7 +162,7 @@ usbvision_i2c_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg msgs[], int num)
static u32 functionality(struct i2c_adapter *adap)
{
- return I2C_FUNC_SMBUS_EMUL | I2C_FUNC_10BIT_ADDR;
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
/* -----exported algorithm data: ------------------------------------- */
diff --git a/drivers/media/video/uvc/Kconfig b/drivers/media/video/uvc/Kconfig
index 2956a7637219..6c197da531b2 100644
--- a/drivers/media/video/uvc/Kconfig
+++ b/drivers/media/video/uvc/Kconfig
@@ -1,5 +1,6 @@
config USB_VIDEO_CLASS
tristate "USB Video Class (UVC)"
+ select VIDEOBUF2_VMALLOC
---help---
Support for the USB Video Class (UVC). Currently only video
input devices, such as webcams, are supported.
diff --git a/drivers/media/video/uvc/Makefile b/drivers/media/video/uvc/Makefile
index 2071ca8a2f03..c26d12fdb8f4 100644
--- a/drivers/media/video/uvc/Makefile
+++ b/drivers/media/video/uvc/Makefile
@@ -1,5 +1,5 @@
uvcvideo-objs := uvc_driver.o uvc_queue.o uvc_v4l2.o uvc_video.o uvc_ctrl.o \
- uvc_status.o uvc_isight.o
+ uvc_status.o uvc_isight.o uvc_debugfs.o
ifeq ($(CONFIG_MEDIA_CONTROLLER),y)
uvcvideo-objs += uvc_entity.o
endif
diff --git a/drivers/media/video/uvc/uvc_ctrl.c b/drivers/media/video/uvc/uvc_ctrl.c
index 254d32688843..0efd3b10b353 100644
--- a/drivers/media/video/uvc/uvc_ctrl.c
+++ b/drivers/media/video/uvc/uvc_ctrl.c
@@ -878,8 +878,21 @@ static int uvc_ctrl_populate_cache(struct uvc_video_chain *chain,
chain->dev->intfnum, ctrl->info.selector,
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_RES),
ctrl->info.size);
- if (ret < 0)
- return ret;
+ if (ret < 0) {
+ if (UVC_ENTITY_TYPE(ctrl->entity) !=
+ UVC_VC_EXTENSION_UNIT)
+ return ret;
+
+ /* GET_RES is mandatory for XU controls, but some
+ * cameras still choke on it. Ignore errors and set the
+ * resolution value to zero.
+ */
+ uvc_warn_once(chain->dev, UVC_WARN_XU_GET_RES,
+ "UVC non compliance - GET_RES failed on "
+ "an XU control. Enabling workaround.\n");
+ memset(uvc_ctrl_data(ctrl, UVC_CTRL_DATA_RES), 0,
+ ctrl->info.size);
+ }
}
ctrl->cached = 1;
@@ -1861,7 +1874,7 @@ int uvc_ctrl_init_device(struct uvc_device *dev)
if (ncontrols == 0)
continue;
- entity->controls = kzalloc(ncontrols * sizeof(*ctrl),
+ entity->controls = kcalloc(ncontrols, sizeof(*ctrl),
GFP_KERNEL);
if (entity->controls == NULL)
return -ENOMEM;
diff --git a/drivers/media/video/uvc/uvc_debugfs.c b/drivers/media/video/uvc/uvc_debugfs.c
new file mode 100644
index 000000000000..14561a5abb79
--- /dev/null
+++ b/drivers/media/video/uvc/uvc_debugfs.c
@@ -0,0 +1,136 @@
+/*
+ * uvc_debugfs.c -- USB Video Class driver - Debugging support
+ *
+ * Copyright (C) 2011
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+
+#include "uvcvideo.h"
+
+/* -----------------------------------------------------------------------------
+ * Statistics
+ */
+
+#define UVC_DEBUGFS_BUF_SIZE 1024
+
+struct uvc_debugfs_buffer {
+ size_t count;
+ char data[UVC_DEBUGFS_BUF_SIZE];
+};
+
+static int uvc_debugfs_stats_open(struct inode *inode, struct file *file)
+{
+ struct uvc_streaming *stream = inode->i_private;
+ struct uvc_debugfs_buffer *buf;
+
+ buf = kmalloc(sizeof(*buf), GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ buf->count = uvc_video_stats_dump(stream, buf->data, sizeof(buf->data));
+
+ file->private_data = buf;
+ return 0;
+}
+
+static ssize_t uvc_debugfs_stats_read(struct file *file, char __user *user_buf,
+ size_t nbytes, loff_t *ppos)
+{
+ struct uvc_debugfs_buffer *buf = file->private_data;
+
+ return simple_read_from_buffer(user_buf, nbytes, ppos, buf->data,
+ buf->count);
+}
+
+static int uvc_debugfs_stats_release(struct inode *inode, struct file *file)
+{
+ kfree(file->private_data);
+ file->private_data = NULL;
+
+ return 0;
+}
+
+static const struct file_operations uvc_debugfs_stats_fops = {
+ .owner = THIS_MODULE,
+ .open = uvc_debugfs_stats_open,
+ .llseek = no_llseek,
+ .read = uvc_debugfs_stats_read,
+ .release = uvc_debugfs_stats_release,
+};
+
+/* -----------------------------------------------------------------------------
+ * Global and stream initialization/cleanup
+ */
+
+static struct dentry *uvc_debugfs_root_dir;
+
+int uvc_debugfs_init_stream(struct uvc_streaming *stream)
+{
+ struct usb_device *udev = stream->dev->udev;
+ struct dentry *dent;
+ char dir_name[32];
+
+ if (uvc_debugfs_root_dir == NULL)
+ return -ENODEV;
+
+ sprintf(dir_name, "%u-%u", udev->bus->busnum, udev->devnum);
+
+ dent = debugfs_create_dir(dir_name, uvc_debugfs_root_dir);
+ if (IS_ERR_OR_NULL(dent)) {
+ uvc_printk(KERN_INFO, "Unable to create debugfs %s "
+ "directory.\n", dir_name);
+ return -ENODEV;
+ }
+
+ stream->debugfs_dir = dent;
+
+ dent = debugfs_create_file("stats", 0444, stream->debugfs_dir,
+ stream, &uvc_debugfs_stats_fops);
+ if (IS_ERR_OR_NULL(dent)) {
+ uvc_printk(KERN_INFO, "Unable to create debugfs stats file.\n");
+ uvc_debugfs_cleanup_stream(stream);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+void uvc_debugfs_cleanup_stream(struct uvc_streaming *stream)
+{
+ if (stream->debugfs_dir == NULL)
+ return;
+
+ debugfs_remove_recursive(stream->debugfs_dir);
+ stream->debugfs_dir = NULL;
+}
+
+int uvc_debugfs_init(void)
+{
+ struct dentry *dir;
+
+ dir = debugfs_create_dir("uvcvideo", usb_debug_root);
+ if (IS_ERR_OR_NULL(dir)) {
+ uvc_printk(KERN_INFO, "Unable to create debugfs directory\n");
+ return -ENODATA;
+ }
+
+ uvc_debugfs_root_dir = dir;
+ return 0;
+}
+
+void uvc_debugfs_cleanup(void)
+{
+ if (uvc_debugfs_root_dir != NULL)
+ debugfs_remove_recursive(uvc_debugfs_root_dir);
+}
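
For reference, the statistics exported by the new uvc_debugfs.c can be read from userspace once debugfs is mounted. A small sketch, assuming debugfs at /sys/kernel/debug, usb_debug_root mapping to its "usb" subdirectory, and a camera on bus 1 as device 3 (all system-dependent assumptions):

#include <stdio.h>

int main(void)
{
	char buf[1024];
	FILE *f = fopen("/sys/kernel/debug/usb/uvcvideo/1-3/stats", "r");
	size_t n;

	if (!f)
		return 1;
	n = fread(buf, 1, sizeof(buf) - 1, f);
	buf[n] = '\0';
	fputs(buf, stdout);	/* dump the per-stream statistics */
	fclose(f);
	return 0;
}
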
diff --git a/drivers/media/video/uvc/uvc_driver.c b/drivers/media/video/uvc/uvc_driver.c
index 656d4c9e3b9f..a240d43d15d1 100644
--- a/drivers/media/video/uvc/uvc_driver.c
+++ b/drivers/media/video/uvc/uvc_driver.c
@@ -1675,6 +1675,8 @@ static void uvc_unregister_video(struct uvc_device *dev)
video_unregister_device(stream->vdev);
stream->vdev = NULL;
+
+ uvc_debugfs_cleanup_stream(stream);
}
/* Decrement the stream count and call uvc_delete explicitly if there
@@ -1700,6 +1702,8 @@ static int uvc_register_video(struct uvc_device *dev,
return ret;
}
+ uvc_debugfs_init_stream(stream);
+
/* Register the device with V4L. */
vdev = video_device_alloc();
if (vdev == NULL) {
@@ -2033,6 +2037,15 @@ MODULE_PARM_DESC(timeout, "Streaming control requests timeout");
* though they are compliant.
*/
static struct usb_device_id uvc_ids[] = {
+ /* LogiLink Wireless Webcam */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x0416,
+ .idProduct = 0xa91a,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = UVC_QUIRK_PROBE_MINMAX },
/* Genius eFace 2025 */
{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE
| USB_DEVICE_ID_MATCH_INT_INFO,
@@ -2396,17 +2409,24 @@ struct uvc_driver uvc_driver = {
static int __init uvc_init(void)
{
- int result;
+ int ret;
+
+ uvc_debugfs_init();
- result = usb_register(&uvc_driver.driver);
- if (result == 0)
- printk(KERN_INFO DRIVER_DESC " (" DRIVER_VERSION ")\n");
- return result;
+ ret = usb_register(&uvc_driver.driver);
+ if (ret < 0) {
+ uvc_debugfs_cleanup();
+ return ret;
+ }
+
+ printk(KERN_INFO DRIVER_DESC " (" DRIVER_VERSION ")\n");
+ return 0;
}
static void __exit uvc_cleanup(void)
{
usb_deregister(&uvc_driver.driver);
+ uvc_debugfs_cleanup();
}
module_init(uvc_init);
diff --git a/drivers/media/video/uvc/uvc_isight.c b/drivers/media/video/uvc/uvc_isight.c
index 74bbe8f18f3e..8510e7259e76 100644
--- a/drivers/media/video/uvc/uvc_isight.c
+++ b/drivers/media/video/uvc/uvc_isight.c
@@ -74,7 +74,7 @@ static int isight_decode(struct uvc_video_queue *queue, struct uvc_buffer *buf,
* Empty buffers (bytesused == 0) don't trigger end of frame detection
* as it doesn't make sense to return an empty buffer.
*/
- if (is_header && buf->buf.bytesused != 0) {
+ if (is_header && buf->bytesused != 0) {
buf->state = UVC_BUF_STATE_DONE;
return -EAGAIN;
}
@@ -83,13 +83,13 @@ static int isight_decode(struct uvc_video_queue *queue, struct uvc_buffer *buf,
* contain no data.
*/
if (!is_header) {
- maxlen = buf->buf.length - buf->buf.bytesused;
- mem = queue->mem + buf->buf.m.offset + buf->buf.bytesused;
+ maxlen = buf->length - buf->bytesused;
+ mem = buf->mem + buf->bytesused;
nbytes = min(len, maxlen);
memcpy(mem, data, nbytes);
- buf->buf.bytesused += nbytes;
+ buf->bytesused += nbytes;
- if (len > maxlen || buf->buf.bytesused == buf->buf.length) {
+ if (len > maxlen || buf->bytesused == buf->length) {
uvc_trace(UVC_TRACE_FRAME, "Frame complete "
"(overflow).\n");
buf->state = UVC_BUF_STATE_DONE;
diff --git a/drivers/media/video/uvc/uvc_queue.c b/drivers/media/video/uvc/uvc_queue.c
index 677691c44500..518f77d3a4d8 100644
--- a/drivers/media/video/uvc/uvc_queue.c
+++ b/drivers/media/video/uvc/uvc_queue.c
@@ -11,6 +11,7 @@
*
*/
+#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/list.h>
@@ -19,7 +20,7 @@
#include <linux/videodev2.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
-#include <linux/atomic.h>
+#include <media/videobuf2-vmalloc.h>
#include "uvcvideo.h"
@@ -29,467 +30,211 @@
* Video queues is initialized by uvc_queue_init(). The function performs
* basic initialization of the uvc_video_queue struct and never fails.
*
- * Video buffer allocation and freeing are performed by uvc_alloc_buffers and
- * uvc_free_buffers respectively. The former acquires the video queue lock,
- * while the later must be called with the lock held (so that allocation can
- * free previously allocated buffers). Trying to free buffers that are mapped
- * to user space will return -EBUSY.
- *
- * Video buffers are managed using two queues. However, unlike most USB video
- * drivers that use an in queue and an out queue, we use a main queue to hold
- * all queued buffers (both 'empty' and 'done' buffers), and an irq queue to
- * hold empty buffers. This design (copied from video-buf) minimizes locking
- * in interrupt, as only one queue is shared between interrupt and user
- * contexts.
- *
- * Use cases
- * ---------
- *
- * Unless stated otherwise, all operations that modify the irq buffers queue
- * are protected by the irq spinlock.
- *
- * 1. The user queues the buffers, starts streaming and dequeues a buffer.
- *
- * The buffers are added to the main and irq queues. Both operations are
- * protected by the queue lock, and the later is protected by the irq
- * spinlock as well.
- *
- * The completion handler fetches a buffer from the irq queue and fills it
- * with video data. If no buffer is available (irq queue empty), the handler
- * returns immediately.
- *
- * When the buffer is full, the completion handler removes it from the irq
- * queue, marks it as done (UVC_BUF_STATE_DONE) and wakes its wait queue.
- * At that point, any process waiting on the buffer will be woken up. If a
- * process tries to dequeue a buffer after it has been marked done, the
- * dequeing will succeed immediately.
- *
- * 2. Buffers are queued, user is waiting on a buffer and the device gets
- * disconnected.
- *
- * When the device is disconnected, the kernel calls the completion handler
- * with an appropriate status code. The handler marks all buffers in the
- * irq queue as being erroneous (UVC_BUF_STATE_ERROR) and wakes them up so
- * that any process waiting on a buffer gets woken up.
- *
- * Waking up up the first buffer on the irq list is not enough, as the
- * process waiting on the buffer might restart the dequeue operation
- * immediately.
- *
+ * Video buffers are managed by videobuf2. The driver uses a mutex to protect
+ * the videobuf2 queue operations by serializing calls to videobuf2 and a
+ * spinlock to protect the IRQ queue that holds the buffers to be processed by
+ * the driver.
*/
-void uvc_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type,
- int drop_corrupted)
-{
- mutex_init(&queue->mutex);
- spin_lock_init(&queue->irqlock);
- INIT_LIST_HEAD(&queue->mainqueue);
- INIT_LIST_HEAD(&queue->irqqueue);
- queue->flags = drop_corrupted ? UVC_QUEUE_DROP_CORRUPTED : 0;
- queue->type = type;
-}
-
-/*
- * Free the video buffers.
- *
- * This function must be called with the queue lock held.
+/* -----------------------------------------------------------------------------
+ * videobuf2 queue operations
*/
-static int __uvc_free_buffers(struct uvc_video_queue *queue)
+
+static int uvc_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], void *alloc_ctxs[])
{
- unsigned int i;
+ struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
+ struct uvc_streaming *stream =
+ container_of(queue, struct uvc_streaming, queue);
- for (i = 0; i < queue->count; ++i) {
- if (queue->buffer[i].vma_use_count != 0)
- return -EBUSY;
- }
+ if (*nbuffers > UVC_MAX_VIDEO_BUFFERS)
+ *nbuffers = UVC_MAX_VIDEO_BUFFERS;
- if (queue->count) {
- uvc_queue_cancel(queue, 0);
- INIT_LIST_HEAD(&queue->mainqueue);
- vfree(queue->mem);
- queue->count = 0;
- }
+ *nplanes = 1;
+
+ sizes[0] = stream->ctrl.dwMaxVideoFrameSize;
return 0;
}
-int uvc_free_buffers(struct uvc_video_queue *queue)
+static int uvc_buffer_prepare(struct vb2_buffer *vb)
{
- int ret;
+ struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
+ struct uvc_buffer *buf = container_of(vb, struct uvc_buffer, buf);
- mutex_lock(&queue->mutex);
- ret = __uvc_free_buffers(queue);
- mutex_unlock(&queue->mutex);
+ if (vb->v4l2_buf.type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
+ vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0)) {
+ uvc_trace(UVC_TRACE_CAPTURE, "[E] Bytes used out of bounds.\n");
+ return -EINVAL;
+ }
- return ret;
-}
+ if (unlikely(queue->flags & UVC_QUEUE_DISCONNECTED))
+ return -ENODEV;
-/*
- * Allocate the video buffers.
- *
- * Pages are reserved to make sure they will not be swapped, as they will be
- * filled in the URB completion handler.
- *
- * Buffers will be individually mapped, so they must all be page aligned.
- */
-int uvc_alloc_buffers(struct uvc_video_queue *queue, unsigned int nbuffers,
- unsigned int buflength)
-{
- unsigned int bufsize = PAGE_ALIGN(buflength);
- unsigned int i;
- void *mem = NULL;
- int ret;
+ buf->state = UVC_BUF_STATE_QUEUED;
+ buf->error = 0;
+ buf->mem = vb2_plane_vaddr(vb, 0);
+ buf->length = vb2_plane_size(vb, 0);
+ if (vb->v4l2_buf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ buf->bytesused = 0;
+ else
+ buf->bytesused = vb2_get_plane_payload(vb, 0);
- if (nbuffers > UVC_MAX_VIDEO_BUFFERS)
- nbuffers = UVC_MAX_VIDEO_BUFFERS;
+ return 0;
+}
- mutex_lock(&queue->mutex);
+static void uvc_buffer_queue(struct vb2_buffer *vb)
+{
+ struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
+ struct uvc_buffer *buf = container_of(vb, struct uvc_buffer, buf);
+ unsigned long flags;
- if ((ret = __uvc_free_buffers(queue)) < 0)
- goto done;
+ spin_lock_irqsave(&queue->irqlock, flags);
+ if (likely(!(queue->flags & UVC_QUEUE_DISCONNECTED))) {
+ list_add_tail(&buf->queue, &queue->irqqueue);
+ } else {
+ /* If the device is disconnected return the buffer to userspace
+ * directly. The next QBUF call will fail with -ENODEV.
+ */
+ buf->state = UVC_BUF_STATE_ERROR;
+ vb2_buffer_done(&buf->buf, VB2_BUF_STATE_ERROR);
+ }
- /* Bail out if no buffers should be allocated. */
- if (nbuffers == 0)
- goto done;
+ spin_unlock_irqrestore(&queue->irqlock, flags);
+}
- /* Decrement the number of buffers until allocation succeeds. */
- for (; nbuffers > 0; --nbuffers) {
- mem = vmalloc_32(nbuffers * bufsize);
- if (mem != NULL)
- break;
- }
+static int uvc_buffer_finish(struct vb2_buffer *vb)
+{
+ struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
+ struct uvc_streaming *stream =
+ container_of(queue, struct uvc_streaming, queue);
+ struct uvc_buffer *buf = container_of(vb, struct uvc_buffer, buf);
- if (mem == NULL) {
- ret = -ENOMEM;
- goto done;
- }
+ uvc_video_clock_update(stream, &vb->v4l2_buf, buf);
+ return 0;
+}
- for (i = 0; i < nbuffers; ++i) {
- memset(&queue->buffer[i], 0, sizeof queue->buffer[i]);
- queue->buffer[i].buf.index = i;
- queue->buffer[i].buf.m.offset = i * bufsize;
- queue->buffer[i].buf.length = buflength;
- queue->buffer[i].buf.type = queue->type;
- queue->buffer[i].buf.field = V4L2_FIELD_NONE;
- queue->buffer[i].buf.memory = V4L2_MEMORY_MMAP;
- queue->buffer[i].buf.flags = 0;
- init_waitqueue_head(&queue->buffer[i].wait);
- }
+static struct vb2_ops uvc_queue_qops = {
+ .queue_setup = uvc_queue_setup,
+ .buf_prepare = uvc_buffer_prepare,
+ .buf_queue = uvc_buffer_queue,
+ .buf_finish = uvc_buffer_finish,
+};
- queue->mem = mem;
- queue->count = nbuffers;
- queue->buf_size = bufsize;
- ret = nbuffers;
+void uvc_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type,
+ int drop_corrupted)
+{
+ queue->queue.type = type;
+ queue->queue.io_modes = VB2_MMAP;
+ queue->queue.drv_priv = queue;
+ queue->queue.buf_struct_size = sizeof(struct uvc_buffer);
+ queue->queue.ops = &uvc_queue_qops;
+ queue->queue.mem_ops = &vb2_vmalloc_memops;
+ vb2_queue_init(&queue->queue);
-done:
- mutex_unlock(&queue->mutex);
- return ret;
+ mutex_init(&queue->mutex);
+ spin_lock_init(&queue->irqlock);
+ INIT_LIST_HEAD(&queue->irqqueue);
+ queue->flags = drop_corrupted ? UVC_QUEUE_DROP_CORRUPTED : 0;
}
-/*
- * Check if buffers have been allocated.
+/* -----------------------------------------------------------------------------
+ * V4L2 queue operations
*/
-int uvc_queue_allocated(struct uvc_video_queue *queue)
+
+int uvc_alloc_buffers(struct uvc_video_queue *queue,
+ struct v4l2_requestbuffers *rb)
{
- int allocated;
+ int ret;
mutex_lock(&queue->mutex);
- allocated = queue->count != 0;
+ ret = vb2_reqbufs(&queue->queue, rb);
mutex_unlock(&queue->mutex);
- return allocated;
+ return ret ? ret : rb->count;
}
-static void __uvc_query_buffer(struct uvc_buffer *buf,
- struct v4l2_buffer *v4l2_buf)
+void uvc_free_buffers(struct uvc_video_queue *queue)
{
- memcpy(v4l2_buf, &buf->buf, sizeof *v4l2_buf);
-
- if (buf->vma_use_count)
- v4l2_buf->flags |= V4L2_BUF_FLAG_MAPPED;
-
- switch (buf->state) {
- case UVC_BUF_STATE_ERROR:
- case UVC_BUF_STATE_DONE:
- v4l2_buf->flags |= V4L2_BUF_FLAG_DONE;
- break;
- case UVC_BUF_STATE_QUEUED:
- case UVC_BUF_STATE_ACTIVE:
- case UVC_BUF_STATE_READY:
- v4l2_buf->flags |= V4L2_BUF_FLAG_QUEUED;
- break;
- case UVC_BUF_STATE_IDLE:
- default:
- break;
- }
+ mutex_lock(&queue->mutex);
+ vb2_queue_release(&queue->queue);
+ mutex_unlock(&queue->mutex);
}
-int uvc_query_buffer(struct uvc_video_queue *queue,
- struct v4l2_buffer *v4l2_buf)
+int uvc_query_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf)
{
- int ret = 0;
+ int ret;
mutex_lock(&queue->mutex);
- if (v4l2_buf->index >= queue->count) {
- ret = -EINVAL;
- goto done;
- }
-
- __uvc_query_buffer(&queue->buffer[v4l2_buf->index], v4l2_buf);
-
-done:
+ ret = vb2_querybuf(&queue->queue, buf);
mutex_unlock(&queue->mutex);
+
return ret;
}
-/*
- * Queue a video buffer. Attempting to queue a buffer that has already been
- * queued will return -EINVAL.
- */
-int uvc_queue_buffer(struct uvc_video_queue *queue,
- struct v4l2_buffer *v4l2_buf)
+int uvc_queue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf)
{
- struct uvc_buffer *buf;
- unsigned long flags;
- int ret = 0;
-
- uvc_trace(UVC_TRACE_CAPTURE, "Queuing buffer %u.\n", v4l2_buf->index);
-
- if (v4l2_buf->type != queue->type ||
- v4l2_buf->memory != V4L2_MEMORY_MMAP) {
- uvc_trace(UVC_TRACE_CAPTURE, "[E] Invalid buffer type (%u) "
- "and/or memory (%u).\n", v4l2_buf->type,
- v4l2_buf->memory);
- return -EINVAL;
- }
+ int ret;
mutex_lock(&queue->mutex);
- if (v4l2_buf->index >= queue->count) {
- uvc_trace(UVC_TRACE_CAPTURE, "[E] Out of range index.\n");
- ret = -EINVAL;
- goto done;
- }
-
- buf = &queue->buffer[v4l2_buf->index];
- if (buf->state != UVC_BUF_STATE_IDLE) {
- uvc_trace(UVC_TRACE_CAPTURE, "[E] Invalid buffer state "
- "(%u).\n", buf->state);
- ret = -EINVAL;
- goto done;
- }
-
- if (v4l2_buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
- v4l2_buf->bytesused > buf->buf.length) {
- uvc_trace(UVC_TRACE_CAPTURE, "[E] Bytes used out of bounds.\n");
- ret = -EINVAL;
- goto done;
- }
-
- spin_lock_irqsave(&queue->irqlock, flags);
- if (queue->flags & UVC_QUEUE_DISCONNECTED) {
- spin_unlock_irqrestore(&queue->irqlock, flags);
- ret = -ENODEV;
- goto done;
- }
- buf->state = UVC_BUF_STATE_QUEUED;
- if (v4l2_buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
- buf->buf.bytesused = 0;
- else
- buf->buf.bytesused = v4l2_buf->bytesused;
-
- list_add_tail(&buf->stream, &queue->mainqueue);
- list_add_tail(&buf->queue, &queue->irqqueue);
- spin_unlock_irqrestore(&queue->irqlock, flags);
-
-done:
+ ret = vb2_qbuf(&queue->queue, buf);
mutex_unlock(&queue->mutex);
- return ret;
-}
-static int uvc_queue_waiton(struct uvc_buffer *buf, int nonblocking)
-{
- if (nonblocking) {
- return (buf->state != UVC_BUF_STATE_QUEUED &&
- buf->state != UVC_BUF_STATE_ACTIVE &&
- buf->state != UVC_BUF_STATE_READY)
- ? 0 : -EAGAIN;
- }
-
- return wait_event_interruptible(buf->wait,
- buf->state != UVC_BUF_STATE_QUEUED &&
- buf->state != UVC_BUF_STATE_ACTIVE &&
- buf->state != UVC_BUF_STATE_READY);
+ return ret;
}
-/*
- * Dequeue a video buffer. If nonblocking is false, block until a buffer is
- * available.
- */
-int uvc_dequeue_buffer(struct uvc_video_queue *queue,
- struct v4l2_buffer *v4l2_buf, int nonblocking)
+int uvc_dequeue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf,
+ int nonblocking)
{
- struct uvc_buffer *buf;
- int ret = 0;
-
- if (v4l2_buf->type != queue->type ||
- v4l2_buf->memory != V4L2_MEMORY_MMAP) {
- uvc_trace(UVC_TRACE_CAPTURE, "[E] Invalid buffer type (%u) "
- "and/or memory (%u).\n", v4l2_buf->type,
- v4l2_buf->memory);
- return -EINVAL;
- }
+ int ret;
mutex_lock(&queue->mutex);
- if (list_empty(&queue->mainqueue)) {
- uvc_trace(UVC_TRACE_CAPTURE, "[E] Empty buffer queue.\n");
- ret = -EINVAL;
- goto done;
- }
-
- buf = list_first_entry(&queue->mainqueue, struct uvc_buffer, stream);
- if ((ret = uvc_queue_waiton(buf, nonblocking)) < 0)
- goto done;
-
- uvc_trace(UVC_TRACE_CAPTURE, "Dequeuing buffer %u (%u, %u bytes).\n",
- buf->buf.index, buf->state, buf->buf.bytesused);
-
- switch (buf->state) {
- case UVC_BUF_STATE_ERROR:
- uvc_trace(UVC_TRACE_CAPTURE, "[W] Corrupted data "
- "(transmission error).\n");
- ret = -EIO;
- case UVC_BUF_STATE_DONE:
- buf->state = UVC_BUF_STATE_IDLE;
- break;
-
- case UVC_BUF_STATE_IDLE:
- case UVC_BUF_STATE_QUEUED:
- case UVC_BUF_STATE_ACTIVE:
- case UVC_BUF_STATE_READY:
- default:
- uvc_trace(UVC_TRACE_CAPTURE, "[E] Invalid buffer state %u "
- "(driver bug?).\n", buf->state);
- ret = -EINVAL;
- goto done;
- }
-
- list_del(&buf->stream);
- __uvc_query_buffer(buf, v4l2_buf);
-
-done:
+ ret = vb2_dqbuf(&queue->queue, buf, nonblocking);
mutex_unlock(&queue->mutex);
- return ret;
-}
-/*
- * VMA operations.
- */
-static void uvc_vm_open(struct vm_area_struct *vma)
-{
- struct uvc_buffer *buffer = vma->vm_private_data;
- buffer->vma_use_count++;
-}
-
-static void uvc_vm_close(struct vm_area_struct *vma)
-{
- struct uvc_buffer *buffer = vma->vm_private_data;
- buffer->vma_use_count--;
+ return ret;
}
-static const struct vm_operations_struct uvc_vm_ops = {
- .open = uvc_vm_open,
- .close = uvc_vm_close,
-};
-
-/*
- * Memory-map a video buffer.
- *
- * This function implements video buffers memory mapping and is intended to be
- * used by the device mmap handler.
- */
int uvc_queue_mmap(struct uvc_video_queue *queue, struct vm_area_struct *vma)
{
- struct uvc_buffer *uninitialized_var(buffer);
- struct page *page;
- unsigned long addr, start, size;
- unsigned int i;
- int ret = 0;
-
- start = vma->vm_start;
- size = vma->vm_end - vma->vm_start;
+ int ret;
mutex_lock(&queue->mutex);
+ ret = vb2_mmap(&queue->queue, vma);
+ mutex_unlock(&queue->mutex);
- for (i = 0; i < queue->count; ++i) {
- buffer = &queue->buffer[i];
- if ((buffer->buf.m.offset >> PAGE_SHIFT) == vma->vm_pgoff)
- break;
- }
-
- if (i == queue->count || PAGE_ALIGN(size) != queue->buf_size) {
- ret = -EINVAL;
- goto done;
- }
-
- /*
- * VM_IO marks the area as being an mmaped region for I/O to a
- * device. It also prevents the region from being core dumped.
- */
- vma->vm_flags |= VM_IO;
-
- addr = (unsigned long)queue->mem + buffer->buf.m.offset;
-#ifdef CONFIG_MMU
- while (size > 0) {
- page = vmalloc_to_page((void *)addr);
- if ((ret = vm_insert_page(vma, start, page)) < 0)
- goto done;
-
- start += PAGE_SIZE;
- addr += PAGE_SIZE;
- size -= PAGE_SIZE;
- }
-#endif
+ return ret;
+}
- vma->vm_ops = &uvc_vm_ops;
- vma->vm_private_data = buffer;
- uvc_vm_open(vma);
+unsigned int uvc_queue_poll(struct uvc_video_queue *queue, struct file *file,
+ poll_table *wait)
+{
+ unsigned int ret;
-done:
+ mutex_lock(&queue->mutex);
+ ret = vb2_poll(&queue->queue, file, wait);
mutex_unlock(&queue->mutex);
+
return ret;
}
-/*
- * Poll the video queue.
+/* -----------------------------------------------------------------------------
*
- * This function implements video queue polling and is intended to be used by
- * the device poll handler.
*/
-unsigned int uvc_queue_poll(struct uvc_video_queue *queue, struct file *file,
- poll_table *wait)
+
+/*
+ * Check if buffers have been allocated.
+ */
+int uvc_queue_allocated(struct uvc_video_queue *queue)
{
- struct uvc_buffer *buf;
- unsigned int mask = 0;
+ int allocated;
mutex_lock(&queue->mutex);
- if (list_empty(&queue->mainqueue)) {
- mask |= POLLERR;
- goto done;
- }
- buf = list_first_entry(&queue->mainqueue, struct uvc_buffer, stream);
-
- poll_wait(file, &buf->wait, wait);
- if (buf->state == UVC_BUF_STATE_DONE ||
- buf->state == UVC_BUF_STATE_ERROR) {
- if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
- mask |= POLLIN | POLLRDNORM;
- else
- mask |= POLLOUT | POLLWRNORM;
- }
-
-done:
+ allocated = vb2_is_busy(&queue->queue);
mutex_unlock(&queue->mutex);
- return mask;
+
+ return allocated;
}
#ifndef CONFIG_MMU
@@ -515,7 +260,7 @@ unsigned long uvc_queue_get_unmapped_area(struct uvc_video_queue *queue,
ret = -EINVAL;
goto done;
}
- ret = (unsigned long)queue->mem + buffer->buf.m.offset;
+ ret = (unsigned long)buf->mem;
done:
mutex_unlock(&queue->mutex);
return ret;
@@ -540,27 +285,24 @@ done:
*/
int uvc_queue_enable(struct uvc_video_queue *queue, int enable)
{
- unsigned int i;
- int ret = 0;
+ unsigned long flags;
+ int ret;
mutex_lock(&queue->mutex);
if (enable) {
- if (uvc_queue_streaming(queue)) {
- ret = -EBUSY;
+ ret = vb2_streamon(&queue->queue, queue->queue.type);
+ if (ret < 0)
goto done;
- }
- queue->flags |= UVC_QUEUE_STREAMING;
+
queue->buf_used = 0;
} else {
- uvc_queue_cancel(queue, 0);
- INIT_LIST_HEAD(&queue->mainqueue);
-
- for (i = 0; i < queue->count; ++i) {
- queue->buffer[i].error = 0;
- queue->buffer[i].state = UVC_BUF_STATE_IDLE;
- }
+ ret = vb2_streamoff(&queue->queue, queue->queue.type);
+ if (ret < 0)
+ goto done;
- queue->flags &= ~UVC_QUEUE_STREAMING;
+ spin_lock_irqsave(&queue->irqlock, flags);
+ INIT_LIST_HEAD(&queue->irqqueue);
+ spin_unlock_irqrestore(&queue->irqlock, flags);
}
done:
@@ -591,12 +333,12 @@ void uvc_queue_cancel(struct uvc_video_queue *queue, int disconnect)
queue);
list_del(&buf->queue);
buf->state = UVC_BUF_STATE_ERROR;
- wake_up(&buf->wait);
+ vb2_buffer_done(&buf->buf, VB2_BUF_STATE_ERROR);
}
/* This must be protected by the irqlock spinlock to avoid race
- * conditions between uvc_queue_buffer and the disconnection event that
+ * conditions between uvc_buffer_queue and the disconnection event that
* could result in an interruptible wait in uvc_dequeue_buffer. Do not
- * blindly replace this logic by checking for the UVC_DEV_DISCONNECTED
+ * blindly replace this logic by checking for the UVC_QUEUE_DISCONNECTED
* state outside the queue code.
*/
if (disconnect)
@@ -613,14 +355,12 @@ struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue,
if ((queue->flags & UVC_QUEUE_DROP_CORRUPTED) && buf->error) {
buf->error = 0;
buf->state = UVC_BUF_STATE_QUEUED;
- buf->buf.bytesused = 0;
+ vb2_set_plane_payload(&buf->buf, 0, 0);
return buf;
}
spin_lock_irqsave(&queue->irqlock, flags);
list_del(&buf->queue);
- buf->error = 0;
- buf->state = UVC_BUF_STATE_DONE;
if (!list_empty(&queue->irqqueue))
nextbuf = list_first_entry(&queue->irqqueue, struct uvc_buffer,
queue);
@@ -628,7 +368,9 @@ struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue,
nextbuf = NULL;
spin_unlock_irqrestore(&queue->irqlock, flags);
- wake_up(&buf->wait);
+ buf->state = buf->error ? UVC_BUF_STATE_ERROR : UVC_BUF_STATE_DONE;
+ vb2_set_plane_payload(&buf->buf, 0, buf->bytesused);
+ vb2_buffer_done(&buf->buf, VB2_BUF_STATE_DONE);
+
return nextbuf;
}
-
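
The videobuf2 conversion above keeps a driver-private irqqueue for buffers awaiting data and lets videobuf2 hand completed buffers back to userspace. A sketch of the completion-side pattern, using illustrative types and names rather than the driver's own:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <media/videobuf2-core.h>

struct my_buffer {
	struct vb2_buffer buf;		/* must stay first: videobuf2 owns it */
	struct list_head queue;		/* link on the driver's irqqueue */
	unsigned int bytesused;		/* filled by the URB handler */
};

static void my_complete_buffer(spinlock_t *irqlock, struct list_head *irqqueue)
{
	struct my_buffer *buf = NULL;
	unsigned long flags;

	/* Only the irqqueue needs the spinlock; videobuf2 handles the rest. */
	spin_lock_irqsave(irqlock, flags);
	if (!list_empty(irqqueue)) {
		buf = list_first_entry(irqqueue, struct my_buffer, queue);
		list_del(&buf->queue);
	}
	spin_unlock_irqrestore(irqlock, flags);

	if (!buf)
		return;

	vb2_set_plane_payload(&buf->buf, 0, buf->bytesused);
	vb2_buffer_done(&buf->buf, VB2_BUF_STATE_DONE);
}
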
diff --git a/drivers/media/video/uvc/uvc_v4l2.c b/drivers/media/video/uvc/uvc_v4l2.c
index dadf11f704dc..2ae4f880ea05 100644
--- a/drivers/media/video/uvc/uvc_v4l2.c
+++ b/drivers/media/video/uvc/uvc_v4l2.c
@@ -58,6 +58,15 @@ static int uvc_ioctl_ctrl_map(struct uvc_video_chain *chain,
break;
case V4L2_CTRL_TYPE_MENU:
+ /* Prevent excessive memory consumption, as well as integer
+ * overflows.
+ */
+ if (xmap->menu_count == 0 ||
+ xmap->menu_count > UVC_MAX_CONTROL_MENU_ENTRIES) {
+ ret = -EINVAL;
+ goto done;
+ }
+
size = xmap->menu_count * sizeof(*map->menu_info);
map->menu_info = kmalloc(size, GFP_KERNEL);
if (map->menu_info == NULL) {
@@ -513,10 +522,7 @@ static int uvc_v4l2_release(struct file *file)
/* Only free resources if this is a privileged handle. */
if (uvc_has_privileges(handle)) {
uvc_video_enable(stream, 0);
-
- if (uvc_free_buffers(&stream->queue) < 0)
- uvc_printk(KERN_ERR, "uvc_v4l2_release: Unable to "
- "free buffers.\n");
+ uvc_free_buffers(&stream->queue);
}
/* Release the file handle. */
@@ -914,19 +920,11 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
/* Buffers & streaming */
case VIDIOC_REQBUFS:
- {
- struct v4l2_requestbuffers *rb = arg;
-
- if (rb->type != stream->type ||
- rb->memory != V4L2_MEMORY_MMAP)
- return -EINVAL;
-
if ((ret = uvc_acquire_privileges(handle)) < 0)
return ret;
mutex_lock(&stream->mutex);
- ret = uvc_alloc_buffers(&stream->queue, rb->count,
- stream->ctrl.dwMaxVideoFrameSize);
+ ret = uvc_alloc_buffers(&stream->queue, arg);
mutex_unlock(&stream->mutex);
if (ret < 0)
return ret;
@@ -934,18 +932,13 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
if (ret == 0)
uvc_dismiss_privileges(handle);
- rb->count = ret;
ret = 0;
break;
- }
case VIDIOC_QUERYBUF:
{
struct v4l2_buffer *buf = arg;
- if (buf->type != stream->type)
- return -EINVAL;
-
if (!uvc_has_privileges(handle))
return -EBUSY;
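
The uvc_v4l2.c hunk that bounds xmap->menu_count guards the menu_count * sizeof(...) computation against integer overflow and oversized allocations. A generic sketch of that pattern; MY_MAX_ENTRIES is an illustrative limit, not a UVC constant:

#include <linux/slab.h>

#define MY_MAX_ENTRIES 32	/* illustrative limit, not a UVC constant */

static void *alloc_entries(unsigned int count, size_t entry_size)
{
	if (count == 0 || count > MY_MAX_ENTRIES)
		return NULL;

	/* kcalloc() also fails cleanly if count * entry_size would overflow. */
	return kcalloc(count, entry_size, GFP_KERNEL);
}
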
diff --git a/drivers/media/video/uvc/uvc_video.c b/drivers/media/video/uvc/uvc_video.c
index b015e8e5e8b0..c7e69b8f81c9 100644
--- a/drivers/media/video/uvc/uvc_video.c
+++ b/drivers/media/video/uvc/uvc_video.c
@@ -351,25 +351,553 @@ done:
return ret;
}
-int uvc_commit_video(struct uvc_streaming *stream,
- struct uvc_streaming_control *probe)
+static int uvc_commit_video(struct uvc_streaming *stream,
+ struct uvc_streaming_control *probe)
{
return uvc_set_video_ctrl(stream, probe, 0);
}
+/* -----------------------------------------------------------------------------
+ * Clocks and timestamps
+ */
+
+static void
+uvc_video_clock_decode(struct uvc_streaming *stream, struct uvc_buffer *buf,
+ const __u8 *data, int len)
+{
+ struct uvc_clock_sample *sample;
+ unsigned int header_size;
+ bool has_pts = false;
+ bool has_scr = false;
+ unsigned long flags;
+ struct timespec ts;
+ u16 host_sof;
+ u16 dev_sof;
+
+ switch (data[1] & (UVC_STREAM_PTS | UVC_STREAM_SCR)) {
+ case UVC_STREAM_PTS | UVC_STREAM_SCR:
+ header_size = 12;
+ has_pts = true;
+ has_scr = true;
+ break;
+ case UVC_STREAM_PTS:
+ header_size = 6;
+ has_pts = true;
+ break;
+ case UVC_STREAM_SCR:
+ header_size = 8;
+ has_scr = true;
+ break;
+ default:
+ header_size = 2;
+ break;
+ }
+
+ /* Check for invalid headers. */
+ if (len < header_size)
+ return;
+
+ /* Extract the timestamps:
+ *
+ * - store the frame PTS in the buffer structure
+ * - if the SCR field is present, retrieve the host SOF counter and
+ * kernel timestamps and store them with the SCR STC and SOF fields
+ * in the ring buffer
+ */
+ if (has_pts && buf != NULL)
+ buf->pts = get_unaligned_le32(&data[2]);
+
+ if (!has_scr)
+ return;
+
+ /* To limit the amount of data, drop SCRs with an SOF identical to the
+ * previous one.
+ */
+ dev_sof = get_unaligned_le16(&data[header_size - 2]);
+ if (dev_sof == stream->clock.last_sof)
+ return;
+
+ stream->clock.last_sof = dev_sof;
+
+ host_sof = usb_get_current_frame_number(stream->dev->udev);
+ ktime_get_ts(&ts);
+
+ /* The UVC specification allows device implementations that can't obtain
+ * the USB frame number to keep their own frame counters as long as they
+ * match the size and frequency of the frame number associated with USB
+ * SOF tokens. The SOF values sent by such devices differ from the USB
+ * SOF tokens by a fixed offset that needs to be estimated and accounted
+ * for to make timestamp recovery as accurate as possible.
+ *
+ * The offset is estimated the first time a device SOF value is received
+ * as the difference between the host and device SOF values. As the two
+ * SOF values can differ slightly due to transmission delays, consider
+ * that the offset is null if the difference is not higher than 10 ms
+ * (negative differences can not happen and are thus considered as an
+ * offset). The video commit control wDelay field should be used to
+ * compute a dynamic threshold instead of using a fixed 10 ms value, but
+ * devices don't report reliable wDelay values.
+ *
+ * See uvc_video_clock_host_sof() for an explanation regarding why only
+ * the 8 LSBs of the delta are kept.
+ */
+ if (stream->clock.sof_offset == (u16)-1) {
+ u16 delta_sof = (host_sof - dev_sof) & 255;
+ if (delta_sof >= 10)
+ stream->clock.sof_offset = delta_sof;
+ else
+ stream->clock.sof_offset = 0;
+ }
+
+ dev_sof = (dev_sof + stream->clock.sof_offset) & 2047;
+
+ spin_lock_irqsave(&stream->clock.lock, flags);
+
+ sample = &stream->clock.samples[stream->clock.head];
+ sample->dev_stc = get_unaligned_le32(&data[header_size - 6]);
+ sample->dev_sof = dev_sof;
+ sample->host_sof = host_sof;
+ sample->host_ts = ts;
+
+ /* Update the sliding window head and count. */
+ stream->clock.head = (stream->clock.head + 1) % stream->clock.size;
+
+ if (stream->clock.count < stream->clock.size)
+ stream->clock.count++;
+
+ spin_unlock_irqrestore(&stream->clock.lock, flags);
+}
+
+static int uvc_video_clock_init(struct uvc_streaming *stream)
+{
+ struct uvc_clock *clock = &stream->clock;
+
+ spin_lock_init(&clock->lock);
+ clock->head = 0;
+ clock->count = 0;
+ clock->size = 32;
+ clock->last_sof = -1;
+ clock->sof_offset = -1;
+
+ clock->samples = kmalloc(clock->size * sizeof(*clock->samples),
+ GFP_KERNEL);
+ if (clock->samples == NULL)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void uvc_video_clock_cleanup(struct uvc_streaming *stream)
+{
+ kfree(stream->clock.samples);
+ stream->clock.samples = NULL;
+}
+
+/*
+ * uvc_video_clock_host_sof - Return the host SOF value for a clock sample
+ *
+ * Host SOF counters reported by usb_get_current_frame_number() usually don't
+ * cover the whole 11-bit SOF range (0-2047) but are limited to the HCI frame
+ * schedule window. They can be limited to 8, 9 or 10 bits depending on the host
+ * controller and its configuration.
+ *
+ * We thus need to recover the SOF value corresponding to the host frame number.
+ * As the device and host frame numbers are sampled in a short interval, the
+ * difference between their values should be equal to a small delta plus an
+ * integer multiple of 256 caused by the host frame number limited precision.
+ *
+ * To obtain the recovered host SOF value, compute the small delta by masking
+ * the high bits of the host frame counter and device SOF difference and add it
+ * to the device SOF value.
+ */
+static u16 uvc_video_clock_host_sof(const struct uvc_clock_sample *sample)
+{
+ /* The delta value can be negative. */
+ s8 delta_sof;
+
+ delta_sof = (sample->host_sof - sample->dev_sof) & 255;
+
+ return (sample->dev_sof + delta_sof) & 2047;
+}
+
+/*
+ * uvc_video_clock_update - Update the buffer timestamp
+ *
+ * This function converts the buffer PTS timestamp to the host clock domain by
+ * going through the USB SOF clock domain and stores the result in the V4L2
+ * buffer timestamp field.
+ *
+ * The relationship between the device clock and the host clock isn't known.
+ * However, the device and the host share the common USB SOF clock which can be
+ * used to recover that relationship.
+ *
+ * The relationship between the device clock and the USB SOF clock is considered
+ * to be linear over the clock samples sliding window and is given by
+ *
+ * SOF = m * PTS + p
+ *
+ * Several methods to compute the slope (m) and intercept (p) can be used. As
+ * the clock drift should be small compared to the sliding window size, we
+ * assume that the line that goes through the points at both ends of the window
+ * is a good approximation. Naming those points P1 and P2, we get
+ *
+ * SOF = (SOF2 - SOF1) / (STC2 - STC1) * PTS
+ * + (SOF1 * STC2 - SOF2 * STC1) / (STC2 - STC1)
+ *
+ * or
+ *
+ * SOF = ((SOF2 - SOF1) * PTS + SOF1 * STC2 - SOF2 * STC1) / (STC2 - STC1) (1)
+ *
+ * to avoid losing precision in the division. Similarly, the host timestamp is
+ * computed with
+ *
+ * TS = ((TS2 - TS1) * PTS + TS1 * SOF2 - TS2 * SOF1) / (SOF2 - SOF1) (2)
+ *
+ * SOF values are coded on 11 bits by USB. We extend their precision with 16
+ * decimal bits, leading to a 11.16 coding.
+ *
+ * TODO: To avoid surprises with device clock values, PTS/STC timestamps should
+ * be normalized using the nominal device clock frequency reported through the
+ * UVC descriptors.
+ *
+ * Both the PTS/STC and SOF counters roll over, after a fixed but device
+ * specific amount of time for PTS/STC and after 2048ms for SOF. As long as the
+ * sliding window size is smaller than the rollover period, differences computed
+ * on unsigned integers will produce the correct result. However, the p term in
+ * the linear relations will be miscomputed.
+ *
+ * To fix the issue, we subtract a constant from the PTS and STC values to bring
+ * PTS to half the 32 bit STC range. The sliding window STC values then fit into
+ * the 32 bit range without any rollover.
+ *
+ * Similarly, we add 2048 to the device SOF values to make sure that the SOF
+ * computed by (1) will never be smaller than 0. This offset is then compensated
+ * by adding 2048 to the SOF values used in (2). However, this doesn't prevent
+ * rollovers between (1) and (2): the SOF value computed by (1) can be slightly
+ * lower than 4096, and the host SOF counters can have rolled over to 2048. This
+ * case is handled by subtracting 2048 from the SOF value if it exceeds the host
+ * SOF value at the end of the sliding window.
+ *
+ * Finally we subtract a constant from the host timestamps to bring the first
+ * timestamp of the sliding window to 1s.
+ */
+void uvc_video_clock_update(struct uvc_streaming *stream,
+ struct v4l2_buffer *v4l2_buf,
+ struct uvc_buffer *buf)
+{
+ struct uvc_clock *clock = &stream->clock;
+ struct uvc_clock_sample *first;
+ struct uvc_clock_sample *last;
+ unsigned long flags;
+ struct timespec ts;
+ u32 delta_stc;
+ u32 y1, y2;
+ u32 x1, x2;
+ u32 mean;
+ u32 sof;
+ u32 div;
+ u32 rem;
+ u64 y;
+
+ spin_lock_irqsave(&clock->lock, flags);
+
+ if (clock->count < clock->size)
+ goto done;
+
+ first = &clock->samples[clock->head];
+ last = &clock->samples[(clock->head - 1) % clock->size];
+
+ /* First step, PTS to SOF conversion. */
+ delta_stc = buf->pts - (1UL << 31);
+ x1 = first->dev_stc - delta_stc;
+ x2 = last->dev_stc - delta_stc;
+ y1 = (first->dev_sof + 2048) << 16;
+ y2 = (last->dev_sof + 2048) << 16;
+
+ if (y2 < y1)
+ y2 += 2048 << 16;
+
+ y = (u64)(y2 - y1) * (1ULL << 31) + (u64)y1 * (u64)x2
+ - (u64)y2 * (u64)x1;
+ y = div_u64(y, x2 - x1);
+
+ sof = y;
+
+ uvc_trace(UVC_TRACE_CLOCK, "%s: PTS %u y %llu.%06llu SOF %u.%06llu "
+ "(x1 %u x2 %u y1 %u y2 %u SOF offset %u)\n",
+ stream->dev->name, buf->pts,
+ y >> 16, div_u64((y & 0xffff) * 1000000, 65536),
+ sof >> 16, div_u64(((u64)sof & 0xffff) * 1000000LLU, 65536),
+ x1, x2, y1, y2, clock->sof_offset);
+
+ /* Second step, SOF to host clock conversion. */
+ ts = timespec_sub(last->host_ts, first->host_ts);
+ x1 = (uvc_video_clock_host_sof(first) + 2048) << 16;
+ x2 = (uvc_video_clock_host_sof(last) + 2048) << 16;
+ y1 = NSEC_PER_SEC;
+ y2 = (ts.tv_sec + 1) * NSEC_PER_SEC + ts.tv_nsec;
+
+ if (x2 < x1)
+ x2 += 2048 << 16;
+
+ /* Interpolated and host SOF timestamps can wrap around at slightly
+ * different times. Handle this by adding or removing 2048 to or from
+ * the computed SOF value to keep it close to the SOF samples mean
+ * value.
+ */
+ mean = (x1 + x2) / 2;
+ if (mean - (1024 << 16) > sof)
+ sof += 2048 << 16;
+ else if (sof > mean + (1024 << 16))
+ sof -= 2048 << 16;
+
+ y = (u64)(y2 - y1) * (u64)sof + (u64)y1 * (u64)x2
+ - (u64)y2 * (u64)x1;
+ y = div_u64(y, x2 - x1);
+
+ div = div_u64_rem(y, NSEC_PER_SEC, &rem);
+ ts.tv_sec = first->host_ts.tv_sec - 1 + div;
+ ts.tv_nsec = first->host_ts.tv_nsec + rem;
+ if (ts.tv_nsec >= NSEC_PER_SEC) {
+ ts.tv_sec++;
+ ts.tv_nsec -= NSEC_PER_SEC;
+ }
+
+ uvc_trace(UVC_TRACE_CLOCK, "%s: SOF %u.%06llu y %llu ts %lu.%06lu "
+ "buf ts %lu.%06lu (x1 %u/%u/%u x2 %u/%u/%u y1 %u y2 %u)\n",
+ stream->dev->name,
+ sof >> 16, div_u64(((u64)sof & 0xffff) * 1000000LLU, 65536),
+ y, ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC,
+ v4l2_buf->timestamp.tv_sec, v4l2_buf->timestamp.tv_usec,
+ x1, first->host_sof, first->dev_sof,
+ x2, last->host_sof, last->dev_sof, y1, y2);
+
+ /* Update the V4L2 buffer. */
+ v4l2_buf->timestamp.tv_sec = ts.tv_sec;
+ v4l2_buf->timestamp.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
+
+done:
+ spin_unlock_irqrestore(&stream->clock.lock, flags);
+}
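
As a sanity check of formula (2), the following standalone userspace sketch (illustration only, not part of the patch) interpolates a host timestamp from two hypothetical sliding-window samples, using the same 11.16 fixed-point SOF representation and 64-bit intermediates as uvc_video_clock_update(); all numeric values are made up.

#include <stdint.h>
#include <stdio.h>

/* TS = ((TS2 - TS1) * SOF + TS1 * SOF2 - TS2 * SOF1) / (SOF2 - SOF1) */
static uint64_t interp_ts(uint32_t sof, uint32_t sof1, uint32_t sof2,
			  uint64_t ts1, uint64_t ts2)
{
	uint64_t y = (ts2 - ts1) * (uint64_t)sof
		   + ts1 * (uint64_t)sof2
		   - ts2 * (uint64_t)sof1;

	return y / (sof2 - sof1);
}

int main(void)
{
	/* Two samples 500 SOF cycles (~500 ms) apart, in 11.16 fixed point. */
	uint32_t sof1 = 100 << 16, sof2 = 600 << 16;
	/* Host timestamps of the two samples, in nanoseconds. */
	uint64_t ts1 = 1000000000ULL, ts2 = 1500000000ULL;
	/* SOF value halfway between the samples: expect 1250000000 ns. */
	uint32_t sof = 350 << 16;

	printf("ts = %llu ns\n",
	       (unsigned long long)interp_ts(sof, sof1, sof2, ts1, ts2));
	return 0;
}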
+
/* ------------------------------------------------------------------------
- * Video codecs
+ * Stream statistics
*/
-/* Values for bmHeaderInfo (Video and Still Image Payload Headers, 2.4.3.3) */
-#define UVC_STREAM_EOH (1 << 7)
-#define UVC_STREAM_ERR (1 << 6)
-#define UVC_STREAM_STI (1 << 5)
-#define UVC_STREAM_RES (1 << 4)
-#define UVC_STREAM_SCR (1 << 3)
-#define UVC_STREAM_PTS (1 << 2)
-#define UVC_STREAM_EOF (1 << 1)
-#define UVC_STREAM_FID (1 << 0)
+static void uvc_video_stats_decode(struct uvc_streaming *stream,
+ const __u8 *data, int len)
+{
+ unsigned int header_size;
+ bool has_pts = false;
+ bool has_scr = false;
+ u16 uninitialized_var(scr_sof);
+ u32 uninitialized_var(scr_stc);
+ u32 uninitialized_var(pts);
+
+ if (stream->stats.stream.nb_frames == 0 &&
+ stream->stats.frame.nb_packets == 0)
+ ktime_get_ts(&stream->stats.stream.start_ts);
+
+ switch (data[1] & (UVC_STREAM_PTS | UVC_STREAM_SCR)) {
+ case UVC_STREAM_PTS | UVC_STREAM_SCR:
+ header_size = 12;
+ has_pts = true;
+ has_scr = true;
+ break;
+ case UVC_STREAM_PTS:
+ header_size = 6;
+ has_pts = true;
+ break;
+ case UVC_STREAM_SCR:
+ header_size = 8;
+ has_scr = true;
+ break;
+ default:
+ header_size = 2;
+ break;
+ }
+
+ /* Check for invalid headers. */
+ if (len < header_size || data[0] < header_size) {
+ stream->stats.frame.nb_invalid++;
+ return;
+ }
+
+ /* Extract the timestamps. */
+ if (has_pts)
+ pts = get_unaligned_le32(&data[2]);
+
+ if (has_scr) {
+ scr_stc = get_unaligned_le32(&data[header_size - 6]);
+ scr_sof = get_unaligned_le16(&data[header_size - 2]);
+ }
+
+ /* Is PTS constant through the whole frame ? */
+ if (has_pts && stream->stats.frame.nb_pts) {
+ if (stream->stats.frame.pts != pts) {
+ stream->stats.frame.nb_pts_diffs++;
+ stream->stats.frame.last_pts_diff =
+ stream->stats.frame.nb_packets;
+ }
+ }
+
+ if (has_pts) {
+ stream->stats.frame.nb_pts++;
+ stream->stats.frame.pts = pts;
+ }
+
+ /* Do all frames have a PTS in their first non-empty packet, or before
+ * their first empty packet ?
+ */
+ if (stream->stats.frame.size == 0) {
+ if (len > header_size)
+ stream->stats.frame.has_initial_pts = has_pts;
+ if (len == header_size && has_pts)
+ stream->stats.frame.has_early_pts = true;
+ }
+
+ /* Do the SCR.STC and SCR.SOF fields vary through the frame ? */
+ if (has_scr && stream->stats.frame.nb_scr) {
+ if (stream->stats.frame.scr_stc != scr_stc)
+ stream->stats.frame.nb_scr_diffs++;
+ }
+
+ if (has_scr) {
+ /* Expand the SOF counter to 32 bits and store its value. */
+ if (stream->stats.stream.nb_frames > 0 ||
+ stream->stats.frame.nb_scr > 0)
+ stream->stats.stream.scr_sof_count +=
+ (scr_sof - stream->stats.stream.scr_sof) % 2048;
+ stream->stats.stream.scr_sof = scr_sof;
+
+ stream->stats.frame.nb_scr++;
+ stream->stats.frame.scr_stc = scr_stc;
+ stream->stats.frame.scr_sof = scr_sof;
+
+ if (scr_sof < stream->stats.stream.min_sof)
+ stream->stats.stream.min_sof = scr_sof;
+ if (scr_sof > stream->stats.stream.max_sof)
+ stream->stats.stream.max_sof = scr_sof;
+ }
+
+ /* Record the first non-empty packet number. */
+ if (stream->stats.frame.size == 0 && len > header_size)
+ stream->stats.frame.first_data = stream->stats.frame.nb_packets;
+
+ /* Update the frame size. */
+ stream->stats.frame.size += len - header_size;
+
+ /* Update the packet counters. */
+ stream->stats.frame.nb_packets++;
+ if (len <= header_size)
+ stream->stats.frame.nb_empty++;
+
+ if (data[1] & UVC_STREAM_ERR)
+ stream->stats.frame.nb_errors++;
+}
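
For reference, the offsets used above to extract the timestamps follow the UVC payload header layout: bHeaderLength, bmHeaderInfo, an optional 4-byte dwPresentationTime, then an optional 6-byte scrSourceClock made of a 32-bit STC followed by a 16-bit field whose low 11 bits hold the SOF counter. A hypothetical standalone decoder, with made-up byte values and no kernel helpers:

#include <stdint.h>
#include <stdio.h>

static uint32_t get_le32(const uint8_t *p)
{
	return p[0] | (p[1] << 8) | ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

static uint16_t get_le16(const uint8_t *p)
{
	return p[0] | (p[1] << 8);
}

int main(void)
{
	/* 12-byte header: bmHeaderInfo has the EOH, SCR and PTS bits set. */
	const uint8_t data[12] = {
		12,				/* bHeaderLength */
		0x8c,				/* bmHeaderInfo */
		0x78, 0x56, 0x34, 0x12,		/* dwPresentationTime (PTS) */
		0x21, 0x43, 0x65, 0x87,		/* scrSourceClock: STC */
		0xff, 0x03,			/* scrSourceClock: SOF */
	};
	unsigned int header_size = 12;

	printf("PTS 0x%08x STC 0x%08x SOF %u\n",
	       get_le32(&data[2]),
	       get_le32(&data[header_size - 6]),
	       get_le16(&data[header_size - 2]));
	return 0;
}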
+
+static void uvc_video_stats_update(struct uvc_streaming *stream)
+{
+ struct uvc_stats_frame *frame = &stream->stats.frame;
+
+ uvc_trace(UVC_TRACE_STATS, "frame %u stats: %u/%u/%u packets, "
+ "%u/%u/%u pts (%searly %sinitial), %u/%u scr, "
+ "last pts/stc/sof %u/%u/%u\n",
+ stream->sequence, frame->first_data,
+ frame->nb_packets - frame->nb_empty, frame->nb_packets,
+ frame->nb_pts_diffs, frame->last_pts_diff, frame->nb_pts,
+ frame->has_early_pts ? "" : "!",
+ frame->has_initial_pts ? "" : "!",
+ frame->nb_scr_diffs, frame->nb_scr,
+ frame->pts, frame->scr_stc, frame->scr_sof);
+
+ stream->stats.stream.nb_frames++;
+ stream->stats.stream.nb_packets += stream->stats.frame.nb_packets;
+ stream->stats.stream.nb_empty += stream->stats.frame.nb_empty;
+ stream->stats.stream.nb_errors += stream->stats.frame.nb_errors;
+ stream->stats.stream.nb_invalid += stream->stats.frame.nb_invalid;
+
+ if (frame->has_early_pts)
+ stream->stats.stream.nb_pts_early++;
+ if (frame->has_initial_pts)
+ stream->stats.stream.nb_pts_initial++;
+ if (frame->last_pts_diff <= frame->first_data)
+ stream->stats.stream.nb_pts_constant++;
+ if (frame->nb_scr >= frame->nb_packets - frame->nb_empty)
+ stream->stats.stream.nb_scr_count_ok++;
+ if (frame->nb_scr_diffs + 1 == frame->nb_scr)
+ stream->stats.stream.nb_scr_diffs_ok++;
+
+ memset(&stream->stats.frame, 0, sizeof(stream->stats.frame));
+}
+
+size_t uvc_video_stats_dump(struct uvc_streaming *stream, char *buf,
+ size_t size)
+{
+ unsigned int scr_sof_freq;
+ unsigned int duration;
+ struct timespec ts;
+ size_t count = 0;
+
+ ts.tv_sec = stream->stats.stream.stop_ts.tv_sec
+ - stream->stats.stream.start_ts.tv_sec;
+ ts.tv_nsec = stream->stats.stream.stop_ts.tv_nsec
+ - stream->stats.stream.start_ts.tv_nsec;
+ if (ts.tv_nsec < 0) {
+ ts.tv_sec--;
+ ts.tv_nsec += 1000000000;
+ }
+
+ /* Compute the SCR.SOF frequency estimate. At the nominal 1kHz SOF
+ * frequency this will not overflow before more than 1h.
+ */
+ duration = ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
+ if (duration != 0)
+ scr_sof_freq = stream->stats.stream.scr_sof_count * 1000
+ / duration;
+ else
+ scr_sof_freq = 0;
+
+ count += scnprintf(buf + count, size - count,
+ "frames: %u\npackets: %u\nempty: %u\n"
+ "errors: %u\ninvalid: %u\n",
+ stream->stats.stream.nb_frames,
+ stream->stats.stream.nb_packets,
+ stream->stats.stream.nb_empty,
+ stream->stats.stream.nb_errors,
+ stream->stats.stream.nb_invalid);
+ count += scnprintf(buf + count, size - count,
+ "pts: %u early, %u initial, %u ok\n",
+ stream->stats.stream.nb_pts_early,
+ stream->stats.stream.nb_pts_initial,
+ stream->stats.stream.nb_pts_constant);
+ count += scnprintf(buf + count, size - count,
+ "scr: %u count ok, %u diff ok\n",
+ stream->stats.stream.nb_scr_count_ok,
+ stream->stats.stream.nb_scr_diffs_ok);
+ count += scnprintf(buf + count, size - count,
+ "sof: %u <= sof <= %u, freq %u.%03u kHz\n",
+ stream->stats.stream.min_sof,
+ stream->stats.stream.max_sof,
+ scr_sof_freq / 1000, scr_sof_freq % 1000);
+
+ return count;
+}
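
Plugging representative numbers into the estimate above (plain C, values made up): a 30 s stream that accumulated 30000 SOF cycles reports the nominal 1.000 kHz.

#include <stdio.h>

int main(void)
{
	unsigned int scr_sof_count = 30000;	/* accumulated SOF cycles */
	unsigned int duration = 30 * 1000;	/* stream duration in ms */
	unsigned int scr_sof_freq = scr_sof_count * 1000 / duration;

	printf("sof: freq %u.%03u kHz\n",
	       scr_sof_freq / 1000, scr_sof_freq % 1000);	/* 1.000 kHz */
	return 0;
}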
+
+static void uvc_video_stats_start(struct uvc_streaming *stream)
+{
+ memset(&stream->stats, 0, sizeof(stream->stats));
+ stream->stats.stream.min_sof = 2048;
+}
+
+static void uvc_video_stats_stop(struct uvc_streaming *stream)
+{
+ ktime_get_ts(&stream->stats.stream.stop_ts);
+}
+
+/* ------------------------------------------------------------------------
+ * Video codecs
+ */
/* Video payload decoding is handled by uvc_video_decode_start(),
* uvc_video_decode_data() and uvc_video_decode_end().
@@ -416,14 +944,9 @@ static int uvc_video_decode_start(struct uvc_streaming *stream,
* - bHeaderLength value must be at least 2 bytes (see above)
* - bHeaderLength value can't be larger than the packet size.
*/
- if (len < 2 || data[0] < 2 || data[0] > len)
+ if (len < 2 || data[0] < 2 || data[0] > len) {
+ stream->stats.frame.nb_invalid++;
return -EINVAL;
-
- /* Skip payloads marked with the error bit ("error frames"). */
- if (data[1] & UVC_STREAM_ERR) {
- uvc_trace(UVC_TRACE_FRAME, "Dropping payload (error bit "
- "set).\n");
- return -ENODATA;
}
fid = data[1] & UVC_STREAM_FID;
@@ -431,8 +954,14 @@ static int uvc_video_decode_start(struct uvc_streaming *stream,
/* Increase the sequence number regardless of any buffer states, so
* that discontinuous sequence numbers always indicate lost frames.
*/
- if (stream->last_fid != fid)
+ if (stream->last_fid != fid) {
stream->sequence++;
+ if (stream->sequence)
+ uvc_video_stats_update(stream);
+ }
+
+ uvc_video_clock_decode(stream, buf, data, len);
+ uvc_video_stats_decode(stream, data, len);
/* Store the payload FID bit and return immediately when the buffer is
* NULL.
@@ -442,6 +971,13 @@ static int uvc_video_decode_start(struct uvc_streaming *stream,
return -ENODATA;
}
+ /* Mark the buffer as bad if the error bit is set. */
+ if (data[1] & UVC_STREAM_ERR) {
+ uvc_trace(UVC_TRACE_FRAME, "Marking buffer as bad (error bit "
+ "set).\n");
+ buf->error = 1;
+ }
+
/* Synchronize to the input stream by waiting for the FID bit to be
 * toggled when the buffer state is not UVC_BUF_STATE_ACTIVE.
* stream->last_fid is initialized to -1, so the first isochronous
@@ -467,9 +1003,10 @@ static int uvc_video_decode_start(struct uvc_streaming *stream,
else
ktime_get_real_ts(&ts);
- buf->buf.sequence = stream->sequence;
- buf->buf.timestamp.tv_sec = ts.tv_sec;
- buf->buf.timestamp.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
+ buf->buf.v4l2_buf.sequence = stream->sequence;
+ buf->buf.v4l2_buf.timestamp.tv_sec = ts.tv_sec;
+ buf->buf.v4l2_buf.timestamp.tv_usec =
+ ts.tv_nsec / NSEC_PER_USEC;
/* TODO: Handle PTS and SCR. */
buf->state = UVC_BUF_STATE_ACTIVE;
@@ -490,7 +1027,7 @@ static int uvc_video_decode_start(struct uvc_streaming *stream,
* avoids detecting end of frame conditions at FID toggling if the
* previous payload had the EOF bit set.
*/
- if (fid != stream->last_fid && buf->buf.bytesused != 0) {
+ if (fid != stream->last_fid && buf->bytesused != 0) {
uvc_trace(UVC_TRACE_FRAME, "Frame complete (FID bit "
"toggled).\n");
buf->state = UVC_BUF_STATE_READY;
@@ -505,7 +1042,6 @@ static int uvc_video_decode_start(struct uvc_streaming *stream,
static void uvc_video_decode_data(struct uvc_streaming *stream,
struct uvc_buffer *buf, const __u8 *data, int len)
{
- struct uvc_video_queue *queue = &stream->queue;
unsigned int maxlen, nbytes;
void *mem;
@@ -513,11 +1049,11 @@ static void uvc_video_decode_data(struct uvc_streaming *stream,
return;
/* Copy the video data to the buffer. */
- maxlen = buf->buf.length - buf->buf.bytesused;
- mem = queue->mem + buf->buf.m.offset + buf->buf.bytesused;
+ maxlen = buf->length - buf->bytesused;
+ mem = buf->mem + buf->bytesused;
nbytes = min((unsigned int)len, maxlen);
memcpy(mem, data, nbytes);
- buf->buf.bytesused += nbytes;
+ buf->bytesused += nbytes;
/* Complete the current frame if the buffer size was exceeded. */
if (len > maxlen) {
@@ -530,7 +1066,7 @@ static void uvc_video_decode_end(struct uvc_streaming *stream,
struct uvc_buffer *buf, const __u8 *data, int len)
{
/* Mark the buffer as done if the EOF marker is set. */
- if (data[1] & UVC_STREAM_EOF && buf->buf.bytesused != 0) {
+ if (data[1] & UVC_STREAM_EOF && buf->bytesused != 0) {
uvc_trace(UVC_TRACE_FRAME, "Frame complete (EOF found).\n");
if (data[0] == len)
uvc_trace(UVC_TRACE_FRAME, "EOF in empty payload.\n");
@@ -568,8 +1104,8 @@ static int uvc_video_encode_data(struct uvc_streaming *stream,
void *mem;
/* Copy video data to the URB buffer. */
- mem = queue->mem + buf->buf.m.offset + queue->buf_used;
- nbytes = min((unsigned int)len, buf->buf.bytesused - queue->buf_used);
+ mem = buf->mem + queue->buf_used;
+ nbytes = min((unsigned int)len, buf->bytesused - queue->buf_used);
nbytes = min(stream->bulk.max_payload_size - stream->bulk.payload_size,
nbytes);
memcpy(data, mem, nbytes);
@@ -624,7 +1160,7 @@ static void uvc_video_decode_isoc(struct urb *urb, struct uvc_streaming *stream,
urb->iso_frame_desc[i].actual_length);
if (buf->state == UVC_BUF_STATE_READY) {
- if (buf->buf.length != buf->buf.bytesused &&
+ if (buf->length != buf->bytesused &&
!(stream->cur_format->flags &
UVC_FMT_FLAG_COMPRESSED))
buf->error = 1;
@@ -724,12 +1260,12 @@ static void uvc_video_encode_bulk(struct urb *urb, struct uvc_streaming *stream,
stream->bulk.payload_size += ret;
len -= ret;
- if (buf->buf.bytesused == stream->queue.buf_used ||
+ if (buf->bytesused == stream->queue.buf_used ||
stream->bulk.payload_size == stream->bulk.max_payload_size) {
- if (buf->buf.bytesused == stream->queue.buf_used) {
+ if (buf->bytesused == stream->queue.buf_used) {
stream->queue.buf_used = 0;
buf->state = UVC_BUF_STATE_READY;
- buf->buf.sequence = ++stream->sequence;
+ buf->buf.v4l2_buf.sequence = ++stream->sequence;
uvc_queue_next_buffer(&stream->queue, buf);
stream->last_fid ^= UVC_STREAM_FID;
}
@@ -870,6 +1406,8 @@ static void uvc_uninit_video(struct uvc_streaming *stream, int free_buffers)
struct urb *urb;
unsigned int i;
+ uvc_video_stats_stop(stream);
+
for (i = 0; i < UVC_URBS; ++i) {
urb = stream->urb[i];
if (urb == NULL)
@@ -882,6 +1420,8 @@ static void uvc_uninit_video(struct uvc_streaming *stream, int free_buffers)
if (free_buffers)
uvc_free_urb_buffers(stream);
+
+ uvc_video_clock_cleanup(stream);
}
/*
@@ -1009,6 +1549,12 @@ static int uvc_init_video(struct uvc_streaming *stream, gfp_t gfp_flags)
stream->bulk.skip_payload = 0;
stream->bulk.payload_size = 0;
+ uvc_video_stats_start(stream);
+
+ ret = uvc_video_clock_init(stream);
+ if (ret < 0)
+ return ret;
+
if (intf->num_altsetting > 1) {
struct usb_host_endpoint *best_ep = NULL;
unsigned int best_psize = 3 * 1024;
@@ -1283,6 +1829,11 @@ int uvc_video_enable(struct uvc_streaming *stream, int enable)
return ret;
}
- return uvc_init_video(stream, GFP_KERNEL);
-}
+ ret = uvc_init_video(stream, GFP_KERNEL);
+ if (ret < 0) {
+ usb_set_interface(stream->dev->udev, stream->intfnum, 0);
+ uvc_queue_enable(&stream->queue, 0);
+ }
+ return ret;
+}
diff --git a/drivers/media/video/uvc/uvcvideo.h b/drivers/media/video/uvc/uvcvideo.h
index 4c1392ebcd4b..67f88d85bb16 100644
--- a/drivers/media/video/uvc/uvcvideo.h
+++ b/drivers/media/video/uvc/uvcvideo.h
@@ -13,6 +13,7 @@
#include <linux/videodev2.h>
#include <media/media-device.h>
#include <media/v4l2-device.h>
+#include <media/videobuf2-core.h>
/* --------------------------------------------------------------------------
* UVC constants
@@ -113,6 +114,7 @@
/* Maximum allowed number of control mappings per device */
#define UVC_MAX_CONTROL_MAPPINGS 1024
+#define UVC_MAX_CONTROL_MENU_ENTRIES 32
/* Devices quirks */
#define UVC_QUIRK_STATUS_INTERVAL 0x00000001
@@ -319,35 +321,30 @@ enum uvc_buffer_state {
};
struct uvc_buffer {
- unsigned long vma_use_count;
- struct list_head stream;
-
- /* Touched by interrupt handler. */
- struct v4l2_buffer buf;
+ struct vb2_buffer buf;
struct list_head queue;
- wait_queue_head_t wait;
+
enum uvc_buffer_state state;
unsigned int error;
+
+ void *mem;
+ unsigned int length;
+ unsigned int bytesused;
+
+ u32 pts;
};
-#define UVC_QUEUE_STREAMING (1 << 0)
-#define UVC_QUEUE_DISCONNECTED (1 << 1)
-#define UVC_QUEUE_DROP_CORRUPTED (1 << 2)
+#define UVC_QUEUE_DISCONNECTED (1 << 0)
+#define UVC_QUEUE_DROP_CORRUPTED (1 << 1)
struct uvc_video_queue {
- enum v4l2_buf_type type;
+ struct vb2_queue queue;
+ struct mutex mutex; /* Protects queue */
- void *mem;
unsigned int flags;
-
- unsigned int count;
- unsigned int buf_size;
unsigned int buf_used;
- struct uvc_buffer buffer[UVC_MAX_VIDEO_BUFFERS];
- struct mutex mutex; /* protects buffers and mainqueue */
- spinlock_t irqlock; /* protects irqqueue */
- struct list_head mainqueue;
+ spinlock_t irqlock; /* Protects irqqueue */
struct list_head irqqueue;
};
@@ -362,6 +359,51 @@ struct uvc_video_chain {
struct mutex ctrl_mutex; /* Protects ctrl.info */
};
+struct uvc_stats_frame {
+ unsigned int size; /* Number of bytes captured */
+ unsigned int first_data; /* Index of the first non-empty packet */
+
+ unsigned int nb_packets; /* Number of packets */
+ unsigned int nb_empty; /* Number of empty packets */
+ unsigned int nb_invalid; /* Number of packets with an invalid header */
+ unsigned int nb_errors; /* Number of packets with the error bit set */
+
+ unsigned int nb_pts; /* Number of packets with a PTS timestamp */
+ unsigned int nb_pts_diffs; /* Number of PTS differences inside a frame */
+ unsigned int last_pts_diff; /* Index of the last PTS difference */
+ bool has_initial_pts; /* Whether the first non-empty packet has a PTS */
+ bool has_early_pts; /* Whether a PTS is present before the first non-empty packet */
+ u32 pts; /* PTS of the last packet */
+
+ unsigned int nb_scr; /* Number of packets with a SCR timestamp */
+ unsigned int nb_scr_diffs; /* Number of SCR.STC differences inside a frame */
+ u16 scr_sof; /* SCR.SOF of the last packet */
+ u32 scr_stc; /* SCR.STC of the last packet */
+};
+
+struct uvc_stats_stream {
+ struct timespec start_ts; /* Stream start timestamp */
+ struct timespec stop_ts; /* Stream stop timestamp */
+
+ unsigned int nb_frames; /* Number of frames */
+
+ unsigned int nb_packets; /* Number of packets */
+ unsigned int nb_empty; /* Number of empty packets */
+ unsigned int nb_invalid; /* Number of packets with an invalid header */
+ unsigned int nb_errors; /* Number of packets with the error bit set */
+
+ unsigned int nb_pts_constant; /* Number of frames with constant PTS */
+ unsigned int nb_pts_early; /* Number of frames with early PTS */
+ unsigned int nb_pts_initial; /* Number of frames with initial PTS */
+
+ unsigned int nb_scr_count_ok; /* Number of frames with at least one SCR per non-empty packet */
+ unsigned int nb_scr_diffs_ok; /* Number of frames with varying SCR.STC */
+ unsigned int scr_sof_count; /* SCR.SOF counter accumulated since stream start */
+ unsigned int scr_sof; /* SCR.SOF of the last packet */
+ unsigned int min_sof; /* Minimum SCR.SOF value */
+ unsigned int max_sof; /* Maximum SCR.SOF value */
+};
+
struct uvc_streaming {
struct list_head list;
struct uvc_device *dev;
@@ -387,6 +429,7 @@ struct uvc_streaming {
*/
struct mutex mutex;
+ /* Buffers queue. */
unsigned int frozen : 1;
struct uvc_video_queue queue;
void (*decode) (struct urb *urb, struct uvc_streaming *video,
@@ -408,6 +451,32 @@ struct uvc_streaming {
__u32 sequence;
__u8 last_fid;
+
+ /* debugfs */
+ struct dentry *debugfs_dir;
+ struct {
+ struct uvc_stats_frame frame;
+ struct uvc_stats_stream stream;
+ } stats;
+
+ /* Timestamps support. */
+ struct uvc_clock {
+ struct uvc_clock_sample {
+ u32 dev_stc;
+ u16 dev_sof;
+ struct timespec host_ts;
+ u16 host_sof;
+ } *samples;
+
+ unsigned int head;
+ unsigned int count;
+ unsigned int size;
+
+ u16 last_sof;
+ u16 sof_offset;
+
+ spinlock_t lock;
+ } clock;
};
enum uvc_device_state {
@@ -479,9 +548,12 @@ struct uvc_driver {
#define UVC_TRACE_SUSPEND (1 << 8)
#define UVC_TRACE_STATUS (1 << 9)
#define UVC_TRACE_VIDEO (1 << 10)
+#define UVC_TRACE_STATS (1 << 11)
+#define UVC_TRACE_CLOCK (1 << 12)
#define UVC_WARN_MINMAX 0
#define UVC_WARN_PROBE_DEF 1
+#define UVC_WARN_XU_GET_RES 2
extern unsigned int uvc_clock_param;
extern unsigned int uvc_no_drop_param;
@@ -516,8 +588,8 @@ extern struct uvc_entity *uvc_entity_by_id(struct uvc_device *dev, int id);
extern void uvc_queue_init(struct uvc_video_queue *queue,
enum v4l2_buf_type type, int drop_corrupted);
extern int uvc_alloc_buffers(struct uvc_video_queue *queue,
- unsigned int nbuffers, unsigned int buflength);
-extern int uvc_free_buffers(struct uvc_video_queue *queue);
+ struct v4l2_requestbuffers *rb);
+extern void uvc_free_buffers(struct uvc_video_queue *queue);
extern int uvc_query_buffer(struct uvc_video_queue *queue,
struct v4l2_buffer *v4l2_buf);
extern int uvc_queue_buffer(struct uvc_video_queue *queue,
@@ -539,7 +611,7 @@ extern unsigned long uvc_queue_get_unmapped_area(struct uvc_video_queue *queue,
extern int uvc_queue_allocated(struct uvc_video_queue *queue);
static inline int uvc_queue_streaming(struct uvc_video_queue *queue)
{
- return queue->flags & UVC_QUEUE_STREAMING;
+ return vb2_is_streaming(&queue->queue);
}
/* V4L2 interface */
@@ -556,10 +628,11 @@ extern int uvc_video_resume(struct uvc_streaming *stream, int reset);
extern int uvc_video_enable(struct uvc_streaming *stream, int enable);
extern int uvc_probe_video(struct uvc_streaming *stream,
struct uvc_streaming_control *probe);
-extern int uvc_commit_video(struct uvc_streaming *stream,
- struct uvc_streaming_control *ctrl);
extern int uvc_query_ctrl(struct uvc_device *dev, __u8 query, __u8 unit,
__u8 intfnum, __u8 cs, void *data, __u16 size);
+void uvc_video_clock_update(struct uvc_streaming *stream,
+ struct v4l2_buffer *v4l2_buf,
+ struct uvc_buffer *buf);
/* Status */
extern int uvc_status_init(struct uvc_device *dev);
@@ -612,4 +685,13 @@ extern struct usb_host_endpoint *uvc_find_endpoint(
void uvc_video_decode_isight(struct urb *urb, struct uvc_streaming *stream,
struct uvc_buffer *buf);
+/* debugfs and statistics */
+int uvc_debugfs_init(void);
+void uvc_debugfs_cleanup(void);
+int uvc_debugfs_init_stream(struct uvc_streaming *stream);
+void uvc_debugfs_cleanup_stream(struct uvc_streaming *stream);
+
+size_t uvc_video_stats_dump(struct uvc_streaming *stream, char *buf,
+ size_t size);
+
#endif
diff --git a/drivers/media/video/v4l2-compat-ioctl32.c b/drivers/media/video/v4l2-compat-ioctl32.c
index c68531b88279..af4419e6c658 100644
--- a/drivers/media/video/v4l2-compat-ioctl32.c
+++ b/drivers/media/video/v4l2-compat-ioctl32.c
@@ -985,6 +985,8 @@ long v4l2_compat_ioctl32(struct file *file, unsigned int cmd, unsigned long arg)
case VIDIOC_CROPCAP:
case VIDIOC_G_CROP:
case VIDIOC_S_CROP:
+ case VIDIOC_G_SELECTION:
+ case VIDIOC_S_SELECTION:
case VIDIOC_G_JPEGCOMP:
case VIDIOC_S_JPEGCOMP:
case VIDIOC_QUERYSTD:
diff --git a/drivers/media/video/v4l2-ctrls.c b/drivers/media/video/v4l2-ctrls.c
index 0f415dade05a..cccd42be718a 100644
--- a/drivers/media/video/v4l2-ctrls.c
+++ b/drivers/media/video/v4l2-ctrls.c
@@ -465,8 +465,9 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_CHROMA_GAIN: return "Chroma Gain";
case V4L2_CID_ILLUMINATORS_1: return "Illuminator 1";
case V4L2_CID_ILLUMINATORS_2: return "Illuminator 2";
- case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE: return "Minimum Number of Capture Buffers";
- case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT: return "Minimum Number of Output Buffers";
+ case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE: return "Min Number of Capture Buffers";
+ case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT: return "Min Number of Output Buffers";
+ case V4L2_CID_ALPHA_COMPONENT: return "Alpha Component";
/* MPEG controls */
/* Keep the order of the 'case's the same as in videodev2.h! */
@@ -505,25 +506,25 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_MPEG_VIDEO_MUTE_YUV: return "Video Mute YUV";
case V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE: return "Decoder Slice Interface";
case V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER: return "MPEG4 Loop Filter Enable";
- case V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB: return "The Number of Intra Refresh MBs";
+ case V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB: return "Number of Intra Refresh MBs";
case V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE: return "Frame Level Rate Control Enable";
case V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE: return "H264 MB Level Rate Control";
case V4L2_CID_MPEG_VIDEO_HEADER_MODE: return "Sequence Header Mode";
- case V4L2_CID_MPEG_VIDEO_MAX_REF_PIC: return "The Max Number of Reference Picture";
+ case V4L2_CID_MPEG_VIDEO_MAX_REF_PIC: return "Max Number of Reference Pics";
case V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP: return "H263 I-Frame QP Value";
- case V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP: return "H263 P frame QP Value";
- case V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP: return "H263 B frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP: return "H263 P-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP: return "H263 B-Frame QP Value";
case V4L2_CID_MPEG_VIDEO_H263_MIN_QP: return "H263 Minimum QP Value";
case V4L2_CID_MPEG_VIDEO_H263_MAX_QP: return "H263 Maximum QP Value";
case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP: return "H264 I-Frame QP Value";
- case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP: return "H264 P frame QP Value";
- case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP: return "H264 B frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP: return "H264 P-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP: return "H264 B-Frame QP Value";
case V4L2_CID_MPEG_VIDEO_H264_MAX_QP: return "H264 Maximum QP Value";
case V4L2_CID_MPEG_VIDEO_H264_MIN_QP: return "H264 Minimum QP Value";
case V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM: return "H264 8x8 Transform Enable";
case V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE: return "H264 CPB Buffer Size";
- case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE: return "H264 Entorpy Mode";
- case V4L2_CID_MPEG_VIDEO_H264_I_PERIOD: return "H264 I Period";
+ case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE: return "H264 Entropy Mode";
+ case V4L2_CID_MPEG_VIDEO_H264_I_PERIOD: return "H264 I-Frame Period";
case V4L2_CID_MPEG_VIDEO_H264_LEVEL: return "H264 Level";
case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA: return "H264 Loop Filter Alpha Offset";
case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA: return "H264 Loop Filter Beta Offset";
@@ -534,16 +535,16 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE: return "Aspect Ratio VUI Enable";
case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC: return "VUI Aspect Ratio IDC";
case V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP: return "MPEG4 I-Frame QP Value";
- case V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP: return "MPEG4 P frame QP Value";
- case V4L2_CID_MPEG_VIDEO_MPEG4_B_FRAME_QP: return "MPEG4 B frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP: return "MPEG4 P-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_MPEG4_B_FRAME_QP: return "MPEG4 B-Frame QP Value";
case V4L2_CID_MPEG_VIDEO_MPEG4_MIN_QP: return "MPEG4 Minimum QP Value";
case V4L2_CID_MPEG_VIDEO_MPEG4_MAX_QP: return "MPEG4 Maximum QP Value";
case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL: return "MPEG4 Level";
case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE: return "MPEG4 Profile";
case V4L2_CID_MPEG_VIDEO_MPEG4_QPEL: return "Quarter Pixel Search Enable";
- case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES: return "The Maximum Bytes Per Slice";
- case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB: return "The Number of MB in a Slice";
- case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE: return "The Slice Partitioning Method";
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES: return "Maximum Bytes in a Slice";
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB: return "Number of MBs in a Slice";
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE: return "Slice Partitioning Method";
case V4L2_CID_MPEG_VIDEO_VBV_SIZE: return "VBV Buffer Size";
/* CAMERA controls */
@@ -579,7 +580,7 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_AUDIO_LIMITER_ENABLED: return "Audio Limiter Feature Enabled";
case V4L2_CID_AUDIO_LIMITER_RELEASE_TIME: return "Audio Limiter Release Time";
case V4L2_CID_AUDIO_LIMITER_DEVIATION: return "Audio Limiter Deviation";
- case V4L2_CID_AUDIO_COMPRESSION_ENABLED: return "Audio Compression Feature Enabled";
+ case V4L2_CID_AUDIO_COMPRESSION_ENABLED: return "Audio Compression Enabled";
case V4L2_CID_AUDIO_COMPRESSION_GAIN: return "Audio Compression Gain";
case V4L2_CID_AUDIO_COMPRESSION_THRESHOLD: return "Audio Compression Threshold";
case V4L2_CID_AUDIO_COMPRESSION_ATTACK_TIME: return "Audio Compression Attack Time";
@@ -587,24 +588,24 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_PILOT_TONE_ENABLED: return "Pilot Tone Feature Enabled";
case V4L2_CID_PILOT_TONE_DEVIATION: return "Pilot Tone Deviation";
case V4L2_CID_PILOT_TONE_FREQUENCY: return "Pilot Tone Frequency";
- case V4L2_CID_TUNE_PREEMPHASIS: return "Pre-emphasis settings";
+ case V4L2_CID_TUNE_PREEMPHASIS: return "Pre-Emphasis";
case V4L2_CID_TUNE_POWER_LEVEL: return "Tune Power Level";
case V4L2_CID_TUNE_ANTENNA_CAPACITOR: return "Tune Antenna Capacitor";
/* Flash controls */
- case V4L2_CID_FLASH_CLASS: return "Flash controls";
- case V4L2_CID_FLASH_LED_MODE: return "LED mode";
- case V4L2_CID_FLASH_STROBE_SOURCE: return "Strobe source";
+ case V4L2_CID_FLASH_CLASS: return "Flash Controls";
+ case V4L2_CID_FLASH_LED_MODE: return "LED Mode";
+ case V4L2_CID_FLASH_STROBE_SOURCE: return "Strobe Source";
case V4L2_CID_FLASH_STROBE: return "Strobe";
- case V4L2_CID_FLASH_STROBE_STOP: return "Stop strobe";
- case V4L2_CID_FLASH_STROBE_STATUS: return "Strobe status";
- case V4L2_CID_FLASH_TIMEOUT: return "Strobe timeout";
- case V4L2_CID_FLASH_INTENSITY: return "Intensity, flash mode";
- case V4L2_CID_FLASH_TORCH_INTENSITY: return "Intensity, torch mode";
- case V4L2_CID_FLASH_INDICATOR_INTENSITY: return "Intensity, indicator";
+ case V4L2_CID_FLASH_STROBE_STOP: return "Stop Strobe";
+ case V4L2_CID_FLASH_STROBE_STATUS: return "Strobe Status";
+ case V4L2_CID_FLASH_TIMEOUT: return "Strobe Timeout";
+ case V4L2_CID_FLASH_INTENSITY: return "Intensity, Flash Mode";
+ case V4L2_CID_FLASH_TORCH_INTENSITY: return "Intensity, Torch Mode";
+ case V4L2_CID_FLASH_INDICATOR_INTENSITY: return "Intensity, Indicator";
case V4L2_CID_FLASH_FAULT: return "Faults";
case V4L2_CID_FLASH_CHARGE: return "Charge";
- case V4L2_CID_FLASH_READY: return "Ready to strobe";
+ case V4L2_CID_FLASH_READY: return "Ready to Strobe";
default:
return NULL;
@@ -1108,8 +1109,8 @@ int v4l2_ctrl_handler_init(struct v4l2_ctrl_handler *hdl,
INIT_LIST_HEAD(&hdl->ctrls);
INIT_LIST_HEAD(&hdl->ctrl_refs);
hdl->nr_of_buckets = 1 + nr_of_controls_hint / 8;
- hdl->buckets = kzalloc(sizeof(hdl->buckets[0]) * hdl->nr_of_buckets,
- GFP_KERNEL);
+ hdl->buckets = kcalloc(hdl->nr_of_buckets, sizeof(hdl->buckets[0]),
+ GFP_KERNEL);
hdl->error = hdl->buckets ? 0 : -ENOMEM;
return hdl->error;
}
diff --git a/drivers/media/video/v4l2-dev.c b/drivers/media/video/v4l2-dev.c
index a5c9ed128b97..96e9615663e9 100644
--- a/drivers/media/video/v4l2-dev.c
+++ b/drivers/media/video/v4l2-dev.c
@@ -146,10 +146,9 @@ static void v4l2_device_release(struct device *cd)
struct v4l2_device *v4l2_dev = vdev->v4l2_dev;
mutex_lock(&videodev_lock);
- if (video_device[vdev->minor] != vdev) {
- mutex_unlock(&videodev_lock);
+ if (WARN_ON(video_device[vdev->minor] != vdev)) {
/* should not happen */
- WARN_ON(1);
+ mutex_unlock(&videodev_lock);
return;
}
@@ -168,7 +167,7 @@ static void v4l2_device_release(struct device *cd)
mutex_unlock(&videodev_lock);
#if defined(CONFIG_MEDIA_CONTROLLER)
- if (vdev->v4l2_dev && vdev->v4l2_dev->mdev &&
+ if (v4l2_dev && v4l2_dev->mdev &&
vdev->vfl_type != VFL_TYPE_SUBDEV)
media_device_unregister_entity(&vdev->entity);
#endif
@@ -556,8 +555,7 @@ int __video_register_device(struct video_device *vdev, int type, int nr,
vdev->minor = -1;
/* the release callback MUST be present */
- WARN_ON(!vdev->release);
- if (!vdev->release)
+ if (WARN_ON(!vdev->release))
return -EINVAL;
/* v4l2_fh support */
@@ -703,8 +701,8 @@ int __video_register_device(struct video_device *vdev, int type, int nr,
vdev->vfl_type != VFL_TYPE_SUBDEV) {
vdev->entity.type = MEDIA_ENT_T_DEVNODE_V4L;
vdev->entity.name = vdev->name;
- vdev->entity.v4l.major = VIDEO_MAJOR;
- vdev->entity.v4l.minor = vdev->minor;
+ vdev->entity.info.v4l.major = VIDEO_MAJOR;
+ vdev->entity.info.v4l.minor = vdev->minor;
ret = media_device_register_entity(vdev->v4l2_dev->mdev,
&vdev->entity);
if (ret < 0)
diff --git a/drivers/media/video/v4l2-device.c b/drivers/media/video/v4l2-device.c
index 0edd618b9ddf..1f203b85a637 100644
--- a/drivers/media/video/v4l2-device.c
+++ b/drivers/media/video/v4l2-device.c
@@ -234,8 +234,8 @@ int v4l2_device_register_subdev_nodes(struct v4l2_device *v4l2_dev)
goto clean_up;
}
#if defined(CONFIG_MEDIA_CONTROLLER)
- sd->entity.v4l.major = VIDEO_MAJOR;
- sd->entity.v4l.minor = vdev->minor;
+ sd->entity.info.v4l.major = VIDEO_MAJOR;
+ sd->entity.info.v4l.minor = vdev->minor;
#endif
sd->devnode = vdev;
}
diff --git a/drivers/media/video/v4l2-ioctl.c b/drivers/media/video/v4l2-ioctl.c
index e1da8fc9dd2f..3f623859a337 100644
--- a/drivers/media/video/v4l2-ioctl.c
+++ b/drivers/media/video/v4l2-ioctl.c
@@ -238,6 +238,8 @@ static const char *v4l2_ioctls[] = {
[_IOC_NR(VIDIOC_CROPCAP)] = "VIDIOC_CROPCAP",
[_IOC_NR(VIDIOC_G_CROP)] = "VIDIOC_G_CROP",
[_IOC_NR(VIDIOC_S_CROP)] = "VIDIOC_S_CROP",
+ [_IOC_NR(VIDIOC_G_SELECTION)] = "VIDIOC_G_SELECTION",
+ [_IOC_NR(VIDIOC_S_SELECTION)] = "VIDIOC_S_SELECTION",
[_IOC_NR(VIDIOC_G_JPEGCOMP)] = "VIDIOC_G_JPEGCOMP",
[_IOC_NR(VIDIOC_S_JPEGCOMP)] = "VIDIOC_S_JPEGCOMP",
[_IOC_NR(VIDIOC_QUERYSTD)] = "VIDIOC_QUERYSTD",
@@ -1547,11 +1549,32 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_crop *p = arg;
- if (!ops->vidioc_g_crop)
+ if (!ops->vidioc_g_crop && !ops->vidioc_g_selection)
break;
dbgarg(cmd, "type=%s\n", prt_names(p->type, v4l2_type_names));
- ret = ops->vidioc_g_crop(file, fh, p);
+
+ if (ops->vidioc_g_crop) {
+ ret = ops->vidioc_g_crop(file, fh, p);
+ } else {
+ /* simulate capture crop using selection api */
+ struct v4l2_selection s = {
+ .type = p->type,
+ };
+
+ /* crop means compose for output devices */
+ if (V4L2_TYPE_IS_OUTPUT(p->type))
+ s.target = V4L2_SEL_TGT_COMPOSE_ACTIVE;
+ else
+ s.target = V4L2_SEL_TGT_CROP_ACTIVE;
+
+ ret = ops->vidioc_g_selection(file, fh, &s);
+
+ /* copying results to old structure on success */
+ if (!ret)
+ p->c = s.r;
+ }
+
if (!ret)
dbgrect(vfd, "", &p->c);
break;
@@ -1560,15 +1583,65 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_crop *p = arg;
- if (!ops->vidioc_s_crop)
+ if (!ops->vidioc_s_crop && !ops->vidioc_s_selection)
break;
+
if (ret_prio) {
ret = ret_prio;
break;
}
dbgarg(cmd, "type=%s\n", prt_names(p->type, v4l2_type_names));
dbgrect(vfd, "", &p->c);
- ret = ops->vidioc_s_crop(file, fh, p);
+
+ if (ops->vidioc_s_crop) {
+ ret = ops->vidioc_s_crop(file, fh, p);
+ } else {
+ /* simulate capture crop using selection api */
+ struct v4l2_selection s = {
+ .type = p->type,
+ .r = p->c,
+ };
+
+ /* crop means compose for output devices */
+ if (V4L2_TYPE_IS_OUTPUT(p->type))
+ s.target = V4L2_SEL_TGT_COMPOSE_ACTIVE;
+ else
+ s.target = V4L2_SEL_TGT_CROP_ACTIVE;
+
+ ret = ops->vidioc_s_selection(file, fh, &s);
+ }
+ break;
+ }
+ case VIDIOC_G_SELECTION:
+ {
+ struct v4l2_selection *p = arg;
+
+ if (!ops->vidioc_g_selection)
+ break;
+
+ dbgarg(cmd, "type=%s\n", prt_names(p->type, v4l2_type_names));
+
+ ret = ops->vidioc_g_selection(file, fh, p);
+ if (!ret)
+ dbgrect(vfd, "", &p->r);
+ break;
+ }
+ case VIDIOC_S_SELECTION:
+ {
+ struct v4l2_selection *p = arg;
+
+ if (!ops->vidioc_s_selection)
+ break;
+
+ if (ret_prio) {
+ ret = ret_prio;
+ break;
+ }
+
+ dbgarg(cmd, "type=%s\n", prt_names(p->type, v4l2_type_names));
+ dbgrect(vfd, "", &p->r);
+
+ ret = ops->vidioc_s_selection(file, fh, p);
break;
}
case VIDIOC_CROPCAP:
@@ -1576,11 +1649,42 @@ static long __video_do_ioctl(struct file *file,
struct v4l2_cropcap *p = arg;
/*FIXME: Should also show v4l2_fract pixelaspect */
- if (!ops->vidioc_cropcap)
+ if (!ops->vidioc_cropcap && !ops->vidioc_g_selection)
break;
dbgarg(cmd, "type=%s\n", prt_names(p->type, v4l2_type_names));
- ret = ops->vidioc_cropcap(file, fh, p);
+ if (ops->vidioc_cropcap) {
+ ret = ops->vidioc_cropcap(file, fh, p);
+ } else {
+ struct v4l2_selection s = { .type = p->type };
+
+ /* obtaining bounds */
+ if (V4L2_TYPE_IS_OUTPUT(p->type))
+ s.target = V4L2_SEL_TGT_COMPOSE_BOUNDS;
+ else
+ s.target = V4L2_SEL_TGT_CROP_BOUNDS;
+
+ ret = ops->vidioc_g_selection(file, fh, &s);
+ if (ret)
+ break;
+ p->bounds = s.r;
+
+ /* obtaining defrect */
+ if (V4L2_TYPE_IS_OUTPUT(p->type))
+ s.target = V4L2_SEL_TGT_COMPOSE_DEFAULT;
+ else
+ s.target = V4L2_SEL_TGT_CROP_DEFAULT;
+
+ ret = ops->vidioc_g_selection(file, fh, &s);
+ if (ret)
+ break;
+ p->defrect = s.r;
+
+ /* setting trivial pixelaspect */
+ p->pixelaspect.numerator = 1;
+ p->pixelaspect.denominator = 1;
+ }
+
if (!ret) {
dbgrect(vfd, "bounds ", &p->bounds);
dbgrect(vfd, "defrect ", &p->defrect);
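
From userspace nothing changes: an application can keep issuing VIDIOC_CROPCAP and VIDIOC_G_CROP, and on drivers that only implement the selection ops the core fills the legacy structures as shown above. A minimal sketch (the device node is just an example):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_cropcap cap;
	struct v4l2_crop crop;
	int fd = open("/dev/video0", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&cap, 0, sizeof(cap));
	cap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	if (ioctl(fd, VIDIOC_CROPCAP, &cap) == 0)
		printf("bounds %dx%d\n", cap.bounds.width, cap.bounds.height);

	memset(&crop, 0, sizeof(crop));
	crop.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	if (ioctl(fd, VIDIOC_G_CROP, &crop) == 0)
		printf("crop %dx%d@(%d,%d)\n", crop.c.width, crop.c.height,
		       crop.c.left, crop.c.top);

	close(fd);
	return 0;
}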
@@ -1767,6 +1871,7 @@ static long __video_do_ioctl(struct file *file,
case VIDIOC_S_FREQUENCY:
{
struct v4l2_frequency *p = arg;
+ enum v4l2_tuner_type type;
if (!ops->vidioc_s_frequency)
break;
@@ -1774,9 +1879,14 @@ static long __video_do_ioctl(struct file *file,
ret = ret_prio;
break;
}
+ type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
+ V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
dbgarg(cmd, "tuner=%d, type=%d, frequency=%d\n",
p->tuner, p->type, p->frequency);
- ret = ops->vidioc_s_frequency(file, fh, p);
+ if (p->type != type)
+ ret = -EINVAL;
+ else
+ ret = ops->vidioc_s_frequency(file, fh, p);
break;
}
case VIDIOC_G_SLICED_VBI_CAP:
@@ -2226,6 +2336,10 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
struct v4l2_ext_controls *ctrls = parg;
if (ctrls->count != 0) {
+ if (ctrls->count > V4L2_CID_MAX_CTRLS) {
+ ret = -EINVAL;
+ break;
+ }
*user_ptr = (void __user *)ctrls->controls;
*kernel_ptr = (void *)&ctrls->controls;
*array_size = sizeof(struct v4l2_ext_control)
diff --git a/drivers/media/video/v4l2-subdev.c b/drivers/media/video/v4l2-subdev.c
index 65ade5f03c2e..41d118ee2de6 100644
--- a/drivers/media/video/v4l2-subdev.c
+++ b/drivers/media/video/v4l2-subdev.c
@@ -193,6 +193,10 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
return v4l2_subdev_call(sd, core, s_register, p);
}
#endif
+
+ case VIDIOC_LOG_STATUS:
+ return v4l2_subdev_call(sd, core, log_status);
+
#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
case VIDIOC_SUBDEV_G_FMT: {
struct v4l2_subdev_format *format = arg;
diff --git a/drivers/media/video/via-camera.c b/drivers/media/video/via-camera.c
index cbf13d09b4ac..20f7237b8242 100644
--- a/drivers/media/video/via-camera.c
+++ b/drivers/media/video/via-camera.c
@@ -34,13 +34,13 @@ MODULE_AUTHOR("Jonathan Corbet <corbet@lwn.net>");
MODULE_DESCRIPTION("VIA framebuffer-based camera controller driver");
MODULE_LICENSE("GPL");
-static int flip_image;
+static bool flip_image;
module_param(flip_image, bool, 0444);
MODULE_PARM_DESC(flip_image,
"If set, the sensor will be instructed to flip the image "
"vertically.");
-static int override_serial;
+static bool override_serial;
module_param(override_serial, bool, 0444);
MODULE_PARM_DESC(override_serial,
"The camera driver will normally refuse to load if "
@@ -156,14 +156,10 @@ static struct via_format {
.mbus_code = V4L2_MBUS_FMT_YUYV8_2X8,
.bpp = 2,
},
- {
- .desc = "RGB 565",
- .pixelformat = V4L2_PIX_FMT_RGB565,
- .mbus_code = V4L2_MBUS_FMT_RGB565_2X8_LE,
- .bpp = 2,
- },
/* RGB444 and Bayer should be doable, but have never been
- tested with this driver. */
+ tested with this driver. RGB565 seems to work at the default
+ resolution, but results in color corruption when being scaled by
+ viacam_set_scaled(), and is disabled as a result. */
};
#define N_VIA_FMTS ARRAY_SIZE(via_formats)
@@ -1504,14 +1500,4 @@ static struct platform_driver viacam_driver = {
.remove = viacam_remove,
};
-static int viacam_init(void)
-{
- return platform_driver_register(&viacam_driver);
-}
-module_init(viacam_init);
-
-static void viacam_exit(void)
-{
- platform_driver_unregister(&viacam_driver);
-}
-module_exit(viacam_exit);
+module_platform_driver(viacam_driver);
diff --git a/drivers/media/video/videobuf-dvb.c b/drivers/media/video/videobuf-dvb.c
index 3de7c7e4402d..59cb54aa2946 100644
--- a/drivers/media/video/videobuf-dvb.c
+++ b/drivers/media/video/videobuf-dvb.c
@@ -226,9 +226,10 @@ static int videobuf_dvb_register_frontend(struct dvb_adapter *adapter,
}
/* register network adapter */
- dvb_net_init(adapter, &dvb->net, &dvb->demux.dmx);
- if (dvb->net.dvbdev == NULL) {
- result = -ENOMEM;
+ result = dvb_net_init(adapter, &dvb->net, &dvb->demux.dmx);
+ if (result < 0) {
+ printk(KERN_WARNING "%s: dvb_net_init failed (errno = %d)\n",
+ dvb->name, result);
goto fail_fe_conn;
}
return 0;
diff --git a/drivers/media/video/videobuf2-core.c b/drivers/media/video/videobuf2-core.c
index 95a3f5e82aef..2e8f1df775b6 100644
--- a/drivers/media/video/videobuf2-core.c
+++ b/drivers/media/video/videobuf2-core.c
@@ -30,7 +30,7 @@ module_param(debug, int, 0644);
printk(KERN_DEBUG "vb2: " fmt, ## arg); \
} while (0)
-#define call_memop(q, plane, op, args...) \
+#define call_memop(q, op, args...) \
(((q)->mem_ops->op) ? \
((q)->mem_ops->op(args)) : 0)
@@ -52,7 +52,7 @@ static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)
/* Allocate memory for all planes in this buffer */
for (plane = 0; plane < vb->num_planes; ++plane) {
- mem_priv = call_memop(q, plane, alloc, q->alloc_ctx[plane],
+ mem_priv = call_memop(q, alloc, q->alloc_ctx[plane],
q->plane_sizes[plane]);
if (IS_ERR_OR_NULL(mem_priv))
goto free;
@@ -65,8 +65,10 @@ static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)
return 0;
free:
/* Free already allocated memory if one of the allocations failed */
- for (; plane > 0; --plane)
- call_memop(q, plane, put, vb->planes[plane - 1].mem_priv);
+ for (; plane > 0; --plane) {
+ call_memop(q, put, vb->planes[plane - 1].mem_priv);
+ vb->planes[plane - 1].mem_priv = NULL;
+ }
return -ENOMEM;
}
@@ -80,10 +82,10 @@ static void __vb2_buf_mem_free(struct vb2_buffer *vb)
unsigned int plane;
for (plane = 0; plane < vb->num_planes; ++plane) {
- call_memop(q, plane, put, vb->planes[plane].mem_priv);
+ call_memop(q, put, vb->planes[plane].mem_priv);
vb->planes[plane].mem_priv = NULL;
- dprintk(3, "Freed plane %d of buffer %d\n",
- plane, vb->v4l2_buf.index);
+ dprintk(3, "Freed plane %d of buffer %d\n", plane,
+ vb->v4l2_buf.index);
}
}
@@ -97,12 +99,9 @@ static void __vb2_buf_userptr_put(struct vb2_buffer *vb)
unsigned int plane;
for (plane = 0; plane < vb->num_planes; ++plane) {
- void *mem_priv = vb->planes[plane].mem_priv;
-
- if (mem_priv) {
- call_memop(q, plane, put_userptr, mem_priv);
- vb->planes[plane].mem_priv = NULL;
- }
+ if (vb->planes[plane].mem_priv)
+ call_memop(q, put_userptr, vb->planes[plane].mem_priv);
+ vb->planes[plane].mem_priv = NULL;
}
}
@@ -305,7 +304,7 @@ static bool __buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb)
* case anyway. If num_users() returns more than 1,
* we are not the only user of the plane's memory.
*/
- if (mem_priv && call_memop(q, plane, num_users, mem_priv) > 1)
+ if (mem_priv && call_memop(q, num_users, mem_priv) > 1)
return true;
}
return false;
@@ -731,10 +730,10 @@ void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no)
{
struct vb2_queue *q = vb->vb2_queue;
- if (plane_no > vb->num_planes)
+ if (plane_no > vb->num_planes || !vb->planes[plane_no].mem_priv)
return NULL;
- return call_memop(q, plane_no, vaddr, vb->planes[plane_no].mem_priv);
+ return call_memop(q, vaddr, vb->planes[plane_no].mem_priv);
}
EXPORT_SYMBOL_GPL(vb2_plane_vaddr);
@@ -754,10 +753,10 @@ void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no)
{
struct vb2_queue *q = vb->vb2_queue;
- if (plane_no > vb->num_planes)
+ if (plane_no > vb->num_planes || !vb->planes[plane_no].mem_priv)
return NULL;
- return call_memop(q, plane_no, cookie, vb->planes[plane_no].mem_priv);
+ return call_memop(q, cookie, vb->planes[plane_no].mem_priv);
}
EXPORT_SYMBOL_GPL(vb2_plane_cookie);
@@ -883,7 +882,8 @@ static int __qbuf_userptr(struct vb2_buffer *vb, const struct v4l2_buffer *b)
for (plane = 0; plane < vb->num_planes; ++plane) {
/* Skip the plane if already verified */
- if (vb->v4l2_planes[plane].m.userptr == planes[plane].m.userptr
+ if (vb->v4l2_planes[plane].m.userptr &&
+ vb->v4l2_planes[plane].m.userptr == planes[plane].m.userptr
&& vb->v4l2_planes[plane].length == planes[plane].length)
continue;
@@ -898,27 +898,23 @@ static int __qbuf_userptr(struct vb2_buffer *vb, const struct v4l2_buffer *b)
/* Release previously acquired memory if present */
if (vb->planes[plane].mem_priv)
- call_memop(q, plane, put_userptr,
- vb->planes[plane].mem_priv);
+ call_memop(q, put_userptr, vb->planes[plane].mem_priv);
vb->planes[plane].mem_priv = NULL;
vb->v4l2_planes[plane].m.userptr = 0;
vb->v4l2_planes[plane].length = 0;
/* Acquire each plane's memory */
- if (q->mem_ops->get_userptr) {
- mem_priv = q->mem_ops->get_userptr(q->alloc_ctx[plane],
- planes[plane].m.userptr,
- planes[plane].length,
- write);
- if (IS_ERR(mem_priv)) {
- dprintk(1, "qbuf: failed acquiring userspace "
+ mem_priv = call_memop(q, get_userptr, q->alloc_ctx[plane],
+ planes[plane].m.userptr,
+ planes[plane].length, write);
+ if (IS_ERR_OR_NULL(mem_priv)) {
+ dprintk(1, "qbuf: failed acquiring userspace "
"memory for plane %d\n", plane);
- ret = PTR_ERR(mem_priv);
- goto err;
- }
- vb->planes[plane].mem_priv = mem_priv;
+ ret = mem_priv ? PTR_ERR(mem_priv) : -EINVAL;
+ goto err;
}
+ vb->planes[plane].mem_priv = mem_priv;
}
/*
@@ -943,8 +939,7 @@ err:
/* In case of errors, release planes that were already acquired */
for (plane = 0; plane < vb->num_planes; ++plane) {
if (vb->planes[plane].mem_priv)
- call_memop(q, plane, put_userptr,
- vb->planes[plane].mem_priv);
+ call_memop(q, put_userptr, vb->planes[plane].mem_priv);
vb->planes[plane].mem_priv = NULL;
vb->v4l2_planes[plane].m.userptr = 0;
vb->v4l2_planes[plane].length = 0;
@@ -1081,46 +1076,76 @@ EXPORT_SYMBOL_GPL(vb2_prepare_buf);
*/
int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
{
+ struct rw_semaphore *mmap_sem = NULL;
struct vb2_buffer *vb;
- int ret;
+ int ret = 0;
+
+ /*
+ * In the case of user pointer buffers the vb2 allocator needs direct
+ * access to userspace pages. This requires taking the mmap semaphore
+ * of the current process for reading. The same semaphore is taken
+ * before calling the mmap operation, while both mmap and qbuf are
+ * called by the driver or the v4l2 core with the driver's lock held.
+ * To avoid an AB-BA deadlock (mmap_sem then driver's lock in mmap,
+ * and driver's lock then mmap_sem in qbuf) the videobuf2 core
+ * releases the driver's lock, takes mmap_sem and then takes the
+ * driver's lock again.
+ *
+ * To avoid races with other vb2 calls, which might be made after
+ * releasing the driver's lock, this operation is performed at the
+ * beginning of qbuf processing. This way the queue status is
+ * consistent after getting the driver's lock back.
+ */
+ if (q->memory == V4L2_MEMORY_USERPTR) {
+ mmap_sem = &current->mm->mmap_sem;
+ call_qop(q, wait_prepare, q);
+ down_read(mmap_sem);
+ call_qop(q, wait_finish, q);
+ }
if (q->fileio) {
dprintk(1, "qbuf: file io in progress\n");
- return -EBUSY;
+ ret = -EBUSY;
+ goto unlock;
}
if (b->type != q->type) {
dprintk(1, "qbuf: invalid buffer type\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto unlock;
}
if (b->index >= q->num_buffers) {
dprintk(1, "qbuf: buffer index out of range\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto unlock;
}
vb = q->bufs[b->index];
if (NULL == vb) {
/* Should never happen */
dprintk(1, "qbuf: buffer is NULL\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto unlock;
}
if (b->memory != q->memory) {
dprintk(1, "qbuf: invalid memory type\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto unlock;
}
switch (vb->state) {
case VB2_BUF_STATE_DEQUEUED:
ret = __buf_prepare(vb, b);
if (ret)
- return ret;
+ goto unlock;
case VB2_BUF_STATE_PREPARED:
break;
default:
dprintk(1, "qbuf: buffer already in use\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto unlock;
}
/*
@@ -1141,7 +1166,10 @@ int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
__fill_v4l2_buffer(vb, b);
dprintk(1, "qbuf of buffer %d succeeded\n", vb->v4l2_buf.index);
- return 0;
+unlock:
+ if (mmap_sem)
+ up_read(mmap_sem);
+ return ret;
}
EXPORT_SYMBOL_GPL(vb2_qbuf);
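
The lock ordering described in the comment at the top of vb2_qbuf() can be pictured with a toy userspace analogue (pthreads, illustration only, not vb2 code): both paths end up taking mmap_sem before the driver lock, so no AB-BA inversion is possible.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t driver_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t mmap_sem = PTHREAD_MUTEX_INITIALIZER;

/* mmap() path: mmap_sem is held when the driver's mmap handler runs. */
static void *mmap_path(void *arg)
{
	pthread_mutex_lock(&mmap_sem);
	pthread_mutex_lock(&driver_lock);
	/* ... map the buffer ... */
	pthread_mutex_unlock(&driver_lock);
	pthread_mutex_unlock(&mmap_sem);
	return arg;
}

/* qbuf path: entered with the driver lock held, like vb2_qbuf(). */
static void qbuf_path(void)
{
	pthread_mutex_unlock(&driver_lock);	/* wait_prepare() equivalent */
	pthread_mutex_lock(&mmap_sem);
	pthread_mutex_lock(&driver_lock);	/* wait_finish() equivalent */

	/* ... get_user_pages() etc., both locks held, same order as mmap ... */

	pthread_mutex_unlock(&mmap_sem);
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, mmap_path, NULL);

	pthread_mutex_lock(&driver_lock);	/* ioctl path takes the driver lock */
	qbuf_path();
	pthread_mutex_unlock(&driver_lock);

	pthread_join(t, NULL);
	printf("no deadlock\n");
	return 0;
}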
@@ -1521,7 +1549,6 @@ static int __find_plane_by_offset(struct vb2_queue *q, unsigned long off,
int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
{
unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
- struct vb2_plane *vb_plane;
struct vb2_buffer *vb;
unsigned int buffer, plane;
int ret;
@@ -1558,9 +1585,8 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
return ret;
vb = q->bufs[buffer];
- vb_plane = &vb->planes[plane];
- ret = q->mem_ops->mmap(vb_plane->mem_priv, vma);
+ ret = call_memop(q, mmap, vb->planes[plane].mem_priv, vma);
if (ret)
return ret;
diff --git a/drivers/media/video/videobuf2-dma-sg.c b/drivers/media/video/videobuf2-dma-sg.c
index 3bad8b105fea..25c3b360e1ad 100644
--- a/drivers/media/video/videobuf2-dma-sg.c
+++ b/drivers/media/video/videobuf2-dma-sg.c
@@ -140,7 +140,6 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
if (!buf->pages)
goto userptr_fail_pages_array_alloc;
- down_read(&current->mm->mmap_sem);
num_pages_from_user = get_user_pages(current, current->mm,
vaddr & PAGE_MASK,
buf->sg_desc.num_pages,
@@ -148,7 +147,7 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
1, /* force */
buf->pages,
NULL);
- up_read(&current->mm->mmap_sem);
+
if (num_pages_from_user != buf->sg_desc.num_pages)
goto userptr_fail_get_user_pages;
diff --git a/drivers/media/video/videobuf2-memops.c b/drivers/media/video/videobuf2-memops.c
index 71a7a78c3fc0..c41cb60245d6 100644
--- a/drivers/media/video/videobuf2-memops.c
+++ b/drivers/media/video/videobuf2-memops.c
@@ -100,29 +100,26 @@ int vb2_get_contig_userptr(unsigned long vaddr, unsigned long size,
unsigned long offset, start, end;
unsigned long this_pfn, prev_pfn;
dma_addr_t pa = 0;
- int ret = -EFAULT;
start = vaddr;
offset = start & ~PAGE_MASK;
end = start + size;
- down_read(&mm->mmap_sem);
vma = find_vma(mm, start);
if (vma == NULL || vma->vm_end < end)
- goto done;
+ return -EFAULT;
for (prev_pfn = 0; start < end; start += PAGE_SIZE) {
- ret = follow_pfn(vma, start, &this_pfn);
+ int ret = follow_pfn(vma, start, &this_pfn);
if (ret)
- goto done;
+ return ret;
if (prev_pfn == 0)
pa = this_pfn << PAGE_SHIFT;
- else if (this_pfn != prev_pfn + 1) {
- ret = -EFAULT;
- goto done;
- }
+ else if (this_pfn != prev_pfn + 1)
+ return -EFAULT;
+
prev_pfn = this_pfn;
}
@@ -130,16 +127,11 @@ int vb2_get_contig_userptr(unsigned long vaddr, unsigned long size,
 * Memory is contiguous, lock vma and return to the caller
*/
*res_vma = vb2_get_vma(vma);
- if (*res_vma == NULL) {
- ret = -ENOMEM;
- goto done;
- }
- *res_pa = pa + offset;
- ret = 0;
+ if (*res_vma == NULL)
+ return -ENOMEM;
-done:
- up_read(&mm->mmap_sem);
- return ret;
+ *res_pa = pa + offset;
+ return 0;
}
EXPORT_SYMBOL_GPL(vb2_get_contig_userptr);
diff --git a/drivers/media/video/videobuf2-vmalloc.c b/drivers/media/video/videobuf2-vmalloc.c
index a3a884234059..4e789a178f8a 100644
--- a/drivers/media/video/videobuf2-vmalloc.c
+++ b/drivers/media/video/videobuf2-vmalloc.c
@@ -12,6 +12,7 @@
#include <linux/module.h>
#include <linux/mm.h>
+#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
@@ -20,7 +21,10 @@
struct vb2_vmalloc_buf {
void *vaddr;
+ struct page **pages;
+ int write;
unsigned long size;
+ unsigned int n_pages;
atomic_t refcount;
struct vb2_vmarea_handler handler;
};
@@ -31,7 +35,7 @@ static void *vb2_vmalloc_alloc(void *alloc_ctx, unsigned long size)
{
struct vb2_vmalloc_buf *buf;
- buf = kzalloc(sizeof *buf, GFP_KERNEL);
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL);
if (!buf)
return NULL;
@@ -42,15 +46,12 @@ static void *vb2_vmalloc_alloc(void *alloc_ctx, unsigned long size)
buf->handler.arg = buf;
if (!buf->vaddr) {
- printk(KERN_ERR "vmalloc of size %ld failed\n", buf->size);
+ pr_debug("vmalloc of size %ld failed\n", buf->size);
kfree(buf);
return NULL;
}
atomic_inc(&buf->refcount);
- printk(KERN_DEBUG "Allocated vmalloc buffer of size %ld at vaddr=%p\n",
- buf->size, buf->vaddr);
-
return buf;
}
@@ -59,21 +60,84 @@ static void vb2_vmalloc_put(void *buf_priv)
struct vb2_vmalloc_buf *buf = buf_priv;
if (atomic_dec_and_test(&buf->refcount)) {
- printk(KERN_DEBUG "%s: Freeing vmalloc mem at vaddr=%p\n",
- __func__, buf->vaddr);
vfree(buf->vaddr);
kfree(buf);
}
}
-static void *vb2_vmalloc_vaddr(void *buf_priv)
+static void *vb2_vmalloc_get_userptr(void *alloc_ctx, unsigned long vaddr,
+ unsigned long size, int write)
+{
+ struct vb2_vmalloc_buf *buf;
+ unsigned long first, last;
+ int n_pages, offset;
+
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+ if (!buf)
+ return NULL;
+
+ buf->write = write;
+ offset = vaddr & ~PAGE_MASK;
+ buf->size = size;
+
+ first = vaddr >> PAGE_SHIFT;
+ last = (vaddr + size - 1) >> PAGE_SHIFT;
+ buf->n_pages = last - first + 1;
+ buf->pages = kzalloc(buf->n_pages * sizeof(struct page *), GFP_KERNEL);
+ if (!buf->pages)
+ goto fail_pages_array_alloc;
+
+ /* current->mm->mmap_sem is taken by videobuf2 core */
+ n_pages = get_user_pages(current, current->mm, vaddr & PAGE_MASK,
+ buf->n_pages, write, 1, /* force */
+ buf->pages, NULL);
+ if (n_pages != buf->n_pages)
+ goto fail_get_user_pages;
+
+ buf->vaddr = vm_map_ram(buf->pages, buf->n_pages, -1, PAGE_KERNEL);
+ if (!buf->vaddr)
+ goto fail_get_user_pages;
+
+ buf->vaddr += offset;
+ return buf;
+
+fail_get_user_pages:
+ pr_debug("get_user_pages requested/got: %d/%d]\n", n_pages,
+ buf->n_pages);
+ while (--n_pages >= 0)
+ put_page(buf->pages[n_pages]);
+ kfree(buf->pages);
+
+fail_pages_array_alloc:
+ kfree(buf);
+
+ return NULL;
+}
+
+static void vb2_vmalloc_put_userptr(void *buf_priv)
{
struct vb2_vmalloc_buf *buf = buf_priv;
+ unsigned long vaddr = (unsigned long)buf->vaddr & PAGE_MASK;
+ unsigned int i;
+
+ if (vaddr)
+ vm_unmap_ram((void *)vaddr, buf->n_pages);
+ for (i = 0; i < buf->n_pages; ++i) {
+ if (buf->write)
+ set_page_dirty_lock(buf->pages[i]);
+ put_page(buf->pages[i]);
+ }
+ kfree(buf->pages);
+ kfree(buf);
+}
- BUG_ON(!buf);
+static void *vb2_vmalloc_vaddr(void *buf_priv)
+{
+ struct vb2_vmalloc_buf *buf = buf_priv;
if (!buf->vaddr) {
- printk(KERN_ERR "Address of an unallocated plane requested\n");
+ pr_err("Address of an unallocated plane requested "
+ "or cannot map user pointer\n");
return NULL;
}
@@ -92,13 +156,13 @@ static int vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma)
int ret;
if (!buf) {
- printk(KERN_ERR "No memory to map\n");
+ pr_err("No memory to map\n");
return -EINVAL;
}
ret = remap_vmalloc_range(vma, buf->vaddr, 0);
if (ret) {
- printk(KERN_ERR "Remapping vmalloc memory, error: %d\n", ret);
+ pr_err("Remapping vmalloc memory, error: %d\n", ret);
return ret;
}
@@ -121,6 +185,8 @@ static int vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma)
const struct vb2_mem_ops vb2_vmalloc_memops = {
.alloc = vb2_vmalloc_alloc,
.put = vb2_vmalloc_put,
+ .get_userptr = vb2_vmalloc_get_userptr,
+ .put_userptr = vb2_vmalloc_put_userptr,
.vaddr = vb2_vmalloc_vaddr,
.mmap = vb2_vmalloc_mmap,
.num_users = vb2_vmalloc_num_users,
diff --git a/drivers/media/video/vino.c b/drivers/media/video/vino.c
index 52a0a3736c82..4d7391ec8001 100644
--- a/drivers/media/video/vino.c
+++ b/drivers/media/video/vino.c
@@ -708,7 +708,7 @@ static int vino_allocate_buffer(struct vino_framebuffer *fb,
size, count);
/* allocate memory for table with virtual (page) addresses */
- fb->desc_table.virtual = (unsigned long *)
+ fb->desc_table.virtual =
kmalloc(count * sizeof(unsigned long), GFP_KERNEL);
if (!fb->desc_table.virtual)
return -ENOMEM;
diff --git a/drivers/media/video/zoran/zoran_device.c b/drivers/media/video/zoran/zoran_device.c
index e8a27844bf39..e86173bd1327 100644
--- a/drivers/media/video/zoran/zoran_device.c
+++ b/drivers/media/video/zoran/zoran_device.c
@@ -57,7 +57,7 @@
ZR36057_ISR_GIRQ1 | \
ZR36057_ISR_JPEGRepIRQ )
-static int lml33dpath; /* default = 0
+static bool lml33dpath; /* default = 0
* 1 will use digital path in capture
* mode instead of analog. It can be
* used for picture adjustments using
diff --git a/drivers/media/video/zoran/zoran_driver.c b/drivers/media/video/zoran/zoran_driver.c
index d4d05d2ace65..4c09ab781ec3 100644
--- a/drivers/media/video/zoran/zoran_driver.c
+++ b/drivers/media/video/zoran/zoran_driver.c
@@ -1550,7 +1550,7 @@ static int zoran_enum_fmt(struct zoran *zr, struct v4l2_fmtdesc *fmt, int flag)
if (zoran_formats[i].flags & flag && num++ == fmt->index) {
strncpy(fmt->description, zoran_formats[i].name,
sizeof(fmt->description) - 1);
- /* fmt struct pre-zeroed, so adding '\0' not neeed */
+ /* fmt struct pre-zeroed, so adding '\0' not needed */
fmt->pixelformat = zoran_formats[i].fourcc;
if (zoran_formats[i].flags & ZORAN_FORMAT_COMPRESSED)
fmt->flags |= V4L2_FMT_FLAG_COMPRESSED;
@@ -1958,7 +1958,6 @@ static int zoran_g_fbuf(struct file *file, void *__fh,
mutex_unlock(&zr->resource_lock);
fb->fmt.colorspace = V4L2_COLORSPACE_SRGB;
fb->fmt.field = V4L2_FIELD_INTERLACED;
- fb->flags = V4L2_FBUF_FLAG_OVERLAY;
fb->capability = V4L2_FBUF_CAP_LIST_CLIPPING;
return 0;
diff --git a/drivers/media/video/zoran/zr36060.c b/drivers/media/video/zoran/zr36060.c
index 5e4f57cbf314..f08546fe2234 100644
--- a/drivers/media/video/zoran/zr36060.c
+++ b/drivers/media/video/zoran/zr36060.c
@@ -50,7 +50,7 @@
/* amount of chips attached via this driver */
static int zr36060_codecs;
-static int low_bitrate;
+static bool low_bitrate;
module_param(low_bitrate, bool, 0);
MODULE_PARM_DESC(low_bitrate, "Buz compatibility option, halves bitrate");
diff --git a/drivers/memstick/host/jmb38x_ms.c b/drivers/memstick/host/jmb38x_ms.c
index 6ce70e9615d3..5319e9b65847 100644
--- a/drivers/memstick/host/jmb38x_ms.c
+++ b/drivers/memstick/host/jmb38x_ms.c
@@ -21,7 +21,7 @@
#define DRIVER_NAME "jmb38x_ms"
-static int no_dma;
+static bool no_dma;
module_param(no_dma, bool, 0644);
enum {
diff --git a/drivers/memstick/host/r592.c b/drivers/memstick/host/r592.c
index 668f5c6a0399..29b2172ae18f 100644
--- a/drivers/memstick/host/r592.c
+++ b/drivers/memstick/host/r592.c
@@ -23,7 +23,7 @@
#include <linux/swab.h>
#include "r592.h"
-static int r592_enable_dma = 1;
+static bool r592_enable_dma = 1;
static int debug;
static const char *tpc_names[] = {
diff --git a/drivers/memstick/host/tifm_ms.c b/drivers/memstick/host/tifm_ms.c
index b7aacf47703a..6902b83eb1b4 100644
--- a/drivers/memstick/host/tifm_ms.c
+++ b/drivers/memstick/host/tifm_ms.c
@@ -22,7 +22,7 @@
#define DRIVER_NAME "tifm_ms"
-static int no_dma;
+static bool no_dma;
module_param(no_dma, bool, 0644);
/*
diff --git a/drivers/message/fusion/lsi/mpi_cnfg.h b/drivers/message/fusion/lsi/mpi_cnfg.h
index 22027e7946f7..d9bcfba6b049 100644
--- a/drivers/message/fusion/lsi/mpi_cnfg.h
+++ b/drivers/message/fusion/lsi/mpi_cnfg.h
@@ -583,6 +583,7 @@ typedef struct _MSG_CONFIG_REPLY
#define MPI_MANUFACTPAGE_DEVID_SAS1066E (0x005A)
#define MPI_MANUFACTPAGE_DEVID_SAS1068 (0x0054)
#define MPI_MANUFACTPAGE_DEVID_SAS1068E (0x0058)
+#define MPI_MANUFACTPAGE_DEVID_SAS1068_820XELP (0x0059)
#define MPI_MANUFACTPAGE_DEVID_SAS1078 (0x0062)
diff --git a/drivers/message/fusion/lsi/mpi_ioc.h b/drivers/message/fusion/lsi/mpi_ioc.h
index fd6222882a0e..19fb21b8f0ce 100644
--- a/drivers/message/fusion/lsi/mpi_ioc.h
+++ b/drivers/message/fusion/lsi/mpi_ioc.h
@@ -857,7 +857,7 @@ typedef struct _EVENT_DATA_SAS_DISCOVERY
#define MPI_EVENT_SAS_DSCVRY_PHY_BITS_MASK (0xFFFF0000)
#define MPI_EVENT_SAS_DSCVRY_PHY_BITS_SHIFT (16)
-/* SAS Discovery Errror Event data */
+/* SAS Discovery Error Event data */
typedef struct _EVENT_DATA_DISCOVERY_ERROR
{
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index e9c6a6047a00..a7dc4672d996 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -115,7 +115,8 @@ module_param(mpt_fwfault_debug, int, 0600);
MODULE_PARM_DESC(mpt_fwfault_debug,
"Enable detection of Firmware fault and halt Firmware on fault - (default=0)");
-static char MptCallbacksName[MPT_MAX_PROTOCOL_DRIVERS][50];
+static char MptCallbacksName[MPT_MAX_PROTOCOL_DRIVERS]
+ [MPT_MAX_CALLBACKNAME_LEN+1];
#ifdef MFCNT
static int mfcounter = 0;
@@ -717,8 +718,8 @@ mpt_register(MPT_CALLBACK cbfunc, MPT_DRIVER_CLASS dclass, char *func_name)
MptDriverClass[cb_idx] = dclass;
MptEvHandlers[cb_idx] = NULL;
last_drv_idx = cb_idx;
- memcpy(MptCallbacksName[cb_idx], func_name,
- strlen(func_name) > 50 ? 50 : strlen(func_name));
+ strlcpy(MptCallbacksName[cb_idx], func_name,
+ MPT_MAX_CALLBACKNAME_LEN+1);
break;
}
}
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index b4d24dc081ae..76c05bc24cb7 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -89,6 +89,7 @@
*/
#define MPT_MAX_ADAPTERS 18
#define MPT_MAX_PROTOCOL_DRIVERS 16
+#define MPT_MAX_CALLBACKNAME_LEN 49
#define MPT_MAX_BUS 1 /* Do not change */
#define MPT_MAX_FC_DEVICES 255
#define MPT_MAX_SCSI_DEVICES 16
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 9d9504298549..551262e4b96e 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -5376,6 +5376,8 @@ static struct pci_device_id mptsas_pci_table[] = {
PCI_ANY_ID, PCI_ANY_ID },
{ PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVID_SAS1078,
PCI_ANY_ID, PCI_ANY_ID },
+ { PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVID_SAS1068_820XELP,
+ PCI_ANY_ID, PCI_ANY_ID },
{0} /* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, mptsas_pci_table);
diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
index 07dbeaf9df99..6d115c7208ab 100644
--- a/drivers/message/i2o/i2o_proc.c
+++ b/drivers/message/i2o/i2o_proc.c
@@ -56,7 +56,7 @@
/* Structure used to define /proc entries */
typedef struct _i2o_proc_entry_t {
char *name; /* entry name */
- mode_t mode; /* mode */
+ umode_t mode; /* mode */
const struct file_operations *fops; /* open function */
} i2o_proc_entry;
diff --git a/drivers/mfd/88pm860x-i2c.c b/drivers/mfd/88pm860x-i2c.c
index e017dc88622a..f93dd9571c3c 100644
--- a/drivers/mfd/88pm860x-i2c.c
+++ b/drivers/mfd/88pm860x-i2c.c
@@ -12,51 +12,20 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/regmap.h>
#include <linux/mfd/88pm860x.h>
#include <linux/slab.h>
-static inline int pm860x_read_device(struct i2c_client *i2c,
- int reg, int bytes, void *dest)
-{
- unsigned char data;
- int ret;
-
- data = (unsigned char)reg;
- ret = i2c_master_send(i2c, &data, 1);
- if (ret < 0)
- return ret;
-
- ret = i2c_master_recv(i2c, dest, bytes);
- if (ret < 0)
- return ret;
- return 0;
-}
-
-static inline int pm860x_write_device(struct i2c_client *i2c,
- int reg, int bytes, void *src)
-{
- unsigned char buf[bytes + 1];
- int ret;
-
- buf[0] = (unsigned char)reg;
- memcpy(&buf[1], src, bytes);
-
- ret = i2c_master_send(i2c, buf, bytes + 1);
- if (ret < 0)
- return ret;
- return 0;
-}
-
int pm860x_reg_read(struct i2c_client *i2c, int reg)
{
struct pm860x_chip *chip = i2c_get_clientdata(i2c);
- unsigned char data;
+ struct regmap *map = (i2c == chip->client) ? chip->regmap
+ : chip->regmap_companion;
+ unsigned int data;
int ret;
- mutex_lock(&chip->io_lock);
- ret = pm860x_read_device(i2c, reg, 1, &data);
- mutex_unlock(&chip->io_lock);
-
+ ret = regmap_read(map, reg, &data);
if (ret < 0)
return ret;
else
@@ -68,12 +37,11 @@ int pm860x_reg_write(struct i2c_client *i2c, int reg,
unsigned char data)
{
struct pm860x_chip *chip = i2c_get_clientdata(i2c);
+ struct regmap *map = (i2c == chip->client) ? chip->regmap
+ : chip->regmap_companion;
int ret;
- mutex_lock(&chip->io_lock);
- ret = pm860x_write_device(i2c, reg, 1, &data);
- mutex_unlock(&chip->io_lock);
-
+ ret = regmap_write(map, reg, data);
return ret;
}
EXPORT_SYMBOL(pm860x_reg_write);
@@ -82,12 +50,11 @@ int pm860x_bulk_read(struct i2c_client *i2c, int reg,
int count, unsigned char *buf)
{
struct pm860x_chip *chip = i2c_get_clientdata(i2c);
+ struct regmap *map = (i2c == chip->client) ? chip->regmap
+ : chip->regmap_companion;
int ret;
- mutex_lock(&chip->io_lock);
- ret = pm860x_read_device(i2c, reg, count, buf);
- mutex_unlock(&chip->io_lock);
-
+ ret = regmap_raw_read(map, reg, buf, count);
return ret;
}
EXPORT_SYMBOL(pm860x_bulk_read);
@@ -96,12 +63,11 @@ int pm860x_bulk_write(struct i2c_client *i2c, int reg,
int count, unsigned char *buf)
{
struct pm860x_chip *chip = i2c_get_clientdata(i2c);
+ struct regmap *map = (i2c == chip->client) ? chip->regmap
+ : chip->regmap_companion;
int ret;
- mutex_lock(&chip->io_lock);
- ret = pm860x_write_device(i2c, reg, count, buf);
- mutex_unlock(&chip->io_lock);
-
+ ret = regmap_raw_write(map, reg, buf, count);
return ret;
}
EXPORT_SYMBOL(pm860x_bulk_write);
@@ -110,39 +76,78 @@ int pm860x_set_bits(struct i2c_client *i2c, int reg,
unsigned char mask, unsigned char data)
{
struct pm860x_chip *chip = i2c_get_clientdata(i2c);
- unsigned char value;
+ struct regmap *map = (i2c == chip->client) ? chip->regmap
+ : chip->regmap_companion;
int ret;
- mutex_lock(&chip->io_lock);
- ret = pm860x_read_device(i2c, reg, 1, &value);
- if (ret < 0)
- goto out;
- value &= ~mask;
- value |= data;
- ret = pm860x_write_device(i2c, reg, 1, &value);
-out:
- mutex_unlock(&chip->io_lock);
+ ret = regmap_update_bits(map, reg, mask, data);
return ret;
}
EXPORT_SYMBOL(pm860x_set_bits);
+static int read_device(struct i2c_client *i2c, int reg,
+ int bytes, void *dest)
+{
+ unsigned char msgbuf0[I2C_SMBUS_BLOCK_MAX + 3];
+ unsigned char msgbuf1[I2C_SMBUS_BLOCK_MAX + 2];
+ struct i2c_adapter *adap = i2c->adapter;
+ struct i2c_msg msg[2] = {{i2c->addr, 0, 1, msgbuf0},
+ {i2c->addr, I2C_M_RD, 0, msgbuf1},
+ };
+ int num = 1, ret = 0;
+
+ if (dest == NULL)
+ return -EINVAL;
+ msgbuf0[0] = (unsigned char)reg; /* command */
+ msg[1].len = bytes;
+
+ /* if data needs to be read back, num should be 2 */
+ if (bytes > 0)
+ num = 2;
+ ret = adap->algo->master_xfer(adap, msg, num);
+ memcpy(dest, msgbuf1, bytes);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+static int write_device(struct i2c_client *i2c, int reg,
+ int bytes, void *src)
+{
+ unsigned char buf[bytes + 1];
+ struct i2c_adapter *adap = i2c->adapter;
+ struct i2c_msg msg;
+ int ret;
+
+ buf[0] = (unsigned char)reg;
+ memcpy(&buf[1], src, bytes);
+ msg.addr = i2c->addr;
+ msg.flags = 0;
+ msg.len = bytes + 1;
+ msg.buf = buf;
+
+ ret = adap->algo->master_xfer(adap, &msg, 1);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
int pm860x_page_reg_read(struct i2c_client *i2c, int reg)
{
- struct pm860x_chip *chip = i2c_get_clientdata(i2c);
unsigned char zero = 0;
unsigned char data;
int ret;
- mutex_lock(&chip->io_lock);
- pm860x_write_device(i2c, 0xFA, 0, &zero);
- pm860x_write_device(i2c, 0xFB, 0, &zero);
- pm860x_write_device(i2c, 0xFF, 0, &zero);
- ret = pm860x_read_device(i2c, reg, 1, &data);
+ i2c_lock_adapter(i2c->adapter);
+ read_device(i2c, 0xFA, 0, &zero);
+ read_device(i2c, 0xFB, 0, &zero);
+ read_device(i2c, 0xFF, 0, &zero);
+ ret = read_device(i2c, reg, 1, &data);
if (ret >= 0)
ret = (int)data;
- pm860x_write_device(i2c, 0xFE, 0, &zero);
- pm860x_write_device(i2c, 0xFC, 0, &zero);
- mutex_unlock(&chip->io_lock);
+ read_device(i2c, 0xFE, 0, &zero);
+ read_device(i2c, 0xFC, 0, &zero);
+ i2c_unlock_adapter(i2c->adapter);
return ret;
}
EXPORT_SYMBOL(pm860x_page_reg_read);
@@ -150,18 +155,17 @@ EXPORT_SYMBOL(pm860x_page_reg_read);
int pm860x_page_reg_write(struct i2c_client *i2c, int reg,
unsigned char data)
{
- struct pm860x_chip *chip = i2c_get_clientdata(i2c);
unsigned char zero;
int ret;
- mutex_lock(&chip->io_lock);
- pm860x_write_device(i2c, 0xFA, 0, &zero);
- pm860x_write_device(i2c, 0xFB, 0, &zero);
- pm860x_write_device(i2c, 0xFF, 0, &zero);
- ret = pm860x_write_device(i2c, reg, 1, &data);
- pm860x_write_device(i2c, 0xFE, 0, &zero);
- pm860x_write_device(i2c, 0xFC, 0, &zero);
- mutex_unlock(&chip->io_lock);
+ i2c_lock_adapter(i2c->adapter);
+ read_device(i2c, 0xFA, 0, &zero);
+ read_device(i2c, 0xFB, 0, &zero);
+ read_device(i2c, 0xFF, 0, &zero);
+ ret = write_device(i2c, reg, 1, &data);
+ read_device(i2c, 0xFE, 0, &zero);
+ read_device(i2c, 0xFC, 0, &zero);
+ i2c_unlock_adapter(i2c->adapter);
return ret;
}
EXPORT_SYMBOL(pm860x_page_reg_write);
@@ -169,18 +173,17 @@ EXPORT_SYMBOL(pm860x_page_reg_write);
int pm860x_page_bulk_read(struct i2c_client *i2c, int reg,
int count, unsigned char *buf)
{
- struct pm860x_chip *chip = i2c_get_clientdata(i2c);
unsigned char zero = 0;
int ret;
- mutex_lock(&chip->io_lock);
- pm860x_write_device(i2c, 0xFA, 0, &zero);
- pm860x_write_device(i2c, 0xFB, 0, &zero);
- pm860x_write_device(i2c, 0xFF, 0, &zero);
- ret = pm860x_read_device(i2c, reg, count, buf);
- pm860x_write_device(i2c, 0xFE, 0, &zero);
- pm860x_write_device(i2c, 0xFC, 0, &zero);
- mutex_unlock(&chip->io_lock);
+ i2c_lock_adapter(i2c->adapter);
+ read_device(i2c, 0xfa, 0, &zero);
+ read_device(i2c, 0xfb, 0, &zero);
+ read_device(i2c, 0xff, 0, &zero);
+ ret = read_device(i2c, reg, count, buf);
+ read_device(i2c, 0xFE, 0, &zero);
+ read_device(i2c, 0xFC, 0, &zero);
+ i2c_unlock_adapter(i2c->adapter);
return ret;
}
EXPORT_SYMBOL(pm860x_page_bulk_read);
@@ -188,18 +191,18 @@ EXPORT_SYMBOL(pm860x_page_bulk_read);
int pm860x_page_bulk_write(struct i2c_client *i2c, int reg,
int count, unsigned char *buf)
{
- struct pm860x_chip *chip = i2c_get_clientdata(i2c);
unsigned char zero = 0;
int ret;
- mutex_lock(&chip->io_lock);
- pm860x_write_device(i2c, 0xFA, 0, &zero);
- pm860x_write_device(i2c, 0xFB, 0, &zero);
- pm860x_write_device(i2c, 0xFF, 0, &zero);
- ret = pm860x_write_device(i2c, reg, count, buf);
- pm860x_write_device(i2c, 0xFE, 0, &zero);
- pm860x_write_device(i2c, 0xFC, 0, &zero);
- mutex_unlock(&chip->io_lock);
+ i2c_lock_adapter(i2c->adapter);
+ read_device(i2c, 0xFA, 0, &zero);
+ read_device(i2c, 0xFB, 0, &zero);
+ read_device(i2c, 0xFF, 0, &zero);
+ ret = write_device(i2c, reg, count, buf);
+ read_device(i2c, 0xFE, 0, &zero);
+ read_device(i2c, 0xFC, 0, &zero);
+ i2c_unlock_adapter(i2c->adapter);
return ret;
}
EXPORT_SYMBOL(pm860x_page_bulk_write);
@@ -207,25 +210,24 @@ EXPORT_SYMBOL(pm860x_page_bulk_write);
int pm860x_page_set_bits(struct i2c_client *i2c, int reg,
unsigned char mask, unsigned char data)
{
- struct pm860x_chip *chip = i2c_get_clientdata(i2c);
unsigned char zero;
unsigned char value;
int ret;
- mutex_lock(&chip->io_lock);
- pm860x_write_device(i2c, 0xFA, 0, &zero);
- pm860x_write_device(i2c, 0xFB, 0, &zero);
- pm860x_write_device(i2c, 0xFF, 0, &zero);
- ret = pm860x_read_device(i2c, reg, 1, &value);
+ i2c_lock_adapter(i2c->adapter);
+ read_device(i2c, 0xFA, 0, &zero);
+ read_device(i2c, 0xFB, 0, &zero);
+ read_device(i2c, 0xFF, 0, &zero);
+ ret = read_device(i2c, reg, 1, &value);
if (ret < 0)
goto out;
value &= ~mask;
value |= data;
- ret = pm860x_write_device(i2c, reg, 1, &value);
+ ret = write_device(i2c, reg, 1, &value);
out:
- pm860x_write_device(i2c, 0xFE, 0, &zero);
- pm860x_write_device(i2c, 0xFC, 0, &zero);
- mutex_unlock(&chip->io_lock);
+ read_device(i2c, 0xFE, 0, &zero);
+ read_device(i2c, 0xFC, 0, &zero);
+ i2c_unlock_adapter(i2c->adapter);
return ret;
}
EXPORT_SYMBOL(pm860x_page_set_bits);
@@ -257,11 +259,17 @@ static int verify_addr(struct i2c_client *i2c)
return 0;
}
+static struct regmap_config pm860x_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
static int __devinit pm860x_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct pm860x_platform_data *pdata = client->dev.platform_data;
struct pm860x_chip *chip;
+ int ret;
if (!pdata) {
pr_info("No platform data in %s!\n", __func__);
@@ -273,10 +281,17 @@ static int __devinit pm860x_probe(struct i2c_client *client,
return -ENOMEM;
chip->id = verify_addr(client);
+ chip->regmap = regmap_init_i2c(client, &pm860x_regmap_config);
+ if (IS_ERR(chip->regmap)) {
+ ret = PTR_ERR(chip->regmap);
+ dev_err(&client->dev, "Failed to allocate register map: %d\n",
+ ret);
+ kfree(chip);
+ return ret;
+ }
chip->client = client;
i2c_set_clientdata(client, chip);
chip->dev = &client->dev;
- mutex_init(&chip->io_lock);
dev_set_drvdata(chip->dev, chip);
/*
@@ -290,6 +305,14 @@ static int __devinit pm860x_probe(struct i2c_client *client,
chip->companion_addr = pdata->companion_addr;
chip->companion = i2c_new_dummy(chip->client->adapter,
chip->companion_addr);
+ chip->regmap_companion = regmap_init_i2c(chip->companion,
+ &pm860x_regmap_config);
+ if (IS_ERR(chip->regmap_companion)) {
+ ret = PTR_ERR(chip->regmap_companion);
+ dev_err(&chip->companion->dev,
+ "Failed to allocate register map: %d\n", ret);
+ return ret;
+ }
i2c_set_clientdata(chip->companion, chip);
}
@@ -302,7 +325,11 @@ static int __devexit pm860x_remove(struct i2c_client *client)
struct pm860x_chip *chip = i2c_get_clientdata(client);
pm860x_device_exit(chip);
- i2c_unregister_device(chip->companion);
+ if (chip->companion) {
+ regmap_exit(chip->regmap_companion);
+ i2c_unregister_device(chip->companion);
+ }
+ regmap_exit(chip->regmap);
kfree(chip);
return 0;
}
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index f1391c21ef26..cd13e9f2f5e6 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -12,6 +12,7 @@ config MFD_CORE
config MFD_88PM860X
bool "Support Marvell 88PM8606/88PM8607"
depends on I2C=y && GENERIC_HARDIRQS
+ select REGMAP_I2C
select MFD_CORE
help
 This supports the Marvell 88PM8606/88PM8607 Power Management IC.
@@ -199,7 +200,7 @@ config MENELAUS
config TWL4030_CORE
bool "Texas Instruments TWL4030/TWL5030/TWL6030/TPS659x0 Support"
- depends on I2C=y && GENERIC_HARDIRQS
+ depends on I2C=y && GENERIC_HARDIRQS && IRQ_DOMAIN
help
Say yes here if you have TWL4030 / TWL6030 family chip on your board.
This core driver provides register access and IRQ handling
@@ -257,7 +258,7 @@ config TWL6040_CORE
config MFD_STMPE
bool "Support STMicroelectronics STMPE"
- depends on I2C=y && GENERIC_HARDIRQS
+ depends on (I2C=y || SPI_MASTER=y) && GENERIC_HARDIRQS
select MFD_CORE
help
Support for the STMPE family of I/O Expanders from
@@ -278,6 +279,23 @@ config MFD_STMPE
Keypad: stmpe-keypad
Touchscreen: stmpe-ts
+menu "STMPE Interface Drivers"
+depends on MFD_STMPE
+
+config STMPE_I2C
+ bool "STMPE I2C Inteface"
+ depends on I2C=y
+ default y
+ help
+ This is used to enable the I2C interface of STMPE
+
+config STMPE_SPI
+ bool "STMPE SPI Inteface"
+ depends on SPI_MASTER
+ help
+ This is used to enable the SPI interface of STMPE
+endmenu
+
config MFD_TC3589X
bool "Support Toshiba TC35892 and variants"
depends on I2C=y && GENERIC_HARDIRQS
@@ -311,7 +329,7 @@ config MFD_TC6387XB
config MFD_TC6393XB
bool "Support Toshiba TC6393XB"
- depends on GPIOLIB && ARM
+ depends on GPIOLIB && ARM && HAVE_CLK
select MFD_CORE
select MFD_TMIO
help
@@ -328,6 +346,34 @@ config PMIC_DA903X
individual components like LCD backlight, voltage regulators,
LEDs and battery-charger under the corresponding menus.
+config PMIC_DA9052
+ bool
+ select MFD_CORE
+
+config MFD_DA9052_SPI
+ bool "Support Dialog Semiconductor DA9052/53 PMIC variants with SPI"
+ select REGMAP_SPI
+ select REGMAP_IRQ
+ select PMIC_DA9052
+ depends on SPI_MASTER=y
+ help
+ Support for the Dialog Semiconductor DA9052 PMIC
+ when controlled using SPI. This driver provides common support
+ for accessing the device, additional drivers must be enabled in
+ order to use the functionality of the device.
+
+config MFD_DA9052_I2C
+ bool "Support Dialog Semiconductor DA9052/53 PMIC variants with I2C"
+ select REGMAP_I2C
+ select REGMAP_IRQ
+ select PMIC_DA9052
+ depends on I2C=y
+ help
+ Support for the Dialog Semiconductor DA9052 PMIC
+ when controlled using I2C. This driver provides common support
+ for accessing the device, additional drivers must be enabled in
+ order to use the functionality of the device.
+
config PMIC_ADP5520
bool "Analog Devices ADP5520/01 MFD PMIC Core Support"
depends on I2C=y
@@ -371,6 +417,17 @@ config MFD_MAX8998
additional drivers must be enabled in order to use the functionality
of the device.
+config MFD_S5M_CORE
+ bool "SAMSUNG S5M Series Support"
+ depends on I2C=y && GENERIC_HARDIRQS
+ select MFD_CORE
+ select REGMAP_I2C
+ help
+ Support for the Samsung Electronics S5M MFD series.
+ This driver provides common support for accessing the device,
+ additional drivers must be enabled in order to use the functionality
+ of the device
+
config MFD_WM8400
tristate "Support Wolfson Microelectronics WM8400"
select MFD_CORE
@@ -477,6 +534,7 @@ config MFD_WM8994
bool "Support Wolfson Microelectronics WM8994"
select MFD_CORE
select REGMAP_I2C
+ select REGMAP_IRQ
depends on I2C=y && GENERIC_HARDIRQS
help
The WM8994 is a highly integrated hi-fi CODEC designed for
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index b2292eb75242..b953bab934f7 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -16,6 +16,8 @@ obj-$(CONFIG_MFD_DM355EVM_MSP) += dm355evm_msp.o
obj-$(CONFIG_MFD_TI_SSP) += ti-ssp.o
obj-$(CONFIG_MFD_STMPE) += stmpe.o
+obj-$(CONFIG_STMPE_I2C) += stmpe-i2c.o
+obj-$(CONFIG_STMPE_SPI) += stmpe-spi.o
obj-$(CONFIG_MFD_TC3589X) += tc3589x.o
obj-$(CONFIG_MFD_T7L66XB) += t7l66xb.o tmio_core.o
obj-$(CONFIG_MFD_TC6387XB) += tc6387xb.o tmio_core.o
@@ -31,7 +33,7 @@ wm8350-objs := wm8350-core.o wm8350-regmap.o wm8350-gpio.o
wm8350-objs += wm8350-irq.o
obj-$(CONFIG_MFD_WM8350) += wm8350.o
obj-$(CONFIG_MFD_WM8350_I2C) += wm8350-i2c.o
-obj-$(CONFIG_MFD_WM8994) += wm8994-core.o wm8994-irq.o
+obj-$(CONFIG_MFD_WM8994) += wm8994-core.o wm8994-irq.o wm8994-regmap.o
obj-$(CONFIG_TPS6105X) += tps6105x.o
obj-$(CONFIG_TPS65010) += tps65010.o
@@ -67,6 +69,11 @@ endif
obj-$(CONFIG_UCB1400_CORE) += ucb1400_core.o
obj-$(CONFIG_PMIC_DA903X) += da903x.o
+
+obj-$(CONFIG_PMIC_DA9052) += da9052-core.o
+obj-$(CONFIG_MFD_DA9052_SPI) += da9052-spi.o
+obj-$(CONFIG_MFD_DA9052_I2C) += da9052-i2c.o
+
max8925-objs := max8925-core.o max8925-i2c.o
obj-$(CONFIG_MFD_MAX8925) += max8925.o
obj-$(CONFIG_MFD_MAX8997) += max8997.o max8997-irq.o
@@ -104,3 +111,4 @@ obj-$(CONFIG_MFD_PM8XXX_IRQ) += pm8xxx-irq.o
obj-$(CONFIG_TPS65911_COMPARATOR) += tps65911-comparator.o
obj-$(CONFIG_MFD_AAT2870_CORE) += aat2870-core.o
obj-$(CONFIG_MFD_INTEL_MSIC) += intel_msic.o
+obj-$(CONFIG_MFD_S5M_CORE) += s5m-core.o s5m-irq.o
diff --git a/drivers/mfd/aat2870-core.c b/drivers/mfd/aat2870-core.c
index 02c42015ba51..3aa36eb5c79b 100644
--- a/drivers/mfd/aat2870-core.c
+++ b/drivers/mfd/aat2870-core.c
@@ -407,13 +407,13 @@ static int aat2870_i2c_probe(struct i2c_client *client,
aat2870->init(aat2870);
if (aat2870->en_pin >= 0) {
- ret = gpio_request(aat2870->en_pin, "aat2870-en");
+ ret = gpio_request_one(aat2870->en_pin, GPIOF_OUT_INIT_HIGH,
+ "aat2870-en");
if (ret < 0) {
dev_err(&client->dev,
"Failed to request GPIO %d\n", aat2870->en_pin);
goto out_kfree;
}
- gpio_direction_output(aat2870->en_pin, 1);
}
aat2870_enable(aat2870);
@@ -468,9 +468,10 @@ static int aat2870_i2c_remove(struct i2c_client *client)
return 0;
}
-#ifdef CONFIG_PM
-static int aat2870_i2c_suspend(struct i2c_client *client, pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+static int aat2870_i2c_suspend(struct device *dev)
{
+ struct i2c_client *client = to_i2c_client(dev);
struct aat2870_data *aat2870 = i2c_get_clientdata(client);
aat2870_disable(aat2870);
@@ -478,8 +479,9 @@ static int aat2870_i2c_suspend(struct i2c_client *client, pm_message_t state)
return 0;
}
-static int aat2870_i2c_resume(struct i2c_client *client)
+static int aat2870_i2c_resume(struct device *dev)
{
+ struct i2c_client *client = to_i2c_client(dev);
struct aat2870_data *aat2870 = i2c_get_clientdata(client);
struct aat2870_register *reg = NULL;
int i;
@@ -495,12 +497,12 @@ static int aat2870_i2c_resume(struct i2c_client *client)
return 0;
}
-#else
-#define aat2870_i2c_suspend NULL
-#define aat2870_i2c_resume NULL
-#endif /* CONFIG_PM */
+#endif /* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(aat2870_pm_ops, aat2870_i2c_suspend,
+ aat2870_i2c_resume);
-static struct i2c_device_id aat2870_i2c_id_table[] = {
+static const struct i2c_device_id aat2870_i2c_id_table[] = {
{ "aat2870", 0 },
{ }
};
@@ -510,11 +512,10 @@ static struct i2c_driver aat2870_i2c_driver = {
.driver = {
.name = "aat2870",
.owner = THIS_MODULE,
+ .pm = &aat2870_pm_ops,
},
.probe = aat2870_i2c_probe,
.remove = aat2870_i2c_remove,
- .suspend = aat2870_i2c_suspend,
- .resume = aat2870_i2c_resume,
.id_table = aat2870_i2c_id_table,
};
diff --git a/drivers/mfd/ab5500-core.c b/drivers/mfd/ab5500-core.c
index ec10629a0b0b..bd56a764dea1 100644
--- a/drivers/mfd/ab5500-core.c
+++ b/drivers/mfd/ab5500-core.c
@@ -22,8 +22,8 @@
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/random.h>
-#include <linux/mfd/ab5500/ab5500.h>
#include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ab5500.h>
#include <linux/list.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
diff --git a/drivers/mfd/ab5500-debugfs.c b/drivers/mfd/ab5500-debugfs.c
index 43c0ebb81956..72006940937a 100644
--- a/drivers/mfd/ab5500-debugfs.c
+++ b/drivers/mfd/ab5500-debugfs.c
@@ -4,11 +4,11 @@
* Debugfs support for the AB5500 MFD driver
*/
-#include <linux/export.h>
+#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
-#include <linux/mfd/ab5500/ab5500.h>
#include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ab5500.h>
#include <linux/uaccess.h>
#include "ab5500-core.h"
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index 1e9173804ede..53e2a80f42fa 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -17,7 +17,7 @@
#include <linux/platform_device.h>
#include <linux/mfd/core.h>
#include <linux/mfd/abx500.h>
-#include <linux/mfd/ab8500.h>
+#include <linux/mfd/abx500/ab8500.h>
#include <linux/regulator/ab8500.h>
/*
@@ -620,6 +620,7 @@ static struct resource __devinitdata ab8500_fg_resources[] = {
static struct resource __devinitdata ab8500_chargalg_resources[] = {};
+#ifdef CONFIG_DEBUG_FS
static struct resource __devinitdata ab8500_debug_resources[] = {
{
.name = "IRQ_FIRST",
@@ -634,6 +635,7 @@ static struct resource __devinitdata ab8500_debug_resources[] = {
.flags = IORESOURCE_IRQ,
},
};
+#endif
static struct resource __devinitdata ab8500_usb_resources[] = {
{
diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
index dedb7f65cea6..9a0211aa8897 100644
--- a/drivers/mfd/ab8500-debugfs.c
+++ b/drivers/mfd/ab8500-debugfs.c
@@ -13,7 +13,7 @@
#include <linux/platform_device.h>
#include <linux/mfd/abx500.h>
-#include <linux/mfd/ab8500.h>
+#include <linux/mfd/abx500/ab8500.h>
static u32 debug_bank;
static u32 debug_address;
diff --git a/drivers/mfd/ab8500-gpadc.c b/drivers/mfd/ab8500-gpadc.c
index e985d1701a83..c39fc716e1dc 100644
--- a/drivers/mfd/ab8500-gpadc.c
+++ b/drivers/mfd/ab8500-gpadc.c
@@ -18,9 +18,9 @@
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/list.h>
-#include <linux/mfd/ab8500.h>
#include <linux/mfd/abx500.h>
-#include <linux/mfd/ab8500/gpadc.h>
+#include <linux/mfd/abx500/ab8500.h>
+#include <linux/mfd/abx500/ab8500-gpadc.h>
/*
* GPADC register offsets
diff --git a/drivers/mfd/ab8500-i2c.c b/drivers/mfd/ab8500-i2c.c
index 9be541c6b004..087fecd71ce0 100644
--- a/drivers/mfd/ab8500-i2c.c
+++ b/drivers/mfd/ab8500-i2c.c
@@ -10,7 +10,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/mfd/ab8500.h>
+#include <linux/mfd/abx500/ab8500.h>
#include <linux/mfd/db8500-prcmu.h>
static int ab8500_i2c_write(struct ab8500 *ab8500, u16 addr, u8 data)
diff --git a/drivers/mfd/ab8500-sysctrl.c b/drivers/mfd/ab8500-sysctrl.c
index f20feefac190..c28d4eb1eff0 100644
--- a/drivers/mfd/ab8500-sysctrl.c
+++ b/drivers/mfd/ab8500-sysctrl.c
@@ -7,9 +7,9 @@
#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/mfd/ab8500.h>
#include <linux/mfd/abx500.h>
-#include <linux/mfd/ab8500/sysctrl.h>
+#include <linux/mfd/abx500/ab8500.h>
+#include <linux/mfd/abx500/ab8500-sysctrl.h>
static struct device *sysctrl_dev;
diff --git a/drivers/mfd/adp5520.c b/drivers/mfd/adp5520.c
index f1d88483112c..8d816cce8322 100644
--- a/drivers/mfd/adp5520.c
+++ b/drivers/mfd/adp5520.c
@@ -109,7 +109,7 @@ int adp5520_set_bits(struct device *dev, int reg, uint8_t bit_mask)
ret = __adp5520_read(chip->client, reg, &reg_val);
- if (!ret && ((reg_val & bit_mask) == 0)) {
+ if (!ret && ((reg_val & bit_mask) != bit_mask)) {
reg_val |= bit_mask;
ret = __adp5520_write(chip->client, reg, reg_val);
}
diff --git a/drivers/mfd/cs5535-mfd.c b/drivers/mfd/cs5535-mfd.c
index 155fa0407882..315fef5d466a 100644
--- a/drivers/mfd/cs5535-mfd.c
+++ b/drivers/mfd/cs5535-mfd.c
@@ -172,14 +172,14 @@ static void __devexit cs5535_mfd_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-static struct pci_device_id cs5535_mfd_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(cs5535_mfd_pci_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_CS5535_ISA) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA) },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, cs5535_mfd_pci_tbl);
-static struct pci_driver cs5535_mfd_drv = {
+static struct pci_driver cs5535_mfd_driver = {
.name = DRV_NAME,
.id_table = cs5535_mfd_pci_tbl,
.probe = cs5535_mfd_probe,
@@ -188,12 +188,12 @@ static struct pci_driver cs5535_mfd_drv = {
static int __init cs5535_mfd_init(void)
{
- return pci_register_driver(&cs5535_mfd_drv);
+ return pci_register_driver(&cs5535_mfd_driver);
}
static void __exit cs5535_mfd_exit(void)
{
- pci_unregister_driver(&cs5535_mfd_drv);
+ pci_unregister_driver(&cs5535_mfd_driver);
}
module_init(cs5535_mfd_init);
diff --git a/drivers/mfd/da903x.c b/drivers/mfd/da903x.c
index 1b79c37fd599..1924b857a0fb 100644
--- a/drivers/mfd/da903x.c
+++ b/drivers/mfd/da903x.c
@@ -182,7 +182,7 @@ int da903x_set_bits(struct device *dev, int reg, uint8_t bit_mask)
if (ret)
goto out;
- if ((reg_val & bit_mask) == 0) {
+ if ((reg_val & bit_mask) != bit_mask) {
reg_val |= bit_mask;
ret = __da903x_write(chip->client, reg, reg_val);
}
@@ -549,6 +549,7 @@ static int __devexit da903x_remove(struct i2c_client *client)
struct da903x_chip *chip = i2c_get_clientdata(client);
da903x_remove_subdevs(chip);
+ free_irq(client->irq, chip);
kfree(chip);
return 0;
}
diff --git a/drivers/mfd/da9052-core.c b/drivers/mfd/da9052-core.c
new file mode 100644
index 000000000000..5ddde2a9176a
--- /dev/null
+++ b/drivers/mfd/da9052-core.c
@@ -0,0 +1,694 @@
+/*
+ * Device access for Dialog DA9052 PMICs.
+ *
+ * Copyright(c) 2011 Dialog Semiconductor Ltd.
+ *
+ * Author: David Dajun Chen <dchen@diasemi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/mutex.h>
+#include <linux/mfd/core.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+#include <linux/mfd/da9052/da9052.h>
+#include <linux/mfd/da9052/pdata.h>
+#include <linux/mfd/da9052/reg.h>
+
+#define DA9052_NUM_IRQ_REGS 4
+#define DA9052_IRQ_MASK_POS_1 0x01
+#define DA9052_IRQ_MASK_POS_2 0x02
+#define DA9052_IRQ_MASK_POS_3 0x04
+#define DA9052_IRQ_MASK_POS_4 0x08
+#define DA9052_IRQ_MASK_POS_5 0x10
+#define DA9052_IRQ_MASK_POS_6 0x20
+#define DA9052_IRQ_MASK_POS_7 0x40
+#define DA9052_IRQ_MASK_POS_8 0x80
+
+static bool da9052_reg_readable(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case DA9052_PAGE0_CON_REG:
+ case DA9052_STATUS_A_REG:
+ case DA9052_STATUS_B_REG:
+ case DA9052_STATUS_C_REG:
+ case DA9052_STATUS_D_REG:
+ case DA9052_EVENT_A_REG:
+ case DA9052_EVENT_B_REG:
+ case DA9052_EVENT_C_REG:
+ case DA9052_EVENT_D_REG:
+ case DA9052_FAULTLOG_REG:
+ case DA9052_IRQ_MASK_A_REG:
+ case DA9052_IRQ_MASK_B_REG:
+ case DA9052_IRQ_MASK_C_REG:
+ case DA9052_IRQ_MASK_D_REG:
+ case DA9052_CONTROL_A_REG:
+ case DA9052_CONTROL_B_REG:
+ case DA9052_CONTROL_C_REG:
+ case DA9052_CONTROL_D_REG:
+ case DA9052_PDDIS_REG:
+ case DA9052_INTERFACE_REG:
+ case DA9052_RESET_REG:
+ case DA9052_GPIO_0_1_REG:
+ case DA9052_GPIO_2_3_REG:
+ case DA9052_GPIO_4_5_REG:
+ case DA9052_GPIO_6_7_REG:
+ case DA9052_GPIO_14_15_REG:
+ case DA9052_ID_0_1_REG:
+ case DA9052_ID_2_3_REG:
+ case DA9052_ID_4_5_REG:
+ case DA9052_ID_6_7_REG:
+ case DA9052_ID_8_9_REG:
+ case DA9052_ID_10_11_REG:
+ case DA9052_ID_12_13_REG:
+ case DA9052_ID_14_15_REG:
+ case DA9052_ID_16_17_REG:
+ case DA9052_ID_18_19_REG:
+ case DA9052_ID_20_21_REG:
+ case DA9052_SEQ_STATUS_REG:
+ case DA9052_SEQ_A_REG:
+ case DA9052_SEQ_B_REG:
+ case DA9052_SEQ_TIMER_REG:
+ case DA9052_BUCKA_REG:
+ case DA9052_BUCKB_REG:
+ case DA9052_BUCKCORE_REG:
+ case DA9052_BUCKPRO_REG:
+ case DA9052_BUCKMEM_REG:
+ case DA9052_BUCKPERI_REG:
+ case DA9052_LDO1_REG:
+ case DA9052_LDO2_REG:
+ case DA9052_LDO3_REG:
+ case DA9052_LDO4_REG:
+ case DA9052_LDO5_REG:
+ case DA9052_LDO6_REG:
+ case DA9052_LDO7_REG:
+ case DA9052_LDO8_REG:
+ case DA9052_LDO9_REG:
+ case DA9052_LDO10_REG:
+ case DA9052_SUPPLY_REG:
+ case DA9052_PULLDOWN_REG:
+ case DA9052_CHGBUCK_REG:
+ case DA9052_WAITCONT_REG:
+ case DA9052_ISET_REG:
+ case DA9052_BATCHG_REG:
+ case DA9052_CHG_CONT_REG:
+ case DA9052_INPUT_CONT_REG:
+ case DA9052_CHG_TIME_REG:
+ case DA9052_BBAT_CONT_REG:
+ case DA9052_BOOST_REG:
+ case DA9052_LED_CONT_REG:
+ case DA9052_LEDMIN123_REG:
+ case DA9052_LED1_CONF_REG:
+ case DA9052_LED2_CONF_REG:
+ case DA9052_LED3_CONF_REG:
+ case DA9052_LED1CONT_REG:
+ case DA9052_LED2CONT_REG:
+ case DA9052_LED3CONT_REG:
+ case DA9052_LED_CONT_4_REG:
+ case DA9052_LED_CONT_5_REG:
+ case DA9052_ADC_MAN_REG:
+ case DA9052_ADC_CONT_REG:
+ case DA9052_ADC_RES_L_REG:
+ case DA9052_ADC_RES_H_REG:
+ case DA9052_VDD_RES_REG:
+ case DA9052_VDD_MON_REG:
+ case DA9052_ICHG_AV_REG:
+ case DA9052_ICHG_THD_REG:
+ case DA9052_ICHG_END_REG:
+ case DA9052_TBAT_RES_REG:
+ case DA9052_TBAT_HIGHP_REG:
+ case DA9052_TBAT_HIGHN_REG:
+ case DA9052_TBAT_LOW_REG:
+ case DA9052_T_OFFSET_REG:
+ case DA9052_ADCIN4_RES_REG:
+ case DA9052_AUTO4_HIGH_REG:
+ case DA9052_AUTO4_LOW_REG:
+ case DA9052_ADCIN5_RES_REG:
+ case DA9052_AUTO5_HIGH_REG:
+ case DA9052_AUTO5_LOW_REG:
+ case DA9052_ADCIN6_RES_REG:
+ case DA9052_AUTO6_HIGH_REG:
+ case DA9052_AUTO6_LOW_REG:
+ case DA9052_TJUNC_RES_REG:
+ case DA9052_TSI_CONT_A_REG:
+ case DA9052_TSI_CONT_B_REG:
+ case DA9052_TSI_X_MSB_REG:
+ case DA9052_TSI_Y_MSB_REG:
+ case DA9052_TSI_LSB_REG:
+ case DA9052_TSI_Z_MSB_REG:
+ case DA9052_COUNT_S_REG:
+ case DA9052_COUNT_MI_REG:
+ case DA9052_COUNT_H_REG:
+ case DA9052_COUNT_D_REG:
+ case DA9052_COUNT_MO_REG:
+ case DA9052_COUNT_Y_REG:
+ case DA9052_ALARM_MI_REG:
+ case DA9052_ALARM_H_REG:
+ case DA9052_ALARM_D_REG:
+ case DA9052_ALARM_MO_REG:
+ case DA9052_ALARM_Y_REG:
+ case DA9052_SECOND_A_REG:
+ case DA9052_SECOND_B_REG:
+ case DA9052_SECOND_C_REG:
+ case DA9052_SECOND_D_REG:
+ case DA9052_PAGE1_CON_REG:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool da9052_reg_writeable(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case DA9052_PAGE0_CON_REG:
+ case DA9052_EVENT_A_REG:
+ case DA9052_EVENT_B_REG:
+ case DA9052_EVENT_C_REG:
+ case DA9052_EVENT_D_REG:
+ case DA9052_IRQ_MASK_A_REG:
+ case DA9052_IRQ_MASK_B_REG:
+ case DA9052_IRQ_MASK_C_REG:
+ case DA9052_IRQ_MASK_D_REG:
+ case DA9052_CONTROL_A_REG:
+ case DA9052_CONTROL_B_REG:
+ case DA9052_CONTROL_C_REG:
+ case DA9052_CONTROL_D_REG:
+ case DA9052_PDDIS_REG:
+ case DA9052_RESET_REG:
+ case DA9052_GPIO_0_1_REG:
+ case DA9052_GPIO_2_3_REG:
+ case DA9052_GPIO_4_5_REG:
+ case DA9052_GPIO_6_7_REG:
+ case DA9052_GPIO_14_15_REG:
+ case DA9052_ID_0_1_REG:
+ case DA9052_ID_2_3_REG:
+ case DA9052_ID_4_5_REG:
+ case DA9052_ID_6_7_REG:
+ case DA9052_ID_8_9_REG:
+ case DA9052_ID_10_11_REG:
+ case DA9052_ID_12_13_REG:
+ case DA9052_ID_14_15_REG:
+ case DA9052_ID_16_17_REG:
+ case DA9052_ID_18_19_REG:
+ case DA9052_ID_20_21_REG:
+ case DA9052_SEQ_STATUS_REG:
+ case DA9052_SEQ_A_REG:
+ case DA9052_SEQ_B_REG:
+ case DA9052_SEQ_TIMER_REG:
+ case DA9052_BUCKA_REG:
+ case DA9052_BUCKB_REG:
+ case DA9052_BUCKCORE_REG:
+ case DA9052_BUCKPRO_REG:
+ case DA9052_BUCKMEM_REG:
+ case DA9052_BUCKPERI_REG:
+ case DA9052_LDO1_REG:
+ case DA9052_LDO2_REG:
+ case DA9052_LDO3_REG:
+ case DA9052_LDO4_REG:
+ case DA9052_LDO5_REG:
+ case DA9052_LDO6_REG:
+ case DA9052_LDO7_REG:
+ case DA9052_LDO8_REG:
+ case DA9052_LDO9_REG:
+ case DA9052_LDO10_REG:
+ case DA9052_SUPPLY_REG:
+ case DA9052_PULLDOWN_REG:
+ case DA9052_CHGBUCK_REG:
+ case DA9052_WAITCONT_REG:
+ case DA9052_ISET_REG:
+ case DA9052_BATCHG_REG:
+ case DA9052_CHG_CONT_REG:
+ case DA9052_INPUT_CONT_REG:
+ case DA9052_BBAT_CONT_REG:
+ case DA9052_BOOST_REG:
+ case DA9052_LED_CONT_REG:
+ case DA9052_LEDMIN123_REG:
+ case DA9052_LED1_CONF_REG:
+ case DA9052_LED2_CONF_REG:
+ case DA9052_LED3_CONF_REG:
+ case DA9052_LED1CONT_REG:
+ case DA9052_LED2CONT_REG:
+ case DA9052_LED3CONT_REG:
+ case DA9052_LED_CONT_4_REG:
+ case DA9052_LED_CONT_5_REG:
+ case DA9052_ADC_MAN_REG:
+ case DA9052_ADC_CONT_REG:
+ case DA9052_ADC_RES_L_REG:
+ case DA9052_ADC_RES_H_REG:
+ case DA9052_VDD_RES_REG:
+ case DA9052_VDD_MON_REG:
+ case DA9052_ICHG_THD_REG:
+ case DA9052_ICHG_END_REG:
+ case DA9052_TBAT_HIGHP_REG:
+ case DA9052_TBAT_HIGHN_REG:
+ case DA9052_TBAT_LOW_REG:
+ case DA9052_T_OFFSET_REG:
+ case DA9052_AUTO4_HIGH_REG:
+ case DA9052_AUTO4_LOW_REG:
+ case DA9052_AUTO5_HIGH_REG:
+ case DA9052_AUTO5_LOW_REG:
+ case DA9052_AUTO6_HIGH_REG:
+ case DA9052_AUTO6_LOW_REG:
+ case DA9052_TSI_CONT_A_REG:
+ case DA9052_TSI_CONT_B_REG:
+ case DA9052_COUNT_S_REG:
+ case DA9052_COUNT_MI_REG:
+ case DA9052_COUNT_H_REG:
+ case DA9052_COUNT_D_REG:
+ case DA9052_COUNT_MO_REG:
+ case DA9052_COUNT_Y_REG:
+ case DA9052_ALARM_MI_REG:
+ case DA9052_ALARM_H_REG:
+ case DA9052_ALARM_D_REG:
+ case DA9052_ALARM_MO_REG:
+ case DA9052_ALARM_Y_REG:
+ case DA9052_PAGE1_CON_REG:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool da9052_reg_volatile(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case DA9052_STATUS_A_REG:
+ case DA9052_STATUS_B_REG:
+ case DA9052_STATUS_C_REG:
+ case DA9052_STATUS_D_REG:
+ case DA9052_EVENT_A_REG:
+ case DA9052_EVENT_B_REG:
+ case DA9052_EVENT_C_REG:
+ case DA9052_EVENT_D_REG:
+ case DA9052_FAULTLOG_REG:
+ case DA9052_CHG_TIME_REG:
+ case DA9052_ADC_RES_L_REG:
+ case DA9052_ADC_RES_H_REG:
+ case DA9052_VDD_RES_REG:
+ case DA9052_ICHG_AV_REG:
+ case DA9052_TBAT_RES_REG:
+ case DA9052_ADCIN4_RES_REG:
+ case DA9052_ADCIN5_RES_REG:
+ case DA9052_ADCIN6_RES_REG:
+ case DA9052_TJUNC_RES_REG:
+ case DA9052_TSI_X_MSB_REG:
+ case DA9052_TSI_Y_MSB_REG:
+ case DA9052_TSI_LSB_REG:
+ case DA9052_TSI_Z_MSB_REG:
+ case DA9052_COUNT_S_REG:
+ case DA9052_COUNT_MI_REG:
+ case DA9052_COUNT_H_REG:
+ case DA9052_COUNT_D_REG:
+ case DA9052_COUNT_MO_REG:
+ case DA9052_COUNT_Y_REG:
+ case DA9052_ALARM_MI_REG:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static struct resource da9052_rtc_resource = {
+ .name = "ALM",
+ .start = DA9052_IRQ_ALARM,
+ .end = DA9052_IRQ_ALARM,
+ .flags = IORESOURCE_IRQ,
+};
+
+static struct resource da9052_onkey_resource = {
+ .name = "ONKEY",
+ .start = DA9052_IRQ_NONKEY,
+ .end = DA9052_IRQ_NONKEY,
+ .flags = IORESOURCE_IRQ,
+};
+
+static struct resource da9052_bat_resources[] = {
+ {
+ .name = "BATT TEMP",
+ .start = DA9052_IRQ_TBAT,
+ .end = DA9052_IRQ_TBAT,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "DCIN DET",
+ .start = DA9052_IRQ_DCIN,
+ .end = DA9052_IRQ_DCIN,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "DCIN REM",
+ .start = DA9052_IRQ_DCINREM,
+ .end = DA9052_IRQ_DCINREM,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "VBUS DET",
+ .start = DA9052_IRQ_VBUS,
+ .end = DA9052_IRQ_VBUS,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "VBUS REM",
+ .start = DA9052_IRQ_VBUSREM,
+ .end = DA9052_IRQ_VBUSREM,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "CHG END",
+ .start = DA9052_IRQ_CHGEND,
+ .end = DA9052_IRQ_CHGEND,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct resource da9052_tsi_resources[] = {
+ {
+ .name = "PENDWN",
+ .start = DA9052_IRQ_PENDOWN,
+ .end = DA9052_IRQ_PENDOWN,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "TSIRDY",
+ .start = DA9052_IRQ_TSIREADY,
+ .end = DA9052_IRQ_TSIREADY,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct mfd_cell __devinitdata da9052_subdev_info[] = {
+ {
+ .name = "da9052-regulator",
+ .id = 1,
+ },
+ {
+ .name = "da9052-regulator",
+ .id = 2,
+ },
+ {
+ .name = "da9052-regulator",
+ .id = 3,
+ },
+ {
+ .name = "da9052-regulator",
+ .id = 4,
+ },
+ {
+ .name = "da9052-regulator",
+ .id = 5,
+ },
+ {
+ .name = "da9052-regulator",
+ .id = 6,
+ },
+ {
+ .name = "da9052-regulator",
+ .id = 7,
+ },
+ {
+ .name = "da9052-regulator",
+ .id = 8,
+ },
+ {
+ .name = "da9052-regulator",
+ .id = 9,
+ },
+ {
+ .name = "da9052-regulator",
+ .id = 10,
+ },
+ {
+ .name = "da9052-regulator",
+ .id = 11,
+ },
+ {
+ .name = "da9052-regulator",
+ .id = 12,
+ },
+ {
+ .name = "da9052-regulator",
+ .id = 13,
+ },
+ {
+ .name = "da9052-regulator",
+ .id = 14,
+ },
+ {
+ .name = "da9052-onkey",
+ .resources = &da9052_onkey_resource,
+ .num_resources = 1,
+ },
+ {
+ .name = "da9052-rtc",
+ .resources = &da9052_rtc_resource,
+ .num_resources = 1,
+ },
+ {
+ .name = "da9052-gpio",
+ },
+ {
+ .name = "da9052-hwmon",
+ },
+ {
+ .name = "da9052-leds",
+ },
+ {
+ .name = "da9052-wled1",
+ },
+ {
+ .name = "da9052-wled2",
+ },
+ {
+ .name = "da9052-wled3",
+ },
+ {
+ .name = "da9052-tsi",
+ .resources = da9052_tsi_resources,
+ .num_resources = ARRAY_SIZE(da9052_tsi_resources),
+ },
+ {
+ .name = "da9052-bat",
+ .resources = da9052_bat_resources,
+ .num_resources = ARRAY_SIZE(da9052_bat_resources),
+ },
+ {
+ .name = "da9052-watchdog",
+ },
+};
+
+static struct regmap_irq da9052_irqs[] = {
+ [DA9052_IRQ_DCIN] = {
+ .reg_offset = 0,
+ .mask = DA9052_IRQ_MASK_POS_1,
+ },
+ [DA9052_IRQ_VBUS] = {
+ .reg_offset = 0,
+ .mask = DA9052_IRQ_MASK_POS_2,
+ },
+ [DA9052_IRQ_DCINREM] = {
+ .reg_offset = 0,
+ .mask = DA9052_IRQ_MASK_POS_3,
+ },
+ [DA9052_IRQ_VBUSREM] = {
+ .reg_offset = 0,
+ .mask = DA9052_IRQ_MASK_POS_4,
+ },
+ [DA9052_IRQ_VDDLOW] = {
+ .reg_offset = 0,
+ .mask = DA9052_IRQ_MASK_POS_5,
+ },
+ [DA9052_IRQ_ALARM] = {
+ .reg_offset = 0,
+ .mask = DA9052_IRQ_MASK_POS_6,
+ },
+ [DA9052_IRQ_SEQRDY] = {
+ .reg_offset = 0,
+ .mask = DA9052_IRQ_MASK_POS_7,
+ },
+ [DA9052_IRQ_COMP1V2] = {
+ .reg_offset = 0,
+ .mask = DA9052_IRQ_MASK_POS_8,
+ },
+ [DA9052_IRQ_NONKEY] = {
+ .reg_offset = 1,
+ .mask = DA9052_IRQ_MASK_POS_1,
+ },
+ [DA9052_IRQ_IDFLOAT] = {
+ .reg_offset = 1,
+ .mask = DA9052_IRQ_MASK_POS_2,
+ },
+ [DA9052_IRQ_IDGND] = {
+ .reg_offset = 1,
+ .mask = DA9052_IRQ_MASK_POS_3,
+ },
+ [DA9052_IRQ_CHGEND] = {
+ .reg_offset = 1,
+ .mask = DA9052_IRQ_MASK_POS_4,
+ },
+ [DA9052_IRQ_TBAT] = {
+ .reg_offset = 1,
+ .mask = DA9052_IRQ_MASK_POS_5,
+ },
+ [DA9052_IRQ_ADC_EOM] = {
+ .reg_offset = 1,
+ .mask = DA9052_IRQ_MASK_POS_6,
+ },
+ [DA9052_IRQ_PENDOWN] = {
+ .reg_offset = 1,
+ .mask = DA9052_IRQ_MASK_POS_7,
+ },
+ [DA9052_IRQ_TSIREADY] = {
+ .reg_offset = 1,
+ .mask = DA9052_IRQ_MASK_POS_8,
+ },
+ [DA9052_IRQ_GPI0] = {
+ .reg_offset = 2,
+ .mask = DA9052_IRQ_MASK_POS_1,
+ },
+ [DA9052_IRQ_GPI1] = {
+ .reg_offset = 2,
+ .mask = DA9052_IRQ_MASK_POS_2,
+ },
+ [DA9052_IRQ_GPI2] = {
+ .reg_offset = 2,
+ .mask = DA9052_IRQ_MASK_POS_3,
+ },
+ [DA9052_IRQ_GPI3] = {
+ .reg_offset = 2,
+ .mask = DA9052_IRQ_MASK_POS_4,
+ },
+ [DA9052_IRQ_GPI4] = {
+ .reg_offset = 2,
+ .mask = DA9052_IRQ_MASK_POS_5,
+ },
+ [DA9052_IRQ_GPI5] = {
+ .reg_offset = 2,
+ .mask = DA9052_IRQ_MASK_POS_6,
+ },
+ [DA9052_IRQ_GPI6] = {
+ .reg_offset = 2,
+ .mask = DA9052_IRQ_MASK_POS_7,
+ },
+ [DA9052_IRQ_GPI7] = {
+ .reg_offset = 2,
+ .mask = DA9052_IRQ_MASK_POS_8,
+ },
+ [DA9052_IRQ_GPI8] = {
+ .reg_offset = 3,
+ .mask = DA9052_IRQ_MASK_POS_1,
+ },
+ [DA9052_IRQ_GPI9] = {
+ .reg_offset = 3,
+ .mask = DA9052_IRQ_MASK_POS_2,
+ },
+ [DA9052_IRQ_GPI10] = {
+ .reg_offset = 3,
+ .mask = DA9052_IRQ_MASK_POS_3,
+ },
+ [DA9052_IRQ_GPI11] = {
+ .reg_offset = 3,
+ .mask = DA9052_IRQ_MASK_POS_4,
+ },
+ [DA9052_IRQ_GPI12] = {
+ .reg_offset = 3,
+ .mask = DA9052_IRQ_MASK_POS_5,
+ },
+ [DA9052_IRQ_GPI13] = {
+ .reg_offset = 3,
+ .mask = DA9052_IRQ_MASK_POS_6,
+ },
+ [DA9052_IRQ_GPI14] = {
+ .reg_offset = 3,
+ .mask = DA9052_IRQ_MASK_POS_7,
+ },
+ [DA9052_IRQ_GPI15] = {
+ .reg_offset = 3,
+ .mask = DA9052_IRQ_MASK_POS_8,
+ },
+};
+
+static struct regmap_irq_chip da9052_regmap_irq_chip = {
+ .name = "da9052_irq",
+ .status_base = DA9052_EVENT_A_REG,
+ .mask_base = DA9052_IRQ_MASK_A_REG,
+ .ack_base = DA9052_EVENT_A_REG,
+ .num_regs = DA9052_NUM_IRQ_REGS,
+ .irqs = da9052_irqs,
+ .num_irqs = ARRAY_SIZE(da9052_irqs),
+};
+
+struct regmap_config da9052_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .cache_type = REGCACHE_RBTREE,
+
+ .max_register = DA9052_PAGE1_CON_REG,
+ .readable_reg = da9052_reg_readable,
+ .writeable_reg = da9052_reg_writeable,
+ .volatile_reg = da9052_reg_volatile,
+};
+EXPORT_SYMBOL_GPL(da9052_regmap_config);
+
+int __devinit da9052_device_init(struct da9052 *da9052, u8 chip_id)
+{
+ struct da9052_pdata *pdata = da9052->dev->platform_data;
+ struct irq_desc *desc;
+ int ret;
+
+ mutex_init(&da9052->io_lock);
+
+ if (pdata && pdata->init != NULL)
+ pdata->init(da9052);
+
+ da9052->chip_id = chip_id;
+
+ if (!pdata || !pdata->irq_base)
+ da9052->irq_base = -1;
+ else
+ da9052->irq_base = pdata->irq_base;
+
+ ret = regmap_add_irq_chip(da9052->regmap, da9052->chip_irq,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ da9052->irq_base, &da9052_regmap_irq_chip,
+ NULL);
+ if (ret < 0)
+ goto regmap_err;
+
+ desc = irq_to_desc(da9052->chip_irq);
+ da9052->irq_base = regmap_irq_chip_get_base(desc->action->dev_id);
+
+ ret = mfd_add_devices(da9052->dev, -1, da9052_subdev_info,
+ ARRAY_SIZE(da9052_subdev_info), NULL, 0);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ mfd_remove_devices(da9052->dev);
+regmap_err:
+ return ret;
+}
+
+void da9052_device_exit(struct da9052 *da9052)
+{
+ regmap_del_irq_chip(da9052->chip_irq,
+ irq_get_irq_data(da9052->irq_base)->chip_data);
+ mfd_remove_devices(da9052->dev);
+}
+
+MODULE_AUTHOR("David Dajun Chen <dchen@diasemi.com>");
+MODULE_DESCRIPTION("DA9052 MFD Core");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/da9052-i2c.c b/drivers/mfd/da9052-i2c.c
new file mode 100644
index 000000000000..44b97c70a61f
--- /dev/null
+++ b/drivers/mfd/da9052-i2c.c
@@ -0,0 +1,140 @@
+/*
+ * I2C access for DA9052 PMICs.
+ *
+ * Copyright(c) 2011 Dialog Semiconductor Ltd.
+ *
+ * Author: David Dajun Chen <dchen@diasemi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/input.h>
+#include <linux/mfd/core.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+
+#include <linux/mfd/da9052/da9052.h>
+#include <linux/mfd/da9052/reg.h>
+
+static int da9052_i2c_enable_multiwrite(struct da9052 *da9052)
+{
+ int reg_val, ret;
+
+ ret = regmap_read(da9052->regmap, DA9052_CONTROL_B_REG, &reg_val);
+ if (ret < 0)
+ return ret;
+
+ if (reg_val & DA9052_CONTROL_B_WRITEMODE) {
+ reg_val &= ~DA9052_CONTROL_B_WRITEMODE;
+ ret = regmap_write(da9052->regmap, DA9052_CONTROL_B_REG,
+ reg_val);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __devinit da9052_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct da9052 *da9052;
+ int ret;
+
+ da9052 = kzalloc(sizeof(struct da9052), GFP_KERNEL);
+ if (!da9052)
+ return -ENOMEM;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_BYTE_DATA)) {
+ dev_info(&client->dev, "Error in %s:i2c_check_functionality\n",
+ __func__);
+ ret = -ENODEV;
+ goto err;
+ }
+
+ da9052->dev = &client->dev;
+ da9052->chip_irq = client->irq;
+
+ i2c_set_clientdata(client, da9052);
+
+ da9052->regmap = regmap_init_i2c(client, &da9052_regmap_config);
+ if (IS_ERR(da9052->regmap)) {
+ ret = PTR_ERR(da9052->regmap);
+ dev_err(&client->dev, "Failed to allocate register map: %d\n",
+ ret);
+ goto err;
+ }
+
+ ret = da9052_i2c_enable_multiwrite(da9052);
+ if (ret < 0)
+ goto err;
+
+ ret = da9052_device_init(da9052, id->driver_data);
+ if (ret != 0)
+ goto err;
+
+ return 0;
+
+err:
+ kfree(da9052);
+ return ret;
+}
+
+static int da9052_i2c_remove(struct i2c_client *client)
+{
+ struct da9052 *da9052 = i2c_get_clientdata(client);
+
+ da9052_device_exit(da9052);
+ kfree(da9052);
+
+ return 0;
+}
+
+static struct i2c_device_id da9052_i2c_id[] = {
+ {"da9052", DA9052},
+ {"da9053-aa", DA9053_AA},
+ {"da9053-ba", DA9053_BA},
+ {"da9053-bb", DA9053_BB},
+ {}
+};
+
+static struct i2c_driver da9052_i2c_driver = {
+ .probe = da9052_i2c_probe,
+ .remove = da9052_i2c_remove,
+ .id_table = da9052_i2c_id,
+ .driver = {
+ .name = "da9052",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init da9052_i2c_init(void)
+{
+ int ret;
+
+ ret = i2c_add_driver(&da9052_i2c_driver);
+ if (ret != 0) {
+ pr_err("DA9052 I2C registration failed %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+subsys_initcall(da9052_i2c_init);
+
+static void __exit da9052_i2c_exit(void)
+{
+ i2c_del_driver(&da9052_i2c_driver);
+}
+module_exit(da9052_i2c_exit);
+
+MODULE_AUTHOR("David Dajun Chen <dchen@diasemi.com>");
+MODULE_DESCRIPTION("I2C driver for Dialog DA9052 PMIC");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/da9052-spi.c b/drivers/mfd/da9052-spi.c
new file mode 100644
index 000000000000..cdbc7cad326f
--- /dev/null
+++ b/drivers/mfd/da9052-spi.c
@@ -0,0 +1,115 @@
+/*
+ * SPI access for Dialog DA9052 PMICs.
+ *
+ * Copyright(c) 2011 Dialog Semiconductor Ltd.
+ *
+ * Author: David Dajun Chen <dchen@diasemi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/input.h>
+#include <linux/mfd/core.h>
+#include <linux/spi/spi.h>
+#include <linux/err.h>
+
+#include <linux/mfd/da9052/da9052.h>
+
+static int da9052_spi_probe(struct spi_device *spi)
+{
+ int ret;
+ const struct spi_device_id *id = spi_get_device_id(spi);
+ struct da9052 *da9052 = kzalloc(sizeof(struct da9052), GFP_KERNEL);
+
+ if (!da9052)
+ return -ENOMEM;
+
+ spi->mode = SPI_MODE_0 | SPI_CPOL;
+ spi->bits_per_word = 8;
+ spi_setup(spi);
+
+ da9052->dev = &spi->dev;
+ da9052->chip_irq = spi->irq;
+
+ dev_set_drvdata(&spi->dev, da9052);
+
+ da9052_regmap_config.read_flag_mask = 1;
+ da9052_regmap_config.write_flag_mask = 0;
+
+ da9052->regmap = regmap_init_spi(spi, &da9052_regmap_config);
+ if (IS_ERR(da9052->regmap)) {
+ ret = PTR_ERR(da9052->regmap);
+ dev_err(&spi->dev, "Failed to allocate register map: %d\n",
+ ret);
+ goto err;
+ }
+
+ ret = da9052_device_init(da9052, id->driver_data);
+ if (ret != 0)
+ goto err;
+
+ return 0;
+
+err:
+ kfree(da9052);
+ return ret;
+}
+
+static int da9052_spi_remove(struct spi_device *spi)
+{
+ struct da9052 *da9052 = dev_get_drvdata(&spi->dev);
+
+ da9052_device_exit(da9052);
+ kfree(da9052);
+
+ return 0;
+}
+
+static struct spi_device_id da9052_spi_id[] = {
+ {"da9052", DA9052},
+ {"da9053-aa", DA9053_AA},
+ {"da9053-ba", DA9053_BA},
+ {"da9053-bb", DA9053_BB},
+ {}
+};
+
+static struct spi_driver da9052_spi_driver = {
+ .probe = da9052_spi_probe,
+ .remove = __devexit_p(da9052_spi_remove),
+ .id_table = da9052_spi_id,
+ .driver = {
+ .name = "da9052",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init da9052_spi_init(void)
+{
+ int ret;
+
+ ret = spi_register_driver(&da9052_spi_driver);
+ if (ret != 0) {
+ pr_err("Failed to register DA9052 SPI driver, %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+subsys_initcall(da9052_spi_init);
+
+static void __exit da9052_spi_exit(void)
+{
+ spi_unregister_driver(&da9052_spi_driver);
+}
+module_exit(da9052_spi_exit);
+
+MODULE_AUTHOR("David Dajun Chen <dchen@diasemi.com>");
+MODULE_DESCRIPTION("SPI driver for Dialog DA9052 PMIC");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index a25ab9c6b5af..af8e0efedbe4 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -2102,14 +2102,11 @@ static struct irq_chip prcmu_irq_chip = {
void __init db8500_prcmu_early_init(void)
{
unsigned int i;
-
- if (cpu_is_u8500v1()) {
- tcdm_base = __io_address(U8500_PRCMU_TCDM_BASE_V1);
- } else if (cpu_is_u8500v2()) {
+ if (cpu_is_u8500v2()) {
void *tcpm_base = ioremap_nocache(U8500_PRCMU_TCPM_BASE, SZ_4K);
if (tcpm_base != NULL) {
- int version;
+ u32 version;
version = readl(tcpm_base + PRCMU_FW_VERSION_OFFSET);
prcmu_version.project_number = version & 0xFF;
prcmu_version.api_version = (version >> 8) & 0xFF;
diff --git a/drivers/mfd/dm355evm_msp.c b/drivers/mfd/dm355evm_msp.c
index 8ad88da647b9..7710227d284e 100644
--- a/drivers/mfd/dm355evm_msp.c
+++ b/drivers/mfd/dm355evm_msp.c
@@ -308,8 +308,7 @@ static int add_children(struct i2c_client *client)
for (i = 0; i < ARRAY_SIZE(config_inputs); i++) {
int gpio = dm355evm_msp_gpio.base + config_inputs[i].offset;
- gpio_request(gpio, config_inputs[i].label);
- gpio_direction_input(gpio);
+ gpio_request_one(gpio, GPIOF_IN, config_inputs[i].label);
/* make it easy for userspace to see these */
gpio_export(gpio, false);
diff --git a/drivers/mfd/intel_msic.c b/drivers/mfd/intel_msic.c
index 97c27762174f..b76657eb0c51 100644
--- a/drivers/mfd/intel_msic.c
+++ b/drivers/mfd/intel_msic.c
@@ -485,17 +485,7 @@ static struct platform_driver intel_msic_driver = {
},
};
-static int __init intel_msic_init(void)
-{
- return platform_driver_register(&intel_msic_driver);
-}
-module_init(intel_msic_init);
-
-static void __exit intel_msic_exit(void)
-{
- platform_driver_unregister(&intel_msic_driver);
-}
-module_exit(intel_msic_exit);
+module_platform_driver(intel_msic_driver);
MODULE_DESCRIPTION("Driver for Intel MSIC");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
index 5c2a06acb77f..a9223ed1b7c5 100644
--- a/drivers/mfd/janz-cmodio.c
+++ b/drivers/mfd/janz-cmodio.c
@@ -33,7 +33,7 @@
/* Module Parameters */
static unsigned int num_modules = CMODIO_MAX_MODULES;
-static unsigned char *modules[CMODIO_MAX_MODULES] = {
+static char *modules[CMODIO_MAX_MODULES] = {
"empty", "empty", "empty", "empty",
};
diff --git a/drivers/mfd/jz4740-adc.c b/drivers/mfd/jz4740-adc.c
index 1e9ee533eacb..87662a17dec6 100644
--- a/drivers/mfd/jz4740-adc.c
+++ b/drivers/mfd/jz4740-adc.c
@@ -16,6 +16,7 @@
*/
#include <linux/err.h>
+#include <linux/io.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
@@ -180,7 +181,7 @@ static struct resource jz4740_battery_resources[] = {
},
};
-const struct mfd_cell jz4740_adc_cells[] = {
+static struct mfd_cell jz4740_adc_cells[] = {
{
.id = 0,
.name = "jz4740-hwmon",
@@ -337,17 +338,7 @@ static struct platform_driver jz4740_adc_driver = {
},
};
-static int __init jz4740_adc_init(void)
-{
- return platform_driver_register(&jz4740_adc_driver);
-}
-module_init(jz4740_adc_init);
-
-static void __exit jz4740_adc_exit(void)
-{
- platform_driver_unregister(&jz4740_adc_driver);
-}
-module_exit(jz4740_adc_exit);
+module_platform_driver(jz4740_adc_driver);
MODULE_DESCRIPTION("JZ4740 SoC ADC driver");
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
diff --git a/drivers/mfd/lpc_sch.c b/drivers/mfd/lpc_sch.c
index ea1169b04779..abc421364a45 100644
--- a/drivers/mfd/lpc_sch.c
+++ b/drivers/mfd/lpc_sch.c
@@ -74,7 +74,7 @@ static struct mfd_cell tunnelcreek_cells[] = {
},
};
-static struct pci_device_id lpc_sch_ids[] = {
+static DEFINE_PCI_DEVICE_TABLE(lpc_sch_ids) = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SCH_LPC) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ITC_LPC) },
{ 0, }
diff --git a/drivers/mfd/max8925-core.c b/drivers/mfd/max8925-core.c
index e1e59c92f758..ca881efedf75 100644
--- a/drivers/mfd/max8925-core.c
+++ b/drivers/mfd/max8925-core.c
@@ -210,21 +210,6 @@ static struct max8925_irq_data max8925_irqs[] = {
.mask_reg = MAX8925_CHG_IRQ1_MASK,
.offs = 1 << 2,
},
- [MAX8925_IRQ_VCHG_USB_OVP] = {
- .reg = MAX8925_CHG_IRQ1,
- .mask_reg = MAX8925_CHG_IRQ1_MASK,
- .offs = 1 << 3,
- },
- [MAX8925_IRQ_VCHG_USB_F] = {
- .reg = MAX8925_CHG_IRQ1,
- .mask_reg = MAX8925_CHG_IRQ1_MASK,
- .offs = 1 << 4,
- },
- [MAX8925_IRQ_VCHG_USB_R] = {
- .reg = MAX8925_CHG_IRQ1,
- .mask_reg = MAX8925_CHG_IRQ1_MASK,
- .offs = 1 << 5,
- },
[MAX8925_IRQ_VCHG_THM_OK_R] = {
.reg = MAX8925_CHG_IRQ2,
.mask_reg = MAX8925_CHG_IRQ2_MASK,
diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c
index 0219115e00c7..d9e4b36edee9 100644
--- a/drivers/mfd/max8925-i2c.c
+++ b/drivers/mfd/max8925-i2c.c
@@ -161,6 +161,8 @@ static int __devinit max8925_probe(struct i2c_client *client,
chip->adc = i2c_new_dummy(chip->i2c->adapter, ADC_I2C_ADDR);
i2c_set_clientdata(chip->adc, chip);
+ device_init_wakeup(&client->dev, 1);
+
max8925_device_init(chip, pdata);
return 0;
@@ -177,10 +179,35 @@ static int __devexit max8925_remove(struct i2c_client *client)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int max8925_suspend(struct device *dev)
+{
+ struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+ struct max8925_chip *chip = i2c_get_clientdata(client);
+
+ if (device_may_wakeup(dev) && chip->wakeup_flag)
+ enable_irq_wake(chip->core_irq);
+ return 0;
+}
+
+static int max8925_resume(struct device *dev)
+{
+ struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+ struct max8925_chip *chip = i2c_get_clientdata(client);
+
+ if (device_may_wakeup(dev) && chip->wakeup_flag)
+ disable_irq_wake(chip->core_irq);
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(max8925_pm_ops, max8925_suspend, max8925_resume);
+
static struct i2c_driver max8925_driver = {
.driver = {
.name = "max8925",
.owner = THIS_MODULE,
+ .pm = &max8925_pm_ops,
},
.probe = max8925_probe,
.remove = __devexit_p(max8925_remove),
diff --git a/drivers/mfd/max8997.c b/drivers/mfd/max8997.c
index 5be53ae9b61c..cb83a7ab53e7 100644
--- a/drivers/mfd/max8997.c
+++ b/drivers/mfd/max8997.c
@@ -43,7 +43,8 @@ static struct mfd_cell max8997_devs[] = {
{ .name = "max8997-battery", },
{ .name = "max8997-haptic", },
{ .name = "max8997-muic", },
- { .name = "max8997-flash", },
+ { .name = "max8997-led", .id = 1 },
+ { .name = "max8997-led", .id = 2 },
};
int max8997_read_reg(struct i2c_client *i2c, u8 reg, u8 *dest)
diff --git a/drivers/mfd/max8998.c b/drivers/mfd/max8998.c
index de4096aee248..6ef56d28c056 100644
--- a/drivers/mfd/max8998.c
+++ b/drivers/mfd/max8998.c
@@ -176,6 +176,8 @@ static int max8998_i2c_probe(struct i2c_client *i2c,
if (ret < 0)
goto err;
+ device_init_wakeup(max8998->dev, max8998->wakeup);
+
return ret;
err:
@@ -210,7 +212,7 @@ static int max8998_suspend(struct device *dev)
struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
struct max8998_dev *max8998 = i2c_get_clientdata(i2c);
- if (max8998->wakeup)
+ if (device_may_wakeup(dev))
irq_set_irq_wake(max8998->irq, 1);
return 0;
}
@@ -220,7 +222,7 @@ static int max8998_resume(struct device *dev)
struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
struct max8998_dev *max8998 = i2c_get_clientdata(i2c);
- if (max8998->wakeup)
+ if (device_may_wakeup(dev))
irq_set_irq_wake(max8998->irq, 0);
/*
* In LP3974, if IRQ registers are not "read & clear"
diff --git a/drivers/mfd/mc13xxx-core.c b/drivers/mfd/mc13xxx-core.c
index e9619acc0237..7122386b4e3c 100644
--- a/drivers/mfd/mc13xxx-core.c
+++ b/drivers/mfd/mc13xxx-core.c
@@ -18,11 +18,15 @@
#include <linux/spi/spi.h>
#include <linux/mfd/core.h>
#include <linux/mfd/mc13xxx.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
struct mc13xxx {
struct spi_device *spidev;
struct mutex lock;
int irq;
+ int flags;
irq_handler_t irqhandler[MC13XXX_NUM_IRQ];
void *irqdata[MC13XXX_NUM_IRQ];
@@ -550,10 +554,7 @@ static const char *mc13xxx_get_chipname(struct mc13xxx *mc13xxx)
int mc13xxx_get_flags(struct mc13xxx *mc13xxx)
{
- struct mc13xxx_platform_data *pdata =
- dev_get_platdata(&mc13xxx->spidev->dev);
-
- return pdata->flags;
+ return mc13xxx->flags;
}
EXPORT_SYMBOL(mc13xxx_get_flags);
@@ -615,13 +616,13 @@ int mc13xxx_adc_do_conversion(struct mc13xxx *mc13xxx, unsigned int mode,
break;
case MC13XXX_ADC_MODE_SINGLE_CHAN:
- adc0 |= old_adc0 & MC13XXX_ADC0_TSMOD_MASK;
+ adc0 |= old_adc0 & MC13XXX_ADC0_CONFIG_MASK;
adc1 |= (channel & 0x7) << MC13XXX_ADC1_CHAN0_SHIFT;
adc1 |= MC13XXX_ADC1_RAND;
break;
case MC13XXX_ADC_MODE_MULT_CHAN:
- adc0 |= old_adc0 & MC13XXX_ADC0_TSMOD_MASK;
+ adc0 |= old_adc0 & MC13XXX_ADC0_CONFIG_MASK;
adc1 |= 4 << MC13XXX_ADC1_CHAN1_SHIFT;
break;
@@ -696,17 +697,67 @@ static int mc13xxx_add_subdevice(struct mc13xxx *mc13xxx, const char *format)
return mc13xxx_add_subdevice_pdata(mc13xxx, format, NULL, 0);
}
+#ifdef CONFIG_OF
+static int mc13xxx_probe_flags_dt(struct mc13xxx *mc13xxx)
+{
+ struct device_node *np = mc13xxx->spidev->dev.of_node;
+
+ if (!np)
+ return -ENODEV;
+
+ if (of_get_property(np, "fsl,mc13xxx-uses-adc", NULL))
+ mc13xxx->flags |= MC13XXX_USE_ADC;
+
+ if (of_get_property(np, "fsl,mc13xxx-uses-codec", NULL))
+ mc13xxx->flags |= MC13XXX_USE_CODEC;
+
+ if (of_get_property(np, "fsl,mc13xxx-uses-rtc", NULL))
+ mc13xxx->flags |= MC13XXX_USE_RTC;
+
+ if (of_get_property(np, "fsl,mc13xxx-uses-touch", NULL))
+ mc13xxx->flags |= MC13XXX_USE_TOUCHSCREEN;
+
+ return 0;
+}
+#else
+static inline int mc13xxx_probe_flags_dt(struct mc13xxx *mc13xxx)
+{
+ return -ENODEV;
+}
+#endif
+
+static const struct spi_device_id mc13xxx_device_id[] = {
+ {
+ .name = "mc13783",
+ .driver_data = MC13XXX_ID_MC13783,
+ }, {
+ .name = "mc13892",
+ .driver_data = MC13XXX_ID_MC13892,
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(spi, mc13xxx_device_id);
+
+static const struct of_device_id mc13xxx_dt_ids[] = {
+ { .compatible = "fsl,mc13783", .data = (void *) MC13XXX_ID_MC13783, },
+ { .compatible = "fsl,mc13892", .data = (void *) MC13XXX_ID_MC13892, },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mc13xxx_dt_ids);
+
static int mc13xxx_probe(struct spi_device *spi)
{
+ const struct of_device_id *of_id;
+ struct spi_driver *sdrv = to_spi_driver(spi->dev.driver);
struct mc13xxx *mc13xxx;
struct mc13xxx_platform_data *pdata = dev_get_platdata(&spi->dev);
enum mc13xxx_id id;
int ret;
- if (!pdata) {
- dev_err(&spi->dev, "invalid platform data\n");
- return -EINVAL;
- }
+ of_id = of_match_device(mc13xxx_dt_ids, &spi->dev);
+ if (of_id)
+ sdrv->id_table = &mc13xxx_device_id[(enum mc13xxx_id) of_id->data];
mc13xxx = kzalloc(sizeof(*mc13xxx), GFP_KERNEL);
if (!mc13xxx)
@@ -749,28 +800,33 @@ err_revision:
mc13xxx_unlock(mc13xxx);
- if (pdata->flags & MC13XXX_USE_ADC)
+ if (mc13xxx_probe_flags_dt(mc13xxx) < 0 && pdata)
+ mc13xxx->flags = pdata->flags;
+
+ if (mc13xxx->flags & MC13XXX_USE_ADC)
mc13xxx_add_subdevice(mc13xxx, "%s-adc");
- if (pdata->flags & MC13XXX_USE_CODEC)
+ if (mc13xxx->flags & MC13XXX_USE_CODEC)
mc13xxx_add_subdevice(mc13xxx, "%s-codec");
- mc13xxx_add_subdevice_pdata(mc13xxx, "%s-regulator",
- &pdata->regulators, sizeof(pdata->regulators));
-
- if (pdata->flags & MC13XXX_USE_RTC)
+ if (mc13xxx->flags & MC13XXX_USE_RTC)
mc13xxx_add_subdevice(mc13xxx, "%s-rtc");
- if (pdata->flags & MC13XXX_USE_TOUCHSCREEN)
+ if (mc13xxx->flags & MC13XXX_USE_TOUCHSCREEN)
mc13xxx_add_subdevice(mc13xxx, "%s-ts");
- if (pdata->leds)
+ if (pdata) {
+ mc13xxx_add_subdevice_pdata(mc13xxx, "%s-regulator",
+ &pdata->regulators, sizeof(pdata->regulators));
mc13xxx_add_subdevice_pdata(mc13xxx, "%s-led",
pdata->leds, sizeof(*pdata->leds));
-
- if (pdata->buttons)
mc13xxx_add_subdevice_pdata(mc13xxx, "%s-pwrbutton",
pdata->buttons, sizeof(*pdata->buttons));
+ } else {
+ mc13xxx_add_subdevice(mc13xxx, "%s-regulator");
+ mc13xxx_add_subdevice(mc13xxx, "%s-led");
+ mc13xxx_add_subdevice(mc13xxx, "%s-pwrbutton");
+ }
return 0;
}
@@ -788,25 +844,12 @@ static int __devexit mc13xxx_remove(struct spi_device *spi)
return 0;
}
-static const struct spi_device_id mc13xxx_device_id[] = {
- {
- .name = "mc13783",
- .driver_data = MC13XXX_ID_MC13783,
- }, {
- .name = "mc13892",
- .driver_data = MC13XXX_ID_MC13892,
- }, {
- /* sentinel */
- }
-};
-MODULE_DEVICE_TABLE(spi, mc13xxx_device_id);
-
static struct spi_driver mc13xxx_driver = {
.id_table = mc13xxx_device_id,
.driver = {
.name = "mc13xxx",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
+ .of_match_table = mc13xxx_dt_ids,
},
.probe = mc13xxx_probe,
.remove = __devexit_p(mc13xxx_remove),
diff --git a/drivers/mfd/mcp-core.c b/drivers/mfd/mcp-core.c
index 84815f9ef636..63be60bc3455 100644
--- a/drivers/mfd/mcp-core.c
+++ b/drivers/mfd/mcp-core.c
@@ -26,9 +26,35 @@
#define to_mcp(d) container_of(d, struct mcp, attached_device)
#define to_mcp_driver(d) container_of(d, struct mcp_driver, drv)
+static const struct mcp_device_id *mcp_match_id(const struct mcp_device_id *id,
+ const char *codec)
+{
+ while (id->name[0]) {
+ if (strcmp(codec, id->name) == 0)
+ return id;
+ id++;
+ }
+ return NULL;
+}
+
+const struct mcp_device_id *mcp_get_device_id(const struct mcp *mcp)
+{
+ const struct mcp_driver *driver =
+ to_mcp_driver(mcp->attached_device.driver);
+
+ return mcp_match_id(driver->id_table, mcp->codec);
+}
+EXPORT_SYMBOL(mcp_get_device_id);
+
static int mcp_bus_match(struct device *dev, struct device_driver *drv)
{
- return 1;
+ const struct mcp *mcp = to_mcp(dev);
+ const struct mcp_driver *driver = to_mcp_driver(drv);
+
+ if (driver->id_table)
+ return !!mcp_match_id(driver->id_table, mcp->codec);
+
+ return 0;
}
static int mcp_bus_probe(struct device *dev)
@@ -74,9 +100,18 @@ static int mcp_bus_resume(struct device *dev)
return ret;
}
+static int mcp_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct mcp *mcp = to_mcp(dev);
+
+ add_uevent_var(env, "MODALIAS=%s%s", MCP_MODULE_PREFIX, mcp->codec);
+ return 0;
+}
+
static struct bus_type mcp_bus_type = {
.name = "mcp",
.match = mcp_bus_match,
+ .uevent = mcp_bus_uevent,
.probe = mcp_bus_probe,
.remove = mcp_bus_remove,
.suspend = mcp_bus_suspend,
@@ -212,9 +247,14 @@ struct mcp *mcp_host_alloc(struct device *parent, size_t size)
}
EXPORT_SYMBOL(mcp_host_alloc);
-int mcp_host_register(struct mcp *mcp)
+int mcp_host_register(struct mcp *mcp, void *pdata)
{
+ if (!mcp->codec)
+ return -EINVAL;
+
+ mcp->attached_device.platform_data = pdata;
dev_set_name(&mcp->attached_device, "mcp0");
+ request_module("%s%s", MCP_MODULE_PREFIX, mcp->codec);
return device_register(&mcp->attached_device);
}
EXPORT_SYMBOL(mcp_host_register);
diff --git a/drivers/mfd/mcp-sa11x0.c b/drivers/mfd/mcp-sa11x0.c
index 2dab02d9ac8b..9adc2eb69492 100644
--- a/drivers/mfd/mcp-sa11x0.c
+++ b/drivers/mfd/mcp-sa11x0.c
@@ -19,6 +19,7 @@
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/mfd/mcp.h>
+#include <linux/io.h>
#include <mach/dma.h>
#include <mach/hardware.h>
@@ -26,12 +27,19 @@
#include <asm/system.h>
#include <mach/mcp.h>
-#include <mach/assabet.h>
-
+/* Register offsets */
+#define MCCR0 0x00
+#define MCDR0 0x08
+#define MCDR1 0x0C
+#define MCDR2 0x10
+#define MCSR 0x18
+#define MCCR1 0x00
struct mcp_sa11x0 {
- u32 mccr0;
- u32 mccr1;
+ u32 mccr0;
+ u32 mccr1;
+ unsigned char *mccr0_base;
+ unsigned char *mccr1_base;
};
#define priv(mcp) ((struct mcp_sa11x0 *)mcp_priv(mcp))
@@ -39,25 +47,25 @@ struct mcp_sa11x0 {
static void
mcp_sa11x0_set_telecom_divisor(struct mcp *mcp, unsigned int divisor)
{
- unsigned int mccr0;
+ struct mcp_sa11x0 *priv = priv(mcp);
divisor /= 32;
- mccr0 = Ser4MCCR0 & ~0x00007f00;
- mccr0 |= divisor << 8;
- Ser4MCCR0 = mccr0;
+ priv->mccr0 &= ~0x00007f00;
+ priv->mccr0 |= divisor << 8;
+ __raw_writel(priv->mccr0, priv->mccr0_base + MCCR0);
}
static void
mcp_sa11x0_set_audio_divisor(struct mcp *mcp, unsigned int divisor)
{
- unsigned int mccr0;
+ struct mcp_sa11x0 *priv = priv(mcp);
divisor /= 32;
- mccr0 = Ser4MCCR0 & ~0x0000007f;
- mccr0 |= divisor;
- Ser4MCCR0 = mccr0;
+ priv->mccr0 &= ~0x0000007f;
+ priv->mccr0 |= divisor;
+ __raw_writel(priv->mccr0, priv->mccr0_base + MCCR0);
}
/*
@@ -71,12 +79,16 @@ mcp_sa11x0_write(struct mcp *mcp, unsigned int reg, unsigned int val)
{
int ret = -ETIME;
int i;
+ u32 mcpreg;
+ struct mcp_sa11x0 *priv = priv(mcp);
- Ser4MCDR2 = reg << 17 | MCDR2_Wr | (val & 0xffff);
+ mcpreg = reg << 17 | MCDR2_Wr | (val & 0xffff);
+ __raw_writel(mcpreg, priv->mccr0_base + MCDR2);
for (i = 0; i < 2; i++) {
udelay(mcp->rw_timeout);
- if (Ser4MCSR & MCSR_CWC) {
+ mcpreg = __raw_readl(priv->mccr0_base + MCSR);
+ if (mcpreg & MCSR_CWC) {
ret = 0;
break;
}
@@ -97,13 +109,18 @@ mcp_sa11x0_read(struct mcp *mcp, unsigned int reg)
{
int ret = -ETIME;
int i;
+ u32 mcpreg;
+ struct mcp_sa11x0 *priv = priv(mcp);
- Ser4MCDR2 = reg << 17 | MCDR2_Rd;
+ mcpreg = reg << 17 | MCDR2_Rd;
+ __raw_writel(mcpreg, priv->mccr0_base + MCDR2);
for (i = 0; i < 2; i++) {
udelay(mcp->rw_timeout);
- if (Ser4MCSR & MCSR_CRC) {
- ret = Ser4MCDR2 & 0xffff;
+ mcpreg = __raw_readl(priv->mccr0_base + MCSR);
+ if (mcpreg & MCSR_CRC) {
+ ret = __raw_readl(priv->mccr0_base + MCDR2)
+ & 0xffff;
break;
}
}
@@ -116,13 +133,19 @@ mcp_sa11x0_read(struct mcp *mcp, unsigned int reg)
static void mcp_sa11x0_enable(struct mcp *mcp)
{
- Ser4MCSR = -1;
- Ser4MCCR0 |= MCCR0_MCE;
+ struct mcp_sa11x0 *priv = priv(mcp);
+
+ __raw_writel(-1, priv->mccr0_base + MCSR);
+ priv->mccr0 |= MCCR0_MCE;
+ __raw_writel(priv->mccr0, priv->mccr0_base + MCCR0);
}
static void mcp_sa11x0_disable(struct mcp *mcp)
{
- Ser4MCCR0 &= ~MCCR0_MCE;
+ struct mcp_sa11x0 *priv = priv(mcp);
+
+ priv->mccr0 &= ~MCCR0_MCE;
+ __raw_writel(priv->mccr0, priv->mccr0_base + MCCR0);
}
/*
@@ -142,50 +165,69 @@ static int mcp_sa11x0_probe(struct platform_device *pdev)
struct mcp_plat_data *data = pdev->dev.platform_data;
struct mcp *mcp;
int ret;
+ struct mcp_sa11x0 *priv;
+ struct resource *res_mem0, *res_mem1;
+ u32 size0, size1;
if (!data)
return -ENODEV;
- if (!request_mem_region(0x80060000, 0x60, "sa11x0-mcp"))
+ if (!data->codec)
+ return -ENODEV;
+
+ res_mem0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res_mem0)
+ return -ENODEV;
+ size0 = res_mem0->end - res_mem0->start + 1;
+
+ res_mem1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res_mem1)
+ return -ENODEV;
+ size1 = res_mem1->end - res_mem1->start + 1;
+
+ if (!request_mem_region(res_mem0->start, size0, "sa11x0-mcp"))
return -EBUSY;
+ if (!request_mem_region(res_mem1->start, size1, "sa11x0-mcp")) {
+ ret = -EBUSY;
+ goto release;
+ }
+
mcp = mcp_host_alloc(&pdev->dev, sizeof(struct mcp_sa11x0));
if (!mcp) {
ret = -ENOMEM;
- goto release;
+ goto release2;
}
+ priv = priv(mcp);
+
mcp->owner = THIS_MODULE;
mcp->ops = &mcp_sa11x0;
mcp->sclk_rate = data->sclk_rate;
- mcp->dma_audio_rd = DMA_Ser4MCP0Rd;
- mcp->dma_audio_wr = DMA_Ser4MCP0Wr;
- mcp->dma_telco_rd = DMA_Ser4MCP1Rd;
- mcp->dma_telco_wr = DMA_Ser4MCP1Wr;
- mcp->gpio_base = data->gpio_base;
+ mcp->dma_audio_rd = DDAR_DevAdd(res_mem0->start + MCDR0)
+ + DDAR_DevRd + DDAR_Brst4 + DDAR_8BitDev;
+ mcp->dma_audio_wr = DDAR_DevAdd(res_mem0->start + MCDR0)
+ + DDAR_DevWr + DDAR_Brst4 + DDAR_8BitDev;
+ mcp->dma_telco_rd = DDAR_DevAdd(res_mem0->start + MCDR1)
+ + DDAR_DevRd + DDAR_Brst4 + DDAR_8BitDev;
+ mcp->dma_telco_wr = DDAR_DevAdd(res_mem0->start + MCDR1)
+ + DDAR_DevWr + DDAR_Brst4 + DDAR_8BitDev;
+ mcp->codec = data->codec;
platform_set_drvdata(pdev, mcp);
- if (machine_is_assabet()) {
- ASSABET_BCR_set(ASSABET_BCR_CODEC_RST);
- }
-
- /*
- * Setup the PPC unit correctly.
- */
- PPDR &= ~PPC_RXD4;
- PPDR |= PPC_TXD4 | PPC_SCLK | PPC_SFRM;
- PSDR |= PPC_RXD4;
- PSDR &= ~(PPC_TXD4 | PPC_SCLK | PPC_SFRM);
- PPSR &= ~(PPC_TXD4 | PPC_SCLK | PPC_SFRM);
-
/*
* Initialise device. Note that we initially
* set the sampling rate to minimum.
*/
- Ser4MCSR = -1;
- Ser4MCCR1 = data->mccr1;
- Ser4MCCR0 = data->mccr0 | 0x7f7f;
+ priv->mccr0_base = ioremap(res_mem0->start, size0);
+ priv->mccr1_base = ioremap(res_mem1->start, size1);
+
+ __raw_writel(-1, priv->mccr0_base + MCSR);
+ priv->mccr1 = data->mccr1;
+ priv->mccr0 = data->mccr0 | 0x7f7f;
+ __raw_writel(priv->mccr0, priv->mccr0_base + MCCR0);
+ __raw_writel(priv->mccr1, priv->mccr1_base + MCCR1);
/*
* Calculate the read/write timeout (us) from the bit clock
@@ -195,36 +237,53 @@ static int mcp_sa11x0_probe(struct platform_device *pdev)
mcp->rw_timeout = (64 * 3 * 1000000 + mcp->sclk_rate - 1) /
mcp->sclk_rate;
- ret = mcp_host_register(mcp);
+ ret = mcp_host_register(mcp, data->codec_pdata);
if (ret == 0)
goto out;
+ release2:
+ release_mem_region(res_mem1->start, size1);
release:
- release_mem_region(0x80060000, 0x60);
+ release_mem_region(res_mem0->start, size0);
platform_set_drvdata(pdev, NULL);
out:
return ret;
}
-static int mcp_sa11x0_remove(struct platform_device *dev)
+static int mcp_sa11x0_remove(struct platform_device *pdev)
{
- struct mcp *mcp = platform_get_drvdata(dev);
+ struct mcp *mcp = platform_get_drvdata(pdev);
+ struct mcp_sa11x0 *priv = priv(mcp);
+ struct resource *res_mem;
+ u32 size;
- platform_set_drvdata(dev, NULL);
+ platform_set_drvdata(pdev, NULL);
mcp_host_unregister(mcp);
- release_mem_region(0x80060000, 0x60);
+ res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res_mem) {
+ size = res_mem->end - res_mem->start + 1;
+ release_mem_region(res_mem->start, size);
+ }
+ res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (res_mem) {
+ size = res_mem->end - res_mem->start + 1;
+ release_mem_region(res_mem->start, size);
+ }
+ iounmap(priv->mccr0_base);
+ iounmap(priv->mccr1_base);
return 0;
}
static int mcp_sa11x0_suspend(struct platform_device *dev, pm_message_t state)
{
struct mcp *mcp = platform_get_drvdata(dev);
+ struct mcp_sa11x0 *priv = priv(mcp);
+ u32 mccr0;
- priv(mcp)->mccr0 = Ser4MCCR0;
- priv(mcp)->mccr1 = Ser4MCCR1;
- Ser4MCCR0 &= ~MCCR0_MCE;
+ mccr0 = priv->mccr0 & ~MCCR0_MCE;
+ __raw_writel(mccr0, priv->mccr0_base + MCCR0);
return 0;
}
@@ -232,9 +291,10 @@ static int mcp_sa11x0_suspend(struct platform_device *dev, pm_message_t state)
static int mcp_sa11x0_resume(struct platform_device *dev)
{
struct mcp *mcp = platform_get_drvdata(dev);
+ struct mcp_sa11x0 *priv = priv(mcp);
- Ser4MCCR1 = priv(mcp)->mccr1;
- Ser4MCCR0 = priv(mcp)->mccr0;
+ __raw_writel(priv->mccr0, priv->mccr0_base + MCCR0);
+ __raw_writel(priv->mccr1, priv->mccr1_base + MCCR1);
return 0;
}
@@ -251,24 +311,14 @@ static struct platform_driver mcp_sa11x0_driver = {
.resume = mcp_sa11x0_resume,
.driver = {
.name = "sa11x0-mcp",
+ .owner = THIS_MODULE,
},
};
/*
* This needs re-working
*/
-static int __init mcp_sa11x0_init(void)
-{
- return platform_driver_register(&mcp_sa11x0_driver);
-}
-
-static void __exit mcp_sa11x0_exit(void)
-{
- platform_driver_unregister(&mcp_sa11x0_driver);
-}
-
-module_init(mcp_sa11x0_init);
-module_exit(mcp_sa11x0_exit);
+module_platform_driver(mcp_sa11x0_driver);
MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
MODULE_DESCRIPTION("SA11x0 multimedia communications port driver");
diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
index 86e14583a082..68ac2c55d5ae 100644
--- a/drivers/mfd/omap-usb-host.c
+++ b/drivers/mfd/omap-usb-host.c
@@ -27,8 +27,9 @@
#include <linux/spinlock.h>
#include <linux/gpio.h>
#include <plat/usb.h>
+#include <linux/pm_runtime.h>
-#define USBHS_DRIVER_NAME "usbhs-omap"
+#define USBHS_DRIVER_NAME "usbhs_omap"
#define OMAP_EHCI_DEVICE "ehci-omap"
#define OMAP_OHCI_DEVICE "ohci-omap3"
@@ -147,9 +148,6 @@
struct usbhs_hcd_omap {
- struct clk *usbhost_ick;
- struct clk *usbhost_hs_fck;
- struct clk *usbhost_fs_fck;
struct clk *xclk60mhsp1_ck;
struct clk *xclk60mhsp2_ck;
struct clk *utmi_p1_fck;
@@ -159,8 +157,7 @@ struct usbhs_hcd_omap {
struct clk *usbhost_p2_fck;
struct clk *usbtll_p2_fck;
struct clk *init_60m_fclk;
- struct clk *usbtll_fck;
- struct clk *usbtll_ick;
+ struct clk *ehci_logic_fck;
void __iomem *uhh_base;
void __iomem *tll_base;
@@ -169,7 +166,6 @@ struct usbhs_hcd_omap {
u32 usbhs_rev;
spinlock_t lock;
- int count;
};
/*-------------------------------------------------------------------------*/
@@ -319,269 +315,6 @@ err_end:
return ret;
}
-/**
- * usbhs_omap_probe - initialize TI-based HCDs
- *
- * Allocates basic resources for this USB host controller.
- */
-static int __devinit usbhs_omap_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct usbhs_omap_platform_data *pdata = dev->platform_data;
- struct usbhs_hcd_omap *omap;
- struct resource *res;
- int ret = 0;
- int i;
-
- if (!pdata) {
- dev_err(dev, "Missing platform data\n");
- ret = -ENOMEM;
- goto end_probe;
- }
-
- omap = kzalloc(sizeof(*omap), GFP_KERNEL);
- if (!omap) {
- dev_err(dev, "Memory allocation failed\n");
- ret = -ENOMEM;
- goto end_probe;
- }
-
- spin_lock_init(&omap->lock);
-
- for (i = 0; i < OMAP3_HS_USB_PORTS; i++)
- omap->platdata.port_mode[i] = pdata->port_mode[i];
-
- omap->platdata.ehci_data = pdata->ehci_data;
- omap->platdata.ohci_data = pdata->ohci_data;
-
- omap->usbhost_ick = clk_get(dev, "usbhost_ick");
- if (IS_ERR(omap->usbhost_ick)) {
- ret = PTR_ERR(omap->usbhost_ick);
- dev_err(dev, "usbhost_ick failed error:%d\n", ret);
- goto err_end;
- }
-
- omap->usbhost_hs_fck = clk_get(dev, "hs_fck");
- if (IS_ERR(omap->usbhost_hs_fck)) {
- ret = PTR_ERR(omap->usbhost_hs_fck);
- dev_err(dev, "usbhost_hs_fck failed error:%d\n", ret);
- goto err_usbhost_ick;
- }
-
- omap->usbhost_fs_fck = clk_get(dev, "fs_fck");
- if (IS_ERR(omap->usbhost_fs_fck)) {
- ret = PTR_ERR(omap->usbhost_fs_fck);
- dev_err(dev, "usbhost_fs_fck failed error:%d\n", ret);
- goto err_usbhost_hs_fck;
- }
-
- omap->usbtll_fck = clk_get(dev, "usbtll_fck");
- if (IS_ERR(omap->usbtll_fck)) {
- ret = PTR_ERR(omap->usbtll_fck);
- dev_err(dev, "usbtll_fck failed error:%d\n", ret);
- goto err_usbhost_fs_fck;
- }
-
- omap->usbtll_ick = clk_get(dev, "usbtll_ick");
- if (IS_ERR(omap->usbtll_ick)) {
- ret = PTR_ERR(omap->usbtll_ick);
- dev_err(dev, "usbtll_ick failed error:%d\n", ret);
- goto err_usbtll_fck;
- }
-
- omap->utmi_p1_fck = clk_get(dev, "utmi_p1_gfclk");
- if (IS_ERR(omap->utmi_p1_fck)) {
- ret = PTR_ERR(omap->utmi_p1_fck);
- dev_err(dev, "utmi_p1_gfclk failed error:%d\n", ret);
- goto err_usbtll_ick;
- }
-
- omap->xclk60mhsp1_ck = clk_get(dev, "xclk60mhsp1_ck");
- if (IS_ERR(omap->xclk60mhsp1_ck)) {
- ret = PTR_ERR(omap->xclk60mhsp1_ck);
- dev_err(dev, "xclk60mhsp1_ck failed error:%d\n", ret);
- goto err_utmi_p1_fck;
- }
-
- omap->utmi_p2_fck = clk_get(dev, "utmi_p2_gfclk");
- if (IS_ERR(omap->utmi_p2_fck)) {
- ret = PTR_ERR(omap->utmi_p2_fck);
- dev_err(dev, "utmi_p2_gfclk failed error:%d\n", ret);
- goto err_xclk60mhsp1_ck;
- }
-
- omap->xclk60mhsp2_ck = clk_get(dev, "xclk60mhsp2_ck");
- if (IS_ERR(omap->xclk60mhsp2_ck)) {
- ret = PTR_ERR(omap->xclk60mhsp2_ck);
- dev_err(dev, "xclk60mhsp2_ck failed error:%d\n", ret);
- goto err_utmi_p2_fck;
- }
-
- omap->usbhost_p1_fck = clk_get(dev, "usb_host_hs_utmi_p1_clk");
- if (IS_ERR(omap->usbhost_p1_fck)) {
- ret = PTR_ERR(omap->usbhost_p1_fck);
- dev_err(dev, "usbhost_p1_fck failed error:%d\n", ret);
- goto err_xclk60mhsp2_ck;
- }
-
- omap->usbtll_p1_fck = clk_get(dev, "usb_tll_hs_usb_ch0_clk");
- if (IS_ERR(omap->usbtll_p1_fck)) {
- ret = PTR_ERR(omap->usbtll_p1_fck);
- dev_err(dev, "usbtll_p1_fck failed error:%d\n", ret);
- goto err_usbhost_p1_fck;
- }
-
- omap->usbhost_p2_fck = clk_get(dev, "usb_host_hs_utmi_p2_clk");
- if (IS_ERR(omap->usbhost_p2_fck)) {
- ret = PTR_ERR(omap->usbhost_p2_fck);
- dev_err(dev, "usbhost_p2_fck failed error:%d\n", ret);
- goto err_usbtll_p1_fck;
- }
-
- omap->usbtll_p2_fck = clk_get(dev, "usb_tll_hs_usb_ch1_clk");
- if (IS_ERR(omap->usbtll_p2_fck)) {
- ret = PTR_ERR(omap->usbtll_p2_fck);
- dev_err(dev, "usbtll_p2_fck failed error:%d\n", ret);
- goto err_usbhost_p2_fck;
- }
-
- omap->init_60m_fclk = clk_get(dev, "init_60m_fclk");
- if (IS_ERR(omap->init_60m_fclk)) {
- ret = PTR_ERR(omap->init_60m_fclk);
- dev_err(dev, "init_60m_fclk failed error:%d\n", ret);
- goto err_usbtll_p2_fck;
- }
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "uhh");
- if (!res) {
- dev_err(dev, "UHH EHCI get resource failed\n");
- ret = -ENODEV;
- goto err_init_60m_fclk;
- }
-
- omap->uhh_base = ioremap(res->start, resource_size(res));
- if (!omap->uhh_base) {
- dev_err(dev, "UHH ioremap failed\n");
- ret = -ENOMEM;
- goto err_init_60m_fclk;
- }
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tll");
- if (!res) {
- dev_err(dev, "UHH EHCI get resource failed\n");
- ret = -ENODEV;
- goto err_tll;
- }
-
- omap->tll_base = ioremap(res->start, resource_size(res));
- if (!omap->tll_base) {
- dev_err(dev, "TLL ioremap failed\n");
- ret = -ENOMEM;
- goto err_tll;
- }
-
- platform_set_drvdata(pdev, omap);
-
- ret = omap_usbhs_alloc_children(pdev);
- if (ret) {
- dev_err(dev, "omap_usbhs_alloc_children failed\n");
- goto err_alloc;
- }
-
- goto end_probe;
-
-err_alloc:
- iounmap(omap->tll_base);
-
-err_tll:
- iounmap(omap->uhh_base);
-
-err_init_60m_fclk:
- clk_put(omap->init_60m_fclk);
-
-err_usbtll_p2_fck:
- clk_put(omap->usbtll_p2_fck);
-
-err_usbhost_p2_fck:
- clk_put(omap->usbhost_p2_fck);
-
-err_usbtll_p1_fck:
- clk_put(omap->usbtll_p1_fck);
-
-err_usbhost_p1_fck:
- clk_put(omap->usbhost_p1_fck);
-
-err_xclk60mhsp2_ck:
- clk_put(omap->xclk60mhsp2_ck);
-
-err_utmi_p2_fck:
- clk_put(omap->utmi_p2_fck);
-
-err_xclk60mhsp1_ck:
- clk_put(omap->xclk60mhsp1_ck);
-
-err_utmi_p1_fck:
- clk_put(omap->utmi_p1_fck);
-
-err_usbtll_ick:
- clk_put(omap->usbtll_ick);
-
-err_usbtll_fck:
- clk_put(omap->usbtll_fck);
-
-err_usbhost_fs_fck:
- clk_put(omap->usbhost_fs_fck);
-
-err_usbhost_hs_fck:
- clk_put(omap->usbhost_hs_fck);
-
-err_usbhost_ick:
- clk_put(omap->usbhost_ick);
-
-err_end:
- kfree(omap);
-
-end_probe:
- return ret;
-}
-
-/**
- * usbhs_omap_remove - shutdown processing for UHH & TLL HCDs
- * @pdev: USB Host Controller being removed
- *
- * Reverses the effect of usbhs_omap_probe().
- */
-static int __devexit usbhs_omap_remove(struct platform_device *pdev)
-{
- struct usbhs_hcd_omap *omap = platform_get_drvdata(pdev);
-
- if (omap->count != 0) {
- dev_err(&pdev->dev,
- "Either EHCI or OHCI is still using usbhs core\n");
- return -EBUSY;
- }
-
- iounmap(omap->tll_base);
- iounmap(omap->uhh_base);
- clk_put(omap->init_60m_fclk);
- clk_put(omap->usbtll_p2_fck);
- clk_put(omap->usbhost_p2_fck);
- clk_put(omap->usbtll_p1_fck);
- clk_put(omap->usbhost_p1_fck);
- clk_put(omap->xclk60mhsp2_ck);
- clk_put(omap->utmi_p2_fck);
- clk_put(omap->xclk60mhsp1_ck);
- clk_put(omap->utmi_p1_fck);
- clk_put(omap->usbtll_ick);
- clk_put(omap->usbtll_fck);
- clk_put(omap->usbhost_fs_fck);
- clk_put(omap->usbhost_hs_fck);
- clk_put(omap->usbhost_ick);
- kfree(omap);
-
- return 0;
-}
-
static bool is_ohci_port(enum usbhs_omap_port_mode pmode)
{
switch (pmode) {
@@ -689,96 +422,101 @@ static void usbhs_omap_tll_init(struct device *dev, u8 tll_channel_count)
}
}
-static int usbhs_enable(struct device *dev)
+static int usbhs_runtime_resume(struct device *dev)
{
struct usbhs_hcd_omap *omap = dev_get_drvdata(dev);
struct usbhs_omap_platform_data *pdata = &omap->platdata;
- unsigned long flags = 0;
- int ret = 0;
- unsigned long timeout;
- unsigned reg;
+ unsigned long flags;
+
+ dev_dbg(dev, "usbhs_runtime_resume\n");
- dev_dbg(dev, "starting TI HSUSB Controller\n");
if (!pdata) {
dev_dbg(dev, "missing platform_data\n");
return -ENODEV;
}
spin_lock_irqsave(&omap->lock, flags);
- if (omap->count > 0)
- goto end_count;
- clk_enable(omap->usbhost_ick);
- clk_enable(omap->usbhost_hs_fck);
- clk_enable(omap->usbhost_fs_fck);
- clk_enable(omap->usbtll_fck);
- clk_enable(omap->usbtll_ick);
+ if (omap->ehci_logic_fck && !IS_ERR(omap->ehci_logic_fck))
+ clk_enable(omap->ehci_logic_fck);
- if (pdata->ehci_data->phy_reset) {
- if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0])) {
- gpio_request(pdata->ehci_data->reset_gpio_port[0],
- "USB1 PHY reset");
- gpio_direction_output
- (pdata->ehci_data->reset_gpio_port[0], 0);
- }
+ if (is_ehci_tll_mode(pdata->port_mode[0])) {
+ clk_enable(omap->usbhost_p1_fck);
+ clk_enable(omap->usbtll_p1_fck);
+ }
+ if (is_ehci_tll_mode(pdata->port_mode[1])) {
+ clk_enable(omap->usbhost_p2_fck);
+ clk_enable(omap->usbtll_p2_fck);
+ }
+ clk_enable(omap->utmi_p1_fck);
+ clk_enable(omap->utmi_p2_fck);
- if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1])) {
- gpio_request(pdata->ehci_data->reset_gpio_port[1],
- "USB2 PHY reset");
- gpio_direction_output
- (pdata->ehci_data->reset_gpio_port[1], 0);
- }
+ spin_unlock_irqrestore(&omap->lock, flags);
- /* Hold the PHY in RESET for enough time till DIR is high */
- udelay(10);
- }
+ return 0;
+}
- omap->usbhs_rev = usbhs_read(omap->uhh_base, OMAP_UHH_REVISION);
- dev_dbg(dev, "OMAP UHH_REVISION 0x%x\n", omap->usbhs_rev);
+static int usbhs_runtime_suspend(struct device *dev)
+{
+ struct usbhs_hcd_omap *omap = dev_get_drvdata(dev);
+ struct usbhs_omap_platform_data *pdata = &omap->platdata;
+ unsigned long flags;
- /* perform TLL soft reset, and wait until reset is complete */
- usbhs_write(omap->tll_base, OMAP_USBTLL_SYSCONFIG,
- OMAP_USBTLL_SYSCONFIG_SOFTRESET);
+ dev_dbg(dev, "usbhs_runtime_suspend\n");
- /* Wait for TLL reset to complete */
- timeout = jiffies + msecs_to_jiffies(1000);
- while (!(usbhs_read(omap->tll_base, OMAP_USBTLL_SYSSTATUS)
- & OMAP_USBTLL_SYSSTATUS_RESETDONE)) {
- cpu_relax();
+ if (!pdata) {
+ dev_dbg(dev, "missing platform_data\n");
+ return -ENODEV;
+ }
- if (time_after(jiffies, timeout)) {
- dev_dbg(dev, "operation timed out\n");
- ret = -EINVAL;
- goto err_tll;
- }
+ spin_lock_irqsave(&omap->lock, flags);
+
+ if (is_ehci_tll_mode(pdata->port_mode[0])) {
+ clk_disable(omap->usbhost_p1_fck);
+ clk_disable(omap->usbtll_p1_fck);
+ }
+ if (is_ehci_tll_mode(pdata->port_mode[1])) {
+ clk_disable(omap->usbhost_p2_fck);
+ clk_disable(omap->usbtll_p2_fck);
}
+ clk_disable(omap->utmi_p2_fck);
+ clk_disable(omap->utmi_p1_fck);
- dev_dbg(dev, "TLL RESET DONE\n");
+ if (omap->ehci_logic_fck && !IS_ERR(omap->ehci_logic_fck))
+ clk_disable(omap->ehci_logic_fck);
- /* (1<<3) = no idle mode only for initial debugging */
- usbhs_write(omap->tll_base, OMAP_USBTLL_SYSCONFIG,
- OMAP_USBTLL_SYSCONFIG_ENAWAKEUP |
- OMAP_USBTLL_SYSCONFIG_SIDLEMODE |
- OMAP_USBTLL_SYSCONFIG_AUTOIDLE);
+ spin_unlock_irqrestore(&omap->lock, flags);
- /* Put UHH in NoIdle/NoStandby mode */
- reg = usbhs_read(omap->uhh_base, OMAP_UHH_SYSCONFIG);
- if (is_omap_usbhs_rev1(omap)) {
- reg |= (OMAP_UHH_SYSCONFIG_ENAWAKEUP
- | OMAP_UHH_SYSCONFIG_SIDLEMODE
- | OMAP_UHH_SYSCONFIG_CACTIVITY
- | OMAP_UHH_SYSCONFIG_MIDLEMODE);
- reg &= ~OMAP_UHH_SYSCONFIG_AUTOIDLE;
+ return 0;
+}
+
+static void omap_usbhs_init(struct device *dev)
+{
+ struct usbhs_hcd_omap *omap = dev_get_drvdata(dev);
+ struct usbhs_omap_platform_data *pdata = &omap->platdata;
+ unsigned long flags;
+ unsigned reg;
+ dev_dbg(dev, "starting TI HSUSB Controller\n");
- } else if (is_omap_usbhs_rev2(omap)) {
- reg &= ~OMAP4_UHH_SYSCONFIG_IDLEMODE_CLEAR;
- reg |= OMAP4_UHH_SYSCONFIG_NOIDLE;
- reg &= ~OMAP4_UHH_SYSCONFIG_STDBYMODE_CLEAR;
- reg |= OMAP4_UHH_SYSCONFIG_NOSTDBY;
+ pm_runtime_get_sync(dev);
+ spin_lock_irqsave(&omap->lock, flags);
+
+ if (pdata->ehci_data->phy_reset) {
+ if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0]))
+ gpio_request_one(pdata->ehci_data->reset_gpio_port[0],
+ GPIOF_OUT_INIT_LOW, "USB1 PHY reset");
+
+ if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1]))
+ gpio_request_one(pdata->ehci_data->reset_gpio_port[1],
+ GPIOF_OUT_INIT_LOW, "USB2 PHY reset");
+
+ /* Hold the PHY in RESET for enough time till DIR is high */
+ udelay(10);
}
- usbhs_write(omap->uhh_base, OMAP_UHH_SYSCONFIG, reg);
+ omap->usbhs_rev = usbhs_read(omap->uhh_base, OMAP_UHH_REVISION);
+ dev_dbg(dev, "OMAP UHH_REVISION 0x%x\n", omap->usbhs_rev);
reg = usbhs_read(omap->uhh_base, OMAP_UHH_HOSTCONFIG);
/* setup ULPI bypass and burst configurations */
@@ -825,49 +563,6 @@ static int usbhs_enable(struct device *dev)
reg &= ~OMAP4_P1_MODE_CLEAR;
reg &= ~OMAP4_P2_MODE_CLEAR;
- if (is_ehci_phy_mode(pdata->port_mode[0])) {
- ret = clk_set_parent(omap->utmi_p1_fck,
- omap->xclk60mhsp1_ck);
- if (ret != 0) {
- dev_err(dev, "xclk60mhsp1_ck set parent"
- "failed error:%d\n", ret);
- goto err_tll;
- }
- } else if (is_ehci_tll_mode(pdata->port_mode[0])) {
- ret = clk_set_parent(omap->utmi_p1_fck,
- omap->init_60m_fclk);
- if (ret != 0) {
- dev_err(dev, "init_60m_fclk set parent"
- "failed error:%d\n", ret);
- goto err_tll;
- }
- clk_enable(omap->usbhost_p1_fck);
- clk_enable(omap->usbtll_p1_fck);
- }
-
- if (is_ehci_phy_mode(pdata->port_mode[1])) {
- ret = clk_set_parent(omap->utmi_p2_fck,
- omap->xclk60mhsp2_ck);
- if (ret != 0) {
- dev_err(dev, "xclk60mhsp1_ck set parent"
- "failed error:%d\n", ret);
- goto err_tll;
- }
- } else if (is_ehci_tll_mode(pdata->port_mode[1])) {
- ret = clk_set_parent(omap->utmi_p2_fck,
- omap->init_60m_fclk);
- if (ret != 0) {
- dev_err(dev, "init_60m_fclk set parent"
- "failed error:%d\n", ret);
- goto err_tll;
- }
- clk_enable(omap->usbhost_p2_fck);
- clk_enable(omap->usbtll_p2_fck);
- }
-
- clk_enable(omap->utmi_p1_fck);
- clk_enable(omap->utmi_p2_fck);
-
if (is_ehci_tll_mode(pdata->port_mode[0]) ||
(is_ohci_port(pdata->port_mode[0])))
reg |= OMAP4_P1_MODE_TLL;
@@ -913,12 +608,15 @@ static int usbhs_enable(struct device *dev)
(pdata->ehci_data->reset_gpio_port[1], 1);
}
-end_count:
- omap->count++;
spin_unlock_irqrestore(&omap->lock, flags);
- return 0;
+ pm_runtime_put_sync(dev);
+}
+
+static void omap_usbhs_deinit(struct device *dev)
+{
+ struct usbhs_hcd_omap *omap = dev_get_drvdata(dev);
+ struct usbhs_omap_platform_data *pdata = &omap->platdata;
-err_tll:
if (pdata->ehci_data->phy_reset) {
if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0]))
gpio_free(pdata->ehci_data->reset_gpio_port[0]);
@@ -926,123 +624,272 @@ err_tll:
if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1]))
gpio_free(pdata->ehci_data->reset_gpio_port[1]);
}
-
- clk_disable(omap->usbtll_ick);
- clk_disable(omap->usbtll_fck);
- clk_disable(omap->usbhost_fs_fck);
- clk_disable(omap->usbhost_hs_fck);
- clk_disable(omap->usbhost_ick);
- spin_unlock_irqrestore(&omap->lock, flags);
- return ret;
}
-static void usbhs_disable(struct device *dev)
+
+/**
+ * usbhs_omap_probe - initialize TI-based HCDs
+ *
+ * Allocates basic resources for this USB host controller.
+ */
+static int __devinit usbhs_omap_probe(struct platform_device *pdev)
{
- struct usbhs_hcd_omap *omap = dev_get_drvdata(dev);
- struct usbhs_omap_platform_data *pdata = &omap->platdata;
- unsigned long flags = 0;
- unsigned long timeout;
+ struct device *dev = &pdev->dev;
+ struct usbhs_omap_platform_data *pdata = dev->platform_data;
+ struct usbhs_hcd_omap *omap;
+ struct resource *res;
+ int ret = 0;
+ int i;
- dev_dbg(dev, "stopping TI HSUSB Controller\n");
+ if (!pdata) {
+ dev_err(dev, "Missing platform data\n");
+ ret = -ENOMEM;
+ goto end_probe;
+ }
- spin_lock_irqsave(&omap->lock, flags);
+ omap = kzalloc(sizeof(*omap), GFP_KERNEL);
+ if (!omap) {
+ dev_err(dev, "Memory allocation failed\n");
+ ret = -ENOMEM;
+ goto end_probe;
+ }
- if (omap->count == 0)
- goto end_disble;
+ spin_lock_init(&omap->lock);
- omap->count--;
+ for (i = 0; i < OMAP3_HS_USB_PORTS; i++)
+ omap->platdata.port_mode[i] = pdata->port_mode[i];
- if (omap->count != 0)
- goto end_disble;
+ omap->platdata.ehci_data = pdata->ehci_data;
+ omap->platdata.ohci_data = pdata->ohci_data;
+
+ pm_runtime_enable(dev);
- /* Reset OMAP modules for insmod/rmmod to work */
- usbhs_write(omap->uhh_base, OMAP_UHH_SYSCONFIG,
- is_omap_usbhs_rev2(omap) ?
- OMAP4_UHH_SYSCONFIG_SOFTRESET :
- OMAP_UHH_SYSCONFIG_SOFTRESET);
- timeout = jiffies + msecs_to_jiffies(100);
- while (!(usbhs_read(omap->uhh_base, OMAP_UHH_SYSSTATUS)
- & (1 << 0))) {
- cpu_relax();
+ for (i = 0; i < OMAP3_HS_USB_PORTS; i++)
+ if (is_ehci_phy_mode(pdata->port_mode[i]) ||
+ is_ehci_tll_mode(pdata->port_mode[i]) ||
+ is_ehci_hsic_mode(pdata->port_mode[i])) {
+ omap->ehci_logic_fck = clk_get(dev, "ehci_logic_fck");
+ if (IS_ERR(omap->ehci_logic_fck)) {
+ ret = PTR_ERR(omap->ehci_logic_fck);
+ dev_warn(dev, "ehci_logic_fck failed:%d\n",
+ ret);
+ }
+ break;
+ }
- if (time_after(jiffies, timeout))
- dev_dbg(dev, "operation timed out\n");
+ omap->utmi_p1_fck = clk_get(dev, "utmi_p1_gfclk");
+ if (IS_ERR(omap->utmi_p1_fck)) {
+ ret = PTR_ERR(omap->utmi_p1_fck);
+ dev_err(dev, "utmi_p1_gfclk failed error:%d\n", ret);
+ goto err_end;
}
- while (!(usbhs_read(omap->uhh_base, OMAP_UHH_SYSSTATUS)
- & (1 << 1))) {
- cpu_relax();
+ omap->xclk60mhsp1_ck = clk_get(dev, "xclk60mhsp1_ck");
+ if (IS_ERR(omap->xclk60mhsp1_ck)) {
+ ret = PTR_ERR(omap->xclk60mhsp1_ck);
+ dev_err(dev, "xclk60mhsp1_ck failed error:%d\n", ret);
+ goto err_utmi_p1_fck;
+ }
+
+ omap->utmi_p2_fck = clk_get(dev, "utmi_p2_gfclk");
+ if (IS_ERR(omap->utmi_p2_fck)) {
+ ret = PTR_ERR(omap->utmi_p2_fck);
+ dev_err(dev, "utmi_p2_gfclk failed error:%d\n", ret);
+ goto err_xclk60mhsp1_ck;
+ }
+
+ omap->xclk60mhsp2_ck = clk_get(dev, "xclk60mhsp2_ck");
+ if (IS_ERR(omap->xclk60mhsp2_ck)) {
+ ret = PTR_ERR(omap->xclk60mhsp2_ck);
+ dev_err(dev, "xclk60mhsp2_ck failed error:%d\n", ret);
+ goto err_utmi_p2_fck;
+ }
- if (time_after(jiffies, timeout))
- dev_dbg(dev, "operation timed out\n");
+ omap->usbhost_p1_fck = clk_get(dev, "usb_host_hs_utmi_p1_clk");
+ if (IS_ERR(omap->usbhost_p1_fck)) {
+ ret = PTR_ERR(omap->usbhost_p1_fck);
+ dev_err(dev, "usbhost_p1_fck failed error:%d\n", ret);
+ goto err_xclk60mhsp2_ck;
+ }
+
+ omap->usbtll_p1_fck = clk_get(dev, "usb_tll_hs_usb_ch0_clk");
+ if (IS_ERR(omap->usbtll_p1_fck)) {
+ ret = PTR_ERR(omap->usbtll_p1_fck);
+ dev_err(dev, "usbtll_p1_fck failed error:%d\n", ret);
+ goto err_usbhost_p1_fck;
+ }
+
+ omap->usbhost_p2_fck = clk_get(dev, "usb_host_hs_utmi_p2_clk");
+ if (IS_ERR(omap->usbhost_p2_fck)) {
+ ret = PTR_ERR(omap->usbhost_p2_fck);
+ dev_err(dev, "usbhost_p2_fck failed error:%d\n", ret);
+ goto err_usbtll_p1_fck;
}
- while (!(usbhs_read(omap->uhh_base, OMAP_UHH_SYSSTATUS)
- & (1 << 2))) {
- cpu_relax();
+ omap->usbtll_p2_fck = clk_get(dev, "usb_tll_hs_usb_ch1_clk");
+ if (IS_ERR(omap->usbtll_p2_fck)) {
+ ret = PTR_ERR(omap->usbtll_p2_fck);
+ dev_err(dev, "usbtll_p2_fck failed error:%d\n", ret);
+ goto err_usbhost_p2_fck;
+ }
- if (time_after(jiffies, timeout))
- dev_dbg(dev, "operation timed out\n");
+ omap->init_60m_fclk = clk_get(dev, "init_60m_fclk");
+ if (IS_ERR(omap->init_60m_fclk)) {
+ ret = PTR_ERR(omap->init_60m_fclk);
+ dev_err(dev, "init_60m_fclk failed error:%d\n", ret);
+ goto err_usbtll_p2_fck;
}
- usbhs_write(omap->tll_base, OMAP_USBTLL_SYSCONFIG, (1 << 1));
+ if (is_ehci_phy_mode(pdata->port_mode[0])) {
+ /* for OMAP3, the clk set parent fails */
+ ret = clk_set_parent(omap->utmi_p1_fck,
+ omap->xclk60mhsp1_ck);
+ if (ret != 0)
+ dev_err(dev, "xclk60mhsp1_ck set parent"
+ "failed error:%d\n", ret);
+ } else if (is_ehci_tll_mode(pdata->port_mode[0])) {
+ ret = clk_set_parent(omap->utmi_p1_fck,
+ omap->init_60m_fclk);
+ if (ret != 0)
+ dev_err(dev, "init_60m_fclk set parent"
+ "failed error:%d\n", ret);
+ }
- while (!(usbhs_read(omap->tll_base, OMAP_USBTLL_SYSSTATUS)
- & (1 << 0))) {
- cpu_relax();
+ if (is_ehci_phy_mode(pdata->port_mode[1])) {
+ ret = clk_set_parent(omap->utmi_p2_fck,
+ omap->xclk60mhsp2_ck);
+ if (ret != 0)
+ dev_err(dev, "xclk60mhsp2_ck set parent"
+ "failed error:%d\n", ret);
+ } else if (is_ehci_tll_mode(pdata->port_mode[1])) {
+ ret = clk_set_parent(omap->utmi_p2_fck,
+ omap->init_60m_fclk);
+ if (ret != 0)
+ dev_err(dev, "init_60m_fclk set parent"
+ "failed error:%d\n", ret);
+ }
- if (time_after(jiffies, timeout))
- dev_dbg(dev, "operation timed out\n");
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "uhh");
+ if (!res) {
+ dev_err(dev, "UHH EHCI get resource failed\n");
+ ret = -ENODEV;
+ goto err_init_60m_fclk;
}
- if (is_omap_usbhs_rev2(omap)) {
- if (is_ehci_tll_mode(pdata->port_mode[0]))
- clk_disable(omap->usbtll_p1_fck);
- if (is_ehci_tll_mode(pdata->port_mode[1]))
- clk_disable(omap->usbtll_p2_fck);
- clk_disable(omap->utmi_p2_fck);
- clk_disable(omap->utmi_p1_fck);
+ omap->uhh_base = ioremap(res->start, resource_size(res));
+ if (!omap->uhh_base) {
+ dev_err(dev, "UHH ioremap failed\n");
+ ret = -ENOMEM;
+ goto err_init_60m_fclk;
}
- clk_disable(omap->usbtll_ick);
- clk_disable(omap->usbtll_fck);
- clk_disable(omap->usbhost_fs_fck);
- clk_disable(omap->usbhost_hs_fck);
- clk_disable(omap->usbhost_ick);
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tll");
+ if (!res) {
+ dev_err(dev, "UHH EHCI get resource failed\n");
+ ret = -ENODEV;
+ goto err_tll;
+ }
- /* The gpio_free migh sleep; so unlock the spinlock */
- spin_unlock_irqrestore(&omap->lock, flags);
+ omap->tll_base = ioremap(res->start, resource_size(res));
+ if (!omap->tll_base) {
+ dev_err(dev, "TLL ioremap failed\n");
+ ret = -ENOMEM;
+ goto err_tll;
+ }
- if (pdata->ehci_data->phy_reset) {
- if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0]))
- gpio_free(pdata->ehci_data->reset_gpio_port[0]);
+ platform_set_drvdata(pdev, omap);
- if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1]))
- gpio_free(pdata->ehci_data->reset_gpio_port[1]);
+ ret = omap_usbhs_alloc_children(pdev);
+ if (ret) {
+ dev_err(dev, "omap_usbhs_alloc_children failed\n");
+ goto err_alloc;
}
- return;
-end_disble:
- spin_unlock_irqrestore(&omap->lock, flags);
-}
+ omap_usbhs_init(dev);
-int omap_usbhs_enable(struct device *dev)
-{
- return usbhs_enable(dev->parent);
+ goto end_probe;
+
+err_alloc:
+ iounmap(omap->tll_base);
+
+err_tll:
+ iounmap(omap->uhh_base);
+
+err_init_60m_fclk:
+ clk_put(omap->init_60m_fclk);
+
+err_usbtll_p2_fck:
+ clk_put(omap->usbtll_p2_fck);
+
+err_usbhost_p2_fck:
+ clk_put(omap->usbhost_p2_fck);
+
+err_usbtll_p1_fck:
+ clk_put(omap->usbtll_p1_fck);
+
+err_usbhost_p1_fck:
+ clk_put(omap->usbhost_p1_fck);
+
+err_xclk60mhsp2_ck:
+ clk_put(omap->xclk60mhsp2_ck);
+
+err_utmi_p2_fck:
+ clk_put(omap->utmi_p2_fck);
+
+err_xclk60mhsp1_ck:
+ clk_put(omap->xclk60mhsp1_ck);
+
+err_utmi_p1_fck:
+ clk_put(omap->utmi_p1_fck);
+
+err_end:
+ clk_put(omap->ehci_logic_fck);
+ pm_runtime_disable(dev);
+ kfree(omap);
+
+end_probe:
+ return ret;
}
-EXPORT_SYMBOL_GPL(omap_usbhs_enable);
-void omap_usbhs_disable(struct device *dev)
+/**
+ * usbhs_omap_remove - shutdown processing for UHH & TLL HCDs
+ * @pdev: USB Host Controller being removed
+ *
+ * Reverses the effect of usbhs_omap_probe().
+ */
+static int __devexit usbhs_omap_remove(struct platform_device *pdev)
{
- usbhs_disable(dev->parent);
+ struct usbhs_hcd_omap *omap = platform_get_drvdata(pdev);
+
+ omap_usbhs_deinit(&pdev->dev);
+ iounmap(omap->tll_base);
+ iounmap(omap->uhh_base);
+ clk_put(omap->init_60m_fclk);
+ clk_put(omap->usbtll_p2_fck);
+ clk_put(omap->usbhost_p2_fck);
+ clk_put(omap->usbtll_p1_fck);
+ clk_put(omap->usbhost_p1_fck);
+ clk_put(omap->xclk60mhsp2_ck);
+ clk_put(omap->utmi_p2_fck);
+ clk_put(omap->xclk60mhsp1_ck);
+ clk_put(omap->utmi_p1_fck);
+ clk_put(omap->ehci_logic_fck);
+ pm_runtime_disable(&pdev->dev);
+ kfree(omap);
+
+ return 0;
}
-EXPORT_SYMBOL_GPL(omap_usbhs_disable);
+
+static const struct dev_pm_ops usbhsomap_dev_pm_ops = {
+ .runtime_suspend = usbhs_runtime_suspend,
+ .runtime_resume = usbhs_runtime_resume,
+};
static struct platform_driver usbhs_omap_driver = {
.driver = {
.name = (char *)usbhs_driver_name,
.owner = THIS_MODULE,
+ .pm = &usbhsomap_dev_pm_ops,
},
.remove = __exit_p(usbhs_omap_remove),
};
diff --git a/drivers/mfd/pcf50633-adc.c b/drivers/mfd/pcf50633-adc.c
index aed0d2a9b032..3927c17e4175 100644
--- a/drivers/mfd/pcf50633-adc.c
+++ b/drivers/mfd/pcf50633-adc.c
@@ -249,17 +249,7 @@ static struct platform_driver pcf50633_adc_driver = {
.remove = __devexit_p(pcf50633_adc_remove),
};
-static int __init pcf50633_adc_init(void)
-{
- return platform_driver_register(&pcf50633_adc_driver);
-}
-module_init(pcf50633_adc_init);
-
-static void __exit pcf50633_adc_exit(void)
-{
- platform_driver_unregister(&pcf50633_adc_driver);
-}
-module_exit(pcf50633_adc_exit);
+module_platform_driver(pcf50633_adc_driver);
MODULE_AUTHOR("Balaji Rao <balajirrao@openmoko.org>");
MODULE_DESCRIPTION("PCF50633 adc driver");
diff --git a/drivers/mfd/s5m-core.c b/drivers/mfd/s5m-core.c
new file mode 100644
index 000000000000..e075c113eec6
--- /dev/null
+++ b/drivers/mfd/s5m-core.c
@@ -0,0 +1,176 @@
+/*
+ * s5m-core.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd
+ * http://www.samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/pm_runtime.h>
+#include <linux/mutex.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/s5m87xx/s5m-core.h>
+#include <linux/mfd/s5m87xx/s5m-pmic.h>
+#include <linux/mfd/s5m87xx/s5m-rtc.h>
+#include <linux/regmap.h>
+
+static struct mfd_cell s5m87xx_devs[] = {
+ {
+ .name = "s5m8767-pmic",
+ }, {
+ .name = "s5m-rtc",
+ },
+};
+
+int s5m_reg_read(struct s5m87xx_dev *s5m87xx, u8 reg, void *dest)
+{
+ return regmap_read(s5m87xx->regmap, reg, dest);
+}
+EXPORT_SYMBOL_GPL(s5m_reg_read);
+
+int s5m_bulk_read(struct s5m87xx_dev *s5m87xx, u8 reg, int count, u8 *buf)
+{
+ return regmap_bulk_read(s5m87xx->regmap, reg, buf, count);
+}
+EXPORT_SYMBOL_GPL(s5m_bulk_read);
+
+int s5m_reg_write(struct s5m87xx_dev *s5m87xx, u8 reg, u8 value)
+{
+ return regmap_write(s5m87xx->regmap, reg, value);
+}
+EXPORT_SYMBOL_GPL(s5m_reg_write);
+
+int s5m_bulk_write(struct s5m87xx_dev *s5m87xx, u8 reg, int count, u8 *buf)
+{
+ return regmap_raw_write(s5m87xx->regmap, reg, buf, count * sizeof(u16));
+}
+EXPORT_SYMBOL_GPL(s5m_bulk_write);
+
+int s5m_reg_update(struct s5m87xx_dev *s5m87xx, u8 reg, u8 val, u8 mask)
+{
+ return regmap_update_bits(s5m87xx->regmap, reg, mask, val);
+}
+EXPORT_SYMBOL_GPL(s5m_reg_update);
+
+static struct regmap_config s5m_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static int s5m87xx_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct s5m_platform_data *pdata = i2c->dev.platform_data;
+ struct s5m87xx_dev *s5m87xx;
+ int ret = 0;
+ int error;
+
+ s5m87xx = kzalloc(sizeof(struct s5m87xx_dev), GFP_KERNEL);
+ if (s5m87xx == NULL)
+ return -ENOMEM;
+
+ i2c_set_clientdata(i2c, s5m87xx);
+ s5m87xx->dev = &i2c->dev;
+ s5m87xx->i2c = i2c;
+ s5m87xx->irq = i2c->irq;
+ s5m87xx->type = id->driver_data;
+
+ if (pdata) {
+ s5m87xx->device_type = pdata->device_type;
+ s5m87xx->ono = pdata->ono;
+ s5m87xx->irq_base = pdata->irq_base;
+ s5m87xx->wakeup = pdata->wakeup;
+ }
+
+ s5m87xx->regmap = regmap_init_i2c(i2c, &s5m_regmap_config);
+ if (IS_ERR(s5m87xx->regmap)) {
+ error = PTR_ERR(s5m87xx->regmap);
+ dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
+ error);
+ goto err;
+ }
+
+ s5m87xx->rtc = i2c_new_dummy(i2c->adapter, RTC_I2C_ADDR);
+ i2c_set_clientdata(s5m87xx->rtc, s5m87xx);
+
+ if (pdata && pdata->cfg_pmic_irq)
+ pdata->cfg_pmic_irq();
+
+ s5m_irq_init(s5m87xx);
+
+ pm_runtime_set_active(s5m87xx->dev);
+
+ ret = mfd_add_devices(s5m87xx->dev, -1,
+ s5m87xx_devs, ARRAY_SIZE(s5m87xx_devs),
+ NULL, 0);
+
+ if (ret < 0)
+ goto err;
+
+ return ret;
+
+err:
+ mfd_remove_devices(s5m87xx->dev);
+ s5m_irq_exit(s5m87xx);
+ i2c_unregister_device(s5m87xx->rtc);
+ regmap_exit(s5m87xx->regmap);
+ kfree(s5m87xx);
+ return ret;
+}
+
+static int s5m87xx_i2c_remove(struct i2c_client *i2c)
+{
+ struct s5m87xx_dev *s5m87xx = i2c_get_clientdata(i2c);
+
+ mfd_remove_devices(s5m87xx->dev);
+ s5m_irq_exit(s5m87xx);
+ i2c_unregister_device(s5m87xx->rtc);
+ regmap_exit(s5m87xx->regmap);
+ kfree(s5m87xx);
+ return 0;
+}
+
+static const struct i2c_device_id s5m87xx_i2c_id[] = {
+ { "s5m87xx", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, s5m87xx_i2c_id);
+
+static struct i2c_driver s5m87xx_i2c_driver = {
+ .driver = {
+ .name = "s5m87xx",
+ .owner = THIS_MODULE,
+ },
+ .probe = s5m87xx_i2c_probe,
+ .remove = s5m87xx_i2c_remove,
+ .id_table = s5m87xx_i2c_id,
+};
+
+static int __init s5m87xx_i2c_init(void)
+{
+ return i2c_add_driver(&s5m87xx_i2c_driver);
+}
+
+subsys_initcall(s5m87xx_i2c_init);
+
+static void __exit s5m87xx_i2c_exit(void)
+{
+ i2c_del_driver(&s5m87xx_i2c_driver);
+}
+module_exit(s5m87xx_i2c_exit);
+
+MODULE_AUTHOR("Sangbeom Kim <sbkim73@samsung.com>");
+MODULE_DESCRIPTION("Core support for the S5M MFD");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/s5m-irq.c b/drivers/mfd/s5m-irq.c
new file mode 100644
index 000000000000..de76dfb6f0ad
--- /dev/null
+++ b/drivers/mfd/s5m-irq.c
@@ -0,0 +1,487 @@
+/*
+ * s5m-irq.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd
+ * http://www.samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/mfd/s5m87xx/s5m-core.h>
+
+struct s5m_irq_data {
+ int reg;
+ int mask;
+};
+
+static struct s5m_irq_data s5m8767_irqs[] = {
+ [S5M8767_IRQ_PWRR] = {
+ .reg = 1,
+ .mask = S5M8767_IRQ_PWRR_MASK,
+ },
+ [S5M8767_IRQ_PWRF] = {
+ .reg = 1,
+ .mask = S5M8767_IRQ_PWRF_MASK,
+ },
+ [S5M8767_IRQ_PWR1S] = {
+ .reg = 1,
+ .mask = S5M8767_IRQ_PWR1S_MASK,
+ },
+ [S5M8767_IRQ_JIGR] = {
+ .reg = 1,
+ .mask = S5M8767_IRQ_JIGR_MASK,
+ },
+ [S5M8767_IRQ_JIGF] = {
+ .reg = 1,
+ .mask = S5M8767_IRQ_JIGF_MASK,
+ },
+ [S5M8767_IRQ_LOWBAT2] = {
+ .reg = 1,
+ .mask = S5M8767_IRQ_LOWBAT2_MASK,
+ },
+ [S5M8767_IRQ_LOWBAT1] = {
+ .reg = 1,
+ .mask = S5M8767_IRQ_LOWBAT1_MASK,
+ },
+ [S5M8767_IRQ_MRB] = {
+ .reg = 2,
+ .mask = S5M8767_IRQ_MRB_MASK,
+ },
+ [S5M8767_IRQ_DVSOK2] = {
+ .reg = 2,
+ .mask = S5M8767_IRQ_DVSOK2_MASK,
+ },
+ [S5M8767_IRQ_DVSOK3] = {
+ .reg = 2,
+ .mask = S5M8767_IRQ_DVSOK3_MASK,
+ },
+ [S5M8767_IRQ_DVSOK4] = {
+ .reg = 2,
+ .mask = S5M8767_IRQ_DVSOK4_MASK,
+ },
+ [S5M8767_IRQ_RTC60S] = {
+ .reg = 3,
+ .mask = S5M8767_IRQ_RTC60S_MASK,
+ },
+ [S5M8767_IRQ_RTCA1] = {
+ .reg = 3,
+ .mask = S5M8767_IRQ_RTCA1_MASK,
+ },
+ [S5M8767_IRQ_RTCA2] = {
+ .reg = 3,
+ .mask = S5M8767_IRQ_RTCA2_MASK,
+ },
+ [S5M8767_IRQ_SMPL] = {
+ .reg = 3,
+ .mask = S5M8767_IRQ_SMPL_MASK,
+ },
+ [S5M8767_IRQ_RTC1S] = {
+ .reg = 3,
+ .mask = S5M8767_IRQ_RTC1S_MASK,
+ },
+ [S5M8767_IRQ_WTSR] = {
+ .reg = 3,
+ .mask = S5M8767_IRQ_WTSR_MASK,
+ },
+};
+
+static struct s5m_irq_data s5m8763_irqs[] = {
+ [S5M8763_IRQ_DCINF] = {
+ .reg = 1,
+ .mask = S5M8763_IRQ_DCINF_MASK,
+ },
+ [S5M8763_IRQ_DCINR] = {
+ .reg = 1,
+ .mask = S5M8763_IRQ_DCINR_MASK,
+ },
+ [S5M8763_IRQ_JIGF] = {
+ .reg = 1,
+ .mask = S5M8763_IRQ_JIGF_MASK,
+ },
+ [S5M8763_IRQ_JIGR] = {
+ .reg = 1,
+ .mask = S5M8763_IRQ_JIGR_MASK,
+ },
+ [S5M8763_IRQ_PWRONF] = {
+ .reg = 1,
+ .mask = S5M8763_IRQ_PWRONF_MASK,
+ },
+ [S5M8763_IRQ_PWRONR] = {
+ .reg = 1,
+ .mask = S5M8763_IRQ_PWRONR_MASK,
+ },
+ [S5M8763_IRQ_WTSREVNT] = {
+ .reg = 2,
+ .mask = S5M8763_IRQ_WTSREVNT_MASK,
+ },
+ [S5M8763_IRQ_SMPLEVNT] = {
+ .reg = 2,
+ .mask = S5M8763_IRQ_SMPLEVNT_MASK,
+ },
+ [S5M8763_IRQ_ALARM1] = {
+ .reg = 2,
+ .mask = S5M8763_IRQ_ALARM1_MASK,
+ },
+ [S5M8763_IRQ_ALARM0] = {
+ .reg = 2,
+ .mask = S5M8763_IRQ_ALARM0_MASK,
+ },
+ [S5M8763_IRQ_ONKEY1S] = {
+ .reg = 3,
+ .mask = S5M8763_IRQ_ONKEY1S_MASK,
+ },
+ [S5M8763_IRQ_TOPOFFR] = {
+ .reg = 3,
+ .mask = S5M8763_IRQ_TOPOFFR_MASK,
+ },
+ [S5M8763_IRQ_DCINOVPR] = {
+ .reg = 3,
+ .mask = S5M8763_IRQ_DCINOVPR_MASK,
+ },
+ [S5M8763_IRQ_CHGRSTF] = {
+ .reg = 3,
+ .mask = S5M8763_IRQ_CHGRSTF_MASK,
+ },
+ [S5M8763_IRQ_DONER] = {
+ .reg = 3,
+ .mask = S5M8763_IRQ_DONER_MASK,
+ },
+ [S5M8763_IRQ_CHGFAULT] = {
+ .reg = 3,
+ .mask = S5M8763_IRQ_CHGFAULT_MASK,
+ },
+ [S5M8763_IRQ_LOBAT1] = {
+ .reg = 4,
+ .mask = S5M8763_IRQ_LOBAT1_MASK,
+ },
+ [S5M8763_IRQ_LOBAT2] = {
+ .reg = 4,
+ .mask = S5M8763_IRQ_LOBAT2_MASK,
+ },
+};
+
+static inline struct s5m_irq_data *
+irq_to_s5m8767_irq(struct s5m87xx_dev *s5m87xx, int irq)
+{
+ return &s5m8767_irqs[irq - s5m87xx->irq_base];
+}
+
+static void s5m8767_irq_lock(struct irq_data *data)
+{
+ struct s5m87xx_dev *s5m87xx = irq_data_get_irq_chip_data(data);
+
+ mutex_lock(&s5m87xx->irqlock);
+}
+
+static void s5m8767_irq_sync_unlock(struct irq_data *data)
+{
+ struct s5m87xx_dev *s5m87xx = irq_data_get_irq_chip_data(data);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(s5m87xx->irq_masks_cur); i++) {
+ if (s5m87xx->irq_masks_cur[i] != s5m87xx->irq_masks_cache[i]) {
+ s5m87xx->irq_masks_cache[i] = s5m87xx->irq_masks_cur[i];
+ s5m_reg_write(s5m87xx, S5M8767_REG_INT1M + i,
+ s5m87xx->irq_masks_cur[i]);
+ }
+ }
+
+ mutex_unlock(&s5m87xx->irqlock);
+}
+
+static void s5m8767_irq_unmask(struct irq_data *data)
+{
+ struct s5m87xx_dev *s5m87xx = irq_data_get_irq_chip_data(data);
+ struct s5m_irq_data *irq_data = irq_to_s5m8767_irq(s5m87xx,
+ data->irq);
+
+ s5m87xx->irq_masks_cur[irq_data->reg - 1] &= ~irq_data->mask;
+}
+
+static void s5m8767_irq_mask(struct irq_data *data)
+{
+ struct s5m87xx_dev *s5m87xx = irq_data_get_irq_chip_data(data);
+ struct s5m_irq_data *irq_data = irq_to_s5m8767_irq(s5m87xx,
+ data->irq);
+
+ s5m87xx->irq_masks_cur[irq_data->reg - 1] |= irq_data->mask;
+}
+
+static struct irq_chip s5m8767_irq_chip = {
+ .name = "s5m8767",
+ .irq_bus_lock = s5m8767_irq_lock,
+ .irq_bus_sync_unlock = s5m8767_irq_sync_unlock,
+ .irq_mask = s5m8767_irq_mask,
+ .irq_unmask = s5m8767_irq_unmask,
+};
+
+static inline struct s5m_irq_data *
+irq_to_s5m8763_irq(struct s5m87xx_dev *s5m87xx, int irq)
+{
+ return &s5m8763_irqs[irq - s5m87xx->irq_base];
+}
+
+static void s5m8763_irq_lock(struct irq_data *data)
+{
+ struct s5m87xx_dev *s5m87xx = irq_data_get_irq_chip_data(data);
+
+ mutex_lock(&s5m87xx->irqlock);
+}
+
+static void s5m8763_irq_sync_unlock(struct irq_data *data)
+{
+ struct s5m87xx_dev *s5m87xx = irq_data_get_irq_chip_data(data);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(s5m87xx->irq_masks_cur); i++) {
+ if (s5m87xx->irq_masks_cur[i] != s5m87xx->irq_masks_cache[i]) {
+ s5m87xx->irq_masks_cache[i] = s5m87xx->irq_masks_cur[i];
+ s5m_reg_write(s5m87xx, S5M8763_REG_IRQM1 + i,
+ s5m87xx->irq_masks_cur[i]);
+ }
+ }
+
+ mutex_unlock(&s5m87xx->irqlock);
+}
+
+static void s5m8763_irq_unmask(struct irq_data *data)
+{
+ struct s5m87xx_dev *s5m87xx = irq_data_get_irq_chip_data(data);
+ struct s5m_irq_data *irq_data = irq_to_s5m8763_irq(s5m87xx,
+ data->irq);
+
+ s5m87xx->irq_masks_cur[irq_data->reg - 1] &= ~irq_data->mask;
+}
+
+static void s5m8763_irq_mask(struct irq_data *data)
+{
+ struct s5m87xx_dev *s5m87xx = irq_data_get_irq_chip_data(data);
+ struct s5m_irq_data *irq_data = irq_to_s5m8763_irq(s5m87xx,
+ data->irq);
+
+ s5m87xx->irq_masks_cur[irq_data->reg - 1] |= irq_data->mask;
+}
+
+static struct irq_chip s5m8763_irq_chip = {
+ .name = "s5m8763",
+ .irq_bus_lock = s5m8763_irq_lock,
+ .irq_bus_sync_unlock = s5m8763_irq_sync_unlock,
+ .irq_mask = s5m8763_irq_mask,
+ .irq_unmask = s5m8763_irq_unmask,
+};
+
+
+static irqreturn_t s5m8767_irq_thread(int irq, void *data)
+{
+ struct s5m87xx_dev *s5m87xx = data;
+ u8 irq_reg[NUM_IRQ_REGS-1];
+ int ret;
+ int i;
+
+ ret = s5m_bulk_read(s5m87xx, S5M8767_REG_INT1,
+ NUM_IRQ_REGS - 1, irq_reg);
+ if (ret < 0) {
+ dev_err(s5m87xx->dev, "Failed to read interrupt register: %d\n",
+ ret);
+ return IRQ_NONE;
+ }
+
+ for (i = 0; i < NUM_IRQ_REGS - 1; i++)
+ irq_reg[i] &= ~s5m87xx->irq_masks_cur[i];
+
+ for (i = 0; i < S5M8767_IRQ_NR; i++) {
+ if (irq_reg[s5m8767_irqs[i].reg - 1] & s5m8767_irqs[i].mask)
+ handle_nested_irq(s5m87xx->irq_base + i);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t s5m8763_irq_thread(int irq, void *data)
+{
+ struct s5m87xx_dev *s5m87xx = data;
+ u8 irq_reg[NUM_IRQ_REGS];
+ int ret;
+ int i;
+
+ ret = s5m_bulk_read(s5m87xx, S5M8763_REG_IRQ1,
+ NUM_IRQ_REGS, irq_reg);
+ if (ret < 0) {
+ dev_err(s5m87xx->dev, "Failed to read interrupt register: %d\n",
+ ret);
+ return IRQ_NONE;
+ }
+
+ for (i = 0; i < NUM_IRQ_REGS; i++)
+ irq_reg[i] &= ~s5m87xx->irq_masks_cur[i];
+
+ for (i = 0; i < S5M8763_IRQ_NR; i++) {
+ if (irq_reg[s5m8763_irqs[i].reg - 1] & s5m8763_irqs[i].mask)
+ handle_nested_irq(s5m87xx->irq_base + i);
+ }
+
+ return IRQ_HANDLED;
+}
+
+int s5m_irq_resume(struct s5m87xx_dev *s5m87xx)
+{
+ if (s5m87xx->irq && s5m87xx->irq_base) {
+ switch (s5m87xx->device_type) {
+ case S5M8763X:
+ s5m8763_irq_thread(s5m87xx->irq_base, s5m87xx);
+ break;
+ case S5M8767X:
+ s5m8767_irq_thread(s5m87xx->irq_base, s5m87xx);
+ break;
+ default:
+ break;
+
+ }
+ }
+ return 0;
+}
+
+int s5m_irq_init(struct s5m87xx_dev *s5m87xx)
+{
+ int i;
+ int cur_irq;
+ int ret = 0;
+ int type = s5m87xx->device_type;
+
+ if (!s5m87xx->irq) {
+ dev_warn(s5m87xx->dev,
+ "No interrupt specified, no interrupts\n");
+ s5m87xx->irq_base = 0;
+ return 0;
+ }
+
+ if (!s5m87xx->irq_base) {
+ dev_err(s5m87xx->dev,
+ "No interrupt base specified, no interrupts\n");
+ return 0;
+ }
+
+ mutex_init(&s5m87xx->irqlock);
+
+ switch (type) {
+ case S5M8763X:
+ for (i = 0; i < NUM_IRQ_REGS; i++) {
+ s5m87xx->irq_masks_cur[i] = 0xff;
+ s5m87xx->irq_masks_cache[i] = 0xff;
+ s5m_reg_write(s5m87xx, S5M8763_REG_IRQM1 + i,
+ 0xff);
+ }
+
+ s5m_reg_write(s5m87xx, S5M8763_REG_STATUSM1, 0xff);
+ s5m_reg_write(s5m87xx, S5M8763_REG_STATUSM2, 0xff);
+
+ for (i = 0; i < S5M8763_IRQ_NR; i++) {
+ cur_irq = i + s5m87xx->irq_base;
+ irq_set_chip_data(cur_irq, s5m87xx);
+ irq_set_chip_and_handler(cur_irq, &s5m8763_irq_chip,
+ handle_edge_irq);
+ irq_set_nested_thread(cur_irq, 1);
+#ifdef CONFIG_ARM
+ set_irq_flags(cur_irq, IRQF_VALID);
+#else
+ irq_set_noprobe(cur_irq);
+#endif
+ }
+
+ ret = request_threaded_irq(s5m87xx->irq, NULL,
+ s5m8763_irq_thread,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "s5m87xx-irq", s5m87xx);
+ if (ret) {
+ dev_err(s5m87xx->dev, "Failed to request IRQ %d: %d\n",
+ s5m87xx->irq, ret);
+ return ret;
+ }
+ break;
+ case S5M8767X:
+ for (i = 0; i < NUM_IRQ_REGS - 1; i++) {
+ s5m87xx->irq_masks_cur[i] = 0xff;
+ s5m87xx->irq_masks_cache[i] = 0xff;
+ s5m_reg_write(s5m87xx, S5M8767_REG_INT1M + i,
+ 0xff);
+ }
+ for (i = 0; i < S5M8767_IRQ_NR; i++) {
+ cur_irq = i + s5m87xx->irq_base;
+ ret = irq_set_chip_data(cur_irq, s5m87xx);
+ if (ret) {
+ dev_err(s5m87xx->dev,
+ "Failed to irq_set_chip_data %d: %d\n",
+ s5m87xx->irq, ret);
+ return ret;
+ }
+
+ irq_set_chip_and_handler(cur_irq, &s5m8767_irq_chip,
+ handle_edge_irq);
+ irq_set_nested_thread(cur_irq, 1);
+#ifdef CONFIG_ARM
+ set_irq_flags(cur_irq, IRQF_VALID);
+#else
+ irq_set_noprobe(cur_irq);
+#endif
+ }
+
+ ret = request_threaded_irq(s5m87xx->irq, NULL,
+ s5m8767_irq_thread,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "s5m87xx-irq", s5m87xx);
+ if (ret) {
+ dev_err(s5m87xx->dev, "Failed to request IRQ %d: %d\n",
+ s5m87xx->irq, ret);
+ return ret;
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (!s5m87xx->ono)
+ return 0;
+
+ switch (type) {
+ case S5M8763X:
+ ret = request_threaded_irq(s5m87xx->ono, NULL,
+ s5m8763_irq_thread,
+ IRQF_TRIGGER_FALLING |
+ IRQF_TRIGGER_RISING |
+ IRQF_ONESHOT, "s5m87xx-ono",
+ s5m87xx);
+ break;
+ case S5M8767X:
+ ret = request_threaded_irq(s5m87xx->ono, NULL,
+ s5m8767_irq_thread,
+ IRQF_TRIGGER_FALLING |
+ IRQF_TRIGGER_RISING |
+ IRQF_ONESHOT, "s5m87xx-ono", s5m87xx);
+ break;
+ default:
+ break;
+ }
+
+ if (ret)
+ dev_err(s5m87xx->dev, "Failed to request IRQ %d: %d\n",
+ s5m87xx->ono, ret);
+
+ return 0;
+}
+
+void s5m_irq_exit(struct s5m87xx_dev *s5m87xx)
+{
+ if (s5m87xx->ono)
+ free_irq(s5m87xx->ono, s5m87xx);
+
+ if (s5m87xx->irq)
+ free_irq(s5m87xx->irq, s5m87xx);
+}
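
The s5m_irq_init() path above only wires up chip data, the flow handler and nested-thread marking for each virtual IRQ; a child driver is expected to claim its own line relative to irq_base. A minimal sketch of that consumer side follows; the S5M8767_IRQ_RTCA1 constant, the example_* names and the use of the parent's drvdata are illustrative assumptions, not part of this patch.

        static irqreturn_t example_alarm_handler(int irq, void *data)
        {
                struct device *dev = data;

                /* Runs in thread context because the parent dispatches via
                 * handle_nested_irq(). */
                dev_dbg(dev, "alarm interrupt\n");
                return IRQ_HANDLED;
        }

        static int example_child_probe(struct platform_device *pdev)
        {
                struct s5m87xx_dev *s5m87xx = dev_get_drvdata(pdev->dev.parent);

                /* Claim one of the nested virtual IRQs set up above. */
                return request_threaded_irq(s5m87xx->irq_base + S5M8767_IRQ_RTCA1,
                                            NULL, example_alarm_handler, 0,
                                            "example-rtc-alarm", &pdev->dev);
        }
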
diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
index df3702c1756d..f4d86117f44a 100644
--- a/drivers/mfd/sm501.c
+++ b/drivers/mfd/sm501.c
@@ -1720,7 +1720,7 @@ static int sm501_plat_remove(struct platform_device *dev)
return 0;
}
-static struct pci_device_id sm501_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(sm501_pci_tbl) = {
{ 0x126f, 0x0501, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ 0, },
};
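
For context on this and the later timberdale and vx855 hunks: DEFINE_PCI_DEVICE_TABLE() is a convenience macro from <linux/pci.h>. Around this kernel generation it expands roughly as sketched below, so the conversion mostly just adds const (and the __devinit-era section placement) to the ID table; treat the expansion as an approximation, not a quote of the header.

        /* Approximate expansion of the helper used above: */
        #define DEFINE_PCI_DEVICE_TABLE(_table) \
                const struct pci_device_id _table[] __devinitconst
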
diff --git a/drivers/mfd/stmpe-i2c.c b/drivers/mfd/stmpe-i2c.c
new file mode 100644
index 000000000000..373f423b1181
--- /dev/null
+++ b/drivers/mfd/stmpe-i2c.c
@@ -0,0 +1,109 @@
+/*
+ * ST Microelectronics MFD: stmpe's i2c client specific driver
+ *
+ * Copyright (C) ST-Ericsson SA 2010
+ * Copyright (C) ST Microelectronics SA 2011
+ *
+ * License Terms: GNU General Public License, version 2
+ * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
+ * Author: Viresh Kumar <viresh.kumar@st.com> for ST Microelectronics
+ */
+
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include "stmpe.h"
+
+static int i2c_reg_read(struct stmpe *stmpe, u8 reg)
+{
+ struct i2c_client *i2c = stmpe->client;
+
+ return i2c_smbus_read_byte_data(i2c, reg);
+}
+
+static int i2c_reg_write(struct stmpe *stmpe, u8 reg, u8 val)
+{
+ struct i2c_client *i2c = stmpe->client;
+
+ return i2c_smbus_write_byte_data(i2c, reg, val);
+}
+
+static int i2c_block_read(struct stmpe *stmpe, u8 reg, u8 length, u8 *values)
+{
+ struct i2c_client *i2c = stmpe->client;
+
+ return i2c_smbus_read_i2c_block_data(i2c, reg, length, values);
+}
+
+static int i2c_block_write(struct stmpe *stmpe, u8 reg, u8 length,
+ const u8 *values)
+{
+ struct i2c_client *i2c = stmpe->client;
+
+ return i2c_smbus_write_i2c_block_data(i2c, reg, length, values);
+}
+
+static struct stmpe_client_info i2c_ci = {
+ .read_byte = i2c_reg_read,
+ .write_byte = i2c_reg_write,
+ .read_block = i2c_block_read,
+ .write_block = i2c_block_write,
+};
+
+static int __devinit
+stmpe_i2c_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
+{
+ i2c_ci.data = (void *)id;
+ i2c_ci.irq = i2c->irq;
+ i2c_ci.client = i2c;
+ i2c_ci.dev = &i2c->dev;
+
+ return stmpe_probe(&i2c_ci, id->driver_data);
+}
+
+static int __devexit stmpe_i2c_remove(struct i2c_client *i2c)
+{
+ struct stmpe *stmpe = dev_get_drvdata(&i2c->dev);
+
+ return stmpe_remove(stmpe);
+}
+
+static const struct i2c_device_id stmpe_i2c_id[] = {
+ { "stmpe610", STMPE610 },
+ { "stmpe801", STMPE801 },
+ { "stmpe811", STMPE811 },
+ { "stmpe1601", STMPE1601 },
+ { "stmpe2401", STMPE2401 },
+ { "stmpe2403", STMPE2403 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, stmpe_i2c_id);
+
+static struct i2c_driver stmpe_i2c_driver = {
+ .driver.name = "stmpe-i2c",
+ .driver.owner = THIS_MODULE,
+#ifdef CONFIG_PM
+ .driver.pm = &stmpe_dev_pm_ops,
+#endif
+ .probe = stmpe_i2c_probe,
+ .remove = __devexit_p(stmpe_i2c_remove),
+ .id_table = stmpe_i2c_id,
+};
+
+static int __init stmpe_init(void)
+{
+ return i2c_add_driver(&stmpe_i2c_driver);
+}
+subsys_initcall(stmpe_init);
+
+static void __exit stmpe_exit(void)
+{
+ i2c_del_driver(&stmpe_i2c_driver);
+}
+module_exit(stmpe_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("STMPE MFD I2C Interface Driver");
+MODULE_AUTHOR("Rabin Vincent <rabin.vincent@stericsson.com>");
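
A minimal sketch of how a board file might bind a device to this new interface driver through the stmpe_i2c_id table above. The EXAMPLE_* values and the 0x40 slave address are illustrative placeholders, not values defined by this patch; the name string simply has to match one of the id table entries for stmpe_i2c_probe() to run.

        static struct stmpe_platform_data example_stmpe_pdata = {
                .blocks         = STMPE_BLOCK_GPIO,
                .irq_base       = EXAMPLE_STMPE_IRQ_BASE,       /* board specific */
                .irq_trigger    = IRQF_TRIGGER_FALLING,
        };

        static struct i2c_board_info example_i2c_board_info __initdata = {
                I2C_BOARD_INFO("stmpe1601", 0x40),              /* illustrative address */
                .irq            = EXAMPLE_STMPE_IRQ,            /* board specific */
                .platform_data  = &example_stmpe_pdata,
        };
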
diff --git a/drivers/mfd/stmpe-spi.c b/drivers/mfd/stmpe-spi.c
new file mode 100644
index 000000000000..b58c43c7ea93
--- /dev/null
+++ b/drivers/mfd/stmpe-spi.c
@@ -0,0 +1,150 @@
+/*
+ * ST Microelectronics MFD: stmpe's spi client specific driver
+ *
+ * Copyright (C) ST Microelectronics SA 2011
+ *
+ * License Terms: GNU General Public License, version 2
+ * Author: Viresh Kumar <viresh.kumar@st.com> for ST Microelectronics
+ */
+
+#include <linux/spi/spi.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include "stmpe.h"
+
+#define READ_CMD (1 << 7)
+
+static int spi_reg_read(struct stmpe *stmpe, u8 reg)
+{
+ struct spi_device *spi = stmpe->client;
+ int status = spi_w8r16(spi, reg | READ_CMD);
+
+ return (status < 0) ? status : status >> 8;
+}
+
+static int spi_reg_write(struct stmpe *stmpe, u8 reg, u8 val)
+{
+ struct spi_device *spi = stmpe->client;
+ u16 cmd = (val << 8) | reg;
+
+ return spi_write(spi, (const u8 *)&cmd, 2);
+}
+
+static int spi_block_read(struct stmpe *stmpe, u8 reg, u8 length, u8 *values)
+{
+ int ret, i;
+
+ for (i = 0; i < length; i++) {
+ ret = spi_reg_read(stmpe, reg + i);
+ if (ret < 0)
+ return ret;
+ *(values + i) = ret;
+ }
+
+ return 0;
+}
+
+static int spi_block_write(struct stmpe *stmpe, u8 reg, u8 length,
+ const u8 *values)
+{
+ int ret = 0, i;
+
+ for (i = length; i > 0; i--, reg++) {
+ ret = spi_reg_write(stmpe, reg, *(values + i - 1));
+ if (ret < 0)
+ return ret;
+ }
+
+ return ret;
+}
+
+static void spi_init(struct stmpe *stmpe)
+{
+ struct spi_device *spi = stmpe->client;
+
+ spi->bits_per_word = 8;
+
+ /* This register is only present for stmpe811 */
+ if (stmpe->variant->id_val == 0x0811)
+ spi_reg_write(stmpe, STMPE811_REG_SPI_CFG, spi->mode);
+
+ if (spi_setup(spi) < 0)
+ dev_dbg(&spi->dev, "spi_setup failed\n");
+}
+
+static struct stmpe_client_info spi_ci = {
+ .read_byte = spi_reg_read,
+ .write_byte = spi_reg_write,
+ .read_block = spi_block_read,
+ .write_block = spi_block_write,
+ .init = spi_init,
+};
+
+static int __devinit
+stmpe_spi_probe(struct spi_device *spi)
+{
+ const struct spi_device_id *id = spi_get_device_id(spi);
+
+ /* Don't exceed the 1 MHz maximum rate supported by the STMPE */
+ if (spi->max_speed_hz > 1000000) {
+ dev_dbg(&spi->dev, "requested clock %d kHz exceeds the 1 MHz limit\n",
+ (spi->max_speed_hz / 1000));
+ return -EINVAL;
+ }
+
+ spi_ci.irq = spi->irq;
+ spi_ci.client = spi;
+ spi_ci.dev = &spi->dev;
+
+ return stmpe_probe(&spi_ci, id->driver_data);
+}
+
+static int __devexit stmpe_spi_remove(struct spi_device *spi)
+{
+ struct stmpe *stmpe = dev_get_drvdata(&spi->dev);
+
+ return stmpe_remove(stmpe);
+}
+
+static const struct spi_device_id stmpe_spi_id[] = {
+ { "stmpe610", STMPE610 },
+ { "stmpe801", STMPE801 },
+ { "stmpe811", STMPE811 },
+ { "stmpe1601", STMPE1601 },
+ { "stmpe2401", STMPE2401 },
+ { "stmpe2403", STMPE2403 },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, stmpe_spi_id);
+
+static struct spi_driver stmpe_spi_driver = {
+ .driver = {
+ .name = "stmpe-spi",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+#ifdef CONFIG_PM
+ .pm = &stmpe_dev_pm_ops,
+#endif
+ },
+ .probe = stmpe_spi_probe,
+ .remove = __devexit_p(stmpe_spi_remove),
+ .id_table = stmpe_spi_id,
+};
+
+static int __init stmpe_init(void)
+{
+ return spi_register_driver(&stmpe_spi_driver);
+}
+subsys_initcall(stmpe_init);
+
+static void __exit stmpe_exit(void)
+{
+ spi_unregister_driver(&stmpe_spi_driver);
+}
+module_exit(stmpe_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("STMPE MFD SPI Interface Driver");
+MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
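
The SPI accessors above assume a simple two-byte frame: byte 0 is the register address, with bit 7 (READ_CMD) set for reads, and byte 1 carries the data. The (const u8 *)&cmd cast in spi_reg_write() relies on a little-endian CPU putting the register byte on the wire first; an endian-neutral equivalent would look like the sketch below, where the example_ helper is an illustration only and not part of the patch.

        static int example_stmpe_spi_write(struct spi_device *spi, u8 reg, u8 val)
        {
                u8 buf[2] = { reg, val };       /* register address first, then data */

                return spi_write(spi, buf, sizeof(buf));
        }
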
diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c
index 2963689cf45c..e07947e56b2a 100644
--- a/drivers/mfd/stmpe.c
+++ b/drivers/mfd/stmpe.c
@@ -1,18 +1,20 @@
/*
+ * ST Microelectronics MFD: stmpe's driver
+ *
* Copyright (C) ST-Ericsson SA 2010
*
* License Terms: GNU General Public License, version 2
* Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
*/
+#include <linux/gpio.h>
+#include <linux/export.h>
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/pm.h>
#include <linux/slab.h>
-#include <linux/i2c.h>
#include <linux/mfd/core.h>
-#include <linux/mfd/stmpe.h>
#include "stmpe.h"
static int __stmpe_enable(struct stmpe *stmpe, unsigned int blocks)
@@ -29,10 +31,9 @@ static int __stmpe_reg_read(struct stmpe *stmpe, u8 reg)
{
int ret;
- ret = i2c_smbus_read_byte_data(stmpe->i2c, reg);
+ ret = stmpe->ci->read_byte(stmpe, reg);
if (ret < 0)
- dev_err(stmpe->dev, "failed to read reg %#x: %d\n",
- reg, ret);
+ dev_err(stmpe->dev, "failed to read reg %#x: %d\n", reg, ret);
dev_vdbg(stmpe->dev, "rd: reg %#x => data %#x\n", reg, ret);
@@ -45,10 +46,9 @@ static int __stmpe_reg_write(struct stmpe *stmpe, u8 reg, u8 val)
dev_vdbg(stmpe->dev, "wr: reg %#x <= %#x\n", reg, val);
- ret = i2c_smbus_write_byte_data(stmpe->i2c, reg, val);
+ ret = stmpe->ci->write_byte(stmpe, reg, val);
if (ret < 0)
- dev_err(stmpe->dev, "failed to write reg %#x: %d\n",
- reg, ret);
+ dev_err(stmpe->dev, "failed to write reg %#x: %d\n", reg, ret);
return ret;
}
@@ -72,10 +72,9 @@ static int __stmpe_block_read(struct stmpe *stmpe, u8 reg, u8 length,
{
int ret;
- ret = i2c_smbus_read_i2c_block_data(stmpe->i2c, reg, length, values);
+ ret = stmpe->ci->read_block(stmpe, reg, length, values);
if (ret < 0)
- dev_err(stmpe->dev, "failed to read regs %#x: %d\n",
- reg, ret);
+ dev_err(stmpe->dev, "failed to read regs %#x: %d\n", reg, ret);
dev_vdbg(stmpe->dev, "rd: reg %#x (%d) => ret %#x\n", reg, length, ret);
stmpe_dump_bytes("stmpe rd: ", values, length);
@@ -91,11 +90,9 @@ static int __stmpe_block_write(struct stmpe *stmpe, u8 reg, u8 length,
dev_vdbg(stmpe->dev, "wr: regs %#x (%d)\n", reg, length);
stmpe_dump_bytes("stmpe wr: ", values, length);
- ret = i2c_smbus_write_i2c_block_data(stmpe->i2c, reg, length,
- values);
+ ret = stmpe->ci->write_block(stmpe, reg, length, values);
if (ret < 0)
- dev_err(stmpe->dev, "failed to write regs %#x: %d\n",
- reg, ret);
+ dev_err(stmpe->dev, "failed to write regs %#x: %d\n", reg, ret);
return ret;
}
@@ -245,12 +242,14 @@ int stmpe_set_altfunc(struct stmpe *stmpe, u32 pins, enum stmpe_block block)
u8 regaddr = stmpe->regs[STMPE_IDX_GPAFR_U_MSB];
int af_bits = variant->af_bits;
int numregs = DIV_ROUND_UP(stmpe->num_gpios * af_bits, 8);
- int afperreg = 8 / af_bits;
int mask = (1 << af_bits) - 1;
u8 regs[numregs];
- int af;
- int ret;
+ int af, afperreg, ret;
+
+ if (!variant->get_altfunc)
+ return 0;
+ afperreg = 8 / af_bits;
mutex_lock(&stmpe->lock);
ret = __stmpe_enable(stmpe, STMPE_BLOCK_GPIO);
@@ -325,7 +324,51 @@ static struct mfd_cell stmpe_keypad_cell = {
};
/*
- * Touchscreen (STMPE811)
+ * STMPE801
+ */
+static const u8 stmpe801_regs[] = {
+ [STMPE_IDX_CHIP_ID] = STMPE801_REG_CHIP_ID,
+ [STMPE_IDX_ICR_LSB] = STMPE801_REG_SYS_CTRL,
+ [STMPE_IDX_GPMR_LSB] = STMPE801_REG_GPIO_MP_STA,
+ [STMPE_IDX_GPSR_LSB] = STMPE801_REG_GPIO_SET_PIN,
+ [STMPE_IDX_GPCR_LSB] = STMPE801_REG_GPIO_SET_PIN,
+ [STMPE_IDX_GPDR_LSB] = STMPE801_REG_GPIO_DIR,
+ [STMPE_IDX_IEGPIOR_LSB] = STMPE801_REG_GPIO_INT_EN,
+ [STMPE_IDX_ISGPIOR_MSB] = STMPE801_REG_GPIO_INT_STA,
+};
+
+static struct stmpe_variant_block stmpe801_blocks[] = {
+ {
+ .cell = &stmpe_gpio_cell,
+ .irq = 0,
+ .block = STMPE_BLOCK_GPIO,
+ },
+};
+
+static int stmpe801_enable(struct stmpe *stmpe, unsigned int blocks,
+ bool enable)
+{
+ if (blocks & STMPE_BLOCK_GPIO)
+ return 0;
+ else
+ return -EINVAL;
+}
+
+static struct stmpe_variant_info stmpe801 = {
+ .name = "stmpe801",
+ .id_val = STMPE801_ID,
+ .id_mask = 0xffff,
+ .num_gpios = 8,
+ .regs = stmpe801_regs,
+ .blocks = stmpe801_blocks,
+ .num_blocks = ARRAY_SIZE(stmpe801_blocks),
+ .num_irqs = STMPE801_NR_INTERNAL_IRQS,
+ .enable = stmpe801_enable,
+};
+
+/*
+ * Touchscreen (STMPE811 or STMPE610)
*/
static struct resource stmpe_ts_resources[] = {
@@ -350,7 +393,7 @@ static struct mfd_cell stmpe_ts_cell = {
};
/*
- * STMPE811
+ * STMPE811 or STMPE610
*/
static const u8 stmpe811_regs[] = {
@@ -421,6 +464,21 @@ static struct stmpe_variant_info stmpe811 = {
.get_altfunc = stmpe811_get_altfunc,
};
+/* Similar to 811, except number of gpios */
+static struct stmpe_variant_info stmpe610 = {
+ .name = "stmpe610",
+ .id_val = 0x0811,
+ .id_mask = 0xffff,
+ .num_gpios = 6,
+ .af_bits = 1,
+ .regs = stmpe811_regs,
+ .blocks = stmpe811_blocks,
+ .num_blocks = ARRAY_SIZE(stmpe811_blocks),
+ .num_irqs = STMPE811_NR_INTERNAL_IRQS,
+ .enable = stmpe811_enable,
+ .get_altfunc = stmpe811_get_altfunc,
+};
+
/*
* STMPE1601
*/
@@ -655,6 +713,8 @@ static struct stmpe_variant_info stmpe2403 = {
};
static struct stmpe_variant_info *stmpe_variant_info[] = {
+ [STMPE610] = &stmpe610,
+ [STMPE801] = &stmpe801,
[STMPE811] = &stmpe811,
[STMPE1601] = &stmpe1601,
[STMPE2401] = &stmpe2401,
@@ -671,6 +731,11 @@ static irqreturn_t stmpe_irq(int irq, void *data)
int ret;
int i;
+ if (variant->id_val == STMPE801_ID) {
+ handle_nested_irq(stmpe->irq_base);
+ return IRQ_HANDLED;
+ }
+
ret = stmpe_block_read(stmpe, israddr, num, isr);
if (ret < 0)
return IRQ_NONE;
@@ -757,14 +822,17 @@ static struct irq_chip stmpe_irq_chip = {
static int __devinit stmpe_irq_init(struct stmpe *stmpe)
{
+ struct irq_chip *chip = NULL;
int num_irqs = stmpe->variant->num_irqs;
int base = stmpe->irq_base;
int irq;
+ if (stmpe->variant->id_val != STMPE801_ID)
+ chip = &stmpe_irq_chip;
+
for (irq = base; irq < base + num_irqs; irq++) {
irq_set_chip_data(irq, stmpe);
- irq_set_chip_and_handler(irq, &stmpe_irq_chip,
- handle_edge_irq);
+ irq_set_chip_and_handler(irq, chip, handle_edge_irq);
irq_set_nested_thread(irq, 1);
#ifdef CONFIG_ARM
set_irq_flags(irq, IRQF_VALID);
@@ -796,7 +864,7 @@ static int __devinit stmpe_chip_init(struct stmpe *stmpe)
unsigned int irq_trigger = stmpe->pdata->irq_trigger;
int autosleep_timeout = stmpe->pdata->autosleep_timeout;
struct stmpe_variant_info *variant = stmpe->variant;
- u8 icr = STMPE_ICR_LSB_GIM;
+ u8 icr;
unsigned int id;
u8 data[2];
int ret;
@@ -819,16 +887,32 @@ static int __devinit stmpe_chip_init(struct stmpe *stmpe)
if (ret)
return ret;
- if (irq_trigger == IRQF_TRIGGER_FALLING ||
- irq_trigger == IRQF_TRIGGER_RISING)
- icr |= STMPE_ICR_LSB_EDGE;
+ if (id == STMPE801_ID)
+ icr = STMPE801_REG_SYS_CTRL_INT_EN;
+ else
+ icr = STMPE_ICR_LSB_GIM;
+
+ /* STMPE801 doesn't support Edge interrupts */
+ if (id != STMPE801_ID) {
+ if (irq_trigger == IRQF_TRIGGER_FALLING ||
+ irq_trigger == IRQF_TRIGGER_RISING)
+ icr |= STMPE_ICR_LSB_EDGE;
+ }
if (irq_trigger == IRQF_TRIGGER_RISING ||
- irq_trigger == IRQF_TRIGGER_HIGH)
- icr |= STMPE_ICR_LSB_HIGH;
+ irq_trigger == IRQF_TRIGGER_HIGH) {
+ if (id == STMPE801_ID)
+ icr |= STMPE801_REG_SYS_CTRL_INT_HI;
+ else
+ icr |= STMPE_ICR_LSB_HIGH;
+ }
- if (stmpe->pdata->irq_invert_polarity)
- icr ^= STMPE_ICR_LSB_HIGH;
+ if (stmpe->pdata->irq_invert_polarity) {
+ if (id == STMPE801_ID)
+ icr ^= STMPE801_REG_SYS_CTRL_INT_HI;
+ else
+ icr ^= STMPE_ICR_LSB_HIGH;
+ }
if (stmpe->pdata->autosleep) {
ret = stmpe_autosleep(stmpe, autosleep_timeout);
@@ -873,32 +957,10 @@ static int __devinit stmpe_devices_init(struct stmpe *stmpe)
return ret;
}
-#ifdef CONFIG_PM
-static int stmpe_suspend(struct device *dev)
-{
- struct i2c_client *i2c = to_i2c_client(dev);
-
- if (device_may_wakeup(&i2c->dev))
- enable_irq_wake(i2c->irq);
-
- return 0;
-}
-
-static int stmpe_resume(struct device *dev)
-{
- struct i2c_client *i2c = to_i2c_client(dev);
-
- if (device_may_wakeup(&i2c->dev))
- disable_irq_wake(i2c->irq);
-
- return 0;
-}
-#endif
-
-static int __devinit stmpe_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+/* Called from client specific probe routines */
+int __devinit stmpe_probe(struct stmpe_client_info *ci, int partnum)
{
- struct stmpe_platform_data *pdata = i2c->dev.platform_data;
+ struct stmpe_platform_data *pdata = dev_get_platdata(ci->dev);
struct stmpe *stmpe;
int ret;
@@ -912,30 +974,43 @@ static int __devinit stmpe_probe(struct i2c_client *i2c,
mutex_init(&stmpe->irq_lock);
mutex_init(&stmpe->lock);
- stmpe->dev = &i2c->dev;
- stmpe->i2c = i2c;
-
+ stmpe->dev = ci->dev;
+ stmpe->client = ci->client;
stmpe->pdata = pdata;
stmpe->irq_base = pdata->irq_base;
-
- stmpe->partnum = id->driver_data;
- stmpe->variant = stmpe_variant_info[stmpe->partnum];
+ stmpe->ci = ci;
+ stmpe->partnum = partnum;
+ stmpe->variant = stmpe_variant_info[partnum];
stmpe->regs = stmpe->variant->regs;
stmpe->num_gpios = stmpe->variant->num_gpios;
+ dev_set_drvdata(stmpe->dev, stmpe);
- i2c_set_clientdata(i2c, stmpe);
+ if (ci->init)
+ ci->init(stmpe);
+
+ if (pdata->irq_over_gpio) {
+ ret = gpio_request_one(pdata->irq_gpio, GPIOF_DIR_IN, "stmpe");
+ if (ret) {
+ dev_err(stmpe->dev, "failed to request IRQ GPIO: %d\n",
+ ret);
+ goto out_free;
+ }
+
+ stmpe->irq = gpio_to_irq(pdata->irq_gpio);
+ } else {
+ stmpe->irq = ci->irq;
+ }
ret = stmpe_chip_init(stmpe);
if (ret)
- goto out_free;
+ goto free_gpio;
ret = stmpe_irq_init(stmpe);
if (ret)
- goto out_free;
+ goto free_gpio;
- ret = request_threaded_irq(stmpe->i2c->irq, NULL, stmpe_irq,
- pdata->irq_trigger | IRQF_ONESHOT,
- "stmpe", stmpe);
+ ret = request_threaded_irq(stmpe->irq, NULL, stmpe_irq,
+ pdata->irq_trigger | IRQF_ONESHOT, "stmpe", stmpe);
if (ret) {
dev_err(stmpe->dev, "failed to request IRQ: %d\n", ret);
goto out_removeirq;
@@ -951,67 +1026,55 @@ static int __devinit stmpe_probe(struct i2c_client *i2c,
out_removedevs:
mfd_remove_devices(stmpe->dev);
- free_irq(stmpe->i2c->irq, stmpe);
+ free_irq(stmpe->irq, stmpe);
out_removeirq:
stmpe_irq_remove(stmpe);
+free_gpio:
+ if (pdata->irq_over_gpio)
+ gpio_free(pdata->irq_gpio);
out_free:
kfree(stmpe);
return ret;
}
-static int __devexit stmpe_remove(struct i2c_client *client)
+int stmpe_remove(struct stmpe *stmpe)
{
- struct stmpe *stmpe = i2c_get_clientdata(client);
-
mfd_remove_devices(stmpe->dev);
- free_irq(stmpe->i2c->irq, stmpe);
+ free_irq(stmpe->irq, stmpe);
stmpe_irq_remove(stmpe);
+ if (stmpe->pdata->irq_over_gpio)
+ gpio_free(stmpe->pdata->irq_gpio);
+
kfree(stmpe);
return 0;
}
-static const struct i2c_device_id stmpe_id[] = {
- { "stmpe811", STMPE811 },
- { "stmpe1601", STMPE1601 },
- { "stmpe2401", STMPE2401 },
- { "stmpe2403", STMPE2403 },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, stmpe_id);
-
#ifdef CONFIG_PM
-static const struct dev_pm_ops stmpe_dev_pm_ops = {
- .suspend = stmpe_suspend,
- .resume = stmpe_resume,
-};
-#endif
+static int stmpe_suspend(struct device *dev)
+{
+ struct stmpe *stmpe = dev_get_drvdata(dev);
-static struct i2c_driver stmpe_driver = {
- .driver.name = "stmpe",
- .driver.owner = THIS_MODULE,
-#ifdef CONFIG_PM
- .driver.pm = &stmpe_dev_pm_ops,
-#endif
- .probe = stmpe_probe,
- .remove = __devexit_p(stmpe_remove),
- .id_table = stmpe_id,
-};
+ if (device_may_wakeup(dev))
+ enable_irq_wake(stmpe->irq);
-static int __init stmpe_init(void)
-{
- return i2c_add_driver(&stmpe_driver);
+ return 0;
}
-subsys_initcall(stmpe_init);
-static void __exit stmpe_exit(void)
+static int stmpe_resume(struct device *dev)
{
- i2c_del_driver(&stmpe_driver);
+ struct stmpe *stmpe = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ disable_irq_wake(stmpe->irq);
+
+ return 0;
}
-module_exit(stmpe_exit);
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("STMPE MFD core driver");
-MODULE_AUTHOR("Rabin Vincent <rabin.vincent@stericsson.com>");
+const struct dev_pm_ops stmpe_dev_pm_ops = {
+ .suspend = stmpe_suspend,
+ .resume = stmpe_resume,
+};
+#endif
diff --git a/drivers/mfd/stmpe.h b/drivers/mfd/stmpe.h
index e4ee38956583..7b8e13f5b764 100644
--- a/drivers/mfd/stmpe.h
+++ b/drivers/mfd/stmpe.h
@@ -8,6 +8,14 @@
#ifndef __STMPE_H
#define __STMPE_H
+#include <linux/device.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/stmpe.h>
+#include <linux/printk.h>
+#include <linux/types.h>
+
+extern const struct dev_pm_ops stmpe_dev_pm_ops;
+
#ifdef STMPE_DUMP_BYTES
static inline void stmpe_dump_bytes(const char *str, const void *buf,
size_t len)
@@ -67,11 +75,55 @@ struct stmpe_variant_info {
int (*enable_autosleep)(struct stmpe *stmpe, int autosleep_timeout);
};
+/**
+ * struct stmpe_client_info - i2c or spi specific routines/info
+ * @data: client specific data
+ * @irq: client interrupt line
+ * @client: handle to the underlying i2c or spi client
+ * @dev: device associated with the client
+ * @read_byte: read single byte
+ * @write_byte: write single byte
+ * @read_block: read block or multiple bytes
+ * @write_block: write block or multiple bytes
+ * @init: client init routine, called during probe
+ */
+struct stmpe_client_info {
+ void *data;
+ int irq;
+ void *client;
+ struct device *dev;
+ int (*read_byte)(struct stmpe *stmpe, u8 reg);
+ int (*write_byte)(struct stmpe *stmpe, u8 reg, u8 val);
+ int (*read_block)(struct stmpe *stmpe, u8 reg, u8 len, u8 *values);
+ int (*write_block)(struct stmpe *stmpe, u8 reg, u8 len,
+ const u8 *values);
+ void (*init)(struct stmpe *stmpe);
+};
+
+int stmpe_probe(struct stmpe_client_info *ci, int partnum);
+int stmpe_remove(struct stmpe *stmpe);
+
#define STMPE_ICR_LSB_HIGH (1 << 2)
#define STMPE_ICR_LSB_EDGE (1 << 1)
#define STMPE_ICR_LSB_GIM (1 << 0)
/*
+ * STMPE801
+ */
+#define STMPE801_ID 0x0108
+#define STMPE801_NR_INTERNAL_IRQS 1
+
+#define STMPE801_REG_CHIP_ID 0x00
+#define STMPE801_REG_VERSION_ID 0x02
+#define STMPE801_REG_SYS_CTRL 0x04
+#define STMPE801_REG_GPIO_INT_EN 0x08
+#define STMPE801_REG_GPIO_INT_STA 0x09
+#define STMPE801_REG_GPIO_MP_STA 0x10
+#define STMPE801_REG_GPIO_SET_PIN 0x11
+#define STMPE801_REG_GPIO_DIR 0x12
+
+#define STMPE801_REG_SYS_CTRL_RESET (1 << 7)
+#define STMPE801_REG_SYS_CTRL_INT_EN (1 << 2)
+#define STMPE801_REG_SYS_CTRL_INT_HI (1 << 0)
+
+/*
* STMPE811
*/
@@ -87,6 +139,7 @@ struct stmpe_variant_info {
#define STMPE811_REG_CHIP_ID 0x00
#define STMPE811_REG_SYS_CTRL2 0x04
+#define STMPE811_REG_SPI_CFG 0x08
#define STMPE811_REG_INT_CTRL 0x09
#define STMPE811_REG_INT_EN 0x0A
#define STMPE811_REG_INT_STA 0x0B
diff --git a/drivers/mfd/t7l66xb.c b/drivers/mfd/t7l66xb.c
index 91ad21ef7721..2d9e8799e733 100644
--- a/drivers/mfd/t7l66xb.c
+++ b/drivers/mfd/t7l66xb.c
@@ -442,21 +442,7 @@ static struct platform_driver t7l66xb_platform_driver = {
/*--------------------------------------------------------------------------*/
-static int __init t7l66xb_init(void)
-{
- int retval = 0;
-
- retval = platform_driver_register(&t7l66xb_platform_driver);
- return retval;
-}
-
-static void __exit t7l66xb_exit(void)
-{
- platform_driver_unregister(&t7l66xb_platform_driver);
-}
-
-module_init(t7l66xb_init);
-module_exit(t7l66xb_exit);
+module_platform_driver(t7l66xb_platform_driver);
MODULE_DESCRIPTION("Toshiba T7L66XB core driver");
MODULE_LICENSE("GPL v2");
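
This hunk, and the similar ones below for tc6387xb, ti-ssp, twl4030-audio, twl4030-madc and twl6040, rely on the module_platform_driver() helper that landed shortly before this merge. Roughly, it generates the register/unregister boilerplate shown below via module_driver(); the exact generated names come from the macro, so treat this as a sketch rather than the literal expansion.

        /* Approximately what module_platform_driver(t7l66xb_platform_driver) emits: */
        static int __init t7l66xb_platform_driver_init(void)
        {
                return platform_driver_register(&t7l66xb_platform_driver);
        }
        module_init(t7l66xb_platform_driver_init);

        static void __exit t7l66xb_platform_driver_exit(void)
        {
                platform_driver_unregister(&t7l66xb_platform_driver);
        }
        module_exit(t7l66xb_platform_driver_exit);
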
diff --git a/drivers/mfd/tc6387xb.c b/drivers/mfd/tc6387xb.c
index 71bc835324d8..d20a284ad4ba 100644
--- a/drivers/mfd/tc6387xb.c
+++ b/drivers/mfd/tc6387xb.c
@@ -234,19 +234,7 @@ static struct platform_driver tc6387xb_platform_driver = {
.resume = tc6387xb_resume,
};
-
-static int __init tc6387xb_init(void)
-{
- return platform_driver_register(&tc6387xb_platform_driver);
-}
-
-static void __exit tc6387xb_exit(void)
-{
- platform_driver_unregister(&tc6387xb_platform_driver);
-}
-
-module_init(tc6387xb_init);
-module_exit(tc6387xb_exit);
+module_platform_driver(tc6387xb_platform_driver);
MODULE_DESCRIPTION("Toshiba TC6387XB core driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/ti-ssp.c b/drivers/mfd/ti-ssp.c
index af9ab0e5ca64..4fb0e6c8e8fe 100644
--- a/drivers/mfd/ti-ssp.c
+++ b/drivers/mfd/ti-ssp.c
@@ -458,17 +458,7 @@ static struct platform_driver ti_ssp_driver = {
}
};
-static int __init ti_ssp_init(void)
-{
- return platform_driver_register(&ti_ssp_driver);
-}
-module_init(ti_ssp_init);
-
-static void __exit ti_ssp_exit(void)
-{
- platform_driver_unregister(&ti_ssp_driver);
-}
-module_exit(ti_ssp_exit);
+module_platform_driver(ti_ssp_driver);
MODULE_DESCRIPTION("Sequencer Serial Port (SSP) Driver");
MODULE_AUTHOR("Cyril Chemparathy");
diff --git a/drivers/mfd/timberdale.c b/drivers/mfd/timberdale.c
index 02d65692ceb4..0ba26fb12cf5 100644
--- a/drivers/mfd/timberdale.c
+++ b/drivers/mfd/timberdale.c
@@ -857,7 +857,7 @@ static void __devexit timb_remove(struct pci_dev *dev)
kfree(priv);
}
-static struct pci_device_id timberdale_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(timberdale_pci_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_TIMB, PCI_DEVICE_ID_TIMB) },
{ 0 }
};
diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c
index bba26d96c240..a5ddf31b60ca 100644
--- a/drivers/mfd/tps6586x.c
+++ b/drivers/mfd/tps6586x.c
@@ -197,7 +197,7 @@ int tps6586x_set_bits(struct device *dev, int reg, uint8_t bit_mask)
if (ret)
goto out;
- if ((reg_val & bit_mask) == 0) {
+ if ((reg_val & bit_mask) != bit_mask) {
reg_val |= bit_mask;
ret = __tps6586x_write(to_i2c_client(dev), reg, reg_val);
}
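
The tps6586x_set_bits() change matters once bit_mask covers more than one bit; a short worked case with purely illustrative values:

        /* reg_val = 0x01, bit_mask = 0x03:
         *   old test: (reg_val & bit_mask) == 0        -> false, the register is
         *             not rewritten, so bit 1 stays clear although it was requested;
         *   new test: (reg_val & bit_mask) != bit_mask -> true, reg_val |= bit_mask
         *             writes back 0x03 as intended.
         */
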
diff --git a/drivers/mfd/tps65910-irq.c b/drivers/mfd/tps65910-irq.c
index a56be931551c..95c0d7978bec 100644
--- a/drivers/mfd/tps65910-irq.c
+++ b/drivers/mfd/tps65910-irq.c
@@ -215,6 +215,7 @@ int tps65910_irq_init(struct tps65910 *tps65910, int irq,
int tps65910_irq_exit(struct tps65910 *tps65910)
{
- free_irq(tps65910->chip_irq, tps65910);
+ if (tps65910->chip_irq)
+ free_irq(tps65910->chip_irq, tps65910);
return 0;
}
diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
index 6f5b8cf2f652..01cf5012a08f 100644
--- a/drivers/mfd/tps65910.c
+++ b/drivers/mfd/tps65910.c
@@ -120,7 +120,7 @@ int tps65910_clear_bits(struct tps65910 *tps65910, u8 reg, u8 mask)
goto out;
}
- data &= mask;
+ data &= ~mask;
err = tps65910_i2c_write(tps65910, reg, 1, &data);
if (err)
dev_err(tps65910->dev, "write to reg %x failed\n", reg);
@@ -172,15 +172,12 @@ static int tps65910_i2c_probe(struct i2c_client *i2c,
tps65910_gpio_init(tps65910, pmic_plat_data->gpio_base);
- ret = tps65910_irq_init(tps65910, init_data->irq, init_data);
- if (ret < 0)
- goto err;
+ tps65910_irq_init(tps65910, init_data->irq, init_data);
kfree(init_data);
return ret;
err:
- mfd_remove_devices(tps65910->dev);
kfree(tps65910);
kfree(init_data);
return ret;
@@ -190,8 +187,8 @@ static int tps65910_i2c_remove(struct i2c_client *i2c)
{
struct tps65910 *tps65910 = i2c_get_clientdata(i2c);
- mfd_remove_devices(tps65910->dev);
tps65910_irq_exit(tps65910);
+ mfd_remove_devices(tps65910->dev);
kfree(tps65910);
return 0;
diff --git a/drivers/mfd/tps65912-spi.c b/drivers/mfd/tps65912-spi.c
index 6d71e0d25744..27d3302d56b8 100644
--- a/drivers/mfd/tps65912-spi.c
+++ b/drivers/mfd/tps65912-spi.c
@@ -111,7 +111,6 @@ static int __devexit tps65912_spi_remove(struct spi_device *spi)
static struct spi_driver tps65912_spi_driver = {
.driver = {
.name = "tps65912",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = tps65912_spi_probe,
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index bfbd66021afd..e04e04ddc15e 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -34,6 +34,11 @@
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/err.h>
+#include <linux/device.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/irqdomain.h>
#include <linux/regulator/machine.h>
@@ -144,6 +149,9 @@
#define TWL_MODULE_LAST TWL4030_MODULE_LAST
+#define TWL4030_NR_IRQS 8
+#define TWL6030_NR_IRQS 20
+
/* Base Address defns for twl4030_map[] */
/* subchip/slave 0 - USB ID */
@@ -255,6 +263,7 @@ struct twl_client {
static struct twl_client twl_modules[TWL_NUM_SLAVES];
+static struct irq_domain domain;
/* mapping the module id to slave id and base address */
struct twl_mapping {
@@ -363,13 +372,13 @@ int twl_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no);
return -EPERM;
}
- sid = twl_map[mod_no].sid;
- twl = &twl_modules[sid];
-
if (unlikely(!inuse)) {
- pr_err("%s: client %d is not initialized\n", DRIVER_NAME, sid);
+ pr_err("%s: not initialized\n", DRIVER_NAME);
return -EPERM;
}
+ sid = twl_map[mod_no].sid;
+ twl = &twl_modules[sid];
+
mutex_lock(&twl->xfer_lock);
/*
* [MSG1]: fill the register address data
@@ -420,13 +429,13 @@ int twl_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no);
return -EPERM;
}
- sid = twl_map[mod_no].sid;
- twl = &twl_modules[sid];
-
if (unlikely(!inuse)) {
- pr_err("%s: client %d is not initialized\n", DRIVER_NAME, sid);
+ pr_err("%s: not initialized\n", DRIVER_NAME);
return -EPERM;
}
+ sid = twl_map[mod_no].sid;
+ twl = &twl_modules[sid];
+
mutex_lock(&twl->xfer_lock);
/* [MSG1] fill the register address data */
msg = &twl->xfer_msg[0];
@@ -1183,14 +1192,48 @@ twl_probe(struct i2c_client *client, const struct i2c_device_id *id)
int status;
unsigned i;
struct twl4030_platform_data *pdata = client->dev.platform_data;
+ struct device_node *node = client->dev.of_node;
u8 temp;
int ret = 0;
+ int nr_irqs = TWL4030_NR_IRQS;
+
+ if ((id->driver_data) & TWL6030_CLASS)
+ nr_irqs = TWL6030_NR_IRQS;
+
+ if (node && !pdata) {
+ /*
+ * XXX: Temporary pdata until the information is correctly
+ * retrieved from DT by every TWL module.
+ */
+ pdata = devm_kzalloc(&client->dev,
+ sizeof(struct twl4030_platform_data),
+ GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+ }
if (!pdata) {
dev_dbg(&client->dev, "no platform data?\n");
return -EINVAL;
}
+ status = irq_alloc_descs(-1, pdata->irq_base, nr_irqs, 0);
+ if (IS_ERR_VALUE(status)) {
+ dev_err(&client->dev, "Failed to allocate IRQ descs\n");
+ return status;
+ }
+
+ pdata->irq_base = status;
+ pdata->irq_end = pdata->irq_base + nr_irqs;
+
+ domain.irq_base = pdata->irq_base;
+ domain.nr_irq = nr_irqs;
+#ifdef CONFIG_OF_IRQ
+ domain.of_node = of_node_get(node);
+ domain.ops = &irq_domain_simple_ops;
+#endif
+ irq_domain_add(&domain);
+
if (i2c_check_functionality(client->adapter, I2C_FUNC_I2C) == 0) {
dev_dbg(&client->dev, "can't talk I2C?\n");
return -EIO;
@@ -1270,7 +1313,13 @@ twl_probe(struct i2c_client *client, const struct i2c_device_id *id)
twl_i2c_write_u8(TWL4030_MODULE_INTBR, temp, REG_GPPUPDCTR1);
}
- status = add_children(pdata, id->driver_data);
+#ifdef CONFIG_OF_DEVICE
+ if (node)
+ status = of_platform_populate(node, NULL, NULL, &client->dev);
+ else
+#endif
+ status = add_children(pdata, id->driver_data);
+
fail:
if (status < 0)
twl_remove(client);
diff --git a/drivers/mfd/twl4030-audio.c b/drivers/mfd/twl4030-audio.c
index ae51ab5d0e5d..838ce4eb444e 100644
--- a/drivers/mfd/twl4030-audio.c
+++ b/drivers/mfd/twl4030-audio.c
@@ -261,17 +261,7 @@ static struct platform_driver twl4030_audio_driver = {
},
};
-static int __devinit twl4030_audio_init(void)
-{
- return platform_driver_register(&twl4030_audio_driver);
-}
-module_init(twl4030_audio_init);
-
-static void __devexit twl4030_audio_exit(void)
-{
- platform_driver_unregister(&twl4030_audio_driver);
-}
-module_exit(twl4030_audio_exit);
+module_platform_driver(twl4030_audio_driver);
MODULE_AUTHOR("Peter Ujfalusi <peter.ujfalusi@ti.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
index f062c8cc6c38..b69bb517b102 100644
--- a/drivers/mfd/twl4030-irq.c
+++ b/drivers/mfd/twl4030-irq.c
@@ -432,6 +432,7 @@ struct sih_agent {
u32 edge_change;
struct mutex irq_lock;
+ char *irq_name;
};
/*----------------------------------------------------------------------*/
@@ -491,7 +492,7 @@ static void twl4030_sih_bus_sync_unlock(struct irq_data *data)
u8 bytes[4];
} imr;
- /* byte[0] gets overwriten as we write ... */
+ /* byte[0] gets overwritten as we write ... */
imr.word = cpu_to_le32(agent->imr << 8);
agent->imr_change_pending = false;
@@ -589,7 +590,7 @@ static inline int sih_read_isr(const struct sih *sih)
* Generic handler for SIH interrupts ... we "know" this is called
* in task context, with IRQs enabled.
*/
-static void handle_twl4030_sih(unsigned irq, struct irq_desc *desc)
+static irqreturn_t handle_twl4030_sih(int irq, void *data)
{
struct sih_agent *agent = irq_get_handler_data(irq);
const struct sih *sih = agent->sih;
@@ -602,7 +603,7 @@ static void handle_twl4030_sih(unsigned irq, struct irq_desc *desc)
pr_err("twl4030: %s SIH, read ISR error %d\n",
sih->name, isr);
/* REVISIT: recover; eventually mask it all, etc */
- return;
+ return IRQ_HANDLED;
}
while (isr) {
@@ -616,6 +617,7 @@ static void handle_twl4030_sih(unsigned irq, struct irq_desc *desc)
pr_err("twl4030: %s SIH, invalid ISR bit %d\n",
sih->name, irq);
}
+ return IRQ_HANDLED;
}
static unsigned twl4030_irq_next;
@@ -665,21 +667,23 @@ int twl4030_sih_setup(int module)
irq_set_chip_data(irq, agent);
irq_set_chip_and_handler(irq, &twl4030_sih_irq_chip,
handle_edge_irq);
+ irq_set_nested_thread(irq, 1);
activate_irq(irq);
}
- status = irq_base;
twl4030_irq_next += i;
/* replace generic PIH handler (handle_simple_irq) */
irq = sih_mod + twl4030_irq_base;
irq_set_handler_data(irq, agent);
- irq_set_chained_handler(irq, handle_twl4030_sih);
+ agent->irq_name = kasprintf(GFP_KERNEL, "twl4030_%s", sih->name);
+ status = request_threaded_irq(irq, NULL, handle_twl4030_sih, 0,
+ agent->irq_name ?: sih->name, NULL);
pr_info("twl4030: %s (irq %d) chaining IRQs %d..%d\n", sih->name,
irq, irq_base, twl4030_irq_next - 1);
- return status;
+ return status < 0 ? status : irq_base;
}
/* FIXME need a call to reverse twl4030_sih_setup() ... */
@@ -733,8 +737,9 @@ int twl4030_init_irq(int irq_num, unsigned irq_base, unsigned irq_end)
}
/* install an irq handler to demultiplex the TWL4030 interrupt */
- status = request_threaded_irq(irq_num, NULL, handle_twl4030_pih, 0,
- "TWL4030-PIH", NULL);
+ status = request_threaded_irq(irq_num, NULL, handle_twl4030_pih,
+ IRQF_ONESHOT,
+ "TWL4030-PIH", NULL);
if (status < 0) {
pr_err("twl4030: could not claim irq%d: %d\n", irq_num, status);
goto fail_rqirq;
diff --git a/drivers/mfd/twl4030-madc.c b/drivers/mfd/twl4030-madc.c
index 834f824d3c11..456ecb5ac4fe 100644
--- a/drivers/mfd/twl4030-madc.c
+++ b/drivers/mfd/twl4030-madc.c
@@ -807,19 +807,7 @@ static struct platform_driver twl4030_madc_driver = {
},
};
-static int __init twl4030_madc_init(void)
-{
- return platform_driver_register(&twl4030_madc_driver);
-}
-
-module_init(twl4030_madc_init);
-
-static void __exit twl4030_madc_exit(void)
-{
- platform_driver_unregister(&twl4030_madc_driver);
-}
-
-module_exit(twl4030_madc_exit);
+module_platform_driver(twl4030_madc_driver);
MODULE_DESCRIPTION("TWL4030 ADC driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/twl4030-power.c b/drivers/mfd/twl4030-power.c
index a764676f0922..d905f5171153 100644
--- a/drivers/mfd/twl4030-power.c
+++ b/drivers/mfd/twl4030-power.c
@@ -34,7 +34,8 @@
static u8 twl4030_start_script_address = 0x2b;
#define PWR_P1_SW_EVENTS 0x10
-#define PWR_DEVOFF (1<<0)
+#define PWR_DEVOFF (1 << 0)
+#define SEQ_OFFSYNC (1 << 0)
#define PHY_TO_OFF_PM_MASTER(p) (p - 0x36)
#define PHY_TO_OFF_PM_RECEIVER(p) (p - 0x5b)
@@ -511,12 +512,27 @@ int twl4030_remove_script(u8 flags)
return err;
}
+/*
+ * In master mode, start the power off sequence.
+ * After a successful execution, TWL shuts down the power to the SoC
+ * and all peripherals connected to it.
+ */
+void twl4030_power_off(void)
+{
+ int err;
+
+ err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, PWR_DEVOFF,
+ TWL4030_PM_MASTER_P1_SW_EVENTS);
+ if (err)
+ pr_err("TWL4030 Unable to power off\n");
+}
+
void __init twl4030_power_init(struct twl4030_power_data *twl4030_scripts)
{
int err = 0;
int i;
struct twl4030_resconfig *resconfig;
- u8 address = twl4030_start_script_address;
+ u8 val, address = twl4030_start_script_address;
err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER,
TWL4030_PM_MASTER_KEY_CFG1,
@@ -548,6 +564,28 @@ void __init twl4030_power_init(struct twl4030_power_data *twl4030_scripts)
}
}
+ /* Board has to be wired properly to use this feature */
+ if (twl4030_scripts->use_poweroff && !pm_power_off) {
+ /* Default for SEQ_OFFSYNC is set, let's ensure this */
+ err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &val,
+ TWL4030_PM_MASTER_CFG_P123_TRANSITION);
+ if (err) {
+ pr_warning("TWL4030 Unable to read registers\n");
+ } else if (!(val & SEQ_OFFSYNC)) {
+ val |= SEQ_OFFSYNC;
+ err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, val,
+ TWL4030_PM_MASTER_CFG_P123_TRANSITION);
+ if (err) {
+ pr_err("TWL4030 Unable to set up SEQ_OFFSYNC\n");
+ goto relock;
+ }
+ }
+
+ pm_power_off = twl4030_power_off;
+ }
+
+relock:
err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0,
TWL4030_PM_MASTER_PROTECT_KEY);
if (err)
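
The new twl4030_power_off() above is installed through the global pm_power_off function pointer, which is why the board-wiring check is done before assigning it. At shutdown the architecture code ends up doing roughly the following; the example_ wrapper is a simplified illustration of that generic path, not code from this patch.

        /* Simplified shape of the generic power-off path: */
        void example_machine_power_off(void)
        {
                if (pm_power_off)
                        pm_power_off();         /* -> twl4030_power_off() here */
        }
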
diff --git a/drivers/mfd/twl6030-irq.c b/drivers/mfd/twl6030-irq.c
index 3eee45ffb096..c6b456ad7342 100644
--- a/drivers/mfd/twl6030-irq.c
+++ b/drivers/mfd/twl6030-irq.c
@@ -138,8 +138,6 @@ static int twl6030_irq_thread(void *data)
static const unsigned max_i2c_errors = 100;
int ret;
- current->flags |= PF_NOFREEZE;
-
while (!kthread_should_stop()) {
int i;
union {
diff --git a/drivers/mfd/twl6040-core.c b/drivers/mfd/twl6040-core.c
index 268f80fd0439..dda86293dc9f 100644
--- a/drivers/mfd/twl6040-core.c
+++ b/drivers/mfd/twl6040-core.c
@@ -509,13 +509,10 @@ static int __devinit twl6040_probe(struct platform_device *pdev)
twl6040->audpwron = -EINVAL;
if (gpio_is_valid(twl6040->audpwron)) {
- ret = gpio_request(twl6040->audpwron, "audpwron");
+ ret = gpio_request_one(twl6040->audpwron, GPIOF_OUT_INIT_LOW,
+ "audpwron");
if (ret)
goto gpio1_err;
-
- ret = gpio_direction_output(twl6040->audpwron, 0);
- if (ret)
- goto gpio2_err;
}
/* codec interrupt */
@@ -619,18 +616,7 @@ static struct platform_driver twl6040_driver = {
},
};
-static int __devinit twl6040_init(void)
-{
- return platform_driver_register(&twl6040_driver);
-}
-module_init(twl6040_init);
-
-static void __devexit twl6040_exit(void)
-{
- platform_driver_unregister(&twl6040_driver);
-}
-
-module_exit(twl6040_exit);
+module_platform_driver(twl6040_driver);
MODULE_DESCRIPTION("TWL6040 MFD");
MODULE_AUTHOR("Misael Lopez Cruz <misael.lopez@ti.com>");
diff --git a/drivers/mfd/ucb1x00-core.c b/drivers/mfd/ucb1x00-core.c
index b281217334eb..91c4f25e0e55 100644
--- a/drivers/mfd/ucb1x00-core.c
+++ b/drivers/mfd/ucb1x00-core.c
@@ -36,6 +36,15 @@ static DEFINE_MUTEX(ucb1x00_mutex);
static LIST_HEAD(ucb1x00_drivers);
static LIST_HEAD(ucb1x00_devices);
+static struct mcp_device_id ucb1x00_id[] = {
+ { "ucb1x00", 0 }, /* auto-detection */
+ { "ucb1200", UCB_ID_1200 },
+ { "ucb1300", UCB_ID_1300 },
+ { "tc35143", UCB_ID_TC35143 },
+ { }
+};
+MODULE_DEVICE_TABLE(mcp, ucb1x00_id);
+
/**
* ucb1x00_io_set_dir - set IO direction
* @ucb: UCB1x00 structure describing chip
@@ -527,17 +536,33 @@ static struct class ucb1x00_class = {
static int ucb1x00_probe(struct mcp *mcp)
{
+ const struct mcp_device_id *mid;
struct ucb1x00 *ucb;
struct ucb1x00_driver *drv;
+ struct ucb1x00_plat_data *pdata;
unsigned int id;
int ret = -ENODEV;
int temp;
mcp_enable(mcp);
id = mcp_reg_read(mcp, UCB_ID);
+ mid = mcp_get_device_id(mcp);
- if (id != UCB_ID_1200 && id != UCB_ID_1300 && id != UCB_ID_TC35143) {
- printk(KERN_WARNING "UCB1x00 ID not found: %04x\n", id);
+ if (mid && mid->driver_data) {
+ if (id != mid->driver_data) {
+ printk(KERN_WARNING "%s wrong ID %04x found: %04x\n",
+ mid->name, (unsigned int) mid->driver_data, id);
+ goto err_disable;
+ }
+ } else {
+ mid = &ucb1x00_id[1];
+ while (mid->driver_data) {
+ if (id == mid->driver_data)
+ break;
+ mid++;
+ }
+ printk(KERN_WARNING "%s ID not found: %04x\n",
+ ucb1x00_id[0].name, id);
goto err_disable;
}
@@ -546,28 +571,28 @@ static int ucb1x00_probe(struct mcp *mcp)
if (!ucb)
goto err_disable;
-
+ pdata = mcp->attached_device.platform_data;
ucb->dev.class = &ucb1x00_class;
ucb->dev.parent = &mcp->attached_device;
- dev_set_name(&ucb->dev, "ucb1x00");
+ dev_set_name(&ucb->dev, mid->name);
spin_lock_init(&ucb->lock);
spin_lock_init(&ucb->io_lock);
sema_init(&ucb->adc_sem, 1);
- ucb->id = id;
+ ucb->id = mid;
ucb->mcp = mcp;
ucb->irq = ucb1x00_detect_irq(ucb);
if (ucb->irq == NO_IRQ) {
- printk(KERN_ERR "UCB1x00: IRQ probe failed\n");
+ printk(KERN_ERR "%s: IRQ probe failed\n", mid->name);
ret = -ENODEV;
goto err_free;
}
ucb->gpio.base = -1;
- if (mcp->gpio_base != 0) {
+ if (pdata && (pdata->gpio_base >= 0)) {
ucb->gpio.label = dev_name(&ucb->dev);
- ucb->gpio.base = mcp->gpio_base;
+ ucb->gpio.base = pdata->gpio_base;
ucb->gpio.ngpio = 10;
ucb->gpio.set = ucb1x00_gpio_set;
ucb->gpio.get = ucb1x00_gpio_get;
@@ -580,10 +605,10 @@ static int ucb1x00_probe(struct mcp *mcp)
dev_info(&ucb->dev, "gpio_base not set so no gpiolib support");
ret = request_irq(ucb->irq, ucb1x00_irq, IRQF_TRIGGER_RISING,
- "UCB1x00", ucb);
+ mid->name, ucb);
if (ret) {
- printk(KERN_ERR "ucb1x00: unable to grab irq%d: %d\n",
- ucb->irq, ret);
+ printk(KERN_ERR "%s: unable to grab irq%d: %d\n",
+ mid->name, ucb->irq, ret);
goto err_gpio;
}
@@ -705,6 +730,7 @@ static struct mcp_driver ucb1x00_driver = {
.remove = ucb1x00_remove,
.suspend = ucb1x00_suspend,
.resume = ucb1x00_resume,
+ .id_table = ucb1x00_id,
};
static int __init ucb1x00_init(void)
diff --git a/drivers/mfd/ucb1x00-ts.c b/drivers/mfd/ucb1x00-ts.c
index 38ffbd50a0d2..40ec3c118868 100644
--- a/drivers/mfd/ucb1x00-ts.c
+++ b/drivers/mfd/ucb1x00-ts.c
@@ -382,7 +382,7 @@ static int ucb1x00_ts_add(struct ucb1x00_dev *dev)
ts->adcsync = adcsync ? UCB_SYNC : UCB_NOSYNC;
idev->name = "Touchscreen panel";
- idev->id.product = ts->ucb->id;
+ idev->id.product = ts->ucb->id->driver_data;
idev->open = ucb1x00_ts_open;
idev->close = ucb1x00_ts_close;
diff --git a/drivers/mfd/vx855.c b/drivers/mfd/vx855.c
index d698703dbd46..b73cc15e0081 100644
--- a/drivers/mfd/vx855.c
+++ b/drivers/mfd/vx855.c
@@ -118,7 +118,7 @@ static void __devexit vx855_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-static struct pci_device_id vx855_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(vx855_pci_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VX855) },
{ 0, }
};
diff --git a/drivers/mfd/wm831x-core.c b/drivers/mfd/wm831x-core.c
index 0a2b8d41a702..f5e54fae8ada 100644
--- a/drivers/mfd/wm831x-core.c
+++ b/drivers/mfd/wm831x-core.c
@@ -559,6 +559,8 @@ static int wm831x_write(struct wm831x *wm831x, unsigned short reg,
dev_vdbg(wm831x->dev, "Write %04x to R%d(0x%x)\n",
buf[i], reg + i, reg + i);
ret = regmap_write(wm831x->regmap, reg + i, buf[i]);
+ if (ret != 0)
+ return ret;
}
return 0;
@@ -1875,7 +1877,6 @@ err_irq:
err_regmap:
mfd_remove_devices(wm831x->dev);
regmap_exit(wm831x->regmap);
- kfree(wm831x);
return ret;
}
@@ -1887,7 +1888,6 @@ void wm831x_device_exit(struct wm831x *wm831x)
free_irq(wm831x->irq_base + WM831X_IRQ_AUXADC_DATA, wm831x);
wm831x_irq_exit(wm831x);
regmap_exit(wm831x->regmap);
- kfree(wm831x);
}
int wm831x_device_suspend(struct wm831x *wm831x)
diff --git a/drivers/mfd/wm831x-i2c.c b/drivers/mfd/wm831x-i2c.c
index ac8da1d439da..cb15609b0a48 100644
--- a/drivers/mfd/wm831x-i2c.c
+++ b/drivers/mfd/wm831x-i2c.c
@@ -30,7 +30,7 @@ static int wm831x_i2c_probe(struct i2c_client *i2c,
struct wm831x *wm831x;
int ret;
- wm831x = kzalloc(sizeof(struct wm831x), GFP_KERNEL);
+ wm831x = devm_kzalloc(&i2c->dev, sizeof(struct wm831x), GFP_KERNEL);
if (wm831x == NULL)
return -ENOMEM;
@@ -42,7 +42,6 @@ static int wm831x_i2c_probe(struct i2c_client *i2c,
ret = PTR_ERR(wm831x->regmap);
dev_err(wm831x->dev, "Failed to allocate register map: %d\n",
ret);
- kfree(wm831x);
return ret;
}
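
The kzalloc()-to-devm_kzalloc() conversions in this and the following wm831x, wm8350, wm8400 and wm8994 hunks all follow the same pattern: the allocation is tied to the device's lifetime and released automatically on unbind, so the explicit kfree() calls in error paths and in remove() can simply be dropped. A compressed sketch of the resulting probe shape, with example_ names standing in for the real driver types:

        static int example_probe(struct i2c_client *i2c)
        {
                struct example_chip *chip;

                /* Freed automatically when the device is unbound; no kfree()
                 * is needed in error paths or in remove(). */
                chip = devm_kzalloc(&i2c->dev, sizeof(*chip), GFP_KERNEL);
                if (!chip)
                        return -ENOMEM;

                i2c_set_clientdata(i2c, chip);
                return 0;
        }
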
diff --git a/drivers/mfd/wm831x-irq.c b/drivers/mfd/wm831x-irq.c
index f4747a4a9a93..bec4d0539160 100644
--- a/drivers/mfd/wm831x-irq.c
+++ b/drivers/mfd/wm831x-irq.c
@@ -325,11 +325,6 @@ static inline int irq_data_to_status_reg(struct wm831x_irq_data *irq_data)
return WM831X_INTERRUPT_STATUS_1 - 1 + irq_data->reg;
}
-static inline int irq_data_to_mask_reg(struct wm831x_irq_data *irq_data)
-{
- return WM831X_INTERRUPT_STATUS_1_MASK - 1 + irq_data->reg;
-}
-
static inline struct wm831x_irq_data *irq_to_wm831x_irq(struct wm831x *wm831x,
int irq)
{
@@ -477,8 +472,7 @@ static irqreturn_t wm831x_irq_thread(int irq, void *data)
handle_nested_irq(wm831x->irq_base + WM831X_IRQ_TCHPD);
if (primary & WM831X_TCHDATA_INT)
handle_nested_irq(wm831x->irq_base + WM831X_IRQ_TCHDATA);
- if (primary & (WM831X_TCHDATA_EINT | WM831X_TCHPD_EINT))
- goto out;
+ primary &= ~(WM831X_TCHDATA_EINT | WM831X_TCHPD_EINT);
for (i = 0; i < ARRAY_SIZE(wm831x_irqs); i++) {
int offset = wm831x_irqs[i].reg - 1;
diff --git a/drivers/mfd/wm831x-spi.c b/drivers/mfd/wm831x-spi.c
index 8d6a9a969dbc..62ef3254105f 100644
--- a/drivers/mfd/wm831x-spi.c
+++ b/drivers/mfd/wm831x-spi.c
@@ -30,7 +30,7 @@ static int __devinit wm831x_spi_probe(struct spi_device *spi)
type = (enum wm831x_parent)id->driver_data;
- wm831x = kzalloc(sizeof(struct wm831x), GFP_KERNEL);
+ wm831x = devm_kzalloc(&spi->dev, sizeof(struct wm831x), GFP_KERNEL);
if (wm831x == NULL)
return -ENOMEM;
@@ -45,7 +45,6 @@ static int __devinit wm831x_spi_probe(struct spi_device *spi)
ret = PTR_ERR(wm831x->regmap);
dev_err(wm831x->dev, "Failed to allocate register map: %d\n",
ret);
- kfree(wm831x);
return ret;
}
@@ -95,7 +94,6 @@ MODULE_DEVICE_TABLE(spi, wm831x_spi_id);
static struct spi_driver wm831x_spi_driver = {
.driver = {
.name = "wm831x",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
.pm = &wm831x_spi_pm,
},
diff --git a/drivers/mfd/wm8350-core.c b/drivers/mfd/wm8350-core.c
index e81cc31e4202..dd1caaac55e4 100644
--- a/drivers/mfd/wm8350-core.c
+++ b/drivers/mfd/wm8350-core.c
@@ -573,6 +573,8 @@ int wm8350_device_init(struct wm8350 *wm8350, int irq,
u16 id1, id2, mask_rev;
u16 cust_id, mode, chip_rev;
+ dev_set_drvdata(wm8350->dev, wm8350);
+
/* get WM8350 revision and config mode */
ret = wm8350->read_dev(wm8350, WM8350_RESET_ID, sizeof(id1), &id1);
if (ret != 0) {
diff --git a/drivers/mfd/wm8350-i2c.c b/drivers/mfd/wm8350-i2c.c
index 5fe5de166adb..d955faaf27c4 100644
--- a/drivers/mfd/wm8350-i2c.c
+++ b/drivers/mfd/wm8350-i2c.c
@@ -63,7 +63,7 @@ static int wm8350_i2c_probe(struct i2c_client *i2c,
struct wm8350 *wm8350;
int ret = 0;
- wm8350 = kzalloc(sizeof(struct wm8350), GFP_KERNEL);
+ wm8350 = devm_kzalloc(&i2c->dev, sizeof(struct wm8350), GFP_KERNEL);
if (wm8350 == NULL)
return -ENOMEM;
@@ -80,7 +80,6 @@ static int wm8350_i2c_probe(struct i2c_client *i2c,
return ret;
err:
- kfree(wm8350);
return ret;
}
@@ -89,7 +88,6 @@ static int wm8350_i2c_remove(struct i2c_client *i2c)
struct wm8350 *wm8350 = i2c_get_clientdata(i2c);
wm8350_device_exit(wm8350);
- kfree(wm8350);
return 0;
}
diff --git a/drivers/mfd/wm8400-core.c b/drivers/mfd/wm8400-core.c
index 62b4626f4561..2204893444a6 100644
--- a/drivers/mfd/wm8400-core.c
+++ b/drivers/mfd/wm8400-core.c
@@ -344,7 +344,7 @@ static int wm8400_i2c_probe(struct i2c_client *i2c,
struct wm8400 *wm8400;
int ret;
- wm8400 = kzalloc(sizeof(struct wm8400), GFP_KERNEL);
+ wm8400 = devm_kzalloc(&i2c->dev, sizeof(struct wm8400), GFP_KERNEL);
if (wm8400 == NULL) {
ret = -ENOMEM;
goto err;
@@ -353,7 +353,7 @@ static int wm8400_i2c_probe(struct i2c_client *i2c,
wm8400->regmap = regmap_init_i2c(i2c, &wm8400_regmap_config);
if (IS_ERR(wm8400->regmap)) {
ret = PTR_ERR(wm8400->regmap);
- goto struct_err;
+ goto err;
}
wm8400->dev = &i2c->dev;
@@ -367,8 +367,6 @@ static int wm8400_i2c_probe(struct i2c_client *i2c,
map_err:
regmap_exit(wm8400->regmap);
-struct_err:
- kfree(wm8400);
err:
return ret;
}
@@ -379,7 +377,6 @@ static int wm8400_i2c_remove(struct i2c_client *i2c)
wm8400_release(wm8400);
regmap_exit(wm8400->regmap);
- kfree(wm8400);
return 0;
}
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index 5d6ba132837e..f117e7fb9321 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -28,11 +28,7 @@
#include <linux/mfd/wm8994/pdata.h>
#include <linux/mfd/wm8994/registers.h>
-static int wm8994_read(struct wm8994 *wm8994, unsigned short reg,
- int bytes, void *dest)
-{
- return regmap_raw_read(wm8994->regmap, reg, dest, bytes);
-}
+#include "wm8994.h"
/**
* wm8994_reg_read: Read a single WM8994 register.
@@ -68,12 +64,6 @@ int wm8994_bulk_read(struct wm8994 *wm8994, unsigned short reg,
return regmap_bulk_read(wm8994->regmap, reg, buf, count);
}
-static int wm8994_write(struct wm8994 *wm8994, unsigned short reg,
- int bytes, const void *src)
-{
- return regmap_raw_write(wm8994->regmap, reg, src, bytes);
-}
-
/**
* wm8994_reg_write: Write a single WM8994 register.
*
@@ -239,6 +229,7 @@ static int wm8994_suspend(struct device *dev)
switch (wm8994->type) {
case WM8958:
+ case WM1811:
ret = wm8994_reg_read(wm8994, WM8958_MIC_DETECT_1);
if (ret < 0) {
dev_err(dev, "Failed to read power status: %d\n", ret);
@@ -251,6 +242,20 @@ static int wm8994_suspend(struct device *dev)
break;
}
+ switch (wm8994->type) {
+ case WM1811:
+ ret = wm8994_reg_read(wm8994, WM8994_ANTIPOP_2);
+ if (ret < 0) {
+ dev_err(dev, "Failed to read jackdet: %d\n", ret);
+ } else if (ret & WM1811_JACKDET_MODE_MASK) {
+ dev_dbg(dev, "CODEC still active, ignoring suspend\n");
+ return 0;
+ }
+ break;
+ default:
+ break;
+ }
+
/* Disable LDO pulldowns while the device is suspended if we
* don't know that something will be driving them. */
if (!wm8994->ldo_ena_always_driven)
@@ -258,25 +263,14 @@ static int wm8994_suspend(struct device *dev)
WM8994_LDO1ENA_PD | WM8994_LDO2ENA_PD,
WM8994_LDO1ENA_PD | WM8994_LDO2ENA_PD);
- /* GPIO configuration state is saved here since we may be configuring
- * the GPIO alternate functions even if we're not using the gpiolib
- * driver for them.
- */
- ret = wm8994_read(wm8994, WM8994_GPIO_1, WM8994_NUM_GPIO_REGS * 2,
- &wm8994->gpio_regs);
- if (ret < 0)
- dev_err(dev, "Failed to save GPIO registers: %d\n", ret);
-
- /* For similar reasons we also stash the regulator states */
- ret = wm8994_read(wm8994, WM8994_LDO_1, WM8994_NUM_LDO_REGS * 2,
- &wm8994->ldo_regs);
- if (ret < 0)
- dev_err(dev, "Failed to save LDO registers: %d\n", ret);
-
/* Explicitly put the device into reset in case regulators
* don't get disabled in order to ensure consistent restart.
*/
- wm8994_reg_write(wm8994, WM8994_SOFTWARE_RESET, 0x8994);
+ wm8994_reg_write(wm8994, WM8994_SOFTWARE_RESET,
+ wm8994_reg_read(wm8994, WM8994_SOFTWARE_RESET));
+
+ regcache_cache_only(wm8994->regmap, true);
+ regcache_mark_dirty(wm8994->regmap);
wm8994->suspended = true;
@@ -293,7 +287,7 @@ static int wm8994_suspend(struct device *dev)
static int wm8994_resume(struct device *dev)
{
struct wm8994 *wm8994 = dev_get_drvdata(dev);
- int ret, i;
+ int ret;
/* We may have lied to the PM core about suspending */
if (!wm8994->suspended)
@@ -306,27 +300,13 @@ static int wm8994_resume(struct device *dev)
return ret;
}
- /* Write register at a time as we use the cache on the CPU so store
- * it in native endian.
- */
- for (i = 0; i < ARRAY_SIZE(wm8994->irq_masks_cur); i++) {
- ret = wm8994_reg_write(wm8994, WM8994_INTERRUPT_STATUS_1_MASK
- + i, wm8994->irq_masks_cur[i]);
- if (ret < 0)
- dev_err(dev, "Failed to restore interrupt masks: %d\n",
- ret);
+ regcache_cache_only(wm8994->regmap, false);
+ ret = regcache_sync(wm8994->regmap);
+ if (ret != 0) {
+ dev_err(dev, "Failed to restore register map: %d\n", ret);
+ goto err_enable;
}
- ret = wm8994_write(wm8994, WM8994_LDO_1, WM8994_NUM_LDO_REGS * 2,
- &wm8994->ldo_regs);
- if (ret < 0)
- dev_err(dev, "Failed to restore LDO registers: %d\n", ret);
-
- ret = wm8994_write(wm8994, WM8994_GPIO_1, WM8994_NUM_GPIO_REGS * 2,
- &wm8994->gpio_regs);
- if (ret < 0)
- dev_err(dev, "Failed to restore GPIO registers: %d\n", ret);
-
/* Disable LDO pulldowns while the device is active */
wm8994_set_bits(wm8994, WM8994_PULL_CONTROL_2,
WM8994_LDO1ENA_PD | WM8994_LDO2ENA_PD,
@@ -335,6 +315,11 @@ static int wm8994_resume(struct device *dev)
wm8994->suspended = false;
return 0;
+
+err_enable:
+ regulator_bulk_disable(wm8994->num_supplies, wm8994->supplies);
+
+ return ret;
}
#endif
@@ -360,19 +345,16 @@ static int wm8994_ldo_in_use(struct wm8994_pdata *pdata, int ldo)
}
#endif
-static struct regmap_config wm8994_regmap_config = {
- .reg_bits = 16,
- .val_bits = 16,
-};
-
/*
* Instantiate the generic non-control parts of the device.
*/
static int wm8994_device_init(struct wm8994 *wm8994, int irq)
{
struct wm8994_pdata *pdata = wm8994->dev->platform_data;
+ struct regmap_config *regmap_config;
const char *devname;
int ret, i;
+ int pulls = 0;
dev_set_drvdata(wm8994->dev, wm8994);
@@ -401,9 +383,9 @@ static int wm8994_device_init(struct wm8994 *wm8994, int irq)
goto err_regmap;
}
- wm8994->supplies = kzalloc(sizeof(struct regulator_bulk_data) *
- wm8994->num_supplies,
- GFP_KERNEL);
+ wm8994->supplies = devm_kzalloc(wm8994->dev,
+ sizeof(struct regulator_bulk_data) *
+ wm8994->num_supplies, GFP_KERNEL);
if (!wm8994->supplies) {
ret = -ENOMEM;
goto err_regmap;
@@ -431,7 +413,7 @@ static int wm8994_device_init(struct wm8994 *wm8994, int irq)
wm8994->supplies);
if (ret != 0) {
dev_err(wm8994->dev, "Failed to get supplies: %d\n", ret);
- goto err_supplies;
+ goto err_regmap;
}
ret = regulator_bulk_enable(wm8994->num_supplies,
@@ -481,25 +463,54 @@ static int wm8994_device_init(struct wm8994 *wm8994, int irq)
ret);
goto err_enable;
}
+ wm8994->revision = ret;
switch (wm8994->type) {
case WM8994:
- switch (ret) {
+ switch (wm8994->revision) {
case 0:
case 1:
dev_warn(wm8994->dev,
"revision %c not fully supported\n",
- 'A' + ret);
+ 'A' + wm8994->revision);
break;
default:
break;
}
break;
+ case WM1811:
+ /* Revision C did not change the relevant layer */
+ if (wm8994->revision > 1)
+ wm8994->revision++;
+ break;
default:
break;
}
- dev_info(wm8994->dev, "%s revision %c\n", devname, 'A' + ret);
+ dev_info(wm8994->dev, "%s revision %c\n", devname,
+ 'A' + wm8994->revision);
+
+ switch (wm8994->type) {
+ case WM1811:
+ regmap_config = &wm1811_regmap_config;
+ break;
+ case WM8994:
+ regmap_config = &wm8994_regmap_config;
+ break;
+ case WM8958:
+ regmap_config = &wm8958_regmap_config;
+ break;
+ default:
+ dev_err(wm8994->dev, "Unknown device type %d\n", wm8994->type);
+ return -EINVAL;
+ }
+
+ ret = regmap_reinit_cache(wm8994->regmap, regmap_config);
+ if (ret != 0) {
+ dev_err(wm8994->dev, "Failed to reinit register cache: %d\n",
+ ret);
+ return ret;
+ }
if (pdata) {
wm8994->irq_base = pdata->irq_base;
@@ -515,12 +526,16 @@ static int wm8994_device_init(struct wm8994 *wm8994, int irq)
}
wm8994->ldo_ena_always_driven = pdata->ldo_ena_always_driven;
+
+ if (pdata->spkmode_pu)
+ pulls |= WM8994_SPKMODE_PU;
}
- /* Disable LDO pulldowns while the device is active */
+ /* Disable unneeded pulls */
wm8994_set_bits(wm8994, WM8994_PULL_CONTROL_2,
- WM8994_LDO1ENA_PD | WM8994_LDO2ENA_PD,
- 0);
+ WM8994_LDO1ENA_PD | WM8994_LDO2ENA_PD |
+ WM8994_SPKMODE_PU | WM8994_CSNADDR_PD,
+ pulls);
/* In some system designs where the regulators are not in use,
* we can achieve a small reduction in leakage currents by
@@ -559,12 +574,9 @@ err_enable:
wm8994->supplies);
err_get:
regulator_bulk_free(wm8994->num_supplies, wm8994->supplies);
-err_supplies:
- kfree(wm8994->supplies);
err_regmap:
regmap_exit(wm8994->regmap);
mfd_remove_devices(wm8994->dev);
- kfree(wm8994);
return ret;
}
@@ -576,18 +588,24 @@ static void wm8994_device_exit(struct wm8994 *wm8994)
regulator_bulk_disable(wm8994->num_supplies,
wm8994->supplies);
regulator_bulk_free(wm8994->num_supplies, wm8994->supplies);
- kfree(wm8994->supplies);
regmap_exit(wm8994->regmap);
- kfree(wm8994);
}
+static const struct of_device_id wm8994_of_match[] = {
+ { .compatible = "wlf,wm1811", },
+ { .compatible = "wlf,wm8994", },
+ { .compatible = "wlf,wm8958", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, wm8994_of_match);
+
static int wm8994_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
struct wm8994 *wm8994;
int ret;
- wm8994 = kzalloc(sizeof(struct wm8994), GFP_KERNEL);
+ wm8994 = devm_kzalloc(&i2c->dev, sizeof(struct wm8994), GFP_KERNEL);
if (wm8994 == NULL)
return -ENOMEM;
@@ -596,12 +614,11 @@ static int wm8994_i2c_probe(struct i2c_client *i2c,
wm8994->irq = i2c->irq;
wm8994->type = id->driver_data;
- wm8994->regmap = regmap_init_i2c(i2c, &wm8994_regmap_config);
+ wm8994->regmap = regmap_init_i2c(i2c, &wm8994_base_regmap_config);
if (IS_ERR(wm8994->regmap)) {
ret = PTR_ERR(wm8994->regmap);
dev_err(wm8994->dev, "Failed to allocate register map: %d\n",
ret);
- kfree(wm8994);
return ret;
}
@@ -619,6 +636,7 @@ static int wm8994_i2c_remove(struct i2c_client *i2c)
static const struct i2c_device_id wm8994_i2c_id[] = {
{ "wm1811", WM1811 },
+ { "wm1811a", WM1811 },
{ "wm8994", WM8994 },
{ "wm8958", WM8958 },
{ }
@@ -633,6 +651,7 @@ static struct i2c_driver wm8994_i2c_driver = {
.name = "wm8994",
.owner = THIS_MODULE,
.pm = &wm8994_pm_ops,
+ .of_match_table = wm8994_of_match,
},
.probe = wm8994_i2c_probe,
.remove = wm8994_i2c_remove,
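(Note, not part of the patch: the probe path above intentionally sets the regmap up in two steps. A minimal wm8994_base_regmap_config is used first so the part can be identified, and only once the device type and revision are known is the cache re-initialised with the full device-specific map via regmap_reinit_cache(). A rough sketch of the same pattern for a hypothetical client driver follows; all foo_* names are illustrative only and not taken from this patch.)

#include <linux/device.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/regmap.h>
#include <linux/slab.h>

/* Hypothetical device-specific register maps, analogous to the
 * wm8994/wm8958/wm1811 configs used above. */
extern struct regmap_config foo_base_regmap_config;
extern struct regmap_config foo_rev_a_regmap_config;

struct foo {
	struct device *dev;
	struct regmap *regmap;
};

static int foo_i2c_probe(struct i2c_client *i2c,
			 const struct i2c_device_id *id)
{
	struct foo *foo;
	int ret;

	foo = devm_kzalloc(&i2c->dev, sizeof(*foo), GFP_KERNEL);
	if (foo == NULL)
		return -ENOMEM;
	foo->dev = &i2c->dev;

	/* Minimal map: just enough to read the ID/revision registers */
	foo->regmap = regmap_init_i2c(i2c, &foo_base_regmap_config);
	if (IS_ERR(foo->regmap))
		return PTR_ERR(foo->regmap);

	/* ...identify the device here and pick the matching config... */

	ret = regmap_reinit_cache(foo->regmap, &foo_rev_a_regmap_config);
	if (ret != 0) {
		dev_err(foo->dev, "Failed to reinit register cache: %d\n", ret);
		regmap_exit(foo->regmap);
		return ret;
	}

	return 0;
}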
diff --git a/drivers/mfd/wm8994-irq.c b/drivers/mfd/wm8994-irq.c
index d682f7bd112c..46b20c445ecf 100644
--- a/drivers/mfd/wm8994-irq.c
+++ b/drivers/mfd/wm8994-irq.c
@@ -18,248 +18,127 @@
#include <linux/irq.h>
#include <linux/mfd/core.h>
#include <linux/interrupt.h>
+#include <linux/regmap.h>
#include <linux/mfd/wm8994/core.h>
#include <linux/mfd/wm8994/registers.h>
#include <linux/delay.h>
-struct wm8994_irq_data {
- int reg;
- int mask;
-};
-
-static struct wm8994_irq_data wm8994_irqs[] = {
+static struct regmap_irq wm8994_irqs[] = {
[WM8994_IRQ_TEMP_SHUT] = {
- .reg = 2,
+ .reg_offset = 1,
.mask = WM8994_TEMP_SHUT_EINT,
},
[WM8994_IRQ_MIC1_DET] = {
- .reg = 2,
+ .reg_offset = 1,
.mask = WM8994_MIC1_DET_EINT,
},
[WM8994_IRQ_MIC1_SHRT] = {
- .reg = 2,
+ .reg_offset = 1,
.mask = WM8994_MIC1_SHRT_EINT,
},
[WM8994_IRQ_MIC2_DET] = {
- .reg = 2,
+ .reg_offset = 1,
.mask = WM8994_MIC2_DET_EINT,
},
[WM8994_IRQ_MIC2_SHRT] = {
- .reg = 2,
+ .reg_offset = 1,
.mask = WM8994_MIC2_SHRT_EINT,
},
[WM8994_IRQ_FLL1_LOCK] = {
- .reg = 2,
+ .reg_offset = 1,
.mask = WM8994_FLL1_LOCK_EINT,
},
[WM8994_IRQ_FLL2_LOCK] = {
- .reg = 2,
+ .reg_offset = 1,
.mask = WM8994_FLL2_LOCK_EINT,
},
[WM8994_IRQ_SRC1_LOCK] = {
- .reg = 2,
+ .reg_offset = 1,
.mask = WM8994_SRC1_LOCK_EINT,
},
[WM8994_IRQ_SRC2_LOCK] = {
- .reg = 2,
+ .reg_offset = 1,
.mask = WM8994_SRC2_LOCK_EINT,
},
[WM8994_IRQ_AIF1DRC1_SIG_DET] = {
- .reg = 2,
+ .reg_offset = 1,
.mask = WM8994_AIF1DRC1_SIG_DET,
},
[WM8994_IRQ_AIF1DRC2_SIG_DET] = {
- .reg = 2,
+ .reg_offset = 1,
.mask = WM8994_AIF1DRC2_SIG_DET_EINT,
},
[WM8994_IRQ_AIF2DRC_SIG_DET] = {
- .reg = 2,
+ .reg_offset = 1,
.mask = WM8994_AIF2DRC_SIG_DET_EINT,
},
[WM8994_IRQ_FIFOS_ERR] = {
- .reg = 2,
+ .reg_offset = 1,
.mask = WM8994_FIFOS_ERR_EINT,
},
[WM8994_IRQ_WSEQ_DONE] = {
- .reg = 2,
+ .reg_offset = 1,
.mask = WM8994_WSEQ_DONE_EINT,
},
[WM8994_IRQ_DCS_DONE] = {
- .reg = 2,
+ .reg_offset = 1,
.mask = WM8994_DCS_DONE_EINT,
},
[WM8994_IRQ_TEMP_WARN] = {
- .reg = 2,
+ .reg_offset = 1,
.mask = WM8994_TEMP_WARN_EINT,
},
[WM8994_IRQ_GPIO(1)] = {
- .reg = 1,
.mask = WM8994_GP1_EINT,
},
[WM8994_IRQ_GPIO(2)] = {
- .reg = 1,
.mask = WM8994_GP2_EINT,
},
[WM8994_IRQ_GPIO(3)] = {
- .reg = 1,
.mask = WM8994_GP3_EINT,
},
[WM8994_IRQ_GPIO(4)] = {
- .reg = 1,
.mask = WM8994_GP4_EINT,
},
[WM8994_IRQ_GPIO(5)] = {
- .reg = 1,
.mask = WM8994_GP5_EINT,
},
[WM8994_IRQ_GPIO(6)] = {
- .reg = 1,
.mask = WM8994_GP6_EINT,
},
[WM8994_IRQ_GPIO(7)] = {
- .reg = 1,
.mask = WM8994_GP7_EINT,
},
[WM8994_IRQ_GPIO(8)] = {
- .reg = 1,
.mask = WM8994_GP8_EINT,
},
[WM8994_IRQ_GPIO(9)] = {
- .reg = 1,
.mask = WM8994_GP8_EINT,
},
[WM8994_IRQ_GPIO(10)] = {
- .reg = 1,
.mask = WM8994_GP10_EINT,
},
[WM8994_IRQ_GPIO(11)] = {
- .reg = 1,
.mask = WM8994_GP11_EINT,
},
};
-static inline int irq_data_to_status_reg(struct wm8994_irq_data *irq_data)
-{
- return WM8994_INTERRUPT_STATUS_1 - 1 + irq_data->reg;
-}
-
-static inline int irq_data_to_mask_reg(struct wm8994_irq_data *irq_data)
-{
- return WM8994_INTERRUPT_STATUS_1_MASK - 1 + irq_data->reg;
-}
-
-static inline struct wm8994_irq_data *irq_to_wm8994_irq(struct wm8994 *wm8994,
- int irq)
-{
- return &wm8994_irqs[irq - wm8994->irq_base];
-}
-
-static void wm8994_irq_lock(struct irq_data *data)
-{
- struct wm8994 *wm8994 = irq_data_get_irq_chip_data(data);
-
- mutex_lock(&wm8994->irq_lock);
-}
-
-static void wm8994_irq_sync_unlock(struct irq_data *data)
-{
- struct wm8994 *wm8994 = irq_data_get_irq_chip_data(data);
- int i;
-
- for (i = 0; i < ARRAY_SIZE(wm8994->irq_masks_cur); i++) {
- /* If there's been a change in the mask write it back
- * to the hardware. */
- if (wm8994->irq_masks_cur[i] != wm8994->irq_masks_cache[i]) {
- wm8994->irq_masks_cache[i] = wm8994->irq_masks_cur[i];
- wm8994_reg_write(wm8994,
- WM8994_INTERRUPT_STATUS_1_MASK + i,
- wm8994->irq_masks_cur[i]);
- }
- }
-
- mutex_unlock(&wm8994->irq_lock);
-}
-
-static void wm8994_irq_enable(struct irq_data *data)
-{
- struct wm8994 *wm8994 = irq_data_get_irq_chip_data(data);
- struct wm8994_irq_data *irq_data = irq_to_wm8994_irq(wm8994,
- data->irq);
-
- wm8994->irq_masks_cur[irq_data->reg - 1] &= ~irq_data->mask;
-}
-
-static void wm8994_irq_disable(struct irq_data *data)
-{
- struct wm8994 *wm8994 = irq_data_get_irq_chip_data(data);
- struct wm8994_irq_data *irq_data = irq_to_wm8994_irq(wm8994,
- data->irq);
-
- wm8994->irq_masks_cur[irq_data->reg - 1] |= irq_data->mask;
-}
+static struct regmap_irq_chip wm8994_irq_chip = {
+ .name = "wm8994",
+ .irqs = wm8994_irqs,
+ .num_irqs = ARRAY_SIZE(wm8994_irqs),
-static struct irq_chip wm8994_irq_chip = {
- .name = "wm8994",
- .irq_bus_lock = wm8994_irq_lock,
- .irq_bus_sync_unlock = wm8994_irq_sync_unlock,
- .irq_disable = wm8994_irq_disable,
- .irq_enable = wm8994_irq_enable,
+ .num_regs = 2,
+ .status_base = WM8994_INTERRUPT_STATUS_1,
+ .mask_base = WM8994_INTERRUPT_STATUS_1_MASK,
+ .ack_base = WM8994_INTERRUPT_STATUS_1,
};
-/* The processing of the primary interrupt occurs in a thread so that
- * we can interact with the device over I2C or SPI. */
-static irqreturn_t wm8994_irq_thread(int irq, void *data)
-{
- struct wm8994 *wm8994 = data;
- unsigned int i;
- u16 status[WM8994_NUM_IRQ_REGS];
- int ret;
-
- ret = wm8994_bulk_read(wm8994, WM8994_INTERRUPT_STATUS_1,
- WM8994_NUM_IRQ_REGS, status);
- if (ret < 0) {
- dev_err(wm8994->dev, "Failed to read interrupt status: %d\n",
- ret);
- return IRQ_NONE;
- }
-
- /* Bit swap and apply masking */
- for (i = 0; i < WM8994_NUM_IRQ_REGS; i++) {
- status[i] = be16_to_cpu(status[i]);
- status[i] &= ~wm8994->irq_masks_cur[i];
- }
-
- /* Ack any unmasked IRQs */
- for (i = 0; i < ARRAY_SIZE(status); i++) {
- if (status[i])
- wm8994_reg_write(wm8994, WM8994_INTERRUPT_STATUS_1 + i,
- status[i]);
- }
-
- /* Report */
- for (i = 0; i < ARRAY_SIZE(wm8994_irqs); i++) {
- if (status[wm8994_irqs[i].reg - 1] & wm8994_irqs[i].mask)
- handle_nested_irq(wm8994->irq_base + i);
- }
-
- return IRQ_HANDLED;
-}
-
int wm8994_irq_init(struct wm8994 *wm8994)
{
- int i, cur_irq, ret;
-
- mutex_init(&wm8994->irq_lock);
-
- /* Mask the individual interrupt sources */
- for (i = 0; i < ARRAY_SIZE(wm8994->irq_masks_cur); i++) {
- wm8994->irq_masks_cur[i] = 0xffff;
- wm8994->irq_masks_cache[i] = 0xffff;
- wm8994_reg_write(wm8994, WM8994_INTERRUPT_STATUS_1_MASK + i,
- 0xffff);
- }
+ int ret;
if (!wm8994->irq) {
dev_warn(wm8994->dev,
@@ -274,30 +153,12 @@ int wm8994_irq_init(struct wm8994 *wm8994)
return 0;
}
- /* Register them with genirq */
- for (cur_irq = wm8994->irq_base;
- cur_irq < ARRAY_SIZE(wm8994_irqs) + wm8994->irq_base;
- cur_irq++) {
- irq_set_chip_data(cur_irq, wm8994);
- irq_set_chip_and_handler(cur_irq, &wm8994_irq_chip,
- handle_edge_irq);
- irq_set_nested_thread(cur_irq, 1);
-
- /* ARM needs us to explicitly flag the IRQ as valid
- * and will set them noprobe when we do so. */
-#ifdef CONFIG_ARM
- set_irq_flags(cur_irq, IRQF_VALID);
-#else
- irq_set_noprobe(cur_irq);
-#endif
- }
-
- ret = request_threaded_irq(wm8994->irq, NULL, wm8994_irq_thread,
- IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
- "wm8994", wm8994);
+ ret = regmap_add_irq_chip(wm8994->regmap, wm8994->irq,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ wm8994->irq_base, &wm8994_irq_chip,
+ &wm8994->irq_data);
if (ret != 0) {
- dev_err(wm8994->dev, "Failed to request IRQ %d: %d\n",
- wm8994->irq, ret);
+ dev_err(wm8994->dev, "Failed to register IRQ chip: %d\n", ret);
return ret;
}
@@ -309,6 +170,5 @@ int wm8994_irq_init(struct wm8994 *wm8994)
void wm8994_irq_exit(struct wm8994 *wm8994)
{
- if (wm8994->irq)
- free_irq(wm8994->irq, wm8994);
+ regmap_del_irq_chip(wm8994->irq, wm8994->irq_data);
}
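(Note, not part of the patch: the conversion above drops the driver's private locking, mask caching and threaded handler in favour of the generic regmap-irq helper. The interrupt sources are described by a table of struct regmap_irq entries plus a struct regmap_irq_chip giving the status/mask/ack register bases, and regmap_add_irq_chip()/regmap_del_irq_chip() do the rest. Below is a self-contained sketch for a hypothetical two-register interrupt block; the foo_* names and register addresses are illustrative assumptions, not from this patch.)

#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/regmap.h>

struct foo {
	struct regmap *regmap;
	int irq;			/* primary IRQ line from the SoC */
	int irq_base;			/* base for the nested virtual IRQs */
	struct regmap_irq_chip_data *irq_data;
};

enum { FOO_IRQ_READY, FOO_IRQ_ERROR };

static struct regmap_irq foo_irqs[] = {
	[FOO_IRQ_READY] = { .reg_offset = 0, .mask = BIT(0) },
	[FOO_IRQ_ERROR] = { .reg_offset = 1, .mask = BIT(3) },
};

static struct regmap_irq_chip foo_irq_chip = {
	.name = "foo",
	.irqs = foo_irqs,
	.num_irqs = ARRAY_SIZE(foo_irqs),

	.num_regs = 2,			/* two status/mask register pairs */
	.status_base = 0x10,		/* illustrative register addresses */
	.mask_base = 0x18,
	.ack_base = 0x10,		/* status registers ack on write */
};

static int foo_irq_init(struct foo *foo)
{
	/* regmap-irq supplies the threaded handler, bus locking and
	 * mask caching that wm8994-irq.c previously open-coded. */
	return regmap_add_irq_chip(foo->regmap, foo->irq,
				   IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
				   foo->irq_base, &foo_irq_chip,
				   &foo->irq_data);
}

static void foo_irq_exit(struct foo *foo)
{
	regmap_del_irq_chip(foo->irq, foo->irq_data);
}

Client code then requests the individual sources as nested interrupts relative to irq_base, in the same way the WM8994 sub-drivers use wm8994->irq_base.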
diff --git a/drivers/mfd/wm8994-regmap.c b/drivers/mfd/wm8994-regmap.c
new file mode 100644
index 000000000000..c598ae69b8ff
--- /dev/null
+++ b/drivers/mfd/wm8994-regmap.c
@@ -0,0 +1,1238 @@
+/*
+ * wm8994-regmap.c -- Register map data for WM8994 series devices
+ *
+ * Copyright 2011 Wolfson Microelectronics PLC.
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/mfd/wm8994/core.h>
+#include <linux/mfd/wm8994/registers.h>
+#include <linux/regmap.h>
+
+#include "wm8994.h"
+
+static struct reg_default wm1811_defaults[] = {
+ { 0x0000, 0x1811 }, /* R0 - Software Reset */
+ { 0x0001, 0x0000 }, /* R1 - Power Management (1) */
+ { 0x0002, 0x6000 }, /* R2 - Power Management (2) */
+ { 0x0003, 0x0000 }, /* R3 - Power Management (3) */
+ { 0x0004, 0x0000 }, /* R4 - Power Management (4) */
+ { 0x0005, 0x0000 }, /* R5 - Power Management (5) */
+ { 0x0006, 0x0000 }, /* R6 - Power Management (6) */
+ { 0x0015, 0x0000 }, /* R21 - Input Mixer (1) */
+ { 0x0018, 0x008B }, /* R24 - Left Line Input 1&2 Volume */
+ { 0x0019, 0x008B }, /* R25 - Left Line Input 3&4 Volume */
+ { 0x001A, 0x008B }, /* R26 - Right Line Input 1&2 Volume */
+ { 0x001B, 0x008B }, /* R27 - Right Line Input 3&4 Volume */
+ { 0x001C, 0x006D }, /* R28 - Left Output Volume */
+ { 0x001D, 0x006D }, /* R29 - Right Output Volume */
+ { 0x001E, 0x0066 }, /* R30 - Line Outputs Volume */
+ { 0x001F, 0x0020 }, /* R31 - HPOUT2 Volume */
+ { 0x0020, 0x0079 }, /* R32 - Left OPGA Volume */
+ { 0x0021, 0x0079 }, /* R33 - Right OPGA Volume */
+ { 0x0022, 0x0003 }, /* R34 - SPKMIXL Attenuation */
+ { 0x0023, 0x0003 }, /* R35 - SPKMIXR Attenuation */
+ { 0x0024, 0x0011 }, /* R36 - SPKOUT Mixers */
+ { 0x0025, 0x0140 }, /* R37 - ClassD */
+ { 0x0026, 0x0079 }, /* R38 - Speaker Volume Left */
+ { 0x0027, 0x0079 }, /* R39 - Speaker Volume Right */
+ { 0x0028, 0x0000 }, /* R40 - Input Mixer (2) */
+ { 0x0029, 0x0000 }, /* R41 - Input Mixer (3) */
+ { 0x002A, 0x0000 }, /* R42 - Input Mixer (4) */
+ { 0x002B, 0x0000 }, /* R43 - Input Mixer (5) */
+ { 0x002C, 0x0000 }, /* R44 - Input Mixer (6) */
+ { 0x002D, 0x0000 }, /* R45 - Output Mixer (1) */
+ { 0x002E, 0x0000 }, /* R46 - Output Mixer (2) */
+ { 0x002F, 0x0000 }, /* R47 - Output Mixer (3) */
+ { 0x0030, 0x0000 }, /* R48 - Output Mixer (4) */
+ { 0x0031, 0x0000 }, /* R49 - Output Mixer (5) */
+ { 0x0032, 0x0000 }, /* R50 - Output Mixer (6) */
+ { 0x0033, 0x0000 }, /* R51 - HPOUT2 Mixer */
+ { 0x0034, 0x0000 }, /* R52 - Line Mixer (1) */
+ { 0x0035, 0x0000 }, /* R53 - Line Mixer (2) */
+ { 0x0036, 0x0000 }, /* R54 - Speaker Mixer */
+ { 0x0037, 0x0000 }, /* R55 - Additional Control */
+ { 0x0038, 0x0000 }, /* R56 - AntiPOP (1) */
+ { 0x0039, 0x0180 }, /* R57 - AntiPOP (2) */
+ { 0x003B, 0x000D }, /* R59 - LDO 1 */
+ { 0x003C, 0x0003 }, /* R60 - LDO 2 */
+ { 0x003D, 0x0039 }, /* R61 - MICBIAS1 */
+ { 0x003E, 0x0039 }, /* R62 - MICBIAS2 */
+ { 0x004C, 0x1F25 }, /* R76 - Charge Pump (1) */
+ { 0x004D, 0xAB19 }, /* R77 - Charge Pump (2) */
+ { 0x0051, 0x0004 }, /* R81 - Class W (1) */
+ { 0x0054, 0x0000 }, /* R84 - DC Servo (1) */
+ { 0x0055, 0x054A }, /* R85 - DC Servo (2) */
+ { 0x0058, 0x0000 }, /* R88 - DC Servo Readback */
+ { 0x0059, 0x0000 }, /* R89 - DC Servo (4) */
+ { 0x0060, 0x0000 }, /* R96 - Analogue HP (1) */
+ { 0x00C5, 0x0000 }, /* R197 - Class D Test (5) */
+ { 0x00D0, 0x7600 }, /* R208 - Mic Detect 1 */
+ { 0x00D1, 0x007F }, /* R209 - Mic Detect 2 */
+ { 0x00D2, 0x0000 }, /* R210 - Mic Detect 3 */
+ { 0x0100, 0x0100 }, /* R256 - Chip Revision */
+ { 0x0101, 0x8004 }, /* R257 - Control Interface */
+ { 0x0200, 0x0000 }, /* R512 - AIF1 Clocking (1) */
+ { 0x0201, 0x0000 }, /* R513 - AIF1 Clocking (2) */
+ { 0x0204, 0x0000 }, /* R516 - AIF2 Clocking (1) */
+ { 0x0205, 0x0000 }, /* R517 - AIF2 Clocking (2) */
+ { 0x0208, 0x0000 }, /* R520 - Clocking (1) */
+ { 0x0209, 0x0000 }, /* R521 - Clocking (2) */
+ { 0x0210, 0x0083 }, /* R528 - AIF1 Rate */
+ { 0x0211, 0x0083 }, /* R529 - AIF2 Rate */
+ { 0x0212, 0x0000 }, /* R530 - Rate Status */
+ { 0x0220, 0x0000 }, /* R544 - FLL1 Control (1) */
+ { 0x0221, 0x0000 }, /* R545 - FLL1 Control (2) */
+ { 0x0222, 0x0000 }, /* R546 - FLL1 Control (3) */
+ { 0x0223, 0x0000 }, /* R547 - FLL1 Control (4) */
+ { 0x0224, 0x0C80 }, /* R548 - FLL1 Control (5) */
+ { 0x0226, 0x0000 }, /* R550 - FLL1 EFS 1 */
+ { 0x0227, 0x0006 }, /* R551 - FLL1 EFS 2 */
+ { 0x0240, 0x0000 }, /* R576 - FLL2 Control (1) */
+ { 0x0241, 0x0000 }, /* R577 - FLL2 Control (2) */
+ { 0x0242, 0x0000 }, /* R578 - FLL2 Control (3) */
+ { 0x0243, 0x0000 }, /* R579 - FLL2 Control (4) */
+ { 0x0244, 0x0C80 }, /* R580 - FLL2 Control (5) */
+ { 0x0246, 0x0000 }, /* R582 - FLL2 EFS 1 */
+ { 0x0247, 0x0006 }, /* R583 - FLL2 EFS 2 */
+ { 0x0300, 0x4050 }, /* R768 - AIF1 Control (1) */
+ { 0x0301, 0x4000 }, /* R769 - AIF1 Control (2) */
+ { 0x0302, 0x0000 }, /* R770 - AIF1 Master/Slave */
+ { 0x0303, 0x0040 }, /* R771 - AIF1 BCLK */
+ { 0x0304, 0x0040 }, /* R772 - AIF1ADC LRCLK */
+ { 0x0305, 0x0040 }, /* R773 - AIF1DAC LRCLK */
+ { 0x0306, 0x0004 }, /* R774 - AIF1DAC Data */
+ { 0x0307, 0x0100 }, /* R775 - AIF1ADC Data */
+ { 0x0310, 0x4050 }, /* R784 - AIF2 Control (1) */
+ { 0x0311, 0x4000 }, /* R785 - AIF2 Control (2) */
+ { 0x0312, 0x0000 }, /* R786 - AIF2 Master/Slave */
+ { 0x0313, 0x0040 }, /* R787 - AIF2 BCLK */
+ { 0x0314, 0x0040 }, /* R788 - AIF2ADC LRCLK */
+ { 0x0315, 0x0040 }, /* R789 - AIF2DAC LRCLK */
+ { 0x0316, 0x0000 }, /* R790 - AIF2DAC Data */
+ { 0x0317, 0x0000 }, /* R791 - AIF2ADC Data */
+ { 0x0318, 0x0003 }, /* R792 - AIF2TX Control */
+ { 0x0320, 0x0040 }, /* R800 - AIF3 Control (1) */
+ { 0x0321, 0x0000 }, /* R801 - AIF3 Control (2) */
+ { 0x0322, 0x0000 }, /* R802 - AIF3DAC Data */
+ { 0x0323, 0x0000 }, /* R803 - AIF3ADC Data */
+ { 0x0400, 0x00C0 }, /* R1024 - AIF1 ADC1 Left Volume */
+ { 0x0401, 0x00C0 }, /* R1025 - AIF1 ADC1 Right Volume */
+ { 0x0402, 0x00C0 }, /* R1026 - AIF1 DAC1 Left Volume */
+ { 0x0403, 0x00C0 }, /* R1027 - AIF1 DAC1 Right Volume */
+ { 0x0410, 0x0000 }, /* R1040 - AIF1 ADC1 Filters */
+ { 0x0420, 0x0200 }, /* R1056 - AIF1 DAC1 Filters (1) */
+ { 0x0421, 0x0010 }, /* R1057 - AIF1 DAC1 Filters (2) */
+ { 0x0430, 0x0068 }, /* R1072 - AIF1 DAC1 Noise Gate */
+ { 0x0440, 0x0098 }, /* R1088 - AIF1 DRC1 (1) */
+ { 0x0441, 0x0845 }, /* R1089 - AIF1 DRC1 (2) */
+ { 0x0442, 0x0000 }, /* R1090 - AIF1 DRC1 (3) */
+ { 0x0443, 0x0000 }, /* R1091 - AIF1 DRC1 (4) */
+ { 0x0444, 0x0000 }, /* R1092 - AIF1 DRC1 (5) */
+ { 0x0480, 0x6318 }, /* R1152 - AIF1 DAC1 EQ Gains (1) */
+ { 0x0481, 0x6300 }, /* R1153 - AIF1 DAC1 EQ Gains (2) */
+ { 0x0482, 0x0FCA }, /* R1154 - AIF1 DAC1 EQ Band 1 A */
+ { 0x0483, 0x0400 }, /* R1155 - AIF1 DAC1 EQ Band 1 B */
+ { 0x0484, 0x00D8 }, /* R1156 - AIF1 DAC1 EQ Band 1 PG */
+ { 0x0485, 0x1EB5 }, /* R1157 - AIF1 DAC1 EQ Band 2 A */
+ { 0x0486, 0xF145 }, /* R1158 - AIF1 DAC1 EQ Band 2 B */
+ { 0x0487, 0x0B75 }, /* R1159 - AIF1 DAC1 EQ Band 2 C */
+ { 0x0488, 0x01C5 }, /* R1160 - AIF1 DAC1 EQ Band 2 PG */
+ { 0x0489, 0x1C58 }, /* R1161 - AIF1 DAC1 EQ Band 3 A */
+ { 0x048A, 0xF373 }, /* R1162 - AIF1 DAC1 EQ Band 3 B */
+ { 0x048B, 0x0A54 }, /* R1163 - AIF1 DAC1 EQ Band 3 C */
+ { 0x048C, 0x0558 }, /* R1164 - AIF1 DAC1 EQ Band 3 PG */
+ { 0x048D, 0x168E }, /* R1165 - AIF1 DAC1 EQ Band 4 A */
+ { 0x048E, 0xF829 }, /* R1166 - AIF1 DAC1 EQ Band 4 B */
+ { 0x048F, 0x07AD }, /* R1167 - AIF1 DAC1 EQ Band 4 C */
+ { 0x0490, 0x1103 }, /* R1168 - AIF1 DAC1 EQ Band 4 PG */
+ { 0x0491, 0x0564 }, /* R1169 - AIF1 DAC1 EQ Band 5 A */
+ { 0x0492, 0x0559 }, /* R1170 - AIF1 DAC1 EQ Band 5 B */
+ { 0x0493, 0x4000 }, /* R1171 - AIF1 DAC1 EQ Band 5 PG */
+ { 0x0494, 0x0000 }, /* R1172 - AIF1 DAC1 EQ Band 1 C */
+ { 0x0500, 0x00C0 }, /* R1280 - AIF2 ADC Left Volume */
+ { 0x0501, 0x00C0 }, /* R1281 - AIF2 ADC Right Volume */
+ { 0x0502, 0x00C0 }, /* R1282 - AIF2 DAC Left Volume */
+ { 0x0503, 0x00C0 }, /* R1283 - AIF2 DAC Right Volume */
+ { 0x0510, 0x0000 }, /* R1296 - AIF2 ADC Filters */
+ { 0x0520, 0x0200 }, /* R1312 - AIF2 DAC Filters (1) */
+ { 0x0521, 0x0010 }, /* R1313 - AIF2 DAC Filters (2) */
+ { 0x0530, 0x0068 }, /* R1328 - AIF2 DAC Noise Gate */
+ { 0x0540, 0x0098 }, /* R1344 - AIF2 DRC (1) */
+ { 0x0541, 0x0845 }, /* R1345 - AIF2 DRC (2) */
+ { 0x0542, 0x0000 }, /* R1346 - AIF2 DRC (3) */
+ { 0x0543, 0x0000 }, /* R1347 - AIF2 DRC (4) */
+ { 0x0544, 0x0000 }, /* R1348 - AIF2 DRC (5) */
+ { 0x0580, 0x6318 }, /* R1408 - AIF2 EQ Gains (1) */
+ { 0x0581, 0x6300 }, /* R1409 - AIF2 EQ Gains (2) */
+ { 0x0582, 0x0FCA }, /* R1410 - AIF2 EQ Band 1 A */
+ { 0x0583, 0x0400 }, /* R1411 - AIF2 EQ Band 1 B */
+ { 0x0584, 0x00D8 }, /* R1412 - AIF2 EQ Band 1 PG */
+ { 0x0585, 0x1EB5 }, /* R1413 - AIF2 EQ Band 2 A */
+ { 0x0586, 0xF145 }, /* R1414 - AIF2 EQ Band 2 B */
+ { 0x0587, 0x0B75 }, /* R1415 - AIF2 EQ Band 2 C */
+ { 0x0588, 0x01C5 }, /* R1416 - AIF2 EQ Band 2 PG */
+ { 0x0589, 0x1C58 }, /* R1417 - AIF2 EQ Band 3 A */
+ { 0x058A, 0xF373 }, /* R1418 - AIF2 EQ Band 3 B */
+ { 0x058B, 0x0A54 }, /* R1419 - AIF2 EQ Band 3 C */
+ { 0x058C, 0x0558 }, /* R1420 - AIF2 EQ Band 3 PG */
+ { 0x058D, 0x168E }, /* R1421 - AIF2 EQ Band 4 A */
+ { 0x058E, 0xF829 }, /* R1422 - AIF2 EQ Band 4 B */
+ { 0x058F, 0x07AD }, /* R1423 - AIF2 EQ Band 4 C */
+ { 0x0590, 0x1103 }, /* R1424 - AIF2 EQ Band 4 PG */
+ { 0x0591, 0x0564 }, /* R1425 - AIF2 EQ Band 5 A */
+ { 0x0592, 0x0559 }, /* R1426 - AIF2 EQ Band 5 B */
+ { 0x0593, 0x4000 }, /* R1427 - AIF2 EQ Band 5 PG */
+ { 0x0594, 0x0000 }, /* R1428 - AIF2 EQ Band 1 C */
+ { 0x0600, 0x0000 }, /* R1536 - DAC1 Mixer Volumes */
+ { 0x0601, 0x0000 }, /* R1537 - DAC1 Left Mixer Routing */
+ { 0x0602, 0x0000 }, /* R1538 - DAC1 Right Mixer Routing */
+ { 0x0603, 0x0000 }, /* R1539 - AIF2ADC Mixer Volumes */
+ { 0x0604, 0x0000 }, /* R1540 - AIF2ADC Left Mixer Routing */
+ { 0x0605, 0x0000 }, /* R1541 - AIF2ADC Right Mixer Routing */
+ { 0x0606, 0x0000 }, /* R1542 - AIF1 ADC1 Left Mixer Routing */
+ { 0x0607, 0x0000 }, /* R1543 - AIF1 ADC1 Right Mixer Routing */
+ { 0x0610, 0x02C0 }, /* R1552 - DAC1 Left Volume */
+ { 0x0611, 0x02C0 }, /* R1553 - DAC1 Right Volume */
+ { 0x0612, 0x02C0 }, /* R1554 - AIF2TX Left Volume */
+ { 0x0613, 0x02C0 }, /* R1555 - AIF2TX Right Volume */
+ { 0x0614, 0x0000 }, /* R1556 - DAC Softmute */
+ { 0x0620, 0x0002 }, /* R1568 - Oversampling */
+ { 0x0621, 0x0000 }, /* R1569 - Sidetone */
+ { 0x0700, 0x8100 }, /* R1792 - GPIO 1 */
+ { 0x0701, 0xA101 }, /* R1793 - Pull Control (MCLK2) */
+ { 0x0702, 0xA101 }, /* R1794 - Pull Control (BCLK2) */
+ { 0x0703, 0xA101 }, /* R1795 - Pull Control (DACLRCLK2) */
+ { 0x0704, 0xA101 }, /* R1796 - Pull Control (DACDAT2) */
+ { 0x0707, 0xA101 }, /* R1799 - GPIO 8 */
+ { 0x0708, 0xA101 }, /* R1800 - GPIO 9 */
+ { 0x0709, 0xA101 }, /* R1801 - GPIO 10 */
+ { 0x070A, 0xA101 }, /* R1802 - GPIO 11 */
+ { 0x0720, 0x0000 }, /* R1824 - Pull Control (1) */
+ { 0x0721, 0x0156 }, /* R1825 - Pull Control (2) */
+ { 0x0730, 0x0000 }, /* R1840 - Interrupt Status 1 */
+ { 0x0731, 0x0000 }, /* R1841 - Interrupt Status 2 */
+ { 0x0732, 0x0000 }, /* R1842 - Interrupt Raw Status 2 */
+ { 0x0738, 0x07FF }, /* R1848 - Interrupt Status 1 Mask */
+ { 0x0739, 0xDFEF }, /* R1849 - Interrupt Status 2 Mask */
+ { 0x0740, 0x0000 }, /* R1856 - Interrupt Control */
+ { 0x0748, 0x003F }, /* R1864 - IRQ Debounce */
+};
+
+static struct reg_default wm8994_defaults[] = {
+ { 0x0000, 0x8994 }, /* R0 - Software Reset */
+ { 0x0001, 0x0000 }, /* R1 - Power Management (1) */
+ { 0x0002, 0x6000 }, /* R2 - Power Management (2) */
+ { 0x0003, 0x0000 }, /* R3 - Power Management (3) */
+ { 0x0004, 0x0000 }, /* R4 - Power Management (4) */
+ { 0x0005, 0x0000 }, /* R5 - Power Management (5) */
+ { 0x0006, 0x0000 }, /* R6 - Power Management (6) */
+ { 0x0015, 0x0000 }, /* R21 - Input Mixer (1) */
+ { 0x0018, 0x008B }, /* R24 - Left Line Input 1&2 Volume */
+ { 0x0019, 0x008B }, /* R25 - Left Line Input 3&4 Volume */
+ { 0x001A, 0x008B }, /* R26 - Right Line Input 1&2 Volume */
+ { 0x001B, 0x008B }, /* R27 - Right Line Input 3&4 Volume */
+ { 0x001C, 0x006D }, /* R28 - Left Output Volume */
+ { 0x001D, 0x006D }, /* R29 - Right Output Volume */
+ { 0x001E, 0x0066 }, /* R30 - Line Outputs Volume */
+ { 0x001F, 0x0020 }, /* R31 - HPOUT2 Volume */
+ { 0x0020, 0x0079 }, /* R32 - Left OPGA Volume */
+ { 0x0021, 0x0079 }, /* R33 - Right OPGA Volume */
+ { 0x0022, 0x0003 }, /* R34 - SPKMIXL Attenuation */
+ { 0x0023, 0x0003 }, /* R35 - SPKMIXR Attenuation */
+ { 0x0024, 0x0011 }, /* R36 - SPKOUT Mixers */
+ { 0x0025, 0x0140 }, /* R37 - ClassD */
+ { 0x0026, 0x0079 }, /* R38 - Speaker Volume Left */
+ { 0x0027, 0x0079 }, /* R39 - Speaker Volume Right */
+ { 0x0028, 0x0000 }, /* R40 - Input Mixer (2) */
+ { 0x0029, 0x0000 }, /* R41 - Input Mixer (3) */
+ { 0x002A, 0x0000 }, /* R42 - Input Mixer (4) */
+ { 0x002B, 0x0000 }, /* R43 - Input Mixer (5) */
+ { 0x002C, 0x0000 }, /* R44 - Input Mixer (6) */
+ { 0x002D, 0x0000 }, /* R45 - Output Mixer (1) */
+ { 0x002E, 0x0000 }, /* R46 - Output Mixer (2) */
+ { 0x002F, 0x0000 }, /* R47 - Output Mixer (3) */
+ { 0x0030, 0x0000 }, /* R48 - Output Mixer (4) */
+ { 0x0031, 0x0000 }, /* R49 - Output Mixer (5) */
+ { 0x0032, 0x0000 }, /* R50 - Output Mixer (6) */
+ { 0x0033, 0x0000 }, /* R51 - HPOUT2 Mixer */
+ { 0x0034, 0x0000 }, /* R52 - Line Mixer (1) */
+ { 0x0035, 0x0000 }, /* R53 - Line Mixer (2) */
+ { 0x0036, 0x0000 }, /* R54 - Speaker Mixer */
+ { 0x0037, 0x0000 }, /* R55 - Additional Control */
+ { 0x0038, 0x0000 }, /* R56 - AntiPOP (1) */
+ { 0x0039, 0x0000 }, /* R57 - AntiPOP (2) */
+ { 0x003A, 0x0000 }, /* R58 - MICBIAS */
+ { 0x003B, 0x000D }, /* R59 - LDO 1 */
+ { 0x003C, 0x0003 }, /* R60 - LDO 2 */
+ { 0x004C, 0x1F25 }, /* R76 - Charge Pump (1) */
+ { 0x0051, 0x0004 }, /* R81 - Class W (1) */
+ { 0x0054, 0x0000 }, /* R84 - DC Servo (1) */
+ { 0x0055, 0x054A }, /* R85 - DC Servo (2) */
+ { 0x0057, 0x0000 }, /* R87 - DC Servo (4) */
+ { 0x0058, 0x0000 }, /* R88 - DC Servo Readback */
+ { 0x0060, 0x0000 }, /* R96 - Analogue HP (1) */
+ { 0x0100, 0x0003 }, /* R256 - Chip Revision */
+ { 0x0101, 0x8004 }, /* R257 - Control Interface */
+ { 0x0110, 0x0000 }, /* R272 - Write Sequencer Ctrl (1) */
+ { 0x0111, 0x0000 }, /* R273 - Write Sequencer Ctrl (2) */
+ { 0x0200, 0x0000 }, /* R512 - AIF1 Clocking (1) */
+ { 0x0201, 0x0000 }, /* R513 - AIF1 Clocking (2) */
+ { 0x0204, 0x0000 }, /* R516 - AIF2 Clocking (1) */
+ { 0x0205, 0x0000 }, /* R517 - AIF2 Clocking (2) */
+ { 0x0208, 0x0000 }, /* R520 - Clocking (1) */
+ { 0x0209, 0x0000 }, /* R521 - Clocking (2) */
+ { 0x0210, 0x0083 }, /* R528 - AIF1 Rate */
+ { 0x0211, 0x0083 }, /* R529 - AIF2 Rate */
+ { 0x0212, 0x0000 }, /* R530 - Rate Status */
+ { 0x0220, 0x0000 }, /* R544 - FLL1 Control (1) */
+ { 0x0221, 0x0000 }, /* R545 - FLL1 Control (2) */
+ { 0x0222, 0x0000 }, /* R546 - FLL1 Control (3) */
+ { 0x0223, 0x0000 }, /* R547 - FLL1 Control (4) */
+ { 0x0224, 0x0C80 }, /* R548 - FLL1 Control (5) */
+ { 0x0240, 0x0000 }, /* R576 - FLL2 Control (1) */
+ { 0x0241, 0x0000 }, /* R577 - FLL2 Control (2) */
+ { 0x0242, 0x0000 }, /* R578 - FLL2 Control (3) */
+ { 0x0243, 0x0000 }, /* R579 - FLL2 Control (4) */
+ { 0x0244, 0x0C80 }, /* R580 - FLL2 Control (5) */
+ { 0x0300, 0x4050 }, /* R768 - AIF1 Control (1) */
+ { 0x0301, 0x4000 }, /* R769 - AIF1 Control (2) */
+ { 0x0302, 0x0000 }, /* R770 - AIF1 Master/Slave */
+ { 0x0303, 0x0040 }, /* R771 - AIF1 BCLK */
+ { 0x0304, 0x0040 }, /* R772 - AIF1ADC LRCLK */
+ { 0x0305, 0x0040 }, /* R773 - AIF1DAC LRCLK */
+ { 0x0306, 0x0004 }, /* R774 - AIF1DAC Data */
+ { 0x0307, 0x0100 }, /* R775 - AIF1ADC Data */
+ { 0x0310, 0x4050 }, /* R784 - AIF2 Control (1) */
+ { 0x0311, 0x4000 }, /* R785 - AIF2 Control (2) */
+ { 0x0312, 0x0000 }, /* R786 - AIF2 Master/Slave */
+ { 0x0313, 0x0040 }, /* R787 - AIF2 BCLK */
+ { 0x0314, 0x0040 }, /* R788 - AIF2ADC LRCLK */
+ { 0x0315, 0x0040 }, /* R789 - AIF2DAC LRCLK */
+ { 0x0316, 0x0000 }, /* R790 - AIF2DAC Data */
+ { 0x0317, 0x0000 }, /* R791 - AIF2ADC Data */
+ { 0x0400, 0x00C0 }, /* R1024 - AIF1 ADC1 Left Volume */
+ { 0x0401, 0x00C0 }, /* R1025 - AIF1 ADC1 Right Volume */
+ { 0x0402, 0x00C0 }, /* R1026 - AIF1 DAC1 Left Volume */
+ { 0x0403, 0x00C0 }, /* R1027 - AIF1 DAC1 Right Volume */
+ { 0x0404, 0x00C0 }, /* R1028 - AIF1 ADC2 Left Volume */
+ { 0x0405, 0x00C0 }, /* R1029 - AIF1 ADC2 Right Volume */
+ { 0x0406, 0x00C0 }, /* R1030 - AIF1 DAC2 Left Volume */
+ { 0x0407, 0x00C0 }, /* R1031 - AIF1 DAC2 Right Volume */
+ { 0x0410, 0x0000 }, /* R1040 - AIF1 ADC1 Filters */
+ { 0x0411, 0x0000 }, /* R1041 - AIF1 ADC2 Filters */
+ { 0x0420, 0x0200 }, /* R1056 - AIF1 DAC1 Filters (1) */
+ { 0x0421, 0x0010 }, /* R1057 - AIF1 DAC1 Filters (2) */
+ { 0x0422, 0x0200 }, /* R1058 - AIF1 DAC2 Filters (1) */
+ { 0x0423, 0x0010 }, /* R1059 - AIF1 DAC2 Filters (2) */
+ { 0x0440, 0x0098 }, /* R1088 - AIF1 DRC1 (1) */
+ { 0x0441, 0x0845 }, /* R1089 - AIF1 DRC1 (2) */
+ { 0x0442, 0x0000 }, /* R1090 - AIF1 DRC1 (3) */
+ { 0x0443, 0x0000 }, /* R1091 - AIF1 DRC1 (4) */
+ { 0x0444, 0x0000 }, /* R1092 - AIF1 DRC1 (5) */
+ { 0x0450, 0x0098 }, /* R1104 - AIF1 DRC2 (1) */
+ { 0x0451, 0x0845 }, /* R1105 - AIF1 DRC2 (2) */
+ { 0x0452, 0x0000 }, /* R1106 - AIF1 DRC2 (3) */
+ { 0x0453, 0x0000 }, /* R1107 - AIF1 DRC2 (4) */
+ { 0x0454, 0x0000 }, /* R1108 - AIF1 DRC2 (5) */
+ { 0x0480, 0x6318 }, /* R1152 - AIF1 DAC1 EQ Gains (1) */
+ { 0x0481, 0x6300 }, /* R1153 - AIF1 DAC1 EQ Gains (2) */
+ { 0x0482, 0x0FCA }, /* R1154 - AIF1 DAC1 EQ Band 1 A */
+ { 0x0483, 0x0400 }, /* R1155 - AIF1 DAC1 EQ Band 1 B */
+ { 0x0484, 0x00D8 }, /* R1156 - AIF1 DAC1 EQ Band 1 PG */
+ { 0x0485, 0x1EB5 }, /* R1157 - AIF1 DAC1 EQ Band 2 A */
+ { 0x0486, 0xF145 }, /* R1158 - AIF1 DAC1 EQ Band 2 B */
+ { 0x0487, 0x0B75 }, /* R1159 - AIF1 DAC1 EQ Band 2 C */
+ { 0x0488, 0x01C5 }, /* R1160 - AIF1 DAC1 EQ Band 2 PG */
+ { 0x0489, 0x1C58 }, /* R1161 - AIF1 DAC1 EQ Band 3 A */
+ { 0x048A, 0xF373 }, /* R1162 - AIF1 DAC1 EQ Band 3 B */
+ { 0x048B, 0x0A54 }, /* R1163 - AIF1 DAC1 EQ Band 3 C */
+ { 0x048C, 0x0558 }, /* R1164 - AIF1 DAC1 EQ Band 3 PG */
+ { 0x048D, 0x168E }, /* R1165 - AIF1 DAC1 EQ Band 4 A */
+ { 0x048E, 0xF829 }, /* R1166 - AIF1 DAC1 EQ Band 4 B */
+ { 0x048F, 0x07AD }, /* R1167 - AIF1 DAC1 EQ Band 4 C */
+ { 0x0490, 0x1103 }, /* R1168 - AIF1 DAC1 EQ Band 4 PG */
+ { 0x0491, 0x0564 }, /* R1169 - AIF1 DAC1 EQ Band 5 A */
+ { 0x0492, 0x0559 }, /* R1170 - AIF1 DAC1 EQ Band 5 B */
+ { 0x0493, 0x4000 }, /* R1171 - AIF1 DAC1 EQ Band 5 PG */
+ { 0x04A0, 0x6318 }, /* R1184 - AIF1 DAC2 EQ Gains (1) */
+ { 0x04A1, 0x6300 }, /* R1185 - AIF1 DAC2 EQ Gains (2) */
+ { 0x04A2, 0x0FCA }, /* R1186 - AIF1 DAC2 EQ Band 1 A */
+ { 0x04A3, 0x0400 }, /* R1187 - AIF1 DAC2 EQ Band 1 B */
+ { 0x04A4, 0x00D8 }, /* R1188 - AIF1 DAC2 EQ Band 1 PG */
+ { 0x04A5, 0x1EB5 }, /* R1189 - AIF1 DAC2 EQ Band 2 A */
+ { 0x04A6, 0xF145 }, /* R1190 - AIF1 DAC2 EQ Band 2 B */
+ { 0x04A7, 0x0B75 }, /* R1191 - AIF1 DAC2 EQ Band 2 C */
+ { 0x04A8, 0x01C5 }, /* R1192 - AIF1 DAC2 EQ Band 2 PG */
+ { 0x04A9, 0x1C58 }, /* R1193 - AIF1 DAC2 EQ Band 3 A */
+ { 0x04AA, 0xF373 }, /* R1194 - AIF1 DAC2 EQ Band 3 B */
+ { 0x04AB, 0x0A54 }, /* R1195 - AIF1 DAC2 EQ Band 3 C */
+ { 0x04AC, 0x0558 }, /* R1196 - AIF1 DAC2 EQ Band 3 PG */
+ { 0x04AD, 0x168E }, /* R1197 - AIF1 DAC2 EQ Band 4 A */
+ { 0x04AE, 0xF829 }, /* R1198 - AIF1 DAC2 EQ Band 4 B */
+ { 0x04AF, 0x07AD }, /* R1199 - AIF1 DAC2 EQ Band 4 C */
+ { 0x04B0, 0x1103 }, /* R1200 - AIF1 DAC2 EQ Band 4 PG */
+ { 0x04B1, 0x0564 }, /* R1201 - AIF1 DAC2 EQ Band 5 A */
+ { 0x04B2, 0x0559 }, /* R1202 - AIF1 DAC2 EQ Band 5 B */
+ { 0x04B3, 0x4000 }, /* R1203 - AIF1 DAC2 EQ Band 5 PG */
+ { 0x0500, 0x00C0 }, /* R1280 - AIF2 ADC Left Volume */
+ { 0x0501, 0x00C0 }, /* R1281 - AIF2 ADC Right Volume */
+ { 0x0502, 0x00C0 }, /* R1282 - AIF2 DAC Left Volume */
+ { 0x0503, 0x00C0 }, /* R1283 - AIF2 DAC Right Volume */
+ { 0x0510, 0x0000 }, /* R1296 - AIF2 ADC Filters */
+ { 0x0520, 0x0200 }, /* R1312 - AIF2 DAC Filters (1) */
+ { 0x0521, 0x0010 }, /* R1313 - AIF2 DAC Filters (2) */
+ { 0x0540, 0x0098 }, /* R1344 - AIF2 DRC (1) */
+ { 0x0541, 0x0845 }, /* R1345 - AIF2 DRC (2) */
+ { 0x0542, 0x0000 }, /* R1346 - AIF2 DRC (3) */
+ { 0x0543, 0x0000 }, /* R1347 - AIF2 DRC (4) */
+ { 0x0544, 0x0000 }, /* R1348 - AIF2 DRC (5) */
+ { 0x0580, 0x6318 }, /* R1408 - AIF2 EQ Gains (1) */
+ { 0x0581, 0x6300 }, /* R1409 - AIF2 EQ Gains (2) */
+ { 0x0582, 0x0FCA }, /* R1410 - AIF2 EQ Band 1 A */
+ { 0x0583, 0x0400 }, /* R1411 - AIF2 EQ Band 1 B */
+ { 0x0584, 0x00D8 }, /* R1412 - AIF2 EQ Band 1 PG */
+ { 0x0585, 0x1EB5 }, /* R1413 - AIF2 EQ Band 2 A */
+ { 0x0586, 0xF145 }, /* R1414 - AIF2 EQ Band 2 B */
+ { 0x0587, 0x0B75 }, /* R1415 - AIF2 EQ Band 2 C */
+ { 0x0588, 0x01C5 }, /* R1416 - AIF2 EQ Band 2 PG */
+ { 0x0589, 0x1C58 }, /* R1417 - AIF2 EQ Band 3 A */
+ { 0x058A, 0xF373 }, /* R1418 - AIF2 EQ Band 3 B */
+ { 0x058B, 0x0A54 }, /* R1419 - AIF2 EQ Band 3 C */
+ { 0x058C, 0x0558 }, /* R1420 - AIF2 EQ Band 3 PG */
+ { 0x058D, 0x168E }, /* R1421 - AIF2 EQ Band 4 A */
+ { 0x058E, 0xF829 }, /* R1422 - AIF2 EQ Band 4 B */
+ { 0x058F, 0x07AD }, /* R1423 - AIF2 EQ Band 4 C */
+ { 0x0590, 0x1103 }, /* R1424 - AIF2 EQ Band 4 PG */
+ { 0x0591, 0x0564 }, /* R1425 - AIF2 EQ Band 5 A */
+ { 0x0592, 0x0559 }, /* R1426 - AIF2 EQ Band 5 B */
+ { 0x0593, 0x4000 }, /* R1427 - AIF2 EQ Band 5 PG */
+ { 0x0600, 0x0000 }, /* R1536 - DAC1 Mixer Volumes */
+ { 0x0601, 0x0000 }, /* R1537 - DAC1 Left Mixer Routing */
+ { 0x0602, 0x0000 }, /* R1538 - DAC1 Right Mixer Routing */
+ { 0x0603, 0x0000 }, /* R1539 - DAC2 Mixer Volumes */
+ { 0x0604, 0x0000 }, /* R1540 - DAC2 Left Mixer Routing */
+ { 0x0605, 0x0000 }, /* R1541 - DAC2 Right Mixer Routing */
+ { 0x0606, 0x0000 }, /* R1542 - AIF1 ADC1 Left Mixer Routing */
+ { 0x0607, 0x0000 }, /* R1543 - AIF1 ADC1 Right Mixer Routing */
+ { 0x0608, 0x0000 }, /* R1544 - AIF1 ADC2 Left Mixer Routing */
+ { 0x0609, 0x0000 }, /* R1545 - AIF1 ADC2 Right Mixer Routing */
+ { 0x0610, 0x02C0 }, /* R1552 - DAC1 Left Volume */
+ { 0x0611, 0x02C0 }, /* R1553 - DAC1 Right Volume */
+ { 0x0612, 0x02C0 }, /* R1554 - DAC2 Left Volume */
+ { 0x0613, 0x02C0 }, /* R1555 - DAC2 Right Volume */
+ { 0x0614, 0x0000 }, /* R1556 - DAC Softmute */
+ { 0x0620, 0x0002 }, /* R1568 - Oversampling */
+ { 0x0621, 0x0000 }, /* R1569 - Sidetone */
+ { 0x0700, 0x8100 }, /* R1792 - GPIO 1 */
+ { 0x0701, 0xA101 }, /* R1793 - GPIO 2 */
+ { 0x0702, 0xA101 }, /* R1794 - GPIO 3 */
+ { 0x0703, 0xA101 }, /* R1795 - GPIO 4 */
+ { 0x0704, 0xA101 }, /* R1796 - GPIO 5 */
+ { 0x0705, 0xA101 }, /* R1797 - GPIO 6 */
+ { 0x0706, 0xA101 }, /* R1798 - GPIO 7 */
+ { 0x0707, 0xA101 }, /* R1799 - GPIO 8 */
+ { 0x0708, 0xA101 }, /* R1800 - GPIO 9 */
+ { 0x0709, 0xA101 }, /* R1801 - GPIO 10 */
+ { 0x070A, 0xA101 }, /* R1802 - GPIO 11 */
+ { 0x0720, 0x0000 }, /* R1824 - Pull Control (1) */
+ { 0x0721, 0x0156 }, /* R1825 - Pull Control (2) */
+ { 0x0730, 0x0000 }, /* R1840 - Interrupt Status 1 */
+ { 0x0731, 0x0000 }, /* R1841 - Interrupt Status 2 */
+ { 0x0732, 0x0000 }, /* R1842 - Interrupt Raw Status 2 */
+ { 0x0738, 0x07FF }, /* R1848 - Interrupt Status 1 Mask */
+ { 0x0739, 0xFFFF }, /* R1849 - Interrupt Status 2 Mask */
+ { 0x0740, 0x0000 }, /* R1856 - Interrupt Control */
+ { 0x0748, 0x003F }, /* R1864 - IRQ Debounce */
+};
+
+static struct reg_default wm8958_defaults[] = {
+ { 0x0000, 0x8958 }, /* R0 - Software Reset */
+ { 0x0001, 0x0000 }, /* R1 - Power Management (1) */
+ { 0x0002, 0x6000 }, /* R2 - Power Management (2) */
+ { 0x0003, 0x0000 }, /* R3 - Power Management (3) */
+ { 0x0004, 0x0000 }, /* R4 - Power Management (4) */
+ { 0x0005, 0x0000 }, /* R5 - Power Management (5) */
+ { 0x0006, 0x0000 }, /* R6 - Power Management (6) */
+ { 0x0015, 0x0000 }, /* R21 - Input Mixer (1) */
+ { 0x0018, 0x008B }, /* R24 - Left Line Input 1&2 Volume */
+ { 0x0019, 0x008B }, /* R25 - Left Line Input 3&4 Volume */
+ { 0x001A, 0x008B }, /* R26 - Right Line Input 1&2 Volume */
+ { 0x001B, 0x008B }, /* R27 - Right Line Input 3&4 Volume */
+ { 0x001C, 0x006D }, /* R28 - Left Output Volume */
+ { 0x001D, 0x006D }, /* R29 - Right Output Volume */
+ { 0x001E, 0x0066 }, /* R30 - Line Outputs Volume */
+ { 0x001F, 0x0020 }, /* R31 - HPOUT2 Volume */
+ { 0x0020, 0x0079 }, /* R32 - Left OPGA Volume */
+ { 0x0021, 0x0079 }, /* R33 - Right OPGA Volume */
+ { 0x0022, 0x0003 }, /* R34 - SPKMIXL Attenuation */
+ { 0x0023, 0x0003 }, /* R35 - SPKMIXR Attenuation */
+ { 0x0024, 0x0011 }, /* R36 - SPKOUT Mixers */
+ { 0x0025, 0x0140 }, /* R37 - ClassD */
+ { 0x0026, 0x0079 }, /* R38 - Speaker Volume Left */
+ { 0x0027, 0x0079 }, /* R39 - Speaker Volume Right */
+ { 0x0028, 0x0000 }, /* R40 - Input Mixer (2) */
+ { 0x0029, 0x0000 }, /* R41 - Input Mixer (3) */
+ { 0x002A, 0x0000 }, /* R42 - Input Mixer (4) */
+ { 0x002B, 0x0000 }, /* R43 - Input Mixer (5) */
+ { 0x002C, 0x0000 }, /* R44 - Input Mixer (6) */
+ { 0x002D, 0x0000 }, /* R45 - Output Mixer (1) */
+ { 0x002E, 0x0000 }, /* R46 - Output Mixer (2) */
+ { 0x002F, 0x0000 }, /* R47 - Output Mixer (3) */
+ { 0x0030, 0x0000 }, /* R48 - Output Mixer (4) */
+ { 0x0031, 0x0000 }, /* R49 - Output Mixer (5) */
+ { 0x0032, 0x0000 }, /* R50 - Output Mixer (6) */
+ { 0x0033, 0x0000 }, /* R51 - HPOUT2 Mixer */
+ { 0x0034, 0x0000 }, /* R52 - Line Mixer (1) */
+ { 0x0035, 0x0000 }, /* R53 - Line Mixer (2) */
+ { 0x0036, 0x0000 }, /* R54 - Speaker Mixer */
+ { 0x0037, 0x0000 }, /* R55 - Additional Control */
+ { 0x0038, 0x0000 }, /* R56 - AntiPOP (1) */
+ { 0x0039, 0x0180 }, /* R57 - AntiPOP (2) */
+ { 0x003B, 0x000D }, /* R59 - LDO 1 */
+ { 0x003C, 0x0005 }, /* R60 - LDO 2 */
+ { 0x003D, 0x0039 }, /* R61 - MICBIAS1 */
+ { 0x003E, 0x0039 }, /* R62 - MICBIAS2 */
+ { 0x004C, 0x1F25 }, /* R76 - Charge Pump (1) */
+ { 0x004D, 0xAB19 }, /* R77 - Charge Pump (2) */
+ { 0x0051, 0x0004 }, /* R81 - Class W (1) */
+ { 0x0055, 0x054A }, /* R85 - DC Servo (2) */
+ { 0x0057, 0x0000 }, /* R87 - DC Servo (4) */
+ { 0x0060, 0x0000 }, /* R96 - Analogue HP (1) */
+ { 0x00C5, 0x0000 }, /* R197 - Class D Test (5) */
+ { 0x00D0, 0x5600 }, /* R208 - Mic Detect 1 */
+ { 0x00D1, 0x007F }, /* R209 - Mic Detect 2 */
+ { 0x0101, 0x8004 }, /* R257 - Control Interface */
+ { 0x0110, 0x0000 }, /* R272 - Write Sequencer Ctrl (1) */
+ { 0x0111, 0x0000 }, /* R273 - Write Sequencer Ctrl (2) */
+ { 0x0200, 0x0000 }, /* R512 - AIF1 Clocking (1) */
+ { 0x0201, 0x0000 }, /* R513 - AIF1 Clocking (2) */
+ { 0x0204, 0x0000 }, /* R516 - AIF2 Clocking (1) */
+ { 0x0205, 0x0000 }, /* R517 - AIF2 Clocking (2) */
+ { 0x0208, 0x0000 }, /* R520 - Clocking (1) */
+ { 0x0209, 0x0000 }, /* R521 - Clocking (2) */
+ { 0x0210, 0x0083 }, /* R528 - AIF1 Rate */
+ { 0x0211, 0x0083 }, /* R529 - AIF2 Rate */
+ { 0x0220, 0x0000 }, /* R544 - FLL1 Control (1) */
+ { 0x0221, 0x0000 }, /* R545 - FLL1 Control (2) */
+ { 0x0222, 0x0000 }, /* R546 - FLL1 Control (3) */
+ { 0x0223, 0x0000 }, /* R547 - FLL1 Control (4) */
+ { 0x0224, 0x0C80 }, /* R548 - FLL1 Control (5) */
+ { 0x0226, 0x0000 }, /* R550 - FLL1 EFS 1 */
+ { 0x0227, 0x0006 }, /* R551 - FLL1 EFS 2 */
+ { 0x0240, 0x0000 }, /* R576 - FLL2 Control (1) */
+ { 0x0241, 0x0000 }, /* R577 - FLL2 Control (2) */
+ { 0x0242, 0x0000 }, /* R578 - FLL2 Control (3) */
+ { 0x0243, 0x0000 }, /* R579 - FLL2 Control (4) */
+ { 0x0244, 0x0C80 }, /* R580 - FLL2 Control (5) */
+ { 0x0246, 0x0000 }, /* R582 - FLL2 EFS 1 */
+ { 0x0247, 0x0006 }, /* R583 - FLL2 EFS 2 */
+ { 0x0300, 0x4050 }, /* R768 - AIF1 Control (1) */
+ { 0x0301, 0x4000 }, /* R769 - AIF1 Control (2) */
+ { 0x0302, 0x0000 }, /* R770 - AIF1 Master/Slave */
+ { 0x0303, 0x0040 }, /* R771 - AIF1 BCLK */
+ { 0x0304, 0x0040 }, /* R772 - AIF1ADC LRCLK */
+ { 0x0305, 0x0040 }, /* R773 - AIF1DAC LRCLK */
+ { 0x0306, 0x0004 }, /* R774 - AIF1DAC Data */
+ { 0x0307, 0x0100 }, /* R775 - AIF1ADC Data */
+ { 0x0310, 0x4053 }, /* R784 - AIF2 Control (1) */
+ { 0x0311, 0x4000 }, /* R785 - AIF2 Control (2) */
+ { 0x0312, 0x0000 }, /* R786 - AIF2 Master/Slave */
+ { 0x0313, 0x0040 }, /* R787 - AIF2 BCLK */
+ { 0x0314, 0x0040 }, /* R788 - AIF2ADC LRCLK */
+ { 0x0315, 0x0040 }, /* R789 - AIF2DAC LRCLK */
+ { 0x0316, 0x0000 }, /* R790 - AIF2DAC Data */
+ { 0x0317, 0x0000 }, /* R791 - AIF2ADC Data */
+ { 0x0320, 0x0040 }, /* R800 - AIF3 Control (1) */
+ { 0x0321, 0x0000 }, /* R801 - AIF3 Control (2) */
+ { 0x0322, 0x0000 }, /* R802 - AIF3DAC Data */
+ { 0x0323, 0x0000 }, /* R803 - AIF3ADC Data */
+ { 0x0400, 0x00C0 }, /* R1024 - AIF1 ADC1 Left Volume */
+ { 0x0401, 0x00C0 }, /* R1025 - AIF1 ADC1 Right Volume */
+ { 0x0402, 0x00C0 }, /* R1026 - AIF1 DAC1 Left Volume */
+ { 0x0403, 0x00C0 }, /* R1027 - AIF1 DAC1 Right Volume */
+ { 0x0404, 0x00C0 }, /* R1028 - AIF1 ADC2 Left Volume */
+ { 0x0405, 0x00C0 }, /* R1029 - AIF1 ADC2 Right Volume */
+ { 0x0406, 0x00C0 }, /* R1030 - AIF1 DAC2 Left Volume */
+ { 0x0407, 0x00C0 }, /* R1031 - AIF1 DAC2 Right Volume */
+ { 0x0410, 0x0000 }, /* R1040 - AIF1 ADC1 Filters */
+ { 0x0411, 0x0000 }, /* R1041 - AIF1 ADC2 Filters */
+ { 0x0420, 0x0200 }, /* R1056 - AIF1 DAC1 Filters (1) */
+ { 0x0421, 0x0010 }, /* R1057 - AIF1 DAC1 Filters (2) */
+ { 0x0422, 0x0200 }, /* R1058 - AIF1 DAC2 Filters (1) */
+ { 0x0423, 0x0010 }, /* R1059 - AIF1 DAC2 Filters (2) */
+ { 0x0430, 0x0068 }, /* R1072 - AIF1 DAC1 Noise Gate */
+ { 0x0431, 0x0068 }, /* R1073 - AIF1 DAC2 Noise Gate */
+ { 0x0440, 0x0098 }, /* R1088 - AIF1 DRC1 (1) */
+ { 0x0441, 0x0845 }, /* R1089 - AIF1 DRC1 (2) */
+ { 0x0442, 0x0000 }, /* R1090 - AIF1 DRC1 (3) */
+ { 0x0443, 0x0000 }, /* R1091 - AIF1 DRC1 (4) */
+ { 0x0444, 0x0000 }, /* R1092 - AIF1 DRC1 (5) */
+ { 0x0450, 0x0098 }, /* R1104 - AIF1 DRC2 (1) */
+ { 0x0451, 0x0845 }, /* R1105 - AIF1 DRC2 (2) */
+ { 0x0452, 0x0000 }, /* R1106 - AIF1 DRC2 (3) */
+ { 0x0453, 0x0000 }, /* R1107 - AIF1 DRC2 (4) */
+ { 0x0454, 0x0000 }, /* R1108 - AIF1 DRC2 (5) */
+ { 0x0480, 0x6318 }, /* R1152 - AIF1 DAC1 EQ Gains (1) */
+ { 0x0481, 0x6300 }, /* R1153 - AIF1 DAC1 EQ Gains (2) */
+ { 0x0482, 0x0FCA }, /* R1154 - AIF1 DAC1 EQ Band 1 A */
+ { 0x0483, 0x0400 }, /* R1155 - AIF1 DAC1 EQ Band 1 B */
+ { 0x0484, 0x00D8 }, /* R1156 - AIF1 DAC1 EQ Band 1 PG */
+ { 0x0485, 0x1EB5 }, /* R1157 - AIF1 DAC1 EQ Band 2 A */
+ { 0x0486, 0xF145 }, /* R1158 - AIF1 DAC1 EQ Band 2 B */
+ { 0x0487, 0x0B75 }, /* R1159 - AIF1 DAC1 EQ Band 2 C */
+ { 0x0488, 0x01C5 }, /* R1160 - AIF1 DAC1 EQ Band 2 PG */
+ { 0x0489, 0x1C58 }, /* R1161 - AIF1 DAC1 EQ Band 3 A */
+ { 0x048A, 0xF373 }, /* R1162 - AIF1 DAC1 EQ Band 3 B */
+ { 0x048B, 0x0A54 }, /* R1163 - AIF1 DAC1 EQ Band 3 C */
+ { 0x048C, 0x0558 }, /* R1164 - AIF1 DAC1 EQ Band 3 PG */
+ { 0x048D, 0x168E }, /* R1165 - AIF1 DAC1 EQ Band 4 A */
+ { 0x048E, 0xF829 }, /* R1166 - AIF1 DAC1 EQ Band 4 B */
+ { 0x048F, 0x07AD }, /* R1167 - AIF1 DAC1 EQ Band 4 C */
+ { 0x0490, 0x1103 }, /* R1168 - AIF1 DAC1 EQ Band 4 PG */
+ { 0x0491, 0x0564 }, /* R1169 - AIF1 DAC1 EQ Band 5 A */
+ { 0x0492, 0x0559 }, /* R1170 - AIF1 DAC1 EQ Band 5 B */
+ { 0x0493, 0x4000 }, /* R1171 - AIF1 DAC1 EQ Band 5 PG */
+ { 0x0494, 0x0000 }, /* R1172 - AIF1 DAC1 EQ Band 1 C */
+ { 0x04A0, 0x6318 }, /* R1184 - AIF1 DAC2 EQ Gains (1) */
+ { 0x04A1, 0x6300 }, /* R1185 - AIF1 DAC2 EQ Gains (2) */
+ { 0x04A2, 0x0FCA }, /* R1186 - AIF1 DAC2 EQ Band 1 A */
+ { 0x04A3, 0x0400 }, /* R1187 - AIF1 DAC2 EQ Band 1 B */
+ { 0x04A4, 0x00D8 }, /* R1188 - AIF1 DAC2 EQ Band 1 PG */
+ { 0x04A5, 0x1EB5 }, /* R1189 - AIF1 DAC2 EQ Band 2 A */
+ { 0x04A6, 0xF145 }, /* R1190 - AIF1 DAC2 EQ Band 2 B */
+ { 0x04A7, 0x0B75 }, /* R1191 - AIF1 DAC2 EQ Band 2 C */
+ { 0x04A8, 0x01C5 }, /* R1192 - AIF1 DAC2 EQ Band 2 PG */
+ { 0x04A9, 0x1C58 }, /* R1193 - AIF1 DAC2 EQ Band 3 A */
+ { 0x04AA, 0xF373 }, /* R1194 - AIF1 DAC2 EQ Band 3 B */
+ { 0x04AB, 0x0A54 }, /* R1195 - AIF1 DAC2 EQ Band 3 C */
+ { 0x04AC, 0x0558 }, /* R1196 - AIF1 DAC2 EQ Band 3 PG */
+ { 0x04AD, 0x168E }, /* R1197 - AIF1 DAC2 EQ Band 4 A */
+ { 0x04AE, 0xF829 }, /* R1198 - AIF1 DAC2 EQ Band 4 B */
+ { 0x04AF, 0x07AD }, /* R1199 - AIF1 DAC2 EQ Band 4 C */
+ { 0x04B0, 0x1103 }, /* R1200 - AIF1 DAC2 EQ Band 4 PG */
+ { 0x04B1, 0x0564 }, /* R1201 - AIF1 DAC2 EQ Band 5 A */
+ { 0x04B2, 0x0559 }, /* R1202 - AIF1 DAC2 EQ Band 5 B */
+ { 0x04B3, 0x4000 }, /* R1203 - AIF1 DAC2 EQ Band 5 PG */
+ { 0x04B4, 0x0000 }, /* R1204 - AIF1 DAC2 EQ Band 1 C */
+ { 0x0500, 0x00C0 }, /* R1280 - AIF2 ADC Left Volume */
+ { 0x0501, 0x00C0 }, /* R1281 - AIF2 ADC Right Volume */
+ { 0x0502, 0x00C0 }, /* R1282 - AIF2 DAC Left Volume */
+ { 0x0503, 0x00C0 }, /* R1283 - AIF2 DAC Right Volume */
+ { 0x0510, 0x0000 }, /* R1296 - AIF2 ADC Filters */
+ { 0x0520, 0x0200 }, /* R1312 - AIF2 DAC Filters (1) */
+ { 0x0521, 0x0010 }, /* R1313 - AIF2 DAC Filters (2) */
+ { 0x0530, 0x0068 }, /* R1328 - AIF2 DAC Noise Gate */
+ { 0x0540, 0x0098 }, /* R1344 - AIF2 DRC (1) */
+ { 0x0541, 0x0845 }, /* R1345 - AIF2 DRC (2) */
+ { 0x0542, 0x0000 }, /* R1346 - AIF2 DRC (3) */
+ { 0x0543, 0x0000 }, /* R1347 - AIF2 DRC (4) */
+ { 0x0544, 0x0000 }, /* R1348 - AIF2 DRC (5) */
+ { 0x0580, 0x6318 }, /* R1408 - AIF2 EQ Gains (1) */
+ { 0x0581, 0x6300 }, /* R1409 - AIF2 EQ Gains (2) */
+ { 0x0582, 0x0FCA }, /* R1410 - AIF2 EQ Band 1 A */
+ { 0x0583, 0x0400 }, /* R1411 - AIF2 EQ Band 1 B */
+ { 0x0584, 0x00D8 }, /* R1412 - AIF2 EQ Band 1 PG */
+ { 0x0585, 0x1EB5 }, /* R1413 - AIF2 EQ Band 2 A */
+ { 0x0586, 0xF145 }, /* R1414 - AIF2 EQ Band 2 B */
+ { 0x0587, 0x0B75 }, /* R1415 - AIF2 EQ Band 2 C */
+ { 0x0588, 0x01C5 }, /* R1416 - AIF2 EQ Band 2 PG */
+ { 0x0589, 0x1C58 }, /* R1417 - AIF2 EQ Band 3 A */
+ { 0x058A, 0xF373 }, /* R1418 - AIF2 EQ Band 3 B */
+ { 0x058B, 0x0A54 }, /* R1419 - AIF2 EQ Band 3 C */
+ { 0x058C, 0x0558 }, /* R1420 - AIF2 EQ Band 3 PG */
+ { 0x058D, 0x168E }, /* R1421 - AIF2 EQ Band 4 A */
+ { 0x058E, 0xF829 }, /* R1422 - AIF2 EQ Band 4 B */
+ { 0x058F, 0x07AD }, /* R1423 - AIF2 EQ Band 4 C */
+ { 0x0590, 0x1103 }, /* R1424 - AIF2 EQ Band 4 PG */
+ { 0x0591, 0x0564 }, /* R1425 - AIF2 EQ Band 5 A */
+ { 0x0592, 0x0559 }, /* R1426 - AIF2 EQ Band 5 B */
+ { 0x0593, 0x4000 }, /* R1427 - AIF2 EQ Band 5 PG */
+ { 0x0594, 0x0000 }, /* R1428 - AIF2 EQ Band 1 C */
+ { 0x0600, 0x0000 }, /* R1536 - DAC1 Mixer Volumes */
+ { 0x0601, 0x0000 }, /* R1537 - DAC1 Left Mixer Routing */
+ { 0x0602, 0x0000 }, /* R1538 - DAC1 Right Mixer Routing */
+ { 0x0603, 0x0000 }, /* R1539 - DAC2 Mixer Volumes */
+ { 0x0604, 0x0000 }, /* R1540 - DAC2 Left Mixer Routing */
+ { 0x0605, 0x0000 }, /* R1541 - DAC2 Right Mixer Routing */
+ { 0x0606, 0x0000 }, /* R1542 - AIF1 ADC1 Left Mixer Routing */
+ { 0x0607, 0x0000 }, /* R1543 - AIF1 ADC1 Right Mixer Routing */
+ { 0x0608, 0x0000 }, /* R1544 - AIF1 ADC2 Left Mixer Routing */
+ { 0x0609, 0x0000 }, /* R1545 - AIF1 ADC2 Right Mixer Routing */
+ { 0x0610, 0x02C0 }, /* R1552 - DAC1 Left Volume */
+ { 0x0611, 0x02C0 }, /* R1553 - DAC1 Right Volume */
+ { 0x0612, 0x02C0 }, /* R1554 - DAC2 Left Volume */
+ { 0x0613, 0x02C0 }, /* R1555 - DAC2 Right Volume */
+ { 0x0614, 0x0000 }, /* R1556 - DAC Softmute */
+ { 0x0620, 0x0002 }, /* R1568 - Oversampling */
+ { 0x0621, 0x0000 }, /* R1569 - Sidetone */
+ { 0x0700, 0x8100 }, /* R1792 - GPIO 1 */
+ { 0x0701, 0xA101 }, /* R1793 - Pull Control (MCLK2) */
+ { 0x0702, 0xA101 }, /* R1794 - Pull Control (BCLK2) */
+ { 0x0703, 0xA101 }, /* R1795 - Pull Control (DACLRCLK2) */
+ { 0x0704, 0xA101 }, /* R1796 - Pull Control (DACDAT2) */
+ { 0x0705, 0xA101 }, /* R1797 - GPIO 6 */
+ { 0x0707, 0xA101 }, /* R1799 - GPIO 8 */
+ { 0x0708, 0xA101 }, /* R1800 - GPIO 9 */
+ { 0x0709, 0xA101 }, /* R1801 - GPIO 10 */
+ { 0x070A, 0xA101 }, /* R1802 - GPIO 11 */
+ { 0x0720, 0x0000 }, /* R1824 - Pull Control (1) */
+ { 0x0721, 0x0156 }, /* R1825 - Pull Control (2) */
+ { 0x0738, 0x07FF }, /* R1848 - Interrupt Status 1 Mask */
+ { 0x0739, 0xFFEF }, /* R1849 - Interrupt Status 2 Mask */
+ { 0x0740, 0x0000 }, /* R1856 - Interrupt Control */
+ { 0x0748, 0x003F }, /* R1864 - IRQ Debounce */
+ { 0x0900, 0x1C00 }, /* R2304 - DSP2_Program */
+ { 0x0901, 0x0000 }, /* R2305 - DSP2_Config */
+ { 0x0A0D, 0x0000 }, /* R2573 - DSP2_ExecControl */
+ { 0x2400, 0x003F }, /* R9216 - MBC Band 1 K (1) */
+ { 0x2401, 0x8BD8 }, /* R9217 - MBC Band 1 K (2) */
+ { 0x2402, 0x0032 }, /* R9218 - MBC Band 1 N1 (1) */
+ { 0x2403, 0xF52D }, /* R9219 - MBC Band 1 N1 (2) */
+ { 0x2404, 0x0065 }, /* R9220 - MBC Band 1 N2 (1) */
+ { 0x2405, 0xAC8C }, /* R9221 - MBC Band 1 N2 (2) */
+ { 0x2406, 0x006B }, /* R9222 - MBC Band 1 N3 (1) */
+ { 0x2407, 0xE087 }, /* R9223 - MBC Band 1 N3 (2) */
+ { 0x2408, 0x0072 }, /* R9224 - MBC Band 1 N4 (1) */
+ { 0x2409, 0x1483 }, /* R9225 - MBC Band 1 N4 (2) */
+ { 0x240A, 0x0072 }, /* R9226 - MBC Band 1 N5 (1) */
+ { 0x240B, 0x1483 }, /* R9227 - MBC Band 1 N5 (2) */
+ { 0x240C, 0x0043 }, /* R9228 - MBC Band 1 X1 (1) */
+ { 0x240D, 0x3525 }, /* R9229 - MBC Band 1 X1 (2) */
+ { 0x240E, 0x0006 }, /* R9230 - MBC Band 1 X2 (1) */
+ { 0x240F, 0x6A4A }, /* R9231 - MBC Band 1 X2 (2) */
+ { 0x2410, 0x0043 }, /* R9232 - MBC Band 1 X3 (1) */
+ { 0x2411, 0x6079 }, /* R9233 - MBC Band 1 X3 (2) */
+ { 0x2412, 0x000C }, /* R9234 - MBC Band 1 Attack (1) */
+ { 0x2413, 0xCCCD }, /* R9235 - MBC Band 1 Attack (2) */
+ { 0x2414, 0x0000 }, /* R9236 - MBC Band 1 Decay (1) */
+ { 0x2415, 0x0800 }, /* R9237 - MBC Band 1 Decay (2) */
+ { 0x2416, 0x003F }, /* R9238 - MBC Band 2 K (1) */
+ { 0x2417, 0x8BD8 }, /* R9239 - MBC Band 2 K (2) */
+ { 0x2418, 0x0032 }, /* R9240 - MBC Band 2 N1 (1) */
+ { 0x2419, 0xF52D }, /* R9241 - MBC Band 2 N1 (2) */
+ { 0x241A, 0x0065 }, /* R9242 - MBC Band 2 N2 (1) */
+ { 0x241B, 0xAC8C }, /* R9243 - MBC Band 2 N2 (2) */
+ { 0x241C, 0x006B }, /* R9244 - MBC Band 2 N3 (1) */
+ { 0x241D, 0xE087 }, /* R9245 - MBC Band 2 N3 (2) */
+ { 0x241E, 0x0072 }, /* R9246 - MBC Band 2 N4 (1) */
+ { 0x241F, 0x1483 }, /* R9247 - MBC Band 2 N4 (2) */
+ { 0x2420, 0x0072 }, /* R9248 - MBC Band 2 N5 (1) */
+ { 0x2421, 0x1483 }, /* R9249 - MBC Band 2 N5 (2) */
+ { 0x2422, 0x0043 }, /* R9250 - MBC Band 2 X1 (1) */
+ { 0x2423, 0x3525 }, /* R9251 - MBC Band 2 X1 (2) */
+ { 0x2424, 0x0006 }, /* R9252 - MBC Band 2 X2 (1) */
+ { 0x2425, 0x6A4A }, /* R9253 - MBC Band 2 X2 (2) */
+ { 0x2426, 0x0043 }, /* R9254 - MBC Band 2 X3 (1) */
+ { 0x2427, 0x6079 }, /* R9255 - MBC Band 2 X3 (2) */
+ { 0x2428, 0x000C }, /* R9256 - MBC Band 2 Attack (1) */
+ { 0x2429, 0xCCCD }, /* R9257 - MBC Band 2 Attack (2) */
+ { 0x242A, 0x0000 }, /* R9258 - MBC Band 2 Decay (1) */
+ { 0x242B, 0x0800 }, /* R9259 - MBC Band 2 Decay (2) */
+ { 0x242C, 0x005A }, /* R9260 - MBC_B2_PG2 (1) */
+ { 0x242D, 0x7EFA }, /* R9261 - MBC_B2_PG2 (2) */
+ { 0x242E, 0x005A }, /* R9262 - MBC_B1_PG2 (1) */
+ { 0x242F, 0x7EFA }, /* R9263 - MBC_B1_PG2 (2) */
+ { 0x2600, 0x00A7 }, /* R9728 - MBC Crossover (1) */
+ { 0x2601, 0x0D1C }, /* R9729 - MBC Crossover (2) */
+ { 0x2602, 0x0083 }, /* R9730 - MBC HPF (1) */
+ { 0x2603, 0x98AD }, /* R9731 - MBC HPF (2) */
+ { 0x2606, 0x0008 }, /* R9734 - MBC LPF (1) */
+ { 0x2607, 0xE7A2 }, /* R9735 - MBC LPF (2) */
+ { 0x260A, 0x0055 }, /* R9738 - MBC RMS Limit (1) */
+ { 0x260B, 0x8C4B }, /* R9739 - MBC RMS Limit (2) */
+};
+
+static bool wm1811_readable_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case WM8994_SOFTWARE_RESET:
+ case WM8994_POWER_MANAGEMENT_1:
+ case WM8994_POWER_MANAGEMENT_2:
+ case WM8994_POWER_MANAGEMENT_3:
+ case WM8994_POWER_MANAGEMENT_4:
+ case WM8994_POWER_MANAGEMENT_5:
+ case WM8994_POWER_MANAGEMENT_6:
+ case WM8994_INPUT_MIXER_1:
+ case WM8994_LEFT_LINE_INPUT_1_2_VOLUME:
+ case WM8994_LEFT_LINE_INPUT_3_4_VOLUME:
+ case WM8994_RIGHT_LINE_INPUT_1_2_VOLUME:
+ case WM8994_RIGHT_LINE_INPUT_3_4_VOLUME:
+ case WM8994_LEFT_OUTPUT_VOLUME:
+ case WM8994_RIGHT_OUTPUT_VOLUME:
+ case WM8994_LINE_OUTPUTS_VOLUME:
+ case WM8994_HPOUT2_VOLUME:
+ case WM8994_LEFT_OPGA_VOLUME:
+ case WM8994_RIGHT_OPGA_VOLUME:
+ case WM8994_SPKMIXL_ATTENUATION:
+ case WM8994_SPKMIXR_ATTENUATION:
+ case WM8994_SPKOUT_MIXERS:
+ case WM8994_CLASSD:
+ case WM8994_SPEAKER_VOLUME_LEFT:
+ case WM8994_SPEAKER_VOLUME_RIGHT:
+ case WM8994_INPUT_MIXER_2:
+ case WM8994_INPUT_MIXER_3:
+ case WM8994_INPUT_MIXER_4:
+ case WM8994_INPUT_MIXER_5:
+ case WM8994_INPUT_MIXER_6:
+ case WM8994_OUTPUT_MIXER_1:
+ case WM8994_OUTPUT_MIXER_2:
+ case WM8994_OUTPUT_MIXER_3:
+ case WM8994_OUTPUT_MIXER_4:
+ case WM8994_OUTPUT_MIXER_5:
+ case WM8994_OUTPUT_MIXER_6:
+ case WM8994_HPOUT2_MIXER:
+ case WM8994_LINE_MIXER_1:
+ case WM8994_LINE_MIXER_2:
+ case WM8994_SPEAKER_MIXER:
+ case WM8994_ADDITIONAL_CONTROL:
+ case WM8994_ANTIPOP_1:
+ case WM8994_ANTIPOP_2:
+ case WM8994_LDO_1:
+ case WM8994_LDO_2:
+ case WM8958_MICBIAS1:
+ case WM8958_MICBIAS2:
+ case WM8994_CHARGE_PUMP_1:
+ case WM8958_CHARGE_PUMP_2:
+ case WM8994_CLASS_W_1:
+ case WM8994_DC_SERVO_1:
+ case WM8994_DC_SERVO_2:
+ case WM8994_DC_SERVO_READBACK:
+ case WM8994_DC_SERVO_4:
+ case WM8994_ANALOGUE_HP_1:
+ case WM8958_MIC_DETECT_1:
+ case WM8958_MIC_DETECT_2:
+ case WM8958_MIC_DETECT_3:
+ case WM8994_CHIP_REVISION:
+ case WM8994_CONTROL_INTERFACE:
+ case WM8994_AIF1_CLOCKING_1:
+ case WM8994_AIF1_CLOCKING_2:
+ case WM8994_AIF2_CLOCKING_1:
+ case WM8994_AIF2_CLOCKING_2:
+ case WM8994_CLOCKING_1:
+ case WM8994_CLOCKING_2:
+ case WM8994_AIF1_RATE:
+ case WM8994_AIF2_RATE:
+ case WM8994_RATE_STATUS:
+ case WM8994_FLL1_CONTROL_1:
+ case WM8994_FLL1_CONTROL_2:
+ case WM8994_FLL1_CONTROL_3:
+ case WM8994_FLL1_CONTROL_4:
+ case WM8994_FLL1_CONTROL_5:
+ case WM8958_FLL1_EFS_1:
+ case WM8958_FLL1_EFS_2:
+ case WM8994_FLL2_CONTROL_1:
+ case WM8994_FLL2_CONTROL_2:
+ case WM8994_FLL2_CONTROL_3:
+ case WM8994_FLL2_CONTROL_4:
+ case WM8994_FLL2_CONTROL_5:
+ case WM8958_FLL2_EFS_1:
+ case WM8958_FLL2_EFS_2:
+ case WM8994_AIF1_CONTROL_1:
+ case WM8994_AIF1_CONTROL_2:
+ case WM8994_AIF1_MASTER_SLAVE:
+ case WM8994_AIF1_BCLK:
+ case WM8994_AIF1ADC_LRCLK:
+ case WM8994_AIF1DAC_LRCLK:
+ case WM8994_AIF1DAC_DATA:
+ case WM8994_AIF1ADC_DATA:
+ case WM8994_AIF2_CONTROL_1:
+ case WM8994_AIF2_CONTROL_2:
+ case WM8994_AIF2_MASTER_SLAVE:
+ case WM8994_AIF2_BCLK:
+ case WM8994_AIF2ADC_LRCLK:
+ case WM8994_AIF2DAC_LRCLK:
+ case WM8994_AIF2DAC_DATA:
+ case WM8994_AIF2ADC_DATA:
+ case WM1811_AIF2TX_CONTROL:
+ case WM8958_AIF3_CONTROL_1:
+ case WM8958_AIF3_CONTROL_2:
+ case WM8958_AIF3DAC_DATA:
+ case WM8958_AIF3ADC_DATA:
+ case WM8994_AIF1_ADC1_LEFT_VOLUME:
+ case WM8994_AIF1_ADC1_RIGHT_VOLUME:
+ case WM8994_AIF1_DAC1_LEFT_VOLUME:
+ case WM8994_AIF1_DAC1_RIGHT_VOLUME:
+ case WM8994_AIF1_ADC1_FILTERS:
+ case WM8994_AIF1_DAC1_FILTERS_1:
+ case WM8994_AIF1_DAC1_FILTERS_2:
+ case WM8958_AIF1_DAC1_NOISE_GATE:
+ case WM8994_AIF1_DRC1_1:
+ case WM8994_AIF1_DRC1_2:
+ case WM8994_AIF1_DRC1_3:
+ case WM8994_AIF1_DRC1_4:
+ case WM8994_AIF1_DRC1_5:
+ case WM8994_AIF1_DAC1_EQ_GAINS_1:
+ case WM8994_AIF1_DAC1_EQ_GAINS_2:
+ case WM8994_AIF1_DAC1_EQ_BAND_1_A:
+ case WM8994_AIF1_DAC1_EQ_BAND_1_B:
+ case WM8994_AIF1_DAC1_EQ_BAND_1_PG:
+ case WM8994_AIF1_DAC1_EQ_BAND_2_A:
+ case WM8994_AIF1_DAC1_EQ_BAND_2_B:
+ case WM8994_AIF1_DAC1_EQ_BAND_2_C:
+ case WM8994_AIF1_DAC1_EQ_BAND_2_PG:
+ case WM8994_AIF1_DAC1_EQ_BAND_3_A:
+ case WM8994_AIF1_DAC1_EQ_BAND_3_B:
+ case WM8994_AIF1_DAC1_EQ_BAND_3_C:
+ case WM8994_AIF1_DAC1_EQ_BAND_3_PG:
+ case WM8994_AIF1_DAC1_EQ_BAND_4_A:
+ case WM8994_AIF1_DAC1_EQ_BAND_4_B:
+ case WM8994_AIF1_DAC1_EQ_BAND_4_C:
+ case WM8994_AIF1_DAC1_EQ_BAND_4_PG:
+ case WM8994_AIF1_DAC1_EQ_BAND_5_A:
+ case WM8994_AIF1_DAC1_EQ_BAND_5_B:
+ case WM8994_AIF1_DAC1_EQ_BAND_5_PG:
+ case WM8994_AIF1_DAC1_EQ_BAND_1_C:
+ case WM8994_AIF2_ADC_LEFT_VOLUME:
+ case WM8994_AIF2_ADC_RIGHT_VOLUME:
+ case WM8994_AIF2_DAC_LEFT_VOLUME:
+ case WM8994_AIF2_DAC_RIGHT_VOLUME:
+ case WM8994_AIF2_ADC_FILTERS:
+ case WM8994_AIF2_DAC_FILTERS_1:
+ case WM8994_AIF2_DAC_FILTERS_2:
+ case WM8958_AIF2_DAC_NOISE_GATE:
+ case WM8994_AIF2_DRC_1:
+ case WM8994_AIF2_DRC_2:
+ case WM8994_AIF2_DRC_3:
+ case WM8994_AIF2_DRC_4:
+ case WM8994_AIF2_DRC_5:
+ case WM8994_AIF2_EQ_GAINS_1:
+ case WM8994_AIF2_EQ_GAINS_2:
+ case WM8994_AIF2_EQ_BAND_1_A:
+ case WM8994_AIF2_EQ_BAND_1_B:
+ case WM8994_AIF2_EQ_BAND_1_PG:
+ case WM8994_AIF2_EQ_BAND_2_A:
+ case WM8994_AIF2_EQ_BAND_2_B:
+ case WM8994_AIF2_EQ_BAND_2_C:
+ case WM8994_AIF2_EQ_BAND_2_PG:
+ case WM8994_AIF2_EQ_BAND_3_A:
+ case WM8994_AIF2_EQ_BAND_3_B:
+ case WM8994_AIF2_EQ_BAND_3_C:
+ case WM8994_AIF2_EQ_BAND_3_PG:
+ case WM8994_AIF2_EQ_BAND_4_A:
+ case WM8994_AIF2_EQ_BAND_4_B:
+ case WM8994_AIF2_EQ_BAND_4_C:
+ case WM8994_AIF2_EQ_BAND_4_PG:
+ case WM8994_AIF2_EQ_BAND_5_A:
+ case WM8994_AIF2_EQ_BAND_5_B:
+ case WM8994_AIF2_EQ_BAND_5_PG:
+ case WM8994_AIF2_EQ_BAND_1_C:
+ case WM8994_DAC1_MIXER_VOLUMES:
+ case WM8994_DAC1_LEFT_MIXER_ROUTING:
+ case WM8994_DAC1_RIGHT_MIXER_ROUTING:
+ case WM8994_DAC2_MIXER_VOLUMES:
+ case WM8994_DAC2_LEFT_MIXER_ROUTING:
+ case WM8994_DAC2_RIGHT_MIXER_ROUTING:
+ case WM8994_AIF1_ADC1_LEFT_MIXER_ROUTING:
+ case WM8994_AIF1_ADC1_RIGHT_MIXER_ROUTING:
+ case WM8994_DAC1_LEFT_VOLUME:
+ case WM8994_DAC1_RIGHT_VOLUME:
+ case WM8994_DAC2_LEFT_VOLUME:
+ case WM8994_DAC2_RIGHT_VOLUME:
+ case WM8994_DAC_SOFTMUTE:
+ case WM8994_OVERSAMPLING:
+ case WM8994_SIDETONE:
+ case WM8994_GPIO_1:
+ case WM8994_GPIO_2:
+ case WM8994_GPIO_3:
+ case WM8994_GPIO_4:
+ case WM8994_GPIO_5:
+ case WM8994_GPIO_6:
+ case WM8994_GPIO_8:
+ case WM8994_GPIO_9:
+ case WM8994_GPIO_10:
+ case WM8994_GPIO_11:
+ case WM8994_PULL_CONTROL_1:
+ case WM8994_PULL_CONTROL_2:
+ case WM8994_INTERRUPT_STATUS_1:
+ case WM8994_INTERRUPT_STATUS_2:
+ case WM8994_INTERRUPT_RAW_STATUS_2:
+ case WM8994_INTERRUPT_STATUS_1_MASK:
+ case WM8994_INTERRUPT_STATUS_2_MASK:
+ case WM8994_INTERRUPT_CONTROL:
+ case WM8994_IRQ_DEBOUNCE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool wm8994_readable_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case WM8994_DC_SERVO_READBACK:
+ case WM8994_WRITE_SEQUENCER_CTRL_1:
+ case WM8994_WRITE_SEQUENCER_CTRL_2:
+ case WM8994_AIF1_ADC2_LEFT_VOLUME:
+ case WM8994_AIF1_ADC2_RIGHT_VOLUME:
+ case WM8994_AIF1_DAC2_LEFT_VOLUME:
+ case WM8994_AIF1_DAC2_RIGHT_VOLUME:
+ case WM8994_AIF1_ADC2_FILTERS:
+ case WM8994_AIF1_DAC2_FILTERS_1:
+ case WM8994_AIF1_DAC2_FILTERS_2:
+ case WM8958_AIF1_DAC2_NOISE_GATE:
+ case WM8994_AIF1_DRC2_1:
+ case WM8994_AIF1_DRC2_2:
+ case WM8994_AIF1_DRC2_3:
+ case WM8994_AIF1_DRC2_4:
+ case WM8994_AIF1_DRC2_5:
+ case WM8994_AIF1_DAC2_EQ_GAINS_1:
+ case WM8994_AIF1_DAC2_EQ_GAINS_2:
+ case WM8994_AIF1_DAC2_EQ_BAND_1_A:
+ case WM8994_AIF1_DAC2_EQ_BAND_1_B:
+ case WM8994_AIF1_DAC2_EQ_BAND_1_PG:
+ case WM8994_AIF1_DAC2_EQ_BAND_2_A:
+ case WM8994_AIF1_DAC2_EQ_BAND_2_B:
+ case WM8994_AIF1_DAC2_EQ_BAND_2_C:
+ case WM8994_AIF1_DAC2_EQ_BAND_2_PG:
+ case WM8994_AIF1_DAC2_EQ_BAND_3_A:
+ case WM8994_AIF1_DAC2_EQ_BAND_3_B:
+ case WM8994_AIF1_DAC2_EQ_BAND_3_C:
+ case WM8994_AIF1_DAC2_EQ_BAND_3_PG:
+ case WM8994_AIF1_DAC2_EQ_BAND_4_A:
+ case WM8994_AIF1_DAC2_EQ_BAND_4_B:
+ case WM8994_AIF1_DAC2_EQ_BAND_4_C:
+ case WM8994_AIF1_DAC2_EQ_BAND_4_PG:
+ case WM8994_AIF1_DAC2_EQ_BAND_5_A:
+ case WM8994_AIF1_DAC2_EQ_BAND_5_B:
+ case WM8994_AIF1_DAC2_EQ_BAND_5_PG:
+ case WM8994_AIF1_DAC2_EQ_BAND_1_C:
+ case WM8994_DAC2_MIXER_VOLUMES:
+ case WM8994_DAC2_LEFT_MIXER_ROUTING:
+ case WM8994_DAC2_RIGHT_MIXER_ROUTING:
+ case WM8994_AIF1_ADC2_LEFT_MIXER_ROUTING:
+ case WM8994_AIF1_ADC2_RIGHT_MIXER_ROUTING:
+ case WM8994_DAC2_LEFT_VOLUME:
+ case WM8994_DAC2_RIGHT_VOLUME:
+ return true;
+ default:
+ return wm1811_readable_register(dev, reg);
+ }
+}
+
+static bool wm8958_readable_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case WM8958_DSP2_PROGRAM:
+ case WM8958_DSP2_CONFIG:
+ case WM8958_DSP2_MAGICNUM:
+ case WM8958_DSP2_RELEASEYEAR:
+ case WM8958_DSP2_RELEASEMONTHDAY:
+ case WM8958_DSP2_RELEASETIME:
+ case WM8958_DSP2_VERMAJMIN:
+ case WM8958_DSP2_VERBUILD:
+ case WM8958_DSP2_TESTREG:
+ case WM8958_DSP2_XORREG:
+ case WM8958_DSP2_SHIFTMAXX:
+ case WM8958_DSP2_SHIFTMAXY:
+ case WM8958_DSP2_SHIFTMAXZ:
+ case WM8958_DSP2_SHIFTMAXEXTLO:
+ case WM8958_DSP2_AESSELECT:
+ case WM8958_DSP2_EXECCONTROL:
+ case WM8958_DSP2_SAMPLEBREAK:
+ case WM8958_DSP2_COUNTBREAK:
+ case WM8958_DSP2_INTSTATUS:
+ case WM8958_DSP2_EVENTSTATUS:
+ case WM8958_DSP2_INTMASK:
+ case WM8958_DSP2_CONFIGDWIDTH:
+ case WM8958_DSP2_CONFIGINSTR:
+ case WM8958_DSP2_CONFIGDMEM:
+ case WM8958_DSP2_CONFIGDELAYS:
+ case WM8958_DSP2_CONFIGNUMIO:
+ case WM8958_DSP2_CONFIGEXTDEPTH:
+ case WM8958_DSP2_CONFIGMULTIPLIER:
+ case WM8958_DSP2_CONFIGCTRLDWIDTH:
+ case WM8958_DSP2_CONFIGPIPELINE:
+ case WM8958_DSP2_SHIFTMAXEXTHI:
+ case WM8958_DSP2_SWVERSIONREG:
+ case WM8958_DSP2_CONFIGXMEM:
+ case WM8958_DSP2_CONFIGYMEM:
+ case WM8958_DSP2_CONFIGZMEM:
+ case WM8958_FW_BUILD_1:
+ case WM8958_FW_BUILD_0:
+ case WM8958_FW_ID_1:
+ case WM8958_FW_ID_0:
+ case WM8958_FW_MAJOR_1:
+ case WM8958_FW_MAJOR_0:
+ case WM8958_FW_MINOR_1:
+ case WM8958_FW_MINOR_0:
+ case WM8958_FW_PATCH_1:
+ case WM8958_FW_PATCH_0:
+ case WM8958_MBC_BAND_1_K_1:
+ case WM8958_MBC_BAND_1_K_2:
+ case WM8958_MBC_BAND_1_N1_1:
+ case WM8958_MBC_BAND_1_N1_2:
+ case WM8958_MBC_BAND_1_N2_1:
+ case WM8958_MBC_BAND_1_N2_2:
+ case WM8958_MBC_BAND_1_N3_1:
+ case WM8958_MBC_BAND_1_N3_2:
+ case WM8958_MBC_BAND_1_N4_1:
+ case WM8958_MBC_BAND_1_N4_2:
+ case WM8958_MBC_BAND_1_N5_1:
+ case WM8958_MBC_BAND_1_N5_2:
+ case WM8958_MBC_BAND_1_X1_1:
+ case WM8958_MBC_BAND_1_X1_2:
+ case WM8958_MBC_BAND_1_X2_1:
+ case WM8958_MBC_BAND_1_X2_2:
+ case WM8958_MBC_BAND_1_X3_1:
+ case WM8958_MBC_BAND_1_X3_2:
+ case WM8958_MBC_BAND_1_ATTACK_1:
+ case WM8958_MBC_BAND_1_ATTACK_2:
+ case WM8958_MBC_BAND_1_DECAY_1:
+ case WM8958_MBC_BAND_1_DECAY_2:
+ case WM8958_MBC_BAND_2_K_1:
+ case WM8958_MBC_BAND_2_K_2:
+ case WM8958_MBC_BAND_2_N1_1:
+ case WM8958_MBC_BAND_2_N1_2:
+ case WM8958_MBC_BAND_2_N2_1:
+ case WM8958_MBC_BAND_2_N2_2:
+ case WM8958_MBC_BAND_2_N3_1:
+ case WM8958_MBC_BAND_2_N3_2:
+ case WM8958_MBC_BAND_2_N4_1:
+ case WM8958_MBC_BAND_2_N4_2:
+ case WM8958_MBC_BAND_2_N5_1:
+ case WM8958_MBC_BAND_2_N5_2:
+ case WM8958_MBC_BAND_2_X1_1:
+ case WM8958_MBC_BAND_2_X1_2:
+ case WM8958_MBC_BAND_2_X2_1:
+ case WM8958_MBC_BAND_2_X2_2:
+ case WM8958_MBC_BAND_2_X3_1:
+ case WM8958_MBC_BAND_2_X3_2:
+ case WM8958_MBC_BAND_2_ATTACK_1:
+ case WM8958_MBC_BAND_2_ATTACK_2:
+ case WM8958_MBC_BAND_2_DECAY_1:
+ case WM8958_MBC_BAND_2_DECAY_2:
+ case WM8958_MBC_B2_PG2_1:
+ case WM8958_MBC_B2_PG2_2:
+ case WM8958_MBC_B1_PG2_1:
+ case WM8958_MBC_B1_PG2_2:
+ case WM8958_MBC_CROSSOVER_1:
+ case WM8958_MBC_CROSSOVER_2:
+ case WM8958_MBC_HPF_1:
+ case WM8958_MBC_HPF_2:
+ case WM8958_MBC_LPF_1:
+ case WM8958_MBC_LPF_2:
+ case WM8958_MBC_RMS_LIMIT_1:
+ case WM8958_MBC_RMS_LIMIT_2:
+ return true;
+ default:
+ return wm8994_readable_register(dev, reg);
+ }
+}
+
+static bool wm8994_volatile_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case WM8994_SOFTWARE_RESET:
+ case WM8994_DC_SERVO_1:
+ case WM8994_DC_SERVO_READBACK:
+ case WM8994_RATE_STATUS:
+ case WM8958_MIC_DETECT_3:
+ case WM8994_DC_SERVO_4E:
+ case WM8994_CHIP_REVISION:
+ case WM8994_INTERRUPT_STATUS_1:
+ case WM8994_INTERRUPT_STATUS_2:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool wm1811_volatile_register(struct device *dev, unsigned int reg)
+{
+ struct wm8994 *wm8994 = dev_get_drvdata(dev);
+
+ switch (reg) {
+ case WM8994_GPIO_6:
+ if (wm8994->revision > 1)
+ return true;
+ else
+ return false;
+ default:
+ return wm8994_volatile_register(dev, reg);
+ }
+}
+
+static bool wm8958_volatile_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case WM8958_DSP2_MAGICNUM:
+ case WM8958_DSP2_RELEASEYEAR:
+ case WM8958_DSP2_RELEASEMONTHDAY:
+ case WM8958_DSP2_RELEASETIME:
+ case WM8958_DSP2_VERMAJMIN:
+ case WM8958_DSP2_VERBUILD:
+ case WM8958_DSP2_EXECCONTROL:
+ case WM8958_DSP2_SWVERSIONREG:
+ case WM8958_DSP2_CONFIGXMEM:
+ case WM8958_DSP2_CONFIGYMEM:
+ case WM8958_DSP2_CONFIGZMEM:
+ case WM8958_FW_BUILD_1:
+ case WM8958_FW_BUILD_0:
+ case WM8958_FW_ID_1:
+ case WM8958_FW_ID_0:
+ case WM8958_FW_MAJOR_1:
+ case WM8958_FW_MAJOR_0:
+ case WM8958_FW_MINOR_1:
+ case WM8958_FW_MINOR_0:
+ case WM8958_FW_PATCH_1:
+ case WM8958_FW_PATCH_0:
+ return true;
+ default:
+ return wm8994_volatile_register(dev, reg);
+ }
+}
+
+struct regmap_config wm1811_regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 16,
+
+ .cache_type = REGCACHE_RBTREE,
+
+ .reg_defaults = wm1811_defaults,
+ .num_reg_defaults = ARRAY_SIZE(wm1811_defaults),
+
+ .max_register = WM8994_MAX_REGISTER,
+ .volatile_reg = wm1811_volatile_register,
+ .readable_reg = wm1811_readable_register,
+};
+
+struct regmap_config wm8994_regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 16,
+
+ .cache_type = REGCACHE_RBTREE,
+
+ .reg_defaults = wm8994_defaults,
+ .num_reg_defaults = ARRAY_SIZE(wm8994_defaults),
+
+ .max_register = WM8994_MAX_REGISTER,
+ .volatile_reg = wm8994_volatile_register,
+ .readable_reg = wm8994_readable_register,
+};
+
+struct regmap_config wm8958_regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 16,
+
+ .cache_type = REGCACHE_RBTREE,
+
+ .reg_defaults = wm8958_defaults,
+ .num_reg_defaults = ARRAY_SIZE(wm8958_defaults),
+
+ .max_register = WM8994_MAX_REGISTER,
+ .volatile_reg = wm8958_volatile_register,
+ .readable_reg = wm8958_readable_register,
+};
+
+struct regmap_config wm8994_base_regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 16,
+};
diff --git a/drivers/mfd/wm8994.h b/drivers/mfd/wm8994.h
new file mode 100644
index 000000000000..6f39a84eeadf
--- /dev/null
+++ b/drivers/mfd/wm8994.h
@@ -0,0 +1,25 @@
+/*
+ * wm8994.h -- WM8994 MFD internals
+ *
+ * Copyright 2011 Wolfson Microelectronics PLC.
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef __MFD_WM8994_H__
+#define __MFD_WM8994_H__
+
+#include <linux/regmap.h>
+
+extern struct regmap_config wm1811_regmap_config;
+extern struct regmap_config wm8994_regmap_config;
+extern struct regmap_config wm8958_regmap_config;
+extern struct regmap_config wm8994_base_regmap_config;
+
+#endif
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 5664696f2d3a..6a1a092db146 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -500,6 +500,14 @@ config USB_SWITCH_FSA9480
stereo and mono audio, video, microphone and UART data to use
a common connector port.
+config MAX8997_MUIC
+ tristate "MAX8997 MUIC Support"
+ depends on MFD_MAX8997
+ help
+ If you say yes here you get support for the MUIC device of the
+ Maxim MAX8997 PMIC.
+ The MAX8997 MUIC is a USB port accessory detector and switch.
+
source "drivers/misc/c2port/Kconfig"
source "drivers/misc/eeprom/Kconfig"
source "drivers/misc/cb710/Kconfig"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index b26495a02554..3e1d80106f04 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -48,3 +48,4 @@ obj-y += lis3lv02d/
obj-y += carma/
obj-$(CONFIG_USB_SWITCH_FSA9480) += fsa9480.o
obj-$(CONFIG_ALTERA_STAPL) +=altera-stapl/
+obj-$(CONFIG_MAX8997_MUIC) += max8997-muic.o
diff --git a/drivers/misc/ab8500-pwm.c b/drivers/misc/ab8500-pwm.c
index 2208a9d52622..d7a9aa14e5d5 100644
--- a/drivers/misc/ab8500-pwm.c
+++ b/drivers/misc/ab8500-pwm.c
@@ -8,8 +8,8 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/pwm.h>
-#include <linux/mfd/ab8500.h>
#include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ab8500.h>
#include <linux/module.h>
/*
diff --git a/drivers/misc/ad525x_dpot-i2c.c b/drivers/misc/ad525x_dpot-i2c.c
index a39e0555df63..83adab69bfd4 100644
--- a/drivers/misc/ad525x_dpot-i2c.c
+++ b/drivers/misc/ad525x_dpot-i2c.c
@@ -1,7 +1,7 @@
/*
* Driver for the Analog Devices digital potentiometers (I2C bus)
*
- * Copyright (C) 2010 Michael Hennerich, Analog Devices Inc.
+ * Copyright (C) 2010-2011 Michael Hennerich, Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*/
@@ -11,7 +11,6 @@
#include "ad525x_dpot.h"
-/* ------------------------------------------------------------------------- */
/* I2C bus functions */
static int write_d8(void *client, u8 val)
{
@@ -60,18 +59,13 @@ static int __devinit ad_dpot_i2c_probe(struct i2c_client *client,
.bops = &bops,
};
- struct ad_dpot_id dpot_id = {
- .name = (char *) &id->name,
- .devid = id->driver_data,
- };
-
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_WORD_DATA)) {
dev_err(&client->dev, "SMBUS Word Data not Supported\n");
return -EIO;
}
- return ad_dpot_probe(&client->dev, &bdata, &dpot_id);
+ return ad_dpot_probe(&client->dev, &bdata, id->driver_data, id->name);
}
static int __devexit ad_dpot_i2c_remove(struct i2c_client *client)
diff --git a/drivers/misc/ad525x_dpot-spi.c b/drivers/misc/ad525x_dpot-spi.c
index 7f9a55afe05d..822749e41fea 100644
--- a/drivers/misc/ad525x_dpot-spi.c
+++ b/drivers/misc/ad525x_dpot-spi.c
@@ -1,7 +1,7 @@
/*
* Driver for the Analog Devices digital potentiometers (SPI bus)
*
- * Copyright (C) 2010 Michael Hennerich, Analog Devices Inc.
+ * Copyright (C) 2010-2011 Michael Hennerich, Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*/
@@ -11,40 +11,6 @@
#include "ad525x_dpot.h"
-static const struct ad_dpot_id ad_dpot_spi_devlist[] = {
- {.name = "ad5160", .devid = AD5160_ID},
- {.name = "ad5161", .devid = AD5161_ID},
- {.name = "ad5162", .devid = AD5162_ID},
- {.name = "ad5165", .devid = AD5165_ID},
- {.name = "ad5200", .devid = AD5200_ID},
- {.name = "ad5201", .devid = AD5201_ID},
- {.name = "ad5203", .devid = AD5203_ID},
- {.name = "ad5204", .devid = AD5204_ID},
- {.name = "ad5206", .devid = AD5206_ID},
- {.name = "ad5207", .devid = AD5207_ID},
- {.name = "ad5231", .devid = AD5231_ID},
- {.name = "ad5232", .devid = AD5232_ID},
- {.name = "ad5233", .devid = AD5233_ID},
- {.name = "ad5235", .devid = AD5235_ID},
- {.name = "ad5260", .devid = AD5260_ID},
- {.name = "ad5262", .devid = AD5262_ID},
- {.name = "ad5263", .devid = AD5263_ID},
- {.name = "ad5290", .devid = AD5290_ID},
- {.name = "ad5291", .devid = AD5291_ID},
- {.name = "ad5292", .devid = AD5292_ID},
- {.name = "ad5293", .devid = AD5293_ID},
- {.name = "ad7376", .devid = AD7376_ID},
- {.name = "ad8400", .devid = AD8400_ID},
- {.name = "ad8402", .devid = AD8402_ID},
- {.name = "ad8403", .devid = AD8403_ID},
- {.name = "adn2850", .devid = ADN2850_ID},
- {.name = "ad5270", .devid = AD5270_ID},
- {.name = "ad5271", .devid = AD5271_ID},
- {}
-};
-
-/* ------------------------------------------------------------------------- */
-
/* SPI bus functions */
static int write8(void *client, u8 val)
{
@@ -109,36 +75,16 @@ static const struct ad_dpot_bus_ops bops = {
.write_r8d8 = write16,
.write_r8d16 = write24,
};
-
-static const struct ad_dpot_id *dpot_match_id(const struct ad_dpot_id *id,
- char *name)
-{
- while (id->name && id->name[0]) {
- if (strcmp(name, id->name) == 0)
- return id;
- id++;
- }
- return NULL;
-}
-
static int __devinit ad_dpot_spi_probe(struct spi_device *spi)
{
- char *name = spi->dev.platform_data;
- const struct ad_dpot_id *dpot_id;
-
struct ad_dpot_bus_data bdata = {
.client = spi,
.bops = &bops,
};
- dpot_id = dpot_match_id(ad_dpot_spi_devlist, name);
-
- if (dpot_id == NULL) {
- dev_err(&spi->dev, "%s not in supported device list", name);
- return -ENODEV;
- }
-
- return ad_dpot_probe(&spi->dev, &bdata, dpot_id);
+ return ad_dpot_probe(&spi->dev, &bdata,
+ spi_get_device_id(spi)->driver_data,
+ spi_get_device_id(spi)->name);
}
static int __devexit ad_dpot_spi_remove(struct spi_device *spi)
@@ -146,14 +92,47 @@ static int __devexit ad_dpot_spi_remove(struct spi_device *spi)
return ad_dpot_remove(&spi->dev);
}
+static const struct spi_device_id ad_dpot_spi_id[] = {
+ {"ad5160", AD5160_ID},
+ {"ad5161", AD5161_ID},
+ {"ad5162", AD5162_ID},
+ {"ad5165", AD5165_ID},
+ {"ad5200", AD5200_ID},
+ {"ad5201", AD5201_ID},
+ {"ad5203", AD5203_ID},
+ {"ad5204", AD5204_ID},
+ {"ad5206", AD5206_ID},
+ {"ad5207", AD5207_ID},
+ {"ad5231", AD5231_ID},
+ {"ad5232", AD5232_ID},
+ {"ad5233", AD5233_ID},
+ {"ad5235", AD5235_ID},
+ {"ad5260", AD5260_ID},
+ {"ad5262", AD5262_ID},
+ {"ad5263", AD5263_ID},
+ {"ad5290", AD5290_ID},
+ {"ad5291", AD5291_ID},
+ {"ad5292", AD5292_ID},
+ {"ad5293", AD5293_ID},
+ {"ad7376", AD7376_ID},
+ {"ad8400", AD8400_ID},
+ {"ad8402", AD8402_ID},
+ {"ad8403", AD8403_ID},
+ {"adn2850", ADN2850_ID},
+ {"ad5270", AD5270_ID},
+ {"ad5271", AD5271_ID},
+ {}
+};
+MODULE_DEVICE_TABLE(spi, ad_dpot_spi_id);
+
static struct spi_driver ad_dpot_spi_driver = {
.driver = {
.name = "ad_dpot",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = ad_dpot_spi_probe,
.remove = __devexit_p(ad_dpot_spi_remove),
+ .id_table = ad_dpot_spi_id,
};
static int __init ad_dpot_spi_init(void)
diff --git a/drivers/misc/ad525x_dpot.c b/drivers/misc/ad525x_dpot.c
index 7cb911028d09..1d1d42615915 100644
--- a/drivers/misc/ad525x_dpot.c
+++ b/drivers/misc/ad525x_dpot.c
@@ -64,7 +64,7 @@
* Author: Chris Verges <chrisv@cyberswitching.com>
*
* derived from ad5252.c
- * Copyright (c) 2006 Michael Hennerich <hennerich@blackfin.uclinux.org>
+ * Copyright (c) 2006-2011 Michael Hennerich <hennerich@blackfin.uclinux.org>
*
* Licensed under the GPL-2 or later.
*/
@@ -76,8 +76,6 @@
#include <linux/delay.h>
#include <linux/slab.h>
-#define DRIVER_VERSION "0.2"
-
#include "ad525x_dpot.h"
/*
@@ -687,8 +685,9 @@ inline void ad_dpot_remove_files(struct device *dev,
}
}
-__devinit int ad_dpot_probe(struct device *dev,
- struct ad_dpot_bus_data *bdata, const struct ad_dpot_id *id)
+int __devinit ad_dpot_probe(struct device *dev,
+ struct ad_dpot_bus_data *bdata, unsigned long devid,
+ const char *name)
{
struct dpot_data *data;
@@ -704,13 +703,13 @@ __devinit int ad_dpot_probe(struct device *dev,
mutex_init(&data->update_lock);
data->bdata = *bdata;
- data->devid = id->devid;
+ data->devid = devid;
- data->max_pos = 1 << DPOT_MAX_POS(data->devid);
+ data->max_pos = 1 << DPOT_MAX_POS(devid);
data->rdac_mask = data->max_pos - 1;
- data->feat = DPOT_FEAT(data->devid);
- data->uid = DPOT_UID(data->devid);
- data->wipers = DPOT_WIPERS(data->devid);
+ data->feat = DPOT_FEAT(devid);
+ data->uid = DPOT_UID(devid);
+ data->wipers = DPOT_WIPERS(devid);
for (i = DPOT_RDAC0; i < MAX_RDACS; i++)
if (data->wipers & (1 << i)) {
@@ -731,7 +730,7 @@ __devinit int ad_dpot_probe(struct device *dev,
}
dev_info(dev, "%s %d-Position Digital Potentiometer registered\n",
- id->name, data->max_pos);
+ name, data->max_pos);
return 0;
@@ -745,7 +744,7 @@ exit_free:
dev_set_drvdata(dev, NULL);
exit:
dev_err(dev, "failed to create client for %s ID 0x%lX\n",
- id->name, id->devid);
+ name, devid);
return err;
}
EXPORT_SYMBOL(ad_dpot_probe);
@@ -770,4 +769,3 @@ MODULE_AUTHOR("Chris Verges <chrisv@cyberswitching.com>, "
"Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("Digital potentiometer driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRIVER_VERSION);
diff --git a/drivers/misc/ad525x_dpot.h b/drivers/misc/ad525x_dpot.h
index 82b2cb77ae19..6bd1eba23bc0 100644
--- a/drivers/misc/ad525x_dpot.h
+++ b/drivers/misc/ad525x_dpot.h
@@ -208,12 +208,8 @@ struct ad_dpot_bus_data {
const struct ad_dpot_bus_ops *bops;
};
-struct ad_dpot_id {
- char *name;
- unsigned long devid;
-};
-
-int ad_dpot_probe(struct device *dev, struct ad_dpot_bus_data *bdata, const struct ad_dpot_id *id);
+int ad_dpot_probe(struct device *dev, struct ad_dpot_bus_data *bdata,
+ unsigned long devid, const char *name);
int ad_dpot_remove(struct device *dev);
#endif
diff --git a/drivers/misc/bmp085.c b/drivers/misc/bmp085.c
index 5f898cb706a6..b29a2be24591 100644
--- a/drivers/misc/bmp085.c
+++ b/drivers/misc/bmp085.c
@@ -216,7 +216,7 @@ static s32 bmp085_get_temperature(struct bmp085_data *data, int *temperature)
*temperature = (x1+x2+8) >> 4;
exit:
- return status;;
+ return status;
}
/*
diff --git a/drivers/misc/carma/carma-fpga-program.c b/drivers/misc/carma/carma-fpga-program.c
index eb5cd28bc6d8..a2d25e4857e3 100644
--- a/drivers/misc/carma/carma-fpga-program.c
+++ b/drivers/misc/carma/carma-fpga-program.c
@@ -513,7 +513,7 @@ static noinline int fpga_program_dma(struct fpga_dev *priv)
* transaction, and then put it under external control
*/
memset(&config, 0, sizeof(config));
- config.direction = DMA_TO_DEVICE;
+ config.direction = DMA_MEM_TO_DEV;
config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
config.dst_maxburst = fpga_fifo_size(priv->regs) / 2 / 4;
ret = chan->device->device_control(chan, DMA_SLAVE_CONFIG,
diff --git a/drivers/misc/eeprom/eeprom_93cx6.c b/drivers/misc/eeprom/eeprom_93cx6.c
index 7b33de95c4bf..0ff4b02177be 100644
--- a/drivers/misc/eeprom/eeprom_93cx6.c
+++ b/drivers/misc/eeprom/eeprom_93cx6.c
@@ -63,6 +63,7 @@ static void eeprom_93cx6_startup(struct eeprom_93cx6 *eeprom)
eeprom->reg_data_out = 0;
eeprom->reg_data_clock = 0;
eeprom->reg_chip_select = 1;
+ eeprom->drive_data = 1;
eeprom->register_write(eeprom);
/*
@@ -101,6 +102,7 @@ static void eeprom_93cx6_write_bits(struct eeprom_93cx6 *eeprom,
*/
eeprom->reg_data_in = 0;
eeprom->reg_data_out = 0;
+ eeprom->drive_data = 1;
/*
* Start writing all bits.
@@ -140,6 +142,7 @@ static void eeprom_93cx6_read_bits(struct eeprom_93cx6 *eeprom,
*/
eeprom->reg_data_in = 0;
eeprom->reg_data_out = 0;
+ eeprom->drive_data = 0;
/*
* Start reading all bits.
@@ -231,3 +234,88 @@ void eeprom_93cx6_multiread(struct eeprom_93cx6 *eeprom, const u8 word,
}
EXPORT_SYMBOL_GPL(eeprom_93cx6_multiread);
+/**
+ * eeprom_93cx6_wren - set the write enable state
+ * @eeprom: Pointer to eeprom structure
+ * @enable: true to enable writes, otherwise disable writes
+ *
+ * Set the EEPROM write enable state to either allow or deny
+ * writes depending on the @enable value.
+ */
+void eeprom_93cx6_wren(struct eeprom_93cx6 *eeprom, bool enable)
+{
+ u16 command;
+
+ /* start the command */
+ eeprom_93cx6_startup(eeprom);
+
+ /* create command to enable/disable */
+
+ command = enable ? PCI_EEPROM_EWEN_OPCODE : PCI_EEPROM_EWDS_OPCODE;
+ command <<= (eeprom->width - 2);
+
+ eeprom_93cx6_write_bits(eeprom, command,
+ PCI_EEPROM_WIDTH_OPCODE + eeprom->width);
+
+ eeprom_93cx6_cleanup(eeprom);
+}
+EXPORT_SYMBOL_GPL(eeprom_93cx6_wren);
+
+/**
+ * eeprom_93cx6_write - write data to the EEPROM
+ * @eeprom: Pointer to eeprom structure
+ * @addr: Address to write data to.
+ * @data: The data to write to address @addr.
+ *
+ * Write @data to the specified @addr in the EEPROM and
+ * wait for the device to finish writing.
+ *
+ * Note: since we do not expect a large number of write operations,
+ * we delay between parts of the operation to avoid using excessive
+ * amounts of CPU time busy-waiting.
+ */
+void eeprom_93cx6_write(struct eeprom_93cx6 *eeprom, u8 addr, u16 data)
+{
+ int timeout = 100;
+ u16 command;
+
+ /* start the command */
+ eeprom_93cx6_startup(eeprom);
+
+ command = PCI_EEPROM_WRITE_OPCODE << eeprom->width;
+ command |= addr;
+
+ /* send write command */
+ eeprom_93cx6_write_bits(eeprom, command,
+ PCI_EEPROM_WIDTH_OPCODE + eeprom->width);
+
+ /* send data */
+ eeprom_93cx6_write_bits(eeprom, data, 16);
+
+ /* get ready to check for busy */
+ eeprom->drive_data = 0;
+ eeprom->reg_chip_select = 1;
+ eeprom->register_write(eeprom);
+
+ /* wait at least 250ns for DO to become the busy signal */
+ usleep_range(1000, 2000);
+
+ /* wait for DO to go high to signify finish */
+
+ while (true) {
+ eeprom->register_read(eeprom);
+
+ if (eeprom->reg_data_out)
+ break;
+
+ usleep_range(1000, 2000);
+
+ if (--timeout <= 0) {
+ printk(KERN_ERR "%s: timeout\n", __func__);
+ break;
+ }
+ }
+
+ eeprom_93cx6_cleanup(eeprom);
+}
+EXPORT_SYMBOL_GPL(eeprom_93cx6_write);
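
For context, a caller of the two new helpers typically enables writes, programs a word and then write-protects the part again. A minimal sketch, assuming an eeprom_93cx6 structure the driver has already initialised with its register_read/register_write callbacks and bus width (as the existing read helpers already require):

/* Illustrative use of eeprom_93cx6_wren()/eeprom_93cx6_write();
 * needs <linux/eeprom_93cx6.h>. */
static void example_update_word(struct eeprom_93cx6 *eeprom, u8 addr, u16 val)
{
	eeprom_93cx6_wren(eeprom, true);	/* allow writes */
	eeprom_93cx6_write(eeprom, addr, val);	/* program word, busy-waits on DO */
	eeprom_93cx6_wren(eeprom, false);	/* write-protect again */
}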
diff --git a/drivers/misc/ibmasm/command.c b/drivers/misc/ibmasm/command.c
index 5c766b4fb238..7d56f45dee19 100644
--- a/drivers/misc/ibmasm/command.c
+++ b/drivers/misc/ibmasm/command.c
@@ -18,7 +18,7 @@
*
* Copyright (C) IBM Corporation, 2004
*
- * Author: Max Asbck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
*
*/
diff --git a/drivers/misc/ibmasm/dot_command.c b/drivers/misc/ibmasm/dot_command.c
index 3dd2dfb8da17..d7b2ca358b23 100644
--- a/drivers/misc/ibmasm/dot_command.c
+++ b/drivers/misc/ibmasm/dot_command.c
@@ -17,7 +17,7 @@
*
* Copyright (C) IBM Corporation, 2004
*
- * Author: Max Asbck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
*
*/
diff --git a/drivers/misc/ibmasm/dot_command.h b/drivers/misc/ibmasm/dot_command.h
index 6cbba1afef35..fc9fc9d4e087 100644
--- a/drivers/misc/ibmasm/dot_command.h
+++ b/drivers/misc/ibmasm/dot_command.h
@@ -17,7 +17,7 @@
*
* Copyright (C) IBM Corporation, 2004
*
- * Author: Max Asbck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
*
*/
diff --git a/drivers/misc/ibmasm/event.c b/drivers/misc/ibmasm/event.c
index 76bfda1ffaa9..8e540f4e9d52 100644
--- a/drivers/misc/ibmasm/event.c
+++ b/drivers/misc/ibmasm/event.c
@@ -18,7 +18,7 @@
*
* Copyright (C) IBM Corporation, 2004
*
- * Author: Max Asbck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
*
*/
diff --git a/drivers/misc/ibmasm/heartbeat.c b/drivers/misc/ibmasm/heartbeat.c
index 1bc4306572a4..90746378f9b7 100644
--- a/drivers/misc/ibmasm/heartbeat.c
+++ b/drivers/misc/ibmasm/heartbeat.c
@@ -18,7 +18,7 @@
*
* Copyright (C) IBM Corporation, 2004
*
- * Author: Max Asbck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
*
*/
diff --git a/drivers/misc/ibmasm/i2o.h b/drivers/misc/ibmasm/i2o.h
index bf2c738d2b72..2e9566dab2b1 100644
--- a/drivers/misc/ibmasm/i2o.h
+++ b/drivers/misc/ibmasm/i2o.h
@@ -17,7 +17,7 @@
*
* Copyright (C) IBM Corporation, 2004
*
- * Author: Max Asbck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
*
*/
diff --git a/drivers/misc/ibmasm/ibmasm.h b/drivers/misc/ibmasm/ibmasm.h
index 4d8a4e248b34..9b083448814d 100644
--- a/drivers/misc/ibmasm/ibmasm.h
+++ b/drivers/misc/ibmasm/ibmasm.h
@@ -18,7 +18,7 @@
*
* Copyright (C) IBM Corporation, 2004
*
- * Author: Max Asbck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
*
*/
diff --git a/drivers/misc/ibmasm/ibmasmfs.c b/drivers/misc/ibmasm/ibmasmfs.c
index 89947723a27d..35361753b487 100644
--- a/drivers/misc/ibmasm/ibmasmfs.c
+++ b/drivers/misc/ibmasm/ibmasmfs.c
@@ -17,7 +17,7 @@
*
* Copyright (C) IBM Corporation, 2004
*
- * Author: Max Asbck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
*
*/
diff --git a/drivers/misc/ibmasm/lowlevel.c b/drivers/misc/ibmasm/lowlevel.c
index 4b2398e27fd5..5319ea261c05 100644
--- a/drivers/misc/ibmasm/lowlevel.c
+++ b/drivers/misc/ibmasm/lowlevel.c
@@ -17,7 +17,7 @@
*
* Copyright (C) IBM Corporation, 2004
*
- * Author: Max Asbck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
*
*/
diff --git a/drivers/misc/ibmasm/lowlevel.h b/drivers/misc/ibmasm/lowlevel.h
index 766766523a60..e97848f51b3f 100644
--- a/drivers/misc/ibmasm/lowlevel.h
+++ b/drivers/misc/ibmasm/lowlevel.h
@@ -17,7 +17,7 @@
*
* Copyright (C) IBM Corporation, 2004
*
- * Author: Max Asbck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
*
*/
diff --git a/drivers/misc/ibmasm/module.c b/drivers/misc/ibmasm/module.c
index a234d965243b..1ccedb71e728 100644
--- a/drivers/misc/ibmasm/module.c
+++ b/drivers/misc/ibmasm/module.c
@@ -18,7 +18,7 @@
*
* Copyright (C) IBM Corporation, 2004
*
- * Author: Max Asbck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
*
* This driver is based on code originally written by Pete Reynolds
* and others.
diff --git a/drivers/misc/ibmasm/r_heartbeat.c b/drivers/misc/ibmasm/r_heartbeat.c
index 2de487ac788c..232034f5da48 100644
--- a/drivers/misc/ibmasm/r_heartbeat.c
+++ b/drivers/misc/ibmasm/r_heartbeat.c
@@ -16,7 +16,7 @@
*
* Copyright (C) IBM Corporation, 2004
*
- * Author: Max Asbck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
*
*/
diff --git a/drivers/misc/ibmasm/remote.h b/drivers/misc/ibmasm/remote.h
index 00dbf1d4373a..a7729ef76acb 100644
--- a/drivers/misc/ibmasm/remote.h
+++ b/drivers/misc/ibmasm/remote.h
@@ -18,7 +18,7 @@
*
* Copyright (C) IBM Corporation, 2004
*
- * Author: Max Asbck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
*
* Originally written by Pete Reynolds
*/
diff --git a/drivers/misc/ibmasm/uart.c b/drivers/misc/ibmasm/uart.c
index 93baa350d698..1dcb9ae1905a 100644
--- a/drivers/misc/ibmasm/uart.c
+++ b/drivers/misc/ibmasm/uart.c
@@ -18,7 +18,7 @@
*
* Copyright (C) IBM Corporation, 2004
*
- * Author: Max Asbck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
*
*/
diff --git a/drivers/misc/isl29020.c b/drivers/misc/isl29020.c
index 307aada5fffe..3d6cce663bea 100644
--- a/drivers/misc/isl29020.c
+++ b/drivers/misc/isl29020.c
@@ -158,7 +158,7 @@ static int als_set_default_config(struct i2c_client *client)
dev_err(&client->dev, "default write failed.");
return retval;
}
- return 0;;
+ return 0;
}
static int isl29020_probe(struct i2c_client *client,
diff --git a/drivers/misc/iwmc3200top/main.c b/drivers/misc/iwmc3200top/main.c
index b1f4563be9ae..701eb600b127 100644
--- a/drivers/misc/iwmc3200top/main.c
+++ b/drivers/misc/iwmc3200top/main.c
@@ -376,20 +376,20 @@ static int blocks;
module_param(blocks, int, 0604);
MODULE_PARM_DESC(blocks, "max_blocks_to_send");
-static int dump;
+static bool dump;
module_param(dump, bool, 0604);
MODULE_PARM_DESC(dump, "dump_hex_content");
-static int jump = 1;
+static bool jump = 1;
module_param(jump, bool, 0604);
-static int direct = 1;
+static bool direct = 1;
module_param(direct, bool, 0604);
-static int checksum = 1;
+static bool checksum = 1;
module_param(checksum, bool, 0604);
-static int fw_download = 1;
+static bool fw_download = 1;
module_param(fw_download, bool, 0604);
static int block_size = IWMC_SDIO_BLK_SIZE;
@@ -398,7 +398,7 @@ module_param(block_size, int, 0404);
static int download_trans_blks = IWMC_DEFAULT_TR_BLK;
module_param(download_trans_blks, int, 0604);
-static int rubbish_barker;
+static bool rubbish_barker;
module_param(rubbish_barker, bool, 0604);
#ifdef CONFIG_IWMC3200TOP_DEBUG
diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
index 29d12a70eb1b..a981e2a42f92 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d.c
+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
@@ -111,6 +111,8 @@ static struct kernel_param_ops param_ops_axis = {
.get = param_get_int,
};
+#define param_check_axis(name, p) param_check_int(name, p)
+
module_param_array_named(axes, lis3_dev.ac.as_array, axis, NULL, 0644);
MODULE_PARM_DESC(axes, "Axis-mapping for x,y,z directions");
diff --git a/drivers/misc/max8997-muic.c b/drivers/misc/max8997-muic.c
new file mode 100644
index 000000000000..d74ef41aabd5
--- /dev/null
+++ b/drivers/misc/max8997-muic.c
@@ -0,0 +1,505 @@
+/*
+ * max8997-muic.c - MAX8997 muic driver for the Maxim 8997
+ *
+ * Copyright (C) 2011 Samsung Electronics
+ * Donggeun Kim <dg77.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/kobject.h>
+#include <linux/mfd/max8997.h>
+#include <linux/mfd/max8997-private.h>
+
+/* MAX8997-MUIC STATUS1 register */
+#define STATUS1_ADC_SHIFT 0
+#define STATUS1_ADCLOW_SHIFT 5
+#define STATUS1_ADCERR_SHIFT 6
+#define STATUS1_ADC_MASK (0x1f << STATUS1_ADC_SHIFT)
+#define STATUS1_ADCLOW_MASK (0x1 << STATUS1_ADCLOW_SHIFT)
+#define STATUS1_ADCERR_MASK (0x1 << STATUS1_ADCERR_SHIFT)
+
+/* MAX8997-MUIC STATUS2 register */
+#define STATUS2_CHGTYP_SHIFT 0
+#define STATUS2_CHGDETRUN_SHIFT 3
+#define STATUS2_DCDTMR_SHIFT 4
+#define STATUS2_DBCHG_SHIFT 5
+#define STATUS2_VBVOLT_SHIFT 6
+#define STATUS2_CHGTYP_MASK (0x7 << STATUS2_CHGTYP_SHIFT)
+#define STATUS2_CHGDETRUN_MASK (0x1 << STATUS2_CHGDETRUN_SHIFT)
+#define STATUS2_DCDTMR_MASK (0x1 << STATUS2_DCDTMR_SHIFT)
+#define STATUS2_DBCHG_MASK (0x1 << STATUS2_DBCHG_SHIFT)
+#define STATUS2_VBVOLT_MASK (0x1 << STATUS2_VBVOLT_SHIFT)
+
+/* MAX8997-MUIC STATUS3 register */
+#define STATUS3_OVP_SHIFT 2
+#define STATUS3_OVP_MASK (0x1 << STATUS3_OVP_SHIFT)
+
+/* MAX8997-MUIC CONTROL1 register */
+#define COMN1SW_SHIFT 0
+#define COMP2SW_SHIFT 3
+#define COMN1SW_MASK (0x7 << COMN1SW_SHIFT)
+#define COMP2SW_MASK (0x7 << COMP2SW_SHIFT)
+#define SW_MASK (COMP2SW_MASK | COMN1SW_MASK)
+
+#define MAX8997_SW_USB ((1 << COMP2SW_SHIFT) | (1 << COMN1SW_SHIFT))
+#define MAX8997_SW_AUDIO ((2 << COMP2SW_SHIFT) | (2 << COMN1SW_SHIFT))
+#define MAX8997_SW_UART ((3 << COMP2SW_SHIFT) | (3 << COMN1SW_SHIFT))
+#define MAX8997_SW_OPEN ((0 << COMP2SW_SHIFT) | (0 << COMN1SW_SHIFT))
+
+#define MAX8997_ADC_GROUND 0x00
+#define MAX8997_ADC_MHL 0x01
+#define MAX8997_ADC_JIG_USB_1 0x18
+#define MAX8997_ADC_JIG_USB_2 0x19
+#define MAX8997_ADC_DESKDOCK 0x1a
+#define MAX8997_ADC_JIG_UART 0x1c
+#define MAX8997_ADC_CARDOCK 0x1d
+#define MAX8997_ADC_OPEN 0x1f
+
+struct max8997_muic_irq {
+ unsigned int irq;
+ const char *name;
+};
+
+static struct max8997_muic_irq muic_irqs[] = {
+ { MAX8997_MUICIRQ_ADCError, "muic-ADC_error" },
+ { MAX8997_MUICIRQ_ADCLow, "muic-ADC_low" },
+ { MAX8997_MUICIRQ_ADC, "muic-ADC" },
+ { MAX8997_MUICIRQ_VBVolt, "muic-VB_voltage" },
+ { MAX8997_MUICIRQ_DBChg, "muic-DB_charger" },
+ { MAX8997_MUICIRQ_DCDTmr, "muic-DCD_timer" },
+ { MAX8997_MUICIRQ_ChgDetRun, "muic-CDR_status" },
+ { MAX8997_MUICIRQ_ChgTyp, "muic-charger_type" },
+ { MAX8997_MUICIRQ_OVP, "muic-over_voltage" },
+};
+
+struct max8997_muic_info {
+ struct device *dev;
+ struct max8997_dev *iodev;
+ struct i2c_client *muic;
+ struct max8997_muic_platform_data *muic_pdata;
+
+ int irq;
+ struct work_struct irq_work;
+
+ enum max8997_muic_charger_type pre_charger_type;
+ int pre_adc;
+
+ struct mutex mutex;
+};
+
+static int max8997_muic_handle_usb(struct max8997_muic_info *info,
+ enum max8997_muic_usb_type usb_type, bool attached)
+{
+ struct max8997_muic_platform_data *mdata = info->muic_pdata;
+ int ret = 0;
+
+ if (usb_type == MAX8997_USB_HOST) {
+ /* switch to USB */
+ ret = max8997_update_reg(info->muic, MAX8997_MUIC_REG_CONTROL1,
+ attached ? MAX8997_SW_USB : MAX8997_SW_OPEN,
+ SW_MASK);
+ if (ret) {
+ dev_err(info->dev, "failed to update muic register\n");
+ goto out;
+ }
+ }
+
+ if (mdata->usb_callback)
+ mdata->usb_callback(usb_type, attached);
+out:
+ return ret;
+}
+
+static void max8997_muic_handle_mhl(struct max8997_muic_info *info,
+ bool attached)
+{
+ struct max8997_muic_platform_data *mdata = info->muic_pdata;
+
+ if (mdata->mhl_callback)
+ mdata->mhl_callback(attached);
+}
+
+static int max8997_muic_handle_dock(struct max8997_muic_info *info,
+ int adc, bool attached)
+{
+ struct max8997_muic_platform_data *mdata = info->muic_pdata;
+ int ret = 0;
+
+ /* switch to AUDIO */
+ ret = max8997_update_reg(info->muic, MAX8997_MUIC_REG_CONTROL1,
+ attached ? MAX8997_SW_AUDIO : MAX8997_SW_OPEN,
+ SW_MASK);
+ if (ret) {
+ dev_err(info->dev, "failed to update muic register\n");
+ goto out;
+ }
+
+ switch (adc) {
+ case MAX8997_ADC_DESKDOCK:
+ if (mdata->deskdock_callback)
+ mdata->deskdock_callback(attached);
+ break;
+ case MAX8997_ADC_CARDOCK:
+ if (mdata->cardock_callback)
+ mdata->cardock_callback(attached);
+ break;
+ default:
+ break;
+ }
+out:
+ return ret;
+}
+
+static int max8997_muic_handle_jig_uart(struct max8997_muic_info *info,
+ bool attached)
+{
+ struct max8997_muic_platform_data *mdata = info->muic_pdata;
+ int ret = 0;
+
+ /* switch to UART */
+ ret = max8997_update_reg(info->muic, MAX8997_MUIC_REG_CONTROL1,
+ attached ? MAX8997_SW_UART : MAX8997_SW_OPEN,
+ SW_MASK);
+ if (ret) {
+ dev_err(info->dev, "failed to update muic register\n");
+ goto out;
+ }
+
+ if (mdata->uart_callback)
+ mdata->uart_callback(attached);
+out:
+ return ret;
+}
+
+static int max8997_muic_handle_adc_detach(struct max8997_muic_info *info)
+{
+ int ret = 0;
+
+ switch (info->pre_adc) {
+ case MAX8997_ADC_GROUND:
+ ret = max8997_muic_handle_usb(info, MAX8997_USB_HOST, false);
+ break;
+ case MAX8997_ADC_MHL:
+ max8997_muic_handle_mhl(info, false);
+ break;
+ case MAX8997_ADC_JIG_USB_1:
+ case MAX8997_ADC_JIG_USB_2:
+ ret = max8997_muic_handle_usb(info, MAX8997_USB_DEVICE, false);
+ break;
+ case MAX8997_ADC_DESKDOCK:
+ case MAX8997_ADC_CARDOCK:
+ ret = max8997_muic_handle_dock(info, info->pre_adc, false);
+ break;
+ case MAX8997_ADC_JIG_UART:
+ ret = max8997_muic_handle_jig_uart(info, false);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static int max8997_muic_handle_adc(struct max8997_muic_info *info, int adc)
+{
+ int ret = 0;
+
+ switch (adc) {
+ case MAX8997_ADC_GROUND:
+ ret = max8997_muic_handle_usb(info, MAX8997_USB_HOST, true);
+ break;
+ case MAX8997_ADC_MHL:
+ max8997_muic_handle_mhl(info, true);
+ break;
+ case MAX8997_ADC_JIG_USB_1:
+ case MAX8997_ADC_JIG_USB_2:
+ ret = max8997_muic_handle_usb(info, MAX8997_USB_DEVICE, true);
+ break;
+ case MAX8997_ADC_DESKDOCK:
+ case MAX8997_ADC_CARDOCK:
+ ret = max8997_muic_handle_dock(info, adc, true);
+ break;
+ case MAX8997_ADC_JIG_UART:
+ ret = max8997_muic_handle_jig_uart(info, true);
+ break;
+ case MAX8997_ADC_OPEN:
+ ret = max8997_muic_handle_adc_detach(info);
+ break;
+ default:
+ break;
+ }
+
+ info->pre_adc = adc;
+
+ return ret;
+}
+
+static int max8997_muic_handle_charger_type(struct max8997_muic_info *info,
+ enum max8997_muic_charger_type charger_type)
+{
+ struct max8997_muic_platform_data *mdata = info->muic_pdata;
+ u8 adc;
+ int ret;
+
+ ret = max8997_read_reg(info->muic, MAX8997_MUIC_REG_STATUS1, &adc);
+ if (ret) {
+ dev_err(info->dev, "failed to read muic register\n");
+ goto out;
+ }
+
+ switch (charger_type) {
+ case MAX8997_CHARGER_TYPE_NONE:
+ if (mdata->charger_callback)
+ mdata->charger_callback(false, charger_type);
+ if (info->pre_charger_type == MAX8997_CHARGER_TYPE_USB) {
+ max8997_muic_handle_usb(info,
+ MAX8997_USB_DEVICE, false);
+ }
+ break;
+ case MAX8997_CHARGER_TYPE_USB:
+ if ((adc & STATUS1_ADC_MASK) == MAX8997_ADC_OPEN) {
+ max8997_muic_handle_usb(info,
+ MAX8997_USB_DEVICE, true);
+ }
+ if (mdata->charger_callback)
+ mdata->charger_callback(true, charger_type);
+ break;
+ case MAX8997_CHARGER_TYPE_DOWNSTREAM_PORT:
+ case MAX8997_CHARGER_TYPE_DEDICATED_CHG:
+ case MAX8997_CHARGER_TYPE_500MA:
+ case MAX8997_CHARGER_TYPE_1A:
+ if (mdata->charger_callback)
+ mdata->charger_callback(true, charger_type);
+ break;
+ default:
+ break;
+ }
+
+ info->pre_charger_type = charger_type;
+out:
+ return ret;
+}
+
+static void max8997_muic_irq_work(struct work_struct *work)
+{
+ struct max8997_muic_info *info = container_of(work,
+ struct max8997_muic_info, irq_work);
+ struct max8997_platform_data *pdata =
+ dev_get_platdata(info->iodev->dev);
+ u8 status[3];
+ u8 adc, chg_type;
+
+ int irq_type = info->irq - pdata->irq_base;
+ int ret;
+
+ mutex_lock(&info->mutex);
+
+ ret = max8997_bulk_read(info->muic, MAX8997_MUIC_REG_STATUS1,
+ 3, status);
+ if (ret) {
+ dev_err(info->dev, "failed to read muic register\n");
+ mutex_unlock(&info->mutex);
+ return;
+ }
+
+ dev_dbg(info->dev, "%s: STATUS1:0x%x, 2:0x%x\n", __func__,
+ status[0], status[1]);
+
+ switch (irq_type) {
+ case MAX8997_MUICIRQ_ADC:
+ adc = status[0] & STATUS1_ADC_MASK;
+ adc >>= STATUS1_ADC_SHIFT;
+
+ max8997_muic_handle_adc(info, adc);
+ break;
+ case MAX8997_MUICIRQ_ChgTyp:
+ chg_type = status[1] & STATUS2_CHGTYP_MASK;
+ chg_type >>= STATUS2_CHGTYP_SHIFT;
+
+ max8997_muic_handle_charger_type(info, chg_type);
+ break;
+ default:
+ dev_info(info->dev, "misc interrupt: %s occurred\n",
+ muic_irqs[irq_type].name);
+ break;
+ }
+
+ mutex_unlock(&info->mutex);
+
+ return;
+}
+
+static irqreturn_t max8997_muic_irq_handler(int irq, void *data)
+{
+ struct max8997_muic_info *info = data;
+
+ dev_dbg(info->dev, "irq:%d\n", irq);
+ info->irq = irq;
+
+ schedule_work(&info->irq_work);
+
+ return IRQ_HANDLED;
+}
+
+static void max8997_muic_detect_dev(struct max8997_muic_info *info)
+{
+ int ret;
+ u8 status[2], adc, chg_type;
+
+ ret = max8997_bulk_read(info->muic, MAX8997_MUIC_REG_STATUS1,
+ 2, status);
+ if (ret) {
+ dev_err(info->dev, "failed to read muic register\n");
+ return;
+ }
+
+ dev_info(info->dev, "STATUS1:0x%x, STATUS2:0x%x\n",
+ status[0], status[1]);
+
+ adc = status[0] & STATUS1_ADC_MASK;
+ adc >>= STATUS1_ADC_SHIFT;
+
+ chg_type = status[1] & STATUS2_CHGTYP_MASK;
+ chg_type >>= STATUS2_CHGTYP_SHIFT;
+
+ max8997_muic_handle_adc(info, adc);
+ max8997_muic_handle_charger_type(info, chg_type);
+}
+
+static void max8997_initialize_device(struct max8997_muic_info *info)
+{
+ struct max8997_muic_platform_data *mdata = info->muic_pdata;
+ int i;
+
+ for (i = 0; i < mdata->num_init_data; i++) {
+ max8997_write_reg(info->muic, mdata->init_data[i].addr,
+ mdata->init_data[i].data);
+ }
+}
+
+static int __devinit max8997_muic_probe(struct platform_device *pdev)
+{
+ struct max8997_dev *iodev = dev_get_drvdata(pdev->dev.parent);
+ struct max8997_platform_data *pdata = dev_get_platdata(iodev->dev);
+ struct max8997_muic_info *info;
+ int ret, i;
+
+ info = kzalloc(sizeof(struct max8997_muic_info), GFP_KERNEL);
+ if (!info) {
+ dev_err(&pdev->dev, "failed to allocate memory\n");
+ ret = -ENOMEM;
+ goto err_kfree;
+ }
+
+ if (!pdata->muic_pdata) {
+ dev_err(&pdev->dev, "failed to get platform_data\n");
+ ret = -EINVAL;
+ goto err_pdata;
+ }
+ info->muic_pdata = pdata->muic_pdata;
+
+ info->dev = &pdev->dev;
+ info->iodev = iodev;
+ info->muic = iodev->muic;
+
+ platform_set_drvdata(pdev, info);
+ mutex_init(&info->mutex);
+
+ INIT_WORK(&info->irq_work, max8997_muic_irq_work);
+
+ for (i = 0; i < ARRAY_SIZE(muic_irqs); i++) {
+ struct max8997_muic_irq *muic_irq = &muic_irqs[i];
+
+ ret = request_threaded_irq(pdata->irq_base + muic_irq->irq,
+ NULL, max8997_muic_irq_handler,
+ 0, muic_irq->name,
+ info);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed: irq request (IRQ: %d,"
+ " error :%d)\n",
+ muic_irq->irq, ret);
+
+ for (i = i - 1; i >= 0; i--)
+ free_irq(pdata->irq_base + muic_irqs[i].irq, info);
+
+ goto err_irq;
+ }
+ }
+
+ /* Initialize registers according to platform data */
+ max8997_initialize_device(info);
+
+ /* Initial device detection */
+ max8997_muic_detect_dev(info);
+
+ return ret;
+
+err_irq:
+err_pdata:
+ kfree(info);
+err_kfree:
+ return ret;
+}
+
+static int __devexit max8997_muic_remove(struct platform_device *pdev)
+{
+ struct max8997_muic_info *info = platform_get_drvdata(pdev);
+ struct max8997_platform_data *pdata =
+ dev_get_platdata(info->iodev->dev);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(muic_irqs); i++)
+ free_irq(pdata->irq_base + muic_irqs[i].irq, info);
+ cancel_work_sync(&info->irq_work);
+
+ kfree(info);
+
+ return 0;
+}
+
+static struct platform_driver max8997_muic_driver = {
+ .driver = {
+ .name = "max8997-muic",
+ .owner = THIS_MODULE,
+ },
+ .probe = max8997_muic_probe,
+ .remove = __devexit_p(max8997_muic_remove),
+};
+
+static int __init max8997_muic_init(void)
+{
+ return platform_driver_register(&max8997_muic_driver);
+}
+module_init(max8997_muic_init);
+
+static void __exit max8997_muic_exit(void)
+{
+ platform_driver_unregister(&max8997_muic_driver);
+}
+module_exit(max8997_muic_exit);
+
+MODULE_DESCRIPTION("Maxim MAX8997 MUIC driver");
+MODULE_AUTHOR("Donggeun Kim <dg77.kim@samsung.com>");
+MODULE_LICENSE("GPL");
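
The probe above refuses to bind without pdata->muic_pdata, so a board file is expected to supply the MUIC platform data carrying the callbacks used by the handlers. A hedged sketch, using only the members referenced in this driver; the authoritative declaration and callback prototypes live in <linux/mfd/max8997.h>:

/* Hypothetical board-level wiring of the MUIC callbacks. */
static void board_muic_usb_cb(enum max8997_muic_usb_type usb_type, bool attached)
{
	pr_info("MUIC: USB type %d %s\n", usb_type,
		attached ? "attached" : "detached");
}

static struct max8997_muic_platform_data board_muic_pdata = {
	.usb_callback = board_muic_usb_cb,
	/* .uart_callback, .charger_callback, .init_data, ... as needed */
};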
diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
index 7768b87d995b..950dbe9ecb36 100644
--- a/drivers/misc/sgi-gru/gruprocfs.c
+++ b/drivers/misc/sgi-gru/gruprocfs.c
@@ -324,7 +324,7 @@ static const struct file_operations gru_fops = {
static struct proc_entry {
char *name;
- int mode;
+ umode_t mode;
const struct file_operations *fops;
struct proc_dir_entry *entry;
} proc_files[] = {
diff --git a/drivers/misc/sgi-xp/xpnet.c b/drivers/misc/sgi-xp/xpnet.c
index 42f067347bc7..3fac67a5204c 100644
--- a/drivers/misc/sgi-xp/xpnet.c
+++ b/drivers/misc/sgi-xp/xpnet.c
@@ -576,7 +576,7 @@ xpnet_init(void)
* report an error if the data is not retrievable and the
* packet will be dropped.
*/
- xpnet_device->features = NETIF_F_NO_CSUM;
+ xpnet_device->features = NETIF_F_HW_CSUM;
result = register_netdev(xpnet_device);
if (result != 0) {
diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c
index ba168a7d54d4..2b62232c2c6a 100644
--- a/drivers/misc/ti-st/st_core.c
+++ b/drivers/misc/ti-st/st_core.c
@@ -137,6 +137,8 @@ void st_send_frame(unsigned char chnl_id, struct st_data_s *st_gdata)
* st_reg_complete -
* to call registration complete callbacks
* of all protocol stack drivers
+ * This function is called with the spin lock held; protocol drivers are
+ * only expected to complete their waits and do nothing more than that.
*/
void st_reg_complete(struct st_data_s *st_gdata, char err)
{
@@ -538,11 +540,12 @@ long st_register(struct st_proto_s *new_proto)
set_bit(ST_REG_IN_PROGRESS, &st_gdata->st_state);
st_recv = st_kim_recv;
+ /* enable the ST LL - to set default chip state */
+ st_ll_enable(st_gdata);
+
/* release lock previously held - re-locked below */
spin_unlock_irqrestore(&st_gdata->lock, flags);
- /* enable the ST LL - to set default chip state */
- st_ll_enable(st_gdata);
/* this may take a while to complete
* since it involves BT fw download
*/
@@ -553,10 +556,13 @@ long st_register(struct st_proto_s *new_proto)
(test_bit(ST_REG_PENDING, &st_gdata->st_state))) {
pr_err(" KIM failure complete callback ");
st_reg_complete(st_gdata, err);
+ clear_bit(ST_REG_PENDING, &st_gdata->st_state);
}
return -EINVAL;
}
+ spin_lock_irqsave(&st_gdata->lock, flags);
+
clear_bit(ST_REG_IN_PROGRESS, &st_gdata->st_state);
st_recv = st_int_recv;
@@ -576,10 +582,10 @@ long st_register(struct st_proto_s *new_proto)
if (st_gdata->is_registered[new_proto->chnl_id] == true) {
pr_err(" proto %d already registered ",
new_proto->chnl_id);
+ spin_unlock_irqrestore(&st_gdata->lock, flags);
return -EALREADY;
}
- spin_lock_irqsave(&st_gdata->lock, flags);
add_channel_to_table(st_gdata, new_proto);
st_gdata->protos_registered++;
new_proto->write = st_write;
@@ -619,7 +625,7 @@ long st_unregister(struct st_proto_s *proto)
spin_lock_irqsave(&st_gdata->lock, flags);
- if (st_gdata->list[proto->chnl_id] == NULL) {
+ if (st_gdata->is_registered[proto->chnl_id] == false) {
pr_err(" chnl_id %d not registered", proto->chnl_id);
spin_unlock_irqrestore(&st_gdata->lock, flags);
return -EPROTONOSUPPORT;
@@ -629,6 +635,10 @@ long st_unregister(struct st_proto_s *proto)
remove_channel_from_table(st_gdata, proto);
spin_unlock_irqrestore(&st_gdata->lock, flags);
+ /* paranoid check */
+ if (st_gdata->protos_registered < ST_EMPTY)
+ st_gdata->protos_registered = ST_EMPTY;
+
if ((st_gdata->protos_registered == ST_EMPTY) &&
(!test_bit(ST_REG_PENDING, &st_gdata->st_state))) {
pr_info(" all chnl_ids unregistered ");
diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c
index 43ef8d162f2d..a7a861ceee2d 100644
--- a/drivers/misc/ti-st/st_kim.c
+++ b/drivers/misc/ti-st/st_kim.c
@@ -469,37 +469,21 @@ long st_kim_start(void *kim_data)
/* wait for ldisc to be installed */
err = wait_for_completion_timeout(&kim_gdata->ldisc_installed,
msecs_to_jiffies(LDISC_TIME));
- if (!err) { /* timeout */
- pr_err("line disc installation timed out ");
- kim_gdata->ldisc_install = 0;
- pr_info("ldisc_install = 0");
- sysfs_notify(&kim_gdata->kim_pdev->dev.kobj,
- NULL, "install");
- /* the following wait is never going to be completed,
- * since the ldisc was never installed, hence serving
- * as a mdelay of LDISC_TIME msecs */
- err = wait_for_completion_timeout
- (&kim_gdata->ldisc_installed,
- msecs_to_jiffies(LDISC_TIME));
- err = -ETIMEDOUT;
+ if (!err) {
+ /* ldisc installation timeout,
+ * flush uart, power cycle BT_EN */
+ pr_err("ldisc installation timeout");
+ err = st_kim_stop(kim_gdata);
continue;
} else {
/* ldisc installed now */
- pr_info(" line discipline installed ");
+ pr_info("line discipline installed");
err = download_firmware(kim_gdata);
if (err != 0) {
+ /* ldisc installed but fw download failed,
+ * flush uart & power cycle BT_EN */
pr_err("download firmware failed");
- kim_gdata->ldisc_install = 0;
- pr_info("ldisc_install = 0");
- sysfs_notify(&kim_gdata->kim_pdev->dev.kobj,
- NULL, "install");
- /* this wait might be completed, though in the
- * tty_close() since the ldisc is already
- * installed */
- err = wait_for_completion_timeout
- (&kim_gdata->ldisc_installed,
- msecs_to_jiffies(LDISC_TIME));
- err = -EINVAL;
+ err = st_kim_stop(kim_gdata);
continue;
} else { /* on success don't retry */
break;
@@ -510,8 +494,14 @@ long st_kim_start(void *kim_data)
}
/**
- * st_kim_stop - called from ST Core, on the last un-registration
- * toggle low the chip enable gpio
+ * st_kim_stop - stop communication with the chip.
+ * This can be called from ST Core/KIM on either
+ * (a) the last un-register, when the chip need not be powered thereafter, or
+ * (b) failure to install the ldisc or to download firmware.
+ * The function is responsible for (a) notifying UIM about un-installation,
+ * (b) flushing the UART if the ldisc was installed,
+ * (c) resetting BT_EN - pulling nshutdown low at the end, and
+ * (d) invoking the platform's chip-disabling routine.
*/
long st_kim_stop(void *kim_data)
{
@@ -519,12 +509,16 @@ long st_kim_stop(void *kim_data)
struct kim_data_s *kim_gdata = (struct kim_data_s *)kim_data;
struct ti_st_plat_data *pdata =
kim_gdata->kim_pdev->dev.platform_data;
+ struct tty_struct *tty = kim_gdata->core_data->tty;
INIT_COMPLETION(kim_gdata->ldisc_installed);
- /* Flush any pending characters in the driver and discipline. */
- tty_ldisc_flush(kim_gdata->core_data->tty);
- tty_driver_flush_buffer(kim_gdata->core_data->tty);
+ if (tty) { /* can be called before ldisc is installed */
+ /* Flush any pending characters in the driver and discipline. */
+ tty_ldisc_flush(tty);
+ tty_driver_flush_buffer(tty);
+ tty->ops->flush_buffer(tty);
+ }
/* send uninstall notification to UIM */
pr_info("ldisc_install = 0");
@@ -579,6 +573,28 @@ static ssize_t show_install(struct device *dev,
return sprintf(buf, "%d\n", kim_data->ldisc_install);
}
+#ifdef DEBUG
+static ssize_t store_dev_name(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct kim_data_s *kim_data = dev_get_drvdata(dev);
+ pr_debug("storing dev name >%s<", buf);
+ strncpy(kim_data->dev_name, buf, count);
+ pr_debug("stored dev name >%s<", kim_data->dev_name);
+ return count;
+}
+
+static ssize_t store_baud_rate(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct kim_data_s *kim_data = dev_get_drvdata(dev);
+ pr_debug("storing baud rate >%s<", buf);
+ sscanf(buf, "%ld", &kim_data->baud_rate);
+ pr_debug("stored baud rate >%ld<", kim_data->baud_rate);
+ return count;
+}
+#endif /* if DEBUG */
+
static ssize_t show_dev_name(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -605,10 +621,18 @@ static struct kobj_attribute ldisc_install =
__ATTR(install, 0444, (void *)show_install, NULL);
static struct kobj_attribute uart_dev_name =
+#ifdef DEBUG /* TODO: move this to debug-fs if possible */
+__ATTR(dev_name, 0644, (void *)show_dev_name, (void *)store_dev_name);
+#else
__ATTR(dev_name, 0444, (void *)show_dev_name, NULL);
+#endif
static struct kobj_attribute uart_baud_rate =
+#ifdef DEBUG /* TODO: move to debugfs */
+__ATTR(baud_rate, 0644, (void *)show_baud_rate, (void *)store_baud_rate);
+#else
__ATTR(baud_rate, 0444, (void *)show_baud_rate, NULL);
+#endif
static struct kobj_attribute uart_flow_cntrl =
__ATTR(flow_cntrl, 0444, (void *)show_flow_cntrl, NULL);
diff --git a/drivers/mmc/Makefile b/drivers/mmc/Makefile
index 12eef393e216..400756ec7c49 100644
--- a/drivers/mmc/Makefile
+++ b/drivers/mmc/Makefile
@@ -6,5 +6,4 @@ subdir-ccflags-$(CONFIG_MMC_DEBUG) := -DDEBUG
obj-$(CONFIG_MMC) += core/
obj-$(CONFIG_MMC) += card/
-obj-$(CONFIG_MMC) += host/
-
+obj-$(subst m,y,$(CONFIG_MMC)) += host/
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index a1cb21f95302..0cad48a284a8 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -107,6 +107,8 @@ struct mmc_blk_data {
*/
unsigned int part_curr;
struct device_attribute force_ro;
+ struct device_attribute power_ro_lock;
+ int area_type;
};
static DEFINE_MUTEX(open_lock);
@@ -119,6 +121,7 @@ enum mmc_blk_status {
MMC_BLK_ABORT,
MMC_BLK_DATA_ERR,
MMC_BLK_ECC_ERR,
+ MMC_BLK_NOMEDIUM,
};
module_param(perdev_minors, int, 0444);
@@ -165,6 +168,70 @@ static void mmc_blk_put(struct mmc_blk_data *md)
mutex_unlock(&open_lock);
}
+static ssize_t power_ro_lock_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int ret;
+ struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+ struct mmc_card *card = md->queue.card;
+ int locked = 0;
+
+ if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN)
+ locked = 2;
+ else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN)
+ locked = 1;
+
+ ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);
+
+ return ret;
+}
+
+static ssize_t power_ro_lock_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int ret;
+ struct mmc_blk_data *md, *part_md;
+ struct mmc_card *card;
+ unsigned long set;
+
+ if (kstrtoul(buf, 0, &set))
+ return -EINVAL;
+
+ if (set != 1)
+ return count;
+
+ md = mmc_blk_get(dev_to_disk(dev));
+ card = md->queue.card;
+
+ mmc_claim_host(card->host);
+
+ ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
+ card->ext_csd.boot_ro_lock |
+ EXT_CSD_BOOT_WP_B_PWR_WP_EN,
+ card->ext_csd.part_time);
+ if (ret)
+ pr_err("%s: Locking boot partition ro until next power on failed: %d\n", md->disk->disk_name, ret);
+ else
+ card->ext_csd.boot_ro_lock |= EXT_CSD_BOOT_WP_B_PWR_WP_EN;
+
+ mmc_release_host(card->host);
+
+ if (!ret) {
+ pr_info("%s: Locking boot partition ro until next power on\n",
+ md->disk->disk_name);
+ set_disk_ro(md->disk, 1);
+
+ list_for_each_entry(part_md, &md->part, part)
+ if (part_md->area_type == MMC_BLK_DATA_AREA_BOOT) {
+ pr_info("%s: Locking boot partition ro until next power on\n", part_md->disk->disk_name);
+ set_disk_ro(part_md->disk, 1);
+ }
+ }
+
+ mmc_blk_put(md);
+ return count;
+}
+
static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -266,6 +333,9 @@ static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
goto idata_err;
}
+ if (!idata->buf_bytes)
+ return idata;
+
idata->buf = kzalloc(idata->buf_bytes, GFP_KERNEL);
if (!idata->buf) {
err = -ENOMEM;
@@ -312,25 +382,6 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
if (IS_ERR(idata))
return PTR_ERR(idata);
- cmd.opcode = idata->ic.opcode;
- cmd.arg = idata->ic.arg;
- cmd.flags = idata->ic.flags;
-
- data.sg = &sg;
- data.sg_len = 1;
- data.blksz = idata->ic.blksz;
- data.blocks = idata->ic.blocks;
-
- sg_init_one(data.sg, idata->buf, idata->buf_bytes);
-
- if (idata->ic.write_flag)
- data.flags = MMC_DATA_WRITE;
- else
- data.flags = MMC_DATA_READ;
-
- mrq.cmd = &cmd;
- mrq.data = &data;
-
md = mmc_blk_get(bdev->bd_disk);
if (!md) {
err = -EINVAL;
@@ -343,6 +394,48 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
goto cmd_done;
}
+ cmd.opcode = idata->ic.opcode;
+ cmd.arg = idata->ic.arg;
+ cmd.flags = idata->ic.flags;
+
+ if (idata->buf_bytes) {
+ data.sg = &sg;
+ data.sg_len = 1;
+ data.blksz = idata->ic.blksz;
+ data.blocks = idata->ic.blocks;
+
+ sg_init_one(data.sg, idata->buf, idata->buf_bytes);
+
+ if (idata->ic.write_flag)
+ data.flags = MMC_DATA_WRITE;
+ else
+ data.flags = MMC_DATA_READ;
+
+ /* data.flags must already be set before doing this. */
+ mmc_set_data_timeout(&data, card);
+
+ /* Allow overriding the timeout_ns for empirical tuning. */
+ if (idata->ic.data_timeout_ns)
+ data.timeout_ns = idata->ic.data_timeout_ns;
+
+ if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
+ /*
+ * Pretend this is a data transfer and rely on the
+ * host driver to compute timeout. When all host
+ * drivers support cmd.cmd_timeout for R1B, this
+ * can be changed to:
+ *
+ * mrq.data = NULL;
+ * cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
+ */
+ data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
+ }
+
+ mrq.data = &data;
+ }
+
+ mrq.cmd = &cmd;
+
mmc_claim_host(card->host);
if (idata->ic.is_acmd) {
@@ -351,24 +444,6 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
goto cmd_rel_host;
}
- /* data.flags must already be set before doing this. */
- mmc_set_data_timeout(&data, card);
- /* Allow overriding the timeout_ns for empirical tuning. */
- if (idata->ic.data_timeout_ns)
- data.timeout_ns = idata->ic.data_timeout_ns;
-
- if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
- /*
- * Pretend this is a data transfer and rely on the host driver
- * to compute timeout. When all host drivers support
- * cmd.cmd_timeout for R1B, this can be changed to:
- *
- * mrq.data = NULL;
- * cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
- */
- data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
- }
-
mmc_wait_for_req(card->host, &mrq);
if (cmd.error) {
@@ -565,6 +640,7 @@ static int get_card_status(struct mmc_card *card, u32 *status, int retries)
return err;
}
+#define ERR_NOMEDIUM 3
#define ERR_RETRY 2
#define ERR_ABORT 1
#define ERR_CONTINUE 0
@@ -632,6 +708,9 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
u32 status, stop_status = 0;
int err, retry;
+ if (mmc_card_removed(card))
+ return ERR_NOMEDIUM;
+
/*
* Try to get card status which indicates both the card state
* and why there was no response. If the first attempt fails,
@@ -648,8 +727,12 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
}
/* We couldn't get a response from the card. Give up. */
- if (err)
+ if (err) {
+ /* Check if the card is removed */
+ if (mmc_detect_card_removed(card->host))
+ return ERR_NOMEDIUM;
return ERR_ABORT;
+ }
/* Flag ECC errors */
if ((status & R1_CARD_ECC_FAILED) ||
@@ -922,6 +1005,8 @@ static int mmc_blk_err_check(struct mmc_card *card,
return MMC_BLK_RETRY;
case ERR_ABORT:
return MMC_BLK_ABORT;
+ case ERR_NOMEDIUM:
+ return MMC_BLK_NOMEDIUM;
case ERR_CONTINUE:
break;
}
@@ -1255,6 +1340,8 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
if (!ret)
goto start_new_req;
break;
+ case MMC_BLK_NOMEDIUM:
+ goto cmd_abort;
}
if (ret) {
@@ -1271,6 +1358,8 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
cmd_abort:
spin_lock_irq(&md->lock);
+ if (mmc_card_removed(card))
+ req->cmd_flags |= REQ_QUIET;
while (ret)
ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
spin_unlock_irq(&md->lock);
@@ -1339,7 +1428,8 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
struct device *parent,
sector_t size,
bool default_ro,
- const char *subname)
+ const char *subname,
+ int area_type)
{
struct mmc_blk_data *md;
int devidx, ret;
@@ -1364,11 +1454,12 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
if (!subname) {
md->name_idx = find_first_zero_bit(name_use, max_devices);
__set_bit(md->name_idx, name_use);
- }
- else
+ } else
md->name_idx = ((struct mmc_blk_data *)
dev_to_disk(parent)->private_data)->name_idx;
+ md->area_type = area_type;
+
/*
* Set the read-only status based on the supported commands
* and the write protect switch.
@@ -1462,7 +1553,8 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
size = card->csd.capacity << (card->csd.read_blkbits - 9);
}
- md = mmc_blk_alloc_req(card, &card->dev, size, false, NULL);
+ md = mmc_blk_alloc_req(card, &card->dev, size, false, NULL,
+ MMC_BLK_DATA_AREA_MAIN);
return md;
}
@@ -1471,13 +1563,14 @@ static int mmc_blk_alloc_part(struct mmc_card *card,
unsigned int part_type,
sector_t size,
bool default_ro,
- const char *subname)
+ const char *subname,
+ int area_type)
{
char cap_str[10];
struct mmc_blk_data *part_md;
part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
- subname);
+ subname, area_type);
if (IS_ERR(part_md))
return PTR_ERR(part_md);
part_md->part_type = part_type;
@@ -1510,7 +1603,8 @@ static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
card->part[idx].part_cfg,
card->part[idx].size >> 9,
card->part[idx].force_ro,
- card->part[idx].name);
+ card->part[idx].name,
+ card->part[idx].area_type);
if (ret)
return ret;
}
@@ -1539,9 +1633,16 @@ mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card)
static void mmc_blk_remove_req(struct mmc_blk_data *md)
{
+ struct mmc_card *card;
+
if (md) {
+ card = md->queue.card;
if (md->disk->flags & GENHD_FL_UP) {
device_remove_file(disk_to_dev(md->disk), &md->force_ro);
+ if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
+ card->ext_csd.boot_ro_lockable)
+ device_remove_file(disk_to_dev(md->disk),
+ &md->power_ro_lock);
/* Stop new requests from getting into the queue */
del_gendisk(md->disk);
@@ -1570,6 +1671,7 @@ static void mmc_blk_remove_parts(struct mmc_card *card,
static int mmc_add_disk(struct mmc_blk_data *md)
{
int ret;
+ struct mmc_card *card = md->queue.card;
add_disk(md->disk);
md->force_ro.show = force_ro_show;
@@ -1579,18 +1681,53 @@ static int mmc_add_disk(struct mmc_blk_data *md)
md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
if (ret)
- del_gendisk(md->disk);
+ goto force_ro_fail;
+
+ if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
+ card->ext_csd.boot_ro_lockable) {
+ mode_t mode;
+
+ if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_DIS)
+ mode = S_IRUGO;
+ else
+ mode = S_IRUGO | S_IWUSR;
+
+ md->power_ro_lock.show = power_ro_lock_show;
+ md->power_ro_lock.store = power_ro_lock_store;
+ md->power_ro_lock.attr.mode = mode;
+ md->power_ro_lock.attr.name =
+ "ro_lock_until_next_power_on";
+ ret = device_create_file(disk_to_dev(md->disk),
+ &md->power_ro_lock);
+ if (ret)
+ goto power_ro_lock_fail;
+ }
+ return ret;
+
+power_ro_lock_fail:
+ device_remove_file(disk_to_dev(md->disk), &md->force_ro);
+force_ro_fail:
+ del_gendisk(md->disk);
return ret;
}
+#define CID_MANFID_SANDISK 0x2
+#define CID_MANFID_TOSHIBA 0x11
+#define CID_MANFID_MICRON 0x13
+
static const struct mmc_fixup blk_fixups[] =
{
- MMC_FIXUP("SEM02G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
- MMC_FIXUP("SEM04G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
- MMC_FIXUP("SEM08G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
- MMC_FIXUP("SEM16G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
- MMC_FIXUP("SEM32G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
+ MMC_FIXUP("SEM02G", CID_MANFID_SANDISK, 0x100, add_quirk,
+ MMC_QUIRK_INAND_CMD38),
+ MMC_FIXUP("SEM04G", CID_MANFID_SANDISK, 0x100, add_quirk,
+ MMC_QUIRK_INAND_CMD38),
+ MMC_FIXUP("SEM08G", CID_MANFID_SANDISK, 0x100, add_quirk,
+ MMC_QUIRK_INAND_CMD38),
+ MMC_FIXUP("SEM16G", CID_MANFID_SANDISK, 0x100, add_quirk,
+ MMC_QUIRK_INAND_CMD38),
+ MMC_FIXUP("SEM32G", CID_MANFID_SANDISK, 0x100, add_quirk,
+ MMC_QUIRK_INAND_CMD38),
/*
* Some MMC cards experience performance degradation with CMD23
@@ -1600,12 +1737,20 @@ static const struct mmc_fixup blk_fixups[] =
*
* N.B. This doesn't affect SD cards.
*/
- MMC_FIXUP("MMC08G", 0x11, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_FIXUP("MMC08G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
MMC_QUIRK_BLK_NO_CMD23),
- MMC_FIXUP("MMC16G", 0x11, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_FIXUP("MMC16G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
MMC_QUIRK_BLK_NO_CMD23),
- MMC_FIXUP("MMC32G", 0x11, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
MMC_QUIRK_BLK_NO_CMD23),
+
+ /*
+ * Some Micron MMC cards need a longer data read timeout than
+ * indicated in the CSD.
+ */
+ MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc,
+ MMC_QUIRK_LONG_READ_TIME),
+
END_FIXUP
};
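
Aside: the quirk table above matches cards by product name and CID manufacturer ID (CID_MANFID_*). As a rough illustration of that matching idea only -- this is not the kernel's mmc_fixup_device() implementation, and the struct layout, field names and values below are hypothetical -- a minimal standalone sketch:

/* Illustrative sketch only -- not the kernel's fixup code. */
#include <stdio.h>
#include <string.h>

#define ANY_ID (~0u)

struct example_cid {
	unsigned int manfid;
	unsigned int oemid;
	char name[8];
};

struct example_fixup {
	const char *name;	/* product name, or NULL to match any */
	unsigned int manfid;	/* manufacturer ID, or ANY_ID */
	unsigned int oemid;	/* OEM ID, or ANY_ID */
	unsigned int quirk;	/* quirk flag(s) to apply */
};

static const struct example_fixup fixups[] = {
	{ "SEM02G", 0x2, 0x100, 0x1 },	/* match name + manfid + oemid */
	{ NULL, 0x13, ANY_ID, 0x2 },	/* match by manfid only */
};

static unsigned int example_apply_fixups(const struct example_cid *cid)
{
	unsigned int quirks = 0;
	size_t i;

	for (i = 0; i < sizeof(fixups) / sizeof(fixups[0]); i++) {
		const struct example_fixup *f = &fixups[i];

		if (f->name && strcmp(f->name, cid->name))
			continue;
		if (f->manfid != ANY_ID && f->manfid != cid->manfid)
			continue;
		if (f->oemid != ANY_ID && f->oemid != cid->oemid)
			continue;
		quirks |= f->quirk;
	}
	return quirks;
}

int main(void)
{
	struct example_cid cid = { .manfid = 0x2, .oemid = 0x100, .name = "SEM02G" };

	printf("quirks: %#x\n", example_apply_fixups(&cid));
	return 0;
}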
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index b038c4a9468b..759714ed6bee 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -1581,6 +1581,7 @@ static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill)
t->max_segs = test->card->host->max_segs;
t->max_seg_sz = test->card->host->max_seg_size;
+ t->max_seg_sz -= t->max_seg_sz % 512;
t->max_tfr = t->max_sz;
if (t->max_tfr >> 9 > test->card->host->max_blk_count)
@@ -2949,7 +2950,7 @@ static void mmc_test_free_dbgfs_file(struct mmc_card *card)
}
static int __mmc_test_register_dbgfs_file(struct mmc_card *card,
- const char *name, mode_t mode, const struct file_operations *fops)
+ const char *name, umode_t mode, const struct file_operations *fops)
{
struct dentry *file = NULL;
struct mmc_test_dbgfs_file *df;
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index dcad59cbfef1..2517547b4366 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -29,6 +29,8 @@
*/
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
+ struct mmc_queue *mq = q->queuedata;
+
/*
* We only like normal block requests and discards.
*/
@@ -37,6 +39,9 @@ static int mmc_prep_request(struct request_queue *q, struct request *req)
return BLKPREP_KILL;
}
+ if (mq && mmc_card_removed(mq->card))
+ return BLKPREP_KILL;
+
req->cmd_flags |= REQ_DONTPREP;
return BLKPREP_OK;
diff --git a/drivers/mmc/core/Makefile b/drivers/mmc/core/Makefile
index 639501970b41..dca4428380f1 100644
--- a/drivers/mmc/core/Makefile
+++ b/drivers/mmc/core/Makefile
@@ -7,6 +7,6 @@ mmc_core-y := core.o bus.o host.o \
mmc.o mmc_ops.o sd.o sd_ops.o \
sdio.o sdio_ops.o sdio_bus.o \
sdio_cis.o sdio_io.o sdio_irq.o \
- quirks.o
+ quirks.o cd-gpio.o
mmc_core-$(CONFIG_DEBUG_FS) += debugfs.o
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index 6be49249895a..5d011a39dfff 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -303,10 +303,11 @@ int mmc_add_card(struct mmc_card *card)
mmc_card_ddr_mode(card) ? "DDR " : "",
type);
} else {
- printk(KERN_INFO "%s: new %s%s%s card at address %04x\n",
+ pr_info("%s: new %s%s%s%s card at address %04x\n",
mmc_hostname(card->host),
- mmc_sd_card_uhs(card) ? "ultra high speed " :
+ mmc_card_uhs(card) ? "ultra high speed " :
(mmc_card_highspeed(card) ? "high speed " : ""),
+ (mmc_card_hs200(card) ? "HS200 " : ""),
mmc_card_ddr_mode(card) ? "DDR " : "",
type, card->rca);
}
diff --git a/drivers/mmc/core/cd-gpio.c b/drivers/mmc/core/cd-gpio.c
new file mode 100644
index 000000000000..082202ae4a03
--- /dev/null
+++ b/drivers/mmc/core/cd-gpio.c
@@ -0,0 +1,74 @@
+/*
+ * Generic GPIO card-detect helper
+ *
+ * Copyright (C) 2011, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/jiffies.h>
+#include <linux/mmc/host.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+struct mmc_cd_gpio {
+ unsigned int gpio;
+ char label[0];
+};
+
+static irqreturn_t mmc_cd_gpio_irqt(int irq, void *dev_id)
+{
+ /* Schedule a card detection after a debounce timeout */
+ mmc_detect_change(dev_id, msecs_to_jiffies(100));
+ return IRQ_HANDLED;
+}
+
+int mmc_cd_gpio_request(struct mmc_host *host, unsigned int gpio,
+ unsigned int irq, unsigned long flags)
+{
+ size_t len = strlen(dev_name(host->parent)) + 4;
+ struct mmc_cd_gpio *cd = kmalloc(sizeof(*cd) + len, GFP_KERNEL);
+ int ret;
+
+ if (!cd)
+ return -ENOMEM;
+
+ snprintf(cd->label, len, "%s cd", dev_name(host->parent));
+
+ ret = gpio_request_one(gpio, GPIOF_DIR_IN, cd->label);
+ if (ret < 0)
+ goto egpioreq;
+
+ ret = request_threaded_irq(irq, NULL, mmc_cd_gpio_irqt,
+ flags, cd->label, host);
+ if (ret < 0)
+ goto eirqreq;
+
+ cd->gpio = gpio;
+ host->hotplug.irq = irq;
+ host->hotplug.handler_priv = cd;
+
+ return 0;
+
+eirqreq:
+ gpio_free(gpio);
+egpioreq:
+ kfree(cd);
+ return ret;
+}
+EXPORT_SYMBOL(mmc_cd_gpio_request);
+
+void mmc_cd_gpio_free(struct mmc_host *host)
+{
+ struct mmc_cd_gpio *cd = host->hotplug.handler_priv;
+
+ free_irq(host->hotplug.irq, host);
+ gpio_free(cd->gpio);
+ kfree(cd);
+}
+EXPORT_SYMBOL(mmc_cd_gpio_free);
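
A minimal sketch of how a host-controller driver might wire up the new card-detect helpers from its probe and remove paths; the wrapper names, the header path and the use of gpio_to_irq() here are assumptions for illustration, not taken from the patch:

/* Hypothetical host-driver glue for the cd-gpio helpers (sketch only). */
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/mmc/cd-gpio.h>		/* assumed header for the new helpers */
#include <linux/mmc/host.h>

static int example_setup_cd(struct mmc_host *mmc, unsigned int cd_gpio)
{
	/* Trigger on both edges so both insertion and removal are reported. */
	return mmc_cd_gpio_request(mmc, cd_gpio, gpio_to_irq(cd_gpio),
				   IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING);
}

static void example_teardown_cd(struct mmc_host *mmc)
{
	mmc_cd_gpio_free(mmc);
}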
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 5278ffb20e74..f545a3e6eb80 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -48,7 +48,7 @@ static struct workqueue_struct *workqueue;
* performance cost, and for other reasons may not always be desired.
* So we allow it to be disabled.
*/
-int use_spi_crc = 1;
+bool use_spi_crc = 1;
module_param(use_spi_crc, bool, 0);
/*
@@ -58,9 +58,9 @@ module_param(use_spi_crc, bool, 0);
* overridden if necessary.
*/
#ifdef CONFIG_MMC_UNSAFE_RESUME
-int mmc_assume_removable;
+bool mmc_assume_removable;
#else
-int mmc_assume_removable = 1;
+bool mmc_assume_removable = 1;
#endif
EXPORT_SYMBOL(mmc_assume_removable);
module_param_named(removable, mmc_assume_removable, bool, 0644);
@@ -140,7 +140,7 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
cmd->retries = 0;
}
- if (err && cmd->retries) {
+ if (err && cmd->retries && !mmc_card_removed(host->card)) {
/*
* Request starter must handle retries - see
* mmc_wait_for_req_done().
@@ -247,6 +247,11 @@ static void __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
{
init_completion(&mrq->completion);
mrq->done = mmc_wait_done;
+ if (mmc_card_removed(host->card)) {
+ mrq->cmd->error = -ENOMEDIUM;
+ complete(&mrq->completion);
+ return;
+ }
mmc_start_request(host, mrq);
}
@@ -259,7 +264,8 @@ static void mmc_wait_for_req_done(struct mmc_host *host,
wait_for_completion(&mrq->completion);
cmd = mrq->cmd;
- if (!cmd->error || !cmd->retries)
+ if (!cmd->error || !cmd->retries ||
+ mmc_card_removed(host->card))
break;
pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
@@ -529,6 +535,18 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
data->timeout_clks = 0;
}
}
+
+ /*
+ * Some cards require a longer data read timeout than indicated in the
+ * CSD. Address this by setting the read timeout to a "reasonably high"
+ * value. For the cards tested, 300ms has proven enough. If necessary,
+ * this value can be increased further for other problematic cards.
+ */
+ if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
+ data->timeout_ns = 300000000;
+ data->timeout_clks = 0;
+ }
+
/*
* Some cards need very high timeouts if driven in SPI mode.
* The worst observed timeout was 900ms after writing a
@@ -1213,6 +1231,46 @@ void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
mmc_host_clk_release(host);
}
+static void mmc_poweroff_notify(struct mmc_host *host)
+{
+ struct mmc_card *card;
+ unsigned int timeout;
+ unsigned int notify_type = EXT_CSD_NO_POWER_NOTIFICATION;
+ int err = 0;
+
+ card = host->card;
+
+ /*
+ * Send the power notify command only if the card
+ * is MMC and the notify state is powered ON.
+ */
+ if (card && mmc_card_mmc(card) &&
+ (card->poweroff_notify_state == MMC_POWERED_ON)) {
+
+ if (host->power_notify_type == MMC_HOST_PW_NOTIFY_SHORT) {
+ notify_type = EXT_CSD_POWER_OFF_SHORT;
+ timeout = card->ext_csd.generic_cmd6_time;
+ card->poweroff_notify_state = MMC_POWEROFF_SHORT;
+ } else {
+ notify_type = EXT_CSD_POWER_OFF_LONG;
+ timeout = card->ext_csd.power_off_longtime;
+ card->poweroff_notify_state = MMC_POWEROFF_LONG;
+ }
+
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_POWER_OFF_NOTIFICATION,
+ notify_type, timeout);
+
+ if (err && err != -EBADMSG)
+ pr_err("Device failed to respond within %d poweroff "
+ "time. Forcefully powering down the device\n",
+ timeout);
+
+ /* Set the card state to no notification after the poweroff */
+ card->poweroff_notify_state = MMC_NO_POWER_NOTIFICATION;
+ }
+}
+
/*
* Apply power to the MMC stack. This is a two-stage process.
* First, we enable power to the card without the clock running.
@@ -1269,42 +1327,12 @@ static void mmc_power_up(struct mmc_host *host)
void mmc_power_off(struct mmc_host *host)
{
- struct mmc_card *card;
- unsigned int notify_type;
- unsigned int timeout;
- int err;
-
mmc_host_clk_hold(host);
- card = host->card;
host->ios.clock = 0;
host->ios.vdd = 0;
- if (card && mmc_card_mmc(card) &&
- (card->poweroff_notify_state == MMC_POWERED_ON)) {
-
- if (host->power_notify_type == MMC_HOST_PW_NOTIFY_SHORT) {
- notify_type = EXT_CSD_POWER_OFF_SHORT;
- timeout = card->ext_csd.generic_cmd6_time;
- card->poweroff_notify_state = MMC_POWEROFF_SHORT;
- } else {
- notify_type = EXT_CSD_POWER_OFF_LONG;
- timeout = card->ext_csd.power_off_longtime;
- card->poweroff_notify_state = MMC_POWEROFF_LONG;
- }
-
- err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
- EXT_CSD_POWER_OFF_NOTIFICATION,
- notify_type, timeout);
-
- if (err && err != -EBADMSG)
- pr_err("Device failed to respond within %d poweroff "
- "time. Forcefully powering down the device\n",
- timeout);
-
- /* Set the card state to no notification after the poweroff */
- card->poweroff_notify_state = MMC_NO_POWER_NOTIFICATION;
- }
+ mmc_poweroff_notify(host);
/*
* Reset ocr mask to be the highest possible voltage supported for
@@ -1434,7 +1462,7 @@ void mmc_detect_change(struct mmc_host *host, unsigned long delay)
WARN_ON(host->removed);
spin_unlock_irqrestore(&host->lock, flags);
#endif
-
+ host->detect_change = 1;
mmc_schedule_delayed_work(&host->detect, delay);
}
@@ -2027,6 +2055,43 @@ static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
return -EIO;
}
+int _mmc_detect_card_removed(struct mmc_host *host)
+{
+ int ret;
+
+ if ((host->caps & MMC_CAP_NONREMOVABLE) || !host->bus_ops->alive)
+ return 0;
+
+ if (!host->card || mmc_card_removed(host->card))
+ return 1;
+
+ ret = host->bus_ops->alive(host);
+ if (ret) {
+ mmc_card_set_removed(host->card);
+ pr_debug("%s: card remove detected\n", mmc_hostname(host));
+ }
+
+ return ret;
+}
+
+int mmc_detect_card_removed(struct mmc_host *host)
+{
+ struct mmc_card *card = host->card;
+
+ WARN_ON(!host->claimed);
+ /*
+ * The card will be considered unchanged unless we have been asked to
+ * detect a change or the host requires polling to provide card detection.
+ */
+ if (card && !host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL))
+ return mmc_card_removed(card);
+
+ host->detect_change = 0;
+
+ return _mmc_detect_card_removed(host);
+}
+EXPORT_SYMBOL(mmc_detect_card_removed);
+
void mmc_rescan(struct work_struct *work)
{
static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
@@ -2047,6 +2112,8 @@ void mmc_rescan(struct work_struct *work)
&& !(host->caps & MMC_CAP_NONREMOVABLE))
host->bus_ops->detect(host);
+ host->detect_change = 0;
+
/*
* Let mmc_bus_put() free the bus/bus_ops if we've found that
* the card is no longer present.
@@ -2108,6 +2175,7 @@ void mmc_stop_host(struct mmc_host *host)
mmc_bus_get(host);
if (host->bus_ops && !host->bus_dead) {
+ /* Calling bus_ops->remove() with a claimed host can deadlock */
if (host->bus_ops->remove)
host->bus_ops->remove(host);
@@ -2179,6 +2247,9 @@ int mmc_card_awake(struct mmc_host *host)
{
int err = -ENOSYS;
+ if (host->caps2 & MMC_CAP2_NO_SLEEP_CMD)
+ return 0;
+
mmc_bus_get(host);
if (host->bus_ops && !host->bus_dead && host->bus_ops->awake)
@@ -2194,9 +2265,12 @@ int mmc_card_sleep(struct mmc_host *host)
{
int err = -ENOSYS;
+ if (host->caps2 & MMC_CAP2_NO_SLEEP_CMD)
+ return 0;
+
mmc_bus_get(host);
- if (host->bus_ops && !host->bus_dead && host->bus_ops->awake)
+ if (host->bus_ops && !host->bus_dead && host->bus_ops->sleep)
err = host->bus_ops->sleep(host);
mmc_bus_put(host);
@@ -2248,6 +2322,7 @@ EXPORT_SYMBOL(mmc_flush_cache);
int mmc_cache_ctrl(struct mmc_host *host, u8 enable)
{
struct mmc_card *card = host->card;
+ unsigned int timeout;
int err = 0;
if (!(host->caps2 & MMC_CAP2_CACHE_CTRL) ||
@@ -2258,16 +2333,18 @@ int mmc_cache_ctrl(struct mmc_host *host, u8 enable)
(card->ext_csd.cache_size > 0)) {
enable = !!enable;
- if (card->ext_csd.cache_ctrl ^ enable)
+ if (card->ext_csd.cache_ctrl ^ enable) {
+ timeout = enable ? card->ext_csd.generic_cmd6_time : 0;
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
- EXT_CSD_CACHE_CTRL, enable, 0);
- if (err)
- pr_err("%s: cache %s error %d\n",
- mmc_hostname(card->host),
- enable ? "on" : "off",
- err);
- else
- card->ext_csd.cache_ctrl = enable;
+ EXT_CSD_CACHE_CTRL, enable, timeout);
+ if (err)
+ pr_err("%s: cache %s error %d\n",
+ mmc_hostname(card->host),
+ enable ? "on" : "off",
+ err);
+ else
+ card->ext_csd.cache_ctrl = enable;
+ }
}
return err;
@@ -2288,7 +2365,13 @@ int mmc_suspend_host(struct mmc_host *host)
cancel_delayed_work(&host->disable);
cancel_delayed_work(&host->detect);
mmc_flush_scheduled_work();
- err = mmc_cache_ctrl(host, 0);
+ if (mmc_try_claim_host(host)) {
+ err = mmc_cache_ctrl(host, 0);
+ mmc_do_release_host(host);
+ } else {
+ err = -EBUSY;
+ }
+
if (err)
goto out;
@@ -2302,12 +2385,23 @@ int mmc_suspend_host(struct mmc_host *host)
* pre-claim the host.
*/
if (mmc_try_claim_host(host)) {
- if (host->bus_ops->suspend)
+ if (host->bus_ops->suspend) {
+ /*
+ * For eMMC 4.5 devices, send the notify command
+ * before sleep, because in the sleep state eMMC 4.5
+ * devices respond only to RESET and AWAKE commands.
+ */
+ mmc_poweroff_notify(host);
err = host->bus_ops->suspend(host);
+ }
+ mmc_do_release_host(host);
+
if (err == -ENOSYS || !host->bus_ops->resume) {
/*
* We simply "remove" the card in this case.
- * It will be redetected on resume.
+ * It will be redetected on resume. (Calling
+ * bus_ops->remove() with a claimed host can
+ * deadlock.)
*/
if (host->bus_ops->remove)
host->bus_ops->remove(host);
@@ -2318,7 +2412,6 @@ int mmc_suspend_host(struct mmc_host *host)
host->pm_flags = 0;
err = 0;
}
- mmc_do_release_host(host);
} else {
err = -EBUSY;
}
@@ -2401,11 +2494,11 @@ int mmc_pm_notify(struct notifier_block *notify_block,
if (!host->bus_ops || host->bus_ops->suspend)
break;
- mmc_claim_host(host);
-
+ /* Calling bus_ops->remove() with a claimed host can deadlock */
if (host->bus_ops->remove)
host->bus_ops->remove(host);
+ mmc_claim_host(host);
mmc_detach_bus(host);
mmc_power_off(host);
mmc_release_host(host);
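
The removal-detection plumbing added above (the new ->alive() bus op plus the exported mmc_detect_card_removed()) is what lets upper layers bail out quickly once a card is gone. A minimal, hypothetical caller -- the helper name and error convention are made up for illustration -- might look like:

/* Sketch only: short-circuit I/O once the card has been detected as removed. */
#include <linux/errno.h>
#include <linux/mmc/card.h>
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>

static int example_check_card(struct mmc_card *card)
{
	int gone;

	/* mmc_detect_card_removed() expects the host to be claimed. */
	mmc_claim_host(card->host);
	gone = mmc_detect_card_removed(card->host);
	mmc_release_host(card->host);

	return gone ? -ENOMEDIUM : 0;
}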
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index 14664f1fb16f..3bdafbca354f 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -24,6 +24,7 @@ struct mmc_bus_ops {
int (*resume)(struct mmc_host *);
int (*power_save)(struct mmc_host *);
int (*power_restore)(struct mmc_host *);
+ int (*alive)(struct mmc_host *);
};
void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops);
@@ -59,12 +60,14 @@ void mmc_rescan(struct work_struct *work);
void mmc_start_host(struct mmc_host *host);
void mmc_stop_host(struct mmc_host *host);
+int _mmc_detect_card_removed(struct mmc_host *host);
+
int mmc_attach_mmc(struct mmc_host *host);
int mmc_attach_sd(struct mmc_host *host);
int mmc_attach_sdio(struct mmc_host *host);
/* Module parameters */
-extern int use_spi_crc;
+extern bool use_spi_crc;
/* Debugfs information for hosts and cards */
void mmc_add_host_debugfs(struct mmc_host *host);
diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
index 3923880118b6..9ab5b17d488a 100644
--- a/drivers/mmc/core/debugfs.c
+++ b/drivers/mmc/core/debugfs.c
@@ -57,6 +57,8 @@ static int mmc_ios_show(struct seq_file *s, void *data)
const char *str;
seq_printf(s, "clock:\t\t%u Hz\n", ios->clock);
+ if (host->actual_clock)
+ seq_printf(s, "actual clock:\t%u Hz\n", host->actual_clock);
seq_printf(s, "vdd:\t\t%u ", ios->vdd);
if ((1 << ios->vdd) & MMC_VDD_165_195)
seq_printf(s, "(1.65 - 1.95 V)\n");
@@ -133,6 +135,9 @@ static int mmc_ios_show(struct seq_file *s, void *data)
case MMC_TIMING_UHS_DDR50:
str = "sd uhs DDR50";
break;
+ case MMC_TIMING_MMC_HS200:
+ str = "mmc high-speed SDR200";
+ break;
default:
str = "invalid";
break;
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index e8a5eb38748b..30055f2b0d44 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -54,6 +54,27 @@ static DEFINE_IDR(mmc_host_idr);
static DEFINE_SPINLOCK(mmc_host_lock);
#ifdef CONFIG_MMC_CLKGATE
+static ssize_t clkgate_delay_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mmc_host *host = cls_dev_to_mmc_host(dev);
+ return snprintf(buf, PAGE_SIZE, "%lu\n", host->clkgate_delay);
+}
+
+static ssize_t clkgate_delay_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct mmc_host *host = cls_dev_to_mmc_host(dev);
+ unsigned long flags, value;
+
+ if (kstrtoul(buf, 0, &value))
+ return -EINVAL;
+
+ spin_lock_irqsave(&host->clk_lock, flags);
+ host->clkgate_delay = value;
+ spin_unlock_irqrestore(&host->clk_lock, flags);
+ return count;
+}
/*
* Enabling clock gating will make the core call out to the host
@@ -114,7 +135,7 @@ static void mmc_host_clk_gate_delayed(struct mmc_host *host)
static void mmc_host_clk_gate_work(struct work_struct *work)
{
struct mmc_host *host = container_of(work, struct mmc_host,
- clk_gate_work);
+ clk_gate_work.work);
mmc_host_clk_gate_delayed(host);
}
@@ -131,6 +152,8 @@ void mmc_host_clk_hold(struct mmc_host *host)
{
unsigned long flags;
+ /* cancel any clock gating work scheduled by mmc_host_clk_release() */
+ cancel_delayed_work_sync(&host->clk_gate_work);
mutex_lock(&host->clk_gate_mutex);
spin_lock_irqsave(&host->clk_lock, flags);
if (host->clk_gated) {
@@ -180,7 +203,8 @@ void mmc_host_clk_release(struct mmc_host *host)
host->clk_requests--;
if (mmc_host_may_gate_card(host->card) &&
!host->clk_requests)
- queue_work(system_nrt_wq, &host->clk_gate_work);
+ queue_delayed_work(system_nrt_wq, &host->clk_gate_work,
+ msecs_to_jiffies(host->clkgate_delay));
spin_unlock_irqrestore(&host->clk_lock, flags);
}
@@ -213,8 +237,13 @@ static inline void mmc_host_clk_init(struct mmc_host *host)
host->clk_requests = 0;
/* Hold MCI clock for 8 cycles by default */
host->clk_delay = 8;
+ /*
+ * Default clock gating delay is 200ms.
+ * This value can be tuned by writing to the clkgate_delay sysfs entry.
+ */
+ host->clkgate_delay = 200;
host->clk_gated = false;
- INIT_WORK(&host->clk_gate_work, mmc_host_clk_gate_work);
+ INIT_DELAYED_WORK(&host->clk_gate_work, mmc_host_clk_gate_work);
spin_lock_init(&host->clk_lock);
mutex_init(&host->clk_gate_mutex);
}
@@ -229,7 +258,7 @@ static inline void mmc_host_clk_exit(struct mmc_host *host)
* Wait for any outstanding gate and then make sure we're
* ungated before exiting.
*/
- if (cancel_work_sync(&host->clk_gate_work))
+ if (cancel_delayed_work_sync(&host->clk_gate_work))
mmc_host_clk_gate_delayed(host);
if (host->clk_gated)
mmc_host_clk_hold(host);
@@ -237,6 +266,17 @@ static inline void mmc_host_clk_exit(struct mmc_host *host)
WARN_ON(host->clk_requests > 1);
}
+static inline void mmc_host_clk_sysfs_init(struct mmc_host *host)
+{
+ host->clkgate_delay_attr.show = clkgate_delay_show;
+ host->clkgate_delay_attr.store = clkgate_delay_store;
+ sysfs_attr_init(&host->clkgate_delay_attr.attr);
+ host->clkgate_delay_attr.attr.name = "clkgate_delay";
+ host->clkgate_delay_attr.attr.mode = S_IRUGO | S_IWUSR;
+ if (device_create_file(&host->class_dev, &host->clkgate_delay_attr))
+ pr_err("%s: Failed to create clkgate_delay sysfs entry\n",
+ mmc_hostname(host));
+}
#else
static inline void mmc_host_clk_init(struct mmc_host *host)
@@ -247,6 +287,10 @@ static inline void mmc_host_clk_exit(struct mmc_host *host)
{
}
+static inline void mmc_host_clk_sysfs_init(struct mmc_host *host)
+{
+}
+
#endif
/**
@@ -302,17 +346,6 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
host->max_blk_size = 512;
host->max_blk_count = PAGE_CACHE_SIZE / 512;
- /*
- * Enable runtime power management by default. This flag was added due
- * to runtime power management causing disruption for some users, but
- * the power on/off code has been improved since then.
- *
- * We'll enable this flag by default as an experiment, and if no
- * problems are reported, we will follow up later and remove the flag
- * altogether.
- */
- host->caps = MMC_CAP_POWER_OFF_CARD;
-
return host;
free:
@@ -346,6 +379,7 @@ int mmc_add_host(struct mmc_host *host)
#ifdef CONFIG_DEBUG_FS
mmc_add_host_debugfs(host);
#endif
+ mmc_host_clk_sysfs_init(host);
mmc_start_host(host);
register_pm_notifier(&host->pm_notify);
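
The clkgate_delay attribute created above ends up under the host's class device. A small userspace sketch, assuming the conventional sysfs path for host mmc0 (the path and the 50 ms value are illustrative and may differ on a given system):

/* Userspace sketch: tune the MMC clock-gating delay via the new sysfs file. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/mmc_host/mmc0/clkgate_delay", "w");

	if (!f) {
		perror("clkgate_delay");
		return 1;
	}
	/* 50 ms instead of the 200 ms default set in mmc_host_clk_init() */
	fprintf(f, "50\n");
	return fclose(f) == 0 ? 0 : 1;
}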
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index dbf421a6279c..59b9ba52e66a 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -286,6 +286,27 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
}
card->ext_csd.raw_card_type = ext_csd[EXT_CSD_CARD_TYPE];
switch (ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_MASK) {
+ case EXT_CSD_CARD_TYPE_SDR_ALL:
+ case EXT_CSD_CARD_TYPE_SDR_ALL_DDR_1_8V:
+ case EXT_CSD_CARD_TYPE_SDR_ALL_DDR_1_2V:
+ case EXT_CSD_CARD_TYPE_SDR_ALL_DDR_52:
+ card->ext_csd.hs_max_dtr = 200000000;
+ card->ext_csd.card_type = EXT_CSD_CARD_TYPE_SDR_200;
+ break;
+ case EXT_CSD_CARD_TYPE_SDR_1_2V_ALL:
+ case EXT_CSD_CARD_TYPE_SDR_1_2V_DDR_1_8V:
+ case EXT_CSD_CARD_TYPE_SDR_1_2V_DDR_1_2V:
+ case EXT_CSD_CARD_TYPE_SDR_1_2V_DDR_52:
+ card->ext_csd.hs_max_dtr = 200000000;
+ card->ext_csd.card_type = EXT_CSD_CARD_TYPE_SDR_1_2V;
+ break;
+ case EXT_CSD_CARD_TYPE_SDR_1_8V_ALL:
+ case EXT_CSD_CARD_TYPE_SDR_1_8V_DDR_1_8V:
+ case EXT_CSD_CARD_TYPE_SDR_1_8V_DDR_1_2V:
+ case EXT_CSD_CARD_TYPE_SDR_1_8V_DDR_52:
+ card->ext_csd.hs_max_dtr = 200000000;
+ card->ext_csd.card_type = EXT_CSD_CARD_TYPE_SDR_1_8V;
+ break;
case EXT_CSD_CARD_TYPE_DDR_52 | EXT_CSD_CARD_TYPE_52 |
EXT_CSD_CARD_TYPE_26:
card->ext_csd.hs_max_dtr = 52000000;
@@ -348,7 +369,8 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
part_size = ext_csd[EXT_CSD_BOOT_MULT] << 17;
mmc_part_add(card, part_size,
EXT_CSD_PART_CONFIG_ACC_BOOT0 + idx,
- "boot%d", idx, true);
+ "boot%d", idx, true,
+ MMC_BLK_DATA_AREA_BOOT);
}
}
}
@@ -435,7 +457,8 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
hc_wp_grp_sz);
mmc_part_add(card, part_size << 19,
EXT_CSD_PART_CONFIG_ACC_GP0 + idx,
- "gp%d", idx, false);
+ "gp%d", idx, false,
+ MMC_BLK_DATA_AREA_GP);
}
}
card->ext_csd.sec_trim_mult =
@@ -446,6 +469,14 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
card->ext_csd.trim_timeout = 300 *
ext_csd[EXT_CSD_TRIM_MULT];
+
+ /*
+ * Note that the call to mmc_part_add above defaults to read
+ * only. If this default assumption is changed, the call must
+ * take into account the value of boot_locked below.
+ */
+ card->ext_csd.boot_ro_lock = ext_csd[EXT_CSD_BOOT_WP];
+ card->ext_csd.boot_ro_lockable = true;
}
if (card->ext_csd.rev >= 5) {
@@ -690,6 +721,79 @@ static int mmc_select_powerclass(struct mmc_card *card,
}
/*
+ * Select the desired bus width and switch to HS200 mode
+ * if the bus width is set without error.
+ */
+static int mmc_select_hs200(struct mmc_card *card)
+{
+ int idx, err = 0;
+ struct mmc_host *host;
+ static unsigned ext_csd_bits[] = {
+ EXT_CSD_BUS_WIDTH_4,
+ EXT_CSD_BUS_WIDTH_8,
+ };
+ static unsigned bus_widths[] = {
+ MMC_BUS_WIDTH_4,
+ MMC_BUS_WIDTH_8,
+ };
+
+ BUG_ON(!card);
+
+ host = card->host;
+
+ if (card->ext_csd.card_type & EXT_CSD_CARD_TYPE_SDR_1_2V &&
+ host->caps2 & MMC_CAP2_HS200_1_2V_SDR)
+ if (mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120, 0))
+ err = mmc_set_signal_voltage(host,
+ MMC_SIGNAL_VOLTAGE_180, 0);
+
+ /* If fails try again during next card power cycle */
+ if (err)
+ goto err;
+
+ idx = (host->caps & MMC_CAP_8_BIT_DATA) ? 1 : 0;
+
+ /*
+ * Unlike SD, MMC cards don't have a configuration register to report
+ * the supported bus widths. So the bus test command should be run to
+ * identify the supported bus width, or the EXT_CSD read at the current
+ * bus width should be compared with the EXT_CSD read earlier in
+ * 1-bit mode.
+ */
+ for (; idx >= 0; idx--) {
+
+ /*
+ * If the host is capable of 8-bit transfer, switch
+ * the device to 8-bit transfer mode. If the
+ * mmc switch command returns an error, fall back to
+ * 4-bit transfer mode. On success, set the corresponding
+ * bus width on the host.
+ */
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_BUS_WIDTH,
+ ext_csd_bits[idx],
+ card->ext_csd.generic_cmd6_time);
+ if (err)
+ continue;
+
+ mmc_set_bus_width(card->host, bus_widths[idx]);
+
+ if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST))
+ err = mmc_compare_ext_csds(card, bus_widths[idx]);
+ else
+ err = mmc_bus_test(card, bus_widths[idx]);
+ if (!err)
+ break;
+ }
+
+ /* switch to HS200 mode if bus width set successfully */
+ if (!err)
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_HS_TIMING, 2, 0);
+err:
+ return err;
+}
+
+/*
* Handle the detection and initialisation of a card.
*
* In the case of a resume, "oldcard" will contain the card
@@ -876,26 +980,34 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
* set the notification byte in the ext_csd register of device
*/
if ((host->caps2 & MMC_CAP2_POWEROFF_NOTIFY) &&
- (card->poweroff_notify_state == MMC_NO_POWER_NOTIFICATION)) {
+ (card->ext_csd.rev >= 6)) {
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_POWER_OFF_NOTIFICATION,
EXT_CSD_POWER_ON,
card->ext_csd.generic_cmd6_time);
if (err && err != -EBADMSG)
goto free_card;
- }
- if (!err)
- card->poweroff_notify_state = MMC_POWERED_ON;
+ /*
+ * The err can be -EBADMSG or 0,
+ * so check for success and update the flag
+ */
+ if (!err)
+ card->poweroff_notify_state = MMC_POWERED_ON;
+ }
/*
* Activate high speed (if supported)
*/
- if ((card->ext_csd.hs_max_dtr != 0) &&
- (host->caps & MMC_CAP_MMC_HIGHSPEED)) {
- err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
- EXT_CSD_HS_TIMING, 1,
- card->ext_csd.generic_cmd6_time);
+ if (card->ext_csd.hs_max_dtr != 0) {
+ err = 0;
+ if (card->ext_csd.hs_max_dtr > 52000000 &&
+ host->caps2 & MMC_CAP2_HS200)
+ err = mmc_select_hs200(card);
+ else if (host->caps & MMC_CAP_MMC_HIGHSPEED)
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_HS_TIMING, 1, 0);
+
if (err && err != -EBADMSG)
goto free_card;
@@ -904,8 +1016,15 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
mmc_hostname(card->host));
err = 0;
} else {
- mmc_card_set_highspeed(card);
- mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
+ if (card->ext_csd.hs_max_dtr > 52000000 &&
+ host->caps2 & MMC_CAP2_HS200) {
+ mmc_card_set_hs200(card);
+ mmc_set_timing(card->host,
+ MMC_TIMING_MMC_HS200);
+ } else {
+ mmc_card_set_highspeed(card);
+ mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
+ }
}
}
@@ -930,7 +1049,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
*/
max_dtr = (unsigned int)-1;
- if (mmc_card_highspeed(card)) {
+ if (mmc_card_highspeed(card) || mmc_card_hs200(card)) {
if (max_dtr > card->ext_csd.hs_max_dtr)
max_dtr = card->ext_csd.hs_max_dtr;
} else if (max_dtr > card->csd.max_dtr) {
@@ -956,9 +1075,48 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
}
/*
+ * Indicate HS200 SDR mode (if supported).
+ */
+ if (mmc_card_hs200(card)) {
+ u32 ext_csd_bits;
+ u32 bus_width = card->host->ios.bus_width;
+
+ /*
+ * For devices supporting HS200 mode, the bus width has
+ * to be set before executing the tuning function. If
+ * it is not set before tuning, the device will respond with
+ * CRC errors for responses on the CMD line. So for HS200 the
+ * sequence will be
+ * 1. set bus width to 4-bit / 8-bit (1-bit is not supported)
+ * 2. switch to HS200 mode
+ * 3. set the clock to > 52 MHz and <= 200 MHz, and
+ * 4. execute tuning for HS200
+ */
+ if ((host->caps2 & MMC_CAP2_HS200) &&
+ card->host->ops->execute_tuning)
+ err = card->host->ops->execute_tuning(card->host,
+ MMC_SEND_TUNING_BLOCK_HS200);
+ if (err) {
+ pr_warning("%s: tuning execution failed\n",
+ mmc_hostname(card->host));
+ goto err;
+ }
+
+ ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
+ EXT_CSD_BUS_WIDTH_8 : EXT_CSD_BUS_WIDTH_4;
+ err = mmc_select_powerclass(card, ext_csd_bits, ext_csd);
+ if (err) {
+ pr_err("%s: power class selection to bus width %d failed\n",
+ mmc_hostname(card->host), 1 << bus_width);
+ goto err;
+ }
+ }
+
+ /*
* Activate wide bus and DDR (if supported).
*/
- if ((card->csd.mmca_vsn >= CSD_SPEC_VER_4) &&
+ if (!mmc_card_hs200(card) &&
+ (card->csd.mmca_vsn >= CSD_SPEC_VER_3) &&
(host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) {
static unsigned ext_csd_bits[][2] = {
{ EXT_CSD_BUS_WIDTH_8, EXT_CSD_DDR_BUS_WIDTH_8 },
@@ -1044,7 +1202,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
*
* WARNING: eMMC rules are NOT the same as SD DDR
*/
- if (ddr == EXT_CSD_CARD_TYPE_DDR_1_2V) {
+ if (ddr == MMC_1_2V_DDR_MODE) {
err = mmc_set_signal_voltage(host,
MMC_SIGNAL_VOLTAGE_120, 0);
if (err)
@@ -1063,14 +1221,23 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
if ((host->caps2 & MMC_CAP2_CACHE_CTRL) &&
card->ext_csd.cache_size > 0) {
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
- EXT_CSD_CACHE_CTRL, 1, 0);
+ EXT_CSD_CACHE_CTRL, 1,
+ card->ext_csd.generic_cmd6_time);
if (err && err != -EBADMSG)
goto free_card;
/*
* Only if no error, cache is turned on successfully.
*/
- card->ext_csd.cache_ctrl = err ? 0 : 1;
+ if (err) {
+ pr_warning("%s: Cache is supported, "
+ "but failed to turn on (%d)\n",
+ mmc_hostname(card->host), err);
+ card->ext_csd.cache_ctrl = 0;
+ err = 0;
+ } else {
+ card->ext_csd.cache_ctrl = 1;
+ }
}
if (!oldcard)
@@ -1101,6 +1268,14 @@ static void mmc_remove(struct mmc_host *host)
}
/*
+ * Card detection - card is alive.
+ */
+static int mmc_alive(struct mmc_host *host)
+{
+ return mmc_send_status(host->card, NULL);
+}
+
+/*
* Card detection callback from host.
*/
static void mmc_detect(struct mmc_host *host)
@@ -1115,7 +1290,7 @@ static void mmc_detect(struct mmc_host *host)
/*
* Just check if our card has been removed.
*/
- err = mmc_send_status(host->card, NULL);
+ err = _mmc_detect_card_removed(host);
mmc_release_host(host);
@@ -1220,6 +1395,7 @@ static const struct mmc_bus_ops mmc_ops = {
.suspend = NULL,
.resume = NULL,
.power_restore = mmc_power_restore,
+ .alive = mmc_alive,
};
static const struct mmc_bus_ops mmc_ops_unsafe = {
@@ -1230,6 +1406,7 @@ static const struct mmc_bus_ops mmc_ops_unsafe = {
.suspend = mmc_suspend,
.resume = mmc_resume,
.power_restore = mmc_power_restore,
+ .alive = mmc_alive,
};
static void mmc_attach_bus_ops(struct mmc_host *host)
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index f2a05ea40f2a..c63ad03c29c7 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -307,8 +307,8 @@ static int mmc_read_switch(struct mmc_card *card)
goto out;
}
- if (status[13] & UHS_SDR50_BUS_SPEED)
- card->sw_caps.hs_max_dtr = 50000000;
+ if (status[13] & SD_MODE_HIGH_SPEED)
+ card->sw_caps.hs_max_dtr = HIGH_SPEED_MAX_DTR;
if (card->scr.sda_spec3) {
card->sw_caps.sd3_bus_mode = status[13];
@@ -661,7 +661,8 @@ static int mmc_sd_init_uhs_card(struct mmc_card *card)
/* SPI mode doesn't define CMD19 */
if (!mmc_host_is_spi(card->host) && card->host->ops->execute_tuning)
- err = card->host->ops->execute_tuning(card->host);
+ err = card->host->ops->execute_tuning(card->host,
+ MMC_SEND_TUNING_BLOCK);
out:
kfree(status);
@@ -960,7 +961,7 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
goto free_card;
/* Card is an ultra-high-speed card */
- mmc_sd_card_set_uhs(card);
+ mmc_card_set_uhs(card);
/*
* Since initialization is now complete, enable preset
@@ -1019,6 +1020,14 @@ static void mmc_sd_remove(struct mmc_host *host)
}
/*
+ * Card detection - card is alive.
+ */
+static int mmc_sd_alive(struct mmc_host *host)
+{
+ return mmc_send_status(host->card, NULL);
+}
+
+/*
* Card detection callback from host.
*/
static void mmc_sd_detect(struct mmc_host *host)
@@ -1033,7 +1042,7 @@ static void mmc_sd_detect(struct mmc_host *host)
/*
* Just check if our card has been removed.
*/
- err = mmc_send_status(host->card, NULL);
+ err = _mmc_detect_card_removed(host);
mmc_release_host(host);
@@ -1102,6 +1111,7 @@ static const struct mmc_bus_ops mmc_sd_ops = {
.suspend = NULL,
.resume = NULL,
.power_restore = mmc_sd_power_restore,
+ .alive = mmc_sd_alive,
};
static const struct mmc_bus_ops mmc_sd_ops_unsafe = {
@@ -1110,6 +1120,7 @@ static const struct mmc_bus_ops mmc_sd_ops_unsafe = {
.suspend = mmc_sd_suspend,
.resume = mmc_sd_resume,
.power_restore = mmc_sd_power_restore,
+ .alive = mmc_sd_alive,
};
static void mmc_sd_attach_bus_ops(struct mmc_host *host)
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 3ab565e32a6a..bd7bacc950dc 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -14,6 +14,7 @@
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
+#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
@@ -102,6 +103,7 @@ static int sdio_read_cccr(struct mmc_card *card)
int ret;
int cccr_vsn;
unsigned char data;
+ unsigned char speed;
memset(&card->cccr, 0, sizeof(struct sdio_cccr));
@@ -140,12 +142,60 @@ static int sdio_read_cccr(struct mmc_card *card)
}
if (cccr_vsn >= SDIO_CCCR_REV_1_20) {
- ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_SPEED, 0, &data);
+ ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_SPEED, 0, &speed);
if (ret)
goto out;
- if (data & SDIO_SPEED_SHS)
- card->cccr.high_speed = 1;
+ card->scr.sda_spec3 = 0;
+ card->sw_caps.sd3_bus_mode = 0;
+ card->sw_caps.sd3_drv_type = 0;
+ if (cccr_vsn >= SDIO_CCCR_REV_3_00) {
+ card->scr.sda_spec3 = 1;
+ ret = mmc_io_rw_direct(card, 0, 0,
+ SDIO_CCCR_UHS, 0, &data);
+ if (ret)
+ goto out;
+
+ if (card->host->caps &
+ (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
+ MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
+ MMC_CAP_UHS_DDR50)) {
+ if (data & SDIO_UHS_DDR50)
+ card->sw_caps.sd3_bus_mode
+ |= SD_MODE_UHS_DDR50;
+
+ if (data & SDIO_UHS_SDR50)
+ card->sw_caps.sd3_bus_mode
+ |= SD_MODE_UHS_SDR50;
+
+ if (data & SDIO_UHS_SDR104)
+ card->sw_caps.sd3_bus_mode
+ |= SD_MODE_UHS_SDR104;
+ }
+
+ ret = mmc_io_rw_direct(card, 0, 0,
+ SDIO_CCCR_DRIVE_STRENGTH, 0, &data);
+ if (ret)
+ goto out;
+
+ if (data & SDIO_DRIVE_SDTA)
+ card->sw_caps.sd3_drv_type |= SD_DRIVER_TYPE_A;
+ if (data & SDIO_DRIVE_SDTC)
+ card->sw_caps.sd3_drv_type |= SD_DRIVER_TYPE_C;
+ if (data & SDIO_DRIVE_SDTD)
+ card->sw_caps.sd3_drv_type |= SD_DRIVER_TYPE_D;
+ }
+
+ /* if no uhs mode ensure we check for high speed */
+ if (!card->sw_caps.sd3_bus_mode) {
+ if (speed & SDIO_SPEED_SHS) {
+ card->cccr.high_speed = 1;
+ card->sw_caps.hs_max_dtr = 50000000;
+ } else {
+ card->cccr.high_speed = 0;
+ card->sw_caps.hs_max_dtr = 25000000;
+ }
+ }
}
out:
@@ -327,6 +377,194 @@ static unsigned mmc_sdio_get_max_clock(struct mmc_card *card)
return max_dtr;
}
+static unsigned char host_drive_to_sdio_drive(int host_strength)
+{
+ switch (host_strength) {
+ case MMC_SET_DRIVER_TYPE_A:
+ return SDIO_DTSx_SET_TYPE_A;
+ case MMC_SET_DRIVER_TYPE_B:
+ return SDIO_DTSx_SET_TYPE_B;
+ case MMC_SET_DRIVER_TYPE_C:
+ return SDIO_DTSx_SET_TYPE_C;
+ case MMC_SET_DRIVER_TYPE_D:
+ return SDIO_DTSx_SET_TYPE_D;
+ default:
+ return SDIO_DTSx_SET_TYPE_B;
+ }
+}
+
+static void sdio_select_driver_type(struct mmc_card *card)
+{
+ int host_drv_type = SD_DRIVER_TYPE_B;
+ int card_drv_type = SD_DRIVER_TYPE_B;
+ int drive_strength;
+ unsigned char card_strength;
+ int err;
+
+ /*
+ * If the host doesn't support any of the Driver Types A, C or D,
+ * or there is no board-specific handler, then the default Driver
+ * Type B is used.
+ */
+ if (!(card->host->caps &
+ (MMC_CAP_DRIVER_TYPE_A |
+ MMC_CAP_DRIVER_TYPE_C |
+ MMC_CAP_DRIVER_TYPE_D)))
+ return;
+
+ if (!card->host->ops->select_drive_strength)
+ return;
+
+ if (card->host->caps & MMC_CAP_DRIVER_TYPE_A)
+ host_drv_type |= SD_DRIVER_TYPE_A;
+
+ if (card->host->caps & MMC_CAP_DRIVER_TYPE_C)
+ host_drv_type |= SD_DRIVER_TYPE_C;
+
+ if (card->host->caps & MMC_CAP_DRIVER_TYPE_D)
+ host_drv_type |= SD_DRIVER_TYPE_D;
+
+ if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_A)
+ card_drv_type |= SD_DRIVER_TYPE_A;
+
+ if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_C)
+ card_drv_type |= SD_DRIVER_TYPE_C;
+
+ if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_D)
+ card_drv_type |= SD_DRIVER_TYPE_D;
+
+ /*
+ * The drive strength that the hardware can support
+ * depends on the board design. Pass the appropriate
+ * information and let the hardware specific code
+ * return what is possible given the options
+ */
+ drive_strength = card->host->ops->select_drive_strength(
+ card->sw_caps.uhs_max_dtr,
+ host_drv_type, card_drv_type);
+
+ /* if error just use default for drive strength B */
+ err = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_DRIVE_STRENGTH, 0,
+ &card_strength);
+ if (err)
+ return;
+
+ card_strength &= ~(SDIO_DRIVE_DTSx_MASK<<SDIO_DRIVE_DTSx_SHIFT);
+ card_strength |= host_drive_to_sdio_drive(drive_strength);
+
+ err = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_DRIVE_STRENGTH,
+ card_strength, NULL);
+
+ /* if error default to drive strength B */
+ if (!err)
+ mmc_set_driver_type(card->host, drive_strength);
+}
+
+
+static int sdio_set_bus_speed_mode(struct mmc_card *card)
+{
+ unsigned int bus_speed, timing;
+ int err;
+ unsigned char speed;
+
+ /*
+ * If the host doesn't support any of the UHS-I modes, fallback on
+ * default speed.
+ */
+ if (!(card->host->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
+ MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50)))
+ return 0;
+
+ bus_speed = SDIO_SPEED_SDR12;
+ timing = MMC_TIMING_UHS_SDR12;
+ if ((card->host->caps & MMC_CAP_UHS_SDR104) &&
+ (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR104)) {
+ bus_speed = SDIO_SPEED_SDR104;
+ timing = MMC_TIMING_UHS_SDR104;
+ card->sw_caps.uhs_max_dtr = UHS_SDR104_MAX_DTR;
+ } else if ((card->host->caps & MMC_CAP_UHS_DDR50) &&
+ (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_DDR50)) {
+ bus_speed = SDIO_SPEED_DDR50;
+ timing = MMC_TIMING_UHS_DDR50;
+ card->sw_caps.uhs_max_dtr = UHS_DDR50_MAX_DTR;
+ } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
+ MMC_CAP_UHS_SDR50)) && (card->sw_caps.sd3_bus_mode &
+ SD_MODE_UHS_SDR50)) {
+ bus_speed = SDIO_SPEED_SDR50;
+ timing = MMC_TIMING_UHS_SDR50;
+ card->sw_caps.uhs_max_dtr = UHS_SDR50_MAX_DTR;
+ } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
+ MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25)) &&
+ (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR25)) {
+ bus_speed = SDIO_SPEED_SDR25;
+ timing = MMC_TIMING_UHS_SDR25;
+ card->sw_caps.uhs_max_dtr = UHS_SDR25_MAX_DTR;
+ } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
+ MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25 |
+ MMC_CAP_UHS_SDR12)) && (card->sw_caps.sd3_bus_mode &
+ SD_MODE_UHS_SDR12)) {
+ bus_speed = SDIO_SPEED_SDR12;
+ timing = MMC_TIMING_UHS_SDR12;
+ card->sw_caps.uhs_max_dtr = UHS_SDR12_MAX_DTR;
+ }
+
+ err = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_SPEED, 0, &speed);
+ if (err)
+ return err;
+
+ speed &= ~SDIO_SPEED_BSS_MASK;
+ speed |= bus_speed;
+ err = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_SPEED, speed, NULL);
+ if (err)
+ return err;
+
+ if (bus_speed) {
+ mmc_set_timing(card->host, timing);
+ mmc_set_clock(card->host, card->sw_caps.uhs_max_dtr);
+ }
+
+ return 0;
+}
+
+/*
+ * UHS-I specific initialization procedure
+ */
+static int mmc_sdio_init_uhs_card(struct mmc_card *card)
+{
+ int err;
+
+ if (!card->scr.sda_spec3)
+ return 0;
+
+ /*
+ * Switch to wider bus (if supported).
+ */
+ if (card->host->caps & MMC_CAP_4_BIT_DATA) {
+ err = sdio_enable_4bit_bus(card);
+ if (err > 0) {
+ mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4);
+ err = 0;
+ }
+ }
+
+ /* Set the driver strength for the card */
+ sdio_select_driver_type(card);
+
+ /* Set bus speed mode of the card */
+ err = sdio_set_bus_speed_mode(card);
+ if (err)
+ goto out;
+
+ /* Initialize and start re-tuning timer */
+ if (!mmc_host_is_spi(card->host) && card->host->ops->execute_tuning)
+ err = card->host->ops->execute_tuning(card->host,
+ MMC_SEND_TUNING_BLOCK);
+
+out:
+
+ return err;
+}
+
/*
* Handle the detection and initialisation of a card.
*
@@ -394,6 +632,30 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
host->ops->init_card(host, card);
/*
+ * If the host and card support UHS-I mode, request the card
+ * to switch to the 1.8V signalling level. Don't use 1.8V
+ * signalling if UHS mode is not enabled, to maintain compatibility:
+ * some systems that claim 1.8V signalling in fact do not
+ * support it.
+ */
+ if ((ocr & R4_18V_PRESENT) &&
+ (host->caps &
+ (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
+ MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
+ MMC_CAP_UHS_DDR50))) {
+ err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180,
+ true);
+ if (err) {
+ ocr &= ~R4_18V_PRESENT;
+ host->ocr &= ~R4_18V_PRESENT;
+ }
+ err = 0;
+ } else {
+ ocr &= ~R4_18V_PRESENT;
+ host->ocr &= ~R4_18V_PRESENT;
+ }
+
+ /*
* For native busses: set card RCA and quit open drain mode.
*/
if (!powered_resume && !mmc_host_is_spi(host)) {
@@ -492,29 +754,39 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
if (err)
goto remove;
- /*
- * Switch to high-speed (if supported).
- */
- err = sdio_enable_hs(card);
- if (err > 0)
- mmc_sd_go_highspeed(card);
- else if (err)
- goto remove;
+ /* Initialization sequence for UHS-I cards */
+ /* Only if card supports 1.8v and UHS signaling */
+ if ((ocr & R4_18V_PRESENT) && card->sw_caps.sd3_bus_mode) {
+ err = mmc_sdio_init_uhs_card(card);
+ if (err)
+ goto remove;
- /*
- * Change to the card's maximum speed.
- */
- mmc_set_clock(host, mmc_sdio_get_max_clock(card));
+ /* Card is an ultra-high-speed card */
+ mmc_card_set_uhs(card);
+ } else {
+ /*
+ * Switch to high-speed (if supported).
+ */
+ err = sdio_enable_hs(card);
+ if (err > 0)
+ mmc_sd_go_highspeed(card);
+ else if (err)
+ goto remove;
- /*
- * Switch to wider bus (if supported).
- */
- err = sdio_enable_4bit_bus(card);
- if (err > 0)
- mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4);
- else if (err)
- goto remove;
+ /*
+ * Change to the card's maximum speed.
+ */
+ mmc_set_clock(host, mmc_sdio_get_max_clock(card));
+ /*
+ * Switch to wider bus (if supported).
+ */
+ err = sdio_enable_4bit_bus(card);
+ if (err > 0)
+ mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4);
+ else if (err)
+ goto remove;
+ }
finish:
if (!oldcard)
host->card = card;
@@ -550,6 +822,14 @@ static void mmc_sdio_remove(struct mmc_host *host)
}
/*
+ * Card detection - card is alive.
+ */
+static int mmc_sdio_alive(struct mmc_host *host)
+{
+ return mmc_select_card(host->card);
+}
+
+/*
* Card detection callback from host.
*/
static void mmc_sdio_detect(struct mmc_host *host)
@@ -571,7 +851,7 @@ static void mmc_sdio_detect(struct mmc_host *host)
/*
* Just check if our card has been removed.
*/
- err = mmc_select_card(host->card);
+ err = _mmc_detect_card_removed(host);
mmc_release_host(host);
@@ -749,6 +1029,7 @@ static const struct mmc_bus_ops mmc_sdio_ops = {
.suspend = mmc_sdio_suspend,
.resume = mmc_sdio_resume,
.power_restore = mmc_sdio_power_restore,
+ .alive = mmc_sdio_alive,
};
@@ -797,8 +1078,17 @@ int mmc_attach_sdio(struct mmc_host *host)
* Detect and init the card.
*/
err = mmc_sdio_init_card(host, host->ocr, NULL, 0);
- if (err)
- goto err;
+ if (err) {
+ if (err == -EAGAIN) {
+ /*
+ * Retry initialization with S18R set to 0.
+ */
+ host->ocr &= ~R4_18V_PRESENT;
+ err = mmc_sdio_init_card(host, host->ocr, NULL, 0);
+ }
+ if (err)
+ goto err;
+ }
card = host->card;
/*
diff --git a/drivers/mmc/core/sdio_io.c b/drivers/mmc/core/sdio_io.c
index b1f3168f791b..8f6f5ac131fc 100644
--- a/drivers/mmc/core/sdio_io.c
+++ b/drivers/mmc/core/sdio_io.c
@@ -196,6 +196,9 @@ static inline unsigned int sdio_max_byte_size(struct sdio_func *func)
else
mval = min(mval, func->max_blksize);
+ if (mmc_card_broken_byte_mode_512(func->card))
+ return min(mval, 511u);
+
return min(mval, 512u); /* maximum size for byte mode */
}
@@ -314,7 +317,7 @@ static int sdio_io_rw_ext_helper(struct sdio_func *func, int write,
func->card->host->max_seg_size / func->cur_blksize);
max_blocks = min(max_blocks, 511u);
- while (remainder > func->cur_blksize) {
+ while (remainder >= func->cur_blksize) {
unsigned blocks;
blocks = remainder / func->cur_blksize;
@@ -339,8 +342,9 @@ static int sdio_io_rw_ext_helper(struct sdio_func *func, int write,
while (remainder > 0) {
size = min(remainder, sdio_max_byte_size(func));
+ /* Indicate byte mode by setting "blocks" = 0 */
ret = mmc_io_rw_extended(func->card, write, func->num, addr,
- incr_addr, buf, 1, size);
+ incr_addr, buf, 0, size);
if (ret)
return ret;
diff --git a/drivers/mmc/core/sdio_ops.c b/drivers/mmc/core/sdio_ops.c
index b0517cc06200..d29e20630eed 100644
--- a/drivers/mmc/core/sdio_ops.c
+++ b/drivers/mmc/core/sdio_ops.c
@@ -128,8 +128,6 @@ int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn,
BUG_ON(!card);
BUG_ON(fn > 7);
- BUG_ON(blocks == 1 && blksz > 512);
- WARN_ON(blocks == 0);
WARN_ON(blksz == 0);
/* sanity check */
@@ -144,22 +142,20 @@ int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn,
cmd.arg |= fn << 28;
cmd.arg |= incr_addr ? 0x04000000 : 0x00000000;
cmd.arg |= addr << 9;
- if (blocks == 1 && blksz < 512)
- cmd.arg |= blksz; /* byte mode */
- else if (blocks == 1 && blksz == 512 &&
- !(mmc_card_broken_byte_mode_512(card)))
- cmd.arg |= 0; /* byte mode, 0==512 */
+ if (blocks == 0)
+ cmd.arg |= (blksz == 512) ? 0 : blksz; /* byte mode */
else
cmd.arg |= 0x08000000 | blocks; /* block mode */
cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
data.blksz = blksz;
- data.blocks = blocks;
+ /* Code in host drivers/fwk assumes that "blocks" is always >= 1 */
+ data.blocks = blocks ? blocks : 1;
data.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
data.sg = &sg;
data.sg_len = 1;
- sg_init_one(&sg, buf, blksz * blocks);
+ sg_init_one(&sg, buf, data.blksz * data.blocks);
mmc_set_data_timeout(&data, card);
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index b4b83f302e32..745f8fce2519 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_MMC_MXC) += mxcmmc.o
obj-$(CONFIG_MMC_MXS) += mxs-mmc.o
obj-$(CONFIG_MMC_SDHCI) += sdhci.o
obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o
+obj-$(subst m,y,$(CONFIG_MMC_SDHCI_PCI)) += sdhci-pci-data.o
obj-$(CONFIG_MMC_SDHCI_PXAV3) += sdhci-pxav3.o
obj-$(CONFIG_MMC_SDHCI_PXAV2) += sdhci-pxav2.o
obj-$(CONFIG_MMC_SDHCI_S3C) += sdhci-s3c.o
diff --git a/drivers/mmc/host/at91_mci.c b/drivers/mmc/host/at91_mci.c
index a8b4d2aa18e5..947faa5d2ce4 100644
--- a/drivers/mmc/host/at91_mci.c
+++ b/drivers/mmc/host/at91_mci.c
@@ -236,7 +236,7 @@ static inline void at91_mci_sg_to_dma(struct at91mci_host *host, struct mmc_data
sg = &data->sg[i];
- sgbuffer = kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
+ sgbuffer = kmap_atomic(sg_page(sg)) + sg->offset;
amount = min(size, sg->length);
size -= amount;
@@ -252,7 +252,7 @@ static inline void at91_mci_sg_to_dma(struct at91mci_host *host, struct mmc_data
dmabuf = (unsigned *)tmpv;
}
- kunmap_atomic(sgbuffer, KM_BIO_SRC_IRQ);
+ kunmap_atomic(sgbuffer);
if (size == 0)
break;
@@ -302,7 +302,7 @@ static void at91_mci_post_dma_read(struct at91mci_host *host)
sg = &data->sg[i];
- sgbuffer = kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
+ sgbuffer = kmap_atomic(sg_page(sg)) + sg->offset;
amount = min(size, sg->length);
size -= amount;
@@ -318,7 +318,7 @@ static void at91_mci_post_dma_read(struct at91mci_host *host)
}
flush_kernel_dcache_page(sg_page(sg));
- kunmap_atomic(sgbuffer, KM_BIO_SRC_IRQ);
+ kunmap_atomic(sgbuffer);
data->bytes_xfered += amount;
if (size == 0)
break;
@@ -741,7 +741,7 @@ static void at91_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
at91_mci_write(host, AT91_MCI_MR, (at91_mci_read(host, AT91_MCI_MR) & ~AT91_MCI_CLKDIV) | clkdiv);
/* maybe switch power to the card */
- if (host->board->vcc_pin) {
+ if (gpio_is_valid(host->board->vcc_pin)) {
switch (ios->power_mode) {
case MMC_POWER_OFF:
gpio_set_value(host->board->vcc_pin, 0);
@@ -897,7 +897,7 @@ static int at91_mci_get_ro(struct mmc_host *mmc)
{
struct at91mci_host *host = mmc_priv(mmc);
- if (host->board->wp_pin)
+ if (gpio_is_valid(host->board->wp_pin))
return !!gpio_get_value(host->board->wp_pin);
/*
* Board doesn't support read only detection; let the mmc core
@@ -991,21 +991,21 @@ static int __init at91_mci_probe(struct platform_device *pdev)
* Reserve GPIOs ... board init code makes sure these pins are set
* up as GPIOs with the right direction (input, except for vcc)
*/
- if (host->board->det_pin) {
+ if (gpio_is_valid(host->board->det_pin)) {
ret = gpio_request(host->board->det_pin, "mmc_detect");
if (ret < 0) {
dev_dbg(&pdev->dev, "couldn't claim card detect pin\n");
goto fail4b;
}
}
- if (host->board->wp_pin) {
+ if (gpio_is_valid(host->board->wp_pin)) {
ret = gpio_request(host->board->wp_pin, "mmc_wp");
if (ret < 0) {
dev_dbg(&pdev->dev, "couldn't claim wp sense pin\n");
goto fail4;
}
}
- if (host->board->vcc_pin) {
+ if (gpio_is_valid(host->board->vcc_pin)) {
ret = gpio_request(host->board->vcc_pin, "mmc_vcc");
if (ret < 0) {
dev_dbg(&pdev->dev, "couldn't claim vcc switch pin\n");
@@ -1057,7 +1057,7 @@ static int __init at91_mci_probe(struct platform_device *pdev)
/*
* Add host to MMC layer
*/
- if (host->board->det_pin) {
+ if (gpio_is_valid(host->board->det_pin)) {
host->present = !gpio_get_value(host->board->det_pin);
}
else
@@ -1068,7 +1068,7 @@ static int __init at91_mci_probe(struct platform_device *pdev)
/*
* monitor card insertion/removal if we can
*/
- if (host->board->det_pin) {
+ if (gpio_is_valid(host->board->det_pin)) {
ret = request_irq(gpio_to_irq(host->board->det_pin),
at91_mmc_det_irq, 0, mmc_hostname(mmc), host);
if (ret)
@@ -1087,13 +1087,13 @@ fail0:
fail1:
clk_put(host->mci_clk);
fail2:
- if (host->board->vcc_pin)
+ if (gpio_is_valid(host->board->vcc_pin))
gpio_free(host->board->vcc_pin);
fail3:
- if (host->board->wp_pin)
+ if (gpio_is_valid(host->board->wp_pin))
gpio_free(host->board->wp_pin);
fail4:
- if (host->board->det_pin)
+ if (gpio_is_valid(host->board->det_pin))
gpio_free(host->board->det_pin);
fail4b:
if (host->buffer)
@@ -1125,7 +1125,7 @@ static int __exit at91_mci_remove(struct platform_device *pdev)
dma_free_coherent(&pdev->dev, MCI_BUFSIZE,
host->buffer, host->physical_address);
- if (host->board->det_pin) {
+ if (gpio_is_valid(host->board->det_pin)) {
if (device_can_wakeup(&pdev->dev))
free_irq(gpio_to_irq(host->board->det_pin), host);
device_init_wakeup(&pdev->dev, 0);
@@ -1140,9 +1140,9 @@ static int __exit at91_mci_remove(struct platform_device *pdev)
clk_disable(host->mci_clk); /* Disable the peripheral clock */
clk_put(host->mci_clk);
- if (host->board->vcc_pin)
+ if (gpio_is_valid(host->board->vcc_pin))
gpio_free(host->board->vcc_pin);
- if (host->board->wp_pin)
+ if (gpio_is_valid(host->board->wp_pin))
gpio_free(host->board->wp_pin);
iounmap(host->baseaddr);
@@ -1163,7 +1163,7 @@ static int at91_mci_suspend(struct platform_device *pdev, pm_message_t state)
struct at91mci_host *host = mmc_priv(mmc);
int ret = 0;
- if (host->board->det_pin && device_may_wakeup(&pdev->dev))
+ if (gpio_is_valid(host->board->det_pin) && device_may_wakeup(&pdev->dev))
enable_irq_wake(host->board->det_pin);
if (mmc)
@@ -1178,7 +1178,7 @@ static int at91_mci_resume(struct platform_device *pdev)
struct at91mci_host *host = mmc_priv(mmc);
int ret = 0;
- if (host->board->det_pin && device_may_wakeup(&pdev->dev))
+ if (gpio_is_valid(host->board->det_pin) && device_may_wakeup(&pdev->dev))
disable_irq_wake(host->board->det_pin);
if (mmc)
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index a7ee50271465..fcfe1eb5acc8 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -823,6 +823,7 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
struct scatterlist *sg;
unsigned int i;
enum dma_data_direction direction;
+ enum dma_transfer_direction slave_dirn;
unsigned int sglen;
u32 iflags;
@@ -860,16 +861,19 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
if (host->caps.has_dma)
atmci_writel(host, ATMCI_DMA, ATMCI_DMA_CHKSIZE(3) | ATMCI_DMAEN);
- if (data->flags & MMC_DATA_READ)
+ if (data->flags & MMC_DATA_READ) {
direction = DMA_FROM_DEVICE;
- else
+ slave_dirn = DMA_DEV_TO_MEM;
+ } else {
direction = DMA_TO_DEVICE;
+ slave_dirn = DMA_MEM_TO_DEV;
+ }
sglen = dma_map_sg(chan->device->dev, data->sg,
data->sg_len, direction);
desc = chan->device->device_prep_slave_sg(chan,
- data->sg, sglen, direction,
+ data->sg, sglen, slave_dirn,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc)
goto unmap_exit;
diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c
index 5d3b9ae64523..dbd0c8a4e98a 100644
--- a/drivers/mmc/host/au1xmmc.c
+++ b/drivers/mmc/host/au1xmmc.c
@@ -153,6 +153,7 @@ static inline int has_dbdma(void)
{
switch (alchemy_get_cputype()) {
case ALCHEMY_CPU_AU1200:
+ case ALCHEMY_CPU_AU1300:
return 1;
default:
return 0;
@@ -768,11 +769,15 @@ static void au1xmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
config2 = au_readl(HOST_CONFIG2(host));
switch (ios->bus_width) {
+ case MMC_BUS_WIDTH_8:
+ config2 |= SD_CONFIG2_BB;
+ break;
case MMC_BUS_WIDTH_4:
+ config2 &= ~SD_CONFIG2_BB;
config2 |= SD_CONFIG2_WB;
break;
case MMC_BUS_WIDTH_1:
- config2 &= ~SD_CONFIG2_WB;
+ config2 &= ~(SD_CONFIG2_WB | SD_CONFIG2_BB);
break;
}
au_writel(config2, HOST_CONFIG2(host));
@@ -943,7 +948,7 @@ static int __devinit au1xmmc_probe(struct platform_device *pdev)
struct mmc_host *mmc;
struct au1xmmc_host *host;
struct resource *r;
- int ret;
+ int ret, iflag;
mmc = mmc_alloc_host(sizeof(struct au1xmmc_host), &pdev->dev);
if (!mmc) {
@@ -982,37 +987,43 @@ static int __devinit au1xmmc_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "no IRQ defined\n");
goto out3;
}
-
host->irq = r->start;
- /* IRQ is shared among both SD controllers */
- ret = request_irq(host->irq, au1xmmc_irq, IRQF_SHARED,
- DRIVER_NAME, host);
- if (ret) {
- dev_err(&pdev->dev, "cannot grab IRQ\n");
- goto out3;
- }
mmc->ops = &au1xmmc_ops;
mmc->f_min = 450000;
mmc->f_max = 24000000;
+ mmc->max_blk_size = 2048;
+ mmc->max_blk_count = 512;
+
+ mmc->ocr_avail = AU1XMMC_OCR;
+ mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
+ mmc->max_segs = AU1XMMC_DESCRIPTOR_COUNT;
+
+ iflag = IRQF_SHARED; /* Au1100/Au1200: one int for both ctrls */
+
switch (alchemy_get_cputype()) {
case ALCHEMY_CPU_AU1100:
mmc->max_seg_size = AU1100_MMC_DESCRIPTOR_SIZE;
- mmc->max_segs = AU1XMMC_DESCRIPTOR_COUNT;
break;
case ALCHEMY_CPU_AU1200:
mmc->max_seg_size = AU1200_MMC_DESCRIPTOR_SIZE;
- mmc->max_segs = AU1XMMC_DESCRIPTOR_COUNT;
+ break;
+ case ALCHEMY_CPU_AU1300:
+ iflag = 0; /* nothing is shared */
+ mmc->max_seg_size = AU1200_MMC_DESCRIPTOR_SIZE;
+ mmc->f_max = 52000000;
+ if (host->ioarea->start == AU1100_SD0_PHYS_ADDR)
+ mmc->caps |= MMC_CAP_8_BIT_DATA;
break;
}
- mmc->max_blk_size = 2048;
- mmc->max_blk_count = 512;
-
- mmc->ocr_avail = AU1XMMC_OCR;
- mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
+ ret = request_irq(host->irq, au1xmmc_irq, iflag, DRIVER_NAME, host);
+ if (ret) {
+ dev_err(&pdev->dev, "cannot grab IRQ\n");
+ goto out3;
+ }
host->status = HOST_S_IDLE;
diff --git a/drivers/mmc/host/bfin_sdh.c b/drivers/mmc/host/bfin_sdh.c
index 0371bf502249..03666174ca48 100644
--- a/drivers/mmc/host/bfin_sdh.c
+++ b/drivers/mmc/host/bfin_sdh.c
@@ -627,17 +627,7 @@ static struct platform_driver sdh_driver = {
},
};
-static int __init sdh_init(void)
-{
- return platform_driver_register(&sdh_driver);
-}
-module_init(sdh_init);
-
-static void __exit sdh_exit(void)
-{
- platform_driver_unregister(&sdh_driver);
-}
-module_exit(sdh_exit);
+module_platform_driver(sdh_driver);
MODULE_DESCRIPTION("Blackfin Secure Digital Host Driver");
MODULE_AUTHOR("Cliff Cai, Roy Huang");
diff --git a/drivers/mmc/host/cb710-mmc.c b/drivers/mmc/host/cb710-mmc.c
index ce2a47b71dd6..83693fd7c6b3 100644
--- a/drivers/mmc/host/cb710-mmc.c
+++ b/drivers/mmc/host/cb710-mmc.c
@@ -780,18 +780,7 @@ static struct platform_driver cb710_mmc_driver = {
#endif
};
-static int __init cb710_mmc_init_module(void)
-{
- return platform_driver_register(&cb710_mmc_driver);
-}
-
-static void __exit cb710_mmc_cleanup_module(void)
-{
- platform_driver_unregister(&cb710_mmc_driver);
-}
-
-module_init(cb710_mmc_init_module);
-module_exit(cb710_mmc_cleanup_module);
+module_platform_driver(cb710_mmc_driver);
MODULE_AUTHOR("Michał Mirosław <mirq-linux@rere.qmqm.pl>");
MODULE_DESCRIPTION("ENE CB710 memory card reader driver - MMC/SD part");
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 3aaeb0841914..0e342793ff14 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -588,11 +588,11 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot)
mci_writel(host, CTYPE, (slot->ctype << slot->id));
}
-static void dw_mci_start_request(struct dw_mci *host,
- struct dw_mci_slot *slot)
+static void __dw_mci_start_request(struct dw_mci *host,
+ struct dw_mci_slot *slot,
+ struct mmc_command *cmd)
{
struct mmc_request *mrq;
- struct mmc_command *cmd;
struct mmc_data *data;
u32 cmdflags;
@@ -610,14 +610,13 @@ static void dw_mci_start_request(struct dw_mci *host,
host->completed_events = 0;
host->data_status = 0;
- data = mrq->data;
+ data = cmd->data;
if (data) {
dw_mci_set_timeout(host);
mci_writel(host, BYTCNT, data->blksz*data->blocks);
mci_writel(host, BLKSIZ, data->blksz);
}
- cmd = mrq->cmd;
cmdflags = dw_mci_prepare_command(slot->mmc, cmd);
/* this is the first command, send the initialization clock */
@@ -635,6 +634,16 @@ static void dw_mci_start_request(struct dw_mci *host,
host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
}
+static void dw_mci_start_request(struct dw_mci *host,
+ struct dw_mci_slot *slot)
+{
+ struct mmc_request *mrq = slot->mrq;
+ struct mmc_command *cmd;
+
+ cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
+ __dw_mci_start_request(host, slot, cmd);
+}
+
/* must be called with host->lock held */
static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
struct mmc_request *mrq)
@@ -698,12 +707,15 @@ static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
break;
}
+ regs = mci_readl(slot->host, UHS_REG);
+
/* DDR mode set */
- if (ios->timing == MMC_TIMING_UHS_DDR50) {
- regs = mci_readl(slot->host, UHS_REG);
+ if (ios->timing == MMC_TIMING_UHS_DDR50)
regs |= (0x1 << slot->id) << 16;
- mci_writel(slot->host, UHS_REG, regs);
- }
+ else
+ regs &= ~(0x1 << slot->id) << 16;
+
+ mci_writel(slot->host, UHS_REG, regs);
if (ios->clock) {
/*
@@ -889,7 +901,14 @@ static void dw_mci_tasklet_func(unsigned long priv)
cmd = host->cmd;
host->cmd = NULL;
set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
- dw_mci_command_complete(host, host->mrq->cmd);
+ dw_mci_command_complete(host, cmd);
+ if (cmd == host->mrq->sbc && !cmd->error) {
+ prev_state = state = STATE_SENDING_CMD;
+ __dw_mci_start_request(host, host->cur_slot,
+ host->mrq->cmd);
+ goto unlock;
+ }
+
if (!host->mrq->data || cmd->error) {
dw_mci_request_end(host, host->mrq);
goto unlock;
@@ -967,6 +986,12 @@ static void dw_mci_tasklet_func(unsigned long priv)
goto unlock;
}
+ if (host->mrq->sbc && !data->error) {
+ data->stop->error = 0;
+ dw_mci_request_end(host, host->mrq);
+ goto unlock;
+ }
+
prev_state = state = STATE_SENDING_STOP;
if (!data->error)
send_stop_cmd(host, data);
@@ -1678,8 +1703,9 @@ static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id)
if (host->pdata->caps)
mmc->caps = host->pdata->caps;
- else
- mmc->caps = 0;
+
+ if (host->pdata->caps2)
+ mmc->caps2 = host->pdata->caps2;
if (host->pdata->get_bus_wd)
if (host->pdata->get_bus_wd(slot->id) >= 4)
@@ -1923,7 +1949,7 @@ static int dw_mci_probe(struct platform_device *pdev)
* should put it in the platform data.
*/
fifo_size = mci_readl(host, FIFOTH);
- fifo_size = 1 + ((fifo_size >> 16) & 0x7ff);
+ fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
} else {
fifo_size = host->pdata->fifo_depth;
}
@@ -2062,14 +2088,14 @@ static int __exit dw_mci_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
/*
* TODO: we should probably disable the clock to the card in the suspend path.
*/
-static int dw_mci_suspend(struct platform_device *pdev, pm_message_t mesg)
+static int dw_mci_suspend(struct device *dev)
{
int i, ret;
- struct dw_mci *host = platform_get_drvdata(pdev);
+ struct dw_mci *host = dev_get_drvdata(dev);
for (i = 0; i < host->num_slots; i++) {
struct dw_mci_slot *slot = host->slot[i];
@@ -2092,10 +2118,10 @@ static int dw_mci_suspend(struct platform_device *pdev, pm_message_t mesg)
return 0;
}
-static int dw_mci_resume(struct platform_device *pdev)
+static int dw_mci_resume(struct device *dev)
{
int i, ret;
- struct dw_mci *host = platform_get_drvdata(pdev);
+ struct dw_mci *host = dev_get_drvdata(dev);
if (host->vmmc)
regulator_enable(host->vmmc);
@@ -2103,7 +2129,7 @@ static int dw_mci_resume(struct platform_device *pdev)
if (host->dma_ops->init)
host->dma_ops->init(host);
- if (!mci_wait_reset(&pdev->dev, host)) {
+ if (!mci_wait_reset(dev, host)) {
ret = -ENODEV;
return ret;
}
@@ -2131,14 +2157,15 @@ static int dw_mci_resume(struct platform_device *pdev)
#else
#define dw_mci_suspend NULL
#define dw_mci_resume NULL
-#endif /* CONFIG_PM */
+#endif /* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(dw_mci_pmops, dw_mci_suspend, dw_mci_resume);
static struct platform_driver dw_mci_driver = {
.remove = __exit_p(dw_mci_remove),
- .suspend = dw_mci_suspend,
- .resume = dw_mci_resume,
.driver = {
.name = "dw_mmc",
+ .pm = &dw_mci_pmops,
},
};
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
index 72c071f6e001..df392a1143f2 100644
--- a/drivers/mmc/host/dw_mmc.h
+++ b/drivers/mmc/host/dw_mmc.h
@@ -126,7 +126,7 @@
#define SDMMC_CMD_RESP_EXP BIT(6)
#define SDMMC_CMD_INDX(n) ((n) & 0x1F)
/* Status register defines */
-#define SDMMC_GET_FCNT(x) (((x)>>17) & 0x1FF)
+#define SDMMC_GET_FCNT(x) (((x)>>17) & 0x1FFF)
/* Internal DMAC interrupt defines */
#define SDMMC_IDMAC_INT_AI BIT(9)
#define SDMMC_IDMAC_INT_NI BIT(8)
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index 74218ad677e4..c8852a8128a9 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -1012,17 +1012,7 @@ static struct platform_driver jz4740_mmc_driver = {
},
};
-static int __init jz4740_mmc_init(void)
-{
- return platform_driver_register(&jz4740_mmc_driver);
-}
-module_init(jz4740_mmc_init);
-
-static void __exit jz4740_mmc_exit(void)
-{
- platform_driver_unregister(&jz4740_mmc_driver);
-}
-module_exit(jz4740_mmc_exit);
+module_platform_driver(jz4740_mmc_driver);
MODULE_DESCRIPTION("JZ4740 SD/MMC controller driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 92946b84e9fa..273306c68d58 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -1525,7 +1525,6 @@ static struct of_device_id mmc_spi_of_match_table[] __devinitdata = {
static struct spi_driver mmc_spi_driver = {
.driver = {
.name = "mmc_spi",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
.of_match_table = mmc_spi_of_match_table,
},
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 50b5f9926f64..0d955ffaf44e 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -374,6 +374,7 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
struct dma_chan *chan;
struct dma_device *device;
struct dma_async_tx_descriptor *desc;
+ enum dma_data_direction buffer_dirn;
int nr_sg;
/* Check if next job is already prepared */
@@ -387,10 +388,12 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
}
if (data->flags & MMC_DATA_READ) {
- conf.direction = DMA_FROM_DEVICE;
+ conf.direction = DMA_DEV_TO_MEM;
+ buffer_dirn = DMA_FROM_DEVICE;
chan = host->dma_rx_channel;
} else {
- conf.direction = DMA_TO_DEVICE;
+ conf.direction = DMA_MEM_TO_DEV;
+ buffer_dirn = DMA_TO_DEVICE;
chan = host->dma_tx_channel;
}
@@ -403,7 +406,7 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
return -EINVAL;
device = chan->device;
- nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, conf.direction);
+ nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
if (nr_sg == 0)
return -EINVAL;
@@ -426,7 +429,7 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
unmap_exit:
if (!next)
dmaengine_terminate_all(chan);
- dma_unmap_sg(device->dev, data->sg, data->sg_len, conf.direction);
+ dma_unmap_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
return -ENOMEM;
}
@@ -675,7 +678,8 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
unsigned int status)
{
/* First check for errors */
- if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
+ if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
+ MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
u32 remain, success;
/* Terminate the DMA transfer */
@@ -754,8 +758,12 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
}
if (!cmd->data || cmd->error) {
- if (host->data)
+ if (host->data) {
+ /* Terminate the DMA transfer */
+ if (dma_inprogress(host))
+ mmci_dma_data_error(host);
mmci_stop_data(host);
+ }
mmci_request_end(host, cmd->mrq);
} else if (!(cmd->data->flags & MMC_DATA_READ)) {
mmci_start_data(host, cmd->data);
@@ -955,8 +963,9 @@ static irqreturn_t mmci_irq(int irq, void *dev_id)
dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);
data = host->data;
- if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|
- MCI_RXOVERRUN|MCI_DATAEND|MCI_DATABLOCKEND) && data)
+ if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
+ MCI_TXUNDERRUN|MCI_RXOVERRUN|MCI_DATAEND|
+ MCI_DATABLOCKEND) && data)
mmci_data_irq(host, data, status);
cmd = host->cmd;
@@ -1239,6 +1248,7 @@ static int __devinit mmci_probe(struct amba_device *dev,
if (host->vcc == NULL)
mmc->ocr_avail = plat->ocr_mask;
mmc->caps = plat->capabilities;
+ mmc->caps2 = plat->capabilities2;
/*
* We can do SGIO
@@ -1496,6 +1506,8 @@ static struct amba_id mmci_ids[] = {
{ 0, 0 },
};
+MODULE_DEVICE_TABLE(amba, mmci_ids);
+
static struct amba_driver mmci_driver = {
.drv = {
.name = DRIVER_NAME,
diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c
index 80d8eb143b48..1d14cda95e56 100644
--- a/drivers/mmc/host/msm_sdcc.c
+++ b/drivers/mmc/host/msm_sdcc.c
@@ -689,8 +689,8 @@ msmsdcc_pio_irq(int irq, void *dev_id)
/* Map the current scatter buffer */
local_irq_save(flags);
- buffer = kmap_atomic(sg_page(host->pio.sg),
- KM_BIO_SRC_IRQ) + host->pio.sg->offset;
+ buffer = kmap_atomic(sg_page(host->pio.sg))
+ + host->pio.sg->offset;
buffer += host->pio.sg_off;
remain = host->pio.sg->length - host->pio.sg_off;
len = 0;
@@ -700,7 +700,7 @@ msmsdcc_pio_irq(int irq, void *dev_id)
len = msmsdcc_pio_write(host, buffer, remain, status);
/* Unmap the buffer */
- kunmap_atomic(buffer, KM_BIO_SRC_IRQ);
+ kunmap_atomic(buffer);
local_irq_restore(flags);
host->pio.sg_off += len;
@@ -1480,18 +1480,7 @@ static struct platform_driver msmsdcc_driver = {
},
};
-static int __init msmsdcc_init(void)
-{
- return platform_driver_register(&msmsdcc_driver);
-}
-
-static void __exit msmsdcc_exit(void)
-{
- platform_driver_unregister(&msmsdcc_driver);
-}
-
-module_init(msmsdcc_init);
-module_exit(msmsdcc_exit);
+module_platform_driver(msmsdcc_driver);
MODULE_DESCRIPTION("Qualcomm MSM 7X00A Multimedia Card Interface driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
index 211a4959c293..eeb8cd125b0c 100644
--- a/drivers/mmc/host/mvsdio.c
+++ b/drivers/mmc/host/mvsdio.c
@@ -679,8 +679,9 @@ static const struct mmc_host_ops mvsd_ops = {
.enable_sdio_irq = mvsd_enable_sdio_irq,
};
-static void __init mv_conf_mbus_windows(struct mvsd_host *host,
- struct mbus_dram_target_info *dram)
+static void __init
+mv_conf_mbus_windows(struct mvsd_host *host,
+ const struct mbus_dram_target_info *dram)
{
void __iomem *iobase = host->base;
int i;
@@ -691,7 +692,7 @@ static void __init mv_conf_mbus_windows(struct mvsd_host *host,
}
for (i = 0; i < dram->num_cs; i++) {
- struct mbus_dram_window *cs = dram->cs + i;
+ const struct mbus_dram_window *cs = dram->cs + i;
writel(((cs->size - 1) & 0xffff0000) |
(cs->mbus_attr << 8) |
(dram->mbus_dram_target_id << 4) | 1,
@@ -705,6 +706,7 @@ static int __init mvsd_probe(struct platform_device *pdev)
struct mmc_host *mmc = NULL;
struct mvsd_host *host = NULL;
const struct mvsdio_platform_data *mvsd_data;
+ const struct mbus_dram_target_info *dram;
struct resource *r;
int ret, irq;
@@ -755,8 +757,9 @@ static int __init mvsd_probe(struct platform_device *pdev)
}
/* (Re-)program MBUS remapping windows if we are asked to. */
- if (mvsd_data->dram != NULL)
- mv_conf_mbus_windows(host, mvsd_data->dram);
+ dram = mv_mbus_dram_info();
+ if (dram)
+ mv_conf_mbus_windows(host, dram);
mvsd_power_down(host);
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index 325ea61e12d3..4184b7946bbf 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -218,6 +218,7 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
unsigned int blksz = data->blksz;
unsigned int datasize = nob * blksz;
struct scatterlist *sg;
+ enum dma_transfer_direction slave_dirn;
int i, nents;
if (data->flags & MMC_DATA_STREAM)
@@ -240,10 +241,13 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
}
}
- if (data->flags & MMC_DATA_READ)
+ if (data->flags & MMC_DATA_READ) {
host->dma_dir = DMA_FROM_DEVICE;
- else
+ slave_dirn = DMA_DEV_TO_MEM;
+ } else {
host->dma_dir = DMA_TO_DEVICE;
+ slave_dirn = DMA_MEM_TO_DEV;
+ }
nents = dma_map_sg(host->dma->device->dev, data->sg,
data->sg_len, host->dma_dir);
@@ -251,7 +255,7 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
return -EINVAL;
host->desc = host->dma->device->device_prep_slave_sg(host->dma,
- data->sg, data->sg_len, host->dma_dir,
+ data->sg, data->sg_len, slave_dirn,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!host->desc) {
@@ -732,6 +736,7 @@ static void mxcmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
"failed to config DMA channel. Falling back to PIO\n");
dma_release_channel(host->dma);
host->do_dma = 0;
+ host->dma = NULL;
}
}
@@ -1046,18 +1051,7 @@ static struct platform_driver mxcmci_driver = {
}
};
-static int __init mxcmci_init(void)
-{
- return platform_driver_register(&mxcmci_driver);
-}
-
-static void __exit mxcmci_exit(void)
-{
- platform_driver_unregister(&mxcmci_driver);
-}
-
-module_init(mxcmci_init);
-module_exit(mxcmci_exit);
+module_platform_driver(mxcmci_driver);
MODULE_DESCRIPTION("i.MX Multimedia Card Interface Driver");
MODULE_AUTHOR("Sascha Hauer, Pengutronix");
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index 99b449d26a4d..382c835d217c 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -154,6 +154,7 @@ struct mxs_mmc_host {
struct dma_chan *dmach;
struct mxs_dma_data dma_data;
unsigned int dma_dir;
+ enum dma_transfer_direction slave_dirn;
u32 ssp_pio_words[SSP_PIO_NUM];
unsigned int version;
@@ -324,7 +325,7 @@ static struct dma_async_tx_descriptor *mxs_mmc_prep_dma(
}
desc = host->dmach->device->device_prep_slave_sg(host->dmach,
- sgl, sg_len, host->dma_dir, append);
+ sgl, sg_len, host->slave_dirn, append);
if (desc) {
desc->callback = mxs_mmc_dma_irq_callback;
desc->callback_param = host;
@@ -356,6 +357,7 @@ static void mxs_mmc_bc(struct mxs_mmc_host *host)
host->ssp_pio_words[1] = cmd0;
host->ssp_pio_words[2] = cmd1;
host->dma_dir = DMA_NONE;
+ host->slave_dirn = DMA_TRANS_NONE;
desc = mxs_mmc_prep_dma(host, 0);
if (!desc)
goto out;
@@ -395,6 +397,7 @@ static void mxs_mmc_ac(struct mxs_mmc_host *host)
host->ssp_pio_words[1] = cmd0;
host->ssp_pio_words[2] = cmd1;
host->dma_dir = DMA_NONE;
+ host->slave_dirn = DMA_TRANS_NONE;
desc = mxs_mmc_prep_dma(host, 0);
if (!desc)
goto out;
@@ -433,6 +436,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
int i;
unsigned short dma_data_dir, timeout;
+ enum dma_transfer_direction slave_dirn;
unsigned int data_size = 0, log2_blksz;
unsigned int blocks = data->blocks;
@@ -448,9 +452,11 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
if (data->flags & MMC_DATA_WRITE) {
dma_data_dir = DMA_TO_DEVICE;
+ slave_dirn = DMA_MEM_TO_DEV;
read = 0;
} else {
dma_data_dir = DMA_FROM_DEVICE;
+ slave_dirn = DMA_DEV_TO_MEM;
read = BM_SSP_CTRL0_READ;
}
@@ -510,6 +516,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
host->ssp_pio_words[1] = cmd0;
host->ssp_pio_words[2] = cmd1;
host->dma_dir = DMA_NONE;
+ host->slave_dirn = DMA_TRANS_NONE;
desc = mxs_mmc_prep_dma(host, 0);
if (!desc)
goto out;
@@ -518,6 +525,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
WARN_ON(host->data != NULL);
host->data = data;
host->dma_dir = dma_data_dir;
+ host->slave_dirn = slave_dirn;
desc = mxs_mmc_prep_dma(host, 1);
if (!desc)
goto out;
@@ -713,7 +721,7 @@ static int mxs_mmc_probe(struct platform_device *pdev)
ret = PTR_ERR(host->clk);
goto out_iounmap;
}
- clk_enable(host->clk);
+ clk_prepare_enable(host->clk);
mxs_mmc_reset(host);
@@ -772,7 +780,7 @@ out_free_dma:
if (host->dmach)
dma_release_channel(host->dmach);
out_clk_put:
- clk_disable(host->clk);
+ clk_disable_unprepare(host->clk);
clk_put(host->clk);
out_iounmap:
iounmap(host->base);
@@ -798,7 +806,7 @@ static int mxs_mmc_remove(struct platform_device *pdev)
if (host->dmach)
dma_release_channel(host->dmach);
- clk_disable(host->clk);
+ clk_disable_unprepare(host->clk);
clk_put(host->clk);
iounmap(host->base);
@@ -819,7 +827,7 @@ static int mxs_mmc_suspend(struct device *dev)
ret = mmc_suspend_host(mmc);
- clk_disable(host->clk);
+ clk_disable_unprepare(host->clk);
return ret;
}
@@ -830,7 +838,7 @@ static int mxs_mmc_resume(struct device *dev)
struct mxs_mmc_host *host = mmc_priv(mmc);
int ret = 0;
- clk_enable(host->clk);
+ clk_prepare_enable(host->clk);
ret = mmc_resume_host(mmc);
@@ -855,18 +863,7 @@ static struct platform_driver mxs_mmc_driver = {
},
};
-static int __init mxs_mmc_init(void)
-{
- return platform_driver_register(&mxs_mmc_driver);
-}
-
-static void __exit mxs_mmc_exit(void)
-{
- platform_driver_unregister(&mxs_mmc_driver);
-}
-
-module_init(mxs_mmc_init);
-module_exit(mxs_mmc_exit);
+module_platform_driver(mxs_mmc_driver);
MODULE_DESCRIPTION("FREESCALE MXS MMC peripheral");
MODULE_AUTHOR("Freescale Semiconductor");
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index 2dba999caf2c..887c0e598cf3 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -2,7 +2,7 @@
* linux/drivers/mmc/host/omap.c
*
* Copyright (C) 2004 Nokia Corporation
- * Written by Tuukka Tikkanen and Juha Yrjl<juha.yrjola@nokia.com>
+ * Written by Tuukka Tikkanen and Juha Yrjölä<juha.yrjola@nokia.com>
* Misc hacks here and there by Tony Lindgren <tony@atomide.com>
* Other hacks (DMA, SD, etc) by David Brownell
*
@@ -1634,4 +1634,4 @@ module_exit(mmc_omap_exit);
MODULE_DESCRIPTION("OMAP Multimedia Card driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);
-MODULE_AUTHOR("Juha Yrjl");
+MODULE_AUTHOR("Juha Yrjölä");
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 101cd31c8220..fd0c661bbad3 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -24,7 +24,6 @@
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
-#include <linux/workqueue.h>
#include <linux/timer.h>
#include <linux/clk.h>
#include <linux/mmc/host.h>
@@ -120,7 +119,6 @@
#define MMC_AUTOSUSPEND_DELAY 100
#define MMC_TIMEOUT_MS 20
-#define OMAP_MMC_MASTER_CLOCK 96000000
#define OMAP_MMC_MIN_CLOCK 400000
#define OMAP_MMC_MAX_CLOCK 52000000
#define DRIVER_NAME "omap_hsmmc"
@@ -163,7 +161,6 @@ struct omap_hsmmc_host {
*/
struct regulator *vcc;
struct regulator *vcc_aux;
- struct work_struct mmc_carddetect_work;
void __iomem *base;
resource_size_t mapbase;
spinlock_t irq_lock; /* Prevent races with irq handler */
@@ -598,12 +595,12 @@ static void omap_hsmmc_disable_irq(struct omap_hsmmc_host *host)
}
/* Calculate divisor for the given clock frequency */
-static u16 calc_divisor(struct mmc_ios *ios)
+static u16 calc_divisor(struct omap_hsmmc_host *host, struct mmc_ios *ios)
{
u16 dsor = 0;
if (ios->clock) {
- dsor = DIV_ROUND_UP(OMAP_MMC_MASTER_CLOCK, ios->clock);
+ dsor = DIV_ROUND_UP(clk_get_rate(host->fclk), ios->clock);
if (dsor > 250)
dsor = 250;
}
@@ -623,7 +620,7 @@ static void omap_hsmmc_set_clock(struct omap_hsmmc_host *host)
regval = OMAP_HSMMC_READ(host->base, SYSCTL);
regval = regval & ~(CLKD_MASK | DTO_MASK);
- regval = regval | (calc_divisor(ios) << 6) | (DTO << 16);
+ regval = regval | (calc_divisor(host, ios) << 6) | (DTO << 16);
OMAP_HSMMC_WRITE(host->base, SYSCTL, regval);
OMAP_HSMMC_WRITE(host->base, SYSCTL,
OMAP_HSMMC_READ(host->base, SYSCTL) | ICE);
@@ -1010,6 +1007,7 @@ static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
host->data->sg_len,
omap_hsmmc_get_dma_dir(host, host->data));
omap_free_dma(dma_ch);
+ host->data->host_cookie = 0;
}
host->data = NULL;
}
@@ -1279,17 +1277,16 @@ static void omap_hsmmc_protect_card(struct omap_hsmmc_host *host)
}
/*
- * Work Item to notify the core about card insertion/removal
+ * irq handler to notify the core about card insertion/removal
*/
-static void omap_hsmmc_detect(struct work_struct *work)
+static irqreturn_t omap_hsmmc_detect(int irq, void *dev_id)
{
- struct omap_hsmmc_host *host =
- container_of(work, struct omap_hsmmc_host, mmc_carddetect_work);
+ struct omap_hsmmc_host *host = dev_id;
struct omap_mmc_slot_data *slot = &mmc_slot(host);
int carddetect;
if (host->suspended)
- return;
+ return IRQ_HANDLED;
sysfs_notify(&host->mmc->class_dev.kobj, NULL, "cover_switch");
@@ -1304,19 +1301,6 @@ static void omap_hsmmc_detect(struct work_struct *work)
mmc_detect_change(host->mmc, (HZ * 200) / 1000);
else
mmc_detect_change(host->mmc, (HZ * 50) / 1000);
-}
-
-/*
- * ISR for handling card insertion and removal
- */
-static irqreturn_t omap_hsmmc_cd_handler(int irq, void *dev_id)
-{
- struct omap_hsmmc_host *host = (struct omap_hsmmc_host *)dev_id;
-
- if (host->suspended)
- return IRQ_HANDLED;
- schedule_work(&host->mmc_carddetect_work);
-
return IRQ_HANDLED;
}
@@ -1575,8 +1559,10 @@ static void omap_hsmmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
struct mmc_data *data = mrq->data;
if (host->use_dma) {
- dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
- omap_hsmmc_get_dma_dir(host, data));
+ if (data->host_cookie)
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg,
+ data->sg_len,
+ omap_hsmmc_get_dma_dir(host, data));
data->host_cookie = 0;
}
}
@@ -1916,7 +1902,6 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
host->next_data.cookie = 1;
platform_set_drvdata(pdev, host);
- INIT_WORK(&host->mmc_carddetect_work, omap_hsmmc_detect);
mmc->ops = &omap_hsmmc_ops;
@@ -1988,6 +1973,8 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
if (mmc_slot(host).nonremovable)
mmc->caps |= MMC_CAP_NONREMOVABLE;
+ mmc->pm_caps = mmc_slot(host).pm_caps;
+
omap_hsmmc_conf_bus_power(host);
/* Select DMA lines */
@@ -2044,10 +2031,11 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
/* Request IRQ for card detect */
if ((mmc_slot(host).card_detect_irq)) {
- ret = request_irq(mmc_slot(host).card_detect_irq,
- omap_hsmmc_cd_handler,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- mmc_hostname(mmc), host);
+ ret = request_threaded_irq(mmc_slot(host).card_detect_irq,
+ NULL,
+ omap_hsmmc_detect,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ mmc_hostname(mmc), host);
if (ret) {
dev_dbg(mmc_dev(host->mmc),
"Unable to grab MMC CD IRQ\n");
@@ -2126,7 +2114,6 @@ static int omap_hsmmc_remove(struct platform_device *pdev)
free_irq(host->irq, host);
if (mmc_slot(host).card_detect_irq)
free_irq(mmc_slot(host).card_detect_irq, host);
- flush_work_sync(&host->mmc_carddetect_work);
pm_runtime_put_sync(host->dev);
pm_runtime_disable(host->dev);
@@ -2173,16 +2160,9 @@ static int omap_hsmmc_suspend(struct device *dev)
return ret;
}
}
- cancel_work_sync(&host->mmc_carddetect_work);
ret = mmc_suspend_host(host->mmc);
- if (ret == 0) {
- omap_hsmmc_disable_irq(host);
- OMAP_HSMMC_WRITE(host->base, HCTL,
- OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP);
- if (host->got_dbclk)
- clk_disable(host->dbclk);
- } else {
+ if (ret) {
host->suspended = 0;
if (host->pdata->resume) {
ret = host->pdata->resume(&pdev->dev,
@@ -2191,9 +2171,20 @@ static int omap_hsmmc_suspend(struct device *dev)
dev_dbg(mmc_dev(host->mmc),
"Unmask interrupt failed\n");
}
+ goto err;
}
- pm_runtime_put_sync(host->dev);
+
+ if (!(host->mmc->pm_flags & MMC_PM_KEEP_POWER)) {
+ omap_hsmmc_disable_irq(host);
+ OMAP_HSMMC_WRITE(host->base, HCTL,
+ OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP);
+ }
+ if (host->got_dbclk)
+ clk_disable(host->dbclk);
+
}
+err:
+ pm_runtime_put_sync(host->dev);
return ret;
}
@@ -2213,7 +2204,8 @@ static int omap_hsmmc_resume(struct device *dev)
if (host->got_dbclk)
clk_enable(host->dbclk);
- omap_hsmmc_conf_bus_power(host);
+ if (!(host->mmc->pm_flags & MMC_PM_KEEP_POWER))
+ omap_hsmmc_conf_bus_power(host);
if (host->pdata->resume) {
ret = host->pdata->resume(&pdev->dev, host->slot_id);
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index fc4356e00d46..cb2dc0e75ba7 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -872,18 +872,7 @@ static struct platform_driver pxamci_driver = {
},
};
-static int __init pxamci_init(void)
-{
- return platform_driver_register(&pxamci_driver);
-}
-
-static void __exit pxamci_exit(void)
-{
- platform_driver_unregister(&pxamci_driver);
-}
-
-module_init(pxamci_init);
-module_exit(pxamci_exit);
+module_platform_driver(pxamci_driver);
MODULE_DESCRIPTION("PXA Multimedia Card Interface Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index 720f99334a7f..1bcfd6dbb5cc 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -1914,18 +1914,7 @@ static struct platform_driver s3cmci_driver = {
.shutdown = s3cmci_shutdown,
};
-static int __init s3cmci_init(void)
-{
- return platform_driver_register(&s3cmci_driver);
-}
-
-static void __exit s3cmci_exit(void)
-{
- platform_driver_unregister(&s3cmci_driver);
-}
-
-module_init(s3cmci_init);
-module_exit(s3cmci_exit);
+module_platform_driver(s3cmci_driver);
MODULE_DESCRIPTION("Samsung S3C MMC/SD Card Interface driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/sdhci-cns3xxx.c b/drivers/mmc/host/sdhci-cns3xxx.c
index 4b920b7621cf..28a870804f60 100644
--- a/drivers/mmc/host/sdhci-cns3xxx.c
+++ b/drivers/mmc/host/sdhci-cns3xxx.c
@@ -15,6 +15,7 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/mmc/host.h>
+#include <linux/module.h>
#include <mach/cns3xxx.h>
#include "sdhci-pltfm.h"
@@ -108,26 +109,13 @@ static struct platform_driver sdhci_cns3xxx_driver = {
.driver = {
.name = "sdhci-cns3xxx",
.owner = THIS_MODULE,
+ .pm = SDHCI_PLTFM_PMOPS,
},
.probe = sdhci_cns3xxx_probe,
.remove = __devexit_p(sdhci_cns3xxx_remove),
-#ifdef CONFIG_PM
- .suspend = sdhci_pltfm_suspend,
- .resume = sdhci_pltfm_resume,
-#endif
};
-static int __init sdhci_cns3xxx_init(void)
-{
- return platform_driver_register(&sdhci_cns3xxx_driver);
-}
-module_init(sdhci_cns3xxx_init);
-
-static void __exit sdhci_cns3xxx_exit(void)
-{
- platform_driver_unregister(&sdhci_cns3xxx_driver);
-}
-module_exit(sdhci_cns3xxx_exit);
+module_platform_driver(sdhci_cns3xxx_driver);
MODULE_DESCRIPTION("SDHCI driver for CNS3xxx");
MODULE_AUTHOR("Scott Shu, "
diff --git a/drivers/mmc/host/sdhci-dove.c b/drivers/mmc/host/sdhci-dove.c
index f2d29dca4420..46fd1fd1b605 100644
--- a/drivers/mmc/host/sdhci-dove.c
+++ b/drivers/mmc/host/sdhci-dove.c
@@ -82,26 +82,13 @@ static struct platform_driver sdhci_dove_driver = {
.driver = {
.name = "sdhci-dove",
.owner = THIS_MODULE,
+ .pm = SDHCI_PLTFM_PMOPS,
},
.probe = sdhci_dove_probe,
.remove = __devexit_p(sdhci_dove_remove),
-#ifdef CONFIG_PM
- .suspend = sdhci_pltfm_suspend,
- .resume = sdhci_pltfm_resume,
-#endif
};
-static int __init sdhci_dove_init(void)
-{
- return platform_driver_register(&sdhci_dove_driver);
-}
-module_init(sdhci_dove_init);
-
-static void __exit sdhci_dove_exit(void)
-{
- platform_driver_unregister(&sdhci_dove_driver);
-}
-module_exit(sdhci_dove_exit);
+module_platform_driver(sdhci_dove_driver);
MODULE_DESCRIPTION("SDHCI driver for Dove");
MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>, "
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 4b976f00ea85..d601e41af282 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -599,27 +599,14 @@ static struct platform_driver sdhci_esdhc_imx_driver = {
.name = "sdhci-esdhc-imx",
.owner = THIS_MODULE,
.of_match_table = imx_esdhc_dt_ids,
+ .pm = SDHCI_PLTFM_PMOPS,
},
.id_table = imx_esdhc_devtype,
.probe = sdhci_esdhc_imx_probe,
.remove = __devexit_p(sdhci_esdhc_imx_remove),
-#ifdef CONFIG_PM
- .suspend = sdhci_pltfm_suspend,
- .resume = sdhci_pltfm_resume,
-#endif
};
-static int __init sdhci_esdhc_imx_init(void)
-{
- return platform_driver_register(&sdhci_esdhc_imx_driver);
-}
-module_init(sdhci_esdhc_imx_init);
-
-static void __exit sdhci_esdhc_imx_exit(void)
-{
- platform_driver_unregister(&sdhci_esdhc_imx_driver);
-}
-module_exit(sdhci_esdhc_imx_exit);
+module_platform_driver(sdhci_esdhc_imx_driver);
MODULE_DESCRIPTION("SDHCI driver for Freescale i.MX eSDHC");
MODULE_AUTHOR("Wolfram Sang <w.sang@pengutronix.de>");
diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h
index c3b08f111942..b97b2f5dafdb 100644
--- a/drivers/mmc/host/sdhci-esdhc.h
+++ b/drivers/mmc/host/sdhci-esdhc.h
@@ -73,7 +73,7 @@ static inline void esdhc_set_clock(struct sdhci_host *host, unsigned int clock)
| (div << ESDHC_DIVIDER_SHIFT)
| (pre_div << ESDHC_PREDIV_SHIFT));
sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
- mdelay(100);
+ mdelay(1);
out:
host->clock = clock;
}
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index 59e9d003e589..ff4adc018041 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -125,26 +125,13 @@ static struct platform_driver sdhci_esdhc_driver = {
.name = "sdhci-esdhc",
.owner = THIS_MODULE,
.of_match_table = sdhci_esdhc_of_match,
+ .pm = SDHCI_PLTFM_PMOPS,
},
.probe = sdhci_esdhc_probe,
.remove = __devexit_p(sdhci_esdhc_remove),
-#ifdef CONFIG_PM
- .suspend = sdhci_pltfm_suspend,
- .resume = sdhci_pltfm_resume,
-#endif
};
-static int __init sdhci_esdhc_init(void)
-{
- return platform_driver_register(&sdhci_esdhc_driver);
-}
-module_init(sdhci_esdhc_init);
-
-static void __exit sdhci_esdhc_exit(void)
-{
- platform_driver_unregister(&sdhci_esdhc_driver);
-}
-module_exit(sdhci_esdhc_exit);
+module_platform_driver(sdhci_esdhc_driver);
MODULE_DESCRIPTION("SDHCI OF driver for Freescale MPC eSDHC");
MODULE_AUTHOR("Xiaobo Xie <X.Xie@freescale.com>, "
diff --git a/drivers/mmc/host/sdhci-of-hlwd.c b/drivers/mmc/host/sdhci-of-hlwd.c
index 9b0d794a4f69..0ce088ae0228 100644
--- a/drivers/mmc/host/sdhci-of-hlwd.c
+++ b/drivers/mmc/host/sdhci-of-hlwd.c
@@ -87,26 +87,13 @@ static struct platform_driver sdhci_hlwd_driver = {
.name = "sdhci-hlwd",
.owner = THIS_MODULE,
.of_match_table = sdhci_hlwd_of_match,
+ .pm = SDHCI_PLTFM_PMOPS,
},
.probe = sdhci_hlwd_probe,
.remove = __devexit_p(sdhci_hlwd_remove),
-#ifdef CONFIG_PM
- .suspend = sdhci_pltfm_suspend,
- .resume = sdhci_pltfm_resume,
-#endif
};
-static int __init sdhci_hlwd_init(void)
-{
- return platform_driver_register(&sdhci_hlwd_driver);
-}
-module_init(sdhci_hlwd_init);
-
-static void __exit sdhci_hlwd_exit(void)
-{
- platform_driver_unregister(&sdhci_hlwd_driver);
-}
-module_exit(sdhci_hlwd_exit);
+module_platform_driver(sdhci_hlwd_driver);
MODULE_DESCRIPTION("Nintendo Wii SDHCI OF driver");
MODULE_AUTHOR("The GameCube Linux Team, Albert Herranz");
diff --git a/drivers/mmc/host/sdhci-pci-data.c b/drivers/mmc/host/sdhci-pci-data.c
new file mode 100644
index 000000000000..a611217769f5
--- /dev/null
+++ b/drivers/mmc/host/sdhci-pci-data.c
@@ -0,0 +1,5 @@
+#include <linux/module.h>
+#include <linux/mmc/sdhci-pci-data.h>
+
+struct sdhci_pci_data *(*sdhci_pci_get_data)(struct pci_dev *pdev, int slotno);
+EXPORT_SYMBOL_GPL(sdhci_pci_get_data);
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index d833d9c2f7e3..7165e6a09274 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -23,8 +23,8 @@
#include <linux/scatterlist.h>
#include <linux/io.h>
#include <linux/gpio.h>
-#include <linux/sfi.h>
#include <linux/pm_runtime.h>
+#include <linux/mmc/sdhci-pci-data.h>
#include "sdhci.h"
@@ -54,14 +54,14 @@ struct sdhci_pci_fixes {
int (*probe_slot) (struct sdhci_pci_slot *);
void (*remove_slot) (struct sdhci_pci_slot *, int);
- int (*suspend) (struct sdhci_pci_chip *,
- pm_message_t);
+ int (*suspend) (struct sdhci_pci_chip *);
int (*resume) (struct sdhci_pci_chip *);
};
struct sdhci_pci_slot {
struct sdhci_pci_chip *chip;
struct sdhci_host *host;
+ struct sdhci_pci_data *data;
int pci_bar;
int rst_n_gpio;
@@ -172,32 +172,9 @@ static int mrst_hc_probe(struct sdhci_pci_chip *chip)
return 0;
}
-/* Medfield eMMC hardware reset GPIOs */
-static int mfd_emmc0_rst_gpio = -EINVAL;
-static int mfd_emmc1_rst_gpio = -EINVAL;
-
-static int mfd_emmc_gpio_parse(struct sfi_table_header *table)
-{
- struct sfi_table_simple *sb = (struct sfi_table_simple *)table;
- struct sfi_gpio_table_entry *entry;
- int i, num;
-
- num = SFI_GET_NUM_ENTRIES(sb, struct sfi_gpio_table_entry);
- entry = (struct sfi_gpio_table_entry *)sb->pentry;
-
- for (i = 0; i < num; i++, entry++) {
- if (!strncmp(entry->pin_name, "emmc0_rst", SFI_NAME_LEN))
- mfd_emmc0_rst_gpio = entry->pin_no;
- else if (!strncmp(entry->pin_name, "emmc1_rst", SFI_NAME_LEN))
- mfd_emmc1_rst_gpio = entry->pin_no;
- }
-
- return 0;
-}
-
#ifdef CONFIG_PM_RUNTIME
-static irqreturn_t mfd_sd_cd(int irq, void *dev_id)
+static irqreturn_t sdhci_pci_sd_cd(int irq, void *dev_id)
{
struct sdhci_pci_slot *slot = dev_id;
struct sdhci_host *host = slot->host;
@@ -206,15 +183,16 @@ static irqreturn_t mfd_sd_cd(int irq, void *dev_id)
return IRQ_HANDLED;
}
-#define MFLD_SD_CD_PIN 69
-
-static int mfd_sd_probe_slot(struct sdhci_pci_slot *slot)
+static void sdhci_pci_add_own_cd(struct sdhci_pci_slot *slot)
{
- int err, irq, gpio = MFLD_SD_CD_PIN;
+ int err, irq, gpio = slot->cd_gpio;
slot->cd_gpio = -EINVAL;
slot->cd_irq = -EINVAL;
+ if (!gpio_is_valid(gpio))
+ return;
+
err = gpio_request(gpio, "sd_cd");
if (err < 0)
goto out;
@@ -227,72 +205,53 @@ static int mfd_sd_probe_slot(struct sdhci_pci_slot *slot)
if (irq < 0)
goto out_free;
- err = request_irq(irq, mfd_sd_cd, IRQF_TRIGGER_RISING |
+ err = request_irq(irq, sdhci_pci_sd_cd, IRQF_TRIGGER_RISING |
IRQF_TRIGGER_FALLING, "sd_cd", slot);
if (err)
goto out_free;
slot->cd_gpio = gpio;
slot->cd_irq = irq;
- slot->host->quirks2 |= SDHCI_QUIRK2_OWN_CARD_DETECTION;
- return 0;
+ return;
out_free:
gpio_free(gpio);
out:
dev_warn(&slot->chip->pdev->dev, "failed to setup card detect wake up\n");
- return 0;
}
-static void mfd_sd_remove_slot(struct sdhci_pci_slot *slot, int dead)
+static void sdhci_pci_remove_own_cd(struct sdhci_pci_slot *slot)
{
if (slot->cd_irq >= 0)
free_irq(slot->cd_irq, slot);
- gpio_free(slot->cd_gpio);
+ if (gpio_is_valid(slot->cd_gpio))
+ gpio_free(slot->cd_gpio);
}
#else
-#define mfd_sd_probe_slot NULL
-#define mfd_sd_remove_slot NULL
+static inline void sdhci_pci_add_own_cd(struct sdhci_pci_slot *slot)
+{
+}
+
+static inline void sdhci_pci_remove_own_cd(struct sdhci_pci_slot *slot)
+{
+}
#endif
static int mfd_emmc_probe_slot(struct sdhci_pci_slot *slot)
{
- const char *name = NULL;
- int gpio = -EINVAL;
-
- sfi_table_parse(SFI_SIG_GPIO, NULL, NULL, mfd_emmc_gpio_parse);
-
- switch (slot->chip->pdev->device) {
- case PCI_DEVICE_ID_INTEL_MFD_EMMC0:
- gpio = mfd_emmc0_rst_gpio;
- name = "eMMC0_reset";
- break;
- case PCI_DEVICE_ID_INTEL_MFD_EMMC1:
- gpio = mfd_emmc1_rst_gpio;
- name = "eMMC1_reset";
- break;
- }
-
- if (!gpio_request(gpio, name)) {
- gpio_direction_output(gpio, 1);
- slot->rst_n_gpio = gpio;
- slot->host->mmc->caps |= MMC_CAP_HW_RESET;
- }
-
slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE;
-
slot->host->mmc->caps2 = MMC_CAP2_BOOTPART_NOACC;
-
return 0;
}
-static void mfd_emmc_remove_slot(struct sdhci_pci_slot *slot, int dead)
+static int mfd_sdio_probe_slot(struct sdhci_pci_slot *slot)
{
- gpio_free(slot->rst_n_gpio);
+ slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD;
+ return 0;
}
static const struct sdhci_pci_fixes sdhci_intel_mrst_hc0 = {
@@ -308,20 +267,18 @@ static const struct sdhci_pci_fixes sdhci_intel_mrst_hc1_hc2 = {
static const struct sdhci_pci_fixes sdhci_intel_mfd_sd = {
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
.allow_runtime_pm = true,
- .probe_slot = mfd_sd_probe_slot,
- .remove_slot = mfd_sd_remove_slot,
};
static const struct sdhci_pci_fixes sdhci_intel_mfd_sdio = {
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
.allow_runtime_pm = true,
+ .probe_slot = mfd_sdio_probe_slot,
};
static const struct sdhci_pci_fixes sdhci_intel_mfd_emmc = {
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
.allow_runtime_pm = true,
.probe_slot = mfd_emmc_probe_slot,
- .remove_slot = mfd_emmc_remove_slot,
};
/* O2Micro extra registers */
@@ -549,7 +506,7 @@ static void jmicron_remove_slot(struct sdhci_pci_slot *slot, int dead)
jmicron_enable_mmc(slot->host, 0);
}
-static int jmicron_suspend(struct sdhci_pci_chip *chip, pm_message_t state)
+static int jmicron_suspend(struct sdhci_pci_chip *chip)
{
int i;
@@ -993,8 +950,9 @@ static struct sdhci_ops sdhci_pci_ops = {
#ifdef CONFIG_PM
-static int sdhci_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+static int sdhci_pci_suspend(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct sdhci_pci_chip *chip;
struct sdhci_pci_slot *slot;
mmc_pm_flag_t slot_pm_flags;
@@ -1010,13 +968,10 @@ static int sdhci_pci_suspend(struct pci_dev *pdev, pm_message_t state)
if (!slot)
continue;
- ret = sdhci_suspend_host(slot->host, state);
+ ret = sdhci_suspend_host(slot->host);
- if (ret) {
- for (i--; i >= 0; i--)
- sdhci_resume_host(chip->slots[i]->host);
- return ret;
- }
+ if (ret)
+ goto err_pci_suspend;
slot_pm_flags = slot->host->mmc->pm_flags;
if (slot_pm_flags & MMC_PM_WAKE_SDIO_IRQ)
@@ -1026,12 +981,9 @@ static int sdhci_pci_suspend(struct pci_dev *pdev, pm_message_t state)
}
if (chip->fixes && chip->fixes->suspend) {
- ret = chip->fixes->suspend(chip, state);
- if (ret) {
- for (i = chip->num_slots - 1; i >= 0; i--)
- sdhci_resume_host(chip->slots[i]->host);
- return ret;
- }
+ ret = chip->fixes->suspend(chip);
+ if (ret)
+ goto err_pci_suspend;
}
pci_save_state(pdev);
@@ -1042,16 +994,22 @@ static int sdhci_pci_suspend(struct pci_dev *pdev, pm_message_t state)
}
pci_set_power_state(pdev, PCI_D3hot);
} else {
- pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
+ pci_enable_wake(pdev, PCI_D3hot, 0);
pci_disable_device(pdev);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
+ pci_set_power_state(pdev, PCI_D3hot);
}
return 0;
+
+err_pci_suspend:
+ while (--i >= 0)
+ sdhci_resume_host(chip->slots[i]->host);
+ return ret;
}
-static int sdhci_pci_resume(struct pci_dev *pdev)
+static int sdhci_pci_resume(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct sdhci_pci_chip *chip;
struct sdhci_pci_slot *slot;
int i, ret;
@@ -1099,7 +1057,6 @@ static int sdhci_pci_runtime_suspend(struct device *dev)
struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
struct sdhci_pci_chip *chip;
struct sdhci_pci_slot *slot;
- pm_message_t state = { .event = PM_EVENT_SUSPEND };
int i, ret;
chip = pci_get_drvdata(pdev);
@@ -1113,23 +1070,22 @@ static int sdhci_pci_runtime_suspend(struct device *dev)
ret = sdhci_runtime_suspend_host(slot->host);
- if (ret) {
- for (i--; i >= 0; i--)
- sdhci_runtime_resume_host(chip->slots[i]->host);
- return ret;
- }
+ if (ret)
+ goto err_pci_runtime_suspend;
}
if (chip->fixes && chip->fixes->suspend) {
- ret = chip->fixes->suspend(chip, state);
- if (ret) {
- for (i = chip->num_slots - 1; i >= 0; i--)
- sdhci_runtime_resume_host(chip->slots[i]->host);
- return ret;
- }
+ ret = chip->fixes->suspend(chip);
+ if (ret)
+ goto err_pci_runtime_suspend;
}
return 0;
+
+err_pci_runtime_suspend:
+ while (--i >= 0)
+ sdhci_runtime_resume_host(chip->slots[i]->host);
+ return ret;
}
static int sdhci_pci_runtime_resume(struct device *dev)
@@ -1176,6 +1132,8 @@ static int sdhci_pci_runtime_idle(struct device *dev)
#endif
static const struct dev_pm_ops sdhci_pci_pm_ops = {
+ .suspend = sdhci_pci_suspend,
+ .resume = sdhci_pci_resume,
.runtime_suspend = sdhci_pci_runtime_suspend,
.runtime_resume = sdhci_pci_runtime_resume,
.runtime_idle = sdhci_pci_runtime_idle,
@@ -1188,11 +1146,12 @@ static const struct dev_pm_ops sdhci_pci_pm_ops = {
\*****************************************************************************/
static struct sdhci_pci_slot * __devinit sdhci_pci_probe_slot(
- struct pci_dev *pdev, struct sdhci_pci_chip *chip, int bar)
+ struct pci_dev *pdev, struct sdhci_pci_chip *chip, int first_bar,
+ int slotno)
{
struct sdhci_pci_slot *slot;
struct sdhci_host *host;
- int ret;
+ int ret, bar = first_bar + slotno;
if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
dev_err(&pdev->dev, "BAR %d is not iomem. Aborting.\n", bar);
@@ -1226,6 +1185,23 @@ static struct sdhci_pci_slot * __devinit sdhci_pci_probe_slot(
slot->host = host;
slot->pci_bar = bar;
slot->rst_n_gpio = -EINVAL;
+ slot->cd_gpio = -EINVAL;
+
+ /* Retrieve platform data if there is any */
+ if (*sdhci_pci_get_data)
+ slot->data = sdhci_pci_get_data(pdev, slotno);
+
+ if (slot->data) {
+ if (slot->data->setup) {
+ ret = slot->data->setup(slot->data);
+ if (ret) {
+ dev_err(&pdev->dev, "platform setup failed\n");
+ goto free;
+ }
+ }
+ slot->rst_n_gpio = slot->data->rst_n_gpio;
+ slot->cd_gpio = slot->data->cd_gpio;
+ }
host->hw_name = "PCI";
host->ops = &sdhci_pci_ops;
@@ -1236,7 +1212,7 @@ static struct sdhci_pci_slot * __devinit sdhci_pci_probe_slot(
ret = pci_request_region(pdev, bar, mmc_hostname(host->mmc));
if (ret) {
dev_err(&pdev->dev, "cannot request region\n");
- goto free;
+ goto cleanup;
}
host->ioaddr = pci_ioremap_bar(pdev, bar);
@@ -1252,15 +1228,30 @@ static struct sdhci_pci_slot * __devinit sdhci_pci_probe_slot(
goto unmap;
}
+ if (gpio_is_valid(slot->rst_n_gpio)) {
+ if (!gpio_request(slot->rst_n_gpio, "eMMC_reset")) {
+ gpio_direction_output(slot->rst_n_gpio, 1);
+ slot->host->mmc->caps |= MMC_CAP_HW_RESET;
+ } else {
+ dev_warn(&pdev->dev, "failed to request rst_n_gpio\n");
+ slot->rst_n_gpio = -EINVAL;
+ }
+ }
+
host->mmc->pm_caps = MMC_PM_KEEP_POWER | MMC_PM_WAKE_SDIO_IRQ;
ret = sdhci_add_host(host);
if (ret)
goto remove;
+ sdhci_pci_add_own_cd(slot);
+
return slot;
remove:
+ if (gpio_is_valid(slot->rst_n_gpio))
+ gpio_free(slot->rst_n_gpio);
+
if (chip->fixes && chip->fixes->remove_slot)
chip->fixes->remove_slot(slot, 0);
@@ -1270,6 +1261,10 @@ unmap:
release:
pci_release_region(pdev, bar);
+cleanup:
+ if (slot->data && slot->data->cleanup)
+ slot->data->cleanup(slot->data);
+
free:
sdhci_free_host(host);
@@ -1281,6 +1276,8 @@ static void sdhci_pci_remove_slot(struct sdhci_pci_slot *slot)
int dead;
u32 scratch;
+ sdhci_pci_remove_own_cd(slot);
+
dead = 0;
scratch = readl(slot->host->ioaddr + SDHCI_INT_STATUS);
if (scratch == (u32)-1)
@@ -1288,9 +1285,15 @@ static void sdhci_pci_remove_slot(struct sdhci_pci_slot *slot)
sdhci_remove_host(slot->host, dead);
+ if (gpio_is_valid(slot->rst_n_gpio))
+ gpio_free(slot->rst_n_gpio);
+
if (slot->chip->fixes && slot->chip->fixes->remove_slot)
slot->chip->fixes->remove_slot(slot, dead);
+ if (slot->data && slot->data->cleanup)
+ slot->data->cleanup(slot->data);
+
pci_release_region(slot->chip->pdev, slot->pci_bar);
sdhci_free_host(slot->host);
@@ -1377,7 +1380,7 @@ static int __devinit sdhci_pci_probe(struct pci_dev *pdev,
slots = chip->num_slots; /* Quirk may have changed this */
for (i = 0; i < slots; i++) {
- slot = sdhci_pci_probe_slot(pdev, chip, first_bar + i);
+ slot = sdhci_pci_probe_slot(pdev, chip, first_bar, i);
if (IS_ERR(slot)) {
for (i--; i >= 0; i--)
sdhci_pci_remove_slot(chip->slots[i]);
@@ -1428,8 +1431,6 @@ static struct pci_driver sdhci_driver = {
.id_table = pci_ids,
.probe = sdhci_pci_probe,
.remove = __devexit_p(sdhci_pci_remove),
- .suspend = sdhci_pci_suspend,
- .resume = sdhci_pci_resume,
.driver = {
.pm = &sdhci_pci_pm_ops
},
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c
index a9e12ea05583..03970bcb3495 100644
--- a/drivers/mmc/host/sdhci-pltfm.c
+++ b/drivers/mmc/host/sdhci-pltfm.c
@@ -194,21 +194,25 @@ int sdhci_pltfm_unregister(struct platform_device *pdev)
EXPORT_SYMBOL_GPL(sdhci_pltfm_unregister);
#ifdef CONFIG_PM
-int sdhci_pltfm_suspend(struct platform_device *dev, pm_message_t state)
+static int sdhci_pltfm_suspend(struct device *dev)
{
- struct sdhci_host *host = platform_get_drvdata(dev);
+ struct sdhci_host *host = dev_get_drvdata(dev);
- return sdhci_suspend_host(host, state);
+ return sdhci_suspend_host(host);
}
-EXPORT_SYMBOL_GPL(sdhci_pltfm_suspend);
-int sdhci_pltfm_resume(struct platform_device *dev)
+static int sdhci_pltfm_resume(struct device *dev)
{
- struct sdhci_host *host = platform_get_drvdata(dev);
+ struct sdhci_host *host = dev_get_drvdata(dev);
return sdhci_resume_host(host);
}
-EXPORT_SYMBOL_GPL(sdhci_pltfm_resume);
+
+const struct dev_pm_ops sdhci_pltfm_pmops = {
+ .suspend = sdhci_pltfm_suspend,
+ .resume = sdhci_pltfm_resume,
+};
+EXPORT_SYMBOL_GPL(sdhci_pltfm_pmops);
#endif /* CONFIG_PM */
static int __init sdhci_pltfm_drv_init(void)
diff --git a/drivers/mmc/host/sdhci-pltfm.h b/drivers/mmc/host/sdhci-pltfm.h
index 3a9fc3f40840..37e0e184a0bb 100644
--- a/drivers/mmc/host/sdhci-pltfm.h
+++ b/drivers/mmc/host/sdhci-pltfm.h
@@ -99,8 +99,10 @@ extern int sdhci_pltfm_register(struct platform_device *pdev,
extern int sdhci_pltfm_unregister(struct platform_device *pdev);
#ifdef CONFIG_PM
-extern int sdhci_pltfm_suspend(struct platform_device *dev, pm_message_t state);
-extern int sdhci_pltfm_resume(struct platform_device *dev);
+extern const struct dev_pm_ops sdhci_pltfm_pmops;
+#define SDHCI_PLTFM_PMOPS (&sdhci_pltfm_pmops)
+#else
+#define SDHCI_PLTFM_PMOPS NULL
#endif
#endif /* _DRIVERS_MMC_SDHCI_PLTFM_H */
diff --git a/drivers/mmc/host/sdhci-pxav2.c b/drivers/mmc/host/sdhci-pxav2.c
index d4bf6d30c7ba..dbb75bfbcffb 100644
--- a/drivers/mmc/host/sdhci-pxav2.c
+++ b/drivers/mmc/host/sdhci-pxav2.c
@@ -218,26 +218,13 @@ static struct platform_driver sdhci_pxav2_driver = {
.driver = {
.name = "sdhci-pxav2",
.owner = THIS_MODULE,
+ .pm = SDHCI_PLTFM_PMOPS,
},
.probe = sdhci_pxav2_probe,
.remove = __devexit_p(sdhci_pxav2_remove),
-#ifdef CONFIG_PM
- .suspend = sdhci_pltfm_suspend,
- .resume = sdhci_pltfm_resume,
-#endif
};
-static int __init sdhci_pxav2_init(void)
-{
- return platform_driver_register(&sdhci_pxav2_driver);
-}
-
-static void __exit sdhci_pxav2_exit(void)
-{
- platform_driver_unregister(&sdhci_pxav2_driver);
-}
-module_init(sdhci_pxav2_init);
-module_exit(sdhci_pxav2_exit);
+module_platform_driver(sdhci_pxav2_driver);
MODULE_DESCRIPTION("SDHCI driver for pxav2");
MODULE_AUTHOR("Marvell International Ltd.");
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
index cff4ad3e7a59..f29695683556 100644
--- a/drivers/mmc/host/sdhci-pxav3.c
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -264,26 +264,13 @@ static struct platform_driver sdhci_pxav3_driver = {
.driver = {
.name = "sdhci-pxav3",
.owner = THIS_MODULE,
+ .pm = SDHCI_PLTFM_PMOPS,
},
.probe = sdhci_pxav3_probe,
.remove = __devexit_p(sdhci_pxav3_remove),
-#ifdef CONFIG_PM
- .suspend = sdhci_pltfm_suspend,
- .resume = sdhci_pltfm_resume,
-#endif
};
-static int __init sdhci_pxav3_init(void)
-{
- return platform_driver_register(&sdhci_pxav3_driver);
-}
-
-static void __exit sdhci_pxav3_exit(void)
-{
- platform_driver_unregister(&sdhci_pxav3_driver);
-}
-module_init(sdhci_pxav3_init);
-module_exit(sdhci_pxav3_exit);
+module_platform_driver(sdhci_pxav3_driver);
MODULE_DESCRIPTION("SDHCI driver for pxav3");
MODULE_AUTHOR("Marvell International Ltd.");
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 3d00e722efc9..1af756ee0f9a 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -80,7 +80,7 @@ static void sdhci_s3c_check_sclk(struct sdhci_host *host)
tmp &= ~S3C_SDHCI_CTRL2_SELBASECLK_MASK;
tmp |= ourhost->cur_clk << S3C_SDHCI_CTRL2_SELBASECLK_SHIFT;
- writel(tmp, host->ioaddr + 0x80);
+ writel(tmp, host->ioaddr + S3C_SDHCI_CONTROL2);
}
}
@@ -435,14 +435,11 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
for (clks = 0, ptr = 0; ptr < MAX_BUS_CLK; ptr++) {
struct clk *clk;
- char *name = pdata->clocks[ptr];
-
- if (name == NULL)
- continue;
+ char name[14];
+ snprintf(name, 14, "mmc_busclk.%d", ptr);
clk = clk_get(dev, name);
if (IS_ERR(clk)) {
- dev_err(dev, "failed to get clock %s\n", name);
continue;
}
@@ -524,6 +521,9 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
if (pdata->host_caps)
host->mmc->caps |= pdata->host_caps;
+ if (pdata->pm_caps)
+ host->mmc->pm_caps |= pdata->pm_caps;
+
host->quirks |= (SDHCI_QUIRK_32BIT_DMA_ADDR |
SDHCI_QUIRK_32BIT_DMA_SIZE);
@@ -622,48 +622,42 @@ static int __devexit sdhci_s3c_remove(struct platform_device *pdev)
#ifdef CONFIG_PM
-static int sdhci_s3c_suspend(struct platform_device *dev, pm_message_t pm)
+static int sdhci_s3c_suspend(struct device *dev)
{
- struct sdhci_host *host = platform_get_drvdata(dev);
+ struct sdhci_host *host = dev_get_drvdata(dev);
- return sdhci_suspend_host(host, pm);
+ return sdhci_suspend_host(host);
}
-static int sdhci_s3c_resume(struct platform_device *dev)
+static int sdhci_s3c_resume(struct device *dev)
{
- struct sdhci_host *host = platform_get_drvdata(dev);
+ struct sdhci_host *host = dev_get_drvdata(dev);
return sdhci_resume_host(host);
}
+static const struct dev_pm_ops sdhci_s3c_pmops = {
+ .suspend = sdhci_s3c_suspend,
+ .resume = sdhci_s3c_resume,
+};
+
+#define SDHCI_S3C_PMOPS (&sdhci_s3c_pmops)
+
#else
-#define sdhci_s3c_suspend NULL
-#define sdhci_s3c_resume NULL
+#define SDHCI_S3C_PMOPS NULL
#endif
static struct platform_driver sdhci_s3c_driver = {
.probe = sdhci_s3c_probe,
.remove = __devexit_p(sdhci_s3c_remove),
- .suspend = sdhci_s3c_suspend,
- .resume = sdhci_s3c_resume,
.driver = {
.owner = THIS_MODULE,
.name = "s3c-sdhci",
+ .pm = SDHCI_S3C_PMOPS,
},
};
-static int __init sdhci_s3c_init(void)
-{
- return platform_driver_register(&sdhci_s3c_driver);
-}
-
-static void __exit sdhci_s3c_exit(void)
-{
- platform_driver_unregister(&sdhci_s3c_driver);
-}
-
-module_init(sdhci_s3c_init);
-module_exit(sdhci_s3c_exit);
+module_platform_driver(sdhci_s3c_driver);
MODULE_DESCRIPTION("Samsung SDHCI (HSMMC) glue");
MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>");
diff --git a/drivers/mmc/host/sdhci-spear.c b/drivers/mmc/host/sdhci-spear.c
index 63cc8b6a1c9e..b7f8b33c5f19 100644
--- a/drivers/mmc/host/sdhci-spear.c
+++ b/drivers/mmc/host/sdhci-spear.c
@@ -21,6 +21,7 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
+#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sdhci-spear.h>
@@ -271,26 +272,54 @@ static int __devexit sdhci_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM
+static int sdhci_suspend(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct spear_sdhci *sdhci = dev_get_platdata(dev);
+ int ret;
+
+ ret = sdhci_suspend_host(host);
+ if (!ret)
+ clk_disable(sdhci->clk);
+
+ return ret;
+}
+
+static int sdhci_resume(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct spear_sdhci *sdhci = dev_get_platdata(dev);
+ int ret;
+
+ ret = clk_enable(sdhci->clk);
+ if (ret) {
+ dev_dbg(dev, "Resume: Error enabling clock\n");
+ return ret;
+ }
+
+ return sdhci_resume_host(host);
+}
+
+const struct dev_pm_ops sdhci_pm_ops = {
+ .suspend = sdhci_suspend,
+ .resume = sdhci_resume,
+};
+#endif
+
static struct platform_driver sdhci_driver = {
.driver = {
.name = "sdhci",
.owner = THIS_MODULE,
+#ifdef CONFIG_PM
+ .pm = &sdhci_pm_ops,
+#endif
},
.probe = sdhci_probe,
.remove = __devexit_p(sdhci_remove),
};
-static int __init sdhci_init(void)
-{
- return platform_driver_register(&sdhci_driver);
-}
-module_init(sdhci_init);
-
-static void __exit sdhci_exit(void)
-{
- platform_driver_unregister(&sdhci_driver);
-}
-module_exit(sdhci_exit);
+module_platform_driver(sdhci_driver);
MODULE_DESCRIPTION("SPEAr Secure Digital Host Controller Interface driver");
MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index 89699e861fc1..78a36eba4df0 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -318,26 +318,13 @@ static struct platform_driver sdhci_tegra_driver = {
.name = "sdhci-tegra",
.owner = THIS_MODULE,
.of_match_table = sdhci_tegra_dt_match,
+ .pm = SDHCI_PLTFM_PMOPS,
},
.probe = sdhci_tegra_probe,
.remove = __devexit_p(sdhci_tegra_remove),
-#ifdef CONFIG_PM
- .suspend = sdhci_pltfm_suspend,
- .resume = sdhci_pltfm_resume,
-#endif
};
-static int __init sdhci_tegra_init(void)
-{
- return platform_driver_register(&sdhci_tegra_driver);
-}
-module_init(sdhci_tegra_init);
-
-static void __exit sdhci_tegra_exit(void)
-{
- platform_driver_unregister(&sdhci_tegra_driver);
-}
-module_exit(sdhci_tegra_exit);
+module_platform_driver(sdhci_tegra_driver);
MODULE_DESCRIPTION("SDHCI driver for Tegra");
MODULE_AUTHOR(" Google, Inc.");
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 6d8eea323541..8d66706824a6 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -49,7 +49,7 @@ static void sdhci_finish_data(struct sdhci_host *);
static void sdhci_send_command(struct sdhci_host *, struct mmc_command *);
static void sdhci_finish_command(struct sdhci_host *);
-static int sdhci_execute_tuning(struct mmc_host *mmc);
+static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode);
static void sdhci_tuning_timer(unsigned long data);
#ifdef CONFIG_PM_RUNTIME
@@ -146,10 +146,8 @@ static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
{
u32 present, irqs;
- if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
- return;
-
- if (host->quirks2 & SDHCI_QUIRK2_OWN_CARD_DETECTION)
+ if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
+ !mmc_card_is_removable(host->mmc))
return;
present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
@@ -214,6 +212,11 @@ static void sdhci_reset(struct sdhci_host *host, u8 mask)
if (host->quirks & SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET)
sdhci_clear_set_irqs(host, SDHCI_INT_ALL_MASK, ier);
+
+ if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
+ if ((host->ops->enable_dma) && (mask & SDHCI_RESET_ALL))
+ host->ops->enable_dma(host);
+ }
}
static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios);
@@ -423,12 +426,12 @@ static void sdhci_transfer_pio(struct sdhci_host *host)
static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
{
local_irq_save(*flags);
- return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
+ return kmap_atomic(sg_page(sg)) + sg->offset;
}
static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
{
- kunmap_atomic(buffer, KM_BIO_SRC_IRQ);
+ kunmap_atomic(buffer);
local_irq_restore(*flags);
}
@@ -1016,7 +1019,8 @@ static void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
flags |= SDHCI_CMD_INDEX;
/* CMD19 is special in that the Data Present Select should be set */
- if (cmd->data || (cmd->opcode == MMC_SEND_TUNING_BLOCK))
+ if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
+ cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
flags |= SDHCI_CMD_DATA;
sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
@@ -1066,12 +1070,15 @@ static void sdhci_finish_command(struct sdhci_host *host)
static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
int div = 0; /* Initialized for compiler warning */
+ int real_div = div, clk_mul = 1;
u16 clk = 0;
unsigned long timeout;
- if (clock == host->clock)
+ if (clock && clock == host->clock)
return;
+ host->mmc->actual_clock = 0;
+
if (host->ops->set_clock) {
host->ops->set_clock(host, clock);
if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK)
@@ -1109,6 +1116,8 @@ static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
* Control register.
*/
clk = SDHCI_PROG_CLOCK_MODE;
+ real_div = div;
+ clk_mul = host->clk_mul;
div--;
}
} else {
@@ -1122,6 +1131,7 @@ static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
break;
}
}
+ real_div = div;
div >>= 1;
}
} else {
@@ -1130,9 +1140,13 @@ static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
if ((host->max_clk / div) <= clock)
break;
}
+ real_div = div;
div >>= 1;
}
+ if (real_div)
+ host->mmc->actual_clock = (host->max_clk * clk_mul) / real_div;
+
clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
<< SDHCI_DIVIDER_HI_SHIFT;
@@ -1160,7 +1174,7 @@ out:
host->clock = clock;
}
-static void sdhci_set_power(struct sdhci_host *host, unsigned short power)
+static int sdhci_set_power(struct sdhci_host *host, unsigned short power)
{
u8 pwr = 0;
@@ -1183,13 +1197,13 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned short power)
}
if (host->pwr == pwr)
- return;
+ return -1;
host->pwr = pwr;
if (pwr == 0) {
sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
- return;
+ return 0;
}
/*
@@ -1216,6 +1230,8 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned short power)
*/
if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
mdelay(10);
+
+ return power;
}
/*****************************************************************************\
@@ -1277,7 +1293,7 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
if ((host->flags & SDHCI_NEEDS_RETUNING) &&
!(present_state & (SDHCI_DOING_WRITE | SDHCI_DOING_READ))) {
spin_unlock_irqrestore(&host->lock, flags);
- sdhci_execute_tuning(mmc);
+ sdhci_execute_tuning(mmc, mrq->cmd->opcode);
spin_lock_irqsave(&host->lock, flags);
/* Restore original mmc_request structure */
@@ -1297,12 +1313,17 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
{
unsigned long flags;
+ int vdd_bit = -1;
u8 ctrl;
spin_lock_irqsave(&host->lock, flags);
- if (host->flags & SDHCI_DEVICE_DEAD)
- goto out;
+ if (host->flags & SDHCI_DEVICE_DEAD) {
+ spin_unlock_irqrestore(&host->lock, flags);
+ if (host->vmmc && ios->power_mode == MMC_POWER_OFF)
+ mmc_regulator_set_ocr(host->mmc, host->vmmc, 0);
+ return;
+ }
/*
* Reset the chip on each power off.
@@ -1316,9 +1337,15 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
sdhci_set_clock(host, ios->clock);
if (ios->power_mode == MMC_POWER_OFF)
- sdhci_set_power(host, -1);
+ vdd_bit = sdhci_set_power(host, -1);
else
- sdhci_set_power(host, ios->vdd);
+ vdd_bit = sdhci_set_power(host, ios->vdd);
+
+ if (host->vmmc && vdd_bit != -1) {
+ spin_unlock_irqrestore(&host->lock, flags);
+ mmc_regulator_set_ocr(host->mmc, host->vmmc, vdd_bit);
+ spin_lock_irqsave(&host->lock, flags);
+ }
if (host->ops->platform_send_init_74_clocks)
host->ops->platform_send_init_74_clocks(host, ios->power_mode);
@@ -1361,11 +1388,11 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
unsigned int clock;
/* In case of UHS-I modes, set High Speed Enable */
- if ((ios->timing == MMC_TIMING_UHS_SDR50) ||
+ if ((ios->timing == MMC_TIMING_MMC_HS200) ||
+ (ios->timing == MMC_TIMING_UHS_SDR50) ||
(ios->timing == MMC_TIMING_UHS_SDR104) ||
(ios->timing == MMC_TIMING_UHS_DDR50) ||
- (ios->timing == MMC_TIMING_UHS_SDR25) ||
- (ios->timing == MMC_TIMING_UHS_SDR12))
+ (ios->timing == MMC_TIMING_UHS_SDR25))
ctrl |= SDHCI_CTRL_HISPD;
ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
@@ -1415,7 +1442,9 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
/* Select Bus Speed Mode for host */
ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
- if (ios->timing == MMC_TIMING_UHS_SDR12)
+ if (ios->timing == MMC_TIMING_MMC_HS200)
+ ctrl_2 |= SDHCI_CTRL_HS_SDR200;
+ else if (ios->timing == MMC_TIMING_UHS_SDR12)
ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
else if (ios->timing == MMC_TIMING_UHS_SDR25)
ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
@@ -1443,7 +1472,6 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
if(host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
-out:
mmiowb();
spin_unlock_irqrestore(&host->lock, flags);
}
@@ -1663,7 +1691,7 @@ static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
return err;
}
-static int sdhci_execute_tuning(struct mmc_host *mmc)
+static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
struct sdhci_host *host;
u16 ctrl;
@@ -1671,6 +1699,7 @@ static int sdhci_execute_tuning(struct mmc_host *mmc)
int tuning_loop_counter = MAX_TUNING_LOOP;
unsigned long timeout;
int err = 0;
+ bool requires_tuning_nonuhs = false;
host = mmc_priv(mmc);
@@ -1681,13 +1710,19 @@ static int sdhci_execute_tuning(struct mmc_host *mmc)
ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
/*
- * Host Controller needs tuning only in case of SDR104 mode
- * and for SDR50 mode when Use Tuning for SDR50 is set in
+ * The Host Controller needs tuning only in case of SDR104 mode
+ * and for SDR50 mode when Use Tuning for SDR50 is set in the
* Capabilities register.
+ * If the Host Controller supports the HS200 mode then the
+ * tuning function has to be executed.
*/
+ if (((ctrl & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR50) &&
+ (host->flags & SDHCI_SDR50_NEEDS_TUNING ||
+ host->flags & SDHCI_HS200_NEEDS_TUNING))
+ requires_tuning_nonuhs = true;
+
if (((ctrl & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR104) ||
- (((ctrl & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR50) &&
- (host->flags & SDHCI_SDR50_NEEDS_TUNING)))
+ requires_tuning_nonuhs)
ctrl |= SDHCI_CTRL_EXEC_TUNING;
else {
spin_unlock(&host->lock);
@@ -1723,7 +1758,7 @@ static int sdhci_execute_tuning(struct mmc_host *mmc)
if (!tuning_loop_counter && !timeout)
break;
- cmd.opcode = MMC_SEND_TUNING_BLOCK;
+ cmd.opcode = opcode;
cmd.arg = 0;
cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
cmd.retries = 0;
@@ -1738,7 +1773,17 @@ static int sdhci_execute_tuning(struct mmc_host *mmc)
* block to the Host Controller. So we set the block size
* to 64 here.
*/
- sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64), SDHCI_BLOCK_SIZE);
+ if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200) {
+ if (mmc->ios.bus_width == MMC_BUS_WIDTH_8)
+ sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 128),
+ SDHCI_BLOCK_SIZE);
+ else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4)
+ sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
+ SDHCI_BLOCK_SIZE);
+ } else {
+ sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
+ SDHCI_BLOCK_SIZE);
+ }
/*
* The tuning block is sent by the card to the host controller.
@@ -2121,12 +2166,14 @@ static void sdhci_show_adma_error(struct sdhci_host *host) { }
static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
{
+ u32 command;
BUG_ON(intmask == 0);
/* CMD19 generates _only_ Buffer Read Ready interrupt */
if (intmask & SDHCI_INT_DATA_AVAIL) {
- if (SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)) ==
- MMC_SEND_TUNING_BLOCK) {
+ command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
+ if (command == MMC_SEND_TUNING_BLOCK ||
+ command == MMC_SEND_TUNING_BLOCK_HS200) {
host->tuning_done = 1;
wake_up(&host->buf_ready_int);
return;
@@ -2327,29 +2374,36 @@ out:
#ifdef CONFIG_PM
-int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state)
+int sdhci_suspend_host(struct sdhci_host *host)
{
int ret;
+ bool has_tuning_timer;
sdhci_disable_card_detection(host);
/* Disable tuning since we are suspending */
- if (host->version >= SDHCI_SPEC_300 && host->tuning_count &&
- host->tuning_mode == SDHCI_TUNING_MODE_1) {
+ has_tuning_timer = host->version >= SDHCI_SPEC_300 &&
+ host->tuning_count && host->tuning_mode == SDHCI_TUNING_MODE_1;
+ if (has_tuning_timer) {
+ del_timer_sync(&host->tuning_timer);
host->flags &= ~SDHCI_NEEDS_RETUNING;
- mod_timer(&host->tuning_timer, jiffies +
- host->tuning_count * HZ);
}
ret = mmc_suspend_host(host->mmc);
- if (ret)
+ if (ret) {
+ if (has_tuning_timer) {
+ host->flags |= SDHCI_NEEDS_RETUNING;
+ mod_timer(&host->tuning_timer, jiffies +
+ host->tuning_count * HZ);
+ }
+
+ sdhci_enable_card_detection(host);
+
return ret;
+ }
free_irq(host->irq, host);
- if (host->vmmc)
- ret = regulator_disable(host->vmmc);
-
return ret;
}
@@ -2359,12 +2413,6 @@ int sdhci_resume_host(struct sdhci_host *host)
{
int ret;
- if (host->vmmc) {
- int ret = regulator_enable(host->vmmc);
- if (ret)
- return ret;
- }
-
if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
if (host->ops->enable_dma)
host->ops->enable_dma(host);
@@ -2727,10 +2775,14 @@ int sdhci_add_host(struct sdhci_host *host)
if (caps[1] & SDHCI_SUPPORT_DDR50)
mmc->caps |= MMC_CAP_UHS_DDR50;
- /* Does the host needs tuning for SDR50? */
+ /* Does the host need tuning for SDR50? */
if (caps[1] & SDHCI_USE_SDR50_TUNING)
host->flags |= SDHCI_SDR50_NEEDS_TUNING;
+ /* Does the host need tuning for HS200? */
+ if (mmc->caps2 & MMC_CAP2_HS200)
+ host->flags |= SDHCI_HS200_NEEDS_TUNING;
+
/* Driver Type(s) (A, C, D) supported by the host */
if (caps[1] & SDHCI_DRIVER_TYPE_A)
mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
@@ -2926,8 +2978,6 @@ int sdhci_add_host(struct sdhci_host *host)
if (IS_ERR(host->vmmc)) {
pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc));
host->vmmc = NULL;
- } else {
- regulator_enable(host->vmmc);
}
sdhci_init(host, 0);
@@ -3016,10 +3066,8 @@ void sdhci_remove_host(struct sdhci_host *host, int dead)
tasklet_kill(&host->card_tasklet);
tasklet_kill(&host->finish_tasklet);
- if (host->vmmc) {
- regulator_disable(host->vmmc);
+ if (host->vmmc)
regulator_put(host->vmmc);
- }
kfree(host->adma_desc);
kfree(host->align_buffer);
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 0a5b65460d8a..ad265b96b75b 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -158,6 +158,7 @@
#define SDHCI_CTRL_UHS_SDR50 0x0002
#define SDHCI_CTRL_UHS_SDR104 0x0003
#define SDHCI_CTRL_UHS_DDR50 0x0004
+#define SDHCI_CTRL_HS_SDR200 0x0005 /* reserved value in SDIO spec */
#define SDHCI_CTRL_VDD_180 0x0008
#define SDHCI_CTRL_DRV_TYPE_MASK 0x0030
#define SDHCI_CTRL_DRV_TYPE_B 0x0000
@@ -374,7 +375,7 @@ extern int sdhci_add_host(struct sdhci_host *host);
extern void sdhci_remove_host(struct sdhci_host *host, int dead);
#ifdef CONFIG_PM
-extern int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state);
+extern int sdhci_suspend_host(struct sdhci_host *host);
extern int sdhci_resume_host(struct sdhci_host *host);
extern void sdhci_enable_irq_wakeups(struct sdhci_host *host);
#endif
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 369366c8e205..f5d8b53be333 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -16,6 +16,33 @@
*
*/
+/*
+ * The MMCIF driver is now processing MMC requests asynchronously, according
+ * to the Linux MMC API requirement.
+ *
+ * The MMCIF driver processes MMC requests in up to 3 stages: command, optional
+ * data, and optional stop. To achieve asynchronous processing each of these
+ * stages is split into two halves: a top and a bottom half. The top half
+ * initialises the hardware, installs a timeout handler to handle completion
+ * timeouts, and returns. In case of the command stage this immediately returns
+ * control to the caller, leaving all further processing to run asynchronously.
+ * All further request processing is performed by the bottom halves.
+ *
+ * The bottom half further consists of a "hard" IRQ handler, an IRQ handler
+ * thread, a DMA completion callback, if DMA is used, a timeout work, and
+ * request- and stage-specific handler methods.
+ *
+ * Each bottom half run begins with either a hardware interrupt, a DMA callback
+ * invocation, or a timeout work run. In case of an error or a successful
+ * processing completion, the MMC core is informed and the request processing is
+ * finished. In case processing has to continue, i.e., if data has to be read
+ * from or written to the card, or if a stop command has to be sent, the next
+ * top half is called, which performs the necessary hardware handling and
+ * reschedules the timeout work. This returns the driver state machine into the
+ * bottom half waiting state.
+ */
+
+#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
@@ -123,6 +150,11 @@
#define MASK_MRBSYTO (1 << 1)
#define MASK_MRSPTO (1 << 0)
+#define MASK_START_CMD (MASK_MCMDVIO | MASK_MBUFVIO | MASK_MWDATERR | \
+ MASK_MRDATERR | MASK_MRIDXERR | MASK_MRSPERR | \
+ MASK_MCCSTO | MASK_MCRCSTO | MASK_MWDATTO | \
+ MASK_MRDATTO | MASK_MRBSYTO | MASK_MRSPTO)
+
/* CE_HOST_STS1 */
#define STS1_CMDSEQ (1 << 31)
@@ -162,9 +194,21 @@ enum mmcif_state {
STATE_IOS,
};
+enum mmcif_wait_for {
+ MMCIF_WAIT_FOR_REQUEST,
+ MMCIF_WAIT_FOR_CMD,
+ MMCIF_WAIT_FOR_MREAD,
+ MMCIF_WAIT_FOR_MWRITE,
+ MMCIF_WAIT_FOR_READ,
+ MMCIF_WAIT_FOR_WRITE,
+ MMCIF_WAIT_FOR_READ_END,
+ MMCIF_WAIT_FOR_WRITE_END,
+ MMCIF_WAIT_FOR_STOP,
+};
+
struct sh_mmcif_host {
struct mmc_host *mmc;
- struct mmc_data *data;
+ struct mmc_request *mrq;
struct platform_device *pd;
struct sh_dmae_slave dma_slave_tx;
struct sh_dmae_slave dma_slave_rx;
@@ -172,11 +216,17 @@ struct sh_mmcif_host {
unsigned int clk;
int bus_width;
bool sd_error;
+ bool dying;
long timeout;
void __iomem *addr;
- struct completion intr_wait;
+ u32 *pio_ptr;
+ spinlock_t lock; /* protect sh_mmcif_host::state */
enum mmcif_state state;
- spinlock_t lock;
+ enum mmcif_wait_for wait_for;
+ struct delayed_work timeout_work;
+ size_t blocksize;
+ int sg_idx;
+ int sg_blkidx;
bool power;
bool card_present;
@@ -202,19 +252,21 @@ static inline void sh_mmcif_bitclr(struct sh_mmcif_host *host,
static void mmcif_dma_complete(void *arg)
{
struct sh_mmcif_host *host = arg;
+ struct mmc_data *data = host->mrq->data;
+
dev_dbg(&host->pd->dev, "Command completed\n");
- if (WARN(!host->data, "%s: NULL data in DMA completion!\n",
+ if (WARN(!data, "%s: NULL data in DMA completion!\n",
dev_name(&host->pd->dev)))
return;
- if (host->data->flags & MMC_DATA_READ)
+ if (data->flags & MMC_DATA_READ)
dma_unmap_sg(host->chan_rx->device->dev,
- host->data->sg, host->data->sg_len,
+ data->sg, data->sg_len,
DMA_FROM_DEVICE);
else
dma_unmap_sg(host->chan_tx->device->dev,
- host->data->sg, host->data->sg_len,
+ data->sg, data->sg_len,
DMA_TO_DEVICE);
complete(&host->dma_complete);
@@ -222,18 +274,19 @@ static void mmcif_dma_complete(void *arg)
static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
{
- struct scatterlist *sg = host->data->sg;
+ struct mmc_data *data = host->mrq->data;
+ struct scatterlist *sg = data->sg;
struct dma_async_tx_descriptor *desc = NULL;
struct dma_chan *chan = host->chan_rx;
dma_cookie_t cookie = -EINVAL;
int ret;
- ret = dma_map_sg(chan->device->dev, sg, host->data->sg_len,
+ ret = dma_map_sg(chan->device->dev, sg, data->sg_len,
DMA_FROM_DEVICE);
if (ret > 0) {
host->dma_active = true;
desc = chan->device->device_prep_slave_sg(chan, sg, ret,
- DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
}
if (desc) {
@@ -244,7 +297,7 @@ static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
dma_async_issue_pending(chan);
}
dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n",
- __func__, host->data->sg_len, ret, cookie);
+ __func__, data->sg_len, ret, cookie);
if (!desc) {
/* DMA failed, fall back to PIO */
@@ -265,23 +318,24 @@ static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
}
dev_dbg(&host->pd->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
- desc, cookie, host->data->sg_len);
+ desc, cookie, data->sg_len);
}
static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
{
- struct scatterlist *sg = host->data->sg;
+ struct mmc_data *data = host->mrq->data;
+ struct scatterlist *sg = data->sg;
struct dma_async_tx_descriptor *desc = NULL;
struct dma_chan *chan = host->chan_tx;
dma_cookie_t cookie = -EINVAL;
int ret;
- ret = dma_map_sg(chan->device->dev, sg, host->data->sg_len,
+ ret = dma_map_sg(chan->device->dev, sg, data->sg_len,
DMA_TO_DEVICE);
if (ret > 0) {
host->dma_active = true;
desc = chan->device->device_prep_slave_sg(chan, sg, ret,
- DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
}
if (desc) {
@@ -292,7 +346,7 @@ static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
dma_async_issue_pending(chan);
}
dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n",
- __func__, host->data->sg_len, ret, cookie);
+ __func__, data->sg_len, ret, cookie);
if (!desc) {
/* DMA failed, fall back to PIO */
@@ -399,7 +453,7 @@ static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk)
sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_SUP_PCLK);
else
sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR &
- (ilog2(__rounddown_pow_of_two(host->clk / clk)) << 16));
+ ((fls(host->clk / clk) - 1) << 16));
sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
}
@@ -421,7 +475,7 @@ static void sh_mmcif_sync_reset(struct sh_mmcif_host *host)
static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
{
u32 state1, state2;
- int ret, timeout = 10000000;
+ int ret, timeout;
host->sd_error = false;
@@ -433,155 +487,212 @@ static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
if (state1 & STS1_CMDSEQ) {
sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, CMD_CTRL_BREAK);
sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, ~CMD_CTRL_BREAK);
- while (1) {
- timeout--;
- if (timeout < 0) {
- dev_err(&host->pd->dev,
- "Forceed end of command sequence timeout err\n");
- return -EIO;
- }
+ for (timeout = 10000000; timeout; timeout--) {
if (!(sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1)
- & STS1_CMDSEQ))
+ & STS1_CMDSEQ))
break;
mdelay(1);
}
+ if (!timeout) {
+ dev_err(&host->pd->dev,
+ "Forced end of command sequence timeout err\n");
+ return -EIO;
+ }
sh_mmcif_sync_reset(host);
dev_dbg(&host->pd->dev, "Forced end of command sequence\n");
return -EIO;
}
if (state2 & STS2_CRC_ERR) {
- dev_dbg(&host->pd->dev, ": Happened CRC error\n");
+ dev_dbg(&host->pd->dev, ": CRC error\n");
ret = -EIO;
} else if (state2 & STS2_TIMEOUT_ERR) {
- dev_dbg(&host->pd->dev, ": Happened Timeout error\n");
+ dev_dbg(&host->pd->dev, ": Timeout\n");
ret = -ETIMEDOUT;
} else {
- dev_dbg(&host->pd->dev, ": Happened End/Index error\n");
+ dev_dbg(&host->pd->dev, ": End/Index error\n");
ret = -EIO;
}
return ret;
}
-static int sh_mmcif_single_read(struct sh_mmcif_host *host,
- struct mmc_request *mrq)
+static bool sh_mmcif_next_block(struct sh_mmcif_host *host, u32 *p)
{
- struct mmc_data *data = mrq->data;
- long time;
- u32 blocksize, i, *p = sg_virt(data->sg);
+ struct mmc_data *data = host->mrq->data;
+
+ host->sg_blkidx += host->blocksize;
+
+ /* data->sg->length must be a multiple of host->blocksize? */
+ BUG_ON(host->sg_blkidx > data->sg->length);
+
+ if (host->sg_blkidx == data->sg->length) {
+ host->sg_blkidx = 0;
+ if (++host->sg_idx < data->sg_len)
+ host->pio_ptr = sg_virt(++data->sg);
+ } else {
+ host->pio_ptr = p;
+ }
+
+ if (host->sg_idx == data->sg_len)
+ return false;
+
+ return true;
+}
+
+static void sh_mmcif_single_read(struct sh_mmcif_host *host,
+ struct mmc_request *mrq)
+{
+ host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
+ BLOCK_SIZE_MASK) + 3;
+
+ host->wait_for = MMCIF_WAIT_FOR_READ;
+ schedule_delayed_work(&host->timeout_work, host->timeout);
/* buf read enable */
sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
- time = wait_for_completion_interruptible_timeout(&host->intr_wait,
- host->timeout);
- if (time <= 0 || host->sd_error)
- return sh_mmcif_error_manage(host);
-
- blocksize = (BLOCK_SIZE_MASK &
- sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET)) + 3;
- for (i = 0; i < blocksize / 4; i++)
+}
+
+static bool sh_mmcif_read_block(struct sh_mmcif_host *host)
+{
+ struct mmc_data *data = host->mrq->data;
+ u32 *p = sg_virt(data->sg);
+ int i;
+
+ if (host->sd_error) {
+ data->error = sh_mmcif_error_manage(host);
+ return false;
+ }
+
+ for (i = 0; i < host->blocksize / 4; i++)
*p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);
/* buffer read end */
sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
- time = wait_for_completion_interruptible_timeout(&host->intr_wait,
- host->timeout);
- if (time <= 0 || host->sd_error)
- return sh_mmcif_error_manage(host);
+ host->wait_for = MMCIF_WAIT_FOR_READ_END;
- return 0;
+ return true;
}
-static int sh_mmcif_multi_read(struct sh_mmcif_host *host,
- struct mmc_request *mrq)
+static void sh_mmcif_multi_read(struct sh_mmcif_host *host,
+ struct mmc_request *mrq)
{
struct mmc_data *data = mrq->data;
- long time;
- u32 blocksize, i, j, sec, *p;
-
- blocksize = BLOCK_SIZE_MASK & sh_mmcif_readl(host->addr,
- MMCIF_CE_BLOCK_SET);
- for (j = 0; j < data->sg_len; j++) {
- p = sg_virt(data->sg);
- for (sec = 0; sec < data->sg->length / blocksize; sec++) {
- sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
- /* buf read enable */
- time = wait_for_completion_interruptible_timeout(&host->intr_wait,
- host->timeout);
-
- if (time <= 0 || host->sd_error)
- return sh_mmcif_error_manage(host);
-
- for (i = 0; i < blocksize / 4; i++)
- *p++ = sh_mmcif_readl(host->addr,
- MMCIF_CE_DATA);
- }
- if (j < data->sg_len - 1)
- data->sg++;
+
+ if (!data->sg_len || !data->sg->length)
+ return;
+
+ host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
+ BLOCK_SIZE_MASK;
+
+ host->wait_for = MMCIF_WAIT_FOR_MREAD;
+ host->sg_idx = 0;
+ host->sg_blkidx = 0;
+ host->pio_ptr = sg_virt(data->sg);
+ schedule_delayed_work(&host->timeout_work, host->timeout);
+ sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
+}
+
+static bool sh_mmcif_mread_block(struct sh_mmcif_host *host)
+{
+ struct mmc_data *data = host->mrq->data;
+ u32 *p = host->pio_ptr;
+ int i;
+
+ if (host->sd_error) {
+ data->error = sh_mmcif_error_manage(host);
+ return false;
}
- return 0;
+
+ BUG_ON(!data->sg->length);
+
+ for (i = 0; i < host->blocksize / 4; i++)
+ *p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);
+
+ if (!sh_mmcif_next_block(host, p))
+ return false;
+
+ schedule_delayed_work(&host->timeout_work, host->timeout);
+ sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
+
+ return true;
}
-static int sh_mmcif_single_write(struct sh_mmcif_host *host,
+static void sh_mmcif_single_write(struct sh_mmcif_host *host,
struct mmc_request *mrq)
{
- struct mmc_data *data = mrq->data;
- long time;
- u32 blocksize, i, *p = sg_virt(data->sg);
+ host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
+ BLOCK_SIZE_MASK) + 3;
- sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
+ host->wait_for = MMCIF_WAIT_FOR_WRITE;
+ schedule_delayed_work(&host->timeout_work, host->timeout);
/* buf write enable */
- time = wait_for_completion_interruptible_timeout(&host->intr_wait,
- host->timeout);
- if (time <= 0 || host->sd_error)
- return sh_mmcif_error_manage(host);
-
- blocksize = (BLOCK_SIZE_MASK &
- sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET)) + 3;
- for (i = 0; i < blocksize / 4; i++)
+ sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
+}
+
+static bool sh_mmcif_write_block(struct sh_mmcif_host *host)
+{
+ struct mmc_data *data = host->mrq->data;
+ u32 *p = sg_virt(data->sg);
+ int i;
+
+ if (host->sd_error) {
+ data->error = sh_mmcif_error_manage(host);
+ return false;
+ }
+
+ for (i = 0; i < host->blocksize / 4; i++)
sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);
/* buffer write end */
sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
+ host->wait_for = MMCIF_WAIT_FOR_WRITE_END;
- time = wait_for_completion_interruptible_timeout(&host->intr_wait,
- host->timeout);
- if (time <= 0 || host->sd_error)
- return sh_mmcif_error_manage(host);
-
- return 0;
+ return true;
}
-static int sh_mmcif_multi_write(struct sh_mmcif_host *host,
- struct mmc_request *mrq)
+static void sh_mmcif_multi_write(struct sh_mmcif_host *host,
+ struct mmc_request *mrq)
{
struct mmc_data *data = mrq->data;
- long time;
- u32 i, sec, j, blocksize, *p;
- blocksize = BLOCK_SIZE_MASK & sh_mmcif_readl(host->addr,
- MMCIF_CE_BLOCK_SET);
+ if (!data->sg_len || !data->sg->length)
+ return;
- for (j = 0; j < data->sg_len; j++) {
- p = sg_virt(data->sg);
- for (sec = 0; sec < data->sg->length / blocksize; sec++) {
- sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
- /* buf write enable*/
- time = wait_for_completion_interruptible_timeout(&host->intr_wait,
- host->timeout);
+ host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
+ BLOCK_SIZE_MASK;
- if (time <= 0 || host->sd_error)
- return sh_mmcif_error_manage(host);
+ host->wait_for = MMCIF_WAIT_FOR_MWRITE;
+ host->sg_idx = 0;
+ host->sg_blkidx = 0;
+ host->pio_ptr = sg_virt(data->sg);
+ schedule_delayed_work(&host->timeout_work, host->timeout);
+ sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
+}
- for (i = 0; i < blocksize / 4; i++)
- sh_mmcif_writel(host->addr,
- MMCIF_CE_DATA, *p++);
- }
- if (j < data->sg_len - 1)
- data->sg++;
+static bool sh_mmcif_mwrite_block(struct sh_mmcif_host *host)
+{
+ struct mmc_data *data = host->mrq->data;
+ u32 *p = host->pio_ptr;
+ int i;
+
+ if (host->sd_error) {
+ data->error = sh_mmcif_error_manage(host);
+ return false;
}
- return 0;
+
+ BUG_ON(!data->sg->length);
+
+ for (i = 0; i < host->blocksize / 4; i++)
+ sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);
+
+ if (!sh_mmcif_next_block(host, p))
+ return false;
+
+ schedule_delayed_work(&host->timeout_work, host->timeout);
+ sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
+
+ return true;
}
static void sh_mmcif_get_response(struct sh_mmcif_host *host,
@@ -603,8 +714,11 @@ static void sh_mmcif_get_cmd12response(struct sh_mmcif_host *host,
}
static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
- struct mmc_request *mrq, struct mmc_command *cmd, u32 opc)
+ struct mmc_request *mrq)
{
+ struct mmc_data *data = mrq->data;
+ struct mmc_command *cmd = mrq->cmd;
+ u32 opc = cmd->opcode;
u32 tmp = 0;
/* Response Type check */
@@ -636,7 +750,7 @@ static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
break;
}
/* WDAT / DATW */
- if (host->data) {
+ if (data) {
tmp |= CMD_SET_WDAT;
switch (host->bus_width) {
case MMC_BUS_WIDTH_1:
@@ -660,7 +774,7 @@ static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
if (opc == MMC_READ_MULTIPLE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK) {
tmp |= CMD_SET_CMLTE | CMD_SET_CMD12EN;
sh_mmcif_bitset(host, MMCIF_CE_BLOCK_SET,
- mrq->data->blocks << 16);
+ data->blocks << 16);
}
/* RIDXC[1:0] check bits */
if (opc == MMC_SEND_OP_COND || opc == MMC_ALL_SEND_CID ||
@@ -674,68 +788,60 @@ static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
tmp |= CMD_SET_CRC7C_INTERNAL;
- return opc = ((opc << 24) | tmp);
+ return (opc << 24) | tmp;
}
static int sh_mmcif_data_trans(struct sh_mmcif_host *host,
- struct mmc_request *mrq, u32 opc)
+ struct mmc_request *mrq, u32 opc)
{
- int ret;
-
switch (opc) {
case MMC_READ_MULTIPLE_BLOCK:
- ret = sh_mmcif_multi_read(host, mrq);
- break;
+ sh_mmcif_multi_read(host, mrq);
+ return 0;
case MMC_WRITE_MULTIPLE_BLOCK:
- ret = sh_mmcif_multi_write(host, mrq);
- break;
+ sh_mmcif_multi_write(host, mrq);
+ return 0;
case MMC_WRITE_BLOCK:
- ret = sh_mmcif_single_write(host, mrq);
- break;
+ sh_mmcif_single_write(host, mrq);
+ return 0;
case MMC_READ_SINGLE_BLOCK:
case MMC_SEND_EXT_CSD:
- ret = sh_mmcif_single_read(host, mrq);
- break;
+ sh_mmcif_single_read(host, mrq);
+ return 0;
default:
dev_err(&host->pd->dev, "UNSUPPORTED CMD = d'%08d\n", opc);
- ret = -EINVAL;
- break;
+ return -EINVAL;
}
- return ret;
}
static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
- struct mmc_request *mrq, struct mmc_command *cmd)
+ struct mmc_request *mrq)
{
- long time;
- int ret = 0, mask = 0;
+ struct mmc_command *cmd = mrq->cmd;
u32 opc = cmd->opcode;
+ u32 mask;
switch (opc) {
- /* respons busy check */
+ /* response busy check */
case MMC_SWITCH:
case MMC_STOP_TRANSMISSION:
case MMC_SET_WRITE_PROT:
case MMC_CLR_WRITE_PROT:
case MMC_ERASE:
case MMC_GEN_CMD:
- mask = MASK_MRBSYE;
+ mask = MASK_START_CMD | MASK_MRBSYE;
break;
default:
- mask = MASK_MCRSPE;
+ mask = MASK_START_CMD | MASK_MCRSPE;
break;
}
- mask |= MASK_MCMDVIO | MASK_MBUFVIO | MASK_MWDATERR |
- MASK_MRDATERR | MASK_MRIDXERR | MASK_MRSPERR |
- MASK_MCCSTO | MASK_MCRCSTO | MASK_MWDATTO |
- MASK_MRDATTO | MASK_MRBSYTO | MASK_MRSPTO;
- if (host->data) {
+ if (mrq->data) {
sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET, 0);
sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET,
mrq->data->blksz);
}
- opc = sh_mmcif_set_cmd(host, mrq, cmd, opc);
+ opc = sh_mmcif_set_cmd(host, mrq);
sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0);
sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, mask);
@@ -744,80 +850,28 @@ static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
/* set cmd */
sh_mmcif_writel(host->addr, MMCIF_CE_CMD_SET, opc);
- time = wait_for_completion_interruptible_timeout(&host->intr_wait,
- host->timeout);
- if (time <= 0) {
- cmd->error = sh_mmcif_error_manage(host);
- return;
- }
- if (host->sd_error) {
- switch (cmd->opcode) {
- case MMC_ALL_SEND_CID:
- case MMC_SELECT_CARD:
- case MMC_APP_CMD:
- cmd->error = -ETIMEDOUT;
- break;
- default:
- dev_dbg(&host->pd->dev, "Cmd(d'%d) err\n",
- cmd->opcode);
- cmd->error = sh_mmcif_error_manage(host);
- break;
- }
- host->sd_error = false;
- return;
- }
- if (!(cmd->flags & MMC_RSP_PRESENT)) {
- cmd->error = 0;
- return;
- }
- sh_mmcif_get_response(host, cmd);
- if (host->data) {
- if (!host->dma_active) {
- ret = sh_mmcif_data_trans(host, mrq, cmd->opcode);
- } else {
- long time =
- wait_for_completion_interruptible_timeout(&host->dma_complete,
- host->timeout);
- if (!time)
- ret = -ETIMEDOUT;
- else if (time < 0)
- ret = time;
- sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC,
- BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
- host->dma_active = false;
- }
- if (ret < 0)
- mrq->data->bytes_xfered = 0;
- else
- mrq->data->bytes_xfered =
- mrq->data->blocks * mrq->data->blksz;
- }
- cmd->error = ret;
+ host->wait_for = MMCIF_WAIT_FOR_CMD;
+ schedule_delayed_work(&host->timeout_work, host->timeout);
}
static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host,
- struct mmc_request *mrq, struct mmc_command *cmd)
+ struct mmc_request *mrq)
{
- long time;
-
- if (mrq->cmd->opcode == MMC_READ_MULTIPLE_BLOCK)
+ switch (mrq->cmd->opcode) {
+ case MMC_READ_MULTIPLE_BLOCK:
sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
- else if (mrq->cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK)
+ break;
+ case MMC_WRITE_MULTIPLE_BLOCK:
sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
- else {
+ break;
+ default:
dev_err(&host->pd->dev, "unsupported stop cmd\n");
- cmd->error = sh_mmcif_error_manage(host);
+ mrq->stop->error = sh_mmcif_error_manage(host);
return;
}
- time = wait_for_completion_interruptible_timeout(&host->intr_wait,
- host->timeout);
- if (time <= 0 || host->sd_error) {
- cmd->error = sh_mmcif_error_manage(host);
- return;
- }
- sh_mmcif_get_cmd12response(host, cmd);
- cmd->error = 0;
+ host->wait_for = MMCIF_WAIT_FOR_STOP;
+ schedule_delayed_work(&host->timeout_work, host->timeout);
}
static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
@@ -856,23 +910,10 @@ static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
default:
break;
}
- host->data = mrq->data;
- if (mrq->data) {
- if (mrq->data->flags & MMC_DATA_READ) {
- if (host->chan_rx)
- sh_mmcif_start_dma_rx(host);
- } else {
- if (host->chan_tx)
- sh_mmcif_start_dma_tx(host);
- }
- }
- sh_mmcif_start_cmd(host, mrq, mrq->cmd);
- host->data = NULL;
- if (!mrq->cmd->error && mrq->stop)
- sh_mmcif_stop_cmd(host, mrq, mrq->stop);
- host->state = STATE_IDLE;
- mmc_request_done(mmc, mrq);
+ host->mrq = mrq;
+
+ sh_mmcif_start_cmd(host, mrq);
}
static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
@@ -908,7 +949,7 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
if (host->power) {
pm_runtime_put(&host->pd->dev);
host->power = false;
- if (p->down_pwr)
+ if (p->down_pwr && ios->power_mode == MMC_POWER_OFF)
p->down_pwr(host->pd);
}
host->state = STATE_IDLE;
@@ -947,9 +988,156 @@ static struct mmc_host_ops sh_mmcif_ops = {
.get_cd = sh_mmcif_get_cd,
};
-static void sh_mmcif_detect(struct mmc_host *mmc)
+static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host)
{
- mmc_detect_change(mmc, 0);
+ struct mmc_command *cmd = host->mrq->cmd;
+ struct mmc_data *data = host->mrq->data;
+ long time;
+
+ if (host->sd_error) {
+ switch (cmd->opcode) {
+ case MMC_ALL_SEND_CID:
+ case MMC_SELECT_CARD:
+ case MMC_APP_CMD:
+ cmd->error = -ETIMEDOUT;
+ host->sd_error = false;
+ break;
+ default:
+ cmd->error = sh_mmcif_error_manage(host);
+ dev_dbg(&host->pd->dev, "Cmd(d'%d) error %d\n",
+ cmd->opcode, cmd->error);
+ break;
+ }
+ return false;
+ }
+ if (!(cmd->flags & MMC_RSP_PRESENT)) {
+ cmd->error = 0;
+ return false;
+ }
+
+ sh_mmcif_get_response(host, cmd);
+
+ if (!data)
+ return false;
+
+ if (data->flags & MMC_DATA_READ) {
+ if (host->chan_rx)
+ sh_mmcif_start_dma_rx(host);
+ } else {
+ if (host->chan_tx)
+ sh_mmcif_start_dma_tx(host);
+ }
+
+ if (!host->dma_active) {
+ data->error = sh_mmcif_data_trans(host, host->mrq, cmd->opcode);
+ if (!data->error)
+ return true;
+ return false;
+ }
+
+ /* Running in the IRQ thread, can sleep */
+ time = wait_for_completion_interruptible_timeout(&host->dma_complete,
+ host->timeout);
+ if (host->sd_error) {
+ dev_err(host->mmc->parent,
+ "Error IRQ while waiting for DMA completion!\n");
+ /* Woken up by an error IRQ: abort DMA */
+ if (data->flags & MMC_DATA_READ)
+ dmaengine_terminate_all(host->chan_rx);
+ else
+ dmaengine_terminate_all(host->chan_tx);
+ data->error = sh_mmcif_error_manage(host);
+ } else if (!time) {
+ data->error = -ETIMEDOUT;
+ } else if (time < 0) {
+ data->error = time;
+ }
+ sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC,
+ BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
+ host->dma_active = false;
+
+ if (data->error)
+ data->bytes_xfered = 0;
+
+ return false;
+}
+
+static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
+{
+ struct sh_mmcif_host *host = dev_id;
+ struct mmc_request *mrq = host->mrq;
+ struct mmc_data *data = mrq->data;
+
+ cancel_delayed_work_sync(&host->timeout_work);
+
+ /*
+ * All handlers return true, if processing continues, and false, if the
+ * request has to be completed - successfully or not
+ */
+ switch (host->wait_for) {
+ case MMCIF_WAIT_FOR_REQUEST:
+ /* We're too late, the timeout has already kicked in */
+ return IRQ_HANDLED;
+ case MMCIF_WAIT_FOR_CMD:
+ if (sh_mmcif_end_cmd(host))
+ /* Wait for data */
+ return IRQ_HANDLED;
+ break;
+ case MMCIF_WAIT_FOR_MREAD:
+ if (sh_mmcif_mread_block(host))
+ /* Wait for more data */
+ return IRQ_HANDLED;
+ break;
+ case MMCIF_WAIT_FOR_READ:
+ if (sh_mmcif_read_block(host))
+ /* Wait for data end */
+ return IRQ_HANDLED;
+ break;
+ case MMCIF_WAIT_FOR_MWRITE:
+ if (sh_mmcif_mwrite_block(host))
+ /* Wait for data to write */
+ return IRQ_HANDLED;
+ break;
+ case MMCIF_WAIT_FOR_WRITE:
+ if (sh_mmcif_write_block(host))
+ /* Wait for data end */
+ return IRQ_HANDLED;
+ break;
+ case MMCIF_WAIT_FOR_STOP:
+ if (host->sd_error) {
+ mrq->stop->error = sh_mmcif_error_manage(host);
+ break;
+ }
+ sh_mmcif_get_cmd12response(host, mrq->stop);
+ mrq->stop->error = 0;
+ break;
+ case MMCIF_WAIT_FOR_READ_END:
+ case MMCIF_WAIT_FOR_WRITE_END:
+ if (host->sd_error)
+ data->error = sh_mmcif_error_manage(host);
+ break;
+ default:
+ BUG();
+ }
+
+ if (host->wait_for != MMCIF_WAIT_FOR_STOP) {
+ if (!mrq->cmd->error && data && !data->error)
+ data->bytes_xfered =
+ data->blocks * data->blksz;
+
+ if (mrq->stop && !mrq->cmd->error && (!data || !data->error)) {
+ sh_mmcif_stop_cmd(host, mrq);
+ if (!mrq->stop->error)
+ return IRQ_HANDLED;
+ }
+ }
+
+ host->wait_for = MMCIF_WAIT_FOR_REQUEST;
+ host->state = STATE_IDLE;
+ host->mrq = NULL;
+ mmc_request_done(host->mmc, mrq);
+
+ return IRQ_HANDLED;
}
static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
@@ -960,7 +1148,12 @@ static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
state = sh_mmcif_readl(host->addr, MMCIF_CE_INT);
- if (state & INT_RBSYE) {
+ if (state & INT_ERR_STS) {
+ /* error interrupts - process first */
+ sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state);
+ sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state);
+ err = 1;
+ } else if (state & INT_RBSYE) {
sh_mmcif_writel(host->addr, MMCIF_CE_INT,
~(INT_RBSYE | INT_CRSPE));
sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MRBSYE);
@@ -988,11 +1181,6 @@ static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
sh_mmcif_writel(host->addr, MMCIF_CE_INT,
~(INT_CMD12RBE | INT_CMD12CRE));
sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
- } else if (state & INT_ERR_STS) {
- /* err interrupts */
- sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state);
- sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state);
- err = 1;
} else {
dev_dbg(&host->pd->dev, "Unsupported interrupt: 0x%x\n", state);
sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state);
@@ -1003,14 +1191,57 @@ static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
host->sd_error = true;
dev_dbg(&host->pd->dev, "int err state = %08x\n", state);
}
- if (state & ~(INT_CMD12RBE | INT_CMD12CRE))
- complete(&host->intr_wait);
- else
+ if (state & ~(INT_CMD12RBE | INT_CMD12CRE)) {
+ if (!host->dma_active)
+ return IRQ_WAKE_THREAD;
+ else if (host->sd_error)
+ mmcif_dma_complete(host);
+ } else {
dev_dbg(&host->pd->dev, "Unexpected IRQ 0x%x\n", state);
+ }
return IRQ_HANDLED;
}
+static void mmcif_timeout_work(struct work_struct *work)
+{
+ struct delayed_work *d = container_of(work, struct delayed_work, work);
+ struct sh_mmcif_host *host = container_of(d, struct sh_mmcif_host, timeout_work);
+ struct mmc_request *mrq = host->mrq;
+
+ if (host->dying)
+ /* Don't run after mmc_remove_host() */
+ return;
+
+ /*
+ * Handle races with cancel_delayed_work(), unless
+ * cancel_delayed_work_sync() is used
+ */
+ switch (host->wait_for) {
+ case MMCIF_WAIT_FOR_CMD:
+ mrq->cmd->error = sh_mmcif_error_manage(host);
+ break;
+ case MMCIF_WAIT_FOR_STOP:
+ mrq->stop->error = sh_mmcif_error_manage(host);
+ break;
+ case MMCIF_WAIT_FOR_MREAD:
+ case MMCIF_WAIT_FOR_MWRITE:
+ case MMCIF_WAIT_FOR_READ:
+ case MMCIF_WAIT_FOR_WRITE:
+ case MMCIF_WAIT_FOR_READ_END:
+ case MMCIF_WAIT_FOR_WRITE_END:
+ mrq->data->error = sh_mmcif_error_manage(host);
+ break;
+ default:
+ BUG();
+ }
+
+ host->state = STATE_IDLE;
+ host->wait_for = MMCIF_WAIT_FOR_REQUEST;
+ host->mrq = NULL;
+ mmc_request_done(host->mmc, mrq);
+}
+
static int __devinit sh_mmcif_probe(struct platform_device *pdev)
{
int ret = 0, irq[2];
@@ -1064,7 +1295,6 @@ static int __devinit sh_mmcif_probe(struct platform_device *pdev)
host->clk = clk_get_rate(host->hclk);
host->pd = pdev;
- init_completion(&host->intr_wait);
spin_lock_init(&host->lock);
mmc->ops = &sh_mmcif_ops;
@@ -1101,19 +1331,21 @@ static int __devinit sh_mmcif_probe(struct platform_device *pdev)
sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
- ret = request_irq(irq[0], sh_mmcif_intr, 0, "sh_mmc:error", host);
+ ret = request_threaded_irq(irq[0], sh_mmcif_intr, sh_mmcif_irqt, 0, "sh_mmc:error", host);
if (ret) {
dev_err(&pdev->dev, "request_irq error (sh_mmc:error)\n");
goto clean_up3;
}
- ret = request_irq(irq[1], sh_mmcif_intr, 0, "sh_mmc:int", host);
+ ret = request_threaded_irq(irq[1], sh_mmcif_intr, sh_mmcif_irqt, 0, "sh_mmc:int", host);
if (ret) {
free_irq(irq[0], host);
dev_err(&pdev->dev, "request_irq error (sh_mmc:int)\n");
goto clean_up3;
}
- sh_mmcif_detect(host->mmc);
+ INIT_DELAYED_WORK(&host->timeout_work, mmcif_timeout_work);
+
+ mmc_detect_change(host->mmc, 0);
dev_info(&pdev->dev, "driver version %s\n", DRIVER_VERSION);
dev_dbg(&pdev->dev, "chip ver H'%04x\n",
@@ -1139,11 +1371,19 @@ static int __devexit sh_mmcif_remove(struct platform_device *pdev)
struct sh_mmcif_host *host = platform_get_drvdata(pdev);
int irq[2];
+ host->dying = true;
pm_runtime_get_sync(&pdev->dev);
mmc_remove_host(host->mmc);
sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
+ /*
+ * FIXME: cancel_delayed_work(_sync)() and free_irq() race with the
+ * mmc_remove_host() call above. But swapping order doesn't help either
+ * (a query on the linux-mmc mailing list didn't bring any replies).
+ */
+ cancel_delayed_work_sync(&host->timeout_work);
+
if (host->addr)
iounmap(host->addr);
@@ -1206,19 +1446,7 @@ static struct platform_driver sh_mmcif_driver = {
},
};
-static int __init sh_mmcif_init(void)
-{
- return platform_driver_register(&sh_mmcif_driver);
-}
-
-static void __exit sh_mmcif_exit(void)
-{
- platform_driver_unregister(&sh_mmcif_driver);
-}
-
-module_init(sh_mmcif_init);
-module_exit(sh_mmcif_exit);
-
+module_platform_driver(sh_mmcif_driver);
MODULE_DESCRIPTION("SuperH on-chip MMC/eMMC interface driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/host/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c
index 41ae6466bd83..58da3c44acc5 100644
--- a/drivers/mmc/host/sh_mobile_sdhi.c
+++ b/drivers/mmc/host/sh_mobile_sdhi.c
@@ -282,18 +282,7 @@ static struct platform_driver sh_mobile_sdhi_driver = {
.remove = __devexit_p(sh_mobile_sdhi_remove),
};
-static int __init sh_mobile_sdhi_init(void)
-{
- return platform_driver_register(&sh_mobile_sdhi_driver);
-}
-
-static void __exit sh_mobile_sdhi_exit(void)
-{
- platform_driver_unregister(&sh_mobile_sdhi_driver);
-}
-
-module_init(sh_mobile_sdhi_init);
-module_exit(sh_mobile_sdhi_exit);
+module_platform_driver(sh_mobile_sdhi_driver);
MODULE_DESCRIPTION("SuperH Mobile SDHI driver");
MODULE_AUTHOR("Magnus Damm");
diff --git a/drivers/mmc/host/tifm_sd.c b/drivers/mmc/host/tifm_sd.c
index f70d04664cac..43d962829f8e 100644
--- a/drivers/mmc/host/tifm_sd.c
+++ b/drivers/mmc/host/tifm_sd.c
@@ -22,8 +22,8 @@
#define DRIVER_NAME "tifm_sd"
#define DRIVER_VERSION "0.8"
-static int no_dma = 0;
-static int fixed_timeout = 0;
+static bool no_dma = 0;
+static bool fixed_timeout = 0;
module_param(no_dma, bool, 0644);
module_param(fixed_timeout, bool, 0644);
@@ -118,7 +118,7 @@ static void tifm_sd_read_fifo(struct tifm_sd *host, struct page *pg,
unsigned char *buf;
unsigned int pos = 0, val;
- buf = kmap_atomic(pg, KM_BIO_DST_IRQ) + off;
+ buf = kmap_atomic(pg) + off;
if (host->cmd_flags & DATA_CARRY) {
buf[pos++] = host->bounce_buf_data[0];
host->cmd_flags &= ~DATA_CARRY;
@@ -134,7 +134,7 @@ static void tifm_sd_read_fifo(struct tifm_sd *host, struct page *pg,
}
buf[pos++] = (val >> 8) & 0xff;
}
- kunmap_atomic(buf - off, KM_BIO_DST_IRQ);
+ kunmap_atomic(buf - off);
}
static void tifm_sd_write_fifo(struct tifm_sd *host, struct page *pg,
@@ -144,7 +144,7 @@ static void tifm_sd_write_fifo(struct tifm_sd *host, struct page *pg,
unsigned char *buf;
unsigned int pos = 0, val;
- buf = kmap_atomic(pg, KM_BIO_SRC_IRQ) + off;
+ buf = kmap_atomic(pg) + off;
if (host->cmd_flags & DATA_CARRY) {
val = host->bounce_buf_data[0] | ((buf[pos++] << 8) & 0xff00);
writel(val, sock->addr + SOCK_MMCSD_DATA);
@@ -161,7 +161,7 @@ static void tifm_sd_write_fifo(struct tifm_sd *host, struct page *pg,
val |= (buf[pos++] << 8) & 0xff00;
writel(val, sock->addr + SOCK_MMCSD_DATA);
}
- kunmap_atomic(buf - off, KM_BIO_SRC_IRQ);
+ kunmap_atomic(buf - off);
}
static void tifm_sd_transfer_data(struct tifm_sd *host)
@@ -212,13 +212,13 @@ static void tifm_sd_copy_page(struct page *dst, unsigned int dst_off,
struct page *src, unsigned int src_off,
unsigned int count)
{
- unsigned char *src_buf = kmap_atomic(src, KM_BIO_SRC_IRQ) + src_off;
- unsigned char *dst_buf = kmap_atomic(dst, KM_BIO_DST_IRQ) + dst_off;
+ unsigned char *src_buf = kmap_atomic(src) + src_off;
+ unsigned char *dst_buf = kmap_atomic(dst) + dst_off;
memcpy(dst_buf, src_buf, count);
- kunmap_atomic(dst_buf - dst_off, KM_BIO_DST_IRQ);
- kunmap_atomic(src_buf - src_off, KM_BIO_SRC_IRQ);
+ kunmap_atomic(dst_buf - dst_off);
+ kunmap_atomic(src_buf - src_off);
}
static void tifm_sd_bounce_block(struct tifm_sd *host, struct mmc_data *r_data)
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index a4ea10242787..113ce6c9cf32 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -138,19 +138,7 @@ static struct platform_driver tmio_mmc_driver = {
.resume = tmio_mmc_resume,
};
-
-static int __init tmio_mmc_init(void)
-{
- return platform_driver_register(&tmio_mmc_driver);
-}
-
-static void __exit tmio_mmc_exit(void)
-{
- platform_driver_unregister(&tmio_mmc_driver);
-}
-
-module_init(tmio_mmc_init);
-module_exit(tmio_mmc_exit);
+module_platform_driver(tmio_mmc_driver);
MODULE_DESCRIPTION("Toshiba TMIO SD/MMC driver");
MODULE_AUTHOR("Ian Molton <spyro@f2s.com>");
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index 3020f98218f0..a95e6d901726 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -105,13 +105,13 @@ static inline char *tmio_mmc_kmap_atomic(struct scatterlist *sg,
unsigned long *flags)
{
local_irq_save(*flags);
- return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
+ return kmap_atomic(sg_page(sg)) + sg->offset;
}
static inline void tmio_mmc_kunmap_atomic(struct scatterlist *sg,
unsigned long *flags, void *virt)
{
- kunmap_atomic(virt - sg->offset, KM_BIO_SRC_IRQ);
+ kunmap_atomic(virt - sg->offset);
local_irq_restore(*flags);
}
diff --git a/drivers/mmc/host/tmio_mmc_dma.c b/drivers/mmc/host/tmio_mmc_dma.c
index 86f259cdfcbc..7a6e6cc8f8b8 100644
--- a/drivers/mmc/host/tmio_mmc_dma.c
+++ b/drivers/mmc/host/tmio_mmc_dma.c
@@ -77,7 +77,7 @@ static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE);
if (ret > 0)
desc = chan->device->device_prep_slave_sg(chan, sg, ret,
- DMA_FROM_DEVICE, DMA_CTRL_ACK);
+ DMA_DEV_TO_MEM, DMA_CTRL_ACK);
if (desc) {
cookie = dmaengine_submit(desc);
@@ -158,7 +158,7 @@ static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE);
if (ret > 0)
desc = chan->device->device_prep_slave_sg(chan, sg, ret,
- DMA_TO_DEVICE, DMA_CTRL_ACK);
+ DMA_MEM_TO_DEV, DMA_CTRL_ACK);
if (desc) {
cookie = dmaengine_submit(desc);
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
index d85a60cda167..abad01b37cfb 100644
--- a/drivers/mmc/host/tmio_mmc_pio.c
+++ b/drivers/mmc/host/tmio_mmc_pio.c
@@ -798,10 +798,9 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
/* start bus clock */
tmio_mmc_clk_start(host);
} else if (ios->power_mode != MMC_POWER_UP) {
- if (host->set_pwr)
+ if (host->set_pwr && ios->power_mode == MMC_POWER_OFF)
host->set_pwr(host->pdev, 0);
- if ((pdata->flags & TMIO_MMC_HAS_COLD_CD) &&
- pdata->power) {
+ if (pdata->power) {
pdata->power = false;
pm_runtime_put(&host->pdev->dev);
}
@@ -915,6 +914,23 @@ int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host,
if (ret < 0)
goto pm_disable;
+ /*
+ * There are 4 different scenarios for the card detection:
+ * 1) an external gpio irq handles the cd (best for power savings)
+ * 2) internal sdhi irq handles the cd
+ * 3) a worker thread polls the sdhi - indicated by MMC_CAP_NEEDS_POLL
+ * 4) the medium is non-removable - indicated by MMC_CAP_NONREMOVABLE
+ *
+ * While we increment the rtpm counter for all scenarios when the mmc
+ * core activates us by calling an appropriate set_ios(), we must
+ * additionally ensure that in case 2) the tmio mmc hardware stays
+ * powered on during runtime for the card detection to work.
+ */
+ if (!(pdata->flags & TMIO_MMC_HAS_COLD_CD
+ || mmc->caps & MMC_CAP_NEEDS_POLL
+ || mmc->caps & MMC_CAP_NONREMOVABLE))
+ pm_runtime_get_noresume(&pdev->dev);
+
tmio_mmc_clk_stop(_host);
tmio_mmc_reset(_host);
@@ -933,12 +949,6 @@ int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host,
/* See if we also get DMA */
tmio_mmc_request_dma(_host, pdata);
- /* We have to keep the device powered for its card detection to work */
- if (!(pdata->flags & TMIO_MMC_HAS_COLD_CD)) {
- pdata->power = true;
- pm_runtime_get_noresume(&pdev->dev);
- }
-
mmc_add_host(mmc);
/* Unmask the IRQs we want to know about */
@@ -974,7 +984,9 @@ void tmio_mmc_host_remove(struct tmio_mmc_host *host)
* the controller, the runtime PM is suspended and pdata->power == false,
* so, our .runtime_resume() will not try to detect a card in the slot.
*/
- if (host->pdata->flags & TMIO_MMC_HAS_COLD_CD)
+ if (host->pdata->flags & TMIO_MMC_HAS_COLD_CD
+ || host->mmc->caps & MMC_CAP_NEEDS_POLL
+ || host->mmc->caps & MMC_CAP_NONREMOVABLE)
pm_runtime_get_sync(&pdev->dev);
mmc_remove_host(host->mmc);
diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c
index e8f6e65183d7..3135a1a5d75d 100644
--- a/drivers/mmc/host/vub300.c
+++ b/drivers/mmc/host/vub300.c
@@ -223,25 +223,25 @@ enum SD_RESPONSE_TYPE {
#define FUN(c) (0x000007 & (c->arg>>28))
#define REG(c) (0x01FFFF & (c->arg>>9))
-static int limit_speed_to_24_MHz;
+static bool limit_speed_to_24_MHz;
module_param(limit_speed_to_24_MHz, bool, 0644);
MODULE_PARM_DESC(limit_speed_to_24_MHz, "Limit Max SDIO Clock Speed to 24 MHz");
-static int pad_input_to_usb_pkt;
+static bool pad_input_to_usb_pkt;
module_param(pad_input_to_usb_pkt, bool, 0644);
MODULE_PARM_DESC(pad_input_to_usb_pkt,
"Pad USB data input transfers to whole USB Packet");
-static int disable_offload_processing;
+static bool disable_offload_processing;
module_param(disable_offload_processing, bool, 0644);
MODULE_PARM_DESC(disable_offload_processing, "Disable Offload Processing");
-static int force_1_bit_data_xfers;
+static bool force_1_bit_data_xfers;
module_param(force_1_bit_data_xfers, bool, 0644);
MODULE_PARM_DESC(force_1_bit_data_xfers,
"Force SDIO Data Transfers to 1-bit Mode");
-static int force_polling_for_irqs;
+static bool force_polling_for_irqs;
module_param(force_polling_for_irqs, bool, 0644);
MODULE_PARM_DESC(force_polling_for_irqs, "Force Polling for SDIO interrupts");
@@ -259,7 +259,7 @@ static int firmware_rom_wait_states = 0x04;
static int firmware_rom_wait_states = 0x1C;
#endif
-module_param(firmware_rom_wait_states, bool, 0644);
+module_param(firmware_rom_wait_states, int, 0644);
MODULE_PARM_DESC(firmware_rom_wait_states,
"ROM wait states byte=RRRIIEEE (Reserved Internal External)");
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 318a869286ab..1be621841400 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -140,6 +140,14 @@ config MTD_AR7_PARTS
---help---
TI AR7 partitioning support
+config MTD_BCM63XX_PARTS
+ tristate "BCM63XX CFE partitioning support"
+ depends on BCM63XX
+ select CRC32
+ help
+ This provides partition parsing for BCM63xx devices with CFE
+ bootloaders.
+
comment "User Modules And Translation Layers"
config MTD_CHAR
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile
index 9aaac3ac89f3..f90135429dc7 100644
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o
obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdlinepart.o
obj-$(CONFIG_MTD_AFS_PARTS) += afs.o
obj-$(CONFIG_MTD_AR7_PARTS) += ar7part.o
+obj-$(CONFIG_MTD_BCM63XX_PARTS) += bcm63xxpart.o
# 'Users' - code which presents functionality to userspace.
obj-$(CONFIG_MTD_CHAR) += mtdchar.o
diff --git a/drivers/mtd/afs.c b/drivers/mtd/afs.c
index 89a02f6f65dc..5a3942bf109c 100644
--- a/drivers/mtd/afs.c
+++ b/drivers/mtd/afs.c
@@ -75,7 +75,7 @@ afs_read_footer(struct mtd_info *mtd, u_int *img_start, u_int *iis_start,
size_t sz;
int ret;
- ret = mtd->read(mtd, ptr, sizeof(fs), &sz, (u_char *) &fs);
+ ret = mtd_read(mtd, ptr, sizeof(fs), &sz, (u_char *)&fs);
if (ret >= 0 && sz != sizeof(fs))
ret = -EINVAL;
@@ -132,7 +132,7 @@ afs_read_iis(struct mtd_info *mtd, struct image_info_struct *iis, u_int ptr)
int ret, i;
memset(iis, 0, sizeof(*iis));
- ret = mtd->read(mtd, ptr, sizeof(*iis), &sz, (u_char *) iis);
+ ret = mtd_read(mtd, ptr, sizeof(*iis), &sz, (u_char *)iis);
if (ret < 0)
goto failed;
diff --git a/drivers/mtd/ar7part.c b/drivers/mtd/ar7part.c
index f40ea4547554..945393129952 100644
--- a/drivers/mtd/ar7part.c
+++ b/drivers/mtd/ar7part.c
@@ -73,8 +73,8 @@ static int create_mtd_partitions(struct mtd_info *master,
do { /* Try 10 blocks starting from master->erasesize */
offset = pre_size;
- master->read(master, offset,
- sizeof(header), &len, (uint8_t *)&header);
+ mtd_read(master, offset, sizeof(header), &len,
+ (uint8_t *)&header);
if (!strncmp((char *)&header, "TIENV0.8", 8))
ar7_parts[1].offset = pre_size;
if (header.checksum == LOADER_MAGIC1)
@@ -95,16 +95,16 @@ static int create_mtd_partitions(struct mtd_info *master,
case LOADER_MAGIC1:
while (header.length) {
offset += sizeof(header) + header.length;
- master->read(master, offset, sizeof(header),
- &len, (uint8_t *)&header);
+ mtd_read(master, offset, sizeof(header), &len,
+ (uint8_t *)&header);
}
root_offset = offset + sizeof(header) + 4;
break;
case LOADER_MAGIC2:
while (header.length) {
offset += sizeof(header) + header.length;
- master->read(master, offset, sizeof(header),
- &len, (uint8_t *)&header);
+ mtd_read(master, offset, sizeof(header), &len,
+ (uint8_t *)&header);
}
root_offset = offset + sizeof(header) + 4 + 0xff;
root_offset &= ~(uint32_t)0xff;
@@ -114,8 +114,7 @@ static int create_mtd_partitions(struct mtd_info *master,
break;
}
- master->read(master, root_offset,
- sizeof(header), &len, (u8 *)&header);
+ mtd_read(master, root_offset, sizeof(header), &len, (u8 *)&header);
if (header.checksum != SQUASHFS_MAGIC) {
root_offset += master->erasesize - 1;
root_offset &= ~(master->erasesize - 1);
diff --git a/drivers/mtd/bcm63xxpart.c b/drivers/mtd/bcm63xxpart.c
new file mode 100644
index 000000000000..608321ee056e
--- /dev/null
+++ b/drivers/mtd/bcm63xxpart.c
@@ -0,0 +1,222 @@
+/*
+ * BCM63XX CFE image tag parser
+ *
+ * Copyright © 2006-2008 Florian Fainelli <florian@openwrt.org>
+ * Mike Albon <malbon@openwrt.org>
+ * Copyright © 2009-2010 Daniel Dickinson <openwrt@cshore.neomailbox.net>
+ * Copyright © 2011 Jonas Gorski <jonas.gorski@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/crc32.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+
+#include <asm/mach-bcm63xx/bcm963xx_tag.h>
+#include <asm/mach-bcm63xx/board_bcm963xx.h>
+
+#define BCM63XX_EXTENDED_SIZE 0xBFC00000 /* Extended flash address */
+
+#define BCM63XX_MIN_CFE_SIZE 0x10000 /* always at least 64KiB */
+#define BCM63XX_MIN_NVRAM_SIZE 0x10000 /* always at least 64KiB */
+
+#define BCM63XX_CFE_MAGIC_OFFSET 0x4e0
+
+static int bcm63xx_detect_cfe(struct mtd_info *master)
+{
+ char buf[9];
+ int ret;
+ size_t retlen;
+
+ ret = mtd_read(master, BCM963XX_CFE_VERSION_OFFSET, 5, &retlen,
+ (void *)buf);
+ buf[retlen] = 0;
+
+ if (ret)
+ return ret;
+
+ if (strncmp("cfe-v", buf, 5) == 0)
+ return 0;
+
+ /* very old CFE's do not have the cfe-v string, so check for magic */
+ ret = mtd_read(master, BCM63XX_CFE_MAGIC_OFFSET, 8, &retlen,
+ (void *)buf);
+ buf[retlen] = 0;
+
+ return strncmp("CFE1CFE1", buf, 8);
+}
+
+static int bcm63xx_parse_cfe_partitions(struct mtd_info *master,
+ struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data)
+{
+ /* CFE, NVRAM and global Linux are always present */
+ int nrparts = 3, curpart = 0;
+ struct bcm_tag *buf;
+ struct mtd_partition *parts;
+ int ret;
+ size_t retlen;
+ unsigned int rootfsaddr, kerneladdr, spareaddr;
+ unsigned int rootfslen, kernellen, sparelen, totallen;
+ unsigned int cfelen, nvramlen;
+ int namelen = 0;
+ int i;
+ u32 computed_crc;
+
+ if (bcm63xx_detect_cfe(master))
+ return -EINVAL;
+
+ cfelen = max_t(uint32_t, master->erasesize, BCM63XX_MIN_CFE_SIZE);
+ nvramlen = max_t(uint32_t, master->erasesize, BCM63XX_MIN_NVRAM_SIZE);
+
+ /* Allocate memory for buffer */
+ buf = vmalloc(sizeof(struct bcm_tag));
+ if (!buf)
+ return -ENOMEM;
+
+ /* Get the tag */
+ ret = mtd_read(master, cfelen, sizeof(struct bcm_tag), &retlen,
+ (void *)buf);
+
+ if (retlen != sizeof(struct bcm_tag)) {
+ vfree(buf);
+ return -EIO;
+ }
+
+ computed_crc = crc32_le(IMAGETAG_CRC_START, (u8 *)buf,
+ offsetof(struct bcm_tag, header_crc));
+ if (computed_crc == buf->header_crc) {
+ char *boardid = &(buf->board_id[0]);
+ char *tagversion = &(buf->tag_version[0]);
+
+ sscanf(buf->kernel_address, "%u", &kerneladdr);
+ sscanf(buf->kernel_length, "%u", &kernellen);
+ sscanf(buf->total_length, "%u", &totallen);
+
+ pr_info("CFE boot tag found with version %s and board type %s\n",
+ tagversion, boardid);
+
+ kerneladdr = kerneladdr - BCM63XX_EXTENDED_SIZE;
+ rootfsaddr = kerneladdr + kernellen;
+ spareaddr = roundup(totallen, master->erasesize) + cfelen;
+ sparelen = master->size - spareaddr - nvramlen;
+ rootfslen = spareaddr - rootfsaddr;
+ } else {
+ pr_warn("CFE boot tag CRC invalid (expected %08x, actual %08x)\n",
+ buf->header_crc, computed_crc);
+ kernellen = 0;
+ rootfslen = 0;
+ rootfsaddr = 0;
+ spareaddr = cfelen;
+ sparelen = master->size - cfelen - nvramlen;
+ }
+
+ /* Determine number of partitions */
+ namelen = 8;
+ if (rootfslen > 0) {
+ nrparts++;
+ namelen += 6;
+ }
+ if (kernellen > 0) {
+ nrparts++;
+ namelen += 6;
+ }
+
+ /* Ask kernel for more memory */
+ parts = kzalloc(sizeof(*parts) * nrparts + 10 * nrparts, GFP_KERNEL);
+ if (!parts) {
+ vfree(buf);
+ return -ENOMEM;
+ }
+
+ /* Start building partition list */
+ parts[curpart].name = "CFE";
+ parts[curpart].offset = 0;
+ parts[curpart].size = cfelen;
+ curpart++;
+
+ if (kernellen > 0) {
+ parts[curpart].name = "kernel";
+ parts[curpart].offset = kerneladdr;
+ parts[curpart].size = kernellen;
+ curpart++;
+ }
+
+ if (rootfslen > 0) {
+ parts[curpart].name = "rootfs";
+ parts[curpart].offset = rootfsaddr;
+ parts[curpart].size = rootfslen;
+ if (sparelen > 0)
+ parts[curpart].size += sparelen;
+ curpart++;
+ }
+
+ parts[curpart].name = "nvram";
+ parts[curpart].offset = master->size - nvramlen;
+ parts[curpart].size = nvramlen;
+
+	/* Global partition "linux" to make firmware upgrades easy */
+ curpart++;
+ parts[curpart].name = "linux";
+ parts[curpart].offset = cfelen;
+ parts[curpart].size = master->size - cfelen - nvramlen;
+
+ for (i = 0; i < nrparts; i++)
+ pr_info("Partition %d is %s offset %lx and length %lx\n", i,
+ parts[i].name, (long unsigned int)(parts[i].offset),
+ (long unsigned int)(parts[i].size));
+
+ pr_info("Spare partition is offset %x and length %x\n", spareaddr,
+ sparelen);
+
+ *pparts = parts;
+ vfree(buf);
+
+ return nrparts;
+}
+
+static struct mtd_part_parser bcm63xx_cfe_parser = {
+ .owner = THIS_MODULE,
+ .parse_fn = bcm63xx_parse_cfe_partitions,
+ .name = "bcm63xxpart",
+};
+
+static int __init bcm63xx_cfe_parser_init(void)
+{
+ return register_mtd_parser(&bcm63xx_cfe_parser);
+}
+
+static void __exit bcm63xx_cfe_parser_exit(void)
+{
+ deregister_mtd_parser(&bcm63xx_cfe_parser);
+}
+
+module_init(bcm63xx_cfe_parser_init);
+module_exit(bcm63xx_cfe_parser_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Daniel Dickinson <openwrt@cshore.neomailbox.net>");
+MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
+MODULE_AUTHOR("Mike Albon <malbon@openwrt.org>");
+MODULE_AUTHOR("Jonas Gorski <jonas.gorski@gmail.com");
+MODULE_DESCRIPTION("MTD partitioning for BCM63XX CFE bootloaders");
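
As an illustration of the tag arithmetic used by bcm63xx_parse_cfe_partitions() above, here is a minimal user-space sketch. The sample_tag struct, its values, the 8 MiB flash size and the 64 KiB erase size are made-up stand-ins; only the 0xBFC00000 base and the round-up rule mirror the parser.

#include <stdio.h>
#include <stdlib.h>

#define EXTENDED_BASE	0xBFC00000u	/* flash base in CPU address space */

struct sample_tag {			/* simplified stand-in for struct bcm_tag */
	const char *kernel_address;	/* decimal string, CPU address of kernel */
	const char *kernel_length;	/* decimal string, kernel size in bytes */
	const char *total_length;	/* decimal string, CFE image size */
};

int main(void)
{
	struct sample_tag tag = {
		.kernel_address = "3217096704",	/* 0xBFC10000 */
		.kernel_length  = "1048576",	/* 1 MiB */
		.total_length   = "4194304",	/* 4 MiB image */
	};
	unsigned int erasesize = 0x10000, flashsize = 8 * 1024 * 1024;
	unsigned int cfelen = erasesize, nvramlen = erasesize;
	unsigned int kerneladdr, kernellen, totallen;
	unsigned int rootfsaddr, rootfslen, spareaddr, sparelen;

	kerneladdr = strtoul(tag.kernel_address, NULL, 10) - EXTENDED_BASE;
	kernellen = strtoul(tag.kernel_length, NULL, 10);
	totallen = strtoul(tag.total_length, NULL, 10);

	/* rootfs follows the kernel; spare space sits between the rounded-up
	 * image end and the NVRAM partition at the end of flash */
	rootfsaddr = kerneladdr + kernellen;
	spareaddr = ((totallen + erasesize - 1) / erasesize) * erasesize + cfelen;
	rootfslen = spareaddr - rootfsaddr;
	sparelen = flashsize - spareaddr - nvramlen;

	printf("kernel @ 0x%08x len 0x%08x\n", kerneladdr, kernellen);
	printf("rootfs @ 0x%08x len 0x%08x\n", rootfsaddr, rootfslen);
	printf("spare  @ 0x%08x len 0x%08x\n", spareaddr, sparelen);
	printf("nvram  @ 0x%08x len 0x%08x\n", flashsize - nvramlen, nvramlen);
	return 0;
}
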
diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
index 179814a95f3a..85e80180b65b 100644
--- a/drivers/mtd/chips/cfi_cmdset_0020.c
+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
@@ -139,8 +139,9 @@ struct mtd_info *cfi_cmdset_0020(struct map_info *map, int primary)
}
/* Do some byteswapping if necessary */
- extp->FeatureSupport = cfi32_to_cpu(extp->FeatureSupport);
- extp->BlkStatusRegMask = cfi32_to_cpu(extp->BlkStatusRegMask);
+ extp->FeatureSupport = cfi32_to_cpu(map, extp->FeatureSupport);
+ extp->BlkStatusRegMask = cfi32_to_cpu(map,
+ extp->BlkStatusRegMask);
#ifdef DEBUG_CFI_FEATURES
/* Tell the user about it in lots of lovely detail */
@@ -698,7 +699,8 @@ cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
continue;
}
memcpy(buffer+buflen, elem_base, ECCBUF_SIZE-buflen);
- ret = mtd->write(mtd, to, ECCBUF_SIZE, &thislen, buffer);
+ ret = mtd_write(mtd, to, ECCBUF_SIZE, &thislen,
+ buffer);
totlen += thislen;
if (ret || thislen != ECCBUF_SIZE)
goto write_error;
@@ -707,7 +709,8 @@ cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
to += ECCBUF_SIZE;
}
if (ECCBUF_DIV(elem_len)) { /* write clean aligned data */
- ret = mtd->write(mtd, to, ECCBUF_DIV(elem_len), &thislen, elem_base);
+ ret = mtd_write(mtd, to, ECCBUF_DIV(elem_len),
+ &thislen, elem_base);
totlen += thislen;
if (ret || thislen != ECCBUF_DIV(elem_len))
goto write_error;
@@ -721,7 +724,7 @@ cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
}
if (buflen) { /* flush last page, even if not full */
/* This is sometimes intended behaviour, really */
- ret = mtd->write(mtd, to, buflen, &thislen, buffer);
+ ret = mtd_write(mtd, to, buflen, &thislen, buffer);
totlen += thislen;
if (ret || thislen != ECCBUF_SIZE)
goto write_error;
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index 283d887f7825..37b05c3f2792 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -191,6 +191,7 @@ comment "Disk-On-Chip Device Drivers"
config MTD_DOC2000
tristate "M-Systems Disk-On-Chip 2000 and Millennium (DEPRECATED)"
+ depends on MTD_NAND
select MTD_DOCPROBE
select MTD_NAND_IDS
---help---
@@ -213,6 +214,7 @@ config MTD_DOC2000
config MTD_DOC2001
tristate "M-Systems Disk-On-Chip Millennium-only alternative driver (DEPRECATED)"
+ depends on MTD_NAND
select MTD_DOCPROBE
select MTD_NAND_IDS
---help---
@@ -234,6 +236,7 @@ config MTD_DOC2001
config MTD_DOC2001PLUS
tristate "M-Systems Disk-On-Chip Millennium Plus"
+ depends on MTD_NAND
select MTD_DOCPROBE
select MTD_NAND_IDS
---help---
@@ -251,6 +254,8 @@ config MTD_DOC2001PLUS
config MTD_DOCG3
tristate "M-Systems Disk-On-Chip G3"
+ select BCH
+ select BCH_CONST_PARAMS
---help---
This provides an MTD device driver for the M-Systems DiskOnChip
G3 devices.
@@ -259,6 +264,13 @@ config MTD_DOCG3
M-Systems and now Sandisk. The support is very experimental,
and doesn't give access to any write operations.
+if MTD_DOCG3
+config BCH_CONST_M
+ default 14
+config BCH_CONST_T
+ default 4
+endif
+
config MTD_DOCPROBE
tristate
select MTD_DOCECC
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index b78f23169d4e..e7e46d1e7463 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -14,7 +14,6 @@
#include <linux/list.h>
#include <linux/init.h>
#include <linux/mtd/mtd.h>
-#include <linux/buffer_head.h>
#include <linux/mutex.h>
#include <linux/mount.h>
#include <linux/slab.h>
@@ -288,7 +287,7 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size)
dev->mtd.flags = MTD_CAP_RAM;
dev->mtd.erase = block2mtd_erase;
dev->mtd.write = block2mtd_write;
- dev->mtd.writev = default_mtd_writev;
+ dev->mtd.writev = mtd_writev;
dev->mtd.sync = block2mtd_sync;
dev->mtd.read = block2mtd_read;
dev->mtd.priv = dev;
diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
index e9fad9151219..b1cdf6479019 100644
--- a/drivers/mtd/devices/doc2000.c
+++ b/drivers/mtd/devices/doc2000.c
@@ -562,23 +562,14 @@ void DoC2k_init(struct mtd_info *mtd)
mtd->type = MTD_NANDFLASH;
mtd->flags = MTD_CAP_NANDFLASH;
- mtd->size = 0;
- mtd->erasesize = 0;
mtd->writesize = 512;
mtd->oobsize = 16;
mtd->owner = THIS_MODULE;
mtd->erase = doc_erase;
- mtd->point = NULL;
- mtd->unpoint = NULL;
mtd->read = doc_read;
mtd->write = doc_write;
mtd->read_oob = doc_read_oob;
mtd->write_oob = doc_write_oob;
- mtd->sync = NULL;
-
- this->totlen = 0;
- this->numchips = 0;
-
this->curfloor = -1;
this->curchip = -1;
mutex_init(&this->lock);
diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
index a3f7a27499be..7543b98f46c4 100644
--- a/drivers/mtd/devices/doc2001.c
+++ b/drivers/mtd/devices/doc2001.c
@@ -343,25 +343,17 @@ void DoCMil_init(struct mtd_info *mtd)
mtd->type = MTD_NANDFLASH;
mtd->flags = MTD_CAP_NANDFLASH;
- mtd->size = 0;
/* FIXME: erase size is not always 8KiB */
mtd->erasesize = 0x2000;
-
mtd->writesize = 512;
mtd->oobsize = 16;
mtd->owner = THIS_MODULE;
mtd->erase = doc_erase;
- mtd->point = NULL;
- mtd->unpoint = NULL;
mtd->read = doc_read;
mtd->write = doc_write;
mtd->read_oob = doc_read_oob;
mtd->write_oob = doc_write_oob;
- mtd->sync = NULL;
-
- this->totlen = 0;
- this->numchips = 0;
this->curfloor = -1;
this->curchip = -1;
diff --git a/drivers/mtd/devices/doc2001plus.c b/drivers/mtd/devices/doc2001plus.c
index 99351bc3e0ed..177510d0e7ee 100644
--- a/drivers/mtd/devices/doc2001plus.c
+++ b/drivers/mtd/devices/doc2001plus.c
@@ -467,23 +467,14 @@ void DoCMilPlus_init(struct mtd_info *mtd)
mtd->type = MTD_NANDFLASH;
mtd->flags = MTD_CAP_NANDFLASH;
- mtd->size = 0;
-
- mtd->erasesize = 0;
mtd->writesize = 512;
mtd->oobsize = 16;
mtd->owner = THIS_MODULE;
mtd->erase = doc_erase;
- mtd->point = NULL;
- mtd->unpoint = NULL;
mtd->read = doc_read;
mtd->write = doc_write;
mtd->read_oob = doc_read_oob;
mtd->write_oob = doc_write_oob;
- mtd->sync = NULL;
-
- this->totlen = 0;
- this->numchips = 0;
this->curfloor = -1;
this->curchip = -1;
diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c
index bdcf5df982e8..ad11ef0a81f4 100644
--- a/drivers/mtd/devices/docg3.c
+++ b/drivers/mtd/devices/docg3.c
@@ -29,6 +29,9 @@
#include <linux/delay.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
+#include <linux/bitmap.h>
+#include <linux/bitrev.h>
+#include <linux/bch.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
@@ -41,11 +44,7 @@
*
* As no specification is available from M-Systems/Sandisk, this drivers lacks
* several functions available on the chip, as :
- * - block erase
- * - page write
* - IPL write
- * - ECC fixing (lack of BCH algorith understanding)
- * - powerdown / powerup
*
* The bus data width (8bits versus 16bits) is not handled (if_cfg flag), and
* the driver assumes a 16bits data bus.
@@ -53,8 +52,7 @@
* DocG3 relies on 2 ECC algorithms, which are handled in hardware :
* - a 1 byte Hamming code stored in the OOB for each page
* - a 7 bytes BCH code stored in the OOB for each page
- * The BCH part is only used for check purpose, no correction is available as
- * some information is missing. What is known is that :
+ * The BCH ECC is :
* - BCH is in GF(2^14)
* - BCH is over data of 520 bytes (512 page + 7 page_info bytes
* + 1 hamming byte)
@@ -63,6 +61,30 @@
*
*/
+static unsigned int reliable_mode;
+module_param(reliable_mode, uint, 0);
+MODULE_PARM_DESC(reliable_mode, "Set the docg3 mode (0=normal MLC, 1=fast, "
+ "2=reliable) : MLC normal operations are in normal mode");
+
+/**
+ * struct docg3_oobinfo - DiskOnChip G3 OOB layout
+ * @eccbytes: 8 bytes are used (1 for Hamming ECC, 7 for BCH ECC)
+ * @eccpos: ecc positions (byte 7 is Hamming ECC, byte 8-14 are BCH ECC)
+ * @oobfree: free pageinfo bytes (bytes 0 to 6, and byte 15)
+ * @oobavail: 8 free bytes remaining once the ECC bytes are reserved
+ */
+static struct nand_ecclayout docg3_oobinfo = {
+ .eccbytes = 8,
+ .eccpos = {7, 8, 9, 10, 11, 12, 13, 14},
+ .oobfree = {{0, 7}, {15, 1} },
+ .oobavail = 8,
+};
+
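
A small stand-alone sketch of how the docg3_oobinfo layout above carves up the 16 OOB bytes; the byte roles are read directly from eccpos/oobfree, and the program itself is for illustration only.

#include <stdio.h>

int main(void)
{
	int i;

	for (i = 0; i < 16; i++) {
		const char *role;

		if (i < 7)
			role = "page info (free)";
		else if (i == 7)
			role = "Hamming ECC";
		else if (i < 15)
			role = "BCH ECC";
		else
			role = "unused (free)";
		printf("OOB byte %2d: %s\n", i, role);
	}
	return 0;
}
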
+/**
+ * struct docg3_bch - BCH engine
+ */
+static struct bch_control *docg3_bch;
+
static inline u8 doc_readb(struct docg3 *docg3, u16 reg)
{
u8 val = readb(docg3->base + reg);
@@ -82,7 +104,7 @@ static inline u16 doc_readw(struct docg3 *docg3, u16 reg)
static inline void doc_writeb(struct docg3 *docg3, u8 val, u16 reg)
{
writeb(val, docg3->base + reg);
- trace_docg3_io(1, 16, reg, val);
+ trace_docg3_io(1, 8, reg, val);
}
static inline void doc_writew(struct docg3 *docg3, u16 val, u16 reg)
@@ -143,7 +165,7 @@ static void doc_delay(struct docg3 *docg3, int nbNOPs)
{
int i;
- doc_dbg("NOP x %d\n", nbNOPs);
+ doc_vdbg("NOP x %d\n", nbNOPs);
for (i = 0; i < nbNOPs; i++)
doc_writeb(docg3, 0, DOC_NOP);
}
@@ -196,8 +218,8 @@ static int doc_reset_seq(struct docg3 *docg3)
/**
* doc_read_data_area - Read data from data area
* @docg3: the device
- * @buf: the buffer to fill in
- * @len: the lenght to read
+ * @buf: the buffer to fill in (may be NULL for dummy reads)
+ * @len: the length to read
* @first: first time read, DOC_READADDRESS should be set
*
* Reads bytes from flash data. Handles the single byte / even bytes reads.
@@ -218,8 +240,10 @@ static void doc_read_data_area(struct docg3 *docg3, void *buf, int len,
dst16 = buf;
for (i = 0; i < len4; i += 2) {
data16 = doc_readw(docg3, DOC_IOSPACE_DATA);
- *dst16 = data16;
- dst16++;
+ if (dst16) {
+ *dst16 = data16;
+ dst16++;
+ }
}
if (cdr) {
@@ -229,26 +253,84 @@ static void doc_read_data_area(struct docg3 *docg3, void *buf, int len,
dst8 = (u8 *)dst16;
for (i = 0; i < cdr; i++) {
data8 = doc_readb(docg3, DOC_IOSPACE_DATA);
- *dst8 = data8;
- dst8++;
+ if (dst8) {
+ *dst8 = data8;
+ dst8++;
+ }
}
}
}
/**
- * doc_set_data_mode - Sets the flash to reliable data mode
+ * doc_write_data_area - Write data into data area
+ * @docg3: the device
+ * @buf: the buffer to get input bytes from
+ * @len: the length to write
+ *
+ * Writes bytes into flash data. Handles the single byte / even bytes writes.
+ */
+static void doc_write_data_area(struct docg3 *docg3, const void *buf, int len)
+{
+ int i, cdr, len4;
+ u16 *src16;
+ u8 *src8;
+
+ doc_dbg("doc_write_data_area(buf=%p, len=%d)\n", buf, len);
+ cdr = len & 0x3;
+ len4 = len - cdr;
+
+ doc_writew(docg3, DOC_IOSPACE_DATA, DOC_READADDRESS);
+ src16 = (u16 *)buf;
+ for (i = 0; i < len4; i += 2) {
+ doc_writew(docg3, *src16, DOC_IOSPACE_DATA);
+ src16++;
+ }
+
+ src8 = (u8 *)src16;
+ for (i = 0; i < cdr; i++) {
+ doc_writew(docg3, DOC_IOSPACE_DATA | DOC_READADDR_ONE_BYTE,
+ DOC_READADDRESS);
+ doc_writeb(docg3, *src8, DOC_IOSPACE_DATA);
+ src8++;
+ }
+}
+
+/**
+ * doc_set_data_mode - Sets the flash to normal or reliable data mode
* @docg3: the device
*
* The reliable data mode is a bit slower than the fast mode, but less errors
* occur. Entering the reliable mode cannot be done without entering the fast
* mode first.
+ *
+ * In reliable mode, pages 2*n and 2*n+1 are clones. Writing to page 0 of blocks
+ * (4,5) makes the hardware also write to page 1 of blocks (4,5). Reading from
+ * page 0 of blocks (4,5) or from page 1 of blocks (4,5) gives the same result,
+ * which is the logical AND of the bytes from page 0 and page 1 (which is
+ * consistent with the fact that writing to a page only _clears_ bits of that
+ * page).
*/
static void doc_set_reliable_mode(struct docg3 *docg3)
{
- doc_dbg("doc_set_reliable_mode()\n");
- doc_flash_sequence(docg3, DOC_SEQ_SET_MODE);
- doc_flash_command(docg3, DOC_CMD_FAST_MODE);
- doc_flash_command(docg3, DOC_CMD_RELIABLE_MODE);
+ static char *strmode[] = { "normal", "fast", "reliable", "invalid" };
+
+ doc_dbg("doc_set_reliable_mode(%s)\n", strmode[docg3->reliable]);
+ switch (docg3->reliable) {
+ case 0:
+ break;
+ case 1:
+ doc_flash_sequence(docg3, DOC_SEQ_SET_FASTMODE);
+ doc_flash_command(docg3, DOC_CMD_FAST_MODE);
+ break;
+ case 2:
+ doc_flash_sequence(docg3, DOC_SEQ_SET_RELIABLEMODE);
+ doc_flash_command(docg3, DOC_CMD_FAST_MODE);
+ doc_flash_command(docg3, DOC_CMD_RELIABLE_MODE);
+ break;
+ default:
+ doc_err("doc_set_reliable_mode(): invalid mode\n");
+ break;
+ }
doc_delay(docg3, 2);
}
@@ -325,6 +407,37 @@ static int doc_set_extra_page_mode(struct docg3 *docg3)
}
/**
+ * doc_setup_addr_sector - Setup blocks/page/ofs address for one plane
+ * @docg3: the device
+ * @sector: the sector
+ */
+static void doc_setup_addr_sector(struct docg3 *docg3, int sector)
+{
+ doc_delay(docg3, 1);
+ doc_flash_address(docg3, sector & 0xff);
+ doc_flash_address(docg3, (sector >> 8) & 0xff);
+ doc_flash_address(docg3, (sector >> 16) & 0xff);
+ doc_delay(docg3, 1);
+}
+
+/**
+ * doc_setup_writeaddr_sector - Setup blocks/page/ofs address for one plane
+ * @docg3: the device
+ * @sector: the sector
+ * @ofs: the offset in the page, between 0 and (512 + 16 + 512)
+ */
+static void doc_setup_writeaddr_sector(struct docg3 *docg3, int sector, int ofs)
+{
+ ofs = ofs >> 2;
+ doc_delay(docg3, 1);
+ doc_flash_address(docg3, ofs & 0xff);
+ doc_flash_address(docg3, sector & 0xff);
+ doc_flash_address(docg3, (sector >> 8) & 0xff);
+ doc_flash_address(docg3, (sector >> 16) & 0xff);
+ doc_delay(docg3, 1);
+}
+
+/**
* doc_seek - Set both flash planes to the specified block, page for reading
* @docg3: the device
* @block0: the first plane block index
@@ -360,34 +473,80 @@ static int doc_read_seek(struct docg3 *docg3, int block0, int block1, int page,
if (ret)
goto out;
- sector = (block0 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK);
doc_flash_sequence(docg3, DOC_SEQ_READ);
+ sector = (block0 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK);
doc_flash_command(docg3, DOC_CMD_PROG_BLOCK_ADDR);
- doc_delay(docg3, 1);
- doc_flash_address(docg3, sector & 0xff);
- doc_flash_address(docg3, (sector >> 8) & 0xff);
- doc_flash_address(docg3, (sector >> 16) & 0xff);
- doc_delay(docg3, 1);
+ doc_setup_addr_sector(docg3, sector);
sector = (block1 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK);
doc_flash_command(docg3, DOC_CMD_PROG_BLOCK_ADDR);
+ doc_setup_addr_sector(docg3, sector);
doc_delay(docg3, 1);
- doc_flash_address(docg3, sector & 0xff);
- doc_flash_address(docg3, (sector >> 8) & 0xff);
- doc_flash_address(docg3, (sector >> 16) & 0xff);
+
+out:
+ return ret;
+}
+
+/**
+ * doc_write_seek - Set both flash planes to the specified block, page for writing
+ * @docg3: the device
+ * @block0: the first plane block index
+ * @block1: the second plane block index
+ * @page: the page index within the block
+ * @ofs: offset in page to write
+ *
+ * Programs the flash even and odd planes to the specific block and page.
+ * Alternatively, programs the flash to the wear area of the specified page.
+ */
+static int doc_write_seek(struct docg3 *docg3, int block0, int block1, int page,
+ int ofs)
+{
+ int ret = 0, sector;
+
+ doc_dbg("doc_write_seek(blocks=(%d,%d), page=%d, ofs=%d)\n",
+ block0, block1, page, ofs);
+
+ doc_set_reliable_mode(docg3);
+
+ if (ofs < 2 * DOC_LAYOUT_PAGE_SIZE) {
+ doc_flash_sequence(docg3, DOC_SEQ_SET_PLANE1);
+ doc_flash_command(docg3, DOC_CMD_READ_PLANE1);
+ doc_delay(docg3, 2);
+ } else {
+ doc_flash_sequence(docg3, DOC_SEQ_SET_PLANE2);
+ doc_flash_command(docg3, DOC_CMD_READ_PLANE2);
+ doc_delay(docg3, 2);
+ }
+
+ doc_flash_sequence(docg3, DOC_SEQ_PAGE_SETUP);
+ doc_flash_command(docg3, DOC_CMD_PROG_CYCLE1);
+
+ sector = (block0 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK);
+ doc_setup_writeaddr_sector(docg3, sector, ofs);
+
+ doc_flash_command(docg3, DOC_CMD_PROG_CYCLE3);
doc_delay(docg3, 2);
+ ret = doc_wait_ready(docg3);
+ if (ret)
+ goto out;
+
+ doc_flash_command(docg3, DOC_CMD_PROG_CYCLE1);
+ sector = (block1 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK);
+ doc_setup_writeaddr_sector(docg3, sector, ofs);
+ doc_delay(docg3, 1);
out:
return ret;
}
+
/**
* doc_read_page_ecc_init - Initialize hardware ECC engine
* @docg3: the device
* @len: the number of bytes covered by the ECC (BCH covered)
*
* The function does initialize the hardware ECC engine to compute the Hamming
- * ECC (on 1 byte) and the BCH Syndroms (on 7 bytes).
+ * ECC (on 1 byte) and the BCH hardware ECC (on 7 bytes).
*
* Return 0 if succeeded, -EIO on error
*/
@@ -403,6 +562,106 @@ static int doc_read_page_ecc_init(struct docg3 *docg3, int len)
}
/**
+ * doc_write_page_ecc_init - Initialize hardware BCH ECC engine
+ * @docg3: the device
+ * @len: the number of bytes covered by the ECC (BCH covered)
+ *
+ * This function initializes the hardware ECC engine to compute the Hamming
+ * ECC (on 1 byte) and the BCH hardware ECC (on 7 bytes).
+ *
+ * Return 0 if succeeded, -EIO on error
+ */
+static int doc_write_page_ecc_init(struct docg3 *docg3, int len)
+{
+ doc_writew(docg3, DOC_ECCCONF0_WRITE_MODE
+ | DOC_ECCCONF0_BCH_ENABLE | DOC_ECCCONF0_HAMMING_ENABLE
+ | (len & DOC_ECCCONF0_DATA_BYTES_MASK),
+ DOC_ECCCONF0);
+ doc_delay(docg3, 4);
+ doc_register_readb(docg3, DOC_FLASHCONTROL);
+ return doc_wait_ready(docg3);
+}
+
+/**
+ * doc_ecc_disable - Disable Hamming and BCH ECC hardware calculator
+ * @docg3: the device
+ *
+ * Disables the hardware ECC generator and checker, for unchecked reads (such
+ * as when reading only the OOB area or the write status byte).
+ */
+static void doc_ecc_disable(struct docg3 *docg3)
+{
+ doc_writew(docg3, DOC_ECCCONF0_READ_MODE, DOC_ECCCONF0);
+ doc_delay(docg3, 4);
+}
+
+/**
+ * doc_hamming_ecc_init - Initialize hardware Hamming ECC engine
+ * @docg3: the device
+ * @nb_bytes: the number of bytes covered by the ECC (Hamming covered)
+ *
+ * This function programs the ECC hardware to compute the Hamming code over the
+ * last N bytes provided to the hardware generator.
+ */
+static void doc_hamming_ecc_init(struct docg3 *docg3, int nb_bytes)
+{
+ u8 ecc_conf1;
+
+ ecc_conf1 = doc_register_readb(docg3, DOC_ECCCONF1);
+ ecc_conf1 &= ~DOC_ECCCONF1_HAMMING_BITS_MASK;
+ ecc_conf1 |= (nb_bytes & DOC_ECCCONF1_HAMMING_BITS_MASK);
+ doc_writeb(docg3, ecc_conf1, DOC_ECCCONF1);
+}
+
+/**
+ * doc_ecc_bch_fix_data - Fix the data read from flash if need be
+ * @docg3: the device
+ * @buf: the buffer of read data (512 + 7 + 1 bytes)
+ * @hwecc: the hardware calculated ECC.
+ * It's in fact recv_ecc ^ calc_ecc, where recv_ecc was read from OOB
+ * area data, and calc_ecc the ECC calculated by the hardware generator.
+ *
+ * Checks if the received data matches the ECC, and if an error is detected,
+ * tries to fix the bit flips (at most 4) in the buffer buf. As the docg3
+ * understands the (data, ecc, syndromes) in an order inverted from that of
+ * the BCH library, the function reverses the order of bits (i.e. bit7 and
+ * bit0, bit6 and bit1, ...) for all ECC data.
+ *
+ * The hardware ecc unit produces oob_ecc ^ calc_ecc. The kernel's bch
+ * algorithm is used to decode this. However the hw operates on page
+ * data in a bit order that is the reverse of that of the bch alg,
+ * requiring that the bits be reversed on the result. Thanks to Ivan
+ * Djelic for his analysis.
+ *
+ * Returns number of fixed bits (0, 1, 2, 3, 4) or -EBADMSG if too many bit
+ * errors were detected and cannot be fixed.
+ */
+static int doc_ecc_bch_fix_data(struct docg3 *docg3, void *buf, u8 *hwecc)
+{
+ u8 ecc[DOC_ECC_BCH_SIZE];
+ int errorpos[DOC_ECC_BCH_T], i, numerrs;
+
+ for (i = 0; i < DOC_ECC_BCH_SIZE; i++)
+ ecc[i] = bitrev8(hwecc[i]);
+ numerrs = decode_bch(docg3_bch, NULL, DOC_ECC_BCH_COVERED_BYTES,
+ NULL, ecc, NULL, errorpos);
+ BUG_ON(numerrs == -EINVAL);
+ if (numerrs < 0)
+ goto out;
+
+ for (i = 0; i < numerrs; i++)
+ errorpos[i] = (errorpos[i] & ~7) | (7 - (errorpos[i] & 7));
+ for (i = 0; i < numerrs; i++)
+ if (errorpos[i] < DOC_ECC_BCH_COVERED_BYTES*8)
+ /* error is located in data, correct it */
+ change_bit(errorpos[i], buf);
+out:
+ doc_dbg("doc_ecc_bch_fix_data: flipped %d bits\n", numerrs);
+ return numerrs;
+}
+
+
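
The bit-order fixup that doc_ecc_bch_fix_data() applies to the BCH error positions can be shown in isolation. The sketch below only assumes the (pos & ~7) | (7 - (pos & 7)) mapping used above; the sample positions are arbitrary.

#include <stdio.h>

static int mirror_pos(int pos)
{
	/* keep the byte index, reverse the bit index within the byte */
	return (pos & ~7) | (7 - (pos & 7));
}

int main(void)
{
	int samples[] = { 0, 5, 7, 8, 4103 };	/* 4103 = bit 7 of byte 512 */
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("bch pos %4d -> buffer bit %4d (byte %d, bit %d)\n",
		       samples[i], mirror_pos(samples[i]),
		       mirror_pos(samples[i]) / 8, mirror_pos(samples[i]) % 8);
	return 0;
}
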
+/**
* doc_read_page_prepare - Prepares reading data from a flash page
* @docg3: the device
* @block0: the first plane block index on flash memory
@@ -488,16 +747,40 @@ static int doc_read_page_getbytes(struct docg3 *docg3, int len, u_char *buf,
}
/**
- * doc_get_hw_bch_syndroms - Get hardware calculated BCH syndroms
+ * doc_write_page_putbytes - Writes bytes into a prepared page
+ * @docg3: the device
+ * @len: the number of bytes to be written
+ * @buf: the buffer of input bytes
+ *
+ */
+static void doc_write_page_putbytes(struct docg3 *docg3, int len,
+ const u_char *buf)
+{
+ doc_write_data_area(docg3, buf, len);
+ doc_delay(docg3, 2);
+}
+
+/**
+ * doc_get_bch_hw_ecc - Get hardware calculated BCH ECC
* @docg3: the device
- * @syns: the array of 7 integers where the syndroms will be stored
+ * @hwecc: the array of 7 integers where the hardware ecc will be stored
*/
-static void doc_get_hw_bch_syndroms(struct docg3 *docg3, int *syns)
+static void doc_get_bch_hw_ecc(struct docg3 *docg3, u8 *hwecc)
{
int i;
for (i = 0; i < DOC_ECC_BCH_SIZE; i++)
- syns[i] = doc_register_readb(docg3, DOC_BCH_SYNDROM(i));
+ hwecc[i] = doc_register_readb(docg3, DOC_BCH_HW_ECC(i));
+}
+
+/**
+ * doc_page_finish - Ends reading/writing of a flash page
+ * @docg3: the device
+ */
+static void doc_page_finish(struct docg3 *docg3)
+{
+ doc_writeb(docg3, 0, DOC_DATAEND);
+ doc_delay(docg3, 2);
}
/**
@@ -510,8 +793,7 @@ static void doc_get_hw_bch_syndroms(struct docg3 *docg3, int *syns)
*/
static void doc_read_page_finish(struct docg3 *docg3)
{
- doc_writeb(docg3, 0, DOC_DATAEND);
- doc_delay(docg3, 2);
+ doc_page_finish(docg3);
doc_set_device_id(docg3, 0);
}
@@ -523,18 +805,29 @@ static void doc_read_page_finish(struct docg3 *docg3)
* @block1: second plane block index calculated
* @page: page calculated
* @ofs: offset in page
+ * @reliable: 0 if docg3 in normal mode, 1 if docg3 in fast mode, 2 if docg3 in
+ * reliable mode.
+ *
+ * The calculation is based on the reliable/normal mode. In normal mode, the 64
+ * pages of a block are available. In reliable mode, as pages 2*n and 2*n+1 are
+ * clones, only 32 pages per block are available.
*/
static void calc_block_sector(loff_t from, int *block0, int *block1, int *page,
- int *ofs)
+ int *ofs, int reliable)
{
- uint sector;
+ uint sector, pages_biblock;
+
+ pages_biblock = DOC_LAYOUT_PAGES_PER_BLOCK * DOC_LAYOUT_NBPLANES;
+ if (reliable == 1 || reliable == 2)
+ pages_biblock /= 2;
sector = from / DOC_LAYOUT_PAGE_SIZE;
- *block0 = sector / (DOC_LAYOUT_PAGES_PER_BLOCK * DOC_LAYOUT_NBPLANES)
- * DOC_LAYOUT_NBPLANES;
+ *block0 = sector / pages_biblock * DOC_LAYOUT_NBPLANES;
*block1 = *block0 + 1;
- *page = sector % (DOC_LAYOUT_PAGES_PER_BLOCK * DOC_LAYOUT_NBPLANES);
+ *page = sector % pages_biblock;
*page /= DOC_LAYOUT_NBPLANES;
+ if (reliable == 1 || reliable == 2)
+ *page *= 2;
if (sector % 2)
*ofs = DOC_LAYOUT_PAGE_OOB_SIZE;
else
@@ -542,99 +835,124 @@ static void calc_block_sector(loff_t from, int *block0, int *block1, int *page,
}
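
A stand-alone sketch of the calc_block_sector() arithmetic above. The constants mirror the driver's layout (512-byte pages, 64 pages per block, 2 planes); the 528 assumed here for DOC_LAYOUT_PAGE_OOB_SIZE is an illustrative guess, not taken verbatim from the header.

#include <stdio.h>

#define PAGE_SIZE_	512	/* DOC_LAYOUT_PAGE_SIZE */
#define PAGE_OOB_SIZE_	528	/* assumed DOC_LAYOUT_PAGE_OOB_SIZE */
#define PAGES_PER_BLOCK	64	/* DOC_LAYOUT_PAGES_PER_BLOCK */
#define NBPLANES	2	/* DOC_LAYOUT_NBPLANES */

static void calc(long long from, int reliable,
		 int *block0, int *block1, int *page, int *ofs)
{
	unsigned int sector, pages_biblock;

	pages_biblock = PAGES_PER_BLOCK * NBPLANES;
	if (reliable == 1 || reliable == 2)
		pages_biblock /= 2;	/* pages 2*n and 2*n+1 are clones */

	sector = from / PAGE_SIZE_;
	*block0 = sector / pages_biblock * NBPLANES;
	*block1 = *block0 + 1;
	*page = sector % pages_biblock / NBPLANES;
	if (reliable == 1 || reliable == 2)
		*page *= 2;
	*ofs = (sector % 2) ? PAGE_OOB_SIZE_ : 0;
}

int main(void)
{
	int b0, b1, page, ofs;

	calc(3 * 512, 0, &b0, &b1, &page, &ofs);	/* 4th 512-byte sector */
	printf("normal  : blocks (%d,%d) page %d ofs %d\n", b0, b1, page, ofs);
	calc(3 * 512, 2, &b0, &b1, &page, &ofs);
	printf("reliable: blocks (%d,%d) page %d ofs %d\n", b0, b1, page, ofs);
	return 0;
}
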
/**
- * doc_read - Read bytes from flash
+ * doc_read_oob - Read out of band bytes from flash
* @mtd: the device
* @from: the offset from first block and first page, in bytes, aligned on page
* size
- * @len: the number of bytes to read (must be a multiple of 4)
- * @retlen: the number of bytes actually read
- * @buf: the filled in buffer
+ * @ops: the mtd oob structure
*
- * Reads flash memory pages. This function does not read the OOB chunk, but only
- * the page data.
+ * Reads flash memory OOB area of pages.
*
* Returns 0 if read successfull, of -EIO, -EINVAL if an error occured
*/
-static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char *buf)
+static int doc_read_oob(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
{
struct docg3 *docg3 = mtd->priv;
- int block0, block1, page, readlen, ret, ofs = 0;
- int syn[DOC_ECC_BCH_SIZE], eccconf1;
- u8 oob[DOC_LAYOUT_OOB_SIZE];
+ int block0, block1, page, ret, ofs = 0;
+ u8 *oobbuf = ops->oobbuf;
+ u8 *buf = ops->datbuf;
+ size_t len, ooblen, nbdata, nboob;
+ u8 hwecc[DOC_ECC_BCH_SIZE], eccconf1;
+
+ if (buf)
+ len = ops->len;
+ else
+ len = 0;
+ if (oobbuf)
+ ooblen = ops->ooblen;
+ else
+ ooblen = 0;
+
+ if (oobbuf && ops->mode == MTD_OPS_PLACE_OOB)
+ oobbuf += ops->ooboffs;
+
+ doc_dbg("doc_read_oob(from=%lld, mode=%d, data=(%p:%zu), oob=(%p:%zu))\n",
+ from, ops->mode, buf, len, oobbuf, ooblen);
+ if ((len % DOC_LAYOUT_PAGE_SIZE) || (ooblen % DOC_LAYOUT_OOB_SIZE) ||
+ (from % DOC_LAYOUT_PAGE_SIZE))
+ return -EINVAL;
ret = -EINVAL;
- doc_dbg("doc_read(from=%lld, len=%zu, buf=%p)\n", from, len, buf);
- if (from % DOC_LAYOUT_PAGE_SIZE)
- goto err;
- if (len % 4)
- goto err;
- calc_block_sector(from, &block0, &block1, &page, &ofs);
+ calc_block_sector(from + len, &block0, &block1, &page, &ofs,
+ docg3->reliable);
if (block1 > docg3->max_block)
goto err;
- *retlen = 0;
+ ops->oobretlen = 0;
+ ops->retlen = 0;
ret = 0;
- readlen = min_t(size_t, len, (size_t)DOC_LAYOUT_PAGE_SIZE);
- while (!ret && len > 0) {
- readlen = min_t(size_t, len, (size_t)DOC_LAYOUT_PAGE_SIZE);
+ while (!ret && (len > 0 || ooblen > 0)) {
+ calc_block_sector(from, &block0, &block1, &page, &ofs,
+ docg3->reliable);
+ nbdata = min_t(size_t, len, (size_t)DOC_LAYOUT_PAGE_SIZE);
+ nboob = min_t(size_t, ooblen, (size_t)DOC_LAYOUT_OOB_SIZE);
ret = doc_read_page_prepare(docg3, block0, block1, page, ofs);
if (ret < 0)
goto err;
- ret = doc_read_page_ecc_init(docg3, DOC_ECC_BCH_COVERED_BYTES);
+ ret = doc_read_page_ecc_init(docg3, DOC_ECC_BCH_TOTAL_BYTES);
if (ret < 0)
goto err_in_read;
- ret = doc_read_page_getbytes(docg3, readlen, buf, 1);
- if (ret < readlen)
+ ret = doc_read_page_getbytes(docg3, nbdata, buf, 1);
+ if (ret < nbdata)
goto err_in_read;
- ret = doc_read_page_getbytes(docg3, DOC_LAYOUT_OOB_SIZE,
- oob, 0);
- if (ret < DOC_LAYOUT_OOB_SIZE)
+ doc_read_page_getbytes(docg3, DOC_LAYOUT_PAGE_SIZE - nbdata,
+ NULL, 0);
+ ret = doc_read_page_getbytes(docg3, nboob, oobbuf, 0);
+ if (ret < nboob)
goto err_in_read;
+ doc_read_page_getbytes(docg3, DOC_LAYOUT_OOB_SIZE - nboob,
+ NULL, 0);
- *retlen += readlen;
- buf += readlen;
- len -= readlen;
-
- ofs ^= DOC_LAYOUT_PAGE_OOB_SIZE;
- if (ofs == 0)
- page += 2;
- if (page > DOC_ADDR_PAGE_MASK) {
- page = 0;
- block0 += 2;
- block1 += 2;
- }
-
- /*
- * There should be a BCH bitstream fixing algorithm here ...
- * By now, a page read failure is triggered by BCH error
- */
- doc_get_hw_bch_syndroms(docg3, syn);
+ doc_get_bch_hw_ecc(docg3, hwecc);
eccconf1 = doc_register_readb(docg3, DOC_ECCCONF1);
- doc_dbg("OOB - INFO: %02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
- oob[0], oob[1], oob[2], oob[3], oob[4],
- oob[5], oob[6]);
- doc_dbg("OOB - HAMMING: %02x\n", oob[7]);
- doc_dbg("OOB - BCH_ECC: %02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
- oob[8], oob[9], oob[10], oob[11], oob[12],
- oob[13], oob[14]);
- doc_dbg("OOB - UNUSED: %02x\n", oob[15]);
+ if (nboob >= DOC_LAYOUT_OOB_SIZE) {
+ doc_dbg("OOB - INFO: %02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
+ oobbuf[0], oobbuf[1], oobbuf[2], oobbuf[3],
+ oobbuf[4], oobbuf[5], oobbuf[6]);
+ doc_dbg("OOB - HAMMING: %02x\n", oobbuf[7]);
+ doc_dbg("OOB - BCH_ECC: %02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
+ oobbuf[8], oobbuf[9], oobbuf[10], oobbuf[11],
+ oobbuf[12], oobbuf[13], oobbuf[14]);
+ doc_dbg("OOB - UNUSED: %02x\n", oobbuf[15]);
+ }
doc_dbg("ECC checks: ECCConf1=%x\n", eccconf1);
- doc_dbg("ECC BCH syndrom: %02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
- syn[0], syn[1], syn[2], syn[3], syn[4], syn[5], syn[6]);
-
- ret = -EBADMSG;
- if (block0 >= DOC_LAYOUT_BLOCK_FIRST_DATA) {
- if (eccconf1 & DOC_ECCCONF1_BCH_SYNDROM_ERR)
- goto err_in_read;
- if (is_prot_seq_error(docg3))
- goto err_in_read;
+ doc_dbg("ECC HW_ECC: %02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
+ hwecc[0], hwecc[1], hwecc[2], hwecc[3], hwecc[4],
+ hwecc[5], hwecc[6]);
+
+ ret = -EIO;
+ if (is_prot_seq_error(docg3))
+ goto err_in_read;
+ ret = 0;
+ if ((block0 >= DOC_LAYOUT_BLOCK_FIRST_DATA) &&
+ (eccconf1 & DOC_ECCCONF1_BCH_SYNDROM_ERR) &&
+ (eccconf1 & DOC_ECCCONF1_PAGE_IS_WRITTEN) &&
+ (ops->mode != MTD_OPS_RAW) &&
+ (nbdata == DOC_LAYOUT_PAGE_SIZE)) {
+ ret = doc_ecc_bch_fix_data(docg3, buf, hwecc);
+ if (ret < 0) {
+ mtd->ecc_stats.failed++;
+ ret = -EBADMSG;
+ }
+ if (ret > 0) {
+ mtd->ecc_stats.corrected += ret;
+ ret = -EUCLEAN;
+ }
}
+
doc_read_page_finish(docg3);
+ ops->retlen += nbdata;
+ ops->oobretlen += nboob;
+ buf += nbdata;
+ oobbuf += nboob;
+ len -= nbdata;
+ ooblen -= nboob;
+ from += DOC_LAYOUT_PAGE_SIZE;
}
- return 0;
+ return ret;
err_in_read:
doc_read_page_finish(docg3);
err:
@@ -642,54 +960,33 @@ err:
}
/**
- * doc_read_oob - Read out of band bytes from flash
+ * doc_read - Read bytes from flash
* @mtd: the device
* @from: the offset from first block and first page, in bytes, aligned on page
* size
- * @ops: the mtd oob structure
+ * @len: the number of bytes to read (must be a multiple of 4)
+ * @retlen: the number of bytes actually read
+ * @buf: the filled in buffer
*
- * Reads flash memory OOB area of pages.
+ * Reads flash memory pages. This function does not read the OOB chunk, but only
+ * the page data.
*
* Returns 0 if read successfull, of -EIO, -EINVAL if an error occured
*/
-static int doc_read_oob(struct mtd_info *mtd, loff_t from,
- struct mtd_oob_ops *ops)
+static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
{
- struct docg3 *docg3 = mtd->priv;
- int block0, block1, page, ofs, ret;
- u8 *buf = ops->oobbuf;
- size_t len = ops->ooblen;
-
- doc_dbg("doc_read_oob(from=%lld, buf=%p, len=%zu)\n", from, buf, len);
- if (len != DOC_LAYOUT_OOB_SIZE)
- return -EINVAL;
-
- switch (ops->mode) {
- case MTD_OPS_PLACE_OOB:
- buf += ops->ooboffs;
- break;
- default:
- break;
- }
+ struct mtd_oob_ops ops;
+ size_t ret;
- calc_block_sector(from, &block0, &block1, &page, &ofs);
- if (block1 > docg3->max_block)
- return -EINVAL;
-
- ret = doc_read_page_prepare(docg3, block0, block1, page,
- ofs + DOC_LAYOUT_PAGE_SIZE);
- if (!ret)
- ret = doc_read_page_ecc_init(docg3, DOC_LAYOUT_OOB_SIZE);
- if (!ret)
- ret = doc_read_page_getbytes(docg3, DOC_LAYOUT_OOB_SIZE,
- buf, 1);
- doc_read_page_finish(docg3);
+ memset(&ops, 0, sizeof(ops));
+ ops.datbuf = buf;
+ ops.len = len;
+ ops.mode = MTD_OPS_AUTO_OOB;
- if (ret > 0)
- ops->oobretlen = ret;
- else
- ops->oobretlen = 0;
- return (ret > 0) ? 0 : ret;
+ ret = doc_read_oob(mtd, from, &ops);
+ *retlen = ops.retlen;
+ return ret;
}
static int doc_reload_bbt(struct docg3 *docg3)
@@ -726,7 +1023,8 @@ static int doc_block_isbad(struct mtd_info *mtd, loff_t from)
struct docg3 *docg3 = mtd->priv;
int block0, block1, page, ofs, is_good;
- calc_block_sector(from, &block0, &block1, &page, &ofs);
+ calc_block_sector(from, &block0, &block1, &page, &ofs,
+ docg3->reliable);
doc_dbg("doc_block_isbad(from=%lld) => block=(%d,%d), page=%d, ofs=%d\n",
from, block0, block1, page, ofs);
@@ -739,6 +1037,7 @@ static int doc_block_isbad(struct mtd_info *mtd, loff_t from)
return !is_good;
}
+#if 0
/**
* doc_get_erase_count - Get block erase count
* @docg3: the device
@@ -758,7 +1057,7 @@ static int doc_get_erase_count(struct docg3 *docg3, loff_t from)
doc_dbg("doc_get_erase_count(from=%lld, buf=%p)\n", from, buf);
if (from % DOC_LAYOUT_PAGE_SIZE)
return -EINVAL;
- calc_block_sector(from, &block0, &block1, &page, &ofs);
+ calc_block_sector(from, &block0, &block1, &page, &ofs, docg3->reliable);
if (block1 > docg3->max_block)
return -EINVAL;
@@ -780,6 +1079,558 @@ static int doc_get_erase_count(struct docg3 *docg3, loff_t from)
return max(plane1_erase_count, plane2_erase_count);
}
+#endif
+
+/**
+ * doc_get_op_status - get erase/write operation status
+ * @docg3: the device
+ *
+ * Queries the status from the chip, and returns it
+ *
+ * Returns the status (bits DOC_PLANES_STATUS_*)
+ */
+static int doc_get_op_status(struct docg3 *docg3)
+{
+ u8 status;
+
+ doc_flash_sequence(docg3, DOC_SEQ_PLANES_STATUS);
+ doc_flash_command(docg3, DOC_CMD_PLANES_STATUS);
+ doc_delay(docg3, 5);
+
+ doc_ecc_disable(docg3);
+ doc_read_data_area(docg3, &status, 1, 1);
+ return status;
+}
+
+/**
+ * doc_write_erase_wait_status - wait for write or erase completion
+ * @docg3: the device
+ *
+ * Wait for the chip to be ready again after an erase or write operation, and
+ * check the erase/write status.
+ *
+ * Returns 0 if the erase/write succeeded, -EIO on erase/write failure, -EAGAIN
+ * if the chip did not become ready in time
+ */
+static int doc_write_erase_wait_status(struct docg3 *docg3)
+{
+ int status, ret = 0;
+
+ if (!doc_is_ready(docg3))
+ usleep_range(3000, 3000);
+ if (!doc_is_ready(docg3)) {
+ doc_dbg("Timeout reached and the chip is still not ready\n");
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ status = doc_get_op_status(docg3);
+ if (status & DOC_PLANES_STATUS_FAIL) {
+ doc_dbg("Erase/Write failed on (a) plane(s), status = %x\n",
+ status);
+ ret = -EIO;
+ }
+
+out:
+ doc_page_finish(docg3);
+ return ret;
+}
+
+/**
+ * doc_erase_block - Erase a pair of blocks (one per plane)
+ * @docg3: the device
+ * @block0: the first block to erase (leftmost plane)
+ * @block1: the second block to erase (rightmost plane)
+ *
+ * Erase both blocks, and return operation status
+ *
+ * Returns 0 if the erase succeeded, -EIO on erase failure, -EAGAIN if the chip
+ * was not ready in time
+ */
+static int doc_erase_block(struct docg3 *docg3, int block0, int block1)
+{
+ int ret, sector;
+
+ doc_dbg("doc_erase_block(blocks=(%d,%d))\n", block0, block1);
+ ret = doc_reset_seq(docg3);
+ if (ret)
+ return -EIO;
+
+ doc_set_reliable_mode(docg3);
+ doc_flash_sequence(docg3, DOC_SEQ_ERASE);
+
+ sector = block0 << DOC_ADDR_BLOCK_SHIFT;
+ doc_flash_command(docg3, DOC_CMD_PROG_BLOCK_ADDR);
+ doc_setup_addr_sector(docg3, sector);
+ sector = block1 << DOC_ADDR_BLOCK_SHIFT;
+ doc_flash_command(docg3, DOC_CMD_PROG_BLOCK_ADDR);
+ doc_setup_addr_sector(docg3, sector);
+ doc_delay(docg3, 1);
+
+ doc_flash_command(docg3, DOC_CMD_ERASECYCLE2);
+ doc_delay(docg3, 2);
+
+ if (is_prot_seq_error(docg3)) {
+ doc_err("Erase blocks %d,%d error\n", block0, block1);
+ return -EIO;
+ }
+
+ return doc_write_erase_wait_status(docg3);
+}
+
+/**
+ * doc_erase - Erase a portion of the chip
+ * @mtd: the device
+ * @info: the erase info
+ *
+ * Erase a run of contiguous blocks, by pairs, as an "mtd" page of 1024 bytes
+ * is split into two 512-byte pages held on two contiguous blocks.
+ *
+ * Returns 0 if the erase succeeded, -EINVAL on an addressing error, -EIO on
+ * erase failure
+ */
+static int doc_erase(struct mtd_info *mtd, struct erase_info *info)
+{
+ struct docg3 *docg3 = mtd->priv;
+ uint64_t len;
+ int block0, block1, page, ret, ofs = 0;
+
+ doc_dbg("doc_erase(from=%lld, len=%lld\n", info->addr, info->len);
+ doc_set_device_id(docg3, docg3->device_id);
+
+ info->state = MTD_ERASE_PENDING;
+ calc_block_sector(info->addr + info->len, &block0, &block1, &page,
+ &ofs, docg3->reliable);
+ ret = -EINVAL;
+ if (block1 > docg3->max_block || page || ofs)
+ goto reset_err;
+
+ ret = 0;
+ calc_block_sector(info->addr, &block0, &block1, &page, &ofs,
+ docg3->reliable);
+ doc_set_reliable_mode(docg3);
+ for (len = info->len; !ret && len > 0; len -= mtd->erasesize) {
+ info->state = MTD_ERASING;
+ ret = doc_erase_block(docg3, block0, block1);
+ block0 += 2;
+ block1 += 2;
+ }
+
+ if (ret)
+ goto reset_err;
+
+ info->state = MTD_ERASE_DONE;
+ return 0;
+
+reset_err:
+ info->state = MTD_ERASE_FAILED;
+ return ret;
+}
+
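
The pairing described in the doc_erase() comment above can be sketched on its own: each erase-size chunk maps to one pair of plane blocks, and the loop simply walks the pairs. All values below are illustrative; the real block numbers come from calc_block_sector().

#include <stdio.h>

int main(void)
{
	unsigned long long len = 3 * 0x10000ULL;	/* three erase blocks */
	unsigned int erasesize = 0x10000;		/* assumed block-pair size */
	int block0 = 0, block1 = 1;			/* from calc_block_sector(addr) */

	while (len > 0) {
		printf("erase plane blocks (%d,%d)\n", block0, block1);
		block0 += 2;
		block1 += 2;
		len -= erasesize;
	}
	return 0;
}
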
+/**
+ * doc_write_page - Write a single page to the chip
+ * @docg3: the device
+ * @to: the offset from first block and first page, in bytes, aligned on page
+ * size
+ * @buf: buffer to get bytes from
+ * @oob: buffer to get out of band bytes from (can be NULL if no OOB should be
+ * written)
+ * @autoecc: if 0, all 16 bytes from OOB are taken, regardless of HW Hamming or
+ * BCH computations. If 1, only bytes 0-7 and byte 15 are taken,
+ * remaining ones are filled with hardware Hamming and BCH
+ *	     computations. Its value is not meaningful if oob == NULL.
+ *
+ * Write one full page (i.e. one page split over two planes), of 512 bytes, with the
+ * OOB data. The OOB ECC is automatically computed by the hardware Hamming and
+ * BCH generator if autoecc is not null.
+ *
+ * Returns 0 if write successful, -EIO if write error, -EAGAIN if timeout
+ */
+static int doc_write_page(struct docg3 *docg3, loff_t to, const u_char *buf,
+ const u_char *oob, int autoecc)
+{
+ int block0, block1, page, ret, ofs = 0;
+ u8 hwecc[DOC_ECC_BCH_SIZE], hamming;
+
+ doc_dbg("doc_write_page(to=%lld)\n", to);
+ calc_block_sector(to, &block0, &block1, &page, &ofs, docg3->reliable);
+
+ doc_set_device_id(docg3, docg3->device_id);
+ ret = doc_reset_seq(docg3);
+ if (ret)
+ goto err;
+
+ /* Program the flash address block and page */
+ ret = doc_write_seek(docg3, block0, block1, page, ofs);
+ if (ret)
+ goto err;
+
+ doc_write_page_ecc_init(docg3, DOC_ECC_BCH_TOTAL_BYTES);
+ doc_delay(docg3, 2);
+ doc_write_page_putbytes(docg3, DOC_LAYOUT_PAGE_SIZE, buf);
+
+ if (oob && autoecc) {
+ doc_write_page_putbytes(docg3, DOC_LAYOUT_OOB_PAGEINFO_SZ, oob);
+ doc_delay(docg3, 2);
+ oob += DOC_LAYOUT_OOB_UNUSED_OFS;
+
+ hamming = doc_register_readb(docg3, DOC_HAMMINGPARITY);
+ doc_delay(docg3, 2);
+ doc_write_page_putbytes(docg3, DOC_LAYOUT_OOB_HAMMING_SZ,
+ &hamming);
+ doc_delay(docg3, 2);
+
+ doc_get_bch_hw_ecc(docg3, hwecc);
+ doc_write_page_putbytes(docg3, DOC_LAYOUT_OOB_BCH_SZ, hwecc);
+ doc_delay(docg3, 2);
+
+ doc_write_page_putbytes(docg3, DOC_LAYOUT_OOB_UNUSED_SZ, oob);
+ }
+ if (oob && !autoecc)
+ doc_write_page_putbytes(docg3, DOC_LAYOUT_OOB_SIZE, oob);
+
+ doc_delay(docg3, 2);
+ doc_page_finish(docg3);
+ doc_delay(docg3, 2);
+ doc_flash_command(docg3, DOC_CMD_PROG_CYCLE2);
+ doc_delay(docg3, 2);
+
+ /*
+	 * The wait status check will perform another doc_page_finish() call, but
+	 * the docg3 seems to expect that, so leave it.
+ */
+ ret = doc_write_erase_wait_status(docg3);
+ return ret;
+err:
+ doc_read_page_finish(docg3);
+ return ret;
+}
+
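
To make the autoecc path of doc_write_page() above easier to follow, here is a user-space sketch of the 16-byte OOB stream it ends up programming: the caller supplies the 7 page-info bytes and the final free byte, while the Hamming byte and the 7 BCH bytes come from the hardware generators. The caller bytes and the hardware values below are placeholders.

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char caller_oob[8] = { 'P', 'A', 'G', 'E', 'I', 'N', 'F', 0xff };
	unsigned char hw_hamming = 0xA5;		/* placeholder hardware value */
	unsigned char hw_bch[7] = { 1, 2, 3, 4, 5, 6, 7 };
	unsigned char oob[16];
	int i;

	memcpy(&oob[0], caller_oob, 7);			/* bytes 0-6: page info */
	oob[7] = hw_hamming;				/* byte 7: Hamming ECC */
	memcpy(&oob[8], hw_bch, 7);			/* bytes 8-14: BCH ECC */
	oob[15] = caller_oob[7];			/* byte 15: caller's free byte */

	for (i = 0; i < 16; i++)
		printf("%02x ", oob[i]);
	printf("\n");
	return 0;
}
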
+/**
+ * doc_guess_autoecc - Guess autoecc mode from mtd_oob_ops
+ * @ops: the oob operations
+ *
+ * Returns 0 or 1 on success, -EINVAL if the oob mode is invalid
+ */
+static int doc_guess_autoecc(struct mtd_oob_ops *ops)
+{
+ int autoecc;
+
+ switch (ops->mode) {
+ case MTD_OPS_PLACE_OOB:
+ case MTD_OPS_AUTO_OOB:
+ autoecc = 1;
+ break;
+ case MTD_OPS_RAW:
+ autoecc = 0;
+ break;
+ default:
+ autoecc = -EINVAL;
+ }
+ return autoecc;
+}
+
+/**
+ * doc_fill_autooob - Fill a 16-byte OOB area from 8 non-ECC bytes
+ * @dst: the target 16-byte OOB buffer
+ * @oobsrc: the source 8-byte non-ECC OOB buffer
+ *
+ */
+static void doc_fill_autooob(u8 *dst, u8 *oobsrc)
+{
+ memcpy(dst, oobsrc, DOC_LAYOUT_OOB_PAGEINFO_SZ);
+ dst[DOC_LAYOUT_OOB_UNUSED_OFS] = oobsrc[DOC_LAYOUT_OOB_PAGEINFO_SZ];
+}
+
+/**
+ * doc_backup_oob - Backup OOB into docg3 structure
+ * @docg3: the device
+ * @to: the page offset in the chip
+ * @ops: the OOB size and buffer
+ *
+ * As the docg3 should write a page with its OOB in one pass, and some userland
+ * applications do write_oob() to set up the OOB and then write(), store the
+ * OOB in a temporary buffer. This is very dangerous, as 2 concurrent
+ * applications could store an OOB, and then write their pages (which will
+ * result in one of them having its OOB corrupted).
+ *
+ * The only reliable way would be for userland to call doc_write_oob() with both
+ * the page data _and_ the OOB area.
+ *
+ * Returns 0 if success, -EINVAL if ops content invalid
+ */
+static int doc_backup_oob(struct docg3 *docg3, loff_t to,
+ struct mtd_oob_ops *ops)
+{
+ int ooblen = ops->ooblen, autoecc;
+
+ if (ooblen != DOC_LAYOUT_OOB_SIZE)
+ return -EINVAL;
+ autoecc = doc_guess_autoecc(ops);
+ if (autoecc < 0)
+ return autoecc;
+
+ docg3->oob_write_ofs = to;
+ docg3->oob_autoecc = autoecc;
+ if (ops->mode == MTD_OPS_AUTO_OOB) {
+ doc_fill_autooob(docg3->oob_write_buf, ops->oobbuf);
+ ops->oobretlen = 8;
+ } else {
+ memcpy(docg3->oob_write_buf, ops->oobbuf, DOC_LAYOUT_OOB_SIZE);
+ ops->oobretlen = DOC_LAYOUT_OOB_SIZE;
+ }
+ return 0;
+}
+
+/**
+ * doc_write_oob - Write out of band bytes to flash
+ * @mtd: the device
+ * @ofs: the offset from first block and first page, in bytes, aligned on page
+ * size
+ * @ops: the mtd oob structure
+ *
+ * Either write OOB data into a temporary buffer, for the subsequent page
+ * write. The provided OOB should be 16 bytes long. If a data buffer is
+ * provided as well, issue the page write.
+ * Or provide data without OOB, and then an all-zeroed OOB will be used (ECC
+ * will still be filled in if asked for).
+ *
+ * Returns 0 if successful, -EINVAL if the buffer lengths are not valid
+ */
+static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
+ struct mtd_oob_ops *ops)
+{
+ struct docg3 *docg3 = mtd->priv;
+ int block0, block1, page, ret, pofs = 0, autoecc, oobdelta;
+ u8 *oobbuf = ops->oobbuf;
+ u8 *buf = ops->datbuf;
+ size_t len, ooblen;
+ u8 oob[DOC_LAYOUT_OOB_SIZE];
+
+ if (buf)
+ len = ops->len;
+ else
+ len = 0;
+ if (oobbuf)
+ ooblen = ops->ooblen;
+ else
+ ooblen = 0;
+
+ if (oobbuf && ops->mode == MTD_OPS_PLACE_OOB)
+ oobbuf += ops->ooboffs;
+
+ doc_dbg("doc_write_oob(from=%lld, mode=%d, data=(%p:%zu), oob=(%p:%zu))\n",
+ ofs, ops->mode, buf, len, oobbuf, ooblen);
+ switch (ops->mode) {
+ case MTD_OPS_PLACE_OOB:
+ case MTD_OPS_RAW:
+ oobdelta = mtd->oobsize;
+ break;
+ case MTD_OPS_AUTO_OOB:
+ oobdelta = mtd->ecclayout->oobavail;
+ break;
+ default:
+ oobdelta = 0;
+ }
+ if ((len % DOC_LAYOUT_PAGE_SIZE) || (ooblen % oobdelta) ||
+ (ofs % DOC_LAYOUT_PAGE_SIZE))
+ return -EINVAL;
+ if (len && ooblen &&
+ (len / DOC_LAYOUT_PAGE_SIZE) != (ooblen / oobdelta))
+ return -EINVAL;
+
+ ret = -EINVAL;
+ calc_block_sector(ofs + len, &block0, &block1, &page, &pofs,
+ docg3->reliable);
+ if (block1 > docg3->max_block)
+ goto err;
+
+ ops->oobretlen = 0;
+ ops->retlen = 0;
+ ret = 0;
+ if (len == 0 && ooblen == 0)
+ return -EINVAL;
+ if (len == 0 && ooblen > 0)
+ return doc_backup_oob(docg3, ofs, ops);
+
+ autoecc = doc_guess_autoecc(ops);
+ if (autoecc < 0)
+ return autoecc;
+
+ while (!ret && len > 0) {
+ memset(oob, 0, sizeof(oob));
+ if (ofs == docg3->oob_write_ofs)
+ memcpy(oob, docg3->oob_write_buf, DOC_LAYOUT_OOB_SIZE);
+ else if (ooblen > 0 && ops->mode == MTD_OPS_AUTO_OOB)
+ doc_fill_autooob(oob, oobbuf);
+ else if (ooblen > 0)
+ memcpy(oob, oobbuf, DOC_LAYOUT_OOB_SIZE);
+ ret = doc_write_page(docg3, ofs, buf, oob, autoecc);
+
+ ofs += DOC_LAYOUT_PAGE_SIZE;
+ len -= DOC_LAYOUT_PAGE_SIZE;
+ buf += DOC_LAYOUT_PAGE_SIZE;
+ if (ooblen) {
+ oobbuf += oobdelta;
+ ooblen -= oobdelta;
+ ops->oobretlen += oobdelta;
+ }
+ ops->retlen += DOC_LAYOUT_PAGE_SIZE;
+ }
+err:
+ doc_set_device_id(docg3, 0);
+ return ret;
+}
+
+/**
+ * doc_write - Write a buffer to the chip
+ * @mtd: the device
+ * @to: the offset from first block and first page, in bytes, aligned on page
+ * size
+ * @len: the number of bytes to write (must be a full page size, ie. 512)
+ * @retlen: the number of bytes actually written (0 or 512)
+ * @buf: the buffer to get bytes from
+ *
+ * Writes data to the chip.
+ *
+ * Returns 0 if write successful, -EIO if write error
+ */
+static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct docg3 *docg3 = mtd->priv;
+ int ret;
+ struct mtd_oob_ops ops;
+
+ doc_dbg("doc_write(to=%lld, len=%zu)\n", to, len);
+ ops.datbuf = (char *)buf;
+ ops.len = len;
+ ops.mode = MTD_OPS_PLACE_OOB;
+ ops.oobbuf = NULL;
+ ops.ooblen = 0;
+ ops.ooboffs = 0;
+
+ ret = doc_write_oob(mtd, to, &ops);
+ *retlen = ops.retlen;
+ return ret;
+}
+
+static struct docg3 *sysfs_dev2docg3(struct device *dev,
+ struct device_attribute *attr)
+{
+ int floor;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct mtd_info **docg3_floors = platform_get_drvdata(pdev);
+
+ floor = attr->attr.name[1] - '0';
+ if (floor < 0 || floor >= DOC_MAX_NBFLOORS)
+ return NULL;
+ else
+ return docg3_floors[floor]->priv;
+}
+
+static ssize_t dps0_is_key_locked(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct docg3 *docg3 = sysfs_dev2docg3(dev, attr);
+ int dps0;
+
+ doc_set_device_id(docg3, docg3->device_id);
+ dps0 = doc_register_readb(docg3, DOC_DPS0_STATUS);
+ doc_set_device_id(docg3, 0);
+
+ return sprintf(buf, "%d\n", !(dps0 & DOC_DPS_KEY_OK));
+}
+
+static ssize_t dps1_is_key_locked(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct docg3 *docg3 = sysfs_dev2docg3(dev, attr);
+ int dps1;
+
+ doc_set_device_id(docg3, docg3->device_id);
+ dps1 = doc_register_readb(docg3, DOC_DPS1_STATUS);
+ doc_set_device_id(docg3, 0);
+
+ return sprintf(buf, "%d\n", !(dps1 & DOC_DPS_KEY_OK));
+}
+
+static ssize_t dps0_insert_key(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct docg3 *docg3 = sysfs_dev2docg3(dev, attr);
+ int i;
+
+ if (count != DOC_LAYOUT_DPS_KEY_LENGTH)
+ return -EINVAL;
+
+ doc_set_device_id(docg3, docg3->device_id);
+ for (i = 0; i < DOC_LAYOUT_DPS_KEY_LENGTH; i++)
+ doc_writeb(docg3, buf[i], DOC_DPS0_KEY);
+ doc_set_device_id(docg3, 0);
+ return count;
+}
+
+static ssize_t dps1_insert_key(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct docg3 *docg3 = sysfs_dev2docg3(dev, attr);
+ int i;
+
+ if (count != DOC_LAYOUT_DPS_KEY_LENGTH)
+ return -EINVAL;
+
+ doc_set_device_id(docg3, docg3->device_id);
+ for (i = 0; i < DOC_LAYOUT_DPS_KEY_LENGTH; i++)
+ doc_writeb(docg3, buf[i], DOC_DPS1_KEY);
+ doc_set_device_id(docg3, 0);
+ return count;
+}
+
+#define FLOOR_SYSFS(id) { \
+ __ATTR(f##id##_dps0_is_keylocked, S_IRUGO, dps0_is_key_locked, NULL), \
+ __ATTR(f##id##_dps1_is_keylocked, S_IRUGO, dps1_is_key_locked, NULL), \
+ __ATTR(f##id##_dps0_protection_key, S_IWUGO, NULL, dps0_insert_key), \
+ __ATTR(f##id##_dps1_protection_key, S_IWUGO, NULL, dps1_insert_key), \
+}
+
+static struct device_attribute doc_sys_attrs[DOC_MAX_NBFLOORS][4] = {
+ FLOOR_SYSFS(0), FLOOR_SYSFS(1), FLOOR_SYSFS(2), FLOOR_SYSFS(3)
+};
+
+static int doc_register_sysfs(struct platform_device *pdev,
+ struct mtd_info **floors)
+{
+ int ret = 0, floor, i = 0;
+ struct device *dev = &pdev->dev;
+
+ for (floor = 0; !ret && floor < DOC_MAX_NBFLOORS && floors[floor];
+ floor++)
+ for (i = 0; !ret && i < 4; i++)
+ ret = device_create_file(dev, &doc_sys_attrs[floor][i]);
+ if (!ret)
+ return 0;
+ do {
+ while (--i >= 0)
+ device_remove_file(dev, &doc_sys_attrs[floor][i]);
+ i = 4;
+ } while (--floor >= 0);
+ return ret;
+}
+
+static void doc_unregister_sysfs(struct platform_device *pdev,
+ struct mtd_info **floors)
+{
+ struct device *dev = &pdev->dev;
+ int floor, i;
+
+ for (floor = 0; floor < DOC_MAX_NBFLOORS && floors[floor];
+ floor++)
+ for (i = 0; i < 4; i++)
+ device_remove_file(dev, &doc_sys_attrs[floor][i]);
+}
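
A hypothetical user-space sketch of how the per-floor sysfs files created above could be driven: write the protection key to fN_dpsM_protection_key, then read fN_dpsM_is_keylocked to check the result. The sysfs path and the 8-byte key length are assumptions made for illustration (the real length is DOC_LAYOUT_DPS_KEY_LENGTH).

#include <stdio.h>

int main(void)
{
	const char *base = "/sys/devices/platform/docg3.0";	/* assumed path */
	const char key[] = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08 };
	char path[256], locked[8] = "";
	FILE *f;

	snprintf(path, sizeof(path), "%s/f0_dps0_protection_key", base);
	f = fopen(path, "w");
	if (f) {
		fwrite(key, 1, sizeof(key), f);	/* key must be written in one go */
		fclose(f);
	}

	snprintf(path, sizeof(path), "%s/f0_dps0_is_keylocked", base);
	f = fopen(path, "r");
	if (f) {
		if (fgets(locked, sizeof(locked), f))
			printf("dps0 locked: %s", locked);
		fclose(f);
	}
	return 0;
}
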
/*
* Debug sysfs entries
@@ -852,13 +1703,15 @@ static int dbg_protection_show(struct seq_file *s, void *p)
{
struct docg3 *docg3 = (struct docg3 *)s->private;
int pos = 0;
- int protect = doc_register_readb(docg3, DOC_PROTECTION);
- int dps0 = doc_register_readb(docg3, DOC_DPS0_STATUS);
- int dps0_low = doc_register_readb(docg3, DOC_DPS0_ADDRLOW);
- int dps0_high = doc_register_readb(docg3, DOC_DPS0_ADDRHIGH);
- int dps1 = doc_register_readb(docg3, DOC_DPS1_STATUS);
- int dps1_low = doc_register_readb(docg3, DOC_DPS1_ADDRLOW);
- int dps1_high = doc_register_readb(docg3, DOC_DPS1_ADDRHIGH);
+ int protect, dps0, dps0_low, dps0_high, dps1, dps1_low, dps1_high;
+
+ protect = doc_register_readb(docg3, DOC_PROTECTION);
+ dps0 = doc_register_readb(docg3, DOC_DPS0_STATUS);
+ dps0_low = doc_register_readw(docg3, DOC_DPS0_ADDRLOW);
+ dps0_high = doc_register_readw(docg3, DOC_DPS0_ADDRHIGH);
+ dps1 = doc_register_readb(docg3, DOC_DPS1_STATUS);
+ dps1_low = doc_register_readw(docg3, DOC_DPS1_ADDRLOW);
+ dps1_high = doc_register_readw(docg3, DOC_DPS1_ADDRHIGH);
pos += seq_printf(s, "Protection = 0x%02x (",
protect);
@@ -947,52 +1800,54 @@ static void __init doc_set_driver_info(int chip_id, struct mtd_info *mtd)
cfg = doc_register_readb(docg3, DOC_CONFIGURATION);
docg3->if_cfg = (cfg & DOC_CONF_IF_CFG ? 1 : 0);
+ docg3->reliable = reliable_mode;
switch (chip_id) {
case DOC_CHIPID_G3:
- mtd->name = "DiskOnChip G3";
+ mtd->name = kasprintf(GFP_KERNEL, "DiskOnChip G3 floor %d",
+ docg3->device_id);
docg3->max_block = 2047;
break;
}
mtd->type = MTD_NANDFLASH;
- /*
- * Once write methods are added, the correct flags will be set.
- * mtd->flags = MTD_CAP_NANDFLASH;
- */
- mtd->flags = MTD_CAP_ROM;
+ mtd->flags = MTD_CAP_NANDFLASH;
mtd->size = (docg3->max_block + 1) * DOC_LAYOUT_BLOCK_SIZE;
+ if (docg3->reliable == 2)
+ mtd->size /= 2;
mtd->erasesize = DOC_LAYOUT_BLOCK_SIZE * DOC_LAYOUT_NBPLANES;
+ if (docg3->reliable == 2)
+ mtd->erasesize /= 2;
mtd->writesize = DOC_LAYOUT_PAGE_SIZE;
mtd->oobsize = DOC_LAYOUT_OOB_SIZE;
mtd->owner = THIS_MODULE;
- mtd->erase = NULL;
- mtd->point = NULL;
- mtd->unpoint = NULL;
+ mtd->erase = doc_erase;
mtd->read = doc_read;
- mtd->write = NULL;
+ mtd->write = doc_write;
mtd->read_oob = doc_read_oob;
- mtd->write_oob = NULL;
- mtd->sync = NULL;
+ mtd->write_oob = doc_write_oob;
mtd->block_isbad = doc_block_isbad;
+ mtd->ecclayout = &docg3_oobinfo;
}
/**
- * doc_probe - Probe the IO space for a DiskOnChip G3 chip
- * @pdev: platform device
+ * doc_probe_device - Check if a device is available
+ * @base: the io space where the device is probed
+ * @floor: the floor of the probed device
+ * @dev: the device
*
- * Probes for a G3 chip at the specified IO space in the platform data
- * ressources.
+ * Checks whether a device is available at the specified IO range and floor.
*
- * Returns 0 on success, -ENOMEM, -ENXIO on error
+ * Returns an mtd_info struct if a device is found, NULL if none is found, or
+ * an error pointer if a memory allocation failed. If floor 0 is probed, an
+ * ASIC reset is issued first.
*/
-static int __init docg3_probe(struct platform_device *pdev)
+static struct mtd_info *doc_probe_device(void __iomem *base, int floor,
+ struct device *dev)
{
- struct device *dev = &pdev->dev;
- struct docg3 *docg3;
- struct mtd_info *mtd;
- struct resource *ress;
int ret, bbt_nbpages;
u16 chip_id, chip_id_inv;
+ struct docg3 *docg3;
+ struct mtd_info *mtd;
ret = -ENOMEM;
docg3 = kzalloc(sizeof(struct docg3), GFP_KERNEL);
@@ -1002,69 +1857,218 @@ static int __init docg3_probe(struct platform_device *pdev)
if (!mtd)
goto nomem2;
mtd->priv = docg3;
+ bbt_nbpages = DIV_ROUND_UP(docg3->max_block + 1,
+ 8 * DOC_LAYOUT_PAGE_SIZE);
+ docg3->bbt = kzalloc(bbt_nbpages * DOC_LAYOUT_PAGE_SIZE, GFP_KERNEL);
+ if (!docg3->bbt)
+ goto nomem3;
- ret = -ENXIO;
- ress = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!ress) {
- dev_err(dev, "No I/O memory resource defined\n");
- goto noress;
- }
- docg3->base = ioremap(ress->start, DOC_IOSPACE_SIZE);
-
- docg3->dev = &pdev->dev;
- docg3->device_id = 0;
+ docg3->dev = dev;
+ docg3->device_id = floor;
+ docg3->base = base;
doc_set_device_id(docg3, docg3->device_id);
- doc_set_asic_mode(docg3, DOC_ASICMODE_RESET);
+ if (!floor)
+ doc_set_asic_mode(docg3, DOC_ASICMODE_RESET);
doc_set_asic_mode(docg3, DOC_ASICMODE_NORMAL);
chip_id = doc_register_readw(docg3, DOC_CHIPID);
chip_id_inv = doc_register_readw(docg3, DOC_CHIPID_INV);
- ret = -ENODEV;
+ ret = 0;
if (chip_id != (u16)(~chip_id_inv)) {
- doc_info("No device found at IO addr %p\n",
- (void *)ress->start);
- goto nochipfound;
+ goto nomem3;
}
switch (chip_id) {
case DOC_CHIPID_G3:
- doc_info("Found a G3 DiskOnChip at addr %p\n",
- (void *)ress->start);
+ doc_info("Found a G3 DiskOnChip at addr %p, floor %d\n",
+ base, floor);
break;
default:
doc_err("Chip id %04x is not a DiskOnChip G3 chip\n", chip_id);
- goto nochipfound;
+ goto nomem3;
}
doc_set_driver_info(chip_id, mtd);
- platform_set_drvdata(pdev, mtd);
- ret = -ENOMEM;
- bbt_nbpages = DIV_ROUND_UP(docg3->max_block + 1,
- 8 * DOC_LAYOUT_PAGE_SIZE);
- docg3->bbt = kzalloc(bbt_nbpages * DOC_LAYOUT_PAGE_SIZE, GFP_KERNEL);
- if (!docg3->bbt)
- goto nochipfound;
+ doc_hamming_ecc_init(docg3, DOC_LAYOUT_OOB_PAGEINFO_SZ);
doc_reload_bbt(docg3);
+ return mtd;
- ret = mtd_device_parse_register(mtd, part_probes,
- NULL, NULL, 0);
- if (ret)
- goto register_error;
+nomem3:
+ kfree(mtd);
+nomem2:
+ kfree(docg3);
+nomem1:
+ return ERR_PTR(ret);
+}
- doc_dbg_register(docg3);
- return 0;
+/**
+ * doc_release_device - Release a docg3 floor
+ * @mtd: the device
+ */
+static void doc_release_device(struct mtd_info *mtd)
+{
+ struct docg3 *docg3 = mtd->priv;
-register_error:
+ mtd_device_unregister(mtd);
kfree(docg3->bbt);
-nochipfound:
- iounmap(docg3->base);
-noress:
+ kfree(docg3);
+ kfree(mtd->name);
kfree(mtd);
+}
+
+/**
+ * docg3_resume - Awaken the docg3 floors
+ * @pdev: platform device
+ *
+ * Returns 0 (always successful)
+ */
+static int docg3_resume(struct platform_device *pdev)
+{
+ int i;
+ struct mtd_info **docg3_floors, *mtd;
+ struct docg3 *docg3;
+
+ docg3_floors = platform_get_drvdata(pdev);
+ mtd = docg3_floors[0];
+ docg3 = mtd->priv;
+
+ doc_dbg("docg3_resume()\n");
+ for (i = 0; i < 12; i++)
+ doc_readb(docg3, DOC_IOSPACE_IPL);
+ return 0;
+}
+
+/**
+ * docg3_suspend - Put the docg3 floors in low power mode
+ * @pdev: platform device
+ * @state: power state
+ *
+ * Shuts off most of the docg3 circuitry to lower power consumption.
+ *
+ * Returns 0 if suspend succeeded, -EIO if chip refused suspend
+ */
+static int docg3_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ int floor, i;
+ struct mtd_info **docg3_floors, *mtd;
+ struct docg3 *docg3;
+ u8 ctrl, pwr_down;
+
+ docg3_floors = platform_get_drvdata(pdev);
+ for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++) {
+ mtd = docg3_floors[floor];
+ if (!mtd)
+ continue;
+ docg3 = mtd->priv;
+
+ doc_writeb(docg3, floor, DOC_DEVICESELECT);
+ ctrl = doc_register_readb(docg3, DOC_FLASHCONTROL);
+ ctrl &= ~DOC_CTRL_VIOLATION & ~DOC_CTRL_CE;
+ doc_writeb(docg3, ctrl, DOC_FLASHCONTROL);
+
+ for (i = 0; i < 10; i++) {
+ usleep_range(3000, 4000);
+ pwr_down = doc_register_readb(docg3, DOC_POWERMODE);
+ if (pwr_down & DOC_POWERDOWN_READY)
+ break;
+ }
+ if (pwr_down & DOC_POWERDOWN_READY) {
+ doc_dbg("docg3_suspend(): floor %d powerdown ok\n",
+ floor);
+ } else {
+ doc_err("docg3_suspend(): floor %d powerdown failed\n",
+ floor);
+ return -EIO;
+ }
+ }
+
+ mtd = docg3_floors[0];
+ docg3 = mtd->priv;
+ doc_set_asic_mode(docg3, DOC_ASICMODE_POWERDOWN);
+ return 0;
+}
+
+/**
+ * docg3_probe - Probe the IO space for a DiskOnChip G3 chip
+ * @pdev: platform device
+ *
+ * Probes for a G3 chip at the specified IO space in the platform data
+ * resources. Floor 0 must be available.
+ *
+ * Returns 0 on success, -ENOMEM, -ENXIO on error
+ */
+static int __init docg3_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mtd_info *mtd;
+ struct resource *ress;
+ void __iomem *base;
+ int ret, floor, found = 0;
+ struct mtd_info **docg3_floors;
+
+ ret = -ENXIO;
+ ress = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!ress) {
+ dev_err(dev, "No I/O memory resource defined\n");
+ goto noress;
+ }
+ base = ioremap(ress->start, DOC_IOSPACE_SIZE);
+
+ ret = -ENOMEM;
+ docg3_floors = kzalloc(sizeof(*docg3_floors) * DOC_MAX_NBFLOORS,
+ GFP_KERNEL);
+ if (!docg3_floors)
+ goto nomem1;
+ docg3_bch = init_bch(DOC_ECC_BCH_M, DOC_ECC_BCH_T,
+ DOC_ECC_BCH_PRIMPOLY);
+ if (!docg3_bch)
+ goto nomem2;
+
+ for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++) {
+ mtd = doc_probe_device(base, floor, dev);
+ if (IS_ERR(mtd)) {
+ ret = PTR_ERR(mtd);
+ goto err_probe;
+ }
+ if (!mtd) {
+ if (floor == 0)
+ goto notfound;
+ else
+ continue;
+ }
+ docg3_floors[floor] = mtd;
+ ret = mtd_device_parse_register(mtd, part_probes, NULL, NULL,
+ 0);
+ if (ret)
+ goto err_probe;
+ found++;
+ }
+
+ ret = doc_register_sysfs(pdev, docg3_floors);
+ if (ret)
+ goto err_probe;
+ if (!found)
+ goto notfound;
+
+ platform_set_drvdata(pdev, docg3_floors);
+ doc_dbg_register(docg3_floors[0]->priv);
+ return 0;
+
+notfound:
+ ret = -ENODEV;
+ dev_info(dev, "No supported DiskOnChip found\n");
+err_probe:
+ free_bch(docg3_bch);
+ for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++)
+ if (docg3_floors[floor])
+ doc_release_device(docg3_floors[floor]);
nomem2:
- kfree(docg3);
+ kfree(docg3_floors);
nomem1:
+ iounmap(base);
+noress:
return ret;
}
@@ -1076,15 +2080,20 @@ nomem1:
*/
static int __exit docg3_release(struct platform_device *pdev)
{
- struct mtd_info *mtd = platform_get_drvdata(pdev);
- struct docg3 *docg3 = mtd->priv;
+ struct mtd_info **docg3_floors = platform_get_drvdata(pdev);
+ struct docg3 *docg3 = docg3_floors[0]->priv;
+ void __iomem *base = docg3->base;
+ int floor;
+ doc_unregister_sysfs(pdev, docg3_floors);
doc_dbg_unregister(docg3);
- mtd_device_unregister(mtd);
- iounmap(docg3->base);
- kfree(docg3->bbt);
- kfree(docg3);
- kfree(mtd);
+ for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++)
+ if (docg3_floors[floor])
+ doc_release_device(docg3_floors[floor]);
+
+ kfree(docg3_floors);
+ free_bch(docg3_bch);
+ iounmap(base);
return 0;
}
@@ -1093,6 +2102,8 @@ static struct platform_driver g3_driver = {
.name = "docg3",
.owner = THIS_MODULE,
},
+ .suspend = docg3_suspend,
+ .resume = docg3_resume,
.remove = __exit_p(docg3_release),
};
diff --git a/drivers/mtd/devices/docg3.h b/drivers/mtd/devices/docg3.h
index 0d407be24594..db0da436b493 100644
--- a/drivers/mtd/devices/docg3.h
+++ b/drivers/mtd/devices/docg3.h
@@ -51,10 +51,19 @@
#define DOC_LAYOUT_WEAR_OFFSET (DOC_LAYOUT_PAGE_OOB_SIZE * 2)
#define DOC_LAYOUT_BLOCK_SIZE \
(DOC_LAYOUT_PAGES_PER_BLOCK * DOC_LAYOUT_PAGE_SIZE)
+
+/*
+ * ECC related constants
+ */
+#define DOC_ECC_BCH_M 14
+#define DOC_ECC_BCH_T 4
+#define DOC_ECC_BCH_PRIMPOLY 0x4443
#define DOC_ECC_BCH_SIZE 7
#define DOC_ECC_BCH_COVERED_BYTES \
(DOC_LAYOUT_PAGE_SIZE + DOC_LAYOUT_OOB_PAGEINFO_SZ + \
- DOC_LAYOUT_OOB_HAMMING_SZ + DOC_LAYOUT_OOB_BCH_SZ)
+ DOC_LAYOUT_OOB_HAMMING_SZ)
+#define DOC_ECC_BCH_TOTAL_BYTES \
+ (DOC_ECC_BCH_COVERED_BYTES + DOC_LAYOUT_OOB_BCH_SZ)
/*
* Blocks distribution
@@ -80,6 +89,7 @@
#define DOC_CHIPID_G3 0x200
#define DOC_ERASE_MARK 0xaa
+#define DOC_MAX_NBFLOORS 4
/*
* Flash registers
*/
@@ -105,9 +115,11 @@
#define DOC_ECCCONF1 0x1042
#define DOC_ECCPRESET 0x1044
#define DOC_HAMMINGPARITY 0x1046
-#define DOC_BCH_SYNDROM(idx) (0x1048 + (idx << 1))
+#define DOC_BCH_HW_ECC(idx) (0x1048 + idx)
#define DOC_PROTECTION 0x1056
+#define DOC_DPS0_KEY 0x105c
+#define DOC_DPS1_KEY 0x105e
#define DOC_DPS0_ADDRLOW 0x1060
#define DOC_DPS0_ADDRHIGH 0x1062
#define DOC_DPS1_ADDRLOW 0x1064
@@ -117,6 +129,7 @@
#define DOC_ASICMODECONFIRM 0x1072
#define DOC_CHIPID_INV 0x1074
+#define DOC_POWERMODE 0x107c
/*
* Flash sequences
@@ -124,11 +137,14 @@
*/
#define DOC_SEQ_RESET 0x00
#define DOC_SEQ_PAGE_SIZE_532 0x03
-#define DOC_SEQ_SET_MODE 0x09
+#define DOC_SEQ_SET_FASTMODE 0x05
+#define DOC_SEQ_SET_RELIABLEMODE 0x09
#define DOC_SEQ_READ 0x12
#define DOC_SEQ_SET_PLANE1 0x0e
#define DOC_SEQ_SET_PLANE2 0x10
#define DOC_SEQ_PAGE_SETUP 0x1d
+#define DOC_SEQ_ERASE 0x27
+#define DOC_SEQ_PLANES_STATUS 0x31
/*
* Flash commands
@@ -143,7 +159,10 @@
#define DOC_CMD_PROG_BLOCK_ADDR 0x60
#define DOC_CMD_PROG_CYCLE1 0x80
#define DOC_CMD_PROG_CYCLE2 0x10
+#define DOC_CMD_PROG_CYCLE3 0x11
#define DOC_CMD_ERASECYCLE2 0xd0
+#define DOC_CMD_READ_STATUS 0x70
+#define DOC_CMD_PLANES_STATUS 0x71
#define DOC_CMD_RELIABLE_MODE 0x22
#define DOC_CMD_FAST_MODE 0xa2
@@ -174,6 +193,7 @@
/*
* Flash register : DOC_ECCCONF0
*/
+#define DOC_ECCCONF0_WRITE_MODE 0x0000
#define DOC_ECCCONF0_READ_MODE 0x8000
#define DOC_ECCCONF0_AUTO_ECC_ENABLE 0x4000
#define DOC_ECCCONF0_HAMMING_ENABLE 0x1000
@@ -185,7 +205,7 @@
*/
#define DOC_ECCCONF1_BCH_SYNDROM_ERR 0x80
#define DOC_ECCCONF1_UNKOWN1 0x40
-#define DOC_ECCCONF1_UNKOWN2 0x20
+#define DOC_ECCCONF1_PAGE_IS_WRITTEN 0x20
#define DOC_ECCCONF1_UNKOWN3 0x10
#define DOC_ECCCONF1_HAMMING_BITS_MASK 0x0f
@@ -223,13 +243,46 @@
#define DOC_READADDR_ONE_BYTE 0x4000
#define DOC_READADDR_ADDR_MASK 0x1fff
+/*
+ * Flash register : DOC_POWERMODE
+ */
+#define DOC_POWERDOWN_READY 0x80
+
+/*
+ * Status of erase and write operation
+ */
+#define DOC_PLANES_STATUS_FAIL 0x01
+#define DOC_PLANES_STATUS_PLANE0_KO 0x02
+#define DOC_PLANES_STATUS_PLANE1_KO 0x04
+
+/*
+ * DPS key management
+ *
+ * Each floor of docg3 has 2 protection areas: DPS0 and DPS1. These areas span
+ * across block boundaries, and define whether these blocks can be read or
+ * written.
+ * The definition is dynamically stored in page 0 of blocks (2,3) for DPS0, and
+ * page 0 of blocks (4,5) for DPS1.
+ */
+#define DOC_LAYOUT_DPS_KEY_LENGTH 8
+
/**
* struct docg3 - DiskOnChip driver private data
* @dev: the device currently under control
* @base: mapped IO space
* @device_id: number of the cascaded DoCG3 device (0, 1, 2 or 3)
* @if_cfg: if true, reads are on 16bits, else reads are on 8bits
+ *
+ * @reliable: if 0, docg3 is in normal mode; if 1, in fast mode; if 2, in
+ * reliable mode.
+ * Fast mode implies more errors than normal mode.
+ * Reliable mode implies that page 2*n and 2*n+1 are clones.
* @bbt: bad block table cache
+ * @oob_write_ofs: offset of the MTD where this OOB should belong (i.e. in next
+ * page_write)
+ * @oob_autoecc: if 1, use only bytes 0-7, 15, and fill the others with HW ECC
+ * if 0, use all the 16 bytes.
+ * @oob_write_buf: prepared OOB for next page_write
* @debugfs_root: debugfs root node
*/
struct docg3 {
@@ -237,8 +290,12 @@ struct docg3 {
void __iomem *base;
unsigned int device_id:4;
unsigned int if_cfg:1;
+ unsigned int reliable:2;
int max_block;
u8 *bbt;
+ loff_t oob_write_ofs;
+ int oob_autoecc;
+ u8 oob_write_buf[DOC_LAYOUT_OOB_SIZE];
struct dentry *debugfs_root;
};
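The new ECC constants above are mutually consistent: a binary BCH code over GF(2^14) (DOC_ECC_BCH_M) correcting DOC_ECC_BCH_T = 4 errors needs m * t = 56 parity bits, i.e. exactly the 7 bytes already declared as DOC_ECC_BCH_SIZE. A minimal sketch of how such a decoder is set up and queried with the kernel's generic lib/bch API; the helper name is hypothetical, only the constants come from this header:

#include <linux/bch.h>

/* Hypothetical check helper: returns the number of bit errors found
 * (locations written to errloc), or a negative value if uncorrectable. */
static int doc_bch_check(struct bch_control *bch, const u8 *data,
			 unsigned int len, const u8 *read_ecc)
{
	unsigned int errloc[DOC_ECC_BCH_T];	/* at most t = 4 error positions */

	/* m = 14, t = 4  =>  14 * 4 = 56 parity bits = DOC_ECC_BCH_SIZE bytes */
	return decode_bch(bch, data, len, read_ecc, NULL, NULL, errloc);
}

/* Setup, as done once in docg3_probe() above:
 *	docg3_bch = init_bch(DOC_ECC_BCH_M, DOC_ECC_BCH_T, DOC_ECC_BCH_PRIMPOLY);
 */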
diff --git a/drivers/mtd/devices/docprobe.c b/drivers/mtd/devices/docprobe.c
index 45116bb30297..706b847b46b3 100644
--- a/drivers/mtd/devices/docprobe.c
+++ b/drivers/mtd/devices/docprobe.c
@@ -241,8 +241,7 @@ static void __init DoC_Probe(unsigned long physadr)
return;
}
docfound = 1;
- mtd = kmalloc(sizeof(struct DiskOnChip) + sizeof(struct mtd_info), GFP_KERNEL);
-
+ mtd = kzalloc(sizeof(struct DiskOnChip) + sizeof(struct mtd_info), GFP_KERNEL);
if (!mtd) {
printk(KERN_WARNING "Cannot allocate memory for data structures. Dropping.\n");
iounmap(docptr);
@@ -250,10 +249,6 @@ static void __init DoC_Probe(unsigned long physadr)
}
this = (struct DiskOnChip *)(&mtd[1]);
-
- memset((char *)mtd,0, sizeof(struct mtd_info));
- memset((char *)this, 0, sizeof(struct DiskOnChip));
-
mtd->priv = this;
this->virtadr = docptr;
this->physadr = physadr;
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 884904d3f9d2..7c60dddbefc0 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -992,7 +992,6 @@ static int __devexit m25p_remove(struct spi_device *spi)
static struct spi_driver m25p80_driver = {
.driver = {
.name = "m25p80",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.id_table = m25p_ids,
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index d75c7af18a63..236057ead0d2 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -936,7 +936,6 @@ static int __devexit dataflash_remove(struct spi_device *spi)
static struct spi_driver dataflash_driver = {
.driver = {
.name = "mtd_dataflash",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
.of_match_table = dataflash_dt_ids,
},
diff --git a/drivers/mtd/devices/sst25l.c b/drivers/mtd/devices/sst25l.c
index d38ef3bffe8d..5fc198350b94 100644
--- a/drivers/mtd/devices/sst25l.c
+++ b/drivers/mtd/devices/sst25l.c
@@ -378,7 +378,7 @@ static int __devinit sst25l_probe(struct spi_device *spi)
struct flash_info *flash_info;
struct sst25l_flash *flash;
struct flash_platform_data *data;
- int ret, i;
+ int ret;
flash_info = sst25l_match_device(spi);
if (!flash_info)
@@ -444,7 +444,6 @@ static int __devexit sst25l_remove(struct spi_device *spi)
static struct spi_driver sst25l_driver = {
.driver = {
.name = "sst25l",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = sst25l_probe,
diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
index c7382bb686c6..19d637266fcd 100644
--- a/drivers/mtd/ftl.c
+++ b/drivers/mtd/ftl.c
@@ -168,8 +168,8 @@ static int scan_header(partition_t *part)
(offset + sizeof(header)) < max_offset;
offset += part->mbd.mtd->erasesize ? : 0x2000) {
- err = part->mbd.mtd->read(part->mbd.mtd, offset, sizeof(header), &ret,
- (unsigned char *)&header);
+ err = mtd_read(part->mbd.mtd, offset, sizeof(header), &ret,
+ (unsigned char *)&header);
if (err)
return err;
@@ -224,8 +224,8 @@ static int build_maps(partition_t *part)
for (i = 0; i < le16_to_cpu(part->header.NumEraseUnits); i++) {
offset = ((i + le16_to_cpu(part->header.FirstPhysicalEUN))
<< part->header.EraseUnitSize);
- ret = part->mbd.mtd->read(part->mbd.mtd, offset, sizeof(header), &retval,
- (unsigned char *)&header);
+ ret = mtd_read(part->mbd.mtd, offset, sizeof(header), &retval,
+ (unsigned char *)&header);
if (ret)
goto out_XferInfo;
@@ -289,9 +289,9 @@ static int build_maps(partition_t *part)
part->EUNInfo[i].Deleted = 0;
offset = part->EUNInfo[i].Offset + le32_to_cpu(header.BAMOffset);
- ret = part->mbd.mtd->read(part->mbd.mtd, offset,
- part->BlocksPerUnit * sizeof(uint32_t), &retval,
- (unsigned char *)part->bam_cache);
+ ret = mtd_read(part->mbd.mtd, offset,
+ part->BlocksPerUnit * sizeof(uint32_t), &retval,
+ (unsigned char *)part->bam_cache);
if (ret)
goto out_bam_cache;
@@ -355,7 +355,7 @@ static int erase_xfer(partition_t *part,
erase->len = 1 << part->header.EraseUnitSize;
erase->priv = (u_long)part;
- ret = part->mbd.mtd->erase(part->mbd.mtd, erase);
+ ret = mtd_erase(part->mbd.mtd, erase);
if (!ret)
xfer->EraseCount++;
@@ -422,8 +422,8 @@ static int prepare_xfer(partition_t *part, int i)
header.LogicalEUN = cpu_to_le16(0xffff);
header.EraseCount = cpu_to_le32(xfer->EraseCount);
- ret = part->mbd.mtd->write(part->mbd.mtd, xfer->Offset, sizeof(header),
- &retlen, (u_char *)&header);
+ ret = mtd_write(part->mbd.mtd, xfer->Offset, sizeof(header), &retlen,
+ (u_char *)&header);
if (ret) {
return ret;
@@ -438,8 +438,8 @@ static int prepare_xfer(partition_t *part, int i)
for (i = 0; i < nbam; i++, offset += sizeof(uint32_t)) {
- ret = part->mbd.mtd->write(part->mbd.mtd, offset, sizeof(uint32_t),
- &retlen, (u_char *)&ctl);
+ ret = mtd_write(part->mbd.mtd, offset, sizeof(uint32_t), &retlen,
+ (u_char *)&ctl);
if (ret)
return ret;
@@ -485,9 +485,9 @@ static int copy_erase_unit(partition_t *part, uint16_t srcunit,
offset = eun->Offset + le32_to_cpu(part->header.BAMOffset);
- ret = part->mbd.mtd->read(part->mbd.mtd, offset,
- part->BlocksPerUnit * sizeof(uint32_t),
- &retlen, (u_char *) (part->bam_cache));
+ ret = mtd_read(part->mbd.mtd, offset,
+ part->BlocksPerUnit * sizeof(uint32_t), &retlen,
+ (u_char *)(part->bam_cache));
/* mark the cache bad, in case we get an error later */
part->bam_index = 0xffff;
@@ -503,8 +503,8 @@ static int copy_erase_unit(partition_t *part, uint16_t srcunit,
offset = xfer->Offset + 20; /* Bad! */
unit = cpu_to_le16(0x7fff);
- ret = part->mbd.mtd->write(part->mbd.mtd, offset, sizeof(uint16_t),
- &retlen, (u_char *) &unit);
+ ret = mtd_write(part->mbd.mtd, offset, sizeof(uint16_t), &retlen,
+ (u_char *)&unit);
if (ret) {
printk( KERN_WARNING "ftl: Failed to write back to BAM cache in copy_erase_unit()!\n");
@@ -523,16 +523,16 @@ static int copy_erase_unit(partition_t *part, uint16_t srcunit,
break;
case BLOCK_DATA:
case BLOCK_REPLACEMENT:
- ret = part->mbd.mtd->read(part->mbd.mtd, src, SECTOR_SIZE,
- &retlen, (u_char *) buf);
+ ret = mtd_read(part->mbd.mtd, src, SECTOR_SIZE, &retlen,
+ (u_char *)buf);
if (ret) {
printk(KERN_WARNING "ftl: Error reading old xfer unit in copy_erase_unit\n");
return ret;
}
- ret = part->mbd.mtd->write(part->mbd.mtd, dest, SECTOR_SIZE,
- &retlen, (u_char *) buf);
+ ret = mtd_write(part->mbd.mtd, dest, SECTOR_SIZE, &retlen,
+ (u_char *)buf);
if (ret) {
printk(KERN_WARNING "ftl: Error writing new xfer unit in copy_erase_unit\n");
return ret;
@@ -550,9 +550,11 @@ static int copy_erase_unit(partition_t *part, uint16_t srcunit,
}
/* Write the BAM to the transfer unit */
- ret = part->mbd.mtd->write(part->mbd.mtd, xfer->Offset + le32_to_cpu(part->header.BAMOffset),
- part->BlocksPerUnit * sizeof(int32_t), &retlen,
- (u_char *)part->bam_cache);
+ ret = mtd_write(part->mbd.mtd,
+ xfer->Offset + le32_to_cpu(part->header.BAMOffset),
+ part->BlocksPerUnit * sizeof(int32_t),
+ &retlen,
+ (u_char *)part->bam_cache);
if (ret) {
printk( KERN_WARNING "ftl: Error writing BAM in copy_erase_unit\n");
return ret;
@@ -560,8 +562,8 @@ static int copy_erase_unit(partition_t *part, uint16_t srcunit,
/* All clear? Then update the LogicalEUN again */
- ret = part->mbd.mtd->write(part->mbd.mtd, xfer->Offset + 20, sizeof(uint16_t),
- &retlen, (u_char *)&srcunitswap);
+ ret = mtd_write(part->mbd.mtd, xfer->Offset + 20, sizeof(uint16_t),
+ &retlen, (u_char *)&srcunitswap);
if (ret) {
printk(KERN_WARNING "ftl: Error writing new LogicalEUN in copy_erase_unit\n");
@@ -648,8 +650,7 @@ static int reclaim_block(partition_t *part)
if (queued) {
pr_debug("ftl_cs: waiting for transfer "
"unit to be prepared...\n");
- if (part->mbd.mtd->sync)
- part->mbd.mtd->sync(part->mbd.mtd);
+ mtd_sync(part->mbd.mtd);
} else {
static int ne = 0;
if (++ne < 5)
@@ -747,10 +748,11 @@ static uint32_t find_free(partition_t *part)
/* Invalidate cache */
part->bam_index = 0xffff;
- ret = part->mbd.mtd->read(part->mbd.mtd,
- part->EUNInfo[eun].Offset + le32_to_cpu(part->header.BAMOffset),
- part->BlocksPerUnit * sizeof(uint32_t),
- &retlen, (u_char *) (part->bam_cache));
+ ret = mtd_read(part->mbd.mtd,
+ part->EUNInfo[eun].Offset + le32_to_cpu(part->header.BAMOffset),
+ part->BlocksPerUnit * sizeof(uint32_t),
+ &retlen,
+ (u_char *)(part->bam_cache));
if (ret) {
printk(KERN_WARNING"ftl: Error reading BAM in find_free\n");
@@ -810,8 +812,8 @@ static int ftl_read(partition_t *part, caddr_t buffer,
else {
offset = (part->EUNInfo[log_addr / bsize].Offset
+ (log_addr % bsize));
- ret = part->mbd.mtd->read(part->mbd.mtd, offset, SECTOR_SIZE,
- &retlen, (u_char *) buffer);
+ ret = mtd_read(part->mbd.mtd, offset, SECTOR_SIZE, &retlen,
+ (u_char *)buffer);
if (ret) {
printk(KERN_WARNING "Error reading MTD device in ftl_read()\n");
@@ -849,8 +851,8 @@ static int set_bam_entry(partition_t *part, uint32_t log_addr,
le32_to_cpu(part->header.BAMOffset));
#ifdef PSYCHO_DEBUG
- ret = part->mbd.mtd->read(part->mbd.mtd, offset, sizeof(uint32_t),
- &retlen, (u_char *)&old_addr);
+ ret = mtd_read(part->mbd.mtd, offset, sizeof(uint32_t), &retlen,
+ (u_char *)&old_addr);
if (ret) {
printk(KERN_WARNING"ftl: Error reading old_addr in set_bam_entry: %d\n",ret);
return ret;
@@ -886,8 +888,8 @@ static int set_bam_entry(partition_t *part, uint32_t log_addr,
#endif
part->bam_cache[blk] = le_virt_addr;
}
- ret = part->mbd.mtd->write(part->mbd.mtd, offset, sizeof(uint32_t),
- &retlen, (u_char *)&le_virt_addr);
+ ret = mtd_write(part->mbd.mtd, offset, sizeof(uint32_t), &retlen,
+ (u_char *)&le_virt_addr);
if (ret) {
printk(KERN_NOTICE "ftl_cs: set_bam_entry() failed!\n");
@@ -946,8 +948,7 @@ static int ftl_write(partition_t *part, caddr_t buffer,
part->EUNInfo[part->bam_index].Deleted++;
offset = (part->EUNInfo[part->bam_index].Offset +
blk * SECTOR_SIZE);
- ret = part->mbd.mtd->write(part->mbd.mtd, offset, SECTOR_SIZE, &retlen,
- buffer);
+ ret = mtd_write(part->mbd.mtd, offset, SECTOR_SIZE, &retlen, buffer);
if (ret) {
printk(KERN_NOTICE "ftl_cs: block write failed!\n");
diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
index dd034efd1875..28646c95cfb8 100644
--- a/drivers/mtd/inftlcore.c
+++ b/drivers/mtd/inftlcore.c
@@ -158,7 +158,7 @@ int inftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len,
ops.oobbuf = buf;
ops.datbuf = NULL;
- res = mtd->read_oob(mtd, offs & ~(mtd->writesize - 1), &ops);
+ res = mtd_read_oob(mtd, offs & ~(mtd->writesize - 1), &ops);
*retlen = ops.oobretlen;
return res;
}
@@ -178,7 +178,7 @@ int inftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len,
ops.oobbuf = buf;
ops.datbuf = NULL;
- res = mtd->write_oob(mtd, offs & ~(mtd->writesize - 1), &ops);
+ res = mtd_write_oob(mtd, offs & ~(mtd->writesize - 1), &ops);
*retlen = ops.oobretlen;
return res;
}
@@ -199,7 +199,7 @@ static int inftl_write(struct mtd_info *mtd, loff_t offs, size_t len,
ops.datbuf = buf;
ops.len = len;
- res = mtd->write_oob(mtd, offs & ~(mtd->writesize - 1), &ops);
+ res = mtd_write_oob(mtd, offs & ~(mtd->writesize - 1), &ops);
*retlen = ops.retlen;
return res;
}
@@ -343,14 +343,17 @@ static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned
if (BlockMap[block] == BLOCK_NIL)
continue;
- ret = mtd->read(mtd, (inftl->EraseSize * BlockMap[block]) +
- (block * SECTORSIZE), SECTORSIZE, &retlen,
- movebuf);
+ ret = mtd_read(mtd,
+ (inftl->EraseSize * BlockMap[block]) + (block * SECTORSIZE),
+ SECTORSIZE,
+ &retlen,
+ movebuf);
if (ret < 0 && !mtd_is_bitflip(ret)) {
- ret = mtd->read(mtd,
- (inftl->EraseSize * BlockMap[block]) +
- (block * SECTORSIZE), SECTORSIZE,
- &retlen, movebuf);
+ ret = mtd_read(mtd,
+ (inftl->EraseSize * BlockMap[block]) + (block * SECTORSIZE),
+ SECTORSIZE,
+ &retlen,
+ movebuf);
if (ret != -EIO)
pr_debug("INFTL: error went away on retry?\n");
}
@@ -914,7 +917,7 @@ foundit:
} else {
size_t retlen;
loff_t ptr = (thisEUN * inftl->EraseSize) + blockofs;
- int ret = mtd->read(mtd, ptr, SECTORSIZE, &retlen, buffer);
+ int ret = mtd_read(mtd, ptr, SECTORSIZE, &retlen, buffer);
/* Handle corrected bit flips gracefully */
if (ret < 0 && !mtd_is_bitflip(ret))
diff --git a/drivers/mtd/inftlmount.c b/drivers/mtd/inftlmount.c
index 2ff601f816ce..4adc0374fb6b 100644
--- a/drivers/mtd/inftlmount.c
+++ b/drivers/mtd/inftlmount.c
@@ -73,8 +73,8 @@ static int find_boot_record(struct INFTLrecord *inftl)
* Check for BNAND header first. Then whinge if it's found
* but later checks fail.
*/
- ret = mtd->read(mtd, block * inftl->EraseSize,
- SECTORSIZE, &retlen, buf);
+ ret = mtd_read(mtd, block * inftl->EraseSize, SECTORSIZE,
+ &retlen, buf);
/* We ignore ret in case the ECC of the MediaHeader is invalid
(which is apparently acceptable) */
if (retlen != SECTORSIZE) {
@@ -118,8 +118,8 @@ static int find_boot_record(struct INFTLrecord *inftl)
memcpy(mh, buf, sizeof(struct INFTLMediaHeader));
/* Read the spare media header at offset 4096 */
- mtd->read(mtd, block * inftl->EraseSize + 4096,
- SECTORSIZE, &retlen, buf);
+ mtd_read(mtd, block * inftl->EraseSize + 4096, SECTORSIZE,
+ &retlen, buf);
if (retlen != SECTORSIZE) {
printk(KERN_WARNING "INFTL: Unable to read spare "
"Media Header\n");
@@ -220,7 +220,7 @@ static int find_boot_record(struct INFTLrecord *inftl)
*/
instr->addr = ip->Reserved0 * inftl->EraseSize;
instr->len = inftl->EraseSize;
- mtd->erase(mtd, instr);
+ mtd_erase(mtd, instr);
}
if ((ip->lastUnit - ip->firstUnit + 1) < ip->virtualUnits) {
printk(KERN_WARNING "INFTL: Media Header "
@@ -306,7 +306,8 @@ static int find_boot_record(struct INFTLrecord *inftl)
/* If any of the physical eraseblocks are bad, don't
use the unit. */
for (physblock = 0; physblock < inftl->EraseSize; physblock += inftl->mbd.mtd->erasesize) {
- if (inftl->mbd.mtd->block_isbad(inftl->mbd.mtd, i * inftl->EraseSize + physblock))
+ if (mtd_block_isbad(inftl->mbd.mtd,
+ i * inftl->EraseSize + physblock))
inftl->PUtable[i] = BLOCK_RESERVED;
}
}
@@ -342,7 +343,7 @@ static int check_free_sectors(struct INFTLrecord *inftl, unsigned int address,
int i;
for (i = 0; i < len; i += SECTORSIZE) {
- if (mtd->read(mtd, address, SECTORSIZE, &retlen, buf))
+ if (mtd_read(mtd, address, SECTORSIZE, &retlen, buf))
return -1;
if (memcmpb(buf, 0xff, SECTORSIZE) != 0)
return -1;
@@ -393,7 +394,7 @@ int INFTL_formatblock(struct INFTLrecord *inftl, int block)
mark only the failed block in the bbt. */
for (physblock = 0; physblock < inftl->EraseSize;
physblock += instr->len, instr->addr += instr->len) {
- mtd->erase(inftl->mbd.mtd, instr);
+ mtd_erase(inftl->mbd.mtd, instr);
if (instr->state == MTD_ERASE_FAILED) {
printk(KERN_WARNING "INFTL: error while formatting block %d\n",
@@ -423,7 +424,7 @@ int INFTL_formatblock(struct INFTLrecord *inftl, int block)
fail:
/* could not format, update the bad block table (caller is responsible
for setting the PUtable to BLOCK_RESERVED on failure) */
- inftl->mbd.mtd->block_markbad(inftl->mbd.mtd, instr->addr);
+ mtd_block_markbad(inftl->mbd.mtd, instr->addr);
return -1;
}
diff --git a/drivers/mtd/lpddr/lpddr_cmds.c b/drivers/mtd/lpddr/lpddr_cmds.c
index 1dca31d9a8b3..536bbceaeaad 100644
--- a/drivers/mtd/lpddr/lpddr_cmds.c
+++ b/drivers/mtd/lpddr/lpddr_cmds.c
@@ -70,19 +70,12 @@ struct mtd_info *lpddr_cmdset(struct map_info *map)
mtd->erase = lpddr_erase;
mtd->write = lpddr_write_buffers;
mtd->writev = lpddr_writev;
- mtd->read_oob = NULL;
- mtd->write_oob = NULL;
- mtd->sync = NULL;
mtd->lock = lpddr_lock;
mtd->unlock = lpddr_unlock;
- mtd->suspend = NULL;
- mtd->resume = NULL;
if (map_is_linear(map)) {
mtd->point = lpddr_point;
mtd->unpoint = lpddr_unpoint;
}
- mtd->block_isbad = NULL;
- mtd->block_markbad = NULL;
mtd->size = 1 << lpddr->qinfo->DevSizeShift;
mtd->erasesize = 1 << lpddr->qinfo->UniformBlockSizeShift;
mtd->writesize = 1 << lpddr->qinfo->BufSizeShift;
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 8e0c4bf9f7fb..6c5c431c64af 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -242,15 +242,6 @@ config MTD_NETtel
help
Support for flash chips on NETtel/SecureEdge/SnapGear boards.
-config MTD_BCM963XX
- tristate "Map driver for Broadcom BCM963xx boards"
- depends on BCM63XX
- select MTD_MAP_BANK_WIDTH_2
- select MTD_CFI_I1
- help
- Support for parsing CFE image tag and creating MTD partitions on
- Broadcom BCM63xx boards.
-
config MTD_LANTIQ
tristate "Lantiq SoC NOR support"
depends on LANTIQ
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index 45dcb8b14f22..68a9a91d344f 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -55,6 +55,5 @@ obj-$(CONFIG_MTD_BFIN_ASYNC) += bfin-async-flash.o
obj-$(CONFIG_MTD_RBTX4939) += rbtx4939-flash.o
obj-$(CONFIG_MTD_VMU) += vmu-flash.o
obj-$(CONFIG_MTD_GPIO_ADDR) += gpio-addr-flash.o
-obj-$(CONFIG_MTD_BCM963XX) += bcm963xx-flash.o
obj-$(CONFIG_MTD_LATCH_ADDR) += latch-addr-flash.o
obj-$(CONFIG_MTD_LANTIQ) += lantiq-flash.o
diff --git a/drivers/mtd/maps/bcm963xx-flash.c b/drivers/mtd/maps/bcm963xx-flash.c
deleted file mode 100644
index 736ca10ca9f1..000000000000
--- a/drivers/mtd/maps/bcm963xx-flash.c
+++ /dev/null
@@ -1,277 +0,0 @@
-/*
- * Copyright © 2006-2008 Florian Fainelli <florian@openwrt.org>
- * Mike Albon <malbon@openwrt.org>
- * Copyright © 2009-2010 Daniel Dickinson <openwrt@cshore.neomailbox.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/partitions.h>
-#include <linux/vmalloc.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-
-#include <asm/mach-bcm63xx/bcm963xx_tag.h>
-
-#define BCM63XX_BUSWIDTH 2 /* Buswidth */
-#define BCM63XX_EXTENDED_SIZE 0xBFC00000 /* Extended flash address */
-
-#define PFX KBUILD_MODNAME ": "
-
-static struct mtd_partition *parsed_parts;
-
-static struct mtd_info *bcm963xx_mtd_info;
-
-static struct map_info bcm963xx_map = {
- .name = "bcm963xx",
- .bankwidth = BCM63XX_BUSWIDTH,
-};
-
-static int parse_cfe_partitions(struct mtd_info *master,
- struct mtd_partition **pparts)
-{
- /* CFE, NVRAM and global Linux are always present */
- int nrparts = 3, curpart = 0;
- struct bcm_tag *buf;
- struct mtd_partition *parts;
- int ret;
- size_t retlen;
- unsigned int rootfsaddr, kerneladdr, spareaddr;
- unsigned int rootfslen, kernellen, sparelen, totallen;
- int namelen = 0;
- int i;
- char *boardid;
- char *tagversion;
-
- /* Allocate memory for buffer */
- buf = vmalloc(sizeof(struct bcm_tag));
- if (!buf)
- return -ENOMEM;
-
- /* Get the tag */
- ret = master->read(master, master->erasesize, sizeof(struct bcm_tag),
- &retlen, (void *)buf);
- if (retlen != sizeof(struct bcm_tag)) {
- vfree(buf);
- return -EIO;
- }
-
- sscanf(buf->kernel_address, "%u", &kerneladdr);
- sscanf(buf->kernel_length, "%u", &kernellen);
- sscanf(buf->total_length, "%u", &totallen);
- tagversion = &(buf->tag_version[0]);
- boardid = &(buf->board_id[0]);
-
- printk(KERN_INFO PFX "CFE boot tag found with version %s "
- "and board type %s\n", tagversion, boardid);
-
- kerneladdr = kerneladdr - BCM63XX_EXTENDED_SIZE;
- rootfsaddr = kerneladdr + kernellen;
- spareaddr = roundup(totallen, master->erasesize) + master->erasesize;
- sparelen = master->size - spareaddr - master->erasesize;
- rootfslen = spareaddr - rootfsaddr;
-
- /* Determine number of partitions */
- namelen = 8;
- if (rootfslen > 0) {
- nrparts++;
- namelen += 6;
- };
- if (kernellen > 0) {
- nrparts++;
- namelen += 6;
- };
-
- /* Ask kernel for more memory */
- parts = kzalloc(sizeof(*parts) * nrparts + 10 * nrparts, GFP_KERNEL);
- if (!parts) {
- vfree(buf);
- return -ENOMEM;
- };
-
- /* Start building partition list */
- parts[curpart].name = "CFE";
- parts[curpart].offset = 0;
- parts[curpart].size = master->erasesize;
- curpart++;
-
- if (kernellen > 0) {
- parts[curpart].name = "kernel";
- parts[curpart].offset = kerneladdr;
- parts[curpart].size = kernellen;
- curpart++;
- };
-
- if (rootfslen > 0) {
- parts[curpart].name = "rootfs";
- parts[curpart].offset = rootfsaddr;
- parts[curpart].size = rootfslen;
- if (sparelen > 0)
- parts[curpart].size += sparelen;
- curpart++;
- };
-
- parts[curpart].name = "nvram";
- parts[curpart].offset = master->size - master->erasesize;
- parts[curpart].size = master->erasesize;
-
- /* Global partition "linux" to make easy firmware upgrade */
- curpart++;
- parts[curpart].name = "linux";
- parts[curpart].offset = parts[0].size;
- parts[curpart].size = master->size - parts[0].size - parts[3].size;
-
- for (i = 0; i < nrparts; i++)
- printk(KERN_INFO PFX "Partition %d is %s offset %lx and "
- "length %lx\n", i, parts[i].name,
- (long unsigned int)(parts[i].offset),
- (long unsigned int)(parts[i].size));
-
- printk(KERN_INFO PFX "Spare partition is %x offset and length %x\n",
- spareaddr, sparelen);
- *pparts = parts;
- vfree(buf);
-
- return nrparts;
-};
-
-static int bcm963xx_detect_cfe(struct mtd_info *master)
-{
- int idoffset = 0x4e0;
- static char idstring[8] = "CFE1CFE1";
- char buf[9];
- int ret;
- size_t retlen;
-
- ret = master->read(master, idoffset, 8, &retlen, (void *)buf);
- buf[retlen] = 0;
- printk(KERN_INFO PFX "Read Signature value of %s\n", buf);
-
- return strncmp(idstring, buf, 8);
-}
-
-static int bcm963xx_probe(struct platform_device *pdev)
-{
- int err = 0;
- int parsed_nr_parts = 0;
- char *part_type;
- struct resource *r;
-
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!r) {
- dev_err(&pdev->dev, "no resource supplied\n");
- return -ENODEV;
- }
-
- bcm963xx_map.phys = r->start;
- bcm963xx_map.size = resource_size(r);
- bcm963xx_map.virt = ioremap(r->start, resource_size(r));
- if (!bcm963xx_map.virt) {
- dev_err(&pdev->dev, "failed to ioremap\n");
- return -EIO;
- }
-
- dev_info(&pdev->dev, "0x%08lx at 0x%08x\n",
- bcm963xx_map.size, bcm963xx_map.phys);
-
- simple_map_init(&bcm963xx_map);
-
- bcm963xx_mtd_info = do_map_probe("cfi_probe", &bcm963xx_map);
- if (!bcm963xx_mtd_info) {
- dev_err(&pdev->dev, "failed to probe using CFI\n");
- bcm963xx_mtd_info = do_map_probe("jedec_probe", &bcm963xx_map);
- if (bcm963xx_mtd_info)
- goto probe_ok;
- dev_err(&pdev->dev, "failed to probe using JEDEC\n");
- err = -EIO;
- goto err_probe;
- }
-
-probe_ok:
- bcm963xx_mtd_info->owner = THIS_MODULE;
-
- /* This is mutually exclusive */
- if (bcm963xx_detect_cfe(bcm963xx_mtd_info) == 0) {
- dev_info(&pdev->dev, "CFE bootloader detected\n");
- if (parsed_nr_parts == 0) {
- int ret = parse_cfe_partitions(bcm963xx_mtd_info,
- &parsed_parts);
- if (ret > 0) {
- part_type = "CFE";
- parsed_nr_parts = ret;
- }
- }
- } else {
- dev_info(&pdev->dev, "unsupported bootloader\n");
- err = -ENODEV;
- goto err_probe;
- }
-
- return mtd_device_register(bcm963xx_mtd_info, parsed_parts,
- parsed_nr_parts);
-
-err_probe:
- iounmap(bcm963xx_map.virt);
- return err;
-}
-
-static int bcm963xx_remove(struct platform_device *pdev)
-{
- if (bcm963xx_mtd_info) {
- mtd_device_unregister(bcm963xx_mtd_info);
- map_destroy(bcm963xx_mtd_info);
- }
-
- if (bcm963xx_map.virt) {
- iounmap(bcm963xx_map.virt);
- bcm963xx_map.virt = 0;
- }
-
- return 0;
-}
-
-static struct platform_driver bcm63xx_mtd_dev = {
- .probe = bcm963xx_probe,
- .remove = bcm963xx_remove,
- .driver = {
- .name = "bcm963xx-flash",
- .owner = THIS_MODULE,
- },
-};
-
-static int __init bcm963xx_mtd_init(void)
-{
- return platform_driver_register(&bcm63xx_mtd_dev);
-}
-
-static void __exit bcm963xx_mtd_exit(void)
-{
- platform_driver_unregister(&bcm63xx_mtd_dev);
-}
-
-module_init(bcm963xx_mtd_init);
-module_exit(bcm963xx_mtd_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Broadcom BCM63xx MTD driver for CFE and RedBoot");
-MODULE_AUTHOR("Daniel Dickinson <openwrt@cshore.neomailbox.net>");
-MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
-MODULE_AUTHOR("Mike Albon <malbon@openwrt.org>");
diff --git a/drivers/mtd/maps/bfin-async-flash.c b/drivers/mtd/maps/bfin-async-flash.c
index 6d6b2b5674ee..650126c361f1 100644
--- a/drivers/mtd/maps/bfin-async-flash.c
+++ b/drivers/mtd/maps/bfin-async-flash.c
@@ -190,17 +190,7 @@ static struct platform_driver bfin_flash_driver = {
},
};
-static int __init bfin_flash_init(void)
-{
- return platform_driver_register(&bfin_flash_driver);
-}
-module_init(bfin_flash_init);
-
-static void __exit bfin_flash_exit(void)
-{
- platform_driver_unregister(&bfin_flash_driver);
-}
-module_exit(bfin_flash_exit);
+module_platform_driver(bfin_flash_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MTD map driver for Blackfins with flash/ethernet on same async bank");
diff --git a/drivers/mtd/maps/gpio-addr-flash.c b/drivers/mtd/maps/gpio-addr-flash.c
index 1ec66f031c51..33cce895859f 100644
--- a/drivers/mtd/maps/gpio-addr-flash.c
+++ b/drivers/mtd/maps/gpio-addr-flash.c
@@ -279,17 +279,7 @@ static struct platform_driver gpio_flash_driver = {
},
};
-static int __init gpio_flash_init(void)
-{
- return platform_driver_register(&gpio_flash_driver);
-}
-module_init(gpio_flash_init);
-
-static void __exit gpio_flash_exit(void)
-{
- platform_driver_unregister(&gpio_flash_driver);
-}
-module_exit(gpio_flash_exit);
+module_platform_driver(gpio_flash_driver);
MODULE_AUTHOR("Mike Frysinger <vapier@gentoo.org>");
MODULE_DESCRIPTION("MTD map driver for flashes addressed physically and with gpios");
diff --git a/drivers/mtd/maps/ixp2000.c b/drivers/mtd/maps/ixp2000.c
index 437fcd2f352f..fc7d4d0d9a4e 100644
--- a/drivers/mtd/maps/ixp2000.c
+++ b/drivers/mtd/maps/ixp2000.c
@@ -246,18 +246,8 @@ static struct platform_driver ixp2000_flash_driver = {
},
};
-static int __init ixp2000_flash_init(void)
-{
- return platform_driver_register(&ixp2000_flash_driver);
-}
-
-static void __exit ixp2000_flash_exit(void)
-{
- platform_driver_unregister(&ixp2000_flash_driver);
-}
+module_platform_driver(ixp2000_flash_driver);
-module_init(ixp2000_flash_init);
-module_exit(ixp2000_flash_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Deepak Saxena <dsaxena@plexity.net>");
MODULE_ALIAS("platform:IXP2000-Flash");
diff --git a/drivers/mtd/maps/ixp4xx.c b/drivers/mtd/maps/ixp4xx.c
index 30409015a3de..8b5410162d70 100644
--- a/drivers/mtd/maps/ixp4xx.c
+++ b/drivers/mtd/maps/ixp4xx.c
@@ -270,19 +270,7 @@ static struct platform_driver ixp4xx_flash_driver = {
},
};
-static int __init ixp4xx_flash_init(void)
-{
- return platform_driver_register(&ixp4xx_flash_driver);
-}
-
-static void __exit ixp4xx_flash_exit(void)
-{
- platform_driver_unregister(&ixp4xx_flash_driver);
-}
-
-
-module_init(ixp4xx_flash_init);
-module_exit(ixp4xx_flash_exit);
+module_platform_driver(ixp4xx_flash_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MTD map driver for Intel IXP4xx systems");
diff --git a/drivers/mtd/maps/lantiq-flash.c b/drivers/mtd/maps/lantiq-flash.c
index 4f10e27ada55..7b889de9477b 100644
--- a/drivers/mtd/maps/lantiq-flash.c
+++ b/drivers/mtd/maps/lantiq-flash.c
@@ -159,7 +159,7 @@ ltq_mtd_probe(struct platform_device *pdev)
if (!ltq_mtd->mtd) {
dev_err(&pdev->dev, "probing failed\n");
err = -ENXIO;
- goto err_unmap;
+ goto err_free;
}
ltq_mtd->mtd->owner = THIS_MODULE;
@@ -179,8 +179,6 @@ ltq_mtd_probe(struct platform_device *pdev)
err_destroy:
map_destroy(ltq_mtd->mtd);
-err_unmap:
- iounmap(ltq_mtd->map->virt);
err_free:
kfree(ltq_mtd->map);
err_out:
@@ -198,8 +196,6 @@ ltq_mtd_remove(struct platform_device *pdev)
mtd_device_unregister(ltq_mtd->mtd);
map_destroy(ltq_mtd->mtd);
}
- if (ltq_mtd->map->virt)
- iounmap(ltq_mtd->map->virt);
kfree(ltq_mtd->map);
kfree(ltq_mtd);
}
diff --git a/drivers/mtd/maps/latch-addr-flash.c b/drivers/mtd/maps/latch-addr-flash.c
index 119baa7d7477..8fed58e3a4a8 100644
--- a/drivers/mtd/maps/latch-addr-flash.c
+++ b/drivers/mtd/maps/latch-addr-flash.c
@@ -223,17 +223,7 @@ static struct platform_driver latch_addr_flash_driver = {
},
};
-static int __init latch_addr_flash_init(void)
-{
- return platform_driver_register(&latch_addr_flash_driver);
-}
-module_init(latch_addr_flash_init);
-
-static void __exit latch_addr_flash_exit(void)
-{
- platform_driver_unregister(&latch_addr_flash_driver);
-}
-module_exit(latch_addr_flash_exit);
+module_platform_driver(latch_addr_flash_driver);
MODULE_AUTHOR("David Griego <dgriego@mvista.com>");
MODULE_DESCRIPTION("MTD map driver for flashes addressed physically with upper "
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c
index 66e8200079c2..abc562653b31 100644
--- a/drivers/mtd/maps/physmap.c
+++ b/drivers/mtd/maps/physmap.c
@@ -85,6 +85,7 @@ static int physmap_flash_probe(struct platform_device *dev)
struct physmap_flash_data *physmap_data;
struct physmap_flash_info *info;
const char **probe_type;
+ const char **part_types;
int err = 0;
int i;
int devices_found = 0;
@@ -171,7 +172,9 @@ static int physmap_flash_probe(struct platform_device *dev)
if (err)
goto err_out;
- mtd_device_parse_register(info->cmtd, part_probe_types, 0,
+ part_types = physmap_data->part_probe_types ? : part_probe_types;
+
+ mtd_device_parse_register(info->cmtd, part_types, 0,
physmap_data->parts, physmap_data->nr_parts);
return 0;
@@ -187,9 +190,8 @@ static void physmap_flash_shutdown(struct platform_device *dev)
int i;
for (i = 0; i < MAX_RESOURCES && info->mtd[i]; i++)
- if (info->mtd[i]->suspend && info->mtd[i]->resume)
- if (info->mtd[i]->suspend(info->mtd[i]) == 0)
- info->mtd[i]->resume(info->mtd[i]);
+ if (mtd_suspend(info->mtd[i]) == 0)
+ mtd_resume(info->mtd[i]);
}
#else
#define physmap_flash_shutdown NULL
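The physmap change above lets platform data override the driver's built-in partition-parser list through the new part_probe_types field; the "?:" falls back to the default part_probe_types array when the board supplies none. A hedged example of what a board file could pass; the field name comes from the diff, the surrounding values are illustrative only:

static const char *board_part_probes[] = { "cmdlinepart", "RedBoot", NULL };

static struct physmap_flash_data board_flash_data = {
	.width			= 2,			/* bus width in bytes */
	.part_probe_types	= board_part_probes,	/* used instead of the default list */
};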
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index 7d65f9d3e690..2e6fb6831d55 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -338,18 +338,7 @@ static struct platform_driver of_flash_driver = {
.remove = of_flash_remove,
};
-static int __init of_flash_init(void)
-{
- return platform_driver_register(&of_flash_driver);
-}
-
-static void __exit of_flash_exit(void)
-{
- platform_driver_unregister(&of_flash_driver);
-}
-
-module_init(of_flash_init);
-module_exit(of_flash_exit);
+module_platform_driver(of_flash_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vitaly Wool <vwool@ru.mvista.com>");
diff --git a/drivers/mtd/maps/plat-ram.c b/drivers/mtd/maps/plat-ram.c
index 94f553489725..45876d0e5b8e 100644
--- a/drivers/mtd/maps/plat-ram.c
+++ b/drivers/mtd/maps/plat-ram.c
@@ -227,10 +227,14 @@ static int platram_probe(struct platform_device *pdev)
if (!err)
dev_info(&pdev->dev, "registered mtd device\n");
- /* add the whole device. */
- err = mtd_device_register(info->mtd, NULL, 0);
- if (err)
- dev_err(&pdev->dev, "failed to register the entire device\n");
+ if (pdata->nr_partitions) {
+ /* add the whole device. */
+ err = mtd_device_register(info->mtd, NULL, 0);
+ if (err) {
+ dev_err(&pdev->dev,
+ "failed to register the entire device\n");
+ }
+ }
return err;
diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c
index 411a17df9fc1..436d121185b1 100644
--- a/drivers/mtd/maps/pxa2xx-flash.c
+++ b/drivers/mtd/maps/pxa2xx-flash.c
@@ -98,7 +98,7 @@ static int __devinit pxa2xx_flash_probe(struct platform_device *pdev)
}
info->mtd->owner = THIS_MODULE;
- mtd_device_parse_register(info->mtd, probes, 0, NULL, 0);
+ mtd_device_parse_register(info->mtd, probes, 0, flash->parts, flash->nr_parts);
platform_set_drvdata(pdev, info);
return 0;
@@ -125,8 +125,8 @@ static void pxa2xx_flash_shutdown(struct platform_device *dev)
{
struct pxa2xx_flash_info *info = platform_get_drvdata(dev);
- if (info && info->mtd->suspend(info->mtd) == 0)
- info->mtd->resume(info->mtd);
+ if (info && mtd_suspend(info->mtd) == 0)
+ mtd_resume(info->mtd);
}
#else
#define pxa2xx_flash_shutdown NULL
@@ -142,18 +142,7 @@ static struct platform_driver pxa2xx_flash_driver = {
.shutdown = pxa2xx_flash_shutdown,
};
-static int __init init_pxa2xx_flash(void)
-{
- return platform_driver_register(&pxa2xx_flash_driver);
-}
-
-static void __exit cleanup_pxa2xx_flash(void)
-{
- platform_driver_unregister(&pxa2xx_flash_driver);
-}
-
-module_init(init_pxa2xx_flash);
-module_exit(cleanup_pxa2xx_flash);
+module_platform_driver(pxa2xx_flash_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Nicolas Pitre <nico@fluxnic.net>");
diff --git a/drivers/mtd/maps/rbtx4939-flash.c b/drivers/mtd/maps/rbtx4939-flash.c
index 0237f197fd12..3da63fc6f16e 100644
--- a/drivers/mtd/maps/rbtx4939-flash.c
+++ b/drivers/mtd/maps/rbtx4939-flash.c
@@ -119,9 +119,8 @@ static void rbtx4939_flash_shutdown(struct platform_device *dev)
{
struct rbtx4939_flash_info *info = platform_get_drvdata(dev);
- if (info->mtd->suspend && info->mtd->resume)
- if (info->mtd->suspend(info->mtd) == 0)
- info->mtd->resume(info->mtd);
+ if (mtd_suspend(info->mtd) == 0)
+ mtd_resume(info->mtd);
}
#else
#define rbtx4939_flash_shutdown NULL
@@ -137,18 +136,7 @@ static struct platform_driver rbtx4939_flash_driver = {
},
};
-static int __init rbtx4939_flash_init(void)
-{
- return platform_driver_register(&rbtx4939_flash_driver);
-}
-
-static void __exit rbtx4939_flash_exit(void)
-{
- platform_driver_unregister(&rbtx4939_flash_driver);
-}
-
-module_init(rbtx4939_flash_init);
-module_exit(rbtx4939_flash_exit);
+module_platform_driver(rbtx4939_flash_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RBTX4939 MTD map driver");
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
index fa9c0a9670cd..502821997707 100644
--- a/drivers/mtd/maps/sa1100-flash.c
+++ b/drivers/mtd/maps/sa1100-flash.c
@@ -377,8 +377,8 @@ static int __exit sa1100_mtd_remove(struct platform_device *pdev)
static void sa1100_mtd_shutdown(struct platform_device *dev)
{
struct sa_info *info = platform_get_drvdata(dev);
- if (info && info->mtd->suspend(info->mtd) == 0)
- info->mtd->resume(info->mtd);
+ if (info && mtd_suspend(info->mtd) == 0)
+ mtd_resume(info->mtd);
}
#else
#define sa1100_mtd_shutdown NULL
@@ -394,18 +394,7 @@ static struct platform_driver sa1100_mtd_driver = {
},
};
-static int __init sa1100_mtd_init(void)
-{
- return platform_driver_register(&sa1100_mtd_driver);
-}
-
-static void __exit sa1100_mtd_exit(void)
-{
- platform_driver_unregister(&sa1100_mtd_driver);
-}
-
-module_init(sa1100_mtd_init);
-module_exit(sa1100_mtd_exit);
+module_platform_driver(sa1100_mtd_driver);
MODULE_AUTHOR("Nicolas Pitre");
MODULE_DESCRIPTION("SA1100 CFI map driver");
diff --git a/drivers/mtd/maps/scb2_flash.c b/drivers/mtd/maps/scb2_flash.c
index d88c8426bb0f..934a72c80078 100644
--- a/drivers/mtd/maps/scb2_flash.c
+++ b/drivers/mtd/maps/scb2_flash.c
@@ -204,8 +204,7 @@ scb2_flash_remove(struct pci_dev *dev)
return;
/* disable flash writes */
- if (scb2_mtd->lock)
- scb2_mtd->lock(scb2_mtd, 0, scb2_mtd->size);
+ mtd_lock(scb2_mtd, 0, scb2_mtd->size);
mtd_device_unregister(scb2_mtd);
map_destroy(scb2_mtd);
diff --git a/drivers/mtd/maps/sun_uflash.c b/drivers/mtd/maps/sun_uflash.c
index 2d66234f57cb..175e537b444f 100644
--- a/drivers/mtd/maps/sun_uflash.c
+++ b/drivers/mtd/maps/sun_uflash.c
@@ -158,15 +158,4 @@ static struct platform_driver uflash_driver = {
.remove = __devexit_p(uflash_remove),
};
-static int __init uflash_init(void)
-{
- return platform_driver_register(&uflash_driver);
-}
-
-static void __exit uflash_exit(void)
-{
- platform_driver_unregister(&uflash_driver);
-}
-
-module_init(uflash_init);
-module_exit(uflash_exit);
+module_platform_driver(uflash_driver);
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index ed8b5e744b12..424ca5f93c6c 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -215,7 +215,7 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode)
mutex_lock(&dev->lock);
- if (dev->open++)
+ if (dev->open)
goto unlock;
kref_get(&dev->ref);
@@ -235,6 +235,7 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode)
goto error_release;
unlock:
+ dev->open++;
mutex_unlock(&dev->lock);
blktrans_dev_put(dev);
return ret;
diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c
index 7c1dc908a174..af6591237b9b 100644
--- a/drivers/mtd/mtdblock.c
+++ b/drivers/mtd/mtdblock.c
@@ -85,7 +85,7 @@ static int erase_write (struct mtd_info *mtd, unsigned long pos,
set_current_state(TASK_INTERRUPTIBLE);
add_wait_queue(&wait_q, &wait);
- ret = mtd->erase(mtd, &erase);
+ ret = mtd_erase(mtd, &erase);
if (ret) {
set_current_state(TASK_RUNNING);
remove_wait_queue(&wait_q, &wait);
@@ -102,7 +102,7 @@ static int erase_write (struct mtd_info *mtd, unsigned long pos,
* Next, write the data to flash.
*/
- ret = mtd->write(mtd, pos, len, &retlen, buf);
+ ret = mtd_write(mtd, pos, len, &retlen, buf);
if (ret)
return ret;
if (retlen != len)
@@ -152,7 +152,7 @@ static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos,
mtd->name, pos, len);
if (!sect_size)
- return mtd->write(mtd, pos, len, &retlen, buf);
+ return mtd_write(mtd, pos, len, &retlen, buf);
while (len > 0) {
unsigned long sect_start = (pos/sect_size)*sect_size;
@@ -184,8 +184,8 @@ static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos,
mtdblk->cache_offset != sect_start) {
/* fill the cache with the current sector */
mtdblk->cache_state = STATE_EMPTY;
- ret = mtd->read(mtd, sect_start, sect_size,
- &retlen, mtdblk->cache_data);
+ ret = mtd_read(mtd, sect_start, sect_size,
+ &retlen, mtdblk->cache_data);
if (ret)
return ret;
if (retlen != sect_size)
@@ -222,7 +222,7 @@ static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
mtd->name, pos, len);
if (!sect_size)
- return mtd->read(mtd, pos, len, &retlen, buf);
+ return mtd_read(mtd, pos, len, &retlen, buf);
while (len > 0) {
unsigned long sect_start = (pos/sect_size)*sect_size;
@@ -241,7 +241,7 @@ static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
mtdblk->cache_offset == sect_start) {
memcpy (buf, mtdblk->cache_data + offset, size);
} else {
- ret = mtd->read(mtd, pos, size, &retlen, buf);
+ ret = mtd_read(mtd, pos, size, &retlen, buf);
if (ret)
return ret;
if (retlen != size)
@@ -322,8 +322,7 @@ static int mtdblock_release(struct mtd_blktrans_dev *mbd)
if (!--mtdblk->count) {
/* It was the last usage. Free the cache */
- if (mbd->mtd->sync)
- mbd->mtd->sync(mbd->mtd);
+ mtd_sync(mbd->mtd);
vfree(mtdblk->cache_data);
}
@@ -341,9 +340,7 @@ static int mtdblock_flush(struct mtd_blktrans_dev *dev)
mutex_lock(&mtdblk->cache_mutex);
write_cached_data(mtdblk);
mutex_unlock(&mtdblk->cache_mutex);
-
- if (dev->mtd->sync)
- dev->mtd->sync(dev->mtd);
+ mtd_sync(dev->mtd);
return 0;
}
diff --git a/drivers/mtd/mtdblock_ro.c b/drivers/mtd/mtdblock_ro.c
index 0470a6e86309..92759a9d2985 100644
--- a/drivers/mtd/mtdblock_ro.c
+++ b/drivers/mtd/mtdblock_ro.c
@@ -30,7 +30,7 @@ static int mtdblock_readsect(struct mtd_blktrans_dev *dev,
{
size_t retlen;
- if (dev->mtd->read(dev->mtd, (block * 512), 512, &retlen, buf))
+ if (mtd_read(dev->mtd, (block * 512), 512, &retlen, buf))
return 1;
return 0;
}
@@ -40,7 +40,7 @@ static int mtdblock_writesect(struct mtd_blktrans_dev *dev,
{
size_t retlen;
- if (dev->mtd->write(dev->mtd, (block * 512), 512, &retlen, buf))
+ if (mtd_write(dev->mtd, (block * 512), 512, &retlen, buf))
return 1;
return 0;
}
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index e7dc732ddabc..50c6a1e7f675 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -51,7 +51,7 @@ struct mtd_file_info {
enum mtd_file_modes mode;
};
-static loff_t mtd_lseek (struct file *file, loff_t offset, int orig)
+static loff_t mtdchar_lseek(struct file *file, loff_t offset, int orig)
{
struct mtd_file_info *mfi = file->private_data;
struct mtd_info *mtd = mfi->mtd;
@@ -77,7 +77,7 @@ static loff_t mtd_lseek (struct file *file, loff_t offset, int orig)
-static int mtd_open(struct inode *inode, struct file *file)
+static int mtdchar_open(struct inode *inode, struct file *file)
{
int minor = iminor(inode);
int devnum = minor >> 1;
@@ -142,11 +142,11 @@ static int mtd_open(struct inode *inode, struct file *file)
out:
mutex_unlock(&mtd_mutex);
return ret;
-} /* mtd_open */
+} /* mtdchar_open */
/*====================================================================*/
-static int mtd_close(struct inode *inode, struct file *file)
+static int mtdchar_close(struct inode *inode, struct file *file)
{
struct mtd_file_info *mfi = file->private_data;
struct mtd_info *mtd = mfi->mtd;
@@ -154,8 +154,8 @@ static int mtd_close(struct inode *inode, struct file *file)
pr_debug("MTD_close\n");
/* Only sync if opened RW */
- if ((file->f_mode & FMODE_WRITE) && mtd->sync)
- mtd->sync(mtd);
+ if ((file->f_mode & FMODE_WRITE))
+ mtd_sync(mtd);
iput(mfi->ino);
@@ -164,7 +164,7 @@ static int mtd_close(struct inode *inode, struct file *file)
kfree(mfi);
return 0;
-} /* mtd_close */
+} /* mtdchar_close */
/* Back in June 2001, dwmw2 wrote:
*
@@ -184,11 +184,12 @@ static int mtd_close(struct inode *inode, struct file *file)
* alignment requirements are not met in the NAND subdriver.
*/
-static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t *ppos)
+static ssize_t mtdchar_read(struct file *file, char __user *buf, size_t count,
+ loff_t *ppos)
{
struct mtd_file_info *mfi = file->private_data;
struct mtd_info *mtd = mfi->mtd;
- size_t retlen=0;
+ size_t retlen;
size_t total_retlen=0;
int ret=0;
int len;
@@ -212,10 +213,12 @@ static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t
switch (mfi->mode) {
case MTD_FILE_MODE_OTP_FACTORY:
- ret = mtd->read_fact_prot_reg(mtd, *ppos, len, &retlen, kbuf);
+ ret = mtd_read_fact_prot_reg(mtd, *ppos, len,
+ &retlen, kbuf);
break;
case MTD_FILE_MODE_OTP_USER:
- ret = mtd->read_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
+ ret = mtd_read_user_prot_reg(mtd, *ppos, len,
+ &retlen, kbuf);
break;
case MTD_FILE_MODE_RAW:
{
@@ -226,12 +229,12 @@ static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t
ops.oobbuf = NULL;
ops.len = len;
- ret = mtd->read_oob(mtd, *ppos, &ops);
+ ret = mtd_read_oob(mtd, *ppos, &ops);
retlen = ops.retlen;
break;
}
default:
- ret = mtd->read(mtd, *ppos, len, &retlen, kbuf);
+ ret = mtd_read(mtd, *ppos, len, &retlen, kbuf);
}
/* Nand returns -EBADMSG on ECC errors, but it returns
* the data. For our userspace tools it is important
@@ -265,9 +268,10 @@ static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t
kfree(kbuf);
return total_retlen;
-} /* mtd_read */
+} /* mtdchar_read */
-static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count,loff_t *ppos)
+static ssize_t mtdchar_write(struct file *file, const char __user *buf, size_t count,
+ loff_t *ppos)
{
struct mtd_file_info *mfi = file->private_data;
struct mtd_info *mtd = mfi->mtd;
@@ -306,11 +310,8 @@ static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count
ret = -EROFS;
break;
case MTD_FILE_MODE_OTP_USER:
- if (!mtd->write_user_prot_reg) {
- ret = -EOPNOTSUPP;
- break;
- }
- ret = mtd->write_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
+ ret = mtd_write_user_prot_reg(mtd, *ppos, len,
+ &retlen, kbuf);
break;
case MTD_FILE_MODE_RAW:
@@ -323,13 +324,13 @@ static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count
ops.ooboffs = 0;
ops.len = len;
- ret = mtd->write_oob(mtd, *ppos, &ops);
+ ret = mtd_write_oob(mtd, *ppos, &ops);
retlen = ops.retlen;
break;
}
default:
- ret = (*(mtd->write))(mtd, *ppos, len, &retlen, kbuf);
+ ret = mtd_write(mtd, *ppos, len, &retlen, kbuf);
}
if (!ret) {
*ppos += retlen;
@@ -345,7 +346,7 @@ static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count
kfree(kbuf);
return total_retlen;
-} /* mtd_write */
+} /* mtdchar_write */
/*======================================================================
@@ -361,20 +362,22 @@ static void mtdchar_erase_callback (struct erase_info *instr)
static int otp_select_filemode(struct mtd_file_info *mfi, int mode)
{
struct mtd_info *mtd = mfi->mtd;
+ size_t retlen;
int ret = 0;
+ /*
+ * Make a fake call to mtd_read_fact_prot_reg() to check if OTP
+ * operations are supported.
+ */
+ if (mtd_read_fact_prot_reg(mtd, -1, -1, &retlen, NULL) == -EOPNOTSUPP)
+ return -EOPNOTSUPP;
+
switch (mode) {
case MTD_OTP_FACTORY:
- if (!mtd->read_fact_prot_reg)
- ret = -EOPNOTSUPP;
- else
- mfi->mode = MTD_FILE_MODE_OTP_FACTORY;
+ mfi->mode = MTD_FILE_MODE_OTP_FACTORY;
break;
case MTD_OTP_USER:
- if (!mtd->read_fact_prot_reg)
- ret = -EOPNOTSUPP;
- else
- mfi->mode = MTD_FILE_MODE_OTP_USER;
+ mfi->mode = MTD_FILE_MODE_OTP_USER;
break;
default:
ret = -EINVAL;
@@ -387,7 +390,7 @@ static int otp_select_filemode(struct mtd_file_info *mfi, int mode)
# define otp_select_filemode(f,m) -EOPNOTSUPP
#endif
-static int mtd_do_writeoob(struct file *file, struct mtd_info *mtd,
+static int mtdchar_writeoob(struct file *file, struct mtd_info *mtd,
uint64_t start, uint32_t length, void __user *ptr,
uint32_t __user *retp)
{
@@ -424,7 +427,7 @@ static int mtd_do_writeoob(struct file *file, struct mtd_info *mtd,
return PTR_ERR(ops.oobbuf);
start &= ~((uint64_t)mtd->writesize - 1);
- ret = mtd->write_oob(mtd, start, &ops);
+ ret = mtd_write_oob(mtd, start, &ops);
if (ops.oobretlen > 0xFFFFFFFFU)
ret = -EOVERFLOW;
@@ -436,7 +439,7 @@ static int mtd_do_writeoob(struct file *file, struct mtd_info *mtd,
return ret;
}
-static int mtd_do_readoob(struct file *file, struct mtd_info *mtd,
+static int mtdchar_readoob(struct file *file, struct mtd_info *mtd,
uint64_t start, uint32_t length, void __user *ptr,
uint32_t __user *retp)
{
@@ -447,13 +450,8 @@ static int mtd_do_readoob(struct file *file, struct mtd_info *mtd,
if (length > 4096)
return -EINVAL;
- if (!mtd->read_oob)
- ret = -EOPNOTSUPP;
- else
- ret = access_ok(VERIFY_WRITE, ptr,
- length) ? 0 : -EFAULT;
- if (ret)
- return ret;
+ if (!access_ok(VERIFY_WRITE, ptr, length))
+ return -EFAULT;
ops.ooblen = length;
ops.ooboffs = start & (mtd->writesize - 1);
@@ -469,7 +467,7 @@ static int mtd_do_readoob(struct file *file, struct mtd_info *mtd,
return -ENOMEM;
start &= ~((uint64_t)mtd->writesize - 1);
- ret = mtd->read_oob(mtd, start, &ops);
+ ret = mtd_read_oob(mtd, start, &ops);
if (put_user(ops.oobretlen, retp))
ret = -EFAULT;
@@ -530,7 +528,7 @@ static int shrink_ecclayout(const struct nand_ecclayout *from,
return 0;
}
-static int mtd_blkpg_ioctl(struct mtd_info *mtd,
+static int mtdchar_blkpg_ioctl(struct mtd_info *mtd,
struct blkpg_ioctl_arg __user *arg)
{
struct blkpg_ioctl_arg a;
@@ -566,7 +564,7 @@ static int mtd_blkpg_ioctl(struct mtd_info *mtd,
}
}
-static int mtd_write_ioctl(struct mtd_info *mtd,
+static int mtdchar_write_ioctl(struct mtd_info *mtd,
struct mtd_write_req __user *argp)
{
struct mtd_write_req req;
@@ -607,7 +605,7 @@ static int mtd_write_ioctl(struct mtd_info *mtd,
ops.oobbuf = NULL;
}
- ret = mtd->write_oob(mtd, (loff_t)req.start, &ops);
+ ret = mtd_write_oob(mtd, (loff_t)req.start, &ops);
kfree(ops.datbuf);
kfree(ops.oobbuf);
@@ -615,7 +613,7 @@ static int mtd_write_ioctl(struct mtd_info *mtd,
return ret;
}
-static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
+static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
{
struct mtd_file_info *mfi = file->private_data;
struct mtd_info *mtd = mfi->mtd;
@@ -729,7 +727,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
wq_head is no longer there when the
callback routine tries to wake us up.
*/
- ret = mtd->erase(mtd, erase);
+ ret = mtd_erase(mtd, erase);
if (!ret) {
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&waitq, &wait);
@@ -755,7 +753,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
if (copy_from_user(&buf, argp, sizeof(buf)))
ret = -EFAULT;
else
- ret = mtd_do_writeoob(file, mtd, buf.start, buf.length,
+ ret = mtdchar_writeoob(file, mtd, buf.start, buf.length,
buf.ptr, &buf_user->length);
break;
}
@@ -769,7 +767,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
if (copy_from_user(&buf, argp, sizeof(buf)))
ret = -EFAULT;
else
- ret = mtd_do_readoob(file, mtd, buf.start, buf.length,
+ ret = mtdchar_readoob(file, mtd, buf.start, buf.length,
buf.ptr, &buf_user->start);
break;
}
@@ -782,7 +780,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
if (copy_from_user(&buf, argp, sizeof(buf)))
ret = -EFAULT;
else
- ret = mtd_do_writeoob(file, mtd, buf.start, buf.length,
+ ret = mtdchar_writeoob(file, mtd, buf.start, buf.length,
(void __user *)(uintptr_t)buf.usr_ptr,
&buf_user->length);
break;
@@ -796,7 +794,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
if (copy_from_user(&buf, argp, sizeof(buf)))
ret = -EFAULT;
else
- ret = mtd_do_readoob(file, mtd, buf.start, buf.length,
+ ret = mtdchar_readoob(file, mtd, buf.start, buf.length,
(void __user *)(uintptr_t)buf.usr_ptr,
&buf_user->length);
break;
@@ -804,7 +802,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
case MEMWRITE:
{
- ret = mtd_write_ioctl(mtd,
+ ret = mtdchar_write_ioctl(mtd,
(struct mtd_write_req __user *)arg);
break;
}
@@ -816,10 +814,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
if (copy_from_user(&einfo, argp, sizeof(einfo)))
return -EFAULT;
- if (!mtd->lock)
- ret = -EOPNOTSUPP;
- else
- ret = mtd->lock(mtd, einfo.start, einfo.length);
+ ret = mtd_lock(mtd, einfo.start, einfo.length);
break;
}
@@ -830,10 +825,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
if (copy_from_user(&einfo, argp, sizeof(einfo)))
return -EFAULT;
- if (!mtd->unlock)
- ret = -EOPNOTSUPP;
- else
- ret = mtd->unlock(mtd, einfo.start, einfo.length);
+ ret = mtd_unlock(mtd, einfo.start, einfo.length);
break;
}
@@ -844,10 +836,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
if (copy_from_user(&einfo, argp, sizeof(einfo)))
return -EFAULT;
- if (!mtd->is_locked)
- ret = -EOPNOTSUPP;
- else
- ret = mtd->is_locked(mtd, einfo.start, einfo.length);
+ ret = mtd_is_locked(mtd, einfo.start, einfo.length);
break;
}
@@ -878,10 +867,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
if (copy_from_user(&offs, argp, sizeof(loff_t)))
return -EFAULT;
- if (!mtd->block_isbad)
- ret = -EOPNOTSUPP;
- else
- return mtd->block_isbad(mtd, offs);
+ return mtd_block_isbad(mtd, offs);
break;
}
@@ -891,10 +877,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
if (copy_from_user(&offs, argp, sizeof(loff_t)))
return -EFAULT;
- if (!mtd->block_markbad)
- ret = -EOPNOTSUPP;
- else
- return mtd->block_markbad(mtd, offs);
+ return mtd_block_markbad(mtd, offs);
break;
}
@@ -919,17 +902,15 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
struct otp_info *buf = kmalloc(4096, GFP_KERNEL);
if (!buf)
return -ENOMEM;
- ret = -EOPNOTSUPP;
switch (mfi->mode) {
case MTD_FILE_MODE_OTP_FACTORY:
- if (mtd->get_fact_prot_info)
- ret = mtd->get_fact_prot_info(mtd, buf, 4096);
+ ret = mtd_get_fact_prot_info(mtd, buf, 4096);
break;
case MTD_FILE_MODE_OTP_USER:
- if (mtd->get_user_prot_info)
- ret = mtd->get_user_prot_info(mtd, buf, 4096);
+ ret = mtd_get_user_prot_info(mtd, buf, 4096);
break;
default:
+ ret = -EINVAL;
break;
}
if (ret >= 0) {
@@ -953,9 +934,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
return -EINVAL;
if (copy_from_user(&oinfo, argp, sizeof(oinfo)))
return -EFAULT;
- if (!mtd->lock_user_prot_reg)
- return -EOPNOTSUPP;
- ret = mtd->lock_user_prot_reg(mtd, oinfo.start, oinfo.length);
+ ret = mtd_lock_user_prot_reg(mtd, oinfo.start, oinfo.length);
break;
}
#endif
@@ -999,7 +978,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
break;
case MTD_FILE_MODE_RAW:
- if (!mtd->read_oob || !mtd->write_oob)
+ if (!mtd_has_oob(mtd))
return -EOPNOTSUPP;
mfi->mode = arg;
@@ -1014,7 +993,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
case BLKPG:
{
- ret = mtd_blkpg_ioctl(mtd,
+ ret = mtdchar_blkpg_ioctl(mtd,
(struct blkpg_ioctl_arg __user *)arg);
break;
}
@@ -1033,12 +1012,12 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
return ret;
} /* memory_ioctl */
-static long mtd_unlocked_ioctl(struct file *file, u_int cmd, u_long arg)
+static long mtdchar_unlocked_ioctl(struct file *file, u_int cmd, u_long arg)
{
int ret;
mutex_lock(&mtd_mutex);
- ret = mtd_ioctl(file, cmd, arg);
+ ret = mtdchar_ioctl(file, cmd, arg);
mutex_unlock(&mtd_mutex);
return ret;
@@ -1055,7 +1034,7 @@ struct mtd_oob_buf32 {
#define MEMWRITEOOB32 _IOWR('M', 3, struct mtd_oob_buf32)
#define MEMREADOOB32 _IOWR('M', 4, struct mtd_oob_buf32)
-static long mtd_compat_ioctl(struct file *file, unsigned int cmd,
+static long mtdchar_compat_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct mtd_file_info *mfi = file->private_data;
@@ -1074,7 +1053,7 @@ static long mtd_compat_ioctl(struct file *file, unsigned int cmd,
if (copy_from_user(&buf, argp, sizeof(buf)))
ret = -EFAULT;
else
- ret = mtd_do_writeoob(file, mtd, buf.start,
+ ret = mtdchar_writeoob(file, mtd, buf.start,
buf.length, compat_ptr(buf.ptr),
&buf_user->length);
break;
@@ -1089,13 +1068,13 @@ static long mtd_compat_ioctl(struct file *file, unsigned int cmd,
if (copy_from_user(&buf, argp, sizeof(buf)))
ret = -EFAULT;
else
- ret = mtd_do_readoob(file, mtd, buf.start,
+ ret = mtdchar_readoob(file, mtd, buf.start,
buf.length, compat_ptr(buf.ptr),
&buf_user->start);
break;
}
default:
- ret = mtd_ioctl(file, cmd, (unsigned long)argp);
+ ret = mtdchar_ioctl(file, cmd, (unsigned long)argp);
}
mutex_unlock(&mtd_mutex);
@@ -1111,7 +1090,7 @@ static long mtd_compat_ioctl(struct file *file, unsigned int cmd,
* mappings)
*/
#ifndef CONFIG_MMU
-static unsigned long mtd_get_unmapped_area(struct file *file,
+static unsigned long mtdchar_get_unmapped_area(struct file *file,
unsigned long addr,
unsigned long len,
unsigned long pgoff,
@@ -1119,32 +1098,28 @@ static unsigned long mtd_get_unmapped_area(struct file *file,
{
struct mtd_file_info *mfi = file->private_data;
struct mtd_info *mtd = mfi->mtd;
+ unsigned long offset;
+ int ret;
- if (mtd->get_unmapped_area) {
- unsigned long offset;
-
- if (addr != 0)
- return (unsigned long) -EINVAL;
-
- if (len > mtd->size || pgoff >= (mtd->size >> PAGE_SHIFT))
- return (unsigned long) -EINVAL;
+ if (addr != 0)
+ return (unsigned long) -EINVAL;
- offset = pgoff << PAGE_SHIFT;
- if (offset > mtd->size - len)
- return (unsigned long) -EINVAL;
+ if (len > mtd->size || pgoff >= (mtd->size >> PAGE_SHIFT))
+ return (unsigned long) -EINVAL;
- return mtd->get_unmapped_area(mtd, len, offset, flags);
- }
+ offset = pgoff << PAGE_SHIFT;
+ if (offset > mtd->size - len)
+ return (unsigned long) -EINVAL;
- /* can't map directly */
- return (unsigned long) -ENOSYS;
+ ret = mtd_get_unmapped_area(mtd, len, offset, flags);
+ return ret == -EOPNOTSUPP ? -ENOSYS : ret;
}
#endif
/*
* set up a mapping for shared memory segments
*/
-static int mtd_mmap(struct file *file, struct vm_area_struct *vma)
+static int mtdchar_mmap(struct file *file, struct vm_area_struct *vma)
{
#ifdef CONFIG_MMU
struct mtd_file_info *mfi = file->private_data;
@@ -1185,18 +1160,18 @@ static int mtd_mmap(struct file *file, struct vm_area_struct *vma)
static const struct file_operations mtd_fops = {
.owner = THIS_MODULE,
- .llseek = mtd_lseek,
- .read = mtd_read,
- .write = mtd_write,
- .unlocked_ioctl = mtd_unlocked_ioctl,
+ .llseek = mtdchar_lseek,
+ .read = mtdchar_read,
+ .write = mtdchar_write,
+ .unlocked_ioctl = mtdchar_unlocked_ioctl,
#ifdef CONFIG_COMPAT
- .compat_ioctl = mtd_compat_ioctl,
+ .compat_ioctl = mtdchar_compat_ioctl,
#endif
- .open = mtd_open,
- .release = mtd_close,
- .mmap = mtd_mmap,
+ .open = mtdchar_open,
+ .release = mtdchar_close,
+ .mmap = mtdchar_mmap,
#ifndef CONFIG_MMU
- .get_unmapped_area = mtd_get_unmapped_area,
+ .get_unmapped_area = mtdchar_get_unmapped_area,
#endif
};
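
The mtdchar ioctl paths above drop their open-coded "is the method implemented?" checks and call the new mtd_*() wrappers instead. A minimal sketch of what such a wrapper is assumed to look like, based on the checks that were removed (the authoritative definitions live in include/linux/mtd/mtd.h; this is illustrative only):

static inline int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
    if (!mtd->lock)
        return -EOPNOTSUPP;    /* same error the ioctl handler used to return inline */
    return mtd->lock(mtd, ofs, len);
}

Centralising the capability check in one place is what lets every caller in this patch collapse an if/else into a single wrapper call.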
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
index 6df4d4d4eb92..1ed5103b219b 100644
--- a/drivers/mtd/mtdconcat.c
+++ b/drivers/mtd/mtdconcat.c
@@ -91,7 +91,7 @@ concat_read(struct mtd_info *mtd, loff_t from, size_t len,
/* Entire transaction goes into this subdev */
size = len;
- err = subdev->read(subdev, from, size, &retsize, buf);
+ err = mtd_read(subdev, from, size, &retsize, buf);
/* Save information about bitflips! */
if (unlikely(err)) {
@@ -148,7 +148,7 @@ concat_write(struct mtd_info *mtd, loff_t to, size_t len,
if (!(subdev->flags & MTD_WRITEABLE))
err = -EROFS;
else
- err = subdev->write(subdev, to, size, &retsize, buf);
+ err = mtd_write(subdev, to, size, &retsize, buf);
if (err)
break;
@@ -227,8 +227,9 @@ concat_writev(struct mtd_info *mtd, const struct kvec *vecs,
if (!(subdev->flags & MTD_WRITEABLE))
err = -EROFS;
else
- err = subdev->writev(subdev, &vecs_copy[entry_low],
- entry_high - entry_low + 1, to, &retsize);
+ err = mtd_writev(subdev, &vecs_copy[entry_low],
+ entry_high - entry_low + 1, to,
+ &retsize);
vecs_copy[entry_high].iov_len = old_iov_len - size;
vecs_copy[entry_high].iov_base += size;
@@ -273,7 +274,7 @@ concat_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
if (from + devops.len > subdev->size)
devops.len = subdev->size - from;
- err = subdev->read_oob(subdev, from, &devops);
+ err = mtd_read_oob(subdev, from, &devops);
ops->retlen += devops.retlen;
ops->oobretlen += devops.oobretlen;
@@ -333,7 +334,7 @@ concat_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
if (to + devops.len > subdev->size)
devops.len = subdev->size - to;
- err = subdev->write_oob(subdev, to, &devops);
+ err = mtd_write_oob(subdev, to, &devops);
ops->retlen += devops.oobretlen;
if (err)
return err;
@@ -379,7 +380,7 @@ static int concat_dev_erase(struct mtd_info *mtd, struct erase_info *erase)
* FIXME: Allow INTERRUPTIBLE. Which means
* not having the wait_queue head on the stack.
*/
- err = mtd->erase(mtd, erase);
+ err = mtd_erase(mtd, erase);
if (!err) {
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&waitq, &wait);
@@ -554,12 +555,9 @@ static int concat_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
else
size = len;
- if (subdev->lock) {
- err = subdev->lock(subdev, ofs, size);
- if (err)
- break;
- } else
- err = -EOPNOTSUPP;
+ err = mtd_lock(subdev, ofs, size);
+ if (err)
+ break;
len -= size;
if (len == 0)
@@ -594,12 +592,9 @@ static int concat_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
else
size = len;
- if (subdev->unlock) {
- err = subdev->unlock(subdev, ofs, size);
- if (err)
- break;
- } else
- err = -EOPNOTSUPP;
+ err = mtd_unlock(subdev, ofs, size);
+ if (err)
+ break;
len -= size;
if (len == 0)
@@ -619,7 +614,7 @@ static void concat_sync(struct mtd_info *mtd)
for (i = 0; i < concat->num_subdev; i++) {
struct mtd_info *subdev = concat->subdev[i];
- subdev->sync(subdev);
+ mtd_sync(subdev);
}
}
@@ -630,7 +625,7 @@ static int concat_suspend(struct mtd_info *mtd)
for (i = 0; i < concat->num_subdev; i++) {
struct mtd_info *subdev = concat->subdev[i];
- if ((rc = subdev->suspend(subdev)) < 0)
+ if ((rc = mtd_suspend(subdev)) < 0)
return rc;
}
return rc;
@@ -643,7 +638,7 @@ static void concat_resume(struct mtd_info *mtd)
for (i = 0; i < concat->num_subdev; i++) {
struct mtd_info *subdev = concat->subdev[i];
- subdev->resume(subdev);
+ mtd_resume(subdev);
}
}
@@ -652,7 +647,7 @@ static int concat_block_isbad(struct mtd_info *mtd, loff_t ofs)
struct mtd_concat *concat = CONCAT(mtd);
int i, res = 0;
- if (!concat->subdev[0]->block_isbad)
+ if (!mtd_can_have_bb(concat->subdev[0]))
return res;
if (ofs > mtd->size)
@@ -666,7 +661,7 @@ static int concat_block_isbad(struct mtd_info *mtd, loff_t ofs)
continue;
}
- res = subdev->block_isbad(subdev, ofs);
+ res = mtd_block_isbad(subdev, ofs);
break;
}
@@ -678,7 +673,7 @@ static int concat_block_markbad(struct mtd_info *mtd, loff_t ofs)
struct mtd_concat *concat = CONCAT(mtd);
int i, err = -EINVAL;
- if (!concat->subdev[0]->block_markbad)
+ if (!mtd_can_have_bb(concat->subdev[0]))
return 0;
if (ofs > mtd->size)
@@ -692,7 +687,7 @@ static int concat_block_markbad(struct mtd_info *mtd, loff_t ofs)
continue;
}
- err = subdev->block_markbad(subdev, ofs);
+ err = mtd_block_markbad(subdev, ofs);
if (!err)
mtd->ecc_stats.badblocks++;
break;
@@ -725,11 +720,7 @@ static unsigned long concat_get_unmapped_area(struct mtd_info *mtd,
if (offset + len > subdev->size)
return (unsigned long) -EINVAL;
- if (subdev->get_unmapped_area)
- return subdev->get_unmapped_area(subdev, len, offset,
- flags);
-
- break;
+ return mtd_get_unmapped_area(subdev, len, offset, flags);
}
return (unsigned long) -ENOSYS;
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index b01993ea260e..6ae9ca01388b 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -107,7 +107,8 @@ static LIST_HEAD(mtd_notifiers);
*/
static void mtd_release(struct device *dev)
{
- dev_t index = MTD_DEVT(dev_to_mtd(dev)->index);
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+ dev_t index = MTD_DEVT(mtd->index);
/* remove /dev/mtdXro node if needed */
if (index)
@@ -116,27 +117,24 @@ static void mtd_release(struct device *dev)
static int mtd_cls_suspend(struct device *dev, pm_message_t state)
{
- struct mtd_info *mtd = dev_to_mtd(dev);
+ struct mtd_info *mtd = dev_get_drvdata(dev);
- if (mtd && mtd->suspend)
- return mtd->suspend(mtd);
- else
- return 0;
+ return mtd_suspend(mtd);
}
static int mtd_cls_resume(struct device *dev)
{
- struct mtd_info *mtd = dev_to_mtd(dev);
-
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+
if (mtd && mtd->resume)
- mtd->resume(mtd);
+ mtd_resume(mtd);
return 0;
}
static ssize_t mtd_type_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct mtd_info *mtd = dev_to_mtd(dev);
+ struct mtd_info *mtd = dev_get_drvdata(dev);
char *type;
switch (mtd->type) {
@@ -172,7 +170,7 @@ static DEVICE_ATTR(type, S_IRUGO, mtd_type_show, NULL);
static ssize_t mtd_flags_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct mtd_info *mtd = dev_to_mtd(dev);
+ struct mtd_info *mtd = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE, "0x%lx\n", (unsigned long)mtd->flags);
@@ -182,7 +180,7 @@ static DEVICE_ATTR(flags, S_IRUGO, mtd_flags_show, NULL);
static ssize_t mtd_size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct mtd_info *mtd = dev_to_mtd(dev);
+ struct mtd_info *mtd = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE, "%llu\n",
(unsigned long long)mtd->size);
@@ -193,7 +191,7 @@ static DEVICE_ATTR(size, S_IRUGO, mtd_size_show, NULL);
static ssize_t mtd_erasesize_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct mtd_info *mtd = dev_to_mtd(dev);
+ struct mtd_info *mtd = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->erasesize);
@@ -203,7 +201,7 @@ static DEVICE_ATTR(erasesize, S_IRUGO, mtd_erasesize_show, NULL);
static ssize_t mtd_writesize_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct mtd_info *mtd = dev_to_mtd(dev);
+ struct mtd_info *mtd = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->writesize);
@@ -213,7 +211,7 @@ static DEVICE_ATTR(writesize, S_IRUGO, mtd_writesize_show, NULL);
static ssize_t mtd_subpagesize_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct mtd_info *mtd = dev_to_mtd(dev);
+ struct mtd_info *mtd = dev_get_drvdata(dev);
unsigned int subpagesize = mtd->writesize >> mtd->subpage_sft;
return snprintf(buf, PAGE_SIZE, "%u\n", subpagesize);
@@ -224,7 +222,7 @@ static DEVICE_ATTR(subpagesize, S_IRUGO, mtd_subpagesize_show, NULL);
static ssize_t mtd_oobsize_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct mtd_info *mtd = dev_to_mtd(dev);
+ struct mtd_info *mtd = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->oobsize);
@@ -234,7 +232,7 @@ static DEVICE_ATTR(oobsize, S_IRUGO, mtd_oobsize_show, NULL);
static ssize_t mtd_numeraseregions_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct mtd_info *mtd = dev_to_mtd(dev);
+ struct mtd_info *mtd = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE, "%u\n", mtd->numeraseregions);
@@ -245,7 +243,7 @@ static DEVICE_ATTR(numeraseregions, S_IRUGO, mtd_numeraseregions_show,
static ssize_t mtd_name_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct mtd_info *mtd = dev_to_mtd(dev);
+ struct mtd_info *mtd = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE, "%s\n", mtd->name);
@@ -338,9 +336,9 @@ int add_mtd_device(struct mtd_info *mtd)
mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;
/* Some chips always power up locked. Unlock them now */
- if ((mtd->flags & MTD_WRITEABLE)
- && (mtd->flags & MTD_POWERUP_LOCK) && mtd->unlock) {
- if (mtd->unlock(mtd, 0, mtd->size))
+ if ((mtd->flags & MTD_WRITEABLE) && (mtd->flags & MTD_POWERUP_LOCK)) {
+ error = mtd_unlock(mtd, 0, mtd->size);
+ if (error && error != -EOPNOTSUPP)
printk(KERN_WARNING
"%s: unlock failed, writes may not work\n",
mtd->name);
@@ -516,7 +514,6 @@ EXPORT_SYMBOL_GPL(mtd_device_unregister);
* or removal of MTD devices. Causes the 'add' callback to be immediately
* invoked for each MTD device currently present in the system.
*/
-
void register_mtd_user (struct mtd_notifier *new)
{
struct mtd_info *mtd;
@@ -532,6 +529,7 @@ void register_mtd_user (struct mtd_notifier *new)
mutex_unlock(&mtd_table_mutex);
}
+EXPORT_SYMBOL_GPL(register_mtd_user);
/**
* unregister_mtd_user - unregister a 'user' of MTD devices.
@@ -542,7 +540,6 @@ void register_mtd_user (struct mtd_notifier *new)
* 'remove' callback to be immediately invoked for each MTD device
* currently present in the system.
*/
-
int unregister_mtd_user (struct mtd_notifier *old)
{
struct mtd_info *mtd;
@@ -558,7 +555,7 @@ int unregister_mtd_user (struct mtd_notifier *old)
mutex_unlock(&mtd_table_mutex);
return 0;
}
-
+EXPORT_SYMBOL_GPL(unregister_mtd_user);
/**
* get_mtd_device - obtain a validated handle for an MTD device
@@ -571,7 +568,6 @@ int unregister_mtd_user (struct mtd_notifier *old)
* both, return the num'th driver only if its address matches. Return
* error code if not.
*/
-
struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
{
struct mtd_info *ret = NULL, *other;
@@ -604,6 +600,7 @@ out:
mutex_unlock(&mtd_table_mutex);
return ret;
}
+EXPORT_SYMBOL_GPL(get_mtd_device);
int __get_mtd_device(struct mtd_info *mtd)
@@ -624,6 +621,7 @@ int __get_mtd_device(struct mtd_info *mtd)
mtd->usecount++;
return 0;
}
+EXPORT_SYMBOL_GPL(__get_mtd_device);
/**
* get_mtd_device_nm - obtain a validated handle for an MTD device by
@@ -633,7 +631,6 @@ int __get_mtd_device(struct mtd_info *mtd)
* This function returns MTD device description structure in case of
* success and an error code in case of failure.
*/
-
struct mtd_info *get_mtd_device_nm(const char *name)
{
int err = -ENODEV;
@@ -662,6 +659,7 @@ out_unlock:
mutex_unlock(&mtd_table_mutex);
return ERR_PTR(err);
}
+EXPORT_SYMBOL_GPL(get_mtd_device_nm);
void put_mtd_device(struct mtd_info *mtd)
{
@@ -670,6 +668,7 @@ void put_mtd_device(struct mtd_info *mtd)
mutex_unlock(&mtd_table_mutex);
}
+EXPORT_SYMBOL_GPL(put_mtd_device);
void __put_mtd_device(struct mtd_info *mtd)
{
@@ -681,39 +680,65 @@ void __put_mtd_device(struct mtd_info *mtd)
module_put(mtd->owner);
}
+EXPORT_SYMBOL_GPL(__put_mtd_device);
-/* default_mtd_writev - default mtd writev method for MTD devices that
- * don't implement their own
+/*
+ * default_mtd_writev - the default writev method
+ * @mtd: mtd device description object pointer
+ * @vecs: the vectors to write
+ * @count: count of vectors in @vecs
+ * @to: the MTD device offset to write to
+ * @retlen: on exit contains the count of bytes written to the MTD device.
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
*/
-
-int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
- unsigned long count, loff_t to, size_t *retlen)
+static int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
+ unsigned long count, loff_t to, size_t *retlen)
{
unsigned long i;
size_t totlen = 0, thislen;
int ret = 0;
- if(!mtd->write) {
- ret = -EROFS;
- } else {
- for (i=0; i<count; i++) {
- if (!vecs[i].iov_len)
- continue;
- ret = mtd->write(mtd, to, vecs[i].iov_len, &thislen, vecs[i].iov_base);
- totlen += thislen;
- if (ret || thislen != vecs[i].iov_len)
- break;
- to += vecs[i].iov_len;
- }
+ for (i = 0; i < count; i++) {
+ if (!vecs[i].iov_len)
+ continue;
+ ret = mtd_write(mtd, to, vecs[i].iov_len, &thislen,
+ vecs[i].iov_base);
+ totlen += thislen;
+ if (ret || thislen != vecs[i].iov_len)
+ break;
+ to += vecs[i].iov_len;
}
- if (retlen)
- *retlen = totlen;
+ *retlen = totlen;
return ret;
}
+/*
+ * mtd_writev - the vector-based MTD write method
+ * @mtd: mtd device description object pointer
+ * @vecs: the vectors to write
+ * @count: count of vectors in @vecs
+ * @to: the MTD device offset to write to
+ * @retlen: on exit contains the count of bytes written to the MTD device.
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
+ unsigned long count, loff_t to, size_t *retlen)
+{
+ *retlen = 0;
+ if (!mtd->writev)
+ return default_mtd_writev(mtd, vecs, count, to, retlen);
+ return mtd->writev(mtd, vecs, count, to, retlen);
+}
+EXPORT_SYMBOL_GPL(mtd_writev);
+
/**
* mtd_kmalloc_up_to - allocate a contiguous buffer up to the specified size
- * @size: A pointer to the ideal or maximum size of the allocation. Points
+ * @mtd: mtd device description object pointer
+ * @size: a pointer to the ideal or maximum size of the allocation, points
* to the actual allocation size on success.
*
* This routine attempts to allocate a contiguous kernel buffer up to
@@ -758,15 +783,6 @@ void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
*/
return kmalloc(*size, GFP_KERNEL);
}
-
-EXPORT_SYMBOL_GPL(get_mtd_device);
-EXPORT_SYMBOL_GPL(get_mtd_device_nm);
-EXPORT_SYMBOL_GPL(__get_mtd_device);
-EXPORT_SYMBOL_GPL(put_mtd_device);
-EXPORT_SYMBOL_GPL(__put_mtd_device);
-EXPORT_SYMBOL_GPL(register_mtd_user);
-EXPORT_SYMBOL_GPL(unregister_mtd_user);
-EXPORT_SYMBOL_GPL(default_mtd_writev);
EXPORT_SYMBOL_GPL(mtd_kmalloc_up_to);
#ifdef CONFIG_PROC_FS
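
mtd_writev() is now the exported entry point and falls back to default_mtd_writev() when a driver provides no ->writev method. A hypothetical caller, using the signature defined above (hdr, body, their lengths and the offset `to` are made-up names for illustration), would look roughly like:

    struct kvec vecs[2] = {
        { .iov_base = hdr,  .iov_len = hdr_len  },   /* hypothetical header buffer  */
        { .iov_base = body, .iov_len = body_len },   /* hypothetical payload buffer */
    };
    size_t written;
    int err = mtd_writev(mtd, vecs, 2, to, &written);

    if (err)
        pr_err("vectored write failed: %d\n", err);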
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index 1e2fa6236705..3ce99e00a49e 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -112,7 +112,7 @@ static int mtdoops_erase_block(struct mtdoops_context *cxt, int offset)
set_current_state(TASK_INTERRUPTIBLE);
add_wait_queue(&wait_q, &wait);
- ret = mtd->erase(mtd, &erase);
+ ret = mtd_erase(mtd, &erase);
if (ret) {
set_current_state(TASK_RUNNING);
remove_wait_queue(&wait_q, &wait);
@@ -169,8 +169,8 @@ static void mtdoops_workfunc_erase(struct work_struct *work)
cxt->nextpage = 0;
}
- while (mtd->block_isbad) {
- ret = mtd->block_isbad(mtd, cxt->nextpage * record_size);
+ while (mtd_can_have_bb(mtd)) {
+ ret = mtd_block_isbad(mtd, cxt->nextpage * record_size);
if (!ret)
break;
if (ret < 0) {
@@ -199,8 +199,8 @@ badblock:
return;
}
- if (mtd->block_markbad && ret == -EIO) {
- ret = mtd->block_markbad(mtd, cxt->nextpage * record_size);
+ if (mtd_can_have_bb(mtd) && ret == -EIO) {
+ ret = mtd_block_markbad(mtd, cxt->nextpage * record_size);
if (ret < 0) {
printk(KERN_ERR "mtdoops: block_markbad failed, aborting\n");
return;
@@ -221,12 +221,16 @@ static void mtdoops_write(struct mtdoops_context *cxt, int panic)
hdr[0] = cxt->nextcount;
hdr[1] = MTDOOPS_KERNMSG_MAGIC;
- if (panic)
- ret = mtd->panic_write(mtd, cxt->nextpage * record_size,
- record_size, &retlen, cxt->oops_buf);
- else
- ret = mtd->write(mtd, cxt->nextpage * record_size,
- record_size, &retlen, cxt->oops_buf);
+ if (panic) {
+ ret = mtd_panic_write(mtd, cxt->nextpage * record_size,
+ record_size, &retlen, cxt->oops_buf);
+ if (ret == -EOPNOTSUPP) {
+ printk(KERN_ERR "mtdoops: Cannot write from panic without panic_write\n");
+ return;
+ }
+ } else
+ ret = mtd_write(mtd, cxt->nextpage * record_size,
+ record_size, &retlen, cxt->oops_buf);
if (retlen != record_size || ret < 0)
printk(KERN_ERR "mtdoops: write failure at %ld (%td of %ld written), error %d\n",
@@ -253,10 +257,13 @@ static void find_next_position(struct mtdoops_context *cxt)
size_t retlen;
for (page = 0; page < cxt->oops_pages; page++) {
+ if (mtd_can_have_bb(mtd) &&
+ mtd_block_isbad(mtd, page * record_size))
+ continue;
/* Assume the page is used */
mark_page_used(cxt, page);
- ret = mtd->read(mtd, page * record_size, MTDOOPS_HEADER_SIZE,
- &retlen, (u_char *) &count[0]);
+ ret = mtd_read(mtd, page * record_size, MTDOOPS_HEADER_SIZE,
+ &retlen, (u_char *)&count[0]);
if (retlen != MTDOOPS_HEADER_SIZE ||
(ret < 0 && !mtd_is_bitflip(ret))) {
printk(KERN_ERR "mtdoops: read failure at %ld (%td of %d read), err %d\n",
@@ -308,8 +315,7 @@ static void mtdoops_do_dump(struct kmsg_dumper *dumper,
char *dst;
if (reason != KMSG_DUMP_OOPS &&
- reason != KMSG_DUMP_PANIC &&
- reason != KMSG_DUMP_KEXEC)
+ reason != KMSG_DUMP_PANIC)
return;
/* Only dump oopses if dump_oops is set */
@@ -327,13 +333,8 @@ static void mtdoops_do_dump(struct kmsg_dumper *dumper,
memcpy(dst + l1_cpy, s2 + s2_start, l2_cpy);
/* Panics must be written immediately */
- if (reason != KMSG_DUMP_OOPS) {
- if (!cxt->mtd->panic_write)
- printk(KERN_ERR "mtdoops: Cannot write from panic without panic_write\n");
- else
- mtdoops_write(cxt, 1);
- return;
- }
+ if (reason != KMSG_DUMP_OOPS)
+ mtdoops_write(cxt, 1);
/* For other cases, schedule work to write it "nicely" */
schedule_work(&cxt->work_write);
@@ -369,7 +370,7 @@ static void mtdoops_notify_add(struct mtd_info *mtd)
/* oops_page_used is a bit field */
cxt->oops_page_used = vmalloc(DIV_ROUND_UP(mtdoops_pages,
- BITS_PER_LONG));
+ BITS_PER_LONG) * sizeof(unsigned long));
if (!cxt->oops_page_used) {
printk(KERN_ERR "mtdoops: could not allocate page array\n");
return;
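
The bad-block handling above now goes through mtd_can_have_bb() plus mtd_block_isbad()/mtd_block_markbad(). Judging from the call sites that were replaced, mtd_can_have_bb() is assumed to be a thin predicate along these lines (sketch only, not the authoritative definition):

static inline int mtd_can_have_bb(struct mtd_info *mtd)
{
    /* a device supports bad-block bookkeeping iff it implements block_isbad() */
    return !!mtd->block_isbad;
}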
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index a0bd2de4752b..a3d44c3416b4 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -70,8 +70,7 @@ static int part_read(struct mtd_info *mtd, loff_t from, size_t len,
len = 0;
else if (from + len > mtd->size)
len = mtd->size - from;
- res = part->master->read(part->master, from + part->offset,
- len, retlen, buf);
+ res = mtd_read(part->master, from + part->offset, len, retlen, buf);
if (unlikely(res)) {
if (mtd_is_bitflip(res))
mtd->ecc_stats.corrected += part->master->ecc_stats.corrected - stats.corrected;
@@ -89,15 +88,15 @@ static int part_point(struct mtd_info *mtd, loff_t from, size_t len,
len = 0;
else if (from + len > mtd->size)
len = mtd->size - from;
- return part->master->point (part->master, from + part->offset,
- len, retlen, virt, phys);
+ return mtd_point(part->master, from + part->offset, len, retlen,
+ virt, phys);
}
static void part_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
struct mtd_part *part = PART(mtd);
- part->master->unpoint(part->master, from + part->offset, len);
+ mtd_unpoint(part->master, from + part->offset, len);
}
static unsigned long part_get_unmapped_area(struct mtd_info *mtd,
@@ -108,8 +107,7 @@ static unsigned long part_get_unmapped_area(struct mtd_info *mtd,
struct mtd_part *part = PART(mtd);
offset += part->offset;
- return part->master->get_unmapped_area(part->master, len, offset,
- flags);
+ return mtd_get_unmapped_area(part->master, len, offset, flags);
}
static int part_read_oob(struct mtd_info *mtd, loff_t from,
@@ -140,7 +138,7 @@ static int part_read_oob(struct mtd_info *mtd, loff_t from,
return -EINVAL;
}
- res = part->master->read_oob(part->master, from + part->offset, ops);
+ res = mtd_read_oob(part->master, from + part->offset, ops);
if (unlikely(res)) {
if (mtd_is_bitflip(res))
mtd->ecc_stats.corrected++;
@@ -154,30 +152,28 @@ static int part_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
size_t len, size_t *retlen, u_char *buf)
{
struct mtd_part *part = PART(mtd);
- return part->master->read_user_prot_reg(part->master, from,
- len, retlen, buf);
+ return mtd_read_user_prot_reg(part->master, from, len, retlen, buf);
}
static int part_get_user_prot_info(struct mtd_info *mtd,
struct otp_info *buf, size_t len)
{
struct mtd_part *part = PART(mtd);
- return part->master->get_user_prot_info(part->master, buf, len);
+ return mtd_get_user_prot_info(part->master, buf, len);
}
static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
size_t len, size_t *retlen, u_char *buf)
{
struct mtd_part *part = PART(mtd);
- return part->master->read_fact_prot_reg(part->master, from,
- len, retlen, buf);
+ return mtd_read_fact_prot_reg(part->master, from, len, retlen, buf);
}
static int part_get_fact_prot_info(struct mtd_info *mtd, struct otp_info *buf,
size_t len)
{
struct mtd_part *part = PART(mtd);
- return part->master->get_fact_prot_info(part->master, buf, len);
+ return mtd_get_fact_prot_info(part->master, buf, len);
}
static int part_write(struct mtd_info *mtd, loff_t to, size_t len,
@@ -190,8 +186,7 @@ static int part_write(struct mtd_info *mtd, loff_t to, size_t len,
len = 0;
else if (to + len > mtd->size)
len = mtd->size - to;
- return part->master->write(part->master, to + part->offset,
- len, retlen, buf);
+ return mtd_write(part->master, to + part->offset, len, retlen, buf);
}
static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
@@ -204,8 +199,8 @@ static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
len = 0;
else if (to + len > mtd->size)
len = mtd->size - to;
- return part->master->panic_write(part->master, to + part->offset,
- len, retlen, buf);
+ return mtd_panic_write(part->master, to + part->offset, len, retlen,
+ buf);
}
static int part_write_oob(struct mtd_info *mtd, loff_t to,
@@ -220,22 +215,21 @@ static int part_write_oob(struct mtd_info *mtd, loff_t to,
return -EINVAL;
if (ops->datbuf && to + ops->len > mtd->size)
return -EINVAL;
- return part->master->write_oob(part->master, to + part->offset, ops);
+ return mtd_write_oob(part->master, to + part->offset, ops);
}
static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
size_t len, size_t *retlen, u_char *buf)
{
struct mtd_part *part = PART(mtd);
- return part->master->write_user_prot_reg(part->master, from,
- len, retlen, buf);
+ return mtd_write_user_prot_reg(part->master, from, len, retlen, buf);
}
static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
size_t len)
{
struct mtd_part *part = PART(mtd);
- return part->master->lock_user_prot_reg(part->master, from, len);
+ return mtd_lock_user_prot_reg(part->master, from, len);
}
static int part_writev(struct mtd_info *mtd, const struct kvec *vecs,
@@ -244,8 +238,8 @@ static int part_writev(struct mtd_info *mtd, const struct kvec *vecs,
struct mtd_part *part = PART(mtd);
if (!(mtd->flags & MTD_WRITEABLE))
return -EROFS;
- return part->master->writev(part->master, vecs, count,
- to + part->offset, retlen);
+ return mtd_writev(part->master, vecs, count, to + part->offset,
+ retlen);
}
static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
@@ -257,7 +251,7 @@ static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
if (instr->addr >= mtd->size)
return -EINVAL;
instr->addr += part->offset;
- ret = part->master->erase(part->master, instr);
+ ret = mtd_erase(part->master, instr);
if (ret) {
if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
instr->fail_addr -= part->offset;
@@ -285,7 +279,7 @@ static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
struct mtd_part *part = PART(mtd);
if ((len + ofs) > mtd->size)
return -EINVAL;
- return part->master->lock(part->master, ofs + part->offset, len);
+ return mtd_lock(part->master, ofs + part->offset, len);
}
static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
@@ -293,7 +287,7 @@ static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
struct mtd_part *part = PART(mtd);
if ((len + ofs) > mtd->size)
return -EINVAL;
- return part->master->unlock(part->master, ofs + part->offset, len);
+ return mtd_unlock(part->master, ofs + part->offset, len);
}
static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
@@ -301,25 +295,25 @@ static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
struct mtd_part *part = PART(mtd);
if ((len + ofs) > mtd->size)
return -EINVAL;
- return part->master->is_locked(part->master, ofs + part->offset, len);
+ return mtd_is_locked(part->master, ofs + part->offset, len);
}
static void part_sync(struct mtd_info *mtd)
{
struct mtd_part *part = PART(mtd);
- part->master->sync(part->master);
+ mtd_sync(part->master);
}
static int part_suspend(struct mtd_info *mtd)
{
struct mtd_part *part = PART(mtd);
- return part->master->suspend(part->master);
+ return mtd_suspend(part->master);
}
static void part_resume(struct mtd_info *mtd)
{
struct mtd_part *part = PART(mtd);
- part->master->resume(part->master);
+ mtd_resume(part->master);
}
static int part_block_isbad(struct mtd_info *mtd, loff_t ofs)
@@ -328,7 +322,7 @@ static int part_block_isbad(struct mtd_info *mtd, loff_t ofs)
if (ofs >= mtd->size)
return -EINVAL;
ofs += part->offset;
- return part->master->block_isbad(part->master, ofs);
+ return mtd_block_isbad(part->master, ofs);
}
static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
@@ -341,7 +335,7 @@ static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
if (ofs >= mtd->size)
return -EINVAL;
ofs += part->offset;
- res = part->master->block_markbad(part->master, ofs);
+ res = mtd_block_markbad(part->master, ofs);
if (!res)
mtd->ecc_stats.badblocks++;
return res;
@@ -559,8 +553,7 @@ static struct mtd_part *allocate_partition(struct mtd_info *master,
uint64_t offs = 0;
while (offs < slave->mtd.size) {
- if (master->block_isbad(master,
- offs + slave->offset))
+ if (mtd_block_isbad(master, offs + slave->offset))
slave->mtd.ecc_stats.badblocks++;
offs += slave->mtd.erasesize;
}
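
Every partition method in mtdpart.c now follows the same shape: validate or clamp against the partition size, translate the offset by part->offset, and delegate to the master through an mtd_*() wrapper instead of dereferencing the master's function pointers. A stripped-down example of that pattern (clamping omitted for brevity; PART() is the accessor already used in this file):

static int part_read_sketch(struct mtd_info *mtd, loff_t from, size_t len,
                            size_t *retlen, u_char *buf)
{
    struct mtd_part *part = PART(mtd);

    /* partition-relative offset -> master-relative offset */
    return mtd_read(part->master, from + part->offset, len, retlen, buf);
}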
diff --git a/drivers/mtd/mtdswap.c b/drivers/mtd/mtdswap.c
index bd9590c723e4..c92f0f6bc130 100644
--- a/drivers/mtd/mtdswap.c
+++ b/drivers/mtd/mtdswap.c
@@ -274,12 +274,12 @@ static int mtdswap_handle_badblock(struct mtdswap_dev *d, struct swap_eb *eb)
eb->root = NULL;
/* badblocks not supported */
- if (!d->mtd->block_markbad)
+ if (!mtd_can_have_bb(d->mtd))
return 1;
offset = mtdswap_eb_offset(d, eb);
dev_warn(d->dev, "Marking bad block at %08llx\n", offset);
- ret = d->mtd->block_markbad(d->mtd, offset);
+ ret = mtd_block_markbad(d->mtd, offset);
if (ret) {
dev_warn(d->dev, "Mark block bad failed for block at %08llx "
@@ -312,7 +312,7 @@ static int mtdswap_handle_write_error(struct mtdswap_dev *d, struct swap_eb *eb)
static int mtdswap_read_oob(struct mtdswap_dev *d, loff_t from,
struct mtd_oob_ops *ops)
{
- int ret = d->mtd->read_oob(d->mtd, from, ops);
+ int ret = mtd_read_oob(d->mtd, from, ops);
if (mtd_is_bitflip(ret))
return ret;
@@ -343,7 +343,7 @@ static int mtdswap_read_markers(struct mtdswap_dev *d, struct swap_eb *eb)
offset = mtdswap_eb_offset(d, eb);
/* Check first if the block is bad. */
- if (d->mtd->block_isbad && d->mtd->block_isbad(d->mtd, offset))
+ if (mtd_can_have_bb(d->mtd) && mtd_block_isbad(d->mtd, offset))
return MTDSWAP_SCANNED_BAD;
ops.ooblen = 2 * d->mtd->ecclayout->oobavail;
@@ -403,7 +403,7 @@ static int mtdswap_write_marker(struct mtdswap_dev *d, struct swap_eb *eb,
offset = mtdswap_eb_offset(d, eb) + d->mtd->writesize;
}
- ret = d->mtd->write_oob(d->mtd, offset , &ops);
+ ret = mtd_write_oob(d->mtd, offset, &ops);
if (ret) {
dev_warn(d->dev, "Write OOB failed for block at %08llx "
@@ -567,7 +567,7 @@ retry:
erase.len = mtd->erasesize;
erase.priv = (u_long)&wq;
- ret = mtd->erase(mtd, &erase);
+ ret = mtd_erase(mtd, &erase);
if (ret) {
if (retries++ < MTDSWAP_ERASE_RETRIES) {
dev_warn(d->dev,
@@ -689,7 +689,7 @@ retry:
return ret;
writepos = (loff_t)*bp << PAGE_SHIFT;
- ret = mtd->write(mtd, writepos, PAGE_SIZE, &retlen, buf);
+ ret = mtd_write(mtd, writepos, PAGE_SIZE, &retlen, buf);
if (ret == -EIO || mtd_is_eccerr(ret)) {
d->curr_write_pos--;
eb->active_count--;
@@ -736,7 +736,7 @@ static int mtdswap_move_block(struct mtdswap_dev *d, unsigned int oldblock,
retries = 0;
retry:
- ret = mtd->read(mtd, readpos, PAGE_SIZE, &retlen, d->page_buf);
+ ret = mtd_read(mtd, readpos, PAGE_SIZE, &retlen, d->page_buf);
if (ret < 0 && !mtd_is_bitflip(ret)) {
oldeb = d->eb_data + oldblock / d->pages_per_eblk;
@@ -946,7 +946,7 @@ static unsigned int mtdswap_eblk_passes(struct mtdswap_dev *d,
patt = mtdswap_test_patt(test + i);
memset(d->page_buf, patt, mtd->writesize);
memset(d->oob_buf, patt, mtd->ecclayout->oobavail);
- ret = mtd->write_oob(mtd, pos, &ops);
+ ret = mtd_write_oob(mtd, pos, &ops);
if (ret)
goto error;
@@ -955,7 +955,7 @@ static unsigned int mtdswap_eblk_passes(struct mtdswap_dev *d,
pos = base;
for (i = 0; i < mtd_pages; i++) {
- ret = mtd->read_oob(mtd, pos, &ops);
+ ret = mtd_read_oob(mtd, pos, &ops);
if (ret)
goto error;
@@ -1047,8 +1047,7 @@ static int mtdswap_flush(struct mtd_blktrans_dev *dev)
{
struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);
- if (d->mtd->sync)
- d->mtd->sync(d->mtd);
+ mtd_sync(d->mtd);
return 0;
}
@@ -1059,9 +1058,9 @@ static unsigned int mtdswap_badblocks(struct mtd_info *mtd, uint64_t size)
badcnt = 0;
- if (mtd->block_isbad)
+ if (mtd_can_have_bb(mtd))
for (offset = 0; offset < size; offset += mtd->erasesize)
- if (mtd->block_isbad(mtd, offset))
+ if (mtd_block_isbad(mtd, offset))
badcnt++;
return badcnt;
@@ -1161,7 +1160,7 @@ static int mtdswap_readsect(struct mtd_blktrans_dev *dev,
retries = 0;
retry:
- ret = mtd->read(mtd, readpos, PAGE_SIZE, &retlen, buf);
+ ret = mtd_read(mtd, readpos, PAGE_SIZE, &retlen, buf);
d->mtd_read_count++;
if (mtd_is_bitflip(ret)) {
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index cce7b70824c3..31b034b7eba3 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -110,7 +110,7 @@ config MTD_NAND_AMS_DELTA
config MTD_NAND_OMAP2
tristate "NAND Flash device on OMAP2, OMAP3 and OMAP4"
- depends on ARM && (ARCH_OMAP2 || ARCH_OMAP3 || ARCH_OMAP4)
+ depends on ARCH_OMAP2PLUS
help
Support for NAND flash on Texas Instruments OMAP2, OMAP3 and OMAP4
platforms.
@@ -420,7 +420,6 @@ config MTD_NAND_NANDSIM
config MTD_NAND_GPMI_NAND
bool "GPMI NAND Flash Controller driver"
depends on MTD_NAND && (SOC_IMX23 || SOC_IMX28)
- select MTD_PARTITIONS
select MTD_CMDLINE_PARTS
help
Enables NAND Flash support for IMX23 or IMX28.
diff --git a/drivers/mtd/nand/ams-delta.c b/drivers/mtd/nand/ams-delta.c
index 9e6b498c9beb..3197e9764fcd 100644
--- a/drivers/mtd/nand/ams-delta.c
+++ b/drivers/mtd/nand/ams-delta.c
@@ -280,17 +280,7 @@ static struct platform_driver ams_delta_nand_driver = {
},
};
-static int __init ams_delta_nand_init(void)
-{
- return platform_driver_register(&ams_delta_nand_driver);
-}
-module_init(ams_delta_nand_init);
-
-static void __exit ams_delta_nand_exit(void)
-{
- platform_driver_unregister(&ams_delta_nand_driver);
-}
-module_exit(ams_delta_nand_exit);
+module_platform_driver(ams_delta_nand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jonathan McDowell <noodles@earth.li>");
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index 23e5d77c39fc..4dd056e2e16a 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -113,7 +113,7 @@ static int cpu_has_dma(void)
*/
static void atmel_nand_enable(struct atmel_nand_host *host)
{
- if (host->board->enable_pin)
+ if (gpio_is_valid(host->board->enable_pin))
gpio_set_value(host->board->enable_pin, 0);
}
@@ -122,7 +122,7 @@ static void atmel_nand_enable(struct atmel_nand_host *host)
*/
static void atmel_nand_disable(struct atmel_nand_host *host)
{
- if (host->board->enable_pin)
+ if (gpio_is_valid(host->board->enable_pin))
gpio_set_value(host->board->enable_pin, 1);
}
@@ -492,7 +492,7 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
nand_chip->IO_ADDR_W = host->io_base;
nand_chip->cmd_ctrl = atmel_nand_cmd_ctrl;
- if (host->board->rdy_pin)
+ if (gpio_is_valid(host->board->rdy_pin))
nand_chip->dev_ready = atmel_nand_device_ready;
regs = platform_get_resource(pdev, IORESOURCE_MEM, 1);
@@ -530,7 +530,7 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, host);
atmel_nand_enable(host);
- if (host->board->det_pin) {
+ if (gpio_is_valid(host->board->det_pin)) {
if (gpio_get_value(host->board->det_pin)) {
printk(KERN_INFO "No SmartMedia card inserted.\n");
res = -ENXIO;
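
The atmel_nand changes switch the board-pin presence tests from plain truthiness to gpio_is_valid(), since GPIO 0 is a legal pin and "not wired" should be expressed as a negative number in the board data. A small illustration (enable_pin is a hypothetical parameter, and the active-low polarity is taken from the driver above):

#include <linux/gpio.h>

static void nand_enable_example(int enable_pin)
{
    /* gpio_is_valid() is false for negative numbers, true for 0..N-1 */
    if (gpio_is_valid(enable_pin))
        gpio_set_value(enable_pin, 0);    /* assert the active-low enable */
}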
diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c
index 7dd3700f2303..73abbc3e093e 100644
--- a/drivers/mtd/nand/au1550nd.c
+++ b/drivers/mtd/nand/au1550nd.c
@@ -17,35 +17,19 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
+#include <linux/platform_device.h>
#include <asm/io.h>
+#include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-au1x00/au1550nd.h>
-#ifdef CONFIG_MIPS_PB1550
-#include <asm/mach-pb1x00/pb1550.h>
-#elif defined(CONFIG_MIPS_DB1550)
-#include <asm/mach-db1x00/db1x00.h>
-#endif
-#include <asm/mach-db1x00/bcsr.h>
-/*
- * MTD structure for NAND controller
- */
-static struct mtd_info *au1550_mtd = NULL;
-static void __iomem *p_nand;
-static int nand_width = 1; /* default x8 */
-static void (*au1550_write_byte)(struct mtd_info *, u_char);
+struct au1550nd_ctx {
+ struct mtd_info info;
+ struct nand_chip chip;
-/*
- * Define partitions for flash device
- */
-static const struct mtd_partition partition_info[] = {
- {
- .name = "NAND FS 0",
- .offset = 0,
- .size = 8 * 1024 * 1024},
- {
- .name = "NAND FS 1",
- .offset = MTDPART_OFS_APPEND,
- .size = MTDPART_SIZ_FULL}
+ int cs;
+ void __iomem *base;
+ void (*write_byte)(struct mtd_info *, u_char);
};
/**
@@ -259,24 +243,25 @@ static int au_verify_buf16(struct mtd_info *mtd, const u_char *buf, int len)
static void au1550_hwcontrol(struct mtd_info *mtd, int cmd)
{
- register struct nand_chip *this = mtd->priv;
+ struct au1550nd_ctx *ctx = container_of(mtd, struct au1550nd_ctx, info);
+ struct nand_chip *this = mtd->priv;
switch (cmd) {
case NAND_CTL_SETCLE:
- this->IO_ADDR_W = p_nand + MEM_STNAND_CMD;
+ this->IO_ADDR_W = ctx->base + MEM_STNAND_CMD;
break;
case NAND_CTL_CLRCLE:
- this->IO_ADDR_W = p_nand + MEM_STNAND_DATA;
+ this->IO_ADDR_W = ctx->base + MEM_STNAND_DATA;
break;
case NAND_CTL_SETALE:
- this->IO_ADDR_W = p_nand + MEM_STNAND_ADDR;
+ this->IO_ADDR_W = ctx->base + MEM_STNAND_ADDR;
break;
case NAND_CTL_CLRALE:
- this->IO_ADDR_W = p_nand + MEM_STNAND_DATA;
+ this->IO_ADDR_W = ctx->base + MEM_STNAND_DATA;
/* FIXME: Nobody knows why this is necessary,
* but it works only that way */
udelay(1);
@@ -284,7 +269,7 @@ static void au1550_hwcontrol(struct mtd_info *mtd, int cmd)
case NAND_CTL_SETNCE:
/* assert (force assert) chip enable */
- au_writel((1 << (4 + NAND_CS)), MEM_STNDCTL);
+ au_writel((1 << (4 + ctx->cs)), MEM_STNDCTL);
break;
case NAND_CTL_CLRNCE:
@@ -331,9 +316,10 @@ static void au1550_select_chip(struct mtd_info *mtd, int chip)
*/
static void au1550_command(struct mtd_info *mtd, unsigned command, int column, int page_addr)
{
- register struct nand_chip *this = mtd->priv;
+ struct au1550nd_ctx *ctx = container_of(mtd, struct au1550nd_ctx, info);
+ struct nand_chip *this = mtd->priv;
int ce_override = 0, i;
- ulong flags;
+ unsigned long flags = 0;
/* Begin command latch cycle */
au1550_hwcontrol(mtd, NAND_CTL_SETCLE);
@@ -354,9 +340,9 @@ static void au1550_command(struct mtd_info *mtd, unsigned command, int column, i
column -= 256;
readcmd = NAND_CMD_READ1;
}
- au1550_write_byte(mtd, readcmd);
+ ctx->write_byte(mtd, readcmd);
}
- au1550_write_byte(mtd, command);
+ ctx->write_byte(mtd, command);
/* Set ALE and clear CLE to start address cycle */
au1550_hwcontrol(mtd, NAND_CTL_CLRCLE);
@@ -369,10 +355,10 @@ static void au1550_command(struct mtd_info *mtd, unsigned command, int column, i
/* Adjust columns for 16 bit buswidth */
if (this->options & NAND_BUSWIDTH_16)
column >>= 1;
- au1550_write_byte(mtd, column);
+ ctx->write_byte(mtd, column);
}
if (page_addr != -1) {
- au1550_write_byte(mtd, (u8)(page_addr & 0xff));
+ ctx->write_byte(mtd, (u8)(page_addr & 0xff));
if (command == NAND_CMD_READ0 ||
command == NAND_CMD_READ1 ||
@@ -390,11 +376,12 @@ static void au1550_command(struct mtd_info *mtd, unsigned command, int column, i
au1550_hwcontrol(mtd, NAND_CTL_SETNCE);
}
- au1550_write_byte(mtd, (u8)(page_addr >> 8));
+ ctx->write_byte(mtd, (u8)(page_addr >> 8));
/* One more address cycle for devices > 32MiB */
if (this->chipsize > (32 << 20))
- au1550_write_byte(mtd, (u8)((page_addr >> 16) & 0x0f));
+ ctx->write_byte(mtd,
+ ((page_addr >> 16) & 0x0f));
}
/* Latch in address */
au1550_hwcontrol(mtd, NAND_CTL_CLRALE);
@@ -440,121 +427,79 @@ static void au1550_command(struct mtd_info *mtd, unsigned command, int column, i
while(!this->dev_ready(mtd));
}
-
-/*
- * Main initialization routine
- */
-static int __init au1xxx_nand_init(void)
+static int __devinit find_nand_cs(unsigned long nand_base)
{
- struct nand_chip *this;
- u16 boot_swapboot = 0; /* default value */
- int retval;
- u32 mem_staddr;
- u32 nand_phys;
-
- /* Allocate memory for MTD device structure and private data */
- au1550_mtd = kzalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
- if (!au1550_mtd) {
- printk("Unable to allocate NAND MTD dev structure.\n");
- return -ENOMEM;
- }
-
- /* Get pointer to private data */
- this = (struct nand_chip *)(&au1550_mtd[1]);
-
- /* Link the private data with the MTD structure */
- au1550_mtd->priv = this;
- au1550_mtd->owner = THIS_MODULE;
-
+ void __iomem *base =
+ (void __iomem *)KSEG1ADDR(AU1000_STATIC_MEM_PHYS_ADDR);
+ unsigned long addr, staddr, start, mask, end;
+ int i;
- /* MEM_STNDCTL: disable ints, disable nand boot */
- au_writel(0, MEM_STNDCTL);
+ for (i = 0; i < 4; i++) {
+ addr = 0x1000 + (i * 0x10); /* CSx */
+ staddr = __raw_readl(base + addr + 0x08); /* STADDRx */
+ /* figure out the decoded range of this CS */
+ start = (staddr << 4) & 0xfffc0000;
+ mask = (staddr << 18) & 0xfffc0000;
+ end = (start | (start - 1)) & ~(start ^ mask);
+ if ((nand_base >= start) && (nand_base < end))
+ return i;
+ }
-#ifdef CONFIG_MIPS_PB1550
- /* set gpio206 high */
- gpio_direction_input(206);
+ return -ENODEV;
+}
- boot_swapboot = (au_readl(MEM_STSTAT) & (0x7 << 1)) | ((bcsr_read(BCSR_STATUS) >> 6) & 0x1);
+static int __devinit au1550nd_probe(struct platform_device *pdev)
+{
+ struct au1550nd_platdata *pd;
+ struct au1550nd_ctx *ctx;
+ struct nand_chip *this;
+ struct resource *r;
+ int ret, cs;
- switch (boot_swapboot) {
- case 0:
- case 2:
- case 8:
- case 0xC:
- case 0xD:
- /* x16 NAND Flash */
- nand_width = 0;
- break;
- case 1:
- case 9:
- case 3:
- case 0xE:
- case 0xF:
- /* x8 NAND Flash */
- nand_width = 1;
- break;
- default:
- printk("Pb1550 NAND: bad boot:swap\n");
- retval = -EINVAL;
- goto outmem;
+ pd = pdev->dev.platform_data;
+ if (!pd) {
+ dev_err(&pdev->dev, "missing platform data\n");
+ return -ENODEV;
}
-#endif
-
- /* Configure chip-select; normally done by boot code, e.g. YAMON */
-#ifdef NAND_STCFG
- if (NAND_CS == 0) {
- au_writel(NAND_STCFG, MEM_STCFG0);
- au_writel(NAND_STTIME, MEM_STTIME0);
- au_writel(NAND_STADDR, MEM_STADDR0);
- }
- if (NAND_CS == 1) {
- au_writel(NAND_STCFG, MEM_STCFG1);
- au_writel(NAND_STTIME, MEM_STTIME1);
- au_writel(NAND_STADDR, MEM_STADDR1);
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ dev_err(&pdev->dev, "no memory for NAND context\n");
+ return -ENOMEM;
}
- if (NAND_CS == 2) {
- au_writel(NAND_STCFG, MEM_STCFG2);
- au_writel(NAND_STTIME, MEM_STTIME2);
- au_writel(NAND_STADDR, MEM_STADDR2);
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ dev_err(&pdev->dev, "no NAND memory resource\n");
+ ret = -ENODEV;
+ goto out1;
}
- if (NAND_CS == 3) {
- au_writel(NAND_STCFG, MEM_STCFG3);
- au_writel(NAND_STTIME, MEM_STTIME3);
- au_writel(NAND_STADDR, MEM_STADDR3);
+ if (request_mem_region(r->start, resource_size(r), "au1550-nand")) {
+ dev_err(&pdev->dev, "cannot claim NAND memory area\n");
+ ret = -ENOMEM;
+ goto out1;
}
-#endif
-
- /* Locate NAND chip-select in order to determine NAND phys address */
- mem_staddr = 0x00000000;
- if (((au_readl(MEM_STCFG0) & 0x7) == 0x5) && (NAND_CS == 0))
- mem_staddr = au_readl(MEM_STADDR0);
- else if (((au_readl(MEM_STCFG1) & 0x7) == 0x5) && (NAND_CS == 1))
- mem_staddr = au_readl(MEM_STADDR1);
- else if (((au_readl(MEM_STCFG2) & 0x7) == 0x5) && (NAND_CS == 2))
- mem_staddr = au_readl(MEM_STADDR2);
- else if (((au_readl(MEM_STCFG3) & 0x7) == 0x5) && (NAND_CS == 3))
- mem_staddr = au_readl(MEM_STADDR3);
-
- if (mem_staddr == 0x00000000) {
- printk("Au1xxx NAND: ERROR WITH NAND CHIP-SELECT\n");
- kfree(au1550_mtd);
- return 1;
+
+ ctx->base = ioremap_nocache(r->start, 0x1000);
+ if (!ctx->base) {
+ dev_err(&pdev->dev, "cannot remap NAND memory area\n");
+ ret = -ENODEV;
+ goto out2;
}
- nand_phys = (mem_staddr << 4) & 0xFFFC0000;
- p_nand = ioremap(nand_phys, 0x1000);
+ this = &ctx->chip;
+ ctx->info.priv = this;
+ ctx->info.owner = THIS_MODULE;
- /* make controller and MTD agree */
- if (NAND_CS == 0)
- nand_width = au_readl(MEM_STCFG0) & (1 << 22);
- if (NAND_CS == 1)
- nand_width = au_readl(MEM_STCFG1) & (1 << 22);
- if (NAND_CS == 2)
- nand_width = au_readl(MEM_STCFG2) & (1 << 22);
- if (NAND_CS == 3)
- nand_width = au_readl(MEM_STCFG3) & (1 << 22);
+ /* figure out which CS# r->start belongs to */
+ cs = find_nand_cs(r->start);
+ if (cs < 0) {
+ dev_err(&pdev->dev, "cannot detect NAND chipselect\n");
+ ret = -ENODEV;
+ goto out3;
+ }
+ ctx->cs = cs;
- /* Set address of hardware control function */
this->dev_ready = au1550_device_ready;
this->select_chip = au1550_select_chip;
this->cmdfunc = au1550_command;
@@ -565,54 +510,57 @@ static int __init au1xxx_nand_init(void)
this->options = NAND_NO_AUTOINCR;
- if (!nand_width)
+ if (pd->devwidth)
this->options |= NAND_BUSWIDTH_16;
- this->read_byte = (!nand_width) ? au_read_byte16 : au_read_byte;
- au1550_write_byte = (!nand_width) ? au_write_byte16 : au_write_byte;
+ this->read_byte = (pd->devwidth) ? au_read_byte16 : au_read_byte;
+ ctx->write_byte = (pd->devwidth) ? au_write_byte16 : au_write_byte;
this->read_word = au_read_word;
- this->write_buf = (!nand_width) ? au_write_buf16 : au_write_buf;
- this->read_buf = (!nand_width) ? au_read_buf16 : au_read_buf;
- this->verify_buf = (!nand_width) ? au_verify_buf16 : au_verify_buf;
-
- /* Scan to find existence of the device */
- if (nand_scan(au1550_mtd, 1)) {
- retval = -ENXIO;
- goto outio;
+ this->write_buf = (pd->devwidth) ? au_write_buf16 : au_write_buf;
+ this->read_buf = (pd->devwidth) ? au_read_buf16 : au_read_buf;
+ this->verify_buf = (pd->devwidth) ? au_verify_buf16 : au_verify_buf;
+
+ ret = nand_scan(&ctx->info, 1);
+ if (ret) {
+ dev_err(&pdev->dev, "NAND scan failed with %d\n", ret);
+ goto out3;
}
- /* Register the partitions */
- mtd_device_register(au1550_mtd, partition_info,
- ARRAY_SIZE(partition_info));
+ mtd_device_register(&ctx->info, pd->parts, pd->num_parts);
return 0;
- outio:
- iounmap(p_nand);
-
- outmem:
- kfree(au1550_mtd);
- return retval;
+out3:
+ iounmap(ctx->base);
+out2:
+ release_mem_region(r->start, resource_size(r));
+out1:
+ kfree(ctx);
+ return ret;
}
-module_init(au1xxx_nand_init);
-
-/*
- * Clean up routine
- */
-static void __exit au1550_cleanup(void)
+static int __devexit au1550nd_remove(struct platform_device *pdev)
{
- /* Release resources, unregister device */
- nand_release(au1550_mtd);
+ struct au1550nd_ctx *ctx = platform_get_drvdata(pdev);
+ struct resource *r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- /* Free the MTD device structure */
- kfree(au1550_mtd);
-
- /* Unmap */
- iounmap(p_nand);
+ nand_release(&ctx->info);
+ iounmap(ctx->base);
+ release_mem_region(r->start, 0x1000);
+ kfree(ctx);
+ return 0;
}
-module_exit(au1550_cleanup);
+static struct platform_driver au1550nd_driver = {
+ .driver = {
+ .name = "au1550-nand",
+ .owner = THIS_MODULE,
+ },
+ .probe = au1550nd_probe,
+ .remove = __devexit_p(au1550nd_remove),
+};
+
+module_platform_driver(au1550nd_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Embedded Edge, LLC");
diff --git a/drivers/mtd/nand/bcm_umi_nand.c b/drivers/mtd/nand/bcm_umi_nand.c
index 46b58d672847..50387fd4009b 100644
--- a/drivers/mtd/nand/bcm_umi_nand.c
+++ b/drivers/mtd/nand/bcm_umi_nand.c
@@ -546,18 +546,7 @@ static struct platform_driver nand_driver = {
.resume = bcm_umi_nand_resume,
};
-static int __init nand_init(void)
-{
- return platform_driver_register(&nand_driver);
-}
-
-static void __exit nand_exit(void)
-{
- platform_driver_unregister(&nand_driver);
-}
-
-module_init(nand_init);
-module_exit(nand_exit);
+module_platform_driver(nand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Broadcom");
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index c153e1f77f90..6e566156956f 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -675,7 +675,9 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
davinci_nand_writel(info, A1CR_OFFSET + info->core_chipsel * 4, val);
- ret = davinci_aemif_setup_timing(info->timing, info->base,
+ ret = 0;
+ if (info->timing)
+ ret = davinci_aemif_setup_timing(info->timing, info->base,
info->core_chipsel);
if (ret < 0) {
dev_dbg(&pdev->dev, "NAND timing values setup fail\n");
diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c
index 5780dbab6113..df921e7a496c 100644
--- a/drivers/mtd/nand/diskonchip.c
+++ b/drivers/mtd/nand/diskonchip.c
@@ -1072,7 +1072,7 @@ static int __init find_media_headers(struct mtd_info *mtd, u_char *buf, const ch
size_t retlen;
for (offs = 0; offs < mtd->size; offs += mtd->erasesize) {
- ret = mtd->read(mtd, offs, mtd->writesize, &retlen, buf);
+ ret = mtd_read(mtd, offs, mtd->writesize, &retlen, buf);
if (retlen != mtd->writesize)
continue;
if (ret) {
@@ -1097,7 +1097,7 @@ static int __init find_media_headers(struct mtd_info *mtd, u_char *buf, const ch
/* Only one mediaheader was found. We want buf to contain a
mediaheader on return, so we'll have to re-read the one we found. */
offs = doc->mh0_page << this->page_shift;
- ret = mtd->read(mtd, offs, mtd->writesize, &retlen, buf);
+ ret = mtd_read(mtd, offs, mtd->writesize, &retlen, buf);
if (retlen != mtd->writesize) {
/* Insanity. Give up. */
printk(KERN_ERR "Read DiskOnChip Media Header once, but can't reread it???\n");
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index eedd8ee2c9ac..7195ee6efe12 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -166,15 +166,22 @@ static void set_addr(struct mtd_info *mtd, int column, int page_addr, int oob)
elbc_fcm_ctrl->page = page_addr;
- out_be32(&lbc->fbar,
- page_addr >> (chip->phys_erase_shift - chip->page_shift));
-
if (priv->page_size) {
+ /*
+ * large page size chip: FPAR[PI] saves the lowest 6 bits,
+ * FBAR[BLK] saves the remaining bits.
+ */
+ out_be32(&lbc->fbar, page_addr >> 6);
out_be32(&lbc->fpar,
((page_addr << FPAR_LP_PI_SHIFT) & FPAR_LP_PI) |
(oob ? FPAR_LP_MS : 0) | column);
buf_num = (page_addr & 1) << 2;
} else {
+ /*
+ * small page size chip: FPAR[PI] saves the lowest 5 bits,
+ * FBAR[BLK] saves the remaining bits.
+ */
+ out_be32(&lbc->fbar, page_addr >> 5);
out_be32(&lbc->fpar,
((page_addr << FPAR_SP_PI_SHIFT) & FPAR_SP_PI) |
(oob ? FPAR_SP_MS : 0) | column);
@@ -349,20 +356,22 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
fsl_elbc_run_command(mtd);
return;
- /* READID must read all 5 possible bytes while CEB is active */
case NAND_CMD_READID:
- dev_vdbg(priv->dev, "fsl_elbc_cmdfunc: NAND_CMD_READID.\n");
+ case NAND_CMD_PARAM:
+ dev_vdbg(priv->dev, "fsl_elbc_cmdfunc: NAND_CMD %x\n", command);
out_be32(&lbc->fir, (FIR_OP_CM0 << FIR_OP0_SHIFT) |
(FIR_OP_UA << FIR_OP1_SHIFT) |
(FIR_OP_RBW << FIR_OP2_SHIFT));
- out_be32(&lbc->fcr, NAND_CMD_READID << FCR_CMD0_SHIFT);
- /* nand_get_flash_type() reads 8 bytes of entire ID string */
- out_be32(&lbc->fbcr, 8);
- elbc_fcm_ctrl->read_bytes = 8;
+ out_be32(&lbc->fcr, command << FCR_CMD0_SHIFT);
+ /*
+ * although currently it's 8 bytes for READID, we always read
+ * the maximum 256 bytes (for PARAM)
+ */
+ out_be32(&lbc->fbcr, 256);
+ elbc_fcm_ctrl->read_bytes = 256;
elbc_fcm_ctrl->use_mdr = 1;
- elbc_fcm_ctrl->mdr = 0;
-
+ elbc_fcm_ctrl->mdr = column;
set_addr(mtd, 0, 0, 0);
fsl_elbc_run_command(mtd);
return;
@@ -407,9 +416,17 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
page_addr, column);
elbc_fcm_ctrl->column = column;
- elbc_fcm_ctrl->oob = 0;
elbc_fcm_ctrl->use_mdr = 1;
+ if (column >= mtd->writesize) {
+ /* OOB area */
+ column -= mtd->writesize;
+ elbc_fcm_ctrl->oob = 1;
+ } else {
+ WARN_ON(column != 0);
+ elbc_fcm_ctrl->oob = 0;
+ }
+
fcr = (NAND_CMD_STATUS << FCR_CMD1_SHIFT) |
(NAND_CMD_SEQIN << FCR_CMD2_SHIFT) |
(NAND_CMD_PAGEPROG << FCR_CMD3_SHIFT);
@@ -434,16 +451,12 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
(FIR_OP_CW1 << FIR_OP6_SHIFT) |
(FIR_OP_RS << FIR_OP7_SHIFT));
- if (column >= mtd->writesize) {
+ if (elbc_fcm_ctrl->oob)
/* OOB area --> READOOB */
- column -= mtd->writesize;
fcr |= NAND_CMD_READOOB << FCR_CMD0_SHIFT;
- elbc_fcm_ctrl->oob = 1;
- } else {
- WARN_ON(column != 0);
+ else
/* First 256 bytes --> READ0 */
fcr |= NAND_CMD_READ0 << FCR_CMD0_SHIFT;
- }
}
out_be32(&lbc->fcr, fcr);
@@ -463,7 +476,8 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
*/
if (elbc_fcm_ctrl->oob || elbc_fcm_ctrl->column != 0 ||
elbc_fcm_ctrl->index != mtd->writesize + mtd->oobsize)
- out_be32(&lbc->fbcr, elbc_fcm_ctrl->index);
+ out_be32(&lbc->fbcr,
+ elbc_fcm_ctrl->index - elbc_fcm_ctrl->column);
else
out_be32(&lbc->fbcr, 0);
@@ -659,9 +673,7 @@ static int fsl_elbc_chip_init_tail(struct mtd_info *mtd)
if (chip->pagemask & 0xff000000)
al++;
- /* add to ECCM mode set in fsl_elbc_init */
- priv->fmr |= (12 << FMR_CWTO_SHIFT) | /* Timeout > 12 ms */
- (al << FMR_AL_SHIFT);
+ priv->fmr |= al << FMR_AL_SHIFT;
dev_dbg(priv->dev, "fsl_elbc_init: nand->numchips = %d\n",
chip->numchips);
@@ -764,8 +776,10 @@ static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
priv->mtd.priv = chip;
priv->mtd.owner = THIS_MODULE;
- /* Set the ECCM according to the settings in bootloader.*/
- priv->fmr = in_be32(&lbc->fmr) & FMR_ECCM;
+ /* set timeout to maximum */
+ priv->fmr = 15 << FMR_CWTO_SHIFT;
+ if (in_be32(&lbc->bank[priv->bank].or) & OR_FCM_PGS)
+ priv->fmr |= FMR_ECCM;
/* fill in nand_chip structure */
/* set up function call table */
@@ -971,18 +985,7 @@ static struct platform_driver fsl_elbc_nand_driver = {
.remove = fsl_elbc_nand_remove,
};
-static int __init fsl_elbc_nand_init(void)
-{
- return platform_driver_register(&fsl_elbc_nand_driver);
-}
-
-static void __exit fsl_elbc_nand_exit(void)
-{
- platform_driver_unregister(&fsl_elbc_nand_driver);
-}
-
-module_init(fsl_elbc_nand_init);
-module_exit(fsl_elbc_nand_exit);
+module_platform_driver(fsl_elbc_nand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Freescale");
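The set_addr() hunk above stops deriving FBAR from phys_erase_shift/page_shift and splits the page address explicitly: per the new comments, large-page chips keep the low 6 bits of the page number in FPAR[PI] (64 pages per block) and small-page chips the low 5 bits (32 pages per block), with the remaining bits going to FBAR[BLK]. A small illustrative helper (hypothetical, not part of the patch):

	static void elbc_split_page_addr(unsigned int page_addr, bool large_page,
					 unsigned int *fbar_blk, unsigned int *fpar_pi)
	{
		unsigned int pi_bits = large_page ? 6 : 5;

		*fbar_blk = page_addr >> pi_bits;		/* block number -> FBAR[BLK] */
		*fpar_pi  = page_addr & ((1u << pi_bits) - 1);	/* page in block -> FPAR[PI] */
	}

For example, page 0x1234 on a large-page chip gives FBAR[BLK] = 0x48 and FPAR[PI] = 0x34.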
diff --git a/drivers/mtd/nand/fsl_upm.c b/drivers/mtd/nand/fsl_upm.c
index b4f3cc9f32fb..45df542b9c61 100644
--- a/drivers/mtd/nand/fsl_upm.c
+++ b/drivers/mtd/nand/fsl_upm.c
@@ -353,17 +353,7 @@ static struct platform_driver of_fun_driver = {
.remove = __devexit_p(fun_remove),
};
-static int __init fun_module_init(void)
-{
- return platform_driver_register(&of_fun_driver);
-}
-module_init(fun_module_init);
-
-static void __exit fun_module_exit(void)
-{
- platform_driver_unregister(&of_fun_driver);
-}
-module_exit(fun_module_exit);
+module_platform_driver(of_fun_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Anton Vorontsov <avorontsov@ru.mvista.com>");
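fsl_elbc_nand, fsl_upm and most of the drivers below replace their hand-rolled init/exit pairs with module_platform_driver(). In 3.3 the macro generates essentially the boilerplate it replaces, so the conversions are mechanical; approximately (see include/linux/platform_device.h for the authoritative definition):

	#define module_platform_driver(__platform_driver) \
	static int __init __platform_driver##_init(void) \
	{ \
		return platform_driver_register(&(__platform_driver)); \
	} \
	module_init(__platform_driver##_init); \
	static void __exit __platform_driver##_exit(void) \
	{ \
		platform_driver_unregister(&(__platform_driver)); \
	} \
	module_exit(__platform_driver##_exit);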
diff --git a/drivers/mtd/nand/gpio.c b/drivers/mtd/nand/gpio.c
index 2c2060b2800e..27000a5f5f47 100644
--- a/drivers/mtd/nand/gpio.c
+++ b/drivers/mtd/nand/gpio.c
@@ -27,6 +27,9 @@
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/nand-gpio.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_gpio.h>
struct gpiomtd {
void __iomem *io_sync;
@@ -171,6 +174,96 @@ static int gpio_nand_devready(struct mtd_info *mtd)
return gpio_get_value(gpiomtd->plat.gpio_rdy);
}
+#ifdef CONFIG_OF
+static const struct of_device_id gpio_nand_id_table[] = {
+ { .compatible = "gpio-control-nand" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, gpio_nand_id_table);
+
+static int gpio_nand_get_config_of(const struct device *dev,
+ struct gpio_nand_platdata *plat)
+{
+ u32 val;
+
+ if (!of_property_read_u32(dev->of_node, "bank-width", &val)) {
+ if (val == 2) {
+ plat->options |= NAND_BUSWIDTH_16;
+ } else if (val != 1) {
+ dev_err(dev, "invalid bank-width %u\n", val);
+ return -EINVAL;
+ }
+ }
+
+ plat->gpio_rdy = of_get_gpio(dev->of_node, 0);
+ plat->gpio_nce = of_get_gpio(dev->of_node, 1);
+ plat->gpio_ale = of_get_gpio(dev->of_node, 2);
+ plat->gpio_cle = of_get_gpio(dev->of_node, 3);
+ plat->gpio_nwp = of_get_gpio(dev->of_node, 4);
+
+ if (!of_property_read_u32(dev->of_node, "chip-delay", &val))
+ plat->chip_delay = val;
+
+ return 0;
+}
+
+static struct resource *gpio_nand_get_io_sync_of(struct platform_device *pdev)
+{
+ struct resource *r = devm_kzalloc(&pdev->dev, sizeof(*r), GFP_KERNEL);
+ u64 addr;
+
+ if (!r || of_property_read_u64(pdev->dev.of_node,
+ "gpio-control-nand,io-sync-reg", &addr))
+ return NULL;
+
+ r->start = addr;
+ r->end = r->start + 0x3;
+ r->flags = IORESOURCE_MEM;
+
+ return r;
+}
+#else /* CONFIG_OF */
+#define gpio_nand_id_table NULL
+static inline int gpio_nand_get_config_of(const struct device *dev,
+ struct gpio_nand_platdata *plat)
+{
+ return -ENOSYS;
+}
+
+static inline struct resource *
+gpio_nand_get_io_sync_of(struct platform_device *pdev)
+{
+ return NULL;
+}
+#endif /* CONFIG_OF */
+
+static inline int gpio_nand_get_config(const struct device *dev,
+ struct gpio_nand_platdata *plat)
+{
+ int ret = gpio_nand_get_config_of(dev, plat);
+
+ if (!ret)
+ return ret;
+
+ if (dev->platform_data) {
+ memcpy(plat, dev->platform_data, sizeof(*plat));
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline struct resource *
+gpio_nand_get_io_sync(struct platform_device *pdev)
+{
+ struct resource *r = gpio_nand_get_io_sync_of(pdev);
+
+ if (r)
+ return r;
+
+ return platform_get_resource(pdev, IORESOURCE_MEM, 1);
+}
+
static int __devexit gpio_nand_remove(struct platform_device *dev)
{
struct gpiomtd *gpiomtd = platform_get_drvdata(dev);
@@ -178,7 +271,7 @@ static int __devexit gpio_nand_remove(struct platform_device *dev)
nand_release(&gpiomtd->mtd_info);
- res = platform_get_resource(dev, IORESOURCE_MEM, 1);
+ res = gpio_nand_get_io_sync(dev);
iounmap(gpiomtd->io_sync);
if (res)
release_mem_region(res->start, resource_size(res));
@@ -226,9 +319,10 @@ static int __devinit gpio_nand_probe(struct platform_device *dev)
struct gpiomtd *gpiomtd;
struct nand_chip *this;
struct resource *res0, *res1;
- int ret;
+ struct mtd_part_parser_data ppdata = {};
+ int ret = 0;
- if (!dev->dev.platform_data)
+ if (!dev->dev.of_node && !dev->dev.platform_data)
return -EINVAL;
res0 = platform_get_resource(dev, IORESOURCE_MEM, 0);
@@ -248,7 +342,7 @@ static int __devinit gpio_nand_probe(struct platform_device *dev)
goto err_map;
}
- res1 = platform_get_resource(dev, IORESOURCE_MEM, 1);
+ res1 = gpio_nand_get_io_sync(dev);
if (res1) {
gpiomtd->io_sync = request_and_remap(res1, 4, "NAND sync", &ret);
if (!gpiomtd->io_sync) {
@@ -257,7 +351,9 @@ static int __devinit gpio_nand_probe(struct platform_device *dev)
}
}
- memcpy(&gpiomtd->plat, dev->dev.platform_data, sizeof(gpiomtd->plat));
+ ret = gpio_nand_get_config(&dev->dev, &gpiomtd->plat);
+ if (ret)
+ goto err_nce;
ret = gpio_request(gpiomtd->plat.gpio_nce, "NAND NCE");
if (ret)
@@ -316,8 +412,12 @@ static int __devinit gpio_nand_probe(struct platform_device *dev)
gpiomtd->plat.adjust_parts(&gpiomtd->plat,
gpiomtd->mtd_info.size);
- mtd_device_register(&gpiomtd->mtd_info, gpiomtd->plat.parts,
- gpiomtd->plat.num_parts);
+ ppdata.of_node = dev->dev.of_node;
+ ret = mtd_device_parse_register(&gpiomtd->mtd_info, NULL, &ppdata,
+ gpiomtd->plat.parts,
+ gpiomtd->plat.num_parts);
+ if (ret)
+ goto err_wp;
platform_set_drvdata(dev, gpiomtd);
return 0;
@@ -352,6 +452,7 @@ static struct platform_driver gpio_nand_driver = {
.remove = gpio_nand_remove,
.driver = {
.name = "gpio-nand",
+ .of_match_table = gpio_nand_id_table,
},
};
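gpio.c also switches from mtd_device_register() to mtd_device_parse_register(), passing the device-tree node via struct mtd_part_parser_data so partition parsers such as ofpart run first; the explicit platform partitions are only used as a fallback when no parser finds anything. The approximate prototype (parameter names paraphrased; see include/linux/mtd/mtd.h):

	int mtd_device_parse_register(struct mtd_info *mtd,
				      const char **types,	/* parser names, NULL = default list */
				      struct mtd_part_parser_data *parser_data,
				      const struct mtd_partition *parts,	/* fallback partitions */
				      int nr_parts);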
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
index de4db7604a3f..7f680420bfab 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
@@ -126,7 +126,7 @@ int gpmi_init(struct gpmi_nand_data *this)
struct resources *r = &this->resources;
int ret;
- ret = clk_enable(r->clock);
+ ret = clk_prepare_enable(r->clock);
if (ret)
goto err_out;
ret = gpmi_reset_block(r->gpmi_regs, false);
@@ -146,7 +146,7 @@ int gpmi_init(struct gpmi_nand_data *this)
/* Select BCH ECC. */
writel(BM_GPMI_CTRL1_BCH_MODE, r->gpmi_regs + HW_GPMI_CTRL1_SET);
- clk_disable(r->clock);
+ clk_disable_unprepare(r->clock);
return 0;
err_out:
return ret;
@@ -202,7 +202,7 @@ int bch_set_geometry(struct gpmi_nand_data *this)
ecc_strength = bch_geo->ecc_strength >> 1;
page_size = bch_geo->page_size;
- ret = clk_enable(r->clock);
+ ret = clk_prepare_enable(r->clock);
if (ret)
goto err_out;
@@ -229,7 +229,7 @@ int bch_set_geometry(struct gpmi_nand_data *this)
writel(BM_BCH_CTRL_COMPLETE_IRQ_EN,
r->bch_regs + HW_BCH_CTRL_SET);
- clk_disable(r->clock);
+ clk_disable_unprepare(r->clock);
return 0;
err_out:
return ret;
@@ -704,7 +704,7 @@ void gpmi_begin(struct gpmi_nand_data *this)
int ret;
/* Enable the clock. */
- ret = clk_enable(r->clock);
+ ret = clk_prepare_enable(r->clock);
if (ret) {
pr_err("We failed in enable the clk\n");
goto err_out;
@@ -773,7 +773,7 @@ err_out:
void gpmi_end(struct gpmi_nand_data *this)
{
struct resources *r = &this->resources;
- clk_disable(r->clock);
+ clk_disable_unprepare(r->clock);
}
/* Clears a BCH interrupt. */
@@ -827,7 +827,7 @@ int gpmi_send_command(struct gpmi_nand_data *this)
pio[1] = pio[2] = 0;
desc = channel->device->device_prep_slave_sg(channel,
(struct scatterlist *)pio,
- ARRAY_SIZE(pio), DMA_NONE, 0);
+ ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
if (!desc) {
pr_err("step 1 error\n");
return -1;
@@ -839,7 +839,7 @@ int gpmi_send_command(struct gpmi_nand_data *this)
sg_init_one(sgl, this->cmd_buffer, this->command_length);
dma_map_sg(this->dev, sgl, 1, DMA_TO_DEVICE);
desc = channel->device->device_prep_slave_sg(channel,
- sgl, 1, DMA_TO_DEVICE, 1);
+ sgl, 1, DMA_MEM_TO_DEV, 1);
if (!desc) {
pr_err("step 2 error\n");
return -1;
@@ -872,7 +872,7 @@ int gpmi_send_data(struct gpmi_nand_data *this)
pio[1] = 0;
desc = channel->device->device_prep_slave_sg(channel,
(struct scatterlist *)pio,
- ARRAY_SIZE(pio), DMA_NONE, 0);
+ ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
if (!desc) {
pr_err("step 1 error\n");
return -1;
@@ -881,7 +881,7 @@ int gpmi_send_data(struct gpmi_nand_data *this)
/* [2] send DMA request */
prepare_data_dma(this, DMA_TO_DEVICE);
desc = channel->device->device_prep_slave_sg(channel, &this->data_sgl,
- 1, DMA_TO_DEVICE, 1);
+ 1, DMA_MEM_TO_DEV, 1);
if (!desc) {
pr_err("step 2 error\n");
return -1;
@@ -908,7 +908,7 @@ int gpmi_read_data(struct gpmi_nand_data *this)
pio[1] = 0;
desc = channel->device->device_prep_slave_sg(channel,
(struct scatterlist *)pio,
- ARRAY_SIZE(pio), DMA_NONE, 0);
+ ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
if (!desc) {
pr_err("step 1 error\n");
return -1;
@@ -917,7 +917,7 @@ int gpmi_read_data(struct gpmi_nand_data *this)
/* [2] : send DMA request */
prepare_data_dma(this, DMA_FROM_DEVICE);
desc = channel->device->device_prep_slave_sg(channel, &this->data_sgl,
- 1, DMA_FROM_DEVICE, 1);
+ 1, DMA_DEV_TO_MEM, 1);
if (!desc) {
pr_err("step 2 error\n");
return -1;
@@ -964,7 +964,7 @@ int gpmi_send_page(struct gpmi_nand_data *this,
desc = channel->device->device_prep_slave_sg(channel,
(struct scatterlist *)pio,
- ARRAY_SIZE(pio), DMA_NONE, 0);
+ ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
if (!desc) {
pr_err("step 2 error\n");
return -1;
@@ -998,7 +998,8 @@ int gpmi_read_page(struct gpmi_nand_data *this,
| BF_GPMI_CTRL0_XFER_COUNT(0);
pio[1] = 0;
desc = channel->device->device_prep_slave_sg(channel,
- (struct scatterlist *)pio, 2, DMA_NONE, 0);
+ (struct scatterlist *)pio, 2,
+ DMA_TRANS_NONE, 0);
if (!desc) {
pr_err("step 1 error\n");
return -1;
@@ -1027,7 +1028,7 @@ int gpmi_read_page(struct gpmi_nand_data *this,
pio[5] = auxiliary;
desc = channel->device->device_prep_slave_sg(channel,
(struct scatterlist *)pio,
- ARRAY_SIZE(pio), DMA_NONE, 1);
+ ARRAY_SIZE(pio), DMA_TRANS_NONE, 1);
if (!desc) {
pr_err("step 2 error\n");
return -1;
@@ -1045,7 +1046,8 @@ int gpmi_read_page(struct gpmi_nand_data *this,
| BF_GPMI_CTRL0_XFER_COUNT(geo->page_size);
pio[1] = 0;
desc = channel->device->device_prep_slave_sg(channel,
- (struct scatterlist *)pio, 2, DMA_NONE, 1);
+ (struct scatterlist *)pio, 2,
+ DMA_TRANS_NONE, 1);
if (!desc) {
pr_err("step 3 error\n");
return -1;
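gpmi-lib.c moves to the prepare/enable pairing required by the common clock framework. clk_prepare_enable() and clk_disable_unprepare() are the standard helpers combining the two steps, close to the definitions in include/linux/clk.h:

	static inline int clk_prepare_enable(struct clk *clk)
	{
		int ret = clk_prepare(clk);	/* may sleep */

		if (ret)
			return ret;
		ret = clk_enable(clk);		/* safe in atomic context */
		if (ret)
			clk_unprepare(clk);
		return ret;
	}

	static inline void clk_disable_unprepare(struct clk *clk)
	{
		clk_disable(clk);
		clk_unprepare(clk);
	}

The same file also switches the slave-DMA calls from enum dma_data_direction values to the dedicated enum dma_transfer_direction values (DMA_MEM_TO_DEV, DMA_DEV_TO_MEM, DMA_TRANS_NONE), which is the type device_prep_slave_sg() now takes.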
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index 071b63420f0e..493ec2fcf97f 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -21,9 +21,9 @@
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
+#include <linux/module.h>
#include <linux/mtd/gpmi-nand.h>
#include <linux/mtd/partitions.h>
-
#include "gpmi-nand.h"
/* add our owner bbt descriptor */
diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c
index e2664073a89b..ac3b9f255e00 100644
--- a/drivers/mtd/nand/jz4740_nand.c
+++ b/drivers/mtd/nand/jz4740_nand.c
@@ -423,17 +423,7 @@ static struct platform_driver jz_nand_driver = {
},
};
-static int __init jz_nand_init(void)
-{
- return platform_driver_register(&jz_nand_driver);
-}
-module_init(jz_nand_init);
-
-static void __exit jz_nand_exit(void)
-{
- platform_driver_unregister(&jz_nand_driver);
-}
-module_exit(jz_nand_exit);
+module_platform_driver(jz_nand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c
index 5ede64706346..c240cf1af961 100644
--- a/drivers/mtd/nand/mpc5121_nfc.c
+++ b/drivers/mtd/nand/mpc5121_nfc.c
@@ -879,19 +879,7 @@ static struct platform_driver mpc5121_nfc_driver = {
},
};
-static int __init mpc5121_nfc_init(void)
-{
- return platform_driver_register(&mpc5121_nfc_driver);
-}
-
-module_init(mpc5121_nfc_init);
-
-static void __exit mpc5121_nfc_cleanup(void)
-{
- platform_driver_unregister(&mpc5121_nfc_driver);
-}
-
-module_exit(mpc5121_nfc_cleanup);
+module_platform_driver(mpc5121_nfc_driver);
MODULE_AUTHOR("Freescale Semiconductor, Inc.");
MODULE_DESCRIPTION("MPC5121 NAND MTD driver");
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 3ed9c5e4d34e..35b4565050f1 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -3132,8 +3132,8 @@ ident_done:
* Bad block marker is stored in the last page of each block
* on Samsung and Hynix MLC devices; stored in first two pages
* of each block on Micron devices with 2KiB pages and on
- * SLC Samsung, Hynix, Toshiba and AMD/Spansion. All others scan
- * only the first page.
+ * SLC Samsung, Hynix, Toshiba, AMD/Spansion, and Macronix.
+ * All others scan only the first page.
*/
if ((chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
(*maf_id == NAND_MFR_SAMSUNG ||
@@ -3143,7 +3143,8 @@ ident_done:
(*maf_id == NAND_MFR_SAMSUNG ||
*maf_id == NAND_MFR_HYNIX ||
*maf_id == NAND_MFR_TOSHIBA ||
- *maf_id == NAND_MFR_AMD)) ||
+ *maf_id == NAND_MFR_AMD ||
+ *maf_id == NAND_MFR_MACRONIX)) ||
(mtd->writesize == 2048 &&
*maf_id == NAND_MFR_MICRON))
chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
index 69148ae3bf58..20a112f591fe 100644
--- a/drivers/mtd/nand/nand_bbt.c
+++ b/drivers/mtd/nand/nand_bbt.c
@@ -201,7 +201,7 @@ static int read_bbt(struct mtd_info *mtd, uint8_t *buf, int page, int num,
from += marker_len;
marker_len = 0;
}
- res = mtd->read(mtd, from, len, &retlen, buf);
+ res = mtd_read(mtd, from, len, &retlen, buf);
if (res < 0) {
if (mtd_is_eccerr(res)) {
pr_info("nand_bbt: ECC error in BBT at "
@@ -298,7 +298,7 @@ static int scan_read_raw_data(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
if (td->options & NAND_BBT_VERSION)
len++;
- return mtd->read(mtd, offs, len, &retlen, buf);
+ return mtd_read(mtd, offs, len, &retlen, buf);
}
/* Scan read raw data from flash */
@@ -317,7 +317,7 @@ static int scan_read_raw_oob(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
ops.len = min(len, (size_t)mtd->writesize);
ops.oobbuf = buf + ops.len;
- res = mtd->read_oob(mtd, offs, &ops);
+ res = mtd_read_oob(mtd, offs, &ops);
if (res)
return res;
@@ -350,7 +350,7 @@ static int scan_write_bbt(struct mtd_info *mtd, loff_t offs, size_t len,
ops.oobbuf = oob;
ops.len = len;
- return mtd->write_oob(mtd, offs, &ops);
+ return mtd_write_oob(mtd, offs, &ops);
}
static u32 bbt_get_ver_offs(struct mtd_info *mtd, struct nand_bbt_descr *td)
@@ -434,7 +434,7 @@ static int scan_block_fast(struct mtd_info *mtd, struct nand_bbt_descr *bd,
* Read the full oob until read_oob is fixed to handle single
* byte reads for 16 bit buswidth.
*/
- ret = mtd->read_oob(mtd, offs, &ops);
+ ret = mtd_read_oob(mtd, offs, &ops);
/* Ignore ECC errors when checking for BBM */
if (ret && !mtd_is_bitflip_or_eccerr(ret))
return ret;
@@ -756,7 +756,7 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
/* Make it block aligned */
to &= ~((loff_t)((1 << this->bbt_erase_shift) - 1));
len = 1 << this->bbt_erase_shift;
- res = mtd->read(mtd, to, len, &retlen, buf);
+ res = mtd_read(mtd, to, len, &retlen, buf);
if (res < 0) {
if (retlen != len) {
pr_info("nand_bbt: error reading block "
@@ -769,7 +769,7 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
/* Read oob data */
ops.ooblen = (len >> this->page_shift) * mtd->oobsize;
ops.oobbuf = &buf[len];
- res = mtd->read_oob(mtd, to + mtd->writesize, &ops);
+ res = mtd_read_oob(mtd, to + mtd->writesize, &ops);
if (res < 0 || ops.oobretlen != ops.ooblen)
goto outerr;
diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c
index 00cf1b0d6053..af4fe8ca7b5e 100644
--- a/drivers/mtd/nand/nand_ids.c
+++ b/drivers/mtd/nand/nand_ids.c
@@ -73,11 +73,12 @@ struct nand_flash_dev nand_flash_ids[] = {
#define LP_OPTIONS (NAND_SAMSUNG_LP_OPTIONS | NAND_NO_READRDY | NAND_NO_AUTOINCR)
#define LP_OPTIONS16 (LP_OPTIONS | NAND_BUSWIDTH_16)
- /*512 Megabit */
+ /* 512 Megabit */
{"NAND 64MiB 1,8V 8-bit", 0xA2, 0, 64, 0, LP_OPTIONS},
{"NAND 64MiB 1,8V 8-bit", 0xA0, 0, 64, 0, LP_OPTIONS},
{"NAND 64MiB 3,3V 8-bit", 0xF2, 0, 64, 0, LP_OPTIONS},
{"NAND 64MiB 3,3V 8-bit", 0xD0, 0, 64, 0, LP_OPTIONS},
+ {"NAND 64MiB 3,3V 8-bit", 0xF0, 0, 64, 0, LP_OPTIONS},
{"NAND 64MiB 1,8V 16-bit", 0xB2, 0, 64, 0, LP_OPTIONS16},
{"NAND 64MiB 1,8V 16-bit", 0xB0, 0, 64, 0, LP_OPTIONS16},
{"NAND 64MiB 3,3V 16-bit", 0xC2, 0, 64, 0, LP_OPTIONS16},
@@ -176,6 +177,7 @@ struct nand_manufacturers nand_manuf_ids[] = {
{NAND_MFR_HYNIX, "Hynix"},
{NAND_MFR_MICRON, "Micron"},
{NAND_MFR_AMD, "AMD"},
+ {NAND_MFR_MACRONIX, "Macronix"},
{0x0, "Unknown"}
};
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index 34c03be77301..261f478f8cc3 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -737,7 +737,7 @@ static int parse_badblocks(struct nandsim *ns, struct mtd_info *mtd)
return -EINVAL;
}
offset = erase_block_no * ns->geom.secsz;
- if (mtd->block_markbad(mtd, offset)) {
+ if (mtd_block_markbad(mtd, offset)) {
NS_ERR("invalid badblocks.\n");
return -EINVAL;
}
diff --git a/drivers/mtd/nand/ndfc.c b/drivers/mtd/nand/ndfc.c
index ee1713907b92..ec688548c880 100644
--- a/drivers/mtd/nand/ndfc.c
+++ b/drivers/mtd/nand/ndfc.c
@@ -188,7 +188,7 @@ static int ndfc_chip_init(struct ndfc_controller *ndfc,
if (!flash_np)
return -ENODEV;
- ppdata->of_node = flash_np;
+ ppdata.of_node = flash_np;
ndfc->mtd.name = kasprintf(GFP_KERNEL, "%s.%s",
dev_name(&ndfc->ofdev->dev), flash_np->name);
if (!ndfc->mtd.name) {
@@ -294,18 +294,7 @@ static struct platform_driver ndfc_driver = {
.remove = __devexit_p(ndfc_remove),
};
-static int __init ndfc_nand_init(void)
-{
- return platform_driver_register(&ndfc_driver);
-}
-
-static void __exit ndfc_nand_exit(void)
-{
- platform_driver_unregister(&ndfc_driver);
-}
-
-module_init(ndfc_nand_init);
-module_exit(ndfc_nand_exit);
+module_platform_driver(ndfc_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
diff --git a/drivers/mtd/nand/nomadik_nand.c b/drivers/mtd/nand/nomadik_nand.c
index b463ecfb4c1a..a86aa812ca13 100644
--- a/drivers/mtd/nand/nomadik_nand.c
+++ b/drivers/mtd/nand/nomadik_nand.c
@@ -201,7 +201,7 @@ static int nomadik_nand_suspend(struct device *dev)
struct nomadik_nand_host *host = dev_get_drvdata(dev);
int ret = 0;
if (host)
- ret = host->mtd.suspend(&host->mtd);
+ ret = mtd_suspend(&host->mtd);
return ret;
}
@@ -209,7 +209,7 @@ static int nomadik_nand_resume(struct device *dev)
{
struct nomadik_nand_host *host = dev_get_drvdata(dev);
if (host)
- host->mtd.resume(&host->mtd);
+ mtd_resume(&host->mtd);
return 0;
}
@@ -228,19 +228,7 @@ static struct platform_driver nomadik_nand_driver = {
},
};
-static int __init nand_nomadik_init(void)
-{
- pr_info("Nomadik NAND driver\n");
- return platform_driver_register(&nomadik_nand_driver);
-}
-
-static void __exit nand_nomadik_exit(void)
-{
- platform_driver_unregister(&nomadik_nand_driver);
-}
-
-module_init(nand_nomadik_init);
-module_exit(nand_nomadik_exit);
+module_platform_driver(nomadik_nand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("ST Microelectronics (sachin.verma@st.com)");
diff --git a/drivers/mtd/nand/nuc900_nand.c b/drivers/mtd/nand/nuc900_nand.c
index fa8faedfad6e..8febe46e1105 100644
--- a/drivers/mtd/nand/nuc900_nand.c
+++ b/drivers/mtd/nand/nuc900_nand.c
@@ -364,18 +364,7 @@ static struct platform_driver nuc900_nand_driver = {
},
};
-static int __init nuc900_nand_init(void)
-{
- return platform_driver_register(&nuc900_nand_driver);
-}
-
-static void __exit nuc900_nand_exit(void)
-{
- platform_driver_unregister(&nuc900_nand_driver);
-}
-
-module_init(nuc900_nand_init);
-module_exit(nuc900_nand_exit);
+module_platform_driver(nuc900_nand_driver);
MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
MODULE_DESCRIPTION("w90p910/NUC9xx nand driver!");
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index f745f00f3167..b3a883e2a22f 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -1145,20 +1145,7 @@ static struct platform_driver omap_nand_driver = {
},
};
-static int __init omap_nand_init(void)
-{
- pr_info("%s driver initializing\n", DRIVER_NAME);
-
- return platform_driver_register(&omap_nand_driver);
-}
-
-static void __exit omap_nand_exit(void)
-{
- platform_driver_unregister(&omap_nand_driver);
-}
-
-module_init(omap_nand_init);
-module_exit(omap_nand_exit);
+module_platform_driver(omap_nand_driver);
MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c
index a97264ececdb..974dbf8251c9 100644
--- a/drivers/mtd/nand/pasemi_nand.c
+++ b/drivers/mtd/nand/pasemi_nand.c
@@ -230,17 +230,7 @@ static struct platform_driver pasemi_nand_driver =
.remove = pasemi_nand_remove,
};
-static int __init pasemi_nand_init(void)
-{
- return platform_driver_register(&pasemi_nand_driver);
-}
-module_init(pasemi_nand_init);
-
-static void __exit pasemi_nand_exit(void)
-{
- platform_driver_unregister(&pasemi_nand_driver);
-}
-module_exit(pasemi_nand_exit);
+module_platform_driver(pasemi_nand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Egor Martovetsky <egor@pasemi.com>");
diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c
index ea8e1234e0e2..7f2da6953357 100644
--- a/drivers/mtd/nand/plat_nand.c
+++ b/drivers/mtd/nand/plat_nand.c
@@ -148,18 +148,7 @@ static struct platform_driver plat_nand_driver = {
},
};
-static int __init plat_nand_init(void)
-{
- return platform_driver_register(&plat_nand_driver);
-}
-
-static void __exit plat_nand_exit(void)
-{
- platform_driver_unregister(&plat_nand_driver);
-}
-
-module_init(plat_nand_init);
-module_exit(plat_nand_exit);
+module_platform_driver(plat_nand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vitaly Wool");
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 9eb7f879969e..5c3d719c37e6 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -185,7 +185,7 @@ struct pxa3xx_nand_info {
uint32_t ndcb2;
};
-static int use_dma = 1;
+static bool use_dma = 1;
module_param(use_dma, bool, 0444);
MODULE_PARM_DESC(use_dma, "enable DMA for data transferring to/from NAND HW");
@@ -1258,7 +1258,7 @@ static int pxa3xx_nand_suspend(struct platform_device *pdev, pm_message_t state)
for (cs = 0; cs < pdata->num_cs; cs++) {
mtd = info->host[cs]->mtd;
- mtd->suspend(mtd);
+ mtd_suspend(mtd);
}
return 0;
@@ -1291,7 +1291,7 @@ static int pxa3xx_nand_resume(struct platform_device *pdev)
nand_writel(info, NDSR, NDSR_MASK);
for (cs = 0; cs < pdata->num_cs; cs++) {
mtd = info->host[cs]->mtd;
- mtd->resume(mtd);
+ mtd_resume(mtd);
}
return 0;
@@ -1311,17 +1311,7 @@ static struct platform_driver pxa3xx_nand_driver = {
.resume = pxa3xx_nand_resume,
};
-static int __init pxa3xx_nand_init(void)
-{
- return platform_driver_register(&pxa3xx_nand_driver);
-}
-module_init(pxa3xx_nand_init);
-
-static void __exit pxa3xx_nand_exit(void)
-{
- platform_driver_unregister(&pxa3xx_nand_driver);
-}
-module_exit(pxa3xx_nand_exit);
+module_platform_driver(pxa3xx_nand_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("PXA3xx NAND controller driver");
diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c
index f20f393bfda6..769a4e096b3c 100644
--- a/drivers/mtd/nand/r852.c
+++ b/drivers/mtd/nand/r852.c
@@ -22,7 +22,7 @@
#include "r852.h"
-static int r852_enable_dma = 1;
+static bool r852_enable_dma = 1;
module_param(r852_enable_dma, bool, S_IRUGO);
MODULE_PARM_DESC(r852_enable_dma, "Enable usage of the DMA (default)");
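pxa3xx_nand.c and r852.c change the variables behind their bool module parameters from int to bool: since the module_param() bool cleanups in this cycle, the bool parameter type expects the backing variable itself to be bool (older kernels tolerated a plain int). The opposite mismatch is corrected further down in sm_ftl.c, where cache_timeout is an int and the parameter type is changed from bool to int. A minimal sketch of both forms:

	#include <linux/module.h>
	#include <linux/moduleparam.h>

	/* boolean parameter: backing variable must be bool */
	static bool use_dma = true;
	module_param(use_dma, bool, 0444);
	MODULE_PARM_DESC(use_dma, "enable DMA transfers");

	/* integer parameter: int variable with the matching 'int' type */
	static int cache_timeout = 1000;
	module_param(cache_timeout, int, S_IRUGO);
	MODULE_PARM_DESC(cache_timeout, "cache flush timeout in ms");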
diff --git a/drivers/mtd/nand/sharpsl.c b/drivers/mtd/nand/sharpsl.c
index 619d2a504788..b175c0fd8b93 100644
--- a/drivers/mtd/nand/sharpsl.c
+++ b/drivers/mtd/nand/sharpsl.c
@@ -230,17 +230,7 @@ static struct platform_driver sharpsl_nand_driver = {
.remove = __devexit_p(sharpsl_nand_remove),
};
-static int __init sharpsl_nand_init(void)
-{
- return platform_driver_register(&sharpsl_nand_driver);
-}
-module_init(sharpsl_nand_init);
-
-static void __exit sharpsl_nand_exit(void)
-{
- platform_driver_unregister(&sharpsl_nand_driver);
-}
-module_exit(sharpsl_nand_exit);
+module_platform_driver(sharpsl_nand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>");
diff --git a/drivers/mtd/nand/sm_common.c b/drivers/mtd/nand/sm_common.c
index 32ae5af7444f..774c3c266713 100644
--- a/drivers/mtd/nand/sm_common.c
+++ b/drivers/mtd/nand/sm_common.c
@@ -55,7 +55,7 @@ static int sm_block_markbad(struct mtd_info *mtd, loff_t ofs)
ops.datbuf = NULL;
- ret = mtd->write_oob(mtd, ofs, &ops);
+ ret = mtd_write_oob(mtd, ofs, &ops);
if (ret < 0 || ops.oobretlen != SM_OOB_SIZE) {
printk(KERN_NOTICE
"sm_common: can't mark sector at %i as bad\n",
diff --git a/drivers/mtd/nand/socrates_nand.c b/drivers/mtd/nand/socrates_nand.c
index 0fb24f9c2327..e02b08bcf0c0 100644
--- a/drivers/mtd/nand/socrates_nand.c
+++ b/drivers/mtd/nand/socrates_nand.c
@@ -273,18 +273,7 @@ static struct platform_driver socrates_nand_driver = {
.remove = __devexit_p(socrates_nand_remove),
};
-static int __init socrates_nand_init(void)
-{
- return platform_driver_register(&socrates_nand_driver);
-}
-
-static void __exit socrates_nand_exit(void)
-{
- platform_driver_unregister(&socrates_nand_driver);
-}
-
-module_init(socrates_nand_init);
-module_exit(socrates_nand_exit);
+module_platform_driver(socrates_nand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ilya Yanok");
diff --git a/drivers/mtd/nand/tmio_nand.c b/drivers/mtd/nand/tmio_nand.c
index beebd95f7690..6caa0cd9d6a7 100644
--- a/drivers/mtd/nand/tmio_nand.c
+++ b/drivers/mtd/nand/tmio_nand.c
@@ -533,18 +533,7 @@ static struct platform_driver tmio_driver = {
.resume = tmio_resume,
};
-static int __init tmio_init(void)
-{
- return platform_driver_register(&tmio_driver);
-}
-
-static void __exit tmio_exit(void)
-{
- platform_driver_unregister(&tmio_driver);
-}
-
-module_init(tmio_init);
-module_exit(tmio_exit);
+module_platform_driver(tmio_driver);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Ian Molton, Dirk Opfer, Chris Humbert, Dmitry Baryshkov");
diff --git a/drivers/mtd/nand/txx9ndfmc.c b/drivers/mtd/nand/txx9ndfmc.c
index ace46fdaef58..c7c4f1d11c77 100644
--- a/drivers/mtd/nand/txx9ndfmc.c
+++ b/drivers/mtd/nand/txx9ndfmc.c
@@ -298,11 +298,7 @@ static int __init txx9ndfmc_probe(struct platform_device *dev)
drvdata = devm_kzalloc(&dev->dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
- if (!devm_request_mem_region(&dev->dev, res->start,
- resource_size(res), dev_name(&dev->dev)))
- return -EBUSY;
- drvdata->base = devm_ioremap(&dev->dev, res->start,
- resource_size(res));
+ drvdata->base = devm_request_and_ioremap(&dev->dev, res);
if (!drvdata->base)
return -EBUSY;
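txx9ndfmc collapses the devm_request_mem_region() + devm_ioremap() pair into the then-new devm_request_and_ioremap() helper, which performs both steps against the struct resource and returns NULL on any failure (it was later superseded by devm_ioremap_resource()). Typical usage, sketched:

	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_request_and_ioremap(&pdev->dev, res);
	if (!base)
		return -EBUSY;	/* region busy or ioremap failed */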
diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
index cda77b562ad4..a75382aff5f6 100644
--- a/drivers/mtd/nftlcore.c
+++ b/drivers/mtd/nftlcore.c
@@ -56,7 +56,7 @@ static void nftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
if (memcmp(mtd->name, "DiskOnChip", 10))
return;
- if (!mtd->block_isbad) {
+ if (!mtd_can_have_bb(mtd)) {
printk(KERN_ERR
"NFTL no longer supports the old DiskOnChip drivers loaded via docprobe.\n"
"Please use the new diskonchip driver under the NAND subsystem.\n");
@@ -153,7 +153,7 @@ int nftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len,
ops.oobbuf = buf;
ops.datbuf = NULL;
- res = mtd->read_oob(mtd, offs & ~mask, &ops);
+ res = mtd_read_oob(mtd, offs & ~mask, &ops);
*retlen = ops.oobretlen;
return res;
}
@@ -174,7 +174,7 @@ int nftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len,
ops.oobbuf = buf;
ops.datbuf = NULL;
- res = mtd->write_oob(mtd, offs & ~mask, &ops);
+ res = mtd_write_oob(mtd, offs & ~mask, &ops);
*retlen = ops.oobretlen;
return res;
}
@@ -198,7 +198,7 @@ static int nftl_write(struct mtd_info *mtd, loff_t offs, size_t len,
ops.datbuf = buf;
ops.len = len;
- res = mtd->write_oob(mtd, offs & ~mask, &ops);
+ res = mtd_write_oob(mtd, offs & ~mask, &ops);
*retlen = ops.retlen;
return res;
}
@@ -423,12 +423,17 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
if (BlockMap[block] == BLOCK_NIL)
continue;
- ret = mtd->read(mtd, (nftl->EraseSize * BlockMap[block]) + (block * 512),
- 512, &retlen, movebuf);
+ ret = mtd_read(mtd,
+ (nftl->EraseSize * BlockMap[block]) + (block * 512),
+ 512,
+ &retlen,
+ movebuf);
if (ret < 0 && !mtd_is_bitflip(ret)) {
- ret = mtd->read(mtd, (nftl->EraseSize * BlockMap[block])
- + (block * 512), 512, &retlen,
- movebuf);
+ ret = mtd_read(mtd,
+ (nftl->EraseSize * BlockMap[block]) + (block * 512),
+ 512,
+ &retlen,
+ movebuf);
if (ret != -EIO)
printk("Error went away on retry.\n");
}
@@ -771,7 +776,7 @@ static int nftl_readblock(struct mtd_blktrans_dev *mbd, unsigned long block,
} else {
loff_t ptr = (lastgoodEUN * nftl->EraseSize) + blockofs;
size_t retlen;
- int res = mtd->read(mtd, ptr, 512, &retlen, buffer);
+ int res = mtd_read(mtd, ptr, 512, &retlen, buffer);
if (res < 0 && !mtd_is_bitflip(res))
return -EIO;
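nftlcore (and redboot.c further down) replace open-coded tests of the mtd->block_isbad method pointer with mtd_can_have_bb(), which at this stage is just that pointer check wrapped in an inline, roughly:

	/* approximate 3.3-era helper from include/linux/mtd/mtd.h */
	static inline int mtd_can_have_bb(const struct mtd_info *mtd)
	{
		return !!mtd->block_isbad;
	}

NOR devices typically leave block_isbad unset, so callers use this to skip bad-block handling entirely.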
diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
index ac4092591aea..51b9d6af307f 100644
--- a/drivers/mtd/nftlmount.c
+++ b/drivers/mtd/nftlmount.c
@@ -63,8 +63,8 @@ static int find_boot_record(struct NFTLrecord *nftl)
/* Check for ANAND header first. Then can whinge if it's found but later
checks fail */
- ret = mtd->read(mtd, block * nftl->EraseSize, SECTORSIZE,
- &retlen, buf);
+ ret = mtd_read(mtd, block * nftl->EraseSize, SECTORSIZE,
+ &retlen, buf);
/* We ignore ret in case the ECC of the MediaHeader is invalid
(which is apparently acceptable) */
if (retlen != SECTORSIZE) {
@@ -242,7 +242,8 @@ The new DiskOnChip driver already scanned the bad block table. Just query it.
if (buf[i & (SECTORSIZE - 1)] != 0xff)
nftl->ReplUnitTable[i] = BLOCK_RESERVED;
#endif
- if (nftl->mbd.mtd->block_isbad(nftl->mbd.mtd, i * nftl->EraseSize))
+ if (mtd_block_isbad(nftl->mbd.mtd,
+ i * nftl->EraseSize))
nftl->ReplUnitTable[i] = BLOCK_RESERVED;
}
@@ -274,7 +275,7 @@ static int check_free_sectors(struct NFTLrecord *nftl, unsigned int address, int
int i;
for (i = 0; i < len; i += SECTORSIZE) {
- if (mtd->read(mtd, address, SECTORSIZE, &retlen, buf))
+ if (mtd_read(mtd, address, SECTORSIZE, &retlen, buf))
return -1;
if (memcmpb(buf, 0xff, SECTORSIZE) != 0)
return -1;
@@ -326,7 +327,7 @@ int NFTL_formatblock(struct NFTLrecord *nftl, int block)
instr->mtd = nftl->mbd.mtd;
instr->addr = block * nftl->EraseSize;
instr->len = nftl->EraseSize;
- mtd->erase(mtd, instr);
+ mtd_erase(mtd, instr);
if (instr->state == MTD_ERASE_FAILED) {
printk("Error while formatting block %d\n", block);
@@ -355,7 +356,7 @@ int NFTL_formatblock(struct NFTLrecord *nftl, int block)
fail:
/* could not format, update the bad block table (caller is responsible
for setting the ReplUnitTable to BLOCK_RESERVED on failure) */
- nftl->mbd.mtd->block_markbad(nftl->mbd.mtd, instr->addr);
+ mtd_block_markbad(nftl->mbd.mtd, instr->addr);
return -1;
}
diff --git a/drivers/mtd/onenand/generic.c b/drivers/mtd/onenand/generic.c
index 7813095264a5..0ccd5bff2544 100644
--- a/drivers/mtd/onenand/generic.c
+++ b/drivers/mtd/onenand/generic.c
@@ -115,21 +115,9 @@ static struct platform_driver generic_onenand_driver = {
.remove = __devexit_p(generic_onenand_remove),
};
-MODULE_ALIAS("platform:" DRIVER_NAME);
-
-static int __init generic_onenand_init(void)
-{
- return platform_driver_register(&generic_onenand_driver);
-}
-
-static void __exit generic_onenand_exit(void)
-{
- platform_driver_unregister(&generic_onenand_driver);
-}
-
-module_init(generic_onenand_init);
-module_exit(generic_onenand_exit);
+module_platform_driver(generic_onenand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kyungmin Park <kyungmin.park@samsung.com>");
MODULE_DESCRIPTION("Glue layer for OneNAND flash on generic boards");
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index a8394730b4b6..a061bc163da2 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -2633,7 +2633,6 @@ static int onenand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
*/
static int onenand_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
- struct onenand_chip *this = mtd->priv;
int ret;
ret = onenand_block_isbad(mtd, ofs);
@@ -2645,7 +2644,7 @@ static int onenand_block_markbad(struct mtd_info *mtd, loff_t ofs)
}
onenand_get_device(mtd, FL_WRITING);
- ret = this->block_markbad(mtd, ofs);
+ ret = mtd_block_markbad(mtd, ofs);
onenand_release_device(mtd);
return ret;
}
diff --git a/drivers/mtd/onenand/samsung.c b/drivers/mtd/onenand/samsung.c
index 5474547eafc2..fa1ee43f735b 100644
--- a/drivers/mtd/onenand/samsung.c
+++ b/drivers/mtd/onenand/samsung.c
@@ -1133,18 +1133,7 @@ static struct platform_driver s3c_onenand_driver = {
.remove = __devexit_p(s3c_onenand_remove),
};
-static int __init s3c_onenand_init(void)
-{
- return platform_driver_register(&s3c_onenand_driver);
-}
-
-static void __exit s3c_onenand_exit(void)
-{
- platform_driver_unregister(&s3c_onenand_driver);
-}
-
-module_init(s3c_onenand_init);
-module_exit(s3c_onenand_exit);
+module_platform_driver(s3c_onenand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kyungmin Park <kyungmin.park@samsung.com>");
diff --git a/drivers/mtd/redboot.c b/drivers/mtd/redboot.c
index e366b1d84ead..48970c14beff 100644
--- a/drivers/mtd/redboot.c
+++ b/drivers/mtd/redboot.c
@@ -78,8 +78,8 @@ static int parse_redboot_partitions(struct mtd_info *master,
if ( directory < 0 ) {
offset = master->size + directory * master->erasesize;
- while (master->block_isbad &&
- master->block_isbad(master, offset)) {
+ while (mtd_can_have_bb(master) &&
+ mtd_block_isbad(master, offset)) {
if (!offset) {
nogood:
printk(KERN_NOTICE "Failed to find a non-bad block to check for RedBoot partition table\n");
@@ -89,8 +89,8 @@ static int parse_redboot_partitions(struct mtd_info *master,
}
} else {
offset = directory * master->erasesize;
- while (master->block_isbad &&
- master->block_isbad(master, offset)) {
+ while (mtd_can_have_bb(master) &&
+ mtd_block_isbad(master, offset)) {
offset += master->erasesize;
if (offset == master->size)
goto nogood;
@@ -104,8 +104,8 @@ static int parse_redboot_partitions(struct mtd_info *master,
printk(KERN_NOTICE "Searching for RedBoot partition table in %s at offset 0x%lx\n",
master->name, offset);
- ret = master->read(master, offset,
- master->erasesize, &retlen, (void *)buf);
+ ret = mtd_read(master, offset, master->erasesize, &retlen,
+ (void *)buf);
if (ret)
goto out;
diff --git a/drivers/mtd/rfd_ftl.c b/drivers/mtd/rfd_ftl.c
index 73ae217a4252..233b946e5d66 100644
--- a/drivers/mtd/rfd_ftl.c
+++ b/drivers/mtd/rfd_ftl.c
@@ -200,9 +200,9 @@ static int scan_header(struct partition *part)
part->sector_map[i] = -1;
for (i=0, blocks_found=0; i<part->total_blocks; i++) {
- rc = part->mbd.mtd->read(part->mbd.mtd,
- i * part->block_size, part->header_size,
- &retlen, (u_char*)part->header_cache);
+ rc = mtd_read(part->mbd.mtd, i * part->block_size,
+ part->header_size, &retlen,
+ (u_char *)part->header_cache);
if (!rc && retlen != part->header_size)
rc = -EIO;
@@ -250,8 +250,8 @@ static int rfd_ftl_readsect(struct mtd_blktrans_dev *dev, u_long sector, char *b
addr = part->sector_map[sector];
if (addr != -1) {
- rc = part->mbd.mtd->read(part->mbd.mtd, addr, SECTOR_SIZE,
- &retlen, (u_char*)buf);
+ rc = mtd_read(part->mbd.mtd, addr, SECTOR_SIZE, &retlen,
+ (u_char *)buf);
if (!rc && retlen != SECTOR_SIZE)
rc = -EIO;
@@ -304,9 +304,8 @@ static void erase_callback(struct erase_info *erase)
part->blocks[i].used_sectors = 0;
part->blocks[i].erases++;
- rc = part->mbd.mtd->write(part->mbd.mtd,
- part->blocks[i].offset, sizeof(magic), &retlen,
- (u_char*)&magic);
+ rc = mtd_write(part->mbd.mtd, part->blocks[i].offset, sizeof(magic),
+ &retlen, (u_char *)&magic);
if (!rc && retlen != sizeof(magic))
rc = -EIO;
@@ -342,7 +341,7 @@ static int erase_block(struct partition *part, int block)
part->blocks[block].state = BLOCK_ERASING;
part->blocks[block].free_sectors = 0;
- rc = part->mbd.mtd->erase(part->mbd.mtd, erase);
+ rc = mtd_erase(part->mbd.mtd, erase);
if (rc) {
printk(KERN_ERR PREFIX "erase of region %llx,%llx on '%s' "
@@ -372,9 +371,8 @@ static int move_block_contents(struct partition *part, int block_no, u_long *old
if (!map)
goto err2;
- rc = part->mbd.mtd->read(part->mbd.mtd,
- part->blocks[block_no].offset, part->header_size,
- &retlen, (u_char*)map);
+ rc = mtd_read(part->mbd.mtd, part->blocks[block_no].offset,
+ part->header_size, &retlen, (u_char *)map);
if (!rc && retlen != part->header_size)
rc = -EIO;
@@ -413,8 +411,8 @@ static int move_block_contents(struct partition *part, int block_no, u_long *old
}
continue;
}
- rc = part->mbd.mtd->read(part->mbd.mtd, addr,
- SECTOR_SIZE, &retlen, sector_data);
+ rc = mtd_read(part->mbd.mtd, addr, SECTOR_SIZE, &retlen,
+ sector_data);
if (!rc && retlen != SECTOR_SIZE)
rc = -EIO;
@@ -450,8 +448,7 @@ static int reclaim_block(struct partition *part, u_long *old_sector)
int rc;
/* we have a race if sync doesn't exist */
- if (part->mbd.mtd->sync)
- part->mbd.mtd->sync(part->mbd.mtd);
+ mtd_sync(part->mbd.mtd);
score = 0x7fffffff; /* MAX_INT */
best_block = -1;
@@ -563,8 +560,9 @@ static int find_writable_block(struct partition *part, u_long *old_sector)
}
}
- rc = part->mbd.mtd->read(part->mbd.mtd, part->blocks[block].offset,
- part->header_size, &retlen, (u_char*)part->header_cache);
+ rc = mtd_read(part->mbd.mtd, part->blocks[block].offset,
+ part->header_size, &retlen,
+ (u_char *)part->header_cache);
if (!rc && retlen != part->header_size)
rc = -EIO;
@@ -595,8 +593,8 @@ static int mark_sector_deleted(struct partition *part, u_long old_addr)
addr = part->blocks[block].offset +
(HEADER_MAP_OFFSET + offset) * sizeof(u16);
- rc = part->mbd.mtd->write(part->mbd.mtd, addr,
- sizeof(del), &retlen, (u_char*)&del);
+ rc = mtd_write(part->mbd.mtd, addr, sizeof(del), &retlen,
+ (u_char *)&del);
if (!rc && retlen != sizeof(del))
rc = -EIO;
@@ -668,8 +666,8 @@ static int do_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf,
addr = (i + part->header_sectors_per_block) * SECTOR_SIZE +
block->offset;
- rc = part->mbd.mtd->write(part->mbd.mtd,
- addr, SECTOR_SIZE, &retlen, (u_char*)buf);
+ rc = mtd_write(part->mbd.mtd, addr, SECTOR_SIZE, &retlen,
+ (u_char *)buf);
if (!rc && retlen != SECTOR_SIZE)
rc = -EIO;
@@ -688,8 +686,8 @@ static int do_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf,
part->header_cache[i + HEADER_MAP_OFFSET] = entry;
addr = block->offset + (HEADER_MAP_OFFSET + i) * sizeof(u16);
- rc = part->mbd.mtd->write(part->mbd.mtd, addr,
- sizeof(entry), &retlen, (u_char*)&entry);
+ rc = mtd_write(part->mbd.mtd, addr, sizeof(entry), &retlen,
+ (u_char *)&entry);
if (!rc && retlen != sizeof(entry))
rc = -EIO;
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
index fddb714e323c..072ed5970e2f 100644
--- a/drivers/mtd/sm_ftl.c
+++ b/drivers/mtd/sm_ftl.c
@@ -25,7 +25,7 @@
struct workqueue_struct *cache_flush_workqueue;
static int cache_timeout = 1000;
-module_param(cache_timeout, bool, S_IRUGO);
+module_param(cache_timeout, int, S_IRUGO);
MODULE_PARM_DESC(cache_timeout,
"Timeout (in ms) for cache flush (1000 ms default");
@@ -278,7 +278,7 @@ again:
/* Unfortunately, oob read will _always_ succeed,
despite card removal..... */
- ret = mtd->read_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);
+ ret = mtd_read_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);
/* Test for unknown errors */
if (ret != 0 && !mtd_is_bitflip_or_eccerr(ret)) {
@@ -343,7 +343,7 @@ static int sm_write_sector(struct sm_ftl *ftl,
ops.ooblen = SM_OOB_SIZE;
ops.oobbuf = (void *)oob;
- ret = mtd->write_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);
+ ret = mtd_write_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);
/* Now we assume that hardware will catch write bitflip errors */
/* If you are paranoid, use CONFIG_MTD_NAND_VERIFY_WRITE */
@@ -479,7 +479,7 @@ static int sm_erase_block(struct sm_ftl *ftl, int zone_num, uint16_t block,
return -EIO;
}
- if (mtd->erase(mtd, &erase)) {
+ if (mtd_erase(mtd, &erase)) {
sm_printk("erase of block %d in zone %d failed",
block, zone_num);
goto error;
@@ -645,8 +645,8 @@ int sm_get_media_info(struct sm_ftl *ftl, struct mtd_info *mtd)
if (!ftl->smallpagenand && mtd->oobsize < SM_OOB_SIZE)
return -ENODEV;
- /* We use these functions for IO */
- if (!mtd->read_oob || !mtd->write_oob)
+ /* We use OOB */
+ if (!mtd_has_oob(mtd))
return -ENODEV;
/* Find geometry information */
diff --git a/drivers/mtd/ssfdc.c b/drivers/mtd/ssfdc.c
index 976e3d28b962..ab2a52a039c3 100644
--- a/drivers/mtd/ssfdc.c
+++ b/drivers/mtd/ssfdc.c
@@ -122,9 +122,9 @@ static int get_valid_cis_sector(struct mtd_info *mtd)
* is not SSFDC formatted
*/
for (k = 0, offset = 0; k < 4; k++, offset += mtd->erasesize) {
- if (!mtd->block_isbad(mtd, offset)) {
- ret = mtd->read(mtd, offset, SECTOR_SIZE, &retlen,
- sect_buf);
+ if (mtd_block_isbad(mtd, offset)) {
+ ret = mtd_read(mtd, offset, SECTOR_SIZE, &retlen,
+ sect_buf);
/* CIS pattern match on the sector buffer */
if (ret < 0 || retlen != SECTOR_SIZE) {
@@ -156,7 +156,7 @@ static int read_physical_sector(struct mtd_info *mtd, uint8_t *sect_buf,
size_t retlen;
loff_t offset = (loff_t)sect_no << SECTOR_SHIFT;
- ret = mtd->read(mtd, offset, SECTOR_SIZE, &retlen, sect_buf);
+ ret = mtd_read(mtd, offset, SECTOR_SIZE, &retlen, sect_buf);
if (ret < 0 || retlen != SECTOR_SIZE)
return -1;
@@ -175,7 +175,7 @@ static int read_raw_oob(struct mtd_info *mtd, loff_t offs, uint8_t *buf)
ops.oobbuf = buf;
ops.datbuf = NULL;
- ret = mtd->read_oob(mtd, offs, &ops);
+ ret = mtd_read_oob(mtd, offs, &ops);
if (ret < 0 || ops.oobretlen != OOB_SIZE)
return -1;
@@ -255,7 +255,7 @@ static int build_logical_block_map(struct ssfdcr_record *ssfdc)
for (phys_block = ssfdc->cis_block + 1; phys_block < ssfdc->map_len;
phys_block++) {
offset = (unsigned long)phys_block * ssfdc->erase_size;
- if (mtd->block_isbad(mtd, offset))
+ if (mtd_block_isbad(mtd, offset))
continue; /* skip bad blocks */
ret = read_raw_oob(mtd, offset, oob_buf);
diff --git a/drivers/mtd/tests/mtd_oobtest.c b/drivers/mtd/tests/mtd_oobtest.c
index 933f7e5f32d3..ed9b62827f1b 100644
--- a/drivers/mtd/tests/mtd_oobtest.c
+++ b/drivers/mtd/tests/mtd_oobtest.c
@@ -78,7 +78,7 @@ static int erase_eraseblock(int ebnum)
ei.addr = addr;
ei.len = mtd->erasesize;
- err = mtd->erase(mtd, &ei);
+ err = mtd_erase(mtd, &ei);
if (err) {
printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum);
return err;
@@ -139,7 +139,7 @@ static int write_eraseblock(int ebnum)
ops.ooboffs = use_offset;
ops.datbuf = NULL;
ops.oobbuf = writebuf;
- err = mtd->write_oob(mtd, addr, &ops);
+ err = mtd_write_oob(mtd, addr, &ops);
if (err || ops.oobretlen != use_len) {
printk(PRINT_PREF "error: writeoob failed at %#llx\n",
(long long)addr);
@@ -192,7 +192,7 @@ static int verify_eraseblock(int ebnum)
ops.ooboffs = use_offset;
ops.datbuf = NULL;
ops.oobbuf = readbuf;
- err = mtd->read_oob(mtd, addr, &ops);
+ err = mtd_read_oob(mtd, addr, &ops);
if (err || ops.oobretlen != use_len) {
printk(PRINT_PREF "error: readoob failed at %#llx\n",
(long long)addr);
@@ -219,7 +219,7 @@ static int verify_eraseblock(int ebnum)
ops.ooboffs = 0;
ops.datbuf = NULL;
ops.oobbuf = readbuf;
- err = mtd->read_oob(mtd, addr, &ops);
+ err = mtd_read_oob(mtd, addr, &ops);
if (err || ops.oobretlen != mtd->ecclayout->oobavail) {
printk(PRINT_PREF "error: readoob failed at "
"%#llx\n", (long long)addr);
@@ -284,7 +284,7 @@ static int verify_eraseblock_in_one_go(int ebnum)
ops.ooboffs = 0;
ops.datbuf = NULL;
ops.oobbuf = readbuf;
- err = mtd->read_oob(mtd, addr, &ops);
+ err = mtd_read_oob(mtd, addr, &ops);
if (err || ops.oobretlen != len) {
printk(PRINT_PREF "error: readoob failed at %#llx\n",
(long long)addr);
@@ -329,7 +329,7 @@ static int is_block_bad(int ebnum)
int ret;
loff_t addr = ebnum * mtd->erasesize;
- ret = mtd->block_isbad(mtd, addr);
+ ret = mtd_block_isbad(mtd, addr);
if (ret)
printk(PRINT_PREF "block %d is bad\n", ebnum);
return ret;
@@ -524,7 +524,7 @@ static int __init mtd_oobtest_init(void)
ops.oobbuf = writebuf;
printk(PRINT_PREF "attempting to start write past end of OOB\n");
printk(PRINT_PREF "an error is expected...\n");
- err = mtd->write_oob(mtd, addr0, &ops);
+ err = mtd_write_oob(mtd, addr0, &ops);
if (err) {
printk(PRINT_PREF "error occurred as expected\n");
err = 0;
@@ -544,7 +544,7 @@ static int __init mtd_oobtest_init(void)
ops.oobbuf = readbuf;
printk(PRINT_PREF "attempting to start read past end of OOB\n");
printk(PRINT_PREF "an error is expected...\n");
- err = mtd->read_oob(mtd, addr0, &ops);
+ err = mtd_read_oob(mtd, addr0, &ops);
if (err) {
printk(PRINT_PREF "error occurred as expected\n");
err = 0;
@@ -568,7 +568,7 @@ static int __init mtd_oobtest_init(void)
ops.oobbuf = writebuf;
printk(PRINT_PREF "attempting to write past end of device\n");
printk(PRINT_PREF "an error is expected...\n");
- err = mtd->write_oob(mtd, mtd->size - mtd->writesize, &ops);
+ err = mtd_write_oob(mtd, mtd->size - mtd->writesize, &ops);
if (err) {
printk(PRINT_PREF "error occurred as expected\n");
err = 0;
@@ -588,7 +588,7 @@ static int __init mtd_oobtest_init(void)
ops.oobbuf = readbuf;
printk(PRINT_PREF "attempting to read past end of device\n");
printk(PRINT_PREF "an error is expected...\n");
- err = mtd->read_oob(mtd, mtd->size - mtd->writesize, &ops);
+ err = mtd_read_oob(mtd, mtd->size - mtd->writesize, &ops);
if (err) {
printk(PRINT_PREF "error occurred as expected\n");
err = 0;
@@ -612,7 +612,7 @@ static int __init mtd_oobtest_init(void)
ops.oobbuf = writebuf;
printk(PRINT_PREF "attempting to write past end of device\n");
printk(PRINT_PREF "an error is expected...\n");
- err = mtd->write_oob(mtd, mtd->size - mtd->writesize, &ops);
+ err = mtd_write_oob(mtd, mtd->size - mtd->writesize, &ops);
if (err) {
printk(PRINT_PREF "error occurred as expected\n");
err = 0;
@@ -632,7 +632,7 @@ static int __init mtd_oobtest_init(void)
ops.oobbuf = readbuf;
printk(PRINT_PREF "attempting to read past end of device\n");
printk(PRINT_PREF "an error is expected...\n");
- err = mtd->read_oob(mtd, mtd->size - mtd->writesize, &ops);
+ err = mtd_read_oob(mtd, mtd->size - mtd->writesize, &ops);
if (err) {
printk(PRINT_PREF "error occurred as expected\n");
err = 0;
@@ -670,7 +670,7 @@ static int __init mtd_oobtest_init(void)
ops.ooboffs = 0;
ops.datbuf = NULL;
ops.oobbuf = writebuf;
- err = mtd->write_oob(mtd, addr, &ops);
+ err = mtd_write_oob(mtd, addr, &ops);
if (err)
goto out;
if (i % 256 == 0)
@@ -698,7 +698,7 @@ static int __init mtd_oobtest_init(void)
ops.ooboffs = 0;
ops.datbuf = NULL;
ops.oobbuf = readbuf;
- err = mtd->read_oob(mtd, addr, &ops);
+ err = mtd_read_oob(mtd, addr, &ops);
if (err)
goto out;
if (memcmp(readbuf, writebuf, mtd->ecclayout->oobavail * 2)) {
diff --git a/drivers/mtd/tests/mtd_pagetest.c b/drivers/mtd/tests/mtd_pagetest.c
index afafb6935fd0..252ddb092fb2 100644
--- a/drivers/mtd/tests/mtd_pagetest.c
+++ b/drivers/mtd/tests/mtd_pagetest.c
@@ -77,7 +77,7 @@ static int erase_eraseblock(int ebnum)
ei.addr = addr;
ei.len = mtd->erasesize;
- err = mtd->erase(mtd, &ei);
+ err = mtd_erase(mtd, &ei);
if (err) {
printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum);
return err;
@@ -95,12 +95,12 @@ static int erase_eraseblock(int ebnum)
static int write_eraseblock(int ebnum)
{
int err = 0;
- size_t written = 0;
+ size_t written;
loff_t addr = ebnum * mtd->erasesize;
set_random_data(writebuf, mtd->erasesize);
cond_resched();
- err = mtd->write(mtd, addr, mtd->erasesize, &written, writebuf);
+ err = mtd_write(mtd, addr, mtd->erasesize, &written, writebuf);
if (err || written != mtd->erasesize)
printk(PRINT_PREF "error: write failed at %#llx\n",
(long long)addr);
@@ -111,7 +111,7 @@ static int write_eraseblock(int ebnum)
static int verify_eraseblock(int ebnum)
{
uint32_t j;
- size_t read = 0;
+ size_t read;
int err = 0, i;
loff_t addr0, addrn;
loff_t addr = ebnum * mtd->erasesize;
@@ -127,7 +127,7 @@ static int verify_eraseblock(int ebnum)
set_random_data(writebuf, mtd->erasesize);
for (j = 0; j < pgcnt - 1; ++j, addr += pgsize) {
/* Do a read to set the internal dataRAMs to different data */
- err = mtd->read(mtd, addr0, bufsize, &read, twopages);
+ err = mtd_read(mtd, addr0, bufsize, &read, twopages);
if (mtd_is_bitflip(err))
err = 0;
if (err || read != bufsize) {
@@ -135,7 +135,7 @@ static int verify_eraseblock(int ebnum)
(long long)addr0);
return err;
}
- err = mtd->read(mtd, addrn - bufsize, bufsize, &read, twopages);
+ err = mtd_read(mtd, addrn - bufsize, bufsize, &read, twopages);
if (mtd_is_bitflip(err))
err = 0;
if (err || read != bufsize) {
@@ -144,8 +144,7 @@ static int verify_eraseblock(int ebnum)
return err;
}
memset(twopages, 0, bufsize);
- read = 0;
- err = mtd->read(mtd, addr, bufsize, &read, twopages);
+ err = mtd_read(mtd, addr, bufsize, &read, twopages);
if (mtd_is_bitflip(err))
err = 0;
if (err || read != bufsize) {
@@ -163,7 +162,7 @@ static int verify_eraseblock(int ebnum)
if (addr <= addrn - pgsize - pgsize && !bbt[ebnum + 1]) {
unsigned long oldnext = next;
/* Do a read to set the internal dataRAMs to different data */
- err = mtd->read(mtd, addr0, bufsize, &read, twopages);
+ err = mtd_read(mtd, addr0, bufsize, &read, twopages);
if (mtd_is_bitflip(err))
err = 0;
if (err || read != bufsize) {
@@ -171,7 +170,7 @@ static int verify_eraseblock(int ebnum)
(long long)addr0);
return err;
}
- err = mtd->read(mtd, addrn - bufsize, bufsize, &read, twopages);
+ err = mtd_read(mtd, addrn - bufsize, bufsize, &read, twopages);
if (mtd_is_bitflip(err))
err = 0;
if (err || read != bufsize) {
@@ -180,8 +179,7 @@ static int verify_eraseblock(int ebnum)
return err;
}
memset(twopages, 0, bufsize);
- read = 0;
- err = mtd->read(mtd, addr, bufsize, &read, twopages);
+ err = mtd_read(mtd, addr, bufsize, &read, twopages);
if (mtd_is_bitflip(err))
err = 0;
if (err || read != bufsize) {
@@ -203,7 +201,7 @@ static int verify_eraseblock(int ebnum)
static int crosstest(void)
{
- size_t read = 0;
+ size_t read;
int err = 0, i;
loff_t addr, addr0, addrn;
unsigned char *pp1, *pp2, *pp3, *pp4;
@@ -228,9 +226,8 @@ static int crosstest(void)
addrn -= mtd->erasesize;
/* Read 2nd-to-last page to pp1 */
- read = 0;
addr = addrn - pgsize - pgsize;
- err = mtd->read(mtd, addr, pgsize, &read, pp1);
+ err = mtd_read(mtd, addr, pgsize, &read, pp1);
if (mtd_is_bitflip(err))
err = 0;
if (err || read != pgsize) {
@@ -241,9 +238,8 @@ static int crosstest(void)
}
/* Read 3rd-to-last page to pp1 */
- read = 0;
addr = addrn - pgsize - pgsize - pgsize;
- err = mtd->read(mtd, addr, pgsize, &read, pp1);
+ err = mtd_read(mtd, addr, pgsize, &read, pp1);
if (mtd_is_bitflip(err))
err = 0;
if (err || read != pgsize) {
@@ -254,10 +250,9 @@ static int crosstest(void)
}
/* Read first page to pp2 */
- read = 0;
addr = addr0;
printk(PRINT_PREF "reading page at %#llx\n", (long long)addr);
- err = mtd->read(mtd, addr, pgsize, &read, pp2);
+ err = mtd_read(mtd, addr, pgsize, &read, pp2);
if (mtd_is_bitflip(err))
err = 0;
if (err || read != pgsize) {
@@ -268,10 +263,9 @@ static int crosstest(void)
}
/* Read last page to pp3 */
- read = 0;
addr = addrn - pgsize;
printk(PRINT_PREF "reading page at %#llx\n", (long long)addr);
- err = mtd->read(mtd, addr, pgsize, &read, pp3);
+ err = mtd_read(mtd, addr, pgsize, &read, pp3);
if (mtd_is_bitflip(err))
err = 0;
if (err || read != pgsize) {
@@ -282,10 +276,9 @@ static int crosstest(void)
}
/* Read first page again to pp4 */
- read = 0;
addr = addr0;
printk(PRINT_PREF "reading page at %#llx\n", (long long)addr);
- err = mtd->read(mtd, addr, pgsize, &read, pp4);
+ err = mtd_read(mtd, addr, pgsize, &read, pp4);
if (mtd_is_bitflip(err))
err = 0;
if (err || read != pgsize) {
@@ -309,7 +302,7 @@ static int crosstest(void)
static int erasecrosstest(void)
{
- size_t read = 0, written = 0;
+ size_t read, written;
int err = 0, i, ebnum, ebnum2;
loff_t addr0;
char *readbuf = twopages;
@@ -335,7 +328,7 @@ static int erasecrosstest(void)
printk(PRINT_PREF "writing 1st page of block %d\n", ebnum);
set_random_data(writebuf, pgsize);
strcpy(writebuf, "There is no data like this!");
- err = mtd->write(mtd, addr0, pgsize, &written, writebuf);
+ err = mtd_write(mtd, addr0, pgsize, &written, writebuf);
if (err || written != pgsize) {
printk(PRINT_PREF "error: write failed at %#llx\n",
(long long)addr0);
@@ -344,7 +337,7 @@ static int erasecrosstest(void)
printk(PRINT_PREF "reading 1st page of block %d\n", ebnum);
memset(readbuf, 0, pgsize);
- err = mtd->read(mtd, addr0, pgsize, &read, readbuf);
+ err = mtd_read(mtd, addr0, pgsize, &read, readbuf);
if (mtd_is_bitflip(err))
err = 0;
if (err || read != pgsize) {
@@ -368,7 +361,7 @@ static int erasecrosstest(void)
printk(PRINT_PREF "writing 1st page of block %d\n", ebnum);
set_random_data(writebuf, pgsize);
strcpy(writebuf, "There is no data like this!");
- err = mtd->write(mtd, addr0, pgsize, &written, writebuf);
+ err = mtd_write(mtd, addr0, pgsize, &written, writebuf);
if (err || written != pgsize) {
printk(PRINT_PREF "error: write failed at %#llx\n",
(long long)addr0);
@@ -382,7 +375,7 @@ static int erasecrosstest(void)
printk(PRINT_PREF "reading 1st page of block %d\n", ebnum);
memset(readbuf, 0, pgsize);
- err = mtd->read(mtd, addr0, pgsize, &read, readbuf);
+ err = mtd_read(mtd, addr0, pgsize, &read, readbuf);
if (mtd_is_bitflip(err))
err = 0;
if (err || read != pgsize) {
@@ -405,7 +398,7 @@ static int erasecrosstest(void)
static int erasetest(void)
{
- size_t read = 0, written = 0;
+ size_t read, written;
int err = 0, i, ebnum, ok = 1;
loff_t addr0;
@@ -425,7 +418,7 @@ static int erasetest(void)
printk(PRINT_PREF "writing 1st page of block %d\n", ebnum);
set_random_data(writebuf, pgsize);
- err = mtd->write(mtd, addr0, pgsize, &written, writebuf);
+ err = mtd_write(mtd, addr0, pgsize, &written, writebuf);
if (err || written != pgsize) {
printk(PRINT_PREF "error: write failed at %#llx\n",
(long long)addr0);
@@ -438,7 +431,7 @@ static int erasetest(void)
return err;
printk(PRINT_PREF "reading 1st page of block %d\n", ebnum);
- err = mtd->read(mtd, addr0, pgsize, &read, twopages);
+ err = mtd_read(mtd, addr0, pgsize, &read, twopages);
if (mtd_is_bitflip(err))
err = 0;
if (err || read != pgsize) {
@@ -469,7 +462,7 @@ static int is_block_bad(int ebnum)
loff_t addr = ebnum * mtd->erasesize;
int ret;
- ret = mtd->block_isbad(mtd, addr);
+ ret = mtd_block_isbad(mtd, addr);
if (ret)
printk(PRINT_PREF "block %d is bad\n", ebnum);
return ret;
diff --git a/drivers/mtd/tests/mtd_readtest.c b/drivers/mtd/tests/mtd_readtest.c
index 550fe51225a7..121aba189cec 100644
--- a/drivers/mtd/tests/mtd_readtest.c
+++ b/drivers/mtd/tests/mtd_readtest.c
@@ -44,7 +44,7 @@ static int pgcnt;
static int read_eraseblock_by_page(int ebnum)
{
- size_t read = 0;
+ size_t read;
int i, ret, err = 0;
loff_t addr = ebnum * mtd->erasesize;
void *buf = iobuf;
@@ -52,7 +52,7 @@ static int read_eraseblock_by_page(int ebnum)
for (i = 0; i < pgcnt; i++) {
memset(buf, 0 , pgcnt);
- ret = mtd->read(mtd, addr, pgsize, &read, buf);
+ ret = mtd_read(mtd, addr, pgsize, &read, buf);
if (ret == -EUCLEAN)
ret = 0;
if (ret || read != pgsize) {
@@ -74,7 +74,7 @@ static int read_eraseblock_by_page(int ebnum)
ops.ooboffs = 0;
ops.datbuf = NULL;
ops.oobbuf = oobbuf;
- ret = mtd->read_oob(mtd, addr, &ops);
+ ret = mtd_read_oob(mtd, addr, &ops);
if ((ret && !mtd_is_bitflip(ret)) ||
ops.oobretlen != mtd->oobsize) {
printk(PRINT_PREF "error: read oob failed at "
@@ -132,7 +132,7 @@ static int is_block_bad(int ebnum)
loff_t addr = ebnum * mtd->erasesize;
int ret;
- ret = mtd->block_isbad(mtd, addr);
+ ret = mtd_block_isbad(mtd, addr);
if (ret)
printk(PRINT_PREF "block %d is bad\n", ebnum);
return ret;
@@ -148,8 +148,7 @@ static int scan_for_bad_eraseblocks(void)
return -ENOMEM;
}
- /* NOR flash does not implement block_isbad */
- if (mtd->block_isbad == NULL)
+ if (!mtd_can_have_bb(mtd))
return 0;
printk(PRINT_PREF "scanning for bad eraseblocks\n");
diff --git a/drivers/mtd/tests/mtd_speedtest.c b/drivers/mtd/tests/mtd_speedtest.c
index 493b367bdd35..2aec4f3b72be 100644
--- a/drivers/mtd/tests/mtd_speedtest.c
+++ b/drivers/mtd/tests/mtd_speedtest.c
@@ -79,7 +79,7 @@ static int erase_eraseblock(int ebnum)
ei.addr = addr;
ei.len = mtd->erasesize;
- err = mtd->erase(mtd, &ei);
+ err = mtd_erase(mtd, &ei);
if (err) {
printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum);
return err;
@@ -105,7 +105,7 @@ static int multiblock_erase(int ebnum, int blocks)
ei.addr = addr;
ei.len = mtd->erasesize * blocks;
- err = mtd->erase(mtd, &ei);
+ err = mtd_erase(mtd, &ei);
if (err) {
printk(PRINT_PREF "error %d while erasing EB %d, blocks %d\n",
err, ebnum, blocks);
@@ -139,11 +139,11 @@ static int erase_whole_device(void)
static int write_eraseblock(int ebnum)
{
- size_t written = 0;
+ size_t written;
int err = 0;
loff_t addr = ebnum * mtd->erasesize;
- err = mtd->write(mtd, addr, mtd->erasesize, &written, iobuf);
+ err = mtd_write(mtd, addr, mtd->erasesize, &written, iobuf);
if (err || written != mtd->erasesize) {
printk(PRINT_PREF "error: write failed at %#llx\n", addr);
if (!err)
@@ -155,13 +155,13 @@ static int write_eraseblock(int ebnum)
static int write_eraseblock_by_page(int ebnum)
{
- size_t written = 0;
+ size_t written;
int i, err = 0;
loff_t addr = ebnum * mtd->erasesize;
void *buf = iobuf;
for (i = 0; i < pgcnt; i++) {
- err = mtd->write(mtd, addr, pgsize, &written, buf);
+ err = mtd_write(mtd, addr, pgsize, &written, buf);
if (err || written != pgsize) {
printk(PRINT_PREF "error: write failed at %#llx\n",
addr);
@@ -178,13 +178,13 @@ static int write_eraseblock_by_page(int ebnum)
static int write_eraseblock_by_2pages(int ebnum)
{
- size_t written = 0, sz = pgsize * 2;
+ size_t written, sz = pgsize * 2;
int i, n = pgcnt / 2, err = 0;
loff_t addr = ebnum * mtd->erasesize;
void *buf = iobuf;
for (i = 0; i < n; i++) {
- err = mtd->write(mtd, addr, sz, &written, buf);
+ err = mtd_write(mtd, addr, sz, &written, buf);
if (err || written != sz) {
printk(PRINT_PREF "error: write failed at %#llx\n",
addr);
@@ -196,7 +196,7 @@ static int write_eraseblock_by_2pages(int ebnum)
buf += sz;
}
if (pgcnt % 2) {
- err = mtd->write(mtd, addr, pgsize, &written, buf);
+ err = mtd_write(mtd, addr, pgsize, &written, buf);
if (err || written != pgsize) {
printk(PRINT_PREF "error: write failed at %#llx\n",
addr);
@@ -210,11 +210,11 @@ static int write_eraseblock_by_2pages(int ebnum)
static int read_eraseblock(int ebnum)
{
- size_t read = 0;
+ size_t read;
int err = 0;
loff_t addr = ebnum * mtd->erasesize;
- err = mtd->read(mtd, addr, mtd->erasesize, &read, iobuf);
+ err = mtd_read(mtd, addr, mtd->erasesize, &read, iobuf);
/* Ignore corrected ECC errors */
if (mtd_is_bitflip(err))
err = 0;
@@ -229,13 +229,13 @@ static int read_eraseblock(int ebnum)
static int read_eraseblock_by_page(int ebnum)
{
- size_t read = 0;
+ size_t read;
int i, err = 0;
loff_t addr = ebnum * mtd->erasesize;
void *buf = iobuf;
for (i = 0; i < pgcnt; i++) {
- err = mtd->read(mtd, addr, pgsize, &read, buf);
+ err = mtd_read(mtd, addr, pgsize, &read, buf);
/* Ignore corrected ECC errors */
if (mtd_is_bitflip(err))
err = 0;
@@ -255,13 +255,13 @@ static int read_eraseblock_by_page(int ebnum)
static int read_eraseblock_by_2pages(int ebnum)
{
- size_t read = 0, sz = pgsize * 2;
+ size_t read, sz = pgsize * 2;
int i, n = pgcnt / 2, err = 0;
loff_t addr = ebnum * mtd->erasesize;
void *buf = iobuf;
for (i = 0; i < n; i++) {
- err = mtd->read(mtd, addr, sz, &read, buf);
+ err = mtd_read(mtd, addr, sz, &read, buf);
/* Ignore corrected ECC errors */
if (mtd_is_bitflip(err))
err = 0;
@@ -276,7 +276,7 @@ static int read_eraseblock_by_2pages(int ebnum)
buf += sz;
}
if (pgcnt % 2) {
- err = mtd->read(mtd, addr, pgsize, &read, buf);
+ err = mtd_read(mtd, addr, pgsize, &read, buf);
/* Ignore corrected ECC errors */
if (mtd_is_bitflip(err))
err = 0;
@@ -296,7 +296,7 @@ static int is_block_bad(int ebnum)
loff_t addr = ebnum * mtd->erasesize;
int ret;
- ret = mtd->block_isbad(mtd, addr);
+ ret = mtd_block_isbad(mtd, addr);
if (ret)
printk(PRINT_PREF "block %d is bad\n", ebnum);
return ret;
@@ -336,8 +336,7 @@ static int scan_for_bad_eraseblocks(void)
return -ENOMEM;
}
- /* NOR flash does not implement block_isbad */
- if (mtd->block_isbad == NULL)
+ if (!mtd_can_have_bb(mtd))
goto out;
printk(PRINT_PREF "scanning for bad eraseblocks\n");
diff --git a/drivers/mtd/tests/mtd_stresstest.c b/drivers/mtd/tests/mtd_stresstest.c
index 52ffd9120e0d..7b33f22d0b58 100644
--- a/drivers/mtd/tests/mtd_stresstest.c
+++ b/drivers/mtd/tests/mtd_stresstest.c
@@ -112,7 +112,7 @@ static int erase_eraseblock(int ebnum)
ei.addr = addr;
ei.len = mtd->erasesize;
- err = mtd->erase(mtd, &ei);
+ err = mtd_erase(mtd, &ei);
if (unlikely(err)) {
printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum);
return err;
@@ -132,7 +132,7 @@ static int is_block_bad(int ebnum)
loff_t addr = ebnum * mtd->erasesize;
int ret;
- ret = mtd->block_isbad(mtd, addr);
+ ret = mtd_block_isbad(mtd, addr);
if (ret)
printk(PRINT_PREF "block %d is bad\n", ebnum);
return ret;
@@ -140,7 +140,7 @@ static int is_block_bad(int ebnum)
static int do_read(void)
{
- size_t read = 0;
+ size_t read;
int eb = rand_eb();
int offs = rand_offs();
int len = rand_len(offs), err;
@@ -153,7 +153,7 @@ static int do_read(void)
len = mtd->erasesize - offs;
}
addr = eb * mtd->erasesize + offs;
- err = mtd->read(mtd, addr, len, &read, readbuf);
+ err = mtd_read(mtd, addr, len, &read, readbuf);
if (mtd_is_bitflip(err))
err = 0;
if (unlikely(err || read != len)) {
@@ -169,7 +169,7 @@ static int do_read(void)
static int do_write(void)
{
int eb = rand_eb(), offs, err, len;
- size_t written = 0;
+ size_t written;
loff_t addr;
offs = offsets[eb];
@@ -192,7 +192,7 @@ static int do_write(void)
}
}
addr = eb * mtd->erasesize + offs;
- err = mtd->write(mtd, addr, len, &written, writebuf);
+ err = mtd_write(mtd, addr, len, &written, writebuf);
if (unlikely(err || written != len)) {
printk(PRINT_PREF "error: write failed at 0x%llx\n",
(long long)addr);
@@ -227,8 +227,7 @@ static int scan_for_bad_eraseblocks(void)
return -ENOMEM;
}
- /* NOR flash does not implement block_isbad */
- if (mtd->block_isbad == NULL)
+ if (!mtd_can_have_bb(mtd))
return 0;
printk(PRINT_PREF "scanning for bad eraseblocks\n");
@@ -284,6 +283,12 @@ static int __init mtd_stresstest_init(void)
(unsigned long long)mtd->size, mtd->erasesize,
pgsize, ebcnt, pgcnt, mtd->oobsize);
+ if (ebcnt < 2) {
+ printk(PRINT_PREF "error: need at least 2 eraseblocks\n");
+ err = -ENOSPC;
+ goto out_put_mtd;
+ }
+
/* Read or write up 2 eraseblocks at a time */
bufsize = mtd->erasesize * 2;
@@ -322,6 +327,7 @@ out:
kfree(bbt);
vfree(writebuf);
vfree(readbuf);
+out_put_mtd:
put_mtd_device(mtd);
if (err)
printk(PRINT_PREF "error %d occurred\n", err);
diff --git a/drivers/mtd/tests/mtd_subpagetest.c b/drivers/mtd/tests/mtd_subpagetest.c
index 1a05bfac4eee..9667bf535282 100644
--- a/drivers/mtd/tests/mtd_subpagetest.c
+++ b/drivers/mtd/tests/mtd_subpagetest.c
@@ -80,7 +80,7 @@ static int erase_eraseblock(int ebnum)
ei.addr = addr;
ei.len = mtd->erasesize;
- err = mtd->erase(mtd, &ei);
+ err = mtd_erase(mtd, &ei);
if (err) {
printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum);
return err;
@@ -115,12 +115,12 @@ static int erase_whole_device(void)
static int write_eraseblock(int ebnum)
{
- size_t written = 0;
+ size_t written;
int err = 0;
loff_t addr = ebnum * mtd->erasesize;
set_random_data(writebuf, subpgsize);
- err = mtd->write(mtd, addr, subpgsize, &written, writebuf);
+ err = mtd_write(mtd, addr, subpgsize, &written, writebuf);
if (unlikely(err || written != subpgsize)) {
printk(PRINT_PREF "error: write failed at %#llx\n",
(long long)addr);
@@ -134,7 +134,7 @@ static int write_eraseblock(int ebnum)
addr += subpgsize;
set_random_data(writebuf, subpgsize);
- err = mtd->write(mtd, addr, subpgsize, &written, writebuf);
+ err = mtd_write(mtd, addr, subpgsize, &written, writebuf);
if (unlikely(err || written != subpgsize)) {
printk(PRINT_PREF "error: write failed at %#llx\n",
(long long)addr);
@@ -150,7 +150,7 @@ static int write_eraseblock(int ebnum)
static int write_eraseblock2(int ebnum)
{
- size_t written = 0;
+ size_t written;
int err = 0, k;
loff_t addr = ebnum * mtd->erasesize;
@@ -158,7 +158,7 @@ static int write_eraseblock2(int ebnum)
if (addr + (subpgsize * k) > (ebnum + 1) * mtd->erasesize)
break;
set_random_data(writebuf, subpgsize * k);
- err = mtd->write(mtd, addr, subpgsize * k, &written, writebuf);
+ err = mtd_write(mtd, addr, subpgsize * k, &written, writebuf);
if (unlikely(err || written != subpgsize * k)) {
printk(PRINT_PREF "error: write failed at %#llx\n",
(long long)addr);
@@ -189,14 +189,13 @@ static void print_subpage(unsigned char *p)
static int verify_eraseblock(int ebnum)
{
- size_t read = 0;
+ size_t read;
int err = 0;
loff_t addr = ebnum * mtd->erasesize;
set_random_data(writebuf, subpgsize);
clear_data(readbuf, subpgsize);
- read = 0;
- err = mtd->read(mtd, addr, subpgsize, &read, readbuf);
+ err = mtd_read(mtd, addr, subpgsize, &read, readbuf);
if (unlikely(err || read != subpgsize)) {
if (mtd_is_bitflip(err) && read == subpgsize) {
printk(PRINT_PREF "ECC correction at %#llx\n",
@@ -223,8 +222,7 @@ static int verify_eraseblock(int ebnum)
set_random_data(writebuf, subpgsize);
clear_data(readbuf, subpgsize);
- read = 0;
- err = mtd->read(mtd, addr, subpgsize, &read, readbuf);
+ err = mtd_read(mtd, addr, subpgsize, &read, readbuf);
if (unlikely(err || read != subpgsize)) {
if (mtd_is_bitflip(err) && read == subpgsize) {
printk(PRINT_PREF "ECC correction at %#llx\n",
@@ -252,7 +250,7 @@ static int verify_eraseblock(int ebnum)
static int verify_eraseblock2(int ebnum)
{
- size_t read = 0;
+ size_t read;
int err = 0, k;
loff_t addr = ebnum * mtd->erasesize;
@@ -261,8 +259,7 @@ static int verify_eraseblock2(int ebnum)
break;
set_random_data(writebuf, subpgsize * k);
clear_data(readbuf, subpgsize * k);
- read = 0;
- err = mtd->read(mtd, addr, subpgsize * k, &read, readbuf);
+ err = mtd_read(mtd, addr, subpgsize * k, &read, readbuf);
if (unlikely(err || read != subpgsize * k)) {
if (mtd_is_bitflip(err) && read == subpgsize * k) {
printk(PRINT_PREF "ECC correction at %#llx\n",
@@ -288,15 +285,14 @@ static int verify_eraseblock2(int ebnum)
static int verify_eraseblock_ff(int ebnum)
{
uint32_t j;
- size_t read = 0;
+ size_t read;
int err = 0;
loff_t addr = ebnum * mtd->erasesize;
memset(writebuf, 0xff, subpgsize);
for (j = 0; j < mtd->erasesize / subpgsize; ++j) {
clear_data(readbuf, subpgsize);
- read = 0;
- err = mtd->read(mtd, addr, subpgsize, &read, readbuf);
+ err = mtd_read(mtd, addr, subpgsize, &read, readbuf);
if (unlikely(err || read != subpgsize)) {
if (mtd_is_bitflip(err) && read == subpgsize) {
printk(PRINT_PREF "ECC correction at %#llx\n",
@@ -344,7 +340,7 @@ static int is_block_bad(int ebnum)
loff_t addr = ebnum * mtd->erasesize;
int ret;
- ret = mtd->block_isbad(mtd, addr);
+ ret = mtd_block_isbad(mtd, addr);
if (ret)
printk(PRINT_PREF "block %d is bad\n", ebnum);
return ret;
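
The erase and bad-block conversions in the test drivers assume equally thin wrappers for mtd->erase and mtd->block_isbad; a hedged sketch:

	/* Sketch only: forwards an erase request to the driver. */
	static inline int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
	{
		return mtd->erase(mtd, instr);
	}

	/* Sketch only: devices without a bad-block method (e.g. NOR) are
	 * assumed to report "not bad". */
	static inline int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
	{
		return mtd->block_isbad ? mtd->block_isbad(mtd, ofs) : 0;
	}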
diff --git a/drivers/mtd/tests/mtd_torturetest.c b/drivers/mtd/tests/mtd_torturetest.c
index 03ab649a6964..b65861bc7b8e 100644
--- a/drivers/mtd/tests/mtd_torturetest.c
+++ b/drivers/mtd/tests/mtd_torturetest.c
@@ -105,7 +105,7 @@ static inline int erase_eraseblock(int ebnum)
ei.addr = addr;
ei.len = mtd->erasesize;
- err = mtd->erase(mtd, &ei);
+ err = mtd_erase(mtd, &ei);
if (err) {
printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum);
return err;
@@ -127,7 +127,7 @@ static inline int erase_eraseblock(int ebnum)
static inline int check_eraseblock(int ebnum, unsigned char *buf)
{
int err, retries = 0;
- size_t read = 0;
+ size_t read;
loff_t addr = ebnum * mtd->erasesize;
size_t len = mtd->erasesize;
@@ -137,7 +137,7 @@ static inline int check_eraseblock(int ebnum, unsigned char *buf)
}
retry:
- err = mtd->read(mtd, addr, len, &read, check_buf);
+ err = mtd_read(mtd, addr, len, &read, check_buf);
if (mtd_is_bitflip(err))
printk(PRINT_PREF "single bit flip occurred at EB %d "
"MTD reported that it was fixed.\n", ebnum);
@@ -181,7 +181,7 @@ retry:
static inline int write_pattern(int ebnum, void *buf)
{
int err;
- size_t written = 0;
+ size_t written;
loff_t addr = ebnum * mtd->erasesize;
size_t len = mtd->erasesize;
@@ -189,7 +189,7 @@ static inline int write_pattern(int ebnum, void *buf)
addr = (ebnum + 1) * mtd->erasesize - pgcnt * pgsize;
len = pgcnt * pgsize;
}
- err = mtd->write(mtd, addr, len, &written, buf);
+ err = mtd_write(mtd, addr, len, &written, buf);
if (err) {
printk(PRINT_PREF "error %d while writing EB %d, written %zd"
" bytes\n", err, ebnum, written);
@@ -290,10 +290,9 @@ static int __init tort_init(void)
* Check if there is a bad eraseblock among those we are going to test.
*/
memset(&bad_ebs[0], 0, sizeof(int) * ebcnt);
- if (mtd->block_isbad) {
+ if (mtd_can_have_bb(mtd)) {
for (i = eb; i < eb + ebcnt; i++) {
- err = mtd->block_isbad(mtd,
- (loff_t)i * mtd->erasesize);
+ err = mtd_block_isbad(mtd, (loff_t)i * mtd->erasesize);
if (err < 0) {
printk(PRINT_PREF "block_isbad() returned %d "
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 6c3fb5ab20f5..115749f20f9e 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -664,7 +664,7 @@ static int io_init(struct ubi_device *ubi)
ubi->peb_count = mtd_div_by_eb(ubi->mtd->size, ubi->mtd);
ubi->flash_size = ubi->mtd->size;
- if (ubi->mtd->block_isbad && ubi->mtd->block_markbad)
+ if (mtd_can_have_bb(ubi->mtd))
ubi->bad_allowed = 1;
if (ubi->mtd->type == MTD_NORFLASH) {
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index 3320a50ba4f0..ad76592fb2f4 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -632,6 +632,9 @@ static int verify_mkvol_req(const struct ubi_device *ubi,
if (req->alignment != 1 && n)
goto bad;
+ if (!req->name[0] || !req->name_len)
+ goto bad;
+
if (req->name_len > UBI_VOL_NAME_MAX) {
err = -ENAMETOOLONG;
goto bad;
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
index ab80c0debac8..e2cdebf40840 100644
--- a/drivers/mtd/ubi/debug.c
+++ b/drivers/mtd/ubi/debug.c
@@ -216,7 +216,7 @@ void ubi_dbg_dump_flash(struct ubi_device *ubi, int pnum, int offset, int len)
buf = vmalloc(len);
if (!buf)
return;
- err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf);
+ err = mtd_read(ubi->mtd, addr, len, &read, buf);
if (err && err != -EUCLEAN) {
ubi_err("error %d while reading %d bytes from PEB %d:%d, "
"read %zd bytes", err, len, pnum, offset, read);
diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h
index 64fbb0021825..ead2cd16ba75 100644
--- a/drivers/mtd/ubi/debug.h
+++ b/drivers/mtd/ubi/debug.h
@@ -43,7 +43,10 @@
pr_debug("UBI DBG " type ": " fmt "\n", ##__VA_ARGS__)
/* Just a debugging messages not related to any specific UBI subsystem */
-#define dbg_msg(fmt, ...) ubi_dbg_msg("msg", fmt, ##__VA_ARGS__)
+#define dbg_msg(fmt, ...) \
+ printk(KERN_DEBUG "UBI DBG (pid %d): %s: " fmt "\n", \
+ current->pid, __func__, ##__VA_ARGS__)
+
/* General debugging messages */
#define dbg_gen(fmt, ...) ubi_dbg_msg("gen", fmt, ##__VA_ARGS__)
/* Messages from the eraseblock association sub-system */
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index fb7f19b62d91..cd26da8ad225 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -1028,12 +1028,14 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
* 'ubi_wl_put_peb()' function on the @ubi->move_mutex. In turn, we are
* holding @ubi->move_mutex and go sleep on the LEB lock. So, if the
* LEB is already locked, we just do not move it and return
- * %MOVE_CANCEL_RACE, which means that UBI will re-try, but later.
+ * %MOVE_RETRY. Note, we do not return %MOVE_CANCEL_RACE here because
+ * we do not know the reasons of the contention - it may be just a
+ * normal I/O on this LEB, so we want to re-try.
*/
err = leb_write_trylock(ubi, vol_id, lnum);
if (err) {
dbg_wl("contention on LEB %d:%d, cancel", vol_id, lnum);
- return MOVE_CANCEL_RACE;
+ return MOVE_RETRY;
}
/*
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index f20b6f22f240..5cde4e5ca3e5 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -170,7 +170,7 @@ int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset,
addr = (loff_t)pnum * ubi->peb_size + offset;
retry:
- err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf);
+ err = mtd_read(ubi->mtd, addr, len, &read, buf);
if (err) {
const char *errstr = mtd_is_eccerr(err) ? " (ECC error)" : "";
@@ -289,7 +289,7 @@ int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
}
addr = (loff_t)pnum * ubi->peb_size + offset;
- err = ubi->mtd->write(ubi->mtd, addr, len, &written, buf);
+ err = mtd_write(ubi->mtd, addr, len, &written, buf);
if (err) {
ubi_err("error %d while writing %d bytes to PEB %d:%d, written "
"%zd bytes", err, len, pnum, offset, written);
@@ -361,7 +361,7 @@ retry:
ei.callback = erase_callback;
ei.priv = (unsigned long)&wq;
- err = ubi->mtd->erase(ubi->mtd, &ei);
+ err = mtd_erase(ubi->mtd, &ei);
if (err) {
if (retries++ < UBI_IO_RETRIES) {
dbg_io("error %d while erasing PEB %d, retry",
@@ -525,11 +525,10 @@ static int nor_erase_prepare(struct ubi_device *ubi, int pnum)
* the header comment in scan.c for more information).
*/
addr = (loff_t)pnum * ubi->peb_size;
- err = ubi->mtd->write(ubi->mtd, addr, 4, &written, (void *)&data);
+ err = mtd_write(ubi->mtd, addr, 4, &written, (void *)&data);
if (!err) {
addr += ubi->vid_hdr_aloffset;
- err = ubi->mtd->write(ubi->mtd, addr, 4, &written,
- (void *)&data);
+ err = mtd_write(ubi->mtd, addr, 4, &written, (void *)&data);
if (!err)
return 0;
}
@@ -635,7 +634,7 @@ int ubi_io_is_bad(const struct ubi_device *ubi, int pnum)
if (ubi->bad_allowed) {
int ret;
- ret = mtd->block_isbad(mtd, (loff_t)pnum * ubi->peb_size);
+ ret = mtd_block_isbad(mtd, (loff_t)pnum * ubi->peb_size);
if (ret < 0)
ubi_err("error %d while checking if PEB %d is bad",
ret, pnum);
@@ -670,7 +669,7 @@ int ubi_io_mark_bad(const struct ubi_device *ubi, int pnum)
if (!ubi->bad_allowed)
return 0;
- err = mtd->block_markbad(mtd, (loff_t)pnum * ubi->peb_size);
+ err = mtd_block_markbad(mtd, (loff_t)pnum * ubi->peb_size);
if (err)
ubi_err("cannot mark PEB %d bad, error %d", pnum, err);
return err;
@@ -1357,7 +1356,7 @@ int ubi_dbg_check_write(struct ubi_device *ubi, const void *buf, int pnum,
return 0;
}
- err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf1);
+ err = mtd_read(ubi->mtd, addr, len, &read, buf1);
if (err && !mtd_is_bitflip(err))
goto out_free;
@@ -1421,7 +1420,7 @@ int ubi_dbg_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len)
return 0;
}
- err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf);
+ err = mtd_read(ubi->mtd, addr, len, &read, buf);
if (err && !mtd_is_bitflip(err)) {
ubi_err("error %d while reading %d bytes from PEB %d:%d, "
"read %zd bytes", err, len, pnum, offset, read);
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
index 1a35fc5e3b40..9fdb35367fe0 100644
--- a/drivers/mtd/ubi/kapi.c
+++ b/drivers/mtd/ubi/kapi.c
@@ -714,9 +714,7 @@ int ubi_sync(int ubi_num)
if (!ubi)
return -ENODEV;
- if (ubi->mtd->sync)
- ubi->mtd->sync(ubi->mtd);
-
+ mtd_sync(ubi->mtd);
ubi_put_device(ubi);
return 0;
}
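
The ubi_sync() hunk drops its explicit "if (ubi->mtd->sync)" guard, which assumes the NULL check now lives inside mtd_sync(). A sketch of that assumption:

	/* Sketch only: assumed to be a no-op for devices without a sync method,
	 * which is why the caller above can drop its own guard. */
	static inline void mtd_sync(struct mtd_info *mtd)
	{
		if (mtd->sync)
			mtd->sync(mtd);
	}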
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index dc64c767fd21..d51d75d34446 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -120,6 +120,7 @@ enum {
* PEB
* MOVE_CANCEL_BITFLIPS: canceled because a bit-flip was detected in the
* target PEB
+ * MOVE_RETRY: retry scrubbing the PEB
*/
enum {
MOVE_CANCEL_RACE = 1,
@@ -127,6 +128,7 @@ enum {
MOVE_TARGET_RD_ERR,
MOVE_TARGET_WR_ERR,
MOVE_CANCEL_BITFLIPS,
+ MOVE_RETRY,
};
/**
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index 9ad18da1891d..17cec0c01544 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -306,7 +306,7 @@ static int create_vtbl(struct ubi_device *ubi, struct ubi_scan_info *si,
int copy, void *vtbl)
{
int err, tries = 0;
- static struct ubi_vid_hdr *vid_hdr;
+ struct ubi_vid_hdr *vid_hdr;
struct ubi_scan_leb *new_seb;
ubi_msg("create volume table (copy #%d)", copy + 1);
@@ -322,7 +322,7 @@ retry:
goto out_free;
}
- vid_hdr->vol_type = UBI_VID_DYNAMIC;
+ vid_hdr->vol_type = UBI_LAYOUT_VOLUME_TYPE;
vid_hdr->vol_id = cpu_to_be32(UBI_LAYOUT_VOLUME_ID);
vid_hdr->compat = UBI_LAYOUT_VOLUME_COMPAT;
vid_hdr->data_size = vid_hdr->used_ebs =
@@ -632,7 +632,7 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
return -ENOMEM;
vol->reserved_pebs = UBI_LAYOUT_VOLUME_EBS;
- vol->alignment = 1;
+ vol->alignment = UBI_LAYOUT_VOLUME_ALIGN;
vol->vol_type = UBI_DYNAMIC_VOLUME;
vol->name_len = sizeof(UBI_LAYOUT_VOLUME_NAME) - 1;
memcpy(vol->name, UBI_LAYOUT_VOLUME_NAME, vol->name_len + 1);
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 42c684cf3688..0696e36b0539 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -795,7 +795,10 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
protect = 1;
goto out_not_moved;
}
-
+ if (err == MOVE_RETRY) {
+ scrubbing = 1;
+ goto out_not_moved;
+ }
if (err == MOVE_CANCEL_BITFLIPS || err == MOVE_TARGET_WR_ERR ||
err == MOVE_TARGET_RD_ERR) {
/*
@@ -1049,7 +1052,6 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
ubi_err("failed to erase PEB %d, error %d", pnum, err);
kfree(wl_wrk);
- kmem_cache_free(ubi_wl_entry_slab, e);
if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
err == -EBUSY) {
@@ -1062,14 +1064,16 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
goto out_ro;
}
return err;
- } else if (err != -EIO) {
+ }
+
+ kmem_cache_free(ubi_wl_entry_slab, e);
+ if (err != -EIO)
/*
* If this is not %-EIO, we have no idea what to do. Scheduling
* this physical eraseblock for erasure again would cause
* errors again and again. Well, lets switch to R/O mode.
*/
goto out_ro;
- }
/* It is %-EIO, the PEB went bad */
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 654a5e94e0e7..b98285446a5a 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -125,6 +125,8 @@ config IFB
'ifb1' etc.
Look at the iproute2 documentation directory for usage etc
+source "drivers/net/team/Kconfig"
+
config MACVLAN
tristate "MAC-VLAN support (EXPERIMENTAL)"
depends on EXPERIMENTAL
@@ -241,6 +243,8 @@ source "drivers/atm/Kconfig"
source "drivers/net/caif/Kconfig"
+source "drivers/net/dsa/Kconfig"
+
source "drivers/net/ethernet/Kconfig"
source "drivers/net/fddi/Kconfig"
@@ -338,4 +342,6 @@ config VMXNET3
To compile this driver as a module, choose M here: the
module will be called vmxnet3.
+source "drivers/net/hyperv/Kconfig"
+
endif # NETDEVICES
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index fa877cd2b139..a6b8ce11a22f 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_NET) += Space.o loopback.o
obj-$(CONFIG_NETCONSOLE) += netconsole.o
obj-$(CONFIG_PHYLIB) += phy/
obj-$(CONFIG_RIONET) += rionet.o
+obj-$(CONFIG_NET_TEAM) += team/
obj-$(CONFIG_TUN) += tun.o
obj-$(CONFIG_VETH) += veth.o
obj-$(CONFIG_VIRTIO_NET) += virtio_net.o
@@ -29,6 +30,7 @@ obj-$(CONFIG_DEV_APPLETALK) += appletalk/
obj-$(CONFIG_CAIF) += caif/
obj-$(CONFIG_CAN) += can/
obj-$(CONFIG_ETRAX_ETHERNET) += cris/
+obj-$(CONFIG_NET_DSA) += dsa/
obj-$(CONFIG_ETHERNET) += ethernet/
obj-$(CONFIG_FDDI) += fddi/
obj-$(CONFIG_HIPPI) += hippi/
@@ -66,3 +68,5 @@ obj-$(CONFIG_USB_USBNET) += usb/
obj-$(CONFIG_USB_ZD1201) += usb/
obj-$(CONFIG_USB_IPHETH) += usb/
obj-$(CONFIG_USB_CDC_PHONET) += usb/
+
+obj-$(CONFIG_HYPERV_NET) += hyperv/
diff --git a/drivers/net/arcnet/Kconfig b/drivers/net/arcnet/Kconfig
index a73d9dc80ff6..84fb6349a59a 100644
--- a/drivers/net/arcnet/Kconfig
+++ b/drivers/net/arcnet/Kconfig
@@ -4,7 +4,7 @@
menuconfig ARCNET
depends on NETDEVICES && (ISA || PCI || PCMCIA)
- bool "ARCnet support"
+ tristate "ARCnet support"
---help---
If you have a network card of this type, say Y and check out the
(arguably) beautiful poetry in
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 106b88a04738..342626f4bc46 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -99,16 +99,26 @@ static inline u8 _simple_hash(const u8 *hash_start, int hash_size)
/*********************** tlb specific functions ***************************/
-static inline void _lock_tx_hashtbl(struct bonding *bond)
+static inline void _lock_tx_hashtbl_bh(struct bonding *bond)
{
spin_lock_bh(&(BOND_ALB_INFO(bond).tx_hashtbl_lock));
}
-static inline void _unlock_tx_hashtbl(struct bonding *bond)
+static inline void _unlock_tx_hashtbl_bh(struct bonding *bond)
{
spin_unlock_bh(&(BOND_ALB_INFO(bond).tx_hashtbl_lock));
}
+static inline void _lock_tx_hashtbl(struct bonding *bond)
+{
+ spin_lock(&(BOND_ALB_INFO(bond).tx_hashtbl_lock));
+}
+
+static inline void _unlock_tx_hashtbl(struct bonding *bond)
+{
+ spin_unlock(&(BOND_ALB_INFO(bond).tx_hashtbl_lock));
+}
+
/* Caller must hold tx_hashtbl lock */
static inline void tlb_init_table_entry(struct tlb_client_info *entry, int save_load)
{
@@ -129,14 +139,13 @@ static inline void tlb_init_slave(struct slave *slave)
SLAVE_TLB_INFO(slave).head = TLB_NULL_INDEX;
}
-/* Caller must hold bond lock for read */
-static void tlb_clear_slave(struct bonding *bond, struct slave *slave, int save_load)
+/* Caller must hold bond lock for read, BH disabled */
+static void __tlb_clear_slave(struct bonding *bond, struct slave *slave,
+ int save_load)
{
struct tlb_client_info *tx_hash_table;
u32 index;
- _lock_tx_hashtbl(bond);
-
/* clear slave from tx_hashtbl */
tx_hash_table = BOND_ALB_INFO(bond).tx_hashtbl;
@@ -151,8 +160,15 @@ static void tlb_clear_slave(struct bonding *bond, struct slave *slave, int save_
}
tlb_init_slave(slave);
+}
- _unlock_tx_hashtbl(bond);
+/* Caller must hold bond lock for read */
+static void tlb_clear_slave(struct bonding *bond, struct slave *slave,
+ int save_load)
+{
+ _lock_tx_hashtbl_bh(bond);
+ __tlb_clear_slave(bond, slave, save_load);
+ _unlock_tx_hashtbl_bh(bond);
}
/* Must be called before starting the monitor timer */
@@ -169,7 +185,7 @@ static int tlb_initialize(struct bonding *bond)
bond->dev->name);
return -1;
}
- _lock_tx_hashtbl(bond);
+ _lock_tx_hashtbl_bh(bond);
bond_info->tx_hashtbl = new_hashtbl;
@@ -177,7 +193,7 @@ static int tlb_initialize(struct bonding *bond)
tlb_init_table_entry(&bond_info->tx_hashtbl[i], 0);
}
- _unlock_tx_hashtbl(bond);
+ _unlock_tx_hashtbl_bh(bond);
return 0;
}
@@ -187,12 +203,12 @@ static void tlb_deinitialize(struct bonding *bond)
{
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
- _lock_tx_hashtbl(bond);
+ _lock_tx_hashtbl_bh(bond);
kfree(bond_info->tx_hashtbl);
bond_info->tx_hashtbl = NULL;
- _unlock_tx_hashtbl(bond);
+ _unlock_tx_hashtbl_bh(bond);
}
static long long compute_gap(struct slave *slave)
@@ -226,15 +242,13 @@ static struct slave *tlb_get_least_loaded_slave(struct bonding *bond)
return least_loaded;
}
-/* Caller must hold bond lock for read */
-static struct slave *tlb_choose_channel(struct bonding *bond, u32 hash_index, u32 skb_len)
+static struct slave *__tlb_choose_channel(struct bonding *bond, u32 hash_index,
+ u32 skb_len)
{
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
struct tlb_client_info *hash_table;
struct slave *assigned_slave;
- _lock_tx_hashtbl(bond);
-
hash_table = bond_info->tx_hashtbl;
assigned_slave = hash_table[hash_index].tx_slave;
if (!assigned_slave) {
@@ -263,22 +277,46 @@ static struct slave *tlb_choose_channel(struct bonding *bond, u32 hash_index, u3
hash_table[hash_index].tx_bytes += skb_len;
}
- _unlock_tx_hashtbl(bond);
-
return assigned_slave;
}
+/* Caller must hold bond lock for read */
+static struct slave *tlb_choose_channel(struct bonding *bond, u32 hash_index,
+ u32 skb_len)
+{
+ struct slave *tx_slave;
+ /*
+ * We don't need to disable softirq here, because
+ * tlb_choose_channel() is only called by bond_alb_xmit()
+ * which already has softirq disabled.
+ */
+ _lock_tx_hashtbl(bond);
+ tx_slave = __tlb_choose_channel(bond, hash_index, skb_len);
+ _unlock_tx_hashtbl(bond);
+ return tx_slave;
+}
+
/*********************** rlb specific functions ***************************/
-static inline void _lock_rx_hashtbl(struct bonding *bond)
+static inline void _lock_rx_hashtbl_bh(struct bonding *bond)
{
spin_lock_bh(&(BOND_ALB_INFO(bond).rx_hashtbl_lock));
}
-static inline void _unlock_rx_hashtbl(struct bonding *bond)
+static inline void _unlock_rx_hashtbl_bh(struct bonding *bond)
{
spin_unlock_bh(&(BOND_ALB_INFO(bond).rx_hashtbl_lock));
}
+static inline void _lock_rx_hashtbl(struct bonding *bond)
+{
+ spin_lock(&(BOND_ALB_INFO(bond).rx_hashtbl_lock));
+}
+
+static inline void _unlock_rx_hashtbl(struct bonding *bond)
+{
+ spin_unlock(&(BOND_ALB_INFO(bond).rx_hashtbl_lock));
+}
+
/* when an ARP REPLY is received from a client update its info
* in the rx_hashtbl
*/
@@ -288,7 +326,7 @@ static void rlb_update_entry_from_arp(struct bonding *bond, struct arp_pkt *arp)
struct rlb_client_info *client_info;
u32 hash_index;
- _lock_rx_hashtbl(bond);
+ _lock_rx_hashtbl_bh(bond);
hash_index = _simple_hash((u8*)&(arp->ip_src), sizeof(arp->ip_src));
client_info = &(bond_info->rx_hashtbl[hash_index]);
@@ -303,7 +341,7 @@ static void rlb_update_entry_from_arp(struct bonding *bond, struct arp_pkt *arp)
bond_info->rx_ntt = 1;
}
- _unlock_rx_hashtbl(bond);
+ _unlock_rx_hashtbl_bh(bond);
}
static void rlb_arp_recv(struct sk_buff *skb, struct bonding *bond,
@@ -401,7 +439,7 @@ static void rlb_clear_slave(struct bonding *bond, struct slave *slave)
u32 index, next_index;
/* clear slave from rx_hashtbl */
- _lock_rx_hashtbl(bond);
+ _lock_rx_hashtbl_bh(bond);
rx_hash_table = bond_info->rx_hashtbl;
index = bond_info->rx_hashtbl_head;
@@ -432,7 +470,7 @@ static void rlb_clear_slave(struct bonding *bond, struct slave *slave)
}
}
- _unlock_rx_hashtbl(bond);
+ _unlock_rx_hashtbl_bh(bond);
write_lock_bh(&bond->curr_slave_lock);
@@ -489,7 +527,7 @@ static void rlb_update_rx_clients(struct bonding *bond)
struct rlb_client_info *client_info;
u32 hash_index;
- _lock_rx_hashtbl(bond);
+ _lock_rx_hashtbl_bh(bond);
hash_index = bond_info->rx_hashtbl_head;
for (; hash_index != RLB_NULL_INDEX; hash_index = client_info->next) {
@@ -507,7 +545,7 @@ static void rlb_update_rx_clients(struct bonding *bond)
*/
bond_info->rlb_update_delay_counter = RLB_UPDATE_DELAY;
- _unlock_rx_hashtbl(bond);
+ _unlock_rx_hashtbl_bh(bond);
}
/* The slave was assigned a new mac address - update the clients */
@@ -518,7 +556,7 @@ static void rlb_req_update_slave_clients(struct bonding *bond, struct slave *sla
int ntt = 0;
u32 hash_index;
- _lock_rx_hashtbl(bond);
+ _lock_rx_hashtbl_bh(bond);
hash_index = bond_info->rx_hashtbl_head;
for (; hash_index != RLB_NULL_INDEX; hash_index = client_info->next) {
@@ -538,7 +576,7 @@ static void rlb_req_update_slave_clients(struct bonding *bond, struct slave *sla
bond_info->rlb_update_retry_counter = RLB_UPDATE_RETRY;
}
- _unlock_rx_hashtbl(bond);
+ _unlock_rx_hashtbl_bh(bond);
}
/* mark all clients using src_ip to be updated */
@@ -709,7 +747,7 @@ static void rlb_rebalance(struct bonding *bond)
int ntt;
u32 hash_index;
- _lock_rx_hashtbl(bond);
+ _lock_rx_hashtbl_bh(bond);
ntt = 0;
hash_index = bond_info->rx_hashtbl_head;
@@ -727,7 +765,7 @@ static void rlb_rebalance(struct bonding *bond)
if (ntt) {
bond_info->rx_ntt = 1;
}
- _unlock_rx_hashtbl(bond);
+ _unlock_rx_hashtbl_bh(bond);
}
/* Caller must hold rx_hashtbl lock */
@@ -751,7 +789,7 @@ static int rlb_initialize(struct bonding *bond)
bond->dev->name);
return -1;
}
- _lock_rx_hashtbl(bond);
+ _lock_rx_hashtbl_bh(bond);
bond_info->rx_hashtbl = new_hashtbl;
@@ -761,7 +799,7 @@ static int rlb_initialize(struct bonding *bond)
rlb_init_table_entry(bond_info->rx_hashtbl + i);
}
- _unlock_rx_hashtbl(bond);
+ _unlock_rx_hashtbl_bh(bond);
/* register to receive ARPs */
bond->recv_probe = rlb_arp_recv;
@@ -773,13 +811,13 @@ static void rlb_deinitialize(struct bonding *bond)
{
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
- _lock_rx_hashtbl(bond);
+ _lock_rx_hashtbl_bh(bond);
kfree(bond_info->rx_hashtbl);
bond_info->rx_hashtbl = NULL;
bond_info->rx_hashtbl_head = RLB_NULL_INDEX;
- _unlock_rx_hashtbl(bond);
+ _unlock_rx_hashtbl_bh(bond);
}
static void rlb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
@@ -787,7 +825,7 @@ static void rlb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
u32 curr_index;
- _lock_rx_hashtbl(bond);
+ _lock_rx_hashtbl_bh(bond);
curr_index = bond_info->rx_hashtbl_head;
while (curr_index != RLB_NULL_INDEX) {
@@ -812,7 +850,7 @@ static void rlb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
curr_index = next_index;
}
- _unlock_rx_hashtbl(bond);
+ _unlock_rx_hashtbl_bh(bond);
}
/*********************** tlb/rlb shared functions *********************/
@@ -1320,7 +1358,9 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
res = bond_dev_queue_xmit(bond, skb, tx_slave->dev);
} else {
if (tx_slave) {
- tlb_clear_slave(bond, tx_slave, 0);
+ _lock_tx_hashtbl(bond);
+ __tlb_clear_slave(bond, tx_slave, 0);
+ _unlock_tx_hashtbl(bond);
}
}
diff --git a/drivers/net/bonding/bond_ipv6.c b/drivers/net/bonding/bond_ipv6.c
deleted file mode 100644
index 027a0ee7d85b..000000000000
--- a/drivers/net/bonding/bond_ipv6.c
+++ /dev/null
@@ -1,225 +0,0 @@
-/*
- * Copyright(c) 2008 Hewlett-Packard Development Company, L.P.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/types.h>
-#include <linux/if_vlan.h>
-#include <net/ipv6.h>
-#include <net/ndisc.h>
-#include <net/addrconf.h>
-#include <net/netns/generic.h>
-#include "bonding.h"
-
-/*
- * Assign bond->master_ipv6 to the next IPv6 address in the list, or
- * zero it out if there are none.
- */
-static void bond_glean_dev_ipv6(struct net_device *dev, struct in6_addr *addr)
-{
- struct inet6_dev *idev;
-
- if (!dev)
- return;
-
- idev = in6_dev_get(dev);
- if (!idev)
- return;
-
- read_lock_bh(&idev->lock);
- if (!list_empty(&idev->addr_list)) {
- struct inet6_ifaddr *ifa
- = list_first_entry(&idev->addr_list,
- struct inet6_ifaddr, if_list);
- ipv6_addr_copy(addr, &ifa->addr);
- } else
- ipv6_addr_set(addr, 0, 0, 0, 0);
-
- read_unlock_bh(&idev->lock);
-
- in6_dev_put(idev);
-}
-
-static void bond_na_send(struct net_device *slave_dev,
- struct in6_addr *daddr,
- int router,
- unsigned short vlan_id)
-{
- struct in6_addr mcaddr;
- struct icmp6hdr icmp6h = {
- .icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT,
- };
- struct sk_buff *skb;
-
- icmp6h.icmp6_router = router;
- icmp6h.icmp6_solicited = 0;
- icmp6h.icmp6_override = 1;
-
- addrconf_addr_solict_mult(daddr, &mcaddr);
-
- pr_debug("ipv6 na on slave %s: dest %pI6, src %pI6\n",
- slave_dev->name, &mcaddr, daddr);
-
- skb = ndisc_build_skb(slave_dev, &mcaddr, daddr, &icmp6h, daddr,
- ND_OPT_TARGET_LL_ADDR);
-
- if (!skb) {
- pr_err("NA packet allocation failed\n");
- return;
- }
-
- if (vlan_id) {
- /* The Ethernet header is not present yet, so it is
- * too early to insert a VLAN tag. Force use of an
- * out-of-line tag here and let dev_hard_start_xmit()
- * insert it if the slave hardware can't.
- */
- skb = __vlan_hwaccel_put_tag(skb, vlan_id);
- if (!skb) {
- pr_err("failed to insert VLAN tag\n");
- return;
- }
- }
-
- ndisc_send_skb(skb, slave_dev, NULL, &mcaddr, daddr, &icmp6h);
-}
-
-/*
- * Kick out an unsolicited Neighbor Advertisement for an IPv6 address on
- * the bonding master. This will help the switch learn our address
- * if in active-backup mode.
- *
- * Caller must hold curr_slave_lock for read or better
- */
-void bond_send_unsolicited_na(struct bonding *bond)
-{
- struct slave *slave = bond->curr_active_slave;
- struct vlan_entry *vlan;
- struct inet6_dev *idev;
- int is_router;
-
- pr_debug("%s: bond %s slave %s\n", bond->dev->name,
- __func__, slave ? slave->dev->name : "NULL");
-
- if (!slave || !bond->send_unsol_na ||
- test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state))
- return;
-
- bond->send_unsol_na--;
-
- idev = in6_dev_get(bond->dev);
- if (!idev)
- return;
-
- is_router = !!idev->cnf.forwarding;
-
- in6_dev_put(idev);
-
- if (!ipv6_addr_any(&bond->master_ipv6))
- bond_na_send(slave->dev, &bond->master_ipv6, is_router, 0);
-
- list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
- if (!ipv6_addr_any(&vlan->vlan_ipv6)) {
- bond_na_send(slave->dev, &vlan->vlan_ipv6, is_router,
- vlan->vlan_id);
- }
- }
-}
-
-/*
- * bond_inet6addr_event: handle inet6addr notifier chain events.
- *
- * We keep track of device IPv6 addresses primarily to use as source
- * addresses in NS probes.
- *
- * We track one IPv6 for the main device (if it has one).
- */
-static int bond_inet6addr_event(struct notifier_block *this,
- unsigned long event,
- void *ptr)
-{
- struct inet6_ifaddr *ifa = ptr;
- struct net_device *vlan_dev, *event_dev = ifa->idev->dev;
- struct bonding *bond;
- struct vlan_entry *vlan;
- struct bond_net *bn = net_generic(dev_net(event_dev), bond_net_id);
-
- list_for_each_entry(bond, &bn->dev_list, bond_list) {
- if (bond->dev == event_dev) {
- switch (event) {
- case NETDEV_UP:
- if (ipv6_addr_any(&bond->master_ipv6))
- ipv6_addr_copy(&bond->master_ipv6,
- &ifa->addr);
- return NOTIFY_OK;
- case NETDEV_DOWN:
- if (ipv6_addr_equal(&bond->master_ipv6,
- &ifa->addr))
- bond_glean_dev_ipv6(bond->dev,
- &bond->master_ipv6);
- return NOTIFY_OK;
- default:
- return NOTIFY_DONE;
- }
- }
-
- list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
- rcu_read_lock();
- vlan_dev = __vlan_find_dev_deep(bond->dev,
- vlan->vlan_id);
- rcu_read_unlock();
- if (vlan_dev == event_dev) {
- switch (event) {
- case NETDEV_UP:
- if (ipv6_addr_any(&vlan->vlan_ipv6))
- ipv6_addr_copy(&vlan->vlan_ipv6,
- &ifa->addr);
- return NOTIFY_OK;
- case NETDEV_DOWN:
- if (ipv6_addr_equal(&vlan->vlan_ipv6,
- &ifa->addr))
- bond_glean_dev_ipv6(vlan_dev,
- &vlan->vlan_ipv6);
- return NOTIFY_OK;
- default:
- return NOTIFY_DONE;
- }
- }
- }
- }
- return NOTIFY_DONE;
-}
-
-static struct notifier_block bond_inet6addr_notifier = {
- .notifier_call = bond_inet6addr_event,
-};
-
-void bond_register_ipv6_notifier(void)
-{
- register_inet6addr_notifier(&bond_inet6addr_notifier);
-}
-
-void bond_unregister_ipv6_notifier(void)
-{
- unregister_inet6addr_notifier(&bond_inet6addr_notifier);
-}
-
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index b0c577256487..435984ad8b2f 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -428,27 +428,34 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
* @bond_dev: bonding net device that got called
* @vid: vlan id being added
*/
-static void bond_vlan_rx_add_vid(struct net_device *bond_dev, uint16_t vid)
+static int bond_vlan_rx_add_vid(struct net_device *bond_dev, uint16_t vid)
{
struct bonding *bond = netdev_priv(bond_dev);
- struct slave *slave;
+ struct slave *slave, *stop_at;
int i, res;
bond_for_each_slave(bond, slave, i) {
- struct net_device *slave_dev = slave->dev;
- const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
-
- if ((slave_dev->features & NETIF_F_HW_VLAN_FILTER) &&
- slave_ops->ndo_vlan_rx_add_vid) {
- slave_ops->ndo_vlan_rx_add_vid(slave_dev, vid);
- }
+ res = vlan_vid_add(slave->dev, vid);
+ if (res)
+ goto unwind;
}
res = bond_add_vlan(bond, vid);
if (res) {
pr_err("%s: Error: Failed to add vlan id %d\n",
bond_dev->name, vid);
+ return res;
}
+
+ return 0;
+
+unwind:
+ /* unwind from head to the slave that failed */
+ stop_at = slave;
+ bond_for_each_slave_from_to(bond, slave, i, bond->first_slave, stop_at)
+ vlan_vid_del(slave->dev, vid);
+
+ return res;
}
/**
@@ -456,56 +463,48 @@ static void bond_vlan_rx_add_vid(struct net_device *bond_dev, uint16_t vid)
* @bond_dev: bonding net device that got called
* @vid: vlan id being removed
*/
-static void bond_vlan_rx_kill_vid(struct net_device *bond_dev, uint16_t vid)
+static int bond_vlan_rx_kill_vid(struct net_device *bond_dev, uint16_t vid)
{
struct bonding *bond = netdev_priv(bond_dev);
struct slave *slave;
int i, res;
- bond_for_each_slave(bond, slave, i) {
- struct net_device *slave_dev = slave->dev;
- const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
-
- if ((slave_dev->features & NETIF_F_HW_VLAN_FILTER) &&
- slave_ops->ndo_vlan_rx_kill_vid) {
- slave_ops->ndo_vlan_rx_kill_vid(slave_dev, vid);
- }
- }
+ bond_for_each_slave(bond, slave, i)
+ vlan_vid_del(slave->dev, vid);
res = bond_del_vlan(bond, vid);
if (res) {
pr_err("%s: Error: Failed to remove vlan id %d\n",
bond_dev->name, vid);
+ return res;
}
+
+ return 0;
}
static void bond_add_vlans_on_slave(struct bonding *bond, struct net_device *slave_dev)
{
struct vlan_entry *vlan;
- const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
-
- if (!(slave_dev->features & NETIF_F_HW_VLAN_FILTER) ||
- !(slave_ops->ndo_vlan_rx_add_vid))
- return;
+ int res;
- list_for_each_entry(vlan, &bond->vlan_list, vlan_list)
- slave_ops->ndo_vlan_rx_add_vid(slave_dev, vlan->vlan_id);
+ list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
+ res = vlan_vid_add(slave_dev, vlan->vlan_id);
+ if (res)
+ pr_warning("%s: Failed to add vlan id %d to device %s\n",
+ bond->dev->name, vlan->vlan_id,
+ slave_dev->name);
+ }
}
static void bond_del_vlans_from_slave(struct bonding *bond,
struct net_device *slave_dev)
{
- const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
struct vlan_entry *vlan;
- if (!(slave_dev->features & NETIF_F_HW_VLAN_FILTER) ||
- !(slave_ops->ndo_vlan_rx_kill_vid))
- return;
-
list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
if (!vlan->vlan_id)
continue;
- slave_ops->ndo_vlan_rx_kill_vid(slave_dev, vlan->vlan_id);
+ vlan_vid_del(slave_dev, vlan->vlan_id);
}
}
@@ -1325,11 +1324,12 @@ static int bond_sethwaddr(struct net_device *bond_dev,
return 0;
}
-static u32 bond_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t bond_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
struct slave *slave;
struct bonding *bond = netdev_priv(dev);
- u32 mask;
+ netdev_features_t mask;
int i;
read_lock(&bond->lock);
@@ -1363,7 +1363,7 @@ static void bond_compute_features(struct bonding *bond)
{
struct slave *slave;
struct net_device *bond_dev = bond->dev;
- u32 vlan_features = BOND_VLAN_FEATURES;
+ netdev_features_t vlan_features = BOND_VLAN_FEATURES;
unsigned short max_hard_header_len = ETH_HLEN;
int i;
@@ -1822,7 +1822,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
"but new slave device does not support netpoll.\n",
bond_dev->name);
res = -EBUSY;
- goto err_close;
+ goto err_detach;
}
}
#endif
@@ -1831,7 +1831,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
res = bond_create_slave_symlinks(bond_dev, slave_dev);
if (res)
- goto err_close;
+ goto err_detach;
res = netdev_rx_handler_register(slave_dev, bond_handle_frame,
new_slave);
@@ -1852,6 +1852,11 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
err_dest_symlinks:
bond_destroy_slave_symlinks(bond_dev, slave_dev);
+err_detach:
+ write_lock_bh(&bond->lock);
+ bond_detach_slave(bond, new_slave);
+ write_unlock_bh(&bond->lock);
+
err_close:
dev_close(slave_dev);
@@ -1897,7 +1902,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
struct bonding *bond = netdev_priv(bond_dev);
struct slave *slave, *oldcurrent;
struct sockaddr addr;
- u32 old_features = bond_dev->features;
+ netdev_features_t old_features = bond_dev->features;
/* slave is not a slave or master is not master of this slave */
if (!(slave_dev->flags & IFF_SLAVE) ||
@@ -2553,30 +2558,6 @@ re_arm:
}
}
-static __be32 bond_glean_dev_ip(struct net_device *dev)
-{
- struct in_device *idev;
- struct in_ifaddr *ifa;
- __be32 addr = 0;
-
- if (!dev)
- return 0;
-
- rcu_read_lock();
- idev = __in_dev_get_rcu(dev);
- if (!idev)
- goto out;
-
- ifa = idev->ifa_list;
- if (!ifa)
- goto out;
-
- addr = ifa->ifa_local;
-out:
- rcu_read_unlock();
- return addr;
-}
-
static int bond_has_this_ip(struct bonding *bond, __be32 ip)
{
struct vlan_entry *vlan;
@@ -3322,6 +3303,10 @@ static int bond_inetaddr_event(struct notifier_block *this, unsigned long event,
struct bonding *bond;
struct vlan_entry *vlan;
+ /* we only care about primary address */
+ if(ifa->ifa_flags & IFA_F_SECONDARY)
+ return NOTIFY_DONE;
+
list_for_each_entry(bond, &bn->dev_list, bond_list) {
if (bond->dev == event_dev) {
switch (event) {
@@ -3329,7 +3314,7 @@ static int bond_inetaddr_event(struct notifier_block *this, unsigned long event,
bond->master_ip = ifa->ifa_local;
return NOTIFY_OK;
case NETDEV_DOWN:
- bond->master_ip = bond_glean_dev_ip(bond->dev);
+ bond->master_ip = 0;
return NOTIFY_OK;
default:
return NOTIFY_DONE;
@@ -3345,8 +3330,7 @@ static int bond_inetaddr_event(struct notifier_block *this, unsigned long event,
vlan->vlan_ip = ifa->ifa_local;
return NOTIFY_OK;
case NETDEV_DOWN:
- vlan->vlan_ip =
- bond_glean_dev_ip(vlan_dev);
+ vlan->vlan_ip = 0;
return NOTIFY_OK;
default:
return NOTIFY_DONE;
@@ -4360,7 +4344,7 @@ static void bond_setup(struct net_device *bond_dev)
NETIF_F_HW_VLAN_RX |
NETIF_F_HW_VLAN_FILTER;
- bond_dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_NO_CSUM);
+ bond_dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_HW_CSUM);
bond_dev->features |= bond_dev->hw_features;
}
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 4ef7e2fd9fe6..aef42f045320 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -26,7 +26,6 @@
#include <linux/module.h>
#include <linux/device.h>
#include <linux/sched.h>
-#include <linux/sysdev.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/string.h>
diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
index 073352517adc..0a4fc62a381d 100644
--- a/drivers/net/caif/caif_hsi.c
+++ b/drivers/net/caif/caif_hsi.c
@@ -117,15 +117,6 @@ static int cfhsi_flush_fifo(struct cfhsi *cfhsi)
dev_dbg(&cfhsi->ndev->dev, "%s.\n",
__func__);
-
- ret = cfhsi->dev->cfhsi_wake_up(cfhsi->dev);
- if (ret) {
- dev_warn(&cfhsi->ndev->dev,
- "%s: can't wake up HSI interface: %d.\n",
- __func__, ret);
- return ret;
- }
-
do {
ret = cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
&fifo_occupancy);
@@ -168,8 +159,6 @@ static int cfhsi_flush_fifo(struct cfhsi *cfhsi)
}
} while (1);
- cfhsi->dev->cfhsi_wake_down(cfhsi->dev);
-
return ret;
}
@@ -944,7 +933,7 @@ static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
/* Create HSI frame. */
len = cfhsi_tx_frm(desc, cfhsi);
- BUG_ON(!len);
+ WARN_ON(!len);
/* Set up new transfer. */
res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index 23406e62c0b0..8a3054b84812 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -38,15 +38,15 @@ MODULE_ALIAS_LDISC(N_CAIF);
/*This list is protected by the rtnl lock. */
static LIST_HEAD(ser_list);
-static int ser_loop;
+static bool ser_loop;
module_param(ser_loop, bool, S_IRUGO);
MODULE_PARM_DESC(ser_loop, "Run in simulated loopback mode.");
-static int ser_use_stx = 1;
+static bool ser_use_stx = true;
module_param(ser_use_stx, bool, S_IRUGO);
MODULE_PARM_DESC(ser_use_stx, "STX enabled or not.");
-static int ser_use_fcs = 1;
+static bool ser_use_fcs = true;
module_param(ser_use_fcs, bool, S_IRUGO);
MODULE_PARM_DESC(ser_use_fcs, "FCS enabled or not.");
@@ -261,7 +261,7 @@ static int handle_tx(struct ser_device *ser)
skb_pull(skb, tty_wr);
if (skb->len == 0) {
struct sk_buff *tmp = skb_dequeue(&ser->head);
- BUG_ON(tmp != skb);
+ WARN_ON(tmp != skb);
if (in_interrupt())
dev_kfree_skb_irq(skb);
else
@@ -305,7 +305,7 @@ static void ldisc_tx_wakeup(struct tty_struct *tty)
ser = tty->disc_data;
BUG_ON(ser == NULL);
- BUG_ON(ser->tty != tty);
+ WARN_ON(ser->tty != tty);
handle_tx(ser);
}
diff --git a/drivers/net/caif/caif_shmcore.c b/drivers/net/caif/caif_shmcore.c
index d4b26fb24ed9..5b2041319a32 100644
--- a/drivers/net/caif/caif_shmcore.c
+++ b/drivers/net/caif/caif_shmcore.c
@@ -238,11 +238,11 @@ int caif_shmdrv_rx_cb(u32 mbx_msg, void *priv)
if ((avail_emptybuff > HIGH_WATERMARK) &&
(!pshm_drv->tx_empty_available)) {
pshm_drv->tx_empty_available = 1;
+ spin_unlock_irqrestore(&pshm_drv->lock, flags);
pshm_drv->cfdev.flowctrl
(pshm_drv->pshm_dev->pshm_netdev,
CAIF_FLOW_ON);
- spin_unlock_irqrestore(&pshm_drv->lock, flags);
/* Schedule the work queue. if required */
if (!work_pending(&pshm_drv->shm_tx_work))
@@ -285,6 +285,7 @@ static void shm_rx_work_func(struct work_struct *rx_work)
list_entry(pshm_drv->rx_full_list.next, struct buf_list,
list);
list_del_init(&pbuf->list);
+ spin_unlock_irqrestore(&pshm_drv->lock, flags);
/* Retrieve pointer to start of the packet descriptor area. */
pck_desc = (struct shm_pck_desc *) pbuf->desc_vptr;
@@ -336,7 +337,11 @@ static void shm_rx_work_func(struct work_struct *rx_work)
/* Get a suitable CAIF packet and copy in data. */
skb = netdev_alloc_skb(pshm_drv->pshm_dev->pshm_netdev,
frm_pck_len + 1);
- BUG_ON(skb == NULL);
+
+ if (skb == NULL) {
+ pr_info("OOM: Try next frame in descriptor\n");
+ break;
+ }
p = skb_put(skb, frm_pck_len);
memcpy(p, pbuf->desc_vptr + frm_pck_ofs, frm_pck_len);
@@ -360,6 +365,7 @@ static void shm_rx_work_func(struct work_struct *rx_work)
pck_desc++;
}
+ spin_lock_irqsave(&pshm_drv->lock, flags);
list_add_tail(&pbuf->list, &pshm_drv->rx_pend_list);
spin_unlock_irqrestore(&pshm_drv->lock, flags);
@@ -412,7 +418,6 @@ static void shm_tx_work_func(struct work_struct *tx_work)
if (skb == NULL)
goto send_msg;
-
/* Check the available no. of buffers in the empty list */
list_for_each(pos, &pshm_drv->tx_empty_list)
avail_emptybuff++;
@@ -421,9 +426,11 @@ static void shm_tx_work_func(struct work_struct *tx_work)
pshm_drv->tx_empty_available) {
/* Update blocking condition. */
pshm_drv->tx_empty_available = 0;
+ spin_unlock_irqrestore(&pshm_drv->lock, flags);
pshm_drv->cfdev.flowctrl
(pshm_drv->pshm_dev->pshm_netdev,
CAIF_FLOW_OFF);
+ spin_lock_irqsave(&pshm_drv->lock, flags);
}
/*
* We simply return back to the caller if we do not have space
@@ -469,6 +476,8 @@ static void shm_tx_work_func(struct work_struct *tx_work)
}
skb = skb_dequeue(&pshm_drv->sk_qhead);
+ if (skb == NULL)
+ break;
/* Copy in CAIF frame. */
skb_copy_bits(skb, 0, pbuf->desc_vptr +
pbuf->frm_ofs + SHM_HDR_LEN +
@@ -477,7 +486,7 @@ static void shm_tx_work_func(struct work_struct *tx_work)
pshm_drv->pshm_dev->pshm_netdev->stats.tx_packets++;
pshm_drv->pshm_dev->pshm_netdev->stats.tx_bytes +=
frmlen;
- dev_kfree_skb(skb);
+ dev_kfree_skb_irq(skb);
/* Fill in the shared memory packet descriptor area. */
pck_desc = (struct shm_pck_desc *) (pbuf->desc_vptr);
@@ -512,16 +521,11 @@ send_msg:
static int shm_netdev_tx(struct sk_buff *skb, struct net_device *shm_netdev)
{
struct shmdrv_layer *pshm_drv;
- unsigned long flags = 0;
pshm_drv = netdev_priv(shm_netdev);
- spin_lock_irqsave(&pshm_drv->lock, flags);
-
skb_queue_tail(&pshm_drv->sk_qhead, skb);
- spin_unlock_irqrestore(&pshm_drv->lock, flags);
-
/* Schedule Tx work queue. for deferred processing of skbs*/
if (!work_pending(&pshm_drv->shm_tx_work))
queue_work(pshm_drv->pshm_tx_workqueue, &pshm_drv->shm_tx_work);
@@ -606,6 +610,7 @@ int caif_shmcore_probe(struct shmdev_layer *pshm_dev)
pshm_drv->shm_rx_addr = pshm_dev->shm_base_addr +
(NR_TX_BUF * TX_BUF_SZ);
+ spin_lock_init(&pshm_drv->lock);
INIT_LIST_HEAD(&pshm_drv->tx_empty_list);
INIT_LIST_HEAD(&pshm_drv->tx_pend_list);
INIT_LIST_HEAD(&pshm_drv->tx_full_list);
@@ -640,7 +645,7 @@ int caif_shmcore_probe(struct shmdev_layer *pshm_dev)
tx_buf->frm_ofs = SHM_CAIF_FRM_OFS;
if (pshm_dev->shm_loopback)
- tx_buf->desc_vptr = (char *)tx_buf->phy_addr;
+ tx_buf->desc_vptr = (unsigned char *)tx_buf->phy_addr;
else
tx_buf->desc_vptr =
ioremap(tx_buf->phy_addr, TX_BUF_SZ);
@@ -664,7 +669,7 @@ int caif_shmcore_probe(struct shmdev_layer *pshm_dev)
rx_buf->len = RX_BUF_SZ;
if (pshm_dev->shm_loopback)
- rx_buf->desc_vptr = (char *)rx_buf->phy_addr;
+ rx_buf->desc_vptr = (unsigned char *)rx_buf->phy_addr;
else
rx_buf->desc_vptr =
ioremap(rx_buf->phy_addr, RX_BUF_SZ);
diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c
index 05e791f46aef..96391c36fa74 100644
--- a/drivers/net/caif/caif_spi.c
+++ b/drivers/net/caif/caif_spi.c
@@ -35,7 +35,7 @@ MODULE_DESCRIPTION("CAIF SPI driver");
/* Returns the number of padding bytes for alignment. */
#define PAD_POW2(x, pow) ((((x)&((pow)-1))==0) ? 0 : (((pow)-((x)&((pow)-1)))))
-static int spi_loop;
+static bool spi_loop;
module_param(spi_loop, bool, S_IRUGO);
MODULE_PARM_DESC(spi_loop, "SPI running in loopback mode.");
@@ -226,7 +226,7 @@ static ssize_t dbgfs_frame(struct file *file, char __user *user_buf,
"Tx data (Len: %d):\n", cfspi->tx_cpck_len);
len += print_frame((buf + len), (DEBUGFS_BUF_SIZE - len),
- cfspi->xfer.va_tx,
+ cfspi->xfer.va_tx[0],
(cfspi->tx_cpck_len + SPI_CMD_SZ), 100);
len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
@@ -599,48 +599,11 @@ static int cfspi_close(struct net_device *dev)
netif_stop_queue(dev);
return 0;
}
-static const struct net_device_ops cfspi_ops = {
- .ndo_open = cfspi_open,
- .ndo_stop = cfspi_close,
- .ndo_start_xmit = cfspi_xmit
-};
-static void cfspi_setup(struct net_device *dev)
+static int cfspi_init(struct net_device *dev)
{
+ int res = 0;
struct cfspi *cfspi = netdev_priv(dev);
- dev->features = 0;
- dev->netdev_ops = &cfspi_ops;
- dev->type = ARPHRD_CAIF;
- dev->flags = IFF_NOARP | IFF_POINTOPOINT;
- dev->tx_queue_len = 0;
- dev->mtu = SPI_MAX_PAYLOAD_SIZE;
- dev->destructor = free_netdev;
- skb_queue_head_init(&cfspi->qhead);
- skb_queue_head_init(&cfspi->chead);
- cfspi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
- cfspi->cfdev.use_frag = false;
- cfspi->cfdev.use_stx = false;
- cfspi->cfdev.use_fcs = false;
- cfspi->ndev = dev;
-}
-
-int cfspi_spi_probe(struct platform_device *pdev)
-{
- struct cfspi *cfspi = NULL;
- struct net_device *ndev;
- struct cfspi_dev *dev;
- int res;
- dev = (struct cfspi_dev *)pdev->dev.platform_data;
-
- ndev = alloc_netdev(sizeof(struct cfspi),
- "cfspi%d", cfspi_setup);
- if (!ndev)
- return -ENOMEM;
-
- cfspi = netdev_priv(ndev);
- netif_stop_queue(ndev);
- cfspi->ndev = ndev;
- cfspi->pdev = pdev;
/* Set flow info. */
cfspi->flow_off_sent = 0;
@@ -656,16 +619,11 @@ int cfspi_spi_probe(struct platform_device *pdev)
cfspi->slave_talked = false;
}
- /* Assign the SPI device. */
- cfspi->dev = dev;
- /* Assign the device ifc to this SPI interface. */
- dev->ifc = &cfspi->ifc;
-
/* Allocate DMA buffers. */
- cfspi->xfer.va_tx = dma_alloc(&cfspi->xfer.pa_tx);
- if (!cfspi->xfer.va_tx) {
+ cfspi->xfer.va_tx[0] = dma_alloc(&cfspi->xfer.pa_tx[0]);
+ if (!cfspi->xfer.va_tx[0]) {
res = -ENODEV;
- goto err_dma_alloc_tx;
+ goto err_dma_alloc_tx_0;
}
cfspi->xfer.va_rx = dma_alloc(&cfspi->xfer.pa_rx);
@@ -714,6 +672,87 @@ int cfspi_spi_probe(struct platform_device *pdev)
/* Schedule the work queue. */
queue_work(cfspi->wq, &cfspi->work);
+ return 0;
+
+ err_create_wq:
+ dma_free(cfspi->xfer.va_rx, cfspi->xfer.pa_rx);
+ err_dma_alloc_rx:
+ dma_free(cfspi->xfer.va_tx[0], cfspi->xfer.pa_tx[0]);
+ err_dma_alloc_tx_0:
+ return res;
+}
+
+static void cfspi_uninit(struct net_device *dev)
+{
+ struct cfspi *cfspi = netdev_priv(dev);
+
+ /* Remove from list. */
+ spin_lock(&cfspi_list_lock);
+ list_del(&cfspi->list);
+ spin_unlock(&cfspi_list_lock);
+
+ cfspi->ndev = NULL;
+ /* Free DMA buffers. */
+ dma_free(cfspi->xfer.va_rx, cfspi->xfer.pa_rx);
+ dma_free(cfspi->xfer.va_tx[0], cfspi->xfer.pa_tx[0]);
+ set_bit(SPI_TERMINATE, &cfspi->state);
+ wake_up_interruptible(&cfspi->wait);
+ destroy_workqueue(cfspi->wq);
+ /* Destroy debugfs directory and files. */
+ dev_debugfs_rem(cfspi);
+ return;
+}
+
+static const struct net_device_ops cfspi_ops = {
+ .ndo_open = cfspi_open,
+ .ndo_stop = cfspi_close,
+ .ndo_init = cfspi_init,
+ .ndo_uninit = cfspi_uninit,
+ .ndo_start_xmit = cfspi_xmit
+};
+
+static void cfspi_setup(struct net_device *dev)
+{
+ struct cfspi *cfspi = netdev_priv(dev);
+ dev->features = 0;
+ dev->netdev_ops = &cfspi_ops;
+ dev->type = ARPHRD_CAIF;
+ dev->flags = IFF_NOARP | IFF_POINTOPOINT;
+ dev->tx_queue_len = 0;
+ dev->mtu = SPI_MAX_PAYLOAD_SIZE;
+ dev->destructor = free_netdev;
+ skb_queue_head_init(&cfspi->qhead);
+ skb_queue_head_init(&cfspi->chead);
+ cfspi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
+ cfspi->cfdev.use_frag = false;
+ cfspi->cfdev.use_stx = false;
+ cfspi->cfdev.use_fcs = false;
+ cfspi->ndev = dev;
+}
+
+int cfspi_spi_probe(struct platform_device *pdev)
+{
+ struct cfspi *cfspi = NULL;
+ struct net_device *ndev;
+ struct cfspi_dev *dev;
+ int res;
+ dev = (struct cfspi_dev *)pdev->dev.platform_data;
+
+ ndev = alloc_netdev(sizeof(struct cfspi),
+ "cfspi%d", cfspi_setup);
+ if (!dev)
+ return -ENODEV;
+
+ cfspi = netdev_priv(ndev);
+ netif_stop_queue(ndev);
+ cfspi->ndev = ndev;
+ cfspi->pdev = pdev;
+
+ /* Assign the SPI device. */
+ cfspi->dev = dev;
+ /* Assign the device ifc to this SPI interface. */
+ dev->ifc = &cfspi->ifc;
+
/* Register network device. */
res = register_netdev(ndev);
if (res) {
@@ -723,15 +762,6 @@ int cfspi_spi_probe(struct platform_device *pdev)
return res;
err_net_reg:
- dev_debugfs_rem(cfspi);
- set_bit(SPI_TERMINATE, &cfspi->state);
- wake_up_interruptible(&cfspi->wait);
- destroy_workqueue(cfspi->wq);
- err_create_wq:
- dma_free(cfspi->xfer.va_rx, cfspi->xfer.pa_rx);
- err_dma_alloc_rx:
- dma_free(cfspi->xfer.va_tx, cfspi->xfer.pa_tx);
- err_dma_alloc_tx:
free_netdev(ndev);
return res;
@@ -739,34 +769,8 @@ int cfspi_spi_probe(struct platform_device *pdev)
int cfspi_spi_remove(struct platform_device *pdev)
{
- struct list_head *list_node;
- struct list_head *n;
- struct cfspi *cfspi = NULL;
- struct cfspi_dev *dev;
-
- dev = (struct cfspi_dev *)pdev->dev.platform_data;
- spin_lock(&cfspi_list_lock);
- list_for_each_safe(list_node, n, &cfspi_list) {
- cfspi = list_entry(list_node, struct cfspi, list);
- /* Find the corresponding device. */
- if (cfspi->dev == dev) {
- /* Remove from list. */
- list_del(list_node);
- /* Free DMA buffers. */
- dma_free(cfspi->xfer.va_rx, cfspi->xfer.pa_rx);
- dma_free(cfspi->xfer.va_tx, cfspi->xfer.pa_tx);
- set_bit(SPI_TERMINATE, &cfspi->state);
- wake_up_interruptible(&cfspi->wait);
- destroy_workqueue(cfspi->wq);
- /* Destroy debugfs directory and files. */
- dev_debugfs_rem(cfspi);
- unregister_netdev(cfspi->ndev);
- spin_unlock(&cfspi_list_lock);
- return 0;
- }
- }
- spin_unlock(&cfspi_list_lock);
- return -ENODEV;
+ /* Everything is done in cfspi_uninit(). */
+ return 0;
}
static void __exit cfspi_exit_module(void)
@@ -777,7 +781,7 @@ static void __exit cfspi_exit_module(void)
list_for_each_safe(list_node, n, &cfspi_list) {
cfspi = list_entry(list_node, struct cfspi, list);
- platform_device_unregister(cfspi->pdev);
+ unregister_netdev(cfspi->ndev);
}
/* Destroy sysfs files. */
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index f6c98fb4a517..ab45758c49a4 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -116,6 +116,8 @@ source "drivers/net/can/sja1000/Kconfig"
source "drivers/net/can/c_can/Kconfig"
+source "drivers/net/can/cc770/Kconfig"
+
source "drivers/net/can/usb/Kconfig"
source "drivers/net/can/softing/Kconfig"
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 24ebfe8d758a..938be37b670c 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -14,6 +14,7 @@ obj-y += softing/
obj-$(CONFIG_CAN_SJA1000) += sja1000/
obj-$(CONFIG_CAN_MSCAN) += mscan/
obj-$(CONFIG_CAN_C_CAN) += c_can/
+obj-$(CONFIG_CAN_CC770) += cc770/
obj-$(CONFIG_CAN_AT91) += at91_can.o
obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o
obj-$(CONFIG_CAN_MCP251X) += mcp251x.o
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 044ea0647b04..6ea905c2cf6d 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -1383,18 +1383,7 @@ static struct platform_driver at91_can_driver = {
.id_table = at91_can_id_table,
};
-static int __init at91_can_module_init(void)
-{
- return platform_driver_register(&at91_can_driver);
-}
-
-static void __exit at91_can_module_exit(void)
-{
- platform_driver_unregister(&at91_can_driver);
-}
-
-module_init(at91_can_module_init);
-module_exit(at91_can_module_exit);
+module_platform_driver(at91_can_driver);
MODULE_AUTHOR("Marc Kleine-Budde <mkl@pengutronix.de>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
index a1c5abc38cd2..349e0fabb63a 100644
--- a/drivers/net/can/bfin_can.c
+++ b/drivers/net/can/bfin_can.c
@@ -676,17 +676,7 @@ static struct platform_driver bfin_can_driver = {
},
};
-static int __init bfin_can_init(void)
-{
- return platform_driver_register(&bfin_can_driver);
-}
-module_init(bfin_can_init);
-
-static void __exit bfin_can_exit(void)
-{
- platform_driver_unregister(&bfin_can_driver);
-}
-module_exit(bfin_can_exit);
+module_platform_driver(bfin_can_driver);
MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index 0b5c6f8bdd34..5e1a5ff6476e 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -197,17 +197,7 @@ static struct platform_driver c_can_plat_driver = {
.remove = __devexit_p(c_can_plat_remove),
};
-static int __init c_can_plat_init(void)
-{
- return platform_driver_register(&c_can_plat_driver);
-}
-module_init(c_can_plat_init);
-
-static void __exit c_can_plat_exit(void)
-{
- platform_driver_unregister(&c_can_plat_driver);
-}
-module_exit(c_can_plat_exit);
+module_platform_driver(c_can_plat_driver);
MODULE_AUTHOR("Bhupesh Sharma <bhupesh.sharma@st.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/cc770/Kconfig b/drivers/net/can/cc770/Kconfig
new file mode 100644
index 000000000000..22c07a8c8b43
--- /dev/null
+++ b/drivers/net/can/cc770/Kconfig
@@ -0,0 +1,21 @@
+menuconfig CAN_CC770
+ tristate "Bosch CC770 and Intel AN82527 devices"
+ depends on CAN_DEV && HAS_IOMEM
+
+if CAN_CC770
+
+config CAN_CC770_ISA
+ tristate "ISA Bus based legacy CC770 driver"
+ ---help---
+ This driver adds legacy support for CC770 and AN82527 chips
+ connected to the ISA bus using I/O port, memory mapped or
+ indirect access.
+
+config CAN_CC770_PLATFORM
+ tristate "Generic Platform Bus based CC770 driver"
+ ---help---
+ This driver adds support for the CC770 and AN82527 chips
+	  connected to the "platform bus" (Linux abstraction for devices
+	  attached directly to the processor).
+
+endif
diff --git a/drivers/net/can/cc770/Makefile b/drivers/net/can/cc770/Makefile
new file mode 100644
index 000000000000..9fb8321b33eb
--- /dev/null
+++ b/drivers/net/can/cc770/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for the Bosch CC770 CAN controller drivers.
+#
+
+obj-$(CONFIG_CAN_CC770) += cc770.o
+obj-$(CONFIG_CAN_CC770_ISA) += cc770_isa.o
+obj-$(CONFIG_CAN_CC770_PLATFORM) += cc770_platform.o
+
+ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c
new file mode 100644
index 000000000000..766896747643
--- /dev/null
+++ b/drivers/net/can/cc770/cc770.c
@@ -0,0 +1,881 @@
+/*
+ * Core driver for the CC770 and AN82527 CAN controllers
+ *
+ * Copyright (C) 2009, 2011 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/if_ether.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+#include <linux/can/platform/cc770.h>
+
+#include "cc770.h"
+
+MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION(KBUILD_MODNAME " CAN netdevice driver");
+
+/*
+ * The CC770 is a CAN controller from Bosch, which is 100% compatible
+ * with the AN82527 from Intel, but with "bugs" being fixed and some
+ * additional functionality, mainly:
+ *
+ * 1. RX and TX error counters are readable.
+ * 2. Support of silent (listen-only) mode.
+ * 3. Message object 15 can receive all types of frames, also RTR and EFF.
+ *
+ * Details are available from Bosch's "CC770_Product_Info_2007-01.pdf",
+ * which explains in detail the compatibility between the CC770 and the
+ * 82527. This driver uses additional functionality 3 on real CC770
+ * devices. Unfortunately, the CC770 still does not store the message
+ * identifier of received remote transmission request frames, so the
+ * identifier is reported as 0.
+ *
+ * The message objects 1..14 can be used for TX and RX while message
+ * object 15 is optimized for RX. It has a shadow register for reliable
+ * data reception under heavy bus load, which makes it the preferred
+ * object for the most frequently received frame type. The frame type
+ * (EFF/SFF) for message object 15 can be selected via the kernel module
+ * parameter "msgobj15_eff". If not equal to 0, it will receive 29-bit
+ * EFF frames, otherwise 11-bit SFF messages.
+ */
+static int msgobj15_eff;
+module_param(msgobj15_eff, int, S_IRUGO);
+MODULE_PARM_DESC(msgobj15_eff, "Extended 29-bit frames for message object 15 "
+ "(default: 11-bit standard frames)");
+
+static int i82527_compat;
+module_param(i82527_compat, int, S_IRUGO);
+MODULE_PARM_DESC(i82527_compat, "Strict Intel 82527 compatibility mode "
+ "without using additional functions");
+
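+/*
+ * Example: load the core driver with 29-bit EFF reception on message
+ * object 15 (see the module parameters above):
+ *
+ *   insmod cc770.ko msgobj15_eff=1
+ */
+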
+/*
+ * This driver uses the last 5 message objects 11..15. The definitions
+ * and structure below allow them to be configured and assigned to the
+ * real message objects.
+ */
+static unsigned char cc770_obj_flags[CC770_OBJ_MAX] = {
+ [CC770_OBJ_RX0] = CC770_OBJ_FLAG_RX,
+ [CC770_OBJ_RX1] = CC770_OBJ_FLAG_RX | CC770_OBJ_FLAG_EFF,
+ [CC770_OBJ_RX_RTR0] = CC770_OBJ_FLAG_RX | CC770_OBJ_FLAG_RTR,
+ [CC770_OBJ_RX_RTR1] = CC770_OBJ_FLAG_RX | CC770_OBJ_FLAG_RTR |
+ CC770_OBJ_FLAG_EFF,
+ [CC770_OBJ_TX] = 0,
+};
+
+static struct can_bittiming_const cc770_bittiming_const = {
+ .name = KBUILD_MODNAME,
+ .tseg1_min = 1,
+ .tseg1_max = 16,
+ .tseg2_min = 1,
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 64,
+ .brp_inc = 1,
+};
+
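+/*
+ * Map an interrupt ID from the interrupt register to the driver's
+ * message object index: ID 1 is the status interrupt (handled
+ * separately in cc770_interrupt()), ID 2 corresponds to message
+ * object 15 (driver object index 0) and IDs 3..16 to message
+ * objects 1..14.
+ */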
+static inline int intid2obj(unsigned int intid)
+{
+ if (intid == 2)
+ return 0;
+ else
+ return MSGOBJ_LAST + 2 - intid;
+}
+
+static void enable_all_objs(const struct net_device *dev)
+{
+ struct cc770_priv *priv = netdev_priv(dev);
+ u8 msgcfg;
+ unsigned char obj_flags;
+ unsigned int o, mo;
+
+ for (o = 0; o < ARRAY_SIZE(priv->obj_flags); o++) {
+ obj_flags = priv->obj_flags[o];
+ mo = obj2msgobj(o);
+
+ if (obj_flags & CC770_OBJ_FLAG_RX) {
+ /*
+ * We don't need extra objects for RTR and EFF if
+ * the additional CC770 functions are enabled.
+ */
+ if (priv->control_normal_mode & CTRL_EAF) {
+ if (o > 0)
+ continue;
+ netdev_dbg(dev, "Message object %d for "
+ "RX data, RTR, SFF and EFF\n", mo);
+ } else {
+ netdev_dbg(dev,
+ "Message object %d for RX %s %s\n",
+ mo, obj_flags & CC770_OBJ_FLAG_RTR ?
+ "RTR" : "data",
+ obj_flags & CC770_OBJ_FLAG_EFF ?
+ "EFF" : "SFF");
+ }
+
+ if (obj_flags & CC770_OBJ_FLAG_EFF)
+ msgcfg = MSGCFG_XTD;
+ else
+ msgcfg = 0;
+ if (obj_flags & CC770_OBJ_FLAG_RTR)
+ msgcfg |= MSGCFG_DIR;
+
+ cc770_write_reg(priv, msgobj[mo].config, msgcfg);
+ cc770_write_reg(priv, msgobj[mo].ctrl0,
+ MSGVAL_SET | TXIE_RES |
+ RXIE_SET | INTPND_RES);
+
+ if (obj_flags & CC770_OBJ_FLAG_RTR)
+ cc770_write_reg(priv, msgobj[mo].ctrl1,
+ NEWDAT_RES | CPUUPD_SET |
+ TXRQST_RES | RMTPND_RES);
+ else
+ cc770_write_reg(priv, msgobj[mo].ctrl1,
+ NEWDAT_RES | MSGLST_RES |
+ TXRQST_RES | RMTPND_RES);
+ } else {
+ netdev_dbg(dev, "Message object %d for "
+ "TX data, RTR, SFF and EFF\n", mo);
+
+ cc770_write_reg(priv, msgobj[mo].ctrl1,
+ RMTPND_RES | TXRQST_RES |
+ CPUUPD_RES | NEWDAT_RES);
+ cc770_write_reg(priv, msgobj[mo].ctrl0,
+ MSGVAL_RES | TXIE_RES |
+ RXIE_RES | INTPND_RES);
+ }
+ }
+}
+
+static void disable_all_objs(const struct cc770_priv *priv)
+{
+ int o, mo;
+
+ for (o = 0; o < ARRAY_SIZE(priv->obj_flags); o++) {
+ mo = obj2msgobj(o);
+
+ if (priv->obj_flags[o] & CC770_OBJ_FLAG_RX) {
+ if (o > 0 && priv->control_normal_mode & CTRL_EAF)
+ continue;
+
+ cc770_write_reg(priv, msgobj[mo].ctrl1,
+ NEWDAT_RES | MSGLST_RES |
+ TXRQST_RES | RMTPND_RES);
+ cc770_write_reg(priv, msgobj[mo].ctrl0,
+ MSGVAL_RES | TXIE_RES |
+ RXIE_RES | INTPND_RES);
+ } else {
+ /* Clear message object for send */
+ cc770_write_reg(priv, msgobj[mo].ctrl1,
+ RMTPND_RES | TXRQST_RES |
+ CPUUPD_RES | NEWDAT_RES);
+ cc770_write_reg(priv, msgobj[mo].ctrl0,
+ MSGVAL_RES | TXIE_RES |
+ RXIE_RES | INTPND_RES);
+ }
+ }
+}
+
+static void set_reset_mode(struct net_device *dev)
+{
+ struct cc770_priv *priv = netdev_priv(dev);
+
+ /* Enable configuration, put chip in bus-off and disable interrupts */
+ cc770_write_reg(priv, control, CTRL_CCE | CTRL_INI);
+
+ priv->can.state = CAN_STATE_STOPPED;
+
+ /* Clear interrupts */
+ cc770_read_reg(priv, interrupt);
+
+ /* Clear status register */
+ cc770_write_reg(priv, status, 0);
+
+ /* Disable all used message objects */
+ disable_all_objs(priv);
+}
+
+static void set_normal_mode(struct net_device *dev)
+{
+ struct cc770_priv *priv = netdev_priv(dev);
+
+ /* Clear interrupts */
+ cc770_read_reg(priv, interrupt);
+
+ /* Clear status register and pre-set last error code */
+ cc770_write_reg(priv, status, STAT_LEC_MASK);
+
+ /* Enable all used message objects */
+ enable_all_objs(dev);
+
+ /*
+ * Clear bus-off, interrupts only for errors,
+ * not for status change
+ */
+ cc770_write_reg(priv, control, priv->control_normal_mode);
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+}
+
+static void chipset_init(struct cc770_priv *priv)
+{
+ int mo, id, data;
+
+ /* Enable configuration and put chip in bus-off, disable interrupts */
+ cc770_write_reg(priv, control, (CTRL_CCE | CTRL_INI));
+
+ /* Set CLKOUT divider and slew rates */
+ cc770_write_reg(priv, clkout, priv->clkout);
+
+ /* Configure CPU interface / CLKOUT enable */
+ cc770_write_reg(priv, cpu_interface, priv->cpu_interface);
+
+ /* Set bus configuration */
+ cc770_write_reg(priv, bus_config, priv->bus_config);
+
+ /* Clear interrupts */
+ cc770_read_reg(priv, interrupt);
+
+ /* Clear status register */
+ cc770_write_reg(priv, status, 0);
+
+ /* Clear and invalidate message objects */
+ for (mo = MSGOBJ_FIRST; mo <= MSGOBJ_LAST; mo++) {
+ cc770_write_reg(priv, msgobj[mo].ctrl0,
+ INTPND_UNC | RXIE_RES |
+ TXIE_RES | MSGVAL_RES);
+ cc770_write_reg(priv, msgobj[mo].ctrl0,
+ INTPND_RES | RXIE_RES |
+ TXIE_RES | MSGVAL_RES);
+ cc770_write_reg(priv, msgobj[mo].ctrl1,
+ NEWDAT_RES | MSGLST_RES |
+ TXRQST_RES | RMTPND_RES);
+ for (data = 0; data < 8; data++)
+ cc770_write_reg(priv, msgobj[mo].data[data], 0);
+ for (id = 0; id < 4; id++)
+ cc770_write_reg(priv, msgobj[mo].id[id], 0);
+ cc770_write_reg(priv, msgobj[mo].config, 0);
+ }
+
+ /* Set all global ID masks to "don't care" */
+ cc770_write_reg(priv, global_mask_std[0], 0);
+ cc770_write_reg(priv, global_mask_std[1], 0);
+ cc770_write_reg(priv, global_mask_ext[0], 0);
+ cc770_write_reg(priv, global_mask_ext[1], 0);
+ cc770_write_reg(priv, global_mask_ext[2], 0);
+ cc770_write_reg(priv, global_mask_ext[3], 0);
+}
+
+static int cc770_probe_chip(struct net_device *dev)
+{
+ struct cc770_priv *priv = netdev_priv(dev);
+
+ /* Enable configuration, put chip in bus-off, disable ints */
+ cc770_write_reg(priv, control, CTRL_CCE | CTRL_EAF | CTRL_INI);
+ /* Configure cpu interface / CLKOUT disable */
+ cc770_write_reg(priv, cpu_interface, priv->cpu_interface);
+
+ /*
+ * Check if the hardware reset is still active or maybe there
+ * is no chip in this address space
+ */
+ if (cc770_read_reg(priv, cpu_interface) & CPUIF_RST) {
+ netdev_info(dev, "probing @0x%p failed (reset)\n",
+ priv->reg_base);
+ return -ENODEV;
+ }
+
+ /* Write and read back test pattern (some arbitrary values) */
+ cc770_write_reg(priv, msgobj[1].data[1], 0x25);
+ cc770_write_reg(priv, msgobj[2].data[3], 0x52);
+ cc770_write_reg(priv, msgobj[10].data[6], 0xc3);
+ if ((cc770_read_reg(priv, msgobj[1].data[1]) != 0x25) ||
+ (cc770_read_reg(priv, msgobj[2].data[3]) != 0x52) ||
+ (cc770_read_reg(priv, msgobj[10].data[6]) != 0xc3)) {
+ netdev_info(dev, "probing @0x%p failed (pattern)\n",
+ priv->reg_base);
+ return -ENODEV;
+ }
+
+ /* Check if this chip is a CC770 supporting additional functions */
+ if (cc770_read_reg(priv, control) & CTRL_EAF)
+ priv->control_normal_mode |= CTRL_EAF;
+
+ return 0;
+}
+
+static void cc770_start(struct net_device *dev)
+{
+ struct cc770_priv *priv = netdev_priv(dev);
+
+ /* enter reset mode if the chip is not already stopped */
+ if (priv->can.state != CAN_STATE_STOPPED)
+ set_reset_mode(dev);
+
+ /* leave reset mode */
+ set_normal_mode(dev);
+}
+
+static int cc770_set_mode(struct net_device *dev, enum can_mode mode)
+{
+ switch (mode) {
+ case CAN_MODE_START:
+ cc770_start(dev);
+ netif_wake_queue(dev);
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int cc770_set_bittiming(struct net_device *dev)
+{
+ struct cc770_priv *priv = netdev_priv(dev);
+ struct can_bittiming *bt = &priv->can.bittiming;
+ u8 btr0, btr1;
+
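+ /*
+ * BTR0: bits 0..5 hold BRP-1, bits 6..7 hold SJW-1.
+ * BTR1: bits 0..3 hold TSEG1-1 (prop_seg + phase_seg1), bits 4..6
+ * hold TSEG2-1 and bit 7 enables triple sampling, as encoded below.
+ */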
+ btr0 = ((bt->brp - 1) & 0x3f) | (((bt->sjw - 1) & 0x3) << 6);
+ btr1 = ((bt->prop_seg + bt->phase_seg1 - 1) & 0xf) |
+ (((bt->phase_seg2 - 1) & 0x7) << 4);
+ if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
+ btr1 |= 0x80;
+
+ netdev_info(dev, "setting BTR0=0x%02x BTR1=0x%02x\n", btr0, btr1);
+
+ cc770_write_reg(priv, bit_timing_0, btr0);
+ cc770_write_reg(priv, bit_timing_1, btr1);
+
+ return 0;
+}
+
+static int cc770_get_berr_counter(const struct net_device *dev,
+ struct can_berr_counter *bec)
+{
+ struct cc770_priv *priv = netdev_priv(dev);
+
+ bec->txerr = cc770_read_reg(priv, tx_error_counter);
+ bec->rxerr = cc770_read_reg(priv, rx_error_counter);
+
+ return 0;
+}
+
+static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct cc770_priv *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+ struct can_frame *cf = (struct can_frame *)skb->data;
+ unsigned int mo = obj2msgobj(CC770_OBJ_TX);
+ u8 dlc, rtr;
+ u32 id;
+ int i;
+
+ if (can_dropped_invalid_skb(dev, skb))
+ return NETDEV_TX_OK;
+
+ if ((cc770_read_reg(priv,
+ msgobj[mo].ctrl1) & TXRQST_UNC) == TXRQST_SET) {
+ netdev_err(dev, "TX register is still occupied!\n");
+ return NETDEV_TX_BUSY;
+ }
+
+ netif_stop_queue(dev);
+
+ dlc = cf->can_dlc;
+ id = cf->can_id;
+ if (cf->can_id & CAN_RTR_FLAG)
+ rtr = 0;
+ else
+ rtr = MSGCFG_DIR;
+ cc770_write_reg(priv, msgobj[mo].ctrl1,
+ RMTPND_RES | TXRQST_RES | CPUUPD_SET | NEWDAT_RES);
+ cc770_write_reg(priv, msgobj[mo].ctrl0,
+ MSGVAL_SET | TXIE_SET | RXIE_RES | INTPND_RES);
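+ /*
+ * The CAN identifier is written left-aligned into the ID registers:
+ * four bytes for 29-bit EFF frames, two bytes for 11-bit SFF frames,
+ * with id[0] holding the most significant bits (hence the shifts).
+ */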
+ if (id & CAN_EFF_FLAG) {
+ id &= CAN_EFF_MASK;
+ cc770_write_reg(priv, msgobj[mo].config,
+ (dlc << 4) | rtr | MSGCFG_XTD);
+ cc770_write_reg(priv, msgobj[mo].id[3], id << 3);
+ cc770_write_reg(priv, msgobj[mo].id[2], id >> 5);
+ cc770_write_reg(priv, msgobj[mo].id[1], id >> 13);
+ cc770_write_reg(priv, msgobj[mo].id[0], id >> 21);
+ } else {
+ id &= CAN_SFF_MASK;
+ cc770_write_reg(priv, msgobj[mo].config, (dlc << 4) | rtr);
+ cc770_write_reg(priv, msgobj[mo].id[0], id >> 3);
+ cc770_write_reg(priv, msgobj[mo].id[1], id << 5);
+ }
+
+ for (i = 0; i < dlc; i++)
+ cc770_write_reg(priv, msgobj[mo].data[i], cf->data[i]);
+
+ cc770_write_reg(priv, msgobj[mo].ctrl1,
+ RMTPND_RES | TXRQST_SET | CPUUPD_RES | NEWDAT_UNC);
+
+ stats->tx_bytes += dlc;
+
+ can_put_echo_skb(skb, dev, 0);
+
+ /*
+ * HM: We had some cases of repeated IRQs, so make sure the
+ * INT is acknowledged here as well. It is already acknowledged
+ * further up, but doing it again fixed the issue.
+ */
+ cc770_write_reg(priv, msgobj[mo].ctrl0,
+ MSGVAL_UNC | TXIE_UNC | RXIE_UNC | INTPND_RES);
+
+ return NETDEV_TX_OK;
+}
+
+static void cc770_rx(struct net_device *dev, unsigned int mo, u8 ctrl1)
+{
+ struct cc770_priv *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ u8 config;
+ u32 id;
+ int i;
+
+ skb = alloc_can_skb(dev, &cf);
+ if (!skb)
+ return;
+
+ config = cc770_read_reg(priv, msgobj[mo].config);
+
+ if (ctrl1 & RMTPND_SET) {
+ /*
+ * Unfortunately, the chip does not store the real message
+ * identifier of the received remote transmission request
+ * frame. Therefore we set it to 0.
+ */
+ cf->can_id = CAN_RTR_FLAG;
+ if (config & MSGCFG_XTD)
+ cf->can_id |= CAN_EFF_FLAG;
+ cf->can_dlc = 0;
+ } else {
+ if (config & MSGCFG_XTD) {
+ id = cc770_read_reg(priv, msgobj[mo].id[3]);
+ id |= cc770_read_reg(priv, msgobj[mo].id[2]) << 8;
+ id |= cc770_read_reg(priv, msgobj[mo].id[1]) << 16;
+ id |= cc770_read_reg(priv, msgobj[mo].id[0]) << 24;
+ id >>= 3;
+ id |= CAN_EFF_FLAG;
+ } else {
+ id = cc770_read_reg(priv, msgobj[mo].id[1]);
+ id |= cc770_read_reg(priv, msgobj[mo].id[0]) << 8;
+ id >>= 5;
+ }
+
+ cf->can_id = id;
+ cf->can_dlc = get_can_dlc((config & 0xf0) >> 4);
+ for (i = 0; i < cf->can_dlc; i++)
+ cf->data[i] = cc770_read_reg(priv, msgobj[mo].data[i]);
+ }
+ netif_rx(skb);
+
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+}
+
+static int cc770_err(struct net_device *dev, u8 status)
+{
+ struct cc770_priv *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ u8 lec;
+
+ netdev_dbg(dev, "status interrupt (%#x)\n", status);
+
+ skb = alloc_can_err_skb(dev, &cf);
+ if (!skb)
+ return -ENOMEM;
+
+ /* Use extended functions of the CC770 */
+ if (priv->control_normal_mode & CTRL_EAF) {
+ cf->data[6] = cc770_read_reg(priv, tx_error_counter);
+ cf->data[7] = cc770_read_reg(priv, rx_error_counter);
+ }
+
+ if (status & STAT_BOFF) {
+ /* Disable interrupts */
+ cc770_write_reg(priv, control, CTRL_INI);
+ cf->can_id |= CAN_ERR_BUSOFF;
+ priv->can.state = CAN_STATE_BUS_OFF;
+ can_bus_off(dev);
+ } else if (status & STAT_WARN) {
+ cf->can_id |= CAN_ERR_CRTL;
+ /* Only the CC770 reports the error passive state */
+ if (cf->data[7] > 127) {
+ cf->data[1] = CAN_ERR_CRTL_RX_PASSIVE |
+ CAN_ERR_CRTL_TX_PASSIVE;
+ priv->can.state = CAN_STATE_ERROR_PASSIVE;
+ priv->can.can_stats.error_passive++;
+ } else {
+ cf->data[1] = CAN_ERR_CRTL_RX_WARNING |
+ CAN_ERR_CRTL_TX_WARNING;
+ priv->can.state = CAN_STATE_ERROR_WARNING;
+ priv->can.can_stats.error_warning++;
+ }
+ } else {
+ /* Back to error active */
+ cf->can_id |= CAN_ERR_PROT;
+ cf->data[2] = CAN_ERR_PROT_ACTIVE;
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ }
+
+ lec = status & STAT_LEC_MASK;
+ if (lec < 7 && lec > 0) {
+ if (lec == STAT_LEC_ACK) {
+ cf->can_id |= CAN_ERR_ACK;
+ } else {
+ cf->can_id |= CAN_ERR_PROT;
+ switch (lec) {
+ case STAT_LEC_STUFF:
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+ break;
+ case STAT_LEC_FORM:
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+ break;
+ case STAT_LEC_BIT1:
+ cf->data[2] |= CAN_ERR_PROT_BIT1;
+ break;
+ case STAT_LEC_BIT0:
+ cf->data[2] |= CAN_ERR_PROT_BIT0;
+ break;
+ case STAT_LEC_CRC:
+ cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ;
+ break;
+ }
+ }
+ }
+
+ netif_rx(skb);
+
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+
+ return 0;
+}
+
+static int cc770_status_interrupt(struct net_device *dev)
+{
+ struct cc770_priv *priv = netdev_priv(dev);
+ u8 status;
+
+ status = cc770_read_reg(priv, status);
+ /* Reset the status register including RXOK and TXOK */
+ cc770_write_reg(priv, status, STAT_LEC_MASK);
+
+ if (status & (STAT_WARN | STAT_BOFF) ||
+ (status & STAT_LEC_MASK) != STAT_LEC_MASK) {
+ cc770_err(dev, status);
+ return status & STAT_BOFF;
+ }
+
+ return 0;
+}
+
+static void cc770_rx_interrupt(struct net_device *dev, unsigned int o)
+{
+ struct cc770_priv *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+ unsigned int mo = obj2msgobj(o);
+ u8 ctrl1;
+ int n = CC770_MAX_MSG;
+
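+ /*
+ * Process at most CC770_MAX_MSG frames per invocation; stop early
+ * when the object flags neither new data nor (with the CC770
+ * additional functions enabled) a pending interrupt.
+ */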
+ while (n--) {
+ ctrl1 = cc770_read_reg(priv, msgobj[mo].ctrl1);
+
+ if (!(ctrl1 & NEWDAT_SET)) {
+ /* Check for RTR if additional functions are enabled */
+ if (priv->control_normal_mode & CTRL_EAF) {
+ if (!(cc770_read_reg(priv, msgobj[mo].ctrl0) &
+ INTPND_SET))
+ break;
+ } else {
+ break;
+ }
+ }
+
+ if (ctrl1 & MSGLST_SET) {
+ stats->rx_over_errors++;
+ stats->rx_errors++;
+ }
+ if (mo < MSGOBJ_LAST)
+ cc770_write_reg(priv, msgobj[mo].ctrl1,
+ NEWDAT_RES | MSGLST_RES |
+ TXRQST_UNC | RMTPND_UNC);
+ cc770_rx(dev, mo, ctrl1);
+
+ cc770_write_reg(priv, msgobj[mo].ctrl0,
+ MSGVAL_SET | TXIE_RES |
+ RXIE_SET | INTPND_RES);
+ cc770_write_reg(priv, msgobj[mo].ctrl1,
+ NEWDAT_RES | MSGLST_RES |
+ TXRQST_RES | RMTPND_RES);
+ }
+}
+
+static void cc770_rtr_interrupt(struct net_device *dev, unsigned int o)
+{
+ struct cc770_priv *priv = netdev_priv(dev);
+ unsigned int mo = obj2msgobj(o);
+ u8 ctrl0, ctrl1;
+ int n = CC770_MAX_MSG;
+
+ while (n--) {
+ ctrl0 = cc770_read_reg(priv, msgobj[mo].ctrl0);
+ if (!(ctrl0 & INTPND_SET))
+ break;
+
+ ctrl1 = cc770_read_reg(priv, msgobj[mo].ctrl1);
+ cc770_rx(dev, mo, ctrl1);
+
+ cc770_write_reg(priv, msgobj[mo].ctrl0,
+ MSGVAL_SET | TXIE_RES |
+ RXIE_SET | INTPND_RES);
+ cc770_write_reg(priv, msgobj[mo].ctrl1,
+ NEWDAT_RES | CPUUPD_SET |
+ TXRQST_RES | RMTPND_RES);
+ }
+}
+
+static void cc770_tx_interrupt(struct net_device *dev, unsigned int o)
+{
+ struct cc770_priv *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+ unsigned int mo = obj2msgobj(o);
+
+ /* Nothing more to send, switch off interrupts */
+ cc770_write_reg(priv, msgobj[mo].ctrl0,
+ MSGVAL_RES | TXIE_RES | RXIE_RES | INTPND_RES);
+ /*
+ * We had some cases of repeated IRQs, so make sure the
+ * INT is acknowledged.
+ */
+ cc770_write_reg(priv, msgobj[mo].ctrl0,
+ MSGVAL_UNC | TXIE_UNC | RXIE_UNC | INTPND_RES);
+
+ stats->tx_packets++;
+ can_get_echo_skb(dev, 0);
+ netif_wake_queue(dev);
+}
+
+irqreturn_t cc770_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct cc770_priv *priv = netdev_priv(dev);
+ u8 intid;
+ int o, n = 0;
+
+ /* Shared interrupts and IRQ off? */
+ if (priv->can.state == CAN_STATE_STOPPED)
+ return IRQ_NONE;
+
+ if (priv->pre_irq)
+ priv->pre_irq(priv);
+
+ while (n < CC770_MAX_IRQ) {
+ /* Read the highest pending interrupt request */
+ intid = cc770_read_reg(priv, interrupt);
+ if (!intid)
+ break;
+ n++;
+
+ if (intid == 1) {
+ /* Exit in case of bus-off */
+ if (cc770_status_interrupt(dev))
+ break;
+ } else {
+ o = intid2obj(intid);
+
+ if (o >= CC770_OBJ_MAX) {
+ netdev_err(dev, "Unexpected interrupt id %d\n",
+ intid);
+ continue;
+ }
+
+ if (priv->obj_flags[o] & CC770_OBJ_FLAG_RTR)
+ cc770_rtr_interrupt(dev, o);
+ else if (priv->obj_flags[o] & CC770_OBJ_FLAG_RX)
+ cc770_rx_interrupt(dev, o);
+ else
+ cc770_tx_interrupt(dev, o);
+ }
+ }
+
+ if (priv->post_irq)
+ priv->post_irq(priv);
+
+ if (n >= CC770_MAX_IRQ)
+ netdev_dbg(dev, "%d messages handled in ISR\n", n);
+
+ return (n) ? IRQ_HANDLED : IRQ_NONE;
+}
+
+static int cc770_open(struct net_device *dev)
+{
+ struct cc770_priv *priv = netdev_priv(dev);
+ int err;
+
+ /* set chip into reset mode */
+ set_reset_mode(dev);
+
+ /* common open */
+ err = open_candev(dev);
+ if (err)
+ return err;
+
+ err = request_irq(dev->irq, &cc770_interrupt, priv->irq_flags,
+ dev->name, dev);
+ if (err) {
+ close_candev(dev);
+ return -EAGAIN;
+ }
+
+ /* init and start chip */
+ cc770_start(dev);
+
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+static int cc770_close(struct net_device *dev)
+{
+ netif_stop_queue(dev);
+ set_reset_mode(dev);
+
+ free_irq(dev->irq, dev);
+ close_candev(dev);
+
+ return 0;
+}
+
+struct net_device *alloc_cc770dev(int sizeof_priv)
+{
+ struct net_device *dev;
+ struct cc770_priv *priv;
+
+ dev = alloc_candev(sizeof(struct cc770_priv) + sizeof_priv,
+ CC770_ECHO_SKB_MAX);
+ if (!dev)
+ return NULL;
+
+ priv = netdev_priv(dev);
+
+ priv->dev = dev;
+ priv->can.bittiming_const = &cc770_bittiming_const;
+ priv->can.do_set_bittiming = cc770_set_bittiming;
+ priv->can.do_set_mode = cc770_set_mode;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
+
+ memcpy(priv->obj_flags, cc770_obj_flags, sizeof(cc770_obj_flags));
+
+ if (sizeof_priv)
+ priv->priv = (void *)priv + sizeof(struct cc770_priv);
+
+ return dev;
+}
+EXPORT_SYMBOL_GPL(alloc_cc770dev);
+
+void free_cc770dev(struct net_device *dev)
+{
+ free_candev(dev);
+}
+EXPORT_SYMBOL_GPL(free_cc770dev);
+
+static const struct net_device_ops cc770_netdev_ops = {
+ .ndo_open = cc770_open,
+ .ndo_stop = cc770_close,
+ .ndo_start_xmit = cc770_start_xmit,
+};
+
+int register_cc770dev(struct net_device *dev)
+{
+ struct cc770_priv *priv = netdev_priv(dev);
+ int err;
+
+ err = cc770_probe_chip(dev);
+ if (err)
+ return err;
+
+ dev->netdev_ops = &cc770_netdev_ops;
+
+ dev->flags |= IFF_ECHO; /* we support local echo */
+
+ /* Should we use additional functions? */
+ if (!i82527_compat && priv->control_normal_mode & CTRL_EAF) {
+ priv->can.do_get_berr_counter = cc770_get_berr_counter;
+ priv->control_normal_mode = CTRL_IE | CTRL_EAF | CTRL_EIE;
+ netdev_dbg(dev, "i82527 mode with additional functions\n");
+ } else {
+ priv->control_normal_mode = CTRL_IE | CTRL_EIE;
+ netdev_dbg(dev, "strict i82527 compatibility mode\n");
+ }
+
+ chipset_init(priv);
+ set_reset_mode(dev);
+
+ return register_candev(dev);
+}
+EXPORT_SYMBOL_GPL(register_cc770dev);
+
+void unregister_cc770dev(struct net_device *dev)
+{
+ set_reset_mode(dev);
+ unregister_candev(dev);
+}
+EXPORT_SYMBOL_GPL(unregister_cc770dev);
+
+static __init int cc770_init(void)
+{
+ if (msgobj15_eff) {
+ cc770_obj_flags[CC770_OBJ_RX0] |= CC770_OBJ_FLAG_EFF;
+ cc770_obj_flags[CC770_OBJ_RX1] &= ~CC770_OBJ_FLAG_EFF;
+ }
+
+ pr_info("CAN netdevice driver\n");
+
+ return 0;
+}
+module_init(cc770_init);
+
+static __exit void cc770_exit(void)
+{
+ pr_info("driver removed\n");
+}
+module_exit(cc770_exit);
diff --git a/drivers/net/can/cc770/cc770.h b/drivers/net/can/cc770/cc770.h
new file mode 100644
index 000000000000..a1739db98d91
--- /dev/null
+++ b/drivers/net/can/cc770/cc770.h
@@ -0,0 +1,203 @@
+/*
+ * Core driver for the CC770 and AN82527 CAN controllers
+ *
+ * Copyright (C) 2009, 2011 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef CC770_DEV_H
+#define CC770_DEV_H
+
+#include <linux/can/dev.h>
+
+struct cc770_msgobj {
+ u8 ctrl0;
+ u8 ctrl1;
+ u8 id[4];
+ u8 config;
+ u8 data[8];
+ u8 dontuse; /* padding */
+} __packed;
+
+struct cc770_regs {
+ union {
+ struct cc770_msgobj msgobj[16]; /* Message object 1..15 */
+ struct {
+ u8 control; /* Control Register */
+ u8 status; /* Status Register */
+ u8 cpu_interface; /* CPU Interface Register */
+ u8 dontuse1;
+ u8 high_speed_read[2]; /* High Speed Read */
+ u8 global_mask_std[2]; /* Standard Global Mask */
+ u8 global_mask_ext[4]; /* Extended Global Mask */
+ u8 msg15_mask[4]; /* Message 15 Mask */
+ u8 dontuse2[15];
+ u8 clkout; /* Clock Out Register */
+ u8 dontuse3[15];
+ u8 bus_config; /* Bus Configuration Register */
+ u8 dontuse4[15];
+ u8 bit_timing_0; /* Bit Timing Register byte 0 */
+ u8 dontuse5[15];
+ u8 bit_timing_1; /* Bit Timing Register byte 1 */
+ u8 dontuse6[15];
+ u8 interrupt; /* Interrupt Register */
+ u8 dontuse7[15];
+ u8 rx_error_counter; /* Receive Error Counter */
+ u8 dontuse8[15];
+ u8 tx_error_counter; /* Transmit Error Counter */
+ u8 dontuse9[31];
+ u8 p1_conf;
+ u8 dontuse10[15];
+ u8 p2_conf;
+ u8 dontuse11[15];
+ u8 p1_in;
+ u8 dontuse12[15];
+ u8 p2_in;
+ u8 dontuse13[15];
+ u8 p1_out;
+ u8 dontuse14[15];
+ u8 p2_out;
+ u8 dontuse15[15];
+ u8 serial_reset_addr;
+ };
+ };
+} __packed;
+
+/* Control Register (0x00) */
+#define CTRL_INI 0x01 /* Initialization */
+#define CTRL_IE 0x02 /* Interrupt Enable */
+#define CTRL_SIE 0x04 /* Status Interrupt Enable */
+#define CTRL_EIE 0x08 /* Error Interrupt Enable */
+#define CTRL_EAF 0x20 /* Enable additional functions */
+#define CTRL_CCE 0x40 /* Change Configuration Enable */
+
+/* Status Register (0x01) */
+#define STAT_LEC_STUFF 0x01 /* Stuff error */
+#define STAT_LEC_FORM 0x02 /* Form error */
+#define STAT_LEC_ACK 0x03 /* Acknowledgement error */
+#define STAT_LEC_BIT1 0x04 /* Bit1 error */
+#define STAT_LEC_BIT0 0x05 /* Bit0 error */
+#define STAT_LEC_CRC 0x06 /* CRC error */
+#define STAT_LEC_MASK 0x07 /* Last Error Code mask */
+#define STAT_TXOK 0x08 /* Transmit Message Successfully */
+#define STAT_RXOK 0x10 /* Receive Message Successfully */
+#define STAT_WAKE 0x20 /* Wake Up Status */
+#define STAT_WARN 0x40 /* Warning Status */
+#define STAT_BOFF 0x80 /* Bus Off Status */
+
+/*
+ * CPU Interface Register (0x02)
+ * Clock Out Register (0x1f)
+ * Bus Configuration Register (0x2f)
+ *
+ * see include/linux/can/platform/cc770.h
+ */
+
+/* Message Control Register 0 (Base Address + 0x0) */
+#define INTPND_RES 0x01 /* No Interrupt pending */
+#define INTPND_SET 0x02 /* Interrupt pending */
+#define INTPND_UNC 0x03
+#define RXIE_RES 0x04 /* Receive Interrupt Disable */
+#define RXIE_SET 0x08 /* Receive Interrupt Enable */
+#define RXIE_UNC 0x0c
+#define TXIE_RES 0x10 /* Transmit Interrupt Disable */
+#define TXIE_SET 0x20 /* Transmit Interrupt Enable */
+#define TXIE_UNC 0x30
+#define MSGVAL_RES 0x40 /* Message Invalid */
+#define MSGVAL_SET 0x80 /* Message Valid */
+#define MSGVAL_UNC 0xc0
+
+/* Message Control Register 1 (Base Address + 0x01) */
+#define NEWDAT_RES 0x01 /* No New Data */
+#define NEWDAT_SET 0x02 /* New Data */
+#define NEWDAT_UNC 0x03
+#define MSGLST_RES 0x04 /* No Message Lost */
+#define MSGLST_SET 0x08 /* Message Lost */
+#define MSGLST_UNC 0x0c
+#define CPUUPD_RES 0x04 /* No CPU Updating */
+#define CPUUPD_SET 0x08 /* CPU Updating */
+#define CPUUPD_UNC 0x0c
+#define TXRQST_RES 0x10 /* No Transmission Request */
+#define TXRQST_SET 0x20 /* Transmission Request */
+#define TXRQST_UNC 0x30
+#define RMTPND_RES 0x40 /* No Remote Request Pending */
+#define RMTPND_SET 0x80 /* Remote Request Pending */
+#define RMTPND_UNC 0xc0
+
+/* Message Configuration Register (Base Address + 0x06) */
+#define MSGCFG_XTD 0x04 /* Extended Identifier */
+#define MSGCFG_DIR 0x08 /* Direction is Transmit */
+
+#define MSGOBJ_FIRST 1
+#define MSGOBJ_LAST 15
+
+#define CC770_IO_SIZE 0x100
+#define CC770_MAX_IRQ 20 /* max. number of interrupts handled in ISR */
+#define CC770_MAX_MSG 4 /* max. number of messages handled in ISR */
+
+#define CC770_ECHO_SKB_MAX 1
+
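+/*
+ * Register accessors: dispatch through the board-specific read_reg()
+ * and write_reg() callbacks, addressing a register by its offset
+ * within struct cc770_regs.
+ */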
+#define cc770_read_reg(priv, member) \
+ priv->read_reg(priv, offsetof(struct cc770_regs, member))
+
+#define cc770_write_reg(priv, member, value) \
+ priv->write_reg(priv, offsetof(struct cc770_regs, member), value)
+
+/*
+ * Message objects and flags used by this driver
+ */
+#define CC770_OBJ_FLAG_RX 0x01
+#define CC770_OBJ_FLAG_RTR 0x02
+#define CC770_OBJ_FLAG_EFF 0x04
+
+enum {
+ CC770_OBJ_RX0 = 0, /* for receiving normal messages */
+ CC770_OBJ_RX1, /* for receiving normal messages */
+ CC770_OBJ_RX_RTR0, /* for receiving remote transmission requests */
+ CC770_OBJ_RX_RTR1, /* for receiving remote transmission requests */
+ CC770_OBJ_TX, /* for sending messages */
+ CC770_OBJ_MAX
+};
+
+#define obj2msgobj(o) (MSGOBJ_LAST - (o)) /* message object 11..15 */
+
+/*
+ * CC770 private data structure
+ */
+struct cc770_priv {
+ struct can_priv can; /* must be the first member */
+ struct sk_buff *echo_skb;
+
+ /* the lower-layer is responsible for appropriate locking */
+ u8 (*read_reg)(const struct cc770_priv *priv, int reg);
+ void (*write_reg)(const struct cc770_priv *priv, int reg, u8 val);
+ void (*pre_irq)(const struct cc770_priv *priv);
+ void (*post_irq)(const struct cc770_priv *priv);
+
+ void *priv; /* for board-specific data */
+ struct net_device *dev;
+
+ void __iomem *reg_base; /* ioremap'ed address to registers */
+ unsigned long irq_flags; /* for request_irq() */
+
+ unsigned char obj_flags[CC770_OBJ_MAX];
+ u8 control_normal_mode; /* Control register for normal mode */
+ u8 cpu_interface; /* CPU interface register */
+ u8 clkout; /* Clock out register */
+ u8 bus_config; /* Bus configuration register */
+};
+
+struct net_device *alloc_cc770dev(int sizeof_priv);
+void free_cc770dev(struct net_device *dev);
+int register_cc770dev(struct net_device *dev);
+void unregister_cc770dev(struct net_device *dev);
+
+#endif /* CC770_DEV_H */
diff --git a/drivers/net/can/cc770/cc770_isa.c b/drivers/net/can/cc770/cc770_isa.c
new file mode 100644
index 000000000000..4be5fe2c40a5
--- /dev/null
+++ b/drivers/net/can/cc770/cc770_isa.c
@@ -0,0 +1,367 @@
+/*
+ * Driver for CC770 and AN82527 CAN controllers on the legacy ISA bus
+ *
+ * Copyright (C) 2009, 2011 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Bosch CC770 and Intel AN82527 CAN controllers on the ISA or PC-104 bus.
+ * The I/O port or memory address and the IRQ number must be specified via
+ * module parameters:
+ *
+ * insmod cc770_isa.ko port=0x310,0x380 irq=7,11
+ *
+ * for ISA devices using I/O ports or:
+ *
+ * insmod cc770_isa.ko mem=0xd1000,0xd1000 irq=7,11
+ *
+ * for memory mapped ISA devices.
+ *
+ * Indirect access via address and data port is supported as well:
+ *
+ * insmod cc770_isa.ko port=0x310,0x380 indirect=1 irq=7,11
+ *
+ * Furthermore, the following mode parameter can be defined:
+ *
+ * clk: External oscillator clock frequency (default=16000000 [16 MHz])
+ * cir: CPU interface register (default=0x40 [DSC])
+ * bcr: Bus configuration register (default=0x40 [CBY])
+ * cor: Clockout register (default=0x00)
+ *
+ * Note: for clk, cir, bcr and cor, the first argument re-defines the
+ * default for all other devices, e.g.:
+ *
+ * insmod cc770_isa.ko mem=0xd1000,0xd1000 irq=7,11 clk=24000000
+ *
+ * is equivalent to
+ *
+ * insmod cc770_isa.ko mem=0xd1000,0xd1000 irq=7,11 clk=24000000,24000000
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/platform/cc770.h>
+
+#include "cc770.h"
+
+#define MAXDEV 8
+
+MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
+MODULE_DESCRIPTION("Socket-CAN driver for CC770 on the ISA bus");
+MODULE_LICENSE("GPL v2");
+
+#define CLK_DEFAULT 16000000 /* 16 MHz */
+#define COR_DEFAULT 0x00
+#define BCR_DEFAULT BUSCFG_CBY
+
+static unsigned long port[MAXDEV];
+static unsigned long mem[MAXDEV];
+static int __devinitdata irq[MAXDEV];
+static int __devinitdata clk[MAXDEV];
+static u8 __devinitdata cir[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff};
+static u8 __devinitdata cor[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff};
+static u8 __devinitdata bcr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff};
+static int __devinitdata indirect[MAXDEV] = {[0 ... (MAXDEV - 1)] = -1};
+
+module_param_array(port, ulong, NULL, S_IRUGO);
+MODULE_PARM_DESC(port, "I/O port number");
+
+module_param_array(mem, ulong, NULL, S_IRUGO);
+MODULE_PARM_DESC(mem, "I/O memory address");
+
+module_param_array(indirect, int, NULL, S_IRUGO);
+MODULE_PARM_DESC(indirect, "Indirect access via address and data port");
+
+module_param_array(irq, int, NULL, S_IRUGO);
+MODULE_PARM_DESC(irq, "IRQ number");
+
+module_param_array(clk, int, NULL, S_IRUGO);
+MODULE_PARM_DESC(clk, "External oscillator clock frequency "
+ "(default=16000000 [16 MHz])");
+
+module_param_array(cir, byte, NULL, S_IRUGO);
+MODULE_PARM_DESC(cir, "CPU interface register (default=0x40 [DSC])");
+
+module_param_array(cor, byte, NULL, S_IRUGO);
+MODULE_PARM_DESC(cor, "Clockout register (default=0x00)");
+
+module_param_array(bcr, byte, NULL, S_IRUGO);
+MODULE_PARM_DESC(bcr, "Bus configuration register (default=0x40 [CBY])");
+
+#define CC770_IOSIZE 0x20
+#define CC770_IOSIZE_INDIRECT 0x02
+
+static struct platform_device *cc770_isa_devs[MAXDEV];
+
+static u8 cc770_isa_mem_read_reg(const struct cc770_priv *priv, int reg)
+{
+ return readb(priv->reg_base + reg);
+}
+
+static void cc770_isa_mem_write_reg(const struct cc770_priv *priv,
+ int reg, u8 val)
+{
+ writeb(val, priv->reg_base + reg);
+}
+
+static u8 cc770_isa_port_read_reg(const struct cc770_priv *priv, int reg)
+{
+ return inb((unsigned long)priv->reg_base + reg);
+}
+
+static void cc770_isa_port_write_reg(const struct cc770_priv *priv,
+ int reg, u8 val)
+{
+ outb(val, (unsigned long)priv->reg_base + reg);
+}
+
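+/*
+ * Indirect register access: the register number is written to the
+ * address port (base) and the data is then transferred via the data
+ * port (base + 1).
+ */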
+static u8 cc770_isa_port_read_reg_indirect(const struct cc770_priv *priv,
+ int reg)
+{
+ unsigned long base = (unsigned long)priv->reg_base;
+
+ outb(reg, base);
+ return inb(base + 1);
+}
+
+static void cc770_isa_port_write_reg_indirect(const struct cc770_priv *priv,
+ int reg, u8 val)
+{
+ unsigned long base = (unsigned long)priv->reg_base;
+
+ outb(reg, base);
+ outb(val, base + 1);
+}
+
+static int __devinit cc770_isa_probe(struct platform_device *pdev)
+{
+ struct net_device *dev;
+ struct cc770_priv *priv;
+ void __iomem *base = NULL;
+ int iosize = CC770_IOSIZE;
+ int idx = pdev->id;
+ int err;
+ u32 clktmp;
+
+ dev_dbg(&pdev->dev, "probing idx=%d: port=%#lx, mem=%#lx, irq=%d\n",
+ idx, port[idx], mem[idx], irq[idx]);
+ if (mem[idx]) {
+ if (!request_mem_region(mem[idx], iosize, KBUILD_MODNAME)) {
+ err = -EBUSY;
+ goto exit;
+ }
+ base = ioremap_nocache(mem[idx], iosize);
+ if (!base) {
+ err = -ENOMEM;
+ goto exit_release;
+ }
+ } else {
+ if (indirect[idx] > 0 ||
+ (indirect[idx] == -1 && indirect[0] > 0))
+ iosize = CC770_IOSIZE_INDIRECT;
+ if (!request_region(port[idx], iosize, KBUILD_MODNAME)) {
+ err = -EBUSY;
+ goto exit;
+ }
+ }
+
+ dev = alloc_cc770dev(0);
+ if (!dev) {
+ err = -ENOMEM;
+ goto exit_unmap;
+ }
+ priv = netdev_priv(dev);
+
+ dev->irq = irq[idx];
+ priv->irq_flags = IRQF_SHARED;
+ if (mem[idx]) {
+ priv->reg_base = base;
+ dev->base_addr = mem[idx];
+ priv->read_reg = cc770_isa_mem_read_reg;
+ priv->write_reg = cc770_isa_mem_write_reg;
+ } else {
+ priv->reg_base = (void __iomem *)port[idx];
+ dev->base_addr = port[idx];
+
+ if (iosize == CC770_IOSIZE_INDIRECT) {
+ priv->read_reg = cc770_isa_port_read_reg_indirect;
+ priv->write_reg = cc770_isa_port_write_reg_indirect;
+ } else {
+ priv->read_reg = cc770_isa_port_read_reg;
+ priv->write_reg = cc770_isa_port_write_reg;
+ }
+ }
+
+ if (clk[idx])
+ clktmp = clk[idx];
+ else if (clk[0])
+ clktmp = clk[0];
+ else
+ clktmp = CLK_DEFAULT;
+ priv->can.clock.freq = clktmp;
+
+ if (cir[idx] != 0xff) {
+ priv->cpu_interface = cir[idx];
+ } else if (cir[0] != 0xff) {
+ priv->cpu_interface = cir[0];
+ } else {
+ /* The system clock may not exceed 10 MHz */
+ if (clktmp > 10000000) {
+ priv->cpu_interface |= CPUIF_DSC;
+ clktmp /= 2;
+ }
+ /* The memory clock may not exceed 8 MHz */
+ if (clktmp > 8000000)
+ priv->cpu_interface |= CPUIF_DMC;
+ }
+
+ if (priv->cpu_interface & CPUIF_DSC)
+ priv->can.clock.freq /= 2;
+
+ if (bcr[idx] != 0xff)
+ priv->bus_config = bcr[idx];
+ else if (bcr[0] != 0xff)
+ priv->bus_config = bcr[0];
+ else
+ priv->bus_config = BCR_DEFAULT;
+
+ if (cor[idx] != 0xff)
+ priv->clkout = cor[idx];
+ else if (cor[0] != 0xff)
+ priv->clkout = cor[0];
+ else
+ priv->clkout = COR_DEFAULT;
+
+ dev_set_drvdata(&pdev->dev, dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ err = register_cc770dev(dev);
+ if (err) {
+ dev_err(&pdev->dev,
+ "couldn't register device (err=%d)\n", err);
+ goto exit_unmap;
+ }
+
+ dev_info(&pdev->dev, "device registered (reg_base=0x%p, irq=%d)\n",
+ priv->reg_base, dev->irq);
+ return 0;
+
+ exit_unmap:
+ if (mem[idx])
+ iounmap(base);
+ exit_release:
+ if (mem[idx])
+ release_mem_region(mem[idx], iosize);
+ else
+ release_region(port[idx], iosize);
+ exit:
+ return err;
+}
+
+static int __devexit cc770_isa_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = dev_get_drvdata(&pdev->dev);
+ struct cc770_priv *priv = netdev_priv(dev);
+ int idx = pdev->id;
+
+ unregister_cc770dev(dev);
+ dev_set_drvdata(&pdev->dev, NULL);
+
+ if (mem[idx]) {
+ iounmap(priv->reg_base);
+ release_mem_region(mem[idx], CC770_IOSIZE);
+ } else {
+ if (priv->read_reg == cc770_isa_port_read_reg_indirect)
+ release_region(port[idx], CC770_IOSIZE_INDIRECT);
+ else
+ release_region(port[idx], CC770_IOSIZE);
+ }
+ free_cc770dev(dev);
+
+ return 0;
+}
+
+static struct platform_driver cc770_isa_driver = {
+ .probe = cc770_isa_probe,
+ .remove = __devexit_p(cc770_isa_remove),
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init cc770_isa_init(void)
+{
+ int idx, err;
+
+ for (idx = 0; idx < ARRAY_SIZE(cc770_isa_devs); idx++) {
+ if ((port[idx] || mem[idx]) && irq[idx]) {
+ cc770_isa_devs[idx] =
+ platform_device_alloc(KBUILD_MODNAME, idx);
+ if (!cc770_isa_devs[idx]) {
+ err = -ENOMEM;
+ goto exit_free_devices;
+ }
+ err = platform_device_add(cc770_isa_devs[idx]);
+ if (err) {
+ platform_device_put(cc770_isa_devs[idx]);
+ goto exit_free_devices;
+ }
+ pr_debug("platform device %d: port=%#lx, mem=%#lx, "
+ "irq=%d\n",
+ idx, port[idx], mem[idx], irq[idx]);
+ } else if (idx == 0 || port[idx] || mem[idx]) {
+ pr_err("insufficient parameters supplied\n");
+ err = -EINVAL;
+ goto exit_free_devices;
+ }
+ }
+
+ err = platform_driver_register(&cc770_isa_driver);
+ if (err)
+ goto exit_free_devices;
+
+ pr_info("driver for max. %d devices registered\n", MAXDEV);
+
+ return 0;
+
+exit_free_devices:
+ while (--idx >= 0) {
+ if (cc770_isa_devs[idx])
+ platform_device_unregister(cc770_isa_devs[idx]);
+ }
+
+ return err;
+}
+module_init(cc770_isa_init);
+
+static void __exit cc770_isa_exit(void)
+{
+ int idx;
+
+ platform_driver_unregister(&cc770_isa_driver);
+ for (idx = 0; idx < ARRAY_SIZE(cc770_isa_devs); idx++) {
+ if (cc770_isa_devs[idx])
+ platform_device_unregister(cc770_isa_devs[idx]);
+ }
+}
+module_exit(cc770_isa_exit);
diff --git a/drivers/net/can/cc770/cc770_platform.c b/drivers/net/can/cc770/cc770_platform.c
new file mode 100644
index 000000000000..53115eee8075
--- /dev/null
+++ b/drivers/net/can/cc770/cc770_platform.c
@@ -0,0 +1,272 @@
+/*
+ * Driver for CC770 and AN82527 CAN controllers on the platform bus
+ *
+ * Copyright (C) 2009, 2011 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * If platform data is used, you should have similar definitions
+ * in your board-specific code:
+ *
+ * static struct cc770_platform_data myboard_cc770_pdata = {
+ * .osc_freq = 16000000,
+ * .cir = 0x41,
+ * .cor = 0x20,
+ * .bcr = 0x40,
+ * };
+ *
+ * Please see include/linux/can/platform/cc770.h for description of
+ * above fields.
+ *
+ * If the device tree is used, you need a CAN node definition in your
+ * DTS file similar to:
+ *
+ * can@3,100 {
+ * compatible = "bosch,cc770";
+ * reg = <3 0x100 0x80>;
+ * interrupts = <2 0>;
+ * interrupt-parent = <&mpic>;
+ * bosch,external-clock-frequency = <16000000>;
+ * };
+ *
+ * See "Documentation/devicetree/bindings/net/can/cc770.txt" for further
+ * information.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/platform/cc770.h>
+
+#include "cc770.h"
+
+#define DRV_NAME "cc770_platform"
+
+MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
+MODULE_DESCRIPTION("Socket-CAN driver for CC770 on the platform bus");
+MODULE_LICENSE("GPL v2");
+
+#define CC770_PLATFORM_CAN_CLOCK 16000000
+
+static u8 cc770_platform_read_reg(const struct cc770_priv *priv, int reg)
+{
+ return ioread8(priv->reg_base + reg);
+}
+
+static void cc770_platform_write_reg(const struct cc770_priv *priv, int reg,
+ u8 val)
+{
+ iowrite8(val, priv->reg_base + reg);
+}
+
+static int __devinit cc770_get_of_node_data(struct platform_device *pdev,
+ struct cc770_priv *priv)
+{
+ struct device_node *np = pdev->dev.of_node;
+ const u32 *prop;
+ int prop_size;
+ u32 clkext;
+
+ prop = of_get_property(np, "bosch,external-clock-frequency",
+ &prop_size);
+ if (prop && (prop_size == sizeof(u32)))
+ clkext = *prop;
+ else
+ clkext = CC770_PLATFORM_CAN_CLOCK; /* default */
+ priv->can.clock.freq = clkext;
+
+ /* The system clock may not exceed 10 MHz */
+ if (priv->can.clock.freq > 10000000) {
+ priv->cpu_interface |= CPUIF_DSC;
+ priv->can.clock.freq /= 2;
+ }
+
+ /* The memory clock may not exceed 8 MHz */
+ if (priv->can.clock.freq > 8000000)
+ priv->cpu_interface |= CPUIF_DMC;
+
+ if (of_get_property(np, "bosch,divide-memory-clock", NULL))
+ priv->cpu_interface |= CPUIF_DMC;
+ if (of_get_property(np, "bosch,iso-low-speed-mux", NULL))
+ priv->cpu_interface |= CPUIF_MUX;
+
+ if (!of_get_property(np, "bosch,no-comperator-bypass", NULL))
+ priv->bus_config |= BUSCFG_CBY;
+ if (of_get_property(np, "bosch,disconnect-rx0-input", NULL))
+ priv->bus_config |= BUSCFG_DR0;
+ if (of_get_property(np, "bosch,disconnect-rx1-input", NULL))
+ priv->bus_config |= BUSCFG_DR1;
+ if (of_get_property(np, "bosch,disconnect-tx1-output", NULL))
+ priv->bus_config |= BUSCFG_DT1;
+ if (of_get_property(np, "bosch,polarity-dominant", NULL))
+ priv->bus_config |= BUSCFG_POL;
+
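+ /*
+ * Derive the clock-out divider from the external clock and the
+ * requested "bosch,clock-out-frequency"; if no "bosch,slew-rate" is
+ * given, estimate a default slew rate from the resulting clock.
+ */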
+ prop = of_get_property(np, "bosch,clock-out-frequency", &prop_size);
+ if (prop && (prop_size == sizeof(u32)) && *prop > 0) {
+ u32 cdv = clkext / *prop;
+ int slew;
+
+ if (cdv > 0 && cdv < 16) {
+ priv->cpu_interface |= CPUIF_CEN;
+ priv->clkout |= (cdv - 1) & CLKOUT_CD_MASK;
+
+ prop = of_get_property(np, "bosch,slew-rate",
+ &prop_size);
+ if (prop && (prop_size == sizeof(u32))) {
+ slew = *prop;
+ } else {
+ /* Determine default slew rate */
+ slew = (CLKOUT_SL_MASK >>
+ CLKOUT_SL_SHIFT) -
+ ((cdv * clkext - 1) / 8000000);
+ if (slew < 0)
+ slew = 0;
+ }
+ priv->clkout |= (slew << CLKOUT_SL_SHIFT) &
+ CLKOUT_SL_MASK;
+ } else {
+ dev_dbg(&pdev->dev, "invalid clock-out-frequency\n");
+ }
+ }
+
+ return 0;
+}
+
+static int __devinit cc770_get_platform_data(struct platform_device *pdev,
+ struct cc770_priv *priv)
+{
+ struct cc770_platform_data *pdata = pdev->dev.platform_data;
+
+ priv->can.clock.freq = pdata->osc_freq;
+ priv->clkout = pdata->cor;
+ priv->bus_config = pdata->bcr;
+ priv->cpu_interface = pdata->cir;
+ if (priv->cpu_interface & CPUIF_DSC)
+ priv->can.clock.freq /= 2;
+
+ return 0;
+}
+
+static int __devinit cc770_platform_probe(struct platform_device *pdev)
+{
+ struct net_device *dev;
+ struct cc770_priv *priv;
+ struct resource *mem;
+ resource_size_t mem_size;
+ void __iomem *base;
+ int err, irq;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ irq = platform_get_irq(pdev, 0);
+ if (!mem || irq <= 0)
+ return -ENODEV;
+
+ mem_size = resource_size(mem);
+ if (!request_mem_region(mem->start, mem_size, pdev->name))
+ return -EBUSY;
+
+ base = ioremap(mem->start, mem_size);
+ if (!base) {
+ err = -ENOMEM;
+ goto exit_release_mem;
+ }
+
+ dev = alloc_cc770dev(0);
+ if (!dev) {
+ err = -ENOMEM;
+ goto exit_unmap_mem;
+ }
+
+ dev->irq = irq;
+ priv = netdev_priv(dev);
+ priv->read_reg = cc770_platform_read_reg;
+ priv->write_reg = cc770_platform_write_reg;
+ priv->irq_flags = IRQF_SHARED;
+ priv->reg_base = base;
+
+ if (pdev->dev.of_node)
+ err = cc770_get_of_node_data(pdev, priv);
+ else if (pdev->dev.platform_data)
+ err = cc770_get_platform_data(pdev, priv);
+ else
+ err = -ENODEV;
+ if (err)
+ goto exit_free_cc770;
+
+ dev_dbg(&pdev->dev,
+ "reg_base=0x%p irq=%d clock=%d cpu_interface=0x%02x "
+ "bus_config=0x%02x clkout=0x%02x\n",
+ priv->reg_base, dev->irq, priv->can.clock.freq,
+ priv->cpu_interface, priv->bus_config, priv->clkout);
+
+ dev_set_drvdata(&pdev->dev, dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ err = register_cc770dev(dev);
+ if (err) {
+ dev_err(&pdev->dev,
+ "couldn't register CC770 device (err=%d)\n", err);
+ goto exit_free_cc770;
+ }
+
+ return 0;
+
+exit_free_cc770:
+ free_cc770dev(dev);
+exit_unmap_mem:
+ iounmap(base);
+exit_release_mem:
+ release_mem_region(mem->start, mem_size);
+
+ return err;
+}
+
+static int __devexit cc770_platform_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = dev_get_drvdata(&pdev->dev);
+ struct cc770_priv *priv = netdev_priv(dev);
+ struct resource *mem;
+
+ unregister_cc770dev(dev);
+ iounmap(priv->reg_base);
+ free_cc770dev(dev);
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(mem->start, resource_size(mem));
+
+ return 0;
+}
+
+static struct of_device_id __devinitdata cc770_platform_table[] = {
+ {.compatible = "bosch,cc770"}, /* CC770 from Bosch */
+ {.compatible = "intc,82527"}, /* AN82527 from Intel CP */
+ {},
+};
+
+static struct platform_driver cc770_platform_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = cc770_platform_table,
+ },
+ .probe = cc770_platform_probe,
+ .remove = __devexit_p(cc770_platform_remove),
+};
+
+module_platform_driver(cc770_platform_driver);
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 25695bde0549..120f1ab5a2ce 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -454,7 +454,7 @@ static void can_setup(struct net_device *dev)
/* New-style flags. */
dev->flags = IFF_NOARP;
- dev->features = NETIF_F_NO_CSUM;
+ dev->features = NETIF_F_HW_CSUM;
}
struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index e02337953f41..7fd8089946fb 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -802,7 +802,7 @@ static int flexcan_open(struct net_device *dev)
struct flexcan_priv *priv = netdev_priv(dev);
int err;
- clk_enable(priv->clk);
+ clk_prepare_enable(priv->clk);
err = open_candev(dev);
if (err)
@@ -824,7 +824,7 @@ static int flexcan_open(struct net_device *dev)
out_close:
close_candev(dev);
out:
- clk_disable(priv->clk);
+ clk_disable_unprepare(priv->clk);
return err;
}
@@ -838,7 +838,7 @@ static int flexcan_close(struct net_device *dev)
flexcan_chip_stop(dev);
free_irq(dev->irq, dev);
- clk_disable(priv->clk);
+ clk_disable_unprepare(priv->clk);
close_candev(dev);
@@ -877,7 +877,7 @@ static int __devinit register_flexcandev(struct net_device *dev)
struct flexcan_regs __iomem *regs = priv->base;
u32 reg, err;
- clk_enable(priv->clk);
+ clk_prepare_enable(priv->clk);
/* select "bus clock", chip must be disabled */
flexcan_chip_disable(priv);
@@ -911,7 +911,7 @@ static int __devinit register_flexcandev(struct net_device *dev)
out:
/* disable core and turn off clocks */
flexcan_chip_disable(priv);
- clk_disable(priv->clk);
+ clk_disable_unprepare(priv->clk);
return err;
}
@@ -1060,20 +1060,7 @@ static struct platform_driver flexcan_driver = {
.remove = __devexit_p(flexcan_remove),
};
-static int __init flexcan_init(void)
-{
- pr_info("%s netdevice driver\n", DRV_NAME);
- return platform_driver_register(&flexcan_driver);
-}
-
-static void __exit flexcan_exit(void)
-{
- platform_driver_unregister(&flexcan_driver);
- pr_info("%s: driver removed\n", DRV_NAME);
-}
-
-module_init(flexcan_init);
-module_exit(flexcan_exit);
+module_platform_driver(flexcan_driver);
MODULE_AUTHOR("Sascha Hauer <kernel@pengutronix.de>, "
"Marc Kleine-Budde <kernel@pengutronix.de>");
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index 32778d56d330..08c893cb7896 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -1803,20 +1803,9 @@ static struct platform_driver ican3_driver = {
.remove = __devexit_p(ican3_remove),
};
-static int __init ican3_init(void)
-{
- return platform_driver_register(&ican3_driver);
-}
-
-static void __exit ican3_exit(void)
-{
- platform_driver_unregister(&ican3_driver);
-}
+module_platform_driver(ican3_driver);
MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>");
MODULE_DESCRIPTION("Janz MODULbus VMOD-ICAN3 Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:janz-ican3");
-
-module_init(ican3_init);
-module_exit(ican3_exit);
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index 5fedc3375562..5caa572d71e3 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -411,17 +411,7 @@ static struct platform_driver mpc5xxx_can_driver = {
#endif
};
-static int __init mpc5xxx_can_init(void)
-{
- return platform_driver_register(&mpc5xxx_can_driver);
-}
-module_init(mpc5xxx_can_init);
-
-static void __exit mpc5xxx_can_exit(void)
-{
- platform_driver_unregister(&mpc5xxx_can_driver);
-};
-module_exit(mpc5xxx_can_exit);
+module_platform_driver(mpc5xxx_can_driver);
MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
MODULE_DESCRIPTION("Freescale MPC5xxx CAN driver");
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index ec4a3119e2c9..1c82dd8b896e 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -581,7 +581,10 @@ static int mscan_open(struct net_device *dev)
priv->open_time = jiffies;
- clrbits8(&regs->canctl1, MSCAN_LISTEN);
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+ setbits8(&regs->canctl1, MSCAN_LISTEN);
+ else
+ clrbits8(&regs->canctl1, MSCAN_LISTEN);
ret = mscan_start(dev);
if (ret)
@@ -690,7 +693,8 @@ struct net_device *alloc_mscandev(void)
priv->can.bittiming_const = &mscan_bittiming_const;
priv->can.do_set_bittiming = mscan_do_set_bittiming;
priv->can.do_set_mode = mscan_do_set_mode;
- priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
+ CAN_CTRLMODE_LISTENONLY;
for (i = 0; i < TX_QUEUE_SIZE; i++) {
priv->tx_queue[i].id = i;
diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig
index fe9e64d476eb..36e9d594069d 100644
--- a/drivers/net/can/sja1000/Kconfig
+++ b/drivers/net/can/sja1000/Kconfig
@@ -6,7 +6,6 @@ if CAN_SJA1000
config CAN_SJA1000_ISA
tristate "ISA Bus based legacy SJA1000 driver"
- depends on ISA
---help---
This driver adds legacy support for SJA1000 chips connected to
the ISA bus using I/O port, memory mapped or indirect access.
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
index 905bce0b3a43..2c7f5036f570 100644
--- a/drivers/net/can/sja1000/peak_pci.c
+++ b/drivers/net/can/sja1000/peak_pci.c
@@ -20,7 +20,6 @@
*/
#include <linux/kernel.h>
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
diff --git a/drivers/net/can/sja1000/sja1000_isa.c b/drivers/net/can/sja1000/sja1000_isa.c
index 496223e9e2fc..90c5c2dfd2fd 100644
--- a/drivers/net/can/sja1000/sja1000_isa.c
+++ b/drivers/net/can/sja1000/sja1000_isa.c
@@ -17,7 +17,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/isa.h>
+#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/delay.h>
@@ -44,9 +44,9 @@ static unsigned long port[MAXDEV];
static unsigned long mem[MAXDEV];
static int __devinitdata irq[MAXDEV];
static int __devinitdata clk[MAXDEV];
-static char __devinitdata cdr[MAXDEV] = {[0 ... (MAXDEV - 1)] = -1};
-static char __devinitdata ocr[MAXDEV] = {[0 ... (MAXDEV - 1)] = -1};
-static char __devinitdata indirect[MAXDEV] = {[0 ... (MAXDEV - 1)] = -1};
+static unsigned char __devinitdata cdr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff};
+static unsigned char __devinitdata ocr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff};
+static int __devinitdata indirect[MAXDEV] = {[0 ... (MAXDEV - 1)] = -1};
module_param_array(port, ulong, NULL, S_IRUGO);
MODULE_PARM_DESC(port, "I/O port number");
@@ -54,7 +54,7 @@ MODULE_PARM_DESC(port, "I/O port number");
module_param_array(mem, ulong, NULL, S_IRUGO);
MODULE_PARM_DESC(mem, "I/O memory address");
-module_param_array(indirect, byte, NULL, S_IRUGO);
+module_param_array(indirect, int, NULL, S_IRUGO);
MODULE_PARM_DESC(indirect, "Indirect access via address and data port");
module_param_array(irq, int, NULL, S_IRUGO);
@@ -75,6 +75,8 @@ MODULE_PARM_DESC(ocr, "Output control register "
#define SJA1000_IOSIZE 0x20
#define SJA1000_IOSIZE_INDIRECT 0x02
+static struct platform_device *sja1000_isa_devs[MAXDEV];
+
static u8 sja1000_isa_mem_read_reg(const struct sja1000_priv *priv, int reg)
{
return readb(priv->reg_base + reg);
@@ -115,26 +117,18 @@ static void sja1000_isa_port_write_reg_indirect(const struct sja1000_priv *priv,
outb(val, base + 1);
}
-static int __devinit sja1000_isa_match(struct device *pdev, unsigned int idx)
-{
- if (port[idx] || mem[idx]) {
- if (irq[idx])
- return 1;
- } else if (idx)
- return 0;
-
- dev_err(pdev, "insufficient parameters supplied\n");
- return 0;
-}
-
-static int __devinit sja1000_isa_probe(struct device *pdev, unsigned int idx)
+static int __devinit sja1000_isa_probe(struct platform_device *pdev)
{
struct net_device *dev;
struct sja1000_priv *priv;
void __iomem *base = NULL;
int iosize = SJA1000_IOSIZE;
+ int idx = pdev->id;
int err;
+ dev_dbg(&pdev->dev, "probing idx=%d: port=%#lx, mem=%#lx, irq=%d\n",
+ idx, port[idx], mem[idx], irq[idx]);
+
if (mem[idx]) {
if (!request_mem_region(mem[idx], iosize, DRV_NAME)) {
err = -EBUSY;
@@ -189,31 +183,31 @@ static int __devinit sja1000_isa_probe(struct device *pdev, unsigned int idx)
else
priv->can.clock.freq = CLK_DEFAULT / 2;
- if (ocr[idx] != -1)
- priv->ocr = ocr[idx] & 0xff;
- else if (ocr[0] != -1)
- priv->ocr = ocr[0] & 0xff;
+ if (ocr[idx] != 0xff)
+ priv->ocr = ocr[idx];
+ else if (ocr[0] != 0xff)
+ priv->ocr = ocr[0];
else
priv->ocr = OCR_DEFAULT;
- if (cdr[idx] != -1)
- priv->cdr = cdr[idx] & 0xff;
- else if (cdr[0] != -1)
- priv->cdr = cdr[0] & 0xff;
+ if (cdr[idx] != 0xff)
+ priv->cdr = cdr[idx];
+ else if (cdr[0] != 0xff)
+ priv->cdr = cdr[0];
else
priv->cdr = CDR_DEFAULT;
- dev_set_drvdata(pdev, dev);
- SET_NETDEV_DEV(dev, pdev);
+ dev_set_drvdata(&pdev->dev, dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
err = register_sja1000dev(dev);
if (err) {
- dev_err(pdev, "registering %s failed (err=%d)\n",
+ dev_err(&pdev->dev, "registering %s failed (err=%d)\n",
DRV_NAME, err);
goto exit_unmap;
}
- dev_info(pdev, "%s device registered (reg_base=0x%p, irq=%d)\n",
+ dev_info(&pdev->dev, "%s device registered (reg_base=0x%p, irq=%d)\n",
DRV_NAME, priv->reg_base, dev->irq);
return 0;
@@ -229,13 +223,14 @@ static int __devinit sja1000_isa_probe(struct device *pdev, unsigned int idx)
return err;
}
-static int __devexit sja1000_isa_remove(struct device *pdev, unsigned int idx)
+static int __devexit sja1000_isa_remove(struct platform_device *pdev)
{
- struct net_device *dev = dev_get_drvdata(pdev);
+ struct net_device *dev = dev_get_drvdata(&pdev->dev);
struct sja1000_priv *priv = netdev_priv(dev);
+ int idx = pdev->id;
unregister_sja1000dev(dev);
- dev_set_drvdata(pdev, NULL);
+ dev_set_drvdata(&pdev->dev, NULL);
if (mem[idx]) {
iounmap(priv->reg_base);
@@ -251,29 +246,70 @@ static int __devexit sja1000_isa_remove(struct device *pdev, unsigned int idx)
return 0;
}
-static struct isa_driver sja1000_isa_driver = {
- .match = sja1000_isa_match,
+static struct platform_driver sja1000_isa_driver = {
.probe = sja1000_isa_probe,
.remove = __devexit_p(sja1000_isa_remove),
.driver = {
.name = DRV_NAME,
+ .owner = THIS_MODULE,
},
};
static int __init sja1000_isa_init(void)
{
- int err = isa_register_driver(&sja1000_isa_driver, MAXDEV);
+ int idx, err;
+
+ for (idx = 0; idx < MAXDEV; idx++) {
+ if ((port[idx] || mem[idx]) && irq[idx]) {
+ sja1000_isa_devs[idx] =
+ platform_device_alloc(DRV_NAME, idx);
+ if (!sja1000_isa_devs[idx]) {
+ err = -ENOMEM;
+ goto exit_free_devices;
+ }
+ err = platform_device_add(sja1000_isa_devs[idx]);
+ if (err) {
+ platform_device_put(sja1000_isa_devs[idx]);
+ goto exit_free_devices;
+ }
+ pr_debug("%s: platform device %d: port=%#lx, mem=%#lx, "
+ "irq=%d\n",
+ DRV_NAME, idx, port[idx], mem[idx], irq[idx]);
+ } else if (idx == 0 || port[idx] || mem[idx]) {
+ pr_err("%s: insufficient parameters supplied\n",
+ DRV_NAME);
+ err = -EINVAL;
+ goto exit_free_devices;
+ }
+ }
+
+ err = platform_driver_register(&sja1000_isa_driver);
+ if (err)
+ goto exit_free_devices;
+
+ pr_info("Legacy %s driver for max. %d devices registered\n",
+ DRV_NAME, MAXDEV);
+
+ return 0;
+
+exit_free_devices:
+ while (--idx >= 0) {
+ if (sja1000_isa_devs[idx])
+ platform_device_unregister(sja1000_isa_devs[idx]);
+ }
- if (!err)
- printk(KERN_INFO
- "Legacy %s driver for max. %d devices registered\n",
- DRV_NAME, MAXDEV);
return err;
}
static void __exit sja1000_isa_exit(void)
{
- isa_unregister_driver(&sja1000_isa_driver);
+ int idx;
+
+ platform_driver_unregister(&sja1000_isa_driver);
+ for (idx = 0; idx < MAXDEV; idx++) {
+ if (sja1000_isa_devs[idx])
+ platform_device_unregister(sja1000_isa_devs[idx]);
+ }
}
module_init(sja1000_isa_init);
diff --git a/drivers/net/can/sja1000/sja1000_of_platform.c b/drivers/net/can/sja1000/sja1000_of_platform.c
index c3dd9d09be57..f2683eb6a3d5 100644
--- a/drivers/net/can/sja1000/sja1000_of_platform.c
+++ b/drivers/net/can/sja1000/sja1000_of_platform.c
@@ -220,14 +220,4 @@ static struct platform_driver sja1000_ofp_driver = {
.remove = __devexit_p(sja1000_ofp_remove),
};
-static int __init sja1000_ofp_init(void)
-{
- return platform_driver_register(&sja1000_ofp_driver);
-}
-module_init(sja1000_ofp_init);
-
-static void __exit sja1000_ofp_exit(void)
-{
- return platform_driver_unregister(&sja1000_ofp_driver);
-};
-module_exit(sja1000_ofp_exit);
+module_platform_driver(sja1000_ofp_driver);
diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c
index d9fadc489b32..4f50145f6483 100644
--- a/drivers/net/can/sja1000/sja1000_platform.c
+++ b/drivers/net/can/sja1000/sja1000_platform.c
@@ -185,15 +185,4 @@ static struct platform_driver sp_driver = {
},
};
-static int __init sp_init(void)
-{
- return platform_driver_register(&sp_driver);
-}
-
-static void __exit sp_exit(void)
-{
- platform_driver_unregister(&sp_driver);
-}
-
-module_init(sp_init);
-module_exit(sp_exit);
+module_platform_driver(sp_driver);
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index a979b006f459..3f1ebcc2cb83 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -387,7 +387,7 @@ static void slc_setup(struct net_device *dev)
/* New-style flags. */
dev->flags = IFF_NOARP;
- dev->features = NETIF_F_NO_CSUM;
+ dev->features = NETIF_F_HW_CSUM;
}
/******************************************
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
index 09a8b86cf1ac..a7c77c744ee9 100644
--- a/drivers/net/can/softing/softing_main.c
+++ b/drivers/net/can/softing/softing_main.c
@@ -874,21 +874,9 @@ static struct platform_driver softing_driver = {
.remove = __devexit_p(softing_pdev_remove),
};
-MODULE_ALIAS("platform:softing");
-
-static int __init softing_start(void)
-{
- return platform_driver_register(&softing_driver);
-}
-
-static void __exit softing_stop(void)
-{
- platform_driver_unregister(&softing_driver);
-}
-
-module_init(softing_start);
-module_exit(softing_stop);
+module_platform_driver(softing_driver);
+MODULE_ALIAS("platform:softing");
MODULE_DESCRIPTION("Softing DPRAM CAN driver");
MODULE_AUTHOR("Kurt Van Dijck <kurt.van.dijck@eia.be>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 2adc294f512a..df809e3f130e 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -1037,20 +1037,7 @@ static struct platform_driver ti_hecc_driver = {
.resume = ti_hecc_resume,
};
-static int __init ti_hecc_init_driver(void)
-{
- printk(KERN_INFO DRV_DESC "\n");
- return platform_driver_register(&ti_hecc_driver);
-}
-
-static void __exit ti_hecc_exit_driver(void)
-{
- printk(KERN_INFO DRV_DESC " unloaded\n");
- platform_driver_unregister(&ti_hecc_driver);
-}
-
-module_exit(ti_hecc_exit_driver);
-module_init(ti_hecc_init_driver);
+module_platform_driver(ti_hecc_driver);
MODULE_AUTHOR("Anant Gole <anantgole@ti.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index f93e2d6fc88c..ea2d94285936 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -63,7 +63,7 @@ MODULE_AUTHOR("Urs Thuermann <urs.thuermann@volkswagen.de>");
* See Documentation/networking/can.txt for details.
*/
-static int echo; /* echo testing. Default: 0 (Off) */
+static bool echo; /* echo testing. Default: 0 (Off) */
module_param(echo, bool, S_IRUGO);
MODULE_PARM_DESC(echo, "Echo sent frames (for testing). Default: 0 (Off)");
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
new file mode 100644
index 000000000000..dd151d53d506
--- /dev/null
+++ b/drivers/net/dsa/Kconfig
@@ -0,0 +1,36 @@
+menu "Distributed Switch Architecture drivers"
+ depends on NET_DSA
+
+config NET_DSA_MV88E6XXX
+ tristate
+ default n
+
+config NET_DSA_MV88E6060
+ tristate "Marvell 88E6060 ethernet switch chip support"
+ select NET_DSA_TAG_TRAILER
+ ---help---
+ This enables support for the Marvell 88E6060 ethernet switch
+ chip.
+
+config NET_DSA_MV88E6XXX_NEED_PPU
+ bool
+ default n
+
+config NET_DSA_MV88E6131
+ tristate "Marvell 88E6085/6095/6095F/6131 ethernet switch chip support"
+ select NET_DSA_MV88E6XXX
+ select NET_DSA_MV88E6XXX_NEED_PPU
+ select NET_DSA_TAG_DSA
+ ---help---
+ This enables support for the Marvell 88E6085/6095/6095F/6131
+ ethernet switch chips.
+
+config NET_DSA_MV88E6123_61_65
+ tristate "Marvell 88E6123/6161/6165 ethernet switch chip support"
+ select NET_DSA_MV88E6XXX
+ select NET_DSA_TAG_EDSA
+ ---help---
+ This enables support for the Marvell 88E6123/6161/6165
+ ethernet switch chips.
+
+endmenu
diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile
new file mode 100644
index 000000000000..f3bda05536cc
--- /dev/null
+++ b/drivers/net/dsa/Makefile
@@ -0,0 +1,9 @@
+obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o
+obj-$(CONFIG_NET_DSA_MV88E6XXX) += mv88e6xxx_drv.o
+mv88e6xxx_drv-y += mv88e6xxx.o
+ifdef CONFIG_NET_DSA_MV88E6123_61_65
+mv88e6xxx_drv-y += mv88e6123_61_65.o
+endif
+ifdef CONFIG_NET_DSA_MV88E6131
+mv88e6xxx_drv-y += mv88e6131.o
+endif
diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c
new file mode 100644
index 000000000000..7fc4e81d4d43
--- /dev/null
+++ b/drivers/net/dsa/mv88e6060.c
@@ -0,0 +1,293 @@
+/*
+ * net/dsa/mv88e6060.c - Driver for Marvell 88e6060 switch chips
+ * Copyright (c) 2008-2009 Marvell Semiconductor
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <net/dsa.h>
+
+#define REG_PORT(p) (8 + (p))
+#define REG_GLOBAL 0x0f
+
+static int reg_read(struct dsa_switch *ds, int addr, int reg)
+{
+ return mdiobus_read(ds->master_mii_bus, ds->pd->sw_addr + addr, reg);
+}
+
+#define REG_READ(addr, reg) \
+ ({ \
+ int __ret; \
+ \
+ __ret = reg_read(ds, addr, reg); \
+ if (__ret < 0) \
+ return __ret; \
+ __ret; \
+ })
+
+
+static int reg_write(struct dsa_switch *ds, int addr, int reg, u16 val)
+{
+ return mdiobus_write(ds->master_mii_bus, ds->pd->sw_addr + addr,
+ reg, val);
+}
+
+#define REG_WRITE(addr, reg, val) \
+ ({ \
+ int __ret; \
+ \
+ __ret = reg_write(ds, addr, reg, val); \
+ if (__ret < 0) \
+ return __ret; \
+ })
+
+static char *mv88e6060_probe(struct mii_bus *bus, int sw_addr)
+{
+ int ret;
+
+ ret = mdiobus_read(bus, sw_addr + REG_PORT(0), 0x03);
+ if (ret >= 0) {
+ ret &= 0xfff0;
+ if (ret == 0x0600)
+ return "Marvell 88E6060";
+ }
+
+ return NULL;
+}
+
+static int mv88e6060_switch_reset(struct dsa_switch *ds)
+{
+ int i;
+ int ret;
+
+ /*
+ * Set all ports to the disabled state.
+ */
+ for (i = 0; i < 6; i++) {
+ ret = REG_READ(REG_PORT(i), 0x04);
+ REG_WRITE(REG_PORT(i), 0x04, ret & 0xfffc);
+ }
+
+ /*
+ * Wait for transmit queues to drain.
+ */
+ msleep(2);
+
+ /*
+ * Reset the switch.
+ */
+ REG_WRITE(REG_GLOBAL, 0x0a, 0xa130);
+
+ /*
+ * Wait up to one second for reset to complete.
+ */
+ for (i = 0; i < 1000; i++) {
+ ret = REG_READ(REG_GLOBAL, 0x00);
+ if ((ret & 0x8000) == 0x0000)
+ break;
+
+ msleep(1);
+ }
+ if (i == 1000)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int mv88e6060_setup_global(struct dsa_switch *ds)
+{
+ /*
+ * Disable discarding of frames with excessive collisions,
+ * set the maximum frame size to 1536 bytes, and mask all
+ * interrupt sources.
+ */
+ REG_WRITE(REG_GLOBAL, 0x04, 0x0800);
+
+ /*
+ * Enable automatic address learning, set the address
+ * database size to 1024 entries, and set the default aging
+ * time to 5 minutes.
+ */
+ REG_WRITE(REG_GLOBAL, 0x0a, 0x2130);
+
+ return 0;
+}
+
+static int mv88e6060_setup_port(struct dsa_switch *ds, int p)
+{
+ int addr = REG_PORT(p);
+
+ /*
+ * Do not force flow control, disable Ingress and Egress
+ * Header tagging, disable VLAN tunneling, and set the port
+ * state to Forwarding. Additionally, if this is the CPU
+ * port, enable Ingress and Egress Trailer tagging mode.
+ */
+ REG_WRITE(addr, 0x04, dsa_is_cpu_port(ds, p) ? 0x4103 : 0x0003);
+
+ /*
+ * Port based VLAN map: give each port its own address
+ * database, allow the CPU port to talk to each of the 'real'
+ * ports, and allow each of the 'real' ports to only talk to
+ * the CPU port.
+ */
+ REG_WRITE(addr, 0x06,
+ ((p & 0xf) << 12) |
+ (dsa_is_cpu_port(ds, p) ?
+ ds->phys_port_mask :
+ (1 << ds->dst->cpu_port)));
+
+ /*
+ * Port Association Vector: when learning source addresses
+ * of packets, add the address to the address database using
+ * a port bitmap that has only the bit for this port set and
+ * the other bits clear.
+ */
+ REG_WRITE(addr, 0x0b, 1 << p);
+
+ return 0;
+}
+
+static int mv88e6060_setup(struct dsa_switch *ds)
+{
+ int i;
+ int ret;
+
+ ret = mv88e6060_switch_reset(ds);
+ if (ret < 0)
+ return ret;
+
+ /* @@@ initialise atu */
+
+ ret = mv88e6060_setup_global(ds);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < 6; i++) {
+ ret = mv88e6060_setup_port(ds, i);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mv88e6060_set_addr(struct dsa_switch *ds, u8 *addr)
+{
+ REG_WRITE(REG_GLOBAL, 0x01, (addr[0] << 8) | addr[1]);
+ REG_WRITE(REG_GLOBAL, 0x02, (addr[2] << 8) | addr[3]);
+ REG_WRITE(REG_GLOBAL, 0x03, (addr[4] << 8) | addr[5]);
+
+ return 0;
+}
+
+static int mv88e6060_port_to_phy_addr(int port)
+{
+ if (port >= 0 && port <= 5)
+ return port;
+ return -1;
+}
+
+static int mv88e6060_phy_read(struct dsa_switch *ds, int port, int regnum)
+{
+ int addr;
+
+ addr = mv88e6060_port_to_phy_addr(port);
+ if (addr == -1)
+ return 0xffff;
+
+ return reg_read(ds, addr, regnum);
+}
+
+static int
+mv88e6060_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val)
+{
+ int addr;
+
+ addr = mv88e6060_port_to_phy_addr(port);
+ if (addr == -1)
+ return 0xffff;
+
+ return reg_write(ds, addr, regnum, val);
+}
+
+static void mv88e6060_poll_link(struct dsa_switch *ds)
+{
+ int i;
+
+ for (i = 0; i < DSA_MAX_PORTS; i++) {
+ struct net_device *dev;
+ int uninitialized_var(port_status);
+ int link;
+ int speed;
+ int duplex;
+ int fc;
+
+ dev = ds->ports[i];
+ if (dev == NULL)
+ continue;
+
+ link = 0;
+ if (dev->flags & IFF_UP) {
+ port_status = reg_read(ds, REG_PORT(i), 0x00);
+ if (port_status < 0)
+ continue;
+
+ link = !!(port_status & 0x1000);
+ }
+
+ if (!link) {
+ if (netif_carrier_ok(dev)) {
+ printk(KERN_INFO "%s: link down\n", dev->name);
+ netif_carrier_off(dev);
+ }
+ continue;
+ }
+
+ speed = (port_status & 0x0100) ? 100 : 10;
+ duplex = (port_status & 0x0200) ? 1 : 0;
+ fc = ((port_status & 0xc000) == 0xc000) ? 1 : 0;
+
+ if (!netif_carrier_ok(dev)) {
+ printk(KERN_INFO "%s: link up, %d Mb/s, %s duplex, "
+ "flow control %sabled\n", dev->name,
+ speed, duplex ? "full" : "half",
+ fc ? "en" : "dis");
+ netif_carrier_on(dev);
+ }
+ }
+}
+
+static struct dsa_switch_driver mv88e6060_switch_driver = {
+ .tag_protocol = htons(ETH_P_TRAILER),
+ .probe = mv88e6060_probe,
+ .setup = mv88e6060_setup,
+ .set_addr = mv88e6060_set_addr,
+ .phy_read = mv88e6060_phy_read,
+ .phy_write = mv88e6060_phy_write,
+ .poll_link = mv88e6060_poll_link,
+};
+
+static int __init mv88e6060_init(void)
+{
+ register_switch_driver(&mv88e6060_switch_driver);
+ return 0;
+}
+module_init(mv88e6060_init);
+
+static void __exit mv88e6060_cleanup(void)
+{
+ unregister_switch_driver(&mv88e6060_switch_driver);
+}
+module_exit(mv88e6060_cleanup);
+
+MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
+MODULE_DESCRIPTION("Driver for Marvell 88E6060 ethernet switch chip");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:mv88e6060");
diff --git a/drivers/net/dsa/mv88e6123_61_65.c b/drivers/net/dsa/mv88e6123_61_65.c
new file mode 100644
index 000000000000..c0a458fc698f
--- /dev/null
+++ b/drivers/net/dsa/mv88e6123_61_65.c
@@ -0,0 +1,438 @@
+/*
+ * net/dsa/mv88e6123_61_65.c - Marvell 88e6123/6161/6165 switch chip support
+ * Copyright (c) 2008-2009 Marvell Semiconductor
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <net/dsa.h>
+#include "mv88e6xxx.h"
+
+static char *mv88e6123_61_65_probe(struct mii_bus *bus, int sw_addr)
+{
+ int ret;
+
+ ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), 0x03);
+ if (ret >= 0) {
+ ret &= 0xfff0;
+ if (ret == 0x1210)
+ return "Marvell 88E6123";
+ if (ret == 0x1610)
+ return "Marvell 88E6161";
+ if (ret == 0x1650)
+ return "Marvell 88E6165";
+ }
+
+ return NULL;
+}
+
+static int mv88e6123_61_65_switch_reset(struct dsa_switch *ds)
+{
+ int i;
+ int ret;
+
+ /*
+ * Set all ports to the disabled state.
+ */
+ for (i = 0; i < 8; i++) {
+ ret = REG_READ(REG_PORT(i), 0x04);
+ REG_WRITE(REG_PORT(i), 0x04, ret & 0xfffc);
+ }
+
+ /*
+ * Wait for transmit queues to drain.
+ */
+ msleep(2);
+
+ /*
+ * Reset the switch.
+ */
+ REG_WRITE(REG_GLOBAL, 0x04, 0xc400);
+
+ /*
+ * Wait up to one second for reset to complete.
+ */
+ for (i = 0; i < 1000; i++) {
+ ret = REG_READ(REG_GLOBAL, 0x00);
+ if ((ret & 0xc800) == 0xc800)
+ break;
+
+ msleep(1);
+ }
+ if (i == 1000)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int mv88e6123_61_65_setup_global(struct dsa_switch *ds)
+{
+ int ret;
+ int i;
+
+ /*
+ * Disable the PHY polling unit (since there won't be any
+ * external PHYs to poll), don't discard packets with
+ * excessive collisions, and mask all interrupt sources.
+ */
+ REG_WRITE(REG_GLOBAL, 0x04, 0x0000);
+
+ /*
+ * Set the default address aging time to 5 minutes, and
+ * enable address learn messages to be sent to all message
+ * ports.
+ */
+ REG_WRITE(REG_GLOBAL, 0x0a, 0x0148);
+
+ /*
+ * Configure the priority mapping registers.
+ */
+ ret = mv88e6xxx_config_prio(ds);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Configure the upstream port, and configure the upstream
+ * port as the port to which ingress and egress monitor frames
+ * are to be sent.
+ */
+ REG_WRITE(REG_GLOBAL, 0x1a, (dsa_upstream_port(ds) * 0x1110));
+
+ /*
+ * Disable remote management for now, and set the switch's
+ * DSA device number.
+ */
+ REG_WRITE(REG_GLOBAL, 0x1c, ds->index & 0x1f);
+
+ /*
+ * Send all frames with destination addresses matching
+ * 01:80:c2:00:00:2x to the CPU port.
+ */
+ REG_WRITE(REG_GLOBAL2, 0x02, 0xffff);
+
+ /*
+ * Send all frames with destination addresses matching
+ * 01:80:c2:00:00:0x to the CPU port.
+ */
+ REG_WRITE(REG_GLOBAL2, 0x03, 0xffff);
+
+ /*
+ * Disable the loopback filter, disable flow control
+ * messages, disable flood broadcast override, disable
+ * removing of provider tags, disable ATU age violation
+ * interrupts, disable tag flow control, force flow
+ * control priority to the highest, and send all special
+ * multicast frames to the CPU at the highest priority.
+ */
+ REG_WRITE(REG_GLOBAL2, 0x05, 0x00ff);
+
+ /*
+ * Program the DSA routing table.
+ */
+ for (i = 0; i < 32; i++) {
+ int nexthop;
+
+ nexthop = 0x1f;
+ if (i != ds->index && i < ds->dst->pd->nr_chips)
+ nexthop = ds->pd->rtable[i] & 0x1f;
+
+ REG_WRITE(REG_GLOBAL2, 0x06, 0x8000 | (i << 8) | nexthop);
+ }
+
+ /*
+ * Clear all trunk masks.
+ */
+ for (i = 0; i < 8; i++)
+ REG_WRITE(REG_GLOBAL2, 0x07, 0x8000 | (i << 12) | 0xff);
+
+ /*
+ * Clear all trunk mappings.
+ */
+ for (i = 0; i < 16; i++)
+ REG_WRITE(REG_GLOBAL2, 0x08, 0x8000 | (i << 11));
+
+ /*
+ * Disable ingress rate limiting by resetting all ingress
+ * rate limit registers to their initial state.
+ */
+ for (i = 0; i < 6; i++)
+ REG_WRITE(REG_GLOBAL2, 0x09, 0x9000 | (i << 8));
+
+ /*
+ * Initialise cross-chip port VLAN table to reset defaults.
+ */
+ REG_WRITE(REG_GLOBAL2, 0x0b, 0x9000);
+
+ /*
+ * Clear the priority override table.
+ */
+ for (i = 0; i < 16; i++)
+ REG_WRITE(REG_GLOBAL2, 0x0f, 0x8000 | (i << 8));
+
+ /* @@@ initialise AVB (22/23) watchdog (27) sdet (29) registers */
+
+ return 0;
+}
+
+static int mv88e6123_61_65_setup_port(struct dsa_switch *ds, int p)
+{
+ int addr = REG_PORT(p);
+ u16 val;
+
+ /*
+ * MAC Forcing register: don't force link, speed, duplex
+ * or flow control state to any particular values on physical
+ * ports, but force the CPU port and all DSA ports to 1000 Mb/s
+ * full duplex.
+ */
+ if (dsa_is_cpu_port(ds, p) || ds->dsa_port_mask & (1 << p))
+ REG_WRITE(addr, 0x01, 0x003e);
+ else
+ REG_WRITE(addr, 0x01, 0x0003);
+
+ /*
+ * Do not limit the period of time that this port can be
+ * paused for by the remote end or the period of time that
+ * this port can pause the remote end.
+ */
+ REG_WRITE(addr, 0x02, 0x0000);
+
+ /*
+ * Port Control: disable Drop-on-Unlock, disable Drop-on-Lock,
+ * disable Header mode, enable IGMP/MLD snooping, disable VLAN
+ * tunneling, determine priority by looking at 802.1p and IP
+ * priority fields (IP prio has precedence), and set STP state
+ * to Forwarding.
+ *
+ * If this is the CPU link, use DSA or EDSA tagging depending
+ * on which tagging mode was configured.
+ *
+ * If this is a link to another switch, use DSA tagging mode.
+ *
+ * If this is the upstream port for this switch, enable
+ * forwarding of unknown unicasts and multicasts.
+ */
+ val = 0x0433;
+ if (dsa_is_cpu_port(ds, p)) {
+ if (ds->dst->tag_protocol == htons(ETH_P_EDSA))
+ val |= 0x3300;
+ else
+ val |= 0x0100;
+ }
+ if (ds->dsa_port_mask & (1 << p))
+ val |= 0x0100;
+ if (p == dsa_upstream_port(ds))
+ val |= 0x000c;
+ REG_WRITE(addr, 0x04, val);
+
+ /*
+ * Port Control 1: disable trunking. Also, if this is the
+ * CPU port, enable learn messages to be sent to this port.
+ */
+ REG_WRITE(addr, 0x05, dsa_is_cpu_port(ds, p) ? 0x8000 : 0x0000);
+
+ /*
+ * Port based VLAN map: give each port its own address
+ * database, allow the CPU port to talk to each of the 'real'
+ * ports, and allow each of the 'real' ports to only talk to
+ * the upstream port.
+ */
+ val = (p & 0xf) << 12;
+ if (dsa_is_cpu_port(ds, p))
+ val |= ds->phys_port_mask;
+ else
+ val |= 1 << dsa_upstream_port(ds);
+ REG_WRITE(addr, 0x06, val);
+
+ /*
+ * Default VLAN ID and priority: don't set a default VLAN
+ * ID, and set the default packet priority to zero.
+ */
+ REG_WRITE(addr, 0x07, 0x0000);
+
+ /*
+ * Port Control 2: don't force a good FCS, set the maximum
+ * frame size to 10240 bytes, don't let the switch add or
+ * strip 802.1q tags, don't discard tagged or untagged frames
+ * on this port, do a destination address lookup on all
+ * received packets as usual, disable ARP mirroring and don't
+ * send a copy of all transmitted/received frames on this port
+ * to the CPU.
+ */
+ REG_WRITE(addr, 0x08, 0x2080);
+
+ /*
+ * Egress rate control: disable egress rate control.
+ */
+ REG_WRITE(addr, 0x09, 0x0001);
+
+ /*
+ * Egress rate control 2: disable egress rate control.
+ */
+ REG_WRITE(addr, 0x0a, 0x0000);
+
+ /*
+ * Port Association Vector: when learning source addresses
+ * of packets, add the address to the address database using
+ * a port bitmap that has only the bit for this port set and
+ * the other bits clear.
+ */
+ REG_WRITE(addr, 0x0b, 1 << p);
+
+ /*
+ * Port ATU control: disable limiting the number of address
+ * database entries that this port is allowed to use.
+ */
+ REG_WRITE(addr, 0x0c, 0x0000);
+
+ /*
+ * Priority Override: disable DA, SA and VTU priority override.
+ */
+ REG_WRITE(addr, 0x0d, 0x0000);
+
+ /*
+ * Port Ethertype: use the Ethertype DSA Ethertype value.
+ */
+ REG_WRITE(addr, 0x0f, ETH_P_EDSA);
+
+ /*
+ * Tag Remap: use an identity 802.1p prio -> switch prio
+ * mapping.
+ */
+ REG_WRITE(addr, 0x18, 0x3210);
+
+ /*
+ * Tag Remap 2: use an identity 802.1p prio -> switch prio
+ * mapping.
+ */
+ REG_WRITE(addr, 0x19, 0x7654);
+
+ return 0;
+}
+
+static int mv88e6123_61_65_setup(struct dsa_switch *ds)
+{
+ struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+ int i;
+ int ret;
+
+ mutex_init(&ps->smi_mutex);
+ mutex_init(&ps->stats_mutex);
+
+ ret = mv88e6123_61_65_switch_reset(ds);
+ if (ret < 0)
+ return ret;
+
+ /* @@@ initialise vtu and atu */
+
+ ret = mv88e6123_61_65_setup_global(ds);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < 6; i++) {
+ ret = mv88e6123_61_65_setup_port(ds, i);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mv88e6123_61_65_port_to_phy_addr(int port)
+{
+ if (port >= 0 && port <= 4)
+ return port;
+ return -1;
+}
+
+static int
+mv88e6123_61_65_phy_read(struct dsa_switch *ds, int port, int regnum)
+{
+ int addr = mv88e6123_61_65_port_to_phy_addr(port);
+ return mv88e6xxx_phy_read(ds, addr, regnum);
+}
+
+static int
+mv88e6123_61_65_phy_write(struct dsa_switch *ds,
+ int port, int regnum, u16 val)
+{
+ int addr = mv88e6123_61_65_port_to_phy_addr(port);
+ return mv88e6xxx_phy_write(ds, addr, regnum, val);
+}
+
+static struct mv88e6xxx_hw_stat mv88e6123_61_65_hw_stats[] = {
+ { "in_good_octets", 8, 0x00, },
+ { "in_bad_octets", 4, 0x02, },
+ { "in_unicast", 4, 0x04, },
+ { "in_broadcasts", 4, 0x06, },
+ { "in_multicasts", 4, 0x07, },
+ { "in_pause", 4, 0x16, },
+ { "in_undersize", 4, 0x18, },
+ { "in_fragments", 4, 0x19, },
+ { "in_oversize", 4, 0x1a, },
+ { "in_jabber", 4, 0x1b, },
+ { "in_rx_error", 4, 0x1c, },
+ { "in_fcs_error", 4, 0x1d, },
+ { "out_octets", 8, 0x0e, },
+ { "out_unicast", 4, 0x10, },
+ { "out_broadcasts", 4, 0x13, },
+ { "out_multicasts", 4, 0x12, },
+ { "out_pause", 4, 0x15, },
+ { "excessive", 4, 0x11, },
+ { "collisions", 4, 0x1e, },
+ { "deferred", 4, 0x05, },
+ { "single", 4, 0x14, },
+ { "multiple", 4, 0x17, },
+ { "out_fcs_error", 4, 0x03, },
+ { "late", 4, 0x1f, },
+ { "hist_64bytes", 4, 0x08, },
+ { "hist_65_127bytes", 4, 0x09, },
+ { "hist_128_255bytes", 4, 0x0a, },
+ { "hist_256_511bytes", 4, 0x0b, },
+ { "hist_512_1023bytes", 4, 0x0c, },
+ { "hist_1024_max_bytes", 4, 0x0d, },
+};
+
+static void
+mv88e6123_61_65_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
+{
+ mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6123_61_65_hw_stats),
+ mv88e6123_61_65_hw_stats, port, data);
+}
+
+static void
+mv88e6123_61_65_get_ethtool_stats(struct dsa_switch *ds,
+ int port, uint64_t *data)
+{
+ mv88e6xxx_get_ethtool_stats(ds, ARRAY_SIZE(mv88e6123_61_65_hw_stats),
+ mv88e6123_61_65_hw_stats, port, data);
+}
+
+static int mv88e6123_61_65_get_sset_count(struct dsa_switch *ds)
+{
+ return ARRAY_SIZE(mv88e6123_61_65_hw_stats);
+}
+
+struct dsa_switch_driver mv88e6123_61_65_switch_driver = {
+ .tag_protocol = cpu_to_be16(ETH_P_EDSA),
+ .priv_size = sizeof(struct mv88e6xxx_priv_state),
+ .probe = mv88e6123_61_65_probe,
+ .setup = mv88e6123_61_65_setup,
+ .set_addr = mv88e6xxx_set_addr_indirect,
+ .phy_read = mv88e6123_61_65_phy_read,
+ .phy_write = mv88e6123_61_65_phy_write,
+ .poll_link = mv88e6xxx_poll_link,
+ .get_strings = mv88e6123_61_65_get_strings,
+ .get_ethtool_stats = mv88e6123_61_65_get_ethtool_stats,
+ .get_sset_count = mv88e6123_61_65_get_sset_count,
+};
+
+MODULE_ALIAS("platform:mv88e6123");
+MODULE_ALIAS("platform:mv88e6161");
+MODULE_ALIAS("platform:mv88e6165");
diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c
new file mode 100644
index 000000000000..e0eb68243834
--- /dev/null
+++ b/drivers/net/dsa/mv88e6131.c
@@ -0,0 +1,435 @@
+/*
+ * net/dsa/mv88e6131.c - Marvell 88e6095/6095f/6131 switch chip support
+ * Copyright (c) 2008-2009 Marvell Semiconductor
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <net/dsa.h>
+#include "mv88e6xxx.h"
+
+/*
+ * Switch product IDs
+ */
+#define ID_6085 0x04a0
+#define ID_6095 0x0950
+#define ID_6131 0x1060
+
+static char *mv88e6131_probe(struct mii_bus *bus, int sw_addr)
+{
+ int ret;
+
+ ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), 0x03);
+ if (ret >= 0) {
+ ret &= 0xfff0;
+ if (ret == ID_6085)
+ return "Marvell 88E6085";
+ if (ret == ID_6095)
+ return "Marvell 88E6095/88E6095F";
+ if (ret == ID_6131)
+ return "Marvell 88E6131";
+ }
+
+ return NULL;
+}
+
+static int mv88e6131_switch_reset(struct dsa_switch *ds)
+{
+ int i;
+ int ret;
+
+ /*
+ * Set all ports to the disabled state.
+ */
+ for (i = 0; i < 11; i++) {
+ ret = REG_READ(REG_PORT(i), 0x04);
+ REG_WRITE(REG_PORT(i), 0x04, ret & 0xfffc);
+ }
+
+ /*
+ * Wait for transmit queues to drain.
+ */
+ msleep(2);
+
+ /*
+ * Reset the switch.
+ */
+ REG_WRITE(REG_GLOBAL, 0x04, 0xc400);
+
+ /*
+ * Wait up to one second for reset to complete.
+ */
+ for (i = 0; i < 1000; i++) {
+ ret = REG_READ(REG_GLOBAL, 0x00);
+ if ((ret & 0xc800) == 0xc800)
+ break;
+
+ msleep(1);
+ }
+ if (i == 1000)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int mv88e6131_setup_global(struct dsa_switch *ds)
+{
+ int ret;
+ int i;
+
+ /*
+ * Enable the PHY polling unit, don't discard packets with
+ * excessive collisions, use a weighted fair queueing scheme
+ * to arbitrate between packet queues, set the maximum frame
+ * size to 1632, and mask all interrupt sources.
+ */
+ REG_WRITE(REG_GLOBAL, 0x04, 0x4400);
+
+ /*
+ * Set the default address aging time to 5 minutes, and
+ * enable address learn messages to be sent to all message
+ * ports.
+ */
+ REG_WRITE(REG_GLOBAL, 0x0a, 0x0148);
+
+ /*
+ * Configure the priority mapping registers.
+ */
+ ret = mv88e6xxx_config_prio(ds);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Set the VLAN ethertype to 0x8100.
+ */
+ REG_WRITE(REG_GLOBAL, 0x19, 0x8100);
+
+ /*
+ * Disable ARP mirroring, and configure the upstream port as
+ * the port to which ingress and egress monitor frames are to
+ * be sent.
+ */
+ REG_WRITE(REG_GLOBAL, 0x1a, (dsa_upstream_port(ds) * 0x1100) | 0x00f0);
+
+ /*
+ * Disable cascade port functionality unless this device
+ * is used in a cascade configuration, and set the switch's
+ * DSA device number.
+ */
+ if (ds->dst->pd->nr_chips > 1)
+ REG_WRITE(REG_GLOBAL, 0x1c, 0xf000 | (ds->index & 0x1f));
+ else
+ REG_WRITE(REG_GLOBAL, 0x1c, 0xe000 | (ds->index & 0x1f));
+
+ /*
+ * Send all frames with destination addresses matching
+ * 01:80:c2:00:00:0x to the CPU port.
+ */
+ REG_WRITE(REG_GLOBAL2, 0x03, 0xffff);
+
+ /*
+ * Ignore removed tag data on doubly tagged packets, disable
+ * flow control messages, force flow control priority to the
+ * highest, and send all special multicast frames to the CPU
+ * port at the highest priority.
+ */
+ REG_WRITE(REG_GLOBAL2, 0x05, 0x00ff);
+
+ /*
+ * Program the DSA routing table.
+ */
+ for (i = 0; i < 32; i++) {
+ int nexthop;
+
+ nexthop = 0x1f;
+ if (i != ds->index && i < ds->dst->pd->nr_chips)
+ nexthop = ds->pd->rtable[i] & 0x1f;
+
+ REG_WRITE(REG_GLOBAL2, 0x06, 0x8000 | (i << 8) | nexthop);
+ }
+
+ /*
+ * Clear all trunk masks.
+ */
+ for (i = 0; i < 8; i++)
+ REG_WRITE(REG_GLOBAL2, 0x07, 0x8000 | (i << 12) | 0x7ff);
+
+ /*
+ * Clear all trunk mappings.
+ */
+ for (i = 0; i < 16; i++)
+ REG_WRITE(REG_GLOBAL2, 0x08, 0x8000 | (i << 11));
+
+ /*
+ * Force the priority of IGMP/MLD snoop frames and ARP frames
+ * to the highest setting.
+ */
+ REG_WRITE(REG_GLOBAL2, 0x0f, 0x00ff);
+
+ return 0;
+}
+
+static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
+{
+ struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+ int addr = REG_PORT(p);
+ u16 val;
+
+ /*
+ * MAC Forcing register: don't force link, speed, duplex
+ * or flow control state to any particular values on physical
+ * ports, but force the CPU port and all DSA ports to 1000 Mb/s
+ * (100 Mb/s on 6085) full duplex.
+ */
+ if (dsa_is_cpu_port(ds, p) || ds->dsa_port_mask & (1 << p))
+ if (ps->id == ID_6085)
+ REG_WRITE(addr, 0x01, 0x003d); /* 100 Mb/s */
+ else
+ REG_WRITE(addr, 0x01, 0x003e); /* 1000 Mb/s */
+ else
+ REG_WRITE(addr, 0x01, 0x0003);
+
+ /*
+ * Port Control: disable Core Tag, disable Drop-on-Lock,
+ * transmit frames unmodified, disable Header mode,
+ * enable IGMP/MLD snoop, disable DoubleTag, disable VLAN
+ * tunneling, determine priority by looking at 802.1p and
+ * IP priority fields (IP prio has precedence), and set STP
+ * state to Forwarding.
+ *
+ * If this is the upstream port for this switch, enable
+ * forwarding of unknown unicasts, and enable DSA tagging
+ * mode.
+ *
+ * If this is the link to another switch, use DSA tagging
+ * mode, but do not enable forwarding of unknown unicasts.
+ */
+ val = 0x0433;
+ if (p == dsa_upstream_port(ds)) {
+ val |= 0x0104;
+ /*
+ * On the 6085, unknown multicast forwarding is controlled
+ * here rather than in the Port Control 2 register.
+ */
+ if (ps->id == ID_6085)
+ val |= 0x0008;
+ }
+ if (ds->dsa_port_mask & (1 << p))
+ val |= 0x0100;
+ REG_WRITE(addr, 0x04, val);
+
+ /*
+ * Port Control 1: disable trunking. Also, if this is the
+ * CPU port, enable learn messages to be sent to this port.
+ */
+ REG_WRITE(addr, 0x05, dsa_is_cpu_port(ds, p) ? 0x8000 : 0x0000);
+
+ /*
+ * Port based VLAN map: give each port its own address
+ * database, allow the CPU port to talk to each of the 'real'
+ * ports, and allow each of the 'real' ports to only talk to
+ * the upstream port.
+ */
+ val = (p & 0xf) << 12;
+ if (dsa_is_cpu_port(ds, p))
+ val |= ds->phys_port_mask;
+ else
+ val |= 1 << dsa_upstream_port(ds);
+ REG_WRITE(addr, 0x06, val);
+
+ /*
+ * Default VLAN ID and priority: don't set a default VLAN
+ * ID, and set the default packet priority to zero.
+ */
+ REG_WRITE(addr, 0x07, 0x0000);
+
+ /*
+ * Port Control 2: don't force a good FCS, don't use
+ * VLAN-based, source address-based or destination
+ * address-based priority overrides, don't let the switch
+ * add or strip 802.1q tags, don't discard tagged or
+ * untagged frames on this port, do a destination address
+ * lookup on received packets as usual, don't send a copy
+ * of all transmitted/received frames on this port to the
+ * CPU, and configure the upstream port number.
+ *
+ * If this is the upstream port for this switch, enable
+ * forwarding of unknown multicast addresses.
+ */
+ if (ps->id == ID_6085)
+ /*
+ * On the 6085, bits 3:0 are reserved, bit 6 controls ARP
+ * mirroring, and multicast forwarding is handled in the
+ * Port Control register.
+ */
+ REG_WRITE(addr, 0x08, 0x0080);
+ else {
+ val = 0x0080 | dsa_upstream_port(ds);
+ if (p == dsa_upstream_port(ds))
+ val |= 0x0040;
+ REG_WRITE(addr, 0x08, val);
+ }
+
+ /*
+ * Rate Control: disable ingress rate limiting.
+ */
+ REG_WRITE(addr, 0x09, 0x0000);
+
+ /*
+ * Rate Control 2: disable egress rate limiting.
+ */
+ REG_WRITE(addr, 0x0a, 0x0000);
+
+ /*
+ * Port Association Vector: when learning source addresses
+ * of packets, add the address to the address database using
+ * a port bitmap that has only the bit for this port set and
+ * the other bits clear.
+ */
+ REG_WRITE(addr, 0x0b, 1 << p);
+
+ /*
+ * Tag Remap: use an identity 802.1p prio -> switch prio
+ * mapping.
+ */
+ REG_WRITE(addr, 0x18, 0x3210);
+
+ /*
+ * Tag Remap 2: use an identity 802.1p prio -> switch prio
+ * mapping.
+ */
+ REG_WRITE(addr, 0x19, 0x7654);
+
+ return 0;
+}
+
+static int mv88e6131_setup(struct dsa_switch *ds)
+{
+ struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+ int i;
+ int ret;
+
+ mutex_init(&ps->smi_mutex);
+ mv88e6xxx_ppu_state_init(ds);
+ mutex_init(&ps->stats_mutex);
+
+ ps->id = REG_READ(REG_PORT(0), 0x03) & 0xfff0;
+
+ ret = mv88e6131_switch_reset(ds);
+ if (ret < 0)
+ return ret;
+
+ /* @@@ initialise vtu and atu */
+
+ ret = mv88e6131_setup_global(ds);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < 11; i++) {
+ ret = mv88e6131_setup_port(ds, i);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mv88e6131_port_to_phy_addr(int port)
+{
+ if (port >= 0 && port <= 11)
+ return port;
+ return -1;
+}
+
+static int
+mv88e6131_phy_read(struct dsa_switch *ds, int port, int regnum)
+{
+ int addr = mv88e6131_port_to_phy_addr(port);
+ return mv88e6xxx_phy_read_ppu(ds, addr, regnum);
+}
+
+static int
+mv88e6131_phy_write(struct dsa_switch *ds,
+ int port, int regnum, u16 val)
+{
+ int addr = mv88e6131_port_to_phy_addr(port);
+ return mv88e6xxx_phy_write_ppu(ds, addr, regnum, val);
+}
+
+static struct mv88e6xxx_hw_stat mv88e6131_hw_stats[] = {
+ { "in_good_octets", 8, 0x00, },
+ { "in_bad_octets", 4, 0x02, },
+ { "in_unicast", 4, 0x04, },
+ { "in_broadcasts", 4, 0x06, },
+ { "in_multicasts", 4, 0x07, },
+ { "in_pause", 4, 0x16, },
+ { "in_undersize", 4, 0x18, },
+ { "in_fragments", 4, 0x19, },
+ { "in_oversize", 4, 0x1a, },
+ { "in_jabber", 4, 0x1b, },
+ { "in_rx_error", 4, 0x1c, },
+ { "in_fcs_error", 4, 0x1d, },
+ { "out_octets", 8, 0x0e, },
+ { "out_unicast", 4, 0x10, },
+ { "out_broadcasts", 4, 0x13, },
+ { "out_multicasts", 4, 0x12, },
+ { "out_pause", 4, 0x15, },
+ { "excessive", 4, 0x11, },
+ { "collisions", 4, 0x1e, },
+ { "deferred", 4, 0x05, },
+ { "single", 4, 0x14, },
+ { "multiple", 4, 0x17, },
+ { "out_fcs_error", 4, 0x03, },
+ { "late", 4, 0x1f, },
+ { "hist_64bytes", 4, 0x08, },
+ { "hist_65_127bytes", 4, 0x09, },
+ { "hist_128_255bytes", 4, 0x0a, },
+ { "hist_256_511bytes", 4, 0x0b, },
+ { "hist_512_1023bytes", 4, 0x0c, },
+ { "hist_1024_max_bytes", 4, 0x0d, },
+};
+
+static void
+mv88e6131_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
+{
+ mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6131_hw_stats),
+ mv88e6131_hw_stats, port, data);
+}
+
+static void
+mv88e6131_get_ethtool_stats(struct dsa_switch *ds,
+ int port, uint64_t *data)
+{
+ mv88e6xxx_get_ethtool_stats(ds, ARRAY_SIZE(mv88e6131_hw_stats),
+ mv88e6131_hw_stats, port, data);
+}
+
+static int mv88e6131_get_sset_count(struct dsa_switch *ds)
+{
+ return ARRAY_SIZE(mv88e6131_hw_stats);
+}
+
+struct dsa_switch_driver mv88e6131_switch_driver = {
+ .tag_protocol = cpu_to_be16(ETH_P_DSA),
+ .priv_size = sizeof(struct mv88e6xxx_priv_state),
+ .probe = mv88e6131_probe,
+ .setup = mv88e6131_setup,
+ .set_addr = mv88e6xxx_set_addr_direct,
+ .phy_read = mv88e6131_phy_read,
+ .phy_write = mv88e6131_phy_write,
+ .poll_link = mv88e6xxx_poll_link,
+ .get_strings = mv88e6131_get_strings,
+ .get_ethtool_stats = mv88e6131_get_ethtool_stats,
+ .get_sset_count = mv88e6131_get_sset_count,
+};
+
+MODULE_ALIAS("platform:mv88e6085");
+MODULE_ALIAS("platform:mv88e6095");
+MODULE_ALIAS("platform:mv88e6095f");
+MODULE_ALIAS("platform:mv88e6131");
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
new file mode 100644
index 000000000000..5467c040824a
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -0,0 +1,549 @@
+/*
+ * net/dsa/mv88e6xxx.c - Marvell 88e6xxx switch chip support
+ * Copyright (c) 2008 Marvell Semiconductor
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <net/dsa.h>
+#include "mv88e6xxx.h"
+
+/*
+ * If the switch's ADDR[4:0] strap pins are strapped to zero, it will
+ * use all 32 SMI bus addresses on its SMI bus, and all switch registers
+ * will be directly accessible on some {device address,register address}
+ * pair. If the ADDR[4:0] pins are not strapped to zero, the switch
+ * will only respond to SMI transactions to that specific address, and
+ * an indirect addressing mechanism needs to be used to access its
+ * registers.
+ */
+static int mv88e6xxx_reg_wait_ready(struct mii_bus *bus, int sw_addr)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < 16; i++) {
+ ret = mdiobus_read(bus, sw_addr, 0);
+ if (ret < 0)
+ return ret;
+
+ if ((ret & 0x8000) == 0)
+ return 0;
+ }
+
+ return -ETIMEDOUT;
+}
+
+int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg)
+{
+ int ret;
+
+ if (sw_addr == 0)
+ return mdiobus_read(bus, addr, reg);
+
+ /*
+ * Wait for the bus to become free.
+ */
+ ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Transmit the read command.
+ */
+ ret = mdiobus_write(bus, sw_addr, 0, 0x9800 | (addr << 5) | reg);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Wait for the read command to complete.
+ */
+ ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Read the data.
+ */
+ ret = mdiobus_read(bus, sw_addr, 1);
+ if (ret < 0)
+ return ret;
+
+ return ret & 0xffff;
+}
+
+int mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg)
+{
+ struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+ int ret;
+
+ mutex_lock(&ps->smi_mutex);
+ ret = __mv88e6xxx_reg_read(ds->master_mii_bus,
+ ds->pd->sw_addr, addr, reg);
+ mutex_unlock(&ps->smi_mutex);
+
+ return ret;
+}
+
+int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr,
+ int reg, u16 val)
+{
+ int ret;
+
+ if (sw_addr == 0)
+ return mdiobus_write(bus, addr, reg, val);
+
+ /*
+ * Wait for the bus to become free.
+ */
+ ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Transmit the data to write.
+ */
+ ret = mdiobus_write(bus, sw_addr, 1, val);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Transmit the write command.
+ */
+ ret = mdiobus_write(bus, sw_addr, 0, 0x9400 | (addr << 5) | reg);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Wait for the write command to complete.
+ */
+ ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val)
+{
+ struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+ int ret;
+
+ mutex_lock(&ps->smi_mutex);
+ ret = __mv88e6xxx_reg_write(ds->master_mii_bus,
+ ds->pd->sw_addr, addr, reg, val);
+ mutex_unlock(&ps->smi_mutex);
+
+ return ret;
+}
+
+int mv88e6xxx_config_prio(struct dsa_switch *ds)
+{
+ /*
+ * Configure the IP ToS mapping registers.
+ */
+ REG_WRITE(REG_GLOBAL, 0x10, 0x0000);
+ REG_WRITE(REG_GLOBAL, 0x11, 0x0000);
+ REG_WRITE(REG_GLOBAL, 0x12, 0x5555);
+ REG_WRITE(REG_GLOBAL, 0x13, 0x5555);
+ REG_WRITE(REG_GLOBAL, 0x14, 0xaaaa);
+ REG_WRITE(REG_GLOBAL, 0x15, 0xaaaa);
+ REG_WRITE(REG_GLOBAL, 0x16, 0xffff);
+ REG_WRITE(REG_GLOBAL, 0x17, 0xffff);
+
+ /*
+ * Configure the IEEE 802.1p priority mapping register.
+ */
+ REG_WRITE(REG_GLOBAL, 0x18, 0xfa41);
+
+ return 0;
+}
+
+int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr)
+{
+ REG_WRITE(REG_GLOBAL, 0x01, (addr[0] << 8) | addr[1]);
+ REG_WRITE(REG_GLOBAL, 0x02, (addr[2] << 8) | addr[3]);
+ REG_WRITE(REG_GLOBAL, 0x03, (addr[4] << 8) | addr[5]);
+
+ return 0;
+}
+
+int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr)
+{
+ int i;
+ int ret;
+
+ for (i = 0; i < 6; i++) {
+ int j;
+
+ /*
+ * Write the MAC address byte.
+ */
+ REG_WRITE(REG_GLOBAL2, 0x0d, 0x8000 | (i << 8) | addr[i]);
+
+ /*
+ * Wait for the write to complete.
+ */
+ for (j = 0; j < 16; j++) {
+ ret = REG_READ(REG_GLOBAL2, 0x0d);
+ if ((ret & 0x8000) == 0)
+ break;
+ }
+ if (j == 16)
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+int mv88e6xxx_phy_read(struct dsa_switch *ds, int addr, int regnum)
+{
+ if (addr >= 0)
+ return mv88e6xxx_reg_read(ds, addr, regnum);
+ return 0xffff;
+}
+
+int mv88e6xxx_phy_write(struct dsa_switch *ds, int addr, int regnum, u16 val)
+{
+ if (addr >= 0)
+ return mv88e6xxx_reg_write(ds, addr, regnum, val);
+ return 0;
+}
+
+#ifdef CONFIG_NET_DSA_MV88E6XXX_NEED_PPU
+static int mv88e6xxx_ppu_disable(struct dsa_switch *ds)
+{
+ int ret;
+ int i;
+
+ ret = REG_READ(REG_GLOBAL, 0x04);
+ REG_WRITE(REG_GLOBAL, 0x04, ret & ~0x4000);
+
+ for (i = 0; i < 1000; i++) {
+ ret = REG_READ(REG_GLOBAL, 0x00);
+ msleep(1);
+ if ((ret & 0xc000) != 0xc000)
+ return 0;
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int mv88e6xxx_ppu_enable(struct dsa_switch *ds)
+{
+ int ret;
+ int i;
+
+ ret = REG_READ(REG_GLOBAL, 0x04);
+ REG_WRITE(REG_GLOBAL, 0x04, ret | 0x4000);
+
+ for (i = 0; i < 1000; i++) {
+ ret = REG_READ(REG_GLOBAL, 0x00);
+ msleep(1);
+ if ((ret & 0xc000) == 0xc000)
+ return 0;
+ }
+
+ return -ETIMEDOUT;
+}
+
+static void mv88e6xxx_ppu_reenable_work(struct work_struct *ugly)
+{
+ struct mv88e6xxx_priv_state *ps;
+
+ ps = container_of(ugly, struct mv88e6xxx_priv_state, ppu_work);
+ if (mutex_trylock(&ps->ppu_mutex)) {
+ struct dsa_switch *ds = ((struct dsa_switch *)ps) - 1;
+
+ if (mv88e6xxx_ppu_enable(ds) == 0)
+ ps->ppu_disabled = 0;
+ mutex_unlock(&ps->ppu_mutex);
+ }
+}
+
+static void mv88e6xxx_ppu_reenable_timer(unsigned long _ps)
+{
+ struct mv88e6xxx_priv_state *ps = (void *)_ps;
+
+ schedule_work(&ps->ppu_work);
+}
+
+static int mv88e6xxx_ppu_access_get(struct dsa_switch *ds)
+{
+ struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+ int ret;
+
+ mutex_lock(&ps->ppu_mutex);
+
+ /*
+ * If the PHY polling unit is enabled, disable it so that
+ * we can access the PHY registers. If it was already
+ * disabled, cancel the timer that is going to re-enable
+ * it.
+ */
+ if (!ps->ppu_disabled) {
+ ret = mv88e6xxx_ppu_disable(ds);
+ if (ret < 0) {
+ mutex_unlock(&ps->ppu_mutex);
+ return ret;
+ }
+ ps->ppu_disabled = 1;
+ } else {
+ del_timer(&ps->ppu_timer);
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static void mv88e6xxx_ppu_access_put(struct dsa_switch *ds)
+{
+ struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+
+ /*
+ * Schedule a timer to re-enable the PHY polling unit.
+ */
+ mod_timer(&ps->ppu_timer, jiffies + msecs_to_jiffies(10));
+ mutex_unlock(&ps->ppu_mutex);
+}
+
+void mv88e6xxx_ppu_state_init(struct dsa_switch *ds)
+{
+ struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+
+ mutex_init(&ps->ppu_mutex);
+ INIT_WORK(&ps->ppu_work, mv88e6xxx_ppu_reenable_work);
+ init_timer(&ps->ppu_timer);
+ ps->ppu_timer.data = (unsigned long)ps;
+ ps->ppu_timer.function = mv88e6xxx_ppu_reenable_timer;
+}
+
+int mv88e6xxx_phy_read_ppu(struct dsa_switch *ds, int addr, int regnum)
+{
+ int ret;
+
+ ret = mv88e6xxx_ppu_access_get(ds);
+ if (ret >= 0) {
+ ret = mv88e6xxx_reg_read(ds, addr, regnum);
+ mv88e6xxx_ppu_access_put(ds);
+ }
+
+ return ret;
+}
+
+int mv88e6xxx_phy_write_ppu(struct dsa_switch *ds, int addr,
+ int regnum, u16 val)
+{
+ int ret;
+
+ ret = mv88e6xxx_ppu_access_get(ds);
+ if (ret >= 0) {
+ ret = mv88e6xxx_reg_write(ds, addr, regnum, val);
+ mv88e6xxx_ppu_access_put(ds);
+ }
+
+ return ret;
+}
+#endif
+
+void mv88e6xxx_poll_link(struct dsa_switch *ds)
+{
+ int i;
+
+ for (i = 0; i < DSA_MAX_PORTS; i++) {
+ struct net_device *dev;
+ int uninitialized_var(port_status);
+ int link;
+ int speed;
+ int duplex;
+ int fc;
+
+ dev = ds->ports[i];
+ if (dev == NULL)
+ continue;
+
+ link = 0;
+ if (dev->flags & IFF_UP) {
+ port_status = mv88e6xxx_reg_read(ds, REG_PORT(i), 0x00);
+ if (port_status < 0)
+ continue;
+
+ link = !!(port_status & 0x0800);
+ }
+
+ if (!link) {
+ if (netif_carrier_ok(dev)) {
+ printk(KERN_INFO "%s: link down\n", dev->name);
+ netif_carrier_off(dev);
+ }
+ continue;
+ }
+
+ switch (port_status & 0x0300) {
+ case 0x0000:
+ speed = 10;
+ break;
+ case 0x0100:
+ speed = 100;
+ break;
+ case 0x0200:
+ speed = 1000;
+ break;
+ default:
+ speed = -1;
+ break;
+ }
+ duplex = (port_status & 0x0400) ? 1 : 0;
+ fc = (port_status & 0x8000) ? 1 : 0;
+
+ if (!netif_carrier_ok(dev)) {
+ printk(KERN_INFO "%s: link up, %d Mb/s, %s duplex, "
+ "flow control %sabled\n", dev->name,
+ speed, duplex ? "full" : "half",
+ fc ? "en" : "dis");
+ netif_carrier_on(dev);
+ }
+ }
+}
+
+static int mv88e6xxx_stats_wait(struct dsa_switch *ds)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < 10; i++) {
+ ret = REG_READ(REG_GLOBAL, 0x1d);
+ if ((ret & 0x8000) == 0)
+ return 0;
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int mv88e6xxx_stats_snapshot(struct dsa_switch *ds, int port)
+{
+ int ret;
+
+ /*
+ * Snapshot the hardware statistics counters for this port.
+ */
+ REG_WRITE(REG_GLOBAL, 0x1d, 0xdc00 | port);
+
+ /*
+ * Wait for the snapshotting to complete.
+ */
+ ret = mv88e6xxx_stats_wait(ds);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void mv88e6xxx_stats_read(struct dsa_switch *ds, int stat, u32 *val)
+{
+ u32 _val;
+ int ret;
+
+ *val = 0;
+
+ ret = mv88e6xxx_reg_write(ds, REG_GLOBAL, 0x1d, 0xcc00 | stat);
+ if (ret < 0)
+ return;
+
+ ret = mv88e6xxx_stats_wait(ds);
+ if (ret < 0)
+ return;
+
+ ret = mv88e6xxx_reg_read(ds, REG_GLOBAL, 0x1e);
+ if (ret < 0)
+ return;
+
+ _val = ret << 16;
+
+ ret = mv88e6xxx_reg_read(ds, REG_GLOBAL, 0x1f);
+ if (ret < 0)
+ return;
+
+ *val = _val | ret;
+}
+
+void mv88e6xxx_get_strings(struct dsa_switch *ds,
+ int nr_stats, struct mv88e6xxx_hw_stat *stats,
+ int port, uint8_t *data)
+{
+ int i;
+
+ for (i = 0; i < nr_stats; i++) {
+ memcpy(data + i * ETH_GSTRING_LEN,
+ stats[i].string, ETH_GSTRING_LEN);
+ }
+}
+
+void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
+ int nr_stats, struct mv88e6xxx_hw_stat *stats,
+ int port, uint64_t *data)
+{
+ struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+ int ret;
+ int i;
+
+ mutex_lock(&ps->stats_mutex);
+
+ ret = mv88e6xxx_stats_snapshot(ds, port);
+ if (ret < 0) {
+ mutex_unlock(&ps->stats_mutex);
+ return;
+ }
+
+ /*
+ * Read each of the counters.
+ */
+ for (i = 0; i < nr_stats; i++) {
+ struct mv88e6xxx_hw_stat *s = stats + i;
+ u32 low;
+ u32 high;
+
+ mv88e6xxx_stats_read(ds, s->reg, &low);
+ if (s->sizeof_stat == 8)
+ mv88e6xxx_stats_read(ds, s->reg + 1, &high);
+ else
+ high = 0;
+
+ data[i] = (((u64)high) << 32) | low;
+ }
+
+ mutex_unlock(&ps->stats_mutex);
+}
+
+static int __init mv88e6xxx_init(void)
+{
+#if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
+ register_switch_driver(&mv88e6131_switch_driver);
+#endif
+#if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65)
+ register_switch_driver(&mv88e6123_61_65_switch_driver);
+#endif
+ return 0;
+}
+module_init(mv88e6xxx_init);
+
+static void __exit mv88e6xxx_cleanup(void)
+{
+#if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65)
+ unregister_switch_driver(&mv88e6123_61_65_switch_driver);
+#endif
+#if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
+ unregister_switch_driver(&mv88e6131_switch_driver);
+#endif
+}
+module_exit(mv88e6xxx_cleanup);
+
+MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
+MODULE_DESCRIPTION("Driver for Marvell 88E6XXX ethernet switch chips");
+MODULE_LICENSE("GPL");
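
A note on the status decoding in mv88e6xxx_poll_link() above: bit 11 (0x0800) of the port status register is link, bits 9:8 (0x0300) encode the speed, bit 10 (0x0400) is duplex and bit 15 (0x8000) is flow control, exactly as the switch statement reads them. A minimal stand-alone sketch of that decoding, with the register value hard-coded for illustration instead of read over SMI:

#include <stdint.h>
#include <stdio.h>

/* Decode one 16-bit port status word the way mv88e6xxx_poll_link() does:
 * bit 11 = link, bits 9:8 = speed, bit 10 = duplex, bit 15 = flow control. */
static void decode_port_status(uint16_t status)
{
	static const int speeds[4] = { 10, 100, 1000, -1 };
	int link = !!(status & 0x0800);
	int speed = speeds[(status & 0x0300) >> 8];
	int duplex = !!(status & 0x0400);
	int fc = !!(status & 0x8000);

	if (!link) {
		printf("link down\n");
		return;
	}
	printf("link up, %d Mb/s, %s duplex, flow control %sabled\n",
	       speed, duplex ? "full" : "half", fc ? "en" : "dis");
}

int main(void)
{
	decode_port_status(0x8e00);	/* link up, 1000 Mb/s, full duplex, FC on */
	decode_port_status(0x0000);	/* link down */
	return 0;
}
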
diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h
new file mode 100644
index 000000000000..fc2cd7b90e8d
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx.h
@@ -0,0 +1,98 @@
+/*
+ * net/dsa/mv88e6xxx.h - Marvell 88e6xxx switch chip support
+ * Copyright (c) 2008 Marvell Semiconductor
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __MV88E6XXX_H
+#define __MV88E6XXX_H
+
+#define REG_PORT(p) (0x10 + (p))
+#define REG_GLOBAL 0x1b
+#define REG_GLOBAL2 0x1c
+
+struct mv88e6xxx_priv_state {
+ /*
+ * When using multi-chip addressing, this mutex protects
+ * access to the indirect access registers. (In single-chip
+ * mode, this mutex is effectively useless.)
+ */
+ struct mutex smi_mutex;
+
+#ifdef CONFIG_NET_DSA_MV88E6XXX_NEED_PPU
+ /*
+ * Handles automatic disabling and re-enabling of the PHY
+ * polling unit.
+ */
+ struct mutex ppu_mutex;
+ int ppu_disabled;
+ struct work_struct ppu_work;
+ struct timer_list ppu_timer;
+#endif
+
+ /*
+ * This mutex serialises access to the statistics unit.
+ * Hold this mutex over snapshot + dump sequences.
+ */
+ struct mutex stats_mutex;
+
+ int id; /* switch product id */
+};
+
+struct mv88e6xxx_hw_stat {
+ char string[ETH_GSTRING_LEN];
+ int sizeof_stat;
+ int reg;
+};
+
+int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg);
+int mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg);
+int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr,
+ int reg, u16 val);
+int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val);
+int mv88e6xxx_config_prio(struct dsa_switch *ds);
+int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr);
+int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr);
+int mv88e6xxx_phy_read(struct dsa_switch *ds, int addr, int regnum);
+int mv88e6xxx_phy_write(struct dsa_switch *ds, int addr, int regnum, u16 val);
+void mv88e6xxx_ppu_state_init(struct dsa_switch *ds);
+int mv88e6xxx_phy_read_ppu(struct dsa_switch *ds, int addr, int regnum);
+int mv88e6xxx_phy_write_ppu(struct dsa_switch *ds, int addr,
+ int regnum, u16 val);
+void mv88e6xxx_poll_link(struct dsa_switch *ds);
+void mv88e6xxx_get_strings(struct dsa_switch *ds,
+ int nr_stats, struct mv88e6xxx_hw_stat *stats,
+ int port, uint8_t *data);
+void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
+ int nr_stats, struct mv88e6xxx_hw_stat *stats,
+ int port, uint64_t *data);
+
+extern struct dsa_switch_driver mv88e6131_switch_driver;
+extern struct dsa_switch_driver mv88e6123_61_65_switch_driver;
+
+#define REG_READ(addr, reg) \
+ ({ \
+ int __ret; \
+ \
+ __ret = mv88e6xxx_reg_read(ds, addr, reg); \
+ if (__ret < 0) \
+ return __ret; \
+ __ret; \
+ })
+
+#define REG_WRITE(addr, reg, val) \
+ ({ \
+ int __ret; \
+ \
+ __ret = mv88e6xxx_reg_write(ds, addr, reg, val); \
+ if (__ret < 0) \
+ return __ret; \
+ })
+
+
+
+#endif
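
The REG_READ()/REG_WRITE() helpers above rely on GCC statement expressions and make the calling function return the error code when a register access fails, which is why they may only be used inside functions that return int and have a dsa_switch pointer named ds in scope. A minimal user-space sketch of the same bail-out pattern (fake_read() and do_probe() are invented for illustration; statement expressions are a GNU extension, so compile with GCC):

#include <stdio.h>

/* Pretend register file: only register 0x1d exists. */
static int fake_read(int reg)
{
	return (reg == 0x1d) ? 0x1234 : -5;	/* -5 stands in for -EIO */
}

/* Statement expression: evaluates to the value read, but makes the
 * enclosing function return early when the access fails. */
#define REG_READ(reg)						\
	({							\
		int __ret = fake_read(reg);			\
		if (__ret < 0)					\
			return __ret;				\
		__ret;						\
	})

static int do_probe(void)
{
	int id = REG_READ(0x1d);	/* ok: id == 0x1234 */
	int bogus = REG_READ(0x42);	/* fails: do_probe() returns -5 here */

	return id + bogus;		/* never reached */
}

int main(void)
{
	printf("do_probe() = %d\n", do_probe());	/* prints -5 */
	return 0;
}
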
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index a7c5e8831e8c..087648ea1edb 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -134,7 +134,7 @@ static void dummy_setup(struct net_device *dev)
dev->flags |= IFF_NOARP;
dev->flags &= ~IFF_MULTICAST;
dev->features |= NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO;
- dev->features |= NETIF_F_NO_CSUM | NETIF_F_HIGHDMA | NETIF_F_LLTX;
+ dev->features |= NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_LLTX;
random_ether_addr(dev->dev_addr);
}
diff --git a/drivers/net/ethernet/3com/3c589_cs.c b/drivers/net/ethernet/3com/3c589_cs.c
index 972f80ecc510..da410f036869 100644
--- a/drivers/net/ethernet/3com/3c589_cs.c
+++ b/drivers/net/ethernet/3com/3c589_cs.c
@@ -468,9 +468,10 @@ static void tc589_reset(struct net_device *dev)
static void netdev_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- sprintf(info->bus_info, "PCMCIA 0x%lx", dev->base_addr);
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ snprintf(info->bus_info, sizeof(info->bus_info),
+ "PCMCIA 0x%lx", dev->base_addr);
}
static const struct ethtool_ops netdev_ethtool_ops = {
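
The get_drvinfo() conversions repeated throughout this series replace unbounded strcpy()/sprintf() calls with strlcpy()/snprintf() bounded by sizeof() of the fixed-size ethtool_drvinfo fields, so an over-long driver or bus string is truncated instead of overflowing the structure. A user-space sketch of the bounded copy (my_strlcpy() reimplements the kernel semantics here only because glibc does not provide strlcpy; the struct is a cut-down stand-in for ethtool_drvinfo):

#include <stdio.h>
#include <string.h>

struct drvinfo { char driver[32]; char bus_info[32]; };

/* Same contract as the kernel's strlcpy(): always NUL-terminates and
 * returns the length it tried to create, so truncation is detectable. */
static size_t my_strlcpy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size) {
		size_t n = (len >= size) ? size - 1 : len;
		memcpy(dst, src, n);
		dst[n] = '\0';
	}
	return len;
}

int main(void)
{
	struct drvinfo info;

	my_strlcpy(info.driver, "3c589_cs", sizeof(info.driver));
	snprintf(info.bus_info, sizeof(info.bus_info),
		 "PCMCIA 0x%lx", 0x300UL);
	printf("%s / %s\n", info.driver, info.bus_info);
	return 0;
}
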
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index b42c06baba89..8153a3e0a1a4 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -2929,15 +2929,17 @@ static void vortex_get_drvinfo(struct net_device *dev,
{
struct vortex_private *vp = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
if (VORTEX_PCI(vp)) {
- strcpy(info->bus_info, pci_name(VORTEX_PCI(vp)));
+ strlcpy(info->bus_info, pci_name(VORTEX_PCI(vp)),
+ sizeof(info->bus_info));
} else {
if (VORTEX_EISA(vp))
- strcpy(info->bus_info, dev_name(vp->gendev));
+ strlcpy(info->bus_info, dev_name(vp->gendev),
+ sizeof(info->bus_info));
else
- sprintf(info->bus_info, "EISA 0x%lx %d",
- dev->base_addr, dev->irq);
+ snprintf(info->bus_info, sizeof(info->bus_info),
+ "EISA 0x%lx %d", dev->base_addr, dev->irq);
}
}
diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c
index 20ea07508ac7..6d6bc754b1a8 100644
--- a/drivers/net/ethernet/3com/typhoon.c
+++ b/drivers/net/ethernet/3com/typhoon.c
@@ -988,21 +988,23 @@ typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
smp_rmb();
if(tp->card_state == Sleeping) {
- strcpy(info->fw_version, "Sleep image");
+ strlcpy(info->fw_version, "Sleep image",
+ sizeof(info->fw_version));
} else {
INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
- strcpy(info->fw_version, "Unknown runtime");
+ strlcpy(info->fw_version, "Unknown runtime",
+ sizeof(info->fw_version));
} else {
u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
- snprintf(info->fw_version, 32, "%02x.%03x.%03x",
- sleep_ver >> 24, (sleep_ver >> 12) & 0xfff,
- sleep_ver & 0xfff);
+ snprintf(info->fw_version, sizeof(info->fw_version),
+ "%02x.%03x.%03x", sleep_ver >> 24,
+ (sleep_ver >> 12) & 0xfff, sleep_ver & 0xfff);
}
}
- strcpy(info->driver, KBUILD_MODNAME);
- strcpy(info->bus_info, pci_name(pci_dev));
+ strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strlcpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
}
static int
diff --git a/drivers/net/ethernet/8390/8390.h b/drivers/net/ethernet/8390/8390.h
index 58a12e4c78f9..ef325ffa1b5a 100644
--- a/drivers/net/ethernet/8390/8390.h
+++ b/drivers/net/ethernet/8390/8390.h
@@ -14,8 +14,6 @@
#define TX_PAGES 12 /* Two Tx slots */
-#define ETHER_ADDR_LEN 6
-
/* The 8390 specific per-packet-header format. */
struct e8390_pkt_hdr {
unsigned char status; /* status */
diff --git a/drivers/net/ethernet/8390/apne.c b/drivers/net/ethernet/8390/apne.c
index 547737340cbb..3ad5d2f9a49c 100644
--- a/drivers/net/ethernet/8390/apne.c
+++ b/drivers/net/ethernet/8390/apne.c
@@ -318,7 +318,7 @@ static int __init apne_probe1(struct net_device *dev, int ioaddr)
i = request_irq(dev->irq, apne_interrupt, IRQF_SHARED, DRV_NAME, dev);
if (i) return i;
- for(i = 0; i < ETHER_ADDR_LEN; i++)
+ for (i = 0; i < ETH_ALEN; i++)
dev->dev_addr[i] = SA_prom[i];
printk(" %pM\n", dev->dev_addr);
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index e9f8432f55b4..0f92e3567f68 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -623,7 +623,8 @@ static int ax_mii_init(struct net_device *dev)
ax->mii_bus->name = "ax88796_mii_bus";
ax->mii_bus->parent = dev->dev.parent;
- snprintf(ax->mii_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id);
+ snprintf(ax->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+ pdev->name, pdev->id);
ax->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
if (!ax->mii_bus->irq) {
@@ -735,15 +736,14 @@ static int ax_init_dev(struct net_device *dev)
if (ax->plat->flags & AXFLG_MAC_FROMDEV) {
ei_outb(E8390_NODMA + E8390_PAGE1 + E8390_STOP,
ei_local->mem + E8390_CMD); /* 0x61 */
- for (i = 0; i < ETHER_ADDR_LEN; i++)
+ for (i = 0; i < ETH_ALEN; i++)
dev->dev_addr[i] =
ei_inb(ioaddr + EN1_PHYS_SHIFT(i));
}
if ((ax->plat->flags & AXFLG_MAC_FROMPLATFORM) &&
ax->plat->mac_addr)
- memcpy(dev->dev_addr, ax->plat->mac_addr,
- ETHER_ADDR_LEN);
+ memcpy(dev->dev_addr, ax->plat->mac_addr, ETH_ALEN);
ax_reset_8390(dev);
@@ -991,18 +991,7 @@ static struct platform_driver axdrv = {
.resume = ax_resume,
};
-static int __init axdrv_init(void)
-{
- return platform_driver_register(&axdrv);
-}
-
-static void __exit axdrv_exit(void)
-{
- platform_driver_unregister(&axdrv);
-}
-
-module_init(axdrv_init);
-module_exit(axdrv_exit);
+module_platform_driver(axdrv);
MODULE_DESCRIPTION("AX88796 10/100 Ethernet platform driver");
MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>");
diff --git a/drivers/net/ethernet/8390/es3210.c b/drivers/net/ethernet/8390/es3210.c
index 7a09575ecff0..6428f9e7a554 100644
--- a/drivers/net/ethernet/8390/es3210.c
+++ b/drivers/net/ethernet/8390/es3210.c
@@ -195,7 +195,7 @@ static int __init es_probe1(struct net_device *dev, int ioaddr)
goto out;
}
- for (i = 0; i < ETHER_ADDR_LEN ; i++)
+ for (i = 0; i < ETH_ALEN ; i++)
dev->dev_addr[i] = inb(ioaddr + ES_SA_PROM + i);
/* Check the Racal vendor ID as well. */
diff --git a/drivers/net/ethernet/8390/hp-plus.c b/drivers/net/ethernet/8390/hp-plus.c
index eeac843dcd2d..d42938b6b596 100644
--- a/drivers/net/ethernet/8390/hp-plus.c
+++ b/drivers/net/ethernet/8390/hp-plus.c
@@ -202,7 +202,7 @@ static int __init hpp_probe1(struct net_device *dev, int ioaddr)
/* Retrieve and checksum the station address. */
outw(MAC_Page, ioaddr + HP_PAGING);
- for(i = 0; i < ETHER_ADDR_LEN; i++) {
+ for(i = 0; i < ETH_ALEN; i++) {
unsigned char inval = inb(ioaddr + 8 + i);
dev->dev_addr[i] = inval;
checksum += inval;
diff --git a/drivers/net/ethernet/8390/hp.c b/drivers/net/ethernet/8390/hp.c
index 18564d4a7c04..113f1e075a26 100644
--- a/drivers/net/ethernet/8390/hp.c
+++ b/drivers/net/ethernet/8390/hp.c
@@ -156,7 +156,7 @@ static int __init hp_probe1(struct net_device *dev, int ioaddr)
printk("%s: %s (ID %02x) at %#3x,", dev->name, name, board_id, ioaddr);
- for(i = 0; i < ETHER_ADDR_LEN; i++)
+ for(i = 0; i < ETH_ALEN; i++)
dev->dev_addr[i] = inb(ioaddr + i);
printk(" %pM", dev->dev_addr);
diff --git a/drivers/net/ethernet/8390/hydra.c b/drivers/net/ethernet/8390/hydra.c
index 3dac937a67c4..5370c884620b 100644
--- a/drivers/net/ethernet/8390/hydra.c
+++ b/drivers/net/ethernet/8390/hydra.c
@@ -129,7 +129,7 @@ static int __devinit hydra_init(struct zorro_dev *z)
if (!dev)
return -ENOMEM;
- for(j = 0; j < ETHER_ADDR_LEN; j++)
+ for (j = 0; j < ETH_ALEN; j++)
dev->dev_addr[j] = *((u8 *)(board + HYDRA_ADDRPROM + 2*j));
/* We must set the 8390 for word mode. */
diff --git a/drivers/net/ethernet/8390/lne390.c b/drivers/net/ethernet/8390/lne390.c
index f9888d20177b..69490ae018ea 100644
--- a/drivers/net/ethernet/8390/lne390.c
+++ b/drivers/net/ethernet/8390/lne390.c
@@ -191,14 +191,14 @@ static int __init lne390_probe1(struct net_device *dev, int ioaddr)
|| inb(ioaddr + LNE390_SA_PROM + 1) != LNE390_ADDR1
|| inb(ioaddr + LNE390_SA_PROM + 2) != LNE390_ADDR2 ) {
printk("lne390.c: card not found");
- for(i = 0; i < ETHER_ADDR_LEN; i++)
+ for (i = 0; i < ETH_ALEN; i++)
printk(" %02x", inb(ioaddr + LNE390_SA_PROM + i));
printk(" (invalid prefix).\n");
return -ENODEV;
}
#endif
- for(i = 0; i < ETHER_ADDR_LEN; i++)
+ for (i = 0; i < ETH_ALEN; i++)
dev->dev_addr[i] = inb(ioaddr + LNE390_SA_PROM + i);
printk("lne390.c: LNE390%X in EISA slot %d, address %pM.\n",
0xa+revision, ioaddr/0x1000, dev->dev_addr);
diff --git a/drivers/net/ethernet/8390/ne-h8300.c b/drivers/net/ethernet/8390/ne-h8300.c
index cd36a6a5f408..9b9c77d5a65c 100644
--- a/drivers/net/ethernet/8390/ne-h8300.c
+++ b/drivers/net/ethernet/8390/ne-h8300.c
@@ -312,7 +312,7 @@ static int __init ne_probe1(struct net_device *dev, int ioaddr)
dev->base_addr = ioaddr;
- for(i = 0; i < ETHER_ADDR_LEN; i++)
+ for (i = 0; i < ETH_ALEN; i++)
dev->dev_addr[i] = SA_prom[i];
printk(" %pM\n", dev->dev_addr);
diff --git a/drivers/net/ethernet/8390/ne.c b/drivers/net/ethernet/8390/ne.c
index 1063093b3afc..f92ea2a65a57 100644
--- a/drivers/net/ethernet/8390/ne.c
+++ b/drivers/net/ethernet/8390/ne.c
@@ -503,12 +503,12 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr)
#ifdef CONFIG_PLAT_MAPPI
outb_p(E8390_NODMA + E8390_PAGE1 + E8390_STOP,
ioaddr + E8390_CMD); /* 0x61 */
- for (i = 0 ; i < ETHER_ADDR_LEN ; i++) {
+ for (i = 0; i < ETH_ALEN; i++) {
dev->dev_addr[i] = SA_prom[i]
= inb_p(ioaddr + EN1_PHYS_SHIFT(i));
}
#else
- for(i = 0; i < ETHER_ADDR_LEN; i++) {
+ for (i = 0; i < ETH_ALEN; i++) {
dev->dev_addr[i] = SA_prom[i];
}
#endif
diff --git a/drivers/net/ethernet/8390/ne2.c b/drivers/net/ethernet/8390/ne2.c
index 70cdc6996342..922b32036c63 100644
--- a/drivers/net/ethernet/8390/ne2.c
+++ b/drivers/net/ethernet/8390/ne2.c
@@ -460,7 +460,7 @@ static int __init ne2_probe1(struct net_device *dev, int slot)
dev->base_addr = base_addr;
- for(i = 0; i < ETHER_ADDR_LEN; i++)
+ for (i = 0; i < ETH_ALEN; i++)
dev->dev_addr[i] = SA_prom[i];
printk(" %pM\n", dev->dev_addr);
diff --git a/drivers/net/ethernet/8390/ne2k-pci.c b/drivers/net/ethernet/8390/ne2k-pci.c
index 39923425ba25..3fab04a0034a 100644
--- a/drivers/net/ethernet/8390/ne2k-pci.c
+++ b/drivers/net/ethernet/8390/ne2k-pci.c
@@ -639,9 +639,9 @@ static void ne2k_pci_get_drvinfo(struct net_device *dev,
struct ei_device *ei = netdev_priv(dev);
struct pci_dev *pci_dev = (struct pci_dev *) ei->priv;
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, pci_name(pci_dev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
}
static const struct ethtool_ops ne2k_pci_ethtool_ops = {
diff --git a/drivers/net/ethernet/8390/ne3210.c b/drivers/net/ethernet/8390/ne3210.c
index 243ed2aee88e..2a3e8057feae 100644
--- a/drivers/net/ethernet/8390/ne3210.c
+++ b/drivers/net/ethernet/8390/ne3210.c
@@ -125,7 +125,7 @@ static int __init ne3210_eisa_probe (struct device *device)
#endif
port_index = inb(ioaddr + NE3210_CFG2) >> 6;
- for(i = 0; i < ETHER_ADDR_LEN; i++)
+ for (i = 0; i < ETH_ALEN; i++)
dev->dev_addr[i] = inb(ioaddr + NE3210_SA_PROM + i);
printk("ne3210.c: NE3210 in EISA slot %d, media: %s, addr: %pM.\n",
edev->slot, ifmap[port_index], dev->dev_addr);
diff --git a/drivers/net/ethernet/8390/stnic.c b/drivers/net/ethernet/8390/stnic.c
index d85f0a84bc7b..3b903759980a 100644
--- a/drivers/net/ethernet/8390/stnic.c
+++ b/drivers/net/ethernet/8390/stnic.c
@@ -114,7 +114,7 @@ static int __init stnic_probe(void)
#ifdef CONFIG_SH_STANDARD_BIOS
sh_bios_get_node_addr (stnic_eadr);
#endif
- for (i = 0; i < ETHER_ADDR_LEN; i++)
+ for (i = 0; i < ETH_ALEN; i++)
dev->dev_addr[i] = stnic_eadr[i];
/* Set the base address to point to the NIC, not the "real" base! */
diff --git a/drivers/net/ethernet/8390/zorro8390.c b/drivers/net/ethernet/8390/zorro8390.c
index 3aa9fe9999b5..bcd27323b203 100644
--- a/drivers/net/ethernet/8390/zorro8390.c
+++ b/drivers/net/ethernet/8390/zorro8390.c
@@ -365,7 +365,7 @@ static int __devinit zorro8390_init(struct net_device *dev,
if (i)
return i;
- for (i = 0; i < ETHER_ADDR_LEN; i++)
+ for (i = 0; i < ETH_ALEN; i++)
dev->dev_addr[i] = SA_prom[i];
pr_debug("Found ethernet address: %pM\n", dev->dev_addr);
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 597f4d45c632..3474a61d4705 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -28,6 +28,7 @@ source "drivers/net/ethernet/cadence/Kconfig"
source "drivers/net/ethernet/adi/Kconfig"
source "drivers/net/ethernet/broadcom/Kconfig"
source "drivers/net/ethernet/brocade/Kconfig"
+source "drivers/net/ethernet/calxeda/Kconfig"
source "drivers/net/ethernet/chelsio/Kconfig"
source "drivers/net/ethernet/cirrus/Kconfig"
source "drivers/net/ethernet/cisco/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index be5dde040261..08d5f0388877 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -10,10 +10,11 @@ obj-$(CONFIG_NET_VENDOR_ALTEON) += alteon/
obj-$(CONFIG_NET_VENDOR_AMD) += amd/
obj-$(CONFIG_NET_VENDOR_APPLE) += apple/
obj-$(CONFIG_NET_VENDOR_ATHEROS) += atheros/
-obj-$(CONFIG_NET_ATMEL) += cadence/
+obj-$(CONFIG_NET_CADENCE) += cadence/
obj-$(CONFIG_NET_BFIN) += adi/
obj-$(CONFIG_NET_VENDOR_BROADCOM) += broadcom/
obj-$(CONFIG_NET_VENDOR_BROCADE) += brocade/
+obj-$(CONFIG_NET_CALXEDA_XGMAC) += calxeda/
obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/
obj-$(CONFIG_NET_VENDOR_CIRRUS) += cirrus/
obj-$(CONFIG_NET_VENDOR_CISCO) += cisco/
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index 6d9f6911000f..cb4f38a17f20 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -607,7 +607,7 @@ static const struct ethtool_ops ethtool_ops;
#ifdef VLAN_SUPPORT
-static void netdev_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+static int netdev_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
struct netdev_private *np = netdev_priv(dev);
@@ -617,9 +617,11 @@ static void netdev_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
set_bit(vid, np->active_vlans);
set_rx_mode(dev);
spin_unlock(&np->lock);
+
+ return 0;
}
-static void netdev_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+static int netdev_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
struct netdev_private *np = netdev_priv(dev);
@@ -629,6 +631,8 @@ static void netdev_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
clear_bit(vid, np->active_vlans);
set_rx_mode(dev);
spin_unlock(&np->lock);
+
+ return 0;
}
#endif /* VLAN_SUPPORT */
@@ -1842,9 +1846,9 @@ static int check_if_running(struct net_device *dev)
static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct netdev_private *np = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, pci_name(np->pci_dev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
index b6d69c91db96..d812a103e032 100644
--- a/drivers/net/ethernet/adi/bfin_mac.c
+++ b/drivers/net/ethernet/adi/bfin_mac.c
@@ -1670,7 +1670,8 @@ static int __devinit bfin_mii_bus_probe(struct platform_device *pdev)
miibus->name = "bfin_mii_bus";
miibus->phy_mask = mii_bus_pd->phy_mask;
- snprintf(miibus->id, MII_BUS_ID_SIZE, "0");
+ snprintf(miibus->id, MII_BUS_ID_SIZE, "%s-%x",
+ pdev->name, pdev->id);
miibus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
if (!miibus->irq)
goto out_err_irq_alloc;
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index 442fefa4f2ca..c885aa905dec 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -1623,18 +1623,7 @@ static struct platform_driver greth_of_driver = {
.remove = __devexit_p(greth_of_remove),
};
-static int __init greth_init(void)
-{
- return platform_driver_register(&greth_of_driver);
-}
-
-static void __exit greth_cleanup(void)
-{
- platform_driver_unregister(&greth_of_driver);
-}
-
-module_init(greth_init);
-module_exit(greth_cleanup);
+module_platform_driver(greth_of_driver);
MODULE_AUTHOR("Aeroflex Gaisler AB.");
MODULE_DESCRIPTION("Aeroflex Gaisler Ethernet MAC driver");
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index a9745f4ddbfe..33e0a8c20f6b 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -499,7 +499,7 @@ static int amd8111e_restart(struct net_device *dev)
writel( VAL0 | APAD_XMT | REX_RTRY, mmio + CMD2 );
/* Setting the MAC address to the device */
- for(i = 0; i < ETH_ADDR_LEN; i++)
+ for (i = 0; i < ETH_ALEN; i++)
writeb( dev->dev_addr[i], mmio + PADR + i );
/* Enable interrupt coalesce */
@@ -1412,10 +1412,11 @@ static void amd8111e_get_drvinfo(struct net_device* dev, struct ethtool_drvinfo
{
struct amd8111e_priv *lp = netdev_priv(dev);
struct pci_dev *pci_dev = lp->pci_dev;
- strcpy (info->driver, MODULE_NAME);
- strcpy (info->version, MODULE_VERS);
- sprintf(info->fw_version,"%u",chip_version);
- strcpy (info->bus_info, pci_name(pci_dev));
+ strlcpy(info->driver, MODULE_NAME, sizeof(info->driver));
+ strlcpy(info->version, MODULE_VERS, sizeof(info->version));
+ snprintf(info->fw_version, sizeof(info->fw_version),
+ "%u", chip_version);
+ strlcpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
}
static int amd8111e_get_regs_len(struct net_device *dev)
@@ -1549,7 +1550,7 @@ static int amd8111e_set_mac_address(struct net_device *dev, void *p)
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
spin_lock_irq(&lp->lock);
/* Setting the MAC address to the device */
- for(i = 0; i < ETH_ADDR_LEN; i++)
+ for (i = 0; i < ETH_ALEN; i++)
writeb( dev->dev_addr[i], lp->mmio + PADR + i );
spin_unlock_irq(&lp->lock);
@@ -1885,7 +1886,7 @@ static int __devinit amd8111e_probe_one(struct pci_dev *pdev,
}
/* Initializing MAC address */
- for(i = 0; i < ETH_ADDR_LEN; i++)
+ for (i = 0; i < ETH_ALEN; i++)
dev->dev_addr[i] = readb(lp->mmio + PADR + i);
/* Setting user defined parametrs */
diff --git a/drivers/net/ethernet/amd/amd8111e.h b/drivers/net/ethernet/amd/amd8111e.h
index 2ff2e7a12dd0..8baa3527ba74 100644
--- a/drivers/net/ethernet/amd/amd8111e.h
+++ b/drivers/net/ethernet/amd/amd8111e.h
@@ -586,7 +586,6 @@ typedef enum {
#define PKT_BUFF_SZ 1536
#define MIN_PKT_LEN 60
-#define ETH_ADDR_LEN 6
#define AMD8111E_TX_TIMEOUT (3 * HZ)/* 3 sec */
#define SOFT_TIMER_FREQ 0xBEBC /* 0.5 sec */
@@ -808,8 +807,8 @@ typedef enum {
static int card_idx;
static int speed_duplex[MAX_UNITS] = { 0, };
-static int coalesce[MAX_UNITS] = {1,1,1,1,1,1,1,1};
-static int dynamic_ipg[MAX_UNITS] = {0,0,0,0,0,0,0,0};
+static bool coalesce[MAX_UNITS] = { [ 0 ... MAX_UNITS-1] = true };
+static bool dynamic_ipg[MAX_UNITS] = { [ 0 ... MAX_UNITS-1] = false };
static unsigned int chip_version;
#endif /* _AMD8111E_H */
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index 4865ff14bebf..8b95dd314253 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -1171,7 +1171,8 @@ static int __devinit au1000_probe(struct platform_device *pdev)
aup->mii_bus->write = au1000_mdiobus_write;
aup->mii_bus->reset = au1000_mdiobus_reset;
aup->mii_bus->name = "au1000_eth_mii";
- snprintf(aup->mii_bus->id, MII_BUS_ID_SIZE, "%x", aup->mac_id);
+ snprintf(aup->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+ pdev->name, aup->mac_id);
aup->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
if (aup->mii_bus->irq == NULL)
goto err_out;
@@ -1339,18 +1340,7 @@ static struct platform_driver au1000_eth_driver = {
.owner = THIS_MODULE,
},
};
-MODULE_ALIAS("platform:au1000-eth");
-
-
-static int __init au1000_init_module(void)
-{
- return platform_driver_register(&au1000_eth_driver);
-}
-static void __exit au1000_exit_module(void)
-{
- platform_driver_unregister(&au1000_eth_driver);
-}
+module_platform_driver(au1000_eth_driver);
-module_init(au1000_init_module);
-module_exit(au1000_exit_module);
+MODULE_ALIAS("platform:au1000-eth");
diff --git a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c
index 3accd5d21b08..6be0dd67631a 100644
--- a/drivers/net/ethernet/amd/nmclan_cs.c
+++ b/drivers/net/ethernet/amd/nmclan_cs.c
@@ -160,8 +160,6 @@ Include Files
Defines
---------------------------------------------------------------------------- */
-#define ETHER_ADDR_LEN ETH_ALEN
- /* 6 bytes in an Ethernet Address */
#define MACE_LADRF_LEN 8
/* 8 bytes in Logical Address Filter */
@@ -600,7 +598,7 @@ static int mace_init(mace_private *lp, unsigned int ioaddr, char *enet_addr)
}
}
/* Set PADR register */
- for (i = 0; i < ETHER_ADDR_LEN; i++)
+ for (i = 0; i < ETH_ALEN; i++)
mace_write(lp, ioaddr, MACE_PADR, enet_addr[i]);
/* MAC Configuration Control Register should be written last */
@@ -639,11 +637,11 @@ static int nmclan_config(struct pcmcia_device *link)
/* Read the ethernet address from the CIS. */
len = pcmcia_get_tuple(link, 0x80, &buf);
- if (!buf || len < ETHER_ADDR_LEN) {
+ if (!buf || len < ETH_ALEN) {
kfree(buf);
goto failed;
}
- memcpy(dev->dev_addr, buf, ETHER_ADDR_LEN);
+ memcpy(dev->dev_addr, buf, ETH_ALEN);
kfree(buf);
/* Verify configuration by reading the MACE ID. */
@@ -822,9 +820,10 @@ static int mace_close(struct net_device *dev)
static void netdev_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- sprintf(info->bus_info, "PCMCIA 0x%lx", dev->base_addr);
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ snprintf(info->bus_info, sizeof(info->bus_info),
+ "PCMCIA 0x%lx", dev->base_addr);
}
static const struct ethtool_ops netdev_ethtool_ops = {
@@ -1420,7 +1419,7 @@ Output
static void set_multicast_list(struct net_device *dev)
{
mace_private *lp = netdev_priv(dev);
- int adr[ETHER_ADDR_LEN] = {0}; /* Ethernet address */
+ int adr[ETH_ALEN] = {0}; /* Ethernet address */
struct netdev_hw_addr *ha;
#ifdef PCMCIA_DEBUG
@@ -1442,7 +1441,7 @@ static void set_multicast_list(struct net_device *dev)
/* Calculate multicast logical address filter */
memset(lp->multicast_ladrf, 0, MACE_LADRF_LEN);
netdev_for_each_mc_addr(ha, dev) {
- memcpy(adr, ha->addr, ETHER_ADDR_LEN);
+ memcpy(adr, ha->addr, ETH_ALEN);
BuildLAF(lp->multicast_ladrf, adr);
}
}
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index f92bc6e34828..20e6dab0186c 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -711,12 +711,14 @@ static void pcnet32_get_drvinfo(struct net_device *dev,
{
struct pcnet32_private *lp = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
if (lp->pci_dev)
- strcpy(info->bus_info, pci_name(lp->pci_dev));
+ strlcpy(info->bus_info, pci_name(lp->pci_dev),
+ sizeof(info->bus_info));
else
- sprintf(info->bus_info, "VLB 0x%lx", dev->base_addr);
+ snprintf(info->bus_info, sizeof(info->bus_info),
+ "VLB 0x%lx", dev->base_addr);
}
static u32 pcnet32_get_link(struct net_device *dev)
diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c
index 8fda457f94cf..7ea16d32a5f5 100644
--- a/drivers/net/ethernet/amd/sunlance.c
+++ b/drivers/net/ethernet/amd/sunlance.c
@@ -1540,17 +1540,4 @@ static struct platform_driver sunlance_sbus_driver = {
.remove = __devexit_p(sunlance_sbus_remove),
};
-
-/* Find all the lance cards on the system and initialize them */
-static int __init sparc_lance_init(void)
-{
- return platform_driver_register(&sunlance_sbus_driver);
-}
-
-static void __exit sparc_lance_exit(void)
-{
- platform_driver_unregister(&sunlance_sbus_driver);
-}
-
-module_init(sparc_lance_init);
-module_exit(sparc_lance_exit);
+module_platform_driver(sunlance_sbus_driver);
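
Several drivers in this series (ax88796, greth, au1000, sunlance) drop hand-written init/exit functions in favour of module_platform_driver(), which generates the same register/unregister boilerplate. The sketch below is only a user-space analogue of that idea, not the kernel definition, which additionally wires the generated functions into module_init()/module_exit():

#include <stdio.h>

struct platform_driver { const char *name; };

static int platform_driver_register(struct platform_driver *drv)
{
	printf("registered %s\n", drv->name);
	return 0;
}

static void platform_driver_unregister(struct platform_driver *drv)
{
	printf("unregistered %s\n", drv->name);
}

/* One macro emits the init/exit pair instead of writing it per driver. */
#define module_platform_driver(__drv)					\
static int __drv##_init(void)						\
{									\
	return platform_driver_register(&__drv);			\
}									\
static void __drv##_exit(void)						\
{									\
	platform_driver_unregister(&__drv);				\
}									\
struct platform_driver *__drv##_self = &__drv	/* absorbs the trailing ';' */

static struct platform_driver sunlance_sbus_driver = { .name = "sunlance" };
module_platform_driver(sunlance_sbus_driver);

int main(void)
{
	int ret = sunlance_sbus_driver_init();

	if (!ret)
		sunlance_sbus_driver_exit();
	return ret;
}
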
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c b/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
index 7be884d0aaf6..0a9326aa58b5 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
@@ -232,7 +232,6 @@ static void atl1c_get_drvinfo(struct net_device *netdev,
strlcpy(drvinfo->driver, atl1c_driver_name, sizeof(drvinfo->driver));
strlcpy(drvinfo->version, atl1c_driver_version,
sizeof(drvinfo->version));
- strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
drvinfo->n_stats = 0;
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 02c7ed8d9eca..b8591246eb4c 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -411,7 +411,7 @@ static void atl1c_set_multi(struct net_device *netdev)
}
}
-static void __atl1c_vlan_mode(u32 features, u32 *mac_ctrl_data)
+static void __atl1c_vlan_mode(netdev_features_t features, u32 *mac_ctrl_data)
{
if (features & NETIF_F_HW_VLAN_RX) {
/* enable VLAN tag insert/strip */
@@ -422,7 +422,8 @@ static void __atl1c_vlan_mode(u32 features, u32 *mac_ctrl_data)
}
}
-static void atl1c_vlan_mode(struct net_device *netdev, u32 features)
+static void atl1c_vlan_mode(struct net_device *netdev,
+ netdev_features_t features)
{
struct atl1c_adapter *adapter = netdev_priv(netdev);
struct pci_dev *pdev = adapter->pdev;
@@ -482,7 +483,8 @@ static void atl1c_set_rxbufsize(struct atl1c_adapter *adapter,
roundup(mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN, 8) : AT_RX_BUF_SIZE;
}
-static u32 atl1c_fix_features(struct net_device *netdev, u32 features)
+static netdev_features_t atl1c_fix_features(struct net_device *netdev,
+ netdev_features_t features)
{
/*
* Since there is no support for separate rx/tx vlan accel
@@ -499,9 +501,10 @@ static u32 atl1c_fix_features(struct net_device *netdev, u32 features)
return features;
}
-static int atl1c_set_features(struct net_device *netdev, u32 features)
+static int atl1c_set_features(struct net_device *netdev,
+ netdev_features_t features)
{
- u32 changed = netdev->features ^ features;
+ netdev_features_t changed = netdev->features ^ features;
if (changed & NETIF_F_HW_VLAN_RX)
atl1c_vlan_mode(netdev, features);
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c b/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
index 6269438d365f..6e61f9f9ebb5 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
@@ -310,10 +310,12 @@ static void atl1e_get_drvinfo(struct net_device *netdev,
{
struct atl1e_adapter *adapter = netdev_priv(netdev);
- strncpy(drvinfo->driver, atl1e_driver_name, 32);
- strncpy(drvinfo->version, atl1e_driver_version, 32);
- strncpy(drvinfo->fw_version, "L1e", 32);
- strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+ strlcpy(drvinfo->driver, atl1e_driver_name, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, atl1e_driver_version,
+ sizeof(drvinfo->version));
+ strlcpy(drvinfo->fw_version, "L1e", sizeof(drvinfo->fw_version));
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info));
drvinfo->n_stats = 0;
drvinfo->testinfo_len = 0;
drvinfo->regdump_len = atl1e_get_regs_len(netdev);
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 95483bcac1d0..c915c0873810 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -313,7 +313,7 @@ static void atl1e_set_multi(struct net_device *netdev)
}
}
-static void __atl1e_vlan_mode(u32 features, u32 *mac_ctrl_data)
+static void __atl1e_vlan_mode(netdev_features_t features, u32 *mac_ctrl_data)
{
if (features & NETIF_F_HW_VLAN_RX) {
/* enable VLAN tag insert/strip */
@@ -324,7 +324,8 @@ static void __atl1e_vlan_mode(u32 features, u32 *mac_ctrl_data)
}
}
-static void atl1e_vlan_mode(struct net_device *netdev, u32 features)
+static void atl1e_vlan_mode(struct net_device *netdev,
+ netdev_features_t features)
{
struct atl1e_adapter *adapter = netdev_priv(netdev);
u32 mac_ctrl_data = 0;
@@ -370,7 +371,8 @@ static int atl1e_set_mac_addr(struct net_device *netdev, void *p)
return 0;
}
-static u32 atl1e_fix_features(struct net_device *netdev, u32 features)
+static netdev_features_t atl1e_fix_features(struct net_device *netdev,
+ netdev_features_t features)
{
/*
* Since there is no support for separate rx/tx vlan accel
@@ -384,9 +386,10 @@ static u32 atl1e_fix_features(struct net_device *netdev, u32 features)
return features;
}
-static int atl1e_set_features(struct net_device *netdev, u32 features)
+static int atl1e_set_features(struct net_device *netdev,
+ netdev_features_t features)
{
- u32 changed = netdev->features ^ features;
+ netdev_features_t changed = netdev->features ^ features;
if (changed & NETIF_F_HW_VLAN_RX)
atl1e_vlan_mode(netdev, features);
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index 33a4e35f5ee8..9bd204976648 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -3365,7 +3365,6 @@ static void atl1_get_drvinfo(struct net_device *netdev,
strlcpy(drvinfo->driver, ATLX_DRIVER_NAME, sizeof(drvinfo->driver));
strlcpy(drvinfo->version, ATLX_DRIVER_VERSION,
sizeof(drvinfo->version));
- strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
drvinfo->eedump_len = ATL1_EEDUMP_LEN;
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index 1feae5928a4b..071f4c858969 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -361,7 +361,7 @@ static inline void atl2_irq_disable(struct atl2_adapter *adapter)
synchronize_irq(adapter->pdev->irq);
}
-static void __atl2_vlan_mode(u32 features, u32 *ctrl)
+static void __atl2_vlan_mode(netdev_features_t features, u32 *ctrl)
{
if (features & NETIF_F_HW_VLAN_RX) {
/* enable VLAN tag insert/strip */
@@ -372,7 +372,8 @@ static void __atl2_vlan_mode(u32 features, u32 *ctrl)
}
}
-static void atl2_vlan_mode(struct net_device *netdev, u32 features)
+static void atl2_vlan_mode(struct net_device *netdev,
+ netdev_features_t features)
{
struct atl2_adapter *adapter = netdev_priv(netdev);
u32 ctrl;
@@ -391,7 +392,8 @@ static void atl2_restore_vlan(struct atl2_adapter *adapter)
atl2_vlan_mode(adapter->netdev, adapter->netdev->features);
}
-static u32 atl2_fix_features(struct net_device *netdev, u32 features)
+static netdev_features_t atl2_fix_features(struct net_device *netdev,
+ netdev_features_t features)
{
/*
* Since there is no support for separate rx/tx vlan accel
@@ -405,9 +407,10 @@ static u32 atl2_fix_features(struct net_device *netdev, u32 features)
return features;
}
-static int atl2_set_features(struct net_device *netdev, u32 features)
+static int atl2_set_features(struct net_device *netdev,
+ netdev_features_t features)
{
- u32 changed = netdev->features ^ features;
+ netdev_features_t changed = netdev->features ^ features;
if (changed & NETIF_F_HW_VLAN_RX)
atl2_vlan_mode(netdev, features);
@@ -2049,10 +2052,12 @@ static void atl2_get_drvinfo(struct net_device *netdev,
{
struct atl2_adapter *adapter = netdev_priv(netdev);
- strncpy(drvinfo->driver, atl2_driver_name, 32);
- strncpy(drvinfo->version, atl2_driver_version, 32);
- strncpy(drvinfo->fw_version, "L2", 32);
- strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+ strlcpy(drvinfo->driver, atl2_driver_name, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, atl2_driver_version,
+ sizeof(drvinfo->version));
+ strlcpy(drvinfo->fw_version, "L2", sizeof(drvinfo->fw_version));
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info));
drvinfo->n_stats = 0;
drvinfo->testinfo_len = 0;
drvinfo->regdump_len = atl2_get_regs_len(netdev);
diff --git a/drivers/net/ethernet/atheros/atlx/atlx.c b/drivers/net/ethernet/atheros/atlx/atlx.c
index aabcf4b5745a..8ff7411094d5 100644
--- a/drivers/net/ethernet/atheros/atlx/atlx.c
+++ b/drivers/net/ethernet/atheros/atlx/atlx.c
@@ -211,7 +211,7 @@ static void atlx_link_chg_task(struct work_struct *work)
spin_unlock_irqrestore(&adapter->lock, flags);
}
-static void __atlx_vlan_mode(u32 features, u32 *ctrl)
+static void __atlx_vlan_mode(netdev_features_t features, u32 *ctrl)
{
if (features & NETIF_F_HW_VLAN_RX) {
/* enable VLAN tag insert/strip */
@@ -222,7 +222,8 @@ static void __atlx_vlan_mode(u32 features, u32 *ctrl)
}
}
-static void atlx_vlan_mode(struct net_device *netdev, u32 features)
+static void atlx_vlan_mode(struct net_device *netdev,
+ netdev_features_t features)
{
struct atlx_adapter *adapter = netdev_priv(netdev);
unsigned long flags;
@@ -242,7 +243,8 @@ static void atlx_restore_vlan(struct atlx_adapter *adapter)
atlx_vlan_mode(adapter->netdev, adapter->netdev->features);
}
-static u32 atlx_fix_features(struct net_device *netdev, u32 features)
+static netdev_features_t atlx_fix_features(struct net_device *netdev,
+ netdev_features_t features)
{
/*
* Since there is no support for separate rx/tx vlan accel
@@ -256,9 +258,10 @@ static u32 atlx_fix_features(struct net_device *netdev, u32 features)
return features;
}
-static int atlx_set_features(struct net_device *netdev, u32 features)
+static int atlx_set_features(struct net_device *netdev,
+ netdev_features_t features)
{
- u32 changed = netdev->features ^ features;
+ netdev_features_t changed = netdev->features ^ features;
if (changed & NETIF_F_HW_VLAN_RX)
atlx_vlan_mode(netdev, features);
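
The atl1c/atl1e/atl1/atl2/atlx and bnx2 hunks make the same mechanical type change: feature masks move from u32 to netdev_features_t, a 64-bit type, so the ndo_fix_features()/ndo_set_features() callbacks and their helpers must use it or feature bits above 31 would be silently truncated. A stand-alone sketch of the keep-TX-VLAN-in-step-with-RX rule these fix_features callbacks appear to implement (the bit positions are invented for the demo, not the kernel's):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t netdev_features_t;	/* the kernel type is a 64-bit mask */

#define F_HW_VLAN_RX	(1ULL << 8)	/* demo values only */
#define F_HW_VLAN_TX	(1ULL << 9)

/* These adapters cannot toggle RX and TX VLAN acceleration independently,
 * so fix_features forces the TX flag to follow the RX flag. */
static netdev_features_t fix_features(netdev_features_t features)
{
	if (features & F_HW_VLAN_RX)
		features |= F_HW_VLAN_TX;
	else
		features &= ~F_HW_VLAN_TX;
	return features;
}

int main(void)
{
	printf("%#llx\n", (unsigned long long)fix_features(F_HW_VLAN_RX));
	printf("%#llx\n", (unsigned long long)fix_features(F_HW_VLAN_TX));
	return 0;
}
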
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 4cf835dbc122..3fb66d09ece5 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -608,7 +608,7 @@ static void b44_tx(struct b44 *bp)
skb->len,
DMA_TO_DEVICE);
rp->skb = NULL;
- dev_kfree_skb(skb);
+ dev_kfree_skb_irq(skb);
}
bp->tx_cons = cons;
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index a11a8ad94226..986019b2c849 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -1469,7 +1469,7 @@ static int bcm_enet_set_pauseparam(struct net_device *dev,
return 0;
}
-static struct ethtool_ops bcm_enet_ethtool_ops = {
+static const struct ethtool_ops bcm_enet_ethtool_ops = {
.get_strings = bcm_enet_get_strings,
.get_sset_count = bcm_enet_get_sset_count,
.get_ethtool_stats = bcm_enet_get_ethtool_stats,
@@ -1727,7 +1727,7 @@ static int __devinit bcm_enet_probe(struct platform_device *pdev)
bus->priv = priv;
bus->read = bcm_enet_mdio_read_phylib;
bus->write = bcm_enet_mdio_write_phylib;
- sprintf(bus->id, "%d", priv->mac_id);
+ sprintf(bus->id, "%s-%d", pdev->name, priv->mac_id);
/* only probe bus where we think the PHY is, because
* the mdio read operation return 0 instead of 0xffff
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 965c7235804d..021fb818007a 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -57,11 +57,11 @@
#include "bnx2_fw.h"
#define DRV_MODULE_NAME "bnx2"
-#define DRV_MODULE_VERSION "2.1.11"
-#define DRV_MODULE_RELDATE "July 20, 2011"
-#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.2.1.fw"
+#define DRV_MODULE_VERSION "2.2.1"
+#define DRV_MODULE_RELDATE "Dec 18, 2011"
+#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.2.3.fw"
#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-6.0.15.fw"
-#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.2.1a.fw"
+#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.2.1b.fw"
#define FW_RV2P_FILE_09_Ax "bnx2/bnx2-rv2p-09ax-6.0.17.fw"
#define FW_RV2P_FILE_09 "bnx2/bnx2-rv2p-09-6.0.17.fw"
@@ -409,7 +409,7 @@ static int bnx2_unregister_cnic(struct net_device *dev)
mutex_lock(&bp->cnic_lock);
cp->drv_state = 0;
bnapi->cnic_present = 0;
- rcu_assign_pointer(bp->cnic_ops, NULL);
+ RCU_INIT_POINTER(bp->cnic_ops, NULL);
mutex_unlock(&bp->cnic_lock);
synchronize_rcu();
return 0;
@@ -2054,8 +2054,8 @@ __acquires(&bp->phy_lock)
if (bp->autoneg & AUTONEG_SPEED) {
u32 adv_reg, adv1000_reg;
- u32 new_adv_reg = 0;
- u32 new_adv1000_reg = 0;
+ u32 new_adv = 0;
+ u32 new_adv1000 = 0;
bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
@@ -2064,27 +2064,18 @@ __acquires(&bp->phy_lock)
bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
adv1000_reg &= PHY_ALL_1000_SPEED;
- if (bp->advertising & ADVERTISED_10baseT_Half)
- new_adv_reg |= ADVERTISE_10HALF;
- if (bp->advertising & ADVERTISED_10baseT_Full)
- new_adv_reg |= ADVERTISE_10FULL;
- if (bp->advertising & ADVERTISED_100baseT_Half)
- new_adv_reg |= ADVERTISE_100HALF;
- if (bp->advertising & ADVERTISED_100baseT_Full)
- new_adv_reg |= ADVERTISE_100FULL;
- if (bp->advertising & ADVERTISED_1000baseT_Full)
- new_adv1000_reg |= ADVERTISE_1000FULL;
+ new_adv = ethtool_adv_to_mii_adv_t(bp->advertising);
+ new_adv |= ADVERTISE_CSMA;
+ new_adv |= bnx2_phy_get_pause_adv(bp);
- new_adv_reg |= ADVERTISE_CSMA;
+ new_adv1000 |= ethtool_adv_to_mii_ctrl1000_t(bp->advertising);
- new_adv_reg |= bnx2_phy_get_pause_adv(bp);
-
- if ((adv1000_reg != new_adv1000_reg) ||
- (adv_reg != new_adv_reg) ||
+ if ((adv1000_reg != new_adv1000) ||
+ (adv_reg != new_adv) ||
((bmcr & BMCR_ANENABLE) == 0)) {
- bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
- bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
+ bnx2_write_phy(bp, bp->mii_adv, new_adv);
+ bnx2_write_phy(bp, MII_CTRL1000, new_adv1000);
bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
BMCR_ANENABLE);
}
@@ -2734,31 +2725,27 @@ bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
}
static inline int
-bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
+bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
{
- struct sk_buff *skb;
+ u8 *data;
struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
dma_addr_t mapping;
struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
- unsigned long align;
- skb = __netdev_alloc_skb(bp->dev, bp->rx_buf_size, gfp);
- if (skb == NULL) {
+ data = kmalloc(bp->rx_buf_size, gfp);
+ if (!data)
return -ENOMEM;
- }
- if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
- skb_reserve(skb, BNX2_RX_ALIGN - align);
-
- mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_use_size,
+ mapping = dma_map_single(&bp->pdev->dev,
+ get_l2_fhdr(data),
+ bp->rx_buf_use_size,
PCI_DMA_FROMDEVICE);
if (dma_mapping_error(&bp->pdev->dev, mapping)) {
- dev_kfree_skb(skb);
+ kfree(data);
return -EIO;
}
- rx_buf->skb = skb;
- rx_buf->desc = (struct l2_fhdr *) skb->data;
+ rx_buf->data = data;
dma_unmap_addr_set(rx_buf, mapping, mapping);
rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
@@ -2823,6 +2810,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
u16 hw_cons, sw_cons, sw_ring_cons;
int tx_pkt = 0, index;
+ unsigned int tx_bytes = 0;
struct netdev_queue *txq;
index = (bnapi - bp->bnx2_napi);
@@ -2877,6 +2865,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
sw_cons = NEXT_TX_BD(sw_cons);
+ tx_bytes += skb->len;
dev_kfree_skb(skb);
tx_pkt++;
if (tx_pkt == budget)
@@ -2886,6 +2875,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
hw_cons = bnx2_get_hw_tx_cons(bnapi);
}
+ netdev_tx_completed_queue(txq, tx_pkt, tx_bytes);
txr->hw_tx_cons = hw_cons;
txr->tx_cons = sw_cons;
@@ -2965,8 +2955,8 @@ bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
}
static inline void
-bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
- struct sk_buff *skb, u16 cons, u16 prod)
+bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
+ u8 *data, u16 cons, u16 prod)
{
struct sw_bd *cons_rx_buf, *prod_rx_buf;
struct rx_bd *cons_bd, *prod_bd;
@@ -2980,8 +2970,7 @@ bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
rxr->rx_prod_bseq += bp->rx_buf_use_size;
- prod_rx_buf->skb = skb;
- prod_rx_buf->desc = (struct l2_fhdr *) skb->data;
+ prod_rx_buf->data = data;
if (cons == prod)
return;
@@ -2995,33 +2984,39 @@ bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
-static int
-bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
+static struct sk_buff *
+bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u8 *data,
unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
u32 ring_idx)
{
int err;
u16 prod = ring_idx & 0xffff;
+ struct sk_buff *skb;
- err = bnx2_alloc_rx_skb(bp, rxr, prod, GFP_ATOMIC);
+ err = bnx2_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
if (unlikely(err)) {
- bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
+ bnx2_reuse_rx_data(bp, rxr, data, (u16) (ring_idx >> 16), prod);
+error:
if (hdr_len) {
unsigned int raw_len = len + 4;
int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
}
- return err;
+ return NULL;
}
- skb_reserve(skb, BNX2_RX_OFFSET);
dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
PCI_DMA_FROMDEVICE);
-
+ skb = build_skb(data);
+ if (!skb) {
+ kfree(data);
+ goto error;
+ }
+ skb_reserve(skb, ((u8 *)get_l2_fhdr(data) - data) + BNX2_RX_OFFSET);
if (hdr_len == 0) {
skb_put(skb, len);
- return 0;
+ return skb;
} else {
unsigned int i, frag_len, frag_size, pages;
struct sw_pg *rx_pg;
@@ -3052,7 +3047,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
skb_frag_size_sub(frag, tail);
skb->data_len -= tail;
}
- return 0;
+ return skb;
}
rx_pg = &rxr->rx_pg_ring[pg_cons];
@@ -3074,7 +3069,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
rxr->rx_pg_prod = pg_prod;
bnx2_reuse_rx_skb_pages(bp, rxr, skb,
pages - i);
- return err;
+ return NULL;
}
dma_unmap_page(&bp->pdev->dev, mapping_old,
@@ -3091,7 +3086,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
rxr->rx_pg_prod = pg_prod;
rxr->rx_pg_cons = pg_cons;
}
- return 0;
+ return skb;
}
static inline u16
@@ -3130,19 +3125,17 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
struct sw_bd *rx_buf, *next_rx_buf;
struct sk_buff *skb;
dma_addr_t dma_addr;
+ u8 *data;
sw_ring_cons = RX_RING_IDX(sw_cons);
sw_ring_prod = RX_RING_IDX(sw_prod);
rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
- skb = rx_buf->skb;
- prefetchw(skb);
-
- next_rx_buf =
- &rxr->rx_buf_ring[RX_RING_IDX(NEXT_RX_BD(sw_cons))];
- prefetch(next_rx_buf->desc);
+ data = rx_buf->data;
+ rx_buf->data = NULL;
- rx_buf->skb = NULL;
+ rx_hdr = get_l2_fhdr(data);
+ prefetch(rx_hdr);
dma_addr = dma_unmap_addr(rx_buf, mapping);
@@ -3150,7 +3143,10 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
PCI_DMA_FROMDEVICE);
- rx_hdr = rx_buf->desc;
+ next_rx_buf =
+ &rxr->rx_buf_ring[RX_RING_IDX(NEXT_RX_BD(sw_cons))];
+ prefetch(get_l2_fhdr(next_rx_buf->data));
+
len = rx_hdr->l2_fhdr_pkt_len;
status = rx_hdr->l2_fhdr_status;
@@ -3169,7 +3165,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
L2_FHDR_ERRORS_TOO_SHORT |
L2_FHDR_ERRORS_GIANT_FRAME))) {
- bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
+ bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
sw_ring_prod);
if (pg_ring_used) {
int pages;
@@ -3184,30 +3180,29 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
len -= 4;
if (len <= bp->rx_copy_thresh) {
- struct sk_buff *new_skb;
-
- new_skb = netdev_alloc_skb(bp->dev, len + 6);
- if (new_skb == NULL) {
- bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
+ skb = netdev_alloc_skb(bp->dev, len + 6);
+ if (skb == NULL) {
+ bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
sw_ring_prod);
goto next_rx;
}
/* aligned copy */
- skb_copy_from_linear_data_offset(skb,
- BNX2_RX_OFFSET - 6,
- new_skb->data, len + 6);
- skb_reserve(new_skb, 6);
- skb_put(new_skb, len);
+ memcpy(skb->data,
+ (u8 *)rx_hdr + BNX2_RX_OFFSET - 6,
+ len + 6);
+ skb_reserve(skb, 6);
+ skb_put(skb, len);
- bnx2_reuse_rx_skb(bp, rxr, skb,
+ bnx2_reuse_rx_data(bp, rxr, data,
sw_ring_cons, sw_ring_prod);
- skb = new_skb;
- } else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
- dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
- goto next_rx;
-
+ } else {
+ skb = bnx2_rx_skb(bp, rxr, data, len, hdr_len, dma_addr,
+ (sw_ring_cons << 16) | sw_ring_prod);
+ if (!skb)
+ goto next_rx;
+ }
if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
__vlan_hwaccel_put_tag(skb, rx_hdr->l2_fhdr_vlan_tag);
@@ -5234,7 +5229,7 @@ bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
ring_prod = prod = rxr->rx_prod;
for (i = 0; i < bp->rx_ring_size; i++) {
- if (bnx2_alloc_rx_skb(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
+ if (bnx2_alloc_rx_data(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
ring_num, i, bp->rx_ring_size);
break;
@@ -5329,7 +5324,7 @@ bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
- sizeof(struct skb_shared_info);
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
bp->rx_pg_ring_size = 0;
@@ -5351,8 +5346,9 @@ bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
}
bp->rx_buf_use_size = rx_size;
- /* hw alignment */
- bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
+	/* hw alignment + build_skb() overhead */
+ bp->rx_buf_size = SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) +
+ NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
bp->rx_ring_size = size;
bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
@@ -5400,6 +5396,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
}
dev_kfree_skb(skb);
}
+ netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
}
}
@@ -5418,9 +5415,9 @@ bnx2_free_rx_skbs(struct bnx2 *bp)
for (j = 0; j < bp->rx_max_ring_idx; j++) {
struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
- struct sk_buff *skb = rx_buf->skb;
+ u8 *data = rx_buf->data;
- if (skb == NULL)
+ if (data == NULL)
continue;
dma_unmap_single(&bp->pdev->dev,
@@ -5428,9 +5425,9 @@ bnx2_free_rx_skbs(struct bnx2 *bp)
bp->rx_buf_use_size,
PCI_DMA_FROMDEVICE);
- rx_buf->skb = NULL;
+ rx_buf->data = NULL;
- dev_kfree_skb(skb);
+ kfree(data);
}
for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
bnx2_free_rx_page(bp, rxr, j);
@@ -5736,7 +5733,8 @@ static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
unsigned int pkt_size, num_pkts, i;
- struct sk_buff *skb, *rx_skb;
+ struct sk_buff *skb;
+ u8 *data;
unsigned char *packet;
u16 rx_start_idx, rx_idx;
dma_addr_t map;
@@ -5828,14 +5826,14 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
}
rx_buf = &rxr->rx_buf_ring[rx_start_idx];
- rx_skb = rx_buf->skb;
+ data = rx_buf->data;
- rx_hdr = rx_buf->desc;
- skb_reserve(rx_skb, BNX2_RX_OFFSET);
+ rx_hdr = get_l2_fhdr(data);
+ data = (u8 *)rx_hdr + BNX2_RX_OFFSET;
dma_sync_single_for_cpu(&bp->pdev->dev,
dma_unmap_addr(rx_buf, mapping),
- bp->rx_buf_size, PCI_DMA_FROMDEVICE);
+ bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
if (rx_hdr->l2_fhdr_status &
(L2_FHDR_ERRORS_BAD_CRC |
@@ -5852,7 +5850,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
}
for (i = 14; i < pkt_size; i++) {
- if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
+ if (*(data + i) != (unsigned char) (i & 0xff)) {
goto loopback_test_done;
}
}
@@ -6552,6 +6550,8 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
+ netdev_tx_sent_queue(txq, skb->len);
+
prod = NEXT_TX_BD(prod);
txr->tx_prod_bseq += skb->len;
@@ -6873,10 +6873,10 @@ bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct bnx2 *bp = netdev_priv(dev);
- strcpy(info->driver, DRV_MODULE_NAME);
- strcpy(info->version, DRV_MODULE_VERSION);
- strcpy(info->bus_info, pci_name(bp->pdev));
- strcpy(info->fw_version, bp->fw_version);
+ strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
+ strlcpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
}
#define BNX2_REGDUMP_LEN (32 * 1024)
@@ -7571,8 +7571,8 @@ bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
return 0;
}
-static u32
-bnx2_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t
+bnx2_fix_features(struct net_device *dev, netdev_features_t features)
{
struct bnx2 *bp = netdev_priv(dev);
@@ -7583,7 +7583,7 @@ bnx2_fix_features(struct net_device *dev, u32 features)
}
static int
-bnx2_set_features(struct net_device *dev, u32 features)
+bnx2_set_features(struct net_device *dev, netdev_features_t features)
{
struct bnx2 *bp = netdev_priv(dev);
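
Besides the build_skb() conversion, the bnx2.c changes above add byte queue limits accounting: netdev_tx_sent_queue() in the transmit path, netdev_tx_completed_queue() after TX completion, and netdev_tx_reset_queue() when the TX rings are freed. A toy model of how those three calls pair up (the real core tracks adaptive limits per queue; this reduces it to a single counter purely for illustration):

#include <stdio.h>

/* Toy model: the driver reports bytes handed to the hardware and bytes
 * completed; the core uses the difference to decide when to stop the queue. */
static unsigned int in_flight;

static void tx_sent_queue(unsigned int bytes)      { in_flight += bytes; }
static void tx_completed_queue(unsigned int bytes) { in_flight -= bytes; }
static void tx_reset_queue(void)                   { in_flight = 0; }

int main(void)
{
	tx_sent_queue(1514);	/* bnx2_start_xmit(): netdev_tx_sent_queue(txq, skb->len) */
	tx_sent_queue(60);
	tx_completed_queue(1514); /* bnx2_tx_int(): netdev_tx_completed_queue(txq, pkts, bytes) */
	printf("bytes still in flight: %u\n", in_flight);
	tx_reset_queue();	/* bnx2_free_tx_skbs(): netdev_tx_reset_queue() */
	printf("after reset: %u\n", in_flight);
	return 0;
}
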
diff --git a/drivers/net/ethernet/broadcom/bnx2.h b/drivers/net/ethernet/broadcom/bnx2.h
index 99d31a7d6aaa..1db2d51ba3f1 100644
--- a/drivers/net/ethernet/broadcom/bnx2.h
+++ b/drivers/net/ethernet/broadcom/bnx2.h
@@ -6563,12 +6563,25 @@ struct l2_fhdr {
#define MB_TX_CID_ADDR MB_GET_CID_ADDR(TX_CID)
#define MB_RX_CID_ADDR MB_GET_CID_ADDR(RX_CID)
+/*
+ * This driver uses the new build_skb() API:
+ * the RX ring buffer contains pointers to kmalloc()'ed data only;
+ * skbs are built only after the hardware has filled the frame.
+ */
struct sw_bd {
- struct sk_buff *skb;
- struct l2_fhdr *desc;
+ u8 *data;
DEFINE_DMA_UNMAP_ADDR(mapping);
};
+/* It's faster to compute this from data than to store it in sw_bd
+ * (fewer cache misses)
+ */
+static inline struct l2_fhdr *get_l2_fhdr(u8 *data)
+{
+ return (struct l2_fhdr *)(PTR_ALIGN(data, BNX2_RX_ALIGN) + NET_SKB_PAD);
+}
+
+
struct sw_pg {
struct page *page;
DEFINE_DMA_UNMAP_ADDR(mapping);
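
get_l2_fhdr() above recomputes where the hardware placed the l2_fhdr inside the raw kmalloc() buffer rather than caching a second pointer in sw_bd. A user-space sketch of that pointer arithmetic, with PTR_ALIGN() reimplemented and the alignment/pad constants chosen only for illustration:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define RX_ALIGN	16UL	/* stand-in for BNX2_RX_ALIGN */
#define SKB_PAD		32UL	/* stand-in for NET_SKB_PAD */

/* User-space equivalent of the kernel's PTR_ALIGN(): round a pointer up
 * to the next 'align' boundary (align must be a power of two). */
static void *ptr_align(void *p, unsigned long align)
{
	return (void *)(((uintptr_t)p + align - 1) & ~(align - 1));
}

/* Mirror of get_l2_fhdr(): the frame header lives SKB_PAD bytes past the
 * aligned start of the raw buffer, so it never needs to be stored. */
static void *frame_hdr(uint8_t *data)
{
	return (uint8_t *)ptr_align(data, RX_ALIGN) + SKB_PAD;
}

int main(void)
{
	uint8_t *buf = malloc(2048);

	if (!buf)
		return 1;
	printf("buffer %p -> header at %p\n", (void *)buf, frame_hdr(buf));
	free(buf);
	return 0;
}
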
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index aec7212ac983..8c73d34b2ff1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -23,8 +23,8 @@
* (you will need to reboot afterwards) */
/* #define BNX2X_STOP_ON_ERROR */
-#define DRV_MODULE_VERSION "1.70.30-0"
-#define DRV_MODULE_RELDATE "2011/10/25"
+#define DRV_MODULE_VERSION "1.70.35-0"
+#define DRV_MODULE_RELDATE "2011/11/10"
#define BNX2X_BC_VER 0x040200
#if defined(CONFIG_DCB)
@@ -293,8 +293,13 @@ enum {
#define FCOE_TXQ_IDX(bp) (MAX_ETH_TXQ_IDX(bp))
/* fast path */
+/*
+ * This driver uses the new build_skb() API:
+ * the RX ring buffer contains pointers to kmalloc()'ed data only;
+ * skbs are built only after the hardware has filled the frame.
+ */
struct sw_rx_bd {
- struct sk_buff *skb;
+ u8 *data;
DEFINE_DMA_UNMAP_ADDR(mapping);
};
@@ -411,8 +416,7 @@ union db_prod {
/* Number of u64 elements in SGE mask array */
-#define RX_SGE_MASK_LEN ((NUM_RX_SGE_PAGES * RX_SGE_CNT) / \
- BIT_VEC64_ELEM_SZ)
+#define RX_SGE_MASK_LEN (NUM_RX_SGE / BIT_VEC64_ELEM_SZ)
#define RX_SGE_MASK_LEN_MASK (RX_SGE_MASK_LEN - 1)
#define NEXT_SGE_MASK_ELEM(el) (((el) + 1) & RX_SGE_MASK_LEN_MASK)
@@ -425,8 +429,8 @@ union host_hc_status_block {
struct bnx2x_agg_info {
/*
- * First aggregation buffer is an skb, the following - are pages.
- * We will preallocate the skbs for each aggregation when
+ * First aggregation buffer is a data buffer, the following - are pages.
+ * We will preallocate the data buffer for each aggregation when
* we open the interface and will replace the BD at the consumer
* with this one when we receive the TPA_START CQE in order to
* keep the Rx BD ring consistent.
@@ -440,6 +444,7 @@ struct bnx2x_agg_info {
u16 parsing_flags;
u16 vlan_tag;
u16 len_on_bd;
+ u32 rxhash;
};
#define Q_STATS_OFFSET32(stat_name) \
@@ -507,6 +512,7 @@ struct bnx2x_fastpath {
__le16 fp_hc_idx;
u8 index; /* number in fp array */
+ u8 rx_queue; /* index for skb_record */
u8 cl_id; /* eth client id */
u8 cl_qzone_id;
u8 fw_sb_id; /* status block number in FW */
@@ -881,6 +887,8 @@ struct bnx2x_common {
#define CHIP_PORT_MODE_NONE 0x2
#define CHIP_MODE(bp) (bp->common.chip_port_mode)
#define CHIP_MODE_IS_4_PORT(bp) (CHIP_MODE(bp) == CHIP_4_PORT_MODE)
+
+ u32 boot_mode;
};
/* IGU MSIX STATISTICS on 57712: 64 for VFs; 4 for PFs; 4 for Attentions */
@@ -1042,6 +1050,8 @@ struct bnx2x_slowpath {
u32 wb_comp;
u32 wb_data[4];
+
+ union drv_info_to_mcp drv_info_to_mcp;
};
#define bnx2x_sp(bp, var) (&bp->slowpath->var)
@@ -1122,18 +1132,21 @@ enum {
enum {
BNX2X_PORT_QUERY_IDX,
BNX2X_PF_QUERY_IDX,
+ BNX2X_FCOE_QUERY_IDX,
BNX2X_FIRST_QUEUE_QUERY_IDX,
};
struct bnx2x_fw_stats_req {
struct stats_query_header hdr;
- struct stats_query_entry query[STATS_QUERY_CMD_COUNT];
+ struct stats_query_entry query[FP_SB_MAX_E1x+
+ BNX2X_FIRST_QUEUE_QUERY_IDX];
};
struct bnx2x_fw_stats_data {
struct stats_counter storm_counters;
struct per_port_stats port;
struct per_pf_stats pf;
+ struct fcoe_statistics_params fcoe;
struct per_queue_stats queue_stats[1];
};
@@ -1141,6 +1154,7 @@ struct bnx2x_fw_stats_data {
enum {
BNX2X_SP_RTNL_SETUP_TC,
BNX2X_SP_RTNL_TX_TIMEOUT,
+ BNX2X_SP_RTNL_FAN_FAILURE,
};
@@ -1186,10 +1200,20 @@ struct bnx2x {
#define ETH_MAX_JUMBO_PACKET_SIZE 9600
/* Max supported alignment is 256 (8 shift) */
-#define BNX2X_RX_ALIGN_SHIFT ((L1_CACHE_SHIFT < 8) ? \
- L1_CACHE_SHIFT : 8)
- /* FW use 2 Cache lines Alignment for start packet and size */
-#define BNX2X_FW_RX_ALIGN (2 << BNX2X_RX_ALIGN_SHIFT)
+#define BNX2X_RX_ALIGN_SHIFT min(8, L1_CACHE_SHIFT)
+
+	/* FW uses 2 cache lines alignment for start packet and size
+	 *
+	 * We assume build_skb() uses sizeof(struct skb_shared_info) bytes
+	 * at the end of skb->data, to avoid wasting a full cache line.
+	 * This reduces memory use (skb->truesize).
+	 */
+#define BNX2X_FW_RX_ALIGN_START (1UL << BNX2X_RX_ALIGN_SHIFT)
+
+#define BNX2X_FW_RX_ALIGN_END \
+ max(1UL << BNX2X_RX_ALIGN_SHIFT, \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+
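The BNX2X_FW_RX_ALIGN_END definition above reserves tail room for the skb_shared_info that build_skb() places at the end of the buffer. A rough sketch of how the pieces combine into a buffer size follows; this is a demo helper under the stated assumptions, not driver code (the real formula appears later in bnx2x_set_rx_buf_size()).

/* Demo helper only; assumes the Ethernet/padding overhead constants are
 * already folded into 'mtu'.
 */
#include <linux/kernel.h>
#include <linux/cache.h>
#include <linux/skbuff.h>

static inline unsigned int demo_rx_buf_size(unsigned int mtu)
{
	unsigned int start = 1UL << min(8, L1_CACHE_SHIFT);
	unsigned int end = max_t(unsigned int, start,
				 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));

	/* FW start padding, payload, tail room for skb_shared_info */
	return start + mtu + end;
}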
#define BNX2X_PXP_DRAM_ALIGN (BNX2X_RX_ALIGN_SHIFT - 5)
struct host_sp_status_block *def_status_blk;
@@ -1249,6 +1273,7 @@ struct bnx2x {
#define NO_ISCSI_OOO_FLAG (1 << 13)
#define NO_ISCSI_FLAG (1 << 14)
#define NO_FCOE_FLAG (1 << 15)
+#define BC_SUPPORTS_PFC_STATS (1 << 17)
#define NO_ISCSI(bp) ((bp)->flags & NO_ISCSI_FLAG)
#define NO_ISCSI_OOO(bp) ((bp)->flags & NO_ISCSI_OOO_FLAG)
@@ -1984,13 +2009,6 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define HW_PRTY_ASSERT_SET_4 (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | \
AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)
-#define RSS_FLAGS(bp) \
- (TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY | \
- TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY | \
- TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY | \
- TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY | \
- (bp->multi_mode << \
- TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT))
#define MULTI_MASK 0x7f
@@ -2055,6 +2073,8 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define BNX2X_VPD_LEN 128
#define VENDOR_ID_LEN 4
+int bnx2x_close(struct net_device *dev);
+
/* Congestion management fairness mode */
#define CMNG_FNS_NONE 0
#define CMNG_FNS_MINMAX 1
@@ -2072,4 +2092,16 @@ static const u32 dmae_reg_go_c[] = {
void bnx2x_set_ethtool_ops(struct net_device *netdev);
void bnx2x_notify_link_changed(struct bnx2x *bp);
+
+
+#define BNX2X_MF_PROTOCOL(bp) \
+ ((bp)->mf_config[BP_VN(bp)] & FUNC_MF_CFG_PROTOCOL_MASK)
+
+#ifdef BCM_CNIC
+#define BNX2X_IS_MF_PROTOCOL_ISCSI(bp) \
+ (BNX2X_MF_PROTOCOL(bp) == FUNC_MF_CFG_PROTOCOL_ISCSI)
+
+#define IS_MF_ISCSI_SD(bp) (IS_MF_SD(bp) && BNX2X_IS_MF_PROTOCOL_ISCSI(bp))
+#endif
+
#endif /* bnx2x.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 580b44edb066..2b731b253598 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -79,19 +79,21 @@ static inline void bnx2x_bz_fp(struct bnx2x *bp, int index)
* @to: destination FP index
*
* Makes sure the contents of the bp->fp[to].napi is kept
- * intact.
+ * intact. This is done by first copying the napi struct from
+ * the target to the source, and then memcpy()ing the entire
+ * source onto the target.
*/
static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
{
struct bnx2x_fastpath *from_fp = &bp->fp[from];
struct bnx2x_fastpath *to_fp = &bp->fp[to];
- struct napi_struct orig_napi = to_fp->napi;
+
+ /* Copy the NAPI object as it has been already initialized */
+ from_fp->napi = to_fp->napi;
+
/* Move bnx2x_fastpath contents */
memcpy(to_fp, from_fp, sizeof(*to_fp));
to_fp->index = to;
-
- /* Restore the NAPI object as it has been already initialized */
- to_fp->napi = orig_napi;
}
int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
@@ -100,7 +102,8 @@ int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
* return idx of last bd freed
*/
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
- u16 idx)
+ u16 idx, unsigned int *pkts_compl,
+ unsigned int *bytes_compl)
{
struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
struct eth_tx_start_bd *tx_start_bd;
@@ -157,6 +160,10 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
/* release skb */
WARN_ON(!skb);
+ if (skb) {
+ (*pkts_compl)++;
+ (*bytes_compl) += skb->len;
+ }
dev_kfree_skb_any(skb);
tx_buf->first_bd = 0;
tx_buf->skb = NULL;
@@ -168,6 +175,7 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
{
struct netdev_queue *txq;
u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
+ unsigned int pkts_compl = 0, bytes_compl = 0;
#ifdef BNX2X_STOP_ON_ERROR
if (unlikely(bp->panic))
@@ -187,10 +195,14 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
" pkt_cons %u\n",
txdata->txq_index, hw_cons, sw_cons, pkt_cons);
- bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons);
+ bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
+ &pkts_compl, &bytes_compl);
+
sw_cons++;
}
+ netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
+
txdata->tx_pkt_cons = sw_cons;
txdata->tx_bd_cons = bd_cons;
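The pkts_compl/bytes_compl counters threaded through bnx2x_free_tx_pkt() feed the byte queue limits (BQL) accounting introduced in this kernel. A minimal sketch of the three calls involved; the demo_* wrappers are illustrative only.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* On transmit: account the bytes handed to the hardware. */
static void demo_tx_sent(struct netdev_queue *txq, struct sk_buff *skb)
{
	netdev_tx_sent_queue(txq, skb->len);
}

/* On TX completion: report what was freed so BQL can adapt the queue limit. */
static void demo_tx_done(struct netdev_queue *txq,
			 unsigned int pkts, unsigned int bytes)
{
	netdev_tx_completed_queue(txq, pkts, bytes);
}

/* When a ring is torn down: reset the bookkeeping. */
static void demo_tx_reset(struct netdev_queue *txq)
{
	netdev_tx_reset_queue(txq);
}

The corresponding calls in this patch are the netdev_tx_completed_queue() above and the netdev_tx_sent_queue()/netdev_tx_reset_queue() additions further down.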
@@ -292,8 +304,21 @@ static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
fp->last_max_sge, fp->rx_sge_prod);
}
+/* Return the Toeplitz hash value from the CQE (calculated by HW),
+ * or 0 if RX hashing is disabled or the CQE carries no hash.
+ */
+static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
+ const struct eth_fast_path_rx_cqe *cqe)
+{
+ /* Set Toeplitz hash from CQE */
+ if ((bp->dev->features & NETIF_F_RXHASH) &&
+ (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
+ return le32_to_cpu(cqe->rss_hash_result);
+ return 0;
+}
+
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
- struct sk_buff *skb, u16 cons, u16 prod,
+ u16 cons, u16 prod,
struct eth_fast_path_rx_cqe *cqe)
{
struct bnx2x *bp = fp->bp;
@@ -308,9 +333,9 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
if (tpa_info->tpa_state != BNX2X_TPA_STOP)
BNX2X_ERR("start of bin not in stop [%d]\n", queue);
- /* Try to map an empty skb from the aggregation info */
+ /* Try to map an empty data buffer from the aggregation info */
mapping = dma_map_single(&bp->pdev->dev,
- first_buf->skb->data,
+ first_buf->data + NET_SKB_PAD,
fp->rx_buf_size, DMA_FROM_DEVICE);
/*
* ...if it fails - move the skb from the consumer to the producer
@@ -320,15 +345,15 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
/* Move the BD from the consumer to the producer */
- bnx2x_reuse_rx_skb(fp, cons, prod);
+ bnx2x_reuse_rx_data(fp, cons, prod);
tpa_info->tpa_state = BNX2X_TPA_ERROR;
return;
}
- /* move empty skb from pool to prod */
- prod_rx_buf->skb = first_buf->skb;
+ /* move empty data from pool to prod */
+ prod_rx_buf->data = first_buf->data;
dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
- /* point prod_bd to new skb */
+ /* point prod_bd to new data */
prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
@@ -342,6 +367,7 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
tpa_info->tpa_state = BNX2X_TPA_START;
tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
tpa_info->placement_offset = cqe->placement_offset;
+ tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe);
#ifdef BNX2X_STOP_ON_ERROR
fp->tpa_queue_used |= (1 << queue);
@@ -469,11 +495,12 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
{
struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
- u8 pad = tpa_info->placement_offset;
+ u32 pad = tpa_info->placement_offset;
u16 len = tpa_info->len_on_bd;
- struct sk_buff *skb = rx_buf->skb;
+ struct sk_buff *skb = NULL;
+ u8 *data = rx_buf->data;
/* alloc new skb */
- struct sk_buff *new_skb;
+ u8 *new_data;
u8 old_tpa_state = tpa_info->tpa_state;
tpa_info->tpa_state = BNX2X_TPA_STOP;
@@ -484,18 +511,18 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
if (old_tpa_state == BNX2X_TPA_ERROR)
goto drop;
- /* Try to allocate the new skb */
- new_skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
+ /* Try to allocate the new data */
+ new_data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
/* Unmap skb in the pool anyway, as we are going to change
pool entry status to BNX2X_TPA_STOP even if new skb allocation
fails. */
dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
fp->rx_buf_size, DMA_FROM_DEVICE);
+ if (likely(new_data))
+ skb = build_skb(data);
- if (likely(new_skb)) {
- prefetch(skb);
- prefetch(((char *)(skb)) + L1_CACHE_BYTES);
+ if (likely(skb)) {
#ifdef BNX2X_STOP_ON_ERROR
if (pad + len > fp->rx_buf_size) {
@@ -507,8 +534,9 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
}
#endif
- skb_reserve(skb, pad);
+ skb_reserve(skb, pad + NET_SKB_PAD);
skb_put(skb, len);
+ skb->rxhash = tpa_info->rxhash;
skb->protocol = eth_type_trans(skb, bp->dev);
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -524,8 +552,8 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
}
- /* put new skb in bin */
- rx_buf->skb = new_skb;
+ /* put new data in bin */
+ rx_buf->data = new_data;
return;
}
@@ -537,19 +565,6 @@ drop:
fp->eth_q_stats.rx_skb_alloc_failed++;
}
-/* Set Toeplitz hash value in the skb using the value from the
- * CQE (calculated by HW).
- */
-static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
- struct sk_buff *skb)
-{
- /* Set Toeplitz hash from CQE */
- if ((bp->dev->features & NETIF_F_RXHASH) &&
- (cqe->fast_path_cqe.status_flags &
- ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
- skb->rxhash =
- le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
-}
int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
@@ -592,6 +607,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
u8 cqe_fp_flags;
enum eth_rx_cqe_type cqe_fp_type;
u16 len, pad;
+ u8 *data;
#ifdef BNX2X_STOP_ON_ERROR
if (unlikely(bp->panic))
@@ -602,13 +618,6 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
bd_prod = RX_BD(bd_prod);
bd_cons = RX_BD(bd_cons);
- /* Prefetch the page containing the BD descriptor
- at producer's index. It will be needed when new skb is
- allocated */
- prefetch((void *)(PAGE_ALIGN((unsigned long)
- (&fp->rx_desc_ring[bd_prod])) -
- PAGE_SIZE + 1));
-
cqe = &fp->rx_comp_ring[comp_ring_cons];
cqe_fp = &cqe->fast_path_cqe;
cqe_fp_flags = cqe_fp->type_error_flags;
@@ -624,138 +633,123 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
bnx2x_sp_event(fp, cqe);
goto next_cqe;
+ }
+ rx_buf = &fp->rx_buf_ring[bd_cons];
+ data = rx_buf->data;
- /* this is an rx packet */
- } else {
- rx_buf = &fp->rx_buf_ring[bd_cons];
- skb = rx_buf->skb;
- prefetch(skb);
-
- if (!CQE_TYPE_FAST(cqe_fp_type)) {
+ if (!CQE_TYPE_FAST(cqe_fp_type)) {
#ifdef BNX2X_STOP_ON_ERROR
- /* sanity check */
- if (fp->disable_tpa &&
- (CQE_TYPE_START(cqe_fp_type) ||
- CQE_TYPE_STOP(cqe_fp_type)))
- BNX2X_ERR("START/STOP packet while "
- "disable_tpa type %x\n",
- CQE_TYPE(cqe_fp_type));
+ /* sanity check */
+ if (fp->disable_tpa &&
+ (CQE_TYPE_START(cqe_fp_type) ||
+ CQE_TYPE_STOP(cqe_fp_type)))
+ BNX2X_ERR("START/STOP packet while "
+ "disable_tpa type %x\n",
+ CQE_TYPE(cqe_fp_type));
#endif
- if (CQE_TYPE_START(cqe_fp_type)) {
- u16 queue = cqe_fp->queue_index;
- DP(NETIF_MSG_RX_STATUS,
- "calling tpa_start on queue %d\n",
- queue);
+ if (CQE_TYPE_START(cqe_fp_type)) {
+ u16 queue = cqe_fp->queue_index;
+ DP(NETIF_MSG_RX_STATUS,
+ "calling tpa_start on queue %d\n",
+ queue);
- bnx2x_tpa_start(fp, queue, skb,
- bd_cons, bd_prod,
- cqe_fp);
-
- /* Set Toeplitz hash for LRO skb */
- bnx2x_set_skb_rxhash(bp, cqe, skb);
-
- goto next_rx;
-
- } else {
- u16 queue =
- cqe->end_agg_cqe.queue_index;
- DP(NETIF_MSG_RX_STATUS,
- "calling tpa_stop on queue %d\n",
- queue);
-
- bnx2x_tpa_stop(bp, fp, queue,
- &cqe->end_agg_cqe,
- comp_ring_cons);
+ bnx2x_tpa_start(fp, queue,
+ bd_cons, bd_prod,
+ cqe_fp);
+ goto next_rx;
+ } else {
+ u16 queue =
+ cqe->end_agg_cqe.queue_index;
+ DP(NETIF_MSG_RX_STATUS,
+ "calling tpa_stop on queue %d\n",
+ queue);
+
+ bnx2x_tpa_stop(bp, fp, queue,
+ &cqe->end_agg_cqe,
+ comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
- if (bp->panic)
- return 0;
+ if (bp->panic)
+ return 0;
#endif
- bnx2x_update_sge_prod(fp, cqe_fp);
- goto next_cqe;
- }
+ bnx2x_update_sge_prod(fp, cqe_fp);
+ goto next_cqe;
}
- /* non TPA */
- len = le16_to_cpu(cqe_fp->pkt_len);
- pad = cqe_fp->placement_offset;
- dma_sync_single_for_cpu(&bp->pdev->dev,
+ }
+ /* non TPA */
+ len = le16_to_cpu(cqe_fp->pkt_len);
+ pad = cqe_fp->placement_offset;
+ dma_sync_single_for_cpu(&bp->pdev->dev,
dma_unmap_addr(rx_buf, mapping),
- pad + RX_COPY_THRESH,
- DMA_FROM_DEVICE);
- prefetch(((char *)(skb)) + L1_CACHE_BYTES);
+ pad + RX_COPY_THRESH,
+ DMA_FROM_DEVICE);
+ pad += NET_SKB_PAD;
+ prefetch(data + pad); /* speedup eth_type_trans() */
+ /* is this an error packet? */
+ if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
+ DP(NETIF_MSG_RX_ERR,
+ "ERROR flags %x rx packet %u\n",
+ cqe_fp_flags, sw_comp_cons);
+ fp->eth_q_stats.rx_err_discard_pkt++;
+ goto reuse_rx;
+ }
- /* is this an error packet? */
- if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
+ /* Since we don't have a jumbo ring
+ * copy small packets if mtu > 1500
+ */
+ if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
+ (len <= RX_COPY_THRESH)) {
+ skb = netdev_alloc_skb_ip_align(bp->dev, len);
+ if (skb == NULL) {
DP(NETIF_MSG_RX_ERR,
- "ERROR flags %x rx packet %u\n",
- cqe_fp_flags, sw_comp_cons);
- fp->eth_q_stats.rx_err_discard_pkt++;
+ "ERROR packet dropped because of alloc failure\n");
+ fp->eth_q_stats.rx_skb_alloc_failed++;
goto reuse_rx;
}
-
- /* Since we don't have a jumbo ring
- * copy small packets if mtu > 1500
- */
- if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
- (len <= RX_COPY_THRESH)) {
- struct sk_buff *new_skb;
-
- new_skb = netdev_alloc_skb(bp->dev, len + pad);
- if (new_skb == NULL) {
- DP(NETIF_MSG_RX_ERR,
- "ERROR packet dropped "
- "because of alloc failure\n");
- fp->eth_q_stats.rx_skb_alloc_failed++;
- goto reuse_rx;
- }
-
- /* aligned copy */
- skb_copy_from_linear_data_offset(skb, pad,
- new_skb->data + pad, len);
- skb_reserve(new_skb, pad);
- skb_put(new_skb, len);
-
- bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
-
- skb = new_skb;
-
- } else
- if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
+ memcpy(skb->data, data + pad, len);
+ bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
+ } else {
+ if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
dma_unmap_single(&bp->pdev->dev,
- dma_unmap_addr(rx_buf, mapping),
+ dma_unmap_addr(rx_buf, mapping),
fp->rx_buf_size,
DMA_FROM_DEVICE);
+ skb = build_skb(data);
+ if (unlikely(!skb)) {
+ kfree(data);
+ fp->eth_q_stats.rx_skb_alloc_failed++;
+ goto next_rx;
+ }
skb_reserve(skb, pad);
- skb_put(skb, len);
-
} else {
DP(NETIF_MSG_RX_ERR,
"ERROR packet dropped because "
"of alloc failure\n");
fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
- bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
+ bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
goto next_rx;
}
+ }
- skb->protocol = eth_type_trans(skb, bp->dev);
+ skb_put(skb, len);
+ skb->protocol = eth_type_trans(skb, bp->dev);
- /* Set Toeplitz hash for a none-LRO skb */
- bnx2x_set_skb_rxhash(bp, cqe, skb);
+ /* Set Toeplitz hash for a none-LRO skb */
+ skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp);
- skb_checksum_none_assert(skb);
+ skb_checksum_none_assert(skb);
- if (bp->dev->features & NETIF_F_RXCSUM) {
+ if (bp->dev->features & NETIF_F_RXCSUM) {
- if (likely(BNX2X_RX_CSUM_OK(cqe)))
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- else
- fp->eth_q_stats.hw_csum_err++;
- }
+ if (likely(BNX2X_RX_CSUM_OK(cqe)))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ fp->eth_q_stats.hw_csum_err++;
}
- skb_record_rx_queue(skb, fp->index);
+ skb_record_rx_queue(skb, fp->rx_queue);
if (le16_to_cpu(cqe_fp->pars_flags.flags) &
PARSING_FLAGS_VLAN)
@@ -765,7 +759,7 @@ reuse_rx:
next_rx:
- rx_buf->skb = NULL;
+ rx_buf->data = NULL;
bd_cons = NEXT_RX_IDX(bd_cons);
bd_prod = NEXT_RX_IDX(bd_prod);
@@ -1011,9 +1005,9 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
struct sw_rx_bd *first_buf =
&tpa_info->first_buf;
- first_buf->skb = netdev_alloc_skb(bp->dev,
- fp->rx_buf_size);
- if (!first_buf->skb) {
+ first_buf->data = kmalloc(fp->rx_buf_size + NET_SKB_PAD,
+ GFP_ATOMIC);
+ if (!first_buf->data) {
BNX2X_ERR("Failed to allocate TPA "
"skb pool for queue[%d] - "
"disabling TPA on this "
@@ -1093,16 +1087,18 @@ static void bnx2x_free_tx_skbs(struct bnx2x *bp)
struct bnx2x_fastpath *fp = &bp->fp[i];
for_each_cos_in_tx_queue(fp, cos) {
struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
+ unsigned pkts_compl = 0, bytes_compl = 0;
- u16 bd_cons = txdata->tx_bd_cons;
u16 sw_prod = txdata->tx_pkt_prod;
u16 sw_cons = txdata->tx_pkt_cons;
while (sw_cons != sw_prod) {
- bd_cons = bnx2x_free_tx_pkt(bp, txdata,
- TX_BD(sw_cons));
+ bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
+ &pkts_compl, &bytes_compl);
sw_cons++;
}
+ netdev_tx_reset_queue(
+ netdev_get_tx_queue(bp->dev, txdata->txq_index));
}
}
}
@@ -1118,16 +1114,16 @@ static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
for (i = 0; i < NUM_RX_BD; i++) {
struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
- struct sk_buff *skb = rx_buf->skb;
+ u8 *data = rx_buf->data;
- if (skb == NULL)
+ if (data == NULL)
continue;
dma_unmap_single(&bp->pdev->dev,
dma_unmap_addr(rx_buf, mapping),
fp->rx_buf_size, DMA_FROM_DEVICE);
- rx_buf->skb = NULL;
- dev_kfree_skb(skb);
+ rx_buf->data = NULL;
+ kfree(data);
}
}
@@ -1445,6 +1441,11 @@ void bnx2x_set_num_queues(struct bnx2x *bp)
break;
}
+#ifdef BCM_CNIC
+	/* override in iSCSI SD mode */
+ if (IS_MF_ISCSI_SD(bp))
+ bp->num_queues = 1;
+#endif
/* Add special queues */
bp->num_queues += NON_ETH_CONTEXT_USE;
}
@@ -1509,6 +1510,7 @@ static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
for_each_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
+ u32 mtu;
/* Always use a mini-jumbo MTU for the FCoE L2 ring */
if (IS_FCOE_IDX(i))
@@ -1518,13 +1520,15 @@ static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
* IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
* overrun attack.
*/
- fp->rx_buf_size =
- BNX2X_FCOE_MINI_JUMBO_MTU + ETH_OVREHEAD +
- BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
+ mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
else
- fp->rx_buf_size =
- bp->dev->mtu + ETH_OVREHEAD +
- BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
+ mtu = bp->dev->mtu;
+ fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
+ IP_HEADER_ALIGNMENT_PADDING +
+ ETH_OVREHEAD +
+ mtu +
+ BNX2X_FW_RX_ALIGN_END;
+		/* Note: rx_buf_size doesn't take NET_SKB_PAD into account */
}
}
@@ -1541,7 +1545,8 @@ static inline int bnx2x_init_rss_pf(struct bnx2x *bp)
if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
for (i = 0; i < sizeof(ind_table); i++)
ind_table[i] =
- bp->fp->cl_id + (i % num_eth_queues);
+ bp->fp->cl_id +
+ ethtool_rxfh_indir_default(i, num_eth_queues);
}
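ethtool_rxfh_indir_default() used above simply spreads indirection-table entries round-robin across the RX queues. A small sketch of an equivalent fill loop for comparison; table size and queue count are arbitrary here.

#include <linux/ethtool.h>
#include <linux/types.h>

/* Demo fill loop; ethtool_rxfh_indir_default(i, n) is simply i % n. */
static void demo_fill_indir(u8 *table, unsigned int size, unsigned int nqueues)
{
	unsigned int i;

	for (i = 0; i < size; i++)
		table[i] = ethtool_rxfh_indir_default(i, nqueues);
}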
/*
@@ -1929,13 +1934,17 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
break;
}
- if (!bp->port.pmf)
+ if (bp->port.pmf)
+ bnx2x_update_drv_flags(bp, DRV_FLAGS_DCB_CONFIGURED, 0);
+ else
bnx2x__link_status_update(bp);
/* start the timer */
mod_timer(&bp->timer, jiffies + bp->current_interval);
#ifdef BCM_CNIC
+ /* re-read iscsi info */
+ bnx2x_get_iscsi_info(bp);
bnx2x_setup_cnic_irq_info(bp);
if (bp->state == BNX2X_STATE_OPEN)
bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
@@ -2799,6 +2808,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
skb_frag_size(frag), DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
+ unsigned int pkts_compl = 0, bytes_compl = 0;
DP(NETIF_MSG_TX_QUEUED, "Unable to map page - "
"dropping packet...\n");
@@ -2810,7 +2820,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
*/
first_bd->nbd = cpu_to_le16(nbd);
bnx2x_free_tx_pkt(bp, txdata,
- TX_BD(txdata->tx_pkt_prod));
+ TX_BD(txdata->tx_pkt_prod),
+ &pkts_compl, &bytes_compl);
return NETDEV_TX_OK;
}
@@ -2871,6 +2882,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
pbd_e2->parsing_data);
DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
+ netdev_tx_sent_queue(txq, skb->len);
+
txdata->tx_pkt_prod++;
/*
* Make sure that the BD data is updated before updating the producer
@@ -2981,9 +2994,14 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p)
struct bnx2x *bp = netdev_priv(dev);
int rc = 0;
- if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
+ if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data))
return -EINVAL;
+#ifdef BCM_CNIC
+ if (IS_MF_ISCSI_SD(bp) && !is_zero_ether_addr(addr->sa_data))
+ return -EINVAL;
+#endif
+
if (netif_running(dev)) {
rc = bnx2x_set_eth_mac(bp, false);
if (rc)
@@ -3098,7 +3116,12 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
u8 cos;
int rx_ring_size = 0;
- /* if rx_ring_size specified - use it */
+#ifdef BCM_CNIC
+ if (IS_MF_ISCSI_SD(bp)) {
+ rx_ring_size = MIN_RX_SIZE_NONTPA;
+ bp->rx_ring_size = rx_ring_size;
+ } else
+#endif
if (!bp->rx_ring_size) {
rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
@@ -3108,7 +3131,7 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
MIN_RX_SIZE_TPA, rx_ring_size);
bp->rx_ring_size = rx_ring_size;
- } else
+ } else /* if rx_ring_size specified - use it */
rx_ring_size = bp->rx_ring_size;
/* Common */
@@ -3278,14 +3301,14 @@ int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
msix_table_size = bp->igu_sb_cnt + 1;
/* fp array: RSS plus CNIC related L2 queues */
- fp = kzalloc((BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE) *
+ fp = kcalloc(BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE,
sizeof(*fp), GFP_KERNEL);
if (!fp)
goto alloc_err;
bp->fp = fp;
/* msix table */
- tbl = kzalloc(msix_table_size * sizeof(*tbl), GFP_KERNEL);
+ tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
if (!tbl)
goto alloc_err;
bp->msix_table = tbl;
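kcalloc() used above is the overflow-checked, zero-initializing counterpart of kzalloc(n * size); a tiny sketch under an assumed element type:

#include <linux/slab.h>

struct demo_elem {
	int v;
};

/* Hypothetical helper: kcalloc() zeroes the array like kzalloc() would,
 * but also checks that count * sizeof(struct demo_elem) does not overflow. */
static struct demo_elem *demo_alloc_array(size_t count)
{
	return kcalloc(count, sizeof(struct demo_elem), GFP_KERNEL);
}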
@@ -3409,7 +3432,8 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
return bnx2x_reload_if_running(dev);
}
-u32 bnx2x_fix_features(struct net_device *dev, u32 features)
+netdev_features_t bnx2x_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
struct bnx2x *bp = netdev_priv(dev);
@@ -3420,7 +3444,7 @@ u32 bnx2x_fix_features(struct net_device *dev, u32 features)
return features;
}
-int bnx2x_set_features(struct net_device *dev, u32 features)
+int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
{
struct bnx2x *bp = netdev_priv(dev);
u32 flags = bp->flags;
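netdev_features_t is the wider feature-mask type that replaces the old u32 in the ndo_fix_features/ndo_set_features callbacks converted above. A minimal sketch of the expected signatures; the demo_* names are placeholders.

#include <linux/netdevice.h>

/* Placeholder callbacks showing the converted signatures. */
static netdev_features_t demo_fix_features(struct net_device *dev,
					   netdev_features_t features)
{
	/* e.g. mask out offloads the hardware cannot honour */
	return features;
}

static int demo_set_features(struct net_device *dev, netdev_features_t features)
{
	return 0;
}

static const struct net_device_ops demo_netdev_ops = {
	.ndo_fix_features	= demo_fix_features,
	.ndo_set_features	= demo_set_features,
};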
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 283d663da180..bf27c54ff2e0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -20,6 +20,7 @@
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
#include "bnx2x.h"
@@ -533,8 +534,9 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu);
*/
int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type);
#endif
-u32 bnx2x_fix_features(struct net_device *dev, u32 features);
-int bnx2x_set_features(struct net_device *dev, u32 features);
+netdev_features_t bnx2x_fix_features(struct net_device *dev,
+ netdev_features_t features);
+int bnx2x_set_features(struct net_device *dev, netdev_features_t features);
/**
* bnx2x_tx_timeout - tx timeout netdev callback
@@ -874,8 +876,7 @@ static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
- memset(fp->sge_mask, 0xff,
- (NUM_RX_SGE >> BIT_VEC64_ELEM_SHIFT)*sizeof(u64));
+ memset(fp->sge_mask, 0xff, sizeof(fp->sge_mask));
/* Clear the two last indices in the page to 1:
these are the indices that correspond to the "next" element,
@@ -911,26 +912,27 @@ static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
return 0;
}
-static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
- struct bnx2x_fastpath *fp, u16 index)
+static inline int bnx2x_alloc_rx_data(struct bnx2x *bp,
+ struct bnx2x_fastpath *fp, u16 index)
{
- struct sk_buff *skb;
+ u8 *data;
struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
dma_addr_t mapping;
- skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
- if (unlikely(skb == NULL))
+ data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
+ if (unlikely(data == NULL))
return -ENOMEM;
- mapping = dma_map_single(&bp->pdev->dev, skb->data, fp->rx_buf_size,
+ mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
+ fp->rx_buf_size,
DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
- dev_kfree_skb_any(skb);
+ kfree(data);
return -ENOMEM;
}
- rx_buf->skb = skb;
+ rx_buf->data = data;
dma_unmap_addr_set(rx_buf, mapping, mapping);
rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
@@ -939,12 +941,12 @@ static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
return 0;
}
-/* note that we are not allocating a new skb,
+/* note that we are not allocating a new buffer,
* we are just moving one from cons to prod
* we are not creating a new mapping,
* so there is no need to check for dma_mapping_error().
*/
-static inline void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
+static inline void bnx2x_reuse_rx_data(struct bnx2x_fastpath *fp,
u16 cons, u16 prod)
{
struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
@@ -954,7 +956,7 @@ static inline void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
dma_unmap_addr_set(prod_rx_buf, mapping,
dma_unmap_addr(cons_rx_buf, mapping));
- prod_rx_buf->skb = cons_rx_buf->skb;
+ prod_rx_buf->data = cons_rx_buf->data;
*prod_bd = *cons_bd;
}
@@ -1030,9 +1032,9 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
for (i = 0; i < last; i++) {
struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
struct sw_rx_bd *first_buf = &tpa_info->first_buf;
- struct sk_buff *skb = first_buf->skb;
+ u8 *data = first_buf->data;
- if (skb == NULL) {
+ if (data == NULL) {
DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
continue;
}
@@ -1040,8 +1042,8 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
dma_unmap_single(&bp->pdev->dev,
dma_unmap_addr(first_buf, mapping),
fp->rx_buf_size, DMA_FROM_DEVICE);
- dev_kfree_skb(skb);
- first_buf->skb = NULL;
+ kfree(data);
+ first_buf->data = NULL;
}
}
@@ -1149,7 +1151,7 @@ static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
* fp->eth_q_stats.rx_skb_alloc_failed = 0
*/
for (i = 0; i < rx_ring_size; i++) {
- if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
+ if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
fp->eth_q_stats.rx_skb_alloc_failed++;
continue;
}
@@ -1318,6 +1320,7 @@ static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
unsigned long q_type = 0;
+ bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp);
bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp,
BNX2X_FCOE_ETH_CL_ID_IDX);
/** Current BNX2X_FCOE_ETH_CID deffinition implies not more than
@@ -1488,4 +1491,77 @@ static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg)
return max_cfg;
}
+/**
+ * bnx2x_get_iscsi_info - update iSCSI params according to licensing info.
+ *
+ * @bp: driver handle
+ *
+ */
+void bnx2x_get_iscsi_info(struct bnx2x *bp);
+
+/* returns func by VN for current port */
+static inline int func_by_vn(struct bnx2x *bp, int vn)
+{
+ return 2 * vn + BP_PORT(bp);
+}
+
+/**
+ * bnx2x_link_sync_notify - send notification to other functions.
+ *
+ * @bp: driver handle
+ *
+ */
+static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
+{
+ int func;
+ int vn;
+
+ /* Set the attention towards other drivers on the same port */
+ for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
+ if (vn == BP_VN(bp))
+ continue;
+
+ func = func_by_vn(bp, vn);
+ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
+ (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
+ }
+}
+
+/**
+ * bnx2x_update_drv_flags - update flags in shmem
+ *
+ * @bp: driver handle
+ * @flags: flags to update
+ * @set: set or clear
+ *
+ */
+static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set)
+{
+ if (SHMEM2_HAS(bp, drv_flags)) {
+ u32 drv_flags;
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_DRV_FLAGS);
+ drv_flags = SHMEM2_RD(bp, drv_flags);
+
+ if (set)
+ SET_FLAGS(drv_flags, flags);
+ else
+ RESET_FLAGS(drv_flags, flags);
+
+ SHMEM2_WR(bp, drv_flags, drv_flags);
+ DP(NETIF_MSG_HW, "drv_flags 0x%08x\n", drv_flags);
+ bnx2x_release_hw_lock(bp, HW_LOCK_DRV_FLAGS);
+ }
+}
+
+static inline bool bnx2x_is_valid_ether_addr(struct bnx2x *bp, u8 *addr)
+{
+ if (is_valid_ether_addr(addr))
+ return true;
+#ifdef BCM_CNIC
+ if (is_zero_ether_addr(addr) && IS_MF_ISCSI_SD(bp))
+ return true;
+#endif
+ return false;
+}
+
#endif /* BNX2X_CMN_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index 51bd7485ab18..5051cf3deb20 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -685,24 +685,6 @@ int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall)
}
#endif
-static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set)
-{
- if (SHMEM2_HAS(bp, drv_flags)) {
- u32 drv_flags;
- bnx2x_acquire_hw_lock(bp, HW_LOCK_DRV_FLAGS);
- drv_flags = SHMEM2_RD(bp, drv_flags);
-
- if (set)
- SET_FLAGS(drv_flags, flags);
- else
- RESET_FLAGS(drv_flags, flags);
-
- SHMEM2_WR(bp, drv_flags, drv_flags);
- DP(NETIF_MSG_HW, "drv_flags 0x%08x\n", drv_flags);
- bnx2x_release_hw_lock(bp, HW_LOCK_DRV_FLAGS);
- }
-}
-
static inline void bnx2x_dcbx_update_tc_mapping(struct bnx2x *bp)
{
u8 prio, cos;
@@ -755,18 +737,26 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
/* mark DCBX result for PMF migration */
bnx2x_update_drv_flags(bp, DRV_FLAGS_DCB_CONFIGURED, 1);
#ifdef BCM_DCBNL
- /**
+ /*
* Add new app tlvs to dcbnl
*/
bnx2x_dcbnl_update_applist(bp, false);
#endif
- bnx2x_dcbx_stop_hw_tx(bp);
-
- /* reconfigure the netdevice with the results of the new
+ /*
+ * reconfigure the netdevice with the results of the new
* dcbx negotiation.
*/
bnx2x_dcbx_update_tc_mapping(bp);
+ /*
+	 * allow other functions to update their netdevices
+ * accordingly
+ */
+ if (IS_MF(bp))
+ bnx2x_link_sync_notify(bp);
+
+ bnx2x_dcbx_stop_hw_tx(bp);
+
return;
}
case BNX2X_DCBX_STATE_TX_PAUSED:
@@ -775,6 +765,7 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
bnx2x_dcbx_update_ets_params(bp);
bnx2x_dcbx_resume_hw_tx(bp);
+
return;
case BNX2X_DCBX_STATE_TX_RELEASED:
DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_RELEASED\n");
@@ -883,7 +874,7 @@ static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp,
/*For IEEE admin_recommendation_bw_precentage
*For IEEE admin_recommendation_ets_pg */
af->pfc.pri_en_bitmap = (u8)dp->admin_pfc_bitmap;
- for (i = 0; i < 4; i++) {
+ for (i = 0; i < DCBX_CONFIG_MAX_APP_PROTOCOL; i++) {
if (dp->admin_priority_app_table[i].valid) {
struct bnx2x_admin_priority_app_table *table =
dp->admin_priority_app_table;
@@ -923,7 +914,7 @@ static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp,
void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled)
{
- if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3(bp)) {
+ if (!CHIP_IS_E1x(bp)) {
bp->dcb_state = dcb_on;
bp->dcbx_enabled = dcbx_enabled;
} else {
@@ -1863,7 +1854,7 @@ static void bnx2x_dcbx_fw_struct(struct bnx2x *bp,
void bnx2x_dcbx_pmf_update(struct bnx2x *bp)
{
/* if we need to syncronize DCBX result from prev PMF
- * read it from shmem and update bp accordingly
+ * read it from shmem and update bp and netdev accordingly
*/
if (SHMEM2_HAS(bp, drv_flags) &&
GET_FLAGS(SHMEM2_RD(bp, drv_flags), DRV_FLAGS_DCB_CONFIGURED)) {
@@ -1875,6 +1866,22 @@ void bnx2x_dcbx_pmf_update(struct bnx2x *bp)
bp->dcbx_error);
bnx2x_get_dcbx_drv_param(bp, &bp->dcbx_local_feat,
bp->dcbx_error);
+#ifdef BCM_DCBNL
+ /*
+ * Add new app tlvs to dcbnl
+ */
+ bnx2x_dcbnl_update_applist(bp, false);
+ /*
+ * Send a notification for the new negotiated parameters
+ */
+ dcbnl_cee_notify(bp->dev, RTM_GETDCB, DCB_CMD_CEE_GET, 0, 0);
+#endif
+ /*
+ * reconfigure the netdevice with the results of the new
+ * dcbx negotiation.
+ */
+ bnx2x_dcbx_update_tc_mapping(bp);
+
}
}
@@ -2242,7 +2249,7 @@ static int bnx2x_set_admin_app_up(struct bnx2x *bp, u8 idtype, u16 idval, u8 up)
int i, ff;
/* iterate over the app entries looking for idtype and idval */
- for (i = 0, ff = -1; i < 4; i++) {
+ for (i = 0, ff = -1; i < DCBX_CONFIG_MAX_APP_PROTOCOL; i++) {
struct bnx2x_admin_priority_app_table *app_ent =
&bp->dcbx_config_params.admin_priority_app_table[i];
if (bnx2x_admin_app_is_equal(app_ent, idtype, idval))
@@ -2251,7 +2258,7 @@ static int bnx2x_set_admin_app_up(struct bnx2x *bp, u8 idtype, u16 idval, u8 up)
if (ff < 0 && !app_ent->valid)
ff = i;
}
- if (i < 4)
+ if (i < DCBX_CONFIG_MAX_APP_PROTOCOL)
/* if found overwrite up */
bp->dcbx_config_params.
admin_priority_app_table[i].priority = up;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
index 2c6a3bca6f28..2ab9254e2d5e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
@@ -90,6 +90,7 @@ struct bnx2x_admin_priority_app_table {
u32 app_id;
};
+#define DCBX_CONFIG_MAX_APP_PROTOCOL 4
struct bnx2x_config_dcbx_params {
u32 overwrite_settings;
u32 admin_dcbx_version;
@@ -109,7 +110,8 @@ struct bnx2x_config_dcbx_params {
u32 admin_recommendation_bw_precentage[8];
u32 admin_recommendation_ets_pg[8];
u32 admin_pfc_bitmap;
- struct bnx2x_admin_priority_app_table admin_priority_app_table[4];
+ struct bnx2x_admin_priority_app_table
+ admin_priority_app_table[DCBX_CONFIG_MAX_APP_PROTOCOL];
u32 admin_default_priority;
};
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index f0ca8b27a55e..f99c6e312a5d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -107,6 +107,10 @@ static const struct {
4, STATS_FLAGS_PORT, "rx_filtered_packets" },
{ STATS_OFFSET32(mf_tag_discard),
4, STATS_FLAGS_PORT, "rx_mf_tag_discard" },
+ { STATS_OFFSET32(pfc_frames_received_hi),
+ 8, STATS_FLAGS_PORT, "pfc_frames_received" },
+ { STATS_OFFSET32(pfc_frames_sent_hi),
+ 8, STATS_FLAGS_PORT, "pfc_frames_sent" },
{ STATS_OFFSET32(brb_drop_hi),
8, STATS_FLAGS_PORT, "rx_brb_discard" },
{ STATS_OFFSET32(brb_truncate_hi),
@@ -352,7 +356,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
DP(NETIF_MSG_LINK, "Unsupported port type\n");
return -EINVAL;
}
- /* Save new config in case command complete successuly */
+	/* Save new config in case the command completes successfully */
new_multi_phy_config = bp->link_params.multi_phy_config;
/* Get the new cfg_idx */
cfg_idx = bnx2x_get_link_cfg_idx(bp);
@@ -361,13 +365,18 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
DP(NETIF_MSG_LINK, "cfg_idx = %x\n", cfg_idx);
if (cmd->autoneg == AUTONEG_ENABLE) {
+ u32 an_supported_speed = bp->port.supported[cfg_idx];
+ if (bp->link_params.phy[EXT_PHY1].type ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+ an_supported_speed |= (SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full);
if (!(bp->port.supported[cfg_idx] & SUPPORTED_Autoneg)) {
DP(NETIF_MSG_LINK, "Autoneg not supported\n");
return -EINVAL;
}
/* advertise the requested speed and duplex if supported */
- if (cmd->advertising & ~(bp->port.supported[cfg_idx])) {
+ if (cmd->advertising & ~an_supported_speed) {
DP(NETIF_MSG_LINK, "Advertisement parameters "
"are not supported\n");
return -EINVAL;
@@ -761,8 +770,8 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
struct bnx2x *bp = netdev_priv(dev);
u8 phy_fw_ver[PHY_FW_VER_LEN];
- strcpy(info->driver, DRV_MODULE_NAME);
- strcpy(info->version, DRV_MODULE_VERSION);
+ strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
phy_fw_ver[0] = '\0';
if (bp->port.pmf) {
@@ -773,14 +782,14 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
bnx2x_release_phy_lock(bp);
}
- strncpy(info->fw_version, bp->fw_ver, 32);
+ strlcpy(info->fw_version, bp->fw_ver, sizeof(info->fw_version));
snprintf(info->fw_version + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
"bc %d.%d.%d%s%s",
(bp->common.bc_ver & 0xff0000) >> 16,
(bp->common.bc_ver & 0xff00) >> 8,
(bp->common.bc_ver & 0xff),
((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
- strcpy(info->bus_info, pci_name(bp->pdev));
+ strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
info->n_stats = BNX2X_NUM_STATS;
info->testinfo_len = BNX2X_NUM_TESTS;
info->eedump_len = bp->common.flash_size;
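strlcpy() bounds each copy to the destination field and always NUL-terminates, which is why it replaces strcpy()/strncpy() in get_drvinfo above. A compact sketch of the pattern; the strings are placeholders.

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/string.h>

/* Placeholder strings; real drivers copy their module name/version here. */
static void demo_get_drvinfo(struct net_device *dev,
			     struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, "demo", sizeof(info->driver));
	strlcpy(info->version, "1.0", sizeof(info->version));
	strlcpy(info->bus_info, "0000:00:00.0", sizeof(info->bus_info));
}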
@@ -1740,6 +1749,8 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
struct sw_rx_bd *rx_buf;
u16 len;
int rc = -ENODEV;
+ u8 *data;
+ struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
/* check the loopback mode */
switch (loopback_mode) {
@@ -1748,8 +1759,18 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
return -EINVAL;
break;
case BNX2X_MAC_LOOPBACK:
- bp->link_params.loopback_mode = CHIP_IS_E3(bp) ?
- LOOPBACK_XMAC : LOOPBACK_BMAC;
+ if (CHIP_IS_E3(bp)) {
+ int cfg_idx = bnx2x_get_link_cfg_idx(bp);
+ if (bp->port.supported[cfg_idx] &
+ (SUPPORTED_10000baseT_Full |
+ SUPPORTED_20000baseMLD2_Full |
+ SUPPORTED_20000baseKR2_Full))
+ bp->link_params.loopback_mode = LOOPBACK_XMAC;
+ else
+ bp->link_params.loopback_mode = LOOPBACK_UMAC;
+ } else
+ bp->link_params.loopback_mode = LOOPBACK_BMAC;
+
bnx2x_phy_init(&bp->link_params, &bp->link_vars);
break;
default:
@@ -1784,6 +1805,8 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
tx_start_idx = le16_to_cpu(*txdata->tx_cons_sb);
rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
+ netdev_tx_sent_queue(txq, skb->len);
+
pkt_prod = txdata->tx_pkt_prod++;
tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
tx_buf->first_bd = txdata->tx_bd_prod;
@@ -1865,10 +1888,9 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
dma_sync_single_for_cpu(&bp->pdev->dev,
dma_unmap_addr(rx_buf, mapping),
fp_rx->rx_buf_size, DMA_FROM_DEVICE);
- skb = rx_buf->skb;
- skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
+ data = rx_buf->data + NET_SKB_PAD + cqe->fast_path_cqe.placement_offset;
for (i = ETH_HLEN; i < pkt_size; i++)
- if (*(skb->data + i) != (unsigned char) (i & 0xff))
+ if (*(data + i) != (unsigned char) (i & 0xff))
goto test_loopback_rx_exit;
rc = 0;
@@ -2285,18 +2307,20 @@ static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
}
}
-static int bnx2x_get_rxfh_indir(struct net_device *dev,
- struct ethtool_rxfh_indir *indir)
+static u32 bnx2x_get_rxfh_indir_size(struct net_device *dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ return (bp->multi_mode == ETH_RSS_MODE_DISABLED ?
+ 0 : T_ETH_INDIRECTION_TABLE_SIZE);
+}
+
+static int bnx2x_get_rxfh_indir(struct net_device *dev, u32 *indir)
{
struct bnx2x *bp = netdev_priv(dev);
- size_t copy_size =
- min_t(size_t, indir->size, T_ETH_INDIRECTION_TABLE_SIZE);
u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
size_t i;
- if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
- return -EOPNOTSUPP;
-
/* Get the current configuration of the RSS indirection table */
bnx2x_get_rss_ind_table(&bp->rss_conf_obj, ind_table);
@@ -2309,33 +2333,19 @@ static int bnx2x_get_rxfh_indir(struct net_device *dev,
* align the returned table to the Client ID of the leading RSS
* queue.
*/
- for (i = 0; i < copy_size; i++)
- indir->ring_index[i] = ind_table[i] - bp->fp->cl_id;
-
- indir->size = T_ETH_INDIRECTION_TABLE_SIZE;
+ for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++)
+ indir[i] = ind_table[i] - bp->fp->cl_id;
return 0;
}
-static int bnx2x_set_rxfh_indir(struct net_device *dev,
- const struct ethtool_rxfh_indir *indir)
+static int bnx2x_set_rxfh_indir(struct net_device *dev, const u32 *indir)
{
struct bnx2x *bp = netdev_priv(dev);
size_t i;
u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
- u32 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
-
- if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
- return -EOPNOTSUPP;
-
- /* validate the size */
- if (indir->size != T_ETH_INDIRECTION_TABLE_SIZE)
- return -EINVAL;
for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
- /* validate the indices */
- if (indir->ring_index[i] >= num_eth_queues)
- return -EINVAL;
/*
* The same as in bnx2x_get_rxfh_indir: we can't use a memcpy()
* as an internal storage of an indirection table is a u8 array
@@ -2345,7 +2355,7 @@ static int bnx2x_set_rxfh_indir(struct net_device *dev,
* align the received table to the Client ID of the leading RSS
* queue
*/
- ind_table[i] = indir->ring_index[i] + bp->fp->cl_id;
+ ind_table[i] = indir[i] + bp->fp->cl_id;
}
return bnx2x_config_rss_pf(bp, ind_table, false);
@@ -2378,6 +2388,7 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
.set_phys_id = bnx2x_set_phys_id,
.get_ethtool_stats = bnx2x_get_ethtool_stats,
.get_rxnfc = bnx2x_get_rxnfc,
+ .get_rxfh_indir_size = bnx2x_get_rxfh_indir_size,
.get_rxfh_indir = bnx2x_get_rxfh_indir,
.set_rxfh_indir = bnx2x_set_rxfh_indir,
};
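With get_rxfh_indir_size() in ethtool_ops, the ethtool core learns the table size, allocates the u32 index arrays and validates them itself, which is why the per-driver size and range checks were dropped above. A minimal driver-side sketch; the fixed 128-entry demo table is an assumption, not the bnx2x layout.

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/string.h>

#define DEMO_INDIR_SIZE 128			/* assumed table size */
static u32 demo_indir_table[DEMO_INDIR_SIZE];

static u32 demo_get_rxfh_indir_size(struct net_device *dev)
{
	return DEMO_INDIR_SIZE;
}

static int demo_get_rxfh_indir(struct net_device *dev, u32 *indir)
{
	/* the core passes an array of exactly get_rxfh_indir_size() entries */
	memcpy(indir, demo_indir_table, sizeof(demo_indir_table));
	return 0;
}

static int demo_set_rxfh_indir(struct net_device *dev, const u32 *indir)
{
	memcpy(demo_indir_table, indir, sizeof(demo_indir_table));
	return 0;
}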
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index fc754cb6cc0f..3e30c8642c26 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -1247,11 +1247,14 @@ struct drv_func_mb {
#define DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL 0xa1000000
#define REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL 0x00050234
#define REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED 0x00070014
+ #define REQ_BC_VER_4_PFC_STATS_SUPPORTED 0x00070201
#define DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG 0xb0000000
#define DRV_MSG_CODE_DCBX_PMF_DRV_OK 0xb2000000
#define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000
+ #define DRV_MSG_CODE_DRV_INFO_ACK 0xd8000000
+ #define DRV_MSG_CODE_DRV_INFO_NACK 0xd9000000
#define DRV_MSG_CODE_SET_MF_BW 0xe0000000
#define REQ_BC_VER_4_SET_MF_BW 0x00060202
@@ -1304,6 +1307,8 @@ struct drv_func_mb {
#define FW_MSG_CODE_VRFY_OPT_MDL_INVLD_IMG 0xa0200000
#define FW_MSG_CODE_VRFY_OPT_MDL_UNAPPROVED 0xa0300000
#define FW_MSG_CODE_VF_DISABLED_DONE 0xb0000000
+ #define FW_MSG_CODE_DRV_INFO_ACK 0xd8100000
+ #define FW_MSG_CODE_DRV_INFO_NACK 0xd9100000
#define FW_MSG_CODE_SET_MF_BW_SENT 0xe0000000
#define FW_MSG_CODE_SET_MF_BW_DONE 0xe1000000
@@ -1360,6 +1365,7 @@ struct drv_func_mb {
#define DRV_STATUS_DCBX_EVENT_MASK 0x000f0000
#define DRV_STATUS_DCBX_NEGOTIATION_RESULTS 0x00010000
+ #define DRV_STATUS_DRV_INFO_REQ 0x04000000
u32 virt_mac_upper;
#define VIRT_MAC_SIGN_MASK 0xffff0000
@@ -1964,9 +1970,38 @@ struct shmem2_region {
u32 extended_dev_info_shared_addr;
u32 ncsi_oem_data_addr;
- u32 ocsd_host_addr;
- u32 ocbb_host_addr;
- u32 ocsd_req_update_interval;
+ u32 ocsd_host_addr; /* initialized by option ROM */
+ u32 ocbb_host_addr; /* initialized by option ROM */
+ u32 ocsd_req_update_interval; /* initialized by option ROM */
+ u32 temperature_in_half_celsius;
+ u32 glob_struct_in_host;
+
+ u32 dcbx_neg_res_ext_offset;
+#define SHMEM_DCBX_NEG_RES_EXT_NONE 0x00000000
+
+ u32 drv_capabilities_flag[E2_FUNC_MAX];
+#define DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED 0x00000001
+#define DRV_FLAGS_CAPABILITIES_LOADED_L2 0x00000002
+#define DRV_FLAGS_CAPABILITIES_LOADED_FCOE 0x00000004
+#define DRV_FLAGS_CAPABILITIES_LOADED_ISCSI 0x00000008
+
+ u32 extended_dev_info_shared_cfg_size;
+
+ u32 dcbx_en[PORT_MAX];
+
+ /* The offset points to the multi threaded meta structure */
+ u32 multi_thread_data_offset;
+
+ /* address of DMAable host address holding values from the drivers */
+ u32 drv_info_host_addr_lo;
+ u32 drv_info_host_addr_hi;
+
+ /* general values written by the MFW (such as current version) */
+ u32 drv_info_control;
+#define DRV_INFO_CONTROL_VER_MASK 0x000000ff
+#define DRV_INFO_CONTROL_VER_SHIFT 0
+#define DRV_INFO_CONTROL_OP_CODE_MASK 0x0000ff00
+#define DRV_INFO_CONTROL_OP_CODE_SHIFT 8
};
@@ -2501,14 +2536,18 @@ struct mac_stx {
#define MAC_STX_IDX_MAX 2
struct host_port_stats {
- u32 host_port_stats_start;
+ u32 host_port_stats_counter;
struct mac_stx mac_stx[MAC_STX_IDX_MAX];
u32 brb_drop_hi;
u32 brb_drop_lo;
- u32 host_port_stats_end;
+ u32 not_used; /* obsolete */
+ u32 pfc_frames_tx_hi;
+ u32 pfc_frames_tx_lo;
+ u32 pfc_frames_rx_hi;
+ u32 pfc_frames_rx_lo;
};
@@ -2548,6 +2587,118 @@ struct host_func_stats {
/* VIC definitions */
#define VICSTATST_UIF_INDEX 2
+/* current drv_info version */
+#define DRV_INFO_CUR_VER 1
+
+/* drv_info op codes supported */
+enum drv_info_opcode {
+ ETH_STATS_OPCODE,
+ FCOE_STATS_OPCODE,
+ ISCSI_STATS_OPCODE
+};
+
+#define ETH_STAT_INFO_VERSION_LEN 12
+/* Per PCI Function Ethernet Statistics required from the driver */
+struct eth_stats_info {
+ /* Function's Driver Version. padded to 12 */
+ u8 version[ETH_STAT_INFO_VERSION_LEN];
+ /* Locally Admin Addr. BigEndian EIU48. Actual size is 6 bytes */
+ u8 mac_local[8];
+ u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. */
+ u8 mac_add2[8]; /* Additional Programmed MAC Addr 2. */
+ u32 mtu_size; /* MTU Size. Note : Negotiated MTU */
+ u32 feature_flags; /* Feature_Flags. */
+#define FEATURE_ETH_CHKSUM_OFFLOAD_MASK 0x01
+#define FEATURE_ETH_LSO_MASK 0x02
+#define FEATURE_ETH_BOOTMODE_MASK 0x1C
+#define FEATURE_ETH_BOOTMODE_SHIFT 2
+#define FEATURE_ETH_BOOTMODE_NONE (0x0 << 2)
+#define FEATURE_ETH_BOOTMODE_PXE (0x1 << 2)
+#define FEATURE_ETH_BOOTMODE_ISCSI (0x2 << 2)
+#define FEATURE_ETH_BOOTMODE_FCOE (0x3 << 2)
+#define FEATURE_ETH_TOE_MASK 0x20
+ u32 lso_max_size; /* LSO MaxOffloadSize. */
+ u32 lso_min_seg_cnt; /* LSO MinSegmentCount. */
+ /* Num Offloaded Connections TCP_IPv4. */
+ u32 ipv4_ofld_cnt;
+ /* Num Offloaded Connections TCP_IPv6. */
+ u32 ipv6_ofld_cnt;
+ u32 promiscuous_mode; /* Promiscuous Mode. non-zero true */
+ u32 txq_size; /* TX Descriptors Queue Size */
+ u32 rxq_size; /* RX Descriptors Queue Size */
+ /* TX Descriptor Queue Avg Depth. % Avg Queue Depth since last poll */
+ u32 txq_avg_depth;
+ /* RX Descriptors Queue Avg Depth. % Avg Queue Depth since last poll */
+ u32 rxq_avg_depth;
+ /* IOV_Offload. 0=none; 1=MultiQueue, 2=VEB 3= VEPA*/
+ u32 iov_offload;
+ /* Number of NetQueue/VMQ Config'd. */
+ u32 netq_cnt;
+ u32 vf_cnt; /* Num VF assigned to this PF. */
+};
+
+/* Per PCI Function FCOE Statistics required from the driver */
+struct fcoe_stats_info {
+ u8 version[12]; /* Function's Driver Version. */
+ u8 mac_local[8]; /* Locally Admin Addr. */
+ u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. */
+ u8 mac_add2[8]; /* Additional Programmed MAC Addr 2. */
+ /* QoS Priority (per 802.1p). 0-7255 */
+ u32 qos_priority;
+ u32 txq_size; /* FCoE TX Descriptors Queue Size. */
+ u32 rxq_size; /* FCoE RX Descriptors Queue Size. */
+ /* FCoE TX Descriptor Queue Avg Depth. */
+ u32 txq_avg_depth;
+ /* FCoE RX Descriptors Queue Avg Depth. */
+ u32 rxq_avg_depth;
+ u32 rx_frames_lo; /* FCoE RX Frames received. */
+ u32 rx_frames_hi; /* FCoE RX Frames received. */
+ u32 rx_bytes_lo; /* FCoE RX Bytes received. */
+ u32 rx_bytes_hi; /* FCoE RX Bytes received. */
+ u32 tx_frames_lo; /* FCoE TX Frames sent. */
+ u32 tx_frames_hi; /* FCoE TX Frames sent. */
+ u32 tx_bytes_lo; /* FCoE TX Bytes sent. */
+ u32 tx_bytes_hi; /* FCoE TX Bytes sent. */
+};
+
+/* Per PCI Function iSCSI Statistics required from the driver*/
+struct iscsi_stats_info {
+ u8 version[12]; /* Function's Driver Version. */
+ u8 mac_local[8]; /* Locally Admin iSCSI MAC Addr. */
+ u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. */
+ /* QoS Priority (per 802.1p). 0-7255 */
+ u32 qos_priority;
+ u8 initiator_name[64]; /* iSCSI Boot Initiator Node name. */
+ u8 ww_port_name[64]; /* iSCSI World wide port name */
+ u8 boot_target_name[64];/* iSCSI Boot Target Name. */
+ u8 boot_target_ip[16]; /* iSCSI Boot Target IP. */
+ u32 boot_target_portal; /* iSCSI Boot Target Portal. */
+ u8 boot_init_ip[16]; /* iSCSI Boot Initiator IP Address. */
+ u32 max_frame_size; /* Max Frame Size. bytes */
+ u32 txq_size; /* PDU TX Descriptors Queue Size. */
+ u32 rxq_size; /* PDU RX Descriptors Queue Size. */
+ u32 txq_avg_depth; /* PDU TX Descriptor Queue Avg Depth. */
+ u32 rxq_avg_depth; /* PDU RX Descriptors Queue Avg Depth. */
+ u32 rx_pdus_lo; /* iSCSI PDUs received. */
+ u32 rx_pdus_hi; /* iSCSI PDUs received. */
+ u32 rx_bytes_lo; /* iSCSI RX Bytes received. */
+ u32 rx_bytes_hi; /* iSCSI RX Bytes received. */
+ u32 tx_pdus_lo; /* iSCSI PDUs sent. */
+ u32 tx_pdus_hi; /* iSCSI PDUs sent. */
+ u32 tx_bytes_lo; /* iSCSI PDU TX Bytes sent. */
+ u32 tx_bytes_hi; /* iSCSI PDU TX Bytes sent. */
+ u32 pcp_prior_map_tbl; /* C-PCP to S-PCP Priority MapTable.
+ * 9 nibbles, the position of each nibble
+ * represents the C-PCP value, the value
+ * of the nibble = S-PCP value.
+ */
+};
+
+union drv_info_to_mcp {
+ struct eth_stats_info ether_stat;
+ struct fcoe_stats_info fcoe_stat;
+ struct iscsi_stats_info iscsi_stat;
+};
#define BCM_5710_FW_MAJOR_VERSION 7
#define BCM_5710_FW_MINOR_VERSION 0
#define BCM_5710_FW_REVISION_VERSION 29
@@ -4161,8 +4312,62 @@ struct ustorm_eth_rx_producers {
/*
- * cfc delete event data
+ * FCoE RX statistics parameters section#0
*/
+struct fcoe_rx_stat_params_section0 {
+ __le32 fcoe_rx_pkt_cnt;
+ __le32 fcoe_rx_byte_cnt;
+};
+
+
+/*
+ * FCoE RX statistics parameters section#1
+ */
+struct fcoe_rx_stat_params_section1 {
+ __le32 fcoe_ver_cnt;
+ __le32 fcoe_rx_drop_pkt_cnt;
+};
+
+
+/*
+ * FCoE RX statistics parameters section#2
+ */
+struct fcoe_rx_stat_params_section2 {
+ __le32 fc_crc_cnt;
+ __le32 eofa_del_cnt;
+ __le32 miss_frame_cnt;
+ __le32 seq_timeout_cnt;
+ __le32 drop_seq_cnt;
+ __le32 fcoe_rx_drop_pkt_cnt;
+ __le32 fcp_rx_pkt_cnt;
+ __le32 reserved0;
+};
+
+
+/*
+ * FCoE TX statistics parameters
+ */
+struct fcoe_tx_stat_params {
+ __le32 fcoe_tx_pkt_cnt;
+ __le32 fcoe_tx_byte_cnt;
+ __le32 fcp_tx_pkt_cnt;
+ __le32 reserved0;
+};
+
+/*
+ * FCoE statistics parameters
+ */
+struct fcoe_statistics_params {
+ struct fcoe_tx_stat_params tx_stat;
+ struct fcoe_rx_stat_params_section0 rx_stat0;
+ struct fcoe_rx_stat_params_section1 rx_stat1;
+ struct fcoe_rx_stat_params_section2 rx_stat2;
+};
+
+
+/*
+ * cfc delete event data
+*/
struct cfc_del_event_data {
u32 cid;
u32 reserved0;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index bce203fa4b9e..2091e5dbbcdd 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -27,7 +27,6 @@
#include "bnx2x.h"
#include "bnx2x_cmn.h"
-
/********************************************************/
#define ETH_HLEN 14
/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
@@ -163,6 +162,11 @@
#define EDC_MODE_LIMITING 0x0044
#define EDC_MODE_PASSIVE_DAC 0x0055
+/* BRB default for class 0 E2 */
+#define DEFAULT0_E2_BRB_MAC_PAUSE_XOFF_THR 170
+#define DEFAULT0_E2_BRB_MAC_PAUSE_XON_THR 250
+#define DEFAULT0_E2_BRB_MAC_FULL_XOFF_THR 10
+#define DEFAULT0_E2_BRB_MAC_FULL_XON_THR 50
/* BRB thresholds for E2*/
#define PFC_E2_BRB_MAC_PAUSE_XOFF_THR_PAUSE 170
@@ -177,6 +181,12 @@
#define PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE 50
#define PFC_E2_BRB_MAC_FULL_XON_THR_NON_PAUSE 250
+/* BRB default for class 0 E3A0 */
+#define DEFAULT0_E3A0_BRB_MAC_PAUSE_XOFF_THR 290
+#define DEFAULT0_E3A0_BRB_MAC_PAUSE_XON_THR 410
+#define DEFAULT0_E3A0_BRB_MAC_FULL_XOFF_THR 10
+#define DEFAULT0_E3A0_BRB_MAC_FULL_XON_THR 50
+
/* BRB thresholds for E3A0 */
#define PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_PAUSE 290
#define PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE 0
@@ -190,6 +200,11 @@
#define PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE 50
#define PFC_E3A0_BRB_MAC_FULL_XON_THR_NON_PAUSE 410
+/* BRB default for E3B0 */
+#define DEFAULT0_E3B0_BRB_MAC_PAUSE_XOFF_THR 330
+#define DEFAULT0_E3B0_BRB_MAC_PAUSE_XON_THR 490
+#define DEFAULT0_E3B0_BRB_MAC_FULL_XOFF_THR 15
+#define DEFAULT0_E3B0_BRB_MAC_FULL_XON_THR 55
/* BRB thresholds for E3B0 2 port mode*/
#define PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_PAUSE 1025
@@ -239,18 +254,29 @@
#define PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE 50
#define PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_NON_PAUSE 384
-
/* only for E3B0*/
#define PFC_E3B0_4P_BRB_FULL_LB_XOFF_THR 304
#define PFC_E3B0_4P_BRB_FULL_LB_XON_THR 384
-#define PFC_E3B0_4P_LB_GUART 120
+#define PFC_E3B0_4P_LB_GUART 120
#define PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART 120
-#define PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART_HYST 80
+#define PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART_HYST 80
#define PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART 80
-#define PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART_HYST 120
+#define PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART_HYST 120
+
+/* Pause defines*/
+#define DEFAULT_E3B0_BRB_FULL_LB_XOFF_THR 330
+#define DEFAULT_E3B0_BRB_FULL_LB_XON_THR 490
+#define DEFAULT_E3B0_LB_GUART 40
+#define DEFAULT_E3B0_BRB_MAC_0_CLASS_T_GUART 40
+#define DEFAULT_E3B0_BRB_MAC_0_CLASS_T_GUART_HYST 0
+
+#define DEFAULT_E3B0_BRB_MAC_1_CLASS_T_GUART 40
+#define DEFAULT_E3B0_BRB_MAC_1_CLASS_T_GUART_HYST 0
+
+/* ETS defines*/
#define DCBX_INVALID_COS (0xFF)
#define ETS_BW_LIMIT_CREDIT_UPPER_BOUND (0x5000)
@@ -440,7 +466,7 @@ static u32 bnx2x_ets_get_min_w_val_nig(const struct link_vars *vars)
u32 min_w_val = 0;
/* Calculate min_w_val.*/
if (vars->link_up) {
- if (SPEED_20000 == vars->line_speed)
+ if (vars->line_speed == SPEED_20000)
min_w_val = ETS_E3B0_NIG_MIN_W_VAL_20GBPS;
else
min_w_val = ETS_E3B0_NIG_MIN_W_VAL_UP_TO_10GBPS;
@@ -490,7 +516,7 @@ static void bnx2x_ets_e3b0_set_credit_upper_bound_nig(
REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_5 :
NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_5, credit_upper_bound);
- if (0 == port) {
+ if (!port) {
REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_6,
credit_upper_bound);
REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_7,
@@ -584,7 +610,7 @@ static void bnx2x_ets_e3b0_nig_disabled(const struct link_params *params,
NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4, 0x0);
REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_5 :
NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5, 0x0);
- if (0 == port) {
+ if (!port) {
REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_6, 0x0);
REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_7, 0x0);
REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_8, 0x0);
@@ -612,7 +638,7 @@ static void bnx2x_ets_e3b0_set_credit_upper_bound_pbf(
* In 2 port mode port0 has COS0-5 that can be used for WFQ.In 4
* port mode port1 has COS0-2 that can be used for WFQ.
*/
- if (0 == port) {
+ if (!port) {
base_upper_bound = PBF_REG_COS0_UPPER_BOUND_P0;
max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0;
} else {
@@ -674,7 +700,7 @@ static void bnx2x_ets_e3b0_pbf_disabled(const struct link_params *params)
* In 2 port mode port0 has COS0-5 that can be used for WFQ.
* In 4 port mode port1 has COS0-2 that can be used for WFQ.
*/
- if (0 == port) {
+ if (!port) {
base_weight = PBF_REG_COS0_WEIGHT_P0;
max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0;
} else {
@@ -846,34 +872,47 @@ static int bnx2x_ets_e3b0_set_cos_bw(struct bnx2x *bp,
******************************************************************************/
static int bnx2x_ets_e3b0_get_total_bw(
const struct link_params *params,
- const struct bnx2x_ets_params *ets_params,
+ struct bnx2x_ets_params *ets_params,
u16 *total_bw)
{
struct bnx2x *bp = params->bp;
u8 cos_idx = 0;
+ u8 is_bw_cos_exist = 0;
*total_bw = 0 ;
+
/* Calculate total BW requested */
for (cos_idx = 0; cos_idx < ets_params->num_of_cos; cos_idx++) {
- if (bnx2x_cos_state_bw == ets_params->cos[cos_idx].state) {
+ if (ets_params->cos[cos_idx].state == bnx2x_cos_state_bw) {
+ is_bw_cos_exist = 1;
+ if (!ets_params->cos[cos_idx].params.bw_params.bw) {
+				DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config BW "
+					"was set to 0\n");
+ /*
+ * This is to prevent a state when ramrods
+ * can't be sent
+ */
+ ets_params->cos[cos_idx].params.bw_params.bw
+ = 1;
+ }
*total_bw +=
ets_params->cos[cos_idx].params.bw_params.bw;
}
}
/* Check total BW is valid */
- if ((100 != *total_bw) || (0 == *total_bw)) {
- if (0 == *total_bw) {
+ if ((is_bw_cos_exist == 1) && (*total_bw != 100)) {
+ if (*total_bw == 0) {
DP(NETIF_MSG_LINK,
- "bnx2x_ets_E3B0_config toatl BW shouldn't be 0\n");
+ "bnx2x_ets_E3B0_config total BW shouldn't be 0\n");
return -EINVAL;
}
DP(NETIF_MSG_LINK,
- "bnx2x_ets_E3B0_config toatl BW should be 100\n");
- /**
- * We can handle a case whre the BW isn't 100 this can happen
- * if the TC are joined.
- */
+ "bnx2x_ets_E3B0_config total BW should be 100\n");
+ /*
+ * We can handle a case where the BW isn't 100; this can happen
+ * if the TCs are joined.
+ */
}
return 0;
}
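
The hunk above changes the total-bandwidth check in two ways: a COS entry that is in bandwidth mode but configured with 0% is clamped to 1% so firmware ramrods can still be sent, and the "total must be 100" rule is only enforced when at least one bandwidth-mode COS exists. A minimal standalone sketch of that logic, not part of the patch, using plain C and a simplified COS array in place of the driver's bnx2x_ets_params (all names below are illustrative):

#include <stdio.h>

#define MAX_COS 6

/* Simplified stand-in for the driver's per-COS bandwidth setting. */
struct cos_bw {
        int bw_mode;    /* non-zero if this COS is in bandwidth (ETS) mode */
        unsigned bw;    /* requested share in percent */
};

/* Returns 0 on success, -1 when the configuration is rejected. */
static int get_total_bw(struct cos_bw *cos, int num_cos, unsigned *total)
{
        int i, have_bw_cos = 0;

        *total = 0;
        for (i = 0; i < num_cos; i++) {
                if (!cos[i].bw_mode)
                        continue;
                have_bw_cos = 1;
                if (cos[i].bw == 0)
                        cos[i].bw = 1;  /* clamp 0% so ramrods can still be sent */
                *total += cos[i].bw;
        }
        /* Only validate the sum when at least one COS asked for bandwidth. */
        if (have_bw_cos && *total != 100) {
                if (*total == 0)
                        return -1;      /* a total of 0 is rejected outright */
                printf("warning: total BW is %u, not 100\n", *total);
        }
        return 0;
}

int main(void)
{
        struct cos_bw cos[MAX_COS] = { {1, 0}, {1, 60}, {0, 0} };
        unsigned total;

        if (get_total_bw(cos, 3, &total) == 0)
                printf("total BW accepted: %u%%\n", total);
        return 0;
}

As in the driver, a total other than 100 only produces a warning, since joined traffic classes can legitimately yield such a sum.
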
@@ -904,7 +943,7 @@ static int bnx2x_ets_e3b0_sp_pri_to_cos_set(const struct link_params *params,
const u8 max_num_of_cos = (port) ? DCBX_E3B0_MAX_NUM_COS_PORT1 :
DCBX_E3B0_MAX_NUM_COS_PORT0;
- if (DCBX_INVALID_COS != sp_pri_to_cos[pri]) {
+ if (sp_pri_to_cos[pri] != DCBX_INVALID_COS) {
DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid "
"parameter There can't be two COS's with "
"the same strict pri\n");
@@ -913,7 +952,7 @@ static int bnx2x_ets_e3b0_sp_pri_to_cos_set(const struct link_params *params,
if (pri > max_num_of_cos) {
DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid "
- "parameter Illegal strict priority\n");
+ "parameter Illegal strict priority\n");
return -EINVAL;
}
@@ -995,8 +1034,8 @@ static int bnx2x_ets_e3b0_sp_set_pri_cli_reg(const struct link_params *params,
/* Set all the strict priority first */
for (i = 0; i < max_num_of_cos; i++) {
- if (DCBX_INVALID_COS != sp_pri_to_cos[i]) {
- if (DCBX_MAX_NUM_COS <= sp_pri_to_cos[i]) {
+ if (sp_pri_to_cos[i] != DCBX_INVALID_COS) {
+ if (sp_pri_to_cos[i] >= DCBX_MAX_NUM_COS) {
DP(NETIF_MSG_LINK,
"bnx2x_ets_e3b0_sp_set_pri_cli_reg "
"invalid cos entry\n");
@@ -1010,7 +1049,7 @@ static int bnx2x_ets_e3b0_sp_set_pri_cli_reg(const struct link_params *params,
sp_pri_to_cos[i], pri_set);
pri_bitmask = 1 << sp_pri_to_cos[i];
/* COS is used remove it from bitmap.*/
- if (0 == (pri_bitmask & cos_bit_to_set)) {
+ if (!(pri_bitmask & cos_bit_to_set)) {
DP(NETIF_MSG_LINK,
"bnx2x_ets_e3b0_sp_set_pri_cli_reg "
"invalid There can't be two COS's with"
@@ -1072,7 +1111,7 @@ static int bnx2x_ets_e3b0_sp_set_pri_cli_reg(const struct link_params *params,
******************************************************************************/
int bnx2x_ets_e3b0_config(const struct link_params *params,
const struct link_vars *vars,
- const struct bnx2x_ets_params *ets_params)
+ struct bnx2x_ets_params *ets_params)
{
struct bnx2x *bp = params->bp;
int bnx2x_status = 0;
@@ -1105,15 +1144,15 @@ int bnx2x_ets_e3b0_config(const struct link_params *params,
/* Prepare BW parameters*/
bnx2x_status = bnx2x_ets_e3b0_get_total_bw(params, ets_params,
&total_bw);
- if (0 != bnx2x_status) {
+ if (bnx2x_status) {
DP(NETIF_MSG_LINK,
"bnx2x_ets_E3B0_config get_total_bw failed\n");
return -EINVAL;
}
- /**
- * Upper bound is set according to current link speed (min_w_val
- * should be the same for upper bound and COS credit val).
+ /*
+ * Upper bound is set according to current link speed (min_w_val
+ * should be the same for upper bound and COS credit val).
*/
bnx2x_ets_e3b0_set_credit_upper_bound_nig(params, min_w_val_nig);
bnx2x_ets_e3b0_set_credit_upper_bound_pbf(params, min_w_val_pbf);
@@ -1122,7 +1161,7 @@ int bnx2x_ets_e3b0_config(const struct link_params *params,
for (cos_entry = 0; cos_entry < ets_params->num_of_cos; cos_entry++) {
if (bnx2x_cos_state_bw == ets_params->cos[cos_entry].state) {
cos_bw_bitmap |= (1 << cos_entry);
- /**
+ /*
 * The function also sets the BW in HW (not the mapping
* yet)
*/
@@ -1146,7 +1185,7 @@ int bnx2x_ets_e3b0_config(const struct link_params *params,
"bnx2x_ets_e3b0_config cos state not valid\n");
return -EINVAL;
}
- if (0 != bnx2x_status) {
+ if (bnx2x_status) {
DP(NETIF_MSG_LINK,
"bnx2x_ets_e3b0_config set cos bw failed\n");
return bnx2x_status;
@@ -1157,7 +1196,7 @@ int bnx2x_ets_e3b0_config(const struct link_params *params,
bnx2x_status = bnx2x_ets_e3b0_sp_set_pri_cli_reg(params,
sp_pri_to_cos);
- if (0 != bnx2x_status) {
+ if (bnx2x_status) {
DP(NETIF_MSG_LINK,
"bnx2x_ets_E3B0_config set_pri_cli_reg failed\n");
return bnx2x_status;
@@ -1168,7 +1207,7 @@ int bnx2x_ets_e3b0_config(const struct link_params *params,
cos_sp_bitmap,
cos_bw_bitmap);
- if (0 != bnx2x_status) {
+ if (bnx2x_status) {
DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config SP failed\n");
return bnx2x_status;
}
@@ -1232,9 +1271,9 @@ void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw,
DP(NETIF_MSG_LINK, "ETS enabled BW limit configuration\n");
- if ((0 == total_bw) ||
- (0 == cos0_bw) ||
- (0 == cos1_bw)) {
+ if ((!total_bw) ||
+ (!cos0_bw) ||
+ (!cos1_bw)) {
DP(NETIF_MSG_LINK, "Total BW can't be zero\n");
return;
}
@@ -1290,7 +1329,7 @@ int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
* dbg0-010 dbg1-001 cos1-100 cos0-011 MCP-000
* dbg0-010 dbg1-001 cos0-011 cos1-100 MCP-000
*/
- val = (0 == strict_cos) ? 0x2318 : 0x22E0;
+ val = (!strict_cos) ? 0x2318 : 0x22E0;
REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, val);
return 0;
@@ -1298,7 +1337,6 @@ int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
/******************************************************************/
/* PFC section */
/******************************************************************/
-
static void bnx2x_update_pfc_xmac(struct link_params *params,
struct link_vars *vars,
u8 is_lb)
@@ -1401,7 +1439,7 @@ void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars,
if (!vars->link_up)
return;
- if (MAC_TYPE_EMAC == vars->mac_type) {
+ if (vars->mac_type == MAC_TYPE_EMAC) {
DP(NETIF_MSG_LINK, "About to read PFC stats from EMAC\n");
bnx2x_emac_get_pfc_stat(params, pfc_frames_sent,
pfc_frames_received);
@@ -1435,6 +1473,18 @@ static void bnx2x_set_mdio_clk(struct bnx2x *bp, u32 chip_id, u8 port)
udelay(40);
}
+static u8 bnx2x_is_4_port_mode(struct bnx2x *bp)
+{
+ u32 port4mode_ovwr_val;
+ /* Check 4-port override enabled */
+ port4mode_ovwr_val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
+ if (port4mode_ovwr_val & (1<<0)) {
+ /* Return 4-port mode override value */
+ return ((port4mode_ovwr_val & (1<<1)) == (1<<1));
+ }
+ /* Return 4-port mode from input pin */
+ return (u8)REG_RD(bp, MISC_REG_PORT4MODE_EN);
+}
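
The relocated bnx2x_is_4_port_mode() helper encodes a small decision: bit 0 of the override register says whether the override is active, bit 1 carries the overridden 4-port value, and only when the override is off does the input-pin register decide. A standalone sketch of that decode, not part of the patch, with the register values passed in directly instead of being read through REG_RD:

#include <stdio.h>

/* Bit 0: override enable, bit 1: overridden 4-port value. */
static unsigned char is_4_port_mode(unsigned ovwr_reg, unsigned pin_reg)
{
        if (ovwr_reg & (1 << 0))                        /* override enabled? */
                return (ovwr_reg & (1 << 1)) != 0;      /* use overridden value */
        return pin_reg & 1;                             /* else use the input pin */
}

int main(void)
{
        /* Override enabled and forcing 4-port mode, pin says 2-port. */
        printf("%d\n", is_4_port_mode(0x3, 0x0));       /* prints 1 */
        /* Override disabled, pin selects 4-port mode. */
        printf("%d\n", is_4_port_mode(0x0, 0x1));       /* prints 1 */
        return 0;
}
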
static void bnx2x_emac_init(struct link_params *params,
struct link_vars *vars)
@@ -1601,31 +1651,18 @@ static void bnx2x_umac_enable(struct link_params *params,
}
-static u8 bnx2x_is_4_port_mode(struct bnx2x *bp)
-{
- u32 port4mode_ovwr_val;
- /* Check 4-port override enabled */
- port4mode_ovwr_val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
- if (port4mode_ovwr_val & (1<<0)) {
- /* Return 4-port mode override value */
- return ((port4mode_ovwr_val & (1<<1)) == (1<<1));
- }
- /* Return 4-port mode from input pin */
- return (u8)REG_RD(bp, MISC_REG_PORT4MODE_EN);
-}
-
/* Define the XMAC mode */
static void bnx2x_xmac_init(struct link_params *params, u32 max_speed)
{
struct bnx2x *bp = params->bp;
u32 is_port4mode = bnx2x_is_4_port_mode(bp);
- /**
- * In 4-port mode, need to set the mode only once, so if XMAC is
- * already out of reset, it means the mode has already been set,
- * and it must not* reset the XMAC again, since it controls both
- * ports of the path
- **/
+ /*
+ * In 4-port mode, need to set the mode only once, so if XMAC is
+ * already out of reset, it means the mode has already been set,
+ * and it must not* reset the XMAC again, since it controls both
+ * ports of the path
+ */
if ((CHIP_NUM(bp) == CHIP_NUM_57840) &&
(REG_RD(bp, MISC_REG_RESET_REG_2) &
@@ -1743,6 +1780,7 @@ static int bnx2x_xmac_enable(struct link_params *params,
return 0;
}
+
static int bnx2x_emac_enable(struct link_params *params,
struct link_vars *vars, u8 lb)
{
@@ -1999,7 +2037,6 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params,
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2);
}
-
/* PFC BRB internal port configuration params */
struct bnx2x_pfc_brb_threshold_val {
u32 pause_xoff;
@@ -2009,6 +2046,8 @@ struct bnx2x_pfc_brb_threshold_val {
};
struct bnx2x_pfc_brb_e3b0_val {
+ u32 per_class_guaranty_mode;
+ u32 lb_guarantied_hyst;
u32 full_lb_xoff_th;
u32 full_lb_xon_threshold;
u32 lb_guarantied;
@@ -2021,6 +2060,9 @@ struct bnx2x_pfc_brb_e3b0_val {
struct bnx2x_pfc_brb_th_val {
struct bnx2x_pfc_brb_threshold_val pauseable_th;
struct bnx2x_pfc_brb_threshold_val non_pauseable_th;
+ struct bnx2x_pfc_brb_threshold_val default_class0;
+ struct bnx2x_pfc_brb_threshold_val default_class1;
+
};
static int bnx2x_pfc_brb_get_config_params(
struct link_params *params,
@@ -2028,140 +2070,200 @@ static int bnx2x_pfc_brb_get_config_params(
{
struct bnx2x *bp = params->bp;
DP(NETIF_MSG_LINK, "Setting PFC BRB configuration\n");
+
+ config_val->default_class1.pause_xoff = 0;
+ config_val->default_class1.pause_xon = 0;
+ config_val->default_class1.full_xoff = 0;
+ config_val->default_class1.full_xon = 0;
+
if (CHIP_IS_E2(bp)) {
+ /* class0 defaults */
+ config_val->default_class0.pause_xoff =
+ DEFAULT0_E2_BRB_MAC_PAUSE_XOFF_THR;
+ config_val->default_class0.pause_xon =
+ DEFAULT0_E2_BRB_MAC_PAUSE_XON_THR;
+ config_val->default_class0.full_xoff =
+ DEFAULT0_E2_BRB_MAC_FULL_XOFF_THR;
+ config_val->default_class0.full_xon =
+ DEFAULT0_E2_BRB_MAC_FULL_XON_THR;
+ /* pause able*/
config_val->pauseable_th.pause_xoff =
- PFC_E2_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
+ PFC_E2_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
config_val->pauseable_th.pause_xon =
- PFC_E2_BRB_MAC_PAUSE_XON_THR_PAUSE;
+ PFC_E2_BRB_MAC_PAUSE_XON_THR_PAUSE;
config_val->pauseable_th.full_xoff =
- PFC_E2_BRB_MAC_FULL_XOFF_THR_PAUSE;
+ PFC_E2_BRB_MAC_FULL_XOFF_THR_PAUSE;
config_val->pauseable_th.full_xon =
- PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE;
+ PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE;
/* non pause able*/
config_val->non_pauseable_th.pause_xoff =
- PFC_E2_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
+ PFC_E2_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
config_val->non_pauseable_th.pause_xon =
- PFC_E2_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
+ PFC_E2_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
config_val->non_pauseable_th.full_xoff =
- PFC_E2_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
+ PFC_E2_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
config_val->non_pauseable_th.full_xon =
- PFC_E2_BRB_MAC_FULL_XON_THR_NON_PAUSE;
+ PFC_E2_BRB_MAC_FULL_XON_THR_NON_PAUSE;
} else if (CHIP_IS_E3A0(bp)) {
+ /* class0 defaults */
+ config_val->default_class0.pause_xoff =
+ DEFAULT0_E3A0_BRB_MAC_PAUSE_XOFF_THR;
+ config_val->default_class0.pause_xon =
+ DEFAULT0_E3A0_BRB_MAC_PAUSE_XON_THR;
+ config_val->default_class0.full_xoff =
+ DEFAULT0_E3A0_BRB_MAC_FULL_XOFF_THR;
+ config_val->default_class0.full_xon =
+ DEFAULT0_E3A0_BRB_MAC_FULL_XON_THR;
+ /* pause able */
config_val->pauseable_th.pause_xoff =
- PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
+ PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
config_val->pauseable_th.pause_xon =
- PFC_E3A0_BRB_MAC_PAUSE_XON_THR_PAUSE;
+ PFC_E3A0_BRB_MAC_PAUSE_XON_THR_PAUSE;
config_val->pauseable_th.full_xoff =
- PFC_E3A0_BRB_MAC_FULL_XOFF_THR_PAUSE;
+ PFC_E3A0_BRB_MAC_FULL_XOFF_THR_PAUSE;
config_val->pauseable_th.full_xon =
- PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE;
+ PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE;
/* non pause able*/
config_val->non_pauseable_th.pause_xoff =
- PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
+ PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
config_val->non_pauseable_th.pause_xon =
- PFC_E3A0_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
+ PFC_E3A0_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
config_val->non_pauseable_th.full_xoff =
- PFC_E3A0_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
+ PFC_E3A0_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
config_val->non_pauseable_th.full_xon =
- PFC_E3A0_BRB_MAC_FULL_XON_THR_NON_PAUSE;
+ PFC_E3A0_BRB_MAC_FULL_XON_THR_NON_PAUSE;
} else if (CHIP_IS_E3B0(bp)) {
+ /* class0 defaults */
+ config_val->default_class0.pause_xoff =
+ DEFAULT0_E3B0_BRB_MAC_PAUSE_XOFF_THR;
+ config_val->default_class0.pause_xon =
+ DEFAULT0_E3B0_BRB_MAC_PAUSE_XON_THR;
+ config_val->default_class0.full_xoff =
+ DEFAULT0_E3B0_BRB_MAC_FULL_XOFF_THR;
+ config_val->default_class0.full_xon =
+ DEFAULT0_E3B0_BRB_MAC_FULL_XON_THR;
+
if (params->phy[INT_PHY].flags &
FLAGS_4_PORT_MODE) {
config_val->pauseable_th.pause_xoff =
- PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
+ PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
config_val->pauseable_th.pause_xon =
- PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_PAUSE;
+ PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_PAUSE;
config_val->pauseable_th.full_xoff =
- PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_PAUSE;
+ PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_PAUSE;
config_val->pauseable_th.full_xon =
- PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE;
+ PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE;
/* non pause able*/
config_val->non_pauseable_th.pause_xoff =
- PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
+ PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
config_val->non_pauseable_th.pause_xon =
- PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
+ PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
config_val->non_pauseable_th.full_xoff =
- PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
+ PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
config_val->non_pauseable_th.full_xon =
- PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_NON_PAUSE;
- } else {
- config_val->pauseable_th.pause_xoff =
- PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
- config_val->pauseable_th.pause_xon =
- PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_PAUSE;
- config_val->pauseable_th.full_xoff =
- PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_PAUSE;
- config_val->pauseable_th.full_xon =
- PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_PAUSE;
- /* non pause able*/
- config_val->non_pauseable_th.pause_xoff =
- PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
- config_val->non_pauseable_th.pause_xon =
- PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
- config_val->non_pauseable_th.full_xoff =
- PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
- config_val->non_pauseable_th.full_xon =
- PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_NON_PAUSE;
- }
+ PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_NON_PAUSE;
+ } else {
+ config_val->pauseable_th.pause_xoff =
+ PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
+ config_val->pauseable_th.pause_xon =
+ PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_PAUSE;
+ config_val->pauseable_th.full_xoff =
+ PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_PAUSE;
+ config_val->pauseable_th.full_xon =
+ PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_PAUSE;
+ /* non pause able*/
+ config_val->non_pauseable_th.pause_xoff =
+ PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
+ config_val->non_pauseable_th.pause_xon =
+ PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
+ config_val->non_pauseable_th.full_xoff =
+ PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
+ config_val->non_pauseable_th.full_xon =
+ PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_NON_PAUSE;
+ }
} else
return -EINVAL;
return 0;
}
-
-static void bnx2x_pfc_brb_get_e3b0_config_params(struct link_params *params,
- struct bnx2x_pfc_brb_e3b0_val
- *e3b0_val,
- u32 cos0_pauseable,
- u32 cos1_pauseable)
+static void bnx2x_pfc_brb_get_e3b0_config_params(
+ struct link_params *params,
+ struct bnx2x_pfc_brb_e3b0_val
+ *e3b0_val,
+ struct bnx2x_nig_brb_pfc_port_params *pfc_params,
+ const u8 pfc_enabled)
{
- if (params->phy[INT_PHY].flags & FLAGS_4_PORT_MODE) {
+ if (pfc_enabled && pfc_params) {
+ e3b0_val->per_class_guaranty_mode = 1;
+ e3b0_val->lb_guarantied_hyst = 80;
+
+ if (params->phy[INT_PHY].flags &
+ FLAGS_4_PORT_MODE) {
+ e3b0_val->full_lb_xoff_th =
+ PFC_E3B0_4P_BRB_FULL_LB_XOFF_THR;
+ e3b0_val->full_lb_xon_threshold =
+ PFC_E3B0_4P_BRB_FULL_LB_XON_THR;
+ e3b0_val->lb_guarantied =
+ PFC_E3B0_4P_LB_GUART;
+ e3b0_val->mac_0_class_t_guarantied =
+ PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART;
+ e3b0_val->mac_0_class_t_guarantied_hyst =
+ PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART_HYST;
+ e3b0_val->mac_1_class_t_guarantied =
+ PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART;
+ e3b0_val->mac_1_class_t_guarantied_hyst =
+ PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART_HYST;
+ } else {
+ e3b0_val->full_lb_xoff_th =
+ PFC_E3B0_2P_BRB_FULL_LB_XOFF_THR;
+ e3b0_val->full_lb_xon_threshold =
+ PFC_E3B0_2P_BRB_FULL_LB_XON_THR;
+ e3b0_val->mac_0_class_t_guarantied_hyst =
+ PFC_E3B0_2P_BRB_MAC_0_CLASS_T_GUART_HYST;
+ e3b0_val->mac_1_class_t_guarantied =
+ PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART;
+ e3b0_val->mac_1_class_t_guarantied_hyst =
+ PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART_HYST;
+
+ if (pfc_params->cos0_pauseable !=
+ pfc_params->cos1_pauseable) {
+ /* Non-pauseable = Lossy + pauseable = Lossless */
+ e3b0_val->lb_guarantied =
+ PFC_E3B0_2P_MIX_PAUSE_LB_GUART;
+ e3b0_val->mac_0_class_t_guarantied =
+ PFC_E3B0_2P_MIX_PAUSE_MAC_0_CLASS_T_GUART;
+ } else if (pfc_params->cos0_pauseable) {
+ /* Lossless + Lossless */
+ e3b0_val->lb_guarantied =
+ PFC_E3B0_2P_PAUSE_LB_GUART;
+ e3b0_val->mac_0_class_t_guarantied =
+ PFC_E3B0_2P_PAUSE_MAC_0_CLASS_T_GUART;
+ } else {
+ /* Lossy + Lossy */
+ e3b0_val->lb_guarantied =
+ PFC_E3B0_2P_NON_PAUSE_LB_GUART;
+ e3b0_val->mac_0_class_t_guarantied =
+ PFC_E3B0_2P_NON_PAUSE_MAC_0_CLASS_T_GUART;
+ }
+ }
+ } else {
+ e3b0_val->per_class_guaranty_mode = 0;
+ e3b0_val->lb_guarantied_hyst = 0;
e3b0_val->full_lb_xoff_th =
- PFC_E3B0_4P_BRB_FULL_LB_XOFF_THR;
+ DEFAULT_E3B0_BRB_FULL_LB_XOFF_THR;
e3b0_val->full_lb_xon_threshold =
- PFC_E3B0_4P_BRB_FULL_LB_XON_THR;
+ DEFAULT_E3B0_BRB_FULL_LB_XON_THR;
e3b0_val->lb_guarantied =
- PFC_E3B0_4P_LB_GUART;
+ DEFAULT_E3B0_LB_GUART;
e3b0_val->mac_0_class_t_guarantied =
- PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART;
- e3b0_val->mac_0_class_t_guarantied_hyst =
- PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART_HYST;
- e3b0_val->mac_1_class_t_guarantied =
- PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART;
- e3b0_val->mac_1_class_t_guarantied_hyst =
- PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART_HYST;
- } else {
- e3b0_val->full_lb_xoff_th =
- PFC_E3B0_2P_BRB_FULL_LB_XOFF_THR;
- e3b0_val->full_lb_xon_threshold =
- PFC_E3B0_2P_BRB_FULL_LB_XON_THR;
+ DEFAULT_E3B0_BRB_MAC_0_CLASS_T_GUART;
e3b0_val->mac_0_class_t_guarantied_hyst =
- PFC_E3B0_2P_BRB_MAC_0_CLASS_T_GUART_HYST;
+ DEFAULT_E3B0_BRB_MAC_0_CLASS_T_GUART_HYST;
e3b0_val->mac_1_class_t_guarantied =
- PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART;
+ DEFAULT_E3B0_BRB_MAC_1_CLASS_T_GUART;
e3b0_val->mac_1_class_t_guarantied_hyst =
- PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART_HYST;
-
- if (cos0_pauseable != cos1_pauseable) {
- /* nonpauseable= Lossy + pauseable = Lossless*/
- e3b0_val->lb_guarantied =
- PFC_E3B0_2P_MIX_PAUSE_LB_GUART;
- e3b0_val->mac_0_class_t_guarantied =
- PFC_E3B0_2P_MIX_PAUSE_MAC_0_CLASS_T_GUART;
- } else if (cos0_pauseable) {
- /* Lossless +Lossless*/
- e3b0_val->lb_guarantied =
- PFC_E3B0_2P_PAUSE_LB_GUART;
- e3b0_val->mac_0_class_t_guarantied =
- PFC_E3B0_2P_PAUSE_MAC_0_CLASS_T_GUART;
- } else {
- /* Lossy +Lossy*/
- e3b0_val->lb_guarantied =
- PFC_E3B0_2P_NON_PAUSE_LB_GUART;
- e3b0_val->mac_0_class_t_guarantied =
- PFC_E3B0_2P_NON_PAUSE_MAC_0_CLASS_T_GUART;
- }
+ DEFAULT_E3B0_BRB_MAC_1_CLASS_T_GUART_HYST;
}
}
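
After this rewrite, bnx2x_pfc_brb_get_e3b0_config_params() chooses the guaranteed BRB buffer profile along two axes: whether PFC is enabled at all (otherwise the DEFAULT_E3B0_* values apply with per-class guaranty mode off), and, in 2-port mode, whether COS0 and COS1 agree on pauseability (mixed, lossless + lossless, or lossy + lossy). A compact standalone sketch of just that classification, not part of the patch, with an enum standing in for the PFC_E3B0_* constant sets:

#include <stdio.h>

enum brb_profile { BRB_DEFAULT, BRB_4PORT, BRB_2P_MIX, BRB_2P_LOSSLESS, BRB_2P_LOSSY };

static enum brb_profile pick_profile(int pfc_enabled, int four_port,
                                     int cos0_pauseable, int cos1_pauseable)
{
        if (!pfc_enabled)
                return BRB_DEFAULT;     /* DEFAULT_E3B0_* values, guaranty mode off */
        if (four_port)
                return BRB_4PORT;       /* PFC_E3B0_4P_* values */
        if (cos0_pauseable != cos1_pauseable)
                return BRB_2P_MIX;      /* one lossless class, one lossy class */
        return cos0_pauseable ? BRB_2P_LOSSLESS : BRB_2P_LOSSY;
}

int main(void)
{
        printf("%d\n", pick_profile(1, 0, 1, 0));       /* mixed: prints 2 */
        printf("%d\n", pick_profile(0, 0, 1, 1));       /* PFC off: prints 0 */
        return 0;
}
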
static int bnx2x_update_pfc_brb(struct link_params *params,
@@ -2172,23 +2274,28 @@ static int bnx2x_update_pfc_brb(struct link_params *params,
struct bnx2x *bp = params->bp;
struct bnx2x_pfc_brb_th_val config_val = { {0} };
struct bnx2x_pfc_brb_threshold_val *reg_th_config =
- &config_val.pauseable_th;
+ &config_val.pauseable_th;
struct bnx2x_pfc_brb_e3b0_val e3b0_val = {0};
- int set_pfc = params->feature_config_flags &
+ const int set_pfc = params->feature_config_flags &
FEATURE_CONFIG_PFC_ENABLED;
+ const u8 pfc_enabled = (set_pfc && pfc_params);
int bnx2x_status = 0;
u8 port = params->port;
/* default - pause configuration */
reg_th_config = &config_val.pauseable_th;
bnx2x_status = bnx2x_pfc_brb_get_config_params(params, &config_val);
- if (0 != bnx2x_status)
+ if (bnx2x_status)
return bnx2x_status;
- if (set_pfc && pfc_params)
+ if (pfc_enabled) {
/* First COS */
- if (!pfc_params->cos0_pauseable)
+ if (pfc_params->cos0_pauseable)
+ reg_th_config = &config_val.pauseable_th;
+ else
reg_th_config = &config_val.non_pauseable_th;
+ } else
+ reg_th_config = &config_val.default_class0;
/*
* The number of free blocks below which the pause signal to class 0
* of MAC #n is asserted. n=0,1
@@ -2215,122 +2322,119 @@ static int bnx2x_update_pfc_brb(struct link_params *params,
REG_WR(bp, (port) ? BRB1_REG_FULL_0_XON_THRESHOLD_1 :
BRB1_REG_FULL_0_XON_THRESHOLD_0 , reg_th_config->full_xon);
- if (set_pfc && pfc_params) {
+ if (pfc_enabled) {
/* Second COS */
if (pfc_params->cos1_pauseable)
reg_th_config = &config_val.pauseable_th;
else
reg_th_config = &config_val.non_pauseable_th;
+ } else
+ reg_th_config = &config_val.default_class1;
+ /*
+ * The number of free blocks below which the pause signal to
+ * class 1 of MAC #n is asserted. n=0,1
+ */
+ REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XOFF_THRESHOLD_1 :
+ BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0,
+ reg_th_config->pause_xoff);
+
+ /*
+ * The number of free blocks above which the pause signal to
+ * class 1 of MAC #n is de-asserted. n=0,1
+ */
+ REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XON_THRESHOLD_1 :
+ BRB1_REG_PAUSE_1_XON_THRESHOLD_0,
+ reg_th_config->pause_xon);
+ /*
+ * The number of free blocks below which the full signal to
+ * class 1 of MAC #n is asserted. n=0,1
+ */
+ REG_WR(bp, (port) ? BRB1_REG_FULL_1_XOFF_THRESHOLD_1 :
+ BRB1_REG_FULL_1_XOFF_THRESHOLD_0,
+ reg_th_config->full_xoff);
+ /*
+ * The number of free blocks above which the full signal to
+ * class 1 of MAC #n is de-asserted. n=0,1
+ */
+ REG_WR(bp, (port) ? BRB1_REG_FULL_1_XON_THRESHOLD_1 :
+ BRB1_REG_FULL_1_XON_THRESHOLD_0,
+ reg_th_config->full_xon);
+
+ if (CHIP_IS_E3B0(bp)) {
+ bnx2x_pfc_brb_get_e3b0_config_params(
+ params,
+ &e3b0_val,
+ pfc_params,
+ pfc_enabled);
+
+ REG_WR(bp, BRB1_REG_PER_CLASS_GUARANTY_MODE,
+ e3b0_val.per_class_guaranty_mode);
+
/*
- * The number of free blocks below which the pause signal to
- * class 1 of MAC #n is asserted. n=0,1
- **/
- REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XOFF_THRESHOLD_1 :
- BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0,
- reg_th_config->pause_xoff);
+ * The hysteresis on the guarantied buffer space for the LB
+ * port before signaling XON.
+ */
+ REG_WR(bp, BRB1_REG_LB_GUARANTIED_HYST,
+ e3b0_val.lb_guarantied_hyst);
+
/*
- * The number of free blocks above which the pause signal to
- * class 1 of MAC #n is de-asserted. n=0,1
+ * The number of free blocks below which the full signal to the
+ * LB port is asserted.
*/
- REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XON_THRESHOLD_1 :
- BRB1_REG_PAUSE_1_XON_THRESHOLD_0,
- reg_th_config->pause_xon);
+ REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD,
+ e3b0_val.full_lb_xoff_th);
/*
- * The number of free blocks below which the full signal to
- * class 1 of MAC #n is asserted. n=0,1
+ * The number of free blocks above which the full signal to the
+ * LB port is de-asserted.
*/
- REG_WR(bp, (port) ? BRB1_REG_FULL_1_XOFF_THRESHOLD_1 :
- BRB1_REG_FULL_1_XOFF_THRESHOLD_0,
- reg_th_config->full_xoff);
+ REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD,
+ e3b0_val.full_lb_xon_threshold);
/*
- * The number of free blocks above which the full signal to
- * class 1 of MAC #n is de-asserted. n=0,1
+ * The number of blocks guarantied for the MAC #n port. n=0,1
*/
- REG_WR(bp, (port) ? BRB1_REG_FULL_1_XON_THRESHOLD_1 :
- BRB1_REG_FULL_1_XON_THRESHOLD_0,
- reg_th_config->full_xon);
+ /* The number of blocks guarantied for the LB port.*/
+ REG_WR(bp, BRB1_REG_LB_GUARANTIED,
+ e3b0_val.lb_guarantied);
- if (CHIP_IS_E3B0(bp)) {
- /*Should be done by init tool */
- /*
- * BRB_empty_for_dup = BRB1_REG_BRB_EMPTY_THRESHOLD
- * reset value
- * 944
- */
-
- /**
- * The hysteresis on the guarantied buffer space for the Lb port
- * before signaling XON.
- **/
- REG_WR(bp, BRB1_REG_LB_GUARANTIED_HYST, 80);
-
- bnx2x_pfc_brb_get_e3b0_config_params(
- params,
- &e3b0_val,
- pfc_params->cos0_pauseable,
- pfc_params->cos1_pauseable);
- /**
- * The number of free blocks below which the full signal to the
- * LB port is asserted.
- */
- REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD,
- e3b0_val.full_lb_xoff_th);
- /**
- * The number of free blocks above which the full signal to the
- * LB port is de-asserted.
- */
- REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD,
- e3b0_val.full_lb_xon_threshold);
- /**
- * The number of blocks guarantied for the MAC #n port. n=0,1
- */
-
- /*The number of blocks guarantied for the LB port.*/
- REG_WR(bp, BRB1_REG_LB_GUARANTIED,
- e3b0_val.lb_guarantied);
-
- /**
- * The number of blocks guarantied for the MAC #n port.
- */
- REG_WR(bp, BRB1_REG_MAC_GUARANTIED_0,
- 2 * e3b0_val.mac_0_class_t_guarantied);
- REG_WR(bp, BRB1_REG_MAC_GUARANTIED_1,
- 2 * e3b0_val.mac_1_class_t_guarantied);
- /**
- * The number of blocks guarantied for class #t in MAC0. t=0,1
- */
- REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED,
- e3b0_val.mac_0_class_t_guarantied);
- REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED,
- e3b0_val.mac_0_class_t_guarantied);
- /**
- * The hysteresis on the guarantied buffer space for class in
- * MAC0. t=0,1
- */
- REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED_HYST,
- e3b0_val.mac_0_class_t_guarantied_hyst);
- REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED_HYST,
- e3b0_val.mac_0_class_t_guarantied_hyst);
-
- /**
- * The number of blocks guarantied for class #t in MAC1.t=0,1
- */
- REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED,
- e3b0_val.mac_1_class_t_guarantied);
- REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED,
- e3b0_val.mac_1_class_t_guarantied);
- /**
- * The hysteresis on the guarantied buffer space for class #t
- * in MAC1. t=0,1
- */
- REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED_HYST,
- e3b0_val.mac_1_class_t_guarantied_hyst);
- REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED_HYST,
- e3b0_val.mac_1_class_t_guarantied_hyst);
-
- }
+ /*
+ * The number of blocks guarantied for the MAC #n port.
+ */
+ REG_WR(bp, BRB1_REG_MAC_GUARANTIED_0,
+ 2 * e3b0_val.mac_0_class_t_guarantied);
+ REG_WR(bp, BRB1_REG_MAC_GUARANTIED_1,
+ 2 * e3b0_val.mac_1_class_t_guarantied);
+ /*
+ * The number of blocks guarantied for class #t in MAC0. t=0,1
+ */
+ REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED,
+ e3b0_val.mac_0_class_t_guarantied);
+ REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED,
+ e3b0_val.mac_0_class_t_guarantied);
+ /*
+ * The hysteresis on the guarantied buffer space for class in
+ * MAC0. t=0,1
+ */
+ REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED_HYST,
+ e3b0_val.mac_0_class_t_guarantied_hyst);
+ REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED_HYST,
+ e3b0_val.mac_0_class_t_guarantied_hyst);
+ /*
+ * The number of blocks guarantied for class #t in MAC1.t=0,1
+ */
+ REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED,
+ e3b0_val.mac_1_class_t_guarantied);
+ REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED,
+ e3b0_val.mac_1_class_t_guarantied);
+ /*
+ * The hysteresis on the guarantied buffer space for class #t
+ * in MAC1. t=0,1
+ */
+ REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED_HYST,
+ e3b0_val.mac_1_class_t_guarantied_hyst);
+ REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED_HYST,
+ e3b0_val.mac_1_class_t_guarantied_hyst);
}
return bnx2x_status;
@@ -2398,7 +2502,7 @@ static void bnx2x_update_pfc_nig(struct link_params *params,
struct bnx2x_nig_brb_pfc_port_params *nig_params)
{
u32 xcm_mask = 0, ppp_enable = 0, pause_enable = 0, llfc_out_en = 0;
- u32 llfc_enable = 0, xcm0_out_en = 0, p0_hwpfc_enable = 0;
+ u32 llfc_enable = 0, xcm_out_en = 0, hwpfc_enable = 0;
u32 pkt_priority_to_cos = 0;
struct bnx2x *bp = params->bp;
u8 port = params->port;
@@ -2412,9 +2516,8 @@ static void bnx2x_update_pfc_nig(struct link_params *params,
* MAC control frames (that are not pause packets)
* will be forwarded to the XCM.
*/
- xcm_mask = REG_RD(bp,
- port ? NIG_REG_LLH1_XCM_MASK :
- NIG_REG_LLH0_XCM_MASK);
+ xcm_mask = REG_RD(bp, port ? NIG_REG_LLH1_XCM_MASK :
+ NIG_REG_LLH0_XCM_MASK);
/*
* nig params will override non PFC params, since it's possible to
* do transition from PFC to SAFC
@@ -2429,8 +2532,8 @@ static void bnx2x_update_pfc_nig(struct link_params *params,
ppp_enable = 1;
xcm_mask &= ~(port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN :
NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN);
- xcm0_out_en = 0;
- p0_hwpfc_enable = 1;
+ xcm_out_en = 0;
+ hwpfc_enable = 1;
} else {
if (nig_params) {
llfc_out_en = nig_params->llfc_out_en;
@@ -2441,7 +2544,7 @@ static void bnx2x_update_pfc_nig(struct link_params *params,
xcm_mask |= (port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN :
NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN);
- xcm0_out_en = 1;
+ xcm_out_en = 1;
}
if (CHIP_IS_E3(bp))
@@ -2460,13 +2563,16 @@ static void bnx2x_update_pfc_nig(struct link_params *params,
REG_WR(bp, port ? NIG_REG_LLH1_XCM_MASK :
NIG_REG_LLH0_XCM_MASK, xcm_mask);
- REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0, 0x7);
+ REG_WR(bp, port ? NIG_REG_LLFC_EGRESS_SRC_ENABLE_1 :
+ NIG_REG_LLFC_EGRESS_SRC_ENABLE_0, 0x7);
/* output enable for RX_XCM # IF */
- REG_WR(bp, NIG_REG_XCM0_OUT_EN, xcm0_out_en);
+ REG_WR(bp, port ? NIG_REG_XCM1_OUT_EN :
+ NIG_REG_XCM0_OUT_EN, xcm_out_en);
/* HW PFC TX enable */
- REG_WR(bp, NIG_REG_P0_HWPFC_ENABLE, p0_hwpfc_enable);
+ REG_WR(bp, port ? NIG_REG_P1_HWPFC_ENABLE :
+ NIG_REG_P0_HWPFC_ENABLE, hwpfc_enable);
if (nig_params) {
u8 i = 0;
@@ -2515,7 +2621,7 @@ int bnx2x_update_pfc(struct link_params *params,
/* update BRB params */
bnx2x_status = bnx2x_update_pfc_brb(params, vars, pfc_params);
- if (0 != bnx2x_status)
+ if (bnx2x_status)
return bnx2x_status;
if (!vars->link_up)
@@ -2533,7 +2639,6 @@ int bnx2x_update_pfc(struct link_params *params,
bnx2x_emac_enable(params, vars, 0);
return bnx2x_status;
}
-
if (CHIP_IS_E2(bp))
bnx2x_update_pfc_bmac2(params, vars, bmac_loopback);
else
@@ -3053,7 +3158,6 @@ static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "write phy register failed\n");
netdev_err(bp->dev, "MDC/MDIO access timeout\n");
rc = -EFAULT;
-
} else {
/* data */
tmp = ((phy->addr << 21) | (devad << 16) | val |
@@ -3090,8 +3194,6 @@ static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
EMAC_MDIO_STATUS_10MB);
return rc;
}
-
-
/******************************************************************/
/* BSC access functions from E3 */
/******************************************************************/
@@ -3339,7 +3441,7 @@ static void bnx2x_set_aer_mmd(struct link_params *params,
aer_val = 0x3800 + offset - 1;
else
aer_val = 0x3800 + offset;
- DP(NETIF_MSG_LINK, "Set AER to 0x%x\n", aer_val);
+
CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
MDIO_AER_BLOCK_AER_REG, aer_val);
@@ -3661,7 +3763,15 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
/* Advertise pause */
bnx2x_ext_phy_set_pause(params, phy, vars);
- vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
+ /*
+ * Set KR Autoneg Work-Around flag for Warpcore version older than D108
+ */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_UC_INFO_B1_VERSION, &val16);
+ if (val16 < 0xd108) {
+ DP(NETIF_MSG_LINK, "Enable AN KR work-around\n");
+ vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
+ }
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_DIGITAL5_MISC7, &val16);
@@ -3942,13 +4052,11 @@ static void bnx2x_warpcore_set_20G_DXGXS(struct bnx2x *bp,
static void bnx2x_warpcore_set_sgmii_speed(struct bnx2x_phy *phy,
struct link_params *params,
- u8 fiber_mode)
+ u8 fiber_mode,
+ u8 always_autoneg)
{
struct bnx2x *bp = params->bp;
u16 val16, digctrl_kx1, digctrl_kx2;
- u8 lane;
-
- lane = bnx2x_get_warpcore_lane(phy, params);
/* Clear XFI clock comp in non-10G single lane mode. */
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
@@ -3956,7 +4064,7 @@ static void bnx2x_warpcore_set_sgmii_speed(struct bnx2x_phy *phy,
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_RX66_CONTROL, val16 & ~(3<<13));
- if (phy->req_line_speed == SPEED_AUTO_NEG) {
+ if (always_autoneg || phy->req_line_speed == SPEED_AUTO_NEG) {
/* SGMII Autoneg */
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
@@ -3967,7 +4075,7 @@ static void bnx2x_warpcore_set_sgmii_speed(struct bnx2x_phy *phy,
} else {
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
- val16 &= 0xcfbf;
+ val16 &= 0xcebf;
switch (phy->req_line_speed) {
case SPEED_10:
break;
@@ -4043,9 +4151,7 @@ static void bnx2x_warpcore_reset_lane(struct bnx2x *bp,
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_DIGITAL5_MISC6, &val);
}
-
-
- /* Clear SFI/XFI link settings registers */
+/* Clear SFI/XFI link settings registers */
static void bnx2x_warpcore_clear_regs(struct bnx2x_phy *phy,
struct link_params *params,
u16 lane)
@@ -4250,7 +4356,7 @@ static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
vars->phy_flags |= PHY_SGMII_FLAG;
DP(NETIF_MSG_LINK, "Setting SGMII mode\n");
bnx2x_warpcore_clear_regs(phy, params, lane);
- bnx2x_warpcore_set_sgmii_speed(phy, params, 0);
+ bnx2x_warpcore_set_sgmii_speed(phy, params, 0, 1);
} else {
switch (serdes_net_if) {
case PORT_HW_CFG_NET_SERDES_IF_KR:
@@ -4278,7 +4384,8 @@ static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
}
bnx2x_warpcore_set_sgmii_speed(phy,
params,
- fiber_mode);
+ fiber_mode,
+ 0);
}
break;
@@ -4291,7 +4398,8 @@ static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
bnx2x_warpcore_set_10G_XFI(phy, params, 0);
} else if (vars->line_speed == SPEED_1000) {
DP(NETIF_MSG_LINK, "Setting 1G Fiber\n");
- bnx2x_warpcore_set_sgmii_speed(phy, params, 1);
+ bnx2x_warpcore_set_sgmii_speed(
+ phy, params, 1, 0);
}
/* Issue Module detection */
if (bnx2x_is_sfp_module_plugged(phy, params))
@@ -4428,12 +4536,6 @@ static void bnx2x_set_warpcore_loopback(struct bnx2x_phy *phy,
/* Switch back to 4-copy registers */
bnx2x_set_aer_mmd(params, phy);
- /* Global loopback, not recommended. */
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16 |
- 0x4000);
} else {
/* 10G & 20G */
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
@@ -4450,25 +4552,14 @@ static void bnx2x_set_warpcore_loopback(struct bnx2x_phy *phy,
}
-void bnx2x_link_status_update(struct link_params *params,
- struct link_vars *vars)
+void bnx2x_sync_link(struct link_params *params,
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
u8 link_10g_plus;
- u8 port = params->port;
- u32 sync_offset, media_types;
- /* Update PHY configuration */
- set_phy_vars(params, vars);
-
- vars->link_status = REG_RD(bp, params->shmem_base +
- offsetof(struct shmem_region,
- port_mb[port].link_status));
-
- vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP);
- vars->phy_flags = PHY_XGXS_FLAG;
if (vars->link_status & LINK_STATUS_PHYSICAL_LINK_FLAG)
vars->phy_flags |= PHY_PHYSICAL_LINK_FLAG;
-
+ vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP);
if (vars->link_up) {
DP(NETIF_MSG_LINK, "phy link up\n");
@@ -4563,7 +4654,23 @@ void bnx2x_link_status_update(struct link_params *params,
if (vars->link_status & LINK_STATUS_PHYSICAL_LINK_FLAG)
vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
}
+}
+
+void bnx2x_link_status_update(struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u8 port = params->port;
+ u32 sync_offset, media_types;
+ /* Update PHY configuration */
+ set_phy_vars(params, vars);
+
+ vars->link_status = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ port_mb[port].link_status));
+ vars->phy_flags = PHY_XGXS_FLAG;
+ bnx2x_sync_link(params, vars);
/* Sync media type */
sync_offset = params->shmem_base +
offsetof(struct shmem_region,
@@ -4602,7 +4709,6 @@ void bnx2x_link_status_update(struct link_params *params,
vars->line_speed, vars->duplex, vars->flow_ctrl);
}
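
The refactor above splits the old monolithic update: bnx2x_sync_link() now only derives the software link state from an already-fetched link_status word, while bnx2x_link_status_update() reads that word from shmem and then delegates. A stripped-down sketch of the resulting call shape, not part of the patch, with the shmem read replaced by a plain parameter:

#include <stdio.h>

#define LINK_UP_BIT     (1u << 0)

struct vars { unsigned link_status; int link_up; };

/* Derive the software view purely from the status word. */
static void sync_link(struct vars *v)
{
        v->link_up = !!(v->link_status & LINK_UP_BIT);
}

/* Fetch the status word (here: passed in), then delegate to sync_link(). */
static void link_status_update(struct vars *v, unsigned status_from_shmem)
{
        v->link_status = status_from_shmem;
        sync_link(v);
}

int main(void)
{
        struct vars v = {0};

        link_status_update(&v, LINK_UP_BIT);
        printf("link_up=%d\n", v.link_up);      /* prints 1 */
        return 0;
}
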
-
static void bnx2x_set_master_ln(struct link_params *params,
struct bnx2x_phy *phy)
{
@@ -4676,11 +4782,8 @@ static void bnx2x_set_swap_lanes(struct link_params *params,
* Each two bits represents a lane number:
* No swap is 0123 => 0x1b no need to enable the swap
*/
- u16 ser_lane, rx_lane_swap, tx_lane_swap;
+ u16 rx_lane_swap, tx_lane_swap;
- ser_lane = ((params->lane_config &
- PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
- PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
rx_lane_swap = ((params->lane_config &
PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK) >>
PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT);
@@ -5356,7 +5459,6 @@ static int bnx2x_link_settings_status(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars)
{
-
struct bnx2x *bp = params->bp;
u16 gp_status, duplex = DUPLEX_HALF, link_up = 0, speed_mask;
@@ -5403,9 +5505,7 @@ static int bnx2x_warpcore_read_status(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars)
{
-
struct bnx2x *bp = params->bp;
-
u8 lane;
u16 gp_status1, gp_speed, link_up, duplex = DUPLEX_FULL;
int rc = 0;
@@ -6678,7 +6778,6 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
return rc;
}
-
/*****************************************************************************/
/* External Phy section */
/*****************************************************************************/
@@ -8103,7 +8202,15 @@ static void bnx2x_warpcore_power_module(struct link_params *params,
static void bnx2x_warpcore_hw_reset(struct bnx2x_phy *phy,
struct link_params *params)
{
+ struct bnx2x *bp = params->bp;
bnx2x_warpcore_power_module(params, phy, 0);
+ /* Put Warpcore in low power mode */
+ REG_WR(bp, MISC_REG_WC0_RESET, 0x0c0e);
+
+ /* Put LCPLL in low power mode */
+ REG_WR(bp, MISC_REG_LCPLL_E40_PWRDWN, 1);
+ REG_WR(bp, MISC_REG_LCPLL_E40_RESETB_ANA, 0);
+ REG_WR(bp, MISC_REG_LCPLL_E40_RESETB_DIG, 0);
}
static void bnx2x_power_sfp_module(struct link_params *params,
@@ -9040,13 +9147,13 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK,
"8727 Power fault has been detected on port %d\n",
oc_port);
- netdev_err(bp->dev, "Error: Power fault on Port %d has"
- " been detected and the power to "
- "that SFP+ module has been removed"
- " to prevent failure of the card."
- " Please remove the SFP+ module and"
- " restart the system to clear this"
- " error.\n",
+ netdev_err(bp->dev, "Error: Power fault on Port %d has "
+ "been detected and the power to "
+ "that SFP+ module has been removed "
+ "to prevent failure of the card. "
+ "Please remove the SFP+ module and "
+ "restart the system to clear this "
+ "error.\n",
oc_port);
/* Disable all RX_ALARMs except for mod_abs */
bnx2x_cl45_write(bp, phy,
@@ -9169,66 +9276,72 @@ static void bnx2x_8727_link_reset(struct bnx2x_phy *phy,
/* BCM8481/BCM84823/BCM84833 PHY SECTION */
/******************************************************************/
static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
- struct link_params *params)
+ struct bnx2x *bp,
+ u8 port)
{
u16 val, fw_ver1, fw_ver2, cnt;
- u8 port;
- struct bnx2x *bp = params->bp;
- port = params->port;
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
+ bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, 0x400f, &fw_ver1);
+ bnx2x_save_spirom_version(bp, port,
+ ((fw_ver1 & 0xf000)>>5) | (fw_ver1 & 0x7f),
+ phy->ver_addr);
+ } else {
+ /* For 32-bit registers in 848xx, access via MDIO2ARM i/f. */
+ /* (1) set reg 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0014);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81B, 0x0000);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81C, 0x0300);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x0009);
+
+ for (cnt = 0; cnt < 100; cnt++) {
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val);
+ if (val & 1)
+ break;
+ udelay(5);
+ }
+ if (cnt == 100) {
+ DP(NETIF_MSG_LINK, "Unable to read 848xx "
+ "phy fw version(1)\n");
+ bnx2x_save_spirom_version(bp, port, 0,
+ phy->ver_addr);
+ return;
+ }
- /* For the 32 bits registers in 848xx, access via MDIO2ARM interface.*/
- /* (1) set register 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0014);
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200);
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81B, 0x0000);
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81C, 0x0300);
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x0009);
- for (cnt = 0; cnt < 100; cnt++) {
- bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val);
- if (val & 1)
- break;
- udelay(5);
- }
- if (cnt == 100) {
- DP(NETIF_MSG_LINK, "Unable to read 848xx phy fw version(1)\n");
- bnx2x_save_spirom_version(bp, port, 0,
- phy->ver_addr);
- return;
- }
+ /* 2) read register 0xc200_0000 (SPI_FW_STATUS) */
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0000);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x000A);
+ for (cnt = 0; cnt < 100; cnt++) {
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val);
+ if (val & 1)
+ break;
+ udelay(5);
+ }
+ if (cnt == 100) {
+ DP(NETIF_MSG_LINK, "Unable to read 848xx phy fw "
+ "version(2)\n");
+ bnx2x_save_spirom_version(bp, port, 0,
+ phy->ver_addr);
+ return;
+ }
+ /* lower 16 bits of the register SPI_FW_STATUS */
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81B, &fw_ver1);
+ /* upper 16 bits of register SPI_FW_STATUS */
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81C, &fw_ver2);
- /* 2) read register 0xc200_0000 (SPI_FW_STATUS) */
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0000);
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200);
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x000A);
- for (cnt = 0; cnt < 100; cnt++) {
- bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val);
- if (val & 1)
- break;
- udelay(5);
- }
- if (cnt == 100) {
- DP(NETIF_MSG_LINK, "Unable to read 848xx phy fw version(2)\n");
- bnx2x_save_spirom_version(bp, port, 0,
+ bnx2x_save_spirom_version(bp, port, (fw_ver2<<16) | fw_ver1,
phy->ver_addr);
- return;
}
- /* lower 16 bits of the register SPI_FW_STATUS */
- bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81B, &fw_ver1);
- /* upper 16 bits of register SPI_FW_STATUS */
- bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81C, &fw_ver2);
-
- bnx2x_save_spirom_version(bp, port, (fw_ver2<<16) | fw_ver1,
- phy->ver_addr);
}
-
static void bnx2x_848xx_set_led(struct bnx2x *bp,
struct bnx2x_phy *phy)
{
- u16 val;
+ u16 val, offset;
/* PHYC_CTL_LED_CTL */
bnx2x_cl45_read(bp, phy,
@@ -9263,14 +9376,22 @@ static void bnx2x_848xx_set_led(struct bnx2x *bp,
MDIO_PMA_REG_8481_LED3_BLINK,
0);
- bnx2x_cl45_read(bp, phy,
+ /* Configure the blink rate to ~15.9 Hz */
+ bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_84823_CTL_LED_CTL_1, &val);
- val |= MDIO_PMA_REG_84823_LED3_STRETCH_EN; /* stretch_en for LED3*/
+ MDIO_PMA_REG_84823_CTL_SLOW_CLK_CNT_HIGH,
+ MDIO_PMA_REG_84823_BLINK_RATE_VAL_15P9HZ);
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+ offset = MDIO_PMA_REG_84833_CTL_LED_CTL_1;
+ else
+ offset = MDIO_PMA_REG_84823_CTL_LED_CTL_1;
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, offset, &val);
+ val |= MDIO_PMA_REG_84823_LED3_STRETCH_EN; /* stretch_en for LED3*/
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_84823_CTL_LED_CTL_1, val);
+ MDIO_PMA_DEVAD, offset, val);
/* 'Interrupt Mask' */
bnx2x_cl45_write(bp, phy,
@@ -9283,14 +9404,17 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
- u16 autoneg_val, an_1000_val, an_10_100_val;
+ u16 autoneg_val, an_1000_val, an_10_100_val, an_10g_val;
u16 tmp_req_line_speed;
tmp_req_line_speed = phy->req_line_speed;
- if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
if (phy->req_line_speed == SPEED_10000)
phy->req_line_speed = SPEED_AUTO_NEG;
-
+ } else {
+ /* Save spirom version */
+ bnx2x_save_848xx_spirom_version(phy, bp, params->port);
+ }
/*
* This phy uses the NIG latch mechanism since link indication
* arrives through its LED4 and not via its LASI signal, so we
@@ -9338,13 +9462,10 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
an_1000_val);
/* set 100 speed advertisement */
- if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
+ if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
(phy->speed_cap_mask &
(PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL |
- PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) &&
- (phy->supported &
- (SUPPORTED_100baseT_Half |
- SUPPORTED_100baseT_Full)))) {
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))) {
an_10_100_val |= (1<<7);
/* Enable autoneg and restart autoneg for legacy speeds */
autoneg_val |= (1<<9 | 1<<12);
@@ -9378,6 +9499,8 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
bnx2x_cl45_write(bp, phy,
MDIO_AN_DEVAD, MDIO_AN_REG_8481_AUX_CTRL,
(1<<15 | 1<<9 | 7<<0));
+ /* The PHY needs this set even for forced link. */
+ an_10_100_val |= (1<<8) | (1<<7);
DP(NETIF_MSG_LINK, "Setting 100M force\n");
}
if ((phy->req_line_speed == SPEED_10) &&
@@ -9415,18 +9538,23 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "Advertising 10G\n");
/* Restart autoneg for 10G*/
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_10GBASE_T_AN_CTRL,
+ &an_10g_val);
bnx2x_cl45_write(bp, phy,
- MDIO_AN_DEVAD, MDIO_AN_REG_CTRL,
- 0x3200);
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_10GBASE_T_AN_CTRL,
+ an_10g_val | 0x1000);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CTRL,
+ 0x3200);
} else
bnx2x_cl45_write(bp, phy,
MDIO_AN_DEVAD,
MDIO_AN_REG_8481_10GBASE_T_AN_CTRL,
1);
- /* Save spirom version */
- bnx2x_save_848xx_spirom_version(phy, params);
-
phy->req_line_speed = tmp_req_line_speed;
return 0;
@@ -9449,74 +9577,95 @@ static int bnx2x_8481_config_init(struct bnx2x_phy *phy,
return bnx2x_848xx_cmn_config_init(phy, params, vars);
}
-
-#define PHY84833_HDSHK_WAIT 300
-static int bnx2x_84833_pair_swap_cfg(struct bnx2x_phy *phy,
+#define PHY84833_CMDHDLR_WAIT 300
+#define PHY84833_CMDHDLR_MAX_ARGS 5
+static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
struct link_params *params,
- struct link_vars *vars)
+ u16 fw_cmd,
+ u16 cmd_args[])
{
u32 idx;
- u32 pair_swap;
u16 val;
- u16 data;
struct bnx2x *bp = params->bp;
- /* Do pair swap */
-
- /* Check for configuration. */
- pair_swap = REG_RD(bp, params->shmem_base +
- offsetof(struct shmem_region,
- dev_info.port_hw_config[params->port].xgbt_phy_cfg)) &
- PORT_HW_CFG_RJ45_PAIR_SWAP_MASK;
-
- if (pair_swap == 0)
- return 0;
-
- data = (u16)pair_swap;
-
/* Write CMD_OPEN_OVERRIDE to STATUS reg */
bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_TOP_CFG_SCRATCH_REG2,
- PHY84833_CMD_OPEN_OVERRIDE);
- for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) {
+ MDIO_84833_CMD_HDLR_STATUS,
+ PHY84833_STATUS_CMD_OPEN_OVERRIDE);
+ for (idx = 0; idx < PHY84833_CMDHDLR_WAIT; idx++) {
bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_TOP_CFG_SCRATCH_REG2, &val);
- if (val == PHY84833_CMD_OPEN_FOR_CMDS)
+ MDIO_84833_CMD_HDLR_STATUS, &val);
+ if (val == PHY84833_STATUS_CMD_OPEN_FOR_CMDS)
break;
msleep(1);
}
- if (idx >= PHY84833_HDSHK_WAIT) {
- DP(NETIF_MSG_LINK, "Pairswap: FW not ready.\n");
+ if (idx >= PHY84833_CMDHDLR_WAIT) {
+ DP(NETIF_MSG_LINK, "FW cmd: FW not ready.\n");
return -EINVAL;
}
+ /* Prepare argument(s) and issue command */
+ for (idx = 0; idx < PHY84833_CMDHDLR_MAX_ARGS; idx++) {
+ bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_84833_CMD_HDLR_DATA1 + idx,
+ cmd_args[idx]);
+ }
bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_TOP_CFG_SCRATCH_REG4,
- data);
- /* Issue pair swap command */
- bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_TOP_CFG_SCRATCH_REG0,
- PHY84833_DIAG_CMD_PAIR_SWAP_CHANGE);
- for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) {
+ MDIO_84833_CMD_HDLR_COMMAND, fw_cmd);
+ for (idx = 0; idx < PHY84833_CMDHDLR_WAIT; idx++) {
bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_TOP_CFG_SCRATCH_REG2, &val);
- if ((val == PHY84833_CMD_COMPLETE_PASS) ||
- (val == PHY84833_CMD_COMPLETE_ERROR))
+ MDIO_84833_CMD_HDLR_STATUS, &val);
+ if ((val == PHY84833_STATUS_CMD_COMPLETE_PASS) ||
+ (val == PHY84833_STATUS_CMD_COMPLETE_ERROR))
break;
msleep(1);
}
- if ((idx >= PHY84833_HDSHK_WAIT) ||
- (val == PHY84833_CMD_COMPLETE_ERROR)) {
- DP(NETIF_MSG_LINK, "Pairswap: override failed.\n");
+ if ((idx >= PHY84833_CMDHDLR_WAIT) ||
+ (val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) {
+ DP(NETIF_MSG_LINK, "FW cmd failed.\n");
return -EINVAL;
}
+ /* Gather returning data */
+ for (idx = 0; idx < PHY84833_CMDHDLR_MAX_ARGS; idx++) {
+ bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_84833_CMD_HDLR_DATA1 + idx,
+ &cmd_args[idx]);
+ }
bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_TOP_CFG_SCRATCH_REG2,
- PHY84833_CMD_CLEAR_COMPLETE);
- DP(NETIF_MSG_LINK, "Pairswap OK, val=0x%x\n", data);
+ MDIO_84833_CMD_HDLR_STATUS,
+ PHY84833_STATUS_CMD_CLEAR_COMPLETE);
return 0;
}
+static int bnx2x_84833_pair_swap_cfg(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ u32 pair_swap;
+ u16 data[PHY84833_CMDHDLR_MAX_ARGS];
+ int status;
+ struct bnx2x *bp = params->bp;
+
+ /* Check for configuration. */
+ pair_swap = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[params->port].xgbt_phy_cfg)) &
+ PORT_HW_CFG_RJ45_PAIR_SWAP_MASK;
+
+ if (pair_swap == 0)
+ return 0;
+
+ /* Only the second argument is used for this command */
+ data[1] = (u16)pair_swap;
+
+ status = bnx2x_84833_cmd_hdlr(phy, params,
+ PHY84833_CMD_SET_PAIR_SWAP, data);
+ if (status == 0)
+ DP(NETIF_MSG_LINK, "Pairswap OK, val=0x%x\n", data[1]);
+
+ return status;
+}
+
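
The new bnx2x_84833_cmd_hdlr() turns the old ad-hoc pair-swap handshake into a generic firmware mailbox: request and wait for the handler to be open, write up to five argument registers, write the command code, poll for PASS or ERROR, read the argument registers back, and finally clear the status. A standalone sketch of that request/poll/response pattern, not part of the patch, run against a fake register file; the reg_read/reg_write helpers, register indices and status codes below are stand-ins, not the driver's MDIO accessors or MDIO_84833_* definitions, and the open-override step is folded into the first poll:

#include <stdio.h>

#define REG_STATUS      0
#define REG_CMD         1
#define REG_DATA0       2       /* five data registers follow */
#define MAX_ARGS        5
#define ST_OPEN         0x0010
#define ST_PASS         0x0080
#define ST_ERROR        0x0040
#define WAIT_ITER       300

static unsigned short regs[8];

static unsigned short reg_read(int r)              { return regs[r]; }
static void           reg_write(int r, unsigned v) { regs[r] = (unsigned short)v; }

/* Fake "firmware": completes any command immediately and echoes arg0 + 1. */
static void fw_step(void)
{
        if (regs[REG_CMD]) {
                regs[REG_DATA0] += 1;
                regs[REG_STATUS] = ST_PASS;
                regs[REG_CMD] = 0;
        } else {
                regs[REG_STATUS] = ST_OPEN;
        }
}

static int cmd_hdlr(unsigned short cmd, unsigned short args[MAX_ARGS])
{
        int i;

        for (i = 0; i < WAIT_ITER; i++) {               /* wait for the handler */
                fw_step();
                if (reg_read(REG_STATUS) == ST_OPEN)
                        break;
        }
        if (i == WAIT_ITER)
                return -1;
        for (i = 0; i < MAX_ARGS; i++)                  /* write arguments */
                reg_write(REG_DATA0 + i, args[i]);
        reg_write(REG_CMD, cmd);                        /* issue the command */
        for (i = 0; i < WAIT_ITER; i++) {               /* poll for completion */
                fw_step();
                if (reg_read(REG_STATUS) == ST_PASS ||
                    reg_read(REG_STATUS) == ST_ERROR)
                        break;
        }
        if (i == WAIT_ITER || reg_read(REG_STATUS) == ST_ERROR)
                return -1;
        for (i = 0; i < MAX_ARGS; i++)                  /* read results back */
                args[i] = reg_read(REG_DATA0 + i);
        reg_write(REG_STATUS, 0);                       /* clear for the next user */
        return 0;
}

int main(void)
{
        unsigned short args[MAX_ARGS] = { 41, 0, 0, 0, 0 };

        if (cmd_hdlr(0x1234, args) == 0)
                printf("result: %u\n", args[0]);        /* prints 42 */
        return 0;
}

The pair-swap path in the hunk above is just one caller of this mailbox; it only populates the second argument slot before issuing the command.
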
static u8 bnx2x_84833_get_reset_gpios(struct bnx2x *bp,
u32 shmem_base_path[],
u32 chip_id)
@@ -9579,24 +9728,6 @@ static int bnx2x_84833_hw_reset_phy(struct bnx2x_phy *phy,
return 0;
}
-static int bnx2x_84833_common_init_phy(struct bnx2x *bp,
- u32 shmem_base_path[],
- u32 chip_id)
-{
- u8 reset_gpios;
-
- reset_gpios = bnx2x_84833_get_reset_gpios(bp, shmem_base_path, chip_id);
-
- bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_LOW);
- udelay(10);
- bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_HIGH);
- msleep(800);
- DP(NETIF_MSG_LINK, "84833 reset pulse on pin values 0x%x\n",
- reset_gpios);
-
- return 0;
-}
-
#define PHY84833_CONSTANT_LATENCY 1193
static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
struct link_params *params,
@@ -9605,8 +9736,8 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
struct bnx2x *bp = params->bp;
u8 port, initialize = 1;
u16 val;
- u16 temp;
- u32 actual_phy_selection, cms_enable, idx;
+ u32 actual_phy_selection, cms_enable;
+ u16 cmd_args[PHY84833_CMDHDLR_MAX_ARGS];
int rc = 0;
msleep(1);
@@ -9625,34 +9756,24 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_CTRL, 0x8000);
- /* Bring PHY out of super isolate mode */
- bnx2x_cl45_read(bp, phy,
- MDIO_CTL_DEVAD,
- MDIO_84833_TOP_CFG_XGPHY_STRAP1, &val);
- val &= ~MDIO_84833_SUPER_ISOLATE;
- bnx2x_cl45_write(bp, phy,
- MDIO_CTL_DEVAD,
- MDIO_84833_TOP_CFG_XGPHY_STRAP1, val);
}
bnx2x_wait_reset_complete(bp, phy, params);
/* Wait for GPHY to come out of reset */
msleep(50);
-
- if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
- bnx2x_84833_pair_swap_cfg(phy, params, vars);
-
- /*
- * BCM84823 requires that XGXS links up first @ 10G for normal behavior
- */
- temp = vars->line_speed;
- vars->line_speed = SPEED_10000;
- bnx2x_set_autoneg(&params->phy[INT_PHY], params, vars, 0);
- bnx2x_program_serdes(&params->phy[INT_PHY], params, vars);
- vars->line_speed = temp;
-
- /* Set dual-media configuration according to configuration */
+ if (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
+ /*
+ * BCM84823 requires that XGXS links up first @ 10G for normal
+ * behavior.
+ */
+ u16 temp;
+ temp = vars->line_speed;
+ vars->line_speed = SPEED_10000;
+ bnx2x_set_autoneg(&params->phy[INT_PHY], params, vars, 0);
+ bnx2x_program_serdes(&params->phy[INT_PHY], params, vars);
+ vars->line_speed = temp;
+ }
bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
MDIO_CTL_REG_84823_MEDIA, &val);
@@ -9698,70 +9819,23 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "Multi_phy config = 0x%x, Media control = 0x%x\n",
params->multi_phy_config, val);
- /* AutogrEEEn */
- if (params->feature_config_flags &
- FEATURE_CONFIG_AUTOGREEEN_ENABLED) {
- /* Ensure that f/w is ready */
- for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) {
- bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_TOP_CFG_SCRATCH_REG2, &val);
- if (val == PHY84833_CMD_OPEN_FOR_CMDS)
- break;
- usleep_range(1000, 1000);
- }
- if (idx >= PHY84833_HDSHK_WAIT) {
- DP(NETIF_MSG_LINK, "AutogrEEEn: FW not ready.\n");
- return -EINVAL;
- }
-
- /* Select EEE mode */
- bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_TOP_CFG_SCRATCH_REG3,
- 0x2);
-
- /* Set Idle and Latency */
- bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_TOP_CFG_SCRATCH_REG4,
- PHY84833_CONSTANT_LATENCY + 1);
-
- bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_TOP_CFG_DATA3_REG,
- PHY84833_CONSTANT_LATENCY + 1);
-
- bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_TOP_CFG_DATA4_REG,
- PHY84833_CONSTANT_LATENCY);
-
- /* Send EEE instruction to command register */
- bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_TOP_CFG_SCRATCH_REG0,
- PHY84833_DIAG_CMD_SET_EEE_MODE);
-
- /* Ensure that the command has completed */
- for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) {
- bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_TOP_CFG_SCRATCH_REG2, &val);
- if ((val == PHY84833_CMD_COMPLETE_PASS) ||
- (val == PHY84833_CMD_COMPLETE_ERROR))
- break;
- usleep_range(1000, 1000);
- }
- if ((idx >= PHY84833_HDSHK_WAIT) ||
- (val == PHY84833_CMD_COMPLETE_ERROR)) {
- DP(NETIF_MSG_LINK, "AutogrEEEn: command failed.\n");
- return -EINVAL;
- }
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
+ bnx2x_84833_pair_swap_cfg(phy, params, vars);
- /* Reset command handler */
- bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_TOP_CFG_SCRATCH_REG2,
- PHY84833_CMD_CLEAR_COMPLETE);
+ /* Keep AutogrEEEn disabled. */
+ cmd_args[0] = 0x0;
+ cmd_args[1] = 0x0;
+ cmd_args[2] = PHY84833_CONSTANT_LATENCY + 1;
+ cmd_args[3] = PHY84833_CONSTANT_LATENCY;
+ rc = bnx2x_84833_cmd_hdlr(phy, params,
+ PHY84833_CMD_SET_EEE_MODE, cmd_args);
+ if (rc != 0)
+ DP(NETIF_MSG_LINK, "Cfg AutogrEEEn failed.\n");
}
-
if (initialize)
rc = bnx2x_848xx_cmn_config_init(phy, params, vars);
else
- bnx2x_save_848xx_spirom_version(phy, params);
+ bnx2x_save_848xx_spirom_version(phy, bp, params->port);
/* 84833 PHY has a better feature and doesn't need to support this. */
if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823) {
cms_enable = REG_RD(bp, params->shmem_base +
@@ -9779,6 +9853,16 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
MDIO_CTL_REG_84823_USER_CTRL_REG, val);
}
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
+ /* Bring PHY out of super isolate mode as the final step. */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_XGPHY_STRAP1, &val);
+ val &= ~MDIO_84833_SUPER_ISOLATE;
+ bnx2x_cl45_write(bp, phy,
+ MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_XGPHY_STRAP1, val);
+ }
return rc;
}
@@ -9916,10 +10000,11 @@ static void bnx2x_848x3_link_reset(struct bnx2x_phy *phy,
} else {
bnx2x_cl45_read(bp, phy,
MDIO_CTL_DEVAD,
- 0x400f, &val16);
+ MDIO_84833_TOP_CFG_XGPHY_STRAP1, &val16);
+ val16 |= MDIO_84833_SUPER_ISOLATE;
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_CTRL, 0x800);
+ MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_XGPHY_STRAP1, val16);
}
}
@@ -10144,8 +10229,10 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "54618SE cfg init\n");
usleep_range(1000, 1000);
- /* This works with E3 only, no need to check the chip
- before determining the port. */
+ /*
+ * This works with E3 only, no need to check the chip
+ * before determining the port.
+ */
port = params->port;
cfg_pin = (REG_RD(bp, params->shmem_base +
@@ -10327,6 +10414,43 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
return 0;
}
+
+static void bnx2x_5461x_set_link_led(struct bnx2x_phy *phy,
+ struct link_params *params, u8 mode)
+{
+ struct bnx2x *bp = params->bp;
+ u16 temp;
+
+ bnx2x_cl22_write(bp, phy,
+ MDIO_REG_GPHY_SHADOW,
+ MDIO_REG_GPHY_SHADOW_LED_SEL1);
+ bnx2x_cl22_read(bp, phy,
+ MDIO_REG_GPHY_SHADOW,
+ &temp);
+ temp &= 0xff00;
+
+ DP(NETIF_MSG_LINK, "54618x set link led (mode=%x)\n", mode);
+ switch (mode) {
+ case LED_MODE_FRONT_PANEL_OFF:
+ case LED_MODE_OFF:
+ temp |= 0x00ee;
+ break;
+ case LED_MODE_OPER:
+ temp |= 0x0001;
+ break;
+ case LED_MODE_ON:
+ temp |= 0x00ff;
+ break;
+ default:
+ break;
+ }
+ bnx2x_cl22_write(bp, phy,
+ MDIO_REG_GPHY_SHADOW,
+ MDIO_REG_GPHY_SHADOW_WR_ENA | temp);
+ return;
+}
+
+
static void bnx2x_54618se_link_reset(struct bnx2x_phy *phy,
struct link_params *params)
{
@@ -11103,7 +11227,7 @@ static struct bnx2x_phy phy_54618se = {
.config_loopback = (config_loopback_t)bnx2x_54618se_config_loopback,
.format_fw_ver = (format_fw_ver_t)NULL,
.hw_reset = (hw_reset_t)NULL,
- .set_link_led = (set_link_led_t)NULL,
+ .set_link_led = (set_link_led_t)bnx2x_5461x_set_link_led,
.phy_specific_func = (phy_specific_func_t)NULL
};
/*****************************************************************/
@@ -11181,7 +11305,9 @@ static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port,
offsetof(struct shmem_region,
dev_info.port_feature_config[port].link_config)) &
PORT_FEATURE_CONNECTED_SWITCH_MASK);
- chip_id = REG_RD(bp, MISC_REG_CHIP_NUM) << 16;
+ chip_id = (REG_RD(bp, MISC_REG_CHIP_NUM) << 16) |
+ ((REG_RD(bp, MISC_REG_CHIP_REV) & 0xf) << 12);
+
DP(NETIF_MSG_LINK, ":chip_id = 0x%x\n", chip_id);
if (USES_WARPCORE(bp)) {
u32 serdes_net_if;
@@ -11360,6 +11486,10 @@ static int bnx2x_populate_ext_phy(struct bnx2x *bp,
return -EINVAL;
default:
*phy = phy_null;
+ /* In case external PHY wasn't found */
+ if ((phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
+ (phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
+ return -EINVAL;
return 0;
}
@@ -11399,6 +11529,19 @@ static int bnx2x_populate_ext_phy(struct bnx2x *bp,
}
phy->mdio_ctrl = bnx2x_get_emac_base(bp, mdc_mdio_access, port);
+ if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) &&
+ (phy->ver_addr)) {
+ /*
+		 * Remove 100Mb link support for BCM84833 when the PHY FW
+		 * version is lower than or equal to 1.39
+ */
+ u32 raw_ver = REG_RD(bp, phy->ver_addr);
+ if (((raw_ver & 0x7F) <= 39) &&
+ (((raw_ver & 0xF80) >> 7) <= 1))
+ phy->supported &= ~(SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full);
+ }
+
/*
* In case mdc/mdio_access of the external phy is different than the
* mdc/mdio access of the XGXS, a HW lock must be taken in each access
@@ -11533,7 +11676,7 @@ u32 bnx2x_phy_selection(struct link_params *params)
int bnx2x_phy_probe(struct link_params *params)
{
- u8 phy_index, actual_phy_idx, link_cfg_idx;
+ u8 phy_index, actual_phy_idx;
u32 phy_config_swapped, sync_offset, media_types;
struct bnx2x *bp = params->bp;
struct bnx2x_phy *phy;
@@ -11544,7 +11687,6 @@ int bnx2x_phy_probe(struct link_params *params)
for (phy_index = INT_PHY; phy_index < MAX_PHYS;
phy_index++) {
- link_cfg_idx = LINK_CONFIG_IDX(phy_index);
actual_phy_idx = phy_index;
if (phy_config_swapped) {
if (phy_index == EXT_PHY1)
@@ -12210,6 +12352,77 @@ static int bnx2x_8727_common_init_phy(struct bnx2x *bp,
return 0;
}
+static int bnx2x_84833_common_init_phy(struct bnx2x *bp,
+ u32 shmem_base_path[],
+ u32 shmem2_base_path[],
+ u8 phy_index,
+ u32 chip_id)
+{
+ u8 reset_gpios;
+ reset_gpios = bnx2x_84833_get_reset_gpios(bp, shmem_base_path, chip_id);
+ bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_LOW);
+ udelay(10);
+ bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_HIGH);
+ DP(NETIF_MSG_LINK, "84833 reset pulse on pin values 0x%x\n",
+ reset_gpios);
+ return 0;
+}
+
+static int bnx2x_84833_pre_init_phy(struct bnx2x *bp,
+ struct bnx2x_phy *phy)
+{
+ u16 val, cnt;
+	/* Wait for the FW to complete its initialization. */
+ for (cnt = 0; cnt < 1500; cnt++) {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_CTRL, &val);
+ if (!(val & (1<<15)))
+ break;
+ msleep(1);
+ }
+ if (cnt >= 1500) {
+ DP(NETIF_MSG_LINK, "84833 reset timeout\n");
+ return -EINVAL;
+ }
+
+ /* Put the port in super isolate mode. */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_XGPHY_STRAP1, &val);
+ val |= MDIO_84833_SUPER_ISOLATE;
+ bnx2x_cl45_write(bp, phy,
+ MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_XGPHY_STRAP1, val);
+
+ /* Save spirom version */
+ bnx2x_save_848xx_spirom_version(phy, bp, PORT_0);
+ return 0;
+}
+
+int bnx2x_pre_init_phy(struct bnx2x *bp,
+ u32 shmem_base,
+ u32 shmem2_base,
+ u32 chip_id)
+{
+ int rc = 0;
+ struct bnx2x_phy phy;
+ bnx2x_set_mdio_clk(bp, chip_id, PORT_0);
+ if (bnx2x_populate_phy(bp, EXT_PHY1, shmem_base, shmem2_base,
+ PORT_0, &phy)) {
+ DP(NETIF_MSG_LINK, "populate_phy failed\n");
+ return -EINVAL;
+ }
+ switch (phy.type) {
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833:
+ rc = bnx2x_84833_pre_init_phy(bp, &phy);
+ break;
+ default:
+ break;
+ }
+ return rc;
+}
+
static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
u32 shmem2_base_path[], u8 phy_index,
u32 ext_phy_type, u32 chip_id)
@@ -12244,7 +12457,9 @@ static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
* GPIO3's are linked, and so both need to be toggled
* to obtain required 2us pulse.
*/
- rc = bnx2x_84833_common_init_phy(bp, shmem_base_path, chip_id);
+ rc = bnx2x_84833_common_init_phy(bp, shmem_base_path,
+ shmem2_base_path,
+ phy_index, chip_id);
break;
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
rc = -EINVAL;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
index 2a46e633abe9..e02a68a7fb85 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -479,7 +479,7 @@ int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos);
/* Configure the COS to ETS according to BW and SP settings.*/
int bnx2x_ets_e3b0_config(const struct link_params *params,
const struct link_vars *vars,
- const struct bnx2x_ets_params *ets_params);
+ struct bnx2x_ets_params *ets_params);
/* Read pfc statistic*/
void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars,
u32 pfc_frames_sent[2],
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 2f6361e949f0..ffeaaa95ed96 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -2318,12 +2318,6 @@ static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
}
-/* returns func by VN for current port */
-static inline int func_by_vn(struct bnx2x *bp, int vn)
-{
- return 2 * vn + BP_PORT(bp);
-}
-
static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
{
struct rate_shaping_vars_per_vn m_rs_vn;
@@ -2475,22 +2469,6 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
"rate shaping and fairness are disabled\n");
}
-static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
-{
- int func;
- int vn;
-
- /* Set the attention towards other drivers on the same port */
- for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
- if (vn == BP_VN(bp))
- continue;
-
- func = func_by_vn(bp, vn);
- REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
- (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
- }
-}
-
/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
@@ -2549,6 +2527,9 @@ void bnx2x__link_status_update(struct bnx2x *bp)
if (bp->state != BNX2X_STATE_OPEN)
return;
+ /* read updated dcb configuration */
+ bnx2x_dcbx_pmf_update(bp);
+
bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
if (bp->link_vars.link_up)
@@ -2643,15 +2624,6 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
return rc;
}
-static u8 stat_counter_valid(struct bnx2x *bp, struct bnx2x_fastpath *fp)
-{
-#ifdef BCM_CNIC
- /* Statistics are not supported for CNIC Clients at the moment */
- if (IS_FCOE_FP(fp))
- return false;
-#endif
- return true;
-}
void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
{
@@ -2695,11 +2667,11 @@ static inline unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
* parent connection). The statistics are zeroed when the parent
* connection is initialized.
*/
- if (stat_counter_valid(bp, fp)) {
- __set_bit(BNX2X_Q_FLG_STATS, &flags);
- if (zero_stats)
- __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
- }
+
+ __set_bit(BNX2X_Q_FLG_STATS, &flags);
+ if (zero_stats)
+ __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
+
return flags;
}
@@ -2808,8 +2780,8 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
/* This should be a maximum number of data bytes that may be
* placed on the BD (not including paddings).
*/
- rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN -
- IP_HEADER_ALIGNMENT_PADDING;
+ rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN_START -
+ BNX2X_FW_RX_ALIGN_END - IP_HEADER_ALIGNMENT_PADDING;
rxq_init->cl_qzone_id = fp->cl_qzone_id;
rxq_init->tpa_agg_sz = tpa_agg_size;
@@ -2940,6 +2912,143 @@ static void bnx2x_e1h_enable(struct bnx2x *bp)
*/
}
+#define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3
+
+static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
+{
+ struct eth_stats_info *ether_stat =
+ &bp->slowpath->drv_info_to_mcp.ether_stat;
+
+ /* leave last char as NULL */
+ memcpy(ether_stat->version, DRV_MODULE_VERSION,
+ ETH_STAT_INFO_VERSION_LEN - 1);
+
+ bp->fp[0].mac_obj.get_n_elements(bp, &bp->fp[0].mac_obj,
+ DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
+ ether_stat->mac_local);
+
+ ether_stat->mtu_size = bp->dev->mtu;
+
+ if (bp->dev->features & NETIF_F_RXCSUM)
+ ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
+ if (bp->dev->features & NETIF_F_TSO)
+ ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK;
+ ether_stat->feature_flags |= bp->common.boot_mode;
+
+ ether_stat->promiscuous_mode = (bp->dev->flags & IFF_PROMISC) ? 1 : 0;
+
+ ether_stat->txq_size = bp->tx_ring_size;
+ ether_stat->rxq_size = bp->rx_ring_size;
+}
+
+static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
+{
+#ifdef BCM_CNIC
+ struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
+ struct fcoe_stats_info *fcoe_stat =
+ &bp->slowpath->drv_info_to_mcp.fcoe_stat;
+
+ memcpy(fcoe_stat->mac_local, bp->fip_mac, ETH_ALEN);
+
+ fcoe_stat->qos_priority =
+ app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE];
+
+ /* insert FCoE stats from ramrod response */
+ if (!NO_FCOE(bp)) {
+ struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
+ &bp->fw_stats_data->queue_stats[FCOE_IDX].
+ tstorm_queue_statistics;
+
+ struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
+ &bp->fw_stats_data->queue_stats[FCOE_IDX].
+ xstorm_queue_statistics;
+
+ struct fcoe_statistics_params *fw_fcoe_stat =
+ &bp->fw_stats_data->fcoe;
+
+ ADD_64(fcoe_stat->rx_bytes_hi, 0, fcoe_stat->rx_bytes_lo,
+ fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);
+
+ ADD_64(fcoe_stat->rx_bytes_hi,
+ fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
+ fcoe_stat->rx_bytes_lo,
+ fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);
+
+ ADD_64(fcoe_stat->rx_bytes_hi,
+ fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
+ fcoe_stat->rx_bytes_lo,
+ fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);
+
+ ADD_64(fcoe_stat->rx_bytes_hi,
+ fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
+ fcoe_stat->rx_bytes_lo,
+ fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);
+
+ ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
+ fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);
+
+ ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
+ fcoe_q_tstorm_stats->rcv_ucast_pkts);
+
+ ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
+ fcoe_q_tstorm_stats->rcv_bcast_pkts);
+
+ ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
+ fcoe_q_tstorm_stats->rcv_mcast_pkts);
+
+ ADD_64(fcoe_stat->tx_bytes_hi, 0, fcoe_stat->tx_bytes_lo,
+ fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);
+
+ ADD_64(fcoe_stat->tx_bytes_hi,
+ fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
+ fcoe_stat->tx_bytes_lo,
+ fcoe_q_xstorm_stats->ucast_bytes_sent.lo);
+
+ ADD_64(fcoe_stat->tx_bytes_hi,
+ fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
+ fcoe_stat->tx_bytes_lo,
+ fcoe_q_xstorm_stats->bcast_bytes_sent.lo);
+
+ ADD_64(fcoe_stat->tx_bytes_hi,
+ fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
+ fcoe_stat->tx_bytes_lo,
+ fcoe_q_xstorm_stats->mcast_bytes_sent.lo);
+
+ ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
+ fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);
+
+ ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
+ fcoe_q_xstorm_stats->ucast_pkts_sent);
+
+ ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
+ fcoe_q_xstorm_stats->bcast_pkts_sent);
+
+ ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
+ fcoe_q_xstorm_stats->mcast_pkts_sent);
+ }
+
+ /* ask L5 driver to add data to the struct */
+ bnx2x_cnic_notify(bp, CNIC_CTL_FCOE_STATS_GET_CMD);
+#endif
+}
+
+static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
+{
+#ifdef BCM_CNIC
+ struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
+ struct iscsi_stats_info *iscsi_stat =
+ &bp->slowpath->drv_info_to_mcp.iscsi_stat;
+
+ memcpy(iscsi_stat->mac_local, bp->cnic_eth_dev.iscsi_mac, ETH_ALEN);
+
+ iscsi_stat->qos_priority =
+ app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI];
+
+ /* ask L5 driver to add data to the struct */
+ bnx2x_cnic_notify(bp, CNIC_CTL_ISCSI_STATS_GET_CMD);
+#endif
+}
+
/* called due to MCP event (on pmf):
* reread new bandwidth configuration
* configure FW
@@ -2960,6 +3069,50 @@ static inline void bnx2x_set_mf_bw(struct bnx2x *bp)
bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
}
+static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
+{
+ enum drv_info_opcode op_code;
+ u32 drv_info_ctl = SHMEM2_RD(bp, drv_info_control);
+
+ /* if drv_info version supported by MFW doesn't match - send NACK */
+ if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) {
+ bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
+ return;
+ }
+
+ op_code = (drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>
+ DRV_INFO_CONTROL_OP_CODE_SHIFT;
+
+ memset(&bp->slowpath->drv_info_to_mcp, 0,
+ sizeof(union drv_info_to_mcp));
+
+ switch (op_code) {
+ case ETH_STATS_OPCODE:
+ bnx2x_drv_info_ether_stat(bp);
+ break;
+ case FCOE_STATS_OPCODE:
+ bnx2x_drv_info_fcoe_stat(bp);
+ break;
+ case ISCSI_STATS_OPCODE:
+ bnx2x_drv_info_iscsi_stat(bp);
+ break;
+ default:
+ /* if op code isn't supported - send NACK */
+ bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
+ return;
+ }
+
+ /* if we got drv_info attn from MFW then these fields are defined in
+ * shmem2 for sure
+ */
+ SHMEM2_WR(bp, drv_info_host_addr_lo,
+ U64_LO(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
+ SHMEM2_WR(bp, drv_info_host_addr_hi,
+ U64_HI(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
+
+ bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_ACK, 0);
+}
+
static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
{
DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
@@ -3318,6 +3471,17 @@ static inline void bnx2x_fan_failure(struct bnx2x *bp)
netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
" the driver to shutdown the card to prevent permanent"
" damage. Please contact OEM Support for assistance\n");
+
+ /*
+	 * Schedule device reset (unload).
+	 * This is because some boards consume enough power when the driver
+	 * is up to overheat if the fan fails.
+ */
+ smp_mb__before_clear_bit();
+ set_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state);
+ smp_mb__after_clear_bit();
+ schedule_delayed_work(&bp->sp_rtnl_task, 0);
+
}
static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
@@ -3456,6 +3620,8 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
if (val & DRV_STATUS_SET_MF_BW)
bnx2x_set_mf_bw(bp);
+ if (val & DRV_STATUS_DRV_INFO_REQ)
+ bnx2x_handle_drv_info_req(bp);
if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
bnx2x_pmf_update(bp);
@@ -5247,7 +5413,7 @@ static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
u8 cos;
unsigned long q_type = 0;
u32 cids[BNX2X_MULTI_TX_COS] = { 0 };
-
+ fp->rx_queue = fp_idx;
fp->cid = fp_idx;
fp->cl_id = bnx2x_fp_cl_id(fp);
fp->fw_sb_id = bnx2x_fp_fw_sb_id(fp);
@@ -6856,13 +7022,16 @@ void bnx2x_free_mem(struct bnx2x *bp)
static inline int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
{
int num_groups;
+ int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
- /* number of eth_queues */
- u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp);
+ /* number of queues for statistics is number of eth queues + FCoE */
+ u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
/* Total number of FW statistics requests =
- * 1 for port stats + 1 for PF stats + num_eth_queues */
- bp->fw_stats_num = 2 + num_queue_stats;
+ * 1 for port stats + 1 for PF stats + potential 1 for FCoE stats +
+ * num of queues
+ */
+ bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
/* Request is built from stats_query_header and an array of
@@ -6870,8 +7039,8 @@ static inline int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
* STATS_QUERY_CMD_COUNT rules. The real number or requests is
* configured in the stats_query_header.
*/
- num_groups = (2 + num_queue_stats) / STATS_QUERY_CMD_COUNT +
- (((2 + num_queue_stats) % STATS_QUERY_CMD_COUNT) ? 1 : 0);
+ num_groups = ((bp->fw_stats_num) / STATS_QUERY_CMD_COUNT) +
+ (((bp->fw_stats_num) % STATS_QUERY_CMD_COUNT) ? 1 : 0);
bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
num_groups * sizeof(struct stats_query_cmd_group);
@@ -6880,9 +7049,13 @@ static inline int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
*
* stats_counter holds per-STORM counters that are incremented
* when STORM has finished with the current request.
+ *
+	 * memory for the FCoE offloaded statistics is counted anyway,
+ * even if they will not be sent.
*/
bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
sizeof(struct per_pf_stats) +
+ sizeof(struct fcoe_statistics_params) +
sizeof(struct per_queue_stats) * num_queue_stats +
sizeof(struct stats_counter);
@@ -7025,6 +7198,13 @@ int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
{
unsigned long ramrod_flags = 0;
+#ifdef BCM_CNIC
+ if (is_zero_ether_addr(bp->dev->dev_addr) && IS_MF_ISCSI_SD(bp)) {
+ DP(NETIF_MSG_IFUP, "Ignoring Zero MAC for iSCSI SD mode\n");
+ return 0;
+ }
+#endif
+
DP(NETIF_MSG_IFUP, "Adding Eth MAC\n");
__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
@@ -8522,6 +8702,17 @@ sp_rtnl_not_reset:
if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos);
+ /*
+	 * In case of fan failure we need to reset it if the "stop on error"
+	 * debug flag is set, since we are trying to prevent permanent
+	 * overheating damage
+ */
+ if (test_and_clear_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state)) {
+ DP(BNX2X_MSG_SP, "fan failure detected. Unloading driver\n");
+ netif_device_detach(bp->dev);
+ bnx2x_close(bp->dev);
+ }
+
sp_rtnl_exit:
rtnl_unlock();
}
@@ -8708,7 +8899,7 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
- u32 val, val2, val3, val4, id;
+ u32 val, val2, val3, val4, id, boot_mode;
u16 pmc;
/* Get the chip revision id and number. */
@@ -8817,6 +9008,26 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
bp->link_params.feature_config_flags |=
(val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ?
FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0;
+ bp->flags |= (val >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) ?
+ BC_SUPPORTS_PFC_STATS : 0;
+
+ boot_mode = SHMEM_RD(bp,
+ dev_info.port_feature_config[BP_PORT(bp)].mba_config) &
+ PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK;
+ switch (boot_mode) {
+ case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE:
+ bp->common.boot_mode = FEATURE_ETH_BOOTMODE_PXE;
+ break;
+ case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_ISCSIB:
+ bp->common.boot_mode = FEATURE_ETH_BOOTMODE_ISCSI;
+ break;
+ case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_FCOE_BOOT:
+ bp->common.boot_mode = FEATURE_ETH_BOOTMODE_FCOE;
+ break;
+ case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_NONE:
+ bp->common.boot_mode = FEATURE_ETH_BOOTMODE_NONE;
+ break;
+ }
pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
@@ -9267,22 +9478,43 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
bp->common.shmem2_base);
}
-#ifdef BCM_CNIC
-static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
+void bnx2x_get_iscsi_info(struct bnx2x *bp)
{
+#ifdef BCM_CNIC
int port = BP_PORT(bp);
- int func = BP_ABS_FUNC(bp);
u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
drv_lic_key[port].max_iscsi_conn);
- u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
- drv_lic_key[port].max_fcoe_conn);
- /* Get the number of maximum allowed iSCSI and FCoE connections */
+ /* Get the number of maximum allowed iSCSI connections */
bp->cnic_eth_dev.max_iscsi_conn =
(max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;
+ BNX2X_DEV_INFO("max_iscsi_conn 0x%x\n",
+ bp->cnic_eth_dev.max_iscsi_conn);
+
+ /*
+ * If maximum allowed number of connections is zero -
+ * disable the feature.
+ */
+ if (!bp->cnic_eth_dev.max_iscsi_conn)
+ bp->flags |= NO_ISCSI_FLAG;
+#else
+ bp->flags |= NO_ISCSI_FLAG;
+#endif
+}
+
+static void __devinit bnx2x_get_fcoe_info(struct bnx2x *bp)
+{
+#ifdef BCM_CNIC
+ int port = BP_PORT(bp);
+ int func = BP_ABS_FUNC(bp);
+
+ u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
+ drv_lic_key[port].max_fcoe_conn);
+
+ /* Get the number of maximum allowed FCoE connections */
bp->cnic_eth_dev.max_fcoe_conn =
(max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
BNX2X_MAX_FCOE_INIT_CONN_SHIFT;
@@ -9334,21 +9566,29 @@ static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
}
}
- BNX2X_DEV_INFO("max_iscsi_conn 0x%x max_fcoe_conn 0x%x\n",
- bp->cnic_eth_dev.max_iscsi_conn,
- bp->cnic_eth_dev.max_fcoe_conn);
+ BNX2X_DEV_INFO("max_fcoe_conn 0x%x\n", bp->cnic_eth_dev.max_fcoe_conn);
/*
* If maximum allowed number of connections is zero -
* disable the feature.
*/
- if (!bp->cnic_eth_dev.max_iscsi_conn)
- bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
-
if (!bp->cnic_eth_dev.max_fcoe_conn)
bp->flags |= NO_FCOE_FLAG;
-}
+#else
+ bp->flags |= NO_FCOE_FLAG;
#endif
+}
+
+static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
+{
+ /*
+	 * iSCSI may be dynamically disabled, but by reading the
+	 * info here the driver will decrease its memory usage
+ * if the feature is disabled for good
+ */
+ bnx2x_get_iscsi_info(bp);
+ bnx2x_get_fcoe_info(bp);
+}
static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
{
@@ -9374,7 +9614,8 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
#ifdef BCM_CNIC
- /* iSCSI and FCoE NPAR MACs: if there is no either iSCSI or
+ /*
+	 * iSCSI and FCoE NPAR MACs: if there is neither an iSCSI nor an
* FCoE MAC then the appropriate feature should be disabled.
*/
if (IS_MF_SI(bp)) {
@@ -9396,11 +9637,22 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
val = MF_CFG_RD(bp, func_ext_config[func].
fcoe_mac_addr_lower);
bnx2x_set_mac_buf(fip_mac, val, val2);
- BNX2X_DEV_INFO("Read FCoE L2 MAC to %pM\n",
+ BNX2X_DEV_INFO("Read FCoE L2 MAC: %pM\n",
fip_mac);
} else
bp->flags |= NO_FCOE_FLAG;
+ } else { /* SD mode */
+ if (BNX2X_IS_MF_PROTOCOL_ISCSI(bp)) {
+ /* use primary mac as iscsi mac */
+ memcpy(iscsi_mac, bp->dev->dev_addr, ETH_ALEN);
+ /* Zero primary MAC configuration */
+ memset(bp->dev->dev_addr, 0, ETH_ALEN);
+
+ BNX2X_DEV_INFO("SD ISCSI MODE\n");
+ BNX2X_DEV_INFO("Read iSCSI MAC: %pM\n",
+ iscsi_mac);
+ }
}
#endif
} else {
@@ -9449,7 +9701,7 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
}
#endif
- if (!is_valid_ether_addr(bp->dev->dev_addr))
+ if (!bnx2x_is_valid_ether_addr(bp, bp->dev->dev_addr))
dev_err(&bp->pdev->dev,
"bad Ethernet MAC address configuration: "
"%pM, change it manually before bringing up "
@@ -9661,9 +9913,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
/* Get MAC addresses */
bnx2x_get_mac_hwinfo(bp);
-#ifdef BCM_CNIC
bnx2x_get_cnic_info(bp);
-#endif
/* Get current FW pulse sequence */
if (!BP_NOMCP(bp)) {
@@ -9681,30 +9931,49 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
{
int cnt, i, block_end, rodi;
- char vpd_data[BNX2X_VPD_LEN+1];
+ char vpd_start[BNX2X_VPD_LEN+1];
char str_id_reg[VENDOR_ID_LEN+1];
char str_id_cap[VENDOR_ID_LEN+1];
+ char *vpd_data;
+ char *vpd_extended_data = NULL;
u8 len;
- cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
+ cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_start);
memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
if (cnt < BNX2X_VPD_LEN)
goto out_not_found;
- i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
+	/* The VPD RO tag should be the first tag after the identifier string,
+	 * so we should be able to find it within the first BNX2X_VPD_LEN chars
+ */
+ i = pci_vpd_find_tag(vpd_start, 0, BNX2X_VPD_LEN,
PCI_VPD_LRDT_RO_DATA);
if (i < 0)
goto out_not_found;
-
block_end = i + PCI_VPD_LRDT_TAG_SIZE +
- pci_vpd_lrdt_size(&vpd_data[i]);
+ pci_vpd_lrdt_size(&vpd_start[i]);
i += PCI_VPD_LRDT_TAG_SIZE;
- if (block_end > BNX2X_VPD_LEN)
- goto out_not_found;
+ if (block_end > BNX2X_VPD_LEN) {
+ vpd_extended_data = kmalloc(block_end, GFP_KERNEL);
+ if (vpd_extended_data == NULL)
+ goto out_not_found;
+
+ /* read rest of vpd image into vpd_extended_data */
+ memcpy(vpd_extended_data, vpd_start, BNX2X_VPD_LEN);
+ cnt = pci_read_vpd(bp->pdev, BNX2X_VPD_LEN,
+ block_end - BNX2X_VPD_LEN,
+ vpd_extended_data + BNX2X_VPD_LEN);
+ if (cnt < (block_end - BNX2X_VPD_LEN))
+ goto out_not_found;
+ vpd_data = vpd_extended_data;
+ } else
+ vpd_data = vpd_start;
+
+ /* now vpd_data holds full vpd content in both cases */
rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
PCI_VPD_RO_KEYWORD_MFR_ID);
@@ -9736,9 +10005,11 @@ static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
bp->fw_ver[len] = ' ';
}
}
+ kfree(vpd_extended_data);
return;
}
out_not_found:
+ kfree(vpd_extended_data);
return;
}
@@ -9840,15 +10111,20 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
bp->multi_mode = multi_mode;
+ bp->disable_tpa = disable_tpa;
+
+#ifdef BCM_CNIC
+ bp->disable_tpa |= IS_MF_ISCSI_SD(bp);
+#endif
+
/* Set TPA flags */
- if (disable_tpa) {
+ if (bp->disable_tpa) {
bp->flags &= ~TPA_ENABLE_FLAG;
bp->dev->features &= ~NETIF_F_LRO;
} else {
bp->flags |= TPA_ENABLE_FLAG;
bp->dev->features |= NETIF_F_LRO;
}
- bp->disable_tpa = disable_tpa;
if (CHIP_IS_E1(bp))
bp->dropless_fc = 0;
@@ -9965,7 +10241,7 @@ static int bnx2x_open(struct net_device *dev)
}
/* called with rtnl_lock */
-static int bnx2x_close(struct net_device *dev)
+int bnx2x_close(struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
@@ -10119,6 +10395,11 @@ void bnx2x_set_rx_mode(struct net_device *dev)
}
bp->rx_mode = rx_mode;
+#ifdef BCM_CNIC
+ /* handle ISCSI SD mode */
+ if (IS_MF_ISCSI_SD(bp))
+ bp->rx_mode = BNX2X_RX_MODE_NONE;
+#endif
/* Schedule the rx_mode command */
if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) {
@@ -10198,6 +10479,15 @@ static void poll_bnx2x(struct net_device *dev)
}
#endif
+static int bnx2x_validate_addr(struct net_device *dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ if (!bnx2x_is_valid_ether_addr(bp, dev->dev_addr))
+ return -EADDRNOTAVAIL;
+ return 0;
+}
+
static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_open = bnx2x_open,
.ndo_stop = bnx2x_close,
@@ -10205,7 +10495,7 @@ static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_select_queue = bnx2x_select_queue,
.ndo_set_rx_mode = bnx2x_set_rx_mode,
.ndo_set_mac_address = bnx2x_change_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
+ .ndo_validate_addr = bnx2x_validate_addr,
.ndo_do_ioctl = bnx2x_ioctl,
.ndo_change_mtu = bnx2x_change_mtu,
.ndo_fix_features = bnx2x_fix_features,
@@ -10823,8 +11113,8 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
bp->qm_cid_count = bnx2x_set_qm_cid_count(bp);
#ifdef BCM_CNIC
- /* disable FCOE L2 queue for E1x and E3*/
- if (CHIP_IS_E1x(bp) || CHIP_IS_E3(bp))
+ /* disable FCOE L2 queue for E1x */
+ if (CHIP_IS_E1x(bp))
bp->flags |= NO_FCOE_FLAG;
#endif
@@ -11486,6 +11776,38 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
smp_mb__after_atomic_inc();
break;
}
+ case DRV_CTL_ULP_REGISTER_CMD: {
+ int ulp_type = ctl->data.ulp_type;
+
+ if (CHIP_IS_E3(bp)) {
+ int idx = BP_FW_MB_IDX(bp);
+ u32 cap;
+
+ cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
+ if (ulp_type == CNIC_ULP_ISCSI)
+ cap |= DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
+ else if (ulp_type == CNIC_ULP_FCOE)
+ cap |= DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
+ SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
+ }
+ break;
+ }
+ case DRV_CTL_ULP_UNREGISTER_CMD: {
+ int ulp_type = ctl->data.ulp_type;
+
+ if (CHIP_IS_E3(bp)) {
+ int idx = BP_FW_MB_IDX(bp);
+ u32 cap;
+
+ cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
+ if (ulp_type == CNIC_ULP_ISCSI)
+ cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
+ else if (ulp_type == CNIC_ULP_FCOE)
+ cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
+ SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
+ }
+ break;
+ }
default:
BNX2X_ERR("unknown command %x\n", ctl->cmd);
@@ -11561,7 +11883,7 @@ static int bnx2x_unregister_cnic(struct net_device *dev)
mutex_lock(&bp->cnic_mutex);
cp->drv_state = 0;
- rcu_assign_pointer(bp->cnic_ops, NULL);
+ RCU_INIT_POINTER(bp->cnic_ops, NULL);
mutex_unlock(&bp->cnic_mutex);
synchronize_rcu();
kfree(bp->cnic_kwq);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index fc7bd0f23c0b..dddbcf6e154e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -160,8 +160,11 @@
#define BRB1_REG_PAUSE_HIGH_THRESHOLD_1 0x6007c
/* [RW 10] Write client 0: Assert pause threshold. */
#define BRB1_REG_PAUSE_LOW_THRESHOLD_0 0x60068
-#define BRB1_REG_PAUSE_LOW_THRESHOLD_1 0x6006c
-/* [R 24] The number of full blocks occupied by port. */
+/* [RW 1] Indicates whether to use per-class guaranty mode (new mode) or per-MAC
+ * guaranty mode (backwards-compatible mode). 0=per-MAC guaranty mode (BC
+ * mode). 1=per-class guaranty mode (new mode). */
+#define BRB1_REG_PER_CLASS_GUARANTY_MODE 0x60268
+/* [R 24] The number of full blocks occupied by port. */
#define BRB1_REG_PORT_NUM_OCC_BLOCKS_0 0x60094
/* [RW 1] Reset the design by software. */
#define BRB1_REG_SOFT_RESET 0x600dc
@@ -1619,6 +1622,14 @@
register bits. */
#define MISC_REG_LCPLL_CTRL_1 0xa2a4
#define MISC_REG_LCPLL_CTRL_REG_2 0xa2a8
+/* [RW 1] LCPLL power down. Global register. Active High. Reset on POR
+ * reset. */
+#define MISC_REG_LCPLL_E40_PWRDWN 0xaa74
+/* [RW 1] LCPLL VCO reset. Global register. Active Low Reset on POR reset. */
+#define MISC_REG_LCPLL_E40_RESETB_ANA 0xaa78
+/* [RW 1] LCPLL post-divider reset. Global register. Active Low Reset on POR
+ * reset. */
+#define MISC_REG_LCPLL_E40_RESETB_DIG 0xaa7c
/* [RW 4] Interrupt mask register #0 read/write */
#define MISC_REG_MISC_INT_MASK 0xa388
/* [RW 1] Parity mask register #0 read/write */
@@ -1754,6 +1765,7 @@
* is compared to the value on ctrl_md_devad. Drives output
* misc_xgxs0_phy_addr. Global register. */
#define MISC_REG_WC0_CTRL_PHY_ADDR 0xa9cc
+#define MISC_REG_WC0_RESET 0xac30
/* [RW 2] XMAC Core port mode. Indicates the number of ports on the system
side. This should be less than or equal to phy_port_mode; if some of the
ports are not used. This enables reduction of frequency on the core side.
@@ -2164,6 +2176,7 @@
* set to 0x345678021. This is a new register (with 2_) added in E3 B0 to
* accommodate the 9 input clients to ETS arbiter. */
#define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB 0x18684
+#define NIG_REG_P1_HWPFC_ENABLE 0x181d0
#define NIG_REG_P1_MAC_IN_EN 0x185c0
/* [RW 1] Output enable for TX MAC interface */
#define NIG_REG_P1_MAC_OUT_EN 0x185c4
@@ -6823,11 +6836,13 @@ Theotherbitsarereservedandshouldbezero*/
#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_COPPER 0x0000
#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_FIBER 0x0100
#define MDIO_CTL_REG_84823_MEDIA_FIBER_1G 0x1000
-#define MDIO_CTL_REG_84823_USER_CTRL_REG 0x4005
-#define MDIO_CTL_REG_84823_USER_CTRL_CMS 0x0080
-
-#define MDIO_PMA_REG_84823_CTL_LED_CTL_1 0xa8e3
-#define MDIO_PMA_REG_84823_LED3_STRETCH_EN 0x0080
+#define MDIO_CTL_REG_84823_USER_CTRL_REG 0x4005
+#define MDIO_CTL_REG_84823_USER_CTRL_CMS 0x0080
+#define MDIO_PMA_REG_84823_CTL_SLOW_CLK_CNT_HIGH 0xa82b
+#define MDIO_PMA_REG_84823_BLINK_RATE_VAL_15P9HZ 0x2f
+#define MDIO_PMA_REG_84823_CTL_LED_CTL_1 0xa8e3
+#define MDIO_PMA_REG_84833_CTL_LED_CTL_1 0xa8ec
+#define MDIO_PMA_REG_84823_LED3_STRETCH_EN 0x0080
/* BCM84833 only */
#define MDIO_84833_TOP_CFG_XGPHY_STRAP1 0x401a
@@ -6838,26 +6853,35 @@ Theotherbitsarereservedandshouldbezero*/
#define MDIO_84833_TOP_CFG_SCRATCH_REG2 0x4007
#define MDIO_84833_TOP_CFG_SCRATCH_REG3 0x4008
#define MDIO_84833_TOP_CFG_SCRATCH_REG4 0x4009
-#define MDIO_84833_TOP_CFG_DATA3_REG 0x4011
-#define MDIO_84833_TOP_CFG_DATA4_REG 0x4012
+#define MDIO_84833_TOP_CFG_SCRATCH_REG26 0x4037
+#define MDIO_84833_TOP_CFG_SCRATCH_REG27 0x4038
+#define MDIO_84833_TOP_CFG_SCRATCH_REG28 0x4039
+#define MDIO_84833_TOP_CFG_SCRATCH_REG29 0x403a
+#define MDIO_84833_TOP_CFG_SCRATCH_REG30 0x403b
+#define MDIO_84833_TOP_CFG_SCRATCH_REG31 0x403c
+#define MDIO_84833_CMD_HDLR_COMMAND MDIO_84833_TOP_CFG_SCRATCH_REG0
+#define MDIO_84833_CMD_HDLR_STATUS MDIO_84833_TOP_CFG_SCRATCH_REG26
+#define MDIO_84833_CMD_HDLR_DATA1 MDIO_84833_TOP_CFG_SCRATCH_REG27
+#define MDIO_84833_CMD_HDLR_DATA2 MDIO_84833_TOP_CFG_SCRATCH_REG28
+#define MDIO_84833_CMD_HDLR_DATA3 MDIO_84833_TOP_CFG_SCRATCH_REG29
+#define MDIO_84833_CMD_HDLR_DATA4 MDIO_84833_TOP_CFG_SCRATCH_REG30
+#define MDIO_84833_CMD_HDLR_DATA5 MDIO_84833_TOP_CFG_SCRATCH_REG31
/* Mailbox command set used by 84833. */
-#define PHY84833_DIAG_CMD_PAIR_SWAP_CHANGE 0x2
+#define PHY84833_CMD_SET_PAIR_SWAP 0x8001
+#define PHY84833_CMD_GET_EEE_MODE 0x8008
+#define PHY84833_CMD_SET_EEE_MODE 0x8009
/* Mailbox status set used by 84833. */
-#define PHY84833_CMD_RECEIVED 0x0001
-#define PHY84833_CMD_IN_PROGRESS 0x0002
-#define PHY84833_CMD_COMPLETE_PASS 0x0004
-#define PHY84833_CMD_COMPLETE_ERROR 0x0008
-#define PHY84833_CMD_OPEN_FOR_CMDS 0x0010
-#define PHY84833_CMD_SYSTEM_BOOT 0x0020
-#define PHY84833_CMD_NOT_OPEN_FOR_CMDS 0x0040
-#define PHY84833_CMD_CLEAR_COMPLETE 0x0080
-#define PHY84833_CMD_OPEN_OVERRIDE 0xa5a5
-
+#define PHY84833_STATUS_CMD_RECEIVED 0x0001
+#define PHY84833_STATUS_CMD_IN_PROGRESS 0x0002
+#define PHY84833_STATUS_CMD_COMPLETE_PASS 0x0004
+#define PHY84833_STATUS_CMD_COMPLETE_ERROR 0x0008
+#define PHY84833_STATUS_CMD_OPEN_FOR_CMDS 0x0010
+#define PHY84833_STATUS_CMD_SYSTEM_BOOT 0x0020
+#define PHY84833_STATUS_CMD_NOT_OPEN_FOR_CMDS 0x0040
+#define PHY84833_STATUS_CMD_CLEAR_COMPLETE 0x0080
+#define PHY84833_STATUS_CMD_OPEN_OVERRIDE 0xa5a5
-/* 84833 F/W Feature Commands */
-#define PHY84833_DIAG_CMD_GET_EEE_MODE 0x27
-#define PHY84833_DIAG_CMD_SET_EEE_MODE 0x28
/* Warpcore clause 45 addressing */
#define MDIO_WC_DEVAD 0x3
@@ -6990,6 +7014,7 @@ Theotherbitsarereservedandshouldbezero*/
#define MDIO_REG_INTR_MASK 0x1b
#define MDIO_REG_INTR_MASK_LINK_STATUS (0x1 << 1)
#define MDIO_REG_GPHY_SHADOW 0x1c
+#define MDIO_REG_GPHY_SHADOW_LED_SEL1 (0x0d << 10)
#define MDIO_REG_GPHY_SHADOW_LED_SEL2 (0x0e << 10)
#define MDIO_REG_GPHY_SHADOW_WR_ENA (0x1 << 15)
#define MDIO_REG_GPHY_SHADOW_AUTO_DET_MED (0x1e << 10)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 14517691f8db..5ac616093f9f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -30,6 +30,8 @@
#define BNX2X_MAX_EMUL_MULTI 16
+#define MAC_LEADING_ZERO_CNT (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN)
+
/**** Exe Queue interfaces ****/
/**
@@ -441,6 +443,36 @@ static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
return true;
}
+static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
+ int n, u8 *buf)
+{
+ struct bnx2x_vlan_mac_registry_elem *pos;
+ u8 *next = buf;
+ int counter = 0;
+
+ /* traverse list */
+ list_for_each_entry(pos, &o->head, link) {
+ if (counter < n) {
+ /* place leading zeroes in buffer */
+ memset(next, 0, MAC_LEADING_ZERO_CNT);
+
+ /* place mac after leading zeroes*/
+ memcpy(next + MAC_LEADING_ZERO_CNT, pos->u.mac.mac,
+ ETH_ALEN);
+
+ /* calculate address of next element and
+ * advance counter
+ */
+ counter++;
+ next = buf + counter * ALIGN(ETH_ALEN, sizeof(u32));
+
+ DP(BNX2X_MSG_SP, "copied element number %d to address %p element was %pM\n",
+ counter, next, pos->u.mac.mac);
+ }
+ }
+ return counter * ETH_ALEN;
+}
+
/* check_add() callbacks */
static int bnx2x_check_mac_add(struct bnx2x_vlan_mac_obj *o,
union bnx2x_classification_ramrod_data *data)
@@ -1886,6 +1918,7 @@ void bnx2x_init_mac_obj(struct bnx2x *bp,
mac_obj->check_move = bnx2x_check_move;
mac_obj->ramrod_cmd =
RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
+ mac_obj->get_n_elements = bnx2x_get_n_elements;
/* Exe Queue */
bnx2x_exe_queue_init(bp,
@@ -3342,7 +3375,7 @@ static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp,
if (!list_empty(&o->registry.exact_match.macs))
return 0;
- elem = kzalloc(sizeof(*elem)*len, GFP_ATOMIC);
+ elem = kcalloc(len, sizeof(*elem), GFP_ATOMIC);
if (!elem) {
BNX2X_ERR("Failed to allocate registry memory\n");
return -ENOMEM;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index 9a517c2e9f1b..992308ff82e8 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -285,6 +285,19 @@ struct bnx2x_vlan_mac_obj {
/* RAMROD command to be used */
int ramrod_cmd;
+	/* copy the first n elements into a preallocated buffer
+ *
+ * @param n number of elements to get
+ * @param buf buffer preallocated by caller into which elements
+ * will be copied. Note elements are 4-byte aligned
+	 *            so the buffer size must be able to accommodate the
+ * aligned elements.
+ *
+ * @return number of copied bytes
+ */
+ int (*get_n_elements)(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
+ int n, u8 *buf);
+
/**
* Checks if ADD-ramrod with the given params may be performed.
*
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index 02ac6a771bf9..bc0121ac291e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -39,6 +39,17 @@ static inline long bnx2x_hilo(u32 *hiref)
#endif
}
+static u16 bnx2x_get_port_stats_dma_len(struct bnx2x *bp)
+{
+ u16 res = sizeof(struct host_port_stats) >> 2;
+
+ /* if PFC stats are not supported by the MFW, don't DMA them */
+ if (!(bp->flags & BC_SUPPORTS_PFC_STATS))
+ res -= (sizeof(u32)*4) >> 2;
+
+ return res;
+}
+
/*
* Init service functions
*/
@@ -178,7 +189,8 @@ static void bnx2x_stats_pmf_update(struct bnx2x *bp)
DMAE_LEN32_RD_MAX * 4);
dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
DMAE_LEN32_RD_MAX * 4);
- dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
+ dmae->len = bnx2x_get_port_stats_dma_len(bp) - DMAE_LEN32_RD_MAX;
+
dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
dmae->comp_val = DMAE_COMP_VAL;
@@ -217,7 +229,7 @@ static void bnx2x_port_stats_init(struct bnx2x *bp)
dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
dmae->dst_addr_lo = bp->port.port_stx >> 2;
dmae->dst_addr_hi = 0;
- dmae->len = sizeof(struct host_port_stats) >> 2;
+ dmae->len = bnx2x_get_port_stats_dma_len(bp);
dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
dmae->comp_addr_hi = 0;
dmae->comp_val = 1;
@@ -540,6 +552,25 @@ static void bnx2x_bmac_stats_update(struct bnx2x *bp)
UPDATE_STAT64(tx_stat_gterr,
tx_stat_dot3statsinternalmactransmiterrors);
UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);
+
+ /* collect PFC stats */
+ DIFF_64(diff.hi, new->tx_stat_gtpp_hi,
+ pstats->pfc_frames_tx_hi,
+ diff.lo, new->tx_stat_gtpp_lo,
+ pstats->pfc_frames_tx_lo);
+ pstats->pfc_frames_tx_hi = new->tx_stat_gtpp_hi;
+ pstats->pfc_frames_tx_lo = new->tx_stat_gtpp_lo;
+ ADD_64(pstats->pfc_frames_tx_hi, diff.hi,
+ pstats->pfc_frames_tx_lo, diff.lo);
+
+ DIFF_64(diff.hi, new->rx_stat_grpp_hi,
+ pstats->pfc_frames_rx_hi,
+ diff.lo, new->rx_stat_grpp_lo,
+ pstats->pfc_frames_rx_lo);
+ pstats->pfc_frames_rx_hi = new->rx_stat_grpp_hi;
+ pstats->pfc_frames_rx_lo = new->rx_stat_grpp_lo;
+ ADD_64(pstats->pfc_frames_rx_hi, diff.hi,
+ pstats->pfc_frames_rx_lo, diff.lo);
}
estats->pause_frames_received_hi =
@@ -551,6 +582,15 @@ static void bnx2x_bmac_stats_update(struct bnx2x *bp)
pstats->mac_stx[1].tx_stat_outxoffsent_hi;
estats->pause_frames_sent_lo =
pstats->mac_stx[1].tx_stat_outxoffsent_lo;
+
+ estats->pfc_frames_received_hi =
+ pstats->pfc_frames_rx_hi;
+ estats->pfc_frames_received_lo =
+ pstats->pfc_frames_rx_lo;
+ estats->pfc_frames_sent_hi =
+ pstats->pfc_frames_tx_hi;
+ estats->pfc_frames_sent_lo =
+ pstats->pfc_frames_tx_lo;
}
static void bnx2x_mstat_stats_update(struct bnx2x *bp)
@@ -571,6 +611,11 @@ static void bnx2x_mstat_stats_update(struct bnx2x *bp)
ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_outxoffsent);
ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_flowcontroldone);
+ /* collect pfc stats */
+ ADD_64(pstats->pfc_frames_tx_hi, new->stats_tx.tx_gtxpp_hi,
+ pstats->pfc_frames_tx_lo, new->stats_tx.tx_gtxpp_lo);
+ ADD_64(pstats->pfc_frames_rx_hi, new->stats_rx.rx_grxpp_hi,
+ pstats->pfc_frames_rx_lo, new->stats_rx.rx_grxpp_lo);
ADD_STAT64(stats_tx.tx_gt64, tx_stat_etherstatspkts64octets);
ADD_STAT64(stats_tx.tx_gt127,
@@ -628,6 +673,15 @@ static void bnx2x_mstat_stats_update(struct bnx2x *bp)
pstats->mac_stx[1].tx_stat_outxoffsent_hi;
estats->pause_frames_sent_lo =
pstats->mac_stx[1].tx_stat_outxoffsent_lo;
+
+ estats->pfc_frames_received_hi =
+ pstats->pfc_frames_rx_hi;
+ estats->pfc_frames_received_lo =
+ pstats->pfc_frames_rx_lo;
+ estats->pfc_frames_sent_hi =
+ pstats->pfc_frames_tx_hi;
+ estats->pfc_frames_sent_lo =
+ pstats->pfc_frames_tx_lo;
}
static void bnx2x_emac_stats_update(struct bnx2x *bp)
@@ -740,7 +794,7 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp)
estats->brb_drop_hi = pstats->brb_drop_hi;
estats->brb_drop_lo = pstats->brb_drop_lo;
- pstats->host_port_stats_start = ++pstats->host_port_stats_end;
+ pstats->host_port_stats_counter++;
if (!BP_NOMCP(bp)) {
u32 nig_timer_max =
@@ -1265,7 +1319,7 @@ static void bnx2x_port_stats_stop(struct bnx2x *bp)
dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
dmae->dst_addr_lo = bp->port.port_stx >> 2;
dmae->dst_addr_hi = 0;
- dmae->len = sizeof(struct host_port_stats) >> 2;
+ dmae->len = bnx2x_get_port_stats_dma_len(bp);
if (bp->func_stx) {
dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
dmae->comp_addr_hi = 0;
@@ -1349,12 +1403,14 @@ void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
enum bnx2x_stats_state state;
if (unlikely(bp->panic))
return;
- bnx2x_stats_stm[bp->stats_state][event].action(bp);
+
spin_lock_bh(&bp->stats_lock);
state = bp->stats_state;
bp->stats_state = bnx2x_stats_stm[state][event].next_state;
spin_unlock_bh(&bp->stats_lock);
+ bnx2x_stats_stm[state][event].action(bp);
+
if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
state, event, bp->stats_state);
@@ -1380,7 +1436,7 @@ static void bnx2x_port_stats_base_init(struct bnx2x *bp)
dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
dmae->dst_addr_lo = bp->port.port_stx >> 2;
dmae->dst_addr_hi = 0;
- dmae->len = sizeof(struct host_port_stats) >> 2;
+ dmae->len = bnx2x_get_port_stats_dma_len(bp);
dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
dmae->comp_val = DMAE_COMP_VAL;
@@ -1457,6 +1513,7 @@ static void bnx2x_func_stats_base_update(struct bnx2x *bp)
static inline void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
{
int i;
+ int first_queue_query_index;
struct stats_query_header *stats_hdr = &bp->fw_stats_req->hdr;
dma_addr_t cur_data_offset;
@@ -1512,14 +1569,40 @@ static inline void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset));
cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset));
+ /**** FCoE FW statistics data ****/
+ if (!NO_FCOE(bp)) {
+ cur_data_offset = bp->fw_stats_data_mapping +
+ offsetof(struct bnx2x_fw_stats_data, fcoe);
+
+ cur_query_entry =
+ &bp->fw_stats_req->query[BNX2X_FCOE_QUERY_IDX];
+
+ cur_query_entry->kind = STATS_TYPE_FCOE;
+ /* For FCoE query index is a DONT CARE */
+ cur_query_entry->index = BP_PORT(bp);
+ cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
+ cur_query_entry->address.hi =
+ cpu_to_le32(U64_HI(cur_data_offset));
+ cur_query_entry->address.lo =
+ cpu_to_le32(U64_LO(cur_data_offset));
+ }
+
/**** Clients' queries ****/
cur_data_offset = bp->fw_stats_data_mapping +
offsetof(struct bnx2x_fw_stats_data, queue_stats);
+	/* first queue query index depends on whether FCoE offloaded request will
+ * be included in the ramrod
+ */
+ if (!NO_FCOE(bp))
+ first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX;
+ else
+ first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX - 1;
+
for_each_eth_queue(bp, i) {
cur_query_entry =
&bp->fw_stats_req->
- query[BNX2X_FIRST_QUEUE_QUERY_IDX + i];
+ query[first_queue_query_index + i];
cur_query_entry->kind = STATS_TYPE_QUEUE;
cur_query_entry->index = bnx2x_stats_id(&bp->fp[i]);
@@ -1531,6 +1614,21 @@ static inline void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
cur_data_offset += sizeof(struct per_queue_stats);
}
+
+ /* add FCoE queue query if needed */
+ if (!NO_FCOE(bp)) {
+ cur_query_entry =
+ &bp->fw_stats_req->
+ query[first_queue_query_index + i];
+
+ cur_query_entry->kind = STATS_TYPE_QUEUE;
+ cur_query_entry->index = bnx2x_stats_id(&bp->fp[FCOE_IDX]);
+ cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
+ cur_query_entry->address.hi =
+ cpu_to_le32(U64_HI(cur_data_offset));
+ cur_query_entry->address.lo =
+ cpu_to_le32(U64_LO(cur_data_offset));
+ }
}
void bnx2x_stats_init(struct bnx2x *bp)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
index 5d8ce2f6afef..683deb053109 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
@@ -193,6 +193,12 @@ struct bnx2x_eth_stats {
u32 total_tpa_aggregated_frames_lo;
u32 total_tpa_bytes_hi;
u32 total_tpa_bytes_lo;
+
+ /* PFC */
+ u32 pfc_frames_received_hi;
+ u32 pfc_frames_received_lo;
+ u32 pfc_frames_sent_hi;
+ u32 pfc_frames_sent_lo;
};
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 6f10c6939834..dd3a0a232ea0 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -250,6 +250,21 @@ static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
return io->data;
}
+static void cnic_ulp_ctl(struct cnic_dev *dev, int ulp_type, bool reg)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_eth_dev *ethdev = cp->ethdev;
+ struct drv_ctl_info info;
+
+ if (reg)
+ info.cmd = DRV_CTL_ULP_REGISTER_CMD;
+ else
+ info.cmd = DRV_CTL_ULP_UNREGISTER_CMD;
+
+ info.data.ulp_type = ulp_type;
+ ethdev->drv_ctl(dev->netdev, &info);
+}
+
static int cnic_in_use(struct cnic_sock *csk)
{
return test_bit(SK_F_INUSE, &csk->flags);
@@ -506,7 +521,7 @@ int cnic_unregister_driver(int ulp_type)
}
read_unlock(&cnic_dev_lock);
- rcu_assign_pointer(cnic_ulp_tbl[ulp_type], NULL);
+ RCU_INIT_POINTER(cnic_ulp_tbl[ulp_type], NULL);
mutex_unlock(&cnic_lock);
synchronize_rcu();
@@ -563,6 +578,8 @@ static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
mutex_unlock(&cnic_lock);
+ cnic_ulp_ctl(dev, ulp_type, true);
+
return 0;
}
@@ -579,7 +596,7 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
}
mutex_lock(&cnic_lock);
if (rcu_dereference(cp->ulp_ops[ulp_type])) {
- rcu_assign_pointer(cp->ulp_ops[ulp_type], NULL);
+ RCU_INIT_POINTER(cp->ulp_ops[ulp_type], NULL);
cnic_put(dev);
} else {
pr_err("%s: device not registered to this ulp type %d\n",
@@ -602,6 +619,8 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n");
+ cnic_ulp_ctl(dev, ulp_type, false);
+
return 0;
}
EXPORT_SYMBOL(cnic_unregister_driver);
@@ -1342,7 +1361,7 @@ static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
if (ret == 1)
return 0;
- return -EBUSY;
+ return ret;
}
static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type,
@@ -1830,7 +1849,7 @@ static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
done:
cqes[0] = (struct kcqe *) &kcqe;
cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
- return ret;
+ return 0;
}
@@ -1928,7 +1947,7 @@ destroy_reply:
cqes[0] = (struct kcqe *) &kcqe;
cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
- return ret;
+ return 0;
}
static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
@@ -2494,6 +2513,57 @@ static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
return ret;
}
+static void cnic_bnx2x_kwqe_err(struct cnic_dev *dev, struct kwqe *kwqe)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct kcqe kcqe;
+ struct kcqe *cqes[1];
+ u32 cid;
+ u32 opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
+ u32 layer_code = kwqe->kwqe_op_flag & KWQE_LAYER_MASK;
+ int ulp_type;
+
+ cid = kwqe->kwqe_info0;
+ memset(&kcqe, 0, sizeof(kcqe));
+
+ if (layer_code == KWQE_FLAGS_LAYER_MASK_L5_ISCSI) {
+ ulp_type = CNIC_ULP_ISCSI;
+ if (opcode == ISCSI_KWQE_OPCODE_UPDATE_CONN)
+ cid = kwqe->kwqe_info1;
+
+ kcqe.kcqe_op_flag = (opcode + 0x10) << KCQE_FLAGS_OPCODE_SHIFT;
+ kcqe.kcqe_op_flag |= KCQE_FLAGS_LAYER_MASK_L5_ISCSI;
+ kcqe.kcqe_info1 = ISCSI_KCQE_COMPLETION_STATUS_NIC_ERROR;
+ kcqe.kcqe_info2 = cid;
+ cnic_get_l5_cid(cp, BNX2X_SW_CID(cid), &kcqe.kcqe_info0);
+
+ } else if (layer_code == KWQE_FLAGS_LAYER_MASK_L4) {
+ struct l4_kcq *l4kcqe = (struct l4_kcq *) &kcqe;
+ u32 kcqe_op;
+
+ ulp_type = CNIC_ULP_L4;
+ if (opcode == L4_KWQE_OPCODE_VALUE_CONNECT1)
+ kcqe_op = L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE;
+ else if (opcode == L4_KWQE_OPCODE_VALUE_RESET)
+ kcqe_op = L4_KCQE_OPCODE_VALUE_RESET_COMP;
+ else if (opcode == L4_KWQE_OPCODE_VALUE_CLOSE)
+ kcqe_op = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
+ else
+ return;
+
+ kcqe.kcqe_op_flag = (kcqe_op << KCQE_FLAGS_OPCODE_SHIFT) |
+ KCQE_FLAGS_LAYER_MASK_L4;
+ l4kcqe->status = L4_KCQE_COMPLETION_STATUS_NIC_ERROR;
+ l4kcqe->cid = cid;
+ cnic_get_l5_cid(cp, BNX2X_SW_CID(cid), &l4kcqe->conn_id);
+ } else {
+ return;
+ }
+
+ cqes[0] = (struct kcqe *) &kcqe;
+ cnic_reply_bnx2x_kcqes(dev, ulp_type, cqes, 1);
+}
+
static int cnic_submit_bnx2x_iscsi_kwqes(struct cnic_dev *dev,
struct kwqe *wqes[], u32 num_wqes)
{
@@ -2551,9 +2621,17 @@ static int cnic_submit_bnx2x_iscsi_kwqes(struct cnic_dev *dev,
opcode);
break;
}
- if (ret < 0)
+ if (ret < 0) {
netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
opcode);
+
+ /* Possibly bnx2x parity error, send completion
+ * to ulp drivers with error code to speed up
+ * cleanup and reset recovery.
+ */
+ if (ret == -EIO || ret == -EAGAIN)
+ cnic_bnx2x_kwqe_err(dev, kwqe);
+ }
i += work;
}
return 0;
@@ -3052,9 +3130,26 @@ static void cnic_ulp_start(struct cnic_dev *dev)
}
}
+static int cnic_copy_ulp_stats(struct cnic_dev *dev, int ulp_type)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_ulp_ops *ulp_ops;
+ int rc;
+
+ mutex_lock(&cnic_lock);
+ ulp_ops = cnic_ulp_tbl_prot(ulp_type);
+ if (ulp_ops && ulp_ops->cnic_get_stats)
+ rc = ulp_ops->cnic_get_stats(cp->ulp_handle[ulp_type]);
+ else
+ rc = -ENODEV;
+ mutex_unlock(&cnic_lock);
+ return rc;
+}
+
static int cnic_ctl(void *data, struct cnic_ctl_info *info)
{
struct cnic_dev *dev = data;
+ int ulp_type = CNIC_ULP_ISCSI;
switch (info->cmd) {
case CNIC_CTL_STOP_CMD:
@@ -3100,6 +3195,15 @@ static int cnic_ctl(void *data, struct cnic_ctl_info *info)
}
break;
}
+ case CNIC_CTL_FCOE_STATS_GET_CMD:
+ ulp_type = CNIC_ULP_FCOE;
+ /* fall through */
+ case CNIC_CTL_ISCSI_STATS_GET_CMD:
+ cnic_hold(dev);
+ cnic_copy_ulp_stats(dev, ulp_type);
+ cnic_put(dev);
+ break;
+
default:
return -EINVAL;
}
@@ -3475,7 +3579,7 @@ static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
struct flowi6 fl6;
memset(&fl6, 0, sizeof(fl6));
- ipv6_addr_copy(&fl6.daddr, &dst_addr->sin6_addr);
+ fl6.daddr = dst_addr->sin6_addr;
if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
fl6.flowi6_oif = dst_addr->sin6_scope_id;
@@ -3804,6 +3908,9 @@ static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
case L4_KCQE_OPCODE_VALUE_RESET_COMP:
case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
+ if (l4kcqe->status == L4_KCQE_COMPLETION_STATUS_NIC_ERROR)
+ set_bit(SK_F_HW_ERR, &csk->flags);
+
cp->close_conn(csk, opcode);
break;
@@ -3931,7 +4038,9 @@ static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode)
case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
case L4_KCQE_OPCODE_VALUE_RESET_COMP:
if (cnic_ready_to_close(csk, opcode)) {
- if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
+ if (test_bit(SK_F_HW_ERR, &csk->flags))
+ close_complete = 1;
+ else if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE;
else
close_complete = 1;
@@ -4824,6 +4933,7 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
int func = CNIC_FUNC(cp), ret;
u32 pfid;
+ dev->stats_addr = ethdev->addr_drv_info_to_mcp;
cp->port_mode = CHIP_PORT_MODE_NONE;
if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
@@ -5134,7 +5244,7 @@ static void cnic_stop_hw(struct cnic_dev *dev)
}
cnic_shutdown_rings(dev);
clear_bit(CNIC_F_CNIC_UP, &dev->flags);
- rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], NULL);
+ RCU_INIT_POINTER(cp->ulp_ops[CNIC_ULP_L4], NULL);
synchronize_rcu();
cnic_cm_shutdown(dev);
cp->stop_hw(dev);
@@ -5288,6 +5398,8 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
cdev->pcidev = pdev;
cp->chip_id = ethdev->chip_id;
+ cdev->stats_addr = ethdev->addr_drv_info_to_mcp;
+
if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) &&
diff --git a/drivers/net/ethernet/broadcom/cnic_defs.h b/drivers/net/ethernet/broadcom/cnic_defs.h
index 239de898f071..86936f6b6dbc 100644
--- a/drivers/net/ethernet/broadcom/cnic_defs.h
+++ b/drivers/net/ethernet/broadcom/cnic_defs.h
@@ -85,6 +85,7 @@
/* KCQ (kernel completion queue) completion status */
#define L4_KCQE_COMPLETION_STATUS_SUCCESS (0)
+#define L4_KCQE_COMPLETION_STATUS_NIC_ERROR (4)
#define L4_KCQE_COMPLETION_STATUS_TIMEOUT (0x93)
#define L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL (0x83)
diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h
index 79443e0dbf96..1517763d4e55 100644
--- a/drivers/net/ethernet/broadcom/cnic_if.h
+++ b/drivers/net/ethernet/broadcom/cnic_if.h
@@ -1,6 +1,6 @@
/* cnic_if.h: Broadcom CNIC core network driver.
*
- * Copyright (c) 2006-2011 Broadcom Corporation
+ * Copyright (c) 2006-2012 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -12,8 +12,8 @@
#ifndef CNIC_IF_H
#define CNIC_IF_H
-#define CNIC_MODULE_VERSION "2.5.7"
-#define CNIC_MODULE_RELDATE "July 20, 2011"
+#define CNIC_MODULE_VERSION "2.5.8"
+#define CNIC_MODULE_RELDATE "Jan 3, 2012"
#define CNIC_ULP_RDMA 0
#define CNIC_ULP_ISCSI 1
@@ -86,6 +86,8 @@ struct kcqe {
#define CNIC_CTL_START_CMD 2
#define CNIC_CTL_COMPLETION_CMD 3
#define CNIC_CTL_STOP_ISCSI_CMD 4
+#define CNIC_CTL_FCOE_STATS_GET_CMD 5
+#define CNIC_CTL_ISCSI_STATS_GET_CMD 6
#define DRV_CTL_IO_WR_CMD 0x101
#define DRV_CTL_IO_RD_CMD 0x102
@@ -96,6 +98,8 @@ struct kcqe {
#define DRV_CTL_STOP_L2_CMD 0x107
#define DRV_CTL_RET_L2_SPQ_CREDIT_CMD 0x10c
#define DRV_CTL_ISCSI_STOPPED_CMD 0x10d
+#define DRV_CTL_ULP_REGISTER_CMD 0x10e
+#define DRV_CTL_ULP_UNREGISTER_CMD 0x10f
struct cnic_ctl_completion {
u32 cid;
@@ -133,6 +137,7 @@ struct drv_ctl_info {
struct drv_ctl_spq_credit credit;
struct drv_ctl_io io;
struct drv_ctl_l2_ring ring;
+ int ulp_type;
char bytes[MAX_DRV_CTL_DATA];
} data;
};
@@ -201,6 +206,7 @@ struct cnic_eth_dev {
struct kwqe_16 *[], u32);
int (*drv_ctl)(struct net_device *, struct drv_ctl_info *);
unsigned long reserved1[2];
+ union drv_info_to_mcp *addr_drv_info_to_mcp;
};
struct cnic_sockaddr {
@@ -255,6 +261,7 @@ struct cnic_sock {
#define SK_F_CONNECT_START 4
#define SK_F_IPV6 5
#define SK_F_CLOSING 7
+#define SK_F_HW_ERR 8
atomic_t ref_count;
u32 state;
@@ -297,6 +304,8 @@ struct cnic_dev {
int max_fcoe_conn;
int max_rdma_conn;
+ union drv_info_to_mcp *stats_addr;
+
void *cnic_priv;
};
@@ -326,6 +335,7 @@ struct cnic_ulp_ops {
void (*cm_remote_abort)(struct cnic_sock *);
int (*iscsi_nl_send_msg)(void *ulp_ctx, u32 msg_type,
char *data, u16 data_size);
+ int (*cnic_get_stats)(void *ulp_ctx);
struct module *owner;
atomic_t ref_count;
};
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index 0a1d7f279fc8..084904ceaa30 100644
--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -163,7 +163,6 @@ enum sbmac_state {
#define SBMAC_MAX_TXDESCR 256
#define SBMAC_MAX_RXDESCR 256
-#define ETHER_ADDR_LEN 6
#define ENET_PACKET_SIZE 1518
/*#define ENET_PACKET_SIZE 9216 */
@@ -266,7 +265,7 @@ struct sbmac_softc {
int sbm_pause; /* current pause setting */
int sbm_link; /* current link state */
- unsigned char sbm_hwaddr[ETHER_ADDR_LEN];
+ unsigned char sbm_hwaddr[ETH_ALEN];
struct sbmacdma sbm_txdma; /* only channel 0 for now */
struct sbmacdma sbm_rxdma;
@@ -2260,7 +2259,8 @@ static int sbmac_init(struct platform_device *pldev, long long base)
}
sc->mii_bus->name = sbmac_mdio_string;
- snprintf(sc->mii_bus->id, MII_BUS_ID_SIZE, "%x", idx);
+ snprintf(sc->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+ pldev->name, idx);
sc->mii_bus->priv = sc;
sc->mii_bus->read = sbmac_mii_read;
sc->mii_bus->write = sbmac_mii_write;
@@ -2676,15 +2676,4 @@ static struct platform_driver sbmac_driver = {
},
};
-static int __init sbmac_init_module(void)
-{
- return platform_driver_register(&sbmac_driver);
-}
-
-static void __exit sbmac_cleanup_module(void)
-{
- platform_driver_unregister(&sbmac_driver);
-}
-
-module_init(sbmac_init_module);
-module_exit(sbmac_cleanup_module);
+module_platform_driver(sbmac_driver);
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index bf4074167d6a..d529af99157d 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -89,10 +89,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
#define DRV_MODULE_NAME "tg3"
#define TG3_MAJ_NUM 3
-#define TG3_MIN_NUM 121
+#define TG3_MIN_NUM 122
#define DRV_MODULE_VERSION \
__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE "November 2, 2011"
+#define DRV_MODULE_RELDATE "December 7, 2011"
#define RESET_KIND_SHUTDOWN 0
#define RESET_KIND_INIT 1
@@ -135,7 +135,6 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING 100
-#define TG3_RSS_INDIR_TBL_SIZE 128
/* Do not place this n-ring entries value into the tp struct itself,
* we really want to expose these constants to GCC so that modulo et
@@ -194,12 +193,13 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp) ((tp)->rx_offset)
#else
-#define TG3_RX_OFFSET(tp) 0
+#define TG3_RX_OFFSET(tp) (NET_SKB_PAD)
#endif
/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
-#define TG3_TX_BD_DMA_MAX 4096
+#define TG3_TX_BD_DMA_MAX_2K 2048
+#define TG3_TX_BD_DMA_MAX_4K 4096
#define TG3_RAW_IP_ALIGN 2
@@ -1670,22 +1670,6 @@ static void tg3_link_report(struct tg3 *tp)
}
}
-static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
-{
- u16 miireg;
-
- if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
- miireg = ADVERTISE_PAUSE_CAP;
- else if (flow_ctrl & FLOW_CTRL_TX)
- miireg = ADVERTISE_PAUSE_ASYM;
- else if (flow_ctrl & FLOW_CTRL_RX)
- miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
- else
- miireg = 0;
-
- return miireg;
-}
-
static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
u16 miireg;
@@ -1706,18 +1690,12 @@ static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
u8 cap = 0;
- if (lcladv & ADVERTISE_1000XPAUSE) {
- if (lcladv & ADVERTISE_1000XPSE_ASYM) {
- if (rmtadv & LPA_1000XPAUSE)
- cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
- else if (rmtadv & LPA_1000XPAUSE_ASYM)
- cap = FLOW_CTRL_RX;
- } else {
- if (rmtadv & LPA_1000XPAUSE)
- cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
- }
- } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
- if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
+ if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
+ cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
+ } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
+ if (lcladv & ADVERTISE_1000XPAUSE)
+ cap = FLOW_CTRL_RX;
+ if (rmtadv & ADVERTISE_1000XPAUSE)
cap = FLOW_CTRL_TX;
}
@@ -1792,7 +1770,7 @@ static void tg3_adjust_link(struct net_device *dev)
if (phydev->duplex == DUPLEX_HALF)
mac_mode |= MAC_MODE_HALF_DUPLEX;
else {
- lcl_adv = tg3_advert_flowctrl_1000T(
+ lcl_adv = mii_advertise_flowctrl(
tp->link_config.flowctrl);
if (phydev->pause)
@@ -2160,7 +2138,7 @@ static void tg3_phy_eee_enable(struct tg3 *tp)
if (tp->link_config.active_speed == SPEED_1000 &&
(GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
+ tg3_flag(tp, 57765_CLASS)) &&
!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
val = MII_TG3_DSP_TAP26_ALNOKO |
MII_TG3_DSP_TAP26_RMRXSTO;
@@ -2679,8 +2657,7 @@ static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
bool need_vaux = false;
/* The GPIOs do something completely different on 57765. */
- if (!tg3_flag(tp, IS_NIC) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+ if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
return;
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
@@ -3594,37 +3571,24 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
u32 val, new_adv;
new_adv = ADVERTISE_CSMA;
- if (advertise & ADVERTISED_10baseT_Half)
- new_adv |= ADVERTISE_10HALF;
- if (advertise & ADVERTISED_10baseT_Full)
- new_adv |= ADVERTISE_10FULL;
- if (advertise & ADVERTISED_100baseT_Half)
- new_adv |= ADVERTISE_100HALF;
- if (advertise & ADVERTISED_100baseT_Full)
- new_adv |= ADVERTISE_100FULL;
-
- new_adv |= tg3_advert_flowctrl_1000T(flowctrl);
+ new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
+ new_adv |= mii_advertise_flowctrl(flowctrl);
err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
if (err)
goto done;
- if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
- goto done;
-
- new_adv = 0;
- if (advertise & ADVERTISED_1000baseT_Half)
- new_adv |= ADVERTISE_1000HALF;
- if (advertise & ADVERTISED_1000baseT_Full)
- new_adv |= ADVERTISE_1000FULL;
+ if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
+ new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
- if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
- tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
- new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
+ tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
+ new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
- err = tg3_writephy(tp, MII_CTRL1000, new_adv);
- if (err)
- goto done;
+ err = tg3_writephy(tp, MII_CTRL1000, new_adv);
+ if (err)
+ goto done;
+ }
if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
goto done;
@@ -3650,6 +3614,7 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
case ASIC_REV_5717:
case ASIC_REV_57765:
+ case ASIC_REV_57766:
case ASIC_REV_5719:
/* If we advertised any eee advertisements above... */
if (val)
@@ -3786,76 +3751,61 @@ static int tg3_init_5401phy_dsp(struct tg3 *tp)
return err;
}
-static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
+static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
{
- u32 adv_reg, all_mask = 0;
+ u32 advmsk, tgtadv, advertising;
- if (mask & ADVERTISED_10baseT_Half)
- all_mask |= ADVERTISE_10HALF;
- if (mask & ADVERTISED_10baseT_Full)
- all_mask |= ADVERTISE_10FULL;
- if (mask & ADVERTISED_100baseT_Half)
- all_mask |= ADVERTISE_100HALF;
- if (mask & ADVERTISED_100baseT_Full)
- all_mask |= ADVERTISE_100FULL;
+ advertising = tp->link_config.advertising;
+ tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
- if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
- return 0;
+ advmsk = ADVERTISE_ALL;
+ if (tp->link_config.active_duplex == DUPLEX_FULL) {
+ tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
+ advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
+ }
- if ((adv_reg & ADVERTISE_ALL) != all_mask)
- return 0;
+ if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
+ return false;
+
+ if ((*lcladv & advmsk) != tgtadv)
+ return false;
if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
u32 tg3_ctrl;
- all_mask = 0;
- if (mask & ADVERTISED_1000baseT_Half)
- all_mask |= ADVERTISE_1000HALF;
- if (mask & ADVERTISED_1000baseT_Full)
- all_mask |= ADVERTISE_1000FULL;
+ tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
- return 0;
+ return false;
tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
- if (tg3_ctrl != all_mask)
- return 0;
+ if (tg3_ctrl != tgtadv)
+ return false;
}
- return 1;
+ return true;
}
-static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
+static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
{
- u32 curadv, reqadv;
-
- if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
- return 1;
+ u32 lpeth = 0;
- curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
- reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
+ if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
+ u32 val;
- if (tp->link_config.active_duplex == DUPLEX_FULL) {
- if (curadv != reqadv)
- return 0;
+ if (tg3_readphy(tp, MII_STAT1000, &val))
+ return false;
- if (tg3_flag(tp, PAUSE_AUTONEG))
- tg3_readphy(tp, MII_LPA, rmtadv);
- } else {
- /* Reprogram the advertisement register, even if it
- * does not affect the current link. If the link
- * gets renegotiated in the future, we can save an
- * additional renegotiation cycle by advertising
- * it correctly in the first place.
- */
- if (curadv != reqadv) {
- *lcladv &= ~(ADVERTISE_PAUSE_CAP |
- ADVERTISE_PAUSE_ASYM);
- tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
- }
+ lpeth = mii_stat1000_to_ethtool_lpa_t(val);
}
- return 1;
+ if (tg3_readphy(tp, MII_LPA, rmtadv))
+ return false;
+
+ lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
+ tp->link_config.rmt_adv = lpeth;
+
+ return true;
}
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
@@ -3961,6 +3911,8 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
current_link_up = 0;
current_speed = SPEED_INVALID;
current_duplex = DUPLEX_INVALID;
+ tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
+ tp->link_config.rmt_adv = 0;
if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
err = tg3_phy_auxctl_read(tp,
@@ -4016,12 +3968,9 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
if (tp->link_config.autoneg == AUTONEG_ENABLE) {
if ((bmcr & BMCR_ANENABLE) &&
- tg3_copper_is_advertising_all(tp,
- tp->link_config.advertising)) {
- if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
- &rmt_adv))
- current_link_up = 1;
- }
+ tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
+ tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
+ current_link_up = 1;
} else {
if (!(bmcr & BMCR_ANENABLE) &&
tp->link_config.speed == current_speed &&
@@ -4033,8 +3982,22 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
}
if (current_link_up == 1 &&
- tp->link_config.active_duplex == DUPLEX_FULL)
+ tp->link_config.active_duplex == DUPLEX_FULL) {
+ u32 reg, bit;
+
+ if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
+ reg = MII_TG3_FET_GEN_STAT;
+ bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
+ } else {
+ reg = MII_TG3_EXT_STAT;
+ bit = MII_TG3_EXT_STAT_MDIX;
+ }
+
+ if (!tg3_readphy(tp, reg, &val) && (val & bit))
+ tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
+
tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
+ }
}
relink:
@@ -4643,6 +4606,9 @@ restart_autoneg:
if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
remote_adv |= LPA_1000XPAUSE_ASYM;
+ tp->link_config.rmt_adv =
+ mii_adv_to_ethtool_adv_x(remote_adv);
+
tg3_setup_flow_control(tp, local_adv, remote_adv);
current_link_up = 1;
tp->serdes_counter = 0;
@@ -4714,6 +4680,9 @@ static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
if (rxflags & MR_LP_ADV_ASYM_PAUSE)
remote_adv |= LPA_1000XPAUSE_ASYM;
+ tp->link_config.rmt_adv =
+ mii_adv_to_ethtool_adv_x(remote_adv);
+
tg3_setup_flow_control(tp, local_adv, remote_adv);
current_link_up = 1;
@@ -4796,6 +4765,7 @@ static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
udelay(40);
current_link_up = 0;
+ tp->link_config.rmt_adv = 0;
mac_status = tr32(MAC_STATUS);
if (tg3_flag(tp, HW_AUTONEG))
@@ -4887,6 +4857,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
current_link_up = 0;
current_speed = SPEED_INVALID;
current_duplex = DUPLEX_INVALID;
+ tp->link_config.rmt_adv = 0;
err |= tg3_readphy(tp, MII_BMSR, &bmsr);
err |= tg3_readphy(tp, MII_BMSR, &bmsr);
@@ -4903,23 +4874,19 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
(tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
/* do nothing, just check for link up at the end */
} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
- u32 adv, new_adv;
+ u32 adv, newadv;
err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
- new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
- ADVERTISE_1000XPAUSE |
- ADVERTISE_1000XPSE_ASYM |
- ADVERTISE_SLCT);
-
- new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
+ newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
+ ADVERTISE_1000XPAUSE |
+ ADVERTISE_1000XPSE_ASYM |
+ ADVERTISE_SLCT);
- if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
- new_adv |= ADVERTISE_1000XHALF;
- if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
- new_adv |= ADVERTISE_1000XFULL;
+ newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
+ newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
- if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
- tg3_writephy(tp, MII_ADVERTISE, new_adv);
+ if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
+ tg3_writephy(tp, MII_ADVERTISE, newadv);
bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
tg3_writephy(tp, MII_BMCR, bmcr);
@@ -4997,6 +4964,9 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
current_duplex = DUPLEX_FULL;
else
current_duplex = DUPLEX_HALF;
+
+ tp->link_config.rmt_adv =
+ mii_adv_to_ethtool_adv_x(remote_adv);
} else if (!tg3_flag(tp, 5780_CLASS)) {
/* Link is up via parallel detect */
} else {
@@ -5320,6 +5290,7 @@ static void tg3_tx(struct tg3_napi *tnapi)
u32 sw_idx = tnapi->tx_cons;
struct netdev_queue *txq;
int index = tnapi - tp->napi;
+ unsigned int pkts_compl = 0, bytes_compl = 0;
if (tg3_flag(tp, ENABLE_TSS))
index--;
@@ -5370,6 +5341,9 @@ static void tg3_tx(struct tg3_napi *tnapi)
sw_idx = NEXT_TX(sw_idx);
}
+ pkts_compl++;
+ bytes_compl += skb->len;
+
dev_kfree_skb(skb);
if (unlikely(tx_bug)) {
@@ -5378,6 +5352,8 @@ static void tg3_tx(struct tg3_napi *tnapi)
}
}
+ netdev_completed_queue(tp->dev, pkts_compl, bytes_compl);
+
tnapi->tx_cons = sw_idx;
/* Need to make the tx_cons update visible to tg3_start_xmit()
@@ -5397,15 +5373,15 @@ static void tg3_tx(struct tg3_napi *tnapi)
}
}
-static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
+static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
{
- if (!ri->skb)
+ if (!ri->data)
return;
pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
map_sz, PCI_DMA_FROMDEVICE);
- dev_kfree_skb_any(ri->skb);
- ri->skb = NULL;
+ kfree(ri->data);
+ ri->data = NULL;
}
/* Returns size of skb allocated or < 0 on error.
@@ -5419,28 +5395,28 @@ static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
* buffers the cpu only reads the last cacheline of the RX descriptor
* (to fetch the error flags, vlan tag, checksum, and opaque cookie).
*/
-static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
+static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
u32 opaque_key, u32 dest_idx_unmasked)
{
struct tg3_rx_buffer_desc *desc;
struct ring_info *map;
- struct sk_buff *skb;
+ u8 *data;
dma_addr_t mapping;
- int skb_size, dest_idx;
+ int skb_size, data_size, dest_idx;
switch (opaque_key) {
case RXD_OPAQUE_RING_STD:
dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
desc = &tpr->rx_std[dest_idx];
map = &tpr->rx_std_buffers[dest_idx];
- skb_size = tp->rx_pkt_map_sz;
+ data_size = tp->rx_pkt_map_sz;
break;
case RXD_OPAQUE_RING_JUMBO:
dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
desc = &tpr->rx_jmb[dest_idx].std;
map = &tpr->rx_jmb_buffers[dest_idx];
- skb_size = TG3_RX_JMB_MAP_SZ;
+ data_size = TG3_RX_JMB_MAP_SZ;
break;
default:
@@ -5453,31 +5429,33 @@ static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
* Callers depend upon this behavior and assume that
* we leave everything unchanged if we fail.
*/
- skb = netdev_alloc_skb(tp->dev, skb_size + TG3_RX_OFFSET(tp));
- if (skb == NULL)
+ skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ data = kmalloc(skb_size, GFP_ATOMIC);
+ if (!data)
return -ENOMEM;
- skb_reserve(skb, TG3_RX_OFFSET(tp));
-
- mapping = pci_map_single(tp->pdev, skb->data, skb_size,
+ mapping = pci_map_single(tp->pdev,
+ data + TG3_RX_OFFSET(tp),
+ data_size,
PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(tp->pdev, mapping)) {
- dev_kfree_skb(skb);
+ kfree(data);
return -EIO;
}
- map->skb = skb;
+ map->data = data;
dma_unmap_addr_set(map, mapping, mapping);
desc->addr_hi = ((u64)mapping >> 32);
desc->addr_lo = ((u64)mapping & 0xffffffff);
- return skb_size;
+ return data_size;
}
/* We only need to move over in the address because the other
* members of the RX descriptor are invariant. See notes above
- * tg3_alloc_rx_skb for full details.
+ * tg3_alloc_rx_data for full details.
*/
static void tg3_recycle_rx(struct tg3_napi *tnapi,
struct tg3_rx_prodring_set *dpr,
@@ -5511,7 +5489,7 @@ static void tg3_recycle_rx(struct tg3_napi *tnapi,
return;
}
- dest_map->skb = src_map->skb;
+ dest_map->data = src_map->data;
dma_unmap_addr_set(dest_map, mapping,
dma_unmap_addr(src_map, mapping));
dest_desc->addr_hi = src_desc->addr_hi;
@@ -5522,7 +5500,7 @@ static void tg3_recycle_rx(struct tg3_napi *tnapi,
*/
smp_wmb();
- src_map->skb = NULL;
+ src_map->data = NULL;
}
/* The RX ring scheme is composed of multiple rings which post fresh
@@ -5576,19 +5554,20 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
struct sk_buff *skb;
dma_addr_t dma_addr;
u32 opaque_key, desc_idx, *post_ptr;
+ u8 *data;
desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
if (opaque_key == RXD_OPAQUE_RING_STD) {
ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
dma_addr = dma_unmap_addr(ri, mapping);
- skb = ri->skb;
+ data = ri->data;
post_ptr = &std_prod_idx;
rx_std_posted++;
} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
dma_addr = dma_unmap_addr(ri, mapping);
- skb = ri->skb;
+ data = ri->data;
post_ptr = &jmb_prod_idx;
} else
goto next_pkt_nopost;
@@ -5606,13 +5585,14 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
goto next_pkt;
}
+ prefetch(data + TG3_RX_OFFSET(tp));
len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
ETH_FCS_LEN;
if (len > TG3_RX_COPY_THRESH(tp)) {
int skb_size;
- skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
+ skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
*post_ptr);
if (skb_size < 0)
goto drop_it;
@@ -5620,35 +5600,37 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
pci_unmap_single(tp->pdev, dma_addr, skb_size,
PCI_DMA_FROMDEVICE);
- /* Ensure that the update to the skb happens
+ skb = build_skb(data);
+ if (!skb) {
+ kfree(data);
+ goto drop_it_no_recycle;
+ }
+ skb_reserve(skb, TG3_RX_OFFSET(tp));
+ /* Ensure that the update to the data happens
* after the usage of the old DMA mapping.
*/
smp_wmb();
- ri->skb = NULL;
+ ri->data = NULL;
- skb_put(skb, len);
} else {
- struct sk_buff *copy_skb;
-
tg3_recycle_rx(tnapi, tpr, opaque_key,
desc_idx, *post_ptr);
- copy_skb = netdev_alloc_skb(tp->dev, len +
- TG3_RAW_IP_ALIGN);
- if (copy_skb == NULL)
+ skb = netdev_alloc_skb(tp->dev,
+ len + TG3_RAW_IP_ALIGN);
+ if (skb == NULL)
goto drop_it_no_recycle;
- skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
- skb_put(copy_skb, len);
+ skb_reserve(skb, TG3_RAW_IP_ALIGN);
pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
- skb_copy_from_linear_data(skb, copy_skb->data, len);
+ memcpy(skb->data,
+ data + TG3_RX_OFFSET(tp),
+ len);
pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
-
- /* We'll reuse the original ring buffer. */
- skb = copy_skb;
}
+ skb_put(skb, len);
if ((tp->dev->features & NETIF_F_RXCSUM) &&
(desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
(((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
@@ -5787,7 +5769,7 @@ static int tg3_rx_prodring_xfer(struct tg3 *tp,
di = dpr->rx_std_prod_idx;
for (i = di; i < di + cpycnt; i++) {
- if (dpr->rx_std_buffers[i].skb) {
+ if (dpr->rx_std_buffers[i].data) {
cpycnt = i - di;
err = -ENOSPC;
break;
@@ -5845,7 +5827,7 @@ static int tg3_rx_prodring_xfer(struct tg3 *tp,
di = dpr->rx_jmb_prod_idx;
for (i = di; i < di + cpycnt; i++) {
- if (dpr->rx_jmb_buffers[i].skb) {
+ if (dpr->rx_jmb_buffers[i].data) {
cpycnt = i - di;
err = -ENOSPC;
break;
@@ -6443,25 +6425,25 @@ static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
bool hwbug = false;
if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
- hwbug = 1;
+ hwbug = true;
if (tg3_4g_overflow_test(map, len))
- hwbug = 1;
+ hwbug = true;
if (tg3_40bit_overflow_test(tp, map, len))
- hwbug = 1;
+ hwbug = true;
- if (tg3_flag(tp, 4K_FIFO_LIMIT)) {
+ if (tp->dma_limit) {
u32 prvidx = *entry;
u32 tmp_flag = flags & ~TXD_FLAG_END;
- while (len > TG3_TX_BD_DMA_MAX && *budget) {
- u32 frag_len = TG3_TX_BD_DMA_MAX;
- len -= TG3_TX_BD_DMA_MAX;
+ while (len > tp->dma_limit && *budget) {
+ u32 frag_len = tp->dma_limit;
+ len -= tp->dma_limit;
/* Avoid the 8byte DMA problem */
if (len <= 8) {
- len += TG3_TX_BD_DMA_MAX / 2;
- frag_len = TG3_TX_BD_DMA_MAX / 2;
+ len += tp->dma_limit / 2;
+ frag_len = tp->dma_limit / 2;
}
tnapi->tx_buffers[*entry].fragmented = true;
@@ -6482,7 +6464,7 @@ static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
*budget -= 1;
*entry = NEXT_TX(*entry);
} else {
- hwbug = 1;
+ hwbug = true;
tnapi->tx_buffers[prvidx].fragmented = false;
}
}
@@ -6816,6 +6798,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
skb_tx_timestamp(skb);
+ netdev_sent_queue(tp->dev, skb->len);
/* Packets are ready, update Tx producer idx local and on card. */
tw32_tx_mbox(tnapi->prodmbox, entry);
@@ -6968,7 +6951,7 @@ static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
return 0;
}
-static void tg3_set_loopback(struct net_device *dev, u32 features)
+static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
{
struct tg3 *tp = netdev_priv(dev);
@@ -6994,7 +6977,8 @@ static void tg3_set_loopback(struct net_device *dev, u32 features)
}
}
-static u32 tg3_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t tg3_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
struct tg3 *tp = netdev_priv(dev);
@@ -7004,9 +6988,9 @@ static u32 tg3_fix_features(struct net_device *dev, u32 features)
return features;
}
-static int tg3_set_features(struct net_device *dev, u32 features)
+static int tg3_set_features(struct net_device *dev, netdev_features_t features)
{
- u32 changed = dev->features ^ features;
+ netdev_features_t changed = dev->features ^ features;
if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
tg3_set_loopback(dev, features);
@@ -7082,14 +7066,14 @@ static void tg3_rx_prodring_free(struct tg3 *tp,
if (tpr != &tp->napi[0].prodring) {
for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
i = (i + 1) & tp->rx_std_ring_mask)
- tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
+ tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
tp->rx_pkt_map_sz);
if (tg3_flag(tp, JUMBO_CAPABLE)) {
for (i = tpr->rx_jmb_cons_idx;
i != tpr->rx_jmb_prod_idx;
i = (i + 1) & tp->rx_jmb_ring_mask) {
- tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
+ tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
TG3_RX_JMB_MAP_SZ);
}
}
@@ -7098,12 +7082,12 @@ static void tg3_rx_prodring_free(struct tg3 *tp,
}
for (i = 0; i <= tp->rx_std_ring_mask; i++)
- tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
+ tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
tp->rx_pkt_map_sz);
if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
- tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
+ tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
TG3_RX_JMB_MAP_SZ);
}
}
@@ -7159,7 +7143,7 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
/* Now allocate fresh SKBs for each rx ring. */
for (i = 0; i < tp->rx_pending; i++) {
- if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
+ if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
netdev_warn(tp->dev,
"Using a smaller RX standard ring. Only "
"%d out of %d buffers were allocated "
@@ -7191,7 +7175,7 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
}
for (i = 0; i < tp->rx_jumbo_pending; i++) {
- if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
+ if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
netdev_warn(tp->dev,
"Using a smaller RX jumbo ring. Only %d "
"out of %d buffers were allocated "
@@ -7297,6 +7281,7 @@ static void tg3_free_rings(struct tg3 *tp)
dev_kfree_skb_any(skb);
}
}
+ netdev_reset_queue(tp->dev);
}
/* Initialize tx/rx rings for packet processing.
@@ -7591,8 +7576,6 @@ static int tg3_abort_hw(struct tg3 *tp, int silent)
if (tnapi->hw_status)
memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
}
- if (tp->hw_stats)
- memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
return err;
}
@@ -7626,15 +7609,11 @@ static void tg3_restore_pci_state(struct tg3 *tp)
pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
- if (tg3_flag(tp, PCI_EXPRESS))
- pcie_set_readrq(tp->pdev, tp->pcie_readrq);
- else {
- pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
- tp->pci_cacheline_sz);
- pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
- tp->pci_lat_timer);
- }
+ if (!tg3_flag(tp, PCI_EXPRESS)) {
+ pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
+ tp->pci_cacheline_sz);
+ pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
+ tp->pci_lat_timer);
}
/* Make sure PCI-X relaxed ordering bit is clear. */
@@ -7819,8 +7798,6 @@ static int tg3_chip_reset(struct tg3 *tp)
pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
val16);
- pcie_set_readrq(tp->pdev, tp->pcie_readrq);
-
/* Clear error status */
pci_write_config_word(tp->pdev,
pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
@@ -7914,6 +7891,11 @@ static int tg3_chip_reset(struct tg3 *tp)
return 0;
}
+static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
+ struct rtnl_link_stats64 *);
+static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *,
+ struct tg3_ethtool_stats *);
+
/* tp->lock is held. */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
@@ -7931,6 +7913,15 @@ static int tg3_halt(struct tg3 *tp, int kind, int silent)
tg3_write_sig_legacy(tp, kind);
tg3_write_sig_post_reset(tp, kind);
+ if (tp->hw_stats) {
+ /* Save the stats across chip resets... */
+ tg3_get_stats64(tp->dev, &tp->net_stats_prev),
+ tg3_get_estats(tp, &tp->estats_prev);
+
+ /* And make sure the next sample is new data */
+ memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
+ }
+
if (err)
return err;
@@ -8074,7 +8065,7 @@ static void tg3_rings_reset(struct tg3 *tp)
limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
else if (tg3_flag(tp, 5717_PLUS))
limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+ else if (tg3_flag(tp, 57765_CLASS))
limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
else
limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
@@ -8091,7 +8082,7 @@ static void tg3_rings_reset(struct tg3 *tp)
else if (!tg3_flag(tp, 5705_PLUS))
limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+ tg3_flag(tp, 57765_CLASS))
limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
else
limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
@@ -8197,7 +8188,8 @@ static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
if (!tg3_flag(tp, 5750_PLUS) ||
tg3_flag(tp, 5780_CLASS) ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
+ tg3_flag(tp, 57765_PLUS))
bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
@@ -8217,10 +8209,7 @@ static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
return;
- if (!tg3_flag(tp, 5705_PLUS))
- bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
- else
- bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;
+ bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
@@ -8231,6 +8220,54 @@ static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
}
+static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp)
+{
+ int i;
+
+ for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
+ tp->rss_ind_tbl[i] =
+ ethtool_rxfh_indir_default(i, tp->irq_cnt - 1);
+}
+
+static void tg3_rss_check_indir_tbl(struct tg3 *tp)
+{
+ int i;
+
+ if (!tg3_flag(tp, SUPPORT_MSIX))
+ return;
+
+ if (tp->irq_cnt <= 2) {
+ memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
+ return;
+ }
+
+ /* Validate table against current IRQ count */
+ for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
+ if (tp->rss_ind_tbl[i] >= tp->irq_cnt - 1)
+ break;
+ }
+
+ if (i != TG3_RSS_INDIR_TBL_SIZE)
+ tg3_rss_init_dflt_indir_tbl(tp);
+}
+
+static void tg3_rss_write_indir_tbl(struct tg3 *tp)
+{
+ int i = 0;
+ u32 reg = MAC_RSS_INDIR_TBL_0;
+
+ while (i < TG3_RSS_INDIR_TBL_SIZE) {
+ u32 val = tp->rss_ind_tbl[i];
+ i++;
+ for (; i % 8; i++) {
+ val <<= 4;
+ val |= tp->rss_ind_tbl[i];
+ }
+ tw32(reg, val);
+ reg += 4;
+ }
+}
+
/* tp->lock is held. */
static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
{
@@ -8337,7 +8374,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32(GRC_MODE, grc_mode);
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
+ if (tg3_flag(tp, 57765_CLASS)) {
if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
u32 grc_mode = tr32(GRC_MODE);
@@ -8425,7 +8462,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
+ if (!tg3_flag(tp, 57765_CLASS) &&
GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
val |= DMA_RWCTRL_TAGGED_STAT_WA;
tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
@@ -8572,7 +8609,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
val | BDINFO_FLAGS_USE_EXT_RECV);
if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+ tg3_flag(tp, 57765_CLASS))
tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
NIC_SRAM_RX_JUMBO_BUFFER_DESC);
} else {
@@ -8581,10 +8618,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
}
if (tg3_flag(tp, 57765_PLUS)) {
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
- val = TG3_RX_STD_MAX_SIZE_5700;
- else
- val = TG3_RX_STD_MAX_SIZE_5717;
+ val = TG3_RX_STD_RING_SIZE(tp);
val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
val |= (TG3_RX_STD_DMA_SZ << 2);
} else
@@ -8661,6 +8695,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
if (tg3_flag(tp, PCI_EXPRESS))
rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
+ rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
+
if (tg3_flag(tp, HW_TSO_1) ||
tg3_flag(tp, HW_TSO_2) ||
tg3_flag(tp, HW_TSO_3))
@@ -8809,9 +8846,11 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
udelay(100);
- if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
+ if (tg3_flag(tp, USING_MSIX)) {
val = tr32(MSGINT_MODE);
- val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
+ val |= MSGINT_MODE_ENABLE;
+ if (tp->irq_cnt > 1)
+ val |= MSGINT_MODE_MULTIVEC_EN;
if (!tg3_flag(tp, 1SHOT_MSI))
val |= MSGINT_MODE_ONE_SHOT_DISABLE;
tw32(MSGINT_MODE, val);
@@ -8924,28 +8963,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
udelay(100);
if (tg3_flag(tp, ENABLE_RSS)) {
- int i = 0;
- u32 reg = MAC_RSS_INDIR_TBL_0;
-
- if (tp->irq_cnt == 2) {
- for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i += 8) {
- tw32(reg, 0x0);
- reg += 4;
- }
- } else {
- u32 val;
-
- while (i < TG3_RSS_INDIR_TBL_SIZE) {
- val = i % (tp->irq_cnt - 1);
- i++;
- for (; i % 8; i++) {
- val <<= 4;
- val |= (i % (tp->irq_cnt - 1));
- }
- tw32(reg, val);
- reg += 4;
- }
- }
+ tg3_rss_write_indir_tbl(tp);
/* Setup the "secret" hash key. */
tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
@@ -9002,7 +9020,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
/* Prevent chip from dropping frames when flow control
* is enabled.
*/
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+ if (tg3_flag(tp, 57765_CLASS))
val = 1;
else
val = 2;
@@ -9217,7 +9235,7 @@ static void tg3_timer(unsigned long __opaque)
spin_lock(&tp->lock);
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+ tg3_flag(tp, 57765_CLASS))
tg3_chk_missed_msi(tp);
if (!tg3_flag(tp, TAGGED_STATUS)) {
@@ -9532,19 +9550,18 @@ static int tg3_request_firmware(struct tg3 *tp)
static bool tg3_enable_msix(struct tg3 *tp)
{
- int i, rc, cpus = num_online_cpus();
+ int i, rc;
struct msix_entry msix_ent[tp->irq_max];
- if (cpus == 1)
- /* Just fallback to the simpler MSI mode. */
- return false;
-
- /*
- * We want as many rx rings enabled as there are cpus.
- * The first MSIX vector only deals with link interrupts, etc,
- * so we add one to the number of vectors we are requesting.
- */
- tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
+ tp->irq_cnt = num_online_cpus();
+ if (tp->irq_cnt > 1) {
+ /* We want as many rx rings enabled as there are cpus.
+ * In multiqueue MSI-X mode, the first MSI-X vector
+ * only deals with link interrupts, etc, so we add
+ * one to the number of vectors we are requesting.
+ */
+ tp->irq_cnt = min_t(unsigned, tp->irq_cnt + 1, tp->irq_max);
+ }
for (i = 0; i < tp->irq_max; i++) {
msix_ent[i].entry = i;
@@ -9669,6 +9686,8 @@ static int tg3_open(struct net_device *dev)
*/
tg3_ints_init(tp);
+ tg3_rss_check_indir_tbl(tp);
+
/* The placement of this call is tied
* to the setup and use of Host TX descriptors.
*/
@@ -9700,8 +9719,8 @@ static int tg3_open(struct net_device *dev)
tg3_free_rings(tp);
} else {
if (tg3_flag(tp, TAGGED_STATUS) &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765)
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
+ !tg3_flag(tp, 57765_CLASS))
tp->timer_offset = HZ;
else
tp->timer_offset = HZ / 10;
@@ -9782,10 +9801,6 @@ err_out1:
return err;
}
-static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
- struct rtnl_link_stats64 *);
-static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
-
static int tg3_close(struct net_device *dev)
{
int i;
@@ -9817,10 +9832,9 @@ static int tg3_close(struct net_device *dev)
tg3_ints_fini(tp);
- tg3_get_stats64(tp->dev, &tp->net_stats_prev);
-
- memcpy(&tp->estats_prev, tg3_get_estats(tp),
- sizeof(tp->estats_prev));
+ /* Clear stats across close / open calls */
+ memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
+ memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
tg3_napi_fini(tp);
@@ -9868,9 +9882,9 @@ static u64 calc_crc_errors(struct tg3 *tp)
estats->member = old_estats->member + \
get_stat64(&hw_stats->member)
-static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
+static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp,
+ struct tg3_ethtool_stats *estats)
{
- struct tg3_ethtool_stats *estats = &tp->estats;
struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
struct tg3_hw_stats *hw_stats = tp->hw_stats;
@@ -10318,12 +10332,20 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
cmd->advertising |= ADVERTISED_Asym_Pause;
}
}
- if (netif_running(dev)) {
+ if (netif_running(dev) && netif_carrier_ok(dev)) {
ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
cmd->duplex = tp->link_config.active_duplex;
+ cmd->lp_advertising = tp->link_config.rmt_adv;
+ if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
+ if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
+ cmd->eth_tp_mdix = ETH_TP_MDI_X;
+ else
+ cmd->eth_tp_mdix = ETH_TP_MDI;
+ }
} else {
ethtool_cmd_speed_set(cmd, SPEED_INVALID);
cmd->duplex = DUPLEX_INVALID;
+ cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
}
cmd->phy_address = tp->phy_addr;
cmd->transceiver = XCVR_INTERNAL;
@@ -10428,10 +10450,10 @@ static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
{
struct tg3 *tp = netdev_priv(dev);
- strcpy(info->driver, DRV_MODULE_NAME);
- strcpy(info->version, DRV_MODULE_VERSION);
- strcpy(info->fw_version, tp->fw_ver);
- strcpy(info->bus_info, pci_name(tp->pdev));
+ strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+ strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
+ strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
}
static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
@@ -10590,12 +10612,12 @@ static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam
epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
- if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
+ if (tp->link_config.flowctrl & FLOW_CTRL_RX)
epause->rx_pause = 1;
else
epause->rx_pause = 0;
- if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
+ if (tp->link_config.flowctrl & FLOW_CTRL_TX)
epause->tx_pause = 1;
else
epause->tx_pause = 0;
@@ -10715,6 +10737,78 @@ static int tg3_get_sset_count(struct net_device *dev, int sset)
}
}
+static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
+ u32 *rules __always_unused)
+{
+ struct tg3 *tp = netdev_priv(dev);
+
+ if (!tg3_flag(tp, SUPPORT_MSIX))
+ return -EOPNOTSUPP;
+
+ switch (info->cmd) {
+ case ETHTOOL_GRXRINGS:
+ if (netif_running(tp->dev))
+ info->data = tp->irq_cnt;
+ else {
+ info->data = num_online_cpus();
+ if (info->data > TG3_IRQ_MAX_VECS_RSS)
+ info->data = TG3_IRQ_MAX_VECS_RSS;
+ }
+
+ /* The first interrupt vector only
+ * handles link interrupts.
+ */
+ info->data -= 1;
+ return 0;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
+{
+ u32 size = 0;
+ struct tg3 *tp = netdev_priv(dev);
+
+ if (tg3_flag(tp, SUPPORT_MSIX))
+ size = TG3_RSS_INDIR_TBL_SIZE;
+
+ return size;
+}
+
+static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
+{
+ struct tg3 *tp = netdev_priv(dev);
+ int i;
+
+ for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
+ indir[i] = tp->rss_ind_tbl[i];
+
+ return 0;
+}
+
+static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
+{
+ struct tg3 *tp = netdev_priv(dev);
+ size_t i;
+
+ for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
+ tp->rss_ind_tbl[i] = indir[i];
+
+ if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
+ return 0;
+
+ /* It is legal to write the indirection
+ * table while the device is running.
+ */
+ tg3_full_lock(tp, 0);
+ tg3_rss_write_indir_tbl(tp);
+ tg3_full_unlock(tp);
+
+ return 0;
+}
+
static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
switch (stringset) {
@@ -10769,7 +10863,8 @@ static void tg3_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *estats, u64 *tmp_stats)
{
struct tg3 *tp = netdev_priv(dev);
- memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
+
+ tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
}
static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
@@ -11352,7 +11447,7 @@ static int tg3_test_memory(struct tg3 *tp)
if (tg3_flag(tp, 5717_PLUS))
mem_tbl = mem_tbl_5717;
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+ else if (tg3_flag(tp, 57765_CLASS))
mem_tbl = mem_tbl_57765;
else if (tg3_flag(tp, 5755_PLUS))
mem_tbl = mem_tbl_5755;
@@ -11400,8 +11495,8 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
u32 budget;
- struct sk_buff *skb, *rx_skb;
- u8 *tx_data;
+ struct sk_buff *skb;
+ u8 *tx_data, *rx_data;
dma_addr_t map;
int num_pkts, tx_len, rx_len, i, err;
struct tg3_rx_buffer_desc *desc;
@@ -11569,11 +11664,11 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
}
if (opaque_key == RXD_OPAQUE_RING_STD) {
- rx_skb = tpr->rx_std_buffers[desc_idx].skb;
+ rx_data = tpr->rx_std_buffers[desc_idx].data;
map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
mapping);
} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
- rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
+ rx_data = tpr->rx_jmb_buffers[desc_idx].data;
map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
mapping);
} else
@@ -11582,15 +11677,16 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
PCI_DMA_FROMDEVICE);
+ rx_data += TG3_RX_OFFSET(tp);
for (i = data_off; i < rx_len; i++, val++) {
- if (*(rx_skb->data + i) != (u8) (val & 0xff))
+ if (*(rx_data + i) != (u8) (val & 0xff))
goto out;
}
}
err = 0;
- /* tg3_free_rings will unmap and free the rx_skb */
+ /* tg3_free_rings will unmap and free the rx_data */
out:
return err;
}
@@ -11943,6 +12039,10 @@ static const struct ethtool_ops tg3_ethtool_ops = {
.get_coalesce = tg3_get_coalesce,
.set_coalesce = tg3_set_coalesce,
.get_sset_count = tg3_get_sset_count,
+ .get_rxnfc = tg3_get_rxnfc,
+ .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
+ .get_rxfh_indir = tg3_get_rxfh_indir,
+ .set_rxfh_indir = tg3_set_rxfh_indir,
};
static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
@@ -12612,7 +12712,7 @@ static void __devinit tg3_nvram_init(struct tg3 *tp)
else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
tg3_get_5906_nvram_info(tp);
else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+ tg3_flag(tp, 57765_CLASS))
tg3_get_57780_nvram_info(tp);
else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
@@ -13218,8 +13318,7 @@ static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
{
- u32 adv = ADVERTISED_Autoneg |
- ADVERTISED_Pause;
+ u32 adv = ADVERTISED_Autoneg;
if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
adv |= ADVERTISED_1000baseT_Half |
@@ -13322,7 +13421,7 @@ static int __devinit tg3_phy_probe(struct tg3 *tp)
if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
!tg3_flag(tp, ENABLE_APE) &&
!tg3_flag(tp, ENABLE_ASF)) {
- u32 bmsr, mask;
+ u32 bmsr, dummy;
tg3_readphy(tp, MII_BMSR, &bmsr);
if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
@@ -13335,10 +13434,7 @@ static int __devinit tg3_phy_probe(struct tg3 *tp)
tg3_phy_set_wirespeed(tp);
- mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
- ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
- ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
- if (!tg3_copper_is_advertising_all(tp, mask)) {
+ if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
tp->link_config.flowctrl);
@@ -13460,6 +13556,17 @@ out_no_vpd:
strcpy(tp->board_part_number, "BCM57795");
else
goto nomatch;
+ } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
+ if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
+ strcpy(tp->board_part_number, "BCM57762");
+ else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
+ strcpy(tp->board_part_number, "BCM57766");
+ else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
+ strcpy(tp->board_part_number, "BCM57782");
+ else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
+ strcpy(tp->board_part_number, "BCM57786");
+ else
+ goto nomatch;
} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
strcpy(tp->board_part_number, "BCM95906");
} else {
@@ -13798,7 +13905,11 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
- tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
pci_read_config_dword(tp->pdev,
TG3PCI_GEN15_PRODID_ASICREV,
&prod_id_asic_rev);
@@ -13945,7 +14056,10 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tg3_flag_set(tp, 5717_PLUS);
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
- tg3_flag(tp, 5717_PLUS))
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
+ tg3_flag_set(tp, 57765_CLASS);
+
+ if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS))
tg3_flag_set(tp, 57765_PLUS);
/* Intentionally exclude ASIC_REV_5906 */
@@ -13997,9 +14111,13 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
if (tg3_flag(tp, HW_TSO_1) ||
tg3_flag(tp, HW_TSO_2) ||
tg3_flag(tp, HW_TSO_3) ||
- (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF)))
+ tp->fw_needed) {
+ /* For firmware TSO, assume ASF is disabled.
+ * We'll disable TSO later if we discover ASF
+ * is enabled in tg3_get_eeprom_hw_cfg().
+ */
tg3_flag_set(tp, TSO_CAPABLE);
- else {
+ } else {
tg3_flag_clear(tp, TSO_CAPABLE);
tg3_flag_clear(tp, TSO_BUG);
tp->fw_needed = NULL;
@@ -14027,6 +14145,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
if (tg3_flag(tp, 57765_PLUS)) {
tg3_flag_set(tp, SUPPORT_MSIX);
tp->irq_max = TG3_IRQ_MAX_VECS;
+ tg3_rss_init_dflt_indir_tbl(tp);
}
}
@@ -14034,9 +14153,13 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tg3_flag_set(tp, SHORT_DMA_BUG);
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
- tg3_flag_set(tp, 4K_FIFO_LIMIT);
+ tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
+ else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
+ tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
- if (tg3_flag(tp, 5717_PLUS))
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
tg3_flag_set(tp, LRG_PROD_RING_CAP);
if (tg3_flag(tp, 57765_PLUS) &&
@@ -14056,12 +14179,11 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tg3_flag_set(tp, PCI_EXPRESS);
- tp->pcie_readrq = 4096;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
- tp->pcie_readrq = 2048;
-
- pcie_set_readrq(tp->pdev, tp->pcie_readrq);
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) {
+ int readrq = pcie_get_readrq(tp->pdev);
+ if (readrq > 2048)
+ pcie_set_readrq(tp->pdev, 2048);
+ }
pci_read_config_word(tp->pdev,
pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
@@ -14273,6 +14395,12 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
*/
tg3_get_eeprom_hw_cfg(tp);
+ if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
+ tg3_flag_clear(tp, TSO_CAPABLE);
+ tg3_flag_clear(tp, TSO_BUG);
+ tp->fw_needed = NULL;
+ }
+
if (tg3_flag(tp, ENABLE_APE)) {
/* Allow reads and writes to the
* APE register and memory space.
@@ -14311,7 +14439,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+ tg3_flag(tp, 57765_CLASS))
tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
@@ -14548,11 +14676,11 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
else
tg3_flag_clear(tp, POLL_SERDES);
- tp->rx_offset = NET_IP_ALIGN;
+ tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
tg3_flag(tp, PCIX_MODE)) {
- tp->rx_offset = 0;
+ tp->rx_offset = NET_SKB_PAD;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
tp->rx_copy_thresh = ~(u16)0;
#endif
@@ -15313,7 +15441,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
u32 sndmbx, rcvmbx, intmbx;
char str[40];
u64 dma_mask, persist_dma_mask;
- u32 features = 0;
+ netdev_features_t features = 0;
printk_once(KERN_INFO "%s\n", version);
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 94b4bd049a33..aea8f72c24fa 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -31,6 +31,8 @@
#define TG3_RX_RET_MAX_SIZE_5705 512
#define TG3_RX_RET_MAX_SIZE_5717 4096
+#define TG3_RSS_INDIR_TBL_SIZE 128
+
/* First 256 bytes are a mirror of PCI config space. */
#define TG3PCI_VENDOR 0x00000000
#define TG3PCI_VENDOR_BROADCOM 0x14e4
@@ -57,6 +59,10 @@
#define TG3PCI_DEVICE_TIGON3_57795 0x16b6
#define TG3PCI_DEVICE_TIGON3_5719 0x1657
#define TG3PCI_DEVICE_TIGON3_5720 0x165f
+#define TG3PCI_DEVICE_TIGON3_57762 0x1682
+#define TG3PCI_DEVICE_TIGON3_57766 0x1686
+#define TG3PCI_DEVICE_TIGON3_57786 0x16b3
+#define TG3PCI_DEVICE_TIGON3_57782 0x16b7
/* 0x04 --> 0x2c unused */
#define TG3PCI_SUBVENDOR_ID_BROADCOM PCI_VENDOR_ID_BROADCOM
#define TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6 0x1644
@@ -168,6 +174,7 @@
#define ASIC_REV_57765 0x57785
#define ASIC_REV_5719 0x5719
#define ASIC_REV_5720 0x5720
+#define ASIC_REV_57766 0x57766
#define GET_CHIP_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 8)
#define CHIPREV_5700_AX 0x70
#define CHIPREV_5700_BX 0x71
@@ -1340,6 +1347,7 @@
#define RDMAC_MODE_MBUF_SBD_CRPT_ENAB 0x00002000
#define RDMAC_MODE_FIFO_SIZE_128 0x00020000
#define RDMAC_MODE_FIFO_LONG_BURST 0x00030000
+#define RDMAC_MODE_JMB_2K_MMRR 0x00800000
#define RDMAC_MODE_MULT_DMA_RD_DIS 0x01000000
#define RDMAC_MODE_IPV4_LSO_EN 0x08000000
#define RDMAC_MODE_IPV6_LSO_EN 0x10000000
@@ -2174,6 +2182,7 @@
#define MII_TG3_EXT_CTRL_TBI 0x8000
#define MII_TG3_EXT_STAT 0x11 /* Extended status register */
+#define MII_TG3_EXT_STAT_MDIX 0x2000
#define MII_TG3_EXT_STAT_LPASS 0x0100
#define MII_TG3_RXR_COUNTERS 0x14 /* Local/Remote Receiver Counts */
@@ -2277,6 +2286,9 @@
#define MII_TG3_FET_PTEST_FRC_TX_LINK 0x1000
#define MII_TG3_FET_PTEST_FRC_TX_LOCK 0x0800
+#define MII_TG3_FET_GEN_STAT 0x1c
+#define MII_TG3_FET_GEN_STAT_MDIXSTAT 0x2000
+
#define MII_TG3_FET_TEST 0x1f
#define MII_TG3_FET_SHADOW_EN 0x0080
@@ -2662,9 +2674,13 @@ struct tg3_hw_stats {
/* 'mapping' is superfluous as the chip does not write into
* the tx/rx post rings so we could just fetch it from there.
* But the cache behavior is better how we are doing it now.
+ *
+ * This driver uses new build_skb() API :
+ * RX ring buffer contains pointer to kmalloc() data only,
+ * skb are built only after Hardware filled the frame.
*/
struct ring_info {
- struct sk_buff *skb;
+ u8 *data;
DEFINE_DMA_UNMAP_ADDR(mapping);
};
@@ -2690,6 +2706,7 @@ struct tg3_link_config {
#define DUPLEX_INVALID 0xff
#define AUTONEG_INVALID 0xff
u16 active_speed;
+ u32 rmt_adv;
/* When we go in and out of low power mode we need
* to swap with this state.
@@ -2865,6 +2882,8 @@ enum TG3_FLAGS {
TG3_FLAG_NVRAM_BUFFERED,
TG3_FLAG_SUPPORT_MSI,
TG3_FLAG_SUPPORT_MSIX,
+ TG3_FLAG_USING_MSI,
+ TG3_FLAG_USING_MSIX,
TG3_FLAG_PCIX_MODE,
TG3_FLAG_PCI_HIGH_SPEED,
TG3_FLAG_PCI_32BIT,
@@ -2880,7 +2899,6 @@ enum TG3_FLAGS {
TG3_FLAG_CHIP_RESETTING,
TG3_FLAG_INIT_COMPLETE,
TG3_FLAG_TSO_BUG,
- TG3_FLAG_IS_5788,
TG3_FLAG_MAX_RXPEND_64,
TG3_FLAG_TSO_CAPABLE,
TG3_FLAG_PCI_EXPRESS, /* BCM5785 + pci_is_pcie() */
@@ -2889,14 +2907,9 @@ enum TG3_FLAGS {
TG3_FLAG_IS_NIC,
TG3_FLAG_FLASH,
TG3_FLAG_HW_TSO_1,
- TG3_FLAG_5705_PLUS,
- TG3_FLAG_5750_PLUS,
+ TG3_FLAG_HW_TSO_2,
TG3_FLAG_HW_TSO_3,
- TG3_FLAG_USING_MSI,
- TG3_FLAG_USING_MSIX,
TG3_FLAG_ICH_WORKAROUND,
- TG3_FLAG_5780_CLASS,
- TG3_FLAG_HW_TSO_2,
TG3_FLAG_1SHOT_MSI,
TG3_FLAG_NO_FWARE_REPORTED,
TG3_FLAG_NO_NVRAM_ADDR_TRANS,
@@ -2910,18 +2923,23 @@ enum TG3_FLAGS {
TG3_FLAG_RGMII_EXT_IBND_RX_EN,
TG3_FLAG_RGMII_EXT_IBND_TX_EN,
TG3_FLAG_CLKREQ_BUG,
- TG3_FLAG_5755_PLUS,
TG3_FLAG_NO_NVRAM,
TG3_FLAG_ENABLE_RSS,
TG3_FLAG_ENABLE_TSS,
TG3_FLAG_SHORT_DMA_BUG,
TG3_FLAG_USE_JUMBO_BDFLAG,
TG3_FLAG_L1PLLPD_EN,
- TG3_FLAG_57765_PLUS,
TG3_FLAG_APE_HAS_NCSI,
- TG3_FLAG_5717_PLUS,
TG3_FLAG_4K_FIFO_LIMIT,
TG3_FLAG_RESET_TASK_PENDING,
+ TG3_FLAG_5705_PLUS,
+ TG3_FLAG_IS_5788,
+ TG3_FLAG_5750_PLUS,
+ TG3_FLAG_5780_CLASS,
+ TG3_FLAG_5755_PLUS,
+ TG3_FLAG_57765_PLUS,
+ TG3_FLAG_57765_CLASS,
+ TG3_FLAG_5717_PLUS,
/* Add new flags before this comment and TG3_FLAG_NUMBER_OF_FLAGS */
TG3_FLAG_NUMBER_OF_FLAGS, /* Last entry in enum TG3_FLAGS */
@@ -2985,6 +3003,7 @@ struct tg3 {
/* begin "tx thread" cacheline section */
void (*write32_tx_mbox) (struct tg3 *, u32,
u32);
+ u32 dma_limit;
/* begin "rx thread" cacheline section */
struct tg3_napi napi[TG3_IRQ_MAX_VECS];
@@ -3005,7 +3024,6 @@ struct tg3 {
unsigned long rx_dropped;
unsigned long tx_dropped;
struct rtnl_link_stats64 net_stats_prev;
- struct tg3_ethtool_stats estats;
struct tg3_ethtool_stats estats_prev;
DECLARE_BITMAP(tg3_flags, TG3_FLAG_NUMBER_OF_FLAGS);
@@ -3131,10 +3149,12 @@ struct tg3 {
#define TG3_PHYFLG_SERDES_PREEMPHASIS 0x00010000
#define TG3_PHYFLG_PARALLEL_DETECT 0x00020000
#define TG3_PHYFLG_EEE_CAP 0x00040000
+#define TG3_PHYFLG_MDIX_STATE 0x00200000
u32 led_ctrl;
u32 phy_otp;
u32 setlpicnt;
+ u8 rss_ind_tbl[TG3_RSS_INDIR_TBL_SIZE];
#define TG3_BPN_SIZE 24
char board_part_number[TG3_BPN_SIZE];
diff --git a/drivers/net/ethernet/brocade/bna/Makefile b/drivers/net/ethernet/brocade/bna/Makefile
index 74d3abca1960..6027302ae73a 100644
--- a/drivers/net/ethernet/brocade/bna/Makefile
+++ b/drivers/net/ethernet/brocade/bna/Makefile
@@ -5,7 +5,7 @@
obj-$(CONFIG_BNA) += bna.o
-bna-objs := bnad.o bnad_ethtool.o bna_enet.o bna_tx_rx.o
+bna-objs := bnad.o bnad_ethtool.o bnad_debugfs.o bna_enet.o bna_tx_rx.o
bna-objs += bfa_msgq.o bfa_ioc.o bfa_ioc_ct.o bfa_cee.o
bna-objs += cna_fwimg.o
diff --git a/drivers/net/ethernet/brocade/bna/bfa_cee.c b/drivers/net/ethernet/brocade/bna/bfa_cee.c
index 8e627186507c..29f284f79e02 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_cee.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_cee.c
@@ -185,6 +185,41 @@ bfa_nw_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva, u64 dma_pa)
}
/**
+ * bfa_cee_get_attr()
+ *
+ * @brief Send the request to the f/w to fetch CEE attributes.
+ *
+ * @param[in] Pointer to the CEE module data structure.
+ *
+ * @return Status
+ */
+enum bfa_status
+bfa_nw_cee_get_attr(struct bfa_cee *cee, struct bfa_cee_attr *attr,
+ bfa_cee_get_attr_cbfn_t cbfn, void *cbarg)
+{
+ struct bfi_cee_get_req *cmd;
+
+ BUG_ON(!((cee != NULL) && (cee->ioc != NULL)));
+ if (!bfa_nw_ioc_is_operational(cee->ioc))
+ return BFA_STATUS_IOC_FAILURE;
+
+ if (cee->get_attr_pending == true)
+ return BFA_STATUS_DEVBUSY;
+
+ cee->get_attr_pending = true;
+ cmd = (struct bfi_cee_get_req *) cee->get_cfg_mb.msg;
+ cee->attr = attr;
+ cee->cbfn.get_attr_cbfn = cbfn;
+ cee->cbfn.get_attr_cbarg = cbarg;
+ bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_GET_CFG_REQ,
+ bfa_ioc_portid(cee->ioc));
+ bfa_dma_be_addr_set(cmd->dma_addr, cee->attr_dma.pa);
+ bfa_nw_ioc_mbox_queue(cee->ioc, &cee->get_cfg_mb, NULL, NULL);
+
+ return BFA_STATUS_OK;
+}
+
+/**
* bfa_cee_isrs()
*
* @brief Handles Mail-box interrupts for CEE module.
diff --git a/drivers/net/ethernet/brocade/bna/bfa_cee.h b/drivers/net/ethernet/brocade/bna/bfa_cee.h
index 58d54e98d595..93fde633d6f3 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_cee.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_cee.h
@@ -59,5 +59,7 @@ u32 bfa_nw_cee_meminfo(void);
void bfa_nw_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva,
u64 dma_pa);
void bfa_nw_cee_attach(struct bfa_cee *cee, struct bfa_ioc *ioc, void *dev);
-
+enum bfa_status bfa_nw_cee_get_attr(struct bfa_cee *cee,
+ struct bfa_cee_attr *attr,
+ bfa_cee_get_attr_cbfn_t cbfn, void *cbarg);
#endif /* __BFA_CEE_H__ */
diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs.h b/drivers/net/ethernet/brocade/bna/bfa_defs.h
index 2f12d68021d5..871c6309334c 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_defs.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_defs.h
@@ -219,41 +219,39 @@ enum {
* All numerical fields are in big-endian format.
*/
struct bfa_mfg_block {
- u8 version; /*!< manufacturing block version */
- u8 mfg_sig[3]; /*!< characters 'M', 'F', 'G' */
- u16 mfgsize; /*!< mfg block size */
- u16 u16_chksum; /*!< old u16 checksum */
- char brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)];
- char brcd_partnum[STRSZ(BFA_MFG_PARTNUM_SIZE)];
- u8 mfg_day; /*!< manufacturing day */
- u8 mfg_month; /*!< manufacturing month */
- u16 mfg_year; /*!< manufacturing year */
- u64 mfg_wwn; /*!< wwn base for this adapter */
- u8 num_wwn; /*!< number of wwns assigned */
- u8 mfg_speeds; /*!< speeds allowed for this adapter */
- u8 rsv[2];
- char supplier_id[STRSZ(BFA_MFG_SUPPLIER_ID_SIZE)];
- char supplier_partnum[STRSZ(BFA_MFG_SUPPLIER_PARTNUM_SIZE)];
- char
- supplier_serialnum[STRSZ(BFA_MFG_SUPPLIER_SERIALNUM_SIZE)];
- char
- supplier_revision[STRSZ(BFA_MFG_SUPPLIER_REVISION_SIZE)];
- mac_t mfg_mac; /*!< mac address */
- u8 num_mac; /*!< number of mac addresses */
- u8 rsv2;
- u32 card_type; /*!< card type */
- char cap_nic; /*!< capability nic */
- char cap_cna; /*!< capability cna */
- char cap_hba; /*!< capability hba */
- char cap_fc16g; /*!< capability fc 16g */
- char cap_sriov; /*!< capability sriov */
- char cap_mezz; /*!< capability mezz */
- u8 rsv3;
- u8 mfg_nports; /*!< number of ports */
- char media[8]; /*!< xfi/xaui */
- char initial_mode[8];/*!< initial mode: hba/cna/nic */
- u8 rsv4[84];
- u8 md5_chksum[BFA_MFG_CHKSUM_SIZE]; /*!< md5 checksum */
+ u8 version; /* manufacturing block version */
+ u8 mfg_sig[3]; /* characters 'M', 'F', 'G' */
+ u16 mfgsize; /* mfg block size */
+ u16 u16_chksum; /* old u16 checksum */
+ char brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)];
+ char brcd_partnum[STRSZ(BFA_MFG_PARTNUM_SIZE)];
+ u8 mfg_day; /* manufacturing day */
+ u8 mfg_month; /* manufacturing month */
+ u16 mfg_year; /* manufacturing year */
+ u64 mfg_wwn; /* wwn base for this adapter */
+ u8 num_wwn; /* number of wwns assigned */
+ u8 mfg_speeds; /* speeds allowed for this adapter */
+ u8 rsv[2];
+ char supplier_id[STRSZ(BFA_MFG_SUPPLIER_ID_SIZE)];
+ char supplier_partnum[STRSZ(BFA_MFG_SUPPLIER_PARTNUM_SIZE)];
+ char supplier_serialnum[STRSZ(BFA_MFG_SUPPLIER_SERIALNUM_SIZE)];
+ char supplier_revision[STRSZ(BFA_MFG_SUPPLIER_REVISION_SIZE)];
+ mac_t mfg_mac; /* base mac address */
+ u8 num_mac; /* number of mac addresses */
+ u8 rsv2;
+ u32 card_type; /* card type */
+ char cap_nic; /* capability nic */
+ char cap_cna; /* capability cna */
+ char cap_hba; /* capability hba */
+ char cap_fc16g; /* capability fc 16g */
+ char cap_sriov; /* capability sriov */
+ char cap_mezz; /* capability mezz */
+ u8 rsv3;
+ u8 mfg_nports; /* number of ports */
+ char media[8]; /* xfi/xaui */
+ char initial_mode[8]; /* initial mode: hba/cna/nic */
+ u8 rsv4[84];
+ u8 md5_chksum[BFA_MFG_CHKSUM_SIZE]; /* md5 checksum */
};
#pragma pack()
@@ -293,4 +291,34 @@ enum bfa_mode {
BFA_MODE_NIC = 3
};
+/*
+ * Flash module specific
+ */
+#define BFA_FLASH_PART_ENTRY_SIZE 32 /* partition entry size */
+#define BFA_FLASH_PART_MAX 32 /* maximal # of partitions */
+#define BFA_TOTAL_FLASH_SIZE 0x400000
+#define BFA_FLASH_PART_MFG 7
+
+/*
+ * flash partition attributes
+ */
+struct bfa_flash_part_attr {
+ u32 part_type; /* partition type */
+ u32 part_instance; /* partition instance */
+ u32 part_off; /* partition offset */
+ u32 part_size; /* partition size */
+ u32 part_len; /* partition content length */
+ u32 part_status; /* partition status */
+ char rsv[BFA_FLASH_PART_ENTRY_SIZE - 24];
+};
+
+/*
+ * flash attributes
+ */
+struct bfa_flash_attr {
+ u32 status; /* flash overall status */
+ u32 npart; /* num of partitions */
+ struct bfa_flash_part_attr part[BFA_FLASH_PART_MAX];
+};
+
#endif /* __BFA_DEFS_H__ */
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index b0307a00a109..abfad275b5f3 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -74,6 +74,7 @@ static void bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc);
static void bfa_ioc_event_notify(struct bfa_ioc *, enum bfa_ioc_event);
static void bfa_ioc_disable_comp(struct bfa_ioc *ioc);
static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc);
+static void bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc);
static void bfa_ioc_fail_notify(struct bfa_ioc *ioc);
static void bfa_ioc_pf_enabled(struct bfa_ioc *ioc);
static void bfa_ioc_pf_disabled(struct bfa_ioc *ioc);
@@ -997,6 +998,7 @@ bfa_iocpf_sm_disabled(struct bfa_iocpf *iocpf, enum iocpf_event event)
static void
bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf *iocpf)
{
+ bfa_nw_ioc_debug_save_ftrc(iocpf->ioc);
bfa_ioc_hw_sem_get(iocpf->ioc);
}
@@ -1743,6 +1745,114 @@ bfa_ioc_mbox_flush(struct bfa_ioc *ioc)
bfa_q_deq(&mod->cmd_q, &cmd);
}
+/**
+ * Read data from SMEM to host through PCI memmap
+ *
+ * @param[in] ioc memory for IOC
+ * @param[in] tbuf app memory to store data from smem
+ * @param[in] soff smem offset
+ * @param[in] sz size of smem in bytes
+ */
+static int
+bfa_nw_ioc_smem_read(struct bfa_ioc *ioc, void *tbuf, u32 soff, u32 sz)
+{
+ u32 pgnum, loff, r32;
+ int i, len;
+ u32 *buf = tbuf;
+
+ pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
+ loff = PSS_SMEM_PGOFF(soff);
+
+ /*
+ * Hold semaphore to serialize pll init and fwtrc.
+ */
+ if (bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg) == 0)
+ return 1;
+
+ writel(pgnum, ioc->ioc_regs.host_page_num_fn);
+
+ len = sz/sizeof(u32);
+ for (i = 0; i < len; i++) {
+ r32 = swab32(readl((loff) + (ioc->ioc_regs.smem_page_start)));
+ buf[i] = be32_to_cpu(r32);
+ loff += sizeof(u32);
+
+ /**
+ * handle page offset wrap around
+ */
+ loff = PSS_SMEM_PGOFF(loff);
+ if (loff == 0) {
+ pgnum++;
+ writel(pgnum, ioc->ioc_regs.host_page_num_fn);
+ }
+ }
+
+ writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
+ ioc->ioc_regs.host_page_num_fn);
+
+ /*
+ * release semaphore
+ */
+ readl(ioc->ioc_regs.ioc_init_sem_reg);
+ writel(1, ioc->ioc_regs.ioc_init_sem_reg);
+ return 0;
+}
+
+/**
+ * Retrieve saved firmware trace from a prior IOC failure.
+ */
+int
+bfa_nw_ioc_debug_fwtrc(struct bfa_ioc *ioc, void *trcdata, int *trclen)
+{
+ u32 loff = BFI_IOC_TRC_OFF + BNA_DBG_FWTRC_LEN * ioc->port_id;
+ int tlen, status = 0;
+
+ tlen = *trclen;
+ if (tlen > BNA_DBG_FWTRC_LEN)
+ tlen = BNA_DBG_FWTRC_LEN;
+
+ status = bfa_nw_ioc_smem_read(ioc, trcdata, loff, tlen);
+ *trclen = tlen;
+ return status;
+}
+
+/**
+ * Save firmware trace if configured.
+ */
+static void
+bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc)
+{
+ int tlen;
+
+ if (ioc->dbg_fwsave_once) {
+ ioc->dbg_fwsave_once = 0;
+ if (ioc->dbg_fwsave_len) {
+ tlen = ioc->dbg_fwsave_len;
+ bfa_nw_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
+ }
+ }
+}
+
+/**
+ * Retrieve saved firmware trace from a prior IOC failure.
+ */
+int
+bfa_nw_ioc_debug_fwsave(struct bfa_ioc *ioc, void *trcdata, int *trclen)
+{
+ int tlen;
+
+ if (ioc->dbg_fwsave_len == 0)
+ return BFA_STATUS_ENOFSAVE;
+
+ tlen = *trclen;
+ if (tlen > ioc->dbg_fwsave_len)
+ tlen = ioc->dbg_fwsave_len;
+
+ memcpy(trcdata, ioc->dbg_fwsave, tlen);
+ *trclen = tlen;
+ return BFA_STATUS_OK;
+}
+
static void
bfa_ioc_fail_notify(struct bfa_ioc *ioc)
{
@@ -1751,6 +1861,7 @@ bfa_ioc_fail_notify(struct bfa_ioc *ioc)
*/
ioc->cbfn->hbfail_cbfn(ioc->bfa);
bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED);
+ bfa_nw_ioc_debug_save_ftrc(ioc);
}
/**
@@ -2058,6 +2169,16 @@ bfa_nw_ioc_disable(struct bfa_ioc *ioc)
bfa_fsm_send_event(ioc, IOC_E_DISABLE);
}
+/**
+ * Initialize memory for saving firmware trace.
+ */
+void
+bfa_nw_ioc_debug_memclaim(struct bfa_ioc *ioc, void *dbg_fwsave)
+{
+ ioc->dbg_fwsave = dbg_fwsave;
+ ioc->dbg_fwsave_len = ioc->iocpf.auto_recover ? BNA_DBG_FWTRC_LEN : 0;
+}
+
static u32
bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr)
{
@@ -2172,6 +2293,15 @@ bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc)
}
/**
+ * return true if IOC is operational
+ */
+bool
+bfa_nw_ioc_is_operational(struct bfa_ioc *ioc)
+{
+ return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
+}
+
+/**
* Add to IOC heartbeat failure notification queue. To be used by common
* modules such as cee, port, diag.
*/
@@ -2471,3 +2601,366 @@ bfa_ioc_poll_fwinit(struct bfa_ioc *ioc)
msecs_to_jiffies(BFA_IOC_POLL_TOV));
}
}
+
+/*
+ * Flash module specific
+ */
+
+/*
+ * The FLASH DMA buffer should be big enough to hold both the MFG block and
+ * the ASIC block (64k) at the same time, and should also be 2k aligned to
+ * avoid a write segment crossing a sector boundary.
+ */
+#define BFA_FLASH_SEG_SZ 2048
+#define BFA_FLASH_DMA_BUF_SZ \
+ roundup(0x010000 + sizeof(struct bfa_mfg_block), BFA_FLASH_SEG_SZ)
+
+static void
+bfa_flash_cb(struct bfa_flash *flash)
+{
+ flash->op_busy = 0;
+ if (flash->cbfn)
+ flash->cbfn(flash->cbarg, flash->status);
+}
+
+static void
+bfa_flash_notify(void *cbarg, enum bfa_ioc_event event)
+{
+ struct bfa_flash *flash = cbarg;
+
+ switch (event) {
+ case BFA_IOC_E_DISABLED:
+ case BFA_IOC_E_FAILED:
+ if (flash->op_busy) {
+ flash->status = BFA_STATUS_IOC_FAILURE;
+ flash->cbfn(flash->cbarg, flash->status);
+ flash->op_busy = 0;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+/*
+ * Send flash write request.
+ *
+ * @param[in] flash - flash structure
+ */
+static void
+bfa_flash_write_send(struct bfa_flash *flash)
+{
+ struct bfi_flash_write_req *msg =
+ (struct bfi_flash_write_req *) flash->mb.msg;
+ u32 len;
+
+ msg->type = be32_to_cpu(flash->type);
+ msg->instance = flash->instance;
+ msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
+ len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
+ flash->residue : BFA_FLASH_DMA_BUF_SZ;
+ msg->length = be32_to_cpu(len);
+
+ /* indicate if it's the last msg of the whole write operation */
+ msg->last = (len == flash->residue) ? 1 : 0;
+
+ bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_WRITE_REQ,
+ bfa_ioc_portid(flash->ioc));
+ bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
+ memcpy(flash->dbuf_kva, flash->ubuf + flash->offset, len);
+ bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);
+
+ flash->residue -= len;
+ flash->offset += len;
+}
+
+/*
+ * Send flash read request.
+ *
+ * @param[in] cbarg - callback argument
+ */
+static void
+bfa_flash_read_send(void *cbarg)
+{
+ struct bfa_flash *flash = cbarg;
+ struct bfi_flash_read_req *msg =
+ (struct bfi_flash_read_req *) flash->mb.msg;
+ u32 len;
+
+ msg->type = be32_to_cpu(flash->type);
+ msg->instance = flash->instance;
+ msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
+ len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
+ flash->residue : BFA_FLASH_DMA_BUF_SZ;
+ msg->length = be32_to_cpu(len);
+ bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_READ_REQ,
+ bfa_ioc_portid(flash->ioc));
+ bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
+ bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);
+}
+
+/*
+ * Process flash response messages upon receiving interrupts.
+ *
+ * @param[in] flasharg - flash structure
+ * @param[in] msg - message structure
+ */
+static void
+bfa_flash_intr(void *flasharg, struct bfi_mbmsg *msg)
+{
+ struct bfa_flash *flash = flasharg;
+ u32 status;
+
+ union {
+ struct bfi_flash_query_rsp *query;
+ struct bfi_flash_write_rsp *write;
+ struct bfi_flash_read_rsp *read;
+ struct bfi_mbmsg *msg;
+ } m;
+
+ m.msg = msg;
+
+ /* receiving response after ioc failure */
+ if (!flash->op_busy && msg->mh.msg_id != BFI_FLASH_I2H_EVENT)
+ return;
+
+ switch (msg->mh.msg_id) {
+ case BFI_FLASH_I2H_QUERY_RSP:
+ status = be32_to_cpu(m.query->status);
+ if (status == BFA_STATUS_OK) {
+ u32 i;
+ struct bfa_flash_attr *attr, *f;
+
+ attr = (struct bfa_flash_attr *) flash->ubuf;
+ f = (struct bfa_flash_attr *) flash->dbuf_kva;
+ attr->status = be32_to_cpu(f->status);
+ attr->npart = be32_to_cpu(f->npart);
+ for (i = 0; i < attr->npart; i++) {
+ attr->part[i].part_type =
+ be32_to_cpu(f->part[i].part_type);
+ attr->part[i].part_instance =
+ be32_to_cpu(f->part[i].part_instance);
+ attr->part[i].part_off =
+ be32_to_cpu(f->part[i].part_off);
+ attr->part[i].part_size =
+ be32_to_cpu(f->part[i].part_size);
+ attr->part[i].part_len =
+ be32_to_cpu(f->part[i].part_len);
+ attr->part[i].part_status =
+ be32_to_cpu(f->part[i].part_status);
+ }
+ }
+ flash->status = status;
+ bfa_flash_cb(flash);
+ break;
+ case BFI_FLASH_I2H_WRITE_RSP:
+ status = be32_to_cpu(m.write->status);
+ if (status != BFA_STATUS_OK || flash->residue == 0) {
+ flash->status = status;
+ bfa_flash_cb(flash);
+ } else
+ bfa_flash_write_send(flash);
+ break;
+ case BFI_FLASH_I2H_READ_RSP:
+ status = be32_to_cpu(m.read->status);
+ if (status != BFA_STATUS_OK) {
+ flash->status = status;
+ bfa_flash_cb(flash);
+ } else {
+ u32 len = be32_to_cpu(m.read->length);
+ memcpy(flash->ubuf + flash->offset,
+ flash->dbuf_kva, len);
+ flash->residue -= len;
+ flash->offset += len;
+ if (flash->residue == 0) {
+ flash->status = status;
+ bfa_flash_cb(flash);
+ } else
+ bfa_flash_read_send(flash);
+ }
+ break;
+ case BFI_FLASH_I2H_BOOT_VER_RSP:
+ case BFI_FLASH_I2H_EVENT:
+ break;
+ default:
+ WARN_ON(1);
+ }
+}
+
+/*
+ * Flash memory info API.
+ */
+u32
+bfa_nw_flash_meminfo(void)
+{
+ return roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
+}
+
+/*
+ * Flash attach API.
+ *
+ * @param[in] flash - flash structure
+ * @param[in] ioc - ioc structure
+ * @param[in] dev - device structure
+ */
+void
+bfa_nw_flash_attach(struct bfa_flash *flash, struct bfa_ioc *ioc, void *dev)
+{
+ flash->ioc = ioc;
+ flash->cbfn = NULL;
+ flash->cbarg = NULL;
+ flash->op_busy = 0;
+
+ bfa_nw_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash);
+ bfa_q_qe_init(&flash->ioc_notify);
+ bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash);
+ list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q);
+}
+
+/*
+ * Claim memory for flash
+ *
+ * @param[in] flash - flash structure
+ * @param[in] dm_kva - pointer to virtual memory address
+ * @param[in] dm_pa - physical memory address
+ */
+void
+bfa_nw_flash_memclaim(struct bfa_flash *flash, u8 *dm_kva, u64 dm_pa)
+{
+ flash->dbuf_kva = dm_kva;
+ flash->dbuf_pa = dm_pa;
+ memset(flash->dbuf_kva, 0, BFA_FLASH_DMA_BUF_SZ);
+ dm_kva += roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
+ dm_pa += roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
+}
+
+/*
+ * Get flash attribute.
+ *
+ * @param[in] flash - flash structure
+ * @param[in] attr - flash attribute structure
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status.
+ */
+enum bfa_status
+bfa_nw_flash_get_attr(struct bfa_flash *flash, struct bfa_flash_attr *attr,
+ bfa_cb_flash cbfn, void *cbarg)
+{
+ struct bfi_flash_query_req *msg =
+ (struct bfi_flash_query_req *) flash->mb.msg;
+
+ if (!bfa_nw_ioc_is_operational(flash->ioc))
+ return BFA_STATUS_IOC_NON_OP;
+
+ if (flash->op_busy)
+ return BFA_STATUS_DEVBUSY;
+
+ flash->op_busy = 1;
+ flash->cbfn = cbfn;
+ flash->cbarg = cbarg;
+ flash->ubuf = (u8 *) attr;
+
+ bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ,
+ bfa_ioc_portid(flash->ioc));
+ bfa_alen_set(&msg->alen, sizeof(struct bfa_flash_attr), flash->dbuf_pa);
+ bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);
+
+ return BFA_STATUS_OK;
+}
+
+/*
+ * Update flash partition.
+ *
+ * @param[in] flash - flash structure
+ * @param[in] type - flash partition type
+ * @param[in] instance - flash partition instance
+ * @param[in] buf - update data buffer
+ * @param[in] len - data buffer length
+ * @param[in] offset - offset relative to the partition starting address
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status.
+ */
+enum bfa_status
+bfa_nw_flash_update_part(struct bfa_flash *flash, u32 type, u8 instance,
+ void *buf, u32 len, u32 offset,
+ bfa_cb_flash cbfn, void *cbarg)
+{
+ if (!bfa_nw_ioc_is_operational(flash->ioc))
+ return BFA_STATUS_IOC_NON_OP;
+
+ /*
+ * 'len' must be on a word (4-byte) boundary
+ */
+ if (!len || (len & 0x03))
+ return BFA_STATUS_FLASH_BAD_LEN;
+
+ if (type == BFA_FLASH_PART_MFG)
+ return BFA_STATUS_EINVAL;
+
+ if (flash->op_busy)
+ return BFA_STATUS_DEVBUSY;
+
+ flash->op_busy = 1;
+ flash->cbfn = cbfn;
+ flash->cbarg = cbarg;
+ flash->type = type;
+ flash->instance = instance;
+ flash->residue = len;
+ flash->offset = 0;
+ flash->addr_off = offset;
+ flash->ubuf = buf;
+
+ bfa_flash_write_send(flash);
+
+ return BFA_STATUS_OK;
+}
+
+/*
+ * Read flash partition.
+ *
+ * @param[in] flash - flash structure
+ * @param[in] type - flash partition type
+ * @param[in] instance - flash partition instance
+ * @param[in] buf - read data buffer
+ * @param[in] len - data buffer length
+ * @param[in] offset - offset relative to the partition starting address
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status.
+ */
+enum bfa_status
+bfa_nw_flash_read_part(struct bfa_flash *flash, u32 type, u8 instance,
+ void *buf, u32 len, u32 offset,
+ bfa_cb_flash cbfn, void *cbarg)
+{
+ if (!bfa_nw_ioc_is_operational(flash->ioc))
+ return BFA_STATUS_IOC_NON_OP;
+
+ /*
+ * 'len' must be on a word (4-byte) boundary
+ */
+ if (!len || (len & 0x03))
+ return BFA_STATUS_FLASH_BAD_LEN;
+
+ if (flash->op_busy)
+ return BFA_STATUS_DEVBUSY;
+
+ flash->op_busy = 1;
+ flash->cbfn = cbfn;
+ flash->cbarg = cbarg;
+ flash->type = type;
+ flash->instance = instance;
+ flash->residue = len;
+ flash->offset = 0;
+ flash->addr_off = offset;
+ flash->ubuf = buf;
+
+ bfa_flash_read_send(flash);
+
+ return BFA_STATUS_OK;
+}
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.h b/drivers/net/ethernet/brocade/bna/bfa_ioc.h
index ca158d1eaef3..3b4460fdc148 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.h
@@ -27,6 +27,8 @@
#define BFA_IOC_HWSEM_TOV 500 /* msecs */
#define BFA_IOC_HB_TOV 500 /* msecs */
#define BFA_IOC_POLL_TOV 200 /* msecs */
+#define BNA_DBG_FWTRC_LEN (BFI_IOC_TRC_ENTS * BFI_IOC_TRC_ENT_SZ + \
+ BFI_IOC_TRC_HDR_SZ)
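+/*
+ * With the trace constants added to bfi.h in this change
+ * (BFI_IOC_TRC_ENTS = 256, BFI_IOC_TRC_ENT_SZ = 16, BFI_IOC_TRC_HDR_SZ = 32),
+ * BNA_DBG_FWTRC_LEN works out to 256 * 16 + 32 = 4128 bytes of trace data.
+ */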
/**
* PCI device information required by IOC
@@ -68,6 +70,16 @@ __bfa_dma_be_addr_set(union bfi_addr_u *dma_addr, u64 pa)
dma_addr->a32.addr_hi = (u32) htonl(upper_32_bits(pa));
}
+#define bfa_alen_set(__alen, __len, __pa) \
+ __bfa_alen_set(__alen, __len, (u64)__pa)
+
+static inline void
+__bfa_alen_set(struct bfi_alen *alen, u32 len, u64 pa)
+{
+ alen->al_len = cpu_to_be32(len);
+ bfa_dma_be_addr_set(alen->al_addr, pa);
+}
+
struct bfa_ioc_regs {
void __iomem *hfn_mbox_cmd;
void __iomem *hfn_mbox;
@@ -296,6 +308,7 @@ void bfa_nw_ioc_disable(struct bfa_ioc *ioc);
void bfa_nw_ioc_error_isr(struct bfa_ioc *ioc);
bool bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc);
+bool bfa_nw_ioc_is_operational(struct bfa_ioc *ioc);
void bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr);
void bfa_nw_ioc_notify_register(struct bfa_ioc *ioc,
struct bfa_ioc_notify *notify);
@@ -307,6 +320,9 @@ void bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc,
bool bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc,
struct bfi_ioc_image_hdr *fwhdr);
mac_t bfa_nw_ioc_get_mac(struct bfa_ioc *ioc);
+void bfa_nw_ioc_debug_memclaim(struct bfa_ioc *ioc, void *dbg_fwsave);
+int bfa_nw_ioc_debug_fwtrc(struct bfa_ioc *ioc, void *trcdata, int *trclen);
+int bfa_nw_ioc_debug_fwsave(struct bfa_ioc *ioc, void *trcdata, int *trclen);
/*
* Timeout APIs
@@ -322,4 +338,42 @@ void bfa_nw_iocpf_sem_timeout(void *ioc);
u32 *bfa_cb_image_get_chunk(enum bfi_asic_gen asic_gen, u32 off);
u32 bfa_cb_image_get_size(enum bfi_asic_gen asic_gen);
+/*
+ * Flash module specific
+ */
+typedef void (*bfa_cb_flash) (void *cbarg, enum bfa_status status);
+
+struct bfa_flash {
+ struct bfa_ioc *ioc; /* back pointer to ioc */
+ u32 type; /* partition type */
+ u8 instance; /* partition instance */
+ u8 rsv[3];
+ u32 op_busy; /* operation busy flag */
+ u32 residue; /* residual length */
+ u32 offset; /* offset */
+ enum bfa_status status; /* status */
+ u8 *dbuf_kva; /* dma buf virtual address */
+ u64 dbuf_pa; /* dma buf physical address */
+ bfa_cb_flash cbfn; /* user callback function */
+ void *cbarg; /* user callback arg */
+ u8 *ubuf; /* user supplied buffer */
+ u32 addr_off; /* partition address offset */
+ struct bfa_mbox_cmd mb; /* mailbox */
+ struct bfa_ioc_notify ioc_notify; /* ioc event notify */
+};
+
+enum bfa_status bfa_nw_flash_get_attr(struct bfa_flash *flash,
+ struct bfa_flash_attr *attr,
+ bfa_cb_flash cbfn, void *cbarg);
+enum bfa_status bfa_nw_flash_update_part(struct bfa_flash *flash,
+ u32 type, u8 instance, void *buf, u32 len, u32 offset,
+ bfa_cb_flash cbfn, void *cbarg);
+enum bfa_status bfa_nw_flash_read_part(struct bfa_flash *flash,
+ u32 type, u8 instance, void *buf, u32 len, u32 offset,
+ bfa_cb_flash cbfn, void *cbarg);
+u32 bfa_nw_flash_meminfo(void);
+void bfa_nw_flash_attach(struct bfa_flash *flash,
+ struct bfa_ioc *ioc, void *dev);
+void bfa_nw_flash_memclaim(struct bfa_flash *flash, u8 *dm_kva, u64 dm_pa);
+
#endif /* __BFA_IOC_H__ */
diff --git a/drivers/net/ethernet/brocade/bna/bfi.h b/drivers/net/ethernet/brocade/bna/bfi.h
index 7a1393aabd43..0d9df695397a 100644
--- a/drivers/net/ethernet/brocade/bna/bfi.h
+++ b/drivers/net/ethernet/brocade/bna/bfi.h
@@ -83,6 +83,14 @@ union bfi_addr_u {
} a32;
};
+/**
+ * Generic DMA addr-len pair.
+ */
+struct bfi_alen {
+ union bfi_addr_u al_addr; /* DMA addr of buffer */
+ u32 al_len; /* length of buffer */
+};
+
/*
* Large Message structure - 128 Bytes size Msgs
*/
@@ -249,6 +257,8 @@ struct bfi_ioc_getattr_reply {
*/
#define BFI_IOC_TRC_OFF (0x4b00)
#define BFI_IOC_TRC_ENTS 256
+#define BFI_IOC_TRC_ENT_SZ 16
+#define BFI_IOC_TRC_HDR_SZ 32
#define BFI_IOC_FW_SIGNATURE (0xbfadbfad)
#define BFI_IOC_MD5SUM_SZ 4
@@ -476,6 +486,93 @@ struct bfi_msgq_i2h_cmdq_copy_req {
u16 len;
};
+/*
+ * FLASH module specific
+ */
+enum bfi_flash_h2i_msgs {
+ BFI_FLASH_H2I_QUERY_REQ = 1,
+ BFI_FLASH_H2I_ERASE_REQ = 2,
+ BFI_FLASH_H2I_WRITE_REQ = 3,
+ BFI_FLASH_H2I_READ_REQ = 4,
+ BFI_FLASH_H2I_BOOT_VER_REQ = 5,
+};
+
+enum bfi_flash_i2h_msgs {
+ BFI_FLASH_I2H_QUERY_RSP = BFA_I2HM(1),
+ BFI_FLASH_I2H_ERASE_RSP = BFA_I2HM(2),
+ BFI_FLASH_I2H_WRITE_RSP = BFA_I2HM(3),
+ BFI_FLASH_I2H_READ_RSP = BFA_I2HM(4),
+ BFI_FLASH_I2H_BOOT_VER_RSP = BFA_I2HM(5),
+ BFI_FLASH_I2H_EVENT = BFA_I2HM(127),
+};
+
+/*
+ * Flash query request
+ */
+struct bfi_flash_query_req {
+ struct bfi_mhdr mh; /* Common msg header */
+ struct bfi_alen alen;
+};
+
+/*
+ * Flash write request
+ */
+struct bfi_flash_write_req {
+ struct bfi_mhdr mh; /* Common msg header */
+ struct bfi_alen alen;
+ u32 type; /* partition type */
+ u8 instance; /* partition instance */
+ u8 last;
+ u8 rsv[2];
+ u32 offset;
+ u32 length;
+};
+
+/*
+ * Flash read request
+ */
+struct bfi_flash_read_req {
+ struct bfi_mhdr mh; /* Common msg header */
+ u32 type; /* partition type */
+ u8 instance; /* partition instance */
+ u8 rsv[3];
+ u32 offset;
+ u32 length;
+ struct bfi_alen alen;
+};
+
+/*
+ * Flash query response
+ */
+struct bfi_flash_query_rsp {
+ struct bfi_mhdr mh; /* Common msg header */
+ u32 status;
+};
+
+/*
+ * Flash read response
+ */
+struct bfi_flash_read_rsp {
+ struct bfi_mhdr mh; /* Common msg header */
+ u32 type; /* partition type */
+ u8 instance; /* partition instance */
+ u8 rsv[3];
+ u32 status;
+ u32 length;
+};
+
+/*
+ * Flash write response
+ */
+struct bfi_flash_write_rsp {
+ struct bfi_mhdr mh; /* Common msg header */
+ u32 type; /* partition type */
+ u8 instance; /* partition instance */
+ u8 rsv[3];
+ u32 status;
+ u32 length;
+};
+
#pragma pack()
#endif /* __BFI_H__ */
diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c
index 26f5c5abfd1f..9ccc586e3767 100644
--- a/drivers/net/ethernet/brocade/bna/bna_enet.c
+++ b/drivers/net/ethernet/brocade/bna/bna_enet.c
@@ -1727,6 +1727,7 @@ bna_ioceth_init(struct bna_ioceth *ioceth, struct bna *bna,
bfa_nw_ioc_mem_claim(&ioceth->ioc, kva, dma);
kva = res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mdl[0].kva;
+ bfa_nw_ioc_debug_memclaim(&ioceth->ioc, kva);
/**
* Attach common modules (Diag, SFP, CEE, Port) and claim respective
@@ -1740,6 +1741,11 @@ bna_ioceth_init(struct bna_ioceth *ioceth, struct bna *bna,
kva += bfa_nw_cee_meminfo();
dma += bfa_nw_cee_meminfo();
+ bfa_nw_flash_attach(&bna->flash, &ioceth->ioc, bna);
+ bfa_nw_flash_memclaim(&bna->flash, kva, dma);
+ kva += bfa_nw_flash_meminfo();
+ dma += bfa_nw_flash_meminfo();
+
bfa_msgq_attach(&bna->msgq, &ioceth->ioc);
bfa_msgq_memclaim(&bna->msgq, kva, dma);
bfa_msgq_regisr(&bna->msgq, BFI_MC_ENET, bna_msgq_rsp_handler, bna);
@@ -1892,7 +1898,8 @@ bna_res_req(struct bna_res_info *res_info)
res_info[BNA_RES_MEM_T_COM].res_u.mem_info.num = 1;
res_info[BNA_RES_MEM_T_COM].res_u.mem_info.len = ALIGN(
(bfa_nw_cee_meminfo() +
- bfa_msgq_meminfo()), PAGE_SIZE);
+ bfa_nw_flash_meminfo() +
+ bfa_msgq_meminfo()), PAGE_SIZE);
/* DMA memory for retrieving IOC attributes */
res_info[BNA_RES_MEM_T_ATTR].res_type = BNA_RES_T_MEM;
@@ -1904,8 +1911,8 @@ bna_res_req(struct bna_res_info *res_info)
 /* Virtual memory for retrieving fw_trc */
res_info[BNA_RES_MEM_T_FWTRC].res_type = BNA_RES_T_MEM;
res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mem_type = BNA_MEM_T_KVA;
- res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.num = 0;
- res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.len = 0;
+ res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.num = 1;
+ res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.len = BNA_DBG_FWTRC_LEN;
 /* DMA memory for retrieving stats */
res_info[BNA_RES_MEM_T_STATS].res_type = BNA_RES_T_MEM;
diff --git a/drivers/net/ethernet/brocade/bna/bna_types.h b/drivers/net/ethernet/brocade/bna/bna_types.h
index d090fbfb12fa..e8d3ab7ea6cb 100644
--- a/drivers/net/ethernet/brocade/bna/bna_types.h
+++ b/drivers/net/ethernet/brocade/bna/bna_types.h
@@ -427,7 +427,7 @@ struct bna_ethport {
/* Doorbell structure */
struct bna_ib_dbell {
- void *__iomem doorbell_addr;
+ void __iomem *doorbell_addr;
u32 doorbell_ack;
};
@@ -463,7 +463,7 @@ struct bna_tcb {
u32 consumer_index;
volatile u32 *hw_consumer_index;
u32 q_depth;
- void *__iomem q_dbell;
+ void __iomem *q_dbell;
struct bna_ib_dbell *i_dbell;
int page_idx;
int page_count;
@@ -599,7 +599,7 @@ struct bna_rcb {
u32 producer_index;
u32 consumer_index;
u32 q_depth;
- void *__iomem q_dbell;
+ void __iomem *q_dbell;
int page_idx;
int page_count;
/* Control path */
@@ -966,6 +966,7 @@ struct bna {
struct bna_ioceth ioceth;
struct bfa_cee cee;
+ struct bfa_flash flash;
struct bfa_msgq msgq;
struct bna_ethport ethport;
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 7f3091e7eb42..be7d91e4b785 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -44,11 +44,18 @@ static uint bnad_ioc_auto_recover = 1;
module_param(bnad_ioc_auto_recover, uint, 0444);
MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");
+static uint bna_debugfs_enable = 1;
+module_param(bna_debugfs_enable, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(bna_debugfs_enable, "Enables debugfs feature, default=1,"
+ " Range[false:0|true:1]");
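+/* e.g., a hypothetical "modprobe bna bna_debugfs_enable=0" disables the interface */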
+
/*
* Global variables
*/
u32 bnad_rxqs_per_cq = 2;
-
+static u32 bna_id;
+static struct mutex bnad_list_mutex;
+static LIST_HEAD(bnad_list);
static const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
/*
@@ -75,6 +82,23 @@ do { \
#define BNAD_TXRX_SYNC_MDELAY 250 /* 250 msecs */
+static void
+bnad_add_to_list(struct bnad *bnad)
+{
+ mutex_lock(&bnad_list_mutex);
+ list_add_tail(&bnad->list_entry, &bnad_list);
+ bnad->id = bna_id++;
+ mutex_unlock(&bnad_list_mutex);
+}
+
+static void
+bnad_remove_from_list(struct bnad *bnad)
+{
+ mutex_lock(&bnad_list_mutex);
+ list_del(&bnad->list_entry);
+ mutex_unlock(&bnad_list_mutex);
+}
+
/*
* Reinitialize completions in CQ, once Rx is taken down
*/
@@ -723,7 +747,7 @@ void
bnad_cb_ethport_link_status(struct bnad *bnad,
enum bna_link_status link_status)
{
- bool link_up = 0;
+ bool link_up = false;
link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);
@@ -1084,6 +1108,16 @@ bnad_cb_enet_mtu_set(struct bnad *bnad)
complete(&bnad->bnad_completions.mtu_comp);
}
+void
+bnad_cb_completion(void *arg, enum bfa_status status)
+{
+ struct bnad_iocmd_comp *iocmd_comp =
+ (struct bnad_iocmd_comp *)arg;
+
+ iocmd_comp->comp_status = (u32) status;
+ complete(&iocmd_comp->comp);
+}
+
/* Resource allocation, free functions */
static void
@@ -2968,7 +3002,7 @@ bnad_change_mtu(struct net_device *netdev, int new_mtu)
return err;
}
-static void
+static int
bnad_vlan_rx_add_vid(struct net_device *netdev,
unsigned short vid)
{
@@ -2976,7 +3010,7 @@ bnad_vlan_rx_add_vid(struct net_device *netdev,
unsigned long flags;
if (!bnad->rx_info[0].rx)
- return;
+ return 0;
mutex_lock(&bnad->conf_mutex);
@@ -2986,9 +3020,11 @@ bnad_vlan_rx_add_vid(struct net_device *netdev,
spin_unlock_irqrestore(&bnad->bna_lock, flags);
mutex_unlock(&bnad->conf_mutex);
+
+ return 0;
}
-static void
+static int
bnad_vlan_rx_kill_vid(struct net_device *netdev,
unsigned short vid)
{
@@ -2996,7 +3032,7 @@ bnad_vlan_rx_kill_vid(struct net_device *netdev,
unsigned long flags;
if (!bnad->rx_info[0].rx)
- return;
+ return 0;
mutex_lock(&bnad->conf_mutex);
@@ -3006,6 +3042,8 @@ bnad_vlan_rx_kill_vid(struct net_device *netdev,
spin_unlock_irqrestore(&bnad->bna_lock, flags);
mutex_unlock(&bnad->conf_mutex);
+
+ return 0;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -3163,12 +3201,14 @@ bnad_lock_init(struct bnad *bnad)
{
spin_lock_init(&bnad->bna_lock);
mutex_init(&bnad->conf_mutex);
+ mutex_init(&bnad_list_mutex);
}
static void
bnad_lock_uninit(struct bnad *bnad)
{
mutex_destroy(&bnad->conf_mutex);
+ mutex_destroy(&bnad_list_mutex);
}
/* PCI Initialization */
@@ -3186,7 +3226,7 @@ bnad_pci_init(struct bnad *bnad,
goto disable_device;
if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
!dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
- *using_dac = 1;
+ *using_dac = true;
} else {
err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
@@ -3195,7 +3235,7 @@ bnad_pci_init(struct bnad *bnad,
if (err)
goto release_regions;
}
- *using_dac = 0;
+ *using_dac = false;
}
pci_set_master(pdev);
return 0;
@@ -3249,8 +3289,8 @@ bnad_pci_probe(struct pci_dev *pdev,
return err;
}
bnad = netdev_priv(netdev);
-
bnad_lock_init(bnad);
+ bnad_add_to_list(bnad);
mutex_lock(&bnad->conf_mutex);
/*
@@ -3277,6 +3317,10 @@ bnad_pci_probe(struct pci_dev *pdev,
/* Set link to down state */
netif_carrier_off(netdev);
+ /* Setup the debugfs node for this bnad */
+ if (bna_debugfs_enable)
+ bnad_debugfs_init(bnad);
+
 /* Get resource requirement from bna */
spin_lock_irqsave(&bnad->bna_lock, flags);
bna_res_req(&bnad->res_info[0]);
@@ -3398,11 +3442,15 @@ disable_ioceth:
res_free:
bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
drv_uninit:
+ /* Remove the debugfs node for this bnad */
+ kfree(bnad->regdata);
+ bnad_debugfs_uninit(bnad);
bnad_uninit(bnad);
pci_uninit:
bnad_pci_uninit(pdev);
unlock_mutex:
mutex_unlock(&bnad->conf_mutex);
+ bnad_remove_from_list(bnad);
bnad_lock_uninit(bnad);
free_netdev(netdev);
return err;
@@ -3441,7 +3489,11 @@ bnad_pci_remove(struct pci_dev *pdev)
bnad_disable_msix(bnad);
bnad_pci_uninit(pdev);
mutex_unlock(&bnad->conf_mutex);
+ bnad_remove_from_list(bnad);
bnad_lock_uninit(bnad);
+ /* Remove the debugfs node for this bnad */
+ kfree(bnad->regdata);
+ bnad_debugfs_uninit(bnad);
bnad_uninit(bnad);
free_netdev(netdev);
}
diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h
index 5487ca42d018..55824d92699f 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.h
+++ b/drivers/net/ethernet/brocade/bna/bnad.h
@@ -124,6 +124,12 @@ enum bnad_link_state {
BNAD_LS_UP = 1
};
+struct bnad_iocmd_comp {
+ struct bnad *bnad;
+ struct completion comp;
+ int comp_status;
+};
+
struct bnad_completion {
struct completion ioc_comp;
struct completion ucast_comp;
@@ -251,6 +257,8 @@ struct bnad_unmap_q {
struct bnad {
struct net_device *netdev;
+ u32 id;
+ struct list_head list_entry;
/* Data path */
struct bnad_tx_info tx_info[BNAD_MAX_TX];
@@ -320,12 +328,26 @@ struct bnad {
char adapter_name[BNAD_NAME_LEN];
char port_name[BNAD_NAME_LEN];
char mbox_irq_name[BNAD_NAME_LEN];
+
+ /* debugfs specific data */
+ char *regdata;
+ u32 reglen;
+ struct dentry *bnad_dentry_files[5];
+ struct dentry *port_debugfs_root;
+};
+
+struct bnad_drvinfo {
+ struct bfa_ioc_attr ioc_attr;
+ struct bfa_cee_attr cee_attr;
+ struct bfa_flash_attr flash_attr;
+ u32 cee_status;
+ u32 flash_status;
};
/*
* EXTERN VARIABLES
*/
-extern struct firmware *bfi_fw;
+extern const struct firmware *bfi_fw;
extern u32 bnad_rxqs_per_cq;
/*
@@ -340,6 +362,7 @@ extern int bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr);
extern int bnad_enable_default_bcast(struct bnad *bnad);
extern void bnad_restore_vlans(struct bnad *bnad, u32 rx_id);
extern void bnad_set_ethtool_ops(struct net_device *netdev);
+extern void bnad_cb_completion(void *arg, enum bfa_status status);
/* Configuration & setup */
extern void bnad_tx_coalescing_timeo_set(struct bnad *bnad);
@@ -359,6 +382,10 @@ extern void bnad_netdev_qstats_fill(struct bnad *bnad,
extern void bnad_netdev_hwstats_fill(struct bnad *bnad,
struct rtnl_link_stats64 *stats);
+/* Debugfs */
+void bnad_debugfs_init(struct bnad *bnad);
+void bnad_debugfs_uninit(struct bnad *bnad);
+
/**
* MACROS
*/
diff --git a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
new file mode 100644
index 000000000000..592ad3929f53
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
@@ -0,0 +1,623 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include "bnad.h"
+
+/*
+ * BNA debugfs interface
+ *
+ * To access the interface, the debugfs file system should be mounted
+ * (if it is not already mounted) using:
+ * mount -t debugfs none /sys/kernel/debug
+ *
+ * BNA Hierarchy:
+ * - bna/pci_dev:<pci_name>
+ * where the pci_name corresponds to the one under /sys/bus/pci/drivers/bna
+ *
+ * Debugging services available per pci_dev:
+ * fwtrc: To collect current firmware trace.
+ * fwsave: To collect last saved fw trace as a result of firmware crash.
+ * regwr: To write one word to chip register
+ * regrd: To read one or more words from chip register.
+ */
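+/*
+ * Example session (a sketch only; the PCI address below is hypothetical):
+ *
+ *   mount -t debugfs none /sys/kernel/debug
+ *   cat /sys/kernel/debug/bna/pci_dev:0000:10:00.0/fwtrc > fwtrc.bin
+ *   # read 4 words starting at chip register offset 0x14000 ("addr:len")
+ *   echo "14000:4" > /sys/kernel/debug/bna/pci_dev:0000:10:00.0/regrd
+ *   xxd /sys/kernel/debug/bna/pci_dev:0000:10:00.0/regrd
+ */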
+
+struct bnad_debug_info {
+ char *debug_buffer;
+ void *i_private;
+ int buffer_len;
+};
+
+static int
+bnad_debugfs_open_fwtrc(struct inode *inode, struct file *file)
+{
+ struct bnad *bnad = inode->i_private;
+ struct bnad_debug_info *fw_debug;
+ unsigned long flags;
+ int rc;
+
+ fw_debug = kzalloc(sizeof(struct bnad_debug_info), GFP_KERNEL);
+ if (!fw_debug)
+ return -ENOMEM;
+
+ fw_debug->buffer_len = BNA_DBG_FWTRC_LEN;
+
+ fw_debug->debug_buffer = kzalloc(fw_debug->buffer_len, GFP_KERNEL);
+ if (!fw_debug->debug_buffer) {
+ kfree(fw_debug);
+ fw_debug = NULL;
+ pr_warn("bna %s: Failed to allocate fwtrc buffer\n",
+ pci_name(bnad->pcidev));
+ return -ENOMEM;
+ }
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ rc = bfa_nw_ioc_debug_fwtrc(&bnad->bna.ioceth.ioc,
+ fw_debug->debug_buffer,
+ &fw_debug->buffer_len);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ if (rc != BFA_STATUS_OK) {
+ kfree(fw_debug->debug_buffer);
+ fw_debug->debug_buffer = NULL;
+ kfree(fw_debug);
+ fw_debug = NULL;
+ pr_warn("bnad %s: Failed to collect fwtrc\n",
+ pci_name(bnad->pcidev));
+ return -ENOMEM;
+ }
+
+ file->private_data = fw_debug;
+
+ return 0;
+}
+
+static int
+bnad_debugfs_open_fwsave(struct inode *inode, struct file *file)
+{
+ struct bnad *bnad = inode->i_private;
+ struct bnad_debug_info *fw_debug;
+ unsigned long flags;
+ int rc;
+
+ fw_debug = kzalloc(sizeof(struct bnad_debug_info), GFP_KERNEL);
+ if (!fw_debug)
+ return -ENOMEM;
+
+ fw_debug->buffer_len = BNA_DBG_FWTRC_LEN;
+
+ fw_debug->debug_buffer = kzalloc(fw_debug->buffer_len, GFP_KERNEL);
+ if (!fw_debug->debug_buffer) {
+ kfree(fw_debug);
+ fw_debug = NULL;
+ pr_warn("bna %s: Failed to allocate fwsave buffer\n",
+ pci_name(bnad->pcidev));
+ return -ENOMEM;
+ }
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ rc = bfa_nw_ioc_debug_fwsave(&bnad->bna.ioceth.ioc,
+ fw_debug->debug_buffer,
+ &fw_debug->buffer_len);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ if (rc != BFA_STATUS_OK && rc != BFA_STATUS_ENOFSAVE) {
+ kfree(fw_debug->debug_buffer);
+ fw_debug->debug_buffer = NULL;
+ kfree(fw_debug);
+ fw_debug = NULL;
+ pr_warn("bna %s: Failed to collect fwsave\n",
+ pci_name(bnad->pcidev));
+ return -ENOMEM;
+ }
+
+ file->private_data = fw_debug;
+
+ return 0;
+}
+
+static int
+bnad_debugfs_open_reg(struct inode *inode, struct file *file)
+{
+ struct bnad_debug_info *reg_debug;
+
+ reg_debug = kzalloc(sizeof(struct bnad_debug_info), GFP_KERNEL);
+ if (!reg_debug)
+ return -ENOMEM;
+
+ reg_debug->i_private = inode->i_private;
+
+ file->private_data = reg_debug;
+
+ return 0;
+}
+
+static int
+bnad_get_debug_drvinfo(struct bnad *bnad, void *buffer, u32 len)
+{
+ struct bnad_drvinfo *drvinfo = (struct bnad_drvinfo *) buffer;
+ struct bnad_iocmd_comp fcomp;
+ unsigned long flags = 0;
+ int ret = BFA_STATUS_FAILED;
+
+ /* Get IOC info */
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bfa_nw_ioc_get_attr(&bnad->bna.ioceth.ioc, &drvinfo->ioc_attr);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ /* Retrieve CEE related info */
+ fcomp.bnad = bnad;
+ fcomp.comp_status = 0;
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ ret = bfa_nw_cee_get_attr(&bnad->bna.cee, &drvinfo->cee_attr,
+ bnad_cb_completion, &fcomp);
+ if (ret != BFA_STATUS_OK) {
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ goto out;
+ }
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ wait_for_completion(&fcomp.comp);
+ drvinfo->cee_status = fcomp.comp_status;
+
+ /* Retrieve flash partition info */
+ fcomp.comp_status = 0;
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ ret = bfa_nw_flash_get_attr(&bnad->bna.flash, &drvinfo->flash_attr,
+ bnad_cb_completion, &fcomp);
+ if (ret != BFA_STATUS_OK) {
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ goto out;
+ }
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ wait_for_completion(&fcomp.comp);
+ drvinfo->flash_status = fcomp.comp_status;
+out:
+ return ret;
+}
+
+static int
+bnad_debugfs_open_drvinfo(struct inode *inode, struct file *file)
+{
+ struct bnad *bnad = inode->i_private;
+ struct bnad_debug_info *drv_info;
+ int rc;
+
+ drv_info = kzalloc(sizeof(struct bnad_debug_info), GFP_KERNEL);
+ if (!drv_info)
+ return -ENOMEM;
+
+ drv_info->buffer_len = sizeof(struct bnad_drvinfo);
+
+ drv_info->debug_buffer = kzalloc(drv_info->buffer_len, GFP_KERNEL);
+ if (!drv_info->debug_buffer) {
+ kfree(drv_info);
+ drv_info = NULL;
+ pr_warn("bna %s: Failed to allocate drv info buffer\n",
+ pci_name(bnad->pcidev));
+ return -ENOMEM;
+ }
+
+ mutex_lock(&bnad->conf_mutex);
+ rc = bnad_get_debug_drvinfo(bnad, drv_info->debug_buffer,
+ drv_info->buffer_len);
+ mutex_unlock(&bnad->conf_mutex);
+ if (rc != BFA_STATUS_OK) {
+ kfree(drv_info->debug_buffer);
+ drv_info->debug_buffer = NULL;
+ kfree(drv_info);
+ drv_info = NULL;
+ pr_warn("bna %s: Failed to collect drvinfo\n",
+ pci_name(bnad->pcidev));
+ return -ENOMEM;
+ }
+
+ file->private_data = drv_info;
+
+ return 0;
+}
+
+/* Changes the current file position */
+static loff_t
+bnad_debugfs_lseek(struct file *file, loff_t offset, int orig)
+{
+ loff_t pos = file->f_pos;
+ struct bnad_debug_info *debug = file->private_data;
+
+ if (!debug)
+ return -EINVAL;
+
+ switch (orig) {
+ case 0:
+ file->f_pos = offset;
+ break;
+ case 1:
+ file->f_pos += offset;
+ break;
+ case 2:
+ file->f_pos = debug->buffer_len - offset;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (file->f_pos < 0 || file->f_pos > debug->buffer_len) {
+ file->f_pos = pos;
+ return -EINVAL;
+ }
+
+ return file->f_pos;
+}
+
+static ssize_t
+bnad_debugfs_read(struct file *file, char __user *buf,
+ size_t nbytes, loff_t *pos)
+{
+ struct bnad_debug_info *debug = file->private_data;
+
+ if (!debug || !debug->debug_buffer)
+ return 0;
+
+ return simple_read_from_buffer(buf, nbytes, pos,
+ debug->debug_buffer, debug->buffer_len);
+}
+
+#define BFA_REG_CT_ADDRSZ (0x40000)
+#define BFA_REG_CB_ADDRSZ (0x20000)
+#define BFA_REG_ADDRSZ(__ioc) \
+ ((u32)(bfa_asic_id_ctc(bfa_ioc_devid(__ioc)) ? \
+ BFA_REG_CT_ADDRSZ : BFA_REG_CB_ADDRSZ))
+#define BFA_REG_ADDRMSK(__ioc) (BFA_REG_ADDRSZ(__ioc) - 1)
+
+/*
+ * Function to check if the register offset passed is valid.
+ */
+static int
+bna_reg_offset_check(struct bfa_ioc *ioc, u32 offset, u32 len)
+{
+ u8 area;
+
+ /* check [16:15] */
+ area = (offset >> 15) & 0x7;
+ if (area == 0) {
+ /* PCIe core register */
+ if ((offset + (len<<2)) > 0x8000) /* 8k dwords or 32KB */
+ return BFA_STATUS_EINVAL;
+ } else if (area == 0x1) {
+ /* CB 32 KB memory page */
+ if ((offset + (len<<2)) > 0x10000) /* 8k dwords or 32KB */
+ return BFA_STATUS_EINVAL;
+ } else {
+ /* CB register space 64KB */
+ if ((offset + (len<<2)) > BFA_REG_ADDRMSK(ioc))
+ return BFA_STATUS_EINVAL;
+ }
+ return BFA_STATUS_OK;
+}
+
+static ssize_t
+bnad_debugfs_read_regrd(struct file *file, char __user *buf,
+ size_t nbytes, loff_t *pos)
+{
+ struct bnad_debug_info *regrd_debug = file->private_data;
+ struct bnad *bnad = (struct bnad *)regrd_debug->i_private;
+ ssize_t rc;
+
+ if (!bnad->regdata)
+ return 0;
+
+ rc = simple_read_from_buffer(buf, nbytes, pos,
+ bnad->regdata, bnad->reglen);
+
+ if ((*pos + nbytes) >= bnad->reglen) {
+ kfree(bnad->regdata);
+ bnad->regdata = NULL;
+ bnad->reglen = 0;
+ }
+
+ return rc;
+}
+
+static ssize_t
+bnad_debugfs_write_regrd(struct file *file, const char __user *buf,
+ size_t nbytes, loff_t *ppos)
+{
+ struct bnad_debug_info *regrd_debug = file->private_data;
+ struct bnad *bnad = (struct bnad *)regrd_debug->i_private;
+ struct bfa_ioc *ioc = &bnad->bna.ioceth.ioc;
+ int addr, len, rc, i;
+ u32 *regbuf;
+ void __iomem *rb, *reg_addr;
+ unsigned long flags;
+ void *kern_buf;
+
+ /* Allocate memory to store the user space buf */
+ kern_buf = kzalloc(nbytes, GFP_KERNEL);
+ if (!kern_buf) {
+ pr_warn("bna %s: Failed to allocate user buffer\n",
+ pci_name(bnad->pcidev));
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(kern_buf, (void __user *)buf, nbytes)) {
+ kfree(kern_buf);
+ return -ENOMEM;
+ }
+
+ rc = sscanf(kern_buf, "%x:%x", &addr, &len);
+ if (rc < 2) {
+ pr_warn("bna %s: Failed to read user buffer\n",
+ pci_name(bnad->pcidev));
+ kfree(kern_buf);
+ return -EINVAL;
+ }
+
+ kfree(kern_buf);
+ kfree(bnad->regdata);
+ bnad->regdata = NULL;
+ bnad->reglen = 0;
+
+ bnad->regdata = kzalloc(len << 2, GFP_KERNEL);
+ if (!bnad->regdata) {
+ pr_warn("bna %s: Failed to allocate regrd buffer\n",
+ pci_name(bnad->pcidev));
+ return -ENOMEM;
+ }
+
+ bnad->reglen = len << 2;
+ rb = bfa_ioc_bar0(ioc);
+ addr &= BFA_REG_ADDRMSK(ioc);
+
+ /* offset and len sanity check */
+ rc = bna_reg_offset_check(ioc, addr, len);
+ if (rc) {
+ pr_warn("bna %s: Failed reg offset check\n",
+ pci_name(bnad->pcidev));
+ kfree(bnad->regdata);
+ bnad->regdata = NULL;
+ bnad->reglen = 0;
+ return -EINVAL;
+ }
+
+ reg_addr = rb + addr;
+ regbuf = (u32 *)bnad->regdata;
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ for (i = 0; i < len; i++) {
+ *regbuf = readl(reg_addr);
+ regbuf++;
+ reg_addr += sizeof(u32);
+ }
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ return nbytes;
+}
+
+static ssize_t
+bnad_debugfs_write_regwr(struct file *file, const char __user *buf,
+ size_t nbytes, loff_t *ppos)
+{
+ struct bnad_debug_info *debug = file->private_data;
+ struct bnad *bnad = (struct bnad *)debug->i_private;
+ struct bfa_ioc *ioc = &bnad->bna.ioceth.ioc;
+ int addr, val, rc;
+ void __iomem *reg_addr;
+ unsigned long flags;
+ void *kern_buf;
+
+ /* Allocate memory to store the user space buf */
+ kern_buf = kzalloc(nbytes, GFP_KERNEL);
+ if (!kern_buf) {
+ pr_warn("bna %s: Failed to allocate user buffer\n",
+ pci_name(bnad->pcidev));
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(kern_buf, (void __user *)buf, nbytes)) {
+ kfree(kern_buf);
+ return -ENOMEM;
+ }
+
+ rc = sscanf(kern_buf, "%x:%x", &addr, &val);
+ if (rc < 2) {
+ pr_warn("bna %s: Failed to read user buffer\n",
+ pci_name(bnad->pcidev));
+ kfree(kern_buf);
+ return -EINVAL;
+ }
+ kfree(kern_buf);
+
+ addr &= BFA_REG_ADDRMSK(ioc); /* offset is only 17 bits and word aligned */
+
+ /* offset and len sanity check */
+ rc = bna_reg_offset_check(ioc, addr, 1);
+ if (rc) {
+ pr_warn("bna %s: Failed reg offset check\n",
+ pci_name(bnad->pcidev));
+ return -EINVAL;
+ }
+
+ reg_addr = (bfa_ioc_bar0(ioc)) + addr;
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ writel(val, reg_addr);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ return nbytes;
+}
+
+static int
+bnad_debugfs_release(struct inode *inode, struct file *file)
+{
+ struct bnad_debug_info *debug = file->private_data;
+
+ if (!debug)
+ return 0;
+
+ file->private_data = NULL;
+ kfree(debug);
+ return 0;
+}
+
+static int
+bnad_debugfs_buffer_release(struct inode *inode, struct file *file)
+{
+ struct bnad_debug_info *debug = file->private_data;
+
+ if (!debug)
+ return 0;
+
+ kfree(debug->debug_buffer);
+
+ file->private_data = NULL;
+ kfree(debug);
+ debug = NULL;
+ return 0;
+}
+
+static const struct file_operations bnad_debugfs_op_fwtrc = {
+ .owner = THIS_MODULE,
+ .open = bnad_debugfs_open_fwtrc,
+ .llseek = bnad_debugfs_lseek,
+ .read = bnad_debugfs_read,
+ .release = bnad_debugfs_buffer_release,
+};
+
+static const struct file_operations bnad_debugfs_op_fwsave = {
+ .owner = THIS_MODULE,
+ .open = bnad_debugfs_open_fwsave,
+ .llseek = bnad_debugfs_lseek,
+ .read = bnad_debugfs_read,
+ .release = bnad_debugfs_buffer_release,
+};
+
+static const struct file_operations bnad_debugfs_op_regrd = {
+ .owner = THIS_MODULE,
+ .open = bnad_debugfs_open_reg,
+ .llseek = bnad_debugfs_lseek,
+ .read = bnad_debugfs_read_regrd,
+ .write = bnad_debugfs_write_regrd,
+ .release = bnad_debugfs_release,
+};
+
+static const struct file_operations bnad_debugfs_op_regwr = {
+ .owner = THIS_MODULE,
+ .open = bnad_debugfs_open_reg,
+ .llseek = bnad_debugfs_lseek,
+ .write = bnad_debugfs_write_regwr,
+ .release = bnad_debugfs_release,
+};
+
+static const struct file_operations bnad_debugfs_op_drvinfo = {
+ .owner = THIS_MODULE,
+ .open = bnad_debugfs_open_drvinfo,
+ .llseek = bnad_debugfs_lseek,
+ .read = bnad_debugfs_read,
+ .release = bnad_debugfs_buffer_release,
+};
+
+struct bnad_debugfs_entry {
+ const char *name;
+ mode_t mode;
+ const struct file_operations *fops;
+};
+
+static const struct bnad_debugfs_entry bnad_debugfs_files[] = {
+ { "fwtrc", S_IFREG|S_IRUGO, &bnad_debugfs_op_fwtrc, },
+ { "fwsave", S_IFREG|S_IRUGO, &bnad_debugfs_op_fwsave, },
+ { "regrd", S_IFREG|S_IRUGO|S_IWUSR, &bnad_debugfs_op_regrd, },
+ { "regwr", S_IFREG|S_IWUSR, &bnad_debugfs_op_regwr, },
+ { "drvinfo", S_IFREG|S_IRUGO, &bnad_debugfs_op_drvinfo, },
+};
+
+static struct dentry *bna_debugfs_root;
+static atomic_t bna_debugfs_port_count;
+
+/* Initialize debugfs interface for BNA */
+void
+bnad_debugfs_init(struct bnad *bnad)
+{
+ const struct bnad_debugfs_entry *file;
+ char name[64];
+ int i;
+
+ /* Setup the BNA debugfs root directory */
+ if (!bna_debugfs_root) {
+ bna_debugfs_root = debugfs_create_dir("bna", NULL);
+ atomic_set(&bna_debugfs_port_count, 0);
+ if (!bna_debugfs_root) {
+ pr_warn("BNA: debugfs root dir creation failed\n");
+ return;
+ }
+ }
+
+ /* Setup the pci_dev debugfs directory for the port */
+ snprintf(name, sizeof(name), "pci_dev:%s", pci_name(bnad->pcidev));
+ if (!bnad->port_debugfs_root) {
+ bnad->port_debugfs_root =
+ debugfs_create_dir(name, bna_debugfs_root);
+ if (!bnad->port_debugfs_root) {
+ pr_warn("bna pci_dev %s: root dir creation failed\n",
+ pci_name(bnad->pcidev));
+ return;
+ }
+
+ atomic_inc(&bna_debugfs_port_count);
+
+ for (i = 0; i < ARRAY_SIZE(bnad_debugfs_files); i++) {
+ file = &bnad_debugfs_files[i];
+ bnad->bnad_dentry_files[i] =
+ debugfs_create_file(file->name,
+ file->mode,
+ bnad->port_debugfs_root,
+ bnad,
+ file->fops);
+ if (!bnad->bnad_dentry_files[i]) {
+ pr_warn(
+ "BNA pci_dev:%s: create %s entry failed\n",
+ pci_name(bnad->pcidev), file->name);
+ return;
+ }
+ }
+ }
+}
+
+/* Uninitialize debugfs interface for BNA */
+void
+bnad_debugfs_uninit(struct bnad *bnad)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(bnad_debugfs_files); i++) {
+ if (bnad->bnad_dentry_files[i]) {
+ debugfs_remove(bnad->bnad_dentry_files[i]);
+ bnad->bnad_dentry_files[i] = NULL;
+ }
+ }
+
+ /* Remove the pci_dev debugfs directory for the port */
+ if (bnad->port_debugfs_root) {
+ debugfs_remove(bnad->port_debugfs_root);
+ bnad->port_debugfs_root = NULL;
+ atomic_dec(&bna_debugfs_port_count);
+ }
+
+ /* Remove the BNA debugfs root directory */
+ if (atomic_read(&bna_debugfs_port_count) == 0) {
+ debugfs_remove(bna_debugfs_root);
+ bna_debugfs_root = NULL;
+ }
+}
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
index fd3dcc1e9145..9b44ec8096ba 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -38,7 +38,7 @@
sizeof(struct bnad_drv_stats) / sizeof(u64) + \
offsetof(struct bfi_enet_stats, rxf_stats[0]) / sizeof(u64))
-static char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
+static const char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
"rx_packets",
"tx_packets",
"rx_bytes",
@@ -296,8 +296,8 @@ bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
struct bfa_ioc_attr *ioc_attr;
unsigned long flags;
- strcpy(drvinfo->driver, BNAD_NAME);
- strcpy(drvinfo->version, BNAD_VERSION);
+ strlcpy(drvinfo->driver, BNAD_NAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, BNAD_VERSION, sizeof(drvinfo->version));
ioc_attr = kzalloc(sizeof(*ioc_attr), GFP_KERNEL);
if (ioc_attr) {
@@ -305,12 +305,13 @@ bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
bfa_nw_ioc_get_attr(&bnad->bna.ioceth.ioc, ioc_attr);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
- strncpy(drvinfo->fw_version, ioc_attr->adapter_attr.fw_ver,
- sizeof(drvinfo->fw_version) - 1);
+ strlcpy(drvinfo->fw_version, ioc_attr->adapter_attr.fw_ver,
+ sizeof(drvinfo->fw_version));
kfree(ioc_attr);
}
- strncpy(drvinfo->bus_info, pci_name(bnad->pcidev), ETHTOOL_BUSINFO_LEN);
+ strlcpy(drvinfo->bus_info, pci_name(bnad->pcidev),
+ sizeof(drvinfo->bus_info));
}
static void
@@ -934,7 +935,144 @@ bnad_get_sset_count(struct net_device *netdev, int sset)
}
}
-static struct ethtool_ops bnad_ethtool_ops = {
+static u32
+bnad_get_flash_partition_by_offset(struct bnad *bnad, u32 offset,
+ u32 *base_offset)
+{
+ struct bfa_flash_attr *flash_attr;
+ struct bnad_iocmd_comp fcomp;
+ u32 i, flash_part = 0, ret;
+ unsigned long flags = 0;
+
+ flash_attr = kzalloc(sizeof(struct bfa_flash_attr), GFP_KERNEL);
+ if (!flash_attr)
+ return -ENOMEM;
+
+ fcomp.bnad = bnad;
+ fcomp.comp_status = 0;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ ret = bfa_nw_flash_get_attr(&bnad->bna.flash, flash_attr,
+ bnad_cb_completion, &fcomp);
+ if (ret != BFA_STATUS_OK) {
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ kfree(flash_attr);
+ goto out_err;
+ }
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ wait_for_completion(&fcomp.comp);
+ ret = fcomp.comp_status;
+
+ /* Check for the flash type & base offset value */
+ if (ret == BFA_STATUS_OK) {
+ for (i = 0; i < flash_attr->npart; i++) {
+ if (offset >= flash_attr->part[i].part_off &&
+ offset < (flash_attr->part[i].part_off +
+ flash_attr->part[i].part_size)) {
+ flash_part = flash_attr->part[i].part_type;
+ *base_offset = flash_attr->part[i].part_off;
+ break;
+ }
+ }
+ }
+ kfree(flash_attr);
+ return flash_part;
+out_err:
+ return -EINVAL;
+}
+
+static int
+bnad_get_eeprom_len(struct net_device *netdev)
+{
+ return BFA_TOTAL_FLASH_SIZE;
+}
+
+static int
+bnad_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
+ u8 *bytes)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ struct bnad_iocmd_comp fcomp;
+ u32 flash_part = 0, base_offset = 0;
+ unsigned long flags = 0;
+ int ret = 0;
+
+ /* Check if the flash read request is valid */
+ if (eeprom->magic != (bnad->pcidev->vendor |
+ (bnad->pcidev->device << 16)))
+ return -EFAULT;
+
+ /* Query the flash partition based on the offset */
+ flash_part = bnad_get_flash_partition_by_offset(bnad,
+ eeprom->offset, &base_offset);
+ if (flash_part <= 0)
+ return -EFAULT;
+
+ fcomp.bnad = bnad;
+ fcomp.comp_status = 0;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ ret = bfa_nw_flash_read_part(&bnad->bna.flash, flash_part,
+ bnad->id, bytes, eeprom->len,
+ eeprom->offset - base_offset,
+ bnad_cb_completion, &fcomp);
+ if (ret != BFA_STATUS_OK) {
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ goto done;
+ }
+
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ wait_for_completion(&fcomp.comp);
+ ret = fcomp.comp_status;
+done:
+ return ret;
+}
+
+static int
+bnad_set_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
+ u8 *bytes)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ struct bnad_iocmd_comp fcomp;
+ u32 flash_part = 0, base_offset = 0;
+ unsigned long flags = 0;
+ int ret = 0;
+
+ /* Check if the flash update request is valid */
+ if (eeprom->magic != (bnad->pcidev->vendor |
+ (bnad->pcidev->device << 16)))
+ return -EINVAL;
+
+ /* Query the flash partition based on the offset */
+ flash_part = bnad_get_flash_partition_by_offset(bnad,
+ eeprom->offset, &base_offset);
+ if (flash_part <= 0)
+ return -EFAULT;
+
+ fcomp.bnad = bnad;
+ fcomp.comp_status = 0;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ ret = bfa_nw_flash_update_part(&bnad->bna.flash, flash_part,
+ bnad->id, bytes, eeprom->len,
+ eeprom->offset - base_offset,
+ bnad_cb_completion, &fcomp);
+ if (ret != BFA_STATUS_OK) {
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ goto done;
+ }
+
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ wait_for_completion(&fcomp.comp);
+ ret = fcomp.comp_status;
+done:
+ return ret;
+}
+
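+/*
+ * These handlers back the standard ethtool EEPROM interface ("ethtool -e" to
+ * read, "ethtool -E" to write); the magic checked above is the PCI device id
+ * in the upper 16 bits and the PCI vendor id in the lower 16 bits.
+ */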
+static const struct ethtool_ops bnad_ethtool_ops = {
.get_settings = bnad_get_settings,
.set_settings = bnad_set_settings,
.get_drvinfo = bnad_get_drvinfo,
@@ -948,7 +1086,10 @@ static struct ethtool_ops bnad_ethtool_ops = {
.set_pauseparam = bnad_set_pauseparam,
.get_strings = bnad_get_strings,
.get_ethtool_stats = bnad_get_ethtool_stats,
- .get_sset_count = bnad_get_sset_count
+ .get_sset_count = bnad_get_sset_count,
+ .get_eeprom_len = bnad_get_eeprom_len,
+ .get_eeprom = bnad_get_eeprom,
+ .set_eeprom = bnad_set_eeprom,
};
void
diff --git a/drivers/net/ethernet/brocade/bna/cna.h b/drivers/net/ethernet/brocade/bna/cna.h
index 1b3e90dfbd9a..32e8f178ab76 100644
--- a/drivers/net/ethernet/brocade/bna/cna.h
+++ b/drivers/net/ethernet/brocade/bna/cna.h
@@ -43,8 +43,7 @@ extern char bfa_version[];
#pragma pack(1)
-#define MAC_ADDRLEN (6)
-typedef struct mac { u8 mac[MAC_ADDRLEN]; } mac_t;
+typedef struct mac { u8 mac[ETH_ALEN]; } mac_t;
#pragma pack()
diff --git a/drivers/net/ethernet/brocade/bna/cna_fwimg.c b/drivers/net/ethernet/brocade/bna/cna_fwimg.c
index 725b9fff337f..cfc22a64157e 100644
--- a/drivers/net/ethernet/brocade/bna/cna_fwimg.c
+++ b/drivers/net/ethernet/brocade/bna/cna_fwimg.c
@@ -16,6 +16,7 @@
* www.brocade.com
*/
#include <linux/firmware.h>
+#include "bnad.h"
#include "bfi.h"
#include "cna.h"
diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig
index b48378a41e49..db931916da08 100644
--- a/drivers/net/ethernet/cadence/Kconfig
+++ b/drivers/net/ethernet/cadence/Kconfig
@@ -5,8 +5,8 @@
config HAVE_NET_MACB
bool
-config NET_ATMEL
- bool "Atmel devices"
+config NET_CADENCE
+ bool "Cadence devices"
default y
depends on HAVE_NET_MACB || (ARM && ARCH_AT91RM9200)
---help---
@@ -21,7 +21,7 @@ config NET_ATMEL
the remaining Atmel network card questions. If you say Y, you will be
asked for your specific card in the following questions.
-if NET_ATMEL
+if NET_CADENCE
config ARM_AT91_ETHER
tristate "AT91RM9200 Ethernet support"
@@ -33,14 +33,16 @@ config ARM_AT91_ETHER
ethernet support, then you should always answer Y to this.
config MACB
- tristate "Atmel MACB support"
+ tristate "Cadence MACB/GEM support"
depends on HAVE_NET_MACB
select PHYLIB
---help---
- The Atmel MACB ethernet interface is found on many AT32 and AT91
- parts. Say Y to include support for the MACB chip.
+ The Cadence MACB ethernet interface is found on many Atmel AT32 and
+ AT91 parts. This driver also supports the Cadence GEM (Gigabit
+ Ethernet MAC found in some ARM SoC devices). Note: the Gigabit mode
+ is not yet supported. Say Y to include support for the MACB/GEM chip.
To compile this driver as a module, choose M here: the module
will be called macb.
-endif # NET_ATMEL
+endif # NET_CADENCE
diff --git a/drivers/net/ethernet/cadence/at91_ether.c b/drivers/net/ethernet/cadence/at91_ether.c
index 56624d303487..1a5b6efa0120 100644
--- a/drivers/net/ethernet/cadence/at91_ether.c
+++ b/drivers/net/ethernet/cadence/at91_ether.c
@@ -26,6 +26,7 @@
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/ethtool.h>
+#include <linux/platform_data/macb.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/gfp.h>
@@ -255,8 +256,7 @@ static void enable_phyirq(struct net_device *dev)
unsigned int dsintr, irq_number;
int status;
- irq_number = lp->board_data.phy_irq_pin;
- if (!irq_number) {
+ if (!gpio_is_valid(lp->board_data.phy_irq_pin)) {
/*
* PHY doesn't have an IRQ pin (RTL8201, DP83847, AC101L),
* or board does not have it connected.
@@ -265,6 +265,7 @@ static void enable_phyirq(struct net_device *dev)
return;
}
+ irq_number = lp->board_data.phy_irq_pin;
status = request_irq(irq_number, at91ether_phy_interrupt, 0, dev->name, dev);
if (status) {
printk(KERN_ERR "at91_ether: PHY IRQ %d request failed - status %d!\n", irq_number, status);
@@ -319,8 +320,7 @@ static void disable_phyirq(struct net_device *dev)
unsigned int dsintr;
unsigned int irq_number;
- irq_number = lp->board_data.phy_irq_pin;
- if (!irq_number) {
+ if (!gpio_is_valid(lp->board_data.phy_irq_pin)) {
del_timer_sync(&lp->check_timer);
return;
}
@@ -365,6 +365,7 @@ static void disable_phyirq(struct net_device *dev)
disable_mdi();
spin_unlock_irq(&lp->lock);
+ irq_number = lp->board_data.phy_irq_pin;
free_irq(irq_number, dev); /* Free interrupt handler */
}
@@ -984,7 +985,7 @@ static const struct net_device_ops at91ether_netdev_ops = {
static int __init at91ether_setup(unsigned long phy_type, unsigned short phy_address,
struct platform_device *pdev, struct clk *ether_clk)
{
- struct at91_eth_data *board_data = pdev->dev.platform_data;
+ struct macb_platform_data *board_data = pdev->dev.platform_data;
struct net_device *dev;
struct at91_private *lp;
unsigned int val;
@@ -1077,7 +1078,7 @@ static int __init at91ether_setup(unsigned long phy_type, unsigned short phy_add
netif_carrier_off(dev); /* will be enabled in open() */
/* If board has no PHY IRQ, use a timer to poll the PHY */
- if (!lp->board_data.phy_irq_pin) {
+ if (!gpio_is_valid(lp->board_data.phy_irq_pin)) {
init_timer(&lp->check_timer);
lp->check_timer.data = (unsigned long)dev;
lp->check_timer.function = at91ether_check_link;
@@ -1169,7 +1170,8 @@ static int __devexit at91ether_remove(struct platform_device *pdev)
struct net_device *dev = platform_get_drvdata(pdev);
struct at91_private *lp = netdev_priv(dev);
- if (lp->board_data.phy_irq_pin >= 32)
+ if (gpio_is_valid(lp->board_data.phy_irq_pin) &&
+ lp->board_data.phy_irq_pin >= 32)
gpio_free(lp->board_data.phy_irq_pin);
unregister_netdev(dev);
@@ -1188,11 +1190,12 @@ static int at91ether_suspend(struct platform_device *pdev, pm_message_t mesg)
{
struct net_device *net_dev = platform_get_drvdata(pdev);
struct at91_private *lp = netdev_priv(net_dev);
- int phy_irq = lp->board_data.phy_irq_pin;
if (netif_running(net_dev)) {
- if (phy_irq)
+ if (gpio_is_valid(lp->board_data.phy_irq_pin)) {
+ int phy_irq = lp->board_data.phy_irq_pin;
disable_irq(phy_irq);
+ }
netif_stop_queue(net_dev);
netif_device_detach(net_dev);
@@ -1206,7 +1209,6 @@ static int at91ether_resume(struct platform_device *pdev)
{
struct net_device *net_dev = platform_get_drvdata(pdev);
struct at91_private *lp = netdev_priv(net_dev);
- int phy_irq = lp->board_data.phy_irq_pin;
if (netif_running(net_dev)) {
clk_enable(lp->ether_clk);
@@ -1214,8 +1216,10 @@ static int at91ether_resume(struct platform_device *pdev)
netif_device_attach(net_dev);
netif_start_queue(net_dev);
- if (phy_irq)
+ if (gpio_is_valid(lp->board_data.phy_irq_pin)) {
+ int phy_irq = lp->board_data.phy_irq_pin;
enable_irq(phy_irq);
+ }
}
return 0;
}
diff --git a/drivers/net/ethernet/cadence/at91_ether.h b/drivers/net/ethernet/cadence/at91_ether.h
index 353f4dab62be..3725fbb0defe 100644
--- a/drivers/net/ethernet/cadence/at91_ether.h
+++ b/drivers/net/ethernet/cadence/at91_ether.h
@@ -85,7 +85,9 @@ struct recv_desc_bufs
struct at91_private
{
struct mii_if_info mii; /* ethtool support */
- struct at91_eth_data board_data; /* board-specific configuration */
+	struct macb_platform_data board_data;	/* board-specific
+						 * configuration (shared with
+						 * macb for common data) */
struct clk *ether_clk; /* clock */
/* PHY */
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index a437b46e5490..23200680d4c1 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -1,5 +1,5 @@
/*
- * Atmel MACB Ethernet Controller driver
+ * Cadence MACB/GEM Ethernet Controller driver
*
* Copyright (C) 2004-2006 Atmel Corporation
*
@@ -8,6 +8,7 @@
* published by the Free Software Foundation.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -19,11 +20,12 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
+#include <linux/platform_data/macb.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
-
-#include <mach/board.h>
-#include <mach/cpu.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_net.h>
#include "macb.h"
@@ -60,9 +62,9 @@ static void __macb_set_hwaddr(struct macb *bp)
u16 top;
bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
- macb_writel(bp, SA1B, bottom);
+ macb_or_gem_writel(bp, SA1B, bottom);
top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
- macb_writel(bp, SA1T, top);
+ macb_or_gem_writel(bp, SA1T, top);
}
static void __init macb_get_hwaddr(struct macb *bp)
@@ -71,8 +73,8 @@ static void __init macb_get_hwaddr(struct macb *bp)
u16 top;
u8 addr[6];
- bottom = macb_readl(bp, SA1B);
- top = macb_readl(bp, SA1T);
+ bottom = macb_or_gem_readl(bp, SA1B);
+ top = macb_or_gem_readl(bp, SA1T);
addr[0] = bottom & 0xff;
addr[1] = (bottom >> 8) & 0xff;
@@ -84,7 +86,7 @@ static void __init macb_get_hwaddr(struct macb *bp)
if (is_valid_ether_addr(addr)) {
memcpy(bp->dev->dev_addr, addr, sizeof(addr));
} else {
- dev_info(&bp->pdev->dev, "invalid hw address, using random\n");
+ netdev_info(bp->dev, "invalid hw address, using random\n");
random_ether_addr(bp->dev->dev_addr);
}
}
@@ -178,11 +180,12 @@ static void macb_handle_link_change(struct net_device *dev)
if (status_change) {
if (phydev->link)
- printk(KERN_INFO "%s: link up (%d/%s)\n",
- dev->name, phydev->speed,
- DUPLEX_FULL == phydev->duplex ? "Full":"Half");
+ netdev_info(dev, "link up (%d/%s)\n",
+ phydev->speed,
+ phydev->duplex == DUPLEX_FULL ?
+ "Full" : "Half");
else
- printk(KERN_INFO "%s: link down\n", dev->name);
+ netdev_info(dev, "link down\n");
}
}
@@ -191,25 +194,21 @@ static int macb_mii_probe(struct net_device *dev)
{
struct macb *bp = netdev_priv(dev);
struct phy_device *phydev;
- struct eth_platform_data *pdata;
int ret;
phydev = phy_find_first(bp->mii_bus);
if (!phydev) {
- printk (KERN_ERR "%s: no PHY found\n", dev->name);
+ netdev_err(dev, "no PHY found\n");
return -1;
}
- pdata = bp->pdev->dev.platform_data;
/* TODO : add pin_irq */
/* attach the mac to the phy */
ret = phy_connect_direct(dev, phydev, &macb_handle_link_change, 0,
- pdata && pdata->is_rmii ?
- PHY_INTERFACE_MODE_RMII :
- PHY_INTERFACE_MODE_MII);
+ bp->phy_interface);
if (ret) {
- printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
+ netdev_err(dev, "Could not attach to PHY\n");
return ret;
}
@@ -228,7 +227,7 @@ static int macb_mii_probe(struct net_device *dev)
static int macb_mii_init(struct macb *bp)
{
- struct eth_platform_data *pdata;
+ struct macb_platform_data *pdata;
int err = -ENXIO, i;
/* Enable management port */
@@ -244,7 +243,8 @@ static int macb_mii_init(struct macb *bp)
bp->mii_bus->read = &macb_mdio_read;
bp->mii_bus->write = &macb_mdio_write;
bp->mii_bus->reset = &macb_mdio_reset;
- snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%x", bp->pdev->id);
+ snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+ bp->pdev->name, bp->pdev->id);
bp->mii_bus->priv = bp;
bp->mii_bus->parent = &bp->dev->dev;
pdata = bp->pdev->dev.platform_data;
@@ -285,8 +285,8 @@ err_out:
static void macb_update_stats(struct macb *bp)
{
u32 __iomem *reg = bp->regs + MACB_PFR;
- u32 *p = &bp->hw_stats.rx_pause_frames;
- u32 *end = &bp->hw_stats.tx_pause_frames + 1;
+ u32 *p = &bp->hw_stats.macb.rx_pause_frames;
+ u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);
@@ -303,14 +303,13 @@ static void macb_tx(struct macb *bp)
status = macb_readl(bp, TSR);
macb_writel(bp, TSR, status);
- dev_dbg(&bp->pdev->dev, "macb_tx status = %02lx\n",
- (unsigned long)status);
+ netdev_dbg(bp->dev, "macb_tx status = %02lx\n", (unsigned long)status);
if (status & (MACB_BIT(UND) | MACB_BIT(TSR_RLE))) {
int i;
- printk(KERN_ERR "%s: TX %s, resetting buffers\n",
- bp->dev->name, status & MACB_BIT(UND) ?
- "underrun" : "retry limit exceeded");
+ netdev_err(bp->dev, "TX %s, resetting buffers\n",
+ status & MACB_BIT(UND) ?
+ "underrun" : "retry limit exceeded");
/* Transfer ongoing, disable transmitter, to avoid confusion */
if (status & MACB_BIT(TGO))
@@ -369,8 +368,8 @@ static void macb_tx(struct macb *bp)
if (!(bufstat & MACB_BIT(TX_USED)))
break;
- dev_dbg(&bp->pdev->dev, "skb %u (data %p) TX complete\n",
- tail, skb->data);
+ netdev_dbg(bp->dev, "skb %u (data %p) TX complete\n",
+ tail, skb->data);
dma_unmap_single(&bp->pdev->dev, rp->mapping, skb->len,
DMA_TO_DEVICE);
bp->stats.tx_packets++;
@@ -395,8 +394,8 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
len = MACB_BFEXT(RX_FRMLEN, bp->rx_ring[last_frag].ctrl);
- dev_dbg(&bp->pdev->dev, "macb_rx_frame frags %u - %u (len %u)\n",
- first_frag, last_frag, len);
+ netdev_dbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
+ first_frag, last_frag, len);
skb = dev_alloc_skb(len + RX_OFFSET);
if (!skb) {
@@ -437,8 +436,8 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
bp->stats.rx_packets++;
bp->stats.rx_bytes += len;
- dev_dbg(&bp->pdev->dev, "received skb of length %u, csum: %08x\n",
- skb->len, skb->csum);
+ netdev_dbg(bp->dev, "received skb of length %u, csum: %08x\n",
+ skb->len, skb->csum);
netif_receive_skb(skb);
return 0;
@@ -515,8 +514,8 @@ static int macb_poll(struct napi_struct *napi, int budget)
work_done = 0;
- dev_dbg(&bp->pdev->dev, "poll: status = %08lx, budget = %d\n",
- (unsigned long)status, budget);
+ netdev_dbg(bp->dev, "poll: status = %08lx, budget = %d\n",
+ (unsigned long)status, budget);
work_done = macb_rx(bp, budget);
if (work_done < budget) {
@@ -565,8 +564,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
macb_writel(bp, IDR, MACB_RX_INT_FLAGS);
if (napi_schedule_prep(&bp->napi)) {
- dev_dbg(&bp->pdev->dev,
- "scheduling RX softirq\n");
+ netdev_dbg(bp->dev, "scheduling RX softirq\n");
__napi_schedule(&bp->napi);
}
}
@@ -582,16 +580,19 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
if (status & MACB_BIT(ISR_ROVR)) {
/* We missed at least one packet */
- bp->hw_stats.rx_overruns++;
+ if (macb_is_gem(bp))
+ bp->hw_stats.gem.rx_overruns++;
+ else
+ bp->hw_stats.macb.rx_overruns++;
}
if (status & MACB_BIT(HRESP)) {
/*
- * TODO: Reset the hardware, and maybe move the printk
- * to a lower-priority context as well (work queue?)
+ * TODO: Reset the hardware, and maybe move the
+ * netdev_err to a lower-priority context as well
+ * (work queue?)
*/
- printk(KERN_ERR "%s: DMA bus error: HRESP not OK\n",
- dev->name);
+ netdev_err(dev, "DMA bus error: HRESP not OK\n");
}
status = macb_readl(bp, ISR);
@@ -626,16 +627,12 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned long flags;
#ifdef DEBUG
- int i;
- dev_dbg(&bp->pdev->dev,
- "start_xmit: len %u head %p data %p tail %p end %p\n",
- skb->len, skb->head, skb->data,
- skb_tail_pointer(skb), skb_end_pointer(skb));
- dev_dbg(&bp->pdev->dev,
- "data:");
- for (i = 0; i < 16; i++)
- printk(" %02x", (unsigned int)skb->data[i]);
- printk("\n");
+ netdev_dbg(bp->dev,
+ "start_xmit: len %u head %p data %p tail %p end %p\n",
+ skb->len, skb->head, skb->data,
+ skb_tail_pointer(skb), skb_end_pointer(skb));
+ print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1,
+ skb->data, 16, true);
#endif
len = skb->len;
@@ -645,21 +642,20 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (TX_BUFFS_AVAIL(bp) < 1) {
netif_stop_queue(dev);
spin_unlock_irqrestore(&bp->lock, flags);
- dev_err(&bp->pdev->dev,
- "BUG! Tx Ring full when queue awake!\n");
- dev_dbg(&bp->pdev->dev, "tx_head = %u, tx_tail = %u\n",
- bp->tx_head, bp->tx_tail);
+ netdev_err(bp->dev, "BUG! Tx Ring full when queue awake!\n");
+ netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
+ bp->tx_head, bp->tx_tail);
return NETDEV_TX_BUSY;
}
entry = bp->tx_head;
- dev_dbg(&bp->pdev->dev, "Allocated ring entry %u\n", entry);
+ netdev_dbg(bp->dev, "Allocated ring entry %u\n", entry);
mapping = dma_map_single(&bp->pdev->dev, skb->data,
len, DMA_TO_DEVICE);
bp->tx_skb[entry].skb = skb;
bp->tx_skb[entry].mapping = mapping;
- dev_dbg(&bp->pdev->dev, "Mapped skb data %p to DMA addr %08lx\n",
- skb->data, (unsigned long)mapping);
+ netdev_dbg(bp->dev, "Mapped skb data %p to DMA addr %08lx\n",
+ skb->data, (unsigned long)mapping);
ctrl = MACB_BF(TX_FRMLEN, len);
ctrl |= MACB_BIT(TX_LAST);
@@ -723,27 +719,27 @@ static int macb_alloc_consistent(struct macb *bp)
&bp->rx_ring_dma, GFP_KERNEL);
if (!bp->rx_ring)
goto out_err;
- dev_dbg(&bp->pdev->dev,
- "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
- size, (unsigned long)bp->rx_ring_dma, bp->rx_ring);
+ netdev_dbg(bp->dev,
+ "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
+ size, (unsigned long)bp->rx_ring_dma, bp->rx_ring);
size = TX_RING_BYTES;
bp->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
&bp->tx_ring_dma, GFP_KERNEL);
if (!bp->tx_ring)
goto out_err;
- dev_dbg(&bp->pdev->dev,
- "Allocated TX ring of %d bytes at %08lx (mapped %p)\n",
- size, (unsigned long)bp->tx_ring_dma, bp->tx_ring);
+ netdev_dbg(bp->dev,
+ "Allocated TX ring of %d bytes at %08lx (mapped %p)\n",
+ size, (unsigned long)bp->tx_ring_dma, bp->tx_ring);
size = RX_RING_SIZE * RX_BUFFER_SIZE;
bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
&bp->rx_buffers_dma, GFP_KERNEL);
if (!bp->rx_buffers)
goto out_err;
- dev_dbg(&bp->pdev->dev,
- "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
- size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers);
+ netdev_dbg(bp->dev,
+ "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
+ size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers);
return 0;
@@ -797,6 +793,84 @@ static void macb_reset_hw(struct macb *bp)
macb_readl(bp, ISR);
}
+static u32 gem_mdc_clk_div(struct macb *bp)
+{
+ u32 config;
+ unsigned long pclk_hz = clk_get_rate(bp->pclk);
+
+ if (pclk_hz <= 20000000)
+ config = GEM_BF(CLK, GEM_CLK_DIV8);
+ else if (pclk_hz <= 40000000)
+ config = GEM_BF(CLK, GEM_CLK_DIV16);
+ else if (pclk_hz <= 80000000)
+ config = GEM_BF(CLK, GEM_CLK_DIV32);
+ else if (pclk_hz <= 120000000)
+ config = GEM_BF(CLK, GEM_CLK_DIV48);
+ else if (pclk_hz <= 160000000)
+ config = GEM_BF(CLK, GEM_CLK_DIV64);
+ else
+ config = GEM_BF(CLK, GEM_CLK_DIV96);
+
+ return config;
+}
+
+static u32 macb_mdc_clk_div(struct macb *bp)
+{
+ u32 config;
+ unsigned long pclk_hz;
+
+ if (macb_is_gem(bp))
+ return gem_mdc_clk_div(bp);
+
+ pclk_hz = clk_get_rate(bp->pclk);
+ if (pclk_hz <= 20000000)
+ config = MACB_BF(CLK, MACB_CLK_DIV8);
+ else if (pclk_hz <= 40000000)
+ config = MACB_BF(CLK, MACB_CLK_DIV16);
+ else if (pclk_hz <= 80000000)
+ config = MACB_BF(CLK, MACB_CLK_DIV32);
+ else
+ config = MACB_BF(CLK, MACB_CLK_DIV64);
+
+ return config;
+}
+
+/*
+ * Get the DMA bus width field of the network configuration register that we
+ * should program. We find the width from decoding the design configuration
+ * register to find the maximum supported data bus width.
+ */
+static u32 macb_dbw(struct macb *bp)
+{
+ if (!macb_is_gem(bp))
+ return 0;
+
+ switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) {
+ case 4:
+ return GEM_BF(DBW, GEM_DBW128);
+ case 2:
+ return GEM_BF(DBW, GEM_DBW64);
+ case 1:
+ default:
+ return GEM_BF(DBW, GEM_DBW32);
+ }
+}
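/*
 * Illustrative sketch of the decode above (the DCFG1 value is hypothetical):
 *   GEM_BFEXT(DBWDEF, 0x04000000) == (0x04000000 >> 25) & 0x7 == 2,
 * so macb_dbw() returns GEM_BF(DBW, GEM_DBW64) == (1 & 0x3) << 21
 * == 0x00200000, selecting a 64-bit data bus in NCFGR.
 */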
+
+/*
+ * Configure the receive DMA engine to use the correct receive buffer size.
+ * This is a configurable parameter for GEM.
+ */
+static void macb_configure_dma(struct macb *bp)
+{
+ u32 dmacfg;
+
+ if (macb_is_gem(bp)) {
+ dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
+ dmacfg |= GEM_BF(RXBS, RX_BUFFER_SIZE / 64);
+ gem_writel(bp, DMACFG, dmacfg);
+ }
+}
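/*
 * Illustrative note: RXBS occupies bits [23:16] of DMACFG and counts the
 * receive buffer size in 64-byte units, so e.g. a 128-byte RX_BUFFER_SIZE
 * is programmed as GEM_BF(RXBS, 128 / 64) == 2 << 16 == 0x00020000.
 */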
+
static void macb_init_hw(struct macb *bp)
{
u32 config;
@@ -804,7 +878,7 @@ static void macb_init_hw(struct macb *bp)
macb_reset_hw(bp);
__macb_set_hwaddr(bp);
- config = macb_readl(bp, NCFGR) & MACB_BF(CLK, -1L);
+ config = macb_mdc_clk_div(bp);
config |= MACB_BIT(PAE); /* PAuse Enable */
config |= MACB_BIT(DRFCS); /* Discard Rx FCS */
config |= MACB_BIT(BIG); /* Receive oversized frames */
@@ -812,8 +886,11 @@ static void macb_init_hw(struct macb *bp)
config |= MACB_BIT(CAF); /* Copy All Frames */
if (!(bp->dev->flags & IFF_BROADCAST))
config |= MACB_BIT(NBC); /* No BroadCast */
+ config |= macb_dbw(bp);
macb_writel(bp, NCFGR, config);
+ macb_configure_dma(bp);
+
/* Initialize TX and RX buffers */
macb_writel(bp, RBQP, bp->rx_ring_dma);
macb_writel(bp, TBQP, bp->tx_ring_dma);
@@ -909,8 +986,8 @@ static void macb_sethashtable(struct net_device *dev)
mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
}
- macb_writel(bp, HRB, mc_filter[0]);
- macb_writel(bp, HRT, mc_filter[1]);
+ macb_or_gem_writel(bp, HRB, mc_filter[0]);
+ macb_or_gem_writel(bp, HRT, mc_filter[1]);
}
/*
@@ -932,8 +1009,8 @@ static void macb_set_rx_mode(struct net_device *dev)
if (dev->flags & IFF_ALLMULTI) {
/* Enable all multicast mode */
- macb_writel(bp, HRB, -1);
- macb_writel(bp, HRT, -1);
+ macb_or_gem_writel(bp, HRB, -1);
+ macb_or_gem_writel(bp, HRT, -1);
cfg |= MACB_BIT(NCFGR_MTI);
} else if (!netdev_mc_empty(dev)) {
/* Enable specific multicasts */
@@ -941,8 +1018,8 @@ static void macb_set_rx_mode(struct net_device *dev)
cfg |= MACB_BIT(NCFGR_MTI);
} else if (dev->flags & (~IFF_ALLMULTI)) {
/* Disable all multicast mode */
- macb_writel(bp, HRB, 0);
- macb_writel(bp, HRT, 0);
+ macb_or_gem_writel(bp, HRB, 0);
+ macb_or_gem_writel(bp, HRT, 0);
cfg &= ~MACB_BIT(NCFGR_MTI);
}
@@ -954,7 +1031,7 @@ static int macb_open(struct net_device *dev)
struct macb *bp = netdev_priv(dev);
int err;
- dev_dbg(&bp->pdev->dev, "open\n");
+ netdev_dbg(bp->dev, "open\n");
	/* if the PHY is not yet registered, retry later */
if (!bp->phy_dev)
@@ -965,9 +1042,8 @@ static int macb_open(struct net_device *dev)
err = macb_alloc_consistent(bp);
if (err) {
- printk(KERN_ERR
- "%s: Unable to allocate DMA memory (error %d)\n",
- dev->name, err);
+ netdev_err(dev, "Unable to allocate DMA memory (error %d)\n",
+ err);
return err;
}
@@ -1005,11 +1081,62 @@ static int macb_close(struct net_device *dev)
return 0;
}
+static void gem_update_stats(struct macb *bp)
+{
+ u32 __iomem *reg = bp->regs + GEM_OTX;
+ u32 *p = &bp->hw_stats.gem.tx_octets_31_0;
+ u32 *end = &bp->hw_stats.gem.rx_udp_checksum_errors + 1;
+
+ for (; p < end; p++, reg++)
+ *p += __raw_readl(reg);
+}
+
+static struct net_device_stats *gem_get_stats(struct macb *bp)
+{
+ struct gem_stats *hwstat = &bp->hw_stats.gem;
+ struct net_device_stats *nstat = &bp->stats;
+
+ gem_update_stats(bp);
+
+ nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
+ hwstat->rx_alignment_errors +
+ hwstat->rx_resource_errors +
+ hwstat->rx_overruns +
+ hwstat->rx_oversize_frames +
+ hwstat->rx_jabbers +
+ hwstat->rx_undersized_frames +
+ hwstat->rx_length_field_frame_errors);
+ nstat->tx_errors = (hwstat->tx_late_collisions +
+ hwstat->tx_excessive_collisions +
+ hwstat->tx_underrun +
+ hwstat->tx_carrier_sense_errors);
+ nstat->multicast = hwstat->rx_multicast_frames;
+ nstat->collisions = (hwstat->tx_single_collision_frames +
+ hwstat->tx_multiple_collision_frames +
+ hwstat->tx_excessive_collisions);
+ nstat->rx_length_errors = (hwstat->rx_oversize_frames +
+ hwstat->rx_jabbers +
+ hwstat->rx_undersized_frames +
+ hwstat->rx_length_field_frame_errors);
+ nstat->rx_over_errors = hwstat->rx_resource_errors;
+ nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors;
+ nstat->rx_frame_errors = hwstat->rx_alignment_errors;
+ nstat->rx_fifo_errors = hwstat->rx_overruns;
+ nstat->tx_aborted_errors = hwstat->tx_excessive_collisions;
+ nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors;
+ nstat->tx_fifo_errors = hwstat->tx_underrun;
+
+ return nstat;
+}
+
static struct net_device_stats *macb_get_stats(struct net_device *dev)
{
struct macb *bp = netdev_priv(dev);
struct net_device_stats *nstat = &bp->stats;
- struct macb_stats *hwstat = &bp->hw_stats;
+ struct macb_stats *hwstat = &bp->hw_stats.macb;
+
+ if (macb_is_gem(bp))
+ return gem_get_stats(bp);
/* read stats from hardware */
macb_update_stats(bp);
@@ -1117,14 +1244,59 @@ static const struct net_device_ops macb_netdev_ops = {
#endif
};
+#if defined(CONFIG_OF)
+static const struct of_device_id macb_dt_ids[] = {
+ { .compatible = "cdns,at32ap7000-macb" },
+ { .compatible = "cdns,at91sam9260-macb" },
+ { .compatible = "cdns,macb" },
+ { .compatible = "cdns,pc302-gem" },
+ { .compatible = "cdns,gem" },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, macb_dt_ids);
+
+static int __devinit macb_get_phy_mode_dt(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+
+ if (np)
+ return of_get_phy_mode(np);
+
+ return -ENODEV;
+}
+
+static int __devinit macb_get_hwaddr_dt(struct macb *bp)
+{
+ struct device_node *np = bp->pdev->dev.of_node;
+ if (np) {
+ const char *mac = of_get_mac_address(np);
+ if (mac) {
+ memcpy(bp->dev->dev_addr, mac, ETH_ALEN);
+ return 0;
+ }
+ }
+
+ return -ENODEV;
+}
+#else
+static int __devinit macb_get_phy_mode_dt(struct platform_device *pdev)
+{
+ return -ENODEV;
+}
+static int __devinit macb_get_hwaddr_dt(struct macb *bp)
+{
+ return -ENODEV;
+}
+#endif
+
static int __init macb_probe(struct platform_device *pdev)
{
- struct eth_platform_data *pdata;
+ struct macb_platform_data *pdata;
struct resource *regs;
struct net_device *dev;
struct macb *bp;
struct phy_device *phydev;
- unsigned long pclk_hz;
u32 config;
int err = -ENXIO;
@@ -1152,28 +1324,19 @@ static int __init macb_probe(struct platform_device *pdev)
spin_lock_init(&bp->lock);
-#if defined(CONFIG_ARCH_AT91)
- bp->pclk = clk_get(&pdev->dev, "macb_clk");
+ bp->pclk = clk_get(&pdev->dev, "pclk");
if (IS_ERR(bp->pclk)) {
dev_err(&pdev->dev, "failed to get macb_clk\n");
goto err_out_free_dev;
}
clk_enable(bp->pclk);
-#else
- bp->pclk = clk_get(&pdev->dev, "pclk");
- if (IS_ERR(bp->pclk)) {
- dev_err(&pdev->dev, "failed to get pclk\n");
- goto err_out_free_dev;
- }
+
bp->hclk = clk_get(&pdev->dev, "hclk");
if (IS_ERR(bp->hclk)) {
dev_err(&pdev->dev, "failed to get hclk\n");
goto err_out_put_pclk;
}
-
- clk_enable(bp->pclk);
clk_enable(bp->hclk);
-#endif
bp->regs = ioremap(regs->start, resource_size(regs));
if (!bp->regs) {
@@ -1185,9 +1348,8 @@ static int __init macb_probe(struct platform_device *pdev)
dev->irq = platform_get_irq(pdev, 0);
err = request_irq(dev->irq, macb_interrupt, 0, dev->name, dev);
if (err) {
- printk(KERN_ERR
- "%s: Unable to request IRQ %d (error %d)\n",
- dev->name, dev->irq, err);
+ dev_err(&pdev->dev, "Unable to request IRQ %d (error %d)\n",
+ dev->irq, err);
goto err_out_iounmap;
}
@@ -1198,31 +1360,37 @@ static int __init macb_probe(struct platform_device *pdev)
dev->base_addr = regs->start;
/* Set MII management clock divider */
- pclk_hz = clk_get_rate(bp->pclk);
- if (pclk_hz <= 20000000)
- config = MACB_BF(CLK, MACB_CLK_DIV8);
- else if (pclk_hz <= 40000000)
- config = MACB_BF(CLK, MACB_CLK_DIV16);
- else if (pclk_hz <= 80000000)
- config = MACB_BF(CLK, MACB_CLK_DIV32);
- else
- config = MACB_BF(CLK, MACB_CLK_DIV64);
+ config = macb_mdc_clk_div(bp);
+ config |= macb_dbw(bp);
macb_writel(bp, NCFGR, config);
- macb_get_hwaddr(bp);
- pdata = pdev->dev.platform_data;
+ err = macb_get_hwaddr_dt(bp);
+ if (err < 0)
+ macb_get_hwaddr(bp);
+
+ err = macb_get_phy_mode_dt(pdev);
+ if (err < 0) {
+ pdata = pdev->dev.platform_data;
+ if (pdata && pdata->is_rmii)
+ bp->phy_interface = PHY_INTERFACE_MODE_RMII;
+ else
+ bp->phy_interface = PHY_INTERFACE_MODE_MII;
+ } else {
+ bp->phy_interface = err;
+ }
- if (pdata && pdata->is_rmii)
+ if (bp->phy_interface == PHY_INTERFACE_MODE_RMII)
#if defined(CONFIG_ARCH_AT91)
- macb_writel(bp, USRIO, (MACB_BIT(RMII) | MACB_BIT(CLKEN)) );
+ macb_or_gem_writel(bp, USRIO, (MACB_BIT(RMII) |
+ MACB_BIT(CLKEN)));
#else
- macb_writel(bp, USRIO, 0);
+ macb_or_gem_writel(bp, USRIO, 0);
#endif
else
#if defined(CONFIG_ARCH_AT91)
- macb_writel(bp, USRIO, MACB_BIT(CLKEN));
+ macb_or_gem_writel(bp, USRIO, MACB_BIT(CLKEN));
#else
- macb_writel(bp, USRIO, MACB_BIT(MII));
+ macb_or_gem_writel(bp, USRIO, MACB_BIT(MII));
#endif
bp->tx_pending = DEF_TX_RING_PENDING;
@@ -1239,13 +1407,13 @@ static int __init macb_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dev);
- printk(KERN_INFO "%s: Atmel MACB at 0x%08lx irq %d (%pM)\n",
- dev->name, dev->base_addr, dev->irq, dev->dev_addr);
+ netdev_info(dev, "Cadence %s at 0x%08lx irq %d (%pM)\n",
+ macb_is_gem(bp) ? "GEM" : "MACB", dev->base_addr,
+ dev->irq, dev->dev_addr);
phydev = bp->phy_dev;
- printk(KERN_INFO "%s: attached PHY driver [%s] "
- "(mii_bus:phy_addr=%s, irq=%d)\n", dev->name,
- phydev->drv->name, dev_name(&phydev->dev), phydev->irq);
+ netdev_info(dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
+ phydev->drv->name, dev_name(&phydev->dev), phydev->irq);
return 0;
@@ -1256,14 +1424,10 @@ err_out_free_irq:
err_out_iounmap:
iounmap(bp->regs);
err_out_disable_clocks:
-#ifndef CONFIG_ARCH_AT91
clk_disable(bp->hclk);
clk_put(bp->hclk);
-#endif
clk_disable(bp->pclk);
-#ifndef CONFIG_ARCH_AT91
err_out_put_pclk:
-#endif
clk_put(bp->pclk);
err_out_free_dev:
free_netdev(dev);
@@ -1289,10 +1453,8 @@ static int __exit macb_remove(struct platform_device *pdev)
unregister_netdev(dev);
free_irq(dev->irq, dev);
iounmap(bp->regs);
-#ifndef CONFIG_ARCH_AT91
clk_disable(bp->hclk);
clk_put(bp->hclk);
-#endif
clk_disable(bp->pclk);
clk_put(bp->pclk);
free_netdev(dev);
@@ -1310,9 +1472,7 @@ static int macb_suspend(struct platform_device *pdev, pm_message_t state)
netif_device_detach(netdev);
-#ifndef CONFIG_ARCH_AT91
clk_disable(bp->hclk);
-#endif
clk_disable(bp->pclk);
return 0;
@@ -1324,9 +1484,7 @@ static int macb_resume(struct platform_device *pdev)
struct macb *bp = netdev_priv(netdev);
clk_enable(bp->pclk);
-#ifndef CONFIG_ARCH_AT91
clk_enable(bp->hclk);
-#endif
netif_device_attach(netdev);
@@ -1344,6 +1502,7 @@ static struct platform_driver macb_driver = {
.driver = {
.name = "macb",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(macb_dt_ids),
},
};
@@ -1361,6 +1520,6 @@ module_init(macb_init);
module_exit(macb_exit);
MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Atmel MACB Ethernet driver");
+MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_ALIAS("platform:macb");
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index d3212f6db703..335e288f5314 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -59,6 +59,24 @@
#define MACB_TPQ 0x00bc
#define MACB_USRIO 0x00c0
#define MACB_WOL 0x00c4
+#define MACB_MID 0x00fc
+
+/* GEM register offsets. */
+#define GEM_NCFGR 0x0004
+#define GEM_USRIO 0x000c
+#define GEM_DMACFG 0x0010
+#define GEM_HRB 0x0080
+#define GEM_HRT 0x0084
+#define GEM_SA1B 0x0088
+#define GEM_SA1T 0x008C
+#define GEM_OTX 0x0100
+#define GEM_DCFG1 0x0280
+#define GEM_DCFG2 0x0284
+#define GEM_DCFG3 0x0288
+#define GEM_DCFG4 0x028c
+#define GEM_DCFG5 0x0290
+#define GEM_DCFG6 0x0294
+#define GEM_DCFG7 0x0298
/* Bitfields in NCR */
#define MACB_LB_OFFSET 0
@@ -126,6 +144,21 @@
#define MACB_IRXFCS_OFFSET 19
#define MACB_IRXFCS_SIZE 1
+/* GEM specific NCFGR bitfields. */
+#define GEM_CLK_OFFSET 18
+#define GEM_CLK_SIZE 3
+#define GEM_DBW_OFFSET 21
+#define GEM_DBW_SIZE 2
+
+/* Constants for data bus width. */
+#define GEM_DBW32 0
+#define GEM_DBW64 1
+#define GEM_DBW128 2
+
+/* Bitfields in DMACFG. */
+#define GEM_RXBS_OFFSET 16
+#define GEM_RXBS_SIZE 8
+
/* Bitfields in NSR */
#define MACB_NSR_LINK_OFFSET 0
#define MACB_NSR_LINK_SIZE 1
@@ -228,12 +261,30 @@
#define MACB_WOL_MTI_OFFSET 19
#define MACB_WOL_MTI_SIZE 1
+/* Bitfields in MID */
+#define MACB_IDNUM_OFFSET 16
+#define MACB_IDNUM_SIZE 16
+#define MACB_REV_OFFSET 0
+#define MACB_REV_SIZE 16
+
+/* Bitfields in DCFG1. */
+#define GEM_DBWDEF_OFFSET 25
+#define GEM_DBWDEF_SIZE 3
+
/* Constants for CLK */
#define MACB_CLK_DIV8 0
#define MACB_CLK_DIV16 1
#define MACB_CLK_DIV32 2
#define MACB_CLK_DIV64 3
+/* GEM specific constants for CLK. */
+#define GEM_CLK_DIV8 0
+#define GEM_CLK_DIV16 1
+#define GEM_CLK_DIV32 2
+#define GEM_CLK_DIV48 3
+#define GEM_CLK_DIV64 4
+#define GEM_CLK_DIV96 5
+
/* Constants for MAN register */
#define MACB_MAN_SOF 1
#define MACB_MAN_WRITE 1
@@ -254,11 +305,52 @@
<< MACB_##name##_OFFSET)) \
| MACB_BF(name,value))
+#define GEM_BIT(name) \
+ (1 << GEM_##name##_OFFSET)
+#define GEM_BF(name, value) \
+ (((value) & ((1 << GEM_##name##_SIZE) - 1)) \
+ << GEM_##name##_OFFSET)
+#define GEM_BFEXT(name, value)\
+ (((value) >> GEM_##name##_OFFSET) \
+ & ((1 << GEM_##name##_SIZE) - 1))
+#define GEM_BFINS(name, value, old) \
+ (((old) & ~(((1 << GEM_##name##_SIZE) - 1) \
+ << GEM_##name##_OFFSET)) \
+ | GEM_BF(name, value))
+
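/*
 * Worked example of the bitfield helpers, using the CLK field (offset 18,
 * size 3) defined above:
 *   GEM_BF(CLK, GEM_CLK_DIV32)                 == (2 & 0x7) << 18 == 0x00080000
 *   GEM_BFEXT(CLK, 0x00080000)                 == 2 (reads the field back)
 *   GEM_BFINS(CLK, GEM_CLK_DIV96, 0x00080000)  == 0x00140000 (field replaced by 5)
 */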
/* Register access macros */
#define macb_readl(port,reg) \
__raw_readl((port)->regs + MACB_##reg)
#define macb_writel(port,reg,value) \
__raw_writel((value), (port)->regs + MACB_##reg)
+#define gem_readl(port, reg) \
+ __raw_readl((port)->regs + GEM_##reg)
+#define gem_writel(port, reg, value) \
+ __raw_writel((value), (port)->regs + GEM_##reg)
+
+/*
+ * Conditional GEM/MACB macros. These perform the operation to the correct
+ * register dependent on whether the device is a GEM or a MACB. For registers
+ * and bitfields that are common across both devices, use macb_{read,write}l
+ * to avoid the cost of the conditional.
+ */
+#define macb_or_gem_writel(__bp, __reg, __value) \
+ ({ \
+ if (macb_is_gem((__bp))) \
+ gem_writel((__bp), __reg, __value); \
+ else \
+ macb_writel((__bp), __reg, __value); \
+ })
+
+#define macb_or_gem_readl(__bp, __reg) \
+ ({ \
+ u32 __v; \
+ if (macb_is_gem((__bp))) \
+ __v = gem_readl((__bp), __reg); \
+ else \
+ __v = macb_readl((__bp), __reg); \
+ __v; \
+ })
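/*
 * Minimal usage sketch, assuming a struct macb *bp as used throughout this
 * driver (the function name here is purely illustrative): SA1B lives at
 * different offsets on MACB and GEM, so it goes through the conditional
 * accessors, while a register common to both devices, such as NCR, keeps
 * using macb_readl()/macb_writel() directly.
 */
static inline void example_set_sa1_bottom(struct macb *bp, u32 bottom)
{
	macb_or_gem_writel(bp, SA1B, bottom);		/* MACB_SA1B or GEM_SA1B */
	macb_writel(bp, NCR, macb_readl(bp, NCR));	/* same offset on both */
}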
struct dma_desc {
u32 addr;
@@ -358,6 +450,54 @@ struct macb_stats {
u32 tx_pause_frames;
};
+struct gem_stats {
+ u32 tx_octets_31_0;
+ u32 tx_octets_47_32;
+ u32 tx_frames;
+ u32 tx_broadcast_frames;
+ u32 tx_multicast_frames;
+ u32 tx_pause_frames;
+ u32 tx_64_byte_frames;
+ u32 tx_65_127_byte_frames;
+ u32 tx_128_255_byte_frames;
+ u32 tx_256_511_byte_frames;
+ u32 tx_512_1023_byte_frames;
+ u32 tx_1024_1518_byte_frames;
+ u32 tx_greater_than_1518_byte_frames;
+ u32 tx_underrun;
+ u32 tx_single_collision_frames;
+ u32 tx_multiple_collision_frames;
+ u32 tx_excessive_collisions;
+ u32 tx_late_collisions;
+ u32 tx_deferred_frames;
+ u32 tx_carrier_sense_errors;
+ u32 rx_octets_31_0;
+ u32 rx_octets_47_32;
+ u32 rx_frames;
+ u32 rx_broadcast_frames;
+ u32 rx_multicast_frames;
+ u32 rx_pause_frames;
+ u32 rx_64_byte_frames;
+ u32 rx_65_127_byte_frames;
+ u32 rx_128_255_byte_frames;
+ u32 rx_256_511_byte_frames;
+ u32 rx_512_1023_byte_frames;
+ u32 rx_1024_1518_byte_frames;
+ u32 rx_greater_than_1518_byte_frames;
+ u32 rx_undersized_frames;
+ u32 rx_oversize_frames;
+ u32 rx_jabbers;
+ u32 rx_frame_check_sequence_errors;
+ u32 rx_length_field_frame_errors;
+ u32 rx_symbol_errors;
+ u32 rx_alignment_errors;
+ u32 rx_resource_errors;
+ u32 rx_overruns;
+ u32 rx_ip_header_checksum_errors;
+ u32 rx_tcp_checksum_errors;
+ u32 rx_udp_checksum_errors;
+};
+
struct macb {
void __iomem *regs;
@@ -376,7 +516,10 @@ struct macb {
struct net_device *dev;
struct napi_struct napi;
struct net_device_stats stats;
- struct macb_stats hw_stats;
+ union {
+ struct macb_stats macb;
+ struct gem_stats gem;
+ } hw_stats;
dma_addr_t rx_ring_dma;
dma_addr_t tx_ring_dma;
@@ -389,6 +532,13 @@ struct macb {
unsigned int link;
unsigned int speed;
unsigned int duplex;
+
+ phy_interface_t phy_interface;
};
+static inline bool macb_is_gem(struct macb *bp)
+{
+ return MACB_BFEXT(IDNUM, macb_readl(bp, MID)) == 0x2;
+}
+
#endif /* _MACB_H */
diff --git a/drivers/net/ethernet/calxeda/Kconfig b/drivers/net/ethernet/calxeda/Kconfig
new file mode 100644
index 000000000000..aba435c3d4ae
--- /dev/null
+++ b/drivers/net/ethernet/calxeda/Kconfig
@@ -0,0 +1,7 @@
+config NET_CALXEDA_XGMAC
+ tristate "Calxeda 1G/10G XGMAC Ethernet driver"
+ depends on HAS_IOMEM
+ select CRC32
+ help
+ This is the driver for the XGMAC Ethernet IP block found on Calxeda
+ Highbank platforms.
diff --git a/drivers/net/ethernet/calxeda/Makefile b/drivers/net/ethernet/calxeda/Makefile
new file mode 100644
index 000000000000..f0ef08067f97
--- /dev/null
+++ b/drivers/net/ethernet/calxeda/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_NET_CALXEDA_XGMAC) += xgmac.o
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
new file mode 100644
index 000000000000..1fce186a9031
--- /dev/null
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -0,0 +1,1928 @@
+/*
+ * Copyright 2010-2011 Calxeda, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/circ_buf.h>
+#include <linux/interrupt.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/if.h>
+#include <linux/crc32.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+
+/* XGMAC Register definitions */
+#define XGMAC_CONTROL 0x00000000 /* MAC Configuration */
+#define XGMAC_FRAME_FILTER 0x00000004 /* MAC Frame Filter */
+#define XGMAC_FLOW_CTRL 0x00000018 /* MAC Flow Control */
+#define XGMAC_VLAN_TAG 0x0000001C /* VLAN Tags */
+#define XGMAC_VERSION 0x00000020 /* Version */
+#define XGMAC_VLAN_INCL 0x00000024 /* VLAN tag for tx frames */
+#define XGMAC_LPI_CTRL 0x00000028 /* LPI Control and Status */
+#define XGMAC_LPI_TIMER 0x0000002C /* LPI Timers Control */
+#define XGMAC_TX_PACE 0x00000030 /* Transmit Pace and Stretch */
+#define XGMAC_VLAN_HASH 0x00000034 /* VLAN Hash Table */
+#define XGMAC_DEBUG 0x00000038 /* Debug */
+#define XGMAC_INT_STAT 0x0000003C /* Interrupt and Control */
+#define XGMAC_ADDR_HIGH(reg) (0x00000040 + ((reg) * 8))
+#define XGMAC_ADDR_LOW(reg) (0x00000044 + ((reg) * 8))
+#define XGMAC_HASH(n) (0x00000300 + (n) * 4) /* HASH table regs */
+#define XGMAC_NUM_HASH 16
+#define XGMAC_OMR 0x00000400
+#define XGMAC_REMOTE_WAKE 0x00000700 /* Remote Wake-Up Frm Filter */
+#define XGMAC_PMT 0x00000704 /* PMT Control and Status */
+#define XGMAC_MMC_CTRL 0x00000800 /* XGMAC MMC Control */
+#define XGMAC_MMC_INTR_RX 0x00000804 /* Receive Interrupt */
+#define XGMAC_MMC_INTR_TX 0x00000808 /* Transmit Interrupt */
+#define XGMAC_MMC_INTR_MASK_RX 0x0000080c /* Receive Interrupt Mask */
+#define XGMAC_MMC_INTR_MASK_TX 0x00000810 /* Transmit Interrupt Mask */
+
+/* Hardware TX Statistics Counters */
+#define XGMAC_MMC_TXOCTET_GB_LO 0x00000814
+#define XGMAC_MMC_TXOCTET_GB_HI 0x00000818
+#define XGMAC_MMC_TXFRAME_GB_LO 0x0000081C
+#define XGMAC_MMC_TXFRAME_GB_HI 0x00000820
+#define XGMAC_MMC_TXBCFRAME_G 0x00000824
+#define XGMAC_MMC_TXMCFRAME_G 0x0000082C
+#define XGMAC_MMC_TXUCFRAME_GB 0x00000864
+#define XGMAC_MMC_TXMCFRAME_GB 0x0000086C
+#define XGMAC_MMC_TXBCFRAME_GB 0x00000874
+#define XGMAC_MMC_TXUNDERFLOW 0x0000087C
+#define XGMAC_MMC_TXOCTET_G_LO 0x00000884
+#define XGMAC_MMC_TXOCTET_G_HI 0x00000888
+#define XGMAC_MMC_TXFRAME_G_LO 0x0000088C
+#define XGMAC_MMC_TXFRAME_G_HI 0x00000890
+#define XGMAC_MMC_TXPAUSEFRAME 0x00000894
+#define XGMAC_MMC_TXVLANFRAME 0x0000089C
+
+/* Hardware RX Statistics Counters */
+#define XGMAC_MMC_RXFRAME_GB_LO 0x00000900
+#define XGMAC_MMC_RXFRAME_GB_HI 0x00000904
+#define XGMAC_MMC_RXOCTET_GB_LO 0x00000908
+#define XGMAC_MMC_RXOCTET_GB_HI 0x0000090C
+#define XGMAC_MMC_RXOCTET_G_LO 0x00000910
+#define XGMAC_MMC_RXOCTET_G_HI 0x00000914
+#define XGMAC_MMC_RXBCFRAME_G 0x00000918
+#define XGMAC_MMC_RXMCFRAME_G 0x00000920
+#define XGMAC_MMC_RXCRCERR 0x00000928
+#define XGMAC_MMC_RXRUNT 0x00000930
+#define XGMAC_MMC_RXJABBER 0x00000934
+#define XGMAC_MMC_RXUCFRAME_G 0x00000970
+#define XGMAC_MMC_RXLENGTHERR 0x00000978
+#define XGMAC_MMC_RXPAUSEFRAME 0x00000988
+#define XGMAC_MMC_RXOVERFLOW 0x00000990
+#define XGMAC_MMC_RXVLANFRAME 0x00000998
+#define XGMAC_MMC_RXWATCHDOG 0x000009a0
+
+/* DMA Control and Status Registers */
+#define XGMAC_DMA_BUS_MODE 0x00000f00 /* Bus Mode */
+#define XGMAC_DMA_TX_POLL 0x00000f04 /* Transmit Poll Demand */
+#define XGMAC_DMA_RX_POLL 0x00000f08 /* Received Poll Demand */
+#define XGMAC_DMA_RX_BASE_ADDR 0x00000f0c /* Receive List Base */
+#define XGMAC_DMA_TX_BASE_ADDR 0x00000f10 /* Transmit List Base */
+#define XGMAC_DMA_STATUS 0x00000f14 /* Status Register */
+#define XGMAC_DMA_CONTROL 0x00000f18 /* Ctrl (Operational Mode) */
+#define XGMAC_DMA_INTR_ENA 0x00000f1c /* Interrupt Enable */
+#define XGMAC_DMA_MISS_FRAME_CTR 0x00000f20 /* Missed Frame Counter */
+#define XGMAC_DMA_RI_WDOG_TIMER 0x00000f24 /* RX Intr Watchdog Timer */
+#define XGMAC_DMA_AXI_BUS 0x00000f28 /* AXI Bus Mode */
+#define XGMAC_DMA_AXI_STATUS 0x00000f2C /* AXI Status */
+#define XGMAC_DMA_HW_FEATURE 0x00000f58 /* Enabled Hardware Features */
+
+#define XGMAC_ADDR_AE 0x80000000
+#define XGMAC_MAX_FILTER_ADDR 31
+
+/* PMT Control and Status */
+#define XGMAC_PMT_POINTER_RESET 0x80000000
+#define XGMAC_PMT_GLBL_UNICAST 0x00000200
+#define XGMAC_PMT_WAKEUP_RX_FRM 0x00000040
+#define XGMAC_PMT_MAGIC_PKT 0x00000020
+#define XGMAC_PMT_WAKEUP_FRM_EN 0x00000004
+#define XGMAC_PMT_MAGIC_PKT_EN 0x00000002
+#define XGMAC_PMT_POWERDOWN 0x00000001
+
+#define XGMAC_CONTROL_SPD 0x40000000 /* Speed control */
+#define XGMAC_CONTROL_SPD_MASK 0x60000000
+#define XGMAC_CONTROL_SPD_1G 0x60000000
+#define XGMAC_CONTROL_SPD_2_5G 0x40000000
+#define XGMAC_CONTROL_SPD_10G 0x00000000
+#define XGMAC_CONTROL_SARC 0x10000000 /* Source Addr Insert/Replace */
+#define XGMAC_CONTROL_SARK_MASK 0x18000000
+#define XGMAC_CONTROL_CAR 0x04000000 /* CRC Addition/Replacement */
+#define XGMAC_CONTROL_CAR_MASK 0x06000000
+#define XGMAC_CONTROL_DP 0x01000000 /* Disable Padding */
+#define XGMAC_CONTROL_WD 0x00800000 /* Disable Watchdog on rx */
+#define XGMAC_CONTROL_JD 0x00400000 /* Jabber disable */
+#define XGMAC_CONTROL_JE 0x00100000 /* Jumbo frame */
+#define XGMAC_CONTROL_LM 0x00001000 /* Loop-back mode */
+#define XGMAC_CONTROL_IPC 0x00000400 /* Checksum Offload */
+#define XGMAC_CONTROL_ACS 0x00000080 /* Automatic Pad/FCS Strip */
+#define XGMAC_CONTROL_DDIC 0x00000010 /* Disable Deficit Idle Count */
+#define XGMAC_CONTROL_TE 0x00000008 /* Transmitter Enable */
+#define XGMAC_CONTROL_RE 0x00000004 /* Receiver Enable */
+
+/* XGMAC Frame Filter defines */
+#define XGMAC_FRAME_FILTER_PR 0x00000001 /* Promiscuous Mode */
+#define XGMAC_FRAME_FILTER_HUC 0x00000002 /* Hash Unicast */
+#define XGMAC_FRAME_FILTER_HMC 0x00000004 /* Hash Multicast */
+#define XGMAC_FRAME_FILTER_DAIF 0x00000008 /* DA Inverse Filtering */
+#define XGMAC_FRAME_FILTER_PM 0x00000010 /* Pass all multicast */
+#define XGMAC_FRAME_FILTER_DBF 0x00000020 /* Disable Broadcast frames */
+#define XGMAC_FRAME_FILTER_SAIF 0x00000100 /* Inverse Filtering */
+#define XGMAC_FRAME_FILTER_SAF 0x00000200 /* Source Address Filter */
+#define XGMAC_FRAME_FILTER_HPF 0x00000400 /* Hash or perfect Filter */
+#define XGMAC_FRAME_FILTER_VHF 0x00000800 /* VLAN Hash Filter */
+#define XGMAC_FRAME_FILTER_VPF 0x00001000 /* VLAN Perfect Filter */
+#define XGMAC_FRAME_FILTER_RA 0x80000000 /* Receive all mode */
+
+/* XGMAC FLOW CTRL defines */
+#define XGMAC_FLOW_CTRL_PT_MASK 0xffff0000 /* Pause Time Mask */
+#define XGMAC_FLOW_CTRL_PT_SHIFT 16
+#define XGMAC_FLOW_CTRL_DZQP 0x00000080 /* Disable Zero-Quanta Phase */
+#define XGMAC_FLOW_CTRL_PLT 0x00000020 /* Pause Low Threshold */
+#define XGMAC_FLOW_CTRL_PLT_MASK 0x00000030 /* PLT MASK */
+#define XGMAC_FLOW_CTRL_UP 0x00000008 /* Unicast Pause Frame Detect */
+#define XGMAC_FLOW_CTRL_RFE 0x00000004 /* Rx Flow Control Enable */
+#define XGMAC_FLOW_CTRL_TFE 0x00000002 /* Tx Flow Control Enable */
+#define XGMAC_FLOW_CTRL_FCB_BPA 0x00000001 /* Flow Control Busy ... */
+
+/* XGMAC_INT_STAT reg */
+#define XGMAC_INT_STAT_PMT 0x0080 /* PMT Interrupt Status */
+#define XGMAC_INT_STAT_LPI 0x0040 /* LPI Interrupt Status */
+
+/* DMA Bus Mode register defines */
+#define DMA_BUS_MODE_SFT_RESET 0x00000001 /* Software Reset */
+#define DMA_BUS_MODE_DSL_MASK 0x0000007c /* Descriptor Skip Length */
+#define DMA_BUS_MODE_DSL_SHIFT 2 /* (in DWORDS) */
+#define DMA_BUS_MODE_ATDS 0x00000080 /* Alternate Descriptor Size */
+
+/* Programmable burst length */
+#define DMA_BUS_MODE_PBL_MASK 0x00003f00 /* Programmable Burst Len */
+#define DMA_BUS_MODE_PBL_SHIFT 8
+#define DMA_BUS_MODE_FB 0x00010000 /* Fixed burst */
+#define DMA_BUS_MODE_RPBL_MASK 0x003e0000 /* Rx-Programmable Burst Len */
+#define DMA_BUS_MODE_RPBL_SHIFT 17
+#define DMA_BUS_MODE_USP 0x00800000
+#define DMA_BUS_MODE_8PBL 0x01000000
+#define DMA_BUS_MODE_AAL 0x02000000
+
+/* DMA Bus Mode register defines */
+#define DMA_BUS_PR_RATIO_MASK 0x0000c000 /* Rx/Tx priority ratio */
+#define DMA_BUS_PR_RATIO_SHIFT 14
+#define DMA_BUS_FB 0x00010000 /* Fixed Burst */
+
+/* DMA Control register defines */
+#define DMA_CONTROL_ST 0x00002000 /* Start/Stop Transmission */
+#define DMA_CONTROL_SR 0x00000002 /* Start/Stop Receive */
+#define DMA_CONTROL_DFF 0x01000000 /* Disable flush of rx frames */
+
+/* DMA Normal interrupt */
+#define DMA_INTR_ENA_NIE 0x00010000 /* Normal Summary */
+#define DMA_INTR_ENA_AIE 0x00008000 /* Abnormal Summary */
+#define DMA_INTR_ENA_ERE 0x00004000 /* Early Receive */
+#define DMA_INTR_ENA_FBE 0x00002000 /* Fatal Bus Error */
+#define DMA_INTR_ENA_ETE 0x00000400 /* Early Transmit */
+#define DMA_INTR_ENA_RWE 0x00000200 /* Receive Watchdog */
+#define DMA_INTR_ENA_RSE 0x00000100 /* Receive Stopped */
+#define DMA_INTR_ENA_RUE 0x00000080 /* Receive Buffer Unavailable */
+#define DMA_INTR_ENA_RIE 0x00000040 /* Receive Interrupt */
+#define DMA_INTR_ENA_UNE 0x00000020 /* Tx Underflow */
+#define DMA_INTR_ENA_OVE 0x00000010 /* Receive Overflow */
+#define DMA_INTR_ENA_TJE 0x00000008 /* Transmit Jabber */
+#define DMA_INTR_ENA_TUE 0x00000004 /* Transmit Buffer Unavail */
+#define DMA_INTR_ENA_TSE 0x00000002 /* Transmit Stopped */
+#define DMA_INTR_ENA_TIE 0x00000001 /* Transmit Interrupt */
+
+#define DMA_INTR_NORMAL (DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \
+ DMA_INTR_ENA_TUE)
+
+#define DMA_INTR_ABNORMAL (DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \
+ DMA_INTR_ENA_RWE | DMA_INTR_ENA_RSE | \
+ DMA_INTR_ENA_RUE | DMA_INTR_ENA_UNE | \
+ DMA_INTR_ENA_OVE | DMA_INTR_ENA_TJE | \
+ DMA_INTR_ENA_TSE)
+
+/* DMA default interrupt mask */
+#define DMA_INTR_DEFAULT_MASK (DMA_INTR_NORMAL | DMA_INTR_ABNORMAL)
+
+/* DMA Status register defines */
+#define DMA_STATUS_GMI 0x08000000 /* MMC interrupt */
+#define DMA_STATUS_GLI 0x04000000 /* GMAC Line interface int */
+#define DMA_STATUS_EB_MASK 0x00380000 /* Error Bits Mask */
+#define DMA_STATUS_EB_TX_ABORT 0x00080000 /* Error Bits - TX Abort */
+#define DMA_STATUS_EB_RX_ABORT 0x00100000 /* Error Bits - RX Abort */
+#define DMA_STATUS_TS_MASK 0x00700000 /* Transmit Process State */
+#define DMA_STATUS_TS_SHIFT 20
+#define DMA_STATUS_RS_MASK 0x000e0000 /* Receive Process State */
+#define DMA_STATUS_RS_SHIFT 17
+#define DMA_STATUS_NIS 0x00010000 /* Normal Interrupt Summary */
+#define DMA_STATUS_AIS 0x00008000 /* Abnormal Interrupt Summary */
+#define DMA_STATUS_ERI 0x00004000 /* Early Receive Interrupt */
+#define DMA_STATUS_FBI 0x00002000 /* Fatal Bus Error Interrupt */
+#define DMA_STATUS_ETI 0x00000400 /* Early Transmit Interrupt */
+#define DMA_STATUS_RWT 0x00000200 /* Receive Watchdog Timeout */
+#define DMA_STATUS_RPS 0x00000100 /* Receive Process Stopped */
+#define DMA_STATUS_RU 0x00000080 /* Receive Buffer Unavailable */
+#define DMA_STATUS_RI 0x00000040 /* Receive Interrupt */
+#define DMA_STATUS_UNF 0x00000020 /* Transmit Underflow */
+#define DMA_STATUS_OVF 0x00000010 /* Receive Overflow */
+#define DMA_STATUS_TJT 0x00000008 /* Transmit Jabber Timeout */
+#define DMA_STATUS_TU 0x00000004 /* Transmit Buffer Unavail */
+#define DMA_STATUS_TPS 0x00000002 /* Transmit Process Stopped */
+#define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */
+
+/* Common MAC defines */
+#define MAC_ENABLE_TX 0x00000008 /* Transmitter Enable */
+#define MAC_ENABLE_RX 0x00000004 /* Receiver Enable */
+
+/* XGMAC Operation Mode Register */
+#define XGMAC_OMR_TSF 0x00200000 /* TX FIFO Store and Forward */
+#define XGMAC_OMR_FTF 0x00100000 /* Flush Transmit FIFO */
+#define XGMAC_OMR_TTC 0x00020000 /* Transmit Threshold Ctrl */
+#define XGMAC_OMR_TTC_MASK 0x00030000
+#define XGMAC_OMR_RFD 0x00006000 /* FC Deactivation Threshold */
+#define XGMAC_OMR_RFD_MASK 0x00007000 /* FC Deact Threshold MASK */
+#define XGMAC_OMR_RFA 0x00000600 /* FC Activation Threshold */
+#define XGMAC_OMR_RFA_MASK 0x00000E00 /* FC Act Threshold MASK */
+#define XGMAC_OMR_EFC 0x00000100 /* Enable Hardware FC */
+#define XGMAC_OMR_FEF 0x00000080 /* Forward Error Frames */
+#define XGMAC_OMR_DT 0x00000040 /* Drop TCP/IP csum Errors */
+#define XGMAC_OMR_RSF 0x00000020 /* RX FIFO Store and Forward */
+#define XGMAC_OMR_RTC 0x00000010 /* RX Threshold Ctrl */
+#define XGMAC_OMR_RTC_MASK 0x00000018 /* RX Threshold Ctrl MASK */
+
+/* XGMAC HW Features Register */
+#define DMA_HW_FEAT_TXCOESEL 0x00010000 /* TX Checksum offload */
+
+#define XGMAC_MMC_CTRL_CNT_FRZ 0x00000008
+
+/* XGMAC Descriptor Defines */
+#define MAX_DESC_BUF_SZ (0x2000 - 8)
+
+#define RXDESC_EXT_STATUS 0x00000001
+#define RXDESC_CRC_ERR 0x00000002
+#define RXDESC_RX_ERR 0x00000008
+#define RXDESC_RX_WDOG 0x00000010
+#define RXDESC_FRAME_TYPE 0x00000020
+#define RXDESC_GIANT_FRAME 0x00000080
+#define RXDESC_LAST_SEG 0x00000100
+#define RXDESC_FIRST_SEG 0x00000200
+#define RXDESC_VLAN_FRAME 0x00000400
+#define RXDESC_OVERFLOW_ERR 0x00000800
+#define RXDESC_LENGTH_ERR 0x00001000
+#define RXDESC_SA_FILTER_FAIL 0x00002000
+#define RXDESC_DESCRIPTOR_ERR 0x00004000
+#define RXDESC_ERROR_SUMMARY 0x00008000
+#define RXDESC_FRAME_LEN_OFFSET 16
+#define RXDESC_FRAME_LEN_MASK 0x3fff0000
+#define RXDESC_DA_FILTER_FAIL 0x40000000
+
+#define RXDESC1_END_RING 0x00008000
+
+#define RXDESC_IP_PAYLOAD_MASK 0x00000003
+#define RXDESC_IP_PAYLOAD_UDP 0x00000001
+#define RXDESC_IP_PAYLOAD_TCP 0x00000002
+#define RXDESC_IP_PAYLOAD_ICMP 0x00000003
+#define RXDESC_IP_HEADER_ERR 0x00000008
+#define RXDESC_IP_PAYLOAD_ERR 0x00000010
+#define RXDESC_IPV4_PACKET 0x00000040
+#define RXDESC_IPV6_PACKET 0x00000080
+#define TXDESC_UNDERFLOW_ERR 0x00000001
+#define TXDESC_JABBER_TIMEOUT 0x00000002
+#define TXDESC_LOCAL_FAULT 0x00000004
+#define TXDESC_REMOTE_FAULT 0x00000008
+#define TXDESC_VLAN_FRAME 0x00000010
+#define TXDESC_FRAME_FLUSHED 0x00000020
+#define TXDESC_IP_HEADER_ERR 0x00000040
+#define TXDESC_PAYLOAD_CSUM_ERR 0x00000080
+#define TXDESC_ERROR_SUMMARY 0x00008000
+#define TXDESC_SA_CTRL_INSERT 0x00040000
+#define TXDESC_SA_CTRL_REPLACE 0x00080000
+#define TXDESC_2ND_ADDR_CHAINED 0x00100000
+#define TXDESC_END_RING 0x00200000
+#define TXDESC_CSUM_IP 0x00400000
+#define TXDESC_CSUM_IP_PAYLD 0x00800000
+#define TXDESC_CSUM_ALL 0x00C00000
+#define TXDESC_CRC_EN_REPLACE 0x01000000
+#define TXDESC_CRC_EN_APPEND 0x02000000
+#define TXDESC_DISABLE_PAD 0x04000000
+#define TXDESC_FIRST_SEG 0x10000000
+#define TXDESC_LAST_SEG 0x20000000
+#define TXDESC_INTERRUPT 0x40000000
+
+#define DESC_OWN 0x80000000
+#define DESC_BUFFER1_SZ_MASK 0x00001fff
+#define DESC_BUFFER2_SZ_MASK 0x1fff0000
+#define DESC_BUFFER2_SZ_OFFSET 16
+
+struct xgmac_dma_desc {
+ __le32 flags;
+ __le32 buf_size;
+ __le32 buf1_addr; /* Buffer 1 Address Pointer */
+ __le32 buf2_addr; /* Buffer 2 Address Pointer */
+ __le32 ext_status;
+ __le32 res[3];
+};
+
+struct xgmac_extra_stats {
+ /* Transmit errors */
+ unsigned long tx_jabber;
+ unsigned long tx_frame_flushed;
+ unsigned long tx_payload_error;
+ unsigned long tx_ip_header_error;
+ unsigned long tx_local_fault;
+ unsigned long tx_remote_fault;
+ /* Receive errors */
+ unsigned long rx_watchdog;
+ unsigned long rx_da_filter_fail;
+ unsigned long rx_sa_filter_fail;
+ unsigned long rx_payload_error;
+ unsigned long rx_ip_header_error;
+ /* Tx/Rx IRQ errors */
+ unsigned long tx_undeflow;
+ unsigned long tx_process_stopped;
+ unsigned long rx_buf_unav;
+ unsigned long rx_process_stopped;
+ unsigned long tx_early;
+ unsigned long fatal_bus_error;
+};
+
+struct xgmac_priv {
+ struct xgmac_dma_desc *dma_rx;
+ struct sk_buff **rx_skbuff;
+ unsigned int rx_tail;
+ unsigned int rx_head;
+
+ struct xgmac_dma_desc *dma_tx;
+ struct sk_buff **tx_skbuff;
+ unsigned int tx_head;
+ unsigned int tx_tail;
+
+ void __iomem *base;
+ struct sk_buff_head rx_recycle;
+ unsigned int dma_buf_sz;
+ dma_addr_t dma_rx_phy;
+ dma_addr_t dma_tx_phy;
+
+ struct net_device *dev;
+ struct device *device;
+ struct napi_struct napi;
+
+ struct xgmac_extra_stats xstats;
+
+ spinlock_t stats_lock;
+ int pmt_irq;
+ char rx_pause;
+ char tx_pause;
+ int wolopts;
+};
+
+/* XGMAC Configuration Settings */
+#define MAX_MTU 9000
+#define PAUSE_TIME 0x400
+
+#define DMA_RX_RING_SZ 256
+#define DMA_TX_RING_SZ 128
+/* minimum number of free TX descriptors required to wake up TX process */
+#define TX_THRESH (DMA_TX_RING_SZ/4)
+
+/* DMA descriptor ring helpers */
+#define dma_ring_incr(n, s) (((n) + 1) & ((s) - 1))
+#define dma_ring_space(h, t, s) CIRC_SPACE(h, t, s)
+#define dma_ring_cnt(h, t, s) CIRC_CNT(h, t, s)
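/*
 * Illustration: the ring sizes above are powers of two, so the circ_buf
 * macros wrap indices with a simple mask.  With DMA_TX_RING_SZ == 128:
 *   dma_ring_incr(127, 128)    == 0    (wraps back to the start)
 *   dma_ring_cnt(10, 5, 128)   == 5    (descriptors in flight)
 *   dma_ring_space(10, 5, 128) == 122  (free slots)
 */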
+
+/* XGMAC Descriptor Access Helpers */
+static inline void desc_set_buf_len(struct xgmac_dma_desc *p, u32 buf_sz)
+{
+ if (buf_sz > MAX_DESC_BUF_SZ)
+ p->buf_size = cpu_to_le32(MAX_DESC_BUF_SZ |
+ (buf_sz - MAX_DESC_BUF_SZ) << DESC_BUFFER2_SZ_OFFSET);
+ else
+ p->buf_size = cpu_to_le32(buf_sz);
+}
+
+static inline int desc_get_buf_len(struct xgmac_dma_desc *p)
+{
+	u32 len = le32_to_cpu(p->buf_size);
+ return (len & DESC_BUFFER1_SZ_MASK) +
+ ((len & DESC_BUFFER2_SZ_MASK) >> DESC_BUFFER2_SZ_OFFSET);
+}
+
+static inline void desc_init_rx_desc(struct xgmac_dma_desc *p, int ring_size,
+ int buf_sz)
+{
+ struct xgmac_dma_desc *end = p + ring_size - 1;
+
+ memset(p, 0, sizeof(*p) * ring_size);
+
+ for (; p <= end; p++)
+ desc_set_buf_len(p, buf_sz);
+
+ end->buf_size |= cpu_to_le32(RXDESC1_END_RING);
+}
+
+static inline void desc_init_tx_desc(struct xgmac_dma_desc *p, u32 ring_size)
+{
+ memset(p, 0, sizeof(*p) * ring_size);
+ p[ring_size - 1].flags = cpu_to_le32(TXDESC_END_RING);
+}
+
+static inline int desc_get_owner(struct xgmac_dma_desc *p)
+{
+ return le32_to_cpu(p->flags) & DESC_OWN;
+}
+
+static inline void desc_set_rx_owner(struct xgmac_dma_desc *p)
+{
+ /* Clear all fields and set the owner */
+ p->flags = cpu_to_le32(DESC_OWN);
+}
+
+static inline void desc_set_tx_owner(struct xgmac_dma_desc *p, u32 flags)
+{
+ u32 tmpflags = le32_to_cpu(p->flags);
+ tmpflags &= TXDESC_END_RING;
+ tmpflags |= flags | DESC_OWN;
+ p->flags = cpu_to_le32(tmpflags);
+}
+
+static inline int desc_get_tx_ls(struct xgmac_dma_desc *p)
+{
+ return le32_to_cpu(p->flags) & TXDESC_LAST_SEG;
+}
+
+static inline u32 desc_get_buf_addr(struct xgmac_dma_desc *p)
+{
+ return le32_to_cpu(p->buf1_addr);
+}
+
+static inline void desc_set_buf_addr(struct xgmac_dma_desc *p,
+ u32 paddr, int len)
+{
+ p->buf1_addr = cpu_to_le32(paddr);
+ if (len > MAX_DESC_BUF_SZ)
+ p->buf2_addr = cpu_to_le32(paddr + MAX_DESC_BUF_SZ);
+}
+
+static inline void desc_set_buf_addr_and_size(struct xgmac_dma_desc *p,
+ u32 paddr, int len)
+{
+ desc_set_buf_len(p, len);
+ desc_set_buf_addr(p, paddr, len);
+}
+
+static inline int desc_get_rx_frame_len(struct xgmac_dma_desc *p)
+{
+ u32 data = le32_to_cpu(p->flags);
+ u32 len = (data & RXDESC_FRAME_LEN_MASK) >> RXDESC_FRAME_LEN_OFFSET;
+ if (data & RXDESC_FRAME_TYPE)
+ len -= ETH_FCS_LEN;
+
+ return len;
+}
+
+static void xgmac_dma_flush_tx_fifo(void __iomem *ioaddr)
+{
+ int timeout = 1000;
+ u32 reg = readl(ioaddr + XGMAC_OMR);
+ writel(reg | XGMAC_OMR_FTF, ioaddr + XGMAC_OMR);
+
+ while ((timeout-- > 0) && readl(ioaddr + XGMAC_OMR) & XGMAC_OMR_FTF)
+ udelay(1);
+}
+
+static int desc_get_tx_status(struct xgmac_priv *priv, struct xgmac_dma_desc *p)
+{
+ struct xgmac_extra_stats *x = &priv->xstats;
+ u32 status = le32_to_cpu(p->flags);
+
+ if (!(status & TXDESC_ERROR_SUMMARY))
+ return 0;
+
+ netdev_dbg(priv->dev, "tx desc error = 0x%08x\n", status);
+ if (status & TXDESC_JABBER_TIMEOUT)
+ x->tx_jabber++;
+ if (status & TXDESC_FRAME_FLUSHED)
+ x->tx_frame_flushed++;
+ if (status & TXDESC_UNDERFLOW_ERR)
+ xgmac_dma_flush_tx_fifo(priv->base);
+ if (status & TXDESC_IP_HEADER_ERR)
+ x->tx_ip_header_error++;
+ if (status & TXDESC_LOCAL_FAULT)
+ x->tx_local_fault++;
+ if (status & TXDESC_REMOTE_FAULT)
+ x->tx_remote_fault++;
+ if (status & TXDESC_PAYLOAD_CSUM_ERR)
+ x->tx_payload_error++;
+
+ return -1;
+}
+
+static int desc_get_rx_status(struct xgmac_priv *priv, struct xgmac_dma_desc *p)
+{
+ struct xgmac_extra_stats *x = &priv->xstats;
+ int ret = CHECKSUM_UNNECESSARY;
+ u32 status = le32_to_cpu(p->flags);
+ u32 ext_status = le32_to_cpu(p->ext_status);
+
+ if (status & RXDESC_DA_FILTER_FAIL) {
+ netdev_dbg(priv->dev, "XGMAC RX : Dest Address filter fail\n");
+ x->rx_da_filter_fail++;
+ return -1;
+ }
+
+ /* Check if packet has checksum already */
+ if ((status & RXDESC_FRAME_TYPE) && (status & RXDESC_EXT_STATUS) &&
+ !(ext_status & RXDESC_IP_PAYLOAD_MASK))
+ ret = CHECKSUM_NONE;
+
+ netdev_dbg(priv->dev, "rx status - frame type=%d, csum = %d, ext stat %08x\n",
+ (status & RXDESC_FRAME_TYPE) ? 1 : 0, ret, ext_status);
+
+ if (!(status & RXDESC_ERROR_SUMMARY))
+ return ret;
+
+ /* Handle any errors */
+ if (status & (RXDESC_DESCRIPTOR_ERR | RXDESC_OVERFLOW_ERR |
+ RXDESC_GIANT_FRAME | RXDESC_LENGTH_ERR | RXDESC_CRC_ERR))
+ return -1;
+
+ if (status & RXDESC_EXT_STATUS) {
+ if (ext_status & RXDESC_IP_HEADER_ERR)
+ x->rx_ip_header_error++;
+ if (ext_status & RXDESC_IP_PAYLOAD_ERR)
+ x->rx_payload_error++;
+ netdev_dbg(priv->dev, "IP checksum error - stat %08x\n",
+ ext_status);
+ return CHECKSUM_NONE;
+ }
+
+ return ret;
+}
+
+static inline void xgmac_mac_enable(void __iomem *ioaddr)
+{
+ u32 value = readl(ioaddr + XGMAC_CONTROL);
+ value |= MAC_ENABLE_RX | MAC_ENABLE_TX;
+ writel(value, ioaddr + XGMAC_CONTROL);
+
+ value = readl(ioaddr + XGMAC_DMA_CONTROL);
+ value |= DMA_CONTROL_ST | DMA_CONTROL_SR;
+ writel(value, ioaddr + XGMAC_DMA_CONTROL);
+}
+
+static inline void xgmac_mac_disable(void __iomem *ioaddr)
+{
+ u32 value = readl(ioaddr + XGMAC_DMA_CONTROL);
+ value &= ~(DMA_CONTROL_ST | DMA_CONTROL_SR);
+ writel(value, ioaddr + XGMAC_DMA_CONTROL);
+
+ value = readl(ioaddr + XGMAC_CONTROL);
+ value &= ~(MAC_ENABLE_TX | MAC_ENABLE_RX);
+ writel(value, ioaddr + XGMAC_CONTROL);
+}
+
+static void xgmac_set_mac_addr(void __iomem *ioaddr, unsigned char *addr,
+ int num)
+{
+ u32 data;
+
+ data = (addr[5] << 8) | addr[4] | (num ? XGMAC_ADDR_AE : 0);
+ writel(data, ioaddr + XGMAC_ADDR_HIGH(num));
+ data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
+ writel(data, ioaddr + XGMAC_ADDR_LOW(num));
+}
+
+static void xgmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
+ int num)
+{
+ u32 hi_addr, lo_addr;
+
+ /* Read the MAC address from the hardware */
+ hi_addr = readl(ioaddr + XGMAC_ADDR_HIGH(num));
+ lo_addr = readl(ioaddr + XGMAC_ADDR_LOW(num));
+
+ /* Extract the MAC address from the high and low words */
+ addr[0] = lo_addr & 0xff;
+ addr[1] = (lo_addr >> 8) & 0xff;
+ addr[2] = (lo_addr >> 16) & 0xff;
+ addr[3] = (lo_addr >> 24) & 0xff;
+ addr[4] = hi_addr & 0xff;
+ addr[5] = (hi_addr >> 8) & 0xff;
+}
+
+static int xgmac_set_flow_ctrl(struct xgmac_priv *priv, int rx, int tx)
+{
+ u32 reg;
+ unsigned int flow = 0;
+
+ priv->rx_pause = rx;
+ priv->tx_pause = tx;
+
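+ /* The FLOW_CTRL register and the OMR EFC bit are programmed
+ * together: both are set when either direction of flow control is
+ * requested and cleared otherwise.
+ */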
+ if (rx || tx) {
+ if (rx)
+ flow |= XGMAC_FLOW_CTRL_RFE;
+ if (tx)
+ flow |= XGMAC_FLOW_CTRL_TFE;
+
+ flow |= XGMAC_FLOW_CTRL_PLT | XGMAC_FLOW_CTRL_UP;
+ flow |= (PAUSE_TIME << XGMAC_FLOW_CTRL_PT_SHIFT);
+
+ writel(flow, priv->base + XGMAC_FLOW_CTRL);
+
+ reg = readl(priv->base + XGMAC_OMR);
+ reg |= XGMAC_OMR_EFC;
+ writel(reg, priv->base + XGMAC_OMR);
+ } else {
+ writel(0, priv->base + XGMAC_FLOW_CTRL);
+
+ reg = readl(priv->base + XGMAC_OMR);
+ reg &= ~XGMAC_OMR_EFC;
+ writel(reg, priv->base + XGMAC_OMR);
+ }
+
+ return 0;
+}
+
+static void xgmac_rx_refill(struct xgmac_priv *priv)
+{
+ struct xgmac_dma_desc *p;
+ dma_addr_t paddr;
+
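+ /* Refill while more than one slot is free so rx_head never catches
+ * up with rx_tail (a completely full ring would be indistinguishable
+ * from an empty one).
+ */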
+ while (dma_ring_space(priv->rx_head, priv->rx_tail, DMA_RX_RING_SZ) > 1) {
+ int entry = priv->rx_head;
+ struct sk_buff *skb;
+
+ p = priv->dma_rx + entry;
+
+ if (priv->rx_skbuff[entry] == NULL) {
+ skb = __skb_dequeue(&priv->rx_recycle);
+ if (skb == NULL)
+ skb = netdev_alloc_skb(priv->dev, priv->dma_buf_sz);
+ if (unlikely(skb == NULL))
+ break;
+
+ priv->rx_skbuff[entry] = skb;
+ paddr = dma_map_single(priv->device, skb->data,
+ priv->dma_buf_sz, DMA_FROM_DEVICE);
+ desc_set_buf_addr(p, paddr, priv->dma_buf_sz);
+ }
+
+ netdev_dbg(priv->dev, "rx ring: head %d, tail %d\n",
+ priv->rx_head, priv->rx_tail);
+
+ priv->rx_head = dma_ring_incr(priv->rx_head, DMA_RX_RING_SZ);
+ /* Ensure descriptor is in memory before handing to h/w */
+ wmb();
+ desc_set_rx_owner(p);
+ }
+}
+
+/**
+ * xgmac_dma_desc_rings_init - init the RX/TX descriptor rings
+ * @dev: net device structure
+ * Description: this function initializes the DMA RX/TX descriptors
+ * and allocates the socket buffers.
+ */
+static int xgmac_dma_desc_rings_init(struct net_device *dev)
+{
+ struct xgmac_priv *priv = netdev_priv(dev);
+ unsigned int bfsize;
+
+ /* Set the buffer size according to the MTU; for jumbo frames the
+ * buffer size needs to be bumped up accordingly.
+ */
+ bfsize = ALIGN(dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN + 64,
+ 64);
+
+ netdev_dbg(priv->dev, "mtu [%d] bfsize [%d]\n", dev->mtu, bfsize);
+
+ priv->rx_skbuff = kzalloc(sizeof(struct sk_buff *) * DMA_RX_RING_SZ,
+ GFP_KERNEL);
+ if (!priv->rx_skbuff)
+ return -ENOMEM;
+
+ priv->dma_rx = dma_alloc_coherent(priv->device,
+ DMA_RX_RING_SZ *
+ sizeof(struct xgmac_dma_desc),
+ &priv->dma_rx_phy,
+ GFP_KERNEL);
+ if (!priv->dma_rx)
+ goto err_dma_rx;
+
+ priv->tx_skbuff = kzalloc(sizeof(struct sk_buff *) * DMA_TX_RING_SZ,
+ GFP_KERNEL);
+ if (!priv->tx_skbuff)
+ goto err_tx_skb;
+
+ priv->dma_tx = dma_alloc_coherent(priv->device,
+ DMA_TX_RING_SZ *
+ sizeof(struct xgmac_dma_desc),
+ &priv->dma_tx_phy,
+ GFP_KERNEL);
+ if (!priv->dma_tx)
+ goto err_dma_tx;
+
+ netdev_dbg(priv->dev, "DMA desc rings: virt addr (Rx %p, "
+ "Tx %p)\n\tDMA phy addr (Rx 0x%08x, Tx 0x%08x)\n",
+ priv->dma_rx, priv->dma_tx,
+ (unsigned int)priv->dma_rx_phy, (unsigned int)priv->dma_tx_phy);
+
+ priv->rx_tail = 0;
+ priv->rx_head = 0;
+ priv->dma_buf_sz = bfsize;
+ desc_init_rx_desc(priv->dma_rx, DMA_RX_RING_SZ, priv->dma_buf_sz);
+ xgmac_rx_refill(priv);
+
+ priv->tx_tail = 0;
+ priv->tx_head = 0;
+ desc_init_tx_desc(priv->dma_tx, DMA_TX_RING_SZ);
+
+ writel(priv->dma_tx_phy, priv->base + XGMAC_DMA_TX_BASE_ADDR);
+ writel(priv->dma_rx_phy, priv->base + XGMAC_DMA_RX_BASE_ADDR);
+
+ return 0;
+
+err_dma_tx:
+ kfree(priv->tx_skbuff);
+err_tx_skb:
+ dma_free_coherent(priv->device,
+ DMA_RX_RING_SZ * sizeof(struct xgmac_dma_desc),
+ priv->dma_rx, priv->dma_rx_phy);
+err_dma_rx:
+ kfree(priv->rx_skbuff);
+ return -ENOMEM;
+}
+
+static void xgmac_free_rx_skbufs(struct xgmac_priv *priv)
+{
+ int i;
+ struct xgmac_dma_desc *p;
+
+ if (!priv->rx_skbuff)
+ return;
+
+ for (i = 0; i < DMA_RX_RING_SZ; i++) {
+ if (priv->rx_skbuff[i] == NULL)
+ continue;
+
+ p = priv->dma_rx + i;
+ dma_unmap_single(priv->device, desc_get_buf_addr(p),
+ priv->dma_buf_sz, DMA_FROM_DEVICE);
+ dev_kfree_skb_any(priv->rx_skbuff[i]);
+ priv->rx_skbuff[i] = NULL;
+ }
+}
+
+static void xgmac_free_tx_skbufs(struct xgmac_priv *priv)
+{
+ int i, f;
+ struct xgmac_dma_desc *p;
+
+ if (!priv->tx_skbuff)
+ return;
+
+ for (i = 0; i < DMA_TX_RING_SZ; i++) {
+ if (priv->tx_skbuff[i] == NULL)
+ continue;
+
+ p = priv->dma_tx + i;
+ dma_unmap_single(priv->device, desc_get_buf_addr(p),
+ desc_get_buf_len(p), DMA_TO_DEVICE);
+
+ for (f = 0; f < skb_shinfo(priv->tx_skbuff[i])->nr_frags; f++) {
+ p = priv->dma_tx + i++;
+ dma_unmap_page(priv->device, desc_get_buf_addr(p),
+ desc_get_buf_len(p), DMA_TO_DEVICE);
+ }
+
+ dev_kfree_skb_any(priv->tx_skbuff[i]);
+ priv->tx_skbuff[i] = NULL;
+ }
+}
+
+static void xgmac_free_dma_desc_rings(struct xgmac_priv *priv)
+{
+ /* Release the DMA TX/RX socket buffers */
+ xgmac_free_rx_skbufs(priv);
+ xgmac_free_tx_skbufs(priv);
+
+ /* Free the consistent memory allocated for descriptor rings */
+ if (priv->dma_tx) {
+ dma_free_coherent(priv->device,
+ DMA_TX_RING_SZ * sizeof(struct xgmac_dma_desc),
+ priv->dma_tx, priv->dma_tx_phy);
+ priv->dma_tx = NULL;
+ }
+ if (priv->dma_rx) {
+ dma_free_coherent(priv->device,
+ DMA_RX_RING_SZ * sizeof(struct xgmac_dma_desc),
+ priv->dma_rx, priv->dma_rx_phy);
+ priv->dma_rx = NULL;
+ }
+ kfree(priv->rx_skbuff);
+ priv->rx_skbuff = NULL;
+ kfree(priv->tx_skbuff);
+ priv->tx_skbuff = NULL;
+}
+
+/**
+ * xgmac_tx_complete:
+ * @priv: private driver structure
+ * Description: it reclaims resources after transmission completes.
+ */
+static void xgmac_tx_complete(struct xgmac_priv *priv)
+{
+ int i;
+ void __iomem *ioaddr = priv->base;
+
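+ /* Acknowledge the TX status bits (TU and the normal interrupt
+ * summary) before reclaiming, so events that arrive while we walk
+ * the ring re-assert them.
+ */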
+ writel(DMA_STATUS_TU | DMA_STATUS_NIS, ioaddr + XGMAC_DMA_STATUS);
+
+ while (dma_ring_cnt(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ)) {
+ unsigned int entry = priv->tx_tail;
+ struct sk_buff *skb = priv->tx_skbuff[entry];
+ struct xgmac_dma_desc *p = priv->dma_tx + entry;
+
+ /* Check if the descriptor is owned by the DMA. */
+ if (desc_get_owner(p))
+ break;
+
+ /* Verify tx error by looking at the last segment */
+ if (desc_get_tx_ls(p))
+ desc_get_tx_status(priv, p);
+
+ netdev_dbg(priv->dev, "tx ring: curr %d, dirty %d\n",
+ priv->tx_head, priv->tx_tail);
+
+ dma_unmap_single(priv->device, desc_get_buf_addr(p),
+ desc_get_buf_len(p), DMA_TO_DEVICE);
+
+ priv->tx_skbuff[entry] = NULL;
+ priv->tx_tail = dma_ring_incr(entry, DMA_TX_RING_SZ);
+
+ if (!skb)
+ continue;
+
+ /* tx_tail already points at the first fragment: unmap it first,
+ * then advance */
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ entry = priv->tx_tail;
+ p = priv->dma_tx + entry;
+
+ dma_unmap_page(priv->device, desc_get_buf_addr(p),
+ desc_get_buf_len(p), DMA_TO_DEVICE);
+
+ priv->tx_tail = dma_ring_incr(entry, DMA_TX_RING_SZ);
+ }
+
+ /*
+ * If there's room in the recycle queue (bounded by the RX ring
+ * size), put this skb back into the pool for reuse, provided it
+ * is still the right size.
+ */
+ if ((skb_queue_len(&priv->rx_recycle) <
+ DMA_RX_RING_SZ) &&
+ skb_recycle_check(skb, priv->dma_buf_sz))
+ __skb_queue_head(&priv->rx_recycle, skb);
+ else
+ dev_kfree_skb(skb);
+ }
+
+ if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) >
+ TX_THRESH)
+ netif_wake_queue(priv->dev);
+}
+
+/**
+ * xgmac_tx_err:
+ * @priv: pointer to the private device structure
+ * Description: it cleans the descriptors and restarts the transmission
+ * in case of errors.
+ */
+static void xgmac_tx_err(struct xgmac_priv *priv)
+{
+ u32 reg, value, inten;
+
+ netif_stop_queue(priv->dev);
+
+ inten = readl(priv->base + XGMAC_DMA_INTR_ENA);
+ writel(0, priv->base + XGMAC_DMA_INTR_ENA);
+
+ reg = readl(priv->base + XGMAC_DMA_CONTROL);
+ writel(reg & ~DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL);
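+ /* Poll DMA_STATUS bits 22:20 (the TX process state) until the
+ * engine reports stopped (0) or suspended (0x6) before touching
+ * the ring.
+ */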
+ do {
+ value = readl(priv->base + XGMAC_DMA_STATUS) & 0x700000;
+ } while (value && (value != 0x600000));
+
+ xgmac_free_tx_skbufs(priv);
+ desc_init_tx_desc(priv->dma_tx, DMA_TX_RING_SZ);
+ priv->tx_tail = 0;
+ priv->tx_head = 0;
+ writel(reg | DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL);
+
+ writel(DMA_STATUS_TU | DMA_STATUS_TPS | DMA_STATUS_NIS | DMA_STATUS_AIS,
+ priv->base + XGMAC_DMA_STATUS);
+ writel(inten, priv->base + XGMAC_DMA_INTR_ENA);
+
+ netif_wake_queue(priv->dev);
+}
+
+static int xgmac_hw_init(struct net_device *dev)
+{
+ u32 value, ctrl;
+ int limit;
+ struct xgmac_priv *priv = netdev_priv(dev);
+ void __iomem *ioaddr = priv->base;
+
+ /* Save the ctrl register value */
+ ctrl = readl(ioaddr + XGMAC_CONTROL) & XGMAC_CONTROL_SPD_MASK;
+
+ /* SW reset */
+ value = DMA_BUS_MODE_SFT_RESET;
+ writel(value, ioaddr + XGMAC_DMA_BUS_MODE);
+ limit = 15000;
+ while (limit-- &&
+ (readl(ioaddr + XGMAC_DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET))
+ cpu_relax();
+ if (limit < 0)
+ return -EBUSY;
+
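+ /* Burst configuration: programmable burst length of 16 beats for
+ * both TX and RX, fixed bursts, address-aligned beats and the
+ * alternate (extended) descriptor size.
+ */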
+ value = (0x10 << DMA_BUS_MODE_PBL_SHIFT) |
+ (0x10 << DMA_BUS_MODE_RPBL_SHIFT) |
+ DMA_BUS_MODE_FB | DMA_BUS_MODE_ATDS | DMA_BUS_MODE_AAL;
+ writel(value, ioaddr + XGMAC_DMA_BUS_MODE);
+
+ /* Enable interrupts */
+ writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
+ writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);
+
+ /* XGMAC requires AXI bus init. This is a 'magic number' for now */
+ writel(0x000100E, ioaddr + XGMAC_DMA_AXI_BUS);
+
+ ctrl |= XGMAC_CONTROL_DDIC | XGMAC_CONTROL_JE | XGMAC_CONTROL_ACS |
+ XGMAC_CONTROL_CAR;
+ if (dev->features & NETIF_F_RXCSUM)
+ ctrl |= XGMAC_CONTROL_IPC;
+ writel(ctrl, ioaddr + XGMAC_CONTROL);
+
+ value = DMA_CONTROL_DFF;
+ writel(value, ioaddr + XGMAC_DMA_CONTROL);
+
+ /* Set the HW DMA mode and the COE */
+ writel(XGMAC_OMR_TSF | XGMAC_OMR_RSF | XGMAC_OMR_RFD | XGMAC_OMR_RFA,
+ ioaddr + XGMAC_OMR);
+
+ /* Reset the MMC counters */
+ writel(1, ioaddr + XGMAC_MMC_CTRL);
+ return 0;
+}
+
+/**
+ * xgmac_open - open entry point of the driver
+ * @dev : pointer to the device structure.
+ * Description:
+ * This function is the open entry point of the driver.
+ * Return value:
+ * 0 on success or a negative errno value on failure.
+ */
+static int xgmac_open(struct net_device *dev)
+{
+ int ret;
+ struct xgmac_priv *priv = netdev_priv(dev);
+ void __iomem *ioaddr = priv->base;
+
+ /* Check that the MAC address is valid. If it is not, fall back to
+ * a randomly generated one so the device can still be brought up.
+ * The user can set a specific address later, e.g.:
+ * ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx */
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ random_ether_addr(dev->dev_addr);
+ netdev_dbg(priv->dev, "generated random MAC address %pM\n",
+ dev->dev_addr);
+ }
+
+ skb_queue_head_init(&priv->rx_recycle);
+ memset(&priv->xstats, 0, sizeof(struct xgmac_extra_stats));
+
+ /* Initialize the XGMAC and descriptors */
+ xgmac_hw_init(dev);
+ xgmac_set_mac_addr(ioaddr, dev->dev_addr, 0);
+ xgmac_set_flow_ctrl(priv, priv->rx_pause, priv->tx_pause);
+
+ ret = xgmac_dma_desc_rings_init(dev);
+ if (ret < 0)
+ return ret;
+
+ /* Enable the MAC Rx/Tx */
+ xgmac_mac_enable(ioaddr);
+
+ napi_enable(&priv->napi);
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+/**
+ * xgmac_stop - close entry point of the driver
+ * @dev : device pointer.
+ * Description:
+ * This is the stop entry point of the driver.
+ */
+static int xgmac_stop(struct net_device *dev)
+{
+ struct xgmac_priv *priv = netdev_priv(dev);
+
+ netif_stop_queue(dev);
+
+ if (readl(priv->base + XGMAC_DMA_INTR_ENA))
+ napi_disable(&priv->napi);
+
+ writel(0, priv->base + XGMAC_DMA_INTR_ENA);
+ skb_queue_purge(&priv->rx_recycle);
+
+ /* Disable the MAC core */
+ xgmac_mac_disable(priv->base);
+
+ /* Release and free the Rx/Tx resources */
+ xgmac_free_dma_desc_rings(priv);
+
+ return 0;
+}
+
+/**
+ * xgmac_xmit:
+ * @skb : the socket buffer
+ * @dev : device pointer
+ * Description : Tx entry point of the driver.
+ */
+static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct xgmac_priv *priv = netdev_priv(dev);
+ unsigned int entry;
+ int i;
+ int nfrags = skb_shinfo(skb)->nr_frags;
+ struct xgmac_dma_desc *desc, *first;
+ unsigned int desc_flags;
+ unsigned int len;
+ dma_addr_t paddr;
+
+ if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) <
+ (nfrags + 1)) {
+ writel(DMA_INTR_DEFAULT_MASK | DMA_INTR_ENA_TIE,
+ priv->base + XGMAC_DMA_INTR_ENA);
+ netif_stop_queue(dev);
+ return NETDEV_TX_BUSY;
+ }
+
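+ /* Build the descriptor chain; ownership of the first descriptor
+ * is only set at the very end (after a wmb) so the DMA never sees
+ * a partially built chain.
+ */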
+ desc_flags = (skb->ip_summed == CHECKSUM_PARTIAL) ?
+ TXDESC_CSUM_ALL : 0;
+ entry = priv->tx_head;
+ desc = priv->dma_tx + entry;
+ first = desc;
+
+ len = skb_headlen(skb);
+ paddr = dma_map_single(priv->device, skb->data, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(priv->device, paddr)) {
+ /* skb is freed here, so report it as handled */
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+ priv->tx_skbuff[entry] = skb;
+ desc_set_buf_addr_and_size(desc, paddr, len);
+
+ for (i = 0; i < nfrags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ len = frag->size;
+
+ paddr = skb_frag_dma_map(priv->device, frag, 0, len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(priv->device, paddr)) {
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ entry = dma_ring_incr(entry, DMA_TX_RING_SZ);
+ desc = priv->dma_tx + entry;
+ priv->tx_skbuff[entry] = NULL;
+
+ desc_set_buf_addr_and_size(desc, paddr, len);
+ if (i < (nfrags - 1))
+ desc_set_tx_owner(desc, desc_flags);
+ }
+
+ /* Interrupt on completion only for the last segment */
+ if (desc != first)
+ desc_set_tx_owner(desc, desc_flags |
+ TXDESC_LAST_SEG | TXDESC_INTERRUPT);
+ else
+ desc_flags |= TXDESC_LAST_SEG | TXDESC_INTERRUPT;
+
+ /* Set owner on first desc last to avoid race condition */
+ wmb();
+ desc_set_tx_owner(first, desc_flags | TXDESC_FIRST_SEG);
+
+ priv->tx_head = dma_ring_incr(entry, DMA_TX_RING_SZ);
+
+ writel(1, priv->base + XGMAC_DMA_TX_POLL);
+
+ return NETDEV_TX_OK;
+}
+
+static int xgmac_rx(struct xgmac_priv *priv, int limit)
+{
+ unsigned int entry;
+ unsigned int count = 0;
+ struct xgmac_dma_desc *p;
+
+ while (count < limit) {
+ int ip_checksum;
+ struct sk_buff *skb;
+ int frame_len;
+
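+ /* Acknowledge the receive interrupt status on every pass so frames
+ * that complete while we are polling re-assert it.
+ */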
+ writel(DMA_STATUS_RI | DMA_STATUS_NIS,
+ priv->base + XGMAC_DMA_STATUS);
+
+ entry = priv->rx_tail;
+ p = priv->dma_rx + entry;
+ if (desc_get_owner(p))
+ break;
+
+ count++;
+ priv->rx_tail = dma_ring_incr(priv->rx_tail, DMA_RX_RING_SZ);
+
+ /* read the status of the incoming frame */
+ ip_checksum = desc_get_rx_status(priv, p);
+ if (ip_checksum < 0)
+ continue;
+
+ skb = priv->rx_skbuff[entry];
+ if (unlikely(!skb)) {
+ netdev_err(priv->dev, "Inconsistent Rx descriptor chain\n");
+ break;
+ }
+ priv->rx_skbuff[entry] = NULL;
+
+ frame_len = desc_get_rx_frame_len(p);
+ netdev_dbg(priv->dev, "RX frame size %d, COE status: %d\n",
+ frame_len, ip_checksum);
+
+ skb_put(skb, frame_len);
+ dma_unmap_single(priv->device, desc_get_buf_addr(p),
+ frame_len, DMA_FROM_DEVICE);
+
+ skb->protocol = eth_type_trans(skb, priv->dev);
+ skb->ip_summed = ip_checksum;
+ if (ip_checksum == CHECKSUM_NONE)
+ netif_receive_skb(skb);
+ else
+ napi_gro_receive(&priv->napi, skb);
+ }
+
+ xgmac_rx_refill(priv);
+
+ writel(1, priv->base + XGMAC_DMA_RX_POLL);
+
+ return count;
+}
+
+/**
+ * xgmac_poll - xgmac poll method (NAPI)
+ * @napi : pointer to the napi structure.
+ * @budget : maximum number of packets that the current CPU can receive from
+ * all interfaces.
+ * Description :
+ * This function implements the reception process and also reclaims
+ * completed TX descriptors.
+ */
+static int xgmac_poll(struct napi_struct *napi, int budget)
+{
+ struct xgmac_priv *priv = container_of(napi,
+ struct xgmac_priv, napi);
+ int work_done = 0;
+
+ xgmac_tx_complete(priv);
+ work_done = xgmac_rx(priv, budget);
+
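+ /* Ring drained within budget: leave polling mode and re-enable
+ * the full DMA interrupt mask.
+ */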
+ if (work_done < budget) {
+ napi_complete(napi);
+ writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_INTR_ENA);
+ }
+ return work_done;
+}
+
+/**
+ * xgmac_tx_timeout
+ * @dev : Pointer to net device structure
+ * Description: this function is called when a packet transmission fails to
+ * complete within a reasonable time. The driver will mark the error in the
+ * netdev structure and arrange for the device to be reset to a sane state
+ * in order to transmit a new packet.
+ */
+static void xgmac_tx_timeout(struct net_device *dev)
+{
+ struct xgmac_priv *priv = netdev_priv(dev);
+
+ /* Clear Tx resources and restart transmitting again */
+ xgmac_tx_err(priv);
+}
+
+/**
+ * xgmac_set_rx_mode - entry point for multicast addressing
+ * @dev : pointer to the device structure
+ * Description:
+ * This function is a driver entry point which gets called by the kernel
+ * whenever multicast addresses must be enabled/disabled.
+ * Return value:
+ * void.
+ */
+static void xgmac_set_rx_mode(struct net_device *dev)
+{
+ int i;
+ struct xgmac_priv *priv = netdev_priv(dev);
+ void __iomem *ioaddr = priv->base;
+ unsigned int value = 0;
+ u32 hash_filter[XGMAC_NUM_HASH];
+ int reg = 1;
+ struct netdev_hw_addr *ha;
+ bool use_hash = false;
+
+ netdev_dbg(priv->dev, "# mcasts %d, # unicast %d\n",
+ netdev_mc_count(dev), netdev_uc_count(dev));
+
+ if (dev->flags & IFF_PROMISC) {
+ writel(XGMAC_FRAME_FILTER_PR, ioaddr + XGMAC_FRAME_FILTER);
+ return;
+ }
+
+ memset(hash_filter, 0, sizeof(hash_filter));
+
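+ /* Use the perfect-match address registers first and fall back to
+ * the hash filter once there are more addresses than registers.
+ */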
+ if (netdev_uc_count(dev) > XGMAC_MAX_FILTER_ADDR) {
+ use_hash = true;
+ value |= XGMAC_FRAME_FILTER_HUC | XGMAC_FRAME_FILTER_HPF;
+ }
+ netdev_for_each_uc_addr(ha, dev) {
+ if (use_hash) {
+ u32 bit_nr = ~ether_crc(ETH_ALEN, ha->addr) >> 23;
+
+ /* The most significant 4 bits determine the register to
+ * use (H/L) while the other 5 bits determine the bit
+ * within the register. */
+ hash_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+ } else {
+ xgmac_set_mac_addr(ioaddr, ha->addr, reg);
+ reg++;
+ }
+ }
+
+ if (dev->flags & IFF_ALLMULTI) {
+ value |= XGMAC_FRAME_FILTER_PM;
+ goto out;
+ }
+
+ if ((netdev_mc_count(dev) + reg - 1) > XGMAC_MAX_FILTER_ADDR) {
+ use_hash = true;
+ value |= XGMAC_FRAME_FILTER_HMC | XGMAC_FRAME_FILTER_HPF;
+ }
+ netdev_for_each_mc_addr(ha, dev) {
+ if (use_hash) {
+ u32 bit_nr = ~ether_crc(ETH_ALEN, ha->addr) >> 23;
+
+ /* The most significant 4 bits determine the register to
+ * use (H/L) while the other 5 bits determine the bit
+ * within the register. */
+ hash_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+ } else {
+ xgmac_set_mac_addr(ioaddr, ha->addr, reg);
+ reg++;
+ }
+ }
+
+out:
+ for (i = 0; i < XGMAC_NUM_HASH; i++)
+ writel(hash_filter[i], ioaddr + XGMAC_HASH(i));
+
+ writel(value, ioaddr + XGMAC_FRAME_FILTER);
+}
+
+/**
+ * xgmac_change_mtu - entry point to change MTU size for the device.
+ * @dev : device pointer.
+ * @new_mtu : the new MTU size for the device.
+ * Description: the Maximum Transmission Unit (MTU) is used by the network layer
+ * to drive packet transmission. Ethernet has an MTU of 1500 octets
+ * (ETH_DATA_LEN). This value can be changed with ifconfig.
+ * Return value:
+ * 0 on success or a negative errno value on failure.
+ */
+static int xgmac_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct xgmac_priv *priv = netdev_priv(dev);
+ int old_mtu;
+
+ if ((new_mtu < 46) || (new_mtu > MAX_MTU)) {
+ netdev_err(priv->dev, "invalid MTU, max MTU is: %d\n", MAX_MTU);
+ return -EINVAL;
+ }
+
+ old_mtu = dev->mtu;
+ dev->mtu = new_mtu;
+
+ /* return early if the buffer sizes will not change */
+ if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
+ return 0;
+ if (old_mtu == new_mtu)
+ return 0;
+
+ /* Stop everything, get ready to change the MTU */
+ if (!netif_running(dev))
+ return 0;
+
+ /* Bring the interface down and then back up */
+ xgmac_stop(dev);
+ return xgmac_open(dev);
+}
+
+static irqreturn_t xgmac_pmt_interrupt(int irq, void *dev_id)
+{
+ u32 intr_status;
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct xgmac_priv *priv = netdev_priv(dev);
+ void __iomem *ioaddr = priv->base;
+
+ intr_status = readl(ioaddr + XGMAC_INT_STAT);
+ if (intr_status & XGMAC_INT_STAT_PMT) {
+ netdev_dbg(priv->dev, "received Magic frame\n");
+ /* clear the PMT bits 5 and 6 by reading the PMT */
+ readl(ioaddr + XGMAC_PMT);
+ }
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t xgmac_interrupt(int irq, void *dev_id)
+{
+ u32 intr_status;
+ bool tx_err = false;
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct xgmac_priv *priv = netdev_priv(dev);
+ struct xgmac_extra_stats *x = &priv->xstats;
+
+ /* read the status register (CSR5) */
+ intr_status = readl(priv->base + XGMAC_DMA_STATUS);
+ intr_status &= readl(priv->base + XGMAC_DMA_INTR_ENA);
+ writel(intr_status, priv->base + XGMAC_DMA_STATUS);
+
+ /* Handle abnormal DMA events reported in the status (CSR5) register */
+ if (unlikely(intr_status & DMA_STATUS_AIS)) {
+ if (intr_status & DMA_STATUS_TJT) {
+ netdev_err(priv->dev, "transmit jabber\n");
+ x->tx_jabber++;
+ }
+ if (intr_status & DMA_STATUS_RU)
+ x->rx_buf_unav++;
+ if (intr_status & DMA_STATUS_RPS) {
+ netdev_err(priv->dev, "receive process stopped\n");
+ x->rx_process_stopped++;
+ }
+ if (intr_status & DMA_STATUS_ETI) {
+ netdev_err(priv->dev, "transmit early interrupt\n");
+ x->tx_early++;
+ }
+ if (intr_status & DMA_STATUS_TPS) {
+ netdev_err(priv->dev, "transmit process stopped\n");
+ x->tx_process_stopped++;
+ tx_err = true;
+ }
+ if (intr_status & DMA_STATUS_FBI) {
+ netdev_err(priv->dev, "fatal bus error\n");
+ x->fatal_bus_error++;
+ tx_err = true;
+ }
+
+ if (tx_err)
+ xgmac_tx_err(priv);
+ }
+
+ /* TX/RX NORMAL interrupts */
+ if (intr_status & (DMA_STATUS_RI | DMA_STATUS_TU)) {
+ writel(DMA_INTR_ABNORMAL, priv->base + XGMAC_DMA_INTR_ENA);
+ napi_schedule(&priv->napi);
+ }
+
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/* Polling receive - used by NETCONSOLE and other diagnostic tools
+ * to allow network I/O with interrupts disabled. */
+static void xgmac_poll_controller(struct net_device *dev)
+{
+ disable_irq(dev->irq);
+ xgmac_interrupt(dev->irq, dev);
+ enable_irq(dev->irq);
+}
+#endif
+
+static struct rtnl_link_stats64 *
+xgmac_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *storage)
+{
+ struct xgmac_priv *priv = netdev_priv(dev);
+ void __iomem *base = priv->base;
+ u32 count;
+
+ spin_lock_bh(&priv->stats_lock);
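+ /* Freeze the MMC counters so the reads below see a consistent
+ * snapshot; they are unfrozen again before returning.
+ */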
+ writel(XGMAC_MMC_CTRL_CNT_FRZ, base + XGMAC_MMC_CTRL);
+
+ storage->rx_bytes = readl(base + XGMAC_MMC_RXOCTET_G_LO);
+ storage->rx_bytes |= (u64)(readl(base + XGMAC_MMC_RXOCTET_G_HI)) << 32;
+
+ storage->rx_packets = readl(base + XGMAC_MMC_RXFRAME_GB_LO);
+ storage->multicast = readl(base + XGMAC_MMC_RXMCFRAME_G);
+ storage->rx_crc_errors = readl(base + XGMAC_MMC_RXCRCERR);
+ storage->rx_length_errors = readl(base + XGMAC_MMC_RXLENGTHERR);
+ storage->rx_missed_errors = readl(base + XGMAC_MMC_RXOVERFLOW);
+
+ storage->tx_bytes = readl(base + XGMAC_MMC_TXOCTET_G_LO);
+ storage->tx_bytes |= (u64)(readl(base + XGMAC_MMC_TXOCTET_G_HI)) << 32;
+
+ count = readl(base + XGMAC_MMC_TXFRAME_GB_LO);
+ storage->tx_errors = count - readl(base + XGMAC_MMC_TXFRAME_G_LO);
+ storage->tx_packets = count;
+ storage->tx_fifo_errors = readl(base + XGMAC_MMC_TXUNDERFLOW);
+
+ writel(0, base + XGMAC_MMC_CTRL);
+ spin_unlock_bh(&priv->stats_lock);
+ return storage;
+}
+
+static int xgmac_set_mac_address(struct net_device *dev, void *p)
+{
+ struct xgmac_priv *priv = netdev_priv(dev);
+ void __iomem *ioaddr = priv->base;
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+
+ xgmac_set_mac_addr(ioaddr, dev->dev_addr, 0);
+
+ return 0;
+}
+
+static int xgmac_set_features(struct net_device *dev, netdev_features_t features)
+{
+ u32 ctrl;
+ struct xgmac_priv *priv = netdev_priv(dev);
+ void __iomem *ioaddr = priv->base;
+ u32 changed = dev->features ^ features;
+
+ if (!(changed & NETIF_F_RXCSUM))
+ return 0;
+
+ ctrl = readl(ioaddr + XGMAC_CONTROL);
+ if (features & NETIF_F_RXCSUM)
+ ctrl |= XGMAC_CONTROL_IPC;
+ else
+ ctrl &= ~XGMAC_CONTROL_IPC;
+ writel(ctrl, ioaddr + XGMAC_CONTROL);
+
+ return 0;
+}
+
+static const struct net_device_ops xgmac_netdev_ops = {
+ .ndo_open = xgmac_open,
+ .ndo_start_xmit = xgmac_xmit,
+ .ndo_stop = xgmac_stop,
+ .ndo_change_mtu = xgmac_change_mtu,
+ .ndo_set_rx_mode = xgmac_set_rx_mode,
+ .ndo_tx_timeout = xgmac_tx_timeout,
+ .ndo_get_stats64 = xgmac_get_stats64,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = xgmac_poll_controller,
+#endif
+ .ndo_set_mac_address = xgmac_set_mac_address,
+ .ndo_set_features = xgmac_set_features,
+};
+
+static int xgmac_ethtool_getsettings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
+{
+ cmd->autoneg = 0;
+ cmd->duplex = DUPLEX_FULL;
+ ethtool_cmd_speed_set(cmd, 10000);
+ cmd->supported = 0;
+ cmd->advertising = 0;
+ cmd->transceiver = XCVR_INTERNAL;
+ return 0;
+}
+
+static void xgmac_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct xgmac_priv *priv = netdev_priv(netdev);
+
+ pause->rx_pause = priv->rx_pause;
+ pause->tx_pause = priv->tx_pause;
+}
+
+static int xgmac_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct xgmac_priv *priv = netdev_priv(netdev);
+
+ if (pause->autoneg)
+ return -EINVAL;
+
+ return xgmac_set_flow_ctrl(priv, pause->rx_pause, pause->tx_pause);
+}
+
+struct xgmac_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int stat_offset;
+ bool is_reg;
+};
+
+#define XGMAC_STAT(m) \
+ { #m, offsetof(struct xgmac_priv, xstats.m), false }
+#define XGMAC_HW_STAT(m, reg_offset) \
+ { #m, reg_offset, true }
+
+static const struct xgmac_stats xgmac_gstrings_stats[] = {
+ XGMAC_STAT(tx_frame_flushed),
+ XGMAC_STAT(tx_payload_error),
+ XGMAC_STAT(tx_ip_header_error),
+ XGMAC_STAT(tx_local_fault),
+ XGMAC_STAT(tx_remote_fault),
+ XGMAC_STAT(tx_early),
+ XGMAC_STAT(tx_process_stopped),
+ XGMAC_STAT(tx_jabber),
+ XGMAC_STAT(rx_buf_unav),
+ XGMAC_STAT(rx_process_stopped),
+ XGMAC_STAT(rx_payload_error),
+ XGMAC_STAT(rx_ip_header_error),
+ XGMAC_STAT(rx_da_filter_fail),
+ XGMAC_STAT(rx_sa_filter_fail),
+ XGMAC_STAT(fatal_bus_error),
+ XGMAC_HW_STAT(rx_watchdog, XGMAC_MMC_RXWATCHDOG),
+ XGMAC_HW_STAT(tx_vlan, XGMAC_MMC_TXVLANFRAME),
+ XGMAC_HW_STAT(rx_vlan, XGMAC_MMC_RXVLANFRAME),
+ XGMAC_HW_STAT(tx_pause, XGMAC_MMC_TXPAUSEFRAME),
+ XGMAC_HW_STAT(rx_pause, XGMAC_MMC_RXPAUSEFRAME),
+};
+#define XGMAC_STATS_LEN ARRAY_SIZE(xgmac_gstrings_stats)
+
+static void xgmac_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *dummy,
+ u64 *data)
+{
+ struct xgmac_priv *priv = netdev_priv(dev);
+ void *p = priv;
+ int i;
+
+ for (i = 0; i < XGMAC_STATS_LEN; i++) {
+ if (xgmac_gstrings_stats[i].is_reg)
+ *data++ = readl(priv->base +
+ xgmac_gstrings_stats[i].stat_offset);
+ else
+ *data++ = *(u32 *)(p +
+ xgmac_gstrings_stats[i].stat_offset);
+ }
+}
+
+static int xgmac_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return XGMAC_STATS_LEN;
+ default:
+ return -EINVAL;
+ }
+}
+
+static void xgmac_get_strings(struct net_device *dev, u32 stringset,
+ u8 *data)
+{
+ int i;
+ u8 *p = data;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < XGMAC_STATS_LEN; i++) {
+ memcpy(p, xgmac_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+}
+
+static void xgmac_get_wol(struct net_device *dev,
+ struct ethtool_wolinfo *wol)
+{
+ struct xgmac_priv *priv = netdev_priv(dev);
+
+ if (device_can_wakeup(priv->device)) {
+ wol->supported = WAKE_MAGIC | WAKE_UCAST;
+ wol->wolopts = priv->wolopts;
+ }
+}
+
+static int xgmac_set_wol(struct net_device *dev,
+ struct ethtool_wolinfo *wol)
+{
+ struct xgmac_priv *priv = netdev_priv(dev);
+ u32 support = WAKE_MAGIC | WAKE_UCAST;
+
+ if (!device_can_wakeup(priv->device))
+ return -ENOTSUPP;
+
+ if (wol->wolopts & ~support)
+ return -EINVAL;
+
+ priv->wolopts = wol->wolopts;
+
+ if (wol->wolopts) {
+ device_set_wakeup_enable(priv->device, 1);
+ enable_irq_wake(dev->irq);
+ } else {
+ device_set_wakeup_enable(priv->device, 0);
+ disable_irq_wake(dev->irq);
+ }
+
+ return 0;
+}
+
+static const struct ethtool_ops xgmac_ethtool_ops = {
+ .get_settings = xgmac_ethtool_getsettings,
+ .get_link = ethtool_op_get_link,
+ .get_pauseparam = xgmac_get_pauseparam,
+ .set_pauseparam = xgmac_set_pauseparam,
+ .get_ethtool_stats = xgmac_get_ethtool_stats,
+ .get_strings = xgmac_get_strings,
+ .get_wol = xgmac_get_wol,
+ .set_wol = xgmac_set_wol,
+ .get_sset_count = xgmac_get_sset_count,
+};
+
+/**
+ * xgmac_probe
+ * @pdev: platform device pointer
+ * Description: the driver is initialized through platform_device.
+ */
+static int xgmac_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct resource *res;
+ struct net_device *ndev = NULL;
+ struct xgmac_priv *priv = NULL;
+ u32 uid;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ if (!request_mem_region(res->start, resource_size(res), pdev->name))
+ return -EBUSY;
+
+ ndev = alloc_etherdev(sizeof(struct xgmac_priv));
+ if (!ndev) {
+ ret = -ENOMEM;
+ goto err_alloc;
+ }
+
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+ priv = netdev_priv(ndev);
+ platform_set_drvdata(pdev, ndev);
+ ether_setup(ndev);
+ ndev->netdev_ops = &xgmac_netdev_ops;
+ SET_ETHTOOL_OPS(ndev, &xgmac_ethtool_ops);
+ spin_lock_init(&priv->stats_lock);
+
+ priv->device = &pdev->dev;
+ priv->dev = ndev;
+ priv->rx_pause = 1;
+ priv->tx_pause = 1;
+
+ priv->base = ioremap(res->start, resource_size(res));
+ if (!priv->base) {
+ netdev_err(ndev, "ioremap failed\n");
+ ret = -ENOMEM;
+ goto err_io;
+ }
+
+ uid = readl(priv->base + XGMAC_VERSION);
+ netdev_info(ndev, "h/w version is 0x%x\n", uid);
+
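+ /* Mask all DMA interrupts until the handler has been installed */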
+ writel(0, priv->base + XGMAC_DMA_INTR_ENA);
+ ndev->irq = platform_get_irq(pdev, 0);
+ if (ndev->irq == -ENXIO) {
+ netdev_err(ndev, "No irq resource\n");
+ ret = ndev->irq;
+ goto err_irq;
+ }
+
+ ret = request_irq(ndev->irq, xgmac_interrupt, 0,
+ dev_name(&pdev->dev), ndev);
+ if (ret < 0) {
+ netdev_err(ndev, "Could not request irq %d - ret %d\n",
+ ndev->irq, ret);
+ goto err_irq;
+ }
+
+ priv->pmt_irq = platform_get_irq(pdev, 1);
+ if (priv->pmt_irq == -ENXIO) {
+ netdev_err(ndev, "No pmt irq resource\n");
+ ret = priv->pmt_irq;
+ goto err_pmt_irq;
+ }
+
+ ret = request_irq(priv->pmt_irq, xgmac_pmt_interrupt, 0,
+ dev_name(&pdev->dev), ndev);
+ if (ret < 0) {
+ netdev_err(ndev, "Could not request irq %d - ret %d\n",
+ priv->pmt_irq, ret);
+ goto err_pmt_irq;
+ }
+
+ device_set_wakeup_capable(&pdev->dev, 1);
+ if (device_can_wakeup(priv->device))
+ priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */
+
+ ndev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA;
+ if (readl(priv->base + XGMAC_DMA_HW_FEATURE) & DMA_HW_FEAT_TXCOESEL)
+ ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_RXCSUM;
+ ndev->features |= ndev->hw_features;
+ ndev->priv_flags |= IFF_UNICAST_FLT;
+
+ /* Get the MAC address */
+ xgmac_get_mac_addr(priv->base, ndev->dev_addr, 0);
+ if (!is_valid_ether_addr(ndev->dev_addr))
+ netdev_warn(ndev, "MAC address %pM not valid\n",
+ ndev->dev_addr);
+
+ netif_napi_add(ndev, &priv->napi, xgmac_poll, 64);
+ ret = register_netdev(ndev);
+ if (ret)
+ goto err_reg;
+
+ return 0;
+
+err_reg:
+ netif_napi_del(&priv->napi);
+ free_irq(priv->pmt_irq, ndev);
+err_pmt_irq:
+ free_irq(ndev->irq, ndev);
+err_irq:
+ iounmap(priv->base);
+err_io:
+ free_netdev(ndev);
+err_alloc:
+ release_mem_region(res->start, resource_size(res));
+ platform_set_drvdata(pdev, NULL);
+ return ret;
+}
+
+/**
+ * xgmac_remove
+ * @pdev: platform device pointer
+ * Description: this function disables the MAC RX/TX, frees the IRQ lines,
+ * unregisters the network device and unmaps the allocated memory.
+ */
+static int xgmac_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct xgmac_priv *priv = netdev_priv(ndev);
+ struct resource *res;
+
+ xgmac_mac_disable(priv->base);
+
+ /* Free the IRQ lines */
+ free_irq(ndev->irq, ndev);
+ free_irq(priv->pmt_irq, ndev);
+
+ platform_set_drvdata(pdev, NULL);
+ unregister_netdev(ndev);
+ netif_napi_del(&priv->napi);
+
+ iounmap(priv->base);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(res->start, resource_size(res));
+
+ free_netdev(ndev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static void xgmac_pmt(void __iomem *ioaddr, unsigned long mode)
+{
+ unsigned int pmt = 0;
+
+ if (mode & WAKE_MAGIC)
+ pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_MAGIC_PKT;
+ if (mode & WAKE_UCAST)
+ pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_GLBL_UNICAST;
+
+ writel(pmt, ioaddr + XGMAC_PMT);
+}
+
+static int xgmac_suspend(struct device *dev)
+{
+ struct net_device *ndev = platform_get_drvdata(to_platform_device(dev));
+ struct xgmac_priv *priv = netdev_priv(ndev);
+ u32 value;
+
+ if (!ndev || !netif_running(ndev))
+ return 0;
+
+ netif_device_detach(ndev);
+ napi_disable(&priv->napi);
+ writel(0, priv->base + XGMAC_DMA_INTR_ENA);
+
+ if (device_may_wakeup(priv->device)) {
+ /* Stop TX/RX DMA Only */
+ value = readl(priv->base + XGMAC_DMA_CONTROL);
+ value &= ~(DMA_CONTROL_ST | DMA_CONTROL_SR);
+ writel(value, priv->base + XGMAC_DMA_CONTROL);
+
+ xgmac_pmt(priv->base, priv->wolopts);
+ } else
+ xgmac_mac_disable(priv->base);
+
+ return 0;
+}
+
+static int xgmac_resume(struct device *dev)
+{
+ struct net_device *ndev = platform_get_drvdata(to_platform_device(dev));
+ struct xgmac_priv *priv = netdev_priv(ndev);
+ void __iomem *ioaddr = priv->base;
+
+ if (!netif_running(ndev))
+ return 0;
+
+ xgmac_pmt(ioaddr, 0);
+
+ /* Enable the MAC and DMA */
+ xgmac_mac_enable(ioaddr);
+ writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
+ writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);
+
+ netif_device_attach(ndev);
+ napi_enable(&priv->napi);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(xgmac_pm_ops, xgmac_suspend, xgmac_resume);
+#define XGMAC_PM_OPS (&xgmac_pm_ops)
+#else
+#define XGMAC_PM_OPS NULL
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct of_device_id xgmac_of_match[] = {
+ { .compatible = "calxeda,hb-xgmac", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, xgmac_of_match);
+
+static struct platform_driver xgmac_driver = {
+ .driver = {
+ .name = "calxedaxgmac",
+ .of_match_table = xgmac_of_match,
+ .pm = XGMAC_PM_OPS,
+ },
+ .probe = xgmac_probe,
+ .remove = xgmac_remove,
+};
+
+module_platform_driver(xgmac_driver);
+
+MODULE_AUTHOR("Calxeda, Inc.");
+MODULE_DESCRIPTION("Calxeda 10G XGMAC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index ca26d97171bd..1d17c92f2dda 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -434,10 +434,10 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct adapter *adapter = dev->ml_priv;
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->fw_version, "N/A");
- strcpy(info->bus_info, pci_name(adapter->pdev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(adapter->pdev),
+ sizeof(info->bus_info));
}
static int get_sset_count(struct net_device *dev, int sset)
@@ -849,7 +849,8 @@ static int t1_set_mac_addr(struct net_device *dev, void *p)
return 0;
}
-static u32 t1_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t t1_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
/*
* Since there is no support for separate rx/tx vlan accel
@@ -863,9 +864,9 @@ static u32 t1_fix_features(struct net_device *dev, u32 features)
return features;
}
-static int t1_set_features(struct net_device *dev, u32 features)
+static int t1_set_features(struct net_device *dev, netdev_features_t features)
{
- u32 changed = dev->features ^ features;
+ netdev_features_t changed = dev->features ^ features;
struct adapter *adapter = dev->ml_priv;
if (changed & NETIF_F_HW_VLAN_RX)
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c
index f9b602300040..47a84359d4e4 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
@@ -742,7 +742,7 @@ static inline void setup_ring_params(struct adapter *adapter, u64 addr,
/*
* Enable/disable VLAN acceleration.
*/
-void t1_vlan_mode(struct adapter *adapter, u32 features)
+void t1_vlan_mode(struct adapter *adapter, netdev_features_t features)
{
struct sge *sge = adapter->sge;
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.h b/drivers/net/ethernet/chelsio/cxgb/sge.h
index e03980bcdd65..b9bf16b385f7 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.h
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.h
@@ -79,7 +79,7 @@ irqreturn_t t1_interrupt(int irq, void *cookie);
int t1_poll(struct napi_struct *, int);
netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev);
-void t1_vlan_mode(struct adapter *adapter, u32 features);
+void t1_vlan_mode(struct adapter *adapter, netdev_features_t features);
void t1_sge_start(struct sge *);
void t1_sge_stop(struct sge *);
int t1_sge_intr_error_handler(struct sge *);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 4d15c8f99c3b..857cc254cab8 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -1576,12 +1576,11 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
t3_get_tp_version(adapter, &tp_vers);
spin_unlock(&adapter->stats_lock);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, pci_name(adapter->pdev));
- if (!fw_vers)
- strcpy(info->fw_version, "N/A");
- else {
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(adapter->pdev),
+ sizeof(info->bus_info));
+ if (fw_vers)
snprintf(info->fw_version, sizeof(info->fw_version),
"%s %u.%u.%u TP %u.%u.%u",
G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
@@ -1591,7 +1590,6 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
G_TP_VERSION_MAJOR(tp_vers),
G_TP_VERSION_MINOR(tp_vers),
G_TP_VERSION_MICRO(tp_vers));
- }
}
static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
@@ -2531,7 +2529,7 @@ static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
}
}
-static void cxgb_vlan_mode(struct net_device *dev, u32 features)
+static void cxgb_vlan_mode(struct net_device *dev, netdev_features_t features)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
@@ -2552,7 +2550,8 @@ static void cxgb_vlan_mode(struct net_device *dev, u32 features)
t3_synchronize_rx(adapter, pi);
}
-static u32 cxgb_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t cxgb_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
/*
* Since there is no support for separate rx/tx vlan accel
@@ -2566,9 +2565,9 @@ static u32 cxgb_fix_features(struct net_device *dev, u32 features)
return features;
}
-static int cxgb_set_features(struct net_device *dev, u32 features)
+static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
{
- u32 changed = dev->features ^ features;
+ netdev_features_t changed = dev->features ^ features;
if (changed & NETIF_F_HW_VLAN_RX)
cxgb_vlan_mode(dev, features);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
index 90ff1318cc05..65e4b280619a 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
@@ -969,7 +969,7 @@ static int nb_callback(struct notifier_block *self, unsigned long event,
case (NETEVENT_REDIRECT):{
struct netevent_redirect *nr = ctx;
cxgb_redirect(nr->old, nr->new);
- cxgb_neigh_update(dst_get_neighbour(nr->new));
+ cxgb_neigh_update(dst_get_neighbour_noref(nr->new));
break;
}
default:
@@ -1072,8 +1072,11 @@ static int is_offloading(struct net_device *dev)
static void cxgb_neigh_update(struct neighbour *neigh)
{
- struct net_device *dev = neigh->dev;
+ struct net_device *dev;
+ if (!neigh)
+ return;
+ dev = neigh->dev;
if (dev && (is_offloading(dev))) {
struct t3cdev *tdev = dev2t3cdev(dev);
@@ -1107,6 +1110,7 @@ static void set_l2t_ix(struct t3cdev *tdev, u32 tid, struct l2t_entry *e)
static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
{
struct net_device *olddev, *newdev;
+ struct neighbour *n;
struct tid_info *ti;
struct t3cdev *tdev;
u32 tid;
@@ -1114,8 +1118,16 @@ static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
struct l2t_entry *e;
struct t3c_tid_entry *te;
- olddev = dst_get_neighbour(old)->dev;
- newdev = dst_get_neighbour(new)->dev;
+ n = dst_get_neighbour_noref(old);
+ if (!n)
+ return;
+ olddev = n->dev;
+
+ n = dst_get_neighbour_noref(new);
+ if (!n)
+ return;
+ newdev = n->dev;
+
if (!is_offloading(olddev))
return;
if (!is_offloading(newdev)) {
@@ -1132,7 +1144,7 @@ static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
}
/* Add new L2T entry */
- e = t3_l2t_get(tdev, dst_get_neighbour(new), newdev);
+ e = t3_l2t_get(tdev, new, newdev);
if (!e) {
printk(KERN_ERR "%s: couldn't allocate new l2t entry!\n",
__func__);
@@ -1301,7 +1313,7 @@ int cxgb3_offload_activate(struct adapter *adapter)
out_free_l2t:
t3_free_l2t(L2DATA(dev));
- rcu_assign_pointer(dev->l2opt, NULL);
+ RCU_INIT_POINTER(dev->l2opt, NULL);
out_free:
kfree(t);
return err;
@@ -1329,7 +1341,7 @@ void cxgb3_offload_deactivate(struct adapter *adapter)
rcu_read_lock();
d = L2DATA(tdev);
rcu_read_unlock();
- rcu_assign_pointer(tdev->l2opt, NULL);
+ RCU_INIT_POINTER(tdev->l2opt, NULL);
call_rcu(&d->rcu_head, clean_l2_data);
if (t->nofail_skb)
kfree_skb(t->nofail_skb);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.c b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
index 70fec8b1140f..3fa3c8833ed7 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
@@ -298,18 +298,31 @@ static inline void reuse_entry(struct l2t_entry *e, struct neighbour *neigh)
spin_unlock(&e->lock);
}
-struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
+struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst,
struct net_device *dev)
{
struct l2t_entry *e = NULL;
+ struct neighbour *neigh;
+ struct port_info *p;
struct l2t_data *d;
int hash;
- u32 addr = *(u32 *) neigh->primary_key;
- int ifidx = neigh->dev->ifindex;
- struct port_info *p = netdev_priv(dev);
- int smt_idx = p->port_id;
+ u32 addr;
+ int ifidx;
+ int smt_idx;
rcu_read_lock();
+ neigh = dst_get_neighbour_noref(dst);
+ if (!neigh)
+ goto done_rcu;
+
+ addr = *(u32 *) neigh->primary_key;
+ ifidx = neigh->dev->ifindex;
+
+ if (!dev)
+ dev = neigh->dev;
+ p = netdev_priv(dev);
+ smt_idx = p->port_id;
+
d = L2DATA(cdev);
if (!d)
goto done_rcu;
@@ -323,7 +336,7 @@ struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
l2t_hold(d, e);
if (atomic_read(&e->refcnt) == 1)
reuse_entry(e, neigh);
- goto done;
+ goto done_unlock;
}
/* Need to allocate a new entry */
@@ -344,7 +357,7 @@ struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
e->vlan = VLAN_NONE;
spin_unlock(&e->lock);
}
-done:
+done_unlock:
write_unlock_bh(&d->lock);
done_rcu:
rcu_read_unlock();
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
index c5f54796e2cb..c4e864369751 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
@@ -109,7 +109,7 @@ static inline void set_arp_failure_handler(struct sk_buff *skb,
void t3_l2e_free(struct l2t_data *d, struct l2t_entry *e);
void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh);
-struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
+struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst,
struct net_device *dev);
int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb,
struct l2t_entry *e);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 4c8f42afa3c6..e83d12c7bf20 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -243,7 +243,7 @@ module_param_array(intr_cnt, uint, NULL, 0644);
MODULE_PARM_DESC(intr_cnt,
"thresholds 1..3 for queue interrupt packet counters");
-static int vf_acls;
+static bool vf_acls;
#ifdef CONFIG_PCI_IOV
module_param(vf_acls, bool, 0644);
@@ -1002,13 +1002,12 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct adapter *adapter = netdev2adap(dev);
- strcpy(info->driver, KBUILD_MODNAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, pci_name(adapter->pdev));
+ strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(adapter->pdev),
+ sizeof(info->bus_info));
- if (!adapter->params.fw_vers)
- strcpy(info->fw_version, "N/A");
- else
+ if (adapter->params.fw_vers)
snprintf(info->fw_version, sizeof(info->fw_version),
"%u.%u.%u.%u, TP %u.%u.%u.%u",
FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers),
@@ -1855,10 +1854,10 @@ static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
return err;
}
-static int cxgb_set_features(struct net_device *dev, u32 features)
+static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
{
const struct port_info *pi = netdev_priv(dev);
- u32 changed = dev->features ^ features;
+ netdev_features_t changed = dev->features ^ features;
int err;
if (!(changed & NETIF_F_HW_VLAN_RX))
@@ -1872,30 +1871,30 @@ static int cxgb_set_features(struct net_device *dev, u32 features)
return err;
}
-static int get_rss_table(struct net_device *dev, struct ethtool_rxfh_indir *p)
+static u32 get_rss_table_size(struct net_device *dev)
+{
+ const struct port_info *pi = netdev_priv(dev);
+
+ return pi->rss_size;
+}
+
+static int get_rss_table(struct net_device *dev, u32 *p)
{
const struct port_info *pi = netdev_priv(dev);
- unsigned int n = min_t(unsigned int, p->size, pi->rss_size);
+ unsigned int n = pi->rss_size;
- p->size = pi->rss_size;
while (n--)
- p->ring_index[n] = pi->rss[n];
+ p[n] = pi->rss[n];
return 0;
}
-static int set_rss_table(struct net_device *dev,
- const struct ethtool_rxfh_indir *p)
+static int set_rss_table(struct net_device *dev, const u32 *p)
{
unsigned int i;
struct port_info *pi = netdev_priv(dev);
- if (p->size != pi->rss_size)
- return -EINVAL;
- for (i = 0; i < p->size; i++)
- if (p->ring_index[i] >= pi->nqsets)
- return -EINVAL;
- for (i = 0; i < p->size; i++)
- pi->rss[i] = p->ring_index[i];
+ for (i = 0; i < pi->rss_size; i++)
+ pi->rss[i] = p[i];
if (pi->adapter->flags & FULL_INIT_DONE)
return write_rss(pi, pi->rss);
return 0;
@@ -1964,7 +1963,7 @@ static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
return -EOPNOTSUPP;
}
-static struct ethtool_ops cxgb_ethtool_ops = {
+static const struct ethtool_ops cxgb_ethtool_ops = {
.get_settings = get_settings,
.set_settings = set_settings,
.get_drvinfo = get_drvinfo,
@@ -1990,6 +1989,7 @@ static struct ethtool_ops cxgb_ethtool_ops = {
.get_wol = get_wol,
.set_wol = set_wol,
.get_rxnfc = get_rxnfc,
+ .get_rxfh_indir_size = get_rss_table_size,
.get_rxfh_indir = get_rss_table,
.set_rxfh_indir = set_rss_table,
.flash_device = set_flash,
@@ -3449,7 +3449,7 @@ static int __devinit init_rss(struct adapter *adap)
if (!pi->rss)
return -ENOMEM;
for (j = 0; j < pi->rss_size; j++)
- pi->rss[j] = j % pi->nqsets;
+ pi->rss[j] = ethtool_rxfh_indir_default(j, pi->nqsets);
}
return 0;
}
@@ -3537,7 +3537,7 @@ static int __devinit init_one(struct pci_dev *pdev,
{
int func, i, err;
struct port_info *pi;
- unsigned int highdma = 0;
+ bool highdma = false;
struct adapter *adapter = NULL;
printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
@@ -3563,7 +3563,7 @@ static int __devinit init_one(struct pci_dev *pdev,
}
if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
- highdma = NETIF_F_HIGHDMA;
+ highdma = true;
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
if (err) {
dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
@@ -3637,7 +3637,9 @@ static int __devinit init_one(struct pci_dev *pdev,
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXCSUM | NETIF_F_RXHASH |
NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
- netdev->features |= netdev->hw_features | highdma;
+ if (highdma)
+ netdev->hw_features |= NETIF_F_HIGHDMA;
+ netdev->features |= netdev->hw_features;
netdev->vlan_features = netdev->features & VLAN_FEAT;
netdev->priv_flags |= IFF_UNICAST_FLT;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 140254c7cba9..2dae7959f000 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -491,7 +491,7 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
__be64 *d = &q->desc[q->pidx];
struct rx_sw_desc *sd = &q->sdesc[q->pidx];
- gfp |= __GFP_NOWARN; /* failures are expected */
+ gfp |= __GFP_NOWARN | __GFP_COLD;
#if FL_PG_ORDER > 0
/*
@@ -528,7 +528,7 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
#endif
while (n--) {
- pg = __netdev_alloc_page(adap->port[0], gfp);
+ pg = alloc_page(gfp);
if (unlikely(!pg)) {
q->alloc_failed++;
break;
@@ -537,7 +537,7 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE,
PCI_DMA_FROMDEVICE);
if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
- netdev_free_page(adap->port[0], pg);
+ put_page(pg);
goto out;
}
*d++ = cpu_to_be64(mapping);
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index da9072bfca8b..e53365a71484 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -1092,7 +1092,8 @@ static int cxgb4vf_change_mtu(struct net_device *dev, int new_mtu)
return ret;
}
-static u32 cxgb4vf_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t cxgb4vf_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
/*
* Since there is no support for separate rx/tx vlan accel
@@ -1106,10 +1107,11 @@ static u32 cxgb4vf_fix_features(struct net_device *dev, u32 features)
return features;
}
-static int cxgb4vf_set_features(struct net_device *dev, u32 features)
+static int cxgb4vf_set_features(struct net_device *dev,
+ netdev_features_t features)
{
struct port_info *pi = netdev_priv(dev);
- u32 changed = dev->features ^ features;
+ netdev_features_t changed = dev->features ^ features;
if (changed & NETIF_F_HW_VLAN_RX)
t4vf_set_rxmode(pi->adapter, pi->viid, -1, -1, -1, -1,
@@ -1203,9 +1205,10 @@ static void cxgb4vf_get_drvinfo(struct net_device *dev,
{
struct adapter *adapter = netdev2adap(dev);
- strcpy(drvinfo->driver, KBUILD_MODNAME);
- strcpy(drvinfo->version, DRV_VERSION);
- strcpy(drvinfo->bus_info, pci_name(to_pci_dev(dev->dev.parent)));
+ strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
+ strlcpy(drvinfo->bus_info, pci_name(to_pci_dev(dev->dev.parent)),
+ sizeof(drvinfo->bus_info));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%u.%u.%u.%u, TP %u.%u.%u.%u",
FW_HDR_FW_VER_MAJOR_GET(adapter->params.dev.fwrev),
@@ -1561,7 +1564,7 @@ static void cxgb4vf_get_wol(struct net_device *dev,
*/
#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
-static struct ethtool_ops cxgb4vf_ethtool_ops = {
+static const struct ethtool_ops cxgb4vf_ethtool_ops = {
.get_settings = cxgb4vf_get_settings,
.get_drvinfo = cxgb4vf_get_drvinfo,
.get_msglevel = cxgb4vf_get_msglevel,
@@ -2000,7 +2003,7 @@ static const struct file_operations interfaces_proc_fops = {
*/
struct cxgb4vf_debugfs_entry {
const char *name; /* name of debugfs node */
- mode_t mode; /* file system mode */
+ umode_t mode; /* file system mode */
const struct file_operations *fops;
};
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 8d5d55ad102d..0bd585bba39d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -653,8 +653,7 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
alloc_small_pages:
while (n--) {
- page = __netdev_alloc_page(adapter->port[0],
- gfp | __GFP_NOWARN);
+ page = alloc_page(gfp | __GFP_NOWARN | __GFP_COLD);
if (unlikely(!page)) {
fl->alloc_failed++;
break;
@@ -664,7 +663,7 @@ alloc_small_pages:
dma_addr = dma_map_page(adapter->pdev_dev, page, 0, PAGE_SIZE,
PCI_DMA_FROMDEVICE);
if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) {
- netdev_free_page(adapter->port[0], page);
+ put_page(page);
break;
}
*d++ = cpu_to_be64(dma_addr);
@@ -684,7 +683,7 @@ out:
/*
* Update our accounting state to incorporate the new Free List
* buffers, tell the hardware about them and return the number of
- * bufers which we were able to allocate.
+ * buffers which we were able to allocate.
*/
cred = fl->avail - cred;
fl->pend_cred += cred;
diff --git a/drivers/net/ethernet/cisco/enic/enic_dev.c b/drivers/net/ethernet/cisco/enic/enic_dev.c
index fd6247b3c0ee..bf0fc56dba19 100644
--- a/drivers/net/ethernet/cisco/enic/enic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/enic_dev.c
@@ -212,23 +212,29 @@ int enic_dev_deinit_done(struct enic *enic, int *status)
}
/* rtnl lock is held */
-void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+int enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
struct enic *enic = netdev_priv(netdev);
+ int err;
spin_lock(&enic->devcmd_lock);
- enic_add_vlan(enic, vid);
+ err = enic_add_vlan(enic, vid);
spin_unlock(&enic->devcmd_lock);
+
+ return err;
}
/* rtnl lock is held */
-void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+int enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
struct enic *enic = netdev_priv(netdev);
+ int err;
spin_lock(&enic->devcmd_lock);
- enic_del_vlan(enic, vid);
+ err = enic_del_vlan(enic, vid);
spin_unlock(&enic->devcmd_lock);
+
+ return err;
}
int enic_dev_enable2(struct enic *enic, int active)
diff --git a/drivers/net/ethernet/cisco/enic/enic_dev.h b/drivers/net/ethernet/cisco/enic/enic_dev.h
index 1f83a4747ba0..da1cba3c410e 100644
--- a/drivers/net/ethernet/cisco/enic/enic_dev.h
+++ b/drivers/net/ethernet/cisco/enic/enic_dev.h
@@ -46,8 +46,8 @@ int enic_dev_packet_filter(struct enic *enic, int directed, int multicast,
int broadcast, int promisc, int allmulti);
int enic_dev_add_addr(struct enic *enic, u8 *addr);
int enic_dev_del_addr(struct enic *enic, u8 *addr);
-void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
-void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
+int enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
+int enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
int enic_dev_notify_unset(struct enic *enic);
int enic_dev_hang_notify(struct enic *enic);
int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic);
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index c3786fda11db..2fd9db4b1be5 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -217,11 +217,11 @@ static void enic_get_drvinfo(struct net_device *netdev,
enic_dev_fw_info(enic, &fw_info);
- strncpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
- strncpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
- strncpy(drvinfo->fw_version, fw_info->fw_version,
+ strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
+ strlcpy(drvinfo->fw_version, fw_info->fw_version,
sizeof(drvinfo->fw_version));
- strncpy(drvinfo->bus_info, pci_name(enic->pdev),
+ strlcpy(drvinfo->bus_info, pci_name(enic->pdev),
sizeof(drvinfo->bus_info));
}
@@ -2379,7 +2379,7 @@ static int __devinit enic_probe(struct pci_dev *pdev,
#endif
/* Allocate structure for port profiles */
- enic->pp = kzalloc(num_pps * sizeof(*enic->pp), GFP_KERNEL);
+ enic->pp = kcalloc(num_pps, sizeof(*enic->pp), GFP_KERNEL);
if (!enic->pp) {
pr_err("port profile alloc failed, aborting\n");
err = -ENOMEM;
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 438f4580bf66..f801754c71a7 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -474,10 +474,11 @@ static int dm9000_nway_reset(struct net_device *dev)
return mii_nway_restart(&dm->mii);
}
-static int dm9000_set_features(struct net_device *dev, u32 features)
+static int dm9000_set_features(struct net_device *dev,
+ netdev_features_t features)
{
board_info_t *dm = to_dm9000_board(dev);
- u32 changed = dev->features ^ features;
+ netdev_features_t changed = dev->features ^ features;
unsigned long flags;
if (!(changed & NETIF_F_RXCSUM))
@@ -613,7 +614,7 @@ static int dm9000_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
if (!dm->wake_state)
irq_set_irq_wake(dm->irq_wake, 1);
- else if (dm->wake_state & !opts)
+ else if (dm->wake_state && !opts)
irq_set_irq_wake(dm->irq_wake, 0);
}
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c
index 1427739d9a51..1eb46a0bb488 100644
--- a/drivers/net/ethernet/dec/tulip/de2104x.c
+++ b/drivers/net/ethernet/dec/tulip/de2104x.c
@@ -1598,9 +1598,9 @@ static void de_get_drvinfo (struct net_device *dev,struct ethtool_drvinfo *info)
{
struct de_private *de = netdev_priv(dev);
- strcpy (info->driver, DRV_NAME);
- strcpy (info->version, DRV_VERSION);
- strcpy (info->bus_info, pci_name(de->pdev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(de->pdev), sizeof(info->bus_info));
info->eedump_len = DE_EEPROM_SIZE;
}
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index 871bcaa7068d..4d71f5ae20c8 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -2127,14 +2127,9 @@ srom_search(struct net_device *dev, struct pci_dev *pdev)
u_long iobase = 0; /* Clear upper 32 bits in Alphas */
int i, j;
struct de4x5_private *lp = netdev_priv(dev);
- struct list_head *walk;
-
- list_for_each(walk, &pdev->bus_list) {
- struct pci_dev *this_dev = pci_dev_b(walk);
-
- /* Skip the pci_bus list entry */
- if (list_entry(walk, struct pci_bus, devices) == pdev->bus) continue;
+ struct pci_dev *this_dev;
+ list_for_each_entry(this_dev, &pdev->bus->devices, bus_list) {
vendor = this_dev->vendor;
device = this_dev->device << 8;
if (!(is_DC21040 || is_DC21041 || is_DC21140 || is_DC2114x)) continue;
@@ -5196,7 +5191,7 @@ de4x5_parse_params(struct net_device *dev)
struct de4x5_private *lp = netdev_priv(dev);
char *p, *q, t;
- lp->params.fdx = 0;
+ lp->params.fdx = false;
lp->params.autosense = AUTO;
if (args == NULL) return;
@@ -5206,7 +5201,7 @@ de4x5_parse_params(struct net_device *dev)
t = *q;
*q = '\0';
- if (strstr(p, "fdx") || strstr(p, "FDX")) lp->params.fdx = 1;
+ if (strstr(p, "fdx") || strstr(p, "FDX")) lp->params.fdx = true;
if (strstr(p, "autosense") || strstr(p, "AUTOSENSE")) {
if (strstr(p, "TP")) {
diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c
index 17b11ee1745a..51f7542eb451 100644
--- a/drivers/net/ethernet/dec/tulip/dmfe.c
+++ b/drivers/net/ethernet/dec/tulip/dmfe.c
@@ -1085,10 +1085,11 @@ static void dmfe_ethtool_get_drvinfo(struct net_device *dev,
{
struct dmfe_board_info *np = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
if (np->pdev)
- strcpy(info->bus_info, pci_name(np->pdev));
+ strlcpy(info->bus_info, pci_name(np->pdev),
+ sizeof(info->bus_info));
else
sprintf(info->bus_info, "EISA 0x%lx %d",
dev->base_addr, dev->irq);
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index 9656dd0647d9..4eb0d76145c2 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -871,9 +871,9 @@ static struct net_device_stats *tulip_get_stats(struct net_device *dev)
static void tulip_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct tulip_private *np = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, pci_name(np->pdev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index 7a44a7a6adc8..48b0b6566eef 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -960,10 +960,11 @@ static void netdev_get_drvinfo(struct net_device *dev,
{
struct uli526x_board_info *np = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
if (np->pdev)
- strcpy(info->bus_info, pci_name(np->pdev));
+ strlcpy(info->bus_info, pci_name(np->pdev),
+ sizeof(info->bus_info));
else
sprintf(info->bus_info, "EISA 0x%lx %d",
dev->base_addr, dev->irq);
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index 4d01219ba22f..52da7b2fe3b6 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -1390,9 +1390,9 @@ static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *
{
struct netdev_private *np = netdev_priv(dev);
- strcpy (info->driver, DRV_NAME);
- strcpy (info->version, DRV_VERSION);
- strcpy (info->bus_info, pci_name(np->pci_dev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
diff --git a/drivers/net/ethernet/dlink/de600.c b/drivers/net/ethernet/dlink/de600.c
index 23a65398d011..c24fab1e9cbe 100644
--- a/drivers/net/ethernet/dlink/de600.c
+++ b/drivers/net/ethernet/dlink/de600.c
@@ -59,7 +59,7 @@ static const char version[] = "de600.c: $Revision: 1.41-2.5 $, Bjorn Ekwall (bj
#include "de600.h"
-static unsigned int check_lost = 1;
+static bool check_lost = true;
module_param(check_lost, bool, 0);
MODULE_PARM_DESC(check_lost, "If set then check for unplugged de600");
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index dcd7f7a71ad4..28a3a9b50b8b 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -1634,9 +1634,9 @@ static int check_if_running(struct net_device *dev)
static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct netdev_private *np = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, pci_name(np->pci_dev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index c1063d1540c2..925c9bafc9b9 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -325,7 +325,8 @@ static int dnet_mii_init(struct dnet *bp)
bp->mii_bus->write = &dnet_mdio_write;
bp->mii_bus->reset = &dnet_mdio_reset;
- snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%x", 0);
+ snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+ bp->pdev->name, bp->pdev->id);
bp->mii_bus->priv = bp;
@@ -804,9 +805,9 @@ static int dnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
static void dnet_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, "0");
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, "0", sizeof(info->bus_info));
}
static const struct ethtool_ops dnet_ethtool_ops = {
@@ -977,18 +978,7 @@ static struct platform_driver dnet_driver = {
},
};
-static int __init dnet_init(void)
-{
- return platform_driver_register(&dnet_driver);
-}
-
-static void __exit dnet_exit(void)
-{
- platform_driver_unregister(&dnet_driver);
-}
-
-module_init(dnet_init);
-module_exit(dnet_exit);
+module_platform_driver(dnet_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Dave DNET Ethernet driver");
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 644e8fed8364..cbdec2536da6 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -40,6 +40,7 @@
#define OC_NAME "Emulex OneConnect 10Gbps NIC"
#define OC_NAME_BE OC_NAME "(be3)"
#define OC_NAME_LANCER OC_NAME "(Lancer)"
+#define OC_NAME_SH OC_NAME "(Skyhawk)"
#define DRV_DESC "ServerEngines BladeEngine 10Gbps NIC Driver"
#define BE_VENDOR_ID 0x19a2
@@ -50,6 +51,7 @@
#define OC_DEVICE_ID2 0x710 /* Device Id for BE3 cards */
#define OC_DEVICE_ID3 0xe220 /* Device id for Lancer cards */
#define OC_DEVICE_ID4 0xe228 /* Device id for VF in Lancer */
+#define OC_DEVICE_ID5 0x720 /* Device Id for Skyhawk cards */
static inline char *nic_name(struct pci_dev *pdev)
{
@@ -63,6 +65,8 @@ static inline char *nic_name(struct pci_dev *pdev)
return OC_NAME_LANCER;
case BE_DEVICE_ID2:
return BE3_NAME;
+ case OC_DEVICE_ID5:
+ return OC_NAME_SH;
default:
return BE_NAME;
}
@@ -288,14 +292,14 @@ struct be_drv_stats {
};
struct be_vf_cfg {
- unsigned char vf_mac_addr[ETH_ALEN];
- u32 vf_if_handle;
- u32 vf_pmac_id;
- u16 vf_vlan_tag;
- u32 vf_tx_rate;
+ unsigned char mac_addr[ETH_ALEN];
+ int if_handle;
+ int pmac_id;
+ u16 vlan_tag;
+ u32 tx_rate;
};
-#define BE_INVALID_PMAC_ID 0xffffffff
+#define BE_FLAGS_LINK_STATUS_INIT 1
struct be_adapter {
struct pci_dev *pdev;
@@ -345,13 +349,16 @@ struct be_adapter {
struct delayed_work work;
u16 work_counter;
+ u32 flags;
/* Ethtool knobs and info */
char fw_ver[FW_VER_LEN];
- u32 if_handle; /* Used to configure filtering */
+ int if_handle; /* Used to configure filtering */
u32 pmac_id; /* MAC addr handle used by BE card */
u32 beacon_state; /* for set_phys_id */
bool eeh_err;
+ bool ue_detected;
+ bool fw_timeout;
u32 port_num;
bool promiscuous;
bool wol;
@@ -359,7 +366,6 @@ struct be_adapter {
u32 function_caps;
u32 rx_fc; /* Rx flow control */
u32 tx_fc; /* Tx flow control */
- bool ue_detected;
bool stats_cmd_sent;
int link_speed;
u8 port_type;
@@ -369,16 +375,20 @@ struct be_adapter {
u32 flash_status;
struct completion flash_compl;
- bool be3_native;
- bool sriov_enabled;
- struct be_vf_cfg *vf_cfg;
+ u32 num_vfs;
u8 is_virtfn;
+ struct be_vf_cfg *vf_cfg;
+ bool be3_native;
u32 sli_family;
u8 hba_port_num;
u16 pvid;
};
#define be_physfn(adapter) (!adapter->is_virtfn)
+#define sriov_enabled(adapter) (adapter->num_vfs > 0)
+#define for_all_vfs(adapter, vf_cfg, i) \
+ for (i = 0, vf_cfg = &adapter->vf_cfg[i]; i < adapter->num_vfs; \
+ i++, vf_cfg++)
/* BladeEngine Generation numbers */
#define BE_GEN2 2
@@ -524,9 +534,14 @@ static inline bool be_multi_rxq(const struct be_adapter *adapter)
return adapter->num_rx_qs > 1;
}
+static inline bool be_error(struct be_adapter *adapter)
+{
+ return adapter->eeh_err || adapter->ue_detected || adapter->fw_timeout;
+}
+
extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
u16 num_popped);
-extern void be_link_status_update(struct be_adapter *adapter, u32 link_status);
+extern void be_link_status_update(struct be_adapter *adapter, u8 link_status);
extern void be_parse_stats(struct be_adapter *adapter);
extern int be_load_fw(struct be_adapter *adapter, u8 *func);
#endif /* BE_H */
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 2c7b36673dfc..0fcb45624796 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -31,11 +31,8 @@ static void be_mcc_notify(struct be_adapter *adapter)
struct be_queue_info *mccq = &adapter->mcc_obj.q;
u32 val = 0;
- if (adapter->eeh_err) {
- dev_info(&adapter->pdev->dev,
- "Error in Card Detected! Cannot issue commands\n");
+ if (be_error(adapter))
return;
- }
val |= mccq->id & DB_MCCQ_RING_ID_MASK;
val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
@@ -128,7 +125,14 @@ done:
static void be_async_link_state_process(struct be_adapter *adapter,
struct be_async_event_link_state *evt)
{
- be_link_status_update(adapter, evt->port_link_status);
+ /* When link status changes, link speed must be re-queried from FW */
+ adapter->link_speed = -1;
+
+ /* For the initial link status do not rely on the ASYNC event as
+ * it may not be received in some cases.
+ */
+ if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT)
+ be_link_status_update(adapter, evt->port_link_status);
}
/* Grp5 CoS Priority evt */
@@ -266,10 +270,10 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
int i, num, status = 0;
struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
- if (adapter->eeh_err)
- return -EIO;
-
for (i = 0; i < mcc_timeout; i++) {
+ if (be_error(adapter))
+ return -EIO;
+
num = be_process_mcc(adapter, &status);
if (num)
be_cq_notify(adapter, mcc_obj->cq.id,
@@ -280,7 +284,8 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
udelay(100);
}
if (i == mcc_timeout) {
- dev_err(&adapter->pdev->dev, "mccq poll timed out\n");
+ dev_err(&adapter->pdev->dev, "FW not responding\n");
+ adapter->fw_timeout = true;
return -1;
}
return status;
@@ -298,26 +303,21 @@ static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
int msecs = 0;
u32 ready;
- if (adapter->eeh_err) {
- dev_err(&adapter->pdev->dev,
- "Error detected in card.Cannot issue commands\n");
- return -EIO;
- }
-
do {
+ if (be_error(adapter))
+ return -EIO;
+
ready = ioread32(db);
- if (ready == 0xffffffff) {
- dev_err(&adapter->pdev->dev,
- "pci slot disconnected\n");
+ if (ready == 0xffffffff)
return -1;
- }
ready &= MPU_MAILBOX_DB_RDY_MASK;
if (ready)
break;
if (msecs > 4000) {
- dev_err(&adapter->pdev->dev, "mbox poll timed out\n");
+ dev_err(&adapter->pdev->dev, "FW not responding\n");
+ adapter->fw_timeout = true;
be_detect_dump_ue(adapter);
return -1;
}
@@ -555,9 +555,6 @@ int be_cmd_fw_clean(struct be_adapter *adapter)
u8 *wrb;
int status;
- if (adapter->eeh_err)
- return -EIO;
-
if (mutex_lock_interruptible(&adapter->mbox_lock))
return -1;
@@ -619,7 +616,7 @@ int be_cmd_eq_create(struct be_adapter *adapter,
/* Use MCC */
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
- u8 type, bool permanent, u32 if_handle)
+ u8 type, bool permanent, u32 if_handle, u32 pmac_id)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_mac_query *req;
@@ -641,6 +638,7 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
req->permanent = 1;
} else {
req->if_id = cpu_to_le16((u16) if_handle);
+ req->pmac_id = cpu_to_le32(pmac_id);
req->permanent = 0;
}
@@ -695,12 +693,15 @@ err:
}
/* Uses synchronous MCCQ */
-int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id, u32 dom)
+int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_pmac_del *req;
int status;
+ if (pmac_id == -1)
+ return 0;
+
spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
@@ -923,10 +924,14 @@ int be_cmd_txq_create(struct be_adapter *adapter,
void *ctxt;
int status;
- if (mutex_lock_interruptible(&adapter->mbox_lock))
- return -1;
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
- wrb = wrb_from_mbox(adapter);
req = embedded_payload(wrb);
ctxt = &req->context;
@@ -952,14 +957,15 @@ int be_cmd_txq_create(struct be_adapter *adapter,
be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
- status = be_mbox_notify_wait(adapter);
+ status = be_mcc_notify_wait(adapter);
if (!status) {
struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
txq->id = le16_to_cpu(resp->cid);
txq->created = true;
}
- mutex_unlock(&adapter->mbox_lock);
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1018,9 +1024,6 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
u8 subsys = 0, opcode = 0;
int status;
- if (adapter->eeh_err)
- return -EIO;
-
if (mutex_lock_interruptible(&adapter->mbox_lock))
return -1;
@@ -1136,16 +1139,13 @@ err:
}
/* Uses MCCQ */
-int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id, u32 domain)
+int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_if_destroy *req;
int status;
- if (adapter->eeh_err)
- return -EIO;
-
- if (!interface_id)
+ if (interface_id == -1)
return 0;
spin_lock_bh(&adapter->mcc_lock);
@@ -1239,7 +1239,7 @@ err:
/* Uses synchronous mcc */
int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed,
- u16 *link_speed, u32 dom)
+ u16 *link_speed, u8 *link_status, u32 dom)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_link_status *req;
@@ -1247,6 +1247,9 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed,
spin_lock_bh(&adapter->mcc_lock);
+ if (link_status)
+ *link_status = LINK_DOWN;
+
wrb = wrb_from_mccq(adapter);
if (!wrb) {
status = -EBUSY;
@@ -1254,6 +1257,9 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed,
}
req = embedded_payload(wrb);
+ if (adapter->generation == BE_GEN3 || lancer_chip(adapter))
+ req->hdr.version = 1;
+
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req), wrb, NULL);
@@ -1261,10 +1267,13 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed,
if (!status) {
struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
if (resp->mac_speed != PHY_LINK_SPEED_ZERO) {
- *link_speed = le16_to_cpu(resp->link_speed);
+ if (link_speed)
+ *link_speed = le16_to_cpu(resp->link_speed);
if (mac_speed)
*mac_speed = resp->mac_speed;
}
+ if (link_status)
+ *link_status = resp->logical_link_status;
}
err:
@@ -1673,8 +1682,9 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_rss_config *req;
- u32 myhash[10] = {0x0123, 0x4567, 0x89AB, 0xCDEF, 0x01EF,
- 0x0123, 0x4567, 0x89AB, 0xCDEF, 0x01EF};
+ u32 myhash[10] = {0x15d43fa5, 0x2534685a, 0x5f87693a, 0x5668494e,
+ 0x33cf6a53, 0x383334c6, 0x76ac4257, 0x59b242b2,
+ 0x3ea83c02, 0x4a110304};
int status;
if (mutex_lock_interruptible(&adapter->mbox_lock))
@@ -1836,6 +1846,53 @@ err_unlock:
return status;
}
+int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
+ u32 data_size, u32 data_offset, const char *obj_name,
+ u32 *data_read, u32 *eof, u8 *addn_status)
+{
+ struct be_mcc_wrb *wrb;
+ struct lancer_cmd_req_read_object *req;
+ struct lancer_cmd_resp_read_object *resp;
+ int status;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err_unlock;
+ }
+
+ req = embedded_payload(wrb);
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_READ_OBJECT,
+ sizeof(struct lancer_cmd_req_read_object), wrb,
+ NULL);
+
+ req->desired_read_len = cpu_to_le32(data_size);
+ req->read_offset = cpu_to_le32(data_offset);
+ strcpy(req->object_name, obj_name);
+ req->descriptor_count = cpu_to_le32(1);
+ req->buf_len = cpu_to_le32(data_size);
+ req->addr_low = cpu_to_le32((cmd->dma & 0xFFFFFFFF));
+ req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma));
+
+ status = be_mcc_notify_wait(adapter);
+
+ resp = embedded_payload(wrb);
+ if (!status) {
+ *data_read = le32_to_cpu(resp->actual_read_len);
+ *eof = le32_to_cpu(resp->eof);
+ } else {
+ *addn_status = resp->additional_status;
+ }
+
+err_unlock:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
u32 flash_type, u32 flash_opcode, u32 buf_size)
{
@@ -2238,3 +2295,99 @@ err:
mutex_unlock(&adapter->mbox_lock);
return status;
}
+
+/* Uses synchronous MCCQ */
+int be_cmd_get_mac_from_list(struct be_adapter *adapter, u32 domain,
+ u32 *pmac_id)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_get_mac_list *req;
+ int status;
+ int mac_count;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = embedded_payload(wrb);
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_GET_MAC_LIST, sizeof(*req),
+ wrb, NULL);
+
+ req->hdr.domain = domain;
+
+ status = be_mcc_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_get_mac_list *resp =
+ embedded_payload(wrb);
+ int i;
+ u8 *ctxt = &resp->context[0][0];
+ status = -EIO;
+ mac_count = resp->mac_count;
+ be_dws_le_to_cpu(&resp->context, sizeof(resp->context));
+ for (i = 0; i < mac_count; i++) {
+ if (!AMAP_GET_BITS(struct amap_get_mac_list_context,
+ act, ctxt)) {
+ *pmac_id = AMAP_GET_BITS
+ (struct amap_get_mac_list_context,
+ macid, ctxt);
+ status = 0;
+ break;
+ }
+ ctxt += sizeof(struct amap_get_mac_list_context) / 8;
+ }
+ }
+
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
+/* Uses synchronous MCCQ */
+int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
+ u8 mac_count, u32 domain)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_set_mac_list *req;
+ int status;
+ struct be_dma_mem cmd;
+
+ memset(&cmd, 0, sizeof(struct be_dma_mem));
+ cmd.size = sizeof(struct be_cmd_req_set_mac_list);
+ cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size,
+ &cmd.dma, GFP_KERNEL);
+ if (!cmd.va) {
+ dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
+ return -ENOMEM;
+ }
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+
+ req = cmd.va;
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_SET_MAC_LIST, sizeof(*req),
+ wrb, &cmd);
+
+ req->hdr.domain = domain;
+ req->mac_count = mac_count;
+ if (mac_count)
+ memcpy(req->mac, mac_array, ETH_ALEN*mac_count);
+
+ status = be_mcc_notify_wait(adapter);
+
+err:
+ dma_free_coherent(&adapter->pdev->dev, cmd.size,
+ cmd.va, cmd.dma);
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index a35cd03fac4e..dca89249088f 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -189,6 +189,9 @@ struct be_mcc_mailbox {
#define OPCODE_COMMON_GET_PHY_DETAILS 102
#define OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP 103
#define OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES 121
+#define OPCODE_COMMON_GET_MAC_LIST 147
+#define OPCODE_COMMON_SET_MAC_LIST 148
+#define OPCODE_COMMON_READ_OBJECT 171
#define OPCODE_COMMON_WRITE_OBJECT 172
#define OPCODE_ETH_RSS_CONFIG 1
@@ -294,6 +297,7 @@ struct be_cmd_req_mac_query {
u8 type;
u8 permanent;
u16 if_id;
+ u32 pmac_id;
} __packed;
struct be_cmd_resp_mac_query {
@@ -956,7 +960,8 @@ struct be_cmd_resp_link_status {
u8 mgmt_mac_duplex;
u8 mgmt_mac_speed;
u16 link_speed;
- u32 rsvd0;
+ u8 logical_link_status;
+ u8 rsvd1[3];
} __packed;
/******************** Port Identification ***************************/
@@ -1161,6 +1166,38 @@ struct lancer_cmd_resp_write_object {
u32 actual_write_len;
};
+/************************ Lancer Read FW info **************/
+#define LANCER_READ_FILE_CHUNK (32*1024)
+#define LANCER_READ_FILE_EOF_MASK 0x80000000
+
+#define LANCER_FW_DUMP_FILE "/dbg/dump.bin"
+#define LANCER_VPD_PF_FILE "/vpd/ntr_pf.vpd"
+#define LANCER_VPD_VF_FILE "/vpd/ntr_vf.vpd"
+
+struct lancer_cmd_req_read_object {
+ struct be_cmd_req_hdr hdr;
+ u32 desired_read_len;
+ u32 read_offset;
+ u8 object_name[104];
+ u32 descriptor_count;
+ u32 buf_len;
+ u32 addr_low;
+ u32 addr_high;
+};
+
+struct lancer_cmd_resp_read_object {
+ u8 opcode;
+ u8 subsystem;
+ u8 rsvd1[2];
+ u8 status;
+ u8 additional_status;
+ u8 rsvd2[2];
+ u32 resp_len;
+ u32 actual_resp_len;
+ u32 actual_read_len;
+ u32 eof;
+};
+
/************************ WOL *******************************/
struct be_cmd_req_acpi_wol_magic_config{
struct be_cmd_req_hdr hdr;
@@ -1307,6 +1344,34 @@ struct be_cmd_resp_set_func_cap {
u8 rsvd[212];
};
+/******************** GET/SET_MACLIST **************************/
+#define BE_MAX_MAC 64
+struct amap_get_mac_list_context {
+ u8 macid[31];
+ u8 act;
+} __packed;
+
+struct be_cmd_req_get_mac_list {
+ struct be_cmd_req_hdr hdr;
+ u32 rsvd;
+} __packed;
+
+struct be_cmd_resp_get_mac_list {
+ struct be_cmd_resp_hdr hdr;
+ u8 mac_count;
+ u8 rsvd1;
+ u16 rsvd2;
+ u8 context[sizeof(struct amap_get_mac_list_context) / 8][BE_MAX_MAC];
+} __packed;
+
+struct be_cmd_req_set_mac_list {
+ struct be_cmd_req_hdr hdr;
+ u8 mac_count;
+ u8 rsvd1;
+ u16 rsvd2;
+ struct macaddr mac[BE_MAX_MAC];
+} __packed;
+
/*************** HW Stats Get v1 **********************************/
#define BE_TXP_SW_SZ 48
struct be_port_rxf_stats_v1 {
@@ -1413,15 +1478,15 @@ static inline void *be_erx_stats_from_cmd(struct be_adapter *adapter)
extern int be_pci_fnum_get(struct be_adapter *adapter);
extern int be_cmd_POST(struct be_adapter *adapter);
extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
- u8 type, bool permanent, u32 if_handle);
+ u8 type, bool permanent, u32 if_handle, u32 pmac_id);
extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
u32 if_id, u32 *pmac_id, u32 domain);
extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id,
- u32 pmac_id, u32 domain);
+ int pmac_id, u32 domain);
extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags,
u32 en_flags, u8 *mac, u32 *if_handle, u32 *pmac_id,
u32 domain);
-extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle,
+extern int be_cmd_if_destroy(struct be_adapter *adapter, int if_handle,
u32 domain);
extern int be_cmd_eq_create(struct be_adapter *adapter,
struct be_queue_info *eq, int eq_delay);
@@ -1443,8 +1508,8 @@ extern int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
int type);
extern int be_cmd_rxq_destroy(struct be_adapter *adapter,
struct be_queue_info *q);
-extern int be_cmd_link_status_query(struct be_adapter *adapter,
- u8 *mac_speed, u16 *link_speed, u32 dom);
+extern int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed,
+ u16 *link_speed, u8 *link_status, u32 dom);
extern int be_cmd_reset(struct be_adapter *adapter);
extern int be_cmd_get_stats(struct be_adapter *adapter,
struct be_dma_mem *nonemb_cmd);
@@ -1480,6 +1545,9 @@ extern int lancer_cmd_write_object(struct be_adapter *adapter,
u32 data_size, u32 data_offset,
const char *obj_name,
u32 *data_written, u8 *addn_status);
+int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
+ u32 data_size, u32 data_offset, const char *obj_name,
+ u32 *data_read, u32 *eof, u8 *addn_status);
int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
int offset);
extern int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
@@ -1506,4 +1574,8 @@ extern int be_cmd_get_cntl_attributes(struct be_adapter *adapter);
extern int be_cmd_req_native_mode(struct be_adapter *adapter);
extern int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size);
extern void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf);
+extern int be_cmd_get_mac_from_list(struct be_adapter *adapter, u32 domain,
+ u32 *pmac_id);
+extern int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
+ u8 mac_count, u32 domain);
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index bf8153ea4ed8..6db6b6ae5e9b 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -127,8 +127,8 @@ static void be_get_drvinfo(struct net_device *netdev,
memset(fw_on_flash, 0 , sizeof(fw_on_flash));
be_cmd_get_fw_ver(adapter, adapter->fw_ver, fw_on_flash);
- strcpy(drvinfo->driver, DRV_NAME);
- strcpy(drvinfo->version, DRV_VER);
+ strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, DRV_VER, sizeof(drvinfo->version));
strncpy(drvinfo->fw_version, adapter->fw_ver, FW_VER_LEN);
if (memcmp(adapter->fw_ver, fw_on_flash, FW_VER_LEN) != 0) {
strcat(drvinfo->fw_version, " [");
@@ -136,21 +136,84 @@ static void be_get_drvinfo(struct net_device *netdev,
strcat(drvinfo->fw_version, "]");
}
- strcpy(drvinfo->bus_info, pci_name(adapter->pdev));
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info));
drvinfo->testinfo_len = 0;
drvinfo->regdump_len = 0;
drvinfo->eedump_len = 0;
}
+static u32
+lancer_cmd_get_file_len(struct be_adapter *adapter, u8 *file_name)
+{
+ u32 data_read = 0, eof;
+ u8 addn_status;
+ struct be_dma_mem data_len_cmd;
+ int status;
+
+ memset(&data_len_cmd, 0, sizeof(data_len_cmd));
+ /* data_offset and data_size should be 0 to get reg len */
+ status = lancer_cmd_read_object(adapter, &data_len_cmd, 0, 0,
+ file_name, &data_read, &eof, &addn_status);
+
+ return data_read;
+}
+
+static int
+lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
+ u32 buf_len, void *buf)
+{
+ struct be_dma_mem read_cmd;
+ u32 read_len = 0, total_read_len = 0, chunk_size;
+ u32 eof = 0;
+ u8 addn_status;
+ int status = 0;
+
+ read_cmd.size = LANCER_READ_FILE_CHUNK;
+ read_cmd.va = pci_alloc_consistent(adapter->pdev, read_cmd.size,
+ &read_cmd.dma);
+
+ if (!read_cmd.va) {
+ dev_err(&adapter->pdev->dev,
+ "Memory allocation failure while reading dump\n");
+ return -ENOMEM;
+ }
+
+ while ((total_read_len < buf_len) && !eof) {
+ chunk_size = min_t(u32, (buf_len - total_read_len),
+ LANCER_READ_FILE_CHUNK);
+ chunk_size = ALIGN(chunk_size, 4);
+ status = lancer_cmd_read_object(adapter, &read_cmd, chunk_size,
+ total_read_len, file_name, &read_len,
+ &eof, &addn_status);
+ if (!status) {
+ memcpy(buf + total_read_len, read_cmd.va, read_len);
+ total_read_len += read_len;
+ eof &= LANCER_READ_FILE_EOF_MASK;
+ } else {
+ status = -EIO;
+ break;
+ }
+ }
+ pci_free_consistent(adapter->pdev, read_cmd.size, read_cmd.va,
+ read_cmd.dma);
+
+ return status;
+}
+
static int
be_get_reg_len(struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
u32 log_size = 0;
- if (be_physfn(adapter))
- be_cmd_get_reg_len(adapter, &log_size);
-
+ if (be_physfn(adapter)) {
+ if (lancer_chip(adapter))
+ log_size = lancer_cmd_get_file_len(adapter,
+ LANCER_FW_DUMP_FILE);
+ else
+ be_cmd_get_reg_len(adapter, &log_size);
+ }
return log_size;
}
@@ -161,7 +224,11 @@ be_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *buf)
if (be_physfn(adapter)) {
memset(buf, 0, regs->len);
- be_cmd_get_regs(adapter, regs->len, buf);
+ if (lancer_chip(adapter))
+ lancer_cmd_read_file(adapter, LANCER_FW_DUMP_FILE,
+ regs->len, buf);
+ else
+ be_cmd_get_regs(adapter, regs->len, buf);
}
}
@@ -362,11 +429,14 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
struct be_phy_info phy_info;
u8 mac_speed = 0;
u16 link_speed = 0;
+ u8 link_status;
int status;
if ((adapter->link_speed < 0) || (!(netdev->flags & IFF_UP))) {
status = be_cmd_link_status_query(adapter, &mac_speed,
- &link_speed, 0);
+ &link_speed, &link_status, 0);
+ if (!status)
+ be_link_status_update(adapter, link_status);
/* link_speed is in units of 10 Mbps */
if (link_speed) {
@@ -453,16 +523,13 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
return 0;
}
-static void
-be_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
+static void be_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
{
struct be_adapter *adapter = netdev_priv(netdev);
- ring->rx_max_pending = adapter->rx_obj[0].q.len;
- ring->tx_max_pending = adapter->tx_obj[0].q.len;
-
- ring->rx_pending = atomic_read(&adapter->rx_obj[0].q.used);
- ring->tx_pending = atomic_read(&adapter->tx_obj[0].q.used);
+ ring->rx_max_pending = ring->rx_pending = adapter->rx_obj[0].q.len;
+ ring->tx_max_pending = ring->tx_pending = adapter->tx_obj[0].q.len;
}
static void
@@ -636,7 +703,7 @@ be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
}
if (be_cmd_link_status_query(adapter, &mac_speed,
- &qos_link_speed, 0) != 0) {
+ &qos_link_speed, NULL, 0) != 0) {
test->flags |= ETH_TEST_FL_FAILED;
data[4] = -1;
} else if (!mac_speed) {
@@ -660,7 +727,17 @@ be_do_flash(struct net_device *netdev, struct ethtool_flash *efl)
static int
be_get_eeprom_len(struct net_device *netdev)
{
- return BE_READ_SEEPROM_LEN;
+ struct be_adapter *adapter = netdev_priv(netdev);
+ if (lancer_chip(adapter)) {
+ if (be_physfn(adapter))
+ return lancer_cmd_get_file_len(adapter,
+ LANCER_VPD_PF_FILE);
+ else
+ return lancer_cmd_get_file_len(adapter,
+ LANCER_VPD_VF_FILE);
+ } else {
+ return BE_READ_SEEPROM_LEN;
+ }
}
static int
@@ -675,6 +752,15 @@ be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
if (!eeprom->len)
return -EINVAL;
+ if (lancer_chip(adapter)) {
+ if (be_physfn(adapter))
+ return lancer_cmd_read_file(adapter, LANCER_VPD_PF_FILE,
+ eeprom->len, data);
+ else
+ return lancer_cmd_read_file(adapter, LANCER_VPD_VF_FILE,
+ eeprom->len, data);
+ }
+
eeprom->magic = BE_VENDOR_ID | (adapter->pdev->device<<16);
memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem));
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index bf266a00c774..a6bcdb5cd2be 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -27,13 +27,14 @@ MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");
-static ushort rx_frag_size = 2048;
static unsigned int num_vfs;
-module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
-MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
+static ushort rx_frag_size = 2048;
+module_param(rx_frag_size, ushort, S_IRUGO);
+MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
+
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
@@ -41,6 +42,7 @@ static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
+ { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
@@ -237,7 +239,8 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
return -EADDRNOTAVAIL;
status = be_cmd_mac_addr_query(adapter, current_mac,
- MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
+ MAC_ADDRESS_TYPE_NETWORK, false,
+ adapter->if_handle, 0);
if (status)
goto err;
@@ -315,6 +318,8 @@ static void populate_be3_stats(struct be_adapter *adapter)
struct be_drv_stats *drvs = &adapter->drv_stats;
be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
+ drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
+ drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
drvs->rx_pause_frames = port_stats->rx_pause_frames;
drvs->rx_crc_errors = port_stats->rx_crc_errors;
drvs->rx_control_frames = port_stats->rx_control_frames;
@@ -491,19 +496,19 @@ static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
return stats;
}
-void be_link_status_update(struct be_adapter *adapter, u32 link_status)
+void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
struct net_device *netdev = adapter->netdev;
- /* when link status changes, link speed must be re-queried from card */
- adapter->link_speed = -1;
- if ((link_status & LINK_STATUS_MASK) == LINK_UP) {
- netif_carrier_on(netdev);
- dev_info(&adapter->pdev->dev, "%s: Link up\n", netdev->name);
- } else {
+ if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
netif_carrier_off(netdev);
- dev_info(&adapter->pdev->dev, "%s: Link down\n", netdev->name);
+ adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
}
+
+ if ((link_status & LINK_STATUS_MASK) == LINK_UP)
+ netif_carrier_on(netdev);
+ else
+ netif_carrier_off(netdev);
}
static void be_tx_stats_update(struct be_tx_obj *txo,
@@ -549,11 +554,26 @@ static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}
+static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
+ struct sk_buff *skb)
+{
+ u8 vlan_prio;
+ u16 vlan_tag;
+
+ vlan_tag = vlan_tx_tag_get(skb);
+ vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+ /* If vlan priority provided by OS is NOT in available bmap */
+ if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
+ vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
+ adapter->recommended_prio;
+
+ return vlan_tag;
+}
+
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
- u8 vlan_prio = 0;
- u16 vlan_tag = 0;
+ u16 vlan_tag;
memset(hdr, 0, sizeof(*hdr));
@@ -584,12 +604,7 @@ static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
if (vlan_tx_tag_present(skb)) {
AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
- vlan_tag = vlan_tx_tag_get(skb);
- vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
- /* If vlan priority provided by OS is NOT in available bmap */
- if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
- vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
- adapter->recommended_prio;
+ vlan_tag = be_get_tx_vlan_tag(adapter, skb);
AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
}
@@ -692,6 +707,25 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
u32 start = txq->head;
bool dummy_wrb, stopped = false;
+ /* For vlan tagged pkts, BE
+ * 1) calculates checksum even when CSO is not requested
+ * 2) calculates checksum wrongly for padded pkt less than
+ * 60 bytes long.
+ * As a workaround disable TX vlan offloading in such cases.
+ */
+ if (unlikely(vlan_tx_tag_present(skb) &&
+ (skb->ip_summed != CHECKSUM_PARTIAL || skb->len <= 60))) {
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (unlikely(!skb))
+ goto tx_drop;
+
+ skb = __vlan_put_tag(skb, be_get_tx_vlan_tag(adapter, skb));
+ if (unlikely(!skb))
+ goto tx_drop;
+
+ skb->vlan_tci = 0;
+ }
+
wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
@@ -719,6 +753,7 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
txq->head = start;
dev_kfree_skb_any(skb);
}
+tx_drop:
return NETDEV_TX_OK;
}
@@ -746,15 +781,15 @@ static int be_change_mtu(struct net_device *netdev, int new_mtu)
*/
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
+ struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf_num];
u16 vtag[BE_NUM_VLANS_SUPPORTED];
u16 ntags = 0, i;
int status = 0;
- u32 if_handle;
if (vf) {
- if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
- vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
- status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
+ vtag[0] = cpu_to_le16(vf_cfg->vlan_tag);
+ status = be_cmd_vlan_config(adapter, vf_cfg->if_handle, vtag,
+ 1, 1, 0);
}
/* No need to further configure vids if in promiscuous mode */
@@ -779,31 +814,48 @@ static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
return status;
}
-static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
+static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
struct be_adapter *adapter = netdev_priv(netdev);
+ int status = 0;
- adapter->vlans_added++;
- if (!be_physfn(adapter))
- return;
+ if (!be_physfn(adapter)) {
+ status = -EINVAL;
+ goto ret;
+ }
adapter->vlan_tag[vid] = 1;
if (adapter->vlans_added <= (adapter->max_vlans + 1))
- be_vid_config(adapter, false, 0);
+ status = be_vid_config(adapter, false, 0);
+
+ if (!status)
+ adapter->vlans_added++;
+ else
+ adapter->vlan_tag[vid] = 0;
+ret:
+ return status;
}
-static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
+static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
struct be_adapter *adapter = netdev_priv(netdev);
+ int status = 0;
- adapter->vlans_added--;
-
- if (!be_physfn(adapter))
- return;
+ if (!be_physfn(adapter)) {
+ status = -EINVAL;
+ goto ret;
+ }
adapter->vlan_tag[vid] = 0;
if (adapter->vlans_added <= adapter->max_vlans)
- be_vid_config(adapter, false, 0);
+ status = be_vid_config(adapter, false, 0);
+
+ if (!status)
+ adapter->vlans_added--;
+ else
+ adapter->vlan_tag[vid] = 1;
+ret:
+ return status;
}
static void be_set_rx_mode(struct net_device *netdev)
@@ -840,28 +892,30 @@ done:
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
struct be_adapter *adapter = netdev_priv(netdev);
+ struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
int status;
- if (!adapter->sriov_enabled)
+ if (!sriov_enabled(adapter))
return -EPERM;
- if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
+ if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
return -EINVAL;
- if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
- status = be_cmd_pmac_del(adapter,
- adapter->vf_cfg[vf].vf_if_handle,
- adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
+ if (lancer_chip(adapter)) {
+ status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
+ } else {
+ status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
+ vf_cfg->pmac_id, vf + 1);
- status = be_cmd_pmac_add(adapter, mac,
- adapter->vf_cfg[vf].vf_if_handle,
- &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
+ status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
+ &vf_cfg->pmac_id, vf + 1);
+ }
if (status)
dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
mac, vf);
else
- memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
+ memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
return status;
}
@@ -870,18 +924,19 @@ static int be_get_vf_config(struct net_device *netdev, int vf,
struct ifla_vf_info *vi)
{
struct be_adapter *adapter = netdev_priv(netdev);
+ struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
- if (!adapter->sriov_enabled)
+ if (!sriov_enabled(adapter))
return -EPERM;
- if (vf >= num_vfs)
+ if (vf >= adapter->num_vfs)
return -EINVAL;
vi->vf = vf;
- vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
- vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
+ vi->tx_rate = vf_cfg->tx_rate;
+ vi->vlan = vf_cfg->vlan_tag;
vi->qos = 0;
- memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);
+ memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
return 0;
}
@@ -892,17 +947,17 @@ static int be_set_vf_vlan(struct net_device *netdev,
struct be_adapter *adapter = netdev_priv(netdev);
int status = 0;
- if (!adapter->sriov_enabled)
+ if (!sriov_enabled(adapter))
return -EPERM;
- if ((vf >= num_vfs) || (vlan > 4095))
+ if (vf >= adapter->num_vfs || vlan > 4095)
return -EINVAL;
if (vlan) {
- adapter->vf_cfg[vf].vf_vlan_tag = vlan;
+ adapter->vf_cfg[vf].vlan_tag = vlan;
adapter->vlans_added++;
} else {
- adapter->vf_cfg[vf].vf_vlan_tag = 0;
+ adapter->vf_cfg[vf].vlan_tag = 0;
adapter->vlans_added--;
}
@@ -920,21 +975,25 @@ static int be_set_vf_tx_rate(struct net_device *netdev,
struct be_adapter *adapter = netdev_priv(netdev);
int status = 0;
- if (!adapter->sriov_enabled)
+ if (!sriov_enabled(adapter))
return -EPERM;
- if ((vf >= num_vfs) || (rate < 0))
+ if (vf >= adapter->num_vfs)
return -EINVAL;
- if (rate > 10000)
- rate = 10000;
+ if (rate < 100 || rate > 10000) {
+ dev_err(&adapter->pdev->dev,
+ "tx rate must be between 100 and 10000 Mbps\n");
+ return -EINVAL;
+ }
- adapter->vf_cfg[vf].vf_tx_rate = rate;
status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
if (status)
- dev_info(&adapter->pdev->dev,
+ dev_err(&adapter->pdev->dev,
"tx rate %d on VF %d failed\n", rate, vf);
+ else
+ adapter->vf_cfg[vf].tx_rate = rate;
return status;
}
@@ -1645,8 +1704,7 @@ static void be_tx_queues_destroy(struct be_adapter *adapter)
static int be_num_txqs_want(struct be_adapter *adapter)
{
- if ((num_vfs && adapter->sriov_enabled) ||
- be_is_mc(adapter) ||
+ if (sriov_enabled(adapter) || be_is_mc(adapter) ||
lancer_chip(adapter) || !be_physfn(adapter) ||
adapter->generation == BE_GEN2)
return 1;
@@ -1662,9 +1720,12 @@ static int be_tx_queues_create(struct be_adapter *adapter)
u8 i;
adapter->num_tx_qs = be_num_txqs_want(adapter);
- if (adapter->num_tx_qs != MAX_TX_QS)
+ if (adapter->num_tx_qs != MAX_TX_QS) {
+ rtnl_lock();
netif_set_real_num_tx_queues(adapter->netdev,
adapter->num_tx_qs);
+ rtnl_unlock();
+ }
adapter->tx_eq.max_eqd = 0;
adapter->tx_eq.min_eqd = 0;
@@ -1693,9 +1754,6 @@ static int be_tx_queues_create(struct be_adapter *adapter)
if (be_queue_alloc(adapter, q, TX_Q_LEN,
sizeof(struct be_eth_wrb)))
goto err;
-
- if (be_cmd_txq_create(adapter, q, cq))
- goto err;
}
return 0;
@@ -1728,8 +1786,8 @@ static void be_rx_queues_destroy(struct be_adapter *adapter)
static u32 be_num_rxqs_want(struct be_adapter *adapter)
{
if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
- !adapter->sriov_enabled && be_physfn(adapter) &&
- !be_is_mc(adapter)) {
+ !sriov_enabled(adapter) && be_physfn(adapter) &&
+ !be_is_mc(adapter)) {
return 1 + MAX_RSS_QS; /* one default non-RSS queue */
} else {
dev_warn(&adapter->pdev->dev,
@@ -1929,6 +1987,7 @@ static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
struct be_adapter *adapter =
container_of(tx_eq, struct be_adapter, tx_eq);
+ struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
struct be_tx_obj *txo;
struct be_eth_tx_compl *txcp;
int tx_compl, mcc_compl, status = 0;
@@ -1965,12 +2024,19 @@ static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
mcc_compl = be_process_mcc(adapter, &status);
if (mcc_compl) {
- struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
}
napi_complete(napi);
+ /* Arm CQ again to regenerate EQEs for Lancer in INTx mode */
+ if (lancer_chip(adapter) && !msix_enabled(adapter)) {
+ for_all_tx_queues(adapter, txo, i)
+ be_cq_notify(adapter, txo->cq.id, true, 0);
+
+ be_cq_notify(adapter, mcc_obj->cq.id, true, 0);
+ }
+
be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
adapter->drv_stats.tx_events++;
return 1;
@@ -1982,6 +2048,9 @@ void be_detect_dump_ue(struct be_adapter *adapter)
u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
u32 i;
+ if (adapter->eeh_err || adapter->ue_detected)
+ return;
+
if (lancer_chip(adapter)) {
sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
@@ -2008,7 +2077,8 @@ void be_detect_dump_ue(struct be_adapter *adapter)
sliport_status & SLIPORT_STATUS_ERR_MASK) {
adapter->ue_detected = true;
adapter->eeh_err = true;
- dev_err(&adapter->pdev->dev, "UE Detected!!\n");
+ dev_err(&adapter->pdev->dev,
+ "Unrecoverable error in the card\n");
}
if (ue_lo) {
@@ -2036,53 +2106,6 @@ void be_detect_dump_ue(struct be_adapter *adapter)
}
}
-static void be_worker(struct work_struct *work)
-{
- struct be_adapter *adapter =
- container_of(work, struct be_adapter, work.work);
- struct be_rx_obj *rxo;
- int i;
-
- if (!adapter->ue_detected)
- be_detect_dump_ue(adapter);
-
- /* when interrupts are not yet enabled, just reap any pending
- * mcc completions */
- if (!netif_running(adapter->netdev)) {
- int mcc_compl, status = 0;
-
- mcc_compl = be_process_mcc(adapter, &status);
-
- if (mcc_compl) {
- struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
- be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
- }
-
- goto reschedule;
- }
-
- if (!adapter->stats_cmd_sent) {
- if (lancer_chip(adapter))
- lancer_cmd_get_pport_stats(adapter,
- &adapter->stats_cmd);
- else
- be_cmd_get_stats(adapter, &adapter->stats_cmd);
- }
-
- for_all_rx_queues(adapter, rxo, i) {
- be_rx_eqd_update(adapter, rxo);
-
- if (rxo->rx_post_starved) {
- rxo->rx_post_starved = false;
- be_post_rx_frags(rxo, GFP_KERNEL);
- }
- }
-
-reschedule:
- adapter->work_counter++;
- schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
-}
-
static void be_msix_disable(struct be_adapter *adapter)
{
if (msix_enabled(adapter)) {
@@ -2119,27 +2142,28 @@ done:
static int be_sriov_enable(struct be_adapter *adapter)
{
be_check_sriov_fn_type(adapter);
+
#ifdef CONFIG_PCI_IOV
if (be_physfn(adapter) && num_vfs) {
int status, pos;
- u16 nvfs;
+ u16 dev_vfs;
pos = pci_find_ext_capability(adapter->pdev,
PCI_EXT_CAP_ID_SRIOV);
pci_read_config_word(adapter->pdev,
- pos + PCI_SRIOV_TOTAL_VF, &nvfs);
+ pos + PCI_SRIOV_TOTAL_VF, &dev_vfs);
- if (num_vfs > nvfs) {
+ adapter->num_vfs = min_t(u16, num_vfs, dev_vfs);
+ if (adapter->num_vfs != num_vfs)
dev_info(&adapter->pdev->dev,
- "Device supports %d VFs and not %d\n",
- nvfs, num_vfs);
- num_vfs = nvfs;
- }
+ "Device supports %d VFs and not %d\n",
+ adapter->num_vfs, num_vfs);
- status = pci_enable_sriov(adapter->pdev, num_vfs);
- adapter->sriov_enabled = status ? false : true;
+ status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
+ if (status)
+ adapter->num_vfs = 0;
- if (adapter->sriov_enabled) {
+ if (adapter->num_vfs) {
adapter->vf_cfg = kcalloc(num_vfs,
sizeof(struct be_vf_cfg),
GFP_KERNEL);
@@ -2154,10 +2178,10 @@ static int be_sriov_enable(struct be_adapter *adapter)
static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
- if (adapter->sriov_enabled) {
+ if (sriov_enabled(adapter)) {
pci_disable_sriov(adapter->pdev);
kfree(adapter->vf_cfg);
- adapter->sriov_enabled = false;
+ adapter->num_vfs = 0;
}
#endif
}
@@ -2351,8 +2375,8 @@ static int be_close(struct net_device *netdev)
static int be_rx_queues_setup(struct be_adapter *adapter)
{
struct be_rx_obj *rxo;
- int rc, i;
- u8 rsstable[MAX_RSS_QS];
+ int rc, i, j;
+ u8 rsstable[128];
for_all_rx_queues(adapter, rxo, i) {
rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
@@ -2364,11 +2388,15 @@ static int be_rx_queues_setup(struct be_adapter *adapter)
}
if (be_multi_rxq(adapter)) {
- for_all_rss_queues(adapter, rxo, i)
- rsstable[i] = rxo->rss_id;
+ for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
+ for_all_rss_queues(adapter, rxo, i) {
+ if ((j + i) >= 128)
+ break;
+ rsstable[j + i] = rxo->rss_id;
+ }
+ }
+ rc = be_cmd_rss_config(adapter, rsstable, 128);
- rc = be_cmd_rss_config(adapter, rsstable,
- adapter->num_rx_qs - 1);
if (rc)
return rc;
}
@@ -2386,6 +2414,7 @@ static int be_open(struct net_device *netdev)
struct be_adapter *adapter = netdev_priv(netdev);
struct be_eq_obj *tx_eq = &adapter->tx_eq;
struct be_rx_obj *rxo;
+ u8 link_status;
int status, i;
status = be_rx_queues_setup(adapter);
@@ -2409,6 +2438,11 @@ static int be_open(struct net_device *netdev)
/* Now that interrupts are on we can process async mcc */
be_async_mcc_enable(adapter);
+ status = be_cmd_link_status_query(adapter, NULL, NULL,
+ &link_status, 0);
+ if (!status)
+ be_link_status_update(adapter, link_status);
+
return 0;
err:
be_close(adapter->netdev);
@@ -2465,19 +2499,24 @@ static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
u32 vf;
int status = 0;
u8 mac[ETH_ALEN];
+ struct be_vf_cfg *vf_cfg;
be_vf_eth_addr_generate(adapter, mac);
- for (vf = 0; vf < num_vfs; vf++) {
- status = be_cmd_pmac_add(adapter, mac,
- adapter->vf_cfg[vf].vf_if_handle,
- &adapter->vf_cfg[vf].vf_pmac_id,
- vf + 1);
+ for_all_vfs(adapter, vf_cfg, vf) {
+ if (lancer_chip(adapter)) {
+ status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
+ } else {
+ status = be_cmd_pmac_add(adapter, mac,
+ vf_cfg->if_handle,
+ &vf_cfg->pmac_id, vf + 1);
+ }
+
if (status)
dev_err(&adapter->pdev->dev,
- "Mac address add failed for VF %d\n", vf);
+ "Mac address assignment failed for VF %d\n", vf);
else
- memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
+ memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
mac[5] += 1;
}
@@ -2486,24 +2525,23 @@ static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
static void be_vf_clear(struct be_adapter *adapter)
{
+ struct be_vf_cfg *vf_cfg;
u32 vf;
- for (vf = 0; vf < num_vfs; vf++) {
- if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
- be_cmd_pmac_del(adapter,
- adapter->vf_cfg[vf].vf_if_handle,
- adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
- }
+ for_all_vfs(adapter, vf_cfg, vf) {
+ if (lancer_chip(adapter))
+ be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
+ else
+ be_cmd_pmac_del(adapter, vf_cfg->if_handle,
+ vf_cfg->pmac_id, vf + 1);
- for (vf = 0; vf < num_vfs; vf++)
- if (adapter->vf_cfg[vf].vf_if_handle)
- be_cmd_if_destroy(adapter,
- adapter->vf_cfg[vf].vf_if_handle, vf + 1);
+ be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
+ }
}
static int be_clear(struct be_adapter *adapter)
{
- if (be_physfn(adapter) && adapter->sriov_enabled)
+ if (sriov_enabled(adapter))
be_vf_clear(adapter);
be_cmd_if_destroy(adapter, adapter->if_handle, 0);
@@ -2511,61 +2549,94 @@ static int be_clear(struct be_adapter *adapter)
be_mcc_queues_destroy(adapter);
be_rx_queues_destroy(adapter);
be_tx_queues_destroy(adapter);
- adapter->eq_next_idx = 0;
-
- adapter->be3_native = false;
- adapter->promiscuous = false;
/* tell fw we're done with firing cmds */
be_cmd_fw_clean(adapter);
return 0;
}
+static void be_vf_setup_init(struct be_adapter *adapter)
+{
+ struct be_vf_cfg *vf_cfg;
+ int vf;
+
+ for_all_vfs(adapter, vf_cfg, vf) {
+ vf_cfg->if_handle = -1;
+ vf_cfg->pmac_id = -1;
+ }
+}
+
static int be_vf_setup(struct be_adapter *adapter)
{
+ struct be_vf_cfg *vf_cfg;
u32 cap_flags, en_flags, vf;
u16 lnk_speed;
int status;
- cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST;
- for (vf = 0; vf < num_vfs; vf++) {
+ be_vf_setup_init(adapter);
+
+ cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
+ BE_IF_FLAGS_MULTICAST;
+ for_all_vfs(adapter, vf_cfg, vf) {
status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
- &adapter->vf_cfg[vf].vf_if_handle,
- NULL, vf+1);
+ &vf_cfg->if_handle, NULL, vf + 1);
if (status)
goto err;
- adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID;
}
- if (!lancer_chip(adapter)) {
- status = be_vf_eth_addr_config(adapter);
- if (status)
- goto err;
- }
+ status = be_vf_eth_addr_config(adapter);
+ if (status)
+ goto err;
- for (vf = 0; vf < num_vfs; vf++) {
+ for_all_vfs(adapter, vf_cfg, vf) {
status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
- vf + 1);
+ NULL, vf + 1);
if (status)
goto err;
- adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
+ vf_cfg->tx_rate = lnk_speed * 10;
}
return 0;
err:
return status;
}
+static void be_setup_init(struct be_adapter *adapter)
+{
+ adapter->vlan_prio_bmap = 0xff;
+ adapter->link_speed = -1;
+ adapter->if_handle = -1;
+ adapter->be3_native = false;
+ adapter->promiscuous = false;
+ adapter->eq_next_idx = 0;
+}
+
+static int be_configure_mac_from_list(struct be_adapter *adapter, u8 *mac)
+{
+ u32 pmac_id;
+ int status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id);
+ if (status != 0)
+ goto do_none;
+ status = be_cmd_mac_addr_query(adapter, mac,
+ MAC_ADDRESS_TYPE_NETWORK,
+ false, adapter->if_handle, pmac_id);
+ if (status != 0)
+ goto do_none;
+ status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
+ &adapter->pmac_id, 0);
+do_none:
+ return status;
+}
+
static int be_setup(struct be_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
u32 cap_flags, en_flags;
u32 tx_fc, rx_fc;
- int status;
+ int status, i;
u8 mac[ETH_ALEN];
+ struct be_tx_obj *txo;
- /* Allow all priorities by default. A GRP5 evt may modify this */
- adapter->vlan_prio_bmap = 0xff;
- adapter->link_speed = -1;
+ be_setup_init(adapter);
be_cmd_req_native_mode(adapter);
@@ -2583,7 +2654,7 @@ static int be_setup(struct be_adapter *adapter)
memset(mac, 0, ETH_ALEN);
status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
- true /*permanent */, 0);
+ true /*permanent */, 0, 0);
if (status)
return status;
memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
@@ -2592,7 +2663,8 @@ static int be_setup(struct be_adapter *adapter)
en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
- BE_IF_FLAGS_PROMISCUOUS;
+ BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;
+
if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
cap_flags |= BE_IF_FLAGS_RSS;
en_flags |= BE_IF_FLAGS_RSS;
@@ -2603,12 +2675,23 @@ static int be_setup(struct be_adapter *adapter)
if (status != 0)
goto err;
- /* For BEx, the VF's permanent mac queried from card is incorrect.
- * Query the mac configued by the PF using if_handle
- */
- if (!be_physfn(adapter) && !lancer_chip(adapter)) {
- status = be_cmd_mac_addr_query(adapter, mac,
- MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
+ for_all_tx_queues(adapter, txo, i) {
+ status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
+ if (status)
+ goto err;
+ }
+
+ /* The VF's permanent mac queried from card is incorrect.
+ * For BEx: Query the mac configured by the PF using if_handle
+ * For Lancer: Get and use mac_list to obtain mac address.
+ */
+ if (!be_physfn(adapter)) {
+ if (lancer_chip(adapter))
+ status = be_configure_mac_from_list(adapter, mac);
+ else
+ status = be_cmd_mac_addr_query(adapter, mac,
+ MAC_ADDRESS_TYPE_NETWORK, false,
+ adapter->if_handle, 0);
if (!status) {
memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
@@ -2624,18 +2707,21 @@ static int be_setup(struct be_adapter *adapter)
be_set_rx_mode(adapter->netdev);
status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
- if (status)
+ /* For Lancer: It is legal for this cmd to fail on VF */
+ if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
goto err;
+
if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) {
status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
adapter->rx_fc);
- if (status)
+ /* For Lancer: It is legal for this cmd to fail on VF */
+ if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
goto err;
}
pcie_set_readrq(adapter->pdev, 4096);
- if (be_physfn(adapter) && adapter->sriov_enabled) {
+ if (sriov_enabled(adapter)) {
status = be_vf_setup(adapter);
if (status)
goto err;
@@ -2647,6 +2733,19 @@ err:
return status;
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void be_netpoll(struct net_device *netdev)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ struct be_rx_obj *rxo;
+ int i;
+
+ event_handle(adapter, &adapter->tx_eq, false);
+ for_all_rx_queues(adapter, rxo, i)
+ event_handle(adapter, &rxo->rx_eq, true);
+}
+#endif
+
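be_netpoll() above services the TX and RX event queues directly so that netconsole or kgdboe can move packets while interrupts are off. Drivers without per-queue event handling commonly implement the hook by replaying their interrupt handler, as in this hedged sketch (struct foo_priv, foo_interrupt() and priv->irq are assumptions, not benet code):

#ifdef CONFIG_NET_POLL_CONTROLLER
static void foo_netpoll(struct net_device *netdev)
{
	struct foo_priv *priv = netdev_priv(netdev);

	disable_irq(priv->irq);			/* do not race the real ISR */
	foo_interrupt(priv->irq, netdev);	/* reap TX/RX completions */
	enable_irq(priv->irq);
}
#endif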
#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
static bool be_flash_redboot(struct be_adapter *adapter,
const u8 *p, u32 img_start, int image_size,
@@ -2981,7 +3080,7 @@ fw_exit:
return status;
}
-static struct net_device_ops be_netdev_ops = {
+static const struct net_device_ops be_netdev_ops = {
.ndo_open = be_open,
.ndo_stop = be_close,
.ndo_start_xmit = be_xmit,
@@ -2995,7 +3094,10 @@ static struct net_device_ops be_netdev_ops = {
.ndo_set_vf_mac = be_set_vf_mac,
.ndo_set_vf_vlan = be_set_vf_vlan,
.ndo_set_vf_tx_rate = be_set_vf_tx_rate,
- .ndo_get_vf_config = be_get_vf_config
+ .ndo_get_vf_config = be_get_vf_config,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = be_netpoll,
+#endif
};
static void be_netdev_init(struct net_device *netdev)
@@ -3242,6 +3344,7 @@ static int be_dev_family_check(struct be_adapter *adapter)
break;
case BE_DEVICE_ID2:
case OC_DEVICE_ID2:
+ case OC_DEVICE_ID5:
adapter->generation = BE_GEN3;
break;
case OC_DEVICE_ID3:
@@ -3267,7 +3370,7 @@ static int be_dev_family_check(struct be_adapter *adapter)
static int lancer_wait_ready(struct be_adapter *adapter)
{
-#define SLIPORT_READY_TIMEOUT 500
+#define SLIPORT_READY_TIMEOUT 30
u32 sliport_status;
int status = 0, i;
@@ -3276,7 +3379,7 @@ static int lancer_wait_ready(struct be_adapter *adapter)
if (sliport_status & SLIPORT_STATUS_RDY_MASK)
break;
- msleep(20);
+ msleep(1000);
}
if (i == SLIPORT_READY_TIMEOUT)
@@ -3313,6 +3416,104 @@ static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
return status;
}
+static void lancer_test_and_recover_fn_err(struct be_adapter *adapter)
+{
+ int status;
+ u32 sliport_status;
+
+ if (adapter->eeh_err || adapter->ue_detected)
+ return;
+
+ sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
+
+ if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
+ dev_err(&adapter->pdev->dev,
+			"Adapter in error state. "
+ "Trying to recover.\n");
+
+ status = lancer_test_and_set_rdy_state(adapter);
+ if (status)
+ goto err;
+
+ netif_device_detach(adapter->netdev);
+
+ if (netif_running(adapter->netdev))
+ be_close(adapter->netdev);
+
+ be_clear(adapter);
+
+ adapter->fw_timeout = false;
+
+ status = be_setup(adapter);
+ if (status)
+ goto err;
+
+ if (netif_running(adapter->netdev)) {
+ status = be_open(adapter->netdev);
+ if (status)
+ goto err;
+ }
+
+ netif_device_attach(adapter->netdev);
+
+ dev_err(&adapter->pdev->dev,
+ "Adapter error recovery succeeded\n");
+ }
+ return;
+err:
+ dev_err(&adapter->pdev->dev,
+ "Adapter error recovery failed\n");
+}
+
+static void be_worker(struct work_struct *work)
+{
+ struct be_adapter *adapter =
+ container_of(work, struct be_adapter, work.work);
+ struct be_rx_obj *rxo;
+ int i;
+
+ if (lancer_chip(adapter))
+ lancer_test_and_recover_fn_err(adapter);
+
+ be_detect_dump_ue(adapter);
+
+ /* when interrupts are not yet enabled, just reap any pending
+ * mcc completions */
+ if (!netif_running(adapter->netdev)) {
+ int mcc_compl, status = 0;
+
+ mcc_compl = be_process_mcc(adapter, &status);
+
+ if (mcc_compl) {
+ struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
+ be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
+ }
+
+ goto reschedule;
+ }
+
+ if (!adapter->stats_cmd_sent) {
+ if (lancer_chip(adapter))
+ lancer_cmd_get_pport_stats(adapter,
+ &adapter->stats_cmd);
+ else
+ be_cmd_get_stats(adapter, &adapter->stats_cmd);
+ }
+
+ for_all_rx_queues(adapter, rxo, i) {
+ be_rx_eqd_update(adapter, rxo);
+
+ if (rxo->rx_post_starved) {
+ rxo->rx_post_starved = false;
+ be_post_rx_frags(rxo, GFP_KERNEL);
+ }
+ }
+
+reschedule:
+ adapter->work_counter++;
+ schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
+}
+
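be_worker() above is a self-rescheduling delayed work item: it recovers the adapter with container_of(), does the periodic housekeeping, and re-arms itself a second later. A minimal sketch of the pattern, assuming a hypothetical struct foo_adapter that embeds a struct delayed_work named work:

static void foo_worker(struct work_struct *work)
{
	struct foo_adapter *adapter =
		container_of(work, struct foo_adapter, work.work);

	/* periodic housekeeping: error checks, stats, RX refills ... */

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

static void foo_start_worker(struct foo_adapter *adapter)
{
	/* armed once (e.g. at probe time); the handler keeps it running */
	INIT_DELAYED_WORK(&adapter->work, foo_worker);
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}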
static int __devinit be_probe(struct pci_dev *pdev,
const struct pci_device_id *pdev_id)
{
@@ -3365,7 +3566,12 @@ static int __devinit be_probe(struct pci_dev *pdev,
goto disable_sriov;
if (lancer_chip(adapter)) {
- status = lancer_test_and_set_rdy_state(adapter);
+ status = lancer_wait_ready(adapter);
+ if (!status) {
+ iowrite32(SLI_PORT_CONTROL_IP_MASK,
+ adapter->db + SLIPORT_CONTROL_OFFSET);
+ status = lancer_test_and_set_rdy_state(adapter);
+ }
if (status) {
dev_err(&pdev->dev, "Adapter in non recoverable error\n");
goto ctrl_clean;
@@ -3559,6 +3765,8 @@ static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
dev_info(&adapter->pdev->dev, "EEH reset\n");
adapter->eeh_err = false;
+ adapter->ue_detected = false;
+ adapter->fw_timeout = false;
status = pci_enable_device(pdev);
if (status)
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 251b635fe75a..60f0e788cc25 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -1185,18 +1185,7 @@ static struct platform_driver ethoc_driver = {
},
};
-static int __init ethoc_init(void)
-{
- return platform_driver_register(&ethoc_driver);
-}
-
-static void __exit ethoc_exit(void)
-{
- platform_driver_unregister(&ethoc_driver);
-}
-
-module_init(ethoc_init);
-module_exit(ethoc_exit);
+module_platform_driver(ethoc_driver);
MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
MODULE_DESCRIPTION("OpenCores Ethernet MAC driver");
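The hunk above is the first of several in this merge that drop open-coded module_init()/module_exit() registration stubs in favour of module_platform_driver(), which generates exactly those stubs around platform_driver_register()/platform_driver_unregister(). A hedged sketch of the resulting boilerplate-free form for a hypothetical driver:

static struct platform_driver foo_driver = {
	.probe	= foo_probe,		/* hypothetical callbacks */
	.remove	= foo_remove,
	.driver	= {
		.name	= "foo",
		.owner	= THIS_MODULE,
	},
};

module_platform_driver(foo_driver);	/* emits the init/exit wrappers */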
diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
index 61d2bddec1fa..c82d444b582d 100644
--- a/drivers/net/ethernet/fealnx.c
+++ b/drivers/net/ethernet/fealnx.c
@@ -1818,9 +1818,9 @@ static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *i
{
struct netdev_private *np = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, pci_name(np->pci_dev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
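This fealnx hunk, like several ethtool hunks later in the series, swaps strcpy()/sprintf() for strlcpy()/snprintf() so the fixed-size ethtool_drvinfo fields can never be overrun. A hedged sketch of the bounded form, with DRV_NAME and DRV_VERSION assumed to be the driver's own macros:

static void foo_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	/* every copy is bounded by the size of its destination field */
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	snprintf(info->bus_info, sizeof(info->bus_info),
		 "ISA 0x%lx", dev->base_addr);
}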
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index c520cfd3b298..3574e1499dfc 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -21,9 +21,10 @@ config NET_VENDOR_FREESCALE
if NET_VENDOR_FREESCALE
config FEC
- bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
+ tristate "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
depends on (M523x || M527x || M5272 || M528x || M520x || M532x || \
- ARCH_MXC || ARCH_MXS)
+ ARCH_MXC || SOC_IMX28)
+ default ARCH_MXC || SOC_IMX28 if ARM
select PHYLIB
---help---
Say Y here if you want to use the built-in 10/100 Fast ethernet
diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c
index 1124ce0a1594..7b25e9cf13f6 100644
--- a/drivers/net/ethernet/freescale/fec.c
+++ b/drivers/net/ethernet/freescale/fec.c
@@ -99,7 +99,7 @@ static struct platform_device_id fec_devtype[] = {
MODULE_DEVICE_TABLE(platform, fec_devtype);
enum imx_fec_type {
- IMX25_FEC = 1, /* runs on i.mx25/50/53 */
+ IMX25_FEC = 1, /* runs on i.mx25/50/53 */
IMX27_FEC, /* runs on i.mx27/35/51 */
IMX28_FEC,
IMX6Q_FEC,
@@ -132,7 +132,7 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
#elif defined (CONFIG_M5272C3)
#define FEC_FLASHMAC (0xffe04000 + 4)
#elif defined(CONFIG_MOD5272)
-#define FEC_FLASHMAC 0xffc0406b
+#define FEC_FLASHMAC 0xffc0406b
#else
#define FEC_FLASHMAC 0
#endif
@@ -232,6 +232,7 @@ struct fec_enet_private {
struct platform_device *pdev;
int opened;
+ int dev_id;
/* Phylib and MDIO interface */
struct mii_bus *mii_bus;
@@ -254,11 +255,13 @@ struct fec_enet_private {
#define FEC_MMFR_TA (2 << 16)
#define FEC_MMFR_DATA(v) (v & 0xffff)
-#define FEC_MII_TIMEOUT 1000 /* us */
+#define FEC_MII_TIMEOUT 30000 /* us */
/* Transmitter timeout */
#define TX_TIMEOUT (2 * HZ)
+static int mii_cnt;
+
static void *swap_buffer(void *bufaddr, int len)
{
int i;
@@ -473,6 +476,7 @@ fec_restart(struct net_device *ndev, int duplex)
} else {
#ifdef FEC_MIIGSK_ENR
if (id_entry->driver_data & FEC_QUIRK_USE_GASKET) {
+ u32 cfgr;
/* disable the gasket and wait */
writel(0, fep->hwp + FEC_MIIGSK_ENR);
while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
@@ -483,9 +487,11 @@ fec_restart(struct net_device *ndev, int duplex)
* RMII, 50 MHz, no loopback, no echo
* MII, 25 MHz, no loopback, no echo
*/
- writel((fep->phy_interface == PHY_INTERFACE_MODE_RMII) ?
- 1 : 0, fep->hwp + FEC_MIIGSK_CFGR);
-
+ cfgr = (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
+ ? BM_MIIGSK_CFGR_RMII : BM_MIIGSK_CFGR_MII;
+ if (fep->phy_dev && fep->phy_dev->speed == SPEED_10)
+ cfgr |= BM_MIIGSK_CFGR_FRCONT_10M;
+ writel(cfgr, fep->hwp + FEC_MIIGSK_CFGR);
/* re-enable the gasket */
writel(2, fep->hwp + FEC_MIIGSK_ENR);
@@ -515,6 +521,7 @@ fec_stop(struct net_device *ndev)
struct fec_enet_private *fep = netdev_priv(ndev);
const struct platform_device_id *id_entry =
platform_get_device_id(fep->pdev);
+ u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8);
/* We cannot expect a graceful transmit stop without link !!! */
if (fep->link) {
@@ -531,8 +538,10 @@ fec_stop(struct net_device *ndev)
writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
/* We have to keep ENET enabled to have MII interrupt stay working */
- if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
+ if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
writel(2, fep->hwp + FEC_ECNTRL);
+ writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
+ }
}
@@ -818,7 +827,7 @@ static void __inline__ fec_get_mac(struct net_device *ndev)
iap = (unsigned char *)FEC_FLASHMAC;
#else
if (pdata)
- memcpy(iap, pdata->mac, ETH_ALEN);
+ iap = (unsigned char *)&pdata->mac;
#endif
}
@@ -837,7 +846,7 @@ static void __inline__ fec_get_mac(struct net_device *ndev)
/* Adjust MAC if using macaddr */
if (iap == macaddr)
- ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->pdev->id;
+ ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->dev_id;
}
/* ------------------------------------------------------------------------- */
@@ -865,6 +874,8 @@ static void fec_enet_adjust_link(struct net_device *ndev)
if (phy_dev->link) {
if (fep->full_duplex != phy_dev->duplex) {
fec_restart(ndev, phy_dev->duplex);
+ /* prevent unnecessary second fec_restart() below */
+ fep->link = phy_dev->link;
status_change = 1;
}
}
@@ -953,7 +964,7 @@ static int fec_enet_mii_probe(struct net_device *ndev)
char mdio_bus_id[MII_BUS_ID_SIZE];
char phy_name[MII_BUS_ID_SIZE + 3];
int phy_id;
- int dev_id = fep->pdev->id;
+ int dev_id = fep->dev_id;
fep->phy_dev = NULL;
@@ -972,8 +983,9 @@ static int fec_enet_mii_probe(struct net_device *ndev)
}
if (phy_id >= PHY_MAX_ADDR) {
- printk(KERN_INFO "%s: no PHY, assuming direct connection "
- "to switch\n", ndev->name);
+ printk(KERN_INFO
+ "%s: no PHY, assuming direct connection to switch\n",
+ ndev->name);
strncpy(mdio_bus_id, "0", MII_BUS_ID_SIZE);
phy_id = 0;
}
@@ -998,8 +1010,9 @@ static int fec_enet_mii_probe(struct net_device *ndev)
fep->link = 0;
fep->full_duplex = 0;
- printk(KERN_INFO "%s: Freescale FEC PHY driver [%s] "
- "(mii_bus:phy_addr=%s, irq=%d)\n", ndev->name,
+ printk(KERN_INFO
+ "%s: Freescale FEC PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
+ ndev->name,
fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev),
fep->phy_dev->irq);
@@ -1031,10 +1044,14 @@ static int fec_enet_mii_init(struct platform_device *pdev)
* mdio interface in board design, and need to be configured by
* fec0 mii_bus.
*/
- if ((id_entry->driver_data & FEC_QUIRK_ENET_MAC) && pdev->id > 0) {
+ if ((id_entry->driver_data & FEC_QUIRK_ENET_MAC) && fep->dev_id > 0) {
/* fec1 uses fec0 mii_bus */
- fep->mii_bus = fec0_mii_bus;
- return 0;
+ if (mii_cnt && fec0_mii_bus) {
+ fep->mii_bus = fec0_mii_bus;
+ mii_cnt++;
+ return 0;
+ }
+ return -ENOENT;
}
fep->mii_timeout = 0;
@@ -1063,7 +1080,8 @@ static int fec_enet_mii_init(struct platform_device *pdev)
fep->mii_bus->read = fec_enet_mdio_read;
fep->mii_bus->write = fec_enet_mdio_write;
fep->mii_bus->reset = fec_enet_mdio_reset;
- snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id + 1);
+ snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+ pdev->name, fep->dev_id + 1);
fep->mii_bus->priv = fep;
fep->mii_bus->parent = &pdev->dev;
@@ -1079,6 +1097,8 @@ static int fec_enet_mii_init(struct platform_device *pdev)
if (mdiobus_register(fep->mii_bus))
goto err_out_free_mdio_irq;
+ mii_cnt++;
+
/* save fec0 mii_bus */
if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
fec0_mii_bus = fep->mii_bus;
@@ -1095,11 +1115,11 @@ err_out:
static void fec_enet_mii_remove(struct fec_enet_private *fep)
{
- if (fep->phy_dev)
- phy_disconnect(fep->phy_dev);
- mdiobus_unregister(fep->mii_bus);
- kfree(fep->mii_bus->irq);
- mdiobus_free(fep->mii_bus);
+ if (--mii_cnt == 0) {
+ mdiobus_unregister(fep->mii_bus);
+ kfree(fep->mii_bus->irq);
+ mdiobus_free(fep->mii_bus);
+ }
}
static int fec_enet_get_settings(struct net_device *ndev,
@@ -1136,7 +1156,7 @@ static void fec_enet_get_drvinfo(struct net_device *ndev,
strcpy(info->bus_info, dev_name(&ndev->dev));
}
-static struct ethtool_ops fec_enet_ethtool_ops = {
+static const struct ethtool_ops fec_enet_ethtool_ops = {
.get_settings = fec_enet_get_settings,
.set_settings = fec_enet_set_settings,
.get_drvinfo = fec_enet_get_drvinfo,
@@ -1521,6 +1541,7 @@ fec_probe(struct platform_device *pdev)
int i, irq, ret = 0;
struct resource *r;
const struct of_device_id *of_id;
+ static int dev_id;
of_id = of_match_device(fec_dt_ids, &pdev->dev);
if (of_id)
@@ -1548,6 +1569,7 @@ fec_probe(struct platform_device *pdev)
fep->hwp = ioremap(r->start, resource_size(r));
fep->pdev = pdev;
+ fep->dev_id = dev_id++;
if (!fep->hwp) {
ret = -ENOMEM;
@@ -1571,8 +1593,12 @@ fec_probe(struct platform_device *pdev)
for (i = 0; i < FEC_IRQ_NUM; i++) {
irq = platform_get_irq(pdev, i);
- if (i && irq < 0)
- break;
+ if (irq < 0) {
+ if (i)
+ break;
+ ret = irq;
+ goto failed_irq;
+ }
ret = request_irq(irq, fec_enet_interrupt, IRQF_DISABLED, pdev->name, ndev);
if (ret) {
while (--i >= 0) {
@@ -1583,12 +1609,12 @@ fec_probe(struct platform_device *pdev)
}
}
- fep->clk = clk_get(&pdev->dev, "fec_clk");
+ fep->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(fep->clk)) {
ret = PTR_ERR(fep->clk);
goto failed_clk;
}
- clk_enable(fep->clk);
+ clk_prepare_enable(fep->clk);
ret = fec_enet_init(ndev);
if (ret)
@@ -1611,7 +1637,7 @@ failed_register:
fec_enet_mii_remove(fep);
failed_mii_init:
failed_init:
- clk_disable(fep->clk);
+ clk_disable_unprepare(fep->clk);
clk_put(fep->clk);
failed_clk:
for (i = 0; i < FEC_IRQ_NUM; i++) {
@@ -1635,13 +1661,18 @@ fec_drv_remove(struct platform_device *pdev)
struct net_device *ndev = platform_get_drvdata(pdev);
struct fec_enet_private *fep = netdev_priv(ndev);
struct resource *r;
+ int i;
- fec_stop(ndev);
+ unregister_netdev(ndev);
fec_enet_mii_remove(fep);
- clk_disable(fep->clk);
+ for (i = 0; i < FEC_IRQ_NUM; i++) {
+ int irq = platform_get_irq(pdev, i);
+ if (irq > 0)
+ free_irq(irq, ndev);
+ }
+ clk_disable_unprepare(fep->clk);
clk_put(fep->clk);
iounmap(fep->hwp);
- unregister_netdev(ndev);
free_netdev(ndev);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1664,7 +1695,7 @@ fec_suspend(struct device *dev)
fec_stop(ndev);
netif_device_detach(ndev);
}
- clk_disable(fep->clk);
+ clk_disable_unprepare(fep->clk);
return 0;
}
@@ -1675,7 +1706,7 @@ fec_resume(struct device *dev)
struct net_device *ndev = dev_get_drvdata(dev);
struct fec_enet_private *fep = netdev_priv(ndev);
- clk_enable(fep->clk);
+ clk_prepare_enable(fep->clk);
if (netif_running(ndev)) {
fec_restart(ndev, fep->full_duplex);
netif_device_attach(ndev);
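The fec.c changes above switch the clock handling to the common clock framework's two-step model: clk_prepare_enable() in probe/resume, clk_disable_unprepare() in remove/suspend, and clk_get() with a NULL connection id. A hedged sketch of the usual pairing (foo_* names are illustrative, error paths trimmed):

static int foo_clk_on(struct platform_device *pdev, struct clk **clkp)
{
	struct clk *clk = clk_get(&pdev->dev, NULL);
	int ret;

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);	/* prepare may sleep, enable may not */
	if (ret) {
		clk_put(clk);
		return ret;
	}

	*clkp = clk;
	return 0;
}

static void foo_clk_off(struct clk *clk)
{
	clk_disable_unprepare(clk);	/* balances clk_prepare_enable() */
	clk_put(clk);
}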
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 8b2c6d797e6d..8408c627b195 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -47,6 +47,10 @@
#define FEC_MIIGSK_CFGR 0x300 /* MIIGSK Configuration reg */
#define FEC_MIIGSK_ENR 0x308 /* MIIGSK Enable reg */
+#define BM_MIIGSK_CFGR_MII 0x00
+#define BM_MIIGSK_CFGR_RMII 0x01
+#define BM_MIIGSK_CFGR_FRCONT_10M 0x40
+
#else
#define FEC_ECNTRL 0x000 /* Ethernet control reg */
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 5bf5471f06ff..910a8e18a9ae 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -1171,16 +1171,6 @@ static struct platform_driver fs_enet_driver = {
.remove = fs_enet_remove,
};
-static int __init fs_init(void)
-{
- return platform_driver_register(&fs_enet_driver);
-}
-
-static void __exit fs_cleanup(void)
-{
- platform_driver_unregister(&fs_enet_driver);
-}
-
#ifdef CONFIG_NET_POLL_CONTROLLER
static void fs_enet_netpoll(struct net_device *dev)
{
@@ -1190,7 +1180,4 @@ static void fs_enet_netpoll(struct net_device *dev)
}
#endif
-/**************************************************************************************/
-
-module_init(fs_init);
-module_exit(fs_cleanup);
+module_platform_driver(fs_enet_driver);
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
index b09270b5d0a5..0f2d1a710909 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
@@ -232,15 +232,4 @@ static struct platform_driver fs_enet_bb_mdio_driver = {
.remove = fs_enet_mdio_remove,
};
-static int fs_enet_mdio_bb_init(void)
-{
- return platform_driver_register(&fs_enet_bb_mdio_driver);
-}
-
-static void fs_enet_mdio_bb_exit(void)
-{
- platform_driver_unregister(&fs_enet_bb_mdio_driver);
-}
-
-module_init(fs_enet_mdio_bb_init);
-module_exit(fs_enet_mdio_bb_exit);
+module_platform_driver(fs_enet_bb_mdio_driver);
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
index e0e9d6c35d83..55bb867258e6 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
@@ -237,15 +237,4 @@ static struct platform_driver fs_enet_fec_mdio_driver = {
.remove = fs_enet_mdio_remove,
};
-static int fs_enet_mdio_fec_init(void)
-{
- return platform_driver_register(&fs_enet_fec_mdio_driver);
-}
-
-static void fs_enet_mdio_fec_exit(void)
-{
- platform_driver_unregister(&fs_enet_fec_mdio_driver);
-}
-
-module_init(fs_enet_mdio_fec_init);
-module_exit(fs_enet_mdio_fec_exit);
+module_platform_driver(fs_enet_fec_mdio_driver);
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index 52f4e8ad48e7..9eb815941df5 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -183,28 +183,10 @@ void fsl_pq_mdio_bus_name(char *name, struct device_node *np)
}
EXPORT_SYMBOL_GPL(fsl_pq_mdio_bus_name);
-/* Scan the bus in reverse, looking for an empty spot */
-static int fsl_pq_mdio_find_free(struct mii_bus *new_bus)
-{
- int i;
-
- for (i = PHY_MAX_ADDR; i > 0; i--) {
- u32 phy_id;
-
- if (get_phy_id(new_bus, i, &phy_id))
- return -1;
-
- if (phy_id == 0xffffffff)
- break;
- }
- return i;
-}
-
-
-#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs, struct device_node *np)
{
+#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
struct gfar __iomem *enet_regs;
/*
@@ -220,15 +202,15 @@ static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs, struct devi
} else if (of_device_is_compatible(np, "fsl,etsec2-mdio") ||
of_device_is_compatible(np, "fsl,etsec2-tbi")) {
return of_iomap(np, 1);
- } else
- return NULL;
-}
+ }
#endif
+ return NULL;
+}
-#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
static int get_ucc_id_for_range(u64 start, u64 end, u32 *ucc_id)
{
+#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
struct device_node *np = NULL;
int err = 0;
@@ -261,9 +243,10 @@ static int get_ucc_id_for_range(u64 start, u64 end, u32 *ucc_id)
return err;
else
return -EINVAL;
-}
+#else
+ return -ENODEV;
#endif
-
+}
static int fsl_pq_mdio_probe(struct platform_device *ofdev)
{
@@ -339,19 +322,13 @@ static int fsl_pq_mdio_probe(struct platform_device *ofdev)
of_device_is_compatible(np, "fsl,etsec2-mdio") ||
of_device_is_compatible(np, "fsl,etsec2-tbi") ||
of_device_is_compatible(np, "gianfar")) {
-#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
tbipa = get_gfar_tbipa(regs, np);
if (!tbipa) {
err = -EINVAL;
goto err_free_irqs;
}
-#else
- err = -ENODEV;
- goto err_free_irqs;
-#endif
} else if (of_device_is_compatible(np, "fsl,ucc-mdio") ||
of_device_is_compatible(np, "ucc_geth_phy")) {
-#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
u32 id;
static u32 mii_mng_master;
@@ -364,10 +341,6 @@ static int fsl_pq_mdio_probe(struct platform_device *ofdev)
mii_mng_master = id;
ucc_set_qe_mux_mii_mng(id - 1);
}
-#else
- err = -ENODEV;
- goto err_free_irqs;
-#endif
} else {
err = -ENODEV;
goto err_free_irqs;
@@ -386,23 +359,12 @@ static int fsl_pq_mdio_probe(struct platform_device *ofdev)
}
if (tbiaddr == -1) {
- out_be32(tbipa, 0);
-
- tbiaddr = fsl_pq_mdio_find_free(new_bus);
- }
-
- /*
- * We define TBIPA at 0 to be illegal, opting to fail for boards that
- * have PHYs at 1-31, rather than change tbipa and rescan.
- */
- if (tbiaddr == 0) {
err = -EBUSY;
-
goto err_free_irqs;
+ } else {
+ out_be32(tbipa, tbiaddr);
}
- out_be32(tbipa, tbiaddr);
-
err = of_mdiobus_register(new_bus, np);
if (err) {
printk (KERN_ERR "%s: Cannot register as MDIO bus\n",
@@ -480,15 +442,6 @@ static struct platform_driver fsl_pq_mdio_driver = {
.remove = fsl_pq_mdio_remove,
};
-int __init fsl_pq_mdio_init(void)
-{
- return platform_driver_register(&fsl_pq_mdio_driver);
-}
-module_init(fsl_pq_mdio_init);
+module_platform_driver(fsl_pq_mdio_driver);
-void fsl_pq_mdio_exit(void)
-{
- platform_driver_unregister(&fsl_pq_mdio_driver);
-}
-module_exit(fsl_pq_mdio_exit);
MODULE_LICENSE("GPL");
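The fsl_pq_mdio rework above moves the GIANFAR/UCC #if guards inside get_gfar_tbipa() and get_ucc_id_for_range(), so each helper returns a harmless stub value when that hardware support is compiled out and the caller stays free of inline #ifdef blocks. A minimal sketch of the stub-inside-the-helper pattern with hypothetical CONFIG_FOO names:

static void __iomem *get_foo_regs(struct device_node *np)
{
#if defined(CONFIG_FOO) || defined(CONFIG_FOO_MODULE)
	return of_iomap(np, 0);		/* real lookup when FOO is built */
#else
	return NULL;			/* caller maps NULL to -ENODEV/-EINVAL */
#endif
}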
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 83199fd0d62b..39d160d353a4 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -734,7 +734,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
mac_addr = of_get_mac_address(np);
if (mac_addr)
- memcpy(dev->dev_addr, mac_addr, MAC_ADDR_LEN);
+ memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
if (model && !strcasecmp(model, "TSEC"))
priv->device_flags =
@@ -1984,7 +1984,8 @@ static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
return fcb;
}
-static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb)
+static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
+ int fcb_length)
{
u8 flags = 0;
@@ -2006,7 +2007,7 @@ static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb)
* frame (skb->data) and the start of the IP hdr.
* l4os is the distance between the start of the
* l3 hdr and the l4 hdr */
- fcb->l3os = (u16)(skb_network_offset(skb) - GMAC_FCB_LEN);
+ fcb->l3os = (u16)(skb_network_offset(skb) - fcb_length);
fcb->l4os = skb_network_header_len(skb);
fcb->flags = flags;
@@ -2046,7 +2047,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
int i, rq = 0, do_tstamp = 0;
u32 bufaddr;
unsigned long flags;
- unsigned int nr_frags, nr_txbds, length;
+ unsigned int nr_frags, nr_txbds, length, fcb_length = GMAC_FCB_LEN;
/*
* TOE=1 frames larger than 2500 bytes may see excess delays
@@ -2070,22 +2071,28 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* check if time stamp should be generated */
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
- priv->hwts_tx_en))
+ priv->hwts_tx_en)) {
do_tstamp = 1;
+ fcb_length = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
+ }
/* make space for additional header when fcb is needed */
if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
vlan_tx_tag_present(skb) ||
unlikely(do_tstamp)) &&
- (skb_headroom(skb) < GMAC_FCB_LEN)) {
+ (skb_headroom(skb) < fcb_length)) {
struct sk_buff *skb_new;
- skb_new = skb_realloc_headroom(skb, GMAC_FCB_LEN);
+ skb_new = skb_realloc_headroom(skb, fcb_length);
if (!skb_new) {
dev->stats.tx_errors++;
kfree_skb(skb);
return NETDEV_TX_OK;
}
+
+ /* Steal sock reference for processing TX time stamps */
+ swap(skb_new->sk, skb->sk);
+ swap(skb_new->destructor, skb->destructor);
kfree_skb(skb);
skb = skb_new;
}
@@ -2154,6 +2161,12 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
lstatus = txbdp_start->lstatus;
}
+ /* Add TxPAL between FCB and frame if required */
+ if (unlikely(do_tstamp)) {
+ skb_push(skb, GMAC_TXPAL_LEN);
+ memset(skb->data, 0, GMAC_TXPAL_LEN);
+ }
+
/* Set up checksumming */
if (CHECKSUM_PARTIAL == skb->ip_summed) {
fcb = gfar_add_fcb(skb);
@@ -2164,7 +2177,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb_checksum_help(skb);
} else {
lstatus |= BD_LFLAG(TXBD_TOE);
- gfar_tx_checksum(skb, fcb);
+ gfar_tx_checksum(skb, fcb, fcb_length);
}
}
@@ -2196,9 +2209,9 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
* the full frame length.
*/
if (unlikely(do_tstamp)) {
- txbdp_tstamp->bufPtr = txbdp_start->bufPtr + GMAC_FCB_LEN;
+ txbdp_tstamp->bufPtr = txbdp_start->bufPtr + fcb_length;
txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) |
- (skb_headlen(skb) - GMAC_FCB_LEN);
+ (skb_headlen(skb) - fcb_length);
lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
} else {
lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
@@ -2306,7 +2319,7 @@ void gfar_check_rx_parser_mode(struct gfar_private *priv)
}
/* Enables and disables VLAN insertion/extraction */
-void gfar_vlan_mode(struct net_device *dev, u32 features)
+void gfar_vlan_mode(struct net_device *dev, netdev_features_t features)
{
struct gfar_private *priv = netdev_priv(dev);
struct gfar __iomem *regs = NULL;
@@ -2490,7 +2503,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
next = next_txbd(bdp, base, tx_ring_size);
- buflen = next->length + GMAC_FCB_LEN;
+ buflen = next->length + GMAC_FCB_LEN + GMAC_TXPAL_LEN;
} else
buflen = bdp->length;
@@ -2502,6 +2515,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
u64 *ns = (u64*) (((u32)skb->data + 0x10) & ~0x7);
memset(&shhwtstamps, 0, sizeof(shhwtstamps));
shhwtstamps.hwtstamp = ns_to_ktime(*ns);
+ skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
skb_tstamp_tx(skb, &shhwtstamps);
bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
bdp = next;
@@ -3114,7 +3128,7 @@ static void gfar_set_multi(struct net_device *dev)
static void gfar_clear_exact_match(struct net_device *dev)
{
int idx;
- static const u8 zero_arr[MAC_ADDR_LEN] = {0, 0, 0, 0, 0, 0};
+ static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
for(idx = 1;idx < GFAR_EM_NUM + 1;idx++)
gfar_set_mac_for_addr(dev, idx, zero_arr);
@@ -3137,7 +3151,7 @@ static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
{
u32 tempval;
struct gfar_private *priv = netdev_priv(dev);
- u32 result = ether_crc(MAC_ADDR_LEN, addr);
+ u32 result = ether_crc(ETH_ALEN, addr);
int width = priv->hash_width;
u8 whichbit = (result >> (32 - width)) & 0x1f;
u8 whichreg = result >> (32 - width + 5);
@@ -3158,7 +3172,7 @@ static void gfar_set_mac_for_addr(struct net_device *dev, int num,
struct gfar_private *priv = netdev_priv(dev);
struct gfar __iomem *regs = priv->gfargrp[0].regs;
int idx;
- char tmpbuf[MAC_ADDR_LEN];
+ char tmpbuf[ETH_ALEN];
u32 tempval;
u32 __iomem *macptr = &regs->macstnaddr1;
@@ -3166,8 +3180,8 @@ static void gfar_set_mac_for_addr(struct net_device *dev, int num,
/* Now copy it into the mac registers backwards, cuz */
/* little endian is silly */
- for (idx = 0; idx < MAC_ADDR_LEN; idx++)
- tmpbuf[MAC_ADDR_LEN - 1 - idx] = addr[idx];
+ for (idx = 0; idx < ETH_ALEN; idx++)
+ tmpbuf[ETH_ALEN - 1 - idx] = addr[idx];
gfar_write(macptr, *((u32 *) (tmpbuf)));
@@ -3281,16 +3295,4 @@ static struct platform_driver gfar_driver = {
.remove = gfar_remove,
};
-static int __init gfar_init(void)
-{
- return platform_driver_register(&gfar_driver);
-}
-
-static void __exit gfar_exit(void)
-{
- platform_driver_unregister(&gfar_driver);
-}
-
-module_init(gfar_init);
-module_exit(gfar_exit);
-
+module_platform_driver(gfar_driver);
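The gianfar timestamping hunks above reserve room for both the 8-byte FCB and a 16-byte TxPAL in front of a hardware-timestamped frame, reallocating headroom when the skb does not have enough and moving the socket reference to the copy so the TX timestamp can still be delivered. A hedged sketch of that headroom check with illustrative names (not the driver's helpers):

static struct sk_buff *foo_reserve_hw_header(struct sk_buff *skb,
					     unsigned int hdr_len)
{
	if (skb_headroom(skb) < hdr_len) {
		struct sk_buff *nskb = skb_realloc_headroom(skb, hdr_len);

		if (!nskb) {
			kfree_skb(skb);
			return NULL;
		}
		/* keep the socket reference needed for TX timestamping */
		swap(nskb->sk, skb->sk);
		swap(nskb->destructor, skb->destructor);
		kfree_skb(skb);
		skb = nskb;
	}
	memset(skb_push(skb, hdr_len), 0, hdr_len);	/* zeroed pad/header */
	return skb;
}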
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 9aa43773e8e3..40c33a7554c0 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -63,6 +63,9 @@ struct ethtool_rx_list {
/* Length for FCB */
#define GMAC_FCB_LEN 8
+/* Length for TxPAL */
+#define GMAC_TXPAL_LEN 16
+
/* Default padding amount */
#define DEFAULT_PADDING 2
@@ -74,9 +77,6 @@ struct ethtool_rx_list {
* will be the next highest multiple of 512 bytes. */
#define INCREMENTAL_BUFFER_SIZE 512
-
-#define MAC_ADDR_LEN 6
-
#define PHY_INIT_TIMEOUT 100000
#define GFAR_PHY_CHANGE_TIME 2
@@ -1179,9 +1179,9 @@ extern void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev,
extern void gfar_configure_coalescing(struct gfar_private *priv,
unsigned long tx_mask, unsigned long rx_mask);
void gfar_init_sysfs(struct net_device *dev);
-int gfar_set_features(struct net_device *dev, u32 features);
+int gfar_set_features(struct net_device *dev, netdev_features_t features);
extern void gfar_check_rx_parser_mode(struct gfar_private *priv);
-extern void gfar_vlan_mode(struct net_device *dev, u32 features);
+extern void gfar_vlan_mode(struct net_device *dev, netdev_features_t features);
extern const struct ethtool_ops gfar_ethtool_ops;
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 212736bab6bb..5a3b2e5b2880 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -519,12 +519,12 @@ static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rva
return err;
}
-int gfar_set_features(struct net_device *dev, u32 features)
+int gfar_set_features(struct net_device *dev, netdev_features_t features)
{
struct gfar_private *priv = netdev_priv(dev);
unsigned long flags;
int err = 0, i = 0;
- u32 changed = dev->features ^ features;
+ netdev_features_t changed = dev->features ^ features;
if (changed & (NETIF_F_HW_VLAN_TX|NETIF_F_HW_VLAN_RX))
gfar_vlan_mode(dev, features);
@@ -1410,10 +1410,9 @@ static int gfar_optimize_filer_masks(struct filer_table *tab)
/* We need a copy of the filer table because
* we want to change its order */
- temp_table = kmalloc(sizeof(*temp_table), GFP_KERNEL);
+ temp_table = kmemdup(tab, sizeof(*temp_table), GFP_KERNEL);
if (temp_table == NULL)
return -ENOMEM;
- memcpy(temp_table, tab, sizeof(*temp_table));
mask_table = kcalloc(MAX_FILER_CACHE_IDX / 2 + 1,
sizeof(struct gfar_mask_entry), GFP_KERNEL);
@@ -1693,8 +1692,9 @@ static int gfar_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
ret = gfar_set_hash_opts(priv, cmd);
break;
case ETHTOOL_SRXCLSRLINS:
- if (cmd->fs.ring_cookie != RX_CLS_FLOW_DISC &&
- cmd->fs.ring_cookie >= priv->num_rx_queues) {
+ if ((cmd->fs.ring_cookie != RX_CLS_FLOW_DISC &&
+ cmd->fs.ring_cookie >= priv->num_rx_queues) ||
+ cmd->fs.location >= MAX_FILER_IDX) {
ret = -EINVAL;
break;
}
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
index f67b8aebc89c..83e0ed757e33 100644
--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
+++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
@@ -562,21 +562,7 @@ static struct platform_driver gianfar_ptp_driver = {
.remove = gianfar_ptp_remove,
};
-/* module operations */
-
-static int __init ptp_gianfar_init(void)
-{
- return platform_driver_register(&gianfar_ptp_driver);
-}
-
-module_init(ptp_gianfar_init);
-
-static void __exit ptp_gianfar_exit(void)
-{
- platform_driver_unregister(&gianfar_ptp_driver);
-}
-
-module_exit(ptp_gianfar_exit);
+module_platform_driver(gianfar_ptp_driver);
MODULE_AUTHOR("Richard Cochran <richard.cochran@omicron.at>");
MODULE_DESCRIPTION("PTP clock using the eTSEC");
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index b5dc0273a1d1..ba2dc083bfc0 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -443,7 +443,7 @@ static void hw_add_addr_in_hash(struct ucc_geth_private *ugeth,
static inline int compare_addr(u8 **addr1, u8 **addr2)
{
- return memcmp(addr1, addr2, ENET_NUM_OCTETS_PER_ADDRESS);
+ return memcmp(addr1, addr2, ETH_ALEN);
}
#ifdef DEBUG
diff --git a/drivers/net/ethernet/freescale/ucc_geth.h b/drivers/net/ethernet/freescale/ucc_geth.h
index d12fcad145e9..2e395a2566b8 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.h
+++ b/drivers/net/ethernet/freescale/ucc_geth.h
@@ -20,6 +20,7 @@
#include <linux/kernel.h>
#include <linux/list.h>
+#include <linux/if_ether.h>
#include <asm/immap_qe.h>
#include <asm/qe.h>
@@ -881,7 +882,6 @@ struct ucc_geth_hardware_statistics {
#define TX_RING_MOD_MASK(size) (size-1)
#define RX_RING_MOD_MASK(size) (size-1)
-#define ENET_NUM_OCTETS_PER_ADDRESS 6
#define ENET_GROUP_ADDR 0x01 /* Group address mask
for ethernet
addresses */
@@ -1051,7 +1051,7 @@ enum ucc_geth_num_of_station_addresses {
/* UCC GETH 82xx Ethernet Address Container */
struct enet_addr_container {
- u8 address[ENET_NUM_OCTETS_PER_ADDRESS]; /* ethernet address */
+ u8 address[ETH_ALEN]; /* ethernet address */
enum ucc_geth_enet_address_recognition_location location; /* location in
82xx address
recognition
@@ -1194,7 +1194,7 @@ struct ucc_geth_private {
u16 cpucount[NUM_TX_QUEUES];
u16 __iomem *p_cpucount[NUM_TX_QUEUES];
int indAddrRegUsed[NUM_OF_PADDRS];
- u8 paddr[NUM_OF_PADDRS][ENET_NUM_OCTETS_PER_ADDRESS]; /* ethernet address */
+ u8 paddr[NUM_OF_PADDRS][ETH_ALEN]; /* ethernet address */
u8 numGroupAddrInHash;
u8 numIndAddrInHash;
u8 numIndAddrInReg;
diff --git a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
index 15416752c13e..ee84b472cee6 100644
--- a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
+++ b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
@@ -1058,9 +1058,10 @@ static void fjn_rx(struct net_device *dev)
static void netdev_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- sprintf(info->bus_info, "PCMCIA 0x%lx", dev->base_addr);
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ snprintf(info->bus_info, sizeof(info->bus_info),
+ "PCMCIA 0x%lx", dev->base_addr);
}
static const struct ethtool_ops netdev_ethtool_ops = {
diff --git a/drivers/net/ethernet/i825xx/eepro.c b/drivers/net/ethernet/i825xx/eepro.c
index 067c46069a11..114cda7721fe 100644
--- a/drivers/net/ethernet/i825xx/eepro.c
+++ b/drivers/net/ethernet/i825xx/eepro.c
@@ -1726,9 +1726,10 @@ static int eepro_ethtool_get_settings(struct net_device *dev,
static void eepro_ethtool_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
- strcpy(drvinfo->driver, DRV_NAME);
- strcpy(drvinfo->version, DRV_VERSION);
- sprintf(drvinfo->bus_info, "ISA 0x%lx", dev->base_addr);
+ strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
+ snprintf(drvinfo->bus_info, sizeof(drvinfo->bus_info),
+ "ISA 0x%lx", dev->base_addr);
}
static const struct ethtool_ops eepro_ethtool_ops = {
diff --git a/drivers/net/ethernet/ibm/ehea/ehea.h b/drivers/net/ethernet/ibm/ehea/ehea.h
index 410d6a1984ed..6650068c996c 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea.h
+++ b/drivers/net/ethernet/ibm/ehea/ehea.h
@@ -61,9 +61,9 @@
#ifdef EHEA_SMALL_QUEUES
#define EHEA_MAX_CQE_COUNT 1023
#define EHEA_DEF_ENTRIES_SQ 1023
-#define EHEA_DEF_ENTRIES_RQ1 4095
+#define EHEA_DEF_ENTRIES_RQ1 1023
#define EHEA_DEF_ENTRIES_RQ2 1023
-#define EHEA_DEF_ENTRIES_RQ3 1023
+#define EHEA_DEF_ENTRIES_RQ3 511
#else
#define EHEA_MAX_CQE_COUNT 4080
#define EHEA_DEF_ENTRIES_SQ 4080
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c b/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
index 05b7359bde8d..6bdd8e36e564 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
@@ -263,7 +263,7 @@ static void ehea_get_ethtool_stats(struct net_device *dev,
data[i++] = atomic_read(&port->port_res[k].swqe_avail);
}
-const struct ethtool_ops ehea_ethtool_ops = {
+static const struct ethtool_ops ehea_ethtool_ops = {
.get_settings = ehea_get_settings,
.get_drvinfo = ehea_get_drvinfo,
.get_msglevel = ehea_get_msglevel,
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 37b70f7052b6..5d5fb2627184 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -94,8 +94,8 @@ static int port_name_cnt;
static LIST_HEAD(adapter_list);
static unsigned long ehea_driver_flags;
static DEFINE_MUTEX(dlpar_mem_lock);
-struct ehea_fw_handle_array ehea_fw_handles;
-struct ehea_bcmc_reg_array ehea_bcmc_regs;
+static struct ehea_fw_handle_array ehea_fw_handles;
+static struct ehea_bcmc_reg_array ehea_bcmc_regs;
static int __devinit ehea_probe_adapter(struct platform_device *dev,
@@ -133,7 +133,7 @@ void ehea_dump(void *adr, int len, char *msg)
}
}
-void ehea_schedule_port_reset(struct ehea_port *port)
+static void ehea_schedule_port_reset(struct ehea_port *port)
{
if (!test_bit(__EHEA_DISABLE_PORT_RESET, &port->flags))
schedule_work(&port->reset_task);
@@ -371,7 +371,8 @@ static void ehea_update_stats(struct work_struct *work)
out_herr:
free_page((unsigned long)cb2);
resched:
- schedule_delayed_work(&port->stats_work, msecs_to_jiffies(1000));
+ schedule_delayed_work(&port->stats_work,
+ round_jiffies_relative(msecs_to_jiffies(1000)));
}
static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
@@ -1403,7 +1404,7 @@ out:
return ret;
}
-int ehea_gen_smrs(struct ehea_port_res *pr)
+static int ehea_gen_smrs(struct ehea_port_res *pr)
{
int ret;
struct ehea_adapter *adapter = pr->port->adapter;
@@ -1425,7 +1426,7 @@ out:
return -EIO;
}
-int ehea_rem_smrs(struct ehea_port_res *pr)
+static int ehea_rem_smrs(struct ehea_port_res *pr)
{
if ((ehea_rem_mr(&pr->send_mr)) ||
(ehea_rem_mr(&pr->recv_mr)))
@@ -2113,17 +2114,19 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
-static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+static int ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
struct ehea_port *port = netdev_priv(dev);
struct ehea_adapter *adapter = port->adapter;
struct hcp_ehea_port_cb1 *cb1;
int index;
u64 hret;
+ int err = 0;
cb1 = (void *)get_zeroed_page(GFP_KERNEL);
if (!cb1) {
pr_err("no mem for cb1\n");
+ err = -ENOMEM;
goto out;
}
@@ -2131,6 +2134,7 @@ static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
H_PORT_CB1, H_PORT_CB1_ALL, cb1);
if (hret != H_SUCCESS) {
pr_err("query_ehea_port failed\n");
+ err = -EINVAL;
goto out;
}
@@ -2139,24 +2143,28 @@ static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
H_PORT_CB1, H_PORT_CB1_ALL, cb1);
- if (hret != H_SUCCESS)
+ if (hret != H_SUCCESS) {
pr_err("modify_ehea_port failed\n");
+ err = -EINVAL;
+ }
out:
free_page((unsigned long)cb1);
- return;
+ return err;
}
-static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+static int ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
struct ehea_port *port = netdev_priv(dev);
struct ehea_adapter *adapter = port->adapter;
struct hcp_ehea_port_cb1 *cb1;
int index;
u64 hret;
+ int err = 0;
cb1 = (void *)get_zeroed_page(GFP_KERNEL);
if (!cb1) {
pr_err("no mem for cb1\n");
+ err = -ENOMEM;
goto out;
}
@@ -2164,6 +2172,7 @@ static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
H_PORT_CB1, H_PORT_CB1_ALL, cb1);
if (hret != H_SUCCESS) {
pr_err("query_ehea_port failed\n");
+ err = -EINVAL;
goto out;
}
@@ -2172,13 +2181,16 @@ static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
H_PORT_CB1, H_PORT_CB1_ALL, cb1);
- if (hret != H_SUCCESS)
+ if (hret != H_SUCCESS) {
pr_err("modify_ehea_port failed\n");
+ err = -EINVAL;
+ }
out:
free_page((unsigned long)cb1);
+ return err;
}
-int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
+static int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
{
int ret = -EIO;
u64 hret;
@@ -2434,7 +2446,8 @@ static int ehea_open(struct net_device *dev)
}
mutex_unlock(&port->port_lock);
- schedule_delayed_work(&port->stats_work, msecs_to_jiffies(1000));
+ schedule_delayed_work(&port->stats_work,
+ round_jiffies_relative(msecs_to_jiffies(1000)));
return ret;
}
@@ -2518,7 +2531,7 @@ static void ehea_flush_sq(struct ehea_port *port)
}
}
-int ehea_stop_qps(struct net_device *dev)
+static int ehea_stop_qps(struct net_device *dev)
{
struct ehea_port *port = netdev_priv(dev);
struct ehea_adapter *adapter = port->adapter;
@@ -2587,7 +2600,7 @@ out:
return ret;
}
-void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr)
+static void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr)
{
struct ehea_qp qp = *orig_qp;
struct ehea_qp_init_attr *init_attr = &qp.init_attr;
@@ -2620,7 +2633,7 @@ void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr)
}
}
-int ehea_restart_qps(struct net_device *dev)
+static int ehea_restart_qps(struct net_device *dev)
{
struct ehea_port *port = netdev_priv(dev);
struct ehea_adapter *adapter = port->adapter;
@@ -2811,7 +2824,7 @@ static void ehea_tx_watchdog(struct net_device *dev)
ehea_schedule_port_reset(port);
}
-int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
+static int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
{
struct hcp_query_ehea *cb;
u64 hret;
@@ -2839,7 +2852,7 @@ out:
return ret;
}
-int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo)
+static int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo)
{
struct hcp_ehea_port_cb4 *cb4;
u64 hret;
@@ -2953,7 +2966,7 @@ static const struct net_device_ops ehea_netdev_ops = {
.ndo_tx_timeout = ehea_tx_watchdog,
};
-struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
+static struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
u32 logical_port_id,
struct device_node *dn)
{
@@ -3224,7 +3237,7 @@ static ssize_t ehea_remove_port(struct device *dev,
static DEVICE_ATTR(probe_port, S_IWUSR, NULL, ehea_probe_port);
static DEVICE_ATTR(remove_port, S_IWUSR, NULL, ehea_remove_port);
-int ehea_create_device_sysfs(struct platform_device *dev)
+static int ehea_create_device_sysfs(struct platform_device *dev)
{
int ret = device_create_file(&dev->dev, &dev_attr_probe_port);
if (ret)
@@ -3235,7 +3248,7 @@ out:
return ret;
}
-void ehea_remove_device_sysfs(struct platform_device *dev)
+static void ehea_remove_device_sysfs(struct platform_device *dev)
{
device_remove_file(&dev->dev, &dev_attr_probe_port);
device_remove_file(&dev->dev, &dev_attr_remove_port);
@@ -3366,7 +3379,7 @@ static int __devexit ehea_remove(struct platform_device *dev)
return 0;
}
-void ehea_crash_handler(void)
+static void ehea_crash_handler(void)
{
int i;
@@ -3478,7 +3491,7 @@ static ssize_t ehea_show_capabilities(struct device_driver *drv,
static DRIVER_ATTR(capabilities, S_IRUSR | S_IRGRP | S_IROTH,
ehea_show_capabilities, NULL);
-int __init ehea_module_init(void)
+static int __init ehea_module_init(void)
{
int ret;
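The ehea hunks above convert ndo_vlan_rx_add_vid()/ndo_vlan_rx_kill_vid() from void to int so allocation or hypervisor failures propagate back to the VLAN core instead of being silently dropped. A hedged sketch of the int-returning callbacks for a hypothetical driver (foo_hw_set_vlan_filter() and priv->active_vlans are assumptions):

static int foo_vlan_rx_add_vid(struct net_device *dev, u16 vid)
{
	struct foo_priv *priv = netdev_priv(dev);
	int err;

	err = foo_hw_set_vlan_filter(priv, vid, true);
	if (err)
		return err;		/* the stack now sees the failure */

	set_bit(vid, priv->active_vlans);
	return 0;
}

static int foo_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
{
	struct foo_priv *priv = netdev_priv(dev);

	clear_bit(vid, priv->active_vlans);
	return foo_hw_set_vlan_filter(priv, vid, false);
}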
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
index 95b9f4fa811e..c25b05b94daa 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
@@ -34,9 +34,7 @@
#include "ehea_phyp.h"
#include "ehea_qmr.h"
-struct ehea_bmap *ehea_bmap = NULL;
-
-
+static struct ehea_bmap *ehea_bmap;
static void *hw_qpageit_get_inc(struct hw_queue *queue)
{
@@ -212,7 +210,7 @@ out_nomem:
return NULL;
}
-u64 ehea_destroy_cq_res(struct ehea_cq *cq, u64 force)
+static u64 ehea_destroy_cq_res(struct ehea_cq *cq, u64 force)
{
u64 hret;
u64 adapter_handle = cq->adapter->handle;
@@ -337,7 +335,7 @@ struct ehea_eqe *ehea_poll_eq(struct ehea_eq *eq)
return eqe;
}
-u64 ehea_destroy_eq_res(struct ehea_eq *eq, u64 force)
+static u64 ehea_destroy_eq_res(struct ehea_eq *eq, u64 force)
{
u64 hret;
unsigned long flags;
@@ -381,7 +379,7 @@ int ehea_destroy_eq(struct ehea_eq *eq)
/**
* allocates memory for a queue and registers pages in phyp
*/
-int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue,
+static int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue,
int nr_pages, int wqe_size, int act_nr_sges,
struct ehea_adapter *adapter, int h_call_q_selector)
{
@@ -516,7 +514,7 @@ out_freemem:
return NULL;
}
-u64 ehea_destroy_qp_res(struct ehea_qp *qp, u64 force)
+static u64 ehea_destroy_qp_res(struct ehea_qp *qp, u64 force)
{
u64 hret;
struct ehea_qp_init_attr *qp_attr = &qp->init_attr;
@@ -976,7 +974,7 @@ int ehea_gen_smr(struct ehea_adapter *adapter, struct ehea_mr *old_mr,
return 0;
}
-void print_error_data(u64 *data)
+static void print_error_data(u64 *data)
{
int length;
u64 type = EHEA_BMASK_GET(ERROR_DATA_TYPE, data[2]);
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index ed79b2d3ad3e..2abce965c7bd 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -2924,6 +2924,9 @@ static int __devexit emac_remove(struct platform_device *ofdev)
if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
zmii_detach(dev->zmii_dev, dev->zmii_port);
+ busy_phy_map &= ~(1 << dev->phy.address);
+ DBG(dev, "busy_phy_map now %#x" NL, busy_phy_map);
+
mal_unregister_commac(dev->mal, &dev->commac);
emac_put_deps(dev);
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index b1cd41b9c61c..e877371680a9 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -735,7 +735,8 @@ static void netdev_get_drvinfo(struct net_device *dev,
sizeof(info->version) - 1);
}
-static u32 ibmveth_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t ibmveth_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
/*
* Since the ibmveth firmware interface does not have the
@@ -838,7 +839,8 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
return rc1 ? rc1 : rc2;
}
-static int ibmveth_set_features(struct net_device *dev, u32 features)
+static int ibmveth_set_features(struct net_device *dev,
+ netdev_features_t features)
{
struct ibmveth_adapter *adapter = netdev_priv(dev);
int rx_csum = !!(features & NETIF_F_RXCSUM);
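ibmveth, like gianfar and e1000 elsewhere in this merge, moves its feature callbacks to the netdev_features_t type so the feature mask can grow past 32 bits without changing driver signatures again. A hedged sketch of the ndo_fix_features/ndo_set_features pair with hypothetical policy and helpers:

static netdev_features_t foo_fix_features(struct net_device *dev,
					  netdev_features_t features)
{
	/* illustrative policy: RX and TX VLAN acceleration stay in sync */
	if (!(features & NETIF_F_HW_VLAN_RX))
		features &= ~NETIF_F_HW_VLAN_TX;
	return features;
}

static int foo_set_features(struct net_device *dev,
			    netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;

	if (changed & NETIF_F_RXCSUM)
		return foo_hw_set_rx_csum(dev,	/* hypothetical helper */
					  !!(features & NETIF_F_RXCSUM));
	return 0;
}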
diff --git a/drivers/net/ethernet/ibm/iseries_veth.c b/drivers/net/ethernet/ibm/iseries_veth.c
index 4326681df382..acc31af6594a 100644
--- a/drivers/net/ethernet/ibm/iseries_veth.c
+++ b/drivers/net/ethernet/ibm/iseries_veth.c
@@ -1421,7 +1421,7 @@ static void veth_receive(struct veth_lpar_connection *cnx,
/* FIXME: do we need this? */
memset(local_list, 0, sizeof(local_list));
- memset(remote_list, 0, sizeof(VETH_MAX_FRAMES_PER_MSG));
+ memset(remote_list, 0, sizeof(remote_list));
/* a 0 address marks the end of the valid entries */
if (senddata->addr[startchunk] == 0)
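The one-line iseries_veth fix above closes a classic sizeof bug: passing sizeof(VETH_MAX_FRAMES_PER_MSG) cleared only sizeof(int) bytes rather than the whole array. A hedged illustration (the element type is assumed, not taken from the driver):

struct frame_desc remote_list[VETH_MAX_FRAMES_PER_MSG];	/* assumed type */

memset(remote_list, 0, sizeof(remote_list));	/* clears the full array */
/* memset(remote_list, 0, sizeof(VETH_MAX_FRAMES_PER_MSG)) would only
 * zero sizeof(int) bytes, leaving most entries uninitialised. */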
diff --git a/drivers/net/ethernet/icplus/ipg.c b/drivers/net/ethernet/icplus/ipg.c
index 8fd80a00b898..075451d0207d 100644
--- a/drivers/net/ethernet/icplus/ipg.c
+++ b/drivers/net/ethernet/icplus/ipg.c
@@ -371,16 +371,9 @@ static void mdio_write(struct net_device *dev, int phy_id, int phy_reg, int val)
}
/* The last cycle is a tri-state, so read from the PHY. */
- for (j = 7; j < 8; j++) {
- for (i = 0; i < p[j].len; i++) {
- ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | polarity);
-
- p[j].field |= ((ipg_r8(PHY_CTRL) &
- IPG_PC_MGMTDATA) >> 1) << (p[j].len - 1 - i);
-
- ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | polarity);
- }
- }
+ ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | polarity);
+ ipg_r8(PHY_CTRL);
+ ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | polarity);
}
static void ipg_set_led_mode(struct net_device *dev)
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 5a2fdf7a00c8..9436397e5725 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -2376,10 +2376,10 @@ static void e100_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
struct nic *nic = netdev_priv(netdev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->fw_version, "N/A");
- strcpy(info->bus_info, pci_name(nic->pdev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(nic->pdev),
+ sizeof(info->bus_info));
}
#define E100_PHY_REGS 0x1C
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 2b223ac99c42..3103f0b6bf5e 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -515,14 +515,14 @@ static void e1000_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- char firmware_version[32];
- strncpy(drvinfo->driver, e1000_driver_name, 32);
- strncpy(drvinfo->version, e1000_driver_version, 32);
+ strlcpy(drvinfo->driver, e1000_driver_name,
+ sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, e1000_driver_version,
+ sizeof(drvinfo->version));
- sprintf(firmware_version, "N/A");
- strncpy(drvinfo->fw_version, firmware_version, 32);
- strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info));
drvinfo->regdump_len = e1000_get_regs_len(netdev);
drvinfo->eedump_len = e1000_get_eeprom_len(netdev);
}
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.h b/drivers/net/ethernet/intel/e1000/e1000_hw.h
index 5c9a8403668b..f6c4d7e2560c 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.h
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.h
@@ -448,7 +448,6 @@ void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value);
#define E1000_DEV_ID_INTEL_CE4100_GBE 0x2E6E
#define NODE_ADDRESS_SIZE 6
-#define ETH_LENGTH_OF_ADDRESS 6
/* MAC decode size is 128K - This is the size of BAR0 */
#define MAC_DECODE_SIZE (128 * 1024)
@@ -813,8 +812,7 @@ struct e1000_ffvt_entry {
#define E1000_FLA 0x0001C /* Flash Access - RW */
#define E1000_MDIC 0x00020 /* MDI Control - RW */
-extern void __iomem *ce4100_gbe_mdio_base_virt;
-#define INTEL_CE_GBE_MDIO_RCOMP_BASE (ce4100_gbe_mdio_base_virt)
+#define INTEL_CE_GBE_MDIO_RCOMP_BASE (hw->ce4100_gbe_mdio_base_virt)
#define E1000_MDIO_STS (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0)
#define E1000_MDIO_CMD (INTEL_CE_GBE_MDIO_RCOMP_BASE + 4)
#define E1000_MDIO_DRV (INTEL_CE_GBE_MDIO_RCOMP_BASE + 8)
@@ -1344,6 +1342,7 @@ struct e1000_hw_stats {
struct e1000_hw {
u8 __iomem *hw_addr;
u8 __iomem *flash_address;
+ void __iomem *ce4100_gbe_mdio_base_virt;
e1000_mac_type mac_type;
e1000_phy_type phy_type;
u32 phy_init_script;
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index cf480b554622..669ca3800c01 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -33,11 +33,6 @@
#include <linux/bitops.h>
#include <linux/if_vlan.h>
-/* Intel Media SOC GbE MDIO physical base address */
-static unsigned long ce4100_gbe_mdio_base_phy;
-/* Intel Media SOC GbE MDIO virtual base address */
-void __iomem *ce4100_gbe_mdio_base_virt;
-
char e1000_driver_name[] = "e1000";
static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
#define DRV_VERSION "7.3.21-k8-NAPI"
@@ -167,9 +162,10 @@ static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
struct sk_buff *skb);
static bool e1000_vlan_used(struct e1000_adapter *adapter);
-static void e1000_vlan_mode(struct net_device *netdev, u32 features);
-static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
-static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
+static void e1000_vlan_mode(struct net_device *netdev,
+ netdev_features_t features);
+static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
+static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
static void e1000_restore_vlan(struct e1000_adapter *adapter);
#ifdef CONFIG_PM
@@ -806,7 +802,8 @@ static int e1000_is_need_ioport(struct pci_dev *pdev)
}
}
-static u32 e1000_fix_features(struct net_device *netdev, u32 features)
+static netdev_features_t e1000_fix_features(struct net_device *netdev,
+ netdev_features_t features)
{
/*
* Since there is no support for separate rx/tx vlan accel
@@ -820,10 +817,11 @@ static u32 e1000_fix_features(struct net_device *netdev, u32 features)
return features;
}
-static int e1000_set_features(struct net_device *netdev, u32 features)
+static int e1000_set_features(struct net_device *netdev,
+ netdev_features_t features)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- u32 changed = features ^ netdev->features;
+ netdev_features_t changed = features ^ netdev->features;
if (changed & NETIF_F_HW_VLAN_RX)
e1000_vlan_mode(netdev, features);
@@ -1051,11 +1049,11 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
err = -EIO;
if (hw->mac_type == e1000_ce4100) {
- ce4100_gbe_mdio_base_phy = pci_resource_start(pdev, BAR_1);
- ce4100_gbe_mdio_base_virt = ioremap(ce4100_gbe_mdio_base_phy,
+ hw->ce4100_gbe_mdio_base_virt =
+ ioremap(pci_resource_start(pdev, BAR_1),
pci_resource_len(pdev, BAR_1));
- if (!ce4100_gbe_mdio_base_virt)
+ if (!hw->ce4100_gbe_mdio_base_virt)
goto err_mdio_ioremap;
}
@@ -1182,7 +1180,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
if (global_quad_port_a != 0)
adapter->eeprom_wol = 0;
else
- adapter->quad_port_a = 1;
+ adapter->quad_port_a = true;
/* Reset for multiple quad port adapters */
if (++global_quad_port_a == 4)
global_quad_port_a = 0;
@@ -1246,7 +1244,7 @@ err_eeprom:
err_dma:
err_sw_init:
err_mdio_ioremap:
- iounmap(ce4100_gbe_mdio_base_virt);
+ iounmap(hw->ce4100_gbe_mdio_base_virt);
iounmap(hw->hw_addr);
err_ioremap:
free_netdev(netdev);
@@ -1283,6 +1281,8 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
kfree(adapter->tx_ring);
kfree(adapter->rx_ring);
+ if (hw->mac_type == e1000_ce4100)
+ iounmap(hw->ce4100_gbe_mdio_base_virt);
iounmap(hw->hw_addr);
if (hw->flash_address)
iounmap(hw->flash_address);
@@ -1676,7 +1676,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
* need this to apply a workaround later in the send path. */
if (hw->mac_type == e1000_82544 &&
hw->bus_type == e1000_bus_type_pcix)
- adapter->pcix_82544 = 1;
+ adapter->pcix_82544 = true;
ew32(TCTL, tctl);
@@ -1999,7 +1999,7 @@ static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
- tx_ring->last_tx_tso = 0;
+ tx_ring->last_tx_tso = false;
writel(0, hw->hw_addr + tx_ring->tdh);
writel(0, hw->hw_addr + tx_ring->tdt);
@@ -2848,7 +2848,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
* DMA'd to the controller */
if (!skb->data_len && tx_ring->last_tx_tso &&
!skb_is_gso(skb)) {
- tx_ring->last_tx_tso = 0;
+ tx_ring->last_tx_tso = false;
size -= 4;
}
@@ -3216,7 +3216,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
if (likely(tso)) {
if (likely(hw->mac_type != e1000_82544))
- tx_ring->last_tx_tso = 1;
+ tx_ring->last_tx_tso = true;
tx_flags |= E1000_TX_FLAGS_TSO;
} else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
tx_flags |= E1000_TX_FLAGS_CSUM;
@@ -4577,7 +4577,8 @@ static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
e1000_irq_enable(adapter);
}
-static void e1000_vlan_mode(struct net_device *netdev, u32 features)
+static void e1000_vlan_mode(struct net_device *netdev,
+ netdev_features_t features)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -4600,7 +4601,7 @@ static void e1000_vlan_mode(struct net_device *netdev, u32 features)
e1000_irq_enable(adapter);
}
-static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -4609,7 +4610,7 @@ static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
if ((hw->mng_cookie.status &
E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
(vid == adapter->mng_vlan_id))
- return;
+ return 0;
if (!e1000_vlan_used(adapter))
e1000_vlan_filter_on_off(adapter, true);
@@ -4621,9 +4622,11 @@ static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
e1000_write_vfta(hw, index, vfta);
set_bit(vid, adapter->active_vlans);
+
+ return 0;
}
-static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -4644,6 +4647,8 @@ static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
if (!e1000_vlan_used(adapter))
e1000_vlan_filter_on_off(adapter, false);
+
+ return 0;
}
static void e1000_restore_vlan(struct e1000_adapter *adapter)
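[Editor's note] The same series changes ndo_vlan_rx_add_vid() and ndo_vlan_rx_kill_vid() from void to int so a failure to program the hardware filter can be reported. A hedged sketch of the new contract (the "foo" driver and its bitmap are hypothetical; the kernel helpers are real):

#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/bitops.h>

static unsigned long foo_active_vlans[BITS_TO_LONGS(VLAN_N_VID)];

/* Both hooks now return 0 on success or a negative errno on failure. */
static int foo_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
        /* program the hardware VLAN filter table entry for 'vid' here */
        set_bit(vid, foo_active_vlans);
        return 0;
}

static int foo_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
        /* clear the hardware VLAN filter table entry for 'vid' here */
        clear_bit(vid, foo_active_vlans);
        return 0;
}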
@@ -4716,8 +4721,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
netif_device_detach(netdev);
- mutex_lock(&adapter->mutex);
-
if (netif_running(netdev)) {
WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
e1000_down(adapter);
@@ -4725,10 +4728,8 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
#ifdef CONFIG_PM
retval = pci_save_state(pdev);
- if (retval) {
- mutex_unlock(&adapter->mutex);
+ if (retval)
return retval;
- }
#endif
status = er32(STATUS);
@@ -4783,8 +4784,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
if (netif_running(netdev))
e1000_free_irq(adapter);
- mutex_unlock(&adapter->mutex);
-
pci_disable_device(pdev);
return 0;
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 9fe18d1d53d8..f478a22ed577 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -309,6 +309,7 @@ struct e1000_adapter {
u32 txd_cmd;
bool detect_tx_hung;
+ bool tx_hang_recheck;
u8 tx_timeout_factor;
u32 tx_int_delay;
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 69c9d2199140..fb2c28e799a2 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -579,26 +579,24 @@ static void e1000_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- char firmware_version[32];
- strncpy(drvinfo->driver, e1000e_driver_name,
- sizeof(drvinfo->driver) - 1);
- strncpy(drvinfo->version, e1000e_driver_version,
- sizeof(drvinfo->version) - 1);
+ strlcpy(drvinfo->driver, e1000e_driver_name,
+ sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, e1000e_driver_version,
+ sizeof(drvinfo->version));
/*
* EEPROM image version # is reported as firmware version # for
* PCI-E controllers
*/
- snprintf(firmware_version, sizeof(firmware_version), "%d.%d-%d",
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%d.%d-%d",
(adapter->eeprom_vers & 0xF000) >> 12,
(adapter->eeprom_vers & 0x0FF0) >> 4,
(adapter->eeprom_vers & 0x000F));
- strncpy(drvinfo->fw_version, firmware_version,
- sizeof(drvinfo->fw_version) - 1);
- strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
- sizeof(drvinfo->bus_info) - 1);
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info));
drvinfo->regdump_len = e1000_get_regs_len(netdev);
drvinfo->eedump_len = e1000_get_eeprom_len(netdev);
}
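[Editor's note] The ethtool hunk above swaps strncpy() (which can leave the destination unterminated when the source fills the buffer) for strlcpy(), and drops the intermediate firmware_version buffer by formatting straight into struct ethtool_drvinfo. A minimal sketch of the same idiom with made-up driver and version strings:

#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/pci.h>

struct foo_adapter {            /* hypothetical private data */
        struct pci_dev *pdev;
};

static void foo_get_drvinfo(struct net_device *netdev,
                            struct ethtool_drvinfo *drvinfo)
{
        struct foo_adapter *adapter = netdev_priv(netdev);

        /* strlcpy() always NUL-terminates the destination */
        strlcpy(drvinfo->driver, "foo", sizeof(drvinfo->driver));
        strlcpy(drvinfo->version, "1.0-k", sizeof(drvinfo->version));

        /* format directly into the fixed-size field, no bounce buffer */
        snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
                 "%d.%d-%d", 1, 2, 3);

        strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
                sizeof(drvinfo->bus_info));
}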
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index a855db1ad249..3911401ed65d 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -163,16 +163,13 @@ static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
regs[n] = __er32(hw, E1000_TARC(n));
break;
default:
- printk(KERN_INFO "%-15s %08x\n",
- reginfo->name, __er32(hw, reginfo->ofs));
+ pr_info("%-15s %08x\n",
+ reginfo->name, __er32(hw, reginfo->ofs));
return;
}
snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]");
- printk(KERN_INFO "%-15s ", rname);
- for (n = 0; n < 2; n++)
- printk(KERN_CONT "%08x ", regs[n]);
- printk(KERN_CONT "\n");
+ pr_info("%-15s %08x %08x\n", rname, regs[0], regs[1]);
}
/*
@@ -208,16 +205,15 @@ static void e1000e_dump(struct e1000_adapter *adapter)
/* Print netdevice Info */
if (netdev) {
dev_info(&adapter->pdev->dev, "Net device Info\n");
- printk(KERN_INFO "Device Name state "
- "trans_start last_rx\n");
- printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
- netdev->name, netdev->state, netdev->trans_start,
- netdev->last_rx);
+ pr_info("Device Name state trans_start last_rx\n");
+ pr_info("%-15s %016lX %016lX %016lX\n",
+ netdev->name, netdev->state, netdev->trans_start,
+ netdev->last_rx);
}
/* Print Registers */
dev_info(&adapter->pdev->dev, "Register Dump\n");
- printk(KERN_INFO " Register Name Value\n");
+ pr_info(" Register Name Value\n");
for (reginfo = (struct e1000_reg_info *)e1000_reg_info_tbl;
reginfo->name; reginfo++) {
e1000_regdump(hw, reginfo);
@@ -228,15 +224,14 @@ static void e1000e_dump(struct e1000_adapter *adapter)
goto exit;
dev_info(&adapter->pdev->dev, "Tx Ring Summary\n");
- printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ]"
- " leng ntw timestamp\n");
+ pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n");
buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
- printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
- 0, tx_ring->next_to_use, tx_ring->next_to_clean,
- (unsigned long long)buffer_info->dma,
- buffer_info->length,
- buffer_info->next_to_watch,
- (unsigned long long)buffer_info->time_stamp);
+ pr_info(" %5d %5X %5X %016llX %04X %3X %016llX\n",
+ 0, tx_ring->next_to_use, tx_ring->next_to_clean,
+ (unsigned long long)buffer_info->dma,
+ buffer_info->length,
+ buffer_info->next_to_watch,
+ (unsigned long long)buffer_info->time_stamp);
/* Print Tx Ring */
if (!netif_msg_tx_done(adapter))
@@ -271,37 +266,32 @@ static void e1000e_dump(struct e1000_adapter *adapter)
* +----------------------------------------------------------------+
* 63 48 47 40 39 36 35 32 31 24 23 20 19 0
*/
- printk(KERN_INFO "Tl[desc] [address 63:0 ] [SpeCssSCmCsLen]"
- " [bi->dma ] leng ntw timestamp bi->skb "
- "<-- Legacy format\n");
- printk(KERN_INFO "Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen]"
- " [bi->dma ] leng ntw timestamp bi->skb "
- "<-- Ext Context format\n");
- printk(KERN_INFO "Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen]"
- " [bi->dma ] leng ntw timestamp bi->skb "
- "<-- Ext Data format\n");
+ pr_info("Tl[desc] [address 63:0 ] [SpeCssSCmCsLen] [bi->dma ] leng ntw timestamp bi->skb <-- Legacy format\n");
+ pr_info("Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma ] leng ntw timestamp bi->skb <-- Ext Context format\n");
+ pr_info("Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen] [bi->dma ] leng ntw timestamp bi->skb <-- Ext Data format\n");
for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
+ const char *next_desc;
tx_desc = E1000_TX_DESC(*tx_ring, i);
buffer_info = &tx_ring->buffer_info[i];
u0 = (struct my_u0 *)tx_desc;
- printk(KERN_INFO "T%c[0x%03X] %016llX %016llX %016llX "
- "%04X %3X %016llX %p",
- (!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' :
- ((le64_to_cpu(u0->b) & (1 << 20)) ? 'd' : 'c')), i,
- (unsigned long long)le64_to_cpu(u0->a),
- (unsigned long long)le64_to_cpu(u0->b),
- (unsigned long long)buffer_info->dma,
- buffer_info->length, buffer_info->next_to_watch,
- (unsigned long long)buffer_info->time_stamp,
- buffer_info->skb);
if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
- printk(KERN_CONT " NTC/U\n");
+ next_desc = " NTC/U";
else if (i == tx_ring->next_to_use)
- printk(KERN_CONT " NTU\n");
+ next_desc = " NTU";
else if (i == tx_ring->next_to_clean)
- printk(KERN_CONT " NTC\n");
+ next_desc = " NTC";
else
- printk(KERN_CONT "\n");
+ next_desc = "";
+ pr_info("T%c[0x%03X] %016llX %016llX %016llX %04X %3X %016llX %p%s\n",
+ (!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' :
+ ((le64_to_cpu(u0->b) & (1 << 20)) ? 'd' : 'c')),
+ i,
+ (unsigned long long)le64_to_cpu(u0->a),
+ (unsigned long long)le64_to_cpu(u0->b),
+ (unsigned long long)buffer_info->dma,
+ buffer_info->length, buffer_info->next_to_watch,
+ (unsigned long long)buffer_info->time_stamp,
+ buffer_info->skb, next_desc);
if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
@@ -312,9 +302,9 @@ static void e1000e_dump(struct e1000_adapter *adapter)
/* Print Rx Ring Summary */
rx_ring_summary:
dev_info(&adapter->pdev->dev, "Rx Ring Summary\n");
- printk(KERN_INFO "Queue [NTU] [NTC]\n");
- printk(KERN_INFO " %5d %5X %5X\n", 0,
- rx_ring->next_to_use, rx_ring->next_to_clean);
+ pr_info("Queue [NTU] [NTC]\n");
+ pr_info(" %5d %5X %5X\n",
+ 0, rx_ring->next_to_use, rx_ring->next_to_clean);
/* Print Rx Ring */
if (!netif_msg_rx_status(adapter))
@@ -337,10 +327,7 @@ rx_ring_summary:
* 24 | Buffer Address 3 [63:0] |
* +-----------------------------------------------------+
*/
- printk(KERN_INFO "R [desc] [buffer 0 63:0 ] "
- "[buffer 1 63:0 ] "
- "[buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma ] "
- "[bi->skb] <-- Ext Pkt Split format\n");
+ pr_info("R [desc] [buffer 0 63:0 ] [buffer 1 63:0 ] [buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma ] [bi->skb] <-- Ext Pkt Split format\n");
/* [Extended] Receive Descriptor (Write-Back) Format
*
* 63 48 47 32 31 13 12 8 7 4 3 0
@@ -352,35 +339,40 @@ rx_ring_summary:
* +------------------------------------------------------+
* 63 48 47 32 31 20 19 0
*/
- printk(KERN_INFO "RWB[desc] [ck ipid mrqhsh] "
- "[vl l0 ee es] "
- "[ l3 l2 l1 hs] [reserved ] ---------------- "
- "[bi->skb] <-- Ext Rx Write-Back format\n");
+ pr_info("RWB[desc] [ck ipid mrqhsh] [vl l0 ee es] [ l3 l2 l1 hs] [reserved ] ---------------- [bi->skb] <-- Ext Rx Write-Back format\n");
for (i = 0; i < rx_ring->count; i++) {
+ const char *next_desc;
buffer_info = &rx_ring->buffer_info[i];
rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i);
u1 = (struct my_u1 *)rx_desc_ps;
staterr =
le32_to_cpu(rx_desc_ps->wb.middle.status_error);
+
+ if (i == rx_ring->next_to_use)
+ next_desc = " NTU";
+ else if (i == rx_ring->next_to_clean)
+ next_desc = " NTC";
+ else
+ next_desc = "";
+
if (staterr & E1000_RXD_STAT_DD) {
/* Descriptor Done */
- printk(KERN_INFO "RWB[0x%03X] %016llX "
- "%016llX %016llX %016llX "
- "---------------- %p", i,
- (unsigned long long)le64_to_cpu(u1->a),
- (unsigned long long)le64_to_cpu(u1->b),
- (unsigned long long)le64_to_cpu(u1->c),
- (unsigned long long)le64_to_cpu(u1->d),
- buffer_info->skb);
+ pr_info("%s[0x%03X] %016llX %016llX %016llX %016llX ---------------- %p%s\n",
+ "RWB", i,
+ (unsigned long long)le64_to_cpu(u1->a),
+ (unsigned long long)le64_to_cpu(u1->b),
+ (unsigned long long)le64_to_cpu(u1->c),
+ (unsigned long long)le64_to_cpu(u1->d),
+ buffer_info->skb, next_desc);
} else {
- printk(KERN_INFO "R [0x%03X] %016llX "
- "%016llX %016llX %016llX %016llX %p", i,
- (unsigned long long)le64_to_cpu(u1->a),
- (unsigned long long)le64_to_cpu(u1->b),
- (unsigned long long)le64_to_cpu(u1->c),
- (unsigned long long)le64_to_cpu(u1->d),
- (unsigned long long)buffer_info->dma,
- buffer_info->skb);
+ pr_info("%s[0x%03X] %016llX %016llX %016llX %016llX %016llX %p%s\n",
+ "R ", i,
+ (unsigned long long)le64_to_cpu(u1->a),
+ (unsigned long long)le64_to_cpu(u1->b),
+ (unsigned long long)le64_to_cpu(u1->c),
+ (unsigned long long)le64_to_cpu(u1->d),
+ (unsigned long long)buffer_info->dma,
+ buffer_info->skb, next_desc);
if (netif_msg_pktdata(adapter))
print_hex_dump(KERN_INFO, "",
@@ -388,13 +380,6 @@ rx_ring_summary:
phys_to_virt(buffer_info->dma),
adapter->rx_ps_bsize0, true);
}
-
- if (i == rx_ring->next_to_use)
- printk(KERN_CONT " NTU\n");
- else if (i == rx_ring->next_to_clean)
- printk(KERN_CONT " NTC\n");
- else
- printk(KERN_CONT "\n");
}
break;
default:
@@ -407,9 +392,7 @@ rx_ring_summary:
* 8 | Reserved |
* +-----------------------------------------------------+
*/
- printk(KERN_INFO "R [desc] [buf addr 63:0 ] "
- "[reserved 63:0 ] [bi->dma ] "
- "[bi->skb] <-- Ext (Read) format\n");
+ pr_info("R [desc] [buf addr 63:0 ] [reserved 63:0 ] [bi->dma ] [bi->skb] <-- Ext (Read) format\n");
/* Extended Receive Descriptor (Write-Back) Format
*
* 63 48 47 32 31 24 23 4 3 0
@@ -423,29 +406,37 @@ rx_ring_summary:
* +------------------------------------------------------+
* 63 48 47 32 31 20 19 0
*/
- printk(KERN_INFO "RWB[desc] [cs ipid mrq] "
- "[vt ln xe xs] "
- "[bi->skb] <-- Ext (Write-Back) format\n");
+ pr_info("RWB[desc] [cs ipid mrq] [vt ln xe xs] [bi->skb] <-- Ext (Write-Back) format\n");
for (i = 0; i < rx_ring->count; i++) {
+ const char *next_desc;
+
buffer_info = &rx_ring->buffer_info[i];
rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
u1 = (struct my_u1 *)rx_desc;
staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+
+ if (i == rx_ring->next_to_use)
+ next_desc = " NTU";
+ else if (i == rx_ring->next_to_clean)
+ next_desc = " NTC";
+ else
+ next_desc = "";
+
if (staterr & E1000_RXD_STAT_DD) {
/* Descriptor Done */
- printk(KERN_INFO "RWB[0x%03X] %016llX "
- "%016llX ---------------- %p", i,
- (unsigned long long)le64_to_cpu(u1->a),
- (unsigned long long)le64_to_cpu(u1->b),
- buffer_info->skb);
+ pr_info("%s[0x%03X] %016llX %016llX ---------------- %p%s\n",
+ "RWB", i,
+ (unsigned long long)le64_to_cpu(u1->a),
+ (unsigned long long)le64_to_cpu(u1->b),
+ buffer_info->skb, next_desc);
} else {
- printk(KERN_INFO "R [0x%03X] %016llX "
- "%016llX %016llX %p", i,
- (unsigned long long)le64_to_cpu(u1->a),
- (unsigned long long)le64_to_cpu(u1->b),
- (unsigned long long)buffer_info->dma,
- buffer_info->skb);
+ pr_info("%s[0x%03X] %016llX %016llX %016llX %p%s\n",
+ "R ", i,
+ (unsigned long long)le64_to_cpu(u1->a),
+ (unsigned long long)le64_to_cpu(u1->b),
+ (unsigned long long)buffer_info->dma,
+ buffer_info->skb, next_desc);
if (netif_msg_pktdata(adapter))
print_hex_dump(KERN_INFO, "",
@@ -456,13 +447,6 @@ rx_ring_summary:
adapter->rx_buffer_len,
true);
}
-
- if (i == rx_ring->next_to_use)
- printk(KERN_CONT " NTU\n");
- else if (i == rx_ring->next_to_clean)
- printk(KERN_CONT " NTC\n");
- else
- printk(KERN_CONT "\n");
}
}
@@ -875,7 +859,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
u32 length, staterr;
unsigned int i;
int cleaned_count = 0;
- bool cleaned = 0;
+ bool cleaned = false;
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
i = rx_ring->next_to_clean;
@@ -904,7 +888,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
next_buffer = &rx_ring->buffer_info[i];
- cleaned = 1;
+ cleaned = true;
cleaned_count++;
dma_unmap_single(&pdev->dev,
buffer_info->dma,
@@ -1030,6 +1014,7 @@ static void e1000_print_hw_hang(struct work_struct *work)
struct e1000_adapter *adapter = container_of(work,
struct e1000_adapter,
print_hang_task);
+ struct net_device *netdev = adapter->netdev;
struct e1000_ring *tx_ring = adapter->tx_ring;
unsigned int i = tx_ring->next_to_clean;
unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
@@ -1041,6 +1026,21 @@ static void e1000_print_hw_hang(struct work_struct *work)
if (test_bit(__E1000_DOWN, &adapter->state))
return;
+ if (!adapter->tx_hang_recheck &&
+ (adapter->flags2 & FLAG2_DMA_BURST)) {
+ /* May be blocked on write-back; flush pending descriptor
+ * writebacks to memory and detect again
+ */
+ ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
+ /* execute the writes immediately */
+ e1e_flush();
+ adapter->tx_hang_recheck = true;
+ return;
+ }
+ /* Real hang detected */
+ adapter->tx_hang_recheck = false;
+ netif_stop_queue(netdev);
+
e1e_rphy(hw, PHY_STATUS, &phy_status);
e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status);
e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status);
@@ -1095,6 +1095,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
unsigned int i, eop;
unsigned int count = 0;
unsigned int total_tx_bytes = 0, total_tx_packets = 0;
+ unsigned int bytes_compl = 0, pkts_compl = 0;
i = tx_ring->next_to_clean;
eop = tx_ring->buffer_info[i].next_to_watch;
@@ -1112,6 +1113,10 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
if (cleaned) {
total_tx_packets += buffer_info->segs;
total_tx_bytes += buffer_info->bytecount;
+ if (buffer_info->skb) {
+ bytes_compl += buffer_info->skb->len;
+ pkts_compl++;
+ }
}
e1000_put_txbuf(adapter, buffer_info);
@@ -1130,6 +1135,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
tx_ring->next_to_clean = i;
+ netdev_completed_queue(netdev, pkts_compl, bytes_compl);
+
#define TX_WAKE_THRESHOLD 32
if (count && netif_carrier_ok(netdev) &&
e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) {
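[Editor's note] The bytes_compl/pkts_compl bookkeeping and the netdev_completed_queue() call here, together with the netdev_sent_queue() and netdev_reset_queue() calls added elsewhere in this patch, hook the driver into the new byte queue limits (BQL) infrastructure. A condensed sketch of the three call sites for a single-queue device (function names hypothetical):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Transmit path: tell BQL how many bytes were handed to the hardware. */
static void foo_xmit_account(struct net_device *netdev, struct sk_buff *skb)
{
        netdev_sent_queue(netdev, skb->len);
        /* ...post the descriptors to the ring... */
}

/* Completion path: report what the hardware has finished sending. */
static void foo_tx_clean_account(struct net_device *netdev,
                                 unsigned int pkts_compl,
                                 unsigned int bytes_compl)
{
        netdev_completed_queue(netdev, pkts_compl, bytes_compl);
}

/* Ring teardown: the BQL state must be cleared together with the ring. */
static void foo_tx_ring_reset(struct net_device *netdev)
{
        netdev_reset_queue(netdev);
}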
@@ -1150,14 +1157,14 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
* Detect a transmit hang in hardware, this serializes the
* check with the clearing of time_stamp and movement of i
*/
- adapter->detect_tx_hung = 0;
+ adapter->detect_tx_hung = false;
if (tx_ring->buffer_info[i].time_stamp &&
time_after(jiffies, tx_ring->buffer_info[i].time_stamp
+ (adapter->tx_timeout_factor * HZ)) &&
- !(er32(STATUS) & E1000_STATUS_TXOFF)) {
+ !(er32(STATUS) & E1000_STATUS_TXOFF))
schedule_work(&adapter->print_hang_task);
- netif_stop_queue(netdev);
- }
+ else
+ adapter->tx_hang_recheck = false;
}
adapter->total_tx_bytes += total_tx_bytes;
adapter->total_tx_packets += total_tx_packets;
@@ -1185,7 +1192,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
unsigned int i, j;
u32 length, staterr;
int cleaned_count = 0;
- bool cleaned = 0;
+ bool cleaned = false;
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
i = rx_ring->next_to_clean;
@@ -1211,7 +1218,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
next_buffer = &rx_ring->buffer_info[i];
- cleaned = 1;
+ cleaned = true;
cleaned_count++;
dma_unmap_single(&pdev->dev, buffer_info->dma,
adapter->rx_ps_bsize0, DMA_FROM_DEVICE);
@@ -1222,8 +1229,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
adapter->flags2 |= FLAG2_IS_DISCARDING;
if (adapter->flags2 & FLAG2_IS_DISCARDING) {
- e_dbg("Packet Split buffers didn't pick up the full "
- "packet\n");
+ e_dbg("Packet Split buffers didn't pick up the full packet\n");
dev_kfree_skb_irq(skb);
if (staterr & E1000_RXD_STAT_EOP)
adapter->flags2 &= ~FLAG2_IS_DISCARDING;
@@ -1238,8 +1244,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
length = le16_to_cpu(rx_desc->wb.middle.length0);
if (!length) {
- e_dbg("Last part of the packet spanning multiple "
- "descriptors\n");
+ e_dbg("Last part of the packet spanning multiple descriptors\n");
dev_kfree_skb_irq(skb);
goto next_desc;
}
@@ -1917,8 +1922,7 @@ void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
return;
}
/* MSI-X failed, so fall through and try MSI */
- e_err("Failed to initialize MSI-X interrupts. "
- "Falling back to MSI interrupts.\n");
+ e_err("Failed to initialize MSI-X interrupts. Falling back to MSI interrupts.\n");
e1000e_reset_interrupt_capability(adapter);
}
adapter->int_mode = E1000E_INT_MODE_MSI;
@@ -1928,8 +1932,7 @@ void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
adapter->flags |= FLAG_MSI_ENABLED;
} else {
adapter->int_mode = E1000E_INT_MODE_LEGACY;
- e_err("Failed to initialize MSI interrupts. Falling "
- "back to legacy interrupts.\n");
+ e_err("Failed to initialize MSI interrupts. Falling back to legacy interrupts.\n");
}
/* Fall through */
case E1000E_INT_MODE_LEGACY:
@@ -2260,6 +2263,7 @@ static void e1000_clean_tx_ring(struct e1000_adapter *adapter)
e1000_put_txbuf(adapter, buffer_info);
}
+ netdev_reset_queue(adapter->netdev);
size = sizeof(struct e1000_buffer) * tx_ring->count;
memset(tx_ring->buffer_info, 0, size);
@@ -2518,7 +2522,7 @@ clean_rx:
return work_done;
}
-static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -2528,7 +2532,7 @@ static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
if ((adapter->hw.mng_cookie.status &
E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
(vid == adapter->mng_vlan_id))
- return;
+ return 0;
/* add VID to filter table */
if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
@@ -2539,9 +2543,11 @@ static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
}
set_bit(vid, adapter->active_vlans);
+
+ return 0;
}
-static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -2552,7 +2558,7 @@ static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
(vid == adapter->mng_vlan_id)) {
/* release control to f/w */
e1000e_release_hw_control(adapter);
- return;
+ return 0;
}
/* remove VID from filter table */
@@ -2564,6 +2570,8 @@ static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
}
clear_bit(vid, adapter->active_vlans);
+
+ return 0;
}
/**
@@ -3113,79 +3121,147 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
}
/**
- * e1000_update_mc_addr_list - Update Multicast addresses
- * @hw: pointer to the HW structure
- * @mc_addr_list: array of multicast addresses to program
- * @mc_addr_count: number of multicast addresses to program
+ * e1000e_write_mc_addr_list - write multicast addresses to MTA
+ * @netdev: network interface device structure
+ *
+ * Writes multicast address list to the MTA hash table.
+ * Returns: -ENOMEM on failure
+ * 0 on no addresses written
+ * X on writing X addresses to MTA
+ */
+static int e1000e_write_mc_addr_list(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ struct netdev_hw_addr *ha;
+ u8 *mta_list;
+ int i;
+
+ if (netdev_mc_empty(netdev)) {
+ /* nothing to program, so clear mc list */
+ hw->mac.ops.update_mc_addr_list(hw, NULL, 0);
+ return 0;
+ }
+
+ mta_list = kzalloc(netdev_mc_count(netdev) * ETH_ALEN, GFP_ATOMIC);
+ if (!mta_list)
+ return -ENOMEM;
+
+ /* update_mc_addr_list expects a packed array of only addresses. */
+ i = 0;
+ netdev_for_each_mc_addr(ha, netdev)
+ memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
+
+ hw->mac.ops.update_mc_addr_list(hw, mta_list, i);
+ kfree(mta_list);
+
+ return netdev_mc_count(netdev);
+}
+
+/**
+ * e1000e_write_uc_addr_list - write unicast addresses to RAR table
+ * @netdev: network interface device structure
*
- * Updates the Multicast Table Array.
- * The caller must have a packed mc_addr_list of multicast addresses.
+ * Writes unicast address list to the RAR table.
+ * Returns: -ENOMEM on failure/insufficient address space
+ * 0 on no addresses written
+ * X on writing X addresses to the RAR table
**/
-static void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list,
- u32 mc_addr_count)
+static int e1000e_write_uc_addr_list(struct net_device *netdev)
{
- hw->mac.ops.update_mc_addr_list(hw, mc_addr_list, mc_addr_count);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ unsigned int rar_entries = hw->mac.rar_entry_count;
+ int count = 0;
+
+ /* save a rar entry for our hardware address */
+ rar_entries--;
+
+ /* save a rar entry for the LAA workaround */
+ if (adapter->flags & FLAG_RESET_OVERWRITES_LAA)
+ rar_entries--;
+
+ /* return ENOMEM indicating insufficient memory for addresses */
+ if (netdev_uc_count(netdev) > rar_entries)
+ return -ENOMEM;
+
+ if (!netdev_uc_empty(netdev) && rar_entries) {
+ struct netdev_hw_addr *ha;
+
+ /*
+ * write the addresses in reverse order to avoid write
+ * combining
+ */
+ netdev_for_each_uc_addr(ha, netdev) {
+ if (!rar_entries)
+ break;
+ e1000e_rar_set(hw, ha->addr, rar_entries--);
+ count++;
+ }
+ }
+
+ /* zero out the remaining RAR entries not used above */
+ for (; rar_entries > 0; rar_entries--) {
+ ew32(RAH(rar_entries), 0);
+ ew32(RAL(rar_entries), 0);
+ }
+ e1e_flush();
+
+ return count;
}
/**
- * e1000_set_multi - Multicast and Promiscuous mode set
+ * e1000e_set_rx_mode - secondary unicast, Multicast and Promiscuous mode set
* @netdev: network interface device structure
*
- * The set_multi entry point is called whenever the multicast address
- * list or the network interface flags are updated. This routine is
- * responsible for configuring the hardware for proper multicast,
+ * The ndo_set_rx_mode entry point is called whenever the unicast or multicast
+ * address list or the network interface flags are updated. This routine is
+ * responsible for configuring the hardware for proper unicast, multicast,
* promiscuous mode, and all-multi behavior.
**/
-static void e1000_set_multi(struct net_device *netdev)
+static void e1000e_set_rx_mode(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- struct netdev_hw_addr *ha;
- u8 *mta_list;
u32 rctl;
/* Check for Promiscuous and All Multicast modes */
-
rctl = er32(RCTL);
+ /* clear the affected bits */
+ rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
+
if (netdev->flags & IFF_PROMISC) {
rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
- rctl &= ~E1000_RCTL_VFE;
/* Do not hardware filter VLANs in promisc mode */
e1000e_vlan_filter_disable(adapter);
} else {
+ int count;
if (netdev->flags & IFF_ALLMULTI) {
rctl |= E1000_RCTL_MPE;
- rctl &= ~E1000_RCTL_UPE;
} else {
- rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
+ /*
+ * Write addresses to the MTA, if the attempt fails
+ * then we should just turn on promiscuous mode so
+ * that we can at least receive multicast traffic
+ */
+ count = e1000e_write_mc_addr_list(netdev);
+ if (count < 0)
+ rctl |= E1000_RCTL_MPE;
}
e1000e_vlan_filter_enable(adapter);
- }
-
- ew32(RCTL, rctl);
-
- if (!netdev_mc_empty(netdev)) {
- int i = 0;
-
- mta_list = kmalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
- if (!mta_list)
- return;
-
- /* prepare a packed array of only addresses. */
- netdev_for_each_mc_addr(ha, netdev)
- memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
-
- e1000_update_mc_addr_list(hw, mta_list, i);
- kfree(mta_list);
- } else {
/*
- * if we're called from probe, we might not have
- * anything to do here, so clear out the list
+ * Write addresses to available RAR registers, if there is not
+ * sufficient space to store all the addresses then enable
+ * unicast promiscuous mode
*/
- e1000_update_mc_addr_list(hw, NULL, 0);
+ count = e1000e_write_uc_addr_list(netdev);
+ if (count < 0)
+ rctl |= E1000_RCTL_UPE;
}
+ ew32(RCTL, rctl);
+
if (netdev->features & NETIF_F_HW_VLAN_RX)
e1000e_vlan_strip_enable(adapter);
else
@@ -3198,7 +3274,7 @@ static void e1000_set_multi(struct net_device *netdev)
**/
static void e1000_configure(struct e1000_adapter *adapter)
{
- e1000_set_multi(adapter->netdev);
+ e1000e_set_rx_mode(adapter->netdev);
e1000_restore_vlan(adapter);
e1000_init_manageability_pt(adapter);
@@ -3444,7 +3520,6 @@ int e1000e_up(struct e1000_adapter *adapter)
clear_bit(__E1000_DOWN, &adapter->state);
- napi_enable(&adapter->napi);
if (adapter->msix_entries)
e1000_configure_msix(adapter);
e1000_irq_enable(adapter);
@@ -3506,7 +3581,6 @@ void e1000e_down(struct e1000_adapter *adapter)
e1e_flush();
usleep_range(10000, 20000);
- napi_disable(&adapter->napi);
e1000_irq_disable(adapter);
del_timer_sync(&adapter->watchdog_timer);
@@ -3782,6 +3856,7 @@ static int e1000_open(struct net_device *netdev)
e1000_irq_enable(adapter);
+ adapter->tx_hang_recheck = false;
netif_start_queue(netdev);
adapter->idle_check = true;
@@ -3828,6 +3903,8 @@ static int e1000_close(struct net_device *netdev)
pm_runtime_get_sync(&pdev->dev);
+ napi_disable(&adapter->napi);
+
if (!test_bit(__E1000_DOWN, &adapter->state)) {
e1000e_down(adapter);
e1000_free_irq(adapter);
@@ -4168,22 +4245,19 @@ static void e1000_print_link_info(struct e1000_adapter *adapter)
u32 ctrl = er32(CTRL);
/* Link status message must follow this format for user tools */
- printk(KERN_INFO "e1000e: %s NIC Link is Up %d Mbps %s, "
- "Flow Control: %s\n",
- adapter->netdev->name,
- adapter->link_speed,
- (adapter->link_duplex == FULL_DUPLEX) ?
- "Full Duplex" : "Half Duplex",
- ((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ?
- "Rx/Tx" :
- ((ctrl & E1000_CTRL_RFCE) ? "Rx" :
- ((ctrl & E1000_CTRL_TFCE) ? "Tx" : "None")));
+ printk(KERN_INFO "e1000e: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
+ adapter->netdev->name,
+ adapter->link_speed,
+ adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half",
+ (ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE) ? "Rx/Tx" :
+ (ctrl & E1000_CTRL_RFCE) ? "Rx" :
+ (ctrl & E1000_CTRL_TFCE) ? "Tx" : "None");
}
static bool e1000e_has_link(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
- bool link_active = 0;
+ bool link_active = false;
s32 ret_val = 0;
/*
@@ -4198,7 +4272,7 @@ static bool e1000e_has_link(struct e1000_adapter *adapter)
ret_val = hw->mac.ops.check_for_link(hw);
link_active = !hw->mac.get_link_status;
} else {
- link_active = 1;
+ link_active = true;
}
break;
case e1000_media_type_fiber:
@@ -4297,7 +4371,7 @@ static void e1000_watchdog_task(struct work_struct *work)
if (link) {
if (!netif_carrier_ok(netdev)) {
- bool txb2b = 1;
+ bool txb2b = true;
/* Cancel scheduled suspend requests. */
pm_runtime_resume(netdev->dev.parent);
@@ -4323,21 +4397,18 @@ static void e1000_watchdog_task(struct work_struct *work)
e1e_rphy(hw, PHY_AUTONEG_EXP, &autoneg_exp);
if (!(autoneg_exp & NWAY_ER_LP_NWAY_CAPS))
- e_info("Autonegotiated half duplex but"
- " link partner cannot autoneg. "
- " Try forcing full duplex if "
- "link gets many collisions.\n");
+ e_info("Autonegotiated half duplex but link partner cannot autoneg. Try forcing full duplex if link gets many collisions.\n");
}
/* adjust timeout factor according to speed/duplex */
adapter->tx_timeout_factor = 1;
switch (adapter->link_speed) {
case SPEED_10:
- txb2b = 0;
+ txb2b = false;
adapter->tx_timeout_factor = 16;
break;
case SPEED_100:
- txb2b = 0;
+ txb2b = false;
adapter->tx_timeout_factor = 10;
break;
}
@@ -4473,7 +4544,7 @@ link_up:
e1000e_flush_descriptors(adapter);
/* Force detection of hung controller every watchdog period */
- adapter->detect_tx_hung = 1;
+ adapter->detect_tx_hung = true;
/*
* With 82571 controllers, LAA may be overwritten due to controller
@@ -4985,6 +5056,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
/* if count is 0 then mapping error has occurred */
count = e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss);
if (count) {
+ netdev_sent_queue(netdev, skb->len);
e1000_tx_queue(adapter, tx_flags, count);
/* Make sure there is space in the ring for the next send. */
e1000_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 2);
@@ -5110,8 +5182,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
if ((adapter->hw.mac.type == e1000_pch2lan) &&
!(adapter->flags2 & FLAG2_CRC_STRIPPING) &&
(new_mtu > ETH_DATA_LEN)) {
- e_err("Jumbo Frames not supported on 82579 when CRC "
- "stripping is disabled.\n");
+ e_err("Jumbo Frames not supported on 82579 when CRC stripping is disabled.\n");
return -EINVAL;
}
@@ -5331,7 +5402,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
if (wufc) {
e1000_setup_rctl(adapter);
- e1000_set_multi(netdev);
+ e1000e_set_rx_mode(netdev);
/* turn on all-multi mode if wake on multicast is enabled */
if (wufc & E1000_WUFC_MC) {
@@ -5527,8 +5598,8 @@ static int __e1000_resume(struct pci_dev *pdev)
phy_data & E1000_WUS_MC ? "Multicast Packet" :
phy_data & E1000_WUS_BC ? "Broadcast Packet" :
phy_data & E1000_WUS_MAG ? "Magic Packet" :
- phy_data & E1000_WUS_LNKC ? "Link Status "
- " Change" : "other");
+ phy_data & E1000_WUS_LNKC ?
+ "Link Status Change" : "other");
}
e1e_wphy(&adapter->hw, BM_WUS, ~0);
} else {
@@ -5859,10 +5930,11 @@ static void e1000_eeprom_checks(struct e1000_adapter *adapter)
}
}
-static int e1000_set_features(struct net_device *netdev, u32 features)
+static int e1000_set_features(struct net_device *netdev,
+ netdev_features_t features)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- u32 changed = features ^ netdev->features;
+ netdev_features_t changed = features ^ netdev->features;
if (changed & (NETIF_F_TSO | NETIF_F_TSO6))
adapter->flags |= FLAG_TSO_FORCE;
@@ -5884,7 +5956,7 @@ static const struct net_device_ops e1000e_netdev_ops = {
.ndo_stop = e1000_close,
.ndo_start_xmit = e1000_xmit_frame,
.ndo_get_stats64 = e1000e_get_stats64,
- .ndo_set_rx_mode = e1000_set_multi,
+ .ndo_set_rx_mode = e1000e_set_rx_mode,
.ndo_set_mac_address = e1000_set_mac,
.ndo_change_mtu = e1000_change_mtu,
.ndo_do_ioctl = e1000_ioctl,
@@ -5949,8 +6021,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
err = dma_set_coherent_mask(&pdev->dev,
DMA_BIT_MASK(32));
if (err) {
- dev_err(&pdev->dev, "No usable DMA "
- "configuration, aborting\n");
+ dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
goto err_dma;
}
}
@@ -6076,6 +6147,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
NETIF_F_TSO6 |
NETIF_F_HW_CSUM);
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
if (pci_using_dac) {
netdev->features |= NETIF_F_HIGHDMA;
netdev->vlan_features |= NETIF_F_HIGHDMA;
@@ -6135,7 +6208,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
/* Initialize link parameters. User can change them with ethtool */
adapter->hw.mac.autoneg = 1;
- adapter->fc_autoneg = 1;
+ adapter->fc_autoneg = true;
adapter->hw.fc.requested_mode = e1000_fc_default;
adapter->hw.fc.current_mode = e1000_fc_default;
adapter->hw.phy.autoneg_advertised = 0x2f;
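[Editor's note] Earlier in this file the receive-filter path is rebuilt around e1000e_set_rx_mode(): secondary unicast addresses are written into spare receive-address registers and the promiscuous bits are set only when the tables overflow, which is what allows probe to advertise IFF_UNICAST_FLT. A compressed sketch of the unicast half of that scheme (the register count and helper name are hypothetical):

#include <linux/netdevice.h>
#include <linux/errno.h>

#define FOO_RAR_ENTRIES 15      /* hypothetical receive-address register count */

/*
 * Returns the number of addresses programmed, or -ENOMEM when they do not
 * fit; the caller then falls back to unicast promiscuous mode.
 */
static int foo_write_uc_addr_list(struct net_device *netdev)
{
        struct netdev_hw_addr *ha;
        int free_slots = FOO_RAR_ENTRIES - 1;   /* slot 0 holds the MAC address */
        int count = 0;

        if (netdev_uc_count(netdev) > free_slots)
                return -ENOMEM;

        netdev_for_each_uc_addr(ha, netdev) {
                /* program ha->addr into the next free receive-address slot */
                count++;
        }

        return count;
}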
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index 7881fb95a25b..b8e20f037d0a 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -29,6 +29,8 @@
* e1000_82576
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/types.h>
#include <linux/if_ether.h>
@@ -244,8 +246,7 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
* Check for invalid size
*/
if ((hw->mac.type == e1000_82576) && (size > 15)) {
- printk("igb: The NVM size is not valid, "
- "defaulting to 32K.\n");
+ pr_notice("The NVM size is not valid, defaulting to 32K\n");
size = 15;
}
nvm->word_size = 1 << size;
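[Editor's note] Defining pr_fmt() before the includes is what lets the bare pr_notice()/pr_info() calls that replace the hand-rolled "igb: ..." printk strings keep their module-name prefix. A tiny sketch of the convention (the "foo" module is hypothetical):

/* Must be defined before the first include that pulls in <linux/printk.h>. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/printk.h>

static int __init foo_init(void)
{
        /* both lines are automatically prefixed, e.g. "foo: loaded" */
        pr_info("loaded\n");
        pr_notice("The NVM size is not valid, defaulting to 32K\n");
        return 0;
}
module_init(foo_init);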
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index c69feebf2653..3d12e67eebb4 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -447,4 +447,9 @@ static inline s32 igb_get_phy_info(struct e1000_hw *hw)
return 0;
}
+static inline struct netdev_queue *txring_txq(const struct igb_ring *tx_ring)
+{
+ return netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index);
+}
+
#endif /* _IGB_H_ */
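[Editor's note] The txring_txq() helper gives each igb transmit ring its own struct netdev_queue, so the BQL accounting sketched above for the single-queue e1000e case can be done per queue. A per-queue variant (the ring layout is hypothetical):

#include <linux/netdevice.h>

struct foo_ring {                       /* hypothetical per-queue state */
        struct net_device *netdev;
        u8 queue_index;
};

static inline struct netdev_queue *foo_txring_txq(const struct foo_ring *ring)
{
        return netdev_get_tx_queue(ring->netdev, ring->queue_index);
}

/* xmit path */
static void foo_tx_sent(struct foo_ring *ring, unsigned int bytes)
{
        netdev_tx_sent_queue(foo_txring_txq(ring), bytes);
}

/* completion path */
static void foo_tx_done(struct foo_ring *ring, unsigned int pkts,
                        unsigned int bytes)
{
        netdev_tx_completed_queue(foo_txring_txq(ring), pkts, bytes);
}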
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 43873eba2f63..7998bf4d5946 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -36,6 +36,7 @@
#include <linux/ethtool.h>
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/pm_runtime.h>
#include "igb.h"
@@ -148,7 +149,8 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
SUPPORTED_1000baseT_Full|
SUPPORTED_Autoneg |
SUPPORTED_TP);
- ecmd->advertising = ADVERTISED_TP;
+ ecmd->advertising = (ADVERTISED_TP |
+ ADVERTISED_Pause);
if (hw->mac.autoneg == 1) {
ecmd->advertising |= ADVERTISED_Autoneg;
@@ -165,7 +167,8 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
ecmd->advertising = (ADVERTISED_1000baseT_Full |
ADVERTISED_FIBRE |
- ADVERTISED_Autoneg);
+ ADVERTISED_Autoneg |
+ ADVERTISED_Pause);
ecmd->port = PORT_FIBRE;
}
@@ -673,25 +676,22 @@ static void igb_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
struct igb_adapter *adapter = netdev_priv(netdev);
- char firmware_version[32];
u16 eeprom_data;
- strncpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver) - 1);
- strncpy(drvinfo->version, igb_driver_version,
- sizeof(drvinfo->version) - 1);
+ strlcpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, igb_driver_version, sizeof(drvinfo->version));
/* EEPROM image version # is reported as firmware version # for
* 82575 controllers */
adapter->hw.nvm.ops.read(&adapter->hw, 5, 1, &eeprom_data);
- sprintf(firmware_version, "%d.%d-%d",
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%d.%d-%d",
(eeprom_data & 0xF000) >> 12,
(eeprom_data & 0x0FF0) >> 4,
eeprom_data & 0x000F);
- strncpy(drvinfo->fw_version, firmware_version,
- sizeof(drvinfo->fw_version) - 1);
- strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
- sizeof(drvinfo->bus_info) - 1);
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info));
drvinfo->n_stats = IGB_STATS_LEN;
drvinfo->testinfo_len = IGB_TEST_LEN;
drvinfo->regdump_len = igb_get_regs_len(netdev);
@@ -2162,6 +2162,19 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
}
}
+static int igb_ethtool_begin(struct net_device *netdev)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ pm_runtime_get_sync(&adapter->pdev->dev);
+ return 0;
+}
+
+static void igb_ethtool_complete(struct net_device *netdev)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ pm_runtime_put(&adapter->pdev->dev);
+}
+
static const struct ethtool_ops igb_ethtool_ops = {
.get_settings = igb_get_settings,
.set_settings = igb_set_settings,
@@ -2188,6 +2201,8 @@ static const struct ethtool_ops igb_ethtool_ops = {
.get_ethtool_stats = igb_get_ethtool_stats,
.get_coalesce = igb_get_coalesce,
.set_coalesce = igb_set_coalesce,
+ .begin = igb_ethtool_begin,
+ .complete = igb_ethtool_complete,
};
void igb_set_ethtool_ops(struct net_device *netdev)
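[Editor's note] The new .begin/.complete hooks bracket every ethtool request with a runtime-PM reference so the hardware is awake while its registers are read. A minimal sketch of the pairing (hypothetical "foo" driver; assumes the netdev's parent is the PCI device, as set by SET_NETDEV_DEV()):

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/pm_runtime.h>

static int foo_ethtool_begin(struct net_device *netdev)
{
        /* wake the device (and hold it awake) for the duration of the request */
        pm_runtime_get_sync(netdev->dev.parent);
        return 0;
}

static void foo_ethtool_complete(struct net_device *netdev)
{
        /* drop the reference; the device may runtime-suspend again */
        pm_runtime_put(netdev->dev.parent);
}

static const struct ethtool_ops foo_ethtool_ops = {
        .begin          = foo_ethtool_begin,
        .complete       = foo_ethtool_complete,
};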
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index ced544499f1b..01e5e89ef959 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -25,6 +25,8 @@
*******************************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
@@ -51,6 +53,7 @@
#include <linux/if_ether.h>
#include <linux/aer.h>
#include <linux/prefetch.h>
+#include <linux/pm_runtime.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
@@ -145,9 +148,9 @@ static bool igb_clean_rx_irq(struct igb_q_vector *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
-static void igb_vlan_mode(struct net_device *netdev, u32 features);
-static void igb_vlan_rx_add_vid(struct net_device *, u16);
-static void igb_vlan_rx_kill_vid(struct net_device *, u16);
+static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features);
+static int igb_vlan_rx_add_vid(struct net_device *, u16);
+static int igb_vlan_rx_kill_vid(struct net_device *, u16);
static void igb_restore_vlan(struct igb_adapter *);
static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32 , u8);
static void igb_ping_all_vfs(struct igb_adapter *);
@@ -170,8 +173,18 @@ static int igb_check_vf_assignment(struct igb_adapter *adapter);
#endif
#ifdef CONFIG_PM
-static int igb_suspend(struct pci_dev *, pm_message_t);
-static int igb_resume(struct pci_dev *);
+static int igb_suspend(struct device *);
+static int igb_resume(struct device *);
+#ifdef CONFIG_PM_RUNTIME
+static int igb_runtime_suspend(struct device *dev);
+static int igb_runtime_resume(struct device *dev);
+static int igb_runtime_idle(struct device *dev);
+#endif
+static const struct dev_pm_ops igb_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(igb_suspend, igb_resume)
+ SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume,
+ igb_runtime_idle)
+};
#endif
static void igb_shutdown(struct pci_dev *);
#ifdef CONFIG_IGB_DCA
@@ -212,9 +225,7 @@ static struct pci_driver igb_driver = {
.probe = igb_probe,
.remove = __devexit_p(igb_remove),
#ifdef CONFIG_PM
- /* Power Management Hooks */
- .suspend = igb_suspend,
- .resume = igb_resume,
+ .driver.pm = &igb_pm_ops,
#endif
.shutdown = igb_shutdown,
.err_handler = &igb_err_handler
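[Editor's note] Moving from the legacy pci_driver .suspend/.resume pair to a dev_pm_ops table is what makes the runtime-PM callbacks added later in this file possible; every handler now takes a struct device. A skeletal sketch of the wiring (handler bodies elided, "foo" names hypothetical):

#include <linux/device.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int foo_suspend(struct device *dev)         { return 0; }
static int foo_resume(struct device *dev)          { return 0; }
static int foo_runtime_suspend(struct device *dev) { return 0; }
static int foo_runtime_resume(struct device *dev)  { return 0; }
static int foo_runtime_idle(struct device *dev)    { return -EBUSY; }

static const struct dev_pm_ops foo_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
        SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume,
                           foo_runtime_idle)
};

static struct pci_driver foo_driver = {
        .name           = "foo",
        /* .id_table, .probe and .remove omitted from this sketch */
        .driver.pm      = &foo_pm_ops,
};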
@@ -325,16 +336,13 @@ static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
regs[n] = rd32(E1000_TXDCTL(n));
break;
default:
- printk(KERN_INFO "%-15s %08x\n",
- reginfo->name, rd32(reginfo->ofs));
+ pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs));
return;
}
snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
- printk(KERN_INFO "%-15s ", rname);
- for (n = 0; n < 4; n++)
- printk(KERN_CONT "%08x ", regs[n]);
- printk(KERN_CONT "\n");
+ pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1],
+ regs[2], regs[3]);
}
/*
@@ -359,18 +367,15 @@ static void igb_dump(struct igb_adapter *adapter)
/* Print netdevice Info */
if (netdev) {
dev_info(&adapter->pdev->dev, "Net device Info\n");
- printk(KERN_INFO "Device Name state "
- "trans_start last_rx\n");
- printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
- netdev->name,
- netdev->state,
- netdev->trans_start,
- netdev->last_rx);
+ pr_info("Device Name state trans_start "
+ "last_rx\n");
+ pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
+ netdev->state, netdev->trans_start, netdev->last_rx);
}
/* Print Registers */
dev_info(&adapter->pdev->dev, "Register Dump\n");
- printk(KERN_INFO " Register Name Value\n");
+ pr_info(" Register Name Value\n");
for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl;
reginfo->name; reginfo++) {
igb_regdump(hw, reginfo);
@@ -381,18 +386,17 @@ static void igb_dump(struct igb_adapter *adapter)
goto exit;
dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
- printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ]"
- " leng ntw timestamp\n");
+ pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n");
for (n = 0; n < adapter->num_tx_queues; n++) {
struct igb_tx_buffer *buffer_info;
tx_ring = adapter->tx_ring[n];
buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
- printk(KERN_INFO " %5d %5X %5X %016llX %04X %p %016llX\n",
- n, tx_ring->next_to_use, tx_ring->next_to_clean,
- (u64)buffer_info->dma,
- buffer_info->length,
- buffer_info->next_to_watch,
- (u64)buffer_info->time_stamp);
+ pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
+ n, tx_ring->next_to_use, tx_ring->next_to_clean,
+ (u64)buffer_info->dma,
+ buffer_info->length,
+ buffer_info->next_to_watch,
+ (u64)buffer_info->time_stamp);
}
/* Print TX Rings */
@@ -414,36 +418,38 @@ static void igb_dump(struct igb_adapter *adapter)
for (n = 0; n < adapter->num_tx_queues; n++) {
tx_ring = adapter->tx_ring[n];
- printk(KERN_INFO "------------------------------------\n");
- printk(KERN_INFO "TX QUEUE INDEX = %d\n", tx_ring->queue_index);
- printk(KERN_INFO "------------------------------------\n");
- printk(KERN_INFO "T [desc] [address 63:0 ] "
- "[PlPOCIStDDM Ln] [bi->dma ] "
- "leng ntw timestamp bi->skb\n");
+ pr_info("------------------------------------\n");
+ pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
+ pr_info("------------------------------------\n");
+ pr_info("T [desc] [address 63:0 ] [PlPOCIStDDM Ln] "
+ "[bi->dma ] leng ntw timestamp "
+ "bi->skb\n");
for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
+ const char *next_desc;
struct igb_tx_buffer *buffer_info;
tx_desc = IGB_TX_DESC(tx_ring, i);
buffer_info = &tx_ring->tx_buffer_info[i];
u0 = (struct my_u0 *)tx_desc;
- printk(KERN_INFO "T [0x%03X] %016llX %016llX %016llX"
- " %04X %p %016llX %p", i,
+ if (i == tx_ring->next_to_use &&
+ i == tx_ring->next_to_clean)
+ next_desc = " NTC/U";
+ else if (i == tx_ring->next_to_use)
+ next_desc = " NTU";
+ else if (i == tx_ring->next_to_clean)
+ next_desc = " NTC";
+ else
+ next_desc = "";
+
+ pr_info("T [0x%03X] %016llX %016llX %016llX"
+ " %04X %p %016llX %p%s\n", i,
le64_to_cpu(u0->a),
le64_to_cpu(u0->b),
(u64)buffer_info->dma,
buffer_info->length,
buffer_info->next_to_watch,
(u64)buffer_info->time_stamp,
- buffer_info->skb);
- if (i == tx_ring->next_to_use &&
- i == tx_ring->next_to_clean)
- printk(KERN_CONT " NTC/U\n");
- else if (i == tx_ring->next_to_use)
- printk(KERN_CONT " NTU\n");
- else if (i == tx_ring->next_to_clean)
- printk(KERN_CONT " NTC\n");
- else
- printk(KERN_CONT "\n");
+ buffer_info->skb, next_desc);
if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
print_hex_dump(KERN_INFO, "",
@@ -456,11 +462,11 @@ static void igb_dump(struct igb_adapter *adapter)
/* Print RX Rings Summary */
rx_ring_summary:
dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
- printk(KERN_INFO "Queue [NTU] [NTC]\n");
+ pr_info("Queue [NTU] [NTC]\n");
for (n = 0; n < adapter->num_rx_queues; n++) {
rx_ring = adapter->rx_ring[n];
- printk(KERN_INFO " %5d %5X %5X\n", n,
- rx_ring->next_to_use, rx_ring->next_to_clean);
+ pr_info(" %5d %5X %5X\n",
+ n, rx_ring->next_to_use, rx_ring->next_to_clean);
}
/* Print RX Rings */
@@ -492,36 +498,43 @@ rx_ring_summary:
for (n = 0; n < adapter->num_rx_queues; n++) {
rx_ring = adapter->rx_ring[n];
- printk(KERN_INFO "------------------------------------\n");
- printk(KERN_INFO "RX QUEUE INDEX = %d\n", rx_ring->queue_index);
- printk(KERN_INFO "------------------------------------\n");
- printk(KERN_INFO "R [desc] [ PktBuf A0] "
- "[ HeadBuf DD] [bi->dma ] [bi->skb] "
- "<-- Adv Rx Read format\n");
- printk(KERN_INFO "RWB[desc] [PcsmIpSHl PtRs] "
- "[vl er S cks ln] ---------------- [bi->skb] "
- "<-- Adv Rx Write-Back format\n");
+ pr_info("------------------------------------\n");
+ pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
+ pr_info("------------------------------------\n");
+ pr_info("R [desc] [ PktBuf A0] [ HeadBuf DD] "
+ "[bi->dma ] [bi->skb] <-- Adv Rx Read format\n");
+ pr_info("RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] -----"
+ "----------- [bi->skb] <-- Adv Rx Write-Back format\n");
for (i = 0; i < rx_ring->count; i++) {
+ const char *next_desc;
struct igb_rx_buffer *buffer_info;
buffer_info = &rx_ring->rx_buffer_info[i];
rx_desc = IGB_RX_DESC(rx_ring, i);
u0 = (struct my_u0 *)rx_desc;
staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+
+ if (i == rx_ring->next_to_use)
+ next_desc = " NTU";
+ else if (i == rx_ring->next_to_clean)
+ next_desc = " NTC";
+ else
+ next_desc = "";
+
if (staterr & E1000_RXD_STAT_DD) {
/* Descriptor Done */
- printk(KERN_INFO "RWB[0x%03X] %016llX "
- "%016llX ---------------- %p", i,
+ pr_info("%s[0x%03X] %016llX %016llX -------"
+ "--------- %p%s\n", "RWB", i,
le64_to_cpu(u0->a),
le64_to_cpu(u0->b),
- buffer_info->skb);
+ buffer_info->skb, next_desc);
} else {
- printk(KERN_INFO "R [0x%03X] %016llX "
- "%016llX %016llX %p", i,
+ pr_info("%s[0x%03X] %016llX %016llX %016llX"
+ " %p%s\n", "R ", i,
le64_to_cpu(u0->a),
le64_to_cpu(u0->b),
(u64)buffer_info->dma,
- buffer_info->skb);
+ buffer_info->skb, next_desc);
if (netif_msg_pktdata(adapter)) {
print_hex_dump(KERN_INFO, "",
@@ -538,14 +551,6 @@ rx_ring_summary:
PAGE_SIZE/2, true);
}
}
-
- if (i == rx_ring->next_to_use)
- printk(KERN_CONT " NTU\n");
- else if (i == rx_ring->next_to_clean)
- printk(KERN_CONT " NTC\n");
- else
- printk(KERN_CONT "\n");
-
}
}
@@ -599,10 +604,10 @@ struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
static int __init igb_init_module(void)
{
int ret;
- printk(KERN_INFO "%s - version %s\n",
+ pr_info("%s - version %s\n",
igb_driver_string, igb_driver_version);
- printk(KERN_INFO "%s\n", igb_copyright);
+ pr_info("%s\n", igb_copyright);
#ifdef CONFIG_IGB_DCA
dca_register_notify(&dca_notifier);
@@ -1502,6 +1507,7 @@ void igb_power_up_link(struct igb_adapter *adapter)
igb_power_up_phy_copper(&adapter->hw);
else
igb_power_up_serdes_link_82575(&adapter->hw);
+ igb_reset_phy(&adapter->hw);
}
/**
@@ -1742,7 +1748,8 @@ void igb_reset(struct igb_adapter *adapter)
igb_get_phy_info(hw);
}
-static u32 igb_fix_features(struct net_device *netdev, u32 features)
+static netdev_features_t igb_fix_features(struct net_device *netdev,
+ netdev_features_t features)
{
/*
* Since there is no support for separate rx/tx vlan accel
@@ -1756,9 +1763,10 @@ static u32 igb_fix_features(struct net_device *netdev, u32 features)
return features;
}
-static int igb_set_features(struct net_device *netdev, u32 features)
+static int igb_set_features(struct net_device *netdev,
+ netdev_features_t features)
{
- u32 changed = netdev->features ^ features;
+ netdev_features_t changed = netdev->features ^ features;
if (changed & NETIF_F_HW_VLAN_RX)
igb_vlan_mode(netdev, features);
@@ -2113,6 +2121,8 @@ static int __devinit igb_probe(struct pci_dev *pdev,
default:
break;
}
+
+ pm_runtime_put_noidle(&pdev->dev);
return 0;
err_register:
@@ -2152,6 +2162,8 @@ static void __devexit igb_remove(struct pci_dev *pdev)
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
+ pm_runtime_get_noresume(&pdev->dev);
+
/*
* The watchdog timer may be rescheduled, so explicitly
* disable watchdog from being rescheduled.
@@ -2474,16 +2486,22 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
* handler is registered with the OS, the watchdog timer is started,
* and the stack is notified that the interface is ready.
**/
-static int igb_open(struct net_device *netdev)
+static int __igb_open(struct net_device *netdev, bool resuming)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
+ struct pci_dev *pdev = adapter->pdev;
int err;
int i;
/* disallow open during test */
- if (test_bit(__IGB_TESTING, &adapter->state))
+ if (test_bit(__IGB_TESTING, &adapter->state)) {
+ WARN_ON(resuming);
return -EBUSY;
+ }
+
+ if (!resuming)
+ pm_runtime_get_sync(&pdev->dev);
netif_carrier_off(netdev);
@@ -2529,6 +2547,9 @@ static int igb_open(struct net_device *netdev)
netif_tx_start_all_queues(netdev);
+ if (!resuming)
+ pm_runtime_put(&pdev->dev);
+
/* start the watchdog. */
hw->mac.get_link_status = 1;
schedule_work(&adapter->watchdog_task);
@@ -2543,10 +2564,17 @@ err_setup_rx:
igb_free_all_tx_resources(adapter);
err_setup_tx:
igb_reset(adapter);
+ if (!resuming)
+ pm_runtime_put(&pdev->dev);
return err;
}
+static int igb_open(struct net_device *netdev)
+{
+ return __igb_open(netdev, false);
+}
+
/**
* igb_close - Disables a network interface
* @netdev: network interface device structure
@@ -2558,21 +2586,32 @@ err_setup_tx:
* needs to be disabled. A global MAC reset is issued to stop the
* hardware, and all transmit and receive resources are freed.
**/
-static int igb_close(struct net_device *netdev)
+static int __igb_close(struct net_device *netdev, bool suspending)
{
struct igb_adapter *adapter = netdev_priv(netdev);
+ struct pci_dev *pdev = adapter->pdev;
WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
- igb_down(adapter);
+ if (!suspending)
+ pm_runtime_get_sync(&pdev->dev);
+
+ igb_down(adapter);
igb_free_irq(adapter);
igb_free_all_tx_resources(adapter);
igb_free_all_rx_resources(adapter);
+ if (!suspending)
+ pm_runtime_put_sync(&pdev->dev);
return 0;
}
+static int igb_close(struct net_device *netdev)
+{
+ return __igb_close(netdev, false);
+}
+
/**
* igb_setup_tx_resources - allocate Tx resources (Descriptors)
* @tx_ring: tx descriptor ring (for a specific queue) to setup
@@ -3203,6 +3242,7 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring)
buffer_info = &tx_ring->tx_buffer_info[i];
igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
}
+ netdev_tx_reset_queue(txring_txq(tx_ring));
size = sizeof(struct igb_tx_buffer) * tx_ring->count;
memset(tx_ring->tx_buffer_info, 0, size);
@@ -3632,6 +3672,9 @@ static void igb_watchdog_task(struct work_struct *work)
link = igb_has_link(adapter);
if (link) {
+ /* Cancel scheduled suspend requests. */
+ pm_runtime_resume(netdev->dev.parent);
+
if (!netif_carrier_ok(netdev)) {
u32 ctrl;
hw->mac.ops.get_speed_and_duplex(hw,
@@ -3640,23 +3683,23 @@ static void igb_watchdog_task(struct work_struct *work)
ctrl = rd32(E1000_CTRL);
/* Links status message must follow this format */
- printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, "
- "Flow Control: %s\n",
+ printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s "
+ "Duplex, Flow Control: %s\n",
netdev->name,
adapter->link_speed,
adapter->link_duplex == FULL_DUPLEX ?
- "Full Duplex" : "Half Duplex",
- ((ctrl & E1000_CTRL_TFCE) &&
- (ctrl & E1000_CTRL_RFCE)) ? "RX/TX" :
- ((ctrl & E1000_CTRL_RFCE) ? "RX" :
- ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None")));
+ "Full" : "Half",
+ (ctrl & E1000_CTRL_TFCE) &&
+ (ctrl & E1000_CTRL_RFCE) ? "RX/TX" :
+ (ctrl & E1000_CTRL_RFCE) ? "RX" :
+ (ctrl & E1000_CTRL_TFCE) ? "TX" : "None");
/* check for thermal sensor event */
- if (igb_thermal_sensor_event(hw, E1000_THSTAT_LINK_THROTTLE)) {
- printk(KERN_INFO "igb: %s The network adapter "
- "link speed was downshifted "
- "because it overheated.\n",
- netdev->name);
+ if (igb_thermal_sensor_event(hw,
+ E1000_THSTAT_LINK_THROTTLE)) {
+ netdev_info(netdev, "The network adapter link "
+ "speed was downshifted because it "
+ "overheated\n");
}
/* adjust timeout factor according to speed/duplex */
@@ -3686,11 +3729,10 @@ static void igb_watchdog_task(struct work_struct *work)
adapter->link_duplex = 0;
/* check for thermal sensor event */
- if (igb_thermal_sensor_event(hw, E1000_THSTAT_PWR_DOWN)) {
- printk(KERN_ERR "igb: %s The network adapter "
- "was stopped because it "
- "overheated.\n",
- netdev->name);
+ if (igb_thermal_sensor_event(hw,
+ E1000_THSTAT_PWR_DOWN)) {
+ netdev_err(netdev, "The network adapter was "
+ "stopped because it overheated\n");
}
/* Links status message must follow this format */
@@ -3704,6 +3746,9 @@ static void igb_watchdog_task(struct work_struct *work)
if (!test_bit(__IGB_DOWN, &adapter->state))
mod_timer(&adapter->phy_info_timer,
round_jiffies(jiffies + 2 * HZ));
+
+ pm_schedule_suspend(netdev->dev.parent,
+ MSEC_PER_SEC * 5);
}
}
@@ -4241,6 +4286,8 @@ static void igb_tx_map(struct igb_ring *tx_ring,
frag++;
}
+ netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
+
/* write last descriptor with RS and EOP bits */
cmd_type |= cpu_to_le32(size) | cpu_to_le32(IGB_TXD_DCMD);
tx_desc->read.cmd_type_len = cmd_type;
@@ -5780,6 +5827,8 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
}
}
+ netdev_tx_completed_queue(txring_txq(tx_ring),
+ total_packets, total_bytes);
i += tx_ring->count;
tx_ring->next_to_clean = i;
u64_stats_update_begin(&tx_ring->tx_syncp);
@@ -6138,7 +6187,7 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
return true;
if (!page) {
- page = netdev_alloc_page(rx_ring->netdev);
+ page = alloc_page(GFP_ATOMIC | __GFP_COLD);
bi->page = page;
if (unlikely(!page)) {
rx_ring->rx_stats.alloc_failed++;
@@ -6467,7 +6516,7 @@ s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
return 0;
}
-static void igb_vlan_mode(struct net_device *netdev, u32 features)
+static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -6494,7 +6543,7 @@ static void igb_vlan_mode(struct net_device *netdev, u32 features)
igb_rlpml_set(adapter);
}
-static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+static int igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -6507,9 +6556,11 @@ static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
igb_vfta_set(hw, vid, true);
set_bit(vid, adapter->active_vlans);
+
+ return 0;
}
-static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+static int igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -6524,6 +6575,8 @@ static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
igb_vfta_set(hw, vid, false);
clear_bit(vid, adapter->active_vlans);
+
+ return 0;
}
static void igb_restore_vlan(struct igb_adapter *adapter)
@@ -6582,13 +6635,14 @@ err_inval:
return -EINVAL;
}
-static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
+static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
+ bool runtime)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
u32 ctrl, rctl, status;
- u32 wufc = adapter->wol;
+ u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
#ifdef CONFIG_PM
int retval = 0;
#endif
@@ -6596,7 +6650,7 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
netif_device_detach(netdev);
if (netif_running(netdev))
- igb_close(netdev);
+ __igb_close(netdev, true);
igb_clear_interrupt_scheme(adapter);
@@ -6655,12 +6709,13 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
}
#ifdef CONFIG_PM
-static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
+static int igb_suspend(struct device *dev)
{
int retval;
bool wake;
+ struct pci_dev *pdev = to_pci_dev(dev);
- retval = __igb_shutdown(pdev, &wake);
+ retval = __igb_shutdown(pdev, &wake, 0);
if (retval)
return retval;
@@ -6674,8 +6729,9 @@ static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
return 0;
}
-static int igb_resume(struct pci_dev *pdev)
+static int igb_resume(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -6696,7 +6752,18 @@ static int igb_resume(struct pci_dev *pdev)
pci_enable_wake(pdev, PCI_D3hot, 0);
pci_enable_wake(pdev, PCI_D3cold, 0);
- if (igb_init_interrupt_scheme(adapter)) {
+ if (!rtnl_is_locked()) {
+ /*
+ * shut up ASSERT_RTNL() warning in
+ * netif_set_real_num_tx/rx_queues.
+ */
+ rtnl_lock();
+ err = igb_init_interrupt_scheme(adapter);
+ rtnl_unlock();
+ } else {
+ err = igb_init_interrupt_scheme(adapter);
+ }
+ if (err) {
dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
return -ENOMEM;
}
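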
@@ -6709,23 +6776,61 @@ static int igb_resume(struct pci_dev *pdev)
wr32(E1000_WUS, ~0);
- if (netif_running(netdev)) {
- err = igb_open(netdev);
+ if (netdev->flags & IFF_UP) {
+ err = __igb_open(netdev, true);
if (err)
return err;
}
netif_device_attach(netdev);
+ return 0;
+}
+
+#ifdef CONFIG_PM_RUNTIME
+static int igb_runtime_idle(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct igb_adapter *adapter = netdev_priv(netdev);
+
+ if (!igb_has_link(adapter))
+ pm_schedule_suspend(dev, MSEC_PER_SEC * 5);
+
+ return -EBUSY;
+}
+
+static int igb_runtime_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int retval;
+ bool wake;
+
+ retval = __igb_shutdown(pdev, &wake, 1);
+ if (retval)
+ return retval;
+
+ if (wake) {
+ pci_prepare_to_sleep(pdev);
+ } else {
+ pci_wake_from_d3(pdev, false);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
return 0;
}
+
+static int igb_runtime_resume(struct device *dev)
+{
+ return igb_resume(dev);
+}
+#endif /* CONFIG_PM_RUNTIME */
#endif
static void igb_shutdown(struct pci_dev *pdev)
{
bool wake;
- __igb_shutdown(pdev, &wake);
+ __igb_shutdown(pdev, &wake, 0);
if (system_state == SYSTEM_POWER_OFF) {
pci_wake_from_d3(pdev, wake);
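The additions above give igb runtime power management: igb_runtime_idle() asks the PM core to schedule a suspend five seconds after the link is found down, igb_runtime_suspend() reuses __igb_shutdown() with the runtime flag set so only a link-status change (E1000_WUFC_LNKC) can wake the part, and igb_runtime_resume() falls through to the normal resume path. Such callbacks are normally published through a dev_pm_ops table; a hedged sketch of that wiring follows (the my_* names are placeholders, and the igb table itself is outside this hunk).

#include <linux/pm.h>
#include <linux/pm_runtime.h>

/* Sketch only: expose system-sleep and runtime callbacks in one table. */
static const struct dev_pm_ops my_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(my_suspend, my_resume)
	SET_RUNTIME_PM_OPS(my_runtime_suspend, my_runtime_resume,
			   my_runtime_idle)
};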
@@ -7064,15 +7169,28 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
wr32(E1000_DMCTXTH, 0);
/*
- * DMA Coalescing high water mark needs to be higher
- * than the RX threshold. set hwm to PBA - 2 * max
- * frame size
+ * DMA Coalescing high water mark needs to be greater
+ * than the Rx threshold. Set hwm to PBA - max frame
+ * size in 16B units, capping it at PBA - 6KB.
*/
- hwm = pba - (2 * adapter->max_frame_size);
+ hwm = 64 * pba - adapter->max_frame_size / 16;
+ if (hwm < 64 * (pba - 6))
+ hwm = 64 * (pba - 6);
+ reg = rd32(E1000_FCRTC);
+ reg &= ~E1000_FCRTC_RTH_COAL_MASK;
+ reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT)
+ & E1000_FCRTC_RTH_COAL_MASK);
+ wr32(E1000_FCRTC, reg);
+
+ /*
+ * Set the DMA Coalescing Rx threshold to PBA - 2 * max
+ * frame size, capping it at PBA - 10KB.
+ */
+ dmac_thr = pba - adapter->max_frame_size / 512;
+ if (dmac_thr < pba - 10)
+ dmac_thr = pba - 10;
reg = rd32(E1000_DMACR);
reg &= ~E1000_DMACR_DMACTHR_MASK;
- dmac_thr = pba - 4;
-
reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT)
& E1000_DMACR_DMACTHR_MASK);
@@ -7088,7 +7206,6 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
* coalescing(smart fifo)-UTRESH=0
*/
wr32(E1000_DMCRTRH, 0);
- wr32(E1000_FCRTC, hwm);
reg = (IGB_DMCTLX_DCFLUSH_DIS | 0x4);
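For reference, the rewritten watermark math above works in hardware units: pba is the packet-buffer size in KB, so 64 * pba converts it to 16-byte blocks for the FCRTC high water mark, while the DMACR threshold stays in the same KB units as pba (max_frame_size / 512 is roughly two frames expressed in KB); both values are bounded from below, at PBA - 6KB and PBA - 10KB respectively. A standalone sketch with purely illustrative inputs:

#include <stdio.h>

int main(void)
{
	unsigned int pba = 34;              /* packet buffer in KB (illustrative) */
	unsigned int max_frame_size = 1522; /* bytes (illustrative) */

	/* high water mark in 16-byte units, never below PBA - 6KB */
	unsigned int hwm = 64 * pba - max_frame_size / 16;
	if (hwm < 64 * (pba - 6))
		hwm = 64 * (pba - 6);

	/* Rx threshold in KB, never below PBA - 10KB */
	unsigned int dmac_thr = pba - max_frame_size / 512;
	if (dmac_thr < pba - 10)
		dmac_thr = pba - 10;

	printf("hwm = %u x 16B, dmac_thr = %u KB\n", hwm, dmac_thr);
	return 0;
}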
diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c
index 2c25858cc0ff..7b600a1f6366 100644
--- a/drivers/net/ethernet/intel/igbvf/ethtool.c
+++ b/drivers/net/ethernet/intel/igbvf/ethtool.c
@@ -191,12 +191,12 @@ static void igbvf_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
- char firmware_version[32] = "N/A";
- strncpy(drvinfo->driver, igbvf_driver_name, 32);
- strncpy(drvinfo->version, igbvf_driver_version, 32);
- strncpy(drvinfo->fw_version, firmware_version, 32);
- strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+ strlcpy(drvinfo->driver, igbvf_driver_name, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, igbvf_driver_version,
+ sizeof(drvinfo->version));
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info));
drvinfo->regdump_len = igbvf_get_regs_len(netdev);
drvinfo->eedump_len = igbvf_get_eeprom_len(netdev);
}
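This hunk (and the matching ones in ixgb, ixgbe, ixgbevf and mv643xx_eth further down) replaces strncpy() with a hard-coded 32 by strlcpy() sized to the destination field, which both tracks the real field width and guarantees NUL termination. A hedged kernel-style sketch of the pattern; the stub struct stands in for struct ethtool_drvinfo.

#include <linux/string.h>

struct drvinfo_stub {
	char driver[32];
	char version[32];
};

static void fill_drvinfo(struct drvinfo_stub *d)
{
	/* sizeof(d->driver) follows the field if it is ever resized,
	 * and strlcpy() always NUL-terminates within that bound */
	strlcpy(d->driver, "example", sizeof(d->driver));
	strlcpy(d->version, "1.0.0-k", sizeof(d->version));
}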
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index cca78124be31..fd3da3076c2f 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -25,6 +25,8 @@
*******************************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
@@ -1174,18 +1176,20 @@ static void igbvf_set_rlpml(struct igbvf_adapter *adapter)
e1000_rlpml_set_vf(hw, max_frame_size);
}
-static void igbvf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+static int igbvf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- if (hw->mac.ops.set_vfta(hw, vid, true))
+ if (hw->mac.ops.set_vfta(hw, vid, true)) {
dev_err(&adapter->pdev->dev, "Failed to add vlan id %d\n", vid);
- else
- set_bit(vid, adapter->active_vlans);
+ return -EINVAL;
+ }
+ set_bit(vid, adapter->active_vlans);
+ return 0;
}
-static void igbvf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+static int igbvf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -1195,11 +1199,13 @@ static void igbvf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
if (!test_bit(__IGBVF_DOWN, &adapter->state))
igbvf_irq_enable(adapter);
- if (hw->mac.ops.set_vfta(hw, vid, false))
+ if (hw->mac.ops.set_vfta(hw, vid, false)) {
dev_err(&adapter->pdev->dev,
"Failed to remove vlan id %d\n", vid);
- else
- clear_bit(vid, adapter->active_vlans);
+ return -EINVAL;
+ }
+ clear_bit(vid, adapter->active_vlans);
+ return 0;
}
static void igbvf_restore_vlan(struct igbvf_adapter *adapter)
@@ -1746,10 +1752,9 @@ void igbvf_update_stats(struct igbvf_adapter *adapter)
static void igbvf_print_link_info(struct igbvf_adapter *adapter)
{
- dev_info(&adapter->pdev->dev, "Link is Up %d Mbps %s\n",
- adapter->link_speed,
- ((adapter->link_duplex == FULL_DUPLEX) ?
- "Full Duplex" : "Half Duplex"));
+ dev_info(&adapter->pdev->dev, "Link is Up %d Mbps %s Duplex\n",
+ adapter->link_speed,
+ adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half");
}
static bool igbvf_has_link(struct igbvf_adapter *adapter)
@@ -2532,7 +2537,8 @@ static void igbvf_print_device_info(struct igbvf_adapter *adapter)
dev_info(&pdev->dev, "Address: %pM\n", netdev->dev_addr);
}
-static int igbvf_set_features(struct net_device *netdev, u32 features)
+static int igbvf_set_features(struct net_device *netdev,
+ netdev_features_t features)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
@@ -2842,9 +2848,8 @@ static struct pci_driver igbvf_driver = {
static int __init igbvf_init_module(void)
{
int ret;
- printk(KERN_INFO "%s - version %s\n",
- igbvf_driver_string, igbvf_driver_version);
- printk(KERN_INFO "%s\n", igbvf_copyright);
+ pr_info("%s - version %s\n", igbvf_driver_string, igbvf_driver_version);
+ pr_info("%s\n", igbvf_copyright);
ret = pci_register_driver(&igbvf_driver);
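The pr_fmt() definition added near the top of this file makes every pr_info()/pr_err()/pr_warn() in it prefix its output with KBUILD_MODNAME, so the converted call sites above no longer need to spell out the driver name in each format string. A minimal, hedged sketch of the mechanism (the message text is illustrative):

/* must be defined before the printk helpers it affects are pulled in */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/printk.h>

static void report_link(void)
{
	/* with KBUILD_MODNAME == "igbvf" this prints "igbvf: link is up" */
	pr_info("link is up\n");
}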
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
index 9dfce7dff79b..dbb7dd2f8e36 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
@@ -473,10 +473,12 @@ ixgb_get_drvinfo(struct net_device *netdev,
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
- strncpy(drvinfo->driver, ixgb_driver_name, 32);
- strncpy(drvinfo->version, ixgb_driver_version, 32);
- strncpy(drvinfo->fw_version, "N/A", 32);
- strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+ strlcpy(drvinfo->driver, ixgb_driver_name,
+ sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, ixgb_driver_version,
+ sizeof(drvinfo->version));
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info));
drvinfo->n_stats = IXGB_STATS_LEN;
drvinfo->regdump_len = ixgb_get_regs_len(netdev);
drvinfo->eedump_len = ixgb_get_eeprom_len(netdev);
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index e21148f8b160..9bd5faf64a85 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -101,8 +101,8 @@ static void ixgb_tx_timeout_task(struct work_struct *work);
static void ixgb_vlan_strip_enable(struct ixgb_adapter *adapter);
static void ixgb_vlan_strip_disable(struct ixgb_adapter *adapter);
-static void ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
-static void ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
+static int ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
+static int ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
static void ixgb_restore_vlan(struct ixgb_adapter *adapter);
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -228,7 +228,7 @@ ixgb_up(struct ixgb_adapter *adapter)
if (IXGB_READ_REG(&adapter->hw, STATUS) & IXGB_STATUS_PCIX_MODE) {
err = pci_enable_msi(adapter->pdev);
if (!err) {
- adapter->have_msi = 1;
+ adapter->have_msi = true;
irq_flags = 0;
}
/* proceed to try to request regular interrupt */
@@ -325,8 +325,8 @@ ixgb_reset(struct ixgb_adapter *adapter)
}
}
-static u32
-ixgb_fix_features(struct net_device *netdev, u32 features)
+static netdev_features_t
+ixgb_fix_features(struct net_device *netdev, netdev_features_t features)
{
/*
* Tx VLAN insertion does not work per HW design when Rx stripping is
@@ -339,10 +339,10 @@ ixgb_fix_features(struct net_device *netdev, u32 features)
}
static int
-ixgb_set_features(struct net_device *netdev, u32 features)
+ixgb_set_features(struct net_device *netdev, netdev_features_t features)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
- u32 changed = features ^ netdev->features;
+ netdev_features_t changed = features ^ netdev->features;
if (!(changed & (NETIF_F_RXCSUM|NETIF_F_HW_VLAN_RX)))
return 0;
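Many hunks in this series widen feature masks from u32 to netdev_features_t (a 64-bit type), touching the ndo_fix_features/ndo_set_features handlers of igb, igbvf, ixgb, ixgbe, ixgbevf, jme and mv643xx_eth. A hedged sketch of a handler written against the wider type; my_set_features is a placeholder, not one of the drivers above.

#include <linux/netdevice.h>

static int my_set_features(struct net_device *netdev,
			   netdev_features_t features)
{
	netdev_features_t changed = features ^ netdev->features;

	/* only react to the offload bits the hardware actually implements */
	if (!(changed & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX)))
		return 0;

	/* ...reprogram checksum and VLAN offloads here... */
	return 0;
}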
@@ -2217,7 +2217,7 @@ ixgb_vlan_strip_disable(struct ixgb_adapter *adapter)
IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
}
-static void
+static int
ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
@@ -2230,9 +2230,11 @@ ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
vfta |= (1 << (vid & 0x1F));
ixgb_write_vfta(&adapter->hw, index, vfta);
set_bit(vid, adapter->active_vlans);
+
+ return 0;
}
-static void
+static int
ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
@@ -2245,6 +2247,8 @@ ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
vfta &= ~(1 << (vid & 0x1F));
ixgb_write_vfta(&adapter->hw, index, vfta);
clear_bit(vid, adapter->active_vlans);
+
+ return 0;
}
static void
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index a8368d5cf686..258164d6d45a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -560,6 +560,7 @@ extern int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg,
extern char ixgbe_driver_name[];
extern const char ixgbe_driver_version[];
+extern char ixgbe_default_device_descr[];
extern void ixgbe_up(struct ixgbe_adapter *adapter);
extern void ixgbe_down(struct ixgbe_adapter *adapter);
@@ -627,6 +628,8 @@ extern u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter);
extern u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up);
#endif /* CONFIG_IXGBE_DCB */
extern int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type);
+extern int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
+ struct netdev_fcoe_hbainfo *info);
#endif /* IXGBE_FCOE */
#endif /* _IXGBE_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index 4ae26a748da0..772072147bea 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -356,6 +356,7 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_82599_SFP_FCOE:
case IXGBE_DEV_ID_82599_SFP_EM:
case IXGBE_DEV_ID_82599_SFP_SF2:
+ case IXGBE_DEV_ID_82599_SFP_SF_QP:
case IXGBE_DEV_ID_82599EN_SFP:
media_type = ixgbe_media_type_fiber;
break;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index f1365fef4ed2..a3aa6333073f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -266,10 +266,10 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
if (hw->mac.type == ixgbe_mac_X540) {
if (hw->phy.id == 0)
hw->phy.ops.identify(hw);
- hw->phy.ops.read_reg(hw, 0x3, IXGBE_PCRC8ECL, &i);
- hw->phy.ops.read_reg(hw, 0x3, IXGBE_PCRC8ECH, &i);
- hw->phy.ops.read_reg(hw, 0x3, IXGBE_LDPCECL, &i);
- hw->phy.ops.read_reg(hw, 0x3, IXGBE_LDPCECH, &i);
+ hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL, MDIO_MMD_PCS, &i);
+ hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH, MDIO_MMD_PCS, &i);
+ hw->phy.ops.read_reg(hw, IXGBE_LDPCECL, MDIO_MMD_PCS, &i);
+ hw->phy.ops.read_reg(hw, IXGBE_LDPCECH, MDIO_MMD_PCS, &i);
}
return 0;
@@ -2599,7 +2599,7 @@ s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
{
ixgbe_link_speed speed = 0;
- bool link_up = 0;
+ bool link_up = false;
u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index 33b93ffb87cb..da31735311f1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -158,10 +158,6 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- /* Abort a bad configuration */
- if (ffs(up_map) > adapter->dcb_cfg.num_tcs.pg_tcs)
- return;
-
if (prio != DCB_ATTR_VALUE_UNDEFINED)
adapter->temp_dcb_cfg.tc_config[tc].path[0].prio_type = prio;
if (bwg_id != DCB_ATTR_VALUE_UNDEFINED)
@@ -185,7 +181,7 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
if (adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap !=
adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap)
- adapter->dcb_set_bitmap |= BIT_PFC;
+ adapter->dcb_set_bitmap |= BIT_PFC | BIT_APP_UPCHG;
}
static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
@@ -206,10 +202,6 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- /* Abort bad configurations */
- if (ffs(up_map) > adapter->dcb_cfg.num_tcs.pg_tcs)
- return;
-
if (prio != DCB_ATTR_VALUE_UNDEFINED)
adapter->temp_dcb_cfg.tc_config[tc].path[1].prio_type = prio;
if (bwg_id != DCB_ATTR_VALUE_UNDEFINED)
@@ -309,6 +301,27 @@ static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
*setting = adapter->dcb_cfg.tc_config[priority].dcb_pfc;
}
+#ifdef IXGBE_FCOE
+static void ixgbe_dcbnl_devreset(struct net_device *dev)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+
+ while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
+ usleep_range(1000, 2000);
+
+ if (netif_running(dev))
+ dev->netdev_ops->ndo_stop(dev);
+
+ ixgbe_clear_interrupt_scheme(adapter);
+ ixgbe_init_interrupt_scheme(adapter);
+
+ if (netif_running(dev))
+ dev->netdev_ops->ndo_open(dev);
+
+ clear_bit(__IXGBE_RESETTING, &adapter->state);
+}
+#endif
+
static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -338,27 +351,6 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
if (ret)
return DCB_NO_HW_CHG;
-#ifdef IXGBE_FCOE
- if (up && !(up & (1 << adapter->fcoe.up)))
- adapter->dcb_set_bitmap |= BIT_APP_UPCHG;
-
- /*
- * Only take down the adapter if an app change occurred. FCoE
- * may shuffle tx rings in this case and this can not be done
- * without a reset currently.
- */
- if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
- while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
- usleep_range(1000, 2000);
-
- adapter->fcoe.up = ffs(up) - 1;
-
- if (netif_running(netdev))
- netdev->netdev_ops->ndo_stop(netdev);
- ixgbe_clear_interrupt_scheme(adapter);
- }
-#endif
-
if (adapter->dcb_cfg.pfc_mode_enable) {
switch (adapter->hw.mac.type) {
case ixgbe_mac_82599EB:
@@ -385,15 +377,6 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
}
}
-#ifdef IXGBE_FCOE
- if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
- ixgbe_init_interrupt_scheme(adapter);
- if (netif_running(netdev))
- netdev->netdev_ops->ndo_open(netdev);
- ret = DCB_HW_CHG_RST;
- }
-#endif
-
if (adapter->dcb_set_bitmap & (BIT_PG_TX|BIT_PG_RX)) {
u16 refill[MAX_TRAFFIC_CLASS], max[MAX_TRAFFIC_CLASS];
u8 bwg_id[MAX_TRAFFIC_CLASS], prio_type[MAX_TRAFFIC_CLASS];
@@ -442,8 +425,19 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
if (adapter->dcb_cfg.pfc_mode_enable)
adapter->hw.fc.current_mode = ixgbe_fc_pfc;
- if (adapter->dcb_set_bitmap & BIT_APP_UPCHG)
- clear_bit(__IXGBE_RESETTING, &adapter->state);
+#ifdef IXGBE_FCOE
+ /* Reprogram FCoE hardware offloads when the traffic class
+ * FCoE is using changes. This happens if the APP info
+ * changes or the up2tc mapping is updated.
+ */
+ if ((up && !(up & (1 << adapter->fcoe.up))) ||
+ (adapter->dcb_set_bitmap & BIT_APP_UPCHG)) {
+ adapter->fcoe.up = ffs(up) - 1;
+ ixgbe_dcbnl_devreset(netdev);
+ ret = DCB_HW_CHG_RST;
+ }
+#endif
+
adapter->dcb_set_bitmap = 0x00;
return ret;
}
@@ -661,22 +655,6 @@ static int ixgbe_dcbnl_ieee_setpfc(struct net_device *dev,
return ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc->pfc_en, prio_tc);
}
-#ifdef IXGBE_FCOE
-static void ixgbe_dcbnl_devreset(struct net_device *dev)
-{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
-
- if (netif_running(dev))
- dev->netdev_ops->ndo_stop(dev);
-
- ixgbe_clear_interrupt_scheme(adapter);
- ixgbe_init_interrupt_scheme(adapter);
-
- if (netif_running(dev))
- dev->netdev_ops->ndo_open(dev);
-}
-#endif
-
static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev,
struct dcb_app *app)
{
@@ -761,7 +739,9 @@ static u8 ixgbe_dcbnl_setdcbx(struct net_device *dev, u8 mode)
ixgbe_dcbnl_ieee_setets(dev, &ets);
ixgbe_dcbnl_ieee_setpfc(dev, &pfc);
} else if (mode & DCB_CAP_DCBX_VER_CEE) {
- adapter->dcb_set_bitmap |= (BIT_PFC & BIT_PG_TX & BIT_PG_RX);
+ u8 mask = BIT_PFC | BIT_PG_TX | BIT_PG_RX | BIT_APP_UPCHG;
+
+ adapter->dcb_set_bitmap |= mask;
ixgbe_dcbnl_set_all(dev);
} else {
/* Drop into single TC mode strict priority as this
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 70d58c3849b0..da7e580f517a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -888,23 +888,19 @@ static void ixgbe_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- char firmware_version[32];
u32 nvm_track_id;
- strncpy(drvinfo->driver, ixgbe_driver_name,
- sizeof(drvinfo->driver) - 1);
- strncpy(drvinfo->version, ixgbe_driver_version,
- sizeof(drvinfo->version) - 1);
+ strlcpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, ixgbe_driver_version,
+ sizeof(drvinfo->version));
nvm_track_id = (adapter->eeprom_verh << 16) |
adapter->eeprom_verl;
- snprintf(firmware_version, sizeof(firmware_version), "0x%08x",
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "0x%08x",
nvm_track_id);
- strncpy(drvinfo->fw_version, firmware_version,
- sizeof(drvinfo->fw_version) - 1);
- strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
- sizeof(drvinfo->bus_info) - 1);
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info));
drvinfo->n_stats = IXGBE_STATS_LEN;
drvinfo->testinfo_len = IXGBE_TEST_LEN;
drvinfo->regdump_len = ixgbe_get_regs_len(netdev);
@@ -1959,12 +1955,21 @@ static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
/* WOL not supported except for the following */
switch(hw->device_id) {
case IXGBE_DEV_ID_82599_SFP:
- /* Only this subdevice supports WOL */
- if (hw->subsystem_device_id != IXGBE_SUBDEV_ID_82599_SFP) {
+ /* Only these subdevices support WOL */
+ switch (hw->subsystem_device_id) {
+ case IXGBE_SUBDEV_ID_82599_560FLR:
+ /* only support first port */
+ if (hw->bus.func != 0) {
+ wol->supported = 0;
+ break;
+ }
+ case IXGBE_SUBDEV_ID_82599_SFP:
+ retval = 0;
+ break;
+ default:
wol->supported = 0;
break;
}
- retval = 0;
break;
case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
/* All except this subdevice support WOL */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index df3b1be69d83..d18d6157dd2c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -855,3 +855,86 @@ int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
}
return rc;
}
+
+/**
+ * ixgbe_fcoe_get_hbainfo - get FCoE HBA information
+ * @netdev: the net_device the FCoE HBA is attached to
+ * @info: buffer to fill in with HBA information
+ *
+ * Returns 0 on success
+ */
+int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
+ struct netdev_fcoe_hbainfo *info)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i, pos;
+ u8 buf[8];
+
+ if (!info)
+ return -EINVAL;
+
+ /* Don't return information on unsupported devices */
+ if (hw->mac.type != ixgbe_mac_82599EB &&
+ hw->mac.type != ixgbe_mac_X540)
+ return -EINVAL;
+
+ /* Manufacturer */
+ snprintf(info->manufacturer, sizeof(info->manufacturer),
+ "Intel Corporation");
+
+ /* Serial Number */
+
+ /* Get the PCI-e Device Serial Number Capability */
+ pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_DSN);
+ if (pos) {
+ pos += 4;
+ for (i = 0; i < 8; i++)
+ pci_read_config_byte(adapter->pdev, pos + i, &buf[i]);
+
+ snprintf(info->serial_number, sizeof(info->serial_number),
+ "%02X%02X%02X%02X%02X%02X%02X%02X",
+ buf[7], buf[6], buf[5], buf[4],
+ buf[3], buf[2], buf[1], buf[0]);
+ } else
+ snprintf(info->serial_number, sizeof(info->serial_number),
+ "Unknown");
+
+ /* Hardware Version */
+ snprintf(info->hardware_version,
+ sizeof(info->hardware_version),
+ "Rev %d", hw->revision_id);
+ /* Driver Name/Version */
+ snprintf(info->driver_version,
+ sizeof(info->driver_version),
+ "%s v%s",
+ ixgbe_driver_name,
+ ixgbe_driver_version);
+ /* Firmware Version */
+ snprintf(info->firmware_version,
+ sizeof(info->firmware_version),
+ "0x%08x",
+ (adapter->eeprom_verh << 16) |
+ adapter->eeprom_verl);
+
+ /* Model */
+ if (hw->mac.type == ixgbe_mac_82599EB) {
+ snprintf(info->model,
+ sizeof(info->model),
+ "Intel 82599");
+ } else {
+ snprintf(info->model,
+ sizeof(info->model),
+ "Intel X540");
+ }
+
+ /* Model Description */
+ snprintf(info->model_description,
+ sizeof(info->model_description),
+ "%s",
+ ixgbe_default_device_descr);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 8ef92d1a6aa1..1ee5d0fbb905 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -55,6 +55,8 @@
char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
"Intel(R) 10 Gigabit PCI Express Network Driver";
+char ixgbe_default_device_descr[] =
+ "Intel(R) 10 Gigabit Network Connection";
#define MAJ 3
#define MIN 6
#define BUILD 7
@@ -106,6 +108,7 @@ static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 },
/* required last entry */
{0, }
};
@@ -146,7 +149,7 @@ static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter)
{
BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state));
- /* flush memory to make sure state is correct before next watchog */
+ /* flush memory to make sure state is correct before next watchdog */
smp_mb__before_clear_bit();
clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
}
@@ -1140,7 +1143,7 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
if (ring_is_ps_enabled(rx_ring)) {
if (!bi->page) {
- bi->page = netdev_alloc_page(rx_ring->netdev);
+ bi->page = alloc_page(GFP_ATOMIC | __GFP_COLD);
if (!bi->page) {
rx_ring->rx_stats.alloc_rx_page_failed++;
goto no_buffers;
@@ -2156,7 +2159,7 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
/* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read
- * therefore no explict interrupt disable is necessary */
+ * therefore no explicit interrupt disable is necessary */
eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
if (!eicr) {
/*
@@ -3044,7 +3047,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
hw->mac.ops.enable_rx_dma(hw, rxctrl);
}
-static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+static int ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
@@ -3053,9 +3056,11 @@ static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
/* add VID to filter table */
hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, true);
set_bit(vid, adapter->active_vlans);
+
+ return 0;
}
-static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
@@ -3064,6 +3069,8 @@ static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
/* remove VID from filter table */
hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false);
clear_bit(vid, adapter->active_vlans);
+
+ return 0;
}
/**
@@ -3602,7 +3609,7 @@ static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
{
/*
- * We are assuming the worst case scenerio here, and that
+ * We are assuming the worst case scenario here, and that
* is that an SFP was inserted/removed after the reset
* but before SFP detection was enabled. As such the best
* solution is to just start searching as soon as we start
@@ -3824,7 +3831,7 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
case IXGBE_ERR_EEPROM_VERSION:
/* We are running on a pre-production device, log a warning */
e_dev_warn("This device is a pre-production adapter/LOM. "
- "Please be aware there may be issuesassociated with "
+ "Please be aware there may be issues associated with "
"your hardware. If you are experiencing problems "
"please contact your Intel or hardware "
"representative who provided you with this "
@@ -4019,7 +4026,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
/* Mark all the VFs as inactive */
for (i = 0 ; i < adapter->num_vfs; i++)
- adapter->vfinfo[i].clear_to_send = 0;
+ adapter->vfinfo[i].clear_to_send = false;
/* ping all the active vfs to let them know we are going down */
ixgbe_ping_all_vfs(adapter);
@@ -5788,9 +5795,9 @@ static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter)
* @adapter - pointer to the device adapter structure
*
* This function serves two purposes. First it strobes the interrupt lines
- * in order to make certain interrupts are occuring. Secondly it sets the
+ * in order to make certain interrupts are occurring. Secondly it sets the
* bits needed to check for TX hangs. As a result we should immediately
- * determine if a hang has occured.
+ * determine if a hang has occurred.
*/
static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
{
@@ -7128,7 +7135,7 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
return -EINVAL;
/* Hardware has to reinitialize queues and interrupts to
- * match packet buffer alignment. Unfortunantly, the
+ * match packet buffer alignment. Unfortunately, the
* hardware is not flexible enough to do this dynamically.
*/
if (netif_running(dev))
@@ -7174,7 +7181,8 @@ void ixgbe_do_reset(struct net_device *netdev)
ixgbe_reset(adapter);
}
-static u32 ixgbe_fix_features(struct net_device *netdev, u32 data)
+static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
+ netdev_features_t data)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -7204,7 +7212,8 @@ static u32 ixgbe_fix_features(struct net_device *netdev, u32 data)
return data;
}
-static int ixgbe_set_features(struct net_device *netdev, u32 data)
+static int ixgbe_set_features(struct net_device *netdev,
+ netdev_features_t data)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
bool need_reset = false;
@@ -7286,6 +7295,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_fcoe_enable = ixgbe_fcoe_enable,
.ndo_fcoe_disable = ixgbe_fcoe_disable,
.ndo_fcoe_get_wwn = ixgbe_fcoe_get_wwn,
+ .ndo_fcoe_get_hbainfo = ixgbe_fcoe_get_hbainfo,
#endif /* IXGBE_FCOE */
.ndo_set_features = ixgbe_set_features,
.ndo_fix_features = ixgbe_fix_features,
@@ -7598,9 +7608,16 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
adapter->wol = 0;
switch (pdev->device) {
case IXGBE_DEV_ID_82599_SFP:
- /* Only this subdevice supports WOL */
- if (pdev->subsystem_device == IXGBE_SUBDEV_ID_82599_SFP)
+ /* Only these subdevices support WOL */
+ switch (pdev->subsystem_device) {
+ case IXGBE_SUBDEV_ID_82599_560FLR:
+ /* only support first port */
+ if (hw->bus.func != 0)
+ break;
+ case IXGBE_SUBDEV_ID_82599_SFP:
adapter->wol = IXGBE_WUFC_MAG;
+ break;
+ }
break;
case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
/* All except this subdevice support WOL */
@@ -7708,7 +7725,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
/* add san mac addr to netdev */
ixgbe_add_sanmac_netdev(netdev);
- e_dev_info("Intel(R) 10 Gigabit Network Connection\n");
+ e_dev_info("%s\n", ixgbe_default_device_descr);
cards_found++;
return 0;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 9a56fd74e673..7cf1e1f56c69 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -1214,7 +1214,7 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
u32 max_retry = 10;
u32 retry = 0;
u16 swfw_mask = 0;
- bool nack = 1;
+ bool nack = true;
*data = 0;
if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
@@ -1421,7 +1421,7 @@ static void ixgbe_i2c_stop(struct ixgbe_hw *hw)
static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data)
{
s32 i;
- bool bit = 0;
+ bool bit = false;
for (i = 7; i >= 0; i--) {
ixgbe_clock_in_i2c_bit(hw, &bit);
@@ -1443,7 +1443,7 @@ static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data)
s32 status = 0;
s32 i;
u32 i2cctl;
- bool bit = 0;
+ bool bit = false;
for (i = 7; i >= 0; i--) {
bit = (data >> i) & 0x1;
@@ -1457,6 +1457,7 @@ static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data)
i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
i2cctl |= IXGBE_I2C_DATA_OUT;
IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, i2cctl);
+ IXGBE_WRITE_FLUSH(hw);
return status;
}
@@ -1473,7 +1474,7 @@ static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
u32 i = 0;
u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
u32 timeout = 10;
- bool ack = 1;
+ bool ack = true;
ixgbe_raise_i2c_clk(hw, &i2cctl);
@@ -1646,9 +1647,9 @@ static bool ixgbe_get_i2c_data(u32 *i2cctl)
bool data;
if (*i2cctl & IXGBE_I2C_DATA_IN)
- data = 1;
+ data = true;
else
- data = 0;
+ data = false;
return data;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 00fcd39ad666..cf6812dd1436 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -572,7 +572,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
/* reply to reset with ack and vf mac address */
msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK;
- memcpy(new_mac, vf_mac, IXGBE_ETH_LENGTH_OF_ADDRESS);
+ memcpy(new_mac, vf_mac, ETH_ALEN);
/*
* Piggyback the multicast filter type so VF can compute the
* correct vectors
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
index df04f1a3857c..e8badab03359 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
@@ -33,7 +33,6 @@ void ixgbe_msg_task(struct ixgbe_adapter *adapter);
int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask);
void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter);
void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter);
-void ixgbe_dump_registers(struct ixgbe_adapter *adapter);
int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac);
int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan,
u8 qos);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 6c5cca808bd7..802bfa0f62cc 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -57,6 +57,7 @@
#define IXGBE_DEV_ID_82599_BACKPLANE_FCOE 0x152a
#define IXGBE_DEV_ID_82599_SFP_FCOE 0x1529
#define IXGBE_SUBDEV_ID_82599_SFP 0x11A9
+#define IXGBE_SUBDEV_ID_82599_560FLR 0x17D0
#define IXGBE_DEV_ID_82599_SFP_EM 0x1507
#define IXGBE_DEV_ID_82599_SFP_SF2 0x154D
#define IXGBE_DEV_ID_82599EN_SFP 0x1557
@@ -65,6 +66,7 @@
#define IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ 0x000C
#define IXGBE_DEV_ID_82599_LS 0x154F
#define IXGBE_DEV_ID_X540T 0x1528
+#define IXGBE_DEV_ID_82599_SFP_SF_QP 0x154A
/* VF Device IDs */
#define IXGBE_DEV_ID_82599_VF 0x10ED
@@ -1710,8 +1712,6 @@ enum {
#define IXGBE_NVM_POLL_WRITE 1 /* Flag for polling for write complete */
#define IXGBE_NVM_POLL_READ 0 /* Flag for polling for read complete */
-#define IXGBE_ETH_LENGTH_OF_ADDRESS 6
-
#define IXGBE_EEPROM_PAGE_SIZE_MAX 128
#define IXGBE_EEPROM_RD_BUFFER_MAX_COUNT 512 /* EEPROM words # read in burst */
#define IXGBE_EEPROM_WR_BUFFER_MAX_COUNT 256 /* EEPROM words # wr in burst */
@@ -2802,9 +2802,9 @@ struct ixgbe_eeprom_info {
struct ixgbe_mac_info {
struct ixgbe_mac_operations ops;
enum ixgbe_mac_type type;
- u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
- u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
- u8 san_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
+ u8 addr[ETH_ALEN];
+ u8 perm_addr[ETH_ALEN];
+ u8 san_addr[ETH_ALEN];
/* prefix for World Wide Node Name (WWNN) */
u16 wwnn_prefix;
/* prefix for World Wide Port Name (WWPN) */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index e5101e91b6b5..8cc5eccfd651 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -751,16 +751,20 @@ static s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index)
{
u32 macc_reg;
u32 ledctl_reg;
+ ixgbe_link_speed speed;
+ bool link_up;
/*
- * In order for the blink bit in the LED control register
- * to work, link and speed must be forced in the MAC. We
- * will reverse this when we stop the blinking.
+ * Link should be up in order for the blink bit in the LED control
+ * register to work. Force link and speed in the MAC if link is down.
+ * This will be reversed when we stop the blinking.
*/
- macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC);
- macc_reg |= IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS;
- IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg);
-
+ hw->mac.ops.check_link(hw, &speed, &link_up, false);
+ if (link_up == false) {
+ macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC);
+ macc_reg |= IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS;
+ IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg);
+ }
/* Set the LED to LINK_UP + BLINK. */
ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
ledctl_reg &= ~IXGBE_LED_MODE_MASK(index);
diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h
index 78abb6f1a866..2eb89cb94a0d 100644
--- a/drivers/net/ethernet/intel/ixgbevf/defines.h
+++ b/drivers/net/ethernet/intel/ixgbevf/defines.h
@@ -35,7 +35,6 @@
#define IXGBE_VF_IRQ_CLEAR_MASK 7
#define IXGBE_VF_MAX_TX_QUEUES 1
#define IXGBE_VF_MAX_RX_QUEUES 1
-#define IXGBE_ETH_LENGTH_OF_ADDRESS 6
/* Link speed */
typedef u32 ixgbe_link_speed;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index e29ba4506b74..dc8e6511c640 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -27,6 +27,8 @@
/* ethtool support for ixgbevf */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -265,11 +267,11 @@ static void ixgbevf_get_drvinfo(struct net_device *netdev,
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, ixgbevf_driver_name, 32);
- strlcpy(drvinfo->version, ixgbevf_driver_version, 32);
-
- strlcpy(drvinfo->fw_version, "N/A", 4);
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+ strlcpy(drvinfo->driver, ixgbevf_driver_name, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, ixgbevf_driver_version,
+ sizeof(drvinfo->version));
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info));
}
static void ixgbevf_get_ringparam(struct net_device *netdev,
@@ -549,8 +551,8 @@ static const u32 register_test_patterns[] = {
writel((W & M), (adapter->hw.hw_addr + R)); \
val = readl(adapter->hw.hw_addr + R); \
if ((W & M) != (val & M)) { \
- printk(KERN_ERR "set/check reg %04X test failed: got 0x%08X " \
- "expected 0x%08X\n", R, (val & M), (W & M)); \
+ pr_err("set/check reg %04X test failed: got 0x%08X expected " \
+ "0x%08X\n", R, (val & M), (W & M)); \
*data = R; \
writel(before, (adapter->hw.hw_addr + R)); \
return 1; \
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 4c8e19951d57..891162d1610c 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -29,6 +29,9 @@
/******************************************************************************
Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
******************************************************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/module.h>
@@ -363,7 +366,7 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
if (!bi->page_dma &&
(adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
if (!bi->page) {
- bi->page = netdev_alloc_page(adapter->netdev);
+ bi->page = alloc_page(GFP_ATOMIC | __GFP_COLD);
if (!bi->page) {
adapter->alloc_rx_page_failed++;
goto no_buffers;
@@ -1400,7 +1403,7 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
}
}
-static void ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
@@ -1409,9 +1412,11 @@ static void ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
if (hw->mac.ops.set_vfta)
hw->mac.ops.set_vfta(hw, vid, 0, true);
set_bit(vid, adapter->active_vlans);
+
+ return 0;
}
-static void ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
@@ -1420,6 +1425,8 @@ static void ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
if (hw->mac.ops.set_vfta)
hw->mac.ops.set_vfta(hw, vid, 0, false);
clear_bit(vid, adapter->active_vlans);
+
+ return 0;
}
static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
@@ -1437,7 +1444,7 @@ static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
int count = 0;
if ((netdev_uc_count(netdev)) > 10) {
- printk(KERN_ERR "Too many unicast filters - No Space\n");
+ pr_err("Too many unicast filters - No Space\n");
return -ENOSPC;
}
@@ -2135,7 +2142,7 @@ static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
err = ixgbevf_alloc_queues(adapter);
if (err) {
- printk(KERN_ERR "Unable to allocate memory for queues\n");
+ pr_err("Unable to allocate memory for queues\n");
goto err_alloc_queues;
}
@@ -2189,7 +2196,7 @@ static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
} else {
err = hw->mac.ops.init_hw(hw);
if (err) {
- printk(KERN_ERR "init_shared_code failed: %d\n", err);
+ pr_err("init_shared_code failed: %d\n", err);
goto out;
}
}
@@ -2630,8 +2637,8 @@ static int ixgbevf_open(struct net_device *netdev)
* the vf can't start. */
if (hw->adapter_stopped) {
err = IXGBE_ERR_MBX;
- printk(KERN_ERR "Unable to start - perhaps the PF"
- " Driver isn't up yet\n");
+ pr_err("Unable to start - perhaps the PF Driver isn't "
+ "up yet\n");
goto err_setup_reset;
}
}
@@ -2842,10 +2849,8 @@ static bool ixgbevf_tx_csum(struct ixgbevf_adapter *adapter,
break;
default:
if (unlikely(net_ratelimit())) {
- printk(KERN_WARNING
- "partial checksum but "
- "proto=%x!\n",
- skb->protocol);
+ pr_warn("partial checksum but "
+ "proto=%x!\n", skb->protocol);
}
break;
}
@@ -3249,7 +3254,8 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
return stats;
}
-static int ixgbevf_set_features(struct net_device *netdev, u32 features)
+static int ixgbevf_set_features(struct net_device *netdev,
+ netdev_features_t features)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
@@ -3414,7 +3420,7 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
if (!is_valid_ether_addr(netdev->dev_addr)) {
- printk(KERN_ERR "invalid MAC address\n");
+ pr_err("invalid MAC address\n");
err = -EIO;
goto err_sw_init;
}
@@ -3535,10 +3541,10 @@ static struct pci_driver ixgbevf_driver = {
static int __init ixgbevf_init_module(void)
{
int ret;
- printk(KERN_INFO "ixgbevf: %s - version %s\n", ixgbevf_driver_string,
- ixgbevf_driver_version);
+ pr_info("%s - version %s\n", ixgbevf_driver_string,
+ ixgbevf_driver_version);
- printk(KERN_INFO "%s\n", ixgbevf_copyright);
+ pr_info("%s\n", ixgbevf_copyright);
ret = pci_register_driver(&ixgbevf_driver);
return ret;
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.h b/drivers/net/ethernet/intel/ixgbevf/mbx.h
index ea393eb03f3a..9d38a94a348a 100644
--- a/drivers/net/ethernet/intel/ixgbevf/mbx.h
+++ b/drivers/net/ethernet/intel/ixgbevf/mbx.h
@@ -47,8 +47,8 @@
#define IXGBE_VFMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */
#define IXGBE_VFMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */
-#define IXGBE_PFMAILBOX(x) (0x04B00 + (4 * x))
-#define IXGBE_PFMBMEM(vfn) (0x13000 + (64 * vfn))
+#define IXGBE_PFMAILBOX(x) (0x04B00 + (4 * (x)))
+#define IXGBE_PFMBMEM(vfn) (0x13000 + (64 * (vfn)))
#define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */
#define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */
diff --git a/drivers/net/ethernet/intel/ixgbevf/regs.h b/drivers/net/ethernet/intel/ixgbevf/regs.h
index 189200eeca26..5e4d5e5cdf38 100644
--- a/drivers/net/ethernet/intel/ixgbevf/regs.h
+++ b/drivers/net/ethernet/intel/ixgbevf/regs.h
@@ -39,29 +39,29 @@
#define IXGBE_VTEIMC 0x0010C
#define IXGBE_VTEIAC 0x00110
#define IXGBE_VTEIAM 0x00114
-#define IXGBE_VTEITR(x) (0x00820 + (4 * x))
-#define IXGBE_VTIVAR(x) (0x00120 + (4 * x))
+#define IXGBE_VTEITR(x) (0x00820 + (4 * (x)))
+#define IXGBE_VTIVAR(x) (0x00120 + (4 * (x)))
#define IXGBE_VTIVAR_MISC 0x00140
-#define IXGBE_VTRSCINT(x) (0x00180 + (4 * x))
-#define IXGBE_VFRDBAL(x) (0x01000 + (0x40 * x))
-#define IXGBE_VFRDBAH(x) (0x01004 + (0x40 * x))
-#define IXGBE_VFRDLEN(x) (0x01008 + (0x40 * x))
-#define IXGBE_VFRDH(x) (0x01010 + (0x40 * x))
-#define IXGBE_VFRDT(x) (0x01018 + (0x40 * x))
-#define IXGBE_VFRXDCTL(x) (0x01028 + (0x40 * x))
-#define IXGBE_VFSRRCTL(x) (0x01014 + (0x40 * x))
-#define IXGBE_VFRSCCTL(x) (0x0102C + (0x40 * x))
+#define IXGBE_VTRSCINT(x) (0x00180 + (4 * (x)))
+#define IXGBE_VFRDBAL(x) (0x01000 + (0x40 * (x)))
+#define IXGBE_VFRDBAH(x) (0x01004 + (0x40 * (x)))
+#define IXGBE_VFRDLEN(x) (0x01008 + (0x40 * (x)))
+#define IXGBE_VFRDH(x) (0x01010 + (0x40 * (x)))
+#define IXGBE_VFRDT(x) (0x01018 + (0x40 * (x)))
+#define IXGBE_VFRXDCTL(x) (0x01028 + (0x40 * (x)))
+#define IXGBE_VFSRRCTL(x) (0x01014 + (0x40 * (x)))
+#define IXGBE_VFRSCCTL(x) (0x0102C + (0x40 * (x)))
#define IXGBE_VFPSRTYPE 0x00300
-#define IXGBE_VFTDBAL(x) (0x02000 + (0x40 * x))
-#define IXGBE_VFTDBAH(x) (0x02004 + (0x40 * x))
-#define IXGBE_VFTDLEN(x) (0x02008 + (0x40 * x))
-#define IXGBE_VFTDH(x) (0x02010 + (0x40 * x))
-#define IXGBE_VFTDT(x) (0x02018 + (0x40 * x))
-#define IXGBE_VFTXDCTL(x) (0x02028 + (0x40 * x))
-#define IXGBE_VFTDWBAL(x) (0x02038 + (0x40 * x))
-#define IXGBE_VFTDWBAH(x) (0x0203C + (0x40 * x))
-#define IXGBE_VFDCA_RXCTRL(x) (0x0100C + (0x40 * x))
-#define IXGBE_VFDCA_TXCTRL(x) (0x0200c + (0x40 * x))
+#define IXGBE_VFTDBAL(x) (0x02000 + (0x40 * (x)))
+#define IXGBE_VFTDBAH(x) (0x02004 + (0x40 * (x)))
+#define IXGBE_VFTDLEN(x) (0x02008 + (0x40 * (x)))
+#define IXGBE_VFTDH(x) (0x02010 + (0x40 * (x)))
+#define IXGBE_VFTDT(x) (0x02018 + (0x40 * (x)))
+#define IXGBE_VFTXDCTL(x) (0x02028 + (0x40 * (x)))
+#define IXGBE_VFTDWBAL(x) (0x02038 + (0x40 * (x)))
+#define IXGBE_VFTDWBAH(x) (0x0203C + (0x40 * (x)))
+#define IXGBE_VFDCA_RXCTRL(x) (0x0100C + (0x40 * (x)))
+#define IXGBE_VFDCA_TXCTRL(x) (0x0200c + (0x40 * (x)))
#define IXGBE_VFGPRC 0x0101C
#define IXGBE_VFGPTC 0x0201C
#define IXGBE_VFGORC_LSB 0x01020
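The regs.h and mbx.h hunks only add parentheses around each macro argument, but that matters as soon as a caller passes an expression: without them, operator precedence splits the argument across the multiplication. A standalone example (REG_BAD and REG_GOOD are made-up names, not ixgbevf registers):

#include <stdio.h>

#define REG_BAD(x)  (0x00820 + (4 * x))
#define REG_GOOD(x) (0x00820 + (4 * (x)))

int main(void)
{
	int i = 1;

	/* REG_BAD(i + 1) expands to (0x00820 + (4 * i + 1)) == 0x825,
	 * while REG_GOOD(i + 1) gives the intended 0x828 */
	printf("bad = 0x%x, good = 0x%x\n", REG_BAD(i + 1), REG_GOOD(i + 1));
	return 0;
}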
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index aa3682e8c473..21533e300367 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -108,7 +108,7 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK))
return IXGBE_ERR_INVALID_MAC_ADDR;
- memcpy(hw->mac.perm_addr, addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
+ memcpy(hw->mac.perm_addr, addr, ETH_ALEN);
hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD];
return 0;
@@ -211,7 +211,7 @@ static s32 ixgbevf_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
**/
static s32 ixgbevf_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr)
{
- memcpy(mac_addr, hw->mac.perm_addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
+ memcpy(mac_addr, hw->mac.perm_addr, ETH_ALEN);
return 0;
}
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 7becff1f387d..27d651a80f3f 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -1745,6 +1745,112 @@ jme_phy_off(struct jme_adapter *jme)
}
static int
+jme_phy_specreg_read(struct jme_adapter *jme, u32 specreg)
+{
+ u32 phy_addr;
+
+ phy_addr = JM_PHY_SPEC_REG_READ | specreg;
+ jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_ADDR_REG,
+ phy_addr);
+ return jme_mdio_read(jme->dev, jme->mii_if.phy_id,
+ JM_PHY_SPEC_DATA_REG);
+}
+
+static void
+jme_phy_specreg_write(struct jme_adapter *jme, u32 ext_reg, u32 phy_data)
+{
+ u32 phy_addr;
+
+ phy_addr = JM_PHY_SPEC_REG_WRITE | ext_reg;
+ jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_DATA_REG,
+ phy_data);
+ jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_ADDR_REG,
+ phy_addr);
+}
+
+static int
+jme_phy_calibration(struct jme_adapter *jme)
+{
+ u32 ctrl1000, phy_data;
+
+ jme_phy_off(jme);
+ jme_phy_on(jme);
+ /* Enable PHY test mode 1 */
+ ctrl1000 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_CTRL1000);
+ ctrl1000 &= ~PHY_GAD_TEST_MODE_MSK;
+ ctrl1000 |= PHY_GAD_TEST_MODE_1;
+ jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_CTRL1000, ctrl1000);
+
+ phy_data = jme_phy_specreg_read(jme, JM_PHY_EXT_COMM_2_REG);
+ phy_data &= ~JM_PHY_EXT_COMM_2_CALI_MODE_0;
+ phy_data |= JM_PHY_EXT_COMM_2_CALI_LATCH |
+ JM_PHY_EXT_COMM_2_CALI_ENABLE;
+ jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_2_REG, phy_data);
+ msleep(20);
+ phy_data = jme_phy_specreg_read(jme, JM_PHY_EXT_COMM_2_REG);
+ phy_data &= ~(JM_PHY_EXT_COMM_2_CALI_ENABLE |
+ JM_PHY_EXT_COMM_2_CALI_MODE_0 |
+ JM_PHY_EXT_COMM_2_CALI_LATCH);
+ jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_2_REG, phy_data);
+
+ /* Disable PHY test mode */
+ ctrl1000 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_CTRL1000);
+ ctrl1000 &= ~PHY_GAD_TEST_MODE_MSK;
+ jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_CTRL1000, ctrl1000);
+ return 0;
+}
+
+static int
+jme_phy_setEA(struct jme_adapter *jme)
+{
+ u32 phy_comm0 = 0, phy_comm1 = 0;
+ u8 nic_ctrl;
+
+ pci_read_config_byte(jme->pdev, PCI_PRIV_SHARE_NICCTRL, &nic_ctrl);
+ if ((nic_ctrl & 0x3) == JME_FLAG_PHYEA_ENABLE)
+ return 0;
+
+ switch (jme->pdev->device) {
+ case PCI_DEVICE_ID_JMICRON_JMC250:
+ if (((jme->chip_main_rev == 5) &&
+ ((jme->chip_sub_rev == 0) || (jme->chip_sub_rev == 1) ||
+ (jme->chip_sub_rev == 3))) ||
+ (jme->chip_main_rev >= 6)) {
+ phy_comm0 = 0x008A;
+ phy_comm1 = 0x4109;
+ }
+ if ((jme->chip_main_rev == 3) &&
+ ((jme->chip_sub_rev == 1) || (jme->chip_sub_rev == 2)))
+ phy_comm0 = 0xE088;
+ break;
+ case PCI_DEVICE_ID_JMICRON_JMC260:
+ if (((jme->chip_main_rev == 5) &&
+ ((jme->chip_sub_rev == 0) || (jme->chip_sub_rev == 1) ||
+ (jme->chip_sub_rev == 3))) ||
+ (jme->chip_main_rev >= 6)) {
+ phy_comm0 = 0x008A;
+ phy_comm1 = 0x4109;
+ }
+ if ((jme->chip_main_rev == 3) &&
+ ((jme->chip_sub_rev == 1) || (jme->chip_sub_rev == 2)))
+ phy_comm0 = 0xE088;
+ if ((jme->chip_main_rev == 2) && (jme->chip_sub_rev == 0))
+ phy_comm0 = 0x608A;
+ if ((jme->chip_main_rev == 2) && (jme->chip_sub_rev == 2))
+ phy_comm0 = 0x408A;
+ break;
+ default:
+ return -ENODEV;
+ }
+ if (phy_comm0)
+ jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_0_REG, phy_comm0);
+ if (phy_comm1)
+ jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_1_REG, phy_comm1);
+
+ return 0;
+}
+
+static int
jme_open(struct net_device *netdev)
{
struct jme_adapter *jme = netdev_priv(netdev);
@@ -1769,7 +1875,8 @@ jme_open(struct net_device *netdev)
jme_set_settings(netdev, &jme->old_ecmd);
else
jme_reset_phy_processor(jme);
-
+ jme_phy_calibration(jme);
+ jme_phy_setEA(jme);
jme_reset_link(jme);
return 0;
@@ -1883,7 +1990,7 @@ jme_fill_tx_map(struct pci_dev *pdev,
struct page *page,
u32 page_offset,
u32 len,
- u8 hidma)
+ bool hidma)
{
dma_addr_t dmaaddr;
@@ -1917,7 +2024,7 @@ jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
struct jme_ring *txring = &(jme->txring[0]);
struct txdesc *txdesc = txring->desc, *ctxdesc;
struct jme_buffer_info *txbi = txring->bufinf, *ctxbi;
- u8 hidma = jme->dev->features & NETIF_F_HIGHDMA;
+ bool hidma = jme->dev->features & NETIF_F_HIGHDMA;
int i, nr_frags = skb_shinfo(skb)->nr_frags;
int mask = jme->tx_ring_mask;
const struct skb_frag_struct *frag;
@@ -2292,9 +2399,9 @@ jme_get_drvinfo(struct net_device *netdev,
{
struct jme_adapter *jme = netdev_priv(netdev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, pci_name(jme->pdev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(jme->pdev), sizeof(info->bus_info));
}
static int
@@ -2620,8 +2727,8 @@ jme_set_msglevel(struct net_device *netdev, u32 value)
jme->msg_enable = value;
}
-static u32
-jme_fix_features(struct net_device *netdev, u32 features)
+static netdev_features_t
+jme_fix_features(struct net_device *netdev, netdev_features_t features)
{
if (netdev->mtu > 1900)
features &= ~(NETIF_F_ALL_TSO | NETIF_F_ALL_CSUM);
@@ -2629,7 +2736,7 @@ jme_fix_features(struct net_device *netdev, u32 features)
}
static int
-jme_set_features(struct net_device *netdev, u32 features)
+jme_set_features(struct net_device *netdev, netdev_features_t features)
{
struct jme_adapter *jme = netdev_priv(netdev);
@@ -3184,7 +3291,8 @@ jme_resume(struct device *dev)
jme_set_settings(netdev, &jme->old_ecmd);
else
jme_reset_phy_processor(jme);
-
+ jme_phy_calibration(jme);
+ jme_phy_setEA(jme);
jme_start_irq(jme);
netif_device_attach(netdev);
@@ -3239,4 +3347,3 @@ MODULE_DESCRIPTION("JMicron JMC2x0 PCI Express Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, jme_pci_tbl);
-
diff --git a/drivers/net/ethernet/jme.h b/drivers/net/ethernet/jme.h
index 02ea27c1dcb5..4304072bd3c5 100644
--- a/drivers/net/ethernet/jme.h
+++ b/drivers/net/ethernet/jme.h
@@ -760,6 +760,25 @@ enum jme_rxmcs_bits {
RXMCS_CHECKSUM,
};
+/* Extern PHY common register 2 */
+
+#define PHY_GAD_TEST_MODE_1 0x00002000
+#define PHY_GAD_TEST_MODE_MSK 0x0000E000
+#define JM_PHY_SPEC_REG_READ 0x00004000
+#define JM_PHY_SPEC_REG_WRITE 0x00008000
+#define PHY_CALIBRATION_DELAY 20
+#define JM_PHY_SPEC_ADDR_REG 0x1E
+#define JM_PHY_SPEC_DATA_REG 0x1F
+
+#define JM_PHY_EXT_COMM_0_REG 0x30
+#define JM_PHY_EXT_COMM_1_REG 0x31
+#define JM_PHY_EXT_COMM_2_REG 0x32
+#define JM_PHY_EXT_COMM_2_CALI_ENABLE 0x01
+#define JM_PHY_EXT_COMM_2_CALI_MODE_0 0x02
+#define JM_PHY_EXT_COMM_2_CALI_LATCH 0x10
+#define PCI_PRIV_SHARE_NICCTRL 0xF5
+#define JME_FLAG_PHYEA_ENABLE 0x2
+
/*
* Wakeup Frame setup interface registers
*/
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index d8430f487b84..6ad094f176f8 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -1230,18 +1230,7 @@ static struct platform_driver korina_driver = {
.remove = korina_remove,
};
-static int __init korina_init_module(void)
-{
- return platform_driver_register(&korina_driver);
-}
-
-static void korina_cleanup_module(void)
-{
- return platform_driver_unregister(&korina_driver);
-}
-
-module_init(korina_init_module);
-module_exit(korina_cleanup_module);
+module_platform_driver(korina_driver);
MODULE_AUTHOR("Philip Rischel <rischelp@idt.com>");
MODULE_AUTHOR("Felix Fietkau <nbd@openwrt.org>");
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 0b3567ab8121..85e2c6cd9708 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -98,6 +98,7 @@ struct ltq_etop_chan {
struct ltq_etop_priv {
struct net_device *netdev;
+ struct platform_device *pdev;
struct ltq_eth_data *pldata;
struct resource *res;
@@ -436,7 +437,8 @@ ltq_etop_mdio_init(struct net_device *dev)
priv->mii_bus->read = ltq_etop_mdio_rd;
priv->mii_bus->write = ltq_etop_mdio_wr;
priv->mii_bus->name = "ltq_mii";
- snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%x", 0);
+ snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+ priv->pdev->name, priv->pdev->id);
priv->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
if (!priv->mii_bus->irq) {
err = -ENOMEM;
@@ -734,6 +736,7 @@ ltq_etop_probe(struct platform_device *pdev)
dev->ethtool_ops = &ltq_etop_ethtool_ops;
priv = netdev_priv(dev);
priv->res = res;
+ priv->pdev = pdev;
priv->pldata = dev_get_platdata(&pdev->dev);
priv->netdev = dev;
spin_lock_init(&priv->lock);
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 194a03113802..9c049d2cb97d 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1502,10 +1502,12 @@ mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
static void mv643xx_eth_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
- strncpy(drvinfo->driver, mv643xx_eth_driver_name, 32);
- strncpy(drvinfo->version, mv643xx_eth_driver_version, 32);
- strncpy(drvinfo->fw_version, "N/A", 32);
- strncpy(drvinfo->bus_info, "platform", 32);
+ strlcpy(drvinfo->driver, mv643xx_eth_driver_name,
+ sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, mv643xx_eth_driver_version,
+ sizeof(drvinfo->version));
+ strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
+ strlcpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
drvinfo->n_stats = ARRAY_SIZE(mv643xx_eth_stats);
}
@@ -1578,10 +1580,10 @@ mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
static int
-mv643xx_eth_set_features(struct net_device *dev, u32 features)
+mv643xx_eth_set_features(struct net_device *dev, netdev_features_t features)
{
struct mv643xx_eth_private *mp = netdev_priv(dev);
- u32 rx_csum = features & NETIF_F_RXCSUM;
+ bool rx_csum = features & NETIF_F_RXCSUM;
wrlp(mp, PORT_CONFIG, rx_csum ? 0x02000000 : 0x00000000);
@@ -2509,7 +2511,7 @@ static void mv643xx_eth_netpoll(struct net_device *dev)
/* platform glue ************************************************************/
static void
mv643xx_eth_conf_mbus_windows(struct mv643xx_eth_shared_private *msp,
- struct mbus_dram_target_info *dram)
+ const struct mbus_dram_target_info *dram)
{
void __iomem *base = msp->base;
u32 win_enable;
@@ -2527,7 +2529,7 @@ mv643xx_eth_conf_mbus_windows(struct mv643xx_eth_shared_private *msp,
win_protect = 0;
for (i = 0; i < dram->num_cs; i++) {
- struct mbus_dram_window *cs = dram->cs + i;
+ const struct mbus_dram_window *cs = dram->cs + i;
writel((cs->base & 0xffff0000) |
(cs->mbus_attr << 8) |
@@ -2577,6 +2579,7 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
static int mv643xx_eth_version_printed;
struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data;
struct mv643xx_eth_shared_private *msp;
+ const struct mbus_dram_target_info *dram;
struct resource *res;
int ret;
@@ -2610,7 +2613,8 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
msp->smi_bus->name = "mv643xx_eth smi";
msp->smi_bus->read = smi_bus_read;
msp->smi_bus->write = smi_bus_write,
- snprintf(msp->smi_bus->id, MII_BUS_ID_SIZE, "%d", pdev->id);
+ snprintf(msp->smi_bus->id, MII_BUS_ID_SIZE, "%s-%d",
+ pdev->name, pdev->id);
msp->smi_bus->parent = &pdev->dev;
msp->smi_bus->phy_mask = 0xffffffff;
if (mdiobus_register(msp->smi_bus) < 0)
@@ -2641,8 +2645,9 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
/*
* (Re-)program MBUS remapping windows if we are asked to.
*/
- if (pd != NULL && pd->dram != NULL)
- mv643xx_eth_conf_mbus_windows(msp, pd->dram);
+ dram = mv_mbus_dram_info();
+ if (dram)
+ mv643xx_eth_conf_mbus_windows(msp, dram);
/*
* Detect hardware parameters.
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index d17d0624c5e6..953ba5851f7b 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1552,7 +1552,8 @@ static int pxa168_eth_probe(struct platform_device *pdev)
pep->smi_bus->name = "pxa168_eth smi";
pep->smi_bus->read = pxa168_smi_read;
pep->smi_bus->write = pxa168_smi_write;
- snprintf(pep->smi_bus->id, MII_BUS_ID_SIZE, "%d", pdev->id);
+ snprintf(pep->smi_bus->id, MII_BUS_ID_SIZE, "%s-%d",
+ pdev->name, pdev->id);
pep->smi_bus->parent = &pdev->dev;
pep->smi_bus->phy_mask = 0xffffffff;
err = mdiobus_register(pep->smi_bus);
@@ -1645,18 +1646,7 @@ static struct platform_driver pxa168_eth_driver = {
},
};
-static int __init pxa168_init_module(void)
-{
- return platform_driver_register(&pxa168_eth_driver);
-}
-
-static void __exit pxa168_cleanup_module(void)
-{
- platform_driver_unregister(&pxa168_eth_driver);
-}
-
-module_init(pxa168_init_module);
-module_exit(pxa168_cleanup_module);
+module_platform_driver(pxa168_eth_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Ethernet driver for Marvell PXA168");
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index c7b60839ac99..18a87a57fc0a 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -394,10 +394,10 @@ static void skge_get_drvinfo(struct net_device *dev,
{
struct skge_port *skge = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->fw_version, "N/A");
- strcpy(info->bus_info, pci_name(skge->hw->pdev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(skge->hw->pdev),
+ sizeof(info->bus_info));
}
static const struct skge_stat {
@@ -2606,6 +2606,9 @@ static int skge_up(struct net_device *dev)
spin_unlock_irq(&hw->hw_lock);
napi_enable(&skge->napi);
+
+ skge_set_multicast(dev);
+
return 0;
free_tx_ring:
@@ -4039,7 +4042,7 @@ static void __devexit skge_remove(struct pci_dev *pdev)
pci_set_drvdata(pdev, NULL);
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int skge_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
@@ -4101,7 +4104,7 @@ static SIMPLE_DEV_PM_OPS(skge_pm_ops, skge_suspend, skge_resume);
#else
#define SKGE_PM_OPS NULL
-#endif
+#endif /* CONFIG_PM_SLEEP */
static void skge_shutdown(struct pci_dev *pdev)
{
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 7803efa46eb2..760c2b17dfd3 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -1110,6 +1110,7 @@ static void tx_init(struct sky2_port *sky2)
sky2->tx_prod = sky2->tx_cons = 0;
sky2->tx_tcpsum = 0;
sky2->tx_last_mss = 0;
+ netdev_reset_queue(sky2->netdev);
le = get_tx_le(sky2, &sky2->tx_prod);
le->addr = 0;
@@ -1284,7 +1285,7 @@ static const uint32_t rss_init_key[10] = {
};
/* Enable/disable receive hash calculation (RSS) */
-static void rx_set_rss(struct net_device *dev, u32 features)
+static void rx_set_rss(struct net_device *dev, netdev_features_t features)
{
struct sky2_port *sky2 = netdev_priv(dev);
struct sky2_hw *hw = sky2->hw;
@@ -1402,7 +1403,7 @@ static int sky2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
#define SKY2_VLAN_OFFLOADS (NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO)
-static void sky2_vlan_mode(struct net_device *dev, u32 features)
+static void sky2_vlan_mode(struct net_device *dev, netdev_features_t features)
{
struct sky2_port *sky2 = netdev_priv(dev);
struct sky2_hw *hw = sky2->hw;
@@ -1971,6 +1972,7 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb,
if (tx_avail(sky2) <= MAX_SKB_TX_LE)
netif_stop_queue(dev);
+ netdev_sent_queue(dev, skb->len);
sky2_put_idx(hw, txqaddr[sky2->port], sky2->tx_prod);
return NETDEV_TX_OK;
@@ -2002,7 +2004,8 @@ mapping_error:
static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
{
struct net_device *dev = sky2->netdev;
- unsigned idx;
+ u16 idx;
+ unsigned int bytes_compl = 0, pkts_compl = 0;
BUG_ON(done >= sky2->tx_ring_size);
@@ -2017,10 +2020,8 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
netif_printk(sky2, tx_done, KERN_DEBUG, dev,
"tx done %u\n", idx);
- u64_stats_update_begin(&sky2->tx_stats.syncp);
- ++sky2->tx_stats.packets;
- sky2->tx_stats.bytes += skb->len;
- u64_stats_update_end(&sky2->tx_stats.syncp);
+ pkts_compl++;
+ bytes_compl += skb->len;
re->skb = NULL;
dev_kfree_skb_any(skb);
@@ -2031,6 +2032,13 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
sky2->tx_cons = idx;
smp_mb();
+
+ netdev_completed_queue(dev, pkts_compl, bytes_compl);
+
+ u64_stats_update_begin(&sky2->tx_stats.syncp);
+ sky2->tx_stats.packets += pkts_compl;
+ sky2->tx_stats.bytes += bytes_compl;
+ u64_stats_update_end(&sky2->tx_stats.syncp);
}
static void sky2_tx_reset(struct sky2_hw *hw, unsigned port)
@@ -3643,10 +3651,10 @@ static void sky2_get_drvinfo(struct net_device *dev,
{
struct sky2_port *sky2 = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->fw_version, "N/A");
- strcpy(info->bus_info, pci_name(sky2->hw->pdev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(sky2->hw->pdev),
+ sizeof(info->bus_info));
}
static const struct sky2_stat {
@@ -4311,7 +4319,8 @@ static int sky2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom
return sky2_vpd_write(sky2->hw, cap, data, eeprom->offset, eeprom->len);
}
-static u32 sky2_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t sky2_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
const struct sky2_port *sky2 = netdev_priv(dev);
const struct sky2_hw *hw = sky2->hw;
@@ -4335,13 +4344,13 @@ static u32 sky2_fix_features(struct net_device *dev, u32 features)
return features;
}
-static int sky2_set_features(struct net_device *dev, u32 features)
+static int sky2_set_features(struct net_device *dev, netdev_features_t features)
{
struct sky2_port *sky2 = netdev_priv(dev);
- u32 changed = dev->features ^ features;
+ netdev_features_t changed = dev->features ^ features;
if (changed & NETIF_F_RXCSUM) {
- u32 on = features & NETIF_F_RXCSUM;
+ bool on = features & NETIF_F_RXCSUM;
sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
on ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/Makefile b/drivers/net/ethernet/mellanox/mlx4/Makefile
index d1aa45a15854..4a40ab967eeb 100644
--- a/drivers/net/ethernet/mellanox/mlx4/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx4/Makefile
@@ -1,7 +1,7 @@
obj-$(CONFIG_MLX4_CORE) += mlx4_core.o
mlx4_core-y := alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \
- mr.o pd.o port.o profile.o qp.o reset.o sense.o srq.o
+ mr.o pd.o port.o profile.o qp.o reset.o sense.o srq.o resource_tracker.o
obj-$(CONFIG_MLX4_EN) += mlx4_en.o
diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c
index 45aea9c3ae2c..915e947b422d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/catas.c
+++ b/drivers/net/ethernet/mellanox/mlx4/catas.c
@@ -48,7 +48,8 @@ static struct work_struct catas_work;
static int internal_err_reset = 1;
module_param(internal_err_reset, int, 0644);
MODULE_PARM_DESC(internal_err_reset,
- "Reset device on internal errors if non-zero (default 1)");
+ "Reset device on internal errors if non-zero"
+ " (default 1, in SRIOV mode default is 0)");
static void dump_err_buf(struct mlx4_dev *dev)
{
@@ -116,6 +117,10 @@ void mlx4_start_catas_poll(struct mlx4_dev *dev)
struct mlx4_priv *priv = mlx4_priv(dev);
phys_addr_t addr;
+ /* If we are in SRIOV, the default of the module param must be 0 */
+ if (mlx4_is_mfunc(dev))
+ internal_err_reset = 0;
+
INIT_LIST_HEAD(&priv->catas_err.list);
init_timer(&priv->catas_err.timer);
priv->catas_err.map = NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 78f5a1a0b8c8..978f593094c0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -39,12 +39,18 @@
#include <linux/errno.h>
#include <linux/mlx4/cmd.h>
+#include <linux/semaphore.h>
#include <asm/io.h>
#include "mlx4.h"
+#include "fw.h"
#define CMD_POLL_TOKEN 0xffff
+#define INBOX_MASK 0xffffffffffffff00ULL
+
+#define CMD_CHAN_VER 1
+#define CMD_CHAN_IF_REV 1
enum {
/* command completed successfully: */
@@ -110,8 +116,12 @@ struct mlx4_cmd_context {
int next;
u64 out_param;
u16 token;
+ u8 fw_status;
};
+static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr_cmd *in_vhcr);
+
static int mlx4_status_to_errno(u8 status)
{
static const int trans_table[] = {
@@ -142,6 +152,139 @@ static int mlx4_status_to_errno(u8 status)
return trans_table[status];
}
+static u8 mlx4_errno_to_status(int errno)
+{
+ switch (errno) {
+ case -EPERM:
+ return CMD_STAT_BAD_OP;
+ case -EINVAL:
+ return CMD_STAT_BAD_PARAM;
+ case -ENXIO:
+ return CMD_STAT_BAD_SYS_STATE;
+ case -EBUSY:
+ return CMD_STAT_RESOURCE_BUSY;
+ case -ENOMEM:
+ return CMD_STAT_EXCEED_LIM;
+ case -ENFILE:
+ return CMD_STAT_ICM_ERROR;
+ default:
+ return CMD_STAT_INTERNAL_ERR;
+ }
+}
+
+static int comm_pending(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ u32 status = readl(&priv->mfunc.comm->slave_read);
+
+ return (swab32(status) >> 31) != priv->cmd.comm_toggle;
+}
+
+static void mlx4_comm_cmd_post(struct mlx4_dev *dev, u8 cmd, u16 param)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ u32 val;
+
+ priv->cmd.comm_toggle ^= 1;
+ val = param | (cmd << 16) | (priv->cmd.comm_toggle << 31);
+ __raw_writel((__force u32) cpu_to_be32(val),
+ &priv->mfunc.comm->slave_write);
+ mmiowb();
+}
+
+static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param,
+ unsigned long timeout)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ unsigned long end;
+ int err = 0;
+ int ret_from_pending = 0;
+
+ /* First, verify that the master reports correct status */
+ if (comm_pending(dev)) {
+ mlx4_warn(dev, "Communication channel is not idle."
+ "my toggle is %d (cmd:0x%x)\n",
+ priv->cmd.comm_toggle, cmd);
+ return -EAGAIN;
+ }
+
+ /* Write command */
+ down(&priv->cmd.poll_sem);
+ mlx4_comm_cmd_post(dev, cmd, param);
+
+ end = msecs_to_jiffies(timeout) + jiffies;
+ while (comm_pending(dev) && time_before(jiffies, end))
+ cond_resched();
+ ret_from_pending = comm_pending(dev);
+ if (ret_from_pending) {
+ /* check if the slave is trying to boot in the middle of
+ * the FLR process. The only non-zero result of the RESET command
+ * is MLX4_DELAY_RESET_SLAVE */
+ if ((MLX4_COMM_CMD_RESET == cmd)) {
+ mlx4_warn(dev, "Got slave FLRed from Communication"
+ " channel (ret:0x%x)\n", ret_from_pending);
+ err = MLX4_DELAY_RESET_SLAVE;
+ } else {
+ mlx4_warn(dev, "Communication channel timed out\n");
+ err = -ETIMEDOUT;
+ }
+ }
+
+ up(&priv->cmd.poll_sem);
+ return err;
+}
+
+static int mlx4_comm_cmd_wait(struct mlx4_dev *dev, u8 op,
+ u16 param, unsigned long timeout)
+{
+ struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
+ struct mlx4_cmd_context *context;
+ int err = 0;
+
+ down(&cmd->event_sem);
+
+ spin_lock(&cmd->context_lock);
+ BUG_ON(cmd->free_head < 0);
+ context = &cmd->context[cmd->free_head];
+ context->token += cmd->token_mask + 1;
+ cmd->free_head = context->next;
+ spin_unlock(&cmd->context_lock);
+
+ init_completion(&context->done);
+
+ mlx4_comm_cmd_post(dev, op, param);
+
+ if (!wait_for_completion_timeout(&context->done,
+ msecs_to_jiffies(timeout))) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ err = context->result;
+ if (err && context->fw_status != CMD_STAT_MULTI_FUNC_REQ) {
+ mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
+ op, context->fw_status);
+ goto out;
+ }
+
+out:
+ spin_lock(&cmd->context_lock);
+ context->next = cmd->free_head;
+ cmd->free_head = context - cmd->context;
+ spin_unlock(&cmd->context_lock);
+
+ up(&cmd->event_sem);
+ return err;
+}
+
+int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param,
+ unsigned long timeout)
+{
+ if (mlx4_priv(dev)->cmd.use_events)
+ return mlx4_comm_cmd_wait(dev, cmd, param, timeout);
+ return mlx4_comm_cmd_poll(dev, cmd, param, timeout);
+}
+
static int cmd_pending(struct mlx4_dev *dev)
{
u32 status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET);
@@ -167,8 +310,10 @@ static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
end += msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS);
while (cmd_pending(dev)) {
- if (time_after_eq(jiffies, end))
+ if (time_after_eq(jiffies, end)) {
+ mlx4_err(dev, "%s:cmd_pending failed\n", __func__);
goto out;
+ }
cond_resched();
}
@@ -192,7 +337,7 @@ static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
(cmd->toggle << HCR_T_BIT) |
(event ? (1 << HCR_E_BIT) : 0) |
(op_modifier << HCR_OPMOD_SHIFT) |
- op), hcr + 6);
+ op), hcr + 6);
/*
* Make sure that our HCR writes don't get mixed in with
@@ -209,6 +354,62 @@ out:
return ret;
}
+static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
+ int out_is_imm, u32 in_modifier, u8 op_modifier,
+ u16 op, unsigned long timeout)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_vhcr_cmd *vhcr = priv->mfunc.vhcr;
+ int ret;
+
+ down(&priv->cmd.slave_sem);
+ vhcr->in_param = cpu_to_be64(in_param);
+ vhcr->out_param = out_param ? cpu_to_be64(*out_param) : 0;
+ vhcr->in_modifier = cpu_to_be32(in_modifier);
+ vhcr->opcode = cpu_to_be16((((u16) op_modifier) << 12) | (op & 0xfff));
+ vhcr->token = cpu_to_be16(CMD_POLL_TOKEN);
+ vhcr->status = 0;
+ vhcr->flags = !!(priv->cmd.use_events) << 6;
+ if (mlx4_is_master(dev)) {
+ ret = mlx4_master_process_vhcr(dev, dev->caps.function, vhcr);
+ if (!ret) {
+ if (out_is_imm) {
+ if (out_param)
+ *out_param =
+ be64_to_cpu(vhcr->out_param);
+ else {
+ mlx4_err(dev, "response expected while"
+ "output mailbox is NULL for "
+ "command 0x%x\n", op);
+ vhcr->status = CMD_STAT_BAD_PARAM;
+ }
+ }
+ ret = mlx4_status_to_errno(vhcr->status);
+ }
+ } else {
+ ret = mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_POST, 0,
+ MLX4_COMM_TIME + timeout);
+ if (!ret) {
+ if (out_is_imm) {
+ if (out_param)
+ *out_param =
+ be64_to_cpu(vhcr->out_param);
+ else {
+ mlx4_err(dev, "response expected while"
+ "output mailbox is NULL for "
+ "command 0x%x\n", op);
+ vhcr->status = CMD_STAT_BAD_PARAM;
+ }
+ }
+ ret = mlx4_status_to_errno(vhcr->status);
+ } else
+ mlx4_err(dev, "failed execution of VHCR_POST command"
+ "opcode 0x%x\n", op);
+ }
+ up(&priv->cmd.slave_sem);
+ return ret;
+}
+
static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
int out_is_imm, u32 in_modifier, u8 op_modifier,
u16 op, unsigned long timeout)
@@ -217,6 +418,7 @@ static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
void __iomem *hcr = priv->cmd.hcr;
int err = 0;
unsigned long end;
+ u32 stat;
down(&priv->cmd.poll_sem);
@@ -240,9 +442,12 @@ static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
__raw_readl(hcr + HCR_OUT_PARAM_OFFSET)) << 32 |
(u64) be32_to_cpu((__force __be32)
__raw_readl(hcr + HCR_OUT_PARAM_OFFSET + 4));
-
- err = mlx4_status_to_errno(be32_to_cpu((__force __be32)
- __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24);
+ stat = be32_to_cpu((__force __be32)
+ __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24;
+ err = mlx4_status_to_errno(stat);
+ if (err)
+ mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
+ op, stat);
out:
up(&priv->cmd.poll_sem);
@@ -259,6 +464,7 @@ void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param)
if (token != context->token)
return;
+ context->fw_status = status;
context->result = mlx4_status_to_errno(status);
context->out_param = out_param;
@@ -287,14 +493,18 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
in_modifier, op_modifier, op, context->token, 1);
- if (!wait_for_completion_timeout(&context->done, msecs_to_jiffies(timeout))) {
+ if (!wait_for_completion_timeout(&context->done,
+ msecs_to_jiffies(timeout))) {
err = -EBUSY;
goto out;
}
err = context->result;
- if (err)
+ if (err) {
+ mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
+ op, context->fw_status);
goto out;
+ }
if (out_is_imm)
*out_param = context->out_param;
@@ -311,17 +521,1046 @@ out:
int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
int out_is_imm, u32 in_modifier, u8 op_modifier,
- u16 op, unsigned long timeout)
+ u16 op, unsigned long timeout, int native)
{
- if (mlx4_priv(dev)->cmd.use_events)
- return mlx4_cmd_wait(dev, in_param, out_param, out_is_imm,
- in_modifier, op_modifier, op, timeout);
- else
- return mlx4_cmd_poll(dev, in_param, out_param, out_is_imm,
- in_modifier, op_modifier, op, timeout);
+ if (!mlx4_is_mfunc(dev) || (native && mlx4_is_master(dev))) {
+ if (mlx4_priv(dev)->cmd.use_events)
+ return mlx4_cmd_wait(dev, in_param, out_param,
+ out_is_imm, in_modifier,
+ op_modifier, op, timeout);
+ else
+ return mlx4_cmd_poll(dev, in_param, out_param,
+ out_is_imm, in_modifier,
+ op_modifier, op, timeout);
+ }
+ return mlx4_slave_cmd(dev, in_param, out_param, out_is_imm,
+ in_modifier, op_modifier, op, timeout);
}
EXPORT_SYMBOL_GPL(__mlx4_cmd);
+
+static int mlx4_ARM_COMM_CHANNEL(struct mlx4_dev *dev)
+{
+ return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_ARM_COMM_CHANNEL,
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
+}
+
+static int mlx4_ACCESS_MEM(struct mlx4_dev *dev, u64 master_addr,
+ int slave, u64 slave_addr,
+ int size, int is_read)
+{
+ u64 in_param;
+ u64 out_param;
+
+ if ((slave_addr & 0xfff) | (master_addr & 0xfff) |
+ (slave & ~0x7f) | (size & 0xff)) {
+ mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx "
+ "master_addr:0x%llx slave_id:%d size:%d\n",
+ slave_addr, master_addr, slave, size);
+ return -EINVAL;
+ }
+
+ if (is_read) {
+ in_param = (u64) slave | slave_addr;
+ out_param = (u64) dev->caps.function | master_addr;
+ } else {
+ in_param = (u64) dev->caps.function | master_addr;
+ out_param = (u64) slave | slave_addr;
+ }
+
+ return mlx4_cmd_imm(dev, in_param, &out_param, size, 0,
+ MLX4_CMD_ACCESS_MEM,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
+}
+
+int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ u64 in_param;
+ u64 out_param;
+ int err;
+
+ in_param = cmd->has_inbox ? (u64) inbox->dma : vhcr->in_param;
+ out_param = cmd->has_outbox ? (u64) outbox->dma : vhcr->out_param;
+ if (cmd->encode_slave_id) {
+ in_param &= 0xffffffffffffff00ll;
+ in_param |= slave;
+ }
+
+ err = __mlx4_cmd(dev, in_param, &out_param, cmd->out_is_imm,
+ vhcr->in_modifier, vhcr->op_modifier, vhcr->op,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
+
+ if (cmd->out_is_imm)
+ vhcr->out_param = out_param;
+
+ return err;
+}
+
+static struct mlx4_cmd_info cmd_info[] = {
+ {
+ .opcode = MLX4_CMD_QUERY_FW,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = NULL
+ },
+ {
+ .opcode = MLX4_CMD_QUERY_HCA,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = NULL
+ },
+ {
+ .opcode = MLX4_CMD_QUERY_DEV_CAP,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = NULL
+ },
+ {
+ .opcode = MLX4_CMD_QUERY_FUNC_CAP,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_QUERY_FUNC_CAP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_QUERY_ADAPTER,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = NULL
+ },
+ {
+ .opcode = MLX4_CMD_INIT_PORT,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_INIT_PORT_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_CLOSE_PORT,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_CLOSE_PORT_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_QUERY_PORT,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_QUERY_PORT_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_SET_PORT,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_SET_PORT_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_MAP_EQ,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_MAP_EQ_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_SW2HW_EQ,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = true,
+ .verify = NULL,
+ .wrapper = mlx4_SW2HW_EQ_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_HW_HEALTH_CHECK,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = NULL
+ },
+ {
+ .opcode = MLX4_CMD_NOP,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = NULL
+ },
+ {
+ .opcode = MLX4_CMD_ALLOC_RES,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = true,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_ALLOC_RES_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_FREE_RES,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_FREE_RES_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_SW2HW_MPT,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = true,
+ .verify = NULL,
+ .wrapper = mlx4_SW2HW_MPT_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_QUERY_MPT,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_QUERY_MPT_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_HW2SW_MPT,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_HW2SW_MPT_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_READ_MTT,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = NULL
+ },
+ {
+ .opcode = MLX4_CMD_WRITE_MTT,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_WRITE_MTT_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_SYNC_TPT,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = NULL
+ },
+ {
+ .opcode = MLX4_CMD_HW2SW_EQ,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = true,
+ .verify = NULL,
+ .wrapper = mlx4_HW2SW_EQ_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_QUERY_EQ,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = true,
+ .verify = NULL,
+ .wrapper = mlx4_QUERY_EQ_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_SW2HW_CQ,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = true,
+ .verify = NULL,
+ .wrapper = mlx4_SW2HW_CQ_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_HW2SW_CQ,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_HW2SW_CQ_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_QUERY_CQ,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_QUERY_CQ_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_MODIFY_CQ,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = true,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_MODIFY_CQ_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_SW2HW_SRQ,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = true,
+ .verify = NULL,
+ .wrapper = mlx4_SW2HW_SRQ_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_HW2SW_SRQ,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_HW2SW_SRQ_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_QUERY_SRQ,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_QUERY_SRQ_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_ARM_SRQ,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_ARM_SRQ_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_RST2INIT_QP,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = true,
+ .verify = NULL,
+ .wrapper = mlx4_RST2INIT_QP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_INIT2INIT_QP,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_GEN_QP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_INIT2RTR_QP,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_INIT2RTR_QP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_RTR2RTS_QP,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_GEN_QP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_RTS2RTS_QP,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_GEN_QP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_SQERR2RTS_QP,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_GEN_QP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_2ERR_QP,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_GEN_QP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_RTS2SQD_QP,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_GEN_QP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_SQD2SQD_QP,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_GEN_QP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_SQD2RTS_QP,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_GEN_QP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_2RST_QP,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_2RST_QP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_QUERY_QP,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_GEN_QP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_SUSPEND_QP,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_GEN_QP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_UNSUSPEND_QP,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_GEN_QP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_QUERY_IF_STAT,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_QUERY_IF_STAT_wrapper
+ },
+ /* Native multicast commands are not available for guests */
+ {
+ .opcode = MLX4_CMD_QP_ATTACH,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_QP_ATTACH_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_PROMISC,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_PROMISC_wrapper
+ },
+ /* Ethernet specific commands */
+ {
+ .opcode = MLX4_CMD_SET_VLAN_FLTR,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_SET_VLAN_FLTR_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_SET_MCAST_FLTR,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_SET_MCAST_FLTR_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_DUMP_ETH_STATS,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_DUMP_ETH_STATS_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_INFORM_FLR_DONE,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = NULL
+ },
+};
+
+static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr_cmd *in_vhcr)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_cmd_info *cmd = NULL;
+ struct mlx4_vhcr_cmd *vhcr_cmd = in_vhcr ? in_vhcr : priv->mfunc.vhcr;
+ struct mlx4_vhcr *vhcr;
+ struct mlx4_cmd_mailbox *inbox = NULL;
+ struct mlx4_cmd_mailbox *outbox = NULL;
+ u64 in_param;
+ u64 out_param;
+ int ret = 0;
+ int i;
+ int err = 0;
+
+ /* Create sw representation of Virtual HCR */
+ vhcr = kzalloc(sizeof(struct mlx4_vhcr), GFP_KERNEL);
+ if (!vhcr)
+ return -ENOMEM;
+
+ /* DMA in the vHCR */
+ if (!in_vhcr) {
+ ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
+ priv->mfunc.master.slave_state[slave].vhcr_dma,
+ ALIGN(sizeof(struct mlx4_vhcr_cmd),
+ MLX4_ACCESS_MEM_ALIGN), 1);
+ if (ret) {
+ mlx4_err(dev, "%s:Failed reading vhcr"
+ "ret: 0x%x\n", __func__, ret);
+ kfree(vhcr);
+ return ret;
+ }
+ }
+
+ /* Fill SW VHCR fields */
+ vhcr->in_param = be64_to_cpu(vhcr_cmd->in_param);
+ vhcr->out_param = be64_to_cpu(vhcr_cmd->out_param);
+ vhcr->in_modifier = be32_to_cpu(vhcr_cmd->in_modifier);
+ vhcr->token = be16_to_cpu(vhcr_cmd->token);
+ vhcr->op = be16_to_cpu(vhcr_cmd->opcode) & 0xfff;
+ vhcr->op_modifier = (u8) (be16_to_cpu(vhcr_cmd->opcode) >> 12);
+ vhcr->e_bit = vhcr_cmd->flags & (1 << 6);
+
+ /* Lookup command */
+ for (i = 0; i < ARRAY_SIZE(cmd_info); ++i) {
+ if (vhcr->op == cmd_info[i].opcode) {
+ cmd = &cmd_info[i];
+ break;
+ }
+ }
+ if (!cmd) {
+ mlx4_err(dev, "Unknown command:0x%x accepted from slave:%d\n",
+ vhcr->op, slave);
+ vhcr_cmd->status = CMD_STAT_BAD_PARAM;
+ goto out_status;
+ }
+
+ /* Read inbox */
+ if (cmd->has_inbox) {
+ vhcr->in_param &= INBOX_MASK;
+ inbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(inbox)) {
+ vhcr_cmd->status = CMD_STAT_BAD_SIZE;
+ inbox = NULL;
+ goto out_status;
+ }
+
+ if (mlx4_ACCESS_MEM(dev, inbox->dma, slave,
+ vhcr->in_param,
+ MLX4_MAILBOX_SIZE, 1)) {
+ mlx4_err(dev, "%s: Failed reading inbox (cmd:0x%x)\n",
+ __func__, cmd->opcode);
+ vhcr_cmd->status = CMD_STAT_INTERNAL_ERR;
+ goto out_status;
+ }
+ }
+
+ /* Apply permission and bound checks if applicable */
+ if (cmd->verify && cmd->verify(dev, slave, vhcr, inbox)) {
+ mlx4_warn(dev, "Command:0x%x from slave: %d failed protection "
+ "checks for resource_id:%d\n", vhcr->op, slave,
+ vhcr->in_modifier);
+ vhcr_cmd->status = CMD_STAT_BAD_OP;
+ goto out_status;
+ }
+
+ /* Allocate outbox */
+ if (cmd->has_outbox) {
+ outbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(outbox)) {
+ vhcr_cmd->status = CMD_STAT_BAD_SIZE;
+ outbox = NULL;
+ goto out_status;
+ }
+ }
+
+ /* Execute the command! */
+ if (cmd->wrapper) {
+ err = cmd->wrapper(dev, slave, vhcr, inbox, outbox,
+ cmd);
+ if (cmd->out_is_imm)
+ vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
+ } else {
+ in_param = cmd->has_inbox ? (u64) inbox->dma :
+ vhcr->in_param;
+ out_param = cmd->has_outbox ? (u64) outbox->dma :
+ vhcr->out_param;
+ err = __mlx4_cmd(dev, in_param, &out_param,
+ cmd->out_is_imm, vhcr->in_modifier,
+ vhcr->op_modifier, vhcr->op,
+ MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+
+ if (cmd->out_is_imm) {
+ vhcr->out_param = out_param;
+ vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
+ }
+ }
+
+ if (err) {
+ mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with"
+ " error:%d, status %d\n",
+ vhcr->op, slave, vhcr->errno, err);
+ vhcr_cmd->status = mlx4_errno_to_status(err);
+ goto out_status;
+ }
+
+
+ /* Write outbox if command completed successfully */
+ if (cmd->has_outbox && !vhcr_cmd->status) {
+ ret = mlx4_ACCESS_MEM(dev, outbox->dma, slave,
+ vhcr->out_param,
+ MLX4_MAILBOX_SIZE, MLX4_CMD_WRAPPED);
+ if (ret) {
+ /* If we failed to write back the outbox after the
+ * command was successfully executed, we must fail this
+ * slave, as it is now in an undefined state */
+ mlx4_err(dev, "%s:Failed writing outbox\n", __func__);
+ goto out;
+ }
+ }
+
+out_status:
+ /* DMA back vhcr result */
+ if (!in_vhcr) {
+ ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
+ priv->mfunc.master.slave_state[slave].vhcr_dma,
+ ALIGN(sizeof(struct mlx4_vhcr),
+ MLX4_ACCESS_MEM_ALIGN),
+ MLX4_CMD_WRAPPED);
+ if (ret)
+ mlx4_err(dev, "%s:Failed writing vhcr result\n",
+ __func__);
+ else if (vhcr->e_bit &&
+ mlx4_GEN_EQE(dev, slave, &priv->mfunc.master.cmd_eqe))
+ mlx4_warn(dev, "Failed to generate command completion "
+ "eqe for slave %d\n", slave);
+ }
+
+out:
+ kfree(vhcr);
+ mlx4_free_cmd_mailbox(dev, inbox);
+ mlx4_free_cmd_mailbox(dev, outbox);
+ return ret;
+}
+
+static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
+ u16 param, u8 toggle)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
+ u32 reply;
+ u32 slave_status = 0;
+ u8 is_going_down = 0;
+
+ slave_state[slave].comm_toggle ^= 1;
+ reply = (u32) slave_state[slave].comm_toggle << 31;
+ if (toggle != slave_state[slave].comm_toggle) {
+ mlx4_warn(dev, "Incorrect toggle %d from slave %d. *** MASTER"
+ "STATE COMPROMISIED ***\n", toggle, slave);
+ goto reset_slave;
+ }
+ if (cmd == MLX4_COMM_CMD_RESET) {
+ mlx4_warn(dev, "Received reset from slave:%d\n", slave);
+ slave_state[slave].active = false;
+ /* Check if we are in the middle of the FLR process;
+ if so, return "retry" status to the slave */
+ if (MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) {
+ slave_status = MLX4_DELAY_RESET_SLAVE;
+ goto inform_slave_state;
+ }
+
+ /* write the version in the event field */
+ reply |= mlx4_comm_get_version();
+
+ goto reset_slave;
+ }
+ /* Command from slave in the middle of FLR */
+ if (cmd != MLX4_COMM_CMD_RESET &&
+ MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) {
+ mlx4_warn(dev, "slave:%d is Trying to run cmd(0x%x) "
+ "in the middle of FLR\n", slave, cmd);
+ return;
+ }
+
+ switch (cmd) {
+ case MLX4_COMM_CMD_VHCR0:
+ if (slave_state[slave].last_cmd != MLX4_COMM_CMD_RESET)
+ goto reset_slave;
+ slave_state[slave].vhcr_dma = ((u64) param) << 48;
+ priv->mfunc.master.slave_state[slave].cookie = 0;
+ mutex_init(&priv->mfunc.master.gen_eqe_mutex[slave]);
+ break;
+ case MLX4_COMM_CMD_VHCR1:
+ if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR0)
+ goto reset_slave;
+ slave_state[slave].vhcr_dma |= ((u64) param) << 32;
+ break;
+ case MLX4_COMM_CMD_VHCR2:
+ if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR1)
+ goto reset_slave;
+ slave_state[slave].vhcr_dma |= ((u64) param) << 16;
+ break;
+ case MLX4_COMM_CMD_VHCR_EN:
+ if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR2)
+ goto reset_slave;
+ slave_state[slave].vhcr_dma |= param;
+ slave_state[slave].active = true;
+ break;
+ case MLX4_COMM_CMD_VHCR_POST:
+ if ((slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_EN) &&
+ (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_POST))
+ goto reset_slave;
+ down(&priv->cmd.slave_sem);
+ if (mlx4_master_process_vhcr(dev, slave, NULL)) {
+ mlx4_err(dev, "Failed processing vhcr for slave:%d,"
+ " reseting slave.\n", slave);
+ up(&priv->cmd.slave_sem);
+ goto reset_slave;
+ }
+ up(&priv->cmd.slave_sem);
+ break;
+ default:
+ mlx4_warn(dev, "Bad comm cmd:%d from slave:%d\n", cmd, slave);
+ goto reset_slave;
+ }
+ spin_lock(&priv->mfunc.master.slave_state_lock);
+ if (!slave_state[slave].is_slave_going_down)
+ slave_state[slave].last_cmd = cmd;
+ else
+ is_going_down = 1;
+ spin_unlock(&priv->mfunc.master.slave_state_lock);
+ if (is_going_down) {
+ mlx4_warn(dev, "Slave is going down aborting command(%d)"
+ " executing from slave:%d\n",
+ cmd, slave);
+ return;
+ }
+ __raw_writel((__force u32) cpu_to_be32(reply),
+ &priv->mfunc.comm[slave].slave_read);
+ mmiowb();
+
+ return;
+
+reset_slave:
+ /* cleanup any slave resources */
+ mlx4_delete_all_resources_for_slave(dev, slave);
+ spin_lock(&priv->mfunc.master.slave_state_lock);
+ if (!slave_state[slave].is_slave_going_down)
+ slave_state[slave].last_cmd = MLX4_COMM_CMD_RESET;
+ spin_unlock(&priv->mfunc.master.slave_state_lock);
+ /* With slave in the middle of FLR, no need to clean resources again. */
+inform_slave_state:
+ memset(&slave_state[slave].event_eq, 0,
+ sizeof(struct mlx4_slave_event_eq_info));
+ __raw_writel((__force u32) cpu_to_be32(reply),
+ &priv->mfunc.comm[slave].slave_read);
+ wmb();
+}
+
+/* master command processing */
+void mlx4_master_comm_channel(struct work_struct *work)
+{
+ struct mlx4_mfunc_master_ctx *master =
+ container_of(work,
+ struct mlx4_mfunc_master_ctx,
+ comm_work);
+ struct mlx4_mfunc *mfunc =
+ container_of(master, struct mlx4_mfunc, master);
+ struct mlx4_priv *priv =
+ container_of(mfunc, struct mlx4_priv, mfunc);
+ struct mlx4_dev *dev = &priv->dev;
+ __be32 *bit_vec;
+ u32 comm_cmd;
+ u32 vec;
+ int i, j, slave;
+ int toggle;
+ int served = 0;
+ int reported = 0;
+ u32 slt;
+
+ bit_vec = master->comm_arm_bit_vector;
+ for (i = 0; i < COMM_CHANNEL_BIT_ARRAY_SIZE; i++) {
+ vec = be32_to_cpu(bit_vec[i]);
+ for (j = 0; j < 32; j++) {
+ if (!(vec & (1 << j)))
+ continue;
+ ++reported;
+ slave = (i * 32) + j;
+ comm_cmd = swab32(readl(
+ &mfunc->comm[slave].slave_write));
+ slt = swab32(readl(&mfunc->comm[slave].slave_read))
+ >> 31;
+ toggle = comm_cmd >> 31;
+ if (toggle != slt) {
+ if (master->slave_state[slave].comm_toggle
+ != slt) {
+ printk(KERN_INFO "slave %d out of sync."
+ " read toggle %d, state toggle %d. "
+ "Resynching.\n", slave, slt,
+ master->slave_state[slave].comm_toggle);
+ master->slave_state[slave].comm_toggle =
+ slt;
+ }
+ mlx4_master_do_cmd(dev, slave,
+ comm_cmd >> 16 & 0xff,
+ comm_cmd & 0xffff, toggle);
+ ++served;
+ }
+ }
+ }
+
+ if (reported && reported != served)
+ mlx4_warn(dev, "Got command event with bitmask from %d slaves"
+ " but %d were served\n",
+ reported, served);
+
+ if (mlx4_ARM_COMM_CHANNEL(dev))
+ mlx4_warn(dev, "Failed to arm comm channel events\n");
+}
+
+static int sync_toggles(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int wr_toggle;
+ int rd_toggle;
+ unsigned long end;
+
+ wr_toggle = swab32(readl(&priv->mfunc.comm->slave_write)) >> 31;
+ end = jiffies + msecs_to_jiffies(5000);
+
+ while (time_before(jiffies, end)) {
+ rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read)) >> 31;
+ if (rd_toggle == wr_toggle) {
+ priv->cmd.comm_toggle = rd_toggle;
+ return 0;
+ }
+
+ cond_resched();
+ }
+
+ /*
+ * we could reach here if, for example, the previous VM using this
+ * function misbehaved and left the channel with an unsynced state. We
+ * should fix this here and give this VM a chance to use a properly
+ * synced channel
+ */
+ mlx4_warn(dev, "recovering from previously mis-behaved VM\n");
+ __raw_writel((__force u32) 0, &priv->mfunc.comm->slave_read);
+ __raw_writel((__force u32) 0, &priv->mfunc.comm->slave_write);
+ priv->cmd.comm_toggle = 0;
+
+ return 0;
+}
+
+int mlx4_multi_func_init(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_slave_state *s_state;
+ int i, err, port;
+
+ priv->mfunc.vhcr = dma_alloc_coherent(&(dev->pdev->dev), PAGE_SIZE,
+ &priv->mfunc.vhcr_dma,
+ GFP_KERNEL);
+ if (!priv->mfunc.vhcr) {
+ mlx4_err(dev, "Couldn't allocate vhcr.\n");
+ return -ENOMEM;
+ }
+
+ if (mlx4_is_master(dev))
+ priv->mfunc.comm =
+ ioremap(pci_resource_start(dev->pdev, priv->fw.comm_bar) +
+ priv->fw.comm_base, MLX4_COMM_PAGESIZE);
+ else
+ priv->mfunc.comm =
+ ioremap(pci_resource_start(dev->pdev, 2) +
+ MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE);
+ if (!priv->mfunc.comm) {
+ mlx4_err(dev, "Couldn't map communication vector.\n");
+ goto err_vhcr;
+ }
+
+ if (mlx4_is_master(dev)) {
+ priv->mfunc.master.slave_state =
+ kzalloc(dev->num_slaves *
+ sizeof(struct mlx4_slave_state), GFP_KERNEL);
+ if (!priv->mfunc.master.slave_state)
+ goto err_comm;
+
+ for (i = 0; i < dev->num_slaves; ++i) {
+ s_state = &priv->mfunc.master.slave_state[i];
+ s_state->last_cmd = MLX4_COMM_CMD_RESET;
+ __raw_writel((__force u32) 0,
+ &priv->mfunc.comm[i].slave_write);
+ __raw_writel((__force u32) 0,
+ &priv->mfunc.comm[i].slave_read);
+ mmiowb();
+ for (port = 1; port <= MLX4_MAX_PORTS; port++) {
+ s_state->vlan_filter[port] =
+ kzalloc(sizeof(struct mlx4_vlan_fltr),
+ GFP_KERNEL);
+ if (!s_state->vlan_filter[port]) {
+ if (--port)
+ kfree(s_state->vlan_filter[port]);
+ goto err_slaves;
+ }
+ INIT_LIST_HEAD(&s_state->mcast_filters[port]);
+ }
+ spin_lock_init(&s_state->lock);
+ }
+
+ memset(&priv->mfunc.master.cmd_eqe, 0, sizeof(struct mlx4_eqe));
+ priv->mfunc.master.cmd_eqe.type = MLX4_EVENT_TYPE_CMD;
+ INIT_WORK(&priv->mfunc.master.comm_work,
+ mlx4_master_comm_channel);
+ INIT_WORK(&priv->mfunc.master.slave_event_work,
+ mlx4_gen_slave_eqe);
+ INIT_WORK(&priv->mfunc.master.slave_flr_event_work,
+ mlx4_master_handle_slave_flr);
+ spin_lock_init(&priv->mfunc.master.slave_state_lock);
+ priv->mfunc.master.comm_wq =
+ create_singlethread_workqueue("mlx4_comm");
+ if (!priv->mfunc.master.comm_wq)
+ goto err_slaves;
+
+ if (mlx4_init_resource_tracker(dev))
+ goto err_thread;
+
+ sema_init(&priv->cmd.slave_sem, 1);
+ err = mlx4_ARM_COMM_CHANNEL(dev);
+ if (err) {
+ mlx4_err(dev, " Failed to arm comm channel eq: %x\n",
+ err);
+ goto err_resource;
+ }
+
+ } else {
+ err = sync_toggles(dev);
+ if (err) {
+ mlx4_err(dev, "Couldn't sync toggles\n");
+ goto err_comm;
+ }
+
+ sema_init(&priv->cmd.slave_sem, 1);
+ }
+ return 0;
+
+err_resource:
+ mlx4_free_resource_tracker(dev);
+err_thread:
+ flush_workqueue(priv->mfunc.master.comm_wq);
+ destroy_workqueue(priv->mfunc.master.comm_wq);
+err_slaves:
+ while (--i) {
+ for (port = 1; port <= MLX4_MAX_PORTS; port++)
+ kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
+ }
+ kfree(priv->mfunc.master.slave_state);
+err_comm:
+ iounmap(priv->mfunc.comm);
+err_vhcr:
+ dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
+ priv->mfunc.vhcr,
+ priv->mfunc.vhcr_dma);
+ priv->mfunc.vhcr = NULL;
+ return -ENOMEM;
+}
+
int mlx4_cmd_init(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -331,22 +1570,51 @@ int mlx4_cmd_init(struct mlx4_dev *dev)
priv->cmd.use_events = 0;
priv->cmd.toggle = 1;
- priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_HCR_BASE,
- MLX4_HCR_SIZE);
- if (!priv->cmd.hcr) {
- mlx4_err(dev, "Couldn't map command register.");
- return -ENOMEM;
+ priv->cmd.hcr = NULL;
+ priv->mfunc.vhcr = NULL;
+
+ if (!mlx4_is_slave(dev)) {
+ priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) +
+ MLX4_HCR_BASE, MLX4_HCR_SIZE);
+ if (!priv->cmd.hcr) {
+ mlx4_err(dev, "Couldn't map command register.\n");
+ return -ENOMEM;
+ }
}
priv->cmd.pool = pci_pool_create("mlx4_cmd", dev->pdev,
MLX4_MAILBOX_SIZE,
MLX4_MAILBOX_SIZE, 0);
- if (!priv->cmd.pool) {
- iounmap(priv->cmd.hcr);
- return -ENOMEM;
- }
+ if (!priv->cmd.pool)
+ goto err_hcr;
return 0;
+
+err_hcr:
+ if (!mlx4_is_slave(dev))
+ iounmap(priv->cmd.hcr);
+ return -ENOMEM;
+}
+
+void mlx4_multi_func_cleanup(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int i, port;
+
+ if (mlx4_is_master(dev)) {
+ flush_workqueue(priv->mfunc.master.comm_wq);
+ destroy_workqueue(priv->mfunc.master.comm_wq);
+ for (i = 0; i < dev->num_slaves; i++) {
+ for (port = 1; port <= MLX4_MAX_PORTS; port++)
+ kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
+ }
+ kfree(priv->mfunc.master.slave_state);
+ iounmap(priv->mfunc.comm);
+ dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
+ priv->mfunc.vhcr,
+ priv->mfunc.vhcr_dma);
+ priv->mfunc.vhcr = NULL;
+ }
}
void mlx4_cmd_cleanup(struct mlx4_dev *dev)
@@ -354,7 +1622,9 @@ void mlx4_cmd_cleanup(struct mlx4_dev *dev)
struct mlx4_priv *priv = mlx4_priv(dev);
pci_pool_destroy(priv->cmd.pool);
- iounmap(priv->cmd.hcr);
+
+ if (!mlx4_is_slave(dev))
+ iounmap(priv->cmd.hcr);
}
/*
@@ -365,6 +1635,7 @@ int mlx4_cmd_use_events(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int i;
+ int err = 0;
priv->cmd.context = kmalloc(priv->cmd.max_cmds *
sizeof (struct mlx4_cmd_context),
@@ -389,11 +1660,10 @@ int mlx4_cmd_use_events(struct mlx4_dev *dev)
; /* nothing */
--priv->cmd.token_mask;
- priv->cmd.use_events = 1;
-
down(&priv->cmd.poll_sem);
+ priv->cmd.use_events = 1;
- return 0;
+ return err;
}
/*
@@ -433,7 +1703,8 @@ struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
}
EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox);
-void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox)
+void mlx4_free_cmd_mailbox(struct mlx4_dev *dev,
+ struct mlx4_cmd_mailbox *mailbox)
{
if (!mailbox)
return;
@@ -442,3 +1713,8 @@ void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbo
kfree(mailbox);
}
EXPORT_SYMBOL_GPL(mlx4_free_cmd_mailbox);
+
+u32 mlx4_comm_get_version(void)
+{
+ return ((u32) CMD_CHAN_IF_REV << 8) | (u32) CMD_CHAN_VER;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index 499a5168892a..475f9d6af955 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -34,9 +34,9 @@
* SOFTWARE.
*/
+#include <linux/init.h>
#include <linux/hardirq.h>
#include <linux/export.h>
-#include <linux/gfp.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/cq.h>
@@ -44,27 +44,6 @@
#include "mlx4.h"
#include "icm.h"
-struct mlx4_cq_context {
- __be32 flags;
- u16 reserved1[3];
- __be16 page_offset;
- __be32 logsize_usrpage;
- __be16 cq_period;
- __be16 cq_max_count;
- u8 reserved2[3];
- u8 comp_eqn;
- u8 log_page_size;
- u8 reserved3[2];
- u8 mtt_base_addr_h;
- __be32 mtt_base_addr_l;
- __be32 last_notified_index;
- __be32 solicit_producer_index;
- __be32 consumer_index;
- __be32 producer_index;
- u32 reserved4[2];
- __be64 db_rec_addr;
-};
-
#define MLX4_CQ_STATUS_OK ( 0 << 28)
#define MLX4_CQ_STATUS_OVERFLOW ( 9 << 28)
#define MLX4_CQ_STATUS_WRITE_FAIL (10 << 28)
@@ -81,7 +60,7 @@ void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn)
cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree,
cqn & (dev->caps.num_cqs - 1));
if (!cq) {
- mlx4_warn(dev, "Completion event for bogus CQ %08x\n", cqn);
+ mlx4_dbg(dev, "Completion event for bogus CQ %08x\n", cqn);
return;
}
@@ -117,23 +96,24 @@ void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type)
static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int cq_num)
{
- return mlx4_cmd(dev, mailbox->dma, cq_num, 0, MLX4_CMD_SW2HW_CQ,
- MLX4_CMD_TIME_CLASS_A);
+ return mlx4_cmd(dev, mailbox->dma | dev->caps.function, cq_num, 0,
+ MLX4_CMD_SW2HW_CQ, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_WRAPPED);
}
static int mlx4_MODIFY_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int cq_num, u32 opmod)
{
return mlx4_cmd(dev, mailbox->dma, cq_num, opmod, MLX4_CMD_MODIFY_CQ,
- MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}
static int mlx4_HW2SW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int cq_num)
{
- return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, cq_num,
- mailbox ? 0 : 1, MLX4_CMD_HW2SW_CQ,
- MLX4_CMD_TIME_CLASS_A);
+ return mlx4_cmd_box(dev, dev->caps.function, mailbox ? mailbox->dma : 0,
+ cq_num, mailbox ? 0 : 1, MLX4_CMD_HW2SW_CQ,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}
int mlx4_cq_modify(struct mlx4_dev *dev, struct mlx4_cq *cq,
@@ -188,6 +168,78 @@ int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq,
}
EXPORT_SYMBOL_GPL(mlx4_cq_resize);
+int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_cq_table *cq_table = &priv->cq_table;
+ int err;
+
+ *cqn = mlx4_bitmap_alloc(&cq_table->bitmap);
+ if (*cqn == -1)
+ return -ENOMEM;
+
+ err = mlx4_table_get(dev, &cq_table->table, *cqn);
+ if (err)
+ goto err_out;
+
+ err = mlx4_table_get(dev, &cq_table->cmpt_table, *cqn);
+ if (err)
+ goto err_put;
+ return 0;
+
+err_put:
+ mlx4_table_put(dev, &cq_table->table, *cqn);
+
+err_out:
+ mlx4_bitmap_free(&cq_table->bitmap, *cqn);
+ return err;
+}
+
+static int mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn)
+{
+ u64 out_param;
+ int err;
+
+ if (mlx4_is_mfunc(dev)) {
+ err = mlx4_cmd_imm(dev, 0, &out_param, RES_CQ,
+ RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ if (err)
+ return err;
+ else {
+ *cqn = get_param_l(&out_param);
+ return 0;
+ }
+ }
+ return __mlx4_cq_alloc_icm(dev, cqn);
+}
+
+void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_cq_table *cq_table = &priv->cq_table;
+
+ mlx4_table_put(dev, &cq_table->cmpt_table, cqn);
+ mlx4_table_put(dev, &cq_table->table, cqn);
+ mlx4_bitmap_free(&cq_table->bitmap, cqn);
+}
+
+static void mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
+{
+ u64 in_param;
+ int err;
+
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&in_param, cqn);
+ err = mlx4_cmd(dev, in_param, RES_CQ, RES_OP_RESERVE_AND_MAP,
+ MLX4_CMD_FREE_RES,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ if (err)
+ mlx4_warn(dev, "Failed freeing cq:%d\n", cqn);
+ } else
+ __mlx4_cq_free_icm(dev, cqn);
+}
+
int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq,
unsigned vector, int collapsed)
@@ -204,23 +256,15 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
cq->vector = vector;
- cq->cqn = mlx4_bitmap_alloc(&cq_table->bitmap);
- if (cq->cqn == -1)
- return -ENOMEM;
-
- err = mlx4_table_get(dev, &cq_table->table, cq->cqn);
- if (err)
- goto err_out;
-
- err = mlx4_table_get(dev, &cq_table->cmpt_table, cq->cqn);
+ err = mlx4_cq_alloc_icm(dev, &cq->cqn);
if (err)
- goto err_put;
+ return err;
spin_lock_irq(&cq_table->lock);
err = radix_tree_insert(&cq_table->tree, cq->cqn, cq);
spin_unlock_irq(&cq_table->lock);
if (err)
- goto err_cmpt_put;
+ goto err_icm;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox)) {
@@ -259,14 +303,8 @@ err_radix:
radix_tree_delete(&cq_table->tree, cq->cqn);
spin_unlock_irq(&cq_table->lock);
-err_cmpt_put:
- mlx4_table_put(dev, &cq_table->cmpt_table, cq->cqn);
-
-err_put:
- mlx4_table_put(dev, &cq_table->table, cq->cqn);
-
-err_out:
- mlx4_bitmap_free(&cq_table->bitmap, cq->cqn);
+err_icm:
+ mlx4_cq_free_icm(dev, cq->cqn);
return err;
}
@@ -292,8 +330,7 @@ void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
complete(&cq->free);
wait_for_completion(&cq->free);
- mlx4_table_put(dev, &cq_table->table, cq->cqn);
- mlx4_bitmap_free(&cq_table->bitmap, cq->cqn);
+ mlx4_cq_free_icm(dev, cq->cqn);
}
EXPORT_SYMBOL_GPL(mlx4_cq_free);
@@ -304,6 +341,8 @@ int mlx4_init_cq_table(struct mlx4_dev *dev)
spin_lock_init(&cq_table->lock);
INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);
+ if (mlx4_is_slave(dev))
+ return 0;
err = mlx4_bitmap_init(&cq_table->bitmap, dev->caps.num_cqs,
dev->caps.num_cqs - 1, dev->caps.reserved_cqs, 0);
@@ -315,6 +354,8 @@ int mlx4_init_cq_table(struct mlx4_dev *dev)
void mlx4_cleanup_cq_table(struct mlx4_dev *dev)
{
+ if (mlx4_is_slave(dev))
+ return;
/* Nothing to do to clean up radix_tree */
mlx4_bitmap_cleanup(&mlx4_priv(dev)->cq_table.bitmap);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 227997d775e8..00b81272e314 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -51,10 +51,7 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
int err;
cq->size = entries;
- if (mode == RX)
- cq->buf_size = cq->size * sizeof(struct mlx4_cqe);
- else
- cq->buf_size = sizeof(struct mlx4_cqe);
+ cq->buf_size = cq->size * sizeof(struct mlx4_cqe);
cq->ring = ring;
cq->is_tx = mode;
@@ -120,7 +117,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
cq->size = priv->rx_ring[cq->ring].actual_size;
err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt, &mdev->priv_uar,
- cq->wqres.db.dma, &cq->mcq, cq->vector, cq->is_tx);
+ cq->wqres.db.dma, &cq->mcq, cq->vector, 0);
if (err)
return err;
@@ -147,6 +144,7 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
if (priv->mdev->dev->caps.comp_pool && cq->vector)
mlx4_release_eq(priv->mdev->dev, cq->vector);
+ cq->vector = 0;
cq->buf_size = 0;
cq->buf = NULL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 74e2a2a8a02b..7dbc6a230779 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -45,13 +45,16 @@ mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
- strncpy(drvinfo->driver, DRV_NAME, 32);
- strncpy(drvinfo->version, DRV_VERSION " (" DRV_RELDATE ")", 32);
- sprintf(drvinfo->fw_version, "%d.%d.%d",
+ strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, DRV_VERSION " (" DRV_RELDATE ")",
+ sizeof(drvinfo->version));
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%d.%d.%d",
(u16) (mdev->dev->caps.fw_ver >> 32),
(u16) ((mdev->dev->caps.fw_ver >> 16) & 0xffff),
(u16) (mdev->dev->caps.fw_ver & 0xffff));
- strncpy(drvinfo->bus_info, pci_name(mdev->dev->pdev), 32);
+ strlcpy(drvinfo->bus_info, pci_name(mdev->dev->pdev),
+ sizeof(drvinfo->bus_info));
drvinfo->n_stats = 0;
drvinfo->regdump_len = 0;
drvinfo->eedump_len = 0;
@@ -103,8 +106,17 @@ static void mlx4_en_get_wol(struct net_device *netdev,
struct mlx4_en_priv *priv = netdev_priv(netdev);
int err = 0;
u64 config = 0;
+ u64 mask;
- if (!(priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_WOL)) {
+ if ((priv->port < 1) || (priv->port > 2)) {
+ en_err(priv, "Failed to get WoL information\n");
+ return;
+ }
+
+ mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 :
+ MLX4_DEV_CAP_FLAG_WOL_PORT2;
+
+ if (!(priv->mdev->dev->caps.flags & mask)) {
wol->supported = 0;
wol->wolopts = 0;
return;
@@ -133,8 +145,15 @@ static int mlx4_en_set_wol(struct net_device *netdev,
struct mlx4_en_priv *priv = netdev_priv(netdev);
u64 config = 0;
int err = 0;
+ u64 mask;
+
+ if ((priv->port < 1) || (priv->port > 2))
+ return -EOPNOTSUPP;
+
+ mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 :
+ MLX4_DEV_CAP_FLAG_WOL_PORT2;
- if (!(priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_WOL))
+ if (!(priv->mdev->dev->caps.flags & mask))
return -EOPNOTSUPP;
if (wol->supported & ~WAKE_MAGIC)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 78d776bc355c..72fa807b69ce 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -45,7 +45,7 @@
#include "mlx4_en.h"
#include "en_port.h"
-static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
@@ -67,9 +67,10 @@ static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
en_err(priv, "failed adding vlan %d\n", vid);
mutex_unlock(&mdev->state_lock);
+ return 0;
}
-static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
@@ -93,6 +94,8 @@ static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
en_err(priv, "Failed configuring VLAN filter\n");
}
mutex_unlock(&mdev->state_lock);
+
+ return 0;
}
u64 mlx4_en_mac_to_u64(u8 *addr)
@@ -133,7 +136,7 @@ static void mlx4_en_do_set_mac(struct work_struct *work)
if (priv->port_up) {
/* Remove old MAC and insert the new one */
err = mlx4_replace_mac(mdev->dev, priv->port,
- priv->base_qpn, priv->mac, 0);
+ priv->base_qpn, priv->mac);
if (err)
en_err(priv, "Failed changing HW MAC address\n");
} else
@@ -148,6 +151,7 @@ static void mlx4_en_clear_list(struct net_device *dev)
struct mlx4_en_priv *priv = netdev_priv(dev);
kfree(priv->mc_addrs);
+ priv->mc_addrs = NULL;
priv->mc_addrs_cnt = 0;
}
@@ -167,6 +171,7 @@ static void mlx4_en_cache_mclist(struct net_device *dev)
i = 0;
netdev_for_each_mc_addr(ha, dev)
memcpy(mc_addrs + i++ * ETH_ALEN, ha->addr, ETH_ALEN);
+ mlx4_en_clear_list(dev);
priv->mc_addrs = mc_addrs;
priv->mc_addrs_cnt = mc_addrs_cnt;
}
@@ -204,6 +209,16 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
goto out;
}
+ if (!netif_carrier_ok(dev)) {
+ if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
+ if (priv->port_state.link_state) {
+ priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
+ netif_carrier_on(dev);
+ en_dbg(LINK, priv, "Link Up\n");
+ }
+ }
+ }
+
/*
 * Promiscuous mode: disable all filters
*/
@@ -599,12 +614,12 @@ int mlx4_en_start_port(struct net_device *dev)
++rx_index;
}
- /* Set port mac number */
- en_dbg(DRV, priv, "Setting mac for port %d\n", priv->port);
- err = mlx4_register_mac(mdev->dev, priv->port,
- priv->mac, &priv->base_qpn, 0);
+ /* Set qp number */
+ en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port);
+ err = mlx4_get_eth_qp(mdev->dev, priv->port,
+ priv->mac, &priv->base_qpn);
if (err) {
- en_err(priv, "Failed setting port mac\n");
+ en_err(priv, "Failed getting eth qp\n");
goto cq_err;
}
mdev->mac_removed[priv->port] = 0;
@@ -699,7 +714,7 @@ tx_err:
mlx4_en_release_rss_steer(priv);
mac_err:
- mlx4_unregister_mac(mdev->dev, priv->port, priv->base_qpn);
+ mlx4_put_eth_qp(mdev->dev, priv->port, priv->mac, priv->base_qpn);
cq_err:
while (rx_index--)
mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]);
@@ -745,10 +760,6 @@ void mlx4_en_stop_port(struct net_device *dev)
/* Flush multicast filter */
mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);
- /* Unregister Mac address for the port */
- mlx4_unregister_mac(mdev->dev, priv->port, priv->base_qpn);
- mdev->mac_removed[priv->port] = 1;
-
/* Free TX Rings */
for (i = 0; i < priv->tx_ring_num; i++) {
mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[i]);
@@ -762,6 +773,10 @@ void mlx4_en_stop_port(struct net_device *dev)
/* Free RSS qps */
mlx4_en_release_rss_steer(priv);
+ /* Unregister Mac address for the port */
+ mlx4_put_eth_qp(mdev->dev, priv->port, priv->mac, priv->base_qpn);
+ mdev->mac_removed[priv->port] = 1;
+
/* Free RX Rings */
for (i = 0; i < priv->rx_ring_num; i++) {
mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
@@ -974,6 +989,21 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
+static int mlx4_en_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct mlx4_en_priv *priv = netdev_priv(netdev);
+
+ if (features & NETIF_F_LOOPBACK)
+ priv->ctrl_flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
+ else
+ priv->ctrl_flags &=
+ cpu_to_be32(~MLX4_WQE_CTRL_FORCE_LOOPBACK);
+
+ return 0;
+
+}
+
static const struct net_device_ops mlx4_netdev_ops = {
.ndo_open = mlx4_en_open,
.ndo_stop = mlx4_en_close,
@@ -990,6 +1020,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = mlx4_en_netpoll,
#endif
+ .ndo_set_features = mlx4_en_set_features,
};
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
@@ -1022,6 +1053,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
priv->port = port;
priv->port_up = false;
priv->flags = prof->flags;
+ priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
+ MLX4_WQE_CTRL_SOLICITED);
priv->tx_ring_num = prof->tx_ring_num;
priv->rx_ring_num = prof->rx_ring_num;
priv->mac_index = -1;
@@ -1088,6 +1121,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
dev->features = dev->hw_features | NETIF_F_HIGHDMA |
NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
NETIF_F_HW_VLAN_FILTER;
+ dev->hw_features |= NETIF_F_LOOPBACK;
mdev->pndev[port] = dev;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index 03c84cd78cde..331791467a22 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -41,13 +41,6 @@
#include "mlx4_en.h"
-int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port,
- u64 mac, u64 clear, u8 mode)
-{
- return mlx4_cmd(dev, (mac | (clear << 63)), port, mode,
- MLX4_CMD_SET_MCAST_FLTR, MLX4_CMD_TIME_CLASS_B);
-}
-
int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, struct mlx4_en_priv *priv)
{
struct mlx4_cmd_mailbox *mailbox;
@@ -72,76 +65,7 @@ int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, struct mlx4_en_priv *priv)
filter->entry[i] = cpu_to_be32(entry);
}
err = mlx4_cmd(dev, mailbox->dma, priv->port, 0, MLX4_CMD_SET_VLAN_FLTR,
- MLX4_CMD_TIME_CLASS_B);
- mlx4_free_cmd_mailbox(dev, mailbox);
- return err;
-}
-
-
-int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
- u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx)
-{
- struct mlx4_cmd_mailbox *mailbox;
- struct mlx4_set_port_general_context *context;
- int err;
- u32 in_mod;
-
- mailbox = mlx4_alloc_cmd_mailbox(dev);
- if (IS_ERR(mailbox))
- return PTR_ERR(mailbox);
- context = mailbox->buf;
- memset(context, 0, sizeof *context);
-
- context->flags = SET_PORT_GEN_ALL_VALID;
- context->mtu = cpu_to_be16(mtu);
- context->pptx = (pptx * (!pfctx)) << 7;
- context->pfctx = pfctx;
- context->pprx = (pprx * (!pfcrx)) << 7;
- context->pfcrx = pfcrx;
-
- in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
- err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
- MLX4_CMD_TIME_CLASS_B);
-
- mlx4_free_cmd_mailbox(dev, mailbox);
- return err;
-}
-
-int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
- u8 promisc)
-{
- struct mlx4_cmd_mailbox *mailbox;
- struct mlx4_set_port_rqp_calc_context *context;
- int err;
- u32 in_mod;
- u32 m_promisc = (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) ?
- MCAST_DIRECT : MCAST_DEFAULT;
-
- if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER &&
- dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER)
- return 0;
-
- mailbox = mlx4_alloc_cmd_mailbox(dev);
- if (IS_ERR(mailbox))
- return PTR_ERR(mailbox);
- context = mailbox->buf;
- memset(context, 0, sizeof *context);
-
- context->base_qpn = cpu_to_be32(base_qpn);
- context->n_mac = dev->caps.log_num_macs;
- context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT |
- base_qpn);
- context->mcast = cpu_to_be32(m_promisc << SET_PORT_MC_PROMISC_SHIFT |
- base_qpn);
- context->intra_no_vlan = 0;
- context->no_vlan = MLX4_NO_VLAN_IDX;
- context->intra_vlan_miss = 0;
- context->vlan_miss = MLX4_VLAN_MISS_IDX;
-
- in_mod = MLX4_SET_PORT_RQP_CALC << 8 | port;
- err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
- MLX4_CMD_TIME_CLASS_B);
-
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
@@ -159,7 +83,8 @@ int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port)
return PTR_ERR(mailbox);
memset(mailbox->buf, 0, sizeof(*qport_context));
err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
- MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B);
+ MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_WRAPPED);
if (err)
goto out;
qport_context = mailbox->buf;
@@ -204,7 +129,8 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
return PTR_ERR(mailbox);
memset(mailbox->buf, 0, sizeof(*mlx4_en_stats));
err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0,
- MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B);
+ MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_WRAPPED);
if (err)
goto out;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.h b/drivers/net/ethernet/mellanox/mlx4/en_port.h
index 19eb244f5165..6934fd7e66ed 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.h
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.h
@@ -39,49 +39,6 @@
#define SET_PORT_PROMISC_SHIFT 31
#define SET_PORT_MC_PROMISC_SHIFT 30
-enum {
- MLX4_CMD_SET_VLAN_FLTR = 0x47,
- MLX4_CMD_SET_MCAST_FLTR = 0x48,
- MLX4_CMD_DUMP_ETH_STATS = 0x49,
-};
-
-enum {
- MCAST_DIRECT_ONLY = 0,
- MCAST_DIRECT = 1,
- MCAST_DEFAULT = 2
-};
-
-struct mlx4_set_port_general_context {
- u8 reserved[3];
- u8 flags;
- u16 reserved2;
- __be16 mtu;
- u8 pptx;
- u8 pfctx;
- u16 reserved3;
- u8 pprx;
- u8 pfcrx;
- u16 reserved4;
-};
-
-struct mlx4_set_port_rqp_calc_context {
- __be32 base_qpn;
- u8 rererved;
- u8 n_mac;
- u8 n_vlan;
- u8 n_prio;
- u8 reserved2[3];
- u8 mac_miss;
- u8 intra_no_vlan;
- u8 no_vlan;
- u8 intra_vlan_miss;
- u8 vlan_miss;
- u8 reserved3[3];
- u8 no_vlan_prio;
- __be32 promisc;
- __be32 mcast;
-};
-
#define VLAN_FLTR_SIZE 128
struct mlx4_set_vlan_fltr_mbox {
__be32 entry[VLAN_FLTR_SIZE];
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_resources.c b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
index 0dfb4ec8a9dd..bcbc54c16947 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_resources.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
@@ -44,7 +44,7 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
struct mlx4_en_dev *mdev = priv->mdev;
memset(context, 0, sizeof *context);
- context->flags = cpu_to_be32(7 << 16 | rss << 13);
+ context->flags = cpu_to_be32(7 << 16 | rss << MLX4_RSS_QPC_FLAG_OFFSET);
context->pd = cpu_to_be32(mdev->priv_pdn);
context->mtu_msgmax = 0xff;
if (!is_tx && !rss)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index c2df6c358603..e8d6ad2dce0a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -541,6 +541,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
unsigned int length;
int polled = 0;
int ip_summed;
+ struct ethhdr *ethh;
+ u64 s_mac;
if (!priv->port_up)
return 0;
@@ -577,6 +579,19 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
goto next;
}
+ /* Get a pointer to the first fragment since we don't have an skb yet,
+ * and cast it to an ethhdr struct */
+ ethh = (struct ethhdr *)(page_address(skb_frags[0].page) +
+ skb_frags[0].offset);
+ s_mac = mlx4_en_mac_to_u64(ethh->h_source);
+
+ /* If the source MAC equals our own MAC and we are not running
+ * the selftest, or loopback is disabled - drop the packet */
+ if (s_mac == priv->mac &&
+ (!(dev->features & NETIF_F_LOOPBACK) ||
+ !priv->validate_loopback))
+ goto next;
+
/*
* Packet is OK - process it.
*/
@@ -837,9 +852,10 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_rss_map *rss_map = &priv->rss_map;
struct mlx4_qp_context context;
- struct mlx4_en_rss_context *rss_context;
+ struct mlx4_rss_context *rss_context;
void *ptr;
- u8 rss_mask = 0x3f;
+ u8 rss_mask = (MLX4_RSS_IPV4 | MLX4_RSS_TCP_IPV4 | MLX4_RSS_IPV6 |
+ MLX4_RSS_TCP_IPV6);
int i, qpn;
int err = 0;
int good_qps = 0;
@@ -877,18 +893,21 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
priv->rx_ring[0].cqn, &context);
- ptr = ((void *) &context) + 0x3c;
+ ptr = ((void *) &context) + offsetof(struct mlx4_qp_context, pri_path)
+ + MLX4_RSS_OFFSET_IN_QPC_PRI_PATH;
rss_context = ptr;
rss_context->base_qpn = cpu_to_be32(ilog2(priv->rx_ring_num) << 24 |
(rss_map->base_qpn));
rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn);
+ if (priv->mdev->profile.udp_rss) {
+ rss_mask |= MLX4_RSS_UDP_IPV4 | MLX4_RSS_UDP_IPV6;
+ rss_context->base_qpn_udp = rss_context->default_qpn;
+ }
rss_context->flags = rss_mask;
- rss_context->hash_fn = 1;
+ rss_context->hash_fn = MLX4_RSS_HASH_TOP;
for (i = 0; i < 10; i++)
rss_context->rss_key[i] = rsskey[i];
- if (priv->mdev->profile.udp_rss)
- rss_context->base_qpn_udp = rss_context->default_qpn;
err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context,
&rss_map->indir_qp, &rss_map->indir_state);
if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
index 9fdbcecd499d..bf2e5d3f177c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
@@ -43,7 +43,7 @@
static int mlx4_en_test_registers(struct mlx4_en_priv *priv)
{
return mlx4_cmd(priv->mdev->dev, 0, 0, 0, MLX4_CMD_HW_HEALTH_CHECK,
- MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}
static int mlx4_en_test_loopback_xmit(struct mlx4_en_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index d901b4267537..9ef9038d0629 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -307,59 +307,60 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
return cnt;
}
-
static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_cq *mcq = &cq->mcq;
struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
- struct mlx4_cqe *cqe = cq->buf;
+ struct mlx4_cqe *cqe;
u16 index;
- u16 new_index;
+ u16 new_index, ring_index;
u32 txbbs_skipped = 0;
- u32 cq_last_sav;
-
- /* index always points to the first TXBB of the last polled descriptor */
- index = ring->cons & ring->size_mask;
- new_index = be16_to_cpu(cqe->wqe_index) & ring->size_mask;
- if (index == new_index)
- return;
+ u32 cons_index = mcq->cons_index;
+ int size = cq->size;
+ u32 size_mask = ring->size_mask;
+ struct mlx4_cqe *buf = cq->buf;
if (!priv->port_up)
return;
- /*
- * We use a two-stage loop:
- * - the first samples the HW-updated CQE
- * - the second frees TXBBs until the last sample
- * This lets us amortize CQE cache misses, while still polling the CQ
- * until is quiescent.
- */
- cq_last_sav = mcq->cons_index;
- do {
+ index = cons_index & size_mask;
+ cqe = &buf[index];
+ ring_index = ring->cons & size_mask;
+
+ /* Process all completed CQEs */
+ while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
+ cons_index & size)) {
+ /*
+ * make sure we read the CQE after we read the
+ * ownership bit
+ */
+ rmb();
+
+ /* Skip over last polled CQE */
+ new_index = be16_to_cpu(cqe->wqe_index) & size_mask;
+
do {
- /* Skip over last polled CQE */
- index = (index + ring->last_nr_txbb) & ring->size_mask;
txbbs_skipped += ring->last_nr_txbb;
-
- /* Poll next CQE */
+ ring_index = (ring_index + ring->last_nr_txbb) & size_mask;
+ /* free next descriptor */
ring->last_nr_txbb = mlx4_en_free_tx_desc(
- priv, ring, index,
- !!((ring->cons + txbbs_skipped) &
- ring->size));
- ++mcq->cons_index;
-
- } while (index != new_index);
+ priv, ring, ring_index,
+ !!((ring->cons + txbbs_skipped) &
+ ring->size));
+ } while (ring_index != new_index);
+
+ ++cons_index;
+ index = cons_index & size_mask;
+ cqe = &buf[index];
+ }
- new_index = be16_to_cpu(cqe->wqe_index) & ring->size_mask;
- } while (index != new_index);
- AVG_PERF_COUNTER(priv->pstats.tx_coal_avg,
- (u32) (mcq->cons_index - cq_last_sav));
/*
* To prevent CQ overflow we first update CQ consumer and only then
* the ring consumer.
*/
+ mcq->cons_index = cons_index;
mlx4_cq_set_ci(mcq);
wmb();
ring->cons += txbbs_skipped;
@@ -565,7 +566,8 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
inl->byte_count = cpu_to_be32(1 << 31 | (skb->len - spc));
}
tx_desc->ctrl.vlan_tag = cpu_to_be16(*vlan_tag);
- tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * !!(*vlan_tag);
+ tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN *
+ (!!vlan_tx_tag_present(skb));
tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
}
@@ -676,27 +678,25 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 /* Prepare ctrl segment apart from opcode+ownership, which depends on
* whether LSO is used */
tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag);
- tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * !!vlan_tag;
+ tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN *
+ !!vlan_tx_tag_present(skb);
tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
- tx_desc->ctrl.srcrb_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
- MLX4_WQE_CTRL_SOLICITED);
+ tx_desc->ctrl.srcrb_flags = priv->ctrl_flags;
if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
MLX4_WQE_CTRL_TCP_UDP_CSUM);
ring->tx_csum++;
}
- if (unlikely(priv->validate_loopback)) {
- /* Copy dst mac address to wqe */
- skb_reset_mac_header(skb);
- ethh = eth_hdr(skb);
- if (ethh && ethh->h_dest) {
- mac = mlx4_en_mac_to_u64(ethh->h_dest);
- mac_h = (u32) ((mac & 0xffff00000000ULL) >> 16);
- mac_l = (u32) (mac & 0xffffffff);
- tx_desc->ctrl.srcrb_flags |= cpu_to_be32(mac_h);
- tx_desc->ctrl.imm = cpu_to_be32(mac_l);
- }
+ /* Copy dst mac address to wqe */
+ skb_reset_mac_header(skb);
+ ethh = eth_hdr(skb);
+ if (ethh && ethh->h_dest) {
+ mac = mlx4_en_mac_to_u64(ethh->h_dest);
+ mac_h = (u32) ((mac & 0xffff00000000ULL) >> 16);
+ mac_l = (u32) (mac & 0xffffffff);
+ tx_desc->ctrl.srcrb_flags |= cpu_to_be32(mac_h);
+ tx_desc->ctrl.imm = cpu_to_be32(mac_l);
}
/* Handle LSO (TSO) packets */
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 24ee96775996..1e9b55eb7217 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -31,6 +31,7 @@
* SOFTWARE.
*/
+#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/export.h>
@@ -52,30 +53,6 @@ enum {
MLX4_EQ_ENTRY_SIZE = 0x20
};
-/*
- * Must be packed because start is 64 bits but only aligned to 32 bits.
- */
-struct mlx4_eq_context {
- __be32 flags;
- u16 reserved1[3];
- __be16 page_offset;
- u8 log_eq_size;
- u8 reserved2[4];
- u8 eq_period;
- u8 reserved3;
- u8 eq_max_count;
- u8 reserved4[3];
- u8 intr;
- u8 log_page_size;
- u8 reserved5[2];
- u8 mtt_base_addr_h;
- __be32 mtt_base_addr_l;
- u32 reserved6[2];
- __be32 consumer_index;
- __be32 producer_index;
- u32 reserved7[4];
-};
-
#define MLX4_EQ_STATUS_OK ( 0 << 28)
#define MLX4_EQ_STATUS_WRITE_FAIL (10 << 28)
#define MLX4_EQ_OWNER_SW ( 0 << 24)
@@ -100,46 +77,9 @@ struct mlx4_eq_context {
(1ull << MLX4_EVENT_TYPE_SRQ_CATAS_ERROR) | \
(1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE) | \
(1ull << MLX4_EVENT_TYPE_SRQ_LIMIT) | \
- (1ull << MLX4_EVENT_TYPE_CMD))
-
-struct mlx4_eqe {
- u8 reserved1;
- u8 type;
- u8 reserved2;
- u8 subtype;
- union {
- u32 raw[6];
- struct {
- __be32 cqn;
- } __packed comp;
- struct {
- u16 reserved1;
- __be16 token;
- u32 reserved2;
- u8 reserved3[3];
- u8 status;
- __be64 out_param;
- } __packed cmd;
- struct {
- __be32 qpn;
- } __packed qp;
- struct {
- __be32 srqn;
- } __packed srq;
- struct {
- __be32 cqn;
- u32 reserved1;
- u8 reserved2[3];
- u8 syndrome;
- } __packed cq_err;
- struct {
- u32 reserved1[2];
- __be32 port;
- } __packed port_change;
- } event;
- u8 reserved3[3];
- u8 owner;
-} __packed;
+ (1ull << MLX4_EVENT_TYPE_CMD) | \
+ (1ull << MLX4_EVENT_TYPE_COMM_CHANNEL) | \
+ (1ull << MLX4_EVENT_TYPE_FLR_EVENT))
static void eq_set_ci(struct mlx4_eq *eq, int req_not)
{
@@ -162,13 +102,144 @@ static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq)
return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe;
}
+static struct mlx4_eqe *next_slave_event_eqe(struct mlx4_slave_event_eq *slave_eq)
+{
+ struct mlx4_eqe *eqe =
+ &slave_eq->event_eqe[slave_eq->cons & (SLAVE_EVENT_EQ_SIZE - 1)];
+ return (!!(eqe->owner & 0x80) ^
+ !!(slave_eq->cons & SLAVE_EVENT_EQ_SIZE)) ?
+ eqe : NULL;
+}
+
+void mlx4_gen_slave_eqe(struct work_struct *work)
+{
+ struct mlx4_mfunc_master_ctx *master =
+ container_of(work, struct mlx4_mfunc_master_ctx,
+ slave_event_work);
+ struct mlx4_mfunc *mfunc =
+ container_of(master, struct mlx4_mfunc, master);
+ struct mlx4_priv *priv = container_of(mfunc, struct mlx4_priv, mfunc);
+ struct mlx4_dev *dev = &priv->dev;
+ struct mlx4_slave_event_eq *slave_eq = &mfunc->master.slave_eq;
+ struct mlx4_eqe *eqe;
+ u8 slave;
+ int i;
+
+ for (eqe = next_slave_event_eqe(slave_eq); eqe;
+ eqe = next_slave_event_eqe(slave_eq)) {
+ slave = eqe->slave_id;
+
+ /* All active slaves need to receive the event */
+ if (slave == ALL_SLAVES) {
+ for (i = 0; i < dev->num_slaves; i++) {
+ if (i != dev->caps.function &&
+ master->slave_state[i].active)
+ if (mlx4_GEN_EQE(dev, i, eqe))
+ mlx4_warn(dev, "Failed to "
+ " generate event "
+ "for slave %d\n", i);
+ }
+ } else {
+ if (mlx4_GEN_EQE(dev, slave, eqe))
+ mlx4_warn(dev, "Failed to generate event "
+ "for slave %d\n", slave);
+ }
+ ++slave_eq->cons;
+ }
+}
+
+
+static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_slave_event_eq *slave_eq = &priv->mfunc.master.slave_eq;
+ struct mlx4_eqe *s_eqe =
+ &slave_eq->event_eqe[slave_eq->prod & (SLAVE_EVENT_EQ_SIZE - 1)];
+
+ if ((!!(s_eqe->owner & 0x80)) ^
+ (!!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE))) {
+ mlx4_warn(dev, "Master failed to generate an EQE for slave: %d. "
+ "No free EQE on slave events queue\n", slave);
+ return;
+ }
+
+ memcpy(s_eqe, eqe, sizeof(struct mlx4_eqe) - 1);
+ s_eqe->slave_id = slave;
+ /* ensure all information is written before setting the ownership bit */
+ wmb();
+ s_eqe->owner = !!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE) ? 0x0 : 0x80;
+ ++slave_eq->prod;
+
+ queue_work(priv->mfunc.master.comm_wq,
+ &priv->mfunc.master.slave_event_work);
+}
+
+static void mlx4_slave_event(struct mlx4_dev *dev, int slave,
+ struct mlx4_eqe *eqe)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_slave_state *s_slave =
+ &priv->mfunc.master.slave_state[slave];
+
+ if (!s_slave->active) {
+ /*mlx4_warn(dev, "Trying to pass event to inactive slave\n");*/
+ return;
+ }
+
+ slave_event(dev, slave, eqe);
+}
+
+void mlx4_master_handle_slave_flr(struct work_struct *work)
+{
+ struct mlx4_mfunc_master_ctx *master =
+ container_of(work, struct mlx4_mfunc_master_ctx,
+ slave_flr_event_work);
+ struct mlx4_mfunc *mfunc =
+ container_of(master, struct mlx4_mfunc, master);
+ struct mlx4_priv *priv =
+ container_of(mfunc, struct mlx4_priv, mfunc);
+ struct mlx4_dev *dev = &priv->dev;
+ struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
+ int i;
+ int err;
+
+ mlx4_dbg(dev, "mlx4_handle_slave_flr\n");
+
+ for (i = 0 ; i < dev->num_slaves; i++) {
+
+ if (MLX4_COMM_CMD_FLR == slave_state[i].last_cmd) {
+ mlx4_dbg(dev, "mlx4_handle_slave_flr: "
+ "clean slave: %d\n", i);
+
+ mlx4_delete_all_resources_for_slave(dev, i);
+ /*return the slave to running mode*/
+ spin_lock(&priv->mfunc.master.slave_state_lock);
+ slave_state[i].last_cmd = MLX4_COMM_CMD_RESET;
+ slave_state[i].is_slave_going_down = 0;
+ spin_unlock(&priv->mfunc.master.slave_state_lock);
+ /*notify the FW:*/
+ err = mlx4_cmd(dev, 0, i, 0, MLX4_CMD_INFORM_FLR_DONE,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ if (err)
+ mlx4_warn(dev, "Failed to notify FW on "
+ "FLR done (slave:%d)\n", i);
+ }
+ }
+}
+
static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
{
+ struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_eqe *eqe;
int cqn;
int eqes_found = 0;
int set_ci = 0;
int port;
+ int slave = 0;
+ int ret;
+ u32 flr_slave;
+ u8 update_slave_state;
+ int i;
while ((eqe = next_eqe_sw(eq))) {
/*
@@ -191,14 +262,68 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
- mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
- eqe->type);
+ mlx4_dbg(dev, "event %d arrived\n", eqe->type);
+ if (mlx4_is_master(dev)) {
+ /* forward only to slave owning the QP */
+ ret = mlx4_get_slave_from_resource_id(dev,
+ RES_QP,
+ be32_to_cpu(eqe->event.qp.qpn)
+ & 0xffffff, &slave);
+ if (ret && ret != -ENOENT) {
+ mlx4_dbg(dev, "QP event %02x(%02x) on "
+ "EQ %d at index %u: could "
+ "not get slave id (%d)\n",
+ eqe->type, eqe->subtype,
+ eq->eqn, eq->cons_index, ret);
+ break;
+ }
+
+ if (!ret && slave != dev->caps.function) {
+ mlx4_slave_event(dev, slave, eqe);
+ break;
+ }
+
+ }
+ mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) &
+ 0xffffff, eqe->type);
break;
case MLX4_EVENT_TYPE_SRQ_LIMIT:
+ mlx4_warn(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n",
+ __func__);
case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
- mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) & 0xffffff,
- eqe->type);
+ if (mlx4_is_master(dev)) {
+ /* forward only to slave owning the SRQ */
+ ret = mlx4_get_slave_from_resource_id(dev,
+ RES_SRQ,
+ be32_to_cpu(eqe->event.srq.srqn)
+ & 0xffffff,
+ &slave);
+ if (ret && ret != -ENOENT) {
+ mlx4_warn(dev, "SRQ event %02x(%02x) "
+ "on EQ %d at index %u: could"
+ " not get slave id (%d)\n",
+ eqe->type, eqe->subtype,
+ eq->eqn, eq->cons_index, ret);
+ break;
+ }
+ mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x,"
+ " event: %02x(%02x)\n", __func__,
+ slave,
+ be32_to_cpu(eqe->event.srq.srqn),
+ eqe->type, eqe->subtype);
+
+ if (!ret && slave != dev->caps.function) {
+ mlx4_warn(dev, "%s: sending event "
+ "%02x(%02x) to slave:%d\n",
+ __func__, eqe->type,
+ eqe->subtype, slave);
+ mlx4_slave_event(dev, slave, eqe);
+ break;
+ }
+ }
+ mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) &
+ 0xffffff, eqe->type);
break;
case MLX4_EVENT_TYPE_CMD:
@@ -211,13 +336,35 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
case MLX4_EVENT_TYPE_PORT_CHANGE:
port = be32_to_cpu(eqe->event.port_change.port) >> 28;
if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) {
- mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_DOWN,
+ mlx4_dispatch_event(dev,
+ MLX4_DEV_EVENT_PORT_DOWN,
port);
mlx4_priv(dev)->sense.do_sense_port[port] = 1;
+ if (mlx4_is_master(dev))
+ /* change the state of all slaves' ports
+ * to down */
+ for (i = 0; i < dev->num_slaves; i++) {
+ mlx4_dbg(dev, "%s: Sending "
+ "MLX4_PORT_CHANGE_SUBTYPE_DOWN"
+ " to slave: %d, port:%d\n",
+ __func__, i, port);
+ if (i == dev->caps.function)
+ continue;
+ mlx4_slave_event(dev, i, eqe);
+ }
} else {
- mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_UP,
+ mlx4_dispatch_event(dev,
+ MLX4_DEV_EVENT_PORT_UP,
port);
mlx4_priv(dev)->sense.do_sense_port[port] = 0;
+
+ if (mlx4_is_master(dev)) {
+ for (i = 0; i < dev->num_slaves; i++) {
+ if (i == dev->caps.function)
+ continue;
+ mlx4_slave_event(dev, i, eqe);
+ }
+ }
}
break;
@@ -226,7 +373,28 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
eqe->event.cq_err.syndrome == 1 ?
"overrun" : "access violation",
be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);
- mlx4_cq_event(dev, be32_to_cpu(eqe->event.cq_err.cqn),
+ if (mlx4_is_master(dev)) {
+ ret = mlx4_get_slave_from_resource_id(dev,
+ RES_CQ,
+ be32_to_cpu(eqe->event.cq_err.cqn)
+ & 0xffffff, &slave);
+ if (ret && ret != -ENOENT) {
+ mlx4_dbg(dev, "CQ event %02x(%02x) on "
+ "EQ %d at index %u: could "
+ "not get slave id (%d)\n",
+ eqe->type, eqe->subtype,
+ eq->eqn, eq->cons_index, ret);
+ break;
+ }
+
+ if (!ret && slave != dev->caps.function) {
+ mlx4_slave_event(dev, slave, eqe);
+ break;
+ }
+ }
+ mlx4_cq_event(dev,
+ be32_to_cpu(eqe->event.cq_err.cqn)
+ & 0xffffff,
eqe->type);
break;
@@ -234,13 +402,60 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
mlx4_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);
break;
+ case MLX4_EVENT_TYPE_COMM_CHANNEL:
+ if (!mlx4_is_master(dev)) {
+ mlx4_warn(dev, "Received comm channel event "
+ "for non master device\n");
+ break;
+ }
+ memcpy(&priv->mfunc.master.comm_arm_bit_vector,
+ eqe->event.comm_channel_arm.bit_vec,
+ sizeof eqe->event.comm_channel_arm.bit_vec);
+ queue_work(priv->mfunc.master.comm_wq,
+ &priv->mfunc.master.comm_work);
+ break;
+
+ case MLX4_EVENT_TYPE_FLR_EVENT:
+ flr_slave = be32_to_cpu(eqe->event.flr_event.slave_id);
+ if (!mlx4_is_master(dev)) {
+ mlx4_warn(dev, "Non-master function received"
+ "FLR event\n");
+ break;
+ }
+
+ mlx4_dbg(dev, "FLR event for slave: %d\n", flr_slave);
+
+ if (flr_slave > dev->num_slaves) {
+ mlx4_warn(dev,
+ "Got FLR for unknown function: %d\n",
+ flr_slave);
+ update_slave_state = 0;
+ } else
+ update_slave_state = 1;
+
+ spin_lock(&priv->mfunc.master.slave_state_lock);
+ if (update_slave_state) {
+ priv->mfunc.master.slave_state[flr_slave].active = false;
+ priv->mfunc.master.slave_state[flr_slave].last_cmd = MLX4_COMM_CMD_FLR;
+ priv->mfunc.master.slave_state[flr_slave].is_slave_going_down = 1;
+ }
+ spin_unlock(&priv->mfunc.master.slave_state_lock);
+ queue_work(priv->mfunc.master.comm_wq,
+ &priv->mfunc.master.slave_flr_event_work);
+ break;
case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
case MLX4_EVENT_TYPE_ECC_DETECT:
default:
- mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at index %u\n",
- eqe->type, eqe->subtype, eq->eqn, eq->cons_index);
+ mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at "
+ "index %u. owner=%x, nent=0x%x, slave=%x, "
+ "ownership=%s\n",
+ eqe->type, eqe->subtype, eq->eqn,
+ eq->cons_index, eqe->owner, eq->nent,
+ eqe->slave_id,
+ !!(eqe->owner & 0x80) ^
+ !!(eq->cons_index & eq->nent) ? "HW" : "SW");
break;
- }
+ };
++eq->cons_index;
eqes_found = 1;
@@ -290,25 +505,58 @@ static irqreturn_t mlx4_msi_x_interrupt(int irq, void *eq_ptr)
return IRQ_HANDLED;
}
+int mlx4_MAP_EQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_slave_event_eq_info *event_eq =
+ &priv->mfunc.master.slave_state[slave].event_eq;
+ u32 in_modifier = vhcr->in_modifier;
+ u32 eqn = in_modifier & 0x1FF;
+ u64 in_param = vhcr->in_param;
+ int err = 0;
+
+ if (slave == dev->caps.function)
+ err = mlx4_cmd(dev, in_param, (in_modifier & 0x80000000) | eqn,
+ 0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_NATIVE);
+ if (!err) {
+ if (in_modifier >> 31) {
+ /* unmap */
+ event_eq->event_type &= ~in_param;
+ } else {
+ event_eq->eqn = eqn;
+ event_eq->event_type = in_param;
+ }
+ }
+ return err;
+}
+
static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap,
int eq_num)
{
return mlx4_cmd(dev, event_mask, (unmap << 31) | eq_num,
- 0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B);
+ 0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_WRAPPED);
}
static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int eq_num)
{
- return mlx4_cmd(dev, mailbox->dma, eq_num, 0, MLX4_CMD_SW2HW_EQ,
- MLX4_CMD_TIME_CLASS_A);
+ return mlx4_cmd(dev, mailbox->dma | dev->caps.function, eq_num, 0,
+ MLX4_CMD_SW2HW_EQ, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_WRAPPED);
}
static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int eq_num)
{
- return mlx4_cmd_box(dev, 0, mailbox->dma, eq_num, 0, MLX4_CMD_HW2SW_EQ,
- MLX4_CMD_TIME_CLASS_A);
+ return mlx4_cmd_box(dev, dev->caps.function, mailbox->dma, eq_num,
+ 0, MLX4_CMD_HW2SW_EQ, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_WRAPPED);
}
static int mlx4_num_eq_uar(struct mlx4_dev *dev)
@@ -585,14 +833,16 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
priv->eq_table.uar_map[i] = NULL;
- err = mlx4_map_clr_int(dev);
- if (err)
- goto err_out_bitmap;
+ if (!mlx4_is_slave(dev)) {
+ err = mlx4_map_clr_int(dev);
+ if (err)
+ goto err_out_bitmap;
- priv->eq_table.clr_mask =
- swab32(1 << (priv->eq_table.inta_pin & 31));
- priv->eq_table.clr_int = priv->clr_base +
- (priv->eq_table.inta_pin < 32 ? 4 : 0);
+ priv->eq_table.clr_mask =
+ swab32(1 << (priv->eq_table.inta_pin & 31));
+ priv->eq_table.clr_int = priv->clr_base +
+ (priv->eq_table.inta_pin < 32 ? 4 : 0);
+ }
priv->eq_table.irq_names =
kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1 +
@@ -700,7 +950,8 @@ err_out_unmap:
mlx4_free_eq(dev, &priv->eq_table.eq[i]);
--i;
}
- mlx4_unmap_clr_int(dev);
+ if (!mlx4_is_slave(dev))
+ mlx4_unmap_clr_int(dev);
mlx4_free_irqs(dev);
err_out_bitmap:
@@ -725,7 +976,8 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
for (i = 0; i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i)
mlx4_free_eq(dev, &priv->eq_table.eq[i]);
- mlx4_unmap_clr_int(dev);
+ if (!mlx4_is_slave(dev))
+ mlx4_unmap_clr_int(dev);
for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
if (priv->eq_table.uar_map[i])
@@ -748,7 +1000,7 @@ int mlx4_test_interrupts(struct mlx4_dev *dev)
err = mlx4_NOP(dev);
/* When not in MSI_X, there is only one irq to check */
- if (!(dev->flags & MLX4_FLAG_MSI_X))
+ if (!(dev->flags & MLX4_FLAG_MSI_X) || mlx4_is_slave(dev))
return err;
/* A loop over all completion vectors, for each vector we will check
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 435ca6e49734..a424a19280cc 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -32,6 +32,7 @@
* SOFTWARE.
*/
+#include <linux/etherdevice.h>
#include <linux/mlx4/cmd.h>
#include <linux/module.h>
#include <linux/cache.h>
@@ -48,7 +49,7 @@ enum {
extern void __buggy_use_of_MLX4_GET(void);
extern void __buggy_use_of_MLX4_PUT(void);
-static int enable_qos;
+static bool enable_qos;
module_param(enable_qos, bool, 0444);
MODULE_PARM_DESC(enable_qos, "Enable Quality of Service support in the HCA (default: off)");
@@ -139,12 +140,185 @@ int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg)
MLX4_PUT(inbox, cfg->log_pg_sz_m, MOD_STAT_CFG_PG_SZ_M_OFFSET);
err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_MOD_STAT_CFG,
- MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
+int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ u8 field;
+ u32 size;
+ int err = 0;
+
+#define QUERY_FUNC_CAP_FLAGS_OFFSET 0x0
+#define QUERY_FUNC_CAP_NUM_PORTS_OFFSET 0x1
+#define QUERY_FUNC_CAP_FUNCTION_OFFSET 0x3
+#define QUERY_FUNC_CAP_PF_BHVR_OFFSET 0x4
+#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET 0x10
+#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET 0x14
+#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET 0x18
+#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET 0x20
+#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET 0x24
+#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET 0x28
+#define QUERY_FUNC_CAP_MAX_EQ_OFFSET 0x2c
+#define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET 0X30
+
+#define QUERY_FUNC_CAP_PHYS_PORT_OFFSET 0x3
+#define QUERY_FUNC_CAP_ETH_PROPS_OFFSET 0xc
+
+ if (vhcr->op_modifier == 1) {
+ field = vhcr->in_modifier;
+ MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
+
+ field = 0; /* ensure fvl bit is not set */
+ MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_ETH_PROPS_OFFSET);
+ } else if (vhcr->op_modifier == 0) {
+ field = 1 << 7; /* enable only ethernet interface */
+ MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET);
+
+ field = slave;
+ MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FUNCTION_OFFSET);
+
+ field = dev->caps.num_ports;
+ MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
+
+ size = 0; /* no PF behaviour is set for now */
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_PF_BHVR_OFFSET);
+
+ size = dev->caps.num_qps;
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
+
+ size = dev->caps.num_srqs;
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
+
+ size = dev->caps.num_cqs;
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
+
+ size = dev->caps.num_eqs;
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
+
+ size = dev->caps.reserved_eqs;
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
+
+ size = dev->caps.num_mpts;
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
+
+ size = dev->caps.num_mtts;
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
+
+ size = dev->caps.num_mgms + dev->caps.num_amgms;
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
+
+ } else
+ err = -EINVAL;
+
+ return err;
+}
+
+int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, struct mlx4_func_cap *func_cap)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ u32 *outbox;
+ u8 field;
+ u32 size;
+ int i;
+ int err = 0;
+
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_FUNC_CAP,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ if (err)
+ goto out;
+
+ outbox = mailbox->buf;
+
+ MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS_OFFSET);
+ if (!(field & (1 << 7))) {
+ mlx4_err(dev, "The host doesn't support eth interface\n");
+ err = -EPROTONOSUPPORT;
+ goto out;
+ }
+
+ MLX4_GET(field, outbox, QUERY_FUNC_CAP_FUNCTION_OFFSET);
+ func_cap->function = field;
+
+ MLX4_GET(field, outbox, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
+ func_cap->num_ports = field;
+
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_PF_BHVR_OFFSET);
+ func_cap->pf_context_behaviour = size;
+
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
+ func_cap->qp_quota = size & 0xFFFFFF;
+
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
+ func_cap->srq_quota = size & 0xFFFFFF;
+
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
+ func_cap->cq_quota = size & 0xFFFFFF;
+
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
+ func_cap->max_eq = size & 0xFFFFFF;
+
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
+ func_cap->reserved_eq = size & 0xFFFFFF;
+
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
+ func_cap->mpt_quota = size & 0xFFFFFF;
+
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
+ func_cap->mtt_quota = size & 0xFFFFFF;
+
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
+ func_cap->mcg_quota = size & 0xFFFFFF;
+
+ for (i = 1; i <= func_cap->num_ports; ++i) {
+ err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 1,
+ MLX4_CMD_QUERY_FUNC_CAP,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ if (err)
+ goto out;
+
+ MLX4_GET(field, outbox, QUERY_FUNC_CAP_ETH_PROPS_OFFSET);
+ if (field & (1 << 7)) {
+ mlx4_err(dev, "VLAN is enforced on this port\n");
+ err = -EPROTONOSUPPORT;
+ goto out;
+ }
+
+ if (field & (1 << 6)) {
+ mlx4_err(dev, "Force mac is enabled on this port\n");
+ err = -EPROTONOSUPPORT;
+ goto out;
+ }
+
+ MLX4_GET(field, outbox, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
+ func_cap->physical_port[i] = field;
+ }
+
+ /* All other resources are allocated by the master, but we still report
+ * 'num' and 'reserved' capabilities as follows:
+ * - num remains the maximum resource index
+ * - 'num - reserved' is the total number of available objects of a resource, but
+ * resource indices may be less than 'reserved'
+ * TODO: set per-resource quotas */
+
+out:
+ mlx4_free_cmd_mailbox(dev, mailbox);
+
+ return err;
+}
+
int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
struct mlx4_cmd_mailbox *mailbox;
@@ -229,7 +403,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
outbox = mailbox->buf;
err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
- MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_TIME_CLASS_A, !mlx4_is_slave(dev));
if (err)
goto out;
@@ -396,12 +570,15 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
for (i = 1; i <= dev_cap->num_ports; ++i) {
err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 0, MLX4_CMD_QUERY_PORT,
- MLX4_CMD_TIME_CLASS_B);
+ MLX4_CMD_TIME_CLASS_B,
+ !mlx4_is_slave(dev));
if (err)
goto out;
MLX4_GET(field, outbox, QUERY_PORT_SUPPORTED_TYPE_OFFSET);
dev_cap->supported_port_types[i] = field & 3;
+ dev_cap->suggested_type[i] = (field >> 3) & 1;
+ dev_cap->default_sense[i] = (field >> 4) & 1;
MLX4_GET(field, outbox, QUERY_PORT_MTU_OFFSET);
dev_cap->ib_mtu[i] = field & 0xf;
MLX4_GET(field, outbox, QUERY_PORT_WIDTH_OFFSET);
@@ -470,6 +647,61 @@ out:
return err;
}
+int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ u64 def_mac;
+ u8 port_type;
+ int err;
+
+#define MLX4_PORT_SUPPORT_IB (1 << 0)
+#define MLX4_PORT_SUGGEST_TYPE (1 << 3)
+#define MLX4_PORT_DEFAULT_SENSE (1 << 4)
+#define MLX4_VF_PORT_ETH_ONLY_MASK (0xff & ~MLX4_PORT_SUPPORT_IB & \
+ ~MLX4_PORT_SUGGEST_TYPE & \
+ ~MLX4_PORT_DEFAULT_SENSE)
+
+ err = mlx4_cmd_box(dev, 0, outbox->dma, vhcr->in_modifier, 0,
+ MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_NATIVE);
+
+ if (!err && dev->caps.function != slave) {
+ /* set slave default_mac address */
+ MLX4_GET(def_mac, outbox->buf, QUERY_PORT_MAC_OFFSET);
+ def_mac += slave << 8;
+ MLX4_PUT(outbox->buf, def_mac, QUERY_PORT_MAC_OFFSET);
+
+ /* get port type - currently only eth is enabled */
+ MLX4_GET(port_type, outbox->buf,
+ QUERY_PORT_SUPPORTED_TYPE_OFFSET);
+
+ /* Allow only Eth port, no link sensing allowed */
+ port_type &= MLX4_VF_PORT_ETH_ONLY_MASK;
+
+ /* check eth is enabled for this port */
+ if (!(port_type & 2))
+ mlx4_dbg(dev, "QUERY PORT: eth not supported by host");
+
+ MLX4_PUT(outbox->buf, port_type,
+ QUERY_PORT_SUPPORTED_TYPE_OFFSET);
+ }
+
+ return err;
+}
+
+static int mlx4_QUERY_PORT(struct mlx4_dev *dev, void *ptr, u8 port)
+{
+ struct mlx4_cmd_mailbox *outbox = ptr;
+
+ return mlx4_cmd_box(dev, 0, outbox->dma, port, 0,
+ MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_WRAPPED);
+}
+EXPORT_SYMBOL_GPL(mlx4_QUERY_PORT);
+
int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
{
struct mlx4_cmd_mailbox *mailbox;
@@ -519,7 +751,8 @@ int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
if (++nent == MLX4_MAILBOX_SIZE / 16) {
err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
- MLX4_CMD_TIME_CLASS_B);
+ MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_NATIVE);
if (err)
goto out;
nent = 0;
@@ -528,7 +761,8 @@ int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
}
if (nent)
- err = mlx4_cmd(dev, mailbox->dma, nent, 0, op, MLX4_CMD_TIME_CLASS_B);
+ err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
if (err)
goto out;
@@ -557,13 +791,15 @@ int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm)
int mlx4_UNMAP_FA(struct mlx4_dev *dev)
{
- return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_FA, MLX4_CMD_TIME_CLASS_B);
+ return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_FA,
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}
int mlx4_RUN_FW(struct mlx4_dev *dev)
{
- return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_RUN_FW, MLX4_CMD_TIME_CLASS_A);
+ return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_RUN_FW,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
int mlx4_QUERY_FW(struct mlx4_dev *dev)
@@ -579,6 +815,7 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev)
#define QUERY_FW_OUT_SIZE 0x100
#define QUERY_FW_VER_OFFSET 0x00
+#define QUERY_FW_PPF_ID 0x09
#define QUERY_FW_CMD_IF_REV_OFFSET 0x0a
#define QUERY_FW_MAX_CMD_OFFSET 0x0f
#define QUERY_FW_ERR_START_OFFSET 0x30
@@ -589,13 +826,16 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev)
#define QUERY_FW_CLR_INT_BASE_OFFSET 0x20
#define QUERY_FW_CLR_INT_BAR_OFFSET 0x28
+#define QUERY_FW_COMM_BASE_OFFSET 0x40
+#define QUERY_FW_COMM_BAR_OFFSET 0x48
+
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
outbox = mailbox->buf;
err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
- MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
if (err)
goto out;
@@ -608,6 +848,9 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev)
((fw_ver & 0xffff0000ull) >> 16) |
((fw_ver & 0x0000ffffull) << 16);
+ MLX4_GET(lg, outbox, QUERY_FW_PPF_ID);
+ dev->caps.function = lg;
+
MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET);
if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV ||
cmd_if_rev > MLX4_COMMAND_INTERFACE_MAX_REV) {
@@ -649,6 +892,11 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev)
MLX4_GET(fw->clr_int_bar, outbox, QUERY_FW_CLR_INT_BAR_OFFSET);
fw->clr_int_bar = (fw->clr_int_bar >> 6) * 2;
+ MLX4_GET(fw->comm_base, outbox, QUERY_FW_COMM_BASE_OFFSET);
+ MLX4_GET(fw->comm_bar, outbox, QUERY_FW_COMM_BAR_OFFSET);
+ fw->comm_bar = (fw->comm_bar >> 6) * 2;
+ mlx4_dbg(dev, "Communication vector bar:%d offset:0x%llx\n",
+ fw->comm_bar, fw->comm_base);
mlx4_dbg(dev, "FW size %d KB\n", fw->fw_pages >> 2);
/*
@@ -711,7 +959,7 @@ int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter)
outbox = mailbox->buf;
err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_ADAPTER,
- MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
if (err)
goto out;
@@ -743,6 +991,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
#define INIT_HCA_LOG_SRQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x2f)
#define INIT_HCA_CQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x30)
#define INIT_HCA_LOG_CQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x37)
+#define INIT_HCA_EQE_CQE_OFFSETS (INIT_HCA_QPC_OFFSET + 0x38)
#define INIT_HCA_ALTC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x40)
#define INIT_HCA_AUXC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x50)
#define INIT_HCA_EQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x60)
@@ -831,10 +1080,11 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
/* UAR attributes */
- MLX4_PUT(inbox, (u8) (PAGE_SHIFT - 12), INIT_HCA_UAR_PAGE_SZ_OFFSET);
+ MLX4_PUT(inbox, param->uar_page_sz, INIT_HCA_UAR_PAGE_SZ_OFFSET);
MLX4_PUT(inbox, param->log_uar_sz, INIT_HCA_LOG_UAR_SZ_OFFSET);
- err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 10000);
+ err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 10000,
+ MLX4_CMD_NATIVE);
if (err)
mlx4_err(dev, "INIT_HCA returns %d\n", err);
@@ -843,6 +1093,101 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
return err;
}
+int mlx4_QUERY_HCA(struct mlx4_dev *dev,
+ struct mlx4_init_hca_param *param)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ __be32 *outbox;
+ int err;
+
+#define QUERY_HCA_GLOBAL_CAPS_OFFSET 0x04
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ outbox = mailbox->buf;
+
+ err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
+ MLX4_CMD_QUERY_HCA,
+ MLX4_CMD_TIME_CLASS_B,
+ !mlx4_is_slave(dev));
+ if (err)
+ goto out;
+
+ MLX4_GET(param->global_caps, outbox, QUERY_HCA_GLOBAL_CAPS_OFFSET);
+
+ /* QPC/EEC/CQC/EQC/RDMARC attributes */
+
+ MLX4_GET(param->qpc_base, outbox, INIT_HCA_QPC_BASE_OFFSET);
+ MLX4_GET(param->log_num_qps, outbox, INIT_HCA_LOG_QP_OFFSET);
+ MLX4_GET(param->srqc_base, outbox, INIT_HCA_SRQC_BASE_OFFSET);
+ MLX4_GET(param->log_num_srqs, outbox, INIT_HCA_LOG_SRQ_OFFSET);
+ MLX4_GET(param->cqc_base, outbox, INIT_HCA_CQC_BASE_OFFSET);
+ MLX4_GET(param->log_num_cqs, outbox, INIT_HCA_LOG_CQ_OFFSET);
+ MLX4_GET(param->altc_base, outbox, INIT_HCA_ALTC_BASE_OFFSET);
+ MLX4_GET(param->auxc_base, outbox, INIT_HCA_AUXC_BASE_OFFSET);
+ MLX4_GET(param->eqc_base, outbox, INIT_HCA_EQC_BASE_OFFSET);
+ MLX4_GET(param->log_num_eqs, outbox, INIT_HCA_LOG_EQ_OFFSET);
+ MLX4_GET(param->rdmarc_base, outbox, INIT_HCA_RDMARC_BASE_OFFSET);
+ MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET);
+
+ /* multicast attributes */
+
+ MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET);
+ MLX4_GET(param->log_mc_entry_sz, outbox,
+ INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
+ MLX4_GET(param->log_mc_hash_sz, outbox,
+ INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
+ MLX4_GET(param->log_mc_table_sz, outbox,
+ INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
+
+ /* TPT attributes */
+
+ MLX4_GET(param->dmpt_base, outbox, INIT_HCA_DMPT_BASE_OFFSET);
+ MLX4_GET(param->log_mpt_sz, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET);
+ MLX4_GET(param->mtt_base, outbox, INIT_HCA_MTT_BASE_OFFSET);
+ MLX4_GET(param->cmpt_base, outbox, INIT_HCA_CMPT_BASE_OFFSET);
+
+ /* UAR attributes */
+
+ MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET);
+ MLX4_GET(param->log_uar_sz, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);
+
+out:
+ mlx4_free_cmd_mailbox(dev, mailbox);
+
+ return err;
+}
+
+int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int port = vhcr->in_modifier;
+ int err;
+
+ if (priv->mfunc.master.slave_state[slave].init_port_mask & (1 << port))
+ return 0;
+
+ if (dev->caps.port_mask[port] == MLX4_PORT_TYPE_IB)
+ return -ENODEV;
+
+ /* Enable port only if it was previously disabled */
+ if (!priv->mfunc.master.init_port_ref[port]) {
+ err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
+ if (err)
+ return err;
+ priv->mfunc.master.slave_state[slave].init_port_mask |=
+ (1 << port);
+ }
+ ++priv->mfunc.master.init_port_ref[port];
+ return 0;
+}
+
int mlx4_INIT_PORT(struct mlx4_dev *dev, int port)
{
struct mlx4_cmd_mailbox *mailbox;
@@ -886,33 +1231,62 @@ int mlx4_INIT_PORT(struct mlx4_dev *dev, int port)
MLX4_PUT(inbox, field, INIT_PORT_MAX_PKEY_OFFSET);
err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_INIT_PORT,
- MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
mlx4_free_cmd_mailbox(dev, mailbox);
} else
err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
- MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
return err;
}
EXPORT_SYMBOL_GPL(mlx4_INIT_PORT);
+int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int port = vhcr->in_modifier;
+ int err;
+
+ if (!(priv->mfunc.master.slave_state[slave].init_port_mask &
+ (1 << port)))
+ return 0;
+
+ if (dev->caps.port_mask[port] == MLX4_PORT_TYPE_IB)
+ return -ENODEV;
+ if (priv->mfunc.master.init_port_ref[port] == 1) {
+ err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000,
+ MLX4_CMD_NATIVE);
+ if (err)
+ return err;
+ }
+ priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
+ --priv->mfunc.master.init_port_ref[port];
+ return 0;
+}
+
int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port)
{
- return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000);
+ return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000,
+ MLX4_CMD_WRAPPED);
}
EXPORT_SYMBOL_GPL(mlx4_CLOSE_PORT);
int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic)
{
- return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA, 1000);
+ return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA, 1000,
+ MLX4_CMD_NATIVE);
}
int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages)
{
int ret = mlx4_cmd_imm(dev, icm_size, aux_pages, 0, 0,
MLX4_CMD_SET_ICM_SIZE,
- MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
if (ret)
return ret;
@@ -929,7 +1303,7 @@ int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages)
int mlx4_NOP(struct mlx4_dev *dev)
{
/* Input modifier of 0x1f means "finish as soon as possible." */
- return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100);
+ return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100, MLX4_CMD_NATIVE);
}
#define MLX4_WOL_SETUP_MODE (5 << 28)
@@ -938,7 +1312,8 @@ int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port)
u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;
return mlx4_cmd_imm(dev, 0, config, in_mod, 0x3,
- MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
}
EXPORT_SYMBOL_GPL(mlx4_wol_read);
@@ -947,6 +1322,6 @@ int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port)
u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;
return mlx4_cmd(dev, config, in_mod, 0x1, MLX4_CMD_MOD_STAT_CFG,
- MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
EXPORT_SYMBOL_GPL(mlx4_wol_write);
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index bf5ec2286528..119e0cc9fab3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -111,11 +111,30 @@ struct mlx4_dev_cap {
u64 max_icm_sz;
int max_gso_sz;
u8 supported_port_types[MLX4_MAX_PORTS + 1];
+ u8 suggested_type[MLX4_MAX_PORTS + 1];
+ u8 default_sense[MLX4_MAX_PORTS + 1];
u8 log_max_macs[MLX4_MAX_PORTS + 1];
u8 log_max_vlans[MLX4_MAX_PORTS + 1];
u32 max_counters;
};
+struct mlx4_func_cap {
+ u8 function;
+ u8 num_ports;
+ u8 flags;
+ u32 pf_context_behaviour;
+ int qp_quota;
+ int cq_quota;
+ int srq_quota;
+ int mpt_quota;
+ int mtt_quota;
+ int max_eq;
+ int reserved_eq;
+ int mcg_quota;
+ u8 physical_port[MLX4_MAX_PORTS + 1];
+ u8 port_flags[MLX4_MAX_PORTS + 1];
+};
+
struct mlx4_adapter {
char board_id[MLX4_BOARD_ID_LEN];
u8 inta_pin;
@@ -133,6 +152,7 @@ struct mlx4_init_hca_param {
u64 dmpt_base;
u64 cmpt_base;
u64 mtt_base;
+ u64 global_caps;
u16 log_mc_entry_sz;
u16 log_mc_hash_sz;
u8 log_num_qps;
@@ -143,6 +163,7 @@ struct mlx4_init_hca_param {
u8 log_mc_table_sz;
u8 log_mpt_sz;
u8 log_uar_sz;
+ u8 uar_page_sz; /* log pg sz in 4k chunks */
};
struct mlx4_init_ib_param {
@@ -167,12 +188,19 @@ struct mlx4_set_ib_param {
};
int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap);
+int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, struct mlx4_func_cap *func_cap);
+int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm);
int mlx4_UNMAP_FA(struct mlx4_dev *dev);
int mlx4_RUN_FW(struct mlx4_dev *dev);
int mlx4_QUERY_FW(struct mlx4_dev *dev);
int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter);
int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param);
+int mlx4_QUERY_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param);
int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic);
int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt);
int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages);
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c
index 02393fdf44c1..a9ade1c3cad5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/icm.c
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.c
@@ -213,7 +213,7 @@ static int mlx4_MAP_ICM(struct mlx4_dev *dev, struct mlx4_icm *icm, u64 virt)
static int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count)
{
return mlx4_cmd(dev, virt, page_count, 0, MLX4_CMD_UNMAP_ICM,
- MLX4_CMD_TIME_CLASS_B);
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}
int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm)
@@ -223,7 +223,8 @@ int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm)
int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev)
{
- return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_ICM_AUX, MLX4_CMD_TIME_CLASS_B);
+ return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_ICM_AUX,
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}
int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj)
diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c
index ca6feb55bd94..b4e9f6f5cc04 100644
--- a/drivers/net/ethernet/mellanox/mlx4/intf.c
+++ b/drivers/net/ethernet/mellanox/mlx4/intf.c
@@ -142,7 +142,8 @@ int mlx4_register_device(struct mlx4_dev *dev)
mlx4_add_device(intf, priv);
mutex_unlock(&intf_mutex);
- mlx4_start_catas_poll(dev);
+ if (!mlx4_is_slave(dev))
+ mlx4_start_catas_poll(dev);
return 0;
}
@@ -152,7 +153,8 @@ void mlx4_unregister_device(struct mlx4_dev *dev)
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_interface *intf;
- mlx4_stop_catas_poll(dev);
+ if (!mlx4_is_slave(dev))
+ mlx4_stop_catas_poll(dev);
mutex_lock(&intf_mutex);
list_for_each_entry(intf, &intf_list, list)
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 94bbc85a532d..6bb62c580e2d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -40,6 +40,7 @@
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
+#include <linux/delay.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>
@@ -75,21 +76,42 @@ MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");
#endif /* CONFIG_PCI_MSI */
+static int num_vfs;
+module_param(num_vfs, int, 0444);
+MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0");
+
+static int probe_vf;
+module_param(probe_vf, int, 0644);
+MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)");
+
+int mlx4_log_num_mgm_entry_size = 10;
+module_param_named(log_num_mgm_entry_size,
+ mlx4_log_num_mgm_entry_size, int, 0444);
+MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, that defines the num"
+ " of qp per mcg, for example:"
+ " 10 gives 248.range: 9<="
+ " log_num_mgm_entry_size <= 12");
+
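The "10 gives 248" in the parameter description follows from the MGM layout added to mcg.c later in this patch: each entry is 2^log bytes, the first 32 bytes hold the header and GID of struct mlx4_mgm, and every member QP takes 4 bytes. A minimal stand-alone sketch of that arithmetic (illustrative only, not driver code):

#include <stdio.h>

/* qp_per_mgm = 4 * (entry_size / 16 - 2), i.e. (entry_size - 32) / 4 */
static int qp_per_mgm(int log_num_mgm_entry_size)
{
	int entry_size = 1 << log_num_mgm_entry_size;
	return 4 * (entry_size / 16 - 2);
}

int main(void)
{
	for (int log = 9; log <= 12; log++)
		printf("log=%d -> %d QPs per MCG\n", log, qp_per_mgm(log));
	return 0;	/* log=10 prints 248, matching the description above */
}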
+#define MLX4_VF (1 << 0)
+
+#define HCA_GLOBAL_CAP_MASK 0
+#define PF_CONTEXT_BEHAVIOUR_MASK 0
+
static char mlx4_version[] __devinitdata =
DRV_NAME ": Mellanox ConnectX core driver v"
DRV_VERSION " (" DRV_RELDATE ")\n";
static struct mlx4_profile default_profile = {
- .num_qp = 1 << 17,
+ .num_qp = 1 << 18,
.num_srq = 1 << 16,
.rdmarc_per_qp = 1 << 4,
.num_cq = 1 << 16,
.num_mcg = 1 << 13,
- .num_mpt = 1 << 17,
+ .num_mpt = 1 << 19,
.num_mtt = 1 << 20,
};
-static int log_num_mac = 2;
+static int log_num_mac = 7;
module_param_named(log_num_mac, log_num_mac, int, 0444);
MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)");
@@ -99,15 +121,33 @@ MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");
/* Log2 max number of VLANs per ETH port (0-7) */
#define MLX4_LOG_NUM_VLANS 7
-static int use_prio;
+static bool use_prio;
module_param_named(use_prio, use_prio, bool, 0444);
MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports "
"(0/1, default 0)");
-static int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
+int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)");
+static int port_type_array[2] = {MLX4_PORT_TYPE_NONE, MLX4_PORT_TYPE_NONE};
+static int arr_argc = 2;
+module_param_array(port_type_array, int, &arr_argc, 0444);
+MODULE_PARM_DESC(port_type_array, "Array of port types: HW_DEFAULT (0) is default, "
+ "1 for IB, 2 for Ethernet");
+
+struct mlx4_port_config {
+ struct list_head list;
+ enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1];
+ struct pci_dev *pdev;
+};
+
+static inline int mlx4_master_get_num_eqs(struct mlx4_dev *dev)
+{
+ return dev->caps.reserved_eqs +
+ MLX4_MFUNC_EQ_NUM * (dev->num_slaves + 1);
+}
+
int mlx4_check_port_params(struct mlx4_dev *dev,
enum mlx4_port_type *port_type)
{
@@ -140,10 +180,8 @@ static void mlx4_set_port_mask(struct mlx4_dev *dev)
{
int i;
- dev->caps.port_mask = 0;
for (i = 1; i <= dev->caps.num_ports; ++i)
- if (dev->caps.port_type[i] == MLX4_PORT_TYPE_IB)
- dev->caps.port_mask |= 1 << (i - 1);
+ dev->caps.port_mask[i] = dev->caps.port_type[i];
}
static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
@@ -188,12 +226,15 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.eth_mtu_cap[i] = dev_cap->eth_mtu[i];
dev->caps.def_mac[i] = dev_cap->def_mac[i];
dev->caps.supported_type[i] = dev_cap->supported_port_types[i];
+ dev->caps.suggested_type[i] = dev_cap->suggested_type[i];
+ dev->caps.default_sense[i] = dev_cap->default_sense[i];
dev->caps.trans_type[i] = dev_cap->trans_type[i];
dev->caps.vendor_oui[i] = dev_cap->vendor_oui[i];
dev->caps.wavelength[i] = dev_cap->wavelength[i];
dev->caps.trans_code[i] = dev_cap->trans_code[i];
}
+ dev->caps.uar_page_size = PAGE_SIZE;
dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE;
dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
dev->caps.bf_reg_size = dev_cap->bf_reg_size;
@@ -207,7 +248,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.reserved_srqs = dev_cap->reserved_srqs;
dev->caps.max_sq_desc_sz = dev_cap->max_sq_desc_sz;
dev->caps.max_rq_desc_sz = dev_cap->max_rq_desc_sz;
- dev->caps.num_qp_per_mgm = MLX4_QP_PER_MGM;
+ dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev);
/*
* Subtract 1 from the limit because we need to allocate a
* spare CQE so the HCA HW can tell the difference between an
@@ -216,17 +257,18 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.max_cqes = dev_cap->max_cq_sz - 1;
dev->caps.reserved_cqs = dev_cap->reserved_cqs;
dev->caps.reserved_eqs = dev_cap->reserved_eqs;
- dev->caps.mtts_per_seg = 1 << log_mtts_per_seg;
- dev->caps.reserved_mtts = DIV_ROUND_UP(dev_cap->reserved_mtts,
- dev->caps.mtts_per_seg);
+ dev->caps.reserved_mtts = dev_cap->reserved_mtts;
dev->caps.reserved_mrws = dev_cap->reserved_mrws;
- dev->caps.reserved_uars = dev_cap->reserved_uars;
+
+ /* The first 128 UARs are used for EQ doorbells */
+ dev->caps.reserved_uars = max_t(int, 128, dev_cap->reserved_uars);
dev->caps.reserved_pds = dev_cap->reserved_pds;
dev->caps.reserved_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
dev_cap->reserved_xrcds : 0;
dev->caps.max_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
dev_cap->max_xrcds : 0;
- dev->caps.mtt_entry_sz = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz;
+ dev->caps.mtt_entry_sz = dev_cap->mtt_entry_sz;
+
dev->caps.max_msg_sz = dev_cap->max_msg_sz;
dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1);
dev->caps.flags = dev_cap->flags;
@@ -235,18 +277,70 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.stat_rate_support = dev_cap->stat_rate_support;
dev->caps.max_gso_sz = dev_cap->max_gso_sz;
+ /* Sense port always allowed on supported devices for ConnectX1 and 2 */
+ if (dev->pdev->device != 0x1003)
+ dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
+
dev->caps.log_num_macs = log_num_mac;
dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS;
dev->caps.log_num_prios = use_prio ? 3 : 0;
for (i = 1; i <= dev->caps.num_ports; ++i) {
- if (dev->caps.supported_type[i] != MLX4_PORT_TYPE_ETH)
- dev->caps.port_type[i] = MLX4_PORT_TYPE_IB;
- else
- dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
- dev->caps.possible_type[i] = dev->caps.port_type[i];
+ dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE;
+ if (dev->caps.supported_type[i]) {
+ /* if only ETH is supported - assign ETH */
+ if (dev->caps.supported_type[i] == MLX4_PORT_TYPE_ETH)
+ dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
+ /* if only IB is supported,
+ * assign IB only if SRIOV is off*/
+ else if (dev->caps.supported_type[i] ==
+ MLX4_PORT_TYPE_IB) {
+ if (dev->flags & MLX4_FLAG_SRIOV)
+ dev->caps.port_type[i] =
+ MLX4_PORT_TYPE_NONE;
+ else
+ dev->caps.port_type[i] =
+ MLX4_PORT_TYPE_IB;
+ /* if IB and ETH are supported,
+ * first of all check if SRIOV is on */
+ } else if (dev->flags & MLX4_FLAG_SRIOV)
+ dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
+ else {
+ /* In non-SRIOV mode, we set the port type
+ * according to user selection of port type,
+ * if user selected none, take the FW hint */
+ if (port_type_array[i-1] == MLX4_PORT_TYPE_NONE)
+ dev->caps.port_type[i] = dev->caps.suggested_type[i] ?
+ MLX4_PORT_TYPE_ETH : MLX4_PORT_TYPE_IB;
+ else
+ dev->caps.port_type[i] = port_type_array[i-1];
+ }
+ }
+ /*
+ * Link sensing is allowed on the port if 3 conditions are true:
+ * 1. Both protocols are supported on the port.
+ * 2. Different types are supported on the port
+ * 3. FW declared that it supports link sensing
+ */
mlx4_priv(dev)->sense.sense_allowed[i] =
- dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO;
+ ((dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO) &&
+ (dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
+ (dev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT));
+
+ /*
+ * If "default_sense" bit is set, we move the port to "AUTO" mode
+ * and perform sense_port FW command to try and set the correct
+ * port type from beginning
+ */
+ if (mlx4_priv(dev)->sense.sense_allowed[i] && dev->caps.default_sense[i]) {
+ enum mlx4_port_type sensed_port = MLX4_PORT_TYPE_NONE;
+ dev->caps.possible_type[i] = MLX4_PORT_TYPE_AUTO;
+ mlx4_SENSE_PORT(dev, i, &sensed_port);
+ if (sensed_port != MLX4_PORT_TYPE_NONE)
+ dev->caps.port_type[i] = sensed_port;
+ } else {
+ dev->caps.possible_type[i] = dev->caps.port_type[i];
+ }
if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) {
dev->caps.log_num_macs = dev_cap->log_max_macs[i];
@@ -262,8 +356,6 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
}
}
- mlx4_set_port_mask(dev);
-
dev->caps.max_counters = 1 << ilog2(dev_cap->max_counters);
dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
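The port-type assignment above boils down to a short precedence list: a single supported protocol wins (IB only when SR-IOV is off), dual-protocol ports default to Ethernet under SR-IOV, and otherwise the module parameter is honoured with the firmware suggestion as a fallback. A stand-alone restatement under those assumptions (the enum values here are placeholders, not the driver's):

#include <stdio.h>
#include <stdbool.h>

enum port_type { TYPE_NONE, TYPE_IB, TYPE_ETH, TYPE_AUTO };	/* placeholders */

static enum port_type pick_port_type(enum port_type supported, bool sriov,
				     enum port_type user_param, bool fw_suggests_eth)
{
	if (supported == TYPE_NONE)
		return TYPE_NONE;
	if (supported == TYPE_ETH)
		return TYPE_ETH;			/* only ETH supported */
	if (supported == TYPE_IB)
		return sriov ? TYPE_NONE : TYPE_IB;	/* IB unused under SR-IOV */
	if (sriov)
		return TYPE_ETH;			/* both supported + SR-IOV */
	if (user_param == TYPE_NONE)
		return fw_suggests_eth ? TYPE_ETH : TYPE_IB;	/* FW hint */
	return user_param;
}

int main(void)
{
	printf("%d\n", pick_port_type(TYPE_AUTO, false, TYPE_NONE, true)); /* ETH */
	return 0;
}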
@@ -282,6 +374,149 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
return 0;
}
+/* The function checks whether there are live VFs and returns how many there are */
+static int mlx4_how_many_lives_vf(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_slave_state *s_state;
+ int i;
+ int ret = 0;
+
+ for (i = 1/*the ppf is 0*/; i < dev->num_slaves; ++i) {
+ s_state = &priv->mfunc.master.slave_state[i];
+ if (s_state->active && s_state->last_cmd !=
+ MLX4_COMM_CMD_RESET) {
+ mlx4_warn(dev, "%s: slave: %d is still active\n",
+ __func__, i);
+ ret++;
+ }
+ }
+ return ret;
+}
+
+static int mlx4_is_slave_active(struct mlx4_dev *dev, int slave)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_slave_state *s_slave;
+
+ if (!mlx4_is_master(dev))
+ return 0;
+
+ s_slave = &priv->mfunc.master.slave_state[slave];
+ return !!s_slave->active;
+}
+EXPORT_SYMBOL(mlx4_is_slave_active);
+
+static int mlx4_slave_cap(struct mlx4_dev *dev)
+{
+ int err;
+ u32 page_size;
+ struct mlx4_dev_cap dev_cap;
+ struct mlx4_func_cap func_cap;
+ struct mlx4_init_hca_param hca_param;
+ int i;
+
+ memset(&hca_param, 0, sizeof(hca_param));
+ err = mlx4_QUERY_HCA(dev, &hca_param);
+ if (err) {
+ mlx4_err(dev, "QUERY_HCA command failed, aborting.\n");
+ return err;
+ }
+
+ /*fail if the hca has an unknown capability */
+ if ((hca_param.global_caps | HCA_GLOBAL_CAP_MASK) !=
+ HCA_GLOBAL_CAP_MASK) {
+ mlx4_err(dev, "Unknown hca global capabilities\n");
+ return -ENOSYS;
+ }
+
+ mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz;
+
+ memset(&dev_cap, 0, sizeof(dev_cap));
+ err = mlx4_dev_cap(dev, &dev_cap);
+ if (err) {
+ mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
+ return err;
+ }
+
+ page_size = ~dev->caps.page_size_cap + 1;
+ mlx4_warn(dev, "HCA minimum page size:%d\n", page_size);
+ if (page_size > PAGE_SIZE) {
+ mlx4_err(dev, "HCA minimum page size of %d bigger than "
+ "kernel PAGE_SIZE of %ld, aborting.\n",
+ page_size, PAGE_SIZE);
+ return -ENODEV;
+ }
+
+ /* slave gets uar page size from QUERY_HCA fw command */
+ dev->caps.uar_page_size = 1 << (hca_param.uar_page_sz + 12);
+
+ /* TODO: relax this assumption */
+ if (dev->caps.uar_page_size != PAGE_SIZE) {
+ mlx4_err(dev, "UAR size:%d != kernel PAGE_SIZE of %ld\n",
+ dev->caps.uar_page_size, PAGE_SIZE);
+ return -ENODEV;
+ }
+
+ memset(&func_cap, 0, sizeof(func_cap));
+ err = mlx4_QUERY_FUNC_CAP(dev, &func_cap);
+ if (err) {
+ mlx4_err(dev, "QUERY_FUNC_CAP command failed, aborting.\n");
+ return err;
+ }
+
+ if ((func_cap.pf_context_behaviour | PF_CONTEXT_BEHAVIOUR_MASK) !=
+ PF_CONTEXT_BEHAVIOUR_MASK) {
+ mlx4_err(dev, "Unknown pf context behaviour\n");
+ return -ENOSYS;
+ }
+
+ dev->caps.function = func_cap.function;
+ dev->caps.num_ports = func_cap.num_ports;
+ dev->caps.num_qps = func_cap.qp_quota;
+ dev->caps.num_srqs = func_cap.srq_quota;
+ dev->caps.num_cqs = func_cap.cq_quota;
+ dev->caps.num_eqs = func_cap.max_eq;
+ dev->caps.reserved_eqs = func_cap.reserved_eq;
+ dev->caps.num_mpts = func_cap.mpt_quota;
+ dev->caps.num_mtts = func_cap.mtt_quota;
+ dev->caps.num_pds = MLX4_NUM_PDS;
+ dev->caps.num_mgms = 0;
+ dev->caps.num_amgms = 0;
+
+ for (i = 1; i <= dev->caps.num_ports; ++i)
+ dev->caps.port_mask[i] = dev->caps.port_type[i];
+
+ if (dev->caps.num_ports > MLX4_MAX_PORTS) {
+ mlx4_err(dev, "HCA has %d ports, but we only support %d, "
+ "aborting.\n", dev->caps.num_ports, MLX4_MAX_PORTS);
+ return -ENODEV;
+ }
+
+ if (dev->caps.uar_page_size * (dev->caps.num_uars -
+ dev->caps.reserved_uars) >
+ pci_resource_len(dev->pdev, 2)) {
+ mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than "
+ "PCI resource 2 size of 0x%llx, aborting.\n",
+ dev->caps.uar_page_size * dev->caps.num_uars,
+ (unsigned long long) pci_resource_len(dev->pdev, 2));
+ return -ENODEV;
+ }
+
+#if 0
+ mlx4_warn(dev, "sqp_demux:%d\n", dev->caps.sqp_demux);
+ mlx4_warn(dev, "num_uars:%d reserved_uars:%d uar region:0x%x bar2:0x%llx\n",
+ dev->caps.num_uars, dev->caps.reserved_uars,
+ dev->caps.uar_page_size * dev->caps.num_uars,
+ pci_resource_len(dev->pdev, 2));
+ mlx4_warn(dev, "num_eqs:%d reserved_eqs:%d\n", dev->caps.num_eqs,
+ dev->caps.reserved_eqs);
+ mlx4_warn(dev, "num_pds:%d reserved_pds:%d slave_pd_shift:%d pd_base:%d\n",
+ dev->caps.num_pds, dev->caps.reserved_pds,
+ dev->caps.slave_pd_shift, dev->caps.pd_base);
+#endif
+ return 0;
+}
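Two conversions in mlx4_slave_cap() are easy to misread: page_size_cap was stored earlier as ~(min_page_sz - 1), so ~cap + 1 recovers the device's minimum page size, and QUERY_HCA reports the UAR page size as a log in 4 KB chunks. A quick stand-alone check with illustrative values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t min_page_sz = 4096;			/* example device minimum */
	uint32_t page_size_cap = ~(min_page_sz - 1);	/* as stored by mlx4_dev_cap() */
	uint32_t recovered = ~page_size_cap + 1;	/* the trick in mlx4_slave_cap() */

	uint8_t uar_page_sz_log = 0;			/* QUERY_HCA: log size in 4K units */
	uint32_t uar_page_size = 1u << (uar_page_sz_log + 12);

	printf("min page size recovered: %u\n", recovered);	/* 4096 */
	printf("UAR page size: %u\n", uar_page_size);		/* 4096 for log 0 */
	return 0;
}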
/*
* Change the port configuration of the device.
@@ -377,7 +612,8 @@ static ssize_t set_port_type(struct device *dev,
types[i] = mdev->caps.port_type[i+1];
}
- if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
+ if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
+ !(mdev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)) {
for (i = 1; i <= mdev->caps.num_ports; i++) {
if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
mdev->caps.possible_type[i] = mdev->caps.port_type[i];
@@ -451,6 +687,7 @@ static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
{
struct mlx4_priv *priv = mlx4_priv(dev);
int err;
+ int num_eqs;
err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table,
cmpt_base +
@@ -480,12 +717,14 @@ static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
if (err)
goto err_srq;
+ num_eqs = (mlx4_is_master(dev)) ?
+ roundup_pow_of_two(mlx4_master_get_num_eqs(dev)) :
+ dev->caps.num_eqs;
err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
cmpt_base +
((u64) (MLX4_CMPT_TYPE_EQ *
cmpt_entry_sz) << MLX4_CMPT_SHIFT),
- cmpt_entry_sz,
- dev->caps.num_eqs, dev->caps.num_eqs, 0, 0);
+ cmpt_entry_sz, num_eqs, num_eqs, 0, 0);
if (err)
goto err_cq;
@@ -509,6 +748,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
{
struct mlx4_priv *priv = mlx4_priv(dev);
u64 aux_pages;
+ int num_eqs;
int err;
err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
@@ -540,10 +780,13 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
goto err_unmap_aux;
}
+
+ num_eqs = (mlx4_is_master(dev)) ?
+ roundup_pow_of_two(mlx4_master_get_num_eqs(dev)) :
+ dev->caps.num_eqs;
err = mlx4_init_icm_table(dev, &priv->eq_table.table,
init_hca->eqc_base, dev_cap->eqc_entry_sz,
- dev->caps.num_eqs, dev->caps.num_eqs,
- 0, 0);
+ num_eqs, num_eqs, 0, 0);
if (err) {
mlx4_err(dev, "Failed to map EQ context memory, aborting.\n");
goto err_unmap_cmpt;
@@ -563,7 +806,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table,
init_hca->mtt_base,
dev->caps.mtt_entry_sz,
- dev->caps.num_mtt_segs,
+ dev->caps.num_mtts,
dev->caps.reserved_mtts, 1, 0);
if (err) {
mlx4_err(dev, "Failed to map MTT context memory, aborting.\n");
@@ -650,7 +893,8 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
* and it's a lot easier than trying to track ref counts.
*/
err = mlx4_init_icm_table(dev, &priv->mcg_table.table,
- init_hca->mc_base, MLX4_MGM_ENTRY_SIZE,
+ init_hca->mc_base,
+ mlx4_get_mgm_entry_size(dev),
dev->caps.num_mgms + dev->caps.num_amgms,
dev->caps.num_mgms + dev->caps.num_amgms,
0, 0);
@@ -726,6 +970,16 @@ static void mlx4_free_icms(struct mlx4_dev *dev)
mlx4_free_icm(dev, priv->fw.aux_icm, 0);
}
+static void mlx4_slave_exit(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ down(&priv->cmd.slave_sem);
+ if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_TIME))
+ mlx4_warn(dev, "Failed to close slave function.\n");
+ up(&priv->cmd.slave_sem);
+}
+
static int map_bf_area(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -733,8 +987,10 @@ static int map_bf_area(struct mlx4_dev *dev)
resource_size_t bf_len;
int err = 0;
- bf_start = pci_resource_start(dev->pdev, 2) + (dev->caps.num_uars << PAGE_SHIFT);
- bf_len = pci_resource_len(dev->pdev, 2) - (dev->caps.num_uars << PAGE_SHIFT);
+ bf_start = pci_resource_start(dev->pdev, 2) +
+ (dev->caps.num_uars << PAGE_SHIFT);
+ bf_len = pci_resource_len(dev->pdev, 2) -
+ (dev->caps.num_uars << PAGE_SHIFT);
priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len);
if (!priv->bf_mapping)
err = -ENOMEM;
@@ -751,10 +1007,81 @@ static void unmap_bf_area(struct mlx4_dev *dev)
static void mlx4_close_hca(struct mlx4_dev *dev)
{
unmap_bf_area(dev);
- mlx4_CLOSE_HCA(dev, 0);
- mlx4_free_icms(dev);
- mlx4_UNMAP_FA(dev);
- mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
+ if (mlx4_is_slave(dev))
+ mlx4_slave_exit(dev);
+ else {
+ mlx4_CLOSE_HCA(dev, 0);
+ mlx4_free_icms(dev);
+ mlx4_UNMAP_FA(dev);
+ mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
+ }
+}
+
+static int mlx4_init_slave(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ u64 dma = (u64) priv->mfunc.vhcr_dma;
+ int num_of_reset_retries = NUM_OF_RESET_RETRIES;
+ int ret_from_reset = 0;
+ u32 slave_read;
+ u32 cmd_channel_ver;
+
+ down(&priv->cmd.slave_sem);
+ priv->cmd.max_cmds = 1;
+ mlx4_warn(dev, "Sending reset\n");
+ ret_from_reset = mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0,
+ MLX4_COMM_TIME);
+ /* if we are in the middle of flr the slave will try
+ * NUM_OF_RESET_RETRIES times before leaving.*/
+ if (ret_from_reset) {
+ if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) {
+ msleep(SLEEP_TIME_IN_RESET);
+ while (ret_from_reset && num_of_reset_retries) {
+ mlx4_warn(dev, "slave is currently in the"
+ "middle of FLR. retrying..."
+ "(try num:%d)\n",
+ (NUM_OF_RESET_RETRIES -
+ num_of_reset_retries + 1));
+ ret_from_reset =
+ mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET,
+ 0, MLX4_COMM_TIME);
+ num_of_reset_retries = num_of_reset_retries - 1;
+ }
+ } else
+ goto err;
+ }
+
+ /* check the driver version - the slave I/F revision
+ * must match the master's */
+ slave_read = swab32(readl(&priv->mfunc.comm->slave_read));
+ cmd_channel_ver = mlx4_comm_get_version();
+
+ if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) !=
+ MLX4_COMM_GET_IF_REV(slave_read)) {
+ mlx4_err(dev, "slave driver version is not supported"
+ " by the master\n");
+ goto err;
+ }
+
+ mlx4_warn(dev, "Sending vhcr0\n");
+ if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR0, dma >> 48,
+ MLX4_COMM_TIME))
+ goto err;
+ if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR1, dma >> 32,
+ MLX4_COMM_TIME))
+ goto err;
+ if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR2, dma >> 16,
+ MLX4_COMM_TIME))
+ goto err;
+ if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma, MLX4_COMM_TIME))
+ goto err;
+ up(&priv->cmd.slave_sem);
+ return 0;
+
+err:
+ mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, 0);
+ up(&priv->cmd.slave_sem);
+ return -EIO;
}
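mlx4_init_slave() hands the 64-bit VHCR DMA address to the master 16 bits at a time: VHCR0 carries bits 63:48 and VHCR_EN the low 16 bits. A stand-alone sketch of the split and of how the receiving side can reassemble it (the 16-bit width of the comm-channel parameter is an assumption here, not something this hunk shows):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t vhcr_dma = 0x0000123456789000ULL;	/* illustrative address */

	uint16_t chunk[4] = {				/* VHCR0..VHCR_EN order */
		(uint16_t)(vhcr_dma >> 48),
		(uint16_t)(vhcr_dma >> 32),
		(uint16_t)(vhcr_dma >> 16),
		(uint16_t) vhcr_dma,
	};

	uint64_t rebuilt = ((uint64_t)chunk[0] << 48) | ((uint64_t)chunk[1] << 32) |
			   ((uint64_t)chunk[2] << 16) |  (uint64_t)chunk[3];

	printf("reassembled matches: %s\n", rebuilt == vhcr_dma ? "yes" : "no");
	return 0;
}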
static int mlx4_init_hca(struct mlx4_dev *dev)
@@ -768,56 +1095,76 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
u64 icm_size;
int err;
- err = mlx4_QUERY_FW(dev);
- if (err) {
- if (err == -EACCES)
- mlx4_info(dev, "non-primary physical function, skipping.\n");
- else
- mlx4_err(dev, "QUERY_FW command failed, aborting.\n");
- return err;
- }
+ if (!mlx4_is_slave(dev)) {
+ err = mlx4_QUERY_FW(dev);
+ if (err) {
+ if (err == -EACCES)
+ mlx4_info(dev, "non-primary physical function, skipping.\n");
+ else
+ mlx4_err(dev, "QUERY_FW command failed, aborting.\n");
+ goto unmap_bf;
+ }
- err = mlx4_load_fw(dev);
- if (err) {
- mlx4_err(dev, "Failed to start FW, aborting.\n");
- return err;
- }
+ err = mlx4_load_fw(dev);
+ if (err) {
+ mlx4_err(dev, "Failed to start FW, aborting.\n");
+ goto unmap_bf;
+ }
- mlx4_cfg.log_pg_sz_m = 1;
- mlx4_cfg.log_pg_sz = 0;
- err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg);
- if (err)
- mlx4_warn(dev, "Failed to override log_pg_sz parameter\n");
+ mlx4_cfg.log_pg_sz_m = 1;
+ mlx4_cfg.log_pg_sz = 0;
+ err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg);
+ if (err)
+ mlx4_warn(dev, "Failed to override log_pg_sz parameter\n");
- err = mlx4_dev_cap(dev, &dev_cap);
- if (err) {
- mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
- goto err_stop_fw;
- }
+ err = mlx4_dev_cap(dev, &dev_cap);
+ if (err) {
+ mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
+ goto err_stop_fw;
+ }
- profile = default_profile;
+ profile = default_profile;
- icm_size = mlx4_make_profile(dev, &profile, &dev_cap, &init_hca);
- if ((long long) icm_size < 0) {
- err = icm_size;
- goto err_stop_fw;
- }
+ icm_size = mlx4_make_profile(dev, &profile, &dev_cap,
+ &init_hca);
+ if ((long long) icm_size < 0) {
+ err = icm_size;
+ goto err_stop_fw;
+ }
- if (map_bf_area(dev))
- mlx4_dbg(dev, "Failed to map blue flame area\n");
+ init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
+ init_hca.uar_page_sz = PAGE_SHIFT - 12;
- init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
+ err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
+ if (err)
+ goto err_stop_fw;
- err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
- if (err)
- goto err_stop_fw;
+ err = mlx4_INIT_HCA(dev, &init_hca);
+ if (err) {
+ mlx4_err(dev, "INIT_HCA command failed, aborting.\n");
+ goto err_free_icm;
+ }
+ } else {
+ err = mlx4_init_slave(dev);
+ if (err) {
+ mlx4_err(dev, "Failed to initialize slave\n");
+ goto unmap_bf;
+ }
- err = mlx4_INIT_HCA(dev, &init_hca);
- if (err) {
- mlx4_err(dev, "INIT_HCA command failed, aborting.\n");
- goto err_free_icm;
+ err = mlx4_slave_cap(dev);
+ if (err) {
+ mlx4_err(dev, "Failed to obtain slave caps\n");
+ goto err_close;
+ }
}
+ if (map_bf_area(dev))
+ mlx4_dbg(dev, "Failed to map blue flame area\n");
+
+ /* Only the master sets the ports; all the rest get them from it. */
+ if (!mlx4_is_slave(dev))
+ mlx4_set_port_mask(dev);
+
err = mlx4_QUERY_ADAPTER(dev, &adapter);
if (err) {
mlx4_err(dev, "QUERY_ADAPTER command failed, aborting.\n");
@@ -830,16 +1177,19 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
return 0;
err_close:
- mlx4_CLOSE_HCA(dev, 0);
+ mlx4_close_hca(dev);
err_free_icm:
- mlx4_free_icms(dev);
+ if (!mlx4_is_slave(dev))
+ mlx4_free_icms(dev);
err_stop_fw:
+ if (!mlx4_is_slave(dev)) {
+ mlx4_UNMAP_FA(dev);
+ mlx4_free_icm(dev, priv->fw.fw_icm, 0);
+ }
+unmap_bf:
unmap_bf_area(dev);
- mlx4_UNMAP_FA(dev);
- mlx4_free_icm(dev, priv->fw.fw_icm, 0);
-
return err;
}
@@ -986,55 +1336,56 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
goto err_srq_table_free;
}
- err = mlx4_init_mcg_table(dev);
- if (err) {
- mlx4_err(dev, "Failed to initialize "
- "multicast group table, aborting.\n");
- goto err_qp_table_free;
+ if (!mlx4_is_slave(dev)) {
+ err = mlx4_init_mcg_table(dev);
+ if (err) {
+ mlx4_err(dev, "Failed to initialize "
+ "multicast group table, aborting.\n");
+ goto err_qp_table_free;
+ }
}
err = mlx4_init_counters_table(dev);
if (err && err != -ENOENT) {
mlx4_err(dev, "Failed to initialize counters table, aborting.\n");
- goto err_counters_table_free;
+ goto err_mcg_table_free;
}
- for (port = 1; port <= dev->caps.num_ports; port++) {
- enum mlx4_port_type port_type = 0;
- mlx4_SENSE_PORT(dev, port, &port_type);
- if (port_type)
- dev->caps.port_type[port] = port_type;
- ib_port_default_caps = 0;
- err = mlx4_get_port_ib_caps(dev, port, &ib_port_default_caps);
- if (err)
- mlx4_warn(dev, "failed to get port %d default "
- "ib capabilities (%d). Continuing with "
- "caps = 0\n", port, err);
- dev->caps.ib_port_def_cap[port] = ib_port_default_caps;
-
- err = mlx4_check_ext_port_caps(dev, port);
- if (err)
- mlx4_warn(dev, "failed to get port %d extended "
- "port capabilities support info (%d)."
- " Assuming not supported\n", port, err);
+ if (!mlx4_is_slave(dev)) {
+ for (port = 1; port <= dev->caps.num_ports; port++) {
+ ib_port_default_caps = 0;
+ err = mlx4_get_port_ib_caps(dev, port,
+ &ib_port_default_caps);
+ if (err)
+ mlx4_warn(dev, "failed to get port %d default "
+ "ib capabilities (%d). Continuing "
+ "with caps = 0\n", port, err);
+ dev->caps.ib_port_def_cap[port] = ib_port_default_caps;
+
+ err = mlx4_check_ext_port_caps(dev, port);
+ if (err)
+ mlx4_warn(dev, "failed to get port %d extended "
+ "port capabilities support info (%d)."
+ " Assuming not supported\n",
+ port, err);
- err = mlx4_SET_PORT(dev, port);
- if (err) {
- mlx4_err(dev, "Failed to set port %d, aborting\n",
- port);
- goto err_mcg_table_free;
+ err = mlx4_SET_PORT(dev, port);
+ if (err) {
+ mlx4_err(dev, "Failed to set port %d, aborting\n",
+ port);
+ goto err_counters_table_free;
+ }
}
}
- mlx4_set_port_mask(dev);
return 0;
-err_mcg_table_free:
- mlx4_cleanup_mcg_table(dev);
-
err_counters_table_free:
mlx4_cleanup_counters_table(dev);
+err_mcg_table_free:
+ mlx4_cleanup_mcg_table(dev);
+
err_qp_table_free:
mlx4_cleanup_qp_table(dev);
@@ -1081,8 +1432,16 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
int i;
if (msi_x) {
- nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
- nreq);
+ /* In multifunction mode each function gets 2 msi-X vectors
+ * one for data path completions and the other for async events
+ * or command completions */
+ if (mlx4_is_mfunc(dev)) {
+ nreq = 2;
+ } else {
+ nreq = min_t(int, dev->caps.num_eqs -
+ dev->caps.reserved_eqs, nreq);
+ }
+
entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
if (!entries)
goto no_msi;
@@ -1138,16 +1497,24 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
info->dev = dev;
info->port = port;
- mlx4_init_mac_table(dev, &info->mac_table);
- mlx4_init_vlan_table(dev, &info->vlan_table);
- info->base_qpn = dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] +
+ if (!mlx4_is_slave(dev)) {
+ INIT_RADIX_TREE(&info->mac_tree, GFP_KERNEL);
+ mlx4_init_mac_table(dev, &info->mac_table);
+ mlx4_init_vlan_table(dev, &info->vlan_table);
+ info->base_qpn =
+ dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] +
(port - 1) * (1 << log_num_mac);
+ }
sprintf(info->dev_name, "mlx4_port%d", port);
info->port_attr.attr.name = info->dev_name;
- info->port_attr.attr.mode = S_IRUGO | S_IWUSR;
+ if (mlx4_is_mfunc(dev))
+ info->port_attr.attr.mode = S_IRUGO;
+ else {
+ info->port_attr.attr.mode = S_IRUGO | S_IWUSR;
+ info->port_attr.store = set_port_type;
+ }
info->port_attr.show = show_port_type;
- info->port_attr.store = set_port_type;
sysfs_attr_init(&info->port_attr.attr);
err = device_create_file(&dev->pdev->dev, &info->port_attr);
@@ -1220,6 +1587,46 @@ static void mlx4_clear_steering(struct mlx4_dev *dev)
kfree(priv->steer);
}
+static int extended_func_num(struct pci_dev *pdev)
+{
+ return PCI_SLOT(pdev->devfn) * 8 + PCI_FUNC(pdev->devfn);
+}
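PCI_SLOT() and PCI_FUNC() extract bits 7:3 and 2:0 of devfn, so slot * 8 + func in extended_func_num() simply rebuilds the 8-bit devfn; that is the number compared against probe_vf below. A stand-alone restatement (the two macros are copied from <linux/pci.h>):

#include <stdio.h>

#define PCI_SLOT(devfn)	(((devfn) >> 3) & 0x1f)
#define PCI_FUNC(devfn)	((devfn) & 0x07)

static int extended_func_num(unsigned int devfn)
{
	return PCI_SLOT(devfn) * 8 + PCI_FUNC(devfn);
}

int main(void)
{
	unsigned int devfn = (0 << 3) | 5;	/* e.g. device 0, function 5 */

	printf("extended function number: %d\n", extended_func_num(devfn)); /* 5 */
	return 0;
}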
+
+#define MLX4_OWNER_BASE 0x8069c
+#define MLX4_OWNER_SIZE 4
+
+static int mlx4_get_ownership(struct mlx4_dev *dev)
+{
+ void __iomem *owner;
+ u32 ret;
+
+ owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE,
+ MLX4_OWNER_SIZE);
+ if (!owner) {
+ mlx4_err(dev, "Failed to obtain ownership bit\n");
+ return -ENOMEM;
+ }
+
+ ret = readl(owner);
+ iounmap(owner);
+ return (int) !!ret;
+}
+
+static void mlx4_free_ownership(struct mlx4_dev *dev)
+{
+ void __iomem *owner;
+
+ owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE,
+ MLX4_OWNER_SIZE);
+ if (!owner) {
+ mlx4_err(dev, "Failed to obtain ownership bit\n");
+ return;
+ }
+ writel(0, owner);
+ msleep(1000);
+ iounmap(owner);
+}
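mlx4_get_ownership() ends up tri-state: negative when the ownership word at BAR0 + MLX4_OWNER_BASE cannot be mapped, 1 when another function already holds it, and 0 when this instance now owns the device; mlx4_free_ownership() releases it by writing 0. The caller's interpretation in __mlx4_init_one(), restated compactly (illustrative only, not driver code):

#include <stdio.h>

static const char *ownership_verdict(int ret)
{
	if (ret < 0)
		return "error: could not map the ownership word";
	if (ret)
		return "taken: another PF drives this device, skip it";
	return "ours: safe to reset the HCA and enable SR-IOV";
}

int main(void)
{
	int samples[] = { -12 /* -ENOMEM */, 1, 0 };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%d -> %s\n", samples[i], ownership_verdict(samples[i]));
	return 0;
}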
+
static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct mlx4_priv *priv;
@@ -1235,13 +1642,20 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
"aborting.\n");
return err;
}
-
+ if (num_vfs > MLX4_MAX_NUM_VF) {
+ printk(KERN_ERR "There are more VF's (%d) than allowed(%d)\n",
+ num_vfs, MLX4_MAX_NUM_VF);
+ return -EINVAL;
+ }
/*
- * Check for BARs. We expect 0: 1MB
+ * Check for BARs.
*/
- if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
- pci_resource_len(pdev, 0) != 1 << 20) {
- dev_err(&pdev->dev, "Missing DCS, aborting.\n");
+ if (((id == NULL) || !(id->driver_data & MLX4_VF)) &&
+ !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+ dev_err(&pdev->dev, "Missing DCS, aborting."
+ "(id == 0X%p, id->driver_data: 0x%lx,"
+ " pci_resource_flags(pdev, 0):0x%lx)\n", id,
+ id ? id->driver_data : 0, pci_resource_flags(pdev, 0));
err = -ENODEV;
goto err_disable_pdev;
}
@@ -1305,42 +1719,132 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
mutex_init(&priv->bf_mutex);
dev->rev_id = pdev->revision;
+ /* Detect if this device is a virtual function */
+ if (id && id->driver_data & MLX4_VF) {
+ /* When acting as pf, we normally skip vfs unless explicitly
+ * requested to probe them. */
+ if (num_vfs && extended_func_num(pdev) > probe_vf) {
+ mlx4_warn(dev, "Skipping virtual function:%d\n",
+ extended_func_num(pdev));
+ err = -ENODEV;
+ goto err_free_dev;
+ }
+ mlx4_warn(dev, "Detected virtual function - running in slave mode\n");
+ dev->flags |= MLX4_FLAG_SLAVE;
+ } else {
+ /* We reset the device and enable SRIOV only for physical
+ * devices. Try to claim ownership on the device;
+ * if already taken, skip -- do not allow multiple PFs */
+ err = mlx4_get_ownership(dev);
+ if (err) {
+ if (err < 0)
+ goto err_free_dev;
+ else {
+ mlx4_warn(dev, "Multiple PFs not yet supported."
+ " Skipping PF.\n");
+ err = -EINVAL;
+ goto err_free_dev;
+ }
+ }
- /*
- * Now reset the HCA before we touch the PCI capabilities or
- * attempt a firmware command, since a boot ROM may have left
- * the HCA in an undefined state.
- */
- err = mlx4_reset(dev);
- if (err) {
- mlx4_err(dev, "Failed to reset HCA, aborting.\n");
- goto err_free_dev;
+ if (num_vfs) {
+ mlx4_warn(dev, "Enabling sriov with:%d vfs\n", num_vfs);
+ err = pci_enable_sriov(pdev, num_vfs);
+ if (err) {
+ mlx4_err(dev, "Failed to enable sriov,"
+ "continuing without sriov enabled"
+ " (err = %d).\n", err);
+ num_vfs = 0;
+ err = 0;
+ } else {
+ mlx4_warn(dev, "Running in master mode\n");
+ dev->flags |= MLX4_FLAG_SRIOV |
+ MLX4_FLAG_MASTER;
+ dev->num_vfs = num_vfs;
+ }
+ }
+
+ /*
+ * Now reset the HCA before we touch the PCI capabilities or
+ * attempt a firmware command, since a boot ROM may have left
+ * the HCA in an undefined state.
+ */
+ err = mlx4_reset(dev);
+ if (err) {
+ mlx4_err(dev, "Failed to reset HCA, aborting.\n");
+ goto err_rel_own;
+ }
}
+slave_start:
if (mlx4_cmd_init(dev)) {
mlx4_err(dev, "Failed to init command interface, aborting.\n");
- goto err_free_dev;
+ goto err_sriov;
+ }
+
+ /* In slave functions, the communication channel must be initialized
+ * before posting commands. Also, init num_slaves before calling
+ * mlx4_init_hca */
+ if (mlx4_is_mfunc(dev)) {
+ if (mlx4_is_master(dev))
+ dev->num_slaves = MLX4_MAX_NUM_SLAVES;
+ else {
+ dev->num_slaves = 0;
+ if (mlx4_multi_func_init(dev)) {
+ mlx4_err(dev, "Failed to init slave mfunc"
+ " interface, aborting.\n");
+ goto err_cmd;
+ }
+ }
}
err = mlx4_init_hca(dev);
- if (err)
- goto err_cmd;
+ if (err) {
+ if (err == -EACCES) {
+ /* Not primary Physical function
+ * Running in slave mode */
+ mlx4_cmd_cleanup(dev);
+ dev->flags |= MLX4_FLAG_SLAVE;
+ dev->flags &= ~MLX4_FLAG_MASTER;
+ goto slave_start;
+ } else
+ goto err_mfunc;
+ }
+
+ /* In master functions, the communication channel must be initialized
+ * after obtaining its address from fw */
+ if (mlx4_is_master(dev)) {
+ if (mlx4_multi_func_init(dev)) {
+ mlx4_err(dev, "Failed to init master mfunc"
+ "interface, aborting.\n");
+ goto err_close;
+ }
+ }
err = mlx4_alloc_eq_table(dev);
if (err)
- goto err_close;
+ goto err_master_mfunc;
priv->msix_ctl.pool_bm = 0;
spin_lock_init(&priv->msix_ctl.pool_lock);
mlx4_enable_msi_x(dev);
-
- err = mlx4_init_steering(dev);
- if (err)
+ if ((mlx4_is_mfunc(dev)) &&
+ !(dev->flags & MLX4_FLAG_MSI_X)) {
+ mlx4_err(dev, "INTx is not supported in multi-function mode."
+ " aborting.\n");
goto err_free_eq;
+ }
+
+ if (!mlx4_is_slave(dev)) {
+ err = mlx4_init_steering(dev);
+ if (err)
+ goto err_free_eq;
+ }
err = mlx4_setup_hca(dev);
- if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X)) {
+ if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) &&
+ !mlx4_is_mfunc(dev)) {
dev->flags &= ~MLX4_FLAG_MSI_X;
pci_disable_msix(pdev);
err = mlx4_setup_hca(dev);
@@ -1383,20 +1887,37 @@ err_port:
mlx4_cleanup_uar_table(dev);
err_steer:
- mlx4_clear_steering(dev);
+ if (!mlx4_is_slave(dev))
+ mlx4_clear_steering(dev);
err_free_eq:
mlx4_free_eq_table(dev);
+err_master_mfunc:
+ if (mlx4_is_master(dev))
+ mlx4_multi_func_cleanup(dev);
+
err_close:
if (dev->flags & MLX4_FLAG_MSI_X)
pci_disable_msix(pdev);
mlx4_close_hca(dev);
+err_mfunc:
+ if (mlx4_is_slave(dev))
+ mlx4_multi_func_cleanup(dev);
+
err_cmd:
mlx4_cmd_cleanup(dev);
+err_sriov:
+ if (num_vfs && (dev->flags & MLX4_FLAG_SRIOV))
+ pci_disable_sriov(pdev);
+
+err_rel_own:
+ if (!mlx4_is_slave(dev))
+ mlx4_free_ownership(dev);
+
err_free_dev:
kfree(priv);
@@ -1424,6 +1945,12 @@ static void mlx4_remove_one(struct pci_dev *pdev)
int p;
if (dev) {
+ /* In SR-IOV mode it is not allowed to unload the PF's
+ * driver while there are live VFs */
+ if (mlx4_is_master(dev)) {
+ if (mlx4_how_many_lives_vf(dev))
+ printk(KERN_ERR "Removing PF when there are assigned VF's !!!\n");
+ }
mlx4_stop_sense(dev);
mlx4_unregister_device(dev);
@@ -1443,17 +1970,31 @@ static void mlx4_remove_one(struct pci_dev *pdev)
mlx4_cleanup_xrcd_table(dev);
mlx4_cleanup_pd_table(dev);
+ if (mlx4_is_master(dev))
+ mlx4_free_resource_tracker(dev);
+
iounmap(priv->kar);
mlx4_uar_free(dev, &priv->driver_uar);
mlx4_cleanup_uar_table(dev);
- mlx4_clear_steering(dev);
+ if (!mlx4_is_slave(dev))
+ mlx4_clear_steering(dev);
mlx4_free_eq_table(dev);
+ if (mlx4_is_master(dev))
+ mlx4_multi_func_cleanup(dev);
mlx4_close_hca(dev);
+ if (mlx4_is_slave(dev))
+ mlx4_multi_func_cleanup(dev);
mlx4_cmd_cleanup(dev);
if (dev->flags & MLX4_FLAG_MSI_X)
pci_disable_msix(pdev);
+ if (num_vfs && (dev->flags & MLX4_FLAG_SRIOV)) {
+ mlx4_warn(dev, "Disabling sriov\n");
+ pci_disable_sriov(pdev);
+ }
+ if (!mlx4_is_slave(dev))
+ mlx4_free_ownership(dev);
kfree(priv);
pci_release_regions(pdev);
pci_disable_device(pdev);
@@ -1468,33 +2009,48 @@ int mlx4_restart_one(struct pci_dev *pdev)
}
static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table) = {
- { PCI_VDEVICE(MELLANOX, 0x6340) }, /* MT25408 "Hermon" SDR */
- { PCI_VDEVICE(MELLANOX, 0x634a) }, /* MT25408 "Hermon" DDR */
- { PCI_VDEVICE(MELLANOX, 0x6354) }, /* MT25408 "Hermon" QDR */
- { PCI_VDEVICE(MELLANOX, 0x6732) }, /* MT25408 "Hermon" DDR PCIe gen2 */
- { PCI_VDEVICE(MELLANOX, 0x673c) }, /* MT25408 "Hermon" QDR PCIe gen2 */
- { PCI_VDEVICE(MELLANOX, 0x6368) }, /* MT25408 "Hermon" EN 10GigE */
- { PCI_VDEVICE(MELLANOX, 0x6750) }, /* MT25408 "Hermon" EN 10GigE PCIe gen2 */
- { PCI_VDEVICE(MELLANOX, 0x6372) }, /* MT25458 ConnectX EN 10GBASE-T 10GigE */
- { PCI_VDEVICE(MELLANOX, 0x675a) }, /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
- { PCI_VDEVICE(MELLANOX, 0x6764) }, /* MT26468 ConnectX EN 10GigE PCIe gen2*/
- { PCI_VDEVICE(MELLANOX, 0x6746) }, /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
- { PCI_VDEVICE(MELLANOX, 0x676e) }, /* MT26478 ConnectX2 40GigE PCIe gen2 */
- { PCI_VDEVICE(MELLANOX, 0x1002) }, /* MT25400 Family [ConnectX-2 Virtual Function] */
- { PCI_VDEVICE(MELLANOX, 0x1003) }, /* MT27500 Family [ConnectX-3] */
- { PCI_VDEVICE(MELLANOX, 0x1004) }, /* MT27500 Family [ConnectX-3 Virtual Function] */
- { PCI_VDEVICE(MELLANOX, 0x1005) }, /* MT27510 Family */
- { PCI_VDEVICE(MELLANOX, 0x1006) }, /* MT27511 Family */
- { PCI_VDEVICE(MELLANOX, 0x1007) }, /* MT27520 Family */
- { PCI_VDEVICE(MELLANOX, 0x1008) }, /* MT27521 Family */
- { PCI_VDEVICE(MELLANOX, 0x1009) }, /* MT27530 Family */
- { PCI_VDEVICE(MELLANOX, 0x100a) }, /* MT27531 Family */
- { PCI_VDEVICE(MELLANOX, 0x100b) }, /* MT27540 Family */
- { PCI_VDEVICE(MELLANOX, 0x100c) }, /* MT27541 Family */
- { PCI_VDEVICE(MELLANOX, 0x100d) }, /* MT27550 Family */
- { PCI_VDEVICE(MELLANOX, 0x100e) }, /* MT27551 Family */
- { PCI_VDEVICE(MELLANOX, 0x100f) }, /* MT27560 Family */
- { PCI_VDEVICE(MELLANOX, 0x1010) }, /* MT27561 Family */
+ /* MT25408 "Hermon" SDR */
+ { PCI_VDEVICE(MELLANOX, 0x6340), 0 },
+ /* MT25408 "Hermon" DDR */
+ { PCI_VDEVICE(MELLANOX, 0x634a), 0 },
+ /* MT25408 "Hermon" QDR */
+ { PCI_VDEVICE(MELLANOX, 0x6354), 0 },
+ /* MT25408 "Hermon" DDR PCIe gen2 */
+ { PCI_VDEVICE(MELLANOX, 0x6732), 0 },
+ /* MT25408 "Hermon" QDR PCIe gen2 */
+ { PCI_VDEVICE(MELLANOX, 0x673c), 0 },
+ /* MT25408 "Hermon" EN 10GigE */
+ { PCI_VDEVICE(MELLANOX, 0x6368), 0 },
+ /* MT25408 "Hermon" EN 10GigE PCIe gen2 */
+ { PCI_VDEVICE(MELLANOX, 0x6750), 0 },
+ /* MT25458 ConnectX EN 10GBASE-T 10GigE */
+ { PCI_VDEVICE(MELLANOX, 0x6372), 0 },
+ /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
+ { PCI_VDEVICE(MELLANOX, 0x675a), 0 },
+ /* MT26468 ConnectX EN 10GigE PCIe gen2*/
+ { PCI_VDEVICE(MELLANOX, 0x6764), 0 },
+ /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
+ { PCI_VDEVICE(MELLANOX, 0x6746), 0 },
+ /* MT26478 ConnectX2 40GigE PCIe gen2 */
+ { PCI_VDEVICE(MELLANOX, 0x676e), 0 },
+ /* MT25400 Family [ConnectX-2 Virtual Function] */
+ { PCI_VDEVICE(MELLANOX, 0x1002), MLX4_VF },
+ /* MT27500 Family [ConnectX-3] */
+ { PCI_VDEVICE(MELLANOX, 0x1003), 0 },
+ /* MT27500 Family [ConnectX-3 Virtual Function] */
+ { PCI_VDEVICE(MELLANOX, 0x1004), MLX4_VF },
+ { PCI_VDEVICE(MELLANOX, 0x1005), 0 }, /* MT27510 Family */
+ { PCI_VDEVICE(MELLANOX, 0x1006), 0 }, /* MT27511 Family */
+ { PCI_VDEVICE(MELLANOX, 0x1007), 0 }, /* MT27520 Family */
+ { PCI_VDEVICE(MELLANOX, 0x1008), 0 }, /* MT27521 Family */
+ { PCI_VDEVICE(MELLANOX, 0x1009), 0 }, /* MT27530 Family */
+ { PCI_VDEVICE(MELLANOX, 0x100a), 0 }, /* MT27531 Family */
+ { PCI_VDEVICE(MELLANOX, 0x100b), 0 }, /* MT27540 Family */
+ { PCI_VDEVICE(MELLANOX, 0x100c), 0 }, /* MT27541 Family */
+ { PCI_VDEVICE(MELLANOX, 0x100d), 0 }, /* MT27550 Family */
+ { PCI_VDEVICE(MELLANOX, 0x100e), 0 }, /* MT27551 Family */
+ { PCI_VDEVICE(MELLANOX, 0x100f), 0 }, /* MT27560 Family */
+ { PCI_VDEVICE(MELLANOX, 0x1010), 0 }, /* MT27561 Family */
{ 0, }
};
@@ -1523,6 +2079,12 @@ static int __init mlx4_verify_params(void)
return -1;
}
+ /* Check if module param for ports type has legal combination */
+ if (port_type_array[0] == false && port_type_array[1] == true) {
+ printk(KERN_WARNING "Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n");
+ port_type_array[0] = true;
+ }
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index 978688c31046..0785d9b2a265 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -44,28 +44,47 @@
static const u8 zero_gid[16]; /* automatically initialized to 0 */
+struct mlx4_mgm {
+ __be32 next_gid_index;
+ __be32 members_count;
+ u32 reserved[2];
+ u8 gid[16];
+ __be32 qp[MLX4_MAX_QP_PER_MGM];
+};
+
+int mlx4_get_mgm_entry_size(struct mlx4_dev *dev)
+{
+ return min((1 << mlx4_log_num_mgm_entry_size), MLX4_MAX_MGM_ENTRY_SIZE);
+}
+
+int mlx4_get_qp_per_mgm(struct mlx4_dev *dev)
+{
+ return 4 * (mlx4_get_mgm_entry_size(dev) / 16 - 2);
+}
+
static int mlx4_READ_ENTRY(struct mlx4_dev *dev, int index,
struct mlx4_cmd_mailbox *mailbox)
{
return mlx4_cmd_box(dev, 0, mailbox->dma, index, 0, MLX4_CMD_READ_MCG,
- MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
static int mlx4_WRITE_ENTRY(struct mlx4_dev *dev, int index,
struct mlx4_cmd_mailbox *mailbox)
{
return mlx4_cmd(dev, mailbox->dma, index, 0, MLX4_CMD_WRITE_MCG,
- MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
-static int mlx4_WRITE_PROMISC(struct mlx4_dev *dev, u8 vep_num, u8 port, u8 steer,
+static int mlx4_WRITE_PROMISC(struct mlx4_dev *dev, u8 port, u8 steer,
struct mlx4_cmd_mailbox *mailbox)
{
u32 in_mod;
- in_mod = (u32) vep_num << 24 | (u32) port << 16 | steer << 1;
+ in_mod = (u32) port << 16 | steer << 1;
return mlx4_cmd(dev, mailbox->dma, in_mod, 0x1,
- MLX4_CMD_WRITE_MCG, MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_WRITE_MCG, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
}
static int mlx4_GID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
@@ -75,7 +94,8 @@ static int mlx4_GID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int err;
err = mlx4_cmd_imm(dev, mailbox->dma, &imm, 0, op_mod,
- MLX4_CMD_MGID_HASH, MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_MGID_HASH, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
if (!err)
*hash = imm;
@@ -102,7 +122,7 @@ static struct mlx4_promisc_qp *get_promisc_qp(struct mlx4_dev *dev, u8 pf_num,
* Add new entry to steering data structure.
* All promisc QPs should be added as well
*/
-static int new_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
+static int new_steering_entry(struct mlx4_dev *dev, u8 port,
enum mlx4_steer_type steer,
unsigned int index, u32 qpn)
{
@@ -115,10 +135,8 @@ static int new_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
struct mlx4_promisc_qp *dqp = NULL;
u32 prot;
int err;
- u8 pf_num;
- pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
- s_steer = &mlx4_priv(dev)->steer[pf_num];
+ s_steer = &mlx4_priv(dev)->steer[0];
new_entry = kzalloc(sizeof *new_entry, GFP_KERNEL);
if (!new_entry)
return -ENOMEM;
@@ -130,7 +148,7 @@ static int new_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
/* If the given qpn is also a promisc qp,
* it should be inserted to duplicates list
*/
- pqp = get_promisc_qp(dev, pf_num, steer, qpn);
+ pqp = get_promisc_qp(dev, 0, steer, qpn);
if (pqp) {
dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
if (!dqp) {
@@ -165,7 +183,7 @@ static int new_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
/* don't add already existing qpn */
if (pqp->qpn == qpn)
continue;
- if (members_count == MLX4_QP_PER_MGM) {
+ if (members_count == dev->caps.num_qp_per_mgm) {
/* out of space */
err = -ENOMEM;
goto out_mailbox;
@@ -193,7 +211,7 @@ out_alloc:
}
/* update the data structures with existing steering entry */
-static int existing_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
+static int existing_steering_entry(struct mlx4_dev *dev, u8 port,
enum mlx4_steer_type steer,
unsigned int index, u32 qpn)
{
@@ -201,12 +219,10 @@ static int existing_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
struct mlx4_steer_index *tmp_entry, *entry = NULL;
struct mlx4_promisc_qp *pqp;
struct mlx4_promisc_qp *dqp;
- u8 pf_num;
- pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
- s_steer = &mlx4_priv(dev)->steer[pf_num];
+ s_steer = &mlx4_priv(dev)->steer[0];
- pqp = get_promisc_qp(dev, pf_num, steer, qpn);
+ pqp = get_promisc_qp(dev, 0, steer, qpn);
if (!pqp)
return 0; /* nothing to do */
@@ -225,7 +241,7 @@ static int existing_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
* we need to add it as a duplicate to this entry
* for future references */
list_for_each_entry(dqp, &entry->duplicates, list) {
- if (qpn == dqp->qpn)
+ if (qpn == pqp->qpn)
return 0; /* qp is already duplicated */
}
@@ -241,20 +257,18 @@ static int existing_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
/* Check whether a qpn is a duplicate on steering entry
* If so, it should not be removed from mgm */
-static bool check_duplicate_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
+static bool check_duplicate_entry(struct mlx4_dev *dev, u8 port,
enum mlx4_steer_type steer,
unsigned int index, u32 qpn)
{
struct mlx4_steer *s_steer;
struct mlx4_steer_index *tmp_entry, *entry = NULL;
struct mlx4_promisc_qp *dqp, *tmp_dqp;
- u8 pf_num;
- pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
- s_steer = &mlx4_priv(dev)->steer[pf_num];
+ s_steer = &mlx4_priv(dev)->steer[0];
/* if qp is not promisc, it cannot be duplicated */
- if (!get_promisc_qp(dev, pf_num, steer, qpn))
+ if (!get_promisc_qp(dev, 0, steer, qpn))
return false;
/* The qp is promisc qp so it is a duplicate on this index
@@ -279,7 +293,7 @@ static bool check_duplicate_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
}
/* If a steering entry contains only promisc QPs, it can be removed. */
-static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
+static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 port,
enum mlx4_steer_type steer,
unsigned int index, u32 tqpn)
{
@@ -291,10 +305,8 @@ static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
u32 members_count;
bool ret = false;
int i;
- u8 pf_num;
- pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
- s_steer = &mlx4_priv(dev)->steer[pf_num];
+ s_steer = &mlx4_priv(dev)->steer[0];
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
@@ -306,7 +318,7 @@ static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
for (i = 0; i < members_count; i++) {
qpn = be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK;
- if (!get_promisc_qp(dev, pf_num, steer, qpn) && qpn != tqpn) {
+ if (!get_promisc_qp(dev, 0, steer, qpn) && qpn != tqpn) {
/* the qp is not promisc, the entry can't be removed */
goto out;
}
@@ -332,7 +344,7 @@ out:
return ret;
}
-static int add_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
+static int add_promisc_qp(struct mlx4_dev *dev, u8 port,
enum mlx4_steer_type steer, u32 qpn)
{
struct mlx4_steer *s_steer;
@@ -347,14 +359,13 @@ static int add_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
bool found;
int last_index;
int err;
- u8 pf_num;
struct mlx4_priv *priv = mlx4_priv(dev);
- pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
- s_steer = &mlx4_priv(dev)->steer[pf_num];
+
+ s_steer = &mlx4_priv(dev)->steer[0];
mutex_lock(&priv->mcg_table.mutex);
- if (get_promisc_qp(dev, pf_num, steer, qpn)) {
+ if (get_promisc_qp(dev, 0, steer, qpn)) {
err = 0; /* Nothing to do, already exists */
goto out_mutex;
}
@@ -397,7 +408,7 @@ static int add_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
}
if (!found) {
/* Need to add the qpn to mgm */
- if (members_count == MLX4_QP_PER_MGM) {
+ if (members_count == dev->caps.num_qp_per_mgm) {
/* entry is full */
err = -ENOMEM;
goto out_mailbox;
@@ -420,7 +431,7 @@ static int add_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);
- err = mlx4_WRITE_PROMISC(dev, vep_num, port, steer, mailbox);
+ err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox);
if (err)
goto out_list;
@@ -439,7 +450,7 @@ out_mutex:
return err;
}
-static int remove_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
+static int remove_promisc_qp(struct mlx4_dev *dev, u8 port,
enum mlx4_steer_type steer, u32 qpn)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -454,13 +465,11 @@ static int remove_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
bool back_to_list = false;
int loc, i;
int err;
- u8 pf_num;
- pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
- s_steer = &mlx4_priv(dev)->steer[pf_num];
+ s_steer = &mlx4_priv(dev)->steer[0];
mutex_lock(&priv->mcg_table.mutex);
- pqp = get_promisc_qp(dev, pf_num, steer, qpn);
+ pqp = get_promisc_qp(dev, 0, steer, qpn);
if (unlikely(!pqp)) {
mlx4_warn(dev, "QP %x is not promiscuous QP\n", qpn);
/* nothing to do */
@@ -479,12 +488,13 @@ static int remove_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
goto out_list;
}
mgm = mailbox->buf;
+ memset(mgm, 0, sizeof *mgm);
members_count = 0;
list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);
- err = mlx4_WRITE_PROMISC(dev, vep_num, port, steer, mailbox);
+ err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox);
if (err)
goto out_mailbox;
@@ -649,12 +659,13 @@ int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
}
index += dev->caps.num_mgms;
+ new_entry = 1;
memset(mgm, 0, sizeof *mgm);
memcpy(mgm->gid, gid, 16);
}
members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
- if (members_count == MLX4_QP_PER_MGM) {
+ if (members_count == dev->caps.num_qp_per_mgm) {
mlx4_err(dev, "MGM at index %x is full.\n", index);
err = -ENOMEM;
goto out;
@@ -696,9 +707,9 @@ out:
if (prot == MLX4_PROT_ETH) {
/* manage the steering entry for promisc mode */
if (new_entry)
- new_steering_entry(dev, 0, port, steer, index, qp->qpn);
+ new_steering_entry(dev, port, steer, index, qp->qpn);
else
- existing_steering_entry(dev, 0, port, steer,
+ existing_steering_entry(dev, port, steer,
index, qp->qpn);
}
if (err && link && index != -1) {
@@ -749,7 +760,7 @@ int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
/* if this pq is also a promisc qp, it shouldn't be removed */
if (prot == MLX4_PROT_ETH &&
- check_duplicate_entry(dev, 0, port, steer, index, qp->qpn))
+ check_duplicate_entry(dev, port, steer, index, qp->qpn))
goto out;
members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
@@ -769,7 +780,8 @@ int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
mgm->qp[i - 1] = 0;
if (prot == MLX4_PROT_ETH)
- removed_entry = can_remove_steering_entry(dev, 0, port, steer, index, qp->qpn);
+ removed_entry = can_remove_steering_entry(dev, port, steer,
+ index, qp->qpn);
if (i != 1 && (prot != MLX4_PROT_ETH || !removed_entry)) {
err = mlx4_WRITE_ENTRY(dev, index, mailbox);
goto out;
@@ -828,6 +840,34 @@ out:
return err;
}
+static int mlx4_QP_ATTACH(struct mlx4_dev *dev, struct mlx4_qp *qp,
+ u8 gid[16], u8 attach, u8 block_loopback,
+ enum mlx4_protocol prot)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ int err = 0;
+ int qpn;
+
+ if (!mlx4_is_mfunc(dev))
+ return -EBADF;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ memcpy(mailbox->buf, gid, 16);
+ qpn = qp->qpn;
+ qpn |= (prot << 28);
+ if (attach && block_loopback)
+ qpn |= (1 << 31);
+
+ err = mlx4_cmd(dev, mailbox->dma, qpn, attach,
+ MLX4_CMD_QP_ATTACH, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_WRAPPED);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
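mlx4_QP_ATTACH() folds three fields into the command's input modifier: the QP number in the low bits, the protocol shifted to bit 28, and the block-loopback flag in bit 31 (set only when attaching). A pack/unpack sketch of that layout (the 24-bit QPN mask is an assumption used for illustration):

#include <stdio.h>
#include <stdint.h>

static uint32_t pack_attach(uint32_t qpn, uint32_t prot, int attach, int block_loopback)
{
	uint32_t mod = qpn | (prot << 28);

	if (attach && block_loopback)
		mod |= 1u << 31;
	return mod;
}

int main(void)
{
	uint32_t mod = pack_attach(0x4a, 1 /* placeholder protocol value */, 1, 1);

	printf("in_modifier=0x%08x qpn=0x%x prot=%u block=%u\n",
	       mod, mod & 0x00ffffff, (mod >> 28) & 0x7, mod >> 31);
	return 0;
}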
int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
int block_mcast_loopback, enum mlx4_protocol prot)
@@ -843,9 +883,12 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
if (prot == MLX4_PROT_ETH)
gid[7] |= (steer << 1);
- return mlx4_qp_attach_common(dev, qp, gid,
- block_mcast_loopback, prot,
- steer);
+ if (mlx4_is_mfunc(dev))
+ return mlx4_QP_ATTACH(dev, qp, gid, 1,
+ block_mcast_loopback, prot);
+
+ return mlx4_qp_attach_common(dev, qp, gid, block_mcast_loopback,
+ prot, steer);
}
EXPORT_SYMBOL_GPL(mlx4_multicast_attach);
@@ -860,22 +903,90 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
return 0;
- if (prot == MLX4_PROT_ETH) {
+ if (prot == MLX4_PROT_ETH)
gid[7] |= (steer << 1);
- }
+
+ if (mlx4_is_mfunc(dev))
+ return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot);
return mlx4_qp_detach_common(dev, qp, gid, prot, steer);
}
EXPORT_SYMBOL_GPL(mlx4_multicast_detach);
+int mlx4_unicast_attach(struct mlx4_dev *dev,
+ struct mlx4_qp *qp, u8 gid[16],
+ int block_mcast_loopback, enum mlx4_protocol prot)
+{
+ if (prot == MLX4_PROT_ETH &&
+ !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
+ return 0;
+
+ if (prot == MLX4_PROT_ETH)
+ gid[7] |= (MLX4_UC_STEER << 1);
+
+ if (mlx4_is_mfunc(dev))
+ return mlx4_QP_ATTACH(dev, qp, gid, 1,
+ block_mcast_loopback, prot);
+
+ return mlx4_qp_attach_common(dev, qp, gid, block_mcast_loopback,
+ prot, MLX4_UC_STEER);
+}
+EXPORT_SYMBOL_GPL(mlx4_unicast_attach);
+
+int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
+ u8 gid[16], enum mlx4_protocol prot)
+{
+ if (prot == MLX4_PROT_ETH &&
+ !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
+ return 0;
+
+ if (prot == MLX4_PROT_ETH)
+ gid[7] |= (MLX4_UC_STEER << 1);
+
+ if (mlx4_is_mfunc(dev))
+ return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot);
+
+ return mlx4_qp_detach_common(dev, qp, gid, prot, MLX4_UC_STEER);
+}
+EXPORT_SYMBOL_GPL(mlx4_unicast_detach);
+
+int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ u32 qpn = (u32) vhcr->in_param & 0xffffffff;
+ u8 port = vhcr->in_param >> 62;
+ enum mlx4_steer_type steer = vhcr->in_modifier;
+
+ /* Promiscuous unicast is not allowed in mfunc */
+ if (mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER)
+ return 0;
+
+ if (vhcr->op_modifier)
+ return add_promisc_qp(dev, port, steer, qpn);
+ else
+ return remove_promisc_qp(dev, port, steer, qpn);
+}
+
+static int mlx4_PROMISC(struct mlx4_dev *dev, u32 qpn,
+ enum mlx4_steer_type steer, u8 add, u8 port)
+{
+ return mlx4_cmd(dev, (u64) qpn | (u64) port << 62, (u32) steer, add,
+ MLX4_CMD_PROMISC, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_WRAPPED);
+}
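mlx4_PROMISC() plays the same packing game with the 64-bit immediate: the QP number occupies the low 32 bits and the port bits 63:62, which is exactly what mlx4_PROMISC_wrapper() peels apart on the master side. A quick stand-alone round-trip (values are illustrative):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t qpn = 0x1234, port = 2;

	uint64_t in_param = (uint64_t)qpn | ((uint64_t)port << 62);	/* pack */

	uint32_t got_qpn  = (uint32_t)(in_param & 0xffffffffULL);	/* unpack */
	uint32_t got_port = (uint32_t)(in_param >> 62);

	printf("qpn=0x%x port=%u\n", got_qpn, got_port);	/* 0x1234, 2 */
	return 0;
}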
int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
{
if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
return 0;
+ if (mlx4_is_mfunc(dev))
+ return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 1, port);
- return add_promisc_qp(dev, 0, port, MLX4_MC_STEER, qpn);
+ return add_promisc_qp(dev, port, MLX4_MC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_add);
@@ -884,8 +995,10 @@ int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
return 0;
+ if (mlx4_is_mfunc(dev))
+ return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 0, port);
- return remove_promisc_qp(dev, 0, port, MLX4_MC_STEER, qpn);
+ return remove_promisc_qp(dev, port, MLX4_MC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_remove);
@@ -894,8 +1007,10 @@ int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
return 0;
+ if (mlx4_is_mfunc(dev))
+ return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 1, port);
- return add_promisc_qp(dev, 0, port, MLX4_UC_STEER, qpn);
+ return add_promisc_qp(dev, port, MLX4_UC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_add);
@@ -904,7 +1019,10 @@ int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
return 0;
- return remove_promisc_qp(dev, 0, port, MLX4_UC_STEER, qpn);
+ if (mlx4_is_mfunc(dev))
+ return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 0, port);
+
+ return remove_promisc_qp(dev, port, MLX4_UC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_remove);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 5dfa68ffc11c..a80121a2b519 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -46,21 +46,25 @@
#include <linux/mlx4/device.h>
#include <linux/mlx4/driver.h>
#include <linux/mlx4/doorbell.h>
+#include <linux/mlx4/cmd.h>
#define DRV_NAME "mlx4_core"
-#define DRV_VERSION "1.0"
-#define DRV_RELDATE "July 14, 2011"
+#define PFX DRV_NAME ": "
+#define DRV_VERSION "1.1"
+#define DRV_RELDATE "Dec, 2011"
enum {
MLX4_HCR_BASE = 0x80680,
MLX4_HCR_SIZE = 0x0001c,
- MLX4_CLR_INT_SIZE = 0x00008
+ MLX4_CLR_INT_SIZE = 0x00008,
+ MLX4_SLAVE_COMM_BASE = 0x0,
+ MLX4_COMM_PAGESIZE = 0x1000
};
enum {
- MLX4_MGM_ENTRY_SIZE = 0x100,
- MLX4_QP_PER_MGM = 4 * (MLX4_MGM_ENTRY_SIZE / 16 - 2),
- MLX4_MTT_ENTRY_PER_SEG = 8
+ MLX4_MAX_MGM_ENTRY_SIZE = 0x1000,
+ MLX4_MAX_QP_PER_MGM = 4 * (MLX4_MAX_MGM_ENTRY_SIZE / 16 - 2),
+ MLX4_MTT_ENTRY_PER_SEG = 8,
};
enum {
@@ -80,6 +84,94 @@ enum {
MLX4_NUM_CMPTS = MLX4_CMPT_NUM_TYPE << MLX4_CMPT_SHIFT
};
+enum mlx4_mr_state {
+ MLX4_MR_DISABLED = 0,
+ MLX4_MR_EN_HW,
+ MLX4_MR_EN_SW
+};
+
+#define MLX4_COMM_TIME 10000
+enum {
+ MLX4_COMM_CMD_RESET,
+ MLX4_COMM_CMD_VHCR0,
+ MLX4_COMM_CMD_VHCR1,
+ MLX4_COMM_CMD_VHCR2,
+ MLX4_COMM_CMD_VHCR_EN,
+ MLX4_COMM_CMD_VHCR_POST,
+ MLX4_COMM_CMD_FLR = 254
+};
+
+/* The flag indicates that the slave should delay the RESET cmd */
+#define MLX4_DELAY_RESET_SLAVE 0xbbbbbbb
+/* Indicates how many retries will be done if we are in the middle of FLR */
+#define NUM_OF_RESET_RETRIES 10
+#define SLEEP_TIME_IN_RESET (2 * 1000)
+enum mlx4_resource {
+ RES_QP,
+ RES_CQ,
+ RES_SRQ,
+ RES_XRCD,
+ RES_MPT,
+ RES_MTT,
+ RES_MAC,
+ RES_VLAN,
+ RES_EQ,
+ RES_COUNTER,
+ MLX4_NUM_OF_RESOURCE_TYPE
+};
+
+enum mlx4_alloc_mode {
+ RES_OP_RESERVE,
+ RES_OP_RESERVE_AND_MAP,
+ RES_OP_MAP_ICM,
+};
+
+
+/*
+ * Virtual HCR structures.
+ * mlx4_vhcr is the sw representation, in machine (host) endianness.
+ *
+ * mlx4_vhcr_cmd is the formalized structure, the one that is passed
+ * to the FW over the communication channel.
+ * It is big endian and has the same layout as the physical HCR
+ * used by the command interface.
+ */
+struct mlx4_vhcr {
+ u64 in_param;
+ u64 out_param;
+ u32 in_modifier;
+ u32 errno;
+ u16 op;
+ u16 token;
+ u8 op_modifier;
+ u8 e_bit;
+};
+
+struct mlx4_vhcr_cmd {
+ __be64 in_param;
+ __be32 in_modifier;
+ __be64 out_param;
+ __be16 token;
+ u16 reserved;
+ u8 status;
+ u8 flags;
+ __be16 opcode;
+};
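As a rough sketch of the conversion the comment above describes (host-order mlx4_vhcr vs. the big-endian mlx4_vhcr_cmd that crosses the communication channel), serialization could look like the following. The helper name and the op/op_modifier packing into the opcode word are assumptions made for the example; the kernel byte-order helpers are presumed available.

/* Illustrative only: serialize a host-order mlx4_vhcr into the
 * big-endian wire layout defined by mlx4_vhcr_cmd above. */
static void mlx4_vhcr_to_wire(const struct mlx4_vhcr *v,
			      struct mlx4_vhcr_cmd *c)
{
	c->in_param    = cpu_to_be64(v->in_param);
	c->in_modifier = cpu_to_be32(v->in_modifier);
	c->out_param   = cpu_to_be64(v->out_param);
	c->token       = cpu_to_be16(v->token);
	/* assumed convention: op_modifier in the top nibble of opcode */
	c->opcode      = cpu_to_be16(((u16)v->op_modifier << 12) | v->op);
	c->status      = 0;	/* filled in by the responder */
	c->flags       = 0;
}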
+
+struct mlx4_cmd_info {
+ u16 opcode;
+ bool has_inbox;
+ bool has_outbox;
+ bool out_is_imm;
+ bool encode_slave_id;
+ int (*verify)(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox);
+ int (*wrapper)(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+};
+
#ifdef CONFIG_MLX4_DEBUG
extern int mlx4_debug_level;
#else /* CONFIG_MLX4_DEBUG */
@@ -99,6 +191,12 @@ do { \
#define mlx4_warn(mdev, format, arg...) \
dev_warn(&mdev->pdev->dev, format, ##arg)
+extern int mlx4_log_num_mgm_entry_size;
+extern int log_mtts_per_seg;
+
+#define MLX4_MAX_NUM_SLAVES (MLX4_MAX_NUM_PF + MLX4_MAX_NUM_VF)
+#define ALL_SLAVES 0xff
+
struct mlx4_bitmap {
u32 last;
u32 top;
@@ -130,6 +228,147 @@ struct mlx4_icm_table {
struct mlx4_icm **icm;
};
+/*
+ * Must be packed because mtt_addr is 64 bits but only aligned to 32 bits.
+ */
+struct mlx4_mpt_entry {
+ __be32 flags;
+ __be32 qpn;
+ __be32 key;
+ __be32 pd_flags;
+ __be64 start;
+ __be64 length;
+ __be32 lkey;
+ __be32 win_cnt;
+ u8 reserved1[3];
+ u8 mtt_rep;
+ __be64 mtt_addr;
+ __be32 mtt_sz;
+ __be32 entity_size;
+ __be32 first_byte_offset;
+} __packed;
+
+/*
+ * Must be packed because start is 64 bits but only aligned to 32 bits.
+ */
+struct mlx4_eq_context {
+ __be32 flags;
+ u16 reserved1[3];
+ __be16 page_offset;
+ u8 log_eq_size;
+ u8 reserved2[4];
+ u8 eq_period;
+ u8 reserved3;
+ u8 eq_max_count;
+ u8 reserved4[3];
+ u8 intr;
+ u8 log_page_size;
+ u8 reserved5[2];
+ u8 mtt_base_addr_h;
+ __be32 mtt_base_addr_l;
+ u32 reserved6[2];
+ __be32 consumer_index;
+ __be32 producer_index;
+ u32 reserved7[4];
+};
+
+struct mlx4_cq_context {
+ __be32 flags;
+ u16 reserved1[3];
+ __be16 page_offset;
+ __be32 logsize_usrpage;
+ __be16 cq_period;
+ __be16 cq_max_count;
+ u8 reserved2[3];
+ u8 comp_eqn;
+ u8 log_page_size;
+ u8 reserved3[2];
+ u8 mtt_base_addr_h;
+ __be32 mtt_base_addr_l;
+ __be32 last_notified_index;
+ __be32 solicit_producer_index;
+ __be32 consumer_index;
+ __be32 producer_index;
+ u32 reserved4[2];
+ __be64 db_rec_addr;
+};
+
+struct mlx4_srq_context {
+ __be32 state_logsize_srqn;
+ u8 logstride;
+ u8 reserved1;
+ __be16 xrcd;
+ __be32 pg_offset_cqn;
+ u32 reserved2;
+ u8 log_page_size;
+ u8 reserved3[2];
+ u8 mtt_base_addr_h;
+ __be32 mtt_base_addr_l;
+ __be32 pd;
+ __be16 limit_watermark;
+ __be16 wqe_cnt;
+ u16 reserved4;
+ __be16 wqe_counter;
+ u32 reserved5;
+ __be64 db_rec_addr;
+};
+
+struct mlx4_eqe {
+ u8 reserved1;
+ u8 type;
+ u8 reserved2;
+ u8 subtype;
+ union {
+ u32 raw[6];
+ struct {
+ __be32 cqn;
+ } __packed comp;
+ struct {
+ u16 reserved1;
+ __be16 token;
+ u32 reserved2;
+ u8 reserved3[3];
+ u8 status;
+ __be64 out_param;
+ } __packed cmd;
+ struct {
+ __be32 qpn;
+ } __packed qp;
+ struct {
+ __be32 srqn;
+ } __packed srq;
+ struct {
+ __be32 cqn;
+ u32 reserved1;
+ u8 reserved2[3];
+ u8 syndrome;
+ } __packed cq_err;
+ struct {
+ u32 reserved1[2];
+ __be32 port;
+ } __packed port_change;
+ struct {
+ #define COMM_CHANNEL_BIT_ARRAY_SIZE 4
+ u32 reserved;
+ u32 bit_vec[COMM_CHANNEL_BIT_ARRAY_SIZE];
+ } __packed comm_channel_arm;
+ struct {
+ u8 port;
+ u8 reserved[3];
+ __be64 mac;
+ } __packed mac_update;
+ struct {
+ u8 port;
+ } __packed sw_event;
+ struct {
+ __be32 slave_id;
+ } __packed flr_event;
+ } event;
+ u8 slave_id;
+ u8 reserved3[2];
+ u8 owner;
+} __packed;
+
struct mlx4_eq {
struct mlx4_dev *dev;
void __iomem *doorbell;
@@ -142,6 +381,18 @@ struct mlx4_eq {
struct mlx4_mtt mtt;
};
+struct mlx4_slave_eqe {
+ u8 type;
+ u8 port;
+ u32 param;
+};
+
+struct mlx4_slave_event_eq_info {
+ u32 eqn;
+ u16 token;
+ u64 event_type;
+};
+
struct mlx4_profile {
int num_qp;
int rdmarc_per_qp;
@@ -155,16 +406,37 @@ struct mlx4_profile {
struct mlx4_fw {
u64 clr_int_base;
u64 catas_offset;
+ u64 comm_base;
struct mlx4_icm *fw_icm;
struct mlx4_icm *aux_icm;
u32 catas_size;
u16 fw_pages;
u8 clr_int_bar;
u8 catas_bar;
+ u8 comm_bar;
};
-#define MGM_QPN_MASK 0x00FFFFFF
-#define MGM_BLCK_LB_BIT 30
+struct mlx4_comm {
+ u32 slave_write;
+ u32 slave_read;
+};
+
+enum {
+ MLX4_MCAST_CONFIG = 0,
+ MLX4_MCAST_DISABLE = 1,
+ MLX4_MCAST_ENABLE = 2,
+};
+
+#define VLAN_FLTR_SIZE 128
+
+struct mlx4_vlan_fltr {
+ __be32 entry[VLAN_FLTR_SIZE];
+};
+
+struct mlx4_mcast_entry {
+ struct list_head list;
+ u64 addr;
+};
struct mlx4_promisc_qp {
struct list_head list;
@@ -177,19 +449,87 @@ struct mlx4_steer_index {
struct list_head duplicates;
};
-struct mlx4_mgm {
- __be32 next_gid_index;
- __be32 members_count;
- u32 reserved[2];
- u8 gid[16];
- __be32 qp[MLX4_QP_PER_MGM];
+struct mlx4_slave_state {
+ u8 comm_toggle;
+ u8 last_cmd;
+ u8 init_port_mask;
+ bool active;
+ u8 function;
+ dma_addr_t vhcr_dma;
+ u16 mtu[MLX4_MAX_PORTS + 1];
+ __be32 ib_cap_mask[MLX4_MAX_PORTS + 1];
+ struct mlx4_slave_eqe eq[MLX4_MFUNC_MAX_EQES];
+ struct list_head mcast_filters[MLX4_MAX_PORTS + 1];
+ struct mlx4_vlan_fltr *vlan_filter[MLX4_MAX_PORTS + 1];
+ struct mlx4_slave_event_eq_info event_eq;
+ u16 eq_pi;
+ u16 eq_ci;
+ spinlock_t lock;
+ /* initialized via kzalloc */
+ u8 is_slave_going_down;
+ u32 cookie;
+};
+
+struct slave_list {
+ struct mutex mutex;
+ struct list_head res_list[MLX4_NUM_OF_RESOURCE_TYPE];
+};
+
+struct mlx4_resource_tracker {
+ spinlock_t lock;
+ /* one tree per resource type */
+ struct radix_tree_root res_tree[MLX4_NUM_OF_RESOURCE_TYPE];
+ /* one resource list per slave */
+ struct slave_list *slave_list;
+};
+
+#define SLAVE_EVENT_EQ_SIZE 128
+struct mlx4_slave_event_eq {
+ u32 eqn;
+ u32 cons;
+ u32 prod;
+ struct mlx4_eqe event_eqe[SLAVE_EVENT_EQ_SIZE];
+};
+
+struct mlx4_master_qp0_state {
+ int proxy_qp0_active;
+ int qp0_active;
+ int port_active;
+};
+
+struct mlx4_mfunc_master_ctx {
+ struct mlx4_slave_state *slave_state;
+ struct mlx4_master_qp0_state qp0_state[MLX4_MAX_PORTS + 1];
+ int init_port_ref[MLX4_MAX_PORTS + 1];
+ u16 max_mtu[MLX4_MAX_PORTS + 1];
+ int disable_mcast_ref[MLX4_MAX_PORTS + 1];
+ struct mlx4_resource_tracker res_tracker;
+ struct workqueue_struct *comm_wq;
+ struct work_struct comm_work;
+ struct work_struct slave_event_work;
+ struct work_struct slave_flr_event_work;
+ spinlock_t slave_state_lock;
+ __be32 comm_arm_bit_vector[4];
+ struct mlx4_eqe cmd_eqe;
+ struct mlx4_slave_event_eq slave_eq;
+ struct mutex gen_eqe_mutex[MLX4_MFUNC_MAX];
+};
+
+struct mlx4_mfunc {
+ struct mlx4_comm __iomem *comm;
+ struct mlx4_vhcr_cmd *vhcr;
+ dma_addr_t vhcr_dma;
+
+ struct mlx4_mfunc_master_ctx master;
};
+
struct mlx4_cmd {
struct pci_pool *pool;
void __iomem *hcr;
struct mutex hcr_mutex;
struct semaphore poll_sem;
struct semaphore event_sem;
+ struct semaphore slave_sem;
int max_cmds;
spinlock_t context_lock;
int free_head;
@@ -197,6 +537,7 @@ struct mlx4_cmd {
u16 token_mask;
u8 use_events;
u8 toggle;
+ u8 comm_toggle;
};
struct mlx4_uar_table {
@@ -287,6 +628,48 @@ struct mlx4_vlan_table {
int max;
};
+#define SET_PORT_GEN_ALL_VALID 0x7
+#define SET_PORT_PROMISC_SHIFT 31
+#define SET_PORT_MC_PROMISC_SHIFT 30
+
+enum {
+ MCAST_DIRECT_ONLY = 0,
+ MCAST_DIRECT = 1,
+ MCAST_DEFAULT = 2
+};
+
+
+struct mlx4_set_port_general_context {
+ u8 reserved[3];
+ u8 flags;
+ u16 reserved2;
+ __be16 mtu;
+ u8 pptx;
+ u8 pfctx;
+ u16 reserved3;
+ u8 pprx;
+ u8 pfcrx;
+ u16 reserved4;
+};
+
+struct mlx4_set_port_rqp_calc_context {
+ __be32 base_qpn;
+ u8 reserved;
+ u8 n_mac;
+ u8 n_vlan;
+ u8 n_prio;
+ u8 reserved2[3];
+ u8 mac_miss;
+ u8 intra_no_vlan;
+ u8 no_vlan;
+ u8 intra_vlan_miss;
+ u8 vlan_miss;
+ u8 reserved3[3];
+ u8 no_vlan_prio;
+ __be32 promisc;
+ __be32 mcast;
+};
+
struct mlx4_mac_entry {
u64 mac;
};
@@ -333,6 +716,7 @@ struct mlx4_priv {
struct mlx4_fw fw;
struct mlx4_cmd cmd;
+ struct mlx4_mfunc mfunc;
struct mlx4_bitmap pd_bitmap;
struct mlx4_bitmap xrcd_bitmap;
@@ -359,6 +743,7 @@ struct mlx4_priv {
struct list_head bf_list;
struct mutex bf_mutex;
struct io_mapping *bf_mapping;
+ int reserved_mtts;
};
static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev)
@@ -403,6 +788,62 @@ void mlx4_cleanup_cq_table(struct mlx4_dev *dev);
void mlx4_cleanup_qp_table(struct mlx4_dev *dev);
void mlx4_cleanup_srq_table(struct mlx4_dev *dev);
void mlx4_cleanup_mcg_table(struct mlx4_dev *dev);
+int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn);
+void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn);
+int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn);
+void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn);
+int __mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn);
+void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn);
+int __mlx4_mr_reserve(struct mlx4_dev *dev);
+void __mlx4_mr_release(struct mlx4_dev *dev, u32 index);
+int __mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index);
+void __mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index);
+u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order);
+void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 first_seg, int order);
+
+int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_SYNC_TPT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
+ int *base);
+void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt);
+int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac);
+void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac);
+int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac);
+int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+ int start_index, int npages, u64 *page_list);
void mlx4_start_catas_poll(struct mlx4_dev *dev);
void mlx4_stop_catas_poll(struct mlx4_dev *dev);
@@ -419,13 +860,113 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
struct mlx4_profile *request,
struct mlx4_dev_cap *dev_cap,
struct mlx4_init_hca_param *init_hca);
+void mlx4_master_comm_channel(struct work_struct *work);
+void mlx4_gen_slave_eqe(struct work_struct *work);
+void mlx4_master_handle_slave_flr(struct work_struct *work);
+
+int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_MAP_EQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_COMM_INT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+
+int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe);
int mlx4_cmd_init(struct mlx4_dev *dev);
void mlx4_cmd_cleanup(struct mlx4_dev *dev);
+int mlx4_multi_func_init(struct mlx4_dev *dev);
+void mlx4_multi_func_cleanup(struct mlx4_dev *dev);
void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param);
int mlx4_cmd_use_events(struct mlx4_dev *dev);
void mlx4_cmd_use_polling(struct mlx4_dev *dev);
+int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param,
+ unsigned long timeout);
+
void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn);
void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type);
@@ -452,12 +993,113 @@ void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table);
void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table);
int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port);
+/* resource tracker functions*/
+int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
+ enum mlx4_resource resource_type,
+ int resource_id, int *slave);
+void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave_id);
+int mlx4_init_resource_tracker(struct mlx4_dev *dev);
+
+void mlx4_free_resource_tracker(struct mlx4_dev *dev);
+
+int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps);
int mlx4_check_ext_port_caps(struct mlx4_dev *dev, u8 port);
+
+int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+
+int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
enum mlx4_protocol prot, enum mlx4_steer_type steer);
int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
int block_mcast_loopback, enum mlx4_protocol prot,
enum mlx4_steer_type steer);
+int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_common_set_vlan_fltr(struct mlx4_dev *dev, int function,
+ int port, void *buf);
+int mlx4_common_dump_eth_stats(struct mlx4_dev *dev, int slave, u32 in_mod,
+ struct mlx4_cmd_mailbox *outbox);
+int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_PKEY_TABLE_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+
+int mlx4_get_mgm_entry_size(struct mlx4_dev *dev);
+int mlx4_get_qp_per_mgm(struct mlx4_dev *dev);
+
+static inline void set_param_l(u64 *arg, u32 val)
+{
+ *((u32 *)arg) = val;
+}
+
+static inline void set_param_h(u64 *arg, u32 val)
+{
+ *arg = (*arg & 0xffffffff) | ((u64) val << 32);
+}
+
+static inline u32 get_param_l(u64 *arg)
+{
+ return (u32) (*arg & 0xffffffff);
+}
+
+static inline u32 get_param_h(u64 *arg)
+{
+ return (u32)(*arg >> 32);
+}
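The four helpers above split a 64-bit command parameter into 32-bit halves. Note that set_param_l stores through a u32 pointer, so the half it actually touches depends on host byte order; callers that need both halves (for example mlx4_free_mtt_range further down) call both helpers. A small standalone sketch of the intended split, written with explicit shifts:

#include <stdint.h>
#include <stdio.h>

/* Illustrative equivalents of the get/set_param helpers above,
 * shown only to demonstrate the intended 64-bit split. */
static void param_set_l(uint64_t *arg, uint32_t val)
{
	*arg = (*arg & 0xffffffff00000000ULL) | val;
}

static void param_set_h(uint64_t *arg, uint32_t val)
{
	*arg = (*arg & 0xffffffffULL) | ((uint64_t)val << 32);
}

int main(void)
{
	uint64_t in_param = 0;

	param_set_l(&in_param, 0x1000);   /* e.g. an MTT offset */
	param_set_h(&in_param, 5);        /* e.g. an allocation order */
	printf("in_param = 0x%016llx\n",
	       (unsigned long long)in_param);  /* 0x0000000500001000 */
	return 0;
}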
+
+static inline spinlock_t *mlx4_tlock(struct mlx4_dev *dev)
+{
+ return &mlx4_priv(dev)->mfunc.master.res_tracker.lock;
+}
+
+#define NOT_MASKED_PD_BITS 17
+
#endif /* MLX4_H */
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 207b5add3ca8..f2a8e65f5f88 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -51,8 +51,8 @@
#include "en_port.h"
#define DRV_NAME "mlx4_en"
-#define DRV_VERSION "1.5.4.2"
-#define DRV_RELDATE "October 2011"
+#define DRV_VERSION "2.0"
+#define DRV_RELDATE "Dec 2011"
#define MLX4_EN_MSG_LEVEL (NETIF_MSG_LINK | NETIF_MSG_IFDOWN)
@@ -366,16 +366,6 @@ struct mlx4_en_rss_map {
enum mlx4_qp_state indir_state;
};
-struct mlx4_en_rss_context {
- __be32 base_qpn;
- __be32 default_qpn;
- u16 reserved;
- u8 hash_fn;
- u8 flags;
- __be32 rss_key[10];
- __be32 base_qpn_udp;
-};
-
struct mlx4_en_port_state {
int link_state;
int link_speed;
@@ -463,6 +453,7 @@ struct mlx4_en_priv {
int base_qpn;
struct mlx4_en_rss_map rss_map;
+ u32 ctrl_flags;
u32 flags;
#define MLX4_EN_FLAG_PROMISC 0x1
#define MLX4_EN_FLAG_MC_PROMISC 0x2
@@ -495,9 +486,9 @@ struct mlx4_en_priv {
enum mlx4_en_wol {
MLX4_EN_WOL_MAGIC = (1ULL << 61),
MLX4_EN_WOL_ENABLED = (1ULL << 62),
- MLX4_EN_WOL_DO_MODIFY = (1ULL << 63),
};
+#define MLX4_EN_WOL_DO_MODIFY (1ULL << 63)
void mlx4_en_destroy_netdev(struct net_device *dev);
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index efa3e77355e4..01df5567e16e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -32,35 +32,17 @@
* SOFTWARE.
*/
+#include <linux/init.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/slab.h>
+#include <linux/kernel.h>
#include <linux/mlx4/cmd.h>
#include "mlx4.h"
#include "icm.h"
-/*
- * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits.
- */
-struct mlx4_mpt_entry {
- __be32 flags;
- __be32 qpn;
- __be32 key;
- __be32 pd_flags;
- __be64 start;
- __be64 length;
- __be32 lkey;
- __be32 win_cnt;
- u8 reserved1[3];
- u8 mtt_rep;
- __be64 mtt_seg;
- __be32 mtt_sz;
- __be32 entity_size;
- __be32 first_byte_offset;
-} __packed;
-
#define MLX4_MPT_FLAG_SW_OWNS (0xfUL << 28)
#define MLX4_MPT_FLAG_FREE (0x3UL << 28)
#define MLX4_MPT_FLAG_MIO (1 << 17)
@@ -180,22 +162,48 @@ static void mlx4_buddy_cleanup(struct mlx4_buddy *buddy)
kfree(buddy->num_free);
}
-static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
+u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
{
struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
u32 seg;
+ int seg_order;
+ u32 offset;
- seg = mlx4_buddy_alloc(&mr_table->mtt_buddy, order);
+ seg_order = max_t(int, order - log_mtts_per_seg, 0);
+
+ seg = mlx4_buddy_alloc(&mr_table->mtt_buddy, seg_order);
if (seg == -1)
return -1;
- if (mlx4_table_get_range(dev, &mr_table->mtt_table, seg,
- seg + (1 << order) - 1)) {
- mlx4_buddy_free(&mr_table->mtt_buddy, seg, order);
+ offset = seg * (1 << log_mtts_per_seg);
+
+ if (mlx4_table_get_range(dev, &mr_table->mtt_table, offset,
+ offset + (1 << order) - 1)) {
+ mlx4_buddy_free(&mr_table->mtt_buddy, seg, seg_order);
return -1;
}
- return seg;
+ return offset;
+}
+
+static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
+{
+ u64 in_param;
+ u64 out_param;
+ int err;
+
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&in_param, order);
+ err = mlx4_cmd_imm(dev, in_param, &out_param, RES_MTT,
+ RES_OP_RESERVE_AND_MAP,
+ MLX4_CMD_ALLOC_RES,
+ MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_WRAPPED);
+ if (err)
+ return -1;
+ return get_param_l(&out_param);
+ }
+ return __mlx4_alloc_mtt_range(dev, order);
}
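A worked example of the arithmetic in __mlx4_alloc_mtt_range above: a request for 2^order MTT entries is rounded to buddy segments of 2^log_mtts_per_seg entries, and the returned offset is the first MTT of the allocated segment. The values below are illustrative; log_mtts_per_seg is a module parameter (typically 3, i.e. 8 MTTs per segment).

#include <stdio.h>

/* Illustrative only: mirror the order-to-segment arithmetic of
 * __mlx4_alloc_mtt_range for a request of 2^order MTT entries. */
int main(void)
{
	int log_mtts_per_seg = 3;   /* assumed module parameter value */
	int order = 5;              /* caller wants 32 MTT entries    */
	int seg_order = order - log_mtts_per_seg;
	int seg;

	if (seg_order < 0)
		seg_order = 0;
	seg = 7;                    /* pretend the buddy returned segment 7 */

	/* 7 segments * 8 MTTs per segment = MTT offset 56 */
	printf("seg_order=%d offset=%d\n",
	       seg_order, seg * (1 << log_mtts_per_seg));
	return 0;
}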
int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
@@ -210,33 +218,63 @@ int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
} else
mtt->page_shift = page_shift;
- for (mtt->order = 0, i = dev->caps.mtts_per_seg; i < npages; i <<= 1)
+ for (mtt->order = 0, i = 1; i < npages; i <<= 1)
++mtt->order;
- mtt->first_seg = mlx4_alloc_mtt_range(dev, mtt->order);
- if (mtt->first_seg == -1)
+ mtt->offset = mlx4_alloc_mtt_range(dev, mtt->order);
+ if (mtt->offset == -1)
return -ENOMEM;
return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mtt_init);
-void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
+void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
{
+ u32 first_seg;
+ int seg_order;
struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
+ seg_order = max_t(int, order - log_mtts_per_seg, 0);
+ first_seg = offset / (1 << log_mtts_per_seg);
+
+ mlx4_buddy_free(&mr_table->mtt_buddy, first_seg, seg_order);
+ mlx4_table_put_range(dev, &mr_table->mtt_table, offset,
+ offset + (1 << order) - 1);
+}
+
+static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
+{
+ u64 in_param;
+ int err;
+
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&in_param, offset);
+ set_param_h(&in_param, order);
+ err = mlx4_cmd(dev, in_param, RES_MTT, RES_OP_RESERVE_AND_MAP,
+ MLX4_CMD_FREE_RES,
+ MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_WRAPPED);
+ if (err)
+ mlx4_warn(dev, "Failed to free mtt range at offset %d, order %d\n",
+ offset, order);
+ return;
+ }
+ __mlx4_free_mtt_range(dev, offset, order);
+}
+
+void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
+{
if (mtt->order < 0)
return;
- mlx4_buddy_free(&mr_table->mtt_buddy, mtt->first_seg, mtt->order);
- mlx4_table_put_range(dev, &mr_table->mtt_table, mtt->first_seg,
- mtt->first_seg + (1 << mtt->order) - 1);
+ mlx4_free_mtt_range(dev, mtt->offset, mtt->order);
}
EXPORT_SYMBOL_GPL(mlx4_mtt_cleanup);
u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
{
- return (u64) mtt->first_seg * dev->caps.mtt_entry_sz;
+ return (u64) mtt->offset * dev->caps.mtt_entry_sz;
}
EXPORT_SYMBOL_GPL(mlx4_mtt_addr);
@@ -253,69 +291,205 @@ static u32 key_to_hw_index(u32 key)
static int mlx4_SW2HW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int mpt_index)
{
- return mlx4_cmd(dev, mailbox->dma, mpt_index, 0, MLX4_CMD_SW2HW_MPT,
- MLX4_CMD_TIME_CLASS_B);
+ return mlx4_cmd(dev, mailbox->dma | dev->caps.function , mpt_index,
+ 0, MLX4_CMD_SW2HW_MPT, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_WRAPPED);
}
static int mlx4_HW2SW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int mpt_index)
{
return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, mpt_index,
- !mailbox, MLX4_CMD_HW2SW_MPT, MLX4_CMD_TIME_CLASS_B);
+ !mailbox, MLX4_CMD_HW2SW_MPT,
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
}
-int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
- int npages, int page_shift, struct mlx4_mr *mr)
+static int mlx4_mr_reserve_range(struct mlx4_dev *dev, int cnt, int align,
+ u32 *base_mridx)
{
struct mlx4_priv *priv = mlx4_priv(dev);
- u32 index;
- int err;
+ u32 mridx;
- index = mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap);
- if (index == -1)
+ mridx = mlx4_bitmap_alloc_range(&priv->mr_table.mpt_bitmap, cnt, align);
+ if (mridx == -1)
return -ENOMEM;
+ *base_mridx = mridx;
+ return 0;
+
+}
+EXPORT_SYMBOL_GPL(mlx4_mr_reserve_range);
+
+static void mlx4_mr_release_range(struct mlx4_dev *dev, u32 base_mridx, int cnt)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ mlx4_bitmap_free_range(&priv->mr_table.mpt_bitmap, base_mridx, cnt);
+}
+EXPORT_SYMBOL_GPL(mlx4_mr_release_range);
+
+static int mlx4_mr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd,
+ u64 iova, u64 size, u32 access, int npages,
+ int page_shift, struct mlx4_mr *mr)
+{
mr->iova = iova;
mr->size = size;
mr->pd = pd;
mr->access = access;
- mr->enabled = 0;
- mr->key = hw_index_to_key(index);
+ mr->enabled = MLX4_MR_DISABLED;
+ mr->key = hw_index_to_key(mridx);
+
+ return mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
+}
+EXPORT_SYMBOL_GPL(mlx4_mr_alloc_reserved);
+
+static int mlx4_WRITE_MTT(struct mlx4_dev *dev,
+ struct mlx4_cmd_mailbox *mailbox,
+ int num_entries)
+{
+ return mlx4_cmd(dev, mailbox->dma, num_entries, 0, MLX4_CMD_WRITE_MTT,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+}
- err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
+int __mlx4_mr_reserve(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ return mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap);
+}
+
+static int mlx4_mr_reserve(struct mlx4_dev *dev)
+{
+ u64 out_param;
+
+ if (mlx4_is_mfunc(dev)) {
+ if (mlx4_cmd_imm(dev, 0, &out_param, RES_MPT, RES_OP_RESERVE,
+ MLX4_CMD_ALLOC_RES,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
+ return -1;
+ return get_param_l(&out_param);
+ }
+ return __mlx4_mr_reserve(dev);
+}
+
+void __mlx4_mr_release(struct mlx4_dev *dev, u32 index)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index);
+}
+
+static void mlx4_mr_release(struct mlx4_dev *dev, u32 index)
+{
+ u64 in_param;
+
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&in_param, index);
+ if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_RESERVE,
+ MLX4_CMD_FREE_RES,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
+ mlx4_warn(dev, "Failed to release mr index:%d\n",
+ index);
+ return;
+ }
+ __mlx4_mr_release(dev, index);
+}
+
+int __mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index)
+{
+ struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
+
+ return mlx4_table_get(dev, &mr_table->dmpt_table, index);
+}
+
+static int mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index)
+{
+ u64 param;
+
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&param, index);
+ return mlx4_cmd_imm(dev, param, &param, RES_MPT, RES_OP_MAP_ICM,
+ MLX4_CMD_ALLOC_RES,
+ MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_WRAPPED);
+ }
+ return __mlx4_mr_alloc_icm(dev, index);
+}
+
+void __mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index)
+{
+ struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
+
+ mlx4_table_put(dev, &mr_table->dmpt_table, index);
+}
+
+static void mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index)
+{
+ u64 in_param;
+
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&in_param, index);
+ if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_MAP_ICM,
+ MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_WRAPPED))
+ mlx4_warn(dev, "Failed to free icm of mr index:%d\n",
+ index);
+ return;
+ }
+ return __mlx4_mr_free_icm(dev, index);
+}
+
+int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
+ int npages, int page_shift, struct mlx4_mr *mr)
+{
+ u32 index;
+ int err;
+
+ index = mlx4_mr_reserve(dev);
+ if (index == -1)
+ return -ENOMEM;
+
+ err = mlx4_mr_alloc_reserved(dev, index, pd, iova, size,
+ access, npages, page_shift, mr);
if (err)
- mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index);
+ mlx4_mr_release(dev, index);
return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_alloc);
-void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr)
+static void mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
- struct mlx4_priv *priv = mlx4_priv(dev);
int err;
- if (mr->enabled) {
+ if (mr->enabled == MLX4_MR_EN_HW) {
err = mlx4_HW2SW_MPT(dev, NULL,
key_to_hw_index(mr->key) &
(dev->caps.num_mpts - 1));
if (err)
- mlx4_warn(dev, "HW2SW_MPT failed (%d)\n", err);
- }
+ mlx4_warn(dev, "HW2SW_MPT failed (%d)\n", err);
+ mr->enabled = MLX4_MR_EN_SW;
+ }
mlx4_mtt_cleanup(dev, &mr->mtt);
- mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, key_to_hw_index(mr->key));
+}
+EXPORT_SYMBOL_GPL(mlx4_mr_free_reserved);
+
+void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr)
+{
+ mlx4_mr_free_reserved(dev, mr);
+ if (mr->enabled)
+ mlx4_mr_free_icm(dev, key_to_hw_index(mr->key));
+ mlx4_mr_release(dev, key_to_hw_index(mr->key));
}
EXPORT_SYMBOL_GPL(mlx4_mr_free);
int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
- struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_mpt_entry *mpt_entry;
int err;
- err = mlx4_table_get(dev, &mr_table->dmpt_table, key_to_hw_index(mr->key));
+ err = mlx4_mr_alloc_icm(dev, key_to_hw_index(mr->key));
if (err)
return err;
@@ -340,9 +514,10 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
if (mr->mtt.order < 0) {
mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
- mpt_entry->mtt_seg = 0;
+ mpt_entry->mtt_addr = 0;
} else {
- mpt_entry->mtt_seg = cpu_to_be64(mlx4_mtt_addr(dev, &mr->mtt));
+ mpt_entry->mtt_addr = cpu_to_be64(mlx4_mtt_addr(dev,
+ &mr->mtt));
}
if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) {
@@ -350,8 +525,7 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG |
MLX4_MPT_PD_FLAG_RAE);
- mpt_entry->mtt_sz = cpu_to_be32((1 << mr->mtt.order) *
- dev->caps.mtts_per_seg);
+ mpt_entry->mtt_sz = cpu_to_be32(1 << mr->mtt.order);
} else {
mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
}
@@ -362,8 +536,7 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err);
goto err_cmd;
}
-
- mr->enabled = 1;
+ mr->enabled = MLX4_MR_EN_HW;
mlx4_free_cmd_mailbox(dev, mailbox);
@@ -373,7 +546,7 @@ err_cmd:
mlx4_free_cmd_mailbox(dev, mailbox);
err_table:
- mlx4_table_put(dev, &mr_table->dmpt_table, key_to_hw_index(mr->key));
+ mlx4_mr_free_icm(dev, key_to_hw_index(mr->key));
return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_enable);
@@ -385,18 +558,10 @@ static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
__be64 *mtts;
dma_addr_t dma_handle;
int i;
- int s = start_index * sizeof (u64);
- /* All MTTs must fit in the same page */
- if (start_index / (PAGE_SIZE / sizeof (u64)) !=
- (start_index + npages - 1) / (PAGE_SIZE / sizeof (u64)))
- return -EINVAL;
-
- if (start_index & (dev->caps.mtts_per_seg - 1))
- return -EINVAL;
+ mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->offset +
+ start_index, &dma_handle);
- mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->first_seg +
- s / dev->caps.mtt_entry_sz, &dma_handle);
if (!mtts)
return -ENOMEM;
@@ -412,27 +577,75 @@ static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
return 0;
}
-int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
- int start_index, int npages, u64 *page_list)
+int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+ int start_index, int npages, u64 *page_list)
{
+ int err = 0;
int chunk;
- int err;
+ int mtts_per_page;
+ int max_mtts_first_page;
- if (mtt->order < 0)
- return -EINVAL;
+ /* compute how many MTTs fit in the first page */
+ mtts_per_page = PAGE_SIZE / sizeof(u64);
+ max_mtts_first_page = mtts_per_page - (mtt->offset + start_index)
+ % mtts_per_page;
+
+ chunk = min_t(int, max_mtts_first_page, npages);
while (npages > 0) {
- chunk = min_t(int, PAGE_SIZE / sizeof(u64), npages);
err = mlx4_write_mtt_chunk(dev, mtt, start_index, chunk, page_list);
if (err)
return err;
-
npages -= chunk;
start_index += chunk;
page_list += chunk;
+
+ chunk = min_t(int, mtts_per_page, npages);
}
+ return err;
+}
- return 0;
+int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+ int start_index, int npages, u64 *page_list)
+{
+ struct mlx4_cmd_mailbox *mailbox = NULL;
+ __be64 *inbox = NULL;
+ int chunk;
+ int err = 0;
+ int i;
+
+ if (mtt->order < 0)
+ return -EINVAL;
+
+ if (mlx4_is_mfunc(dev)) {
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ inbox = mailbox->buf;
+
+ while (npages > 0) {
+ chunk = min_t(int, MLX4_MAILBOX_SIZE / sizeof(u64) - 2,
+ npages);
+ inbox[0] = cpu_to_be64(mtt->offset + start_index);
+ inbox[1] = 0;
+ for (i = 0; i < chunk; ++i)
+ inbox[i + 2] = cpu_to_be64(page_list[i] |
+ MLX4_MTT_FLAG_PRESENT);
+ err = mlx4_WRITE_MTT(dev, mailbox, chunk);
+ if (err) {
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+ }
+
+ npages -= chunk;
+ start_index += chunk;
+ page_list += chunk;
+ }
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+ }
+
+ return __mlx4_write_mtt(dev, mtt, start_index, npages, page_list);
}
EXPORT_SYMBOL_GPL(mlx4_write_mtt);
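To make the chunking in __mlx4_write_mtt above concrete: the first chunk is capped so that writes stay inside the page that contains mtt->offset + start_index, and later chunks use whole pages. A small sketch of that arithmetic, assuming 4 KB pages (512 MTT entries of 8 bytes each); the numbers are illustrative:

#include <stdio.h>

int main(void)
{
	int mtts_per_page = 4096 / 8;   /* PAGE_SIZE / sizeof(u64) = 512 */
	int mtt_offset = 1000;          /* illustrative mtt->offset      */
	int start_index = 20;
	int npages = 800;
	int max_first, chunk;

	/* entries left in the page containing (offset + start_index) */
	max_first = mtts_per_page - (mtt_offset + start_index) % mtts_per_page;
	chunk = npages < max_first ? npages : max_first;

	/* here: (1020 % 512) = 508, so only 4 entries fit in the first page */
	printf("first chunk: %d entries, then full pages of %d\n",
	       chunk, mtts_per_page);
	return 0;
}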
@@ -462,21 +675,34 @@ EXPORT_SYMBOL_GPL(mlx4_buf_write_mtt);
int mlx4_init_mr_table(struct mlx4_dev *dev)
{
- struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_mr_table *mr_table = &priv->mr_table;
int err;
+ if (!is_power_of_2(dev->caps.num_mpts))
+ return -EINVAL;
+
+ /* Nothing to do for slaves - all MR handling is forwarded
+ * to the master */
+ if (mlx4_is_slave(dev))
+ return 0;
+
err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts,
~0, dev->caps.reserved_mrws, 0);
if (err)
return err;
err = mlx4_buddy_init(&mr_table->mtt_buddy,
- ilog2(dev->caps.num_mtt_segs));
+ ilog2(dev->caps.num_mtts /
+ (1 << log_mtts_per_seg)));
if (err)
goto err_buddy;
if (dev->caps.reserved_mtts) {
- if (mlx4_alloc_mtt_range(dev, fls(dev->caps.reserved_mtts - 1)) == -1) {
+ priv->reserved_mtts =
+ mlx4_alloc_mtt_range(dev,
+ fls(dev->caps.reserved_mtts - 1));
+ if (priv->reserved_mtts < 0) {
mlx4_warn(dev, "MTT table of order %d is too small.\n",
mr_table->mtt_buddy.max_order);
err = -ENOMEM;
@@ -497,8 +723,14 @@ err_buddy:
void mlx4_cleanup_mr_table(struct mlx4_dev *dev)
{
- struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_mr_table *mr_table = &priv->mr_table;
+ if (mlx4_is_slave(dev))
+ return;
+ if (priv->reserved_mtts >= 0)
+ mlx4_free_mtt_range(dev, priv->reserved_mtts,
+ fls(dev->caps.reserved_mtts - 1));
mlx4_buddy_cleanup(&mr_table->mtt_buddy);
mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);
}
@@ -581,7 +813,7 @@ int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
int max_maps, u8 page_shift, struct mlx4_fmr *fmr)
{
struct mlx4_priv *priv = mlx4_priv(dev);
- u64 mtt_seg;
+ u64 mtt_offset;
int err = -ENOMEM;
if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32)
@@ -601,11 +833,12 @@ int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
if (err)
return err;
- mtt_seg = fmr->mr.mtt.first_seg * dev->caps.mtt_entry_sz;
+ mtt_offset = fmr->mr.mtt.offset * dev->caps.mtt_entry_sz;
fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table,
- fmr->mr.mtt.first_seg,
+ fmr->mr.mtt.offset,
&fmr->dma_handle);
+
if (!fmr->mtts) {
err = -ENOMEM;
goto err_free;
@@ -619,6 +852,46 @@ err_free:
}
EXPORT_SYMBOL_GPL(mlx4_fmr_alloc);
+static int mlx4_fmr_alloc_reserved(struct mlx4_dev *dev, u32 mridx,
+ u32 pd, u32 access, int max_pages,
+ int max_maps, u8 page_shift, struct mlx4_fmr *fmr)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int err = -ENOMEM;
+
+ if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32)
+ return -EINVAL;
+
+ /* All MTTs must fit in the same page */
+ if (max_pages * sizeof *fmr->mtts > PAGE_SIZE)
+ return -EINVAL;
+
+ fmr->page_shift = page_shift;
+ fmr->max_pages = max_pages;
+ fmr->max_maps = max_maps;
+ fmr->maps = 0;
+
+ err = mlx4_mr_alloc_reserved(dev, mridx, pd, 0, 0, access, max_pages,
+ page_shift, &fmr->mr);
+ if (err)
+ return err;
+
+ fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table,
+ fmr->mr.mtt.offset,
+ &fmr->dma_handle);
+ if (!fmr->mtts) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+
+ return 0;
+
+err_free:
+ mlx4_mr_free_reserved(dev, &fmr->mr);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_fmr_alloc_reserved);
+
int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -640,12 +913,32 @@ EXPORT_SYMBOL_GPL(mlx4_fmr_enable);
void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
u32 *lkey, u32 *rkey)
{
+ struct mlx4_cmd_mailbox *mailbox;
+ int err;
+
if (!fmr->maps)
return;
fmr->maps = 0;
- *(u8 *) fmr->mpt = MLX4_MPT_STATUS_SW;
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox)) {
+ err = PTR_ERR(mailbox);
+ printk(KERN_WARNING "mlx4_ib: mlx4_alloc_cmd_mailbox"
+ " failed (%d)\n", err);
+ return;
+ }
+
+ err = mlx4_HW2SW_MPT(dev, NULL,
+ key_to_hw_index(fmr->mr.key) &
+ (dev->caps.num_mpts - 1));
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ if (err) {
+ printk(KERN_WARNING "mlx4_ib: mlx4_HW2SW_MPT failed (%d)\n",
+ err);
+ return;
+ }
+ fmr->mr.enabled = MLX4_MR_EN_SW;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_unmap);
@@ -654,15 +947,28 @@ int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
if (fmr->maps)
return -EBUSY;
- fmr->mr.enabled = 0;
mlx4_mr_free(dev, &fmr->mr);
+ fmr->mr.enabled = MLX4_MR_DISABLED;
return 0;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_free);
+static int mlx4_fmr_free_reserved(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
+{
+ if (fmr->maps)
+ return -EBUSY;
+
+ mlx4_mr_free_reserved(dev, &fmr->mr);
+ fmr->mr.enabled = MLX4_MR_DISABLED;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_fmr_free_reserved);
+
int mlx4_SYNC_TPT(struct mlx4_dev *dev)
{
- return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, 1000);
+ return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, 1000,
+ MLX4_CMD_WRAPPED);
}
EXPORT_SYMBOL_GPL(mlx4_SYNC_TPT);
diff --git a/drivers/net/ethernet/mellanox/mlx4/pd.c b/drivers/net/ethernet/mellanox/mlx4/pd.c
index 260ed259ce9b..5c9a54df17ab 100644
--- a/drivers/net/ethernet/mellanox/mlx4/pd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/pd.c
@@ -31,6 +31,7 @@
* SOFTWARE.
*/
+#include <linux/init.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/io-mapping.h>
@@ -51,7 +52,8 @@ int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn)
*pdn = mlx4_bitmap_alloc(&priv->pd_bitmap);
if (*pdn == -1)
return -ENOMEM;
-
+ if (mlx4_is_mfunc(dev))
+ *pdn |= (dev->caps.function + 1) << NOT_MASKED_PD_BITS;
return 0;
}
EXPORT_SYMBOL_GPL(mlx4_pd_alloc);
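A hedged sketch of the PD-number layout implied by the change above: the low NOT_MASKED_PD_BITS (17) bits come from the PD bitmap, and on multi-function devices the owning function plus one is stored in the bits above them. The decode helper is hypothetical, not part of the driver.

#include <assert.h>
#include <stdint.h>

#define NOT_MASKED_PD_BITS 17

/* Hypothetical helpers mirroring the encoding in mlx4_pd_alloc(). */
static uint32_t pd_encode(uint32_t bitmap_pdn, int function)
{
	return bitmap_pdn | ((uint32_t)(function + 1) << NOT_MASKED_PD_BITS);
}

static int pd_owner_function(uint32_t pdn)
{
	return (int)(pdn >> NOT_MASKED_PD_BITS) - 1;
}

int main(void)
{
	uint32_t pdn = pd_encode(0x1abc, 3);   /* PD 0x1abc owned by function 3 */

	assert((pdn & ((1u << NOT_MASKED_PD_BITS) - 1)) == 0x1abc);
	assert(pd_owner_function(pdn) == 3);
	return 0;
}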
@@ -85,7 +87,8 @@ int mlx4_init_pd_table(struct mlx4_dev *dev)
struct mlx4_priv *priv = mlx4_priv(dev);
return mlx4_bitmap_init(&priv->pd_bitmap, dev->caps.num_pds,
- (1 << 24) - 1, dev->caps.reserved_pds, 0);
+ (1 << NOT_MASKED_PD_BITS) - 1,
+ dev->caps.reserved_pds, 0);
}
void mlx4_cleanup_pd_table(struct mlx4_dev *dev)
@@ -108,13 +111,19 @@ void mlx4_cleanup_xrcd_table(struct mlx4_dev *dev)
int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar)
{
+ int offset;
+
uar->index = mlx4_bitmap_alloc(&mlx4_priv(dev)->uar_table.bitmap);
if (uar->index == -1)
return -ENOMEM;
- uar->pfn = (pci_resource_start(dev->pdev, 2) >> PAGE_SHIFT) + uar->index;
+ if (mlx4_is_slave(dev))
+ offset = uar->index % ((int) pci_resource_len(dev->pdev, 2) /
+ dev->caps.uar_page_size);
+ else
+ offset = uar->index;
+ uar->pfn = (pci_resource_start(dev->pdev, 2) >> PAGE_SHIFT) + offset;
uar->map = NULL;
-
return 0;
}
EXPORT_SYMBOL_GPL(mlx4_uar_alloc);
@@ -232,7 +241,7 @@ int mlx4_init_uar_table(struct mlx4_dev *dev)
return mlx4_bitmap_init(&mlx4_priv(dev)->uar_table.bitmap,
dev->caps.num_uars, dev->caps.num_uars - 1,
- max(128, dev->caps.reserved_uars), 0);
+ dev->caps.reserved_uars, 0);
}
void mlx4_cleanup_uar_table(struct mlx4_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index d942aea4927b..88b52e547524 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -70,41 +70,12 @@ void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table)
table->total = 0;
}
-static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
- __be64 *entries)
-{
- struct mlx4_cmd_mailbox *mailbox;
- u32 in_mod;
- int err;
-
- mailbox = mlx4_alloc_cmd_mailbox(dev);
- if (IS_ERR(mailbox))
- return PTR_ERR(mailbox);
-
- memcpy(mailbox->buf, entries, MLX4_MAC_TABLE_SIZE);
-
- in_mod = MLX4_SET_PORT_MAC_TABLE << 8 | port;
- err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
- MLX4_CMD_TIME_CLASS_B);
-
- mlx4_free_cmd_mailbox(dev, mailbox);
- return err;
-}
-
-static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port,
- u64 mac, int *qpn, u8 reserve)
+static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn)
{
struct mlx4_qp qp;
u8 gid[16] = {0};
int err;
- if (reserve) {
- err = mlx4_qp_reserve_range(dev, 1, 1, qpn);
- if (err) {
- mlx4_err(dev, "Failed to reserve qp for mac registration\n");
- return err;
- }
- }
qp.qpn = *qpn;
mac &= 0xffffffffffffULL;
@@ -113,16 +84,15 @@ static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port,
gid[5] = port;
gid[7] = MLX4_UC_STEER << 1;
- err = mlx4_qp_attach_common(dev, &qp, gid, 0,
- MLX4_PROT_ETH, MLX4_UC_STEER);
- if (err && reserve)
- mlx4_qp_release_range(dev, *qpn, 1);
+ err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
+ if (err)
+ mlx4_warn(dev, "Failed Attaching Unicast\n");
return err;
}
static void mlx4_uc_steer_release(struct mlx4_dev *dev, u8 port,
- u64 mac, int qpn, u8 free)
+ u64 mac, int qpn)
{
struct mlx4_qp qp;
u8 gid[16] = {0};
@@ -134,60 +104,164 @@ static void mlx4_uc_steer_release(struct mlx4_dev *dev, u8 port,
gid[5] = port;
gid[7] = MLX4_UC_STEER << 1;
- mlx4_qp_detach_common(dev, &qp, gid, MLX4_PROT_ETH, MLX4_UC_STEER);
- if (free)
- mlx4_qp_release_range(dev, qpn, 1);
+ mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
+}
+
+static int validate_index(struct mlx4_dev *dev,
+ struct mlx4_mac_table *table, int index)
+{
+ int err = 0;
+
+ if (index < 0 || index >= table->max || !table->entries[index]) {
+ mlx4_warn(dev, "No valid Mac entry for the given index\n");
+ err = -EINVAL;
+ }
+ return err;
+}
+
+static int find_index(struct mlx4_dev *dev,
+ struct mlx4_mac_table *table, u64 mac)
+{
+ int i;
+
+ for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
+ if ((mac & MLX4_MAC_MASK) ==
+ (MLX4_MAC_MASK & be64_to_cpu(table->entries[i])))
+ return i;
+ }
+ /* Mac not found */
+ return -EINVAL;
}
-int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn, u8 wrap)
+int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn)
{
struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
- struct mlx4_mac_table *table = &info->mac_table;
struct mlx4_mac_entry *entry;
- int i, err = 0;
- int free = -1;
+ int index = 0;
+ int err = 0;
- if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) {
- err = mlx4_uc_steer_add(dev, port, mac, qpn, 1);
- if (err)
- return err;
+ mlx4_dbg(dev, "Registering MAC: 0x%llx for adding\n",
+ (unsigned long long) mac);
+ index = mlx4_register_mac(dev, port, mac);
+ if (index < 0) {
+ err = index;
+ mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
+ (unsigned long long) mac);
+ return err;
+ }
- entry = kmalloc(sizeof *entry, GFP_KERNEL);
- if (!entry) {
- mlx4_uc_steer_release(dev, port, mac, *qpn, 1);
- return -ENOMEM;
- }
+ if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER)) {
+ *qpn = info->base_qpn + index;
+ return 0;
+ }
+
+ err = mlx4_qp_reserve_range(dev, 1, 1, qpn);
+ mlx4_dbg(dev, "Reserved qp %d\n", *qpn);
+ if (err) {
+ mlx4_err(dev, "Failed to reserve qp for mac registration\n");
+ goto qp_err;
+ }
+
+ err = mlx4_uc_steer_add(dev, port, mac, qpn);
+ if (err)
+ goto steer_err;
- entry->mac = mac;
- err = radix_tree_insert(&info->mac_tree, *qpn, entry);
- if (err) {
+ entry = kmalloc(sizeof *entry, GFP_KERNEL);
+ if (!entry) {
+ err = -ENOMEM;
+ goto alloc_err;
+ }
+ entry->mac = mac;
+ err = radix_tree_insert(&info->mac_tree, *qpn, entry);
+ if (err)
+ goto insert_err;
+ return 0;
+
+insert_err:
+ kfree(entry);
+
+alloc_err:
+ mlx4_uc_steer_release(dev, port, mac, *qpn);
+
+steer_err:
+ mlx4_qp_release_range(dev, *qpn, 1);
+
+qp_err:
+ mlx4_unregister_mac(dev, port, mac);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_get_eth_qp);
+
+void mlx4_put_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int qpn)
+{
+ struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
+ struct mlx4_mac_entry *entry;
+
+ mlx4_dbg(dev, "Unregistering MAC: 0x%llx\n",
+ (unsigned long long) mac);
+ mlx4_unregister_mac(dev, port, mac);
+
+ if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) {
+ entry = radix_tree_lookup(&info->mac_tree, qpn);
+ if (entry) {
+ mlx4_dbg(dev, "Releasing qp: port %d, mac 0x%llx,"
+ " qpn %d\n", port,
+ (unsigned long long) mac, qpn);
+ mlx4_uc_steer_release(dev, port, entry->mac, qpn);
+ mlx4_qp_release_range(dev, qpn, 1);
+ radix_tree_delete(&info->mac_tree, qpn);
kfree(entry);
- mlx4_uc_steer_release(dev, port, mac, *qpn, 1);
- return err;
}
}
+}
+EXPORT_SYMBOL_GPL(mlx4_put_eth_qp);
+
+static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
+ __be64 *entries)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ u32 in_mod;
+ int err;
- mlx4_dbg(dev, "Registering MAC: 0x%llx\n", (unsigned long long) mac);
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ memcpy(mailbox->buf, entries, MLX4_MAC_TABLE_SIZE);
+
+ in_mod = MLX4_SET_PORT_MAC_TABLE << 8 | port;
+
+ err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+
+int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
+{
+ struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
+ struct mlx4_mac_table *table = &info->mac_table;
+ int i, err = 0;
+ int free = -1;
+
+ mlx4_dbg(dev, "Registering MAC: 0x%llx for port %d\n",
+ (unsigned long long) mac, port);
mutex_lock(&table->mutex);
- for (i = 0; i < MLX4_MAX_MAC_NUM - 1; i++) {
- if (free < 0 && !table->refs[i]) {
+ for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
+ if (free < 0 && !table->entries[i]) {
free = i;
continue;
}
if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
- /* MAC already registered, increase references count */
- ++table->refs[i];
+ /* MAC already registered, must not have duplicates */
+ err = -EEXIST;
goto out;
}
}
- if (free < 0) {
- err = -ENOMEM;
- goto out;
- }
-
mlx4_dbg(dev, "Free MAC index is %d\n", free);
if (table->total == table->max) {
@@ -197,103 +271,103 @@ int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn, u8 wrap)
}
/* Register new MAC */
- table->refs[free] = 1;
table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID);
err = mlx4_set_port_mac_table(dev, port, table->entries);
if (unlikely(err)) {
- mlx4_err(dev, "Failed adding MAC: 0x%llx\n", (unsigned long long) mac);
- table->refs[free] = 0;
+ mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
+ (unsigned long long) mac);
table->entries[free] = 0;
goto out;
}
- if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
- *qpn = info->base_qpn + free;
+ err = free;
++table->total;
out:
mutex_unlock(&table->mutex);
return err;
}
-EXPORT_SYMBOL_GPL(mlx4_register_mac);
+EXPORT_SYMBOL_GPL(__mlx4_register_mac);
-static int validate_index(struct mlx4_dev *dev,
- struct mlx4_mac_table *table, int index)
+int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
- int err = 0;
+ u64 out_param;
+ int err;
- if (index < 0 || index >= table->max || !table->entries[index]) {
- mlx4_warn(dev, "No valid Mac entry for the given index\n");
- err = -EINVAL;
- }
- return err;
-}
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&out_param, port);
+ err = mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
+ RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ if (err)
+ return err;
-static int find_index(struct mlx4_dev *dev,
- struct mlx4_mac_table *table, u64 mac)
-{
- int i;
- for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
- if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i])))
- return i;
+ return get_param_l(&out_param);
}
- /* Mac not found */
- return -EINVAL;
+ return __mlx4_register_mac(dev, port, mac);
}
+EXPORT_SYMBOL_GPL(mlx4_register_mac);
-void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int qpn)
+
+void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
struct mlx4_mac_table *table = &info->mac_table;
- int index = qpn - info->base_qpn;
- struct mlx4_mac_entry *entry;
+ int index;
- if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) {
- entry = radix_tree_lookup(&info->mac_tree, qpn);
- if (entry) {
- mlx4_uc_steer_release(dev, port, entry->mac, qpn, 1);
- radix_tree_delete(&info->mac_tree, qpn);
- index = find_index(dev, table, entry->mac);
- kfree(entry);
- }
- }
+ index = find_index(dev, table, mac);
mutex_lock(&table->mutex);
if (validate_index(dev, table, index))
goto out;
- /* Check whether this address has reference count */
- if (!(--table->refs[index])) {
- table->entries[index] = 0;
- mlx4_set_port_mac_table(dev, port, table->entries);
- --table->total;
- }
+ table->entries[index] = 0;
+ mlx4_set_port_mac_table(dev, port, table->entries);
+ --table->total;
out:
mutex_unlock(&table->mutex);
}
+EXPORT_SYMBOL_GPL(__mlx4_unregister_mac);
+
+void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
+{
+ u64 out_param;
+ int err;
+
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&out_param, port);
+ err = mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
+ RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ return;
+ }
+ __mlx4_unregister_mac(dev, port, mac);
+ return;
+}
EXPORT_SYMBOL_GPL(mlx4_unregister_mac);
-int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac, u8 wrap)
+int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac)
{
struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
struct mlx4_mac_table *table = &info->mac_table;
- int index = qpn - info->base_qpn;
struct mlx4_mac_entry *entry;
- int err;
+ int index = qpn - info->base_qpn;
+ int err = 0;
if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) {
entry = radix_tree_lookup(&info->mac_tree, qpn);
if (!entry)
return -EINVAL;
- index = find_index(dev, table, entry->mac);
- mlx4_uc_steer_release(dev, port, entry->mac, qpn, 0);
+ mlx4_uc_steer_release(dev, port, entry->mac, qpn);
+ mlx4_unregister_mac(dev, port, entry->mac);
entry->mac = new_mac;
- err = mlx4_uc_steer_add(dev, port, entry->mac, &qpn, 0);
- if (err || index < 0)
- return err;
+ mlx4_register_mac(dev, port, new_mac);
+ err = mlx4_uc_steer_add(dev, port, entry->mac, &qpn);
+ return err;
}
+ /* CX1 doesn't support multi-functions */
mutex_lock(&table->mutex);
err = validate_index(dev, table, index);
@@ -304,7 +378,8 @@ int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac, u8 wra
err = mlx4_set_port_mac_table(dev, port, table->entries);
if (unlikely(err)) {
- mlx4_err(dev, "Failed adding MAC: 0x%llx\n", (unsigned long long) new_mac);
+ mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
+ (unsigned long long) new_mac);
table->entries[index] = 0;
}
out:
@@ -312,6 +387,7 @@ out:
return err;
}
EXPORT_SYMBOL_GPL(mlx4_replace_mac);
+
static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
__be32 *entries)
{
@@ -326,7 +402,7 @@ static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
memcpy(mailbox->buf, entries, MLX4_VLAN_TABLE_SIZE);
in_mod = MLX4_SET_PORT_VLAN_TABLE << 8 | port;
err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
- MLX4_CMD_TIME_CLASS_B);
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
mlx4_free_cmd_mailbox(dev, mailbox);
@@ -352,7 +428,8 @@ int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx)
}
EXPORT_SYMBOL_GPL(mlx4_find_cached_vlan);
-int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
+static int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan,
+ int *index)
{
struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
int i, err = 0;
@@ -387,7 +464,7 @@ int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
goto out;
}
- /* Register new MAC */
+ /* Register new VLAN */
table->refs[free] = 1;
table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID);
@@ -405,9 +482,27 @@ out:
mutex_unlock(&table->mutex);
return err;
}
+
+int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
+{
+ u64 out_param;
+ int err;
+
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&out_param, port);
+ err = mlx4_cmd_imm(dev, vlan, &out_param, RES_VLAN,
+ RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ if (!err)
+ *index = get_param_l(&out_param);
+
+ return err;
+ }
+ return __mlx4_register_vlan(dev, port, vlan, index);
+}
EXPORT_SYMBOL_GPL(mlx4_register_vlan);
-void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
+static void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
{
struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
@@ -432,6 +527,25 @@ void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
out:
mutex_unlock(&table->mutex);
}
+
+void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
+{
+ u64 in_param;
+ int err;
+
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&in_param, port);
+ err = mlx4_cmd(dev, in_param, RES_VLAN, RES_OP_RESERVE_AND_MAP,
+ MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_WRAPPED);
+		if (err)
+			mlx4_warn(dev, "Failed freeing vlan at index:%d\n",
+				  index);
+
+ return;
+ }
+ __mlx4_unregister_vlan(dev, port, index);
+}
EXPORT_SYMBOL_GPL(mlx4_unregister_vlan);
int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps)
@@ -462,7 +576,8 @@ int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps)
*(__be32 *) (&inbuf[20]) = cpu_to_be32(port);
err = mlx4_cmd_box(dev, inmailbox->dma, outmailbox->dma, port, 3,
- MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C);
+ MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
+ MLX4_CMD_NATIVE);
if (!err)
*caps = *(__be32 *) (outbuf + 84);
mlx4_free_cmd_mailbox(dev, inmailbox);
@@ -499,7 +614,8 @@ int mlx4_check_ext_port_caps(struct mlx4_dev *dev, u8 port)
*(__be32 *) (&inbuf[20]) = cpu_to_be32(port);
err = mlx4_cmd_box(dev, inmailbox->dma, outmailbox->dma, port, 3,
- MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C);
+ MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
+ MLX4_CMD_NATIVE);
packet_error = be16_to_cpu(*(__be16 *) (outbuf + 4));
@@ -512,6 +628,139 @@ int mlx4_check_ext_port_caps(struct mlx4_dev *dev, u8 port)
return err;
}
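+/*
+ * Validate and rewrite SET_PORT requests issued on behalf of a slave.
+ * For Ethernet, slaves may only change the MTU (MLX4_SET_PORT_GENERAL),
+ * and the value programmed is the maximum over all functions; for IB,
+ * the per-slave capability masks are aggregated before the command is
+ * passed on to the firmware.
+ */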
+static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
+ u8 op_mod, struct mlx4_cmd_mailbox *inbox)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_port_info *port_info;
+ struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master;
+ struct mlx4_slave_state *slave_st = &master->slave_state[slave];
+ struct mlx4_set_port_rqp_calc_context *qpn_context;
+ struct mlx4_set_port_general_context *gen_context;
+ int reset_qkey_viols;
+ int port;
+ int is_eth;
+ u32 in_modifier;
+ u32 promisc;
+ u16 mtu, prev_mtu;
+ int err;
+ int i;
+ __be32 agg_cap_mask;
+ __be32 slave_cap_mask;
+ __be32 new_cap_mask;
+
+ port = in_mod & 0xff;
+ in_modifier = in_mod >> 8;
+ is_eth = op_mod;
+ port_info = &priv->port[port];
+
+ /* Slaves cannot perform SET_PORT operations except changing MTU */
+ if (is_eth) {
+ if (slave != dev->caps.function &&
+ in_modifier != MLX4_SET_PORT_GENERAL) {
+ mlx4_warn(dev, "denying SET_PORT for slave:%d\n",
+ slave);
+ return -EINVAL;
+ }
+ switch (in_modifier) {
+ case MLX4_SET_PORT_RQP_CALC:
+ qpn_context = inbox->buf;
+ qpn_context->base_qpn =
+ cpu_to_be32(port_info->base_qpn);
+ qpn_context->n_mac = 0x7;
+ promisc = be32_to_cpu(qpn_context->promisc) >>
+ SET_PORT_PROMISC_SHIFT;
+ qpn_context->promisc = cpu_to_be32(
+ promisc << SET_PORT_PROMISC_SHIFT |
+ port_info->base_qpn);
+ promisc = be32_to_cpu(qpn_context->mcast) >>
+ SET_PORT_MC_PROMISC_SHIFT;
+ qpn_context->mcast = cpu_to_be32(
+ promisc << SET_PORT_MC_PROMISC_SHIFT |
+ port_info->base_qpn);
+ break;
+ case MLX4_SET_PORT_GENERAL:
+ gen_context = inbox->buf;
+			/* MTU is configured as the max MTU among all
+			 * the functions on the port. */
+ mtu = be16_to_cpu(gen_context->mtu);
+ mtu = min_t(int, mtu, dev->caps.eth_mtu_cap[port]);
+ prev_mtu = slave_st->mtu[port];
+ slave_st->mtu[port] = mtu;
+ if (mtu > master->max_mtu[port])
+ master->max_mtu[port] = mtu;
+ if (mtu < prev_mtu && prev_mtu ==
+ master->max_mtu[port]) {
+ slave_st->mtu[port] = mtu;
+ master->max_mtu[port] = mtu;
+ for (i = 0; i < dev->num_slaves; i++) {
+ master->max_mtu[port] =
+ max(master->max_mtu[port],
+ master->slave_state[i].mtu[port]);
+ }
+ }
+
+ gen_context->mtu = cpu_to_be16(master->max_mtu[port]);
+ break;
+ }
+ return mlx4_cmd(dev, inbox->dma, in_mod, op_mod,
+ MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_NATIVE);
+ }
+
+ /* For IB, we only consider:
+ * - The capability mask, which is set to the aggregate of all
+ * slave function capabilities
+	 * - The QKey violation counter - reset according to each request.
+ */
+
+ if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
+ reset_qkey_viols = (*(u8 *) inbox->buf) & 0x40;
+ new_cap_mask = ((__be32 *) inbox->buf)[2];
+ } else {
+ reset_qkey_viols = ((u8 *) inbox->buf)[3] & 0x1;
+ new_cap_mask = ((__be32 *) inbox->buf)[1];
+ }
+
+ agg_cap_mask = 0;
+ slave_cap_mask =
+ priv->mfunc.master.slave_state[slave].ib_cap_mask[port];
+ priv->mfunc.master.slave_state[slave].ib_cap_mask[port] = new_cap_mask;
+ for (i = 0; i < dev->num_slaves; i++)
+ agg_cap_mask |=
+ priv->mfunc.master.slave_state[i].ib_cap_mask[port];
+
+ /* only clear mailbox for guests. Master may be setting
+ * MTU or PKEY table size
+ */
+ if (slave != dev->caps.function)
+ memset(inbox->buf, 0, 256);
+ if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
+ *(u8 *) inbox->buf = !!reset_qkey_viols << 6;
+ ((__be32 *) inbox->buf)[2] = agg_cap_mask;
+ } else {
+ ((u8 *) inbox->buf)[3] = !!reset_qkey_viols;
+ ((__be32 *) inbox->buf)[1] = agg_cap_mask;
+ }
+
+ err = mlx4_cmd(dev, inbox->dma, port, is_eth, MLX4_CMD_SET_PORT,
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
+ if (err)
+ priv->mfunc.master.slave_state[slave].ib_cap_mask[port] =
+ slave_cap_mask;
+ return err;
+}
+
+int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ return mlx4_common_set_port(dev, slave, vhcr->in_modifier,
+ vhcr->op_modifier, inbox);
+}
+
int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port)
{
struct mlx4_cmd_mailbox *mailbox;
@@ -528,8 +777,127 @@ int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port)
((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port];
err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT,
- MLX4_CMD_TIME_CLASS_B);
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
+
+int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
+ u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_set_port_general_context *context;
+ int err;
+ u32 in_mod;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ context = mailbox->buf;
+ memset(context, 0, sizeof *context);
+
+ context->flags = SET_PORT_GEN_ALL_VALID;
+ context->mtu = cpu_to_be16(mtu);
+ context->pptx = (pptx * (!pfctx)) << 7;
+ context->pfctx = pfctx;
+ context->pprx = (pprx * (!pfcrx)) << 7;
+ context->pfcrx = pfcrx;
+
+ in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
+ err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+EXPORT_SYMBOL(mlx4_SET_PORT_general);
+
+int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
+ u8 promisc)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_set_port_rqp_calc_context *context;
+ int err;
+ u32 in_mod;
+ u32 m_promisc = (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) ?
+ MCAST_DIRECT : MCAST_DEFAULT;
+
+ if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER &&
+ dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER)
+ return 0;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ context = mailbox->buf;
+ memset(context, 0, sizeof *context);
+
+ context->base_qpn = cpu_to_be32(base_qpn);
+ context->n_mac = dev->caps.log_num_macs;
+ context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT |
+ base_qpn);
+ context->mcast = cpu_to_be32(m_promisc << SET_PORT_MC_PROMISC_SHIFT |
+ base_qpn);
+ context->intra_no_vlan = 0;
+ context->no_vlan = MLX4_NO_VLAN_IDX;
+ context->intra_vlan_miss = 0;
+ context->vlan_miss = MLX4_VLAN_MISS_IDX;
+
+ in_mod = MLX4_SET_PORT_RQP_CALC << 8 | port;
+ err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+EXPORT_SYMBOL(mlx4_SET_PORT_qpn_calc);
+
+int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err = 0;
+
+ return err;
+}
+
+int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port,
+ u64 mac, u64 clear, u8 mode)
+{
+ return mlx4_cmd(dev, (mac | (clear << 63)), port, mode,
+ MLX4_CMD_SET_MCAST_FLTR, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_WRAPPED);
+}
+EXPORT_SYMBOL(mlx4_SET_MCAST_FLTR);
+
+int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err = 0;
+
+ return err;
+}
+
+int mlx4_common_dump_eth_stats(struct mlx4_dev *dev, int slave,
+ u32 in_mod, struct mlx4_cmd_mailbox *outbox)
+{
+ return mlx4_cmd_box(dev, 0, outbox->dma, in_mod, 0,
+ MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_NATIVE);
+}
+
+int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ return mlx4_common_dump_eth_stats(dev, slave,
+ vhcr->in_modifier, outbox);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx4/profile.c b/drivers/net/ethernet/mellanox/mlx4/profile.c
index b967647d0c76..66f91ca7a7c6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/profile.c
+++ b/drivers/net/ethernet/mellanox/mlx4/profile.c
@@ -98,8 +98,8 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
profile[MLX4_RES_EQ].size = dev_cap->eqc_entry_sz;
profile[MLX4_RES_DMPT].size = dev_cap->dmpt_entry_sz;
profile[MLX4_RES_CMPT].size = dev_cap->cmpt_entry_sz;
- profile[MLX4_RES_MTT].size = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz;
- profile[MLX4_RES_MCG].size = MLX4_MGM_ENTRY_SIZE;
+ profile[MLX4_RES_MTT].size = dev_cap->mtt_entry_sz;
+ profile[MLX4_RES_MCG].size = mlx4_get_mgm_entry_size(dev);
profile[MLX4_RES_QP].num = request->num_qp;
profile[MLX4_RES_RDMARC].num = request->num_qp * request->rdmarc_per_qp;
@@ -210,7 +210,7 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
init_hca->cmpt_base = profile[i].start;
break;
case MLX4_RES_MTT:
- dev->caps.num_mtt_segs = profile[i].num;
+ dev->caps.num_mtts = profile[i].num;
priv->mr_table.mtt_base = profile[i].start;
init_hca->mtt_base = profile[i].start;
break;
@@ -218,7 +218,8 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
dev->caps.num_mgms = profile[i].num >> 1;
dev->caps.num_amgms = profile[i].num >> 1;
init_hca->mc_base = profile[i].start;
- init_hca->log_mc_entry_sz = ilog2(MLX4_MGM_ENTRY_SIZE);
+ init_hca->log_mc_entry_sz =
+ ilog2(mlx4_get_mgm_entry_size(dev));
init_hca->log_mc_table_sz = profile[i].log_num;
init_hca->log_mc_hash_sz = profile[i].log_num - 1;
break;
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 15f870cb2590..6b03ac8b9002 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -35,6 +35,8 @@
#include <linux/gfp.h>
#include <linux/export.h>
+#include <linux/init.h>
+
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
@@ -55,7 +57,7 @@ void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type)
spin_unlock(&qp_table->lock);
if (!qp) {
- mlx4_warn(dev, "Async event for bogus QP %08x\n", qpn);
+		mlx4_dbg(dev, "Async event for nonexistent QP %08x\n", qpn);
return;
}
@@ -65,10 +67,17 @@ void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type)
complete(&qp->free);
}
-int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
- enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
- struct mlx4_qp_context *context, enum mlx4_qp_optpar optpar,
- int sqd_event, struct mlx4_qp *qp)
+static int is_qp0(struct mlx4_dev *dev, struct mlx4_qp *qp)
+{
+ return qp->qpn >= dev->caps.sqp_start &&
+ qp->qpn <= dev->caps.sqp_start + 1;
+}
+
+static int __mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+ enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
+ struct mlx4_qp_context *context,
+ enum mlx4_qp_optpar optpar,
+ int sqd_event, struct mlx4_qp *qp, int native)
{
static const u16 op[MLX4_QP_NUM_STATE][MLX4_QP_NUM_STATE] = {
[MLX4_QP_STATE_RST] = {
@@ -110,16 +119,26 @@ int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
}
};
+ struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_cmd_mailbox *mailbox;
int ret = 0;
+ u8 port;
if (cur_state >= MLX4_QP_NUM_STATE || new_state >= MLX4_QP_NUM_STATE ||
!op[cur_state][new_state])
return -EINVAL;
- if (op[cur_state][new_state] == MLX4_CMD_2RST_QP)
- return mlx4_cmd(dev, 0, qp->qpn, 2,
- MLX4_CMD_2RST_QP, MLX4_CMD_TIME_CLASS_A);
+ if (op[cur_state][new_state] == MLX4_CMD_2RST_QP) {
+ ret = mlx4_cmd(dev, 0, qp->qpn, 2,
+ MLX4_CMD_2RST_QP, MLX4_CMD_TIME_CLASS_A, native);
+ if (mlx4_is_master(dev) && cur_state != MLX4_QP_STATE_ERR &&
+ cur_state != MLX4_QP_STATE_RST &&
+ is_qp0(dev, qp)) {
+ port = (qp->qpn & 1) + 1;
+ priv->mfunc.master.qp0_state[port].qp0_active = 0;
+ }
+ return ret;
+ }
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
@@ -132,107 +151,218 @@ int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
}
+ port = ((context->pri_path.sched_queue >> 6) & 1) + 1;
+ if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
+ context->pri_path.sched_queue = (context->pri_path.sched_queue &
+ 0xc3);
+
*(__be32 *) mailbox->buf = cpu_to_be32(optpar);
memcpy(mailbox->buf + 8, context, sizeof *context);
((struct mlx4_qp_context *) (mailbox->buf + 8))->local_qpn =
cpu_to_be32(qp->qpn);
- ret = mlx4_cmd(dev, mailbox->dma, qp->qpn | (!!sqd_event << 31),
+ ret = mlx4_cmd(dev, mailbox->dma | dev->caps.function,
+ qp->qpn | (!!sqd_event << 31),
new_state == MLX4_QP_STATE_RST ? 2 : 0,
- op[cur_state][new_state], MLX4_CMD_TIME_CLASS_C);
+ op[cur_state][new_state], MLX4_CMD_TIME_CLASS_C, native);
mlx4_free_cmd_mailbox(dev, mailbox);
return ret;
}
+
+int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+ enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
+ struct mlx4_qp_context *context,
+ enum mlx4_qp_optpar optpar,
+ int sqd_event, struct mlx4_qp *qp)
+{
+ return __mlx4_qp_modify(dev, mtt, cur_state, new_state, context,
+ optpar, sqd_event, qp, 0);
+}
EXPORT_SYMBOL_GPL(mlx4_qp_modify);
-int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base)
+int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
+ int *base)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_qp_table *qp_table = &priv->qp_table;
- int qpn;
- qpn = mlx4_bitmap_alloc_range(&qp_table->bitmap, cnt, align);
- if (qpn == -1)
+ *base = mlx4_bitmap_alloc_range(&qp_table->bitmap, cnt, align);
+ if (*base == -1)
return -ENOMEM;
- *base = qpn;
return 0;
}
+
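+/*
+ * In multi-function mode QP ranges are reserved through a wrapped
+ * ALLOC_RES command so the master records ownership in its resource
+ * tracker; the allocated base QPN is returned in the immediate
+ * out parameter.
+ */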
+int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base)
+{
+ u64 in_param;
+ u64 out_param;
+ int err;
+
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&in_param, cnt);
+ set_param_h(&in_param, align);
+ err = mlx4_cmd_imm(dev, in_param, &out_param,
+ RES_QP, RES_OP_RESERVE,
+ MLX4_CMD_ALLOC_RES,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ if (err)
+ return err;
+
+ *base = get_param_l(&out_param);
+ return 0;
+ }
+ return __mlx4_qp_reserve_range(dev, cnt, align, base);
+}
EXPORT_SYMBOL_GPL(mlx4_qp_reserve_range);
-void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
+void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_qp_table *qp_table = &priv->qp_table;
- if (base_qpn < dev->caps.sqp_start + 8)
- return;
+ if (mlx4_is_qp_reserved(dev, (u32) base_qpn))
+ return;
mlx4_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt);
}
+
+void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
+{
+ u64 in_param;
+ int err;
+
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&in_param, base_qpn);
+ set_param_h(&in_param, cnt);
+ err = mlx4_cmd(dev, in_param, RES_QP, RES_OP_RESERVE,
+ MLX4_CMD_FREE_RES,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ if (err) {
+ mlx4_warn(dev, "Failed to release qp range"
+ " base:%d cnt:%d\n", base_qpn, cnt);
+ }
+ } else
+ __mlx4_qp_release_range(dev, base_qpn, cnt);
+}
EXPORT_SYMBOL_GPL(mlx4_qp_release_range);
-int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp)
+int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_qp_table *qp_table = &priv->qp_table;
int err;
- if (!qpn)
- return -EINVAL;
-
- qp->qpn = qpn;
-
- err = mlx4_table_get(dev, &qp_table->qp_table, qp->qpn);
+ err = mlx4_table_get(dev, &qp_table->qp_table, qpn);
if (err)
goto err_out;
- err = mlx4_table_get(dev, &qp_table->auxc_table, qp->qpn);
+ err = mlx4_table_get(dev, &qp_table->auxc_table, qpn);
if (err)
goto err_put_qp;
- err = mlx4_table_get(dev, &qp_table->altc_table, qp->qpn);
+ err = mlx4_table_get(dev, &qp_table->altc_table, qpn);
if (err)
goto err_put_auxc;
- err = mlx4_table_get(dev, &qp_table->rdmarc_table, qp->qpn);
+ err = mlx4_table_get(dev, &qp_table->rdmarc_table, qpn);
if (err)
goto err_put_altc;
- err = mlx4_table_get(dev, &qp_table->cmpt_table, qp->qpn);
+ err = mlx4_table_get(dev, &qp_table->cmpt_table, qpn);
if (err)
goto err_put_rdmarc;
- spin_lock_irq(&qp_table->lock);
- err = radix_tree_insert(&dev->qp_table_tree, qp->qpn & (dev->caps.num_qps - 1), qp);
- spin_unlock_irq(&qp_table->lock);
- if (err)
- goto err_put_cmpt;
-
- atomic_set(&qp->refcount, 1);
- init_completion(&qp->free);
-
return 0;
-err_put_cmpt:
- mlx4_table_put(dev, &qp_table->cmpt_table, qp->qpn);
-
err_put_rdmarc:
- mlx4_table_put(dev, &qp_table->rdmarc_table, qp->qpn);
+ mlx4_table_put(dev, &qp_table->rdmarc_table, qpn);
err_put_altc:
- mlx4_table_put(dev, &qp_table->altc_table, qp->qpn);
+ mlx4_table_put(dev, &qp_table->altc_table, qpn);
err_put_auxc:
- mlx4_table_put(dev, &qp_table->auxc_table, qp->qpn);
+ mlx4_table_put(dev, &qp_table->auxc_table, qpn);
err_put_qp:
- mlx4_table_put(dev, &qp_table->qp_table, qp->qpn);
+ mlx4_table_put(dev, &qp_table->qp_table, qpn);
err_out:
return err;
}
+
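+/*
+ * ICM for a QP is mapped as a second step, after the QP number has been
+ * reserved; slaves request it with a wrapped ALLOC_RES/RES_OP_MAP_ICM
+ * command, matching the RESERVED -> MAPPED transition tracked by the
+ * master.
+ */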
+static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn)
+{
+ u64 param;
+
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&param, qpn);
+ return mlx4_cmd_imm(dev, param, &param, RES_QP, RES_OP_MAP_ICM,
+ MLX4_CMD_ALLOC_RES, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_WRAPPED);
+ }
+ return __mlx4_qp_alloc_icm(dev, qpn);
+}
+
+void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_qp_table *qp_table = &priv->qp_table;
+
+ mlx4_table_put(dev, &qp_table->cmpt_table, qpn);
+ mlx4_table_put(dev, &qp_table->rdmarc_table, qpn);
+ mlx4_table_put(dev, &qp_table->altc_table, qpn);
+ mlx4_table_put(dev, &qp_table->auxc_table, qpn);
+ mlx4_table_put(dev, &qp_table->qp_table, qpn);
+}
+
+static void mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
+{
+ u64 in_param;
+
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&in_param, qpn);
+ if (mlx4_cmd(dev, in_param, RES_QP, RES_OP_MAP_ICM,
+ MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_WRAPPED))
+ mlx4_warn(dev, "Failed to free icm of qp:%d\n", qpn);
+ } else
+ __mlx4_qp_free_icm(dev, qpn);
+}
+
+int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_qp_table *qp_table = &priv->qp_table;
+ int err;
+
+ if (!qpn)
+ return -EINVAL;
+
+ qp->qpn = qpn;
+
+ err = mlx4_qp_alloc_icm(dev, qpn);
+ if (err)
+ return err;
+
+ spin_lock_irq(&qp_table->lock);
+ err = radix_tree_insert(&dev->qp_table_tree, qp->qpn &
+ (dev->caps.num_qps - 1), qp);
+ spin_unlock_irq(&qp_table->lock);
+ if (err)
+ goto err_icm;
+
+ atomic_set(&qp->refcount, 1);
+ init_completion(&qp->free);
+
+ return 0;
+
+err_icm:
+ mlx4_qp_free_icm(dev, qpn);
+ return err;
+}
+
EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp)
@@ -248,24 +378,18 @@ EXPORT_SYMBOL_GPL(mlx4_qp_remove);
void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp)
{
- struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
-
if (atomic_dec_and_test(&qp->refcount))
complete(&qp->free);
wait_for_completion(&qp->free);
- mlx4_table_put(dev, &qp_table->cmpt_table, qp->qpn);
- mlx4_table_put(dev, &qp_table->rdmarc_table, qp->qpn);
- mlx4_table_put(dev, &qp_table->altc_table, qp->qpn);
- mlx4_table_put(dev, &qp_table->auxc_table, qp->qpn);
- mlx4_table_put(dev, &qp_table->qp_table, qp->qpn);
+ mlx4_qp_free_icm(dev, qp->qpn);
}
EXPORT_SYMBOL_GPL(mlx4_qp_free);
static int mlx4_CONF_SPECIAL_QP(struct mlx4_dev *dev, u32 base_qpn)
{
return mlx4_cmd(dev, 0, base_qpn, 0, MLX4_CMD_CONF_SPECIAL_QP,
- MLX4_CMD_TIME_CLASS_B);
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}
int mlx4_init_qp_table(struct mlx4_dev *dev)
@@ -276,6 +400,8 @@ int mlx4_init_qp_table(struct mlx4_dev *dev)
spin_lock_init(&qp_table->lock);
INIT_RADIX_TREE(&dev->qp_table_tree, GFP_ATOMIC);
+ if (mlx4_is_slave(dev))
+ return 0;
/*
* We reserve 2 extra QPs per port for the special QPs. The
@@ -327,6 +453,9 @@ int mlx4_init_qp_table(struct mlx4_dev *dev)
void mlx4_cleanup_qp_table(struct mlx4_dev *dev)
{
+ if (mlx4_is_slave(dev))
+ return;
+
mlx4_CONF_SPECIAL_QP(dev, 0);
mlx4_bitmap_cleanup(&mlx4_priv(dev)->qp_table.bitmap);
}
@@ -342,7 +471,8 @@ int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp,
return PTR_ERR(mailbox);
err = mlx4_cmd_box(dev, 0, mailbox->dma, qp->qpn, 0,
- MLX4_CMD_QUERY_QP, MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_QUERY_QP, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_WRAPPED);
if (!err)
memcpy(context, mailbox->buf + 8, sizeof *context);
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
new file mode 100644
index 000000000000..ed20751a057d
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -0,0 +1,3104 @@
+/*
+ * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
+ * All rights reserved.
+ * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/mlx4/cmd.h>
+#include <linux/mlx4/qp.h>
+
+#include "mlx4.h"
+#include "fw.h"
+
+#define MLX4_MAC_VALID (1ull << 63)
+#define MLX4_MAC_MASK 0x7fffffffffffffffULL
+#define ETH_ALEN 6
+
+struct mac_res {
+ struct list_head list;
+ u64 mac;
+ u8 port;
+};
+
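+/*
+ * Common header embedded in every tracked resource: res_id keys the
+ * per-type radix tree, owner is the slave that allocated the resource,
+ * and state follows the per-type state enums below (RES_ANY_BUSY marks
+ * an entry temporarily locked by get_res() or a *_start_move_to()).
+ */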
+struct res_common {
+ struct list_head list;
+ u32 res_id;
+ int owner;
+ int state;
+ int from_state;
+ int to_state;
+ int removing;
+};
+
+enum {
+ RES_ANY_BUSY = 1
+};
+
+struct res_gid {
+ struct list_head list;
+ u8 gid[16];
+ enum mlx4_protocol prot;
+};
+
+enum res_qp_states {
+ RES_QP_BUSY = RES_ANY_BUSY,
+
+ /* QP number was allocated */
+ RES_QP_RESERVED,
+
+ /* ICM memory for QP context was mapped */
+ RES_QP_MAPPED,
+
+ /* QP is in hw ownership */
+ RES_QP_HW
+};
+
+static inline const char *qp_states_str(enum res_qp_states state)
+{
+ switch (state) {
+ case RES_QP_BUSY: return "RES_QP_BUSY";
+ case RES_QP_RESERVED: return "RES_QP_RESERVED";
+ case RES_QP_MAPPED: return "RES_QP_MAPPED";
+ case RES_QP_HW: return "RES_QP_HW";
+ default: return "Unknown";
+ }
+}
+
+struct res_qp {
+ struct res_common com;
+ struct res_mtt *mtt;
+ struct res_cq *rcq;
+ struct res_cq *scq;
+ struct res_srq *srq;
+ struct list_head mcg_list;
+ spinlock_t mcg_spl;
+ int local_qpn;
+};
+
+enum res_mtt_states {
+ RES_MTT_BUSY = RES_ANY_BUSY,
+ RES_MTT_ALLOCATED,
+};
+
+static inline const char *mtt_states_str(enum res_mtt_states state)
+{
+ switch (state) {
+ case RES_MTT_BUSY: return "RES_MTT_BUSY";
+ case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
+ default: return "Unknown";
+ }
+}
+
+struct res_mtt {
+ struct res_common com;
+ int order;
+ atomic_t ref_count;
+};
+
+enum res_mpt_states {
+ RES_MPT_BUSY = RES_ANY_BUSY,
+ RES_MPT_RESERVED,
+ RES_MPT_MAPPED,
+ RES_MPT_HW,
+};
+
+struct res_mpt {
+ struct res_common com;
+ struct res_mtt *mtt;
+ int key;
+};
+
+enum res_eq_states {
+ RES_EQ_BUSY = RES_ANY_BUSY,
+ RES_EQ_RESERVED,
+ RES_EQ_HW,
+};
+
+struct res_eq {
+ struct res_common com;
+ struct res_mtt *mtt;
+};
+
+enum res_cq_states {
+ RES_CQ_BUSY = RES_ANY_BUSY,
+ RES_CQ_ALLOCATED,
+ RES_CQ_HW,
+};
+
+struct res_cq {
+ struct res_common com;
+ struct res_mtt *mtt;
+ atomic_t ref_count;
+};
+
+enum res_srq_states {
+ RES_SRQ_BUSY = RES_ANY_BUSY,
+ RES_SRQ_ALLOCATED,
+ RES_SRQ_HW,
+};
+
+static inline const char *srq_states_str(enum res_srq_states state)
+{
+ switch (state) {
+ case RES_SRQ_BUSY: return "RES_SRQ_BUSY";
+ case RES_SRQ_ALLOCATED: return "RES_SRQ_ALLOCATED";
+ case RES_SRQ_HW: return "RES_SRQ_HW";
+ default: return "Unknown";
+ }
+}
+
+struct res_srq {
+ struct res_common com;
+ struct res_mtt *mtt;
+ struct res_cq *cq;
+ atomic_t ref_count;
+};
+
+enum res_counter_states {
+ RES_COUNTER_BUSY = RES_ANY_BUSY,
+ RES_COUNTER_ALLOCATED,
+};
+
+static inline const char *counter_states_str(enum res_counter_states state)
+{
+ switch (state) {
+ case RES_COUNTER_BUSY: return "RES_COUNTER_BUSY";
+ case RES_COUNTER_ALLOCATED: return "RES_COUNTER_ALLOCATED";
+ default: return "Unknown";
+ }
+}
+
+struct res_counter {
+ struct res_common com;
+ int port;
+};
+
+/* For Debug uses */
+static const char *ResourceType(enum mlx4_resource rt)
+{
+ switch (rt) {
+ case RES_QP: return "RES_QP";
+ case RES_CQ: return "RES_CQ";
+ case RES_SRQ: return "RES_SRQ";
+ case RES_MPT: return "RES_MPT";
+ case RES_MTT: return "RES_MTT";
+ case RES_MAC: return "RES_MAC";
+ case RES_EQ: return "RES_EQ";
+ case RES_COUNTER: return "RES_COUNTER";
+ default: return "Unknown resource type !!!";
+ };
+}
+
+int mlx4_init_resource_tracker(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int i;
+ int t;
+
+ priv->mfunc.master.res_tracker.slave_list =
+ kzalloc(dev->num_slaves * sizeof(struct slave_list),
+ GFP_KERNEL);
+ if (!priv->mfunc.master.res_tracker.slave_list)
+ return -ENOMEM;
+
+ for (i = 0 ; i < dev->num_slaves; i++) {
+ for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
+ INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
+ slave_list[i].res_list[t]);
+ mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
+ }
+
+ mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
+ dev->num_slaves);
+ for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
+ INIT_RADIX_TREE(&priv->mfunc.master.res_tracker.res_tree[i],
+ GFP_ATOMIC|__GFP_NOWARN);
+
+ spin_lock_init(&priv->mfunc.master.res_tracker.lock);
+ return 0 ;
+}
+
+void mlx4_free_resource_tracker(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int i;
+
+ if (priv->mfunc.master.res_tracker.slave_list) {
+ for (i = 0 ; i < dev->num_slaves; i++)
+ mlx4_delete_all_resources_for_slave(dev, i);
+
+ kfree(priv->mfunc.master.res_tracker.slave_list);
+ }
+}
+
+static void update_ud_gid(struct mlx4_dev *dev,
+ struct mlx4_qp_context *qp_ctx, u8 slave)
+{
+ u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
+
+ if (MLX4_QP_ST_UD == ts)
+ qp_ctx->pri_path.mgid_index = 0x80 | slave;
+
+ mlx4_dbg(dev, "slave %d, new gid index: 0x%x ",
+ slave, qp_ctx->pri_path.mgid_index);
+}
+
+static int mpt_mask(struct mlx4_dev *dev)
+{
+ return dev->caps.num_mpts - 1;
+}
+
+static void *find_res(struct mlx4_dev *dev, int res_id,
+ enum mlx4_resource type)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ return radix_tree_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
+ res_id);
+}
+
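+/*
+ * Look up a resource and mark it busy on behalf of a slave; fails if it
+ * is already busy or owned by another function.  put_res() restores the
+ * previous state.
+ */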
+static int get_res(struct mlx4_dev *dev, int slave, int res_id,
+ enum mlx4_resource type,
+ void *res)
+{
+ struct res_common *r;
+ int err = 0;
+
+ spin_lock_irq(mlx4_tlock(dev));
+ r = find_res(dev, res_id, type);
+ if (!r) {
+ err = -ENONET;
+ goto exit;
+ }
+
+ if (r->state == RES_ANY_BUSY) {
+ err = -EBUSY;
+ goto exit;
+ }
+
+ if (r->owner != slave) {
+ err = -EPERM;
+ goto exit;
+ }
+
+ r->from_state = r->state;
+ r->state = RES_ANY_BUSY;
+ mlx4_dbg(dev, "res %s id 0x%x to busy\n",
+ ResourceType(type), r->res_id);
+
+ if (res)
+ *((struct res_common **)res) = r;
+
+exit:
+ spin_unlock_irq(mlx4_tlock(dev));
+ return err;
+}
+
+int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
+ enum mlx4_resource type,
+ int res_id, int *slave)
+{
+
+ struct res_common *r;
+ int err = -ENOENT;
+ int id = res_id;
+
+ if (type == RES_QP)
+ id &= 0x7fffff;
+ spin_lock(mlx4_tlock(dev));
+
+ r = find_res(dev, id, type);
+ if (r) {
+ *slave = r->owner;
+ err = 0;
+ }
+ spin_unlock(mlx4_tlock(dev));
+
+ return err;
+}
+
+static void put_res(struct mlx4_dev *dev, int slave, int res_id,
+ enum mlx4_resource type)
+{
+ struct res_common *r;
+
+ spin_lock_irq(mlx4_tlock(dev));
+ r = find_res(dev, res_id, type);
+ if (r)
+ r->state = r->from_state;
+ spin_unlock_irq(mlx4_tlock(dev));
+}
+
+static struct res_common *alloc_qp_tr(int id)
+{
+ struct res_qp *ret;
+
+ ret = kzalloc(sizeof *ret, GFP_KERNEL);
+ if (!ret)
+ return NULL;
+
+ ret->com.res_id = id;
+ ret->com.state = RES_QP_RESERVED;
+ INIT_LIST_HEAD(&ret->mcg_list);
+ spin_lock_init(&ret->mcg_spl);
+
+ return &ret->com;
+}
+
+static struct res_common *alloc_mtt_tr(int id, int order)
+{
+ struct res_mtt *ret;
+
+ ret = kzalloc(sizeof *ret, GFP_KERNEL);
+ if (!ret)
+ return NULL;
+
+ ret->com.res_id = id;
+ ret->order = order;
+ ret->com.state = RES_MTT_ALLOCATED;
+ atomic_set(&ret->ref_count, 0);
+
+ return &ret->com;
+}
+
+static struct res_common *alloc_mpt_tr(int id, int key)
+{
+ struct res_mpt *ret;
+
+ ret = kzalloc(sizeof *ret, GFP_KERNEL);
+ if (!ret)
+ return NULL;
+
+ ret->com.res_id = id;
+ ret->com.state = RES_MPT_RESERVED;
+ ret->key = key;
+
+ return &ret->com;
+}
+
+static struct res_common *alloc_eq_tr(int id)
+{
+ struct res_eq *ret;
+
+ ret = kzalloc(sizeof *ret, GFP_KERNEL);
+ if (!ret)
+ return NULL;
+
+ ret->com.res_id = id;
+ ret->com.state = RES_EQ_RESERVED;
+
+ return &ret->com;
+}
+
+static struct res_common *alloc_cq_tr(int id)
+{
+ struct res_cq *ret;
+
+ ret = kzalloc(sizeof *ret, GFP_KERNEL);
+ if (!ret)
+ return NULL;
+
+ ret->com.res_id = id;
+ ret->com.state = RES_CQ_ALLOCATED;
+ atomic_set(&ret->ref_count, 0);
+
+ return &ret->com;
+}
+
+static struct res_common *alloc_srq_tr(int id)
+{
+ struct res_srq *ret;
+
+ ret = kzalloc(sizeof *ret, GFP_KERNEL);
+ if (!ret)
+ return NULL;
+
+ ret->com.res_id = id;
+ ret->com.state = RES_SRQ_ALLOCATED;
+ atomic_set(&ret->ref_count, 0);
+
+ return &ret->com;
+}
+
+static struct res_common *alloc_counter_tr(int id)
+{
+ struct res_counter *ret;
+
+ ret = kzalloc(sizeof *ret, GFP_KERNEL);
+ if (!ret)
+ return NULL;
+
+ ret->com.res_id = id;
+ ret->com.state = RES_COUNTER_ALLOCATED;
+
+ return &ret->com;
+}
+
+static struct res_common *alloc_tr(int id, enum mlx4_resource type, int slave,
+ int extra)
+{
+ struct res_common *ret;
+
+ switch (type) {
+ case RES_QP:
+ ret = alloc_qp_tr(id);
+ break;
+ case RES_MPT:
+ ret = alloc_mpt_tr(id, extra);
+ break;
+ case RES_MTT:
+ ret = alloc_mtt_tr(id, extra);
+ break;
+ case RES_EQ:
+ ret = alloc_eq_tr(id);
+ break;
+ case RES_CQ:
+ ret = alloc_cq_tr(id);
+ break;
+ case RES_SRQ:
+ ret = alloc_srq_tr(id);
+ break;
+ case RES_MAC:
+ printk(KERN_ERR "implementation missing\n");
+ return NULL;
+ case RES_COUNTER:
+ ret = alloc_counter_tr(id);
+ break;
+
+ default:
+ return NULL;
+ }
+ if (ret)
+ ret->owner = slave;
+
+ return ret;
+}
+
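+/*
+ * Record a contiguous range of resource ids as owned by a slave: one
+ * tracking entry per id is allocated, inserted into the per-type radix
+ * tree and linked on the slave's resource list.
+ */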
+static int add_res_range(struct mlx4_dev *dev, int slave, int base, int count,
+ enum mlx4_resource type, int extra)
+{
+ int i;
+ int err;
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct res_common **res_arr;
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct radix_tree_root *root = &tracker->res_tree[type];
+
+ res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
+ if (!res_arr)
+ return -ENOMEM;
+
+ for (i = 0; i < count; ++i) {
+ res_arr[i] = alloc_tr(base + i, type, slave, extra);
+ if (!res_arr[i]) {
+ for (--i; i >= 0; --i)
+ kfree(res_arr[i]);
+
+ kfree(res_arr);
+ return -ENOMEM;
+ }
+ }
+
+ spin_lock_irq(mlx4_tlock(dev));
+ for (i = 0; i < count; ++i) {
+ if (find_res(dev, base + i, type)) {
+ err = -EEXIST;
+ goto undo;
+ }
+ err = radix_tree_insert(root, base + i, res_arr[i]);
+ if (err)
+ goto undo;
+ list_add_tail(&res_arr[i]->list,
+ &tracker->slave_list[slave].res_list[type]);
+ }
+ spin_unlock_irq(mlx4_tlock(dev));
+ kfree(res_arr);
+
+ return 0;
+
+undo:
+	for (--i; i >= 0; --i)
+		radix_tree_delete(&tracker->res_tree[type], base + i);
+
+ spin_unlock_irq(mlx4_tlock(dev));
+
+ for (i = 0; i < count; ++i)
+ kfree(res_arr[i]);
+
+ kfree(res_arr);
+
+ return err;
+}
+
+static int remove_qp_ok(struct res_qp *res)
+{
+ if (res->com.state == RES_QP_BUSY)
+ return -EBUSY;
+ else if (res->com.state != RES_QP_RESERVED)
+ return -EPERM;
+
+ return 0;
+}
+
+static int remove_mtt_ok(struct res_mtt *res, int order)
+{
+ if (res->com.state == RES_MTT_BUSY ||
+ atomic_read(&res->ref_count)) {
+ printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
+ __func__, __LINE__,
+ mtt_states_str(res->com.state),
+ atomic_read(&res->ref_count));
+ return -EBUSY;
+ } else if (res->com.state != RES_MTT_ALLOCATED)
+ return -EPERM;
+ else if (res->order != order)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int remove_mpt_ok(struct res_mpt *res)
+{
+ if (res->com.state == RES_MPT_BUSY)
+ return -EBUSY;
+ else if (res->com.state != RES_MPT_RESERVED)
+ return -EPERM;
+
+ return 0;
+}
+
+static int remove_eq_ok(struct res_eq *res)
+{
+	if (res->com.state == RES_EQ_BUSY)
+		return -EBUSY;
+	else if (res->com.state != RES_EQ_RESERVED)
+ return -EPERM;
+
+ return 0;
+}
+
+static int remove_counter_ok(struct res_counter *res)
+{
+ if (res->com.state == RES_COUNTER_BUSY)
+ return -EBUSY;
+ else if (res->com.state != RES_COUNTER_ALLOCATED)
+ return -EPERM;
+
+ return 0;
+}
+
+static int remove_cq_ok(struct res_cq *res)
+{
+ if (res->com.state == RES_CQ_BUSY)
+ return -EBUSY;
+ else if (res->com.state != RES_CQ_ALLOCATED)
+ return -EPERM;
+
+ return 0;
+}
+
+static int remove_srq_ok(struct res_srq *res)
+{
+ if (res->com.state == RES_SRQ_BUSY)
+ return -EBUSY;
+ else if (res->com.state != RES_SRQ_ALLOCATED)
+ return -EPERM;
+
+ return 0;
+}
+
+static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
+{
+ switch (type) {
+ case RES_QP:
+ return remove_qp_ok((struct res_qp *)res);
+ case RES_CQ:
+ return remove_cq_ok((struct res_cq *)res);
+ case RES_SRQ:
+ return remove_srq_ok((struct res_srq *)res);
+ case RES_MPT:
+ return remove_mpt_ok((struct res_mpt *)res);
+ case RES_MTT:
+ return remove_mtt_ok((struct res_mtt *)res, extra);
+ case RES_MAC:
+ return -ENOSYS;
+ case RES_EQ:
+ return remove_eq_ok((struct res_eq *)res);
+ case RES_COUNTER:
+ return remove_counter_ok((struct res_counter *)res);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int rem_res_range(struct mlx4_dev *dev, int slave, int base, int count,
+ enum mlx4_resource type, int extra)
+{
+ int i;
+ int err;
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct res_common *r;
+
+ spin_lock_irq(mlx4_tlock(dev));
+ for (i = base; i < base + count; ++i) {
+ r = radix_tree_lookup(&tracker->res_tree[type], i);
+ if (!r) {
+ err = -ENOENT;
+ goto out;
+ }
+ if (r->owner != slave) {
+ err = -EPERM;
+ goto out;
+ }
+ err = remove_ok(r, type, extra);
+ if (err)
+ goto out;
+ }
+
+ for (i = base; i < base + count; ++i) {
+ r = radix_tree_lookup(&tracker->res_tree[type], i);
+ radix_tree_delete(&tracker->res_tree[type], i);
+ list_del(&r->list);
+ kfree(r);
+ }
+ err = 0;
+
+out:
+ spin_unlock_irq(mlx4_tlock(dev));
+
+ return err;
+}
+
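+/*
+ * Begin a QP state transition: the entry is held in the busy state
+ * while the command executes, and res_end_move()/res_abort_move()
+ * later commits or rolls back the move.
+ */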
+static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
+ enum res_qp_states state, struct res_qp **qp,
+ int alloc)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct res_qp *r;
+ int err = 0;
+
+ spin_lock_irq(mlx4_tlock(dev));
+ r = radix_tree_lookup(&tracker->res_tree[RES_QP], qpn);
+ if (!r)
+ err = -ENOENT;
+ else if (r->com.owner != slave)
+ err = -EPERM;
+ else {
+ switch (state) {
+ case RES_QP_BUSY:
+ mlx4_dbg(dev, "%s: failed RES_QP, 0x%x\n",
+ __func__, r->com.res_id);
+ err = -EBUSY;
+ break;
+
+ case RES_QP_RESERVED:
+ if (r->com.state == RES_QP_MAPPED && !alloc)
+ break;
+
+ mlx4_dbg(dev, "failed RES_QP, 0x%x\n", r->com.res_id);
+ err = -EINVAL;
+ break;
+
+ case RES_QP_MAPPED:
+ if ((r->com.state == RES_QP_RESERVED && alloc) ||
+ r->com.state == RES_QP_HW)
+ break;
+ else {
+ mlx4_dbg(dev, "failed RES_QP, 0x%x\n",
+ r->com.res_id);
+ err = -EINVAL;
+ }
+
+ break;
+
+ case RES_QP_HW:
+ if (r->com.state != RES_QP_MAPPED)
+ err = -EINVAL;
+ break;
+ default:
+ err = -EINVAL;
+ }
+
+ if (!err) {
+ r->com.from_state = r->com.state;
+ r->com.to_state = state;
+ r->com.state = RES_QP_BUSY;
+ if (qp)
+ *qp = (struct res_qp *)r;
+ }
+ }
+
+ spin_unlock_irq(mlx4_tlock(dev));
+
+ return err;
+}
+
+static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
+ enum res_mpt_states state, struct res_mpt **mpt)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct res_mpt *r;
+ int err = 0;
+
+ spin_lock_irq(mlx4_tlock(dev));
+ r = radix_tree_lookup(&tracker->res_tree[RES_MPT], index);
+ if (!r)
+ err = -ENOENT;
+ else if (r->com.owner != slave)
+ err = -EPERM;
+ else {
+ switch (state) {
+ case RES_MPT_BUSY:
+ err = -EINVAL;
+ break;
+
+ case RES_MPT_RESERVED:
+ if (r->com.state != RES_MPT_MAPPED)
+ err = -EINVAL;
+ break;
+
+ case RES_MPT_MAPPED:
+ if (r->com.state != RES_MPT_RESERVED &&
+ r->com.state != RES_MPT_HW)
+ err = -EINVAL;
+ break;
+
+ case RES_MPT_HW:
+ if (r->com.state != RES_MPT_MAPPED)
+ err = -EINVAL;
+ break;
+ default:
+ err = -EINVAL;
+ }
+
+ if (!err) {
+ r->com.from_state = r->com.state;
+ r->com.to_state = state;
+ r->com.state = RES_MPT_BUSY;
+ if (mpt)
+ *mpt = (struct res_mpt *)r;
+ }
+ }
+
+ spin_unlock_irq(mlx4_tlock(dev));
+
+ return err;
+}
+
+static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
+ enum res_eq_states state, struct res_eq **eq)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct res_eq *r;
+ int err = 0;
+
+ spin_lock_irq(mlx4_tlock(dev));
+ r = radix_tree_lookup(&tracker->res_tree[RES_EQ], index);
+ if (!r)
+ err = -ENOENT;
+ else if (r->com.owner != slave)
+ err = -EPERM;
+ else {
+ switch (state) {
+ case RES_EQ_BUSY:
+ err = -EINVAL;
+ break;
+
+ case RES_EQ_RESERVED:
+ if (r->com.state != RES_EQ_HW)
+ err = -EINVAL;
+ break;
+
+ case RES_EQ_HW:
+ if (r->com.state != RES_EQ_RESERVED)
+ err = -EINVAL;
+ break;
+
+ default:
+ err = -EINVAL;
+ }
+
+ if (!err) {
+ r->com.from_state = r->com.state;
+ r->com.to_state = state;
+ r->com.state = RES_EQ_BUSY;
+ if (eq)
+ *eq = r;
+ }
+ }
+
+ spin_unlock_irq(mlx4_tlock(dev));
+
+ return err;
+}
+
+static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
+ enum res_cq_states state, struct res_cq **cq)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct res_cq *r;
+ int err;
+
+ spin_lock_irq(mlx4_tlock(dev));
+ r = radix_tree_lookup(&tracker->res_tree[RES_CQ], cqn);
+ if (!r)
+ err = -ENOENT;
+ else if (r->com.owner != slave)
+ err = -EPERM;
+ else {
+ switch (state) {
+ case RES_CQ_BUSY:
+ err = -EBUSY;
+ break;
+
+ case RES_CQ_ALLOCATED:
+ if (r->com.state != RES_CQ_HW)
+ err = -EINVAL;
+ else if (atomic_read(&r->ref_count))
+ err = -EBUSY;
+ else
+ err = 0;
+ break;
+
+ case RES_CQ_HW:
+ if (r->com.state != RES_CQ_ALLOCATED)
+ err = -EINVAL;
+ else
+ err = 0;
+ break;
+
+ default:
+ err = -EINVAL;
+ }
+
+ if (!err) {
+ r->com.from_state = r->com.state;
+ r->com.to_state = state;
+ r->com.state = RES_CQ_BUSY;
+ if (cq)
+ *cq = r;
+ }
+ }
+
+ spin_unlock_irq(mlx4_tlock(dev));
+
+ return err;
+}
+
+static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
+ enum res_cq_states state, struct res_srq **srq)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct res_srq *r;
+ int err = 0;
+
+ spin_lock_irq(mlx4_tlock(dev));
+ r = radix_tree_lookup(&tracker->res_tree[RES_SRQ], index);
+ if (!r)
+ err = -ENOENT;
+ else if (r->com.owner != slave)
+ err = -EPERM;
+ else {
+ switch (state) {
+ case RES_SRQ_BUSY:
+ err = -EINVAL;
+ break;
+
+ case RES_SRQ_ALLOCATED:
+ if (r->com.state != RES_SRQ_HW)
+ err = -EINVAL;
+ else if (atomic_read(&r->ref_count))
+ err = -EBUSY;
+ break;
+
+ case RES_SRQ_HW:
+ if (r->com.state != RES_SRQ_ALLOCATED)
+ err = -EINVAL;
+ break;
+
+ default:
+ err = -EINVAL;
+ }
+
+ if (!err) {
+ r->com.from_state = r->com.state;
+ r->com.to_state = state;
+ r->com.state = RES_SRQ_BUSY;
+ if (srq)
+ *srq = r;
+ }
+ }
+
+ spin_unlock_irq(mlx4_tlock(dev));
+
+ return err;
+}
+
+static void res_abort_move(struct mlx4_dev *dev, int slave,
+ enum mlx4_resource type, int id)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct res_common *r;
+
+ spin_lock_irq(mlx4_tlock(dev));
+ r = radix_tree_lookup(&tracker->res_tree[type], id);
+ if (r && (r->owner == slave))
+ r->state = r->from_state;
+ spin_unlock_irq(mlx4_tlock(dev));
+}
+
+static void res_end_move(struct mlx4_dev *dev, int slave,
+ enum mlx4_resource type, int id)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct res_common *r;
+
+ spin_lock_irq(mlx4_tlock(dev));
+ r = radix_tree_lookup(&tracker->res_tree[type], id);
+ if (r && (r->owner == slave))
+ r->state = r->to_state;
+ spin_unlock_irq(mlx4_tlock(dev));
+}
+
+static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
+{
+ return mlx4_is_qp_reserved(dev, qpn);
+}
+
+static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param, u64 *out_param)
+{
+ int err;
+ int count;
+ int align;
+ int base;
+ int qpn;
+
+ switch (op) {
+ case RES_OP_RESERVE:
+ count = get_param_l(&in_param);
+ align = get_param_h(&in_param);
+ err = __mlx4_qp_reserve_range(dev, count, align, &base);
+ if (err)
+ return err;
+
+ err = add_res_range(dev, slave, base, count, RES_QP, 0);
+ if (err) {
+ __mlx4_qp_release_range(dev, base, count);
+ return err;
+ }
+ set_param_l(out_param, base);
+ break;
+ case RES_OP_MAP_ICM:
+ qpn = get_param_l(&in_param) & 0x7fffff;
+ if (valid_reserved(dev, slave, qpn)) {
+ err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
+ if (err)
+ return err;
+ }
+
+ err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
+ NULL, 1);
+ if (err)
+ return err;
+
+ if (!valid_reserved(dev, slave, qpn)) {
+ err = __mlx4_qp_alloc_icm(dev, qpn);
+ if (err) {
+ res_abort_move(dev, slave, RES_QP, qpn);
+ return err;
+ }
+ }
+
+ res_end_move(dev, slave, RES_QP, qpn);
+ break;
+
+ default:
+ err = -EINVAL;
+ break;
+ }
+ return err;
+}
+
+static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param, u64 *out_param)
+{
+ int err = -EINVAL;
+ int base;
+ int order;
+
+ if (op != RES_OP_RESERVE_AND_MAP)
+ return err;
+
+ order = get_param_l(&in_param);
+ base = __mlx4_alloc_mtt_range(dev, order);
+ if (base == -1)
+ return -ENOMEM;
+
+ err = add_res_range(dev, slave, base, 1, RES_MTT, order);
+ if (err)
+ __mlx4_free_mtt_range(dev, base, order);
+ else
+ set_param_l(out_param, base);
+
+ return err;
+}
+
+static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param, u64 *out_param)
+{
+ int err = -EINVAL;
+ int index;
+ int id;
+ struct res_mpt *mpt;
+
+ switch (op) {
+ case RES_OP_RESERVE:
+ index = __mlx4_mr_reserve(dev);
+ if (index == -1)
+ break;
+ id = index & mpt_mask(dev);
+
+ err = add_res_range(dev, slave, id, 1, RES_MPT, index);
+ if (err) {
+ __mlx4_mr_release(dev, index);
+ break;
+ }
+ set_param_l(out_param, index);
+ break;
+ case RES_OP_MAP_ICM:
+ index = get_param_l(&in_param);
+ id = index & mpt_mask(dev);
+ err = mr_res_start_move_to(dev, slave, id,
+ RES_MPT_MAPPED, &mpt);
+ if (err)
+ return err;
+
+ err = __mlx4_mr_alloc_icm(dev, mpt->key);
+ if (err) {
+ res_abort_move(dev, slave, RES_MPT, id);
+ return err;
+ }
+
+ res_end_move(dev, slave, RES_MPT, id);
+ break;
+ }
+ return err;
+}
+
+static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param, u64 *out_param)
+{
+ int cqn;
+ int err;
+
+ switch (op) {
+ case RES_OP_RESERVE_AND_MAP:
+ err = __mlx4_cq_alloc_icm(dev, &cqn);
+ if (err)
+ break;
+
+ err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
+ if (err) {
+ __mlx4_cq_free_icm(dev, cqn);
+ break;
+ }
+
+ set_param_l(out_param, cqn);
+ break;
+
+ default:
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param, u64 *out_param)
+{
+ int srqn;
+ int err;
+
+ switch (op) {
+ case RES_OP_RESERVE_AND_MAP:
+ err = __mlx4_srq_alloc_icm(dev, &srqn);
+ if (err)
+ break;
+
+ err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
+ if (err) {
+ __mlx4_srq_free_icm(dev, srqn);
+ break;
+ }
+
+ set_param_l(out_param, srqn);
+ break;
+
+ default:
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct mac_res *res;
+
+ res = kzalloc(sizeof *res, GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
+ res->mac = mac;
+ res->port = (u8) port;
+ list_add_tail(&res->list,
+ &tracker->slave_list[slave].res_list[RES_MAC]);
+ return 0;
+}
+
+static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
+ int port)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct list_head *mac_list =
+ &tracker->slave_list[slave].res_list[RES_MAC];
+ struct mac_res *res, *tmp;
+
+ list_for_each_entry_safe(res, tmp, mac_list, list) {
+ if (res->mac == mac && res->port == (u8) port) {
+ list_del(&res->list);
+ kfree(res);
+ break;
+ }
+ }
+}
+
+static void rem_slave_macs(struct mlx4_dev *dev, int slave)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct list_head *mac_list =
+ &tracker->slave_list[slave].res_list[RES_MAC];
+ struct mac_res *res, *tmp;
+
+ list_for_each_entry_safe(res, tmp, mac_list, list) {
+ list_del(&res->list);
+ __mlx4_unregister_mac(dev, res->port, res->mac);
+ kfree(res);
+ }
+}
+
+static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param, u64 *out_param)
+{
+ int err = -EINVAL;
+ int port;
+ u64 mac;
+
+ if (op != RES_OP_RESERVE_AND_MAP)
+ return err;
+
+ port = get_param_l(out_param);
+ mac = in_param;
+
+ err = __mlx4_register_mac(dev, port, mac);
+ if (err >= 0) {
+ set_param_l(out_param, err);
+ err = 0;
+ }
+
+ if (!err) {
+ err = mac_add_to_slave(dev, slave, mac, port);
+ if (err)
+ __mlx4_unregister_mac(dev, port, mac);
+ }
+ return err;
+}
+
+static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param, u64 *out_param)
+{
+ return 0;
+}
+
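+/*
+ * ALLOC_RES commands arriving from slaves are demultiplexed here by
+ * resource type (vhcr->in_modifier) and allocation op (op_modifier).
+ */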
+int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int alop = vhcr->op_modifier;
+
+ switch (vhcr->in_modifier) {
+ case RES_QP:
+ err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
+ vhcr->in_param, &vhcr->out_param);
+ break;
+
+ case RES_MTT:
+ err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
+ vhcr->in_param, &vhcr->out_param);
+ break;
+
+ case RES_MPT:
+ err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
+ vhcr->in_param, &vhcr->out_param);
+ break;
+
+ case RES_CQ:
+ err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
+ vhcr->in_param, &vhcr->out_param);
+ break;
+
+ case RES_SRQ:
+ err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
+ vhcr->in_param, &vhcr->out_param);
+ break;
+
+ case RES_MAC:
+ err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
+ vhcr->in_param, &vhcr->out_param);
+ break;
+
+ case RES_VLAN:
+ err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
+ vhcr->in_param, &vhcr->out_param);
+ break;
+
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
+
+static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param)
+{
+ int err;
+ int count;
+ int base;
+ int qpn;
+
+ switch (op) {
+ case RES_OP_RESERVE:
+ base = get_param_l(&in_param) & 0x7fffff;
+ count = get_param_h(&in_param);
+ err = rem_res_range(dev, slave, base, count, RES_QP, 0);
+ if (err)
+ break;
+ __mlx4_qp_release_range(dev, base, count);
+ break;
+ case RES_OP_MAP_ICM:
+ qpn = get_param_l(&in_param) & 0x7fffff;
+ err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
+ NULL, 0);
+ if (err)
+ return err;
+
+ if (!valid_reserved(dev, slave, qpn))
+ __mlx4_qp_free_icm(dev, qpn);
+
+ res_end_move(dev, slave, RES_QP, qpn);
+
+ if (valid_reserved(dev, slave, qpn))
+ err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+ return err;
+}
+
+static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param, u64 *out_param)
+{
+ int err = -EINVAL;
+ int base;
+ int order;
+
+ if (op != RES_OP_RESERVE_AND_MAP)
+ return err;
+
+ base = get_param_l(&in_param);
+ order = get_param_h(&in_param);
+ err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
+ if (!err)
+ __mlx4_free_mtt_range(dev, base, order);
+ return err;
+}
+
+static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param)
+{
+ int err = -EINVAL;
+ int index;
+ int id;
+ struct res_mpt *mpt;
+
+ switch (op) {
+ case RES_OP_RESERVE:
+ index = get_param_l(&in_param);
+ id = index & mpt_mask(dev);
+ err = get_res(dev, slave, id, RES_MPT, &mpt);
+ if (err)
+ break;
+ index = mpt->key;
+ put_res(dev, slave, id, RES_MPT);
+
+ err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
+ if (err)
+ break;
+ __mlx4_mr_release(dev, index);
+ break;
+ case RES_OP_MAP_ICM:
+ index = get_param_l(&in_param);
+ id = index & mpt_mask(dev);
+ err = mr_res_start_move_to(dev, slave, id,
+ RES_MPT_RESERVED, &mpt);
+ if (err)
+ return err;
+
+ __mlx4_mr_free_icm(dev, mpt->key);
+ res_end_move(dev, slave, RES_MPT, id);
+ return err;
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+ return err;
+}
+
+static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param, u64 *out_param)
+{
+ int cqn;
+ int err;
+
+ switch (op) {
+ case RES_OP_RESERVE_AND_MAP:
+ cqn = get_param_l(&in_param);
+ err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
+ if (err)
+ break;
+
+ __mlx4_cq_free_icm(dev, cqn);
+ break;
+
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
+
+static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param, u64 *out_param)
+{
+ int srqn;
+ int err;
+
+ switch (op) {
+ case RES_OP_RESERVE_AND_MAP:
+ srqn = get_param_l(&in_param);
+ err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
+ if (err)
+ break;
+
+ __mlx4_srq_free_icm(dev, srqn);
+ break;
+
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
+
+static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param, u64 *out_param)
+{
+ int port;
+ int err = 0;
+
+ switch (op) {
+ case RES_OP_RESERVE_AND_MAP:
+ port = get_param_l(out_param);
+ mac_del_from_slave(dev, slave, in_param, port);
+ __mlx4_unregister_mac(dev, port, in_param);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+
+}
+
+static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param, u64 *out_param)
+{
+ return 0;
+}
+
+int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err = -EINVAL;
+ int alop = vhcr->op_modifier;
+
+ switch (vhcr->in_modifier) {
+ case RES_QP:
+ err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
+ vhcr->in_param);
+ break;
+
+ case RES_MTT:
+ err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
+ vhcr->in_param, &vhcr->out_param);
+ break;
+
+ case RES_MPT:
+ err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
+ vhcr->in_param);
+ break;
+
+ case RES_CQ:
+ err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
+ vhcr->in_param, &vhcr->out_param);
+ break;
+
+ case RES_SRQ:
+ err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
+ vhcr->in_param, &vhcr->out_param);
+ break;
+
+ case RES_MAC:
+ err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
+ vhcr->in_param, &vhcr->out_param);
+ break;
+
+ case RES_VLAN:
+ err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
+ vhcr->in_param, &vhcr->out_param);
+ break;
+
+ default:
+ break;
+ }
+ return err;
+}
+
+/* ugly but other choices are uglier */
+static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
+{
+ return (be32_to_cpu(mpt->flags) >> 9) & 1;
+}
+
+static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
+{
+ return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
+}
+
+static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
+{
+ return be32_to_cpu(mpt->mtt_sz);
+}
+
+static int mr_get_pdn(struct mlx4_mpt_entry *mpt)
+{
+ return be32_to_cpu(mpt->pd_flags) & 0xffffff;
+}
+
+static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
+{
+ return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
+}
+
+static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
+{
+ return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
+}
+
+static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
+{
+ int page_shift = (qpc->log_page_size & 0x3f) + 12;
+ int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
+	int log_sq_stride = qpc->sq_size_stride & 7;
+ int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
+ int log_rq_stride = qpc->rq_size_stride & 7;
+ int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
+ int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
+ int xrc = (be32_to_cpu(qpc->local_qpn) >> 23) & 1;
+ int sq_size;
+ int rq_size;
+ int total_pages;
+ int total_mem;
+ int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
+
+	sq_size = 1 << (log_sq_size + log_sq_stride + 4);
+ rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
+ total_mem = sq_size + rq_size;
+ total_pages =
+ roundup_pow_of_two((total_mem + (page_offset << 6)) >>
+ page_shift);
+
+ return total_pages;
+}
+
+static int qp_get_pdn(struct mlx4_qp_context *qpc)
+{
+ return be32_to_cpu(qpc->pd) & 0xffffff;
+}
+
+static int pdn2slave(int pdn)
+{
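+	/* the PD number carries its owning function in the bits above
+	 * NOT_MASKED_PD_BITS */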
+ return (pdn >> NOT_MASKED_PD_BITS) - 1;
+}
+
+static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
+ int size, struct res_mtt *mtt)
+{
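+	/* the requested window [start, start + size) must lie entirely
+	 * within the MTT range reserved for this resource */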
+ int res_start = mtt->com.res_id;
+ int res_size = (1 << mtt->order);
+
+ if (start < res_start || start + size > res_start + res_size)
+ return -EPERM;
+ return 0;
+}
+
+int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int index = vhcr->in_modifier;
+ struct res_mtt *mtt;
+ struct res_mpt *mpt;
+ int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
+ int phys;
+ int id;
+
+ id = index & mpt_mask(dev);
+ err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
+ if (err)
+ return err;
+
+ phys = mr_phys_mpt(inbox->buf);
+ if (!phys) {
+ err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
+ if (err)
+ goto ex_abort;
+
+ err = check_mtt_range(dev, slave, mtt_base,
+ mr_get_mtt_size(inbox->buf), mtt);
+ if (err)
+ goto ex_put;
+
+ mpt->mtt = mtt;
+ }
+
+ if (pdn2slave(mr_get_pdn(inbox->buf)) != slave) {
+ err = -EPERM;
+ goto ex_put;
+ }
+
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ if (err)
+ goto ex_put;
+
+ if (!phys) {
+ atomic_inc(&mtt->ref_count);
+ put_res(dev, slave, mtt->com.res_id, RES_MTT);
+ }
+
+ res_end_move(dev, slave, RES_MPT, id);
+ return 0;
+
+ex_put:
+ if (!phys)
+ put_res(dev, slave, mtt->com.res_id, RES_MTT);
+ex_abort:
+ res_abort_move(dev, slave, RES_MPT, id);
+
+ return err;
+}
+
+int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int index = vhcr->in_modifier;
+ struct res_mpt *mpt;
+ int id;
+
+ id = index & mpt_mask(dev);
+ err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
+ if (err)
+ return err;
+
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ if (err)
+ goto ex_abort;
+
+ if (mpt->mtt)
+ atomic_dec(&mpt->mtt->ref_count);
+
+ res_end_move(dev, slave, RES_MPT, id);
+ return 0;
+
+ex_abort:
+ res_abort_move(dev, slave, RES_MPT, id);
+
+ return err;
+}
+
+int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int index = vhcr->in_modifier;
+ struct res_mpt *mpt;
+ int id;
+
+ id = index & mpt_mask(dev);
+ err = get_res(dev, slave, id, RES_MPT, &mpt);
+ if (err)
+ return err;
+
+ if (mpt->com.from_state != RES_MPT_HW) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+
+out:
+ put_res(dev, slave, id, RES_MPT);
+ return err;
+}
+
+static int qp_get_rcqn(struct mlx4_qp_context *qpc)
+{
+ return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
+}
+
+static int qp_get_scqn(struct mlx4_qp_context *qpc)
+{
+ return be32_to_cpu(qpc->cqn_send) & 0xffffff;
+}
+
+static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
+{
+ return be32_to_cpu(qpc->srqn) & 0x1ffffff;
+}
+
+int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int qpn = vhcr->in_modifier & 0x7fffff;
+ struct res_mtt *mtt;
+ struct res_qp *qp;
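+	/* the QP context sits 8 bytes into the mailbox, after the 32-bit
+	 * opt-param mask and a reserved word */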
+ struct mlx4_qp_context *qpc = inbox->buf + 8;
+ int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
+ int mtt_size = qp_get_mtt_size(qpc);
+ struct res_cq *rcq;
+ struct res_cq *scq;
+ int rcqn = qp_get_rcqn(qpc);
+ int scqn = qp_get_scqn(qpc);
+ u32 srqn = qp_get_srqn(qpc) & 0xffffff;
+ int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
+ struct res_srq *srq;
+ int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
+
+ err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
+ if (err)
+ return err;
+ qp->local_qpn = local_qpn;
+
+ err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
+ if (err)
+ goto ex_abort;
+
+ err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
+ if (err)
+ goto ex_put_mtt;
+
+ if (pdn2slave(qp_get_pdn(qpc)) != slave) {
+ err = -EPERM;
+ goto ex_put_mtt;
+ }
+
+ err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
+ if (err)
+ goto ex_put_mtt;
+
+ if (scqn != rcqn) {
+ err = get_res(dev, slave, scqn, RES_CQ, &scq);
+ if (err)
+ goto ex_put_rcq;
+ } else
+ scq = rcq;
+
+ if (use_srq) {
+ err = get_res(dev, slave, srqn, RES_SRQ, &srq);
+ if (err)
+ goto ex_put_scq;
+ }
+
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ if (err)
+ goto ex_put_srq;
+ atomic_inc(&mtt->ref_count);
+ qp->mtt = mtt;
+ atomic_inc(&rcq->ref_count);
+ qp->rcq = rcq;
+ atomic_inc(&scq->ref_count);
+ qp->scq = scq;
+
+ if (scqn != rcqn)
+ put_res(dev, slave, scqn, RES_CQ);
+
+ if (use_srq) {
+ atomic_inc(&srq->ref_count);
+ put_res(dev, slave, srqn, RES_SRQ);
+ qp->srq = srq;
+ }
+ put_res(dev, slave, rcqn, RES_CQ);
+ put_res(dev, slave, mtt_base, RES_MTT);
+ res_end_move(dev, slave, RES_QP, qpn);
+
+ return 0;
+
+ex_put_srq:
+ if (use_srq)
+ put_res(dev, slave, srqn, RES_SRQ);
+ex_put_scq:
+ if (scqn != rcqn)
+ put_res(dev, slave, scqn, RES_CQ);
+ex_put_rcq:
+ put_res(dev, slave, rcqn, RES_CQ);
+ex_put_mtt:
+ put_res(dev, slave, mtt_base, RES_MTT);
+ex_abort:
+ res_abort_move(dev, slave, RES_QP, qpn);
+
+ return err;
+}
+
+static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
+{
+ return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
+}
+
+static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
+{
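+	/* EQEs are 32 bytes (2^5), so the EQ occupies 2^(log_eq_size + 5)
+	 * bytes; convert that to pages, with a one-page minimum */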
+ int log_eq_size = eqc->log_eq_size & 0x1f;
+ int page_shift = (eqc->log_page_size & 0x3f) + 12;
+
+ if (log_eq_size + 5 < page_shift)
+ return 1;
+
+ return 1 << (log_eq_size + 5 - page_shift);
+}
+
+static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
+{
+ return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
+}
+
+static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
+{
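+	/* same calculation as for EQs: 32-byte CQEs, at least one page */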
+ int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
+ int page_shift = (cqc->log_page_size & 0x3f) + 12;
+
+ if (log_cq_size + 5 < page_shift)
+ return 1;
+
+ return 1 << (log_cq_size + 5 - page_shift);
+}
+
+int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int eqn = vhcr->in_modifier;
+ int res_id = (slave << 8) | eqn;
+ struct mlx4_eq_context *eqc = inbox->buf;
+ int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
+ int mtt_size = eq_get_mtt_size(eqc);
+ struct res_eq *eq;
+ struct res_mtt *mtt;
+
+ err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
+ if (err)
+ return err;
+ err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
+ if (err)
+ goto out_add;
+
+ err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
+ if (err)
+ goto out_move;
+
+ err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
+ if (err)
+ goto out_put;
+
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ if (err)
+ goto out_put;
+
+ atomic_inc(&mtt->ref_count);
+ eq->mtt = mtt;
+ put_res(dev, slave, mtt->com.res_id, RES_MTT);
+ res_end_move(dev, slave, RES_EQ, res_id);
+ return 0;
+
+out_put:
+ put_res(dev, slave, mtt->com.res_id, RES_MTT);
+out_move:
+ res_abort_move(dev, slave, RES_EQ, res_id);
+out_add:
+ rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
+ return err;
+}
+
+static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
+ int len, struct res_mtt **res)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct res_mtt *mtt;
+ int err = -EINVAL;
+
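+	/* scan the slave's MTT list for a range that covers
+	 * [start, start + len) and mark it busy so it cannot be freed
+	 * while the caller is using it */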
+ spin_lock_irq(mlx4_tlock(dev));
+ list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
+ com.list) {
+ if (!check_mtt_range(dev, slave, start, len, mtt)) {
+ *res = mtt;
+ mtt->com.from_state = mtt->com.state;
+ mtt->com.state = RES_MTT_BUSY;
+ err = 0;
+ break;
+ }
+ }
+ spin_unlock_irq(mlx4_tlock(dev));
+
+ return err;
+}
+
+int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ struct mlx4_mtt mtt;
+ __be64 *page_list = inbox->buf;
+ u64 *pg_list = (u64 *)page_list;
+ int i;
+ struct res_mtt *rmtt = NULL;
+ int start = be64_to_cpu(page_list[0]);
+ int npages = vhcr->in_modifier;
+ int err;
+
+ err = get_containing_mtt(dev, slave, start, npages, &rmtt);
+ if (err)
+ return err;
+
+	/* Call the SW implementation of write_mtt:
+	 * - Prepare a dummy mtt struct
+	 * - Translate inbox contents to simple addresses in host endianness */
+	mtt.offset = 0; /* TBD: offset handling is broken, but it is not
+			   actually used here */
+ mtt.order = 0;
+ mtt.page_shift = 0;
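+	/* Mailbox layout: page_list[0] holds the starting MTT offset,
+	 * page_list[1] is unused here, and the page addresses start at
+	 * index 2; clear bit 0 (the present bit) of each address before
+	 * handing them to the SW write below. */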
+ for (i = 0; i < npages; ++i)
+ pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
+
+ err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
+ ((u64 *)page_list + 2));
+
+ if (rmtt)
+ put_res(dev, slave, rmtt->com.res_id, RES_MTT);
+
+ return err;
+}
+
+int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int eqn = vhcr->in_modifier;
+ int res_id = eqn | (slave << 8);
+ struct res_eq *eq;
+ int err;
+
+ err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
+ if (err)
+ return err;
+
+ err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
+ if (err)
+ goto ex_abort;
+
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ if (err)
+ goto ex_put;
+
+ atomic_dec(&eq->mtt->ref_count);
+ put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
+ res_end_move(dev, slave, RES_EQ, res_id);
+ rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
+
+ return 0;
+
+ex_put:
+ put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
+ex_abort:
+ res_abort_move(dev, slave, RES_EQ, res_id);
+
+ return err;
+}
+
+int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_slave_event_eq_info *event_eq;
+ struct mlx4_cmd_mailbox *mailbox;
+ u32 in_modifier = 0;
+ int err;
+ int res_id;
+ struct res_eq *req;
+
+ if (!priv->mfunc.master.slave_state)
+ return -EINVAL;
+
+ event_eq = &priv->mfunc.master.slave_state[slave].event_eq;
+
+ /* Create the event only if the slave is registered */
+ if ((event_eq->event_type & (1 << eqe->type)) == 0)
+ return 0;
+
+ mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
+ res_id = (slave << 8) | event_eq->eqn;
+ err = get_res(dev, slave, res_id, RES_EQ, &req);
+ if (err)
+ goto unlock;
+
+ if (req->com.from_state != RES_EQ_HW) {
+ err = -EINVAL;
+ goto put;
+ }
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox)) {
+ err = PTR_ERR(mailbox);
+ goto put;
+ }
+
+ if (eqe->type == MLX4_EVENT_TYPE_CMD) {
+ ++event_eq->token;
+ eqe->event.cmd.token = cpu_to_be16(event_eq->token);
+ }
+
+ memcpy(mailbox->buf, (u8 *) eqe, 28);
+
+ in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);
+
+ err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
+ MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_NATIVE);
+
+ put_res(dev, slave, res_id, RES_EQ);
+ mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+
+put:
+ put_res(dev, slave, res_id, RES_EQ);
+
+unlock:
+ mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
+ return err;
+}
+
+int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int eqn = vhcr->in_modifier;
+ int res_id = eqn | (slave << 8);
+ struct res_eq *eq;
+ int err;
+
+ err = get_res(dev, slave, res_id, RES_EQ, &eq);
+ if (err)
+ return err;
+
+ if (eq->com.from_state != RES_EQ_HW) {
+ err = -EINVAL;
+ goto ex_put;
+ }
+
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+
+ex_put:
+ put_res(dev, slave, res_id, RES_EQ);
+ return err;
+}
+
+int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int cqn = vhcr->in_modifier;
+ struct mlx4_cq_context *cqc = inbox->buf;
+ int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
+ struct res_cq *cq;
+ struct res_mtt *mtt;
+
+ err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
+ if (err)
+ return err;
+ err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
+ if (err)
+ goto out_move;
+ err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
+ if (err)
+ goto out_put;
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ if (err)
+ goto out_put;
+ atomic_inc(&mtt->ref_count);
+ cq->mtt = mtt;
+ put_res(dev, slave, mtt->com.res_id, RES_MTT);
+ res_end_move(dev, slave, RES_CQ, cqn);
+ return 0;
+
+out_put:
+ put_res(dev, slave, mtt->com.res_id, RES_MTT);
+out_move:
+ res_abort_move(dev, slave, RES_CQ, cqn);
+ return err;
+}
+
+int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int cqn = vhcr->in_modifier;
+ struct res_cq *cq;
+
+ err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
+ if (err)
+ return err;
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ if (err)
+ goto out_move;
+ atomic_dec(&cq->mtt->ref_count);
+ res_end_move(dev, slave, RES_CQ, cqn);
+ return 0;
+
+out_move:
+ res_abort_move(dev, slave, RES_CQ, cqn);
+ return err;
+}
+
+int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int cqn = vhcr->in_modifier;
+ struct res_cq *cq;
+ int err;
+
+ err = get_res(dev, slave, cqn, RES_CQ, &cq);
+ if (err)
+ return err;
+
+ if (cq->com.from_state != RES_CQ_HW)
+ goto ex_put;
+
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ex_put:
+ put_res(dev, slave, cqn, RES_CQ);
+
+ return err;
+}
+
+static int handle_resize(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd,
+ struct res_cq *cq)
+{
+ int err;
+ struct res_mtt *orig_mtt;
+ struct res_mtt *mtt;
+ struct mlx4_cq_context *cqc = inbox->buf;
+ int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
+
+ err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
+ if (err)
+ return err;
+
+ if (orig_mtt != cq->mtt) {
+ err = -EINVAL;
+ goto ex_put;
+ }
+
+ err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
+ if (err)
+ goto ex_put;
+
+ err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
+ if (err)
+ goto ex_put1;
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ if (err)
+ goto ex_put1;
+ atomic_dec(&orig_mtt->ref_count);
+ put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
+ atomic_inc(&mtt->ref_count);
+ cq->mtt = mtt;
+ put_res(dev, slave, mtt->com.res_id, RES_MTT);
+ return 0;
+
+ex_put1:
+ put_res(dev, slave, mtt->com.res_id, RES_MTT);
+ex_put:
+ put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
+
+ return err;
+}
+
+int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int cqn = vhcr->in_modifier;
+ struct res_cq *cq;
+ int err;
+
+ err = get_res(dev, slave, cqn, RES_CQ, &cq);
+ if (err)
+ return err;
+
+ if (cq->com.from_state != RES_CQ_HW)
+ goto ex_put;
+
+ if (vhcr->op_modifier == 0) {
+ err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
+ if (err)
+ goto ex_put;
+ }
+
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ex_put:
+ put_res(dev, slave, cqn, RES_CQ);
+
+ return err;
+}
+
+static int srq_get_pdn(struct mlx4_srq_context *srqc)
+{
+ return be32_to_cpu(srqc->pd) & 0xffffff;
+}
+
+static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
+{
+ int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
+ int log_rq_stride = srqc->logstride & 7;
+ int page_shift = (srqc->log_page_size & 0x3f) + 12;
+
+ if (log_srq_size + log_rq_stride + 4 < page_shift)
+ return 1;
+
+ return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
+}
+
+int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int srqn = vhcr->in_modifier;
+ struct res_mtt *mtt;
+ struct res_srq *srq;
+ struct mlx4_srq_context *srqc = inbox->buf;
+ int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
+
+ if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
+ return -EINVAL;
+
+ err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
+ if (err)
+ return err;
+ err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
+ if (err)
+ goto ex_abort;
+ err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
+ mtt);
+ if (err)
+ goto ex_put_mtt;
+
+ if (pdn2slave(srq_get_pdn(srqc)) != slave) {
+ err = -EPERM;
+ goto ex_put_mtt;
+ }
+
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ if (err)
+ goto ex_put_mtt;
+
+ atomic_inc(&mtt->ref_count);
+ srq->mtt = mtt;
+ put_res(dev, slave, mtt->com.res_id, RES_MTT);
+ res_end_move(dev, slave, RES_SRQ, srqn);
+ return 0;
+
+ex_put_mtt:
+ put_res(dev, slave, mtt->com.res_id, RES_MTT);
+ex_abort:
+ res_abort_move(dev, slave, RES_SRQ, srqn);
+
+ return err;
+}
+
+int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int srqn = vhcr->in_modifier;
+ struct res_srq *srq;
+
+ err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
+ if (err)
+ return err;
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ if (err)
+ goto ex_abort;
+ atomic_dec(&srq->mtt->ref_count);
+ if (srq->cq)
+ atomic_dec(&srq->cq->ref_count);
+ res_end_move(dev, slave, RES_SRQ, srqn);
+
+ return 0;
+
+ex_abort:
+ res_abort_move(dev, slave, RES_SRQ, srqn);
+
+ return err;
+}
+
+int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int srqn = vhcr->in_modifier;
+ struct res_srq *srq;
+
+ err = get_res(dev, slave, srqn, RES_SRQ, &srq);
+ if (err)
+ return err;
+ if (srq->com.from_state != RES_SRQ_HW) {
+ err = -EBUSY;
+ goto out;
+ }
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+out:
+ put_res(dev, slave, srqn, RES_SRQ);
+ return err;
+}
+
+int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int srqn = vhcr->in_modifier;
+ struct res_srq *srq;
+
+ err = get_res(dev, slave, srqn, RES_SRQ, &srq);
+ if (err)
+ return err;
+
+ if (srq->com.from_state != RES_SRQ_HW) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+out:
+ put_res(dev, slave, srqn, RES_SRQ);
+ return err;
+}
+
+int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int qpn = vhcr->in_modifier & 0x7fffff;
+ struct res_qp *qp;
+
+ err = get_res(dev, slave, qpn, RES_QP, &qp);
+ if (err)
+ return err;
+ if (qp->com.from_state != RES_QP_HW) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+out:
+ put_res(dev, slave, qpn, RES_QP);
+ return err;
+}
+
+int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ struct mlx4_qp_context *qpc = inbox->buf + 8;
+
+ update_ud_gid(dev, qpc, (u8)slave);
+
+ return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+}
+
+int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int qpn = vhcr->in_modifier & 0x7fffff;
+ struct res_qp *qp;
+
+ err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
+ if (err)
+ return err;
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ if (err)
+ goto ex_abort;
+
+ atomic_dec(&qp->mtt->ref_count);
+ atomic_dec(&qp->rcq->ref_count);
+ atomic_dec(&qp->scq->ref_count);
+ if (qp->srq)
+ atomic_dec(&qp->srq->ref_count);
+ res_end_move(dev, slave, RES_QP, qpn);
+ return 0;
+
+ex_abort:
+ res_abort_move(dev, slave, RES_QP, qpn);
+
+ return err;
+}
+
+static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
+ struct res_qp *rqp, u8 *gid)
+{
+ struct res_gid *res;
+
+ list_for_each_entry(res, &rqp->mcg_list, list) {
+ if (!memcmp(res->gid, gid, 16))
+ return res;
+ }
+ return NULL;
+}
+
+static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
+ u8 *gid, enum mlx4_protocol prot)
+{
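+	/* remember the gid on the QP's mcg_list so detach_qp() can undo the
+	 * attach if the slave goes away without detaching */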
+ struct res_gid *res;
+ int err;
+
+ res = kzalloc(sizeof *res, GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
+
+ spin_lock_irq(&rqp->mcg_spl);
+ if (find_gid(dev, slave, rqp, gid)) {
+ kfree(res);
+ err = -EEXIST;
+ } else {
+ memcpy(res->gid, gid, 16);
+ res->prot = prot;
+ list_add_tail(&res->list, &rqp->mcg_list);
+ err = 0;
+ }
+ spin_unlock_irq(&rqp->mcg_spl);
+
+ return err;
+}
+
+static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
+ u8 *gid, enum mlx4_protocol prot)
+{
+ struct res_gid *res;
+ int err;
+
+ spin_lock_irq(&rqp->mcg_spl);
+ res = find_gid(dev, slave, rqp, gid);
+ if (!res || res->prot != prot)
+ err = -EINVAL;
+ else {
+ list_del(&res->list);
+ kfree(res);
+ err = 0;
+ }
+ spin_unlock_irq(&rqp->mcg_spl);
+
+ return err;
+}
+
+int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ struct mlx4_qp qp; /* dummy for calling attach/detach */
+ u8 *gid = inbox->buf;
+ enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
+ int err, err1;
+ int qpn;
+ struct res_qp *rqp;
+ int attach = vhcr->op_modifier;
+ int block_loopback = vhcr->in_modifier >> 31;
+ u8 steer_type_mask = 2;
+ enum mlx4_steer_type type = gid[7] & steer_type_mask;
+
+ qpn = vhcr->in_modifier & 0xffffff;
+ err = get_res(dev, slave, qpn, RES_QP, &rqp);
+ if (err)
+ return err;
+
+ qp.qpn = qpn;
+ if (attach) {
+ err = add_mcg_res(dev, slave, rqp, gid, prot);
+ if (err)
+ goto ex_put;
+
+ err = mlx4_qp_attach_common(dev, &qp, gid,
+ block_loopback, prot, type);
+ if (err)
+ goto ex_rem;
+ } else {
+ err = rem_mcg_res(dev, slave, rqp, gid, prot);
+ if (err)
+ goto ex_put;
+ err = mlx4_qp_detach_common(dev, &qp, gid, prot, type);
+ }
+
+ put_res(dev, slave, qpn, RES_QP);
+ return 0;
+
+ex_rem:
+ /* ignore error return below, already in error */
+ err1 = rem_mcg_res(dev, slave, rqp, gid, prot);
+ex_put:
+ put_res(dev, slave, qpn, RES_QP);
+
+ return err;
+}
+
+enum {
+ BUSY_MAX_RETRIES = 10
+};
+
+int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int index = vhcr->in_modifier & 0xffff;
+
+ err = get_res(dev, slave, index, RES_COUNTER, NULL);
+ if (err)
+ return err;
+
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ put_res(dev, slave, index, RES_COUNTER);
+ return err;
+}
+
+static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
+{
+ struct res_gid *rgid;
+ struct res_gid *tmp;
+ int err;
+ struct mlx4_qp qp; /* dummy for calling attach/detach */
+
+ list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
+ qp.qpn = rqp->local_qpn;
+ err = mlx4_qp_detach_common(dev, &qp, rgid->gid, rgid->prot,
+ MLX4_MC_STEER);
+ list_del(&rgid->list);
+ kfree(rgid);
+ }
+}
+
+static int _move_all_busy(struct mlx4_dev *dev, int slave,
+ enum mlx4_resource type, int print)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker =
+ &priv->mfunc.master.res_tracker;
+ struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
+ struct res_common *r;
+ struct res_common *tmp;
+ int busy;
+
+ busy = 0;
+ spin_lock_irq(mlx4_tlock(dev));
+ list_for_each_entry_safe(r, tmp, rlist, list) {
+ if (r->owner == slave) {
+ if (!r->removing) {
+ if (r->state == RES_ANY_BUSY) {
+ if (print)
+ mlx4_dbg(dev,
+ "%s id 0x%x is busy\n",
+ ResourceType(type),
+ r->res_id);
+ ++busy;
+ } else {
+ r->from_state = r->state;
+ r->state = RES_ANY_BUSY;
+ r->removing = 1;
+ }
+ }
+ }
+ }
+ spin_unlock_irq(mlx4_tlock(dev));
+
+ return busy;
+}
+
+static int move_all_busy(struct mlx4_dev *dev, int slave,
+ enum mlx4_resource type)
+{
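+	/* mark all of the slave's resources of this type busy and flagged
+	 * for removal, retrying for up to five seconds on entries that are
+	 * busy in another operation */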
+ unsigned long begin;
+ int busy;
+
+ begin = jiffies;
+ do {
+ busy = _move_all_busy(dev, slave, type, 0);
+ if (time_after(jiffies, begin + 5 * HZ))
+ break;
+ if (busy)
+ cond_resched();
+ } while (busy);
+
+ if (busy)
+ busy = _move_all_busy(dev, slave, type, 1);
+
+ return busy;
+}
+static void rem_slave_qps(struct mlx4_dev *dev, int slave)
+{
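+	/* unwind every QP still owned by the slave, state by state
+	 * (HW -> MAPPED -> RESERVED -> freed), detaching multicast groups,
+	 * issuing 2RST and releasing ICM along the way */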
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct list_head *qp_list =
+ &tracker->slave_list[slave].res_list[RES_QP];
+ struct res_qp *qp;
+ struct res_qp *tmp;
+ int state;
+ u64 in_param;
+ int qpn;
+ int err;
+
+ err = move_all_busy(dev, slave, RES_QP);
+ if (err)
+		mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy "
+			  "for slave %d\n", slave);
+
+ spin_lock_irq(mlx4_tlock(dev));
+ list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
+ spin_unlock_irq(mlx4_tlock(dev));
+ if (qp->com.owner == slave) {
+ qpn = qp->com.res_id;
+ detach_qp(dev, slave, qp);
+ state = qp->com.from_state;
+ while (state != 0) {
+ switch (state) {
+ case RES_QP_RESERVED:
+ spin_lock_irq(mlx4_tlock(dev));
+ radix_tree_delete(&tracker->res_tree[RES_QP],
+ qp->com.res_id);
+ list_del(&qp->com.list);
+ spin_unlock_irq(mlx4_tlock(dev));
+ kfree(qp);
+ state = 0;
+ break;
+ case RES_QP_MAPPED:
+ if (!valid_reserved(dev, slave, qpn))
+ __mlx4_qp_free_icm(dev, qpn);
+ state = RES_QP_RESERVED;
+ break;
+ case RES_QP_HW:
+ in_param = slave;
+ err = mlx4_cmd(dev, in_param,
+ qp->local_qpn, 2,
+ MLX4_CMD_2RST_QP,
+ MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+ if (err)
+ mlx4_dbg(dev, "rem_slave_qps: failed"
+ " to move slave %d qpn %d to"
+ " reset\n", slave,
+ qp->local_qpn);
+ atomic_dec(&qp->rcq->ref_count);
+ atomic_dec(&qp->scq->ref_count);
+ atomic_dec(&qp->mtt->ref_count);
+ if (qp->srq)
+ atomic_dec(&qp->srq->ref_count);
+ state = RES_QP_MAPPED;
+ break;
+ default:
+ state = 0;
+ }
+ }
+ }
+ spin_lock_irq(mlx4_tlock(dev));
+ }
+ spin_unlock_irq(mlx4_tlock(dev));
+}
+
+static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct list_head *srq_list =
+ &tracker->slave_list[slave].res_list[RES_SRQ];
+ struct res_srq *srq;
+ struct res_srq *tmp;
+ int state;
+ u64 in_param;
+ LIST_HEAD(tlist);
+ int srqn;
+ int err;
+
+ err = move_all_busy(dev, slave, RES_SRQ);
+ if (err)
+ mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to "
+ "busy for slave %d\n", slave);
+
+ spin_lock_irq(mlx4_tlock(dev));
+ list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
+ spin_unlock_irq(mlx4_tlock(dev));
+ if (srq->com.owner == slave) {
+ srqn = srq->com.res_id;
+ state = srq->com.from_state;
+ while (state != 0) {
+ switch (state) {
+ case RES_SRQ_ALLOCATED:
+ __mlx4_srq_free_icm(dev, srqn);
+ spin_lock_irq(mlx4_tlock(dev));
+ radix_tree_delete(&tracker->res_tree[RES_SRQ],
+ srqn);
+ list_del(&srq->com.list);
+ spin_unlock_irq(mlx4_tlock(dev));
+ kfree(srq);
+ state = 0;
+ break;
+
+ case RES_SRQ_HW:
+ in_param = slave;
+ err = mlx4_cmd(dev, in_param, srqn, 1,
+ MLX4_CMD_HW2SW_SRQ,
+ MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+ if (err)
+ mlx4_dbg(dev, "rem_slave_srqs: failed"
+ " to move slave %d srq %d to"
+ " SW ownership\n",
+ slave, srqn);
+
+ atomic_dec(&srq->mtt->ref_count);
+ if (srq->cq)
+ atomic_dec(&srq->cq->ref_count);
+ state = RES_SRQ_ALLOCATED;
+ break;
+
+ default:
+ state = 0;
+ }
+ }
+ }
+ spin_lock_irq(mlx4_tlock(dev));
+ }
+ spin_unlock_irq(mlx4_tlock(dev));
+}
+
+static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct list_head *cq_list =
+ &tracker->slave_list[slave].res_list[RES_CQ];
+ struct res_cq *cq;
+ struct res_cq *tmp;
+ int state;
+ u64 in_param;
+ LIST_HEAD(tlist);
+ int cqn;
+ int err;
+
+ err = move_all_busy(dev, slave, RES_CQ);
+ if (err)
+ mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to "
+ "busy for slave %d\n", slave);
+
+ spin_lock_irq(mlx4_tlock(dev));
+ list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
+ spin_unlock_irq(mlx4_tlock(dev));
+ if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
+ cqn = cq->com.res_id;
+ state = cq->com.from_state;
+ while (state != 0) {
+ switch (state) {
+ case RES_CQ_ALLOCATED:
+ __mlx4_cq_free_icm(dev, cqn);
+ spin_lock_irq(mlx4_tlock(dev));
+ radix_tree_delete(&tracker->res_tree[RES_CQ],
+ cqn);
+ list_del(&cq->com.list);
+ spin_unlock_irq(mlx4_tlock(dev));
+ kfree(cq);
+ state = 0;
+ break;
+
+ case RES_CQ_HW:
+ in_param = slave;
+ err = mlx4_cmd(dev, in_param, cqn, 1,
+ MLX4_CMD_HW2SW_CQ,
+ MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+ if (err)
+ mlx4_dbg(dev, "rem_slave_cqs: failed"
+ " to move slave %d cq %d to"
+ " SW ownership\n",
+ slave, cqn);
+ atomic_dec(&cq->mtt->ref_count);
+ state = RES_CQ_ALLOCATED;
+ break;
+
+ default:
+ state = 0;
+ }
+ }
+ }
+ spin_lock_irq(mlx4_tlock(dev));
+ }
+ spin_unlock_irq(mlx4_tlock(dev));
+}
+
+static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct list_head *mpt_list =
+ &tracker->slave_list[slave].res_list[RES_MPT];
+ struct res_mpt *mpt;
+ struct res_mpt *tmp;
+ int state;
+ u64 in_param;
+ LIST_HEAD(tlist);
+ int mptn;
+ int err;
+
+ err = move_all_busy(dev, slave, RES_MPT);
+ if (err)
+ mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to "
+ "busy for slave %d\n", slave);
+
+ spin_lock_irq(mlx4_tlock(dev));
+ list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
+ spin_unlock_irq(mlx4_tlock(dev));
+ if (mpt->com.owner == slave) {
+ mptn = mpt->com.res_id;
+ state = mpt->com.from_state;
+ while (state != 0) {
+ switch (state) {
+ case RES_MPT_RESERVED:
+ __mlx4_mr_release(dev, mpt->key);
+ spin_lock_irq(mlx4_tlock(dev));
+ radix_tree_delete(&tracker->res_tree[RES_MPT],
+ mptn);
+ list_del(&mpt->com.list);
+ spin_unlock_irq(mlx4_tlock(dev));
+ kfree(mpt);
+ state = 0;
+ break;
+
+ case RES_MPT_MAPPED:
+ __mlx4_mr_free_icm(dev, mpt->key);
+ state = RES_MPT_RESERVED;
+ break;
+
+ case RES_MPT_HW:
+ in_param = slave;
+ err = mlx4_cmd(dev, in_param, mptn, 0,
+ MLX4_CMD_HW2SW_MPT,
+ MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+ if (err)
+ mlx4_dbg(dev, "rem_slave_mrs: failed"
+ " to move slave %d mpt %d to"
+ " SW ownership\n",
+ slave, mptn);
+ if (mpt->mtt)
+ atomic_dec(&mpt->mtt->ref_count);
+ state = RES_MPT_MAPPED;
+ break;
+ default:
+ state = 0;
+ }
+ }
+ }
+ spin_lock_irq(mlx4_tlock(dev));
+ }
+ spin_unlock_irq(mlx4_tlock(dev));
+}
+
+static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker =
+ &priv->mfunc.master.res_tracker;
+ struct list_head *mtt_list =
+ &tracker->slave_list[slave].res_list[RES_MTT];
+ struct res_mtt *mtt;
+ struct res_mtt *tmp;
+ int state;
+ LIST_HEAD(tlist);
+ int base;
+ int err;
+
+ err = move_all_busy(dev, slave, RES_MTT);
+ if (err)
+ mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to "
+ "busy for slave %d\n", slave);
+
+ spin_lock_irq(mlx4_tlock(dev));
+ list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
+ spin_unlock_irq(mlx4_tlock(dev));
+ if (mtt->com.owner == slave) {
+ base = mtt->com.res_id;
+ state = mtt->com.from_state;
+ while (state != 0) {
+ switch (state) {
+ case RES_MTT_ALLOCATED:
+ __mlx4_free_mtt_range(dev, base,
+ mtt->order);
+ spin_lock_irq(mlx4_tlock(dev));
+ radix_tree_delete(&tracker->res_tree[RES_MTT],
+ base);
+ list_del(&mtt->com.list);
+ spin_unlock_irq(mlx4_tlock(dev));
+ kfree(mtt);
+ state = 0;
+ break;
+
+ default:
+ state = 0;
+ }
+ }
+ }
+ spin_lock_irq(mlx4_tlock(dev));
+ }
+ spin_unlock_irq(mlx4_tlock(dev));
+}
+
+static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct list_head *eq_list =
+ &tracker->slave_list[slave].res_list[RES_EQ];
+ struct res_eq *eq;
+ struct res_eq *tmp;
+ int err;
+ int state;
+ LIST_HEAD(tlist);
+ int eqn;
+ struct mlx4_cmd_mailbox *mailbox;
+
+ err = move_all_busy(dev, slave, RES_EQ);
+ if (err)
+ mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to "
+ "busy for slave %d\n", slave);
+
+ spin_lock_irq(mlx4_tlock(dev));
+ list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
+ spin_unlock_irq(mlx4_tlock(dev));
+ if (eq->com.owner == slave) {
+ eqn = eq->com.res_id;
+ state = eq->com.from_state;
+ while (state != 0) {
+ switch (state) {
+ case RES_EQ_RESERVED:
+ spin_lock_irq(mlx4_tlock(dev));
+ radix_tree_delete(&tracker->res_tree[RES_EQ],
+ eqn);
+ list_del(&eq->com.list);
+ spin_unlock_irq(mlx4_tlock(dev));
+ kfree(eq);
+ state = 0;
+ break;
+
+ case RES_EQ_HW:
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox)) {
+ cond_resched();
+ continue;
+ }
+ err = mlx4_cmd_box(dev, slave, 0,
+ eqn & 0xff, 0,
+ MLX4_CMD_HW2SW_EQ,
+ MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+					if (err)
+						mlx4_dbg(dev, "rem_slave_eqs: failed"
+							 " to move slave %d eq %d to"
+							 " SW ownership\n", slave, eqn);
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ if (!err) {
+ atomic_dec(&eq->mtt->ref_count);
+ state = RES_EQ_RESERVED;
+ }
+ break;
+
+ default:
+ state = 0;
+ }
+ }
+ }
+ spin_lock_irq(mlx4_tlock(dev));
+ }
+ spin_unlock_irq(mlx4_tlock(dev));
+}
+
+void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
+	/* VLAN resources are not tracked yet */
+ rem_slave_macs(dev, slave);
+ rem_slave_qps(dev, slave);
+ rem_slave_srqs(dev, slave);
+ rem_slave_cqs(dev, slave);
+ rem_slave_mrs(dev, slave);
+ rem_slave_eqs(dev, slave);
+ rem_slave_mtts(dev, slave);
+ mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx4/sense.c b/drivers/net/ethernet/mellanox/mlx4/sense.c
index e2337a7411d9..802498293528 100644
--- a/drivers/net/ethernet/mellanox/mlx4/sense.c
+++ b/drivers/net/ethernet/mellanox/mlx4/sense.c
@@ -45,7 +45,8 @@ int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
int err = 0;
err = mlx4_cmd_imm(dev, 0, &out_param, port, 0,
- MLX4_CMD_SENSE_PORT, MLX4_CMD_TIME_CLASS_B);
+ MLX4_CMD_SENSE_PORT, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_WRAPPED);
if (err) {
mlx4_err(dev, "Sense command failed for port: %d\n", port);
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx4/srq.c b/drivers/net/ethernet/mellanox/mlx4/srq.c
index 9cbf3fce0145..2823fffc6383 100644
--- a/drivers/net/ethernet/mellanox/mlx4/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/srq.c
@@ -31,6 +31,8 @@
* SOFTWARE.
*/
+#include <linux/init.h>
+
#include <linux/mlx4/cmd.h>
#include <linux/export.h>
#include <linux/gfp.h>
@@ -38,26 +40,6 @@
#include "mlx4.h"
#include "icm.h"
-struct mlx4_srq_context {
- __be32 state_logsize_srqn;
- u8 logstride;
- u8 reserved1;
- __be16 xrcd;
- __be32 pg_offset_cqn;
- u32 reserved2;
- u8 log_page_size;
- u8 reserved3[2];
- u8 mtt_base_addr_h;
- __be32 mtt_base_addr_l;
- __be32 pd;
- __be16 limit_watermark;
- __be16 wqe_cnt;
- u16 reserved4;
- __be16 wqe_counter;
- u32 reserved5;
- __be64 db_rec_addr;
-};
-
void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type)
{
struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
@@ -85,8 +67,9 @@ void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type)
static int mlx4_SW2HW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int srq_num)
{
- return mlx4_cmd(dev, mailbox->dma, srq_num, 0, MLX4_CMD_SW2HW_SRQ,
- MLX4_CMD_TIME_CLASS_A);
+ return mlx4_cmd(dev, mailbox->dma | dev->caps.function, srq_num, 0,
+ MLX4_CMD_SW2HW_SRQ, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_WRAPPED);
}
static int mlx4_HW2SW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
@@ -94,48 +77,109 @@ static int mlx4_HW2SW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox
{
return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, srq_num,
mailbox ? 0 : 1, MLX4_CMD_HW2SW_SRQ,
- MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}
static int mlx4_ARM_SRQ(struct mlx4_dev *dev, int srq_num, int limit_watermark)
{
return mlx4_cmd(dev, limit_watermark, srq_num, 0, MLX4_CMD_ARM_SRQ,
- MLX4_CMD_TIME_CLASS_B);
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
}
static int mlx4_QUERY_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int srq_num)
{
return mlx4_cmd_box(dev, 0, mailbox->dma, srq_num, 0, MLX4_CMD_QUERY_SRQ,
- MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}
-int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcd,
- struct mlx4_mtt *mtt, u64 db_rec, struct mlx4_srq *srq)
+int __mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn)
{
struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
- struct mlx4_cmd_mailbox *mailbox;
- struct mlx4_srq_context *srq_context;
- u64 mtt_addr;
int err;
- srq->srqn = mlx4_bitmap_alloc(&srq_table->bitmap);
- if (srq->srqn == -1)
+
+ *srqn = mlx4_bitmap_alloc(&srq_table->bitmap);
+ if (*srqn == -1)
return -ENOMEM;
- err = mlx4_table_get(dev, &srq_table->table, srq->srqn);
+ err = mlx4_table_get(dev, &srq_table->table, *srqn);
if (err)
goto err_out;
- err = mlx4_table_get(dev, &srq_table->cmpt_table, srq->srqn);
+ err = mlx4_table_get(dev, &srq_table->cmpt_table, *srqn);
if (err)
goto err_put;
+ return 0;
+
+err_put:
+ mlx4_table_put(dev, &srq_table->table, *srqn);
+
+err_out:
+ mlx4_bitmap_free(&srq_table->bitmap, *srqn);
+ return err;
+}
+
+static int mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn)
+{
+ u64 out_param;
+ int err;
+
+ if (mlx4_is_mfunc(dev)) {
+ err = mlx4_cmd_imm(dev, 0, &out_param, RES_SRQ,
+ RES_OP_RESERVE_AND_MAP,
+ MLX4_CMD_ALLOC_RES,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ if (!err)
+ *srqn = get_param_l(&out_param);
+
+ return err;
+ }
+ return __mlx4_srq_alloc_icm(dev, srqn);
+}
+
+void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn)
+{
+ struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
+
+ mlx4_table_put(dev, &srq_table->cmpt_table, srqn);
+ mlx4_table_put(dev, &srq_table->table, srqn);
+ mlx4_bitmap_free(&srq_table->bitmap, srqn);
+}
+
+static void mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn)
+{
+ u64 in_param;
+
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&in_param, srqn);
+ if (mlx4_cmd(dev, in_param, RES_SRQ, RES_OP_RESERVE_AND_MAP,
+ MLX4_CMD_FREE_RES,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
+			mlx4_warn(dev, "Failed freeing srq:%d\n", srqn);
+ return;
+ }
+ __mlx4_srq_free_icm(dev, srqn);
+}
+
+int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcd,
+ struct mlx4_mtt *mtt, u64 db_rec, struct mlx4_srq *srq)
+{
+ struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_srq_context *srq_context;
+ u64 mtt_addr;
+ int err;
+
+ err = mlx4_srq_alloc_icm(dev, &srq->srqn);
+ if (err)
+ return err;
spin_lock_irq(&srq_table->lock);
err = radix_tree_insert(&srq_table->tree, srq->srqn, srq);
spin_unlock_irq(&srq_table->lock);
if (err)
- goto err_cmpt_put;
+ goto err_icm;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox)) {
@@ -174,15 +218,8 @@ err_radix:
radix_tree_delete(&srq_table->tree, srq->srqn);
spin_unlock_irq(&srq_table->lock);
-err_cmpt_put:
- mlx4_table_put(dev, &srq_table->cmpt_table, srq->srqn);
-
-err_put:
- mlx4_table_put(dev, &srq_table->table, srq->srqn);
-
-err_out:
- mlx4_bitmap_free(&srq_table->bitmap, srq->srqn);
-
+err_icm:
+ mlx4_srq_free_icm(dev, srq->srqn);
return err;
}
EXPORT_SYMBOL_GPL(mlx4_srq_alloc);
@@ -204,8 +241,7 @@ void mlx4_srq_free(struct mlx4_dev *dev, struct mlx4_srq *srq)
complete(&srq->free);
wait_for_completion(&srq->free);
- mlx4_table_put(dev, &srq_table->table, srq->srqn);
- mlx4_bitmap_free(&srq_table->bitmap, srq->srqn);
+ mlx4_srq_free_icm(dev, srq->srqn);
}
EXPORT_SYMBOL_GPL(mlx4_srq_free);
@@ -245,6 +281,8 @@ int mlx4_init_srq_table(struct mlx4_dev *dev)
spin_lock_init(&srq_table->lock);
INIT_RADIX_TREE(&srq_table->tree, GFP_ATOMIC);
+ if (mlx4_is_slave(dev))
+ return 0;
err = mlx4_bitmap_init(&srq_table->bitmap, dev->caps.num_srqs,
dev->caps.num_srqs - 1, dev->caps.reserved_srqs, 0);
@@ -256,5 +294,7 @@ int mlx4_init_srq_table(struct mlx4_dev *dev)
void mlx4_cleanup_srq_table(struct mlx4_dev *dev)
{
+ if (mlx4_is_slave(dev))
+ return;
mlx4_bitmap_cleanup(&mlx4_priv(dev)->srq_table.bitmap);
}
diff --git a/drivers/net/ethernet/micrel/Kconfig b/drivers/net/ethernet/micrel/Kconfig
index d10c2e15f4ed..1ea811cf515b 100644
--- a/drivers/net/ethernet/micrel/Kconfig
+++ b/drivers/net/ethernet/micrel/Kconfig
@@ -42,6 +42,8 @@ config KS8851
select NET_CORE
select MII
select CRC32
+ select MISC_DEVICES
+ select EEPROM_93CX6
---help---
SPI driver for Micrel KS8851 SPI attached network chip.
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c
index 4a6ae057e3b1..0a85690a1321 100644
--- a/drivers/net/ethernet/micrel/ks8842.c
+++ b/drivers/net/ethernet/micrel/ks8842.c
@@ -459,7 +459,7 @@ static int ks8842_tx_frame_dma(struct sk_buff *skb, struct net_device *netdev)
sg_dma_len(&ctl->sg) += 4 - sg_dma_len(&ctl->sg) % 4;
ctl->adesc = ctl->chan->device->device_prep_slave_sg(ctl->chan,
- &ctl->sg, 1, DMA_TO_DEVICE,
+ &ctl->sg, 1, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
if (!ctl->adesc)
return NETDEV_TX_BUSY;
@@ -571,7 +571,7 @@ static int __ks8842_start_new_rx_dma(struct net_device *netdev)
sg_dma_len(sg) = DMA_BUFFER_SIZE;
ctl->adesc = ctl->chan->device->device_prep_slave_sg(ctl->chan,
- sg, 1, DMA_FROM_DEVICE,
+ sg, 1, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
if (!ctl->adesc)
@@ -1264,18 +1264,7 @@ static struct platform_driver ks8842_platform_driver = {
.remove = ks8842_remove,
};
-static int __init ks8842_init(void)
-{
- return platform_driver_register(&ks8842_platform_driver);
-}
-
-static void __exit ks8842_exit(void)
-{
- platform_driver_unregister(&ks8842_platform_driver);
-}
-
-module_init(ks8842_init);
-module_exit(ks8842_exit);
+module_platform_driver(ks8842_platform_driver);
MODULE_DESCRIPTION("Timberdale KS8842 ethernet driver");
MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index f56743a28fc0..6b35e7da9a9c 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -22,6 +22,7 @@
#include <linux/cache.h>
#include <linux/crc32.h>
#include <linux/mii.h>
+#include <linux/eeprom_93cx6.h>
#include <linux/spi/spi.h>
@@ -82,6 +83,7 @@ union ks8851_tx_hdr {
* @rc_ccr: Cached copy of KS_CCR.
* @rc_rxqcr: Cached copy of KS_RXQCR.
* @eeprom_size: Companion eeprom size in Bytes, 0 if no eeprom
+ * @eeprom: 93CX6 EEPROM state for accessing on-board EEPROM.
*
* The @lock ensures that the chip is protected when certain operations are
* in progress. When the read or write packet transfer is in progress, most
@@ -128,6 +130,8 @@ struct ks8851_net {
struct spi_message spi_msg2;
struct spi_transfer spi_xfer1;
struct spi_transfer spi_xfer2[2];
+
+ struct eeprom_93cx6 eeprom;
};
static int msg_enable;
@@ -343,6 +347,26 @@ static void ks8851_soft_reset(struct ks8851_net *ks, unsigned op)
}
/**
+ * ks8851_set_powermode - set power mode of the device
+ * @ks: The device state
+ * @pwrmode: The power mode value to write to KS_PMECR.
+ *
+ * Change the power mode of the chip.
+ */
+static void ks8851_set_powermode(struct ks8851_net *ks, unsigned pwrmode)
+{
+ unsigned pmecr;
+
+ netif_dbg(ks, hw, ks->netdev, "setting power mode %d\n", pwrmode);
+
+ pmecr = ks8851_rdreg16(ks, KS_PMECR);
+ pmecr &= ~PMECR_PM_MASK;
+ pmecr |= pwrmode;
+
+ ks8851_wrreg16(ks, KS_PMECR, pmecr);
+}
+
+/**
* ks8851_write_mac_addr - write mac address to device registers
* @dev: The network device
*
@@ -358,8 +382,15 @@ static int ks8851_write_mac_addr(struct net_device *dev)
mutex_lock(&ks->lock);
+ /*
+ * Wake up chip in case it was powered off when stopped; otherwise,
+ * the first write to the MAC address does not take effect.
+ */
+ ks8851_set_powermode(ks, PMECR_PM_NORMAL);
for (i = 0; i < ETH_ALEN; i++)
ks8851_wrreg8(ks, KS_MAR(i), dev->dev_addr[i]);
+ if (!netif_running(dev))
+ ks8851_set_powermode(ks, PMECR_PM_SOFTDOWN);
mutex_unlock(&ks->lock);
@@ -367,21 +398,47 @@ static int ks8851_write_mac_addr(struct net_device *dev)
}
/**
+ * ks8851_read_mac_addr - read mac address from device registers
+ * @dev: The network device
+ *
+ * Update our copy of the KS8851 MAC address from the registers of @dev.
+ */
+static void ks8851_read_mac_addr(struct net_device *dev)
+{
+ struct ks8851_net *ks = netdev_priv(dev);
+ int i;
+
+ mutex_lock(&ks->lock);
+
+ for (i = 0; i < ETH_ALEN; i++)
+ dev->dev_addr[i] = ks8851_rdreg8(ks, KS_MAR(i));
+
+ mutex_unlock(&ks->lock);
+}
+
+/**
* ks8851_init_mac - initialise the mac address
* @ks: The device structure
*
* Get or create the initial mac address for the device and then set that
- * into the station address register. Currently we assume that the device
- * does not have a valid mac address in it, and so we use random_ether_addr()
+ * into the station address register. If there is an EEPROM present, then
+ * we try that. If no valid mac address is found we use random_ether_addr()
* to create a new one.
- *
- * In future, the driver should check to see if the device has an EEPROM
- * attached and whether that has a valid ethernet address in it.
*/
static void ks8851_init_mac(struct ks8851_net *ks)
{
struct net_device *dev = ks->netdev;
+ /* first, try reading what we've got already */
+ if (ks->rc_ccr & CCR_EEPROM) {
+ ks8851_read_mac_addr(dev);
+ if (is_valid_ether_addr(dev->dev_addr))
+ return;
+
+ netdev_err(ks->netdev, "invalid mac address read %pM\n",
+ dev->dev_addr);
+ }
+
random_ether_addr(dev->dev_addr);
ks8851_write_mac_addr(dev);
}
@@ -739,26 +796,6 @@ static void ks8851_tx_work(struct work_struct *work)
}
/**
- * ks8851_set_powermode - set power mode of the device
- * @ks: The device state
- * @pwrmode: The power mode value to write to KS_PMECR.
- *
- * Change the power mode of the chip.
- */
-static void ks8851_set_powermode(struct ks8851_net *ks, unsigned pwrmode)
-{
- unsigned pmecr;
-
- netif_dbg(ks, hw, ks->netdev, "setting power mode %d\n", pwrmode);
-
- pmecr = ks8851_rdreg16(ks, KS_PMECR);
- pmecr &= ~PMECR_PM_MASK;
- pmecr |= pwrmode;
-
- ks8851_wrreg16(ks, KS_PMECR, pmecr);
-}
-
-/**
* ks8851_net_open - open network device
* @dev: The network device being opened.
*
@@ -1038,234 +1075,6 @@ static const struct net_device_ops ks8851_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
};
-/* Companion eeprom access */
-
-enum { /* EEPROM programming states */
- EEPROM_CONTROL,
- EEPROM_ADDRESS,
- EEPROM_DATA,
- EEPROM_COMPLETE
-};
-
-/**
- * ks8851_eeprom_read - read a 16bits word in ks8851 companion EEPROM
- * @dev: The network device the PHY is on.
- * @addr: EEPROM address to read
- *
- * eeprom_size: used to define the data coding length. Can be changed
- * through debug-fs.
- *
- * Programs a read on the EEPROM using ks8851 EEPROM SW access feature.
- * Warning: The READ feature is not supported on ks8851 revision 0.
- *
- * Rough programming model:
- * - on period start: set clock high and read value on bus
- * - on period / 2: set clock low and program value on bus
- * - start on period / 2
- */
-unsigned int ks8851_eeprom_read(struct net_device *dev, unsigned int addr)
-{
- struct ks8851_net *ks = netdev_priv(dev);
- int eepcr;
- int ctrl = EEPROM_OP_READ;
- int state = EEPROM_CONTROL;
- int bit_count = EEPROM_OP_LEN - 1;
- unsigned int data = 0;
- int dummy;
- unsigned int addr_len;
-
- addr_len = (ks->eeprom_size == 128) ? 6 : 8;
-
- /* start transaction: chip select high, authorize write */
- mutex_lock(&ks->lock);
- eepcr = EEPCR_EESA | EEPCR_EESRWA;
- ks8851_wrreg16(ks, KS_EEPCR, eepcr);
- eepcr |= EEPCR_EECS;
- ks8851_wrreg16(ks, KS_EEPCR, eepcr);
- mutex_unlock(&ks->lock);
-
- while (state != EEPROM_COMPLETE) {
- /* falling clock period starts... */
- /* set EED_IO pin for control and address */
- eepcr &= ~EEPCR_EEDO;
- switch (state) {
- case EEPROM_CONTROL:
- eepcr |= ((ctrl >> bit_count) & 1) << 2;
- if (bit_count-- <= 0) {
- bit_count = addr_len - 1;
- state = EEPROM_ADDRESS;
- }
- break;
- case EEPROM_ADDRESS:
- eepcr |= ((addr >> bit_count) & 1) << 2;
- bit_count--;
- break;
- case EEPROM_DATA:
- /* Change to receive mode */
- eepcr &= ~EEPCR_EESRWA;
- break;
- }
-
- /* lower clock */
- eepcr &= ~EEPCR_EESCK;
-
- mutex_lock(&ks->lock);
- ks8851_wrreg16(ks, KS_EEPCR, eepcr);
- mutex_unlock(&ks->lock);
-
- /* waitread period / 2 */
- udelay(EEPROM_SK_PERIOD / 2);
-
- /* rising clock period starts... */
-
- /* raise clock */
- mutex_lock(&ks->lock);
- eepcr |= EEPCR_EESCK;
- ks8851_wrreg16(ks, KS_EEPCR, eepcr);
- mutex_unlock(&ks->lock);
-
- /* Manage read */
- switch (state) {
- case EEPROM_ADDRESS:
- if (bit_count < 0) {
- bit_count = EEPROM_DATA_LEN - 1;
- state = EEPROM_DATA;
- }
- break;
- case EEPROM_DATA:
- mutex_lock(&ks->lock);
- dummy = ks8851_rdreg16(ks, KS_EEPCR);
- mutex_unlock(&ks->lock);
- data |= ((dummy >> EEPCR_EESB_OFFSET) & 1) << bit_count;
- if (bit_count-- <= 0)
- state = EEPROM_COMPLETE;
- break;
- }
-
- /* wait period / 2 */
- udelay(EEPROM_SK_PERIOD / 2);
- }
-
- /* close transaction */
- mutex_lock(&ks->lock);
- eepcr &= ~EEPCR_EECS;
- ks8851_wrreg16(ks, KS_EEPCR, eepcr);
- eepcr = 0;
- ks8851_wrreg16(ks, KS_EEPCR, eepcr);
- mutex_unlock(&ks->lock);
-
- return data;
-}
-
-/**
- * ks8851_eeprom_write - write a 16bits word in ks8851 companion EEPROM
- * @dev: The network device the PHY is on.
- * @op: operand (can be WRITE, EWEN, EWDS)
- * @addr: EEPROM address to write
- * @data: data to write
- *
- * eeprom_size: used to define the data coding length. Can be changed
- * through debug-fs.
- *
- * Programs a write on the EEPROM using ks8851 EEPROM SW access feature.
- *
- * Note that a write enable is required before writing data.
- *
- * Rough programming model:
- * - on period start: set clock high
- * - on period / 2: set clock low and program value on bus
- * - start on period / 2
- */
-void ks8851_eeprom_write(struct net_device *dev, unsigned int op,
- unsigned int addr, unsigned int data)
-{
- struct ks8851_net *ks = netdev_priv(dev);
- int eepcr;
- int state = EEPROM_CONTROL;
- int bit_count = EEPROM_OP_LEN - 1;
- unsigned int addr_len;
-
- addr_len = (ks->eeprom_size == 128) ? 6 : 8;
-
- switch (op) {
- case EEPROM_OP_EWEN:
- addr = 0x30;
- break;
- case EEPROM_OP_EWDS:
- addr = 0;
- break;
- }
-
- /* start transaction: chip select high, authorize write */
- mutex_lock(&ks->lock);
- eepcr = EEPCR_EESA | EEPCR_EESRWA;
- ks8851_wrreg16(ks, KS_EEPCR, eepcr);
- eepcr |= EEPCR_EECS;
- ks8851_wrreg16(ks, KS_EEPCR, eepcr);
- mutex_unlock(&ks->lock);
-
- while (state != EEPROM_COMPLETE) {
- /* falling clock period starts... */
- /* set EED_IO pin for control and address */
- eepcr &= ~EEPCR_EEDO;
- switch (state) {
- case EEPROM_CONTROL:
- eepcr |= ((op >> bit_count) & 1) << 2;
- if (bit_count-- <= 0) {
- bit_count = addr_len - 1;
- state = EEPROM_ADDRESS;
- }
- break;
- case EEPROM_ADDRESS:
- eepcr |= ((addr >> bit_count) & 1) << 2;
- if (bit_count-- <= 0) {
- if (op == EEPROM_OP_WRITE) {
- bit_count = EEPROM_DATA_LEN - 1;
- state = EEPROM_DATA;
- } else {
- state = EEPROM_COMPLETE;
- }
- }
- break;
- case EEPROM_DATA:
- eepcr |= ((data >> bit_count) & 1) << 2;
- if (bit_count-- <= 0)
- state = EEPROM_COMPLETE;
- break;
- }
-
- /* lower clock */
- eepcr &= ~EEPCR_EESCK;
-
- mutex_lock(&ks->lock);
- ks8851_wrreg16(ks, KS_EEPCR, eepcr);
- mutex_unlock(&ks->lock);
-
- /* wait period / 2 */
- udelay(EEPROM_SK_PERIOD / 2);
-
- /* rising clock period starts... */
-
- /* raise clock */
- eepcr |= EEPCR_EESCK;
- mutex_lock(&ks->lock);
- ks8851_wrreg16(ks, KS_EEPCR, eepcr);
- mutex_unlock(&ks->lock);
-
- /* wait period / 2 */
- udelay(EEPROM_SK_PERIOD / 2);
- }
-
- /* close transaction */
- mutex_lock(&ks->lock);
- eepcr &= ~EEPCR_EECS;
- ks8851_wrreg16(ks, KS_EEPCR, eepcr);
- eepcr = 0;
- ks8851_wrreg16(ks, KS_EEPCR, eepcr);
- mutex_unlock(&ks->lock);
-
-}
-
/* ethtool support */
static void ks8851_get_drvinfo(struct net_device *dev,
@@ -1312,115 +1121,141 @@ static int ks8851_nway_reset(struct net_device *dev)
return mii_nway_restart(&ks->mii);
}
-static int ks8851_get_eeprom_len(struct net_device *dev)
-{
- struct ks8851_net *ks = netdev_priv(dev);
- return ks->eeprom_size;
-}
+/* EEPROM support */
-static int ks8851_get_eeprom(struct net_device *dev,
- struct ethtool_eeprom *eeprom, u8 *bytes)
+static void ks8851_eeprom_regread(struct eeprom_93cx6 *ee)
{
- struct ks8851_net *ks = netdev_priv(dev);
- u16 *eeprom_buff;
- int first_word;
- int last_word;
- int ret_val = 0;
- u16 i;
-
- if (eeprom->len == 0)
- return -EINVAL;
+ struct ks8851_net *ks = ee->data;
+ unsigned val;
- if (eeprom->len > ks->eeprom_size)
- return -EINVAL;
+ val = ks8851_rdreg16(ks, KS_EEPCR);
- eeprom->magic = ks8851_rdreg16(ks, KS_CIDER);
+ ee->reg_data_out = (val & EEPCR_EESB) ? 1 : 0;
+ ee->reg_data_clock = (val & EEPCR_EESCK) ? 1 : 0;
+ ee->reg_chip_select = (val & EEPCR_EECS) ? 1 : 0;
+}
- first_word = eeprom->offset >> 1;
- last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+static void ks8851_eeprom_regwrite(struct eeprom_93cx6 *ee)
+{
+ struct ks8851_net *ks = ee->data;
+ unsigned val = EEPCR_EESA; /* default - eeprom access on */
+
+ if (ee->drive_data)
+ val |= EEPCR_EESRWA;
+ if (ee->reg_data_in)
+ val |= EEPCR_EEDO;
+ if (ee->reg_data_clock)
+ val |= EEPCR_EESCK;
+ if (ee->reg_chip_select)
+ val |= EEPCR_EECS;
+
+ ks8851_wrreg16(ks, KS_EEPCR, val);
+}
- eeprom_buff = kmalloc(sizeof(u16) *
- (last_word - first_word + 1), GFP_KERNEL);
- if (!eeprom_buff)
- return -ENOMEM;
+/**
+ * ks8851_eeprom_claim - claim device EEPROM and activate the interface
+ * @ks: The network device state.
+ *
+ * Check for the presence of an EEPROM, and then activate software access
+ * to the device.
+ */
+static int ks8851_eeprom_claim(struct ks8851_net *ks)
+{
+ if (!(ks->rc_ccr & CCR_EEPROM))
+ return -ENOENT;
- for (i = 0; i < last_word - first_word + 1; i++)
- eeprom_buff[i] = ks8851_eeprom_read(dev, first_word + 1);
+ mutex_lock(&ks->lock);
- /* Device's eeprom is little-endian, word addressable */
- for (i = 0; i < last_word - first_word + 1; i++)
- le16_to_cpus(&eeprom_buff[i]);
+ /* start with clock low, cs high */
+ ks8851_wrreg16(ks, KS_EEPCR, EEPCR_EESA | EEPCR_EECS);
+ return 0;
+}
- memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
- kfree(eeprom_buff);
+/**
+ * ks8851_eeprom_release - release the EEPROM interface
+ * @ks: The device state
+ *
+ * Release the software access to the device EEPROM
+ */
+static void ks8851_eeprom_release(struct ks8851_net *ks)
+{
+ unsigned val = ks8851_rdreg16(ks, KS_EEPCR);
- return ret_val;
+ ks8851_wrreg16(ks, KS_EEPCR, val & ~EEPCR_EESA);
+ mutex_unlock(&ks->lock);
}
+#define KS_EEPROM_MAGIC (0x00008851)
+
static int ks8851_set_eeprom(struct net_device *dev,
- struct ethtool_eeprom *eeprom, u8 *bytes)
+ struct ethtool_eeprom *ee, u8 *data)
{
struct ks8851_net *ks = netdev_priv(dev);
- u16 *eeprom_buff;
- void *ptr;
- int max_len;
- int first_word;
- int last_word;
- int ret_val = 0;
- u16 i;
-
- if (eeprom->len == 0)
- return -EOPNOTSUPP;
-
- if (eeprom->len > ks->eeprom_size)
+ int offset = ee->offset;
+ int len = ee->len;
+ u16 tmp;
+
+ /* currently only support byte writing */
+ if (len != 1)
return -EINVAL;
- if (eeprom->magic != ks8851_rdreg16(ks, KS_CIDER))
- return -EFAULT;
+ if (ee->magic != KS_EEPROM_MAGIC)
+ return -EINVAL;
- first_word = eeprom->offset >> 1;
- last_word = (eeprom->offset + eeprom->len - 1) >> 1;
- max_len = (last_word - first_word + 1) * 2;
- eeprom_buff = kmalloc(max_len, GFP_KERNEL);
- if (!eeprom_buff)
- return -ENOMEM;
+ if (ks8851_eeprom_claim(ks))
+ return -ENOENT;
- ptr = (void *)eeprom_buff;
+ eeprom_93cx6_wren(&ks->eeprom, true);
- if (eeprom->offset & 1) {
- /* need read/modify/write of first changed EEPROM word */
- /* only the second byte of the word is being modified */
- eeprom_buff[0] = ks8851_eeprom_read(dev, first_word);
- ptr++;
+	/* ethtool currently only supports writing bytes, which means
+	 * we have to read/modify/write the 16-bit EEPROM words */
+
+ eeprom_93cx6_read(&ks->eeprom, offset/2, &tmp);
+
+ if (offset & 1) {
+ tmp &= 0xff;
+ tmp |= *data << 8;
+ } else {
+ tmp &= 0xff00;
+ tmp |= *data;
}
- if ((eeprom->offset + eeprom->len) & 1)
- /* need read/modify/write of last changed EEPROM word */
- /* only the first byte of the word is being modified */
- eeprom_buff[last_word - first_word] =
- ks8851_eeprom_read(dev, last_word);
+ eeprom_93cx6_write(&ks->eeprom, offset/2, tmp);
+ eeprom_93cx6_wren(&ks->eeprom, false);
- /* Device's eeprom is little-endian, word addressable */
- le16_to_cpus(&eeprom_buff[0]);
- le16_to_cpus(&eeprom_buff[last_word - first_word]);
+ ks8851_eeprom_release(ks);
- memcpy(ptr, bytes, eeprom->len);
+ return 0;
+}
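The read/modify/write above folds the single byte being written into the 16-bit word at offset/2; condensed into a standalone helper (merge_byte is an illustrative name, not part of the patch):

static u16 merge_byte(u16 word, u8 byte, unsigned int offset)
{
	/* odd offsets replace the high byte, even offsets the low byte */
	if (offset & 1)
		return (word & 0x00ff) | (byte << 8);
	return (word & 0xff00) | byte;
}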
- for (i = 0; i < last_word - first_word + 1; i++)
- eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]);
+static int ks8851_get_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *ee, u8 *data)
+{
+ struct ks8851_net *ks = netdev_priv(dev);
+ int offset = ee->offset;
+ int len = ee->len;
- ks8851_eeprom_write(dev, EEPROM_OP_EWEN, 0, 0);
+ /* must be 2 byte aligned */
+ if (len & 1 || offset & 1)
+ return -EINVAL;
- for (i = 0; i < last_word - first_word + 1; i++) {
- ks8851_eeprom_write(dev, EEPROM_OP_WRITE, first_word + i,
- eeprom_buff[i]);
- mdelay(EEPROM_WRITE_TIME);
- }
+ if (ks8851_eeprom_claim(ks))
+ return -ENOENT;
+
+ ee->magic = KS_EEPROM_MAGIC;
- ks8851_eeprom_write(dev, EEPROM_OP_EWDS, 0, 0);
+ eeprom_93cx6_multiread(&ks->eeprom, offset/2, (__le16 *)data, len/2);
+ ks8851_eeprom_release(ks);
- kfree(eeprom_buff);
- return ret_val;
+ return 0;
+}
+
+static int ks8851_get_eeprom_len(struct net_device *dev)
+{
+ struct ks8851_net *ks = netdev_priv(dev);
+
+	/* currently, we assume a 93C46 is attached, so return 128 */
+ return ks->rc_ccr & CCR_EEPROM ? 128 : 0;
}
static const struct ethtool_ops ks8851_ethtool_ops = {
@@ -1613,6 +1448,13 @@ static int __devinit ks8851_probe(struct spi_device *spi)
spi_message_add_tail(&ks->spi_xfer2[0], &ks->spi_msg2);
spi_message_add_tail(&ks->spi_xfer2[1], &ks->spi_msg2);
+ /* setup EEPROM state */
+
+ ks->eeprom.data = ks;
+ ks->eeprom.width = PCI_EEPROM_WIDTH_93C46;
+ ks->eeprom.register_read = ks8851_eeprom_regread;
+ ks->eeprom.register_write = ks8851_eeprom_regwrite;
+
/* setup mii state */
ks->mii.dev = ndev;
ks->mii.phy_id = 1,
@@ -1674,9 +1516,10 @@ static int __devinit ks8851_probe(struct spi_device *spi)
goto err_netdev;
}
- netdev_info(ndev, "revision %d, MAC %pM, IRQ %d\n",
+ netdev_info(ndev, "revision %d, MAC %pM, IRQ %d, %s EEPROM\n",
CIDER_REV_GET(ks8851_rdreg16(ks, KS_CIDER)),
- ndev->dev_addr, ndev->irq);
+ ndev->dev_addr, ndev->irq,
+ ks->rc_ccr & CCR_EEPROM ? "has" : "no");
return 0;
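As a usage sketch of the eeprom_93cx6 hookup configured above (the word offset and buffer size are illustrative only; the 93C46 layout is not spelled out in this patch):

	__le16 buf[3];

	if (ks8851_eeprom_claim(ks) == 0) {
		/* read three consecutive 16-bit words starting at word 1 */
		eeprom_93cx6_multiread(&ks->eeprom, 1, buf, 3);
		ks8851_eeprom_release(ks);
	}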
diff --git a/drivers/net/ethernet/micrel/ks8851.h b/drivers/net/ethernet/micrel/ks8851.h
index 537fb06e5932..b0fae86aacad 100644
--- a/drivers/net/ethernet/micrel/ks8851.h
+++ b/drivers/net/ethernet/micrel/ks8851.h
@@ -16,7 +16,7 @@
#define CCR_32PIN (1 << 0)
/* MAC address registers */
-#define KS_MAR(_m) 0x15 - (_m)
+#define KS_MAR(_m) (0x15 - (_m))
#define KS_MARL 0x10
#define KS_MARM 0x12
#define KS_MARH 0x14
@@ -27,22 +27,11 @@
#define KS_EEPCR 0x22
#define EEPCR_EESRWA (1 << 5)
#define EEPCR_EESA (1 << 4)
-#define EEPCR_EESB_OFFSET 3
-#define EEPCR_EESB (1 << EEPCR_EESB_OFFSET)
+#define EEPCR_EESB (1 << 3)
#define EEPCR_EEDO (1 << 2)
#define EEPCR_EESCK (1 << 1)
#define EEPCR_EECS (1 << 0)
-#define EEPROM_OP_LEN 3 /* bits:*/
-#define EEPROM_OP_READ 0x06
-#define EEPROM_OP_EWEN 0x04
-#define EEPROM_OP_WRITE 0x05
-#define EEPROM_OP_EWDS 0x14
-
-#define EEPROM_DATA_LEN 16 /* 16 bits EEPROM */
-#define EEPROM_WRITE_TIME 4 /* wrt ack time in ms */
-#define EEPROM_SK_PERIOD 400 /* in us */
-
#define KS_MBIR 0x24
#define MBIR_TXMBF (1 << 12)
#define MBIR_TXMBFA (1 << 11)
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
index d19c849059d8..e58e78e5c930 100644
--- a/drivers/net/ethernet/micrel/ks8851_mll.c
+++ b/drivers/net/ethernet/micrel/ks8851_mll.c
@@ -1500,8 +1500,7 @@ static int ks_hw_init(struct ks_net *ks)
ks->all_mcast = 0;
ks->mcast_lst_size = 0;
- ks->frame_head_info = (struct type_frame_head *) \
- kmalloc(MHEADER_SIZE, GFP_KERNEL);
+ ks->frame_head_info = kmalloc(MHEADER_SIZE, GFP_KERNEL);
if (!ks->frame_head_info) {
		pr_err("Error: Failed to allocate frame memory\n");
return false;
@@ -1659,18 +1658,7 @@ static struct platform_driver ks8851_platform_driver = {
.remove = __devexit_p(ks8851_remove),
};
-static int __init ks8851_init(void)
-{
- return platform_driver_register(&ks8851_platform_driver);
-}
-
-static void __exit ks8851_exit(void)
-{
- platform_driver_unregister(&ks8851_platform_driver);
-}
-
-module_init(ks8851_init);
-module_exit(ks8851_exit);
+module_platform_driver(ks8851_platform_driver);
MODULE_DESCRIPTION("KS8851 MLL Network driver");
MODULE_AUTHOR("David Choi <david.choi@micrel.com>");
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index 7ece990381c8..e52cd310ae76 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -743,11 +743,10 @@
/* Change default LED mode. */
#define SET_DEFAULT_LED LED_SPEED_DUPLEX_ACT
-#define MAC_ADDR_LEN 6
-#define MAC_ADDR_ORDER(i) (MAC_ADDR_LEN - 1 - (i))
+#define MAC_ADDR_ORDER(i) (ETH_ALEN - 1 - (i))
#define MAX_ETHERNET_BODY_SIZE 1500
-#define ETHERNET_HEADER_SIZE 14
+#define ETHERNET_HEADER_SIZE (14 + VLAN_HLEN)
#define MAX_ETHERNET_PACKET_SIZE \
(MAX_ETHERNET_BODY_SIZE + ETHERNET_HEADER_SIZE)
@@ -1043,7 +1042,7 @@ enum {
* @valid: Valid setting indicating the entry is being used.
*/
struct ksz_mac_table {
- u8 mac_addr[MAC_ADDR_LEN];
+ u8 mac_addr[ETH_ALEN];
u16 vid;
u8 fid;
u8 ports;
@@ -1187,8 +1186,8 @@ struct ksz_switch {
u8 diffserv[DIFFSERV_ENTRIES];
u8 p_802_1p[PRIO_802_1P_ENTRIES];
- u8 br_addr[MAC_ADDR_LEN];
- u8 other_addr[MAC_ADDR_LEN];
+ u8 br_addr[ETH_ALEN];
+ u8 other_addr[ETH_ALEN];
u8 broad_per;
u8 member;
@@ -1292,14 +1291,14 @@ struct ksz_hw {
int tx_int_mask;
int tx_size;
- u8 perm_addr[MAC_ADDR_LEN];
- u8 override_addr[MAC_ADDR_LEN];
- u8 address[ADDITIONAL_ENTRIES][MAC_ADDR_LEN];
+ u8 perm_addr[ETH_ALEN];
+ u8 override_addr[ETH_ALEN];
+ u8 address[ADDITIONAL_ENTRIES][ETH_ALEN];
u8 addr_list_size;
u8 mac_override;
u8 promiscuous;
u8 all_multi;
- u8 multi_list[MAX_MULTICAST_LIST][MAC_ADDR_LEN];
+ u8 multi_list[MAX_MULTICAST_LIST][ETH_ALEN];
u8 multi_bits[HW_MULTICAST_SIZE];
u8 multi_list_size;
@@ -3654,7 +3653,7 @@ static void hw_add_wol_bcast(struct ksz_hw *hw)
static const u8 mask[] = { 0x3F };
static const u8 pattern[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
- hw_set_wol_frame(hw, 2, 1, mask, MAC_ADDR_LEN, pattern);
+ hw_set_wol_frame(hw, 2, 1, mask, ETH_ALEN, pattern);
}
/**
@@ -3689,7 +3688,7 @@ static void hw_add_wol_ucast(struct ksz_hw *hw)
{
static const u8 mask[] = { 0x3F };
- hw_set_wol_frame(hw, 0, 1, mask, MAC_ADDR_LEN, hw->override_addr);
+ hw_set_wol_frame(hw, 0, 1, mask, ETH_ALEN, hw->override_addr);
}
/**
@@ -4055,7 +4054,7 @@ static void hw_set_addr(struct ksz_hw *hw)
{
int i;
- for (i = 0; i < MAC_ADDR_LEN; i++)
+ for (i = 0; i < ETH_ALEN; i++)
writeb(hw->override_addr[MAC_ADDR_ORDER(i)],
hw->io + KS884X_ADDR_0_OFFSET + i);
@@ -4072,17 +4071,16 @@ static void hw_read_addr(struct ksz_hw *hw)
{
int i;
- for (i = 0; i < MAC_ADDR_LEN; i++)
+ for (i = 0; i < ETH_ALEN; i++)
hw->perm_addr[MAC_ADDR_ORDER(i)] = readb(hw->io +
KS884X_ADDR_0_OFFSET + i);
if (!hw->mac_override) {
- memcpy(hw->override_addr, hw->perm_addr, MAC_ADDR_LEN);
+ memcpy(hw->override_addr, hw->perm_addr, ETH_ALEN);
if (empty_addr(hw->override_addr)) {
- memcpy(hw->perm_addr, DEFAULT_MAC_ADDRESS,
- MAC_ADDR_LEN);
+ memcpy(hw->perm_addr, DEFAULT_MAC_ADDRESS, ETH_ALEN);
memcpy(hw->override_addr, DEFAULT_MAC_ADDRESS,
- MAC_ADDR_LEN);
+ ETH_ALEN);
hw->override_addr[5] += hw->id;
hw_set_addr(hw);
}
@@ -4130,16 +4128,16 @@ static int hw_add_addr(struct ksz_hw *hw, u8 *mac_addr)
int i;
int j = ADDITIONAL_ENTRIES;
- if (!memcmp(hw->override_addr, mac_addr, MAC_ADDR_LEN))
+ if (!memcmp(hw->override_addr, mac_addr, ETH_ALEN))
return 0;
for (i = 0; i < hw->addr_list_size; i++) {
- if (!memcmp(hw->address[i], mac_addr, MAC_ADDR_LEN))
+ if (!memcmp(hw->address[i], mac_addr, ETH_ALEN))
return 0;
if (ADDITIONAL_ENTRIES == j && empty_addr(hw->address[i]))
j = i;
}
if (j < ADDITIONAL_ENTRIES) {
- memcpy(hw->address[j], mac_addr, MAC_ADDR_LEN);
+ memcpy(hw->address[j], mac_addr, ETH_ALEN);
hw_ena_add_addr(hw, j, hw->address[j]);
return 0;
}
@@ -4151,8 +4149,8 @@ static int hw_del_addr(struct ksz_hw *hw, u8 *mac_addr)
int i;
for (i = 0; i < hw->addr_list_size; i++) {
- if (!memcmp(hw->address[i], mac_addr, MAC_ADDR_LEN)) {
- memset(hw->address[i], 0, MAC_ADDR_LEN);
+ if (!memcmp(hw->address[i], mac_addr, ETH_ALEN)) {
+ memset(hw->address[i], 0, ETH_ALEN);
writel(0, hw->io + ADD_ADDR_INCR * i +
KS_ADD_ADDR_0_HI);
return 0;
@@ -4382,12 +4380,10 @@ static void ksz_update_timer(struct ksz_timer_info *info)
*/
static int ksz_alloc_soft_desc(struct ksz_desc_info *desc_info, int transmit)
{
- desc_info->ring = kmalloc(sizeof(struct ksz_desc) * desc_info->alloc,
- GFP_KERNEL);
+ desc_info->ring = kzalloc(sizeof(struct ksz_desc) * desc_info->alloc,
+ GFP_KERNEL);
if (!desc_info->ring)
return 1;
- memset((void *) desc_info->ring, 0,
- sizeof(struct ksz_desc) * desc_info->alloc);
hw_init_desc(desc_info, transmit);
return 0;
}
@@ -5676,7 +5672,7 @@ static int netdev_set_mac_address(struct net_device *dev, void *addr)
hw_del_addr(hw, dev->dev_addr);
else {
hw->mac_override = 1;
- memcpy(hw->override_addr, mac->sa_data, MAC_ADDR_LEN);
+ memcpy(hw->override_addr, mac->sa_data, ETH_ALEN);
}
memcpy(dev->dev_addr, mac->sa_data, MAX_ADDR_LEN);
@@ -5786,7 +5782,7 @@ static void netdev_set_rx_mode(struct net_device *dev)
netdev_for_each_mc_addr(ha, dev) {
if (i >= MAX_MULTICAST_LIST)
break;
- memcpy(hw->multi_list[i++], ha->addr, MAC_ADDR_LEN);
+ memcpy(hw->multi_list[i++], ha->addr, ETH_ALEN);
}
hw->multi_list_size = (u8) i;
hw_set_grp_addr(hw);
@@ -6093,9 +6089,10 @@ static void netdev_get_drvinfo(struct net_device *dev,
struct dev_priv *priv = netdev_priv(dev);
struct dev_info *hw_priv = priv->adapter;
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, pci_name(hw_priv->pdev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(hw_priv->pdev),
+ sizeof(info->bus_info));
}
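The strncpy()-to-strlcpy() conversions in this series are about termination as much as truncation: strlcpy() always leaves a NUL in the destination, which strncpy() does not guarantee when the source fills the buffer. A minimal sketch of the difference:

	char buf[8];

	strncpy(buf, "verylongname", sizeof(buf));	/* buf may lack a terminating NUL */
	strlcpy(buf, "verylongname", sizeof(buf));	/* always NUL-terminated, truncated if needed */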
/**
@@ -6587,7 +6584,8 @@ static void netdev_get_ethtool_stats(struct net_device *dev,
*
* Return 0 if successful; otherwise an error code.
*/
-static int netdev_set_features(struct net_device *dev, u32 features)
+static int netdev_set_features(struct net_device *dev,
+ netdev_features_t features)
{
struct dev_priv *priv = netdev_priv(dev);
struct dev_info *hw_priv = priv->adapter;
@@ -6609,7 +6607,7 @@ static int netdev_set_features(struct net_device *dev, u32 features)
return 0;
}
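The prototype changes in this series track the new netdev_features_t type, a 64-bit bitmask (a u64 typedef at this point in the tree), so existing feature tests keep their form while gaining room beyond 32 flags; e.g.:

	netdev_features_t changed = dev->features ^ features;

	if (!(changed & NETIF_F_RXCSUM))
		return 0;	/* nothing this driver cares about was toggled */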
-static struct ethtool_ops netdev_ethtool_ops = {
+static const struct ethtool_ops netdev_ethtool_ops = {
.get_settings = netdev_get_settings,
.set_settings = netdev_set_settings,
.nway_reset = netdev_nway_reset,
@@ -6860,7 +6858,7 @@ static void get_mac_addr(struct dev_info *hw_priv, u8 *macaddr, int port)
int num;
i = j = num = got_num = 0;
- while (j < MAC_ADDR_LEN) {
+ while (j < ETH_ALEN) {
if (macaddr[i]) {
int digit;
@@ -6891,7 +6889,7 @@ static void get_mac_addr(struct dev_info *hw_priv, u8 *macaddr, int port)
}
i++;
}
- if (MAC_ADDR_LEN == j) {
+ if (ETH_ALEN == j) {
if (MAIN_PORT == port)
hw_priv->hw.mac_override = 1;
}
@@ -7058,7 +7056,7 @@ static int __devinit pcidev_init(struct pci_dev *pdev,
/* Multiple device interfaces mode requires a second MAC address. */
if (hw->dev_count > 1) {
- memcpy(sw->other_addr, hw->override_addr, MAC_ADDR_LEN);
+ memcpy(sw->other_addr, hw->override_addr, ETH_ALEN);
read_other_addr(hw);
if (mac1addr[0] != ':')
get_mac_addr(hw_priv, mac1addr, OTHER_PORT);
@@ -7108,12 +7106,11 @@ static int __devinit pcidev_init(struct pci_dev *pdev,
dev->irq = pdev->irq;
if (MAIN_PORT == i)
memcpy(dev->dev_addr, hw_priv->hw.override_addr,
- MAC_ADDR_LEN);
+ ETH_ALEN);
else {
- memcpy(dev->dev_addr, sw->other_addr,
- MAC_ADDR_LEN);
+ memcpy(dev->dev_addr, sw->other_addr, ETH_ALEN);
if (!memcmp(sw->other_addr, hw->override_addr,
- MAC_ADDR_LEN))
+ ETH_ALEN))
dev->dev_addr[5] += port->first_port;
}
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 0778edcf7b9a..20b72ecb020a 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -1491,7 +1491,7 @@ myri10ge_clean_rx_done(struct myri10ge_slice_state *ss, int budget)
* access to avoid theoretical race condition with functions that
* change NETIF_F_LRO flag at runtime.
*/
- bool lro_enabled = ACCESS_ONCE(mgp->dev->features) & NETIF_F_LRO;
+ bool lro_enabled = !!(ACCESS_ONCE(mgp->dev->features) & NETIF_F_LRO);
while (rx_done->entry[idx].length != 0 && work_done < budget) {
length = ntohs(rx_done->entry[idx].length);
@@ -3149,7 +3149,8 @@ static int myri10ge_set_mac_address(struct net_device *dev, void *addr)
return 0;
}
-static u32 myri10ge_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t myri10ge_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
if (!(features & NETIF_F_RXCSUM))
features &= ~NETIF_F_LRO;
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp.h b/drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp.h
index 11be150e4d67..b7fc26c4f738 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp.h
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp.h
@@ -356,7 +356,7 @@ enum myri10ge_mcp_cmd_type {
MXGEFW_CMD_GET_DCA_OFFSET = 56,
/* offset of dca control for WDMAs */
- /* VMWare NetQueue commands */
+ /* VMware NetQueue commands */
MXGEFW_CMD_NETQ_GET_FILTERS_PER_QUEUE = 57,
MXGEFW_CMD_NETQ_ADD_FILTER = 58,
/* data0 = filter_id << 16 | queue << 8 | type */
diff --git a/drivers/net/ethernet/natsemi/jazzsonic.c b/drivers/net/ethernet/natsemi/jazzsonic.c
index fc7c6a932ad9..5b89fd377ae3 100644
--- a/drivers/net/ethernet/natsemi/jazzsonic.c
+++ b/drivers/net/ethernet/natsemi/jazzsonic.c
@@ -294,15 +294,4 @@ static struct platform_driver jazz_sonic_driver = {
},
};
-static int __init jazz_sonic_init_module(void)
-{
- return platform_driver_register(&jazz_sonic_driver);
-}
-
-static void __exit jazz_sonic_cleanup_module(void)
-{
- platform_driver_unregister(&jazz_sonic_driver);
-}
-
-module_init(jazz_sonic_init_module);
-module_exit(jazz_sonic_cleanup_module);
+module_platform_driver(jazz_sonic_driver);
diff --git a/drivers/net/ethernet/natsemi/macsonic.c b/drivers/net/ethernet/natsemi/macsonic.c
index a2eacbfb4252..f1b85561c650 100644
--- a/drivers/net/ethernet/natsemi/macsonic.c
+++ b/drivers/net/ethernet/natsemi/macsonic.c
@@ -142,8 +142,7 @@ static int macsonic_open(struct net_device* dev)
{
int retval;
- retval = request_irq(dev->irq, sonic_interrupt, IRQ_FLG_FAST,
- "sonic", dev);
+ retval = request_irq(dev->irq, sonic_interrupt, 0, "sonic", dev);
if (retval) {
printk(KERN_ERR "%s: unable to get IRQ %d.\n",
dev->name, dev->irq);
@@ -154,8 +153,8 @@ static int macsonic_open(struct net_device* dev)
* rupt as well, which must prevent re-entrance of the sonic handler.
*/
if (dev->irq == IRQ_AUTO_3) {
- retval = request_irq(IRQ_NUBUS_9, macsonic_interrupt,
- IRQ_FLG_FAST, "sonic", dev);
+ retval = request_irq(IRQ_NUBUS_9, macsonic_interrupt, 0,
+ "sonic", dev);
if (retval) {
printk(KERN_ERR "%s: unable to get IRQ %d.\n",
dev->name, IRQ_NUBUS_9);
@@ -643,15 +642,4 @@ static struct platform_driver mac_sonic_driver = {
},
};
-static int __init mac_sonic_init_module(void)
-{
- return platform_driver_register(&mac_sonic_driver);
-}
-
-static void __exit mac_sonic_cleanup_module(void)
-{
- platform_driver_unregister(&mac_sonic_driver);
-}
-
-module_init(mac_sonic_init_module);
-module_exit(mac_sonic_cleanup_module);
+module_platform_driver(mac_sonic_driver);
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index 6ca047aab793..ac7b16b6e7af 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -2555,9 +2555,9 @@ static void set_rx_mode(struct net_device *dev)
static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct netdev_private *np = netdev_priv(dev);
- strncpy(info->driver, DRV_NAME, ETHTOOL_BUSINFO_LEN);
- strncpy(info->version, DRV_VERSION, ETHTOOL_BUSINFO_LEN);
- strncpy(info->bus_info, pci_name(np->pci_dev), ETHTOOL_BUSINFO_LEN);
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static int get_regs_len(struct net_device *dev)
diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c
index 2b8f64ddfb55..c24b46cbfe27 100644
--- a/drivers/net/ethernet/natsemi/ns83820.c
+++ b/drivers/net/ethernet/natsemi/ns83820.c
@@ -1364,9 +1364,9 @@ static int ns83820_set_settings(struct net_device *ndev,
static void ns83820_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info)
{
struct ns83820 *dev = PRIV(ndev);
- strcpy(info->driver, "ns83820");
- strcpy(info->version, VERSION);
- strcpy(info->bus_info, pci_name(dev->pci_dev));
+ strlcpy(info->driver, "ns83820", sizeof(info->driver));
+ strlcpy(info->version, VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(dev->pci_dev), sizeof(info->bus_info));
}
static u32 ns83820_get_link(struct net_device *ndev)
diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c
index ccf61b9da8d1..e01c0a07a93a 100644
--- a/drivers/net/ethernet/natsemi/xtsonic.c
+++ b/drivers/net/ethernet/natsemi/xtsonic.c
@@ -319,15 +319,4 @@ static struct platform_driver xtsonic_driver = {
},
};
-static int __init xtsonic_init(void)
-{
- return platform_driver_register(&xtsonic_driver);
-}
-
-static void __exit xtsonic_cleanup(void)
-{
- platform_driver_unregister(&xtsonic_driver);
-}
-
-module_init(xtsonic_init);
-module_exit(xtsonic_cleanup);
+module_platform_driver(xtsonic_driver);
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index c27fb3dda9f4..97f63e12d86e 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -5391,10 +5391,9 @@ static void s2io_ethtool_gdrvinfo(struct net_device *dev,
{
struct s2io_nic *sp = netdev_priv(dev);
- strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
- strncpy(info->version, s2io_driver_version, sizeof(info->version));
- strncpy(info->fw_version, "", sizeof(info->fw_version));
- strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
+ strlcpy(info->driver, s2io_driver_name, sizeof(info->driver));
+ strlcpy(info->version, s2io_driver_version, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
info->regdump_len = XENA_REG_SPACE;
info->eedump_len = XENA_EEPROM_SPACE;
}
@@ -6616,10 +6615,10 @@ static void s2io_ethtool_get_strings(struct net_device *dev,
}
}
-static int s2io_set_features(struct net_device *dev, u32 features)
+static int s2io_set_features(struct net_device *dev, netdev_features_t features)
{
struct s2io_nic *sp = netdev_priv(dev);
- u32 changed = (features ^ dev->features) & NETIF_F_LRO;
+ netdev_features_t changed = (features ^ dev->features) & NETIF_F_LRO;
if (changed && netif_running(dev)) {
int rc;
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index a83197d757c1..ef76725454d2 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -2662,9 +2662,10 @@ static void vxge_poll_vp_lockup(unsigned long data)
mod_timer(&vdev->vp_lockup_timer, jiffies + HZ / 1000);
}
-static u32 vxge_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t vxge_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
- u32 changed = dev->features ^ features;
+ netdev_features_t changed = dev->features ^ features;
/* Enabling RTH requires some of the logic in vxge_device_register and a
* vpath reset. Due to these restrictions, only allow modification
@@ -2676,10 +2677,10 @@ static u32 vxge_fix_features(struct net_device *dev, u32 features)
return features;
}
-static int vxge_set_features(struct net_device *dev, u32 features)
+static int vxge_set_features(struct net_device *dev, netdev_features_t features)
{
struct vxgedev *vdev = netdev_priv(dev);
- u32 changed = dev->features ^ features;
+ netdev_features_t changed = dev->features ^ features;
if (!(changed & NETIF_F_RXHASH))
return 0;
@@ -3304,7 +3305,7 @@ static void vxge_tx_watchdog(struct net_device *dev)
*
* Add the vlan id to the devices vlan id table
*/
-static void
+static int
vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
struct vxgedev *vdev = netdev_priv(dev);
@@ -3319,6 +3320,7 @@ vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
vxge_hw_vpath_vid_add(vpath->handle, vid);
}
set_bit(vid, vdev->active_vlans);
+ return 0;
}
/**
@@ -3328,7 +3330,7 @@ vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
*
* Remove the vlan id from the device's vlan id table
*/
-static void
+static int
vxge_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
struct vxgedev *vdev = netdev_priv(dev);
@@ -3347,6 +3349,7 @@ vxge_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
vxge_debug_entryexit(VXGE_TRACE,
"%s:%d Exiting...", __func__, __LINE__);
clear_bit(vid, vdev->active_vlans);
+ return 0;
}
static const struct net_device_ops vxge_netdev_ops = {
diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c
index f1bfb8f8fcf0..b75a0497d58d 100644
--- a/drivers/net/ethernet/nuvoton/w90p910_ether.c
+++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c
@@ -1103,18 +1103,7 @@ static struct platform_driver w90p910_ether_driver = {
},
};
-static int __init w90p910_ether_init(void)
-{
- return platform_driver_register(&w90p910_ether_driver);
-}
-
-static void __exit w90p910_ether_exit(void)
-{
- platform_driver_unregister(&w90p910_ether_driver);
-}
-
-module_init(w90p910_ether_init);
-module_exit(w90p910_ether_exit);
+module_platform_driver(w90p910_ether_driver);
MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
MODULE_DESCRIPTION("w90p910 MAC driver!");
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 1c61d36e6570..4c4e7f458383 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -65,7 +65,8 @@
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/prefetch.h>
-#include <linux/io.h>
+#include <linux/u64_stats_sync.h>
+#include <linux/io.h>
#include <asm/irq.h>
#include <asm/system.h>
@@ -736,6 +737,16 @@ struct nv_skb_map {
* - tx setup is lockless: it relies on netif_tx_lock. Actual submission
* needs netdev_priv(dev)->lock :-(
* - set_multicast_list: preparation lockless, relies on netif_tx_lock.
+ *
+ * Hardware stats updates are protected by hwstats_lock:
+ * - updated by nv_do_stats_poll (timer). This is meant to avoid
+ * integer wraparound in the NIC stats registers, at low frequency
+ * (0.1 Hz)
+ * - updated by nv_get_ethtool_stats + nv_get_stats64
+ *
+ * Software stats are accessed only through 64b synchronization points
+ * and are not subject to other synchronization techniques (single
+ * update thread on the TX or RX paths).
*/
/* in dev: base, irq */
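Condensed from the hunks below, the u64_stats_sync pairing the comment refers to looks like this (field names as introduced by this patch):

	/* writer side, e.g. on the RX completion path */
	u64_stats_update_begin(&np->swstats_rx_syncp);
	np->stat_rx_packets++;
	np->stat_rx_bytes += len;
	u64_stats_update_end(&np->swstats_rx_syncp);

	/* reader side, e.g. in nv_get_stats64() */
	do {
		syncp_start = u64_stats_fetch_begin_bh(&np->swstats_rx_syncp);
		storage->rx_packets = np->stat_rx_packets;
		storage->rx_bytes = np->stat_rx_bytes;
	} while (u64_stats_fetch_retry_bh(&np->swstats_rx_syncp, syncp_start));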
@@ -745,9 +756,10 @@ struct fe_priv {
struct net_device *dev;
struct napi_struct napi;
- /* General data:
- * Locking: spin_lock(&np->lock); */
+ /* hardware stats are updated in syscall and timer */
+ spinlock_t hwstats_lock;
struct nv_ethtool_stats estats;
+
int in_shutdown;
u32 linkspeed;
int duplex;
@@ -798,6 +810,13 @@ struct fe_priv {
u32 nic_poll_irq;
int rx_ring_size;
+ /* RX software stats */
+ struct u64_stats_sync swstats_rx_syncp;
+ u64 stat_rx_packets;
+ u64 stat_rx_bytes; /* not always available in HW */
+ u64 stat_rx_missed_errors;
+ u64 stat_rx_dropped;
+
/* media detection workaround.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
*/
@@ -820,6 +839,12 @@ struct fe_priv {
struct nv_skb_map *tx_end_flip;
int tx_stop;
+ /* TX software stats */
+ struct u64_stats_sync swstats_tx_syncp;
+ u64 stat_tx_packets; /* not always available in HW */
+ u64 stat_tx_bytes;
+ u64 stat_tx_dropped;
+
/* msi/msi-x fields */
u32 msi_flags;
struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];
@@ -892,6 +917,11 @@ enum {
static int dma_64bit = NV_DMA_64BIT_ENABLED;
/*
+ * Debug output control for tx_timeout
+ */
+static bool debug_tx_timeout = false;
+
+/*
* Crossover Detection
* Realtek 8201 phy + some OEM boards do not work properly.
*/
@@ -1630,11 +1660,19 @@ static void nv_mac_reset(struct net_device *dev)
pci_push(base);
}
-static void nv_get_hw_stats(struct net_device *dev)
+/* Caller must appropriately lock netdev_priv(dev)->hwstats_lock */
+static void nv_update_stats(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
+ /* If it happens that this is run in top-half context, then
+ * replace the spin_lock of hwstats_lock with
+ * spin_lock_irqsave() in calling functions. */
+ WARN_ONCE(in_irq(), "forcedeth: estats spin_lock(_bh) from top-half");
+ assert_spin_locked(&np->hwstats_lock);
+
+ /* query hardware */
np->estats.tx_bytes += readl(base + NvRegTxCnt);
np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
@@ -1693,40 +1731,73 @@ static void nv_get_hw_stats(struct net_device *dev)
}
/*
- * nv_get_stats: dev->get_stats function
+ * nv_get_stats64: dev->ndo_get_stats64 function
* Get latest stats value from the nic.
* Called with read_lock(&dev_base_lock) held for read -
* only synchronized against unregister_netdevice.
*/
-static struct net_device_stats *nv_get_stats(struct net_device *dev)
+static struct rtnl_link_stats64*
+nv_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage)
+ __acquires(&netdev_priv(dev)->hwstats_lock)
+ __releases(&netdev_priv(dev)->hwstats_lock)
{
struct fe_priv *np = netdev_priv(dev);
+ unsigned int syncp_start;
+
+ /*
+ * Note: because HW stats are not always available and for
+ * consistency reasons, the following ifconfig stats are
+ * managed by software: rx_bytes, tx_bytes, rx_packets and
+ * tx_packets. The related hardware stats reported by ethtool
+ * should be equivalent to these ifconfig stats, with 4
+ * additional bytes per packet (Ethernet FCS CRC), except for
+ * tx_packets when TSO kicks in.
+ */
+
+ /* software stats */
+ do {
+ syncp_start = u64_stats_fetch_begin_bh(&np->swstats_rx_syncp);
+ storage->rx_packets = np->stat_rx_packets;
+ storage->rx_bytes = np->stat_rx_bytes;
+ storage->rx_dropped = np->stat_rx_dropped;
+ storage->rx_missed_errors = np->stat_rx_missed_errors;
+ } while (u64_stats_fetch_retry_bh(&np->swstats_rx_syncp, syncp_start));
+
+ do {
+ syncp_start = u64_stats_fetch_begin_bh(&np->swstats_tx_syncp);
+ storage->tx_packets = np->stat_tx_packets;
+ storage->tx_bytes = np->stat_tx_bytes;
+ storage->tx_dropped = np->stat_tx_dropped;
+ } while (u64_stats_fetch_retry_bh(&np->swstats_tx_syncp, syncp_start));
/* If the nic supports hw counters then retrieve latest values */
- if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3)) {
- nv_get_hw_stats(dev);
+ if (np->driver_data & DEV_HAS_STATISTICS_V123) {
+ spin_lock_bh(&np->hwstats_lock);
- /*
- * Note: because HW stats are not always available and
- * for consistency reasons, the following ifconfig
- * stats are managed by software: rx_bytes, tx_bytes,
- * rx_packets and tx_packets. The related hardware
- * stats reported by ethtool should be equivalent to
- * these ifconfig stats, with 4 additional bytes per
- * packet (Ethernet FCS CRC).
- */
+ nv_update_stats(dev);
+
+ /* generic stats */
+ storage->rx_errors = np->estats.rx_errors_total;
+ storage->tx_errors = np->estats.tx_errors_total;
- /* copy to net_device stats */
- dev->stats.tx_fifo_errors = np->estats.tx_fifo_errors;
- dev->stats.tx_carrier_errors = np->estats.tx_carrier_errors;
- dev->stats.rx_crc_errors = np->estats.rx_crc_errors;
- dev->stats.rx_over_errors = np->estats.rx_over_errors;
- dev->stats.rx_fifo_errors = np->estats.rx_drop_frame;
- dev->stats.rx_errors = np->estats.rx_errors_total;
- dev->stats.tx_errors = np->estats.tx_errors_total;
+ /* meaningful only when NIC supports stats v3 */
+ storage->multicast = np->estats.rx_multicast;
+
+ /* detailed rx_errors */
+ storage->rx_length_errors = np->estats.rx_length_error;
+ storage->rx_over_errors = np->estats.rx_over_errors;
+ storage->rx_crc_errors = np->estats.rx_crc_errors;
+ storage->rx_frame_errors = np->estats.rx_frame_align_error;
+ storage->rx_fifo_errors = np->estats.rx_drop_frame;
+
+ /* detailed tx_errors */
+ storage->tx_carrier_errors = np->estats.tx_carrier_errors;
+ storage->tx_fifo_errors = np->estats.tx_fifo_errors;
+
+ spin_unlock_bh(&np->hwstats_lock);
}
- return &dev->stats;
+ return storage;
}
/*
@@ -1759,8 +1830,12 @@ static int nv_alloc_rx(struct net_device *dev)
np->put_rx.orig = np->first_rx.orig;
if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
np->put_rx_ctx = np->first_rx_ctx;
- } else
+ } else {
+ u64_stats_update_begin(&np->swstats_rx_syncp);
+ np->stat_rx_dropped++;
+ u64_stats_update_end(&np->swstats_rx_syncp);
return 1;
+ }
}
return 0;
}
@@ -1791,8 +1866,12 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
np->put_rx.ex = np->first_rx.ex;
if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
np->put_rx_ctx = np->first_rx_ctx;
- } else
+ } else {
+ u64_stats_update_begin(&np->swstats_rx_syncp);
+ np->stat_rx_dropped++;
+ u64_stats_update_end(&np->swstats_rx_syncp);
return 1;
+ }
}
return 0;
}
@@ -1849,6 +1928,7 @@ static void nv_init_tx(struct net_device *dev)
np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb;
np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1];
+ netdev_reset_queue(np->dev);
np->tx_pkts_in_progress = 0;
np->tx_change_owner = NULL;
np->tx_end_flip = NULL;
@@ -1927,8 +2007,11 @@ static void nv_drain_tx(struct net_device *dev)
np->tx_ring.ex[i].bufhigh = 0;
np->tx_ring.ex[i].buflow = 0;
}
- if (nv_release_txskb(np, &np->tx_skb[i]))
- dev->stats.tx_dropped++;
+ if (nv_release_txskb(np, &np->tx_skb[i])) {
+ u64_stats_update_begin(&np->swstats_tx_syncp);
+ np->stat_tx_dropped++;
+ u64_stats_update_end(&np->swstats_tx_syncp);
+ }
np->tx_skb[i].dma = 0;
np->tx_skb[i].dma_len = 0;
np->tx_skb[i].dma_single = 0;
@@ -2194,6 +2277,9 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* set tx flags */
start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
+
+ netdev_sent_queue(np->dev, skb->len);
+
np->put_tx.orig = put_tx;
spin_unlock_irqrestore(&np->lock, flags);
@@ -2338,6 +2424,9 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
/* set tx flags */
start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
+
+ netdev_sent_queue(np->dev, skb->len);
+
np->put_tx.ex = put_tx;
spin_unlock_irqrestore(&np->lock, flags);
@@ -2375,6 +2464,7 @@ static int nv_tx_done(struct net_device *dev, int limit)
u32 flags;
int tx_work = 0;
struct ring_desc *orig_get_tx = np->get_tx.orig;
+ unsigned int bytes_compl = 0;
while ((np->get_tx.orig != np->put_tx.orig) &&
!((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID) &&
@@ -2385,12 +2475,16 @@ static int nv_tx_done(struct net_device *dev, int limit)
if (np->desc_ver == DESC_VER_1) {
if (flags & NV_TX_LASTPACKET) {
if (flags & NV_TX_ERROR) {
- if ((flags & NV_TX_RETRYERROR) && !(flags & NV_TX_RETRYCOUNT_MASK))
+ if ((flags & NV_TX_RETRYERROR)
+ && !(flags & NV_TX_RETRYCOUNT_MASK))
nv_legacybackoff_reseed(dev);
} else {
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
+ u64_stats_update_begin(&np->swstats_tx_syncp);
+ np->stat_tx_packets++;
+ np->stat_tx_bytes += np->get_tx_ctx->skb->len;
+ u64_stats_update_end(&np->swstats_tx_syncp);
}
+ bytes_compl += np->get_tx_ctx->skb->len;
dev_kfree_skb_any(np->get_tx_ctx->skb);
np->get_tx_ctx->skb = NULL;
tx_work++;
@@ -2398,12 +2492,16 @@ static int nv_tx_done(struct net_device *dev, int limit)
} else {
if (flags & NV_TX2_LASTPACKET) {
if (flags & NV_TX2_ERROR) {
- if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK))
+ if ((flags & NV_TX2_RETRYERROR)
+ && !(flags & NV_TX2_RETRYCOUNT_MASK))
nv_legacybackoff_reseed(dev);
} else {
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
+ u64_stats_update_begin(&np->swstats_tx_syncp);
+ np->stat_tx_packets++;
+ np->stat_tx_bytes += np->get_tx_ctx->skb->len;
+ u64_stats_update_end(&np->swstats_tx_syncp);
}
+ bytes_compl += np->get_tx_ctx->skb->len;
dev_kfree_skb_any(np->get_tx_ctx->skb);
np->get_tx_ctx->skb = NULL;
tx_work++;
@@ -2414,6 +2512,9 @@ static int nv_tx_done(struct net_device *dev, int limit)
if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
np->get_tx_ctx = np->first_tx_ctx;
}
+
+ netdev_completed_queue(np->dev, tx_work, bytes_compl);
+
if (unlikely((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx))) {
np->tx_stop = 0;
netif_wake_queue(dev);
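The byte queue limit hooks added in this and the xmit hunks pair up as follows (condensed; pkts_compl/bytes_compl stand in for the per-pass counters the driver keeps):

	/* xmit path: account bytes handed to the hardware */
	netdev_sent_queue(dev, skb->len);

	/* completion path: report what actually finished */
	netdev_completed_queue(dev, pkts_compl, bytes_compl);

	/* ring (re)initialisation: restart the accounting from zero */
	netdev_reset_queue(dev);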
@@ -2427,6 +2528,7 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
u32 flags;
int tx_work = 0;
struct ring_desc_ex *orig_get_tx = np->get_tx.ex;
+ unsigned long bytes_cleaned = 0;
while ((np->get_tx.ex != np->put_tx.ex) &&
!((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX2_VALID) &&
@@ -2436,17 +2538,21 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
if (flags & NV_TX2_LASTPACKET) {
if (flags & NV_TX2_ERROR) {
- if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK)) {
+ if ((flags & NV_TX2_RETRYERROR)
+ && !(flags & NV_TX2_RETRYCOUNT_MASK)) {
if (np->driver_data & DEV_HAS_GEAR_MODE)
nv_gear_backoff_reseed(dev);
else
nv_legacybackoff_reseed(dev);
}
} else {
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
+ u64_stats_update_begin(&np->swstats_tx_syncp);
+ np->stat_tx_packets++;
+ np->stat_tx_bytes += np->get_tx_ctx->skb->len;
+ u64_stats_update_end(&np->swstats_tx_syncp);
}
+ bytes_cleaned += np->get_tx_ctx->skb->len;
dev_kfree_skb_any(np->get_tx_ctx->skb);
np->get_tx_ctx->skb = NULL;
tx_work++;
@@ -2454,11 +2560,15 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
if (np->tx_limit)
nv_tx_flip_ownership(dev);
}
+
if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
np->get_tx.ex = np->first_tx.ex;
if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
np->get_tx_ctx = np->first_tx_ctx;
}
+
+ netdev_completed_queue(np->dev, tx_work, bytes_cleaned);
+
if (unlikely((np->tx_stop == 1) && (np->get_tx.ex != orig_get_tx))) {
np->tx_stop = 0;
netif_wake_queue(dev);
@@ -2477,56 +2587,64 @@ static void nv_tx_timeout(struct net_device *dev)
u32 status;
union ring_type put_tx;
int saved_tx_limit;
- int i;
if (np->msi_flags & NV_MSI_X_ENABLED)
status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
else
status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
- netdev_info(dev, "Got tx_timeout. irq: %08x\n", status);
+ netdev_warn(dev, "Got tx_timeout. irq status: %08x\n", status);
- netdev_info(dev, "Ring at %lx\n", (unsigned long)np->ring_addr);
- netdev_info(dev, "Dumping tx registers\n");
- for (i = 0; i <= np->register_size; i += 32) {
- netdev_info(dev,
- "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
- i,
- readl(base + i + 0), readl(base + i + 4),
- readl(base + i + 8), readl(base + i + 12),
- readl(base + i + 16), readl(base + i + 20),
- readl(base + i + 24), readl(base + i + 28));
- }
- netdev_info(dev, "Dumping tx ring\n");
- for (i = 0; i < np->tx_ring_size; i += 4) {
- if (!nv_optimized(np)) {
- netdev_info(dev,
- "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
- i,
- le32_to_cpu(np->tx_ring.orig[i].buf),
- le32_to_cpu(np->tx_ring.orig[i].flaglen),
- le32_to_cpu(np->tx_ring.orig[i+1].buf),
- le32_to_cpu(np->tx_ring.orig[i+1].flaglen),
- le32_to_cpu(np->tx_ring.orig[i+2].buf),
- le32_to_cpu(np->tx_ring.orig[i+2].flaglen),
- le32_to_cpu(np->tx_ring.orig[i+3].buf),
- le32_to_cpu(np->tx_ring.orig[i+3].flaglen));
- } else {
+ if (unlikely(debug_tx_timeout)) {
+ int i;
+
+ netdev_info(dev, "Ring at %lx\n", (unsigned long)np->ring_addr);
+ netdev_info(dev, "Dumping tx registers\n");
+ for (i = 0; i <= np->register_size; i += 32) {
netdev_info(dev,
- "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
+ "%3x: %08x %08x %08x %08x "
+ "%08x %08x %08x %08x\n",
i,
- le32_to_cpu(np->tx_ring.ex[i].bufhigh),
- le32_to_cpu(np->tx_ring.ex[i].buflow),
- le32_to_cpu(np->tx_ring.ex[i].flaglen),
- le32_to_cpu(np->tx_ring.ex[i+1].bufhigh),
- le32_to_cpu(np->tx_ring.ex[i+1].buflow),
- le32_to_cpu(np->tx_ring.ex[i+1].flaglen),
- le32_to_cpu(np->tx_ring.ex[i+2].bufhigh),
- le32_to_cpu(np->tx_ring.ex[i+2].buflow),
- le32_to_cpu(np->tx_ring.ex[i+2].flaglen),
- le32_to_cpu(np->tx_ring.ex[i+3].bufhigh),
- le32_to_cpu(np->tx_ring.ex[i+3].buflow),
- le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
+ readl(base + i + 0), readl(base + i + 4),
+ readl(base + i + 8), readl(base + i + 12),
+ readl(base + i + 16), readl(base + i + 20),
+ readl(base + i + 24), readl(base + i + 28));
+ }
+ netdev_info(dev, "Dumping tx ring\n");
+ for (i = 0; i < np->tx_ring_size; i += 4) {
+ if (!nv_optimized(np)) {
+ netdev_info(dev,
+ "%03x: %08x %08x // %08x %08x "
+ "// %08x %08x // %08x %08x\n",
+ i,
+ le32_to_cpu(np->tx_ring.orig[i].buf),
+ le32_to_cpu(np->tx_ring.orig[i].flaglen),
+ le32_to_cpu(np->tx_ring.orig[i+1].buf),
+ le32_to_cpu(np->tx_ring.orig[i+1].flaglen),
+ le32_to_cpu(np->tx_ring.orig[i+2].buf),
+ le32_to_cpu(np->tx_ring.orig[i+2].flaglen),
+ le32_to_cpu(np->tx_ring.orig[i+3].buf),
+ le32_to_cpu(np->tx_ring.orig[i+3].flaglen));
+ } else {
+ netdev_info(dev,
+ "%03x: %08x %08x %08x "
+ "// %08x %08x %08x "
+ "// %08x %08x %08x "
+ "// %08x %08x %08x\n",
+ i,
+ le32_to_cpu(np->tx_ring.ex[i].bufhigh),
+ le32_to_cpu(np->tx_ring.ex[i].buflow),
+ le32_to_cpu(np->tx_ring.ex[i].flaglen),
+ le32_to_cpu(np->tx_ring.ex[i+1].bufhigh),
+ le32_to_cpu(np->tx_ring.ex[i+1].buflow),
+ le32_to_cpu(np->tx_ring.ex[i+1].flaglen),
+ le32_to_cpu(np->tx_ring.ex[i+2].bufhigh),
+ le32_to_cpu(np->tx_ring.ex[i+2].buflow),
+ le32_to_cpu(np->tx_ring.ex[i+2].flaglen),
+ le32_to_cpu(np->tx_ring.ex[i+3].bufhigh),
+ le32_to_cpu(np->tx_ring.ex[i+3].buflow),
+ le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
+ }
}
}
@@ -2649,8 +2767,11 @@ static int nv_rx_process(struct net_device *dev, int limit)
}
/* the rest are hard errors */
else {
- if (flags & NV_RX_MISSEDFRAME)
- dev->stats.rx_missed_errors++;
+ if (flags & NV_RX_MISSEDFRAME) {
+ u64_stats_update_begin(&np->swstats_rx_syncp);
+ np->stat_rx_missed_errors++;
+ u64_stats_update_end(&np->swstats_rx_syncp);
+ }
dev_kfree_skb(skb);
goto next_pkt;
}
@@ -2693,8 +2814,10 @@ static int nv_rx_process(struct net_device *dev, int limit)
skb_put(skb, len);
skb->protocol = eth_type_trans(skb, dev);
napi_gro_receive(&np->napi, skb);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += len;
+ u64_stats_update_begin(&np->swstats_rx_syncp);
+ np->stat_rx_packets++;
+ np->stat_rx_bytes += len;
+ u64_stats_update_end(&np->swstats_rx_syncp);
next_pkt:
if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
np->get_rx.orig = np->first_rx.orig;
@@ -2777,8 +2900,10 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
__vlan_hwaccel_put_tag(skb, vid);
}
napi_gro_receive(&np->napi, skb);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += len;
+ u64_stats_update_begin(&np->swstats_rx_syncp);
+ np->stat_rx_packets++;
+ np->stat_rx_bytes += len;
+ u64_stats_update_end(&np->swstats_rx_syncp);
} else {
dev_kfree_skb(skb);
}
@@ -3021,6 +3146,73 @@ static void nv_update_pause(struct net_device *dev, u32 pause_flags)
}
}
+static void nv_force_linkspeed(struct net_device *dev, int speed, int duplex)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+ u32 phyreg, txreg;
+ int mii_status;
+
+ np->linkspeed = NVREG_LINKSPEED_FORCE|speed;
+ np->duplex = duplex;
+
+ /* see if gigabit phy */
+ mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
+ if (mii_status & PHY_GIGABIT) {
+ np->gigabit = PHY_GIGABIT;
+ phyreg = readl(base + NvRegSlotTime);
+ phyreg &= ~(0x3FF00);
+ if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10)
+ phyreg |= NVREG_SLOTTIME_10_100_FULL;
+ else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100)
+ phyreg |= NVREG_SLOTTIME_10_100_FULL;
+ else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000)
+ phyreg |= NVREG_SLOTTIME_1000_FULL;
+ writel(phyreg, base + NvRegSlotTime);
+ }
+
+ phyreg = readl(base + NvRegPhyInterface);
+ phyreg &= ~(PHY_HALF|PHY_100|PHY_1000);
+ if (np->duplex == 0)
+ phyreg |= PHY_HALF;
+ if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100)
+ phyreg |= PHY_100;
+ else if ((np->linkspeed & NVREG_LINKSPEED_MASK) ==
+ NVREG_LINKSPEED_1000)
+ phyreg |= PHY_1000;
+ writel(phyreg, base + NvRegPhyInterface);
+
+ if (phyreg & PHY_RGMII) {
+ if ((np->linkspeed & NVREG_LINKSPEED_MASK) ==
+ NVREG_LINKSPEED_1000)
+ txreg = NVREG_TX_DEFERRAL_RGMII_1000;
+ else
+ txreg = NVREG_TX_DEFERRAL_RGMII_10_100;
+ } else {
+ txreg = NVREG_TX_DEFERRAL_DEFAULT;
+ }
+ writel(txreg, base + NvRegTxDeferral);
+
+ if (np->desc_ver == DESC_VER_1) {
+ txreg = NVREG_TX_WM_DESC1_DEFAULT;
+ } else {
+ if ((np->linkspeed & NVREG_LINKSPEED_MASK) ==
+ NVREG_LINKSPEED_1000)
+ txreg = NVREG_TX_WM_DESC2_3_1000;
+ else
+ txreg = NVREG_TX_WM_DESC2_3_DEFAULT;
+ }
+ writel(txreg, base + NvRegTxWatermark);
+
+ writel(NVREG_MISC1_FORCE | (np->duplex ? 0 : NVREG_MISC1_HD),
+ base + NvRegMisc1);
+ pci_push(base);
+ writel(np->linkspeed, base + NvRegLinkSpeed);
+ pci_push(base);
+
+ return;
+}
+
/**
* nv_update_linkspeed: Setup the MAC according to the link partner
* @dev: Network device to be configured
@@ -3042,11 +3234,25 @@ static int nv_update_linkspeed(struct net_device *dev)
int newls = np->linkspeed;
int newdup = np->duplex;
int mii_status;
+ u32 bmcr;
int retval = 0;
u32 control_1000, status_1000, phyreg, pause_flags, txreg;
u32 txrxFlags = 0;
u32 phy_exp;
+ /* If device loopback is enabled, set carrier on and enable max link
+ * speed.
+ */
+ bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
+ if (bmcr & BMCR_LOOPBACK) {
+ if (netif_running(dev)) {
+ nv_force_linkspeed(dev, NVREG_LINKSPEED_1000, 1);
+ if (!netif_carrier_ok(dev))
+ netif_carrier_on(dev);
+ }
+ return 1;
+ }
+
/* BMSR_LSTATUS is latched, read it twice:
* we want the current value.
*/
@@ -3729,6 +3935,7 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
writel(0, base + NvRegMSIXMap0);
writel(0, base + NvRegMSIXMap1);
}
+ netdev_info(dev, "MSI-X enabled\n");
}
}
if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
@@ -3750,6 +3957,7 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
writel(0, base + NvRegMSIMap1);
/* enable msi vector 0 */
writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
+ netdev_info(dev, "MSI enabled\n");
}
}
if (ret != 0) {
@@ -3904,11 +4112,18 @@ static void nv_poll_controller(struct net_device *dev)
#endif
static void nv_do_stats_poll(unsigned long data)
+ __acquires(&netdev_priv(dev)->hwstats_lock)
+ __releases(&netdev_priv(dev)->hwstats_lock)
{
struct net_device *dev = (struct net_device *) data;
struct fe_priv *np = netdev_priv(dev);
- nv_get_hw_stats(dev);
+ /* If lock is currently taken, the stats are being refreshed
+ * and hence fresh enough */
+ if (spin_trylock(&np->hwstats_lock)) {
+ nv_update_stats(dev);
+ spin_unlock(&np->hwstats_lock);
+ }
if (!np->in_shutdown)
mod_timer(&np->stats_poll,
@@ -3918,9 +4133,9 @@ static void nv_do_stats_poll(unsigned long data)
static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct fe_priv *np = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, FORCEDETH_VERSION);
- strcpy(info->bus_info, pci_name(np->pci_dev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, FORCEDETH_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
@@ -4473,7 +4688,63 @@ static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam*
return 0;
}
-static u32 nv_fix_features(struct net_device *dev, u32 features)
+static int nv_set_loopback(struct net_device *dev, netdev_features_t features)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ unsigned long flags;
+ u32 miicontrol;
+ int err, retval = 0;
+
+ spin_lock_irqsave(&np->lock, flags);
+ miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
+ if (features & NETIF_F_LOOPBACK) {
+ if (miicontrol & BMCR_LOOPBACK) {
+ spin_unlock_irqrestore(&np->lock, flags);
+ netdev_info(dev, "Loopback already enabled\n");
+ return 0;
+ }
+ nv_disable_irq(dev);
+ /* Turn on loopback mode */
+ miicontrol |= BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
+ err = mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol);
+ if (err) {
+ retval = PHY_ERROR;
+ spin_unlock_irqrestore(&np->lock, flags);
+ phy_init(dev);
+ } else {
+ if (netif_running(dev)) {
+ /* Force 1000 Mbps full-duplex */
+ nv_force_linkspeed(dev, NVREG_LINKSPEED_1000,
+ 1);
+ /* Force link up */
+ netif_carrier_on(dev);
+ }
+ spin_unlock_irqrestore(&np->lock, flags);
+ netdev_info(dev,
+ "Internal PHY loopback mode enabled.\n");
+ }
+ } else {
+ if (!(miicontrol & BMCR_LOOPBACK)) {
+ spin_unlock_irqrestore(&np->lock, flags);
+ netdev_info(dev, "Loopback already disabled\n");
+ return 0;
+ }
+ nv_disable_irq(dev);
+ /* Turn off loopback */
+ spin_unlock_irqrestore(&np->lock, flags);
+ netdev_info(dev, "Internal PHY loopback mode disabled.\n");
+ phy_init(dev);
+ }
+ msleep(500);
+ spin_lock_irqsave(&np->lock, flags);
+ nv_enable_irq(dev);
+ spin_unlock_irqrestore(&np->lock, flags);
+
+ return retval;
+}
+
+static netdev_features_t nv_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
/* vlan is dependent on rx checksum offload */
if (features & (NETIF_F_HW_VLAN_TX|NETIF_F_HW_VLAN_RX))
@@ -4482,7 +4753,7 @@ static u32 nv_fix_features(struct net_device *dev, u32 features)
return features;
}
-static void nv_vlan_mode(struct net_device *dev, u32 features)
+static void nv_vlan_mode(struct net_device *dev, netdev_features_t features)
{
struct fe_priv *np = get_nvpriv(dev);
@@ -4503,11 +4774,18 @@ static void nv_vlan_mode(struct net_device *dev, u32 features)
spin_unlock_irq(&np->lock);
}
-static int nv_set_features(struct net_device *dev, u32 features)
+static int nv_set_features(struct net_device *dev, netdev_features_t features)
{
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
- u32 changed = dev->features ^ features;
+ netdev_features_t changed = dev->features ^ features;
+ int retval;
+
+ if ((changed & NETIF_F_LOOPBACK) && netif_running(dev)) {
+ retval = nv_set_loopback(dev, features);
+ if (retval != 0)
+ return retval;
+ }
if (changed & NETIF_F_RXCSUM) {
spin_lock_irq(&np->lock);
@@ -4553,14 +4831,18 @@ static int nv_get_sset_count(struct net_device *dev, int sset)
}
}
-static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *buffer)
+static void nv_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *estats, u64 *buffer)
+ __acquires(&netdev_priv(dev)->hwstats_lock)
+ __releases(&netdev_priv(dev)->hwstats_lock)
{
struct fe_priv *np = netdev_priv(dev);
- /* update stats */
- nv_get_hw_stats(dev);
-
- memcpy(buffer, &np->estats, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64));
+ spin_lock_bh(&np->hwstats_lock);
+ nv_update_stats(dev);
+ memcpy(buffer, &np->estats,
+ nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64));
+ spin_unlock_bh(&np->hwstats_lock);
}
static int nv_link_test(struct net_device *dev)
@@ -5142,6 +5424,12 @@ static int nv_open(struct net_device *dev)
spin_unlock_irq(&np->lock);
+ /* If the loopback feature was set while the device was down, make sure
+ * that it's set correctly now.
+ */
+ if (dev->features & NETIF_F_LOOPBACK)
+ nv_set_loopback(dev, dev->features);
+
return 0;
out_drain:
nv_drain_rxtx(dev);
@@ -5198,7 +5486,7 @@ static int nv_close(struct net_device *dev)
static const struct net_device_ops nv_netdev_ops = {
.ndo_open = nv_open,
.ndo_stop = nv_close,
- .ndo_get_stats = nv_get_stats,
+ .ndo_get_stats64 = nv_get_stats64,
.ndo_start_xmit = nv_start_xmit,
.ndo_tx_timeout = nv_tx_timeout,
.ndo_change_mtu = nv_change_mtu,
@@ -5215,7 +5503,7 @@ static const struct net_device_ops nv_netdev_ops = {
static const struct net_device_ops nv_netdev_ops_optimized = {
.ndo_open = nv_open,
.ndo_stop = nv_close,
- .ndo_get_stats = nv_get_stats,
+ .ndo_get_stats64 = nv_get_stats64,
.ndo_start_xmit = nv_start_xmit_optimized,
.ndo_tx_timeout = nv_tx_timeout,
.ndo_change_mtu = nv_change_mtu,
@@ -5254,6 +5542,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
np->dev = dev;
np->pci_dev = pci_dev;
spin_lock_init(&np->lock);
+ spin_lock_init(&np->hwstats_lock);
SET_NETDEV_DEV(dev, &pci_dev->dev);
init_timer(&np->oom_kick);
@@ -5262,7 +5551,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
init_timer(&np->nic_poll);
np->nic_poll.data = (unsigned long) dev;
np->nic_poll.function = nv_do_nic_poll; /* timer handler */
- init_timer(&np->stats_poll);
+ init_timer_deferrable(&np->stats_poll);
np->stats_poll.data = (unsigned long) dev;
np->stats_poll.function = nv_do_stats_poll; /* timer handler */
@@ -5346,6 +5635,9 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
dev->features |= dev->hw_features;
+ /* Add loopback capability to the device. */
+ dev->hw_features |= NETIF_F_LOOPBACK;
+
np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG;
if ((id->driver_data & DEV_HAS_PAUSEFRAME_TX_V1) ||
(id->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) ||
@@ -5621,12 +5913,14 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n",
dev->name, np->phy_oui, np->phyaddr, dev->dev_addr);
- dev_info(&pci_dev->dev, "%s%s%s%s%s%s%s%s%s%sdesc-v%u\n",
+ dev_info(&pci_dev->dev, "%s%s%s%s%s%s%s%s%s%s%sdesc-v%u\n",
dev->features & NETIF_F_HIGHDMA ? "highdma " : "",
dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG) ?
"csum " : "",
dev->features & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX) ?
"vlan " : "",
+ dev->features & (NETIF_F_LOOPBACK) ?
+ "loopback " : "",
id->driver_data & DEV_HAS_POWER_CNTRL ? "pwrctl " : "",
id->driver_data & DEV_HAS_MGMT_UNIT ? "mgmt " : "",
id->driver_data & DEV_NEED_TIMERIRQ ? "timirq " : "",
@@ -6000,6 +6294,9 @@ module_param(phy_cross, int, 0);
MODULE_PARM_DESC(phy_cross, "Phy crossover detection for Realtek 8201 phy is enabled by setting to 1 and disabled by setting to 0.");
module_param(phy_power_down, int, 0);
MODULE_PARM_DESC(phy_power_down, "Power down phy and disable link when interface is down (1), or leave phy powered up (0).");
+module_param(debug_tx_timeout, bool, 0);
+MODULE_PARM_DESC(debug_tx_timeout,
+ "Dump tx related registers and ring when tx_timeout happens");
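With this parameter the verbose register and ring dump removed from the default tx_timeout path stays available on demand, e.g. by loading the module with debug_tx_timeout=1 (a usage note; the parameter is registered without sysfs permission bits, so it is a load-time switch).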
MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
index 8c8027176bef..ac4e72d529e5 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
@@ -161,10 +161,10 @@ static void pch_gbe_get_drvinfo(struct net_device *netdev,
{
struct pch_gbe_adapter *adapter = netdev_priv(netdev);
- strcpy(drvinfo->driver, KBUILD_MODNAME);
- strcpy(drvinfo->version, pch_driver_version);
- strcpy(drvinfo->fw_version, "N/A");
- strcpy(drvinfo->bus_info, pci_name(adapter->pdev));
+ strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, pch_driver_version, sizeof(drvinfo->version));
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info));
drvinfo->regdump_len = pch_gbe_get_regs_len(netdev);
}
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 48406ca382f1..964e9c0948bc 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -2109,10 +2109,11 @@ static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)
* Returns
* 0: HW state updated successfully
*/
-static int pch_gbe_set_features(struct net_device *netdev, u32 features)
+static int pch_gbe_set_features(struct net_device *netdev,
+ netdev_features_t features)
{
struct pch_gbe_adapter *adapter = netdev_priv(netdev);
- u32 changed = features ^ netdev->features;
+ netdev_features_t changed = features ^ netdev->features;
if (!(changed & NETIF_F_RXCSUM))
return 0;
diff --git a/drivers/net/ethernet/pasemi/Makefile b/drivers/net/ethernet/pasemi/Makefile
index 05db5434bafc..90497ffb1ac3 100644
--- a/drivers/net/ethernet/pasemi/Makefile
+++ b/drivers/net/ethernet/pasemi/Makefile
@@ -2,4 +2,5 @@
# Makefile for the A Semi network device drivers.
#
-obj-$(CONFIG_PASEMI_MAC) += pasemi_mac.o pasemi_mac_ethtool.o
+obj-$(CONFIG_PASEMI_MAC) += pasemi_mac_driver.o
+pasemi_mac_driver-objs := pasemi_mac.o pasemi_mac_ethtool.o
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
index e09ea83b8c47..8a371985319f 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
@@ -83,14 +83,18 @@ netxen_nic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
u32 fw_minor = 0;
u32 fw_build = 0;
- strncpy(drvinfo->driver, netxen_nic_driver_name, 32);
- strncpy(drvinfo->version, NETXEN_NIC_LINUX_VERSIONID, 32);
+ strlcpy(drvinfo->driver, netxen_nic_driver_name,
+ sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, NETXEN_NIC_LINUX_VERSIONID,
+ sizeof(drvinfo->version));
fw_major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR);
fw_minor = NXRD32(adapter, NETXEN_FW_VERSION_MINOR);
fw_build = NXRD32(adapter, NETXEN_FW_VERSION_SUB);
- sprintf(drvinfo->fw_version, "%d.%d.%d", fw_major, fw_minor, fw_build);
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%d.%d.%d", fw_major, fw_minor, fw_build);
- strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info));
drvinfo->regdump_len = NETXEN_NIC_REGS_LEN;
drvinfo->eedump_len = netxen_nic_get_eeprom_len(dev);
}
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 8cf3173ba488..7dd9a4b107e6 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -544,7 +544,8 @@ static void netxen_set_multicast_list(struct net_device *dev)
adapter->set_multi(dev);
}
-static u32 netxen_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t netxen_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
if (!(features & NETIF_F_RXCSUM)) {
netdev_info(dev, "disabling LRO as RXCSUM is off\n");
@@ -555,7 +556,8 @@ static u32 netxen_fix_features(struct net_device *dev, u32 features)
return features;
}
-static int netxen_set_features(struct net_device *dev, u32 features)
+static int netxen_set_features(struct net_device *dev,
+ netdev_features_t features)
{
struct netxen_adapter *adapter = netdev_priv(dev);
int hw_lro;
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index a4bdff438a5e..7931531c3a40 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -1735,10 +1735,11 @@ static void ql_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *drvinfo)
{
struct ql3_adapter *qdev = netdev_priv(ndev);
- strncpy(drvinfo->driver, ql3xxx_driver_name, 32);
- strncpy(drvinfo->version, ql3xxx_driver_version, 32);
- strncpy(drvinfo->fw_version, "N/A", 32);
- strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32);
+ strlcpy(drvinfo->driver, ql3xxx_driver_name, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, ql3xxx_driver_version,
+ sizeof(drvinfo->version));
+ strlcpy(drvinfo->bus_info, pci_name(qdev->pdev),
+ sizeof(drvinfo->bus_info));
drvinfo->regdump_len = 0;
drvinfo->eedump_len = 0;
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 7ed53dbb8646..60976fc4ccc6 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -1466,8 +1466,9 @@ void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup);
int qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu);
int qlcnic_change_mtu(struct net_device *netdev, int new_mtu);
-u32 qlcnic_fix_features(struct net_device *netdev, u32 features);
-int qlcnic_set_features(struct net_device *netdev, u32 features);
+netdev_features_t qlcnic_fix_features(struct net_device *netdev,
+ netdev_features_t features);
+int qlcnic_set_features(struct net_device *netdev, netdev_features_t features);
int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable);
int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable);
int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 8aa1c6e8667b..cc228cf3d84b 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -140,11 +140,14 @@ qlcnic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
- sprintf(drvinfo->fw_version, "%d.%d.%d", fw_major, fw_minor, fw_build);
-
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
- strlcpy(drvinfo->driver, qlcnic_driver_name, 32);
- strlcpy(drvinfo->version, QLCNIC_LINUX_VERSIONID, 32);
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%d.%d.%d", fw_major, fw_minor, fw_build);
+
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info));
+ strlcpy(drvinfo->driver, qlcnic_driver_name, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, QLCNIC_LINUX_VERSIONID,
+ sizeof(drvinfo->version));
}
static int
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index bcb81e47543a..b528e52a8ee1 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -817,12 +817,13 @@ int qlcnic_change_mtu(struct net_device *netdev, int mtu)
}
-u32 qlcnic_fix_features(struct net_device *netdev, u32 features)
+netdev_features_t qlcnic_fix_features(struct net_device *netdev,
+ netdev_features_t features)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
if ((adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
- u32 changed = features ^ netdev->features;
+ netdev_features_t changed = features ^ netdev->features;
features ^= changed & (NETIF_F_ALL_CSUM | NETIF_F_RXCSUM);
}
@@ -833,10 +834,10 @@ u32 qlcnic_fix_features(struct net_device *netdev, u32 features)
}
-int qlcnic_set_features(struct net_device *netdev, u32 features)
+int qlcnic_set_features(struct net_device *netdev, netdev_features_t features)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
- u32 changed = netdev->features ^ features;
+ netdev_features_t changed = netdev->features ^ features;
int hw_lro = (features & NETIF_F_LRO) ? QLCNIC_LRO_ENABLED : 0;
if (!(changed & NETIF_F_LRO))
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 0bd163828e33..69b8e4ef14d9 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -97,8 +97,8 @@ static int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
static void qlcnic_set_netdev_features(struct qlcnic_adapter *,
struct qlcnic_esw_func_cfg *);
-static void qlcnic_vlan_rx_add(struct net_device *, u16);
-static void qlcnic_vlan_rx_del(struct net_device *, u16);
+static int qlcnic_vlan_rx_add(struct net_device *, u16);
+static int qlcnic_vlan_rx_del(struct net_device *, u16);
/* PCI Device ID Table */
#define ENTRY(device) \
@@ -735,20 +735,22 @@ qlcnic_set_vlan_config(struct qlcnic_adapter *adapter,
adapter->pvid = 0;
}
-static void
+static int
qlcnic_vlan_rx_add(struct net_device *netdev, u16 vid)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
set_bit(vid, adapter->vlans);
+ return 0;
}
-static void
+static int
qlcnic_vlan_rx_del(struct net_device *netdev, u16 vid)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
qlcnic_restore_indev_addr(netdev, NETDEV_DOWN);
clear_bit(vid, adapter->vlans);
+ return 0;
}
static void
@@ -792,7 +794,7 @@ qlcnic_set_netdev_features(struct qlcnic_adapter *adapter,
struct qlcnic_esw_func_cfg *esw_cfg)
{
struct net_device *netdev = adapter->netdev;
- unsigned long features, vlan_features;
+ netdev_features_t features, vlan_features;
features = (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
NETIF_F_IPV6_CSUM | NETIF_F_GRO);
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h
index 8731f79c9efc..b8478aab050e 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge.h
+++ b/drivers/net/ethernet/qlogic/qlge/qlge.h
@@ -58,10 +58,8 @@
#define TX_DESC_PER_IOCB 8
-/* The maximum number of frags we handle is based
- * on PAGE_SIZE...
- */
-#if (PAGE_SHIFT == 12) || (PAGE_SHIFT == 13) /* 4k & 8k pages */
+
+#if ((MAX_SKB_FRAGS - TX_DESC_PER_IOCB) + 2) > 0
#define TX_DESC_PER_OAL ((MAX_SKB_FRAGS - TX_DESC_PER_IOCB) + 2)
#else /* all other page sizes */
#define TX_DESC_PER_OAL 0
@@ -1353,7 +1351,7 @@ struct tx_ring_desc {
struct ob_mac_iocb_req *queue_entry;
u32 index;
struct oal oal;
- struct map_list map[MAX_SKB_FRAGS + 1];
+ struct map_list map[MAX_SKB_FRAGS + 2];
int map_cnt;
struct tx_ring_desc *next;
};
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
index 9b67bfea035f..8e2c2a74f3a5 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
@@ -366,13 +366,16 @@ static void ql_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *drvinfo)
{
struct ql_adapter *qdev = netdev_priv(ndev);
- strncpy(drvinfo->driver, qlge_driver_name, 32);
- strncpy(drvinfo->version, qlge_driver_version, 32);
- snprintf(drvinfo->fw_version, 32, "v%d.%d.%d",
+ strlcpy(drvinfo->driver, qlge_driver_name, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, qlge_driver_version,
+ sizeof(drvinfo->version));
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "v%d.%d.%d",
(qdev->fw_rev_id & 0x00ff0000) >> 16,
(qdev->fw_rev_id & 0x0000ff00) >> 8,
(qdev->fw_rev_id & 0x000000ff));
- strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32);
+ strlcpy(drvinfo->bus_info, pci_name(qdev->pdev),
+ sizeof(drvinfo->bus_info));
drvinfo->n_stats = 0;
drvinfo->testinfo_len = 0;
if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index c92afcd912e2..b54898737284 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -2307,7 +2307,7 @@ static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
return work_done;
}
-static void qlge_vlan_mode(struct net_device *ndev, u32 features)
+static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
{
struct ql_adapter *qdev = netdev_priv(ndev);
@@ -2323,7 +2323,8 @@ static void qlge_vlan_mode(struct net_device *ndev, u32 features)
}
}
-static u32 qlge_fix_features(struct net_device *ndev, u32 features)
+static netdev_features_t qlge_fix_features(struct net_device *ndev,
+ netdev_features_t features)
{
/*
* Since there is no support for separate rx/tx vlan accel
@@ -2337,9 +2338,10 @@ static u32 qlge_fix_features(struct net_device *ndev, u32 features)
return features;
}
-static int qlge_set_features(struct net_device *ndev, u32 features)
+static int qlge_set_features(struct net_device *ndev,
+ netdev_features_t features)
{
- u32 changed = ndev->features ^ features;
+ netdev_features_t changed = ndev->features ^ features;
if (changed & NETIF_F_HW_VLAN_RX)
qlge_vlan_mode(ndev, features);
@@ -2347,56 +2349,66 @@ static int qlge_set_features(struct net_device *ndev, u32 features)
return 0;
}
-static void __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
+static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
{
u32 enable_bit = MAC_ADDR_E;
+ int err;
- if (ql_set_mac_addr_reg
- (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
+ err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
+ MAC_ADDR_TYPE_VLAN, vid);
+ if (err)
netif_err(qdev, ifup, qdev->ndev,
"Failed to init vlan address.\n");
- }
+ return err;
}
-static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
+static int qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
{
struct ql_adapter *qdev = netdev_priv(ndev);
int status;
+ int err;
status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
if (status)
- return;
+ return status;
- __qlge_vlan_rx_add_vid(qdev, vid);
+ err = __qlge_vlan_rx_add_vid(qdev, vid);
set_bit(vid, qdev->active_vlans);
ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+
+ return err;
}
-static void __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
+static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
{
u32 enable_bit = 0;
+ int err;
- if (ql_set_mac_addr_reg
- (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
+ err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
+ MAC_ADDR_TYPE_VLAN, vid);
+ if (err)
netif_err(qdev, ifup, qdev->ndev,
"Failed to clear vlan address.\n");
- }
+ return err;
}
-static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
+static int qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
{
struct ql_adapter *qdev = netdev_priv(ndev);
int status;
+ int err;
status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
if (status)
- return;
+ return status;
- __qlge_vlan_rx_kill_vid(qdev, vid);
+ err = __qlge_vlan_rx_kill_vid(qdev, vid);
clear_bit(vid, qdev->active_vlans);
ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+
+ return err;
}
static void qlge_restore_vlan(struct ql_adapter *qdev)
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index 4bf68cfef390..cb0eca807852 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -52,12 +52,6 @@
#define DRV_VERSION "0.28"
#define DRV_RELDATE "07Oct2011"
-/* PHY CHIP Address */
-#define PHY1_ADDR 1 /* For MAC1 */
-#define PHY2_ADDR 3 /* For MAC2 */
-#define PHY_MODE 0x3100 /* PHY CHIP Register 0 */
-#define PHY_CAP 0x01E1 /* PHY CHIP Register 4 */
-
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT (6000 * HZ / 1000)
@@ -69,8 +63,11 @@
/* MAC registers */
#define MCR0 0x00 /* Control register 0 */
+#define MCR0_RCVEN 0x0002 /* Receive enable */
#define MCR0_PROMISC 0x0020 /* Promiscuous mode */
#define MCR0_HASH_EN 0x0100 /* Enable multicast hash table function */
+#define MCR0_XMTEN 0x1000 /* Transmission enable */
+#define MCR0_FD 0x8000 /* Full/Half duplex */
#define MCR1 0x04 /* Control register 1 */
#define MAC_RST 0x0001 /* Reset the MAC */
#define MBCR 0x08 /* Bus control */
@@ -129,6 +126,7 @@
#define PHY_CC 0x88 /* PHY status change configuration register */
#define PHY_ST 0x8A /* PHY status register */
#define MAC_SM 0xAC /* MAC status machine */
+#define MAC_SM_RST 0x0002 /* MAC status machine reset */
#define MAC_ID 0xBE /* Identifier register */
#define TX_DCNT 0x80 /* TX descriptor count */
@@ -154,9 +152,6 @@
#define DSC_RX_MIDH_HIT 0x0004 /* RX MID table hit (no error) */
#define DSC_RX_IDX_MID_MASK 3 /* RX mask for the index of matched MIDx */
-/* PHY settings */
-#define ICPLUS_PHY_ID 0x0243
-
MODULE_AUTHOR("Sten Wang <sten.wang@rdc.com.tw>,"
"Daniel Gimpelevich <daniel@gimpelevich.san-francisco.ca.us>,"
"Florian Fainelli <florian@openwrt.org>");
@@ -178,7 +173,7 @@ struct r6040_descriptor {
struct r6040_descriptor *vndescp; /* 14-17 */
struct sk_buff *skb_ptr; /* 18-1B */
u32 rev2; /* 1C-1F */
-} __attribute__((aligned(32)));
+} __aligned(32);
struct r6040_private {
spinlock_t lock; /* driver lock */
@@ -191,7 +186,7 @@ struct r6040_private {
struct r6040_descriptor *tx_ring;
dma_addr_t rx_ring_dma;
dma_addr_t tx_ring_dma;
- u16 tx_free_desc, phy_addr;
+ u16 tx_free_desc;
u16 mcr0, mcr1;
struct net_device *dev;
struct mii_bus *mii_bus;
@@ -206,8 +201,6 @@ static char version[] __devinitdata = DRV_NAME
": RDC R6040 NAPI net driver,"
"version "DRV_VERSION " (" DRV_RELDATE ")";
-static int phy_table[] = { PHY1_ADDR, PHY2_ADDR };
-
/* Read a word data from PHY Chip */
static int r6040_phy_read(void __iomem *ioaddr, int phy_addr, int reg)
{
@@ -379,11 +372,11 @@ static void r6040_init_mac_regs(struct net_device *dev)
iowrite16(MAC_RST, ioaddr + MCR1);
while (limit--) {
cmd = ioread16(ioaddr + MCR1);
- if (cmd & 0x1)
+ if (cmd & MAC_RST)
break;
}
/* Reset internal state machine */
- iowrite16(2, ioaddr + MAC_SM);
+ iowrite16(MAC_SM_RST, ioaddr + MAC_SM);
iowrite16(0, ioaddr + MAC_SM);
mdelay(5);
@@ -409,7 +402,7 @@ static void r6040_init_mac_regs(struct net_device *dev)
iowrite16(INT_MASK, ioaddr + MIER);
/* Enable TX and RX */
- iowrite16(lp->mcr0 | 0x0002, ioaddr);
+ iowrite16(lp->mcr0 | MCR0_RCVEN, ioaddr);
/* Let TX poll the descriptors
* we may got called by r6040_tx_timeout which has left
@@ -461,7 +454,7 @@ static void r6040_down(struct net_device *dev)
iowrite16(MAC_RST, ioaddr + MCR1); /* Reset RDC MAC */
while (limit--) {
cmd = ioread16(ioaddr + MCR1);
- if (cmd & 0x1)
+ if (cmd & MAC_RST)
break;
}
@@ -742,9 +735,10 @@ static void r6040_mac_address(struct net_device *dev)
void __iomem *ioaddr = lp->base;
u16 *adrp;
- /* MAC operation register */
- iowrite16(0x01, ioaddr + MCR1); /* Reset MAC */
- iowrite16(2, ioaddr + MAC_SM); /* Reset internal state machine */
+ /* Reset MAC */
+ iowrite16(MAC_RST, ioaddr + MCR1);
+ /* Reset internal state machine */
+ iowrite16(MAC_SM_RST, ioaddr + MAC_SM);
iowrite16(0, ioaddr + MAC_SM);
mdelay(5);
@@ -1013,7 +1007,7 @@ static void r6040_adjust_link(struct net_device *dev)
/* reflect duplex change */
if (phydev->link && (lp->old_duplex != phydev->duplex)) {
- lp->mcr0 |= (phydev->duplex == DUPLEX_FULL ? 0x8000 : 0);
+ lp->mcr0 |= (phydev->duplex == DUPLEX_FULL ? MCR0_FD : 0);
iowrite16(lp->mcr0, ioaddr);
status_changed = 1;
@@ -1166,8 +1160,7 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
lp->dev = dev;
/* Init RDC private data */
- lp->mcr0 = 0x1002;
- lp->phy_addr = phy_table[card_idx];
+ lp->mcr0 = MCR0_XMTEN | MCR0_RCVEN;
/* The RDC-specific entries in the device structure. */
dev->netdev_ops = &r6040_netdev_ops;
@@ -1188,7 +1181,8 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
lp->mii_bus->write = r6040_mdiobus_write;
lp->mii_bus->reset = r6040_mdiobus_reset;
lp->mii_bus->name = "r6040_eth_mii";
- snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%x", card_idx);
+ snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+ dev_name(&pdev->dev), card_idx);
lp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
if (!lp->mii_bus->irq) {
dev_err(&pdev->dev, "mii_bus irq allocation failed\n");
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index ee5da9293ce0..abc79076f867 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -563,6 +563,7 @@ rx_next:
if (cpr16(IntrStatus) & cp_rx_intr_mask)
goto rx_status_loop;
+ napi_gro_flush(napi);
spin_lock_irqsave(&cp->lock, flags);
__napi_complete(napi);
cpw16_f(IntrMask, cp_intr_mask);
@@ -859,7 +860,6 @@ static void __cp_set_rx_mode (struct net_device *dev)
struct cp_private *cp = netdev_priv(dev);
u32 mc_filter[2]; /* Multicast hash filter */
int rx_mode;
- u32 tmp;
/* Note: do not reorder, GCC is clever about common statements. */
if (dev->flags & IFF_PROMISC) {
@@ -886,11 +886,9 @@ static void __cp_set_rx_mode (struct net_device *dev)
}
/* We can safely update without stopping the chip. */
- tmp = cp_rx_config | rx_mode;
- if (cp->rx_config != tmp) {
- cpw32_f (RxConfig, tmp);
- cp->rx_config = tmp;
- }
+ cp->rx_config = cp_rx_config | rx_mode;
+ cpw32_f(RxConfig, cp->rx_config);
+
cpw32_f (MAR0 + 0, mc_filter[0]);
cpw32_f (MAR0 + 4, mc_filter[1]);
}
@@ -1319,9 +1317,9 @@ static void cp_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info
{
struct cp_private *cp = netdev_priv(dev);
- strcpy (info->driver, DRV_NAME);
- strcpy (info->version, DRV_VERSION);
- strcpy (info->bus_info, pci_name(cp->pdev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info));
}
static void cp_get_ringparam(struct net_device *dev,
@@ -1392,7 +1390,7 @@ static void cp_set_msglevel(struct net_device *dev, u32 value)
cp->msg_enable = value;
}
-static int cp_set_features(struct net_device *dev, u32 features)
+static int cp_set_features(struct net_device *dev, netdev_features_t features)
{
struct cp_private *cp = netdev_priv(dev);
unsigned long flags;
@@ -1589,7 +1587,7 @@ static int cp_set_mac_address(struct net_device *dev, void *p)
No extra delay is needed with 33Mhz PCI, but 66Mhz may change this.
*/
-#define eeprom_delay() readl(ee_addr)
+#define eeprom_delay() readb(ee_addr)
/* The EEPROM commands include the alway-set leading bit. */
#define EE_EXTEND_CMD (4)
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index 4d6b254fc6c1..a8779bedb3d9 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -1122,7 +1122,7 @@ static void __devexit rtl8139_remove_one (struct pci_dev *pdev)
No extra delay is needed with 33Mhz PCI, but 66Mhz may change this.
*/
-#define eeprom_delay() (void)RTL_R32(Cfg9346)
+#define eeprom_delay() (void)RTL_R8(Cfg9346)
/* The EEPROM commands include the alway-set leading bit. */
#define EE_WRITE_CMD (5)
@@ -2330,9 +2330,9 @@ static int rtl8139_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
static void rtl8139_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct rtl8139_private *tp = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, pci_name(tp->pci_dev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
info->regdump_len = tp->regs_len;
}
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 6f06aa10f0d7..7a0c800b50ad 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -69,9 +69,6 @@
The RTL chips use a 64 element hash table based on the Ethernet CRC. */
static const int multicast_filter_limit = 32;
-/* MAC address length */
-#define MAC_ADDR_LEN 6
-
#define MAX_READ_REQUEST_SHIFT 12
#define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
#define SafeMtu 0x1c20 /* ... actually life sucks beyond ~7k */
@@ -477,7 +474,6 @@ enum rtl_register_content {
/* Config1 register p.24 */
LEDS1 = (1 << 7),
LEDS0 = (1 << 6),
- MSIEnable = (1 << 5), /* Enable Message Signaled Interrupt */
Speed_down = (1 << 4),
MEMMAP = (1 << 3),
IOMAP = (1 << 2),
@@ -485,6 +481,7 @@ enum rtl_register_content {
PMEnable = (1 << 0), /* Power Management Enable */
/* Config2 register p. 25 */
+ MSIEnable = (1 << 5), /* 8169 only. Reserved in the 8168. */
PCI_Clock_66MHz = 0x01,
PCI_Clock_33MHz = 0x00,
@@ -1183,11 +1180,13 @@ static u8 rtl8168d_efuse_read(void __iomem *ioaddr, int reg_addr)
return value;
}
-static void rtl8169_irq_mask_and_ack(void __iomem *ioaddr)
+static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp)
{
- RTL_W16(IntrMask, 0x0000);
+ void __iomem *ioaddr = tp->mmio_addr;
- RTL_W16(IntrStatus, 0xffff);
+ RTL_W16(IntrMask, 0x0000);
+ RTL_W16(IntrStatus, tp->intr_event);
+ RTL_R8(ChipCmd);
}
static unsigned int rtl8169_tbi_reset_pending(struct rtl8169_private *tp)
@@ -1404,12 +1403,13 @@ static void rtl8169_get_drvinfo(struct net_device *dev,
struct rtl8169_private *tp = netdev_priv(dev);
struct rtl_fw *rtl_fw = tp->rtl_fw;
- strcpy(info->driver, MODULENAME);
- strcpy(info->version, RTL8169_VERSION);
- strcpy(info->bus_info, pci_name(tp->pci_dev));
+ strlcpy(info->driver, MODULENAME, sizeof(info->driver));
+ strlcpy(info->version, RTL8169_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version));
- strcpy(info->fw_version, IS_ERR_OR_NULL(rtl_fw) ? "N/A" :
- rtl_fw->version);
+ if (!IS_ERR_OR_NULL(rtl_fw))
+ strlcpy(info->fw_version, rtl_fw->version,
+ sizeof(info->fw_version));
}
static int rtl8169_get_regs_len(struct net_device *dev)
@@ -1553,7 +1553,8 @@ static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
return ret;
}
-static u32 rtl8169_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t rtl8169_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
struct rtl8169_private *tp = netdev_priv(dev);
@@ -1567,7 +1568,8 @@ static u32 rtl8169_fix_features(struct net_device *dev, u32 features)
return features;
}
-static int rtl8169_set_features(struct net_device *dev, u32 features)
+static int rtl8169_set_features(struct net_device *dev,
+ netdev_features_t features)
{
struct rtl8169_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->mmio_addr;
@@ -3424,22 +3426,24 @@ static const struct rtl_cfg_info {
};
/* Cfg9346_Unlock assumed. */
-static unsigned rtl_try_msi(struct pci_dev *pdev, void __iomem *ioaddr,
+static unsigned rtl_try_msi(struct rtl8169_private *tp,
const struct rtl_cfg_info *cfg)
{
+ void __iomem *ioaddr = tp->mmio_addr;
unsigned msi = 0;
u8 cfg2;
cfg2 = RTL_R8(Config2) & ~MSIEnable;
if (cfg->features & RTL_FEATURE_MSI) {
- if (pci_enable_msi(pdev)) {
- dev_info(&pdev->dev, "no MSI. Back to INTx.\n");
+ if (pci_enable_msi(tp->pci_dev)) {
+ netif_info(tp, hw, tp->dev, "no MSI. Back to INTx.\n");
} else {
cfg2 |= MSIEnable;
msi = RTL_FEATURE_MSI;
}
}
- RTL_W8(Config2, cfg2);
+ if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
+ RTL_W8(Config2, cfg2);
return msi;
}
@@ -3933,8 +3937,6 @@ static void rtl_hw_reset(struct rtl8169_private *tp)
break;
udelay(100);
}
-
- rtl8169_init_ring_indexes(tp);
}
static int __devinit
@@ -4077,7 +4079,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->features |= RTL_FEATURE_WOL;
if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0)
tp->features |= RTL_FEATURE_WOL;
- tp->features |= rtl_try_msi(pdev, ioaddr, cfg);
+ tp->features |= rtl_try_msi(tp, cfg);
RTL_W8(Cfg9346, Cfg9346_Lock);
if (rtl_tbi_enabled(tp)) {
@@ -4099,7 +4101,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
spin_lock_init(&tp->lock);
/* Get MAC address */
- for (i = 0; i < MAC_ADDR_LEN; i++)
+ for (i = 0; i < ETH_ALEN; i++)
dev->dev_addr[i] = RTL_R8(MAC0 + i);
memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
@@ -4339,7 +4341,7 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
void __iomem *ioaddr = tp->mmio_addr;
/* Disable interrupts */
- rtl8169_irq_mask_and_ack(ioaddr);
+ rtl8169_irq_mask_and_ack(tp);
rtl_rx_close(tp);
@@ -4885,8 +4887,7 @@ static void rtl_hw_start_8168(struct net_device *dev)
RTL_W16(IntrMitigate, 0x5151);
/* Work around for RxFIFO overflow. */
- if (tp->mac_version == RTL_GIGA_MAC_VER_11 ||
- tp->mac_version == RTL_GIGA_MAC_VER_22) {
+ if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
tp->intr_event |= RxFIFOOver | PCSTimeout;
tp->intr_event &= ~RxOverflow;
}
@@ -5076,6 +5077,11 @@ static void rtl_hw_start_8101(struct net_device *dev)
void __iomem *ioaddr = tp->mmio_addr;
struct pci_dev *pdev = tp->pci_dev;
+ if (tp->mac_version >= RTL_GIGA_MAC_VER_30) {
+ tp->intr_event &= ~RxFIFOOver;
+ tp->napi_event &= ~RxFIFOOver;
+ }
+
if (tp->mac_version == RTL_GIGA_MAC_VER_13 ||
tp->mac_version == RTL_GIGA_MAC_VER_16) {
int cap = pci_pcie_cap(pdev);
@@ -5342,7 +5348,7 @@ static void rtl8169_wait_for_quiescence(struct net_device *dev)
/* Wait for any pending NAPI task to complete */
napi_disable(&tp->napi);
- rtl8169_irq_mask_and_ack(ioaddr);
+ rtl8169_irq_mask_and_ack(tp);
tp->intr_mask = 0xffff;
RTL_W16(IntrMask, tp->intr_event);
@@ -5389,14 +5395,16 @@ static void rtl8169_reset_task(struct work_struct *work)
if (!netif_running(dev))
goto out_unlock;
+ rtl8169_hw_reset(tp);
+
rtl8169_wait_for_quiescence(dev);
for (i = 0; i < NUM_RX_DESC; i++)
rtl8169_mark_to_asic(tp->RxDescArray + i, rx_buf_sz);
rtl8169_tx_clear(tp);
+ rtl8169_init_ring_indexes(tp);
- rtl8169_hw_reset(tp);
rtl_hw_start(dev);
netif_wake_queue(dev);
rtl8169_check_link_status(dev, tp, tp->mmio_addr);
@@ -5407,11 +5415,6 @@ out_unlock:
static void rtl8169_tx_timeout(struct net_device *dev)
{
- struct rtl8169_private *tp = netdev_priv(dev);
-
- rtl8169_hw_reset(tp);
-
- /* Let's wait a bit while any (async) irq lands on */
rtl8169_schedule_work(dev, rtl8169_reset_task);
}
@@ -5804,6 +5807,10 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
*/
status = RTL_R16(IntrStatus);
while (status && status != 0xffff) {
+ status &= tp->intr_event;
+ if (!status)
+ break;
+
handled = 1;
/* Handle all of the error cases first. These will reset
@@ -5818,27 +5825,9 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
switch (tp->mac_version) {
/* Work around for rx fifo overflow */
case RTL_GIGA_MAC_VER_11:
- case RTL_GIGA_MAC_VER_22:
- case RTL_GIGA_MAC_VER_26:
netif_stop_queue(dev);
rtl8169_tx_timeout(dev);
goto done;
- /* Testers needed. */
- case RTL_GIGA_MAC_VER_17:
- case RTL_GIGA_MAC_VER_19:
- case RTL_GIGA_MAC_VER_20:
- case RTL_GIGA_MAC_VER_21:
- case RTL_GIGA_MAC_VER_23:
- case RTL_GIGA_MAC_VER_24:
- case RTL_GIGA_MAC_VER_27:
- case RTL_GIGA_MAC_VER_28:
- case RTL_GIGA_MAC_VER_31:
- /* Experimental science. Pktgen proof. */
- case RTL_GIGA_MAC_VER_12:
- case RTL_GIGA_MAC_VER_25:
- if (status == RxFIFOOver)
- goto done;
- break;
default:
break;
}
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 9b230740c6ab..813d41c4a845 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1369,13 +1369,13 @@ static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
}
}
-static struct ethtool_ops sh_eth_ethtool_ops = {
+static const struct ethtool_ops sh_eth_ethtool_ops = {
.get_settings = sh_eth_get_settings,
.set_settings = sh_eth_set_settings,
- .nway_reset = sh_eth_nway_reset,
+ .nway_reset = sh_eth_nway_reset,
.get_msglevel = sh_eth_get_msglevel,
.set_msglevel = sh_eth_set_msglevel,
- .get_link = ethtool_op_get_link,
+ .get_link = ethtool_op_get_link,
.get_strings = sh_eth_get_strings,
.get_ethtool_stats = sh_eth_get_ethtool_stats,
.get_sset_count = sh_eth_get_sset_count,
@@ -1702,7 +1702,8 @@ static int sh_mdio_init(struct net_device *ndev, int id,
/* Hook up MII support for ethtool */
mdp->mii_bus->name = "sh_mii";
mdp->mii_bus->parent = &ndev->dev;
- snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%x", id);
+ snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+ mdp->pdev->name, id);
/* PHY IRQ */
mdp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
@@ -1957,18 +1958,7 @@ static struct platform_driver sh_eth_driver = {
},
};
-static int __init sh_eth_init(void)
-{
- return platform_driver_register(&sh_eth_driver);
-}
-
-static void __exit sh_eth_cleanup(void)
-{
- platform_driver_unregister(&sh_eth_driver);
-}
-
-module_init(sh_eth_init);
-module_exit(sh_eth_cleanup);
+module_platform_driver(sh_eth_driver);
MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda");
MODULE_DESCRIPTION("Renesas SuperH Ethernet driver");
diff --git a/drivers/net/ethernet/s6gmac.c b/drivers/net/ethernet/s6gmac.c
index a7ff8ea342b4..22e9c0181ce8 100644
--- a/drivers/net/ethernet/s6gmac.c
+++ b/drivers/net/ethernet/s6gmac.c
@@ -1004,7 +1004,7 @@ static int __devinit s6gmac_probe(struct platform_device *pdev)
mb->write = s6mii_write;
mb->reset = s6mii_reset;
mb->priv = pd;
- snprintf(mb->id, MII_BUS_ID_SIZE, "0");
+ snprintf(mb->id, MII_BUS_ID_SIZE, "%s-%x", pdev->name, pdev->id);
mb->phy_mask = ~(1 << 0);
mb->irq = &pd->mii.irq[0];
for (i = 0; i < PHY_MAX_ADDR; i++) {
diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
index c3673f151a41..f955a19eb22f 100644
--- a/drivers/net/ethernet/seeq/sgiseeq.c
+++ b/drivers/net/ethernet/seeq/sgiseeq.c
@@ -834,23 +834,7 @@ static struct platform_driver sgiseeq_driver = {
}
};
-static int __init sgiseeq_module_init(void)
-{
- if (platform_driver_register(&sgiseeq_driver)) {
- printk(KERN_ERR "Driver registration failed\n");
- return -ENODEV;
- }
-
- return 0;
-}
-
-static void __exit sgiseeq_module_exit(void)
-{
- platform_driver_unregister(&sgiseeq_driver);
-}
-
-module_init(sgiseeq_module_init);
-module_exit(sgiseeq_module_exit);
+module_platform_driver(sgiseeq_driver);
MODULE_DESCRIPTION("SGI Seeq 8003 driver");
MODULE_AUTHOR("Linux/MIPS Mailing List <linux-mips@linux-mips.org>");
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index d5731f1fe6d6..e43702f33b62 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -1336,7 +1336,8 @@ static int efx_probe_nic(struct efx_nic *efx)
if (efx->n_channels > 1)
get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key));
for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
- efx->rx_indir_table[i] = i % efx->n_rx_channels;
+ efx->rx_indir_table[i] =
+ ethtool_rxfh_indir_default(i, efx->n_rx_channels);
efx_set_channels(efx);
netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
@@ -1900,7 +1901,7 @@ static void efx_set_multicast_list(struct net_device *net_dev)
/* Otherwise efx_start_port() will do this */
}
-static int efx_set_features(struct net_device *net_dev, u32 data)
+static int efx_set_features(struct net_device *net_dev, netdev_features_t data)
{
struct efx_nic *efx = netdev_priv(net_dev);
@@ -2235,9 +2236,9 @@ static DEFINE_PCI_DEVICE_TABLE(efx_pci_table) = {
{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE,
PCI_DEVICE_ID_SOLARFLARE_SFC4000B),
.driver_data = (unsigned long) &falcon_b0_nic_type},
- {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, BETHPAGE_A_P_DEVID),
+ {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0803), /* SFC9020 */
.driver_data = (unsigned long) &siena_a0_nic_type},
- {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, SIENA_A_P_DEVID),
+ {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0813), /* SFL9021 */
.driver_data = (unsigned long) &siena_a0_nic_type},
{0} /* end of list */
};
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index 4764793ed234..a3541ac6ea01 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -14,10 +14,6 @@
#include "net_driver.h"
#include "filter.h"
-/* PCI IDs */
-#define BETHPAGE_A_P_DEVID 0x0803
-#define SIENA_A_P_DEVID 0x0813
-
/* Solarstorm controllers use BAR 0 for I/O space and BAR 2(&3) for memory */
#define EFX_MEM_BAR 2
@@ -65,13 +61,23 @@ extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
extern int efx_probe_filters(struct efx_nic *efx);
extern void efx_restore_filters(struct efx_nic *efx);
extern void efx_remove_filters(struct efx_nic *efx);
-extern int efx_filter_insert_filter(struct efx_nic *efx,
+extern s32 efx_filter_insert_filter(struct efx_nic *efx,
struct efx_filter_spec *spec,
bool replace);
-extern int efx_filter_remove_filter(struct efx_nic *efx,
- struct efx_filter_spec *spec);
+extern int efx_filter_remove_id_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id);
+extern int efx_filter_get_filter_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id, struct efx_filter_spec *);
extern void efx_filter_clear_rx(struct efx_nic *efx,
enum efx_filter_priority priority);
+extern u32 efx_filter_count_rx_used(struct efx_nic *efx,
+ enum efx_filter_priority priority);
+extern u32 efx_filter_get_rx_id_limit(struct efx_nic *efx);
+extern s32 efx_filter_get_rx_ids(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 *buf, u32 size);
#ifdef CONFIG_RFS_ACCEL
extern int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id);
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index f3cd96dfa398..29b2ebfef19f 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -818,9 +818,58 @@ static int efx_ethtool_reset(struct net_device *net_dev, u32 *flags)
return efx_reset(efx, rc);
}
+static int efx_ethtool_get_class_rule(struct efx_nic *efx,
+ struct ethtool_rx_flow_spec *rule)
+{
+ struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec;
+ struct ethtool_tcpip4_spec *ip_mask = &rule->m_u.tcp_ip4_spec;
+ struct efx_filter_spec spec;
+ u16 vid;
+ u8 proto;
+ int rc;
+
+ rc = efx_filter_get_filter_safe(efx, EFX_FILTER_PRI_MANUAL,
+ rule->location, &spec);
+ if (rc)
+ return rc;
+
+ if (spec.dmaq_id == 0xfff)
+ rule->ring_cookie = RX_CLS_FLOW_DISC;
+ else
+ rule->ring_cookie = spec.dmaq_id;
+
+ rc = efx_filter_get_eth_local(&spec, &vid,
+ rule->h_u.ether_spec.h_dest);
+ if (rc == 0) {
+ rule->flow_type = ETHER_FLOW;
+ memset(rule->m_u.ether_spec.h_dest, ~0, ETH_ALEN);
+ if (vid != EFX_FILTER_VID_UNSPEC) {
+ rule->flow_type |= FLOW_EXT;
+ rule->h_ext.vlan_tci = htons(vid);
+ rule->m_ext.vlan_tci = htons(0xfff);
+ }
+ return 0;
+ }
+
+ rc = efx_filter_get_ipv4_local(&spec, &proto,
+ &ip_entry->ip4dst, &ip_entry->pdst);
+ if (rc != 0) {
+ rc = efx_filter_get_ipv4_full(
+ &spec, &proto, &ip_entry->ip4src, &ip_entry->psrc,
+ &ip_entry->ip4dst, &ip_entry->pdst);
+ EFX_WARN_ON_PARANOID(rc);
+ ip_mask->ip4src = ~0;
+ ip_mask->psrc = ~0;
+ }
+ rule->flow_type = (proto == IPPROTO_TCP) ? TCP_V4_FLOW : UDP_V4_FLOW;
+ ip_mask->ip4dst = ~0;
+ ip_mask->pdst = ~0;
+ return rc;
+}
+
static int
efx_ethtool_get_rxnfc(struct net_device *net_dev,
- struct ethtool_rxnfc *info, u32 *rules __always_unused)
+ struct ethtool_rxnfc *info, u32 *rule_locs)
{
struct efx_nic *efx = netdev_priv(net_dev);
@@ -862,42 +911,80 @@ efx_ethtool_get_rxnfc(struct net_device *net_dev,
return 0;
}
+ case ETHTOOL_GRXCLSRLCNT:
+ info->data = efx_filter_get_rx_id_limit(efx);
+ if (info->data == 0)
+ return -EOPNOTSUPP;
+ info->data |= RX_CLS_LOC_SPECIAL;
+ info->rule_cnt =
+ efx_filter_count_rx_used(efx, EFX_FILTER_PRI_MANUAL);
+ return 0;
+
+ case ETHTOOL_GRXCLSRULE:
+ if (efx_filter_get_rx_id_limit(efx) == 0)
+ return -EOPNOTSUPP;
+ return efx_ethtool_get_class_rule(efx, &info->fs);
+
+ case ETHTOOL_GRXCLSRLALL: {
+ s32 rc;
+ info->data = efx_filter_get_rx_id_limit(efx);
+ if (info->data == 0)
+ return -EOPNOTSUPP;
+ rc = efx_filter_get_rx_ids(efx, EFX_FILTER_PRI_MANUAL,
+ rule_locs, info->rule_cnt);
+ if (rc < 0)
+ return rc;
+ info->rule_cnt = rc;
+ return 0;
+ }
+
default:
return -EOPNOTSUPP;
}
}
-static int efx_ethtool_set_rx_ntuple(struct net_device *net_dev,
- struct ethtool_rx_ntuple *ntuple)
+static int efx_ethtool_set_class_rule(struct efx_nic *efx,
+ struct ethtool_rx_flow_spec *rule)
{
- struct efx_nic *efx = netdev_priv(net_dev);
- struct ethtool_tcpip4_spec *ip_entry = &ntuple->fs.h_u.tcp_ip4_spec;
- struct ethtool_tcpip4_spec *ip_mask = &ntuple->fs.m_u.tcp_ip4_spec;
- struct ethhdr *mac_entry = &ntuple->fs.h_u.ether_spec;
- struct ethhdr *mac_mask = &ntuple->fs.m_u.ether_spec;
- struct efx_filter_spec filter;
+ struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec;
+ struct ethtool_tcpip4_spec *ip_mask = &rule->m_u.tcp_ip4_spec;
+ struct ethhdr *mac_entry = &rule->h_u.ether_spec;
+ struct ethhdr *mac_mask = &rule->m_u.ether_spec;
+ struct efx_filter_spec spec;
int rc;
- /* Range-check action */
- if (ntuple->fs.action < ETHTOOL_RXNTUPLE_ACTION_CLEAR ||
- ntuple->fs.action >= (s32)efx->n_rx_channels)
+ /* Check that user wants us to choose the location */
+ if (rule->location != RX_CLS_LOC_ANY &&
+ rule->location != RX_CLS_LOC_FIRST &&
+ rule->location != RX_CLS_LOC_LAST)
+ return -EINVAL;
+
+ /* Range-check ring_cookie */
+ if (rule->ring_cookie >= efx->n_rx_channels &&
+ rule->ring_cookie != RX_CLS_FLOW_DISC)
return -EINVAL;
- if (~ntuple->fs.data_mask)
+ /* Check for unsupported extensions */
+ if ((rule->flow_type & FLOW_EXT) &&
+ (rule->m_ext.vlan_etype | rule->m_ext.data[0] |
+ rule->m_ext.data[1]))
return -EINVAL;
- efx_filter_init_rx(&filter, EFX_FILTER_PRI_MANUAL, 0,
- (ntuple->fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP) ?
- 0xfff : ntuple->fs.action);
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL,
+ (rule->location == RX_CLS_LOC_FIRST) ?
+ EFX_FILTER_FLAG_RX_OVERRIDE_IP : 0,
+ (rule->ring_cookie == RX_CLS_FLOW_DISC) ?
+ 0xfff : rule->ring_cookie);
- switch (ntuple->fs.flow_type) {
+ switch (rule->flow_type) {
case TCP_V4_FLOW:
case UDP_V4_FLOW: {
- u8 proto = (ntuple->fs.flow_type == TCP_V4_FLOW ?
+ u8 proto = (rule->flow_type == TCP_V4_FLOW ?
IPPROTO_TCP : IPPROTO_UDP);
/* Must match all of destination, */
- if (ip_mask->ip4dst | ip_mask->pdst)
+ if ((__force u32)~ip_mask->ip4dst |
+ (__force u16)~ip_mask->pdst)
return -EINVAL;
/* all or none of source, */
if ((ip_mask->ip4src | ip_mask->psrc) &&
@@ -905,17 +992,17 @@ static int efx_ethtool_set_rx_ntuple(struct net_device *net_dev,
(__force u16)~ip_mask->psrc))
return -EINVAL;
/* and nothing else */
- if ((u8)~ip_mask->tos | (u16)~ntuple->fs.vlan_tag_mask)
+ if (ip_mask->tos | rule->m_ext.vlan_tci)
return -EINVAL;
- if (!ip_mask->ip4src)
- rc = efx_filter_set_ipv4_full(&filter, proto,
+ if (ip_mask->ip4src)
+ rc = efx_filter_set_ipv4_full(&spec, proto,
ip_entry->ip4dst,
ip_entry->pdst,
ip_entry->ip4src,
ip_entry->psrc);
else
- rc = efx_filter_set_ipv4_local(&filter, proto,
+ rc = efx_filter_set_ipv4_local(&spec, proto,
ip_entry->ip4dst,
ip_entry->pdst);
if (rc)
@@ -923,23 +1010,24 @@ static int efx_ethtool_set_rx_ntuple(struct net_device *net_dev,
break;
}
- case ETHER_FLOW:
- /* Must match all of destination, */
- if (!is_zero_ether_addr(mac_mask->h_dest))
+ case ETHER_FLOW | FLOW_EXT:
+ /* Must match all or none of VID */
+ if (rule->m_ext.vlan_tci != htons(0xfff) &&
+ rule->m_ext.vlan_tci != 0)
return -EINVAL;
- /* all or none of VID, */
- if (ntuple->fs.vlan_tag_mask != 0xf000 &&
- ntuple->fs.vlan_tag_mask != 0xffff)
+ case ETHER_FLOW:
+ /* Must match all of destination */
+ if (!is_broadcast_ether_addr(mac_mask->h_dest))
return -EINVAL;
/* and nothing else */
- if (!is_broadcast_ether_addr(mac_mask->h_source) ||
- mac_mask->h_proto != htons(0xffff))
+ if (!is_zero_ether_addr(mac_mask->h_source) ||
+ mac_mask->h_proto)
return -EINVAL;
rc = efx_filter_set_eth_local(
- &filter,
- (ntuple->fs.vlan_tag_mask == 0xf000) ?
- ntuple->fs.vlan_tag : EFX_FILTER_VID_UNSPEC,
+ &spec,
+ (rule->flow_type & FLOW_EXT && rule->m_ext.vlan_tci) ?
+ ntohs(rule->h_ext.vlan_tci) : EFX_FILTER_VID_UNSPEC,
mac_entry->h_dest);
if (rc)
return rc;
@@ -949,47 +1037,57 @@ static int efx_ethtool_set_rx_ntuple(struct net_device *net_dev,
return -EINVAL;
}
- if (ntuple->fs.action == ETHTOOL_RXNTUPLE_ACTION_CLEAR)
- return efx_filter_remove_filter(efx, &filter);
+ rc = efx_filter_insert_filter(efx, &spec, true);
+ if (rc < 0)
+ return rc;
- rc = efx_filter_insert_filter(efx, &filter, true);
- return rc < 0 ? rc : 0;
+ rule->location = rc;
+ return 0;
}
-static int efx_ethtool_get_rxfh_indir(struct net_device *net_dev,
- struct ethtool_rxfh_indir *indir)
+static int efx_ethtool_set_rxnfc(struct net_device *net_dev,
+ struct ethtool_rxnfc *info)
{
struct efx_nic *efx = netdev_priv(net_dev);
- size_t copy_size =
- min_t(size_t, indir->size, ARRAY_SIZE(efx->rx_indir_table));
- if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
+ if (efx_filter_get_rx_id_limit(efx) == 0)
return -EOPNOTSUPP;
- indir->size = ARRAY_SIZE(efx->rx_indir_table);
- memcpy(indir->ring_index, efx->rx_indir_table,
- copy_size * sizeof(indir->ring_index[0]));
- return 0;
+ switch (info->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ return efx_ethtool_set_class_rule(efx, &info->fs);
+
+ case ETHTOOL_SRXCLSRLDEL:
+ return efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_MANUAL,
+ info->fs.location);
+
+ default:
+ return -EOPNOTSUPP;
+ }
}
-static int efx_ethtool_set_rxfh_indir(struct net_device *net_dev,
- const struct ethtool_rxfh_indir *indir)
+static u32 efx_ethtool_get_rxfh_indir_size(struct net_device *net_dev)
{
struct efx_nic *efx = netdev_priv(net_dev);
- size_t i;
- if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
- return -EOPNOTSUPP;
+ return (efx_nic_rev(efx) < EFX_REV_FALCON_B0 ?
+ 0 : ARRAY_SIZE(efx->rx_indir_table));
+}
- /* Validate size and indices */
- if (indir->size != ARRAY_SIZE(efx->rx_indir_table))
- return -EINVAL;
- for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
- if (indir->ring_index[i] >= efx->n_rx_channels)
- return -EINVAL;
+static int efx_ethtool_get_rxfh_indir(struct net_device *net_dev, u32 *indir)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ memcpy(indir, efx->rx_indir_table, sizeof(efx->rx_indir_table));
+ return 0;
+}
+
+static int efx_ethtool_set_rxfh_indir(struct net_device *net_dev,
+ const u32 *indir)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
- memcpy(efx->rx_indir_table, indir->ring_index,
- sizeof(efx->rx_indir_table));
+ memcpy(efx->rx_indir_table, indir, sizeof(efx->rx_indir_table));
efx_nic_push_rx_indir_table(efx);
return 0;
}
@@ -1019,7 +1117,8 @@ const struct ethtool_ops efx_ethtool_ops = {
.set_wol = efx_ethtool_set_wol,
.reset = efx_ethtool_reset,
.get_rxnfc = efx_ethtool_get_rxnfc,
- .set_rx_ntuple = efx_ethtool_set_rx_ntuple,
+ .set_rxnfc = efx_ethtool_set_rxnfc,
+ .get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size,
.get_rxfh_indir = efx_ethtool_get_rxfh_indir,
.set_rxfh_indir = efx_ethtool_set_rxfh_indir,
};
diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
index 97b606b92e88..8ae1ebd35397 100644
--- a/drivers/net/ethernet/sfc/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon.c
@@ -610,7 +610,7 @@ static void falcon_stats_complete(struct efx_nic *efx)
if (!nic_data->stats_pending)
return;
- nic_data->stats_pending = 0;
+ nic_data->stats_pending = false;
if (*nic_data->stats_dma_done == FALCON_STATS_DONE) {
rmb(); /* read the done flag before the stats */
efx->mac_op->update_stats(efx);
diff --git a/drivers/net/ethernet/sfc/filter.c b/drivers/net/ethernet/sfc/filter.c
index 2b9636f96e05..1fbbbee7b1ae 100644
--- a/drivers/net/ethernet/sfc/filter.c
+++ b/drivers/net/ethernet/sfc/filter.c
@@ -155,6 +155,16 @@ static inline void __efx_filter_set_ipv4(struct efx_filter_spec *spec,
spec->data[2] = ntohl(host2);
}
+static inline void __efx_filter_get_ipv4(const struct efx_filter_spec *spec,
+ __be32 *host1, __be16 *port1,
+ __be32 *host2, __be16 *port2)
+{
+ *host1 = htonl(spec->data[0] >> 16 | spec->data[1] << 16);
+ *port1 = htons(spec->data[0]);
+ *host2 = htonl(spec->data[2]);
+ *port2 = htons(spec->data[1] >> 16);
+}
+
/**
* efx_filter_set_ipv4_local - specify IPv4 host, transport protocol and port
* @spec: Specification to initialise
@@ -205,6 +215,26 @@ int efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto,
return 0;
}
+int efx_filter_get_ipv4_local(const struct efx_filter_spec *spec,
+ u8 *proto, __be32 *host, __be16 *port)
+{
+ __be32 host1;
+ __be16 port1;
+
+ switch (spec->type) {
+ case EFX_FILTER_TCP_WILD:
+ *proto = IPPROTO_TCP;
+ __efx_filter_get_ipv4(spec, &host1, &port1, host, port);
+ return 0;
+ case EFX_FILTER_UDP_WILD:
+ *proto = IPPROTO_UDP;
+ __efx_filter_get_ipv4(spec, &host1, port, host, &port1);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
/**
* efx_filter_set_ipv4_full - specify IPv4 hosts, transport protocol and ports
* @spec: Specification to initialise
@@ -242,6 +272,25 @@ int efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto,
return 0;
}
+int efx_filter_get_ipv4_full(const struct efx_filter_spec *spec,
+ u8 *proto, __be32 *host, __be16 *port,
+ __be32 *rhost, __be16 *rport)
+{
+ switch (spec->type) {
+ case EFX_FILTER_TCP_FULL:
+ *proto = IPPROTO_TCP;
+ break;
+ case EFX_FILTER_UDP_FULL:
+ *proto = IPPROTO_UDP;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ __efx_filter_get_ipv4(spec, rhost, rport, host, port);
+ return 0;
+}
+
/**
* efx_filter_set_eth_local - specify local Ethernet address and optional VID
* @spec: Specification to initialise
@@ -270,6 +319,29 @@ int efx_filter_set_eth_local(struct efx_filter_spec *spec,
return 0;
}
+int efx_filter_get_eth_local(const struct efx_filter_spec *spec,
+ u16 *vid, u8 *addr)
+{
+ switch (spec->type) {
+ case EFX_FILTER_MAC_WILD:
+ *vid = EFX_FILTER_VID_UNSPEC;
+ break;
+ case EFX_FILTER_MAC_FULL:
+ *vid = spec->data[0];
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ addr[0] = spec->data[2] >> 8;
+ addr[1] = spec->data[2];
+ addr[2] = spec->data[1] >> 24;
+ addr[3] = spec->data[1] >> 16;
+ addr[4] = spec->data[1] >> 8;
+ addr[5] = spec->data[1];
+ return 0;
+}
+
/* Build a filter entry and return its n-tuple key. */
static u32 efx_filter_build(efx_oword_t *filter, struct efx_filter_spec *spec)
{
@@ -332,7 +404,7 @@ static bool efx_filter_equal(const struct efx_filter_spec *left,
static int efx_filter_search(struct efx_filter_table *table,
struct efx_filter_spec *spec, u32 key,
- bool for_insert, int *depth_required)
+ bool for_insert, unsigned int *depth_required)
{
unsigned hash, incr, filter_idx, depth, depth_max;
@@ -366,12 +438,59 @@ static int efx_filter_search(struct efx_filter_table *table,
}
}
-/* Construct/deconstruct external filter IDs */
+/*
+ * Construct/deconstruct external filter IDs. These must be ordered
+ * by matching priority, for RX NFC semantics.
+ *
+ * Each RX MAC filter entry has a flag for whether it can override an
+ * RX IP filter that also matches. So we assign locations for MAC
+ * filters with overriding behaviour, then for IP filters, then for
+ * MAC filters without overriding behaviour.
+ */
+
+#define EFX_FILTER_INDEX_WIDTH 13
+#define EFX_FILTER_INDEX_MASK ((1 << EFX_FILTER_INDEX_WIDTH) - 1)
+
+static inline u32 efx_filter_make_id(enum efx_filter_table_id table_id,
+ unsigned int index, u8 flags)
+{
+ return (table_id == EFX_FILTER_TABLE_RX_MAC &&
+ flags & EFX_FILTER_FLAG_RX_OVERRIDE_IP) ?
+ index :
+ (table_id + 1) << EFX_FILTER_INDEX_WIDTH | index;
+}
+
+static inline enum efx_filter_table_id efx_filter_id_table_id(u32 id)
+{
+ return (id <= EFX_FILTER_INDEX_MASK) ?
+ EFX_FILTER_TABLE_RX_MAC :
+ (id >> EFX_FILTER_INDEX_WIDTH) - 1;
+}
+
+static inline unsigned int efx_filter_id_index(u32 id)
+{
+ return id & EFX_FILTER_INDEX_MASK;
+}
-static inline int
-efx_filter_make_id(enum efx_filter_table_id table_id, unsigned index)
+static inline u8 efx_filter_id_flags(u32 id)
{
- return table_id << 16 | index;
+ return (id <= EFX_FILTER_INDEX_MASK) ?
+ EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_OVERRIDE_IP :
+ EFX_FILTER_FLAG_RX;
+}
+
+u32 efx_filter_get_rx_id_limit(struct efx_nic *efx)
+{
+ struct efx_filter_state *state = efx->filter_state;
+
+ if (state->table[EFX_FILTER_TABLE_RX_MAC].size != 0)
+ return ((EFX_FILTER_TABLE_RX_MAC + 1) << EFX_FILTER_INDEX_WIDTH)
+ + state->table[EFX_FILTER_TABLE_RX_MAC].size;
+ else if (state->table[EFX_FILTER_TABLE_RX_IP].size != 0)
+ return ((EFX_FILTER_TABLE_RX_IP + 1) << EFX_FILTER_INDEX_WIDTH)
+ + state->table[EFX_FILTER_TABLE_RX_IP].size;
+ else
+ return 0;
}
/**
@@ -384,14 +503,14 @@ efx_filter_make_id(enum efx_filter_table_id table_id, unsigned index)
* On success, return the filter ID.
* On failure, return a negative error code.
*/
-int efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
+s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
bool replace)
{
struct efx_filter_state *state = efx->filter_state;
struct efx_filter_table *table = efx_filter_spec_table(state, spec);
struct efx_filter_spec *saved_spec;
efx_oword_t filter;
- int filter_idx, depth;
+ unsigned int filter_idx, depth;
u32 key;
int rc;
@@ -439,7 +558,7 @@ int efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
netif_vdbg(efx, hw, efx->net_dev,
"%s: filter type %d index %d rxq %u set",
__func__, spec->type, filter_idx, spec->dmaq_id);
- rc = efx_filter_make_id(table->id, filter_idx);
+ rc = efx_filter_make_id(table->id, filter_idx, spec->flags);
out:
spin_unlock_bh(&state->lock);
@@ -448,7 +567,7 @@ out:
static void efx_filter_table_clear_entry(struct efx_nic *efx,
struct efx_filter_table *table,
- int filter_idx)
+ unsigned int filter_idx)
{
static efx_oword_t filter;
@@ -463,48 +582,101 @@ static void efx_filter_table_clear_entry(struct efx_nic *efx,
}
/**
- * efx_filter_remove_filter - remove a filter by specification
+ * efx_filter_remove_id_safe - remove a filter by ID, carefully
* @efx: NIC from which to remove the filter
- * @spec: Specification for the filter
+ * @priority: Priority of filter, as passed to @efx_filter_insert_filter
+ * @filter_id: ID of filter, as returned by @efx_filter_insert_filter
*
- * On success, return zero.
- * On failure, return a negative error code.
+ * This function will range-check @filter_id, so it is safe to call
+ * with a value passed from userland.
*/
-int efx_filter_remove_filter(struct efx_nic *efx, struct efx_filter_spec *spec)
+int efx_filter_remove_id_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id)
{
struct efx_filter_state *state = efx->filter_state;
- struct efx_filter_table *table = efx_filter_spec_table(state, spec);
- struct efx_filter_spec *saved_spec;
- efx_oword_t filter;
- int filter_idx, depth;
- u32 key;
+ enum efx_filter_table_id table_id;
+ struct efx_filter_table *table;
+ unsigned int filter_idx;
+ struct efx_filter_spec *spec;
+ u8 filter_flags;
int rc;
- if (!table)
- return -EINVAL;
+ table_id = efx_filter_id_table_id(filter_id);
+ if ((unsigned int)table_id >= EFX_FILTER_TABLE_COUNT)
+ return -ENOENT;
+ table = &state->table[table_id];
- key = efx_filter_build(&filter, spec);
+ filter_idx = efx_filter_id_index(filter_id);
+ if (filter_idx >= table->size)
+ return -ENOENT;
+ spec = &table->spec[filter_idx];
- spin_lock_bh(&state->lock);
+ filter_flags = efx_filter_id_flags(filter_id);
- rc = efx_filter_search(table, spec, key, false, &depth);
- if (rc < 0)
- goto out;
- filter_idx = rc;
- saved_spec = &table->spec[filter_idx];
+ spin_lock_bh(&state->lock);
- if (spec->priority < saved_spec->priority) {
- rc = -EPERM;
- goto out;
+ if (test_bit(filter_idx, table->used_bitmap) &&
+ spec->priority == priority && spec->flags == filter_flags) {
+ efx_filter_table_clear_entry(efx, table, filter_idx);
+ if (table->used == 0)
+ efx_filter_table_reset_search_depth(table);
+ rc = 0;
+ } else {
+ rc = -ENOENT;
}
- efx_filter_table_clear_entry(efx, table, filter_idx);
- if (table->used == 0)
- efx_filter_table_reset_search_depth(table);
- rc = 0;
+ spin_unlock_bh(&state->lock);
+
+ return rc;
+}
+
+/**
+ * efx_filter_get_filter_safe - retrieve a filter by ID, carefully
+ * @efx: NIC from which to remove the filter
+ * @priority: Priority of filter, as passed to @efx_filter_insert_filter
+ * @filter_id: ID of filter, as returned by @efx_filter_insert_filter
+ * @spec: Buffer in which to store filter specification
+ *
+ * This function will range-check @filter_id, so it is safe to call
+ * with a value passed from userland.
+ */
+int efx_filter_get_filter_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id, struct efx_filter_spec *spec_buf)
+{
+ struct efx_filter_state *state = efx->filter_state;
+ enum efx_filter_table_id table_id;
+ struct efx_filter_table *table;
+ struct efx_filter_spec *spec;
+ unsigned int filter_idx;
+ u8 filter_flags;
+ int rc;
+
+ table_id = efx_filter_id_table_id(filter_id);
+ if ((unsigned int)table_id >= EFX_FILTER_TABLE_COUNT)
+ return -ENOENT;
+ table = &state->table[table_id];
+
+ filter_idx = efx_filter_id_index(filter_id);
+ if (filter_idx >= table->size)
+ return -ENOENT;
+ spec = &table->spec[filter_idx];
+
+ filter_flags = efx_filter_id_flags(filter_id);
+
+ spin_lock_bh(&state->lock);
+
+ if (test_bit(filter_idx, table->used_bitmap) &&
+ spec->priority == priority && spec->flags == filter_flags) {
+ *spec_buf = *spec;
+ rc = 0;
+ } else {
+ rc = -ENOENT;
+ }
-out:
spin_unlock_bh(&state->lock);
+
return rc;
}
@@ -514,7 +686,7 @@ static void efx_filter_table_clear(struct efx_nic *efx,
{
struct efx_filter_state *state = efx->filter_state;
struct efx_filter_table *table = &state->table[table_id];
- int filter_idx;
+ unsigned int filter_idx;
spin_lock_bh(&state->lock);
@@ -538,6 +710,68 @@ void efx_filter_clear_rx(struct efx_nic *efx, enum efx_filter_priority priority)
efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_MAC, priority);
}
+u32 efx_filter_count_rx_used(struct efx_nic *efx,
+ enum efx_filter_priority priority)
+{
+ struct efx_filter_state *state = efx->filter_state;
+ enum efx_filter_table_id table_id;
+ struct efx_filter_table *table;
+ unsigned int filter_idx;
+ u32 count = 0;
+
+ spin_lock_bh(&state->lock);
+
+ for (table_id = EFX_FILTER_TABLE_RX_IP;
+ table_id <= EFX_FILTER_TABLE_RX_MAC;
+ table_id++) {
+ table = &state->table[table_id];
+ for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
+ if (test_bit(filter_idx, table->used_bitmap) &&
+ table->spec[filter_idx].priority == priority)
+ ++count;
+ }
+ }
+
+ spin_unlock_bh(&state->lock);
+
+ return count;
+}
+
+s32 efx_filter_get_rx_ids(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 *buf, u32 size)
+{
+ struct efx_filter_state *state = efx->filter_state;
+ enum efx_filter_table_id table_id;
+ struct efx_filter_table *table;
+ unsigned int filter_idx;
+ s32 count = 0;
+
+ spin_lock_bh(&state->lock);
+
+ for (table_id = EFX_FILTER_TABLE_RX_IP;
+ table_id <= EFX_FILTER_TABLE_RX_MAC;
+ table_id++) {
+ table = &state->table[table_id];
+ for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
+ if (test_bit(filter_idx, table->used_bitmap) &&
+ table->spec[filter_idx].priority == priority) {
+ if (count == size) {
+ count = -EMSGSIZE;
+ goto out;
+ }
+ buf[count++] = efx_filter_make_id(
+ table_id, filter_idx,
+ table->spec[filter_idx].flags);
+ }
+ }
+ }
+out:
+ spin_unlock_bh(&state->lock);
+
+ return count;
+}
+
/* Restore filter stater after reset */
void efx_restore_filters(struct efx_nic *efx)
{
@@ -545,7 +779,7 @@ void efx_restore_filters(struct efx_nic *efx)
enum efx_filter_table_id table_id;
struct efx_filter_table *table;
efx_oword_t filter;
- int filter_idx;
+ unsigned int filter_idx;
spin_lock_bh(&state->lock);
diff --git a/drivers/net/ethernet/sfc/filter.h b/drivers/net/ethernet/sfc/filter.h
index 872f2132a496..3d4108cd90ca 100644
--- a/drivers/net/ethernet/sfc/filter.h
+++ b/drivers/net/ethernet/sfc/filter.h
@@ -78,6 +78,11 @@ enum efx_filter_flags {
*
* Use the efx_filter_set_*() functions to initialise the @type and
* @data fields.
+ *
+ * The @priority field is used by software to determine whether a new
+ * filter may replace an old one. The hardware priority of a filter
+ * depends on the filter type and %EFX_FILTER_FLAG_RX_OVERRIDE_IP
+ * flag.
*/
struct efx_filter_spec {
u8 type:4;
@@ -100,11 +105,18 @@ static inline void efx_filter_init_rx(struct efx_filter_spec *spec,
extern int efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto,
__be32 host, __be16 port);
+extern int efx_filter_get_ipv4_local(const struct efx_filter_spec *spec,
+ u8 *proto, __be32 *host, __be16 *port);
extern int efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto,
__be32 host, __be16 port,
__be32 rhost, __be16 rport);
+extern int efx_filter_get_ipv4_full(const struct efx_filter_spec *spec,
+ u8 *proto, __be32 *host, __be16 *port,
+ __be32 *rhost, __be16 *rport);
extern int efx_filter_set_eth_local(struct efx_filter_spec *spec,
u16 vid, const u8 *addr);
+extern int efx_filter_get_eth_local(const struct efx_filter_spec *spec,
+ u16 *vid, u8 *addr);
enum {
EFX_FILTER_VID_UNSPEC = 0xffff,
};
diff --git a/drivers/net/ethernet/sfc/mtd.c b/drivers/net/ethernet/sfc/mtd.c
index b6304486f244..bc9dcd6b30d7 100644
--- a/drivers/net/ethernet/sfc/mtd.c
+++ b/drivers/net/ethernet/sfc/mtd.c
@@ -496,7 +496,7 @@ static int siena_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
rc = efx_mcdi_nvram_update_start(efx, part->mcdi.nvram_type);
if (rc)
goto out;
- part->mcdi.updating = 1;
+ part->mcdi.updating = true;
}
/* The MCDI interface can in fact do multiple erase blocks at once;
@@ -528,7 +528,7 @@ static int siena_mtd_write(struct mtd_info *mtd, loff_t start,
rc = efx_mcdi_nvram_update_start(efx, part->mcdi.nvram_type);
if (rc)
goto out;
- part->mcdi.updating = 1;
+ part->mcdi.updating = true;
}
while (offset < end) {
@@ -553,7 +553,7 @@ static int siena_mtd_sync(struct mtd_info *mtd)
int rc = 0;
if (part->mcdi.updating) {
- part->mcdi.updating = 0;
+ part->mcdi.updating = false;
rc = efx_mcdi_nvram_update_finish(efx, part->mcdi.nvram_type);
}
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index b8e251a1ee48..c49502bab6a3 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -908,7 +908,7 @@ struct efx_nic_type {
unsigned int phys_addr_channels;
unsigned int tx_dc_base;
unsigned int rx_dc_base;
- u32 offload_features;
+ netdev_features_t offload_features;
};
/**************************************************************************
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 752d521c09b1..aca349861767 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -479,11 +479,8 @@ static void efx_rx_packet_gro(struct efx_channel *channel,
if (efx->net_dev->features & NETIF_F_RXHASH)
skb->rxhash = efx_rx_buf_hash(eh);
- skb_frag_set_page(skb, 0, page);
- skb_shinfo(skb)->frags[0].page_offset =
- efx_rx_buf_offset(efx, rx_buf);
- skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx_buf->len);
- skb_shinfo(skb)->nr_frags = 1;
+ skb_fill_page_desc(skb, 0, page,
+ efx_rx_buf_offset(efx, rx_buf), rx_buf->len);
skb->len = rx_buf->len;
skb->data_len = rx_buf->len;
@@ -669,7 +666,7 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
rx_queue->ptr_mask);
/* Allocate RX buffers */
- rx_queue->buffer = kzalloc(entries * sizeof(*rx_queue->buffer),
+ rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
GFP_KERNEL);
if (!rx_queue->buffer)
return -ENOMEM;
diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
index 822f6c2a6a7c..52edd24fcde3 100644
--- a/drivers/net/ethernet/sfc/selftest.c
+++ b/drivers/net/ethernet/sfc/selftest.c
@@ -503,8 +503,8 @@ efx_test_loopback(struct efx_tx_queue *tx_queue,
/* Determine how many packets to send */
state->packet_count = efx->txq_entries / 3;
state->packet_count = min(1 << (i << 2), state->packet_count);
- state->skbs = kzalloc(sizeof(state->skbs[0]) *
- state->packet_count, GFP_KERNEL);
+ state->skbs = kcalloc(state->packet_count,
+ sizeof(state->skbs[0]), GFP_KERNEL);
if (!state->skbs)
return -ENOMEM;
state->flush = false;
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index cc2549cb7076..4d5d619feaa6 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -232,7 +232,7 @@ static int siena_probe_nvconfig(struct efx_nic *efx)
static int siena_probe_nic(struct efx_nic *efx)
{
struct siena_nic_data *nic_data;
- bool already_attached = 0;
+ bool already_attached = false;
efx_oword_t reg;
int rc;
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index df88c5430f95..72f0fbc73b1a 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -31,7 +31,9 @@
#define EFX_TXQ_THRESHOLD(_efx) ((_efx)->txq_entries / 2u)
static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
- struct efx_tx_buffer *buffer)
+ struct efx_tx_buffer *buffer,
+ unsigned int *pkts_compl,
+ unsigned int *bytes_compl)
{
if (buffer->unmap_len) {
struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
@@ -48,6 +50,8 @@ static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
}
if (buffer->skb) {
+ (*pkts_compl)++;
+ (*bytes_compl) += buffer->skb->len;
dev_kfree_skb_any((struct sk_buff *) buffer->skb);
buffer->skb = NULL;
netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
@@ -250,6 +254,8 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
buffer->skb = skb;
buffer->continuation = false;
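+	/* Account the queued bytes with BQL (byte queue limits) */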
+ netdev_tx_sent_queue(tx_queue->core_txq, skb->len);
+
/* Pass off to hardware */
efx_nic_push_buffers(tx_queue);
@@ -267,10 +273,11 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
unwind:
/* Work backwards until we hit the original insert pointer value */
while (tx_queue->insert_count != tx_queue->write_count) {
+ unsigned int pkts_compl = 0, bytes_compl = 0;
--tx_queue->insert_count;
insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
buffer = &tx_queue->buffer[insert_ptr];
- efx_dequeue_buffer(tx_queue, buffer);
+ efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
buffer->len = 0;
}
@@ -293,7 +300,9 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
* specified index.
*/
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
- unsigned int index)
+ unsigned int index,
+ unsigned int *pkts_compl,
+ unsigned int *bytes_compl)
{
struct efx_nic *efx = tx_queue->efx;
unsigned int stop_index, read_ptr;
@@ -311,7 +320,7 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
return;
}
- efx_dequeue_buffer(tx_queue, buffer);
+ efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);
buffer->continuation = true;
buffer->len = 0;
@@ -422,10 +431,12 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
unsigned fill_level;
struct efx_nic *efx = tx_queue->efx;
+ unsigned int pkts_compl = 0, bytes_compl = 0;
EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);
- efx_dequeue_buffers(tx_queue, index);
+ efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
+ netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl);
/* See if we need to restart the netif queue. This barrier
* separates the update of read_count from the test of the
@@ -468,7 +479,7 @@ int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);
/* Allocate software ring */
- tx_queue->buffer = kzalloc(entries * sizeof(*tx_queue->buffer),
+ tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
GFP_KERNEL);
if (!tx_queue->buffer)
return -ENOMEM;
@@ -515,13 +526,15 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
/* Free any buffers left in the ring */
while (tx_queue->read_count != tx_queue->write_count) {
+ unsigned int pkts_compl = 0, bytes_compl = 0;
buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
- efx_dequeue_buffer(tx_queue, buffer);
+ efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
buffer->continuation = true;
buffer->len = 0;
++tx_queue->read_count;
}
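+	/* Reset BQL accounting now that the ring is empty */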
+ netdev_tx_reset_queue(tx_queue->core_txq);
}
void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
@@ -1160,6 +1173,8 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
goto mem_err;
}
+ netdev_tx_sent_queue(tx_queue->core_txq, skb->len);
+
/* Pass off to hardware */
efx_nic_push_buffers(tx_queue);
diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c
index 60135aa55802..53efe7c7b1c0 100644
--- a/drivers/net/ethernet/sgi/meth.c
+++ b/drivers/net/ethernet/sgi/meth.c
@@ -28,6 +28,7 @@
#include <linux/tcp.h> /* struct tcphdr */
#include <linux/skbuff.h>
#include <linux/mii.h> /* MII definitions */
+#include <linux/crc32.h>
#include <asm/ip32/mace.h>
#include <asm/ip32/ip32_ints.h>
@@ -58,12 +59,19 @@ static int timeout = TX_TIMEOUT;
module_param(timeout, int, 0);
/*
+ * Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
+ * MACE Ethernet uses a 64 element hash table based on the Ethernet CRC.
+ */
+#define METH_MCF_LIMIT 32
+
+/*
* This structure is private to each device. It is used to pass
* packets in and out, so there is place for a packet
*/
struct meth_private {
/* in-memory copy of MAC Control register */
- unsigned long mac_ctrl;
+ u64 mac_ctrl;
+
/* in-memory copy of DMA Control register */
unsigned long dma_ctrl;
/* address of PHY, used by mdio_* functions, initialized in mdio_probe */
@@ -79,6 +87,9 @@ struct meth_private {
struct sk_buff *rx_skbs[RX_RING_ENTRIES];
unsigned long rx_write;
+ /* Multicast filter. */
+ u64 mcast_filter;
+
spinlock_t meth_lock;
};
@@ -765,6 +776,40 @@ static int meth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
}
}
+static void meth_set_rx_mode(struct net_device *dev)
+{
+ struct meth_private *priv = netdev_priv(dev);
+ unsigned long flags;
+
+ netif_stop_queue(dev);
+ spin_lock_irqsave(&priv->meth_lock, flags);
+ priv->mac_ctrl &= ~METH_PROMISC;
+
+ if (dev->flags & IFF_PROMISC) {
+ priv->mac_ctrl |= METH_PROMISC;
+ priv->mcast_filter = 0xffffffffffffffffUL;
+ } else if ((netdev_mc_count(dev) > METH_MCF_LIMIT) ||
+ (dev->flags & IFF_ALLMULTI)) {
+ priv->mac_ctrl |= METH_ACCEPT_AMCAST;
+ priv->mcast_filter = 0xffffffffffffffffUL;
+ } else {
+ struct netdev_hw_addr *ha;
+ priv->mac_ctrl |= METH_ACCEPT_MCAST;
+
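+		/* Each address sets one bit of the 64-bit hash filter,
+		 * indexed by the top 6 bits of its Ethernet CRC */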
+ netdev_for_each_mc_addr(ha, dev)
+ set_bit((ether_crc(ETH_ALEN, ha->addr) >> 26),
+ (volatile unsigned long *)&priv->mcast_filter);
+ }
+
+ /* Write the changes to the chip registers. */
+ mace->eth.mac_ctrl = priv->mac_ctrl;
+ mace->eth.mcast_filter = priv->mcast_filter;
+
+ /* Done! */
+ spin_unlock_irqrestore(&priv->meth_lock, flags);
+ netif_wake_queue(dev);
+}
+
static const struct net_device_ops meth_netdev_ops = {
.ndo_open = meth_open,
.ndo_stop = meth_release,
@@ -774,6 +819,7 @@ static const struct net_device_ops meth_netdev_ops = {
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_rx_mode = meth_set_rx_mode,
};
/*
@@ -830,24 +876,7 @@ static struct platform_driver meth_driver = {
}
};
-static int __init meth_init_module(void)
-{
- int err;
-
- err = platform_driver_register(&meth_driver);
- if (err)
- printk(KERN_ERR "Driver registration failed\n");
-
- return err;
-}
-
-static void __exit meth_exit_module(void)
-{
- platform_driver_unregister(&meth_driver);
-}
-
-module_init(meth_init_module);
-module_exit(meth_exit_module);
+module_platform_driver(meth_driver);
MODULE_AUTHOR("Ilya Volynets <ilya@theIlya.com>");
MODULE_DESCRIPTION("SGI O2 Builtin Fast Ethernet driver");
diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
index 1b4658c99391..5b118cd5bf94 100644
--- a/drivers/net/ethernet/sis/sis190.c
+++ b/drivers/net/ethernet/sis/sis190.c
@@ -47,8 +47,6 @@
#define sis190_rx_skb netif_rx
#define sis190_rx_quota(count, quota) count
-#define MAC_ADDR_LEN 6
-
#define NUM_TX_DESC 64 /* [8..1024] */
#define NUM_RX_DESC 64 /* [8..8192] */
#define TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
@@ -1601,7 +1599,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
}
/* Get MAC address from EEPROM */
- for (i = 0; i < MAC_ADDR_LEN / 2; i++) {
+ for (i = 0; i < ETH_ALEN / 2; i++) {
u16 w = sis190_read_eeprom(ioaddr, EEPROMMACAddr + i);
((__le16 *)dev->dev_addr)[i] = cpu_to_le16(w);
@@ -1653,7 +1651,7 @@ static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
udelay(50);
pci_read_config_byte(isa_bridge, 0x48, &reg);
- for (i = 0; i < MAC_ADDR_LEN; i++) {
+ for (i = 0; i < ETH_ALEN; i++) {
outb(0x9 + i, 0x78);
dev->dev_addr[i] = inb(0x79);
}
@@ -1692,7 +1690,7 @@ static inline void sis190_init_rxfilter(struct net_device *dev)
*/
SIS_W16(RxMacControl, ctl & ~0x0f00);
- for (i = 0; i < MAC_ADDR_LEN; i++)
+ for (i = 0; i < ETH_ALEN; i++)
SIS_W8(RxMacAddr + i, dev->dev_addr[i]);
SIS_W16(RxMacControl, ctl);
@@ -1760,9 +1758,10 @@ static void sis190_get_drvinfo(struct net_device *dev,
{
struct sis190_private *tp = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, pci_name(tp->pci_dev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(tp->pci_dev),
+ sizeof(info->bus_info));
}
static int sis190_get_regs_len(struct net_device *dev)
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index a184abc5ef11..c8efc708c792 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -1991,9 +1991,10 @@ static void sis900_get_drvinfo(struct net_device *net_dev,
{
struct sis900_private *sis_priv = netdev_priv(net_dev);
- strcpy (info->driver, SIS900_MODULE_NAME);
- strcpy (info->version, SIS900_DRV_VERSION);
- strcpy (info->bus_info, pci_name(sis_priv->pci_dev));
+ strlcpy(info->driver, SIS900_MODULE_NAME, sizeof(info->driver));
+ strlcpy(info->version, SIS900_DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(sis_priv->pci_dev),
+ sizeof(info->bus_info));
}
static u32 sis900_get_msglevel(struct net_device *net_dev)
diff --git a/drivers/net/ethernet/sis/sis900.h b/drivers/net/ethernet/sis/sis900.h
index 150511a922ef..1341f33e6084 100644
--- a/drivers/net/ethernet/sis/sis900.h
+++ b/drivers/net/ethernet/sis/sis900.h
@@ -205,7 +205,7 @@ enum sis900_tx_buffer_status {
EXCCOLL = 0x00100000, COLCNT = 0x000F0000
};
-enum sis900_rx_bufer_status {
+enum sis900_rx_buffer_status {
OVERRUN = 0x02000000, DEST = 0x00800000, BCAST = 0x01800000,
MCAST = 0x01000000, UNIMATCH = 0x00800000, TOOLONG = 0x00400000,
RUNT = 0x00200000, RXISERR = 0x00100000, CRCERR = 0x00080000,
diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c
index 0a5dfb814157..2c077ce0b6d6 100644
--- a/drivers/net/ethernet/smsc/epic100.c
+++ b/drivers/net/ethernet/smsc/epic100.c
@@ -1414,9 +1414,9 @@ static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *
{
struct epic_private *np = netdev_priv(dev);
- strcpy (info->driver, DRV_NAME);
- strcpy (info->version, DRV_VERSION);
- strcpy (info->bus_info, pci_name(np->pci_dev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index 8f61fe9db1d0..313ba3b32ab4 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -2196,15 +2196,4 @@ static struct platform_driver smc911x_driver = {
},
};
-static int __init smc911x_init(void)
-{
- return platform_driver_register(&smc911x_driver);
-}
-
-static void __exit smc911x_cleanup(void)
-{
- platform_driver_unregister(&smc911x_driver);
-}
-
-module_init(smc911x_init);
-module_exit(smc911x_cleanup);
+module_platform_driver(smc911x_driver);
diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c
index cbfa98187131..ada927aba7a5 100644
--- a/drivers/net/ethernet/smsc/smc91c92_cs.c
+++ b/drivers/net/ethernet/smsc/smc91c92_cs.c
@@ -1909,8 +1909,8 @@ static int check_if_running(struct net_device *dev)
static void smc_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}
static int smc_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index f47f81e25322..64ad3ed74495 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -2417,15 +2417,4 @@ static struct platform_driver smc_driver = {
},
};
-static int __init smc_init(void)
-{
- return platform_driver_register(&smc_driver);
-}
-
-static void __exit smc_cleanup(void)
-{
- platform_driver_unregister(&smc_driver);
-}
-
-module_init(smc_init);
-module_exit(smc_cleanup);
+module_platform_driver(smc_driver);
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 8843071fe987..24d2df068d71 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -44,6 +44,7 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/bug.h>
@@ -88,6 +89,8 @@ struct smsc911x_ops {
unsigned int *buf, unsigned int wordcount);
};
+#define SMSC911X_NUM_SUPPLIES 2
+
struct smsc911x_data {
void __iomem *ioaddr;
@@ -138,6 +141,9 @@ struct smsc911x_data {
/* register access functions */
const struct smsc911x_ops *ops;
+
+ /* regulators */
+ struct regulator_bulk_data supplies[SMSC911X_NUM_SUPPLIES];
};
/* Easy access to information */
@@ -362,6 +368,76 @@ out:
spin_unlock_irqrestore(&pdata->dev_lock, flags);
}
+/*
+ * enable resources, currently just regulators.
+ */
+static int smsc911x_enable_resources(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct smsc911x_data *pdata = netdev_priv(ndev);
+ int ret = 0;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(pdata->supplies),
+ pdata->supplies);
+ if (ret)
+ netdev_err(ndev, "failed to enable regulators %d\n",
+ ret);
+ return ret;
+}
+
+/*
+ * disable resources, currently just regulators.
+ */
+static int smsc911x_disable_resources(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct smsc911x_data *pdata = netdev_priv(ndev);
+ int ret = 0;
+
+ ret = regulator_bulk_disable(ARRAY_SIZE(pdata->supplies),
+ pdata->supplies);
+ return ret;
+}
+
+/*
+ * Request resources, currently just regulators.
+ *
+ * The SMSC911x has two power pins: vddvario and vdd33a, in designs where
+ * these are not always-on we need to request regulators to be turned on
+ * before we can try to access the device registers.
+ */
+static int smsc911x_request_resources(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct smsc911x_data *pdata = netdev_priv(ndev);
+ int ret = 0;
+
+ /* Request regulators */
+ pdata->supplies[0].supply = "vdd33a";
+ pdata->supplies[1].supply = "vddvario";
+ ret = regulator_bulk_get(&pdev->dev,
+ ARRAY_SIZE(pdata->supplies),
+ pdata->supplies);
+ if (ret)
+ netdev_err(ndev, "couldn't get regulators %d\n",
+ ret);
+ return ret;
+}
+
+/*
+ * Free resources, currently just regulators.
+ */
+static void smsc911x_free_resources(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct smsc911x_data *pdata = netdev_priv(ndev);
+
+ /* Free regulators */
+ regulator_bulk_free(ARRAY_SIZE(pdata->supplies),
+ pdata->supplies);
+}
+
/* waits for MAC not busy, with timeout. Only called by smsc911x_mac_read
* and smsc911x_mac_write, so assumes mac_lock is held */
static int smsc911x_mac_complete(struct smsc911x_data *pdata)
@@ -968,7 +1044,8 @@ static int __devinit smsc911x_mii_init(struct platform_device *pdev,
}
pdata->mii_bus->name = SMSC_MDIONAME;
- snprintf(pdata->mii_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id);
+ snprintf(pdata->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+ pdev->name, pdev->id);
pdata->mii_bus->priv = pdata;
pdata->mii_bus->read = smsc911x_mii_read;
pdata->mii_bus->write = smsc911x_mii_write;
@@ -1243,10 +1320,92 @@ static void smsc911x_rx_multicast_update_workaround(struct smsc911x_data *pdata)
spin_unlock(&pdata->mac_lock);
}
+static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata)
+{
+ int rc = 0;
+
+ if (!pdata->phy_dev)
+ return rc;
+
+ rc = phy_read(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS);
+
+ if (rc < 0) {
+ SMSC_WARN(pdata, drv, "Failed reading PHY control reg");
+ return rc;
+ }
+
+ /*
+	 * If energy is detected, the PHY is already awake, so it is not
+	 * necessary to disable the energy detect power-down mode.
+ */
+ if ((rc & MII_LAN83C185_EDPWRDOWN) &&
+ !(rc & MII_LAN83C185_ENERGYON)) {
+		/* Disable energy detect mode for this SMSC transceiver */
+ rc = phy_write(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS,
+ rc & (~MII_LAN83C185_EDPWRDOWN));
+
+ if (rc < 0) {
+ SMSC_WARN(pdata, drv, "Failed writing PHY control reg");
+ return rc;
+ }
+
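+		/* Give the PHY a moment to leave power-down mode */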
+ mdelay(1);
+ }
+
+ return 0;
+}
+
+static int smsc911x_phy_enable_energy_detect(struct smsc911x_data *pdata)
+{
+ int rc = 0;
+
+ if (!pdata->phy_dev)
+ return rc;
+
+ rc = phy_read(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS);
+
+ if (rc < 0) {
+ SMSC_WARN(pdata, drv, "Failed reading PHY control reg");
+ return rc;
+ }
+
+ /* Only enable if energy detect mode is already disabled */
+ if (!(rc & MII_LAN83C185_EDPWRDOWN)) {
+ mdelay(100);
+		/* Enable energy detect mode for this SMSC transceiver */
+ rc = phy_write(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS,
+ rc | MII_LAN83C185_EDPWRDOWN);
+
+ if (rc < 0) {
+ SMSC_WARN(pdata, drv, "Failed writing PHY control reg");
+ return rc;
+ }
+
+ mdelay(1);
+ }
+ return 0;
+}
+
static int smsc911x_soft_reset(struct smsc911x_data *pdata)
{
unsigned int timeout;
unsigned int temp;
+ int ret;
+
+ /*
+ * LAN9210/LAN9211/LAN9220/LAN9221 chips have an internal PHY that
+	 * is initialized in Energy Detect Power-Down mode, which prevents
+	 * the MAC chip from being software reset. So we have to wake up
+	 * the PHY first.
+ */
+ if (pdata->generation == 4) {
+ ret = smsc911x_phy_disable_energy_detect(pdata);
+
+ if (ret) {
+ SMSC_WARN(pdata, drv, "Failed to wakeup the PHY chip");
+ return ret;
+ }
+ }
/* Reset the LAN911x */
smsc911x_reg_write(pdata, HW_CFG, HW_CFG_SRST_);
@@ -1260,6 +1419,16 @@ static int smsc911x_soft_reset(struct smsc911x_data *pdata)
SMSC_WARN(pdata, drv, "Failed to complete reset");
return -EIO;
}
+
+ if (pdata->generation == 4) {
+ ret = smsc911x_phy_enable_energy_detect(pdata);
+
+ if (ret) {
+ SMSC_WARN(pdata, drv, "Failed to wakeup the PHY chip");
+ return ret;
+ }
+ }
+
return 0;
}
@@ -2092,6 +2261,9 @@ static int __devexit smsc911x_drv_remove(struct platform_device *pdev)
iounmap(pdata->ioaddr);
+ (void)smsc911x_disable_resources(pdev);
+ smsc911x_free_resources(pdev);
+
free_netdev(dev);
return 0;
@@ -2218,10 +2390,20 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
pdata->dev = dev;
pdata->msg_enable = ((1 << debug) - 1);
+ platform_set_drvdata(pdev, dev);
+
+ retval = smsc911x_request_resources(pdev);
+ if (retval)
+ goto out_return_resources;
+
+ retval = smsc911x_enable_resources(pdev);
+ if (retval)
+ goto out_disable_resources;
+
if (pdata->ioaddr == NULL) {
SMSC_WARN(pdata, probe, "Error smsc911x base address invalid");
retval = -ENOMEM;
- goto out_free_netdev_2;
+ goto out_disable_resources;
}
retval = smsc911x_probe_config_dt(&pdata->config, np);
@@ -2233,7 +2415,7 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
if (retval) {
SMSC_WARN(pdata, probe, "Error smsc911x config not found");
- goto out_unmap_io_3;
+ goto out_disable_resources;
}
/* assume standard, non-shifted, access to HW registers */
@@ -2244,7 +2426,7 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
retval = smsc911x_init(dev);
if (retval < 0)
- goto out_unmap_io_3;
+ goto out_disable_resources;
/* configure irq polarity and type before connecting isr */
if (pdata->config.irq_polarity == SMSC911X_IRQ_POLARITY_ACTIVE_HIGH)
@@ -2264,15 +2446,13 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
if (retval) {
SMSC_WARN(pdata, probe,
"Unable to claim requested irq: %d", dev->irq);
- goto out_unmap_io_3;
+ goto out_free_irq;
}
- platform_set_drvdata(pdev, dev);
-
retval = register_netdev(dev);
if (retval) {
SMSC_WARN(pdata, probe, "Error %i registering device", retval);
- goto out_unset_drvdata_4;
+ goto out_free_irq;
} else {
SMSC_TRACE(pdata, probe,
"Network interface: \"%s\"", dev->name);
@@ -2321,12 +2501,14 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
out_unregister_netdev_5:
unregister_netdev(dev);
-out_unset_drvdata_4:
- platform_set_drvdata(pdev, NULL);
+out_free_irq:
free_irq(dev->irq, dev);
-out_unmap_io_3:
+out_disable_resources:
+ (void)smsc911x_disable_resources(pdev);
+out_return_resources:
+ smsc911x_free_resources(pdev);
+ platform_set_drvdata(pdev, NULL);
iounmap(pdata->ioaddr);
-out_free_netdev_2:
free_netdev(dev);
out_release_io_1:
release_mem_region(res->start, resource_size(res));
diff --git a/drivers/net/ethernet/smsc/smsc911x.h b/drivers/net/ethernet/smsc/smsc911x.h
index 8d67aacf8867..9ad5e5d39a03 100644
--- a/drivers/net/ethernet/smsc/smsc911x.h
+++ b/drivers/net/ethernet/smsc/smsc911x.h
@@ -401,4 +401,6 @@
#include <asm/smsc911x.h>
#endif
+#include <linux/smscphy.h>
+
#endif /* __SMSC911X_H__ */
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index edb24b0e337b..a9efbdfe5302 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -279,9 +279,10 @@ static void smsc9420_ethtool_get_drvinfo(struct net_device *netdev,
{
struct smsc9420_pdata *pd = netdev_priv(netdev);
- strcpy(drvinfo->driver, DRV_NAME);
- strcpy(drvinfo->bus_info, pci_name(pd->pdev));
- strcpy(drvinfo->version, DRV_VERSION);
+ strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->bus_info, pci_name(pd->pdev),
+ sizeof(drvinfo->bus_info));
+ strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
}
static u32 smsc9420_ethtool_get_msglevel(struct net_device *netdev)
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 22745d7bf530..036428348faa 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -12,11 +12,36 @@ config STMMAC_ETH
if STMMAC_ETH
+config STMMAC_PLATFORM
+ tristate "STMMAC platform bus support"
+ depends on STMMAC_ETH
+ default y
+ ---help---
+ This selects the platform specific bus support for
+ the stmmac device driver. This is the driver used
+ on many embedded STM platforms based on ARM and SuperH
+ processors.
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
+
+config STMMAC_PCI
+ tristate "STMMAC support on PCI bus (EXPERIMENTAL)"
+ depends on STMMAC_ETH && PCI && EXPERIMENTAL
+ ---help---
+	  This selects the Synopsys DWMAC available on PCI devices.
+	  If you have a controller with this interface, say Y or M here.
+
+	  This PCI support is tested on XILINX XC2V3000 FF1152AMT0221
+ D1215994A VIRTEX FPGA board.
+
+ If unsure, say N.
+
config STMMAC_DEBUG_FS
bool "Enable monitoring via sysFS "
default n
depends on STMMAC_ETH && DEBUG_FS
- -- help
+ ---help---
The stmmac entry in /sys reports DMA TX/RX rings
or (if supported) the HW cap register.
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index d7c45164ea79..bc965ac9e025 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -2,6 +2,8 @@ obj-$(CONFIG_STMMAC_ETH) += stmmac.o
stmmac-$(CONFIG_STMMAC_TIMER) += stmmac_timer.o
stmmac-$(CONFIG_STMMAC_RING) += ring_mode.o
stmmac-$(CONFIG_STMMAC_CHAINED) += chain_mode.o
+stmmac-$(CONFIG_STMMAC_PLATFORM) += stmmac_platform.o
+stmmac-$(CONFIG_STMMAC_PCI) += stmmac_pci.o
stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o \
dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \
dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 2cc119295821..d0b814ef0675 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -22,7 +22,11 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
+#include <linux/etherdevice.h>
#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/module.h>
+#include <linux/init.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define STMMAC_VLAN_TAG_USED
#include <linux/if_vlan.h>
@@ -315,5 +319,8 @@ extern void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
unsigned int high, unsigned int low);
extern void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
unsigned int high, unsigned int low);
+
+extern void stmmac_set_mac(void __iomem *ioaddr, bool enable);
+
extern void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
extern const struct stmmac_ring_mode_ops ring_mode_ops;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
index e25093510b0c..f20aa12931d0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
@@ -238,6 +238,19 @@ void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
writel(data, ioaddr + low);
}
+/* Enable/disable the MAC RX/TX */
+void stmmac_set_mac(void __iomem *ioaddr, bool enable)
+{
+ u32 value = readl(ioaddr + MAC_CTRL_REG);
+
+ if (enable)
+ value |= MAC_RNABLE_RX | MAC_ENABLE_TX;
+ else
+ value &= ~(MAC_ENABLE_TX | MAC_RNABLE_RX);
+
+ writel(value, ioaddr + MAC_CTRL_REG);
+}
+
void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
unsigned int high, unsigned int low)
{
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
index 41e6b33e1b08..c07cfe989f6e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
@@ -22,6 +22,7 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
+#include <linux/kernel.h>
#include <linux/io.h>
#include "mmc.h"
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index a140a8fbf051..120740020e2c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -20,7 +20,8 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
-#define DRV_MODULE_VERSION "Oct_2011"
+#define STMMAC_RESOURCE_NAME "stmmaceth"
+#define DRV_MODULE_VERSION "Dec_2011"
#include <linux/stmmac.h>
#include <linux/phy.h>
#include "common.h"
@@ -82,8 +83,18 @@ struct stmmac_priv {
int hw_cap_support;
};
+extern int phyaddr;
+
extern int stmmac_mdio_unregister(struct net_device *ndev);
extern int stmmac_mdio_register(struct net_device *ndev);
extern void stmmac_set_ethtool_ops(struct net_device *netdev);
extern const struct stmmac_desc_ops enh_desc_ops;
extern const struct stmmac_desc_ops ndesc_ops;
+
+int stmmac_freeze(struct net_device *ndev);
+int stmmac_restore(struct net_device *ndev);
+int stmmac_resume(struct net_device *ndev);
+int stmmac_suspend(struct net_device *ndev);
+int stmmac_dvr_remove(struct net_device *ndev);
+struct stmmac_priv *stmmac_dvr_probe(struct device *device,
+ struct plat_stmmacenet_data *plat_dat);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 0395f9eba801..9573303a706b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -185,9 +185,10 @@ static void stmmac_ethtool_getdrvinfo(struct net_device *dev,
struct stmmac_priv *priv = netdev_priv(dev);
if (priv->plat->has_gmac)
- strcpy(info->driver, GMAC_ETHTOOL_NAME);
+ strlcpy(info->driver, GMAC_ETHTOOL_NAME, sizeof(info->driver));
else
- strcpy(info->driver, MAC100_ETHTOOL_NAME);
+ strlcpy(info->driver, MAC100_ETHTOOL_NAME,
+ sizeof(info->driver));
strcpy(info->version, DRV_MODULE_VERSION);
info->fw_version[0] = '\0';
@@ -458,7 +459,7 @@ static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
return 0;
}
-static struct ethtool_ops stmmac_ethtool_ops = {
+static const struct ethtool_ops stmmac_ethtool_ops = {
.begin = stmmac_check_if_running,
.get_drvinfo = stmmac_ethtool_getdrvinfo,
.get_settings = stmmac_ethtool_getsettings,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 8ea770a89f25..96fa2da30763 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -28,12 +28,8 @@
https://bugzilla.stlinux.com/
*******************************************************************************/
-#include <linux/module.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
-#include <linux/etherdevice.h>
-#include <linux/platform_device.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
@@ -52,8 +48,6 @@
#endif
#include "stmmac.h"
-#define STMMAC_RESOURCE_NAME "stmmaceth"
-
#undef STMMAC_DEBUG
/*#define STMMAC_DEBUG*/
#ifdef STMMAC_DEBUG
@@ -93,7 +87,7 @@ static int debug = -1; /* -1: default, 0: no output, 16: all */
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Message Level (0: no output, 16: all)");
-static int phyaddr = -1;
+int phyaddr = -1;
module_param(phyaddr, int, S_IRUGO);
MODULE_PARM_DESC(phyaddr, "Physical device address");
@@ -141,6 +135,11 @@ static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
+#ifdef CONFIG_STMMAC_DEBUG_FS
+static int stmmac_init_fs(struct net_device *dev);
+static void stmmac_exit_fs(void);
+#endif
+
/**
* stmmac_verify_args - verify the driver parameters.
* Description: it verifies if some wrong parameter is passed to the driver.
@@ -308,7 +307,7 @@ static int stmmac_init_phy(struct net_device *dev)
priv->speed = 0;
priv->oldduplex = -1;
- snprintf(bus_id, MII_BUS_ID_SIZE, "%x", priv->plat->bus_id);
+ snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x", priv->plat->bus_id);
snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
priv->plat->phy_addr);
pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id);
@@ -345,22 +344,6 @@ static int stmmac_init_phy(struct net_device *dev)
return 0;
}
-static inline void stmmac_enable_mac(void __iomem *ioaddr)
-{
- u32 value = readl(ioaddr + MAC_CTRL_REG);
-
- value |= MAC_RNABLE_RX | MAC_ENABLE_TX;
- writel(value, ioaddr + MAC_CTRL_REG);
-}
-
-static inline void stmmac_disable_mac(void __iomem *ioaddr)
-{
- u32 value = readl(ioaddr + MAC_CTRL_REG);
-
- value &= ~(MAC_ENABLE_TX | MAC_RNABLE_RX);
- writel(value, ioaddr + MAC_CTRL_REG);
-}
-
/**
* display_ring
* @p: pointer to the ring.
@@ -781,10 +764,15 @@ static void stmmac_mmc_setup(struct stmmac_priv *priv)
unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
- /* Do not manage MMC IRQ (FIXME) */
+	/* Mask the MMC irq: counters are managed in software and the
+	 * registers are cleared on each read. */
dwmac_mmc_intr_all_mask(priv->ioaddr);
- dwmac_mmc_ctrl(priv->ioaddr, mode);
- memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
+
+ if (priv->dma_cap.rmon) {
+ dwmac_mmc_ctrl(priv->ioaddr, mode);
+ memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
+ } else
+ pr_info(" No MAC Management Counters available\n");
}
static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv)
@@ -882,6 +870,53 @@ static int stmmac_get_hw_features(struct stmmac_priv *priv)
}
/**
+ * stmmac_mac_device_setup
+ * @dev : device pointer
+ * Description: this is to attach the GMAC or MAC 10/100
+ * main core structures that will be completed during the
+ * open step.
+ */
+static int stmmac_mac_device_setup(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ struct mac_device_info *device;
+
+ if (priv->plat->has_gmac)
+ device = dwmac1000_setup(priv->ioaddr);
+ else
+ device = dwmac100_setup(priv->ioaddr);
+
+ if (!device)
+ return -ENOMEM;
+
+ priv->hw = device;
+ priv->hw->ring = &ring_mode_ops;
+
+ if (device_can_wakeup(priv->device)) {
+ priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */
+ enable_irq_wake(priv->wol_irq);
+ }
+
+ return 0;
+}
+
+static void stmmac_check_ether_addr(struct stmmac_priv *priv)
+{
+	/* Verify the MAC address; if it is not valid, try the address
+	 * stored in the hardware and finally fall back to a random one. */
+ if (!is_valid_ether_addr(priv->dev->dev_addr)) {
+ priv->hw->mac->get_umac_addr((void __iomem *)
+ priv->dev->base_addr,
+ priv->dev->dev_addr, 0);
+ if (!is_valid_ether_addr(priv->dev->dev_addr))
+ random_ether_addr(priv->dev->dev_addr);
+ }
+ pr_warning("%s: device MAC address %pM\n", priv->dev->name,
+ priv->dev->dev_addr);
+}
+
+/**
* stmmac_open - open entry point of the driver
* @dev : pointer to the device structure.
* Description:
@@ -895,18 +930,28 @@ static int stmmac_open(struct net_device *dev)
struct stmmac_priv *priv = netdev_priv(dev);
int ret;
- /* Check that the MAC address is valid. If its not, refuse
- * to bring the device up. The user must specify an
- * address using the following linux command:
- * ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx */
- if (!is_valid_ether_addr(dev->dev_addr)) {
- random_ether_addr(dev->dev_addr);
- pr_warning("%s: generated random MAC address %pM\n", dev->name,
- dev->dev_addr);
- }
+ /* MAC HW device setup */
+ ret = stmmac_mac_device_setup(dev);
+ if (ret < 0)
+ return ret;
+
+ stmmac_check_ether_addr(priv);
stmmac_verify_args();
+ /* Override with kernel parameters if supplied XXX CRS XXX
+ * this needs to have multiple instances */
+ if ((phyaddr >= 0) && (phyaddr <= 31))
+ priv->plat->phy_addr = phyaddr;
+
+ /* MDIO bus Registration */
+ ret = stmmac_mdio_register(dev);
+ if (ret < 0) {
+ pr_debug("%s: MDIO bus (id: %d) registration failed",
+ __func__, priv->plat->bus_id);
+ return ret;
+ }
+
#ifdef CONFIG_STMMAC_TIMER
priv->tm = kzalloc(sizeof(struct stmmac_timer *), GFP_KERNEL);
if (unlikely(priv->tm == NULL)) {
@@ -1003,7 +1048,7 @@ static int stmmac_open(struct net_device *dev)
}
/* Enable the MAC Rx/Tx */
- stmmac_enable_mac(priv->ioaddr);
+ stmmac_set_mac(priv->ioaddr, true);
/* Set the HW DMA mode and the COE */
stmmac_dma_operation_mode(priv);
@@ -1012,9 +1057,13 @@ static int stmmac_open(struct net_device *dev)
memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
priv->xstats.threshold = tc;
- if (priv->dma_cap.rmon)
- stmmac_mmc_setup(priv);
+ stmmac_mmc_setup(priv);
+#ifdef CONFIG_STMMAC_DEBUG_FS
+ ret = stmmac_init_fs(dev);
+ if (ret < 0)
+ pr_warning("\tFailed debugFS registration");
+#endif
/* Start the ball rolling... */
DBG(probe, DEBUG, "%s: DMA RX/TX processes started...\n", dev->name);
priv->hw->dma->start_tx(priv->ioaddr);
@@ -1087,10 +1136,15 @@ static int stmmac_release(struct net_device *dev)
free_dma_desc_resources(priv);
/* Disable the MAC Rx/Tx */
- stmmac_disable_mac(priv->ioaddr);
+ stmmac_set_mac(priv->ioaddr, false);
netif_carrier_off(dev);
+#ifdef CONFIG_STMMAC_DEBUG_FS
+ stmmac_exit_fs();
+#endif
+ stmmac_mdio_unregister(dev);
+
return 0;
}
@@ -1466,7 +1520,8 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
-static u32 stmmac_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t stmmac_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
struct stmmac_priv *priv = netdev_priv(dev);
@@ -1734,28 +1789,41 @@ static const struct net_device_ops stmmac_netdev_ops = {
};
/**
- * stmmac_probe - Initialization of the adapter .
- * @dev : device pointer
- * Description: The function initializes the network device structure for
- * the STMMAC driver. It also calls the low level routines
- * in order to init the HW (i.e. the DMA engine)
+ * stmmac_dvr_probe
+ * @device: device pointer
+ * Description: this is the main probe function: it allocates the
+ * net_device via alloc_etherdev and sets up the private structure.
*/
-static int stmmac_probe(struct net_device *dev)
+struct stmmac_priv *stmmac_dvr_probe(struct device *device,
+ struct plat_stmmacenet_data *plat_dat)
{
int ret = 0;
- struct stmmac_priv *priv = netdev_priv(dev);
+ struct net_device *ndev = NULL;
+ struct stmmac_priv *priv;
+
+ ndev = alloc_etherdev(sizeof(struct stmmac_priv));
+ if (!ndev) {
+ pr_err("%s: ERROR: allocating the device\n", __func__);
+ return NULL;
+ }
- ether_setup(dev);
+ SET_NETDEV_DEV(ndev, device);
- dev->netdev_ops = &stmmac_netdev_ops;
- stmmac_set_ethtool_ops(dev);
+ priv = netdev_priv(ndev);
+ priv->device = device;
+ priv->dev = ndev;
+
+ ether_setup(ndev);
+
+ ndev->netdev_ops = &stmmac_netdev_ops;
+ stmmac_set_ethtool_ops(ndev);
- dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
- dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
- dev->watchdog_timeo = msecs_to_jiffies(watchdog);
+ ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
+ ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
#ifdef STMMAC_VLAN_TAG_USED
/* Both mac100 and gmac support receive VLAN tag detection */
- dev->features |= NETIF_F_HW_VLAN_RX;
+ ndev->features |= NETIF_F_HW_VLAN_RX;
#endif
priv->msg_enable = netif_msg_init(debug, default_msg_level);
@@ -1763,248 +1831,60 @@ static int stmmac_probe(struct net_device *dev)
priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
priv->pause = pause;
- netif_napi_add(dev, &priv->napi, stmmac_poll, 64);
-
- /* Get the MAC address */
- priv->hw->mac->get_umac_addr((void __iomem *) dev->base_addr,
- dev->dev_addr, 0);
-
- if (!is_valid_ether_addr(dev->dev_addr))
- pr_warning("\tno valid MAC address;"
- "please, use ifconfig or nwhwconfig!\n");
+ priv->plat = plat_dat;
+ netif_napi_add(ndev, &priv->napi, stmmac_poll, 64);
spin_lock_init(&priv->lock);
spin_lock_init(&priv->tx_lock);
- ret = register_netdev(dev);
+ ret = register_netdev(ndev);
if (ret) {
pr_err("%s: ERROR %i registering the device\n",
__func__, ret);
- return -ENODEV;
+ goto error;
}
DBG(probe, DEBUG, "%s: Scatter/Gather: %s - HW checksums: %s\n",
- dev->name, (dev->features & NETIF_F_SG) ? "on" : "off",
- (dev->features & NETIF_F_IP_CSUM) ? "on" : "off");
+ ndev->name, (ndev->features & NETIF_F_SG) ? "on" : "off",
+ (ndev->features & NETIF_F_IP_CSUM) ? "on" : "off");
- return ret;
-}
+ return priv;
-/**
- * stmmac_mac_device_setup
- * @dev : device pointer
- * Description: select and initialise the mac device (mac100 or Gmac).
- */
-static int stmmac_mac_device_setup(struct net_device *dev)
-{
- struct stmmac_priv *priv = netdev_priv(dev);
-
- struct mac_device_info *device;
+error:
+ netif_napi_del(&priv->napi);
- if (priv->plat->has_gmac) {
- dev->priv_flags |= IFF_UNICAST_FLT;
- device = dwmac1000_setup(priv->ioaddr);
- } else {
- device = dwmac100_setup(priv->ioaddr);
- }
-
- if (!device)
- return -ENOMEM;
-
- priv->hw = device;
- priv->hw->ring = &ring_mode_ops;
-
- if (device_can_wakeup(priv->device)) {
- priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */
- enable_irq_wake(priv->wol_irq);
- }
-
- return 0;
-}
-
-/**
- * stmmac_dvr_probe
- * @pdev: platform device pointer
- * Description: the driver is initialized through platform_device.
- */
-static int stmmac_dvr_probe(struct platform_device *pdev)
-{
- int ret = 0;
- struct resource *res;
- void __iomem *addr = NULL;
- struct net_device *ndev = NULL;
- struct stmmac_priv *priv = NULL;
- struct plat_stmmacenet_data *plat_dat;
-
- pr_info("STMMAC driver:\n\tplatform registration... ");
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
- pr_info("\tdone!\n");
-
- if (!request_mem_region(res->start, resource_size(res),
- pdev->name)) {
- pr_err("%s: ERROR: memory allocation failed"
- "cannot get the I/O addr 0x%x\n",
- __func__, (unsigned int)res->start);
- return -EBUSY;
- }
-
- addr = ioremap(res->start, resource_size(res));
- if (!addr) {
- pr_err("%s: ERROR: memory mapping failed\n", __func__);
- ret = -ENOMEM;
- goto out_release_region;
- }
-
- ndev = alloc_etherdev(sizeof(struct stmmac_priv));
- if (!ndev) {
- pr_err("%s: ERROR: allocating the device\n", __func__);
- ret = -ENOMEM;
- goto out_unmap;
- }
-
- SET_NETDEV_DEV(ndev, &pdev->dev);
-
- /* Get the MAC information */
- ndev->irq = platform_get_irq_byname(pdev, "macirq");
- if (ndev->irq == -ENXIO) {
- pr_err("%s: ERROR: MAC IRQ configuration "
- "information not found\n", __func__);
- ret = -ENXIO;
- goto out_free_ndev;
- }
-
- priv = netdev_priv(ndev);
- priv->device = &(pdev->dev);
- priv->dev = ndev;
- plat_dat = pdev->dev.platform_data;
-
- priv->plat = plat_dat;
-
- priv->ioaddr = addr;
-
- /*
- * On some platforms e.g. SPEAr the wake up irq differs from the mac irq
- * The external wake up irq can be passed through the platform code
- * named as "eth_wake_irq"
- *
- * In case the wake up interrupt is not passed from the platform
- * so the driver will continue to use the mac irq (ndev->irq)
- */
- priv->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
- if (priv->wol_irq == -ENXIO)
- priv->wol_irq = ndev->irq;
-
- platform_set_drvdata(pdev, ndev);
-
- /* Set the I/O base addr */
- ndev->base_addr = (unsigned long)addr;
-
- /* Custom initialisation */
- if (priv->plat->init) {
- ret = priv->plat->init(pdev);
- if (unlikely(ret))
- goto out_free_ndev;
- }
-
- /* MAC HW device detection */
- ret = stmmac_mac_device_setup(ndev);
- if (ret < 0)
- goto out_plat_exit;
-
- /* Network Device Registration */
- ret = stmmac_probe(ndev);
- if (ret < 0)
- goto out_plat_exit;
-
- /* Override with kernel parameters if supplied XXX CRS XXX
- * this needs to have multiple instances */
- if ((phyaddr >= 0) && (phyaddr <= 31))
- priv->plat->phy_addr = phyaddr;
-
- pr_info("\t%s - (dev. name: %s - id: %d, IRQ #%d\n"
- "\tIO base addr: 0x%p)\n", ndev->name, pdev->name,
- pdev->id, ndev->irq, addr);
-
- /* MDIO bus Registration */
- pr_debug("\tMDIO bus (id: %d)...", priv->plat->bus_id);
- ret = stmmac_mdio_register(ndev);
- if (ret < 0)
- goto out_unregister;
- pr_debug("registered!\n");
-
-#ifdef CONFIG_STMMAC_DEBUG_FS
- ret = stmmac_init_fs(ndev);
- if (ret < 0)
- pr_warning("\tFailed debugFS registration");
-#endif
-
- return 0;
-
-out_unregister:
unregister_netdev(ndev);
-out_plat_exit:
- if (priv->plat->exit)
- priv->plat->exit(pdev);
-out_free_ndev:
free_netdev(ndev);
- platform_set_drvdata(pdev, NULL);
-out_unmap:
- iounmap(addr);
-out_release_region:
- release_mem_region(res->start, resource_size(res));
- return ret;
+ return NULL;
}
/**
* stmmac_dvr_remove
- * @pdev: platform device pointer
+ * @ndev: net device pointer
* Description: this function resets the TX/RX processes, disables the MAC RX/TX
- * changes the link status, releases the DMA descriptor rings,
- * unregisters the MDIO bus and unmaps the allocated memory.
+ * changes the link status, releases the DMA descriptor rings.
*/
-static int stmmac_dvr_remove(struct platform_device *pdev)
+int stmmac_dvr_remove(struct net_device *ndev)
{
- struct net_device *ndev = platform_get_drvdata(pdev);
struct stmmac_priv *priv = netdev_priv(ndev);
- struct resource *res;
pr_info("%s:\n\tremoving driver", __func__);
priv->hw->dma->stop_rx(priv->ioaddr);
priv->hw->dma->stop_tx(priv->ioaddr);
- stmmac_disable_mac(priv->ioaddr);
-
+ stmmac_set_mac(priv->ioaddr, false);
netif_carrier_off(ndev);
-
- stmmac_mdio_unregister(ndev);
-
- if (priv->plat->exit)
- priv->plat->exit(pdev);
-
- platform_set_drvdata(pdev, NULL);
unregister_netdev(ndev);
-
- iounmap((void *)priv->ioaddr);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, resource_size(res));
-
-#ifdef CONFIG_STMMAC_DEBUG_FS
- stmmac_exit_fs();
-#endif
-
free_netdev(ndev);
return 0;
}
#ifdef CONFIG_PM
-static int stmmac_suspend(struct device *dev)
+int stmmac_suspend(struct net_device *ndev)
{
- struct net_device *ndev = dev_get_drvdata(dev);
struct stmmac_priv *priv = netdev_priv(ndev);
int dis_ic = 0;
@@ -2038,15 +1918,14 @@ static int stmmac_suspend(struct device *dev)
if (device_may_wakeup(priv->device))
priv->hw->mac->pmt(priv->ioaddr, priv->wolopts);
else
- stmmac_disable_mac(priv->ioaddr);
+ stmmac_set_mac(priv->ioaddr, false);
spin_unlock(&priv->lock);
return 0;
}
-static int stmmac_resume(struct device *dev)
+int stmmac_resume(struct net_device *ndev)
{
- struct net_device *ndev = dev_get_drvdata(dev);
struct stmmac_priv *priv = netdev_priv(ndev);
if (!netif_running(ndev))
@@ -2065,7 +1944,7 @@ static int stmmac_resume(struct device *dev)
netif_device_attach(ndev);
/* Enable the MAC and DMA */
- stmmac_enable_mac(priv->ioaddr);
+ stmmac_set_mac(priv->ioaddr, true);
priv->hw->dma->start_tx(priv->ioaddr);
priv->hw->dma->start_rx(priv->ioaddr);
@@ -2085,68 +1964,23 @@ static int stmmac_resume(struct device *dev)
return 0;
}
-static int stmmac_freeze(struct device *dev)
+int stmmac_freeze(struct net_device *ndev)
{
- struct net_device *ndev = dev_get_drvdata(dev);
-
if (!ndev || !netif_running(ndev))
return 0;
return stmmac_release(ndev);
}
-static int stmmac_restore(struct device *dev)
+int stmmac_restore(struct net_device *ndev)
{
- struct net_device *ndev = dev_get_drvdata(dev);
-
if (!ndev || !netif_running(ndev))
return 0;
return stmmac_open(ndev);
}
-
-static const struct dev_pm_ops stmmac_pm_ops = {
- .suspend = stmmac_suspend,
- .resume = stmmac_resume,
- .freeze = stmmac_freeze,
- .thaw = stmmac_restore,
- .restore = stmmac_restore,
-};
-#else
-static const struct dev_pm_ops stmmac_pm_ops;
#endif /* CONFIG_PM */
-static struct platform_driver stmmac_driver = {
- .probe = stmmac_dvr_probe,
- .remove = stmmac_dvr_remove,
- .driver = {
- .name = STMMAC_RESOURCE_NAME,
- .owner = THIS_MODULE,
- .pm = &stmmac_pm_ops,
- },
-};
-
-/**
- * stmmac_init_module - Entry point for the driver
- * Description: This function is the entry point for the driver.
- */
-static int __init stmmac_init_module(void)
-{
- int ret;
-
- ret = platform_driver_register(&stmmac_driver);
- return ret;
-}
-
-/**
- * stmmac_cleanup_module - Cleanup routine for the driver
- * Description: This function is the cleanup routine for the driver.
- */
-static void __exit stmmac_cleanup_module(void)
-{
- platform_driver_unregister(&stmmac_driver);
-}
-
#ifndef MODULE
static int __init stmmac_cmdline_opt(char *str)
{
@@ -2206,9 +2040,6 @@ err:
__setup("stmmaceth=", stmmac_cmdline_opt);
#endif
-module_init(stmmac_init_module);
-module_exit(stmmac_cleanup_module);
-
-MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet driver");
+MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index 9c3b9d5c3411..da4a1042523a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -109,6 +109,7 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
*/
static int stmmac_mdio_reset(struct mii_bus *bus)
{
+#if defined(CONFIG_STMMAC_PLATFORM)
struct net_device *ndev = bus->priv;
struct stmmac_priv *priv = netdev_priv(ndev);
unsigned int mii_address = priv->hw->mii.addr;
@@ -123,7 +124,7 @@ static int stmmac_mdio_reset(struct mii_bus *bus)
* on MDC, so perform a dummy mdio read.
*/
writel(0, priv->ioaddr + mii_address);
-
+#endif
return 0;
}
@@ -157,7 +158,8 @@ int stmmac_mdio_register(struct net_device *ndev)
new_bus->read = &stmmac_mdio_read;
new_bus->write = &stmmac_mdio_write;
new_bus->reset = &stmmac_mdio_reset;
- snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", mdio_bus_data->bus_id);
+ snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+ new_bus->name, mdio_bus_data->bus_id);
new_bus->priv = ndev;
new_bus->irq = irqlist;
new_bus->phy_mask = mdio_bus_data->phy_mask;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
new file mode 100644
index 000000000000..54a819a36487
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -0,0 +1,221 @@
+/*******************************************************************************
+ This contains the functions to handle the pci driver.
+
+ Copyright (C) 2011-2012 Vayavya Labs Pvt Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Rayagond Kokatanur <rayagond@vayavyalabs.com>
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/pci.h>
+#include "stmmac.h"
+
+struct plat_stmmacenet_data plat_dat;
+struct stmmac_mdio_bus_data mdio_data;
+
+static void stmmac_default_data(void)
+{
+ memset(&plat_dat, 0, sizeof(struct plat_stmmacenet_data));
+ plat_dat.bus_id = 1;
+ plat_dat.phy_addr = 0;
+ plat_dat.interface = PHY_INTERFACE_MODE_GMII;
+ plat_dat.pbl = 32;
+ plat_dat.clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
+ plat_dat.has_gmac = 1;
+ plat_dat.force_sf_dma_mode = 1;
+
+ mdio_data.bus_id = 1;
+ mdio_data.phy_reset = NULL;
+ mdio_data.phy_mask = 0;
+ plat_dat.mdio_bus_data = &mdio_data;
+}
+
+/**
+ * stmmac_pci_probe
+ *
+ * @pdev: pci device pointer
+ * @id: pointer to table of device id/id's.
+ *
+ * Description: This probing function gets called for all PCI devices which
+ * match the ID table and are not "owned" by another driver yet. This function
+ * gets passed a "struct pci_dev *" for each device whose entry in the ID table
+ * matches the device. The probe function returns zero when the driver chooses
+ * to take "ownership" of the device, or a negative error code otherwise.
+ */
+static int __devinit stmmac_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ int ret = 0;
+ void __iomem *addr = NULL;
+ struct stmmac_priv *priv = NULL;
+ int i;
+
+ /* Enable pci device */
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ pr_err("%s : ERROR: failed to enable %s device\n", __func__,
+ pci_name(pdev));
+ return ret;
+ }
+ if (pci_request_regions(pdev, STMMAC_RESOURCE_NAME)) {
+ pr_err("%s: ERROR: failed to get PCI region\n", __func__);
+ ret = -ENODEV;
+ goto err_out_req_reg_failed;
+ }
+
+ /* Get the base address of device */
+ for (i = 0; i <= 5; i++) {
+ if (pci_resource_len(pdev, i) == 0)
+ continue;
+ addr = pci_iomap(pdev, i, 0);
+ if (addr == NULL) {
+			pr_err("%s: ERROR: cannot map register memory, aborting",
+ __func__);
+ ret = -EIO;
+ goto err_out_map_failed;
+ }
+ break;
+ }
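+	/* Enable bus mastering so the device can perform DMA */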
+ pci_set_master(pdev);
+
+ stmmac_default_data();
+
+ priv = stmmac_dvr_probe(&(pdev->dev), &plat_dat);
+ if (!priv) {
+		pr_err("%s: main driver probe failed", __func__);
+ goto err_out;
+ }
+ priv->ioaddr = addr;
+ priv->dev->base_addr = (unsigned long)addr;
+ priv->dev->irq = pdev->irq;
+ priv->wol_irq = pdev->irq;
+
+ pci_set_drvdata(pdev, priv->dev);
+
+	pr_debug("STMMAC PCI driver registration completed");
+
+ return 0;
+
+err_out:
+ pci_clear_master(pdev);
+err_out_map_failed:
+ pci_release_regions(pdev);
+err_out_req_reg_failed:
+ pci_disable_device(pdev);
+
+ return ret;
+}
+
+/**
+ * stmmac_pci_remove
+ *
+ * @pdev: pci device pointer
+ * Description: this function calls the main driver remove function to free
+ * the net resources and releases the PCI resources.
+ */
+static void __devexit stmmac_pci_remove(struct pci_dev *pdev)
+{
+ struct net_device *ndev = pci_get_drvdata(pdev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+
+ stmmac_dvr_remove(ndev);
+
+ pci_set_drvdata(pdev, NULL);
+ pci_iounmap(pdev, priv->ioaddr);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+#ifdef CONFIG_PM
+static int stmmac_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *ndev = pci_get_drvdata(pdev);
+ int ret;
+
+ ret = stmmac_suspend(ndev);
+ pci_save_state(pdev);
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+ return ret;
+}
+
+static int stmmac_pci_resume(struct pci_dev *pdev)
+{
+ struct net_device *ndev = pci_get_drvdata(pdev);
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+
+ return stmmac_resume(ndev);
+}
+#endif
+
+#define STMMAC_VENDOR_ID 0x700
+#define STMMAC_DEVICE_ID 0x1108
+
+static DEFINE_PCI_DEVICE_TABLE(stmmac_id_table) = {
+ { PCI_DEVICE(STMMAC_VENDOR_ID, STMMAC_DEVICE_ID) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(pci, stmmac_id_table);
+
+static struct pci_driver stmmac_driver = {
+ .name = STMMAC_RESOURCE_NAME,
+ .id_table = stmmac_id_table,
+ .probe = stmmac_pci_probe,
+ .remove = __devexit_p(stmmac_pci_remove),
+#ifdef CONFIG_PM
+ .suspend = stmmac_pci_suspend,
+ .resume = stmmac_pci_resume,
+#endif
+};
+
+/**
+ * stmmac_init_module - Entry point for the driver
+ * Description: This function is the entry point for the driver.
+ */
+static int __init stmmac_init_module(void)
+{
+ int ret;
+
+ ret = pci_register_driver(&stmmac_driver);
+ if (ret < 0)
+ pr_err("%s: ERROR: driver registration failed\n", __func__);
+
+ return ret;
+}
+
+/**
+ * stmmac_cleanup_module - Cleanup routine for the driver
+ * Description: This function is the cleanup routine for the driver.
+ */
+static void __exit stmmac_cleanup_module(void)
+{
+ pci_unregister_driver(&stmmac_driver);
+}
+
+module_init(stmmac_init_module);
+module_exit(stmmac_cleanup_module);
+
+MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PCI driver");
+MODULE_AUTHOR("Rayagond Kokatanur <rayagond.kokatanur@vayavyalabs.com>");
+MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
+MODULE_LICENSE("GPL");
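
Editor's note on the BAR scan in stmmac_pci_probe() above: the loop simply maps the first
non-empty memory BAR. A hypothetical helper using the generic pci_select_bars() API (not part
of this patch, shown only to illustrate the same logic; <linux/pci.h> is already included by the
file) could look like:

static void __iomem *stmmac_pci_map_first_bar(struct pci_dev *pdev)
{
	/* Bitmask of the standard BARs (0-5) that decode memory space */
	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
	int i;

	for (i = 0; i <= PCI_STD_RESOURCE_END; i++)
		if (bars & (1 << i))
			return pci_iomap(pdev, i, 0);	/* map the whole BAR */

	return NULL;
}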
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
new file mode 100644
index 000000000000..1ac83243649a
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -0,0 +1,198 @@
+/*******************************************************************************
+ This contains the functions to handle the platform driver.
+
+ Copyright (C) 2007-2011 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include "stmmac.h"
+
+/**
+ * stmmac_pltfr_probe
+ * @pdev: platform device pointer
+ * Description: platform_device probe function. It allocates
+ * the necessary resources and invokes the main driver probe to
+ * init the net device, register the mdio bus etc.
+ */
+static int stmmac_pltfr_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct resource *res;
+ void __iomem *addr = NULL;
+ struct stmmac_priv *priv = NULL;
+ struct plat_stmmacenet_data *plat_dat;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
+ pr_err("%s: ERROR: memory allocation failed, "
+ "cannot get the I/O addr 0x%x\n",
+ __func__, (unsigned int)res->start);
+ return -EBUSY;
+ }
+
+ addr = ioremap(res->start, resource_size(res));
+ if (!addr) {
+ pr_err("%s: ERROR: memory mapping failed", __func__);
+ ret = -ENOMEM;
+ goto out_release_region;
+ }
+ plat_dat = pdev->dev.platform_data;
+ priv = stmmac_dvr_probe(&(pdev->dev), plat_dat);
+ if (!priv) {
+ pr_err("%s: main driver probe failed", __func__);
+ ret = -ENODEV;
+ goto out_unmap;
+ }
+
+ priv->ioaddr = addr;
+ /* Set the I/O base addr */
+ priv->dev->base_addr = (unsigned long)addr;
+
+ /* Get the MAC information */
+ priv->dev->irq = platform_get_irq_byname(pdev, "macirq");
+ if (priv->dev->irq == -ENXIO) {
+ pr_err("%s: ERROR: MAC IRQ configuration "
+ "information not found\n", __func__);
+ ret = -ENXIO;
+ goto out_unmap;
+ }
+
+ /*
+ * On some platforms, e.g. SPEAr, the wake-up IRQ differs from the MAC IRQ.
+ * The external wake-up IRQ can be passed by the platform code through a
+ * resource named "eth_wake_irq".
+ *
+ * If the wake-up interrupt is not passed from the platform, the driver
+ * keeps using the MAC IRQ (ndev->irq).
+ */
+ priv->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
+ if (priv->wol_irq == -ENXIO)
+ priv->wol_irq = priv->dev->irq;
+
+ platform_set_drvdata(pdev, priv->dev);
+
+ /* Custom initialisation */
+ if (priv->plat->init) {
+ ret = priv->plat->init(pdev);
+ if (unlikely(ret))
+ goto out_unmap;
+ }
+
+ pr_debug("STMMAC platform driver registration completed");
+
+ return 0;
+
+out_unmap:
+ iounmap(addr);
+ platform_set_drvdata(pdev, NULL);
+
+out_release_region:
+ release_mem_region(res->start, resource_size(res));
+
+ return ret;
+}
+
+/**
+ * stmmac_pltfr_remove
+ * @pdev: platform device pointer
+ * Description: this function calls the main driver code to free the net
+ * resources, invokes the platform's exit hook and releases the resources
+ * (e.g. memory).
+ */
+static int stmmac_pltfr_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ struct resource *res;
+ int ret = stmmac_dvr_remove(ndev);
+
+ if (priv->plat->exit)
+ priv->plat->exit(pdev);
+
+ platform_set_drvdata(pdev, NULL);
+
+ iounmap(priv->ioaddr);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(res->start, resource_size(res));
+
+ return ret;
+}
+
+#ifdef CONFIG_PM
+static int stmmac_pltfr_suspend(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+
+ return stmmac_suspend(ndev);
+}
+
+static int stmmac_pltfr_resume(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+
+ return stmmac_resume(ndev);
+}
+
+static int stmmac_pltfr_freeze(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+
+ return stmmac_freeze(ndev);
+}
+
+static int stmmac_pltfr_restore(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+
+ return stmmac_restore(ndev);
+}
+
+static const struct dev_pm_ops stmmac_pltfr_pm_ops = {
+ .suspend = stmmac_pltfr_suspend,
+ .resume = stmmac_pltfr_resume,
+ .freeze = stmmac_pltfr_freeze,
+ .thaw = stmmac_pltfr_restore,
+ .restore = stmmac_pltfr_restore,
+};
+#else
+static const struct dev_pm_ops stmmac_pltfr_pm_ops;
+#endif /* CONFIG_PM */
+
+static struct platform_driver stmmac_driver = {
+ .probe = stmmac_pltfr_probe,
+ .remove = stmmac_pltfr_remove,
+ .driver = {
+ .name = STMMAC_RESOURCE_NAME,
+ .owner = THIS_MODULE,
+ .pm = &stmmac_pltfr_pm_ops,
+ },
+};
+
+module_platform_driver(stmmac_driver);
+
+MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PLATFORM driver");
+MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
+MODULE_LICENSE("GPL");
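
Editor's note for context: stmmac_pltfr_probe() above expects board code to have registered an
"stmmaceth" platform device carrying the MAC register window, an IRQ resource named "macirq"
(optionally "eth_wake_irq") and a struct plat_stmmacenet_data as platform data. A minimal
board-file sketch follows; the base address 0xfd110000, IRQ 24 and the field values are made-up
examples, not taken from this patch, and "stmmaceth" is assumed to be what the driver's local
STMMAC_RESOURCE_NAME expands to.

#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/stmmac.h>

static struct resource stmmac_eth_resources[] = {
	{
		/* Assumed MAC register window, 8 KiB at 0xfd110000 */
		.start	= 0xfd110000,
		.end	= 0xfd110000 + 0x2000 - 1,
		.flags	= IORESOURCE_MEM,
	}, {
		/* Looked up by platform_get_irq_byname(pdev, "macirq") */
		.name	= "macirq",
		.start	= 24,			/* assumed interrupt line */
		.end	= 24,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct plat_stmmacenet_data stmmac_eth_data = {
	/* Same kind of defaults the PCI glue sets in stmmac_default_data() */
	.bus_id		= 1,
	.phy_addr	= 0,
	.pbl		= 32,
	.has_gmac	= 1,
};

static struct platform_device stmmac_eth_device = {
	.name		= "stmmaceth",		/* must match the driver name */
	.id		= 0,
	.num_resources	= ARRAY_SIZE(stmmac_eth_resources),
	.resource	= stmmac_eth_resources,
	.dev		= {
		.platform_data	= &stmmac_eth_data,
	},
};

Board init code would then call platform_device_register(&stmmac_eth_device) before the driver
is loaded; matching on the device name is what binds it to the platform_driver defined above.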
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index fd40988c19a6..f10665f594c4 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -4532,10 +4532,9 @@ static void cas_set_multicast(struct net_device *dev)
static void cas_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct cas *cp = netdev_priv(dev);
- strncpy(info->driver, DRV_MODULE_NAME, ETHTOOL_BUSINFO_LEN);
- strncpy(info->version, DRV_MODULE_VERSION, ETHTOOL_BUSINFO_LEN);
- info->fw_version[0] = '\0';
- strncpy(info->bus_info, pci_name(cp->pdev), ETHTOOL_BUSINFO_LEN);
+ strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info));
info->regdump_len = cp->casreg_len < CAS_MAX_REGS ?
cp->casreg_len : CAS_MAX_REGS;
info->n_stats = CAS_NUM_STAT_KEYS;
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 73c708107a37..cf433931304f 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -1151,19 +1151,8 @@ static int link_status_mii(struct niu *np, int *link_up_p)
supported |= SUPPORTED_1000baseT_Full;
lp->supported = supported;
- advertising = 0;
- if (advert & ADVERTISE_10HALF)
- advertising |= ADVERTISED_10baseT_Half;
- if (advert & ADVERTISE_10FULL)
- advertising |= ADVERTISED_10baseT_Full;
- if (advert & ADVERTISE_100HALF)
- advertising |= ADVERTISED_100baseT_Half;
- if (advert & ADVERTISE_100FULL)
- advertising |= ADVERTISED_100baseT_Full;
- if (ctrl1000 & ADVERTISE_1000HALF)
- advertising |= ADVERTISED_1000baseT_Half;
- if (ctrl1000 & ADVERTISE_1000FULL)
- advertising |= ADVERTISED_1000baseT_Full;
+ advertising = mii_adv_to_ethtool_adv_t(advert);
+ advertising |= mii_ctrl1000_to_ethtool_adv_t(ctrl1000);
if (bmcr & BMCR_ANENABLE) {
int neg, neg1000;
@@ -3609,6 +3598,7 @@ static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx)
static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
{
struct netdev_queue *txq;
+ unsigned int tx_bytes;
u16 pkt_cnt, tmp;
int cons, index;
u64 cs;
@@ -3631,12 +3621,18 @@ static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
netif_printk(np, tx_done, KERN_DEBUG, np->dev,
"%s() pkt_cnt[%u] cons[%d]\n", __func__, pkt_cnt, cons);
- while (pkt_cnt--)
+ tx_bytes = 0;
+ tmp = pkt_cnt;
+ while (tmp--) {
+ tx_bytes += rp->tx_buffs[cons].skb->len;
cons = release_tx_packet(np, rp, cons);
+ }
rp->cons = cons;
smp_mb();
+ netdev_tx_completed_queue(txq, pkt_cnt, tx_bytes);
+
out:
if (unlikely(netif_tx_queue_stopped(txq) &&
(niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) {
@@ -4337,6 +4333,7 @@ static void niu_free_channels(struct niu *np)
struct tx_ring_info *rp = &np->tx_rings[i];
niu_free_tx_ring_info(np, rp);
+ netdev_tx_reset_queue(netdev_get_tx_queue(np->dev, i));
}
kfree(np->tx_rings);
np->tx_rings = NULL;
@@ -6742,6 +6739,8 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
prod = NEXT_TX(rp, prod);
}
+ netdev_tx_sent_queue(txq, skb->len);
+
if (prod < rp->prod)
rp->wrap_bit ^= TX_RING_KICK_WRAP;
rp->prod = prod;
@@ -6823,12 +6822,13 @@ static void niu_get_drvinfo(struct net_device *dev,
struct niu *np = netdev_priv(dev);
struct niu_vpd *vpd = &np->vpd;
- strcpy(info->driver, DRV_MODULE_NAME);
- strcpy(info->version, DRV_MODULE_VERSION);
- sprintf(info->fw_version, "%d.%d",
+ strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+ snprintf(info->fw_version, sizeof(info->fw_version), "%d.%d",
vpd->fcode_major, vpd->fcode_minor);
if (np->parent->plat_type != PLAT_TYPE_NIU)
- strcpy(info->bus_info, pci_name(np->pdev));
+ strlcpy(info->bus_info, pci_name(np->pdev),
+ sizeof(info->bus_info));
}
static int niu_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
@@ -8589,9 +8589,11 @@ static int __devinit phy_record(struct niu_parent *parent,
if (dev_id_1 < 0 || dev_id_2 < 0)
return 0;
if (type == PHY_TYPE_PMA_PMD || type == PHY_TYPE_PCS) {
+ /* Because of the NIU_PHY_ID_MASK being applied, the 8704
+ * test covers the 8706 as well.
+ */
if (((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8704) &&
- ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_MRVL88X2011) &&
- ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8706))
+ ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_MRVL88X2011))
return 0;
} else {
if ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM5464R)
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
index 0d8cfd9ea053..220f724c3377 100644
--- a/drivers/net/ethernet/sun/sunbmac.c
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -1293,15 +1293,4 @@ static struct platform_driver bigmac_sbus_driver = {
.remove = __devexit_p(bigmac_sbus_remove),
};
-static int __init bigmac_init(void)
-{
- return platform_driver_register(&bigmac_sbus_driver);
-}
-
-static void __exit bigmac_exit(void)
-{
- platform_driver_unregister(&bigmac_sbus_driver);
-}
-
-module_init(bigmac_init);
-module_exit(bigmac_exit);
+module_platform_driver(bigmac_sbus_driver);
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index ceab215bb4a3..31441a870b0b 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -2517,9 +2517,9 @@ static void gem_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
{
struct gem *gp = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, pci_name(gp->pdev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(gp->pdev), sizeof(info->bus_info));
}
static int gem_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index cf14ab9db576..09c518655db2 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -2457,11 +2457,11 @@ static void hme_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
{
struct happy_meal *hp = netdev_priv(dev);
- strcpy(info->driver, "sunhme");
- strcpy(info->version, "2.02");
+ strlcpy(info->driver, "sunhme", sizeof(info->driver));
+ strlcpy(info->version, "2.02", sizeof(info->version));
if (hp->happy_flags & HFLAG_PCI) {
struct pci_dev *pdev = hp->happy_dev;
- strcpy(info->bus_info, pci_name(pdev));
+ strlcpy(info->bus_info, pci_name(pdev), sizeof(info->bus_info));
}
#ifdef CONFIG_SBUS
else {
@@ -2469,7 +2469,8 @@ static void hme_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
struct platform_device *op = hp->happy_dev;
regs = of_get_property(op->dev.of_node, "regs", NULL);
if (regs)
- sprintf(info->bus_info, "SBUS:%d",
+ snprintf(info->bus_info, sizeof(info->bus_info),
+ "SBUS:%d",
regs->which_io);
}
#endif
@@ -2849,7 +2850,7 @@ err_out:
static int is_quattro_p(struct pci_dev *pdev)
{
struct pci_dev *busdev = pdev->bus->self;
- struct list_head *tmp;
+ struct pci_dev *this_pdev;
int n_hmes;
if (busdev == NULL ||
@@ -2858,15 +2859,10 @@ static int is_quattro_p(struct pci_dev *pdev)
return 0;
n_hmes = 0;
- tmp = pdev->bus->devices.next;
- while (tmp != &pdev->bus->devices) {
- struct pci_dev *this_pdev = pci_dev_b(tmp);
-
+ list_for_each_entry(this_pdev, &pdev->bus->devices, bus_list) {
if (this_pdev->vendor == PCI_VENDOR_ID_SUN &&
this_pdev->device == PCI_DEVICE_ID_SUN_HAPPYMEAL)
n_hmes++;
-
- tmp = tmp->next;
}
if (n_hmes != 4)
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 3a90af6d111c..4b19e9b0606b 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -727,9 +727,10 @@ static void __bdx_vlan_rx_vid(struct net_device *ndev, uint16_t vid, int enable)
* @ndev network device
* @vid VLAN vid to add
*/
-static void bdx_vlan_rx_add_vid(struct net_device *ndev, uint16_t vid)
+static int bdx_vlan_rx_add_vid(struct net_device *ndev, uint16_t vid)
{
__bdx_vlan_rx_vid(ndev, vid, 1);
+ return 0;
}
/*
@@ -737,9 +738,10 @@ static void bdx_vlan_rx_add_vid(struct net_device *ndev, uint16_t vid)
* @ndev network device
* @vid VLAN vid to kill
*/
-static void bdx_vlan_rx_kill_vid(struct net_device *ndev, unsigned short vid)
+static int bdx_vlan_rx_kill_vid(struct net_device *ndev, unsigned short vid)
{
__bdx_vlan_rx_vid(ndev, vid, 0);
+ return 0;
}
/**
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index aaac0c7ad111..4d9a28ffd3c3 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -1269,7 +1269,7 @@ int __devinit cpmac_init(void)
}
cpmac_mii->phy_mask = ~(mask | 0x80000000);
- snprintf(cpmac_mii->id, MII_BUS_ID_SIZE, "1");
+ snprintf(cpmac_mii->id, MII_BUS_ID_SIZE, "cpmac-1");
res = mdiobus_register(cpmac_mii);
if (res)
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index dca9d3369cdd..c97d2f590855 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -836,11 +836,13 @@ int cpdma_chan_stop(struct cpdma_chan *chan)
chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);
/* handle completed packets */
+ spin_unlock_irqrestore(&chan->lock, flags);
do {
ret = __cpdma_chan_process(chan);
if (ret < 0)
break;
} while ((ret & CPDMA_DESC_TD_COMPLETE) == 0);
+ spin_lock_irqsave(&chan->lock, flags);
/* remaining packets haven't been tx/rx'ed, clean them up */
while (chan->head) {
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 815c7970261b..794ac30a577b 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -115,6 +115,7 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
#define EMAC_DEF_TX_CH (0) /* Default 0th channel */
#define EMAC_DEF_RX_CH (0) /* Default 0th channel */
#define EMAC_DEF_RX_NUM_DESC (128)
+#define EMAC_DEF_TX_NUM_DESC (128)
#define EMAC_DEF_MAX_TX_CH (1) /* Max TX channels configured */
#define EMAC_DEF_MAX_RX_CH (1) /* Max RX channels configured */
#define EMAC_POLL_WEIGHT (64) /* Default NAPI poll weight */
@@ -336,6 +337,7 @@ struct emac_priv {
u32 mac_hash2;
u32 multicast_hash_cnt[EMAC_NUM_MULTICAST_BITS];
u32 rx_addr_type;
+ atomic_t cur_tx;
const char *phy_id;
struct phy_device *phydev;
spinlock_t lock;
@@ -1044,6 +1046,9 @@ static void emac_tx_handler(void *token, int len, int status)
{
struct sk_buff *skb = token;
struct net_device *ndev = skb->dev;
+ struct emac_priv *priv = netdev_priv(ndev);
+
+ atomic_dec(&priv->cur_tx);
if (unlikely(netif_queue_stopped(ndev)))
netif_start_queue(ndev);
@@ -1092,6 +1097,9 @@ static int emac_dev_xmit(struct sk_buff *skb, struct net_device *ndev)
goto fail_tx;
}
+ if (atomic_inc_return(&priv->cur_tx) >= EMAC_DEF_TX_NUM_DESC)
+ netif_stop_queue(ndev);
+
return NETDEV_TX_OK;
fail_tx:
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index 7615040df756..ef7c9c17bfff 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -313,7 +313,8 @@ static int __devinit davinci_mdio_probe(struct platform_device *pdev)
data->bus->reset = davinci_mdio_reset,
data->bus->parent = dev;
data->bus->priv = data;
- snprintf(data->bus->id, MII_BUS_ID_SIZE, "%x", pdev->id);
+ snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s-%x",
+ pdev->name, pdev->id);
data->clk = clk_get(dev, NULL);
if (IS_ERR(data->clk)) {
diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c
index 10826d8a2a2d..d9951afb9269 100644
--- a/drivers/net/ethernet/tile/tilepro.c
+++ b/drivers/net/ethernet/tile/tilepro.c
@@ -926,7 +926,7 @@ static int tile_net_poll(struct napi_struct *napi, int budget)
goto done;
/* Re-enable the ingress interrupt. */
- enable_percpu_irq(priv->intr_id);
+ enable_percpu_irq(priv->intr_id, 0);
/* HACK: Avoid the "rotting packet" problem (see above). */
if (qup->__packet_receive_read !=
@@ -1256,7 +1256,7 @@ static void tile_net_stop_aux(struct net_device *dev)
sizeof(dummy), NETIO_IPP_STOP_SHIM_OFF) < 0)
panic("Failed to stop LIPP/LEPP!\n");
- priv->partly_opened = 0;
+ priv->partly_opened = false;
}
@@ -1296,7 +1296,7 @@ static void tile_net_open_enable(void *dev_ptr)
info->napi_enabled = true;
/* Enable the ingress interrupt. */
- enable_percpu_irq(priv->intr_id);
+ enable_percpu_irq(priv->intr_id, 0);
}
@@ -1507,7 +1507,7 @@ static int tile_net_open(struct net_device *dev)
priv->network_cpus_count, priv->network_cpus_credits);
#endif
- priv->partly_opened = 1;
+ priv->partly_opened = true;
} else {
/* FIXME: Is this possible? */
@@ -1697,7 +1697,7 @@ static unsigned int tile_net_tx_frags(lepp_frag_t *frags,
for (i = 0; i < sh->nr_frags; i++) {
skb_frag_t *f = &sh->frags[i];
- unsigned long pfn = page_to_pfn(f->page);
+ unsigned long pfn = page_to_pfn(skb_frag_page(f));
/* FIXME: Compute "hash_for_home" properly. */
/* ISSUE: The hypervisor checks CHIP_HAS_REV1_DMA_PACKETS(). */
@@ -1706,7 +1706,7 @@ static unsigned int tile_net_tx_frags(lepp_frag_t *frags,
/* FIXME: Hmmm. */
if (!hash_default) {
void *va = pfn_to_kaddr(pfn) + f->page_offset;
- BUG_ON(PageHighMem(f->page));
+ BUG_ON(PageHighMem(skb_frag_page(f)));
finv_buffer_remote(va, f->size, 0);
}
@@ -2260,8 +2260,7 @@ static int tile_net_get_mac(struct net_device *dev)
return 0;
}
-
-static struct net_device_ops tile_net_ops = {
+static const struct net_device_ops tile_net_ops = {
.ndo_open = tile_net_open,
.ndo_stop = tile_net_stop,
.ndo_start_xmit = tile_net_tx,
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index 7bf1e2015784..5ee82a77723b 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -640,7 +640,7 @@ static inline void gelic_card_disable_rxdmac(struct gelic_card *card)
int status;
/* this hvc blocks until the DMA in progress really stopped */
- status = lv1_net_stop_rx_dma(bus_id(card), dev_id(card), 0);
+ status = lv1_net_stop_rx_dma(bus_id(card), dev_id(card));
if (status)
dev_err(ctodev(card),
"lv1_net_stop_rx_dma failed, %d\n", status);
@@ -658,7 +658,7 @@ static inline void gelic_card_disable_txdmac(struct gelic_card *card)
int status;
/* this hvc blocks until the DMA in progress really stopped */
- status = lv1_net_stop_tx_dma(bus_id(card), dev_id(card), 0);
+ status = lv1_net_stop_tx_dma(bus_id(card), dev_id(card));
if (status)
dev_err(ctodev(card),
"lv1_net_stop_tx_dma failed, status=%d\n", status);
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index a8df7eca0956..164fb775d7b3 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -1604,7 +1604,7 @@ tsi108_init_one(struct platform_device *pdev)
data->phyregs = ioremap(einfo->phyregs, 0x400);
if (NULL == data->phyregs) {
err = -ENOMEM;
- goto regs_fail;
+ goto phyregs_fail;
}
/* MII setup */
data->mii_if.dev = dev;
@@ -1663,9 +1663,11 @@ tsi108_init_one(struct platform_device *pdev)
return 0;
register_fail:
- iounmap(data->regs);
iounmap(data->phyregs);
+phyregs_fail:
+ iounmap(data->regs);
+
regs_fail:
free_netdev(dev);
return err;
@@ -1688,18 +1690,6 @@ static void tsi108_timed_checker(unsigned long dev_ptr)
mod_timer(&data->timer, jiffies + CHECK_PHY_INTERVAL);
}
-static int tsi108_ether_init(void)
-{
- int ret;
- ret = platform_driver_register (&tsi_eth_driver);
- if (ret < 0){
- printk("tsi108_ether_init: error initializing ethernet "
- "device\n");
- return ret;
- }
- return 0;
-}
-
static int tsi108_ether_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
@@ -1714,13 +1704,7 @@ static int tsi108_ether_remove(struct platform_device *pdev)
return 0;
}
-static void tsi108_ether_exit(void)
-{
- platform_driver_unregister(&tsi_eth_driver);
-}
-
-module_init(tsi108_ether_init);
-module_exit(tsi108_ether_exit);
+module_platform_driver(tsi_eth_driver);
MODULE_AUTHOR("Tundra Semiconductor Corporation");
MODULE_DESCRIPTION("Tsi108 Gigabit Ethernet driver");
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index f34dd99fe579..10b18eb63d25 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -35,13 +35,13 @@
#define DRV_VERSION "1.5.0"
#define DRV_RELDATE "2010-10-09"
+#include <linux/types.h>
/* A few user-configurable values.
These may be modified when a driver module is loaded. */
-
-#define DEBUG
-static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
-static int max_interrupt_work = 20;
+static int debug = 0;
+#define RHINE_MSG_DEFAULT \
+ (0x0000)
/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
Setting to > 1518 effectively disables this feature. */
@@ -55,7 +55,7 @@ static int rx_copybreak;
/* Work-around for broken BIOSes: they are unable to get the chip back out of
power state D3 so PXE booting fails. bootparam(7): via-rhine.avoid_D3=1 */
-static int avoid_D3;
+static bool avoid_D3;
/*
* In case you are looking for 'options[]' or 'full_duplex[]', they
@@ -127,12 +127,10 @@ MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
MODULE_LICENSE("GPL");
-module_param(max_interrupt_work, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param(avoid_D3, bool, 0);
-MODULE_PARM_DESC(max_interrupt_work, "VIA Rhine maximum events handled per interrupt");
-MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)");
+MODULE_PARM_DESC(debug, "VIA Rhine debug message flags");
MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)");
@@ -350,16 +348,25 @@ static const int mmio_verify_registers[] = {
/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
- IntrRxDone=0x0001, IntrRxErr=0x0004, IntrRxEmpty=0x0020,
- IntrTxDone=0x0002, IntrTxError=0x0008, IntrTxUnderrun=0x0210,
- IntrPCIErr=0x0040,
- IntrStatsMax=0x0080, IntrRxEarly=0x0100,
- IntrRxOverflow=0x0400, IntrRxDropped=0x0800, IntrRxNoBuf=0x1000,
- IntrTxAborted=0x2000, IntrLinkChange=0x4000,
- IntrRxWakeUp=0x8000,
- IntrNormalSummary=0x0003, IntrAbnormalSummary=0xC260,
- IntrTxDescRace=0x080000, /* mapped from IntrStatus2 */
- IntrTxErrSummary=0x082218,
+ IntrRxDone = 0x0001,
+ IntrTxDone = 0x0002,
+ IntrRxErr = 0x0004,
+ IntrTxError = 0x0008,
+ IntrRxEmpty = 0x0020,
+ IntrPCIErr = 0x0040,
+ IntrStatsMax = 0x0080,
+ IntrRxEarly = 0x0100,
+ IntrTxUnderrun = 0x0210,
+ IntrRxOverflow = 0x0400,
+ IntrRxDropped = 0x0800,
+ IntrRxNoBuf = 0x1000,
+ IntrTxAborted = 0x2000,
+ IntrLinkChange = 0x4000,
+ IntrRxWakeUp = 0x8000,
+ IntrTxDescRace = 0x080000, /* mapped from IntrStatus2 */
+ IntrNormalSummary = IntrRxDone | IntrTxDone,
+ IntrTxErrSummary = IntrTxDescRace | IntrTxAborted | IntrTxError |
+ IntrTxUnderrun,
};
/* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */
@@ -438,8 +445,13 @@ struct rhine_private {
struct net_device *dev;
struct napi_struct napi;
spinlock_t lock;
+ struct mutex task_lock;
+ bool task_enable;
+ struct work_struct slow_event_task;
struct work_struct reset_task;
+ u32 msg_enable;
+
/* Frequently used values: keep some adjacent for cache effect. */
u32 quirks;
struct rx_desc *rx_head_desc;
@@ -475,41 +487,50 @@ static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int rhine_open(struct net_device *dev);
static void rhine_reset_task(struct work_struct *work);
+static void rhine_slow_event_task(struct work_struct *work);
static void rhine_tx_timeout(struct net_device *dev);
static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
struct net_device *dev);
static irqreturn_t rhine_interrupt(int irq, void *dev_instance);
static void rhine_tx(struct net_device *dev);
static int rhine_rx(struct net_device *dev, int limit);
-static void rhine_error(struct net_device *dev, int intr_status);
static void rhine_set_rx_mode(struct net_device *dev);
static struct net_device_stats *rhine_get_stats(struct net_device *dev);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static const struct ethtool_ops netdev_ethtool_ops;
static int rhine_close(struct net_device *dev);
-static void rhine_shutdown (struct pci_dev *pdev);
-static void rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid);
-static void rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid);
-static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr);
-static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr);
-static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask);
-static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask);
-static void rhine_init_cam_filter(struct net_device *dev);
-static void rhine_update_vcam(struct net_device *dev);
-
-#define RHINE_WAIT_FOR(condition) \
-do { \
- int i = 1024; \
- while (!(condition) && --i) \
- ; \
- if (debug > 1 && i < 512) \
- pr_info("%4d cycles used @ %s:%d\n", \
- 1024 - i, __func__, __LINE__); \
-} while (0)
-
-static inline u32 get_intr_status(struct net_device *dev)
+static int rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid);
+static int rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid);
+static void rhine_restart_tx(struct net_device *dev);
+
+static void rhine_wait_bit(struct rhine_private *rp, u8 reg, u8 mask, bool high)
+{
+ void __iomem *ioaddr = rp->base;
+ int i;
+
+ for (i = 0; i < 1024; i++) {
+ if (high ^ !!(ioread8(ioaddr + reg) & mask))
+ break;
+ udelay(10);
+ }
+ if (i > 64) {
+ netif_dbg(rp, hw, rp->dev, "%s bit wait (%02x/%02x) cycle "
+ "count: %04d\n", high ? "high" : "low", reg, mask, i);
+ }
+}
+
+static void rhine_wait_bit_high(struct rhine_private *rp, u8 reg, u8 mask)
+{
+ rhine_wait_bit(rp, reg, mask, true);
+}
+
+static void rhine_wait_bit_low(struct rhine_private *rp, u8 reg, u8 mask)
+{
+ rhine_wait_bit(rp, reg, mask, false);
+}
+
+static u32 rhine_get_events(struct rhine_private *rp)
{
- struct rhine_private *rp = netdev_priv(dev);
void __iomem *ioaddr = rp->base;
u32 intr_status;
@@ -520,6 +541,16 @@ static inline u32 get_intr_status(struct net_device *dev)
return intr_status;
}
+static void rhine_ack_events(struct rhine_private *rp, u32 mask)
+{
+ void __iomem *ioaddr = rp->base;
+
+ if (rp->quirks & rqStatusWBRace)
+ iowrite8(mask >> 16, ioaddr + IntrStatus2);
+ iowrite16(mask, ioaddr + IntrStatus);
+ mmiowb();
+}
+
/*
* Get power related registers into sane state.
* Notify user about past WOL event.
@@ -584,6 +615,7 @@ static void rhine_chip_reset(struct net_device *dev)
{
struct rhine_private *rp = netdev_priv(dev);
void __iomem *ioaddr = rp->base;
+ u8 cmd1;
iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
IOSYNC;
@@ -596,13 +628,12 @@ static void rhine_chip_reset(struct net_device *dev)
iowrite8(0x40, ioaddr + MiscCmd);
/* Reset can take somewhat longer (rare) */
- RHINE_WAIT_FOR(!(ioread8(ioaddr + ChipCmd1) & Cmd1Reset));
+ rhine_wait_bit_low(rp, ChipCmd1, Cmd1Reset);
}
- if (debug > 1)
- netdev_info(dev, "Reset %s\n",
- (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) ?
- "failed" : "succeeded");
+ cmd1 = ioread8(ioaddr + ChipCmd1);
+ netif_info(rp, hw, dev, "Reset %s\n", (cmd1 & Cmd1Reset) ?
+ "failed" : "succeeded");
}
#ifdef USE_MMIO
@@ -628,9 +659,15 @@ static void __devinit rhine_reload_eeprom(long pioaddr, struct net_device *dev)
{
struct rhine_private *rp = netdev_priv(dev);
void __iomem *ioaddr = rp->base;
+ int i;
outb(0x20, pioaddr + MACRegEEcsr);
- RHINE_WAIT_FOR(!(inb(pioaddr + MACRegEEcsr) & 0x20));
+ for (i = 0; i < 1024; i++) {
+ if (!(inb(pioaddr + MACRegEEcsr) & 0x20))
+ break;
+ }
+ if (i > 512)
+ pr_info("%4d cycles used @ %s:%d\n", i, __func__, __LINE__);
#ifdef USE_MMIO
/*
@@ -656,23 +693,127 @@ static void rhine_poll(struct net_device *dev)
}
#endif
+static void rhine_kick_tx_threshold(struct rhine_private *rp)
+{
+ if (rp->tx_thresh < 0xe0) {
+ void __iomem *ioaddr = rp->base;
+
+ rp->tx_thresh += 0x20;
+ BYTE_REG_BITS_SET(rp->tx_thresh, 0x80, ioaddr + TxConfig);
+ }
+}
+
+static void rhine_tx_err(struct rhine_private *rp, u32 status)
+{
+ struct net_device *dev = rp->dev;
+
+ if (status & IntrTxAborted) {
+ netif_info(rp, tx_err, dev,
+ "Abort %08x, frame dropped\n", status);
+ }
+
+ if (status & IntrTxUnderrun) {
+ rhine_kick_tx_threshold(rp);
+ netif_info(rp, tx_err, dev, "Transmitter underrun, "
+ "Tx threshold now %02x\n", rp->tx_thresh);
+ }
+
+ if (status & IntrTxDescRace)
+ netif_info(rp, tx_err, dev, "Tx descriptor write-back race\n");
+
+ if ((status & IntrTxError) &&
+ (status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace)) == 0) {
+ rhine_kick_tx_threshold(rp);
+ netif_info(rp, tx_err, dev, "Unspecified error. "
+ "Tx threshold now %02x\n", rp->tx_thresh);
+ }
+
+ rhine_restart_tx(dev);
+}
+
+static void rhine_update_rx_crc_and_missed_errord(struct rhine_private *rp)
+{
+ void __iomem *ioaddr = rp->base;
+ struct net_device_stats *stats = &rp->dev->stats;
+
+ stats->rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
+ stats->rx_missed_errors += ioread16(ioaddr + RxMissed);
+
+ /*
+ * Clears the "tally counters" for CRC errors and missed frames(?).
+ * It has been reported that some chips need a write of 0 to clear
+ * these, for others the counters are set to 1 when written to and
+ * instead cleared when read. So we clear them both ways ...
+ */
+ iowrite32(0, ioaddr + RxMissed);
+ ioread16(ioaddr + RxCRCErrs);
+ ioread16(ioaddr + RxMissed);
+}
+
+#define RHINE_EVENT_NAPI_RX (IntrRxDone | \
+ IntrRxErr | \
+ IntrRxEmpty | \
+ IntrRxOverflow | \
+ IntrRxDropped | \
+ IntrRxNoBuf | \
+ IntrRxWakeUp)
+
+#define RHINE_EVENT_NAPI_TX_ERR (IntrTxError | \
+ IntrTxAborted | \
+ IntrTxUnderrun | \
+ IntrTxDescRace)
+#define RHINE_EVENT_NAPI_TX (IntrTxDone | RHINE_EVENT_NAPI_TX_ERR)
+
+#define RHINE_EVENT_NAPI (RHINE_EVENT_NAPI_RX | \
+ RHINE_EVENT_NAPI_TX | \
+ IntrStatsMax)
+#define RHINE_EVENT_SLOW (IntrPCIErr | IntrLinkChange)
+#define RHINE_EVENT (RHINE_EVENT_NAPI | RHINE_EVENT_SLOW)
+
static int rhine_napipoll(struct napi_struct *napi, int budget)
{
struct rhine_private *rp = container_of(napi, struct rhine_private, napi);
struct net_device *dev = rp->dev;
void __iomem *ioaddr = rp->base;
- int work_done;
+ u16 enable_mask = RHINE_EVENT & 0xffff;
+ int work_done = 0;
+ u32 status;
+
+ status = rhine_get_events(rp);
+ rhine_ack_events(rp, status & ~RHINE_EVENT_SLOW);
+
+ if (status & RHINE_EVENT_NAPI_RX)
+ work_done += rhine_rx(dev, budget);
+
+ if (status & RHINE_EVENT_NAPI_TX) {
+ if (status & RHINE_EVENT_NAPI_TX_ERR) {
+ /* Avoid scavenging before Tx engine turned off */
+ rhine_wait_bit_low(rp, ChipCmd, CmdTxOn);
+ if (ioread8(ioaddr + ChipCmd) & CmdTxOn)
+ netif_warn(rp, tx_err, dev, "Tx still on\n");
+ }
- work_done = rhine_rx(dev, budget);
+ rhine_tx(dev);
+
+ if (status & RHINE_EVENT_NAPI_TX_ERR)
+ rhine_tx_err(rp, status);
+ }
+
+ if (status & IntrStatsMax) {
+ spin_lock(&rp->lock);
+ rhine_update_rx_crc_and_missed_errord(rp);
+ spin_unlock(&rp->lock);
+ }
+
+ if (status & RHINE_EVENT_SLOW) {
+ enable_mask &= ~RHINE_EVENT_SLOW;
+ schedule_work(&rp->slow_event_task);
+ }
if (work_done < budget) {
napi_complete(napi);
-
- iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow |
- IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
- IntrTxDone | IntrTxError | IntrTxUnderrun |
- IntrPCIErr | IntrStatsMax | IntrLinkChange,
- ioaddr + IntrEnable);
+ iowrite16(enable_mask, ioaddr + IntrEnable);
+ mmiowb();
}
return work_done;
}
@@ -796,6 +937,7 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
rp->quirks = quirks;
rp->pioaddr = pioaddr;
rp->pdev = pdev;
+ rp->msg_enable = netif_msg_init(debug, RHINE_MSG_DEFAULT);
rc = pci_request_regions(pdev, DRV_NAME);
if (rc)
@@ -855,7 +997,9 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
dev->irq = pdev->irq;
spin_lock_init(&rp->lock);
+ mutex_init(&rp->task_lock);
INIT_WORK(&rp->reset_task, rhine_reset_task);
+ INIT_WORK(&rp->slow_event_task, rhine_slow_event_task);
rp->mii_if.dev = dev;
rp->mii_if.mdio_read = mdio_read;
@@ -915,8 +1059,8 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
}
}
rp->mii_if.phy_id = phy_id;
- if (debug > 1 && avoid_D3)
- netdev_info(dev, "No D3 power state at shutdown\n");
+ if (avoid_D3)
+ netif_info(rp, probe, dev, "No D3 power state at shutdown\n");
return 0;
@@ -1092,7 +1236,7 @@ static void rhine_check_media(struct net_device *dev, unsigned int init_media)
struct rhine_private *rp = netdev_priv(dev);
void __iomem *ioaddr = rp->base;
- mii_check_media(&rp->mii_if, debug, init_media);
+ mii_check_media(&rp->mii_if, netif_msg_link(rp), init_media);
if (rp->mii_if.full_duplex)
iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
@@ -1100,24 +1244,26 @@ static void rhine_check_media(struct net_device *dev, unsigned int init_media)
else
iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
ioaddr + ChipCmd1);
- if (debug > 1)
- netdev_info(dev, "force_media %d, carrier %d\n",
- rp->mii_if.force_media, netif_carrier_ok(dev));
+
+ netif_info(rp, link, dev, "force_media %d, carrier %d\n",
+ rp->mii_if.force_media, netif_carrier_ok(dev));
}
/* Called after status of force_media possibly changed */
static void rhine_set_carrier(struct mii_if_info *mii)
{
+ struct net_device *dev = mii->dev;
+ struct rhine_private *rp = netdev_priv(dev);
+
if (mii->force_media) {
/* autoneg is off: Link is always assumed to be up */
- if (!netif_carrier_ok(mii->dev))
- netif_carrier_on(mii->dev);
- }
- else /* Let MMI library update carrier status */
- rhine_check_media(mii->dev, 0);
- if (debug > 1)
- netdev_info(mii->dev, "force_media %d, carrier %d\n",
- mii->force_media, netif_carrier_ok(mii->dev));
+ if (!netif_carrier_ok(dev))
+ netif_carrier_on(dev);
+ } else /* Let MMI library update carrier status */
+ rhine_check_media(dev, 0);
+
+ netif_info(rp, link, dev, "force_media %d, carrier %d\n",
+ mii->force_media, netif_carrier_ok(dev));
}
/**
@@ -1261,24 +1407,26 @@ static void rhine_update_vcam(struct net_device *dev)
rhine_set_vlan_cam_mask(ioaddr, vCAMmask);
}
-static void rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+static int rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
struct rhine_private *rp = netdev_priv(dev);
- spin_lock_irq(&rp->lock);
+ spin_lock_bh(&rp->lock);
set_bit(vid, rp->active_vlans);
rhine_update_vcam(dev);
- spin_unlock_irq(&rp->lock);
+ spin_unlock_bh(&rp->lock);
+ return 0;
}
-static void rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+static int rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
struct rhine_private *rp = netdev_priv(dev);
- spin_lock_irq(&rp->lock);
+ spin_lock_bh(&rp->lock);
clear_bit(vid, rp->active_vlans);
rhine_update_vcam(dev);
- spin_unlock_irq(&rp->lock);
+ spin_unlock_bh(&rp->lock);
+ return 0;
}
static void init_registers(struct net_device *dev)
@@ -1307,12 +1455,7 @@ static void init_registers(struct net_device *dev)
napi_enable(&rp->napi);
- /* Enable interrupts by setting the interrupt mask. */
- iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow |
- IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
- IntrTxDone | IntrTxError | IntrTxUnderrun |
- IntrPCIErr | IntrStatsMax | IntrLinkChange,
- ioaddr + IntrEnable);
+ iowrite16(RHINE_EVENT & 0xffff, ioaddr + IntrEnable);
iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
ioaddr + ChipCmd);
@@ -1320,23 +1463,27 @@ static void init_registers(struct net_device *dev)
}
/* Enable MII link status auto-polling (required for IntrLinkChange) */
-static void rhine_enable_linkmon(void __iomem *ioaddr)
+static void rhine_enable_linkmon(struct rhine_private *rp)
{
+ void __iomem *ioaddr = rp->base;
+
iowrite8(0, ioaddr + MIICmd);
iowrite8(MII_BMSR, ioaddr + MIIRegAddr);
iowrite8(0x80, ioaddr + MIICmd);
- RHINE_WAIT_FOR((ioread8(ioaddr + MIIRegAddr) & 0x20));
+ rhine_wait_bit_high(rp, MIIRegAddr, 0x20);
iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
}
/* Disable MII link status auto-polling (required for MDIO access) */
-static void rhine_disable_linkmon(void __iomem *ioaddr, u32 quirks)
+static void rhine_disable_linkmon(struct rhine_private *rp)
{
+ void __iomem *ioaddr = rp->base;
+
iowrite8(0, ioaddr + MIICmd);
- if (quirks & rqRhineI) {
+ if (rp->quirks & rqRhineI) {
iowrite8(0x01, ioaddr + MIIRegAddr); // MII_BMSR
/* Can be called from ISR. Evil. */
@@ -1345,13 +1492,13 @@ static void rhine_disable_linkmon(void __iomem *ioaddr, u32 quirks)
/* 0x80 must be set immediately before turning it off */
iowrite8(0x80, ioaddr + MIICmd);
- RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x20);
+ rhine_wait_bit_high(rp, MIIRegAddr, 0x20);
/* Heh. Now clear 0x80 again. */
iowrite8(0, ioaddr + MIICmd);
}
else
- RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x80);
+ rhine_wait_bit_high(rp, MIIRegAddr, 0x80);
}
/* Read and write over the MII Management Data I/O (MDIO) interface. */
@@ -1362,16 +1509,16 @@ static int mdio_read(struct net_device *dev, int phy_id, int regnum)
void __iomem *ioaddr = rp->base;
int result;
- rhine_disable_linkmon(ioaddr, rp->quirks);
+ rhine_disable_linkmon(rp);
/* rhine_disable_linkmon already cleared MIICmd */
iowrite8(phy_id, ioaddr + MIIPhyAddr);
iowrite8(regnum, ioaddr + MIIRegAddr);
iowrite8(0x40, ioaddr + MIICmd); /* Trigger read */
- RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x40));
+ rhine_wait_bit_low(rp, MIICmd, 0x40);
result = ioread16(ioaddr + MIIData);
- rhine_enable_linkmon(ioaddr);
+ rhine_enable_linkmon(rp);
return result;
}
@@ -1380,16 +1527,33 @@ static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value
struct rhine_private *rp = netdev_priv(dev);
void __iomem *ioaddr = rp->base;
- rhine_disable_linkmon(ioaddr, rp->quirks);
+ rhine_disable_linkmon(rp);
/* rhine_disable_linkmon already cleared MIICmd */
iowrite8(phy_id, ioaddr + MIIPhyAddr);
iowrite8(regnum, ioaddr + MIIRegAddr);
iowrite16(value, ioaddr + MIIData);
iowrite8(0x20, ioaddr + MIICmd); /* Trigger write */
- RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x20));
+ rhine_wait_bit_low(rp, MIICmd, 0x20);
- rhine_enable_linkmon(ioaddr);
+ rhine_enable_linkmon(rp);
+}
+
+static void rhine_task_disable(struct rhine_private *rp)
+{
+ mutex_lock(&rp->task_lock);
+ rp->task_enable = false;
+ mutex_unlock(&rp->task_lock);
+
+ cancel_work_sync(&rp->slow_event_task);
+ cancel_work_sync(&rp->reset_task);
+}
+
+static void rhine_task_enable(struct rhine_private *rp)
+{
+ mutex_lock(&rp->task_lock);
+ rp->task_enable = true;
+ mutex_unlock(&rp->task_lock);
}
static int rhine_open(struct net_device *dev)
@@ -1403,8 +1567,7 @@ static int rhine_open(struct net_device *dev)
if (rc)
return rc;
- if (debug > 1)
- netdev_dbg(dev, "%s() irq %d\n", __func__, rp->pdev->irq);
+ netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->pdev->irq);
rc = alloc_ring(dev);
if (rc) {
@@ -1414,11 +1577,12 @@ static int rhine_open(struct net_device *dev)
alloc_rbufs(dev);
alloc_tbufs(dev);
rhine_chip_reset(dev);
+ rhine_task_enable(rp);
init_registers(dev);
- if (debug > 2)
- netdev_dbg(dev, "%s() Done - status %04x MII status: %04x\n",
- __func__, ioread16(ioaddr + ChipCmd),
- mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
+
+ netif_dbg(rp, ifup, dev, "%s() Done - status %04x MII status: %04x\n",
+ __func__, ioread16(ioaddr + ChipCmd),
+ mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
netif_start_queue(dev);
@@ -1431,11 +1595,12 @@ static void rhine_reset_task(struct work_struct *work)
reset_task);
struct net_device *dev = rp->dev;
- /* protect against concurrent rx interrupts */
- disable_irq(rp->pdev->irq);
+ mutex_lock(&rp->task_lock);
- napi_disable(&rp->napi);
+ if (!rp->task_enable)
+ goto out_unlock;
+ napi_disable(&rp->napi);
spin_lock_bh(&rp->lock);
/* clear all descriptors */
@@ -1449,11 +1614,13 @@ static void rhine_reset_task(struct work_struct *work)
init_registers(dev);
spin_unlock_bh(&rp->lock);
- enable_irq(rp->pdev->irq);
dev->trans_start = jiffies; /* prevent tx timeout */
dev->stats.tx_errors++;
netif_wake_queue(dev);
+
+out_unlock:
+ mutex_unlock(&rp->task_lock);
}
static void rhine_tx_timeout(struct net_device *dev)
@@ -1474,7 +1641,6 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
struct rhine_private *rp = netdev_priv(dev);
void __iomem *ioaddr = rp->base;
unsigned entry;
- unsigned long flags;
/* Caution: the write order is important here, set the field
with the "ownership" bits last. */
@@ -1526,7 +1692,6 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
rp->tx_ring[entry].tx_status = 0;
/* lock eth irq */
- spin_lock_irqsave(&rp->lock, flags);
wmb();
rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn);
wmb();
@@ -1547,78 +1712,43 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
netif_stop_queue(dev);
- spin_unlock_irqrestore(&rp->lock, flags);
+ netif_dbg(rp, tx_queued, dev, "Transmit frame #%d queued in slot %d\n",
+ rp->cur_tx - 1, entry);
- if (debug > 4) {
- netdev_dbg(dev, "Transmit frame #%d queued in slot %d\n",
- rp->cur_tx-1, entry);
- }
return NETDEV_TX_OK;
}
+static void rhine_irq_disable(struct rhine_private *rp)
+{
+ iowrite16(0x0000, rp->base + IntrEnable);
+ mmiowb();
+}
+
/* The interrupt handler does all of the Rx thread work and cleans up
after the Tx thread. */
static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
{
struct net_device *dev = dev_instance;
struct rhine_private *rp = netdev_priv(dev);
- void __iomem *ioaddr = rp->base;
- u32 intr_status;
- int boguscnt = max_interrupt_work;
+ u32 status;
int handled = 0;
- while ((intr_status = get_intr_status(dev))) {
- handled = 1;
-
- /* Acknowledge all of the current interrupt sources ASAP. */
- if (intr_status & IntrTxDescRace)
- iowrite8(0x08, ioaddr + IntrStatus2);
- iowrite16(intr_status & 0xffff, ioaddr + IntrStatus);
- IOSYNC;
-
- if (debug > 4)
- netdev_dbg(dev, "Interrupt, status %08x\n",
- intr_status);
+ status = rhine_get_events(rp);
- if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped |
- IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf)) {
- iowrite16(IntrTxAborted |
- IntrTxDone | IntrTxError | IntrTxUnderrun |
- IntrPCIErr | IntrStatsMax | IntrLinkChange,
- ioaddr + IntrEnable);
+ netif_dbg(rp, intr, dev, "Interrupt, status %08x\n", status);
- napi_schedule(&rp->napi);
- }
-
- if (intr_status & (IntrTxErrSummary | IntrTxDone)) {
- if (intr_status & IntrTxErrSummary) {
- /* Avoid scavenging before Tx engine turned off */
- RHINE_WAIT_FOR(!(ioread8(ioaddr+ChipCmd) & CmdTxOn));
- if (debug > 2 &&
- ioread8(ioaddr+ChipCmd) & CmdTxOn)
- netdev_warn(dev,
- "%s: Tx engine still on\n",
- __func__);
- }
- rhine_tx(dev);
- }
+ if (status & RHINE_EVENT) {
+ handled = 1;
- /* Abnormal error summary/uncommon events handlers. */
- if (intr_status & (IntrPCIErr | IntrLinkChange |
- IntrStatsMax | IntrTxError | IntrTxAborted |
- IntrTxUnderrun | IntrTxDescRace))
- rhine_error(dev, intr_status);
+ rhine_irq_disable(rp);
+ napi_schedule(&rp->napi);
+ }
- if (--boguscnt < 0) {
- netdev_warn(dev, "Too much work at interrupt, status=%#08x\n",
- intr_status);
- break;
- }
+ if (status & ~(IntrLinkChange | IntrStatsMax | RHINE_EVENT_NAPI)) {
+ netif_err(rp, intr, dev, "Something Wicked happened! %08x\n",
+ status);
}
- if (debug > 3)
- netdev_dbg(dev, "exiting interrupt, status=%08x\n",
- ioread16(ioaddr + IntrStatus));
return IRQ_RETVAL(handled);
}
@@ -1629,20 +1759,16 @@ static void rhine_tx(struct net_device *dev)
struct rhine_private *rp = netdev_priv(dev);
int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;
- spin_lock(&rp->lock);
-
/* find and cleanup dirty tx descriptors */
while (rp->dirty_tx != rp->cur_tx) {
txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
- if (debug > 6)
- netdev_dbg(dev, "Tx scavenge %d status %08x\n",
- entry, txstatus);
+ netif_dbg(rp, tx_done, dev, "Tx scavenge %d status %08x\n",
+ entry, txstatus);
if (txstatus & DescOwn)
break;
if (txstatus & 0x8000) {
- if (debug > 1)
- netdev_dbg(dev, "Transmit error, Tx status %08x\n",
- txstatus);
+ netif_dbg(rp, tx_done, dev,
+ "Transmit error, Tx status %08x\n", txstatus);
dev->stats.tx_errors++;
if (txstatus & 0x0400)
dev->stats.tx_carrier_errors++;
@@ -1664,10 +1790,8 @@ static void rhine_tx(struct net_device *dev)
dev->stats.collisions += (txstatus >> 3) & 0x0F;
else
dev->stats.collisions += txstatus & 0x0F;
- if (debug > 6)
- netdev_dbg(dev, "collisions: %1.1x:%1.1x\n",
- (txstatus >> 3) & 0xF,
- txstatus & 0xF);
+ netif_dbg(rp, tx_done, dev, "collisions: %1.1x:%1.1x\n",
+ (txstatus >> 3) & 0xF, txstatus & 0xF);
dev->stats.tx_bytes += rp->tx_skbuff[entry]->len;
dev->stats.tx_packets++;
}
@@ -1684,8 +1808,6 @@ static void rhine_tx(struct net_device *dev)
}
if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
netif_wake_queue(dev);
-
- spin_unlock(&rp->lock);
}
/**
@@ -1710,11 +1832,8 @@ static int rhine_rx(struct net_device *dev, int limit)
int count;
int entry = rp->cur_rx % RX_RING_SIZE;
- if (debug > 4) {
- netdev_dbg(dev, "%s(), entry %d status %08x\n",
- __func__, entry,
- le32_to_cpu(rp->rx_head_desc->rx_status));
- }
+ netif_dbg(rp, rx_status, dev, "%s(), entry %d status %08x\n", __func__,
+ entry, le32_to_cpu(rp->rx_head_desc->rx_status));
/* If EOP is set on the next entry, it's a new packet. Send it up. */
for (count = 0; count < limit; ++count) {
@@ -1726,9 +1845,8 @@ static int rhine_rx(struct net_device *dev, int limit)
if (desc_status & DescOwn)
break;
- if (debug > 4)
- netdev_dbg(dev, "%s() status is %08x\n",
- __func__, desc_status);
+ netif_dbg(rp, rx_status, dev, "%s() status %08x\n", __func__,
+ desc_status);
if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
if ((desc_status & RxWholePkt) != RxWholePkt) {
@@ -1744,9 +1862,9 @@ static int rhine_rx(struct net_device *dev, int limit)
dev->stats.rx_length_errors++;
} else if (desc_status & RxErr) {
/* There was a error. */
- if (debug > 2)
- netdev_dbg(dev, "%s() Rx error was %08x\n",
- __func__, desc_status);
+ netif_dbg(rp, rx_err, dev,
+ "%s() Rx error %08x\n", __func__,
+ desc_status);
dev->stats.rx_errors++;
if (desc_status & 0x0030)
dev->stats.rx_length_errors++;
@@ -1836,19 +1954,6 @@ static int rhine_rx(struct net_device *dev, int limit)
return count;
}
-/*
- * Clears the "tally counters" for CRC errors and missed frames(?).
- * It has been reported that some chips need a write of 0 to clear
- * these, for others the counters are set to 1 when written to and
- * instead cleared when read. So we clear them both ways ...
- */
-static inline void clear_tally_counters(void __iomem *ioaddr)
-{
- iowrite32(0, ioaddr + RxMissed);
- ioread16(ioaddr + RxCRCErrs);
- ioread16(ioaddr + RxMissed);
-}
-
static void rhine_restart_tx(struct net_device *dev) {
struct rhine_private *rp = netdev_priv(dev);
void __iomem *ioaddr = rp->base;
@@ -1859,7 +1964,7 @@ static void rhine_restart_tx(struct net_device *dev) {
* If new errors occurred, we need to sort them out before doing Tx.
* In that case the ISR will be back here RSN anyway.
*/
- intr_status = get_intr_status(dev);
+ intr_status = rhine_get_events(rp);
if ((intr_status & IntrTxErrSummary) == 0) {
@@ -1880,79 +1985,50 @@ static void rhine_restart_tx(struct net_device *dev) {
}
else {
/* This should never happen */
- if (debug > 1)
- netdev_warn(dev, "%s() Another error occurred %08x\n",
- __func__, intr_status);
+ netif_warn(rp, tx_err, dev, "another error occurred %08x\n",
+ intr_status);
}
}
-static void rhine_error(struct net_device *dev, int intr_status)
+static void rhine_slow_event_task(struct work_struct *work)
{
- struct rhine_private *rp = netdev_priv(dev);
- void __iomem *ioaddr = rp->base;
+ struct rhine_private *rp =
+ container_of(work, struct rhine_private, slow_event_task);
+ struct net_device *dev = rp->dev;
+ u32 intr_status;
- spin_lock(&rp->lock);
+ mutex_lock(&rp->task_lock);
+
+ if (!rp->task_enable)
+ goto out_unlock;
+
+ intr_status = rhine_get_events(rp);
+ rhine_ack_events(rp, intr_status & RHINE_EVENT_SLOW);
if (intr_status & IntrLinkChange)
rhine_check_media(dev, 0);
- if (intr_status & IntrStatsMax) {
- dev->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
- dev->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
- clear_tally_counters(ioaddr);
- }
- if (intr_status & IntrTxAborted) {
- if (debug > 1)
- netdev_info(dev, "Abort %08x, frame dropped\n",
- intr_status);
- }
- if (intr_status & IntrTxUnderrun) {
- if (rp->tx_thresh < 0xE0)
- BYTE_REG_BITS_SET((rp->tx_thresh += 0x20), 0x80, ioaddr + TxConfig);
- if (debug > 1)
- netdev_info(dev, "Transmitter underrun, Tx threshold now %02x\n",
- rp->tx_thresh);
- }
- if (intr_status & IntrTxDescRace) {
- if (debug > 2)
- netdev_info(dev, "Tx descriptor write-back race\n");
- }
- if ((intr_status & IntrTxError) &&
- (intr_status & (IntrTxAborted |
- IntrTxUnderrun | IntrTxDescRace)) == 0) {
- if (rp->tx_thresh < 0xE0) {
- BYTE_REG_BITS_SET((rp->tx_thresh += 0x20), 0x80, ioaddr + TxConfig);
- }
- if (debug > 1)
- netdev_info(dev, "Unspecified error. Tx threshold now %02x\n",
- rp->tx_thresh);
- }
- if (intr_status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace |
- IntrTxError))
- rhine_restart_tx(dev);
-
- if (intr_status & ~(IntrLinkChange | IntrStatsMax | IntrTxUnderrun |
- IntrTxError | IntrTxAborted | IntrNormalSummary |
- IntrTxDescRace)) {
- if (debug > 1)
- netdev_err(dev, "Something Wicked happened! %08x\n",
- intr_status);
- }
- spin_unlock(&rp->lock);
+ if (intr_status & IntrPCIErr)
+ netif_warn(rp, hw, dev, "PCI error\n");
+
+ napi_disable(&rp->napi);
+ rhine_irq_disable(rp);
+ /* Slow and safe. Consider __napi_schedule as a replacement ? */
+ napi_enable(&rp->napi);
+ napi_schedule(&rp->napi);
+
+out_unlock:
+ mutex_unlock(&rp->task_lock);
}
static struct net_device_stats *rhine_get_stats(struct net_device *dev)
{
struct rhine_private *rp = netdev_priv(dev);
- void __iomem *ioaddr = rp->base;
- unsigned long flags;
- spin_lock_irqsave(&rp->lock, flags);
- dev->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
- dev->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
- clear_tally_counters(ioaddr);
- spin_unlock_irqrestore(&rp->lock, flags);
+ spin_lock_bh(&rp->lock);
+ rhine_update_rx_crc_and_missed_errord(rp);
+ spin_unlock_bh(&rp->lock);
return &dev->stats;
}
@@ -2009,9 +2085,9 @@ static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *i
{
struct rhine_private *rp = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, pci_name(rp->pdev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(rp->pdev), sizeof(info->bus_info));
}
static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
@@ -2019,9 +2095,9 @@ static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
struct rhine_private *rp = netdev_priv(dev);
int rc;
- spin_lock_irq(&rp->lock);
+ mutex_lock(&rp->task_lock);
rc = mii_ethtool_gset(&rp->mii_if, cmd);
- spin_unlock_irq(&rp->lock);
+ mutex_unlock(&rp->task_lock);
return rc;
}
@@ -2031,10 +2107,10 @@ static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
struct rhine_private *rp = netdev_priv(dev);
int rc;
- spin_lock_irq(&rp->lock);
+ mutex_lock(&rp->task_lock);
rc = mii_ethtool_sset(&rp->mii_if, cmd);
- spin_unlock_irq(&rp->lock);
rhine_set_carrier(&rp->mii_if);
+ mutex_unlock(&rp->task_lock);
return rc;
}
@@ -2055,12 +2131,16 @@ static u32 netdev_get_link(struct net_device *dev)
static u32 netdev_get_msglevel(struct net_device *dev)
{
- return debug;
+ struct rhine_private *rp = netdev_priv(dev);
+
+ return rp->msg_enable;
}
static void netdev_set_msglevel(struct net_device *dev, u32 value)
{
- debug = value;
+ struct rhine_private *rp = netdev_priv(dev);
+
+ rp->msg_enable = value;
}
static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
@@ -2116,10 +2196,10 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
if (!netif_running(dev))
return -EINVAL;
- spin_lock_irq(&rp->lock);
+ mutex_lock(&rp->task_lock);
rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
- spin_unlock_irq(&rp->lock);
rhine_set_carrier(&rp->mii_if);
+ mutex_unlock(&rp->task_lock);
return rc;
}
@@ -2129,27 +2209,21 @@ static int rhine_close(struct net_device *dev)
struct rhine_private *rp = netdev_priv(dev);
void __iomem *ioaddr = rp->base;
+ rhine_task_disable(rp);
napi_disable(&rp->napi);
- cancel_work_sync(&rp->reset_task);
netif_stop_queue(dev);
- spin_lock_irq(&rp->lock);
-
- if (debug > 1)
- netdev_dbg(dev, "Shutting down ethercard, status was %04x\n",
- ioread16(ioaddr + ChipCmd));
+ netif_dbg(rp, ifdown, dev, "Shutting down ethercard, status was %04x\n",
+ ioread16(ioaddr + ChipCmd));
/* Switch to loopback mode to avoid hardware races. */
iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);
- /* Disable interrupts by clearing the interrupt mask. */
- iowrite16(0x0000, ioaddr + IntrEnable);
+ rhine_irq_disable(rp);
/* Stop the chip's Tx and Rx processes. */
iowrite16(CmdStop, ioaddr + ChipCmd);
- spin_unlock_irq(&rp->lock);
-
free_irq(rp->pdev->irq, dev);
free_rbufs(dev);
free_tbufs(dev);
@@ -2189,6 +2263,8 @@ static void rhine_shutdown (struct pci_dev *pdev)
if (rp->quirks & rq6patterns)
iowrite8(0x04, ioaddr + WOLcgClr);
+ spin_lock(&rp->lock);
+
if (rp->wolopts & WAKE_MAGIC) {
iowrite8(WOLmagic, ioaddr + WOLcrSet);
/*
@@ -2213,58 +2289,46 @@ static void rhine_shutdown (struct pci_dev *pdev)
iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
}
- /* Hit power state D3 (sleep) */
- if (!avoid_D3)
- iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);
+ spin_unlock(&rp->lock);
- /* TODO: Check use of pci_enable_wake() */
+ if (system_state == SYSTEM_POWER_OFF && !avoid_D3) {
+ iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);
+ pci_wake_from_d3(pdev, true);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
}
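
Editor's note: rhine_shutdown() now drops the chip into D3 only when the machine is really powering off, using the generic pci_wake_from_d3()/pci_set_power_state() helpers; on a plain reboot or kexec the device stays in D0 and remains accessible. The general shape, sketched for a hypothetical driver:

    static void foo_shutdown(struct pci_dev *pdev)
    {
            /* ... program wake-on-LAN sources here ... */

            /* sleep only on a real power-off; reboot/kexec must
             * still be able to reach the device afterwards */
            if (system_state == SYSTEM_POWER_OFF) {
                    pci_wake_from_d3(pdev, true);
                    pci_set_power_state(pdev, PCI_D3hot);
            }
    }
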
-#ifdef CONFIG_PM
-static int rhine_suspend(struct pci_dev *pdev, pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+static int rhine_suspend(struct device *device)
{
+ struct pci_dev *pdev = to_pci_dev(device);
struct net_device *dev = pci_get_drvdata(pdev);
struct rhine_private *rp = netdev_priv(dev);
- unsigned long flags;
if (!netif_running(dev))
return 0;
+ rhine_task_disable(rp);
+ rhine_irq_disable(rp);
napi_disable(&rp->napi);
netif_device_detach(dev);
- pci_save_state(pdev);
- spin_lock_irqsave(&rp->lock, flags);
rhine_shutdown(pdev);
- spin_unlock_irqrestore(&rp->lock, flags);
- free_irq(dev->irq, dev);
return 0;
}
-static int rhine_resume(struct pci_dev *pdev)
+static int rhine_resume(struct device *device)
{
+ struct pci_dev *pdev = to_pci_dev(device);
struct net_device *dev = pci_get_drvdata(pdev);
struct rhine_private *rp = netdev_priv(dev);
- unsigned long flags;
- int ret;
if (!netif_running(dev))
return 0;
- if (request_irq(dev->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev))
- netdev_err(dev, "request_irq failed\n");
-
- ret = pci_set_power_state(pdev, PCI_D0);
- if (debug > 1)
- netdev_info(dev, "Entering power state D0 %s (%d)\n",
- ret ? "failed" : "succeeded", ret);
-
- pci_restore_state(pdev);
-
- spin_lock_irqsave(&rp->lock, flags);
#ifdef USE_MMIO
enable_mmio(rp->pioaddr, rp->quirks);
#endif
@@ -2273,25 +2337,32 @@ static int rhine_resume(struct pci_dev *pdev)
free_rbufs(dev);
alloc_tbufs(dev);
alloc_rbufs(dev);
+ rhine_task_enable(rp);
+ spin_lock_bh(&rp->lock);
init_registers(dev);
- spin_unlock_irqrestore(&rp->lock, flags);
+ spin_unlock_bh(&rp->lock);
netif_device_attach(dev);
return 0;
}
-#endif /* CONFIG_PM */
+
+static SIMPLE_DEV_PM_OPS(rhine_pm_ops, rhine_suspend, rhine_resume);
+#define RHINE_PM_OPS (&rhine_pm_ops)
+
+#else
+
+#define RHINE_PM_OPS NULL
+
+#endif /* !CONFIG_PM_SLEEP */
static struct pci_driver rhine_driver = {
.name = DRV_NAME,
.id_table = rhine_pci_tbl,
.probe = rhine_init_one,
.remove = __devexit_p(rhine_remove_one),
-#ifdef CONFIG_PM
- .suspend = rhine_suspend,
- .resume = rhine_resume,
-#endif /* CONFIG_PM */
- .shutdown = rhine_shutdown,
+ .shutdown = rhine_shutdown,
+ .driver.pm = RHINE_PM_OPS,
};
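
Editor's note: the legacy .suspend/.resume members of struct pci_driver give way to a dev_pm_ops table hung off driver.pm; with SIMPLE_DEV_PM_OPS the same callbacks also cover freeze/thaw and poweroff/restore, and the PCI core handles saving/restoring config space and picking the sleep state, which is why the explicit pci_save_state()/pci_restore_state() calls disappear above. A condensed sketch of the conversion for a hypothetical driver:

    #include <linux/pci.h>
    #include <linux/pm.h>

    static int foo_suspend(struct device *device)
    {
            struct pci_dev *pdev = to_pci_dev(device);

            dev_dbg(&pdev->dev, "suspending\n");
            /* quiesce the hardware; no pci_save_state() needed here */
            return 0;
    }

    static int foo_resume(struct device *device)
    {
            struct pci_dev *pdev = to_pci_dev(device);

            dev_dbg(&pdev->dev, "resuming\n");
            /* reprogram registers; config space is already restored */
            return 0;
    }

    static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

    static struct pci_driver foo_driver = {
            .name      = "foo",
            /* .id_table, .probe, .remove as usual */
            .driver.pm = &foo_pm_ops,
    };
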
static struct dmi_system_id __initdata rhine_dmi_table[] = {
@@ -2320,7 +2391,7 @@ static int __init rhine_init(void)
#endif
if (dmi_check_system(rhine_dmi_table)) {
/* these BIOSes fail at PXE boot if chip is in D3 */
- avoid_D3 = 1;
+ avoid_D3 = true;
pr_warn("Broken BIOS detected, avoid_D3 enabled\n");
}
else if (avoid_D3)
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index 4535d7cc848e..4128d6b8cc28 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -522,7 +522,7 @@ static void velocity_init_cam_filter(struct velocity_info *vptr)
mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
}
-static void velocity_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+static int velocity_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
struct velocity_info *vptr = netdev_priv(dev);
@@ -530,9 +530,10 @@ static void velocity_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
set_bit(vid, vptr->active_vlans);
velocity_init_cam_filter(vptr);
spin_unlock_irq(&vptr->lock);
+ return 0;
}
-static void velocity_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+static int velocity_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
struct velocity_info *vptr = netdev_priv(dev);
@@ -540,6 +541,7 @@ static void velocity_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid
clear_bit(vid, vptr->active_vlans);
velocity_init_cam_filter(vptr);
spin_unlock_irq(&vptr->lock);
+ return 0;
}
static void velocity_init_rx_ring_indexes(struct velocity_info *vptr)
@@ -3270,9 +3272,9 @@ static int velocity_set_settings(struct net_device *dev,
static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct velocity_info *vptr = netdev_priv(dev);
- strcpy(info->driver, VELOCITY_NAME);
- strcpy(info->version, VELOCITY_VERSION);
- strcpy(info->bus_info, pci_name(vptr->pdev));
+ strlcpy(info->driver, VELOCITY_NAME, sizeof(info->driver));
+ strlcpy(info->version, VELOCITY_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(vptr->pdev), sizeof(info->bus_info));
}
static void velocity_ethtool_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 2681b53820ee..f21addb1db95 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -237,7 +237,7 @@ static int temac_dma_bd_init(struct net_device *ndev)
struct sk_buff *skb;
int i;
- lp->rx_skb = kzalloc(sizeof(*lp->rx_skb) * RX_BD_NUM, GFP_KERNEL);
+ lp->rx_skb = kcalloc(RX_BD_NUM, sizeof(*lp->rx_skb), GFP_KERNEL);
if (!lp->rx_skb) {
dev_err(&ndev->dev,
"can't allocate memory for DMA RX buffer\n");
@@ -920,12 +920,26 @@ temac_poll_controller(struct net_device *ndev)
}
#endif
+static int temac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
+{
+ struct temac_local *lp = netdev_priv(ndev);
+
+ if (!netif_running(ndev))
+ return -EINVAL;
+
+ if (!lp->phy_dev)
+ return -EINVAL;
+
+ return phy_mii_ioctl(lp->phy_dev, rq, cmd);
+}
+
static const struct net_device_ops temac_netdev_ops = {
.ndo_open = temac_open,
.ndo_stop = temac_stop,
.ndo_start_xmit = temac_start_xmit,
.ndo_set_mac_address = netdev_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = temac_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = temac_poll_controller,
#endif
@@ -1077,7 +1091,7 @@ static int __devinit temac_of_probe(struct platform_device *op)
of_node_put(np); /* Finished with the DMA node; drop the reference */
- if ((lp->rx_irq == NO_IRQ) || (lp->tx_irq == NO_IRQ)) {
+ if (!lp->rx_irq || !lp->tx_irq) {
dev_err(&op->dev, "could not determine irqs\n");
rc = -ENOMEM;
goto err_iounmap_2;
@@ -1167,17 +1181,7 @@ static struct platform_driver temac_of_driver = {
},
};
-static int __init temac_init(void)
-{
- return platform_driver_register(&temac_of_driver);
-}
-module_init(temac_init);
-
-static void __exit temac_exit(void)
-{
- platform_driver_unregister(&temac_of_driver);
-}
-module_exit(temac_exit);
+module_platform_driver(temac_of_driver);
MODULE_DESCRIPTION("Xilinx LL_TEMAC Ethernet driver");
MODULE_AUTHOR("Yoshio Kashiwagi");
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 8018d7d045b0..79013e5731a5 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -662,7 +662,7 @@ static void xemaclite_rx_handler(struct net_device *dev)
*/
static irqreturn_t xemaclite_interrupt(int irq, void *dev_id)
{
- bool tx_complete = 0;
+ bool tx_complete = false;
struct net_device *dev = dev_id;
struct net_local *lp = netdev_priv(dev);
void __iomem *base_addr = lp->base_addr;
@@ -683,7 +683,7 @@ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id)
tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK;
out_be32(base_addr + XEL_TSR_OFFSET, tx_status);
- tx_complete = 1;
+ tx_complete = true;
}
/* Check if the Transmission for the second buffer is completed */
@@ -695,7 +695,7 @@ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id)
out_be32(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET,
tx_status);
- tx_complete = 1;
+ tx_complete = true;
}
/* If there was a Tx interrupt, call the Tx Handler */
@@ -1129,7 +1129,7 @@ static int __devinit xemaclite_of_probe(struct platform_device *ofdev)
/* Get IRQ for the device */
rc = of_irq_to_resource(ofdev->dev.of_node, 0, &r_irq);
- if (rc == NO_IRQ) {
+ if (!rc) {
dev_err(dev, "no IRQ found\n");
return rc;
}
@@ -1303,27 +1303,7 @@ static struct platform_driver xemaclite_of_driver = {
.remove = __devexit_p(xemaclite_of_remove),
};
-/**
- * xgpiopss_init - Initial driver registration call
- *
- * Return: 0 upon success, or a negative error upon failure.
- */
-static int __init xemaclite_init(void)
-{
- /* No kernel boot options used, we just need to register the driver */
- return platform_driver_register(&xemaclite_of_driver);
-}
-
-/**
- * xemaclite_cleanup - Driver un-registration call
- */
-static void __exit xemaclite_cleanup(void)
-{
- platform_driver_unregister(&xemaclite_of_driver);
-}
-
-module_init(xemaclite_init);
-module_exit(xemaclite_cleanup);
+module_platform_driver(xemaclite_of_driver);
MODULE_AUTHOR("Xilinx, Inc.");
MODULE_DESCRIPTION("Xilinx Ethernet MAC Lite driver");
diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c
index bbe8b7dbf3f3..33979c3ac943 100644
--- a/drivers/net/ethernet/xircom/xirc2ps_cs.c
+++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c
@@ -1411,7 +1411,7 @@ do_open(struct net_device *dev)
static void netdev_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strcpy(info->driver, "xirc2ps_cs");
+ strlcpy(info->driver, "xirc2ps_cs", sizeof(info->driver));
sprintf(info->bus_info, "PCMCIA 0x%lx", dev->base_addr);
}
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index f45c85a84261..72a854f05bb8 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -529,7 +529,7 @@ static int ixp4xx_mdio_register(void)
mdio_bus->name = "IXP4xx MII Bus";
mdio_bus->read = &ixp4xx_mdio_read;
mdio_bus->write = &ixp4xx_mdio_write;
- strcpy(mdio_bus->id, "0");
+ snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "ixp4xx-eth-0");
if ((err = mdiobus_register(mdio_bus)))
mdiobus_free(mdio_bus);
diff --git a/drivers/net/hyperv/Kconfig b/drivers/net/hyperv/Kconfig
new file mode 100644
index 000000000000..936968d23559
--- /dev/null
+++ b/drivers/net/hyperv/Kconfig
@@ -0,0 +1,5 @@
+config HYPERV_NET
+ tristate "Microsoft Hyper-V virtual network driver"
+ depends on HYPERV
+ help
+ Select this option to enable the Hyper-V virtual network driver.
diff --git a/drivers/net/hyperv/Makefile b/drivers/net/hyperv/Makefile
new file mode 100644
index 000000000000..c8a66827100c
--- /dev/null
+++ b/drivers/net/hyperv/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_HYPERV_NET) += hv_netvsc.o
+
+hv_netvsc-y := netvsc_drv.o netvsc.o rndis_filter.o
diff --git a/drivers/staging/hv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index ac1ec8405124..dec5836ae075 100644
--- a/drivers/staging/hv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -39,9 +39,6 @@ struct xferpage_packet {
u32 count;
};
-/* The number of pages which are enough to cover jumbo frame buffer. */
-#define NETVSC_PACKET_MAXPAGE 4
-
/*
* Represent netvsc packet which contains 1 RNDIS and 1 ethernet frame
* within the RNDIS
@@ -77,8 +74,9 @@ struct hv_netvsc_packet {
u32 total_data_buflen;
/* Points to the send/receive buffer where the ethernet frame is */
+ void *data;
u32 page_buf_cnt;
- struct hv_page_buffer page_buf[NETVSC_PACKET_MAXPAGE];
+ struct hv_page_buffer page_buf[0];
};
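
Editor's note: shrinking page_buf[] to a zero-length trailing array makes hv_netvsc_packet variably sized, so each packet carries exactly as many page buffers as the frame needs instead of a fixed NETVSC_PACKET_MAXPAGE worth. Callers allocate the struct and its array in one go; a sketch of that allocation, with num_pages standing in for whatever count the caller computed:

    struct hv_netvsc_packet *pkt;

    /* one allocation covers the header and its page_buf[] tail
     * (the send path in this patch also reserves room for RNDIS
     * metadata after the array) */
    pkt = kzalloc(sizeof(*pkt) +
                  num_pages * sizeof(struct hv_page_buffer),
                  GFP_ATOMIC);
    if (!pkt)
            return -ENOMEM;
    pkt->page_buf_cnt = num_pages;
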
struct netvsc_device_info {
@@ -87,6 +85,27 @@ struct netvsc_device_info {
int ring_size;
};
+enum rndis_device_state {
+ RNDIS_DEV_UNINITIALIZED = 0,
+ RNDIS_DEV_INITIALIZING,
+ RNDIS_DEV_INITIALIZED,
+ RNDIS_DEV_DATAINITIALIZED,
+};
+
+struct rndis_device {
+ struct netvsc_device *net_dev;
+
+ enum rndis_device_state state;
+ bool link_state;
+ atomic_t new_req_id;
+
+ spinlock_t request_lock;
+ struct list_head req_list;
+
+ unsigned char hw_mac_adr[ETH_ALEN];
+};
+
+
/* Interface */
int netvsc_device_add(struct hv_device *device, void *additional_info);
int netvsc_device_remove(struct hv_device *device);
@@ -109,11 +128,13 @@ int rndis_filter_receive(struct hv_device *dev,
int rndis_filter_send(struct hv_device *dev,
struct hv_netvsc_packet *pkt);
+int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter);
+
+
#define NVSP_INVALID_PROTOCOL_VERSION ((u32)0xFFFFFFFF)
#define NVSP_PROTOCOL_VERSION_1 2
-#define NVSP_MIN_PROTOCOL_VERSION NVSP_PROTOCOL_VERSION_1
-#define NVSP_MAX_PROTOCOL_VERSION NVSP_PROTOCOL_VERSION_1
+#define NVSP_PROTOCOL_VERSION_2 0x30002
enum {
NVSP_MSG_TYPE_NONE = 0,
@@ -138,11 +159,36 @@ enum {
NVSP_MSG1_TYPE_SEND_RNDIS_PKT,
NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE,
- /*
- * This should be set to the number of messages for the version with
- * the maximum number of messages.
- */
- NVSP_NUM_MSG_PER_VERSION = 9,
+ /* Version 2 messages */
+ NVSP_MSG2_TYPE_SEND_CHIMNEY_DELEGATED_BUF,
+ NVSP_MSG2_TYPE_SEND_CHIMNEY_DELEGATED_BUF_COMP,
+ NVSP_MSG2_TYPE_REVOKE_CHIMNEY_DELEGATED_BUF,
+
+ NVSP_MSG2_TYPE_RESUME_CHIMNEY_RX_INDICATION,
+
+ NVSP_MSG2_TYPE_TERMINATE_CHIMNEY,
+ NVSP_MSG2_TYPE_TERMINATE_CHIMNEY_COMP,
+
+ NVSP_MSG2_TYPE_INDICATE_CHIMNEY_EVENT,
+
+ NVSP_MSG2_TYPE_SEND_CHIMNEY_PKT,
+ NVSP_MSG2_TYPE_SEND_CHIMNEY_PKT_COMP,
+
+ NVSP_MSG2_TYPE_POST_CHIMNEY_RECV_REQ,
+ NVSP_MSG2_TYPE_POST_CHIMNEY_RECV_REQ_COMP,
+
+ NVSP_MSG2_TYPE_ALLOC_RXBUF,
+ NVSP_MSG2_TYPE_ALLOC_RXBUF_COMP,
+
+ NVSP_MSG2_TYPE_FREE_RXBUF,
+
+ NVSP_MSG2_TYPE_SEND_VMQ_RNDIS_PKT,
+ NVSP_MSG2_TYPE_SEND_VMQ_RNDIS_PKT_COMP,
+
+ NVSP_MSG2_TYPE_SEND_NDIS_CONFIG,
+
+ NVSP_MSG2_TYPE_ALLOC_CHIMNEY_HANDLE,
+ NVSP_MSG2_TYPE_ALLOC_CHIMNEY_HANDLE_COMP,
};
enum {
@@ -153,6 +199,7 @@ enum {
NVSP_STAT_PROTOCOL_TOO_OLD,
NVSP_STAT_INVALID_RNDIS_PKT,
NVSP_STAT_BUSY,
+ NVSP_STAT_PROTOCOL_UNSUPPORTED,
NVSP_STAT_MAX,
};
@@ -337,9 +384,69 @@ union nvsp_1_message_uber {
send_rndis_pkt_complete;
} __packed;
+
+/*
+ * Network VSP protocol version 2 messages:
+ */
+struct nvsp_2_vsc_capability {
+ union {
+ u64 data;
+ struct {
+ u64 vmq:1;
+ u64 chimney:1;
+ u64 sriov:1;
+ u64 ieee8021q:1;
+ u64 correlation_id:1;
+ };
+ };
+} __packed;
+
+struct nvsp_2_send_ndis_config {
+ u32 mtu;
+ u32 reserved;
+ struct nvsp_2_vsc_capability capability;
+} __packed;
+
+/* Allocate receive buffer */
+struct nvsp_2_alloc_rxbuf {
+ /* Allocation ID to match the allocation request and response */
+ u32 alloc_id;
+
+ /* Length of the VM shared memory receive buffer that needs to
+ * be allocated
+ */
+ u32 len;
+} __packed;
+
+/* Allocate receive buffer complete */
+struct nvsp_2_alloc_rxbuf_comp {
+ /* The NDIS_STATUS code for buffer allocation */
+ u32 status;
+
+ u32 alloc_id;
+
+ /* GPADL handle for the allocated receive buffer */
+ u32 gpadl_handle;
+
+ /* Receive buffer ID */
+ u64 recv_buf_id;
+} __packed;
+
+struct nvsp_2_free_rxbuf {
+ u64 recv_buf_id;
+} __packed;
+
+union nvsp_2_message_uber {
+ struct nvsp_2_send_ndis_config send_ndis_config;
+ struct nvsp_2_alloc_rxbuf alloc_rxbuf;
+ struct nvsp_2_alloc_rxbuf_comp alloc_rxbuf_comp;
+ struct nvsp_2_free_rxbuf free_rxbuf;
+} __packed;
+
union nvsp_all_messages {
union nvsp_message_init_uber init_msg;
union nvsp_1_message_uber v1_msg;
+ union nvsp_2_message_uber v2_msg;
} __packed;
/* ALL Messages */
@@ -349,12 +456,9 @@ struct nvsp_message {
} __packed;
+#define NETVSC_MTU 65536
-
-/* #define NVSC_MIN_PROTOCOL_VERSION 1 */
-/* #define NVSC_MAX_PROTOCOL_VERSION 1 */
-
-#define NETVSC_RECEIVE_BUFFER_SIZE (1024*1024) /* 1MB */
+#define NETVSC_RECEIVE_BUFFER_SIZE (1024*1024*2) /* 2MB */
#define NETVSC_RECEIVE_BUFFER_ID 0xcafe
@@ -369,7 +473,10 @@ struct nvsp_message {
struct netvsc_device {
struct hv_device *dev;
+ u32 nvsp_version;
+
atomic_t num_outstanding_sends;
+ bool start_remove;
bool destroy;
/*
* List of free preallocated hv_netvsc_packet to represent receive
diff --git a/drivers/staging/hv/netvsc.c b/drivers/net/hyperv/netvsc.c
index b902579c7fe6..8965b45ce5a5 100644
--- a/drivers/staging/hv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -28,6 +28,7 @@
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
+#include <linux/if_ether.h>
#include "hyperv_net.h"
@@ -41,7 +42,7 @@ static struct netvsc_device *alloc_net_device(struct hv_device *device)
if (!net_device)
return NULL;
-
+ net_device->start_remove = false;
net_device->destroy = false;
net_device->dev = device;
net_device->ndev = ndev;
@@ -230,19 +231,16 @@ static int netvsc_init_recv_buf(struct hv_device *device)
net_device->recv_section_cnt = init_packet->msg.
v1_msg.send_recv_buf_complete.num_sections;
- net_device->recv_section = kmalloc(net_device->recv_section_cnt
- * sizeof(struct nvsp_1_receive_buffer_section), GFP_KERNEL);
+ net_device->recv_section = kmemdup(
+ init_packet->msg.v1_msg.send_recv_buf_complete.sections,
+ net_device->recv_section_cnt *
+ sizeof(struct nvsp_1_receive_buffer_section),
+ GFP_KERNEL);
if (net_device->recv_section == NULL) {
ret = -EINVAL;
goto cleanup;
}
- memcpy(net_device->recv_section,
- init_packet->msg.v1_msg.
- send_recv_buf_complete.sections,
- net_device->recv_section_cnt *
- sizeof(struct nvsp_1_receive_buffer_section));
-
/*
* For 1st release, there should only be 1 section that represents the
* entire receive buffer
@@ -263,27 +261,18 @@ exit:
}
-static int netvsc_connect_vsp(struct hv_device *device)
+/* Negotiate NVSP protocol version */
+static int negotiate_nvsp_ver(struct hv_device *device,
+ struct netvsc_device *net_device,
+ struct nvsp_message *init_packet,
+ u32 nvsp_ver)
{
int ret, t;
- struct netvsc_device *net_device;
- struct nvsp_message *init_packet;
- int ndis_version;
- struct net_device *ndev;
-
- net_device = get_outbound_net_device(device);
- if (!net_device)
- return -ENODEV;
- ndev = net_device->ndev;
-
- init_packet = &net_device->channel_init_pkt;
memset(init_packet, 0, sizeof(struct nvsp_message));
init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
- init_packet->msg.init_msg.init.min_protocol_ver =
- NVSP_MIN_PROTOCOL_VERSION;
- init_packet->msg.init_msg.init.max_protocol_ver =
- NVSP_MAX_PROTOCOL_VERSION;
+ init_packet->msg.init_msg.init.min_protocol_ver = nvsp_ver;
+ init_packet->msg.init_msg.init.max_protocol_ver = nvsp_ver;
/* Send the init request */
ret = vmbus_sendpacket(device->channel, init_packet,
@@ -293,26 +282,62 @@ static int netvsc_connect_vsp(struct hv_device *device)
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
if (ret != 0)
- goto cleanup;
+ return ret;
t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ);
- if (t == 0) {
- ret = -ETIMEDOUT;
- goto cleanup;
- }
+ if (t == 0)
+ return -ETIMEDOUT;
if (init_packet->msg.init_msg.init_complete.status !=
- NVSP_STAT_SUCCESS) {
- ret = -EINVAL;
- goto cleanup;
- }
+ NVSP_STAT_SUCCESS)
+ return -EINVAL;
- if (init_packet->msg.init_msg.init_complete.
- negotiated_protocol_ver != NVSP_PROTOCOL_VERSION_1) {
+ if (nvsp_ver != NVSP_PROTOCOL_VERSION_2)
+ return 0;
+
+ /* NVSPv2 only: Send NDIS config */
+ memset(init_packet, 0, sizeof(struct nvsp_message));
+ init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG;
+ init_packet->msg.v2_msg.send_ndis_config.mtu = net_device->ndev->mtu;
+
+ ret = vmbus_sendpacket(device->channel, init_packet,
+ sizeof(struct nvsp_message),
+ (unsigned long)init_packet,
+ VM_PKT_DATA_INBAND, 0);
+
+ return ret;
+}
+
+static int netvsc_connect_vsp(struct hv_device *device)
+{
+ int ret;
+ struct netvsc_device *net_device;
+ struct nvsp_message *init_packet;
+ int ndis_version;
+ struct net_device *ndev;
+
+ net_device = get_outbound_net_device(device);
+ if (!net_device)
+ return -ENODEV;
+ ndev = net_device->ndev;
+
+ init_packet = &net_device->channel_init_pkt;
+
+ /* Negotiate the latest NVSP protocol supported */
+ if (negotiate_nvsp_ver(device, net_device, init_packet,
+ NVSP_PROTOCOL_VERSION_2) == 0) {
+ net_device->nvsp_version = NVSP_PROTOCOL_VERSION_2;
+ } else if (negotiate_nvsp_ver(device, net_device, init_packet,
+ NVSP_PROTOCOL_VERSION_1) == 0) {
+ net_device->nvsp_version = NVSP_PROTOCOL_VERSION_1;
+ } else {
ret = -EPROTO;
goto cleanup;
}
+
+ pr_debug("Negotiated NVSP version:%x\n", net_device->nvsp_version);
+
/* Send the ndis version */
memset(init_packet, 0, sizeof(struct nvsp_message));
@@ -438,6 +463,9 @@ static void netvsc_send_completion(struct hv_device *device,
nvsc_packet->completion.send.send_completion_ctx);
atomic_dec(&net_device->num_outstanding_sends);
+
+ if (netif_queue_stopped(ndev) && !net_device->start_remove)
+ netif_wake_queue(ndev);
} else {
netdev_err(ndev, "Unknown send completion packet type- "
"%d received!!\n", nvsp_packet->hdr.msg_type);
@@ -488,11 +516,16 @@ int netvsc_send(struct hv_device *device,
}
- if (ret != 0)
+ if (ret == 0) {
+ atomic_inc(&net_device->num_outstanding_sends);
+ } else if (ret == -EAGAIN) {
+ netif_stop_queue(ndev);
+ if (atomic_read(&net_device->num_outstanding_sends) < 1)
+ netif_wake_queue(ndev);
+ } else {
netdev_err(ndev, "Unable to send packet %p ret %d\n",
packet, ret);
- else
- atomic_inc(&net_device->num_outstanding_sends);
+ }
return ret;
}
@@ -598,12 +631,10 @@ static void netvsc_receive(struct hv_device *device,
struct vmtransfer_page_packet_header *vmxferpage_packet;
struct nvsp_message *nvsp_packet;
struct hv_netvsc_packet *netvsc_packet = NULL;
- unsigned long start;
- unsigned long end, end_virtual;
/* struct netvsc_driver *netvscDriver; */
struct xferpage_packet *xferpage_packet = NULL;
- int i, j;
- int count = 0, bytes_remain = 0;
+ int i;
+ int count = 0;
unsigned long flags;
struct net_device *ndev;
@@ -712,53 +743,10 @@ static void netvsc_receive(struct hv_device *device,
netvsc_packet->completion.recv.recv_completion_tid =
vmxferpage_packet->d.trans_id;
+ netvsc_packet->data = (void *)((unsigned long)net_device->
+ recv_buf + vmxferpage_packet->ranges[i].byte_offset);
netvsc_packet->total_data_buflen =
vmxferpage_packet->ranges[i].byte_count;
- netvsc_packet->page_buf_cnt = 1;
-
- netvsc_packet->page_buf[0].len =
- vmxferpage_packet->ranges[i].byte_count;
-
- start = virt_to_phys((void *)((unsigned long)net_device->
- recv_buf + vmxferpage_packet->ranges[i].byte_offset));
-
- netvsc_packet->page_buf[0].pfn = start >> PAGE_SHIFT;
- end_virtual = (unsigned long)net_device->recv_buf
- + vmxferpage_packet->ranges[i].byte_offset
- + vmxferpage_packet->ranges[i].byte_count - 1;
- end = virt_to_phys((void *)end_virtual);
-
- /* Calculate the page relative offset */
- netvsc_packet->page_buf[0].offset =
- vmxferpage_packet->ranges[i].byte_offset &
- (PAGE_SIZE - 1);
- if ((end >> PAGE_SHIFT) != (start >> PAGE_SHIFT)) {
- /* Handle frame across multiple pages: */
- netvsc_packet->page_buf[0].len =
- (netvsc_packet->page_buf[0].pfn <<
- PAGE_SHIFT)
- + PAGE_SIZE - start;
- bytes_remain = netvsc_packet->total_data_buflen -
- netvsc_packet->page_buf[0].len;
- for (j = 1; j < NETVSC_PACKET_MAXPAGE; j++) {
- netvsc_packet->page_buf[j].offset = 0;
- if (bytes_remain <= PAGE_SIZE) {
- netvsc_packet->page_buf[j].len =
- bytes_remain;
- bytes_remain = 0;
- } else {
- netvsc_packet->page_buf[j].len =
- PAGE_SIZE;
- bytes_remain -= PAGE_SIZE;
- }
- netvsc_packet->page_buf[j].pfn =
- virt_to_phys((void *)(end_virtual -
- bytes_remain)) >> PAGE_SHIFT;
- netvsc_packet->page_buf_cnt++;
- if (bytes_remain == 0)
- break;
- }
- }
/* Pass it to the upper layer */
rndis_filter_receive(device, netvsc_packet);
diff --git a/drivers/staging/hv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 93b0e91cbf98..462d05f05e84 100644
--- a/drivers/staging/hv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -43,24 +43,59 @@
struct net_device_context {
/* point back to our device context */
struct hv_device *device_ctx;
- atomic_t avail;
struct delayed_work dwork;
};
-#define PACKET_PAGES_LOWATER 8
-/* Need this many pages to handle worst case fragmented packet */
-#define PACKET_PAGES_HIWATER (MAX_SKB_FRAGS + 2)
-
static int ring_size = 128;
module_param(ring_size, int, S_IRUGO);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
-/* no-op so the netdev core doesn't return -EINVAL when modifying the the
- * multicast address list in SIOCADDMULTI. hv is setup to get all multicast
- * when it calls RndisFilterOnOpen() */
+struct set_multicast_work {
+ struct work_struct work;
+ struct net_device *net;
+};
+
+static void do_set_multicast(struct work_struct *w)
+{
+ struct set_multicast_work *swk =
+ container_of(w, struct set_multicast_work, work);
+ struct net_device *net = swk->net;
+
+ struct net_device_context *ndevctx = netdev_priv(net);
+ struct netvsc_device *nvdev;
+ struct rndis_device *rdev;
+
+ nvdev = hv_get_drvdata(ndevctx->device_ctx);
+ if (nvdev == NULL)
+ return;
+
+ rdev = nvdev->extension;
+ if (rdev == NULL)
+ return;
+
+ if (net->flags & IFF_PROMISC)
+ rndis_filter_set_packet_filter(rdev,
+ NDIS_PACKET_TYPE_PROMISCUOUS);
+ else
+ rndis_filter_set_packet_filter(rdev,
+ NDIS_PACKET_TYPE_BROADCAST |
+ NDIS_PACKET_TYPE_ALL_MULTICAST |
+ NDIS_PACKET_TYPE_DIRECTED);
+
+ kfree(w);
+}
+
static void netvsc_set_multicast_list(struct net_device *net)
{
+ struct set_multicast_work *swk =
+ kmalloc(sizeof(struct set_multicast_work), GFP_ATOMIC);
+ if (swk == NULL)
+ return;
+
+ swk->net = net;
+ INIT_WORK(&swk->work, do_set_multicast);
+ schedule_work(&swk->work);
}
static int netvsc_open(struct net_device *net)
@@ -104,18 +139,8 @@ static void netvsc_xmit_completion(void *context)
kfree(packet);
- if (skb) {
- struct net_device *net = skb->dev;
- struct net_device_context *net_device_ctx = netdev_priv(net);
- unsigned int num_pages = skb_shinfo(skb)->nr_frags + 2;
-
+ if (skb)
dev_kfree_skb_any(skb);
-
- atomic_add(num_pages, &net_device_ctx->avail);
- if (atomic_read(&net_device_ctx->avail) >=
- PACKET_PAGES_HIWATER)
- netif_wake_queue(net);
- }
}
static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
@@ -123,12 +148,12 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
struct net_device_context *net_device_ctx = netdev_priv(net);
struct hv_netvsc_packet *packet;
int ret;
- unsigned int i, num_pages;
+ unsigned int i, num_pages, npg_data;
- /* Add 1 for skb->data and additional one for RNDIS */
- num_pages = skb_shinfo(skb)->nr_frags + 1 + 1;
- if (num_pages > atomic_read(&net_device_ctx->avail))
- return NETDEV_TX_BUSY;
+ /* Add multipage for skb->data and additional one for RNDIS */
+ npg_data = (((unsigned long)skb->data + skb_headlen(skb) - 1)
+ >> PAGE_SHIFT) - ((unsigned long)skb->data >> PAGE_SHIFT) + 1;
+ num_pages = skb_shinfo(skb)->nr_frags + npg_data + 1;
/* Allocate a netvsc packet based on # of frags. */
packet = kzalloc(sizeof(struct hv_netvsc_packet) +
@@ -151,21 +176,36 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
packet->page_buf_cnt = num_pages;
/* Initialize it from the skb */
- packet->total_data_buflen = skb->len;
+ packet->total_data_buflen = skb->len;
/* Start filling in the page buffers starting after RNDIS buffer. */
packet->page_buf[1].pfn = virt_to_phys(skb->data) >> PAGE_SHIFT;
packet->page_buf[1].offset
= (unsigned long)skb->data & (PAGE_SIZE - 1);
- packet->page_buf[1].len = skb_headlen(skb);
+ if (npg_data == 1)
+ packet->page_buf[1].len = skb_headlen(skb);
+ else
+ packet->page_buf[1].len = PAGE_SIZE
+ - packet->page_buf[1].offset;
+
+ for (i = 2; i <= npg_data; i++) {
+ packet->page_buf[i].pfn = virt_to_phys(skb->data
+ + PAGE_SIZE * (i-1)) >> PAGE_SHIFT;
+ packet->page_buf[i].offset = 0;
+ packet->page_buf[i].len = PAGE_SIZE;
+ }
+ if (npg_data > 1)
+ packet->page_buf[npg_data].len = (((unsigned long)skb->data
+ + skb_headlen(skb) - 1) & (PAGE_SIZE - 1)) + 1;
/* Additional fragments are after SKB data */
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
const skb_frag_t *f = &skb_shinfo(skb)->frags[i];
- packet->page_buf[i+2].pfn = page_to_pfn(skb_frag_page(f));
- packet->page_buf[i+2].offset = f->page_offset;
- packet->page_buf[i+2].len = skb_frag_size(f);
+ packet->page_buf[i+npg_data+1].pfn =
+ page_to_pfn(skb_frag_page(f));
+ packet->page_buf[i+npg_data+1].offset = f->page_offset;
+ packet->page_buf[i+npg_data+1].len = skb_frag_size(f);
}
/* Set the completion routine */
@@ -178,10 +218,6 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
if (ret == 0) {
net->stats.tx_bytes += skb->len;
net->stats.tx_packets++;
-
- atomic_sub(num_pages, &net_device_ctx->avail);
- if (atomic_read(&net_device_ctx->avail) < PACKET_PAGES_LOWATER)
- netif_stop_queue(net);
} else {
/* we are shutting down or bus overloaded, just drop packet */
net->stats.tx_dropped++;
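
Editor's note: the npg_data arithmetic in the hunk above counts how many pages the linear skb->data area straddles, because each hv_page_buffer describes at most one page. A worked example as a comment, assuming PAGE_SIZE is 4096 and a hypothetical buffer placement:

    /*
     * Suppose skb->data = addr with an in-page offset of 0xf80 and
     * skb_headlen(skb) = 300:
     *
     *   ((addr + 300 - 1) >> PAGE_SHIFT) - (addr >> PAGE_SHIFT) + 1 = 2
     *
     * so npg_data = 2: page_buf[1] covers 4096 - 0xf80 = 128 bytes,
     * page_buf[2] the remaining 172 bytes at offset 0 of the next page,
     * and the skb fragments start at page_buf[npg_data + 1] = page_buf[3]
     * (page_buf[0] is reserved for the RNDIS header).
     */
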
@@ -232,9 +268,6 @@ int netvsc_recv_callback(struct hv_device *device_obj,
{
struct net_device *net = dev_get_drvdata(&device_obj->device);
struct sk_buff *skb;
- void *data;
- int i;
- unsigned long flags;
struct netvsc_device *net_device;
net_device = hv_get_drvdata(device_obj);
@@ -253,27 +286,12 @@ int netvsc_recv_callback(struct hv_device *device_obj,
return 0;
}
- /* for kmap_atomic */
- local_irq_save(flags);
-
/*
* Copy to skb. This copy is needed here since the memory pointed by
* hv_netvsc_packet cannot be deallocated
*/
- for (i = 0; i < packet->page_buf_cnt; i++) {
- data = kmap_atomic(pfn_to_page(packet->page_buf[i].pfn),
- KM_IRQ1);
- data = (void *)(unsigned long)data +
- packet->page_buf[i].offset;
-
- memcpy(skb_put(skb, packet->page_buf[i].len), data,
- packet->page_buf[i].len);
-
- kunmap_atomic((void *)((unsigned long)data -
- packet->page_buf[i].offset), KM_IRQ1);
- }
-
- local_irq_restore(flags);
+ memcpy(skb_put(skb, packet->total_data_buflen), packet->data,
+ packet->total_data_buflen);
skb->protocol = eth_type_trans(skb, net);
skb->ip_summed = CHECKSUM_NONE;
@@ -299,6 +317,39 @@ static void netvsc_get_drvinfo(struct net_device *net,
strcpy(info->fw_version, "N/A");
}
+static int netvsc_change_mtu(struct net_device *ndev, int mtu)
+{
+ struct net_device_context *ndevctx = netdev_priv(ndev);
+ struct hv_device *hdev = ndevctx->device_ctx;
+ struct netvsc_device *nvdev = hv_get_drvdata(hdev);
+ struct netvsc_device_info device_info;
+ int limit = ETH_DATA_LEN;
+
+ if (nvdev == NULL || nvdev->destroy)
+ return -ENODEV;
+
+ if (nvdev->nvsp_version == NVSP_PROTOCOL_VERSION_2)
+ limit = NETVSC_MTU;
+
+ if (mtu < 68 || mtu > limit)
+ return -EINVAL;
+
+ nvdev->start_remove = true;
+ cancel_delayed_work_sync(&ndevctx->dwork);
+ netif_stop_queue(ndev);
+ rndis_filter_device_remove(hdev);
+
+ ndev->mtu = mtu;
+
+ ndevctx->device_ctx = hdev;
+ hv_set_drvdata(hdev, ndev);
+ device_info.ring_size = ring_size;
+ rndis_filter_device_add(hdev, &device_info);
+ netif_wake_queue(ndev);
+
+ return 0;
+}
+
static const struct ethtool_ops ethtool_ops = {
.get_drvinfo = netvsc_get_drvinfo,
.get_link = ethtool_op_get_link,
@@ -309,7 +360,7 @@ static const struct net_device_ops device_ops = {
.ndo_stop = netvsc_close,
.ndo_start_xmit = netvsc_start_xmit,
.ndo_set_rx_mode = netvsc_set_multicast_list,
- .ndo_change_mtu = eth_change_mtu,
+ .ndo_change_mtu = netvsc_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
};
@@ -351,7 +402,6 @@ static int netvsc_probe(struct hv_device *dev,
net_device_ctx = netdev_priv(net);
net_device_ctx->device_ctx = dev;
- atomic_set(&net_device_ctx->avail, ring_size);
hv_set_drvdata(dev, net);
INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_send_garp);
@@ -403,6 +453,8 @@ static int netvsc_remove(struct hv_device *dev)
return 0;
}
+ net_device->start_remove = true;
+
ndev_ctx = netdev_priv(net);
cancel_delayed_work_sync(&ndev_ctx->dwork);
diff --git a/drivers/staging/hv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index bafccb360041..da181f9a49d1 100644
--- a/drivers/staging/hv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -30,26 +30,6 @@
#include "hyperv_net.h"
-enum rndis_device_state {
- RNDIS_DEV_UNINITIALIZED = 0,
- RNDIS_DEV_INITIALIZING,
- RNDIS_DEV_INITIALIZED,
- RNDIS_DEV_DATAINITIALIZED,
-};
-
-struct rndis_device {
- struct netvsc_device *net_dev;
-
- enum rndis_device_state state;
- bool link_state;
- atomic_t new_req_id;
-
- spinlock_t request_lock;
- struct list_head req_list;
-
- unsigned char hw_mac_adr[ETH_ALEN];
-};
-
struct rndis_request {
struct list_head list_ent;
struct completion wait_event;
@@ -329,7 +309,6 @@ static void rndis_filter_receive_data(struct rndis_device *dev,
{
struct rndis_packet *rndis_pkt;
u32 data_offset;
- int i;
rndis_pkt = &msg->msg.pkt;
@@ -342,17 +321,7 @@ static void rndis_filter_receive_data(struct rndis_device *dev,
data_offset = RNDIS_HEADER_SIZE + rndis_pkt->data_offset;
pkt->total_data_buflen -= data_offset;
- pkt->page_buf[0].offset += data_offset;
- pkt->page_buf[0].len -= data_offset;
-
- /* Drop the 0th page, if rndis data go beyond page boundary */
- if (pkt->page_buf[0].offset >= PAGE_SIZE) {
- pkt->page_buf[1].offset = pkt->page_buf[0].offset - PAGE_SIZE;
- pkt->page_buf[1].len -= pkt->page_buf[1].offset;
- pkt->page_buf_cnt--;
- for (i = 0; i < pkt->page_buf_cnt; i++)
- pkt->page_buf[i] = pkt->page_buf[i+1];
- }
+ pkt->data = (void *)((unsigned long)pkt->data + data_offset);
pkt->is_data_pkt = true;
@@ -387,11 +356,7 @@ int rndis_filter_receive(struct hv_device *dev,
return -ENODEV;
}
- rndis_hdr = (struct rndis_message *)kmap_atomic(
- pfn_to_page(pkt->page_buf[0].pfn), KM_IRQ0);
-
- rndis_hdr = (void *)((unsigned long)rndis_hdr +
- pkt->page_buf[0].offset);
+ rndis_hdr = pkt->data;
/* Make sure we got a valid rndis message */
if ((rndis_hdr->ndis_msg_type != REMOTE_NDIS_PACKET_MSG) &&
@@ -407,8 +372,6 @@ int rndis_filter_receive(struct hv_device *dev,
sizeof(struct rndis_message) :
rndis_hdr->msg_len);
- kunmap_atomic(rndis_hdr - pkt->page_buf[0].offset, KM_IRQ0);
-
dump_rndis_message(dev, &rndis_msg);
switch (rndis_msg.ndis_msg_type) {
@@ -522,8 +485,7 @@ static int rndis_filter_query_device_link_status(struct rndis_device *dev)
return ret;
}
-static int rndis_filter_set_packet_filter(struct rndis_device *dev,
- u32 new_filter)
+int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter)
{
struct rndis_request *request;
struct rndis_set_request *set;
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index 46b5f5fd686b..e05b645bbc32 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -164,7 +164,7 @@ static const struct net_device_ops ifb_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
};
-#define IFB_FEATURES (NETIF_F_NO_CSUM | NETIF_F_SG | NETIF_F_FRAGLIST | \
+#define IFB_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_FRAGLIST | \
NETIF_F_TSO_ECN | NETIF_F_TSO | NETIF_F_TSO6 | \
NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_TX)
diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig
index d423d18b4ad6..e535137eb2d0 100644
--- a/drivers/net/irda/Kconfig
+++ b/drivers/net/irda/Kconfig
@@ -313,8 +313,12 @@ config TOSHIBA_FIR
donauboe.
config AU1000_FIR
- tristate "Alchemy Au1000 SIR/FIR"
+ tristate "Alchemy IrDA SIR/FIR"
depends on IRDA && MIPS_ALCHEMY
+ help
+	  Say Y/M here to build support for the IrDA peripheral on the
+ Alchemy Au1000 and Au1100 SoCs.
+ Say M to build a module; it will be called au1k_ir.ko
config SMC_IRCC_FIR
tristate "SMSC IrCC (EXPERIMENTAL)"
diff --git a/drivers/net/irda/au1000_ircc.h b/drivers/net/irda/au1000_ircc.h
deleted file mode 100644
index c072c09a8d91..000000000000
--- a/drivers/net/irda/au1000_ircc.h
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
- *
- * BRIEF MODULE DESCRIPTION
- * Au1000 IrDA driver.
- *
- * Copyright 2001 MontaVista Software Inc.
- * Author: MontaVista Software, Inc.
- * ppopov@mvista.com or source@mvista.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#ifndef AU1000_IRCC_H
-#define AU1000_IRCC_H
-
-#include <linux/time.h>
-
-#include <linux/spinlock.h>
-#include <linux/pm.h>
-#include <asm/io.h>
-
-#define NUM_IR_IFF 1
-#define NUM_IR_DESC 64
-#define RING_SIZE_4 0x0
-#define RING_SIZE_16 0x3
-#define RING_SIZE_64 0xF
-#define MAX_NUM_IR_DESC 64
-#define MAX_BUF_SIZE 2048
-
-#define BPS_115200 0
-#define BPS_57600 1
-#define BPS_38400 2
-#define BPS_19200 5
-#define BPS_9600 11
-#define BPS_2400 47
-
-/* Ring descriptor flags */
-#define AU_OWN (1<<7) /* tx,rx */
-
-#define IR_DIS_CRC (1<<6) /* tx */
-#define IR_BAD_CRC (1<<5) /* tx */
-#define IR_NEED_PULSE (1<<4) /* tx */
-#define IR_FORCE_UNDER (1<<3) /* tx */
-#define IR_DISABLE_TX (1<<2) /* tx */
-#define IR_HW_UNDER (1<<0) /* tx */
-#define IR_TX_ERROR (IR_DIS_CRC|IR_BAD_CRC|IR_HW_UNDER)
-
-#define IR_PHY_ERROR (1<<6) /* rx */
-#define IR_CRC_ERROR (1<<5) /* rx */
-#define IR_MAX_LEN (1<<4) /* rx */
-#define IR_FIFO_OVER (1<<3) /* rx */
-#define IR_SIR_ERROR (1<<2) /* rx */
-#define IR_RX_ERROR (IR_PHY_ERROR|IR_CRC_ERROR| \
- IR_MAX_LEN|IR_FIFO_OVER|IR_SIR_ERROR)
-
-typedef struct db_dest {
- struct db_dest *pnext;
- volatile u32 *vaddr;
- dma_addr_t dma_addr;
-} db_dest_t;
-
-
-typedef struct ring_desc {
- u8 count_0; /* 7:0 */
- u8 count_1; /* 12:8 */
- u8 reserved;
- u8 flags;
- u8 addr_0; /* 7:0 */
- u8 addr_1; /* 15:8 */
- u8 addr_2; /* 23:16 */
- u8 addr_3; /* 31:24 */
-} ring_dest_t;
-
-
-/* Private data for each instance */
-struct au1k_private {
-
- db_dest_t *pDBfree;
- db_dest_t db[2*NUM_IR_DESC];
- volatile ring_dest_t *rx_ring[NUM_IR_DESC];
- volatile ring_dest_t *tx_ring[NUM_IR_DESC];
- db_dest_t *rx_db_inuse[NUM_IR_DESC];
- db_dest_t *tx_db_inuse[NUM_IR_DESC];
- u32 rx_head;
- u32 tx_head;
- u32 tx_tail;
- u32 tx_full;
-
- iobuff_t rx_buff;
-
- struct net_device *netdev;
-
- struct timeval stamp;
- struct timeval now;
- struct qos_info qos;
- struct irlap_cb *irlap;
-
- u8 open;
- u32 speed;
- u32 newspeed;
-
- u32 intr_work_done; /* number of Rx and Tx pkts processed in the isr */
- struct timer_list timer;
-
- spinlock_t lock; /* For serializing operations */
-};
-#endif /* AU1000_IRCC_H */
diff --git a/drivers/net/irda/au1k_ir.c b/drivers/net/irda/au1k_ir.c
index a3d696a9456a..fc503aa5288e 100644
--- a/drivers/net/irda/au1k_ir.c
+++ b/drivers/net/irda/au1k_ir.c
@@ -18,104 +18,220 @@
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*/
-#include <linux/module.h>
-#include <linux/types.h>
+
#include <linux/init.h>
-#include <linux/errno.h>
+#include <linux/module.h>
#include <linux/netdevice.h>
-#include <linux/slab.h>
-#include <linux/rtnetlink.h>
#include <linux/interrupt.h>
-#include <linux/pm.h>
-#include <linux/bitops.h>
-
-#include <asm/irq.h>
-#include <asm/io.h>
-#include <asm/au1000.h>
-#if defined(CONFIG_MIPS_PB1000) || defined(CONFIG_MIPS_PB1100)
-#include <asm/pb1000.h>
-#elif defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100)
-#include <asm/db1x00.h>
-#include <asm/mach-db1x00/bcsr.h>
-#else
-#error au1k_ir: unsupported board
-#endif
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/time.h>
+#include <linux/types.h>
#include <net/irda/irda.h>
#include <net/irda/irmod.h>
#include <net/irda/wrapper.h>
#include <net/irda/irda_device.h>
-#include "au1000_ircc.h"
+#include <asm/mach-au1x00/au1000.h>
+
+/* registers */
+#define IR_RING_PTR_STATUS 0x00
+#define IR_RING_BASE_ADDR_H 0x04
+#define IR_RING_BASE_ADDR_L 0x08
+#define IR_RING_SIZE 0x0C
+#define IR_RING_PROMPT 0x10
+#define IR_RING_ADDR_CMPR 0x14
+#define IR_INT_CLEAR 0x18
+#define IR_CONFIG_1 0x20
+#define IR_SIR_FLAGS 0x24
+#define IR_STATUS 0x28
+#define IR_READ_PHY_CONFIG 0x2C
+#define IR_WRITE_PHY_CONFIG 0x30
+#define IR_MAX_PKT_LEN 0x34
+#define IR_RX_BYTE_CNT 0x38
+#define IR_CONFIG_2 0x3C
+#define IR_ENABLE 0x40
+
+/* Config1 */
+#define IR_RX_INVERT_LED (1 << 0)
+#define IR_TX_INVERT_LED (1 << 1)
+#define IR_ST (1 << 2)
+#define IR_SF (1 << 3)
+#define IR_SIR (1 << 4)
+#define IR_MIR (1 << 5)
+#define IR_FIR (1 << 6)
+#define IR_16CRC (1 << 7)
+#define IR_TD (1 << 8)
+#define IR_RX_ALL (1 << 9)
+#define IR_DMA_ENABLE (1 << 10)
+#define IR_RX_ENABLE (1 << 11)
+#define IR_TX_ENABLE (1 << 12)
+#define IR_LOOPBACK (1 << 14)
+#define IR_SIR_MODE (IR_SIR | IR_DMA_ENABLE | \
+ IR_RX_ALL | IR_RX_ENABLE | IR_SF | \
+ IR_16CRC)
+
+/* ir_status */
+#define IR_RX_STATUS (1 << 9)
+#define IR_TX_STATUS (1 << 10)
+#define IR_PHYEN (1 << 15)
+
+/* ir_write_phy_config */
+#define IR_BR(x) (((x) & 0x3f) << 10) /* baud rate */
+#define IR_PW(x) (((x) & 0x1f) << 5) /* pulse width */
+#define IR_P(x) ((x) & 0x1f) /* preamble bits */
+
+/* Config2 */
+#define IR_MODE_INV (1 << 0)
+#define IR_ONE_PIN (1 << 1)
+#define IR_PHYCLK_40MHZ (0 << 2)
+#define IR_PHYCLK_48MHZ (1 << 2)
+#define IR_PHYCLK_56MHZ (2 << 2)
+#define IR_PHYCLK_64MHZ (3 << 2)
+#define IR_DP (1 << 4)
+#define IR_DA (1 << 5)
+#define IR_FLT_HIGH (0 << 6)
+#define IR_FLT_MEDHI (1 << 6)
+#define IR_FLT_MEDLO (2 << 6)
+#define IR_FLT_LO (3 << 6)
+#define IR_IEN (1 << 8)
+
+/* ir_enable */
+#define IR_HC (1 << 3) /* divide SBUS clock by 2 */
+#define IR_CE (1 << 2) /* clock enable */
+#define IR_C (1 << 1) /* coherency bit */
+#define IR_BE (1 << 0) /* set in big endian mode */
+
+#define NUM_IR_DESC 64
+#define RING_SIZE_4 0x0
+#define RING_SIZE_16 0x3
+#define RING_SIZE_64 0xF
+#define MAX_NUM_IR_DESC 64
+#define MAX_BUF_SIZE 2048
+
+/* Ring descriptor flags */
+#define AU_OWN (1 << 7) /* tx,rx */
+#define IR_DIS_CRC (1 << 6) /* tx */
+#define IR_BAD_CRC (1 << 5) /* tx */
+#define IR_NEED_PULSE (1 << 4) /* tx */
+#define IR_FORCE_UNDER (1 << 3) /* tx */
+#define IR_DISABLE_TX (1 << 2) /* tx */
+#define IR_HW_UNDER (1 << 0) /* tx */
+#define IR_TX_ERROR (IR_DIS_CRC | IR_BAD_CRC | IR_HW_UNDER)
+
+#define IR_PHY_ERROR (1 << 6) /* rx */
+#define IR_CRC_ERROR (1 << 5) /* rx */
+#define IR_MAX_LEN (1 << 4) /* rx */
+#define IR_FIFO_OVER (1 << 3) /* rx */
+#define IR_SIR_ERROR (1 << 2) /* rx */
+#define IR_RX_ERROR (IR_PHY_ERROR | IR_CRC_ERROR | \
+ IR_MAX_LEN | IR_FIFO_OVER | IR_SIR_ERROR)
+
+struct db_dest {
+ struct db_dest *pnext;
+ volatile u32 *vaddr;
+ dma_addr_t dma_addr;
+};
-static int au1k_irda_net_init(struct net_device *);
-static int au1k_irda_start(struct net_device *);
-static int au1k_irda_stop(struct net_device *dev);
-static int au1k_irda_hard_xmit(struct sk_buff *, struct net_device *);
-static int au1k_irda_rx(struct net_device *);
-static void au1k_irda_interrupt(int, void *);
-static void au1k_tx_timeout(struct net_device *);
-static int au1k_irda_ioctl(struct net_device *, struct ifreq *, int);
-static int au1k_irda_set_speed(struct net_device *dev, int speed);
+struct ring_dest {
+ u8 count_0; /* 7:0 */
+ u8 count_1; /* 12:8 */
+ u8 reserved;
+ u8 flags;
+ u8 addr_0; /* 7:0 */
+ u8 addr_1; /* 15:8 */
+ u8 addr_2; /* 23:16 */
+ u8 addr_3; /* 31:24 */
+};
-static void *dma_alloc(size_t, dma_addr_t *);
-static void dma_free(void *, size_t);
+/* Private data for each instance */
+struct au1k_private {
+ void __iomem *iobase;
+ int irq_rx, irq_tx;
+
+ struct db_dest *pDBfree;
+ struct db_dest db[2 * NUM_IR_DESC];
+ volatile struct ring_dest *rx_ring[NUM_IR_DESC];
+ volatile struct ring_dest *tx_ring[NUM_IR_DESC];
+ struct db_dest *rx_db_inuse[NUM_IR_DESC];
+ struct db_dest *tx_db_inuse[NUM_IR_DESC];
+ u32 rx_head;
+ u32 tx_head;
+ u32 tx_tail;
+ u32 tx_full;
+
+ iobuff_t rx_buff;
+
+ struct net_device *netdev;
+ struct timeval stamp;
+ struct timeval now;
+ struct qos_info qos;
+ struct irlap_cb *irlap;
+
+ u8 open;
+ u32 speed;
+ u32 newspeed;
+
+ struct timer_list timer;
+
+ struct resource *ioarea;
+ struct au1k_irda_platform_data *platdata;
+};
static int qos_mtt_bits = 0x07; /* 1 ms or more */
-static struct net_device *ir_devs[NUM_IR_IFF];
-static char version[] __devinitdata =
- "au1k_ircc:1.2 ppopov@mvista.com\n";
#define RUN_AT(x) (jiffies + (x))
-static DEFINE_SPINLOCK(ir_lock);
+static void au1k_irda_plat_set_phy_mode(struct au1k_private *p, int mode)
+{
+ if (p->platdata && p->platdata->set_phy_mode)
+ p->platdata->set_phy_mode(mode);
+}
-/*
- * IrDA peripheral bug. You have to read the register
- * twice to get the right value.
- */
-u32 read_ir_reg(u32 addr)
-{
- readl(addr);
- return readl(addr);
+static inline unsigned long irda_read(struct au1k_private *p,
+ unsigned long ofs)
+{
+ /*
+ * IrDA peripheral bug. You have to read the register
+ * twice to get the right value.
+ */
+ (void)__raw_readl(p->iobase + ofs);
+ return __raw_readl(p->iobase + ofs);
}
+static inline void irda_write(struct au1k_private *p, unsigned long ofs,
+ unsigned long val)
+{
+ __raw_writel(val, p->iobase + ofs);
+ wmb();
+}
/*
* Buffer allocation/deallocation routines. The buffer descriptor returned
- * has the virtual and dma address of a buffer suitable for
+ * has the virtual and dma address of a buffer suitable for
* both, receive and transmit operations.
*/
-static db_dest_t *GetFreeDB(struct au1k_private *aup)
+static struct db_dest *GetFreeDB(struct au1k_private *aup)
{
- db_dest_t *pDB;
- pDB = aup->pDBfree;
-
- if (pDB) {
- aup->pDBfree = pDB->pnext;
- }
- return pDB;
-}
+ struct db_dest *db;
+ db = aup->pDBfree;
-static void ReleaseDB(struct au1k_private *aup, db_dest_t *pDB)
-{
- db_dest_t *pDBfree = aup->pDBfree;
- if (pDBfree)
- pDBfree->pnext = pDB;
- aup->pDBfree = pDB;
+ if (db)
+ aup->pDBfree = db->pnext;
+ return db;
}
-
/*
DMA memory allocation, derived from pci_alloc_consistent.
However, the Au1000 data cache is coherent (when programmed
so), therefore we return KSEG0 address, not KSEG1.
*/
-static void *dma_alloc(size_t size, dma_addr_t * dma_handle)
+static void *dma_alloc(size_t size, dma_addr_t *dma_handle)
{
void *ret;
int gfp = GFP_ATOMIC | GFP_DMA;
- ret = (void *) __get_free_pages(gfp, get_order(size));
+ ret = (void *)__get_free_pages(gfp, get_order(size));
if (ret != NULL) {
memset(ret, 0, size);
@@ -125,7 +241,6 @@ static void *dma_alloc(size_t size, dma_addr_t * dma_handle)
return ret;
}
-
static void dma_free(void *vaddr, size_t size)
{
vaddr = (void *)KSEG0ADDR(vaddr);
@@ -133,206 +248,306 @@ static void dma_free(void *vaddr, size_t size)
}
-static void
-setup_hw_rings(struct au1k_private *aup, u32 rx_base, u32 tx_base)
+static void setup_hw_rings(struct au1k_private *aup, u32 rx_base, u32 tx_base)
{
int i;
- for (i=0; i<NUM_IR_DESC; i++) {
- aup->rx_ring[i] = (volatile ring_dest_t *)
- (rx_base + sizeof(ring_dest_t)*i);
+ for (i = 0; i < NUM_IR_DESC; i++) {
+ aup->rx_ring[i] = (volatile struct ring_dest *)
+ (rx_base + sizeof(struct ring_dest) * i);
}
- for (i=0; i<NUM_IR_DESC; i++) {
- aup->tx_ring[i] = (volatile ring_dest_t *)
- (tx_base + sizeof(ring_dest_t)*i);
+ for (i = 0; i < NUM_IR_DESC; i++) {
+ aup->tx_ring[i] = (volatile struct ring_dest *)
+ (tx_base + sizeof(struct ring_dest) * i);
}
}
-static int au1k_irda_init(void)
-{
- static unsigned version_printed = 0;
- struct au1k_private *aup;
- struct net_device *dev;
- int err;
-
- if (version_printed++ == 0) printk(version);
-
- dev = alloc_irdadev(sizeof(struct au1k_private));
- if (!dev)
- return -ENOMEM;
-
- dev->irq = AU1000_IRDA_RX_INT; /* TX has its own interrupt */
- err = au1k_irda_net_init(dev);
- if (err)
- goto out;
- err = register_netdev(dev);
- if (err)
- goto out1;
- ir_devs[0] = dev;
- printk(KERN_INFO "IrDA: Registered device %s\n", dev->name);
- return 0;
-
-out1:
- aup = netdev_priv(dev);
- dma_free((void *)aup->db[0].vaddr,
- MAX_BUF_SIZE * 2*NUM_IR_DESC);
- dma_free((void *)aup->rx_ring[0],
- 2 * MAX_NUM_IR_DESC*(sizeof(ring_dest_t)));
- kfree(aup->rx_buff.head);
-out:
- free_netdev(dev);
- return err;
-}
-
static int au1k_irda_init_iobuf(iobuff_t *io, int size)
{
io->head = kmalloc(size, GFP_KERNEL);
if (io->head != NULL) {
- io->truesize = size;
- io->in_frame = FALSE;
- io->state = OUTSIDE_FRAME;
- io->data = io->head;
+ io->truesize = size;
+ io->in_frame = FALSE;
+ io->state = OUTSIDE_FRAME;
+ io->data = io->head;
}
return io->head ? 0 : -ENOMEM;
}
-static const struct net_device_ops au1k_irda_netdev_ops = {
- .ndo_open = au1k_irda_start,
- .ndo_stop = au1k_irda_stop,
- .ndo_start_xmit = au1k_irda_hard_xmit,
- .ndo_tx_timeout = au1k_tx_timeout,
- .ndo_do_ioctl = au1k_irda_ioctl,
-};
-
-static int au1k_irda_net_init(struct net_device *dev)
+/*
+ * Set the IrDA communications speed.
+ */
+static int au1k_irda_set_speed(struct net_device *dev, int speed)
{
struct au1k_private *aup = netdev_priv(dev);
- int i, retval = 0, err;
- db_dest_t *pDB, *pDBfree;
- dma_addr_t temp;
+ volatile struct ring_dest *ptxd;
+ unsigned long control;
+ int ret = 0, timeout = 10, i;
- err = au1k_irda_init_iobuf(&aup->rx_buff, 14384);
- if (err)
- goto out1;
+ if (speed == aup->speed)
+ return ret;
- dev->netdev_ops = &au1k_irda_netdev_ops;
+ /* disable PHY first */
+ au1k_irda_plat_set_phy_mode(aup, AU1000_IRDA_PHY_MODE_OFF);
+ irda_write(aup, IR_STATUS, irda_read(aup, IR_STATUS) & ~IR_PHYEN);
- irda_init_max_qos_capabilies(&aup->qos);
+ /* disable RX/TX */
+ irda_write(aup, IR_CONFIG_1,
+ irda_read(aup, IR_CONFIG_1) & ~(IR_RX_ENABLE | IR_TX_ENABLE));
+ msleep(20);
+ while (irda_read(aup, IR_STATUS) & (IR_RX_STATUS | IR_TX_STATUS)) {
+ msleep(20);
+ if (!timeout--) {
+ printk(KERN_ERR "%s: rx/tx disable timeout\n",
+ dev->name);
+ break;
+ }
+ }
- /* The only value we must override it the baudrate */
- aup->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|
- IR_115200|IR_576000 |(IR_4000000 << 8);
-
- aup->qos.min_turn_time.bits = qos_mtt_bits;
- irda_qos_bits_to_value(&aup->qos);
+ /* disable DMA */
+ irda_write(aup, IR_CONFIG_1,
+ irda_read(aup, IR_CONFIG_1) & ~IR_DMA_ENABLE);
+ msleep(20);
- retval = -ENOMEM;
+ /* After we disable tx/rx. the index pointers go back to zero. */
+ aup->tx_head = aup->tx_tail = aup->rx_head = 0;
+ for (i = 0; i < NUM_IR_DESC; i++) {
+ ptxd = aup->tx_ring[i];
+ ptxd->flags = 0;
+ ptxd->count_0 = 0;
+ ptxd->count_1 = 0;
+ }
- /* Tx ring follows rx ring + 512 bytes */
- /* we need a 1k aligned buffer */
- aup->rx_ring[0] = (ring_dest_t *)
- dma_alloc(2*MAX_NUM_IR_DESC*(sizeof(ring_dest_t)), &temp);
- if (!aup->rx_ring[0])
- goto out2;
+ for (i = 0; i < NUM_IR_DESC; i++) {
+ ptxd = aup->rx_ring[i];
+ ptxd->count_0 = 0;
+ ptxd->count_1 = 0;
+ ptxd->flags = AU_OWN;
+ }
- /* allocate the data buffers */
- aup->db[0].vaddr =
- (void *)dma_alloc(MAX_BUF_SIZE * 2*NUM_IR_DESC, &temp);
- if (!aup->db[0].vaddr)
- goto out3;
+ if (speed == 4000000)
+ au1k_irda_plat_set_phy_mode(aup, AU1000_IRDA_PHY_MODE_FIR);
+ else
+ au1k_irda_plat_set_phy_mode(aup, AU1000_IRDA_PHY_MODE_SIR);
- setup_hw_rings(aup, (u32)aup->rx_ring[0], (u32)aup->rx_ring[0] + 512);
+ switch (speed) {
+ case 9600:
+ irda_write(aup, IR_WRITE_PHY_CONFIG, IR_BR(11) | IR_PW(12));
+ irda_write(aup, IR_CONFIG_1, IR_SIR_MODE);
+ break;
+ case 19200:
+ irda_write(aup, IR_WRITE_PHY_CONFIG, IR_BR(5) | IR_PW(12));
+ irda_write(aup, IR_CONFIG_1, IR_SIR_MODE);
+ break;
+ case 38400:
+ irda_write(aup, IR_WRITE_PHY_CONFIG, IR_BR(2) | IR_PW(12));
+ irda_write(aup, IR_CONFIG_1, IR_SIR_MODE);
+ break;
+ case 57600:
+ irda_write(aup, IR_WRITE_PHY_CONFIG, IR_BR(1) | IR_PW(12));
+ irda_write(aup, IR_CONFIG_1, IR_SIR_MODE);
+ break;
+ case 115200:
+ irda_write(aup, IR_WRITE_PHY_CONFIG, IR_PW(12));
+ irda_write(aup, IR_CONFIG_1, IR_SIR_MODE);
+ break;
+ case 4000000:
+ irda_write(aup, IR_WRITE_PHY_CONFIG, IR_P(15));
+ irda_write(aup, IR_CONFIG_1, IR_FIR | IR_DMA_ENABLE |
+ IR_RX_ENABLE);
+ break;
+ default:
+ printk(KERN_ERR "%s unsupported speed %x\n", dev->name, speed);
+ ret = -EINVAL;
+ break;
+ }
- pDBfree = NULL;
- pDB = aup->db;
- for (i=0; i<(2*NUM_IR_DESC); i++) {
- pDB->pnext = pDBfree;
- pDBfree = pDB;
- pDB->vaddr =
- (u32 *)((unsigned)aup->db[0].vaddr + MAX_BUF_SIZE*i);
- pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr);
- pDB++;
+ aup->speed = speed;
+ irda_write(aup, IR_STATUS, irda_read(aup, IR_STATUS) | IR_PHYEN);
+
+ control = irda_read(aup, IR_STATUS);
+ irda_write(aup, IR_RING_PROMPT, 0);
+
+ if (control & (1 << 14)) {
+ printk(KERN_ERR "%s: configuration error\n", dev->name);
+ } else {
+ if (control & (1 << 11))
+ printk(KERN_DEBUG "%s Valid SIR config\n", dev->name);
+ if (control & (1 << 12))
+ printk(KERN_DEBUG "%s Valid MIR config\n", dev->name);
+ if (control & (1 << 13))
+ printk(KERN_DEBUG "%s Valid FIR config\n", dev->name);
+ if (control & (1 << 10))
+ printk(KERN_DEBUG "%s TX enabled\n", dev->name);
+ if (control & (1 << 9))
+ printk(KERN_DEBUG "%s RX enabled\n", dev->name);
}
- aup->pDBfree = pDBfree;
- /* attach a data buffer to each descriptor */
- for (i=0; i<NUM_IR_DESC; i++) {
- pDB = GetFreeDB(aup);
- if (!pDB) goto out;
- aup->rx_ring[i]->addr_0 = (u8)(pDB->dma_addr & 0xff);
- aup->rx_ring[i]->addr_1 = (u8)((pDB->dma_addr>>8) & 0xff);
- aup->rx_ring[i]->addr_2 = (u8)((pDB->dma_addr>>16) & 0xff);
- aup->rx_ring[i]->addr_3 = (u8)((pDB->dma_addr>>24) & 0xff);
- aup->rx_db_inuse[i] = pDB;
+ return ret;
+}
+
+static void update_rx_stats(struct net_device *dev, u32 status, u32 count)
+{
+ struct net_device_stats *ps = &dev->stats;
+
+ ps->rx_packets++;
+
+ if (status & IR_RX_ERROR) {
+ ps->rx_errors++;
+ if (status & (IR_PHY_ERROR | IR_FIFO_OVER))
+ ps->rx_missed_errors++;
+ if (status & IR_MAX_LEN)
+ ps->rx_length_errors++;
+ if (status & IR_CRC_ERROR)
+ ps->rx_crc_errors++;
+ } else
+ ps->rx_bytes += count;
+}
+
+static void update_tx_stats(struct net_device *dev, u32 status, u32 pkt_len)
+{
+ struct net_device_stats *ps = &dev->stats;
+
+ ps->tx_packets++;
+ ps->tx_bytes += pkt_len;
+
+ if (status & IR_TX_ERROR) {
+ ps->tx_errors++;
+ ps->tx_aborted_errors++;
}
- for (i=0; i<NUM_IR_DESC; i++) {
- pDB = GetFreeDB(aup);
- if (!pDB) goto out;
- aup->tx_ring[i]->addr_0 = (u8)(pDB->dma_addr & 0xff);
- aup->tx_ring[i]->addr_1 = (u8)((pDB->dma_addr>>8) & 0xff);
- aup->tx_ring[i]->addr_2 = (u8)((pDB->dma_addr>>16) & 0xff);
- aup->tx_ring[i]->addr_3 = (u8)((pDB->dma_addr>>24) & 0xff);
- aup->tx_ring[i]->count_0 = 0;
- aup->tx_ring[i]->count_1 = 0;
- aup->tx_ring[i]->flags = 0;
- aup->tx_db_inuse[i] = pDB;
+}
+
+static void au1k_tx_ack(struct net_device *dev)
+{
+ struct au1k_private *aup = netdev_priv(dev);
+ volatile struct ring_dest *ptxd;
+
+ ptxd = aup->tx_ring[aup->tx_tail];
+ while (!(ptxd->flags & AU_OWN) && (aup->tx_tail != aup->tx_head)) {
+ update_tx_stats(dev, ptxd->flags,
+ (ptxd->count_1 << 8) | ptxd->count_0);
+ ptxd->count_0 = 0;
+ ptxd->count_1 = 0;
+ wmb();
+ aup->tx_tail = (aup->tx_tail + 1) & (NUM_IR_DESC - 1);
+ ptxd = aup->tx_ring[aup->tx_tail];
+
+ if (aup->tx_full) {
+ aup->tx_full = 0;
+ netif_wake_queue(dev);
+ }
}
-#if defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100)
- /* power on */
- bcsr_mod(BCSR_RESETS, BCSR_RESETS_IRDA_MODE_MASK,
- BCSR_RESETS_IRDA_MODE_FULL);
-#endif
+ if (aup->tx_tail == aup->tx_head) {
+ if (aup->newspeed) {
+ au1k_irda_set_speed(dev, aup->newspeed);
+ aup->newspeed = 0;
+ } else {
+ irda_write(aup, IR_CONFIG_1,
+ irda_read(aup, IR_CONFIG_1) & ~IR_TX_ENABLE);
+ irda_write(aup, IR_CONFIG_1,
+ irda_read(aup, IR_CONFIG_1) | IR_RX_ENABLE);
+ irda_write(aup, IR_RING_PROMPT, 0);
+ }
+ }
+}
- return 0;
+static int au1k_irda_rx(struct net_device *dev)
+{
+ struct au1k_private *aup = netdev_priv(dev);
+ volatile struct ring_dest *prxd;
+ struct sk_buff *skb;
+ struct db_dest *pDB;
+ u32 flags, count;
-out3:
- dma_free((void *)aup->rx_ring[0],
- 2 * MAX_NUM_IR_DESC*(sizeof(ring_dest_t)));
-out2:
- kfree(aup->rx_buff.head);
-out1:
- printk(KERN_ERR "au1k_init_module failed. Returns %d\n", retval);
- return retval;
+ prxd = aup->rx_ring[aup->rx_head];
+ flags = prxd->flags;
+
+ while (!(flags & AU_OWN)) {
+ pDB = aup->rx_db_inuse[aup->rx_head];
+ count = (prxd->count_1 << 8) | prxd->count_0;
+ if (!(flags & IR_RX_ERROR)) {
+ /* good frame */
+ update_rx_stats(dev, flags, count);
+ skb = alloc_skb(count + 1, GFP_ATOMIC);
+ if (skb == NULL) {
+ dev->stats.rx_dropped++;
+ continue;
+ }
+ skb_reserve(skb, 1);
+ if (aup->speed == 4000000)
+ skb_put(skb, count);
+ else
+ skb_put(skb, count - 2);
+ skb_copy_to_linear_data(skb, (void *)pDB->vaddr,
+ count - 2);
+ skb->dev = dev;
+ skb_reset_mac_header(skb);
+ skb->protocol = htons(ETH_P_IRDA);
+ netif_rx(skb);
+ prxd->count_0 = 0;
+ prxd->count_1 = 0;
+ }
+ prxd->flags |= AU_OWN;
+ aup->rx_head = (aup->rx_head + 1) & (NUM_IR_DESC - 1);
+ irda_write(aup, IR_RING_PROMPT, 0);
+
+ /* next descriptor */
+ prxd = aup->rx_ring[aup->rx_head];
+ flags = prxd->flags;
+
+ }
+ return 0;
}
+static irqreturn_t au1k_irda_interrupt(int dummy, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct au1k_private *aup = netdev_priv(dev);
+
+ irda_write(aup, IR_INT_CLEAR, 0); /* ack irda interrupts */
+
+ au1k_irda_rx(dev);
+ au1k_tx_ack(dev);
+
+ return IRQ_HANDLED;
+}
static int au1k_init(struct net_device *dev)
{
struct au1k_private *aup = netdev_priv(dev);
+ u32 enable, ring_address;
int i;
- u32 control;
- u32 ring_address;
- /* bring the device out of reset */
- control = 0xe; /* coherent, clock enable, one half system clock */
-
+ enable = IR_HC | IR_CE | IR_C;
#ifndef CONFIG_CPU_LITTLE_ENDIAN
- control |= 1;
+ enable |= IR_BE;
#endif
aup->tx_head = 0;
aup->tx_tail = 0;
aup->rx_head = 0;
- for (i=0; i<NUM_IR_DESC; i++) {
+ for (i = 0; i < NUM_IR_DESC; i++)
aup->rx_ring[i]->flags = AU_OWN;
- }
- writel(control, IR_INTERFACE_CONFIG);
- au_sync_delay(10);
+ irda_write(aup, IR_ENABLE, enable);
+ msleep(20);
- writel(read_ir_reg(IR_ENABLE) & ~0x8000, IR_ENABLE); /* disable PHY */
- au_sync_delay(1);
+ /* disable PHY */
+ au1k_irda_plat_set_phy_mode(aup, AU1000_IRDA_PHY_MODE_OFF);
+ irda_write(aup, IR_STATUS, irda_read(aup, IR_STATUS) & ~IR_PHYEN);
+ msleep(20);
- writel(MAX_BUF_SIZE, IR_MAX_PKT_LEN);
+ irda_write(aup, IR_MAX_PKT_LEN, MAX_BUF_SIZE);
ring_address = (u32)virt_to_phys((void *)aup->rx_ring[0]);
- writel(ring_address >> 26, IR_RING_BASE_ADDR_H);
- writel((ring_address >> 10) & 0xffff, IR_RING_BASE_ADDR_L);
+ irda_write(aup, IR_RING_BASE_ADDR_H, ring_address >> 26);
+ irda_write(aup, IR_RING_BASE_ADDR_L, (ring_address >> 10) & 0xffff);
- writel(RING_SIZE_64<<8 | RING_SIZE_64<<12, IR_RING_SIZE);
+ irda_write(aup, IR_RING_SIZE,
+ (RING_SIZE_64 << 8) | (RING_SIZE_64 << 12));
- writel(1<<2 | IR_ONE_PIN, IR_CONFIG_2); /* 48MHz */
- writel(0, IR_RING_ADDR_CMPR);
+ irda_write(aup, IR_CONFIG_2, IR_PHYCLK_48MHZ | IR_ONE_PIN);
+ irda_write(aup, IR_RING_ADDR_CMPR, 0);
au1k_irda_set_speed(dev, 9600);
return 0;
@@ -340,25 +555,28 @@ static int au1k_init(struct net_device *dev)
static int au1k_irda_start(struct net_device *dev)
{
- int retval;
- char hwname[32];
struct au1k_private *aup = netdev_priv(dev);
+ char hwname[32];
+ int retval;
- if ((retval = au1k_init(dev))) {
+ retval = au1k_init(dev);
+ if (retval) {
printk(KERN_ERR "%s: error in au1k_init\n", dev->name);
return retval;
}
- if ((retval = request_irq(AU1000_IRDA_TX_INT, au1k_irda_interrupt,
- 0, dev->name, dev))) {
- printk(KERN_ERR "%s: unable to get IRQ %d\n",
+ retval = request_irq(aup->irq_tx, &au1k_irda_interrupt, 0,
+ dev->name, dev);
+ if (retval) {
+ printk(KERN_ERR "%s: unable to get IRQ %d\n",
dev->name, dev->irq);
return retval;
}
- if ((retval = request_irq(AU1000_IRDA_RX_INT, au1k_irda_interrupt,
- 0, dev->name, dev))) {
- free_irq(AU1000_IRDA_TX_INT, dev);
- printk(KERN_ERR "%s: unable to get IRQ %d\n",
+ retval = request_irq(aup->irq_rx, &au1k_irda_interrupt, 0,
+ dev->name, dev);
+ if (retval) {
+ free_irq(aup->irq_tx, dev);
+ printk(KERN_ERR "%s: unable to get IRQ %d\n",
dev->name, dev->irq);
return retval;
}
@@ -368,9 +586,13 @@ static int au1k_irda_start(struct net_device *dev)
aup->irlap = irlap_open(dev, &aup->qos, hwname);
netif_start_queue(dev);
- writel(read_ir_reg(IR_CONFIG_2) | 1<<8, IR_CONFIG_2); /* int enable */
+ /* int enable */
+ irda_write(aup, IR_CONFIG_2, irda_read(aup, IR_CONFIG_2) | IR_IEN);
+
+ /* power up */
+ au1k_irda_plat_set_phy_mode(aup, AU1000_IRDA_PHY_MODE_SIR);
- aup->timer.expires = RUN_AT((3*HZ));
+ aup->timer.expires = RUN_AT((3 * HZ));
aup->timer.data = (unsigned long)dev;
return 0;
}
@@ -379,11 +601,12 @@ static int au1k_irda_stop(struct net_device *dev)
{
struct au1k_private *aup = netdev_priv(dev);
+ au1k_irda_plat_set_phy_mode(aup, AU1000_IRDA_PHY_MODE_OFF);
+
/* disable interrupts */
- writel(read_ir_reg(IR_CONFIG_2) & ~(1<<8), IR_CONFIG_2);
- writel(0, IR_CONFIG_1);
- writel(0, IR_INTERFACE_CONFIG); /* disable clock */
- au_sync();
+ irda_write(aup, IR_CONFIG_2, irda_read(aup, IR_CONFIG_2) & ~IR_IEN);
+ irda_write(aup, IR_CONFIG_1, 0);
+ irda_write(aup, IR_ENABLE, 0); /* disable clock */
if (aup->irlap) {
irlap_close(aup->irlap);
@@ -394,83 +617,12 @@ static int au1k_irda_stop(struct net_device *dev)
del_timer(&aup->timer);
/* disable the interrupt */
- free_irq(AU1000_IRDA_TX_INT, dev);
- free_irq(AU1000_IRDA_RX_INT, dev);
- return 0;
-}
-
-static void __exit au1k_irda_exit(void)
-{
- struct net_device *dev = ir_devs[0];
- struct au1k_private *aup = netdev_priv(dev);
+ free_irq(aup->irq_tx, dev);
+ free_irq(aup->irq_rx, dev);
- unregister_netdev(dev);
-
- dma_free((void *)aup->db[0].vaddr,
- MAX_BUF_SIZE * 2*NUM_IR_DESC);
- dma_free((void *)aup->rx_ring[0],
- 2 * MAX_NUM_IR_DESC*(sizeof(ring_dest_t)));
- kfree(aup->rx_buff.head);
- free_netdev(dev);
-}
-
-
-static inline void
-update_tx_stats(struct net_device *dev, u32 status, u32 pkt_len)
-{
- struct au1k_private *aup = netdev_priv(dev);
- struct net_device_stats *ps = &aup->stats;
-
- ps->tx_packets++;
- ps->tx_bytes += pkt_len;
-
- if (status & IR_TX_ERROR) {
- ps->tx_errors++;
- ps->tx_aborted_errors++;
- }
-}
-
-
-static void au1k_tx_ack(struct net_device *dev)
-{
- struct au1k_private *aup = netdev_priv(dev);
- volatile ring_dest_t *ptxd;
-
- ptxd = aup->tx_ring[aup->tx_tail];
- while (!(ptxd->flags & AU_OWN) && (aup->tx_tail != aup->tx_head)) {
- update_tx_stats(dev, ptxd->flags,
- ptxd->count_1<<8 | ptxd->count_0);
- ptxd->count_0 = 0;
- ptxd->count_1 = 0;
- au_sync();
-
- aup->tx_tail = (aup->tx_tail + 1) & (NUM_IR_DESC - 1);
- ptxd = aup->tx_ring[aup->tx_tail];
-
- if (aup->tx_full) {
- aup->tx_full = 0;
- netif_wake_queue(dev);
- }
- }
-
- if (aup->tx_tail == aup->tx_head) {
- if (aup->newspeed) {
- au1k_irda_set_speed(dev, aup->newspeed);
- aup->newspeed = 0;
- }
- else {
- writel(read_ir_reg(IR_CONFIG_1) & ~IR_TX_ENABLE,
- IR_CONFIG_1);
- au_sync();
- writel(read_ir_reg(IR_CONFIG_1) | IR_RX_ENABLE,
- IR_CONFIG_1);
- writel(0, IR_RING_PROMPT);
- au_sync();
- }
- }
+ return 0;
}
-
/*
* Au1000 transmit routine.
*/
@@ -478,15 +630,12 @@ static int au1k_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct au1k_private *aup = netdev_priv(dev);
int speed = irda_get_next_speed(skb);
- volatile ring_dest_t *ptxd;
- u32 len;
-
- u32 flags;
- db_dest_t *pDB;
+ volatile struct ring_dest *ptxd;
+ struct db_dest *pDB;
+ u32 len, flags;
- if (speed != aup->speed && speed != -1) {
+ if (speed != aup->speed && speed != -1)
aup->newspeed = speed;
- }
if ((skb->len == 0) && (aup->newspeed)) {
if (aup->tx_tail == aup->tx_head) {
@@ -504,138 +653,47 @@ static int au1k_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
printk(KERN_DEBUG "%s: tx_full\n", dev->name);
netif_stop_queue(dev);
aup->tx_full = 1;
- return NETDEV_TX_BUSY;
- }
- else if (((aup->tx_head + 1) & (NUM_IR_DESC - 1)) == aup->tx_tail) {
+ return 1;
+ } else if (((aup->tx_head + 1) & (NUM_IR_DESC - 1)) == aup->tx_tail) {
printk(KERN_DEBUG "%s: tx_full\n", dev->name);
netif_stop_queue(dev);
aup->tx_full = 1;
- return NETDEV_TX_BUSY;
+ return 1;
}
pDB = aup->tx_db_inuse[aup->tx_head];
#if 0
- if (read_ir_reg(IR_RX_BYTE_CNT) != 0) {
- printk("tx warning: rx byte cnt %x\n",
- read_ir_reg(IR_RX_BYTE_CNT));
+ if (irda_read(aup, IR_RX_BYTE_CNT) != 0) {
+ printk(KERN_DEBUG "tx warning: rx byte cnt %x\n",
+ irda_read(aup, IR_RX_BYTE_CNT));
}
#endif
-
+
if (aup->speed == 4000000) {
/* FIR */
- skb_copy_from_linear_data(skb, pDB->vaddr, skb->len);
+ skb_copy_from_linear_data(skb, (void *)pDB->vaddr, skb->len);
ptxd->count_0 = skb->len & 0xff;
ptxd->count_1 = (skb->len >> 8) & 0xff;
-
- }
- else {
+ } else {
/* SIR */
len = async_wrap_skb(skb, (u8 *)pDB->vaddr, MAX_BUF_SIZE);
ptxd->count_0 = len & 0xff;
ptxd->count_1 = (len >> 8) & 0xff;
ptxd->flags |= IR_DIS_CRC;
- au_writel(au_readl(0xae00000c) & ~(1<<13), 0xae00000c);
}
ptxd->flags |= AU_OWN;
- au_sync();
+ wmb();
- writel(read_ir_reg(IR_CONFIG_1) | IR_TX_ENABLE, IR_CONFIG_1);
- writel(0, IR_RING_PROMPT);
- au_sync();
+ irda_write(aup, IR_CONFIG_1,
+ irda_read(aup, IR_CONFIG_1) | IR_TX_ENABLE);
+ irda_write(aup, IR_RING_PROMPT, 0);
dev_kfree_skb(skb);
aup->tx_head = (aup->tx_head + 1) & (NUM_IR_DESC - 1);
return NETDEV_TX_OK;
}
-
-static inline void
-update_rx_stats(struct net_device *dev, u32 status, u32 count)
-{
- struct au1k_private *aup = netdev_priv(dev);
- struct net_device_stats *ps = &aup->stats;
-
- ps->rx_packets++;
-
- if (status & IR_RX_ERROR) {
- ps->rx_errors++;
- if (status & (IR_PHY_ERROR|IR_FIFO_OVER))
- ps->rx_missed_errors++;
- if (status & IR_MAX_LEN)
- ps->rx_length_errors++;
- if (status & IR_CRC_ERROR)
- ps->rx_crc_errors++;
- }
- else
- ps->rx_bytes += count;
-}
-
-/*
- * Au1000 receive routine.
- */
-static int au1k_irda_rx(struct net_device *dev)
-{
- struct au1k_private *aup = netdev_priv(dev);
- struct sk_buff *skb;
- volatile ring_dest_t *prxd;
- u32 flags, count;
- db_dest_t *pDB;
-
- prxd = aup->rx_ring[aup->rx_head];
- flags = prxd->flags;
-
- while (!(flags & AU_OWN)) {
- pDB = aup->rx_db_inuse[aup->rx_head];
- count = prxd->count_1<<8 | prxd->count_0;
- if (!(flags & IR_RX_ERROR)) {
- /* good frame */
- update_rx_stats(dev, flags, count);
- skb=alloc_skb(count+1,GFP_ATOMIC);
- if (skb == NULL) {
- aup->netdev->stats.rx_dropped++;
- continue;
- }
- skb_reserve(skb, 1);
- if (aup->speed == 4000000)
- skb_put(skb, count);
- else
- skb_put(skb, count-2);
- skb_copy_to_linear_data(skb, pDB->vaddr, count - 2);
- skb->dev = dev;
- skb_reset_mac_header(skb);
- skb->protocol = htons(ETH_P_IRDA);
- netif_rx(skb);
- prxd->count_0 = 0;
- prxd->count_1 = 0;
- }
- prxd->flags |= AU_OWN;
- aup->rx_head = (aup->rx_head + 1) & (NUM_IR_DESC - 1);
- writel(0, IR_RING_PROMPT);
- au_sync();
-
- /* next descriptor */
- prxd = aup->rx_ring[aup->rx_head];
- flags = prxd->flags;
-
- }
- return 0;
-}
-
-
-static irqreturn_t au1k_irda_interrupt(int dummy, void *dev_id)
-{
- struct net_device *dev = dev_id;
-
- writel(0, IR_INT_CLEAR); /* ack irda interrupts */
-
- au1k_irda_rx(dev);
- au1k_tx_ack(dev);
-
- return IRQ_HANDLED;
-}
-
-
/*
* The Tx ring has been full longer than the watchdog timeout
* value. The transmitter must be hung?
@@ -653,142 +711,7 @@ static void au1k_tx_timeout(struct net_device *dev)
netif_wake_queue(dev);
}
-
-/*
- * Set the IrDA communications speed.
- */
-static int
-au1k_irda_set_speed(struct net_device *dev, int speed)
-{
- unsigned long flags;
- struct au1k_private *aup = netdev_priv(dev);
- u32 control;
- int ret = 0, timeout = 10, i;
- volatile ring_dest_t *ptxd;
-#if defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100)
- unsigned long irda_resets;
-#endif
-
- if (speed == aup->speed)
- return ret;
-
- spin_lock_irqsave(&ir_lock, flags);
-
- /* disable PHY first */
- writel(read_ir_reg(IR_ENABLE) & ~0x8000, IR_ENABLE);
-
- /* disable RX/TX */
- writel(read_ir_reg(IR_CONFIG_1) & ~(IR_RX_ENABLE|IR_TX_ENABLE),
- IR_CONFIG_1);
- au_sync_delay(1);
- while (read_ir_reg(IR_ENABLE) & (IR_RX_STATUS | IR_TX_STATUS)) {
- mdelay(1);
- if (!timeout--) {
- printk(KERN_ERR "%s: rx/tx disable timeout\n",
- dev->name);
- break;
- }
- }
-
- /* disable DMA */
- writel(read_ir_reg(IR_CONFIG_1) & ~IR_DMA_ENABLE, IR_CONFIG_1);
- au_sync_delay(1);
-
- /*
- * After we disable tx/rx. the index pointers
- * go back to zero.
- */
- aup->tx_head = aup->tx_tail = aup->rx_head = 0;
- for (i=0; i<NUM_IR_DESC; i++) {
- ptxd = aup->tx_ring[i];
- ptxd->flags = 0;
- ptxd->count_0 = 0;
- ptxd->count_1 = 0;
- }
-
- for (i=0; i<NUM_IR_DESC; i++) {
- ptxd = aup->rx_ring[i];
- ptxd->count_0 = 0;
- ptxd->count_1 = 0;
- ptxd->flags = AU_OWN;
- }
-
- if (speed == 4000000) {
-#if defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100)
- bcsr_mod(BCSR_RESETS, 0, BCSR_RESETS_FIR_SEL);
-#else /* Pb1000 and Pb1100 */
- writel(1<<13, CPLD_AUX1);
-#endif
- }
- else {
-#if defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100)
- bcsr_mod(BCSR_RESETS, BCSR_RESETS_FIR_SEL, 0);
-#else /* Pb1000 and Pb1100 */
- writel(readl(CPLD_AUX1) & ~(1<<13), CPLD_AUX1);
-#endif
- }
-
- switch (speed) {
- case 9600:
- writel(11<<10 | 12<<5, IR_WRITE_PHY_CONFIG);
- writel(IR_SIR_MODE, IR_CONFIG_1);
- break;
- case 19200:
- writel(5<<10 | 12<<5, IR_WRITE_PHY_CONFIG);
- writel(IR_SIR_MODE, IR_CONFIG_1);
- break;
- case 38400:
- writel(2<<10 | 12<<5, IR_WRITE_PHY_CONFIG);
- writel(IR_SIR_MODE, IR_CONFIG_1);
- break;
- case 57600:
- writel(1<<10 | 12<<5, IR_WRITE_PHY_CONFIG);
- writel(IR_SIR_MODE, IR_CONFIG_1);
- break;
- case 115200:
- writel(12<<5, IR_WRITE_PHY_CONFIG);
- writel(IR_SIR_MODE, IR_CONFIG_1);
- break;
- case 4000000:
- writel(0xF, IR_WRITE_PHY_CONFIG);
- writel(IR_FIR|IR_DMA_ENABLE|IR_RX_ENABLE, IR_CONFIG_1);
- break;
- default:
- printk(KERN_ERR "%s unsupported speed %x\n", dev->name, speed);
- ret = -EINVAL;
- break;
- }
-
- aup->speed = speed;
- writel(read_ir_reg(IR_ENABLE) | 0x8000, IR_ENABLE);
- au_sync();
-
- control = read_ir_reg(IR_ENABLE);
- writel(0, IR_RING_PROMPT);
- au_sync();
-
- if (control & (1<<14)) {
- printk(KERN_ERR "%s: configuration error\n", dev->name);
- }
- else {
- if (control & (1<<11))
- printk(KERN_DEBUG "%s Valid SIR config\n", dev->name);
- if (control & (1<<12))
- printk(KERN_DEBUG "%s Valid MIR config\n", dev->name);
- if (control & (1<<13))
- printk(KERN_DEBUG "%s Valid FIR config\n", dev->name);
- if (control & (1<<10))
- printk(KERN_DEBUG "%s TX enabled\n", dev->name);
- if (control & (1<<9))
- printk(KERN_DEBUG "%s RX enabled\n", dev->name);
- }
-
- spin_unlock_irqrestore(&ir_lock, flags);
- return ret;
-}
-
-static int
-au1k_irda_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd)
+static int au1k_irda_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd)
{
struct if_irda_req *rq = (struct if_irda_req *)ifreq;
struct au1k_private *aup = netdev_priv(dev);
@@ -829,8 +752,218 @@ au1k_irda_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd)
return ret;
}
+static const struct net_device_ops au1k_irda_netdev_ops = {
+ .ndo_open = au1k_irda_start,
+ .ndo_stop = au1k_irda_stop,
+ .ndo_start_xmit = au1k_irda_hard_xmit,
+ .ndo_tx_timeout = au1k_tx_timeout,
+ .ndo_do_ioctl = au1k_irda_ioctl,
+};
+
+static int __devinit au1k_irda_net_init(struct net_device *dev)
+{
+ struct au1k_private *aup = netdev_priv(dev);
+ struct db_dest *pDB, *pDBfree;
+ int i, err, retval = 0;
+ dma_addr_t temp;
+
+ err = au1k_irda_init_iobuf(&aup->rx_buff, 14384);
+ if (err)
+ goto out1;
+
+ dev->netdev_ops = &au1k_irda_netdev_ops;
+
+ irda_init_max_qos_capabilies(&aup->qos);
+
+	/* The only value we must override is the baudrate */
+ aup->qos.baud_rate.bits = IR_9600 | IR_19200 | IR_38400 |
+ IR_57600 | IR_115200 | IR_576000 | (IR_4000000 << 8);
+
+ aup->qos.min_turn_time.bits = qos_mtt_bits;
+ irda_qos_bits_to_value(&aup->qos);
+
+ retval = -ENOMEM;
+
+ /* Tx ring follows rx ring + 512 bytes */
+ /* we need a 1k aligned buffer */
+ aup->rx_ring[0] = (struct ring_dest *)
+ dma_alloc(2 * MAX_NUM_IR_DESC * (sizeof(struct ring_dest)),
+ &temp);
+ if (!aup->rx_ring[0])
+ goto out2;
+
+ /* allocate the data buffers */
+ aup->db[0].vaddr =
+ (void *)dma_alloc(MAX_BUF_SIZE * 2 * NUM_IR_DESC, &temp);
+ if (!aup->db[0].vaddr)
+ goto out3;
+
+ setup_hw_rings(aup, (u32)aup->rx_ring[0], (u32)aup->rx_ring[0] + 512);
+
+ pDBfree = NULL;
+ pDB = aup->db;
+ for (i = 0; i < (2 * NUM_IR_DESC); i++) {
+ pDB->pnext = pDBfree;
+ pDBfree = pDB;
+ pDB->vaddr =
+ (u32 *)((unsigned)aup->db[0].vaddr + (MAX_BUF_SIZE * i));
+ pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr);
+ pDB++;
+ }
+ aup->pDBfree = pDBfree;
+
+ /* attach a data buffer to each descriptor */
+ for (i = 0; i < NUM_IR_DESC; i++) {
+ pDB = GetFreeDB(aup);
+ if (!pDB)
+ goto out3;
+ aup->rx_ring[i]->addr_0 = (u8)(pDB->dma_addr & 0xff);
+ aup->rx_ring[i]->addr_1 = (u8)((pDB->dma_addr >> 8) & 0xff);
+ aup->rx_ring[i]->addr_2 = (u8)((pDB->dma_addr >> 16) & 0xff);
+ aup->rx_ring[i]->addr_3 = (u8)((pDB->dma_addr >> 24) & 0xff);
+ aup->rx_db_inuse[i] = pDB;
+ }
+ for (i = 0; i < NUM_IR_DESC; i++) {
+ pDB = GetFreeDB(aup);
+ if (!pDB)
+ goto out3;
+ aup->tx_ring[i]->addr_0 = (u8)(pDB->dma_addr & 0xff);
+ aup->tx_ring[i]->addr_1 = (u8)((pDB->dma_addr >> 8) & 0xff);
+ aup->tx_ring[i]->addr_2 = (u8)((pDB->dma_addr >> 16) & 0xff);
+ aup->tx_ring[i]->addr_3 = (u8)((pDB->dma_addr >> 24) & 0xff);
+ aup->tx_ring[i]->count_0 = 0;
+ aup->tx_ring[i]->count_1 = 0;
+ aup->tx_ring[i]->flags = 0;
+ aup->tx_db_inuse[i] = pDB;
+ }
+
+ return 0;
+
+out3:
+ dma_free((void *)aup->rx_ring[0],
+ 2 * MAX_NUM_IR_DESC * (sizeof(struct ring_dest)));
+out2:
+ kfree(aup->rx_buff.head);
+out1:
+ printk(KERN_ERR "au1k_irda_net_init() failed. Returns %d\n", retval);
+ return retval;
+}
+
+static int __devinit au1k_irda_probe(struct platform_device *pdev)
+{
+ struct au1k_private *aup;
+ struct net_device *dev;
+ struct resource *r;
+ int err;
+
+ dev = alloc_irdadev(sizeof(struct au1k_private));
+ if (!dev)
+ return -ENOMEM;
+
+ aup = netdev_priv(dev);
+
+ aup->platdata = pdev->dev.platform_data;
+
+ err = -EINVAL;
+ r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!r)
+ goto out;
+
+ aup->irq_tx = r->start;
+
+ r = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
+ if (!r)
+ goto out;
+
+ aup->irq_rx = r->start;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r)
+ goto out;
+
+ err = -EBUSY;
+ aup->ioarea = request_mem_region(r->start, r->end - r->start + 1,
+ pdev->name);
+ if (!aup->ioarea)
+ goto out;
+
+ aup->iobase = ioremap_nocache(r->start, r->end - r->start + 1);
+ if (!aup->iobase)
+ goto out2;
+
+ dev->irq = aup->irq_rx;
+
+ err = au1k_irda_net_init(dev);
+ if (err)
+ goto out3;
+ err = register_netdev(dev);
+ if (err)
+ goto out4;
+
+ platform_set_drvdata(pdev, dev);
+
+ printk(KERN_INFO "IrDA: Registered device %s\n", dev->name);
+ return 0;
+
+out4:
+ dma_free((void *)aup->db[0].vaddr,
+ MAX_BUF_SIZE * 2 * NUM_IR_DESC);
+ dma_free((void *)aup->rx_ring[0],
+ 2 * MAX_NUM_IR_DESC * (sizeof(struct ring_dest)));
+ kfree(aup->rx_buff.head);
+out3:
+ iounmap(aup->iobase);
+out2:
+ release_resource(aup->ioarea);
+ kfree(aup->ioarea);
+out:
+ free_netdev(dev);
+ return err;
+}
+
+static int __devexit au1k_irda_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct au1k_private *aup = netdev_priv(dev);
+
+ unregister_netdev(dev);
+
+ dma_free((void *)aup->db[0].vaddr,
+ MAX_BUF_SIZE * 2 * NUM_IR_DESC);
+ dma_free((void *)aup->rx_ring[0],
+ 2 * MAX_NUM_IR_DESC * (sizeof(struct ring_dest)));
+ kfree(aup->rx_buff.head);
+
+ iounmap(aup->iobase);
+ release_resource(aup->ioarea);
+ kfree(aup->ioarea);
+
+ free_netdev(dev);
+
+ return 0;
+}
+
+static struct platform_driver au1k_irda_driver = {
+ .driver = {
+ .name = "au1000-irda",
+ .owner = THIS_MODULE,
+ },
+ .probe = au1k_irda_probe,
+ .remove = __devexit_p(au1k_irda_remove),
+};
+
+static int __init au1k_irda_load(void)
+{
+ return platform_driver_register(&au1k_irda_driver);
+}
+
+static void __exit au1k_irda_unload(void)
+{
+ return platform_driver_unregister(&au1k_irda_driver);
+}
+
MODULE_AUTHOR("Pete Popov <ppopov@mvista.com>");
MODULE_DESCRIPTION("Au1000 IrDA Device Driver");
-module_init(au1k_irda_init);
-module_exit(au1k_irda_exit);
+module_init(au1k_irda_load);
+module_exit(au1k_irda_unload);
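
The rewritten probe above pulls its TX interrupt, RX interrupt and register block from platform resources in that fixed order instead of hard-coding AU1000_IRDA_TX_INT/AU1000_IRDA_RX_INT. A hypothetical board description matching that ordering could look like the sketch below; the IRQ numbers and register addresses are placeholders, not taken from any real Alchemy board file.

static struct resource au1k_irda_res[] = {
	/* order matters: probe() takes IORESOURCE_IRQ 0 as TX, 1 as RX */
	{ .start = 10,         .end = 10,         .flags = IORESOURCE_IRQ },
	{ .start = 11,         .end = 11,         .flags = IORESOURCE_IRQ },
	{ .start = 0x10300000, .end = 0x103000ff, .flags = IORESOURCE_MEM },
};

static struct platform_device au1k_irda_device = {
	.name          = "au1000-irda",	/* matches au1k_irda_driver.driver.name */
	.id            = -1,
	.resource      = au1k_irda_res,
	.num_resources = ARRAY_SIZE(au1k_irda_res),
};
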
diff --git a/drivers/net/irda/bfin_sir.c b/drivers/net/irda/bfin_sir.c
index 9d4ce1aba10c..a561ae44a9ac 100644
--- a/drivers/net/irda/bfin_sir.c
+++ b/drivers/net/irda/bfin_sir.c
@@ -806,18 +806,7 @@ static struct platform_driver bfin_ir_driver = {
},
};
-static int __init bfin_sir_init(void)
-{
- return platform_driver_register(&bfin_ir_driver);
-}
-
-static void __exit bfin_sir_exit(void)
-{
- platform_driver_unregister(&bfin_ir_driver);
-}
-
-module_init(bfin_sir_init);
-module_exit(bfin_sir_exit);
+module_platform_driver(bfin_ir_driver);
module_param(max_rate, int, 0);
MODULE_PARM_DESC(max_rate, "Maximum baud rate (115200, 57600, 38400, 19200, 9600)");
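
module_platform_driver(), used here and again in the pxaficp_ir, sh_irda and sh_sir hunks below, generates the same register/unregister boilerplate that each of these hunks deletes. Roughly (simplified from the generic module_driver() helper it is built on), the macro expands to:

#define module_platform_driver(__driver)			\
static int __init __driver##_init(void)			\
{								\
	return platform_driver_register(&(__driver));		\
}								\
module_init(__driver##_init);					\
static void __exit __driver##_exit(void)			\
{								\
	platform_driver_unregister(&(__driver));		\
}								\
module_exit(__driver##_exit);
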
diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c
index b45b2cc42804..64f403da101c 100644
--- a/drivers/net/irda/donauboe.c
+++ b/drivers/net/irda/donauboe.c
@@ -197,7 +197,7 @@ static char *driver_name = DRIVER_NAME;
static int max_baud = 4000000;
#ifdef USE_PROBE
-static int do_probe = 0;
+static bool do_probe = false;
#endif
diff --git a/drivers/net/irda/nsc-ircc.c b/drivers/net/irda/nsc-ircc.c
index b56636da6cc3..2a4f2f153244 100644
--- a/drivers/net/irda/nsc-ircc.c
+++ b/drivers/net/irda/nsc-ircc.c
@@ -1664,7 +1664,7 @@ static int nsc_ircc_dma_xmit_complete(struct nsc_ircc_cb *self)
switch_bank(iobase, BANK0);
outb(inb(iobase+MCR) & ~MCR_DMA_EN, iobase+MCR);
- /* Check for underrrun! */
+ /* Check for underrun! */
if (inb(iobase+ASCR) & ASCR_TXUR) {
self->netdev->stats.tx_errors++;
self->netdev->stats.tx_fifo_errors++;
diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c
index d0851dfa0378..81d5275a15e2 100644
--- a/drivers/net/irda/pxaficp_ir.c
+++ b/drivers/net/irda/pxaficp_ir.c
@@ -966,18 +966,7 @@ static struct platform_driver pxa_ir_driver = {
.resume = pxa_irda_resume,
};
-static int __init pxa_irda_init(void)
-{
- return platform_driver_register(&pxa_ir_driver);
-}
-
-static void __exit pxa_irda_exit(void)
-{
- platform_driver_unregister(&pxa_ir_driver);
-}
-
-module_init(pxa_irda_init);
-module_exit(pxa_irda_exit);
+module_platform_driver(pxa_ir_driver);
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:pxa2xx-ir");
diff --git a/drivers/net/irda/sh_irda.c b/drivers/net/irda/sh_irda.c
index d275e276e742..725d6b367822 100644
--- a/drivers/net/irda/sh_irda.c
+++ b/drivers/net/irda/sh_irda.c
@@ -873,18 +873,7 @@ static struct platform_driver sh_irda_driver = {
},
};
-static int __init sh_irda_init(void)
-{
- return platform_driver_register(&sh_irda_driver);
-}
-
-static void __exit sh_irda_exit(void)
-{
- platform_driver_unregister(&sh_irda_driver);
-}
-
-module_init(sh_irda_init);
-module_exit(sh_irda_exit);
+module_platform_driver(sh_irda_driver);
MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>");
MODULE_DESCRIPTION("SuperH IrDA driver");
diff --git a/drivers/net/irda/sh_sir.c b/drivers/net/irda/sh_sir.c
index ed7d7d62bf68..e6661b5c1f83 100644
--- a/drivers/net/irda/sh_sir.c
+++ b/drivers/net/irda/sh_sir.c
@@ -808,18 +808,7 @@ static struct platform_driver sh_sir_driver = {
},
};
-static int __init sh_sir_init(void)
-{
- return platform_driver_register(&sh_sir_driver);
-}
-
-static void __exit sh_sir_exit(void)
-{
- platform_driver_unregister(&sh_sir_driver);
-}
-
-module_init(sh_sir_init);
-module_exit(sh_sir_exit);
+module_platform_driver(sh_sir_driver);
MODULE_AUTHOR("Kuninori Morimoto <morimoto.kuninori@renesas.com>");
MODULE_DESCRIPTION("SuperH IrDA driver");
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index 8b1c3484d271..6c95d4087b2d 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -79,7 +79,7 @@ MODULE_AUTHOR("Daniele Peri <peri@csai.unipa.it>");
MODULE_DESCRIPTION("SMC IrCC SIR/FIR controller driver");
MODULE_LICENSE("GPL");
-static int smsc_nopnp = 1;
+static bool smsc_nopnp = true;
module_param_named(nopnp, smsc_nopnp, bool, 0);
MODULE_PARM_DESC(nopnp, "Do not use PNP to detect controller settings, defaults to true");
diff --git a/drivers/net/irda/stir4200.c b/drivers/net/irda/stir4200.c
index 212868eb6f5f..e6e59a078ef4 100644
--- a/drivers/net/irda/stir4200.c
+++ b/drivers/net/irda/stir4200.c
@@ -750,7 +750,7 @@ static int stir_transmit_thread(void *arg)
write_reg(stir, REG_CTRL1, CTRL1_TXPWD|CTRL1_RXPWD);
- refrigerator();
+ try_to_freeze();
if (change_speed(stir, stir->speed))
break;
diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c
index 6d6479049aa1..2d456dd164fb 100644
--- a/drivers/net/irda/via-ircc.c
+++ b/drivers/net/irda/via-ircc.c
@@ -942,14 +942,14 @@ static int via_ircc_dma_xmit_complete(struct via_ircc_cb *self)
iobase = self->io.fir_base;
/* Disable DMA */
// DisableDmaChannel(self->io.dma);
- /* Check for underrrun! */
+ /* Check for underrun! */
/* Clear bit, by writing 1 into it */
Tx_status = GetTXStatus(iobase);
if (Tx_status & 0x08) {
self->netdev->stats.tx_errors++;
self->netdev->stats.tx_fifo_errors++;
hwreset(self);
-// how to clear underrrun ?
+ /* how to clear underrun? */
} else {
self->netdev->stats.tx_packets++;
ResetChip(iobase, 3);
diff --git a/drivers/net/irda/w83977af_ir.c b/drivers/net/irda/w83977af_ir.c
index c4366601b067..7d43506c7032 100644
--- a/drivers/net/irda/w83977af_ir.c
+++ b/drivers/net/irda/w83977af_ir.c
@@ -677,7 +677,7 @@ static void w83977af_dma_xmit_complete(struct w83977af_ir *self)
switch_bank(iobase, SET0);
outb(inb(iobase+HCR) & ~HCR_EN_DMA, iobase+HCR);
- /* Check for underrrun! */
+ /* Check for underrun! */
if (inb(iobase+AUDR) & AUDR_UNDR) {
IRDA_DEBUG(0, "%s(), Transmit underrun!\n", __func__ );
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 4ce9e5f2c069..b71998d0b5b4 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -169,7 +169,7 @@ static void loopback_setup(struct net_device *dev)
dev->features = NETIF_F_SG | NETIF_F_FRAGLIST
| NETIF_F_ALL_TSO
| NETIF_F_UFO
- | NETIF_F_NO_CSUM
+ | NETIF_F_HW_CSUM
| NETIF_F_RXCSUM
| NETIF_F_HIGHDMA
| NETIF_F_LLTX
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 74134970b709..f2f820c4b40a 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -26,6 +26,7 @@
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_arp.h>
+#include <linux/if_vlan.h>
#include <linux/if_link.h>
#include <linux/if_macvlan.h>
#include <net/rtnetlink.h>
@@ -520,26 +521,23 @@ static struct rtnl_link_stats64 *macvlan_dev_get_stats64(struct net_device *dev,
return stats;
}
-static void macvlan_vlan_rx_add_vid(struct net_device *dev,
+static int macvlan_vlan_rx_add_vid(struct net_device *dev,
unsigned short vid)
{
struct macvlan_dev *vlan = netdev_priv(dev);
struct net_device *lowerdev = vlan->lowerdev;
- const struct net_device_ops *ops = lowerdev->netdev_ops;
- if (ops->ndo_vlan_rx_add_vid)
- ops->ndo_vlan_rx_add_vid(lowerdev, vid);
+ return vlan_vid_add(lowerdev, vid);
}
-static void macvlan_vlan_rx_kill_vid(struct net_device *dev,
+static int macvlan_vlan_rx_kill_vid(struct net_device *dev,
unsigned short vid)
{
struct macvlan_dev *vlan = netdev_priv(dev);
struct net_device *lowerdev = vlan->lowerdev;
- const struct net_device_ops *ops = lowerdev->netdev_ops;
- if (ops->ndo_vlan_rx_kill_vid)
- ops->ndo_vlan_rx_kill_vid(lowerdev, vid);
+ vlan_vid_del(lowerdev, vid);
+ return 0;
}
static void macvlan_ethtool_get_drvinfo(struct net_device *dev,
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 1b7082d08f33..58dc117a8d78 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -145,8 +145,8 @@ static void macvtap_put_queue(struct macvtap_queue *q)
if (vlan) {
int index = get_slot(vlan, q);
- rcu_assign_pointer(vlan->taps[index], NULL);
- rcu_assign_pointer(q->vlan, NULL);
+ RCU_INIT_POINTER(vlan->taps[index], NULL);
+ RCU_INIT_POINTER(q->vlan, NULL);
sock_put(&q->sk);
--vlan->numvtaps;
}
@@ -175,6 +175,14 @@ static struct macvtap_queue *macvtap_get_queue(struct net_device *dev,
if (!numvtaps)
goto out;
+ /* Check if we can use flow to select a queue */
+ rxq = skb_get_rxhash(skb);
+ if (rxq) {
+ tap = rcu_dereference(vlan->taps[rxq % numvtaps]);
+ if (tap)
+ goto out;
+ }
+
if (likely(skb_rx_queue_recorded(skb))) {
rxq = skb_get_rx_queue(skb);
@@ -186,14 +194,6 @@ static struct macvtap_queue *macvtap_get_queue(struct net_device *dev,
goto out;
}
- /* Check if we can use flow to select a queue */
- rxq = skb_get_rxhash(skb);
- if (rxq) {
- tap = rcu_dereference(vlan->taps[rxq % numvtaps]);
- if (tap)
- goto out;
- }
-
/* Everything failed - find first available queue */
for (rxq = 0; rxq < MAX_MACVTAP_QUEUES; rxq++) {
tap = rcu_dereference(vlan->taps[rxq]);
@@ -223,8 +223,8 @@ static void macvtap_del_queues(struct net_device *dev)
lockdep_is_held(&macvtap_lock));
if (q) {
qlist[j++] = q;
- rcu_assign_pointer(vlan->taps[i], NULL);
- rcu_assign_pointer(q->vlan, NULL);
+ RCU_INIT_POINTER(vlan->taps[i], NULL);
+ RCU_INIT_POINTER(q->vlan, NULL);
vlan->numvtaps--;
}
}
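
The macvtap hunk above reorders queue selection so the skb's flow hash is tried before any recorded RX queue. A condensed sketch of the resulting lookup order in macvtap_get_queue(); RCU/locking context, the single-queue shortcut and error paths are omitted, and pick_queue() is an illustrative name, not a function in the driver.

static struct macvtap_queue *pick_queue(struct macvlan_dev *vlan,
					struct sk_buff *skb,
					unsigned int numvtaps)
{
	struct macvtap_queue *tap = NULL;
	u32 rxq;

	rxq = skb_get_rxhash(skb);			/* 1. flow hash */
	if (rxq)
		tap = rcu_dereference(vlan->taps[rxq % numvtaps]);

	if (!tap && skb_rx_queue_recorded(skb)) {	/* 2. recorded RX queue */
		rxq = skb_get_rx_queue(skb);
		while (unlikely(rxq >= numvtaps))
			rxq -= numvtaps;
		tap = rcu_dereference(vlan->taps[rxq]);
	}

	for (rxq = 0; !tap && rxq < MAX_MACVTAP_QUEUES; rxq++)	/* 3. fallback */
		tap = rcu_dereference(vlan->taps[rxq]);

	return tap;
}
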
diff --git a/drivers/net/mii.c b/drivers/net/mii.c
index c62e7816d548..c70c2332d15e 100644
--- a/drivers/net/mii.c
+++ b/drivers/net/mii.c
@@ -35,26 +35,11 @@
static u32 mii_get_an(struct mii_if_info *mii, u16 addr)
{
- u32 result = 0;
int advert;
advert = mii->mdio_read(mii->dev, mii->phy_id, addr);
- if (advert & LPA_LPACK)
- result |= ADVERTISED_Autoneg;
- if (advert & ADVERTISE_10HALF)
- result |= ADVERTISED_10baseT_Half;
- if (advert & ADVERTISE_10FULL)
- result |= ADVERTISED_10baseT_Full;
- if (advert & ADVERTISE_100HALF)
- result |= ADVERTISED_100baseT_Half;
- if (advert & ADVERTISE_100FULL)
- result |= ADVERTISED_100baseT_Full;
- if (advert & ADVERTISE_PAUSE_CAP)
- result |= ADVERTISED_Pause;
- if (advert & ADVERTISE_PAUSE_ASYM)
- result |= ADVERTISED_Asym_Pause;
-
- return result;
+
+ return mii_lpa_to_ethtool_lpa_t(advert);
}
/**
@@ -104,19 +89,14 @@ int mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
ecmd->autoneg = AUTONEG_ENABLE;
ecmd->advertising |= mii_get_an(mii, MII_ADVERTISE);
- if (ctrl1000 & ADVERTISE_1000HALF)
- ecmd->advertising |= ADVERTISED_1000baseT_Half;
- if (ctrl1000 & ADVERTISE_1000FULL)
- ecmd->advertising |= ADVERTISED_1000baseT_Full;
+ if (mii->supports_gmii)
+ ecmd->advertising |=
+ mii_ctrl1000_to_ethtool_adv_t(ctrl1000);
if (bmsr & BMSR_ANEGCOMPLETE) {
ecmd->lp_advertising = mii_get_an(mii, MII_LPA);
- if (stat1000 & LPA_1000HALF)
- ecmd->lp_advertising |=
- ADVERTISED_1000baseT_Half;
- if (stat1000 & LPA_1000FULL)
- ecmd->lp_advertising |=
- ADVERTISED_1000baseT_Full;
+ ecmd->lp_advertising |=
+ mii_stat1000_to_ethtool_lpa_t(stat1000);
} else {
ecmd->lp_advertising = 0;
}
@@ -204,20 +184,11 @@ int mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
advert2 = mii->mdio_read(dev, mii->phy_id, MII_CTRL1000);
tmp2 = advert2 & ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
}
- if (ecmd->advertising & ADVERTISED_10baseT_Half)
- tmp |= ADVERTISE_10HALF;
- if (ecmd->advertising & ADVERTISED_10baseT_Full)
- tmp |= ADVERTISE_10FULL;
- if (ecmd->advertising & ADVERTISED_100baseT_Half)
- tmp |= ADVERTISE_100HALF;
- if (ecmd->advertising & ADVERTISED_100baseT_Full)
- tmp |= ADVERTISE_100FULL;
- if (mii->supports_gmii) {
- if (ecmd->advertising & ADVERTISED_1000baseT_Half)
- tmp2 |= ADVERTISE_1000HALF;
- if (ecmd->advertising & ADVERTISED_1000baseT_Full)
- tmp2 |= ADVERTISE_1000FULL;
- }
+ tmp |= ethtool_adv_to_mii_adv_t(ecmd->advertising);
+
+ if (mii->supports_gmii)
+ tmp2 |=
+ ethtool_adv_to_mii_ctrl1000_t(ecmd->advertising);
if (advert != tmp) {
mii->mdio_write(dev, mii->phy_id, MII_ADVERTISE, tmp);
mii->advertising = tmp;
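
Here, and in the phy_device.c hunk further below, the open-coded translation between MII advertisement registers and ethtool ADVERTISED_* flags is folded into the new mii/ethtool helpers. As a sketch of what one direction does, mirroring exactly the bits the deleted lines handled (the function name is illustrative; the real helpers live in linux/mii.h):

static inline u32 sketch_ethtool_adv_to_mii_adv(u32 ethadv)
{
	u32 mii = 0;

	if (ethadv & ADVERTISED_10baseT_Half)
		mii |= ADVERTISE_10HALF;
	if (ethadv & ADVERTISED_10baseT_Full)
		mii |= ADVERTISE_10FULL;
	if (ethadv & ADVERTISED_100baseT_Half)
		mii |= ADVERTISE_100HALF;
	if (ethadv & ADVERTISED_100baseT_Full)
		mii |= ADVERTISE_100FULL;
	if (ethadv & ADVERTISED_Pause)
		mii |= ADVERTISE_PAUSE_CAP;
	if (ethadv & ADVERTISED_Asym_Pause)
		mii |= ADVERTISE_PAUSE_ASYM;

	return mii;
}

The lpa/ctrl1000 variants follow the same pattern for the link-partner and gigabit register bits.
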
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index bb88e12101c7..fbdcdf83cbfd 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -3,7 +3,7 @@
#
menuconfig PHYLIB
- bool "PHY Device support and infrastructure"
+ tristate "PHY Device support and infrastructure"
depends on !S390
depends on NETDEVICES
help
@@ -131,3 +131,7 @@ config MDIO_OCTEON
If in doubt, say Y.
endif # PHYLIB
+
+config MICREL_KS8995MA
+ tristate "Micrel KS8995MA 5-ports 10/100 managed Ethernet switch"
+ depends on SPI
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 2333215bbb32..e15c83fecbe0 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -23,3 +23,4 @@ obj-$(CONFIG_DP83640_PHY) += dp83640.o
obj-$(CONFIG_STE10XP) += ste10Xp.o
obj-$(CONFIG_MICREL_PHY) += micrel.o
obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o
+obj-$(CONFIG_MICREL_KS8995MA) += spi_ks8995.o
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 9663e0ba6003..ba3c59147aa7 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -1159,7 +1159,7 @@ static void rx_timestamp_work(struct work_struct *work)
}
}
spin_unlock_irqrestore(&dp83640->rx_lock, flags);
- netif_rx(skb);
+ netif_rx_ni(skb);
}
/* Clear out expired time stamps. */
diff --git a/drivers/net/phy/fixed.c b/drivers/net/phy/fixed.c
index 1fa4d73c3cca..633680d0828e 100644
--- a/drivers/net/phy/fixed.c
+++ b/drivers/net/phy/fixed.c
@@ -220,7 +220,7 @@ static int __init fixed_mdio_bus_init(void)
goto err_mdiobus_reg;
}
- snprintf(fmb->mii_bus->id, MII_BUS_ID_SIZE, "0");
+ snprintf(fmb->mii_bus->id, MII_BUS_ID_SIZE, "fixed-0");
fmb->mii_bus->name = "Fixed MDIO Bus";
fmb->mii_bus->priv = fmb;
fmb->mii_bus->parent = &pdev->dev;
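
The bus id now carries a bus-name prefix ("fixed-0" here, "gpio-%x" and "%s-%x" in the mdio-gpio and mdio-octeon hunks below), so PHY device names built from bus id plus address change accordingly. A small sketch of how an Ethernet driver might form the matching PHY id string, assuming the usual PHY_ID_FMT ("bus_id:address") convention; the address value is a placeholder.

static void build_fixed_phy_id(char *phy_id, size_t len, int addr)
{
	/* yields e.g. "fixed-0:01" for PHY address 1 on the fixed MDIO bus */
	snprintf(phy_id, len, PHY_ID_FMT, "fixed-0", addr);
}
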
diff --git a/drivers/net/phy/mdio-bitbang.c b/drivers/net/phy/mdio-bitbang.c
index 65391891d8c4..daec9b05d168 100644
--- a/drivers/net/phy/mdio-bitbang.c
+++ b/drivers/net/phy/mdio-bitbang.c
@@ -202,6 +202,14 @@ static int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val)
return 0;
}
+static int mdiobb_reset(struct mii_bus *bus)
+{
+ struct mdiobb_ctrl *ctrl = bus->priv;
+ if (ctrl->reset)
+ ctrl->reset(bus);
+ return 0;
+}
+
struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl)
{
struct mii_bus *bus;
@@ -214,6 +222,7 @@ struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl)
bus->read = mdiobb_read;
bus->write = mdiobb_write;
+ bus->reset = mdiobb_reset;
bus->priv = ctrl;
return bus;
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index 2843c90f712f..50e8e5e74465 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -95,6 +95,7 @@ static struct mii_bus * __devinit mdio_gpio_bus_init(struct device *dev,
goto out;
bitbang->ctrl.ops = &mdio_gpio_ops;
+ bitbang->ctrl.reset = pdata->reset;
bitbang->mdc = pdata->mdc;
bitbang->mdio = pdata->mdio;
@@ -115,7 +116,7 @@ static struct mii_bus * __devinit mdio_gpio_bus_init(struct device *dev,
if (!new_bus->irq[i])
new_bus->irq[i] = PHY_POLL;
- snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", bus_id);
+ snprintf(new_bus->id, MII_BUS_ID_SIZE, "gpio-%x", bus_id);
if (gpio_request(bitbang->mdc, "mdc"))
goto out_free_bus;
diff --git a/drivers/net/phy/mdio-octeon.c b/drivers/net/phy/mdio-octeon.c
index bd12ba941be5..826d961f39f7 100644
--- a/drivers/net/phy/mdio-octeon.c
+++ b/drivers/net/phy/mdio-octeon.c
@@ -118,7 +118,8 @@ static int __devinit octeon_mdiobus_probe(struct platform_device *pdev)
bus->mii_bus->priv = bus;
bus->mii_bus->irq = bus->phy_irq;
bus->mii_bus->name = "mdio-octeon";
- snprintf(bus->mii_bus->id, MII_BUS_ID_SIZE, "%x", bus->unit);
+ snprintf(bus->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+ bus->mii_bus->name, bus->unit);
bus->mii_bus->parent = &pdev->dev;
bus->mii_bus->read = octeon_mdiobus_read;
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 6c58da2b882c..88cc5db9affd 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -37,22 +37,36 @@
#include <asm/uaccess.h>
/**
- * mdiobus_alloc - allocate a mii_bus structure
+ * mdiobus_alloc_size - allocate a mii_bus structure
*
* Description: called by a bus driver to allocate an mii_bus
* structure to fill in.
+ *
+ * 'size' is an extra amount of memory to allocate for private storage.
+ * If non-zero, then bus->priv points to that memory.
*/
-struct mii_bus *mdiobus_alloc(void)
+struct mii_bus *mdiobus_alloc_size(size_t size)
{
struct mii_bus *bus;
+ size_t aligned_size = ALIGN(sizeof(*bus), NETDEV_ALIGN);
+ size_t alloc_size;
+
+ /* If we alloc extra space, it should be aligned */
+ if (size)
+ alloc_size = aligned_size + size;
+ else
+ alloc_size = sizeof(*bus);
- bus = kzalloc(sizeof(*bus), GFP_KERNEL);
- if (bus != NULL)
+ bus = kzalloc(alloc_size, GFP_KERNEL);
+ if (bus) {
bus->state = MDIOBUS_ALLOCATED;
+ if (size)
+ bus->priv = (void *)bus + aligned_size;
+ }
return bus;
}
-EXPORT_SYMBOL(mdiobus_alloc);
+EXPORT_SYMBOL(mdiobus_alloc_size);
/**
* mdiobus_release - mii_bus device release callback
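
mdiobus_alloc_size() above co-allocates driver-private storage behind the NETDEV_ALIGN-aligned mii_bus structure and points bus->priv at it, so bus drivers no longer need a second allocation. A minimal usage sketch with a made-up private struct:

struct my_mdio_priv {
	void __iomem *regs;		/* hypothetical field */
};

static struct mii_bus *my_mdio_alloc(void)
{
	struct mii_bus *bus;
	struct my_mdio_priv *priv;

	bus = mdiobus_alloc_size(sizeof(*priv));
	if (!bus)
		return NULL;

	priv = bus->priv;		/* lives in the same allocation as *bus */
	priv->regs = NULL;		/* filled in by the caller later */

	/* caller still sets bus->name/read/write and calls mdiobus_register() */
	return bus;
}
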
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 83a5a5afec67..f320f466f03b 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -563,20 +563,9 @@ static int genphy_config_advert(struct phy_device *phydev)
if (adv < 0)
return adv;
- adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP |
+ adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP |
ADVERTISE_PAUSE_ASYM);
- if (advertise & ADVERTISED_10baseT_Half)
- adv |= ADVERTISE_10HALF;
- if (advertise & ADVERTISED_10baseT_Full)
- adv |= ADVERTISE_10FULL;
- if (advertise & ADVERTISED_100baseT_Half)
- adv |= ADVERTISE_100HALF;
- if (advertise & ADVERTISED_100baseT_Full)
- adv |= ADVERTISE_100FULL;
- if (advertise & ADVERTISED_Pause)
- adv |= ADVERTISE_PAUSE_CAP;
- if (advertise & ADVERTISED_Asym_Pause)
- adv |= ADVERTISE_PAUSE_ASYM;
+ adv |= ethtool_adv_to_mii_adv_t(advertise);
if (adv != oldadv) {
err = phy_write(phydev, MII_ADVERTISE, adv);
@@ -595,10 +584,7 @@ static int genphy_config_advert(struct phy_device *phydev)
return adv;
adv &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
- if (advertise & SUPPORTED_1000baseT_Half)
- adv |= ADVERTISE_1000HALF;
- if (advertise & SUPPORTED_1000baseT_Full)
- adv |= ADVERTISE_1000FULL;
+ adv |= ethtool_adv_to_mii_ctrl1000_t(advertise);
if (adv != oldadv) {
err = phy_write(phydev, MII_CTRL1000, adv);
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index 342505c976d6..fc3e7e96c88c 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -22,26 +22,7 @@
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/netdevice.h>
-
-#define MII_LAN83C185_ISF 29 /* Interrupt Source Flags */
-#define MII_LAN83C185_IM 30 /* Interrupt Mask */
-#define MII_LAN83C185_CTRL_STATUS 17 /* Mode/Status Register */
-
-#define MII_LAN83C185_ISF_INT1 (1<<1) /* Auto-Negotiation Page Received */
-#define MII_LAN83C185_ISF_INT2 (1<<2) /* Parallel Detection Fault */
-#define MII_LAN83C185_ISF_INT3 (1<<3) /* Auto-Negotiation LP Ack */
-#define MII_LAN83C185_ISF_INT4 (1<<4) /* Link Down */
-#define MII_LAN83C185_ISF_INT5 (1<<5) /* Remote Fault Detected */
-#define MII_LAN83C185_ISF_INT6 (1<<6) /* Auto-Negotiation complete */
-#define MII_LAN83C185_ISF_INT7 (1<<7) /* ENERGYON */
-
-#define MII_LAN83C185_ISF_INT_ALL (0x0e)
-
-#define MII_LAN83C185_ISF_INT_PHYLIB_EVENTS \
- (MII_LAN83C185_ISF_INT6 | MII_LAN83C185_ISF_INT4 | \
- MII_LAN83C185_ISF_INT7)
-
-#define MII_LAN83C185_EDPWRDOWN (1 << 13) /* EDPWRDOWN */
+#include <linux/smscphy.h>
static int smsc_phy_config_intr(struct phy_device *phydev)
{
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
new file mode 100644
index 000000000000..116a2dd7c879
--- /dev/null
+++ b/drivers/net/phy/spi_ks8995.c
@@ -0,0 +1,375 @@
+/*
+ * SPI driver for Micrel/Kendin KS8995M ethernet switch
+ *
+ * Copyright (C) 2008 Gabor Juhos <juhosg at openwrt.org>
+ *
+ * This file was based on: drivers/spi/at25.c
+ * Copyright (C) 2006 David Brownell
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+
+#include <linux/spi/spi.h>
+
+#define DRV_VERSION "0.1.1"
+#define DRV_DESC "Micrel KS8995 Ethernet switch SPI driver"
+
+/* ------------------------------------------------------------------------ */
+
+#define KS8995_REG_ID0 0x00 /* Chip ID0 */
+#define KS8995_REG_ID1 0x01 /* Chip ID1 */
+
+#define KS8995_REG_GC0 0x02 /* Global Control 0 */
+#define KS8995_REG_GC1 0x03 /* Global Control 1 */
+#define KS8995_REG_GC2 0x04 /* Global Control 2 */
+#define KS8995_REG_GC3 0x05 /* Global Control 3 */
+#define KS8995_REG_GC4 0x06 /* Global Control 4 */
+#define KS8995_REG_GC5 0x07 /* Global Control 5 */
+#define KS8995_REG_GC6 0x08 /* Global Control 6 */
+#define KS8995_REG_GC7 0x09 /* Global Control 7 */
+#define KS8995_REG_GC8 0x0a /* Global Control 8 */
+#define KS8995_REG_GC9 0x0b /* Global Control 9 */
+
+#define KS8995_REG_PC(p, r) ((0x10 * p) + r) /* Port Control */
+#define KS8995_REG_PS(p, r) ((0x10 * p) + r + 0xe) /* Port Status */
+
+#define KS8995_REG_TPC0 0x60 /* TOS Priority Control 0 */
+#define KS8995_REG_TPC1 0x61 /* TOS Priority Control 1 */
+#define KS8995_REG_TPC2 0x62 /* TOS Priority Control 2 */
+#define KS8995_REG_TPC3 0x63 /* TOS Priority Control 3 */
+#define KS8995_REG_TPC4 0x64 /* TOS Priority Control 4 */
+#define KS8995_REG_TPC5 0x65 /* TOS Priority Control 5 */
+#define KS8995_REG_TPC6 0x66 /* TOS Priority Control 6 */
+#define KS8995_REG_TPC7 0x67 /* TOS Priority Control 7 */
+
+#define KS8995_REG_MAC0 0x68 /* MAC address 0 */
+#define KS8995_REG_MAC1 0x69 /* MAC address 1 */
+#define KS8995_REG_MAC2 0x6a /* MAC address 2 */
+#define KS8995_REG_MAC3 0x6b /* MAC address 3 */
+#define KS8995_REG_MAC4 0x6c /* MAC address 4 */
+#define KS8995_REG_MAC5 0x6d /* MAC address 5 */
+
+#define KS8995_REG_IAC0 0x6e /* Indirect Access Control 0 */
+#define KS8995_REG_IAC1	0x6f	/* Indirect Access Control 1 */
+#define KS8995_REG_IAD7 0x70 /* Indirect Access Data 7 */
+#define KS8995_REG_IAD6 0x71 /* Indirect Access Data 6 */
+#define KS8995_REG_IAD5 0x72 /* Indirect Access Data 5 */
+#define KS8995_REG_IAD4 0x73 /* Indirect Access Data 4 */
+#define KS8995_REG_IAD3 0x74 /* Indirect Access Data 3 */
+#define KS8995_REG_IAD2 0x75 /* Indirect Access Data 2 */
+#define KS8995_REG_IAD1 0x76 /* Indirect Access Data 1 */
+#define KS8995_REG_IAD0 0x77 /* Indirect Access Data 0 */
+
+#define KS8995_REGS_SIZE 0x80
+
+#define ID1_CHIPID_M 0xf
+#define ID1_CHIPID_S 4
+#define ID1_REVISION_M 0x7
+#define ID1_REVISION_S 1
+#define ID1_START_SW 1 /* start the switch */
+
+#define FAMILY_KS8995 0x95
+#define CHIPID_M 0
+
+#define KS8995_CMD_WRITE 0x02U
+#define KS8995_CMD_READ 0x03U
+
+#define KS8995_RESET_DELAY 10 /* usec */
+
+struct ks8995_pdata {
+ /* not yet implemented */
+};
+
+struct ks8995_switch {
+ struct spi_device *spi;
+ struct mutex lock;
+ struct ks8995_pdata *pdata;
+};
+
+static inline u8 get_chip_id(u8 val)
+{
+ return (val >> ID1_CHIPID_S) & ID1_CHIPID_M;
+}
+
+static inline u8 get_chip_rev(u8 val)
+{
+ return (val >> ID1_REVISION_S) & ID1_REVISION_M;
+}
+
+/* ------------------------------------------------------------------------ */
+static int ks8995_read(struct ks8995_switch *ks, char *buf,
+ unsigned offset, size_t count)
+{
+ u8 cmd[2];
+ struct spi_transfer t[2];
+ struct spi_message m;
+ int err;
+
+ spi_message_init(&m);
+
+ memset(&t, 0, sizeof(t));
+
+ t[0].tx_buf = cmd;
+ t[0].len = sizeof(cmd);
+ spi_message_add_tail(&t[0], &m);
+
+ t[1].rx_buf = buf;
+ t[1].len = count;
+ spi_message_add_tail(&t[1], &m);
+
+ cmd[0] = KS8995_CMD_READ;
+ cmd[1] = offset;
+
+ mutex_lock(&ks->lock);
+ err = spi_sync(ks->spi, &m);
+ mutex_unlock(&ks->lock);
+
+ return err ? err : count;
+}
+
+
+static int ks8995_write(struct ks8995_switch *ks, char *buf,
+ unsigned offset, size_t count)
+{
+ u8 cmd[2];
+ struct spi_transfer t[2];
+ struct spi_message m;
+ int err;
+
+ spi_message_init(&m);
+
+ memset(&t, 0, sizeof(t));
+
+ t[0].tx_buf = cmd;
+ t[0].len = sizeof(cmd);
+ spi_message_add_tail(&t[0], &m);
+
+ t[1].tx_buf = buf;
+ t[1].len = count;
+ spi_message_add_tail(&t[1], &m);
+
+ cmd[0] = KS8995_CMD_WRITE;
+ cmd[1] = offset;
+
+ mutex_lock(&ks->lock);
+ err = spi_sync(ks->spi, &m);
+ mutex_unlock(&ks->lock);
+
+ return err ? err : count;
+}
+
+static inline int ks8995_read_reg(struct ks8995_switch *ks, u8 addr, u8 *buf)
+{
+ return (ks8995_read(ks, buf, addr, 1) != 1);
+}
+
+static inline int ks8995_write_reg(struct ks8995_switch *ks, u8 addr, u8 val)
+{
+ char buf = val;
+
+ return (ks8995_write(ks, &buf, addr, 1) != 1);
+}
+
+/* ------------------------------------------------------------------------ */
+
+static int ks8995_stop(struct ks8995_switch *ks)
+{
+ return ks8995_write_reg(ks, KS8995_REG_ID1, 0);
+}
+
+static int ks8995_start(struct ks8995_switch *ks)
+{
+ return ks8995_write_reg(ks, KS8995_REG_ID1, 1);
+}
+
+static int ks8995_reset(struct ks8995_switch *ks)
+{
+ int err;
+
+ err = ks8995_stop(ks);
+ if (err)
+ return err;
+
+ udelay(KS8995_RESET_DELAY);
+
+ return ks8995_start(ks);
+}
+
+/* ------------------------------------------------------------------------ */
+
+static ssize_t ks8995_registers_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count)
+{
+ struct device *dev;
+ struct ks8995_switch *ks8995;
+
+ dev = container_of(kobj, struct device, kobj);
+ ks8995 = dev_get_drvdata(dev);
+
+ if (unlikely(off > KS8995_REGS_SIZE))
+ return 0;
+
+ if ((off + count) > KS8995_REGS_SIZE)
+ count = KS8995_REGS_SIZE - off;
+
+ if (unlikely(!count))
+ return count;
+
+ return ks8995_read(ks8995, buf, off, count);
+}
+
+
+static ssize_t ks8995_registers_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count)
+{
+ struct device *dev;
+ struct ks8995_switch *ks8995;
+
+ dev = container_of(kobj, struct device, kobj);
+ ks8995 = dev_get_drvdata(dev);
+
+ if (unlikely(off >= KS8995_REGS_SIZE))
+ return -EFBIG;
+
+ if ((off + count) > KS8995_REGS_SIZE)
+ count = KS8995_REGS_SIZE - off;
+
+ if (unlikely(!count))
+ return count;
+
+ return ks8995_write(ks8995, buf, off, count);
+}
+
+
+static struct bin_attribute ks8995_registers_attr = {
+ .attr = {
+ .name = "registers",
+ .mode = S_IRUSR | S_IWUSR,
+ },
+ .size = KS8995_REGS_SIZE,
+ .read = ks8995_registers_read,
+ .write = ks8995_registers_write,
+};
+
+/* ------------------------------------------------------------------------ */
+
+static int __devinit ks8995_probe(struct spi_device *spi)
+{
+ struct ks8995_switch *ks;
+ struct ks8995_pdata *pdata;
+ u8 ids[2];
+ int err;
+
+ /* Chip description */
+ pdata = spi->dev.platform_data;
+
+ ks = kzalloc(sizeof(*ks), GFP_KERNEL);
+ if (!ks) {
+ dev_err(&spi->dev, "no memory for private data\n");
+ return -ENOMEM;
+ }
+
+ mutex_init(&ks->lock);
+ ks->pdata = pdata;
+ ks->spi = spi_dev_get(spi);
+ dev_set_drvdata(&spi->dev, ks);
+
+ spi->mode = SPI_MODE_0;
+ spi->bits_per_word = 8;
+ err = spi_setup(spi);
+ if (err) {
+ dev_err(&spi->dev, "spi_setup failed, err=%d\n", err);
+ goto err_drvdata;
+ }
+
+ err = ks8995_read(ks, ids, KS8995_REG_ID0, sizeof(ids));
+ if (err < 0) {
+ dev_err(&spi->dev, "unable to read id registers, err=%d\n",
+ err);
+ goto err_drvdata;
+ }
+
+ switch (ids[0]) {
+ case FAMILY_KS8995:
+ break;
+ default:
+ dev_err(&spi->dev, "unknown family id:%02x\n", ids[0]);
+ err = -ENODEV;
+ goto err_drvdata;
+ }
+
+ err = ks8995_reset(ks);
+ if (err)
+ goto err_drvdata;
+
+ err = sysfs_create_bin_file(&spi->dev.kobj, &ks8995_registers_attr);
+ if (err) {
+ dev_err(&spi->dev, "unable to create sysfs file, err=%d\n",
+ err);
+ goto err_drvdata;
+ }
+
+ dev_info(&spi->dev, "KS89%02X device found, Chip ID:%01x, "
+ "Revision:%01x\n", ids[0],
+ get_chip_id(ids[1]), get_chip_rev(ids[1]));
+
+ return 0;
+
+err_drvdata:
+ dev_set_drvdata(&spi->dev, NULL);
+ kfree(ks);
+ return err;
+}
+
+static int __devexit ks8995_remove(struct spi_device *spi)
+{
+	struct ks8995_switch *ks8995;
+
+ ks8995 = dev_get_drvdata(&spi->dev);
+ sysfs_remove_bin_file(&spi->dev.kobj, &ks8995_registers_attr);
+
+ dev_set_drvdata(&spi->dev, NULL);
+ kfree(ks8995);
+
+ return 0;
+}
+
+/* ------------------------------------------------------------------------ */
+
+static struct spi_driver ks8995_driver = {
+ .driver = {
+ .name = "spi-ks8995",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+ .probe = ks8995_probe,
+ .remove = __devexit_p(ks8995_remove),
+};
+
+static int __init ks8995_init(void)
+{
+ printk(KERN_INFO DRV_DESC " version " DRV_VERSION"\n");
+
+ return spi_register_driver(&ks8995_driver);
+}
+module_init(ks8995_init);
+
+static void __exit ks8995_exit(void)
+{
+ spi_unregister_driver(&ks8995_driver);
+}
+module_exit(ks8995_exit);
+
+MODULE_DESCRIPTION(DRV_DESC);
+MODULE_VERSION(DRV_VERSION);
+MODULE_AUTHOR("Gabor Juhos <juhosg at openwrt.org>");
+MODULE_LICENSE("GPL v2");
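
The new switch driver binds by SPI modalias, so a board needs a matching spi_board_info entry for the probe above to run. A hypothetical registration sketch; bus number, chip select and clock rate are placeholders, only the modalias and SPI mode follow the driver above.

static struct spi_board_info my_ks8995_board_info __initdata = {
	.modalias	= "spi-ks8995",		/* matches ks8995_driver.driver.name */
	.max_speed_hz	= 5 * 1000 * 1000,	/* placeholder */
	.bus_num	= 0,			/* placeholder */
	.chip_select	= 1,			/* placeholder */
	.mode		= SPI_MODE_0,
};

static int __init my_board_register_ks8995(void)
{
	return spi_register_board_info(&my_ks8995_board_info, 1);
}
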
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index 89f829f5f725..df884dde2a51 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -162,7 +162,7 @@ static void del_chan(struct pppox_sock *sock)
{
spin_lock(&chan_lock);
clear_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap);
- rcu_assign_pointer(callid_sock[sock->proto.pptp.src_addr.call_id], NULL);
+ RCU_INIT_POINTER(callid_sock[sock->proto.pptp.src_addr.call_id], NULL);
spin_unlock(&chan_lock);
synchronize_rcu();
}
@@ -423,10 +423,8 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
lock_sock(sk);
opt->src_addr = sp->sa_addr.pptp;
- if (add_chan(po)) {
- release_sock(sk);
+ if (add_chan(po))
error = -EBUSY;
- }
release_sock(sk);
return error;
@@ -587,8 +585,8 @@ static int pptp_create(struct net *net, struct socket *sock)
po = pppox_sk(sk);
opt = &po->proto.pptp;
- opt->seq_sent = 0; opt->seq_recv = 0;
- opt->ack_recv = 0; opt->ack_sent = 0;
+ opt->seq_sent = 0; opt->seq_recv = 0xffffffff;
+ opt->ack_recv = 0; opt->ack_sent = 0xffffffff;
error = 0;
out:
diff --git a/drivers/net/team/Kconfig b/drivers/net/team/Kconfig
new file mode 100644
index 000000000000..248a144033ca
--- /dev/null
+++ b/drivers/net/team/Kconfig
@@ -0,0 +1,43 @@
+menuconfig NET_TEAM
+ tristate "Ethernet team driver support (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ ---help---
+	  This allows one to create virtual interfaces that team together
+	  multiple Ethernet devices.
+
+ Team devices can be added using the "ip" command from the
+ iproute2 package:
+
+ "ip link add link [ address MAC ] [ NAME ] type team"
+
+ To compile this driver as a module, choose M here: the module
+ will be called team.
+
+if NET_TEAM
+
+config NET_TEAM_MODE_ROUNDROBIN
+ tristate "Round-robin mode support"
+ depends on NET_TEAM
+ ---help---
+	  Basic mode where the port used for transmitting packets is selected
+	  in round-robin fashion using a packet counter.
+
+	  All added ports are set up to use the team's MAC address.
+
+ To compile this team mode as a module, choose M here: the module
+ will be called team_mode_roundrobin.
+
+config NET_TEAM_MODE_ACTIVEBACKUP
+ tristate "Active-backup mode support"
+ depends on NET_TEAM
+ ---help---
+	  Only one port is active at a time; the remaining ports are kept
+	  as backups.
+
+	  MAC addresses of ports are not modified; userspace is responsible
+	  for doing so.
+
+ To compile this team mode as a module, choose M here: the module
+ will be called team_mode_activebackup.
+
+endif # NET_TEAM
diff --git a/drivers/net/team/Makefile b/drivers/net/team/Makefile
new file mode 100644
index 000000000000..85f2028a87af
--- /dev/null
+++ b/drivers/net/team/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the network team driver
+#
+
+obj-$(CONFIG_NET_TEAM) += team.o
+obj-$(CONFIG_NET_TEAM_MODE_ROUNDROBIN) += team_mode_roundrobin.o
+obj-$(CONFIG_NET_TEAM_MODE_ACTIVEBACKUP) += team_mode_activebackup.o
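
The mode objects listed here plug into team.c (added below) through team_mode_register(). A stripped-down skeleton of such a mode module, using only the struct team_mode fields and transmit/receive hook signatures visible in the team.c hunk; the mode itself is a do-nothing stand-in, not the actual round-robin or active-backup code.

static bool dummy_mode_transmit(struct team *team, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);		/* a real mode picks a port instead */
	return false;
}

static rx_handler_result_t dummy_mode_receive(struct team *team,
					      struct team_port *port,
					      struct sk_buff *skb)
{
	return RX_HANDLER_ANOTHER;
}

static struct team_mode_ops dummy_mode_ops = {
	.transmit	= dummy_mode_transmit,
	.receive	= dummy_mode_receive,
};

static struct team_mode dummy_mode = {
	.kind	= "dummy",		/* loaded on demand as "team-mode-dummy" */
	.owner	= THIS_MODULE,
	.ops	= &dummy_mode_ops,
};

static int __init dummy_mode_init(void)
{
	return team_mode_register(&dummy_mode);
}
module_init(dummy_mode_init);

static void __exit dummy_mode_exit(void)
{
	team_mode_unregister(&dummy_mode);
}
module_exit(dummy_mode_exit);

MODULE_LICENSE("GPL");
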
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
new file mode 100644
index 000000000000..ed2a862b835d
--- /dev/null
+++ b/drivers/net/team/team.c
@@ -0,0 +1,1684 @@
+/*
+ * net/drivers/team/team.c - Network team device driver
+ * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/rcupdate.h>
+#include <linux/errno.h>
+#include <linux/ctype.h>
+#include <linux/notifier.h>
+#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/if_arp.h>
+#include <linux/socket.h>
+#include <linux/etherdevice.h>
+#include <linux/rtnetlink.h>
+#include <net/rtnetlink.h>
+#include <net/genetlink.h>
+#include <net/netlink.h>
+#include <linux/if_team.h>
+
+#define DRV_NAME "team"
+
+
+/**********
+ * Helpers
+ **********/
+
+#define team_port_exists(dev) (dev->priv_flags & IFF_TEAM_PORT)
+
+static struct team_port *team_port_get_rcu(const struct net_device *dev)
+{
+ struct team_port *port = rcu_dereference(dev->rx_handler_data);
+
+ return team_port_exists(dev) ? port : NULL;
+}
+
+static struct team_port *team_port_get_rtnl(const struct net_device *dev)
+{
+ struct team_port *port = rtnl_dereference(dev->rx_handler_data);
+
+ return team_port_exists(dev) ? port : NULL;
+}
+
+/*
+ * Since the ability to change the MAC address of an open port device is
+ * tested in team_port_add, this function can be called without checking
+ * the return value.
+ */
+static int __set_port_mac(struct net_device *port_dev,
+ const unsigned char *dev_addr)
+{
+ struct sockaddr addr;
+
+ memcpy(addr.sa_data, dev_addr, ETH_ALEN);
+ addr.sa_family = ARPHRD_ETHER;
+ return dev_set_mac_address(port_dev, &addr);
+}
+
+int team_port_set_orig_mac(struct team_port *port)
+{
+ return __set_port_mac(port->dev, port->orig.dev_addr);
+}
+
+int team_port_set_team_mac(struct team_port *port)
+{
+ return __set_port_mac(port->dev, port->team->dev->dev_addr);
+}
+EXPORT_SYMBOL(team_port_set_team_mac);
+
+
+/*******************
+ * Options handling
+ *******************/
+
+struct team_option *__team_find_option(struct team *team, const char *opt_name)
+{
+ struct team_option *option;
+
+ list_for_each_entry(option, &team->option_list, list) {
+ if (strcmp(option->name, opt_name) == 0)
+ return option;
+ }
+ return NULL;
+}
+
+int team_options_register(struct team *team,
+ const struct team_option *option,
+ size_t option_count)
+{
+ int i;
+ struct team_option **dst_opts;
+ int err;
+
+ dst_opts = kzalloc(sizeof(struct team_option *) * option_count,
+ GFP_KERNEL);
+ if (!dst_opts)
+ return -ENOMEM;
+ for (i = 0; i < option_count; i++, option++) {
+ if (__team_find_option(team, option->name)) {
+ err = -EEXIST;
+ goto rollback;
+ }
+ dst_opts[i] = kmemdup(option, sizeof(*option), GFP_KERNEL);
+ if (!dst_opts[i]) {
+ err = -ENOMEM;
+ goto rollback;
+ }
+ }
+
+ for (i = 0; i < option_count; i++)
+ list_add_tail(&dst_opts[i]->list, &team->option_list);
+
+ kfree(dst_opts);
+ return 0;
+
+rollback:
+ for (i = 0; i < option_count; i++)
+ kfree(dst_opts[i]);
+
+ kfree(dst_opts);
+ return err;
+}
+
+EXPORT_SYMBOL(team_options_register);
+
+static void __team_options_change_check(struct team *team,
+ struct team_option *changed_option);
+
+static void __team_options_unregister(struct team *team,
+ const struct team_option *option,
+ size_t option_count)
+{
+ int i;
+
+ for (i = 0; i < option_count; i++, option++) {
+ struct team_option *del_opt;
+
+ del_opt = __team_find_option(team, option->name);
+ if (del_opt) {
+ list_del(&del_opt->list);
+ kfree(del_opt);
+ }
+ }
+}
+
+void team_options_unregister(struct team *team,
+ const struct team_option *option,
+ size_t option_count)
+{
+ __team_options_unregister(team, option, option_count);
+ __team_options_change_check(team, NULL);
+}
+EXPORT_SYMBOL(team_options_unregister);
+
+static int team_option_get(struct team *team, struct team_option *option,
+ void *arg)
+{
+ return option->getter(team, arg);
+}
+
+static int team_option_set(struct team *team, struct team_option *option,
+ void *arg)
+{
+ int err;
+
+ err = option->setter(team, arg);
+ if (err)
+ return err;
+
+ __team_options_change_check(team, option);
+ return err;
+}
+
+/****************
+ * Mode handling
+ ****************/
+
+static LIST_HEAD(mode_list);
+static DEFINE_SPINLOCK(mode_list_lock);
+
+static struct team_mode *__find_mode(const char *kind)
+{
+ struct team_mode *mode;
+
+ list_for_each_entry(mode, &mode_list, list) {
+ if (strcmp(mode->kind, kind) == 0)
+ return mode;
+ }
+ return NULL;
+}
+
+static bool is_good_mode_name(const char *name)
+{
+ while (*name != '\0') {
+ if (!isalpha(*name) && !isdigit(*name) && *name != '_')
+ return false;
+ name++;
+ }
+ return true;
+}
+
+int team_mode_register(struct team_mode *mode)
+{
+ int err = 0;
+
+ if (!is_good_mode_name(mode->kind) ||
+ mode->priv_size > TEAM_MODE_PRIV_SIZE)
+ return -EINVAL;
+ spin_lock(&mode_list_lock);
+ if (__find_mode(mode->kind)) {
+ err = -EEXIST;
+ goto unlock;
+ }
+ list_add_tail(&mode->list, &mode_list);
+unlock:
+ spin_unlock(&mode_list_lock);
+ return err;
+}
+EXPORT_SYMBOL(team_mode_register);
+
+int team_mode_unregister(struct team_mode *mode)
+{
+ spin_lock(&mode_list_lock);
+ list_del_init(&mode->list);
+ spin_unlock(&mode_list_lock);
+ return 0;
+}
+EXPORT_SYMBOL(team_mode_unregister);
+
+static struct team_mode *team_mode_get(const char *kind)
+{
+ struct team_mode *mode;
+
+ spin_lock(&mode_list_lock);
+ mode = __find_mode(kind);
+ if (!mode) {
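+ /*
+ * Mode is not registered yet. Drop the lock, try to load the
+ * mode module (which registers itself on init) and look the
+ * mode up again.
+ */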
+ spin_unlock(&mode_list_lock);
+ request_module("team-mode-%s", kind);
+ spin_lock(&mode_list_lock);
+ mode = __find_mode(kind);
+ }
+ if (mode)
+ if (!try_module_get(mode->owner))
+ mode = NULL;
+
+ spin_unlock(&mode_list_lock);
+ return mode;
+}
+
+static void team_mode_put(const struct team_mode *mode)
+{
+ module_put(mode->owner);
+}
+
+static bool team_dummy_transmit(struct team *team, struct sk_buff *skb)
+{
+ dev_kfree_skb_any(skb);
+ return false;
+}
+
+static rx_handler_result_t team_dummy_receive(struct team *team,
+ struct team_port *port,
+ struct sk_buff *skb)
+{
+ return RX_HANDLER_ANOTHER;
+}
+
+static void team_adjust_ops(struct team *team)
+{
+ /*
+ * To avoid checks in rx/tx skb paths, ensure here that non-null and
+ * correct ops are always set.
+ */
+
+ if (list_empty(&team->port_list) ||
+ !team->mode || !team->mode->ops->transmit)
+ team->ops.transmit = team_dummy_transmit;
+ else
+ team->ops.transmit = team->mode->ops->transmit;
+
+ if (list_empty(&team->port_list) ||
+ !team->mode || !team->mode->ops->receive)
+ team->ops.receive = team_dummy_receive;
+ else
+ team->ops.receive = team->mode->ops->receive;
+}
+
+/*
+ * We can benefit from the fact that no port can be present at the time of
+ * a mode change. Therefore no packets are in flight, so there is no need to
+ * set the mode operations in any special way.
+ */
+static int __team_change_mode(struct team *team,
+ const struct team_mode *new_mode)
+{
+ /* Check if mode was previously set and do cleanup if so */
+ if (team->mode) {
+ void (*exit_op)(struct team *team) = team->ops.exit;
+
+ /* Clear ops area so no callback is called any longer */
+ memset(&team->ops, 0, sizeof(struct team_mode_ops));
+ team_adjust_ops(team);
+
+ if (exit_op)
+ exit_op(team);
+ team_mode_put(team->mode);
+ team->mode = NULL;
+ /* zero private data area */
+ memset(&team->mode_priv, 0,
+ sizeof(struct team) - offsetof(struct team, mode_priv));
+ }
+
+ if (!new_mode)
+ return 0;
+
+ if (new_mode->ops->init) {
+ int err;
+
+ err = new_mode->ops->init(team);
+ if (err)
+ return err;
+ }
+
+ team->mode = new_mode;
+ memcpy(&team->ops, new_mode->ops, sizeof(struct team_mode_ops));
+ team_adjust_ops(team);
+
+ return 0;
+}
+
+static int team_change_mode(struct team *team, const char *kind)
+{
+ struct team_mode *new_mode;
+ struct net_device *dev = team->dev;
+ int err;
+
+ if (!list_empty(&team->port_list)) {
+ netdev_err(dev, "No ports can be present during mode change\n");
+ return -EBUSY;
+ }
+
+ if (team->mode && strcmp(team->mode->kind, kind) == 0) {
+ netdev_err(dev, "Unable to change to the same mode the team is in\n");
+ return -EINVAL;
+ }
+
+ new_mode = team_mode_get(kind);
+ if (!new_mode) {
+ netdev_err(dev, "Mode \"%s\" not found\n", kind);
+ return -EINVAL;
+ }
+
+ err = __team_change_mode(team, new_mode);
+ if (err) {
+ netdev_err(dev, "Failed to change to mode \"%s\"\n", kind);
+ team_mode_put(new_mode);
+ return err;
+ }
+
+ netdev_info(dev, "Mode changed to \"%s\"\n", kind);
+ return 0;
+}
+
+
+/************************
+ * Rx path frame handler
+ ************************/
+
+/* note: already called with rcu_read_lock */
+static rx_handler_result_t team_handle_frame(struct sk_buff **pskb)
+{
+ struct sk_buff *skb = *pskb;
+ struct team_port *port;
+ struct team *team;
+ rx_handler_result_t res;
+
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (!skb)
+ return RX_HANDLER_CONSUMED;
+
+ *pskb = skb;
+
+ port = team_port_get_rcu(skb->dev);
+ team = port->team;
+
+ res = team->ops.receive(team, port, skb);
+ if (res == RX_HANDLER_ANOTHER) {
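+ /*
+ * The mode accepted the frame for the team device; account
+ * it and present it to the stack as received on the team
+ * device.
+ */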
+ struct team_pcpu_stats *pcpu_stats;
+
+ pcpu_stats = this_cpu_ptr(team->pcpu_stats);
+ u64_stats_update_begin(&pcpu_stats->syncp);
+ pcpu_stats->rx_packets++;
+ pcpu_stats->rx_bytes += skb->len;
+ if (skb->pkt_type == PACKET_MULTICAST)
+ pcpu_stats->rx_multicast++;
+ u64_stats_update_end(&pcpu_stats->syncp);
+
+ skb->dev = team->dev;
+ } else {
+ this_cpu_inc(team->pcpu_stats->rx_dropped);
+ }
+
+ return res;
+}
+
+
+/****************
+ * Port handling
+ ****************/
+
+static bool team_port_find(const struct team *team,
+ const struct team_port *port)
+{
+ struct team_port *cur;
+
+ list_for_each_entry(cur, &team->port_list, list)
+ if (cur == port)
+ return true;
+ return false;
+}
+
+/*
+ * Add/delete a port to/from the team port list. Writes are guarded by
+ * rtnl_lock. Takes care of correct port->index setup (might be racy).
+ */
+static void team_port_list_add_port(struct team *team,
+ struct team_port *port)
+{
+ port->index = team->port_count++;
+ hlist_add_head_rcu(&port->hlist,
+ team_port_index_hash(team, port->index));
+ list_add_tail_rcu(&port->list, &team->port_list);
+}
+
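+/*
+ * Close the hole left by a removed port: shift the index of every following
+ * port down by one and re-hash it, so port indexes stay contiguous.
+ */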
+static void __reconstruct_port_hlist(struct team *team, int rm_index)
+{
+ int i;
+ struct team_port *port;
+
+ for (i = rm_index + 1; i < team->port_count; i++) {
+ port = team_get_port_by_index(team, i);
+ hlist_del_rcu(&port->hlist);
+ port->index--;
+ hlist_add_head_rcu(&port->hlist,
+ team_port_index_hash(team, port->index));
+ }
+}
+
+static void team_port_list_del_port(struct team *team,
+ struct team_port *port)
+{
+ int rm_index = port->index;
+
+ hlist_del_rcu(&port->hlist);
+ list_del_rcu(&port->list);
+ __reconstruct_port_hlist(team, rm_index);
+ team->port_count--;
+}
+
+#define TEAM_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \
+ NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
+ NETIF_F_HIGHDMA | NETIF_F_LRO)
+
+static void __team_compute_features(struct team *team)
+{
+ struct team_port *port;
+ u32 vlan_features = TEAM_VLAN_FEATURES;
+ unsigned short max_hard_header_len = ETH_HLEN;
+
+ list_for_each_entry(port, &team->port_list, list) {
+ vlan_features = netdev_increment_features(vlan_features,
+ port->dev->vlan_features,
+ TEAM_VLAN_FEATURES);
+
+ if (port->dev->hard_header_len > max_hard_header_len)
+ max_hard_header_len = port->dev->hard_header_len;
+ }
+
+ team->dev->vlan_features = vlan_features;
+ team->dev->hard_header_len = max_hard_header_len;
+
+ netdev_change_features(team->dev);
+}
+
+static void team_compute_features(struct team *team)
+{
+ mutex_lock(&team->lock);
+ __team_compute_features(team);
+ mutex_unlock(&team->lock);
+}
+
+static int team_port_enter(struct team *team, struct team_port *port)
+{
+ int err = 0;
+
+ dev_hold(team->dev);
+ port->dev->priv_flags |= IFF_TEAM_PORT;
+ if (team->ops.port_enter) {
+ err = team->ops.port_enter(team, port);
+ if (err) {
+ netdev_err(team->dev, "Device %s failed to enter team mode\n",
+ port->dev->name);
+ goto err_port_enter;
+ }
+ }
+
+ return 0;
+
+err_port_enter:
+ port->dev->priv_flags &= ~IFF_TEAM_PORT;
+ dev_put(team->dev);
+
+ return err;
+}
+
+static void team_port_leave(struct team *team, struct team_port *port)
+{
+ if (team->ops.port_leave)
+ team->ops.port_leave(team, port);
+ port->dev->priv_flags &= ~IFF_TEAM_PORT;
+ dev_put(team->dev);
+}
+
+static void __team_port_change_check(struct team_port *port, bool linkup);
+
+static int team_port_add(struct team *team, struct net_device *port_dev)
+{
+ struct net_device *dev = team->dev;
+ struct team_port *port;
+ char *portname = port_dev->name;
+ int err;
+
+ if (port_dev->flags & IFF_LOOPBACK ||
+ port_dev->type != ARPHRD_ETHER) {
+ netdev_err(dev, "Device %s is of an unsupported type\n",
+ portname);
+ return -EINVAL;
+ }
+
+ if (team_port_exists(port_dev)) {
+ netdev_err(dev, "Device %s is already a port "
+ "of a team device\n", portname);
+ return -EBUSY;
+ }
+
+ if (port_dev->flags & IFF_UP) {
+ netdev_err(dev, "Device %s is up. Set it down before adding it as a team port\n",
+ portname);
+ return -EBUSY;
+ }
+
+ port = kzalloc(sizeof(struct team_port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ port->dev = port_dev;
+ port->team = team;
+
+ port->orig.mtu = port_dev->mtu;
+ err = dev_set_mtu(port_dev, dev->mtu);
+ if (err) {
+ netdev_dbg(dev, "Error %d calling dev_set_mtu\n", err);
+ goto err_set_mtu;
+ }
+
+ memcpy(port->orig.dev_addr, port_dev->dev_addr, ETH_ALEN);
+
+ err = team_port_enter(team, port);
+ if (err) {
+ netdev_err(dev, "Device %s failed to enter team mode\n",
+ portname);
+ goto err_port_enter;
+ }
+
+ err = dev_open(port_dev);
+ if (err) {
+ netdev_dbg(dev, "Device %s opening failed\n",
+ portname);
+ goto err_dev_open;
+ }
+
+ err = vlan_vids_add_by_dev(port_dev, dev);
+ if (err) {
+ netdev_err(dev, "Failed to add vlan ids to device %s\n",
+ portname);
+ goto err_vids_add;
+ }
+
+ err = netdev_set_master(port_dev, dev);
+ if (err) {
+ netdev_err(dev, "Device %s failed to set master\n", portname);
+ goto err_set_master;
+ }
+
+ err = netdev_rx_handler_register(port_dev, team_handle_frame,
+ port);
+ if (err) {
+ netdev_err(dev, "Device %s failed to register rx_handler\n",
+ portname);
+ goto err_handler_register;
+ }
+
+ team_port_list_add_port(team, port);
+ team_adjust_ops(team);
+ __team_compute_features(team);
+ __team_port_change_check(port, !!netif_carrier_ok(port_dev));
+
+ netdev_info(dev, "Port device %s added\n", portname);
+
+ return 0;
+
+err_handler_register:
+ netdev_set_master(port_dev, NULL);
+
+err_set_master:
+ vlan_vids_del_by_dev(port_dev, dev);
+
+err_vids_add:
+ dev_close(port_dev);
+
+err_dev_open:
+ team_port_leave(team, port);
+ team_port_set_orig_mac(port);
+
+err_port_enter:
+ dev_set_mtu(port_dev, port->orig.mtu);
+
+err_set_mtu:
+ kfree(port);
+
+ return err;
+}
+
+static int team_port_del(struct team *team, struct net_device *port_dev)
+{
+ struct net_device *dev = team->dev;
+ struct team_port *port;
+ char *portname = port_dev->name;
+
+ port = team_port_get_rtnl(port_dev);
+ if (!port || !team_port_find(team, port)) {
+ netdev_err(dev, "Device %s does not act as a port of this team\n",
+ portname);
+ return -ENOENT;
+ }
+
+ __team_port_change_check(port, false);
+ team_port_list_del_port(team, port);
+ team_adjust_ops(team);
+ netdev_rx_handler_unregister(port_dev);
+ netdev_set_master(port_dev, NULL);
+ vlan_vids_del_by_dev(port_dev, dev);
+ dev_close(port_dev);
+ team_port_leave(team, port);
+ team_port_set_orig_mac(port);
+ dev_set_mtu(port_dev, port->orig.mtu);
+ synchronize_rcu();
+ kfree(port);
+ netdev_info(dev, "Port device %s removed\n", portname);
+ __team_compute_features(team);
+
+ return 0;
+}
+
+
+/*****************
+ * Net device ops
+ *****************/
+
+static const char team_no_mode_kind[] = "*NOMODE*";
+
+static int team_mode_option_get(struct team *team, void *arg)
+{
+ const char **str = arg;
+
+ *str = team->mode ? team->mode->kind : team_no_mode_kind;
+ return 0;
+}
+
+static int team_mode_option_set(struct team *team, void *arg)
+{
+ const char **str = arg;
+
+ return team_change_mode(team, *str);
+}
+
+static const struct team_option team_options[] = {
+ {
+ .name = "mode",
+ .type = TEAM_OPTION_TYPE_STRING,
+ .getter = team_mode_option_get,
+ .setter = team_mode_option_set,
+ },
+};
+
+static int team_init(struct net_device *dev)
+{
+ struct team *team = netdev_priv(dev);
+ int i;
+ int err;
+
+ team->dev = dev;
+ mutex_init(&team->lock);
+
+ team->pcpu_stats = alloc_percpu(struct team_pcpu_stats);
+ if (!team->pcpu_stats)
+ return -ENOMEM;
+
+ for (i = 0; i < TEAM_PORT_HASHENTRIES; i++)
+ INIT_HLIST_HEAD(&team->port_hlist[i]);
+ INIT_LIST_HEAD(&team->port_list);
+
+ team_adjust_ops(team);
+
+ INIT_LIST_HEAD(&team->option_list);
+ err = team_options_register(team, team_options, ARRAY_SIZE(team_options));
+ if (err)
+ goto err_options_register;
+ netif_carrier_off(dev);
+
+ return 0;
+
+err_options_register:
+ free_percpu(team->pcpu_stats);
+
+ return err;
+}
+
+static void team_uninit(struct net_device *dev)
+{
+ struct team *team = netdev_priv(dev);
+ struct team_port *port;
+ struct team_port *tmp;
+
+ mutex_lock(&team->lock);
+ list_for_each_entry_safe(port, tmp, &team->port_list, list)
+ team_port_del(team, port->dev);
+
+ __team_change_mode(team, NULL); /* cleanup */
+ __team_options_unregister(team, team_options, ARRAY_SIZE(team_options));
+ mutex_unlock(&team->lock);
+}
+
+static void team_destructor(struct net_device *dev)
+{
+ struct team *team = netdev_priv(dev);
+
+ free_percpu(team->pcpu_stats);
+ free_netdev(dev);
+}
+
+static int team_open(struct net_device *dev)
+{
+ netif_carrier_on(dev);
+ return 0;
+}
+
+static int team_close(struct net_device *dev)
+{
+ netif_carrier_off(dev);
+ return 0;
+}
+
+/*
+ * note: already called with rcu_read_lock
+ */
+static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct team *team = netdev_priv(dev);
+ bool tx_success = false;
+ unsigned int len = skb->len;
+
+ tx_success = team->ops.transmit(team, skb);
+ if (tx_success) {
+ struct team_pcpu_stats *pcpu_stats;
+
+ pcpu_stats = this_cpu_ptr(team->pcpu_stats);
+ u64_stats_update_begin(&pcpu_stats->syncp);
+ pcpu_stats->tx_packets++;
+ pcpu_stats->tx_bytes += len;
+ u64_stats_update_end(&pcpu_stats->syncp);
+ } else {
+ this_cpu_inc(team->pcpu_stats->tx_dropped);
+ }
+
+ return NETDEV_TX_OK;
+}
+
+static void team_change_rx_flags(struct net_device *dev, int change)
+{
+ struct team *team = netdev_priv(dev);
+ struct team_port *port;
+ int inc;
+
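+ /* Propagate promiscuity/allmulti refcount changes to every port device. */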
+ rcu_read_lock();
+ list_for_each_entry_rcu(port, &team->port_list, list) {
+ if (change & IFF_PROMISC) {
+ inc = dev->flags & IFF_PROMISC ? 1 : -1;
+ dev_set_promiscuity(port->dev, inc);
+ }
+ if (change & IFF_ALLMULTI) {
+ inc = dev->flags & IFF_ALLMULTI ? 1 : -1;
+ dev_set_allmulti(port->dev, inc);
+ }
+ }
+ rcu_read_unlock();
+}
+
+static void team_set_rx_mode(struct net_device *dev)
+{
+ struct team *team = netdev_priv(dev);
+ struct team_port *port;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(port, &team->port_list, list) {
+ dev_uc_sync(port->dev, dev);
+ dev_mc_sync(port->dev, dev);
+ }
+ rcu_read_unlock();
+}
+
+static int team_set_mac_address(struct net_device *dev, void *p)
+{
+ struct team *team = netdev_priv(dev);
+ struct team_port *port;
+ struct sockaddr *addr = p;
+
+ memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+ rcu_read_lock();
+ list_for_each_entry_rcu(port, &team->port_list, list)
+ if (team->ops.port_change_mac)
+ team->ops.port_change_mac(team, port);
+ rcu_read_unlock();
+ return 0;
+}
+
+static int team_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct team *team = netdev_priv(dev);
+ struct team_port *port;
+ int err;
+
+ /*
+ * Although this is a reader, it's guarded by the team lock. It's not
+ * possible to traverse the list in reverse under rcu_read_lock.
+ */
+ mutex_lock(&team->lock);
+ list_for_each_entry(port, &team->port_list, list) {
+ err = dev_set_mtu(port->dev, new_mtu);
+ if (err) {
+ netdev_err(dev, "Device %s failed to change mtu",
+ port->dev->name);
+ goto unwind;
+ }
+ }
+ mutex_unlock(&team->lock);
+
+ dev->mtu = new_mtu;
+
+ return 0;
+
+unwind:
+ list_for_each_entry_continue_reverse(port, &team->port_list, list)
+ dev_set_mtu(port->dev, dev->mtu);
+ mutex_unlock(&team->lock);
+
+ return err;
+}
+
+static struct rtnl_link_stats64 *
+team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+ struct team *team = netdev_priv(dev);
+ struct team_pcpu_stats *p;
+ u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes;
+ u32 rx_dropped = 0, tx_dropped = 0;
+ unsigned int start;
+ int i;
+
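+ /*
+ * Sum the per-CPU counters; each read is retried if the writer side
+ * updated the syncp sequence counter in the meantime.
+ */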
+ for_each_possible_cpu(i) {
+ p = per_cpu_ptr(team->pcpu_stats, i);
+ do {
+ start = u64_stats_fetch_begin_bh(&p->syncp);
+ rx_packets = p->rx_packets;
+ rx_bytes = p->rx_bytes;
+ rx_multicast = p->rx_multicast;
+ tx_packets = p->tx_packets;
+ tx_bytes = p->tx_bytes;
+ } while (u64_stats_fetch_retry_bh(&p->syncp, start));
+
+ stats->rx_packets += rx_packets;
+ stats->rx_bytes += rx_bytes;
+ stats->multicast += rx_multicast;
+ stats->tx_packets += tx_packets;
+ stats->tx_bytes += tx_bytes;
+ /*
+ * rx_dropped & tx_dropped are u32, updated
+ * without syncp protection.
+ */
+ rx_dropped += p->rx_dropped;
+ tx_dropped += p->tx_dropped;
+ }
+ stats->rx_dropped = rx_dropped;
+ stats->tx_dropped = tx_dropped;
+ return stats;
+}
+
+static int team_vlan_rx_add_vid(struct net_device *dev, uint16_t vid)
+{
+ struct team *team = netdev_priv(dev);
+ struct team_port *port;
+ int err;
+
+ /*
+ * Although this is a reader, it's guarded by the team lock. It's not
+ * possible to traverse the list in reverse under rcu_read_lock.
+ */
+ mutex_lock(&team->lock);
+ list_for_each_entry(port, &team->port_list, list) {
+ err = vlan_vid_add(port->dev, vid);
+ if (err)
+ goto unwind;
+ }
+ mutex_unlock(&team->lock);
+
+ return 0;
+
+unwind:
+ list_for_each_entry_continue_reverse(port, &team->port_list, list)
+ vlan_vid_del(port->dev, vid);
+ mutex_unlock(&team->lock);
+
+ return err;
+}
+
+static int team_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
+{
+ struct team *team = netdev_priv(dev);
+ struct team_port *port;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(port, &team->port_list, list)
+ vlan_vid_del(port->dev, vid);
+ rcu_read_unlock();
+
+ return 0;
+}
+
+static int team_add_slave(struct net_device *dev, struct net_device *port_dev)
+{
+ struct team *team = netdev_priv(dev);
+ int err;
+
+ mutex_lock(&team->lock);
+ err = team_port_add(team, port_dev);
+ mutex_unlock(&team->lock);
+ return err;
+}
+
+static int team_del_slave(struct net_device *dev, struct net_device *port_dev)
+{
+ struct team *team = netdev_priv(dev);
+ int err;
+
+ mutex_lock(&team->lock);
+ err = team_port_del(team, port_dev);
+ mutex_unlock(&team->lock);
+ return err;
+}
+
+static netdev_features_t team_fix_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ struct team_port *port;
+ struct team *team = netdev_priv(dev);
+ netdev_features_t mask;
+
+ mask = features;
+ features &= ~NETIF_F_ONE_FOR_ALL;
+ features |= NETIF_F_ALL_FOR_ALL;
+
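+ /* Fold every port's feature set into what the team advertises. */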
+ rcu_read_lock();
+ list_for_each_entry_rcu(port, &team->port_list, list) {
+ features = netdev_increment_features(features,
+ port->dev->features,
+ mask);
+ }
+ rcu_read_unlock();
+ return features;
+}
+
+static const struct net_device_ops team_netdev_ops = {
+ .ndo_init = team_init,
+ .ndo_uninit = team_uninit,
+ .ndo_open = team_open,
+ .ndo_stop = team_close,
+ .ndo_start_xmit = team_xmit,
+ .ndo_change_rx_flags = team_change_rx_flags,
+ .ndo_set_rx_mode = team_set_rx_mode,
+ .ndo_set_mac_address = team_set_mac_address,
+ .ndo_change_mtu = team_change_mtu,
+ .ndo_get_stats64 = team_get_stats64,
+ .ndo_vlan_rx_add_vid = team_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = team_vlan_rx_kill_vid,
+ .ndo_add_slave = team_add_slave,
+ .ndo_del_slave = team_del_slave,
+ .ndo_fix_features = team_fix_features,
+};
+
+
+/***********************
+ * rt netlink interface
+ ***********************/
+
+static void team_setup(struct net_device *dev)
+{
+ ether_setup(dev);
+
+ dev->netdev_ops = &team_netdev_ops;
+ dev->destructor = team_destructor;
+ dev->tx_queue_len = 0;
+ dev->flags |= IFF_MULTICAST;
+ dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
+
+ /*
+ * Indicate we support unicast address filtering. That way the core won't
+ * bring us to promisc mode in case a unicast addr is added.
+ * Leave this up to the underlying drivers.
+ */
+ dev->priv_flags |= IFF_UNICAST_FLT;
+
+ dev->features |= NETIF_F_LLTX;
+ dev->features |= NETIF_F_GRO;
+ dev->hw_features = NETIF_F_HW_VLAN_TX |
+ NETIF_F_HW_VLAN_RX |
+ NETIF_F_HW_VLAN_FILTER;
+
+ dev->features |= dev->hw_features;
+}
+
+static int team_newlink(struct net *src_net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[])
+{
+ int err;
+
+ if (tb[IFLA_ADDRESS] == NULL)
+ random_ether_addr(dev->dev_addr);
+
+ err = register_netdevice(dev);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int team_validate(struct nlattr *tb[], struct nlattr *data[])
+{
+ if (tb[IFLA_ADDRESS]) {
+ if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
+ return -EINVAL;
+ if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
+ return -EADDRNOTAVAIL;
+ }
+ return 0;
+}
+
+static struct rtnl_link_ops team_link_ops __read_mostly = {
+ .kind = DRV_NAME,
+ .priv_size = sizeof(struct team),
+ .setup = team_setup,
+ .newlink = team_newlink,
+ .validate = team_validate,
+};
+
+
+/***********************************
+ * Generic netlink custom interface
+ ***********************************/
+
+static struct genl_family team_nl_family = {
+ .id = GENL_ID_GENERATE,
+ .name = TEAM_GENL_NAME,
+ .version = TEAM_GENL_VERSION,
+ .maxattr = TEAM_ATTR_MAX,
+ .netnsok = true,
+};
+
+static const struct nla_policy team_nl_policy[TEAM_ATTR_MAX + 1] = {
+ [TEAM_ATTR_UNSPEC] = { .type = NLA_UNSPEC, },
+ [TEAM_ATTR_TEAM_IFINDEX] = { .type = NLA_U32 },
+ [TEAM_ATTR_LIST_OPTION] = { .type = NLA_NESTED },
+ [TEAM_ATTR_LIST_PORT] = { .type = NLA_NESTED },
+};
+
+static const struct nla_policy
+team_nl_option_policy[TEAM_ATTR_OPTION_MAX + 1] = {
+ [TEAM_ATTR_OPTION_UNSPEC] = { .type = NLA_UNSPEC, },
+ [TEAM_ATTR_OPTION_NAME] = {
+ .type = NLA_STRING,
+ .len = TEAM_STRING_MAX_LEN,
+ },
+ [TEAM_ATTR_OPTION_CHANGED] = { .type = NLA_FLAG },
+ [TEAM_ATTR_OPTION_TYPE] = { .type = NLA_U8 },
+ [TEAM_ATTR_OPTION_DATA] = {
+ .type = NLA_BINARY,
+ .len = TEAM_STRING_MAX_LEN,
+ },
+};
+
+static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
+{
+ struct sk_buff *msg;
+ void *hdr;
+ int err;
+
+ msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq,
+ &team_nl_family, 0, TEAM_CMD_NOOP);
+ if (IS_ERR(hdr)) {
+ err = PTR_ERR(hdr);
+ goto err_msg_put;
+ }
+
+ genlmsg_end(msg, hdr);
+
+ return genlmsg_unicast(genl_info_net(info), msg, info->snd_pid);
+
+err_msg_put:
+ nlmsg_free(msg);
+
+ return err;
+}
+
+/*
+ * Netlink cmd functions should be locked by the following two functions.
+ * Since dev gets held here, it is ensured that dev won't disappear in between.
+ */
+static struct team *team_nl_team_get(struct genl_info *info)
+{
+ struct net *net = genl_info_net(info);
+ int ifindex;
+ struct net_device *dev;
+ struct team *team;
+
+ if (!info->attrs[TEAM_ATTR_TEAM_IFINDEX])
+ return NULL;
+
+ ifindex = nla_get_u32(info->attrs[TEAM_ATTR_TEAM_IFINDEX]);
+ dev = dev_get_by_index(net, ifindex);
+ if (!dev || dev->netdev_ops != &team_netdev_ops) {
+ if (dev)
+ dev_put(dev);
+ return NULL;
+ }
+
+ team = netdev_priv(dev);
+ mutex_lock(&team->lock);
+ return team;
+}
+
+static void team_nl_team_put(struct team *team)
+{
+ mutex_unlock(&team->lock);
+ dev_put(team->dev);
+}
+
+static int team_nl_send_generic(struct genl_info *info, struct team *team,
+ int (*fill_func)(struct sk_buff *skb,
+ struct genl_info *info,
+ int flags, struct team *team))
+{
+ struct sk_buff *skb;
+ int err;
+
+ skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ err = fill_func(skb, info, NLM_F_ACK, team);
+ if (err < 0)
+ goto err_fill;
+
+ err = genlmsg_unicast(genl_info_net(info), skb, info->snd_pid);
+ return err;
+
+err_fill:
+ nlmsg_free(skb);
+ return err;
+}
+
+static int team_nl_fill_options_get_changed(struct sk_buff *skb,
+ u32 pid, u32 seq, int flags,
+ struct team *team,
+ struct team_option *changed_option)
+{
+ struct nlattr *option_list;
+ void *hdr;
+ struct team_option *option;
+
+ hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags,
+ TEAM_CMD_OPTIONS_GET);
+ if (IS_ERR(hdr))
+ return PTR_ERR(hdr);
+
+ NLA_PUT_U32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex);
+ option_list = nla_nest_start(skb, TEAM_ATTR_LIST_OPTION);
+ if (!option_list)
+ return -EMSGSIZE;
+
+ list_for_each_entry(option, &team->option_list, list) {
+ struct nlattr *option_item;
+ long arg;
+
+ option_item = nla_nest_start(skb, TEAM_ATTR_ITEM_OPTION);
+ if (!option_item)
+ goto nla_put_failure;
+ NLA_PUT_STRING(skb, TEAM_ATTR_OPTION_NAME, option->name);
+ if (option == changed_option)
+ NLA_PUT_FLAG(skb, TEAM_ATTR_OPTION_CHANGED);
+ switch (option->type) {
+ case TEAM_OPTION_TYPE_U32:
+ NLA_PUT_U8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32);
+ team_option_get(team, option, &arg);
+ NLA_PUT_U32(skb, TEAM_ATTR_OPTION_DATA, arg);
+ break;
+ case TEAM_OPTION_TYPE_STRING:
+ NLA_PUT_U8(skb, TEAM_ATTR_OPTION_TYPE, NLA_STRING);
+ team_option_get(team, option, &arg);
+ NLA_PUT_STRING(skb, TEAM_ATTR_OPTION_DATA,
+ (char *) arg);
+ break;
+ default:
+ BUG();
+ }
+ nla_nest_end(skb, option_item);
+ }
+
+ nla_nest_end(skb, option_list);
+ return genlmsg_end(skb, hdr);
+
+nla_put_failure:
+ genlmsg_cancel(skb, hdr);
+ return -EMSGSIZE;
+}
+
+static int team_nl_fill_options_get(struct sk_buff *skb,
+ struct genl_info *info, int flags,
+ struct team *team)
+{
+ return team_nl_fill_options_get_changed(skb, info->snd_pid,
+ info->snd_seq, NLM_F_ACK,
+ team, NULL);
+}
+
+static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info)
+{
+ struct team *team;
+ int err;
+
+ team = team_nl_team_get(info);
+ if (!team)
+ return -EINVAL;
+
+ err = team_nl_send_generic(info, team, team_nl_fill_options_get);
+
+ team_nl_team_put(team);
+
+ return err;
+}
+
+static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
+{
+ struct team *team;
+ int err = 0;
+ int i;
+ struct nlattr *nl_option;
+
+ team = team_nl_team_get(info);
+ if (!team)
+ return -EINVAL;
+
+ if (!info->attrs[TEAM_ATTR_LIST_OPTION]) {
+ err = -EINVAL;
+ goto team_put;
+ }
+
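+ /*
+ * Each nested option item must carry name, type and data. It is
+ * matched by name and type against the registered options and
+ * applied via team_option_set.
+ */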
+ nla_for_each_nested(nl_option, info->attrs[TEAM_ATTR_LIST_OPTION], i) {
+ struct nlattr *mode_attrs[TEAM_ATTR_OPTION_MAX + 1];
+ enum team_option_type opt_type;
+ struct team_option *option;
+ char *opt_name;
+ bool opt_found = false;
+
+ if (nla_type(nl_option) != TEAM_ATTR_ITEM_OPTION) {
+ err = -EINVAL;
+ goto team_put;
+ }
+ err = nla_parse_nested(mode_attrs, TEAM_ATTR_OPTION_MAX,
+ nl_option, team_nl_option_policy);
+ if (err)
+ goto team_put;
+ if (!mode_attrs[TEAM_ATTR_OPTION_NAME] ||
+ !mode_attrs[TEAM_ATTR_OPTION_TYPE] ||
+ !mode_attrs[TEAM_ATTR_OPTION_DATA]) {
+ err = -EINVAL;
+ goto team_put;
+ }
+ switch (nla_get_u8(mode_attrs[TEAM_ATTR_OPTION_TYPE])) {
+ case NLA_U32:
+ opt_type = TEAM_OPTION_TYPE_U32;
+ break;
+ case NLA_STRING:
+ opt_type = TEAM_OPTION_TYPE_STRING;
+ break;
+ default:
+ err = -EINVAL;
+ goto team_put;
+ }
+
+ opt_name = nla_data(mode_attrs[TEAM_ATTR_OPTION_NAME]);
+ list_for_each_entry(option, &team->option_list, list) {
+ long arg;
+ struct nlattr *opt_data_attr;
+
+ if (option->type != opt_type ||
+ strcmp(option->name, opt_name))
+ continue;
+ opt_found = true;
+ opt_data_attr = mode_attrs[TEAM_ATTR_OPTION_DATA];
+ switch (opt_type) {
+ case TEAM_OPTION_TYPE_U32:
+ arg = nla_get_u32(opt_data_attr);
+ break;
+ case TEAM_OPTION_TYPE_STRING:
+ arg = (long) nla_data(opt_data_attr);
+ break;
+ default:
+ BUG();
+ }
+ err = team_option_set(team, option, &arg);
+ if (err)
+ goto team_put;
+ }
+ if (!opt_found) {
+ err = -ENOENT;
+ goto team_put;
+ }
+ }
+
+team_put:
+ team_nl_team_put(team);
+
+ return err;
+}
+
+static int team_nl_fill_port_list_get_changed(struct sk_buff *skb,
+ u32 pid, u32 seq, int flags,
+ struct team *team,
+ struct team_port *changed_port)
+{
+ struct nlattr *port_list;
+ void *hdr;
+ struct team_port *port;
+
+ hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags,
+ TEAM_CMD_PORT_LIST_GET);
+ if (IS_ERR(hdr))
+ return PTR_ERR(hdr);
+
+ NLA_PUT_U32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex);
+ port_list = nla_nest_start(skb, TEAM_ATTR_LIST_PORT);
+ if (!port_list)
+ return -EMSGSIZE;
+
+ list_for_each_entry(port, &team->port_list, list) {
+ struct nlattr *port_item;
+
+ port_item = nla_nest_start(skb, TEAM_ATTR_ITEM_PORT);
+ if (!port_item)
+ goto nla_put_failure;
+ NLA_PUT_U32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex);
+ if (port == changed_port)
+ NLA_PUT_FLAG(skb, TEAM_ATTR_PORT_CHANGED);
+ if (port->linkup)
+ NLA_PUT_FLAG(skb, TEAM_ATTR_PORT_LINKUP);
+ NLA_PUT_U32(skb, TEAM_ATTR_PORT_SPEED, port->speed);
+ NLA_PUT_U8(skb, TEAM_ATTR_PORT_DUPLEX, port->duplex);
+ nla_nest_end(skb, port_item);
+ }
+
+ nla_nest_end(skb, port_list);
+ return genlmsg_end(skb, hdr);
+
+nla_put_failure:
+ genlmsg_cancel(skb, hdr);
+ return -EMSGSIZE;
+}
+
+static int team_nl_fill_port_list_get(struct sk_buff *skb,
+ struct genl_info *info, int flags,
+ struct team *team)
+{
+ return team_nl_fill_port_list_get_changed(skb, info->snd_pid,
+ info->snd_seq, NLM_F_ACK,
+ team, NULL);
+}
+
+static int team_nl_cmd_port_list_get(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct team *team;
+ int err;
+
+ team = team_nl_team_get(info);
+ if (!team)
+ return -EINVAL;
+
+ err = team_nl_send_generic(info, team, team_nl_fill_port_list_get);
+
+ team_nl_team_put(team);
+
+ return err;
+}
+
+static struct genl_ops team_nl_ops[] = {
+ {
+ .cmd = TEAM_CMD_NOOP,
+ .doit = team_nl_cmd_noop,
+ .policy = team_nl_policy,
+ },
+ {
+ .cmd = TEAM_CMD_OPTIONS_SET,
+ .doit = team_nl_cmd_options_set,
+ .policy = team_nl_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = TEAM_CMD_OPTIONS_GET,
+ .doit = team_nl_cmd_options_get,
+ .policy = team_nl_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = TEAM_CMD_PORT_LIST_GET,
+ .doit = team_nl_cmd_port_list_get,
+ .policy = team_nl_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+};
+
+static struct genl_multicast_group team_change_event_mcgrp = {
+ .name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME,
+};
+
+static int team_nl_send_event_options_get(struct team *team,
+ struct team_option *changed_option)
+{
+ struct sk_buff *skb;
+ int err;
+ struct net *net = dev_net(team->dev);
+
+ skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ err = team_nl_fill_options_get_changed(skb, 0, 0, 0, team,
+ changed_option);
+ if (err < 0)
+ goto err_fill;
+
+ err = genlmsg_multicast_netns(net, skb, 0, team_change_event_mcgrp.id,
+ GFP_KERNEL);
+ return err;
+
+err_fill:
+ nlmsg_free(skb);
+ return err;
+}
+
+static int team_nl_send_event_port_list_get(struct team_port *port)
+{
+ struct sk_buff *skb;
+ int err;
+ struct net *net = dev_net(port->team->dev);
+
+ skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ err = team_nl_fill_port_list_get_changed(skb, 0, 0, 0,
+ port->team, port);
+ if (err < 0)
+ goto err_fill;
+
+ err = genlmsg_multicast_netns(net, skb, 0, team_change_event_mcgrp.id,
+ GFP_KERNEL);
+ return err;
+
+err_fill:
+ nlmsg_free(skb);
+ return err;
+}
+
+static int team_nl_init(void)
+{
+ int err;
+
+ err = genl_register_family_with_ops(&team_nl_family, team_nl_ops,
+ ARRAY_SIZE(team_nl_ops));
+ if (err)
+ return err;
+
+ err = genl_register_mc_group(&team_nl_family, &team_change_event_mcgrp);
+ if (err)
+ goto err_change_event_grp_reg;
+
+ return 0;
+
+err_change_event_grp_reg:
+ genl_unregister_family(&team_nl_family);
+
+ return err;
+}
+
+static void team_nl_fini(void)
+{
+ genl_unregister_family(&team_nl_family);
+}
+
+
+/******************
+ * Change checkers
+ ******************/
+
+static void __team_options_change_check(struct team *team,
+ struct team_option *changed_option)
+{
+ int err;
+
+ err = team_nl_send_event_options_get(team, changed_option);
+ if (err)
+ netdev_warn(team->dev, "Failed to send options change via netlink\n");
+}
+
+/* rtnl lock is held */
+static void __team_port_change_check(struct team_port *port, bool linkup)
+{
+ int err;
+
+ if (port->linkup == linkup)
+ return;
+
+ port->linkup = linkup;
+ if (linkup) {
+ struct ethtool_cmd ecmd;
+
+ err = __ethtool_get_settings(port->dev, &ecmd);
+ if (!err) {
+ port->speed = ethtool_cmd_speed(&ecmd);
+ port->duplex = ecmd.duplex;
+ goto send_event;
+ }
+ }
+ port->speed = 0;
+ port->duplex = 0;
+
+send_event:
+ err = team_nl_send_event_port_list_get(port);
+ if (err)
+ netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink\n",
+ port->dev->name);
+
+}
+
+static void team_port_change_check(struct team_port *port, bool linkup)
+{
+ struct team *team = port->team;
+
+ mutex_lock(&team->lock);
+ __team_port_change_check(port, linkup);
+ mutex_unlock(&team->lock);
+}
+
+/************************************
+ * Net device notifier event handler
+ ************************************/
+
+static int team_device_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = (struct net_device *) ptr;
+ struct team_port *port;
+
+ port = team_port_get_rtnl(dev);
+ if (!port)
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case NETDEV_UP:
+ if (netif_carrier_ok(dev))
+ team_port_change_check(port, true);
+ break;
+ case NETDEV_DOWN:
+ team_port_change_check(port, false);
+ break;
+ case NETDEV_CHANGE:
+ if (netif_running(port->dev))
+ team_port_change_check(port,
+ !!netif_carrier_ok(port->dev));
+ break;
+ case NETDEV_UNREGISTER:
+ team_del_slave(port->team->dev, dev);
+ break;
+ case NETDEV_FEAT_CHANGE:
+ team_compute_features(port->team);
+ break;
+ case NETDEV_CHANGEMTU:
+ /* Forbid changing the MTU of an underlying device */
+ return NOTIFY_BAD;
+ case NETDEV_PRE_TYPE_CHANGE:
+ /* Forbid changing the type of an underlying device */
+ return NOTIFY_BAD;
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block team_notifier_block __read_mostly = {
+ .notifier_call = team_device_event,
+};
+
+
+/***********************
+ * Module init and exit
+ ***********************/
+
+static int __init team_module_init(void)
+{
+ int err;
+
+ register_netdevice_notifier(&team_notifier_block);
+
+ err = rtnl_link_register(&team_link_ops);
+ if (err)
+ goto err_rtnl_reg;
+
+ err = team_nl_init();
+ if (err)
+ goto err_nl_init;
+
+ return 0;
+
+err_nl_init:
+ rtnl_link_unregister(&team_link_ops);
+
+err_rtnl_reg:
+ unregister_netdevice_notifier(&team_notifier_block);
+
+ return err;
+}
+
+static void __exit team_module_exit(void)
+{
+ team_nl_fini();
+ rtnl_link_unregister(&team_link_ops);
+ unregister_netdevice_notifier(&team_notifier_block);
+}
+
+module_init(team_module_init);
+module_exit(team_module_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>");
+MODULE_DESCRIPTION("Ethernet team device driver");
+MODULE_ALIAS_RTNL_LINK(DRV_NAME);
diff --git a/drivers/net/team/team_mode_activebackup.c b/drivers/net/team/team_mode_activebackup.c
new file mode 100644
index 000000000000..f4d960e82e29
--- /dev/null
+++ b/drivers/net/team/team_mode_activebackup.c
@@ -0,0 +1,136 @@
+/*
+ * drivers/net/team/team_mode_activebackup.c - Active-backup mode for team
+ * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <net/rtnetlink.h>
+#include <linux/if_team.h>
+
+struct ab_priv {
+ struct team_port __rcu *active_port;
+};
+
+static struct ab_priv *ab_priv(struct team *team)
+{
+ return (struct ab_priv *) &team->mode_priv;
+}
+
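+/*
+ * Only frames arriving on the currently active port are passed up as coming
+ * from the team device.
+ */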
+static rx_handler_result_t ab_receive(struct team *team, struct team_port *port,
+ struct sk_buff *skb)
+{
+ struct team_port *active_port;
+
+ active_port = rcu_dereference(ab_priv(team)->active_port);
+ if (active_port != port)
+ return RX_HANDLER_EXACT;
+ return RX_HANDLER_ANOTHER;
+}
+
+static bool ab_transmit(struct team *team, struct sk_buff *skb)
+{
+ struct team_port *active_port;
+
+ active_port = rcu_dereference(ab_priv(team)->active_port);
+ if (unlikely(!active_port))
+ goto drop;
+ skb->dev = active_port->dev;
+ if (dev_queue_xmit(skb))
+ return false;
+ return true;
+
+drop:
+ dev_kfree_skb_any(skb);
+ return false;
+}
+
+static void ab_port_leave(struct team *team, struct team_port *port)
+{
+ if (ab_priv(team)->active_port == port)
+ RCU_INIT_POINTER(ab_priv(team)->active_port, NULL);
+}
+
+static int ab_active_port_get(struct team *team, void *arg)
+{
+ u32 *ifindex = arg;
+
+ *ifindex = 0;
+ if (ab_priv(team)->active_port)
+ *ifindex = ab_priv(team)->active_port->dev->ifindex;
+ return 0;
+}
+
+static int ab_active_port_set(struct team *team, void *arg)
+{
+ u32 *ifindex = arg;
+ struct team_port *port;
+
+ list_for_each_entry_rcu(port, &team->port_list, list) {
+ if (port->dev->ifindex == *ifindex) {
+ rcu_assign_pointer(ab_priv(team)->active_port, port);
+ return 0;
+ }
+ }
+ return -ENOENT;
+}
+
+static const struct team_option ab_options[] = {
+ {
+ .name = "activeport",
+ .type = TEAM_OPTION_TYPE_U32,
+ .getter = ab_active_port_get,
+ .setter = ab_active_port_set,
+ },
+};
+
+static int ab_init(struct team *team)
+{
+ return team_options_register(team, ab_options, ARRAY_SIZE(ab_options));
+}
+
+static void ab_exit(struct team *team)
+{
+ team_options_unregister(team, ab_options, ARRAY_SIZE(ab_options));
+}
+
+static const struct team_mode_ops ab_mode_ops = {
+ .init = ab_init,
+ .exit = ab_exit,
+ .receive = ab_receive,
+ .transmit = ab_transmit,
+ .port_leave = ab_port_leave,
+};
+
+static struct team_mode ab_mode = {
+ .kind = "activebackup",
+ .owner = THIS_MODULE,
+ .priv_size = sizeof(struct ab_priv),
+ .ops = &ab_mode_ops,
+};
+
+static int __init ab_init_module(void)
+{
+ return team_mode_register(&ab_mode);
+}
+
+static void __exit ab_cleanup_module(void)
+{
+ team_mode_unregister(&ab_mode);
+}
+
+module_init(ab_init_module);
+module_exit(ab_cleanup_module);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>");
+MODULE_DESCRIPTION("Active-backup mode for team");
+MODULE_ALIAS("team-mode-activebackup");
diff --git a/drivers/net/team/team_mode_roundrobin.c b/drivers/net/team/team_mode_roundrobin.c
new file mode 100644
index 000000000000..a0e8f806331a
--- /dev/null
+++ b/drivers/net/team/team_mode_roundrobin.c
@@ -0,0 +1,107 @@
+/*
+ * drivers/net/team/team_mode_roundrobin.c - Round-robin mode for team
+ * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/if_team.h>
+
+struct rr_priv {
+ unsigned int sent_packets;
+};
+
+static struct rr_priv *rr_priv(struct team *team)
+{
+ return (struct rr_priv *) &team->mode_priv;
+}
+
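+/*
+ * Return @port if it has link, otherwise walk the port list circularly
+ * starting after @port and return the first port with link; NULL if no
+ * port is up.
+ */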
+static struct team_port *__get_first_port_up(struct team *team,
+ struct team_port *port)
+{
+ struct team_port *cur;
+
+ if (port->linkup)
+ return port;
+ cur = port;
+ list_for_each_entry_continue_rcu(cur, &team->port_list, list)
+ if (cur->linkup)
+ return cur;
+ list_for_each_entry_rcu(cur, &team->port_list, list) {
+ if (cur == port)
+ break;
+ if (cur->linkup)
+ return cur;
+ }
+ return NULL;
+}
+
+static bool rr_transmit(struct team *team, struct sk_buff *skb)
+{
+ struct team_port *port;
+ int port_index;
+
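+ /*
+ * Pick the next port in round-robin order, then skip forward to the
+ * first port that currently has link.
+ */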
+ port_index = rr_priv(team)->sent_packets++ % team->port_count;
+ port = team_get_port_by_index_rcu(team, port_index);
+ port = __get_first_port_up(team, port);
+ if (unlikely(!port))
+ goto drop;
+ skb->dev = port->dev;
+ if (dev_queue_xmit(skb))
+ return false;
+ return true;
+
+drop:
+ dev_kfree_skb_any(skb);
+ return false;
+}
+
+static int rr_port_enter(struct team *team, struct team_port *port)
+{
+ return team_port_set_team_mac(port);
+}
+
+static void rr_port_change_mac(struct team *team, struct team_port *port)
+{
+ team_port_set_team_mac(port);
+}
+
+static const struct team_mode_ops rr_mode_ops = {
+ .transmit = rr_transmit,
+ .port_enter = rr_port_enter,
+ .port_change_mac = rr_port_change_mac,
+};
+
+static struct team_mode rr_mode = {
+ .kind = "roundrobin",
+ .owner = THIS_MODULE,
+ .priv_size = sizeof(struct rr_priv),
+ .ops = &rr_mode_ops,
+};
+
+static int __init rr_init_module(void)
+{
+ return team_mode_register(&rr_mode);
+}
+
+static void __exit rr_cleanup_module(void)
+{
+ team_mode_unregister(&rr_mode);
+}
+
+module_init(rr_init_module);
+module_exit(rr_cleanup_module);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>");
+MODULE_DESCRIPTION("Round-robin mode for team");
+MODULE_ALIAS("team-mode-roundrobin");
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 7bea9c65119e..93c5d72711b0 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -123,7 +123,7 @@ struct tun_struct {
gid_t group;
struct net_device *dev;
- u32 set_features;
+ netdev_features_t set_features;
#define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
NETIF_F_TSO6|NETIF_F_UFO)
struct fasync_struct *fasync;
@@ -454,7 +454,8 @@ tun_net_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
-static u32 tun_net_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t tun_net_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
struct tun_struct *tun = netdev_priv(dev);
@@ -1196,7 +1197,7 @@ static int tun_get_iff(struct net *net, struct tun_struct *tun,
* privs required. */
static int set_offload(struct tun_struct *tun, unsigned long arg)
{
- u32 features = 0;
+ netdev_features_t features = 0;
if (arg & TUN_F_CSUM) {
features |= NETIF_F_HW_CSUM;
@@ -1589,16 +1590,15 @@ static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
{
struct tun_struct *tun = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->fw_version, "N/A");
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
switch (tun->flags & TUN_TYPE_MASK) {
case TUN_TUN_DEV:
- strcpy(info->bus_info, "tun");
+ strlcpy(info->bus_info, "tun", sizeof(info->bus_info));
break;
case TUN_TAP_DEV:
- strcpy(info->bus_info, "tap");
+ strlcpy(info->bus_info, "tap", sizeof(info->bus_info));
break;
}
}
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
index 6832ca467c84..8e84f5bdd6ca 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix.c
@@ -36,7 +36,7 @@
#include <linux/usb/usbnet.h>
#include <linux/slab.h>
-#define DRIVER_VERSION "08-Nov-2011"
+#define DRIVER_VERSION "22-Dec-2011"
#define DRIVER_NAME "asix"
/* ASIX AX8817X based USB 2.0 Ethernet Devices */
@@ -376,7 +376,7 @@ static int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
skb_pull(skb, (size + 1) & 0xfffe);
- if (skb->len == 0)
+ if (skb->len < sizeof(header))
break;
head = (u8 *) skb->data;
@@ -689,6 +689,10 @@ asix_get_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
}
wolinfo->supported = WAKE_PHY | WAKE_MAGIC;
wolinfo->wolopts = 0;
+ if (opt & AX_MONITOR_LINK)
+ wolinfo->wolopts |= WAKE_PHY;
+ if (opt & AX_MONITOR_MAGIC)
+ wolinfo->wolopts |= WAKE_MAGIC;
}
static int
@@ -974,6 +978,7 @@ static int ax88772_link_reset(struct usbnet *dev)
static int ax88772_reset(struct usbnet *dev)
{
+ struct asix_data *data = (struct asix_data *)&dev->data;
int ret, embd_phy;
u16 rx_ctl;
@@ -1051,6 +1056,13 @@ static int ax88772_reset(struct usbnet *dev)
goto out;
}
+ /* Rewrite MAC address */
+ memcpy(data->mac_addr, dev->net->dev_addr, ETH_ALEN);
+ ret = asix_write_cmd(dev, AX_CMD_WRITE_NODE_ID, 0, 0, ETH_ALEN,
+ data->mac_addr);
+ if (ret < 0)
+ goto out;
+
/* Set RX_CTL to default values with 2k buffer, and enable cactus */
ret = asix_write_rx_ctl(dev, AX_DEFAULT_RX_CTL);
if (ret < 0)
@@ -1148,7 +1160,7 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
return 0;
}
-static struct ethtool_ops ax88178_ethtool_ops = {
+static const struct ethtool_ops ax88178_ethtool_ops = {
.get_drvinfo = asix_get_drvinfo,
.get_link = asix_get_link,
.get_msglevel = usbnet_get_msglevel,
@@ -1316,6 +1328,13 @@ static int ax88178_reset(struct usbnet *dev)
if (ret < 0)
return ret;
+ /* Rewrite MAC address */
+ memcpy(data->mac_addr, dev->net->dev_addr, ETH_ALEN);
+ ret = asix_write_cmd(dev, AX_CMD_WRITE_NODE_ID, 0, 0, ETH_ALEN,
+ data->mac_addr);
+ if (ret < 0)
+ return ret;
+
ret = asix_write_rx_ctl(dev, AX_DEFAULT_RX_CTL);
if (ret < 0)
return ret;
@@ -1655,6 +1674,10 @@ static const struct usb_device_id products [] = {
// ASIX 88772a
USB_DEVICE(0x0db0, 0xa877),
.driver_info = (unsigned long) &ax88772_info,
+}, {
+ // Asus USB Ethernet Adapter
+ USB_DEVICE (0x0b95, 0x7e2b),
+ .driver_info = (unsigned long) &ax88772_info,
},
{ }, // END
};
diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c
index 30aedcd55975..790cbdea7392 100644
--- a/drivers/net/usb/cdc-phonet.c
+++ b/drivers/net/usb/cdc-phonet.c
@@ -130,7 +130,7 @@ static int rx_submit(struct usbpn_dev *pnd, struct urb *req, gfp_t gfp_flags)
struct page *page;
int err;
- page = __netdev_alloc_page(dev, gfp_flags);
+ page = alloc_page(gfp_flags);
if (!page)
return -ENOMEM;
@@ -140,7 +140,7 @@ static int rx_submit(struct usbpn_dev *pnd, struct urb *req, gfp_t gfp_flags)
err = usb_submit_urb(req, gfp_flags);
if (unlikely(err)) {
dev_dbg(&dev->dev, "RX submit error (%d)\n", err);
- netdev_free_page(dev, page);
+ put_page(page);
}
return err;
}
@@ -208,9 +208,9 @@ static void rx_complete(struct urb *req)
dev->stats.rx_errors++;
resubmit:
if (page)
- netdev_free_page(dev, page);
+ put_page(page);
if (req)
- rx_submit(pnd, req, GFP_ATOMIC);
+ rx_submit(pnd, req, GFP_ATOMIC | __GFP_COLD);
}
static int usbpn_close(struct net_device *dev);
@@ -229,7 +229,7 @@ static int usbpn_open(struct net_device *dev)
for (i = 0; i < rxq_size; i++) {
struct urb *req = usb_alloc_urb(0, GFP_KERNEL);
- if (!req || rx_submit(pnd, req, GFP_KERNEL)) {
+ if (!req || rx_submit(pnd, req, GFP_KERNEL | __GFP_COLD)) {
usbpn_close(dev);
return -ENOMEM;
}
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 9904b7ebca2d..3a539a9cac54 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -138,7 +138,7 @@ struct cdc_ncm_ctx {
static void cdc_ncm_tx_timeout(unsigned long arg);
static const struct driver_info cdc_ncm_info;
static struct usb_driver cdc_ncm_driver;
-static struct ethtool_ops cdc_ncm_ethtool_ops;
+static const struct ethtool_ops cdc_ncm_ethtool_ops;
static const struct usb_device_id cdc_devs[] = {
{ USB_INTERFACE_INFO(USB_CLASS_COMM,
@@ -465,12 +465,10 @@ static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
int temp;
u8 iface_no;
- ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (ctx == NULL)
return -ENODEV;
- memset(ctx, 0, sizeof(*ctx));
-
init_timer(&ctx->tx_timer);
spin_lock_init(&ctx->mtx);
ctx->netdev = dev->net;
@@ -1222,7 +1220,7 @@ static struct usb_driver cdc_ncm_driver = {
.supports_autosuspend = 1,
};
-static struct ethtool_ops cdc_ncm_ethtool_ops = {
+static const struct ethtool_ops cdc_ncm_ethtool_ops = {
.get_drvinfo = cdc_ncm_get_drvinfo,
.get_link = usbnet_get_link,
.get_msglevel = usbnet_get_msglevel,
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index 08a4df238796..e84662db51cc 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -420,7 +420,7 @@ static u32 ipheth_ethtool_op_get_link(struct net_device *net)
return netif_carrier_ok(dev->net);
}
-static struct ethtool_ops ops = {
+static const struct ethtool_ops ops = {
.get_link = ipheth_ethtool_op_get_link
};
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 769f5090bda1..5d99b8cacd7d 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -55,8 +55,8 @@ static const char driver_name[] = "pegasus";
#define BMSR_MEDIA (BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | \
BMSR_100FULL | BMSR_ANEGCAPABLE)
-static int loopback;
-static int mii_mode;
+static bool loopback;
+static bool mii_mode;
static char *devid;
static struct usb_eth_dev usb_dev_id[] = {
@@ -517,7 +517,7 @@ static inline int reset_mac(pegasus_t *pegasus)
for (i = 0; i < REG_TIMEOUT; i++) {
get_registers(pegasus, EthCtrl1, 1, &data);
if (~data & 0x08) {
- if (loopback & 1)
+ if (loopback)
break;
if (mii_mode && (pegasus->features & HAS_HOME_PNA))
set_register(pegasus, Gpio1, 0x34);
@@ -561,7 +561,7 @@ static int enable_net_traffic(struct net_device *dev, struct usb_device *usb)
data[1] |= 0x10; /* set 100 Mbps */
if (mii_mode)
data[1] = 0;
- data[2] = (loopback & 1) ? 0x09 : 0x01;
+ data[2] = loopback ? 0x09 : 0x01;
memcpy(pegasus->eth_regs, data, sizeof(data));
ret = set_registers(pegasus, EthCtrl0, 3, data);
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
index e45dfdcb8718..b59cf20c7817 100644
--- a/drivers/net/usb/sierra_net.c
+++ b/drivers/net/usb/sierra_net.c
@@ -618,7 +618,7 @@ static u32 sierra_net_get_link(struct net_device *net)
return sierra_net_get_private(dev)->link_up && netif_running(net);
}
-static struct ethtool_ops sierra_net_ethtool_ops = {
+static const struct ethtool_ops sierra_net_ethtool_ops = {
.get_drvinfo = sierra_net_get_drvinfo,
.get_link = sierra_net_get_link,
.get_msglevel = usbnet_get_msglevel,
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index e85840ee36e0..3b017bbd2a22 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -76,7 +76,7 @@ struct usb_context {
struct usbnet *dev;
};
-static int turbo_mode = true;
+static bool turbo_mode = true;
module_param(turbo_mode, bool, 0644);
MODULE_PARM_DESC(turbo_mode, "Enable multiple frames per Rx transaction");
@@ -728,7 +728,8 @@ static int smsc75xx_change_mtu(struct net_device *netdev, int new_mtu)
}
/* Enable or disable Rx checksum offload engine */
-static int smsc75xx_set_features(struct net_device *netdev, u32 features)
+static int smsc75xx_set_features(struct net_device *netdev,
+ netdev_features_t features)
{
struct usbnet *dev = netdev_priv(netdev);
struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 4a107610ac43..d45520e6dd48 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -59,7 +59,7 @@ struct usb_context {
struct usbnet *dev;
};
-static int turbo_mode = true;
+static bool turbo_mode = true;
module_param(turbo_mode, bool, 0644);
MODULE_PARM_DESC(turbo_mode, "Enable multiple frames per Rx transaction");
@@ -516,7 +516,8 @@ static void smsc95xx_status(struct usbnet *dev, struct urb *urb)
}
/* Enable or disable Tx & Rx checksum offload engines */
-static int smsc95xx_set_features(struct net_device *netdev, u32 features)
+static int smsc95xx_set_features(struct net_device *netdev,
+ netdev_features_t features)
{
struct usbnet *dev = netdev_priv(netdev);
u32 read_buf;
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index ef883e97cee0..49f4667e1fa3 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -27,8 +27,8 @@
struct veth_net_stats {
u64 rx_packets;
- u64 tx_packets;
u64 rx_bytes;
+ u64 tx_packets;
u64 tx_bytes;
u64 rx_dropped;
struct u64_stats_sync syncp;
@@ -66,9 +66,8 @@ static int veth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
static void veth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->fw_version, "N/A");
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}
static void veth_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
@@ -271,7 +270,7 @@ static void veth_setup(struct net_device *dev)
dev->features |= NETIF_F_LLTX;
dev->destructor = veth_dev_free;
- dev->hw_features = NETIF_F_NO_CSUM | NETIF_F_SG | NETIF_F_RXCSUM;
+ dev->hw_features = NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_RXCSUM;
}
/*
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 6ee8410443c4..4880aa8b4c28 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -30,7 +30,7 @@
static int napi_weight = 128;
module_param(napi_weight, int, 0444);
-static int csum = 1, gso = 1;
+static bool csum = true, gso = true;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);
@@ -39,6 +39,7 @@ module_param(gso, bool, 0444);
#define GOOD_COPY_LEN 128
#define VIRTNET_SEND_COMMAND_SG_MAX 2
+#define VIRTNET_DRIVER_VERSION "1.0.0"
struct virtnet_stats {
struct u64_stats_sync syncp;
@@ -155,6 +156,7 @@ static void set_skb_frag(struct sk_buff *skb, struct page *page,
*len -= size;
}
+/* Called from bottom half context */
static struct sk_buff *page_to_skb(struct virtnet_info *vi,
struct page *page, unsigned int len)
{
@@ -357,7 +359,7 @@ static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp)
struct skb_vnet_hdr *hdr;
int err;
- skb = netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN);
+ skb = __netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN, gfp);
if (unlikely(!skb))
return -ENOMEM;
@@ -368,7 +370,7 @@ static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp)
skb_to_sgvec(skb, vi->rx_sg + 1, 0, skb->len);
- err = virtqueue_add_buf_gfp(vi->rvq, vi->rx_sg, 0, 2, skb, gfp);
+ err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, 2, skb, gfp);
if (err < 0)
dev_kfree_skb(skb);
@@ -413,8 +415,8 @@ static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp)
/* chain first in list head */
first->private = (unsigned long)list;
- err = virtqueue_add_buf_gfp(vi->rvq, vi->rx_sg, 0, MAX_SKB_FRAGS + 2,
- first, gfp);
+ err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, MAX_SKB_FRAGS + 2,
+ first, gfp);
if (err < 0)
give_pages(vi, first);
@@ -432,14 +434,20 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi, gfp_t gfp)
sg_init_one(vi->rx_sg, page_address(page), PAGE_SIZE);
- err = virtqueue_add_buf_gfp(vi->rvq, vi->rx_sg, 0, 1, page, gfp);
+ err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, 1, page, gfp);
if (err < 0)
give_pages(vi, page);
return err;
}
-/* Returns false if we couldn't fill entirely (OOM). */
+/*
+ * Returns false if we couldn't fill entirely (OOM).
+ *
+ * Normally run in the receive path, but can also be run from ndo_open
+ * before we're receiving packets, or from refill_work which is
+ * careful to disable receiving (using napi_disable).
+ */
static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
{
int err;
@@ -501,7 +509,7 @@ static void refill_work(struct work_struct *work)
/* In theory, this can happen: if we don't get any buffers in
* we will *never* try to fill again. */
if (still_empty)
- schedule_delayed_work(&vi->refill, HZ/2);
+ queue_delayed_work(system_nrt_wq, &vi->refill, HZ/2);
}
static int virtnet_poll(struct napi_struct *napi, int budget)
@@ -520,7 +528,7 @@ again:
if (vi->num < vi->max / 2) {
if (!try_fill_recv(vi, GFP_ATOMIC))
- schedule_delayed_work(&vi->refill, 0);
+ queue_delayed_work(system_nrt_wq, &vi->refill, 0);
}
/* Out of packets? */
@@ -601,7 +609,7 @@ static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
hdr->num_sg = skb_to_sgvec(skb, vi->tx_sg + 1, 0, skb->len) + 1;
return virtqueue_add_buf(vi->svq, vi->tx_sg, hdr->num_sg,
- 0, skb);
+ 0, skb, GFP_ATOMIC);
}
static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -699,6 +707,7 @@ static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev,
}
tot->tx_dropped = dev->stats.tx_dropped;
+ tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
tot->rx_dropped = dev->stats.rx_dropped;
tot->rx_length_errors = dev->stats.rx_length_errors;
tot->rx_frame_errors = dev->stats.rx_frame_errors;
@@ -719,6 +728,10 @@ static int virtnet_open(struct net_device *dev)
{
struct virtnet_info *vi = netdev_priv(dev);
+ /* Make sure we have some buffers: if oom use wq. */
+ if (!try_fill_recv(vi, GFP_KERNEL))
+ queue_delayed_work(system_nrt_wq, &vi->refill, 0);
+
virtnet_napi_enable(vi);
return 0;
}
@@ -754,7 +767,7 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
sg_set_buf(&sg[i + 1], sg_virt(s), s->length);
sg_set_buf(&sg[out + in - 1], &status, sizeof(status));
- BUG_ON(virtqueue_add_buf(vi->cvq, sg, out, in, vi) < 0);
+ BUG_ON(virtqueue_add_buf(vi->cvq, sg, out, in, vi, GFP_ATOMIC) < 0);
virtqueue_kick(vi->cvq);
@@ -772,6 +785,8 @@ static int virtnet_close(struct net_device *dev)
{
struct virtnet_info *vi = netdev_priv(dev);
+ /* Make sure refill_work doesn't re-enable napi! */
+ cancel_delayed_work_sync(&vi->refill);
napi_disable(&vi->napi);
return 0;
@@ -853,7 +868,7 @@ static void virtnet_set_rx_mode(struct net_device *dev)
kfree(buf);
}
-static void virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid)
+static int virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid)
{
struct virtnet_info *vi = netdev_priv(dev);
struct scatterlist sg;
@@ -863,9 +878,10 @@ static void virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid)
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
VIRTIO_NET_CTRL_VLAN_ADD, &sg, 1, 0))
dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
+ return 0;
}
-static void virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
+static int virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
{
struct virtnet_info *vi = netdev_priv(dev);
struct scatterlist sg;
@@ -875,6 +891,7 @@ static void virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
VIRTIO_NET_CTRL_VLAN_DEL, &sg, 1, 0))
dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
+ return 0;
}
static void virtnet_get_ringparam(struct net_device *dev,
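The VLAN callbacks switch from void to int because the 8021q core now expects ndo_vlan_rx_add_vid/ndo_vlan_rx_kill_vid to report failure; a driver that cannot fail simply returns 0, as virtio_net does above. A hedged sketch of the new shape, with program_hw_filter() as a hypothetical stand-in for the device-specific command:

#include <linux/errno.h>
#include <linux/netdevice.h>

static int program_hw_filter(struct net_device *dev, u16 vid);	/* hypothetical helper */

static int example_vlan_rx_add_vid(struct net_device *dev, u16 vid)
{
	if (program_hw_filter(dev, vid) < 0)
		return -EIO;	/* the 8021q core can now see the failure */
	return 0;		/* previously this callback returned void */
}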
@@ -889,7 +906,21 @@ static void virtnet_get_ringparam(struct net_device *dev,
}
+
+static void virtnet_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+ struct virtio_device *vdev = vi->vdev;
+
+ strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info));
+
+}
+
static const struct ethtool_ops virtnet_ethtool_ops = {
+ .get_drvinfo = virtnet_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_ringparam = virtnet_get_ringparam,
};
@@ -954,15 +985,38 @@ static void virtnet_config_changed(struct virtio_device *vdev)
virtnet_update_status(vi);
}
+static int init_vqs(struct virtnet_info *vi)
+{
+ struct virtqueue *vqs[3];
+ vq_callback_t *callbacks[] = { skb_recv_done, skb_xmit_done, NULL};
+ const char *names[] = { "input", "output", "control" };
+ int nvqs, err;
+
+ /* We expect two virtqueues, receive then send,
+ * and optionally control. */
+ nvqs = virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ? 3 : 2;
+
+ err = vi->vdev->config->find_vqs(vi->vdev, nvqs, vqs, callbacks, names);
+ if (err)
+ return err;
+
+ vi->rvq = vqs[0];
+ vi->svq = vqs[1];
+
+ if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) {
+ vi->cvq = vqs[2];
+
+ if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
+ vi->dev->features |= NETIF_F_HW_VLAN_FILTER;
+ }
+ return 0;
+}
+
static int virtnet_probe(struct virtio_device *vdev)
{
int err;
struct net_device *dev;
struct virtnet_info *vi;
- struct virtqueue *vqs[3];
- vq_callback_t *callbacks[] = { skb_recv_done, skb_xmit_done, NULL};
- const char *names[] = { "input", "output", "control" };
- int nvqs;
/* Allocate ourselves a network device with room for our info */
dev = alloc_etherdev(sizeof(struct virtnet_info));
@@ -1034,24 +1088,10 @@ static int virtnet_probe(struct virtio_device *vdev)
if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
vi->mergeable_rx_bufs = true;
- /* We expect two virtqueues, receive then send,
- * and optionally control. */
- nvqs = virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ? 3 : 2;
-
- err = vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names);
+ err = init_vqs(vi);
if (err)
goto free_stats;
- vi->rvq = vqs[0];
- vi->svq = vqs[1];
-
- if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) {
- vi->cvq = vqs[2];
-
- if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
- dev->features |= NETIF_F_HW_VLAN_FILTER;
- }
-
err = register_netdev(dev);
if (err) {
pr_debug("virtio_net: registering device failed\n");
@@ -1082,7 +1122,6 @@ static int virtnet_probe(struct virtio_device *vdev)
unregister:
unregister_netdev(dev);
- cancel_delayed_work_sync(&vi->refill);
free_vqs:
vdev->config->del_vqs(vdev);
free_stats:
@@ -1114,29 +1153,73 @@ static void free_unused_bufs(struct virtnet_info *vi)
BUG_ON(vi->num != 0);
}
-static void __devexit virtnet_remove(struct virtio_device *vdev)
+static void remove_vq_common(struct virtnet_info *vi)
{
- struct virtnet_info *vi = vdev->priv;
-
- /* Stop all the virtqueues. */
- vdev->config->reset(vdev);
-
-
- unregister_netdev(vi->dev);
- cancel_delayed_work_sync(&vi->refill);
+ vi->vdev->config->reset(vi->vdev);
/* Free unused buffers in both send and recv, if any. */
free_unused_bufs(vi);
- vdev->config->del_vqs(vi->vdev);
+ vi->vdev->config->del_vqs(vi->vdev);
while (vi->pages)
__free_pages(get_a_page(vi, GFP_KERNEL), 0);
+}
+
+static void __devexit virtnet_remove(struct virtio_device *vdev)
+{
+ struct virtnet_info *vi = vdev->priv;
+
+ unregister_netdev(vi->dev);
+
+ remove_vq_common(vi);
free_percpu(vi->stats);
free_netdev(vi->dev);
}
+#ifdef CONFIG_PM
+static int virtnet_freeze(struct virtio_device *vdev)
+{
+ struct virtnet_info *vi = vdev->priv;
+
+ virtqueue_disable_cb(vi->rvq);
+ virtqueue_disable_cb(vi->svq);
+ if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ))
+ virtqueue_disable_cb(vi->cvq);
+
+ netif_device_detach(vi->dev);
+ cancel_delayed_work_sync(&vi->refill);
+
+ if (netif_running(vi->dev))
+ napi_disable(&vi->napi);
+
+ remove_vq_common(vi);
+
+ return 0;
+}
+
+static int virtnet_restore(struct virtio_device *vdev)
+{
+ struct virtnet_info *vi = vdev->priv;
+ int err;
+
+ err = init_vqs(vi);
+ if (err)
+ return err;
+
+ if (netif_running(vi->dev))
+ virtnet_napi_enable(vi);
+
+ netif_device_attach(vi->dev);
+
+ if (!try_fill_recv(vi, GFP_KERNEL))
+ queue_delayed_work(system_nrt_wq, &vi->refill, 0);
+
+ return 0;
+}
+#endif
+
static struct virtio_device_id id_table[] = {
{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
{ 0 },
@@ -1161,6 +1244,10 @@ static struct virtio_driver virtio_net_driver = {
.probe = virtnet_probe,
.remove = __devexit_p(virtnet_remove),
.config_changed = virtnet_config_changed,
+#ifdef CONFIG_PM
+ .freeze = virtnet_freeze,
+ .restore = virtnet_restore,
+#endif
};
static int __init init(void)
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index d96bfb1ac20b..de7fc345148a 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1926,7 +1926,7 @@ vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter)
}
-static void
+static int
vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
@@ -1943,10 +1943,12 @@ vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
}
set_bit(vid, adapter->active_vlans);
+
+ return 0;
}
-static void
+static int
vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
@@ -1963,6 +1965,8 @@ vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
}
clear_bit(vid, adapter->active_vlans);
+
+ return 0;
}
@@ -2163,7 +2167,8 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
rssConf->indTableSize = VMXNET3_RSS_IND_TABLE_SIZE;
get_random_bytes(&rssConf->hashKey[0], rssConf->hashKeySize);
for (i = 0; i < rssConf->indTableSize; i++)
- rssConf->indTable[i] = i % adapter->num_rx_queues;
+ rssConf->indTable[i] = ethtool_rxfh_indir_default(
+ i, adapter->num_rx_queues);
devRead->rssConfDesc.confVer = 1;
devRead->rssConfDesc.confLen = sizeof(*rssConf);
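ethtool_rxfh_indir_default() is the common helper for spreading an RSS indirection table across the RX queues; for this use it behaves like the round-robin modulo it replaces, which a small standalone check illustrates (the helper is re-implemented locally here rather than taken from <linux/ethtool.h>):

#include <stdio.h>

/* local stand-in for the kernel helper, for illustration only */
static unsigned int rxfh_indir_default(unsigned int index, unsigned int n_rx_rings)
{
	return index % n_rx_rings;
}

int main(void)
{
	unsigned int i, num_rx_queues = 4, ind_table_size = 32;

	for (i = 0; i < ind_table_size; i++)
		printf("indTable[%2u] = %u\n",
		       i, rxfh_indir_default(i, num_rx_queues));
	return 0;
}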
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index e662cbc8bfbd..587a218b2345 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -202,14 +202,9 @@ vmxnet3_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
strlcpy(drvinfo->driver, vmxnet3_driver_name, sizeof(drvinfo->driver));
- drvinfo->driver[sizeof(drvinfo->driver) - 1] = '\0';
strlcpy(drvinfo->version, VMXNET3_DRIVER_VERSION_REPORT,
sizeof(drvinfo->version));
- drvinfo->driver[sizeof(drvinfo->version) - 1] = '\0';
-
- strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
- drvinfo->fw_version[sizeof(drvinfo->fw_version) - 1] = '\0';
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
ETHTOOL_BUSINFO_LEN);
@@ -262,11 +257,11 @@ vmxnet3_get_strings(struct net_device *netdev, u32 stringset, u8 *buf)
}
}
-int vmxnet3_set_features(struct net_device *netdev, u32 features)
+int vmxnet3_set_features(struct net_device *netdev, netdev_features_t features)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
unsigned long flags;
- u32 changed = features ^ netdev->features;
+ netdev_features_t changed = features ^ netdev->features;
if (changed & (NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_HW_VLAN_RX)) {
if (features & NETIF_F_RXCSUM)
@@ -570,44 +565,38 @@ vmxnet3_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info,
}
#ifdef VMXNET3_RSS
+static u32
+vmxnet3_get_rss_indir_size(struct net_device *netdev)
+{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+ struct UPT1_RSSConf *rssConf = adapter->rss_conf;
+
+ return rssConf->indTableSize;
+}
+
static int
-vmxnet3_get_rss_indir(struct net_device *netdev,
- struct ethtool_rxfh_indir *p)
+vmxnet3_get_rss_indir(struct net_device *netdev, u32 *p)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
struct UPT1_RSSConf *rssConf = adapter->rss_conf;
- unsigned int n = min_t(unsigned int, p->size, rssConf->indTableSize);
+ unsigned int n = rssConf->indTableSize;
- p->size = rssConf->indTableSize;
while (n--)
- p->ring_index[n] = rssConf->indTable[n];
+ p[n] = rssConf->indTable[n];
return 0;
}
static int
-vmxnet3_set_rss_indir(struct net_device *netdev,
- const struct ethtool_rxfh_indir *p)
+vmxnet3_set_rss_indir(struct net_device *netdev, const u32 *p)
{
unsigned int i;
unsigned long flags;
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
struct UPT1_RSSConf *rssConf = adapter->rss_conf;
- if (p->size != rssConf->indTableSize)
- return -EINVAL;
- for (i = 0; i < rssConf->indTableSize; i++) {
- /*
- * Return with error code if any of the queue indices
- * is out of range
- */
- if (p->ring_index[i] < 0 ||
- p->ring_index[i] >= adapter->num_rx_queues)
- return -EINVAL;
- }
-
for (i = 0; i < rssConf->indTableSize; i++)
- rssConf->indTable[i] = p->ring_index[i];
+ rssConf->indTable[i] = p[i];
spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
@@ -619,7 +608,7 @@ vmxnet3_set_rss_indir(struct net_device *netdev,
}
#endif
-static struct ethtool_ops vmxnet3_ethtool_ops = {
+static const struct ethtool_ops vmxnet3_ethtool_ops = {
.get_settings = vmxnet3_get_settings,
.get_drvinfo = vmxnet3_get_drvinfo,
.get_regs_len = vmxnet3_get_regs_len,
@@ -634,6 +623,7 @@ static struct ethtool_ops vmxnet3_ethtool_ops = {
.set_ringparam = vmxnet3_set_ringparam,
.get_rxnfc = vmxnet3_get_rxnfc,
#ifdef VMXNET3_RSS
+ .get_rxfh_indir_size = vmxnet3_get_rss_indir_size,
.get_rxfh_indir = vmxnet3_get_rss_indir,
.set_rxfh_indir = vmxnet3_set_rss_indir,
#endif
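With .get_rxfh_indir_size exported, the ethtool core asks for the table length first and then hands .get_rxfh_indir/.set_rxfh_indir a plain u32 array of exactly that size, which is why the p->size checks above could be dropped. A standalone sketch of that two-step handshake (names and the fixed table size are illustrative, not the real ethtool core):

#include <stdio.h>
#include <stdlib.h>

#define IND_TABLE_SIZE 32		/* stands in for rssConf->indTableSize */

static unsigned int ind_table[IND_TABLE_SIZE];

static unsigned int get_rxfh_indir_size(void)
{
	return IND_TABLE_SIZE;
}

static int get_rxfh_indir(unsigned int *p)
{
	unsigned int n = get_rxfh_indir_size();

	while (n--)			/* caller guarantees p has indir_size entries */
		p[n] = ind_table[n];
	return 0;
}

int main(void)
{
	unsigned int size = get_rxfh_indir_size();	/* step 1: query the size */
	unsigned int *table = calloc(size, sizeof(*table));
	unsigned int i;

	if (!table)
		return 1;
	get_rxfh_indir(table);				/* step 2: read the table */
	for (i = 0; i < size; i++)
		printf("%u ", table[i]);
	printf("\n");
	free(table);
	return 0;
}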
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index b18eac1dccaa..ed54797db191 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -401,7 +401,7 @@ void
vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter);
int
-vmxnet3_set_features(struct net_device *netdev, u32 features);
+vmxnet3_set_features(struct net_device *netdev, netdev_features_t features);
int
vmxnet3_create_queues(struct vmxnet3_adapter *adapter,
diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c
index 783168cce077..d43f4efd3e07 100644
--- a/drivers/net/wan/sbni.c
+++ b/drivers/net/wan/sbni.c
@@ -155,7 +155,7 @@ static int emancipate( struct net_device * );
static const char version[] =
"Granch SBNI12 driver ver 5.0.1 Jun 22 2001 Denis I.Timofeev.\n";
-static int skip_pci_probe __initdata = 0;
+static bool skip_pci_probe __initdata = false;
static int scandone __initdata = 0;
static int num __initdata = 0;
diff --git a/drivers/net/wan/sealevel.c b/drivers/net/wan/sealevel.c
index 0b4fd05e1508..4f7748478984 100644
--- a/drivers/net/wan/sealevel.c
+++ b/drivers/net/wan/sealevel.c
@@ -362,7 +362,7 @@ static int io=0x238;
static int txdma=1;
static int rxdma=3;
static int irq=5;
-static int slow=0;
+static bool slow=false;
module_param(io, int, 0);
MODULE_PARM_DESC(io, "The I/O base of the Sealevel card");
diff --git a/drivers/net/wimax/i2400m/i2400m.h b/drivers/net/wimax/i2400m/i2400m.h
index c421a6141854..c806d4550212 100644
--- a/drivers/net/wimax/i2400m/i2400m.h
+++ b/drivers/net/wimax/i2400m/i2400m.h
@@ -75,7 +75,7 @@
* device is up and running or shutdown (through ifconfig up /
* down). Bus-generic only.
*
- * - control ops: control.c - implements various commmands for
+ * - control ops: control.c - implements various commands for
* controlling the device. bus-generic only.
*
* - device model glue: driver.c - implements helpers for the
diff --git a/drivers/net/wimax/i2400m/tx.c b/drivers/net/wimax/i2400m/tx.c
index 4b9ecb20deec..f20886ade1cc 100644
--- a/drivers/net/wimax/i2400m/tx.c
+++ b/drivers/net/wimax/i2400m/tx.c
@@ -562,7 +562,7 @@ void i2400m_tx_new(struct i2400m *i2400m)
{
struct device *dev = i2400m_dev(i2400m);
struct i2400m_msg_hdr *tx_msg;
- bool try_head = 0;
+ bool try_head = false;
BUG_ON(i2400m->tx_msg != NULL);
/*
* In certain situations, TX queue might have enough space to
@@ -580,7 +580,7 @@ try_head:
else if (tx_msg == TAIL_FULL) {
i2400m_tx_skip_tail(i2400m);
d_printf(2, dev, "new TX message: tail full, trying head\n");
- try_head = 1;
+ try_head = true;
goto try_head;
}
memset(tx_msg, 0, I2400M_TX_PLD_SIZE);
@@ -720,7 +720,7 @@ int i2400m_tx(struct i2400m *i2400m, const void *buf, size_t buf_len,
unsigned long flags;
size_t padded_len;
void *ptr;
- bool try_head = 0;
+ bool try_head = false;
unsigned is_singleton = pl_type == I2400M_PT_RESET_WARM
|| pl_type == I2400M_PT_RESET_COLD;
@@ -771,7 +771,7 @@ try_new:
d_printf(2, dev, "pl append: tail full\n");
i2400m_tx_close(i2400m);
i2400m_tx_skip_tail(i2400m);
- try_head = 1;
+ try_head = true;
goto try_new;
} else if (ptr == NULL) { /* All full */
result = -ENOSPC;
diff --git a/drivers/net/wimax/i2400m/usb-tx.c b/drivers/net/wimax/i2400m/usb-tx.c
index ac357acfb3e9..99ef81b3d5a5 100644
--- a/drivers/net/wimax/i2400m/usb-tx.c
+++ b/drivers/net/wimax/i2400m/usb-tx.c
@@ -177,7 +177,6 @@ retry:
static
int i2400mu_txd(void *_i2400mu)
{
- int result = 0;
struct i2400mu *i2400mu = _i2400mu;
struct i2400m *i2400m = &i2400mu->i2400m;
struct device *dev = &i2400mu->usb_iface->dev;
@@ -208,16 +207,14 @@ int i2400mu_txd(void *_i2400mu)
/* Yeah, we ignore errors ... not much we can do */
i2400mu_tx(i2400mu, tx_msg, tx_msg_size);
i2400m_tx_msg_sent(i2400m); /* ack it, advance the FIFO */
- if (result < 0)
- break;
}
spin_lock_irqsave(&i2400m->tx_lock, flags);
i2400mu->tx_kthread = NULL;
spin_unlock_irqrestore(&i2400m->tx_lock, flags);
- d_fnend(4, dev, "(i2400mu %p) = %d\n", i2400mu, result);
- return result;
+ d_fnend(4, dev, "(i2400mu %p)\n", i2400mu);
+ return 0;
}
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 0a304b060b6c..98db76196b59 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -42,7 +42,7 @@ obj-$(CONFIG_ADM8211) += adm8211.o
obj-$(CONFIG_MWL8K) += mwl8k.o
obj-$(CONFIG_IWLWIFI) += iwlwifi/
-obj-$(CONFIG_IWLWIFI_LEGACY) += iwlegacy/
+obj-$(CONFIG_IWLEGACY) += iwlegacy/
obj-$(CONFIG_RT2X00) += rt2x00/
obj-$(CONFIG_P54_COMMON) += p54/
@@ -58,6 +58,6 @@ obj-$(CONFIG_WL12XX_PLATFORM_DATA) += wl12xx/
obj-$(CONFIG_IWM) += iwmc3200wifi/
obj-$(CONFIG_MWIFIEX) += mwifiex/
-obj-$(CONFIG_BRCMFMAC) += brcm80211/
-obj-$(CONFIG_BRCMUMAC) += brcm80211/
-obj-$(CONFIG_BRCMSMAC) += brcm80211/
+
+obj-$(CONFIG_BRCMFMAC) += brcm80211/
+obj-$(CONFIG_BRCMSMAC) += brcm80211/
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index ac1176a4f465..1c008c61b95c 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -1418,7 +1418,7 @@ static int encapsulate(struct airo_info *ai ,etherHead *frame, MICBuffer *mic, i
emmh32_update(&context->seed,frame->da,ETH_ALEN * 2); // DA,SA
emmh32_update(&context->seed,(u8*)&mic->typelen,10); // Type/Length and Snap
emmh32_update(&context->seed,(u8*)&mic->seq,sizeof(mic->seq)); //SEQ
- emmh32_update(&context->seed,frame->da + ETH_ALEN * 2,payLen); //payload
+ emmh32_update(&context->seed,(u8*)(frame + 1),payLen); //payload
emmh32_final(&context->seed, (u8*)&mic->mic);
/* New Type/length ?????????? */
@@ -1506,7 +1506,7 @@ static int decapsulate(struct airo_info *ai, MICBuffer *mic, etherHead *eth, u16
emmh32_update(&context->seed, eth->da, ETH_ALEN*2);
emmh32_update(&context->seed, (u8 *)&mic->typelen, sizeof(mic->typelen)+sizeof(mic->u.snap));
emmh32_update(&context->seed, (u8 *)&mic->seq,sizeof(mic->seq));
- emmh32_update(&context->seed, eth->da + ETH_ALEN*2,payLen);
+ emmh32_update(&context->seed, (u8 *)(eth + 1),payLen);
//Calculate MIC
emmh32_final(&context->seed, digest);
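frame->da + ETH_ALEN * 2 and (u8 *)(frame + 1) name the same byte when etherHead is just the two six-byte MAC addresses, but the second form no longer indexes past the declared bounds of the da[] array. A small standalone check of that equivalence, assuming the driver's etherHead layout:

#include <stdio.h>
#include <stdint.h>

#define ETH_ALEN 6

typedef struct {
	uint8_t da[ETH_ALEN];	/* destination MAC */
	uint8_t sa[ETH_ALEN];	/* source MAC */
} etherHead;			/* 12 bytes, no padding */

int main(void)
{
	etherHead frame;
	uint8_t *base = (uint8_t *)&frame;

	/* old expression: 12 bytes past the start of the 6-byte da[] */
	printf("da + 2 * ETH_ALEN -> offset %td\n",
	       (frame.da + ETH_ALEN * 2) - base);
	/* new expression: first byte after the whole header */
	printf("(frame + 1)       -> offset %td\n",
	       (uint8_t *)(&frame + 1) - base);
	return 0;
}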
diff --git a/drivers/net/wireless/ath/Makefile b/drivers/net/wireless/ath/Makefile
index d1214696a35b..d716b748e574 100644
--- a/drivers/net/wireless/ath/Makefile
+++ b/drivers/net/wireless/ath/Makefile
@@ -11,3 +11,4 @@ ath-objs := main.o \
key.o
ath-$(CONFIG_ATH_DEBUG) += debug.o
+ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index 0f9ee46cfc97..efc01110dc34 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -152,6 +152,7 @@ struct ath_common {
struct ath_cycle_counters cc_survey;
struct ath_regulatory regulatory;
+ struct ath_regulatory reg_world_copy;
const struct ath_ops *ops;
const struct ath_bus_ops *bus_ops;
@@ -214,6 +215,10 @@ do { \
* @ATH_DBG_HWTIMER: hardware timer handling
* @ATH_DBG_BTCOEX: bluetooth coexistance
* @ATH_DBG_BSTUCK: stuck beacons
+ * @ATH_DBG_MCI: Message Coexistence Interface, a private protocol
+ * used exclusively for WLAN-BT coexistence starting from
+ * AR9462.
+ * @ATH_DBG_DFS: radar detection

* @ATH_DBG_ANY: enable all debugging
*
* The debug level is used to control the amount and type of debugging output
@@ -239,6 +244,8 @@ enum ATH_DEBUG {
ATH_DBG_BTCOEX = 0x00002000,
ATH_DBG_WMI = 0x00004000,
ATH_DBG_BSTUCK = 0x00008000,
+ ATH_DBG_MCI = 0x00010000,
+ ATH_DBG_DFS = 0x00020000,
ATH_DBG_ANY = 0xffffffff
};
@@ -248,7 +255,7 @@ enum ATH_DEBUG {
#define ath_dbg(common, dbg_mask, fmt, ...) \
do { \
- if ((common)->debug_mask & dbg_mask) \
+ if ((common)->debug_mask & ATH_DBG_##dbg_mask) \
_ath_printk(KERN_DEBUG, common, fmt, ##__VA_ARGS__); \
} while (0)
@@ -258,10 +265,13 @@ do { \
#else
static inline __attribute__ ((format (printf, 3, 4)))
-void ath_dbg(struct ath_common *common, enum ATH_DEBUG dbg_mask,
+void _ath_dbg(struct ath_common *common, enum ATH_DEBUG dbg_mask,
const char *fmt, ...)
{
}
+#define ath_dbg(common, dbg_mask, fmt, ...) \
+ _ath_dbg(common, ATH_DBG_##dbg_mask, fmt, ##__VA_ARGS__)
+
#define ATH_DBG_WARN(foo, arg...) do {} while (0)
#define ATH_DBG_WARN_ON_ONCE(foo) ({ \
int __ret_warn_once = !!(foo); \
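Pasting the ATH_DBG_ prefix inside the macro lets call sites name just the debug class (ath_dbg(common, MCI, ...)), and the !CONFIG_ATH_DEBUG stub keeps format checking by routing through the empty _ath_dbg(). A standalone illustration of the token-pasting idea, with printf standing in for _ath_printk and a single global mask instead of ath_common (the enum values are the ones added above):

#include <stdio.h>

enum ATH_DEBUG {
	ATH_DBG_BTCOEX = 0x00002000,
	ATH_DBG_MCI    = 0x00010000,
	ATH_DBG_DFS    = 0x00020000,
};

static unsigned int debug_mask = ATH_DBG_MCI;

/* the caller writes the short class name; ## pastes on the prefix */
#define ath_dbg(dbg_mask, fmt, ...)				\
do {								\
	if (debug_mask & ATH_DBG_##dbg_mask)			\
		printf(fmt, ##__VA_ARGS__);			\
} while (0)

int main(void)
{
	ath_dbg(MCI, "MCI message is printed (mask 0x%x)\n", debug_mask);
	ath_dbg(DFS, "DFS message is filtered out\n");
	return 0;
}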
diff --git a/drivers/net/wireless/ath/ath5k/ahb.c b/drivers/net/wireless/ath/ath5k/ahb.c
index e5be7e701816..ee7ea572b065 100644
--- a/drivers/net/wireless/ath/ath5k/ahb.c
+++ b/drivers/net/wireless/ath/ath5k/ahb.c
@@ -166,7 +166,9 @@ static int ath_ahb_probe(struct platform_device *pdev)
if (to_platform_device(ah->dev)->id == 0 &&
(bcfg->config->flags & (BD_WLAN0 | BD_WLAN1)) ==
(BD_WLAN1 | BD_WLAN0))
- __set_bit(ATH_STAT_2G_DISABLED, ah->status);
+ ah->ah_capabilities.cap_needs_2GHz_ovr = true;
+ else
+ ah->ah_capabilities.cap_needs_2GHz_ovr = false;
}
ret = ath5k_init_ah(ah, &ath_ahb_bus_ops);
diff --git a/drivers/net/wireless/ath/ath5k/ani.c b/drivers/net/wireless/ath/ath5k/ani.c
index bea90e6be70e..bf674161a217 100644
--- a/drivers/net/wireless/ath/ath5k/ani.c
+++ b/drivers/net/wireless/ath/ath5k/ani.c
@@ -27,15 +27,21 @@
* or reducing sensitivity as necessary.
*
* The parameters are:
+ *
* - "noise immunity"
+ *
* - "spur immunity"
+ *
* - "firstep level"
+ *
* - "OFDM weak signal detection"
+ *
* - "CCK weak signal detection"
*
* Basically we look at the amount of ODFM and CCK timing errors we get and then
* raise or lower immunity accordingly by setting one or more of these
* parameters.
+ *
* Newer chipsets have PHY error counters in hardware which will generate a MIB
* interrupt when they overflow. Older hardware has too enable PHY error frames
* by setting a RX flag and then count every single PHY error. When a specified
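The block above describes the control loop: PHY error counts are judged against the time actually spent listening, immunity is raised quickly when the error rate is high and lowered only after a long quiet stretch. A standalone sketch of that decision with purely illustrative per-period thresholds (the real limits live in ani.h):

#include <stdio.h>

/* illustrative thresholds: allowed errors per listen period */
#define OFDM_HIGH 500
#define CCK_HIGH  200
#define OFDM_LOW  100
#define CCK_LOW    50

static void ani_decide(long listen_ms, long period_ms,
		       long ofdm_errors, long cck_errors)
{
	/* scale the per-period limits by how long we really listened */
	long ofdm_high = OFDM_HIGH * listen_ms / period_ms;
	long cck_high  = CCK_HIGH  * listen_ms / period_ms;
	long ofdm_low  = OFDM_LOW  * listen_ms / period_ms;
	long cck_low   = CCK_LOW   * listen_ms / period_ms;

	if (ofdm_errors > ofdm_high || cck_errors > cck_high)
		printf("raise immunity (ofdm_trigger=%d)\n",
		       ofdm_errors > ofdm_high);
	else if (listen_ms > 5 * period_ms &&
		 ofdm_errors <= ofdm_low && cck_errors <= cck_low)
		printf("lower immunity\n");
	else
		printf("keep current settings\n");
}

int main(void)
{
	ani_decide(1000, 1000, 800, 50);	/* noisy channel: raise */
	ani_decide(6000, 1000, 80, 10);		/* long quiet stretch: lower */
	return 0;
}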
@@ -45,11 +51,13 @@
*/
-/*** ANI parameter control ***/
+/***********************\
+* ANI parameter control *
+\***********************/
/**
* ath5k_ani_set_noise_immunity_level() - Set noise immunity level
- *
+ * @ah: The &struct ath5k_hw
* @level: level between 0 and @ATH5K_ANI_MAX_NOISE_IMM_LVL
*/
void
@@ -91,12 +99,11 @@ ath5k_ani_set_noise_immunity_level(struct ath5k_hw *ah, int level)
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "new level %d", level);
}
-
/**
* ath5k_ani_set_spur_immunity_level() - Set spur immunity level
- *
+ * @ah: The &struct ath5k_hw
* @level: level between 0 and @max_spur_level (the maximum level is dependent
- * on the chip revision).
+ * on the chip revision).
*/
void
ath5k_ani_set_spur_immunity_level(struct ath5k_hw *ah, int level)
@@ -117,10 +124,9 @@ ath5k_ani_set_spur_immunity_level(struct ath5k_hw *ah, int level)
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "new level %d", level);
}
-
/**
* ath5k_ani_set_firstep_level() - Set "firstep" level
- *
+ * @ah: The &struct ath5k_hw
* @level: level between 0 and @ATH5K_ANI_MAX_FIRSTEP_LVL
*/
void
@@ -140,11 +146,9 @@ ath5k_ani_set_firstep_level(struct ath5k_hw *ah, int level)
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "new level %d", level);
}
-
/**
- * ath5k_ani_set_ofdm_weak_signal_detection() - Control OFDM weak signal
- * detection
- *
+ * ath5k_ani_set_ofdm_weak_signal_detection() - Set OFDM weak signal detection
+ * @ah: The &struct ath5k_hw
* @on: turn on or off
*/
void
@@ -182,10 +186,9 @@ ath5k_ani_set_ofdm_weak_signal_detection(struct ath5k_hw *ah, bool on)
on ? "on" : "off");
}
-
/**
- * ath5k_ani_set_cck_weak_signal_detection() - control CCK weak signal detection
- *
+ * ath5k_ani_set_cck_weak_signal_detection() - Set CCK weak signal detection
+ * @ah: The &struct ath5k_hw
* @on: turn on or off
*/
void
@@ -200,13 +203,16 @@ ath5k_ani_set_cck_weak_signal_detection(struct ath5k_hw *ah, bool on)
}
-/*** ANI algorithm ***/
+/***************\
+* ANI algorithm *
+\***************/
/**
* ath5k_ani_raise_immunity() - Increase noise immunity
- *
+ * @ah: The &struct ath5k_hw
+ * @as: The &struct ath5k_ani_state
* @ofdm_trigger: If this is true we are called because of too many OFDM errors,
- * the algorithm will tune more parameters then.
+ * the algorithm will tune more parameters then.
*
* Try to raise noise immunity (=decrease sensitivity) in several steps
* depending on the average RSSI of the beacons we received.
@@ -290,9 +296,10 @@ ath5k_ani_raise_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as,
*/
}
-
/**
* ath5k_ani_lower_immunity() - Decrease noise immunity
+ * @ah: The &struct ath5k_hw
+ * @as: The &struct ath5k_ani_state
*
* Try to lower noise immunity (=increase sensitivity) in several steps
* depending on the average RSSI of the beacons we received.
@@ -352,9 +359,10 @@ ath5k_ani_lower_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as)
}
}
-
/**
* ath5k_hw_ani_get_listen_time() - Update counters and return listening time
+ * @ah: The &struct ath5k_hw
+ * @as: The &struct ath5k_ani_state
*
* Return an approximation of the time spent "listening" in milliseconds (ms)
* since the last call of this function.
@@ -379,9 +387,10 @@ ath5k_hw_ani_get_listen_time(struct ath5k_hw *ah, struct ath5k_ani_state *as)
return listen;
}
-
/**
* ath5k_ani_save_and_clear_phy_errors() - Clear and save PHY error counters
+ * @ah: The &struct ath5k_hw
+ * @as: The &struct ath5k_ani_state
*
* Clear the PHY error counters as soon as possible, since this might be called
* from a MIB interrupt and we want to make sure we don't get interrupted again.
@@ -429,14 +438,14 @@ ath5k_ani_save_and_clear_phy_errors(struct ath5k_hw *ah,
return 1;
}
-
/**
* ath5k_ani_period_restart() - Restart ANI period
+ * @as: The &struct ath5k_ani_state
*
* Just reset counters, so they are clear for the next "ani period".
*/
static void
-ath5k_ani_period_restart(struct ath5k_hw *ah, struct ath5k_ani_state *as)
+ath5k_ani_period_restart(struct ath5k_ani_state *as)
{
/* keep last values for debugging */
as->last_ofdm_errors = as->ofdm_errors;
@@ -448,9 +457,9 @@ ath5k_ani_period_restart(struct ath5k_hw *ah, struct ath5k_ani_state *as)
as->listen_time = 0;
}
-
/**
* ath5k_ani_calibration() - The main ANI calibration function
+ * @ah: The &struct ath5k_hw
*
* We count OFDM and CCK errors relative to the time where we did not send or
* receive ("listen" time) and raise or lower immunity accordingly.
@@ -492,7 +501,7 @@ ath5k_ani_calibration(struct ath5k_hw *ah)
/* too many PHY errors - we have to raise immunity */
bool ofdm_flag = as->ofdm_errors > ofdm_high ? true : false;
ath5k_ani_raise_immunity(ah, as, ofdm_flag);
- ath5k_ani_period_restart(ah, as);
+ ath5k_ani_period_restart(as);
} else if (as->listen_time > 5 * ATH5K_ANI_LISTEN_PERIOD) {
/* If more than 5 (TODO: why 5?) periods have passed and we got
@@ -504,15 +513,18 @@ ath5k_ani_calibration(struct ath5k_hw *ah)
if (as->ofdm_errors <= ofdm_low && as->cck_errors <= cck_low)
ath5k_ani_lower_immunity(ah, as);
- ath5k_ani_period_restart(ah, as);
+ ath5k_ani_period_restart(as);
}
}
-/*** INTERRUPT HANDLER ***/
+/*******************\
+* Interrupt handler *
+\*******************/
/**
* ath5k_ani_mib_intr() - Interrupt handler for ANI MIB counters
+ * @ah: The &struct ath5k_hw
*
* Just read & reset the registers quickly, so they don't generate more
* interrupts, save the counters and schedule the tasklet to decide whether
@@ -549,9 +561,11 @@ ath5k_ani_mib_intr(struct ath5k_hw *ah)
tasklet_schedule(&ah->ani_tasklet);
}
-
/**
- * ath5k_ani_phy_error_report() - Used by older HW to report PHY errors
+ * ath5k_ani_phy_error_report - Used by older HW to report PHY errors
+ *
+ * @ah: The &struct ath5k_hw
+ * @phyerr: One of enum ath5k_phy_error_code
*
* This is used by hardware without PHY error counters to report PHY errors
* on a frame-by-frame basis, instead of the interrupt.
@@ -574,10 +588,13 @@ ath5k_ani_phy_error_report(struct ath5k_hw *ah,
}
-/*** INIT ***/
+/****************\
+* Initialization *
+\****************/
/**
* ath5k_enable_phy_err_counters() - Enable PHY error counters
+ * @ah: The &struct ath5k_hw
*
* Enable PHY error counters for OFDM and CCK timing errors.
*/
@@ -596,9 +613,9 @@ ath5k_enable_phy_err_counters(struct ath5k_hw *ah)
ath5k_hw_reg_write(ah, 0, AR5K_CCK_FIL_CNT);
}
-
/**
* ath5k_disable_phy_err_counters() - Disable PHY error counters
+ * @ah: The &struct ath5k_hw
*
* Disable PHY error counters for OFDM and CCK timing errors.
*/
@@ -615,10 +632,10 @@ ath5k_disable_phy_err_counters(struct ath5k_hw *ah)
ath5k_hw_reg_write(ah, 0, AR5K_CCK_FIL_CNT);
}
-
/**
* ath5k_ani_init() - Initialize ANI
- * @mode: Which mode to use (auto, manual high, manual low, off)
+ * @ah: The &struct ath5k_hw
+ * @mode: One of enum ath5k_ani_mode
*
* Initialize ANI according to mode.
*/
@@ -695,10 +712,18 @@ ath5k_ani_init(struct ath5k_hw *ah, enum ath5k_ani_mode mode)
}
-/*** DEBUG ***/
+/**************\
+* Debug output *
+\**************/
#ifdef CONFIG_ATH5K_DEBUG
+/**
+ * ath5k_ani_print_counters() - Print ANI counters
+ * @ah: The &struct ath5k_hw
+ *
+ * Used for debugging ANI
+ */
void
ath5k_ani_print_counters(struct ath5k_hw *ah)
{
diff --git a/drivers/net/wireless/ath/ath5k/ani.h b/drivers/net/wireless/ath/ath5k/ani.h
index 7358b6c83c6c..21aa355460bb 100644
--- a/drivers/net/wireless/ath/ath5k/ani.h
+++ b/drivers/net/wireless/ath/ath5k/ani.h
@@ -40,13 +40,13 @@ enum ath5k_phy_error_code;
* enum ath5k_ani_mode - mode for ANI / noise sensitivity
*
* @ATH5K_ANI_MODE_OFF: Turn ANI off. This can be useful to just stop the ANI
- * algorithm after it has been on auto mode.
- * ATH5K_ANI_MODE_MANUAL_LOW: Manually set all immunity parameters to low,
- * maximizing sensitivity. ANI will not run.
- * ATH5K_ANI_MODE_MANUAL_HIGH: Manually set all immunity parameters to high,
- * minimizing sensitivity. ANI will not run.
- * ATH5K_ANI_MODE_AUTO: Automatically control immunity parameters based on the
- * amount of OFDM and CCK frame errors (default).
+ * algorithm after it has been on auto mode.
+ * @ATH5K_ANI_MODE_MANUAL_LOW: Manually set all immunity parameters to low,
+ * maximizing sensitivity. ANI will not run.
+ * @ATH5K_ANI_MODE_MANUAL_HIGH: Manually set all immunity parameters to high,
+ * minimizing sensitivity. ANI will not run.
+ * @ATH5K_ANI_MODE_AUTO: Automatically control immunity parameters based on the
+ * amount of OFDM and CCK frame errors (default).
*/
enum ath5k_ani_mode {
ATH5K_ANI_MODE_OFF = 0,
@@ -58,8 +58,22 @@ enum ath5k_ani_mode {
/**
* struct ath5k_ani_state - ANI state and associated counters
- *
- * @max_spur_level: the maximum spur level is chip dependent
+ * @ani_mode: One of enum ath5k_ani_mode
+ * @noise_imm_level: Noise immunity level
+ * @spur_level: Spur immunity level
+ * @firstep_level: FIRstep level
+ * @ofdm_weak_sig: OFDM weak signal detection state (on/off)
+ * @cck_weak_sig: CCK weak signal detection state (on/off)
+ * @max_spur_level: Max spur immunity level (chip specific)
+ * @listen_time: Listen time
+ * @ofdm_errors: OFDM timing error count
+ * @cck_errors: CCK timing error count
+ * @last_cc: The &struct ath_cycle_counters (for stats)
+ * @last_listen: Listen time from previous run (for stats)
+ * @last_ofdm_errors: OFDM timing error count from previous run (for stats)
+ * @last_cck_errors: CCK timing error count from previous run (for stats)
+ * @sum_ofdm_errors: Sum of OFDM timing errors (for stats)
+ * @sum_cck_errors: Sum of all CCK timing errors (for stats)
*/
struct ath5k_ani_state {
enum ath5k_ani_mode ani_mode;
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index fecbcd9a4259..c2b2518c2ecd 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -187,10 +187,9 @@
#define AR5K_TUNE_MAX_TXPOWER 63
#define AR5K_TUNE_DEFAULT_TXPOWER 25
#define AR5K_TUNE_TPC_TXPOWER false
-#define ATH5K_TUNE_CALIBRATION_INTERVAL_FULL 10000 /* 10 sec */
+#define ATH5K_TUNE_CALIBRATION_INTERVAL_FULL 60000 /* 60 sec */
+#define ATH5K_TUNE_CALIBRATION_INTERVAL_SHORT 10000 /* 10 sec */
#define ATH5K_TUNE_CALIBRATION_INTERVAL_ANI 1000 /* 1 sec */
-#define ATH5K_TUNE_CALIBRATION_INTERVAL_NF 60000 /* 60 sec */
-
#define ATH5K_TX_COMPLETE_POLL_INT 3000 /* 3 sec */
#define AR5K_INIT_CARR_SENSE_EN 1
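Splitting the old NF interval into FULL (60 s) and SHORT (10 s) lets the deferred calibration worker run the cheap short pass often and the expensive full pass rarely, with ANI still ticking every second. A sketch of picking the next step from elapsed time, assuming millisecond timestamps and using the FULL/SHORT meanings given by the calibration-mask comments later in this header:

#include <stdio.h>

#define INTERVAL_FULL	60000	/* 60 sec */
#define INTERVAL_SHORT	10000	/* 10 sec */
#define INTERVAL_ANI	 1000	/*  1 sec */

static const char *pick_calibration(unsigned long now,
				    unsigned long last_full,
				    unsigned long last_short,
				    unsigned long last_ani)
{
	if (now - last_full >= INTERVAL_FULL)
		return "full (AGC + short)";
	if (now - last_short >= INTERVAL_SHORT)
		return "short (NF + I/Q)";
	if (now - last_ani >= INTERVAL_ANI)
		return "ANI";
	return "none";
}

int main(void)
{
	printf("%s\n", pick_calibration(61000, 0, 55000, 60500));	/* full */
	printf("%s\n", pick_calibration(12000, 0, 0, 11500));		/* short */
	return 0;
}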
@@ -262,16 +261,34 @@
#define AR5K_AGC_SETTLING_TURBO 37
-/* GENERIC CHIPSET DEFINITIONS */
-/* MAC Chips */
+/*****************************\
+* GENERIC CHIPSET DEFINITIONS *
+\*****************************/
+
+/**
+ * enum ath5k_version - MAC Chips
+ * @AR5K_AR5210: AR5210 (Crete)
+ * @AR5K_AR5211: AR5211 (Oahu/Maui)
+ * @AR5K_AR5212: AR5212 (Venice) and newer
+ */
enum ath5k_version {
AR5K_AR5210 = 0,
AR5K_AR5211 = 1,
AR5K_AR5212 = 2,
};
-/* PHY Chips */
+/**
+ * enum ath5k_radio - PHY Chips
+ * @AR5K_RF5110: RF5110 (Fez)
+ * @AR5K_RF5111: RF5111 (Sombrero)
+ * @AR5K_RF5112: RF2112/5112(A) (Derby/Derby2)
+ * @AR5K_RF2413: RF2413/2414 (Griffin/Griffin-Lite)
+ * @AR5K_RF5413: RF5413/5414/5424 (Eagle/Condor)
+ * @AR5K_RF2316: RF2315/2316 (Cobra SoC)
+ * @AR5K_RF2317: RF2317 (Spider SoC)
+ * @AR5K_RF2425: RF2425/2417 (Swan/Nalla)
+ */
enum ath5k_radio {
AR5K_RF5110 = 0,
AR5K_RF5111 = 1,
@@ -303,11 +320,11 @@ enum ath5k_radio {
#define AR5K_SREV_AR5213A 0x59 /* Hainan */
#define AR5K_SREV_AR2413 0x78 /* Griffin lite */
#define AR5K_SREV_AR2414 0x70 /* Griffin */
-#define AR5K_SREV_AR2315_R6 0x86 /* AP51-Light */
-#define AR5K_SREV_AR2315_R7 0x87 /* AP51-Full */
+#define AR5K_SREV_AR2315_R6 0x86 /* AP51-Light */
+#define AR5K_SREV_AR2315_R7 0x87 /* AP51-Full */
#define AR5K_SREV_AR5424 0x90 /* Condor */
-#define AR5K_SREV_AR2317_R1 0x90 /* AP61-Light */
-#define AR5K_SREV_AR2317_R2 0x91 /* AP61-Full */
+#define AR5K_SREV_AR2317_R1 0x90 /* AP61-Light */
+#define AR5K_SREV_AR2317_R2 0x91 /* AP61-Full */
#define AR5K_SREV_AR5413 0xa4 /* Eagle lite */
#define AR5K_SREV_AR5414 0xa0 /* Eagle */
#define AR5K_SREV_AR2415 0xb0 /* Talon */
@@ -344,32 +361,40 @@ enum ath5k_radio {
/* TODO add support to mac80211 for vendor-specific rates and modes */
-/*
+/**
+ * DOC: Atheros XR
+ *
* Some of this information is based on Documentation from:
*
* http://madwifi-project.org/wiki/ChipsetFeatures/SuperAG
*
- * Modulation for Atheros' eXtended Range - range enhancing extension that is
- * supposed to double the distance an Atheros client device can keep a
- * connection with an Atheros access point. This is achieved by increasing
- * the receiver sensitivity up to, -105dBm, which is about 20dB above what
- * the 802.11 specifications demand. In addition, new (proprietary) data rates
- * are introduced: 3, 2, 1, 0.5 and 0.25 MBit/s.
+ * Atheros' eXtended Range - range enhancing extension is a modulation scheme
+ * that is supposed to double the link distance between an Atheros XR-enabled
+ * client device with an Atheros XR-enabled access point. This is achieved
+ * by increasing the receiver sensitivity up to -105dBm, which is about 20dB
+ * above what the 802.11 specifications demand. In addition, new (proprietary)
+ * data rates are introduced: 3, 2, 1, 0.5 and 0.25 MBit/s.
*
* Please note that can you either use XR or TURBO but you cannot use both,
* they are exclusive.
*
+ * Also note that we do not plan to support XR mode at least for now. You can
+ * get a mode similar to XR by using 5MHz bwmode.
*/
-#define MODULATION_XR 0x00000200
-/*
- * Modulation for Atheros' Turbo G and Turbo A, its supposed to provide a
- * throughput transmission speed up to 40Mbit/s-60Mbit/s at a 108Mbit/s
- * signaling rate achieved through the bonding of two 54Mbit/s 802.11g
- * channels. To use this feature your Access Point must also support it.
+
+
+/**
+ * DOC: Atheros SuperAG
+ *
+ * In addition to XR we have another modulation scheme called TURBO mode
+ * that is supposed to provide a throughput transmission speed up to 40Mbit/s
+ * -60Mbit/s at a 108Mbit/s signaling rate achieved through the bonding of two
+ * 54Mbit/s 802.11g channels. To use this feature both ends must support it.
* There is also a distinction between "static" and "dynamic" turbo modes:
*
* - Static: is the dumb version: devices set to this mode stick to it until
* the mode is turned off.
+ *
* - Dynamic: is the intelligent version, the network decides itself if it
* is ok to use turbo. As soon as traffic is detected on adjacent channels
* (which would get used in turbo mode), or when a non-turbo station joins
@@ -383,24 +408,39 @@ enum ath5k_radio {
*
* http://www.pcworld.com/article/id,113428-page,1/article.html
*
- * The channel bonding seems to be driver specific though. In addition to
- * deciding what channels will be used, these "Turbo" modes are accomplished
- * by also enabling the following features:
+ * The channel bonding seems to be driver specific though.
+ *
+ * In addition to TURBO modes we also have the following features for even
+ * greater speed-up:
*
* - Bursting: allows multiple frames to be sent at once, rather than pausing
* after each frame. Bursting is a standards-compliant feature that can be
* used with any Access Point.
+ *
* - Fast frames: increases the amount of information that can be sent per
* frame, also resulting in a reduction of transmission overhead. It is a
* proprietary feature that needs to be supported by the Access Point.
+ *
* - Compression: data frames are compressed in real time using a Lempel Ziv
* algorithm. This is done transparently. Once this feature is enabled,
* compression and decompression takes place inside the chipset, without
* putting additional load on the host CPU.
*
+ * As with XR we also don't plan to support SuperAG features for now. You can
+ * get a mode similar to TURBO by using 40MHz bwmode.
*/
-#define MODULATION_TURBO 0x00000080
+
+/**
+ * enum ath5k_driver_mode - PHY operation mode
+ * @AR5K_MODE_11A: 802.11a
+ * @AR5K_MODE_11B: 802.11b
+ * @AR5K_MODE_11G: 802.11g
+ * @AR5K_MODE_MAX: Used for boundary checks
+ *
+ * Do not change the order here, we use these as
+ * array indices and it also maps EEPROM structures.
+ */
enum ath5k_driver_mode {
AR5K_MODE_11A = 0,
AR5K_MODE_11B = 1,
@@ -408,30 +448,64 @@ enum ath5k_driver_mode {
AR5K_MODE_MAX = 3
};
+/**
+ * enum ath5k_ant_mode - Antenna operation mode
+ * @AR5K_ANTMODE_DEFAULT: Default antenna setup
+ * @AR5K_ANTMODE_FIXED_A: Only antenna A is present
+ * @AR5K_ANTMODE_FIXED_B: Only antenna B is present
+ * @AR5K_ANTMODE_SINGLE_AP: STA locked on a single ap
+ * @AR5K_ANTMODE_SECTOR_AP: AP with tx antenna set on tx desc
+ * @AR5K_ANTMODE_SECTOR_STA: STA with tx antenna set on tx desc
+ * @AR5K_ANTMODE_DEBUG: Debug mode -A -> Rx, B-> Tx-
+ * @AR5K_ANTMODE_MAX: Used for boundary checks
+ *
+ * For more info on antenna control check out phy.c
+ */
enum ath5k_ant_mode {
- AR5K_ANTMODE_DEFAULT = 0, /* default antenna setup */
- AR5K_ANTMODE_FIXED_A = 1, /* only antenna A is present */
- AR5K_ANTMODE_FIXED_B = 2, /* only antenna B is present */
- AR5K_ANTMODE_SINGLE_AP = 3, /* sta locked on a single ap */
- AR5K_ANTMODE_SECTOR_AP = 4, /* AP with tx antenna set on tx desc */
- AR5K_ANTMODE_SECTOR_STA = 5, /* STA with tx antenna set on tx desc */
- AR5K_ANTMODE_DEBUG = 6, /* Debug mode -A -> Rx, B-> Tx- */
+ AR5K_ANTMODE_DEFAULT = 0,
+ AR5K_ANTMODE_FIXED_A = 1,
+ AR5K_ANTMODE_FIXED_B = 2,
+ AR5K_ANTMODE_SINGLE_AP = 3,
+ AR5K_ANTMODE_SECTOR_AP = 4,
+ AR5K_ANTMODE_SECTOR_STA = 5,
+ AR5K_ANTMODE_DEBUG = 6,
AR5K_ANTMODE_MAX,
};
+/**
+ * enum ath5k_bw_mode - Bandwidth operation mode
+ * @AR5K_BWMODE_DEFAULT: 20MHz, default operation
+ * @AR5K_BWMODE_5MHZ: Quarter rate
+ * @AR5K_BWMODE_10MHZ: Half rate
+ * @AR5K_BWMODE_40MHZ: Turbo
+ */
enum ath5k_bw_mode {
- AR5K_BWMODE_DEFAULT = 0, /* 20MHz, default operation */
- AR5K_BWMODE_5MHZ = 1, /* Quarter rate */
- AR5K_BWMODE_10MHZ = 2, /* Half rate */
- AR5K_BWMODE_40MHZ = 3 /* Turbo */
+ AR5K_BWMODE_DEFAULT = 0,
+ AR5K_BWMODE_5MHZ = 1,
+ AR5K_BWMODE_10MHZ = 2,
+ AR5K_BWMODE_40MHZ = 3
};
+
+
/****************\
TX DEFINITIONS
\****************/
-/*
- * TX Status descriptor
+/**
+ * struct ath5k_tx_status - TX Status descriptor
+ * @ts_seqnum: Sequence number
+ * @ts_tstamp: Timestamp
+ * @ts_status: Status code
+ * @ts_final_idx: Final transmission series index
+ * @ts_final_retry: Final retry count
+ * @ts_rssi: RSSI for received ACK
+ * @ts_shortretry: Short retry count
+ * @ts_virtcol: Virtual collision count
+ * @ts_antenna: Antenna used
+ *
+ * TX status descriptor gets filled by the hw
+ * on each transmission attempt.
*/
struct ath5k_tx_status {
u16 ts_seqnum;
@@ -454,7 +528,6 @@ struct ath5k_tx_status {
* enum ath5k_tx_queue - Queue types used to classify tx queues.
* @AR5K_TX_QUEUE_INACTIVE: q is unused -- see ath5k_hw_release_tx_queue
* @AR5K_TX_QUEUE_DATA: A normal data queue
- * @AR5K_TX_QUEUE_XR_DATA: An XR-data queue
* @AR5K_TX_QUEUE_BEACON: The beacon queue
* @AR5K_TX_QUEUE_CAB: The after-beacon queue
* @AR5K_TX_QUEUE_UAPSD: Unscheduled Automatic Power Save Delivery queue
@@ -462,7 +535,6 @@ struct ath5k_tx_status {
enum ath5k_tx_queue {
AR5K_TX_QUEUE_INACTIVE = 0,
AR5K_TX_QUEUE_DATA,
- AR5K_TX_QUEUE_XR_DATA,
AR5K_TX_QUEUE_BEACON,
AR5K_TX_QUEUE_CAB,
AR5K_TX_QUEUE_UAPSD,
@@ -471,36 +543,46 @@ enum ath5k_tx_queue {
#define AR5K_NUM_TX_QUEUES 10
#define AR5K_NUM_TX_QUEUES_NOQCU 2
-/*
- * Queue syb-types to classify normal data queues.
+/**
+ * enum ath5k_tx_queue_subtype - Queue sub-types to classify normal data queues
+ * @AR5K_WME_AC_BK: Background traffic
+ * @AR5K_WME_AC_BE: Best-effort (normal) traffic
+ * @AR5K_WME_AC_VI: Video traffic
+ * @AR5K_WME_AC_VO: Voice traffic
+ *
* These are the 4 Access Categories as defined in
* WME spec. 0 is the lowest priority and 4 is the
* highest. Normal data that hasn't been classified
* goes to the Best Effort AC.
*/
enum ath5k_tx_queue_subtype {
- AR5K_WME_AC_BK = 0, /*Background traffic*/
- AR5K_WME_AC_BE, /*Best-effort (normal) traffic*/
- AR5K_WME_AC_VI, /*Video traffic*/
- AR5K_WME_AC_VO, /*Voice traffic*/
+ AR5K_WME_AC_BK = 0,
+ AR5K_WME_AC_BE,
+ AR5K_WME_AC_VI,
+ AR5K_WME_AC_VO,
};
-/*
- * Queue ID numbers as returned by the hw functions, each number
- * represents a hw queue. If hw does not support hw queues
- * (eg 5210) all data goes in one queue. These match
- * d80211 definitions (net80211/MadWiFi don't use them).
+/**
+ * enum ath5k_tx_queue_id - Queue ID numbers as returned by the hw functions
+ * @AR5K_TX_QUEUE_ID_NOQCU_DATA: Data queue on AR5210 (no QCU available)
+ * @AR5K_TX_QUEUE_ID_NOQCU_BEACON: Beacon queue on AR5210 (no QCU available)
+ * @AR5K_TX_QUEUE_ID_DATA_MIN: Data queue min index
+ * @AR5K_TX_QUEUE_ID_DATA_MAX: Data queue max index
+ * @AR5K_TX_QUEUE_ID_CAB: Content after beacon queue
+ * @AR5K_TX_QUEUE_ID_BEACON: Beacon queue
+ * @AR5K_TX_QUEUE_ID_UAPSD: Unscheduled Automatic Power Save Delivery queue
+ *
+ * Each number represents a hw queue. If hw does not support hw queues
+ * (eg 5210) all data goes in one queue.
*/
enum ath5k_tx_queue_id {
AR5K_TX_QUEUE_ID_NOQCU_DATA = 0,
AR5K_TX_QUEUE_ID_NOQCU_BEACON = 1,
- AR5K_TX_QUEUE_ID_DATA_MIN = 0, /*IEEE80211_TX_QUEUE_DATA0*/
- AR5K_TX_QUEUE_ID_DATA_MAX = 3, /*IEEE80211_TX_QUEUE_DATA3*/
- AR5K_TX_QUEUE_ID_DATA_SVP = 5, /*IEEE80211_TX_QUEUE_SVP - Spectralink Voice Protocol*/
- AR5K_TX_QUEUE_ID_CAB = 6, /*IEEE80211_TX_QUEUE_AFTER_BEACON*/
- AR5K_TX_QUEUE_ID_BEACON = 7, /*IEEE80211_TX_QUEUE_BEACON*/
- AR5K_TX_QUEUE_ID_UAPSD = 8,
- AR5K_TX_QUEUE_ID_XR_DATA = 9,
+ AR5K_TX_QUEUE_ID_DATA_MIN = 0,
+ AR5K_TX_QUEUE_ID_DATA_MAX = 3,
+ AR5K_TX_QUEUE_ID_UAPSD = 7,
+ AR5K_TX_QUEUE_ID_CAB = 8,
+ AR5K_TX_QUEUE_ID_BEACON = 9,
};
/*
@@ -521,46 +603,70 @@ enum ath5k_tx_queue_id {
#define AR5K_TXQ_FLAG_POST_FR_BKOFF_DIS 0x1000 /* Disable backoff while bursting */
#define AR5K_TXQ_FLAG_COMPRESSION_ENABLE 0x2000 /* Enable hw compression -not implemented-*/
-/*
- * Data transmit queue state. One of these exists for each
- * hardware transmit queue. Packets sent to us from above
- * are assigned to queues based on their priority. Not all
- * devices support a complete set of hardware transmit queues.
- * For those devices the array sc_ac2q will map multiple
- * priorities to fewer hardware queues (typically all to one
- * hardware queue).
+/**
+ * struct ath5k_txq - Transmit queue state
+ * @qnum: Hardware q number
+ * @link: Link ptr in last TX desc
+ * @q: Transmit queue (&struct list_head)
+ * @lock: Lock on q and link
+ * @setup: Is the queue configured
+ * @txq_len: Number of queued buffers
+ * @txq_max: Max allowed num of queued buffers
+ * @txq_poll_mark: Used to check if queue got stuck
+ * @txq_stuck: Queue stuck counter
+ *
+ * One of these exists for each hardware transmit queue.
+ * Packets sent to us from above are assigned to queues based
+ * on their priority. Not all devices support a complete set
+ * of hardware transmit queues. For those devices the array
+ * sc_ac2q will map multiple priorities to fewer hardware queues
+ * (typically all to one hardware queue).
*/
struct ath5k_txq {
- unsigned int qnum; /* hardware q number */
- u32 *link; /* link ptr in last TX desc */
- struct list_head q; /* transmit queue */
- spinlock_t lock; /* lock on q and link */
+ unsigned int qnum;
+ u32 *link;
+ struct list_head q;
+ spinlock_t lock;
bool setup;
- int txq_len; /* number of queued buffers */
- int txq_max; /* max allowed num of queued buffers */
+ int txq_len;
+ int txq_max;
bool txq_poll_mark;
- unsigned int txq_stuck; /* informational counter */
+ unsigned int txq_stuck;
};
-/*
- * A struct to hold tx queue's parameters
+/**
+ * struct ath5k_txq_info - A struct to hold TX queue's parameters
+ * @tqi_type: One of enum ath5k_tx_queue
+ * @tqi_subtype: One of enum ath5k_tx_queue_subtype
+ * @tqi_flags: TX queue flags (see above)
+ * @tqi_aifs: Arbitrated Inter-frame Space
+ * @tqi_cw_min: Minimum Contention Window
+ * @tqi_cw_max: Maximum Contention Window
+ * @tqi_cbr_period: Constant bit rate period
+ * @tqi_ready_time: Time queue waits after an event when RDYTIME is enabled
*/
struct ath5k_txq_info {
enum ath5k_tx_queue tqi_type;
enum ath5k_tx_queue_subtype tqi_subtype;
- u16 tqi_flags; /* Tx queue flags (see above) */
- u8 tqi_aifs; /* Arbitrated Interframe Space */
- u16 tqi_cw_min; /* Minimum Contention Window */
- u16 tqi_cw_max; /* Maximum Contention Window */
- u32 tqi_cbr_period; /* Constant bit rate period */
+ u16 tqi_flags;
+ u8 tqi_aifs;
+ u16 tqi_cw_min;
+ u16 tqi_cw_max;
+ u32 tqi_cbr_period;
u32 tqi_cbr_overflow_limit;
u32 tqi_burst_time;
- u32 tqi_ready_time; /* Time queue waits after an event */
+ u32 tqi_ready_time;
};
-/*
- * Transmit packet types.
- * used on tx control descriptor
+/**
+ * enum ath5k_pkt_type - Transmit packet types
+ * @AR5K_PKT_TYPE_NORMAL: Normal data
+ * @AR5K_PKT_TYPE_ATIM: ATIM
+ * @AR5K_PKT_TYPE_PSPOLL: PS-Poll
+ * @AR5K_PKT_TYPE_BEACON: Beacon
+ * @AR5K_PKT_TYPE_PROBE_RESP: Probe response
+ * @AR5K_PKT_TYPE_PIFS: PIFS
+ * Used on tx control descriptor
*/
enum ath5k_pkt_type {
AR5K_PKT_TYPE_NORMAL = 0,
@@ -583,27 +689,23 @@ enum ath5k_pkt_type {
(ah->ah_txpower.txp_rates_power_table[(_r)] & 0x3f) << (_v) \
)
-/*
- * DMA size definitions (2^(n+2))
- */
-enum ath5k_dmasize {
- AR5K_DMASIZE_4B = 0,
- AR5K_DMASIZE_8B,
- AR5K_DMASIZE_16B,
- AR5K_DMASIZE_32B,
- AR5K_DMASIZE_64B,
- AR5K_DMASIZE_128B,
- AR5K_DMASIZE_256B,
- AR5K_DMASIZE_512B
-};
/****************\
RX DEFINITIONS
\****************/
-/*
- * RX Status descriptor
+/**
+ * struct ath5k_rx_status - RX Status descriptor
+ * @rs_datalen: Data length
+ * @rs_tstamp: Timestamp
+ * @rs_status: Status code
+ * @rs_phyerr: PHY error mask
+ * @rs_rssi: RSSI in 0.5dbm units
+ * @rs_keyix: Index to the key used for decrypting
+ * @rs_rate: Rate used to decode the frame
+ * @rs_antenna: Antenna used to receive the frame
+ * @rs_more: Indicates this is a frame fragment (Fast frames)
*/
struct ath5k_rx_status {
u16 rs_datalen;
@@ -645,10 +747,18 @@ struct ath5k_rx_status {
#define TSF_TO_TU(_tsf) (u32)((_tsf) >> 10)
+
/*******************************\
GAIN OPTIMIZATION DEFINITIONS
\*******************************/
+/**
+ * enum ath5k_rfgain - RF Gain optimization engine state
+ * @AR5K_RFGAIN_INACTIVE: Engine disabled
+ * @AR5K_RFGAIN_ACTIVE: Probe active
+ * @AR5K_RFGAIN_READ_REQUESTED: Probe requested
+ * @AR5K_RFGAIN_NEED_CHANGE: Gain_F needs change
+ */
enum ath5k_rfgain {
AR5K_RFGAIN_INACTIVE = 0,
AR5K_RFGAIN_ACTIVE,
@@ -656,6 +766,16 @@ enum ath5k_rfgain {
AR5K_RFGAIN_NEED_CHANGE,
};
+/**
+ * struct ath5k_gain - RF Gain optimization engine state data
+ * @g_step_idx: Current step index
+ * @g_current: Current gain
+ * @g_target: Target gain
+ * @g_low: Low gain boundary
+ * @g_high: High gain boundary
+ * @g_f_corr: Gain_F correction
+ * @g_state: One of enum ath5k_rfgain
+ */
struct ath5k_gain {
u8 g_step_idx;
u8 g_current;
@@ -666,6 +786,8 @@ struct ath5k_gain {
u8 g_state;
};
+
+
/********************\
COMMON DEFINITIONS
\********************/
@@ -674,9 +796,14 @@ struct ath5k_gain {
#define AR5K_SLOT_TIME_20 880
#define AR5K_SLOT_TIME_MAX 0xffff
-/*
- * The following structure is used to map 2GHz channels to
- * 5GHz Atheros channels.
+/**
+ * struct ath5k_athchan_2ghz - 2GHz to 5GHZ map for RF5111
+ * @a2_flags: Channel flags (internal)
+ * @a2_athchan: HW channel number (internal)
+ *
+ * This structure is used to map 2GHz channels to
+ * 5GHz Atheros channels on 2111 frequency converter
+ * that comes together with RF5111
* TODO: Clean up
*/
struct ath5k_athchan_2ghz {
@@ -684,36 +811,80 @@ struct ath5k_athchan_2ghz {
u16 a2_athchan;
};
+/**
+ * enum ath5k_dmasize - DMA size definitions (2^(n+2))
+ * @AR5K_DMASIZE_4B: 4Bytes
+ * @AR5K_DMASIZE_8B: 8Bytes
+ * @AR5K_DMASIZE_16B: 16Bytes
+ * @AR5K_DMASIZE_32B: 32Bytes
+ * @AR5K_DMASIZE_64B: 64Bytes (Default)
+ * @AR5K_DMASIZE_128B: 128Bytes
+ * @AR5K_DMASIZE_256B: 256Bytes
+ * @AR5K_DMASIZE_512B: 512Bytes
+ *
+ * These are used to set DMA burst size on hw
+ *
+ * Note: Some platforms can't handle more than 4Bytes
+ * be careful on embedded boards.
+ */
+enum ath5k_dmasize {
+ AR5K_DMASIZE_4B = 0,
+ AR5K_DMASIZE_8B,
+ AR5K_DMASIZE_16B,
+ AR5K_DMASIZE_32B,
+ AR5K_DMASIZE_64B,
+ AR5K_DMASIZE_128B,
+ AR5K_DMASIZE_256B,
+ AR5K_DMASIZE_512B
+};
+
+
/******************\
RATE DEFINITIONS
\******************/
/**
+ * DOC: Rate codes
+ *
* Seems the ar5xxx hardware supports up to 32 rates, indexed by 1-32.
*
* The rate code is used to get the RX rate or set the TX rate on the
* hardware descriptors. It is also used for internal modulation control
* and settings.
*
- * This is the hardware rate map we are aware of:
- *
- * rate_code 0x01 0x02 0x03 0x04 0x05 0x06 0x07 0x08
- * rate_kbps 3000 1000 ? ? ? 2000 500 48000
- *
- * rate_code 0x09 0x0A 0x0B 0x0C 0x0D 0x0E 0x0F 0x10
- * rate_kbps 24000 12000 6000 54000 36000 18000 9000 ?
+ * This is the hardware rate map we are aware of (html unfriendly):
*
- * rate_code 17 18 19 20 21 22 23 24
- * rate_kbps ? ? ? ? ? ? ? 11000
+ * Rate code Rate (Kbps)
+ * --------- -----------
+ * 0x01 3000 (XR)
+ * 0x02 1000 (XR)
+ * 0x03 250 (XR)
+ * 0x04 - 05 -Reserved-
+ * 0x06 2000 (XR)
+ * 0x07 500 (XR)
+ * 0x08 48000 (OFDM)
+ * 0x09 24000 (OFDM)
+ * 0x0A 12000 (OFDM)
+ * 0x0B 6000 (OFDM)
+ * 0x0C 54000 (OFDM)
+ * 0x0D 36000 (OFDM)
+ * 0x0E 18000 (OFDM)
+ * 0x0F 9000 (OFDM)
+ * 0x10 - 17 -Reserved-
+ * 0x18 11000L (CCK)
+ * 0x19 5500L (CCK)
+ * 0x1A 2000L (CCK)
+ * 0x1B 1000L (CCK)
+ * 0x1C 11000S (CCK)
+ * 0x1D 5500S (CCK)
+ * 0x1E 2000S (CCK)
+ * 0x1F -Reserved-
*
- * rate_code 25 26 27 28 29 30 31 32
- * rate_kbps 5500 2000 1000 11000S 5500S 2000S ? ?
- *
- * "S" indicates CCK rates with short preamble.
+ * "S" indicates CCK rates with short preamble and "L" with long preamble.
*
* AR5211 has different rate codes for CCK (802.11B) rates. It only uses the
- * lowest 4 bits, so they are the same as below with a 0xF mask.
+ * lowest 4 bits, so they are the same as above with a 0xF mask.
* (0xB, 0xA, 0x9 and 0x8 for 1M, 2M, 5.5M and 11M).
* We handle this in ath5k_setup_bands().
*/
@@ -733,13 +904,9 @@ struct ath5k_athchan_2ghz {
#define ATH5K_RATE_CODE_36M 0x0D
#define ATH5K_RATE_CODE_48M 0x08
#define ATH5K_RATE_CODE_54M 0x0C
-/* XR */
-#define ATH5K_RATE_CODE_XR_500K 0x07
-#define ATH5K_RATE_CODE_XR_1M 0x02
-#define ATH5K_RATE_CODE_XR_2M 0x06
-#define ATH5K_RATE_CODE_XR_3M 0x01
-/* adding this flag to rate_code enables short preamble */
+/* Adding this flag to rate_code on B rates
+ * enables short preamble */
#define AR5K_SET_SHORT_PREAMBLE 0x04
/*
@@ -747,7 +914,7 @@ struct ath5k_athchan_2ghz {
*/
#define AR5K_KEYCACHE_SIZE 8
-extern int ath5k_modparam_nohwcrypt;
+extern bool ath5k_modparam_nohwcrypt;
/***********************\
HW RELATED DEFINITIONS
@@ -769,49 +936,65 @@ extern int ath5k_modparam_nohwcrypt;
/**
* enum ath5k_int - Hardware interrupt masks helpers
+ * @AR5K_INT_RXOK: Frame successfully received
+ * @AR5K_INT_RXDESC: Request RX descriptor/Read RX descriptor
+ * @AR5K_INT_RXERR: Frame reception failed
+ * @AR5K_INT_RXNOFRM: No frame received within a specified time period
+ * @AR5K_INT_RXEOL: Reached "End Of List", means we need more RX descriptors
+ * @AR5K_INT_RXORN: Indicates we got RX FIFO overrun. Note that Rx overrun is
+ * not always fatal, on some chips we can continue operation
+ * without resetting the card, that's why %AR5K_INT_FATAL is not
+ * common for all chips.
+ * @AR5K_INT_RX_ALL: Mask to identify all RX related interrupts
+ *
+ * @AR5K_INT_TXOK: Frame transmission success
+ * @AR5K_INT_TXDESC: Request TX descriptor/Read TX status descriptor
+ * @AR5K_INT_TXERR: Frame transmission failure
+ * @AR5K_INT_TXEOL: Received End Of List for VEOL (Virtual End Of List). The
+ * Queue Control Unit (QCU) signals an EOL interrupt only if a
+ * descriptor's LinkPtr is NULL. For more details, refer to:
+ * "http://www.freepatentsonline.com/20030225739.html"
+ * @AR5K_INT_TXNOFRM: No frame was transmitted within a specified time period
+ * @AR5K_INT_TXURN: Indicates we got TX FIFO underrun. In such case we should
+ * increase the TX trigger threshold.
+ * @AR5K_INT_TX_ALL: Mask to identify all TX related interrupts
*
- * @AR5K_INT_RX: mask to identify received frame interrupts, of type
- * AR5K_ISR_RXOK or AR5K_ISR_RXERR
- * @AR5K_INT_RXDESC: Request RX descriptor/Read RX descriptor (?)
- * @AR5K_INT_RXNOFRM: No frame received (?)
- * @AR5K_INT_RXEOL: received End Of List for VEOL (Virtual End Of List). The
- * Queue Control Unit (QCU) signals an EOL interrupt only if a descriptor's
- * LinkPtr is NULL. For more details, refer to:
- * http://www.freepatentsonline.com/20030225739.html
- * @AR5K_INT_RXORN: Indicates we got RX overrun (eg. no more descriptors).
- * Note that Rx overrun is not always fatal, on some chips we can continue
- * operation without resetting the card, that's why int_fatal is not
- * common for all chips.
- * @AR5K_INT_TX: mask to identify received frame interrupts, of type
- * AR5K_ISR_TXOK or AR5K_ISR_TXERR
- * @AR5K_INT_TXDESC: Request TX descriptor/Read TX status descriptor (?)
- * @AR5K_INT_TXURN: received when we should increase the TX trigger threshold
- * We currently do increments on interrupt by
- * (AR5K_TUNE_MAX_TX_FIFO_THRES - current_trigger_level) / 2
* @AR5K_INT_MIB: Indicates the either Management Information Base counters or
- * one of the PHY error counters reached the maximum value and should be
- * read and cleared.
+ * one of the PHY error counters reached the maximum value and
+ * should be read and cleared.
+ * @AR5K_INT_SWI: Software triggered interrupt.
* @AR5K_INT_RXPHY: RX PHY Error
* @AR5K_INT_RXKCM: RX Key cache miss
* @AR5K_INT_SWBA: SoftWare Beacon Alert - indicates its time to send a
- * beacon that must be handled in software. The alternative is if you
- * have VEOL support, in that case you let the hardware deal with things.
+ * beacon that must be handled in software. The alternative is if
+ * you have VEOL support, in that case you let the hardware deal
+ * with things.
+ * @AR5K_INT_BRSSI: Beacon received with an RSSI value below our threshold
* @AR5K_INT_BMISS: If in STA mode this indicates we have stopped seeing
- * beacons from the AP have associated with, we should probably try to
- * reassociate. When in IBSS mode this might mean we have not received
- * any beacons from any local stations. Note that every station in an
- * IBSS schedules to send beacons at the Target Beacon Transmission Time
- * (TBTT) with a random backoff.
- * @AR5K_INT_BNR: Beacon Not Ready interrupt - ??
- * @AR5K_INT_GPIO: GPIO interrupt is used for RF Kill, disabled for now
- * until properly handled
- * @AR5K_INT_FATAL: Fatal errors were encountered, typically caused by DMA
- * errors. These types of errors we can enable seem to be of type
- * AR5K_SIMR2_MCABT, AR5K_SIMR2_SSERR and AR5K_SIMR2_DPERR.
+ * beacons from the AP we have associated with, we should probably
+ * try to reassociate. When in IBSS mode this might mean we have
+ * not received any beacons from any local stations. Note that
+ * every station in an IBSS schedules to send beacons at the
+ * Target Beacon Transmission Time (TBTT) with a random backoff.
+ * @AR5K_INT_BNR: Beacon queue got triggered (DMA beacon alert) while empty.
+ * @AR5K_INT_TIM: Beacon with local station's TIM bit set
+ * @AR5K_INT_DTIM: Beacon with DTIM bit and zero DTIM count received
+ * @AR5K_INT_DTIM_SYNC: DTIM sync lost
+ * @AR5K_INT_GPIO: GPIO interrupt is used for RF Kill switches connected to
+ * our GPIO pins.
+ * @AR5K_INT_BCN_TIMEOUT: Beacon timeout, we waited after TBTT but got nothing
+ * @AR5K_INT_CAB_TIMEOUT: We waited for CAB traffic after the beacon but got
+ * nothing or an incomplete CAB frame sequence.
+ * @AR5K_INT_QCBRORN: A queue got its CBR counter expired
+ * @AR5K_INT_QCBRURN: A queue got triggered while empty
+ * @AR5K_INT_QTRIG: A queue got triggered
+ *
+ * @AR5K_INT_FATAL: Fatal errors were encountered, typically caused by bus/DMA
+ * errors. Indicates we need to reset the card.
* @AR5K_INT_GLOBAL: Used to clear and set the IER
- * @AR5K_INT_NOCARD: signals the card has been removed
- * @AR5K_INT_COMMON: common interrupts shared among MACs with the same
- * bit value
+ * @AR5K_INT_NOCARD: Signals the card has been removed
+ * @AR5K_INT_COMMON: Common interrupts shared among MACs with the same
+ * bit value
*
* These are mapped to take advantage of some common bits
* between the MACs, to be able to set intr properties
@@ -847,15 +1030,15 @@ enum ath5k_int {
AR5K_INT_GPIO = 0x01000000,
AR5K_INT_BCN_TIMEOUT = 0x02000000, /* Non common */
AR5K_INT_CAB_TIMEOUT = 0x04000000, /* Non common */
- AR5K_INT_RX_DOPPLER = 0x08000000, /* Non common */
- AR5K_INT_QCBRORN = 0x10000000, /* Non common */
- AR5K_INT_QCBRURN = 0x20000000, /* Non common */
- AR5K_INT_QTRIG = 0x40000000, /* Non common */
+ AR5K_INT_QCBRORN = 0x08000000, /* Non common */
+ AR5K_INT_QCBRURN = 0x10000000, /* Non common */
+ AR5K_INT_QTRIG = 0x20000000, /* Non common */
AR5K_INT_GLOBAL = 0x80000000,
AR5K_INT_TX_ALL = AR5K_INT_TXOK
| AR5K_INT_TXDESC
| AR5K_INT_TXERR
+ | AR5K_INT_TXNOFRM
| AR5K_INT_TXEOL
| AR5K_INT_TXURN,
@@ -891,15 +1074,32 @@ enum ath5k_int {
AR5K_INT_NOCARD = 0xffffffff
};
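
For readers not familiar with how these abstracted bits are consumed, here is a minimal sketch of the intended usage (illustrative only, mirroring the handling in base.c later in this patch; example_handle_status() is a made-up name, not driver API):

static void example_handle_status(struct ath5k_hw *ah, enum ath5k_int status)
{
	if (status & AR5K_INT_FATAL) {
		/* unrecoverable bus/DMA error: defer a full chip reset */
		ieee80211_queue_work(ah->hw, &ah->reset_work);
		return;
	}
	if (status & (AR5K_INT_RXOK | AR5K_INT_RXERR))
		ath5k_schedule_rx(ah);		/* RX path work */
	if (status & (AR5K_INT_TXOK | AR5K_INT_TXDESC | AR5K_INT_TXERR))
		ath5k_schedule_tx(ah);		/* TX path work */
}
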
-/* mask which calibration is active at the moment */
+/**
+ * enum ath5k_calibration_mask - Mask which calibration is active at the moment
+ * @AR5K_CALIBRATION_FULL: Full calibration (AGC + SHORT)
+ * @AR5K_CALIBRATION_SHORT: Short calibration (NF + I/Q)
+ * @AR5K_CALIBRATION_NF: Noise Floor calibration
+ * @AR5K_CALIBRATION_ANI: Adaptive Noise Immunity
+ */
enum ath5k_calibration_mask {
AR5K_CALIBRATION_FULL = 0x01,
AR5K_CALIBRATION_SHORT = 0x02,
- AR5K_CALIBRATION_ANI = 0x04,
+ AR5K_CALIBRATION_NF = 0x04,
+ AR5K_CALIBRATION_ANI = 0x08,
};
-/*
- * Power management
+/**
+ * enum ath5k_power_mode - Power management modes
+ * @AR5K_PM_UNDEFINED: Undefined
+ * @AR5K_PM_AUTO: Allow card to sleep if possible
+ * @AR5K_PM_AWAKE: Force card to wake up
+ * @AR5K_PM_FULL_SLEEP: Force card to full sleep (DANGEROUS)
+ * @AR5K_PM_NETWORK_SLEEP: Allow to sleep for a specified duration
+ *
+ * Currently only PM_AWAKE is used; FULL_SLEEP and NETWORK_SLEEP/AUTO
+ * are known to have problems on some cards. This is not a big
+ * problem though because we can get almost the same effect as
+ * FULL_SLEEP by putting the card on warm reset (it's almost powered down).
*/
enum ath5k_power_mode {
AR5K_PM_UNDEFINED = 0,
@@ -957,6 +1157,8 @@ struct ath5k_capabilities {
} cap_queues;
bool cap_has_phyerr_counters;
+ bool cap_has_mrr_support;
+ bool cap_needs_2GHz_ovr;
};
/* size of noise floor history (keep it a power of two) */
@@ -1072,13 +1274,11 @@ struct ath5k_hw {
dma_addr_t desc_daddr; /* DMA (physical) address */
size_t desc_len; /* size of TX/RX descriptors */
- DECLARE_BITMAP(status, 6);
+ DECLARE_BITMAP(status, 4);
#define ATH_STAT_INVALID 0 /* disable hardware accesses */
-#define ATH_STAT_MRRETRY 1 /* multi-rate retry support */
-#define ATH_STAT_PROMISC 2
-#define ATH_STAT_LEDSOFT 3 /* enable LED gpio status */
-#define ATH_STAT_STARTED 4 /* opened & irqs enabled */
-#define ATH_STAT_2G_DISABLED 5 /* multiband radio without 2G */
+#define ATH_STAT_PROMISC 1
+#define ATH_STAT_LEDSOFT 2 /* enable LED gpio status */
+#define ATH_STAT_STARTED 3 /* opened & irqs enabled */
unsigned int filter_flags; /* HW flags, AR5K_RX_FILTER_* */
struct ieee80211_channel *curchan; /* current h/w channel */
@@ -1097,6 +1297,7 @@ struct ath5k_hw {
led_on; /* pin setting for LED on */
struct work_struct reset_work; /* deferred chip reset */
+ struct work_struct calib_work; /* deferred phy calibration */
struct list_head rxbuf; /* receive buffer */
spinlock_t rxbuflock;
@@ -1113,8 +1314,6 @@ struct ath5k_hw {
struct ath5k_rfkill rf_kill;
- struct tasklet_struct calib; /* calibration tasklet */
-
spinlock_t block; /* protects beacon */
struct tasklet_struct beacontq; /* beacon intr tasklet */
struct list_head bcbuf; /* beacon buffer */
@@ -1144,7 +1343,7 @@ struct ath5k_hw {
enum ath5k_int ah_imr;
struct ieee80211_channel *ah_current_channel;
- bool ah_calibration;
+ bool ah_iq_cal_needed;
bool ah_single_chip;
enum ath5k_version ah_version;
@@ -1187,7 +1386,13 @@ struct ath5k_hw {
u32 ah_txq_imr_cbrurn;
u32 ah_txq_imr_qtrig;
u32 ah_txq_imr_nofrm;
- u32 ah_txq_isr;
+
+ u32 ah_txq_isr_txok_all;
+ u32 ah_txq_isr_txurn;
+ u32 ah_txq_isr_qcborn;
+ u32 ah_txq_isr_qcburn;
+ u32 ah_txq_isr_qtrig;
+
u32 *ah_rf_banks;
size_t ah_rf_banks_size;
size_t ah_rf_regs_count;
@@ -1228,8 +1433,8 @@ struct ath5k_hw {
/* Calibration timestamp */
unsigned long ah_cal_next_full;
+ unsigned long ah_cal_next_short;
unsigned long ah_cal_next_ani;
- unsigned long ah_cal_next_nf;
/* Calibration mask */
u8 ah_cal_mask;
@@ -1338,11 +1543,11 @@ void ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah);
u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah);
void ath5k_hw_set_tsf64(struct ath5k_hw *ah, u64 tsf64);
void ath5k_hw_reset_tsf(struct ath5k_hw *ah);
-void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval);
+void ath5k_hw_init_beacon_timers(struct ath5k_hw *ah, u32 next_beacon,
+ u32 interval);
bool ath5k_hw_check_beacon_timers(struct ath5k_hw *ah, int intval);
/* Init function */
-void ath5k_hw_pcu_init(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
- u8 mode);
+void ath5k_hw_pcu_init(struct ath5k_hw *ah, enum nl80211_iftype op_mode);
/* Queue Control Unit, DFS Control Unit Functions */
int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
diff --git a/drivers/net/wireless/ath/ath5k/attach.c b/drivers/net/wireless/ath/ath5k/attach.c
index 91627dd2c26a..d7114c75fe9b 100644
--- a/drivers/net/wireless/ath/ath5k/attach.c
+++ b/drivers/net/wireless/ath/ath5k/attach.c
@@ -27,8 +27,7 @@
#include "debug.h"
/**
- * ath5k_hw_post - Power On Self Test helper function
- *
+ * ath5k_hw_post() - Power On Self Test helper function
* @ah: The &struct ath5k_hw
*/
static int ath5k_hw_post(struct ath5k_hw *ah)
@@ -92,8 +91,7 @@ static int ath5k_hw_post(struct ath5k_hw *ah)
}
/**
- * ath5k_hw_init - Check if hw is supported and init the needed structs
- *
+ * ath5k_hw_init() - Check if hw is supported and init the needed structs
* @ah: The &struct ath5k_hw associated with the device
*
* Check if the device is supported, perform a POST and initialize the needed
@@ -298,7 +296,7 @@ int ath5k_hw_init(struct ath5k_hw *ah)
/* Reset SERDES to load new settings */
ath5k_hw_reg_write(ah, 0x00000000, AR5K_PCIE_SERDES_RESET);
- mdelay(1);
+ usleep_range(1000, 1500);
}
/* Get misc capabilities */
@@ -308,11 +306,6 @@ int ath5k_hw_init(struct ath5k_hw *ah)
goto err;
}
- if (test_bit(ATH_STAT_2G_DISABLED, ah->status)) {
- __clear_bit(AR5K_MODE_11B, ah->ah_capabilities.cap_mode);
- __clear_bit(AR5K_MODE_11G, ah->ah_capabilities.cap_mode);
- }
-
/* Crypto settings */
common->keymax = (ah->ah_version == AR5K_AR5210 ?
AR5K_KEYTABLE_SIZE_5210 : AR5K_KEYTABLE_SIZE_5211);
@@ -349,8 +342,7 @@ err:
}
/**
- * ath5k_hw_deinit - Free the ath5k_hw struct
- *
+ * ath5k_hw_deinit() - Free the &struct ath5k_hw
* @ah: The &struct ath5k_hw
*/
void ath5k_hw_deinit(struct ath5k_hw *ah)
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index b346d0492001..d366dadcf86e 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -68,18 +68,23 @@
#define CREATE_TRACE_POINTS
#include "trace.h"
-int ath5k_modparam_nohwcrypt;
+bool ath5k_modparam_nohwcrypt;
module_param_named(nohwcrypt, ath5k_modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
-static int modparam_all_channels;
+static bool modparam_all_channels;
module_param_named(all_channels, modparam_all_channels, bool, S_IRUGO);
MODULE_PARM_DESC(all_channels, "Expose all channels the device can use.");
-static int modparam_fastchanswitch;
+static bool modparam_fastchanswitch;
module_param_named(fastchanswitch, modparam_fastchanswitch, bool, S_IRUGO);
MODULE_PARM_DESC(fastchanswitch, "Enable fast channel switching for AR2413/AR5413 radios.");
+static bool ath5k_modparam_no_hw_rfkill_switch;
+module_param_named(no_hw_rfkill_switch, ath5k_modparam_no_hw_rfkill_switch,
+ bool, S_IRUGO);
+MODULE_PARM_DESC(no_hw_rfkill_switch, "Ignore the GPIO RFKill switch state");
+
/* Module info */
MODULE_AUTHOR("Jiri Slaby");
@@ -183,7 +188,6 @@ static const struct ieee80211_rate ath5k_rates[] = {
{ .bitrate = 540,
.hw_value = ATH5K_RATE_CODE_54M,
.flags = 0 },
- /* XR missing */
};
static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp)
@@ -721,21 +725,24 @@ ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf,
if (ret)
goto err_unmap;
- memset(mrr_rate, 0, sizeof(mrr_rate));
- memset(mrr_tries, 0, sizeof(mrr_tries));
- for (i = 0; i < 3; i++) {
- rate = ieee80211_get_alt_retry_rate(ah->hw, info, i);
- if (!rate)
- break;
+ /* Set up MRR descriptor */
+ if (ah->ah_capabilities.cap_has_mrr_support) {
+ memset(mrr_rate, 0, sizeof(mrr_rate));
+ memset(mrr_tries, 0, sizeof(mrr_tries));
+ for (i = 0; i < 3; i++) {
+ rate = ieee80211_get_alt_retry_rate(ah->hw, info, i);
+ if (!rate)
+ break;
- mrr_rate[i] = rate->hw_value;
- mrr_tries[i] = info->control.rates[i + 1].count;
- }
+ mrr_rate[i] = rate->hw_value;
+ mrr_tries[i] = info->control.rates[i + 1].count;
+ }
- ath5k_hw_setup_mrr_tx_desc(ah, ds,
- mrr_rate[0], mrr_tries[0],
- mrr_rate[1], mrr_tries[1],
- mrr_rate[2], mrr_tries[2]);
+ ath5k_hw_setup_mrr_tx_desc(ah, ds,
+ mrr_rate[0], mrr_tries[0],
+ mrr_rate[1], mrr_tries[1],
+ mrr_rate[2], mrr_tries[2]);
+ }
ds->ds_link = 0;
ds->ds_data = bf->skbaddr;
@@ -1689,7 +1696,7 @@ ath5k_tasklet_tx(unsigned long data)
struct ath5k_hw *ah = (void *)data;
for (i = 0; i < AR5K_NUM_TX_QUEUES; i++)
- if (ah->txqs[i].setup && (ah->ah_txq_isr & BIT(i)))
+ if (ah->txqs[i].setup && (ah->ah_txq_isr_txok_all & BIT(i)))
ath5k_tx_processq(ah, &ah->txqs[i]);
ah->tx_pending = false;
@@ -2005,7 +2012,7 @@ ath5k_beacon_update_timers(struct ath5k_hw *ah, u64 bc_tsf)
ah->nexttbtt = nexttbtt;
intval |= AR5K_BEACON_ENA;
- ath5k_hw_init_beacon(ah, nexttbtt, intval);
+ ath5k_hw_init_beacon_timers(ah, nexttbtt, intval);
/*
* debugging output last in order to preserve the time critical aspect
@@ -2112,16 +2119,29 @@ static void
ath5k_intr_calibration_poll(struct ath5k_hw *ah)
{
if (time_is_before_eq_jiffies(ah->ah_cal_next_ani) &&
- !(ah->ah_cal_mask & AR5K_CALIBRATION_FULL)) {
- /* run ANI only when full calibration is not active */
+ !(ah->ah_cal_mask & AR5K_CALIBRATION_FULL) &&
+ !(ah->ah_cal_mask & AR5K_CALIBRATION_SHORT)) {
+
+ /* Run ANI only when calibration is not active */
+
ah->ah_cal_next_ani = jiffies +
msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_ANI);
tasklet_schedule(&ah->ani_tasklet);
- } else if (time_is_before_eq_jiffies(ah->ah_cal_next_full)) {
- ah->ah_cal_next_full = jiffies +
- msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_FULL);
- tasklet_schedule(&ah->calib);
+ } else if (time_is_before_eq_jiffies(ah->ah_cal_next_short) &&
+ !(ah->ah_cal_mask & AR5K_CALIBRATION_FULL) &&
+ !(ah->ah_cal_mask & AR5K_CALIBRATION_SHORT)) {
+
+ /* Run calibration only when another calibration
+ * is not running.
+ *
+ * Note: This is for both full/short calibration,
+ * if it's time for a full one, ath5k_calibrate_work will deal
+ * with it. */
+
+ ah->ah_cal_next_short = jiffies +
+ msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_SHORT);
+ ieee80211_queue_work(ah->hw, &ah->calib_work);
}
/* we could use SWI to generate enough interrupts to meet our
* calibration interval requirements, if necessary:
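
The deadline/interval pattern used by this poll is worth spelling out; a minimal sketch (illustrative only, the helper name is made up, the tunables and fields are the ones touched by this patch):

static void example_poll_short_cal(struct ath5k_hw *ah)
{
	/* has the stored deadline already passed ? */
	if (time_is_before_eq_jiffies(ah->ah_cal_next_short)) {
		/* re-arm the deadline one tuning interval into the future */
		ah->ah_cal_next_short = jiffies +
			msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_SHORT);
		/* defer the actual calibration to process context */
		ieee80211_queue_work(ah->hw, &ah->calib_work);
	}
}
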
@@ -2149,69 +2169,110 @@ ath5k_intr(int irq, void *dev_id)
enum ath5k_int status;
unsigned int counter = 1000;
+
+ /*
+ * If hw is not ready (or detached) and we get an
+ * interrupt, or if we have no interrupts pending
+ * (that means it's not for us) skip it.
+ *
+ * NOTE: Group 0/1 PCI interface registers are not
+ * supported on WiSOCs, so we can't check for pending
+ * interrupts (ISR belongs to another register group
+ * so we are ok).
+ */
if (unlikely(test_bit(ATH_STAT_INVALID, ah->status) ||
- ((ath5k_get_bus_type(ah) != ATH_AHB) &&
- !ath5k_hw_is_intr_pending(ah))))
+ ((ath5k_get_bus_type(ah) != ATH_AHB) &&
+ !ath5k_hw_is_intr_pending(ah))))
return IRQ_NONE;
+ /** Main loop **/
do {
- ath5k_hw_get_isr(ah, &status); /* NB: clears IRQ too */
+ ath5k_hw_get_isr(ah, &status); /* NB: clears IRQ too */
+
ATH5K_DBG(ah, ATH5K_DEBUG_INTR, "status 0x%x/0x%x\n",
status, ah->imask);
+
+ /*
+ * Fatal hw error -> Log and reset
+ *
+ * Fatal errors are unrecoverable so we have to
+ * reset the card. These errors include bus and
+ * dma errors.
+ */
if (unlikely(status & AR5K_INT_FATAL)) {
- /*
- * Fatal errors are unrecoverable.
- * Typically these are caused by DMA errors.
- */
+
ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
"fatal int, resetting\n");
ieee80211_queue_work(ah->hw, &ah->reset_work);
+
+ /*
+ * RX Overrun -> Count and reset if needed
+ *
+ * Receive buffers are full. Either the bus is busy or
+ * the CPU is not fast enough to process all received
+ * frames.
+ */
} else if (unlikely(status & AR5K_INT_RXORN)) {
+
/*
- * Receive buffers are full. Either the bus is busy or
- * the CPU is not fast enough to process all received
- * frames.
* Older chipsets need a reset to come out of this
* condition, but we treat it as RX for newer chips.
- * We don't know exactly which versions need a reset -
+ * We don't know exactly which versions need a reset
* this guess is copied from the HAL.
*/
ah->stats.rxorn_intr++;
+
if (ah->ah_mac_srev < AR5K_SREV_AR5212) {
ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
"rx overrun, resetting\n");
ieee80211_queue_work(ah->hw, &ah->reset_work);
} else
ath5k_schedule_rx(ah);
+
} else {
+
+ /* Software Beacon Alert -> Schedule beacon tasklet */
if (status & AR5K_INT_SWBA)
tasklet_hi_schedule(&ah->beacontq);
- if (status & AR5K_INT_RXEOL) {
- /*
- * NB: the hardware should re-read the link when
- * RXE bit is written, but it doesn't work at
- * least on older hardware revs.
- */
+ /*
+ * No more RX descriptors -> Just count
+ *
+ * NB: the hardware should re-read the link when
+ * RXE bit is written, but it doesn't work at
+ * least on older hardware revs.
+ */
+ if (status & AR5K_INT_RXEOL)
ah->stats.rxeol_intr++;
- }
- if (status & AR5K_INT_TXURN) {
- /* bump tx trigger level */
+
+
+ /* TX Underrun -> Bump tx trigger level */
+ if (status & AR5K_INT_TXURN)
ath5k_hw_update_tx_triglevel(ah, true);
- }
+
+ /* RX -> Schedule rx tasklet */
if (status & (AR5K_INT_RXOK | AR5K_INT_RXERR))
ath5k_schedule_rx(ah);
- if (status & (AR5K_INT_TXOK | AR5K_INT_TXDESC
- | AR5K_INT_TXERR | AR5K_INT_TXEOL))
+
+ /* TX -> Schedule tx tasklet */
+ if (status & (AR5K_INT_TXOK
+ | AR5K_INT_TXDESC
+ | AR5K_INT_TXERR
+ | AR5K_INT_TXEOL))
ath5k_schedule_tx(ah);
- if (status & AR5K_INT_BMISS) {
- /* TODO */
- }
+
+ /* Missed beacon -> TODO
+ if (status & AR5K_INT_BMISS)
+ */
+
+ /* MIB event -> Update counters and notify ANI */
if (status & AR5K_INT_MIB) {
ah->stats.mib_intr++;
ath5k_hw_update_mib_counters(ah);
ath5k_ani_mib_intr(ah);
}
+
+ /* GPIO -> Notify RFKill layer */
if (status & AR5K_INT_GPIO)
tasklet_schedule(&ah->rf_kill.toggleq);
@@ -2222,12 +2283,19 @@ ath5k_intr(int irq, void *dev_id)
} while (ath5k_hw_is_intr_pending(ah) && --counter > 0);
+ /*
+	 * Until we handle rx/tx interrupts, mask them on IMR
+	 *
+	 * NOTE: ah->(rx/tx)_pending are set when scheduling the tasklets
+	 * and unset after we've handled the interrupts.
+ */
if (ah->rx_pending || ah->tx_pending)
ath5k_set_current_imask(ah);
if (unlikely(!counter))
ATH5K_WARN(ah, "too many interrupts, giving up for now\n");
+ /* Fire up calibration poll */
ath5k_intr_calibration_poll(ah);
return IRQ_HANDLED;
@@ -2238,41 +2306,58 @@ ath5k_intr(int irq, void *dev_id)
* for temperature/environment changes.
*/
static void
-ath5k_tasklet_calibrate(unsigned long data)
+ath5k_calibrate_work(struct work_struct *work)
{
- struct ath5k_hw *ah = (void *)data;
+ struct ath5k_hw *ah = container_of(work, struct ath5k_hw,
+ calib_work);
+
+ /* Should we run a full calibration ? */
+ if (time_is_before_eq_jiffies(ah->ah_cal_next_full)) {
+
+ ah->ah_cal_next_full = jiffies +
+ msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_FULL);
+ ah->ah_cal_mask |= AR5K_CALIBRATION_FULL;
+
+ ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE,
+ "running full calibration\n");
+
+ if (ath5k_hw_gainf_calibrate(ah) == AR5K_RFGAIN_NEED_CHANGE) {
+ /*
+ * Rfgain is out of bounds, reset the chip
+ * to load new gain values.
+ */
+ ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
+ "got new rfgain, resetting\n");
+ ieee80211_queue_work(ah->hw, &ah->reset_work);
+ }
+
+ /* TODO: On full calibration we should stop TX here,
+ * so that it doesn't interfere (mostly due to gain_f
+ * calibration that messes with tx packets -see phy.c).
+ *
+ * NOTE: Stopping the queues from above is not enough
+	 * to stop TX but saves us from disconnecting (at least
+ * we don't lose packets). */
+ ieee80211_stop_queues(ah->hw);
+ } else
+ ah->ah_cal_mask |= AR5K_CALIBRATION_SHORT;
- /* Only full calibration for now */
- ah->ah_cal_mask |= AR5K_CALIBRATION_FULL;
ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE, "channel %u/%x\n",
ieee80211_frequency_to_channel(ah->curchan->center_freq),
ah->curchan->hw_value);
- if (ath5k_hw_gainf_calibrate(ah) == AR5K_RFGAIN_NEED_CHANGE) {
- /*
- * Rfgain is out of bounds, reset the chip
- * to load new gain values.
- */
- ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "calibration, resetting\n");
- ieee80211_queue_work(ah->hw, &ah->reset_work);
- }
if (ath5k_hw_phy_calibrate(ah, ah->curchan))
ATH5K_ERR(ah, "calibration of channel %u failed\n",
ieee80211_frequency_to_channel(
ah->curchan->center_freq));
- /* Noise floor calibration interrupts rx/tx path while I/Q calibration
- * doesn't.
- * TODO: We should stop TX here, so that it doesn't interfere.
- * Note that stopping the queues is not enough to stop TX! */
- if (time_is_before_eq_jiffies(ah->ah_cal_next_nf)) {
- ah->ah_cal_next_nf = jiffies +
- msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_NF);
- ath5k_hw_update_noise_floor(ah);
- }
-
- ah->ah_cal_mask &= ~AR5K_CALIBRATION_FULL;
+ /* Clear calibration flags */
+ if (ah->ah_cal_mask & AR5K_CALIBRATION_FULL) {
+ ieee80211_wake_queues(ah->hw);
+ ah->ah_cal_mask &= ~AR5K_CALIBRATION_FULL;
+ } else if (ah->ah_cal_mask & AR5K_CALIBRATION_SHORT)
+ ah->ah_cal_mask &= ~AR5K_CALIBRATION_SHORT;
}
@@ -2407,8 +2492,8 @@ ath5k_init_ah(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops)
if (ret)
goto err_irq;
- /* set up multi-rate retry capabilities */
- if (ah->ah_version == AR5K_AR5212) {
+ /* Set up multi-rate retry capabilities */
+ if (ah->ah_capabilities.cap_has_mrr_support) {
hw->max_rates = 4;
hw->max_rate_tries = max(AR5K_INIT_RETRY_SHORT,
AR5K_INIT_RETRY_LONG);
@@ -2544,15 +2629,22 @@ int ath5k_start(struct ieee80211_hw *hw)
* and then setup of the interrupt mask.
*/
ah->curchan = ah->hw->conf.channel;
- ah->imask = AR5K_INT_RXOK | AR5K_INT_RXERR | AR5K_INT_RXEOL |
- AR5K_INT_RXORN | AR5K_INT_TXDESC | AR5K_INT_TXEOL |
- AR5K_INT_FATAL | AR5K_INT_GLOBAL | AR5K_INT_MIB;
+ ah->imask = AR5K_INT_RXOK
+ | AR5K_INT_RXERR
+ | AR5K_INT_RXEOL
+ | AR5K_INT_RXORN
+ | AR5K_INT_TXDESC
+ | AR5K_INT_TXEOL
+ | AR5K_INT_FATAL
+ | AR5K_INT_GLOBAL
+ | AR5K_INT_MIB;
ret = ath5k_reset(ah, NULL, false);
if (ret)
goto done;
- ath5k_rfkill_hw_start(ah);
+ if (!ath5k_modparam_no_hw_rfkill_switch)
+ ath5k_rfkill_hw_start(ah);
/*
* Reset the key cache since some parts do not reset the
@@ -2585,7 +2677,6 @@ static void ath5k_stop_tasklets(struct ath5k_hw *ah)
ah->tx_pending = false;
tasklet_kill(&ah->rxtq);
tasklet_kill(&ah->txtq);
- tasklet_kill(&ah->calib);
tasklet_kill(&ah->beacontq);
tasklet_kill(&ah->ani_tasklet);
}
@@ -2637,7 +2728,8 @@ void ath5k_stop(struct ieee80211_hw *hw)
cancel_delayed_work_sync(&ah->tx_complete_work);
- ath5k_rfkill_hw_stop(ah);
+ if (!ath5k_modparam_no_hw_rfkill_switch)
+ ath5k_rfkill_hw_stop(ah);
}
/*
@@ -2689,9 +2781,24 @@ ath5k_reset(struct ath5k_hw *ah, struct ieee80211_channel *chan,
ath5k_ani_init(ah, ani_mode);
- ah->ah_cal_next_full = jiffies + msecs_to_jiffies(100);
- ah->ah_cal_next_ani = jiffies;
- ah->ah_cal_next_nf = jiffies;
+ /*
+ * Set calibration intervals
+ *
+	 * Note: We don't need to run calibration immediately
+ * since some initial calibration is done on reset
+ * even for fast channel switching. Also on scanning
+ * this will get set again and again and it won't get
+ * executed unless we connect somewhere and spend some
+ * time on the channel (that's what calibration needs
+ * anyway to be accurate).
+ */
+ ah->ah_cal_next_full = jiffies +
+ msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_FULL);
+ ah->ah_cal_next_ani = jiffies +
+ msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_ANI);
+ ah->ah_cal_next_short = jiffies +
+ msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_SHORT);
+
ewma_init(&ah->ah_beacon_rssi_avg, 1024, 8);
/* clear survey data and cycle counters */
@@ -2745,20 +2852,6 @@ ath5k_init(struct ieee80211_hw *hw)
/*
- * Check if the MAC has multi-rate retry support.
- * We do this by trying to setup a fake extended
- * descriptor. MACs that don't have support will
- * return false w/o doing anything. MACs that do
- * support it will return true w/o doing anything.
- */
- ret = ath5k_hw_setup_mrr_tx_desc(ah, NULL, 0, 0, 0, 0, 0, 0);
-
- if (ret < 0)
- goto err;
- if (ret > 0)
- __set_bit(ATH_STAT_MRRETRY, ah->status);
-
- /*
* Collect the channel list. The 802.11 layer
* is responsible for filtering this list based
* on settings like the phy mode and regulatory
@@ -2841,11 +2934,11 @@ ath5k_init(struct ieee80211_hw *hw)
tasklet_init(&ah->rxtq, ath5k_tasklet_rx, (unsigned long)ah);
tasklet_init(&ah->txtq, ath5k_tasklet_tx, (unsigned long)ah);
- tasklet_init(&ah->calib, ath5k_tasklet_calibrate, (unsigned long)ah);
tasklet_init(&ah->beacontq, ath5k_tasklet_beacon, (unsigned long)ah);
tasklet_init(&ah->ani_tasklet, ath5k_tasklet_ani, (unsigned long)ah);
INIT_WORK(&ah->reset_work, ath5k_reset_work);
+ INIT_WORK(&ah->calib_work, ath5k_calibrate_work);
INIT_DELAYED_WORK(&ah->tx_complete_work, ath5k_tx_complete_poll_work);
ret = ath5k_hw_common(ah)->bus_ops->eeprom_read_mac(ah, mac);
diff --git a/drivers/net/wireless/ath/ath5k/caps.c b/drivers/net/wireless/ath/ath5k/caps.c
index 810fba96702b..994169ad39cb 100644
--- a/drivers/net/wireless/ath/ath5k/caps.c
+++ b/drivers/net/wireless/ath/ath5k/caps.c
@@ -85,12 +85,19 @@ int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
caps->cap_range.range_2ghz_min = 2412;
caps->cap_range.range_2ghz_max = 2732;
- if (AR5K_EEPROM_HDR_11B(ee_header))
- __set_bit(AR5K_MODE_11B, caps->cap_mode);
-
- if (AR5K_EEPROM_HDR_11G(ee_header) &&
- ah->ah_version != AR5K_AR5211)
- __set_bit(AR5K_MODE_11G, caps->cap_mode);
+ /* Override 2GHz modes on SoCs that need it
+ * NOTE: cap_needs_2GHz_ovr gets set from
+ * ath_ahb_probe */
+ if (!caps->cap_needs_2GHz_ovr) {
+ if (AR5K_EEPROM_HDR_11B(ee_header))
+ __set_bit(AR5K_MODE_11B,
+ caps->cap_mode);
+
+ if (AR5K_EEPROM_HDR_11G(ee_header) &&
+ ah->ah_version != AR5K_AR5211)
+ __set_bit(AR5K_MODE_11G,
+ caps->cap_mode);
+ }
}
}
@@ -103,12 +110,18 @@ int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
else
caps->cap_queues.q_tx_num = AR5K_NUM_TX_QUEUES;
- /* newer hardware has PHY error counters */
+ /* Newer hardware has PHY error counters */
if (ah->ah_mac_srev >= AR5K_SREV_AR5213A)
caps->cap_has_phyerr_counters = true;
else
caps->cap_has_phyerr_counters = false;
+ /* MACs since AR5212 have MRR support */
+ if (ah->ah_version == AR5K_AR5212)
+ caps->cap_has_mrr_support = true;
+ else
+ caps->cap_has_mrr_support = false;
+
return 0;
}
diff --git a/drivers/net/wireless/ath/ath5k/desc.c b/drivers/net/wireless/ath/ath5k/desc.c
index 7e88dda82221..f8bfa3ac2af0 100644
--- a/drivers/net/wireless/ath/ath5k/desc.c
+++ b/drivers/net/wireless/ath/ath5k/desc.c
@@ -26,20 +26,61 @@
#include "debug.h"
+/**
+ * DOC: Hardware descriptor functions
+ *
+ * Here we handle the processing of the low-level hw descriptors
+ * that hw reads and writes via DMA for each TX and RX attempt (that means
+ * we can also have descriptors for failed TX/RX tries). We have two kind of
+ * descriptors for RX and TX, control descriptors tell the hw how to send or
+ * receive a packet where to read/write it from/to etc and status descriptors
+ * that contain information about how the packet was sent or received (errors
+ * included).
+ *
+ * Descriptor format is not exactly the same for each MAC chip version so we
+ * have function pointers on &struct ath5k_hw we initialize at runtime based on
+ * the chip used.
+ */
+
+
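
To make the dispatch described above concrete, a rough sketch follows (the assignments match ath5k_hw_init_desc_functions() further down in this file; the call site is illustrative and its argument list mirrors the setup functions below):

/* runtime selection, as done in ath5k_hw_init_desc_functions() below */
if (ah->ah_version == AR5K_AR5212)
	ah->ah_setup_tx_desc = ath5k_hw_setup_4word_tx_desc;
else
	ah->ah_setup_tx_desc = ath5k_hw_setup_2word_tx_desc;

/* callers never care which descriptor format is in use, they simply
 * go through the pointer: */
ret = ah->ah_setup_tx_desc(ah, ds, pkt_len, hdr_len, padsize, type,
			   tx_power, tx_rate0, tx_tries0, key_index,
			   antenna_mode, flags, rtscts_rate, rtscts_duration);
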
/************************\
* TX Control descriptors *
\************************/
-/*
- * Initialize the 2-word tx control descriptor on 5210/5211
+/**
+ * ath5k_hw_setup_2word_tx_desc() - Initialize a 2-word tx control descriptor
+ * @ah: The &struct ath5k_hw
+ * @desc: The &struct ath5k_desc
+ * @pkt_len: Frame length in bytes
+ * @hdr_len: Header length in bytes (only used on AR5210)
+ * @padsize: Any padding we've added to the frame length
+ * @type: One of enum ath5k_pkt_type
+ * @tx_power: Tx power in 0.5dB steps
+ * @tx_rate0: HW idx for transmission rate
+ * @tx_tries0: Max number of retransmissions
+ * @key_index: Index on key table to use for encryption
+ * @antenna_mode: Which antenna to use (0 for auto)
+ * @flags: One of AR5K_TXDESC_* flags (desc.h)
+ * @rtscts_rate: HW idx for RTS/CTS transmission rate
+ * @rtscts_duration: What to put on duration field on the header of RTS/CTS
+ *
+ * Internal function to initialize a 2-Word TX control descriptor
+ * found on AR5210 and AR5211 MAC chips.
+ *
+ * Returns 0 on success or -EINVAL on invalid input
*/
static int
-ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
- unsigned int pkt_len, unsigned int hdr_len, int padsize,
- enum ath5k_pkt_type type,
- unsigned int tx_power, unsigned int tx_rate0, unsigned int tx_tries0,
- unsigned int key_index, unsigned int antenna_mode, unsigned int flags,
- unsigned int rtscts_rate, unsigned int rtscts_duration)
+ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah,
+ struct ath5k_desc *desc,
+ unsigned int pkt_len, unsigned int hdr_len,
+ int padsize,
+ enum ath5k_pkt_type type,
+ unsigned int tx_power,
+ unsigned int tx_rate0, unsigned int tx_tries0,
+ unsigned int key_index,
+ unsigned int antenna_mode,
+ unsigned int flags,
+ unsigned int rtscts_rate, unsigned int rtscts_duration)
{
u32 frame_type;
struct ath5k_hw_2w_tx_ctl *tx_ctl;
@@ -172,17 +213,40 @@ ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
return 0;
}
-/*
- * Initialize the 4-word tx control descriptor on 5212
+/**
+ * ath5k_hw_setup_4word_tx_desc() - Initialize a 4-word tx control descriptor
+ * @ah: The &struct ath5k_hw
+ * @desc: The &struct ath5k_desc
+ * @pkt_len: Frame length in bytes
+ * @hdr_len: Header length in bytes (only used on AR5210)
+ * @padsize: Any padding we've added to the frame length
+ * @type: One of enum ath5k_pkt_type
+ * @tx_power: Tx power in 0.5dB steps
+ * @tx_rate0: HW idx for transmission rate
+ * @tx_tries0: Max number of retransmissions
+ * @key_index: Index on key table to use for encryption
+ * @antenna_mode: Which antenna to use (0 for auto)
+ * @flags: One of AR5K_TXDESC_* flags (desc.h)
+ * @rtscts_rate: HW idx for RTS/CTS transmission rate
+ * @rtscts_duration: What to put on duration field on the header of RTS/CTS
+ *
+ * Internal function to initialize a 4-Word TX control descriptor
+ * found on AR5212 and later MAC chips.
+ *
+ * Returns 0 on success or -EINVAL on invalid input
*/
-static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
- struct ath5k_desc *desc, unsigned int pkt_len, unsigned int hdr_len,
- int padsize,
- enum ath5k_pkt_type type, unsigned int tx_power, unsigned int tx_rate0,
- unsigned int tx_tries0, unsigned int key_index,
- unsigned int antenna_mode, unsigned int flags,
- unsigned int rtscts_rate,
- unsigned int rtscts_duration)
+static int
+ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
+ struct ath5k_desc *desc,
+ unsigned int pkt_len, unsigned int hdr_len,
+ int padsize,
+ enum ath5k_pkt_type type,
+ unsigned int tx_power,
+ unsigned int tx_rate0, unsigned int tx_tries0,
+ unsigned int key_index,
+ unsigned int antenna_mode,
+ unsigned int flags,
+ unsigned int rtscts_rate, unsigned int rtscts_duration)
{
struct ath5k_hw_4w_tx_ctl *tx_ctl;
unsigned int frame_len;
@@ -292,13 +356,29 @@ static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
return 0;
}
-/*
- * Initialize a 4-word multi rate retry tx control descriptor on 5212
+/**
+ * ath5k_hw_setup_mrr_tx_desc() - Initialize an MRR tx control descriptor
+ * @ah: The &struct ath5k_hw
+ * @desc: The &struct ath5k_desc
+ * @tx_rate1: HW idx for rate used on transmission series 1
+ * @tx_tries1: Max number of retransmissions for transmission series 1
+ * @tx_rate2: HW idx for rate used on transmission series 2
+ * @tx_tries2: Max number of retransmissions for transmission series 2
+ * @tx_rate3: HW idx for rate used on transmission series 3
+ * @tx_tries3: Max number of retransmissions for transmission series 3
+ *
+ * Multi rate retry (MRR) tx control descriptors are available only on AR5212
+ * MACs, they are part of the normal 4-word tx control descriptor (see above)
+ * but we handle them through a separate function for better abstraction.
+ *
+ * Returns 0 on success or -EINVAL on invalid input
*/
int
-ath5k_hw_setup_mrr_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
- unsigned int tx_rate1, u_int tx_tries1, u_int tx_rate2,
- u_int tx_tries2, unsigned int tx_rate3, u_int tx_tries3)
+ath5k_hw_setup_mrr_tx_desc(struct ath5k_hw *ah,
+ struct ath5k_desc *desc,
+ u_int tx_rate1, u_int tx_tries1,
+ u_int tx_rate2, u_int tx_tries2,
+ u_int tx_rate3, u_int tx_tries3)
{
struct ath5k_hw_4w_tx_ctl *tx_ctl;
@@ -350,11 +430,16 @@ ath5k_hw_setup_mrr_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
* TX Status descriptors *
\***********************/
-/*
- * Process the tx status descriptor on 5210/5211
+/**
+ * ath5k_hw_proc_2word_tx_status() - Process a tx status descriptor on 5210/1
+ * @ah: The &struct ath5k_hw
+ * @desc: The &struct ath5k_desc
+ * @ts: The &struct ath5k_tx_status
*/
-static int ath5k_hw_proc_2word_tx_status(struct ath5k_hw *ah,
- struct ath5k_desc *desc, struct ath5k_tx_status *ts)
+static int
+ath5k_hw_proc_2word_tx_status(struct ath5k_hw *ah,
+ struct ath5k_desc *desc,
+ struct ath5k_tx_status *ts)
{
struct ath5k_hw_2w_tx_ctl *tx_ctl;
struct ath5k_hw_tx_status *tx_status;
@@ -399,11 +484,16 @@ static int ath5k_hw_proc_2word_tx_status(struct ath5k_hw *ah,
return 0;
}
-/*
- * Process a tx status descriptor on 5212
+/**
+ * ath5k_hw_proc_4word_tx_status() - Process a tx status descriptor on 5212
+ * @ah: The &struct ath5k_hw
+ * @desc: The &struct ath5k_desc
+ * @ts: The &struct ath5k_tx_status
*/
-static int ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah,
- struct ath5k_desc *desc, struct ath5k_tx_status *ts)
+static int
+ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah,
+ struct ath5k_desc *desc,
+ struct ath5k_tx_status *ts)
{
struct ath5k_hw_4w_tx_ctl *tx_ctl;
struct ath5k_hw_tx_status *tx_status;
@@ -460,11 +550,17 @@ static int ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah,
* RX Descriptors *
\****************/
-/*
- * Initialize an rx control descriptor
+/**
+ * ath5k_hw_setup_rx_desc() - Initialize an rx control descriptor
+ * @ah: The &struct ath5k_hw
+ * @desc: The &struct ath5k_desc
+ * @size: RX buffer length in bytes
+ * @flags: One of AR5K_RXDESC_* flags
*/
-int ath5k_hw_setup_rx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
- u32 size, unsigned int flags)
+int
+ath5k_hw_setup_rx_desc(struct ath5k_hw *ah,
+ struct ath5k_desc *desc,
+ u32 size, unsigned int flags)
{
struct ath5k_hw_rx_ctl *rx_ctl;
@@ -491,11 +587,22 @@ int ath5k_hw_setup_rx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
return 0;
}
-/*
- * Process the rx status descriptor on 5210/5211
+/**
+ * ath5k_hw_proc_5210_rx_status() - Process the rx status descriptor on 5210/1
+ * @ah: The &struct ath5k_hw
+ * @desc: The &struct ath5k_desc
+ * @rs: The &struct ath5k_rx_status
+ *
+ * Internal function used to process an RX status descriptor
+ * on AR5210/5211 MAC.
+ *
+ * Returns 0 on success or -EINPROGRESS in case we haven't received the whole
+ * frame yet.
*/
-static int ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah,
- struct ath5k_desc *desc, struct ath5k_rx_status *rs)
+static int
+ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah,
+ struct ath5k_desc *desc,
+ struct ath5k_rx_status *rs)
{
struct ath5k_hw_rx_status *rx_status;
@@ -574,12 +681,22 @@ static int ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah,
return 0;
}
-/*
- * Process the rx status descriptor on 5212
+/**
+ * ath5k_hw_proc_5212_rx_status() - Process the rx status descriptor on 5212
+ * @ah: The &struct ath5k_hw
+ * @desc: The &struct ath5k_desc
+ * @rs: The &struct ath5k_rx_status
+ *
+ * Internal function used to process an RX status descriptor
+ * on AR5212 and later MAC.
+ *
+ * Returns 0 on success or -EINPROGRESS in case we haven't received the whole
+ * frame yet.
*/
-static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
- struct ath5k_desc *desc,
- struct ath5k_rx_status *rs)
+static int
+ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
+ struct ath5k_desc *desc,
+ struct ath5k_rx_status *rs)
{
struct ath5k_hw_rx_status *rx_status;
u32 rxstat0, rxstat1;
@@ -646,10 +763,16 @@ static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
* Attach *
\********/
-/*
- * Init function pointers inside ath5k_hw struct
+/**
+ * ath5k_hw_init_desc_functions() - Init function pointers inside ah
+ * @ah: The &struct ath5k_hw
+ *
+ * Maps the internal descriptor functions to the function pointers on ah, used
+ * from above. This is used as an abstraction layer to handle the various chips
+ * the same way.
*/
-int ath5k_hw_init_desc_functions(struct ath5k_hw *ah)
+int
+ath5k_hw_init_desc_functions(struct ath5k_hw *ah)
{
if (ah->ah_version == AR5K_AR5212) {
ah->ah_setup_tx_desc = ath5k_hw_setup_4word_tx_desc;
diff --git a/drivers/net/wireless/ath/ath5k/desc.h b/drivers/net/wireless/ath/ath5k/desc.h
index cfd529b548f3..8d6c01a49ea3 100644
--- a/drivers/net/wireless/ath/ath5k/desc.h
+++ b/drivers/net/wireless/ath/ath5k/desc.h
@@ -20,25 +20,30 @@
* RX/TX descriptor structures
*/
-/*
- * Common hardware RX control descriptor
+/**
+ * struct ath5k_hw_rx_ctl - Common hardware RX control descriptor
+ * @rx_control_0: RX control word 0
+ * @rx_control_1: RX control word 1
*/
struct ath5k_hw_rx_ctl {
- u32 rx_control_0; /* RX control word 0 */
- u32 rx_control_1; /* RX control word 1 */
+ u32 rx_control_0;
+ u32 rx_control_1;
} __packed __aligned(4);
/* RX control word 1 fields/flags */
#define AR5K_DESC_RX_CTL1_BUF_LEN 0x00000fff /* data buffer length */
#define AR5K_DESC_RX_CTL1_INTREQ 0x00002000 /* RX interrupt request */
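
A short sketch of how these two fields are used when preparing an RX buffer (roughly what ath5k_hw_setup_rx_desc() in desc.c does; example_fill_rx_ctl() is a made-up wrapper for illustration, the flag names are the existing AR5K_* ones):

static int example_fill_rx_ctl(struct ath5k_desc *desc, u32 size,
			       unsigned int flags)
{
	struct ath5k_hw_rx_ctl *rx_ctl = &desc->ud.ds_rx.rx_ctl;

	/* the buffer length must fit in the 12 bit BUF_LEN field */
	if (size & ~AR5K_DESC_RX_CTL1_BUF_LEN)
		return -EINVAL;

	rx_ctl->rx_control_1 = size & AR5K_DESC_RX_CTL1_BUF_LEN;
	if (flags & AR5K_RXDESC_INTREQ)
		rx_ctl->rx_control_1 |= AR5K_DESC_RX_CTL1_INTREQ;

	return 0;
}
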
-/*
- * Common hardware RX status descriptor
+/**
+ * struct ath5k_hw_rx_status - Common hardware RX status descriptor
+ * @rx_status_0: RX status word 0
+ * @rx_status_1: RX status word 1
+ *
* 5210, 5211 and 5212 differ only in the fields and flags defined below
*/
struct ath5k_hw_rx_status {
- u32 rx_status_0; /* RX status word 0 */
- u32 rx_status_1; /* RX status word 1 */
+ u32 rx_status_0;
+ u32 rx_status_1;
} __packed __aligned(4);
/* 5210/5211 */
@@ -98,17 +103,36 @@ struct ath5k_hw_rx_status {
/**
* enum ath5k_phy_error_code - PHY Error codes
+ * @AR5K_RX_PHY_ERROR_UNDERRUN: Transmit underrun, [5210] No error
+ * @AR5K_RX_PHY_ERROR_TIMING: Timing error
+ * @AR5K_RX_PHY_ERROR_PARITY: Illegal parity
+ * @AR5K_RX_PHY_ERROR_RATE: Illegal rate
+ * @AR5K_RX_PHY_ERROR_LENGTH: Illegal length
+ * @AR5K_RX_PHY_ERROR_RADAR: Radar detect, [5210] 64 QAM rate
+ * @AR5K_RX_PHY_ERROR_SERVICE: Illegal service
+ * @AR5K_RX_PHY_ERROR_TOR: Transmit override receive
+ * @AR5K_RX_PHY_ERROR_OFDM_TIMING: OFDM Timing error [5212+]
+ * @AR5K_RX_PHY_ERROR_OFDM_SIGNAL_PARITY: OFDM Signal parity error [5212+]
+ * @AR5K_RX_PHY_ERROR_OFDM_RATE_ILLEGAL: OFDM Illegal rate [5212+]
+ * @AR5K_RX_PHY_ERROR_OFDM_LENGTH_ILLEGAL: OFDM Illegal length [5212+]
+ * @AR5K_RX_PHY_ERROR_OFDM_POWER_DROP: OFDM Power drop [5212+]
+ * @AR5K_RX_PHY_ERROR_OFDM_SERVICE: OFDM Service (?) [5212+]
+ * @AR5K_RX_PHY_ERROR_OFDM_RESTART: OFDM Restart (?) [5212+]
+ * @AR5K_RX_PHY_ERROR_CCK_TIMING: CCK Timing error [5212+]
+ * @AR5K_RX_PHY_ERROR_CCK_HEADER_CRC: Header CRC error [5212+]
+ * @AR5K_RX_PHY_ERROR_CCK_RATE_ILLEGAL: Illegal rate [5212+]
+ * @AR5K_RX_PHY_ERROR_CCK_SERVICE: CCK Service (?) [5212+]
+ * @AR5K_RX_PHY_ERROR_CCK_RESTART: CCK Restart (?) [5212+]
*/
enum ath5k_phy_error_code {
- AR5K_RX_PHY_ERROR_UNDERRUN = 0, /* Transmit underrun, [5210] No error */
- AR5K_RX_PHY_ERROR_TIMING = 1, /* Timing error */
- AR5K_RX_PHY_ERROR_PARITY = 2, /* Illegal parity */
- AR5K_RX_PHY_ERROR_RATE = 3, /* Illegal rate */
- AR5K_RX_PHY_ERROR_LENGTH = 4, /* Illegal length */
- AR5K_RX_PHY_ERROR_RADAR = 5, /* Radar detect, [5210] 64 QAM rate */
- AR5K_RX_PHY_ERROR_SERVICE = 6, /* Illegal service */
- AR5K_RX_PHY_ERROR_TOR = 7, /* Transmit override receive */
- /* these are specific to the 5212 */
+ AR5K_RX_PHY_ERROR_UNDERRUN = 0,
+ AR5K_RX_PHY_ERROR_TIMING = 1,
+ AR5K_RX_PHY_ERROR_PARITY = 2,
+ AR5K_RX_PHY_ERROR_RATE = 3,
+ AR5K_RX_PHY_ERROR_LENGTH = 4,
+ AR5K_RX_PHY_ERROR_RADAR = 5,
+ AR5K_RX_PHY_ERROR_SERVICE = 6,
+ AR5K_RX_PHY_ERROR_TOR = 7,
AR5K_RX_PHY_ERROR_OFDM_TIMING = 17,
AR5K_RX_PHY_ERROR_OFDM_SIGNAL_PARITY = 18,
AR5K_RX_PHY_ERROR_OFDM_RATE_ILLEGAL = 19,
@@ -123,12 +147,14 @@ enum ath5k_phy_error_code {
AR5K_RX_PHY_ERROR_CCK_RESTART = 31,
};
-/*
- * 5210/5211 hardware 2-word TX control descriptor
+/**
+ * struct ath5k_hw_2w_tx_ctl - 5210/5211 hardware 2-word TX control descriptor
+ * @tx_control_0: TX control word 0
+ * @tx_control_1: TX control word 1
*/
struct ath5k_hw_2w_tx_ctl {
- u32 tx_control_0; /* TX control word 0 */
- u32 tx_control_1; /* TX control word 1 */
+ u32 tx_control_0;
+ u32 tx_control_1;
} __packed __aligned(4);
/* TX control word 0 fields/flags */
@@ -177,14 +203,18 @@ struct ath5k_hw_2w_tx_ctl {
#define AR5K_AR5210_TX_DESC_FRAME_TYPE_PIFS 4
#define AR5K_AR5211_TX_DESC_FRAME_TYPE_PRESP 4
-/*
- * 5212 hardware 4-word TX control descriptor
+/**
+ * struct ath5k_hw_4w_tx_ctl - 5212 hardware 4-word TX control descriptor
+ * @tx_control_0: TX control word 0
+ * @tx_control_1: TX control word 1
+ * @tx_control_2: TX control word 2
+ * @tx_control_3: TX control word 3
*/
struct ath5k_hw_4w_tx_ctl {
- u32 tx_control_0; /* TX control word 0 */
- u32 tx_control_1; /* TX control word 1 */
- u32 tx_control_2; /* TX control word 2 */
- u32 tx_control_3; /* TX control word 3 */
+ u32 tx_control_0;
+ u32 tx_control_1;
+ u32 tx_control_2;
+ u32 tx_control_3;
} __packed __aligned(4);
/* TX control word 0 fields/flags */
@@ -238,12 +268,14 @@ struct ath5k_hw_4w_tx_ctl {
#define AR5K_4W_TX_DESC_CTL3_RTS_CTS_RATE 0x01f00000 /* RTS or CTS rate */
#define AR5K_4W_TX_DESC_CTL3_RTS_CTS_RATE_S 20
-/*
- * Common TX status descriptor
+/**
+ * struct ath5k_hw_tx_status - Common TX status descriptor
+ * @tx_status_0: TX status word 0
+ * @tx_status_1: TX status word 1
*/
struct ath5k_hw_tx_status {
- u32 tx_status_0; /* TX status word 0 */
- u32 tx_status_1; /* TX status word 1 */
+ u32 tx_status_0;
+ u32 tx_status_1;
} __packed __aligned(4);
/* TX status word 0 fields/flags */
@@ -276,37 +308,47 @@ struct ath5k_hw_tx_status {
#define AR5K_DESC_TX_STATUS1_COMP_SUCCESS_5212 0x00800000 /* [5212] compression status */
#define AR5K_DESC_TX_STATUS1_XMIT_ANTENNA_5212 0x01000000 /* [5212] transmit antenna */
-/*
- * 5210/5211 hardware TX descriptor
+/**
+ * struct ath5k_hw_5210_tx_desc - 5210/5211 hardware TX descriptor
+ * @tx_ctl: The &struct ath5k_hw_2w_tx_ctl
+ * @tx_stat: The &struct ath5k_hw_tx_status
*/
struct ath5k_hw_5210_tx_desc {
struct ath5k_hw_2w_tx_ctl tx_ctl;
struct ath5k_hw_tx_status tx_stat;
} __packed __aligned(4);
-/*
- * 5212 hardware TX descriptor
+/**
+ * struct ath5k_hw_5212_tx_desc - 5212 hardware TX descriptor
+ * @tx_ctl: The &struct ath5k_hw_4w_tx_ctl
+ * @tx_stat: The &struct ath5k_hw_tx_status
*/
struct ath5k_hw_5212_tx_desc {
struct ath5k_hw_4w_tx_ctl tx_ctl;
struct ath5k_hw_tx_status tx_stat;
} __packed __aligned(4);
-/*
- * Common hardware RX descriptor
+/**
+ * struct ath5k_hw_all_rx_desc - Common hardware RX descriptor
+ * @rx_ctl: The &struct ath5k_hw_rx_ctl
+ * @rx_stat: The &struct ath5k_hw_rx_status
*/
struct ath5k_hw_all_rx_desc {
struct ath5k_hw_rx_ctl rx_ctl;
struct ath5k_hw_rx_status rx_stat;
} __packed __aligned(4);
-/*
- * Atheros hardware DMA descriptor
+/**
+ * struct ath5k_desc - Atheros hardware DMA descriptor
+ * @ds_link: Physical address of the next descriptor
+ * @ds_data: Physical address of data buffer (skb)
+ * @ud: Union containing hw_5xxx_tx_desc structs and hw_all_rx_desc
+ *
* This is read and written to by the hardware
*/
struct ath5k_desc {
- u32 ds_link; /* physical address of the next descriptor */
- u32 ds_data; /* physical address of data buffer (skb) */
+ u32 ds_link;
+ u32 ds_data;
union {
struct ath5k_hw_5210_tx_desc ds_tx5210;
diff --git a/drivers/net/wireless/ath/ath5k/dma.c b/drivers/net/wireless/ath/ath5k/dma.c
index 2481f9c7f4b6..5cc9aa814697 100644
--- a/drivers/net/wireless/ath/ath5k/dma.c
+++ b/drivers/net/wireless/ath/ath5k/dma.c
@@ -20,16 +20,13 @@
* DMA and interrupt masking functions *
\*************************************/
-/*
- * dma.c - DMA and interrupt masking functions
+/**
+ * DOC: DMA and interrupt masking functions
*
* Here we setup descriptor pointers (rxdp/txdp) start/stop dma engine and
* handle queue setup for 5210 chipset (rest are handled on qcu.c).
* Also we setup interrupt mask register (IMR) and read the various interrupt
* status registers (ISR).
- *
- * TODO: Handle SISR on 5211+ and introduce a function to return the queue
- * number that resulted the interrupt.
*/
#include "ath5k.h"
@@ -42,22 +39,22 @@
\*********/
/**
- * ath5k_hw_start_rx_dma - Start DMA receive
- *
+ * ath5k_hw_start_rx_dma() - Start DMA receive
* @ah: The &struct ath5k_hw
*/
-void ath5k_hw_start_rx_dma(struct ath5k_hw *ah)
+void
+ath5k_hw_start_rx_dma(struct ath5k_hw *ah)
{
ath5k_hw_reg_write(ah, AR5K_CR_RXE, AR5K_CR);
ath5k_hw_reg_read(ah, AR5K_CR);
}
/**
- * ath5k_hw_stop_rx_dma - Stop DMA receive
- *
+ * ath5k_hw_stop_rx_dma() - Stop DMA receive
* @ah: The &struct ath5k_hw
*/
-static int ath5k_hw_stop_rx_dma(struct ath5k_hw *ah)
+static int
+ath5k_hw_stop_rx_dma(struct ath5k_hw *ah)
{
unsigned int i;
@@ -79,24 +76,24 @@ static int ath5k_hw_stop_rx_dma(struct ath5k_hw *ah)
}
/**
- * ath5k_hw_get_rxdp - Get RX Descriptor's address
- *
+ * ath5k_hw_get_rxdp() - Get RX Descriptor's address
* @ah: The &struct ath5k_hw
*/
-u32 ath5k_hw_get_rxdp(struct ath5k_hw *ah)
+u32
+ath5k_hw_get_rxdp(struct ath5k_hw *ah)
{
return ath5k_hw_reg_read(ah, AR5K_RXDP);
}
/**
- * ath5k_hw_set_rxdp - Set RX Descriptor's address
- *
+ * ath5k_hw_set_rxdp() - Set RX Descriptor's address
* @ah: The &struct ath5k_hw
* @phys_addr: RX descriptor address
*
* Returns -EIO if rx is active
*/
-int ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr)
+int
+ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr)
{
if (ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_CR_RXE) {
ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
@@ -114,8 +111,7 @@ int ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr)
\**********/
/**
- * ath5k_hw_start_tx_dma - Start DMA transmit for a specific queue
- *
+ * ath5k_hw_start_tx_dma() - Start DMA transmit for a specific queue
* @ah: The &struct ath5k_hw
* @queue: The hw queue number
*
@@ -128,7 +124,8 @@ int ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr)
* NOTE: Must be called after setting up tx control descriptor for that
* queue (see below).
*/
-int ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue)
+int
+ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue)
{
u32 tx_queue;
@@ -177,17 +174,16 @@ int ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue)
}
/**
- * ath5k_hw_stop_tx_dma - Stop DMA transmit on a specific queue
- *
+ * ath5k_hw_stop_tx_dma() - Stop DMA transmit on a specific queue
* @ah: The &struct ath5k_hw
* @queue: The hw queue number
*
* Stop DMA transmit on a specific hw queue and drain queue so we don't
* have any pending frames. Returns -EBUSY if we still have pending frames,
* -EINVAL if queue number is out of range or inactive.
- *
*/
-static int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
+static int
+ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
{
unsigned int i = 40;
u32 tx_queue, pending;
@@ -320,14 +316,14 @@ static int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
}
/**
- * ath5k_hw_stop_beacon_queue - Stop beacon queue
- *
- * @ah The &struct ath5k_hw
- * @queue The queue number
+ * ath5k_hw_stop_beacon_queue() - Stop beacon queue
+ * @ah: The &struct ath5k_hw
+ * @queue: The queue number
*
* Returns -EIO if queue didn't stop
*/
-int ath5k_hw_stop_beacon_queue(struct ath5k_hw *ah, unsigned int queue)
+int
+ath5k_hw_stop_beacon_queue(struct ath5k_hw *ah, unsigned int queue)
{
int ret;
ret = ath5k_hw_stop_tx_dma(ah, queue);
@@ -340,8 +336,7 @@ int ath5k_hw_stop_beacon_queue(struct ath5k_hw *ah, unsigned int queue)
}
/**
- * ath5k_hw_get_txdp - Get TX Descriptor's address for a specific queue
- *
+ * ath5k_hw_get_txdp() - Get TX Descriptor's address for a specific queue
* @ah: The &struct ath5k_hw
* @queue: The hw queue number
*
@@ -352,7 +347,8 @@ int ath5k_hw_stop_beacon_queue(struct ath5k_hw *ah, unsigned int queue)
*
* XXX: Is TXDP read and clear ?
*/
-u32 ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue)
+u32
+ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue)
{
u16 tx_reg;
@@ -382,10 +378,10 @@ u32 ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue)
}
/**
- * ath5k_hw_set_txdp - Set TX Descriptor's address for a specific queue
- *
+ * ath5k_hw_set_txdp() - Set TX Descriptor's address for a specific queue
* @ah: The &struct ath5k_hw
* @queue: The hw queue number
+ * @phys_addr: The physical address
*
* Set TX descriptor's address for a specific queue. For 5210 we ignore
* the queue number and we use tx queue type since we only have 2 queues
@@ -394,7 +390,8 @@ u32 ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue)
* Returns -EINVAL if queue type is invalid for 5210 and -EIO if queue is still
* active.
*/
-int ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr)
+int
+ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr)
{
u16 tx_reg;
@@ -435,8 +432,7 @@ int ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr)
}
/**
- * ath5k_hw_update_tx_triglevel - Update tx trigger level
- *
+ * ath5k_hw_update_tx_triglevel() - Update tx trigger level
* @ah: The &struct ath5k_hw
* @increase: Flag to force increase of trigger level
*
@@ -444,15 +440,15 @@ int ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr)
* buffer (aka FIFO threshold) that is used to indicate when PCU flushes
* the buffer and transmits its data. Lowering this results sending small
* frames more quickly but can lead to tx underruns, raising it a lot can
- * result other problems (i think bmiss is related). Right now we start with
- * the lowest possible (64Bytes) and if we get tx underrun we increase it using
- * the increase flag. Returns -EIO if we have reached maximum/minimum.
+ * result in other problems. Right now we start with the lowest possible
+ * (64 bytes) and if we get tx underrun we increase it using the increase
+ * flag. Returns -EIO if we have reached maximum/minimum.
*
* XXX: Link this with tx DMA size ?
- * XXX: Use it to save interrupts ?
- * TODO: Needs testing, i think it's related to bmiss...
+ * XXX2: Use it to save interrupts ?
*/
-int ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase)
+int
+ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase)
{
u32 trigger_level, imr;
int ret = -EIO;
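
Only the opening of the function body appears in this hunk; as a sketch of the policy the comment above describes, the "increase" step looks roughly like the fragment below (illustrative only, not the patched body; the step formula is the one quoted in the old enum documentation removed earlier in this patch):

static int example_bump_trigger_level(u32 *trigger_level)
{
	if (*trigger_level >= AR5K_TUNE_MAX_TX_FIFO_THRES)
		return -EIO;	/* already at the maximum, nothing to do */

	/* move half-way towards the maximum FIFO threshold */
	*trigger_level += (AR5K_TUNE_MAX_TX_FIFO_THRES - *trigger_level) / 2;

	return 0;
}
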
@@ -498,21 +494,20 @@ done:
\*******************/
/**
- * ath5k_hw_is_intr_pending - Check if we have pending interrupts
- *
+ * ath5k_hw_is_intr_pending() - Check if we have pending interrupts
* @ah: The &struct ath5k_hw
*
* Check if we have pending interrupts to process. Returns 1 if we
* have pending interrupts and 0 if we haven't.
*/
-bool ath5k_hw_is_intr_pending(struct ath5k_hw *ah)
+bool
+ath5k_hw_is_intr_pending(struct ath5k_hw *ah)
{
return ath5k_hw_reg_read(ah, AR5K_INTPEND) == 1 ? 1 : 0;
}
/**
- * ath5k_hw_get_isr - Get interrupt status
- *
+ * ath5k_hw_get_isr() - Get interrupt status
* @ah: The @struct ath5k_hw
* @interrupt_mask: Driver's interrupt mask used to filter out
* interrupts in sw.
@@ -523,62 +518,162 @@ bool ath5k_hw_is_intr_pending(struct ath5k_hw *ah)
* being mapped on some standard non hw-specific positions
* (check out &ath5k_int).
*
- * NOTE: We use read-and-clear register, so after this function is called ISR
- * is zeroed.
+ * NOTE: We do write-to-clear, so the active PISR/SISR bits at the time this
+ * function gets called are cleared on return.
*/
-int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
+int
+ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
{
- u32 data;
+ u32 data = 0;
/*
- * Read interrupt status from the Interrupt Status register
- * on 5210
+ * Read interrupt status from Primary Interrupt
+ * Register.
+ *
+ * Note: PISR/SISR Not available on 5210
*/
if (ah->ah_version == AR5K_AR5210) {
- data = ath5k_hw_reg_read(ah, AR5K_ISR);
- if (unlikely(data == AR5K_INT_NOCARD)) {
- *interrupt_mask = data;
+ u32 isr = 0;
+ isr = ath5k_hw_reg_read(ah, AR5K_ISR);
+ if (unlikely(isr == AR5K_INT_NOCARD)) {
+ *interrupt_mask = isr;
return -ENODEV;
}
- } else {
+
/*
- * Read interrupt status from Interrupt
- * Status Register shadow copy (Read And Clear)
- *
- * Note: PISR/SISR Not available on 5210
+ * Filter out the non-common bits from the interrupt
+ * status.
*/
- data = ath5k_hw_reg_read(ah, AR5K_RAC_PISR);
- if (unlikely(data == AR5K_INT_NOCARD)) {
- *interrupt_mask = data;
+ *interrupt_mask = (isr & AR5K_INT_COMMON) & ah->ah_imr;
+
+		/* Handle INT_FATAL */
+ if (unlikely(isr & (AR5K_ISR_SSERR | AR5K_ISR_MCABT
+ | AR5K_ISR_DPERR)))
+ *interrupt_mask |= AR5K_INT_FATAL;
+
+ /*
+ * XXX: BMISS interrupts may occur after association.
+ * I found this on 5210 code but it needs testing. If this is
+ * true we should disable them before assoc and re-enable them
+ * after a successful assoc + some jiffies.
+ interrupt_mask &= ~AR5K_INT_BMISS;
+ */
+
+ data = isr;
+ } else {
+ u32 pisr = 0;
+ u32 pisr_clear = 0;
+ u32 sisr0 = 0;
+ u32 sisr1 = 0;
+ u32 sisr2 = 0;
+ u32 sisr3 = 0;
+ u32 sisr4 = 0;
+
+ /* Read PISR and SISRs... */
+ pisr = ath5k_hw_reg_read(ah, AR5K_PISR);
+ if (unlikely(pisr == AR5K_INT_NOCARD)) {
+ *interrupt_mask = pisr;
return -ENODEV;
}
- }
- /*
- * Get abstract interrupt mask (driver-compatible)
- */
- *interrupt_mask = (data & AR5K_INT_COMMON) & ah->ah_imr;
+ sisr0 = ath5k_hw_reg_read(ah, AR5K_SISR0);
+ sisr1 = ath5k_hw_reg_read(ah, AR5K_SISR1);
+ sisr2 = ath5k_hw_reg_read(ah, AR5K_SISR2);
+ sisr3 = ath5k_hw_reg_read(ah, AR5K_SISR3);
+ sisr4 = ath5k_hw_reg_read(ah, AR5K_SISR4);
- if (ah->ah_version != AR5K_AR5210) {
- u32 sisr2 = ath5k_hw_reg_read(ah, AR5K_RAC_SISR2);
+ /*
+ * PISR holds the logical OR of interrupt bits
+ * from SISR registers:
+ *
+ * TXOK and TXDESC -> Logical OR of TXOK and TXDESC
+ * per-queue bits on SISR0
+ *
+ * TXERR and TXEOL -> Logical OR of TXERR and TXEOL
+ * per-queue bits on SISR1
+ *
+ * TXURN -> Logical OR of TXURN per-queue bits on SISR2
+ *
+		 * HIUERR -> Logical OR of MCABT, SSERR and DPERR bits on SISR2
+ *
+ * BCNMISC -> Logical OR of TIM, CAB_END, DTIM_SYNC
+ * BCN_TIMEOUT, CAB_TIMEOUT and DTIM
+ * (and TSFOOR ?) bits on SISR2
+ *
+ * QCBRORN and QCBRURN -> Logical OR of QCBRORN and
+ * QCBRURN per-queue bits on SISR3
+ * QTRIG -> Logical OR of QTRIG per-queue bits on SISR4
+ *
+		 * If we clear these bits on PISR we'll also clear all
+		 * related bits from SISRs, e.g. if we write the TXOK bit on
+		 * PISR we'll clear all TXOK bits from SISR0 so if a new TXOK
+ * interrupt got fired for another queue while we were reading
+ * the interrupt registers and we write back the TXOK bit on
+		 * PISR we'll lose it. So make sure that we don't write back
+ * on PISR any bits that come from SISRs. Clearing them from
+ * SISRs will also clear PISR so no need to worry here.
+ */
- /*HIU = Host Interface Unit (PCI etc)*/
- if (unlikely(data & (AR5K_ISR_HIUERR)))
- *interrupt_mask |= AR5K_INT_FATAL;
+ pisr_clear = pisr & ~AR5K_ISR_BITS_FROM_SISRS;
- /*Beacon Not Ready*/
- if (unlikely(data & (AR5K_ISR_BNR)))
- *interrupt_mask |= AR5K_INT_BNR;
+ /*
+ * Write to clear them...
+ * Note: This means that each bit we write back
+ * to the registers will get cleared, leaving the
+ * rest unaffected. So this won't affect new interrupts
+		 * we didn't catch while reading/processing, we'll get
+ * them next time get_isr gets called.
+ */
+ ath5k_hw_reg_write(ah, sisr0, AR5K_SISR0);
+ ath5k_hw_reg_write(ah, sisr1, AR5K_SISR1);
+ ath5k_hw_reg_write(ah, sisr2, AR5K_SISR2);
+ ath5k_hw_reg_write(ah, sisr3, AR5K_SISR3);
+ ath5k_hw_reg_write(ah, sisr4, AR5K_SISR4);
+ ath5k_hw_reg_write(ah, pisr_clear, AR5K_PISR);
+ /* Flush previous write */
+ ath5k_hw_reg_read(ah, AR5K_PISR);
- if (unlikely(sisr2 & (AR5K_SISR2_SSERR |
- AR5K_SISR2_DPERR |
- AR5K_SISR2_MCABT)))
- *interrupt_mask |= AR5K_INT_FATAL;
+ /*
+ * Filter out the non-common bits from the interrupt
+ * status.
+ */
+ *interrupt_mask = (pisr & AR5K_INT_COMMON) & ah->ah_imr;
+
+
+		/* We treat TXOK, TXDESC, TXERR and TXEOL
+ * the same way (schedule the tx tasklet)
+ * so we track them all together per queue */
+ if (pisr & AR5K_ISR_TXOK)
+ ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr0,
+ AR5K_SISR0_QCU_TXOK);
- if (data & AR5K_ISR_TIM)
+ if (pisr & AR5K_ISR_TXDESC)
+ ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr0,
+ AR5K_SISR0_QCU_TXDESC);
+
+ if (pisr & AR5K_ISR_TXERR)
+ ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr1,
+ AR5K_SISR1_QCU_TXERR);
+
+ if (pisr & AR5K_ISR_TXEOL)
+ ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr1,
+ AR5K_SISR1_QCU_TXEOL);
+
+		/* Currently this is not very useful since we treat
+		 * all queues the same way if we get a TXURN (update
+		 * tx trigger level) but we might need it later on */
+ if (pisr & AR5K_ISR_TXURN)
+ ah->ah_txq_isr_txurn |= AR5K_REG_MS(sisr2,
+ AR5K_SISR2_QCU_TXURN);
+
+ /* Misc Beacon related interrupts */
+
+ /* For AR5211 */
+ if (pisr & AR5K_ISR_TIM)
*interrupt_mask |= AR5K_INT_TIM;
- if (data & AR5K_ISR_BCNMISC) {
+ /* For AR5212+ */
+ if (pisr & AR5K_ISR_BCNMISC) {
if (sisr2 & AR5K_SISR2_TIM)
*interrupt_mask |= AR5K_INT_TIM;
if (sisr2 & AR5K_SISR2_DTIM)
@@ -591,63 +686,39 @@ int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
*interrupt_mask |= AR5K_INT_CAB_TIMEOUT;
}
- if (data & AR5K_ISR_RXDOPPLER)
- *interrupt_mask |= AR5K_INT_RX_DOPPLER;
- if (data & AR5K_ISR_QCBRORN) {
+ /* Below interrupts are unlikely to happen */
+
+ /* HIU = Host Interface Unit (PCI etc)
+ * Can be one of MCABT, SSERR, DPERR from SISR2 */
+ if (unlikely(pisr & (AR5K_ISR_HIUERR)))
+ *interrupt_mask |= AR5K_INT_FATAL;
+
+ /*Beacon Not Ready*/
+ if (unlikely(pisr & (AR5K_ISR_BNR)))
+ *interrupt_mask |= AR5K_INT_BNR;
+
+ /* A queue got CBR overrun */
+ if (unlikely(pisr & (AR5K_ISR_QCBRORN))) {
*interrupt_mask |= AR5K_INT_QCBRORN;
- ah->ah_txq_isr |= AR5K_REG_MS(
- ath5k_hw_reg_read(ah, AR5K_RAC_SISR3),
- AR5K_SISR3_QCBRORN);
+ ah->ah_txq_isr_qcborn |= AR5K_REG_MS(sisr3,
+ AR5K_SISR3_QCBRORN);
}
- if (data & AR5K_ISR_QCBRURN) {
+
+ /* A queue got CBR underrun */
+ if (unlikely(pisr & (AR5K_ISR_QCBRURN))) {
*interrupt_mask |= AR5K_INT_QCBRURN;
- ah->ah_txq_isr |= AR5K_REG_MS(
- ath5k_hw_reg_read(ah, AR5K_RAC_SISR3),
- AR5K_SISR3_QCBRURN);
+ ah->ah_txq_isr_qcburn |= AR5K_REG_MS(sisr3,
+ AR5K_SISR3_QCBRURN);
}
- if (data & AR5K_ISR_QTRIG) {
+
+ /* A queue got triggered */
+ if (unlikely(pisr & (AR5K_ISR_QTRIG))) {
*interrupt_mask |= AR5K_INT_QTRIG;
- ah->ah_txq_isr |= AR5K_REG_MS(
- ath5k_hw_reg_read(ah, AR5K_RAC_SISR4),
- AR5K_SISR4_QTRIG);
+ ah->ah_txq_isr_qtrig |= AR5K_REG_MS(sisr4,
+ AR5K_SISR4_QTRIG);
}
- if (data & AR5K_ISR_TXOK)
- ah->ah_txq_isr |= AR5K_REG_MS(
- ath5k_hw_reg_read(ah, AR5K_RAC_SISR0),
- AR5K_SISR0_QCU_TXOK);
-
- if (data & AR5K_ISR_TXDESC)
- ah->ah_txq_isr |= AR5K_REG_MS(
- ath5k_hw_reg_read(ah, AR5K_RAC_SISR0),
- AR5K_SISR0_QCU_TXDESC);
-
- if (data & AR5K_ISR_TXERR)
- ah->ah_txq_isr |= AR5K_REG_MS(
- ath5k_hw_reg_read(ah, AR5K_RAC_SISR1),
- AR5K_SISR1_QCU_TXERR);
-
- if (data & AR5K_ISR_TXEOL)
- ah->ah_txq_isr |= AR5K_REG_MS(
- ath5k_hw_reg_read(ah, AR5K_RAC_SISR1),
- AR5K_SISR1_QCU_TXEOL);
-
- if (data & AR5K_ISR_TXURN)
- ah->ah_txq_isr |= AR5K_REG_MS(
- ath5k_hw_reg_read(ah, AR5K_RAC_SISR2),
- AR5K_SISR2_QCU_TXURN);
- } else {
- if (unlikely(data & (AR5K_ISR_SSERR | AR5K_ISR_MCABT
- | AR5K_ISR_HIUERR | AR5K_ISR_DPERR)))
- *interrupt_mask |= AR5K_INT_FATAL;
-
- /*
- * XXX: BMISS interrupts may occur after association.
- * I found this on 5210 code but it needs testing. If this is
- * true we should disable them before assoc and re-enable them
- * after a successful assoc + some jiffies.
- interrupt_mask &= ~AR5K_INT_BMISS;
- */
+ data = pisr;
}
/*
@@ -661,8 +732,7 @@ int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
}
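
AR5K_REG_MS(), used throughout the hunks above to collect per-queue bits, is not part of this diff; for reference it behaves like a mask-and-shift field extractor, roughly as sketched here (EXAMPLE_REG_MS and the usage line are illustrative assumptions, not new API):

/* extract a multi-bit register field and shift it down to bit 0;
 * _flags##_S is the shift constant that pairs with the _flags mask */
#define EXAMPLE_REG_MS(_val, _flags) \
	(((_val) & (_flags)) >> _flags##_S)

/* e.g. collecting the per-queue TXOK bits of SISR0 into a plain
 * queue bitmap, as the code above does with AR5K_REG_MS(): */
ah->ah_txq_isr_txok_all |= EXAMPLE_REG_MS(sisr0, AR5K_SISR0_QCU_TXOK);
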
/**
- * ath5k_hw_set_imr - Set interrupt mask
- *
+ * ath5k_hw_set_imr() - Set interrupt mask
* @ah: The &struct ath5k_hw
* @new_mask: The new interrupt mask to be set
*
@@ -670,7 +740,8 @@ int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
* ath5k_int bits to hw-specific bits to remove abstraction and writing
* Interrupt Mask Register.
*/
-enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
+enum ath5k_int
+ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
{
enum ath5k_int old_mask, int_mask;
@@ -697,16 +768,14 @@ enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
u32 simr2 = ath5k_hw_reg_read(ah, AR5K_SIMR2)
& AR5K_SIMR2_QCU_TXURN;
+ /* Fatal interrupt abstraction for 5211+ */
if (new_mask & AR5K_INT_FATAL) {
int_mask |= AR5K_IMR_HIUERR;
simr2 |= (AR5K_SIMR2_MCABT | AR5K_SIMR2_SSERR
| AR5K_SIMR2_DPERR);
}
- /*Beacon Not Ready*/
- if (new_mask & AR5K_INT_BNR)
- int_mask |= AR5K_INT_BNR;
-
+ /* Misc beacon related interrupts */
if (new_mask & AR5K_INT_TIM)
int_mask |= AR5K_IMR_TIM;
@@ -721,8 +790,9 @@ enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
if (new_mask & AR5K_INT_CAB_TIMEOUT)
simr2 |= AR5K_SISR2_CAB_TIMEOUT;
- if (new_mask & AR5K_INT_RX_DOPPLER)
- int_mask |= AR5K_IMR_RXDOPPLER;
+ /*Beacon Not Ready*/
+ if (new_mask & AR5K_INT_BNR)
+ int_mask |= AR5K_INT_BNR;
/* Note: Per queue interrupt masks
* are set via ath5k_hw_reset_tx_queue() (qcu.c) */
@@ -730,10 +800,12 @@ enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
ath5k_hw_reg_write(ah, simr2, AR5K_SIMR2);
} else {
+ /* Fatal interrupt abstraction for 5210 */
if (new_mask & AR5K_INT_FATAL)
int_mask |= (AR5K_IMR_SSERR | AR5K_IMR_MCABT
| AR5K_IMR_HIUERR | AR5K_IMR_DPERR);
+ /* Only common interrupts left for 5210 (no SIMRs) */
ath5k_hw_reg_write(ah, int_mask, AR5K_IMR);
}
@@ -760,8 +832,7 @@ enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
\********************/
/**
- * ath5k_hw_dma_init - Initialize DMA unit
- *
+ * ath5k_hw_dma_init() - Initialize DMA unit
* @ah: The &struct ath5k_hw
*
* Set DMA size and pre-enable interrupts
@@ -770,7 +841,8 @@ enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
*
* XXX: Save/restore RXDP/TXDP registers ?
*/
-void ath5k_hw_dma_init(struct ath5k_hw *ah)
+void
+ath5k_hw_dma_init(struct ath5k_hw *ah)
{
/*
* Set Rx/Tx DMA Configuration
@@ -799,8 +871,7 @@ void ath5k_hw_dma_init(struct ath5k_hw *ah)
}
/**
- * ath5k_hw_dma_stop - stop DMA unit
- *
+ * ath5k_hw_dma_stop() - stop DMA unit
* @ah: The &struct ath5k_hw
*
* Stop tx/rx DMA and interrupts. Returns
@@ -810,7 +881,8 @@ void ath5k_hw_dma_init(struct ath5k_hw *ah)
* stuck frames on tx queues, only a reset
* can fix that.
*/
-int ath5k_hw_dma_stop(struct ath5k_hw *ah)
+int
+ath5k_hw_dma_stop(struct ath5k_hw *ah)
{
int i, qmax, err;
err = 0;
diff --git a/drivers/net/wireless/ath/ath5k/gpio.c b/drivers/net/wireless/ath/ath5k/gpio.c
index 859297811914..73d3dd8a306a 100644
--- a/drivers/net/wireless/ath/ath5k/gpio.c
+++ b/drivers/net/wireless/ath/ath5k/gpio.c
@@ -24,10 +24,33 @@
#include "reg.h"
#include "debug.h"
-/*
- * Set led state
+
+/**
+ * DOC: GPIO/LED functions
+ *
+ * Here we control the 6 bidirectional GPIO pins provided by the hw.
+ * We can set a GPIO pin to be an input or an output via the GPIO
+ * control register and then read or set its status from the GPIO data
+ * input/output registers.
+ *
+ * We also control the two LED pins provided by the hw: LED_0 is our
+ * "power" LED and LED_1 our "network activity" LED, but the hw supports
+ * more scenarios. Vendors might also provide LEDs connected to the
+ * GPIO pins; we handle those through the LED subsystem in led.c
+ */
+
+
+/**
+ * ath5k_hw_set_ledstate() - Set led state
+ * @ah: The &struct ath5k_hw
+ * @state: One of AR5K_LED_*
+ *
+ * Used to set the LED blinking state. This only works
+ * for the LEDs connected to the LED_0 and LED_1 pins,
+ * not the GPIO-based ones.
*/
-void ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state)
+void
+ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state)
{
u32 led;
/*5210 has different led mode handling*/
@@ -74,10 +97,13 @@ void ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state)
AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG, led_5210);
}
-/*
- * Set GPIO inputs
+/**
+ * ath5k_hw_set_gpio_input() - Set GPIO inputs
+ * @ah: The &struct ath5k_hw
+ * @gpio: GPIO pin to set as input
*/
-int ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio)
+int
+ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio)
{
if (gpio >= AR5K_NUM_GPIO)
return -EINVAL;
@@ -89,10 +115,13 @@ int ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio)
return 0;
}
-/*
- * Set GPIO outputs
+/**
+ * ath5k_hw_set_gpio_output() - Set GPIO outputs
+ * @ah: The &struct ath5k_hw
+ * @gpio: The GPIO pin to set as output
*/
-int ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio)
+int
+ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio)
{
if (gpio >= AR5K_NUM_GPIO)
return -EINVAL;
@@ -104,10 +133,13 @@ int ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio)
return 0;
}
-/*
- * Get GPIO state
+/**
+ * ath5k_hw_get_gpio() - Get GPIO state
+ * @ah: The &struct ath5k_hw
+ * @gpio: The GPIO pin to read
*/
-u32 ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio)
+u32
+ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio)
{
if (gpio >= AR5K_NUM_GPIO)
return 0xffffffff;
@@ -117,10 +149,14 @@ u32 ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio)
0x1;
}
-/*
- * Set GPIO state
+/**
+ * ath5k_hw_set_gpio() - Set GPIO state
+ * @ah: The &struct ath5k_hw
+ * @gpio: The GPIO pin to set
+ * @val: Value to set (boolean)
*/
-int ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val)
+int
+ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val)
{
u32 data;
@@ -138,10 +174,19 @@ int ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val)
return 0;
}
-/*
- * Initialize the GPIO interrupt (RFKill switch)
+/**
+ * ath5k_hw_set_gpio_intr() - Initialize the GPIO interrupt (RFKill switch)
+ * @ah: The &struct ath5k_hw
+ * @gpio: The GPIO pin to use
+ * @interrupt_level: True to generate interrupt on active pin (high)
+ *
+ * This function is used to set up the GPIO interrupt for the hw RFKill switch.
+ * That switch is connected to a GPIO pin and its number is stored in the EEPROM.
+ * It can either open or close the circuit to indicate that we should disable
+ * RF/Wireless to save power (we also get that from EEPROM).
*/
-void ath5k_hw_set_gpio_intr(struct ath5k_hw *ah, unsigned int gpio,
+void
+ath5k_hw_set_gpio_intr(struct ath5k_hw *ah, unsigned int gpio,
u32 interrupt_level)
{
u32 data;
diff --git a/drivers/net/wireless/ath/ath5k/initvals.c b/drivers/net/wireless/ath/ath5k/initvals.c
index 1ffecc0fd3ed..a1ea78e05b47 100644
--- a/drivers/net/wireless/ath/ath5k/initvals.c
+++ b/drivers/net/wireless/ath/ath5k/initvals.c
@@ -23,24 +23,27 @@
#include "reg.h"
#include "debug.h"
-/*
- * Mode-independent initial register writes
+/**
+ * struct ath5k_ini - Mode-independent initial register writes
+ * @ini_register: Register address
+ * @ini_value: Default value
+ * @ini_mode: 0 to write, 1 to read (and clear)
*/
-
struct ath5k_ini {
u16 ini_register;
u32 ini_value;
enum {
AR5K_INI_WRITE = 0, /* Default */
- AR5K_INI_READ = 1, /* Cleared on read */
+ AR5K_INI_READ = 1,
} ini_mode;
};
-/*
- * Mode specific initial register values
+/**
+ * struct ath5k_ini_mode - Mode specific initial register values
+ * @mode_register: Register address
+ * @mode_value: Set of values for each enum ath5k_driver_mode
*/
-
struct ath5k_ini_mode {
u16 mode_register;
u32 mode_value[3];
@@ -386,11 +389,10 @@ static const struct ath5k_ini ar5211_ini[] = {
/* Initial mode-specific settings for AR5211
* 5211 supports OFDM-only g (draft g) but we
- * need to test it !
- */
+ * need to test it ! */
static const struct ath5k_ini_mode ar5211_ini_mode[] = {
{ AR5K_TXCFG,
- /* A/XR B G */
+ /* A B G */
{ 0x00000015, 0x0000001d, 0x00000015 } },
{ AR5K_QUEUE_DFS_LOCAL_IFS(0),
{ 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
@@ -460,7 +462,7 @@ static const struct ath5k_ini_mode ar5211_ini_mode[] = {
{ 0x00000010, 0x00000010, 0x00000010 } },
};
-/* Initial register settings for AR5212 */
+/* Initial register settings for AR5212 and newer chips */
static const struct ath5k_ini ar5212_ini_common_start[] = {
{ AR5K_RXDP, 0x00000000 },
{ AR5K_RXCFG, 0x00000005 },
@@ -724,7 +726,8 @@ static const struct ath5k_ini_mode ar5212_ini_mode_start[] = {
{ 0x00000000, 0x00000000, 0x00000108 } },
};
-/* Initial mode-specific settings for AR5212 + RF5111 (Written after ar5212_ini) */
+/* Initial mode-specific settings for AR5212 + RF5111
+ * (Written after ar5212_ini) */
static const struct ath5k_ini_mode rf5111_ini_mode_end[] = {
{ AR5K_TXCFG,
/* A/XR B G */
@@ -757,6 +760,7 @@ static const struct ath5k_ini_mode rf5111_ini_mode_end[] = {
{ 0x1883800a, 0x1873800a, 0x1883800a } },
};
+/* Common for all modes */
static const struct ath5k_ini rf5111_ini_common_end[] = {
{ AR5K_DCU_FP, 0x00000000 },
{ AR5K_PHY_AGC, 0x00000000 },
@@ -774,7 +778,9 @@ static const struct ath5k_ini rf5111_ini_common_end[] = {
{ 0xa23c, 0x13c889af },
};
-/* Initial mode-specific settings for AR5212 + RF5112 (Written after ar5212_ini) */
+
+/* Initial mode-specific settings for AR5212 + RF5112
+ * (Written after ar5212_ini) */
static const struct ath5k_ini_mode rf5112_ini_mode_end[] = {
{ AR5K_TXCFG,
/* A/XR B G */
@@ -825,7 +831,9 @@ static const struct ath5k_ini rf5112_ini_common_end[] = {
{ 0xa23c, 0x13c889af },
};
-/* Initial mode-specific settings for RF5413/5414 (Written after ar5212_ini) */
+
+/* Initial mode-specific settings for RF5413/5414
+ * (Written after ar5212_ini) */
static const struct ath5k_ini_mode rf5413_ini_mode_end[] = {
{ AR5K_TXCFG,
/* A/XR B G */
@@ -963,7 +971,8 @@ static const struct ath5k_ini rf5413_ini_common_end[] = {
{ 0xa384, 0xf3307ff0 },
};
-/* Initial mode-specific settings for RF2413/2414 (Written after ar5212_ini) */
+/* Initial mode-specific settings for RF2413/2414
+ * (Written after ar5212_ini) */
/* XXX: a mode ? */
static const struct ath5k_ini_mode rf2413_ini_mode_end[] = {
{ AR5K_TXCFG,
@@ -1085,7 +1094,8 @@ static const struct ath5k_ini rf2413_ini_common_end[] = {
{ 0xa384, 0xf3307ff0 },
};
-/* Initial mode-specific settings for RF2425 (Written after ar5212_ini) */
+/* Initial mode-specific settings for RF2425
+ * (Written after ar5212_ini) */
/* XXX: a mode ? */
static const struct ath5k_ini_mode rf2425_ini_mode_end[] = {
{ AR5K_TXCFG,
@@ -1357,10 +1367,15 @@ static const struct ath5k_ini rf5112_ini_bbgain[] = {
};
-/*
- * Write initial register dump
+/**
+ * ath5k_hw_ini_registers() - Write initial register dump common for all modes
+ * @ah: The &struct ath5k_hw
+ * @size: Dump size
+ * @ini_regs: The array of &struct ath5k_ini
+ * @skip_pcu: Skip PCU registers
*/
-static void ath5k_hw_ini_registers(struct ath5k_hw *ah, unsigned int size,
+static void
+ath5k_hw_ini_registers(struct ath5k_hw *ah, unsigned int size,
const struct ath5k_ini *ini_regs, bool skip_pcu)
{
unsigned int i;
@@ -1388,7 +1403,15 @@ static void ath5k_hw_ini_registers(struct ath5k_hw *ah, unsigned int size,
}
}
-static void ath5k_hw_ini_mode_registers(struct ath5k_hw *ah,
+/**
+ * ath5k_hw_ini_mode_registers() - Write initial mode-specific register dump
+ * @ah: The &struct ath5k_hw
+ * @size: Dump size
+ * @ini_mode: The array of &struct ath5k_ini_mode
+ * @mode: One of enum ath5k_driver_mode
+ */
+static void
+ath5k_hw_ini_mode_registers(struct ath5k_hw *ah,
unsigned int size, const struct ath5k_ini_mode *ini_mode,
u8 mode)
{
@@ -1402,7 +1425,17 @@ static void ath5k_hw_ini_mode_registers(struct ath5k_hw *ah,
}
-int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool skip_pcu)
+/**
+ * ath5k_hw_write_initvals() - Write initial chip-specific register dump
+ * @ah: The &struct ath5k_hw
+ * @mode: One of enum ath5k_driver_mode
+ * @skip_pcu: Skip PCU registers
+ *
+ * Write initial chip-specific register dump, to get the chipset on a
+ * clean and ready-to-work state after warm reset.
+ */
+int
+ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool skip_pcu)
{
/*
* Write initial register settings
diff --git a/drivers/net/wireless/ath/ath5k/pci.c b/drivers/net/wireless/ath/ath5k/pci.c
index dfa48eb7d953..849fa060ebc4 100644
--- a/drivers/net/wireless/ath/ath5k/pci.c
+++ b/drivers/net/wireless/ath/ath5k/pci.c
@@ -98,7 +98,7 @@ ath5k_pci_eeprom_read(struct ath_common *common, u32 offset, u16 *data)
0xffff);
return true;
}
- udelay(15);
+ usleep_range(15, 20);
}
return false;
diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c
index a7eafa3edc21..cebfd6fd31d3 100644
--- a/drivers/net/wireless/ath/ath5k/pcu.c
+++ b/drivers/net/wireless/ath/ath5k/pcu.c
@@ -30,11 +30,47 @@
#include "reg.h"
#include "debug.h"
-/*
+/**
+ * DOC: Protocol Control Unit (PCU) functions
+ *
+ * The Protocol Control Unit is responsible for maintaining various protocol
+ * properties before a frame is sent to and after a frame is received from
+ * the baseband. To be more specific, the PCU handles:
+ *
+ * - Buffering of RX and TX frames (after QCU/DCUs)
+ *
+ * - Encrypting and decrypting (using the built-in engine)
+ *
+ * - Generating ACKs, RTS/CTS frames
+ *
+ * - Maintaining TSF
+ *
+ * - FCS
+ *
+ * - Updating beacon data (with TSF etc)
+ *
+ * - Generating virtual CCA
+ *
+ * - RX/Multicast filtering
+ *
+ * - BSSID filtering
+ *
+ * - Various statistics
+ *
+ * - Different operating modes: AP, STA, IBSS
+ *
+ * Note: Most of these functions can be tweaked/bypassed so you can do
+ * them in software above, for debugging or research. For more info check
+ * out the PCU registers in reg.h.
+ */
+
+/**
+ * DOC: ACK rates
+ *
* AR5212+ can use higher rates for ack transmission
* based on current tx rate instead of the base rate.
* It does this to better utilize channel usage.
- * This is a mapping between G rates (that cover both
+ * There is a mapping between G rates (that cover both
* CCK and OFDM) and ack rates that we use when setting
* rate -> duration table. This mapping is hw-based so
* don't change anything.
@@ -63,17 +99,18 @@ static const unsigned int ack_rates_high[] =
\*******************/
/**
- * ath5k_hw_get_frame_duration - Get tx time of a frame
- *
+ * ath5k_hw_get_frame_duration() - Get tx time of a frame
* @ah: The &struct ath5k_hw
* @len: Frame's length in bytes
+ * @rate: The &struct ieee80211_rate
+ * @shortpre: Indicate short preamble
*
* Calculate tx duration of a frame given it's rate and length
* It extends ieee80211_generic_frame_duration for non standard
* bwmodes.
*/
-int ath5k_hw_get_frame_duration(struct ath5k_hw *ah,
+int
+ath5k_hw_get_frame_duration(struct ath5k_hw *ah,
int len, struct ieee80211_rate *rate, bool shortpre)
{
int sifs, preamble, plcp_bits, sym_time;
@@ -129,11 +166,11 @@ int ath5k_hw_get_frame_duration(struct ath5k_hw *ah,
}
/**
- * ath5k_hw_get_default_slottime - Get the default slot time for current mode
- *
+ * ath5k_hw_get_default_slottime() - Get the default slot time for current mode
* @ah: The &struct ath5k_hw
*/
-unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah)
+unsigned int
+ath5k_hw_get_default_slottime(struct ath5k_hw *ah)
{
struct ieee80211_channel *channel = ah->ah_current_channel;
unsigned int slot_time;
@@ -160,11 +197,11 @@ unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah)
}
/**
- * ath5k_hw_get_default_sifs - Get the default SIFS for current mode
- *
+ * ath5k_hw_get_default_sifs() - Get the default SIFS for current mode
* @ah: The &struct ath5k_hw
*/
-unsigned int ath5k_hw_get_default_sifs(struct ath5k_hw *ah)
+unsigned int
+ath5k_hw_get_default_sifs(struct ath5k_hw *ah)
{
struct ieee80211_channel *channel = ah->ah_current_channel;
unsigned int sifs;
@@ -191,17 +228,17 @@ unsigned int ath5k_hw_get_default_sifs(struct ath5k_hw *ah)
}
/**
- * ath5k_hw_update_mib_counters - Update MIB counters (mac layer statistics)
- *
+ * ath5k_hw_update_mib_counters() - Update MIB counters (mac layer statistics)
* @ah: The &struct ath5k_hw
*
* Reads MIB counters from PCU and updates sw statistics. Is called after a
* MIB interrupt, because one of these counters might have reached their maximum
* and triggered the MIB interrupt, to let us read and clear the counter.
*
- * Is called in interrupt context!
+ * NOTE: Is called in interrupt context!
*/
-void ath5k_hw_update_mib_counters(struct ath5k_hw *ah)
+void
+ath5k_hw_update_mib_counters(struct ath5k_hw *ah)
{
struct ath5k_statistics *stats = &ah->stats;
@@ -219,10 +256,8 @@ void ath5k_hw_update_mib_counters(struct ath5k_hw *ah)
\******************/
/**
- * ath5k_hw_write_rate_duration - fill rate code to duration table
- *
- * @ah: the &struct ath5k_hw
- * @mode: one of enum ath5k_driver_mode
+ * ath5k_hw_write_rate_duration() - Fill rate code to duration table
+ * @ah: The &struct ath5k_hw
*
* Write the rate code to duration table upon hw reset. This is a helper for
* ath5k_hw_pcu_init(). It seems all this is doing is setting an ACK timeout on
@@ -236,7 +271,8 @@ void ath5k_hw_update_mib_counters(struct ath5k_hw *ah)
* that include all OFDM and CCK rates.
*
*/
-static inline void ath5k_hw_write_rate_duration(struct ath5k_hw *ah)
+static inline void
+ath5k_hw_write_rate_duration(struct ath5k_hw *ah)
{
struct ieee80211_rate *rate;
unsigned int i;
@@ -280,12 +316,12 @@ static inline void ath5k_hw_write_rate_duration(struct ath5k_hw *ah)
}
/**
- * ath5k_hw_set_ack_timeout - Set ACK timeout on PCU
- *
+ * ath5k_hw_set_ack_timeout() - Set ACK timeout on PCU
* @ah: The &struct ath5k_hw
* @timeout: Timeout in usec
*/
-static int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout)
+static int
+ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout)
{
if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_ACK))
<= timeout)
@@ -298,12 +334,12 @@ static int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout)
}
/**
- * ath5k_hw_set_cts_timeout - Set CTS timeout on PCU
- *
+ * ath5k_hw_set_cts_timeout() - Set CTS timeout on PCU
* @ah: The &struct ath5k_hw
* @timeout: Timeout in usec
*/
-static int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout)
+static int
+ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout)
{
if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_CTS))
<= timeout)
@@ -321,14 +357,14 @@ static int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout)
\*******************/
/**
- * ath5k_hw_set_lladdr - Set station id
- *
+ * ath5k_hw_set_lladdr() - Set station id
* @ah: The &struct ath5k_hw
- * @mac: The card's mac address
+ * @mac: The card's mac address (array of octets)
*
* Set station id on hw using the provided mac address
*/
-int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac)
+int
+ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac)
{
struct ath_common *common = ath5k_hw_common(ah);
u32 low_id, high_id;
@@ -349,14 +385,14 @@ int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac)
}
/**
- * ath5k_hw_set_bssid - Set current BSSID on hw
- *
+ * ath5k_hw_set_bssid() - Set current BSSID on hw
* @ah: The &struct ath5k_hw
*
* Sets the current BSSID and BSSID mask we have from the
* common struct into the hardware
*/
-void ath5k_hw_set_bssid(struct ath5k_hw *ah)
+void
+ath5k_hw_set_bssid(struct ath5k_hw *ah)
{
struct ath_common *common = ath5k_hw_common(ah);
u16 tim_offset = 0;
@@ -389,7 +425,23 @@ void ath5k_hw_set_bssid(struct ath5k_hw *ah)
ath5k_hw_enable_pspoll(ah, NULL, 0);
}
-void ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask)
+/**
+ * ath5k_hw_set_bssid_mask() - Set a BSSID mask to filter the BSSIDs we listen to
+ * @ah: The &struct ath5k_hw
+ * @mask: The BSSID mask to set (array of octets)
+ *
+ * BSSID masking is a method used by AR5212 and newer hardware to inform PCU
+ * which bits of the interface's MAC address should be looked at when trying
+ * to decide which packets to ACK. In station mode and AP mode with a single
+ * BSS every bit matters since we lock to only one BSS. In AP mode with
+ * multiple BSSes (virtual interfaces) not every bit matters because hw must
+ * accept frames for all BSSes and so we tweak some bits of our mac address
+ * in order to have multiple BSSes.
+ *
+ * For more information check out ../hw.c of the common ath module.
+ */
+void
+ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask)
{
struct ath_common *common = ath5k_hw_common(ah);
@@ -400,18 +452,21 @@ void ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask)
ath_hw_setbssidmask(common);
}
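
To make the BSSID-masking description above concrete, here is an illustrative sketch (editor's aside, not part of the patch; all example_* names are hypothetical): a received Address1 matches when every bit that is set in the mask agrees with our MAC address, and a common way to build the mask is to clear every bit on which two of our interface addresses differ.

#include <stdbool.h>
#include <stdint.h>

#define EX_ETH_ALEN 6

static void example_calc_bssid_mask(const uint8_t addrs[][EX_ETH_ALEN],
				    int naddrs, uint8_t mask[EX_ETH_ALEN])
{
	int i, b;

	for (b = 0; b < EX_ETH_ALEN; b++)
		mask[b] = 0xff;

	/* Clear every bit on which two of our addresses differ */
	for (i = 1; i < naddrs; i++)
		for (b = 0; b < EX_ETH_ALEN; b++)
			mask[b] &= ~(addrs[0][b] ^ addrs[i][b]);
}

static bool example_addr1_matches(const uint8_t mac[EX_ETH_ALEN],
				  const uint8_t a1[EX_ETH_ALEN],
				  const uint8_t mask[EX_ETH_ALEN])
{
	int b;

	for (b = 0; b < EX_ETH_ALEN; b++)
		if ((mac[b] ^ a1[b]) & mask[b])
			return false;	/* a masked bit differs */
	return true;
}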
-/*
- * Set multicast filter
+/**
+ * ath5k_hw_set_mcast_filter() - Set multicast filter
+ * @ah: The &struct ath5k_hw
+ * @filter0: Lower 32bits of multicast filter
+ * @filter1: Higher 16bits of multicast filter
*/
-void ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1)
+void
+ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1)
{
ath5k_hw_reg_write(ah, filter0, AR5K_MCAST_FILTER0);
ath5k_hw_reg_write(ah, filter1, AR5K_MCAST_FILTER1);
}
/**
- * ath5k_hw_get_rx_filter - Get current rx filter
- *
+ * ath5k_hw_get_rx_filter() - Get current rx filter
* @ah: The &struct ath5k_hw
*
* Returns the RX filter by reading rx filter and
@@ -420,7 +475,8 @@ void ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1)
* and pass to the driver. For a list of frame types
* check out reg.h.
*/
-u32 ath5k_hw_get_rx_filter(struct ath5k_hw *ah)
+u32
+ath5k_hw_get_rx_filter(struct ath5k_hw *ah)
{
u32 data, filter = 0;
@@ -440,8 +496,7 @@ u32 ath5k_hw_get_rx_filter(struct ath5k_hw *ah)
}
/**
- * ath5k_hw_set_rx_filter - Set rx filter
- *
+ * ath5k_hw_set_rx_filter() - Set rx filter
* @ah: The &struct ath5k_hw
* @filter: RX filter mask (see reg.h)
*
@@ -449,7 +504,8 @@ u32 ath5k_hw_get_rx_filter(struct ath5k_hw *ah)
* register on 5212 and newer chips so that we have proper PHY
* error reporting.
*/
-void ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter)
+void
+ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter)
{
u32 data = 0;
@@ -493,13 +549,13 @@ void ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter)
#define ATH5K_MAX_TSF_READ 10
/**
- * ath5k_hw_get_tsf64 - Get the full 64bit TSF
- *
+ * ath5k_hw_get_tsf64() - Get the full 64bit TSF
* @ah: The &struct ath5k_hw
*
* Returns the current TSF
*/
-u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah)
+u64
+ath5k_hw_get_tsf64(struct ath5k_hw *ah)
{
u32 tsf_lower, tsf_upper1, tsf_upper2;
int i;
@@ -536,28 +592,30 @@ u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah)
return ((u64)tsf_upper1 << 32) | tsf_lower;
}
+#undef ATH5K_MAX_TSF_READ
+
/**
- * ath5k_hw_set_tsf64 - Set a new 64bit TSF
- *
+ * ath5k_hw_set_tsf64() - Set a new 64bit TSF
* @ah: The &struct ath5k_hw
* @tsf64: The new 64bit TSF
*
* Sets the new TSF
*/
-void ath5k_hw_set_tsf64(struct ath5k_hw *ah, u64 tsf64)
+void
+ath5k_hw_set_tsf64(struct ath5k_hw *ah, u64 tsf64)
{
ath5k_hw_reg_write(ah, tsf64 & 0xffffffff, AR5K_TSF_L32);
ath5k_hw_reg_write(ah, (tsf64 >> 32) & 0xffffffff, AR5K_TSF_U32);
}
/**
- * ath5k_hw_reset_tsf - Force a TSF reset
- *
+ * ath5k_hw_reset_tsf() - Force a TSF reset
* @ah: The &struct ath5k_hw
*
* Forces a TSF reset on PCU
*/
-void ath5k_hw_reset_tsf(struct ath5k_hw *ah)
+void
+ath5k_hw_reset_tsf(struct ath5k_hw *ah)
{
u32 val;
@@ -573,10 +631,17 @@ void ath5k_hw_reset_tsf(struct ath5k_hw *ah)
ath5k_hw_reg_write(ah, val, AR5K_BEACON);
}
-/*
- * Initialize beacon timers
+/**
+ * ath5k_hw_init_beacon_timers() - Initialize beacon timers
+ * @ah: The &struct ath5k_hw
+ * @next_beacon: Next TBTT
+ * @interval: Current beacon interval
+ *
+ * This function is used to initialize beacon timers based on current
+ * operation mode and settings.
*/
-void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
+void
+ath5k_hw_init_beacon_timers(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
{
u32 timer1, timer2, timer3;
@@ -655,8 +720,7 @@ void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
}
/**
- * ath5k_check_timer_win - Check if timer B is timer A + window
- *
+ * ath5k_check_timer_win() - Check if timer B is timer A + window
* @a: timer a (before b)
* @b: timer b (after a)
* @window: difference between a and b
@@ -686,12 +750,11 @@ ath5k_check_timer_win(int a, int b, int window, int intval)
}
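
As an aside, the window check above boils down to "is b equal to a plus window, modulo the beacon interval". A simplified illustrative sketch follows (editor's aside, not part of the patch; the real helper must also cope with hardware timer wraparound, which this version ignores):

static int example_timer_in_window(int a, int b, int window, int intval)
{
	int diff = (b - a) % intval;

	if (diff < 0)
		diff += intval;

	return diff == window;
}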
/**
- * ath5k_hw_check_beacon_timers - Check if the beacon timers are correct
- *
+ * ath5k_hw_check_beacon_timers() - Check if the beacon timers are correct
* @ah: The &struct ath5k_hw
* @intval: beacon interval
*
- * This is a workaround for IBSS mode:
+ * This is a workaround for IBSS mode
*
* The need for this function arises from the fact that we have 4 separate
* HW timer registers (TIMER0 - TIMER3), which are closely related to the
@@ -746,14 +809,14 @@ ath5k_hw_check_beacon_timers(struct ath5k_hw *ah, int intval)
}
/**
- * ath5k_hw_set_coverage_class - Set IEEE 802.11 coverage class
- *
+ * ath5k_hw_set_coverage_class() - Set IEEE 802.11 coverage class
* @ah: The &struct ath5k_hw
* @coverage_class: IEEE 802.11 coverage class number
*
* Sets IFS intervals and ACK/CTS timeouts for given coverage class.
*/
-void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class)
+void
+ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class)
{
/* As defined by IEEE 802.11-2007 17.3.8.6 */
int slot_time = ath5k_hw_get_default_slottime(ah) + 3 * coverage_class;
@@ -772,8 +835,7 @@ void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class)
\***************************/
/**
- * ath5k_hw_start_rx_pcu - Start RX engine
- *
+ * ath5k_hw_start_rx_pcu() - Start RX engine
* @ah: The &struct ath5k_hw
*
* Starts RX engine on PCU so that hw can process RXed frames
@@ -781,32 +843,33 @@ void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class)
*
* NOTE: RX DMA should be already enabled using ath5k_hw_start_rx_dma
*/
-void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah)
+void
+ath5k_hw_start_rx_pcu(struct ath5k_hw *ah)
{
AR5K_REG_DISABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX);
}
/**
- * at5k_hw_stop_rx_pcu - Stop RX engine
- *
+ * ath5k_hw_stop_rx_pcu() - Stop RX engine
* @ah: The &struct ath5k_hw
*
* Stops RX engine on PCU
*/
-void ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah)
+void
+ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah)
{
AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX);
}
/**
- * ath5k_hw_set_opmode - Set PCU operating mode
- *
+ * ath5k_hw_set_opmode() - Set PCU operating mode
* @ah: The &struct ath5k_hw
- * @op_mode: &enum nl80211_iftype operating mode
+ * @op_mode: One of enum nl80211_iftype
*
* Configure PCU for the various operating modes (AP/STA etc)
*/
-int ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype op_mode)
+int
+ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype op_mode)
{
struct ath_common *common = ath5k_hw_common(ah);
u32 pcu_reg, beacon_reg, low_id, high_id;
@@ -873,8 +936,17 @@ int ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype op_mode)
return 0;
}
-void ath5k_hw_pcu_init(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
- u8 mode)
+/**
+ * ath5k_hw_pcu_init() - Initialize PCU
+ * @ah: The &struct ath5k_hw
+ * @op_mode: One of enum nl80211_iftype
+ *
+ * This function is used to initialize PCU by setting current
+ * operation mode and various other settings.
+ */
+void
+ath5k_hw_pcu_init(struct ath5k_hw *ah, enum nl80211_iftype op_mode)
{
/* Set bssid and bssid mask */
ath5k_hw_set_bssid(ah);
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index 01cb72de44cb..e1f8613426a9 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -1,6 +1,4 @@
/*
- * PHY functions
- *
* Copyright (c) 2004-2007 Reyk Floeter <reyk@openbsd.org>
* Copyright (c) 2006-2009 Nick Kossifidis <mickflemm@gmail.com>
* Copyright (c) 2007-2008 Jiri Slaby <jirislaby@gmail.com>
@@ -20,6 +18,10 @@
*
*/
+/***********************\
+* PHY related functions *
+\***********************/
+
#include <linux/delay.h>
#include <linux/slab.h>
#include <asm/unaligned.h>
@@ -31,14 +33,53 @@
#include "../regd.h"
+/**
+ * DOC: PHY related functions
+ *
+ * Here we handle the low-level functions related to baseband
+ * and analog frontend (RF) parts. This is by far the most complex
+ * part of the hw code so make sure you know what you are doing.
+ *
+ * Here is a list of what this is all about:
+ *
+ * - Channel setting/switching
+ *
+ * - Automatic Gain Control (AGC) calibration
+ *
+ * - Noise Floor calibration
+ *
+ * - I/Q imbalance calibration (QAM correction)
+ *
+ * - Calibration due to thermal changes (gain_F)
+ *
+ * - Spur noise mitigation
+ *
+ * - RF/PHY initialization for the various operating modes and bwmodes
+ *
+ * - Antenna control
+ *
+ * - TX power control per channel/rate/packet type
+ *
+ * Also keep in mind that we never got documentation for most of these
+ * functions; what we have comes mostly from Atheros's code, reverse
+ * engineering and patent docs/presentations etc.
+ */
+
+
/******************\
* Helper functions *
\******************/
-/*
- * Get the PHY Chip revision
+/**
+ * ath5k_hw_radio_revision() - Get the PHY Chip revision
+ * @ah: The &struct ath5k_hw
+ * @band: One of enum ieee80211_band
+ *
+ * Returns the revision number of a 2GHz, 5GHz or single chip
+ * radio.
*/
-u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, enum ieee80211_band band)
+u16
+ath5k_hw_radio_revision(struct ath5k_hw *ah, enum ieee80211_band band)
{
unsigned int i;
u32 srev;
@@ -58,7 +99,7 @@ u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, enum ieee80211_band band)
return 0;
}
- mdelay(2);
+ usleep_range(2000, 2500);
/* ...wait until PHY is ready and read the selected radio revision */
ath5k_hw_reg_write(ah, 0x00001c16, AR5K_PHY(0x34));
@@ -81,10 +122,16 @@ u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, enum ieee80211_band band)
return ret;
}
-/*
- * Check if a channel is supported
+/**
+ * ath5k_channel_ok() - Check if a channel is supported by the hw
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * Note: We don't do any regulatory domain checks here, it's just
+ * a sanity check.
*/
-bool ath5k_channel_ok(struct ath5k_hw *ah, struct ieee80211_channel *channel)
+bool
+ath5k_channel_ok(struct ath5k_hw *ah, struct ieee80211_channel *channel)
{
u16 freq = channel->center_freq;
@@ -101,7 +148,13 @@ bool ath5k_channel_ok(struct ath5k_hw *ah, struct ieee80211_channel *channel)
return false;
}
-bool ath5k_hw_chan_has_spur_noise(struct ath5k_hw *ah,
+/**
+ * ath5k_hw_chan_has_spur_noise() - Check if channel is sensitive to spur noise
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ */
+bool
+ath5k_hw_chan_has_spur_noise(struct ath5k_hw *ah,
struct ieee80211_channel *channel)
{
u8 refclk_freq;
@@ -122,11 +175,20 @@ bool ath5k_hw_chan_has_spur_noise(struct ath5k_hw *ah,
return false;
}
-/*
- * Used to modify RF Banks before writing them to AR5K_RF_BUFFER
+/**
+ * ath5k_hw_rfb_op() - Perform an operation on the given RF Buffer
+ * @ah: The &struct ath5k_hw
+ * @rf_regs: The struct ath5k_rf_reg
+ * @val: New value
+ * @reg_id: RF register ID
+ * @set: True to write @val to the buffer, false to just read it back
+ *
+ * This is an internal function used to modify RF Banks before
+ * writing them to AR5K_RF_BUFFER. Check out rfbuffer.h for more
+ * info.
*/
-static unsigned int ath5k_hw_rfb_op(struct ath5k_hw *ah,
- const struct ath5k_rf_reg *rf_regs,
+static unsigned int
+ath5k_hw_rfb_op(struct ath5k_hw *ah, const struct ath5k_rf_reg *rf_regs,
u32 val, u8 reg_id, bool set)
{
const struct ath5k_rf_reg *rfreg = NULL;
@@ -204,8 +266,7 @@ static unsigned int ath5k_hw_rfb_op(struct ath5k_hw *ah,
}
/**
- * ath5k_hw_write_ofdm_timings - set OFDM timings on AR5212
- *
+ * ath5k_hw_write_ofdm_timings() - set OFDM timings on AR5212
* @ah: the &struct ath5k_hw
* @channel: the currently set channel upon reset
*
@@ -216,10 +277,11 @@ static unsigned int ath5k_hw_rfb_op(struct ath5k_hw *ah,
* mantissa and provide these values on hw.
*
* For more infos i think this patent is related
- * http://www.freepatentsonline.com/7184495.html
+ * "http://www.freepatentsonline.com/7184495.html"
*/
-static inline int ath5k_hw_write_ofdm_timings(struct ath5k_hw *ah,
- struct ieee80211_channel *channel)
+static inline int
+ath5k_hw_write_ofdm_timings(struct ath5k_hw *ah,
+ struct ieee80211_channel *channel)
{
/* Get exponent and mantissa and set it */
u32 coef_scaled, coef_exp, coef_man,
@@ -278,6 +340,10 @@ static inline int ath5k_hw_write_ofdm_timings(struct ath5k_hw *ah,
return 0;
}
+/**
+ * ath5k_hw_phy_disable() - Disable PHY
+ * @ah: The &struct ath5k_hw
+ */
int ath5k_hw_phy_disable(struct ath5k_hw *ah)
{
/*Just a try M.F.*/
@@ -286,10 +352,13 @@ int ath5k_hw_phy_disable(struct ath5k_hw *ah)
return 0;
}
-/*
- * Wait for synth to settle
+/**
+ * ath5k_hw_wait_for_synth() - Wait for synth to settle
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
*/
-static void ath5k_hw_wait_for_synth(struct ath5k_hw *ah,
+static void
+ath5k_hw_wait_for_synth(struct ath5k_hw *ah,
struct ieee80211_channel *channel)
{
/*
@@ -308,9 +377,9 @@ static void ath5k_hw_wait_for_synth(struct ath5k_hw *ah,
delay = delay << 2;
/* XXX: /2 on turbo ? Let's be safe
* for now */
- udelay(100 + delay);
+ usleep_range(100 + delay, 100 + (2 * delay));
} else {
- mdelay(1);
+ usleep_range(1000, 1500);
}
}
@@ -319,7 +388,9 @@ static void ath5k_hw_wait_for_synth(struct ath5k_hw *ah,
* RF Gain optimization *
\**********************/
-/*
+/**
+ * DOC: RF Gain optimization
+ *
* This code is used to optimize RF gain on different environments
* (temperature mostly) based on feedback from a power detector.
*
@@ -328,22 +399,22 @@ static void ath5k_hw_wait_for_synth(struct ath5k_hw *ah,
* no gain optimization ladder-.
*
* For more infos check out this patent doc
- * http://www.freepatentsonline.com/7400691.html
+ * "http://www.freepatentsonline.com/7400691.html"
*
* This paper describes power drops as seen on the receiver due to
* probe packets
- * http://www.cnri.dit.ie/publications/ICT08%20-%20Practical%20Issues
- * %20of%20Power%20Control.pdf
+ * "http://www.cnri.dit.ie/publications/ICT08%20-%20Practical%20Issues
+ * %20of%20Power%20Control.pdf"
*
* And this is the MadWiFi bug entry related to the above
- * http://madwifi-project.org/ticket/1659
+ * "http://madwifi-project.org/ticket/1659"
* with various measurements and diagrams
- *
- * TODO: Deal with power drops due to probes by setting an appropriate
- * tx power on the probe packets ! Make this part of the calibration process.
*/
-/* Initialize ah_gain during attach */
+/**
+ * ath5k_hw_rfgain_opt_init() - Initialize ah_gain during attach
+ * @ah: The &struct ath5k_hw
+ */
int ath5k_hw_rfgain_opt_init(struct ath5k_hw *ah)
{
/* Initialize the gain optimization values */
@@ -367,17 +438,21 @@ int ath5k_hw_rfgain_opt_init(struct ath5k_hw *ah)
return 0;
}
-/* Schedule a gain probe check on the next transmitted packet.
+/**
+ * ath5k_hw_request_rfgain_probe() - Request a PAPD probe packet
+ * @ah: The &struct ath5k_hw
+ *
+ * Schedules a gain probe check on the next transmitted packet.
* That means our next packet is going to be sent with lower
* tx power and a Peak to Average Power Detector (PAPD) will try
* to measure the gain.
*
- * XXX: How about forcing a tx packet (bypassing PCU arbitrator etc)
+ * TODO: Force a tx packet (bypassing PCU arbitrator etc)
* just after we enable the probe so that we don't mess with
- * standard traffic ? Maybe it's time to use sw interrupts and
- * a probe tasklet !!!
+ * standard traffic.
*/
-static void ath5k_hw_request_rfgain_probe(struct ath5k_hw *ah)
+static void
+ath5k_hw_request_rfgain_probe(struct ath5k_hw *ah)
{
/* Skip if gain calibration is inactive or
@@ -395,9 +470,15 @@ static void ath5k_hw_request_rfgain_probe(struct ath5k_hw *ah)
}
-/* Calculate gain_F measurement correction
- * based on the current step for RF5112 rev. 2 */
-static u32 ath5k_hw_rf_gainf_corr(struct ath5k_hw *ah)
+/**
+ * ath5k_hw_rf_gainf_corr() - Calculate Gain_F measurement correction
+ * @ah: The &struct ath5k_hw
+ *
+ * Calculate Gain_F measurement correction
+ * based on the current step for RF5112 rev. 2
+ */
+static u32
+ath5k_hw_rf_gainf_corr(struct ath5k_hw *ah)
{
u32 mix, step;
u32 *rf;
@@ -450,11 +531,19 @@ static u32 ath5k_hw_rf_gainf_corr(struct ath5k_hw *ah)
return ah->ah_gain.g_f_corr;
}
-/* Check if current gain_F measurement is in the range of our
+/**
+ * ath5k_hw_rf_check_gainf_readback() - Validate Gain_F feedback from detector
+ * @ah: The &struct ath5k_hw
+ *
+ * Check if current gain_F measurement is in the range of our
* power detector windows. If we get a measurement outside range
* we know it's not accurate (detectors can't measure anything outside
- * their detection window) so we must ignore it */
-static bool ath5k_hw_rf_check_gainf_readback(struct ath5k_hw *ah)
+ * their detection window) so we must ignore it.
+ *
+ * Returns true if readback was O.K. or false on failure
+ */
+static bool
+ath5k_hw_rf_check_gainf_readback(struct ath5k_hw *ah)
{
const struct ath5k_rf_reg *rf_regs;
u32 step, mix_ovr, level[4];
@@ -506,9 +595,15 @@ static bool ath5k_hw_rf_check_gainf_readback(struct ath5k_hw *ah)
ah->ah_gain.g_current <= level[3]);
}
-/* Perform gain_F adjustment by choosing the right set
- * of parameters from RF gain optimization ladder */
-static s8 ath5k_hw_rf_gainf_adjust(struct ath5k_hw *ah)
+/**
+ * ath5k_hw_rf_gainf_adjust() - Perform Gain_F adjustment
+ * @ah: The &struct ath5k_hw
+ *
+ * Choose the right target gain based on current gain
+ * and RF gain optimization ladder
+ */
+static s8
+ath5k_hw_rf_gainf_adjust(struct ath5k_hw *ah)
{
const struct ath5k_gain_opt *go;
const struct ath5k_gain_opt_step *g_step;
@@ -572,13 +667,18 @@ done:
return ret;
}
-/* Main callback for thermal RF gain calibration engine
+/**
+ * ath5k_hw_gainf_calibrate() - Do a gain_F calibration
+ * @ah: The &struct ath5k_hw
+ *
+ * Main callback for thermal RF gain calibration engine
* Check for a new gain reading and schedule an adjustment
* if needed.
*
- * TODO: Use sw interrupt to schedule reset if gain_F needs
- * adjustment */
-enum ath5k_rfgain ath5k_hw_gainf_calibrate(struct ath5k_hw *ah)
+ * Returns one of enum ath5k_rfgain codes
+ */
+enum ath5k_rfgain
+ath5k_hw_gainf_calibrate(struct ath5k_hw *ah)
{
u32 data, type;
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
@@ -638,10 +738,18 @@ done:
return ah->ah_gain.g_state;
}
-/* Write initial RF gain table to set the RF sensitivity
- * this one works on all RF chips and has nothing to do
- * with gain_F calibration */
-static int ath5k_hw_rfgain_init(struct ath5k_hw *ah, enum ieee80211_band band)
+/**
+ * ath5k_hw_rfgain_init() - Write initial RF gain settings to hw
+ * @ah: The &struct ath5k_hw
+ * @band: One of enum ieee80211_band
+ *
+ * Write initial RF gain table to set the RF sensitivity.
+ *
+ * NOTE: This one works on all RF chips and has nothing to do
+ * with Gain_F calibration
+ */
+static int
+ath5k_hw_rfgain_init(struct ath5k_hw *ah, enum ieee80211_band band)
{
const struct ath5k_ini_rfgain *ath5k_rfg;
unsigned int i, size, index;
@@ -688,16 +796,23 @@ static int ath5k_hw_rfgain_init(struct ath5k_hw *ah, enum ieee80211_band band)
}
-
/********************\
* RF Registers setup *
\********************/
-/*
- * Setup RF registers by writing RF buffer on hw
+/**
+ * ath5k_hw_rfregs_init() - Initialize RF register settings
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ * @mode: One of enum ath5k_driver_mode
+ *
+ * Setup RF registers by writing an RF buffer to the hw. For
+ * more info on this, check out rfbuffer.h
*/
-static int ath5k_hw_rfregs_init(struct ath5k_hw *ah,
- struct ieee80211_channel *channel, unsigned int mode)
+static int
+ath5k_hw_rfregs_init(struct ath5k_hw *ah,
+ struct ieee80211_channel *channel,
+ unsigned int mode)
{
const struct ath5k_rf_reg *rf_regs;
const struct ath5k_ini_rfbuffer *ini_rfb;
@@ -1055,19 +1170,18 @@ static int ath5k_hw_rfregs_init(struct ath5k_hw *ah,
PHY/RF channel functions
\**************************/
-/*
- * Conversion needed for RF5110
+/**
+ * ath5k_hw_rf5110_chan2athchan() - Convert channel freq on RF5110
+ * @channel: The &struct ieee80211_channel
+ *
+ * Map channel frequency to IEEE channel number and convert it
+ * to an internal channel value used by the RF5110 chipset.
*/
-static u32 ath5k_hw_rf5110_chan2athchan(struct ieee80211_channel *channel)
+static u32
+ath5k_hw_rf5110_chan2athchan(struct ieee80211_channel *channel)
{
u32 athchan;
- /*
- * Convert IEEE channel/MHz to an internal channel value used
- * by the AR5210 chipset. This has not been verified with
- * newer chipsets like the AR5212A who have a completely
- * different RF/PHY part.
- */
athchan = (ath5k_hw_bitswap(
(ieee80211_frequency_to_channel(
channel->center_freq) - 24) / 2, 5)
@@ -1075,10 +1189,13 @@ static u32 ath5k_hw_rf5110_chan2athchan(struct ieee80211_channel *channel)
return athchan;
}
-/*
- * Set channel on RF5110
+/**
+ * ath5k_hw_rf5110_channel() - Set channel frequency on RF5110
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
*/
-static int ath5k_hw_rf5110_channel(struct ath5k_hw *ah,
+static int
+ath5k_hw_rf5110_channel(struct ath5k_hw *ah,
struct ieee80211_channel *channel)
{
u32 data;
@@ -1089,15 +1206,23 @@ static int ath5k_hw_rf5110_channel(struct ath5k_hw *ah,
data = ath5k_hw_rf5110_chan2athchan(channel);
ath5k_hw_reg_write(ah, data, AR5K_RF_BUFFER);
ath5k_hw_reg_write(ah, 0, AR5K_RF_BUFFER_CONTROL_0);
- mdelay(1);
+ usleep_range(1000, 1500);
return 0;
}
-/*
- * Conversion needed for 5111
+/**
+ * ath5k_hw_rf5111_chan2athchan() - Handle 2GHz channels on RF5111/2111
+ * @ieee: IEEE channel number
+ * @athchan: The &struct ath5k_athchan_2ghz
+ *
+ * In order to enable the RF2111 frequency converter on RF5111/2111 setups
+ * we need to add some offsets and extra flags to the data values we pass
+ * on to the PHY. So for every 2GHz channel this function gets called
+ * to do the conversion.
*/
-static int ath5k_hw_rf5111_chan2athchan(unsigned int ieee,
+static int
+ath5k_hw_rf5111_chan2athchan(unsigned int ieee,
struct ath5k_athchan_2ghz *athchan)
{
int channel;
@@ -1123,10 +1248,13 @@ static int ath5k_hw_rf5111_chan2athchan(unsigned int ieee,
return 0;
}
-/*
- * Set channel on 5111
+/**
+ * ath5k_hw_rf5111_channel() - Set channel frequency on RF5111/2111
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
*/
-static int ath5k_hw_rf5111_channel(struct ath5k_hw *ah,
+static int
+ath5k_hw_rf5111_channel(struct ath5k_hw *ah,
struct ieee80211_channel *channel)
{
struct ath5k_athchan_2ghz ath5k_channel_2ghz;
@@ -1171,10 +1299,20 @@ static int ath5k_hw_rf5111_channel(struct ath5k_hw *ah,
return 0;
}
-/*
- * Set channel on 5112 and newer
+/**
+ * ath5k_hw_rf5112_channel() - Set channel frequency on 5112 and newer
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * On RF5112/2112 and newer we don't need to do any conversion.
+ * We pass the frequency value directly to the chip, after a few
+ * modifications.
+ *
+ * NOTE: Make sure the channel frequency given is within our range or else
+ * we might damage the chip! Use ath5k_channel_ok before calling this one.
*/
-static int ath5k_hw_rf5112_channel(struct ath5k_hw *ah,
+static int
+ath5k_hw_rf5112_channel(struct ath5k_hw *ah,
struct ieee80211_channel *channel)
{
u32 data, data0, data1, data2;
@@ -1183,17 +1321,37 @@ static int ath5k_hw_rf5112_channel(struct ath5k_hw *ah,
data = data0 = data1 = data2 = 0;
c = channel->center_freq;
+ /* My guess based on code:
+ * 2GHz RF has 2 synth modes, one with a Local Oscillator
+ * at 2224 MHz and one with a LO at 2192 MHz. IF is 1520 MHz
+ * (3040/2). data0 is used to set the PLL divider and data1
+ * selects synth mode. */
if (c < 4800) {
+ /* Channel 14 and all frequencies with 2 MHz spacing
+ * below/above (non-standard channels) */
if (!((c - 2224) % 5)) {
+ /* Same as (c - 2224) / 5 */
data0 = ((2 * (c - 704)) - 3040) / 10;
data1 = 1;
+ /* Channel 1 and all frequencies with 5 MHz spacing
+ * below/above (standard channels without channel 14) */
} else if (!((c - 2192) % 5)) {
+ /* Same as (c - 2192) / 5 */
data0 = ((2 * (c - 672)) - 3040) / 10;
data1 = 0;
} else
return -EINVAL;
data0 = ath5k_hw_bitswap((data0 << 2) & 0xff, 8);
+ /* This is more complex, we have a single synthesizer with
+ * 4 reference clock settings (?) based on frequency spacing
+ * and set using data2. LO is at 4800 MHz and data0 is again used
+ * to set some divider.
+ *
+ * NOTE: There is an old Atheros presentation at Stanford
+ * that mentions a method called dual direct conversion
+ * with 1GHz sliding IF for RF5110. Maybe that's what we
+ * have here, or an updated version. */
} else if ((c % 5) != 2 || c > 5435) {
if (!(c % 20) && c >= 5120) {
data0 = ath5k_hw_bitswap(((c - 4800) / 20 << 2), 8);
@@ -1219,10 +1377,16 @@ static int ath5k_hw_rf5112_channel(struct ath5k_hw *ah,
return 0;
}
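
To sanity-check the 2GHz divider arithmetic described in the comment above, here is a small standalone sketch (editor's aside, not part of the patch; example_* names are hypothetical and the bit-swapping step the driver applies afterwards is omitted). For channel 1 (2412 MHz) it yields data0 = 44 = (2412 - 2192) / 5 with data1 = 0, and for channel 14 (2484 MHz) data0 = 52 = (2484 - 2224) / 5 with data1 = 1:

#include <stdio.h>

static int example_2ghz_divider(unsigned int c, unsigned int *data0,
				unsigned int *data1)
{
	if (!((c - 2224) % 5)) {		/* 2224 MHz LO grid */
		*data0 = ((2 * (c - 704)) - 3040) / 10;
		*data1 = 1;
	} else if (!((c - 2192) % 5)) {		/* 2192 MHz LO grid */
		*data0 = ((2 * (c - 672)) - 3040) / 10;
		*data1 = 0;
	} else {
		return -1;			/* unsupported spacing */
	}
	return 0;
}

int main(void)
{
	unsigned int d0, d1;

	if (!example_2ghz_divider(2412, &d0, &d1))
		printf("2412 MHz -> data0=%u data1=%u\n", d0, d1); /* 44, 0 */
	if (!example_2ghz_divider(2484, &d0, &d1))
		printf("2484 MHz -> data0=%u data1=%u\n", d0, d1); /* 52, 1 */
	return 0;
}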
-/*
- * Set the channel on the RF2425
+/**
+ * ath5k_hw_rf2425_channel() - Set channel frequency on RF2425
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * AR2425/2417 have a different 2GHz RF so the code differs
+ * a little from the RF5112 case.
*/
-static int ath5k_hw_rf2425_channel(struct ath5k_hw *ah,
+static int
+ath5k_hw_rf2425_channel(struct ath5k_hw *ah,
struct ieee80211_channel *channel)
{
u32 data, data0, data2;
@@ -1258,10 +1422,16 @@ static int ath5k_hw_rf2425_channel(struct ath5k_hw *ah,
return 0;
}
-/*
- * Set a channel on the radio chip
+/**
+ * ath5k_hw_channel() - Set a channel on the radio chip
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * This is the main function called to set a channel on the
+ * radio chip based on the radio chip version.
*/
-static int ath5k_hw_channel(struct ath5k_hw *ah,
+static int
+ath5k_hw_channel(struct ath5k_hw *ah,
struct ieee80211_channel *channel)
{
int ret;
@@ -1313,11 +1483,46 @@ static int ath5k_hw_channel(struct ath5k_hw *ah,
return 0;
}
+
/*****************\
PHY calibration
\*****************/
-static s32 ath5k_hw_read_measured_noise_floor(struct ath5k_hw *ah)
+/**
+ * DOC: PHY Calibration routines
+ *
+ * Noise floor calibration: When we tell the hardware to
+ * perform a noise floor calibration by setting the
+ * AR5K_PHY_AGCCTL_NF bit on AR5K_PHY_AGCCTL, it will periodically
+ * sample-and-hold the minimum noise level seen at the antennas.
+ * This value is then stored in a ring buffer of recently measured
+ * noise floor values so we have a moving window of the last few
+ * samples. The median of the values in the history is then loaded
+ * into the hardware for its own use for RSSI and CCA measurements.
+ * This type of calibration doesn't interfere with traffic.
+ *
+ * AGC calibration: When we tell the hardware to perform
+ * an AGC (Automatic Gain Control) calibration by setting the
+ * AR5K_PHY_AGCCTL_CAL bit, the hw disconnects the antennas and
+ * calibrates the DC offsets of its ADCs. During this period
+ * rx/tx is disabled, so we have to deal with it in the driver.
+ *
+ * I/Q calibration: When we tell the hardware to perform
+ * an I/Q calibration, it tries to correct I/Q imbalance and
+ * fix the QAM constellation by sampling data from received frames.
+ * It doesn't interfere with traffic.
+ *
+ * For more info on AGC and I/Q calibration check out patent doc
+ * #03/094463.
+ */
+
+/**
+ * ath5k_hw_read_measured_noise_floor() - Read measured NF from hw
+ * @ah: The &struct ath5k_hw
+ */
+static s32
+ath5k_hw_read_measured_noise_floor(struct ath5k_hw *ah)
{
s32 val;
@@ -1325,7 +1530,12 @@ static s32 ath5k_hw_read_measured_noise_floor(struct ath5k_hw *ah)
return sign_extend32(AR5K_REG_MS(val, AR5K_PHY_NF_MINCCA_PWR), 8);
}
-void ath5k_hw_init_nfcal_hist(struct ath5k_hw *ah)
+/**
+ * ath5k_hw_init_nfcal_hist() - Initialize NF calibration history buffer
+ * @ah: The &struct ath5k_hw
+ */
+void
+ath5k_hw_init_nfcal_hist(struct ath5k_hw *ah)
{
int i;
@@ -1334,6 +1544,11 @@ void ath5k_hw_init_nfcal_hist(struct ath5k_hw *ah)
ah->ah_nfcal_hist.nfval[i] = AR5K_TUNE_CCA_MAX_GOOD_VALUE;
}
+/**
+ * ath5k_hw_update_nfcal_hist() - Update NF calibration history buffer
+ * @ah: The &struct ath5k_hw
+ * @noise_floor: The NF we got from hw
+ */
static void ath5k_hw_update_nfcal_hist(struct ath5k_hw *ah, s16 noise_floor)
{
struct ath5k_nfcal_hist *hist = &ah->ah_nfcal_hist;
@@ -1341,7 +1556,12 @@ static void ath5k_hw_update_nfcal_hist(struct ath5k_hw *ah, s16 noise_floor)
hist->nfval[hist->index] = noise_floor;
}
-static s16 ath5k_hw_get_median_noise_floor(struct ath5k_hw *ah)
+/**
+ * ath5k_hw_get_median_noise_floor() - Get median NF from history buffer
+ * @ah: The &struct ath5k_hw
+ */
+static s16
+ath5k_hw_get_median_noise_floor(struct ath5k_hw *ah)
{
s16 sort[ATH5K_NF_CAL_HIST_MAX];
s16 tmp;
@@ -1364,18 +1584,16 @@ static s16 ath5k_hw_get_median_noise_floor(struct ath5k_hw *ah)
return sort[(ATH5K_NF_CAL_HIST_MAX - 1) / 2];
}
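
The history-buffer handling above ends with a median. An illustrative sketch of that step (editor's aside, not part of the patch; example_* names are hypothetical) is simply to sort a copy of the small buffer and pick the middle element:

static short example_median_nf(const short *hist, int len)
{
	short sorted[32];	/* assumes len <= 32, plenty for a NF history */
	int i, j;

	for (i = 0; i < len; i++) {
		short tmp = hist[i];

		/* insertion sort, fine for a handful of samples */
		for (j = i - 1; j >= 0 && sorted[j] > tmp; j--)
			sorted[j + 1] = sorted[j];
		sorted[j + 1] = tmp;
	}

	return sorted[(len - 1) / 2];
}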
-/*
- * When we tell the hardware to perform a noise floor calibration
- * by setting the AR5K_PHY_AGCCTL_NF bit, it will periodically
- * sample-and-hold the minimum noise level seen at the antennas.
- * This value is then stored in a ring buffer of recently measured
- * noise floor values so we have a moving window of the last few
- * samples.
+/**
+ * ath5k_hw_update_noise_floor() - Update NF on hardware
+ * @ah: The &struct ath5k_hw
*
- * The median of the values in the history is then loaded into the
- * hardware for its own use for RSSI and CCA measurements.
+ * This is the main function we call to perform a NF calibration,
+ * it reads NF from hardware, calculates the median and updates
+ * NF on hw.
*/
-void ath5k_hw_update_noise_floor(struct ath5k_hw *ah)
+void
+ath5k_hw_update_noise_floor(struct ath5k_hw *ah)
{
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
u32 val;
@@ -1390,6 +1608,8 @@ void ath5k_hw_update_noise_floor(struct ath5k_hw *ah)
return;
}
+ ah->ah_cal_mask |= AR5K_CALIBRATION_NF;
+
ee_mode = ath5k_eeprom_mode_from_channel(ah->ah_current_channel);
/* completed NF calibration, test threshold */
@@ -1434,20 +1654,29 @@ void ath5k_hw_update_noise_floor(struct ath5k_hw *ah)
ah->ah_noise_floor = nf;
+ ah->ah_cal_mask &= ~AR5K_CALIBRATION_NF;
+
ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE,
"noise floor calibrated: %d\n", nf);
}
-/*
- * Perform a PHY calibration on RF5110
- * -Fix BPSK/QAM Constellation (I/Q correction)
+/**
+ * ath5k_hw_rf5110_calibrate() - Perform a PHY calibration on RF5110
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * Do a complete PHY calibration (AGC + NF + I/Q) on RF5110
*/
-static int ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah,
+static int
+ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah,
struct ieee80211_channel *channel)
{
u32 phy_sig, phy_agc, phy_sat, beacon;
int ret;
+ if (!(ah->ah_cal_mask & AR5K_CALIBRATION_FULL))
+ return 0;
+
/*
* Disable beacons and RX/TX queues, wait
*/
@@ -1456,7 +1685,7 @@ static int ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah,
beacon = ath5k_hw_reg_read(ah, AR5K_BEACON_5210);
ath5k_hw_reg_write(ah, beacon & ~AR5K_BEACON_ENABLE, AR5K_BEACON_5210);
- mdelay(2);
+ usleep_range(2000, 2500);
/*
* Set the channel (with AGC turned off)
@@ -1469,7 +1698,7 @@ static int ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah,
* Activate PHY and wait
*/
ath5k_hw_reg_write(ah, AR5K_PHY_ACT_ENABLE, AR5K_PHY_ACT);
- mdelay(1);
+ usleep_range(1000, 1500);
AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_AGC, AR5K_PHY_AGC_DISABLE);
@@ -1506,7 +1735,7 @@ static int ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah,
ath5k_hw_reg_write(ah, AR5K_PHY_RFSTG_DISABLE, AR5K_PHY_RFSTG);
AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_AGC, AR5K_PHY_AGC_DISABLE);
- mdelay(1);
+ usleep_range(1000, 1500);
/*
* Enable calibration and wait until completion
@@ -1537,8 +1766,9 @@ static int ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah,
return 0;
}
-/*
- * Perform I/Q calibration on RF5111/5112 and newer chips
+/**
+ * ath5k_hw_rf511x_iq_calibrate() - Perform I/Q calibration on RF5111 and newer
+ * @ah: The &struct ath5k_hw
*/
static int
ath5k_hw_rf511x_iq_calibrate(struct ath5k_hw *ah)
@@ -1547,12 +1777,19 @@ ath5k_hw_rf511x_iq_calibrate(struct ath5k_hw *ah)
s32 iq_corr, i_coff, i_coffd, q_coff, q_coffd;
int i;
- if (!ah->ah_calibration ||
- ath5k_hw_reg_read(ah, AR5K_PHY_IQ) & AR5K_PHY_IQ_RUN)
- return 0;
+ /* Skip if I/Q calibration is not needed or if it's still running */
+ if (!ah->ah_iq_cal_needed)
+ return -EINVAL;
+ else if (ath5k_hw_reg_read(ah, AR5K_PHY_IQ) & AR5K_PHY_IQ_RUN) {
+ ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_CALIBRATE,
+ "I/Q calibration still running");
+ return -EBUSY;
+ }
/* Calibration has finished, get the results and re-run */
- /* work around empty results which can apparently happen on 5212 */
+
+ /* Workaround for empty results which can apparently happen on 5212:
+ * read the registers up to 10 times until we get both i_pwr and q_pwr */
for (i = 0; i <= 10; i++) {
iq_corr = ath5k_hw_reg_read(ah, AR5K_PHY_IQRES_CAL_CORR);
i_pwr = ath5k_hw_reg_read(ah, AR5K_PHY_IQRES_CAL_PWR_I);
@@ -1570,9 +1807,13 @@ ath5k_hw_rf511x_iq_calibrate(struct ath5k_hw *ah)
else
q_coffd = q_pwr >> 7;
- /* protect against divide by 0 and loss of sign bits */
+ /* In case i_coffd became zero, cancel the calibration:
+ * not only is it too small, it would also result in a
+ * divide by zero later on. */
if (i_coffd == 0 || q_coffd < 2)
- return 0;
+ return -ECANCELED;
+
+ /* Protect against loss of sign bits */
i_coff = (-iq_corr) / i_coffd;
i_coff = clamp(i_coff, -32, 31); /* signed 6 bit */
@@ -1601,10 +1842,17 @@ ath5k_hw_rf511x_iq_calibrate(struct ath5k_hw *ah)
return 0;
}
-/*
- * Perform a PHY calibration
+/**
+ * ath5k_hw_phy_calibrate() - Perform a PHY calibration
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * The main function we call from above to perform
+ * a short or full PHY calibration based on RF chip
+ * and current channel
*/
-int ath5k_hw_phy_calibrate(struct ath5k_hw *ah,
+int
+ath5k_hw_phy_calibrate(struct ath5k_hw *ah,
struct ieee80211_channel *channel)
{
int ret;
@@ -1613,10 +1861,43 @@ int ath5k_hw_phy_calibrate(struct ath5k_hw *ah,
return ath5k_hw_rf5110_calibrate(ah, channel);
ret = ath5k_hw_rf511x_iq_calibrate(ah);
+ if (ret) {
+ ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_CALIBRATE,
+ "No I/Q correction performed (%uMHz)\n",
+ channel->center_freq);
+
+ /* Happens all the time if there is not much
+ * traffic, consider it normal behaviour. */
+ ret = 0;
+ }
+
+ /* On full calibration do an AGC calibration and
+ * request a PAPD probe for gainf calibration if
+ * needed */
+ if (ah->ah_cal_mask & AR5K_CALIBRATION_FULL) {
- if ((ah->ah_radio == AR5K_RF5111 || ah->ah_radio == AR5K_RF5112) &&
- (channel->hw_value != AR5K_MODE_11B))
- ath5k_hw_request_rfgain_probe(ah);
+ AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL,
+ AR5K_PHY_AGCCTL_CAL);
+
+ ret = ath5k_hw_register_timeout(ah, AR5K_PHY_AGCCTL,
+ AR5K_PHY_AGCCTL_CAL | AR5K_PHY_AGCCTL_NF,
+ 0, false);
+ if (ret) {
+ ATH5K_ERR(ah,
+ "gain calibration timeout (%uMHz)\n",
+ channel->center_freq);
+ }
+
+ if ((ah->ah_radio == AR5K_RF5111 ||
+ ah->ah_radio == AR5K_RF5112)
+ && (channel->hw_value != AR5K_MODE_11B))
+ ath5k_hw_request_rfgain_probe(ah);
+ }
+
+ /* Update noise floor
+ * XXX: Only do this after AGC calibration */
+ if (!(ah->ah_cal_mask & AR5K_CALIBRATION_NF))
+ ath5k_hw_update_noise_floor(ah);
return ret;
}
@@ -1626,6 +1907,16 @@ int ath5k_hw_phy_calibrate(struct ath5k_hw *ah,
* Spur mitigation functions *
\***************************/
+/**
+ * ath5k_hw_set_spur_mitigation_filter() - Configure SPUR filter
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * This function gets called during PHY initialization to
+ * configure the spur filter for the given channel. Spur is noise
+ * generated due to "reflection" effects; for more information on this
+ * method check out patent US7643810
+ */
static void
ath5k_hw_set_spur_mitigation_filter(struct ath5k_hw *ah,
struct ieee80211_channel *channel)
@@ -1865,15 +2156,73 @@ ath5k_hw_set_spur_mitigation_filter(struct ath5k_hw *ah,
* Antenna control *
\*****************/
-static void /*TODO:Boundary check*/
+/**
+ * DOC: Antenna control
+ *
+ * Hw supports up to 14 antennas! I haven't found any card that implements
+ * that; the maximum number of antennas I've seen is 4 (2 for 2GHz and 2
+ * for 5GHz). Antenna 1 (MAIN) should be omnidirectional, 2 (AUX)
+ * omnidirectional or sectorial and antennas 3-14 sectorial (or directional).
+ *
+ * We can have a single antenna for RX and multiple antennas for TX.
+ * RX antenna is our "default" antenna (usually antenna 1) set on
+ * DEFAULT_ANTENNA register and TX antenna is set on each TX control descriptor
+ * (0 for automatic selection, 1 - 14 antenna number).
+ *
+ * We can let hw do all the work doing fast antenna diversity for both
+ * tx and rx or we can do things manually. Here are the options we have
+ * (all are bits of STA_ID1 register):
+ *
+ * AR5K_STA_ID1_DEFAULT_ANTENNA -> When 0 is set as the TX antenna on TX
+ * control descriptor, use the default antenna to transmit or else use the last
+ * antenna on which we received an ACK.
+ *
+ * AR5K_STA_ID1_DESC_ANTENNA -> Update default antenna after each TX frame to
+ * the antenna on which we got the ACK for that frame.
+ *
+ * AR5K_STA_ID1_RTS_DEF_ANTENNA -> Use default antenna for RTS or else use the
+ * one on the TX descriptor.
+ *
+ * AR5K_STA_ID1_SELFGEN_DEF_ANT -> Use default antenna for self generated frames
+ * (ACKs etc), or else use current antenna (the one we just used for TX).
+ *
+ * Using the above we support the following scenarios:
+ *
+ * AR5K_ANTMODE_DEFAULT -> Hw handles antenna diversity etc automatically
+ *
+ * AR5K_ANTMODE_FIXED_A -> Only antenna A (MAIN) is present
+ *
+ * AR5K_ANTMODE_FIXED_B -> Only antenna B (AUX) is present
+ *
+ * AR5K_ANTMODE_SINGLE_AP -> Sta locked on a single ap
+ *
+ * AR5K_ANTMODE_SECTOR_AP -> AP with tx antenna set on tx desc
+ *
+ * AR5K_ANTMODE_SECTOR_STA -> STA with tx antenna set on tx desc
+ *
+ * AR5K_ANTMODE_DEBUG -> Debug mode: A -> Rx, B -> Tx
+ *
+ * Also note that when setting the antenna to F on the tx descriptor, the
+ * card inverts the current tx antenna.
+ */
+
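As a rough illustration of how the STA_ID1 bits listed above combine, here is a hedged sketch for a fixed-antenna setup; the exact per-mode bit combinations are the job of ath5k_hw_set_antenna_mode() below, so treat this purely as a reading aid:

/* Illustrative only: pin everything to the default antenna, roughly
 * what one would expect for AR5K_ANTMODE_FIXED_A/B. */
static void ant_fixed_sketch(struct ath5k_hw *ah, u8 def_ant)
{
	ath5k_hw_set_def_antenna(ah, def_ant);	/* default (RX) antenna */

	AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1,
			     AR5K_STA_ID1_DEFAULT_ANTENNA |	/* TX ant 0 -> default */
			     AR5K_STA_ID1_RTS_DEF_ANTENNA |	/* RTS on default */
			     AR5K_STA_ID1_SELFGEN_DEF_ANT);	/* ACKs on default */

	AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1,
			      AR5K_STA_ID1_DESC_ANTENNA);	/* don't follow ACKed ant */
}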
+/**
+ * ath5k_hw_set_def_antenna() - Set default rx antenna on AR5211/5212 and newer
+ * @ah: The &struct ath5k_hw
+ * @ant: Antenna number
+ */
+static void
ath5k_hw_set_def_antenna(struct ath5k_hw *ah, u8 ant)
{
if (ah->ah_version != AR5K_AR5210)
ath5k_hw_reg_write(ah, ant & 0x7, AR5K_DEFAULT_ANTENNA);
}
-/*
- * Enable/disable fast rx antenna diversity
+/**
+ * ath5k_hw_set_fast_div() - Enable/disable fast rx antenna diversity
+ * @ah: The &struct ath5k_hw
+ * @ee_mode: One of enum ath5k_driver_mode
+ * @enable: True to enable, false to disable
*/
static void
ath5k_hw_set_fast_div(struct ath5k_hw *ah, u8 ee_mode, bool enable)
@@ -1913,6 +2262,14 @@ ath5k_hw_set_fast_div(struct ath5k_hw *ah, u8 ee_mode, bool enable)
}
}
+/**
+ * ath5k_hw_set_antenna_switch() - Set up antenna switch table
+ * @ah: The &struct ath5k_hw
+ * @ee_mode: One of enum ath5k_driver_mode
+ *
+ * Switch table comes from EEPROM and includes information on controlling
+ * the 2 antenna RX attenuators
+ */
void
ath5k_hw_set_antenna_switch(struct ath5k_hw *ah, u8 ee_mode)
{
@@ -1944,8 +2301,10 @@ ath5k_hw_set_antenna_switch(struct ath5k_hw *ah, u8 ee_mode)
AR5K_PHY_ANT_SWITCH_TABLE_1);
}
-/*
- * Set antenna operating mode
+/**
+ * ath5k_hw_set_antenna_mode() - Set antenna operating mode
+ * @ah: The &struct ath5k_hw
+ * @ant_mode: One of enum ath5k_ant_mode
*/
void
ath5k_hw_set_antenna_mode(struct ath5k_hw *ah, u8 ant_mode)
@@ -2068,8 +2427,13 @@ ath5k_hw_set_antenna_mode(struct ath5k_hw *ah, u8 ant_mode)
* Helper functions
*/
-/*
- * Do linear interpolation between two given (x, y) points
+/**
+ * ath5k_get_interpolated_value() - Get interpolated Y val between two points
+ * @target: X value of the middle point
+ * @x_left: X value of the left point
+ * @x_right: X value of the right point
+ * @y_left: Y value of the left point
+ * @y_right: Y value of the right point
*/
static s16
ath5k_get_interpolated_value(s16 target, s16 x_left, s16 x_right,
@@ -2096,13 +2460,18 @@ ath5k_get_interpolated_value(s16 target, s16 x_left, s16 x_right,
return result;
}
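For reference, the interpolation this helper performs can be written as a single expression; a simplified sketch that ignores the corner cases (equal x values, target outside the interval) the real helper has to handle:

/* Simplified: y(target) on the line through (x_left, y_left) and
 * (x_right, y_right). */
static s16 interpolate_sketch(s16 target, s16 x_left, s16 x_right,
			      s16 y_left, s16 y_right)
{
	return y_left + (s16)(((target - x_left) * (y_right - y_left)) /
			      (x_right - x_left));
}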
-/*
- * Find vertical boundary (min pwr) for the linear PCDAC curve.
+/**
+ * ath5k_get_linear_pcdac_min() - Find vertical boundary (min pwr) for the
+ * linear PCDAC curve
+ * @stepL: Left array with y values (pcdac steps)
+ * @stepR: Right array with y values (pcdac steps)
+ * @pwrL: Left array with x values (power steps)
+ * @pwrR: Right array with x values (power steps)
*
* Since we have the top of the curve and we draw the line below
* until we reach 1 (1 pcdac step) we need to know which point
- * (x value) that is so that we don't go below y axis and have negative
- * pcdac values when creating the curve, or fill the table with zeroes.
+ * (x value) that is so that we don't go below x axis and have negative
+ * pcdac values when creating the curve, or fill the table with zeros.
*/
static s16
ath5k_get_linear_pcdac_min(const u8 *stepL, const u8 *stepR,
@@ -2148,7 +2517,16 @@ ath5k_get_linear_pcdac_min(const u8 *stepL, const u8 *stepR,
return max(min_pwrL, min_pwrR);
}
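The boundary search described above can be pictured with a two-point extrapolation; this is a deliberately simplified sketch (the real function walks both curves step by step and returns max(min_pwrL, min_pwrR)):

/* Given the two lowest points (pwr1, step1) and (pwr2, step2) of one
 * curve, extrapolate the line down to 1 pcdac step and return the
 * power (x) value where it lands. */
static s16 pcdac_min_two_point_sketch(s16 pwr1, s16 step1, s16 pwr2, s16 step2)
{
	if (step2 == step1)	/* flat segment, nothing to extrapolate */
		return pwr1;

	return pwr1 - (step1 - 1) * (pwr2 - pwr1) / (step2 - step1);
}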
-/*
+/**
+ * ath5k_create_power_curve() - Create a Power to PDADC or PCDAC curve
+ * @pmin: Minimum power value (xmin)
+ * @pmax: Maximum power value (xmax)
+ * @pwr: Array of power steps (x values)
+ * @vpd: Array of matching PCDAC/PDADC steps (y values)
+ * @num_points: Number of provided points
+ * @vpd_table: Array to fill with the full PCDAC/PDADC values (y values)
+ * @type: One of enum ath5k_powertable_type (eeprom.h)
+ *
* Interpolate (pwr,vpd) points to create a Power to PDADC or a
* Power to PCDAC curve.
*
@@ -2206,7 +2584,14 @@ ath5k_create_power_curve(s16 pmin, s16 pmax,
}
}
-/*
+/**
+ * ath5k_get_chan_pcal_surrounding_piers() - Get surrounding calibration piers
+ * for a given channel.
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ * @pcinfo_l: The &struct ath5k_chan_pcal_info to put the left cal. pier
+ * @pcinfo_r: The &struct ath5k_chan_pcal_info to put the right cal. pier
+ *
* Get the surrounding per-channel power calibration piers
* for a given frequency so that we can interpolate between
* them and come up with an appropriate dataset for our current
@@ -2289,11 +2674,17 @@ done:
*pcinfo_r = &pcinfo[idx_r];
}
-/*
+/**
+ * ath5k_get_rate_pcal_data() - Get the interpolated per-rate power
+ * calibration data
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ * @rates: The &struct ath5k_rate_pcal_info to fill
+ *
* Get the surrounding per-rate power calibration data
* for a given frequency and interpolate between power
* values to set max target power supported by hw for
- * each rate.
+ * each rate on this frequency.
*/
static void
ath5k_get_rate_pcal_data(struct ath5k_hw *ah,
@@ -2381,7 +2772,11 @@ done:
rpinfo[idx_r].target_power_54);
}
-/*
+/**
+ * ath5k_get_max_ctl_power() - Get max edge power for a given frequency
+ * @ah: the &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
* Get the max edge power for this channel if
* we have such data from EEPROM's Conformance Test
* Limits (CTL), and limit max power if needed.
@@ -2461,8 +2856,39 @@ ath5k_get_max_ctl_power(struct ath5k_hw *ah,
* Power to PCDAC table functions
*/
-/*
- * Fill Power to PCDAC table on RF5111
+/**
+ * DOC: Power to PCDAC table functions
+ *
+ * For RF5111 we have an XPD -eXternal Power Detector- curve
+ * for each calibrated channel. Each curve has 0.5dB power steps
+ * on x axis and PCDAC steps (offsets) on y axis and looks like an
+ * exponential function. To recreate the curve we read 11 points
+ * from eeprom (eeprom.c) and interpolate here.
+ *
+ * For RF5112 we have 4 XPD -eXternal Power Detector- curves
+ * for each calibrated channel on 0, -6, -12 and -18dBm but we only
+ * use the higher (3) and the lower (0) curves. Each curve again has 0.5dB
+ * power steps on x axis and PCDAC steps on y axis and looks like a
+ * linear function. To recreate the curve and pass the power values
+ * on hw, we get 4 points for xpd 0 (lower gain -> max power)
+ * and 3 points for xpd 3 (higher gain -> lower power) from eeprom (eeprom.c)
+ * and interpolate here.
+ *
+ * For a given channel we either use its own calibrated points (piers) or
+ * -if we don't have calibration data for this specific channel- we derive
+ * them by linearly interpolating between the surrounding channels we do
+ * have calibration data for. Then, with the calibrated points for this
+ * channel in hand, we do another linear interpolation between them to get
+ * the whole curve.
+ *
+ * We finally write the Y values of the curve(s) (the PCDAC values) on hw
+ */
+
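A hedged sketch of the per-point interpolation step described above, using ath5k_get_interpolated_value() between the left and right calibration piers; the array arguments stand in for the pier data the driver actually keeps inside struct ath5k_chan_pcal_info:

/* Sketch: derive this channel's PCDAC points from the two surrounding
 * piers, one interpolation per point. */
static void pcdac_pier_interp_sketch(u16 freq, u16 freq_l, u16 freq_r,
				     const u8 *pd_left, const u8 *pd_right,
				     u8 *pd_out, unsigned int n_points)
{
	unsigned int i;

	for (i = 0; i < n_points; i++)
		pd_out[i] = (u8)ath5k_get_interpolated_value(freq,
							     freq_l, freq_r,
							     pd_left[i],
							     pd_right[i]);
}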
+/**
+ * ath5k_fill_pwr_to_pcdac_table() - Fill Power to PCDAC table on RF5111
+ * @ah: The &struct ath5k_hw
+ * @table_min: Minimum power (x min)
+ * @table_max: Maximum power (x max)
*
* No further processing is needed for RF5111, the only thing we have to
* do is fill the values below and above calibration range since eeprom data
@@ -2503,10 +2929,14 @@ ath5k_fill_pwr_to_pcdac_table(struct ath5k_hw *ah, s16* table_min,
}
-/*
- * Combine available XPD Curves and fill Linear Power to PCDAC table
- * on RF5112
+/**
+ * ath5k_combine_linear_pcdac_curves() - Combine available PCDAC Curves
+ * @ah: The &struct ath5k_hw
+ * @table_min: Minimum power (x min)
+ * @table_max: Maximum power (x max)
+ * @pdcurves: Number of pd curves
*
+ * Combine available XPD Curves and fill Linear Power to PCDAC table on RF5112.
* RFX112 can have up to 2 curves (one for low txpower range and one for
* higher txpower range). We need to put them both on pcdac_out and place
* them in the correct location. In case we only have one curve available
@@ -2608,7 +3038,10 @@ ath5k_combine_linear_pcdac_curves(struct ath5k_hw *ah, s16* table_min,
}
}
-/* Write PCDAC values on hw */
+/**
+ * ath5k_write_pcdac_table() - Write the PCDAC values on hw
+ * @ah: The &struct ath5k_hw
+ */
static void
ath5k_write_pcdac_table(struct ath5k_hw *ah)
{
@@ -2631,9 +3064,32 @@ ath5k_write_pcdac_table(struct ath5k_hw *ah)
* Power to PDADC table functions
*/
-/*
- * Set the gain boundaries and create final Power to PDADC table
+/**
+ * DOC: Power to PDADC table functions
+ *
+ * For RF2413 and later we have a Power to PDADC table (Power Detector)
+ * instead of a PCDAC (Power Control) and 4 pd gain curves for each
+ * calibrated channel. Each curve has power on x axis in 0.5dB steps and
+ * PDADC steps on y axis and looks like an exponential function like the
+ * RF5111 curve.
+ *
+ * To recreate the curves we read the points from eeprom (eeprom.c)
+ * and interpolate here. Note that in most cases only 2 (higher and lower)
+ * curves are used (like RF5112) but vendors have the opportunity to include
+ * all 4 curves on eeprom. The final curve (higher power) has an extra
+ * point for better accuracy like RF5112.
*
+ * The process is similar to what we do above for RF5111/5112
+ */
+
+/**
+ * ath5k_combine_pwr_to_pdadc_curves() - Combine the various PDADC curves
+ * @ah: The &struct ath5k_hw
+ * @pwr_min: Minimum power (x min)
+ * @pwr_max: Maximum power (x max)
+ * @pdcurves: Number of available curves
+ *
+ * Combine the various pd curves and create the final Power to PDADC table
* We can have up to 4 pd curves, we need to do a similar process
* as we do for RF5112. This time we don't have an edge_flag but we
* set the gain boundaries on a separate register.
@@ -2757,7 +3213,11 @@ ath5k_combine_pwr_to_pdadc_curves(struct ath5k_hw *ah,
}
-/* Write PDADC values on hw */
+/**
+ * ath5k_write_pwr_to_pdadc_table() - Write the PDADC values on hw
+ * @ah: The &struct ath5k_hw
+ * @ee_mode: One of enum ath5k_driver_mode
+ */
static void
ath5k_write_pwr_to_pdadc_table(struct ath5k_hw *ah, u8 ee_mode)
{
@@ -2814,7 +3274,13 @@ ath5k_write_pwr_to_pdadc_table(struct ath5k_hw *ah, u8 ee_mode)
* Common code for PCDAC/PDADC tables
*/
-/*
+/**
+ * ath5k_setup_channel_powertable() - Set up power table for this channel
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ * @ee_mode: One of enum ath5k_driver_mode
+ * @type: One of enum ath5k_powertable_type (eeprom.h)
+ *
* This is the main function that uses all of the above
* to set PCDAC/PDADC table on hw for the current channel.
* This table is used for tx power calibration on the baseband,
@@ -3012,7 +3478,12 @@ ath5k_setup_channel_powertable(struct ath5k_hw *ah,
return 0;
}
-/* Write power table for current channel to hw */
+/**
+ * ath5k_write_channel_powertable() - Set power table for current channel on hw
+ * @ah: The &struct ath5k_hw
+ * @ee_mode: One of enum ath5k_driver_mode
+ * @type: One of enum ath5k_powertable_type (eeprom.h)
+ */
static void
ath5k_write_channel_powertable(struct ath5k_hw *ah, u8 ee_mode, u8 type)
{
@@ -3022,28 +3493,36 @@ ath5k_write_channel_powertable(struct ath5k_hw *ah, u8 ee_mode, u8 type)
ath5k_write_pcdac_table(ah);
}
-/*
- * Per-rate tx power setting
+
+/**
+ * DOC: Per-rate tx power setting
*
- * This is the code that sets the desired tx power (below
+ * This is the code that sets the desired tx power limit (below
* maximum) on hw for each rate (we also have TPC that sets
- * power per packet). We do that by providing an index on the
- * PCDAC/PDADC table we set up.
- */
-
-/*
- * Set rate power table
+ * power per packet type). We do that by providing an index on the
+ * PCDAC/PDADC table we set up above, for each rate.
*
* For now we only limit txpower based on maximum tx power
- * supported by hw (what's inside rate_info). We need to limit
- * this even more, based on regulatory domain etc.
+ * supported by hw (what's inside rate_info) + conformance test
+ * limits. We need to limit this even more, based on regulatory domain
+ * etc. to be safe. Normally this is done from above so we don't care
+ * here; all we care about is that the tx power we set will be O.K.
+ * for the hw (e.g. won't create noise on PA etc).
*
- * Rate power table contains indices to PCDAC/PDADC table (0.5dB steps)
- * and is indexed as follows:
+ * Rate power table contains indices to PCDAC/PDADC table (0.5dB steps -
+ * x values) and is indexed as follows:
* rates[0] - rates[7] -> OFDM rates
* rates[8] - rates[14] -> CCK rates
* rates[15] -> XR rates (they all have the same power)
*/
+
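As a reading aid for the layout above, here is a hedged sketch of the capping step: every entry ends up as an index into the PCDAC/PDADC table (0.5dB units), limited by the requested maximum. The per-rate targets come from the interpolated &struct ath5k_rate_pcal_info; the exact field-to-slot mapping is not shown here, so target[] is just a stand-in:

/* Sketch only: rates[] is laid out as in the text
 * (0-7 OFDM, 8-14 CCK, 15 XR). */
static void rate_pwr_cap_sketch(u16 max_pwr, const u16 *target, u16 *rates)
{
	int i;

	for (i = 0; i < 16; i++)
		rates[i] = min(target[i], max_pwr);
}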
+/**
+ * ath5k_setup_rate_powertable() - Set up rate power table for a given tx power
+ * @ah: The &struct ath5k_hw
+ * @max_pwr: The maximum tx power requested in 0.5dB steps
+ * @rate_info: The &struct ath5k_rate_pcal_info to fill
+ * @ee_mode: One of enum ath5k_driver_mode
+ */
static void
ath5k_setup_rate_powertable(struct ath5k_hw *ah, u16 max_pwr,
struct ath5k_rate_pcal_info *rate_info,
@@ -3114,8 +3593,14 @@ ath5k_setup_rate_powertable(struct ath5k_hw *ah, u16 max_pwr,
}
-/*
- * Set transmission power
+/**
+ * ath5k_hw_txpower() - Set transmission power limit for a given channel
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ * @txpower: Requested tx power in 0.5dB steps
+ *
+ * Combines all of the above to set the requested tx power limit
+ * on hw.
*/
static int
ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
@@ -3233,7 +3718,16 @@ ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
return 0;
}
-int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower)
+/**
+ * ath5k_hw_set_txpower_limit() - Set txpower limit for the current channel
+ * @ah: The &struct ath5k_hw
+ * @txpower: The requested tx power limit in 0.5dB steps
+ *
+ * This function provides access to ath5k_hw_txpower for the driver in
+ * case the user or an application changes it while the PHY is running.
+ */
+int
+ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower)
{
ATH5K_DBG(ah, ATH5K_DEBUG_TXPOWER,
"changing txpower to %d\n", txpower);
@@ -3241,11 +3735,26 @@ int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower)
return ath5k_hw_txpower(ah, ah->ah_current_channel, txpower);
}
+
/*************\
Init function
\*************/
-int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
+/**
+ * ath5k_hw_phy_init() - Initialize PHY
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ * @mode: One of enum ath5k_driver_mode
+ * @fast: Try a fast channel switch instead
+ *
+ * This is the main function used during reset to initialize PHY
+ * or do a fast channel change if possible.
+ *
+ * NOTE: Do not call this one from the driver; it assumes PHY is in a
+ * warm reset state!
+ */
+int
+ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
u8 mode, bool fast)
{
struct ieee80211_channel *curr_channel;
@@ -3355,7 +3864,7 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
if (ret)
return ret;
- mdelay(1);
+ usleep_range(1000, 1500);
/*
* Write RF buffer
@@ -3376,10 +3885,10 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
}
} else if (ah->ah_version == AR5K_AR5210) {
- mdelay(1);
+ usleep_range(1000, 1500);
/* Disable phy and wait */
ath5k_hw_reg_write(ah, AR5K_PHY_ACT_DISABLE, AR5K_PHY_ACT);
- mdelay(1);
+ usleep_range(1000, 1500);
}
/* Set channel on PHY */
@@ -3405,7 +3914,7 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
for (i = 0; i <= 20; i++) {
if (!(ath5k_hw_reg_read(ah, AR5K_PHY_ADC_TEST) & 0x10))
break;
- udelay(200);
+ usleep_range(200, 250);
}
ath5k_hw_reg_write(ah, phy_tst1, AR5K_PHY_TST1);
@@ -3433,9 +3942,9 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
/* At the same time start I/Q calibration for QAM constellation
* -no need for CCK- */
- ah->ah_calibration = false;
+ ah->ah_iq_cal_needed = false;
if (!(mode == AR5K_MODE_11B)) {
- ah->ah_calibration = true;
+ ah->ah_iq_cal_needed = true;
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_IQ,
AR5K_PHY_IQ_CAL_NUM_LOG_MAX, 15);
AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ,
diff --git a/drivers/net/wireless/ath/ath5k/qcu.c b/drivers/net/wireless/ath/ath5k/qcu.c
index 776654228eaa..30b50f934172 100644
--- a/drivers/net/wireless/ath/ath5k/qcu.c
+++ b/drivers/net/wireless/ath/ath5k/qcu.c
@@ -17,23 +17,48 @@
*/
/********************************************\
-Queue Control Unit, DFS Control Unit Functions
+Queue Control Unit, DCF Control Unit Functions
\********************************************/
#include "ath5k.h"
#include "reg.h"
#include "debug.h"
+#include <linux/log2.h>
+
+/**
+ * DOC: Queue Control Unit (QCU)/DCF Control Unit (DCU) functions
+ *
+ * Here we setup parameters for the 12 available TX queues. Note that
+ * on the various registers we can usually only map the first 10 of them so
+ * basically we have 10 queues to play with. Each queue has a matching
+ * QCU that controls when the queue will get triggered and multiple QCUs
+ * can be mapped to a single DCU that controls the various DFS parameters
+ * for the various queues. In our setup we have a 1:1 mapping between QCUs
+ * and DCUs allowing us to have different DFS settings for each queue.
+ *
+ * When a frame goes into a TX queue, the QCU decides when it'll trigger a
+ * transmission based on various criteria (such as how much data we have inside
+ * its buffer or -if it's a beacon queue- if it's time to fire up the queue
+ * based on TSF etc), the DCU adds backoff, IFSes etc and then a scheduler
+ * (arbitrator) decides the priority of each QCU based on its configuration
+ * (e.g. beacons are always transmitted when they leave DCU bypassing all other
+ * frames from other queues waiting to be transmitted). After a frame leaves
+ * the DCU it goes to PCU for further processing and then to PHY for
+ * the actual transmission.
+ */
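A hedged usage sketch of the queue API documented below: allocate a data queue with a few DCF parameters and let the reset path program the matching QCU/DCU registers. The ath5k_txq_info field names and the AR5K_TX_QUEUE_DATA value are assumptions taken from the ath5k headers, not from this hunk:

/* Illustrative only; error handling trimmed. */
static int qcu_setup_sketch(struct ath5k_hw *ah)
{
	struct ath5k_txq_info qi = {
		.tqi_aifs	= 2,	/* DCF AIFS */
		.tqi_cw_min	= 15,	/* both pass ath5k_cw_validate() */
		.tqi_cw_max	= 1023,
	};
	int q;

	q = ath5k_hw_setup_tx_queue(ah, AR5K_TX_QUEUE_DATA, &qi);
	if (q < 0)
		return q;

	/* the allocated queue gets its QCU/DCU registers programmed by
	 * ath5k_hw_reset_tx_queue() during reset */
	return 0;
}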
/******************\
* Helper functions *
\******************/
-/*
- * Get number of pending frames
- * for a specific queue [5211+]
+/**
+ * ath5k_hw_num_tx_pending() - Get number of pending frames for a given queue
+ * @ah: The &struct ath5k_hw
+ * @queue: One of enum ath5k_tx_queue_id
*/
-u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue)
+u32
+ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue)
{
u32 pending;
AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
@@ -58,10 +83,13 @@ u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue)
return pending;
}
-/*
- * Set a transmit queue inactive
+/**
+ * ath5k_hw_release_tx_queue() - Set a transmit queue inactive
+ * @ah: The &struct ath5k_hw
+ * @queue: One of enum ath5k_tx_queue_id
*/
-void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue)
+void
+ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue)
{
if (WARN_ON(queue >= ah->ah_capabilities.cap_queues.q_tx_num))
return;
@@ -72,34 +100,56 @@ void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue)
AR5K_Q_DISABLE_BITS(ah->ah_txq_status, queue);
}
-/*
+/**
+ * ath5k_cw_validate() - Make sure the given cw is valid
+ * @cw_req: The contention window value to check
+ *
* Make sure cw is a power of 2 minus 1 and smaller than 1024
*/
-static u16 ath5k_cw_validate(u16 cw_req)
+static u16
+ath5k_cw_validate(u16 cw_req)
{
- u32 cw = 1;
cw_req = min(cw_req, (u16)1023);
- while (cw < cw_req)
- cw = (cw << 1) | 1;
+ /* Check if cw_req + 1 is a power of 2 */
+ if (is_power_of_2(cw_req + 1))
+ return cw_req;
- return cw;
+ /* Check if cw_req is a power of 2 */
+ if (is_power_of_2(cw_req))
+ return cw_req - 1;
+
+ /* If neither of the above holds, round
+ * cw_req up to the next power of 2, minus 1 */
+ cw_req = (u16) roundup_pow_of_two(cw_req) - 1;
+
+ return cw_req;
}
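A few example results of the validation above, derived directly from the code (the roundup_pow_of_two() path comes from <linux/log2.h>):

/*
 *   ath5k_cw_validate(15)   -> 15    (15 + 1 is a power of 2)
 *   ath5k_cw_validate(16)   -> 15    (16 itself is a power of 2)
 *   ath5k_cw_validate(100)  -> 127   (rounded up to 128, minus 1)
 *   ath5k_cw_validate(2048) -> 1023  (capped at 1023 first; 1024 is a power of 2)
 */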
-/*
- * Get properties for a transmit queue
+/**
+ * ath5k_hw_get_tx_queueprops() - Get properties for a transmit queue
+ * @ah: The &struct ath5k_hw
+ * @queue: One of enum ath5k_tx_queue_id
+ * @queue_info: The &struct ath5k_txq_info to fill
*/
-int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
+int
+ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
struct ath5k_txq_info *queue_info)
{
memcpy(queue_info, &ah->ah_txq[queue], sizeof(struct ath5k_txq_info));
return 0;
}
-/*
- * Set properties for a transmit queue
+/**
+ * ath5k_hw_set_tx_queueprops() - Set properties for a transmit queue
+ * @ah: The &struct ath5k_hw
+ * @queue: One of enum ath5k_tx_queue_id
+ * @qinfo: The &struct ath5k_txq_info to use
+ *
+ * Returns 0 on success or -EIO if queue is inactive
*/
-int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
+int
+ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
const struct ath5k_txq_info *qinfo)
{
struct ath5k_txq_info *qi;
@@ -139,10 +189,16 @@ int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
return 0;
}
-/*
- * Initialize a transmit queue
+/**
+ * ath5k_hw_setup_tx_queue() - Initialize a transmit queue
+ * @ah: The &struct ath5k_hw
+ * @queue_type: One of enum ath5k_tx_queue
+ * @queue_info: The &struct ath5k_txq_info to use
+ *
+ * Returns 0 on success, -EINVAL on invalid arguments
*/
-int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
+int
+ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
struct ath5k_txq_info *queue_info)
{
unsigned int queue;
@@ -217,10 +273,16 @@ int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
* Single QCU/DCU initialization *
\*******************************/
-/*
- * Set tx retry limits on DCU
+/**
+ * ath5k_hw_set_tx_retry_limits() - Set tx retry limits on DCU
+ * @ah: The &struct ath5k_hw
+ * @queue: One of enum ath5k_tx_queue_id
+ *
+ * This function is used when initializing a queue, to set
+ * retry limits based on ah->ah_retry_* and the chipset used.
*/
-void ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah,
+void
+ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah,
unsigned int queue)
{
/* Single data queue on AR5210 */
@@ -255,15 +317,15 @@ void ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah,
}
/**
- * ath5k_hw_reset_tx_queue - Initialize a single hw queue
+ * ath5k_hw_reset_tx_queue() - Initialize a single hw queue
+ * @ah: The &struct ath5k_hw
+ * @queue: One of enum ath5k_tx_queue_id
*
- * @ah The &struct ath5k_hw
- * @queue The hw queue number
- *
- * Set DFS properties for the given transmit queue on DCU
+ * Set DCF properties for the given transmit queue on DCU
* and configures all queue-specific parameters.
*/
-int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
+int
+ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
{
struct ath5k_txq_info *tq = &ah->ah_txq[queue];
@@ -491,10 +553,9 @@ int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
\**************************/
/**
- * ath5k_hw_set_ifs_intervals - Set global inter-frame spaces on DCU
- *
- * @ah The &struct ath5k_hw
- * @slot_time Slot time in us
+ * ath5k_hw_set_ifs_intervals() - Set global inter-frame spaces on DCU
+ * @ah: The &struct ath5k_hw
+ * @slot_time: Slot time in us
*
* Sets the global IFS intervals on DCU (also works on AR5210) for
* the given slot time and the current bwmode.
@@ -597,7 +658,15 @@ int ath5k_hw_set_ifs_intervals(struct ath5k_hw *ah, unsigned int slot_time)
}
-int ath5k_hw_init_queues(struct ath5k_hw *ah)
+/**
+ * ath5k_hw_init_queues() - Initialize tx queues
+ * @ah: The &struct ath5k_hw
+ *
+ * Initializes all tx queues based on information on
+ * ah->ah_txq* set by the driver
+ */
+int
+ath5k_hw_init_queues(struct ath5k_hw *ah)
{
int i, ret;
diff --git a/drivers/net/wireless/ath/ath5k/reg.h b/drivers/net/wireless/ath/ath5k/reg.h
index f5c1000045d3..0ea1608b47fd 100644
--- a/drivers/net/wireless/ath/ath5k/reg.h
+++ b/drivers/net/wireless/ath/ath5k/reg.h
@@ -280,6 +280,10 @@
* 5211/5212 we have one primary and 4 secondary registers.
* So we have AR5K_ISR for 5210 and AR5K_PISR /SISRx for 5211/5212.
* Most of these bits are common for all chipsets.
+ *
+ * NOTE: On 5211+ TXOK, TXDESC, TXERR, TXEOL and TXURN contain
+ * the logical OR from per-queue interrupt bits found on SISR registers
+ * (see below).
*/
#define AR5K_ISR 0x001c /* Register Address [5210] */
#define AR5K_PISR 0x0080 /* Register Address [5211+] */
@@ -292,7 +296,10 @@
#define AR5K_ISR_TXOK 0x00000040 /* Frame successfully transmitted */
#define AR5K_ISR_TXDESC 0x00000080 /* TX descriptor request */
#define AR5K_ISR_TXERR 0x00000100 /* Transmit error */
-#define AR5K_ISR_TXNOFRM 0x00000200 /* No frame transmitted (transmit timeout) */
+#define AR5K_ISR_TXNOFRM 0x00000200 /* No frame transmitted (transmit timeout)
+ * NOTE: We don't have per-queue info for this
+ * one, but we can enable it per-queue through
+ * TXNOFRM_QCU field on TXNOFRM register */
#define AR5K_ISR_TXEOL 0x00000400 /* Empty TX descriptor */
#define AR5K_ISR_TXURN 0x00000800 /* Transmit FIFO underrun */
#define AR5K_ISR_MIB 0x00001000 /* Update MIB counters */
@@ -302,21 +309,29 @@
#define AR5K_ISR_SWBA 0x00010000 /* Software beacon alert */
#define AR5K_ISR_BRSSI 0x00020000 /* Beacon rssi below threshold (?) */
#define AR5K_ISR_BMISS 0x00040000 /* Beacon missed */
-#define AR5K_ISR_HIUERR 0x00080000 /* Host Interface Unit error [5211+] */
+#define AR5K_ISR_HIUERR 0x00080000 /* Host Interface Unit error [5211+]
+ * 'or' of MCABT, SSERR, DPERR from SISR2 */
#define AR5K_ISR_BNR 0x00100000 /* Beacon not ready [5211+] */
#define AR5K_ISR_MCABT 0x00100000 /* Master Cycle Abort [5210] */
#define AR5K_ISR_RXCHIRP 0x00200000 /* CHIRP Received [5212+] */
#define AR5K_ISR_SSERR 0x00200000 /* Signaled System Error [5210] */
-#define AR5K_ISR_DPERR 0x00400000 /* Det par Error (?) [5210] */
+#define AR5K_ISR_DPERR 0x00400000 /* Bus parity error [5210] */
#define AR5K_ISR_RXDOPPLER 0x00400000 /* Doppler chirp received [5212+] */
#define AR5K_ISR_TIM 0x00800000 /* [5211+] */
-#define AR5K_ISR_BCNMISC 0x00800000 /* 'or' of TIM, CAB_END, DTIM_SYNC, BCN_TIMEOUT,
- CAB_TIMEOUT and DTIM bits from SISR2 [5212+] */
+#define AR5K_ISR_BCNMISC 0x00800000 /* Misc beacon related interrupt
+ * 'or' of TIM, CAB_END, DTIM_SYNC, BCN_TIMEOUT,
+ * CAB_TIMEOUT and DTIM bits from SISR2 [5212+] */
#define AR5K_ISR_GPIO 0x01000000 /* GPIO (rf kill) */
#define AR5K_ISR_QCBRORN 0x02000000 /* QCU CBR overrun [5211+] */
#define AR5K_ISR_QCBRURN 0x04000000 /* QCU CBR underrun [5211+] */
#define AR5K_ISR_QTRIG 0x08000000 /* QCU scheduling trigger [5211+] */
+#define AR5K_ISR_BITS_FROM_SISRS (AR5K_ISR_TXOK | AR5K_ISR_TXDESC |\
+ AR5K_ISR_TXERR | AR5K_ISR_TXEOL |\
+ AR5K_ISR_TXURN | AR5K_ISR_HIUERR |\
+ AR5K_ISR_BCNMISC | AR5K_ISR_QCBRORN |\
+ AR5K_ISR_QCBRURN | AR5K_ISR_QTRIG)
+
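A hedged sketch of how an interrupt handler might use the new summary mask: when one of these bits is reported in PISR, the per-queue detail has to be read from the secondary status registers. The QCU CBR overrun case is shown, using the AR5K_SISR3 fields defined further below:

/* Sketch only; assumes the usual ath5k register accessor. */
static u32 isr_qcbrorn_queues_sketch(struct ath5k_hw *ah)
{
	u32 pisr = ath5k_hw_reg_read(ah, AR5K_PISR);

	if (!(pisr & AR5K_ISR_QCBRORN))
		return 0;

	/* bitmask of queues that overran their CBR budget */
	return ath5k_hw_reg_read(ah, AR5K_SISR3) & AR5K_SISR3_QCBRORN;
}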
/*
* Secondary status registers [5211+] (0 - 4)
*
@@ -347,7 +362,7 @@
#define AR5K_SISR2_BCN_TIMEOUT 0x08000000 /* Beacon Timeout [5212+] */
#define AR5K_SISR2_CAB_TIMEOUT 0x10000000 /* CAB Timeout [5212+] */
#define AR5K_SISR2_DTIM 0x20000000 /* [5212+] */
-#define AR5K_SISR2_TSFOOR 0x80000000 /* TSF OOR (?) */
+#define AR5K_SISR2_TSFOOR 0x80000000 /* TSF Out of range */
#define AR5K_SISR3 0x0090 /* Register Address [5211+] */
#define AR5K_SISR3_QCBRORN 0x000003ff /* Mask for QCBRORN */
diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
index 2abac257b4b4..250db40b751d 100644
--- a/drivers/net/wireless/ath/ath5k/reset.c
+++ b/drivers/net/wireless/ath/ath5k/reset.c
@@ -19,9 +19,9 @@
*
*/
-/*****************************\
- Reset functions and helpers
-\*****************************/
+/****************************\
+ Reset function and helpers
+\****************************/
#include <asm/unaligned.h>
@@ -33,14 +33,36 @@
#include "debug.h"
+/**
+ * DOC: Reset function and helpers
+ *
+ * Here we implement the main reset routine, used to bring the card
+ * to a working state and ready to receive. We also handle routines
+ * that don't fit elsewhere, such as clock, sleep and power control
+ */
+
+
/******************\
* Helper functions *
\******************/
-/*
- * Check if a register write has been completed
+/**
+ * ath5k_hw_register_timeout() - Poll a register for a flag/field change
+ * @ah: The &struct ath5k_hw
+ * @reg: The register to read
+ * @flag: The flag/field to check on the register
+ * @val: The field value we expect (if we check a field)
+ * @is_set: Instead of checking if the flag got cleared, check if it got set
+ *
+ * Some registers contain flags that indicate that an operation is
+ * running. We use this function to poll these registers and check
+ * if these flags get cleared. We also use it to poll a register
+ * field (containing multiple flags) until it gets a specific value.
+ *
+ * Returns -EAGAIN if we exceeded AR5K_TUNE_REGISTER_TIMEOUT * 15us or 0
*/
-int ath5k_hw_register_timeout(struct ath5k_hw *ah, u32 reg, u32 flag, u32 val,
+int
+ath5k_hw_register_timeout(struct ath5k_hw *ah, u32 reg, u32 flag, u32 val,
bool is_set)
{
int i;
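The rest of the body, reconstructed as a sketch from the kernel-doc above (a bounded poll of roughly 15us per iteration, AR5K_TUNE_REGISTER_TIMEOUT iterations in total); not a verbatim copy of the driver code:

	for (i = AR5K_TUNE_REGISTER_TIMEOUT; i > 0; i--) {
		u32 data = ath5k_hw_reg_read(ah, reg);

		if (is_set && (data & flag))		/* flag got set */
			break;
		else if ((data & flag) == val)		/* field reached value */
			break;
		udelay(15);
	}

	return (i <= 0) ? -EAGAIN : 0;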
@@ -64,35 +86,48 @@ int ath5k_hw_register_timeout(struct ath5k_hw *ah, u32 reg, u32 flag, u32 val,
\*************************/
/**
- * ath5k_hw_htoclock - Translate usec to hw clock units
- *
+ * ath5k_hw_htoclock() - Translate usec to hw clock units
* @ah: The &struct ath5k_hw
* @usec: value in microseconds
+ *
+ * Translate usecs to hw clock units based on the current
+ * hw clock rate.
+ *
+ * Returns number of clock units
*/
-unsigned int ath5k_hw_htoclock(struct ath5k_hw *ah, unsigned int usec)
+unsigned int
+ath5k_hw_htoclock(struct ath5k_hw *ah, unsigned int usec)
{
struct ath_common *common = ath5k_hw_common(ah);
return usec * common->clockrate;
}
/**
- * ath5k_hw_clocktoh - Translate hw clock units to usec
+ * ath5k_hw_clocktoh() - Translate hw clock units to usec
+ * @ah: The &struct ath5k_hw
* @clock: value in hw clock units
+ *
+ * Translate hw clock units to usecs based on the current
+ * hw clock rate.
+ *
+ * Returns number of usecs
*/
-unsigned int ath5k_hw_clocktoh(struct ath5k_hw *ah, unsigned int clock)
+unsigned int
+ath5k_hw_clocktoh(struct ath5k_hw *ah, unsigned int clock)
{
struct ath_common *common = ath5k_hw_common(ah);
return clock / common->clockrate;
}
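A quick numeric illustration of the two conversions above (clockrate counts hw clock units per microsecond):

/* With common->clockrate == 40:
 *   ath5k_hw_htoclock(ah, 10)  -> 400 clock units
 *   ath5k_hw_clocktoh(ah, 400) -> 10 usec
 */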
/**
- * ath5k_hw_init_core_clock - Initialize core clock
- *
- * @ah The &struct ath5k_hw
+ * ath5k_hw_init_core_clock() - Initialize core clock
+ * @ah: The &struct ath5k_hw
*
- * Initialize core clock parameters (usec, usec32, latencies etc).
+ * Initialize core clock parameters (usec, usec32, latencies etc),
+ * based on current bwmode and chipset properties.
*/
-static void ath5k_hw_init_core_clock(struct ath5k_hw *ah)
+static void
+ath5k_hw_init_core_clock(struct ath5k_hw *ah)
{
struct ieee80211_channel *channel = ah->ah_current_channel;
struct ath_common *common = ath5k_hw_common(ah);
@@ -227,16 +262,21 @@ static void ath5k_hw_init_core_clock(struct ath5k_hw *ah)
}
}
-/*
+/**
+ * ath5k_hw_set_sleep_clock() - Setup sleep clock operation
+ * @ah: The &struct ath5k_hw
+ * @enable: Enable sleep clock operation (false to disable)
+ *
* If there is an external 32KHz crystal available, use it
* as ref. clock instead of 32/40MHz clock and baseband clocks
* to save power during sleep or restore normal 32/40MHz
* operation.
*
- * XXX: When operating on 32KHz certain PHY registers (27 - 31,
- * 123 - 127) require delay on access.
+ * NOTE: When operating on 32KHz certain PHY registers (27 - 31,
+ * 123 - 127) require delay on access.
*/
-static void ath5k_hw_set_sleep_clock(struct ath5k_hw *ah, bool enable)
+static void
+ath5k_hw_set_sleep_clock(struct ath5k_hw *ah, bool enable)
{
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
u32 scal, spending, sclock;
@@ -340,10 +380,19 @@ static void ath5k_hw_set_sleep_clock(struct ath5k_hw *ah, bool enable)
* Reset/Sleep control *
\*********************/
-/*
- * Reset chipset
+/**
+ * ath5k_hw_nic_reset() - Reset the various chipset units
+ * @ah: The &struct ath5k_hw
+ * @val: Mask to indicate what units to reset
+ *
+ * To reset the various chipset units we need to write
+ * the mask to AR5K_RESET_CTL and poll the register until
+ * all flags are cleared.
+ *
+ * Returns 0 if we are O.K. or -EAGAIN (from ath5k_hw_register_timeout)
*/
-static int ath5k_hw_nic_reset(struct ath5k_hw *ah, u32 val)
+static int
+ath5k_hw_nic_reset(struct ath5k_hw *ah, u32 val)
{
int ret;
u32 mask = val ? val : ~0U;
@@ -357,7 +406,7 @@ static int ath5k_hw_nic_reset(struct ath5k_hw *ah, u32 val)
ath5k_hw_reg_write(ah, val, AR5K_RESET_CTL);
/* Wait at least 128 PCI clocks */
- udelay(15);
+ usleep_range(15, 20);
if (ah->ah_version == AR5K_AR5210) {
val &= AR5K_RESET_CTL_PCU | AR5K_RESET_CTL_DMA
@@ -382,12 +431,17 @@ static int ath5k_hw_nic_reset(struct ath5k_hw *ah, u32 val)
return ret;
}
-/*
- * Reset AHB chipset
- * AR5K_RESET_CTL_PCU flag resets WMAC
- * AR5K_RESET_CTL_BASEBAND flag resets WBB
+/**
+ * ath5k_hw_wisoc_reset() - Reset AHB chipset
+ * @ah: The &struct ath5k_hw
+ * @flags: Mask to indicate what units to reset
+ *
+ * Same as ath5k_hw_nic_reset but for AHB based devices
+ *
+ * Returns 0 if we are O.K. or -EAGAIN (from ath5k_hw_register_timeout)
*/
-static int ath5k_hw_wisoc_reset(struct ath5k_hw *ah, u32 flags)
+static int
+ath5k_hw_wisoc_reset(struct ath5k_hw *ah, u32 flags)
{
u32 mask = flags ? flags : ~0U;
u32 __iomem *reg;
@@ -422,7 +476,7 @@ static int ath5k_hw_wisoc_reset(struct ath5k_hw *ah, u32 flags)
regval = __raw_readl(reg);
__raw_writel(regval | val, reg);
regval = __raw_readl(reg);
- udelay(100);
+ usleep_range(100, 150);
/* Bring BB/MAC out of reset */
__raw_writel(regval & ~val, reg);
@@ -439,11 +493,23 @@ static int ath5k_hw_wisoc_reset(struct ath5k_hw *ah, u32 flags)
return 0;
}
-
-/*
- * Sleep control
+/**
+ * ath5k_hw_set_power_mode() - Set power mode
+ * @ah: The &struct ath5k_hw
+ * @mode: One of enum ath5k_power_mode
+ * @set_chip: Set to true to write sleep control register
+ * @sleep_duration: How much time the device is allowed to sleep
+ * when sleep logic is enabled (in 128 microsecond increments).
+ *
+ * This function is used to configure sleep policy and allowed
+ * sleep modes. For more information check out the sleep control
+ * register on reg.h and STA_ID1.
+ *
+ * Returns 0 on success, -EIO if chip didn't wake up or -EINVAL if an invalid
+ * mode is requested.
*/
-static int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode,
+static int
+ath5k_hw_set_power_mode(struct ath5k_hw *ah, enum ath5k_power_mode mode,
bool set_chip, u16 sleep_duration)
{
unsigned int i;
@@ -493,7 +559,7 @@ static int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode,
ath5k_hw_reg_write(ah, data | AR5K_SLEEP_CTL_SLE_WAKE,
AR5K_SLEEP_CTL);
- udelay(15);
+ usleep_range(15, 20);
for (i = 200; i > 0; i--) {
/* Check if the chip did wake up */
@@ -502,7 +568,7 @@ static int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode,
break;
/* Wait a bit and retry */
- udelay(50);
+ usleep_range(50, 75);
ath5k_hw_reg_write(ah, data | AR5K_SLEEP_CTL_SLE_WAKE,
AR5K_SLEEP_CTL);
}
@@ -523,17 +589,20 @@ commit:
return 0;
}
-/*
- * Put device on hold
+/**
+ * ath5k_hw_on_hold() - Put device on hold
+ * @ah: The &struct ath5k_hw
*
- * Put MAC and Baseband on warm reset and
- * keep that state (don't clean sleep control
- * register). After this MAC and Baseband are
- * disabled and a full reset is needed to come
- * back. This way we save as much power as possible
+ * Put MAC and Baseband on warm reset and keep that state
+ * (don't clean sleep control register). After this MAC
+ * and Baseband are disabled and a full reset is needed
+ * to come back. This way we save as much power as possible
* without putting the card on full sleep.
+ *
+ * Returns 0 on success or -EIO on error
*/
-int ath5k_hw_on_hold(struct ath5k_hw *ah)
+int
+ath5k_hw_on_hold(struct ath5k_hw *ah)
{
struct pci_dev *pdev = ah->pdev;
u32 bus_flags;
@@ -543,7 +612,7 @@ int ath5k_hw_on_hold(struct ath5k_hw *ah)
return 0;
/* Make sure device is awake */
- ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0);
+ ret = ath5k_hw_set_power_mode(ah, AR5K_PM_AWAKE, true, 0);
if (ret) {
ATH5K_ERR(ah, "failed to wakeup the MAC Chip\n");
return ret;
@@ -563,7 +632,7 @@ int ath5k_hw_on_hold(struct ath5k_hw *ah)
ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU |
AR5K_RESET_CTL_MAC | AR5K_RESET_CTL_DMA |
AR5K_RESET_CTL_PHY | AR5K_RESET_CTL_PCI);
- mdelay(2);
+ usleep_range(2000, 2500);
} else {
ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU |
AR5K_RESET_CTL_BASEBAND | bus_flags);
@@ -575,7 +644,7 @@ int ath5k_hw_on_hold(struct ath5k_hw *ah)
}
/* ...wakeup again!*/
- ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0);
+ ret = ath5k_hw_set_power_mode(ah, AR5K_PM_AWAKE, true, 0);
if (ret) {
ATH5K_ERR(ah, "failed to put device on hold\n");
return ret;
@@ -584,11 +653,18 @@ int ath5k_hw_on_hold(struct ath5k_hw *ah)
return ret;
}
-/*
+/**
+ * ath5k_hw_nic_wakeup() - Force card out of sleep
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
* Bring up MAC + PHY Chips and program PLL
- * Channel is NULL for the initial wakeup.
+ * NOTE: Channel is NULL for the initial wakeup.
+ *
+ * Returns 0 on success, -EIO on hw failure or -EINVAL for invalid channel info
*/
-int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
+int
+ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
{
struct pci_dev *pdev = ah->pdev;
u32 turbo, mode, clock, bus_flags;
@@ -600,7 +676,7 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
if ((ath5k_get_bus_type(ah) != ATH_AHB) || channel) {
/* Wakeup the device */
- ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0);
+ ret = ath5k_hw_set_power_mode(ah, AR5K_PM_AWAKE, true, 0);
if (ret) {
ATH5K_ERR(ah, "failed to wakeup the MAC Chip\n");
return ret;
@@ -621,7 +697,7 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU |
AR5K_RESET_CTL_MAC | AR5K_RESET_CTL_DMA |
AR5K_RESET_CTL_PHY | AR5K_RESET_CTL_PCI);
- mdelay(2);
+ usleep_range(2000, 2500);
} else {
if (ath5k_get_bus_type(ah) == ATH_AHB)
ret = ath5k_hw_wisoc_reset(ah, AR5K_RESET_CTL_PCU |
@@ -637,7 +713,7 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
}
/* ...wakeup again!...*/
- ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0);
+ ret = ath5k_hw_set_power_mode(ah, AR5K_PM_AWAKE, true, 0);
if (ret) {
ATH5K_ERR(ah, "failed to resume the MAC Chip\n");
return ret;
@@ -739,7 +815,7 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
/* ...update PLL if needed */
if (ath5k_hw_reg_read(ah, AR5K_PHY_PLL) != clock) {
ath5k_hw_reg_write(ah, clock, AR5K_PHY_PLL);
- udelay(300);
+ usleep_range(300, 350);
}
/* ...set the PHY operating mode */
@@ -755,8 +831,19 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
* Post-initvals register modifications *
\**************************************/
-/* TODO: Half/Quarter rate */
-static void ath5k_hw_tweak_initval_settings(struct ath5k_hw *ah,
+/**
+ * ath5k_hw_tweak_initval_settings() - Tweak initial settings
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * Some settings are not handled on initvals, e.g. bwmode
+ * settings, some phy settings, workarounds etc that in general
+ * don't fit anywhere else or are too small to introduce a separate
+ * function for each one. So we have this function to handle
+ * them all during reset and complete the card's initialization.
+ */
+static void
+ath5k_hw_tweak_initval_settings(struct ath5k_hw *ah,
struct ieee80211_channel *channel)
{
if (ah->ah_version == AR5K_AR5212 &&
@@ -875,7 +962,16 @@ static void ath5k_hw_tweak_initval_settings(struct ath5k_hw *ah,
}
}
-static void ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah,
+/**
+ * ath5k_hw_commit_eeprom_settings() - Commit settings from EEPROM
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * Use settings stored on EEPROM to properly initialize the card
+ * based on various infos and per-mode calibration data.
+ */
+static void
+ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah,
struct ieee80211_channel *channel)
{
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
@@ -1029,7 +1125,23 @@ static void ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah,
* Main reset function *
\*********************/
-int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
+/**
+ * ath5k_hw_reset() - The main reset function
+ * @ah: The &struct ath5k_hw
+ * @op_mode: One of enum nl80211_iftype
+ * @channel: The &struct ieee80211_channel
+ * @fast: Enable fast channel switching
+ * @skip_pcu: Skip pcu initialization
+ *
+ * This is the function we call each time we want to (re)initialize the
+ * card and pass new settings to hw. We also call it when hw runs into
+ * trouble to make it come back to a working state.
+ *
+ * Returns 0 on success, -EINVAL on invalid op_mode or channel info, or -EIO
+ * on failure.
+ */
+int
+ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
struct ieee80211_channel *channel, bool fast, bool skip_pcu)
{
u32 s_seq[10], s_led[3], tsf_up, tsf_lo;
@@ -1047,7 +1159,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
*/
if (fast && (ah->ah_radio != AR5K_RF2413) &&
(ah->ah_radio != AR5K_RF5413))
- fast = 0;
+ fast = false;
/* Disable sleep clock operation
* to avoid register access delay on certain
@@ -1073,7 +1185,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
if (ret && fast) {
ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
"DMA didn't stop, falling back to normal reset\n");
- fast = 0;
+ fast = false;
/* Non fatal, just continue with
* normal reset */
ret = 0;
@@ -1242,7 +1354,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
/*
* Initialize PCU
*/
- ath5k_hw_pcu_init(ah, op_mode, mode);
+ ath5k_hw_pcu_init(ah, op_mode);
/*
* Initialize PHY
diff --git a/drivers/net/wireless/ath/ath5k/rfbuffer.h b/drivers/net/wireless/ath/ath5k/rfbuffer.h
index 5d11c23b4297..aed34d9954c0 100644
--- a/drivers/net/wireless/ath/ath5k/rfbuffer.h
+++ b/drivers/net/wireless/ath/ath5k/rfbuffer.h
@@ -18,7 +18,9 @@
*/
-/*
+/**
+ * DOC: RF Buffer registers
+ *
* There are some special registers on the RF chip
* that control various operation settings related mostly to
* the analog parts (channel, gain adjustment etc).
@@ -44,40 +46,63 @@
*/
-/*
+/**
+ * struct ath5k_ini_rfbuffer - Initial RF Buffer settings
+ * @rfb_bank: RF Bank number
+ * @rfb_ctrl_register: RF Buffer control register
+ * @rfb_mode_data: RF Buffer data for each mode
+ *
* Struct to hold default mode specific RF
- * register values (RF Banks)
+ * register values (RF Banks) for each chip.
*/
struct ath5k_ini_rfbuffer {
- u8 rfb_bank; /* RF Bank number */
- u16 rfb_ctrl_register; /* RF Buffer control register */
- u32 rfb_mode_data[3]; /* RF Buffer data for each mode */
+ u8 rfb_bank;
+ u16 rfb_ctrl_register;
+ u32 rfb_mode_data[3];
};
-/*
+/**
+ * struct ath5k_rfb_field - An RF Buffer field (register/value)
+ * @len: Field length
+ * @pos: Offset on the raw packet
+ * @col: Used for shifting
+ *
* Struct to hold RF Buffer field
* infos used to access certain RF
* analog registers
*/
struct ath5k_rfb_field {
- u8 len; /* Field length */
- u16 pos; /* Offset on the raw packet */
- u8 col; /* Column -used for shifting */
+ u8 len;
+ u16 pos;
+ u8 col;
};
-/*
- * RF analog register definition
+/**
+ * struct ath5k_rf_reg - RF analog register definition
+ * @bank: RF Buffer Bank number
+ * @index: Register's index on ath5k_rf_regs_idx
+ * @field: The &struct ath5k_rfb_field
+ *
+ * We use this struct to define the set of RF registers
+ * on each chip that we want to tweak. Some RF registers
+ * are common between different chip versions so this saves
+ * us space and complexity because we can refer to an rf
+ * register by its index no matter what chip we work with
+ * as long as it has that register.
*/
struct ath5k_rf_reg {
- u8 bank; /* RF Buffer Bank number */
- u8 index; /* Register's index on rf_regs_idx */
- struct ath5k_rfb_field field; /* RF Buffer field for this register */
+ u8 bank;
+ u8 index;
+ struct ath5k_rfb_field field;
};
-/* Map RF registers to indexes
+/**
+ * enum ath5k_rf_regs_idx - Map RF registers to indexes
+ *
* We do this to handle common bits and make our
* life easier by using an index for each register
- * instead of a full rfb_field */
+ * instead of a full rfb_field
+ */
enum ath5k_rf_regs_idx {
/* BANK 2 */
AR5K_RF_TURBO = 0,
diff --git a/drivers/net/wireless/ath/ath5k/rfgain.h b/drivers/net/wireless/ath/ath5k/rfgain.h
index ebfae052d89e..4d21df0e5975 100644
--- a/drivers/net/wireless/ath/ath5k/rfgain.h
+++ b/drivers/net/wireless/ath/ath5k/rfgain.h
@@ -18,13 +18,17 @@
*
*/
-/*
+/**
+ * struct ath5k_ini_rfgain - RF Gain table
+ * @rfg_register: RF Gain register address
+ * @rfg_value: Register value for 5 and 2GHz
+ *
* Mode-specific RF Gain table (64bytes) for RF5111/5112
* (RF5110 only comes with AR5210 and only supports a/turbo a mode so initial
* RF Gain values are included in AR5K_AR5210_INI)
*/
struct ath5k_ini_rfgain {
- u16 rfg_register; /* RF Gain register address */
+ u16 rfg_register;
u32 rfg_value[2]; /* [freq (see below)] */
};
@@ -455,18 +459,31 @@ static const struct ath5k_ini_rfgain rfgain_2425[] = {
#define AR5K_GAIN_CHECK_ADJUST(_g) \
((_g)->g_current <= (_g)->g_low || (_g)->g_current >= (_g)->g_high)
+/**
+ * struct ath5k_gain_opt_step - An RF gain optimization step
+ * @gos_param: Set of parameters
+ * @gos_gain: Gain
+ */
struct ath5k_gain_opt_step {
s8 gos_param[AR5K_GAIN_CRN_MAX_FIX_BITS];
s8 gos_gain;
};
+/**
+ * struct ath5k_gain_opt - RF Gain optimization ladder
+ * @go_default: The default step
+ * @go_steps_count: How many optimization steps
+ * @go_step: Array of &struct ath5k_gain_opt_step
+ */
struct ath5k_gain_opt {
u8 go_default;
u8 go_steps_count;
const struct ath5k_gain_opt_step go_step[AR5K_GAIN_STEP_COUNT];
};
+
/*
+ * RF5111
* Parameters on gos_param:
* 1) Tx clip PHY register
* 2) PWD 90 RF register
@@ -490,6 +507,7 @@ static const struct ath5k_gain_opt rfgain_opt_5111 = {
};
/*
+ * RF5112
* Parameters on gos_param:
* 1) Mixgain ovr RF register
* 2) PWD 138 RF register
diff --git a/drivers/net/wireless/ath/ath5k/trace.h b/drivers/net/wireless/ath/ath5k/trace.h
index 39f002ed4a88..00f015819344 100644
--- a/drivers/net/wireless/ath/ath5k/trace.h
+++ b/drivers/net/wireless/ath/ath5k/trace.h
@@ -3,7 +3,8 @@
#include <linux/tracepoint.h>
-#ifndef CONFIG_ATH5K_TRACER
+
+#if !defined(CONFIG_ATH5K_TRACER) || defined(__CHECKER__)
#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, ...) \
static inline void trace_ ## name(proto) {}
@@ -93,7 +94,7 @@ TRACE_EVENT(ath5k_tx_complete,
#endif /* __TRACE_ATH5K_H */
-#ifdef CONFIG_ATH5K_TRACER
+#if defined(CONFIG_ATH5K_TRACER) && !defined(__CHECKER__)
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH ../../drivers/net/wireless/ath/ath5k
diff --git a/drivers/net/wireless/ath/ath6kl/Makefile b/drivers/net/wireless/ath/ath6kl/Makefile
index 8f7a0d1c290c..707069303550 100644
--- a/drivers/net/wireless/ath/ath6kl/Makefile
+++ b/drivers/net/wireless/ath/ath6kl/Makefile
@@ -23,7 +23,7 @@
obj-$(CONFIG_ATH6KL) := ath6kl.o
ath6kl-y += debug.o
-ath6kl-y += htc_hif.o
+ath6kl-y += hif.o
ath6kl-y += htc.o
ath6kl-y += bmi.o
ath6kl-y += cfg80211.o
diff --git a/drivers/net/wireless/ath/ath6kl/bmi.c b/drivers/net/wireless/ath/ath6kl/bmi.c
index c5d11cc536e0..bce3575c310a 100644
--- a/drivers/net/wireless/ath/ath6kl/bmi.c
+++ b/drivers/net/wireless/ath/ath6kl/bmi.c
@@ -19,165 +19,6 @@
#include "target.h"
#include "debug.h"
-static int ath6kl_get_bmi_cmd_credits(struct ath6kl *ar)
-{
- u32 addr;
- unsigned long timeout;
- int ret;
-
- ar->bmi.cmd_credits = 0;
-
- /* Read the counter register to get the command credits */
- addr = COUNT_DEC_ADDRESS + (HTC_MAILBOX_NUM_MAX + ENDPOINT1) * 4;
-
- timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT);
- while (time_before(jiffies, timeout) && !ar->bmi.cmd_credits) {
-
- /*
- * Hit the credit counter with a 4-byte access, the first byte
- * read will hit the counter and cause a decrement, while the
- * remaining 3 bytes has no effect. The rationale behind this
- * is to make all HIF accesses 4-byte aligned.
- */
- ret = hif_read_write_sync(ar, addr,
- (u8 *)&ar->bmi.cmd_credits, 4,
- HIF_RD_SYNC_BYTE_INC);
- if (ret) {
- ath6kl_err("Unable to decrement the command credit count register: %d\n",
- ret);
- return ret;
- }
-
- /* The counter is only 8 bits.
- * Ignore anything in the upper 3 bytes
- */
- ar->bmi.cmd_credits &= 0xFF;
- }
-
- if (!ar->bmi.cmd_credits) {
- ath6kl_err("bmi communication timeout\n");
- return -ETIMEDOUT;
- }
-
- return 0;
-}
-
-static int ath6kl_bmi_get_rx_lkahd(struct ath6kl *ar)
-{
- unsigned long timeout;
- u32 rx_word = 0;
- int ret = 0;
-
- timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT);
- while (time_before(jiffies, timeout) && !rx_word) {
- ret = hif_read_write_sync(ar, RX_LOOKAHEAD_VALID_ADDRESS,
- (u8 *)&rx_word, sizeof(rx_word),
- HIF_RD_SYNC_BYTE_INC);
- if (ret) {
- ath6kl_err("unable to read RX_LOOKAHEAD_VALID\n");
- return ret;
- }
-
- /* all we really want is one bit */
- rx_word &= (1 << ENDPOINT1);
- }
-
- if (!rx_word) {
- ath6kl_err("bmi_recv_buf FIFO empty\n");
- return -EINVAL;
- }
-
- return ret;
-}
-
-static int ath6kl_bmi_send_buf(struct ath6kl *ar, u8 *buf, u32 len)
-{
- int ret;
- u32 addr;
-
- ret = ath6kl_get_bmi_cmd_credits(ar);
- if (ret)
- return ret;
-
- addr = ar->mbox_info.htc_addr;
-
- ret = hif_read_write_sync(ar, addr, buf, len,
- HIF_WR_SYNC_BYTE_INC);
- if (ret)
- ath6kl_err("unable to send the bmi data to the device\n");
-
- return ret;
-}
-
-static int ath6kl_bmi_recv_buf(struct ath6kl *ar, u8 *buf, u32 len)
-{
- int ret;
- u32 addr;
-
- /*
- * During normal bootup, small reads may be required.
- * Rather than issue an HIF Read and then wait as the Target
- * adds successive bytes to the FIFO, we wait here until
- * we know that response data is available.
- *
- * This allows us to cleanly timeout on an unexpected
- * Target failure rather than risk problems at the HIF level.
- * In particular, this avoids SDIO timeouts and possibly garbage
- * data on some host controllers. And on an interconnect
- * such as Compact Flash (as well as some SDIO masters) which
- * does not provide any indication on data timeout, it avoids
- * a potential hang or garbage response.
- *
- * Synchronization is more difficult for reads larger than the
- * size of the MBOX FIFO (128B), because the Target is unable
- * to push the 129th byte of data until AFTER the Host posts an
- * HIF Read and removes some FIFO data. So for large reads the
- * Host proceeds to post an HIF Read BEFORE all the data is
- * actually available to read. Fortunately, large BMI reads do
- * not occur in practice -- they're supported for debug/development.
- *
- * So Host/Target BMI synchronization is divided into these cases:
- * CASE 1: length < 4
- * Should not happen
- *
- * CASE 2: 4 <= length <= 128
- * Wait for first 4 bytes to be in FIFO
- * If CONSERVATIVE_BMI_READ is enabled, also wait for
- * a BMI command credit, which indicates that the ENTIRE
- * response is available in the the FIFO
- *
- * CASE 3: length > 128
- * Wait for the first 4 bytes to be in FIFO
- *
- * For most uses, a small timeout should be sufficient and we will
- * usually see a response quickly; but there may be some unusual
- * (debug) cases of BMI_EXECUTE where we want an larger timeout.
- * For now, we use an unbounded busy loop while waiting for
- * BMI_EXECUTE.
- *
- * If BMI_EXECUTE ever needs to support longer-latency execution,
- * especially in production, this code needs to be enhanced to sleep
- * and yield. Also note that BMI_COMMUNICATION_TIMEOUT is currently
- * a function of Host processor speed.
- */
- if (len >= 4) { /* NB: Currently, always true */
- ret = ath6kl_bmi_get_rx_lkahd(ar);
- if (ret)
- return ret;
- }
-
- addr = ar->mbox_info.htc_addr;
- ret = hif_read_write_sync(ar, addr, buf, len,
- HIF_RD_SYNC_BYTE_INC);
- if (ret) {
- ath6kl_err("Unable to read the bmi data from the device: %d\n",
- ret);
- return ret;
- }
-
- return 0;
-}
-
int ath6kl_bmi_done(struct ath6kl *ar)
{
int ret;
@@ -190,14 +31,12 @@ int ath6kl_bmi_done(struct ath6kl *ar)
ar->bmi.done_sent = true;
- ret = ath6kl_bmi_send_buf(ar, (u8 *)&cid, sizeof(cid));
+ ret = ath6kl_hif_bmi_write(ar, (u8 *)&cid, sizeof(cid));
if (ret) {
ath6kl_err("Unable to send bmi done: %d\n", ret);
return ret;
}
- ath6kl_bmi_cleanup(ar);
-
return 0;
}
@@ -212,13 +51,13 @@ int ath6kl_bmi_get_target_info(struct ath6kl *ar,
return -EACCES;
}
- ret = ath6kl_bmi_send_buf(ar, (u8 *)&cid, sizeof(cid));
+ ret = ath6kl_hif_bmi_write(ar, (u8 *)&cid, sizeof(cid));
if (ret) {
ath6kl_err("Unable to send get target info: %d\n", ret);
return ret;
}
- ret = ath6kl_bmi_recv_buf(ar, (u8 *)&targ_info->version,
+ ret = ath6kl_hif_bmi_read(ar, (u8 *)&targ_info->version,
sizeof(targ_info->version));
if (ret) {
ath6kl_err("Unable to recv target info: %d\n", ret);
@@ -227,7 +66,7 @@ int ath6kl_bmi_get_target_info(struct ath6kl *ar,
if (le32_to_cpu(targ_info->version) == TARGET_VERSION_SENTINAL) {
/* Determine how many bytes are in the Target's targ_info */
- ret = ath6kl_bmi_recv_buf(ar,
+ ret = ath6kl_hif_bmi_read(ar,
(u8 *)&targ_info->byte_count,
sizeof(targ_info->byte_count));
if (ret) {
@@ -246,7 +85,7 @@ int ath6kl_bmi_get_target_info(struct ath6kl *ar,
}
/* Read the remainder of the targ_info */
- ret = ath6kl_bmi_recv_buf(ar,
+ ret = ath6kl_hif_bmi_read(ar,
((u8 *)targ_info) +
sizeof(targ_info->byte_count),
sizeof(*targ_info) -
@@ -278,8 +117,8 @@ int ath6kl_bmi_read(struct ath6kl *ar, u32 addr, u8 *buf, u32 len)
return -EACCES;
}
- size = BMI_DATASZ_MAX + sizeof(cid) + sizeof(addr) + sizeof(len);
- if (size > MAX_BMI_CMDBUF_SZ) {
+ size = ar->bmi.max_data_size + sizeof(cid) + sizeof(addr) + sizeof(len);
+ if (size > ar->bmi.max_cmd_size) {
WARN_ON(1);
return -EINVAL;
}
@@ -292,8 +131,8 @@ int ath6kl_bmi_read(struct ath6kl *ar, u32 addr, u8 *buf, u32 len)
len_remain = len;
while (len_remain) {
- rx_len = (len_remain < BMI_DATASZ_MAX) ?
- len_remain : BMI_DATASZ_MAX;
+ rx_len = (len_remain < ar->bmi.max_data_size) ?
+ len_remain : ar->bmi.max_data_size;
offset = 0;
memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
offset += sizeof(cid);
@@ -302,13 +141,13 @@ int ath6kl_bmi_read(struct ath6kl *ar, u32 addr, u8 *buf, u32 len)
memcpy(&(ar->bmi.cmd_buf[offset]), &rx_len, sizeof(rx_len));
offset += sizeof(len);
- ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
+ ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset);
if (ret) {
ath6kl_err("Unable to write to the device: %d\n",
ret);
return ret;
}
- ret = ath6kl_bmi_recv_buf(ar, ar->bmi.cmd_buf, rx_len);
+ ret = ath6kl_hif_bmi_read(ar, ar->bmi.cmd_buf, rx_len);
if (ret) {
ath6kl_err("Unable to read from the device: %d\n",
ret);
@@ -328,7 +167,7 @@ int ath6kl_bmi_write(struct ath6kl *ar, u32 addr, u8 *buf, u32 len)
u32 offset;
u32 len_remain, tx_len;
const u32 header = sizeof(cid) + sizeof(addr) + sizeof(len);
- u8 aligned_buf[BMI_DATASZ_MAX];
+ u8 aligned_buf[400];
u8 *src;
if (ar->bmi.done_sent) {
@@ -336,12 +175,15 @@ int ath6kl_bmi_write(struct ath6kl *ar, u32 addr, u8 *buf, u32 len)
return -EACCES;
}
- if ((BMI_DATASZ_MAX + header) > MAX_BMI_CMDBUF_SZ) {
+ if ((ar->bmi.max_data_size + header) > ar->bmi.max_cmd_size) {
WARN_ON(1);
return -EINVAL;
}
- memset(ar->bmi.cmd_buf, 0, BMI_DATASZ_MAX + header);
+ if (WARN_ON(ar->bmi.max_data_size > sizeof(aligned_buf)))
+ return -E2BIG;
+
+ memset(ar->bmi.cmd_buf, 0, ar->bmi.max_data_size + header);
ath6kl_dbg(ATH6KL_DBG_BMI,
"bmi write memory: addr: 0x%x, len: %d\n", addr, len);
@@ -350,7 +192,7 @@ int ath6kl_bmi_write(struct ath6kl *ar, u32 addr, u8 *buf, u32 len)
while (len_remain) {
src = &buf[len - len_remain];
- if (len_remain < (BMI_DATASZ_MAX - header)) {
+ if (len_remain < (ar->bmi.max_data_size - header)) {
if (len_remain & 3) {
/* align it with 4 bytes */
len_remain = len_remain +
@@ -360,7 +202,7 @@ int ath6kl_bmi_write(struct ath6kl *ar, u32 addr, u8 *buf, u32 len)
}
tx_len = len_remain;
} else {
- tx_len = (BMI_DATASZ_MAX - header);
+ tx_len = (ar->bmi.max_data_size - header);
}
offset = 0;
@@ -373,7 +215,7 @@ int ath6kl_bmi_write(struct ath6kl *ar, u32 addr, u8 *buf, u32 len)
memcpy(&(ar->bmi.cmd_buf[offset]), src, tx_len);
offset += tx_len;
- ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
+ ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset);
if (ret) {
ath6kl_err("Unable to write to the device: %d\n",
ret);
@@ -398,7 +240,7 @@ int ath6kl_bmi_execute(struct ath6kl *ar, u32 addr, u32 *param)
}
size = sizeof(cid) + sizeof(addr) + sizeof(param);
- if (size > MAX_BMI_CMDBUF_SZ) {
+ if (size > ar->bmi.max_cmd_size) {
WARN_ON(1);
return -EINVAL;
}
@@ -415,13 +257,13 @@ int ath6kl_bmi_execute(struct ath6kl *ar, u32 addr, u32 *param)
memcpy(&(ar->bmi.cmd_buf[offset]), param, sizeof(*param));
offset += sizeof(*param);
- ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
+ ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset);
if (ret) {
ath6kl_err("Unable to write to the device: %d\n", ret);
return ret;
}
- ret = ath6kl_bmi_recv_buf(ar, ar->bmi.cmd_buf, sizeof(*param));
+ ret = ath6kl_hif_bmi_read(ar, ar->bmi.cmd_buf, sizeof(*param));
if (ret) {
ath6kl_err("Unable to read from the device: %d\n", ret);
return ret;
@@ -445,7 +287,7 @@ int ath6kl_bmi_set_app_start(struct ath6kl *ar, u32 addr)
}
size = sizeof(cid) + sizeof(addr);
- if (size > MAX_BMI_CMDBUF_SZ) {
+ if (size > ar->bmi.max_cmd_size) {
WARN_ON(1);
return -EINVAL;
}
@@ -459,7 +301,7 @@ int ath6kl_bmi_set_app_start(struct ath6kl *ar, u32 addr)
memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
offset += sizeof(addr);
- ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
+ ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset);
if (ret) {
ath6kl_err("Unable to write to the device: %d\n", ret);
return ret;
@@ -481,7 +323,7 @@ int ath6kl_bmi_reg_read(struct ath6kl *ar, u32 addr, u32 *param)
}
size = sizeof(cid) + sizeof(addr);
- if (size > MAX_BMI_CMDBUF_SZ) {
+ if (size > ar->bmi.max_cmd_size) {
WARN_ON(1);
return -EINVAL;
}
@@ -495,13 +337,13 @@ int ath6kl_bmi_reg_read(struct ath6kl *ar, u32 addr, u32 *param)
memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
offset += sizeof(addr);
- ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
+ ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset);
if (ret) {
ath6kl_err("Unable to write to the device: %d\n", ret);
return ret;
}
- ret = ath6kl_bmi_recv_buf(ar, ar->bmi.cmd_buf, sizeof(*param));
+ ret = ath6kl_hif_bmi_read(ar, ar->bmi.cmd_buf, sizeof(*param));
if (ret) {
ath6kl_err("Unable to read from the device: %d\n", ret);
return ret;
@@ -524,7 +366,7 @@ int ath6kl_bmi_reg_write(struct ath6kl *ar, u32 addr, u32 param)
}
size = sizeof(cid) + sizeof(addr) + sizeof(param);
- if (size > MAX_BMI_CMDBUF_SZ) {
+ if (size > ar->bmi.max_cmd_size) {
WARN_ON(1);
return -EINVAL;
}
@@ -542,7 +384,7 @@ int ath6kl_bmi_reg_write(struct ath6kl *ar, u32 addr, u32 param)
memcpy(&(ar->bmi.cmd_buf[offset]), &param, sizeof(param));
offset += sizeof(param);
- ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
+ ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset);
if (ret) {
ath6kl_err("Unable to write to the device: %d\n", ret);
return ret;
@@ -565,8 +407,8 @@ int ath6kl_bmi_lz_data(struct ath6kl *ar, u8 *buf, u32 len)
return -EACCES;
}
- size = BMI_DATASZ_MAX + header;
- if (size > MAX_BMI_CMDBUF_SZ) {
+ size = ar->bmi.max_data_size + header;
+ if (size > ar->bmi.max_cmd_size) {
WARN_ON(1);
return -EINVAL;
}
@@ -577,8 +419,8 @@ int ath6kl_bmi_lz_data(struct ath6kl *ar, u8 *buf, u32 len)
len_remain = len;
while (len_remain) {
- tx_len = (len_remain < (BMI_DATASZ_MAX - header)) ?
- len_remain : (BMI_DATASZ_MAX - header);
+ tx_len = (len_remain < (ar->bmi.max_data_size - header)) ?
+ len_remain : (ar->bmi.max_data_size - header);
offset = 0;
memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
@@ -589,7 +431,7 @@ int ath6kl_bmi_lz_data(struct ath6kl *ar, u8 *buf, u32 len)
tx_len);
offset += tx_len;
- ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
+ ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset);
if (ret) {
ath6kl_err("Unable to write to the device: %d\n",
ret);
@@ -615,7 +457,7 @@ int ath6kl_bmi_lz_stream_start(struct ath6kl *ar, u32 addr)
}
size = sizeof(cid) + sizeof(addr);
- if (size > MAX_BMI_CMDBUF_SZ) {
+ if (size > ar->bmi.max_cmd_size) {
WARN_ON(1);
return -EINVAL;
}
@@ -631,7 +473,7 @@ int ath6kl_bmi_lz_stream_start(struct ath6kl *ar, u32 addr)
memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
offset += sizeof(addr);
- ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
+ ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset);
if (ret) {
ath6kl_err("Unable to start LZ stream to the device: %d\n",
ret);
@@ -672,10 +514,20 @@ int ath6kl_bmi_fast_download(struct ath6kl *ar, u32 addr, u8 *buf, u32 len)
return ret;
}
+void ath6kl_bmi_reset(struct ath6kl *ar)
+{
+ ar->bmi.done_sent = false;
+}
+
int ath6kl_bmi_init(struct ath6kl *ar)
{
- ar->bmi.cmd_buf = kzalloc(MAX_BMI_CMDBUF_SZ, GFP_ATOMIC);
+ if (WARN_ON(ar->bmi.max_data_size == 0))
+ return -EINVAL;
+
+ /* cmd + addr + len + data_size */
+ ar->bmi.max_cmd_size = ar->bmi.max_data_size + (sizeof(u32) * 3);
+ ar->bmi.cmd_buf = kzalloc(ar->bmi.max_cmd_size, GFP_ATOMIC);
if (!ar->bmi.cmd_buf)
return -ENOMEM;
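
The bmi.c hunks above replace the compile-time BMI_DATASZ_MAX and MAX_BMI_CMDBUF_SZ
limits with per-device fields: the HIF backend is expected to fill in
ar->bmi.max_data_size before ath6kl_bmi_init() runs, and the command buffer is then
sized as that value plus the three u32 header words (cmd + addr + len). The one
remaining fixed-size buffer is the 400-byte aligned_buf bounce buffer in
ath6kl_bmi_write(), now guarded by a WARN_ON against max_data_size growing past it.
A minimal sketch of the sizing rule, using an assumed 256-byte data size purely for
illustration:

#include <linux/types.h>

/* Illustrative only: mirrors the sizing done in ath6kl_bmi_init() above.
 * The 256-byte data size is an assumed example, not something the patch fixes. */
struct bmi_limits {
	u32 max_data_size;	/* filled in by the HIF backend before BMI init */
	u32 max_cmd_size;	/* derived: data size + cmd + addr + len */
};

static void bmi_compute_limits(struct bmi_limits *lim)
{
	lim->max_data_size = 256;				/* example value */
	lim->max_cmd_size = lim->max_data_size + 3 * sizeof(u32);
}
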
diff --git a/drivers/net/wireless/ath/ath6kl/bmi.h b/drivers/net/wireless/ath/ath6kl/bmi.h
index 96851d5df24b..f1ca6812456d 100644
--- a/drivers/net/wireless/ath/ath6kl/bmi.h
+++ b/drivers/net/wireless/ath/ath6kl/bmi.h
@@ -44,12 +44,6 @@
* BMI handles all required Target-side cache flushing.
*/
-#define MAX_BMI_CMDBUF_SZ (BMI_DATASZ_MAX + \
- (sizeof(u32) * 3 /* cmd + addr + len */))
-
-/* Maximum data size used for BMI transfers */
-#define BMI_DATASZ_MAX 256
-
/* BMI Commands */
#define BMI_NO_COMMAND 0
@@ -230,6 +224,8 @@ struct ath6kl_bmi_target_info {
int ath6kl_bmi_init(struct ath6kl *ar);
void ath6kl_bmi_cleanup(struct ath6kl *ar);
+void ath6kl_bmi_reset(struct ath6kl *ar);
+
int ath6kl_bmi_done(struct ath6kl *ar);
int ath6kl_bmi_get_target_info(struct ath6kl *ar,
struct ath6kl_bmi_target_info *targ_info);
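
The cfg80211.c changes that follow are dominated by one mechanical conversion in
preparation for multiple virtual interfaces: per-connection state (SSID, BSSID,
crypto settings, SME state, flags such as CONNECTED) moves from struct ath6kl to
struct ath6kl_vif, and the WMI helpers gain a fw_vif_idx argument so the firmware
knows which interface a command targets. A hedged, self-contained sketch of that
shape; the struct below is illustrative and much smaller than the real ath6kl_vif:

#include <linux/types.h>

/* Illustrative per-vif state; the real driver keeps keys, scan_req, sme_state,
 * timers and more in struct ath6kl_vif. */
struct demo_vif {
	u8 fw_vif_idx;		/* index passed along with every WMI command */
	u8 ssid_len;
	u8 ssid[32];
	unsigned long flags;	/* per-vif bits: CONNECTED, CONNECT_PEND, ... */
};
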
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index f517eb8f7b44..6c59a217b1a1 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -123,17 +123,50 @@ static struct ieee80211_supported_band ath6kl_band_5ghz = {
.bitrates = ath6kl_a_rates,
};
-static int ath6kl_set_wpa_version(struct ath6kl *ar,
+#define CCKM_KRK_CIPHER_SUITE 0x004096ff /* use for KRK */
+
+/* returns true if scheduled scan was stopped */
+static bool __ath6kl_cfg80211_sscan_stop(struct ath6kl_vif *vif)
+{
+ struct ath6kl *ar = vif->ar;
+
+ if (ar->state != ATH6KL_STATE_SCHED_SCAN)
+ return false;
+
+ del_timer_sync(&vif->sched_scan_timer);
+
+ ath6kl_wmi_set_host_sleep_mode_cmd(ar->wmi, vif->fw_vif_idx,
+ ATH6KL_HOST_MODE_AWAKE);
+
+ ar->state = ATH6KL_STATE_ON;
+
+ return true;
+}
+
+static void ath6kl_cfg80211_sscan_disable(struct ath6kl_vif *vif)
+{
+ struct ath6kl *ar = vif->ar;
+ bool stopped;
+
+ stopped = __ath6kl_cfg80211_sscan_stop(vif);
+
+ if (!stopped)
+ return;
+
+ cfg80211_sched_scan_stopped(ar->wiphy);
+}
+
+static int ath6kl_set_wpa_version(struct ath6kl_vif *vif,
enum nl80211_wpa_versions wpa_version)
{
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: %u\n", __func__, wpa_version);
if (!wpa_version) {
- ar->auth_mode = NONE_AUTH;
+ vif->auth_mode = NONE_AUTH;
} else if (wpa_version & NL80211_WPA_VERSION_2) {
- ar->auth_mode = WPA2_AUTH;
+ vif->auth_mode = WPA2_AUTH;
} else if (wpa_version & NL80211_WPA_VERSION_1) {
- ar->auth_mode = WPA_AUTH;
+ vif->auth_mode = WPA_AUTH;
} else {
ath6kl_err("%s: %u not supported\n", __func__, wpa_version);
return -ENOTSUPP;
@@ -142,25 +175,24 @@ static int ath6kl_set_wpa_version(struct ath6kl *ar,
return 0;
}
-static int ath6kl_set_auth_type(struct ath6kl *ar,
+static int ath6kl_set_auth_type(struct ath6kl_vif *vif,
enum nl80211_auth_type auth_type)
{
-
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: 0x%x\n", __func__, auth_type);
switch (auth_type) {
case NL80211_AUTHTYPE_OPEN_SYSTEM:
- ar->dot11_auth_mode = OPEN_AUTH;
+ vif->dot11_auth_mode = OPEN_AUTH;
break;
case NL80211_AUTHTYPE_SHARED_KEY:
- ar->dot11_auth_mode = SHARED_AUTH;
+ vif->dot11_auth_mode = SHARED_AUTH;
break;
case NL80211_AUTHTYPE_NETWORK_EAP:
- ar->dot11_auth_mode = LEAP_AUTH;
+ vif->dot11_auth_mode = LEAP_AUTH;
break;
case NL80211_AUTHTYPE_AUTOMATIC:
- ar->dot11_auth_mode = OPEN_AUTH | SHARED_AUTH;
+ vif->dot11_auth_mode = OPEN_AUTH | SHARED_AUTH;
break;
default:
@@ -171,11 +203,11 @@ static int ath6kl_set_auth_type(struct ath6kl *ar,
return 0;
}
-static int ath6kl_set_cipher(struct ath6kl *ar, u32 cipher, bool ucast)
+static int ath6kl_set_cipher(struct ath6kl_vif *vif, u32 cipher, bool ucast)
{
- u8 *ar_cipher = ucast ? &ar->prwise_crypto : &ar->grp_crypto;
- u8 *ar_cipher_len = ucast ? &ar->prwise_crypto_len :
- &ar->grp_crypto_len;
+ u8 *ar_cipher = ucast ? &vif->prwise_crypto : &vif->grp_crypto;
+ u8 *ar_cipher_len = ucast ? &vif->prwise_crypto_len :
+ &vif->grp_crypto_len;
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: cipher 0x%x, ucast %u\n",
__func__, cipher, ucast);
@@ -202,6 +234,10 @@ static int ath6kl_set_cipher(struct ath6kl *ar, u32 cipher, bool ucast)
*ar_cipher = AES_CRYPT;
*ar_cipher_len = 0;
break;
+ case WLAN_CIPHER_SUITE_SMS4:
+ *ar_cipher = WAPI_CRYPT;
+ *ar_cipher_len = 0;
+ break;
default:
ath6kl_err("cipher 0x%x not supported\n", cipher);
return -ENOTSUPP;
@@ -210,28 +246,35 @@ static int ath6kl_set_cipher(struct ath6kl *ar, u32 cipher, bool ucast)
return 0;
}
-static void ath6kl_set_key_mgmt(struct ath6kl *ar, u32 key_mgmt)
+static void ath6kl_set_key_mgmt(struct ath6kl_vif *vif, u32 key_mgmt)
{
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: 0x%x\n", __func__, key_mgmt);
if (key_mgmt == WLAN_AKM_SUITE_PSK) {
- if (ar->auth_mode == WPA_AUTH)
- ar->auth_mode = WPA_PSK_AUTH;
- else if (ar->auth_mode == WPA2_AUTH)
- ar->auth_mode = WPA2_PSK_AUTH;
+ if (vif->auth_mode == WPA_AUTH)
+ vif->auth_mode = WPA_PSK_AUTH;
+ else if (vif->auth_mode == WPA2_AUTH)
+ vif->auth_mode = WPA2_PSK_AUTH;
+ } else if (key_mgmt == 0x00409600) {
+ if (vif->auth_mode == WPA_AUTH)
+ vif->auth_mode = WPA_AUTH_CCKM;
+ else if (vif->auth_mode == WPA2_AUTH)
+ vif->auth_mode = WPA2_AUTH_CCKM;
} else if (key_mgmt != WLAN_AKM_SUITE_8021X) {
- ar->auth_mode = NONE_AUTH;
+ vif->auth_mode = NONE_AUTH;
}
}
-static bool ath6kl_cfg80211_ready(struct ath6kl *ar)
+static bool ath6kl_cfg80211_ready(struct ath6kl_vif *vif)
{
+ struct ath6kl *ar = vif->ar;
+
if (!test_bit(WMI_READY, &ar->flag)) {
ath6kl_err("wmi is not ready\n");
return false;
}
- if (!test_bit(WLAN_ENABLED, &ar->flag)) {
+ if (!test_bit(WLAN_ENABLED, &vif->flags)) {
ath6kl_err("wlan disabled\n");
return false;
}
@@ -239,15 +282,146 @@ static bool ath6kl_cfg80211_ready(struct ath6kl *ar)
return true;
}
+static bool ath6kl_is_wpa_ie(const u8 *pos)
+{
+ return pos[0] == WLAN_EID_WPA && pos[1] >= 4 &&
+ pos[2] == 0x00 && pos[3] == 0x50 &&
+ pos[4] == 0xf2 && pos[5] == 0x01;
+}
+
+static bool ath6kl_is_rsn_ie(const u8 *pos)
+{
+ return pos[0] == WLAN_EID_RSN;
+}
+
+static bool ath6kl_is_wps_ie(const u8 *pos)
+{
+ return (pos[0] == WLAN_EID_VENDOR_SPECIFIC &&
+ pos[1] >= 4 &&
+ pos[2] == 0x00 && pos[3] == 0x50 && pos[4] == 0xf2 &&
+ pos[5] == 0x04);
+}
+
+static int ath6kl_set_assoc_req_ies(struct ath6kl_vif *vif, const u8 *ies,
+ size_t ies_len)
+{
+ struct ath6kl *ar = vif->ar;
+ const u8 *pos;
+ u8 *buf = NULL;
+ size_t len = 0;
+ int ret;
+
+ /*
+ * Clear previously set flag
+ */
+
+ ar->connect_ctrl_flags &= ~CONNECT_WPS_FLAG;
+
+ /*
+ * Filter out RSN/WPA IE(s)
+ */
+
+ if (ies && ies_len) {
+ buf = kmalloc(ies_len, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+ pos = ies;
+
+ while (pos + 1 < ies + ies_len) {
+ if (pos + 2 + pos[1] > ies + ies_len)
+ break;
+ if (!(ath6kl_is_wpa_ie(pos) || ath6kl_is_rsn_ie(pos))) {
+ memcpy(buf + len, pos, 2 + pos[1]);
+ len += 2 + pos[1];
+ }
+
+ if (ath6kl_is_wps_ie(pos))
+ ar->connect_ctrl_flags |= CONNECT_WPS_FLAG;
+
+ pos += 2 + pos[1];
+ }
+ }
+
+ ret = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx,
+ WMI_FRAME_ASSOC_REQ, buf, len);
+ kfree(buf);
+ return ret;
+}
+
+static int ath6kl_nliftype_to_drv_iftype(enum nl80211_iftype type, u8 *nw_type)
+{
+ switch (type) {
+ case NL80211_IFTYPE_STATION:
+ *nw_type = INFRA_NETWORK;
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ *nw_type = ADHOC_NETWORK;
+ break;
+ case NL80211_IFTYPE_AP:
+ *nw_type = AP_NETWORK;
+ break;
+ case NL80211_IFTYPE_P2P_CLIENT:
+ *nw_type = INFRA_NETWORK;
+ break;
+ case NL80211_IFTYPE_P2P_GO:
+ *nw_type = AP_NETWORK;
+ break;
+ default:
+ ath6kl_err("invalid interface type %u\n", type);
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static bool ath6kl_is_valid_iftype(struct ath6kl *ar, enum nl80211_iftype type,
+ u8 *if_idx, u8 *nw_type)
+{
+ int i;
+
+ if (ath6kl_nliftype_to_drv_iftype(type, nw_type))
+ return false;
+
+ if (ar->ibss_if_active || ((type == NL80211_IFTYPE_ADHOC) &&
+ ar->num_vif))
+ return false;
+
+ if (type == NL80211_IFTYPE_STATION ||
+ type == NL80211_IFTYPE_AP || type == NL80211_IFTYPE_ADHOC) {
+ for (i = 0; i < ar->vif_max; i++) {
+ if ((ar->avail_idx_map >> i) & BIT(0)) {
+ *if_idx = i;
+ return true;
+ }
+ }
+ }
+
+ if (type == NL80211_IFTYPE_P2P_CLIENT ||
+ type == NL80211_IFTYPE_P2P_GO) {
+ for (i = ar->max_norm_iface; i < ar->vif_max; i++) {
+ if ((ar->avail_idx_map >> i) & BIT(0)) {
+ *if_idx = i;
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_connect_params *sme)
{
struct ath6kl *ar = ath6kl_priv(dev);
+ struct ath6kl_vif *vif = netdev_priv(dev);
int status;
+ u8 nw_subtype = (ar->p2p) ? SUBTYPE_P2PDEV : SUBTYPE_NONE;
- ar->sme_state = SME_CONNECTING;
+ ath6kl_cfg80211_sscan_disable(vif);
- if (!ath6kl_cfg80211_ready(ar))
+ vif->sme_state = SME_CONNECTING;
+
+ if (!ath6kl_cfg80211_ready(vif))
return -EIO;
if (test_bit(DESTROY_IN_PROGRESS, &ar->flag)) {
@@ -287,12 +461,22 @@ static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
}
}
- if (test_bit(CONNECTED, &ar->flag) &&
- ar->ssid_len == sme->ssid_len &&
- !memcmp(ar->ssid, sme->ssid, ar->ssid_len)) {
- ar->reconnect_flag = true;
- status = ath6kl_wmi_reconnect_cmd(ar->wmi, ar->req_bssid,
- ar->ch_hint);
+ if (sme->ie && (sme->ie_len > 0)) {
+ status = ath6kl_set_assoc_req_ies(vif, sme->ie, sme->ie_len);
+ if (status) {
+ up(&ar->sem);
+ return status;
+ }
+ } else
+ ar->connect_ctrl_flags &= ~CONNECT_WPS_FLAG;
+
+ if (test_bit(CONNECTED, &vif->flags) &&
+ vif->ssid_len == sme->ssid_len &&
+ !memcmp(vif->ssid, sme->ssid, vif->ssid_len)) {
+ vif->reconnect_flag = true;
+ status = ath6kl_wmi_reconnect_cmd(ar->wmi, vif->fw_vif_idx,
+ vif->req_bssid,
+ vif->ch_hint);
up(&ar->sem);
if (status) {
@@ -300,42 +484,43 @@ static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
return -EIO;
}
return 0;
- } else if (ar->ssid_len == sme->ssid_len &&
- !memcmp(ar->ssid, sme->ssid, ar->ssid_len)) {
- ath6kl_disconnect(ar);
+ } else if (vif->ssid_len == sme->ssid_len &&
+ !memcmp(vif->ssid, sme->ssid, vif->ssid_len)) {
+ ath6kl_disconnect(vif);
}
- memset(ar->ssid, 0, sizeof(ar->ssid));
- ar->ssid_len = sme->ssid_len;
- memcpy(ar->ssid, sme->ssid, sme->ssid_len);
+ memset(vif->ssid, 0, sizeof(vif->ssid));
+ vif->ssid_len = sme->ssid_len;
+ memcpy(vif->ssid, sme->ssid, sme->ssid_len);
if (sme->channel)
- ar->ch_hint = sme->channel->center_freq;
+ vif->ch_hint = sme->channel->center_freq;
- memset(ar->req_bssid, 0, sizeof(ar->req_bssid));
+ memset(vif->req_bssid, 0, sizeof(vif->req_bssid));
if (sme->bssid && !is_broadcast_ether_addr(sme->bssid))
- memcpy(ar->req_bssid, sme->bssid, sizeof(ar->req_bssid));
+ memcpy(vif->req_bssid, sme->bssid, sizeof(vif->req_bssid));
- ath6kl_set_wpa_version(ar, sme->crypto.wpa_versions);
+ ath6kl_set_wpa_version(vif, sme->crypto.wpa_versions);
- status = ath6kl_set_auth_type(ar, sme->auth_type);
+ status = ath6kl_set_auth_type(vif, sme->auth_type);
if (status) {
up(&ar->sem);
return status;
}
if (sme->crypto.n_ciphers_pairwise)
- ath6kl_set_cipher(ar, sme->crypto.ciphers_pairwise[0], true);
+ ath6kl_set_cipher(vif, sme->crypto.ciphers_pairwise[0], true);
else
- ath6kl_set_cipher(ar, 0, true);
+ ath6kl_set_cipher(vif, 0, true);
- ath6kl_set_cipher(ar, sme->crypto.cipher_group, false);
+ ath6kl_set_cipher(vif, sme->crypto.cipher_group, false);
if (sme->crypto.n_akm_suites)
- ath6kl_set_key_mgmt(ar, sme->crypto.akm_suites[0]);
+ ath6kl_set_key_mgmt(vif, sme->crypto.akm_suites[0]);
if ((sme->key_len) &&
- (ar->auth_mode == NONE_AUTH) && (ar->prwise_crypto == WEP_CRYPT)) {
+ (vif->auth_mode == NONE_AUTH) &&
+ (vif->prwise_crypto == WEP_CRYPT)) {
struct ath6kl_key *key = NULL;
if (sme->key_idx < WMI_MIN_KEY_INDEX ||
@@ -346,56 +531,60 @@ static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
return -ENOENT;
}
- key = &ar->keys[sme->key_idx];
+ key = &vif->keys[sme->key_idx];
key->key_len = sme->key_len;
memcpy(key->key, sme->key, key->key_len);
- key->cipher = ar->prwise_crypto;
- ar->def_txkey_index = sme->key_idx;
+ key->cipher = vif->prwise_crypto;
+ vif->def_txkey_index = sme->key_idx;
- ath6kl_wmi_addkey_cmd(ar->wmi, sme->key_idx,
- ar->prwise_crypto,
+ ath6kl_wmi_addkey_cmd(ar->wmi, vif->fw_vif_idx, sme->key_idx,
+ vif->prwise_crypto,
GROUP_USAGE | TX_USAGE,
key->key_len,
- NULL,
+ NULL, 0,
key->key, KEY_OP_INIT_VAL, NULL,
NO_SYNC_WMIFLAG);
}
if (!ar->usr_bss_filter) {
- clear_bit(CLEAR_BSSFILTER_ON_BEACON, &ar->flag);
- if (ath6kl_wmi_bssfilter_cmd(ar->wmi, ALL_BSS_FILTER, 0) != 0) {
+ clear_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags);
+ if (ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
+ ALL_BSS_FILTER, 0) != 0) {
ath6kl_err("couldn't set bss filtering\n");
up(&ar->sem);
return -EIO;
}
}
- ar->nw_type = ar->next_mode;
+ vif->nw_type = vif->next_mode;
+
+ if (vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT)
+ nw_subtype = SUBTYPE_P2PCLIENT;
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
"%s: connect called with authmode %d dot11 auth %d"
" PW crypto %d PW crypto len %d GRP crypto %d"
" GRP crypto len %d channel hint %u\n",
__func__,
- ar->auth_mode, ar->dot11_auth_mode, ar->prwise_crypto,
- ar->prwise_crypto_len, ar->grp_crypto,
- ar->grp_crypto_len, ar->ch_hint);
-
- ar->reconnect_flag = 0;
- status = ath6kl_wmi_connect_cmd(ar->wmi, ar->nw_type,
- ar->dot11_auth_mode, ar->auth_mode,
- ar->prwise_crypto,
- ar->prwise_crypto_len,
- ar->grp_crypto, ar->grp_crypto_len,
- ar->ssid_len, ar->ssid,
- ar->req_bssid, ar->ch_hint,
- ar->connect_ctrl_flags);
+ vif->auth_mode, vif->dot11_auth_mode, vif->prwise_crypto,
+ vif->prwise_crypto_len, vif->grp_crypto,
+ vif->grp_crypto_len, vif->ch_hint);
+
+ vif->reconnect_flag = 0;
+ status = ath6kl_wmi_connect_cmd(ar->wmi, vif->fw_vif_idx, vif->nw_type,
+ vif->dot11_auth_mode, vif->auth_mode,
+ vif->prwise_crypto,
+ vif->prwise_crypto_len,
+ vif->grp_crypto, vif->grp_crypto_len,
+ vif->ssid_len, vif->ssid,
+ vif->req_bssid, vif->ch_hint,
+ ar->connect_ctrl_flags, nw_subtype);
up(&ar->sem);
if (status == -EINVAL) {
- memset(ar->ssid, 0, sizeof(ar->ssid));
- ar->ssid_len = 0;
+ memset(vif->ssid, 0, sizeof(vif->ssid));
+ vif->ssid_len = 0;
ath6kl_err("invalid request\n");
return -ENOENT;
} else if (status) {
@@ -404,28 +593,40 @@ static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
}
if ((!(ar->connect_ctrl_flags & CONNECT_DO_WPA_OFFLOAD)) &&
- ((ar->auth_mode == WPA_PSK_AUTH)
- || (ar->auth_mode == WPA2_PSK_AUTH))) {
- mod_timer(&ar->disconnect_timer,
+ ((vif->auth_mode == WPA_PSK_AUTH)
+ || (vif->auth_mode == WPA2_PSK_AUTH))) {
+ mod_timer(&vif->disconnect_timer,
jiffies + msecs_to_jiffies(DISCON_TIMER_INTVAL));
}
ar->connect_ctrl_flags &= ~CONNECT_DO_WPA_OFFLOAD;
- set_bit(CONNECT_PEND, &ar->flag);
+ set_bit(CONNECT_PEND, &vif->flags);
return 0;
}
-static int ath6kl_add_bss_if_needed(struct ath6kl *ar, const u8 *bssid,
+static int ath6kl_add_bss_if_needed(struct ath6kl_vif *vif,
+ enum network_type nw_type,
+ const u8 *bssid,
struct ieee80211_channel *chan,
const u8 *beacon_ie, size_t beacon_ie_len)
{
+ struct ath6kl *ar = vif->ar;
struct cfg80211_bss *bss;
+ u16 cap_mask, cap_val;
u8 *ie;
- bss = cfg80211_get_bss(ar->wdev->wiphy, chan, bssid,
- ar->ssid, ar->ssid_len, WLAN_CAPABILITY_ESS,
- WLAN_CAPABILITY_ESS);
+ if (nw_type & ADHOC_NETWORK) {
+ cap_mask = WLAN_CAPABILITY_IBSS;
+ cap_val = WLAN_CAPABILITY_IBSS;
+ } else {
+ cap_mask = WLAN_CAPABILITY_ESS;
+ cap_val = WLAN_CAPABILITY_ESS;
+ }
+
+ bss = cfg80211_get_bss(ar->wiphy, chan, bssid,
+ vif->ssid, vif->ssid_len,
+ cap_mask, cap_val);
if (bss == NULL) {
/*
* Since cfg80211 may not yet know about the BSS,
@@ -435,21 +636,20 @@ static int ath6kl_add_bss_if_needed(struct ath6kl *ar, const u8 *bssid,
* Prepend SSID element since it is not included in the Beacon
* IEs from the target.
*/
- ie = kmalloc(2 + ar->ssid_len + beacon_ie_len, GFP_KERNEL);
+ ie = kmalloc(2 + vif->ssid_len + beacon_ie_len, GFP_KERNEL);
if (ie == NULL)
return -ENOMEM;
ie[0] = WLAN_EID_SSID;
- ie[1] = ar->ssid_len;
- memcpy(ie + 2, ar->ssid, ar->ssid_len);
- memcpy(ie + 2 + ar->ssid_len, beacon_ie, beacon_ie_len);
- bss = cfg80211_inform_bss(ar->wdev->wiphy, chan,
- bssid, 0, WLAN_CAPABILITY_ESS, 100,
- ie, 2 + ar->ssid_len + beacon_ie_len,
+ ie[1] = vif->ssid_len;
+ memcpy(ie + 2, vif->ssid, vif->ssid_len);
+ memcpy(ie + 2 + vif->ssid_len, beacon_ie, beacon_ie_len);
+ bss = cfg80211_inform_bss(ar->wiphy, chan,
+ bssid, 0, cap_val, 100,
+ ie, 2 + vif->ssid_len + beacon_ie_len,
0, GFP_KERNEL);
if (bss)
- ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "added dummy bss for "
- "%pM prior to indicating connect/roamed "
- "event\n", bssid);
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "added bss %pM to "
+ "cfg80211\n", bssid);
kfree(ie);
} else
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "cfg80211 already has a bss "
@@ -463,7 +663,7 @@ static int ath6kl_add_bss_if_needed(struct ath6kl *ar, const u8 *bssid,
return 0;
}
-void ath6kl_cfg80211_connect_event(struct ath6kl *ar, u16 channel,
+void ath6kl_cfg80211_connect_event(struct ath6kl_vif *vif, u16 channel,
u8 *bssid, u16 listen_intvl,
u16 beacon_intvl,
enum network_type nw_type,
@@ -471,6 +671,7 @@ void ath6kl_cfg80211_connect_event(struct ath6kl *ar, u16 channel,
u8 assoc_resp_len, u8 *assoc_info)
{
struct ieee80211_channel *chan;
+ struct ath6kl *ar = vif->ar;
/* capinfo + listen interval */
u8 assoc_req_ie_offset = sizeof(u16) + sizeof(u16);
@@ -489,11 +690,11 @@ void ath6kl_cfg80211_connect_event(struct ath6kl *ar, u16 channel,
* Store Beacon interval here; DTIM period will be available only once
* a Beacon frame from the AP is seen.
*/
- ar->assoc_bss_beacon_int = beacon_intvl;
- clear_bit(DTIM_PERIOD_AVAIL, &ar->flag);
+ vif->assoc_bss_beacon_int = beacon_intvl;
+ clear_bit(DTIM_PERIOD_AVAIL, &vif->flags);
if (nw_type & ADHOC_NETWORK) {
- if (ar->wdev->iftype != NL80211_IFTYPE_ADHOC) {
+ if (vif->wdev.iftype != NL80211_IFTYPE_ADHOC) {
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
"%s: ath6k not in ibss mode\n", __func__);
return;
@@ -501,39 +702,39 @@ void ath6kl_cfg80211_connect_event(struct ath6kl *ar, u16 channel,
}
if (nw_type & INFRA_NETWORK) {
- if (ar->wdev->iftype != NL80211_IFTYPE_STATION &&
- ar->wdev->iftype != NL80211_IFTYPE_P2P_CLIENT) {
+ if (vif->wdev.iftype != NL80211_IFTYPE_STATION &&
+ vif->wdev.iftype != NL80211_IFTYPE_P2P_CLIENT) {
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
"%s: ath6k not in station mode\n", __func__);
return;
}
}
- chan = ieee80211_get_channel(ar->wdev->wiphy, (int) channel);
-
+ chan = ieee80211_get_channel(ar->wiphy, (int) channel);
- if (nw_type & ADHOC_NETWORK) {
- cfg80211_ibss_joined(ar->net_dev, bssid, GFP_KERNEL);
+ if (ath6kl_add_bss_if_needed(vif, nw_type, bssid, chan, assoc_info,
+ beacon_ie_len) < 0) {
+ ath6kl_err("could not add cfg80211 bss entry\n");
return;
}
- if (ath6kl_add_bss_if_needed(ar, bssid, chan, assoc_info,
- beacon_ie_len) < 0) {
- ath6kl_err("could not add cfg80211 bss entry for "
- "connect/roamed notification\n");
+ if (nw_type & ADHOC_NETWORK) {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "ad-hoc %s selected\n",
+ nw_type & ADHOC_CREATOR ? "creator" : "joiner");
+ cfg80211_ibss_joined(vif->ndev, bssid, GFP_KERNEL);
return;
}
- if (ar->sme_state == SME_CONNECTING) {
+ if (vif->sme_state == SME_CONNECTING) {
/* inform connect result to cfg80211 */
- ar->sme_state = SME_CONNECTED;
- cfg80211_connect_result(ar->net_dev, bssid,
+ vif->sme_state = SME_CONNECTED;
+ cfg80211_connect_result(vif->ndev, bssid,
assoc_req_ie, assoc_req_len,
assoc_resp_ie, assoc_resp_len,
WLAN_STATUS_SUCCESS, GFP_KERNEL);
- } else if (ar->sme_state == SME_CONNECTED) {
+ } else if (vif->sme_state == SME_CONNECTED) {
/* inform roam event to cfg80211 */
- cfg80211_roamed(ar->net_dev, chan, bssid,
+ cfg80211_roamed(vif->ndev, chan, bssid,
assoc_req_ie, assoc_req_len,
assoc_resp_ie, assoc_resp_len, GFP_KERNEL);
}
@@ -542,12 +743,15 @@ void ath6kl_cfg80211_connect_event(struct ath6kl *ar, u16 channel,
static int ath6kl_cfg80211_disconnect(struct wiphy *wiphy,
struct net_device *dev, u16 reason_code)
{
- struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(dev);
+ struct ath6kl *ar = ath6kl_priv(dev);
+ struct ath6kl_vif *vif = netdev_priv(dev);
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: reason=%u\n", __func__,
reason_code);
- if (!ath6kl_cfg80211_ready(ar))
+ ath6kl_cfg80211_sscan_disable(vif);
+
+ if (!ath6kl_cfg80211_ready(vif))
return -EIO;
if (test_bit(DESTROY_IN_PROGRESS, &ar->flag)) {
@@ -560,44 +764,46 @@ static int ath6kl_cfg80211_disconnect(struct wiphy *wiphy,
return -ERESTARTSYS;
}
- ar->reconnect_flag = 0;
- ath6kl_disconnect(ar);
- memset(ar->ssid, 0, sizeof(ar->ssid));
- ar->ssid_len = 0;
+ vif->reconnect_flag = 0;
+ ath6kl_disconnect(vif);
+ memset(vif->ssid, 0, sizeof(vif->ssid));
+ vif->ssid_len = 0;
if (!test_bit(SKIP_SCAN, &ar->flag))
- memset(ar->req_bssid, 0, sizeof(ar->req_bssid));
+ memset(vif->req_bssid, 0, sizeof(vif->req_bssid));
up(&ar->sem);
- ar->sme_state = SME_DISCONNECTED;
+ vif->sme_state = SME_DISCONNECTED;
return 0;
}
-void ath6kl_cfg80211_disconnect_event(struct ath6kl *ar, u8 reason,
+void ath6kl_cfg80211_disconnect_event(struct ath6kl_vif *vif, u8 reason,
u8 *bssid, u8 assoc_resp_len,
u8 *assoc_info, u16 proto_reason)
{
- if (ar->scan_req) {
- cfg80211_scan_done(ar->scan_req, true);
- ar->scan_req = NULL;
+ struct ath6kl *ar = vif->ar;
+
+ if (vif->scan_req) {
+ cfg80211_scan_done(vif->scan_req, true);
+ vif->scan_req = NULL;
}
- if (ar->nw_type & ADHOC_NETWORK) {
- if (ar->wdev->iftype != NL80211_IFTYPE_ADHOC) {
+ if (vif->nw_type & ADHOC_NETWORK) {
+ if (vif->wdev.iftype != NL80211_IFTYPE_ADHOC) {
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
"%s: ath6k not in ibss mode\n", __func__);
return;
}
memset(bssid, 0, ETH_ALEN);
- cfg80211_ibss_joined(ar->net_dev, bssid, GFP_KERNEL);
+ cfg80211_ibss_joined(vif->ndev, bssid, GFP_KERNEL);
return;
}
- if (ar->nw_type & INFRA_NETWORK) {
- if (ar->wdev->iftype != NL80211_IFTYPE_STATION &&
- ar->wdev->iftype != NL80211_IFTYPE_P2P_CLIENT) {
+ if (vif->nw_type & INFRA_NETWORK) {
+ if (vif->wdev.iftype != NL80211_IFTYPE_STATION &&
+ vif->wdev.iftype != NL80211_IFTYPE_P2P_CLIENT) {
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
"%s: ath6k not in station mode\n", __func__);
return;
@@ -614,42 +820,46 @@ void ath6kl_cfg80211_disconnect_event(struct ath6kl *ar, u8 reason,
*/
if (reason != DISCONNECT_CMD) {
- ath6kl_wmi_disconnect_cmd(ar->wmi);
+ ath6kl_wmi_disconnect_cmd(ar->wmi, vif->fw_vif_idx);
return;
}
- clear_bit(CONNECT_PEND, &ar->flag);
+ clear_bit(CONNECT_PEND, &vif->flags);
- if (ar->sme_state == SME_CONNECTING) {
- cfg80211_connect_result(ar->net_dev,
+ if (vif->sme_state == SME_CONNECTING) {
+ cfg80211_connect_result(vif->ndev,
bssid, NULL, 0,
NULL, 0,
WLAN_STATUS_UNSPECIFIED_FAILURE,
GFP_KERNEL);
- } else if (ar->sme_state == SME_CONNECTED) {
- cfg80211_disconnected(ar->net_dev, reason,
+ } else if (vif->sme_state == SME_CONNECTED) {
+ cfg80211_disconnected(vif->ndev, reason,
NULL, 0, GFP_KERNEL);
}
- ar->sme_state = SME_DISCONNECTED;
+ vif->sme_state = SME_DISCONNECTED;
}
static int ath6kl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
struct cfg80211_scan_request *request)
{
- struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev);
+ struct ath6kl *ar = ath6kl_priv(ndev);
+ struct ath6kl_vif *vif = netdev_priv(ndev);
s8 n_channels = 0;
u16 *channels = NULL;
int ret = 0;
+ u32 force_fg_scan = 0;
- if (!ath6kl_cfg80211_ready(ar))
+ if (!ath6kl_cfg80211_ready(vif))
return -EIO;
+ ath6kl_cfg80211_sscan_disable(vif);
+
if (!ar->usr_bss_filter) {
- clear_bit(CLEAR_BSSFILTER_ON_BEACON, &ar->flag);
+ clear_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags);
ret = ath6kl_wmi_bssfilter_cmd(
- ar->wmi,
- (test_bit(CONNECTED, &ar->flag) ?
+ ar->wmi, vif->fw_vif_idx,
+ (test_bit(CONNECTED, &vif->flags) ?
ALL_BUT_BSS_FILTER : ALL_BSS_FILTER), 0);
if (ret) {
ath6kl_err("couldn't set bss filtering\n");
@@ -664,14 +874,19 @@ static int ath6kl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
request->n_ssids = MAX_PROBED_SSID_INDEX - 1;
for (i = 0; i < request->n_ssids; i++)
- ath6kl_wmi_probedssid_cmd(ar->wmi, i + 1,
- SPECIFIC_SSID_FLAG,
+ ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx,
+ i + 1, SPECIFIC_SSID_FLAG,
request->ssids[i].ssid_len,
request->ssids[i].ssid);
}
+ /*
+ * FIXME: we should clear the IE in fw if it's not set so just
+ * remove the check altogether
+ */
if (request->ie) {
- ret = ath6kl_wmi_set_appie_cmd(ar->wmi, WMI_FRAME_PROBE_REQ,
+ ret = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx,
+ WMI_FRAME_PROBE_REQ,
request->ie, request->ie_len);
if (ret) {
ath6kl_err("failed to set Probe Request appie for "
@@ -702,44 +917,63 @@ static int ath6kl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
channels[i] = request->channels[i]->center_freq;
}
- ret = ath6kl_wmi_startscan_cmd(ar->wmi, WMI_LONG_SCAN, 0,
- false, 0, 0, n_channels, channels);
+ if (test_bit(CONNECTED, &vif->flags))
+ force_fg_scan = 1;
+
+ if (test_bit(ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX,
+ ar->fw_capabilities)) {
+ /*
+ * If capable of doing P2P mgmt operations using
+ * station interface, send additional information like
+ * supported rates to advertise and xmit rates for
+ * probe requests
+ */
+ ret = ath6kl_wmi_beginscan_cmd(ar->wmi, vif->fw_vif_idx,
+ WMI_LONG_SCAN, force_fg_scan,
+ false, 0, 0, n_channels,
+ channels, request->no_cck,
+ request->rates);
+ } else {
+ ret = ath6kl_wmi_startscan_cmd(ar->wmi, vif->fw_vif_idx,
+ WMI_LONG_SCAN, force_fg_scan,
+ false, 0, 0, n_channels,
+ channels);
+ }
if (ret)
ath6kl_err("wmi_startscan_cmd failed\n");
else
- ar->scan_req = request;
+ vif->scan_req = request;
kfree(channels);
return ret;
}
-void ath6kl_cfg80211_scan_complete_event(struct ath6kl *ar, int status)
+void ath6kl_cfg80211_scan_complete_event(struct ath6kl_vif *vif, bool aborted)
{
+ struct ath6kl *ar = vif->ar;
int i;
- ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: status %d\n", __func__, status);
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: status%s\n", __func__,
+ aborted ? " aborted" : "");
- if (!ar->scan_req)
+ if (!vif->scan_req)
return;
- if ((status == -ECANCELED) || (status == -EBUSY)) {
- cfg80211_scan_done(ar->scan_req, true);
+ if (aborted)
goto out;
- }
- cfg80211_scan_done(ar->scan_req, false);
-
- if (ar->scan_req->n_ssids && ar->scan_req->ssids[0].ssid_len) {
- for (i = 0; i < ar->scan_req->n_ssids; i++) {
- ath6kl_wmi_probedssid_cmd(ar->wmi, i + 1,
- DISABLE_SSID_FLAG,
+ if (vif->scan_req->n_ssids && vif->scan_req->ssids[0].ssid_len) {
+ for (i = 0; i < vif->scan_req->n_ssids; i++) {
+ ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx,
+ i + 1, DISABLE_SSID_FLAG,
0, NULL);
}
}
out:
- ar->scan_req = NULL;
+ cfg80211_scan_done(vif->scan_req, aborted);
+ vif->scan_req = NULL;
}
static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
@@ -747,15 +981,22 @@ static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
const u8 *mac_addr,
struct key_params *params)
{
- struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev);
+ struct ath6kl *ar = ath6kl_priv(ndev);
+ struct ath6kl_vif *vif = netdev_priv(ndev);
struct ath6kl_key *key = NULL;
u8 key_usage;
u8 key_type;
- int status = 0;
- if (!ath6kl_cfg80211_ready(ar))
+ if (!ath6kl_cfg80211_ready(vif))
return -EIO;
+ if (params->cipher == CCKM_KRK_CIPHER_SUITE) {
+ if (params->key_len != WMI_KRK_LEN)
+ return -EINVAL;
+ return ath6kl_wmi_add_krk_cmd(ar->wmi, vif->fw_vif_idx,
+ params->key);
+ }
+
if (key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) {
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
"%s: key index %d out of bounds\n", __func__,
@@ -763,7 +1004,7 @@ static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
return -ENOENT;
}
- key = &ar->keys[key_index];
+ key = &vif->keys[key_index];
memset(key, 0, sizeof(struct ath6kl_key));
if (pairwise)
@@ -772,13 +1013,19 @@ static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
key_usage = GROUP_USAGE;
if (params) {
+ int seq_len = params->seq_len;
+ if (params->cipher == WLAN_CIPHER_SUITE_SMS4 &&
+ seq_len > ATH6KL_KEY_SEQ_LEN) {
+ /* Only first half of the WPI PN is configured */
+ seq_len = ATH6KL_KEY_SEQ_LEN;
+ }
if (params->key_len > WLAN_MAX_KEY_LEN ||
- params->seq_len > sizeof(key->seq))
+ seq_len > sizeof(key->seq))
return -EINVAL;
key->key_len = params->key_len;
memcpy(key->key, params->key, key->key_len);
- key->seq_len = params->seq_len;
+ key->seq_len = seq_len;
memcpy(key->seq, params->seq, key->seq_len);
key->cipher = params->cipher;
}
@@ -796,31 +1043,33 @@ static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
case WLAN_CIPHER_SUITE_CCMP:
key_type = AES_CRYPT;
break;
+ case WLAN_CIPHER_SUITE_SMS4:
+ key_type = WAPI_CRYPT;
+ break;
default:
return -ENOTSUPP;
}
- if (((ar->auth_mode == WPA_PSK_AUTH)
- || (ar->auth_mode == WPA2_PSK_AUTH))
+ if (((vif->auth_mode == WPA_PSK_AUTH)
+ || (vif->auth_mode == WPA2_PSK_AUTH))
&& (key_usage & GROUP_USAGE))
- del_timer(&ar->disconnect_timer);
+ del_timer(&vif->disconnect_timer);
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
"%s: index %d, key_len %d, key_type 0x%x, key_usage 0x%x, seq_len %d\n",
__func__, key_index, key->key_len, key_type,
key_usage, key->seq_len);
- ar->def_txkey_index = key_index;
-
- if (ar->nw_type == AP_NETWORK && !pairwise &&
- (key_type == TKIP_CRYPT || key_type == AES_CRYPT) && params) {
+ if (vif->nw_type == AP_NETWORK && !pairwise &&
+ (key_type == TKIP_CRYPT || key_type == AES_CRYPT ||
+ key_type == WAPI_CRYPT) && params) {
ar->ap_mode_bkey.valid = true;
ar->ap_mode_bkey.key_index = key_index;
ar->ap_mode_bkey.key_type = key_type;
ar->ap_mode_bkey.key_len = key->key_len;
memcpy(ar->ap_mode_bkey.key, key->key, key->key_len);
- if (!test_bit(CONNECTED, &ar->flag)) {
+ if (!test_bit(CONNECTED, &vif->flags)) {
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "Delay initial group "
"key configuration until AP mode has been "
"started\n");
@@ -832,8 +1081,8 @@ static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
}
}
- if (ar->next_mode == AP_NETWORK && key_type == WEP_CRYPT &&
- !test_bit(CONNECTED, &ar->flag)) {
+ if (vif->next_mode == AP_NETWORK && key_type == WEP_CRYPT &&
+ !test_bit(CONNECTED, &vif->flags)) {
/*
* Store the key locally so that it can be re-configured after
* the AP mode has properly started
@@ -841,31 +1090,29 @@ static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
*/
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "Delay WEP key configuration "
"until AP mode has been started\n");
- ar->wep_key_list[key_index].key_len = key->key_len;
- memcpy(ar->wep_key_list[key_index].key, key->key, key->key_len);
+ vif->wep_key_list[key_index].key_len = key->key_len;
+ memcpy(vif->wep_key_list[key_index].key, key->key,
+ key->key_len);
return 0;
}
- status = ath6kl_wmi_addkey_cmd(ar->wmi, ar->def_txkey_index,
- key_type, key_usage, key->key_len,
- key->seq, key->key, KEY_OP_INIT_VAL,
- (u8 *) mac_addr, SYNC_BOTH_WMIFLAG);
-
- if (status)
- return -EIO;
-
- return 0;
+ return ath6kl_wmi_addkey_cmd(ar->wmi, vif->fw_vif_idx, key_index,
+ key_type, key_usage, key->key_len,
+ key->seq, key->seq_len, key->key,
+ KEY_OP_INIT_VAL,
+ (u8 *) mac_addr, SYNC_BOTH_WMIFLAG);
}
static int ath6kl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
u8 key_index, bool pairwise,
const u8 *mac_addr)
{
- struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev);
+ struct ath6kl *ar = ath6kl_priv(ndev);
+ struct ath6kl_vif *vif = netdev_priv(ndev);
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: index %d\n", __func__, key_index);
- if (!ath6kl_cfg80211_ready(ar))
+ if (!ath6kl_cfg80211_ready(vif))
return -EIO;
if (key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) {
@@ -875,15 +1122,15 @@ static int ath6kl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
return -ENOENT;
}
- if (!ar->keys[key_index].key_len) {
+ if (!vif->keys[key_index].key_len) {
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
"%s: index %d is empty\n", __func__, key_index);
return 0;
}
- ar->keys[key_index].key_len = 0;
+ vif->keys[key_index].key_len = 0;
- return ath6kl_wmi_deletekey_cmd(ar->wmi, key_index);
+ return ath6kl_wmi_deletekey_cmd(ar->wmi, vif->fw_vif_idx, key_index);
}
static int ath6kl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
@@ -892,13 +1139,13 @@ static int ath6kl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
void (*callback) (void *cookie,
struct key_params *))
{
- struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev);
+ struct ath6kl_vif *vif = netdev_priv(ndev);
struct ath6kl_key *key = NULL;
struct key_params params;
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: index %d\n", __func__, key_index);
- if (!ath6kl_cfg80211_ready(ar))
+ if (!ath6kl_cfg80211_ready(vif))
return -EIO;
if (key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) {
@@ -908,7 +1155,7 @@ static int ath6kl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
return -ENOENT;
}
- key = &ar->keys[key_index];
+ key = &vif->keys[key_index];
memset(&params, 0, sizeof(params));
params.cipher = key->cipher;
params.key_len = key->key_len;
@@ -926,15 +1173,15 @@ static int ath6kl_cfg80211_set_default_key(struct wiphy *wiphy,
u8 key_index, bool unicast,
bool multicast)
{
- struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev);
+ struct ath6kl *ar = ath6kl_priv(ndev);
+ struct ath6kl_vif *vif = netdev_priv(ndev);
struct ath6kl_key *key = NULL;
- int status = 0;
u8 key_usage;
enum crypto_type key_type = NONE_CRYPT;
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: index %d\n", __func__, key_index);
- if (!ath6kl_cfg80211_ready(ar))
+ if (!ath6kl_cfg80211_ready(vif))
return -EIO;
if (key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) {
@@ -944,43 +1191,41 @@ static int ath6kl_cfg80211_set_default_key(struct wiphy *wiphy,
return -ENOENT;
}
- if (!ar->keys[key_index].key_len) {
+ if (!vif->keys[key_index].key_len) {
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: invalid key index %d\n",
__func__, key_index);
return -EINVAL;
}
- ar->def_txkey_index = key_index;
- key = &ar->keys[ar->def_txkey_index];
+ vif->def_txkey_index = key_index;
+ key = &vif->keys[vif->def_txkey_index];
key_usage = GROUP_USAGE;
- if (ar->prwise_crypto == WEP_CRYPT)
+ if (vif->prwise_crypto == WEP_CRYPT)
key_usage |= TX_USAGE;
if (unicast)
- key_type = ar->prwise_crypto;
+ key_type = vif->prwise_crypto;
if (multicast)
- key_type = ar->grp_crypto;
+ key_type = vif->grp_crypto;
- if (ar->next_mode == AP_NETWORK && !test_bit(CONNECTED, &ar->flag))
+ if (vif->next_mode == AP_NETWORK && !test_bit(CONNECTED, &vif->flags))
return 0; /* Delay until AP mode has been started */
- status = ath6kl_wmi_addkey_cmd(ar->wmi, ar->def_txkey_index,
- key_type, key_usage,
- key->key_len, key->seq, key->key,
- KEY_OP_INIT_VAL, NULL,
- SYNC_BOTH_WMIFLAG);
- if (status)
- return -EIO;
-
- return 0;
+ return ath6kl_wmi_addkey_cmd(ar->wmi, vif->fw_vif_idx,
+ vif->def_txkey_index,
+ key_type, key_usage,
+ key->key_len, key->seq, key->seq_len,
+ key->key,
+ KEY_OP_INIT_VAL, NULL,
+ SYNC_BOTH_WMIFLAG);
}
-void ath6kl_cfg80211_tkip_micerr_event(struct ath6kl *ar, u8 keyid,
+void ath6kl_cfg80211_tkip_micerr_event(struct ath6kl_vif *vif, u8 keyid,
bool ismcast)
{
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
"%s: keyid %d, ismcast %d\n", __func__, keyid, ismcast);
- cfg80211_michael_mic_failure(ar->net_dev, ar->bssid,
+ cfg80211_michael_mic_failure(vif->ndev, vif->bssid,
(ismcast ? NL80211_KEYTYPE_GROUP :
NL80211_KEYTYPE_PAIRWISE), keyid, NULL,
GFP_KERNEL);
@@ -989,12 +1234,17 @@ void ath6kl_cfg80211_tkip_micerr_event(struct ath6kl *ar, u8 keyid,
static int ath6kl_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
{
struct ath6kl *ar = (struct ath6kl *)wiphy_priv(wiphy);
+ struct ath6kl_vif *vif;
int ret;
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: changed 0x%x\n", __func__,
changed);
- if (!ath6kl_cfg80211_ready(ar))
+ vif = ath6kl_vif_first(ar);
+ if (!vif)
+ return -EIO;
+
+ if (!ath6kl_cfg80211_ready(vif))
return -EIO;
if (changed & WIPHY_PARAM_RTS_THRESHOLD) {
@@ -1014,15 +1264,21 @@ static int ath6kl_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
*/
static int ath6kl_cfg80211_set_txpower(struct wiphy *wiphy,
enum nl80211_tx_power_setting type,
- int dbm)
+ int mbm)
{
struct ath6kl *ar = (struct ath6kl *)wiphy_priv(wiphy);
+ struct ath6kl_vif *vif;
u8 ath6kl_dbm;
+ int dbm = MBM_TO_DBM(mbm);
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: type 0x%x, dbm %d\n", __func__,
type, dbm);
- if (!ath6kl_cfg80211_ready(ar))
+ vif = ath6kl_vif_first(ar);
+ if (!vif)
+ return -EIO;
+
+ if (!ath6kl_cfg80211_ready(vif))
return -EIO;
switch (type) {
@@ -1037,7 +1293,7 @@ static int ath6kl_cfg80211_set_txpower(struct wiphy *wiphy,
return -EOPNOTSUPP;
}
- ath6kl_wmi_set_tx_pwr_cmd(ar->wmi, ath6kl_dbm);
+ ath6kl_wmi_set_tx_pwr_cmd(ar->wmi, vif->fw_vif_idx, ath6kl_dbm);
return 0;
}
@@ -1045,14 +1301,19 @@ static int ath6kl_cfg80211_set_txpower(struct wiphy *wiphy,
static int ath6kl_cfg80211_get_txpower(struct wiphy *wiphy, int *dbm)
{
struct ath6kl *ar = (struct ath6kl *)wiphy_priv(wiphy);
+ struct ath6kl_vif *vif;
+
+ vif = ath6kl_vif_first(ar);
+ if (!vif)
+ return -EIO;
- if (!ath6kl_cfg80211_ready(ar))
+ if (!ath6kl_cfg80211_ready(vif))
return -EIO;
- if (test_bit(CONNECTED, &ar->flag)) {
+ if (test_bit(CONNECTED, &vif->flags)) {
ar->tx_pwr = 0;
- if (ath6kl_wmi_get_tx_pwr_cmd(ar->wmi) != 0) {
+ if (ath6kl_wmi_get_tx_pwr_cmd(ar->wmi, vif->fw_vif_idx) != 0) {
ath6kl_err("ath6kl_wmi_get_tx_pwr_cmd failed\n");
return -EIO;
}
@@ -1076,11 +1337,12 @@ static int ath6kl_cfg80211_set_power_mgmt(struct wiphy *wiphy,
{
struct ath6kl *ar = ath6kl_priv(dev);
struct wmi_power_mode_cmd mode;
+ struct ath6kl_vif *vif = netdev_priv(dev);
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: pmgmt %d, timeout %d\n",
__func__, pmgmt, timeout);
- if (!ath6kl_cfg80211_ready(ar))
+ if (!ath6kl_cfg80211_ready(vif))
return -EIO;
if (pmgmt) {
@@ -1091,7 +1353,8 @@ static int ath6kl_cfg80211_set_power_mgmt(struct wiphy *wiphy,
mode.pwr_mode = MAX_PERF_POWER;
}
- if (ath6kl_wmi_powermode_cmd(ar->wmi, mode.pwr_mode) != 0) {
+ if (ath6kl_wmi_powermode_cmd(ar->wmi, vif->fw_vif_idx,
+ mode.pwr_mode) != 0) {
ath6kl_err("wmi_powermode_cmd failed\n");
return -EIO;
}
@@ -1099,41 +1362,83 @@ static int ath6kl_cfg80211_set_power_mgmt(struct wiphy *wiphy,
return 0;
}
+static struct net_device *ath6kl_cfg80211_add_iface(struct wiphy *wiphy,
+ char *name,
+ enum nl80211_iftype type,
+ u32 *flags,
+ struct vif_params *params)
+{
+ struct ath6kl *ar = wiphy_priv(wiphy);
+ struct net_device *ndev;
+ u8 if_idx, nw_type;
+
+ if (ar->num_vif == ar->vif_max) {
+ ath6kl_err("Reached maximum number of supported vif\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!ath6kl_is_valid_iftype(ar, type, &if_idx, &nw_type)) {
+ ath6kl_err("Not a supported interface type\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ ndev = ath6kl_interface_add(ar, name, type, if_idx, nw_type);
+ if (!ndev)
+ return ERR_PTR(-ENOMEM);
+
+ ar->num_vif++;
+
+ return ndev;
+}
+
+static int ath6kl_cfg80211_del_iface(struct wiphy *wiphy,
+ struct net_device *ndev)
+{
+ struct ath6kl *ar = wiphy_priv(wiphy);
+ struct ath6kl_vif *vif = netdev_priv(ndev);
+
+ spin_lock_bh(&ar->list_lock);
+ list_del(&vif->list);
+ spin_unlock_bh(&ar->list_lock);
+
+ ath6kl_cleanup_vif(vif, test_bit(WMI_READY, &ar->flag));
+
+ ath6kl_deinit_if_data(vif);
+
+ return 0;
+}
+
static int ath6kl_cfg80211_change_iface(struct wiphy *wiphy,
struct net_device *ndev,
enum nl80211_iftype type, u32 *flags,
struct vif_params *params)
{
- struct ath6kl *ar = ath6kl_priv(ndev);
- struct wireless_dev *wdev = ar->wdev;
+ struct ath6kl_vif *vif = netdev_priv(ndev);
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: type %u\n", __func__, type);
- if (!ath6kl_cfg80211_ready(ar))
- return -EIO;
-
switch (type) {
case NL80211_IFTYPE_STATION:
- ar->next_mode = INFRA_NETWORK;
+ vif->next_mode = INFRA_NETWORK;
break;
case NL80211_IFTYPE_ADHOC:
- ar->next_mode = ADHOC_NETWORK;
+ vif->next_mode = ADHOC_NETWORK;
break;
case NL80211_IFTYPE_AP:
- ar->next_mode = AP_NETWORK;
+ vif->next_mode = AP_NETWORK;
break;
case NL80211_IFTYPE_P2P_CLIENT:
- ar->next_mode = INFRA_NETWORK;
+ vif->next_mode = INFRA_NETWORK;
break;
case NL80211_IFTYPE_P2P_GO:
- ar->next_mode = AP_NETWORK;
+ vif->next_mode = AP_NETWORK;
break;
default:
ath6kl_err("invalid interface type %u\n", type);
return -EOPNOTSUPP;
}
- wdev->iftype = type;
+ vif->wdev.iftype = type;
return 0;
}
@@ -1143,16 +1448,17 @@ static int ath6kl_cfg80211_join_ibss(struct wiphy *wiphy,
struct cfg80211_ibss_params *ibss_param)
{
struct ath6kl *ar = ath6kl_priv(dev);
+ struct ath6kl_vif *vif = netdev_priv(dev);
int status;
- if (!ath6kl_cfg80211_ready(ar))
+ if (!ath6kl_cfg80211_ready(vif))
return -EIO;
- ar->ssid_len = ibss_param->ssid_len;
- memcpy(ar->ssid, ibss_param->ssid, ar->ssid_len);
+ vif->ssid_len = ibss_param->ssid_len;
+ memcpy(vif->ssid, ibss_param->ssid, vif->ssid_len);
if (ibss_param->channel)
- ar->ch_hint = ibss_param->channel->center_freq;
+ vif->ch_hint = ibss_param->channel->center_freq;
if (ibss_param->channel_fixed) {
/*
@@ -1164,44 +1470,45 @@ static int ath6kl_cfg80211_join_ibss(struct wiphy *wiphy,
return -EOPNOTSUPP;
}
- memset(ar->req_bssid, 0, sizeof(ar->req_bssid));
+ memset(vif->req_bssid, 0, sizeof(vif->req_bssid));
if (ibss_param->bssid && !is_broadcast_ether_addr(ibss_param->bssid))
- memcpy(ar->req_bssid, ibss_param->bssid, sizeof(ar->req_bssid));
+ memcpy(vif->req_bssid, ibss_param->bssid,
+ sizeof(vif->req_bssid));
- ath6kl_set_wpa_version(ar, 0);
+ ath6kl_set_wpa_version(vif, 0);
- status = ath6kl_set_auth_type(ar, NL80211_AUTHTYPE_OPEN_SYSTEM);
+ status = ath6kl_set_auth_type(vif, NL80211_AUTHTYPE_OPEN_SYSTEM);
if (status)
return status;
if (ibss_param->privacy) {
- ath6kl_set_cipher(ar, WLAN_CIPHER_SUITE_WEP40, true);
- ath6kl_set_cipher(ar, WLAN_CIPHER_SUITE_WEP40, false);
+ ath6kl_set_cipher(vif, WLAN_CIPHER_SUITE_WEP40, true);
+ ath6kl_set_cipher(vif, WLAN_CIPHER_SUITE_WEP40, false);
} else {
- ath6kl_set_cipher(ar, 0, true);
- ath6kl_set_cipher(ar, 0, false);
+ ath6kl_set_cipher(vif, 0, true);
+ ath6kl_set_cipher(vif, 0, false);
}
- ar->nw_type = ar->next_mode;
+ vif->nw_type = vif->next_mode;
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
"%s: connect called with authmode %d dot11 auth %d"
" PW crypto %d PW crypto len %d GRP crypto %d"
" GRP crypto len %d channel hint %u\n",
__func__,
- ar->auth_mode, ar->dot11_auth_mode, ar->prwise_crypto,
- ar->prwise_crypto_len, ar->grp_crypto,
- ar->grp_crypto_len, ar->ch_hint);
-
- status = ath6kl_wmi_connect_cmd(ar->wmi, ar->nw_type,
- ar->dot11_auth_mode, ar->auth_mode,
- ar->prwise_crypto,
- ar->prwise_crypto_len,
- ar->grp_crypto, ar->grp_crypto_len,
- ar->ssid_len, ar->ssid,
- ar->req_bssid, ar->ch_hint,
- ar->connect_ctrl_flags);
- set_bit(CONNECT_PEND, &ar->flag);
+ vif->auth_mode, vif->dot11_auth_mode, vif->prwise_crypto,
+ vif->prwise_crypto_len, vif->grp_crypto,
+ vif->grp_crypto_len, vif->ch_hint);
+
+ status = ath6kl_wmi_connect_cmd(ar->wmi, vif->fw_vif_idx, vif->nw_type,
+ vif->dot11_auth_mode, vif->auth_mode,
+ vif->prwise_crypto,
+ vif->prwise_crypto_len,
+ vif->grp_crypto, vif->grp_crypto_len,
+ vif->ssid_len, vif->ssid,
+ vif->req_bssid, vif->ch_hint,
+ ar->connect_ctrl_flags, SUBTYPE_NONE);
+ set_bit(CONNECT_PEND, &vif->flags);
return 0;
}
@@ -1209,14 +1516,14 @@ static int ath6kl_cfg80211_join_ibss(struct wiphy *wiphy,
static int ath6kl_cfg80211_leave_ibss(struct wiphy *wiphy,
struct net_device *dev)
{
- struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(dev);
+ struct ath6kl_vif *vif = netdev_priv(dev);
- if (!ath6kl_cfg80211_ready(ar))
+ if (!ath6kl_cfg80211_ready(vif))
return -EIO;
- ath6kl_disconnect(ar);
- memset(ar->ssid, 0, sizeof(ar->ssid));
- ar->ssid_len = 0;
+ ath6kl_disconnect(vif);
+ memset(vif->ssid, 0, sizeof(vif->ssid));
+ vif->ssid_len = 0;
return 0;
}
@@ -1226,6 +1533,8 @@ static const u32 cipher_suites[] = {
WLAN_CIPHER_SUITE_WEP104,
WLAN_CIPHER_SUITE_TKIP,
WLAN_CIPHER_SUITE_CCMP,
+ CCKM_KRK_CIPHER_SUITE,
+ WLAN_CIPHER_SUITE_SMS4,
};
static bool is_rate_legacy(s32 rate)
@@ -1293,21 +1602,22 @@ static int ath6kl_get_station(struct wiphy *wiphy, struct net_device *dev,
u8 *mac, struct station_info *sinfo)
{
struct ath6kl *ar = ath6kl_priv(dev);
+ struct ath6kl_vif *vif = netdev_priv(dev);
long left;
bool sgi;
s32 rate;
int ret;
u8 mcs;
- if (memcmp(mac, ar->bssid, ETH_ALEN) != 0)
+ if (memcmp(mac, vif->bssid, ETH_ALEN) != 0)
return -ENOENT;
if (down_interruptible(&ar->sem))
return -EBUSY;
- set_bit(STATS_UPDATE_PEND, &ar->flag);
+ set_bit(STATS_UPDATE_PEND, &vif->flags);
- ret = ath6kl_wmi_get_stats_cmd(ar->wmi);
+ ret = ath6kl_wmi_get_stats_cmd(ar->wmi, vif->fw_vif_idx);
if (ret != 0) {
up(&ar->sem);
@@ -1316,7 +1626,7 @@ static int ath6kl_get_station(struct wiphy *wiphy, struct net_device *dev,
left = wait_event_interruptible_timeout(ar->event_wq,
!test_bit(STATS_UPDATE_PEND,
- &ar->flag),
+ &vif->flags),
WMI_TIMEOUT);
up(&ar->sem);
@@ -1326,24 +1636,24 @@ static int ath6kl_get_station(struct wiphy *wiphy, struct net_device *dev,
else if (left < 0)
return left;
- if (ar->target_stats.rx_byte) {
- sinfo->rx_bytes = ar->target_stats.rx_byte;
+ if (vif->target_stats.rx_byte) {
+ sinfo->rx_bytes = vif->target_stats.rx_byte;
sinfo->filled |= STATION_INFO_RX_BYTES;
- sinfo->rx_packets = ar->target_stats.rx_pkt;
+ sinfo->rx_packets = vif->target_stats.rx_pkt;
sinfo->filled |= STATION_INFO_RX_PACKETS;
}
- if (ar->target_stats.tx_byte) {
- sinfo->tx_bytes = ar->target_stats.tx_byte;
+ if (vif->target_stats.tx_byte) {
+ sinfo->tx_bytes = vif->target_stats.tx_byte;
sinfo->filled |= STATION_INFO_TX_BYTES;
- sinfo->tx_packets = ar->target_stats.tx_pkt;
+ sinfo->tx_packets = vif->target_stats.tx_pkt;
sinfo->filled |= STATION_INFO_TX_PACKETS;
}
- sinfo->signal = ar->target_stats.cs_rssi;
+ sinfo->signal = vif->target_stats.cs_rssi;
sinfo->filled |= STATION_INFO_SIGNAL;
- rate = ar->target_stats.tx_ucast_rate;
+ rate = vif->target_stats.tx_ucast_rate;
if (is_rate_legacy(rate)) {
sinfo->txrate.legacy = rate / 100;
@@ -1375,13 +1685,13 @@ static int ath6kl_get_station(struct wiphy *wiphy, struct net_device *dev,
sinfo->filled |= STATION_INFO_TX_BITRATE;
- if (test_bit(CONNECTED, &ar->flag) &&
- test_bit(DTIM_PERIOD_AVAIL, &ar->flag) &&
- ar->nw_type == INFRA_NETWORK) {
+ if (test_bit(CONNECTED, &vif->flags) &&
+ test_bit(DTIM_PERIOD_AVAIL, &vif->flags) &&
+ vif->nw_type == INFRA_NETWORK) {
sinfo->filled |= STATION_INFO_BSS_PARAM;
sinfo->bss_param.flags = 0;
- sinfo->bss_param.dtim_period = ar->assoc_bss_dtim_period;
- sinfo->bss_param.beacon_interval = ar->assoc_bss_beacon_int;
+ sinfo->bss_param.dtim_period = vif->assoc_bss_dtim_period;
+ sinfo->bss_param.beacon_interval = vif->assoc_bss_beacon_int;
}
return 0;
@@ -1391,7 +1701,9 @@ static int ath6kl_set_pmksa(struct wiphy *wiphy, struct net_device *netdev,
struct cfg80211_pmksa *pmksa)
{
struct ath6kl *ar = ath6kl_priv(netdev);
- return ath6kl_wmi_setpmkid_cmd(ar->wmi, pmksa->bssid,
+ struct ath6kl_vif *vif = netdev_priv(netdev);
+
+ return ath6kl_wmi_setpmkid_cmd(ar->wmi, vif->fw_vif_idx, pmksa->bssid,
pmksa->pmkid, true);
}
@@ -1399,25 +1711,302 @@ static int ath6kl_del_pmksa(struct wiphy *wiphy, struct net_device *netdev,
struct cfg80211_pmksa *pmksa)
{
struct ath6kl *ar = ath6kl_priv(netdev);
- return ath6kl_wmi_setpmkid_cmd(ar->wmi, pmksa->bssid,
+ struct ath6kl_vif *vif = netdev_priv(netdev);
+
+ return ath6kl_wmi_setpmkid_cmd(ar->wmi, vif->fw_vif_idx, pmksa->bssid,
pmksa->pmkid, false);
}
static int ath6kl_flush_pmksa(struct wiphy *wiphy, struct net_device *netdev)
{
struct ath6kl *ar = ath6kl_priv(netdev);
- if (test_bit(CONNECTED, &ar->flag))
- return ath6kl_wmi_setpmkid_cmd(ar->wmi, ar->bssid, NULL, false);
+ struct ath6kl_vif *vif = netdev_priv(netdev);
+
+ if (test_bit(CONNECTED, &vif->flags))
+ return ath6kl_wmi_setpmkid_cmd(ar->wmi, vif->fw_vif_idx,
+ vif->bssid, NULL, false);
+ return 0;
+}
+
+static int ath6kl_wow_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
+{
+ struct ath6kl_vif *vif;
+ int ret, pos, left;
+ u32 filter = 0;
+ u16 i;
+ u8 mask[WOW_MASK_SIZE];
+
+ vif = ath6kl_vif_first(ar);
+ if (!vif)
+ return -EIO;
+
+ if (!ath6kl_cfg80211_ready(vif))
+ return -EIO;
+
+ if (!test_bit(CONNECTED, &vif->flags))
+ return -EINVAL;
+
+ /* Clear existing WOW patterns */
+ for (i = 0; i < WOW_MAX_FILTERS_PER_LIST; i++)
+ ath6kl_wmi_del_wow_pattern_cmd(ar->wmi, vif->fw_vif_idx,
+ WOW_LIST_ID, i);
+ /* Configure new WOW patterns */
+ for (i = 0; i < wow->n_patterns; i++) {
+
+ /*
+ * Convert given nl80211 specific mask value to equivalent
+ * driver specific mask value and send it to the chip along
+ * with patterns. For example, If the mask value defined in
+ * struct cfg80211_wowlan is 0xA (equivalent binary is 1010),
+ * then equivalent driver specific mask value is
+ * "0xFF 0x00 0xFF 0x00".
+ */
+ memset(&mask, 0, sizeof(mask));
+ for (pos = 0; pos < wow->patterns[i].pattern_len; pos++) {
+ if (wow->patterns[i].mask[pos / 8] & (0x1 << (pos % 8)))
+ mask[pos] = 0xFF;
+ }
+ /*
+ * Note: Pattern's offset is not passed as part of wowlan
+ * parameter from CFG layer. So it's always passed as ZERO
+ * to the firmware. It means, given WOW patterns are always
+ * matched from the first byte of received pkt in the firmware.
+ */
+ ret = ath6kl_wmi_add_wow_pattern_cmd(ar->wmi,
+ vif->fw_vif_idx, WOW_LIST_ID,
+ wow->patterns[i].pattern_len,
+ 0 /* pattern offset */,
+ wow->patterns[i].pattern, mask);
+ if (ret)
+ return ret;
+ }
+
+ if (wow->disconnect)
+ filter |= WOW_FILTER_OPTION_NWK_DISASSOC;
+
+ if (wow->magic_pkt)
+ filter |= WOW_FILTER_OPTION_MAGIC_PACKET;
+
+ if (wow->gtk_rekey_failure)
+ filter |= WOW_FILTER_OPTION_GTK_ERROR;
+
+ if (wow->eap_identity_req)
+ filter |= WOW_FILTER_OPTION_EAP_REQ;
+
+ if (wow->four_way_handshake)
+ filter |= WOW_FILTER_OPTION_8021X_4WAYHS;
+
+ ret = ath6kl_wmi_set_wow_mode_cmd(ar->wmi, vif->fw_vif_idx,
+ ATH6KL_WOW_MODE_ENABLE,
+ filter,
+ WOW_HOST_REQ_DELAY);
+ if (ret)
+ return ret;
+
+ ret = ath6kl_wmi_set_host_sleep_mode_cmd(ar->wmi, vif->fw_vif_idx,
+ ATH6KL_HOST_MODE_ASLEEP);
+ if (ret)
+ return ret;
+
+ if (ar->tx_pending[ar->ctrl_ep]) {
+ left = wait_event_interruptible_timeout(ar->event_wq,
+ ar->tx_pending[ar->ctrl_ep] == 0, WMI_TIMEOUT);
+ if (left == 0) {
+ ath6kl_warn("clear wmi ctrl data timeout\n");
+ ret = -ETIMEDOUT;
+ } else if (left < 0) {
+ ath6kl_warn("clear wmi ctrl data failed: %d\n", left);
+ ret = left;
+ }
+ }
+
+ return ret;
+}
+
+static int ath6kl_wow_resume(struct ath6kl *ar)
+{
+ struct ath6kl_vif *vif;
+ int ret;
+
+ vif = ath6kl_vif_first(ar);
+ if (!vif)
+ return -EIO;
+
+ ret = ath6kl_wmi_set_host_sleep_mode_cmd(ar->wmi, vif->fw_vif_idx,
+ ATH6KL_HOST_MODE_AWAKE);
+ return ret;
+}
+
+int ath6kl_cfg80211_suspend(struct ath6kl *ar,
+ enum ath6kl_cfg_suspend_mode mode,
+ struct cfg80211_wowlan *wow)
+{
+ int ret;
+
+ switch (mode) {
+ case ATH6KL_CFG_SUSPEND_WOW:
+
+ ath6kl_dbg(ATH6KL_DBG_SUSPEND, "wow mode suspend\n");
+
+ /* Flush all non control pkts in TX path */
+ ath6kl_tx_data_cleanup(ar);
+
+ ret = ath6kl_wow_suspend(ar, wow);
+ if (ret) {
+ ath6kl_err("wow suspend failed: %d\n", ret);
+ return ret;
+ }
+ ar->state = ATH6KL_STATE_WOW;
+ break;
+
+ case ATH6KL_CFG_SUSPEND_DEEPSLEEP:
+
+ ath6kl_cfg80211_stop_all(ar);
+
+ /* save the current power mode before enabling power save */
+ ar->wmi->saved_pwr_mode = ar->wmi->pwr_mode;
+
+ ret = ath6kl_wmi_powermode_cmd(ar->wmi, 0, REC_POWER);
+ if (ret) {
+ ath6kl_warn("wmi powermode command failed during suspend: %d\n",
+ ret);
+ }
+
+ ar->state = ATH6KL_STATE_DEEPSLEEP;
+
+ break;
+
+ case ATH6KL_CFG_SUSPEND_CUTPOWER:
+
+ ath6kl_cfg80211_stop_all(ar);
+
+ if (ar->state == ATH6KL_STATE_OFF) {
+ ath6kl_dbg(ATH6KL_DBG_SUSPEND,
+ "suspend hw off, no action for cutpower\n");
+ break;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_SUSPEND, "suspend cutting power\n");
+
+ ret = ath6kl_init_hw_stop(ar);
+ if (ret) {
+ ath6kl_warn("failed to stop hw during suspend: %d\n",
+ ret);
+ }
+
+ ar->state = ATH6KL_STATE_CUTPOWER;
+
+ break;
+
+ case ATH6KL_CFG_SUSPEND_SCHED_SCAN:
+ /*
+ * Nothing needed for scheduled scan; the firmware is already in
+ * wow mode and sleeping most of the time.
+ */
+ break;
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+int ath6kl_cfg80211_resume(struct ath6kl *ar)
+{
+ int ret;
+
+ switch (ar->state) {
+ case ATH6KL_STATE_WOW:
+ ath6kl_dbg(ATH6KL_DBG_SUSPEND, "wow mode resume\n");
+
+ ret = ath6kl_wow_resume(ar);
+ if (ret) {
+ ath6kl_warn("wow mode resume failed: %d\n", ret);
+ return ret;
+ }
+
+ ar->state = ATH6KL_STATE_ON;
+ break;
+
+ case ATH6KL_STATE_DEEPSLEEP:
+ if (ar->wmi->pwr_mode != ar->wmi->saved_pwr_mode) {
+ ret = ath6kl_wmi_powermode_cmd(ar->wmi, 0,
+ ar->wmi->saved_pwr_mode);
+ if (ret) {
+ ath6kl_warn("wmi powermode command failed during resume: %d\n",
+ ret);
+ }
+ }
+
+ ar->state = ATH6KL_STATE_ON;
+
+ break;
+
+ case ATH6KL_STATE_CUTPOWER:
+ ath6kl_dbg(ATH6KL_DBG_SUSPEND, "resume restoring power\n");
+
+ ret = ath6kl_init_hw_start(ar);
+ if (ret) {
+ ath6kl_warn("Failed to boot hw in resume: %d\n", ret);
+ return ret;
+ }
+ break;
+
+ case ATH6KL_STATE_SCHED_SCAN:
+ break;
+
+ default:
+ break;
+ }
+
return 0;
}
#ifdef CONFIG_PM
-static int ar6k_cfg80211_suspend(struct wiphy *wiphy,
+
+/* hif layer decides what suspend mode to use */
+static int __ath6kl_cfg80211_suspend(struct wiphy *wiphy,
struct cfg80211_wowlan *wow)
{
struct ath6kl *ar = wiphy_priv(wiphy);
- return ath6kl_hif_suspend(ar);
+ return ath6kl_hif_suspend(ar, wow);
+}
+
+static int __ath6kl_cfg80211_resume(struct wiphy *wiphy)
+{
+ struct ath6kl *ar = wiphy_priv(wiphy);
+
+ return ath6kl_hif_resume(ar);
+}
+
+/*
+ * FIXME: WOW suspend mode is selected if the host sdio controller supports
+ * both sdio irq wakeup and keep-power. The target pulls the sdio data line
+ * to wake up the host when a WOW pattern matches, which causes the sdio irq
+ * handler to be called on the host side and internally hits ath6kl's RX path.
+ *
+ * Since the sdio interrupt is not disabled, the RX path executes even before
+ * the host performs the actual resume operation from the PM module.
+ *
+ * In the current scenario, WOW resume should happen before we start
+ * processing any data from the target, so WOW resume has to be performed in
+ * the RX path. Ideally it should be done only in the actual platform resume
+ * path. This area needs a bit of rework to avoid WOW resume in the RX path.
+ *
+ * ath6kl_check_wow_status() is called from ath6kl_rx().
+ */
+void ath6kl_check_wow_status(struct ath6kl *ar)
+{
+ if (ar->state == ATH6KL_STATE_WOW)
+ ath6kl_cfg80211_resume(ar);
+}
+
+#else
+
+void ath6kl_check_wow_status(struct ath6kl *ar)
+{
}
#endif
@@ -1425,14 +2014,14 @@ static int ath6kl_set_channel(struct wiphy *wiphy, struct net_device *dev,
struct ieee80211_channel *chan,
enum nl80211_channel_type channel_type)
{
- struct ath6kl *ar = ath6kl_priv(dev);
+ struct ath6kl_vif *vif = netdev_priv(dev);
- if (!ath6kl_cfg80211_ready(ar))
+ if (!ath6kl_cfg80211_ready(vif))
return -EIO;
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: center_freq=%u hw_value=%u\n",
__func__, chan->center_freq, chan->hw_value);
- ar->next_chan = chan->center_freq;
+ vif->next_chan = chan->center_freq;
return 0;
}
@@ -1444,9 +2033,10 @@ static bool ath6kl_is_p2p_ie(const u8 *pos)
pos[4] == 0x9a && pos[5] == 0x09;
}
-static int ath6kl_set_ap_probe_resp_ies(struct ath6kl *ar, const u8 *ies,
- size_t ies_len)
+static int ath6kl_set_ap_probe_resp_ies(struct ath6kl_vif *vif,
+ const u8 *ies, size_t ies_len)
{
+ struct ath6kl *ar = vif->ar;
const u8 *pos;
u8 *buf = NULL;
size_t len = 0;
@@ -1473,8 +2063,8 @@ static int ath6kl_set_ap_probe_resp_ies(struct ath6kl *ar, const u8 *ies,
}
}
- ret = ath6kl_wmi_set_appie_cmd(ar->wmi, WMI_FRAME_PROBE_RESP,
- buf, len);
+ ret = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx,
+ WMI_FRAME_PROBE_RESP, buf, len);
kfree(buf);
return ret;
}
@@ -1483,36 +2073,39 @@ static int ath6kl_ap_beacon(struct wiphy *wiphy, struct net_device *dev,
struct beacon_parameters *info, bool add)
{
struct ath6kl *ar = ath6kl_priv(dev);
+ struct ath6kl_vif *vif = netdev_priv(dev);
struct ieee80211_mgmt *mgmt;
u8 *ies;
int ies_len;
struct wmi_connect_cmd p;
int res;
- int i;
+ int i, ret;
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: add=%d\n", __func__, add);
- if (!ath6kl_cfg80211_ready(ar))
+ if (!ath6kl_cfg80211_ready(vif))
return -EIO;
- if (ar->next_mode != AP_NETWORK)
+ if (vif->next_mode != AP_NETWORK)
return -EOPNOTSUPP;
if (info->beacon_ies) {
- res = ath6kl_wmi_set_appie_cmd(ar->wmi, WMI_FRAME_BEACON,
+ res = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx,
+ WMI_FRAME_BEACON,
info->beacon_ies,
info->beacon_ies_len);
if (res)
return res;
}
if (info->proberesp_ies) {
- res = ath6kl_set_ap_probe_resp_ies(ar, info->proberesp_ies,
+ res = ath6kl_set_ap_probe_resp_ies(vif, info->proberesp_ies,
info->proberesp_ies_len);
if (res)
return res;
}
if (info->assocresp_ies) {
- res = ath6kl_wmi_set_appie_cmd(ar->wmi, WMI_FRAME_ASSOC_RESP,
+ res = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx,
+ WMI_FRAME_ASSOC_RESP,
info->assocresp_ies,
info->assocresp_ies_len);
if (res)
@@ -1539,12 +2132,14 @@ static int ath6kl_ap_beacon(struct wiphy *wiphy, struct net_device *dev,
if (info->ssid == NULL)
return -EINVAL;
- memcpy(ar->ssid, info->ssid, info->ssid_len);
- ar->ssid_len = info->ssid_len;
+ memcpy(vif->ssid, info->ssid, info->ssid_len);
+ vif->ssid_len = info->ssid_len;
if (info->hidden_ssid != NL80211_HIDDEN_SSID_NOT_IN_USE)
return -EOPNOTSUPP; /* TODO */
- ar->dot11_auth_mode = OPEN_AUTH;
+ ret = ath6kl_set_auth_type(vif, info->auth_type);
+ if (ret)
+ return ret;
memset(&p, 0, sizeof(p));
@@ -1566,7 +2161,7 @@ static int ath6kl_ap_beacon(struct wiphy *wiphy, struct net_device *dev,
}
if (p.auth_mode == 0)
p.auth_mode = NONE_AUTH;
- ar->auth_mode = p.auth_mode;
+ vif->auth_mode = p.auth_mode;
for (i = 0; i < info->crypto.n_ciphers_pairwise; i++) {
switch (info->crypto.ciphers_pairwise[i]) {
@@ -1580,13 +2175,16 @@ static int ath6kl_ap_beacon(struct wiphy *wiphy, struct net_device *dev,
case WLAN_CIPHER_SUITE_CCMP:
p.prwise_crypto_type |= AES_CRYPT;
break;
+ case WLAN_CIPHER_SUITE_SMS4:
+ p.prwise_crypto_type |= WAPI_CRYPT;
+ break;
}
}
if (p.prwise_crypto_type == 0) {
p.prwise_crypto_type = NONE_CRYPT;
- ath6kl_set_cipher(ar, 0, true);
+ ath6kl_set_cipher(vif, 0, true);
} else if (info->crypto.n_ciphers_pairwise == 1)
- ath6kl_set_cipher(ar, info->crypto.ciphers_pairwise[0], true);
+ ath6kl_set_cipher(vif, info->crypto.ciphers_pairwise[0], true);
switch (info->crypto.cipher_group) {
case WLAN_CIPHER_SUITE_WEP40:
@@ -1599,21 +2197,34 @@ static int ath6kl_ap_beacon(struct wiphy *wiphy, struct net_device *dev,
case WLAN_CIPHER_SUITE_CCMP:
p.grp_crypto_type = AES_CRYPT;
break;
+ case WLAN_CIPHER_SUITE_SMS4:
+ p.grp_crypto_type = WAPI_CRYPT;
+ break;
default:
p.grp_crypto_type = NONE_CRYPT;
break;
}
- ath6kl_set_cipher(ar, info->crypto.cipher_group, false);
+ ath6kl_set_cipher(vif, info->crypto.cipher_group, false);
p.nw_type = AP_NETWORK;
- ar->nw_type = ar->next_mode;
+ vif->nw_type = vif->next_mode;
- p.ssid_len = ar->ssid_len;
- memcpy(p.ssid, ar->ssid, ar->ssid_len);
- p.dot11_auth_mode = ar->dot11_auth_mode;
- p.ch = cpu_to_le16(ar->next_chan);
+ p.ssid_len = vif->ssid_len;
+ memcpy(p.ssid, vif->ssid, vif->ssid_len);
+ p.dot11_auth_mode = vif->dot11_auth_mode;
+ p.ch = cpu_to_le16(vif->next_chan);
- res = ath6kl_wmi_ap_profile_commit(ar->wmi, &p);
+ if (vif->wdev.iftype == NL80211_IFTYPE_P2P_GO) {
+ p.nw_subtype = SUBTYPE_P2PGO;
+ } else {
+ /*
+ * Due to a firmware limitation, it is not possible to
+ * do P2P mgmt operations in AP mode.
+ */
+ p.nw_subtype = SUBTYPE_NONE;
+ }
+
+ res = ath6kl_wmi_ap_profile_commit(ar->wmi, vif->fw_vif_idx, &p);
if (res < 0)
return res;
@@ -1635,14 +2246,15 @@ static int ath6kl_set_beacon(struct wiphy *wiphy, struct net_device *dev,
static int ath6kl_del_beacon(struct wiphy *wiphy, struct net_device *dev)
{
struct ath6kl *ar = ath6kl_priv(dev);
+ struct ath6kl_vif *vif = netdev_priv(dev);
- if (ar->nw_type != AP_NETWORK)
+ if (vif->nw_type != AP_NETWORK)
return -EOPNOTSUPP;
- if (!test_bit(CONNECTED, &ar->flag))
+ if (!test_bit(CONNECTED, &vif->flags))
return -ENOTCONN;
- ath6kl_wmi_disconnect_cmd(ar->wmi);
- clear_bit(CONNECTED, &ar->flag);
+ ath6kl_wmi_disconnect_cmd(ar->wmi, vif->fw_vif_idx);
+ clear_bit(CONNECTED, &vif->flags);
return 0;
}
@@ -1651,8 +2263,9 @@ static int ath6kl_change_station(struct wiphy *wiphy, struct net_device *dev,
u8 *mac, struct station_parameters *params)
{
struct ath6kl *ar = ath6kl_priv(dev);
+ struct ath6kl_vif *vif = netdev_priv(dev);
- if (ar->nw_type != AP_NETWORK)
+ if (vif->nw_type != AP_NETWORK)
return -EOPNOTSUPP;
/* Use this only for authorizing/unauthorizing a station */
@@ -1660,10 +2273,10 @@ static int ath6kl_change_station(struct wiphy *wiphy, struct net_device *dev,
return -EOPNOTSUPP;
if (params->sta_flags_set & BIT(NL80211_STA_FLAG_AUTHORIZED))
- return ath6kl_wmi_ap_set_mlme(ar->wmi, WMI_AP_MLME_AUTHORIZE,
- mac, 0);
- return ath6kl_wmi_ap_set_mlme(ar->wmi, WMI_AP_MLME_UNAUTHORIZE, mac,
- 0);
+ return ath6kl_wmi_ap_set_mlme(ar->wmi, vif->fw_vif_idx,
+ WMI_AP_MLME_AUTHORIZE, mac, 0);
+ return ath6kl_wmi_ap_set_mlme(ar->wmi, vif->fw_vif_idx,
+ WMI_AP_MLME_UNAUTHORIZE, mac, 0);
}
static int ath6kl_remain_on_channel(struct wiphy *wiphy,
@@ -1674,13 +2287,20 @@ static int ath6kl_remain_on_channel(struct wiphy *wiphy,
u64 *cookie)
{
struct ath6kl *ar = ath6kl_priv(dev);
+ struct ath6kl_vif *vif = netdev_priv(dev);
+ u32 id;
/* TODO: if already pending or ongoing remain-on-channel,
* return -EBUSY */
- *cookie = 1; /* only a single pending request is supported */
+ id = ++vif->last_roc_id;
+ if (id == 0) {
+ /* Do not use 0 as the cookie value */
+ id = ++vif->last_roc_id;
+ }
+ *cookie = id;
- return ath6kl_wmi_remain_on_chnl_cmd(ar->wmi, chan->center_freq,
- duration);
+ return ath6kl_wmi_remain_on_chnl_cmd(ar->wmi, vif->fw_vif_idx,
+ chan->center_freq, duration);
}
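
The remain-on-channel cookie handling above follows a simple "counter that never yields 0" pattern, since 0 is a reserved value; a standalone sketch with an illustrative name:

    #include <stdint.h>

    /* Advance an id counter, skipping 0 on wrap-around, so the returned
     * value can always be used as a valid cookie. */
    static uint32_t next_nonzero_id(uint32_t *counter)
    {
            uint32_t id = ++(*counter);

            if (id == 0)
                    id = ++(*counter);
            return id;
    }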
static int ath6kl_cancel_remain_on_channel(struct wiphy *wiphy,
@@ -1688,16 +2308,20 @@ static int ath6kl_cancel_remain_on_channel(struct wiphy *wiphy,
u64 cookie)
{
struct ath6kl *ar = ath6kl_priv(dev);
+ struct ath6kl_vif *vif = netdev_priv(dev);
- if (cookie != 1)
+ if (cookie != vif->last_roc_id)
return -ENOENT;
+ vif->last_cancel_roc_id = cookie;
- return ath6kl_wmi_cancel_remain_on_chnl_cmd(ar->wmi);
+ return ath6kl_wmi_cancel_remain_on_chnl_cmd(ar->wmi, vif->fw_vif_idx);
}
-static int ath6kl_send_go_probe_resp(struct ath6kl *ar, const u8 *buf,
- size_t len, unsigned int freq)
+static int ath6kl_send_go_probe_resp(struct ath6kl_vif *vif,
+ const u8 *buf, size_t len,
+ unsigned int freq)
{
+ struct ath6kl *ar = vif->ar;
const u8 *pos;
u8 *p2p;
int p2p_len;
@@ -1724,8 +2348,8 @@ static int ath6kl_send_go_probe_resp(struct ath6kl *ar, const u8 *buf,
pos += 2 + pos[1];
}
- ret = ath6kl_wmi_send_probe_response_cmd(ar->wmi, freq, mgmt->da,
- p2p, p2p_len);
+ ret = ath6kl_wmi_send_probe_response_cmd(ar->wmi, vif->fw_vif_idx, freq,
+ mgmt->da, p2p, p2p_len);
kfree(p2p);
return ret;
}
@@ -1734,44 +2358,61 @@ static int ath6kl_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
struct ieee80211_channel *chan, bool offchan,
enum nl80211_channel_type channel_type,
bool channel_type_valid, unsigned int wait,
- const u8 *buf, size_t len, bool no_cck, u64 *cookie)
+ const u8 *buf, size_t len, bool no_cck,
+ bool dont_wait_for_ack, u64 *cookie)
{
struct ath6kl *ar = ath6kl_priv(dev);
+ struct ath6kl_vif *vif = netdev_priv(dev);
u32 id;
const struct ieee80211_mgmt *mgmt;
mgmt = (const struct ieee80211_mgmt *) buf;
if (buf + len >= mgmt->u.probe_resp.variable &&
- ar->nw_type == AP_NETWORK && test_bit(CONNECTED, &ar->flag) &&
+ vif->nw_type == AP_NETWORK && test_bit(CONNECTED, &vif->flags) &&
ieee80211_is_probe_resp(mgmt->frame_control)) {
/*
* Send Probe Response frame in AP mode using a separate WMI
* command to allow the target to fill in the generic IEs.
*/
*cookie = 0; /* TX status not supported */
- return ath6kl_send_go_probe_resp(ar, buf, len,
+ return ath6kl_send_go_probe_resp(vif, buf, len,
chan->center_freq);
}
- id = ar->send_action_id++;
+ id = vif->send_action_id++;
if (id == 0) {
/*
* 0 is a reserved value in the WMI command and shall not be
* used for the command.
*/
- id = ar->send_action_id++;
+ id = vif->send_action_id++;
}
*cookie = id;
- return ath6kl_wmi_send_action_cmd(ar->wmi, id, chan->center_freq, wait,
- buf, len);
+
+ if (test_bit(ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX,
+ ar->fw_capabilities)) {
+ /*
+ * If capable of doing P2P mgmt operations using the
+ * station interface, send additional information such as
+ * supported rates to advertise and the xmit rate for
+ * probe requests.
+ */
+ return ath6kl_wmi_send_mgmt_cmd(ar->wmi, vif->fw_vif_idx, id,
+ chan->center_freq, wait,
+ buf, len, no_cck);
+ } else {
+ return ath6kl_wmi_send_action_cmd(ar->wmi, vif->fw_vif_idx, id,
+ chan->center_freq, wait,
+ buf, len);
+ }
}
static void ath6kl_mgmt_frame_register(struct wiphy *wiphy,
struct net_device *dev,
u16 frame_type, bool reg)
{
- struct ath6kl *ar = ath6kl_priv(dev);
+ struct ath6kl_vif *vif = netdev_priv(dev);
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: frame_type=0x%x reg=%d\n",
__func__, frame_type, reg);
@@ -1781,8 +2422,92 @@ static void ath6kl_mgmt_frame_register(struct wiphy *wiphy,
* we cannot send WMI_PROBE_REQ_REPORT_CMD here. Instead, we
* hardcode target to report Probe Request frames all the time.
*/
- ar->probe_req_report = reg;
+ vif->probe_req_report = reg;
+ }
+}
+
+static int ath6kl_cfg80211_sscan_start(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct cfg80211_sched_scan_request *request)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+ struct ath6kl_vif *vif = netdev_priv(dev);
+ u16 interval;
+ int ret;
+ u8 i;
+
+ if (ar->state != ATH6KL_STATE_ON)
+ return -EIO;
+
+ if (vif->sme_state != SME_DISCONNECTED)
+ return -EBUSY;
+
+ for (i = 0; i < ar->wiphy->max_sched_scan_ssids; i++) {
+ ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx,
+ i, DISABLE_SSID_FLAG,
+ 0, NULL);
}
+
+ /* the firmware uses seconds; also make sure the interval is > 0 */
+ interval = max_t(u16, 1, request->interval / 1000);
+
+ ath6kl_wmi_scanparams_cmd(ar->wmi, vif->fw_vif_idx,
+ interval, interval,
+ 10, 0, 0, 0, 3, 0, 0, 0);
+
+ if (request->n_ssids && request->ssids[0].ssid_len) {
+ for (i = 0; i < request->n_ssids; i++) {
+ ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx,
+ i, SPECIFIC_SSID_FLAG,
+ request->ssids[i].ssid_len,
+ request->ssids[i].ssid);
+ }
+ }
+
+ ret = ath6kl_wmi_set_wow_mode_cmd(ar->wmi, vif->fw_vif_idx,
+ ATH6KL_WOW_MODE_ENABLE,
+ WOW_FILTER_SSID,
+ WOW_HOST_REQ_DELAY);
+ if (ret) {
+ ath6kl_warn("Failed to enable wow with ssid filter: %d\n", ret);
+ return ret;
+ }
+
+ /* this also clears the IE in the firmware if it is not set */
+ ret = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx,
+ WMI_FRAME_PROBE_REQ,
+ request->ie, request->ie_len);
+ if (ret) {
+ ath6kl_warn("Failed to set probe request IE for scheduled scan: %d",
+ ret);
+ return ret;
+ }
+
+ ret = ath6kl_wmi_set_host_sleep_mode_cmd(ar->wmi, vif->fw_vif_idx,
+ ATH6KL_HOST_MODE_ASLEEP);
+ if (ret) {
+ ath6kl_warn("Failed to enable host sleep mode for sched scan: %d\n",
+ ret);
+ return ret;
+ }
+
+ ar->state = ATH6KL_STATE_SCHED_SCAN;
+
+ return ret;
+}
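
The interval handling in ath6kl_cfg80211_sscan_start() boils down to a millisecond-to-second conversion clamped to a minimum of one; a small illustrative helper (the name is not from the patch):

    #include <stdint.h>

    /* cfg80211 reports the scheduled-scan interval in milliseconds; the
     * firmware expects whole seconds and needs a value > 0. */
    static uint16_t sscan_interval_secs(uint32_t interval_ms)
    {
            uint32_t secs = interval_ms / 1000;

            return (uint16_t)(secs ? secs : 1);
    }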
+
+static int ath6kl_cfg80211_sscan_stop(struct wiphy *wiphy,
+ struct net_device *dev)
+{
+ struct ath6kl_vif *vif = netdev_priv(dev);
+ bool stopped;
+
+ stopped = __ath6kl_cfg80211_sscan_stop(vif);
+
+ if (!stopped)
+ return -EIO;
+
+ return 0;
}
static const struct ieee80211_txrx_stypes
@@ -1808,6 +2533,8 @@ ath6kl_mgmt_stypes[NUM_NL80211_IFTYPES] = {
};
static struct cfg80211_ops ath6kl_cfg80211_ops = {
+ .add_virtual_intf = ath6kl_cfg80211_add_iface,
+ .del_virtual_intf = ath6kl_cfg80211_del_iface,
.change_virtual_intf = ath6kl_cfg80211_change_iface,
.scan = ath6kl_cfg80211_scan,
.connect = ath6kl_cfg80211_connect,
@@ -1828,7 +2555,8 @@ static struct cfg80211_ops ath6kl_cfg80211_ops = {
.flush_pmksa = ath6kl_flush_pmksa,
CFG80211_TESTMODE_CMD(ath6kl_tm_cmd)
#ifdef CONFIG_PM
- .suspend = ar6k_cfg80211_suspend,
+ .suspend = __ath6kl_cfg80211_suspend,
+ .resume = __ath6kl_cfg80211_resume,
#endif
.set_channel = ath6kl_set_channel,
.add_beacon = ath6kl_add_beacon,
@@ -1839,78 +2567,277 @@ static struct cfg80211_ops ath6kl_cfg80211_ops = {
.cancel_remain_on_channel = ath6kl_cancel_remain_on_channel,
.mgmt_tx = ath6kl_mgmt_tx,
.mgmt_frame_register = ath6kl_mgmt_frame_register,
+ .sched_scan_start = ath6kl_cfg80211_sscan_start,
+ .sched_scan_stop = ath6kl_cfg80211_sscan_stop,
};
-struct wireless_dev *ath6kl_cfg80211_init(struct device *dev)
+void ath6kl_cfg80211_stop(struct ath6kl_vif *vif)
{
- int ret = 0;
- struct wireless_dev *wdev;
- struct ath6kl *ar;
+ ath6kl_cfg80211_sscan_disable(vif);
- wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
- if (!wdev) {
- ath6kl_err("couldn't allocate wireless device\n");
- return NULL;
+ switch (vif->sme_state) {
+ case SME_DISCONNECTED:
+ break;
+ case SME_CONNECTING:
+ cfg80211_connect_result(vif->ndev, vif->bssid, NULL, 0,
+ NULL, 0,
+ WLAN_STATUS_UNSPECIFIED_FAILURE,
+ GFP_KERNEL);
+ break;
+ case SME_CONNECTED:
+ cfg80211_disconnected(vif->ndev, 0, NULL, 0, GFP_KERNEL);
+ break;
+ }
+
+ if (test_bit(CONNECTED, &vif->flags) ||
+ test_bit(CONNECT_PEND, &vif->flags))
+ ath6kl_wmi_disconnect_cmd(vif->ar->wmi, vif->fw_vif_idx);
+
+ vif->sme_state = SME_DISCONNECTED;
+ clear_bit(CONNECTED, &vif->flags);
+ clear_bit(CONNECT_PEND, &vif->flags);
+
+ /* disable scanning */
+ if (ath6kl_wmi_scanparams_cmd(vif->ar->wmi, vif->fw_vif_idx, 0xFFFF,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0) != 0)
+ ath6kl_warn("failed to disable scan during stop\n");
+
+ ath6kl_cfg80211_scan_complete_event(vif, true);
+}
+
+void ath6kl_cfg80211_stop_all(struct ath6kl *ar)
+{
+ struct ath6kl_vif *vif;
+
+ vif = ath6kl_vif_first(ar);
+ if (!vif) {
+ /* save the current power mode before enabling power save */
+ ar->wmi->saved_pwr_mode = ar->wmi->pwr_mode;
+
+ if (ath6kl_wmi_powermode_cmd(ar->wmi, 0, REC_POWER) != 0)
+ ath6kl_warn("ath6kl_deep_sleep_enable: "
+ "wmi_powermode_cmd failed\n");
+ return;
}
+ /*
+ * FIXME: we should take ar->list_lock to protect changes in the
+ * vif_list, but that's not trivial to do as ath6kl_cfg80211_stop()
+ * sleeps.
+ */
+ list_for_each_entry(vif, &ar->vif_list, list)
+ ath6kl_cfg80211_stop(vif);
+}
+
+struct ath6kl *ath6kl_core_alloc(struct device *dev)
+{
+ struct ath6kl *ar;
+ struct wiphy *wiphy;
+ u8 ctr;
+
/* create a new wiphy for use with cfg80211 */
- wdev->wiphy = wiphy_new(&ath6kl_cfg80211_ops, sizeof(struct ath6kl));
- if (!wdev->wiphy) {
+ wiphy = wiphy_new(&ath6kl_cfg80211_ops, sizeof(struct ath6kl));
+
+ if (!wiphy) {
ath6kl_err("couldn't allocate wiphy device\n");
- kfree(wdev);
return NULL;
}
- ar = wiphy_priv(wdev->wiphy);
+ ar = wiphy_priv(wiphy);
ar->p2p = !!ath6kl_p2p;
+ ar->wiphy = wiphy;
+ ar->dev = dev;
- wdev->wiphy->mgmt_stypes = ath6kl_mgmt_stypes;
+ ar->vif_max = 1;
- wdev->wiphy->max_remain_on_channel_duration = 5000;
+ ar->max_norm_iface = 1;
+
+ spin_lock_init(&ar->lock);
+ spin_lock_init(&ar->mcastpsq_lock);
+ spin_lock_init(&ar->list_lock);
+
+ init_waitqueue_head(&ar->event_wq);
+ sema_init(&ar->sem, 1);
+
+ INIT_LIST_HEAD(&ar->amsdu_rx_buffer_queue);
+ INIT_LIST_HEAD(&ar->vif_list);
+
+ clear_bit(WMI_ENABLED, &ar->flag);
+ clear_bit(SKIP_SCAN, &ar->flag);
+ clear_bit(DESTROY_IN_PROGRESS, &ar->flag);
+
+ ar->listen_intvl_t = A_DEFAULT_LISTEN_INTERVAL;
+ ar->listen_intvl_b = 0;
+ ar->tx_pwr = 0;
+
+ ar->intra_bss = 1;
+ ar->lrssi_roam_threshold = DEF_LRSSI_ROAM_THRESHOLD;
+
+ ar->state = ATH6KL_STATE_OFF;
+
+ memset((u8 *)ar->sta_list, 0,
+ AP_MAX_NUM_STA * sizeof(struct ath6kl_sta));
+
+ /* Init the PS queues */
+ for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) {
+ spin_lock_init(&ar->sta_list[ctr].psq_lock);
+ skb_queue_head_init(&ar->sta_list[ctr].psq);
+ }
+
+ skb_queue_head_init(&ar->mcastpsq);
+
+ memcpy(ar->ap_country_code, DEF_AP_COUNTRY_CODE, 3);
+
+ return ar;
+}
+
+int ath6kl_register_ieee80211_hw(struct ath6kl *ar)
+{
+ struct wiphy *wiphy = ar->wiphy;
+ int ret;
+
+ wiphy->mgmt_stypes = ath6kl_mgmt_stypes;
+
+ wiphy->max_remain_on_channel_duration = 5000;
/* set device pointer for wiphy */
- set_wiphy_dev(wdev->wiphy, dev);
+ set_wiphy_dev(wiphy, ar->dev);
- wdev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP);
+ wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC) |
+ BIT(NL80211_IFTYPE_AP);
if (ar->p2p) {
- wdev->wiphy->interface_modes |= BIT(NL80211_IFTYPE_P2P_GO) |
- BIT(NL80211_IFTYPE_P2P_CLIENT);
+ wiphy->interface_modes |= BIT(NL80211_IFTYPE_P2P_GO) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT);
}
- /* max num of ssids that can be probed during scanning */
- wdev->wiphy->max_scan_ssids = MAX_PROBED_SSID_INDEX;
- wdev->wiphy->max_scan_ie_len = 1000; /* FIX: what is correct limit? */
- wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &ath6kl_band_2ghz;
- wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = &ath6kl_band_5ghz;
- wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
-
- wdev->wiphy->cipher_suites = cipher_suites;
- wdev->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
- ret = wiphy_register(wdev->wiphy);
+ /* max num of ssids that can be probed during scanning */
+ wiphy->max_scan_ssids = MAX_PROBED_SSID_INDEX;
+ wiphy->max_scan_ie_len = 1000; /* FIX: what is correct limit? */
+ wiphy->bands[IEEE80211_BAND_2GHZ] = &ath6kl_band_2ghz;
+ wiphy->bands[IEEE80211_BAND_5GHZ] = &ath6kl_band_5ghz;
+ wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
+
+ wiphy->cipher_suites = cipher_suites;
+ wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
+
+ wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT |
+ WIPHY_WOWLAN_DISCONNECT |
+ WIPHY_WOWLAN_GTK_REKEY_FAILURE |
+ WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
+ WIPHY_WOWLAN_EAP_IDENTITY_REQ |
+ WIPHY_WOWLAN_4WAY_HANDSHAKE;
+ wiphy->wowlan.n_patterns = WOW_MAX_FILTERS_PER_LIST;
+ wiphy->wowlan.pattern_min_len = 1;
+ wiphy->wowlan.pattern_max_len = WOW_PATTERN_SIZE;
+
+ wiphy->max_sched_scan_ssids = 10;
+
+ ret = wiphy_register(wiphy);
if (ret < 0) {
ath6kl_err("couldn't register wiphy device\n");
- wiphy_free(wdev->wiphy);
- kfree(wdev);
- return NULL;
+ return ret;
}
- return wdev;
+ return 0;
}
-void ath6kl_cfg80211_deinit(struct ath6kl *ar)
+static int ath6kl_init_if_data(struct ath6kl_vif *vif)
{
- struct wireless_dev *wdev = ar->wdev;
-
- if (ar->scan_req) {
- cfg80211_scan_done(ar->scan_req, true);
- ar->scan_req = NULL;
+ vif->aggr_cntxt = aggr_init(vif->ndev);
+ if (!vif->aggr_cntxt) {
+ ath6kl_err("failed to initialize aggr\n");
+ return -ENOMEM;
}
- if (!wdev)
- return;
+ setup_timer(&vif->disconnect_timer, disconnect_timer_handler,
+ (unsigned long) vif->ndev);
+ setup_timer(&vif->sched_scan_timer, ath6kl_wmi_sscan_timer,
+ (unsigned long) vif);
+
+ set_bit(WMM_ENABLED, &vif->flags);
+ spin_lock_init(&vif->if_lock);
- wiphy_unregister(wdev->wiphy);
- wiphy_free(wdev->wiphy);
- kfree(wdev);
+ return 0;
+}
+
+void ath6kl_deinit_if_data(struct ath6kl_vif *vif)
+{
+ struct ath6kl *ar = vif->ar;
+
+ aggr_module_destroy(vif->aggr_cntxt);
+
+ ar->avail_idx_map |= BIT(vif->fw_vif_idx);
+
+ if (vif->nw_type == ADHOC_NETWORK)
+ ar->ibss_if_active = false;
+
+ unregister_netdevice(vif->ndev);
+
+ ar->num_vif--;
+}
+
+struct net_device *ath6kl_interface_add(struct ath6kl *ar, char *name,
+ enum nl80211_iftype type, u8 fw_vif_idx,
+ u8 nw_type)
+{
+ struct net_device *ndev;
+ struct ath6kl_vif *vif;
+
+ ndev = alloc_netdev(sizeof(*vif), name, ether_setup);
+ if (!ndev)
+ return NULL;
+
+ vif = netdev_priv(ndev);
+ ndev->ieee80211_ptr = &vif->wdev;
+ vif->wdev.wiphy = ar->wiphy;
+ vif->ar = ar;
+ vif->ndev = ndev;
+ SET_NETDEV_DEV(ndev, wiphy_dev(vif->wdev.wiphy));
+ vif->wdev.netdev = ndev;
+ vif->wdev.iftype = type;
+ vif->fw_vif_idx = fw_vif_idx;
+ vif->nw_type = vif->next_mode = nw_type;
+
+ memcpy(ndev->dev_addr, ar->mac_addr, ETH_ALEN);
+ if (fw_vif_idx != 0)
+ ndev->dev_addr[0] = (ndev->dev_addr[0] ^ (1 << fw_vif_idx)) |
+ 0x2;
+
+ init_netdev(ndev);
+
+ ath6kl_init_control_info(vif);
+
+ /* TODO: Pass interface specific pointer instead of ar */
+ if (ath6kl_init_if_data(vif))
+ goto err;
+
+ if (register_netdevice(ndev))
+ goto err;
+
+ ar->avail_idx_map &= ~BIT(fw_vif_idx);
+ vif->sme_state = SME_DISCONNECTED;
+ set_bit(WLAN_ENABLED, &vif->flags);
+ ar->wlan_pwr_state = WLAN_POWER_STATE_ON;
+ set_bit(NETDEV_REGISTERED, &vif->flags);
+
+ if (type == NL80211_IFTYPE_ADHOC)
+ ar->ibss_if_active = true;
+
+ spin_lock_bh(&ar->list_lock);
+ list_add_tail(&vif->list, &ar->vif_list);
+ spin_unlock_bh(&ar->list_lock);
+
+ return ndev;
+
+err:
+ aggr_module_destroy(vif->aggr_cntxt);
+ free_netdev(ndev);
+ return NULL;
+}
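
The per-vif MAC derivation in ath6kl_interface_add() can be read in isolation as follows; this is a standalone sketch with illustrative names:

    #include <stdint.h>
    #include <string.h>

    #define ETH_ALEN 6

    /* Secondary interfaces get a distinct MAC: flip one bit chosen by the
     * firmware vif index and set the locally-administered bit (0x02). */
    static void derive_vif_mac(const uint8_t *base, uint8_t fw_vif_idx,
                               uint8_t mac[ETH_ALEN])
    {
            memcpy(mac, base, ETH_ALEN);
            if (fw_vif_idx != 0)
                    mac[0] = (mac[0] ^ (1 << fw_vif_idx)) | 0x02;
    }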
+
+void ath6kl_deinit_ieee80211_hw(struct ath6kl *ar)
+{
+ wiphy_unregister(ar->wiphy);
+ wiphy_free(ar->wiphy);
}
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.h b/drivers/net/wireless/ath/ath6kl/cfg80211.h
index a84adc249c61..81f20a572315 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.h
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.h
@@ -17,23 +17,43 @@
#ifndef ATH6KL_CFG80211_H
#define ATH6KL_CFG80211_H
-struct wireless_dev *ath6kl_cfg80211_init(struct device *dev);
-void ath6kl_cfg80211_deinit(struct ath6kl *ar);
+enum ath6kl_cfg_suspend_mode {
+ ATH6KL_CFG_SUSPEND_DEEPSLEEP,
+ ATH6KL_CFG_SUSPEND_CUTPOWER,
+ ATH6KL_CFG_SUSPEND_WOW,
+ ATH6KL_CFG_SUSPEND_SCHED_SCAN,
+};
-void ath6kl_cfg80211_scan_complete_event(struct ath6kl *ar, int status);
+struct net_device *ath6kl_interface_add(struct ath6kl *ar, char *name,
+ enum nl80211_iftype type,
+ u8 fw_vif_idx, u8 nw_type);
+int ath6kl_register_ieee80211_hw(struct ath6kl *ar);
+struct ath6kl *ath6kl_core_alloc(struct device *dev);
+void ath6kl_deinit_ieee80211_hw(struct ath6kl *ar);
-void ath6kl_cfg80211_connect_event(struct ath6kl *ar, u16 channel,
+void ath6kl_cfg80211_scan_complete_event(struct ath6kl_vif *vif, bool aborted);
+
+void ath6kl_cfg80211_connect_event(struct ath6kl_vif *vif, u16 channel,
u8 *bssid, u16 listen_intvl,
u16 beacon_intvl,
enum network_type nw_type,
u8 beacon_ie_len, u8 assoc_req_len,
u8 assoc_resp_len, u8 *assoc_info);
-void ath6kl_cfg80211_disconnect_event(struct ath6kl *ar, u8 reason,
+void ath6kl_cfg80211_disconnect_event(struct ath6kl_vif *vif, u8 reason,
u8 *bssid, u8 assoc_resp_len,
u8 *assoc_info, u16 proto_reason);
-void ath6kl_cfg80211_tkip_micerr_event(struct ath6kl *ar, u8 keyid,
+void ath6kl_cfg80211_tkip_micerr_event(struct ath6kl_vif *vif, u8 keyid,
bool ismcast);
+int ath6kl_cfg80211_suspend(struct ath6kl *ar,
+ enum ath6kl_cfg_suspend_mode mode,
+ struct cfg80211_wowlan *wow);
+
+int ath6kl_cfg80211_resume(struct ath6kl *ar);
+
+void ath6kl_cfg80211_stop(struct ath6kl_vif *vif);
+void ath6kl_cfg80211_stop_all(struct ath6kl *ar);
+
#endif /* ATH6KL_CFG80211_H */
diff --git a/drivers/net/wireless/ath/ath6kl/common.h b/drivers/net/wireless/ath/ath6kl/common.h
index b92f0e5d2336..bfd6597763da 100644
--- a/drivers/net/wireless/ath/ath6kl/common.h
+++ b/drivers/net/wireless/ath/ath6kl/common.h
@@ -23,8 +23,6 @@
extern int ath6kl_printk(const char *level, const char *fmt, ...);
-#define A_CACHE_LINE_PAD 128
-
/*
* Reflects the version of binary interface exposed by ATH6KL target
* firmware. Needs to be incremented by 1 for any change in the firmware
@@ -73,25 +71,16 @@ enum crypto_type {
WEP_CRYPT = 0x02,
TKIP_CRYPT = 0x04,
AES_CRYPT = 0x08,
+ WAPI_CRYPT = 0x10,
};
struct htc_endpoint_credit_dist;
struct ath6kl;
enum htc_credit_dist_reason;
-struct htc_credit_state_info;
+struct ath6kl_htc_credit_info;
-int ath6k_setup_credit_dist(void *htc_handle,
- struct htc_credit_state_info *cred_info);
-void ath6k_credit_distribute(struct htc_credit_state_info *cred_inf,
- struct list_head *epdist_list,
- enum htc_credit_dist_reason reason);
-void ath6k_credit_init(struct htc_credit_state_info *cred_inf,
- struct list_head *ep_list,
- int tot_credits);
-void ath6k_seek_credits(struct htc_credit_state_info *cred_inf,
- struct htc_endpoint_credit_dist *ep_dist);
struct ath6kl *ath6kl_core_alloc(struct device *sdev);
int ath6kl_core_init(struct ath6kl *ar);
-int ath6kl_unavail_ev(struct ath6kl *ar);
+void ath6kl_core_cleanup(struct ath6kl *ar);
struct sk_buff *ath6kl_buf_alloc(int size);
#endif /* COMMON_H */
diff --git a/drivers/net/wireless/ath/ath6kl/core.h b/drivers/net/wireless/ath/ath6kl/core.h
index 6d8a4845baaf..c863a28f2e0c 100644
--- a/drivers/net/wireless/ath/ath6kl/core.h
+++ b/drivers/net/wireless/ath/ath6kl/core.h
@@ -70,10 +70,20 @@ enum ath6kl_fw_ie_type {
ATH6KL_FW_IE_RESERVED_RAM_SIZE = 5,
ATH6KL_FW_IE_CAPABILITIES = 6,
ATH6KL_FW_IE_PATCH_ADDR = 7,
+ ATH6KL_FW_IE_BOARD_ADDR = 8,
+ ATH6KL_FW_IE_VIF_MAX = 9,
};
enum ath6kl_fw_capability {
ATH6KL_FW_CAPABILITY_HOST_P2P = 0,
+ ATH6KL_FW_CAPABILITY_SCHED_SCAN = 1,
+
+ /*
+ * Firmware is capable of supporting P2P mgmt operations on a
+ * station interface. After group formation, the station
+ * interface will become a P2P client/GO interface, as the case may be.
+ */
+ ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX,
/* this needs to be last */
ATH6KL_FW_CAPABILITY_MAX,
@@ -88,37 +98,47 @@ struct ath6kl_fw_ie {
};
/* AR6003 1.0 definitions */
-#define AR6003_REV1_VERSION 0x300002ba
+#define AR6003_HW_1_0_VERSION 0x300002ba
/* AR6003 2.0 definitions */
-#define AR6003_REV2_VERSION 0x30000384
-#define AR6003_REV2_PATCH_DOWNLOAD_ADDRESS 0x57e910
-#define AR6003_REV2_OTP_FILE "ath6k/AR6003/hw2.0/otp.bin.z77"
-#define AR6003_REV2_FIRMWARE_FILE "ath6k/AR6003/hw2.0/athwlan.bin.z77"
-#define AR6003_REV2_TCMD_FIRMWARE_FILE "ath6k/AR6003/hw2.0/athtcmd_ram.bin"
-#define AR6003_REV2_PATCH_FILE "ath6k/AR6003/hw2.0/data.patch.bin"
-#define AR6003_REV2_FIRMWARE_2_FILE "ath6k/AR6003/hw2.0/fw-2.bin"
-#define AR6003_REV2_BOARD_DATA_FILE "ath6k/AR6003/hw2.0/bdata.bin"
-#define AR6003_REV2_DEFAULT_BOARD_DATA_FILE "ath6k/AR6003/hw2.0/bdata.SD31.bin"
+#define AR6003_HW_2_0_VERSION 0x30000384
+#define AR6003_HW_2_0_PATCH_DOWNLOAD_ADDRESS 0x57e910
+#define AR6003_HW_2_0_OTP_FILE "ath6k/AR6003/hw2.0/otp.bin.z77"
+#define AR6003_HW_2_0_FIRMWARE_FILE "ath6k/AR6003/hw2.0/athwlan.bin.z77"
+#define AR6003_HW_2_0_TCMD_FIRMWARE_FILE "ath6k/AR6003/hw2.0/athtcmd_ram.bin"
+#define AR6003_HW_2_0_PATCH_FILE "ath6k/AR6003/hw2.0/data.patch.bin"
+#define AR6003_HW_2_0_FIRMWARE_2_FILE "ath6k/AR6003/hw2.0/fw-2.bin"
+#define AR6003_HW_2_0_BOARD_DATA_FILE "ath6k/AR6003/hw2.0/bdata.bin"
+#define AR6003_HW_2_0_DEFAULT_BOARD_DATA_FILE \
+ "ath6k/AR6003/hw2.0/bdata.SD31.bin"
/* AR6003 3.0 definitions */
-#define AR6003_REV3_VERSION 0x30000582
-#define AR6003_REV3_OTP_FILE "ath6k/AR6003/hw2.1.1/otp.bin"
-#define AR6003_REV3_FIRMWARE_FILE "ath6k/AR6003/hw2.1.1/athwlan.bin"
-#define AR6003_REV3_TCMD_FIRMWARE_FILE "ath6k/AR6003/hw2.1.1/athtcmd_ram.bin"
-#define AR6003_REV3_PATCH_FILE "ath6k/AR6003/hw2.1.1/data.patch.bin"
-#define AR6003_REV3_FIRMWARE_2_FILE "ath6k/AR6003/hw2.1.1/fw-2.bin"
-#define AR6003_REV3_BOARD_DATA_FILE "ath6k/AR6003/hw2.1.1/bdata.bin"
-#define AR6003_REV3_DEFAULT_BOARD_DATA_FILE \
- "ath6k/AR6003/hw2.1.1/bdata.SD31.bin"
+#define AR6003_HW_2_1_1_VERSION 0x30000582
+#define AR6003_HW_2_1_1_OTP_FILE "ath6k/AR6003/hw2.1.1/otp.bin"
+#define AR6003_HW_2_1_1_FIRMWARE_FILE "ath6k/AR6003/hw2.1.1/athwlan.bin"
+#define AR6003_HW_2_1_1_TCMD_FIRMWARE_FILE \
+ "ath6k/AR6003/hw2.1.1/athtcmd_ram.bin"
+#define AR6003_HW_2_1_1_PATCH_FILE "ath6k/AR6003/hw2.1.1/data.patch.bin"
+#define AR6003_HW_2_1_1_FIRMWARE_2_FILE "ath6k/AR6003/hw2.1.1/fw-2.bin"
+#define AR6003_HW_2_1_1_BOARD_DATA_FILE "ath6k/AR6003/hw2.1.1/bdata.bin"
+#define AR6003_HW_2_1_1_DEFAULT_BOARD_DATA_FILE \
+ "ath6k/AR6003/hw2.1.1/bdata.SD31.bin"
/* AR6004 1.0 definitions */
-#define AR6004_REV1_VERSION 0x30000623
-#define AR6004_REV1_FIRMWARE_FILE "ath6k/AR6004/hw6.1/fw.ram.bin"
-#define AR6004_REV1_FIRMWARE_2_FILE "ath6k/AR6004/hw6.1/fw-2.bin"
-#define AR6004_REV1_BOARD_DATA_FILE "ath6k/AR6004/hw6.1/bdata.bin"
-#define AR6004_REV1_DEFAULT_BOARD_DATA_FILE "ath6k/AR6004/hw6.1/bdata.DB132.bin"
-#define AR6004_REV1_EPPING_FIRMWARE_FILE "ath6k/AR6004/hw6.1/endpointping.bin"
+#define AR6004_HW_1_0_VERSION 0x30000623
+#define AR6004_HW_1_0_FIRMWARE_2_FILE "ath6k/AR6004/hw1.0/fw-2.bin"
+#define AR6004_HW_1_0_FIRMWARE_FILE "ath6k/AR6004/hw1.0/fw.ram.bin"
+#define AR6004_HW_1_0_BOARD_DATA_FILE "ath6k/AR6004/hw1.0/bdata.bin"
+#define AR6004_HW_1_0_DEFAULT_BOARD_DATA_FILE \
+ "ath6k/AR6004/hw1.0/bdata.DB132.bin"
+
+/* AR6004 1.1 definitions */
+#define AR6004_HW_1_1_VERSION 0x30000001
+#define AR6004_HW_1_1_FIRMWARE_2_FILE "ath6k/AR6004/hw1.1/fw-2.bin"
+#define AR6004_HW_1_1_FIRMWARE_FILE "ath6k/AR6004/hw1.1/fw.ram.bin"
+#define AR6004_HW_1_1_BOARD_DATA_FILE "ath6k/AR6004/hw1.1/bdata.bin"
+#define AR6004_HW_1_1_DEFAULT_BOARD_DATA_FILE \
+ "ath6k/AR6004/hw1.1/bdata.DB132.bin"
/* Per STA data, used in AP mode */
#define STA_PS_AWAKE BIT(0)
@@ -166,6 +186,7 @@ struct ath6kl_fw_ie {
#define ATH6KL_CONF_IGNORE_PS_FAIL_EVT_IN_SCAN BIT(1)
#define ATH6KL_CONF_ENABLE_11N BIT(2)
#define ATH6KL_CONF_ENABLE_TX_BURST BIT(3)
+#define ATH6KL_CONF_SUSPEND_CUTPOWER BIT(4)
enum wlan_low_pwr_state {
WLAN_POWER_STATE_ON,
@@ -271,6 +292,8 @@ struct ath6kl_bmi {
u32 cmd_credits;
bool done_sent;
u8 *cmd_buf;
+ u32 max_data_size;
+ u32 max_cmd_size;
};
struct target_stats {
@@ -380,40 +403,42 @@ struct ath6kl_req_key {
u8 key_len;
};
-/* Flag info */
-#define WMI_ENABLED 0
-#define WMI_READY 1
-#define CONNECTED 2
-#define STATS_UPDATE_PEND 3
-#define CONNECT_PEND 4
-#define WMM_ENABLED 5
-#define NETQ_STOPPED 6
-#define WMI_CTRL_EP_FULL 7
-#define DTIM_EXPIRED 8
-#define DESTROY_IN_PROGRESS 9
-#define NETDEV_REGISTERED 10
-#define SKIP_SCAN 11
-#define WLAN_ENABLED 12
-#define TESTMODE 13
-#define CLEAR_BSSFILTER_ON_BEACON 14
-#define DTIM_PERIOD_AVAIL 15
+enum ath6kl_hif_type {
+ ATH6KL_HIF_TYPE_SDIO,
+ ATH6KL_HIF_TYPE_USB,
+};
-struct ath6kl {
- struct device *dev;
- struct net_device *net_dev;
- struct ath6kl_bmi bmi;
- const struct ath6kl_hif_ops *hif_ops;
- struct wmi *wmi;
- int tx_pending[ENDPOINT_MAX];
- int total_tx_data_pend;
- struct htc_target *htc_target;
- void *hif_priv;
- spinlock_t lock;
- struct semaphore sem;
+/*
+ * Driver's maximum limit; note that some firmware versions support only
+ * one vif, and the runtime (current) limit must be checked from ar->vif_max.
+ */
+#define ATH6KL_VIF_MAX 3
+
+/* vif flags info */
+enum ath6kl_vif_state {
+ CONNECTED,
+ CONNECT_PEND,
+ WMM_ENABLED,
+ NETQ_STOPPED,
+ DTIM_EXPIRED,
+ NETDEV_REGISTERED,
+ CLEAR_BSSFILTER_ON_BEACON,
+ DTIM_PERIOD_AVAIL,
+ WLAN_ENABLED,
+ STATS_UPDATE_PEND,
+};
+
+struct ath6kl_vif {
+ struct list_head list;
+ struct wireless_dev wdev;
+ struct net_device *ndev;
+ struct ath6kl *ar;
+ /* Lock to protect vif specific net_stats and flags */
+ spinlock_t if_lock;
+ u8 fw_vif_idx;
+ unsigned long flags;
int ssid_len;
u8 ssid[IEEE80211_MAX_SSID_LEN];
- u8 next_mode;
- u8 nw_type;
u8 dot11_auth_mode;
u8 auth_mode;
u8 prwise_crypto;
@@ -421,21 +446,91 @@ struct ath6kl {
u8 grp_crypto;
u8 grp_crypto_len;
u8 def_txkey_index;
- struct ath6kl_wep_key wep_key_list[WMI_MAX_KEY_INDEX + 1];
+ u8 next_mode;
+ u8 nw_type;
u8 bssid[ETH_ALEN];
u8 req_bssid[ETH_ALEN];
u16 ch_hint;
u16 bss_ch;
+ struct ath6kl_wep_key wep_key_list[WMI_MAX_KEY_INDEX + 1];
+ struct ath6kl_key keys[WMI_MAX_KEY_INDEX + 1];
+ struct aggr_info *aggr_cntxt;
+
+ struct timer_list disconnect_timer;
+ struct timer_list sched_scan_timer;
+
+ struct cfg80211_scan_request *scan_req;
+ enum sme_state sme_state;
+ int reconnect_flag;
+ u32 last_roc_id;
+ u32 last_cancel_roc_id;
+ u32 send_action_id;
+ bool probe_req_report;
+ u16 next_chan;
+ u16 assoc_bss_beacon_int;
+ u8 assoc_bss_dtim_period;
+ struct net_device_stats net_stats;
+ struct target_stats target_stats;
+};
+
+#define WOW_LIST_ID 0
+#define WOW_HOST_REQ_DELAY 500 /* ms */
+
+#define ATH6KL_SCHED_SCAN_RESULT_DELAY 5000 /* ms */
+
+/* Flag info */
+enum ath6kl_dev_state {
+ WMI_ENABLED,
+ WMI_READY,
+ WMI_CTRL_EP_FULL,
+ TESTMODE,
+ DESTROY_IN_PROGRESS,
+ SKIP_SCAN,
+ ROAM_TBL_PEND,
+ FIRST_BOOT,
+};
+
+enum ath6kl_state {
+ ATH6KL_STATE_OFF,
+ ATH6KL_STATE_ON,
+ ATH6KL_STATE_DEEPSLEEP,
+ ATH6KL_STATE_CUTPOWER,
+ ATH6KL_STATE_WOW,
+ ATH6KL_STATE_SCHED_SCAN,
+};
+
+struct ath6kl {
+ struct device *dev;
+ struct wiphy *wiphy;
+
+ enum ath6kl_state state;
+
+ struct ath6kl_bmi bmi;
+ const struct ath6kl_hif_ops *hif_ops;
+ struct wmi *wmi;
+ int tx_pending[ENDPOINT_MAX];
+ int total_tx_data_pend;
+ struct htc_target *htc_target;
+ enum ath6kl_hif_type hif_type;
+ void *hif_priv;
+ struct list_head vif_list;
+ /* Lock to avoid race in vif_list entries among add/del/traverse */
+ spinlock_t list_lock;
+ u8 num_vif;
+ unsigned int vif_max;
+ u8 max_norm_iface;
+ u8 avail_idx_map;
+ spinlock_t lock;
+ struct semaphore sem;
u16 listen_intvl_b;
u16 listen_intvl_t;
u8 lrssi_roam_threshold;
struct ath6kl_version version;
u32 target_type;
u8 tx_pwr;
- struct net_device_stats net_stats;
- struct target_stats target_stats;
struct ath6kl_node_mapping node_map[MAX_NODE_NUM];
u8 ibss_ps_enable;
+ bool ibss_if_active;
u8 node_num;
u8 next_ep_id;
struct ath6kl_cookie *cookie_list;
@@ -446,7 +541,7 @@ struct ath6kl {
u8 hiac_stream_active_pri;
u8 ep2ac_map[ENDPOINT_MAX];
enum htc_endpoint_id ctrl_ep;
- struct htc_credit_state_info credit_state_info;
+ struct ath6kl_htc_credit_info credit_state_info;
u32 connect_ctrl_flags;
u32 user_key_ctrl;
u8 usr_bss_filter;
@@ -456,30 +551,37 @@ struct ath6kl {
struct sk_buff_head mcastpsq;
spinlock_t mcastpsq_lock;
u8 intra_bss;
- struct aggr_info *aggr_cntxt;
struct wmi_ap_mode_stat ap_stats;
u8 ap_country_code[3];
struct list_head amsdu_rx_buffer_queue;
- struct timer_list disconnect_timer;
u8 rx_meta_ver;
- struct wireless_dev *wdev;
- struct cfg80211_scan_request *scan_req;
- struct ath6kl_key keys[WMI_MAX_KEY_INDEX + 1];
- enum sme_state sme_state;
enum wlan_low_pwr_state wlan_pwr_state;
- struct wmi_scan_params_cmd sc_params;
+ u8 mac_addr[ETH_ALEN];
#define AR_MCAST_FILTER_MAC_ADDR_SIZE 4
struct {
void *rx_report;
size_t rx_report_len;
} tm;
- struct {
+ struct ath6kl_hw {
+ u32 id;
+ const char *name;
u32 dataset_patch_addr;
u32 app_load_addr;
u32 app_start_override_addr;
u32 board_ext_data_addr;
u32 reserved_ram_size;
+ u32 board_addr;
+ u32 refclk_hz;
+ u32 uarttx_pin;
+
+ const char *fw_otp;
+ const char *fw;
+ const char *fw_tcmd;
+ const char *fw_patch;
+ const char *fw_api2;
+ const char *fw_board;
+ const char *fw_default_board;
} hw;
u16 conf_flags;
@@ -487,7 +589,6 @@ struct ath6kl {
struct ath6kl_mbox_info mbox_info;
struct ath6kl_cookie cookie_mem[MAX_COOKIE_NUM];
- int reconnect_flag;
unsigned long flag;
u8 *fw_board;
@@ -508,13 +609,7 @@ struct ath6kl {
struct dentry *debugfs_phy;
- u32 send_action_id;
- bool probe_req_report;
- u16 next_chan;
-
bool p2p;
- u16 assoc_bss_beacon_int;
- u8 assoc_bss_dtim_period;
#ifdef CONFIG_ATH6KL_DEBUG
struct {
@@ -529,23 +624,19 @@ struct ath6kl {
struct {
unsigned int invalid_rate;
} war_stats;
+
+ u8 *roam_tbl;
+ unsigned int roam_tbl_len;
+
+ u8 keepalive;
+ u8 disc_timeout;
} debug;
#endif /* CONFIG_ATH6KL_DEBUG */
};
-static inline void *ath6kl_priv(struct net_device *dev)
+static inline struct ath6kl *ath6kl_priv(struct net_device *dev)
{
- return wdev_priv(dev->ieee80211_ptr);
-}
-
-static inline void ath6kl_deposit_credit_to_ep(struct htc_credit_state_info
- *cred_info,
- struct htc_endpoint_credit_dist
- *ep_dist, int credits)
-{
- ep_dist->credits += credits;
- ep_dist->cred_assngd += credits;
- cred_info->cur_free_credits -= credits;
+ return ((struct ath6kl_vif *) netdev_priv(dev))->ar;
}
static inline u32 ath6kl_get_hi_item_addr(struct ath6kl *ar,
@@ -561,7 +652,6 @@ static inline u32 ath6kl_get_hi_item_addr(struct ath6kl *ar,
return addr;
}
-void ath6kl_destroy(struct net_device *dev, unsigned int unregister);
int ath6kl_configure_target(struct ath6kl *ar);
void ath6kl_detect_error(unsigned long ptr);
void disconnect_timer_handler(unsigned long ptr);
@@ -579,10 +669,8 @@ int ath6kl_diag_write(struct ath6kl *ar, u32 address, void *data, u32 length);
int ath6kl_diag_read32(struct ath6kl *ar, u32 address, u32 *value);
int ath6kl_diag_read(struct ath6kl *ar, u32 address, void *data, u32 length);
int ath6kl_read_fwlogs(struct ath6kl *ar);
-void ath6kl_init_profile_info(struct ath6kl *ar);
+void ath6kl_init_profile_info(struct ath6kl_vif *vif);
void ath6kl_tx_data_cleanup(struct ath6kl *ar);
-void ath6kl_stop_endpoint(struct net_device *dev, bool keep_profile,
- bool get_dbglogs);
struct ath6kl_cookie *ath6kl_alloc_cookie(struct ath6kl *ar);
void ath6kl_free_cookie(struct ath6kl *ar, struct ath6kl_cookie *cookie);
@@ -598,40 +686,49 @@ struct htc_packet *ath6kl_alloc_amsdu_rxbuf(struct htc_target *target,
void aggr_module_destroy(struct aggr_info *aggr_info);
void aggr_reset_state(struct aggr_info *aggr_info);
-struct ath6kl_sta *ath6kl_find_sta(struct ath6kl *ar, u8 * node_addr);
+struct ath6kl_sta *ath6kl_find_sta(struct ath6kl_vif *vif, u8 * node_addr);
struct ath6kl_sta *ath6kl_find_sta_by_aid(struct ath6kl *ar, u8 aid);
void ath6kl_ready_event(void *devt, u8 * datap, u32 sw_ver, u32 abi_ver);
int ath6kl_control_tx(void *devt, struct sk_buff *skb,
enum htc_endpoint_id eid);
-void ath6kl_connect_event(struct ath6kl *ar, u16 channel,
+void ath6kl_connect_event(struct ath6kl_vif *vif, u16 channel,
u8 *bssid, u16 listen_int,
u16 beacon_int, enum network_type net_type,
u8 beacon_ie_len, u8 assoc_req_len,
u8 assoc_resp_len, u8 *assoc_info);
-void ath6kl_connect_ap_mode_bss(struct ath6kl *ar, u16 channel);
-void ath6kl_connect_ap_mode_sta(struct ath6kl *ar, u16 aid, u8 *mac_addr,
+void ath6kl_connect_ap_mode_bss(struct ath6kl_vif *vif, u16 channel);
+void ath6kl_connect_ap_mode_sta(struct ath6kl_vif *vif, u16 aid, u8 *mac_addr,
u8 keymgmt, u8 ucipher, u8 auth,
u8 assoc_req_len, u8 *assoc_info);
-void ath6kl_disconnect_event(struct ath6kl *ar, u8 reason,
+void ath6kl_disconnect_event(struct ath6kl_vif *vif, u8 reason,
u8 *bssid, u8 assoc_resp_len,
u8 *assoc_info, u16 prot_reason_status);
-void ath6kl_tkip_micerr_event(struct ath6kl *ar, u8 keyid, bool ismcast);
+void ath6kl_tkip_micerr_event(struct ath6kl_vif *vif, u8 keyid, bool ismcast);
void ath6kl_txpwr_rx_evt(void *devt, u8 tx_pwr);
-void ath6kl_scan_complete_evt(struct ath6kl *ar, int status);
-void ath6kl_tgt_stats_event(struct ath6kl *ar, u8 *ptr, u32 len);
+void ath6kl_scan_complete_evt(struct ath6kl_vif *vif, int status);
+void ath6kl_tgt_stats_event(struct ath6kl_vif *vif, u8 *ptr, u32 len);
void ath6kl_indicate_tx_activity(void *devt, u8 traffic_class, bool active);
enum htc_endpoint_id ath6kl_ac2_endpoint_id(void *devt, u8 ac);
-void ath6kl_pspoll_event(struct ath6kl *ar, u8 aid);
+void ath6kl_pspoll_event(struct ath6kl_vif *vif, u8 aid);
-void ath6kl_dtimexpiry_event(struct ath6kl *ar);
-void ath6kl_disconnect(struct ath6kl *ar);
-void ath6kl_deep_sleep_enable(struct ath6kl *ar);
-void aggr_recv_delba_req_evt(struct ath6kl *ar, u8 tid);
-void aggr_recv_addba_req_evt(struct ath6kl *ar, u8 tid, u16 seq_no,
+void ath6kl_dtimexpiry_event(struct ath6kl_vif *vif);
+void ath6kl_disconnect(struct ath6kl_vif *vif);
+void aggr_recv_delba_req_evt(struct ath6kl_vif *vif, u8 tid);
+void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid, u16 seq_no,
u8 win_sz);
void ath6kl_wakeup_event(void *dev);
-void ath6kl_target_failure(struct ath6kl *ar);
+
+void ath6kl_reset_device(struct ath6kl *ar, u32 target_type,
+ bool wait_fot_compltn, bool cold_reset);
+void ath6kl_init_control_info(struct ath6kl_vif *vif);
+void ath6kl_deinit_if_data(struct ath6kl_vif *vif);
+void ath6kl_core_free(struct ath6kl *ar);
+struct ath6kl_vif *ath6kl_vif_first(struct ath6kl *ar);
+void ath6kl_cleanup_vif(struct ath6kl_vif *vif, bool wmi_ready);
+int ath6kl_init_hw_start(struct ath6kl *ar);
+int ath6kl_init_hw_stop(struct ath6kl *ar);
+void ath6kl_check_wow_status(struct ath6kl *ar);
#endif /* CORE_H */
diff --git a/drivers/net/wireless/ath/ath6kl/debug.c b/drivers/net/wireless/ath/ath6kl/debug.c
index 7879b5314285..eb808b46f94c 100644
--- a/drivers/net/wireless/ath/ath6kl/debug.c
+++ b/drivers/net/wireless/ath/ath6kl/debug.c
@@ -143,49 +143,48 @@ void ath6kl_dump_registers(struct ath6kl_device *dev,
static void dump_cred_dist(struct htc_endpoint_credit_dist *ep_dist)
{
- ath6kl_dbg(ATH6KL_DBG_ANY,
+ ath6kl_dbg(ATH6KL_DBG_CREDIT,
"--- endpoint: %d svc_id: 0x%X ---\n",
ep_dist->endpoint, ep_dist->svc_id);
- ath6kl_dbg(ATH6KL_DBG_ANY, " dist_flags : 0x%X\n",
+ ath6kl_dbg(ATH6KL_DBG_CREDIT, " dist_flags : 0x%X\n",
ep_dist->dist_flags);
- ath6kl_dbg(ATH6KL_DBG_ANY, " cred_norm : %d\n",
+ ath6kl_dbg(ATH6KL_DBG_CREDIT, " cred_norm : %d\n",
ep_dist->cred_norm);
- ath6kl_dbg(ATH6KL_DBG_ANY, " cred_min : %d\n",
+ ath6kl_dbg(ATH6KL_DBG_CREDIT, " cred_min : %d\n",
ep_dist->cred_min);
- ath6kl_dbg(ATH6KL_DBG_ANY, " credits : %d\n",
+ ath6kl_dbg(ATH6KL_DBG_CREDIT, " credits : %d\n",
ep_dist->credits);
- ath6kl_dbg(ATH6KL_DBG_ANY, " cred_assngd : %d\n",
+ ath6kl_dbg(ATH6KL_DBG_CREDIT, " cred_assngd : %d\n",
ep_dist->cred_assngd);
- ath6kl_dbg(ATH6KL_DBG_ANY, " seek_cred : %d\n",
+ ath6kl_dbg(ATH6KL_DBG_CREDIT, " seek_cred : %d\n",
ep_dist->seek_cred);
- ath6kl_dbg(ATH6KL_DBG_ANY, " cred_sz : %d\n",
+ ath6kl_dbg(ATH6KL_DBG_CREDIT, " cred_sz : %d\n",
ep_dist->cred_sz);
- ath6kl_dbg(ATH6KL_DBG_ANY, " cred_per_msg : %d\n",
+ ath6kl_dbg(ATH6KL_DBG_CREDIT, " cred_per_msg : %d\n",
ep_dist->cred_per_msg);
- ath6kl_dbg(ATH6KL_DBG_ANY, " cred_to_dist : %d\n",
+ ath6kl_dbg(ATH6KL_DBG_CREDIT, " cred_to_dist : %d\n",
ep_dist->cred_to_dist);
- ath6kl_dbg(ATH6KL_DBG_ANY, " txq_depth : %d\n",
- get_queue_depth(&((struct htc_endpoint *)
- ep_dist->htc_rsvd)->txq));
- ath6kl_dbg(ATH6KL_DBG_ANY,
+ ath6kl_dbg(ATH6KL_DBG_CREDIT, " txq_depth : %d\n",
+ get_queue_depth(&ep_dist->htc_ep->txq));
+ ath6kl_dbg(ATH6KL_DBG_CREDIT,
"----------------------------------\n");
}
+/* FIXME: move to htc.c */
void dump_cred_dist_stats(struct htc_target *target)
{
struct htc_endpoint_credit_dist *ep_list;
- if (!AR_DBG_LVL_CHECK(ATH6KL_DBG_TRC))
+ if (!AR_DBG_LVL_CHECK(ATH6KL_DBG_CREDIT))
return;
list_for_each_entry(ep_list, &target->cred_dist_list, list)
dump_cred_dist(ep_list);
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:%p dist:%p\n",
- target->cred_dist_cntxt, NULL);
- ath6kl_dbg(ATH6KL_DBG_TRC, "credit distribution, total : %d, free : %d\n",
- target->cred_dist_cntxt->total_avail_credits,
- target->cred_dist_cntxt->cur_free_credits);
+ ath6kl_dbg(ATH6KL_DBG_CREDIT,
+ "credit distribution total %d free %d\n",
+ target->credit_info->total_avail_credits,
+ target->credit_info->cur_free_credits);
}
static int ath6kl_debugfs_open(struct inode *inode, struct file *file)
@@ -397,13 +396,20 @@ static ssize_t read_file_tgt_stats(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath6kl *ar = file->private_data;
- struct target_stats *tgt_stats = &ar->target_stats;
+ struct ath6kl_vif *vif;
+ struct target_stats *tgt_stats;
char *buf;
unsigned int len = 0, buf_len = 1500;
int i;
long left;
ssize_t ret_cnt;
+ vif = ath6kl_vif_first(ar);
+ if (!vif)
+ return -EIO;
+
+ tgt_stats = &vif->target_stats;
+
buf = kzalloc(buf_len, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -413,9 +419,9 @@ static ssize_t read_file_tgt_stats(struct file *file, char __user *user_buf,
return -EBUSY;
}
- set_bit(STATS_UPDATE_PEND, &ar->flag);
+ set_bit(STATS_UPDATE_PEND, &vif->flags);
- if (ath6kl_wmi_get_stats_cmd(ar->wmi)) {
+ if (ath6kl_wmi_get_stats_cmd(ar->wmi, 0)) {
up(&ar->sem);
kfree(buf);
return -EIO;
@@ -423,7 +429,7 @@ static ssize_t read_file_tgt_stats(struct file *file, char __user *user_buf,
left = wait_event_interruptible_timeout(ar->event_wq,
!test_bit(STATS_UPDATE_PEND,
- &ar->flag), WMI_TIMEOUT);
+ &vif->flags), WMI_TIMEOUT);
up(&ar->sem);
@@ -555,10 +561,10 @@ static ssize_t read_file_credit_dist_stats(struct file *file,
len += scnprintf(buf + len, buf_len - len, "%25s%5d\n",
"Total Avail Credits: ",
- target->cred_dist_cntxt->total_avail_credits);
+ target->credit_info->total_avail_credits);
len += scnprintf(buf + len, buf_len - len, "%25s%5d\n",
"Free credits :",
- target->cred_dist_cntxt->cur_free_credits);
+ target->credit_info->cur_free_credits);
len += scnprintf(buf + len, buf_len - len,
" Epid Flags Cred_norm Cred_min Credits Cred_assngd"
@@ -577,8 +583,7 @@ static ssize_t read_file_credit_dist_stats(struct file *file,
print_credit_info("%9d", cred_per_msg);
print_credit_info("%14d", cred_to_dist);
len += scnprintf(buf + len, buf_len - len, "%12d\n",
- get_queue_depth(&((struct htc_endpoint *)
- ep_list->htc_rsvd)->txq));
+ get_queue_depth(&ep_list->htc_ep->txq));
}
if (len > buf_len)
@@ -596,6 +601,107 @@ static const struct file_operations fops_credit_dist_stats = {
.llseek = default_llseek,
};
+static unsigned int print_endpoint_stat(struct htc_target *target, char *buf,
+ unsigned int buf_len, unsigned int len,
+ int offset, const char *name)
+{
+ int i;
+ struct htc_endpoint_stats *ep_st;
+ u32 *counter;
+
+ len += scnprintf(buf + len, buf_len - len, "%s:", name);
+ for (i = 0; i < ENDPOINT_MAX; i++) {
+ ep_st = &target->endpoint[i].ep_st;
+ counter = ((u32 *) ep_st) + (offset / 4);
+ len += scnprintf(buf + len, buf_len - len, " %u", *counter);
+ }
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ return len;
+}
+
+static ssize_t ath6kl_endpoint_stats_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ struct htc_target *target = ar->htc_target;
+ char *buf;
+ unsigned int buf_len, len = 0;
+ ssize_t ret_cnt;
+
+ buf_len = sizeof(struct htc_endpoint_stats) / sizeof(u32) *
+ (25 + ENDPOINT_MAX * 11);
+ buf = kmalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+#define EPSTAT(name) \
+ len = print_endpoint_stat(target, buf, buf_len, len, \
+ offsetof(struct htc_endpoint_stats, name), \
+ #name)
+ EPSTAT(cred_low_indicate);
+ EPSTAT(tx_issued);
+ EPSTAT(tx_pkt_bundled);
+ EPSTAT(tx_bundles);
+ EPSTAT(tx_dropped);
+ EPSTAT(tx_cred_rpt);
+ EPSTAT(cred_rpt_from_rx);
+ EPSTAT(cred_rpt_from_other);
+ EPSTAT(cred_rpt_ep0);
+ EPSTAT(cred_from_rx);
+ EPSTAT(cred_from_other);
+ EPSTAT(cred_from_ep0);
+ EPSTAT(cred_cosumd);
+ EPSTAT(cred_retnd);
+ EPSTAT(rx_pkts);
+ EPSTAT(rx_lkahds);
+ EPSTAT(rx_bundl);
+ EPSTAT(rx_bundle_lkahd);
+ EPSTAT(rx_bundle_from_hdr);
+ EPSTAT(rx_alloc_thresh_hit);
+ EPSTAT(rxalloc_thresh_byte);
+#undef EPSTAT
+
+ if (len > buf_len)
+ len = buf_len;
+
+ ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+ return ret_cnt;
+}
+
+static ssize_t ath6kl_endpoint_stats_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ struct htc_target *target = ar->htc_target;
+ int ret, i;
+ u32 val;
+ struct htc_endpoint_stats *ep_st;
+
+ ret = kstrtou32_from_user(user_buf, count, 0, &val);
+ if (ret)
+ return ret;
+ if (val == 0) {
+ for (i = 0; i < ENDPOINT_MAX; i++) {
+ ep_st = &target->endpoint[i].ep_st;
+ memset(ep_st, 0, sizeof(*ep_st));
+ }
+ }
+
+ return count;
+}
+
+static const struct file_operations fops_endpoint_stats = {
+ .open = ath6kl_debugfs_open,
+ .read = ath6kl_endpoint_stats_read,
+ .write = ath6kl_endpoint_stats_write,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
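
print_endpoint_stat() relies on every field of struct htc_endpoint_stats being a u32, so a field can be read generically via its byte offset. A compilable user-space sketch of the same trick; the struct here is a stand-in, not the real one:

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Stand-in for htc_endpoint_stats: all counters are 32-bit words. */
    struct ep_stats {
            uint32_t tx_issued;
            uint32_t tx_dropped;
            uint32_t rx_pkts;
    };

    /* Read a counter by byte offset, as the EPSTAT()/offsetof() pair does. */
    static uint32_t read_stat(const struct ep_stats *st, size_t offset)
    {
            return *((const uint32_t *) st + offset / 4);
    }

    int main(void)
    {
            struct ep_stats st = { .tx_issued = 7, .tx_dropped = 1, .rx_pkts = 42 };

            printf("rx_pkts=%u\n",
                   (unsigned) read_stat(&st, offsetof(struct ep_stats, rx_pkts)));
            return 0;
    }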
+
static unsigned long ath6kl_get_num_reg(void)
{
int i;
@@ -868,6 +974,660 @@ static const struct file_operations fops_diag_reg_write = {
.llseek = default_llseek,
};
+int ath6kl_debug_roam_tbl_event(struct ath6kl *ar, const void *buf,
+ size_t len)
+{
+ const struct wmi_target_roam_tbl *tbl;
+ u16 num_entries;
+
+ if (len < sizeof(*tbl))
+ return -EINVAL;
+
+ tbl = (const struct wmi_target_roam_tbl *) buf;
+ num_entries = le16_to_cpu(tbl->num_entries);
+ if (sizeof(*tbl) + num_entries * sizeof(struct wmi_bss_roam_info) >
+ len)
+ return -EINVAL;
+
+ if (ar->debug.roam_tbl == NULL ||
+ ar->debug.roam_tbl_len < (unsigned int) len) {
+ kfree(ar->debug.roam_tbl);
+ ar->debug.roam_tbl = kmalloc(len, GFP_ATOMIC);
+ if (ar->debug.roam_tbl == NULL)
+ return -ENOMEM;
+ }
+
+ memcpy(ar->debug.roam_tbl, buf, len);
+ ar->debug.roam_tbl_len = len;
+
+ if (test_bit(ROAM_TBL_PEND, &ar->flag)) {
+ clear_bit(ROAM_TBL_PEND, &ar->flag);
+ wake_up(&ar->event_wq);
+ }
+
+ return 0;
+}
+
+static ssize_t ath6kl_roam_table_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ int ret;
+ long left;
+ struct wmi_target_roam_tbl *tbl;
+ u16 num_entries, i;
+ char *buf;
+ unsigned int len, buf_len;
+ ssize_t ret_cnt;
+
+ if (down_interruptible(&ar->sem))
+ return -EBUSY;
+
+ set_bit(ROAM_TBL_PEND, &ar->flag);
+
+ ret = ath6kl_wmi_get_roam_tbl_cmd(ar->wmi);
+ if (ret) {
+ up(&ar->sem);
+ return ret;
+ }
+
+ left = wait_event_interruptible_timeout(
+ ar->event_wq, !test_bit(ROAM_TBL_PEND, &ar->flag), WMI_TIMEOUT);
+ up(&ar->sem);
+
+ if (left <= 0)
+ return -ETIMEDOUT;
+
+ if (ar->debug.roam_tbl == NULL)
+ return -ENOMEM;
+
+ tbl = (struct wmi_target_roam_tbl *) ar->debug.roam_tbl;
+ num_entries = le16_to_cpu(tbl->num_entries);
+
+ buf_len = 100 + num_entries * 100;
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+ len = 0;
+ len += scnprintf(buf + len, buf_len - len,
+ "roam_mode=%u\n\n"
+ "# roam_util bssid rssi rssidt last_rssi util bias\n",
+ le16_to_cpu(tbl->roam_mode));
+
+ for (i = 0; i < num_entries; i++) {
+ struct wmi_bss_roam_info *info = &tbl->info[i];
+ len += scnprintf(buf + len, buf_len - len,
+ "%d %pM %d %d %d %d %d\n",
+ a_sle32_to_cpu(info->roam_util), info->bssid,
+ info->rssi, info->rssidt, info->last_rssi,
+ info->util, info->bias);
+ }
+
+ if (len > buf_len)
+ len = buf_len;
+
+ ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+
+ kfree(buf);
+ return ret_cnt;
+}
+
+static const struct file_operations fops_roam_table = {
+ .read = ath6kl_roam_table_read,
+ .open = ath6kl_debugfs_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath6kl_force_roam_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ int ret;
+ char buf[20];
+ size_t len;
+ u8 bssid[ETH_ALEN];
+ int i;
+ int addr[ETH_ALEN];
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+ buf[len] = '\0';
+
+ if (sscanf(buf, "%02x:%02x:%02x:%02x:%02x:%02x",
+ &addr[0], &addr[1], &addr[2], &addr[3], &addr[4], &addr[5])
+ != ETH_ALEN)
+ return -EINVAL;
+ for (i = 0; i < ETH_ALEN; i++)
+ bssid[i] = addr[i];
+
+ ret = ath6kl_wmi_force_roam_cmd(ar->wmi, bssid);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static const struct file_operations fops_force_roam = {
+ .write = ath6kl_force_roam_write,
+ .open = ath6kl_debugfs_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
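
The force_roam debugfs write handler above accepts a colon-separated MAC string; the parsing step alone looks roughly like this user-space sketch (the helper name is illustrative):

    #include <stdio.h>
    #include <stdint.h>

    /* Parse "aa:bb:cc:dd:ee:ff" into 6 bytes; sscanf() must convert exactly
     * six fields for the input to be accepted. */
    static int parse_mac(const char *s, uint8_t mac[6])
    {
            unsigned int a[6];
            int i;

            if (sscanf(s, "%02x:%02x:%02x:%02x:%02x:%02x",
                       &a[0], &a[1], &a[2], &a[3], &a[4], &a[5]) != 6)
                    return -1;
            for (i = 0; i < 6; i++)
                    mac[i] = (uint8_t) a[i];
            return 0;
    }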
+
+static ssize_t ath6kl_roam_mode_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ int ret;
+ char buf[20];
+ size_t len;
+ enum wmi_roam_mode mode;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+ buf[len] = '\0';
+ if (len > 0 && buf[len - 1] == '\n')
+ buf[len - 1] = '\0';
+
+ if (strcasecmp(buf, "default") == 0)
+ mode = WMI_DEFAULT_ROAM_MODE;
+ else if (strcasecmp(buf, "bssbias") == 0)
+ mode = WMI_HOST_BIAS_ROAM_MODE;
+ else if (strcasecmp(buf, "lock") == 0)
+ mode = WMI_LOCK_BSS_MODE;
+ else
+ return -EINVAL;
+
+ ret = ath6kl_wmi_set_roam_mode_cmd(ar->wmi, mode);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static const struct file_operations fops_roam_mode = {
+ .write = ath6kl_roam_mode_write,
+ .open = ath6kl_debugfs_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+void ath6kl_debug_set_keepalive(struct ath6kl *ar, u8 keepalive)
+{
+ ar->debug.keepalive = keepalive;
+}
+
+static ssize_t ath6kl_keepalive_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ char buf[16];
+ int len;
+
+ len = snprintf(buf, sizeof(buf), "%u\n", ar->debug.keepalive);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath6kl_keepalive_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ int ret;
+ u8 val;
+
+ ret = kstrtou8_from_user(user_buf, count, 0, &val);
+ if (ret)
+ return ret;
+
+ ret = ath6kl_wmi_set_keepalive_cmd(ar->wmi, 0, val);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static const struct file_operations fops_keepalive = {
+ .open = ath6kl_debugfs_open,
+ .read = ath6kl_keepalive_read,
+ .write = ath6kl_keepalive_write,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+void ath6kl_debug_set_disconnect_timeout(struct ath6kl *ar, u8 timeout)
+{
+ ar->debug.disc_timeout = timeout;
+}
+
+static ssize_t ath6kl_disconnect_timeout_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ char buf[16];
+ int len;
+
+ len = snprintf(buf, sizeof(buf), "%u\n", ar->debug.disc_timeout);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath6kl_disconnect_timeout_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ int ret;
+ u8 val;
+
+ ret = kstrtou8_from_user(user_buf, count, 0, &val);
+ if (ret)
+ return ret;
+
+ ret = ath6kl_wmi_disctimeout_cmd(ar->wmi, 0, val);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static const struct file_operations fops_disconnect_timeout = {
+ .open = ath6kl_debugfs_open,
+ .read = ath6kl_disconnect_timeout_read,
+ .write = ath6kl_disconnect_timeout_write,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath6kl_create_qos_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+
+ struct ath6kl *ar = file->private_data;
+ struct ath6kl_vif *vif;
+ char buf[200];
+ ssize_t len;
+ char *sptr, *token;
+ struct wmi_create_pstream_cmd pstream;
+ u32 val32;
+ u16 val16;
+
+ vif = ath6kl_vif_first(ar);
+ if (!vif)
+ return -EIO;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+ buf[len] = '\0';
+ sptr = buf;
+
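+	/*
+	 * Expected input: space-separated numeric fields in this order:
+	 * user_pri traffic_direc traffic_class traffic_type voice_psc_cap
+	 * min_service_int max_service_int inactivity_int suspension_int
+	 * service_start_time tsid nominal_msdu max_msdu min_data_rate
+	 * mean_data_rate peak_data_rate max_burst_size delay_bound
+	 * min_phy_rate sba medium_time
+	 */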
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou8(token, 0, &pstream.user_pri))
+ return -EINVAL;
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou8(token, 0, &pstream.traffic_direc))
+ return -EINVAL;
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou8(token, 0, &pstream.traffic_class))
+ return -EINVAL;
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou8(token, 0, &pstream.traffic_type))
+ return -EINVAL;
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou8(token, 0, &pstream.voice_psc_cap))
+ return -EINVAL;
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou32(token, 0, &val32))
+ return -EINVAL;
+ pstream.min_service_int = cpu_to_le32(val32);
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou32(token, 0, &val32))
+ return -EINVAL;
+ pstream.max_service_int = cpu_to_le32(val32);
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou32(token, 0, &val32))
+ return -EINVAL;
+ pstream.inactivity_int = cpu_to_le32(val32);
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou32(token, 0, &val32))
+ return -EINVAL;
+ pstream.suspension_int = cpu_to_le32(val32);
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou32(token, 0, &val32))
+ return -EINVAL;
+ pstream.service_start_time = cpu_to_le32(val32);
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou8(token, 0, &pstream.tsid))
+ return -EINVAL;
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou16(token, 0, &val16))
+ return -EINVAL;
+ pstream.nominal_msdu = cpu_to_le16(val16);
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou16(token, 0, &val16))
+ return -EINVAL;
+ pstream.max_msdu = cpu_to_le16(val16);
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou32(token, 0, &val32))
+ return -EINVAL;
+ pstream.min_data_rate = cpu_to_le32(val32);
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou32(token, 0, &val32))
+ return -EINVAL;
+ pstream.mean_data_rate = cpu_to_le32(val32);
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou32(token, 0, &val32))
+ return -EINVAL;
+ pstream.peak_data_rate = cpu_to_le32(val32);
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou32(token, 0, &val32))
+ return -EINVAL;
+ pstream.max_burst_size = cpu_to_le32(val32);
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou32(token, 0, &val32))
+ return -EINVAL;
+ pstream.delay_bound = cpu_to_le32(val32);
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou32(token, 0, &val32))
+ return -EINVAL;
+ pstream.min_phy_rate = cpu_to_le32(val32);
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou32(token, 0, &val32))
+ return -EINVAL;
+ pstream.sba = cpu_to_le32(val32);
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou32(token, 0, &val32))
+ return -EINVAL;
+ pstream.medium_time = cpu_to_le32(val32);
+
+ ath6kl_wmi_create_pstream_cmd(ar->wmi, vif->fw_vif_idx, &pstream);
+
+ return count;
+}
+
+static const struct file_operations fops_create_qos = {
+ .write = ath6kl_create_qos_write,
+ .open = ath6kl_debugfs_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath6kl_delete_qos_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+
+ struct ath6kl *ar = file->private_data;
+ struct ath6kl_vif *vif;
+ char buf[100];
+ ssize_t len;
+ char *sptr, *token;
+ u8 traffic_class;
+ u8 tsid;
+
+ vif = ath6kl_vif_first(ar);
+ if (!vif)
+ return -EIO;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+ buf[len] = '\0';
+ sptr = buf;
+
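+	/* Expected input: "<traffic_class> <tsid>" */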
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou8(token, 0, &traffic_class))
+ return -EINVAL;
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou8(token, 0, &tsid))
+ return -EINVAL;
+
+ ath6kl_wmi_delete_pstream_cmd(ar->wmi, vif->fw_vif_idx,
+ traffic_class, tsid);
+
+ return count;
+}
+
+static const struct file_operations fops_delete_qos = {
+ .write = ath6kl_delete_qos_write,
+ .open = ath6kl_debugfs_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath6kl_bgscan_int_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ u16 bgscan_int;
+ char buf[32];
+ ssize_t len;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ if (kstrtou16(buf, 0, &bgscan_int))
+ return -EINVAL;
+
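+	/* a value of 0 is mapped to the maximum interval (0xffff) */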
+ if (bgscan_int == 0)
+ bgscan_int = 0xffff;
+
+ ath6kl_wmi_scanparams_cmd(ar->wmi, 0, 0, 0, bgscan_int, 0, 0, 0, 3,
+ 0, 0, 0);
+
+ return count;
+}
+
+static const struct file_operations fops_bgscan_int = {
+ .write = ath6kl_bgscan_int_write,
+ .open = ath6kl_debugfs_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath6kl_listen_int_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ u16 listen_int_t, listen_int_b;
+ char buf[32];
+ char *sptr, *token;
+ ssize_t len;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ sptr = buf;
+
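+	/*
+	 * Expected input: "<listen_intvl_t> <listen_intvl_b>"; the first
+	 * value must be within 15-5000 and the second within 1-50.
+	 */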
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+
+ if (kstrtou16(token, 0, &listen_int_t))
+ return -EINVAL;
+
+ if (kstrtou16(sptr, 0, &listen_int_b))
+ return -EINVAL;
+
+ if ((listen_int_t < 15) || (listen_int_t > 5000))
+ return -EINVAL;
+
+ if ((listen_int_b < 1) || (listen_int_b > 50))
+ return -EINVAL;
+
+ ar->listen_intvl_t = listen_int_t;
+ ar->listen_intvl_b = listen_int_b;
+
+ ath6kl_wmi_listeninterval_cmd(ar->wmi, 0, ar->listen_intvl_t,
+ ar->listen_intvl_b);
+
+ return count;
+}
+
+static ssize_t ath6kl_listen_int_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ char buf[32];
+ int len;
+
+ len = scnprintf(buf, sizeof(buf), "%u %u\n", ar->listen_intvl_t,
+ ar->listen_intvl_b);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_listen_int = {
+ .read = ath6kl_listen_int_read,
+ .write = ath6kl_listen_int_write,
+ .open = ath6kl_debugfs_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath6kl_power_params_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ u8 buf[100];
+ unsigned int len = 0;
+ char *sptr, *token;
+ u16 idle_period, ps_poll_num, dtim,
+ tx_wakeup, num_tx;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+ buf[len] = '\0';
+ sptr = buf;
+
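+	/*
+	 * Expected input: "<idle_period> <ps_poll_num> <dtim> <tx_wakeup>
+	 * <num_tx>", parsed in that order below.
+	 */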
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou16(token, 0, &idle_period))
+ return -EINVAL;
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou16(token, 0, &ps_poll_num))
+ return -EINVAL;
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou16(token, 0, &dtim))
+ return -EINVAL;
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou16(token, 0, &tx_wakeup))
+ return -EINVAL;
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou16(token, 0, &num_tx))
+ return -EINVAL;
+
+ ath6kl_wmi_pmparams_cmd(ar->wmi, 0, idle_period, ps_poll_num,
+ dtim, tx_wakeup, num_tx, 0);
+
+ return count;
+}
+
+static const struct file_operations fops_power_params = {
+ .write = ath6kl_power_params_write,
+ .open = ath6kl_debugfs_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
int ath6kl_debug_init(struct ath6kl *ar)
{
ar->debug.fwlog_buf.buf = vmalloc(ATH6KL_FWLOG_SIZE);
@@ -889,7 +1649,7 @@ int ath6kl_debug_init(struct ath6kl *ar)
ar->debug.fwlog_mask = 0;
ar->debugfs_phy = debugfs_create_dir("ath6kl",
- ar->wdev->wiphy->debugfsdir);
+ ar->wiphy->debugfsdir);
if (!ar->debugfs_phy) {
vfree(ar->debug.fwlog_buf.buf);
kfree(ar->debug.fwlog_tmp);
@@ -902,6 +1662,9 @@ int ath6kl_debug_init(struct ath6kl *ar)
debugfs_create_file("credit_dist_stats", S_IRUSR, ar->debugfs_phy, ar,
&fops_credit_dist_stats);
+ debugfs_create_file("endpoint_stats", S_IRUSR | S_IWUSR,
+ ar->debugfs_phy, ar, &fops_endpoint_stats);
+
debugfs_create_file("fwlog", S_IRUSR, ar->debugfs_phy, ar,
&fops_fwlog);
@@ -923,6 +1686,33 @@ int ath6kl_debug_init(struct ath6kl *ar)
debugfs_create_file("war_stats", S_IRUSR, ar->debugfs_phy, ar,
&fops_war_stats);
+ debugfs_create_file("roam_table", S_IRUSR, ar->debugfs_phy, ar,
+ &fops_roam_table);
+
+ debugfs_create_file("force_roam", S_IWUSR, ar->debugfs_phy, ar,
+ &fops_force_roam);
+
+ debugfs_create_file("roam_mode", S_IWUSR, ar->debugfs_phy, ar,
+ &fops_roam_mode);
+
+ debugfs_create_file("keepalive", S_IRUSR | S_IWUSR, ar->debugfs_phy, ar,
+ &fops_keepalive);
+
+ debugfs_create_file("disconnect_timeout", S_IRUSR | S_IWUSR,
+ ar->debugfs_phy, ar, &fops_disconnect_timeout);
+
+ debugfs_create_file("create_qos", S_IWUSR, ar->debugfs_phy, ar,
+ &fops_create_qos);
+
+ debugfs_create_file("delete_qos", S_IWUSR, ar->debugfs_phy, ar,
+ &fops_delete_qos);
+
+ debugfs_create_file("bgscan_interval", S_IWUSR,
+ ar->debugfs_phy, ar, &fops_bgscan_int);
+
+ debugfs_create_file("power_params", S_IWUSR, ar->debugfs_phy, ar,
+ &fops_power_params);
+
return 0;
}
@@ -930,6 +1720,7 @@ void ath6kl_debug_cleanup(struct ath6kl *ar)
{
vfree(ar->debug.fwlog_buf.buf);
kfree(ar->debug.fwlog_tmp);
+ kfree(ar->debug.roam_tbl);
}
#endif
diff --git a/drivers/net/wireless/ath/ath6kl/debug.h b/drivers/net/wireless/ath/ath6kl/debug.h
index 7b7675f70a10..9853c9c125c1 100644
--- a/drivers/net/wireless/ath/ath6kl/debug.h
+++ b/drivers/net/wireless/ath/ath6kl/debug.h
@@ -17,19 +17,19 @@
#ifndef DEBUG_H
#define DEBUG_H
-#include "htc_hif.h"
+#include "hif.h"
enum ATH6K_DEBUG_MASK {
- ATH6KL_DBG_WLAN_CONNECT = BIT(0), /* wlan connect */
- ATH6KL_DBG_WLAN_SCAN = BIT(1), /* wlan scan */
+ ATH6KL_DBG_CREDIT = BIT(0),
+ /* hole */
ATH6KL_DBG_WLAN_TX = BIT(2), /* wlan tx */
ATH6KL_DBG_WLAN_RX = BIT(3), /* wlan rx */
ATH6KL_DBG_BMI = BIT(4), /* bmi tracing */
- ATH6KL_DBG_HTC_SEND = BIT(5), /* htc send */
- ATH6KL_DBG_HTC_RECV = BIT(6), /* htc recv */
+ ATH6KL_DBG_HTC = BIT(5),
+ ATH6KL_DBG_HIF = BIT(6),
ATH6KL_DBG_IRQ = BIT(7), /* interrupt processing */
- ATH6KL_DBG_PM = BIT(8), /* power management */
- ATH6KL_DBG_WLAN_NODE = BIT(9), /* general wlan node tracing */
+ /* hole */
+ /* hole */
ATH6KL_DBG_WMI = BIT(10), /* wmi tracing */
ATH6KL_DBG_TRC = BIT(11), /* generic func tracing */
ATH6KL_DBG_SCATTER = BIT(12), /* hif scatter tracing */
@@ -40,6 +40,7 @@ enum ATH6K_DEBUG_MASK {
ATH6KL_DBG_SDIO_DUMP = BIT(17),
ATH6KL_DBG_BOOT = BIT(18), /* driver init and fw boot */
ATH6KL_DBG_WMI_DUMP = BIT(19),
+ ATH6KL_DBG_SUSPEND = BIT(20),
ATH6KL_DBG_ANY = 0xffffffff /* enable all logs */
};
@@ -90,6 +91,10 @@ void ath6kl_dump_registers(struct ath6kl_device *dev,
void dump_cred_dist_stats(struct htc_target *target);
void ath6kl_debug_fwlog_event(struct ath6kl *ar, const void *buf, size_t len);
void ath6kl_debug_war(struct ath6kl *ar, enum ath6kl_war war);
+int ath6kl_debug_roam_tbl_event(struct ath6kl *ar, const void *buf,
+ size_t len);
+void ath6kl_debug_set_keepalive(struct ath6kl *ar, u8 keepalive);
+void ath6kl_debug_set_disconnect_timeout(struct ath6kl *ar, u8 timeout);
int ath6kl_debug_init(struct ath6kl *ar);
void ath6kl_debug_cleanup(struct ath6kl *ar);
@@ -125,6 +130,21 @@ static inline void ath6kl_debug_war(struct ath6kl *ar, enum ath6kl_war war)
{
}
+static inline int ath6kl_debug_roam_tbl_event(struct ath6kl *ar,
+ const void *buf, size_t len)
+{
+ return 0;
+}
+
+static inline void ath6kl_debug_set_keepalive(struct ath6kl *ar, u8 keepalive)
+{
+}
+
+static inline void ath6kl_debug_set_disconnect_timeout(struct ath6kl *ar,
+ u8 timeout)
+{
+}
+
static inline int ath6kl_debug_init(struct ath6kl *ar)
{
return 0;
diff --git a/drivers/net/wireless/ath/ath6kl/hif-ops.h b/drivers/net/wireless/ath/ath6kl/hif-ops.h
index d6c898f3d0b3..2fe1dadfc77a 100644
--- a/drivers/net/wireless/ath/ath6kl/hif-ops.h
+++ b/drivers/net/wireless/ath/ath6kl/hif-ops.h
@@ -18,10 +18,16 @@
#define HIF_OPS_H
#include "hif.h"
+#include "debug.h"
static inline int hif_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf,
u32 len, u32 request)
{
+ ath6kl_dbg(ATH6KL_DBG_HIF,
+ "hif %s sync addr 0x%x buf 0x%p len %d request 0x%x\n",
+ (request & HIF_WRITE) ? "write" : "read",
+ addr, buf, len, request);
+
return ar->hif_ops->read_write_sync(ar, addr, buf, len, request);
}
@@ -29,16 +35,24 @@ static inline int hif_write_async(struct ath6kl *ar, u32 address, u8 *buffer,
u32 length, u32 request,
struct htc_packet *packet)
{
+ ath6kl_dbg(ATH6KL_DBG_HIF,
+ "hif write async addr 0x%x buf 0x%p len %d request 0x%x\n",
+ address, buffer, length, request);
+
return ar->hif_ops->write_async(ar, address, buffer, length,
request, packet);
}
static inline void ath6kl_hif_irq_enable(struct ath6kl *ar)
{
+ ath6kl_dbg(ATH6KL_DBG_HIF, "hif irq enable\n");
+
return ar->hif_ops->irq_enable(ar);
}
static inline void ath6kl_hif_irq_disable(struct ath6kl *ar)
{
+ ath6kl_dbg(ATH6KL_DBG_HIF, "hif irq disable\n");
+
return ar->hif_ops->irq_disable(ar);
}
@@ -69,9 +83,70 @@ static inline void ath6kl_hif_cleanup_scatter(struct ath6kl *ar)
return ar->hif_ops->cleanup_scatter(ar);
}
-static inline int ath6kl_hif_suspend(struct ath6kl *ar)
+static inline int ath6kl_hif_suspend(struct ath6kl *ar,
+ struct cfg80211_wowlan *wow)
+{
+ ath6kl_dbg(ATH6KL_DBG_HIF, "hif suspend\n");
+
+ return ar->hif_ops->suspend(ar, wow);
+}
+
+/*
+ * Read from the ATH6KL through its diagnostic window. No cooperation from
+ * the Target is required for this.
+ */
+static inline int ath6kl_hif_diag_read32(struct ath6kl *ar, u32 address,
+ u32 *value)
{
- return ar->hif_ops->suspend(ar);
+ return ar->hif_ops->diag_read32(ar, address, value);
+}
+
+/*
+ * Write to the ATH6KL through its diagnostic window. No cooperation from
+ * the Target is required for this.
+ */
+static inline int ath6kl_hif_diag_write32(struct ath6kl *ar, u32 address,
+ __le32 value)
+{
+ return ar->hif_ops->diag_write32(ar, address, value);
+}
+
+static inline int ath6kl_hif_bmi_read(struct ath6kl *ar, u8 *buf, u32 len)
+{
+ return ar->hif_ops->bmi_read(ar, buf, len);
+}
+
+static inline int ath6kl_hif_bmi_write(struct ath6kl *ar, u8 *buf, u32 len)
+{
+ return ar->hif_ops->bmi_write(ar, buf, len);
+}
+
+static inline int ath6kl_hif_resume(struct ath6kl *ar)
+{
+ ath6kl_dbg(ATH6KL_DBG_HIF, "hif resume\n");
+
+ return ar->hif_ops->resume(ar);
+}
+
+static inline int ath6kl_hif_power_on(struct ath6kl *ar)
+{
+ ath6kl_dbg(ATH6KL_DBG_HIF, "hif power on\n");
+
+ return ar->hif_ops->power_on(ar);
+}
+
+static inline int ath6kl_hif_power_off(struct ath6kl *ar)
+{
+ ath6kl_dbg(ATH6KL_DBG_HIF, "hif power off\n");
+
+ return ar->hif_ops->power_off(ar);
+}
+
+static inline void ath6kl_hif_stop(struct ath6kl *ar)
+{
+ ath6kl_dbg(ATH6KL_DBG_HIF, "hif stop\n");
+
+ ar->hif_ops->stop(ar);
}
#endif
diff --git a/drivers/net/wireless/ath/ath6kl/htc_hif.c b/drivers/net/wireless/ath/ath6kl/hif.c
index 86b1cc7409c2..e57da35e59fa 100644
--- a/drivers/net/wireless/ath/ath6kl/htc_hif.c
+++ b/drivers/net/wireless/ath/ath6kl/hif.c
@@ -13,18 +13,19 @@
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#include "hif.h"
#include "core.h"
#include "target.h"
#include "hif-ops.h"
-#include "htc_hif.h"
#include "debug.h"
#define MAILBOX_FOR_BLOCK_SIZE 1
#define ATH6KL_TIME_QUANTUM 10 /* in ms */
-static int ath6kldev_cp_scat_dma_buf(struct hif_scatter_req *req, bool from_dma)
+static int ath6kl_hif_cp_scat_dma_buf(struct hif_scatter_req *req,
+ bool from_dma)
{
u8 *buf;
int i;
@@ -46,12 +47,11 @@ static int ath6kldev_cp_scat_dma_buf(struct hif_scatter_req *req, bool from_dma)
return 0;
}
-int ath6kldev_rw_comp_handler(void *context, int status)
+int ath6kl_hif_rw_comp_handler(void *context, int status)
{
struct htc_packet *packet = context;
- ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
- "ath6kldev_rw_comp_handler (pkt:0x%p , status: %d\n",
+ ath6kl_dbg(ATH6KL_DBG_HIF, "hif rw completion pkt 0x%p status %d\n",
packet, status);
packet->status = status;
@@ -59,30 +59,83 @@ int ath6kldev_rw_comp_handler(void *context, int status)
return 0;
}
+#define REG_DUMP_COUNT_AR6003 60
+#define REGISTER_DUMP_LEN_MAX 60
-static int ath6kldev_proc_dbg_intr(struct ath6kl_device *dev)
+static void ath6kl_hif_dump_fw_crash(struct ath6kl *ar)
{
- u32 dummy;
- int status;
+ __le32 regdump_val[REGISTER_DUMP_LEN_MAX];
+ u32 i, address, regdump_addr = 0;
+ int ret;
+
+ if (ar->target_type != TARGET_TYPE_AR6003)
+ return;
+
+ /* the reg dump pointer is copied to the host interest area */
+ address = ath6kl_get_hi_item_addr(ar, HI_ITEM(hi_failure_state));
+ address = TARG_VTOP(ar->target_type, address);
+
+ /* read RAM location through diagnostic window */
+ ret = ath6kl_diag_read32(ar, address, &regdump_addr);
+
+ if (ret || !regdump_addr) {
+ ath6kl_warn("failed to get ptr to register dump area: %d\n",
+ ret);
+ return;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_IRQ, "register dump data address 0x%x\n",
+ regdump_addr);
+ regdump_addr = TARG_VTOP(ar->target_type, regdump_addr);
+
+ /* fetch register dump data */
+ ret = ath6kl_diag_read(ar, regdump_addr, (u8 *)&regdump_val[0],
+ REG_DUMP_COUNT_AR6003 * (sizeof(u32)));
+ if (ret) {
+ ath6kl_warn("failed to get register dump: %d\n", ret);
+ return;
+ }
+
+ ath6kl_info("crash dump:\n");
+ ath6kl_info("hw 0x%x fw %s\n", ar->wiphy->hw_version,
+ ar->wiphy->fw_version);
+
+ BUILD_BUG_ON(REG_DUMP_COUNT_AR6003 % 4);
- ath6kl_err("target debug interrupt\n");
+ for (i = 0; i < REG_DUMP_COUNT_AR6003 / 4; i++) {
+ ath6kl_info("%d: 0x%8.8x 0x%8.8x 0x%8.8x 0x%8.8x\n",
+ 4 * i,
+ le32_to_cpu(regdump_val[i]),
+ le32_to_cpu(regdump_val[i + 1]),
+ le32_to_cpu(regdump_val[i + 2]),
+ le32_to_cpu(regdump_val[i + 3]));
+ }
+
+}
+
+static int ath6kl_hif_proc_dbg_intr(struct ath6kl_device *dev)
+{
+ u32 dummy;
+ int ret;
- ath6kl_target_failure(dev->ar);
+ ath6kl_warn("firmware crashed\n");
/*
* read counter to clear the interrupt, the debug error interrupt is
* counter 0.
*/
- status = hif_read_write_sync(dev->ar, COUNT_DEC_ADDRESS,
+ ret = hif_read_write_sync(dev->ar, COUNT_DEC_ADDRESS,
(u8 *)&dummy, 4, HIF_RD_SYNC_BYTE_INC);
- if (status)
- WARN_ON(1);
+ if (ret)
+ ath6kl_warn("Failed to clear debug interrupt: %d\n", ret);
- return status;
+ ath6kl_hif_dump_fw_crash(dev->ar);
+
+ return ret;
}
/* mailbox recv message polling */
-int ath6kldev_poll_mboxmsg_rx(struct ath6kl_device *dev, u32 *lk_ahd,
+int ath6kl_hif_poll_mboxmsg_rx(struct ath6kl_device *dev, u32 *lk_ahd,
int timeout)
{
struct ath6kl_irq_proc_registers *rg;
@@ -118,7 +171,7 @@ int ath6kldev_poll_mboxmsg_rx(struct ath6kl_device *dev, u32 *lk_ahd,
/* delay a little */
mdelay(ATH6KL_TIME_QUANTUM);
- ath6kl_dbg(ATH6KL_DBG_HTC_RECV, "retry mbox poll : %d\n", i);
+ ath6kl_dbg(ATH6KL_DBG_HIF, "hif retry mbox poll try %d\n", i);
}
if (i == 0) {
@@ -131,7 +184,7 @@ int ath6kldev_poll_mboxmsg_rx(struct ath6kl_device *dev, u32 *lk_ahd,
* Target failure handler will be called in case of
* an assert.
*/
- ath6kldev_proc_dbg_intr(dev);
+ ath6kl_hif_proc_dbg_intr(dev);
}
return status;
@@ -141,11 +194,14 @@ int ath6kldev_poll_mboxmsg_rx(struct ath6kl_device *dev, u32 *lk_ahd,
* Disable packet reception (used in case the host runs out of buffers)
* using the interrupt enable registers through the host I/F
*/
-int ath6kldev_rx_control(struct ath6kl_device *dev, bool enable_rx)
+int ath6kl_hif_rx_control(struct ath6kl_device *dev, bool enable_rx)
{
struct ath6kl_irq_enable_reg regs;
int status = 0;
+ ath6kl_dbg(ATH6KL_DBG_HIF, "hif rx %s\n",
+ enable_rx ? "enable" : "disable");
+
/* take the lock to protect interrupt enable shadows */
spin_lock_bh(&dev->lock);
@@ -168,7 +224,7 @@ int ath6kldev_rx_control(struct ath6kl_device *dev, bool enable_rx)
return status;
}
-int ath6kldev_submit_scat_req(struct ath6kl_device *dev,
+int ath6kl_hif_submit_scat_req(struct ath6kl_device *dev,
struct hif_scatter_req *scat_req, bool read)
{
int status = 0;
@@ -185,14 +241,14 @@ int ath6kldev_submit_scat_req(struct ath6kl_device *dev,
dev->ar->mbox_info.htc_addr;
}
- ath6kl_dbg((ATH6KL_DBG_HTC_RECV | ATH6KL_DBG_HTC_SEND),
- "ath6kldev_submit_scat_req, entries: %d, total len: %d mbox:0x%X (mode: %s : %s)\n",
+ ath6kl_dbg(ATH6KL_DBG_HIF,
+ "hif submit scatter request entries %d len %d mbox 0x%x %s %s\n",
scat_req->scat_entries, scat_req->len,
scat_req->addr, !read ? "async" : "sync",
(read) ? "rd" : "wr");
if (!read && scat_req->virt_scat) {
- status = ath6kldev_cp_scat_dma_buf(scat_req, false);
+ status = ath6kl_hif_cp_scat_dma_buf(scat_req, false);
if (status) {
scat_req->status = status;
scat_req->complete(dev->ar->htc_target, scat_req);
@@ -207,13 +263,13 @@ int ath6kldev_submit_scat_req(struct ath6kl_device *dev,
scat_req->status = status;
if (!status && scat_req->virt_scat)
scat_req->status =
- ath6kldev_cp_scat_dma_buf(scat_req, true);
+ ath6kl_hif_cp_scat_dma_buf(scat_req, true);
}
return status;
}
-static int ath6kldev_proc_counter_intr(struct ath6kl_device *dev)
+static int ath6kl_hif_proc_counter_intr(struct ath6kl_device *dev)
{
u8 counter_int_status;
@@ -232,12 +288,12 @@ static int ath6kldev_proc_counter_intr(struct ath6kl_device *dev)
* the debug assertion counter interrupt.
*/
if (counter_int_status & ATH6KL_TARGET_DEBUG_INTR_MASK)
- return ath6kldev_proc_dbg_intr(dev);
+ return ath6kl_hif_proc_dbg_intr(dev);
return 0;
}
-static int ath6kldev_proc_err_intr(struct ath6kl_device *dev)
+static int ath6kl_hif_proc_err_intr(struct ath6kl_device *dev)
{
int status;
u8 error_int_status;
@@ -282,7 +338,7 @@ static int ath6kldev_proc_err_intr(struct ath6kl_device *dev)
return status;
}
-static int ath6kldev_proc_cpu_intr(struct ath6kl_device *dev)
+static int ath6kl_hif_proc_cpu_intr(struct ath6kl_device *dev)
{
int status;
u8 cpu_int_status;
@@ -417,7 +473,7 @@ static int proc_pending_irqs(struct ath6kl_device *dev, bool *done)
* we rapidly pull packets.
*/
status = ath6kl_htc_rxmsg_pending_handler(dev->htc_cnxt,
- &lk_ahd, &fetched);
+ lk_ahd, &fetched);
if (status)
goto out;
@@ -436,21 +492,21 @@ static int proc_pending_irqs(struct ath6kl_device *dev, bool *done)
if (MS(HOST_INT_STATUS_CPU, host_int_status)) {
/* CPU Interrupt */
- status = ath6kldev_proc_cpu_intr(dev);
+ status = ath6kl_hif_proc_cpu_intr(dev);
if (status)
goto out;
}
if (MS(HOST_INT_STATUS_ERROR, host_int_status)) {
/* Error Interrupt */
- status = ath6kldev_proc_err_intr(dev);
+ status = ath6kl_hif_proc_err_intr(dev);
if (status)
goto out;
}
if (MS(HOST_INT_STATUS_COUNTER, host_int_status))
/* Counter Interrupt */
- status = ath6kldev_proc_counter_intr(dev);
+ status = ath6kl_hif_proc_counter_intr(dev);
out:
/*
@@ -479,9 +535,10 @@ out:
}
/* interrupt handler, kicks off all interrupt processing */
-int ath6kldev_intr_bh_handler(struct ath6kl *ar)
+int ath6kl_hif_intr_bh_handler(struct ath6kl *ar)
{
struct ath6kl_device *dev = ar->htc_target->dev;
+ unsigned long timeout;
int status = 0;
bool done = false;
@@ -495,7 +552,8 @@ int ath6kldev_intr_bh_handler(struct ath6kl *ar)
* IRQ processing is synchronous, interrupt status registers can be
* re-read.
*/
- while (!done) {
+ timeout = jiffies + msecs_to_jiffies(ATH6KL_HIF_COMMUNICATION_TIMEOUT);
+ while (time_before(jiffies, timeout) && !done) {
status = proc_pending_irqs(dev, &done);
if (status)
break;
@@ -504,7 +562,7 @@ int ath6kldev_intr_bh_handler(struct ath6kl *ar)
return status;
}
-static int ath6kldev_enable_intrs(struct ath6kl_device *dev)
+static int ath6kl_hif_enable_intrs(struct ath6kl_device *dev)
{
struct ath6kl_irq_enable_reg regs;
int status;
@@ -552,7 +610,7 @@ static int ath6kldev_enable_intrs(struct ath6kl_device *dev)
return status;
}
-int ath6kldev_disable_intrs(struct ath6kl_device *dev)
+int ath6kl_hif_disable_intrs(struct ath6kl_device *dev)
{
struct ath6kl_irq_enable_reg regs;
@@ -571,7 +629,7 @@ int ath6kldev_disable_intrs(struct ath6kl_device *dev)
}
/* enable device interrupts */
-int ath6kldev_unmask_intrs(struct ath6kl_device *dev)
+int ath6kl_hif_unmask_intrs(struct ath6kl_device *dev)
{
int status = 0;
@@ -583,29 +641,29 @@ int ath6kldev_unmask_intrs(struct ath6kl_device *dev)
* target "soft" resets. The ATH6KL interrupt enables reset back to an
* "enabled" state when this happens.
*/
- ath6kldev_disable_intrs(dev);
+ ath6kl_hif_disable_intrs(dev);
/* unmask the host controller interrupts */
ath6kl_hif_irq_enable(dev->ar);
- status = ath6kldev_enable_intrs(dev);
+ status = ath6kl_hif_enable_intrs(dev);
return status;
}
/* disable all device interrupts */
-int ath6kldev_mask_intrs(struct ath6kl_device *dev)
+int ath6kl_hif_mask_intrs(struct ath6kl_device *dev)
{
/*
* Mask the interrupt at the HIF layer to avoid any stray interrupt
* taken while we zero out our shadow registers in
- * ath6kldev_disable_intrs().
+ * ath6kl_hif_disable_intrs().
*/
ath6kl_hif_irq_disable(dev->ar);
- return ath6kldev_disable_intrs(dev);
+ return ath6kl_hif_disable_intrs(dev);
}
-int ath6kldev_setup(struct ath6kl_device *dev)
+int ath6kl_hif_setup(struct ath6kl_device *dev)
{
int status = 0;
@@ -621,19 +679,17 @@ int ath6kldev_setup(struct ath6kl_device *dev)
/* must be a power of 2 */
if ((dev->htc_cnxt->block_sz & (dev->htc_cnxt->block_sz - 1)) != 0) {
WARN_ON(1);
+ status = -EINVAL;
goto fail_setup;
}
/* assemble mask, used for padding to a block */
dev->htc_cnxt->block_mask = dev->htc_cnxt->block_sz - 1;
- ath6kl_dbg(ATH6KL_DBG_TRC, "block size: %d, mbox addr:0x%X\n",
+ ath6kl_dbg(ATH6KL_DBG_HIF, "hif block size %d mbox addr 0x%x\n",
dev->htc_cnxt->block_sz, dev->ar->mbox_info.htc_addr);
- ath6kl_dbg(ATH6KL_DBG_TRC,
- "hif interrupt processing is sync only\n");
-
- status = ath6kldev_disable_intrs(dev);
+ status = ath6kl_hif_disable_intrs(dev);
fail_setup:
return status;
diff --git a/drivers/net/wireless/ath/ath6kl/hif.h b/drivers/net/wireless/ath/ath6kl/hif.h
index 797e2d1d9bf9..699a036f3a44 100644
--- a/drivers/net/wireless/ath/ath6kl/hif.h
+++ b/drivers/net/wireless/ath/ath6kl/hif.h
@@ -35,6 +35,7 @@
#define MAX_SCATTER_REQ_TRANSFER_SIZE (32 * 1024)
#define MANUFACTURER_ID_AR6003_BASE 0x300
+#define MANUFACTURER_ID_AR6004_BASE 0x400
/* SDIO manufacturer ID and Codes */
#define MANUFACTURER_ID_ATH6KL_BASE_MASK 0xFF00
#define MANUFACTURER_CODE 0x271 /* Atheros */
@@ -59,6 +60,18 @@
/* mode to enable special 4-bit interrupt assertion without clock */
#define SDIO_IRQ_MODE_ASYNC_4BIT_IRQ (1 << 0)
+/* HTC runs over mailbox 0 */
+#define HTC_MAILBOX 0
+
+#define ATH6KL_TARGET_DEBUG_INTR_MASK 0x01
+
+/* FIXME: are these duplicates with MAX_SCATTER_ values in hif.h? */
+#define ATH6KL_SCATTER_ENTRIES_PER_REQ 16
+#define ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER (16 * 1024)
+#define ATH6KL_SCATTER_REQS 4
+
+#define ATH6KL_HIF_COMMUNICATION_TIMEOUT 1000
+
struct bus_request {
struct list_head list;
@@ -186,6 +199,34 @@ struct hif_scatter_req {
struct hif_scatter_item scat_list[1];
};
+struct ath6kl_irq_proc_registers {
+ u8 host_int_status;
+ u8 cpu_int_status;
+ u8 error_int_status;
+ u8 counter_int_status;
+ u8 mbox_frame;
+ u8 rx_lkahd_valid;
+ u8 host_int_status2;
+ u8 gmbox_rx_avail;
+ __le32 rx_lkahd[2];
+ __le32 rx_gmbox_lkahd_alias[2];
+} __packed;
+
+struct ath6kl_irq_enable_reg {
+ u8 int_status_en;
+ u8 cpu_int_status_en;
+ u8 err_int_status_en;
+ u8 cntr_int_status_en;
+} __packed;
+
+struct ath6kl_device {
+ spinlock_t lock;
+ struct ath6kl_irq_proc_registers irq_proc_reg;
+ struct ath6kl_irq_enable_reg irq_en_reg;
+ struct htc_target *htc_cnxt;
+ struct ath6kl *ar;
+};
+
struct ath6kl_hif_ops {
int (*read_write_sync)(struct ath6kl *ar, u32 addr, u8 *buf,
u32 len, u32 request);
@@ -202,7 +243,30 @@ struct ath6kl_hif_ops {
int (*scat_req_rw) (struct ath6kl *ar,
struct hif_scatter_req *scat_req);
void (*cleanup_scatter)(struct ath6kl *ar);
- int (*suspend)(struct ath6kl *ar);
+ int (*suspend)(struct ath6kl *ar, struct cfg80211_wowlan *wow);
+ int (*resume)(struct ath6kl *ar);
+ int (*diag_read32)(struct ath6kl *ar, u32 address, u32 *value);
+ int (*diag_write32)(struct ath6kl *ar, u32 address, __le32 value);
+ int (*bmi_read)(struct ath6kl *ar, u8 *buf, u32 len);
+ int (*bmi_write)(struct ath6kl *ar, u8 *buf, u32 len);
+ int (*power_on)(struct ath6kl *ar);
+ int (*power_off)(struct ath6kl *ar);
+ void (*stop)(struct ath6kl *ar);
};
+int ath6kl_hif_setup(struct ath6kl_device *dev);
+int ath6kl_hif_unmask_intrs(struct ath6kl_device *dev);
+int ath6kl_hif_mask_intrs(struct ath6kl_device *dev);
+int ath6kl_hif_poll_mboxmsg_rx(struct ath6kl_device *dev,
+ u32 *lk_ahd, int timeout);
+int ath6kl_hif_rx_control(struct ath6kl_device *dev, bool enable_rx);
+int ath6kl_hif_disable_intrs(struct ath6kl_device *dev);
+
+int ath6kl_hif_rw_comp_handler(void *context, int status);
+int ath6kl_hif_intr_bh_handler(struct ath6kl *ar);
+
+/* Scatter Function and Definitions */
+int ath6kl_hif_submit_scat_req(struct ath6kl_device *dev,
+ struct hif_scatter_req *scat_req, bool read);
+
#endif
diff --git a/drivers/net/wireless/ath/ath6kl/htc.c b/drivers/net/wireless/ath/ath6kl/htc.c
index f88a7c9e4148..f3b63ca25c7e 100644
--- a/drivers/net/wireless/ath/ath6kl/htc.c
+++ b/drivers/net/wireless/ath/ath6kl/htc.c
@@ -15,13 +15,321 @@
*/
#include "core.h"
-#include "htc_hif.h"
+#include "hif.h"
#include "debug.h"
#include "hif-ops.h"
#include <asm/unaligned.h>
#define CALC_TXRX_PADDED_LEN(dev, len) (__ALIGN_MASK((len), (dev)->block_mask))
+/* Functions for Tx credit handling */
+static void ath6kl_credit_deposit(struct ath6kl_htc_credit_info *cred_info,
+ struct htc_endpoint_credit_dist *ep_dist,
+ int credits)
+{
+ ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit deposit ep %d credits %d\n",
+ ep_dist->endpoint, credits);
+
+ ep_dist->credits += credits;
+ ep_dist->cred_assngd += credits;
+ cred_info->cur_free_credits -= credits;
+}
+
+static void ath6kl_credit_init(struct ath6kl_htc_credit_info *cred_info,
+ struct list_head *ep_list,
+ int tot_credits)
+{
+ struct htc_endpoint_credit_dist *cur_ep_dist;
+ int count;
+
+ ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit init total %d\n", tot_credits);
+
+ cred_info->cur_free_credits = tot_credits;
+ cred_info->total_avail_credits = tot_credits;
+
+ list_for_each_entry(cur_ep_dist, ep_list, list) {
+ if (cur_ep_dist->endpoint == ENDPOINT_0)
+ continue;
+
+ cur_ep_dist->cred_min = cur_ep_dist->cred_per_msg;
+
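+		/*
+		 * With more than a handful of total credits, give the BK and
+		 * BE data endpoints their minimum up front and mark them
+		 * active.
+		 */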
+ if (tot_credits > 4) {
+ if ((cur_ep_dist->svc_id == WMI_DATA_BK_SVC) ||
+ (cur_ep_dist->svc_id == WMI_DATA_BE_SVC)) {
+ ath6kl_credit_deposit(cred_info,
+ cur_ep_dist,
+ cur_ep_dist->cred_min);
+ cur_ep_dist->dist_flags |= HTC_EP_ACTIVE;
+ }
+ }
+
+ if (cur_ep_dist->svc_id == WMI_CONTROL_SVC) {
+ ath6kl_credit_deposit(cred_info, cur_ep_dist,
+ cur_ep_dist->cred_min);
+ /*
+			 * Control service is always marked active; it
+ * never goes inactive EVER.
+ */
+ cur_ep_dist->dist_flags |= HTC_EP_ACTIVE;
+ } else if (cur_ep_dist->svc_id == WMI_DATA_BK_SVC)
+ /* this is the lowest priority data endpoint */
+ /* FIXME: this looks fishy, check */
+ cred_info->lowestpri_ep_dist = cur_ep_dist->list;
+
+ /*
+		 * Streams have to be created (explicitly or implicitly) for
+		 * all kinds of traffic. BE endpoints are also inactive in the
+		 * beginning. When BE traffic starts, it creates implicit
+		 * streams that redistribute credits.
+		 *
+		 * Note: all other endpoints have minimums set but are
+		 * initially given NO credits. Credits will be distributed
+		 * as traffic activity demands.
+ */
+ }
+
+ WARN_ON(cred_info->cur_free_credits <= 0);
+
+ list_for_each_entry(cur_ep_dist, ep_list, list) {
+ if (cur_ep_dist->endpoint == ENDPOINT_0)
+ continue;
+
+ if (cur_ep_dist->svc_id == WMI_CONTROL_SVC)
+ cur_ep_dist->cred_norm = cur_ep_dist->cred_per_msg;
+ else {
+ /*
+			 * For the remaining data endpoints, we assume that
+			 * each endpoint's cred_per_msg is the same. We use a
+			 * simple calculation here: take the remaining
+			 * credits, determine how many whole messages they
+			 * can cover, and set each endpoint's normal value
+			 * to 3/4 of that amount.
+ */
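+			/*
+			 * Illustrative example (numbers made up): with 26
+			 * free credits and a cred_per_msg of 6, whole
+			 * messages cover 24 credits, so cred_norm becomes
+			 * (24 * 3) >> 2 = 18.
+			 */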
+ count = (cred_info->cur_free_credits /
+ cur_ep_dist->cred_per_msg)
+ * cur_ep_dist->cred_per_msg;
+ count = (count * 3) >> 2;
+ count = max(count, cur_ep_dist->cred_per_msg);
+ cur_ep_dist->cred_norm = count;
+
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_CREDIT,
+ "credit ep %d svc_id %d credits %d per_msg %d norm %d min %d\n",
+ cur_ep_dist->endpoint,
+ cur_ep_dist->svc_id,
+ cur_ep_dist->credits,
+ cur_ep_dist->cred_per_msg,
+ cur_ep_dist->cred_norm,
+ cur_ep_dist->cred_min);
+ }
+}
+
+/* initialize and setup credit distribution */
+int ath6kl_credit_setup(void *htc_handle,
+ struct ath6kl_htc_credit_info *cred_info)
+{
+ u16 servicepriority[5];
+
+ memset(cred_info, 0, sizeof(struct ath6kl_htc_credit_info));
+
+ servicepriority[0] = WMI_CONTROL_SVC; /* highest */
+ servicepriority[1] = WMI_DATA_VO_SVC;
+ servicepriority[2] = WMI_DATA_VI_SVC;
+ servicepriority[3] = WMI_DATA_BE_SVC;
+ servicepriority[4] = WMI_DATA_BK_SVC; /* lowest */
+
+ /* set priority list */
+ ath6kl_htc_set_credit_dist(htc_handle, cred_info, servicepriority, 5);
+
+ return 0;
+}
+
+/* reduce an ep's credits back to a set limit */
+static void ath6kl_credit_reduce(struct ath6kl_htc_credit_info *cred_info,
+ struct htc_endpoint_credit_dist *ep_dist,
+ int limit)
+{
+ int credits;
+
+ ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit reduce ep %d limit %d\n",
+ ep_dist->endpoint, limit);
+
+ ep_dist->cred_assngd = limit;
+
+ if (ep_dist->credits <= limit)
+ return;
+
+ credits = ep_dist->credits - limit;
+ ep_dist->credits -= credits;
+ cred_info->cur_free_credits += credits;
+}
+
+static void ath6kl_credit_update(struct ath6kl_htc_credit_info *cred_info,
+ struct list_head *epdist_list)
+{
+ struct htc_endpoint_credit_dist *cur_dist_list;
+
+ list_for_each_entry(cur_dist_list, epdist_list, list) {
+ if (cur_dist_list->endpoint == ENDPOINT_0)
+ continue;
+
+ if (cur_dist_list->cred_to_dist > 0) {
+ cur_dist_list->credits +=
+ cur_dist_list->cred_to_dist;
+ cur_dist_list->cred_to_dist = 0;
+ if (cur_dist_list->credits >
+ cur_dist_list->cred_assngd)
+ ath6kl_credit_reduce(cred_info,
+ cur_dist_list,
+ cur_dist_list->cred_assngd);
+
+ if (cur_dist_list->credits >
+ cur_dist_list->cred_norm)
+ ath6kl_credit_reduce(cred_info, cur_dist_list,
+ cur_dist_list->cred_norm);
+
+ if (!(cur_dist_list->dist_flags & HTC_EP_ACTIVE)) {
+ if (cur_dist_list->txq_depth == 0)
+ ath6kl_credit_reduce(cred_info,
+ cur_dist_list, 0);
+ }
+ }
+ }
+}
+
+/*
+ * HTC has an endpoint that needs credits, ep_dist is the endpoint in
+ * question.
+ */
+static void ath6kl_credit_seek(struct ath6kl_htc_credit_info *cred_info,
+ struct htc_endpoint_credit_dist *ep_dist)
+{
+ struct htc_endpoint_credit_dist *curdist_list;
+ int credits = 0;
+ int need;
+
+ if (ep_dist->svc_id == WMI_CONTROL_SVC)
+ goto out;
+
+ if ((ep_dist->svc_id == WMI_DATA_VI_SVC) ||
+ (ep_dist->svc_id == WMI_DATA_VO_SVC))
+ if ((ep_dist->cred_assngd >= ep_dist->cred_norm))
+ goto out;
+
+ /*
+ * For all other services, we follow a simple algorithm of:
+ *
+ * 1. checking the free pool for credits
+ * 2. checking lower priority endpoints for credits to take
+ */
+
+ credits = min(cred_info->cur_free_credits, ep_dist->seek_cred);
+
+ if (credits >= ep_dist->seek_cred)
+ goto out;
+
+ /*
+	 * We don't have enough in the free pool, so try taking credits away
+	 * from lower priority services. The rules for taking away credits:
+ *
+ * 1. Only take from lower priority endpoints
+ * 2. Only take what is allocated above the minimum (never
+ * starve an endpoint completely)
+ * 3. Only take what you need.
+ */
+
+ list_for_each_entry_reverse(curdist_list,
+ &cred_info->lowestpri_ep_dist,
+ list) {
+ if (curdist_list == ep_dist)
+ break;
+
+ need = ep_dist->seek_cred - cred_info->cur_free_credits;
+
+ if ((curdist_list->cred_assngd - need) >=
+ curdist_list->cred_min) {
+ /*
+			 * The current one has been allocated more than
+			 * its minimum and has enough credits assigned
+			 * above its minimum to fulfill our need, so try to
+			 * take away just enough to fulfill that need.
+ */
+ ath6kl_credit_reduce(cred_info, curdist_list,
+ curdist_list->cred_assngd - need);
+
+ if (cred_info->cur_free_credits >=
+ ep_dist->seek_cred)
+ break;
+ }
+
+ if (curdist_list->endpoint == ENDPOINT_0)
+ break;
+ }
+
+ credits = min(cred_info->cur_free_credits, ep_dist->seek_cred);
+
+out:
+ /* did we find some credits? */
+ if (credits)
+ ath6kl_credit_deposit(cred_info, ep_dist, credits);
+
+ ep_dist->seek_cred = 0;
+}
+
+/* redistribute credits based on activity change */
+static void ath6kl_credit_redistribute(struct ath6kl_htc_credit_info *info,
+ struct list_head *ep_dist_list)
+{
+ struct htc_endpoint_credit_dist *curdist_list;
+
+ list_for_each_entry(curdist_list, ep_dist_list, list) {
+ if (curdist_list->endpoint == ENDPOINT_0)
+ continue;
+
+ if ((curdist_list->svc_id == WMI_DATA_BK_SVC) ||
+ (curdist_list->svc_id == WMI_DATA_BE_SVC))
+ curdist_list->dist_flags |= HTC_EP_ACTIVE;
+
+ if ((curdist_list->svc_id != WMI_CONTROL_SVC) &&
+ !(curdist_list->dist_flags & HTC_EP_ACTIVE)) {
+ if (curdist_list->txq_depth == 0)
+ ath6kl_credit_reduce(info, curdist_list, 0);
+ else
+ ath6kl_credit_reduce(info,
+ curdist_list,
+ curdist_list->cred_min);
+ }
+ }
+}
+
+/*
+ * This function is invoked whenever endpoints require credit
+ * distributions. A lock is held while this function is invoked, so it
+ * must NOT block. The ep_dist_list is a list of distribution
+ * structures in prioritized order as defined by the call to the
+ * htc_set_credit_dist() API.
+ */
+static void ath6kl_credit_distribute(struct ath6kl_htc_credit_info *cred_info,
+ struct list_head *ep_dist_list,
+ enum htc_credit_dist_reason reason)
+{
+ switch (reason) {
+ case HTC_CREDIT_DIST_SEND_COMPLETE:
+ ath6kl_credit_update(cred_info, ep_dist_list);
+ break;
+ case HTC_CREDIT_DIST_ACTIVITY_CHANGE:
+ ath6kl_credit_redistribute(cred_info, ep_dist_list);
+ break;
+ default:
+ break;
+ }
+
+ WARN_ON(cred_info->cur_free_credits > cred_info->total_avail_credits);
+ WARN_ON(cred_info->cur_free_credits < 0);
+}
+
static void ath6kl_htc_tx_buf_align(u8 **buf, unsigned long len)
{
u8 *align_addr;
@@ -102,12 +410,12 @@ static void htc_tx_comp_update(struct htc_target *target,
packet->info.tx.cred_used;
endpoint->cred_dist.txq_depth = get_queue_depth(&endpoint->txq);
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
- target->cred_dist_cntxt, &target->cred_dist_list);
+ ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx ctxt 0x%p dist 0x%p\n",
+ target->credit_info, &target->cred_dist_list);
- ath6k_credit_distribute(target->cred_dist_cntxt,
- &target->cred_dist_list,
- HTC_CREDIT_DIST_SEND_COMPLETE);
+ ath6kl_credit_distribute(target->credit_info,
+ &target->cred_dist_list,
+ HTC_CREDIT_DIST_SEND_COMPLETE);
spin_unlock_bh(&target->tx_lock);
}
@@ -118,8 +426,8 @@ static void htc_tx_complete(struct htc_endpoint *endpoint,
if (list_empty(txq))
return;
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
- "send complete ep %d, (%d pkts)\n",
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc tx complete ep %d pkts %d\n",
endpoint->eid, get_queue_depth(txq));
ath6kl_tx_complete(endpoint->target->dev->ar, txq);
@@ -131,6 +439,9 @@ static void htc_tx_comp_handler(struct htc_target *target,
struct htc_endpoint *endpoint = &target->endpoint[packet->endpoint];
struct list_head container;
+ ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx complete seqno %d\n",
+ packet->info.tx.seqno);
+
htc_tx_comp_update(target, endpoint, packet);
INIT_LIST_HEAD(&container);
list_add_tail(&packet->list, &container);
@@ -148,8 +459,8 @@ static void htc_async_tx_scat_complete(struct htc_target *target,
INIT_LIST_HEAD(&tx_compq);
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
- "htc_async_tx_scat_complete total len: %d entries: %d\n",
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc tx scat complete len %d entries %d\n",
scat_req->len, scat_req->scat_entries);
if (scat_req->status)
@@ -190,16 +501,13 @@ static int ath6kl_htc_tx_issue(struct htc_target *target,
send_len = packet->act_len + HTC_HDR_LENGTH;
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "%s: transmit len : %d (%s)\n",
- __func__, send_len, sync ? "sync" : "async");
-
padded_len = CALC_TXRX_PADDED_LEN(target, send_len);
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
- "DevSendPacket, padded len: %d mbox:0x%X (mode:%s)\n",
- padded_len,
- target->dev->ar->mbox_info.htc_addr,
- sync ? "sync" : "async");
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc tx issue len %d seqno %d padded_len %d mbox 0x%X %s\n",
+ send_len, packet->info.tx.seqno, padded_len,
+ target->dev->ar->mbox_info.htc_addr,
+ sync ? "sync" : "async");
if (sync) {
status = hif_read_write_sync(target->dev->ar,
@@ -227,7 +535,7 @@ static int htc_check_credits(struct htc_target *target,
*req_cred = (len > target->tgt_cred_sz) ?
DIV_ROUND_UP(len, target->tgt_cred_sz) : 1;
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "creds required:%d got:%d\n",
+ ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit check need %d got %d\n",
*req_cred, ep->cred_dist.credits);
if (ep->cred_dist.credits < *req_cred) {
@@ -237,16 +545,13 @@ static int htc_check_credits(struct htc_target *target,
/* Seek more credits */
ep->cred_dist.seek_cred = *req_cred - ep->cred_dist.credits;
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
- target->cred_dist_cntxt, &ep->cred_dist);
-
- ath6k_seek_credits(target->cred_dist_cntxt, &ep->cred_dist);
+ ath6kl_credit_seek(target->credit_info, &ep->cred_dist);
ep->cred_dist.seek_cred = 0;
if (ep->cred_dist.credits < *req_cred) {
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
- "not enough credits for ep %d - leaving packet in queue\n",
+ ath6kl_dbg(ATH6KL_DBG_CREDIT,
+ "credit not found for ep %d\n",
eid);
return -EINVAL;
}
@@ -260,17 +565,15 @@ static int htc_check_credits(struct htc_target *target,
ep->cred_dist.seek_cred =
ep->cred_dist.cred_per_msg - ep->cred_dist.credits;
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
- target->cred_dist_cntxt, &ep->cred_dist);
-
- ath6k_seek_credits(target->cred_dist_cntxt, &ep->cred_dist);
+ ath6kl_credit_seek(target->credit_info, &ep->cred_dist);
/* see if we were successful in getting more */
if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) {
/* tell the target we need credits ASAP! */
*flags |= HTC_FLAGS_NEED_CREDIT_UPDATE;
ep->ep_st.cred_low_indicate += 1;
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "host needs credits\n");
+ ath6kl_dbg(ATH6KL_DBG_CREDIT,
+ "credit we need credits asap\n");
}
}
@@ -295,8 +598,8 @@ static void ath6kl_htc_tx_pkts_get(struct htc_target *target,
packet = list_first_entry(&endpoint->txq, struct htc_packet,
list);
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
- "got head pkt:0x%p , queue depth: %d\n",
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc tx got packet 0x%p queue depth %d\n",
packet, get_queue_depth(&endpoint->txq));
len = CALC_TXRX_PADDED_LEN(target,
@@ -404,9 +707,9 @@ static int ath6kl_htc_tx_setup_scat_list(struct htc_target *target,
scat_req->len += len;
scat_req->scat_entries++;
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
- "%d, adding pkt : 0x%p len:%d (remaining space:%d)\n",
- i, packet, len, rem_scat);
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc tx adding (%d) pkt 0x%p seqno %d len %d remaining %d\n",
+ i, packet, packet->info.tx.seqno, len, rem_scat);
}
/* Roll back scatter setup in case of any failure */
@@ -455,12 +758,12 @@ static void ath6kl_htc_tx_bundle(struct htc_endpoint *endpoint,
if (!scat_req) {
/* no scatter resources */
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
- "no more scatter resources\n");
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc tx no more scatter resources\n");
break;
}
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "pkts to scatter: %d\n",
+ ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx pkts to scatter: %d\n",
n_scat);
scat_req->len = 0;
@@ -479,10 +782,10 @@ static void ath6kl_htc_tx_bundle(struct htc_endpoint *endpoint,
n_sent_bundle++;
tot_pkts_bundle += scat_req->scat_entries;
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
- "send scatter total bytes: %d , entries: %d\n",
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc tx scatter bytes %d entries %d\n",
scat_req->len, scat_req->scat_entries);
- ath6kldev_submit_scat_req(target->dev, scat_req, false);
+ ath6kl_hif_submit_scat_req(target->dev, scat_req, false);
if (status)
break;
@@ -490,8 +793,8 @@ static void ath6kl_htc_tx_bundle(struct htc_endpoint *endpoint,
*sent_bundle = n_sent_bundle;
*n_bundle_pkts = tot_pkts_bundle;
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "%s (sent:%d)\n",
- __func__, n_sent_bundle);
+ ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx bundle sent %d pkts\n",
+ n_sent_bundle);
return;
}
@@ -510,7 +813,7 @@ static void ath6kl_htc_tx_from_queue(struct htc_target *target,
if (endpoint->tx_proc_cnt > 1) {
endpoint->tx_proc_cnt--;
spin_unlock_bh(&target->tx_lock);
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "htc_try_send (busy)\n");
+ ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx busy\n");
return;
}
@@ -588,15 +891,12 @@ static bool ath6kl_htc_tx_try(struct htc_target *target,
overflow = true;
if (overflow)
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
- "ep %d, tx queue will overflow :%d , tx depth:%d, max:%d\n",
- endpoint->eid, overflow, txq_depth,
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc tx overflow ep %d depth %d max %d\n",
+ endpoint->eid, txq_depth,
endpoint->max_txq_depth);
if (overflow && ep_cb.tx_full) {
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
- "indicating overflowed tx packet: 0x%p\n", tx_pkt);
-
if (ep_cb.tx_full(endpoint->target, tx_pkt) ==
HTC_SEND_FULL_DROP) {
endpoint->ep_st.tx_dropped += 1;
@@ -625,12 +925,12 @@ static void htc_chk_ep_txq(struct htc_target *target)
* are not modifying any state.
*/
list_for_each_entry(cred_dist, &target->cred_dist_list, list) {
- endpoint = (struct htc_endpoint *)cred_dist->htc_rsvd;
+ endpoint = cred_dist->htc_ep;
spin_lock_bh(&target->tx_lock);
if (!list_empty(&endpoint->txq)) {
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
- "ep %d has %d credits and %d packets in tx queue\n",
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc creds ep %d credits %d pkts %d\n",
cred_dist->endpoint,
endpoint->cred_dist.credits,
get_queue_depth(&endpoint->txq));
@@ -704,13 +1004,13 @@ static int htc_setup_tx_complete(struct htc_target *target)
}
void ath6kl_htc_set_credit_dist(struct htc_target *target,
- struct htc_credit_state_info *cred_dist_cntxt,
+ struct ath6kl_htc_credit_info *credit_info,
u16 srvc_pri_order[], int list_len)
{
struct htc_endpoint *endpoint;
int i, ep;
- target->cred_dist_cntxt = cred_dist_cntxt;
+ target->credit_info = credit_info;
list_add_tail(&target->endpoint[ENDPOINT_0].cred_dist.list,
&target->cred_dist_list);
@@ -736,8 +1036,8 @@ int ath6kl_htc_tx(struct htc_target *target, struct htc_packet *packet)
struct htc_endpoint *endpoint;
struct list_head queue;
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
- "htc_tx: ep id: %d, buf: 0x%p, len: %d\n",
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc tx ep id %d buf 0x%p len %d\n",
packet->endpoint, packet->buf, packet->act_len);
if (packet->endpoint >= ENDPOINT_MAX) {
@@ -787,8 +1087,8 @@ void ath6kl_htc_flush_txep(struct htc_target *target,
list_for_each_entry_safe(packet, tmp_pkt, &discard_q, list) {
packet->status = -ECANCELED;
list_del(&packet->list);
- ath6kl_dbg(ATH6KL_DBG_TRC,
- "flushing tx pkt:0x%p, len:%d, ep:%d tag:0x%X\n",
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc tx flushing pkt 0x%p len %d ep %d tag 0x%x\n",
packet, packet->act_len,
packet->endpoint, packet->info.tx.tag);
@@ -844,12 +1144,13 @@ void ath6kl_htc_indicate_activity_change(struct htc_target *target,
endpoint->cred_dist.txq_depth =
get_queue_depth(&endpoint->txq);
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
- target->cred_dist_cntxt, &target->cred_dist_list);
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc tx activity ctxt 0x%p dist 0x%p\n",
+ target->credit_info, &target->cred_dist_list);
- ath6k_credit_distribute(target->cred_dist_cntxt,
- &target->cred_dist_list,
- HTC_CREDIT_DIST_ACTIVITY_CHANGE);
+ ath6kl_credit_distribute(target->credit_info,
+ &target->cred_dist_list,
+ HTC_CREDIT_DIST_ACTIVITY_CHANGE);
}
spin_unlock_bh(&target->tx_lock);
@@ -919,15 +1220,15 @@ static int ath6kl_htc_rx_packet(struct htc_target *target,
padded_len = CALC_TXRX_PADDED_LEN(target, rx_len);
if (padded_len > packet->buf_len) {
- ath6kl_err("not enough receive space for packet - padlen:%d recvlen:%d bufferlen:%d\n",
+ ath6kl_err("not enough receive space for packet - padlen %d recvlen %d bufferlen %d\n",
padded_len, rx_len, packet->buf_len);
return -ENOMEM;
}
- ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
- "dev_rx_pkt (0x%p : hdr:0x%X) padded len: %d mbox:0x%X (mode:%s)\n",
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc rx 0x%p hdr x%x len %d mbox 0x%x\n",
packet, packet->info.rx.exp_hdr,
- padded_len, dev->ar->mbox_info.htc_addr, "sync");
+ padded_len, dev->ar->mbox_info.htc_addr);
status = hif_read_write_sync(dev->ar,
dev->ar->mbox_info.htc_addr,
@@ -1137,8 +1438,8 @@ static int ath6kl_htc_rx_alloc(struct htc_target *target,
}
endpoint->ep_st.rx_bundle_from_hdr += 1;
- ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
- "htc hdr indicates :%d msg can be fetched as a bundle\n",
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc rx bundle pkts %d\n",
n_msg);
} else
/* HTC header only indicates 1 message to fetch */
@@ -1191,8 +1492,8 @@ static void htc_ctrl_rx(struct htc_target *context, struct htc_packet *packets)
ath6kl_err("htc_ctrl_rx, got message with len:%zu\n",
packets->act_len + HTC_HDR_LENGTH);
- ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES,
- "Unexpected ENDPOINT 0 Message", "",
+ ath6kl_dbg_dump(ATH6KL_DBG_HTC,
+ "htc rx unexpected endpoint 0 message", "",
packets->buf - HTC_HDR_LENGTH,
packets->act_len + HTC_HDR_LENGTH);
}
@@ -1209,9 +1510,6 @@ static void htc_proc_cred_rpt(struct htc_target *target,
int tot_credits = 0, i;
bool dist = false;
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
- "htc_proc_cred_rpt, credit report entries:%d\n", n_entries);
-
spin_lock_bh(&target->tx_lock);
for (i = 0; i < n_entries; i++, rpt++) {
@@ -1223,8 +1521,9 @@ static void htc_proc_cred_rpt(struct htc_target *target,
endpoint = &target->endpoint[rpt->eid];
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND, " ep %d got %d credits\n",
- rpt->eid, rpt->credits);
+ ath6kl_dbg(ATH6KL_DBG_CREDIT,
+ "credit report ep %d credits %d\n",
+ rpt->eid, rpt->credits);
endpoint->ep_st.tx_cred_rpt += 1;
endpoint->ep_st.cred_retnd += rpt->credits;
@@ -1264,21 +1563,14 @@ static void htc_proc_cred_rpt(struct htc_target *target,
tot_credits += rpt->credits;
}
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
- "report indicated %d credits to distribute\n",
- tot_credits);
-
if (dist) {
/*
* This was a credit return based on a completed send
* operations note, this is done with the lock held
*/
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
- target->cred_dist_cntxt, &target->cred_dist_list);
-
- ath6k_credit_distribute(target->cred_dist_cntxt,
- &target->cred_dist_list,
- HTC_CREDIT_DIST_SEND_COMPLETE);
+ ath6kl_credit_distribute(target->credit_info,
+ &target->cred_dist_list,
+ HTC_CREDIT_DIST_SEND_COMPLETE);
}
spin_unlock_bh(&target->tx_lock);
@@ -1320,14 +1612,15 @@ static int htc_parse_trailer(struct htc_target *target,
if ((lk_ahd->pre_valid == ((~lk_ahd->post_valid) & 0xFF))
&& next_lk_ahds) {
- ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
- "lk_ahd report found (pre valid:0x%X, post valid:0x%X)\n",
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc rx lk_ahd found pre_valid 0x%x post_valid 0x%x\n",
lk_ahd->pre_valid, lk_ahd->post_valid);
/* look ahead bytes are valid, copy them over */
memcpy((u8 *)&next_lk_ahds[0], lk_ahd->lk_ahd, 4);
- ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Next Look Ahead",
+ ath6kl_dbg_dump(ATH6KL_DBG_HTC,
+ "htc rx next look ahead",
"", next_lk_ahds, 4);
*n_lk_ahds = 1;
@@ -1346,7 +1639,7 @@ static int htc_parse_trailer(struct htc_target *target,
bundle_lkahd_rpt =
(struct htc_bundle_lkahd_rpt *) record_buf;
- ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Bundle lk_ahd",
+ ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bundle lk_ahd",
"", record_buf, record->len);
for (i = 0; i < len; i++) {
@@ -1378,10 +1671,8 @@ static int htc_proc_trailer(struct htc_target *target,
u8 *record_buf;
u8 *orig_buf;
- ath6kl_dbg(ATH6KL_DBG_HTC_RECV, "+htc_proc_trailer (len:%d)\n", len);
-
- ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Recv Trailer", "",
- buf, len);
+ ath6kl_dbg(ATH6KL_DBG_HTC, "htc rx trailer len %d\n", len);
+ ath6kl_dbg_dump(ATH6KL_DBG_HTC, NULL, "", buf, len);
orig_buf = buf;
orig_len = len;
@@ -1418,7 +1709,7 @@ static int htc_proc_trailer(struct htc_target *target,
}
if (status)
- ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "BAD Recv Trailer",
+ ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bad trailer",
"", orig_buf, orig_len);
return status;
@@ -1436,9 +1727,6 @@ static int ath6kl_htc_rx_process_hdr(struct htc_target *target,
if (n_lkahds != NULL)
*n_lkahds = 0;
- ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "HTC Recv PKT", "htc ",
- packet->buf, packet->act_len);
-
/*
* NOTE: we cannot assume the alignment of buf, so we use the safe
* macros to retrieve 16 bit fields.
@@ -1480,9 +1768,9 @@ static int ath6kl_htc_rx_process_hdr(struct htc_target *target,
if (lk_ahd != packet->info.rx.exp_hdr) {
ath6kl_err("%s(): lk_ahd mismatch! (pPkt:0x%p flags:0x%X)\n",
__func__, packet, packet->info.rx.rx_flags);
- ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Expected Message lk_ahd",
+ ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx expected lk_ahd",
"", &packet->info.rx.exp_hdr, 4);
- ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Current Frame Header",
+ ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx current header",
"", (u8 *)&lk_ahd, sizeof(lk_ahd));
status = -ENOMEM;
goto fail_rx;
@@ -1518,15 +1806,8 @@ static int ath6kl_htc_rx_process_hdr(struct htc_target *target,
fail_rx:
if (status)
- ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "BAD HTC Recv PKT",
- "", packet->buf,
- packet->act_len < 256 ? packet->act_len : 256);
- else {
- if (packet->act_len > 0)
- ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES,
- "HTC - Application Msg", "",
- packet->buf, packet->act_len);
- }
+ ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bad packet",
+ "", packet->buf, packet->act_len);
return status;
}
@@ -1534,8 +1815,8 @@ fail_rx:
static void ath6kl_htc_rx_complete(struct htc_endpoint *endpoint,
struct htc_packet *packet)
{
- ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
- "htc calling ep %d recv callback on packet 0x%p\n",
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc rx complete ep %d packet 0x%p\n",
endpoint->eid, packet);
endpoint->ep_cb.rx(endpoint->target, packet);
}
@@ -1571,9 +1852,9 @@ static int ath6kl_htc_rx_bundle(struct htc_target *target,
len = 0;
- ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
- "%s(): (numpackets: %d , actual : %d)\n",
- __func__, get_queue_depth(rxq), n_scat_pkt);
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc rx bundle depth %d pkts %d\n",
+ get_queue_depth(rxq), n_scat_pkt);
scat_req = hif_scatter_req_get(target->dev->ar);
@@ -1620,7 +1901,7 @@ static int ath6kl_htc_rx_bundle(struct htc_target *target,
scat_req->len = len;
scat_req->scat_entries = i;
- status = ath6kldev_submit_scat_req(target->dev, scat_req, true);
+ status = ath6kl_hif_submit_scat_req(target->dev, scat_req, true);
if (!status)
*n_pkt_fetched = i;
@@ -1643,7 +1924,6 @@ static int ath6kl_htc_rx_process_packets(struct htc_target *target,
int status = 0;
list_for_each_entry_safe(packet, tmp_pkt, comp_pktq, list) {
- list_del(&packet->list);
ep = &target->endpoint[packet->endpoint];
/* process header for each of the recv packet */
@@ -1652,6 +1932,8 @@ static int ath6kl_htc_rx_process_packets(struct htc_target *target,
if (status)
return status;
+ list_del(&packet->list);
+
if (list_empty(comp_pktq)) {
/*
* Last packet's more packet flag is set
@@ -1686,11 +1968,15 @@ static int ath6kl_htc_rx_fetch(struct htc_target *target,
int fetched_pkts;
bool part_bundle = false;
int status = 0;
+ struct list_head tmp_rxq;
+ struct htc_packet *packet, *tmp_pkt;
/* now go fetch the list of HTC packets */
while (!list_empty(rx_pktq)) {
fetched_pkts = 0;
+ INIT_LIST_HEAD(&tmp_rxq);
+
if (target->rx_bndl_enable && (get_queue_depth(rx_pktq) > 1)) {
/*
* There are enough packets to attempt a
@@ -1698,28 +1984,27 @@ static int ath6kl_htc_rx_fetch(struct htc_target *target,
* allowed.
*/
status = ath6kl_htc_rx_bundle(target, rx_pktq,
- comp_pktq,
+ &tmp_rxq,
&fetched_pkts,
part_bundle);
if (status)
- return status;
+ goto fail_rx;
if (!list_empty(rx_pktq))
part_bundle = true;
+
+ list_splice_tail_init(&tmp_rxq, comp_pktq);
}
if (!fetched_pkts) {
- struct htc_packet *packet;
packet = list_first_entry(rx_pktq, struct htc_packet,
list);
- list_del(&packet->list);
-
/* fully synchronous */
packet->completion = NULL;
- if (!list_empty(rx_pktq))
+ if (!list_is_singular(rx_pktq))
/*
* look_aheads in all packet
* except the last one in the
@@ -1731,18 +2016,42 @@ static int ath6kl_htc_rx_fetch(struct htc_target *target,
/* go fetch the packet */
status = ath6kl_htc_rx_packet(target, packet,
packet->act_len);
+
+ list_move_tail(&packet->list, &tmp_rxq);
+
if (status)
- return status;
+ goto fail_rx;
- list_add_tail(&packet->list, comp_pktq);
+ list_splice_tail_init(&tmp_rxq, comp_pktq);
}
}
+ return 0;
+
+fail_rx:
+
+ /*
+	 * Clean up any packets we allocated but did not end up
+	 * using to fetch any data.
+ */
+
+ list_for_each_entry_safe(packet, tmp_pkt, rx_pktq, list) {
+ list_del(&packet->list);
+ htc_reclaim_rxbuf(target, packet,
+ &target->endpoint[packet->endpoint]);
+ }
+
+ list_for_each_entry_safe(packet, tmp_pkt, &tmp_rxq, list) {
+ list_del(&packet->list);
+ htc_reclaim_rxbuf(target, packet,
+ &target->endpoint[packet->endpoint]);
+ }
+
return status;
}
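The reworked ath6kl_htc_rx_fetch() above stages fetched packets on a temporary list and only splices them onto the completion queue once the fetch succeeded, so the new fail_rx path can reclaim whatever is still sitting on either list. Below is a minimal, self-contained userspace sketch of that staging pattern; the simple singly linked lists and helpers such as fetch_one() are invented for illustration and are not the driver code itself.

#include <stdio.h>
#include <stdlib.h>

struct pkt { int id; struct pkt *next; };

static void push(struct pkt **list, struct pkt *p)
{
	p->next = *list;
	*list = p;
}

static void splice(struct pkt **dst, struct pkt **src)
{
	while (*src) {
		struct pkt *p = *src;
		*src = p->next;
		push(dst, p);
	}
}

static void reclaim(struct pkt **list)
{
	while (*list) {
		struct pkt *p = *list;
		*list = p->next;
		free(p);
	}
}

/* pretend that fetching packet id 3 fails */
static int fetch_one(struct pkt *p)
{
	return p->id == 3 ? -1 : 0;
}

int main(void)
{
	struct pkt *pending = NULL, *staged = NULL, *done = NULL;
	int i;

	for (i = 0; i < 5; i++) {
		struct pkt *p = malloc(sizeof(*p));
		if (!p)
			return 1;
		p->id = i;
		push(&pending, p);
	}

	while (pending) {
		struct pkt *p = pending;

		pending = p->next;
		push(&staged, p);		/* stage the packet first */
		if (fetch_one(p))
			goto fail;		/* p stays on 'staged' for cleanup */
		splice(&done, &staged);		/* success: move to completion queue */
	}
	printf("all packets fetched\n");
	reclaim(&done);
	return 0;

fail:
	reclaim(&staged);	/* staged but not completed */
	reclaim(&pending);	/* never fetched */
	reclaim(&done);		/* this toy program simply frees completed packets */
	return 1;
}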
int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
- u32 msg_look_ahead[], int *num_pkts)
+ u32 msg_look_ahead, int *num_pkts)
{
struct htc_packet *packets, *tmp_pkt;
struct htc_endpoint *endpoint;
@@ -1759,7 +2068,7 @@ int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
* On first entry copy the look_aheads into our temp array for
* processing
*/
- memcpy(look_aheads, msg_look_ahead, sizeof(look_aheads));
+ look_aheads[0] = msg_look_ahead;
while (true) {
@@ -1827,15 +2136,6 @@ int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
if (status) {
ath6kl_err("failed to get pending recv messages: %d\n",
status);
- /*
- * Cleanup any packets we allocated but didn't use to
- * actually fetch any packets.
- */
- list_for_each_entry_safe(packets, tmp_pkt, &rx_pktq, list) {
- list_del(&packets->list);
- htc_reclaim_rxbuf(target, packets,
- &target->endpoint[packets->endpoint]);
- }
/* cleanup any packets in sync completion queue */
list_for_each_entry_safe(packets, tmp_pkt, &comp_pktq, list) {
@@ -1846,7 +2146,7 @@ int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
if (target->htc_flags & HTC_OP_STATE_STOPPING) {
ath6kl_warn("host is going to stop blocking receiver for htc_stop\n");
- ath6kldev_rx_control(target->dev, false);
+ ath6kl_hif_rx_control(target->dev, false);
}
}
@@ -1856,7 +2156,7 @@ int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
*/
if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) {
ath6kl_warn("host has no rx buffers blocking receiver to prevent overrun\n");
- ath6kldev_rx_control(target->dev, false);
+ ath6kl_hif_rx_control(target->dev, false);
}
*num_pkts = n_fetched;
@@ -1874,12 +2174,12 @@ static struct htc_packet *htc_wait_for_ctrl_msg(struct htc_target *target)
struct htc_frame_hdr *htc_hdr;
u32 look_ahead;
- if (ath6kldev_poll_mboxmsg_rx(target->dev, &look_ahead,
+ if (ath6kl_hif_poll_mboxmsg_rx(target->dev, &look_ahead,
HTC_TARGET_RESPONSE_TIMEOUT))
return NULL;
- ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
- "htc_wait_for_ctrl_msg: look_ahead : 0x%X\n", look_ahead);
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc rx wait ctrl look_ahead 0x%X\n", look_ahead);
htc_hdr = (struct htc_frame_hdr *)&look_ahead;
@@ -1943,8 +2243,8 @@ int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target,
depth = get_queue_depth(pkt_queue);
- ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
- "htc_add_rxbuf_multiple: ep id: %d, cnt:%d, len: %d\n",
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc rx add multiple ep id %d cnt %d len %d\n",
first_pkt->endpoint, depth, first_pkt->buf_len);
endpoint = &target->endpoint[first_pkt->endpoint];
@@ -1969,8 +2269,8 @@ int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target,
/* check if we are blocked waiting for a new buffer */
if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) {
if (target->ep_waiting == first_pkt->endpoint) {
- ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
- "receiver was blocked on ep:%d, unblocking.\n",
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc rx blocked on ep %d, unblocking\n",
target->ep_waiting);
target->rx_st_flags &= ~HTC_RECV_WAIT_BUFFERS;
target->ep_waiting = ENDPOINT_MAX;
@@ -1982,7 +2282,7 @@ int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target,
if (rx_unblock && !(target->htc_flags & HTC_OP_STATE_STOPPING))
/* TODO : implement a buffer threshold count? */
- ath6kldev_rx_control(target->dev, true);
+ ath6kl_hif_rx_control(target->dev, true);
return status;
}
@@ -2004,8 +2304,8 @@ void ath6kl_htc_flush_rx_buf(struct htc_target *target)
&endpoint->rx_bufq, list) {
list_del(&packet->list);
spin_unlock_bh(&target->rx_lock);
- ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
- "flushing rx pkt:0x%p, len:%d, ep:%d\n",
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc rx flush pkt 0x%p len %d ep %d\n",
packet, packet->buf_len,
packet->endpoint);
dev_kfree_skb(packet->pkt_cntxt);
@@ -2028,8 +2328,8 @@ int ath6kl_htc_conn_service(struct htc_target *target,
unsigned int max_msg_sz = 0;
int status = 0;
- ath6kl_dbg(ATH6KL_DBG_TRC,
- "htc_conn_service, target:0x%p service id:0x%X\n",
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc connect service target 0x%p service id 0x%x\n",
target, conn_req->svc_id);
if (conn_req->svc_id == HTC_CTRL_RSVD_SVC) {
@@ -2115,7 +2415,7 @@ int ath6kl_htc_conn_service(struct htc_target *target,
endpoint->len_max = max_msg_sz;
endpoint->ep_cb = conn_req->ep_cb;
endpoint->cred_dist.svc_id = conn_req->svc_id;
- endpoint->cred_dist.htc_rsvd = endpoint;
+ endpoint->cred_dist.htc_ep = endpoint;
endpoint->cred_dist.endpoint = assigned_ep;
endpoint->cred_dist.cred_sz = target->tgt_cred_sz;
@@ -2172,6 +2472,7 @@ static void reset_ep_state(struct htc_target *target)
}
/* reset distribution list */
+ /* FIXME: free existing entries */
INIT_LIST_HEAD(&target->cred_dist_list);
}
@@ -2201,8 +2502,8 @@ static void htc_setup_msg_bndl(struct htc_target *target)
target->msg_per_bndl_max = min(target->max_scat_entries,
target->msg_per_bndl_max);
- ath6kl_dbg(ATH6KL_DBG_TRC,
- "htc bundling allowed. max msg per htc bundle: %d\n",
+ ath6kl_dbg(ATH6KL_DBG_BOOT,
+ "htc bundling allowed msg_per_bndl_max %d\n",
target->msg_per_bndl_max);
/* Max rx bundle size is limited by the max tx bundle size */
@@ -2211,7 +2512,7 @@ static void htc_setup_msg_bndl(struct htc_target *target)
target->max_tx_bndl_sz = min(HIF_MBOX0_EXT_WIDTH,
target->max_xfer_szper_scatreq);
- ath6kl_dbg(ATH6KL_DBG_ANY, "max recv: %d max send: %d\n",
+ ath6kl_dbg(ATH6KL_DBG_BOOT, "htc max_rx_bndl_sz %d max_tx_bndl_sz %d\n",
target->max_rx_bndl_sz, target->max_tx_bndl_sz);
if (target->max_tx_bndl_sz)
@@ -2265,8 +2566,8 @@ int ath6kl_htc_wait_target(struct htc_target *target)
target->tgt_creds = le16_to_cpu(rdy_msg->ver2_0_info.cred_cnt);
target->tgt_cred_sz = le16_to_cpu(rdy_msg->ver2_0_info.cred_sz);
- ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
- "target ready: credits: %d credit size: %d\n",
+ ath6kl_dbg(ATH6KL_DBG_BOOT,
+ "htc target ready credits %d size %d\n",
target->tgt_creds, target->tgt_cred_sz);
/* check if this is an extended ready message */
@@ -2280,7 +2581,7 @@ int ath6kl_htc_wait_target(struct htc_target *target)
target->msg_per_bndl_max = 0;
}
- ath6kl_dbg(ATH6KL_DBG_TRC, "using htc protocol version : %s (%d)\n",
+ ath6kl_dbg(ATH6KL_DBG_BOOT, "htc using protocol %s (%d)\n",
(target->htc_tgt_ver == HTC_VERSION_2P0) ? "2.0" : ">= 2.1",
target->htc_tgt_ver);
@@ -2300,6 +2601,10 @@ int ath6kl_htc_wait_target(struct htc_target *target)
status = ath6kl_htc_conn_service((void *)target, &connect, &resp);
if (status)
+ /*
+ * FIXME: this call doesn't make sense, the caller should
+	 * call ath6kl_htc_cleanup() when it wants to remove htc
+ */
ath6kl_hif_cleanup_scatter(target->dev->ar);
fail_wait_target:
@@ -2320,8 +2625,11 @@ int ath6kl_htc_start(struct htc_target *target)
struct htc_packet *packet;
int status;
+ memset(&target->dev->irq_proc_reg, 0,
+ sizeof(target->dev->irq_proc_reg));
+
/* Disable interrupts at the chip level */
- ath6kldev_disable_intrs(target->dev);
+ ath6kl_hif_disable_intrs(target->dev);
target->htc_flags = 0;
target->rx_st_flags = 0;
@@ -2334,8 +2642,8 @@ int ath6kl_htc_start(struct htc_target *target)
}
/* NOTE: the first entry in the distribution list is ENDPOINT_0 */
- ath6k_credit_init(target->cred_dist_cntxt, &target->cred_dist_list,
- target->tgt_creds);
+ ath6kl_credit_init(target->credit_info, &target->cred_dist_list,
+ target->tgt_creds);
dump_cred_dist_stats(target);
@@ -2346,7 +2654,7 @@ int ath6kl_htc_start(struct htc_target *target)
return status;
/* unmask interrupts */
- status = ath6kldev_unmask_intrs(target->dev);
+ status = ath6kl_hif_unmask_intrs(target->dev);
if (status)
ath6kl_htc_stop(target);
@@ -2354,6 +2662,44 @@ int ath6kl_htc_start(struct htc_target *target)
return status;
}
+static int ath6kl_htc_reset(struct htc_target *target)
+{
+ u32 block_size, ctrl_bufsz;
+ struct htc_packet *packet;
+ int i;
+
+ reset_ep_state(target);
+
+ block_size = target->dev->ar->mbox_info.block_size;
+
+ ctrl_bufsz = (block_size > HTC_MAX_CTRL_MSG_LEN) ?
+ (block_size + HTC_HDR_LENGTH) :
+ (HTC_MAX_CTRL_MSG_LEN + HTC_HDR_LENGTH);
+
+ for (i = 0; i < NUM_CONTROL_BUFFERS; i++) {
+ packet = kzalloc(sizeof(*packet), GFP_KERNEL);
+ if (!packet)
+ return -ENOMEM;
+
+ packet->buf_start = kzalloc(ctrl_bufsz, GFP_KERNEL);
+ if (!packet->buf_start) {
+ kfree(packet);
+ return -ENOMEM;
+ }
+
+ packet->buf_len = ctrl_bufsz;
+ if (i < NUM_CONTROL_RX_BUFFERS) {
+ packet->act_len = 0;
+ packet->buf = packet->buf_start;
+ packet->endpoint = ENDPOINT_0;
+ list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
+ } else
+ list_add_tail(&packet->list, &target->free_ctrl_txbuf);
+ }
+
+ return 0;
+}
+
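The new ath6kl_htc_reset() above rebuilds the control buffer pools from scratch. The short standalone sketch below shows the same split of one allocation loop into rx and tx free lists; the buffer counts and size are invented for illustration, and cleanup is left to the caller just as the driver leaves it to ath6kl_htc_cleanup().

#include <stdio.h>
#include <stdlib.h>

#define NUM_CONTROL_BUFFERS	8	/* illustrative, not the driver's values */
#define NUM_CONTROL_RX_BUFFERS	6

struct ctrl_buf { void *data; struct ctrl_buf *next; };

static void push(struct ctrl_buf **list, struct ctrl_buf *b)
{
	b->next = *list;
	*list = b;
}

int main(void)
{
	struct ctrl_buf *free_rx = NULL, *free_tx = NULL;
	size_t bufsz = 256;	/* stands in for ctrl_bufsz */
	int i;

	for (i = 0; i < NUM_CONTROL_BUFFERS; i++) {
		struct ctrl_buf *b = calloc(1, sizeof(*b));

		if (!b)
			return 1;	/* the driver returns -ENOMEM here */
		b->data = calloc(1, bufsz);
		if (!b->data) {
			free(b);
			return 1;
		}
		/* the first NUM_CONTROL_RX_BUFFERS buffers feed rx, the rest tx */
		push(i < NUM_CONTROL_RX_BUFFERS ? &free_rx : &free_tx, b);
	}
	printf("rx and tx control buffer free lists populated\n");
	return 0;
}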
/* htc_stop: stop interrupt reception, and flush all queued buffers */
void ath6kl_htc_stop(struct htc_target *target)
{
@@ -2366,21 +2712,19 @@ void ath6kl_htc_stop(struct htc_target *target)
* function returns all pending HIF I/O has completed, we can
* safely flush the queues.
*/
- ath6kldev_mask_intrs(target->dev);
+ ath6kl_hif_mask_intrs(target->dev);
ath6kl_htc_flush_txep_all(target);
ath6kl_htc_flush_rx_buf(target);
- reset_ep_state(target);
+ ath6kl_htc_reset(target);
}
void *ath6kl_htc_create(struct ath6kl *ar)
{
struct htc_target *target = NULL;
- struct htc_packet *packet;
- int status = 0, i = 0;
- u32 block_size, ctrl_bufsz;
+ int status = 0;
target = kzalloc(sizeof(*target), GFP_KERNEL);
if (!target) {
@@ -2392,7 +2736,7 @@ void *ath6kl_htc_create(struct ath6kl *ar)
if (!target->dev) {
ath6kl_err("unable to allocate memory\n");
status = -ENOMEM;
- goto fail_create_htc;
+ goto err_htc_cleanup;
}
spin_lock_init(&target->htc_lock);
@@ -2407,49 +2751,20 @@ void *ath6kl_htc_create(struct ath6kl *ar)
target->dev->htc_cnxt = target;
target->ep_waiting = ENDPOINT_MAX;
- reset_ep_state(target);
-
- status = ath6kldev_setup(target->dev);
-
+ status = ath6kl_hif_setup(target->dev);
if (status)
- goto fail_create_htc;
-
- block_size = ar->mbox_info.block_size;
-
- ctrl_bufsz = (block_size > HTC_MAX_CTRL_MSG_LEN) ?
- (block_size + HTC_HDR_LENGTH) :
- (HTC_MAX_CTRL_MSG_LEN + HTC_HDR_LENGTH);
-
- for (i = 0; i < NUM_CONTROL_BUFFERS; i++) {
- packet = kzalloc(sizeof(*packet), GFP_KERNEL);
- if (!packet)
- break;
+ goto err_htc_cleanup;
- packet->buf_start = kzalloc(ctrl_bufsz, GFP_KERNEL);
- if (!packet->buf_start) {
- kfree(packet);
- break;
- }
+ status = ath6kl_htc_reset(target);
+ if (status)
+ goto err_htc_cleanup;
- packet->buf_len = ctrl_bufsz;
- if (i < NUM_CONTROL_RX_BUFFERS) {
- packet->act_len = 0;
- packet->buf = packet->buf_start;
- packet->endpoint = ENDPOINT_0;
- list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
- } else
- list_add_tail(&packet->list, &target->free_ctrl_txbuf);
- }
+ return target;
-fail_create_htc:
- if (i != NUM_CONTROL_BUFFERS || status) {
- if (target) {
- ath6kl_htc_cleanup(target);
- target = NULL;
- }
- }
+err_htc_cleanup:
+ ath6kl_htc_cleanup(target);
- return target;
+ return NULL;
}
/* cleanup the HTC instance */
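The ath6kl_htc_create() rework above drops the partial-initialization bookkeeping in favour of a single err_htc_cleanup label that funnels every failure through ath6kl_htc_cleanup(). A toy standalone version of that shape, with invented fields, might look like this:

#include <stdio.h>
#include <stdlib.h>

struct target { void *dev; };

/* frees whatever create() managed to allocate; safe on NULL */
static void cleanup(struct target *t)
{
	if (!t)
		return;
	free(t->dev);
	free(t);
}

static struct target *create(void)
{
	struct target *t = calloc(1, sizeof(*t));

	if (!t)
		return NULL;

	t->dev = calloc(1, 64);
	if (!t->dev)
		goto err_cleanup;

	/* further setup steps would also jump to err_cleanup on failure */
	return t;

err_cleanup:
	cleanup(t);
	return NULL;
}

int main(void)
{
	struct target *t = create();

	printf("%s\n", t ? "created" : "failed");
	cleanup(t);
	return 0;
}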
diff --git a/drivers/net/wireless/ath/ath6kl/htc.h b/drivers/net/wireless/ath/ath6kl/htc.h
index 8ce0c2c07ded..57672e1ed1a6 100644
--- a/drivers/net/wireless/ath/ath6kl/htc.h
+++ b/drivers/net/wireless/ath/ath6kl/htc.h
@@ -393,7 +393,7 @@ struct htc_endpoint_credit_dist {
int cred_per_msg;
/* reserved for HTC use */
- void *htc_rsvd;
+ struct htc_endpoint *htc_ep;
/*
* current depth of TX queue , i.e. messages waiting for credits
@@ -414,9 +414,11 @@ enum htc_credit_dist_reason {
HTC_CREDIT_DIST_SEEK_CREDITS,
};
-struct htc_credit_state_info {
+struct ath6kl_htc_credit_info {
int total_avail_credits;
int cur_free_credits;
+
+ /* list of lowest priority endpoints */
struct list_head lowestpri_ep_dist;
};
@@ -508,10 +510,13 @@ struct ath6kl_device;
/* our HTC target state */
struct htc_target {
struct htc_endpoint endpoint[ENDPOINT_MAX];
+
+ /* contains struct htc_endpoint_credit_dist */
struct list_head cred_dist_list;
+
struct list_head free_ctrl_txbuf;
struct list_head free_ctrl_rxbuf;
- struct htc_credit_state_info *cred_dist_cntxt;
+ struct ath6kl_htc_credit_info *credit_info;
int tgt_creds;
unsigned int tgt_cred_sz;
spinlock_t htc_lock;
@@ -542,7 +547,7 @@ struct htc_target {
void *ath6kl_htc_create(struct ath6kl *ar);
void ath6kl_htc_set_credit_dist(struct htc_target *target,
- struct htc_credit_state_info *cred_info,
+ struct ath6kl_htc_credit_info *cred_info,
u16 svc_pri_order[], int len);
int ath6kl_htc_wait_target(struct htc_target *target);
int ath6kl_htc_start(struct htc_target *target);
@@ -563,7 +568,10 @@ int ath6kl_htc_get_rxbuf_num(struct htc_target *target,
int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target,
struct list_head *pktq);
int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
- u32 msg_look_ahead[], int *n_pkts);
+ u32 msg_look_ahead, int *n_pkts);
+
+int ath6kl_credit_setup(void *htc_handle,
+ struct ath6kl_htc_credit_info *cred_info);
static inline void set_htc_pkt_info(struct htc_packet *packet, void *context,
u8 *buf, unsigned int len,
diff --git a/drivers/net/wireless/ath/ath6kl/htc_hif.h b/drivers/net/wireless/ath/ath6kl/htc_hif.h
deleted file mode 100644
index 171ad63d89b0..000000000000
--- a/drivers/net/wireless/ath/ath6kl/htc_hif.h
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Copyright (c) 2007-2011 Atheros Communications Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef HTC_HIF_H
-#define HTC_HIF_H
-
-#include "htc.h"
-#include "hif.h"
-
-#define ATH6KL_MAILBOXES 4
-
-/* HTC runs over mailbox 0 */
-#define HTC_MAILBOX 0
-
-#define ATH6KL_TARGET_DEBUG_INTR_MASK 0x01
-
-#define OTHER_INTS_ENABLED (INT_STATUS_ENABLE_ERROR_MASK | \
- INT_STATUS_ENABLE_CPU_MASK | \
- INT_STATUS_ENABLE_COUNTER_MASK)
-
-#define ATH6KL_REG_IO_BUFFER_SIZE 32
-#define ATH6KL_MAX_REG_IO_BUFFERS 8
-#define ATH6KL_SCATTER_ENTRIES_PER_REQ 16
-#define ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER (16 * 1024)
-#define ATH6KL_SCATTER_REQS 4
-
-#ifndef A_CACHE_LINE_PAD
-#define A_CACHE_LINE_PAD 128
-#endif
-#define ATH6KL_MIN_SCATTER_ENTRIES_PER_REQ 2
-#define ATH6KL_MIN_TRANSFER_SIZE_PER_SCATTER (4 * 1024)
-
-struct ath6kl_irq_proc_registers {
- u8 host_int_status;
- u8 cpu_int_status;
- u8 error_int_status;
- u8 counter_int_status;
- u8 mbox_frame;
- u8 rx_lkahd_valid;
- u8 host_int_status2;
- u8 gmbox_rx_avail;
- __le32 rx_lkahd[2];
- __le32 rx_gmbox_lkahd_alias[2];
-} __packed;
-
-struct ath6kl_irq_enable_reg {
- u8 int_status_en;
- u8 cpu_int_status_en;
- u8 err_int_status_en;
- u8 cntr_int_status_en;
-} __packed;
-
-struct ath6kl_device {
- spinlock_t lock;
- u8 pad1[A_CACHE_LINE_PAD];
- struct ath6kl_irq_proc_registers irq_proc_reg;
- u8 pad2[A_CACHE_LINE_PAD];
- struct ath6kl_irq_enable_reg irq_en_reg;
- u8 pad3[A_CACHE_LINE_PAD];
- struct htc_target *htc_cnxt;
- struct ath6kl *ar;
-};
-
-int ath6kldev_setup(struct ath6kl_device *dev);
-int ath6kldev_unmask_intrs(struct ath6kl_device *dev);
-int ath6kldev_mask_intrs(struct ath6kl_device *dev);
-int ath6kldev_poll_mboxmsg_rx(struct ath6kl_device *dev,
- u32 *lk_ahd, int timeout);
-int ath6kldev_rx_control(struct ath6kl_device *dev, bool enable_rx);
-int ath6kldev_disable_intrs(struct ath6kl_device *dev);
-
-int ath6kldev_rw_comp_handler(void *context, int status);
-int ath6kldev_intr_bh_handler(struct ath6kl *ar);
-
-/* Scatter Function and Definitions */
-int ath6kldev_submit_scat_req(struct ath6kl_device *dev,
- struct hif_scatter_req *scat_req, bool read);
-
-#endif /*ATH6KL_H_ */
diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c
index c1d2366704b5..7f55be3092d1 100644
--- a/drivers/net/wireless/ath/ath6kl/init.c
+++ b/drivers/net/wireless/ath/ath6kl/init.c
@@ -16,6 +16,7 @@
*/
#include <linux/moduleparam.h>
+#include <linux/errno.h>
#include <linux/of.h>
#include <linux/mmc/sdio_func.h>
#include "core.h"
@@ -26,9 +27,85 @@
unsigned int debug_mask;
static unsigned int testmode;
+static bool suspend_cutpower;
module_param(debug_mask, uint, 0644);
module_param(testmode, uint, 0644);
+module_param(suspend_cutpower, bool, 0444);
+
+static const struct ath6kl_hw hw_list[] = {
+ {
+ .id = AR6003_HW_2_0_VERSION,
+ .name = "ar6003 hw 2.0",
+ .dataset_patch_addr = 0x57e884,
+ .app_load_addr = 0x543180,
+ .board_ext_data_addr = 0x57e500,
+ .reserved_ram_size = 6912,
+ .refclk_hz = 26000000,
+ .uarttx_pin = 8,
+
+ /* hw2.0 needs override address hardcoded */
+ .app_start_override_addr = 0x944C00,
+
+ .fw_otp = AR6003_HW_2_0_OTP_FILE,
+ .fw = AR6003_HW_2_0_FIRMWARE_FILE,
+ .fw_tcmd = AR6003_HW_2_0_TCMD_FIRMWARE_FILE,
+ .fw_patch = AR6003_HW_2_0_PATCH_FILE,
+ .fw_api2 = AR6003_HW_2_0_FIRMWARE_2_FILE,
+ .fw_board = AR6003_HW_2_0_BOARD_DATA_FILE,
+ .fw_default_board = AR6003_HW_2_0_DEFAULT_BOARD_DATA_FILE,
+ },
+ {
+ .id = AR6003_HW_2_1_1_VERSION,
+ .name = "ar6003 hw 2.1.1",
+ .dataset_patch_addr = 0x57ff74,
+ .app_load_addr = 0x1234,
+ .board_ext_data_addr = 0x542330,
+ .reserved_ram_size = 512,
+ .refclk_hz = 26000000,
+ .uarttx_pin = 8,
+
+ .fw_otp = AR6003_HW_2_1_1_OTP_FILE,
+ .fw = AR6003_HW_2_1_1_FIRMWARE_FILE,
+ .fw_tcmd = AR6003_HW_2_1_1_TCMD_FIRMWARE_FILE,
+ .fw_patch = AR6003_HW_2_1_1_PATCH_FILE,
+ .fw_api2 = AR6003_HW_2_1_1_FIRMWARE_2_FILE,
+ .fw_board = AR6003_HW_2_1_1_BOARD_DATA_FILE,
+ .fw_default_board = AR6003_HW_2_1_1_DEFAULT_BOARD_DATA_FILE,
+ },
+ {
+ .id = AR6004_HW_1_0_VERSION,
+ .name = "ar6004 hw 1.0",
+ .dataset_patch_addr = 0x57e884,
+ .app_load_addr = 0x1234,
+ .board_ext_data_addr = 0x437000,
+ .reserved_ram_size = 19456,
+ .board_addr = 0x433900,
+ .refclk_hz = 26000000,
+ .uarttx_pin = 11,
+
+ .fw = AR6004_HW_1_0_FIRMWARE_FILE,
+ .fw_api2 = AR6004_HW_1_0_FIRMWARE_2_FILE,
+ .fw_board = AR6004_HW_1_0_BOARD_DATA_FILE,
+ .fw_default_board = AR6004_HW_1_0_DEFAULT_BOARD_DATA_FILE,
+ },
+ {
+ .id = AR6004_HW_1_1_VERSION,
+ .name = "ar6004 hw 1.1",
+ .dataset_patch_addr = 0x57e884,
+ .app_load_addr = 0x1234,
+ .board_ext_data_addr = 0x437000,
+ .reserved_ram_size = 11264,
+ .board_addr = 0x43d400,
+ .refclk_hz = 40000000,
+ .uarttx_pin = 11,
+
+ .fw = AR6004_HW_1_1_FIRMWARE_FILE,
+ .fw_api2 = AR6004_HW_1_1_FIRMWARE_2_FILE,
+ .fw_board = AR6004_HW_1_1_BOARD_DATA_FILE,
+ .fw_default_board = AR6004_HW_1_1_DEFAULT_BOARD_DATA_FILE,
+ },
+};
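The hw_list table above replaces the per-chip switch statements that were scattered through init.c. A minimal standalone sketch of the id lookup that ath6kl_init_hw_params() now performs follows; the ids and names here are placeholders rather than the real version numbers.

#include <stdio.h>
#include <stddef.h>

struct hw_params { unsigned int id; const char *name; };

static const struct hw_params hw_list[] = {
	{ .id = 0x1001, .name = "example hw 2.0" },
	{ .id = 0x1002, .name = "example hw 2.1.1" },
};

static const struct hw_params *find_hw(unsigned int target_ver)
{
	size_t i;

	for (i = 0; i < sizeof(hw_list) / sizeof(hw_list[0]); i++)
		if (hw_list[i].id == target_ver)
			return &hw_list[i];

	return NULL;	/* unsupported hardware version */
}

int main(void)
{
	const struct hw_params *hw = find_hw(0x1002);

	printf("%s\n", hw ? hw->name : "unsupported");
	return 0;
}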
/*
* Include definitions here that can be used to tune the WLAN module
@@ -55,7 +132,6 @@ module_param(testmode, uint, 0644);
*/
#define WLAN_CONFIG_DISCONNECT_TIMEOUT 10
-#define CONFIG_AR600x_DEBUG_UART_TX_PIN 8
#define ATH6KL_DATA_OFFSET 64
struct sk_buff *ath6kl_buf_alloc(int size)
@@ -73,37 +149,21 @@ struct sk_buff *ath6kl_buf_alloc(int size)
return skb;
}
-void ath6kl_init_profile_info(struct ath6kl *ar)
+void ath6kl_init_profile_info(struct ath6kl_vif *vif)
{
- ar->ssid_len = 0;
- memset(ar->ssid, 0, sizeof(ar->ssid));
-
- ar->dot11_auth_mode = OPEN_AUTH;
- ar->auth_mode = NONE_AUTH;
- ar->prwise_crypto = NONE_CRYPT;
- ar->prwise_crypto_len = 0;
- ar->grp_crypto = NONE_CRYPT;
- ar->grp_crypto_len = 0;
- memset(ar->wep_key_list, 0, sizeof(ar->wep_key_list));
- memset(ar->req_bssid, 0, sizeof(ar->req_bssid));
- memset(ar->bssid, 0, sizeof(ar->bssid));
- ar->bss_ch = 0;
- ar->nw_type = ar->next_mode = INFRA_NETWORK;
-}
-
-static u8 ath6kl_get_fw_iftype(struct ath6kl *ar)
-{
- switch (ar->nw_type) {
- case INFRA_NETWORK:
- return HI_OPTION_FW_MODE_BSS_STA;
- case ADHOC_NETWORK:
- return HI_OPTION_FW_MODE_IBSS;
- case AP_NETWORK:
- return HI_OPTION_FW_MODE_AP;
- default:
- ath6kl_err("Unsupported interface type :%d\n", ar->nw_type);
- return 0xff;
- }
+ vif->ssid_len = 0;
+ memset(vif->ssid, 0, sizeof(vif->ssid));
+
+ vif->dot11_auth_mode = OPEN_AUTH;
+ vif->auth_mode = NONE_AUTH;
+ vif->prwise_crypto = NONE_CRYPT;
+ vif->prwise_crypto_len = 0;
+ vif->grp_crypto = NONE_CRYPT;
+ vif->grp_crypto_len = 0;
+ memset(vif->wep_key_list, 0, sizeof(vif->wep_key_list));
+ memset(vif->req_bssid, 0, sizeof(vif->req_bssid));
+ memset(vif->bssid, 0, sizeof(vif->bssid));
+ vif->bss_ch = 0;
}
static int ath6kl_set_host_app_area(struct ath6kl *ar)
@@ -120,7 +180,7 @@ static int ath6kl_set_host_app_area(struct ath6kl *ar)
return -EIO;
address = TARG_VTOP(ar->target_type, data);
- host_app_area.wmi_protocol_ver = WMI_PROTOCOL_VERSION;
+ host_app_area.wmi_protocol_ver = cpu_to_le32(WMI_PROTOCOL_VERSION);
if (ath6kl_diag_write(ar, address, (u8 *) &host_app_area,
sizeof(struct host_app_area)))
return -EIO;
@@ -258,40 +318,12 @@ static int ath6kl_init_service_ep(struct ath6kl *ar)
return 0;
}
-static void ath6kl_init_control_info(struct ath6kl *ar)
+void ath6kl_init_control_info(struct ath6kl_vif *vif)
{
- u8 ctr;
-
- clear_bit(WMI_ENABLED, &ar->flag);
- ath6kl_init_profile_info(ar);
- ar->def_txkey_index = 0;
- memset(ar->wep_key_list, 0, sizeof(ar->wep_key_list));
- ar->ch_hint = 0;
- ar->listen_intvl_t = A_DEFAULT_LISTEN_INTERVAL;
- ar->listen_intvl_b = 0;
- ar->tx_pwr = 0;
- clear_bit(SKIP_SCAN, &ar->flag);
- set_bit(WMM_ENABLED, &ar->flag);
- ar->intra_bss = 1;
- memset(&ar->sc_params, 0, sizeof(ar->sc_params));
- ar->sc_params.short_scan_ratio = WMI_SHORTSCANRATIO_DEFAULT;
- ar->sc_params.scan_ctrl_flags = DEFAULT_SCAN_CTRL_FLAGS;
- ar->lrssi_roam_threshold = DEF_LRSSI_ROAM_THRESHOLD;
-
- memset((u8 *)ar->sta_list, 0,
- AP_MAX_NUM_STA * sizeof(struct ath6kl_sta));
-
- spin_lock_init(&ar->mcastpsq_lock);
-
- /* Init the PS queues */
- for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) {
- spin_lock_init(&ar->sta_list[ctr].psq_lock);
- skb_queue_head_init(&ar->sta_list[ctr].psq);
- }
-
- skb_queue_head_init(&ar->mcastpsq);
-
- memcpy(ar->ap_country_code, DEF_AP_COUNTRY_CODE, 3);
+ ath6kl_init_profile_info(vif);
+ vif->def_txkey_index = 0;
+ memset(vif->wep_key_list, 0, sizeof(vif->wep_key_list));
+ vif->ch_hint = 0;
}
/*
@@ -341,62 +373,7 @@ out:
return status;
}
-#define REG_DUMP_COUNT_AR6003 60
-#define REGISTER_DUMP_LEN_MAX 60
-
-static void ath6kl_dump_target_assert_info(struct ath6kl *ar)
-{
- u32 address;
- u32 regdump_loc = 0;
- int status;
- u32 regdump_val[REGISTER_DUMP_LEN_MAX];
- u32 i;
-
- if (ar->target_type != TARGET_TYPE_AR6003)
- return;
-
- /* the reg dump pointer is copied to the host interest area */
- address = ath6kl_get_hi_item_addr(ar, HI_ITEM(hi_failure_state));
- address = TARG_VTOP(ar->target_type, address);
-
- /* read RAM location through diagnostic window */
- status = ath6kl_diag_read32(ar, address, &regdump_loc);
-
- if (status || !regdump_loc) {
- ath6kl_err("failed to get ptr to register dump area\n");
- return;
- }
-
- ath6kl_dbg(ATH6KL_DBG_TRC, "location of register dump data: 0x%X\n",
- regdump_loc);
- regdump_loc = TARG_VTOP(ar->target_type, regdump_loc);
-
- /* fetch register dump data */
- status = ath6kl_diag_read(ar, regdump_loc, (u8 *)&regdump_val[0],
- REG_DUMP_COUNT_AR6003 * (sizeof(u32)));
-
- if (status) {
- ath6kl_err("failed to get register dump\n");
- return;
- }
- ath6kl_dbg(ATH6KL_DBG_TRC, "Register Dump:\n");
-
- for (i = 0; i < REG_DUMP_COUNT_AR6003; i++)
- ath6kl_dbg(ATH6KL_DBG_TRC, " %d : 0x%8.8X\n",
- i, regdump_val[i]);
-
-}
-
-void ath6kl_target_failure(struct ath6kl *ar)
-{
- ath6kl_err("target asserted\n");
-
- /* try dumping target assertion information (if any) */
- ath6kl_dump_target_assert_info(ar);
-
-}
-
-static int ath6kl_target_config_wlan_params(struct ath6kl *ar)
+static int ath6kl_target_config_wlan_params(struct ath6kl *ar, int idx)
{
int status = 0;
int ret;
@@ -406,46 +383,46 @@ static int ath6kl_target_config_wlan_params(struct ath6kl *ar)
* default values. Required if checksum offload is needed. Set
* RxMetaVersion to 2.
*/
- if (ath6kl_wmi_set_rx_frame_format_cmd(ar->wmi,
+ if (ath6kl_wmi_set_rx_frame_format_cmd(ar->wmi, idx,
ar->rx_meta_ver, 0, 0)) {
ath6kl_err("unable to set the rx frame format\n");
status = -EIO;
}
if (ar->conf_flags & ATH6KL_CONF_IGNORE_PS_FAIL_EVT_IN_SCAN)
- if ((ath6kl_wmi_pmparams_cmd(ar->wmi, 0, 1, 0, 0, 1,
+ if ((ath6kl_wmi_pmparams_cmd(ar->wmi, idx, 0, 1, 0, 0, 1,
IGNORE_POWER_SAVE_FAIL_EVENT_DURING_SCAN)) != 0) {
ath6kl_err("unable to set power save fail event policy\n");
status = -EIO;
}
if (!(ar->conf_flags & ATH6KL_CONF_IGNORE_ERP_BARKER))
- if ((ath6kl_wmi_set_lpreamble_cmd(ar->wmi, 0,
+ if ((ath6kl_wmi_set_lpreamble_cmd(ar->wmi, idx, 0,
WMI_DONOT_IGNORE_BARKER_IN_ERP)) != 0) {
ath6kl_err("unable to set barker preamble policy\n");
status = -EIO;
}
- if (ath6kl_wmi_set_keepalive_cmd(ar->wmi,
+ if (ath6kl_wmi_set_keepalive_cmd(ar->wmi, idx,
WLAN_CONFIG_KEEP_ALIVE_INTERVAL)) {
ath6kl_err("unable to set keep alive interval\n");
status = -EIO;
}
- if (ath6kl_wmi_disctimeout_cmd(ar->wmi,
+ if (ath6kl_wmi_disctimeout_cmd(ar->wmi, idx,
WLAN_CONFIG_DISCONNECT_TIMEOUT)) {
ath6kl_err("unable to set disconnect timeout\n");
status = -EIO;
}
if (!(ar->conf_flags & ATH6KL_CONF_ENABLE_TX_BURST))
- if (ath6kl_wmi_set_wmm_txop(ar->wmi, WMI_TXOP_DISABLED)) {
+ if (ath6kl_wmi_set_wmm_txop(ar->wmi, idx, WMI_TXOP_DISABLED)) {
ath6kl_err("unable to set txop bursting\n");
status = -EIO;
}
- if (ar->p2p) {
- ret = ath6kl_wmi_info_req_cmd(ar->wmi,
+ if (ar->p2p && (ar->vif_max == 1 || idx)) {
+ ret = ath6kl_wmi_info_req_cmd(ar->wmi, idx,
P2P_FLAG_CAPABILITIES_REQ |
P2P_FLAG_MACADDR_REQ |
P2P_FLAG_HMODEL_REQ);
@@ -453,13 +430,13 @@ static int ath6kl_target_config_wlan_params(struct ath6kl *ar)
ath6kl_dbg(ATH6KL_DBG_TRC, "failed to request P2P "
"capabilities (%d) - assuming P2P not "
"supported\n", ret);
- ar->p2p = 0;
+ ar->p2p = false;
}
}
- if (ar->p2p) {
+ if (ar->p2p && (ar->vif_max == 1 || idx)) {
/* Enable Probe Request reporting for P2P */
- ret = ath6kl_wmi_probe_report_req_cmd(ar->wmi, true);
+ ret = ath6kl_wmi_probe_report_req_cmd(ar->wmi, idx, true);
if (ret) {
ath6kl_dbg(ATH6KL_DBG_TRC, "failed to enable Probe "
"Request reporting (%d)\n", ret);
@@ -472,13 +449,40 @@ static int ath6kl_target_config_wlan_params(struct ath6kl *ar)
int ath6kl_configure_target(struct ath6kl *ar)
{
u32 param, ram_reserved_size;
- u8 fw_iftype;
+ u8 fw_iftype, fw_mode = 0, fw_submode = 0;
+ int i, status;
- fw_iftype = ath6kl_get_fw_iftype(ar);
- if (fw_iftype == 0xff)
- return -EINVAL;
+ /*
+ * Note: Even though the firmware interface type is
+	 * chosen as BSS_STA for all three interfaces, it can
+	 * be configured to IBSS/AP as long as the fw submode
+	 * remains normal mode (0 - AP, STA and IBSS). But
+	 * due to a target assert in firmware only one interface is
+ * configured for now.
+ */
+ fw_iftype = HI_OPTION_FW_MODE_BSS_STA;
+
+ for (i = 0; i < ar->vif_max; i++)
+ fw_mode |= fw_iftype << (i * HI_OPTION_FW_MODE_BITS);
+
+ /*
+	 * By default, submodes:
+ * vif[0] - AP/STA/IBSS
+ * vif[1] - "P2P dev"/"P2P GO"/"P2P Client"
+ * vif[2] - "P2P dev"/"P2P GO"/"P2P Client"
+ */
+
+ for (i = 0; i < ar->max_norm_iface; i++)
+ fw_submode |= HI_OPTION_FW_SUBMODE_NONE <<
+ (i * HI_OPTION_FW_SUBMODE_BITS);
+
+ for (i = ar->max_norm_iface; i < ar->vif_max; i++)
+ fw_submode |= HI_OPTION_FW_SUBMODE_P2PDEV <<
+ (i * HI_OPTION_FW_SUBMODE_BITS);
+
+ if (ar->p2p && ar->vif_max == 1)
+ fw_submode = HI_OPTION_FW_SUBMODE_P2PDEV;
- /* Tell target which HTC version it is used*/
param = HTC_PROTOCOL_VERSION;
if (ath6kl_bmi_write(ar,
ath6kl_get_hi_item_addr(ar,
@@ -499,12 +503,10 @@ int ath6kl_configure_target(struct ath6kl *ar)
return -EIO;
}
- param |= (1 << HI_OPTION_NUM_DEV_SHIFT);
- param |= (fw_iftype << HI_OPTION_FW_MODE_SHIFT);
- if (ar->p2p && fw_iftype == HI_OPTION_FW_MODE_BSS_STA) {
- param |= HI_OPTION_FW_SUBMODE_P2PDEV <<
- HI_OPTION_FW_SUBMODE_SHIFT;
- }
+ param |= (ar->vif_max << HI_OPTION_NUM_DEV_SHIFT);
+ param |= fw_mode << HI_OPTION_FW_MODE_SHIFT;
+ param |= fw_submode << HI_OPTION_FW_SUBMODE_SHIFT;
+
param |= (0 << HI_OPTION_MAC_ADDR_METHOD_SHIFT);
param |= (0 << HI_OPTION_FW_BRIDGE_SHIFT);
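The loops above pack one small mode value per virtual interface into the hi_option_flag word. The standalone sketch below reproduces that bit packing with assumed field widths and codes (the HI_OPTION_*_BITS width of 2 and the BSS_STA/P2PDEV values are illustrative, not taken from the driver headers):

#include <stdio.h>

#define HI_OPTION_FW_MODE_BITS		2	/* assumed widths for illustration */
#define HI_OPTION_FW_SUBMODE_BITS	2
#define HI_OPTION_FW_MODE_BSS_STA	0x1	/* assumed codes */
#define HI_OPTION_FW_SUBMODE_NONE	0x0
#define HI_OPTION_FW_SUBMODE_P2PDEV	0x1

int main(void)
{
	unsigned int fw_mode = 0, fw_submode = 0;
	int vif_max = 3, max_norm_iface = 1, i;

	/* every vif gets the station mode in its own bit field slot */
	for (i = 0; i < vif_max; i++)
		fw_mode |= HI_OPTION_FW_MODE_BSS_STA << (i * HI_OPTION_FW_MODE_BITS);

	/* normal interfaces first, then P2P device submodes */
	for (i = 0; i < max_norm_iface; i++)
		fw_submode |= HI_OPTION_FW_SUBMODE_NONE <<
			      (i * HI_OPTION_FW_SUBMODE_BITS);
	for (i = max_norm_iface; i < vif_max; i++)
		fw_submode |= HI_OPTION_FW_SUBMODE_P2PDEV <<
			      (i * HI_OPTION_FW_SUBMODE_BITS);

	printf("fw_mode 0x%x fw_submode 0x%x\n", fw_mode, fw_submode);
	return 0;
}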
@@ -550,71 +552,55 @@ int ath6kl_configure_target(struct ath6kl *ar)
/* use default number of control buffers */
return -EIO;
+ /* Configure GPIO AR600x UART */
+ param = ar->hw.uarttx_pin;
+ status = ath6kl_bmi_write(ar,
+ ath6kl_get_hi_item_addr(ar,
+ HI_ITEM(hi_dbg_uart_txpin)),
+ (u8 *)&param, 4);
+ if (status)
+ return status;
+
+ /* Configure target refclk_hz */
+ param = ar->hw.refclk_hz;
+ status = ath6kl_bmi_write(ar,
+ ath6kl_get_hi_item_addr(ar,
+ HI_ITEM(hi_refclk_hz)),
+ (u8 *)&param, 4);
+ if (status)
+ return status;
+
return 0;
}
-struct ath6kl *ath6kl_core_alloc(struct device *sdev)
+void ath6kl_core_free(struct ath6kl *ar)
{
- struct net_device *dev;
- struct ath6kl *ar;
- struct wireless_dev *wdev;
-
- wdev = ath6kl_cfg80211_init(sdev);
- if (!wdev) {
- ath6kl_err("ath6kl_cfg80211_init failed\n");
- return NULL;
- }
-
- ar = wdev_priv(wdev);
- ar->dev = sdev;
- ar->wdev = wdev;
- wdev->iftype = NL80211_IFTYPE_STATION;
-
- if (ath6kl_debug_init(ar)) {
- ath6kl_err("Failed to initialize debugfs\n");
- ath6kl_cfg80211_deinit(ar);
- return NULL;
- }
-
- dev = alloc_netdev(0, "wlan%d", ether_setup);
- if (!dev) {
- ath6kl_err("no memory for network device instance\n");
- ath6kl_cfg80211_deinit(ar);
- return NULL;
- }
-
- dev->ieee80211_ptr = wdev;
- SET_NETDEV_DEV(dev, wiphy_dev(wdev->wiphy));
- wdev->netdev = dev;
- ar->sme_state = SME_DISCONNECTED;
-
- init_netdev(dev);
+ wiphy_free(ar->wiphy);
+}
- ar->net_dev = dev;
- set_bit(WLAN_ENABLED, &ar->flag);
+void ath6kl_core_cleanup(struct ath6kl *ar)
+{
+ ath6kl_hif_power_off(ar);
- ar->wlan_pwr_state = WLAN_POWER_STATE_ON;
+ destroy_workqueue(ar->ath6kl_wq);
- spin_lock_init(&ar->lock);
+ if (ar->htc_target)
+ ath6kl_htc_cleanup(ar->htc_target);
- ath6kl_init_control_info(ar);
- init_waitqueue_head(&ar->event_wq);
- sema_init(&ar->sem, 1);
- clear_bit(DESTROY_IN_PROGRESS, &ar->flag);
+ ath6kl_cookie_cleanup(ar);
- INIT_LIST_HEAD(&ar->amsdu_rx_buffer_queue);
+ ath6kl_cleanup_amsdu_rxbufs(ar);
- setup_timer(&ar->disconnect_timer, disconnect_timer_handler,
- (unsigned long) dev);
+ ath6kl_bmi_cleanup(ar);
- return ar;
-}
+ ath6kl_debug_cleanup(ar);
-int ath6kl_unavail_ev(struct ath6kl *ar)
-{
- ath6kl_destroy(ar->net_dev, 1);
+ kfree(ar->fw_board);
+ kfree(ar->fw_otp);
+ kfree(ar->fw);
+ kfree(ar->fw_patch);
- return 0;
+ ath6kl_deinit_ieee80211_hw(ar);
}
/* firmware upload */
@@ -643,11 +629,11 @@ static int ath6kl_get_fw(struct ath6kl *ar, const char *filename,
static const char *get_target_ver_dir(const struct ath6kl *ar)
{
switch (ar->version.target_ver) {
- case AR6003_REV1_VERSION:
+ case AR6003_HW_1_0_VERSION:
return "ath6k/AR6003/hw1.0";
- case AR6003_REV2_VERSION:
+ case AR6003_HW_2_0_VERSION:
return "ath6k/AR6003/hw2.0";
- case AR6003_REV3_VERSION:
+ case AR6003_HW_2_1_1_VERSION:
return "ath6k/AR6003/hw2.1.1";
}
ath6kl_warn("%s: unsupported target version 0x%x.\n", __func__,
@@ -705,17 +691,10 @@ static int ath6kl_fetch_board_file(struct ath6kl *ar)
if (ar->fw_board != NULL)
return 0;
- switch (ar->version.target_ver) {
- case AR6003_REV2_VERSION:
- filename = AR6003_REV2_BOARD_DATA_FILE;
- break;
- case AR6004_REV1_VERSION:
- filename = AR6004_REV1_BOARD_DATA_FILE;
- break;
- default:
- filename = AR6003_REV3_BOARD_DATA_FILE;
- break;
- }
+ if (WARN_ON(ar->hw.fw_board == NULL))
+ return -EINVAL;
+
+ filename = ar->hw.fw_board;
ret = ath6kl_get_fw(ar, filename, &ar->fw_board,
&ar->fw_board_len);
@@ -733,17 +712,7 @@ static int ath6kl_fetch_board_file(struct ath6kl *ar)
ath6kl_warn("Failed to get board file %s (%d), trying to find default board file.\n",
filename, ret);
- switch (ar->version.target_ver) {
- case AR6003_REV2_VERSION:
- filename = AR6003_REV2_DEFAULT_BOARD_DATA_FILE;
- break;
- case AR6004_REV1_VERSION:
- filename = AR6004_REV1_DEFAULT_BOARD_DATA_FILE;
- break;
- default:
- filename = AR6003_REV3_DEFAULT_BOARD_DATA_FILE;
- break;
- }
+ filename = ar->hw.fw_default_board;
ret = ath6kl_get_fw(ar, filename, &ar->fw_board,
&ar->fw_board_len);
@@ -767,19 +736,14 @@ static int ath6kl_fetch_otp_file(struct ath6kl *ar)
if (ar->fw_otp != NULL)
return 0;
- switch (ar->version.target_ver) {
- case AR6003_REV2_VERSION:
- filename = AR6003_REV2_OTP_FILE;
- break;
- case AR6004_REV1_VERSION:
- ath6kl_dbg(ATH6KL_DBG_TRC, "AR6004 doesn't need OTP file\n");
+ if (ar->hw.fw_otp == NULL) {
+ ath6kl_dbg(ATH6KL_DBG_BOOT,
+ "no OTP file configured for this hw\n");
return 0;
- break;
- default:
- filename = AR6003_REV3_OTP_FILE;
- break;
}
+ filename = ar->hw.fw_otp;
+
ret = ath6kl_get_fw(ar, filename, &ar->fw_otp,
&ar->fw_otp_len);
if (ret) {
@@ -800,38 +764,22 @@ static int ath6kl_fetch_fw_file(struct ath6kl *ar)
return 0;
if (testmode) {
- switch (ar->version.target_ver) {
- case AR6003_REV2_VERSION:
- filename = AR6003_REV2_TCMD_FIRMWARE_FILE;
- break;
- case AR6003_REV3_VERSION:
- filename = AR6003_REV3_TCMD_FIRMWARE_FILE;
- break;
- case AR6004_REV1_VERSION:
- ath6kl_warn("testmode not supported with ar6004\n");
+ if (ar->hw.fw_tcmd == NULL) {
+ ath6kl_warn("testmode not supported\n");
return -EOPNOTSUPP;
- default:
- ath6kl_warn("unknown target version: 0x%x\n",
- ar->version.target_ver);
- return -EINVAL;
}
+ filename = ar->hw.fw_tcmd;
+
set_bit(TESTMODE, &ar->flag);
goto get_fw;
}
- switch (ar->version.target_ver) {
- case AR6003_REV2_VERSION:
- filename = AR6003_REV2_FIRMWARE_FILE;
- break;
- case AR6004_REV1_VERSION:
- filename = AR6004_REV1_FIRMWARE_FILE;
- break;
- default:
- filename = AR6003_REV3_FIRMWARE_FILE;
- break;
- }
+ if (WARN_ON(ar->hw.fw == NULL))
+ return -EINVAL;
+
+ filename = ar->hw.fw;
get_fw:
ret = ath6kl_get_fw(ar, filename, &ar->fw, &ar->fw_len);
@@ -849,27 +797,20 @@ static int ath6kl_fetch_patch_file(struct ath6kl *ar)
const char *filename;
int ret;
- switch (ar->version.target_ver) {
- case AR6003_REV2_VERSION:
- filename = AR6003_REV2_PATCH_FILE;
- break;
- case AR6004_REV1_VERSION:
- /* FIXME: implement for AR6004 */
+ if (ar->fw_patch != NULL)
return 0;
- break;
- default:
- filename = AR6003_REV3_PATCH_FILE;
- break;
- }
- if (ar->fw_patch == NULL) {
- ret = ath6kl_get_fw(ar, filename, &ar->fw_patch,
- &ar->fw_patch_len);
- if (ret) {
- ath6kl_err("Failed to get patch file %s: %d\n",
- filename, ret);
- return ret;
- }
+ if (ar->hw.fw_patch == NULL)
+ return 0;
+
+ filename = ar->hw.fw_patch;
+
+ ret = ath6kl_get_fw(ar, filename, &ar->fw_patch,
+ &ar->fw_patch_len);
+ if (ret) {
+ ath6kl_err("Failed to get patch file %s: %d\n",
+ filename, ret);
+ return ret;
}
return 0;
@@ -904,19 +845,10 @@ static int ath6kl_fetch_fw_api2(struct ath6kl *ar)
int ret, ie_id, i, index, bit;
__le32 *val;
- switch (ar->version.target_ver) {
- case AR6003_REV2_VERSION:
- filename = AR6003_REV2_FIRMWARE_2_FILE;
- break;
- case AR6003_REV3_VERSION:
- filename = AR6003_REV3_FIRMWARE_2_FILE;
- break;
- case AR6004_REV1_VERSION:
- filename = AR6004_REV1_FIRMWARE_2_FILE;
- break;
- default:
+ if (ar->hw.fw_api2 == NULL)
return -EOPNOTSUPP;
- }
+
+ filename = ar->hw.fw_api2;
ret = request_firmware(&fw, filename, ar->dev);
if (ret)
@@ -1006,12 +938,15 @@ static int ath6kl_fetch_fw_api2(struct ath6kl *ar)
ar->hw.reserved_ram_size);
break;
case ATH6KL_FW_IE_CAPABILITIES:
+ if (ie_len < DIV_ROUND_UP(ATH6KL_FW_CAPABILITY_MAX, 8))
+ break;
+
ath6kl_dbg(ATH6KL_DBG_BOOT,
"found firmware capabilities ie (%zd B)\n",
ie_len);
for (i = 0; i < ATH6KL_FW_CAPABILITY_MAX; i++) {
- index = ALIGN(i, 8) / 8;
+ index = i / 8;
bit = i % 8;
if (data[index] & (1 << bit))
@@ -1030,9 +965,34 @@ static int ath6kl_fetch_fw_api2(struct ath6kl *ar)
ar->hw.dataset_patch_addr = le32_to_cpup(val);
ath6kl_dbg(ATH6KL_DBG_BOOT,
- "found patch address ie 0x%d\n",
+ "found patch address ie 0x%x\n",
ar->hw.dataset_patch_addr);
break;
+ case ATH6KL_FW_IE_BOARD_ADDR:
+ if (ie_len != sizeof(*val))
+ break;
+
+ val = (__le32 *) data;
+ ar->hw.board_addr = le32_to_cpup(val);
+
+ ath6kl_dbg(ATH6KL_DBG_BOOT,
+ "found board address ie 0x%x\n",
+ ar->hw.board_addr);
+ break;
+ case ATH6KL_FW_IE_VIF_MAX:
+ if (ie_len != sizeof(*val))
+ break;
+
+ val = (__le32 *) data;
+ ar->vif_max = min_t(unsigned int, le32_to_cpup(val),
+ ATH6KL_VIF_MAX);
+
+ if (ar->vif_max > 1 && !ar->p2p)
+ ar->max_norm_iface = 2;
+
+ ath6kl_dbg(ATH6KL_DBG_BOOT,
+ "found vif max ie %d\n", ar->vif_max);
+ break;
default:
ath6kl_dbg(ATH6KL_DBG_BOOT, "Unknown fw ie: %u\n",
le32_to_cpup(&hdr->id));
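Two changes in the capabilities handling above are worth calling out: the length check rejects a truncated IE, and the byte index is now i / 8 rather than ALIGN(i, 8) / 8, which previously pointed every bit after the first byte at the wrong offset. A small self-contained sketch of the corrected parsing, with an invented capability count and payload:

#include <stdio.h>
#include <stddef.h>

#define CAPABILITY_MAX	12	/* illustrative, not ATH6KL_FW_CAPABILITY_MAX */

int main(void)
{
	/* pretend IE payload: capability bits 1 and 9 set */
	unsigned char data[] = { 0x02, 0x02 };
	size_t ie_len = sizeof(data);
	int i;

	/* reject a truncated IE, as the hunk above now does */
	if (ie_len < (CAPABILITY_MAX + 7) / 8)
		return 1;

	for (i = 0; i < CAPABILITY_MAX; i++) {
		int index = i / 8;	/* the fix: plain byte index */
		int bit = i % 8;

		if (data[index] & (1 << bit))
			printf("capability %d supported\n", i);
	}
	return 0;
}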
@@ -1087,8 +1047,8 @@ static int ath6kl_upload_board_file(struct ath6kl *ar)
	 * For AR6004, the host determines the target RAM address for
* writing board data.
*/
- if (ar->target_type == TARGET_TYPE_AR6004) {
- board_address = AR6004_REV1_BOARD_DATA_ADDRESS;
+ if (ar->hw.board_addr != 0) {
+ board_address = ar->hw.board_addr;
ath6kl_bmi_write(ar,
ath6kl_get_hi_item_addr(ar,
HI_ITEM(hi_board_data)),
@@ -1106,7 +1066,8 @@ static int ath6kl_upload_board_file(struct ath6kl *ar)
HI_ITEM(hi_board_ext_data)),
(u8 *) &board_ext_address, 4);
- if (board_ext_address == 0) {
+ if (ar->target_type == TARGET_TYPE_AR6003 &&
+ board_ext_address == 0) {
ath6kl_err("Failed to get board file target address.\n");
return -EINVAL;
}
@@ -1126,8 +1087,8 @@ static int ath6kl_upload_board_file(struct ath6kl *ar)
break;
}
- if (ar->fw_board_len == (board_data_size +
- board_ext_data_size)) {
+ if (board_ext_address &&
+ ar->fw_board_len == (board_data_size + board_ext_data_size)) {
/* write extended board data */
ath6kl_dbg(ATH6KL_DBG_BOOT,
@@ -1182,10 +1143,11 @@ static int ath6kl_upload_board_file(struct ath6kl *ar)
static int ath6kl_upload_otp(struct ath6kl *ar)
{
u32 address, param;
+ bool from_hw = false;
int ret;
- if (WARN_ON(ar->fw_otp == NULL))
- return -ENOENT;
+ if (ar->fw_otp == NULL)
+ return 0;
address = ar->hw.app_load_addr;
@@ -1210,15 +1172,20 @@ static int ath6kl_upload_otp(struct ath6kl *ar)
return ret;
}
- ar->hw.app_start_override_addr = address;
+ if (ar->hw.app_start_override_addr == 0) {
+ ar->hw.app_start_override_addr = address;
+ from_hw = true;
+ }
- ath6kl_dbg(ATH6KL_DBG_BOOT, "app_start_override_addr 0x%x\n",
+ ath6kl_dbg(ATH6KL_DBG_BOOT, "app_start_override_addr%s 0x%x\n",
+ from_hw ? " (from hw)" : "",
ar->hw.app_start_override_addr);
/* execute the OTP code */
- ath6kl_dbg(ATH6KL_DBG_BOOT, "executing OTP at 0x%x\n", address);
+ ath6kl_dbg(ATH6KL_DBG_BOOT, "executing OTP at 0x%x\n",
+ ar->hw.app_start_override_addr);
param = 0;
- ath6kl_bmi_execute(ar, address, &param);
+ ath6kl_bmi_execute(ar, ar->hw.app_start_override_addr, &param);
return ret;
}
@@ -1229,7 +1196,7 @@ static int ath6kl_upload_firmware(struct ath6kl *ar)
int ret;
if (WARN_ON(ar->fw == NULL))
- return -ENOENT;
+ return 0;
address = ar->hw.app_load_addr;
@@ -1259,8 +1226,8 @@ static int ath6kl_upload_patch(struct ath6kl *ar)
u32 address, param;
int ret;
- if (WARN_ON(ar->fw_patch == NULL))
- return -ENOENT;
+ if (ar->fw_patch == NULL)
+ return 0;
address = ar->hw.dataset_patch_addr;
@@ -1345,7 +1312,7 @@ static int ath6kl_init_upload(struct ath6kl *ar)
return status;
/* WAR to avoid SDIO CRC err */
- if (ar->version.target_ver == AR6003_REV2_VERSION) {
+ if (ar->version.target_ver == AR6003_HW_2_0_VERSION) {
ath6kl_err("temporary war to avoid sdio crc error\n");
param = 0x20;
@@ -1402,43 +1369,29 @@ static int ath6kl_init_upload(struct ath6kl *ar)
if (status)
return status;
- /* Configure GPIO AR6003 UART */
- param = CONFIG_AR600x_DEBUG_UART_TX_PIN;
- status = ath6kl_bmi_write(ar,
- ath6kl_get_hi_item_addr(ar,
- HI_ITEM(hi_dbg_uart_txpin)),
- (u8 *)&param, 4);
-
return status;
}
static int ath6kl_init_hw_params(struct ath6kl *ar)
{
- switch (ar->version.target_ver) {
- case AR6003_REV2_VERSION:
- ar->hw.dataset_patch_addr = AR6003_REV2_DATASET_PATCH_ADDRESS;
- ar->hw.app_load_addr = AR6003_REV2_APP_LOAD_ADDRESS;
- ar->hw.board_ext_data_addr = AR6003_REV2_BOARD_EXT_DATA_ADDRESS;
- ar->hw.reserved_ram_size = AR6003_REV2_RAM_RESERVE_SIZE;
- break;
- case AR6003_REV3_VERSION:
- ar->hw.dataset_patch_addr = AR6003_REV3_DATASET_PATCH_ADDRESS;
- ar->hw.app_load_addr = 0x1234;
- ar->hw.board_ext_data_addr = AR6003_REV3_BOARD_EXT_DATA_ADDRESS;
- ar->hw.reserved_ram_size = AR6003_REV3_RAM_RESERVE_SIZE;
- break;
- case AR6004_REV1_VERSION:
- ar->hw.dataset_patch_addr = AR6003_REV2_DATASET_PATCH_ADDRESS;
- ar->hw.app_load_addr = AR6003_REV3_APP_LOAD_ADDRESS;
- ar->hw.board_ext_data_addr = AR6004_REV1_BOARD_EXT_DATA_ADDRESS;
- ar->hw.reserved_ram_size = AR6004_REV1_RAM_RESERVE_SIZE;
- break;
- default:
+ const struct ath6kl_hw *hw;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
+ hw = &hw_list[i];
+
+ if (hw->id == ar->version.target_ver)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(hw_list)) {
ath6kl_err("Unsupported hardware version: 0x%x\n",
ar->version.target_ver);
return -EINVAL;
}
+ ar->hw = *hw;
+
ath6kl_dbg(ATH6KL_DBG_BOOT,
"target_ver 0x%x target_type 0x%x dataset_patch 0x%x app_load_addr 0x%x\n",
ar->version.target_ver, ar->target_type,
@@ -1447,75 +1400,75 @@ static int ath6kl_init_hw_params(struct ath6kl *ar)
"app_start_override_addr 0x%x board_ext_data_addr 0x%x reserved_ram_size 0x%x",
ar->hw.app_start_override_addr, ar->hw.board_ext_data_addr,
ar->hw.reserved_ram_size);
+ ath6kl_dbg(ATH6KL_DBG_BOOT,
+ "refclk_hz %d uarttx_pin %d",
+ ar->hw.refclk_hz, ar->hw.uarttx_pin);
return 0;
}
-static int ath6kl_init(struct net_device *dev)
+static const char *ath6kl_init_get_hif_name(enum ath6kl_hif_type type)
{
- struct ath6kl *ar = ath6kl_priv(dev);
- int status = 0;
- s32 timeleft;
+ switch (type) {
+ case ATH6KL_HIF_TYPE_SDIO:
+ return "sdio";
+ case ATH6KL_HIF_TYPE_USB:
+ return "usb";
+ }
- if (!ar)
- return -EIO;
+ return NULL;
+}
+
+int ath6kl_init_hw_start(struct ath6kl *ar)
+{
+ long timeleft;
+ int ret, i;
+
+ ath6kl_dbg(ATH6KL_DBG_BOOT, "hw start\n");
+
+ ret = ath6kl_hif_power_on(ar);
+ if (ret)
+ return ret;
+
+ ret = ath6kl_configure_target(ar);
+ if (ret)
+ goto err_power_off;
+
+ ret = ath6kl_init_upload(ar);
+ if (ret)
+ goto err_power_off;
/* Do we need to finish the BMI phase */
+ /* FIXME: return error from ath6kl_bmi_done() */
if (ath6kl_bmi_done(ar)) {
- status = -EIO;
- goto ath6kl_init_done;
- }
-
- /* Indicate that WMI is enabled (although not ready yet) */
- set_bit(WMI_ENABLED, &ar->flag);
- ar->wmi = ath6kl_wmi_init(ar);
- if (!ar->wmi) {
- ath6kl_err("failed to initialize wmi\n");
- status = -EIO;
- goto ath6kl_init_done;
+ ret = -EIO;
+ goto err_power_off;
}
- ath6kl_dbg(ATH6KL_DBG_TRC, "%s: got wmi @ 0x%p.\n", __func__, ar->wmi);
-
/*
* The reason we have to wait for the target here is that the
* driver layer has to init BMI in order to set the host block
* size.
*/
if (ath6kl_htc_wait_target(ar->htc_target)) {
- status = -EIO;
- goto err_node_cleanup;
+ ret = -EIO;
+ goto err_power_off;
}
if (ath6kl_init_service_ep(ar)) {
- status = -EIO;
+ ret = -EIO;
goto err_cleanup_scatter;
}
- /* setup access class priority mappings */
- ar->ac_stream_pri_map[WMM_AC_BK] = 0; /* lowest */
- ar->ac_stream_pri_map[WMM_AC_BE] = 1;
- ar->ac_stream_pri_map[WMM_AC_VI] = 2;
- ar->ac_stream_pri_map[WMM_AC_VO] = 3; /* highest */
-
- /* give our connected endpoints some buffers */
- ath6kl_rx_refill(ar->htc_target, ar->ctrl_ep);
- ath6kl_rx_refill(ar->htc_target, ar->ac2ep_map[WMM_AC_BE]);
-
- /* allocate some buffers that handle larger AMSDU frames */
- ath6kl_refill_amsdu_rxbufs(ar, ATH6KL_MAX_AMSDU_RX_BUFFERS);
-
/* setup credit distribution */
- ath6k_setup_credit_dist(ar->htc_target, &ar->credit_state_info);
-
- ath6kl_cookie_init(ar);
+ ath6kl_credit_setup(ar->htc_target, &ar->credit_state_info);
/* start HTC */
- status = ath6kl_htc_start(ar->htc_target);
-
- if (status) {
+ ret = ath6kl_htc_start(ar->htc_target);
+ if (ret) {
+ /* FIXME: call this */
ath6kl_cookie_cleanup(ar);
- goto err_rxbuf_cleanup;
+ goto err_cleanup_scatter;
}
/* Wait for Wmi event to be ready */
@@ -1526,54 +1479,81 @@ static int ath6kl_init(struct net_device *dev)
ath6kl_dbg(ATH6KL_DBG_BOOT, "firmware booted\n");
+
+ if (test_and_clear_bit(FIRST_BOOT, &ar->flag)) {
+ ath6kl_info("%s %s fw %s%s\n",
+ ar->hw.name,
+ ath6kl_init_get_hif_name(ar->hif_type),
+ ar->wiphy->fw_version,
+ test_bit(TESTMODE, &ar->flag) ? " testmode" : "");
+ }
+
if (ar->version.abi_ver != ATH6KL_ABI_VERSION) {
ath6kl_err("abi version mismatch: host(0x%x), target(0x%x)\n",
ATH6KL_ABI_VERSION, ar->version.abi_ver);
- status = -EIO;
+ ret = -EIO;
goto err_htc_stop;
}
if (!timeleft || signal_pending(current)) {
ath6kl_err("wmi is not ready or wait was interrupted\n");
- status = -EIO;
+ ret = -EIO;
goto err_htc_stop;
}
ath6kl_dbg(ATH6KL_DBG_TRC, "%s: wmi is ready\n", __func__);
	/* communicate the wmi protocol version to the target */
+ /* FIXME: return error */
if ((ath6kl_set_host_app_area(ar)) != 0)
ath6kl_err("unable to set the host app area\n");
- ar->conf_flags = ATH6KL_CONF_IGNORE_ERP_BARKER |
- ATH6KL_CONF_ENABLE_11N | ATH6KL_CONF_ENABLE_TX_BURST;
+ for (i = 0; i < ar->vif_max; i++) {
+ ret = ath6kl_target_config_wlan_params(ar, i);
+ if (ret)
+ goto err_htc_stop;
+ }
- ar->wdev->wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM;
+ ar->state = ATH6KL_STATE_ON;
- status = ath6kl_target_config_wlan_params(ar);
- if (!status)
- goto ath6kl_init_done;
+ return 0;
err_htc_stop:
ath6kl_htc_stop(ar->htc_target);
-err_rxbuf_cleanup:
- ath6kl_htc_flush_rx_buf(ar->htc_target);
- ath6kl_cleanup_amsdu_rxbufs(ar);
err_cleanup_scatter:
ath6kl_hif_cleanup_scatter(ar);
-err_node_cleanup:
- ath6kl_wmi_shutdown(ar->wmi);
- clear_bit(WMI_ENABLED, &ar->flag);
- ar->wmi = NULL;
+err_power_off:
+ ath6kl_hif_power_off(ar);
-ath6kl_init_done:
- return status;
+ return ret;
+}
+
+int ath6kl_init_hw_stop(struct ath6kl *ar)
+{
+ int ret;
+
+ ath6kl_dbg(ATH6KL_DBG_BOOT, "hw stop\n");
+
+ ath6kl_htc_stop(ar->htc_target);
+
+ ath6kl_hif_stop(ar);
+
+ ath6kl_bmi_reset(ar);
+
+ ret = ath6kl_hif_power_off(ar);
+ if (ret)
+ ath6kl_warn("failed to power off hif: %d\n", ret);
+
+ ar->state = ATH6KL_STATE_OFF;
+
+ return 0;
}
int ath6kl_core_init(struct ath6kl *ar)
{
- int ret = 0;
struct ath6kl_bmi_target_info targ_info;
+ struct net_device *ndev;
+ int ret = 0, i;
ar->ath6kl_wq = create_singlethread_workqueue("ath6kl");
if (!ar->ath6kl_wq)
@@ -1583,145 +1563,236 @@ int ath6kl_core_init(struct ath6kl *ar)
if (ret)
goto err_wq;
- ret = ath6kl_bmi_get_target_info(ar, &targ_info);
+ /*
+ * Turn on power to get hardware (target) version and leave power
+	 * on deliberately as we will boot the hardware anyway within a
+	 * few seconds.
+ */
+ ret = ath6kl_hif_power_on(ar);
if (ret)
goto err_bmi_cleanup;
+ ret = ath6kl_bmi_get_target_info(ar, &targ_info);
+ if (ret)
+ goto err_power_off;
+
ar->version.target_ver = le32_to_cpu(targ_info.version);
ar->target_type = le32_to_cpu(targ_info.type);
- ar->wdev->wiphy->hw_version = le32_to_cpu(targ_info.version);
+ ar->wiphy->hw_version = le32_to_cpu(targ_info.version);
ret = ath6kl_init_hw_params(ar);
if (ret)
- goto err_bmi_cleanup;
-
- ret = ath6kl_configure_target(ar);
- if (ret)
- goto err_bmi_cleanup;
+ goto err_power_off;
ar->htc_target = ath6kl_htc_create(ar);
if (!ar->htc_target) {
ret = -ENOMEM;
- goto err_bmi_cleanup;
- }
-
- ar->aggr_cntxt = aggr_init(ar->net_dev);
- if (!ar->aggr_cntxt) {
- ath6kl_err("failed to initialize aggr\n");
- ret = -ENOMEM;
- goto err_htc_cleanup;
+ goto err_power_off;
}
ret = ath6kl_fetch_firmwares(ar);
if (ret)
goto err_htc_cleanup;
- ret = ath6kl_init_upload(ar);
- if (ret)
+ /* FIXME: we should free all firmwares in the error cases below */
+
+ /* Indicate that WMI is enabled (although not ready yet) */
+ set_bit(WMI_ENABLED, &ar->flag);
+ ar->wmi = ath6kl_wmi_init(ar);
+ if (!ar->wmi) {
+ ath6kl_err("failed to initialize wmi\n");
+ ret = -EIO;
goto err_htc_cleanup;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_TRC, "%s: got wmi @ 0x%p.\n", __func__, ar->wmi);
- ret = ath6kl_init(ar->net_dev);
+ ret = ath6kl_register_ieee80211_hw(ar);
if (ret)
- goto err_htc_cleanup;
+ goto err_node_cleanup;
- /* This runs the init function if registered */
- ret = register_netdev(ar->net_dev);
+ ret = ath6kl_debug_init(ar);
if (ret) {
- ath6kl_err("register_netdev failed\n");
- ath6kl_destroy(ar->net_dev, 0);
- return ret;
+ wiphy_unregister(ar->wiphy);
+ goto err_node_cleanup;
+ }
+
+ for (i = 0; i < ar->vif_max; i++)
+ ar->avail_idx_map |= BIT(i);
+
+ rtnl_lock();
+
+ /* Add an initial station interface */
+ ndev = ath6kl_interface_add(ar, "wlan%d", NL80211_IFTYPE_STATION, 0,
+ INFRA_NETWORK);
+
+ rtnl_unlock();
+
+ if (!ndev) {
+ ath6kl_err("Failed to instantiate a network device\n");
+ ret = -ENOMEM;
+ wiphy_unregister(ar->wiphy);
+ goto err_debug_init;
}
- set_bit(NETDEV_REGISTERED, &ar->flag);
ath6kl_dbg(ATH6KL_DBG_TRC, "%s: name=%s dev=0x%p, ar=0x%p\n",
- __func__, ar->net_dev->name, ar->net_dev, ar);
+ __func__, ndev->name, ndev, ar);
+
+ /* setup access class priority mappings */
+ ar->ac_stream_pri_map[WMM_AC_BK] = 0; /* lowest */
+ ar->ac_stream_pri_map[WMM_AC_BE] = 1;
+ ar->ac_stream_pri_map[WMM_AC_VI] = 2;
+ ar->ac_stream_pri_map[WMM_AC_VO] = 3; /* highest */
+
+ /* give our connected endpoints some buffers */
+ ath6kl_rx_refill(ar->htc_target, ar->ctrl_ep);
+ ath6kl_rx_refill(ar->htc_target, ar->ac2ep_map[WMM_AC_BE]);
+
+ /* allocate some buffers that handle larger AMSDU frames */
+ ath6kl_refill_amsdu_rxbufs(ar, ATH6KL_MAX_AMSDU_RX_BUFFERS);
+
+ ath6kl_cookie_init(ar);
+
+ ar->conf_flags = ATH6KL_CONF_IGNORE_ERP_BARKER |
+ ATH6KL_CONF_ENABLE_11N | ATH6KL_CONF_ENABLE_TX_BURST;
+
+ if (suspend_cutpower)
+ ar->conf_flags |= ATH6KL_CONF_SUSPEND_CUTPOWER;
+
+ ar->wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM |
+ WIPHY_FLAG_HAVE_AP_SME |
+ WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
+ WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
+
+ if (test_bit(ATH6KL_FW_CAPABILITY_SCHED_SCAN, ar->fw_capabilities))
+ ar->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
+
+ ar->wiphy->probe_resp_offload =
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P |
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_80211U;
+
+ set_bit(FIRST_BOOT, &ar->flag);
+
+ ret = ath6kl_init_hw_start(ar);
+ if (ret) {
+ ath6kl_err("Failed to start hardware: %d\n", ret);
+ goto err_rxbuf_cleanup;
+ }
+
+ /*
+ * Set mac address which is received in ready event
+ * FIXME: Move to ath6kl_interface_add()
+ */
+ memcpy(ndev->dev_addr, ar->mac_addr, ETH_ALEN);
return ret;
+err_rxbuf_cleanup:
+ ath6kl_htc_flush_rx_buf(ar->htc_target);
+ ath6kl_cleanup_amsdu_rxbufs(ar);
+ rtnl_lock();
+ ath6kl_deinit_if_data(netdev_priv(ndev));
+ rtnl_unlock();
+ wiphy_unregister(ar->wiphy);
+err_debug_init:
+ ath6kl_debug_cleanup(ar);
+err_node_cleanup:
+ ath6kl_wmi_shutdown(ar->wmi);
+ clear_bit(WMI_ENABLED, &ar->flag);
+ ar->wmi = NULL;
err_htc_cleanup:
ath6kl_htc_cleanup(ar->htc_target);
+err_power_off:
+ ath6kl_hif_power_off(ar);
err_bmi_cleanup:
ath6kl_bmi_cleanup(ar);
err_wq:
destroy_workqueue(ar->ath6kl_wq);
+
return ret;
}
-void ath6kl_stop_txrx(struct ath6kl *ar)
+void ath6kl_cleanup_vif(struct ath6kl_vif *vif, bool wmi_ready)
{
- struct net_device *ndev = ar->net_dev;
+ static u8 bcast_mac[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ bool discon_issued;
- if (!ndev)
- return;
+ netif_stop_queue(vif->ndev);
- set_bit(DESTROY_IN_PROGRESS, &ar->flag);
+ clear_bit(WLAN_ENABLED, &vif->flags);
- if (down_interruptible(&ar->sem)) {
- ath6kl_err("down_interruptible failed\n");
- return;
- }
+ if (wmi_ready) {
+ discon_issued = test_bit(CONNECTED, &vif->flags) ||
+ test_bit(CONNECT_PEND, &vif->flags);
+ ath6kl_disconnect(vif);
+ del_timer(&vif->disconnect_timer);
- if (ar->wlan_pwr_state != WLAN_POWER_STATE_CUT_PWR)
- ath6kl_stop_endpoint(ndev, false, true);
+ if (discon_issued)
+ ath6kl_disconnect_event(vif, DISCONNECT_CMD,
+ (vif->nw_type & AP_NETWORK) ?
+ bcast_mac : vif->bssid,
+ 0, NULL, 0);
+ }
- clear_bit(WLAN_ENABLED, &ar->flag);
+ if (vif->scan_req) {
+ cfg80211_scan_done(vif->scan_req, true);
+ vif->scan_req = NULL;
+ }
}
-/*
- * We need to differentiate between the surprise and planned removal of the
- * device because of the following consideration:
- *
- * - In case of surprise removal, the hcd already frees up the pending
- * for the device and hence there is no need to unregister the function
- * driver inorder to get these requests. For planned removal, the function
- * driver has to explicitly unregister itself to have the hcd return all the
- * pending requests before the data structures for the devices are freed up.
- * Note that as per the current implementation, the function driver will
- * end up releasing all the devices since there is no API to selectively
- * release a particular device.
- *
- * - Certain commands issued to the target can be skipped for surprise
- * removal since they will anyway not go through.
- */
-void ath6kl_destroy(struct net_device *dev, unsigned int unregister)
+void ath6kl_stop_txrx(struct ath6kl *ar)
{
- struct ath6kl *ar;
+ struct ath6kl_vif *vif, *tmp_vif;
- if (!dev || !ath6kl_priv(dev)) {
- ath6kl_err("failed to get device structure\n");
+ set_bit(DESTROY_IN_PROGRESS, &ar->flag);
+
+ if (down_interruptible(&ar->sem)) {
+ ath6kl_err("down_interruptible failed\n");
return;
}
- ar = ath6kl_priv(dev);
-
- destroy_workqueue(ar->ath6kl_wq);
-
- if (ar->htc_target)
- ath6kl_htc_cleanup(ar->htc_target);
-
- aggr_module_destroy(ar->aggr_cntxt);
-
- ath6kl_cookie_cleanup(ar);
-
- ath6kl_cleanup_amsdu_rxbufs(ar);
+ spin_lock_bh(&ar->list_lock);
+ list_for_each_entry_safe(vif, tmp_vif, &ar->vif_list, list) {
+ list_del(&vif->list);
+ spin_unlock_bh(&ar->list_lock);
+ ath6kl_cleanup_vif(vif, test_bit(WMI_READY, &ar->flag));
+ rtnl_lock();
+ ath6kl_deinit_if_data(vif);
+ rtnl_unlock();
+ spin_lock_bh(&ar->list_lock);
+ }
+ spin_unlock_bh(&ar->list_lock);
- ath6kl_bmi_cleanup(ar);
+ clear_bit(WMI_READY, &ar->flag);
- ath6kl_debug_cleanup(ar);
+ /*
+	 * After wmi_shutdown all WMI events will be dropped. We
+	 * need to clean up the buffers allocated in AP mode and
+	 * give a disconnect notification to the stack, which usually
+ * happens in the disconnect_event. Simulate the disconnect
+ * event by calling the function directly. Sometimes
+ * disconnect_event will be received when the debug logs
+ * are collected.
+ */
+ ath6kl_wmi_shutdown(ar->wmi);
- if (unregister && test_bit(NETDEV_REGISTERED, &ar->flag)) {
- unregister_netdev(dev);
- clear_bit(NETDEV_REGISTERED, &ar->flag);
+ clear_bit(WMI_ENABLED, &ar->flag);
+ if (ar->htc_target) {
+ ath6kl_dbg(ATH6KL_DBG_TRC, "%s: shut down htc\n", __func__);
+ ath6kl_htc_stop(ar->htc_target);
}
- free_netdev(dev);
-
- kfree(ar->fw_board);
- kfree(ar->fw_otp);
- kfree(ar->fw);
- kfree(ar->fw_patch);
+ /*
+ * Try to reset the device if we can. The driver may have been
+ * configured NOT to reset the target during a debug session.
+ */
+ ath6kl_dbg(ATH6KL_DBG_TRC,
+ "attempting to reset target on instance destroy\n");
+ ath6kl_reset_device(ar, ar->target_type, true, true);
- ath6kl_cfg80211_deinit(ar);
+ clear_bit(WLAN_ENABLED, &ar->flag);
}
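
The per-vif teardown in ath6kl_stop_txrx() above follows a detach-then-cleanup pattern: take the list spinlock, unlink one vif, drop the lock around the cleanup steps that may sleep (they take rtnl_lock()), then re-take it for the next entry. A minimal sketch of that pattern, with hypothetical names standing in for the ath6kl structures:

	/* Sketch only: walk a spinlock-protected list, dropping the lock
	 * around per-item cleanup that may sleep. "item", "tmp",
	 * "item_list" and do_cleanup() are placeholders, not ath6kl
	 * symbols. */
	spin_lock_bh(&list_lock);
	list_for_each_entry_safe(item, tmp, &item_list, list) {
		list_del(&item->list);
		spin_unlock_bh(&list_lock);	/* cleanup may sleep */
		do_cleanup(item);
		spin_lock_bh(&list_lock);
	}
	spin_unlock_bh(&list_lock);
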
diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c
index 30b5a53db9ed..eea3c747653e 100644
--- a/drivers/net/wireless/ath/ath6kl/main.c
+++ b/drivers/net/wireless/ath/ath6kl/main.c
@@ -20,12 +20,13 @@
#include "target.h"
#include "debug.h"
-struct ath6kl_sta *ath6kl_find_sta(struct ath6kl *ar, u8 *node_addr)
+struct ath6kl_sta *ath6kl_find_sta(struct ath6kl_vif *vif, u8 *node_addr)
{
+ struct ath6kl *ar = vif->ar;
struct ath6kl_sta *conn = NULL;
u8 i, max_conn;
- max_conn = (ar->nw_type == AP_NETWORK) ? AP_MAX_NUM_STA : 0;
+ max_conn = (vif->nw_type == AP_NETWORK) ? AP_MAX_NUM_STA : 0;
for (i = 0; i < max_conn; i++) {
if (memcmp(node_addr, ar->sta_list[i].mac, ETH_ALEN) == 0) {
@@ -174,64 +175,6 @@ void ath6kl_free_cookie(struct ath6kl *ar, struct ath6kl_cookie *cookie)
ar->cookie_count++;
}
-/* set the window address register (using 4-byte register access ). */
-static int ath6kl_set_addrwin_reg(struct ath6kl *ar, u32 reg_addr, u32 addr)
-{
- int status;
- s32 i;
- __le32 addr_val;
-
- /*
- * Write bytes 1,2,3 of the register to set the upper address bytes,
- * the LSB is written last to initiate the access cycle
- */
-
- for (i = 1; i <= 3; i++) {
- /*
- * Fill the buffer with the address byte value we want to
- * hit 4 times. No need to worry about endianness as the
- * same byte is copied to all four bytes of addr_val at
- * any time.
- */
- memset((u8 *)&addr_val, ((u8 *)&addr)[i], 4);
-
- /*
- * Hit each byte of the register address with a 4-byte
- * write operation to the same address, this is a harmless
- * operation.
- */
- status = hif_read_write_sync(ar, reg_addr + i, (u8 *)&addr_val,
- 4, HIF_WR_SYNC_BYTE_FIX);
- if (status)
- break;
- }
-
- if (status) {
- ath6kl_err("failed to write initial bytes of 0x%x to window reg: 0x%X\n",
- addr, reg_addr);
- return status;
- }
-
- /*
- * Write the address register again, this time write the whole
- * 4-byte value. The effect here is that the LSB write causes the
- * cycle to start, the extra 3 byte write to bytes 1,2,3 has no
- * effect since we are writing the same values again
- */
- addr_val = cpu_to_le32(addr);
- status = hif_read_write_sync(ar, reg_addr,
- (u8 *)&(addr_val),
- 4, HIF_WR_SYNC_BYTE_INC);
-
- if (status) {
- ath6kl_err("failed to write 0x%x to window reg: 0x%X\n",
- addr, reg_addr);
- return status;
- }
-
- return 0;
-}
-
/*
* Read from the hardware through its diagnostic window. No cooperation
* from the firmware is required for this.
@@ -240,14 +183,7 @@ int ath6kl_diag_read32(struct ath6kl *ar, u32 address, u32 *value)
{
int ret;
- /* set window register to start read cycle */
- ret = ath6kl_set_addrwin_reg(ar, WINDOW_READ_ADDR_ADDRESS, address);
- if (ret)
- return ret;
-
- /* read the data */
- ret = hif_read_write_sync(ar, WINDOW_DATA_ADDRESS, (u8 *) value,
- sizeof(*value), HIF_RD_SYNC_BYTE_INC);
+ ret = ath6kl_hif_diag_read32(ar, address, value);
if (ret) {
ath6kl_warn("failed to read32 through diagnose window: %d\n",
ret);
@@ -265,18 +201,15 @@ int ath6kl_diag_write32(struct ath6kl *ar, u32 address, __le32 value)
{
int ret;
- /* set write data */
- ret = hif_read_write_sync(ar, WINDOW_DATA_ADDRESS, (u8 *) &value,
- sizeof(value), HIF_WR_SYNC_BYTE_INC);
+ ret = ath6kl_hif_diag_write32(ar, address, value);
+
if (ret) {
ath6kl_err("failed to write 0x%x during diagnose window to 0x%d\n",
address, value);
return ret;
}
- /* set window register, which starts the write cycle */
- return ath6kl_set_addrwin_reg(ar, WINDOW_WRITE_ADDR_ADDRESS,
- address);
+ return 0;
}
int ath6kl_diag_read(struct ath6kl *ar, u32 address, void *data, u32 length)
@@ -393,8 +326,8 @@ out:
#define AR6003_RESET_CONTROL_ADDRESS 0x00004000
#define AR6004_RESET_CONTROL_ADDRESS 0x00004000
-static void ath6kl_reset_device(struct ath6kl *ar, u32 target_type,
- bool wait_fot_compltn, bool cold_reset)
+void ath6kl_reset_device(struct ath6kl *ar, u32 target_type,
+ bool wait_fot_compltn, bool cold_reset)
{
int status = 0;
u32 address;
@@ -425,102 +358,33 @@ static void ath6kl_reset_device(struct ath6kl *ar, u32 target_type,
ath6kl_err("failed to reset target\n");
}
-void ath6kl_stop_endpoint(struct net_device *dev, bool keep_profile,
- bool get_dbglogs)
-{
- struct ath6kl *ar = ath6kl_priv(dev);
- static u8 bcast_mac[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
- bool discon_issued;
-
- netif_stop_queue(dev);
-
- /* disable the target and the interrupts associated with it */
- if (test_bit(WMI_READY, &ar->flag)) {
- discon_issued = (test_bit(CONNECTED, &ar->flag) ||
- test_bit(CONNECT_PEND, &ar->flag));
- ath6kl_disconnect(ar);
- if (!keep_profile)
- ath6kl_init_profile_info(ar);
-
- del_timer(&ar->disconnect_timer);
-
- clear_bit(WMI_READY, &ar->flag);
- ath6kl_wmi_shutdown(ar->wmi);
- clear_bit(WMI_ENABLED, &ar->flag);
- ar->wmi = NULL;
-
- /*
- * After wmi_shudown all WMI events will be dropped. We
- * need to cleanup the buffers allocated in AP mode and
- * give disconnect notification to stack, which usually
- * happens in the disconnect_event. Simulate the disconnect
- * event by calling the function directly. Sometimes
- * disconnect_event will be received when the debug logs
- * are collected.
- */
- if (discon_issued)
- ath6kl_disconnect_event(ar, DISCONNECT_CMD,
- (ar->nw_type & AP_NETWORK) ?
- bcast_mac : ar->bssid,
- 0, NULL, 0);
-
- ar->user_key_ctrl = 0;
-
- } else {
- ath6kl_dbg(ATH6KL_DBG_TRC,
- "%s: wmi is not ready 0x%p 0x%p\n",
- __func__, ar, ar->wmi);
-
- /* Shut down WMI if we have started it */
- if (test_bit(WMI_ENABLED, &ar->flag)) {
- ath6kl_dbg(ATH6KL_DBG_TRC,
- "%s: shut down wmi\n", __func__);
- ath6kl_wmi_shutdown(ar->wmi);
- clear_bit(WMI_ENABLED, &ar->flag);
- ar->wmi = NULL;
- }
- }
-
- if (ar->htc_target) {
- ath6kl_dbg(ATH6KL_DBG_TRC, "%s: shut down htc\n", __func__);
- ath6kl_htc_stop(ar->htc_target);
- }
-
- /*
- * Try to reset the device if we can. The driver may have been
- * configure NOT to reset the target during a debug session.
- */
- ath6kl_dbg(ATH6KL_DBG_TRC,
- "attempting to reset target on instance destroy\n");
- ath6kl_reset_device(ar, ar->target_type, true, true);
-}
-
-static void ath6kl_install_static_wep_keys(struct ath6kl *ar)
+static void ath6kl_install_static_wep_keys(struct ath6kl_vif *vif)
{
u8 index;
u8 keyusage;
for (index = WMI_MIN_KEY_INDEX; index <= WMI_MAX_KEY_INDEX; index++) {
- if (ar->wep_key_list[index].key_len) {
+ if (vif->wep_key_list[index].key_len) {
keyusage = GROUP_USAGE;
- if (index == ar->def_txkey_index)
+ if (index == vif->def_txkey_index)
keyusage |= TX_USAGE;
- ath6kl_wmi_addkey_cmd(ar->wmi,
+ ath6kl_wmi_addkey_cmd(vif->ar->wmi, vif->fw_vif_idx,
index,
WEP_CRYPT,
keyusage,
- ar->wep_key_list[index].key_len,
- NULL,
- ar->wep_key_list[index].key,
+ vif->wep_key_list[index].key_len,
+ NULL, 0,
+ vif->wep_key_list[index].key,
KEY_OP_INIT_VAL, NULL,
NO_SYNC_WMIFLAG);
}
}
}
-void ath6kl_connect_ap_mode_bss(struct ath6kl *ar, u16 channel)
+void ath6kl_connect_ap_mode_bss(struct ath6kl_vif *vif, u16 channel)
{
+ struct ath6kl *ar = vif->ar;
struct ath6kl_req_key *ik;
int res;
u8 key_rsc[ATH6KL_KEY_SEQ_LEN];
@@ -529,11 +393,13 @@ void ath6kl_connect_ap_mode_bss(struct ath6kl *ar, u16 channel)
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "AP mode started on %u MHz\n", channel);
- switch (ar->auth_mode) {
+ switch (vif->auth_mode) {
case NONE_AUTH:
- if (ar->prwise_crypto == WEP_CRYPT)
- ath6kl_install_static_wep_keys(ar);
- break;
+ if (vif->prwise_crypto == WEP_CRYPT)
+ ath6kl_install_static_wep_keys(vif);
+ if (!ik->valid || ik->key_type != WAPI_CRYPT)
+ break;
+ /* for WAPI, we need to set the delayed group key, continue: */
case WPA_PSK_AUTH:
case WPA2_PSK_AUTH:
case (WPA_PSK_AUTH | WPA2_PSK_AUTH):
@@ -544,8 +410,9 @@ void ath6kl_connect_ap_mode_bss(struct ath6kl *ar, u16 channel)
"the initial group key for AP mode\n");
memset(key_rsc, 0, sizeof(key_rsc));
res = ath6kl_wmi_addkey_cmd(
- ar->wmi, ik->key_index, ik->key_type,
- GROUP_USAGE, ik->key_len, key_rsc, ik->key,
+ ar->wmi, vif->fw_vif_idx, ik->key_index, ik->key_type,
+ GROUP_USAGE, ik->key_len, key_rsc, ATH6KL_KEY_SEQ_LEN,
+ ik->key,
KEY_OP_INIT_VAL, NULL, SYNC_BOTH_WMIFLAG);
if (res) {
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "Delayed "
@@ -554,15 +421,16 @@ void ath6kl_connect_ap_mode_bss(struct ath6kl *ar, u16 channel)
break;
}
- ath6kl_wmi_bssfilter_cmd(ar->wmi, NONE_BSS_FILTER, 0);
- set_bit(CONNECTED, &ar->flag);
- netif_carrier_on(ar->net_dev);
+ ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx, NONE_BSS_FILTER, 0);
+ set_bit(CONNECTED, &vif->flags);
+ netif_carrier_on(vif->ndev);
}
-void ath6kl_connect_ap_mode_sta(struct ath6kl *ar, u16 aid, u8 *mac_addr,
+void ath6kl_connect_ap_mode_sta(struct ath6kl_vif *vif, u16 aid, u8 *mac_addr,
u8 keymgmt, u8 ucipher, u8 auth,
u8 assoc_req_len, u8 *assoc_info)
{
+ struct ath6kl *ar = vif->ar;
u8 *ies = NULL, *wpa_ie = NULL, *pos;
size_t ies_len = 0;
struct station_info sinfo;
@@ -600,6 +468,18 @@ void ath6kl_connect_ap_mode_sta(struct ath6kl *ar, u16 aid, u8 *mac_addr,
wpa_ie = pos; /* WPS IE */
break; /* overrides WPA/RSN IE */
}
+ } else if (pos[0] == 0x44 && wpa_ie == NULL) {
+ /*
+ * Note: the WAPI Parameter Set IE re-uses the Element ID
+ * that was officially allocated for BSS AC Access Delay,
+ * so we need to be a bit more careful when parsing the
+ * frame. However, the BSS AC Access Delay element is not
+ * supposed to be included in (Re)Association Request
+ * frames, so this should not cause problems.
+ */
+ wpa_ie = pos; /* WAPI IE */
+ break;
}
pos += 2 + pos[1];
}
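
The IE walk above relies on the standard information-element TLV layout: one byte of element ID at pos[0], one byte of length at pos[1], then the payload, so each step advances by 2 + pos[1]. A bounds-checked sketch of the same walk, reduced to the WAPI case (illustrative only; ies and ies_len are assumed to describe the association request IE buffer):

	const u8 *pos = ies, *wapi_ie = NULL;

	while (pos + 2 <= ies + ies_len &&
	       pos + 2 + pos[1] <= ies + ies_len) {
		if (pos[0] == 0x44 && !wapi_ie)
			wapi_ie = pos;	/* WAPI shares the BSS AC Access
					 * Delay element ID */
		pos += 2 + pos[1];	/* ID + length + payload */
	}
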
@@ -617,380 +497,49 @@ void ath6kl_connect_ap_mode_sta(struct ath6kl *ar, u16 aid, u8 *mac_addr,
sinfo.assoc_req_ies_len = ies_len;
sinfo.filled |= STATION_INFO_ASSOC_REQ_IES;
- cfg80211_new_sta(ar->net_dev, mac_addr, &sinfo, GFP_KERNEL);
-
- netif_wake_queue(ar->net_dev);
-}
-
-/* Functions for Tx credit handling */
-void ath6k_credit_init(struct htc_credit_state_info *cred_info,
- struct list_head *ep_list,
- int tot_credits)
-{
- struct htc_endpoint_credit_dist *cur_ep_dist;
- int count;
-
- cred_info->cur_free_credits = tot_credits;
- cred_info->total_avail_credits = tot_credits;
-
- list_for_each_entry(cur_ep_dist, ep_list, list) {
- if (cur_ep_dist->endpoint == ENDPOINT_0)
- continue;
-
- cur_ep_dist->cred_min = cur_ep_dist->cred_per_msg;
-
- if (tot_credits > 4)
- if ((cur_ep_dist->svc_id == WMI_DATA_BK_SVC) ||
- (cur_ep_dist->svc_id == WMI_DATA_BE_SVC)) {
- ath6kl_deposit_credit_to_ep(cred_info,
- cur_ep_dist,
- cur_ep_dist->cred_min);
- cur_ep_dist->dist_flags |= HTC_EP_ACTIVE;
- }
-
- if (cur_ep_dist->svc_id == WMI_CONTROL_SVC) {
- ath6kl_deposit_credit_to_ep(cred_info, cur_ep_dist,
- cur_ep_dist->cred_min);
- /*
- * Control service is always marked active, it
- * never goes inactive EVER.
- */
- cur_ep_dist->dist_flags |= HTC_EP_ACTIVE;
- } else if (cur_ep_dist->svc_id == WMI_DATA_BK_SVC)
- /* this is the lowest priority data endpoint */
- cred_info->lowestpri_ep_dist = cur_ep_dist->list;
-
- /*
- * Streams have to be created (explicit | implicit) for all
- * kinds of traffic. BE endpoints are also inactive in the
- * beginning. When BE traffic starts it creates implicit
- * streams that redistributes credits.
- *
- * Note: all other endpoints have minimums set but are
- * initially given NO credits. credits will be distributed
- * as traffic activity demands
- */
- }
-
- WARN_ON(cred_info->cur_free_credits <= 0);
-
- list_for_each_entry(cur_ep_dist, ep_list, list) {
- if (cur_ep_dist->endpoint == ENDPOINT_0)
- continue;
-
- if (cur_ep_dist->svc_id == WMI_CONTROL_SVC)
- cur_ep_dist->cred_norm = cur_ep_dist->cred_per_msg;
- else {
- /*
- * For the remaining data endpoints, we assume that
- * each cred_per_msg are the same. We use a simple
- * calculation here, we take the remaining credits
- * and determine how many max messages this can
- * cover and then set each endpoint's normal value
- * equal to 3/4 this amount.
- */
- count = (cred_info->cur_free_credits /
- cur_ep_dist->cred_per_msg)
- * cur_ep_dist->cred_per_msg;
- count = (count * 3) >> 2;
- count = max(count, cur_ep_dist->cred_per_msg);
- cur_ep_dist->cred_norm = count;
-
- }
- }
-}
-
-/* initialize and setup credit distribution */
-int ath6k_setup_credit_dist(void *htc_handle,
- struct htc_credit_state_info *cred_info)
-{
- u16 servicepriority[5];
-
- memset(cred_info, 0, sizeof(struct htc_credit_state_info));
-
- servicepriority[0] = WMI_CONTROL_SVC; /* highest */
- servicepriority[1] = WMI_DATA_VO_SVC;
- servicepriority[2] = WMI_DATA_VI_SVC;
- servicepriority[3] = WMI_DATA_BE_SVC;
- servicepriority[4] = WMI_DATA_BK_SVC; /* lowest */
-
- /* set priority list */
- ath6kl_htc_set_credit_dist(htc_handle, cred_info, servicepriority, 5);
-
- return 0;
-}
-
-/* reduce an ep's credits back to a set limit */
-static void ath6k_reduce_credits(struct htc_credit_state_info *cred_info,
- struct htc_endpoint_credit_dist *ep_dist,
- int limit)
-{
- int credits;
-
- ep_dist->cred_assngd = limit;
-
- if (ep_dist->credits <= limit)
- return;
+ cfg80211_new_sta(vif->ndev, mac_addr, &sinfo, GFP_KERNEL);
- credits = ep_dist->credits - limit;
- ep_dist->credits -= credits;
- cred_info->cur_free_credits += credits;
-}
-
-static void ath6k_credit_update(struct htc_credit_state_info *cred_info,
- struct list_head *epdist_list)
-{
- struct htc_endpoint_credit_dist *cur_dist_list;
-
- list_for_each_entry(cur_dist_list, epdist_list, list) {
- if (cur_dist_list->endpoint == ENDPOINT_0)
- continue;
-
- if (cur_dist_list->cred_to_dist > 0) {
- cur_dist_list->credits +=
- cur_dist_list->cred_to_dist;
- cur_dist_list->cred_to_dist = 0;
- if (cur_dist_list->credits >
- cur_dist_list->cred_assngd)
- ath6k_reduce_credits(cred_info,
- cur_dist_list,
- cur_dist_list->cred_assngd);
-
- if (cur_dist_list->credits >
- cur_dist_list->cred_norm)
- ath6k_reduce_credits(cred_info, cur_dist_list,
- cur_dist_list->cred_norm);
-
- if (!(cur_dist_list->dist_flags & HTC_EP_ACTIVE)) {
- if (cur_dist_list->txq_depth == 0)
- ath6k_reduce_credits(cred_info,
- cur_dist_list, 0);
- }
- }
- }
-}
-
-/*
- * HTC has an endpoint that needs credits, ep_dist is the endpoint in
- * question.
- */
-void ath6k_seek_credits(struct htc_credit_state_info *cred_info,
- struct htc_endpoint_credit_dist *ep_dist)
-{
- struct htc_endpoint_credit_dist *curdist_list;
- int credits = 0;
- int need;
-
- if (ep_dist->svc_id == WMI_CONTROL_SVC)
- goto out;
-
- if ((ep_dist->svc_id == WMI_DATA_VI_SVC) ||
- (ep_dist->svc_id == WMI_DATA_VO_SVC))
- if ((ep_dist->cred_assngd >= ep_dist->cred_norm))
- goto out;
-
- /*
- * For all other services, we follow a simple algorithm of:
- *
- * 1. checking the free pool for credits
- * 2. checking lower priority endpoints for credits to take
- */
-
- credits = min(cred_info->cur_free_credits, ep_dist->seek_cred);
-
- if (credits >= ep_dist->seek_cred)
- goto out;
-
- /*
- * We don't have enough in the free pool, try taking away from
- * lower priority services The rule for taking away credits:
- *
- * 1. Only take from lower priority endpoints
- * 2. Only take what is allocated above the minimum (never
- * starve an endpoint completely)
- * 3. Only take what you need.
- */
-
- list_for_each_entry_reverse(curdist_list,
- &cred_info->lowestpri_ep_dist,
- list) {
- if (curdist_list == ep_dist)
- break;
-
- need = ep_dist->seek_cred - cred_info->cur_free_credits;
-
- if ((curdist_list->cred_assngd - need) >=
- curdist_list->cred_min) {
- /*
- * The current one has been allocated more than
- * it's minimum and it has enough credits assigned
- * above it's minimum to fulfill our need try to
- * take away just enough to fulfill our need.
- */
- ath6k_reduce_credits(cred_info, curdist_list,
- curdist_list->cred_assngd - need);
-
- if (cred_info->cur_free_credits >=
- ep_dist->seek_cred)
- break;
- }
-
- if (curdist_list->endpoint == ENDPOINT_0)
- break;
- }
-
- credits = min(cred_info->cur_free_credits, ep_dist->seek_cred);
-
-out:
- /* did we find some credits? */
- if (credits)
- ath6kl_deposit_credit_to_ep(cred_info, ep_dist, credits);
-
- ep_dist->seek_cred = 0;
-}
-
-/* redistribute credits based on activity change */
-static void ath6k_redistribute_credits(struct htc_credit_state_info *info,
- struct list_head *ep_dist_list)
-{
- struct htc_endpoint_credit_dist *curdist_list;
-
- list_for_each_entry(curdist_list, ep_dist_list, list) {
- if (curdist_list->endpoint == ENDPOINT_0)
- continue;
-
- if ((curdist_list->svc_id == WMI_DATA_BK_SVC) ||
- (curdist_list->svc_id == WMI_DATA_BE_SVC))
- curdist_list->dist_flags |= HTC_EP_ACTIVE;
-
- if ((curdist_list->svc_id != WMI_CONTROL_SVC) &&
- !(curdist_list->dist_flags & HTC_EP_ACTIVE)) {
- if (curdist_list->txq_depth == 0)
- ath6k_reduce_credits(info,
- curdist_list, 0);
- else
- ath6k_reduce_credits(info,
- curdist_list,
- curdist_list->cred_min);
- }
- }
-}
-
-/*
- *
- * This function is invoked whenever endpoints require credit
- * distributions. A lock is held while this function is invoked, this
- * function shall NOT block. The ep_dist_list is a list of distribution
- * structures in prioritized order as defined by the call to the
- * htc_set_credit_dist() api.
- */
-void ath6k_credit_distribute(struct htc_credit_state_info *cred_info,
- struct list_head *ep_dist_list,
- enum htc_credit_dist_reason reason)
-{
- switch (reason) {
- case HTC_CREDIT_DIST_SEND_COMPLETE:
- ath6k_credit_update(cred_info, ep_dist_list);
- break;
- case HTC_CREDIT_DIST_ACTIVITY_CHANGE:
- ath6k_redistribute_credits(cred_info, ep_dist_list);
- break;
- default:
- break;
- }
-
- WARN_ON(cred_info->cur_free_credits > cred_info->total_avail_credits);
- WARN_ON(cred_info->cur_free_credits < 0);
+ netif_wake_queue(vif->ndev);
}
void disconnect_timer_handler(unsigned long ptr)
{
struct net_device *dev = (struct net_device *)ptr;
- struct ath6kl *ar = ath6kl_priv(dev);
+ struct ath6kl_vif *vif = netdev_priv(dev);
- ath6kl_init_profile_info(ar);
- ath6kl_disconnect(ar);
+ ath6kl_init_profile_info(vif);
+ ath6kl_disconnect(vif);
}
-void ath6kl_disconnect(struct ath6kl *ar)
+void ath6kl_disconnect(struct ath6kl_vif *vif)
{
- if (test_bit(CONNECTED, &ar->flag) ||
- test_bit(CONNECT_PEND, &ar->flag)) {
- ath6kl_wmi_disconnect_cmd(ar->wmi);
+ if (test_bit(CONNECTED, &vif->flags) ||
+ test_bit(CONNECT_PEND, &vif->flags)) {
+ ath6kl_wmi_disconnect_cmd(vif->ar->wmi, vif->fw_vif_idx);
/*
* Disconnect command is issued, clear the connect pending
* flag. The connected flag will be cleared in
* disconnect event notification.
*/
- clear_bit(CONNECT_PEND, &ar->flag);
+ clear_bit(CONNECT_PEND, &vif->flags);
}
}
-void ath6kl_deep_sleep_enable(struct ath6kl *ar)
-{
- switch (ar->sme_state) {
- case SME_CONNECTING:
- cfg80211_connect_result(ar->net_dev, ar->bssid, NULL, 0,
- NULL, 0,
- WLAN_STATUS_UNSPECIFIED_FAILURE,
- GFP_KERNEL);
- break;
- case SME_CONNECTED:
- default:
- /*
- * FIXME: oddly enough smeState is in DISCONNECTED during
- * suspend, why? Need to send disconnected event in that
- * state.
- */
- cfg80211_disconnected(ar->net_dev, 0, NULL, 0, GFP_KERNEL);
- break;
- }
-
- if (test_bit(CONNECTED, &ar->flag) ||
- test_bit(CONNECT_PEND, &ar->flag))
- ath6kl_wmi_disconnect_cmd(ar->wmi);
-
- ar->sme_state = SME_DISCONNECTED;
-
- /* disable scanning */
- if (ath6kl_wmi_scanparams_cmd(ar->wmi, 0xFFFF, 0, 0, 0, 0, 0, 0, 0,
- 0, 0) != 0)
- printk(KERN_WARNING "ath6kl: failed to disable scan "
- "during suspend\n");
-
- ath6kl_cfg80211_scan_complete_event(ar, -ECANCELED);
-}
-
/* WMI Event handlers */
-static const char *get_hw_id_string(u32 id)
-{
- switch (id) {
- case AR6003_REV1_VERSION:
- return "1.0";
- case AR6003_REV2_VERSION:
- return "2.0";
- case AR6003_REV3_VERSION:
- return "2.1.1";
- default:
- return "unknown";
- }
-}
-
void ath6kl_ready_event(void *devt, u8 *datap, u32 sw_ver, u32 abi_ver)
{
struct ath6kl *ar = devt;
- struct net_device *dev = ar->net_dev;
- memcpy(dev->dev_addr, datap, ETH_ALEN);
+ memcpy(ar->mac_addr, datap, ETH_ALEN);
ath6kl_dbg(ATH6KL_DBG_TRC, "%s: mac addr = %pM\n",
- __func__, dev->dev_addr);
+ __func__, ar->mac_addr);
ar->version.wlan_ver = sw_ver;
ar->version.abi_ver = abi_ver;
- snprintf(ar->wdev->wiphy->fw_version,
- sizeof(ar->wdev->wiphy->fw_version),
+ snprintf(ar->wiphy->fw_version,
+ sizeof(ar->wiphy->fw_version),
"%u.%u.%u.%u",
(ar->version.wlan_ver & 0xf0000000) >> 28,
(ar->version.wlan_ver & 0x0f000000) >> 24,
@@ -1000,79 +549,85 @@ void ath6kl_ready_event(void *devt, u8 *datap, u32 sw_ver, u32 abi_ver)
/* indicate to the waiting thread that the ready event was received */
set_bit(WMI_READY, &ar->flag);
wake_up(&ar->event_wq);
-
- ath6kl_info("hw %s fw %s%s\n",
- get_hw_id_string(ar->wdev->wiphy->hw_version),
- ar->wdev->wiphy->fw_version,
- test_bit(TESTMODE, &ar->flag) ? " testmode" : "");
}
-void ath6kl_scan_complete_evt(struct ath6kl *ar, int status)
+void ath6kl_scan_complete_evt(struct ath6kl_vif *vif, int status)
{
- ath6kl_cfg80211_scan_complete_event(ar, status);
+ struct ath6kl *ar = vif->ar;
+ bool aborted = false;
+
+ if (status != WMI_SCAN_STATUS_SUCCESS)
+ aborted = true;
+
+ ath6kl_cfg80211_scan_complete_event(vif, aborted);
if (!ar->usr_bss_filter) {
- clear_bit(CLEAR_BSSFILTER_ON_BEACON, &ar->flag);
- ath6kl_wmi_bssfilter_cmd(ar->wmi, NONE_BSS_FILTER, 0);
+ clear_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags);
+ ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
+ NONE_BSS_FILTER, 0);
}
- ath6kl_dbg(ATH6KL_DBG_WLAN_SCAN, "scan complete: %d\n", status);
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "scan complete: %d\n", status);
}
-void ath6kl_connect_event(struct ath6kl *ar, u16 channel, u8 *bssid,
+void ath6kl_connect_event(struct ath6kl_vif *vif, u16 channel, u8 *bssid,
u16 listen_int, u16 beacon_int,
enum network_type net_type, u8 beacon_ie_len,
u8 assoc_req_len, u8 assoc_resp_len,
u8 *assoc_info)
{
- unsigned long flags;
+ struct ath6kl *ar = vif->ar;
- ath6kl_cfg80211_connect_event(ar, channel, bssid,
+ ath6kl_cfg80211_connect_event(vif, channel, bssid,
listen_int, beacon_int,
net_type, beacon_ie_len,
assoc_req_len, assoc_resp_len,
assoc_info);
- memcpy(ar->bssid, bssid, sizeof(ar->bssid));
- ar->bss_ch = channel;
+ memcpy(vif->bssid, bssid, sizeof(vif->bssid));
+ vif->bss_ch = channel;
- if ((ar->nw_type == INFRA_NETWORK))
- ath6kl_wmi_listeninterval_cmd(ar->wmi, ar->listen_intvl_t,
+ if ((vif->nw_type == INFRA_NETWORK))
+ ath6kl_wmi_listeninterval_cmd(ar->wmi, vif->fw_vif_idx,
+ ar->listen_intvl_t,
ar->listen_intvl_b);
- netif_wake_queue(ar->net_dev);
+ netif_wake_queue(vif->ndev);
/* Update connect & link status atomically */
- spin_lock_irqsave(&ar->lock, flags);
- set_bit(CONNECTED, &ar->flag);
- clear_bit(CONNECT_PEND, &ar->flag);
- netif_carrier_on(ar->net_dev);
- spin_unlock_irqrestore(&ar->lock, flags);
+ spin_lock_bh(&vif->if_lock);
+ set_bit(CONNECTED, &vif->flags);
+ clear_bit(CONNECT_PEND, &vif->flags);
+ netif_carrier_on(vif->ndev);
+ spin_unlock_bh(&vif->if_lock);
- aggr_reset_state(ar->aggr_cntxt);
- ar->reconnect_flag = 0;
+ aggr_reset_state(vif->aggr_cntxt);
+ vif->reconnect_flag = 0;
- if ((ar->nw_type == ADHOC_NETWORK) && ar->ibss_ps_enable) {
+ if ((vif->nw_type == ADHOC_NETWORK) && ar->ibss_ps_enable) {
memset(ar->node_map, 0, sizeof(ar->node_map));
ar->node_num = 0;
ar->next_ep_id = ENDPOINT_2;
}
if (!ar->usr_bss_filter) {
- set_bit(CLEAR_BSSFILTER_ON_BEACON, &ar->flag);
- ath6kl_wmi_bssfilter_cmd(ar->wmi, CURRENT_BSS_FILTER, 0);
+ set_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags);
+ ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
+ CURRENT_BSS_FILTER, 0);
}
}
-void ath6kl_tkip_micerr_event(struct ath6kl *ar, u8 keyid, bool ismcast)
+void ath6kl_tkip_micerr_event(struct ath6kl_vif *vif, u8 keyid, bool ismcast)
{
struct ath6kl_sta *sta;
+ struct ath6kl *ar = vif->ar;
u8 tsc[6];
+
/*
* For AP case, keyid will have aid of STA which sent pkt with
* MIC error. Use this aid to get MAC & send it to hostapd.
*/
- if (ar->nw_type == AP_NETWORK) {
+ if (vif->nw_type == AP_NETWORK) {
sta = ath6kl_find_sta_by_aid(ar, (keyid >> 2));
if (!sta)
return;
@@ -1081,19 +636,20 @@ void ath6kl_tkip_micerr_event(struct ath6kl *ar, u8 keyid, bool ismcast)
"ap tkip mic error received from aid=%d\n", keyid);
memset(tsc, 0, sizeof(tsc)); /* FIX: get correct TSC */
- cfg80211_michael_mic_failure(ar->net_dev, sta->mac,
+ cfg80211_michael_mic_failure(vif->ndev, sta->mac,
NL80211_KEYTYPE_PAIRWISE, keyid,
tsc, GFP_KERNEL);
} else
- ath6kl_cfg80211_tkip_micerr_event(ar, keyid, ismcast);
+ ath6kl_cfg80211_tkip_micerr_event(vif, keyid, ismcast);
}
-static void ath6kl_update_target_stats(struct ath6kl *ar, u8 *ptr, u32 len)
+static void ath6kl_update_target_stats(struct ath6kl_vif *vif, u8 *ptr, u32 len)
{
struct wmi_target_stats *tgt_stats =
(struct wmi_target_stats *) ptr;
- struct target_stats *stats = &ar->target_stats;
+ struct ath6kl *ar = vif->ar;
+ struct target_stats *stats = &vif->target_stats;
struct tkip_ccmp_stats *ccmp_stats;
u8 ac;
@@ -1189,8 +745,8 @@ static void ath6kl_update_target_stats(struct ath6kl *ar, u8 *ptr, u32 len)
stats->wow_evt_discarded +=
le16_to_cpu(tgt_stats->wow_stats.wow_evt_discarded);
- if (test_bit(STATS_UPDATE_PEND, &ar->flag)) {
- clear_bit(STATS_UPDATE_PEND, &ar->flag);
+ if (test_bit(STATS_UPDATE_PEND, &vif->flags)) {
+ clear_bit(STATS_UPDATE_PEND, &vif->flags);
wake_up(&ar->event_wq);
}
}
@@ -1200,14 +756,15 @@ static void ath6kl_add_le32(__le32 *var, __le32 val)
*var = cpu_to_le32(le32_to_cpu(*var) + le32_to_cpu(val));
}
-void ath6kl_tgt_stats_event(struct ath6kl *ar, u8 *ptr, u32 len)
+void ath6kl_tgt_stats_event(struct ath6kl_vif *vif, u8 *ptr, u32 len)
{
struct wmi_ap_mode_stat *p = (struct wmi_ap_mode_stat *) ptr;
+ struct ath6kl *ar = vif->ar;
struct wmi_ap_mode_stat *ap = &ar->ap_stats;
struct wmi_per_sta_stat *st_ap, *st_p;
u8 ac;
- if (ar->nw_type == AP_NETWORK) {
+ if (vif->nw_type == AP_NETWORK) {
if (len < sizeof(*p))
return;
@@ -1226,7 +783,7 @@ void ath6kl_tgt_stats_event(struct ath6kl *ar, u8 *ptr, u32 len)
}
} else {
- ath6kl_update_target_stats(ar, ptr, len);
+ ath6kl_update_target_stats(vif, ptr, len);
}
}
@@ -1245,11 +802,12 @@ void ath6kl_txpwr_rx_evt(void *devt, u8 tx_pwr)
wake_up(&ar->event_wq);
}
-void ath6kl_pspoll_event(struct ath6kl *ar, u8 aid)
+void ath6kl_pspoll_event(struct ath6kl_vif *vif, u8 aid)
{
struct ath6kl_sta *conn;
struct sk_buff *skb;
bool psq_empty = false;
+ struct ath6kl *ar = vif->ar;
conn = ath6kl_find_sta_by_aid(ar, aid);
@@ -1272,7 +830,7 @@ void ath6kl_pspoll_event(struct ath6kl *ar, u8 aid)
spin_unlock_bh(&conn->psq_lock);
conn->sta_flags |= STA_PS_POLLED;
- ath6kl_data_tx(skb, ar->net_dev);
+ ath6kl_data_tx(skb, vif->ndev);
conn->sta_flags &= ~STA_PS_POLLED;
spin_lock_bh(&conn->psq_lock);
@@ -1280,13 +838,14 @@ void ath6kl_pspoll_event(struct ath6kl *ar, u8 aid)
spin_unlock_bh(&conn->psq_lock);
if (psq_empty)
- ath6kl_wmi_set_pvb_cmd(ar->wmi, conn->aid, 0);
+ ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx, conn->aid, 0);
}
-void ath6kl_dtimexpiry_event(struct ath6kl *ar)
+void ath6kl_dtimexpiry_event(struct ath6kl_vif *vif)
{
bool mcastq_empty = false;
struct sk_buff *skb;
+ struct ath6kl *ar = vif->ar;
/*
* If there are no associated STAs, ignore the DTIM expiry event.
@@ -1308,31 +867,31 @@ void ath6kl_dtimexpiry_event(struct ath6kl *ar)
return;
/* set the STA flag to dtim_expired for the frame to go out */
- set_bit(DTIM_EXPIRED, &ar->flag);
+ set_bit(DTIM_EXPIRED, &vif->flags);
spin_lock_bh(&ar->mcastpsq_lock);
while ((skb = skb_dequeue(&ar->mcastpsq)) != NULL) {
spin_unlock_bh(&ar->mcastpsq_lock);
- ath6kl_data_tx(skb, ar->net_dev);
+ ath6kl_data_tx(skb, vif->ndev);
spin_lock_bh(&ar->mcastpsq_lock);
}
spin_unlock_bh(&ar->mcastpsq_lock);
- clear_bit(DTIM_EXPIRED, &ar->flag);
+ clear_bit(DTIM_EXPIRED, &vif->flags);
/* clear the LSB of the BitMapCtl field of the TIM IE */
- ath6kl_wmi_set_pvb_cmd(ar->wmi, MCAST_AID, 0);
+ ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx, MCAST_AID, 0);
}
-void ath6kl_disconnect_event(struct ath6kl *ar, u8 reason, u8 *bssid,
+void ath6kl_disconnect_event(struct ath6kl_vif *vif, u8 reason, u8 *bssid,
u8 assoc_resp_len, u8 *assoc_info,
u16 prot_reason_status)
{
- unsigned long flags;
+ struct ath6kl *ar = vif->ar;
- if (ar->nw_type == AP_NETWORK) {
+ if (vif->nw_type == AP_NETWORK) {
if (!ath6kl_remove_sta(ar, bssid, prot_reason_status))
return;
@@ -1344,31 +903,31 @@ void ath6kl_disconnect_event(struct ath6kl *ar, u8 reason, u8 *bssid,
/* clear the LSB of the TIM IE's BitMapCtl field */
if (test_bit(WMI_READY, &ar->flag))
- ath6kl_wmi_set_pvb_cmd(ar->wmi, MCAST_AID, 0);
+ ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx,
+ MCAST_AID, 0);
}
if (!is_broadcast_ether_addr(bssid)) {
/* send event to application */
- cfg80211_del_sta(ar->net_dev, bssid, GFP_KERNEL);
+ cfg80211_del_sta(vif->ndev, bssid, GFP_KERNEL);
}
- if (memcmp(ar->net_dev->dev_addr, bssid, ETH_ALEN) == 0) {
- memset(ar->wep_key_list, 0, sizeof(ar->wep_key_list));
- clear_bit(CONNECTED, &ar->flag);
+ if (memcmp(vif->ndev->dev_addr, bssid, ETH_ALEN) == 0) {
+ memset(vif->wep_key_list, 0, sizeof(vif->wep_key_list));
+ clear_bit(CONNECTED, &vif->flags);
}
return;
}
- ath6kl_cfg80211_disconnect_event(ar, reason, bssid,
+ ath6kl_cfg80211_disconnect_event(vif, reason, bssid,
assoc_resp_len, assoc_info,
prot_reason_status);
- aggr_reset_state(ar->aggr_cntxt);
+ aggr_reset_state(vif->aggr_cntxt);
- del_timer(&ar->disconnect_timer);
+ del_timer(&vif->disconnect_timer);
- ath6kl_dbg(ATH6KL_DBG_WLAN_CONNECT,
- "disconnect reason is %d\n", reason);
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "disconnect reason is %d\n", reason);
/*
* If the event is due to disconnect cmd from the host, only they
@@ -1377,83 +936,88 @@ void ath6kl_disconnect_event(struct ath6kl *ar, u8 reason, u8 *bssid,
*/
if (reason == DISCONNECT_CMD) {
if (!ar->usr_bss_filter && test_bit(WMI_READY, &ar->flag))
- ath6kl_wmi_bssfilter_cmd(ar->wmi, NONE_BSS_FILTER, 0);
+ ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
+ NONE_BSS_FILTER, 0);
} else {
- set_bit(CONNECT_PEND, &ar->flag);
+ set_bit(CONNECT_PEND, &vif->flags);
if (((reason == ASSOC_FAILED) &&
(prot_reason_status == 0x11)) ||
((reason == ASSOC_FAILED) && (prot_reason_status == 0x0)
- && (ar->reconnect_flag == 1))) {
- set_bit(CONNECTED, &ar->flag);
+ && (vif->reconnect_flag == 1))) {
+ set_bit(CONNECTED, &vif->flags);
return;
}
}
/* update connect & link status atomically */
- spin_lock_irqsave(&ar->lock, flags);
- clear_bit(CONNECTED, &ar->flag);
- netif_carrier_off(ar->net_dev);
- spin_unlock_irqrestore(&ar->lock, flags);
+ spin_lock_bh(&vif->if_lock);
+ clear_bit(CONNECTED, &vif->flags);
+ netif_carrier_off(vif->ndev);
+ spin_unlock_bh(&vif->if_lock);
- if ((reason != CSERV_DISCONNECT) || (ar->reconnect_flag != 1))
- ar->reconnect_flag = 0;
+ if ((reason != CSERV_DISCONNECT) || (vif->reconnect_flag != 1))
+ vif->reconnect_flag = 0;
if (reason != CSERV_DISCONNECT)
ar->user_key_ctrl = 0;
- netif_stop_queue(ar->net_dev);
- memset(ar->bssid, 0, sizeof(ar->bssid));
- ar->bss_ch = 0;
+ netif_stop_queue(vif->ndev);
+ memset(vif->bssid, 0, sizeof(vif->bssid));
+ vif->bss_ch = 0;
ath6kl_tx_data_cleanup(ar);
}
-static int ath6kl_open(struct net_device *dev)
+struct ath6kl_vif *ath6kl_vif_first(struct ath6kl *ar)
{
- struct ath6kl *ar = ath6kl_priv(dev);
- unsigned long flags;
+ struct ath6kl_vif *vif;
- spin_lock_irqsave(&ar->lock, flags);
+ spin_lock_bh(&ar->list_lock);
+ if (list_empty(&ar->vif_list)) {
+ spin_unlock_bh(&ar->list_lock);
+ return NULL;
+ }
- set_bit(WLAN_ENABLED, &ar->flag);
+ vif = list_first_entry(&ar->vif_list, struct ath6kl_vif, list);
- if (test_bit(CONNECTED, &ar->flag)) {
+ spin_unlock_bh(&ar->list_lock);
+
+ return vif;
+}
+
+static int ath6kl_open(struct net_device *dev)
+{
+ struct ath6kl_vif *vif = netdev_priv(dev);
+
+ set_bit(WLAN_ENABLED, &vif->flags);
+
+ if (test_bit(CONNECTED, &vif->flags)) {
netif_carrier_on(dev);
netif_wake_queue(dev);
} else
netif_carrier_off(dev);
- spin_unlock_irqrestore(&ar->lock, flags);
-
return 0;
}
static int ath6kl_close(struct net_device *dev)
{
- struct ath6kl *ar = ath6kl_priv(dev);
+ struct ath6kl_vif *vif = netdev_priv(dev);
netif_stop_queue(dev);
- ath6kl_disconnect(ar);
-
- if (test_bit(WMI_READY, &ar->flag)) {
- if (ath6kl_wmi_scanparams_cmd(ar->wmi, 0xFFFF, 0, 0, 0, 0, 0, 0,
- 0, 0, 0))
- return -EIO;
-
- clear_bit(WLAN_ENABLED, &ar->flag);
- }
+ ath6kl_cfg80211_stop(vif);
- ath6kl_cfg80211_scan_complete_event(ar, -ECANCELED);
+ clear_bit(WLAN_ENABLED, &vif->flags);
return 0;
}
static struct net_device_stats *ath6kl_get_stats(struct net_device *dev)
{
- struct ath6kl *ar = ath6kl_priv(dev);
+ struct ath6kl_vif *vif = netdev_priv(dev);
- return &ar->net_stats;
+ return &vif->net_stats;
}
static struct net_device_ops ath6kl_netdev_ops = {
@@ -1466,6 +1030,7 @@ static struct net_device_ops ath6kl_netdev_ops = {
void init_netdev(struct net_device *dev)
{
dev->netdev_ops = &ath6kl_netdev_ops;
+ dev->destructor = free_netdev;
dev->watchdog_timeo = ATH6KL_TX_TIMEOUT;
dev->needed_headroom = ETH_HLEN;
diff --git a/drivers/net/wireless/ath/ath6kl/sdio.c b/drivers/net/wireless/ath/ath6kl/sdio.c
index 066d4f88807f..9475e2d0d0b7 100644
--- a/drivers/net/wireless/ath/ath6kl/sdio.c
+++ b/drivers/net/wireless/ath/ath6kl/sdio.c
@@ -22,7 +22,7 @@
#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sd.h>
-#include "htc_hif.h"
+#include "hif.h"
#include "hif-ops.h"
#include "target.h"
#include "debug.h"
@@ -40,12 +40,18 @@ struct ath6kl_sdio {
struct bus_request bus_req[BUS_REQUEST_MAX_NUM];
struct ath6kl *ar;
+
u8 *dma_buffer;
+ /* protects access to dma_buffer */
+ struct mutex dma_buffer_mutex;
+
/* scatter request list head */
struct list_head scat_req;
spinlock_t scat_lock;
+ bool scatter_enabled;
+
bool is_disabled;
atomic_t irq_handling;
const struct sdio_device_id *id;
@@ -135,6 +141,8 @@ static int ath6kl_sdio_io(struct sdio_func *func, u32 request, u32 addr,
{
int ret = 0;
+ sdio_claim_host(func);
+
if (request & HIF_WRITE) {
/* FIXME: looks like ugly workaround for something */
if (addr >= HIF_MBOX_BASE_ADDR &&
@@ -156,6 +164,8 @@ static int ath6kl_sdio_io(struct sdio_func *func, u32 request, u32 addr,
ret = sdio_memcpy_fromio(func, buf, addr, len);
}
+ sdio_release_host(func);
+
ath6kl_dbg(ATH6KL_DBG_SDIO, "%s addr 0x%x%s buf 0x%p len %d\n",
request & HIF_WRITE ? "wr" : "rd", addr,
request & HIF_FIXED_ADDRESS ? " (fixed)" : "", buf, len);
@@ -167,12 +177,11 @@ static int ath6kl_sdio_io(struct sdio_func *func, u32 request, u32 addr,
static struct bus_request *ath6kl_sdio_alloc_busreq(struct ath6kl_sdio *ar_sdio)
{
struct bus_request *bus_req;
- unsigned long flag;
- spin_lock_irqsave(&ar_sdio->lock, flag);
+ spin_lock_bh(&ar_sdio->lock);
if (list_empty(&ar_sdio->bus_req_freeq)) {
- spin_unlock_irqrestore(&ar_sdio->lock, flag);
+ spin_unlock_bh(&ar_sdio->lock);
return NULL;
}
@@ -180,7 +189,7 @@ static struct bus_request *ath6kl_sdio_alloc_busreq(struct ath6kl_sdio *ar_sdio)
struct bus_request, list);
list_del(&bus_req->list);
- spin_unlock_irqrestore(&ar_sdio->lock, flag);
+ spin_unlock_bh(&ar_sdio->lock);
ath6kl_dbg(ATH6KL_DBG_SCATTER, "%s: bus request 0x%p\n",
__func__, bus_req);
@@ -190,14 +199,12 @@ static struct bus_request *ath6kl_sdio_alloc_busreq(struct ath6kl_sdio *ar_sdio)
static void ath6kl_sdio_free_bus_req(struct ath6kl_sdio *ar_sdio,
struct bus_request *bus_req)
{
- unsigned long flag;
-
ath6kl_dbg(ATH6KL_DBG_SCATTER, "%s: bus request 0x%p\n",
__func__, bus_req);
- spin_lock_irqsave(&ar_sdio->lock, flag);
+ spin_lock_bh(&ar_sdio->lock);
list_add_tail(&bus_req->list, &ar_sdio->bus_req_freeq);
- spin_unlock_irqrestore(&ar_sdio->lock, flag);
+ spin_unlock_bh(&ar_sdio->lock);
}
static void ath6kl_sdio_setup_scat_data(struct hif_scatter_req *scat_req,
@@ -291,10 +298,14 @@ static int ath6kl_sdio_scat_rw(struct ath6kl_sdio *ar_sdio,
mmc_req.cmd = &cmd;
mmc_req.data = &data;
+ sdio_claim_host(ar_sdio->func);
+
mmc_set_data_timeout(&data, ar_sdio->func->card);
/* synchronous call to process request */
mmc_wait_for_req(ar_sdio->func->card->host, &mmc_req);
+ sdio_release_host(ar_sdio->func);
+
status = cmd.error ? cmd.error : data.error;
scat_complete:
@@ -389,17 +400,19 @@ static int ath6kl_sdio_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf,
if (buf_needs_bounce(buf)) {
if (!ar_sdio->dma_buffer)
return -ENOMEM;
+ mutex_lock(&ar_sdio->dma_buffer_mutex);
tbuf = ar_sdio->dma_buffer;
memcpy(tbuf, buf, len);
bounced = true;
} else
tbuf = buf;
- sdio_claim_host(ar_sdio->func);
ret = ath6kl_sdio_io(ar_sdio->func, request, addr, tbuf, len);
if ((request & HIF_READ) && bounced)
memcpy(buf, tbuf, len);
- sdio_release_host(ar_sdio->func);
+
+ if (bounced)
+ mutex_unlock(&ar_sdio->dma_buffer_mutex);
return ret;
}
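
This hunk makes the single preallocated DMA bounce buffer safe for concurrent callers by serializing it with dma_buffer_mutex instead of relying on the (now removed) sdio_claim_host() around the transfer. Since the hunk only shows fragments of ath6kl_sdio_read_write_sync(), here is a consolidated sketch of the resulting bounce-buffer path (illustrative; error handling trimmed):

	u8 *tbuf = buf;
	bool bounced = false;

	if (buf_needs_bounce(buf)) {
		mutex_lock(&ar_sdio->dma_buffer_mutex);
		tbuf = ar_sdio->dma_buffer;
		memcpy(tbuf, buf, len);		/* stage the caller's data */
		bounced = true;
	}

	ret = ath6kl_sdio_io(ar_sdio->func, request, addr, tbuf, len);

	if ((request & HIF_READ) && bounced)
		memcpy(buf, tbuf, len);		/* copy read data back */

	if (bounced)
		mutex_unlock(&ar_sdio->dma_buffer_mutex);
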
@@ -418,29 +431,25 @@ static void __ath6kl_sdio_write_async(struct ath6kl_sdio *ar_sdio,
req->request);
context = req->packet;
ath6kl_sdio_free_bus_req(ar_sdio, req);
- ath6kldev_rw_comp_handler(context, status);
+ ath6kl_hif_rw_comp_handler(context, status);
}
}
static void ath6kl_sdio_write_async_work(struct work_struct *work)
{
struct ath6kl_sdio *ar_sdio;
- unsigned long flags;
struct bus_request *req, *tmp_req;
ar_sdio = container_of(work, struct ath6kl_sdio, wr_async_work);
- sdio_claim_host(ar_sdio->func);
- spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
+ spin_lock_bh(&ar_sdio->wr_async_lock);
list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
list_del(&req->list);
- spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags);
+ spin_unlock_bh(&ar_sdio->wr_async_lock);
__ath6kl_sdio_write_async(ar_sdio, req);
- spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
+ spin_lock_bh(&ar_sdio->wr_async_lock);
}
- spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags);
-
- sdio_release_host(ar_sdio->func);
+ spin_unlock_bh(&ar_sdio->wr_async_lock);
}
static void ath6kl_sdio_irq_handler(struct sdio_func *func)
@@ -459,20 +468,23 @@ static void ath6kl_sdio_irq_handler(struct sdio_func *func)
*/
sdio_release_host(ar_sdio->func);
- status = ath6kldev_intr_bh_handler(ar_sdio->ar);
+ status = ath6kl_hif_intr_bh_handler(ar_sdio->ar);
sdio_claim_host(ar_sdio->func);
atomic_set(&ar_sdio->irq_handling, 0);
WARN_ON(status && status != -ECANCELED);
}
-static int ath6kl_sdio_power_on(struct ath6kl_sdio *ar_sdio)
+static int ath6kl_sdio_power_on(struct ath6kl *ar)
{
+ struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
struct sdio_func *func = ar_sdio->func;
int ret = 0;
if (!ar_sdio->is_disabled)
return 0;
+ ath6kl_dbg(ATH6KL_DBG_BOOT, "sdio power on\n");
+
sdio_claim_host(func);
ret = sdio_enable_func(func);
@@ -495,13 +507,16 @@ static int ath6kl_sdio_power_on(struct ath6kl_sdio *ar_sdio)
return ret;
}
-static int ath6kl_sdio_power_off(struct ath6kl_sdio *ar_sdio)
+static int ath6kl_sdio_power_off(struct ath6kl *ar)
{
+ struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
int ret;
if (ar_sdio->is_disabled)
return 0;
+ ath6kl_dbg(ATH6KL_DBG_BOOT, "sdio power off\n");
+
/* Disable the card */
sdio_claim_host(ar_sdio->func);
ret = sdio_disable_func(ar_sdio->func);
@@ -521,7 +536,6 @@ static int ath6kl_sdio_write_async(struct ath6kl *ar, u32 address, u8 *buffer,
{
struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
struct bus_request *bus_req;
- unsigned long flags;
bus_req = ath6kl_sdio_alloc_busreq(ar_sdio);
@@ -534,9 +548,9 @@ static int ath6kl_sdio_write_async(struct ath6kl *ar, u32 address, u8 *buffer,
bus_req->request = request;
bus_req->packet = packet;
- spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
+ spin_lock_bh(&ar_sdio->wr_async_lock);
list_add_tail(&bus_req->list, &ar_sdio->wr_asyncq);
- spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags);
+ spin_unlock_bh(&ar_sdio->wr_async_lock);
queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work);
return 0;
@@ -582,9 +596,8 @@ static struct hif_scatter_req *ath6kl_sdio_scatter_req_get(struct ath6kl *ar)
{
struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
struct hif_scatter_req *node = NULL;
- unsigned long flag;
- spin_lock_irqsave(&ar_sdio->scat_lock, flag);
+ spin_lock_bh(&ar_sdio->scat_lock);
if (!list_empty(&ar_sdio->scat_req)) {
node = list_first_entry(&ar_sdio->scat_req,
@@ -592,7 +605,7 @@ static struct hif_scatter_req *ath6kl_sdio_scatter_req_get(struct ath6kl *ar)
list_del(&node->list);
}
- spin_unlock_irqrestore(&ar_sdio->scat_lock, flag);
+ spin_unlock_bh(&ar_sdio->scat_lock);
return node;
}
@@ -601,13 +614,12 @@ static void ath6kl_sdio_scatter_req_add(struct ath6kl *ar,
struct hif_scatter_req *s_req)
{
struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
- unsigned long flag;
- spin_lock_irqsave(&ar_sdio->scat_lock, flag);
+ spin_lock_bh(&ar_sdio->scat_lock);
list_add_tail(&s_req->list, &ar_sdio->scat_req);
- spin_unlock_irqrestore(&ar_sdio->scat_lock, flag);
+ spin_unlock_bh(&ar_sdio->scat_lock);
}
@@ -618,7 +630,6 @@ static int ath6kl_sdio_async_rw_scatter(struct ath6kl *ar,
struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
u32 request = scat_req->req;
int status = 0;
- unsigned long flags;
if (!scat_req->len)
return -EINVAL;
@@ -627,14 +638,12 @@ static int ath6kl_sdio_async_rw_scatter(struct ath6kl *ar,
"hif-scatter: total len: %d scatter entries: %d\n",
scat_req->len, scat_req->scat_entries);
- if (request & HIF_SYNCHRONOUS) {
- sdio_claim_host(ar_sdio->func);
+ if (request & HIF_SYNCHRONOUS)
status = ath6kl_sdio_scat_rw(ar_sdio, scat_req->busrequest);
- sdio_release_host(ar_sdio->func);
- } else {
- spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
+ else {
+ spin_lock_bh(&ar_sdio->wr_async_lock);
list_add_tail(&scat_req->busrequest->list, &ar_sdio->wr_asyncq);
- spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags);
+ spin_unlock_bh(&ar_sdio->wr_async_lock);
queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work);
}
@@ -646,23 +655,27 @@ static void ath6kl_sdio_cleanup_scatter(struct ath6kl *ar)
{
struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
struct hif_scatter_req *s_req, *tmp_req;
- unsigned long flag;
/* empty the free list */
- spin_lock_irqsave(&ar_sdio->scat_lock, flag);
+ spin_lock_bh(&ar_sdio->scat_lock);
list_for_each_entry_safe(s_req, tmp_req, &ar_sdio->scat_req, list) {
list_del(&s_req->list);
- spin_unlock_irqrestore(&ar_sdio->scat_lock, flag);
+ spin_unlock_bh(&ar_sdio->scat_lock);
+ /*
+ * FIXME: should we also call completion handler with
+ * ath6kl_hif_rw_comp_handler() with status -ECANCELED so
+ * that the packet is properly freed?
+ */
if (s_req->busrequest)
ath6kl_sdio_free_bus_req(ar_sdio, s_req->busrequest);
kfree(s_req->virt_dma_buf);
kfree(s_req->sgentries);
kfree(s_req);
- spin_lock_irqsave(&ar_sdio->scat_lock, flag);
+ spin_lock_bh(&ar_sdio->scat_lock);
}
- spin_unlock_irqrestore(&ar_sdio->scat_lock, flag);
+ spin_unlock_bh(&ar_sdio->scat_lock);
}
/* setup of HIF scatter resources */
@@ -673,6 +686,11 @@ static int ath6kl_sdio_enable_scatter(struct ath6kl *ar)
int ret;
bool virt_scat = false;
+ if (ar_sdio->scatter_enabled)
+ return 0;
+
+ ar_sdio->scatter_enabled = true;
+
/* check if host supports scatter and it meets our requirements */
if (ar_sdio->func->card->host->max_segs < MAX_SCATTER_ENTRIES_PER_REQ) {
ath6kl_err("host only supports scatter of :%d entries, need: %d\n",
@@ -687,8 +705,8 @@ static int ath6kl_sdio_enable_scatter(struct ath6kl *ar)
MAX_SCATTER_REQUESTS, virt_scat);
if (!ret) {
- ath6kl_dbg(ATH6KL_DBG_SCATTER,
- "hif-scatter enabled: max scatter req : %d entries: %d\n",
+ ath6kl_dbg(ATH6KL_DBG_BOOT,
+ "hif-scatter enabled requests %d entries %d\n",
MAX_SCATTER_REQUESTS,
MAX_SCATTER_ENTRIES_PER_REQ);
@@ -712,8 +730,8 @@ static int ath6kl_sdio_enable_scatter(struct ath6kl *ar)
return ret;
}
- ath6kl_dbg(ATH6KL_DBG_SCATTER,
- "Vitual scatter enabled, max_scat_req:%d, entries:%d\n",
+ ath6kl_dbg(ATH6KL_DBG_BOOT,
+ "virtual scatter enabled requests %d entries %d\n",
ATH6KL_SCATTER_REQS, ATH6KL_SCATTER_ENTRIES_PER_REQ);
target->max_scat_entries = ATH6KL_SCATTER_ENTRIES_PER_REQ;
@@ -724,7 +742,47 @@ static int ath6kl_sdio_enable_scatter(struct ath6kl *ar)
return 0;
}
-static int ath6kl_sdio_suspend(struct ath6kl *ar)
+static int ath6kl_sdio_config(struct ath6kl *ar)
+{
+ struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
+ struct sdio_func *func = ar_sdio->func;
+ int ret;
+
+ sdio_claim_host(func);
+
+ if ((ar_sdio->id->device & MANUFACTURER_ID_ATH6KL_BASE_MASK) >=
+ MANUFACTURER_ID_AR6003_BASE) {
+ /* enable 4-bit ASYNC interrupt on AR6003 or later */
+ ret = ath6kl_sdio_func0_cmd52_wr_byte(func->card,
+ CCCR_SDIO_IRQ_MODE_REG,
+ SDIO_IRQ_MODE_ASYNC_4BIT_IRQ);
+ if (ret) {
+ ath6kl_err("Failed to enable 4-bit async irq mode %d\n",
+ ret);
+ goto out;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_BOOT, "4-bit async irq mode enabled\n");
+ }
+
+ /* give us some time to enable, in ms */
+ func->enable_timeout = 100;
+
+ ret = sdio_set_block_size(func, HIF_MBOX_BLOCK_SIZE);
+ if (ret) {
+ ath6kl_err("Set sdio block size %d failed: %d)\n",
+ HIF_MBOX_BLOCK_SIZE, ret);
+ sdio_release_host(func);
+ goto out;
+ }
+
+out:
+ sdio_release_host(func);
+
+ return ret;
+}
+
+static int ath6kl_sdio_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
{
struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
struct sdio_func *func = ar_sdio->func;
@@ -733,12 +791,14 @@ static int ath6kl_sdio_suspend(struct ath6kl *ar)
flags = sdio_get_host_pm_caps(func);
- if (!(flags & MMC_PM_KEEP_POWER))
- /* as host doesn't support keep power we need to bail out */
- ath6kl_dbg(ATH6KL_DBG_SDIO,
- "func %d doesn't support MMC_PM_KEEP_POWER\n",
- func->num);
- return -EINVAL;
+ ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio suspend pm_caps 0x%x\n", flags);
+
+ if (!(flags & MMC_PM_KEEP_POWER) ||
+ (ar->conf_flags & ATH6KL_CONF_SUSPEND_CUTPOWER)) {
+ /* as host doesn't support keep power we need to cut power */
+ return ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_CUTPOWER,
+ NULL);
+ }
ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
if (ret) {
@@ -747,11 +807,367 @@ static int ath6kl_sdio_suspend(struct ath6kl *ar)
return ret;
}
- ath6kl_deep_sleep_enable(ar);
+ if (!(flags & MMC_PM_WAKE_SDIO_IRQ))
+ goto deepsleep;
+
+ /* sdio irq wakes up host */
+
+ if (ar->state == ATH6KL_STATE_SCHED_SCAN) {
+ ret = ath6kl_cfg80211_suspend(ar,
+ ATH6KL_CFG_SUSPEND_SCHED_SCAN,
+ NULL);
+ if (ret) {
+ ath6kl_warn("Schedule scan suspend failed: %d", ret);
+ return ret;
+ }
+
+ ret = sdio_set_host_pm_flags(func, MMC_PM_WAKE_SDIO_IRQ);
+ if (ret)
+ ath6kl_warn("set sdio wake irq flag failed: %d\n", ret);
+
+ return ret;
+ }
+
+ if (wow) {
+ /*
+ * The host sdio controller is capable of keeping power and
+ * of sdio irq wakeup at this point. It's fine to continue
+ * the wow suspend operation.
+ */
+ ret = ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_WOW, wow);
+ if (ret)
+ return ret;
+
+ ret = sdio_set_host_pm_flags(func, MMC_PM_WAKE_SDIO_IRQ);
+ if (ret)
+ ath6kl_err("set sdio wake irq flag failed: %d\n", ret);
+
+ return ret;
+ }
+
+deepsleep:
+ return ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_DEEPSLEEP, NULL);
+}
+
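
Condensed, the suspend path above selects one of four cfg80211 suspend modes; the decision reduces to the following sketch (same conditions as the code above, just flattened):

	if (!(flags & MMC_PM_KEEP_POWER) ||
	    (ar->conf_flags & ATH6KL_CONF_SUSPEND_CUTPOWER))
		mode = ATH6KL_CFG_SUSPEND_CUTPOWER;	/* must cut power */
	else if (!(flags & MMC_PM_WAKE_SDIO_IRQ))
		mode = ATH6KL_CFG_SUSPEND_DEEPSLEEP;
	else if (ar->state == ATH6KL_STATE_SCHED_SCAN)
		mode = ATH6KL_CFG_SUSPEND_SCHED_SCAN;
	else if (wow)
		mode = ATH6KL_CFG_SUSPEND_WOW;
	else
		mode = ATH6KL_CFG_SUSPEND_DEEPSLEEP;
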
+static int ath6kl_sdio_resume(struct ath6kl *ar)
+{
+ switch (ar->state) {
+ case ATH6KL_STATE_OFF:
+ case ATH6KL_STATE_CUTPOWER:
+ ath6kl_dbg(ATH6KL_DBG_SUSPEND,
+ "sdio resume configuring sdio\n");
+
+ /* need to set sdio settings after power is cut from sdio */
+ ath6kl_sdio_config(ar);
+ break;
+
+ case ATH6KL_STATE_ON:
+ break;
+
+ case ATH6KL_STATE_DEEPSLEEP:
+ break;
+
+ case ATH6KL_STATE_WOW:
+ break;
+ case ATH6KL_STATE_SCHED_SCAN:
+ break;
+ }
+
+ ath6kl_cfg80211_resume(ar);
+
+ return 0;
+}
+
+/* set the window address register (using 4-byte register access ). */
+static int ath6kl_set_addrwin_reg(struct ath6kl *ar, u32 reg_addr, u32 addr)
+{
+ int status;
+ u8 addr_val[4];
+ s32 i;
+
+ /*
+ * Write bytes 1,2,3 of the register to set the upper address bytes,
+ * the LSB is written last to initiate the access cycle
+ */
+
+ for (i = 1; i <= 3; i++) {
+ /*
+ * Fill the buffer with the address byte value we want to
+ * hit 4 times.
+ */
+ memset(addr_val, ((u8 *)&addr)[i], 4);
+
+ /*
+ * Hit each byte of the register address with a 4-byte
+ * write operation to the same address, this is a harmless
+ * operation.
+ */
+ status = ath6kl_sdio_read_write_sync(ar, reg_addr + i, addr_val,
+ 4, HIF_WR_SYNC_BYTE_FIX);
+ if (status)
+ break;
+ }
+
+ if (status) {
+ ath6kl_err("%s: failed to write initial bytes of 0x%x "
+ "to window reg: 0x%X\n", __func__,
+ addr, reg_addr);
+ return status;
+ }
+
+ /*
+ * Write the address register again, this time write the whole
+ * 4-byte value. The effect here is that the LSB write causes the
+ * cycle to start, the extra 3 byte write to bytes 1,2,3 has no
+ * effect since we are writing the same values again
+ */
+ status = ath6kl_sdio_read_write_sync(ar, reg_addr, (u8 *)(&addr),
+ 4, HIF_WR_SYNC_BYTE_INC);
+
+ if (status) {
+ ath6kl_err("%s: failed to write 0x%x to window reg: 0x%X\n",
+ __func__, addr, reg_addr);
+ return status;
+ }
+
+ return 0;
+}
+
+static int ath6kl_sdio_diag_read32(struct ath6kl *ar, u32 address, u32 *data)
+{
+ int status;
+
+ /* set window register to start read cycle */
+ status = ath6kl_set_addrwin_reg(ar, WINDOW_READ_ADDR_ADDRESS,
+ address);
+
+ if (status)
+ return status;
+
+ /* read the data */
+ status = ath6kl_sdio_read_write_sync(ar, WINDOW_DATA_ADDRESS,
+ (u8 *)data, sizeof(u32), HIF_RD_SYNC_BYTE_INC);
+ if (status) {
+ ath6kl_err("%s: failed to read from window data addr\n",
+ __func__);
+ return status;
+ }
+
+ return status;
+}
+
+static int ath6kl_sdio_diag_write32(struct ath6kl *ar, u32 address,
+ __le32 data)
+{
+ int status;
+ u32 val = (__force u32) data;
+
+ /* set write data */
+ status = ath6kl_sdio_read_write_sync(ar, WINDOW_DATA_ADDRESS,
+ (u8 *) &val, sizeof(u32), HIF_WR_SYNC_BYTE_INC);
+ if (status) {
+ ath6kl_err("%s: failed to write 0x%x to window data addr\n",
+ __func__, data);
+ return status;
+ }
+
+ /* set window register, which starts the write cycle */
+ return ath6kl_set_addrwin_reg(ar, WINDOW_WRITE_ADDR_ADDRESS,
+ address);
+}
+
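
These diag helpers are exported through the hif_ops table (ath6kl_sdio_ops below), which is how the ath6kl_hif_diag_read32()/ath6kl_hif_diag_write32() calls in main.c reach them. The wrapper presumably looks along these lines (a sketch of the hif-ops indirection; the actual definition lives in hif-ops.h, not in this patch):

	static inline int ath6kl_hif_diag_read32(struct ath6kl *ar,
						 u32 address, u32 *value)
	{
		return ar->hif_ops->diag_read32(ar, address, value);
	}
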
+static int ath6kl_sdio_bmi_credits(struct ath6kl *ar)
+{
+ u32 addr;
+ unsigned long timeout;
+ int ret;
+
+ ar->bmi.cmd_credits = 0;
+
+ /* Read the counter register to get the command credits */
+ addr = COUNT_DEC_ADDRESS + (HTC_MAILBOX_NUM_MAX + ENDPOINT1) * 4;
+
+ timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT);
+ while (time_before(jiffies, timeout) && !ar->bmi.cmd_credits) {
+
+ /*
+ * Hit the credit counter with a 4-byte access, the first byte
+ * read will hit the counter and cause a decrement, while the
+ * remaining 3 bytes have no effect. The rationale behind this
+ * is to make all HIF accesses 4-byte aligned.
+ */
+ ret = ath6kl_sdio_read_write_sync(ar, addr,
+ (u8 *)&ar->bmi.cmd_credits, 4,
+ HIF_RD_SYNC_BYTE_INC);
+ if (ret) {
+ ath6kl_err("Unable to decrement the command credit "
+ "count register: %d\n", ret);
+ return ret;
+ }
+
+ /* The counter is only 8 bits.
+ * Ignore anything in the upper 3 bytes
+ */
+ ar->bmi.cmd_credits &= 0xFF;
+ }
+
+ if (!ar->bmi.cmd_credits) {
+ ath6kl_err("bmi communication timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int ath6kl_bmi_get_rx_lkahd(struct ath6kl *ar)
+{
+ unsigned long timeout;
+ u32 rx_word = 0;
+ int ret = 0;
+
+ timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT);
+ while ((time_before(jiffies, timeout)) && !rx_word) {
+ ret = ath6kl_sdio_read_write_sync(ar,
+ RX_LOOKAHEAD_VALID_ADDRESS,
+ (u8 *)&rx_word, sizeof(rx_word),
+ HIF_RD_SYNC_BYTE_INC);
+ if (ret) {
+ ath6kl_err("unable to read RX_LOOKAHEAD_VALID\n");
+ return ret;
+ }
+
+ /* all we really want is one bit */
+ rx_word &= (1 << ENDPOINT1);
+ }
+
+ if (!rx_word) {
+ ath6kl_err("bmi_recv_buf FIFO empty\n");
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int ath6kl_sdio_bmi_write(struct ath6kl *ar, u8 *buf, u32 len)
+{
+ int ret;
+ u32 addr;
+
+ ret = ath6kl_sdio_bmi_credits(ar);
+ if (ret)
+ return ret;
+
+ addr = ar->mbox_info.htc_addr;
+
+ ret = ath6kl_sdio_read_write_sync(ar, addr, buf, len,
+ HIF_WR_SYNC_BYTE_INC);
+ if (ret)
+ ath6kl_err("unable to send the bmi data to the device\n");
+
+ return ret;
+}
+
+static int ath6kl_sdio_bmi_read(struct ath6kl *ar, u8 *buf, u32 len)
+{
+ int ret;
+ u32 addr;
+
+ /*
+ * During normal bootup, small reads may be required.
+ * Rather than issue an HIF Read and then wait as the Target
+ * adds successive bytes to the FIFO, we wait here until
+ * we know that response data is available.
+ *
+ * This allows us to cleanly timeout on an unexpected
+ * Target failure rather than risk problems at the HIF level.
+ * In particular, this avoids SDIO timeouts and possibly garbage
+ * data on some host controllers. And on an interconnect
+ * such as Compact Flash (as well as some SDIO masters) which
+ * does not provide any indication on data timeout, it avoids
+ * a potential hang or garbage response.
+ *
+ * Synchronization is more difficult for reads larger than the
+ * size of the MBOX FIFO (128B), because the Target is unable
+ * to push the 129th byte of data until AFTER the Host posts an
+ * HIF Read and removes some FIFO data. So for large reads the
+ * Host proceeds to post an HIF Read BEFORE all the data is
+ * actually available to read. Fortunately, large BMI reads do
+ * not occur in practice -- they're supported for debug/development.
+ *
+ * So Host/Target BMI synchronization is divided into these cases:
+ * CASE 1: length < 4
+ * Should not happen
+ *
+ * CASE 2: 4 <= length <= 128
+ * Wait for first 4 bytes to be in FIFO
+ * If CONSERVATIVE_BMI_READ is enabled, also wait for
+ * a BMI command credit, which indicates that the ENTIRE
+ * response is available in the FIFO
+ *
+ * CASE 3: length > 128
+ * Wait for the first 4 bytes to be in FIFO
+ *
+ * For most uses, a small timeout should be sufficient and we will
+ * usually see a response quickly; but there may be some unusual
+ * (debug) cases of BMI_EXECUTE where we want a larger timeout.
+ * For now, we use an unbounded busy loop while waiting for
+ * BMI_EXECUTE.
+ *
+ * If BMI_EXECUTE ever needs to support longer-latency execution,
+ * especially in production, this code needs to be enhanced to sleep
+ * and yield. Also note that BMI_COMMUNICATION_TIMEOUT is currently
+ * a function of Host processor speed.
+ */
+ if (len >= 4) { /* NB: Currently, always true */
+ ret = ath6kl_bmi_get_rx_lkahd(ar);
+ if (ret)
+ return ret;
+ }
+
+ addr = ar->mbox_info.htc_addr;
+ ret = ath6kl_sdio_read_write_sync(ar, addr, buf, len,
+ HIF_RD_SYNC_BYTE_INC);
+ if (ret) {
+ ath6kl_err("Unable to read the bmi data from the device: %d\n",
+ ret);
+ return ret;
+ }
return 0;
}
+static void ath6kl_sdio_stop(struct ath6kl *ar)
+{
+ struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
+ struct bus_request *req, *tmp_req;
+ void *context;
+
+ /* FIXME: make sure that wq is not queued again */
+
+ cancel_work_sync(&ar_sdio->wr_async_work);
+
+ spin_lock_bh(&ar_sdio->wr_async_lock);
+
+ list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
+ list_del(&req->list);
+
+ if (req->scat_req) {
+ /* this is a scatter gather request */
+ req->scat_req->status = -ECANCELED;
+ req->scat_req->complete(ar_sdio->ar->htc_target,
+ req->scat_req);
+ } else {
+ context = req->packet;
+ ath6kl_sdio_free_bus_req(ar_sdio, req);
+ ath6kl_hif_rw_comp_handler(context, -ECANCELED);
+ }
+ }
+
+ spin_unlock_bh(&ar_sdio->wr_async_lock);
+
+ WARN_ON(get_queue_depth(&ar_sdio->scat_req) != 4);
+}
+
static const struct ath6kl_hif_ops ath6kl_sdio_ops = {
.read_write_sync = ath6kl_sdio_read_write_sync,
.write_async = ath6kl_sdio_write_async,
@@ -763,8 +1179,47 @@ static const struct ath6kl_hif_ops ath6kl_sdio_ops = {
.scat_req_rw = ath6kl_sdio_async_rw_scatter,
.cleanup_scatter = ath6kl_sdio_cleanup_scatter,
.suspend = ath6kl_sdio_suspend,
+ .resume = ath6kl_sdio_resume,
+ .diag_read32 = ath6kl_sdio_diag_read32,
+ .diag_write32 = ath6kl_sdio_diag_write32,
+ .bmi_read = ath6kl_sdio_bmi_read,
+ .bmi_write = ath6kl_sdio_bmi_write,
+ .power_on = ath6kl_sdio_power_on,
+ .power_off = ath6kl_sdio_power_off,
+ .stop = ath6kl_sdio_stop,
};
+#ifdef CONFIG_PM_SLEEP
+
+/*
+ * Empty handlers so that mmc subsystem doesn't remove us entirely during
+ * suspend. We instead follow cfg80211 suspend/resume handlers.
+ */
+static int ath6kl_sdio_pm_suspend(struct device *device)
+{
+ ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio pm suspend\n");
+
+ return 0;
+}
+
+static int ath6kl_sdio_pm_resume(struct device *device)
+{
+ ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio pm resume\n");
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(ath6kl_sdio_pm_ops, ath6kl_sdio_pm_suspend,
+ ath6kl_sdio_pm_resume);
+
+#define ATH6KL_SDIO_PM_OPS (&ath6kl_sdio_pm_ops)
+
+#else
+
+#define ATH6KL_SDIO_PM_OPS NULL
+
+#endif /* CONFIG_PM_SLEEP */
+
static int ath6kl_sdio_probe(struct sdio_func *func,
const struct sdio_device_id *id)
{
@@ -773,8 +1228,8 @@ static int ath6kl_sdio_probe(struct sdio_func *func,
struct ath6kl *ar;
int count;
- ath6kl_dbg(ATH6KL_DBG_SDIO,
- "new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n",
+ ath6kl_dbg(ATH6KL_DBG_BOOT,
+ "sdio new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n",
func->num, func->vendor, func->device,
func->max_blksize, func->cur_blksize);
@@ -797,6 +1252,7 @@ static int ath6kl_sdio_probe(struct sdio_func *func,
spin_lock_init(&ar_sdio->lock);
spin_lock_init(&ar_sdio->scat_lock);
spin_lock_init(&ar_sdio->wr_async_lock);
+ mutex_init(&ar_sdio->dma_buffer_mutex);
INIT_LIST_HEAD(&ar_sdio->scat_req);
INIT_LIST_HEAD(&ar_sdio->bus_req_freeq);
@@ -815,62 +1271,29 @@ static int ath6kl_sdio_probe(struct sdio_func *func,
}
ar_sdio->ar = ar;
+ ar->hif_type = ATH6KL_HIF_TYPE_SDIO;
ar->hif_priv = ar_sdio;
ar->hif_ops = &ath6kl_sdio_ops;
+ ar->bmi.max_data_size = 256;
ath6kl_sdio_set_mbox_info(ar);
- sdio_claim_host(func);
-
- if ((ar_sdio->id->device & MANUFACTURER_ID_ATH6KL_BASE_MASK) >=
- MANUFACTURER_ID_AR6003_BASE) {
- /* enable 4-bit ASYNC interrupt on AR6003 or later */
- ret = ath6kl_sdio_func0_cmd52_wr_byte(func->card,
- CCCR_SDIO_IRQ_MODE_REG,
- SDIO_IRQ_MODE_ASYNC_4BIT_IRQ);
- if (ret) {
- ath6kl_err("Failed to enable 4-bit async irq mode %d\n",
- ret);
- sdio_release_host(func);
- goto err_cfg80211;
- }
-
- ath6kl_dbg(ATH6KL_DBG_SDIO, "4-bit async irq mode enabled\n");
- }
-
- /* give us some time to enable, in ms */
- func->enable_timeout = 100;
-
- sdio_release_host(func);
-
- ret = ath6kl_sdio_power_on(ar_sdio);
- if (ret)
- goto err_cfg80211;
-
- sdio_claim_host(func);
-
- ret = sdio_set_block_size(func, HIF_MBOX_BLOCK_SIZE);
+ ret = ath6kl_sdio_config(ar);
if (ret) {
- ath6kl_err("Set sdio block size %d failed: %d)\n",
- HIF_MBOX_BLOCK_SIZE, ret);
- sdio_release_host(func);
- goto err_off;
+ ath6kl_err("Failed to config sdio: %d\n", ret);
+ goto err_core_alloc;
}
- sdio_release_host(func);
-
ret = ath6kl_core_init(ar);
if (ret) {
ath6kl_err("Failed to init ath6kl core\n");
- goto err_off;
+ goto err_core_alloc;
}
return ret;
-err_off:
- ath6kl_sdio_power_off(ar_sdio);
-err_cfg80211:
- ath6kl_cfg80211_deinit(ar_sdio->ar);
+err_core_alloc:
+ ath6kl_core_free(ar_sdio->ar);
err_dma:
kfree(ar_sdio->dma_buffer);
err_hif:
@@ -883,8 +1306,8 @@ static void ath6kl_sdio_remove(struct sdio_func *func)
{
struct ath6kl_sdio *ar_sdio;
- ath6kl_dbg(ATH6KL_DBG_SDIO,
- "removed func %d vendor 0x%x device 0x%x\n",
+ ath6kl_dbg(ATH6KL_DBG_BOOT,
+ "sdio removed func %d vendor 0x%x device 0x%x\n",
func->num, func->vendor, func->device);
ar_sdio = sdio_get_drvdata(func);
@@ -892,9 +1315,7 @@ static void ath6kl_sdio_remove(struct sdio_func *func)
ath6kl_stop_txrx(ar_sdio->ar);
cancel_work_sync(&ar_sdio->wr_async_work);
- ath6kl_unavail_ev(ar_sdio->ar);
-
- ath6kl_sdio_power_off(ar_sdio);
+ ath6kl_core_cleanup(ar_sdio->ar);
kfree(ar_sdio->dma_buffer);
kfree(ar_sdio);
@@ -903,16 +1324,19 @@ static void ath6kl_sdio_remove(struct sdio_func *func)
static const struct sdio_device_id ath6kl_sdio_devices[] = {
{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x0))},
{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x1))},
+ {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x0))},
+ {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x1))},
{},
};
MODULE_DEVICE_TABLE(sdio, ath6kl_sdio_devices);
static struct sdio_driver ath6kl_sdio_driver = {
- .name = "ath6kl_sdio",
+ .name = "ath6kl",
.id_table = ath6kl_sdio_devices,
.probe = ath6kl_sdio_probe,
.remove = ath6kl_sdio_remove,
+ .drv.pm = ATH6KL_SDIO_PM_OPS,
};
static int __init ath6kl_sdio_init(void)
@@ -938,13 +1362,19 @@ MODULE_AUTHOR("Atheros Communications, Inc.");
MODULE_DESCRIPTION("Driver support for Atheros AR600x SDIO devices");
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_FIRMWARE(AR6003_REV2_OTP_FILE);
-MODULE_FIRMWARE(AR6003_REV2_FIRMWARE_FILE);
-MODULE_FIRMWARE(AR6003_REV2_PATCH_FILE);
-MODULE_FIRMWARE(AR6003_REV2_BOARD_DATA_FILE);
-MODULE_FIRMWARE(AR6003_REV2_DEFAULT_BOARD_DATA_FILE);
-MODULE_FIRMWARE(AR6003_REV3_OTP_FILE);
-MODULE_FIRMWARE(AR6003_REV3_FIRMWARE_FILE);
-MODULE_FIRMWARE(AR6003_REV3_PATCH_FILE);
-MODULE_FIRMWARE(AR6003_REV3_BOARD_DATA_FILE);
-MODULE_FIRMWARE(AR6003_REV3_DEFAULT_BOARD_DATA_FILE);
+MODULE_FIRMWARE(AR6003_HW_2_0_OTP_FILE);
+MODULE_FIRMWARE(AR6003_HW_2_0_FIRMWARE_FILE);
+MODULE_FIRMWARE(AR6003_HW_2_0_PATCH_FILE);
+MODULE_FIRMWARE(AR6003_HW_2_0_BOARD_DATA_FILE);
+MODULE_FIRMWARE(AR6003_HW_2_0_DEFAULT_BOARD_DATA_FILE);
+MODULE_FIRMWARE(AR6003_HW_2_1_1_OTP_FILE);
+MODULE_FIRMWARE(AR6003_HW_2_1_1_FIRMWARE_FILE);
+MODULE_FIRMWARE(AR6003_HW_2_1_1_PATCH_FILE);
+MODULE_FIRMWARE(AR6003_HW_2_1_1_BOARD_DATA_FILE);
+MODULE_FIRMWARE(AR6003_HW_2_1_1_DEFAULT_BOARD_DATA_FILE);
+MODULE_FIRMWARE(AR6004_HW_1_0_FIRMWARE_FILE);
+MODULE_FIRMWARE(AR6004_HW_1_0_BOARD_DATA_FILE);
+MODULE_FIRMWARE(AR6004_HW_1_0_DEFAULT_BOARD_DATA_FILE);
+MODULE_FIRMWARE(AR6004_HW_1_1_FIRMWARE_FILE);
+MODULE_FIRMWARE(AR6004_HW_1_1_BOARD_DATA_FILE);
+MODULE_FIRMWARE(AR6004_HW_1_1_DEFAULT_BOARD_DATA_FILE);
diff --git a/drivers/net/wireless/ath/ath6kl/target.h b/drivers/net/wireless/ath/ath6kl/target.h
index c9a76051f042..108a723a1085 100644
--- a/drivers/net/wireless/ath/ath6kl/target.h
+++ b/drivers/net/wireless/ath/ath6kl/target.h
@@ -20,7 +20,7 @@
#define AR6003_BOARD_DATA_SZ 1024
#define AR6003_BOARD_EXT_DATA_SZ 768
-#define AR6004_BOARD_DATA_SZ 7168
+#define AR6004_BOARD_DATA_SZ 6144
#define AR6004_BOARD_EXT_DATA_SZ 0
#define RESET_CONTROL_ADDRESS 0x00000000
@@ -320,7 +320,10 @@ struct host_interest {
| (2) | (2) | (2) | (2) | (2) | (2) | (2) | (2)
|------------------------------------------------------------------------------|
*/
+#define HI_OPTION_FW_MODE_BITS 0x2
#define HI_OPTION_FW_MODE_SHIFT 0xC
+
+#define HI_OPTION_FW_SUBMODE_BITS 0x2
#define HI_OPTION_FW_SUBMODE_SHIFT 0x14
/* Convert a Target virtual address into a Target physical address */
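A quick illustration of how the new _BITS/_SHIFT pairs compose an option word; the mode and submode values below are arbitrary, only the bit arithmetic follows the defines:

#include <stdio.h>

#define HI_OPTION_FW_MODE_BITS      0x2
#define HI_OPTION_FW_MODE_SHIFT     0xC
#define HI_OPTION_FW_SUBMODE_BITS   0x2
#define HI_OPTION_FW_SUBMODE_SHIFT  0x14

int main(void)
{
	unsigned int fw_mode = 0x1, fw_submode = 0x2, option = 0;

	/* mask each field to its declared width, then shift into place */
	option |= (fw_mode & ((1 << HI_OPTION_FW_MODE_BITS) - 1))
			<< HI_OPTION_FW_MODE_SHIFT;
	option |= (fw_submode & ((1 << HI_OPTION_FW_SUBMODE_BITS) - 1))
			<< HI_OPTION_FW_SUBMODE_SHIFT;

	printf("hi_option_flag = 0x%08x\n", option);
	return 0;
}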
@@ -331,20 +334,6 @@ struct host_interest {
(((target_type) == TARGET_TYPE_AR6003) ? AR6003_VTOP(vaddr) : \
(((target_type) == TARGET_TYPE_AR6004) ? AR6004_VTOP(vaddr) : 0))
-#define AR6003_REV2_APP_LOAD_ADDRESS 0x543180
-#define AR6003_REV2_BOARD_EXT_DATA_ADDRESS 0x57E500
-#define AR6003_REV2_DATASET_PATCH_ADDRESS 0x57e884
-#define AR6003_REV2_RAM_RESERVE_SIZE 6912
-
-#define AR6003_REV3_APP_LOAD_ADDRESS 0x545000
-#define AR6003_REV3_BOARD_EXT_DATA_ADDRESS 0x542330
-#define AR6003_REV3_DATASET_PATCH_ADDRESS 0x57FF74
-#define AR6003_REV3_RAM_RESERVE_SIZE 512
-
-#define AR6004_REV1_BOARD_DATA_ADDRESS 0x435400
-#define AR6004_REV1_BOARD_EXT_DATA_ADDRESS 0x437000
-#define AR6004_REV1_RAM_RESERVE_SIZE 11264
-
#define ATH6KL_FWLOG_PAYLOAD_SIZE 1500
struct ath6kl_dbglog_buf {
diff --git a/drivers/net/wireless/ath/ath6kl/txrx.c b/drivers/net/wireless/ath/ath6kl/txrx.c
index a7117074f81c..506a3031a885 100644
--- a/drivers/net/wireless/ath/ath6kl/txrx.c
+++ b/drivers/net/wireless/ath/ath6kl/txrx.c
@@ -77,12 +77,13 @@ static u8 ath6kl_ibss_map_epid(struct sk_buff *skb, struct net_device *dev,
return ar->node_map[ep_map].ep_id;
}
-static bool ath6kl_powersave_ap(struct ath6kl *ar, struct sk_buff *skb,
+static bool ath6kl_powersave_ap(struct ath6kl_vif *vif, struct sk_buff *skb,
bool *more_data)
{
struct ethhdr *datap = (struct ethhdr *) skb->data;
struct ath6kl_sta *conn = NULL;
bool ps_queued = false, is_psq_empty = false;
+ struct ath6kl *ar = vif->ar;
if (is_multicast_ether_addr(datap->h_dest)) {
u8 ctr = 0;
@@ -100,7 +101,7 @@ static bool ath6kl_powersave_ap(struct ath6kl *ar, struct sk_buff *skb,
* If this transmit is not because of a Dtim Expiry
* q it.
*/
- if (!test_bit(DTIM_EXPIRED, &ar->flag)) {
+ if (!test_bit(DTIM_EXPIRED, &vif->flags)) {
bool is_mcastq_empty = false;
spin_lock_bh(&ar->mcastpsq_lock);
@@ -116,6 +117,7 @@ static bool ath6kl_powersave_ap(struct ath6kl *ar, struct sk_buff *skb,
*/
if (is_mcastq_empty)
ath6kl_wmi_set_pvb_cmd(ar->wmi,
+ vif->fw_vif_idx,
MCAST_AID, 1);
ps_queued = true;
@@ -131,7 +133,7 @@ static bool ath6kl_powersave_ap(struct ath6kl *ar, struct sk_buff *skb,
}
}
} else {
- conn = ath6kl_find_sta(ar, datap->h_dest);
+ conn = ath6kl_find_sta(vif, datap->h_dest);
if (!conn) {
dev_kfree_skb(skb);
@@ -154,6 +156,7 @@ static bool ath6kl_powersave_ap(struct ath6kl *ar, struct sk_buff *skb,
*/
if (is_psq_empty)
ath6kl_wmi_set_pvb_cmd(ar->wmi,
+ vif->fw_vif_idx,
conn->aid, 1);
ps_queued = true;
@@ -235,6 +238,7 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
struct ath6kl *ar = ath6kl_priv(dev);
struct ath6kl_cookie *cookie = NULL;
enum htc_endpoint_id eid = ENDPOINT_UNUSED;
+ struct ath6kl_vif *vif = netdev_priv(dev);
u32 map_no = 0;
u16 htc_tag = ATH6KL_DATA_PKT_TAG;
u8 ac = 99 ; /* initialize to unmapped ac */
@@ -246,7 +250,7 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
skb, skb->data, skb->len);
/* If target is not associated */
- if (!test_bit(CONNECTED, &ar->flag)) {
+ if (!test_bit(CONNECTED, &vif->flags)) {
dev_kfree_skb(skb);
return 0;
}
@@ -255,15 +259,21 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
goto fail_tx;
/* AP mode Power saving processing */
- if (ar->nw_type == AP_NETWORK) {
- if (ath6kl_powersave_ap(ar, skb, &more_data))
+ if (vif->nw_type == AP_NETWORK) {
+ if (ath6kl_powersave_ap(vif, skb, &more_data))
return 0;
}
if (test_bit(WMI_ENABLED, &ar->flag)) {
if (skb_headroom(skb) < dev->needed_headroom) {
- WARN_ON(1);
- goto fail_tx;
+ struct sk_buff *tmp_skb = skb;
+
+ skb = skb_realloc_headroom(skb, dev->needed_headroom);
+ kfree_skb(tmp_skb);
+ if (skb == NULL) {
+ vif->net_stats.tx_dropped++;
+ return 0;
+ }
}
if (ath6kl_wmi_dix_2_dot3(ar->wmi, skb)) {
@@ -272,18 +282,20 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
}
if (ath6kl_wmi_data_hdr_add(ar->wmi, skb, DATA_MSGTYPE,
- more_data, 0, 0, NULL)) {
+ more_data, 0, 0, NULL,
+ vif->fw_vif_idx)) {
ath6kl_err("wmi_data_hdr_add failed\n");
goto fail_tx;
}
- if ((ar->nw_type == ADHOC_NETWORK) &&
- ar->ibss_ps_enable && test_bit(CONNECTED, &ar->flag))
+ if ((vif->nw_type == ADHOC_NETWORK) &&
+ ar->ibss_ps_enable && test_bit(CONNECTED, &vif->flags))
chk_adhoc_ps_mapping = true;
else {
/* get the stream mapping */
- ret = ath6kl_wmi_implicit_create_pstream(ar->wmi, skb,
- 0, test_bit(WMM_ENABLED, &ar->flag), &ac);
+ ret = ath6kl_wmi_implicit_create_pstream(ar->wmi,
+ vif->fw_vif_idx, skb,
+ 0, test_bit(WMM_ENABLED, &vif->flags), &ac);
if (ret)
goto fail_tx;
}
@@ -354,8 +366,8 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
fail_tx:
dev_kfree_skb(skb);
- ar->net_stats.tx_dropped++;
- ar->net_stats.tx_aborted_errors++;
+ vif->net_stats.tx_dropped++;
+ vif->net_stats.tx_aborted_errors++;
return 0;
}
@@ -426,7 +438,9 @@ enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target,
struct htc_packet *packet)
{
struct ath6kl *ar = target->dev->ar;
+ struct ath6kl_vif *vif;
enum htc_endpoint_id endpoint = packet->endpoint;
+ enum htc_send_full_action action = HTC_SEND_FULL_KEEP;
if (endpoint == ar->ctrl_ep) {
/*
@@ -439,19 +453,11 @@ enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target,
set_bit(WMI_CTRL_EP_FULL, &ar->flag);
spin_unlock_bh(&ar->lock);
ath6kl_err("wmi ctrl ep is full\n");
- return HTC_SEND_FULL_KEEP;
+ return action;
}
if (packet->info.tx.tag == ATH6KL_CONTROL_PKT_TAG)
- return HTC_SEND_FULL_KEEP;
-
- if (ar->nw_type == ADHOC_NETWORK)
- /*
- * In adhoc mode, we cannot differentiate traffic
- * priorities so there is no need to continue, however we
- * should stop the network.
- */
- goto stop_net_queues;
+ return action;
/*
* The last MAX_HI_COOKIE_NUM "batch" of cookies are reserved for
@@ -464,24 +470,36 @@ enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target,
* Give preference to the highest priority stream by
* dropping the packets which overflowed.
*/
- return HTC_SEND_FULL_DROP;
+ action = HTC_SEND_FULL_DROP;
-stop_net_queues:
- spin_lock_bh(&ar->lock);
- set_bit(NETQ_STOPPED, &ar->flag);
- spin_unlock_bh(&ar->lock);
- netif_stop_queue(ar->net_dev);
+ /* FIXME: Locking */
+ spin_lock_bh(&ar->list_lock);
+ list_for_each_entry(vif, &ar->vif_list, list) {
+ if (vif->nw_type == ADHOC_NETWORK ||
+ action != HTC_SEND_FULL_DROP) {
+ spin_unlock_bh(&ar->list_lock);
+
+ spin_lock_bh(&vif->if_lock);
+ set_bit(NETQ_STOPPED, &vif->flags);
+ spin_unlock_bh(&vif->if_lock);
+ netif_stop_queue(vif->ndev);
- return HTC_SEND_FULL_KEEP;
+ return action;
+ }
+ }
+ spin_unlock_bh(&ar->list_lock);
+
+ return action;
}
/* TODO this needs to be looked at */
-static void ath6kl_tx_clear_node_map(struct ath6kl *ar,
+static void ath6kl_tx_clear_node_map(struct ath6kl_vif *vif,
enum htc_endpoint_id eid, u32 map_no)
{
+ struct ath6kl *ar = vif->ar;
u32 i;
- if (ar->nw_type != ADHOC_NETWORK)
+ if (vif->nw_type != ADHOC_NETWORK)
return;
if (!ar->ibss_ps_enable)
@@ -523,7 +541,9 @@ void ath6kl_tx_complete(void *context, struct list_head *packet_queue)
int status;
enum htc_endpoint_id eid;
bool wake_event = false;
- bool flushing = false;
+ bool flushing[ATH6KL_VIF_MAX] = {false};
+ u8 if_idx;
+ struct ath6kl_vif *vif;
skb_queue_head_init(&skb_queue);
@@ -549,8 +569,6 @@ void ath6kl_tx_complete(void *context, struct list_head *packet_queue)
if (!skb || !skb->data)
goto fatal;
- packet->buf = skb->data;
-
__skb_queue_tail(&skb_queue, skb);
if (!status && (packet->act_len != skb->len))
@@ -569,15 +587,30 @@ void ath6kl_tx_complete(void *context, struct list_head *packet_queue)
wake_event = true;
}
+ if (eid == ar->ctrl_ep) {
+ if_idx = wmi_cmd_hdr_get_if_idx(
+ (struct wmi_cmd_hdr *) packet->buf);
+ } else {
+ if_idx = wmi_data_hdr_get_if_idx(
+ (struct wmi_data_hdr *) packet->buf);
+ }
+
+ vif = ath6kl_get_vif_by_index(ar, if_idx);
+ if (!vif) {
+ ath6kl_free_cookie(ar, ath6kl_cookie);
+ continue;
+ }
+
if (status) {
if (status == -ECANCELED)
/* a packet was flushed */
- flushing = true;
+ flushing[if_idx] = true;
+
+ vif->net_stats.tx_errors++;
- ar->net_stats.tx_errors++;
+ if (status != -ENOSPC && status != -ECANCELED)
+ ath6kl_warn("tx complete error: %d\n", status);
- if (status != -ENOSPC)
- ath6kl_err("tx error, status: 0x%x\n", status);
ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
"%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n",
__func__, skb, packet->buf, packet->act_len,
@@ -588,27 +621,34 @@ void ath6kl_tx_complete(void *context, struct list_head *packet_queue)
__func__, skb, packet->buf, packet->act_len,
eid, "OK");
- flushing = false;
- ar->net_stats.tx_packets++;
- ar->net_stats.tx_bytes += skb->len;
+ flushing[if_idx] = false;
+ vif->net_stats.tx_packets++;
+ vif->net_stats.tx_bytes += skb->len;
}
- ath6kl_tx_clear_node_map(ar, eid, map_no);
+ ath6kl_tx_clear_node_map(vif, eid, map_no);
ath6kl_free_cookie(ar, ath6kl_cookie);
- if (test_bit(NETQ_STOPPED, &ar->flag))
- clear_bit(NETQ_STOPPED, &ar->flag);
+ if (test_bit(NETQ_STOPPED, &vif->flags))
+ clear_bit(NETQ_STOPPED, &vif->flags);
}
spin_unlock_bh(&ar->lock);
__skb_queue_purge(&skb_queue);
- if (test_bit(CONNECTED, &ar->flag)) {
- if (!flushing)
- netif_wake_queue(ar->net_dev);
+ /* FIXME: Locking */
+ spin_lock_bh(&ar->list_lock);
+ list_for_each_entry(vif, &ar->vif_list, list) {
+ if (test_bit(CONNECTED, &vif->flags) &&
+ !flushing[vif->fw_vif_idx]) {
+ spin_unlock_bh(&ar->list_lock);
+ netif_wake_queue(vif->ndev);
+ spin_lock_bh(&ar->list_lock);
+ }
}
+ spin_unlock_bh(&ar->list_lock);
if (wake_event)
wake_up(&ar->event_wq);
@@ -1041,8 +1081,9 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
struct ath6kl_sta *conn = NULL;
struct sk_buff *skb1 = NULL;
struct ethhdr *datap = NULL;
+ struct ath6kl_vif *vif;
u16 seq_no, offset;
- u8 tid;
+ u8 tid, if_idx;
ath6kl_dbg(ATH6KL_DBG_WLAN_RX,
"%s: ar=0x%p eid=%d, skb=0x%p, data=0x%p, len=0x%x status:%d",
@@ -1050,7 +1091,23 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
packet->act_len, status);
if (status || !(skb->data + HTC_HDR_LENGTH)) {
- ar->net_stats.rx_errors++;
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ skb_put(skb, packet->act_len + HTC_HDR_LENGTH);
+ skb_pull(skb, HTC_HDR_LENGTH);
+
+ if (ept == ar->ctrl_ep) {
+ if_idx =
+ wmi_cmd_hdr_get_if_idx((struct wmi_cmd_hdr *) skb->data);
+ } else {
+ if_idx =
+ wmi_data_hdr_get_if_idx((struct wmi_data_hdr *) skb->data);
+ }
+
+ vif = ath6kl_get_vif_by_index(ar, if_idx);
+ if (!vif) {
dev_kfree_skb(skb);
return;
}
@@ -1059,28 +1116,28 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
* Take lock to protect buffer counts and adaptive power throughput
* state.
*/
- spin_lock_bh(&ar->lock);
+ spin_lock_bh(&vif->if_lock);
- ar->net_stats.rx_packets++;
- ar->net_stats.rx_bytes += packet->act_len;
+ vif->net_stats.rx_packets++;
+ vif->net_stats.rx_bytes += packet->act_len;
- spin_unlock_bh(&ar->lock);
+ spin_unlock_bh(&vif->if_lock);
- skb_put(skb, packet->act_len + HTC_HDR_LENGTH);
- skb_pull(skb, HTC_HDR_LENGTH);
ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "rx ",
skb->data, skb->len);
- skb->dev = ar->net_dev;
+ skb->dev = vif->ndev;
if (!test_bit(WMI_ENABLED, &ar->flag)) {
if (EPPING_ALIGNMENT_PAD > 0)
skb_pull(skb, EPPING_ALIGNMENT_PAD);
- ath6kl_deliver_frames_to_nw_stack(ar->net_dev, skb);
+ ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
return;
}
+ ath6kl_check_wow_status(ar);
+
if (ept == ar->ctrl_ep) {
ath6kl_wmi_control_rx(ar->wmi, skb);
return;
@@ -1096,18 +1153,18 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
* that do not have LLC hdr. They are 16 bytes in size.
* Allow these frames in the AP mode.
*/
- if (ar->nw_type != AP_NETWORK &&
+ if (vif->nw_type != AP_NETWORK &&
((packet->act_len < min_hdr_len) ||
(packet->act_len > WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH))) {
ath6kl_info("frame len is too short or too long\n");
- ar->net_stats.rx_errors++;
- ar->net_stats.rx_length_errors++;
+ vif->net_stats.rx_errors++;
+ vif->net_stats.rx_length_errors++;
dev_kfree_skb(skb);
return;
}
/* Get the Power save state of the STA */
- if (ar->nw_type == AP_NETWORK) {
+ if (vif->nw_type == AP_NETWORK) {
meta_type = wmi_data_hdr_get_meta(dhdr);
ps_state = !!((dhdr->info >> WMI_DATA_HDR_PS_SHIFT) &
@@ -1129,7 +1186,7 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
}
datap = (struct ethhdr *) (skb->data + offset);
- conn = ath6kl_find_sta(ar, datap->h_source);
+ conn = ath6kl_find_sta(vif, datap->h_source);
if (!conn) {
dev_kfree_skb(skb);
@@ -1160,12 +1217,13 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
while ((skbuff = skb_dequeue(&conn->psq))
!= NULL) {
spin_unlock_bh(&conn->psq_lock);
- ath6kl_data_tx(skbuff, ar->net_dev);
+ ath6kl_data_tx(skbuff, vif->ndev);
spin_lock_bh(&conn->psq_lock);
}
spin_unlock_bh(&conn->psq_lock);
/* Clear the PVB for this STA */
- ath6kl_wmi_set_pvb_cmd(ar->wmi, conn->aid, 0);
+ ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx,
+ conn->aid, 0);
}
}
@@ -1215,12 +1273,12 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
return;
}
- if (!(ar->net_dev->flags & IFF_UP)) {
+ if (!(vif->ndev->flags & IFF_UP)) {
dev_kfree_skb(skb);
return;
}
- if (ar->nw_type == AP_NETWORK) {
+ if (vif->nw_type == AP_NETWORK) {
datap = (struct ethhdr *) skb->data;
if (is_multicast_ether_addr(datap->h_dest))
/*
@@ -1235,8 +1293,7 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
* frame to it on the air else send the
* frame up the stack.
*/
- struct ath6kl_sta *conn = NULL;
- conn = ath6kl_find_sta(ar, datap->h_dest);
+ conn = ath6kl_find_sta(vif, datap->h_dest);
if (conn && ar->intra_bss) {
skb1 = skb;
@@ -1247,18 +1304,23 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
}
}
if (skb1)
- ath6kl_data_tx(skb1, ar->net_dev);
+ ath6kl_data_tx(skb1, vif->ndev);
+
+ if (skb == NULL) {
+ /* nothing to deliver up the stack */
+ return;
+ }
}
datap = (struct ethhdr *) skb->data;
if (is_unicast_ether_addr(datap->h_dest) &&
- aggr_process_recv_frm(ar->aggr_cntxt, tid, seq_no,
+ aggr_process_recv_frm(vif->aggr_cntxt, tid, seq_no,
is_amsdu, skb))
/* aggregation code will handle the skb */
return;
- ath6kl_deliver_frames_to_nw_stack(ar->net_dev, skb);
+ ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
}
static void aggr_timeout(unsigned long arg)
@@ -1336,9 +1398,10 @@ static void aggr_delete_tid_state(struct aggr_info *p_aggr, u8 tid)
memset(stats, 0, sizeof(struct rxtid_stats));
}
-void aggr_recv_addba_req_evt(struct ath6kl *ar, u8 tid, u16 seq_no, u8 win_sz)
+void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid, u16 seq_no,
+ u8 win_sz)
{
- struct aggr_info *p_aggr = ar->aggr_cntxt;
+ struct aggr_info *p_aggr = vif->aggr_cntxt;
struct rxtid *rxtid;
struct rxtid_stats *stats;
u16 hold_q_size;
@@ -1405,9 +1468,9 @@ struct aggr_info *aggr_init(struct net_device *dev)
return p_aggr;
}
-void aggr_recv_delba_req_evt(struct ath6kl *ar, u8 tid)
+void aggr_recv_delba_req_evt(struct ath6kl_vif *vif, u8 tid)
{
- struct aggr_info *p_aggr = ar->aggr_cntxt;
+ struct aggr_info *p_aggr = vif->aggr_cntxt;
struct rxtid *rxtid;
if (!p_aggr)
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
index a7de23cbd2c7..f6f2aa27fc20 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.c
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -21,7 +21,7 @@
#include "../regd.h"
#include "../regd_common.h"
-static int ath6kl_wmi_sync_point(struct wmi *wmi);
+static int ath6kl_wmi_sync_point(struct wmi *wmi, u8 if_idx);
static const s32 wmi_rate_tbl[][2] = {
/* {W/O SGI, with SGI} */
@@ -81,6 +81,26 @@ enum htc_endpoint_id ath6kl_wmi_get_control_ep(struct wmi *wmi)
return wmi->ep_id;
}
+struct ath6kl_vif *ath6kl_get_vif_by_index(struct ath6kl *ar, u8 if_idx)
+{
+ struct ath6kl_vif *vif, *found = NULL;
+
+ if (WARN_ON(if_idx > (ar->vif_max - 1)))
+ return NULL;
+
+ /* FIXME: Locking */
+ spin_lock_bh(&ar->list_lock);
+ list_for_each_entry(vif, &ar->vif_list, list) {
+ if (vif->fw_vif_idx == if_idx) {
+ found = vif;
+ break;
+ }
+ }
+ spin_unlock_bh(&ar->list_lock);
+
+ return found;
+}
+
/* Performs DIX to 802.3 encapsulation for transmit packets.
* Assumes the entire DIX header is contigous and that there is
* enough room in the buffer for a 802.3 mac header and LLC+SNAP headers.
@@ -162,12 +182,12 @@ static int ath6kl_wmi_meta_add(struct wmi *wmi, struct sk_buff *skb,
int ath6kl_wmi_data_hdr_add(struct wmi *wmi, struct sk_buff *skb,
u8 msg_type, bool more_data,
enum wmi_data_hdr_data_type data_type,
- u8 meta_ver, void *tx_meta_info)
+ u8 meta_ver, void *tx_meta_info, u8 if_idx)
{
struct wmi_data_hdr *data_hdr;
int ret;
- if (WARN_ON(skb == NULL))
+ if (WARN_ON(skb == NULL || (if_idx > wmi->parent_dev->vif_max - 1)))
return -EINVAL;
if (tx_meta_info) {
@@ -189,7 +209,7 @@ int ath6kl_wmi_data_hdr_add(struct wmi *wmi, struct sk_buff *skb,
WMI_DATA_HDR_MORE_MASK << WMI_DATA_HDR_MORE_SHIFT;
data_hdr->info2 = cpu_to_le16(meta_ver << WMI_DATA_HDR_META_SHIFT);
- data_hdr->info3 = 0;
+ data_hdr->info3 = cpu_to_le16(if_idx & WMI_DATA_HDR_IF_IDX_MASK);
return 0;
}
@@ -216,7 +236,8 @@ static u8 ath6kl_wmi_determine_user_priority(u8 *pkt, u32 layer2_pri)
return ip_pri;
}
-int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, struct sk_buff *skb,
+int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, u8 if_idx,
+ struct sk_buff *skb,
u32 layer2_priority, bool wmm_enabled,
u8 *ac)
{
@@ -262,7 +283,12 @@ int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, struct sk_buff *skb,
usr_pri = layer2_priority & 0x7;
}
- /* workaround for WMM S5 */
+ /*
+ * workaround for WMM S5
+ *
+ * FIXME: wmi->traffic_class is always 100 so this test doesn't
+ * make sense
+ */
if ((wmi->traffic_class == WMM_AC_VI) &&
((usr_pri == 5) || (usr_pri == 4)))
usr_pri = 1;
@@ -284,7 +310,7 @@ int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, struct sk_buff *skb,
cpu_to_le32(WMI_IMPLICIT_PSTREAM_INACTIVITY_INT);
/* Implicit streams are created with TSID 0xFF */
cmd.tsid = WMI_IMPLICIT_PSTREAM;
- ath6kl_wmi_create_pstream_cmd(wmi, &cmd);
+ ath6kl_wmi_create_pstream_cmd(wmi, if_idx, &cmd);
}
*ac = traffic_class;
@@ -410,13 +436,14 @@ static int ath6kl_wmi_tx_complete_event_rx(u8 *datap, int len)
}
static int ath6kl_wmi_remain_on_chnl_event_rx(struct wmi *wmi, u8 *datap,
- int len)
+ int len, struct ath6kl_vif *vif)
{
struct wmi_remain_on_chnl_event *ev;
u32 freq;
u32 dur;
struct ieee80211_channel *chan;
struct ath6kl *ar = wmi->parent_dev;
+ u32 id;
if (len < sizeof(*ev))
return -EINVAL;
@@ -426,26 +453,29 @@ static int ath6kl_wmi_remain_on_chnl_event_rx(struct wmi *wmi, u8 *datap,
dur = le32_to_cpu(ev->duration);
ath6kl_dbg(ATH6KL_DBG_WMI, "remain_on_chnl: freq=%u dur=%u\n",
freq, dur);
- chan = ieee80211_get_channel(ar->wdev->wiphy, freq);
+ chan = ieee80211_get_channel(ar->wiphy, freq);
if (!chan) {
ath6kl_dbg(ATH6KL_DBG_WMI, "remain_on_chnl: Unknown channel "
"(freq=%u)\n", freq);
return -EINVAL;
}
- cfg80211_ready_on_channel(ar->net_dev, 1, chan, NL80211_CHAN_NO_HT,
+ id = vif->last_roc_id;
+ cfg80211_ready_on_channel(vif->ndev, id, chan, NL80211_CHAN_NO_HT,
dur, GFP_ATOMIC);
return 0;
}
static int ath6kl_wmi_cancel_remain_on_chnl_event_rx(struct wmi *wmi,
- u8 *datap, int len)
+ u8 *datap, int len,
+ struct ath6kl_vif *vif)
{
struct wmi_cancel_remain_on_chnl_event *ev;
u32 freq;
u32 dur;
struct ieee80211_channel *chan;
struct ath6kl *ar = wmi->parent_dev;
+ u32 id;
if (len < sizeof(*ev))
return -EINVAL;
@@ -455,23 +485,29 @@ static int ath6kl_wmi_cancel_remain_on_chnl_event_rx(struct wmi *wmi,
dur = le32_to_cpu(ev->duration);
ath6kl_dbg(ATH6KL_DBG_WMI, "cancel_remain_on_chnl: freq=%u dur=%u "
"status=%u\n", freq, dur, ev->status);
- chan = ieee80211_get_channel(ar->wdev->wiphy, freq);
+ chan = ieee80211_get_channel(ar->wiphy, freq);
if (!chan) {
ath6kl_dbg(ATH6KL_DBG_WMI, "cancel_remain_on_chnl: Unknown "
"channel (freq=%u)\n", freq);
return -EINVAL;
}
- cfg80211_remain_on_channel_expired(ar->net_dev, 1, chan,
+ if (vif->last_cancel_roc_id &&
+ vif->last_cancel_roc_id + 1 == vif->last_roc_id)
+ id = vif->last_cancel_roc_id; /* event for cancel command */
+ else
+ id = vif->last_roc_id; /* timeout on uncanceled r-o-c */
+ vif->last_cancel_roc_id = 0;
+ cfg80211_remain_on_channel_expired(vif->ndev, id, chan,
NL80211_CHAN_NO_HT, GFP_ATOMIC);
return 0;
}
-static int ath6kl_wmi_tx_status_event_rx(struct wmi *wmi, u8 *datap, int len)
+static int ath6kl_wmi_tx_status_event_rx(struct wmi *wmi, u8 *datap, int len,
+ struct ath6kl_vif *vif)
{
struct wmi_tx_status_event *ev;
u32 id;
- struct ath6kl *ar = wmi->parent_dev;
if (len < sizeof(*ev))
return -EINVAL;
@@ -481,7 +517,7 @@ static int ath6kl_wmi_tx_status_event_rx(struct wmi *wmi, u8 *datap, int len)
ath6kl_dbg(ATH6KL_DBG_WMI, "tx_status: id=%x ack_status=%u\n",
id, ev->ack_status);
if (wmi->last_mgmt_tx_frame) {
- cfg80211_mgmt_tx_status(ar->net_dev, id,
+ cfg80211_mgmt_tx_status(vif->ndev, id,
wmi->last_mgmt_tx_frame,
wmi->last_mgmt_tx_frame_len,
!!ev->ack_status, GFP_ATOMIC);
@@ -493,12 +529,12 @@ static int ath6kl_wmi_tx_status_event_rx(struct wmi *wmi, u8 *datap, int len)
return 0;
}
-static int ath6kl_wmi_rx_probe_req_event_rx(struct wmi *wmi, u8 *datap, int len)
+static int ath6kl_wmi_rx_probe_req_event_rx(struct wmi *wmi, u8 *datap, int len,
+ struct ath6kl_vif *vif)
{
struct wmi_p2p_rx_probe_req_event *ev;
u32 freq;
u16 dlen;
- struct ath6kl *ar = wmi->parent_dev;
if (len < sizeof(*ev))
return -EINVAL;
@@ -513,10 +549,10 @@ static int ath6kl_wmi_rx_probe_req_event_rx(struct wmi *wmi, u8 *datap, int len)
}
ath6kl_dbg(ATH6KL_DBG_WMI, "rx_probe_req: len=%u freq=%u "
"probe_req_report=%d\n",
- dlen, freq, ar->probe_req_report);
+ dlen, freq, vif->probe_req_report);
- if (ar->probe_req_report || ar->nw_type == AP_NETWORK)
- cfg80211_rx_mgmt(ar->net_dev, freq, ev->data, dlen, GFP_ATOMIC);
+ if (vif->probe_req_report || vif->nw_type == AP_NETWORK)
+ cfg80211_rx_mgmt(vif->ndev, freq, ev->data, dlen, GFP_ATOMIC);
return 0;
}
@@ -536,12 +572,12 @@ static int ath6kl_wmi_p2p_capabilities_event_rx(u8 *datap, int len)
return 0;
}
-static int ath6kl_wmi_rx_action_event_rx(struct wmi *wmi, u8 *datap, int len)
+static int ath6kl_wmi_rx_action_event_rx(struct wmi *wmi, u8 *datap, int len,
+ struct ath6kl_vif *vif)
{
struct wmi_rx_action_event *ev;
u32 freq;
u16 dlen;
- struct ath6kl *ar = wmi->parent_dev;
if (len < sizeof(*ev))
return -EINVAL;
@@ -555,7 +591,7 @@ static int ath6kl_wmi_rx_action_event_rx(struct wmi *wmi, u8 *datap, int len)
return -EINVAL;
}
ath6kl_dbg(ATH6KL_DBG_WMI, "rx_action: len=%u freq=%u\n", dlen, freq);
- cfg80211_rx_mgmt(ar->net_dev, freq, ev->data, dlen, GFP_ATOMIC);
+ cfg80211_rx_mgmt(vif->ndev, freq, ev->data, dlen, GFP_ATOMIC);
return 0;
}
@@ -620,7 +656,8 @@ static inline struct sk_buff *ath6kl_wmi_get_new_buf(u32 size)
}
/* Send a "simple" wmi command -- one with no arguments */
-static int ath6kl_wmi_simple_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id)
+static int ath6kl_wmi_simple_cmd(struct wmi *wmi, u8 if_idx,
+ enum wmi_cmd_id cmd_id)
{
struct sk_buff *skb;
int ret;
@@ -629,7 +666,7 @@ static int ath6kl_wmi_simple_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id)
if (!skb)
return -ENOMEM;
- ret = ath6kl_wmi_cmd_send(wmi, skb, cmd_id, NO_SYNC_WMIFLAG);
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, cmd_id, NO_SYNC_WMIFLAG);
return ret;
}
@@ -641,7 +678,6 @@ static int ath6kl_wmi_ready_event_rx(struct wmi *wmi, u8 *datap, int len)
if (len < sizeof(struct wmi_ready_event_2))
return -EINVAL;
- wmi->ready = true;
ath6kl_ready_event(wmi->parent_dev, ev->mac_addr,
le32_to_cpu(ev->sw_version),
le32_to_cpu(ev->abi_version));
@@ -673,32 +709,73 @@ int ath6kl_wmi_set_roam_lrssi_cmd(struct wmi *wmi, u8 lrssi)
cmd->info.params.roam_rssi_floor = DEF_LRSSI_ROAM_FLOOR;
cmd->roam_ctrl = WMI_SET_LRSSI_SCAN_PARAMS;
- ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_ROAM_CTRL_CMDID, NO_SYNC_WMIFLAG);
+ ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_SET_ROAM_CTRL_CMDID,
+ NO_SYNC_WMIFLAG);
return 0;
}
-static int ath6kl_wmi_connect_event_rx(struct wmi *wmi, u8 *datap, int len)
+int ath6kl_wmi_force_roam_cmd(struct wmi *wmi, const u8 *bssid)
+{
+ struct sk_buff *skb;
+ struct roam_ctrl_cmd *cmd;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct roam_ctrl_cmd *) skb->data;
+ memset(cmd, 0, sizeof(*cmd));
+
+ memcpy(cmd->info.bssid, bssid, ETH_ALEN);
+ cmd->roam_ctrl = WMI_FORCE_ROAM;
+
+ ath6kl_dbg(ATH6KL_DBG_WMI, "force roam to %pM\n", bssid);
+ return ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_SET_ROAM_CTRL_CMDID,
+ NO_SYNC_WMIFLAG);
+}
+
+int ath6kl_wmi_set_roam_mode_cmd(struct wmi *wmi, enum wmi_roam_mode mode)
+{
+ struct sk_buff *skb;
+ struct roam_ctrl_cmd *cmd;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct roam_ctrl_cmd *) skb->data;
+ memset(cmd, 0, sizeof(*cmd));
+
+ cmd->info.roam_mode = mode;
+ cmd->roam_ctrl = WMI_SET_ROAM_MODE;
+
+ ath6kl_dbg(ATH6KL_DBG_WMI, "set roam mode %d\n", mode);
+ return ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_SET_ROAM_CTRL_CMDID,
+ NO_SYNC_WMIFLAG);
+}
+
+static int ath6kl_wmi_connect_event_rx(struct wmi *wmi, u8 *datap, int len,
+ struct ath6kl_vif *vif)
{
struct wmi_connect_event *ev;
u8 *pie, *peie;
- struct ath6kl *ar = wmi->parent_dev;
if (len < sizeof(struct wmi_connect_event))
return -EINVAL;
ev = (struct wmi_connect_event *) datap;
- if (ar->nw_type == AP_NETWORK) {
+ if (vif->nw_type == AP_NETWORK) {
/* AP mode start/STA connected event */
- struct net_device *dev = ar->net_dev;
+ struct net_device *dev = vif->ndev;
if (memcmp(dev->dev_addr, ev->u.ap_bss.bssid, ETH_ALEN) == 0) {
ath6kl_dbg(ATH6KL_DBG_WMI, "%s: freq %d bssid %pM "
"(AP started)\n",
__func__, le16_to_cpu(ev->u.ap_bss.ch),
ev->u.ap_bss.bssid);
ath6kl_connect_ap_mode_bss(
- ar, le16_to_cpu(ev->u.ap_bss.ch));
+ vif, le16_to_cpu(ev->u.ap_bss.ch));
} else {
ath6kl_dbg(ATH6KL_DBG_WMI, "%s: aid %u mac_addr %pM "
"auth=%u keymgmt=%u cipher=%u apsd_info=%u "
@@ -710,7 +787,7 @@ static int ath6kl_wmi_connect_event_rx(struct wmi *wmi, u8 *datap, int len)
le16_to_cpu(ev->u.ap_sta.cipher),
ev->u.ap_sta.apsd_info);
ath6kl_connect_ap_mode_sta(
- ar, ev->u.ap_sta.aid, ev->u.ap_sta.mac_addr,
+ vif, ev->u.ap_sta.aid, ev->u.ap_sta.mac_addr,
ev->u.ap_sta.keymgmt,
le16_to_cpu(ev->u.ap_sta.cipher),
ev->u.ap_sta.auth, ev->assoc_req_len,
@@ -755,7 +832,7 @@ static int ath6kl_wmi_connect_event_rx(struct wmi *wmi, u8 *datap, int len)
pie += pie[1] + 2;
}
- ath6kl_connect_event(wmi->parent_dev, le16_to_cpu(ev->u.sta.ch),
+ ath6kl_connect_event(vif, le16_to_cpu(ev->u.sta.ch),
ev->u.sta.bssid,
le16_to_cpu(ev->u.sta.listen_intvl),
le16_to_cpu(ev->u.sta.beacon_intvl),
@@ -834,14 +911,15 @@ static void ath6kl_wmi_regdomain_event(struct wmi *wmi, u8 *datap, int len)
alpha2[0] = country->isoName[0];
alpha2[1] = country->isoName[1];
- regulatory_hint(wmi->parent_dev->wdev->wiphy, alpha2);
+ regulatory_hint(wmi->parent_dev->wiphy, alpha2);
ath6kl_dbg(ATH6KL_DBG_WMI, "Country alpha2 being used: %c%c\n",
alpha2[0], alpha2[1]);
}
}
-static int ath6kl_wmi_disconnect_event_rx(struct wmi *wmi, u8 *datap, int len)
+static int ath6kl_wmi_disconnect_event_rx(struct wmi *wmi, u8 *datap, int len,
+ struct ath6kl_vif *vif)
{
struct wmi_disconnect_event *ev;
wmi->traffic_class = 100;
@@ -857,10 +935,8 @@ static int ath6kl_wmi_disconnect_event_rx(struct wmi *wmi, u8 *datap, int len)
ev->disconn_reason, ev->assoc_resp_len);
wmi->is_wmm_enabled = false;
- wmi->pair_crypto_type = NONE_CRYPT;
- wmi->grp_crypto_type = NONE_CRYPT;
- ath6kl_disconnect_event(wmi->parent_dev, ev->disconn_reason,
+ ath6kl_disconnect_event(vif, ev->disconn_reason,
ev->bssid, ev->assoc_resp_len, ev->assoc_info,
le16_to_cpu(ev->proto_reason_status));
@@ -886,7 +962,8 @@ static int ath6kl_wmi_peer_node_event_rx(struct wmi *wmi, u8 *datap, int len)
return 0;
}
-static int ath6kl_wmi_tkip_micerr_event_rx(struct wmi *wmi, u8 *datap, int len)
+static int ath6kl_wmi_tkip_micerr_event_rx(struct wmi *wmi, u8 *datap, int len,
+ struct ath6kl_vif *vif)
{
struct wmi_tkip_micerr_event *ev;
@@ -895,12 +972,20 @@ static int ath6kl_wmi_tkip_micerr_event_rx(struct wmi *wmi, u8 *datap, int len)
ev = (struct wmi_tkip_micerr_event *) datap;
- ath6kl_tkip_micerr_event(wmi->parent_dev, ev->key_id, ev->is_mcast);
+ ath6kl_tkip_micerr_event(vif, ev->key_id, ev->is_mcast);
return 0;
}
-static int ath6kl_wmi_bssinfo_event_rx(struct wmi *wmi, u8 *datap, int len)
+void ath6kl_wmi_sscan_timer(unsigned long ptr)
+{
+ struct ath6kl_vif *vif = (struct ath6kl_vif *) ptr;
+
+ cfg80211_sched_scan_results(vif->ar->wiphy);
+}
+
+static int ath6kl_wmi_bssinfo_event_rx(struct wmi *wmi, u8 *datap, int len,
+ struct ath6kl_vif *vif)
{
struct wmi_bss_info_hdr2 *bih;
u8 *buf;
@@ -927,26 +1012,27 @@ static int ath6kl_wmi_bssinfo_event_rx(struct wmi *wmi, u8 *datap, int len)
return 0; /* Only update BSS table for now */
if (bih->frame_type == BEACON_FTYPE &&
- test_bit(CLEAR_BSSFILTER_ON_BEACON, &ar->flag)) {
- clear_bit(CLEAR_BSSFILTER_ON_BEACON, &ar->flag);
- ath6kl_wmi_bssfilter_cmd(ar->wmi, NONE_BSS_FILTER, 0);
+ test_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags)) {
+ clear_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags);
+ ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
+ NONE_BSS_FILTER, 0);
}
- channel = ieee80211_get_channel(ar->wdev->wiphy, le16_to_cpu(bih->ch));
+ channel = ieee80211_get_channel(ar->wiphy, le16_to_cpu(bih->ch));
if (channel == NULL)
return -EINVAL;
if (len < 8 + 2 + 2)
return -EINVAL;
- if (bih->frame_type == BEACON_FTYPE && test_bit(CONNECTED, &ar->flag) &&
- memcmp(bih->bssid, ar->bssid, ETH_ALEN) == 0) {
+ if (bih->frame_type == BEACON_FTYPE && test_bit(CONNECTED, &vif->flags)
+ && memcmp(bih->bssid, vif->bssid, ETH_ALEN) == 0) {
const u8 *tim;
tim = cfg80211_find_ie(WLAN_EID_TIM, buf + 8 + 2 + 2,
len - 8 - 2 - 2);
if (tim && tim[1] >= 2) {
- ar->assoc_bss_dtim_period = tim[3];
- set_bit(DTIM_PERIOD_AVAIL, &ar->flag);
+ vif->assoc_bss_dtim_period = tim[3];
+ set_bit(DTIM_PERIOD_AVAIL, &vif->flags);
}
}
@@ -966,7 +1052,7 @@ static int ath6kl_wmi_bssinfo_event_rx(struct wmi *wmi, u8 *datap, int len)
IEEE80211_STYPE_BEACON);
memset(mgmt->da, 0xff, ETH_ALEN);
} else {
- struct net_device *dev = ar->net_dev;
+ struct net_device *dev = vif->ndev;
mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_PROBE_RESP);
@@ -979,7 +1065,7 @@ static int ath6kl_wmi_bssinfo_event_rx(struct wmi *wmi, u8 *datap, int len)
memcpy(&mgmt->u.beacon, buf, len);
- bss = cfg80211_inform_bss_frame(ar->wdev->wiphy, channel, mgmt,
+ bss = cfg80211_inform_bss_frame(ar->wiphy, channel, mgmt,
24 + len, (bih->snr - 95) * 100,
GFP_ATOMIC);
kfree(mgmt);
@@ -987,6 +1073,21 @@ static int ath6kl_wmi_bssinfo_event_rx(struct wmi *wmi, u8 *datap, int len)
return -ENOMEM;
cfg80211_put_bss(bss);
+ /*
+ * Firmware doesn't return any event when scheduled scan has
+ * finished, so we need to use a timer to find out when there are
+ * no more results.
+ *
+ * The timer is started from the first bss info received, otherwise
+ * the timer would never fire if the scan interval is short
+ * enough.
+ */
+ if (ar->state == ATH6KL_STATE_SCHED_SCAN &&
+ !timer_pending(&vif->sched_scan_timer)) {
+ mod_timer(&vif->sched_scan_timer, jiffies +
+ msecs_to_jiffies(ATH6KL_SCHED_SCAN_RESULT_DELAY));
+ }
+
return 0;
}
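A user-space sketch of the pattern used above: arm a one-shot timer on the first result, since the firmware never signals completion, and treat its expiry as the end of the result stream. The tick values and RESULT_DELAY below are invented for this sketch; in the driver the delay is ATH6KL_SCHED_SCAN_RESULT_DELAY.

#include <stdio.h>

#define RESULT_DELAY 3	/* stand-in for ATH6KL_SCHED_SCAN_RESULT_DELAY */

int main(void)
{
	int result_ticks[] = { 1, 2, 4 };	/* ticks at which bss info arrives */
	int deadline = -1;			/* -1 means timer not pending */
	int now, i;

	for (now = 0; now <= 10; now++) {
		for (i = 0; i < 3; i++) {
			/* arm only on the first result, like the
			 * !timer_pending() check above
			 */
			if (result_ticks[i] == now && deadline < 0)
				deadline = now + RESULT_DELAY;
		}

		if (deadline >= 0 && now == deadline) {
			printf("sched scan results reported at tick %d\n", now);
			break;
		}
	}
	return 0;
}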
@@ -1094,20 +1195,21 @@ static int ath6kl_wmi_keepalive_reply_rx(struct wmi *wmi, u8 *datap, int len)
return 0;
}
-static int ath6kl_wmi_scan_complete_rx(struct wmi *wmi, u8 *datap, int len)
+static int ath6kl_wmi_scan_complete_rx(struct wmi *wmi, u8 *datap, int len,
+ struct ath6kl_vif *vif)
{
struct wmi_scan_complete_event *ev;
ev = (struct wmi_scan_complete_event *) datap;
- ath6kl_scan_complete_evt(wmi->parent_dev, a_sle32_to_cpu(ev->status));
+ ath6kl_scan_complete_evt(vif, a_sle32_to_cpu(ev->status));
wmi->is_probe_ssid = false;
return 0;
}
static int ath6kl_wmi_neighbor_report_event_rx(struct wmi *wmi, u8 *datap,
- int len)
+ int len, struct ath6kl_vif *vif)
{
struct wmi_neighbor_report_event *ev;
u8 i;
@@ -1125,7 +1227,7 @@ static int ath6kl_wmi_neighbor_report_event_rx(struct wmi *wmi, u8 *datap,
ath6kl_dbg(ATH6KL_DBG_WMI, "neighbor %d/%d - %pM 0x%x\n",
i + 1, ev->num_neighbors, ev->neighbor[i].bssid,
ev->neighbor[i].bss_flags);
- cfg80211_pmksa_candidate_notify(wmi->parent_dev->net_dev, i,
+ cfg80211_pmksa_candidate_notify(vif->ndev, i,
ev->neighbor[i].bssid,
!!(ev->neighbor[i].bss_flags &
WMI_PREAUTH_CAPABLE_BSS),
@@ -1166,9 +1268,10 @@ static int ath6kl_wmi_error_event_rx(struct wmi *wmi, u8 *datap, int len)
return 0;
}
-static int ath6kl_wmi_stats_event_rx(struct wmi *wmi, u8 *datap, int len)
+static int ath6kl_wmi_stats_event_rx(struct wmi *wmi, u8 *datap, int len,
+ struct ath6kl_vif *vif)
{
- ath6kl_tgt_stats_event(wmi->parent_dev, datap, len);
+ ath6kl_tgt_stats_event(vif, datap, len);
return 0;
}
@@ -1222,7 +1325,7 @@ static int ath6kl_wmi_send_rssi_threshold_params(struct wmi *wmi,
cmd = (struct wmi_rssi_threshold_params_cmd *) skb->data;
memcpy(cmd, rssi_cmd, sizeof(struct wmi_rssi_threshold_params_cmd));
- return ath6kl_wmi_cmd_send(wmi, skb, WMI_RSSI_THRESHOLD_PARAMS_CMDID,
+ return ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_RSSI_THRESHOLD_PARAMS_CMDID,
NO_SYNC_WMIFLAG);
}
@@ -1322,7 +1425,8 @@ static int ath6kl_wmi_rssi_threshold_event_rx(struct wmi *wmi, u8 *datap,
return 0;
}
-static int ath6kl_wmi_cac_event_rx(struct wmi *wmi, u8 *datap, int len)
+static int ath6kl_wmi_cac_event_rx(struct wmi *wmi, u8 *datap, int len,
+ struct ath6kl_vif *vif)
{
struct wmi_cac_event *reply;
struct ieee80211_tspec_ie *ts;
@@ -1343,7 +1447,8 @@ static int ath6kl_wmi_cac_event_rx(struct wmi *wmi, u8 *datap, int len)
tsid = (tsinfo >> IEEE80211_WMM_IE_TSPEC_TID_SHIFT) &
IEEE80211_WMM_IE_TSPEC_TID_MASK;
- ath6kl_wmi_delete_pstream_cmd(wmi, reply->ac, tsid);
+ ath6kl_wmi_delete_pstream_cmd(wmi, vif->fw_vif_idx,
+ reply->ac, tsid);
} else if (reply->cac_indication == CAC_INDICATION_NO_RESP) {
/*
* Following assumes that there is only one outstanding
@@ -1358,7 +1463,8 @@ static int ath6kl_wmi_cac_event_rx(struct wmi *wmi, u8 *datap, int len)
break;
}
if (index < (sizeof(active_tsids) * 8))
- ath6kl_wmi_delete_pstream_cmd(wmi, reply->ac, index);
+ ath6kl_wmi_delete_pstream_cmd(wmi, vif->fw_vif_idx,
+ reply->ac, index);
}
/*
@@ -1403,7 +1509,7 @@ static int ath6kl_wmi_send_snr_threshold_params(struct wmi *wmi,
cmd = (struct wmi_snr_threshold_params_cmd *) skb->data;
memcpy(cmd, snr_cmd, sizeof(struct wmi_snr_threshold_params_cmd));
- return ath6kl_wmi_cmd_send(wmi, skb, WMI_SNR_THRESHOLD_PARAMS_CMDID,
+ return ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_SNR_THRESHOLD_PARAMS_CMDID,
NO_SYNC_WMIFLAG);
}
@@ -1528,14 +1634,15 @@ static int ath6kl_wmi_aplist_event_rx(struct wmi *wmi, u8 *datap, int len)
return 0;
}
-int ath6kl_wmi_cmd_send(struct wmi *wmi, struct sk_buff *skb,
+int ath6kl_wmi_cmd_send(struct wmi *wmi, u8 if_idx, struct sk_buff *skb,
enum wmi_cmd_id cmd_id, enum wmi_sync_flag sync_flag)
{
struct wmi_cmd_hdr *cmd_hdr;
enum htc_endpoint_id ep_id = wmi->ep_id;
int ret;
+ u16 info1;
- if (WARN_ON(skb == NULL))
+ if (WARN_ON(skb == NULL || (if_idx > (wmi->parent_dev->vif_max - 1))))
return -EINVAL;
ath6kl_dbg(ATH6KL_DBG_WMI, "wmi tx id %d len %d flag %d\n",
@@ -1554,19 +1661,20 @@ int ath6kl_wmi_cmd_send(struct wmi *wmi, struct sk_buff *skb,
* Make sure all data currently queued is transmitted before
* the cmd execution. Establish a new sync point.
*/
- ath6kl_wmi_sync_point(wmi);
+ ath6kl_wmi_sync_point(wmi, if_idx);
}
skb_push(skb, sizeof(struct wmi_cmd_hdr));
cmd_hdr = (struct wmi_cmd_hdr *) skb->data;
cmd_hdr->cmd_id = cpu_to_le16(cmd_id);
- cmd_hdr->info1 = 0; /* added for virtual interface */
+ info1 = if_idx & WMI_CMD_HDR_IF_ID_MASK;
+ cmd_hdr->info1 = cpu_to_le16(info1);
/* Only for OPT_TX_CMD, use BE endpoint. */
if (cmd_id == WMI_OPT_TX_FRAME_CMDID) {
ret = ath6kl_wmi_data_hdr_add(wmi, skb, OPT_MSGTYPE,
- false, false, 0, NULL);
+ false, false, 0, NULL, if_idx);
if (ret) {
dev_kfree_skb(skb);
return ret;
@@ -1582,20 +1690,22 @@ int ath6kl_wmi_cmd_send(struct wmi *wmi, struct sk_buff *skb,
* Make sure all new data queued waits for the command to
* execute. Establish a new sync point.
*/
- ath6kl_wmi_sync_point(wmi);
+ ath6kl_wmi_sync_point(wmi, if_idx);
}
return 0;
}
-int ath6kl_wmi_connect_cmd(struct wmi *wmi, enum network_type nw_type,
+int ath6kl_wmi_connect_cmd(struct wmi *wmi, u8 if_idx,
+ enum network_type nw_type,
enum dot11_auth_mode dot11_auth_mode,
enum auth_mode auth_mode,
enum crypto_type pairwise_crypto,
u8 pairwise_crypto_len,
enum crypto_type group_crypto,
u8 group_crypto_len, int ssid_len, u8 *ssid,
- u8 *bssid, u16 channel, u32 ctrl_flags)
+ u8 *bssid, u16 channel, u32 ctrl_flags,
+ u8 nw_subtype)
{
struct sk_buff *skb;
struct wmi_connect_cmd *cc;
@@ -1635,19 +1745,19 @@ int ath6kl_wmi_connect_cmd(struct wmi *wmi, enum network_type nw_type,
cc->grp_crypto_len = group_crypto_len;
cc->ch = cpu_to_le16(channel);
cc->ctrl_flags = cpu_to_le32(ctrl_flags);
+ cc->nw_subtype = nw_subtype;
if (bssid != NULL)
memcpy(cc->bssid, bssid, ETH_ALEN);
- wmi->pair_crypto_type = pairwise_crypto;
- wmi->grp_crypto_type = group_crypto;
-
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_CONNECT_CMDID, NO_SYNC_WMIFLAG);
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_CONNECT_CMDID,
+ NO_SYNC_WMIFLAG);
return ret;
}
-int ath6kl_wmi_reconnect_cmd(struct wmi *wmi, u8 *bssid, u16 channel)
+int ath6kl_wmi_reconnect_cmd(struct wmi *wmi, u8 if_idx, u8 *bssid,
+ u16 channel)
{
struct sk_buff *skb;
struct wmi_reconnect_cmd *cc;
@@ -1668,13 +1778,13 @@ int ath6kl_wmi_reconnect_cmd(struct wmi *wmi, u8 *bssid, u16 channel)
if (bssid != NULL)
memcpy(cc->bssid, bssid, ETH_ALEN);
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_RECONNECT_CMDID,
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_RECONNECT_CMDID,
NO_SYNC_WMIFLAG);
return ret;
}
-int ath6kl_wmi_disconnect_cmd(struct wmi *wmi)
+int ath6kl_wmi_disconnect_cmd(struct wmi *wmi, u8 if_idx)
{
int ret;
@@ -1683,12 +1793,79 @@ int ath6kl_wmi_disconnect_cmd(struct wmi *wmi)
wmi->traffic_class = 100;
/* Disconnect command does not need to do a SYNC before. */
- ret = ath6kl_wmi_simple_cmd(wmi, WMI_DISCONNECT_CMDID);
+ ret = ath6kl_wmi_simple_cmd(wmi, if_idx, WMI_DISCONNECT_CMDID);
+
+ return ret;
+}
+
+int ath6kl_wmi_beginscan_cmd(struct wmi *wmi, u8 if_idx,
+ enum wmi_scan_type scan_type,
+ u32 force_fgscan, u32 is_legacy,
+ u32 home_dwell_time, u32 force_scan_interval,
+ s8 num_chan, u16 *ch_list, u32 no_cck, u32 *rates)
+{
+ struct sk_buff *skb;
+ struct wmi_begin_scan_cmd *sc;
+ s8 size;
+ int i, band, ret;
+ struct ath6kl *ar = wmi->parent_dev;
+ int num_rates;
+
+ size = sizeof(struct wmi_begin_scan_cmd);
+
+ if ((scan_type != WMI_LONG_SCAN) && (scan_type != WMI_SHORT_SCAN))
+ return -EINVAL;
+
+ if (num_chan > WMI_MAX_CHANNELS)
+ return -EINVAL;
+
+ if (num_chan)
+ size += sizeof(u16) * (num_chan - 1);
+
+ skb = ath6kl_wmi_get_new_buf(size);
+ if (!skb)
+ return -ENOMEM;
+
+ sc = (struct wmi_begin_scan_cmd *) skb->data;
+ sc->scan_type = scan_type;
+ sc->force_fg_scan = cpu_to_le32(force_fgscan);
+ sc->is_legacy = cpu_to_le32(is_legacy);
+ sc->home_dwell_time = cpu_to_le32(home_dwell_time);
+ sc->force_scan_intvl = cpu_to_le32(force_scan_interval);
+ sc->no_cck = cpu_to_le32(no_cck);
+ sc->num_ch = num_chan;
+
+ for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+ struct ieee80211_supported_band *sband =
+ ar->wiphy->bands[band];
+ u32 ratemask = rates[band];
+ u8 *supp_rates = sc->supp_rates[band].rates;
+ num_rates = 0;
+
+ for (i = 0; i < sband->n_bitrates; i++) {
+ if ((BIT(i) & ratemask) == 0)
+ continue; /* skip rate */
+ supp_rates[num_rates++] =
+ (u8) (sband->bitrates[i].bitrate / 5);
+ }
+ sc->supp_rates[band].nrates = num_rates;
+ }
+
+ for (i = 0; i < num_chan; i++)
+ sc->ch_list[i] = cpu_to_le16(ch_list[i]);
+
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_BEGIN_SCAN_CMDID,
+ NO_SYNC_WMIFLAG);
return ret;
}
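The division by 5 in the rate loop above converts cfg80211 bitrates, which are expressed in 100 kbps units, into the 500 kbps units used for supported-rate bytes. A small stand-alone check with a few arbitrarily chosen rates:

#include <stdio.h>

int main(void)
{
	/* cfg80211-style bitrates in 100 kbps units: 1, 2, 5.5, 11, 6, 54 Mbps */
	int bitrates_100kbps[] = { 10, 20, 55, 110, 60, 540 };
	int i;

	for (i = 0; i < 6; i++)
		printf("%4d (%g Mbps) -> supp_rate byte %d\n",
		       bitrates_100kbps[i], bitrates_100kbps[i] / 10.0,
		       bitrates_100kbps[i] / 5);
	return 0;
}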
-int ath6kl_wmi_startscan_cmd(struct wmi *wmi, enum wmi_scan_type scan_type,
+/* ath6kl_wmi_startscan_cmd is to be deprecated. Use
+ * ath6kl_wmi_beginscan_cmd instead. The new function supports P2P
+ * mgmt operations using the station interface.
+ */
+int ath6kl_wmi_startscan_cmd(struct wmi *wmi, u8 if_idx,
+ enum wmi_scan_type scan_type,
u32 force_fgscan, u32 is_legacy,
u32 home_dwell_time, u32 force_scan_interval,
s8 num_chan, u16 *ch_list)
@@ -1724,13 +1901,14 @@ int ath6kl_wmi_startscan_cmd(struct wmi *wmi, enum wmi_scan_type scan_type,
for (i = 0; i < num_chan; i++)
sc->ch_list[i] = cpu_to_le16(ch_list[i]);
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_START_SCAN_CMDID,
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_START_SCAN_CMDID,
NO_SYNC_WMIFLAG);
return ret;
}
-int ath6kl_wmi_scanparams_cmd(struct wmi *wmi, u16 fg_start_sec,
+int ath6kl_wmi_scanparams_cmd(struct wmi *wmi, u8 if_idx,
+ u16 fg_start_sec,
u16 fg_end_sec, u16 bg_sec,
u16 minact_chdw_msec, u16 maxact_chdw_msec,
u16 pas_chdw_msec, u8 short_scan_ratio,
@@ -1757,12 +1935,12 @@ int ath6kl_wmi_scanparams_cmd(struct wmi *wmi, u16 fg_start_sec,
sc->max_dfsch_act_time = cpu_to_le32(max_dfsch_act_time);
sc->maxact_scan_per_ssid = cpu_to_le16(maxact_scan_per_ssid);
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_SCAN_PARAMS_CMDID,
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_SCAN_PARAMS_CMDID,
NO_SYNC_WMIFLAG);
return ret;
}
-int ath6kl_wmi_bssfilter_cmd(struct wmi *wmi, u8 filter, u32 ie_mask)
+int ath6kl_wmi_bssfilter_cmd(struct wmi *wmi, u8 if_idx, u8 filter, u32 ie_mask)
{
struct sk_buff *skb;
struct wmi_bss_filter_cmd *cmd;
@@ -1779,12 +1957,12 @@ int ath6kl_wmi_bssfilter_cmd(struct wmi *wmi, u8 filter, u32 ie_mask)
cmd->bss_filter = filter;
cmd->ie_mask = cpu_to_le32(ie_mask);
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_BSS_FILTER_CMDID,
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_BSS_FILTER_CMDID,
NO_SYNC_WMIFLAG);
return ret;
}
-int ath6kl_wmi_probedssid_cmd(struct wmi *wmi, u8 index, u8 flag,
+int ath6kl_wmi_probedssid_cmd(struct wmi *wmi, u8 if_idx, u8 index, u8 flag,
u8 ssid_len, u8 *ssid)
{
struct sk_buff *skb;
@@ -1816,12 +1994,13 @@ int ath6kl_wmi_probedssid_cmd(struct wmi *wmi, u8 index, u8 flag,
cmd->ssid_len = ssid_len;
memcpy(cmd->ssid, ssid, ssid_len);
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_PROBED_SSID_CMDID,
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_PROBED_SSID_CMDID,
NO_SYNC_WMIFLAG);
return ret;
}
-int ath6kl_wmi_listeninterval_cmd(struct wmi *wmi, u16 listen_interval,
+int ath6kl_wmi_listeninterval_cmd(struct wmi *wmi, u8 if_idx,
+ u16 listen_interval,
u16 listen_beacons)
{
struct sk_buff *skb;
@@ -1836,12 +2015,12 @@ int ath6kl_wmi_listeninterval_cmd(struct wmi *wmi, u16 listen_interval,
cmd->listen_intvl = cpu_to_le16(listen_interval);
cmd->num_beacons = cpu_to_le16(listen_beacons);
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_LISTEN_INT_CMDID,
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_LISTEN_INT_CMDID,
NO_SYNC_WMIFLAG);
return ret;
}
-int ath6kl_wmi_powermode_cmd(struct wmi *wmi, u8 pwr_mode)
+int ath6kl_wmi_powermode_cmd(struct wmi *wmi, u8 if_idx, u8 pwr_mode)
{
struct sk_buff *skb;
struct wmi_power_mode_cmd *cmd;
@@ -1855,12 +2034,12 @@ int ath6kl_wmi_powermode_cmd(struct wmi *wmi, u8 pwr_mode)
cmd->pwr_mode = pwr_mode;
wmi->pwr_mode = pwr_mode;
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_POWER_MODE_CMDID,
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_POWER_MODE_CMDID,
NO_SYNC_WMIFLAG);
return ret;
}
-int ath6kl_wmi_pmparams_cmd(struct wmi *wmi, u16 idle_period,
+int ath6kl_wmi_pmparams_cmd(struct wmi *wmi, u8 if_idx, u16 idle_period,
u16 ps_poll_num, u16 dtim_policy,
u16 tx_wakeup_policy, u16 num_tx_to_wakeup,
u16 ps_fail_event_policy)
@@ -1881,12 +2060,12 @@ int ath6kl_wmi_pmparams_cmd(struct wmi *wmi, u16 idle_period,
pm->num_tx_to_wakeup = cpu_to_le16(num_tx_to_wakeup);
pm->ps_fail_event_policy = cpu_to_le16(ps_fail_event_policy);
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_POWER_PARAMS_CMDID,
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_POWER_PARAMS_CMDID,
NO_SYNC_WMIFLAG);
return ret;
}
-int ath6kl_wmi_disctimeout_cmd(struct wmi *wmi, u8 timeout)
+int ath6kl_wmi_disctimeout_cmd(struct wmi *wmi, u8 if_idx, u8 timeout)
{
struct sk_buff *skb;
struct wmi_disc_timeout_cmd *cmd;
@@ -1899,15 +2078,20 @@ int ath6kl_wmi_disctimeout_cmd(struct wmi *wmi, u8 timeout)
cmd = (struct wmi_disc_timeout_cmd *) skb->data;
cmd->discon_timeout = timeout;
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_DISC_TIMEOUT_CMDID,
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_DISC_TIMEOUT_CMDID,
NO_SYNC_WMIFLAG);
+
+ if (ret == 0)
+ ath6kl_debug_set_disconnect_timeout(wmi->parent_dev, timeout);
+
return ret;
}
-int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 key_index,
+int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 if_idx, u8 key_index,
enum crypto_type key_type,
u8 key_usage, u8 key_len,
- u8 *key_rsc, u8 *key_material,
+ u8 *key_rsc, unsigned int key_rsc_len,
+ u8 *key_material,
u8 key_op_ctrl, u8 *mac_addr,
enum wmi_sync_flag sync_flag)
{
@@ -1920,7 +2104,7 @@ int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 key_index,
key_index, key_type, key_usage, key_len, key_op_ctrl);
if ((key_index > WMI_MAX_KEY_INDEX) || (key_len > WMI_MAX_KEY_LEN) ||
- (key_material == NULL))
+ (key_material == NULL) || key_rsc_len > 8)
return -EINVAL;
if ((WEP_CRYPT != key_type) && (NULL == key_rsc))
@@ -1938,20 +2122,20 @@ int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 key_index,
memcpy(cmd->key, key_material, key_len);
if (key_rsc != NULL)
- memcpy(cmd->key_rsc, key_rsc, sizeof(cmd->key_rsc));
+ memcpy(cmd->key_rsc, key_rsc, key_rsc_len);
cmd->key_op_ctrl = key_op_ctrl;
if (mac_addr)
memcpy(cmd->key_mac_addr, mac_addr, ETH_ALEN);
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_ADD_CIPHER_KEY_CMDID,
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_ADD_CIPHER_KEY_CMDID,
sync_flag);
return ret;
}
-int ath6kl_wmi_add_krk_cmd(struct wmi *wmi, u8 *krk)
+int ath6kl_wmi_add_krk_cmd(struct wmi *wmi, u8 if_idx, u8 *krk)
{
struct sk_buff *skb;
struct wmi_add_krk_cmd *cmd;
@@ -1964,12 +2148,13 @@ int ath6kl_wmi_add_krk_cmd(struct wmi *wmi, u8 *krk)
cmd = (struct wmi_add_krk_cmd *) skb->data;
memcpy(cmd->krk, krk, WMI_KRK_LEN);
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_ADD_KRK_CMDID, NO_SYNC_WMIFLAG);
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_ADD_KRK_CMDID,
+ NO_SYNC_WMIFLAG);
return ret;
}
-int ath6kl_wmi_deletekey_cmd(struct wmi *wmi, u8 key_index)
+int ath6kl_wmi_deletekey_cmd(struct wmi *wmi, u8 if_idx, u8 key_index)
{
struct sk_buff *skb;
struct wmi_delete_cipher_key_cmd *cmd;
@@ -1985,13 +2170,13 @@ int ath6kl_wmi_deletekey_cmd(struct wmi *wmi, u8 key_index)
cmd = (struct wmi_delete_cipher_key_cmd *) skb->data;
cmd->key_index = key_index;
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_DELETE_CIPHER_KEY_CMDID,
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_DELETE_CIPHER_KEY_CMDID,
NO_SYNC_WMIFLAG);
return ret;
}
-int ath6kl_wmi_setpmkid_cmd(struct wmi *wmi, const u8 *bssid,
+int ath6kl_wmi_setpmkid_cmd(struct wmi *wmi, u8 if_idx, const u8 *bssid,
const u8 *pmkid, bool set)
{
struct sk_buff *skb;
@@ -2018,14 +2203,14 @@ int ath6kl_wmi_setpmkid_cmd(struct wmi *wmi, const u8 *bssid,
cmd->enable = PMKID_DISABLE;
}
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_PMKID_CMDID,
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_PMKID_CMDID,
NO_SYNC_WMIFLAG);
return ret;
}
static int ath6kl_wmi_data_sync_send(struct wmi *wmi, struct sk_buff *skb,
- enum htc_endpoint_id ep_id)
+ enum htc_endpoint_id ep_id, u8 if_idx)
{
struct wmi_data_hdr *data_hdr;
int ret;
@@ -2037,14 +2222,14 @@ static int ath6kl_wmi_data_sync_send(struct wmi *wmi, struct sk_buff *skb,
data_hdr = (struct wmi_data_hdr *) skb->data;
data_hdr->info = SYNC_MSGTYPE << WMI_DATA_HDR_MSG_TYPE_SHIFT;
- data_hdr->info3 = 0;
+ data_hdr->info3 = cpu_to_le16(if_idx & WMI_DATA_HDR_IF_IDX_MASK);
ret = ath6kl_control_tx(wmi->parent_dev, skb, ep_id);
return ret;
}
-static int ath6kl_wmi_sync_point(struct wmi *wmi)
+static int ath6kl_wmi_sync_point(struct wmi *wmi, u8 if_idx)
{
struct sk_buff *skb;
struct wmi_sync_cmd *cmd;
@@ -2100,7 +2285,7 @@ static int ath6kl_wmi_sync_point(struct wmi *wmi)
* Send sync cmd followed by sync data messages on all
* endpoints being used
*/
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SYNCHRONIZE_CMDID,
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SYNCHRONIZE_CMDID,
NO_SYNC_WMIFLAG);
if (ret)
@@ -2119,7 +2304,7 @@ static int ath6kl_wmi_sync_point(struct wmi *wmi)
traffic_class);
ret =
ath6kl_wmi_data_sync_send(wmi, data_sync_bufs[index].skb,
- ep_id);
+ ep_id, if_idx);
if (ret)
break;
@@ -2142,7 +2327,7 @@ free_skb:
return ret;
}
-int ath6kl_wmi_create_pstream_cmd(struct wmi *wmi,
+int ath6kl_wmi_create_pstream_cmd(struct wmi *wmi, u8 if_idx,
struct wmi_create_pstream_cmd *params)
{
struct sk_buff *skb;
@@ -2231,12 +2416,13 @@ int ath6kl_wmi_create_pstream_cmd(struct wmi *wmi,
ath6kl_indicate_tx_activity(wmi->parent_dev,
params->traffic_class, true);
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_CREATE_PSTREAM_CMDID,
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_CREATE_PSTREAM_CMDID,
NO_SYNC_WMIFLAG);
return ret;
}
-int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 traffic_class, u8 tsid)
+int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 if_idx, u8 traffic_class,
+ u8 tsid)
{
struct sk_buff *skb;
struct wmi_delete_pstream_cmd *cmd;
@@ -2272,7 +2458,7 @@ int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 traffic_class, u8 tsid)
"sending delete_pstream_cmd: traffic class: %d tsid=%d\n",
traffic_class, tsid);
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_DELETE_PSTREAM_CMDID,
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_DELETE_PSTREAM_CMDID,
SYNC_BEFORE_WMIFLAG);
spin_lock_bh(&wmi->lock);
@@ -2311,17 +2497,173 @@ int ath6kl_wmi_set_ip_cmd(struct wmi *wmi, struct wmi_set_ip_cmd *ip_cmd)
cmd = (struct wmi_set_ip_cmd *) skb->data;
memcpy(cmd, ip_cmd, sizeof(struct wmi_set_ip_cmd));
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_IP_CMDID, NO_SYNC_WMIFLAG);
+ ret = ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_SET_IP_CMDID,
+ NO_SYNC_WMIFLAG);
return ret;
}
-static int ath6kl_wmi_get_wow_list_event_rx(struct wmi *wmi, u8 * datap,
- int len)
+static void ath6kl_wmi_relinquish_implicit_pstream_credits(struct wmi *wmi)
{
- if (len < sizeof(struct wmi_get_wow_list_reply))
+ u16 active_tsids;
+ u8 stream_exist;
+ int i;
+
+ /*
+	 * Relinquish credits from all implicitly created pstreams
+	 * when we go to sleep. If user-created explicit
+	 * thinstreams exist within a fatpipe, leave them intact
+	 * for the user to delete.
+ */
+ spin_lock_bh(&wmi->lock);
+ stream_exist = wmi->fat_pipe_exist;
+ spin_unlock_bh(&wmi->lock);
+
+ for (i = 0; i < WMM_NUM_AC; i++) {
+ if (stream_exist & (1 << i)) {
+
+ /*
+ * FIXME: Is this lock & unlock inside the
+ * for loop correct? May need rework.
+ */
+ spin_lock_bh(&wmi->lock);
+ active_tsids = wmi->stream_exist_for_ac[i];
+ spin_unlock_bh(&wmi->lock);
+
+ /*
+ * If there are no user-created thin streams,
+ * delete the fatpipe.
+ */
+ if (!active_tsids) {
+ stream_exist &= ~(1 << i);
+ /*
+ * Indicate inactivity to driver layer for
+ * this fatpipe (pstream)
+ */
+ ath6kl_indicate_tx_activity(wmi->parent_dev,
+ i, false);
+ }
+ }
+ }
+
+ /* FIXME: Can we do this assignment without locking? */
+ spin_lock_bh(&wmi->lock);
+ wmi->fat_pipe_exist = stream_exist;
+ spin_unlock_bh(&wmi->lock);
+}
+
+int ath6kl_wmi_set_host_sleep_mode_cmd(struct wmi *wmi, u8 if_idx,
+ enum ath6kl_host_mode host_mode)
+{
+ struct sk_buff *skb;
+ struct wmi_set_host_sleep_mode_cmd *cmd;
+ int ret;
+
+ if ((host_mode != ATH6KL_HOST_MODE_ASLEEP) &&
+ (host_mode != ATH6KL_HOST_MODE_AWAKE)) {
+ ath6kl_err("invalid host sleep mode: %d\n", host_mode);
return -EINVAL;
+ }
- return 0;
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_set_host_sleep_mode_cmd *) skb->data;
+
+ if (host_mode == ATH6KL_HOST_MODE_ASLEEP) {
+ ath6kl_wmi_relinquish_implicit_pstream_credits(wmi);
+ cmd->asleep = cpu_to_le32(1);
+ } else
+ cmd->awake = cpu_to_le32(1);
+
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb,
+ WMI_SET_HOST_SLEEP_MODE_CMDID,
+ NO_SYNC_WMIFLAG);
+ return ret;
+}
+
+int ath6kl_wmi_set_wow_mode_cmd(struct wmi *wmi, u8 if_idx,
+ enum ath6kl_wow_mode wow_mode,
+ u32 filter, u16 host_req_delay)
+{
+ struct sk_buff *skb;
+ struct wmi_set_wow_mode_cmd *cmd;
+ int ret;
+
+ if ((wow_mode != ATH6KL_WOW_MODE_ENABLE) &&
+ (wow_mode != ATH6KL_WOW_MODE_DISABLE)) {
+ ath6kl_err("invalid wow mode: %d\n", wow_mode);
+ return -EINVAL;
+ }
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_set_wow_mode_cmd *) skb->data;
+ cmd->enable_wow = cpu_to_le32(wow_mode);
+ cmd->filter = cpu_to_le32(filter);
+ cmd->host_req_delay = cpu_to_le16(host_req_delay);
+
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_WOW_MODE_CMDID,
+ NO_SYNC_WMIFLAG);
+ return ret;
+}
+
+int ath6kl_wmi_add_wow_pattern_cmd(struct wmi *wmi, u8 if_idx,
+ u8 list_id, u8 filter_size,
+ u8 filter_offset, u8 *filter, u8 *mask)
+{
+ struct sk_buff *skb;
+ struct wmi_add_wow_pattern_cmd *cmd;
+ u16 size;
+ u8 *filter_mask;
+ int ret;
+
+ /*
+ * Allocate additional memory in the buffer to hold the
+ * filter and mask values, i.e. twice filter_size.
+ */
+ size = sizeof(*cmd) + (2 * filter_size);
+
+ skb = ath6kl_wmi_get_new_buf(size);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_add_wow_pattern_cmd *) skb->data;
+ cmd->filter_list_id = list_id;
+ cmd->filter_size = filter_size;
+ cmd->filter_offset = filter_offset;
+
+ memcpy(cmd->filter, filter, filter_size);
+
+ filter_mask = (u8 *) (cmd->filter + filter_size);
+ memcpy(filter_mask, mask, filter_size);
+
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_ADD_WOW_PATTERN_CMDID,
+ NO_SYNC_WMIFLAG);
+
+ return ret;
+}
+
+int ath6kl_wmi_del_wow_pattern_cmd(struct wmi *wmi, u8 if_idx,
+ u16 list_id, u16 filter_id)
+{
+ struct sk_buff *skb;
+ struct wmi_del_wow_pattern_cmd *cmd;
+ int ret;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_del_wow_pattern_cmd *) skb->data;
+ cmd->filter_list_id = cpu_to_le16(list_id);
+ cmd->filter_id = cpu_to_le16(filter_id);
+
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_DEL_WOW_PATTERN_CMDID,
+ NO_SYNC_WMIFLAG);
+ return ret;
}
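/*
 * Illustrative sketch (not part of the patch): how a caller might arm
 * Wake-on-Wireless suspend using the new per-vif WMI helpers added above.
 * The function name ath6kl_wow_suspend_sketch and the example pattern/mask
 * values are hypothetical; the ath6kl_wmi_* calls, enums and flag come from
 * this patch.
 */
static int ath6kl_wow_suspend_sketch(struct wmi *wmi, u8 if_idx)
{
	static u8 pattern[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	static u8 mask[]    = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	int ret;

	/* Enable WoW and wake only on the configured patterns */
	ret = ath6kl_wmi_set_wow_mode_cmd(wmi, if_idx, ATH6KL_WOW_MODE_ENABLE,
					  WOW_FILTER_OPTION_PATTERNS, 0);
	if (ret)
		return ret;

	/* Match a broadcast destination address at offset 0 of the frame */
	ret = ath6kl_wmi_add_wow_pattern_cmd(wmi, if_idx, 0, sizeof(pattern),
					     0, pattern, mask);
	if (ret)
		return ret;

	/* Finally tell the target that the host is going to sleep */
	return ath6kl_wmi_set_host_sleep_mode_cmd(wmi, if_idx,
						  ATH6KL_HOST_MODE_ASLEEP);
}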
static int ath6kl_wmi_cmd_send_xtnd(struct wmi *wmi, struct sk_buff *skb,
@@ -2336,7 +2678,7 @@ static int ath6kl_wmi_cmd_send_xtnd(struct wmi *wmi, struct sk_buff *skb,
cmd_hdr = (struct wmix_cmd_hdr *) skb->data;
cmd_hdr->cmd_id = cpu_to_le32(cmd_id);
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_EXTENSION_CMDID, sync_flag);
+ ret = ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_EXTENSION_CMDID, sync_flag);
return ret;
}
@@ -2379,12 +2721,12 @@ int ath6kl_wmi_config_debug_module_cmd(struct wmi *wmi, u32 valid, u32 config)
return ret;
}
-int ath6kl_wmi_get_stats_cmd(struct wmi *wmi)
+int ath6kl_wmi_get_stats_cmd(struct wmi *wmi, u8 if_idx)
{
- return ath6kl_wmi_simple_cmd(wmi, WMI_GET_STATISTICS_CMDID);
+ return ath6kl_wmi_simple_cmd(wmi, if_idx, WMI_GET_STATISTICS_CMDID);
}
-int ath6kl_wmi_set_tx_pwr_cmd(struct wmi *wmi, u8 dbM)
+int ath6kl_wmi_set_tx_pwr_cmd(struct wmi *wmi, u8 if_idx, u8 dbM)
{
struct sk_buff *skb;
struct wmi_set_tx_pwr_cmd *cmd;
@@ -2397,18 +2739,24 @@ int ath6kl_wmi_set_tx_pwr_cmd(struct wmi *wmi, u8 dbM)
cmd = (struct wmi_set_tx_pwr_cmd *) skb->data;
cmd->dbM = dbM;
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_TX_PWR_CMDID,
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_TX_PWR_CMDID,
NO_SYNC_WMIFLAG);
return ret;
}
-int ath6kl_wmi_get_tx_pwr_cmd(struct wmi *wmi)
+int ath6kl_wmi_get_tx_pwr_cmd(struct wmi *wmi, u8 if_idx)
+{
+ return ath6kl_wmi_simple_cmd(wmi, if_idx, WMI_GET_TX_PWR_CMDID);
+}
+
+int ath6kl_wmi_get_roam_tbl_cmd(struct wmi *wmi)
{
- return ath6kl_wmi_simple_cmd(wmi, WMI_GET_TX_PWR_CMDID);
+ return ath6kl_wmi_simple_cmd(wmi, 0, WMI_GET_ROAM_TBL_CMDID);
}
-int ath6kl_wmi_set_lpreamble_cmd(struct wmi *wmi, u8 status, u8 preamble_policy)
+int ath6kl_wmi_set_lpreamble_cmd(struct wmi *wmi, u8 if_idx, u8 status,
+ u8 preamble_policy)
{
struct sk_buff *skb;
struct wmi_set_lpreamble_cmd *cmd;
@@ -2422,7 +2770,7 @@ int ath6kl_wmi_set_lpreamble_cmd(struct wmi *wmi, u8 status, u8 preamble_policy)
cmd->status = status;
cmd->preamble_policy = preamble_policy;
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_LPREAMBLE_CMDID,
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_LPREAMBLE_CMDID,
NO_SYNC_WMIFLAG);
return ret;
}
@@ -2440,11 +2788,12 @@ int ath6kl_wmi_set_rts_cmd(struct wmi *wmi, u16 threshold)
cmd = (struct wmi_set_rts_cmd *) skb->data;
cmd->threshold = cpu_to_le16(threshold);
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_RTS_CMDID, NO_SYNC_WMIFLAG);
+ ret = ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_SET_RTS_CMDID,
+ NO_SYNC_WMIFLAG);
return ret;
}
-int ath6kl_wmi_set_wmm_txop(struct wmi *wmi, enum wmi_txop_cfg cfg)
+int ath6kl_wmi_set_wmm_txop(struct wmi *wmi, u8 if_idx, enum wmi_txop_cfg cfg)
{
struct sk_buff *skb;
struct wmi_set_wmm_txop_cmd *cmd;
@@ -2460,12 +2809,13 @@ int ath6kl_wmi_set_wmm_txop(struct wmi *wmi, enum wmi_txop_cfg cfg)
cmd = (struct wmi_set_wmm_txop_cmd *) skb->data;
cmd->txop_enable = cfg;
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_WMM_TXOP_CMDID,
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_WMM_TXOP_CMDID,
NO_SYNC_WMIFLAG);
return ret;
}
-int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 keep_alive_intvl)
+int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 if_idx,
+ u8 keep_alive_intvl)
{
struct sk_buff *skb;
struct wmi_set_keepalive_cmd *cmd;
@@ -2477,10 +2827,13 @@ int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 keep_alive_intvl)
cmd = (struct wmi_set_keepalive_cmd *) skb->data;
cmd->keep_alive_intvl = keep_alive_intvl;
- wmi->keep_alive_intvl = keep_alive_intvl;
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_KEEPALIVE_CMDID,
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_KEEPALIVE_CMDID,
NO_SYNC_WMIFLAG);
+
+ if (ret == 0)
+ ath6kl_debug_set_keepalive(wmi->parent_dev, keep_alive_intvl);
+
return ret;
}
@@ -2495,7 +2848,7 @@ int ath6kl_wmi_test_cmd(struct wmi *wmi, void *buf, size_t len)
memcpy(skb->data, buf, len);
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_TEST_CMDID, NO_SYNC_WMIFLAG);
+ ret = ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_TEST_CMDID, NO_SYNC_WMIFLAG);
return ret;
}
@@ -2528,28 +2881,31 @@ static int ath6kl_wmi_get_pmkid_list_event_rx(struct wmi *wmi, u8 *datap,
return 0;
}
-static int ath6kl_wmi_addba_req_event_rx(struct wmi *wmi, u8 *datap, int len)
+static int ath6kl_wmi_addba_req_event_rx(struct wmi *wmi, u8 *datap, int len,
+ struct ath6kl_vif *vif)
{
struct wmi_addba_req_event *cmd = (struct wmi_addba_req_event *) datap;
- aggr_recv_addba_req_evt(wmi->parent_dev, cmd->tid,
+ aggr_recv_addba_req_evt(vif, cmd->tid,
le16_to_cpu(cmd->st_seq_no), cmd->win_sz);
return 0;
}
-static int ath6kl_wmi_delba_req_event_rx(struct wmi *wmi, u8 *datap, int len)
+static int ath6kl_wmi_delba_req_event_rx(struct wmi *wmi, u8 *datap, int len,
+ struct ath6kl_vif *vif)
{
struct wmi_delba_event *cmd = (struct wmi_delba_event *) datap;
- aggr_recv_delba_req_evt(wmi->parent_dev, cmd->tid);
+ aggr_recv_delba_req_evt(vif, cmd->tid);
return 0;
}
/* AP mode functions */
-int ath6kl_wmi_ap_profile_commit(struct wmi *wmip, struct wmi_connect_cmd *p)
+int ath6kl_wmi_ap_profile_commit(struct wmi *wmip, u8 if_idx,
+ struct wmi_connect_cmd *p)
{
struct sk_buff *skb;
struct wmi_connect_cmd *cm;
@@ -2562,7 +2918,7 @@ int ath6kl_wmi_ap_profile_commit(struct wmi *wmip, struct wmi_connect_cmd *p)
cm = (struct wmi_connect_cmd *) skb->data;
memcpy(cm, p, sizeof(*cm));
- res = ath6kl_wmi_cmd_send(wmip, skb, WMI_AP_CONFIG_COMMIT_CMDID,
+ res = ath6kl_wmi_cmd_send(wmip, if_idx, skb, WMI_AP_CONFIG_COMMIT_CMDID,
NO_SYNC_WMIFLAG);
ath6kl_dbg(ATH6KL_DBG_WMI, "%s: nw_type=%u auth_mode=%u ch=%u "
"ctrl_flags=0x%x-> res=%d\n",
@@ -2571,7 +2927,8 @@ int ath6kl_wmi_ap_profile_commit(struct wmi *wmip, struct wmi_connect_cmd *p)
return res;
}
-int ath6kl_wmi_ap_set_mlme(struct wmi *wmip, u8 cmd, const u8 *mac, u16 reason)
+int ath6kl_wmi_ap_set_mlme(struct wmi *wmip, u8 if_idx, u8 cmd, const u8 *mac,
+ u16 reason)
{
struct sk_buff *skb;
struct wmi_ap_set_mlme_cmd *cm;
@@ -2585,11 +2942,12 @@ int ath6kl_wmi_ap_set_mlme(struct wmi *wmip, u8 cmd, const u8 *mac, u16 reason)
cm->reason = cpu_to_le16(reason);
cm->cmd = cmd;
- return ath6kl_wmi_cmd_send(wmip, skb, WMI_AP_SET_MLME_CMDID,
+ return ath6kl_wmi_cmd_send(wmip, if_idx, skb, WMI_AP_SET_MLME_CMDID,
NO_SYNC_WMIFLAG);
}
-static int ath6kl_wmi_pspoll_event_rx(struct wmi *wmi, u8 *datap, int len)
+static int ath6kl_wmi_pspoll_event_rx(struct wmi *wmi, u8 *datap, int len,
+ struct ath6kl_vif *vif)
{
struct wmi_pspoll_event *ev;
@@ -2598,19 +2956,21 @@ static int ath6kl_wmi_pspoll_event_rx(struct wmi *wmi, u8 *datap, int len)
ev = (struct wmi_pspoll_event *) datap;
- ath6kl_pspoll_event(wmi->parent_dev, le16_to_cpu(ev->aid));
+ ath6kl_pspoll_event(vif, le16_to_cpu(ev->aid));
return 0;
}
-static int ath6kl_wmi_dtimexpiry_event_rx(struct wmi *wmi, u8 *datap, int len)
+static int ath6kl_wmi_dtimexpiry_event_rx(struct wmi *wmi, u8 *datap, int len,
+ struct ath6kl_vif *vif)
{
- ath6kl_dtimexpiry_event(wmi->parent_dev);
+ ath6kl_dtimexpiry_event(vif);
return 0;
}
-int ath6kl_wmi_set_pvb_cmd(struct wmi *wmi, u16 aid, bool flag)
+int ath6kl_wmi_set_pvb_cmd(struct wmi *wmi, u8 if_idx, u16 aid,
+ bool flag)
{
struct sk_buff *skb;
struct wmi_ap_set_pvb_cmd *cmd;
@@ -2625,13 +2985,14 @@ int ath6kl_wmi_set_pvb_cmd(struct wmi *wmi, u16 aid, bool flag)
cmd->rsvd = cpu_to_le16(0);
cmd->flag = cpu_to_le32(flag);
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_AP_SET_PVB_CMDID,
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_AP_SET_PVB_CMDID,
NO_SYNC_WMIFLAG);
return 0;
}
-int ath6kl_wmi_set_rx_frame_format_cmd(struct wmi *wmi, u8 rx_meta_ver,
+int ath6kl_wmi_set_rx_frame_format_cmd(struct wmi *wmi, u8 if_idx,
+ u8 rx_meta_ver,
bool rx_dot11_hdr, bool defrag_on_host)
{
struct sk_buff *skb;
@@ -2648,14 +3009,14 @@ int ath6kl_wmi_set_rx_frame_format_cmd(struct wmi *wmi, u8 rx_meta_ver,
cmd->meta_ver = rx_meta_ver;
/* Delete the local aggr state, on host */
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_RX_FRAME_FORMAT_CMDID,
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_RX_FRAME_FORMAT_CMDID,
NO_SYNC_WMIFLAG);
return ret;
}
-int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 mgmt_frm_type, const u8 *ie,
- u8 ie_len)
+int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 if_idx, u8 mgmt_frm_type,
+ const u8 *ie, u8 ie_len)
{
struct sk_buff *skb;
struct wmi_set_appie_cmd *p;
@@ -2669,8 +3030,11 @@ int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 mgmt_frm_type, const u8 *ie,
p = (struct wmi_set_appie_cmd *) skb->data;
p->mgmt_frm_type = mgmt_frm_type;
p->ie_len = ie_len;
- memcpy(p->ie_info, ie, ie_len);
- return ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_APPIE_CMDID,
+
+ if (ie != NULL && ie_len > 0)
+ memcpy(p->ie_info, ie, ie_len);
+
+ return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_APPIE_CMDID,
NO_SYNC_WMIFLAG);
}
@@ -2688,11 +3052,11 @@ int ath6kl_wmi_disable_11b_rates_cmd(struct wmi *wmi, bool disable)
cmd = (struct wmi_disable_11b_rates_cmd *) skb->data;
cmd->disable = disable ? 1 : 0;
- return ath6kl_wmi_cmd_send(wmi, skb, WMI_DISABLE_11B_RATES_CMDID,
+ return ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_DISABLE_11B_RATES_CMDID,
NO_SYNC_WMIFLAG);
}
-int ath6kl_wmi_remain_on_chnl_cmd(struct wmi *wmi, u32 freq, u32 dur)
+int ath6kl_wmi_remain_on_chnl_cmd(struct wmi *wmi, u8 if_idx, u32 freq, u32 dur)
{
struct sk_buff *skb;
struct wmi_remain_on_chnl_cmd *p;
@@ -2706,12 +3070,16 @@ int ath6kl_wmi_remain_on_chnl_cmd(struct wmi *wmi, u32 freq, u32 dur)
p = (struct wmi_remain_on_chnl_cmd *) skb->data;
p->freq = cpu_to_le32(freq);
p->duration = cpu_to_le32(dur);
- return ath6kl_wmi_cmd_send(wmi, skb, WMI_REMAIN_ON_CHNL_CMDID,
+ return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_REMAIN_ON_CHNL_CMDID,
NO_SYNC_WMIFLAG);
}
-int ath6kl_wmi_send_action_cmd(struct wmi *wmi, u32 id, u32 freq, u32 wait,
- const u8 *data, u16 data_len)
+/* ath6kl_wmi_send_action_cmd is to be deprecated. Use
+ * ath6kl_wmi_send_mgmt_cmd instead. The new function supports P2P
+ * mgmt operations using the station interface.
+ */
+int ath6kl_wmi_send_action_cmd(struct wmi *wmi, u8 if_idx, u32 id, u32 freq,
+ u32 wait, const u8 *data, u16 data_len)
{
struct sk_buff *skb;
struct wmi_send_action_cmd *p;
@@ -2731,6 +3099,7 @@ int ath6kl_wmi_send_action_cmd(struct wmi *wmi, u32 id, u32 freq, u32 wait,
}
kfree(wmi->last_mgmt_tx_frame);
+ memcpy(buf, data, data_len);
wmi->last_mgmt_tx_frame = buf;
wmi->last_mgmt_tx_frame_len = data_len;
@@ -2742,18 +3111,61 @@ int ath6kl_wmi_send_action_cmd(struct wmi *wmi, u32 id, u32 freq, u32 wait,
p->wait = cpu_to_le32(wait);
p->len = cpu_to_le16(data_len);
memcpy(p->data, data, data_len);
- return ath6kl_wmi_cmd_send(wmi, skb, WMI_SEND_ACTION_CMDID,
+ return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SEND_ACTION_CMDID,
NO_SYNC_WMIFLAG);
}
-int ath6kl_wmi_send_probe_response_cmd(struct wmi *wmi, u32 freq,
- const u8 *dst,
- const u8 *data, u16 data_len)
+int ath6kl_wmi_send_mgmt_cmd(struct wmi *wmi, u8 if_idx, u32 id, u32 freq,
+ u32 wait, const u8 *data, u16 data_len,
+ u32 no_cck)
{
struct sk_buff *skb;
- struct wmi_p2p_probe_response_cmd *p;
+ struct wmi_send_mgmt_cmd *p;
+ u8 *buf;
+
+ if (wait)
+ return -EINVAL; /* Offload for wait not supported */
+
+ buf = kmalloc(data_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
skb = ath6kl_wmi_get_new_buf(sizeof(*p) + data_len);
+ if (!skb) {
+ kfree(buf);
+ return -ENOMEM;
+ }
+
+ kfree(wmi->last_mgmt_tx_frame);
+ memcpy(buf, data, data_len);
+ wmi->last_mgmt_tx_frame = buf;
+ wmi->last_mgmt_tx_frame_len = data_len;
+
+ ath6kl_dbg(ATH6KL_DBG_WMI, "send_mgmt_cmd: id=%u freq=%u wait=%u "
+ "len=%u\n", id, freq, wait, data_len);
+ p = (struct wmi_send_mgmt_cmd *) skb->data;
+ p->id = cpu_to_le32(id);
+ p->freq = cpu_to_le32(freq);
+ p->wait = cpu_to_le32(wait);
+ p->no_cck = cpu_to_le32(no_cck);
+ p->len = cpu_to_le16(data_len);
+ memcpy(p->data, data, data_len);
+ return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SEND_MGMT_CMDID,
+ NO_SYNC_WMIFLAG);
+}
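/*
 * Illustrative sketch (not part of the patch): migrating a management-frame
 * transmit from the deprecated ath6kl_wmi_send_action_cmd() to the new
 * ath6kl_wmi_send_mgmt_cmd() introduced above. The wrapper name and the
 * p2p_capable_fw/no_cck choices are hypothetical; the two WMI calls and
 * their signatures are taken from this patch (wait must be 0, as offload
 * for wait is not supported).
 */
static int ath6kl_send_mgmt_sketch(struct wmi *wmi, u8 if_idx, u32 id,
				   u32 freq, const u8 *buf, u16 len,
				   bool p2p_capable_fw)
{
	if (p2p_capable_fw)
		/* New firmware: P2P mgmt over the station interface, no CCK */
		return ath6kl_wmi_send_mgmt_cmd(wmi, if_idx, id, freq, 0,
						buf, len, 1);

	/* Older firmware: fall back to the command being deprecated */
	return ath6kl_wmi_send_action_cmd(wmi, if_idx, id, freq, 0, buf, len);
}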
+
+int ath6kl_wmi_send_probe_response_cmd(struct wmi *wmi, u8 if_idx, u32 freq,
+ const u8 *dst, const u8 *data,
+ u16 data_len)
+{
+ struct sk_buff *skb;
+ struct wmi_p2p_probe_response_cmd *p;
+ size_t cmd_len = sizeof(*p) + data_len;
+
+ if (data_len == 0)
+ cmd_len++; /* work around target minimum length requirement */
+
+ skb = ath6kl_wmi_get_new_buf(cmd_len);
if (!skb)
return -ENOMEM;
@@ -2764,11 +3176,12 @@ int ath6kl_wmi_send_probe_response_cmd(struct wmi *wmi, u32 freq,
memcpy(p->destination_addr, dst, ETH_ALEN);
p->len = cpu_to_le16(data_len);
memcpy(p->data, data, data_len);
- return ath6kl_wmi_cmd_send(wmi, skb, WMI_SEND_PROBE_RESPONSE_CMDID,
+ return ath6kl_wmi_cmd_send(wmi, if_idx, skb,
+ WMI_SEND_PROBE_RESPONSE_CMDID,
NO_SYNC_WMIFLAG);
}
-int ath6kl_wmi_probe_report_req_cmd(struct wmi *wmi, bool enable)
+int ath6kl_wmi_probe_report_req_cmd(struct wmi *wmi, u8 if_idx, bool enable)
{
struct sk_buff *skb;
struct wmi_probe_req_report_cmd *p;
@@ -2781,11 +3194,11 @@ int ath6kl_wmi_probe_report_req_cmd(struct wmi *wmi, bool enable)
enable);
p = (struct wmi_probe_req_report_cmd *) skb->data;
p->enable = enable ? 1 : 0;
- return ath6kl_wmi_cmd_send(wmi, skb, WMI_PROBE_REQ_REPORT_CMDID,
+ return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_PROBE_REQ_REPORT_CMDID,
NO_SYNC_WMIFLAG);
}
-int ath6kl_wmi_info_req_cmd(struct wmi *wmi, u32 info_req_flags)
+int ath6kl_wmi_info_req_cmd(struct wmi *wmi, u8 if_idx, u32 info_req_flags)
{
struct sk_buff *skb;
struct wmi_get_p2p_info *p;
@@ -2798,14 +3211,15 @@ int ath6kl_wmi_info_req_cmd(struct wmi *wmi, u32 info_req_flags)
info_req_flags);
p = (struct wmi_get_p2p_info *) skb->data;
p->info_req_flags = cpu_to_le32(info_req_flags);
- return ath6kl_wmi_cmd_send(wmi, skb, WMI_GET_P2P_INFO_CMDID,
+ return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_GET_P2P_INFO_CMDID,
NO_SYNC_WMIFLAG);
}
-int ath6kl_wmi_cancel_remain_on_chnl_cmd(struct wmi *wmi)
+int ath6kl_wmi_cancel_remain_on_chnl_cmd(struct wmi *wmi, u8 if_idx)
{
ath6kl_dbg(ATH6KL_DBG_WMI, "cancel_remain_on_chnl_cmd\n");
- return ath6kl_wmi_simple_cmd(wmi, WMI_CANCEL_REMAIN_ON_CHNL_CMDID);
+ return ath6kl_wmi_simple_cmd(wmi, if_idx,
+ WMI_CANCEL_REMAIN_ON_CHNL_CMDID);
}
static int ath6kl_wmi_control_rx_xtnd(struct wmi *wmi, struct sk_buff *skb)
@@ -2818,7 +3232,6 @@ static int ath6kl_wmi_control_rx_xtnd(struct wmi *wmi, struct sk_buff *skb)
if (skb->len < sizeof(struct wmix_cmd_hdr)) {
ath6kl_err("bad packet 1\n");
- wmi->stat.cmd_len_err++;
return -EINVAL;
}
@@ -2840,7 +3253,6 @@ static int ath6kl_wmi_control_rx_xtnd(struct wmi *wmi, struct sk_buff *skb)
break;
default:
ath6kl_warn("unknown cmd id 0x%x\n", id);
- wmi->stat.cmd_id_err++;
ret = -EINVAL;
break;
}
@@ -2848,12 +3260,19 @@ static int ath6kl_wmi_control_rx_xtnd(struct wmi *wmi, struct sk_buff *skb)
return ret;
}
+static int ath6kl_wmi_roam_tbl_event_rx(struct wmi *wmi, u8 *datap, int len)
+{
+ return ath6kl_debug_roam_tbl_event(wmi->parent_dev, datap, len);
+}
+
/* Control Path */
int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
{
struct wmi_cmd_hdr *cmd;
+ struct ath6kl_vif *vif;
u32 len;
u16 id;
+ u8 if_idx;
u8 *datap;
int ret = 0;
@@ -2863,12 +3282,12 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
if (skb->len < sizeof(struct wmi_cmd_hdr)) {
ath6kl_err("bad packet 1\n");
dev_kfree_skb(skb);
- wmi->stat.cmd_len_err++;
return -EINVAL;
}
cmd = (struct wmi_cmd_hdr *) skb->data;
id = le16_to_cpu(cmd->cmd_id);
+ if_idx = le16_to_cpu(cmd->info1) & WMI_CMD_HDR_IF_ID_MASK;
skb_pull(skb, sizeof(struct wmi_cmd_hdr));
@@ -2879,6 +3298,15 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
ath6kl_dbg_dump(ATH6KL_DBG_WMI_DUMP, NULL, "wmi rx ",
datap, len);
+ vif = ath6kl_get_vif_by_index(wmi->parent_dev, if_idx);
+ if (!vif) {
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "WMI event for unavailable vif, vif_index:%d\n",
+ if_idx);
+ dev_kfree_skb(skb);
+ return -EINVAL;
+ }
+
switch (id) {
case WMI_GET_BITRATE_CMDID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_BITRATE_CMDID\n");
@@ -2898,11 +3326,11 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
break;
case WMI_CONNECT_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CONNECT_EVENTID\n");
- ret = ath6kl_wmi_connect_event_rx(wmi, datap, len);
+ ret = ath6kl_wmi_connect_event_rx(wmi, datap, len, vif);
break;
case WMI_DISCONNECT_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_DISCONNECT_EVENTID\n");
- ret = ath6kl_wmi_disconnect_event_rx(wmi, datap, len);
+ ret = ath6kl_wmi_disconnect_event_rx(wmi, datap, len, vif);
break;
case WMI_PEER_NODE_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_PEER_NODE_EVENTID\n");
@@ -2910,11 +3338,11 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
break;
case WMI_TKIP_MICERR_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_TKIP_MICERR_EVENTID\n");
- ret = ath6kl_wmi_tkip_micerr_event_rx(wmi, datap, len);
+ ret = ath6kl_wmi_tkip_micerr_event_rx(wmi, datap, len, vif);
break;
case WMI_BSSINFO_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_BSSINFO_EVENTID\n");
- ret = ath6kl_wmi_bssinfo_event_rx(wmi, datap, len);
+ ret = ath6kl_wmi_bssinfo_event_rx(wmi, datap, len, vif);
break;
case WMI_REGDOMAIN_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REGDOMAIN_EVENTID\n");
@@ -2926,11 +3354,12 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
break;
case WMI_NEIGHBOR_REPORT_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_NEIGHBOR_REPORT_EVENTID\n");
- ret = ath6kl_wmi_neighbor_report_event_rx(wmi, datap, len);
+ ret = ath6kl_wmi_neighbor_report_event_rx(wmi, datap, len,
+ vif);
break;
case WMI_SCAN_COMPLETE_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_SCAN_COMPLETE_EVENTID\n");
- ret = ath6kl_wmi_scan_complete_rx(wmi, datap, len);
+ ret = ath6kl_wmi_scan_complete_rx(wmi, datap, len, vif);
break;
case WMI_CMDERROR_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CMDERROR_EVENTID\n");
@@ -2938,7 +3367,7 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
break;
case WMI_REPORT_STATISTICS_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REPORT_STATISTICS_EVENTID\n");
- ret = ath6kl_wmi_stats_event_rx(wmi, datap, len);
+ ret = ath6kl_wmi_stats_event_rx(wmi, datap, len, vif);
break;
case WMI_RSSI_THRESHOLD_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_RSSI_THRESHOLD_EVENTID\n");
@@ -2953,6 +3382,7 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
break;
case WMI_REPORT_ROAM_TBL_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REPORT_ROAM_TBL_EVENTID\n");
+ ret = ath6kl_wmi_roam_tbl_event_rx(wmi, datap, len);
break;
case WMI_EXTENSION_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_EXTENSION_EVENTID\n");
@@ -2960,7 +3390,7 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
break;
case WMI_CAC_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CAC_EVENTID\n");
- ret = ath6kl_wmi_cac_event_rx(wmi, datap, len);
+ ret = ath6kl_wmi_cac_event_rx(wmi, datap, len, vif);
break;
case WMI_CHANNEL_CHANGE_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CHANNEL_CHANGE_EVENTID\n");
@@ -2996,7 +3426,6 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
break;
case WMI_GET_WOW_LIST_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_WOW_LIST_EVENTID\n");
- ret = ath6kl_wmi_get_wow_list_event_rx(wmi, datap, len);
break;
case WMI_GET_PMKID_LIST_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_PMKID_LIST_EVENTID\n");
@@ -3004,25 +3433,25 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
break;
case WMI_PSPOLL_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_PSPOLL_EVENTID\n");
- ret = ath6kl_wmi_pspoll_event_rx(wmi, datap, len);
+ ret = ath6kl_wmi_pspoll_event_rx(wmi, datap, len, vif);
break;
case WMI_DTIMEXPIRY_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_DTIMEXPIRY_EVENTID\n");
- ret = ath6kl_wmi_dtimexpiry_event_rx(wmi, datap, len);
+ ret = ath6kl_wmi_dtimexpiry_event_rx(wmi, datap, len, vif);
break;
case WMI_SET_PARAMS_REPLY_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_SET_PARAMS_REPLY_EVENTID\n");
break;
case WMI_ADDBA_REQ_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_ADDBA_REQ_EVENTID\n");
- ret = ath6kl_wmi_addba_req_event_rx(wmi, datap, len);
+ ret = ath6kl_wmi_addba_req_event_rx(wmi, datap, len, vif);
break;
case WMI_ADDBA_RESP_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_ADDBA_RESP_EVENTID\n");
break;
case WMI_DELBA_REQ_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_DELBA_REQ_EVENTID\n");
- ret = ath6kl_wmi_delba_req_event_rx(wmi, datap, len);
+ ret = ath6kl_wmi_delba_req_event_rx(wmi, datap, len, vif);
break;
case WMI_REPORT_BTCOEX_CONFIG_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI,
@@ -3038,21 +3467,21 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
break;
case WMI_REMAIN_ON_CHNL_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REMAIN_ON_CHNL_EVENTID\n");
- ret = ath6kl_wmi_remain_on_chnl_event_rx(wmi, datap, len);
+ ret = ath6kl_wmi_remain_on_chnl_event_rx(wmi, datap, len, vif);
break;
case WMI_CANCEL_REMAIN_ON_CHNL_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI,
"WMI_CANCEL_REMAIN_ON_CHNL_EVENTID\n");
ret = ath6kl_wmi_cancel_remain_on_chnl_event_rx(wmi, datap,
- len);
+ len, vif);
break;
case WMI_TX_STATUS_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_TX_STATUS_EVENTID\n");
- ret = ath6kl_wmi_tx_status_event_rx(wmi, datap, len);
+ ret = ath6kl_wmi_tx_status_event_rx(wmi, datap, len, vif);
break;
case WMI_RX_PROBE_REQ_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_RX_PROBE_REQ_EVENTID\n");
- ret = ath6kl_wmi_rx_probe_req_event_rx(wmi, datap, len);
+ ret = ath6kl_wmi_rx_probe_req_event_rx(wmi, datap, len, vif);
break;
case WMI_P2P_CAPABILITIES_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_P2P_CAPABILITIES_EVENTID\n");
@@ -3060,7 +3489,7 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
break;
case WMI_RX_ACTION_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_RX_ACTION_EVENTID\n");
- ret = ath6kl_wmi_rx_action_event_rx(wmi, datap, len);
+ ret = ath6kl_wmi_rx_action_event_rx(wmi, datap, len, vif);
break;
case WMI_P2P_INFO_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_P2P_INFO_EVENTID\n");
@@ -3068,7 +3497,6 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
break;
default:
ath6kl_dbg(ATH6KL_DBG_WMI, "unknown cmd id 0x%x\n", id);
- wmi->stat.cmd_id_err++;
ret = -EINVAL;
break;
}
@@ -3078,11 +3506,8 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
return ret;
}
-static void ath6kl_wmi_qos_state_init(struct wmi *wmi)
+void ath6kl_wmi_reset(struct wmi *wmi)
{
- if (!wmi)
- return;
-
spin_lock_bh(&wmi->lock);
wmi->fat_pipe_exist = 0;
@@ -3103,16 +3528,9 @@ void *ath6kl_wmi_init(struct ath6kl *dev)
wmi->parent_dev = dev;
- ath6kl_wmi_qos_state_init(wmi);
-
wmi->pwr_mode = REC_POWER;
- wmi->phy_mode = WMI_11G_MODE;
-
- wmi->pair_crypto_type = NONE_CRYPT;
- wmi->grp_crypto_type = NONE_CRYPT;
- wmi->ht_allowed[A_BAND_24GHZ] = 1;
- wmi->ht_allowed[A_BAND_5GHZ] = 1;
+ ath6kl_wmi_reset(wmi);
return wmi;
}
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.h b/drivers/net/wireless/ath/ath6kl/wmi.h
index f8e644d54aa7..42ac311eda4e 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.h
+++ b/drivers/net/wireless/ath/ath6kl/wmi.h
@@ -93,11 +93,6 @@ struct sq_threshold_params {
u8 last_rssi_poll_event;
};
-struct wmi_stats {
- u32 cmd_len_err;
- u32 cmd_id_err;
-};
-
struct wmi_data_sync_bufs {
u8 traffic_class;
struct sk_buff *skb;
@@ -111,32 +106,26 @@ struct wmi_data_sync_bufs {
#define WMM_AC_VO 3 /* voice */
struct wmi {
- bool ready;
u16 stream_exist_for_ac[WMM_NUM_AC];
u8 fat_pipe_exist;
struct ath6kl *parent_dev;
- struct wmi_stats stat;
u8 pwr_mode;
- u8 phy_mode;
- u8 keep_alive_intvl;
spinlock_t lock;
enum htc_endpoint_id ep_id;
struct sq_threshold_params
sq_threshld[SIGNAL_QUALITY_METRICS_NUM_MAX];
- enum crypto_type pair_crypto_type;
- enum crypto_type grp_crypto_type;
bool is_wmm_enabled;
- u8 ht_allowed[A_NUM_BANDS];
u8 traffic_class;
bool is_probe_ssid;
u8 *last_mgmt_tx_frame;
size_t last_mgmt_tx_frame_len;
+ u8 saved_pwr_mode;
};
struct host_app_area {
- u32 wmi_protocol_ver;
-};
+ __le32 wmi_protocol_ver;
+} __packed;
enum wmi_msg_type {
DATA_MSGTYPE = 0x0,
@@ -184,6 +173,8 @@ enum wmi_data_hdr_data_type {
#define WMI_DATA_HDR_META_MASK 0x7
#define WMI_DATA_HDR_META_SHIFT 13
+#define WMI_DATA_HDR_IF_IDX_MASK 0xF
+
struct wmi_data_hdr {
s8 rssi;
@@ -208,6 +199,12 @@ struct wmi_data_hdr {
* b15:b13 - META_DATA_VERSION 0 - 7
*/
__le16 info2;
+
+ /*
+ * usage of info3, 16-bit:
+ * b3:b0 - Interface index
+ * b15:b4 - Reserved
+ */
__le16 info3;
} __packed;
@@ -250,6 +247,11 @@ static inline u8 wmi_data_hdr_get_meta(struct wmi_data_hdr *dhdr)
WMI_DATA_HDR_META_MASK;
}
+static inline u8 wmi_data_hdr_get_if_idx(struct wmi_data_hdr *dhdr)
+{
+ return le16_to_cpu(dhdr->info3) & WMI_DATA_HDR_IF_IDX_MASK;
+}
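+
+/*
+ * Illustrative sketch (not part of the patch): the matching "set" side of
+ * the accessor above. wmi_data_hdr_set_if_idx is a hypothetical helper;
+ * the mask and the cpu_to_le16() packing mirror what this patch does in
+ * ath6kl_wmi_data_sync_send(), where info3 carries only the interface
+ * index in b3:b0.
+ */
+static inline void wmi_data_hdr_set_if_idx(struct wmi_data_hdr *dhdr,
+					    u8 if_idx)
+{
+	dhdr->info3 = cpu_to_le16(if_idx & WMI_DATA_HDR_IF_IDX_MASK);
+}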
+
/* Tx meta version definitions */
#define WMI_MAX_TX_META_SZ 12
#define WMI_META_VERSION_1 0x01
@@ -299,6 +301,8 @@ struct wmi_rx_meta_v2 {
u8 csum_flags;
} __packed;
+#define WMI_CMD_HDR_IF_ID_MASK 0xF
+
/* Control Path */
struct wmi_cmd_hdr {
__le16 cmd_id;
@@ -312,6 +316,11 @@ struct wmi_cmd_hdr {
__le16 reserved;
} __packed;
+static inline u8 wmi_cmd_hdr_get_if_idx(struct wmi_cmd_hdr *chdr)
+{
+ return le16_to_cpu(chdr->info1) & WMI_CMD_HDR_IF_ID_MASK;
+}
+
/* List of WMI commands */
enum wmi_cmd_id {
WMI_CONNECT_CMDID = 0x0001,
@@ -320,6 +329,10 @@ enum wmi_cmd_id {
WMI_SYNCHRONIZE_CMDID,
WMI_CREATE_PSTREAM_CMDID,
WMI_DELETE_PSTREAM_CMDID,
+ /* WMI_START_SCAN_CMDID is to be deprecated. Use
+ * WMI_BEGIN_SCAN_CMDID instead. The new cmd supports P2P mgmt
+ * operations using the station interface.
+ */
WMI_START_SCAN_CMDID,
WMI_SET_SCAN_PARAMS_CMDID,
WMI_SET_BSS_FILTER_CMDID,
@@ -533,12 +546,61 @@ enum wmi_cmd_id {
WMI_GTK_OFFLOAD_OP_CMDID,
WMI_REMAIN_ON_CHNL_CMDID,
WMI_CANCEL_REMAIN_ON_CHNL_CMDID,
+ /* WMI_SEND_ACTION_CMDID is to be deprecated. Use
+ * WMI_SEND_MGMT_CMDID instead. The new cmd supports P2P mgmt
+ * operations using the station interface.
+ */
WMI_SEND_ACTION_CMDID,
WMI_PROBE_REQ_REPORT_CMDID,
WMI_DISABLE_11B_RATES_CMDID,
WMI_SEND_PROBE_RESPONSE_CMDID,
WMI_GET_P2P_INFO_CMDID,
WMI_AP_JOIN_BSS_CMDID,
+
+ WMI_SMPS_ENABLE_CMDID,
+ WMI_SMPS_CONFIG_CMDID,
+ WMI_SET_RATECTRL_PARM_CMDID,
+ /* LPL specific commands */
+ WMI_LPL_FORCE_ENABLE_CMDID,
+ WMI_LPL_SET_POLICY_CMDID,
+ WMI_LPL_GET_POLICY_CMDID,
+ WMI_LPL_GET_HWSTATE_CMDID,
+ WMI_LPL_SET_PARAMS_CMDID,
+ WMI_LPL_GET_PARAMS_CMDID,
+
+ WMI_SET_BUNDLE_PARAM_CMDID,
+
+ /* GreenTx specific commands */
+
+ WMI_GREENTX_PARAMS_CMDID,
+
+ WMI_RTT_MEASREQ_CMDID,
+ WMI_RTT_CAPREQ_CMDID,
+ WMI_RTT_STATUSREQ_CMDID,
+
+ /* WPS Commands */
+ WMI_WPS_START_CMDID,
+ WMI_GET_WPS_STATUS_CMDID,
+
+ /* More P2P commands */
+ WMI_SET_NOA_CMDID,
+ WMI_GET_NOA_CMDID,
+ WMI_SET_OPPPS_CMDID,
+ WMI_GET_OPPPS_CMDID,
+ WMI_ADD_PORT_CMDID,
+ WMI_DEL_PORT_CMDID,
+
+ /* 802.11w cmd */
+ WMI_SET_RSN_CAP_CMDID,
+ WMI_GET_RSN_CAP_CMDID,
+ WMI_SET_IGTK_CMDID,
+
+ WMI_RX_FILTER_COALESCE_FILTER_OP_CMDID,
+ WMI_RX_FILTER_SET_FRAME_TEST_LIST_CMDID,
+
+ WMI_SEND_MGMT_CMDID,
+ WMI_BEGIN_SCAN_CMDID,
+
};
enum wmi_mgmt_frame_type {
@@ -558,6 +620,14 @@ enum network_type {
AP_NETWORK = 0x10,
};
+enum network_subtype {
+ SUBTYPE_NONE,
+ SUBTYPE_BT,
+ SUBTYPE_P2PDEV,
+ SUBTYPE_P2PCLIENT,
+ SUBTYPE_P2PGO,
+};
+
enum dot11_auth_mode {
OPEN_AUTH = 0x01,
SHARED_AUTH = 0x02,
@@ -576,9 +646,6 @@ enum auth_mode {
WPA2_AUTH_CCKM = 0x40,
};
-#define WMI_MIN_CRYPTO_TYPE NONE_CRYPT
-#define WMI_MAX_CRYPTO_TYPE (AES_CRYPT + 1)
-
#define WMI_MIN_KEY_INDEX 0
#define WMI_MAX_KEY_INDEX 3
@@ -617,6 +684,7 @@ enum wmi_connect_ctrl_flags_bits {
CONNECT_CSA_FOLLOW_BSS = 0x0020,
CONNECT_DO_WPA_OFFLOAD = 0x0040,
CONNECT_DO_NOT_DEAUTH = 0x0080,
+ CONNECT_WPS_FLAG = 0x0100,
};
struct wmi_connect_cmd {
@@ -632,6 +700,7 @@ struct wmi_connect_cmd {
__le16 ch;
u8 bssid[ETH_ALEN];
__le32 ctrl_flags;
+ u8 nw_subtype;
} __packed;
/* WMI_RECONNECT_CMDID */
@@ -719,7 +788,12 @@ enum wmi_scan_type {
WMI_SHORT_SCAN = 1,
};
-struct wmi_start_scan_cmd {
+struct wmi_supp_rates {
+ u8 nrates;
+ u8 rates[ATH6KL_RATE_MAXSIZE];
+};
+
+struct wmi_begin_scan_cmd {
__le32 force_fg_scan;
/* for legacy cisco AP compatibility */
@@ -731,9 +805,15 @@ struct wmi_start_scan_cmd {
/* time interval between scans (msec) */
__le32 force_scan_intvl;
+ /* no CCK rates */
+ __le32 no_cck;
+
/* enum wmi_scan_type */
u8 scan_type;
+ /* Supported rates to advertise in the probe request frames */
+ struct wmi_supp_rates supp_rates[IEEE80211_NUM_BANDS];
+
/* how many channels follow */
u8 num_ch;
@@ -741,8 +821,31 @@ struct wmi_start_scan_cmd {
__le16 ch_list[1];
} __packed;
-/* WMI_SET_SCAN_PARAMS_CMDID */
-#define WMI_SHORTSCANRATIO_DEFAULT 3
+/* wmi_start_scan_cmd is to be deprecated. Use
+ * wmi_begin_scan_cmd instead. The new structure supports P2P mgmt
+ * operations using the station interface.
+ */
+struct wmi_start_scan_cmd {
+ __le32 force_fg_scan;
+
+ /* for legacy cisco AP compatibility */
+ __le32 is_legacy;
+
+ /* max duration in the home channel (msec) */
+ __le32 home_dwell_time;
+
+ /* time interval between scans (msec) */
+ __le32 force_scan_intvl;
+
+ /* enum wmi_scan_type */
+ u8 scan_type;
+
+ /* how many channels follow */
+ u8 num_ch;
+
+ /* channels in MHz */
+ __le16 ch_list[1];
+} __packed;
/*
* Warning: scan control flag value of 0xFF is used to disable
@@ -776,13 +879,6 @@ enum wmi_scan_ctrl_flags_bits {
ENABLE_SCAN_ABORT_EVENT = 0x40
};
-#define DEFAULT_SCAN_CTRL_FLAGS \
- (CONNECT_SCAN_CTRL_FLAGS | \
- SCAN_CONNECTED_CTRL_FLAGS | \
- ACTIVE_SCAN_CTRL_FLAGS | \
- ROAM_SCAN_CTRL_FLAGS | \
- ENABLE_AUTO_CTRL_FLAGS)
-
struct wmi_scan_params_cmd {
/* sec */
__le16 fg_start_period;
@@ -1365,14 +1461,20 @@ enum wmi_roam_ctrl {
WMI_SET_LRSSI_SCAN_PARAMS,
};
+enum wmi_roam_mode {
+ WMI_DEFAULT_ROAM_MODE = 1, /* RSSI based roam */
+ WMI_HOST_BIAS_ROAM_MODE = 2, /* Host bias based roam */
+ WMI_LOCK_BSS_MODE = 3, /* Lock to the current BSS */
+};
+
struct bss_bias {
u8 bssid[ETH_ALEN];
- u8 bias;
+ s8 bias;
} __packed;
struct bss_bias_info {
u8 num_bss;
- struct bss_bias bss_bias[1];
+ struct bss_bias bss_bias[0];
} __packed;
struct low_rssi_scan_params {
@@ -1385,10 +1487,11 @@ struct low_rssi_scan_params {
struct roam_ctrl_cmd {
union {
- u8 bssid[ETH_ALEN];
- u8 roam_mode;
- struct bss_bias_info bss;
- struct low_rssi_scan_params params;
+ u8 bssid[ETH_ALEN]; /* WMI_FORCE_ROAM */
+ u8 roam_mode; /* WMI_SET_ROAM_MODE */
+ struct bss_bias_info bss; /* WMI_SET_HOST_BIAS */
+ struct low_rssi_scan_params params; /* WMI_SET_LRSSI_SCAN_PARAMS
+ */
} __packed info;
u8 roam_ctrl;
} __packed;
@@ -1455,6 +1558,10 @@ struct wmi_tkip_micerr_event {
u8 is_mcast;
} __packed;
+enum wmi_scan_status {
+ WMI_SCAN_STATUS_SUCCESS = 0,
+};
+
/* WMI_SCAN_COMPLETE_EVENTID */
struct wmi_scan_complete_event {
a_sle32 status;
@@ -1635,6 +1742,12 @@ struct wmi_bss_roam_info {
u8 reserved;
} __packed;
+struct wmi_target_roam_tbl {
+ __le16 roam_mode;
+ __le16 num_entries;
+ struct wmi_bss_roam_info info[];
+} __packed;
+
/* WMI_CAC_EVENTID */
enum cac_indication {
CAC_INDICATION_ADMISSION = 0x00,
@@ -1771,7 +1884,6 @@ struct wmi_set_appie_cmd {
#define WSC_REG_ACTIVE 1
#define WSC_REG_INACTIVE 0
-#define WOW_MAX_FILTER_LISTS 1
#define WOW_MAX_FILTERS_PER_LIST 4
#define WOW_PATTERN_SIZE 64
#define WOW_MASK_SIZE 64
@@ -1794,17 +1906,52 @@ struct wmi_set_ip_cmd {
__le32 ips[MAX_IP_ADDRS];
} __packed;
-/* WMI_GET_WOW_LIST_CMD reply */
-struct wmi_get_wow_list_reply {
- /* number of patterns in reply */
- u8 num_filters;
+enum ath6kl_wow_filters {
+ WOW_FILTER_SSID = BIT(1),
+ WOW_FILTER_OPTION_MAGIC_PACKET = BIT(2),
+ WOW_FILTER_OPTION_EAP_REQ = BIT(3),
+ WOW_FILTER_OPTION_PATTERNS = BIT(4),
+ WOW_FILTER_OPTION_OFFLOAD_ARP = BIT(5),
+ WOW_FILTER_OPTION_OFFLOAD_NS = BIT(6),
+ WOW_FILTER_OPTION_OFFLOAD_GTK = BIT(7),
+ WOW_FILTER_OPTION_8021X_4WAYHS = BIT(8),
+ WOW_FILTER_OPTION_NLO_DISCVRY = BIT(9),
+ WOW_FILTER_OPTION_NWK_DISASSOC = BIT(10),
+ WOW_FILTER_OPTION_GTK_ERROR = BIT(11),
+ WOW_FILTER_OPTION_TEST_MODE = BIT(15),
+};
+
+enum ath6kl_host_mode {
+ ATH6KL_HOST_MODE_AWAKE,
+ ATH6KL_HOST_MODE_ASLEEP,
+};
+
+struct wmi_set_host_sleep_mode_cmd {
+ __le32 awake;
+ __le32 asleep;
+} __packed;
+
+enum ath6kl_wow_mode {
+ ATH6KL_WOW_MODE_DISABLE,
+ ATH6KL_WOW_MODE_ENABLE,
+};
+
+struct wmi_set_wow_mode_cmd {
+ __le32 enable_wow;
+ __le32 filter;
+ __le16 host_req_delay;
+} __packed;
- /* this is filter # x of total num_filters */
- u8 this_filter_num;
+struct wmi_add_wow_pattern_cmd {
+ u8 filter_list_id;
+ u8 filter_size;
+ u8 filter_offset;
+ u8 filter[0];
+} __packed;
- u8 wow_mode;
- u8 host_mode;
- struct wow_filter wow_filters[1];
+struct wmi_del_wow_pattern_cmd {
+ __le16 filter_list_id;
+ __le16 filter_id;
} __packed;
/* WMI_SET_AKMP_PARAMS_CMD */
@@ -1905,7 +2052,7 @@ struct wmi_tx_complete_event {
* !!! Warning !!!
* -Changing the following values needs compilation of both driver and firmware
*/
-#define AP_MAX_NUM_STA 8
+#define AP_MAX_NUM_STA 10
/* Spl. AID used to set DTIM flag in the beacons */
#define MCAST_AID 0xFF
@@ -1988,6 +2135,10 @@ struct wmi_remain_on_chnl_cmd {
__le32 duration;
} __packed;
+/* wmi_send_action_cmd is to be deprecated. Use
+ * wmi_send_mgmt_cmd instead. The new structure supports P2P mgmt
+ * operations using the station interface.
+ */
struct wmi_send_action_cmd {
__le32 id;
__le32 freq;
@@ -1996,6 +2147,15 @@ struct wmi_send_action_cmd {
u8 data[0];
} __packed;
+struct wmi_send_mgmt_cmd {
+ __le32 id;
+ __le32 freq;
+ __le32 wait;
+ __le32 no_cck;
+ __le16 len;
+ u8 data[0];
+} __packed;
+
struct wmi_tx_status_event {
__le32 id;
u8 ack_status;
@@ -2163,120 +2323,162 @@ int ath6kl_wmi_dix_2_dot3(struct wmi *wmi, struct sk_buff *skb);
int ath6kl_wmi_data_hdr_add(struct wmi *wmi, struct sk_buff *skb,
u8 msg_type, bool more_data,
enum wmi_data_hdr_data_type data_type,
- u8 meta_ver, void *tx_meta_info);
+ u8 meta_ver, void *tx_meta_info, u8 if_idx);
int ath6kl_wmi_dot11_hdr_remove(struct wmi *wmi, struct sk_buff *skb);
int ath6kl_wmi_dot3_2_dix(struct sk_buff *skb);
-int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, struct sk_buff *skb,
- u32 layer2_priority, bool wmm_enabled,
- u8 *ac);
+int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, u8 if_idx,
+ struct sk_buff *skb, u32 layer2_priority,
+ bool wmm_enabled, u8 *ac);
int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb);
-int ath6kl_wmi_cmd_send(struct wmi *wmi, struct sk_buff *skb,
+int ath6kl_wmi_cmd_send(struct wmi *wmi, u8 if_idx, struct sk_buff *skb,
enum wmi_cmd_id cmd_id, enum wmi_sync_flag sync_flag);
-int ath6kl_wmi_connect_cmd(struct wmi *wmi, enum network_type nw_type,
+int ath6kl_wmi_connect_cmd(struct wmi *wmi, u8 if_idx,
+ enum network_type nw_type,
enum dot11_auth_mode dot11_auth_mode,
enum auth_mode auth_mode,
enum crypto_type pairwise_crypto,
u8 pairwise_crypto_len,
enum crypto_type group_crypto,
u8 group_crypto_len, int ssid_len, u8 *ssid,
- u8 *bssid, u16 channel, u32 ctrl_flags);
-
-int ath6kl_wmi_reconnect_cmd(struct wmi *wmi, u8 *bssid, u16 channel);
-int ath6kl_wmi_disconnect_cmd(struct wmi *wmi);
-int ath6kl_wmi_startscan_cmd(struct wmi *wmi, enum wmi_scan_type scan_type,
+ u8 *bssid, u16 channel, u32 ctrl_flags,
+ u8 nw_subtype);
+
+int ath6kl_wmi_reconnect_cmd(struct wmi *wmi, u8 if_idx, u8 *bssid,
+ u16 channel);
+int ath6kl_wmi_disconnect_cmd(struct wmi *wmi, u8 if_idx);
+int ath6kl_wmi_startscan_cmd(struct wmi *wmi, u8 if_idx,
+ enum wmi_scan_type scan_type,
u32 force_fgscan, u32 is_legacy,
u32 home_dwell_time, u32 force_scan_interval,
s8 num_chan, u16 *ch_list);
-int ath6kl_wmi_scanparams_cmd(struct wmi *wmi, u16 fg_start_sec,
+
+int ath6kl_wmi_beginscan_cmd(struct wmi *wmi, u8 if_idx,
+ enum wmi_scan_type scan_type,
+ u32 force_fgscan, u32 is_legacy,
+ u32 home_dwell_time, u32 force_scan_interval,
+ s8 num_chan, u16 *ch_list, u32 no_cck,
+ u32 *rates);
+
+int ath6kl_wmi_scanparams_cmd(struct wmi *wmi, u8 if_idx, u16 fg_start_sec,
u16 fg_end_sec, u16 bg_sec,
u16 minact_chdw_msec, u16 maxact_chdw_msec,
u16 pas_chdw_msec, u8 short_scan_ratio,
u8 scan_ctrl_flag, u32 max_dfsch_act_time,
u16 maxact_scan_per_ssid);
-int ath6kl_wmi_bssfilter_cmd(struct wmi *wmi, u8 filter, u32 ie_mask);
-int ath6kl_wmi_probedssid_cmd(struct wmi *wmi, u8 index, u8 flag,
+int ath6kl_wmi_bssfilter_cmd(struct wmi *wmi, u8 if_idx, u8 filter,
+ u32 ie_mask);
+int ath6kl_wmi_probedssid_cmd(struct wmi *wmi, u8 if_idx, u8 index, u8 flag,
u8 ssid_len, u8 *ssid);
-int ath6kl_wmi_listeninterval_cmd(struct wmi *wmi, u16 listen_interval,
+int ath6kl_wmi_listeninterval_cmd(struct wmi *wmi, u8 if_idx,
+ u16 listen_interval,
u16 listen_beacons);
-int ath6kl_wmi_powermode_cmd(struct wmi *wmi, u8 pwr_mode);
-int ath6kl_wmi_pmparams_cmd(struct wmi *wmi, u16 idle_period,
+int ath6kl_wmi_powermode_cmd(struct wmi *wmi, u8 if_idx, u8 pwr_mode);
+int ath6kl_wmi_pmparams_cmd(struct wmi *wmi, u8 if_idx, u16 idle_period,
u16 ps_poll_num, u16 dtim_policy,
u16 tx_wakup_policy, u16 num_tx_to_wakeup,
u16 ps_fail_event_policy);
-int ath6kl_wmi_disctimeout_cmd(struct wmi *wmi, u8 timeout);
-int ath6kl_wmi_create_pstream_cmd(struct wmi *wmi,
+int ath6kl_wmi_create_pstream_cmd(struct wmi *wmi, u8 if_idx,
struct wmi_create_pstream_cmd *pstream);
-int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 traffic_class, u8 tsid);
+int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 if_idx, u8 traffic_class,
+ u8 tsid);
+int ath6kl_wmi_disctimeout_cmd(struct wmi *wmi, u8 if_idx, u8 timeout);
int ath6kl_wmi_set_rts_cmd(struct wmi *wmi, u16 threshold);
-int ath6kl_wmi_set_lpreamble_cmd(struct wmi *wmi, u8 status,
+int ath6kl_wmi_set_lpreamble_cmd(struct wmi *wmi, u8 if_idx, u8 status,
u8 preamble_policy);
int ath6kl_wmi_get_challenge_resp_cmd(struct wmi *wmi, u32 cookie, u32 source);
int ath6kl_wmi_config_debug_module_cmd(struct wmi *wmi, u32 valid, u32 config);
-int ath6kl_wmi_get_stats_cmd(struct wmi *wmi);
-int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 key_index,
+int ath6kl_wmi_get_stats_cmd(struct wmi *wmi, u8 if_idx);
+int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 if_idx, u8 key_index,
enum crypto_type key_type,
u8 key_usage, u8 key_len,
- u8 *key_rsc, u8 *key_material,
+ u8 *key_rsc, unsigned int key_rsc_len,
+ u8 *key_material,
u8 key_op_ctrl, u8 *mac_addr,
enum wmi_sync_flag sync_flag);
-int ath6kl_wmi_add_krk_cmd(struct wmi *wmi, u8 *krk);
-int ath6kl_wmi_deletekey_cmd(struct wmi *wmi, u8 key_index);
-int ath6kl_wmi_setpmkid_cmd(struct wmi *wmi, const u8 *bssid,
+int ath6kl_wmi_add_krk_cmd(struct wmi *wmi, u8 if_idx, u8 *krk);
+int ath6kl_wmi_deletekey_cmd(struct wmi *wmi, u8 if_idx, u8 key_index);
+int ath6kl_wmi_setpmkid_cmd(struct wmi *wmi, u8 if_idx, const u8 *bssid,
const u8 *pmkid, bool set);
-int ath6kl_wmi_set_tx_pwr_cmd(struct wmi *wmi, u8 dbM);
-int ath6kl_wmi_get_tx_pwr_cmd(struct wmi *wmi);
+int ath6kl_wmi_set_tx_pwr_cmd(struct wmi *wmi, u8 if_idx, u8 dbM);
+int ath6kl_wmi_get_tx_pwr_cmd(struct wmi *wmi, u8 if_idx);
+int ath6kl_wmi_get_roam_tbl_cmd(struct wmi *wmi);
-int ath6kl_wmi_set_wmm_txop(struct wmi *wmi, enum wmi_txop_cfg cfg);
-int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 keep_alive_intvl);
+int ath6kl_wmi_set_wmm_txop(struct wmi *wmi, u8 if_idx, enum wmi_txop_cfg cfg);
+int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 if_idx,
+ u8 keep_alive_intvl);
int ath6kl_wmi_test_cmd(struct wmi *wmi, void *buf, size_t len);
s32 ath6kl_wmi_get_rate(s8 rate_index);
int ath6kl_wmi_set_ip_cmd(struct wmi *wmi, struct wmi_set_ip_cmd *ip_cmd);
+int ath6kl_wmi_set_host_sleep_mode_cmd(struct wmi *wmi, u8 if_idx,
+ enum ath6kl_host_mode host_mode);
+int ath6kl_wmi_set_wow_mode_cmd(struct wmi *wmi, u8 if_idx,
+ enum ath6kl_wow_mode wow_mode,
+ u32 filter, u16 host_req_delay);
+int ath6kl_wmi_add_wow_pattern_cmd(struct wmi *wmi, u8 if_idx,
+ u8 list_id, u8 filter_size,
+ u8 filter_offset, u8 *filter, u8 *mask);
+int ath6kl_wmi_del_wow_pattern_cmd(struct wmi *wmi, u8 if_idx,
+ u16 list_id, u16 filter_id);
int ath6kl_wmi_set_roam_lrssi_cmd(struct wmi *wmi, u8 lrssi);
+int ath6kl_wmi_force_roam_cmd(struct wmi *wmi, const u8 *bssid);
+int ath6kl_wmi_set_roam_mode_cmd(struct wmi *wmi, enum wmi_roam_mode mode);
/* AP mode */
-int ath6kl_wmi_ap_profile_commit(struct wmi *wmip, struct wmi_connect_cmd *p);
+int ath6kl_wmi_ap_profile_commit(struct wmi *wmip, u8 if_idx,
+ struct wmi_connect_cmd *p);
-int ath6kl_wmi_ap_set_mlme(struct wmi *wmip, u8 cmd, const u8 *mac, u16 reason);
+int ath6kl_wmi_ap_set_mlme(struct wmi *wmip, u8 if_idx, u8 cmd,
+ const u8 *mac, u16 reason);
-int ath6kl_wmi_set_pvb_cmd(struct wmi *wmi, u16 aid, bool flag);
+int ath6kl_wmi_set_pvb_cmd(struct wmi *wmi, u8 if_idx, u16 aid, bool flag);
-int ath6kl_wmi_set_rx_frame_format_cmd(struct wmi *wmi, u8 rx_meta_version,
+int ath6kl_wmi_set_rx_frame_format_cmd(struct wmi *wmi, u8 if_idx,
+ u8 rx_meta_version,
bool rx_dot11_hdr, bool defrag_on_host);
-int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 mgmt_frm_type, const u8 *ie,
- u8 ie_len);
+int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 if_idx, u8 mgmt_frm_type,
+ const u8 *ie, u8 ie_len);
/* P2P */
int ath6kl_wmi_disable_11b_rates_cmd(struct wmi *wmi, bool disable);
-int ath6kl_wmi_remain_on_chnl_cmd(struct wmi *wmi, u32 freq, u32 dur);
+int ath6kl_wmi_remain_on_chnl_cmd(struct wmi *wmi, u8 if_idx, u32 freq,
+ u32 dur);
+
+int ath6kl_wmi_send_action_cmd(struct wmi *wmi, u8 if_idx, u32 id, u32 freq,
+ u32 wait, const u8 *data, u16 data_len);
+
+int ath6kl_wmi_send_mgmt_cmd(struct wmi *wmi, u8 if_idx, u32 id, u32 freq,
+ u32 wait, const u8 *data, u16 data_len,
+ u32 no_cck);
-int ath6kl_wmi_send_action_cmd(struct wmi *wmi, u32 id, u32 freq, u32 wait,
- const u8 *data, u16 data_len);
+int ath6kl_wmi_send_probe_response_cmd(struct wmi *wmi, u8 if_idx, u32 freq,
+ const u8 *dst, const u8 *data,
+ u16 data_len);
-int ath6kl_wmi_send_probe_response_cmd(struct wmi *wmi, u32 freq,
- const u8 *dst,
- const u8 *data, u16 data_len);
+int ath6kl_wmi_probe_report_req_cmd(struct wmi *wmi, u8 if_idx, bool enable);
-int ath6kl_wmi_probe_report_req_cmd(struct wmi *wmi, bool enable);
+int ath6kl_wmi_info_req_cmd(struct wmi *wmi, u8 if_idx, u32 info_req_flags);
-int ath6kl_wmi_info_req_cmd(struct wmi *wmi, u32 info_req_flags);
+int ath6kl_wmi_cancel_remain_on_chnl_cmd(struct wmi *wmi, u8 if_idx);
-int ath6kl_wmi_cancel_remain_on_chnl_cmd(struct wmi *wmi);
+int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 if_idx, u8 mgmt_frm_type,
+ const u8 *ie, u8 ie_len);
-int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 mgmt_frm_type, const u8 *ie,
- u8 ie_len);
+void ath6kl_wmi_sscan_timer(unsigned long ptr);
+struct ath6kl_vif *ath6kl_get_vif_by_index(struct ath6kl *ar, u8 if_idx);
void *ath6kl_wmi_init(struct ath6kl *devt);
void ath6kl_wmi_shutdown(struct wmi *wmi);
+void ath6kl_wmi_reset(struct wmi *wmi);
#endif /* WMI_H */
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index d9c08c619a3a..dc6be4afe8eb 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -2,6 +2,9 @@ config ATH9K_HW
tristate
config ATH9K_COMMON
tristate
+config ATH9K_DFS_DEBUGFS
+ def_bool y
+ depends on ATH9K_DEBUGFS && ATH9K_DFS_CERTIFIED
config ATH9K
tristate "Atheros 802.11n wireless cards support"
@@ -25,6 +28,7 @@ config ATH9K
config ATH9K_PCI
bool "Atheros ath9k PCI/PCIe bus support"
+ default y
depends on ATH9K && PCI
---help---
This option enables the PCI bus support in ath9k.
@@ -50,6 +54,25 @@ config ATH9K_DEBUGFS
Also required for changing debug message flags at run time.
+config ATH9K_DFS_CERTIFIED
+ bool "Atheros DFS support for certified platforms"
+ depends on ATH9K && EXPERT
+ default n
+ ---help---
+ This option enables DFS support for initiating radiation on
+ ath9k. There is no way to dynamically detect if a card was DFS
+ certified and as such this is left as a build time option. This
+ option should only be enabled by system integrators that can
+ guarantee that all the platforms that their kernel will run on
+ have obtained appropriate regulatory body certification for a
+ respective Atheros card by using ath9k on the target shipping
+ platforms.
+
+ This is currently only a placeholder for future DFS support,
+ as DFS support requires more components that still need to be
+ developed. At this point enabling this option won't do anything
+ except increase code size.
+
config ATH9K_RATE_CONTROL
bool "Atheros ath9k rate control"
depends on ATH9K
@@ -58,6 +81,14 @@ config ATH9K_RATE_CONTROL
Say Y, if you want to use the ath9k specific rate control
module instead of minstrel_ht.
+config ATH9K_BTCOEX_SUPPORT
+ bool "Atheros ath9k bluetooth coexistence support"
+ depends on ATH9K
+ default y
+ ---help---
+ Say Y, if you want to use the ath9k radios together with
+ Bluetooth modules in the same system.
+
config ATH9K_HTC
tristate "Atheros HTC based wireless cards support"
depends on USB && MAC80211
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index 36ed3c46fec6..da02242499af 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -4,11 +4,14 @@ ath9k-y += beacon.o \
main.o \
recv.o \
xmit.o \
+ mci.o \
ath9k-$(CONFIG_ATH9K_RATE_CONTROL) += rc.o
ath9k-$(CONFIG_ATH9K_PCI) += pci.o
ath9k-$(CONFIG_ATH9K_AHB) += ahb.o
ath9k-$(CONFIG_ATH9K_DEBUGFS) += debug.o
+ath9k-$(CONFIG_ATH9K_DFS_DEBUGFS) += dfs_debug.o
+ath9k-$(CONFIG_ATH9K_DFS_CERTIFIED) += dfs.o
obj-$(CONFIG_ATH9K) += ath9k.o
@@ -33,7 +36,8 @@ ath9k_hw-y:= \
ar9002_mac.o \
ar9003_mac.o \
ar9003_eeprom.o \
- ar9003_paprd.o
+ ar9003_paprd.o \
+ ar9003_mci.o
obj-$(CONFIG_ATH9K_HW) += ath9k_hw.o
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c
index a639b94f7643..bc56f57b393b 100644
--- a/drivers/net/wireless/ath/ath9k/ani.c
+++ b/drivers/net/wireless/ath/ath9k/ani.c
@@ -136,8 +136,8 @@ static void ath9k_ani_restart(struct ath_hw *ah)
cck_base = AR_PHY_COUNTMAX - ah->config.cck_trig_high;
}
- ath_dbg(common, ATH_DBG_ANI,
- "Writing ofdmbase=%u cckbase=%u\n", ofdm_base, cck_base);
+ ath_dbg(common, ANI, "Writing ofdmbase=%u cckbase=%u\n",
+ ofdm_base, cck_base);
ENABLE_REGWRITE_BUFFER(ah);
@@ -268,8 +268,7 @@ static void ath9k_hw_set_ofdm_nil(struct ath_hw *ah, u8 immunityLevel)
aniState->noiseFloor = BEACON_RSSI(ah);
- ath_dbg(common, ATH_DBG_ANI,
- "**** ofdmlevel %d=>%d, rssi=%d[lo=%d hi=%d]\n",
+ ath_dbg(common, ANI, "**** ofdmlevel %d=>%d, rssi=%d[lo=%d hi=%d]\n",
aniState->ofdmNoiseImmunityLevel,
immunityLevel, aniState->noiseFloor,
aniState->rssiThrLow, aniState->rssiThrHigh);
@@ -336,8 +335,7 @@ static void ath9k_hw_set_cck_nil(struct ath_hw *ah, u_int8_t immunityLevel)
const struct ani_cck_level_entry *entry_cck;
aniState->noiseFloor = BEACON_RSSI(ah);
- ath_dbg(common, ATH_DBG_ANI,
- "**** ccklevel %d=>%d, rssi=%d[lo=%d hi=%d]\n",
+ ath_dbg(common, ANI, "**** ccklevel %d=>%d, rssi=%d[lo=%d hi=%d]\n",
aniState->cckNoiseImmunityLevel, immunityLevel,
aniState->noiseFloor, aniState->rssiThrLow,
aniState->rssiThrHigh);
@@ -481,8 +479,7 @@ static void ath9k_ani_reset_old(struct ath_hw *ah, bool is_scanning)
if (ah->opmode != NL80211_IFTYPE_STATION
&& ah->opmode != NL80211_IFTYPE_ADHOC) {
- ath_dbg(common, ATH_DBG_ANI,
- "Reset ANI state opmode %u\n", ah->opmode);
+ ath_dbg(common, ANI, "Reset ANI state opmode %u\n", ah->opmode);
ah->stats.ast_ani_reset++;
if (ah->opmode == NL80211_IFTYPE_AP) {
@@ -582,7 +579,7 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning)
ATH9K_ANI_OFDM_DEF_LEVEL ||
aniState->cckNoiseImmunityLevel !=
ATH9K_ANI_CCK_DEF_LEVEL) {
- ath_dbg(common, ATH_DBG_ANI,
+ ath_dbg(common, ANI,
"Restore defaults: opmode %u chan %d Mhz/0x%x is_scanning=%d ofdm:%d cck:%d\n",
ah->opmode,
chan->channel,
@@ -599,7 +596,7 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning)
/*
* restore historical levels for this channel
*/
- ath_dbg(common, ATH_DBG_ANI,
+ ath_dbg(common, ANI,
"Restore history: opmode %u chan %d Mhz/0x%x is_scanning=%d ofdm:%d cck:%d\n",
ah->opmode,
chan->channel,
@@ -662,7 +659,7 @@ static bool ath9k_hw_ani_read_counters(struct ath_hw *ah)
if (!use_new_ani(ah) && (phyCnt1 < ofdm_base || phyCnt2 < cck_base)) {
if (phyCnt1 < ofdm_base) {
- ath_dbg(common, ATH_DBG_ANI,
+ ath_dbg(common, ANI,
"phyCnt1 0x%x, resetting counter value to 0x%x\n",
phyCnt1, ofdm_base);
REG_WRITE(ah, AR_PHY_ERR_1, ofdm_base);
@@ -670,7 +667,7 @@ static bool ath9k_hw_ani_read_counters(struct ath_hw *ah)
AR_PHY_ERR_OFDM_TIMING);
}
if (phyCnt2 < cck_base) {
- ath_dbg(common, ATH_DBG_ANI,
+ ath_dbg(common, ANI,
"phyCnt2 0x%x, resetting counter value to 0x%x\n",
phyCnt2, cck_base);
REG_WRITE(ah, AR_PHY_ERR_2, cck_base);
@@ -713,7 +710,7 @@ void ath9k_hw_ani_monitor(struct ath_hw *ah, struct ath9k_channel *chan)
cckPhyErrRate = aniState->cckPhyErrCount * 1000 /
aniState->listenTime;
- ath_dbg(common, ATH_DBG_ANI,
+ ath_dbg(common, ANI,
"listenTime=%d OFDM:%d errs=%d/s CCK:%d errs=%d/s ofdm_turn=%d\n",
aniState->listenTime,
aniState->ofdmNoiseImmunityLevel,
@@ -748,7 +745,7 @@ void ath9k_enable_mib_counters(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
- ath_dbg(common, ATH_DBG_ANI, "Enable MIB counters\n");
+ ath_dbg(common, ANI, "Enable MIB counters\n");
ath9k_hw_update_mibstats(ah, &ah->ah_mibStats);
@@ -770,7 +767,7 @@ void ath9k_hw_disable_mib_counters(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
- ath_dbg(common, ATH_DBG_ANI, "Disable MIB counters\n");
+ ath_dbg(common, ANI, "Disable MIB counters\n");
REG_WRITE(ah, AR_MIBC, AR_MIBC_FMC);
ath9k_hw_update_mibstats(ah, &ah->ah_mibStats);
@@ -845,7 +842,7 @@ void ath9k_hw_ani_init(struct ath_hw *ah)
struct ath_common *common = ath9k_hw_common(ah);
int i;
- ath_dbg(common, ATH_DBG_ANI, "Initialize ANI\n");
+ ath_dbg(common, ANI, "Initialize ANI\n");
if (use_new_ani(ah)) {
ah->config.ofdm_trig_high = ATH9K_ANI_OFDM_TRIG_HIGH_NEW;
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index f199e9e25149..f901a17f76ba 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -158,7 +158,7 @@ static void ar5008_hw_force_bias(struct ath_hw *ah, u16 synth_freq)
/* pre-reverse this field */
tmp_reg = ath9k_hw_reverse_bits(new_bias, 3);
- ath_dbg(common, ATH_DBG_CONFIG, "Force rf_pwd_icsyndiv to %1d on %4d\n",
+ ath_dbg(common, CONFIG, "Force rf_pwd_icsyndiv to %1d on %4d\n",
new_bias, synth_freq);
/* swizzle rf_pwd_icsyndiv */
@@ -1053,8 +1053,7 @@ static bool ar5008_hw_ani_control_old(struct ath_hw *ah,
u32 level = param;
if (level >= ARRAY_SIZE(ah->totalSizeDesired)) {
- ath_dbg(common, ATH_DBG_ANI,
- "level out of range (%u > %zu)\n",
+ ath_dbg(common, ANI, "level out of range (%u > %zu)\n",
level, ARRAY_SIZE(ah->totalSizeDesired));
return false;
}
@@ -1157,8 +1156,7 @@ static bool ar5008_hw_ani_control_old(struct ath_hw *ah,
u32 level = param;
if (level >= ARRAY_SIZE(firstep)) {
- ath_dbg(common, ATH_DBG_ANI,
- "level out of range (%u > %zu)\n",
+ ath_dbg(common, ANI, "level out of range (%u > %zu)\n",
level, ARRAY_SIZE(firstep));
return false;
}
@@ -1177,8 +1175,7 @@ static bool ar5008_hw_ani_control_old(struct ath_hw *ah,
u32 level = param;
if (level >= ARRAY_SIZE(cycpwrThr1)) {
- ath_dbg(common, ATH_DBG_ANI,
- "level out of range (%u > %zu)\n",
+ ath_dbg(common, ANI, "level out of range (%u > %zu)\n",
level, ARRAY_SIZE(cycpwrThr1));
return false;
}
@@ -1195,23 +1192,22 @@ static bool ar5008_hw_ani_control_old(struct ath_hw *ah,
case ATH9K_ANI_PRESENT:
break;
default:
- ath_dbg(common, ATH_DBG_ANI, "invalid cmd %u\n", cmd);
+ ath_dbg(common, ANI, "invalid cmd %u\n", cmd);
return false;
}
- ath_dbg(common, ATH_DBG_ANI, "ANI parameters:\n");
- ath_dbg(common, ATH_DBG_ANI,
+ ath_dbg(common, ANI, "ANI parameters:\n");
+ ath_dbg(common, ANI,
"noiseImmunityLevel=%d, spurImmunityLevel=%d, ofdmWeakSigDetectOff=%d\n",
aniState->noiseImmunityLevel,
aniState->spurImmunityLevel,
!aniState->ofdmWeakSigDetectOff);
- ath_dbg(common, ATH_DBG_ANI,
+ ath_dbg(common, ANI,
"cckWeakSigThreshold=%d, firstepLevel=%d, listenTime=%d\n",
aniState->cckWeakSigThreshold,
aniState->firstepLevel,
aniState->listenTime);
- ath_dbg(common, ATH_DBG_ANI,
- "ofdmPhyErrCount=%d, cckPhyErrCount=%d\n\n",
+ ath_dbg(common, ANI, "ofdmPhyErrCount=%d, cckPhyErrCount=%d\n\n",
aniState->ofdmPhyErrCount,
aniState->cckPhyErrCount);
@@ -1295,7 +1291,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
if (!on != aniState->ofdmWeakSigDetectOff) {
- ath_dbg(common, ATH_DBG_ANI,
+ ath_dbg(common, ANI,
"** ch %d: ofdm weak signal: %s=>%s\n",
chan->channel,
!aniState->ofdmWeakSigDetectOff ?
@@ -1313,7 +1309,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
u32 level = param;
if (level >= ARRAY_SIZE(firstep_table)) {
- ath_dbg(common, ATH_DBG_ANI,
+ ath_dbg(common, ANI,
"ATH9K_ANI_FIRSTEP_LEVEL: level out of range (%u > %zu)\n",
level, ARRAY_SIZE(firstep_table));
return false;
@@ -1350,7 +1346,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
AR_PHY_FIND_SIG_FIRSTEP_LOW, value2);
if (level != aniState->firstepLevel) {
- ath_dbg(common, ATH_DBG_ANI,
+ ath_dbg(common, ANI,
"** ch %d: level %d=>%d[def:%d] firstep[level]=%d ini=%d\n",
chan->channel,
aniState->firstepLevel,
@@ -1358,7 +1354,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
ATH9K_ANI_FIRSTEP_LVL_NEW,
value,
aniState->iniDef.firstep);
- ath_dbg(common, ATH_DBG_ANI,
+ ath_dbg(common, ANI,
"** ch %d: level %d=>%d[def:%d] firstep_low[level]=%d ini=%d\n",
chan->channel,
aniState->firstepLevel,
@@ -1378,7 +1374,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
u32 level = param;
if (level >= ARRAY_SIZE(cycpwrThr1_table)) {
- ath_dbg(common, ATH_DBG_ANI,
+ ath_dbg(common, ANI,
"ATH9K_ANI_SPUR_IMMUNITY_LEVEL: level out of range (%u > %zu)\n",
level, ARRAY_SIZE(cycpwrThr1_table));
return false;
@@ -1414,7 +1410,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
AR_PHY_EXT_TIMING5_CYCPWR_THR1, value2);
if (level != aniState->spurImmunityLevel) {
- ath_dbg(common, ATH_DBG_ANI,
+ ath_dbg(common, ANI,
"** ch %d: level %d=>%d[def:%d] cycpwrThr1[level]=%d ini=%d\n",
chan->channel,
aniState->spurImmunityLevel,
@@ -1422,7 +1418,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
ATH9K_ANI_SPUR_IMMUNE_LVL_NEW,
value,
aniState->iniDef.cycpwrThr1);
- ath_dbg(common, ATH_DBG_ANI,
+ ath_dbg(common, ANI,
"** ch %d: level %d=>%d[def:%d] cycpwrThr1Ext[level]=%d ini=%d\n",
chan->channel,
aniState->spurImmunityLevel,
@@ -1448,11 +1444,11 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
case ATH9K_ANI_PRESENT:
break;
default:
- ath_dbg(common, ATH_DBG_ANI, "invalid cmd %u\n", cmd);
+ ath_dbg(common, ANI, "invalid cmd %u\n", cmd);
return false;
}
- ath_dbg(common, ATH_DBG_ANI,
+ ath_dbg(common, ANI,
"ANI parameters: SI=%d, ofdmWS=%s FS=%d MRCcck=%s listenTime=%d ofdmErrs=%d cckErrs=%d\n",
aniState->spurImmunityLevel,
!aniState->ofdmWeakSigDetectOff ? "on" : "off",
@@ -1506,7 +1502,7 @@ static void ar5008_hw_ani_cache_ini_regs(struct ath_hw *ah)
iniDef = &aniState->iniDef;
- ath_dbg(common, ATH_DBG_ANI, "ver %d.%d opmode %u chan %d Mhz/0x%x\n",
+ ath_dbg(common, ANI, "ver %d.%d opmode %u chan %d Mhz/0x%x\n",
ah->hw_version.macVersion,
ah->hw_version.macRev,
ah->opmode,
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
index 88279e325dca..c55e5bbafc46 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
@@ -61,18 +61,16 @@ static void ar9002_hw_setup_calibration(struct ath_hw *ah,
switch (currCal->calData->calType) {
case IQ_MISMATCH_CAL:
REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_IQ);
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"starting IQ Mismatch Calibration\n");
break;
case ADC_GAIN_CAL:
REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_GAIN);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "starting ADC Gain Calibration\n");
+ ath_dbg(common, CALIBRATE, "starting ADC Gain Calibration\n");
break;
case ADC_DC_CAL:
REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_DC_PER);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "starting ADC DC Calibration\n");
+ ath_dbg(common, CALIBRATE, "starting ADC DC Calibration\n");
break;
}
@@ -129,7 +127,7 @@ static void ar9002_hw_iqcal_collect(struct ath_hw *ah)
REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
ah->totalIqCorrMeas[i] +=
(int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
+ ath_dbg(ath9k_hw_common(ah), CALIBRATE,
"%d: Chn %d pmi=0x%08x;pmq=0x%08x;iqcm=0x%08x;\n",
ah->cal_samples, i, ah->totalPowerMeasI[i],
ah->totalPowerMeasQ[i],
@@ -151,7 +149,7 @@ static void ar9002_hw_adc_gaincal_collect(struct ath_hw *ah)
ah->totalAdcQEvenPhase[i] +=
REG_READ(ah, AR_PHY_CAL_MEAS_3(i));
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
+ ath_dbg(ath9k_hw_common(ah), CALIBRATE,
"%d: Chn %d oddi=0x%08x; eveni=0x%08x; oddq=0x%08x; evenq=0x%08x;\n",
ah->cal_samples, i,
ah->totalAdcIOddPhase[i],
@@ -175,7 +173,7 @@ static void ar9002_hw_adc_dccal_collect(struct ath_hw *ah)
ah->totalAdcDcOffsetQEvenPhase[i] +=
(int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_3(i));
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
+ ath_dbg(ath9k_hw_common(ah), CALIBRATE,
"%d: Chn %d oddi=0x%08x; eveni=0x%08x; oddq=0x%08x; evenq=0x%08x;\n",
ah->cal_samples, i,
ah->totalAdcDcOffsetIOddPhase[i],
@@ -198,12 +196,12 @@ static void ar9002_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
powerMeasQ = ah->totalPowerMeasQ[i];
iqCorrMeas = ah->totalIqCorrMeas[i];
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"Starting IQ Cal and Correction for Chain %d\n",
i);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Orignal: Chn %diq_corr_meas = 0x%08x\n",
+ ath_dbg(common, CALIBRATE,
+ "Original: Chn %d iq_corr_meas = 0x%08x\n",
i, ah->totalIqCorrMeas[i]);
iqCorrNeg = 0;
@@ -213,12 +211,11 @@ static void ar9002_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
iqCorrNeg = 1;
}
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_i = 0x%08x\n", i, powerMeasI);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_q = 0x%08x\n", i, powerMeasQ);
- ath_dbg(common, ATH_DBG_CALIBRATE, "iqCorrNeg is 0x%08x\n",
- iqCorrNeg);
+ ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_i = 0x%08x\n",
+ i, powerMeasI);
+ ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_q = 0x%08x\n",
+ i, powerMeasQ);
+ ath_dbg(common, CALIBRATE, "iqCorrNeg is 0x%08x\n", iqCorrNeg);
iCoffDenom = (powerMeasI / 2 + powerMeasQ / 2) / 128;
qCoffDenom = powerMeasQ / 64;
@@ -227,13 +224,13 @@ static void ar9002_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
(qCoffDenom != 0)) {
iCoff = iqCorrMeas / iCoffDenom;
qCoff = powerMeasI / qCoffDenom - 64;
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Chn %d iCoff = 0x%08x\n", i, iCoff);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Chn %d qCoff = 0x%08x\n", i, qCoff);
+ ath_dbg(common, CALIBRATE, "Chn %d iCoff = 0x%08x\n",
+ i, iCoff);
+ ath_dbg(common, CALIBRATE, "Chn %d qCoff = 0x%08x\n",
+ i, qCoff);
iCoff = iCoff & 0x3f;
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"New: Chn %d iCoff = 0x%08x\n", i, iCoff);
if (iqCorrNeg == 0x0)
iCoff = 0x40 - iCoff;
@@ -243,7 +240,7 @@ static void ar9002_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
else if (qCoff <= -16)
qCoff = -16;
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"Chn %d : iCoff = 0x%x qCoff = 0x%x\n",
i, iCoff, qCoff);
@@ -253,7 +250,7 @@ static void ar9002_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(i),
AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF,
qCoff);
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"IQ Cal and Correction done for Chain %d\n",
i);
}
@@ -275,21 +272,17 @@ static void ar9002_hw_adc_gaincal_calibrate(struct ath_hw *ah, u8 numChains)
qOddMeasOffset = ah->totalAdcQOddPhase[i];
qEvenMeasOffset = ah->totalAdcQEvenPhase[i];
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"Starting ADC Gain Cal for Chain %d\n", i);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_odd_i = 0x%08x\n", i,
- iOddMeasOffset);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_even_i = 0x%08x\n", i,
- iEvenMeasOffset);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_odd_q = 0x%08x\n", i,
- qOddMeasOffset);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_even_q = 0x%08x\n", i,
- qEvenMeasOffset);
+ ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_odd_i = 0x%08x\n",
+ i, iOddMeasOffset);
+ ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_even_i = 0x%08x\n",
+ i, iEvenMeasOffset);
+ ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_odd_q = 0x%08x\n",
+ i, qOddMeasOffset);
+ ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_even_q = 0x%08x\n",
+ i, qEvenMeasOffset);
if (iOddMeasOffset != 0 && qEvenMeasOffset != 0) {
iGainMismatch =
@@ -299,19 +292,19 @@ static void ar9002_hw_adc_gaincal_calibrate(struct ath_hw *ah, u8 numChains)
((qOddMeasOffset * 32) /
qEvenMeasOffset) & 0x3f;
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Chn %d gain_mismatch_i = 0x%08x\n", i,
- iGainMismatch);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Chn %d gain_mismatch_q = 0x%08x\n", i,
- qGainMismatch);
+ ath_dbg(common, CALIBRATE,
+ "Chn %d gain_mismatch_i = 0x%08x\n",
+ i, iGainMismatch);
+ ath_dbg(common, CALIBRATE,
+ "Chn %d gain_mismatch_q = 0x%08x\n",
+ i, qGainMismatch);
val = REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i));
val &= 0xfffff000;
val |= (qGainMismatch) | (iGainMismatch << 6);
REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i), val);
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"ADC Gain Cal done for Chain %d\n", i);
}
}
@@ -337,40 +330,36 @@ static void ar9002_hw_adc_dccal_calibrate(struct ath_hw *ah, u8 numChains)
qOddMeasOffset = ah->totalAdcDcOffsetQOddPhase[i];
qEvenMeasOffset = ah->totalAdcDcOffsetQEvenPhase[i];
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"Starting ADC DC Offset Cal for Chain %d\n", i);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_odd_i = %d\n", i,
- iOddMeasOffset);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_even_i = %d\n", i,
- iEvenMeasOffset);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_odd_q = %d\n", i,
- qOddMeasOffset);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_even_q = %d\n", i,
- qEvenMeasOffset);
+ ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_odd_i = %d\n",
+ i, iOddMeasOffset);
+ ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_even_i = %d\n",
+ i, iEvenMeasOffset);
+ ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_odd_q = %d\n",
+ i, qOddMeasOffset);
+ ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_even_q = %d\n",
+ i, qEvenMeasOffset);
iDcMismatch = (((iEvenMeasOffset - iOddMeasOffset) * 2) /
numSamples) & 0x1ff;
qDcMismatch = (((qOddMeasOffset - qEvenMeasOffset) * 2) /
numSamples) & 0x1ff;
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Chn %d dc_offset_mismatch_i = 0x%08x\n", i,
- iDcMismatch);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Chn %d dc_offset_mismatch_q = 0x%08x\n", i,
- qDcMismatch);
+ ath_dbg(common, CALIBRATE,
+ "Chn %d dc_offset_mismatch_i = 0x%08x\n",
+ i, iDcMismatch);
+ ath_dbg(common, CALIBRATE,
+ "Chn %d dc_offset_mismatch_q = 0x%08x\n",
+ i, qDcMismatch);
val = REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i));
val &= 0xc0000fff;
val |= (qDcMismatch << 12) | (iDcMismatch << 21);
REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i), val);
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"ADC DC Offset Cal done for Chain %d\n", i);
}
@@ -560,7 +549,7 @@ static inline void ar9285_hw_pa_cal(struct ath_hw *ah, bool is_reset)
{ 0x7838, 0 },
};
- ath_dbg(common, ATH_DBG_CALIBRATE, "Running PA Calibration\n");
+ ath_dbg(common, CALIBRATE, "Running PA Calibration\n");
/* PA CAL is not needed for high power solution */
if (ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE) ==
@@ -741,7 +730,7 @@ static bool ar9285_hw_cl_cal(struct ath_hw *ah, struct ath9k_channel *chan)
REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL);
if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
AR_PHY_AGC_CONTROL_CAL, 0, AH_WAIT_TIMEOUT)) {
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"offset calibration failed to complete in 1ms; noisy environment?\n");
return false;
}
@@ -755,7 +744,7 @@ static bool ar9285_hw_cl_cal(struct ath_hw *ah, struct ath9k_channel *chan)
REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL);
if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL,
0, AH_WAIT_TIMEOUT)) {
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"offset calibration failed to complete in 1ms; noisy environment?\n");
return false;
}
@@ -851,7 +840,7 @@ static bool ar9002_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
AR_PHY_AGC_CONTROL_CAL,
0, AH_WAIT_TIMEOUT)) {
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"offset calibration failed to complete in 1ms; noisy environment?\n");
return false;
}
@@ -886,22 +875,21 @@ static bool ar9002_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
if (ar9002_hw_is_cal_supported(ah, chan, ADC_GAIN_CAL)) {
INIT_CAL(&ah->adcgain_caldata);
INSERT_CAL(ah, &ah->adcgain_caldata);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "enabling ADC Gain Calibration.\n");
+ ath_dbg(common, CALIBRATE,
+ "enabling ADC Gain Calibration\n");
}
if (ar9002_hw_is_cal_supported(ah, chan, ADC_DC_CAL)) {
INIT_CAL(&ah->adcdc_caldata);
INSERT_CAL(ah, &ah->adcdc_caldata);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "enabling ADC DC Calibration.\n");
+ ath_dbg(common, CALIBRATE,
+ "enabling ADC DC Calibration\n");
}
if (ar9002_hw_is_cal_supported(ah, chan, IQ_MISMATCH_CAL)) {
INIT_CAL(&ah->iq_caldata);
INSERT_CAL(ah, &ah->iq_caldata);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "enabling IQ Calibration.\n");
+ ath_dbg(common, CALIBRATE, "enabling IQ Calibration\n");
}
ah->cal_list_curr = ah->cal_list;
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
index b5920168606d..7b6417b5212e 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
@@ -107,7 +107,7 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
}
if (isr & AR_ISR_RXORN) {
- ath_dbg(common, ATH_DBG_INTERRUPT,
+ ath_dbg(common, INTERRUPT,
"receive FIFO overrun interrupt\n");
}
@@ -143,24 +143,24 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
if (fatal_int) {
if (sync_cause & AR_INTR_SYNC_HOST1_FATAL) {
- ath_dbg(common, ATH_DBG_ANY,
+ ath_dbg(common, ANY,
"received PCI FATAL interrupt\n");
}
if (sync_cause & AR_INTR_SYNC_HOST1_PERR) {
- ath_dbg(common, ATH_DBG_ANY,
+ ath_dbg(common, ANY,
"received PCI PERR interrupt\n");
}
*masked |= ATH9K_INT_FATAL;
}
if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) {
- ath_dbg(common, ATH_DBG_INTERRUPT,
+ ath_dbg(common, INTERRUPT,
"AR_INTR_SYNC_RADM_CPL_TIMEOUT\n");
REG_WRITE(ah, AR_RC, AR_RC_HOSTIF);
REG_WRITE(ah, AR_RC, 0);
*masked |= ATH9K_INT_FATAL;
}
if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT) {
- ath_dbg(common, ATH_DBG_INTERRUPT,
+ ath_dbg(common, INTERRUPT,
"AR_INTR_SYNC_LOCAL_TIMEOUT\n");
}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index 12a730dcb500..8e70f0bc073e 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -18,6 +18,7 @@
#include "hw-ops.h"
#include "ar9003_phy.h"
#include "ar9003_rtt.h"
+#include "ar9003_mci.h"
#define MAX_MEASUREMENT MAX_IQCAL_MEASUREMENT
#define MAX_MAG_DELTA 11
@@ -51,7 +52,7 @@ static void ar9003_hw_setup_calibration(struct ath_hw *ah,
currCal->calData->calCountMax);
REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_IQ);
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"starting IQ Mismatch Calibration\n");
/* Kick-off cal */
@@ -63,7 +64,7 @@ static void ar9003_hw_setup_calibration(struct ath_hw *ah,
REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_THERM,
AR_PHY_65NM_CH0_THERM_START, 1);
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"starting Temperature Compensation Calibration\n");
break;
}
@@ -193,7 +194,7 @@ static void ar9003_hw_iqcal_collect(struct ath_hw *ah)
REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
ah->totalIqCorrMeas[i] +=
(int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
+ ath_dbg(ath9k_hw_common(ah), CALIBRATE,
"%d: Chn %d pmi=0x%08x;pmq=0x%08x;iqcm=0x%08x;\n",
ah->cal_samples, i, ah->totalPowerMeasI[i],
ah->totalPowerMeasQ[i],
@@ -220,12 +221,11 @@ static void ar9003_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
powerMeasQ = ah->totalPowerMeasQ[i];
iqCorrMeas = ah->totalIqCorrMeas[i];
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Starting IQ Cal and Correction for Chain %d\n",
- i);
+ ath_dbg(common, CALIBRATE,
+ "Starting IQ Cal and Correction for Chain %d\n", i);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Orignal: Chn %diq_corr_meas = 0x%08x\n",
+ ath_dbg(common, CALIBRATE,
+ "Original: Chn %d iq_corr_meas = 0x%08x\n",
i, ah->totalIqCorrMeas[i]);
iqCorrNeg = 0;
@@ -235,12 +235,11 @@ static void ar9003_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
iqCorrNeg = 1;
}
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_i = 0x%08x\n", i, powerMeasI);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_q = 0x%08x\n", i, powerMeasQ);
- ath_dbg(common, ATH_DBG_CALIBRATE, "iqCorrNeg is 0x%08x\n",
- iqCorrNeg);
+ ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_i = 0x%08x\n",
+ i, powerMeasI);
+ ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_q = 0x%08x\n",
+ i, powerMeasQ);
+ ath_dbg(common, CALIBRATE, "iqCorrNeg is 0x%08x\n", iqCorrNeg);
iCoffDenom = (powerMeasI / 2 + powerMeasQ / 2) / 256;
qCoffDenom = powerMeasQ / 64;
@@ -248,10 +247,10 @@ static void ar9003_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
if ((iCoffDenom != 0) && (qCoffDenom != 0)) {
iCoff = iqCorrMeas / iCoffDenom;
qCoff = powerMeasI / qCoffDenom - 64;
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Chn %d iCoff = 0x%08x\n", i, iCoff);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Chn %d qCoff = 0x%08x\n", i, qCoff);
+ ath_dbg(common, CALIBRATE, "Chn %d iCoff = 0x%08x\n",
+ i, iCoff);
+ ath_dbg(common, CALIBRATE, "Chn %d qCoff = 0x%08x\n",
+ i, qCoff);
/* Force bounds on iCoff */
if (iCoff >= 63)
@@ -272,10 +271,10 @@ static void ar9003_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
iCoff = iCoff & 0x7f;
qCoff = qCoff & 0x7f;
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"Chn %d : iCoff = 0x%x qCoff = 0x%x\n",
i, iCoff, qCoff);
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"Register offset (0x%04x) before update = 0x%x\n",
offset_array[i],
REG_READ(ah, offset_array[i]));
@@ -286,25 +285,25 @@ static void ar9003_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
REG_RMW_FIELD(ah, offset_array[i],
AR_PHY_RX_IQCAL_CORR_IQCORR_Q_Q_COFF,
qCoff);
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"Register offset (0x%04x) QI COFF (bitfields 0x%08x) after update = 0x%x\n",
offset_array[i],
AR_PHY_RX_IQCAL_CORR_IQCORR_Q_I_COFF,
REG_READ(ah, offset_array[i]));
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"Register offset (0x%04x) QQ COFF (bitfields 0x%08x) after update = 0x%x\n",
offset_array[i],
AR_PHY_RX_IQCAL_CORR_IQCORR_Q_Q_COFF,
REG_READ(ah, offset_array[i]));
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"IQ Cal and Correction done for Chain %d\n", i);
}
}
REG_SET_BIT(ah, AR_PHY_RX_IQCAL_CORR_B0,
AR_PHY_RX_IQCAL_CORR_IQCORR_ENABLE);
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"IQ Cal and Correction (offset 0x%04x) enabled (bit position 0x%08x). New Value 0x%08x\n",
(unsigned) (AR_PHY_RX_IQCAL_CORR_B0),
AR_PHY_RX_IQCAL_CORR_IQCORR_ENABLE,
@@ -348,7 +347,7 @@ static bool ar9003_hw_solve_iq_cal(struct ath_hw *ah,
f2 = (f1 * f1 + f3 * f3) / result_shift;
if (!f2) {
- ath_dbg(common, ATH_DBG_CALIBRATE, "Divide by 0\n");
+ ath_dbg(common, CALIBRATE, "Divide by 0\n");
return false;
}
@@ -469,7 +468,7 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
if ((i2_p_q2_a0_d0 == 0) || (i2_p_q2_a0_d1 == 0) ||
(i2_p_q2_a1_d0 == 0) || (i2_p_q2_a1_d1 == 0)) {
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"Divide by 0:\n"
"a0_d0=%d\n"
"a0_d1=%d\n"
@@ -509,8 +508,7 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
mag2 = ar9003_hw_find_mag_approx(ah, cos_2phi_2, sin_2phi_2);
if ((mag1 == 0) || (mag2 == 0)) {
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Divide by 0: mag1=%d, mag2=%d\n",
+ ath_dbg(common, CALIBRATE, "Divide by 0: mag1=%d, mag2=%d\n",
mag1, mag2);
return false;
}
@@ -528,8 +526,8 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
mag_a0_d0, phs_a0_d0,
mag_a1_d0,
phs_a1_d0, solved_eq)) {
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Call to ar9003_hw_solve_iq_cal() failed.\n");
+ ath_dbg(common, CALIBRATE,
+ "Call to ar9003_hw_solve_iq_cal() failed\n");
return false;
}
@@ -538,12 +536,12 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
mag_rx = solved_eq[2];
phs_rx = solved_eq[3];
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"chain %d: mag mismatch=%d phase mismatch=%d\n",
chain_idx, mag_tx/res_scale, phs_tx/res_scale);
if (res_scale == mag_tx) {
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"Divide by 0: mag_tx=%d, res_scale=%d\n",
mag_tx, res_scale);
return false;
@@ -556,8 +554,7 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
q_q_coff = (mag_corr_tx * 128 / res_scale);
q_i_coff = (phs_corr_tx * 256 / res_scale);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "tx chain %d: mag corr=%d phase corr=%d\n",
+ ath_dbg(common, CALIBRATE, "tx chain %d: mag corr=%d phase corr=%d\n",
chain_idx, q_q_coff, q_i_coff);
if (q_i_coff < -63)
@@ -571,12 +568,11 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
iqc_coeff[0] = (q_q_coff * 128) + q_i_coff;
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "tx chain %d: iq corr coeff=%x\n",
+ ath_dbg(common, CALIBRATE, "tx chain %d: iq corr coeff=%x\n",
chain_idx, iqc_coeff[0]);
if (-mag_rx == res_scale) {
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"Divide by 0: mag_rx=%d, res_scale=%d\n",
mag_rx, res_scale);
return false;
@@ -589,8 +585,7 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
q_q_coff = (mag_corr_rx * 128 / res_scale);
q_i_coff = (phs_corr_rx * 256 / res_scale);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "rx chain %d: mag corr=%d phase corr=%d\n",
+ ath_dbg(common, CALIBRATE, "rx chain %d: mag corr=%d phase corr=%d\n",
chain_idx, q_q_coff, q_i_coff);
if (q_i_coff < -63)
@@ -604,8 +599,7 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
iqc_coeff[1] = (q_q_coff * 128) + q_i_coff;
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "rx chain %d: iq corr coeff=%x\n",
+ ath_dbg(common, CALIBRATE, "rx chain %d: iq corr coeff=%x\n",
chain_idx, iqc_coeff[1]);
return true;
@@ -752,8 +746,7 @@ static bool ar9003_hw_tx_iq_cal_run(struct ath_hw *ah)
if (!ath9k_hw_wait(ah, AR_PHY_TX_IQCAL_START,
AR_PHY_TX_IQCAL_START_DO_CAL, 0,
AH_WAIT_TIMEOUT)) {
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Tx IQ Cal is not completed.\n");
+ ath_dbg(common, CALIBRATE, "Tx IQ Cal is not completed\n");
return false;
}
return true;
@@ -791,13 +784,13 @@ static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah, bool is_reusable)
nmeasurement = MAX_MEASUREMENT;
for (im = 0; im < nmeasurement; im++) {
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Doing Tx IQ Cal for chain %d.\n", i);
+ ath_dbg(common, CALIBRATE,
+ "Doing Tx IQ Cal for chain %d\n", i);
if (REG_READ(ah, txiqcal_status[i]) &
AR_PHY_TX_IQCAL_STATUS_FAILED) {
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Tx IQ Cal failed for chain %d.\n", i);
+ ath_dbg(common, CALIBRATE,
+ "Tx IQ Cal failed for chain %d\n", i);
goto tx_iqcal_fail;
}
@@ -823,18 +816,16 @@ static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah, bool is_reusable)
iq_res[idx + 1] = 0xffff & REG_READ(ah,
chan_info_tab[i] + offset);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "IQ RES[%d]=0x%x"
- "IQ_RES[%d]=0x%x\n",
+ ath_dbg(common, CALIBRATE,
+ "IQ_RES[%d]=0x%x IQ_RES[%d]=0x%x\n",
idx, iq_res[idx], idx + 1,
iq_res[idx + 1]);
}
if (!ar9003_hw_calc_iq_corr(ah, i, iq_res,
coeff.iqc_coeff)) {
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Failed in calculation of \
- IQ correction.\n");
+ ath_dbg(common, CALIBRATE,
+ "Failed in calculation of IQ correction\n");
goto tx_iqcal_fail;
}
@@ -854,7 +845,7 @@ static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah, bool is_reusable)
return;
tx_iqcal_fail:
- ath_dbg(common, ATH_DBG_CALIBRATE, "Tx IQ Cal failed\n");
+ ath_dbg(common, CALIBRATE, "Tx IQ Cal failed\n");
return;
}
@@ -934,10 +925,12 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
{
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_hw_cal_data *caldata = ah->caldata;
+ struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
bool txiqcal_done = false, txclcal_done = false;
bool is_reusable = true, status = true;
bool run_rtt_cal = false, run_agc_cal;
bool rtt = !!(ah->caps.hw_caps & ATH9K_HW_CAP_RTT);
+ bool mci = !!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI);
u32 agc_ctrl = 0, agc_supp_cals = AR_PHY_AGC_CONTROL_OFFSET_CAL |
AR_PHY_AGC_CONTROL_FLTR_CAL |
AR_PHY_AGC_CONTROL_PKDET_CAL;
@@ -950,7 +943,7 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
if (!ar9003_hw_rtt_restore(ah, chan))
run_rtt_cal = true;
- ath_dbg(common, ATH_DBG_CALIBRATE, "RTT restore %s\n",
+ ath_dbg(common, CALIBRATE, "RTT restore %s\n",
run_rtt_cal ? "failed" : "succeed");
}
run_agc_cal = run_rtt_cal;
@@ -1005,6 +998,31 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
} else if (caldata && !caldata->done_txiqcal_once)
run_agc_cal = true;
+ if (mci && IS_CHAN_2GHZ(chan) &&
+ (mci_hw->bt_state == MCI_BT_AWAKE) &&
+ run_agc_cal &&
+ !(mci_hw->config & ATH_MCI_CONFIG_DISABLE_MCI_CAL)) {
+
+ u32 pld[4] = {0, 0, 0, 0};
+
+ /* send CAL_REQ only when BT is AWAKE. */
+ ath_dbg(common, MCI, "MCI send WLAN_CAL_REQ 0x%x\n",
+ mci_hw->wlan_cal_seq);
+ MCI_GPM_SET_CAL_TYPE(pld, MCI_GPM_WLAN_CAL_REQ);
+ pld[MCI_GPM_WLAN_CAL_W_SEQUENCE] = mci_hw->wlan_cal_seq++;
+ ar9003_mci_send_message(ah, MCI_GPM, 0, pld, 16, true, false);
+
+ /* Wait BT_CAL_GRANT for 50ms */
+ ath_dbg(common, MCI, "MCI wait for BT_CAL_GRANT\n");
+
+ if (ar9003_mci_wait_for_gpm(ah, MCI_GPM_BT_CAL_GRANT, 0, 50000))
+ ath_dbg(common, MCI, "MCI got BT_CAL_GRANT\n");
+ else {
+ is_reusable = false;
+ ath_dbg(common, MCI, "\nMCI BT is not responding\n");
+ }
+ }
+
txiqcal_done = ar9003_hw_tx_iq_cal_run(ah);
REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
udelay(5);
@@ -1022,6 +1040,21 @@ skip_tx_iqcal:
AR_PHY_AGC_CONTROL_CAL,
0, AH_WAIT_TIMEOUT);
}
+
+ if (mci && IS_CHAN_2GHZ(chan) &&
+ (mci_hw->bt_state == MCI_BT_AWAKE) &&
+ run_agc_cal &&
+ !(mci_hw->config & ATH_MCI_CONFIG_DISABLE_MCI_CAL)) {
+
+ u32 pld[4] = {0, 0, 0, 0};
+
+ ath_dbg(common, MCI, "MCI Send WLAN_CAL_DONE 0x%x\n",
+ mci_hw->wlan_cal_done);
+ MCI_GPM_SET_CAL_TYPE(pld, MCI_GPM_WLAN_CAL_DONE);
+ pld[MCI_GPM_WLAN_CAL_W_SEQUENCE] = mci_hw->wlan_cal_done++;
+ ar9003_mci_send_message(ah, MCI_GPM, 0, pld, 16, true, false);
+ }
+
if (rtt && !run_rtt_cal) {
agc_ctrl |= agc_supp_cals;
REG_WRITE(ah, AR_PHY_AGC_CONTROL, agc_ctrl);
@@ -1031,9 +1064,8 @@ skip_tx_iqcal:
if (run_rtt_cal)
ar9003_hw_rtt_disable(ah);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "offset calibration failed to complete in 1ms;"
- "noisy environment?\n");
+ ath_dbg(common, CALIBRATE,
+ "offset calibration failed to complete in 1ms; noisy environment?\n");
return false;
}
@@ -1092,15 +1124,14 @@ skip_tx_iqcal:
if (ah->supp_cals & IQ_MISMATCH_CAL) {
INIT_CAL(&ah->iq_caldata);
INSERT_CAL(ah, &ah->iq_caldata);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "enabling IQ Calibration.\n");
+ ath_dbg(common, CALIBRATE, "enabling IQ Calibration\n");
}
if (ah->supp_cals & TEMP_COMP_CAL) {
INIT_CAL(&ah->tempCompCalData);
INSERT_CAL(ah, &ah->tempCompCalData);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "enabling Temperature Compensation Calibration.\n");
+ ath_dbg(common, CALIBRATE,
+ "enabling Temperature Compensation Calibration\n");
}
/* Initialize current pointer to first element in list */
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index 3b262ba6b172..9fbcbddea165 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -121,10 +121,8 @@ static const struct ar9300_eeprom ar9300_default = {
* if the register is per chain
*/
.noiseFloorThreshCh = {-1, 0, 0},
- .ob = {1, 1, 1},/* 3 chain */
- .db_stage2 = {1, 1, 1}, /* 3 chain */
- .db_stage3 = {0, 0, 0},
- .db_stage4 = {0, 0, 0},
+ .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ .quick_drop = 0,
.xpaBiasLvl = 0,
.txFrameToDataStart = 0x0e,
.txFrameToPaOn = 0x0e,
@@ -144,7 +142,7 @@ static const struct ar9300_eeprom ar9300_default = {
},
.base_ext1 = {
.ant_div_control = 0,
- .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
},
.calFreqPier2G = {
FREQ2FBIN(2412, 1),
@@ -323,10 +321,8 @@ static const struct ar9300_eeprom ar9300_default = {
.spurChans = {0, 0, 0, 0, 0},
/* noiseFloorThreshCh Check if the register is per chain */
.noiseFloorThreshCh = {-1, 0, 0},
- .ob = {3, 3, 3}, /* 3 chain */
- .db_stage2 = {3, 3, 3}, /* 3 chain */
- .db_stage3 = {3, 3, 3}, /* doesn't exist for 2G */
- .db_stage4 = {3, 3, 3}, /* don't exist for 2G */
+ .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ .quick_drop = 0,
.xpaBiasLvl = 0,
.txFrameToDataStart = 0x0e,
.txFrameToPaOn = 0x0e,
@@ -698,10 +694,8 @@ static const struct ar9300_eeprom ar9300_x113 = {
* if the register is per chain
*/
.noiseFloorThreshCh = {-1, 0, 0},
- .ob = {1, 1, 1},/* 3 chain */
- .db_stage2 = {1, 1, 1}, /* 3 chain */
- .db_stage3 = {0, 0, 0},
- .db_stage4 = {0, 0, 0},
+ .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ .quick_drop = 0,
.xpaBiasLvl = 0,
.txFrameToDataStart = 0x0e,
.txFrameToPaOn = 0x0e,
@@ -721,7 +715,7 @@ static const struct ar9300_eeprom ar9300_x113 = {
},
.base_ext1 = {
.ant_div_control = 0,
- .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
},
.calFreqPier2G = {
FREQ2FBIN(2412, 1),
@@ -900,10 +894,8 @@ static const struct ar9300_eeprom ar9300_x113 = {
.spurChans = {FREQ2FBIN(5500, 0), 0, 0, 0, 0},
/* noiseFloorThreshCh Check if the register is per chain */
.noiseFloorThreshCh = {-1, 0, 0},
- .ob = {3, 3, 3}, /* 3 chain */
- .db_stage2 = {3, 3, 3}, /* 3 chain */
- .db_stage3 = {3, 3, 3}, /* doesn't exist for 2G */
- .db_stage4 = {3, 3, 3}, /* don't exist for 2G */
+ .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ .quick_drop = 0,
.xpaBiasLvl = 0xf,
.txFrameToDataStart = 0x0e,
.txFrameToPaOn = 0x0e,
@@ -1276,10 +1268,8 @@ static const struct ar9300_eeprom ar9300_h112 = {
* if the register is per chain
*/
.noiseFloorThreshCh = {-1, 0, 0},
- .ob = {1, 1, 1},/* 3 chain */
- .db_stage2 = {1, 1, 1}, /* 3 chain */
- .db_stage3 = {0, 0, 0},
- .db_stage4 = {0, 0, 0},
+ .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ .quick_drop = 0,
.xpaBiasLvl = 0,
.txFrameToDataStart = 0x0e,
.txFrameToPaOn = 0x0e,
@@ -1291,20 +1281,20 @@ static const struct ar9300_eeprom ar9300_h112 = {
.txEndToRxOn = 0x2,
.txFrameToXpaOn = 0xe,
.thresh62 = 28,
- .papdRateMaskHt20 = LE32(0x80c080),
- .papdRateMaskHt40 = LE32(0x80c080),
+ .papdRateMaskHt20 = LE32(0x0c80c080),
+ .papdRateMaskHt40 = LE32(0x0080c080),
.futureModal = {
0, 0, 0, 0, 0, 0, 0, 0,
},
},
.base_ext1 = {
.ant_div_control = 0,
- .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
},
.calFreqPier2G = {
FREQ2FBIN(2412, 1),
FREQ2FBIN(2437, 1),
- FREQ2FBIN(2472, 1),
+ FREQ2FBIN(2462, 1),
},
/* ar9300_cal_data_per_freq_op_loop 2g */
.calPierData2G = {
@@ -1314,7 +1304,7 @@ static const struct ar9300_eeprom ar9300_h112 = {
},
.calTarget_freqbin_Cck = {
FREQ2FBIN(2412, 1),
- FREQ2FBIN(2484, 1),
+ FREQ2FBIN(2472, 1),
},
.calTarget_freqbin_2G = {
FREQ2FBIN(2412, 1),
@@ -1478,10 +1468,8 @@ static const struct ar9300_eeprom ar9300_h112 = {
.spurChans = {0, 0, 0, 0, 0},
/* noiseFloorThreshCh Check if the register is per chain */
.noiseFloorThreshCh = {-1, 0, 0},
- .ob = {3, 3, 3}, /* 3 chain */
- .db_stage2 = {3, 3, 3}, /* 3 chain */
- .db_stage3 = {3, 3, 3}, /* doesn't exist for 2G */
- .db_stage4 = {3, 3, 3}, /* don't exist for 2G */
+ .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ .quick_drop = 0,
.xpaBiasLvl = 0,
.txFrameToDataStart = 0x0e,
.txFrameToPaOn = 0x0e,
@@ -1515,7 +1503,7 @@ static const struct ar9300_eeprom ar9300_h112 = {
FREQ2FBIN(5500, 0),
FREQ2FBIN(5600, 0),
FREQ2FBIN(5700, 0),
- FREQ2FBIN(5825, 0)
+ FREQ2FBIN(5785, 0)
},
.calPierData5G = {
{
@@ -1854,10 +1842,8 @@ static const struct ar9300_eeprom ar9300_x112 = {
* if the register is per chain
*/
.noiseFloorThreshCh = {-1, 0, 0},
- .ob = {1, 1, 1},/* 3 chain */
- .db_stage2 = {1, 1, 1}, /* 3 chain */
- .db_stage3 = {0, 0, 0},
- .db_stage4 = {0, 0, 0},
+ .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ .quick_drop = 0,
.xpaBiasLvl = 0,
.txFrameToDataStart = 0x0e,
.txFrameToPaOn = 0x0e,
@@ -1877,7 +1863,7 @@ static const struct ar9300_eeprom ar9300_x112 = {
},
.base_ext1 = {
.ant_div_control = 0,
- .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
},
.calFreqPier2G = {
FREQ2FBIN(2412, 1),
@@ -2056,10 +2042,8 @@ static const struct ar9300_eeprom ar9300_x112 = {
.spurChans = {0, 0, 0, 0, 0},
/* noiseFloorThreshch check if the register is per chain */
.noiseFloorThreshCh = {-1, 0, 0},
- .ob = {3, 3, 3}, /* 3 chain */
- .db_stage2 = {3, 3, 3}, /* 3 chain */
- .db_stage3 = {3, 3, 3}, /* doesn't exist for 2G */
- .db_stage4 = {3, 3, 3}, /* don't exist for 2G */
+ .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ .quick_drop = 0,
.xpaBiasLvl = 0,
.txFrameToDataStart = 0x0e,
.txFrameToPaOn = 0x0e,
@@ -2431,10 +2415,8 @@ static const struct ar9300_eeprom ar9300_h116 = {
* if the register is per chain
*/
.noiseFloorThreshCh = {-1, 0, 0},
- .ob = {1, 1, 1},/* 3 chain */
- .db_stage2 = {1, 1, 1}, /* 3 chain */
- .db_stage3 = {0, 0, 0},
- .db_stage4 = {0, 0, 0},
+ .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ .quick_drop = 0,
.xpaBiasLvl = 0,
.txFrameToDataStart = 0x0e,
.txFrameToPaOn = 0x0e,
@@ -2454,12 +2436,12 @@ static const struct ar9300_eeprom ar9300_h116 = {
},
.base_ext1 = {
.ant_div_control = 0,
- .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
},
.calFreqPier2G = {
FREQ2FBIN(2412, 1),
FREQ2FBIN(2437, 1),
- FREQ2FBIN(2472, 1),
+ FREQ2FBIN(2462, 1),
},
/* ar9300_cal_data_per_freq_op_loop 2g */
.calPierData2G = {
@@ -2633,10 +2615,8 @@ static const struct ar9300_eeprom ar9300_h116 = {
.spurChans = {0, 0, 0, 0, 0},
/* noiseFloorThreshCh Check if the register is per chain */
.noiseFloorThreshCh = {-1, 0, 0},
- .ob = {3, 3, 3}, /* 3 chain */
- .db_stage2 = {3, 3, 3}, /* 3 chain */
- .db_stage3 = {3, 3, 3}, /* doesn't exist for 2G */
- .db_stage4 = {3, 3, 3}, /* don't exist for 2G */
+ .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ .quick_drop = 0,
.xpaBiasLvl = 0,
.txFrameToDataStart = 0x0e,
.txFrameToPaOn = 0x0e,
@@ -2663,7 +2643,7 @@ static const struct ar9300_eeprom ar9300_h116 = {
.xatten1MarginHigh = {0, 0, 0}
},
.calFreqPier5G = {
- FREQ2FBIN(5180, 0),
+ FREQ2FBIN(5160, 0),
FREQ2FBIN(5220, 0),
FREQ2FBIN(5320, 0),
FREQ2FBIN(5400, 0),
@@ -3023,6 +3003,8 @@ static u32 ath9k_hw_ar9300_get_eeprom(struct ath_hw *ah,
return eep->modalHeader5G.antennaGain;
case EEP_ANTENNA_GAIN_2G:
return eep->modalHeader2G.antennaGain;
+ case EEP_QUICK_DROP:
+ return pBase->miscConfiguration & BIT(1);
default:
return 0;
}
@@ -3061,8 +3043,7 @@ static bool ar9300_read_eeprom(struct ath_hw *ah, int address, u8 *buffer,
int i;
if ((address < 0) || ((address + count) / 2 > AR9300_EEPROM_SIZE - 1)) {
- ath_dbg(common, ATH_DBG_EEPROM,
- "eeprom address not in range\n");
+ ath_dbg(common, EEPROM, "eeprom address not in range\n");
return false;
}
@@ -3093,8 +3074,8 @@ static bool ar9300_read_eeprom(struct ath_hw *ah, int address, u8 *buffer,
return true;
error:
- ath_dbg(common, ATH_DBG_EEPROM,
- "unable to read eeprom region at offset %d\n", address);
+ ath_dbg(common, EEPROM, "unable to read eeprom region at offset %d\n",
+ address);
return false;
}
@@ -3178,13 +3159,13 @@ static bool ar9300_uncompress_block(struct ath_hw *ah,
length &= 0xff;
if (length > 0 && spot >= 0 && spot+length <= mdataSize) {
- ath_dbg(common, ATH_DBG_EEPROM,
+ ath_dbg(common, EEPROM,
"Restore at %d: spot=%d offset=%d length=%d\n",
it, spot, offset, length);
memcpy(&mptr[spot], &block[it+2], length);
spot += length;
} else if (length > 0) {
- ath_dbg(common, ATH_DBG_EEPROM,
+ ath_dbg(common, EEPROM,
"Bad restore at %d: spot=%d offset=%d length=%d\n",
it, spot, offset, length);
return false;
@@ -3206,13 +3187,13 @@ static int ar9300_compress_decision(struct ath_hw *ah,
switch (code) {
case _CompressNone:
if (length != mdata_size) {
- ath_dbg(common, ATH_DBG_EEPROM,
+ ath_dbg(common, EEPROM,
"EEPROM structure size mismatch memory=%d eeprom=%d\n",
mdata_size, length);
return -1;
}
memcpy(mptr, (u8 *) (word + COMP_HDR_LEN), length);
- ath_dbg(common, ATH_DBG_EEPROM,
+ ath_dbg(common, EEPROM,
"restored eeprom %d: uncompressed, length %d\n",
it, length);
break;
@@ -3221,22 +3202,21 @@ static int ar9300_compress_decision(struct ath_hw *ah,
} else {
eep = ar9003_eeprom_struct_find_by_id(reference);
if (eep == NULL) {
- ath_dbg(common, ATH_DBG_EEPROM,
+ ath_dbg(common, EEPROM,
"can't find reference eeprom struct %d\n",
reference);
return -1;
}
memcpy(mptr, eep, mdata_size);
}
- ath_dbg(common, ATH_DBG_EEPROM,
+ ath_dbg(common, EEPROM,
"restore eeprom %d: block, reference %d, length %d\n",
it, reference, length);
ar9300_uncompress_block(ah, mptr, mdata_size,
(u8 *) (word + COMP_HDR_LEN), length);
break;
default:
- ath_dbg(common, ATH_DBG_EEPROM,
- "unknown compression code %d\n", code);
+ ath_dbg(common, EEPROM, "unknown compression code %d\n", code);
return -1;
}
return 0;
@@ -3312,34 +3292,32 @@ static int ar9300_eeprom_restore_internal(struct ath_hw *ah,
cptr = AR9300_BASE_ADDR_512;
else
cptr = AR9300_BASE_ADDR;
- ath_dbg(common, ATH_DBG_EEPROM,
- "Trying EEPROM access at Address 0x%04x\n", cptr);
+ ath_dbg(common, EEPROM, "Trying EEPROM access at Address 0x%04x\n",
+ cptr);
if (ar9300_check_eeprom_header(ah, read, cptr))
goto found;
cptr = AR9300_BASE_ADDR_512;
- ath_dbg(common, ATH_DBG_EEPROM,
- "Trying EEPROM access at Address 0x%04x\n", cptr);
+ ath_dbg(common, EEPROM, "Trying EEPROM access at Address 0x%04x\n",
+ cptr);
if (ar9300_check_eeprom_header(ah, read, cptr))
goto found;
read = ar9300_read_otp;
cptr = AR9300_BASE_ADDR;
- ath_dbg(common, ATH_DBG_EEPROM,
- "Trying OTP access at Address 0x%04x\n", cptr);
+ ath_dbg(common, EEPROM, "Trying OTP access at Address 0x%04x\n", cptr);
if (ar9300_check_eeprom_header(ah, read, cptr))
goto found;
cptr = AR9300_BASE_ADDR_512;
- ath_dbg(common, ATH_DBG_EEPROM,
- "Trying OTP access at Address 0x%04x\n", cptr);
+ ath_dbg(common, EEPROM, "Trying OTP access at Address 0x%04x\n", cptr);
if (ar9300_check_eeprom_header(ah, read, cptr))
goto found;
goto fail;
found:
- ath_dbg(common, ATH_DBG_EEPROM, "Found valid EEPROM data\n");
+ ath_dbg(common, EEPROM, "Found valid EEPROM data\n");
for (it = 0; it < MSTATE; it++) {
if (!read(ah, cptr, word, COMP_HDR_LEN))
@@ -3350,13 +3328,12 @@ found:
ar9300_comp_hdr_unpack(word, &code, &reference,
&length, &major, &minor);
- ath_dbg(common, ATH_DBG_EEPROM,
+ ath_dbg(common, EEPROM,
"Found block at %x: code=%d ref=%d length=%d major=%d minor=%d\n",
cptr, code, reference, length, major, minor);
if ((!AR_SREV_9485(ah) && length >= 1024) ||
(AR_SREV_9485(ah) && length > EEPROM_DATA_LEN_9485)) {
- ath_dbg(common, ATH_DBG_EEPROM,
- "Skipping bad header\n");
+ ath_dbg(common, EEPROM, "Skipping bad header\n");
cptr -= COMP_HDR_LEN;
continue;
}
@@ -3365,13 +3342,13 @@ found:
read(ah, cptr, word, COMP_HDR_LEN + osize + COMP_CKSUM_LEN);
checksum = ar9300_comp_cksum(&word[COMP_HDR_LEN], length);
mchecksum = get_unaligned_le16(&word[COMP_HDR_LEN + osize]);
- ath_dbg(common, ATH_DBG_EEPROM,
- "checksum %x %x\n", checksum, mchecksum);
+ ath_dbg(common, EEPROM, "checksum %x %x\n",
+ checksum, mchecksum);
if (checksum == mchecksum) {
ar9300_compress_decision(ah, it, code, reference, mptr,
word, length, mdata_size);
} else {
- ath_dbg(common, ATH_DBG_EEPROM,
+ ath_dbg(common, EEPROM,
"skipping block with bad checksum\n");
}
cptr -= (COMP_HDR_LEN + osize + COMP_CKSUM_LEN);
@@ -3428,25 +3405,14 @@ static u32 ar9003_dump_modal_eeprom(char *buf, u32 len, u32 size,
PR_EEP("Chain0 NF Threshold", modal_hdr->noiseFloorThreshCh[0]);
PR_EEP("Chain1 NF Threshold", modal_hdr->noiseFloorThreshCh[1]);
PR_EEP("Chain2 NF Threshold", modal_hdr->noiseFloorThreshCh[2]);
+ PR_EEP("Quick Drop", modal_hdr->quick_drop);
+ PR_EEP("txEndToXpaOff", modal_hdr->txEndToXpaOff);
PR_EEP("xPA Bias Level", modal_hdr->xpaBiasLvl);
PR_EEP("txFrameToDataStart", modal_hdr->txFrameToDataStart);
PR_EEP("txFrameToPaOn", modal_hdr->txFrameToPaOn);
PR_EEP("txFrameToXpaOn", modal_hdr->txFrameToXpaOn);
PR_EEP("txClip", modal_hdr->txClip);
PR_EEP("ADC Desired size", modal_hdr->adcDesiredSize);
- PR_EEP("Chain0 ob", modal_hdr->ob[0]);
- PR_EEP("Chain1 ob", modal_hdr->ob[1]);
- PR_EEP("Chain2 ob", modal_hdr->ob[2]);
-
- PR_EEP("Chain0 db_stage2", modal_hdr->db_stage2[0]);
- PR_EEP("Chain1 db_stage2", modal_hdr->db_stage2[1]);
- PR_EEP("Chain2 db_stage2", modal_hdr->db_stage2[2]);
- PR_EEP("Chain0 db_stage3", modal_hdr->db_stage3[0]);
- PR_EEP("Chain1 db_stage3", modal_hdr->db_stage3[1]);
- PR_EEP("Chain2 db_stage3", modal_hdr->db_stage3[2]);
- PR_EEP("Chain0 db_stage4", modal_hdr->db_stage4[0]);
- PR_EEP("Chain1 db_stage4", modal_hdr->db_stage4[1]);
- PR_EEP("Chain2 db_stage4", modal_hdr->db_stage4[2]);
return len;
}
@@ -3503,6 +3469,7 @@ static u32 ath9k_hw_ar9003_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
PR_EEP("Internal regulator", !!(pBase->featureEnable & BIT(4)));
PR_EEP("Enable Paprd", !!(pBase->featureEnable & BIT(5)));
PR_EEP("Driver Strength", !!(pBase->miscConfiguration & BIT(0)));
+ PR_EEP("Quick Drop", !!(pBase->miscConfiguration & BIT(1)));
PR_EEP("Chain mask Reduce", (pBase->miscConfiguration >> 0x3) & 0x1);
PR_EEP("Write enable Gpio", pBase->eepromWriteEnableGpio);
PR_EEP("WLAN Disable Gpio", pBase->wlanDisableGpio);
@@ -3571,13 +3538,13 @@ static void ar9003_hw_xpa_bias_level_apply(struct ath_hw *ah, bool is2ghz)
static u16 ar9003_switch_com_spdt_get(struct ath_hw *ah, bool is_2ghz)
{
struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
- __le32 val;
+ __le16 val;
if (is_2ghz)
val = eep->modalHeader2G.switchcomspdt;
else
val = eep->modalHeader5G.switchcomspdt;
- return le32_to_cpu(val);
+ return le16_to_cpu(val);
}
@@ -3965,6 +3932,40 @@ static void ar9003_hw_apply_tuning_caps(struct ath_hw *ah)
}
}
+static void ar9003_hw_quick_drop_apply(struct ath_hw *ah, u16 freq)
+{
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ int quick_drop = ath9k_hw_ar9300_get_eeprom(ah, EEP_QUICK_DROP);
+ s32 t[3], f[3] = {5180, 5500, 5785};
+
+ if (!quick_drop)
+ return;
+
+ if (freq < 4000)
+ quick_drop = eep->modalHeader2G.quick_drop;
+ else {
+ t[0] = eep->base_ext1.quick_drop_low;
+ t[1] = eep->modalHeader5G.quick_drop;
+ t[2] = eep->base_ext1.quick_drop_high;
+ quick_drop = ar9003_hw_power_interpolate(freq, f, t, 3);
+ }
+ REG_RMW_FIELD(ah, AR_PHY_AGC, AR_PHY_AGC_QUICK_DROP, quick_drop);
+}
+
+static void ar9003_hw_txend_to_xpa_off_apply(struct ath_hw *ah, u16 freq)
+{
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ u32 value;
+
+ value = (freq < 4000) ? eep->modalHeader2G.txEndToXpaOff :
+ eep->modalHeader5G.txEndToXpaOff;
+
+ REG_RMW_FIELD(ah, AR_PHY_XPA_TIMING_CTL,
+ AR_PHY_XPA_TIMING_CTL_TX_END_XPAB_OFF, value);
+ REG_RMW_FIELD(ah, AR_PHY_XPA_TIMING_CTL,
+ AR_PHY_XPA_TIMING_CTL_TX_END_XPAA_OFF, value);
+}
+
static void ath9k_hw_ar9300_set_board_values(struct ath_hw *ah,
struct ath9k_channel *chan)
{
@@ -3972,10 +3973,12 @@ static void ath9k_hw_ar9300_set_board_values(struct ath_hw *ah,
ar9003_hw_ant_ctrl_apply(ah, IS_CHAN_2GHZ(chan));
ar9003_hw_drive_strength_apply(ah);
ar9003_hw_atten_apply(ah, chan);
+ ar9003_hw_quick_drop_apply(ah, chan->channel);
if (!AR_SREV_9330(ah) && !AR_SREV_9340(ah))
ar9003_hw_internal_regulator_apply(ah);
if (AR_SREV_9485(ah) || AR_SREV_9330(ah) || AR_SREV_9340(ah))
ar9003_hw_apply_tuning_caps(ah);
+ ar9003_hw_txend_to_xpa_off_apply(ah, chan->channel);
}
static void ath9k_hw_ar9300_set_addac(struct ath_hw *ah,
@@ -4416,8 +4419,8 @@ static void ar9003_hw_set_target_power_eeprom(struct ath_hw *ah, u16 freq,
is2GHz) + ht40PowerIncForPdadc;
for (i = 0; i < ar9300RateSize; i++) {
- ath_dbg(common, ATH_DBG_EEPROM,
- "TPC[%02d] 0x%08x\n", i, targetPowerValT2[i]);
+ ath_dbg(common, EEPROM, "TPC[%02d] 0x%08x\n",
+ i, targetPowerValT2[i]);
}
}
@@ -4436,7 +4439,7 @@ static int ar9003_hw_cal_pier_get(struct ath_hw *ah,
struct ath_common *common = ath9k_hw_common(ah);
if (ichain >= AR9300_MAX_CHAINS) {
- ath_dbg(common, ATH_DBG_EEPROM,
+ ath_dbg(common, EEPROM,
"Invalid chain index, must be less than %d\n",
AR9300_MAX_CHAINS);
return -1;
@@ -4444,7 +4447,7 @@ static int ar9003_hw_cal_pier_get(struct ath_hw *ah,
if (mode) { /* 5GHz */
if (ipier >= AR9300_NUM_5G_CAL_PIERS) {
- ath_dbg(common, ATH_DBG_EEPROM,
+ ath_dbg(common, EEPROM,
"Invalid 5GHz cal pier index, must be less than %d\n",
AR9300_NUM_5G_CAL_PIERS);
return -1;
@@ -4454,7 +4457,7 @@ static int ar9003_hw_cal_pier_get(struct ath_hw *ah,
is2GHz = 0;
} else {
if (ipier >= AR9300_NUM_2G_CAL_PIERS) {
- ath_dbg(common, ATH_DBG_EEPROM,
+ ath_dbg(common, EEPROM,
"Invalid 2GHz cal pier index, must be less than %d\n",
AR9300_NUM_2G_CAL_PIERS);
return -1;
@@ -4616,8 +4619,7 @@ static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency)
/* interpolate */
for (ichain = 0; ichain < AR9300_MAX_CHAINS; ichain++) {
- ath_dbg(common, ATH_DBG_EEPROM,
- "ch=%d f=%d low=%d %d h=%d %d\n",
+ ath_dbg(common, EEPROM, "ch=%d f=%d low=%d %d h=%d %d\n",
ichain, frequency, lfrequency[ichain],
lcorrection[ichain], hfrequency[ichain],
hcorrection[ichain]);
@@ -4672,7 +4674,7 @@ static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency)
ar9003_hw_power_control_override(ah, frequency, correction, voltage,
temperature);
- ath_dbg(common, ATH_DBG_EEPROM,
+ ath_dbg(common, EEPROM,
"for frequency=%d, calibration correction = %d %d %d\n",
frequency, correction[0], correction[1], correction[2]);
@@ -4771,7 +4773,7 @@ static void ar9003_hw_set_power_per_rate_table(struct ath_hw *ah,
{
struct ath_common *common = ath9k_hw_common(ah);
struct ar9300_eeprom *pEepData = &ah->eeprom.ar9300_eep;
- u16 twiceMaxEdgePower = MAX_RATE_POWER;
+ u16 twiceMaxEdgePower;
int i;
u16 scaledPower = 0, minCtlPower;
static const u16 ctlModesFor11a[] = {
@@ -4858,7 +4860,7 @@ static void ar9003_hw_set_power_per_rate_table(struct ath_hw *ah,
else
freq = centers.ctl_center;
- ath_dbg(common, ATH_DBG_REGULATORY,
+ ath_dbg(common, REGULATORY,
"LOOP-Mode ctlMode %d < %d, isHt40CtlMode %d, EXT_ADDITIVE %d\n",
ctlMode, numCtlModes, isHt40CtlMode,
(pCtlMode[ctlMode] & EXT_ADDITIVE));
@@ -4872,8 +4874,9 @@ static void ar9003_hw_set_power_per_rate_table(struct ath_hw *ah,
ctlNum = AR9300_NUM_CTLS_5G;
}
+ twiceMaxEdgePower = MAX_RATE_POWER;
for (i = 0; (i < ctlNum) && ctlIndex[i]; i++) {
- ath_dbg(common, ATH_DBG_REGULATORY,
+ ath_dbg(common, REGULATORY,
"LOOP-Ctlidx %d: cfgCtl 0x%2.2x pCtlMode 0x%2.2x ctlIndex 0x%2.2x chan %d\n",
i, cfgCtl, pCtlMode[ctlMode], ctlIndex[i],
chan->channel);
@@ -4915,7 +4918,7 @@ static void ar9003_hw_set_power_per_rate_table(struct ath_hw *ah,
minCtlPower = (u8)min(twiceMaxEdgePower, scaledPower);
- ath_dbg(common, ATH_DBG_REGULATORY,
+ ath_dbg(common, REGULATORY,
"SEL-Min ctlMode %d pCtlMode %d 2xMaxEdge %d sP %d minCtlPwr %d\n",
ctlMode, pCtlMode[ctlMode], twiceMaxEdgePower,
scaledPower, minCtlPower);
@@ -5039,7 +5042,7 @@ static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah,
target_power_val_t2_eep[i]) >
paprd_scale_factor)) {
ah->paprd_ratemask &= ~(1 << i);
- ath_dbg(common, ATH_DBG_EEPROM,
+ ath_dbg(common, EEPROM,
"paprd disabled for mcs %d\n", i);
}
}
@@ -5051,12 +5054,14 @@ static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah,
regulatory->max_power_level = targetPowerValT2[i];
}
+ ath9k_hw_update_regulatory_maxpower(ah);
+
if (test)
return;
for (i = 0; i < ar9300RateSize; i++) {
- ath_dbg(common, ATH_DBG_EEPROM,
- "TPC[%02d] 0x%08x\n", i, targetPowerValT2[i]);
+ ath_dbg(common, EEPROM, "TPC[%02d] 0x%08x\n",
+ i, targetPowerValT2[i]);
}
ah->txpower_limit = regulatory->max_power_level;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
index 6335a867527e..bb223fe82816 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
@@ -216,10 +216,8 @@ struct ar9300_modal_eep_header {
u8 spurChans[AR_EEPROM_MODAL_SPURS];
/* 3 Check if the register is per chain */
int8_t noiseFloorThreshCh[AR9300_MAX_CHAINS];
- u8 ob[AR9300_MAX_CHAINS];
- u8 db_stage2[AR9300_MAX_CHAINS];
- u8 db_stage3[AR9300_MAX_CHAINS];
- u8 db_stage4[AR9300_MAX_CHAINS];
+ u8 reserved[11];
+ int8_t quick_drop;
u8 xpaBiasLvl;
u8 txFrameToDataStart;
u8 txFrameToPaOn;
@@ -269,7 +267,9 @@ struct cal_ctl_data_5g {
struct ar9300_BaseExtension_1 {
u8 ant_div_control;
- u8 future[13];
+ u8 future[11];
+ int8_t quick_drop_low;
+ int8_t quick_drop_high;
} __packed;
struct ar9300_BaseExtension_2 {
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
index ccde784a842f..09b8c9dbf78f 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
@@ -175,20 +175,24 @@ static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
u32 isr = 0;
u32 mask2 = 0;
struct ath9k_hw_capabilities *pCap = &ah->caps;
- u32 sync_cause = 0;
struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 sync_cause = 0, async_cause;
- if (REG_READ(ah, AR_INTR_ASYNC_CAUSE) & AR_INTR_MAC_IRQ) {
+ async_cause = REG_READ(ah, AR_INTR_ASYNC_CAUSE);
+
+ if (async_cause & (AR_INTR_MAC_IRQ | AR_INTR_ASYNC_MASK_MCI)) {
if ((REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M)
== AR_RTC_STATUS_ON)
isr = REG_READ(ah, AR_ISR);
}
+
sync_cause = REG_READ(ah, AR_INTR_SYNC_CAUSE) & AR_INTR_SYNC_DEFAULT;
*masked = 0;
- if (!isr && !sync_cause)
+ if (!isr && !sync_cause && !async_cause)
return false;
if (isr) {
@@ -294,6 +298,33 @@ static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
ar9003_hw_bb_watchdog_read(ah);
}
+ if (async_cause & AR_INTR_ASYNC_MASK_MCI) {
+ u32 raw_intr, rx_msg_intr;
+
+ rx_msg_intr = REG_READ(ah, AR_MCI_INTERRUPT_RX_MSG_RAW);
+ raw_intr = REG_READ(ah, AR_MCI_INTERRUPT_RAW);
+
+ if ((raw_intr == 0xdeadbeef) || (rx_msg_intr == 0xdeadbeef))
+ ath_dbg(common, MCI,
+ "MCI gets 0xdeadbeef during MCI int processing new raw_intr=0x%08x, new rx_msg_raw=0x%08x, raw_intr=0x%08x, rx_msg_raw=0x%08x\n",
+ raw_intr, rx_msg_intr, mci->raw_intr,
+ mci->rx_msg_intr);
+ else {
+ mci->rx_msg_intr |= rx_msg_intr;
+ mci->raw_intr |= raw_intr;
+ *masked |= ATH9K_INT_MCI;
+
+ if (rx_msg_intr & AR_MCI_INTERRUPT_RX_MSG_CONT_INFO)
+ mci->cont_status =
+ REG_READ(ah, AR_MCI_CONT_STATUS);
+
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW, rx_msg_intr);
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RAW, raw_intr);
+ ath_dbg(common, MCI, "AR_INTR_SYNC_MCI\n");
+
+ }
+ }
+
if (sync_cause) {
if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) {
REG_WRITE(ah, AR_RC, AR_RC_HOSTIF);
@@ -302,7 +333,7 @@ static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
}
if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT)
- ath_dbg(common, ATH_DBG_INTERRUPT,
+ ath_dbg(common, INTERRUPT,
"AR_INTR_SYNC_LOCAL_TIMEOUT\n");
REG_WRITE(ah, AR_INTR_SYNC_CAUSE_CLR, sync_cause);
@@ -333,7 +364,7 @@ static int ar9003_hw_proc_txdesc(struct ath_hw *ah, void *ds,
if ((MS(ads->ds_info, AR_DescId) != ATHEROS_VENDOR_ID) ||
(MS(ads->ds_info, AR_TxRxDesc) != 1)) {
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_XMIT,
+ ath_dbg(ath9k_hw_common(ah), XMIT,
"Tx Descriptor error %x\n", ads->ds_info);
memset(ads, 0, sizeof(*ads));
return -EIO;
@@ -526,10 +557,11 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
rxs->rs_status |= ATH9K_RXERR_DECRYPT;
else if (rxsp->status11 & AR_MichaelErr)
rxs->rs_status |= ATH9K_RXERR_MIC;
- if (rxsp->status11 & AR_KeyMiss)
- rxs->rs_status |= ATH9K_RXERR_KEYMISS;
}
+ if (rxsp->status11 & AR_KeyMiss)
+ rxs->rs_status |= ATH9K_RXERR_KEYMISS;
+
return 0;
}
EXPORT_SYMBOL(ath9k_hw_process_rxdesc_edma);
@@ -541,7 +573,7 @@ void ath9k_hw_reset_txstatus_ring(struct ath_hw *ah)
memset((void *) ah->ts_ring, 0,
ah->ts_size * sizeof(struct ar9003_txs));
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_XMIT,
+ ath_dbg(ath9k_hw_common(ah), XMIT,
"TS Start 0x%x End 0x%x Virt %p, Size %d\n",
ah->ts_paddr_start, ah->ts_paddr_end,
ah->ts_ring, ah->ts_size);
@@ -552,7 +584,7 @@ void ath9k_hw_reset_txstatus_ring(struct ath_hw *ah)
void ath9k_hw_setup_statusring(struct ath_hw *ah, void *ts_start,
u32 ts_paddr_start,
- u8 size)
+ u16 size)
{
ah->ts_paddr_start = ts_paddr_start;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.h b/drivers/net/wireless/ath/ath9k/ar9003_mac.h
index c50449387bf1..e203b51e968b 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.h
@@ -118,5 +118,5 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah,
void ath9k_hw_reset_txstatus_ring(struct ath_hw *ah);
void ath9k_hw_setup_statusring(struct ath_hw *ah, void *ts_start,
u32 ts_paddr_start,
- u8 size);
+ u16 size);
#endif
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mci.c b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
new file mode 100644
index 000000000000..709520c6835b
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
@@ -0,0 +1,1493 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/export.h>
+#include "hw.h"
+#include "ar9003_phy.h"
+#include "ar9003_mci.h"
+
+static void ar9003_mci_reset_req_wakeup(struct ath_hw *ah)
+{
+ if (!AR_SREV_9462_20(ah))
+ return;
+
+ REG_RMW_FIELD(ah, AR_MCI_COMMAND2,
+ AR_MCI_COMMAND2_RESET_REQ_WAKEUP, 1);
+ udelay(1);
+ REG_RMW_FIELD(ah, AR_MCI_COMMAND2,
+ AR_MCI_COMMAND2_RESET_REQ_WAKEUP, 0);
+}
+
+static int ar9003_mci_wait_for_interrupt(struct ath_hw *ah, u32 address,
+ u32 bit_position, int time_out)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ while (time_out) {
+
+ if (REG_READ(ah, address) & bit_position) {
+
+ REG_WRITE(ah, address, bit_position);
+
+ if (address == AR_MCI_INTERRUPT_RX_MSG_RAW) {
+
+ if (bit_position &
+ AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE)
+ ar9003_mci_reset_req_wakeup(ah);
+
+ if (bit_position &
+ (AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING |
+ AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING))
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
+ AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE);
+
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
+ AR_MCI_INTERRUPT_RX_MSG);
+ }
+ break;
+ }
+
+ udelay(10);
+ time_out -= 10;
+
+ if (time_out < 0)
+ break;
+ }
+
+ if (time_out <= 0) {
+ ath_dbg(common, MCI,
+ "MCI Wait for Reg 0x%08x = 0x%08x timeout\n",
+ address, bit_position);
+ ath_dbg(common, MCI,
+ "MCI INT_RAW = 0x%08x, RX_MSG_RAW = 0x%08x\n",
+ REG_READ(ah, AR_MCI_INTERRUPT_RAW),
+ REG_READ(ah, AR_MCI_INTERRUPT_RX_MSG_RAW));
+ time_out = 0;
+ }
+
+ return time_out;
+}
+
+void ar9003_mci_remote_reset(struct ath_hw *ah, bool wait_done)
+{
+ u32 payload[4] = { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffff00};
+
+ if (!ATH9K_HW_CAP_MCI)
+ return;
+
+ ar9003_mci_send_message(ah, MCI_REMOTE_RESET, 0, payload, 16,
+ wait_done, false);
+ udelay(5);
+}
+
+void ar9003_mci_send_lna_transfer(struct ath_hw *ah, bool wait_done)
+{
+ u32 payload = 0x00000000;
+
+ if (!ATH9K_HW_CAP_MCI)
+ return;
+
+ ar9003_mci_send_message(ah, MCI_LNA_TRANS, 0, &payload, 1,
+ wait_done, false);
+}
+
+static void ar9003_mci_send_req_wake(struct ath_hw *ah, bool wait_done)
+{
+ ar9003_mci_send_message(ah, MCI_REQ_WAKE, MCI_FLAG_DISABLE_TIMESTAMP,
+ NULL, 0, wait_done, false);
+ udelay(5);
+}
+
+void ar9003_mci_send_sys_waking(struct ath_hw *ah, bool wait_done)
+{
+ if (!ATH9K_HW_CAP_MCI)
+ return;
+
+ ar9003_mci_send_message(ah, MCI_SYS_WAKING, MCI_FLAG_DISABLE_TIMESTAMP,
+ NULL, 0, wait_done, false);
+}
+
+static void ar9003_mci_send_lna_take(struct ath_hw *ah, bool wait_done)
+{
+ u32 payload = 0x70000000;
+
+ ar9003_mci_send_message(ah, MCI_LNA_TAKE, 0, &payload, 1,
+ wait_done, false);
+}
+
+static void ar9003_mci_send_sys_sleeping(struct ath_hw *ah, bool wait_done)
+{
+ ar9003_mci_send_message(ah, MCI_SYS_SLEEPING,
+ MCI_FLAG_DISABLE_TIMESTAMP,
+ NULL, 0, wait_done, false);
+}
+
+static void ar9003_mci_send_coex_version_query(struct ath_hw *ah,
+ bool wait_done)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 payload[4] = {0, 0, 0, 0};
+
+ if (!mci->bt_version_known &&
+ (mci->bt_state != MCI_BT_SLEEP)) {
+ ath_dbg(common, MCI, "MCI Send Coex version query\n");
+ MCI_GPM_SET_TYPE_OPCODE(payload,
+ MCI_GPM_COEX_AGENT, MCI_GPM_COEX_VERSION_QUERY);
+ ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16,
+ wait_done, true);
+ }
+}
+
+static void ar9003_mci_send_coex_version_response(struct ath_hw *ah,
+ bool wait_done)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 payload[4] = {0, 0, 0, 0};
+
+ ath_dbg(common, MCI, "MCI Send Coex version response\n");
+ MCI_GPM_SET_TYPE_OPCODE(payload, MCI_GPM_COEX_AGENT,
+ MCI_GPM_COEX_VERSION_RESPONSE);
+ *(((u8 *)payload) + MCI_GPM_COEX_B_MAJOR_VERSION) =
+ mci->wlan_ver_major;
+ *(((u8 *)payload) + MCI_GPM_COEX_B_MINOR_VERSION) =
+ mci->wlan_ver_minor;
+ ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16, wait_done, true);
+}
+
+static void ar9003_mci_send_coex_wlan_channels(struct ath_hw *ah,
+ bool wait_done)
+{
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 *payload = &mci->wlan_channels[0];
+
+ if ((mci->wlan_channels_update == true) &&
+ (mci->bt_state != MCI_BT_SLEEP)) {
+ MCI_GPM_SET_TYPE_OPCODE(payload,
+ MCI_GPM_COEX_AGENT, MCI_GPM_COEX_WLAN_CHANNELS);
+ ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16,
+ wait_done, true);
+ MCI_GPM_SET_TYPE_OPCODE(payload, 0xff, 0xff);
+ }
+}
+
+static void ar9003_mci_send_coex_bt_status_query(struct ath_hw *ah,
+ bool wait_done, u8 query_type)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 payload[4] = {0, 0, 0, 0};
+ bool query_btinfo = !!(query_type & (MCI_GPM_COEX_QUERY_BT_ALL_INFO |
+ MCI_GPM_COEX_QUERY_BT_TOPOLOGY));
+
+ if (mci->bt_state != MCI_BT_SLEEP) {
+
+ ath_dbg(common, MCI, "MCI Send Coex BT Status Query 0x%02X\n",
+ query_type);
+
+ MCI_GPM_SET_TYPE_OPCODE(payload,
+ MCI_GPM_COEX_AGENT, MCI_GPM_COEX_STATUS_QUERY);
+
+ *(((u8 *)payload) + MCI_GPM_COEX_B_BT_BITMAP) = query_type;
+ /*
+ * If bt_status_query message is not sent successfully,
+ * then need_flush_btinfo should be set again.
+ */
+ if (!ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16,
+ wait_done, true)) {
+ if (query_btinfo) {
+ mci->need_flush_btinfo = true;
+
+ ath_dbg(common, MCI,
+ "MCI send bt_status_query fail, set flush flag again\n");
+ }
+ }
+
+ if (query_btinfo)
+ mci->query_bt = false;
+ }
+}
+
+void ar9003_mci_send_coex_halt_bt_gpm(struct ath_hw *ah, bool halt,
+ bool wait_done)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 payload[4] = {0, 0, 0, 0};
+
+ if (!ATH9K_HW_CAP_MCI)
+ return;
+
+ ath_dbg(common, MCI, "MCI Send Coex %s BT GPM\n",
+ (halt) ? "halt" : "unhalt");
+
+ MCI_GPM_SET_TYPE_OPCODE(payload,
+ MCI_GPM_COEX_AGENT, MCI_GPM_COEX_HALT_BT_GPM);
+
+ if (halt) {
+ mci->query_bt = true;
+ /* Send the next unhalt regardless of whether the halt was sent */
+ mci->unhalt_bt_gpm = true;
+ mci->need_flush_btinfo = true;
+ *(((u8 *)payload) + MCI_GPM_COEX_B_HALT_STATE) =
+ MCI_GPM_COEX_BT_GPM_HALT;
+ } else
+ *(((u8 *)payload) + MCI_GPM_COEX_B_HALT_STATE) =
+ MCI_GPM_COEX_BT_GPM_UNHALT;
+
+ ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16, wait_done, true);
+}
+
+
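+/*
+ * Run the MCI wakeup handshake: remote-reset BT, send REQ_WAKE, wait for
+ * SYS_WAKING, reply with our own SYS_WAKING and, on 2G (or AR9462 1.0),
+ * send LNA_TRANS for the shared LNA.
+ */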
+static void ar9003_mci_prep_interface(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 saved_mci_int_en;
+ u32 mci_timeout = 150;
+
+ mci->bt_state = MCI_BT_SLEEP;
+ saved_mci_int_en = REG_READ(ah, AR_MCI_INTERRUPT_EN);
+
+ REG_WRITE(ah, AR_MCI_INTERRUPT_EN, 0);
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+ REG_READ(ah, AR_MCI_INTERRUPT_RX_MSG_RAW));
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
+ REG_READ(ah, AR_MCI_INTERRUPT_RAW));
+
+ /* Remote Reset */
+ ath_dbg(common, MCI, "MCI Reset sequence start\n");
+ ath_dbg(common, MCI, "MCI send REMOTE_RESET\n");
+ ar9003_mci_remote_reset(ah, true);
+
+ /*
+ * This delay is required to cover the worst-case reset delay
+ * value (255) in the MCI_COMMAND2 register.
+ */
+
+ if (AR_SREV_9462_10(ah))
+ udelay(252);
+
+ ath_dbg(common, MCI, "MCI Send REQ_WAKE to remote(BT)\n");
+ ar9003_mci_send_req_wake(ah, true);
+
+ if (ar9003_mci_wait_for_interrupt(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+ AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING, 500)) {
+
+ ath_dbg(common, MCI, "MCI SYS_WAKING from remote(BT)\n");
+ mci->bt_state = MCI_BT_AWAKE;
+
+ if (AR_SREV_9462_10(ah))
+ udelay(10);
+ /*
+ * We don't need to send another remote_reset at this point.
+ * If BT received the first remote_reset, its HW has been
+ * cleaned up and can receive req_wake, and BT HW will respond
+ * with sys_waking; in that case WLAN receives BT's HW
+ * sys_waking. Otherwise, if BT SW missed the initial
+ * remote_reset, that remote_reset still cleans up BT MCI RX,
+ * the req_wake wakes BT up, and BT SW responds to the req_wake
+ * with a remote_reset and sys_waking; in that case WLAN
+ * receives BT's SW sys_waking. Either way BT's RX has been
+ * cleaned up, so there is no need to reply to BT's
+ * remote_reset now, if one arrives. Likewise, receiving BT's
+ * sys_waking in either case means WLAN's RX is fine as well.
+ */
+
+ /* Send SYS_WAKING to BT */
+
+ ath_dbg(common, MCI, "MCI send SW SYS_WAKING to remote BT\n");
+
+ ar9003_mci_send_sys_waking(ah, true);
+ udelay(10);
+
+ /*
+ * Set BT priority interrupt value to be 0xff to
+ * avoid having too many BT PRIORITY interrupts.
+ */
+
+ REG_WRITE(ah, AR_MCI_BT_PRI0, 0xFFFFFFFF);
+ REG_WRITE(ah, AR_MCI_BT_PRI1, 0xFFFFFFFF);
+ REG_WRITE(ah, AR_MCI_BT_PRI2, 0xFFFFFFFF);
+ REG_WRITE(ah, AR_MCI_BT_PRI3, 0xFFFFFFFF);
+ REG_WRITE(ah, AR_MCI_BT_PRI, 0x000000FF);
+
+ /*
+ * A contention reset will be received after sys_waking has been
+ * sent out, and the BT priority interrupt bits will be set.
+ * Clear those bits before the next step.
+ */
+
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+ AR_MCI_INTERRUPT_RX_MSG_CONT_RST);
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
+ AR_MCI_INTERRUPT_BT_PRI);
+
+ if (AR_SREV_9462_10(ah) || mci->is_2g) {
+ /* Send LNA_TRANS */
+ ath_dbg(common, MCI, "MCI send LNA_TRANS to BT\n");
+ ar9003_mci_send_lna_transfer(ah, true);
+ udelay(5);
+ }
+
+ if (AR_SREV_9462_10(ah) || (mci->is_2g &&
+ !mci->update_2g5g)) {
+ if (ar9003_mci_wait_for_interrupt(ah,
+ AR_MCI_INTERRUPT_RX_MSG_RAW,
+ AR_MCI_INTERRUPT_RX_MSG_LNA_INFO,
+ mci_timeout))
+ ath_dbg(common, MCI,
+ "MCI WLAN has control over the LNA & BT obeys it\n");
+ else
+ ath_dbg(common, MCI,
+ "MCI BT didn't respond to LNA_TRANS\n");
+ }
+
+ if (AR_SREV_9462_10(ah)) {
+ /* Send another remote_reset to deassert BT clk_req. */
+ ath_dbg(common, MCI,
+ "MCI another remote_reset to deassert clk_req\n");
+ ar9003_mci_remote_reset(ah, true);
+ udelay(252);
+ }
+ }
+
+ /* Clear the extra redundant SYS_WAKING from BT */
+ if ((mci->bt_state == MCI_BT_AWAKE) &&
+ (REG_READ_FIELD(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+ AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING)) &&
+ (REG_READ_FIELD(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+ AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING) == 0)) {
+
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+ AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING);
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
+ AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE);
+ }
+
+ REG_WRITE(ah, AR_MCI_INTERRUPT_EN, saved_mci_int_en);
+}
+
+void ar9003_mci_disable_interrupt(struct ath_hw *ah)
+{
+ if (!ATH9K_HW_CAP_MCI)
+ return;
+
+ REG_WRITE(ah, AR_MCI_INTERRUPT_EN, 0);
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_EN, 0);
+}
+
+void ar9003_mci_enable_interrupt(struct ath_hw *ah)
+{
+ if (!ATH9K_HW_CAP_MCI)
+ return;
+
+ REG_WRITE(ah, AR_MCI_INTERRUPT_EN, AR_MCI_INTERRUPT_DEFAULT);
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_EN,
+ AR_MCI_INTERRUPT_RX_MSG_DEFAULT);
+}
+
+bool ar9003_mci_check_int(struct ath_hw *ah, u32 ints)
+{
+ u32 intr;
+
+ if (!ATH9K_HW_CAP_MCI)
+ return false;
+
+ intr = REG_READ(ah, AR_MCI_INTERRUPT_RX_MSG_RAW);
+ return ((intr & ints) == ints);
+}
+
+void ar9003_mci_get_interrupt(struct ath_hw *ah, u32 *raw_intr,
+ u32 *rx_msg_intr)
+{
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+
+ if (!ATH9K_HW_CAP_MCI)
+ return;
+
+ *raw_intr = mci->raw_intr;
+ *rx_msg_intr = mci->rx_msg_intr;
+
+ /* Clear the interrupt bits after the values are read. */
+ mci->raw_intr = 0;
+ mci->rx_msg_intr = 0;
+}
+EXPORT_SYMBOL(ar9003_mci_get_interrupt);
+
+void ar9003_mci_2g5g_changed(struct ath_hw *ah, bool is_2g)
+{
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+
+ if (!ATH9K_HW_CAP_MCI)
+ return;
+
+ if (!mci->update_2g5g &&
+ (mci->is_2g != is_2g))
+ mci->update_2g5g = true;
+
+ mci->is_2g = is_2g;
+}
+
+static bool ar9003_mci_is_gpm_valid(struct ath_hw *ah, u32 msg_index)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 *payload;
+ u32 recv_type, offset;
+
+ if (msg_index == MCI_GPM_INVALID)
+ return false;
+
+ offset = msg_index << 4;
+
+ payload = (u32 *)(mci->gpm_buf + offset);
+ recv_type = MCI_GPM_TYPE(payload);
+
+ if (recv_type == MCI_GPM_RSVD_PATTERN) {
+ ath_dbg(common, MCI, "MCI Skip RSVD GPM\n");
+ return false;
+ }
+
+ return true;
+}
+
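+/*
+ * Route MCI/BT/WLAN observation (debug) signals onto GPIOs according to
+ * the MCI_OBS bits in mci->config.
+ */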
+static void ar9003_mci_observation_set_up(struct ath_hw *ah)
+{
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ if (mci->config & ATH_MCI_CONFIG_MCI_OBS_MCI) {
+
+ ath9k_hw_cfg_output(ah, 3,
+ AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_DATA);
+ ath9k_hw_cfg_output(ah, 2, AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_CLK);
+ ath9k_hw_cfg_output(ah, 1, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_DATA);
+ ath9k_hw_cfg_output(ah, 0, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_CLK);
+
+ } else if (mci->config & ATH_MCI_CONFIG_MCI_OBS_TXRX) {
+
+ ath9k_hw_cfg_output(ah, 3, AR_GPIO_OUTPUT_MUX_AS_WL_IN_TX);
+ ath9k_hw_cfg_output(ah, 2, AR_GPIO_OUTPUT_MUX_AS_WL_IN_RX);
+ ath9k_hw_cfg_output(ah, 1, AR_GPIO_OUTPUT_MUX_AS_BT_IN_TX);
+ ath9k_hw_cfg_output(ah, 0, AR_GPIO_OUTPUT_MUX_AS_BT_IN_RX);
+ ath9k_hw_cfg_output(ah, 5, AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+
+ } else if (mci->config & ATH_MCI_CONFIG_MCI_OBS_BT) {
+
+ ath9k_hw_cfg_output(ah, 3, AR_GPIO_OUTPUT_MUX_AS_BT_IN_TX);
+ ath9k_hw_cfg_output(ah, 2, AR_GPIO_OUTPUT_MUX_AS_BT_IN_RX);
+ ath9k_hw_cfg_output(ah, 1, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_DATA);
+ ath9k_hw_cfg_output(ah, 0, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_CLK);
+
+ } else
+ return;
+
+ REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, AR_GPIO_JTAG_DISABLE);
+
+ if (AR_SREV_9462_20_OR_LATER(ah)) {
+ REG_RMW_FIELD(ah, AR_PHY_GLB_CONTROL,
+ AR_GLB_DS_JTAG_DISABLE, 1);
+ REG_RMW_FIELD(ah, AR_PHY_GLB_CONTROL,
+ AR_GLB_WLAN_UART_INTF_EN, 0);
+ REG_SET_BIT(ah, AR_GLB_GPIO_CONTROL,
+ ATH_MCI_CONFIG_MCI_OBS_GPIO);
+ }
+
+ REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2, AR_BTCOEX_CTRL2_GPIO_OBS_SEL, 0);
+ REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2, AR_BTCOEX_CTRL2_MAC_BB_OBS_SEL, 1);
+ REG_WRITE(ah, AR_OBS, 0x4b);
+ REG_RMW_FIELD(ah, AR_DIAG_SW, AR_DIAG_OBS_PT_SEL1, 0x03);
+ REG_RMW_FIELD(ah, AR_DIAG_SW, AR_DIAG_OBS_PT_SEL2, 0x01);
+ REG_RMW_FIELD(ah, AR_MACMISC, AR_MACMISC_MISC_OBS_BUS_LSB, 0x02);
+ REG_RMW_FIELD(ah, AR_MACMISC, AR_MACMISC_MISC_OBS_BUS_MSB, 0x03);
+ REG_RMW_FIELD(ah, AR_PHY_TEST_CTL_STATUS,
+ AR_PHY_TEST_CTL_DEBUGPORT_SEL, 0x07);
+}
+
+static bool ar9003_mci_send_coex_bt_flags(struct ath_hw *ah, bool wait_done,
+ u8 opcode, u32 bt_flags)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 pld[4] = {0, 0, 0, 0};
+
+ MCI_GPM_SET_TYPE_OPCODE(pld,
+ MCI_GPM_COEX_AGENT, MCI_GPM_COEX_BT_UPDATE_FLAGS);
+
+ *(((u8 *)pld) + MCI_GPM_COEX_B_BT_FLAGS_OP) = opcode;
+ *(((u8 *)pld) + MCI_GPM_COEX_W_BT_FLAGS + 0) = bt_flags & 0xFF;
+ *(((u8 *)pld) + MCI_GPM_COEX_W_BT_FLAGS + 1) = (bt_flags >> 8) & 0xFF;
+ *(((u8 *)pld) + MCI_GPM_COEX_W_BT_FLAGS + 2) = (bt_flags >> 16) & 0xFF;
+ *(((u8 *)pld) + MCI_GPM_COEX_W_BT_FLAGS + 3) = (bt_flags >> 24) & 0xFF;
+
+ ath_dbg(common, MCI,
+ "MCI BT_MCI_FLAGS: Send Coex BT Update Flags %s 0x%08x\n",
+ opcode == MCI_GPM_COEX_BT_FLAGS_READ ? "READ" :
+ opcode == MCI_GPM_COEX_BT_FLAGS_SET ? "SET" : "CLEAR",
+ bt_flags);
+
+ return ar9003_mci_send_message(ah, MCI_GPM, 0, pld, 16,
+ wait_done, true);
+}
+
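+/*
+ * Full MCI (re)initialization: program the GPM and schedule DMA addresses,
+ * set up AR_BTCOEX_CTRL, reset the MCI TX/RX paths and re-run the wakeup
+ * handshake via ar9003_mci_prep_interface().
+ */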
+void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
+ bool is_full_sleep)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 regval, thresh;
+
+ if (!ATH9K_HW_CAP_MCI)
+ return;
+
+ ath_dbg(common, MCI, "MCI full_sleep = %d, is_2g = %d\n",
+ is_full_sleep, is_2g);
+
+ /*
+ * Bail out if the GPM and scheduling message buffers are not allocated.
+ */
+
+ if (!mci->gpm_addr && !mci->sched_addr) {
+ ath_dbg(common, MCI,
+ "MCI GPM and schedule buffers are not allocated\n");
+ return;
+ }
+
+ if (REG_READ(ah, AR_BTCOEX_CTRL) == 0xdeadbeef) {
+ ath_dbg(common, MCI, "MCI it's deadbeef, quit mci_reset\n");
+ return;
+ }
+
+ /* Program MCI DMA related registers */
+ REG_WRITE(ah, AR_MCI_GPM_0, mci->gpm_addr);
+ REG_WRITE(ah, AR_MCI_GPM_1, mci->gpm_len);
+ REG_WRITE(ah, AR_MCI_SCHD_TABLE_0, mci->sched_addr);
+
+ /*
+ * To avoid having the MCI state machine affected by incoming remote
+ * MCI messages, MCI mode is enabled later, right before the MCI TX
+ * and RX paths are reset.
+ */
+
+ regval = SM(1, AR_BTCOEX_CTRL_AR9462_MODE) |
+ SM(1, AR_BTCOEX_CTRL_WBTIMER_EN) |
+ SM(1, AR_BTCOEX_CTRL_PA_SHARED) |
+ SM(1, AR_BTCOEX_CTRL_LNA_SHARED) |
+ SM(2, AR_BTCOEX_CTRL_NUM_ANTENNAS) |
+ SM(3, AR_BTCOEX_CTRL_RX_CHAIN_MASK) |
+ SM(0, AR_BTCOEX_CTRL_1_CHAIN_ACK) |
+ SM(0, AR_BTCOEX_CTRL_1_CHAIN_BCN) |
+ SM(0, AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
+
+ if (is_2g && (AR_SREV_9462_20(ah)) &&
+ !(mci->config & ATH_MCI_CONFIG_DISABLE_OSLA)) {
+
+ regval |= SM(1, AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
+ ath_dbg(common, MCI, "MCI sched one step look ahead\n");
+
+ if (!(mci->config &
+ ATH_MCI_CONFIG_DISABLE_AGGR_THRESH)) {
+
+ thresh = MS(mci->config,
+ ATH_MCI_CONFIG_AGGR_THRESH);
+ thresh &= 7;
+ regval |= SM(1,
+ AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN);
+ regval |= SM(thresh, AR_BTCOEX_CTRL_AGGR_THRESH);
+
+ REG_RMW_FIELD(ah, AR_MCI_SCHD_TABLE_2,
+ AR_MCI_SCHD_TABLE_2_HW_BASED, 1);
+ REG_RMW_FIELD(ah, AR_MCI_SCHD_TABLE_2,
+ AR_MCI_SCHD_TABLE_2_MEM_BASED, 1);
+
+ } else
+ ath_dbg(common, MCI, "MCI sched aggr thresh: off\n");
+ } else
+ ath_dbg(common, MCI, "MCI SCHED one step look ahead off\n");
+
+ if (AR_SREV_9462_10(ah))
+ regval |= SM(1, AR_BTCOEX_CTRL_SPDT_ENABLE_10);
+
+ REG_WRITE(ah, AR_BTCOEX_CTRL, regval);
+
+ if (AR_SREV_9462_20(ah)) {
+ REG_SET_BIT(ah, AR_PHY_GLB_CONTROL,
+ AR_BTCOEX_CTRL_SPDT_ENABLE);
+ REG_RMW_FIELD(ah, AR_BTCOEX_CTRL3,
+ AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT, 20);
+ }
+
+ REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2, AR_BTCOEX_CTRL2_RX_DEWEIGHT, 1);
+ REG_RMW_FIELD(ah, AR_PCU_MISC, AR_PCU_BT_ANT_PREVENT_RX, 0);
+
+ thresh = MS(mci->config, ATH_MCI_CONFIG_CLK_DIV);
+ REG_RMW_FIELD(ah, AR_MCI_TX_CTRL, AR_MCI_TX_CTRL_CLK_DIV, thresh);
+ REG_SET_BIT(ah, AR_BTCOEX_CTRL, AR_BTCOEX_CTRL_MCI_MODE_EN);
+
+ /* Resetting the Rx and Tx paths of MCI */
+ regval = REG_READ(ah, AR_MCI_COMMAND2);
+ regval |= SM(1, AR_MCI_COMMAND2_RESET_TX);
+ REG_WRITE(ah, AR_MCI_COMMAND2, regval);
+
+ udelay(1);
+
+ regval &= ~SM(1, AR_MCI_COMMAND2_RESET_TX);
+ REG_WRITE(ah, AR_MCI_COMMAND2, regval);
+
+ if (is_full_sleep) {
+ ar9003_mci_mute_bt(ah);
+ udelay(100);
+ }
+
+ regval |= SM(1, AR_MCI_COMMAND2_RESET_RX);
+ REG_WRITE(ah, AR_MCI_COMMAND2, regval);
+ udelay(1);
+ regval &= ~SM(1, AR_MCI_COMMAND2_RESET_RX);
+ REG_WRITE(ah, AR_MCI_COMMAND2, regval);
+
+ ar9003_mci_state(ah, MCI_STATE_INIT_GPM_OFFSET, NULL);
+ REG_WRITE(ah, AR_MCI_MSG_ATTRIBUTES_TABLE,
+ (SM(0xe801, AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR) |
+ SM(0x0000, AR_MCI_MSG_ATTRIBUTES_TABLE_CHECKSUM)));
+
+ REG_CLR_BIT(ah, AR_MCI_TX_CTRL,
+ AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
+
+ if (AR_SREV_9462_20_OR_LATER(ah))
+ ar9003_mci_observation_set_up(ah);
+
+ mci->ready = true;
+ ar9003_mci_prep_interface(ah);
+
+ if (en_int)
+ ar9003_mci_enable_interrupt(ah);
+}
+
+void ar9003_mci_mute_bt(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (!ATH9K_HW_CAP_MCI)
+ return;
+
+ /* disable all MCI messages */
+ REG_WRITE(ah, AR_MCI_MSG_ATTRIBUTES_TABLE, 0xffff0000);
+ REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS0, 0xffffffff);
+ REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS1, 0xffffffff);
+ REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS2, 0xffffffff);
+ REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS3, 0xffffffff);
+ REG_SET_BIT(ah, AR_MCI_TX_CTRL, AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
+
+ /* Wait for pending HW messages to flush out */
+ udelay(10);
+
+ /*
+ * Send LNA_TAKE and SYS_SLEEPING
+ * 1. when the reset is not done after resuming from full sleep, and
+ * 2. before resetting MCI RX, to quiet BT and avoid MCI RX misalignment.
+ */
+
+ ath_dbg(common, MCI, "MCI Send LNA take\n");
+ ar9003_mci_send_lna_take(ah, true);
+
+ udelay(5);
+
+ ath_dbg(common, MCI, "MCI Send sys sleeping\n");
+ ar9003_mci_send_sys_sleeping(ah, true);
+}
+
+void ar9003_mci_sync_bt_state(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 cur_bt_state;
+
+ if (!ATH9K_HW_CAP_MCI)
+ return;
+
+ cur_bt_state = ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP, NULL);
+
+ if (mci->bt_state != cur_bt_state) {
+ ath_dbg(common, MCI,
+ "MCI BT state mismatches. old: %d, new: %d\n",
+ mci->bt_state, cur_bt_state);
+ mci->bt_state = cur_bt_state;
+ }
+
+ if (mci->bt_state != MCI_BT_SLEEP) {
+
+ ar9003_mci_send_coex_version_query(ah, true);
+ ar9003_mci_send_coex_wlan_channels(ah, true);
+
+ if (mci->unhalt_bt_gpm == true) {
+ ath_dbg(common, MCI, "MCI unhalt BT GPM\n");
+ ar9003_mci_send_coex_halt_bt_gpm(ah, false, true);
+ }
+ }
+}
+
+static void ar9003_mci_send_2g5g_status(struct ath_hw *ah, bool wait_done)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 new_flags, to_set, to_clear;
+
+ if (AR_SREV_9462_20(ah) &&
+ mci->update_2g5g &&
+ (mci->bt_state != MCI_BT_SLEEP)) {
+
+ if (mci->is_2g) {
+ new_flags = MCI_2G_FLAGS;
+ to_clear = MCI_2G_FLAGS_CLEAR_MASK;
+ to_set = MCI_2G_FLAGS_SET_MASK;
+ } else {
+ new_flags = MCI_5G_FLAGS;
+ to_clear = MCI_5G_FLAGS_CLEAR_MASK;
+ to_set = MCI_5G_FLAGS_SET_MASK;
+ }
+
+ ath_dbg(common, MCI,
+ "MCI BT_MCI_FLAGS: %s 0x%08x clr=0x%08x, set=0x%08x\n",
+ mci->is_2g ? "2G" : "5G", new_flags, to_clear, to_set);
+
+ if (to_clear)
+ ar9003_mci_send_coex_bt_flags(ah, wait_done,
+ MCI_GPM_COEX_BT_FLAGS_CLEAR, to_clear);
+
+ if (to_set)
+ ar9003_mci_send_coex_bt_flags(ah, wait_done,
+ MCI_GPM_COEX_BT_FLAGS_SET, to_set);
+ }
+
+ if (AR_SREV_9462_10(ah) && (mci->bt_state != MCI_BT_SLEEP))
+ mci->update_2g5g = false;
+}
+
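+/*
+ * Track whether a GPM actually went out: on failure (queue == true) mark
+ * the corresponding state (2G/5G flags, WLAN channel map, halt/unhalt) so
+ * that it is resent later; on success clear the pending flag.
+ */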
+static void ar9003_mci_queue_unsent_gpm(struct ath_hw *ah, u8 header,
+ u32 *payload, bool queue)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u8 type, opcode;
+
+ if (queue) {
+
+ if (payload)
+ ath_dbg(common, MCI,
+ "MCI ERROR: Send fail: %02x: %02x %02x %02x\n",
+ header,
+ *(((u8 *)payload) + 4),
+ *(((u8 *)payload) + 5),
+ *(((u8 *)payload) + 6));
+ else
+ ath_dbg(common, MCI, "MCI ERROR: Send fail: %02x\n",
+ header);
+ }
+
+ /* check if the message is to be queued */
+ if (header != MCI_GPM)
+ return;
+
+ type = MCI_GPM_TYPE(payload);
+ opcode = MCI_GPM_OPCODE(payload);
+
+ if (type != MCI_GPM_COEX_AGENT)
+ return;
+
+ switch (opcode) {
+ case MCI_GPM_COEX_BT_UPDATE_FLAGS:
+
+ if (AR_SREV_9462_10(ah))
+ break;
+
+ if (*(((u8 *)payload) + MCI_GPM_COEX_B_BT_FLAGS_OP) ==
+ MCI_GPM_COEX_BT_FLAGS_READ)
+ break;
+
+ mci->update_2g5g = queue;
+
+ if (queue)
+ ath_dbg(common, MCI,
+ "MCI BT_MCI_FLAGS: 2G5G status <queued> %s\n",
+ mci->is_2g ? "2G" : "5G");
+ else
+ ath_dbg(common, MCI,
+ "MCI BT_MCI_FLAGS: 2G5G status <sent> %s\n",
+ mci->is_2g ? "2G" : "5G");
+
+ break;
+
+ case MCI_GPM_COEX_WLAN_CHANNELS:
+
+ mci->wlan_channels_update = queue;
+ if (queue)
+ ath_dbg(common, MCI, "MCI WLAN channel map <queued>\n");
+ else
+ ath_dbg(common, MCI, "MCI WLAN channel map <sent>\n");
+ break;
+
+ case MCI_GPM_COEX_HALT_BT_GPM:
+
+ if (*(((u8 *)payload) + MCI_GPM_COEX_B_HALT_STATE) ==
+ MCI_GPM_COEX_BT_GPM_UNHALT) {
+
+ mci->unhalt_bt_gpm = queue;
+
+ if (queue)
+ ath_dbg(common, MCI,
+ "MCI UNHALT BT GPM <queued>\n");
+ else {
+ mci->halted_bt_gpm = false;
+ ath_dbg(common, MCI,
+ "MCI UNHALT BT GPM <sent>\n");
+ }
+ }
+
+ if (*(((u8 *)payload) + MCI_GPM_COEX_B_HALT_STATE) ==
+ MCI_GPM_COEX_BT_GPM_HALT) {
+
+ mci->halted_bt_gpm = !queue;
+
+ if (queue)
+ ath_dbg(common, MCI,
+ "MCI HALT BT GPM <not sent>\n");
+ else
+ ath_dbg(common, MCI,
+ "MCI HALT BT GPM <sent>\n");
+ }
+
+ break;
+ default:
+ break;
+ }
+}
+
+void ar9003_mci_2g5g_switch(struct ath_hw *ah, bool wait_done)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+
+ if (!ATH9K_HW_CAP_MCI)
+ return;
+
+ if (mci->update_2g5g) {
+ if (mci->is_2g) {
+
+ ar9003_mci_send_2g5g_status(ah, true);
+ ath_dbg(common, MCI, "MCI Send LNA trans\n");
+ ar9003_mci_send_lna_transfer(ah, true);
+ udelay(5);
+
+ REG_CLR_BIT(ah, AR_MCI_TX_CTRL,
+ AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
+
+ if (AR_SREV_9462_20(ah)) {
+ REG_CLR_BIT(ah, AR_PHY_GLB_CONTROL,
+ AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL);
+ if (!(mci->config &
+ ATH_MCI_CONFIG_DISABLE_OSLA)) {
+ REG_SET_BIT(ah, AR_BTCOEX_CTRL,
+ AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
+ }
+ }
+ } else {
+ ath_dbg(common, MCI, "MCI Send LNA take\n");
+ ar9003_mci_send_lna_take(ah, true);
+ udelay(5);
+
+ REG_SET_BIT(ah, AR_MCI_TX_CTRL,
+ AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
+
+ if (AR_SREV_9462_20(ah)) {
+ REG_SET_BIT(ah, AR_PHY_GLB_CONTROL,
+ AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL);
+ REG_CLR_BIT(ah, AR_BTCOEX_CTRL,
+ AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
+ }
+
+ ar9003_mci_send_2g5g_status(ah, true);
+ }
+ }
+}
+
+bool ar9003_mci_send_message(struct ath_hw *ah, u8 header, u32 flag,
+ u32 *payload, u8 len, bool wait_done,
+ bool check_bt)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ bool msg_sent = false;
+ u32 regval;
+ u32 saved_mci_int_en;
+ int i;
+
+ if (!ATH9K_HW_CAP_MCI)
+ return false;
+
+ saved_mci_int_en = REG_READ(ah, AR_MCI_INTERRUPT_EN);
+ regval = REG_READ(ah, AR_BTCOEX_CTRL);
+
+ if ((regval == 0xdeadbeef) || !(regval & AR_BTCOEX_CTRL_MCI_MODE_EN)) {
+
+ ath_dbg(common, MCI,
+ "MCI Not sending 0x%x. MCI is not enabled. full_sleep = %d\n",
+ header,
+ (ah->power_mode == ATH9K_PM_FULL_SLEEP) ? 1 : 0);
+
+ ar9003_mci_queue_unsent_gpm(ah, header, payload, true);
+ return false;
+
+ } else if (check_bt && (mci->bt_state == MCI_BT_SLEEP)) {
+
+ ath_dbg(common, MCI,
+ "MCI Don't send message 0x%x. BT is in sleep state\n",
+ header);
+
+ ar9003_mci_queue_unsent_gpm(ah, header, payload, true);
+ return false;
+ }
+
+ if (wait_done)
+ REG_WRITE(ah, AR_MCI_INTERRUPT_EN, 0);
+
+ /* Need to clear SW_MSG_DONE raw bit before wait */
+
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
+ (AR_MCI_INTERRUPT_SW_MSG_DONE |
+ AR_MCI_INTERRUPT_MSG_FAIL_MASK));
+
+ if (payload) {
+ for (i = 0; (i * 4) < len; i++)
+ REG_WRITE(ah, (AR_MCI_TX_PAYLOAD0 + i * 4),
+ *(payload + i));
+ }
+
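+ /* Kick off the transmission: header, length and timestamp flag. */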
+ REG_WRITE(ah, AR_MCI_COMMAND0,
+ (SM((flag & MCI_FLAG_DISABLE_TIMESTAMP),
+ AR_MCI_COMMAND0_DISABLE_TIMESTAMP) |
+ SM(len, AR_MCI_COMMAND0_LEN) |
+ SM(header, AR_MCI_COMMAND0_HEADER)));
+
+ if (wait_done &&
+ !(ar9003_mci_wait_for_interrupt(ah, AR_MCI_INTERRUPT_RAW,
+ AR_MCI_INTERRUPT_SW_MSG_DONE, 500)))
+ ar9003_mci_queue_unsent_gpm(ah, header, payload, true);
+ else {
+ ar9003_mci_queue_unsent_gpm(ah, header, payload, false);
+ msg_sent = true;
+ }
+
+ if (wait_done)
+ REG_WRITE(ah, AR_MCI_INTERRUPT_EN, saved_mci_int_en);
+
+ return msg_sent;
+}
+EXPORT_SYMBOL(ar9003_mci_send_message);
+
+void ar9003_mci_setup(struct ath_hw *ah, u32 gpm_addr, void *gpm_buf,
+ u16 len, u32 sched_addr)
+{
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ void *sched_buf = (void *)((char *) gpm_buf + (sched_addr - gpm_addr));
+
+ if (!ATH9K_HW_CAP_MCI)
+ return;
+
+ mci->gpm_addr = gpm_addr;
+ mci->gpm_buf = gpm_buf;
+ mci->gpm_len = len;
+ mci->sched_addr = sched_addr;
+ mci->sched_buf = sched_buf;
+
+ ar9003_mci_reset(ah, true, true, true);
+}
+EXPORT_SYMBOL(ar9003_mci_setup);
+
+void ar9003_mci_cleanup(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (!ATH9K_HW_CAP_MCI)
+ return;
+
+ /* Turn off MCI and Jupiter mode. */
+ REG_WRITE(ah, AR_BTCOEX_CTRL, 0x00);
+ ath_dbg(common, MCI, "MCI ar9003_mci_cleanup\n");
+ ar9003_mci_disable_interrupt(ah);
+}
+EXPORT_SYMBOL(ar9003_mci_cleanup);
+
+static void ar9003_mci_process_gpm_extra(struct ath_hw *ah, u8 gpm_type,
+ u8 gpm_opcode, u32 *p_gpm)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u8 *p_data = (u8 *) p_gpm;
+
+ if (gpm_type != MCI_GPM_COEX_AGENT)
+ return;
+
+ switch (gpm_opcode) {
+ case MCI_GPM_COEX_VERSION_QUERY:
+ ath_dbg(common, MCI, "MCI Recv GPM COEX Version Query\n");
+ ar9003_mci_send_coex_version_response(ah, true);
+ break;
+ case MCI_GPM_COEX_VERSION_RESPONSE:
+ ath_dbg(common, MCI, "MCI Recv GPM COEX Version Response\n");
+ mci->bt_ver_major =
+ *(p_data + MCI_GPM_COEX_B_MAJOR_VERSION);
+ mci->bt_ver_minor =
+ *(p_data + MCI_GPM_COEX_B_MINOR_VERSION);
+ mci->bt_version_known = true;
+ ath_dbg(common, MCI, "MCI BT Coex version: %d.%d\n",
+ mci->bt_ver_major, mci->bt_ver_minor);
+ break;
+ case MCI_GPM_COEX_STATUS_QUERY:
+ ath_dbg(common, MCI,
+ "MCI Recv GPM COEX Status Query = 0x%02X\n",
+ *(p_data + MCI_GPM_COEX_B_WLAN_BITMAP));
+ mci->wlan_channels_update = true;
+ ar9003_mci_send_coex_wlan_channels(ah, true);
+ break;
+ case MCI_GPM_COEX_BT_PROFILE_INFO:
+ mci->query_bt = true;
+ ath_dbg(common, MCI, "MCI Recv GPM COEX BT_Profile_Info\n");
+ break;
+ case MCI_GPM_COEX_BT_STATUS_UPDATE:
+ mci->query_bt = true;
+ ath_dbg(common, MCI,
+ "MCI Recv GPM COEX BT_Status_Update SEQ=%d (drop&query)\n",
+ *(p_gpm + 3));
+ break;
+ default:
+ break;
+ }
+}
+
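+/*
+ * Wait for a specific GPM (gpm_type/gpm_opcode) from BT, recycling and
+ * processing any unexpected GPMs received in the meantime, and handling
+ * the BT_CAL_REQ -> WLAN_CAL_GRANT -> BT_CAL_DONE calibration handshake.
+ * Returns the remaining time in us, or 0 on timeout.
+ */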
+u32 ar9003_mci_wait_for_gpm(struct ath_hw *ah, u8 gpm_type,
+ u8 gpm_opcode, int time_out)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 *p_gpm = NULL, mismatch = 0, more_data;
+ u32 offset;
+ u8 recv_type = 0, recv_opcode = 0;
+ bool b_is_bt_cal_done = (gpm_type == MCI_GPM_BT_CAL_DONE);
+
+ if (!ATH9K_HW_CAP_MCI)
+ return 0;
+
+ more_data = time_out ? MCI_GPM_NOMORE : MCI_GPM_MORE;
+
+ while (time_out > 0) {
+ if (p_gpm) {
+ MCI_GPM_RECYCLE(p_gpm);
+ p_gpm = NULL;
+ }
+
+ if (more_data != MCI_GPM_MORE)
+ time_out = ar9003_mci_wait_for_interrupt(ah,
+ AR_MCI_INTERRUPT_RX_MSG_RAW,
+ AR_MCI_INTERRUPT_RX_MSG_GPM,
+ time_out);
+
+ if (!time_out)
+ break;
+
+ offset = ar9003_mci_state(ah,
+ MCI_STATE_NEXT_GPM_OFFSET, &more_data);
+
+ if (offset == MCI_GPM_INVALID)
+ continue;
+
+ p_gpm = (u32 *) (mci->gpm_buf + offset);
+ recv_type = MCI_GPM_TYPE(p_gpm);
+ recv_opcode = MCI_GPM_OPCODE(p_gpm);
+
+ if (MCI_GPM_IS_CAL_TYPE(recv_type)) {
+
+ if (recv_type == gpm_type) {
+
+ if ((gpm_type == MCI_GPM_BT_CAL_DONE) &&
+ !b_is_bt_cal_done) {
+ gpm_type = MCI_GPM_BT_CAL_GRANT;
+ ath_dbg(common, MCI,
+ "MCI Recv BT_CAL_DONE wait BT_CAL_GRANT\n");
+ continue;
+ }
+
+ break;
+ }
+ } else if ((recv_type == gpm_type) &&
+ (recv_opcode == gpm_opcode))
+ break;
+
+ /* not expected message */
+
+ /*
+ * Check if it's cal_grant.
+ *
+ * While we're waiting for cal_grant in the reset routine,
+ * it's possible that BT sends out cal_request at the same
+ * time. Since BT's calibration doesn't happen that often,
+ * we let BT complete its calibration and then continue to
+ * wait for cal_grant from BT.
+ * Original: wait for BT_CAL_GRANT.
+ * New: receive BT_CAL_REQ -> send WLAN_CAL_GRANT -> wait for
+ * BT_CAL_DONE -> wait for BT_CAL_GRANT.
+ */
+
+ if ((gpm_type == MCI_GPM_BT_CAL_GRANT) &&
+ (recv_type == MCI_GPM_BT_CAL_REQ)) {
+
+ u32 payload[4] = {0, 0, 0, 0};
+
+ gpm_type = MCI_GPM_BT_CAL_DONE;
+ ath_dbg(common, MCI,
+ "MCI Rcv BT_CAL_REQ, send WLAN_CAL_GRANT\n");
+
+ MCI_GPM_SET_CAL_TYPE(payload,
+ MCI_GPM_WLAN_CAL_GRANT);
+
+ ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16,
+ false, false);
+
+ ath_dbg(common, MCI, "MCI now wait for BT_CAL_DONE\n");
+
+ continue;
+ } else {
+ ath_dbg(common, MCI, "MCI GPM subtype not match 0x%x\n",
+ *(p_gpm + 1));
+ mismatch++;
+ ar9003_mci_process_gpm_extra(ah, recv_type,
+ recv_opcode, p_gpm);
+ }
+ }
+ if (p_gpm) {
+ MCI_GPM_RECYCLE(p_gpm);
+ p_gpm = NULL;
+ }
+
+ if (time_out <= 0) {
+ time_out = 0;
+ ath_dbg(common, MCI,
+ "MCI GPM received timeout, mismatch = %d\n", mismatch);
+ } else
+ ath_dbg(common, MCI, "MCI Receive GPM type=0x%x, code=0x%x\n",
+ gpm_type, gpm_opcode);
+
+ while (more_data == MCI_GPM_MORE) {
+
+ ath_dbg(common, MCI, "MCI discard remaining GPM\n");
+ offset = ar9003_mci_state(ah, MCI_STATE_NEXT_GPM_OFFSET,
+ &more_data);
+
+ if (offset == MCI_GPM_INVALID)
+ break;
+
+ p_gpm = (u32 *) (mci->gpm_buf + offset);
+ recv_type = MCI_GPM_TYPE(p_gpm);
+ recv_opcode = MCI_GPM_OPCODE(p_gpm);
+
+ if (!MCI_GPM_IS_CAL_TYPE(recv_type))
+ ar9003_mci_process_gpm_extra(ah, recv_type,
+ recv_opcode, p_gpm);
+
+ MCI_GPM_RECYCLE(p_gpm);
+ }
+
+ return time_out;
+}
+
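+/*
+ * Central MCI state query/update helper; the action taken depends on
+ * state_type and the optional p_data argument.
+ */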
+u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 value = 0, more_gpm = 0, gpm_ptr;
+ u8 query_type;
+
+ if (!ATH9K_HW_CAP_MCI)
+ return 0;
+
+ switch (state_type) {
+ case MCI_STATE_ENABLE:
+ if (mci->ready) {
+
+ value = REG_READ(ah, AR_BTCOEX_CTRL);
+
+ if ((value == 0xdeadbeef) || (value == 0xffffffff))
+ value = 0;
+ }
+ value &= AR_BTCOEX_CTRL_MCI_MODE_EN;
+ break;
+ case MCI_STATE_INIT_GPM_OFFSET:
+ value = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR);
+ ath_dbg(common, MCI, "MCI GPM initial WRITE_PTR=%d\n", value);
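+ /* gpm_idx is the SW read index; start it at the HW write pointer. */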
+ mci->gpm_idx = value;
+ break;
+ case MCI_STATE_NEXT_GPM_OFFSET:
+ case MCI_STATE_LAST_GPM_OFFSET:
+ /*
+ * Clearing the GPM RX interrupt here avoids a new GPM message
+ * interrupt that could lead to a spurious interrupt after power
+ * sleep, or to multiple entries into ath_mci_intr().
+ * Checking for an empty GPM and returning HAL_MCI_GPM_INVALID
+ * could alleviate this as well, but clearing the GPM RX
+ * interrupt bit is safe: whether this is called from hw or
+ * driver code, an interrupt bit must have been set/triggered
+ * initially.
+ */
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+ AR_MCI_INTERRUPT_RX_MSG_GPM);
+
+ gpm_ptr = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR);
+ value = gpm_ptr;
+
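+ /* Convert the HW write pointer into the index of the last written GPM. */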
+ if (value == 0)
+ value = mci->gpm_len - 1;
+ else if (value >= mci->gpm_len) {
+ if (value != 0xFFFF) {
+ value = 0;
+ ath_dbg(common, MCI,
+ "MCI GPM offset out of range\n");
+ }
+ } else
+ value--;
+
+ if (value == 0xFFFF) {
+ value = MCI_GPM_INVALID;
+ more_gpm = MCI_GPM_NOMORE;
+ ath_dbg(common, MCI,
+ "MCI GPM ptr invalid @ptr=%d, offset=%d, more=GPM_NOMORE\n",
+ gpm_ptr, value);
+ } else if (state_type == MCI_STATE_NEXT_GPM_OFFSET) {
+
+ if (gpm_ptr == mci->gpm_idx) {
+ value = MCI_GPM_INVALID;
+ more_gpm = MCI_GPM_NOMORE;
+
+ ath_dbg(common, MCI,
+ "MCI GPM message not available @ptr=%d, @offset=%d, more=GPM_NOMORE\n",
+ gpm_ptr, value);
+ } else {
+ for (;;) {
+
+ u32 temp_index;
+
+ /* skip reserved GPM if any */
+
+ if (value != mci->gpm_idx)
+ more_gpm = MCI_GPM_MORE;
+ else
+ more_gpm = MCI_GPM_NOMORE;
+
+ temp_index = mci->gpm_idx;
+ mci->gpm_idx++;
+
+ if (mci->gpm_idx >=
+ mci->gpm_len)
+ mci->gpm_idx = 0;
+
+ ath_dbg(common, MCI,
+ "MCI GPM message got ptr=%d, @offset=%d, more=%d\n",
+ gpm_ptr, temp_index,
+ (more_gpm == MCI_GPM_MORE));
+
+ if (ar9003_mci_is_gpm_valid(ah,
+ temp_index)) {
+ value = temp_index;
+ break;
+ }
+
+ if (more_gpm == MCI_GPM_NOMORE) {
+ value = MCI_GPM_INVALID;
+ break;
+ }
+ }
+ }
+ if (p_data)
+ *p_data = more_gpm;
+ }
+
+ if (value != MCI_GPM_INVALID)
+ value <<= 4;
+
+ break;
+ case MCI_STATE_LAST_SCHD_MSG_OFFSET:
+ value = MS(REG_READ(ah, AR_MCI_RX_STATUS),
+ AR_MCI_RX_LAST_SCHD_MSG_INDEX);
+ /* Convert the index to a byte offset */
+ value <<= 4;
+ break;
+
+ case MCI_STATE_REMOTE_SLEEP:
+ value = MS(REG_READ(ah, AR_MCI_RX_STATUS),
+ AR_MCI_RX_REMOTE_SLEEP) ?
+ MCI_BT_SLEEP : MCI_BT_AWAKE;
+ break;
+
+ case MCI_STATE_CONT_RSSI_POWER:
+ value = MS(mci->cont_status, AR_MCI_CONT_RSSI_POWER);
+ break;
+
+ case MCI_STATE_CONT_PRIORITY:
+ value = MS(mci->cont_status, AR_MCI_CONT_RRIORITY);
+ break;
+
+ case MCI_STATE_CONT_TXRX:
+ value = MS(mci->cont_status, AR_MCI_CONT_TXRX);
+ break;
+
+ case MCI_STATE_BT:
+ value = mci->bt_state;
+ break;
+
+ case MCI_STATE_SET_BT_SLEEP:
+ mci->bt_state = MCI_BT_SLEEP;
+ break;
+
+ case MCI_STATE_SET_BT_AWAKE:
+ mci->bt_state = MCI_BT_AWAKE;
+ ar9003_mci_send_coex_version_query(ah, true);
+ ar9003_mci_send_coex_wlan_channels(ah, true);
+
+ if (mci->unhalt_bt_gpm) {
+
+ ath_dbg(common, MCI, "MCI unhalt BT GPM\n");
+ ar9003_mci_send_coex_halt_bt_gpm(ah, false, true);
+ }
+
+ ar9003_mci_2g5g_switch(ah, true);
+ break;
+
+ case MCI_STATE_SET_BT_CAL_START:
+ mci->bt_state = MCI_BT_CAL_START;
+ break;
+
+ case MCI_STATE_SET_BT_CAL:
+ mci->bt_state = MCI_BT_CAL;
+ break;
+
+ case MCI_STATE_RESET_REQ_WAKE:
+ ar9003_mci_reset_req_wakeup(ah);
+ mci->update_2g5g = true;
+
+ if ((AR_SREV_9462_20_OR_LATER(ah)) &&
+ (mci->config & ATH_MCI_CONFIG_MCI_OBS_MASK)) {
+ /* Check if we still have control of the GPIOs */
+ if ((REG_READ(ah, AR_GLB_GPIO_CONTROL) &
+ ATH_MCI_CONFIG_MCI_OBS_GPIO) !=
+ ATH_MCI_CONFIG_MCI_OBS_GPIO) {
+
+ ath_dbg(common, MCI,
+ "MCI reconfigure observation\n");
+ ar9003_mci_observation_set_up(ah);
+ }
+ }
+ break;
+
+ case MCI_STATE_SEND_WLAN_COEX_VERSION:
+ ar9003_mci_send_coex_version_response(ah, true);
+ break;
+
+ case MCI_STATE_SET_BT_COEX_VERSION:
+
+ if (!p_data)
+ ath_dbg(common, MCI,
+ "MCI Set BT Coex version with NULL data!!\n");
+ else {
+ mci->bt_ver_major = (*p_data >> 8) & 0xff;
+ mci->bt_ver_minor = (*p_data) & 0xff;
+ mci->bt_version_known = true;
+ ath_dbg(common, MCI, "MCI BT version set: %d.%d\n",
+ mci->bt_ver_major, mci->bt_ver_minor);
+ }
+ break;
+
+ case MCI_STATE_SEND_WLAN_CHANNELS:
+ if (p_data) {
+ if (((mci->wlan_channels[1] & 0xffff0000) ==
+ (*(p_data + 1) & 0xffff0000)) &&
+ (mci->wlan_channels[2] == *(p_data + 2)) &&
+ (mci->wlan_channels[3] == *(p_data + 3)))
+ break;
+
+ mci->wlan_channels[0] = *p_data++;
+ mci->wlan_channels[1] = *p_data++;
+ mci->wlan_channels[2] = *p_data++;
+ mci->wlan_channels[3] = *p_data++;
+ }
+ mci->wlan_channels_update = true;
+ ar9003_mci_send_coex_wlan_channels(ah, true);
+ break;
+
+ case MCI_STATE_SEND_VERSION_QUERY:
+ ar9003_mci_send_coex_version_query(ah, true);
+ break;
+
+ case MCI_STATE_SEND_STATUS_QUERY:
+ query_type = (AR_SREV_9462_10(ah)) ?
+ MCI_GPM_COEX_QUERY_BT_ALL_INFO :
+ MCI_GPM_COEX_QUERY_BT_TOPOLOGY;
+
+ ar9003_mci_send_coex_bt_status_query(ah, true, query_type);
+ break;
+
+ case MCI_STATE_NEED_FLUSH_BT_INFO:
+ /*
+ * btcoex_hw.mci.unhalt_bt_gpm indicates whether an UNHALT
+ * message still needs to be sent; it is set whenever a HALT
+ * message is requested. halted_bt_gpm indicates whether the
+ * HALT message was sent out successfully.
+ *
+ * Checking (unhalt_bt_gpm == false) rather than
+ * (halted_bt_gpm == false) makes sure we are currently in
+ * UNHALT-ed mode, so BT can respond to the status query.
+ */
+ value = (!mci->unhalt_bt_gpm &&
+ mci->need_flush_btinfo) ? 1 : 0;
+ if (p_data)
+ mci->need_flush_btinfo =
+ (*p_data != 0) ? true : false;
+ break;
+
+ case MCI_STATE_RECOVER_RX:
+
+ ath_dbg(common, MCI, "MCI hw RECOVER_RX\n");
+ ar9003_mci_prep_interface(ah);
+ mci->query_bt = true;
+ mci->need_flush_btinfo = true;
+ ar9003_mci_send_coex_wlan_channels(ah, true);
+ ar9003_mci_2g5g_switch(ah, true);
+ break;
+
+ case MCI_STATE_NEED_FTP_STOMP:
+ value = !(mci->config & ATH_MCI_CONFIG_DISABLE_FTP_STOMP);
+ break;
+
+ case MCI_STATE_NEED_TUNING:
+ value = !(mci->config & ATH_MCI_CONFIG_DISABLE_TUNING);
+ break;
+
+ default:
+ break;
+
+ }
+
+ return value;
+}
+EXPORT_SYMBOL(ar9003_mci_state);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mci.h b/drivers/net/wireless/ath/ath9k/ar9003_mci.h
new file mode 100644
index 000000000000..798da116a44c
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mci.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef AR9003_MCI_H
+#define AR9003_MCI_H
+
+#define MCI_FLAG_DISABLE_TIMESTAMP 0x00000001 /* Disable time stamp */
+
+/* Default remote BT device MCI COEX version */
+#define MCI_GPM_COEX_MAJOR_VERSION_DEFAULT 3
+#define MCI_GPM_COEX_MINOR_VERSION_DEFAULT 0
+
+/* Local WLAN MCI COEX version */
+#define MCI_GPM_COEX_MAJOR_VERSION_WLAN 3
+#define MCI_GPM_COEX_MINOR_VERSION_WLAN 0
+
+enum mci_gpm_coex_query_type {
+ MCI_GPM_COEX_QUERY_BT_ALL_INFO = BIT(0),
+ MCI_GPM_COEX_QUERY_BT_TOPOLOGY = BIT(1),
+ MCI_GPM_COEX_QUERY_BT_DEBUG = BIT(2),
+};
+
+enum mci_gpm_coex_halt_bt_gpm {
+ MCI_GPM_COEX_BT_GPM_UNHALT,
+ MCI_GPM_COEX_BT_GPM_HALT
+};
+
+enum mci_gpm_coex_bt_update_flags_op {
+ MCI_GPM_COEX_BT_FLAGS_READ,
+ MCI_GPM_COEX_BT_FLAGS_SET,
+ MCI_GPM_COEX_BT_FLAGS_CLEAR
+};
+
+#define MCI_NUM_BT_CHANNELS 79
+
+#define MCI_BT_MCI_FLAGS_UPDATE_CORR 0x00000002
+#define MCI_BT_MCI_FLAGS_UPDATE_HDR 0x00000004
+#define MCI_BT_MCI_FLAGS_UPDATE_PLD 0x00000008
+#define MCI_BT_MCI_FLAGS_LNA_CTRL 0x00000010
+#define MCI_BT_MCI_FLAGS_DEBUG 0x00000020
+#define MCI_BT_MCI_FLAGS_SCHED_MSG 0x00000040
+#define MCI_BT_MCI_FLAGS_CONT_MSG 0x00000080
+#define MCI_BT_MCI_FLAGS_COEX_GPM 0x00000100
+#define MCI_BT_MCI_FLAGS_CPU_INT_MSG 0x00000200
+#define MCI_BT_MCI_FLAGS_MCI_MODE 0x00000400
+#define MCI_BT_MCI_FLAGS_AR9462_MODE 0x00001000
+#define MCI_BT_MCI_FLAGS_OTHER 0x00010000
+
+#define MCI_DEFAULT_BT_MCI_FLAGS 0x00011dde
+
+#define MCI_TOGGLE_BT_MCI_FLAGS (MCI_BT_MCI_FLAGS_UPDATE_CORR | \
+ MCI_BT_MCI_FLAGS_UPDATE_HDR | \
+ MCI_BT_MCI_FLAGS_UPDATE_PLD | \
+ MCI_BT_MCI_FLAGS_MCI_MODE)
+
+#define MCI_2G_FLAGS_CLEAR_MASK 0x00000000
+#define MCI_2G_FLAGS_SET_MASK MCI_TOGGLE_BT_MCI_FLAGS
+#define MCI_2G_FLAGS MCI_DEFAULT_BT_MCI_FLAGS
+
+#define MCI_5G_FLAGS_CLEAR_MASK MCI_TOGGLE_BT_MCI_FLAGS
+#define MCI_5G_FLAGS_SET_MASK 0x00000000
+#define MCI_5G_FLAGS (MCI_DEFAULT_BT_MCI_FLAGS & \
+ ~MCI_TOGGLE_BT_MCI_FLAGS)
+
+/*
+ * Default value for AR9462 is 0x00002201
+ */
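+/*
+ * Fields that have a matching *_S constant are mask/shift pairs, read from
+ * mci->config with the MS() helper.
+ */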
+#define ATH_MCI_CONFIG_CONCUR_TX 0x00000003
+#define ATH_MCI_CONFIG_MCI_OBS_MCI 0x00000004
+#define ATH_MCI_CONFIG_MCI_OBS_TXRX 0x00000008
+#define ATH_MCI_CONFIG_MCI_OBS_BT 0x00000010
+#define ATH_MCI_CONFIG_DISABLE_MCI_CAL 0x00000020
+#define ATH_MCI_CONFIG_DISABLE_OSLA 0x00000040
+#define ATH_MCI_CONFIG_DISABLE_FTP_STOMP 0x00000080
+#define ATH_MCI_CONFIG_AGGR_THRESH 0x00000700
+#define ATH_MCI_CONFIG_AGGR_THRESH_S 8
+#define ATH_MCI_CONFIG_DISABLE_AGGR_THRESH 0x00000800
+#define ATH_MCI_CONFIG_CLK_DIV 0x00003000
+#define ATH_MCI_CONFIG_CLK_DIV_S 12
+#define ATH_MCI_CONFIG_DISABLE_TUNING 0x00004000
+#define ATH_MCI_CONFIG_MCI_WEIGHT_DBG 0x40000000
+#define ATH_MCI_CONFIG_DISABLE_MCI 0x80000000
+
+#define ATH_MCI_CONFIG_MCI_OBS_MASK (ATH_MCI_CONFIG_MCI_OBS_MCI | \
+ ATH_MCI_CONFIG_MCI_OBS_TXRX | \
+ ATH_MCI_CONFIG_MCI_OBS_BT)
+#define ATH_MCI_CONFIG_MCI_OBS_GPIO 0x0000002F
+
+#endif
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
index a4450cba0653..59647a3ceb7f 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
@@ -119,8 +119,8 @@ static int ar9003_get_training_power_5g(struct ath_hw *ah)
break;
default:
delta = 0;
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Invalid tx-chainmask: %u\n", ah->txchainmask);
+ ath_dbg(common, CALIBRATE, "Invalid tx-chainmask: %u\n",
+ ah->txchainmask);
}
power += delta;
@@ -148,13 +148,12 @@ static int ar9003_paprd_setup_single_table(struct ath_hw *ah)
else
training_power = ar9003_get_training_power_5g(ah);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Training power: %d, Target power: %d\n",
+ ath_dbg(common, CALIBRATE, "Training power: %d, Target power: %d\n",
training_power, ah->paprd_target_power);
if (training_power < 0) {
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "PAPRD target power delta out of range");
+ ath_dbg(common, CALIBRATE,
+ "PAPRD target power delta out of range\n");
return -ERANGE;
}
ah->paprd_training_power = training_power;
@@ -311,8 +310,8 @@ static unsigned int ar9003_get_desired_gain(struct ath_hw *ah, int chain,
reg_cl_gain = AR_PHY_CL_TAB_2;
break;
default:
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
- "Invalid chainmask: %d\n", chain);
+ ath_dbg(ath9k_hw_common(ah), CALIBRATE,
+ "Invalid chainmask: %d\n", chain);
break;
}
@@ -850,7 +849,7 @@ bool ar9003_paprd_is_done(struct ath_hw *ah)
agc2_pwr = REG_READ_FIELD(ah, AR_PHY_PAPRD_TRAINER_STAT1,
AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_AGC2_PWR);
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
+ ath_dbg(ath9k_hw_common(ah), CALIBRATE,
"AGC2_PWR = 0x%x training done = 0x%x\n",
agc2_pwr, paprd_done);
/*
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index 2330e7ede199..2b0bfb8cca02 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -46,7 +46,7 @@ static const int m2ThreshExt_off = 127;
* @chan:
*
* This is the function to change channel on single-chip devices, that is
- * all devices after ar9280.
+ * for AR9300 family of chipsets.
*
* This function takes the channel value in MHz and sets
* hardware channel value. Assumes writes have been enabled to analog bus.
@@ -199,12 +199,14 @@ static void ar9003_hw_spur_mitigate_mrc_cck(struct ath_hw *ah,
synth_freq = chan->channel;
}
} else {
- range = 10;
+ range = AR_SREV_9462(ah) ? 5 : 10;
max_spur_cnts = 4;
synth_freq = chan->channel;
}
for (i = 0; i < max_spur_cnts; i++) {
+ if (AR_SREV_9462(ah) && (i == 0 || i == 3))
+ continue;
negative = 0;
if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah))
cur_bb_spur = FBIN2FREQ(spur_fbin_ptr[i],
@@ -880,7 +882,7 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
if (!on != aniState->ofdmWeakSigDetectOff) {
- ath_dbg(common, ATH_DBG_ANI,
+ ath_dbg(common, ANI,
"** ch %d: ofdm weak signal: %s=>%s\n",
chan->channel,
!aniState->ofdmWeakSigDetectOff ?
@@ -898,7 +900,7 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
u32 level = param;
if (level >= ARRAY_SIZE(firstep_table)) {
- ath_dbg(common, ATH_DBG_ANI,
+ ath_dbg(common, ANI,
"ATH9K_ANI_FIRSTEP_LEVEL: level out of range (%u > %zu)\n",
level, ARRAY_SIZE(firstep_table));
return false;
@@ -935,7 +937,7 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
AR_PHY_FIND_SIG_LOW_FIRSTEP_LOW, value2);
if (level != aniState->firstepLevel) {
- ath_dbg(common, ATH_DBG_ANI,
+ ath_dbg(common, ANI,
"** ch %d: level %d=>%d[def:%d] firstep[level]=%d ini=%d\n",
chan->channel,
aniState->firstepLevel,
@@ -943,7 +945,7 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
ATH9K_ANI_FIRSTEP_LVL_NEW,
value,
aniState->iniDef.firstep);
- ath_dbg(common, ATH_DBG_ANI,
+ ath_dbg(common, ANI,
"** ch %d: level %d=>%d[def:%d] firstep_low[level]=%d ini=%d\n",
chan->channel,
aniState->firstepLevel,
@@ -963,7 +965,7 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
u32 level = param;
if (level >= ARRAY_SIZE(cycpwrThr1_table)) {
- ath_dbg(common, ATH_DBG_ANI,
+ ath_dbg(common, ANI,
"ATH9K_ANI_SPUR_IMMUNITY_LEVEL: level out of range (%u > %zu)\n",
level, ARRAY_SIZE(cycpwrThr1_table));
return false;
@@ -999,7 +1001,7 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
AR_PHY_EXT_CYCPWR_THR1, value2);
if (level != aniState->spurImmunityLevel) {
- ath_dbg(common, ATH_DBG_ANI,
+ ath_dbg(common, ANI,
"** ch %d: level %d=>%d[def:%d] cycpwrThr1[level]=%d ini=%d\n",
chan->channel,
aniState->spurImmunityLevel,
@@ -1007,7 +1009,7 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
ATH9K_ANI_SPUR_IMMUNE_LVL_NEW,
value,
aniState->iniDef.cycpwrThr1);
- ath_dbg(common, ATH_DBG_ANI,
+ ath_dbg(common, ANI,
"** ch %d: level %d=>%d[def:%d] cycpwrThr1Ext[level]=%d ini=%d\n",
chan->channel,
aniState->spurImmunityLevel,
@@ -1034,8 +1036,7 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
REG_RMW_FIELD(ah, AR_PHY_MRC_CCK_CTRL,
AR_PHY_MRC_CCK_MUX_REG, is_on);
if (!is_on != aniState->mrcCCKOff) {
- ath_dbg(common, ATH_DBG_ANI,
- "** ch %d: MRC CCK: %s=>%s\n",
+ ath_dbg(common, ANI, "** ch %d: MRC CCK: %s=>%s\n",
chan->channel,
!aniState->mrcCCKOff ? "on" : "off",
is_on ? "on" : "off");
@@ -1050,11 +1051,11 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
case ATH9K_ANI_PRESENT:
break;
default:
- ath_dbg(common, ATH_DBG_ANI, "invalid cmd %u\n", cmd);
+ ath_dbg(common, ANI, "invalid cmd %u\n", cmd);
return false;
}
- ath_dbg(common, ATH_DBG_ANI,
+ ath_dbg(common, ANI,
"ANI parameters: SI=%d, ofdmWS=%s FS=%d MRCcck=%s listenTime=%d ofdmErrs=%d cckErrs=%d\n",
aniState->spurImmunityLevel,
!aniState->ofdmWeakSigDetectOff ? "on" : "off",
@@ -1123,8 +1124,7 @@ static void ar9003_hw_ani_cache_ini_regs(struct ath_hw *ah)
aniState = &ah->curchan->ani;
iniDef = &aniState->iniDef;
- ath_dbg(common, ATH_DBG_ANI,
- "ver %d.%d opmode %u chan %d Mhz/0x%x\n",
+ ath_dbg(common, ANI, "ver %d.%d opmode %u chan %d Mhz/0x%x\n",
ah->hw_version.macVersion,
ah->hw_version.macRev,
ah->opmode,
@@ -1386,7 +1386,7 @@ void ar9003_hw_bb_watchdog_config(struct ath_hw *ah)
~(AR_PHY_WATCHDOG_NON_IDLE_ENABLE |
AR_PHY_WATCHDOG_IDLE_ENABLE));
- ath_dbg(common, ATH_DBG_RESET, "Disabled BB Watchdog\n");
+ ath_dbg(common, RESET, "Disabled BB Watchdog\n");
return;
}
@@ -1422,8 +1422,7 @@ void ar9003_hw_bb_watchdog_config(struct ath_hw *ah)
AR_PHY_WATCHDOG_IDLE_MASK |
(AR_PHY_WATCHDOG_NON_IDLE_MASK & (idle_count << 2)));
- ath_dbg(common, ATH_DBG_RESET,
- "Enabled BB Watchdog timeout (%u ms)\n",
+ ath_dbg(common, RESET, "Enabled BB Watchdog timeout (%u ms)\n",
idle_tmo_ms);
}
@@ -1452,9 +1451,9 @@ void ar9003_hw_bb_watchdog_dbg_info(struct ath_hw *ah)
return;
status = ah->bb_watchdog_last_status;
- ath_dbg(common, ATH_DBG_RESET,
+ ath_dbg(common, RESET,
"\n==== BB update: BB status=0x%08x ====\n", status);
- ath_dbg(common, ATH_DBG_RESET,
+ ath_dbg(common, RESET,
"** BB state: wd=%u det=%u rdar=%u rOFDM=%d rCCK=%u tOFDM=%u tCCK=%u agc=%u src=%u **\n",
MS(status, AR_PHY_WATCHDOG_INFO),
MS(status, AR_PHY_WATCHDOG_DET_HANG),
@@ -1466,22 +1465,19 @@ void ar9003_hw_bb_watchdog_dbg_info(struct ath_hw *ah)
MS(status, AR_PHY_WATCHDOG_AGC_SM),
MS(status, AR_PHY_WATCHDOG_SRCH_SM));
- ath_dbg(common, ATH_DBG_RESET,
- "** BB WD cntl: cntl1=0x%08x cntl2=0x%08x **\n",
+ ath_dbg(common, RESET, "** BB WD cntl: cntl1=0x%08x cntl2=0x%08x **\n",
REG_READ(ah, AR_PHY_WATCHDOG_CTL_1),
REG_READ(ah, AR_PHY_WATCHDOG_CTL_2));
- ath_dbg(common, ATH_DBG_RESET,
- "** BB mode: BB_gen_controls=0x%08x **\n",
+ ath_dbg(common, RESET, "** BB mode: BB_gen_controls=0x%08x **\n",
REG_READ(ah, AR_PHY_GEN_CTRL));
#define PCT(_field) (common->cc_survey._field * 100 / common->cc_survey.cycles)
if (common->cc_survey.cycles)
- ath_dbg(common, ATH_DBG_RESET,
+ ath_dbg(common, RESET,
"** BB busy times: rx_clear=%d%%, rx_frame=%d%%, tx_frame=%d%% **\n",
PCT(rx_busy), PCT(rx_frame), PCT(tx_frame));
- ath_dbg(common, ATH_DBG_RESET,
- "==== BB update: done ====\n\n");
+ ath_dbg(common, RESET, "==== BB update: done ====\n\n");
}
EXPORT_SYMBOL(ar9003_hw_bb_watchdog_dbg_info);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index 4114fe752c6b..ed64114571fc 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -389,6 +389,8 @@
#define AR_PHY_DAG_CTRLCCK_RSSI_THR_S 10
#define AR_PHY_RIFS_INIT_DELAY 0x3ff0000
+#define AR_PHY_AGC_QUICK_DROP 0x03c00000
+#define AR_PHY_AGC_QUICK_DROP_S 22
#define AR_PHY_AGC_COARSE_LOW 0x00007F80
#define AR_PHY_AGC_COARSE_LOW_S 7
#define AR_PHY_AGC_COARSE_HIGH 0x003F8000
@@ -488,6 +490,8 @@
#define AR_PHY_TEST_CTL_TSTADC_EN_S 8
#define AR_PHY_TEST_CTL_RX_OBS_SEL 0x3C00
#define AR_PHY_TEST_CTL_RX_OBS_SEL_S 10
+#define AR_PHY_TEST_CTL_DEBUGPORT_SEL 0xe0000000
+#define AR_PHY_TEST_CTL_DEBUGPORT_SEL_S 29
#define AR_PHY_TSTDAC (AR_SM_BASE + 0x168)
@@ -999,6 +1003,7 @@
/* GLB Registers */
#define AR_GLB_BASE 0x20000
+#define AR_GLB_GPIO_CONTROL (AR_GLB_BASE)
#define AR_PHY_GLB_CONTROL (AR_GLB_BASE + 0x44)
#define AR_GLB_SCRATCH(_ah) (AR_GLB_BASE + \
(AR_SREV_9462_20(_ah) ? 0x4c : 0x50))
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_rtt.c b/drivers/net/wireless/ath/ath9k/ar9003_rtt.c
index 48803ee9c0d6..458bedf0b0ae 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_rtt.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_rtt.c
@@ -16,6 +16,7 @@
#include "hw.h"
#include "ar9003_phy.h"
+#include "ar9003_rtt.h"
#define RTT_RESTORE_TIMEOUT 1000
#define RTT_ACCESS_TIMEOUT 100
diff --git a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
index 9c51b395b4ff..dc2054f0378e 100644
--- a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
@@ -41,24 +41,24 @@ static const u32 ar9462_pciephy_clkreq_enable_L1_2p0[][2] = {
static const u32 ar9462_2p0_baseband_postamble[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a8011},
- {0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a012e},
- {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
- {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
+ {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a800d},
+ {0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a01ae},
+ {0x00009824, 0x5ac640de, 0x5ac640d0, 0x5ac640d0, 0x63c640da},
+ {0x00009828, 0x0796be89, 0x0696b081, 0x0696b881, 0x09143e81},
{0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
{0x00009830, 0x0000059c, 0x0000059c, 0x0000119c, 0x0000119c},
{0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4},
{0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a0},
{0x00009e04, 0x001c2020, 0x001c2020, 0x001c2020, 0x001c2020},
- {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
- {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec84d2e},
- {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3039605e, 0x33795d5e},
+ {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000d8},
+ {0x00009e10, 0x92c88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec86d2e},
+ {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3376605e, 0x33795d5e},
{0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
{0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
{0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
- {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c782},
- {0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
+ {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282},
+ {0x00009e44, 0x62321e27, 0x62321e27, 0xfe291e27, 0xfe291e27},
{0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
{0x0000a204, 0x013187c0, 0x013187c4, 0x013187c4, 0x013187c0},
@@ -81,6 +81,15 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = {
{0x0000a2d0, 0x00041981, 0x00041981, 0x00041981, 0x00041982},
{0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
{0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a3a4, 0x00000010, 0x00000010, 0x00000000, 0x00000000},
+ {0x0000a3a8, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa},
+ {0x0000a3ac, 0xaaaaaa00, 0xaaaaaa30, 0xaaaaaa00, 0xaaaaaa00},
+ {0x0000a41c, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce},
+ {0x0000a420, 0x000001ce, 0x000001ce, 0x000001ce, 0x000001ce},
+ {0x0000a424, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce},
+ {0x0000a428, 0x000001ce, 0x000001ce, 0x000001ce, 0x000001ce},
+ {0x0000a42c, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce},
+ {0x0000a430, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce},
{0x0000a830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
{0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x00100000},
{0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
@@ -688,8 +697,8 @@ static const u32 ar9462_2p0_mac_postamble_emulation[][5] = {
static const u32 ar9462_2p0_radio_postamble_sys3ant[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x000160ac, 0xa4646c08, 0xa4646c08, 0x24645808, 0x24645808},
- {0x00016140, 0x10804008, 0x10804008, 0x90804008, 0x90804008},
- {0x00016540, 0x10804008, 0x10804008, 0x90804008, 0x90804008},
+ {0x00016140, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
+ {0x00016540, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
};
static const u32 ar9462_2p0_baseband_postamble_emulation[][5] = {
@@ -717,8 +726,8 @@ static const u32 ar9462_2p0_baseband_postamble_emulation[][5] = {
static const u32 ar9462_2p0_radio_postamble_sys2ant[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x000160ac, 0xa4646c08, 0xa4646c08, 0x24645808, 0x24645808},
- {0x00016140, 0x10804008, 0x10804008, 0x90804008, 0x90804008},
- {0x00016540, 0x10804008, 0x10804008, 0x90804008, 0x90804008},
+ {0x00016140, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
+ {0x00016540, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
};
static const u32 ar9462_common_wo_xlna_rx_gain_table_2p0[][2] = {
@@ -1059,7 +1068,7 @@ static const u32 ar9462_modes_low_ob_db_tx_gain_table_2p0[][5] = {
static const u32 ar9462_2p0_soc_postamble[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x00007010, 0x00002233, 0x00002233, 0x00002233, 0x00002233},
+ {0x00007010, 0x00000033, 0x00000033, 0x00000033, 0x00000033},
};
static const u32 ar9462_2p0_baseband_core[][2] = {
@@ -1107,11 +1116,11 @@ static const u32 ar9462_2p0_baseband_core[][2] = {
{0x00009e30, 0x06336f77},
{0x00009e34, 0x6af6532f},
{0x00009e38, 0x0cc80c00},
- {0x00009e40, 0x0d261820},
+ {0x00009e40, 0x15262820},
{0x00009e4c, 0x00001004},
{0x00009e50, 0x00ff03f1},
- {0x00009e54, 0xe4c355c7},
- {0x00009e58, 0xfd897735},
+ {0x00009e54, 0xe4c555c2},
+ {0x00009e58, 0xfd857722},
{0x00009e5c, 0xe9198724},
{0x00009fc0, 0x803e4788},
{0x00009fc4, 0x0001efb5},
@@ -1142,9 +1151,6 @@ static const u32 ar9462_2p0_baseband_core[][2] = {
{0x0000a398, 0x001f0e0f},
{0x0000a39c, 0x0075393f},
{0x0000a3a0, 0xb79f6427},
- {0x0000a3a4, 0x00000000},
- {0x0000a3a8, 0xaaaaaaaa},
- {0x0000a3ac, 0x3c466478},
{0x0000a3c0, 0x20202020},
{0x0000a3c4, 0x22222220},
{0x0000a3c8, 0x20200020},
@@ -1167,12 +1173,6 @@ static const u32 ar9462_2p0_baseband_core[][2] = {
{0x0000a40c, 0x00820820},
{0x0000a414, 0x1ce739ce},
{0x0000a418, 0x2d001dce},
- {0x0000a41c, 0x1ce739ce},
- {0x0000a420, 0x000001ce},
- {0x0000a424, 0x1ce739ce},
- {0x0000a428, 0x000001ce},
- {0x0000a42c, 0x1ce739ce},
- {0x0000a430, 0x1ce739ce},
{0x0000a434, 0x00000000},
{0x0000a438, 0x00001801},
{0x0000a43c, 0x00100000},
@@ -1257,8 +1257,8 @@ static const u32 ar9462_modes_high_ob_db_tx_gain_table_2p0[][5] = {
{0x0000a540, 0x48025e6c, 0x48025e6c, 0x38001660, 0x38001660},
{0x0000a544, 0x4e025e8e, 0x4e025e8e, 0x3b001861, 0x3b001861},
{0x0000a548, 0x53025eb2, 0x53025eb2, 0x3e001a81, 0x3e001a81},
- {0x0000a54c, 0x59025eb2, 0x59025eb2, 0x42001a83, 0x42001a83},
- {0x0000a550, 0x5f025ef6, 0x5f025ef6, 0x44001c84, 0x44001c84},
+ {0x0000a54c, 0x59025eb6, 0x59025eb6, 0x42001a83, 0x42001a83},
+ {0x0000a550, 0x5d025ef6, 0x5d025ef6, 0x44001c84, 0x44001c84},
{0x0000a554, 0x62025f56, 0x62025f56, 0x48001ce3, 0x48001ce3},
{0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 0x4c001ce5},
{0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9},
@@ -1850,8 +1850,8 @@ static const u32 ar9462_modes_green_ob_db_tx_gain_table_2p0[][5] = {
{0x0000a540, 0x48025e6c, 0x48025e6c, 0x38001660, 0x38001660},
{0x0000a544, 0x4e025e8e, 0x4e025e8e, 0x3b001861, 0x3b001861},
{0x0000a548, 0x53025eb2, 0x53025eb2, 0x3e001a81, 0x3e001a81},
- {0x0000a54c, 0x59025eb2, 0x59025eb2, 0x42001a83, 0x42001a83},
- {0x0000a550, 0x5f025ef6, 0x5f025ef6, 0x44001c84, 0x44001c84},
+ {0x0000a54c, 0x59025eb6, 0x59025eb6, 0x42001a83, 0x42001a83},
+ {0x0000a550, 0x5d025ef6, 0x5d025ef6, 0x44001c84, 0x44001c84},
{0x0000a554, 0x62025f56, 0x62025f56, 0x48001ce3, 0x48001ce3},
{0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 0x4c001ce5},
{0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9},
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 1c269f50822b..171ccf7c972f 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -25,6 +25,7 @@
#include "debug.h"
#include "common.h"
+#include "mci.h"
/*
* Header for the ath9k.ko driver core *only* -- hw code nor any other driver
@@ -96,7 +97,7 @@ enum buffer_type {
#define bf_isampdu(bf) (bf->bf_state.bf_type & BUF_AMPDU)
#define bf_isaggr(bf) (bf->bf_state.bf_type & BUF_AGGR)
-#define ATH_TXSTATUS_RING_SIZE 64
+#define ATH_TXSTATUS_RING_SIZE 512
#define DS2PHYS(_dd, _ds) \
((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
@@ -158,6 +159,9 @@ void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *dd,
/* return block-ack bitmap index given sequence and starting sequence */
#define ATH_BA_INDEX(_st, _seq) (((_seq) - (_st)) & (IEEE80211_SEQ_MAX - 1))
+/* return the seqno for _start + _offset */
+#define ATH_BA_INDEX2SEQ(_seq, _offset) (((_seq) + (_offset)) & (IEEE80211_SEQ_MAX - 1))
+
/* returns delimiter padding required given the packet length */
#define ATH_AGGR_GET_NDELIM(_len) \
(((_len) >= ATH_AGGR_MINPLEN) ? 0 : \
@@ -192,6 +196,7 @@ struct ath_txq {
u8 txq_headidx;
u8 txq_tailidx;
int pending_frames;
+ struct sk_buff_head complete_q;
};
struct ath_atx_ac {
@@ -237,6 +242,7 @@ struct ath_atx_tid {
struct ath_node *an;
struct ath_atx_ac *ac;
unsigned long tx_buf[BITS_TO_LONGS(ATH_TID_MAX_BUFS)];
+ int bar_index;
u16 seq_start;
u16 seq_next;
u16 baw_size;
@@ -251,8 +257,9 @@ struct ath_atx_tid {
struct ath_node {
#ifdef CONFIG_ATH9K_DEBUGFS
struct list_head list; /* for sc->nodes */
- struct ieee80211_sta *sta; /* station struct we're part of */
#endif
+ struct ieee80211_sta *sta; /* station struct we're part of */
+ struct ieee80211_vif *vif; /* interface with which we're associated */
struct ath_atx_tid tid[WME_NUM_TID];
struct ath_atx_ac ac[WME_NUM_AC];
int ps_key;
@@ -274,7 +281,6 @@ struct ath_tx_control {
};
#define ATH_TX_ERROR 0x01
-#define ATH_TX_BAR 0x02
/**
* @txq_map: Index is mac80211 queue number. This is
@@ -443,7 +449,9 @@ struct ath_btcoex {
u32 btcoex_no_stomp; /* in usec */
u32 btcoex_period; /* in usec */
u32 btscan_no_stomp; /* in usec */
+ u32 duty_cycle;
struct ath_gen_timer *no_stomp_timer; /* Timer for no BT stomping */
+ struct ath_mci_profile mci;
};
int ath_init_btcoex_timer(struct ath_softc *sc);
@@ -458,7 +466,7 @@ void ath9k_btcoex_timer_pause(struct ath_softc *sc);
#define ATH_LED_PIN_9287 8
#define ATH_LED_PIN_9300 10
#define ATH_LED_PIN_9485 6
-#define ATH_LED_PIN_9462 0
+#define ATH_LED_PIN_9462 4
#ifdef CONFIG_MAC80211_LEDS
void ath_init_leds(struct ath_softc *sc);
@@ -538,7 +546,7 @@ struct ath_ant_comb {
#define DEFAULT_CACHELINE 32
#define ATH_REGCLASSIDS_MAX 10
#define ATH_CABQ_READY_TIME 80 /* % of beacon interval */
-#define ATH_MAX_SW_RETRIES 10
+#define ATH_MAX_SW_RETRIES 30
#define ATH_CHAN_MAX 255
#define ATH_TXPOWER_MAX 100 /* .5 dBm units */
@@ -643,6 +651,7 @@ struct ath_softc {
struct delayed_work tx_complete_work;
struct delayed_work hw_pll_work;
struct ath_btcoex btcoex;
+ struct ath_mci_coex mci_coex;
struct ath_descdma txsdma;
@@ -670,7 +679,6 @@ void ath9k_deinit_device(struct ath_softc *sc);
void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw);
void ath9k_reload_chainmask_settings(struct ath_softc *sc);
-void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw);
bool ath9k_uses_beacons(int type);
#ifdef CONFIG_ATH9K_PCI
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index a13cabb95435..b8967e482e6e 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -117,11 +117,10 @@ static void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
memset(&txctl, 0, sizeof(struct ath_tx_control));
txctl.txq = sc->beacon.cabq;
- ath_dbg(common, ATH_DBG_XMIT,
- "transmitting CABQ packet, skb: %p\n", skb);
+ ath_dbg(common, XMIT, "transmitting CABQ packet, skb: %p\n", skb);
if (ath_tx_start(hw, skb, &txctl) != 0) {
- ath_dbg(common, ATH_DBG_XMIT, "CABQ TX failed\n");
+ ath_dbg(common, XMIT, "CABQ TX failed\n");
dev_kfree_skb_any(skb);
}
}
@@ -204,7 +203,7 @@ static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw,
if (skb && cabq_depth) {
if (sc->nvifs > 1) {
- ath_dbg(common, ATH_DBG_BEACON,
+ ath_dbg(common, BEACON,
"Flushing previous cabq traffic\n");
ath_draintxq(sc, cabq, false);
}
@@ -297,7 +296,7 @@ int ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_vif *vif)
tsfadjust = TU_TO_USEC(intval * avp->av_bslot) / ATH_BCBUF;
avp->tsf_adjust = cpu_to_le64(tsfadjust);
- ath_dbg(common, ATH_DBG_BEACON,
+ ath_dbg(common, BEACON,
"stagger beacons, bslot %d intval %u tsfadjust %llu\n",
avp->av_bslot, intval, (unsigned long long)tsfadjust);
@@ -357,6 +356,7 @@ void ath_beacon_tasklet(unsigned long data)
struct ath_buf *bf = NULL;
struct ieee80211_vif *vif;
struct ath_tx_status ts;
+ bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
int slot;
u32 bfaddr, bc = 0;
@@ -371,15 +371,14 @@ void ath_beacon_tasklet(unsigned long data)
sc->beacon.bmisscnt++;
if (sc->beacon.bmisscnt < BSTUCK_THRESH * sc->nbcnvifs) {
- ath_dbg(common, ATH_DBG_BSTUCK,
+ ath_dbg(common, BSTUCK,
"missed %u consecutive beacons\n",
sc->beacon.bmisscnt);
ath9k_hw_stop_dma_queue(ah, sc->beacon.beaconq);
if (sc->beacon.bmisscnt > 3)
ath9k_hw_bstuck_nfcal(ah);
} else if (sc->beacon.bmisscnt >= BSTUCK_THRESH) {
- ath_dbg(common, ATH_DBG_BSTUCK,
- "beacon is officially stuck\n");
+ ath_dbg(common, BSTUCK, "beacon is officially stuck\n");
sc->sc_flags |= SC_OP_TSF_RESET;
ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
}
@@ -406,7 +405,7 @@ void ath_beacon_tasklet(unsigned long data)
slot = (tsftu % (intval * ATH_BCBUF)) / intval;
vif = sc->beacon.bslot[slot];
- ath_dbg(common, ATH_DBG_BEACON,
+ ath_dbg(common, BEACON,
"slot %d [tsf %llu tsftu %u intval %u] vif %p\n",
slot, tsf, tsftu / ATH_BCBUF, intval, vif);
} else {
@@ -424,7 +423,7 @@ void ath_beacon_tasklet(unsigned long data)
}
if (sc->beacon.bmisscnt != 0) {
- ath_dbg(common, ATH_DBG_BSTUCK,
+ ath_dbg(common, BSTUCK,
"resume beacon xmit after %u misses\n",
sc->beacon.bmisscnt);
sc->beacon.bmisscnt = 0;
@@ -458,10 +457,12 @@ void ath_beacon_tasklet(unsigned long data)
if (bfaddr != 0) {
/* NB: cabq traffic should already be queued and primed */
ath9k_hw_puttxbuf(ah, sc->beacon.beaconq, bfaddr);
- ath9k_hw_txstart(ah, sc->beacon.beaconq);
+
+ if (!edma)
+ ath9k_hw_txstart(ah, sc->beacon.beaconq);
sc->beacon.ast_be_xmit += bc; /* XXX per-vif? */
- if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+ if (edma) {
spin_lock_bh(&sc->sc_pcu_lock);
ath9k_hw_txprocdesc(ah, bf->bf_desc, (void *)&ts);
spin_unlock_bh(&sc->sc_pcu_lock);
@@ -541,7 +542,7 @@ static void ath_beacon_config_sta(struct ath_softc *sc,
/* No need to configure beacon if we are not associated */
if (!common->curaid) {
- ath_dbg(common, ATH_DBG_BEACON,
+ ath_dbg(common, BEACON,
"STA is not yet associated..skipping beacon config\n");
return;
}
@@ -631,8 +632,8 @@ static void ath_beacon_config_sta(struct ath_softc *sc,
/* TSF out of range threshold fixed at 1 second */
bs.bs_tsfoor_threshold = ATH9K_TSFOOR_THRESHOLD;
- ath_dbg(common, ATH_DBG_BEACON, "tsf: %llu tsftu: %u\n", tsf, tsftu);
- ath_dbg(common, ATH_DBG_BEACON,
+ ath_dbg(common, BEACON, "tsf: %llu tsftu: %u\n", tsf, tsftu);
+ ath_dbg(common, BEACON,
"bmiss: %u sleep: %u cfp-period: %u maxdur: %u next: %u\n",
bs.bs_bmissthreshold, bs.bs_sleepduration,
bs.bs_cfpperiod, bs.bs_cfpmaxduration, bs.bs_cfpnext);
@@ -660,8 +661,7 @@ static void ath_beacon_config_adhoc(struct ath_softc *sc,
tsf = roundup(ath9k_hw_gettsf32(ah) + TU_TO_USEC(FUDGE), intval);
nexttbtt = tsf + intval;
- ath_dbg(common, ATH_DBG_BEACON,
- "IBSS nexttbtt %u intval %u (%u)\n",
+ ath_dbg(common, BEACON, "IBSS nexttbtt %u intval %u (%u)\n",
nexttbtt, intval, conf->beacon_interval);
/*
@@ -699,9 +699,8 @@ static bool ath9k_allow_beacon_config(struct ath_softc *sc,
(sc->nbcnvifs > 1) &&
(vif->type == NL80211_IFTYPE_AP) &&
(cur_conf->beacon_interval != bss_conf->beacon_int)) {
- ath_dbg(common, ATH_DBG_CONFIG,
- "Changing beacon interval of multiple \
- AP interfaces !\n");
+ ath_dbg(common, CONFIG,
+ "Changing beacon interval of multiple AP interfaces !\n");
return false;
}
/*
@@ -710,7 +709,7 @@ static bool ath9k_allow_beacon_config(struct ath_softc *sc,
*/
if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) &&
(vif->type != NL80211_IFTYPE_AP)) {
- ath_dbg(common, ATH_DBG_CONFIG,
+ ath_dbg(common, CONFIG,
"STA vif's beacon not allowed on AP mode\n");
return false;
}
@@ -722,7 +721,7 @@ static bool ath9k_allow_beacon_config(struct ath_softc *sc,
(vif->type == NL80211_IFTYPE_STATION) &&
(sc->sc_flags & SC_OP_BEACONS) &&
!avp->primary_sta_vif) {
- ath_dbg(common, ATH_DBG_CONFIG,
+ ath_dbg(common, CONFIG,
"Beacon already configured for a station interface\n");
return false;
}
@@ -802,8 +801,7 @@ void ath_set_beacon(struct ath_softc *sc)
ath_beacon_config_sta(sc, cur_conf);
break;
default:
- ath_dbg(common, ATH_DBG_CONFIG,
- "Unsupported beaconing mode\n");
+ ath_dbg(common, CONFIG, "Unsupported beaconing mode\n");
return;
}
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.c b/drivers/net/wireless/ath/ath9k/btcoex.c
index 012263968d64..a6712a95d76a 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.c
+++ b/drivers/net/wireless/ath/ath9k/btcoex.c
@@ -21,7 +21,7 @@ enum ath_bt_mode {
ATH_BT_COEX_MODE_LEGACY, /* legacy rx_clear mode */
ATH_BT_COEX_MODE_UNSLOTTED, /* untimed/unslotted mode */
ATH_BT_COEX_MODE_SLOTTED, /* slotted mode */
- ATH_BT_COEX_MODE_DISALBED, /* coexistence disabled */
+ ATH_BT_COEX_MODE_DISABLED, /* coexistence disabled */
};
struct ath_btcoex_config {
@@ -36,6 +36,20 @@ struct ath_btcoex_config {
bool bt_hold_rx_clear;
};
+static const u32 ar9003_wlan_weights[ATH_BTCOEX_STOMP_MAX]
+ [AR9300_NUM_WLAN_WEIGHTS] = {
+ { 0xfffffff0, 0xfffffff0, 0xfffffff0, 0xfffffff0 }, /* STOMP_ALL */
+ { 0x88888880, 0x88888880, 0x88888880, 0x88888880 }, /* STOMP_LOW */
+ { 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, /* STOMP_NONE */
+};
+
+static const u32 ar9462_wlan_weights[ATH_BTCOEX_STOMP_MAX]
+ [AR9300_NUM_WLAN_WEIGHTS] = {
+ { 0x01017d01, 0x41414101, 0x41414101, 0x41414141 }, /* STOMP_ALL */
+ { 0x01017d01, 0x3b3b3b01, 0x3b3b3b01, 0x3b3b3b3b }, /* STOMP_LOW */
+ { 0x01017d01, 0x01010101, 0x01010101, 0x01010101 }, /* STOMP_NONE */
+ { 0x01017d01, 0x013b0101, 0x3b3b0101, 0x3b3b013b }, /* STOMP_LOW_FTP */
+};
void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum)
{
@@ -54,6 +68,9 @@ void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum)
u32 i, idx;
bool rxclear_polarity = ath_bt_config.bt_rxclear_polarity;
+ if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE)
+ return;
+
if (AR_SREV_9300_20_OR_LATER(ah))
rxclear_polarity = !ath_bt_config.bt_rxclear_polarity;
@@ -85,6 +102,9 @@ void ath9k_hw_btcoex_init_2wire(struct ath_hw *ah)
{
struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
+ if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE)
+ return;
+
/* connect bt_active to baseband */
REG_CLR_BIT(ah, AR_GPIO_INPUT_EN_VAL,
(AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_DEF |
@@ -107,6 +127,9 @@ void ath9k_hw_btcoex_init_3wire(struct ath_hw *ah)
{
struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
+ if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE)
+ return;
+
/* btcoex 3-wire */
REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL,
(AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_BB |
@@ -133,6 +156,9 @@ static void ath9k_hw_btcoex_enable_2wire(struct ath_hw *ah)
{
struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
+ if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE)
+ return;
+
/* Configure the desired GPIO port for TX_FRAME output */
ath9k_hw_cfg_output(ah, btcoex_hw->wlanactive_gpio,
AR_GPIO_OUTPUT_MUX_AS_TX_FRAME);
@@ -144,6 +170,9 @@ void ath9k_hw_btcoex_set_weight(struct ath_hw *ah,
{
struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
+ if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE)
+ return;
+
btcoex_hw->bt_coex_weights = SM(bt_weight, AR_BTCOEX_BT_WGHT) |
SM(wlan_weight, AR_BTCOEX_WL_WGHT);
}
@@ -152,27 +181,26 @@ EXPORT_SYMBOL(ath9k_hw_btcoex_set_weight);
static void ath9k_hw_btcoex_enable_3wire(struct ath_hw *ah)
{
- struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
+ struct ath_btcoex_hw *btcoex = &ah->btcoex_hw;
u32 val;
+ int i;
/*
* Program coex mode and weight registers to
* enable coex 3-wire
*/
- REG_WRITE(ah, AR_BT_COEX_MODE, btcoex_hw->bt_coex_mode);
- REG_WRITE(ah, AR_BT_COEX_MODE2, btcoex_hw->bt_coex_mode2);
+ REG_WRITE(ah, AR_BT_COEX_MODE, btcoex->bt_coex_mode);
+ REG_WRITE(ah, AR_BT_COEX_MODE2, btcoex->bt_coex_mode2);
if (AR_SREV_9300_20_OR_LATER(ah)) {
- REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS0, ah->bt_coex_wlan_weight[0]);
- REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS1, ah->bt_coex_wlan_weight[1]);
- REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS0, ah->bt_coex_bt_weight[0]);
- REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS1, ah->bt_coex_bt_weight[1]);
- REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS2, ah->bt_coex_bt_weight[2]);
- REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS3, ah->bt_coex_bt_weight[3]);
-
+ REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS0, btcoex->wlan_weight[0]);
+ REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS1, btcoex->wlan_weight[1]);
+ for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
+ REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS(i),
+ btcoex->bt_weight[i]);
} else
- REG_WRITE(ah, AR_BT_COEX_WEIGHT, btcoex_hw->bt_coex_weights);
+ REG_WRITE(ah, AR_BT_COEX_WEIGHT, btcoex->bt_coex_weights);
@@ -185,23 +213,39 @@ static void ath9k_hw_btcoex_enable_3wire(struct ath_hw *ah)
REG_RMW_FIELD(ah, AR_QUIET1, AR_QUIET1_QUIET_ACK_CTS_ENABLE, 1);
REG_RMW_FIELD(ah, AR_PCU_MISC, AR_PCU_BT_ANT_PREVENT_RX, 0);
- ath9k_hw_cfg_output(ah, btcoex_hw->wlanactive_gpio,
+ ath9k_hw_cfg_output(ah, btcoex->wlanactive_gpio,
AR_GPIO_OUTPUT_MUX_AS_RX_CLEAR_EXTERNAL);
}
+static void ath9k_hw_btcoex_enable_mci(struct ath_hw *ah)
+{
+ struct ath_btcoex_hw *btcoex = &ah->btcoex_hw;
+ int i;
+
+ for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
+ REG_WRITE(ah, AR_MCI_COEX_WL_WEIGHTS(i),
+ btcoex->wlan_weight[i]);
+
+ REG_RMW_FIELD(ah, AR_QUIET1, AR_QUIET1_QUIET_ACK_CTS_ENABLE, 1);
+ btcoex->enabled = true;
+}
+
void ath9k_hw_btcoex_enable(struct ath_hw *ah)
{
struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
- switch (btcoex_hw->scheme) {
+ switch (ath9k_hw_get_btcoex_scheme(ah)) {
case ATH_BTCOEX_CFG_NONE:
- break;
+ return;
case ATH_BTCOEX_CFG_2WIRE:
ath9k_hw_btcoex_enable_2wire(ah);
break;
case ATH_BTCOEX_CFG_3WIRE:
ath9k_hw_btcoex_enable_3wire(ah);
break;
+ case ATH_BTCOEX_CFG_MCI:
+ ath9k_hw_btcoex_enable_mci(ah);
+ return;
}
REG_RMW(ah, AR_GPIO_PDPU,
@@ -215,7 +259,18 @@ EXPORT_SYMBOL(ath9k_hw_btcoex_enable);
void ath9k_hw_btcoex_disable(struct ath_hw *ah)
{
struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
+ int i;
+
+ if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE)
+ return;
+ btcoex_hw->enabled = false;
+ if (btcoex_hw->scheme == ATH_BTCOEX_CFG_MCI) {
+ ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_NONE);
+ for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
+ REG_WRITE(ah, AR_MCI_COEX_WL_WEIGHTS(i),
+ btcoex_hw->wlan_weight[i]);
+ }
ath9k_hw_set_gpio(ah, btcoex_hw->wlanactive_gpio, 0);
ath9k_hw_cfg_output(ah, btcoex_hw->wlanactive_gpio,
@@ -228,49 +283,27 @@ void ath9k_hw_btcoex_disable(struct ath_hw *ah)
if (AR_SREV_9300_20_OR_LATER(ah)) {
REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS0, 0);
REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS1, 0);
- REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS0, 0);
- REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS1, 0);
- REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS2, 0);
- REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS3, 0);
+ for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
+ REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS(i), 0);
} else
REG_WRITE(ah, AR_BT_COEX_WEIGHT, 0);
}
-
- ah->btcoex_hw.enabled = false;
}
EXPORT_SYMBOL(ath9k_hw_btcoex_disable);
static void ar9003_btcoex_bt_stomp(struct ath_hw *ah,
enum ath_stomp_type stomp_type)
{
- ah->bt_coex_bt_weight[0] = AR9300_BT_WGHT;
- ah->bt_coex_bt_weight[1] = AR9300_BT_WGHT;
- ah->bt_coex_bt_weight[2] = AR9300_BT_WGHT;
- ah->bt_coex_bt_weight[3] = AR9300_BT_WGHT;
-
-
- switch (stomp_type) {
- case ATH_BTCOEX_STOMP_ALL:
- ah->bt_coex_wlan_weight[0] = AR9300_STOMP_ALL_WLAN_WGHT0;
- ah->bt_coex_wlan_weight[1] = AR9300_STOMP_ALL_WLAN_WGHT1;
- break;
- case ATH_BTCOEX_STOMP_LOW:
- ah->bt_coex_wlan_weight[0] = AR9300_STOMP_LOW_WLAN_WGHT0;
- ah->bt_coex_wlan_weight[1] = AR9300_STOMP_LOW_WLAN_WGHT1;
- break;
- case ATH_BTCOEX_STOMP_NONE:
- ah->bt_coex_wlan_weight[0] = AR9300_STOMP_NONE_WLAN_WGHT0;
- ah->bt_coex_wlan_weight[1] = AR9300_STOMP_NONE_WLAN_WGHT1;
- break;
-
- default:
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
- "Invalid Stomptype\n");
- break;
+ struct ath_btcoex_hw *btcoex = &ah->btcoex_hw;
+ const u32 *weight = AR_SREV_9462(ah) ? ar9462_wlan_weights[stomp_type] :
+ ar9003_wlan_weights[stomp_type];
+ int i;
+
+ for (i = 0; i < AR9300_NUM_WLAN_WEIGHTS; i++) {
+ btcoex->bt_weight[i] = AR9300_BT_WGHT;
+ btcoex->wlan_weight[i] = weight[i];
}
-
- ath9k_hw_btcoex_enable(ah);
}
/*
@@ -279,6 +312,9 @@ static void ar9003_btcoex_bt_stomp(struct ath_hw *ah,
void ath9k_hw_btcoex_bt_stomp(struct ath_hw *ah,
enum ath_stomp_type stomp_type)
{
+ if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE)
+ return;
+
if (AR_SREV_9300_20_OR_LATER(ah)) {
ar9003_btcoex_bt_stomp(ah, stomp_type);
return;
@@ -298,11 +334,8 @@ void ath9k_hw_btcoex_bt_stomp(struct ath_hw *ah,
AR_STOMP_NONE_WLAN_WGHT);
break;
default:
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
- "Invalid Stomptype\n");
+ ath_dbg(ath9k_hw_common(ah), BTCOEX, "Invalid Stomptype\n");
break;
}
-
- ath9k_hw_btcoex_enable(ah);
}
EXPORT_SYMBOL(ath9k_hw_btcoex_bt_stomp);
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.h b/drivers/net/wireless/ath/ath9k/btcoex.h
index 234f77689b14..278361c867ca 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.h
+++ b/drivers/net/wireless/ath/ath9k/btcoex.h
@@ -36,22 +36,57 @@
#define ATH_BT_CNT_THRESHOLD 3
#define ATH_BT_CNT_SCAN_THRESHOLD 15
+#define AR9300_NUM_BT_WEIGHTS 4
+#define AR9300_NUM_WLAN_WEIGHTS 4
/* Defines the BT AR_BT_COEX_WGHT used */
enum ath_stomp_type {
- ATH_BTCOEX_NO_STOMP,
ATH_BTCOEX_STOMP_ALL,
ATH_BTCOEX_STOMP_LOW,
- ATH_BTCOEX_STOMP_NONE
+ ATH_BTCOEX_STOMP_NONE,
+ ATH_BTCOEX_STOMP_LOW_FTP,
+ ATH_BTCOEX_STOMP_MAX
};
enum ath_btcoex_scheme {
ATH_BTCOEX_CFG_NONE,
ATH_BTCOEX_CFG_2WIRE,
ATH_BTCOEX_CFG_3WIRE,
+ ATH_BTCOEX_CFG_MCI,
+};
+
+struct ath9k_hw_mci {
+ u32 raw_intr;
+ u32 rx_msg_intr;
+ u32 cont_status;
+ u32 gpm_addr;
+ u32 gpm_len;
+ u32 gpm_idx;
+ u32 sched_addr;
+ u32 wlan_channels[4];
+ u32 wlan_cal_seq;
+ u32 wlan_cal_done;
+ u32 config;
+ u8 *gpm_buf;
+ u8 *sched_buf;
+ bool ready;
+ bool update_2g5g;
+ bool is_2g;
+ bool query_bt;
+ bool unhalt_bt_gpm; /* need send UNHALT */
+ bool halted_bt_gpm; /* HALT sent */
+ bool need_flush_btinfo;
+ bool bt_version_known;
+ bool wlan_channels_update;
+ u8 wlan_ver_major;
+ u8 wlan_ver_minor;
+ u8 bt_ver_major;
+ u8 bt_ver_minor;
+ u8 bt_state;
};
struct ath_btcoex_hw {
enum ath_btcoex_scheme scheme;
+ struct ath9k_hw_mci mci;
bool enabled;
u8 wlanactive_gpio;
u8 btactive_gpio;
@@ -59,6 +94,8 @@ struct ath_btcoex_hw {
u32 bt_coex_mode; /* Register setting for AR_BT_COEX_MODE */
u32 bt_coex_weights; /* Register setting for AR_BT_COEX_WEIGHT */
u32 bt_coex_mode2; /* Register setting for AR_BT_COEX_MODE2 */
+ u32 bt_weight[AR9300_NUM_BT_WEIGHTS];
+ u32 wlan_weight[AR9300_NUM_WLAN_WEIGHTS];
};
void ath9k_hw_btcoex_init_2wire(struct ath_hw *ah);
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index 99538810a312..2f4b48e6fb03 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -116,7 +116,7 @@ static void ath9k_hw_update_nfcal_hist_buffer(struct ath_hw *ah,
if (h[i].privNF > limit->max) {
high_nf_mid = true;
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"NFmid[%d] (%d) > MAX (%d), %s\n",
i, h[i].privNF, limit->max,
(cal->nfcal_interference ?
@@ -199,8 +199,7 @@ bool ath9k_hw_reset_calvalid(struct ath_hw *ah)
return true;
if (currCal->calState != CAL_DONE) {
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Calibration state incorrect, %d\n",
+ ath_dbg(common, CALIBRATE, "Calibration state incorrect, %d\n",
currCal->calState);
return true;
}
@@ -208,8 +207,7 @@ bool ath9k_hw_reset_calvalid(struct ath_hw *ah)
if (!(ah->supp_cals & currCal->calData->calType))
return true;
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Resetting Cal %d state for channel %u\n",
+ ath_dbg(common, CALIBRATE, "Resetting Cal %d state for channel %u\n",
currCal->calData->calType, conf->channel->center_freq);
ah->caldata->CalValid &= ~currCal->calData->calType;
@@ -302,7 +300,7 @@ void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
* noisefloor until the next calibration timer.
*/
if (j == 10000) {
- ath_dbg(common, ATH_DBG_ANY,
+ ath_dbg(common, ANY,
"Timeout while waiting for nf to load: AR_PHY_AGC_CONTROL=0x%x\n",
REG_READ(ah, AR_PHY_AGC_CONTROL));
return;
@@ -344,17 +342,17 @@ static void ath9k_hw_nf_sanitize(struct ath_hw *ah, s16 *nf)
if (!nf[i])
continue;
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"NF calibrated [%s] [chain %d] is %d\n",
(i >= 3 ? "ext" : "ctl"), i % 3, nf[i]);
if (nf[i] > ATH9K_NF_TOO_HIGH) {
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"NF[%d] (%d) > MAX (%d), correcting to MAX\n",
i, nf[i], ATH9K_NF_TOO_HIGH);
nf[i] = limit->max;
} else if (nf[i] < limit->min) {
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"NF[%d] (%d) < MIN (%d), correcting to NOM\n",
i, nf[i], limit->min);
nf[i] = limit->nominal;
@@ -373,7 +371,7 @@ bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan)
chan->channelFlags &= (~CHANNEL_CW_INT);
if (REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF) {
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"NF did not complete in calibration window\n");
return false;
}
@@ -383,7 +381,7 @@ bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan)
nf = nfarray[0];
if (ath9k_hw_get_nf_thresh(ah, c->band, &nfThresh)
&& nf > nfThresh) {
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"noise floor failed detected; detected %d, threshold %d\n",
nf, nfThresh);
chan->channelFlags |= CHANNEL_CW_INT;
@@ -402,6 +400,7 @@ bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan)
ah->noise = ath9k_hw_getchan_noise(ah, chan);
return true;
}
+EXPORT_SYMBOL(ath9k_hw_getnf);
void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah,
struct ath9k_channel *chan)
diff --git a/drivers/net/wireless/ath/ath9k/calib.h b/drivers/net/wireless/ath/ath9k/calib.h
index 05b9dbf81850..3b33996d97df 100644
--- a/drivers/net/wireless/ath/ath9k/calib.h
+++ b/drivers/net/wireless/ath/ath9k/calib.h
@@ -19,7 +19,6 @@
#include "hw.h"
-#define AR_PHY_CCA_FILTERWINDOW_LENGTH_INIT 3
#define AR_PHY_CCA_FILTERWINDOW_LENGTH 5
#define NUM_NF_READINGS 6
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 2741203e803f..68d972bf232d 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -709,24 +709,29 @@ static ssize_t read_file_stations(struct file *file, char __user *user_buf,
len += snprintf(buf + len, size - len,
"Stations:\n"
- " tid: addr sched paused buf_q-empty an ac\n"
+ " tid: addr sched paused buf_q-empty an ac baw\n"
" ac: addr sched tid_q-empty txq\n");
spin_lock(&sc->nodes_lock);
list_for_each_entry(an, &sc->nodes, list) {
+ unsigned short ma = an->maxampdu;
+ if (ma == 0)
+ ma = 65535; /* see ath_lookup_rate */
len += snprintf(buf + len, size - len,
- "%pM\n", an->sta->addr);
+ "iface: %pM sta: %pM max-ampdu: %hu mpdu-density: %uus\n",
+ an->vif->addr, an->sta->addr, ma,
+ (unsigned int)(an->mpdudensity));
if (len >= size)
goto done;
for (q = 0; q < WME_NUM_TID; q++) {
struct ath_atx_tid *tid = &(an->tid[q]);
len += snprintf(buf + len, size - len,
- " tid: %p %s %s %i %p %p\n",
+ " tid: %p %s %s %i %p %p %hu\n",
tid, tid->sched ? "sched" : "idle",
tid->paused ? "paused" : "running",
skb_queue_empty(&tid->buf_q),
- tid->an, tid->ac);
+ tid->an, tid->ac, tid->baw_size);
if (len >= size)
goto done;
}
@@ -851,7 +856,7 @@ void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
sc->debug.stats.txstats[qnum].tx_bytes_all += bf->bf_mpdu->len;
if (bf_isampdu(bf)) {
- if (flags & ATH_TX_BAR)
+ if (flags & ATH_TX_ERROR)
TX_STAT_INC(qnum, a_xretries);
else
TX_STAT_INC(qnum, a_completed);
@@ -1625,6 +1630,9 @@ int ath9k_init_debug(struct ath_hw *ah)
debugfs_create_file("debug", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
sc, &fops_debug);
#endif
+
+ ath9k_dfs_init_debug(sc);
+
debugfs_create_file("dma", S_IRUSR, sc->debug.debugfs_phy, sc,
&fops_dma);
debugfs_create_file("interrupt", S_IRUSR, sc->debug.debugfs_phy, sc,
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 356352ac2d6e..776a24ada600 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -19,6 +19,7 @@
#include "hw.h"
#include "rc.h"
+#include "dfs_debug.h"
struct ath_txq;
struct ath_buf;
@@ -187,6 +188,7 @@ struct ath_stats {
struct ath_interrupt_stats istats;
struct ath_tx_stats txstats[ATH9K_NUM_TX_QUEUES];
struct ath_rx_stats rxstats;
+ struct ath_dfs_stats dfs_stats;
u32 reset[__RESET_TYPE_MAX];
};
diff --git a/drivers/net/wireless/ath/ath9k/dfs.c b/drivers/net/wireless/ath/ath9k/dfs.c
new file mode 100644
index 000000000000..f4f56aff1e9d
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/dfs.c
@@ -0,0 +1,215 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ * Copyright (c) 2011 Neratec Solutions AG
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "hw.h"
+#include "hw-ops.h"
+#include "ath9k.h"
+#include "dfs.h"
+#include "dfs_debug.h"
+
+/*
+ * TODO: move into or synchronize this with generic header
+ * as soon as IF is defined
+ */
+struct dfs_radar_pulse {
+ u16 freq;
+ u64 ts;
+ u32 width;
+ u8 rssi;
+};
+
+/* internal struct to pass radar data */
+struct ath_radar_data {
+ u8 pulse_bw_info;
+ u8 rssi;
+ u8 ext_rssi;
+ u8 pulse_length_ext;
+ u8 pulse_length_pri;
+};
+
+/* convert pulse duration to usecs, considering clock mode */
+static u32 dur_to_usecs(struct ath_hw *ah, u32 dur)
+{
+ const u32 AR93X_NSECS_PER_DUR = 800;
+ const u32 AR93X_NSECS_PER_DUR_FAST = (8000 / 11);
+ u32 nsecs;
+
+ if (IS_CHAN_A_FAST_CLOCK(ah, ah->curchan))
+ nsecs = dur * AR93X_NSECS_PER_DUR_FAST;
+ else
+ nsecs = dur * AR93X_NSECS_PER_DUR;
+
+ return (nsecs + 500) / 1000;
+}
+
+#define PRI_CH_RADAR_FOUND 0x01
+#define EXT_CH_RADAR_FOUND 0x02
+static bool
+ath9k_postprocess_radar_event(struct ath_softc *sc,
+ struct ath_radar_data *are,
+ struct dfs_radar_pulse *drp)
+{
+ u8 rssi;
+ u16 dur;
+
+ ath_dbg(ath9k_hw_common(sc->sc_ah), DFS,
+ "pulse_bw_info=0x%x, pri,ext len/rssi=(%u/%u, %u/%u)\n",
+ are->pulse_bw_info,
+ are->pulse_length_pri, are->rssi,
+ are->pulse_length_ext, are->ext_rssi);
+
+ /*
+ * Only the last 2 bits of the BW info are relevant, they indicate
+ * which channel the radar was detected in.
+ */
+ are->pulse_bw_info &= 0x03;
+
+ switch (are->pulse_bw_info) {
+ case PRI_CH_RADAR_FOUND:
+ /* radar in ctrl channel */
+ dur = are->pulse_length_pri;
+ DFS_STAT_INC(sc, pri_phy_errors);
+ /*
+ * cannot use ctrl channel RSSI
+ * if extension channel is stronger
+ */
+ rssi = (are->ext_rssi >= (are->rssi + 3)) ? 0 : are->rssi;
+ break;
+ case EXT_CH_RADAR_FOUND:
+ /* radar in extension channel */
+ dur = are->pulse_length_ext;
+ DFS_STAT_INC(sc, ext_phy_errors);
+ /*
+ * cannot use extension channel RSSI
+ * if control channel is stronger
+ */
+ rssi = (are->rssi >= (are->ext_rssi + 12)) ? 0 : are->ext_rssi;
+ break;
+ case (PRI_CH_RADAR_FOUND | EXT_CH_RADAR_FOUND):
+ /*
+ * In conducted testing, when the pulse is on DC, both pri and ext
+ * durations are reported to be the same.
+ *
+ * In radiated testing, when the pulse is on DC, different pri and
+ * ext durations are reported, so take the larger of the two.
+ */
+ if (are->pulse_length_ext >= are->pulse_length_pri)
+ dur = are->pulse_length_ext;
+ else
+ dur = are->pulse_length_pri;
+ DFS_STAT_INC(sc, dc_phy_errors);
+
+ /* when both are present use stronger one */
+ rssi = (are->rssi < are->ext_rssi) ? are->ext_rssi : are->rssi;
+ break;
+ default:
+ /*
+ * Bogus bandwidth info was received in descriptor,
+ * so ignore this PHY error
+ */
+ DFS_STAT_INC(sc, bwinfo_discards);
+ return false;
+ }
+
+ if (rssi == 0) {
+ DFS_STAT_INC(sc, rssi_discards);
+ return false;
+ }
+
+ /*
+ * TODO: check chirping pulses
+ * checks for chirping are dependent on the DFS regulatory domain
+ * used, which is yet TBD
+ */
+
+ /* convert duration to usecs */
+ drp->width = dur_to_usecs(sc->sc_ah, dur);
+ drp->rssi = rssi;
+
+ DFS_STAT_INC(sc, pulses_detected);
+ return true;
+}
+#undef PRI_CH_RADAR_FOUND
+#undef EXT_CH_RADAR_FOUND
+
+/*
+ * DFS: check PHY-error for radar pulse and feed the detector
+ */
+void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data,
+ struct ath_rx_status *rs, u64 mactime)
+{
+ struct ath_radar_data ard;
+ u16 datalen;
+ char *vdata_end;
+ struct dfs_radar_pulse drp;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if ((rs->rs_phyerr != ATH9K_PHYERR_RADAR) &&
+ (rs->rs_phyerr != ATH9K_PHYERR_FALSE_RADAR_EXT)) {
+ ath_dbg(common, DFS,
+ "Error: rs_phyer=0x%x not a radar error\n",
+ rs->rs_phyerr);
+ return;
+ }
+
+ datalen = rs->rs_datalen;
+ if (datalen == 0) {
+ DFS_STAT_INC(sc, datalen_discards);
+ return;
+ }
+
+ ard.rssi = rs->rs_rssi_ctl0;
+ ard.ext_rssi = rs->rs_rssi_ext0;
+
+ /*
+ * hardware stores this as 8 bit signed value.
+ * we will cap it at 0 if it is a negative number
+ */
+ if (ard.rssi & 0x80)
+ ard.rssi = 0;
+ if (ard.ext_rssi & 0x80)
+ ard.ext_rssi = 0;
+
+ vdata_end = (char *)data + datalen;
+ ard.pulse_bw_info = vdata_end[-1];
+ ard.pulse_length_ext = vdata_end[-2];
+ ard.pulse_length_pri = vdata_end[-3];
+
+ ath_dbg(common, DFS,
+ "bw_info=%d, length_pri=%d, length_ext=%d, "
+ "rssi_pri=%d, rssi_ext=%d\n",
+ ard.pulse_bw_info, ard.pulse_length_pri, ard.pulse_length_ext,
+ ard.rssi, ard.ext_rssi);
+
+ drp.freq = ah->curchan->channel;
+ drp.ts = mactime;
+ if (ath9k_postprocess_radar_event(sc, &ard, &drp)) {
+ static u64 last_ts;
+ ath_dbg(common, DFS,
+ "ath9k_dfs_process_phyerr: channel=%d, ts=%llu, "
+ "width=%d, rssi=%d, delta_ts=%llu\n",
+ drp.freq, drp.ts, drp.width, drp.rssi, drp.ts-last_ts);
+ last_ts = drp.ts;
+ /*
+ * TODO: forward pulse to pattern detector
+ *
+ * ieee80211_add_radar_pulse(drp.freq, drp.ts,
+ * drp.width, drp.rssi);
+ */
+ }
+}
diff --git a/drivers/net/wireless/ath/ath9k/dfs.h b/drivers/net/wireless/ath/ath9k/dfs.h
new file mode 100644
index 000000000000..c2412857f122
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/dfs.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ * Copyright (c) 2011 Neratec Solutions AG
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef ATH9K_DFS_H
+#define ATH9K_DFS_H
+
+#if defined(CONFIG_ATH9K_DFS_CERTIFIED)
+/**
+ * ath9k_dfs_process_phyerr - process radar PHY error
+ * @sc: ath_softc
+ * @data: RX payload data
+ * @rs: RX status after processing descriptor
+ * @mactime: receive time
+ *
+ * This function is called whenever the HW DFS module detects a radar
+ * pulse and reports it as a PHY error.
+ *
+ * The radar information provided as raw payload data is validated and
+ * filtered for false pulses. Events passing all tests are forwarded to
+ * the upper layer for pattern detection.
+ */
+void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data,
+ struct ath_rx_status *rs, u64 mactime);
+#else
+static inline void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data,
+ struct ath_rx_status *rs, u64 mactime) { }
+#endif
+
+#endif /* ATH9K_DFS_H */
diff --git a/drivers/net/wireless/ath/ath9k/dfs_debug.c b/drivers/net/wireless/ath/ath9k/dfs_debug.c
new file mode 100644
index 000000000000..106d031d834a
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/dfs_debug.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ * Copyright (c) 2011 Neratec Solutions AG
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/export.h>
+
+#include "ath9k.h"
+#include "dfs_debug.h"
+
+#define ATH9K_DFS_STAT(s, p) \
+ len += snprintf(buf + len, size - len, "%28s : %10u\n", s, \
+ sc->debug.stats.dfs_stats.p);
+
+static ssize_t read_file_dfs(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ struct ath9k_hw_version *hw_ver = &sc->sc_ah->hw_version;
+ char *buf;
+ unsigned int len = 0, size = 8000;
+ ssize_t retval = 0;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ len += snprintf(buf + len, size - len, "DFS support for "
+ "macVersion = 0x%x, macRev = 0x%x: %s\n",
+ hw_ver->macVersion, hw_ver->macRev,
+ (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_DFS) ?
+ "enabled" : "disabled");
+ ATH9K_DFS_STAT("DFS pulses detected ", pulses_detected);
+ ATH9K_DFS_STAT("Datalen discards ", datalen_discards);
+ ATH9K_DFS_STAT("RSSI discards ", rssi_discards);
+ ATH9K_DFS_STAT("BW info discards ", bwinfo_discards);
+ ATH9K_DFS_STAT("Primary channel pulses ", pri_phy_errors);
+ ATH9K_DFS_STAT("Secondary channel pulses", ext_phy_errors);
+ ATH9K_DFS_STAT("Dual channel pulses ", dc_phy_errors);
+
+ if (len > size)
+ len = size;
+
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return retval;
+}
+
+static int ath9k_dfs_debugfs_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+
+ return 0;
+}
+
+static const struct file_operations fops_dfs_stats = {
+ .read = read_file_dfs,
+ .open = ath9k_dfs_debugfs_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+void ath9k_dfs_init_debug(struct ath_softc *sc)
+{
+ debugfs_create_file("dfs_stats", S_IRUSR,
+ sc->debug.debugfs_phy, sc, &fops_dfs_stats);
+}
diff --git a/drivers/net/wireless/ath/ath9k/dfs_debug.h b/drivers/net/wireless/ath/ath9k/dfs_debug.h
new file mode 100644
index 000000000000..4911724cb445
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/dfs_debug.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ * Copyright (c) 2011 Neratec Solutions AG
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+
+#ifndef ATH9K_DFS_DEBUG_H
+#define ATH9K_DFS_DEBUG_H
+
+#include "hw.h"
+
+/**
+ * struct ath_dfs_stats - DFS Statistics
+ *
+ * @pulses_detected: No. of pulses detected so far
+ * @datalen_discards: No. of pulses discarded due to invalid datalen
+ * @rssi_discards: No. of pulses discarded due to invalid RSSI
+ * @bwinfo_discards: No. of pulses discarded due to invalid BW info
+ * @pri_phy_errors: No. of pulses reported for primary channel
+ * @ext_phy_errors: No. of pulses reported for extension channel
+ * @dc_phy_errors: No. of pulses reported for primary + extension channel
+ */
+struct ath_dfs_stats {
+ u32 pulses_detected;
+ u32 datalen_discards;
+ u32 rssi_discards;
+ u32 bwinfo_discards;
+ u32 pri_phy_errors;
+ u32 ext_phy_errors;
+ u32 dc_phy_errors;
+};
+
+#if defined(CONFIG_ATH9K_DFS_DEBUGFS)
+
+#define DFS_STAT_INC(sc, c) (sc->debug.stats.dfs_stats.c++)
+void ath9k_dfs_init_debug(struct ath_softc *sc);
+
+#else
+
+#define DFS_STAT_INC(sc, c) do { } while (0)
+static inline void ath9k_dfs_init_debug(struct ath_softc *sc) { }
+
+#endif /* CONFIG_ATH9K_DFS_DEBUGFS */
+
+#endif /* ATH9K_DFS_DEBUG_H */
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c
index e46f751ab508..c43523233319 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom.c
@@ -305,8 +305,7 @@ void ath9k_hw_update_regulatory_maxpower(struct ath_hw *ah)
regulatory->max_power_level += INCREASE_MAXPOW_BY_THREE_CHAIN;
break;
default:
- ath_dbg(common, ATH_DBG_EEPROM,
- "Invalid chainmask configuration\n");
+ ath_dbg(common, EEPROM, "Invalid chainmask configuration\n");
break;
}
}
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h
index 49abd34be741..5ff7ab965120 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/eeprom.h
@@ -249,7 +249,8 @@ enum eeprom_param {
EEP_ANT_DIV_CTL1,
EEP_CHAIN_MASK_REDUCE,
EEP_ANTENNA_GAIN_2G,
- EEP_ANTENNA_GAIN_5G
+ EEP_ANTENNA_GAIN_5G,
+ EEP_QUICK_DROP
};
enum ar5416_rates {
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
index 9a7520f987f0..4322ac80c203 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
@@ -38,7 +38,7 @@ static bool __ath9k_hw_4k_fill_eeprom(struct ath_hw *ah)
for (addr = 0; addr < SIZE_EEPROM_4K; addr++) {
if (!ath9k_hw_nvram_read(common, addr + eep_start_loc, eep_data)) {
- ath_dbg(common, ATH_DBG_EEPROM,
+ ath_dbg(common, EEPROM,
"Unable to read eeprom region\n");
return false;
}
@@ -62,8 +62,7 @@ static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah)
struct ath_common *common = ath9k_hw_common(ah);
if (!ath9k_hw_use_flash(ah)) {
- ath_dbg(common, ATH_DBG_EEPROM,
- "Reading from EEPROM, not flash\n");
+ ath_dbg(common, EEPROM, "Reading from EEPROM, not flash\n");
}
if (common->bus_ops->ath_bus_type == ATH_USB)
@@ -204,8 +203,7 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah)
return false;
}
- ath_dbg(common, ATH_DBG_EEPROM,
- "Read Magic = 0x%04X\n", magic);
+ ath_dbg(common, EEPROM, "Read Magic = 0x%04X\n", magic);
if (magic != AR5416_EEPROM_MAGIC) {
magic2 = swab16(magic);
@@ -227,7 +225,7 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah)
}
}
- ath_dbg(common, ATH_DBG_EEPROM, "need_swap = %s.\n",
+ ath_dbg(common, EEPROM, "need_swap = %s\n",
need_swap ? "True" : "False");
if (need_swap)
@@ -249,7 +247,7 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah)
u32 integer;
u16 word;
- ath_dbg(common, ATH_DBG_EEPROM,
+ ath_dbg(common, EEPROM,
"EEPROM Endianness is not native.. Changing\n");
word = swab16(eep->baseEepHeader.length);
@@ -435,11 +433,11 @@ static void ath9k_hw_set_4k_power_cal_table(struct ath_hw *ah,
reg32 = get_unaligned_le32(&pdadcValues[4 * j]);
REG_WRITE(ah, regOffset, reg32);
- ath_dbg(common, ATH_DBG_EEPROM,
+ ath_dbg(common, EEPROM,
"PDADC (%d,%4x): %4.4x %8.8x\n",
i, regChainOffset, regOffset,
reg32);
- ath_dbg(common, ATH_DBG_EEPROM,
+ ath_dbg(common, EEPROM,
"PDADC: Chain %d | "
"PDADC %3d Value %3d | "
"PDADC %3d Value %3d | "
@@ -473,7 +471,7 @@ static void ath9k_hw_set_4k_power_per_rate_table(struct ath_hw *ah,
int i;
u16 twiceMinEdgePower;
- u16 twiceMaxEdgePower = MAX_RATE_POWER;
+ u16 twiceMaxEdgePower;
u16 scaledPower = 0, minCtlPower;
u16 numCtlModes;
const u16 *pCtlMode;
@@ -542,9 +540,7 @@ static void ath9k_hw_set_4k_power_per_rate_table(struct ath_hw *ah,
else
freq = centers.ctl_center;
- if (ah->eep_ops->get_eeprom_ver(ah) == 14 &&
- ah->eep_ops->get_eeprom_rev(ah) <= 2)
- twiceMaxEdgePower = MAX_RATE_POWER;
+ twiceMaxEdgePower = MAX_RATE_POWER;
for (i = 0; (i < AR5416_EEP4K_NUM_CTLS) &&
pEepData->ctlIndex[i]; i++) {
@@ -1081,8 +1077,7 @@ static u16 ath9k_hw_4k_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz)
u16 spur_val = AR_NO_SPUR;
- ath_dbg(common, ATH_DBG_ANI,
- "Getting spur idx:%d is2Ghz:%d val:%x\n",
+ ath_dbg(common, ANI, "Getting spur idx:%d is2Ghz:%d val:%x\n",
i, is2GHz, ah->config.spurchans[i][is2GHz]);
switch (ah->config.spurmode) {
@@ -1090,8 +1085,8 @@ static u16 ath9k_hw_4k_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz)
break;
case SPUR_ENABLE_IOCTL:
spur_val = ah->config.spurchans[i][is2GHz];
- ath_dbg(common, ATH_DBG_ANI,
- "Getting spur val from new loc. %d\n", spur_val);
+ ath_dbg(common, ANI, "Getting spur val from new loc. %d\n",
+ spur_val);
break;
case SPUR_ENABLE_EEPROM:
spur_val = EEP_MAP4K_SPURCHAN;
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
index 4f5c50a87ce3..f272236d8053 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
@@ -41,7 +41,7 @@ static bool __ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah)
for (addr = 0; addr < SIZE_EEPROM_AR9287; addr++) {
if (!ath9k_hw_nvram_read(common, addr + eep_start_loc,
eep_data)) {
- ath_dbg(common, ATH_DBG_EEPROM,
+ ath_dbg(common, EEPROM,
"Unable to read eeprom region\n");
return false;
}
@@ -66,8 +66,7 @@ static bool ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah)
struct ath_common *common = ath9k_hw_common(ah);
if (!ath9k_hw_use_flash(ah)) {
- ath_dbg(common, ATH_DBG_EEPROM,
- "Reading from EEPROM, not flash\n");
+ ath_dbg(common, EEPROM, "Reading from EEPROM, not flash\n");
}
if (common->bus_ops->ath_bus_type == ATH_USB)
@@ -197,8 +196,7 @@ static int ath9k_hw_ar9287_check_eeprom(struct ath_hw *ah)
return false;
}
- ath_dbg(common, ATH_DBG_EEPROM,
- "Read Magic = 0x%04X\n", magic);
+ ath_dbg(common, EEPROM, "Read Magic = 0x%04X\n", magic);
if (magic != AR5416_EEPROM_MAGIC) {
magic2 = swab16(magic);
@@ -220,7 +218,7 @@ static int ath9k_hw_ar9287_check_eeprom(struct ath_hw *ah)
}
}
- ath_dbg(common, ATH_DBG_EEPROM, "need_swap = %s.\n",
+ ath_dbg(common, EEPROM, "need_swap = %s\n",
need_swap ? "True" : "False");
if (need_swap)
@@ -569,7 +567,7 @@ static void ath9k_hw_set_ar9287_power_per_rate_table(struct ath_hw *ah,
#define REDUCE_SCALED_POWER_BY_TWO_CHAIN 6
#define REDUCE_SCALED_POWER_BY_THREE_CHAIN 10
- u16 twiceMaxEdgePower = MAX_RATE_POWER;
+ u16 twiceMaxEdgePower;
int i;
struct cal_ctl_data_ar9287 *rep;
struct cal_target_power_leg targetPowerOfdm = {0, {0, 0, 0, 0} },
@@ -669,6 +667,7 @@ static void ath9k_hw_set_ar9287_power_per_rate_table(struct ath_hw *ah,
else
freq = centers.ctl_center;
+ twiceMaxEdgePower = MAX_RATE_POWER;
/* Walk through the CTL indices stored in EEPROM */
for (i = 0; (i < AR9287_NUM_CTLS) && pEepData->ctlIndex[i]; i++) {
struct cal_ctl_edges *pRdEdgesPower;
@@ -1040,8 +1039,7 @@ static u16 ath9k_hw_ar9287_get_spur_channel(struct ath_hw *ah,
struct ath_common *common = ath9k_hw_common(ah);
u16 spur_val = AR_NO_SPUR;
- ath_dbg(common, ATH_DBG_ANI,
- "Getting spur idx:%d is2Ghz:%d val:%x\n",
+ ath_dbg(common, ANI, "Getting spur idx:%d is2Ghz:%d val:%x\n",
i, is2GHz, ah->config.spurchans[i][is2GHz]);
switch (ah->config.spurmode) {
@@ -1049,8 +1047,8 @@ static u16 ath9k_hw_ar9287_get_spur_channel(struct ath_hw *ah,
break;
case SPUR_ENABLE_IOCTL:
spur_val = ah->config.spurchans[i][is2GHz];
- ath_dbg(common, ATH_DBG_ANI,
- "Getting spur val from new loc. %d\n", spur_val);
+ ath_dbg(common, ANI, "Getting spur val from new loc. %d\n",
+ spur_val);
break;
case SPUR_ENABLE_EEPROM:
spur_val = EEP_MAP9287_SPURCHAN;
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index 81e629671679..619b95d764ff 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -121,8 +121,7 @@ static bool ath9k_hw_def_fill_eeprom(struct ath_hw *ah)
struct ath_common *common = ath9k_hw_common(ah);
if (!ath9k_hw_use_flash(ah)) {
- ath_dbg(common, ATH_DBG_EEPROM,
- "Reading from EEPROM, not flash\n");
+ ath_dbg(common, EEPROM, "Reading from EEPROM, not flash\n");
}
if (common->bus_ops->ath_bus_type == ATH_USB)
@@ -279,8 +278,7 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
}
if (!ath9k_hw_use_flash(ah)) {
- ath_dbg(common, ATH_DBG_EEPROM,
- "Read Magic = 0x%04X\n", magic);
+ ath_dbg(common, EEPROM, "Read Magic = 0x%04X\n", magic);
if (magic != AR5416_EEPROM_MAGIC) {
magic2 = swab16(magic);
@@ -303,7 +301,7 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
}
}
- ath_dbg(common, ATH_DBG_EEPROM, "need_swap = %s.\n",
+ ath_dbg(common, EEPROM, "need_swap = %s\n",
need_swap ? "True" : "False");
if (need_swap)
@@ -325,7 +323,7 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
u32 integer, j;
u16 word;
- ath_dbg(common, ATH_DBG_EEPROM,
+ ath_dbg(common, EEPROM,
"EEPROM Endianness is not native.. Changing.\n");
word = swab16(eep->baseEepHeader.length);
@@ -385,7 +383,7 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
if ((ah->hw_version.devid == AR9280_DEVID_PCI) &&
((eep->baseEepHeader.version & 0xff) > 0x0a) &&
(eep->baseEepHeader.pwdclkind == 0))
- ah->need_an_top2_fixup = 1;
+ ah->need_an_top2_fixup = true;
if ((common->bus_ops->ath_bus_type == ATH_USB) &&
(AR_SREV_9280(ah)))
@@ -965,15 +963,12 @@ static void ath9k_hw_set_def_power_cal_table(struct ath_hw *ah,
reg32 = get_unaligned_le32(&pdadcValues[4 * j]);
REG_WRITE(ah, regOffset, reg32);
- ath_dbg(common, ATH_DBG_EEPROM,
+ ath_dbg(common, EEPROM,
"PDADC (%d,%4x): %4.4x %8.8x\n",
i, regChainOffset, regOffset,
reg32);
- ath_dbg(common, ATH_DBG_EEPROM,
- "PDADC: Chain %d | PDADC %3d "
- "Value %3d | PDADC %3d Value %3d | "
- "PDADC %3d Value %3d | PDADC %3d "
- "Value %3d |\n",
+ ath_dbg(common, EEPROM,
+ "PDADC: Chain %d | PDADC %3d Value %3d | PDADC %3d Value %3d | PDADC %3d Value %3d | PDADC %3d Value %3d |\n",
i, 4 * j, pdadcValues[4 * j],
4 * j + 1, pdadcValues[4 * j + 1],
4 * j + 2, pdadcValues[4 * j + 2],
@@ -1000,7 +995,7 @@ static void ath9k_hw_set_def_power_per_rate_table(struct ath_hw *ah,
#define REDUCE_SCALED_POWER_BY_THREE_CHAIN 9 /* 10*log10(3)*2 */
struct ar5416_eeprom_def *pEepData = &ah->eeprom.def;
- u16 twiceMaxEdgePower = MAX_RATE_POWER;
+ u16 twiceMaxEdgePower;
int i;
struct cal_ctl_data *rep;
struct cal_target_power_leg targetPowerOfdm, targetPowerCck = {
@@ -1121,9 +1116,7 @@ static void ath9k_hw_set_def_power_per_rate_table(struct ath_hw *ah,
else
freq = centers.ctl_center;
- if (ah->eep_ops->get_eeprom_ver(ah) == 14 &&
- ah->eep_ops->get_eeprom_rev(ah) <= 2)
- twiceMaxEdgePower = MAX_RATE_POWER;
+ twiceMaxEdgePower = MAX_RATE_POWER;
for (i = 0; (i < AR5416_NUM_CTLS) && pEepData->ctlIndex[i]; i++) {
if ((((cfgCtl & ~CTL_MODE_M) |
@@ -1280,7 +1273,7 @@ static void ath9k_hw_def_set_txpower(struct ath_hw *ah,
regulatory->max_power_level += INCREASE_MAXPOW_BY_THREE_CHAIN;
break;
default:
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_EEPROM,
+ ath_dbg(ath9k_hw_common(ah), EEPROM,
"Invalid chainmask configuration\n");
break;
}
@@ -1398,8 +1391,7 @@ static u16 ath9k_hw_def_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz)
u16 spur_val = AR_NO_SPUR;
- ath_dbg(common, ATH_DBG_ANI,
- "Getting spur idx:%d is2Ghz:%d val:%x\n",
+ ath_dbg(common, ANI, "Getting spur idx:%d is2Ghz:%d val:%x\n",
i, is2GHz, ah->config.spurchans[i][is2GHz]);
switch (ah->config.spurmode) {
@@ -1407,8 +1399,8 @@ static u16 ath9k_hw_def_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz)
break;
case SPUR_ENABLE_IOCTL:
spur_val = ah->config.spurchans[i][is2GHz];
- ath_dbg(common, ATH_DBG_ANI,
- "Getting spur val from new loc. %d\n", spur_val);
+ ath_dbg(common, ANI, "Getting spur val from new loc. %d\n",
+ spur_val);
break;
case SPUR_ENABLE_EEPROM:
spur_val = EEP_DEF_SPURCHAN;
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
index 655576c8fdab..597c84e31adb 100644
--- a/drivers/net/wireless/ath/ath9k/gpio.c
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -130,12 +130,12 @@ static void ath_detect_bt_priority(struct ath_softc *sc)
sc->sc_flags &= ~(SC_OP_BT_PRIORITY_DETECTED | SC_OP_BT_SCAN);
/* Detect if colocated bt started scanning */
if (btcoex->bt_priority_cnt >= ATH_BT_CNT_SCAN_THRESHOLD) {
- ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_BTCOEX,
+ ath_dbg(ath9k_hw_common(sc->sc_ah), BTCOEX,
"BT scan detected\n");
sc->sc_flags |= (SC_OP_BT_SCAN |
SC_OP_BT_PRIORITY_DETECTED);
} else if (btcoex->bt_priority_cnt >= ATH_BT_CNT_THRESHOLD) {
- ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_BTCOEX,
+ ath_dbg(ath9k_hw_common(sc->sc_ah), BTCOEX,
"BT priority traffic detected\n");
sc->sc_flags |= SC_OP_BT_PRIORITY_DETECTED;
}
@@ -189,8 +189,8 @@ static void ath_btcoex_period_timer(unsigned long data)
bool is_btscan;
ath9k_ps_wakeup(sc);
- ath_detect_bt_priority(sc);
-
+ if (!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI))
+ ath_detect_bt_priority(sc);
is_btscan = sc->sc_flags & SC_OP_BT_SCAN;
spin_lock_bh(&btcoex->btcoex_lock);
@@ -198,6 +198,7 @@ static void ath_btcoex_period_timer(unsigned long data)
ath9k_hw_btcoex_bt_stomp(ah, is_btscan ? ATH_BTCOEX_STOMP_ALL :
btcoex->bt_stomp_type);
+ ath9k_hw_btcoex_enable(ah);
spin_unlock_bh(&btcoex->btcoex_lock);
if (btcoex->btcoex_period != btcoex->btcoex_no_stomp) {
@@ -212,8 +213,9 @@ static void ath_btcoex_period_timer(unsigned long data)
}
ath9k_ps_restore(sc);
+ timer_period = btcoex->btcoex_period / 1000;
mod_timer(&btcoex->period_timer, jiffies +
- msecs_to_jiffies(ATH_BTCOEX_DEF_BT_PERIOD));
+ msecs_to_jiffies(timer_period));
}
/*
@@ -228,8 +230,7 @@ static void ath_btcoex_no_stomp_timer(void *arg)
struct ath_common *common = ath9k_hw_common(ah);
bool is_btscan = sc->sc_flags & SC_OP_BT_SCAN;
- ath_dbg(common, ATH_DBG_BTCOEX,
- "no stomp timer running\n");
+ ath_dbg(common, BTCOEX, "no stomp timer running\n");
ath9k_ps_wakeup(sc);
spin_lock_bh(&btcoex->btcoex_lock);
@@ -239,6 +240,7 @@ static void ath_btcoex_no_stomp_timer(void *arg)
else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL)
ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_LOW);
+ ath9k_hw_btcoex_enable(ah);
spin_unlock_bh(&btcoex->btcoex_lock);
ath9k_ps_restore(sc);
}
@@ -247,6 +249,9 @@ int ath_init_btcoex_timer(struct ath_softc *sc)
{
struct ath_btcoex *btcoex = &sc->btcoex;
+ if (ath9k_hw_get_btcoex_scheme(sc->sc_ah) == ATH_BTCOEX_CFG_NONE)
+ return 0;
+
btcoex->btcoex_period = ATH_BTCOEX_DEF_BT_PERIOD * 1000;
btcoex->btcoex_no_stomp = (100 - ATH_BTCOEX_DEF_DUTY_CYCLE) *
btcoex->btcoex_period / 100;
@@ -277,8 +282,10 @@ void ath9k_btcoex_timer_resume(struct ath_softc *sc)
struct ath_btcoex *btcoex = &sc->btcoex;
struct ath_hw *ah = sc->sc_ah;
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
- "Starting btcoex timers\n");
+ ath_dbg(ath9k_hw_common(ah), BTCOEX, "Starting btcoex timers\n");
+
+ if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE)
+ return;
/* make sure duty cycle timer is also stopped when resuming */
if (btcoex->hw_timer_enabled)
@@ -300,6 +307,9 @@ void ath9k_btcoex_timer_pause(struct ath_softc *sc)
struct ath_btcoex *btcoex = &sc->btcoex;
struct ath_hw *ah = sc->sc_ah;
+ if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE)
+ return;
+
del_timer_sync(&btcoex->period_timer);
if (btcoex->hw_timer_enabled)
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
index 57fe22b24247..2eadffb7971c 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
@@ -167,9 +167,9 @@ static void ath9k_htc_beacon_config_sta(struct ath9k_htc_priv *priv,
/* TSF out of range threshold fixed at 1 second */
bs.bs_tsfoor_threshold = ATH9K_TSFOOR_THRESHOLD;
- ath_dbg(common, ATH_DBG_CONFIG, "intval: %u tsf: %llu tsftu: %u\n",
+ ath_dbg(common, CONFIG, "intval: %u tsf: %llu tsftu: %u\n",
intval, tsf, tsftu);
- ath_dbg(common, ATH_DBG_CONFIG,
+ ath_dbg(common, CONFIG,
"bmiss: %u sleep: %u cfp-period: %u maxdur: %u next: %u\n",
bs.bs_bmissthreshold, bs.bs_sleepduration,
bs.bs_cfpperiod, bs.bs_cfpmaxduration, bs.bs_cfpnext);
@@ -224,9 +224,8 @@ static void ath9k_htc_beacon_config_ap(struct ath9k_htc_priv *priv,
if (priv->op_flags & OP_ENABLE_BEACON)
imask |= ATH9K_INT_SWBA;
- ath_dbg(common, ATH_DBG_CONFIG,
- "AP Beacon config, intval: %d, nexttbtt: %u, resp_time: %d "
- "imask: 0x%x\n",
+ ath_dbg(common, CONFIG,
+ "AP Beacon config, intval: %d, nexttbtt: %u, resp_time: %d imask: 0x%x\n",
bss_conf->beacon_interval, nexttbtt,
priv->ah->config.sw_beacon_response_time, imask);
@@ -273,9 +272,8 @@ static void ath9k_htc_beacon_config_adhoc(struct ath9k_htc_priv *priv,
if (priv->op_flags & OP_ENABLE_BEACON)
imask |= ATH9K_INT_SWBA;
- ath_dbg(common, ATH_DBG_CONFIG,
- "IBSS Beacon config, intval: %d, nexttbtt: %u, "
- "resp_time: %d, imask: 0x%x\n",
+ ath_dbg(common, CONFIG,
+ "IBSS Beacon config, intval: %d, nexttbtt: %u, resp_time: %d, imask: 0x%x\n",
bss_conf->beacon_interval, nexttbtt,
priv->ah->config.sw_beacon_response_time, imask);
@@ -323,7 +321,7 @@ static void ath9k_htc_send_buffered(struct ath9k_htc_priv *priv,
tx_slot = ath9k_htc_tx_get_slot(priv);
if (tx_slot < 0) {
- ath_dbg(common, ATH_DBG_XMIT, "No free CAB slot\n");
+ ath_dbg(common, XMIT, "No free CAB slot\n");
dev_kfree_skb_any(skb);
goto next;
}
@@ -333,8 +331,7 @@ static void ath9k_htc_send_buffered(struct ath9k_htc_priv *priv,
ath9k_htc_tx_clear_slot(priv, tx_slot);
dev_kfree_skb_any(skb);
- ath_dbg(common, ATH_DBG_XMIT,
- "Failed to send CAB frame\n");
+ ath_dbg(common, XMIT, "Failed to send CAB frame\n");
} else {
spin_lock_bh(&priv->tx.tx_lock);
priv->tx.queued_cnt++;
@@ -409,7 +406,7 @@ static void ath9k_htc_send_beacon(struct ath9k_htc_priv *priv,
ret = htc_send(priv->htc, beacon);
if (ret != 0) {
if (ret == -ENOMEM) {
- ath_dbg(common, ATH_DBG_BSTUCK,
+ ath_dbg(common, BSTUCK,
"Failed to send beacon, no free TX buffer\n");
}
dev_kfree_skb_any(beacon);
@@ -434,7 +431,7 @@ static int ath9k_htc_choose_bslot(struct ath9k_htc_priv *priv,
slot = ((tsftu % intval) * ATH9K_HTC_MAX_BCN_VIF) / intval;
slot = ATH9K_HTC_MAX_BCN_VIF - slot - 1;
- ath_dbg(common, ATH_DBG_BEACON,
+ ath_dbg(common, BEACON,
"Choose slot: %d, tsf: %llu, tsftu: %u, intval: %u\n",
slot, tsf, tsftu, intval);
@@ -450,8 +447,7 @@ void ath9k_htc_swba(struct ath9k_htc_priv *priv,
if (swba->beacon_pending != 0) {
priv->cur_beacon_conf.bmiss_cnt++;
if (priv->cur_beacon_conf.bmiss_cnt > BSTUCK_THRESHOLD) {
- ath_dbg(common, ATH_DBG_BSTUCK,
- "Beacon stuck, HW reset\n");
+ ath_dbg(common, BSTUCK, "Beacon stuck, HW reset\n");
ieee80211_queue_work(priv->hw,
&priv->fatal_work);
}
@@ -459,7 +455,7 @@ void ath9k_htc_swba(struct ath9k_htc_priv *priv,
}
if (priv->cur_beacon_conf.bmiss_cnt) {
- ath_dbg(common, ATH_DBG_BSTUCK,
+ ath_dbg(common, BSTUCK,
"Resuming beacon xmit after %u misses\n",
priv->cur_beacon_conf.bmiss_cnt);
priv->cur_beacon_conf.bmiss_cnt = 0;
@@ -495,8 +491,8 @@ void ath9k_htc_assign_bslot(struct ath9k_htc_priv *priv,
priv->cur_beacon_conf.bslot[avp->bslot] = vif;
spin_unlock_bh(&priv->beacon_lock);
- ath_dbg(common, ATH_DBG_CONFIG,
- "Added interface at beacon slot: %d\n", avp->bslot);
+ ath_dbg(common, CONFIG, "Added interface at beacon slot: %d\n",
+ avp->bslot);
}
void ath9k_htc_remove_bslot(struct ath9k_htc_priv *priv,
@@ -509,8 +505,8 @@ void ath9k_htc_remove_bslot(struct ath9k_htc_priv *priv,
priv->cur_beacon_conf.bslot[avp->bslot] = NULL;
spin_unlock_bh(&priv->beacon_lock);
- ath_dbg(common, ATH_DBG_CONFIG,
- "Removed interface at beacon slot: %d\n", avp->bslot);
+ ath_dbg(common, CONFIG, "Removed interface at beacon slot: %d\n",
+ avp->bslot);
}
/*
@@ -536,8 +532,7 @@ void ath9k_htc_set_tsfadjust(struct ath9k_htc_priv *priv,
tsfadjust = cur_conf->beacon_interval * avp->bslot / ATH9K_HTC_MAX_BCN_VIF;
avp->tsfadjust = cpu_to_le64(TU_TO_USEC(tsfadjust));
- ath_dbg(common, ATH_DBG_CONFIG,
- "tsfadjust is: %llu for bslot: %d\n",
+ ath_dbg(common, CONFIG, "tsfadjust is: %llu for bslot: %d\n",
(unsigned long long)tsfadjust, avp->bslot);
}
@@ -568,7 +563,7 @@ static bool ath9k_htc_check_beacon_config(struct ath9k_htc_priv *priv,
(priv->num_ap_vif > 1) &&
(vif->type == NL80211_IFTYPE_AP) &&
(cur_conf->beacon_interval != bss_conf->beacon_int)) {
- ath_dbg(common, ATH_DBG_CONFIG,
+ ath_dbg(common, CONFIG,
"Changing beacon interval of multiple AP interfaces !\n");
return false;
}
@@ -579,7 +574,7 @@ static bool ath9k_htc_check_beacon_config(struct ath9k_htc_priv *priv,
*/
if (priv->num_ap_vif &&
(vif->type != NL80211_IFTYPE_AP)) {
- ath_dbg(common, ATH_DBG_CONFIG,
+ ath_dbg(common, CONFIG,
"HW in AP mode, cannot set STA beacon parameters\n");
return false;
}
@@ -597,7 +592,7 @@ static bool ath9k_htc_check_beacon_config(struct ath9k_htc_priv *priv,
&beacon_configured);
if (beacon_configured) {
- ath_dbg(common, ATH_DBG_CONFIG,
+ ath_dbg(common, CONFIG,
"Beacon already configured for a station interface\n");
return false;
}
@@ -637,8 +632,7 @@ void ath9k_htc_beacon_config(struct ath9k_htc_priv *priv,
ath9k_htc_beacon_config_ap(priv, cur_conf);
break;
default:
- ath_dbg(common, ATH_DBG_CONFIG,
- "Unsupported beaconing mode\n");
+ ath_dbg(common, CONFIG, "Unsupported beaconing mode\n");
return;
}
}
@@ -659,8 +653,7 @@ void ath9k_htc_beacon_reconfig(struct ath9k_htc_priv *priv)
ath9k_htc_beacon_config_ap(priv, cur_conf);
break;
default:
- ath_dbg(common, ATH_DBG_CONFIG,
- "Unsupported beaconing mode\n");
+ ath_dbg(common, CONFIG, "Unsupported beaconing mode\n");
return;
}
}
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
index e3a02eb8e0cc..6506e1fd5036 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
@@ -36,12 +36,12 @@ static void ath_detect_bt_priority(struct ath9k_htc_priv *priv)
priv->op_flags &= ~(OP_BT_PRIORITY_DETECTED | OP_BT_SCAN);
/* Detect if colocated bt started scanning */
if (btcoex->bt_priority_cnt >= ATH_BT_CNT_SCAN_THRESHOLD) {
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
+ ath_dbg(ath9k_hw_common(ah), BTCOEX,
"BT scan detected\n");
priv->op_flags |= (OP_BT_SCAN |
OP_BT_PRIORITY_DETECTED);
} else if (btcoex->bt_priority_cnt >= ATH_BT_CNT_THRESHOLD) {
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
+ ath_dbg(ath9k_hw_common(ah), BTCOEX,
"BT priority traffic detected\n");
priv->op_flags |= OP_BT_PRIORITY_DETECTED;
}
@@ -80,6 +80,7 @@ static void ath_btcoex_period_work(struct work_struct *work)
ath9k_hw_btcoex_bt_stomp(priv->ah, is_btscan ? ATH_BTCOEX_STOMP_ALL :
btcoex->bt_stomp_type);
+ ath9k_hw_btcoex_enable(priv->ah);
timer_period = is_btscan ? btcoex->btscan_no_stomp :
btcoex->btcoex_no_stomp;
ieee80211_queue_delayed_work(priv->hw, &priv->duty_cycle_work,
@@ -101,19 +102,22 @@ static void ath_btcoex_duty_cycle_work(struct work_struct *work)
struct ath_common *common = ath9k_hw_common(ah);
bool is_btscan = priv->op_flags & OP_BT_SCAN;
- ath_dbg(common, ATH_DBG_BTCOEX,
- "time slice work for bt and wlan\n");
+ ath_dbg(common, BTCOEX, "time slice work for bt and wlan\n");
if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_LOW || is_btscan)
ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_NONE);
else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL)
ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_LOW);
+ ath9k_hw_btcoex_enable(priv->ah);
}
void ath_htc_init_btcoex_work(struct ath9k_htc_priv *priv)
{
struct ath_btcoex *btcoex = &priv->btcoex;
+ if (ath9k_hw_get_btcoex_scheme(priv->ah) == ATH_BTCOEX_CFG_NONE)
+ return;
+
btcoex->btcoex_period = ATH_BTCOEX_DEF_BT_PERIOD;
btcoex->btcoex_no_stomp = (100 - ATH_BTCOEX_DEF_DUTY_CYCLE) *
btcoex->btcoex_period / 100;
@@ -132,7 +136,10 @@ void ath_htc_resume_btcoex_work(struct ath9k_htc_priv *priv)
struct ath_btcoex *btcoex = &priv->btcoex;
struct ath_hw *ah = priv->ah;
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_BTCOEX, "Starting btcoex work\n");
+ if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE)
+ return;
+
+ ath_dbg(ath9k_hw_common(ah), BTCOEX, "Starting btcoex work\n");
btcoex->bt_priority_cnt = 0;
btcoex->bt_priority_time = jiffies;
@@ -146,6 +153,9 @@ void ath_htc_resume_btcoex_work(struct ath9k_htc_priv *priv)
*/
void ath_htc_cancel_btcoex_work(struct ath9k_htc_priv *priv)
{
+ if (ath9k_hw_get_btcoex_scheme(priv->ah) == ATH_BTCOEX_CFG_NONE)
+ return;
+
cancel_delayed_work_sync(&priv->coex_period_work);
cancel_delayed_work_sync(&priv->duty_cycle_work);
}
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index 966661c9e586..9be10a2da1c2 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -299,8 +299,7 @@ static unsigned int ath9k_regread(void *hw_priv, u32 reg_offset)
(u8 *) &val, sizeof(val),
100);
if (unlikely(r)) {
- ath_dbg(common, ATH_DBG_WMI,
- "REGISTER READ FAILED: (0x%04x, %d)\n",
+ ath_dbg(common, WMI, "REGISTER READ FAILED: (0x%04x, %d)\n",
reg_offset, r);
return -EIO;
}
@@ -327,7 +326,7 @@ static void ath9k_multi_regread(void *hw_priv, u32 *addr,
(u8 *)tmpval, sizeof(u32) * count,
100);
if (unlikely(ret)) {
- ath_dbg(common, ATH_DBG_WMI,
+ ath_dbg(common, WMI,
"Multiple REGISTER READ FAILED (count: %d)\n", count);
}
@@ -352,8 +351,7 @@ static void ath9k_regwrite_single(void *hw_priv, u32 val, u32 reg_offset)
(u8 *) &val, sizeof(val),
100);
if (unlikely(r)) {
- ath_dbg(common, ATH_DBG_WMI,
- "REGISTER WRITE FAILED:(0x%04x, %d)\n",
+ ath_dbg(common, WMI, "REGISTER WRITE FAILED:(0x%04x, %d)\n",
reg_offset, r);
}
}
@@ -384,7 +382,7 @@ static void ath9k_regwrite_buffer(void *hw_priv, u32 val, u32 reg_offset)
(u8 *) &rsp_status, sizeof(rsp_status),
100);
if (unlikely(r)) {
- ath_dbg(common, ATH_DBG_WMI,
+ ath_dbg(common, WMI,
"REGISTER WRITE FAILED, multi len: %d\n",
priv->wmi->multi_write_idx);
}
@@ -434,7 +432,7 @@ static void ath9k_regwrite_flush(void *hw_priv)
(u8 *) &rsp_status, sizeof(rsp_status),
100);
if (unlikely(r)) {
- ath_dbg(common, ATH_DBG_WMI,
+ ath_dbg(common, WMI,
"REGISTER WRITE FAILED, multi len: %d\n",
priv->wmi->multi_write_idx);
}
@@ -512,8 +510,7 @@ static void setup_ht_cap(struct ath9k_htc_priv *priv,
tx_streams = ath9k_cmn_count_streams(priv->ah->txchainmask, 2);
rx_streams = ath9k_cmn_count_streams(priv->ah->rxchainmask, 2);
- ath_dbg(common, ATH_DBG_CONFIG,
- "TX streams %d, RX streams: %d\n",
+ ath_dbg(common, CONFIG, "TX streams %d, RX streams: %d\n",
tx_streams, rx_streams);
if (tx_streams != rx_streams) {
@@ -610,7 +607,7 @@ static void ath9k_init_btcoex(struct ath9k_htc_priv *priv)
{
int qnum;
- switch (priv->ah->btcoex_hw.scheme) {
+ switch (ath9k_hw_get_btcoex_scheme(priv->ah)) {
case ATH_BTCOEX_CFG_NONE:
break;
case ATH_BTCOEX_CFG_3WIRE:
@@ -704,7 +701,8 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv,
if (product && strncmp(product, ATH_HTC_BTCOEX_PRODUCT_ID, 5) == 0) {
ah->btcoex_hw.scheme = ATH_BTCOEX_CFG_3WIRE;
- ath9k_init_btcoex(priv);
+ if (ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE)
+ ath9k_init_btcoex(priv);
}
return 0;
@@ -876,9 +874,8 @@ static int ath9k_init_device(struct ath9k_htc_priv *priv,
goto err_world;
}
- ath_dbg(common, ATH_DBG_CONFIG,
- "WMI:%d, BCN:%d, CAB:%d, UAPSD:%d, MGMT:%d, "
- "BE:%d, BK:%d, VI:%d, VO:%d\n",
+ ath_dbg(common, CONFIG,
+ "WMI:%d, BCN:%d, CAB:%d, UAPSD:%d, MGMT:%d, BE:%d, BK:%d, VI:%d, VO:%d\n",
priv->wmi_cmd_ep,
priv->beacon_ep,
priv->cab_ep,
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 0b9a0e8a4958..ef4c60661290 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -266,7 +266,7 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
ath9k_wmi_event_drain(priv);
- ath_dbg(common, ATH_DBG_CONFIG,
+ ath_dbg(common, CONFIG,
"(%u MHz) -> (%u MHz), HT: %d, HT40: %d fastcc: %d\n",
priv->ah->curchan->channel,
channel->center_freq, conf_is_ht(conf), conf_is_ht40(conf),
@@ -415,7 +415,7 @@ static int ath9k_htc_add_monitor_interface(struct ath9k_htc_priv *priv)
priv->vif_sta_pos[priv->mon_vif_idx] = sta_idx;
priv->ah->is_monitoring = true;
- ath_dbg(common, ATH_DBG_CONFIG,
+ ath_dbg(common, CONFIG,
"Attached a monitor interface at idx: %d, sta idx: %d\n",
priv->mon_vif_idx, sta_idx);
@@ -427,7 +427,7 @@ err_sta:
*/
__ath9k_htc_remove_monitor_interface(priv);
err_vif:
- ath_dbg(common, ATH_DBG_FATAL, "Unable to attach a monitor interface\n");
+ ath_dbg(common, FATAL, "Unable to attach a monitor interface\n");
return ret;
}
@@ -452,7 +452,7 @@ static int ath9k_htc_remove_monitor_interface(struct ath9k_htc_priv *priv)
priv->nstations--;
priv->ah->is_monitoring = false;
- ath_dbg(common, ATH_DBG_CONFIG,
+ ath_dbg(common, CONFIG,
"Removed a monitor interface at idx: %d, sta idx: %d\n",
priv->mon_vif_idx, sta_idx);
@@ -512,11 +512,11 @@ static int ath9k_htc_add_station(struct ath9k_htc_priv *priv,
}
if (sta) {
- ath_dbg(common, ATH_DBG_CONFIG,
+ ath_dbg(common, CONFIG,
"Added a station entry for: %pM (idx: %d)\n",
sta->addr, tsta.sta_index);
} else {
- ath_dbg(common, ATH_DBG_CONFIG,
+ ath_dbg(common, CONFIG,
"Added a station entry for VIF %d (idx: %d)\n",
avp->index, tsta.sta_index);
}
@@ -556,11 +556,11 @@ static int ath9k_htc_remove_station(struct ath9k_htc_priv *priv,
}
if (sta) {
- ath_dbg(common, ATH_DBG_CONFIG,
+ ath_dbg(common, CONFIG,
"Removed a station entry for: %pM (idx: %d)\n",
sta->addr, sta_idx);
} else {
- ath_dbg(common, ATH_DBG_CONFIG,
+ ath_dbg(common, CONFIG,
"Removed a station entry for VIF %d (idx: %d)\n",
avp->index, sta_idx);
}
@@ -665,7 +665,7 @@ static void ath9k_htc_init_rate(struct ath9k_htc_priv *priv,
ath9k_htc_setup_rate(priv, sta, &trate);
ret = ath9k_htc_send_rate_cmd(priv, &trate);
if (!ret)
- ath_dbg(common, ATH_DBG_CONFIG,
+ ath_dbg(common, CONFIG,
"Updated target sta: %pM, rate caps: 0x%X\n",
sta->addr, be32_to_cpu(trate.capflags));
}
@@ -692,7 +692,7 @@ static void ath9k_htc_update_rate(struct ath9k_htc_priv *priv,
ret = ath9k_htc_send_rate_cmd(priv, &trate);
if (!ret)
- ath_dbg(common, ATH_DBG_CONFIG,
+ ath_dbg(common, CONFIG,
"Updated target sta: %pM, rate caps: 0x%X\n",
bss_conf->bssid, be32_to_cpu(trate.capflags));
}
@@ -721,11 +721,11 @@ static int ath9k_htc_tx_aggr_oper(struct ath9k_htc_priv *priv,
WMI_CMD_BUF(WMI_TX_AGGR_ENABLE_CMDID, &aggr);
if (ret)
- ath_dbg(common, ATH_DBG_CONFIG,
+ ath_dbg(common, CONFIG,
"Unable to %s TX aggregation for (%pM, %d)\n",
(aggr.aggr_enable) ? "start" : "stop", sta->addr, tid);
else
- ath_dbg(common, ATH_DBG_CONFIG,
+ ath_dbg(common, CONFIG,
"%s TX aggregation for (%pM, %d)\n",
(aggr.aggr_enable) ? "Starting" : "Stopping",
sta->addr, tid);
@@ -784,7 +784,7 @@ void ath9k_htc_ani_work(struct work_struct *work)
/* Long calibration runs independently of short calibration. */
if ((timestamp - common->ani.longcal_timer) >= ATH_LONG_CALINTERVAL) {
longcal = true;
- ath_dbg(common, ATH_DBG_ANI, "longcal @%lu\n", jiffies);
+ ath_dbg(common, ANI, "longcal @%lu\n", jiffies);
common->ani.longcal_timer = timestamp;
}
@@ -793,8 +793,7 @@ void ath9k_htc_ani_work(struct work_struct *work)
if ((timestamp - common->ani.shortcal_timer) >=
short_cal_interval) {
shortcal = true;
- ath_dbg(common, ATH_DBG_ANI,
- "shortcal @%lu\n", jiffies);
+ ath_dbg(common, ANI, "shortcal @%lu\n", jiffies);
common->ani.shortcal_timer = timestamp;
common->ani.resetcal_timer = timestamp;
}
@@ -808,7 +807,8 @@ void ath9k_htc_ani_work(struct work_struct *work)
}
/* Verify whether we must check ANI */
- if ((timestamp - common->ani.checkani_timer) >= ATH_ANI_POLLINTERVAL) {
+ if (ah->config.enable_ani &&
+ (timestamp - common->ani.checkani_timer) >= ATH_ANI_POLLINTERVAL) {
aniflag = true;
common->ani.checkani_timer = timestamp;
}
@@ -838,7 +838,7 @@ set_timer:
* short calibration and long calibration.
*/
cal_interval = ATH_LONG_CALINTERVAL;
- if (priv->ah->config.enable_ani)
+ if (ah->config.enable_ani)
cal_interval = min(cal_interval, (u32)ATH_ANI_POLLINTERVAL);
if (!common->ani.caldone)
cal_interval = min(cal_interval, (u32)short_cal_interval);
@@ -865,7 +865,7 @@ static void ath9k_htc_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
padsize = padpos & 3;
if (padsize && skb->len > padpos) {
if (skb_headroom(skb) < padsize) {
- ath_dbg(common, ATH_DBG_XMIT, "No room for padding\n");
+ ath_dbg(common, XMIT, "No room for padding\n");
goto fail_tx;
}
skb_push(skb, padsize);
@@ -874,13 +874,13 @@ static void ath9k_htc_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
slot = ath9k_htc_tx_get_slot(priv);
if (slot < 0) {
- ath_dbg(common, ATH_DBG_XMIT, "No free TX slot\n");
+ ath_dbg(common, XMIT, "No free TX slot\n");
goto fail_tx;
}
ret = ath9k_htc_tx_start(priv, skb, slot, false);
if (ret != 0) {
- ath_dbg(common, ATH_DBG_XMIT, "Tx failed\n");
+ ath_dbg(common, XMIT, "Tx failed\n");
goto clear_slot;
}
@@ -908,7 +908,7 @@ static int ath9k_htc_start(struct ieee80211_hw *hw)
mutex_lock(&priv->mutex);
- ath_dbg(common, ATH_DBG_CONFIG,
+ ath_dbg(common, CONFIG,
"Starting driver with initial channel: %d MHz\n",
curchan->center_freq);
@@ -942,7 +942,7 @@ static int ath9k_htc_start(struct ieee80211_hw *hw)
ret = ath9k_htc_update_cap_target(priv, 0);
if (ret)
- ath_dbg(common, ATH_DBG_CONFIG,
+ ath_dbg(common, CONFIG,
"Failed to update capability in target\n");
priv->op_flags &= ~OP_INVALID;
@@ -957,7 +957,7 @@ static int ath9k_htc_start(struct ieee80211_hw *hw)
mod_timer(&priv->tx.cleanup_timer,
jiffies + msecs_to_jiffies(ATH9K_HTC_TX_CLEANUP_INTERVAL));
- if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE) {
+ if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE) {
ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
AR_STOMP_LOW_WLAN_WGHT);
ath9k_hw_btcoex_enable(ah);
@@ -979,7 +979,7 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw)
mutex_lock(&priv->mutex);
if (priv->op_flags & OP_INVALID) {
- ath_dbg(common, ATH_DBG_ANY, "Device not present\n");
+ ath_dbg(common, ANY, "Device not present\n");
mutex_unlock(&priv->mutex);
return;
}
@@ -1009,7 +1009,8 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw)
mutex_lock(&priv->mutex);
- if (ah->btcoex_hw.enabled) {
+ if (ah->btcoex_hw.enabled &&
+ ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) {
ath9k_hw_btcoex_disable(ah);
if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
ath_htc_cancel_btcoex_work(priv);
@@ -1026,7 +1027,7 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw)
priv->op_flags |= OP_INVALID;
- ath_dbg(common, ATH_DBG_CONFIG, "Driver halt\n");
+ ath_dbg(common, CONFIG, "Driver halt\n");
mutex_unlock(&priv->mutex);
}
@@ -1119,8 +1120,8 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
ath9k_htc_start_ani(priv);
}
- ath_dbg(common, ATH_DBG_CONFIG,
- "Attach a VIF of type: %d at idx: %d\n", vif->type, avp->index);
+ ath_dbg(common, CONFIG, "Attach a VIF of type: %d at idx: %d\n",
+ vif->type, avp->index);
out:
ath9k_htc_ps_restore(priv);
@@ -1176,7 +1177,7 @@ static void ath9k_htc_remove_interface(struct ieee80211_hw *hw,
ath9k_htc_stop_ani(priv);
}
- ath_dbg(common, ATH_DBG_CONFIG, "Detach Interface at idx: %d\n", avp->index);
+ ath_dbg(common, CONFIG, "Detach Interface at idx: %d\n", avp->index);
ath9k_htc_ps_restore(priv);
mutex_unlock(&priv->mutex);
@@ -1201,8 +1202,7 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
mutex_unlock(&priv->htc_pm_lock);
if (enable_radio) {
- ath_dbg(common, ATH_DBG_CONFIG,
- "not-idle: enabling radio\n");
+ ath_dbg(common, CONFIG, "not-idle: enabling radio\n");
ath9k_htc_setpower(priv, ATH9K_PM_AWAKE);
ath9k_htc_radio_enable(hw);
}
@@ -1224,7 +1224,7 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
struct ieee80211_channel *curchan = hw->conf.channel;
int pos = curchan->hw_value;
- ath_dbg(common, ATH_DBG_CONFIG, "Set channel: %d MHz\n",
+ ath_dbg(common, CONFIG, "Set channel: %d MHz\n",
curchan->center_freq);
ath9k_cmn_update_ichannel(&priv->ah->channels[pos],
@@ -1264,8 +1264,7 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
}
mutex_unlock(&priv->htc_pm_lock);
- ath_dbg(common, ATH_DBG_CONFIG,
- "idle: disabling radio\n");
+ ath_dbg(common, CONFIG, "idle: disabling radio\n");
ath9k_htc_radio_disable(hw);
}
@@ -1297,7 +1296,7 @@ static void ath9k_htc_configure_filter(struct ieee80211_hw *hw,
*total_flags &= SUPPORTED_FILTERS;
if (priv->op_flags & OP_INVALID) {
- ath_dbg(ath9k_hw_common(priv->ah), ATH_DBG_ANY,
+ ath_dbg(ath9k_hw_common(priv->ah), ANY,
"Unable to configure filter on invalid state\n");
mutex_unlock(&priv->mutex);
return;
@@ -1308,8 +1307,8 @@ static void ath9k_htc_configure_filter(struct ieee80211_hw *hw,
rfilt = ath9k_htc_calcrxfilter(priv);
ath9k_hw_setrxfilter(priv->ah, rfilt);
- ath_dbg(ath9k_hw_common(priv->ah), ATH_DBG_CONFIG,
- "Set HW RX filter: 0x%x\n", rfilt);
+ ath_dbg(ath9k_hw_common(priv->ah), CONFIG, "Set HW RX filter: 0x%x\n",
+ rfilt);
ath9k_htc_ps_restore(priv);
mutex_unlock(&priv->mutex);
@@ -1376,7 +1375,7 @@ static int ath9k_htc_conf_tx(struct ieee80211_hw *hw,
qnum = get_hw_qnum(queue, priv->hwq_map);
- ath_dbg(common, ATH_DBG_CONFIG,
+ ath_dbg(common, CONFIG,
"Configure tx [queue/hwq] [%d/%d], aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n",
queue, qnum, params->aifs, params->cw_min,
params->cw_max, params->txop);
@@ -1411,7 +1410,7 @@ static int ath9k_htc_set_key(struct ieee80211_hw *hw,
return -ENOSPC;
mutex_lock(&priv->mutex);
- ath_dbg(common, ATH_DBG_CONFIG, "Set HW Key\n");
+ ath_dbg(common, CONFIG, "Set HW Key\n");
ath9k_htc_ps_wakeup(priv);
switch (cmd) {
@@ -1447,8 +1446,7 @@ static void ath9k_htc_set_bssid(struct ath9k_htc_priv *priv)
struct ath_common *common = ath9k_hw_common(priv->ah);
ath9k_hw_write_associd(priv->ah);
- ath_dbg(common, ATH_DBG_CONFIG,
- "BSSID: %pM aid: 0x%x\n",
+ ath_dbg(common, CONFIG, "BSSID: %pM aid: 0x%x\n",
common->curbssid, common->curaid);
}
@@ -1486,7 +1484,7 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
ath9k_htc_ps_wakeup(priv);
if (changed & BSS_CHANGED_ASSOC) {
- ath_dbg(common, ATH_DBG_CONFIG, "BSS Changed ASSOC %d\n",
+ ath_dbg(common, CONFIG, "BSS Changed ASSOC %d\n",
bss_conf->assoc);
bss_conf->assoc ?
@@ -1511,8 +1509,8 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
}
if ((changed & BSS_CHANGED_BEACON_ENABLED) && bss_conf->enable_beacon) {
- ath_dbg(common, ATH_DBG_CONFIG,
- "Beacon enabled for BSS: %pM\n", bss_conf->bssid);
+ ath_dbg(common, CONFIG, "Beacon enabled for BSS: %pM\n",
+ bss_conf->bssid);
ath9k_htc_set_tsfadjust(priv, vif);
priv->op_flags |= OP_ENABLE_BEACON;
ath9k_htc_beacon_config(priv, vif);
@@ -1524,7 +1522,7 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
* AP/IBSS interfaces.
*/
if ((priv->num_ap_vif <= 1) || priv->num_ibss_vif) {
- ath_dbg(common, ATH_DBG_CONFIG,
+ ath_dbg(common, CONFIG,
"Beacon disabled for BSS: %pM\n",
bss_conf->bssid);
priv->op_flags &= ~OP_ENABLE_BEACON;
@@ -1542,7 +1540,7 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
(vif->type == NL80211_IFTYPE_AP)) {
priv->op_flags |= OP_TSF_RESET;
}
- ath_dbg(common, ATH_DBG_CONFIG,
+ ath_dbg(common, CONFIG,
"Beacon interval changed for BSS: %pM\n",
bss_conf->bssid);
ath9k_htc_beacon_config(priv, vif);
@@ -1732,8 +1730,7 @@ static int ath9k_htc_set_bitrate_mask(struct ieee80211_hw *hw,
goto out;
}
- ath_dbg(common, ATH_DBG_CONFIG,
- "Set bitrate masks: 0x%x, 0x%x\n",
+ ath_dbg(common, CONFIG, "Set bitrate masks: 0x%x, 0x%x\n",
mask->control[IEEE80211_BAND_2GHZ].legacy,
mask->control[IEEE80211_BAND_5GHZ].legacy);
out:
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index 2d81c700e201..3e40a6461512 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -355,7 +355,7 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv,
vif_idx = avp->index;
} else {
if (!priv->ah->is_monitoring) {
- ath_dbg(ath9k_hw_common(priv->ah), ATH_DBG_XMIT,
+ ath_dbg(ath9k_hw_common(priv->ah), XMIT,
"VIF is null, but no monitor interface !\n");
return -EINVAL;
}
@@ -620,8 +620,7 @@ static struct sk_buff* ath9k_htc_tx_get_packet(struct ath9k_htc_priv *priv,
}
spin_unlock_irqrestore(&epid_queue->lock, flags);
- ath_dbg(common, ATH_DBG_XMIT,
- "No matching packet for cookie: %d, epid: %d\n",
+ ath_dbg(common, XMIT, "No matching packet for cookie: %d, epid: %d\n",
txs->cookie, epid);
return NULL;
@@ -705,8 +704,7 @@ static inline bool check_packet(struct ath9k_htc_priv *priv, struct sk_buff *skb
if (time_after(jiffies,
tx_ctl->timestamp +
msecs_to_jiffies(ATH9K_HTC_TX_TIMEOUT_INTERVAL))) {
- ath_dbg(common, ATH_DBG_XMIT,
- "Dropping a packet due to TX timeout\n");
+ ath_dbg(common, XMIT, "Dropping a packet due to TX timeout\n");
return true;
}
@@ -753,7 +751,7 @@ void ath9k_htc_tx_cleanup_timer(unsigned long data)
skb = ath9k_htc_tx_get_packet(priv, &event->txs);
if (skb) {
- ath_dbg(common, ATH_DBG_XMIT,
+ ath_dbg(common, XMIT,
"Found packet for cookie: %d, epid: %d\n",
event->txs.cookie,
MS(event->txs.ts_rate, ATH9K_HTC_TXSTAT_EPID));
@@ -1167,8 +1165,7 @@ void ath9k_htc_rxep(void *drv_priv, struct sk_buff *skb,
spin_unlock(&priv->rx.rxbuflock);
if (rxbuf == NULL) {
- ath_dbg(common, ATH_DBG_ANY,
- "No free RX buffer\n");
+ ath_dbg(common, ANY, "No free RX buffer\n");
goto err;
}
diff --git a/drivers/net/wireless/ath/ath9k/hw-ops.h b/drivers/net/wireless/ath/ath9k/hw-ops.h
index e74c233757a2..c4ad0b06bdbc 100644
--- a/drivers/net/wireless/ath/ath9k/hw-ops.h
+++ b/drivers/net/wireless/ath/ath9k/hw-ops.h
@@ -212,4 +212,13 @@ static inline int ath9k_hw_fast_chan_change(struct ath_hw *ah,
return ath9k_hw_private_ops(ah)->fast_chan_change(ah, chan,
ini_reloaded);
}
+
+static inline void ath9k_hw_set_radar_params(struct ath_hw *ah)
+{
+ if (!ath9k_hw_private_ops(ah)->set_radar_params)
+ return;
+
+ ath9k_hw_private_ops(ah)->set_radar_params(ah, &ah->radar_conf);
+}
+
#endif /* ATH9K_HW_OPS_H */
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 8873c6e6fb96..ee7759575050 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -133,7 +133,7 @@ bool ath9k_hw_wait(struct ath_hw *ah, u32 reg, u32 mask, u32 val, u32 timeout)
udelay(AH_TIME_QUANTUM);
}
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_ANY,
+ ath_dbg(ath9k_hw_common(ah), ANY,
"timeout (%d us) on reg 0x%x: 0x%08x & 0x%08x != 0x%08x\n",
timeout, reg, REG_READ(ah, reg), mask, val);
@@ -491,8 +491,7 @@ static int ath9k_hw_post_init(struct ath_hw *ah)
if (ecode != 0)
return ecode;
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_CONFIG,
- "Eeprom VER: %d, REV: %d\n",
+ ath_dbg(ath9k_hw_common(ah), CONFIG, "Eeprom VER: %d, REV: %d\n",
ah->eep_ops->get_eeprom_ver(ah),
ah->eep_ops->get_eeprom_rev(ah));
@@ -504,7 +503,7 @@ static int ath9k_hw_post_init(struct ath_hw *ah)
return ecode;
}
- if (!AR_SREV_9100(ah) && !AR_SREV_9340(ah)) {
+ if (ah->config.enable_ani) {
ath9k_hw_ani_setup(ah);
ath9k_hw_ani_init(ah);
}
@@ -567,7 +566,7 @@ static int __ath9k_hw_init(struct ath_hw *ah)
}
}
- ath_dbg(common, ATH_DBG_RESET, "serialize_regmode is %d\n",
+ ath_dbg(common, RESET, "serialize_regmode is %d\n",
ah->config.serialize_regmode);
if (AR_SREV_9285(ah) || AR_SREV_9271(ah))
@@ -610,6 +609,10 @@ static int __ath9k_hw_init(struct ath_hw *ah)
if (!AR_SREV_9300_20_OR_LATER(ah))
ah->ani_function &= ~ATH9K_ANI_MRC_CCK;
+ /* disable ANI for 9340 */
+ if (AR_SREV_9340(ah))
+ ah->config.enable_ani = false;
+
ath9k_hw_init_mode_regs(ah);
if (!ah->is_pciexpress)
@@ -954,8 +957,8 @@ static void ath9k_hw_set_cts_timeout(struct ath_hw *ah, u32 us)
static bool ath9k_hw_set_global_txtimeout(struct ath_hw *ah, u32 tu)
{
if (tu > 0xFFFF) {
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_XMIT,
- "bad global tx timeout %u\n", tu);
+ ath_dbg(ath9k_hw_common(ah), XMIT, "bad global tx timeout %u\n",
+ tu);
ah->globaltxtimeout = (u32) -1;
return false;
} else {
@@ -976,7 +979,7 @@ void ath9k_hw_init_global_settings(struct ath_hw *ah)
int rx_lat = 0, tx_lat = 0, eifs = 0;
u32 reg;
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_RESET, "ah->misc_mode 0x%x\n",
+ ath_dbg(ath9k_hw_common(ah), RESET, "ah->misc_mode 0x%x\n",
ah->misc_mode);
if (!chan)
@@ -1271,7 +1274,7 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
(npend || type == ATH9K_RESET_COLD)) {
int reset_err = 0;
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_RESET,
+ ath_dbg(ath9k_hw_common(ah), RESET,
"reset MAC via external reset\n");
reset_err = ah->external_reset();
@@ -1294,8 +1297,7 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
REG_WRITE(ah, AR_RTC_RC, 0);
if (!ath9k_hw_wait(ah, AR_RTC_RC, AR_RTC_RC_M, 0, AH_WAIT_TIMEOUT)) {
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_RESET,
- "RTC stuck in MAC reset\n");
+ ath_dbg(ath9k_hw_common(ah), RESET, "RTC stuck in MAC reset\n");
return false;
}
@@ -1340,8 +1342,7 @@ static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah)
AR_RTC_STATUS_M,
AR_RTC_STATUS_ON,
AH_WAIT_TIMEOUT)) {
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_RESET,
- "RTC not waking up\n");
+ ath_dbg(ath9k_hw_common(ah), RESET, "RTC not waking up\n");
return false;
}
@@ -1350,6 +1351,7 @@ static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah)
static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type)
{
+ bool ret = false;
if (AR_SREV_9300_20_OR_LATER(ah)) {
REG_WRITE(ah, AR_WA, ah->WARegVal);
@@ -1361,13 +1363,20 @@ static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type)
switch (type) {
case ATH9K_RESET_POWER_ON:
- return ath9k_hw_set_reset_power_on(ah);
+ ret = ath9k_hw_set_reset_power_on(ah);
+ break;
case ATH9K_RESET_WARM:
case ATH9K_RESET_COLD:
- return ath9k_hw_set_reset(ah, type);
+ ret = ath9k_hw_set_reset(ah, type);
+ break;
default:
- return false;
+ break;
}
+
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
+ REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
+
+ return ret;
}
static bool ath9k_hw_chip_reset(struct ath_hw *ah,
@@ -1406,7 +1415,7 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
for (qnum = 0; qnum < AR_NUM_QCU; qnum++) {
if (ath9k_hw_numtxpending(ah, qnum)) {
- ath_dbg(common, ATH_DBG_QUEUE,
+ ath_dbg(common, QUEUE,
"Transmit frames pending on queue %d\n", qnum);
return false;
}
@@ -1506,6 +1515,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
struct ath9k_hw_cal_data *caldata, bool bChannelChange)
{
struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
u32 saveLedState;
struct ath9k_channel *curchan = ah->curchan;
u32 saveDefAntenna;
@@ -1513,6 +1523,52 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
u64 tsf = 0;
int i, r;
bool allow_fbs = false;
+ bool mci = !!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI);
+ bool save_fullsleep = ah->chip_fullsleep;
+
+ if (mci) {
+
+ ar9003_mci_2g5g_changed(ah, IS_CHAN_2GHZ(chan));
+
+ if (mci_hw->bt_state == MCI_BT_CAL_START) {
+ u32 payload[4] = {0, 0, 0, 0};
+
+ ath_dbg(common, MCI, "MCI stop rx for BT CAL\n");
+
+ mci_hw->bt_state = MCI_BT_CAL;
+
+ /*
+ * MCI FIX: disable the MCI interrupt here. This avoids the
+ * SW_MSG_DONE or RX_MSG bits triggering MCI_INT and
+ * causing mci_intr reentry.
+ */
+
+ ar9003_mci_disable_interrupt(ah);
+
+ ath_dbg(common, MCI, "send WLAN_CAL_GRANT\n");
+ MCI_GPM_SET_CAL_TYPE(payload, MCI_GPM_WLAN_CAL_GRANT);
+ ar9003_mci_send_message(ah, MCI_GPM, 0, payload,
+ 16, true, false);
+
+ ath_dbg(common, MCI, "\nMCI BT is calibrating\n");
+
+ /* Wait up to 25 ms for BT calibration to complete */
+
+ if (ar9003_mci_wait_for_gpm(ah, MCI_GPM_BT_CAL_DONE,
+ 0, 25000))
+ ath_dbg(common, MCI,
+ "MCI got BT_CAL_DONE\n");
+ else
+ ath_dbg(common, MCI,
+ "MCI ### BT cal takes to long, force bt_state to be bt_awake\n");
+ mci_hw->bt_state = MCI_BT_AWAKE;
+ /* MCI FIX: enable mci interrupt here */
+ ar9003_mci_enable_interrupt(ah);
+
+ return true;
+ }
+ }
+
if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
return -EIO;
@@ -1550,12 +1606,29 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
if (ath9k_hw_channel_change(ah, chan)) {
ath9k_hw_loadnf(ah, ah->curchan);
ath9k_hw_start_nfcal(ah, true);
+ if (mci && mci_hw->ready)
+ ar9003_mci_2g5g_switch(ah, true);
+
if (AR_SREV_9271(ah))
ar9002_hw_load_ani_reg(ah, chan);
return 0;
}
}
+ if (mci) {
+ ar9003_mci_disable_interrupt(ah);
+
+ if (mci_hw->ready && !save_fullsleep) {
+ ar9003_mci_mute_bt(ah);
+ udelay(20);
+ REG_WRITE(ah, AR_BTCOEX_CTRL, 0);
+ }
+
+ mci_hw->bt_state = MCI_BT_SLEEP;
+ mci_hw->ready = false;
+ }
+
+
saveDefAntenna = REG_READ(ah, AR_DEF_ANTENNA);
if (saveDefAntenna == 0)
saveDefAntenna = 1;
@@ -1611,6 +1684,9 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
if (r)
return r;
+ if (mci)
+ ar9003_mci_reset(ah, false, IS_CHAN_2GHZ(chan), save_fullsleep);
+
/*
* Some AR91xx SoC devices frequently fail to accept TSF writes
* right after the chip reset. When that happens, write a new
@@ -1728,6 +1804,54 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
ath9k_hw_loadnf(ah, chan);
ath9k_hw_start_nfcal(ah, true);
+ if (mci && mci_hw->ready) {
+
+ if (IS_CHAN_2GHZ(chan) &&
+ (mci_hw->bt_state == MCI_BT_SLEEP)) {
+
+ if (ar9003_mci_check_int(ah,
+ AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET) ||
+ ar9003_mci_check_int(ah,
+ AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE)) {
+
+ /*
+ * BT is sleeping. If BT wakes up during WLAN
+ * calibration, we need to go through all the
+ * message exchanges again and recalibrate.
+ */
+
+ ath_dbg(common, MCI,
+ "MCI BT wakes up during WLAN calibration\n");
+
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+ AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET |
+ AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE);
+ ath_dbg(common, MCI, "MCI send REMOTE_RESET\n");
+ ar9003_mci_remote_reset(ah, true);
+ ar9003_mci_send_sys_waking(ah, true);
+ udelay(1);
+ if (IS_CHAN_2GHZ(chan))
+ ar9003_mci_send_lna_transfer(ah, true);
+
+ mci_hw->bt_state = MCI_BT_AWAKE;
+
+ ath_dbg(common, MCI, "MCI re-cal\n");
+
+ if (caldata) {
+ caldata->done_txiqcal_once = false;
+ caldata->done_txclcal_once = false;
+ caldata->rtt_hist.num_readings = 0;
+ }
+
+ if (!ath9k_hw_init_cal(ah, chan))
+ return -EIO;
+
+ }
+ }
+ ar9003_mci_enable_interrupt(ah);
+ }
+
ENABLE_REGWRITE_BUFFER(ah);
ath9k_hw_restore_chainmask(ah);
@@ -1742,14 +1866,14 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
u32 mask;
mask = REG_READ(ah, AR_CFG);
if (mask & (AR_CFG_SWRB | AR_CFG_SWTB | AR_CFG_SWRG)) {
- ath_dbg(common, ATH_DBG_RESET,
- "CFG Byte Swap Set 0x%x\n", mask);
+ ath_dbg(common, RESET, "CFG Byte Swap Set 0x%x\n",
+ mask);
} else {
mask =
INIT_CONFIG_STATUS | AR_CFG_SWRB | AR_CFG_SWTB;
REG_WRITE(ah, AR_CFG, mask);
- ath_dbg(common, ATH_DBG_RESET,
- "Setting CFG 0x%x\n", REG_READ(ah, AR_CFG));
+ ath_dbg(common, RESET, "Setting CFG 0x%x\n",
+ REG_READ(ah, AR_CFG));
}
} else {
if (common->bus_ops->ath_bus_type == ATH_USB) {
@@ -1767,9 +1891,25 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
#endif
}
- if (ah->btcoex_hw.enabled)
+ if (ah->btcoex_hw.enabled &&
+ ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE)
ath9k_hw_btcoex_enable(ah);
+ if (mci && mci_hw->ready) {
+ /*
+ * Check the BT state again to make
+ * sure it hasn't changed.
+ */
+
+ ar9003_mci_sync_bt_state(ah);
+ ar9003_mci_2g5g_switch(ah, true);
+
+ if ((mci_hw->bt_state == MCI_BT_AWAKE) &&
+ (mci_hw->query_bt == true)) {
+ mci_hw->need_flush_btinfo = true;
+ }
+ }
+
if (AR_SREV_9300_20_OR_LATER(ah)) {
ar9003_hw_bb_watchdog_config(ah);
@@ -1934,6 +2074,7 @@ static bool ath9k_hw_set_power_awake(struct ath_hw *ah, int setChip)
bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode)
{
struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
int status = true, setChip = true;
static const char *modes[] = {
"AWAKE",
@@ -1945,18 +2086,41 @@ bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode)
if (ah->power_mode == mode)
return status;
- ath_dbg(common, ATH_DBG_RESET, "%s -> %s\n",
+ ath_dbg(common, RESET, "%s -> %s\n",
modes[ah->power_mode], modes[mode]);
switch (mode) {
case ATH9K_PM_AWAKE:
status = ath9k_hw_set_power_awake(ah, setChip);
+
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
+ REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
+
break;
case ATH9K_PM_FULL_SLEEP:
+
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI) {
+ if (ar9003_mci_state(ah, MCI_STATE_ENABLE, NULL) &&
+ (mci->bt_state != MCI_BT_SLEEP) &&
+ !mci->halted_bt_gpm) {
+ ath_dbg(common, MCI,
+ "MCI halt BT GPM (full_sleep)\n");
+ ar9003_mci_send_coex_halt_bt_gpm(ah,
+ true, true);
+ }
+
+ mci->ready = false;
+ REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
+ }
+
ath9k_set_power_sleep(ah, setChip);
ah->chip_fullsleep = true;
break;
case ATH9K_PM_NETWORK_SLEEP:
+
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
+ REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
+
ath9k_set_power_network_sleep(ah, setChip);
break;
default:
@@ -2006,9 +2170,8 @@ void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period)
AR_TBTT_TIMER_EN | AR_DBA_TIMER_EN | AR_SWBA_TIMER_EN;
break;
default:
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_BEACON,
- "%s: unsupported opmode: %d\n",
- __func__, ah->opmode);
+ ath_dbg(ath9k_hw_common(ah), BEACON,
+ "%s: unsupported opmode: %d\n", __func__, ah->opmode);
return;
break;
}
@@ -2059,10 +2222,10 @@ void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
else
nextTbtt = bs->bs_nexttbtt;
- ath_dbg(common, ATH_DBG_BEACON, "next DTIM %d\n", bs->bs_nextdtim);
- ath_dbg(common, ATH_DBG_BEACON, "next beacon %d\n", nextTbtt);
- ath_dbg(common, ATH_DBG_BEACON, "beacon period %d\n", beaconintval);
- ath_dbg(common, ATH_DBG_BEACON, "DTIM period %d\n", dtimperiod);
+ ath_dbg(common, BEACON, "next DTIM %d\n", bs->bs_nextdtim);
+ ath_dbg(common, BEACON, "next beacon %d\n", nextTbtt);
+ ath_dbg(common, BEACON, "beacon period %d\n", beaconintval);
+ ath_dbg(common, BEACON, "DTIM period %d\n", dtimperiod);
ENABLE_REGWRITE_BUFFER(ah);
@@ -2109,6 +2272,30 @@ static u8 fixup_chainmask(u8 chip_chainmask, u8 eeprom_chainmask)
return chip_chainmask;
}
+/**
+ * ath9k_hw_dfs_tested - checks if DFS has been tested with used chipset
+ * @ah: the atheros hardware data structure
+ *
+ * We enable DFS support upstream only on chipsets which have passed a
+ * series of tests. The desired test requirements are documented at:
+ *
+ * http://wireless.kernel.org/en/users/Drivers/ath9k/dfs
+ *
+ * Once a new chipset has been properly tested, an individual commit can be
+ * used to document its DFS testing.
+ */
+static bool ath9k_hw_dfs_tested(struct ath_hw *ah)
+{
+
+ switch (ah->hw_version.macVersion) {
+ /* AR9580 will likely be our first target to get testing on */
+ case AR_SREV_VERSION_9580:
+ default:
+ return false;
+ }
+}
+
int ath9k_hw_fill_cap_info(struct ath_hw *ah)
{
struct ath9k_hw_capabilities *pCap = &ah->caps;
@@ -2130,8 +2317,8 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
regulatory->current_rd += 5;
else if (regulatory->current_rd == 0x41)
regulatory->current_rd = 0x43;
- ath_dbg(common, ATH_DBG_REGULATORY,
- "regdomain mapped to 0x%x\n", regulatory->current_rd);
+ ath_dbg(common, REGULATORY, "regdomain mapped to 0x%x\n",
+ regulatory->current_rd);
}
eeval = ah->eep_ops->get_eeprom(ah, EEP_OP_MODE);
@@ -2149,6 +2336,8 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
if (AR_SREV_9485(ah) || AR_SREV_9285(ah) || AR_SREV_9330(ah))
chip_chainmask = 1;
+ else if (AR_SREV_9462(ah))
+ chip_chainmask = 3;
else if (!AR_SREV_9280_20_OR_LATER(ah))
chip_chainmask = 7;
else if (!AR_SREV_9300_20_OR_LATER(ah) || AR_SREV_9340(ah))
@@ -2205,12 +2394,10 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
else
pCap->num_gpio_pins = AR_NUM_GPIO;
- if (AR_SREV_9160_10_OR_LATER(ah) || AR_SREV_9100(ah)) {
- pCap->hw_caps |= ATH9K_HW_CAP_CST;
+ if (AR_SREV_9160_10_OR_LATER(ah) || AR_SREV_9100(ah))
pCap->rts_aggr_limit = ATH_AMPDU_LIMIT_MAX;
- } else {
+ else
pCap->rts_aggr_limit = (8 * 1024);
- }
#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
ah->rfsilent = ah->eep_ops->get_eeprom(ah, EEP_RF_SILENT);
@@ -2234,7 +2421,9 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
pCap->hw_caps |= ATH9K_HW_CAP_4KB_SPLITTRANS;
if (common->btcoex_enabled) {
- if (AR_SREV_9300_20_OR_LATER(ah)) {
+ if (AR_SREV_9462(ah))
+ btcoex_hw->scheme = ATH_BTCOEX_CFG_MCI;
+ else if (AR_SREV_9300_20_OR_LATER(ah)) {
btcoex_hw->scheme = ATH_BTCOEX_CFG_3WIRE;
btcoex_hw->btactive_gpio = ATH_BTACTIVE_GPIO_9300;
btcoex_hw->wlanactive_gpio = ATH_WLANACTIVE_GPIO_9300;
@@ -2318,6 +2507,9 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
pCap->pcie_lcr_offset = 0x80;
}
+ if (ath9k_hw_dfs_tested(ah))
+ pCap->hw_caps |= ATH9K_HW_CAP_DFS;
+
tx_chainmask = pCap->tx_chainmask;
rx_chainmask = pCap->rx_chainmask;
while (tx_chainmask || rx_chainmask) {
@@ -2332,11 +2524,11 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
if (AR_SREV_9300_20_OR_LATER(ah)) {
ah->enabled_cals |= TX_IQ_CAL;
- if (!AR_SREV_9330(ah))
+ if (AR_SREV_9485_OR_LATER(ah))
ah->enabled_cals |= TX_IQ_ON_AGC_CAL;
}
if (AR_SREV_9462(ah))
- pCap->hw_caps |= ATH9K_HW_CAP_RTT;
+ pCap->hw_caps |= ATH9K_HW_CAP_RTT | ATH9K_HW_CAP_MCI;
return 0;
}
@@ -2584,7 +2776,7 @@ void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit, bool test)
struct ath9k_channel *chan = ah->curchan;
struct ieee80211_channel *channel = chan->chan;
- reg->power_limit = min_t(int, limit, MAX_RATE_POWER);
+ reg->power_limit = min_t(u32, limit, MAX_RATE_POWER);
if (test)
channel->max_power = MAX_RATE_POWER / 2;
@@ -2651,7 +2843,7 @@ void ath9k_hw_reset_tsf(struct ath_hw *ah)
{
if (!ath9k_hw_wait(ah, AR_SLP32_MODE, AR_SLP32_TSF_WRITE_STATUS, 0,
AH_TSF_WRITE_TIMEOUT))
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_RESET,
+ ath_dbg(ath9k_hw_common(ah), RESET,
"AR_SLP32_TSF_WRITE_STATUS limit exceeded\n");
REG_WRITE(ah, AR_RESET_TSF, AR_RESET_TSF_ONCE);
@@ -2776,7 +2968,7 @@ void ath9k_hw_gen_timer_start(struct ath_hw *ah,
timer_next = tsf + trig_timeout;
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_HWTIMER,
+ ath_dbg(ath9k_hw_common(ah), HWTIMER,
"current tsf %x period %x timer_next %x\n",
tsf, timer_period, timer_next);
@@ -2865,8 +3057,8 @@ void ath_gen_timer_isr(struct ath_hw *ah)
index = rightmost_index(timer_table, &thresh_mask);
timer = timer_table->timers[index];
BUG_ON(!timer);
- ath_dbg(common, ATH_DBG_HWTIMER,
- "TSF overflow for Gen timer %d\n", index);
+ ath_dbg(common, HWTIMER, "TSF overflow for Gen timer %d\n",
+ index);
timer->overflow(timer->arg);
}
@@ -2874,7 +3066,7 @@ void ath_gen_timer_isr(struct ath_hw *ah)
index = rightmost_index(timer_table, &trigger_mask);
timer = timer_table->timers[index];
BUG_ON(!timer);
- ath_dbg(common, ATH_DBG_HWTIMER,
+ ath_dbg(common, HWTIMER,
"Gen timer[%d] trigger\n", index);
timer->trigger(timer->arg);
}
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index f389b3c93cf3..6a29004a71b0 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -59,9 +59,6 @@
#define AT9285_COEX3WIRE_SA_SUBSYSID 0x30aa
#define AT9285_COEX3WIRE_DA_SUBSYSID 0x30ab
-#define AR9300_NUM_BT_WEIGHTS 4
-#define AR9300_NUM_WLAN_WEIGHTS 4
-
#define ATH_AMPDU_LIMIT_MAX (64 * 1024 - 1)
#define ATH_DEFAULT_NOISE_FLOOR -95
@@ -129,6 +126,16 @@
#define AR_GPIO_OUTPUT_MUX_AS_RX_CLEAR_EXTERNAL 4
#define AR_GPIO_OUTPUT_MUX_AS_MAC_NETWORK_LED 5
#define AR_GPIO_OUTPUT_MUX_AS_MAC_POWER_LED 6
+#define AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_DATA 0x16
+#define AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_CLK 0x17
+#define AR_GPIO_OUTPUT_MUX_AS_MCI_BT_DATA 0x18
+#define AR_GPIO_OUTPUT_MUX_AS_MCI_BT_CLK 0x19
+#define AR_GPIO_OUTPUT_MUX_AS_WL_IN_TX 0x14
+#define AR_GPIO_OUTPUT_MUX_AS_WL_IN_RX 0x13
+#define AR_GPIO_OUTPUT_MUX_AS_BT_IN_TX 9
+#define AR_GPIO_OUTPUT_MUX_AS_BT_IN_RX 8
+#define AR_GPIO_OUTPUT_MUX_AS_RUCKUS_STROBE 0x1d
+#define AR_GPIO_OUTPUT_MUX_AS_RUCKUS_DATA 0x1e
#define AR_GPIOD_MASK 0x00001FFF
#define AR_GPIO_BIT(_gpio) (1 << (_gpio))
@@ -189,20 +196,25 @@ enum ath_ini_subsys {
enum ath9k_hw_caps {
ATH9K_HW_CAP_HT = BIT(0),
ATH9K_HW_CAP_RFSILENT = BIT(1),
- ATH9K_HW_CAP_CST = BIT(2),
- ATH9K_HW_CAP_AUTOSLEEP = BIT(4),
- ATH9K_HW_CAP_4KB_SPLITTRANS = BIT(5),
- ATH9K_HW_CAP_EDMA = BIT(6),
- ATH9K_HW_CAP_RAC_SUPPORTED = BIT(7),
- ATH9K_HW_CAP_LDPC = BIT(8),
- ATH9K_HW_CAP_FASTCLOCK = BIT(9),
- ATH9K_HW_CAP_SGI_20 = BIT(10),
- ATH9K_HW_CAP_PAPRD = BIT(11),
- ATH9K_HW_CAP_ANT_DIV_COMB = BIT(12),
- ATH9K_HW_CAP_2GHZ = BIT(13),
- ATH9K_HW_CAP_5GHZ = BIT(14),
- ATH9K_HW_CAP_APM = BIT(15),
- ATH9K_HW_CAP_RTT = BIT(16),
+ ATH9K_HW_CAP_AUTOSLEEP = BIT(2),
+ ATH9K_HW_CAP_4KB_SPLITTRANS = BIT(3),
+ ATH9K_HW_CAP_EDMA = BIT(4),
+ ATH9K_HW_CAP_RAC_SUPPORTED = BIT(5),
+ ATH9K_HW_CAP_LDPC = BIT(6),
+ ATH9K_HW_CAP_FASTCLOCK = BIT(7),
+ ATH9K_HW_CAP_SGI_20 = BIT(8),
+ ATH9K_HW_CAP_PAPRD = BIT(9),
+ ATH9K_HW_CAP_ANT_DIV_COMB = BIT(10),
+ ATH9K_HW_CAP_2GHZ = BIT(11),
+ ATH9K_HW_CAP_5GHZ = BIT(12),
+ ATH9K_HW_CAP_APM = BIT(13),
+ ATH9K_HW_CAP_RTT = BIT(14),
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+ ATH9K_HW_CAP_MCI = BIT(15),
+#else
+ ATH9K_HW_CAP_MCI = 0,
+#endif
+ ATH9K_HW_CAP_DFS = BIT(16),
};
struct ath9k_hw_capabilities {
@@ -268,6 +280,7 @@ enum ath9k_int {
ATH9K_INT_TX = 0x00000040,
ATH9K_INT_TXDESC = 0x00000080,
ATH9K_INT_TIM_TIMER = 0x00000100,
+ ATH9K_INT_MCI = 0x00000200,
ATH9K_INT_BB_WATCHDOG = 0x00000400,
ATH9K_INT_TXURN = 0x00000800,
ATH9K_INT_MIB = 0x00001000,
@@ -419,6 +432,161 @@ enum ath9k_rx_qtype {
ATH9K_RX_QUEUE_MAX,
};
+enum mci_message_header { /* length of payload */
+ MCI_LNA_CTRL = 0x10, /* len = 0 */
+ MCI_CONT_NACK = 0x20, /* len = 0 */
+ MCI_CONT_INFO = 0x30, /* len = 4 */
+ MCI_CONT_RST = 0x40, /* len = 0 */
+ MCI_SCHD_INFO = 0x50, /* len = 16 */
+ MCI_CPU_INT = 0x60, /* len = 4 */
+ MCI_SYS_WAKING = 0x70, /* len = 0 */
+ MCI_GPM = 0x80, /* len = 16 */
+ MCI_LNA_INFO = 0x90, /* len = 1 */
+ MCI_LNA_STATE = 0x94,
+ MCI_LNA_TAKE = 0x98,
+ MCI_LNA_TRANS = 0x9c,
+ MCI_SYS_SLEEPING = 0xa0, /* len = 0 */
+ MCI_REQ_WAKE = 0xc0, /* len = 0 */
+ MCI_DEBUG_16 = 0xfe, /* len = 2 */
+ MCI_REMOTE_RESET = 0xff /* len = 16 */
+};
+
+enum ath_mci_gpm_coex_profile_type {
+ MCI_GPM_COEX_PROFILE_UNKNOWN,
+ MCI_GPM_COEX_PROFILE_RFCOMM,
+ MCI_GPM_COEX_PROFILE_A2DP,
+ MCI_GPM_COEX_PROFILE_HID,
+ MCI_GPM_COEX_PROFILE_BNEP,
+ MCI_GPM_COEX_PROFILE_VOICE,
+ MCI_GPM_COEX_PROFILE_MAX
+};
+
+/* MCI GPM/Coex opcode/type definitions */
+enum {
+ MCI_GPM_COEX_W_GPM_PAYLOAD = 1,
+ MCI_GPM_COEX_B_GPM_TYPE = 4,
+ MCI_GPM_COEX_B_GPM_OPCODE = 5,
+ /* MCI_GPM_WLAN_CAL_REQ, MCI_GPM_WLAN_CAL_DONE */
+ MCI_GPM_WLAN_CAL_W_SEQUENCE = 2,
+
+ /* MCI_GPM_COEX_VERSION_QUERY */
+ /* MCI_GPM_COEX_VERSION_RESPONSE */
+ MCI_GPM_COEX_B_MAJOR_VERSION = 6,
+ MCI_GPM_COEX_B_MINOR_VERSION = 7,
+ /* MCI_GPM_COEX_STATUS_QUERY */
+ MCI_GPM_COEX_B_BT_BITMAP = 6,
+ MCI_GPM_COEX_B_WLAN_BITMAP = 7,
+ /* MCI_GPM_COEX_HALT_BT_GPM */
+ MCI_GPM_COEX_B_HALT_STATE = 6,
+ /* MCI_GPM_COEX_WLAN_CHANNELS */
+ MCI_GPM_COEX_B_CHANNEL_MAP = 6,
+ /* MCI_GPM_COEX_BT_PROFILE_INFO */
+ MCI_GPM_COEX_B_PROFILE_TYPE = 6,
+ MCI_GPM_COEX_B_PROFILE_LINKID = 7,
+ MCI_GPM_COEX_B_PROFILE_STATE = 8,
+ MCI_GPM_COEX_B_PROFILE_ROLE = 9,
+ MCI_GPM_COEX_B_PROFILE_RATE = 10,
+ MCI_GPM_COEX_B_PROFILE_VOTYPE = 11,
+ MCI_GPM_COEX_H_PROFILE_T = 12,
+ MCI_GPM_COEX_B_PROFILE_W = 14,
+ MCI_GPM_COEX_B_PROFILE_A = 15,
+ /* MCI_GPM_COEX_BT_STATUS_UPDATE */
+ MCI_GPM_COEX_B_STATUS_TYPE = 6,
+ MCI_GPM_COEX_B_STATUS_LINKID = 7,
+ MCI_GPM_COEX_B_STATUS_STATE = 8,
+ /* MCI_GPM_COEX_BT_UPDATE_FLAGS */
+ MCI_GPM_COEX_W_BT_FLAGS = 6,
+ MCI_GPM_COEX_B_BT_FLAGS_OP = 10
+};
+
+enum mci_gpm_subtype {
+ MCI_GPM_BT_CAL_REQ = 0,
+ MCI_GPM_BT_CAL_GRANT = 1,
+ MCI_GPM_BT_CAL_DONE = 2,
+ MCI_GPM_WLAN_CAL_REQ = 3,
+ MCI_GPM_WLAN_CAL_GRANT = 4,
+ MCI_GPM_WLAN_CAL_DONE = 5,
+ MCI_GPM_COEX_AGENT = 0x0c,
+ MCI_GPM_RSVD_PATTERN = 0xfe,
+ MCI_GPM_RSVD_PATTERN32 = 0xfefefefe,
+ MCI_GPM_BT_DEBUG = 0xff
+};
+
+enum mci_bt_state {
+ MCI_BT_SLEEP,
+ MCI_BT_AWAKE,
+ MCI_BT_CAL_START,
+ MCI_BT_CAL
+};
+
+/* Type of state query */
+enum mci_state_type {
+ MCI_STATE_ENABLE,
+ MCI_STATE_INIT_GPM_OFFSET,
+ MCI_STATE_NEXT_GPM_OFFSET,
+ MCI_STATE_LAST_GPM_OFFSET,
+ MCI_STATE_BT,
+ MCI_STATE_SET_BT_SLEEP,
+ MCI_STATE_SET_BT_AWAKE,
+ MCI_STATE_SET_BT_CAL_START,
+ MCI_STATE_SET_BT_CAL,
+ MCI_STATE_LAST_SCHD_MSG_OFFSET,
+ MCI_STATE_REMOTE_SLEEP,
+ MCI_STATE_CONT_RSSI_POWER,
+ MCI_STATE_CONT_PRIORITY,
+ MCI_STATE_CONT_TXRX,
+ MCI_STATE_RESET_REQ_WAKE,
+ MCI_STATE_SEND_WLAN_COEX_VERSION,
+ MCI_STATE_SET_BT_COEX_VERSION,
+ MCI_STATE_SEND_WLAN_CHANNELS,
+ MCI_STATE_SEND_VERSION_QUERY,
+ MCI_STATE_SEND_STATUS_QUERY,
+ MCI_STATE_NEED_FLUSH_BT_INFO,
+ MCI_STATE_SET_CONCUR_TX_PRI,
+ MCI_STATE_RECOVER_RX,
+ MCI_STATE_NEED_FTP_STOMP,
+ MCI_STATE_NEED_TUNING,
+ MCI_STATE_DEBUG,
+ MCI_STATE_MAX
+};
+
+enum mci_gpm_coex_opcode {
+ MCI_GPM_COEX_VERSION_QUERY,
+ MCI_GPM_COEX_VERSION_RESPONSE,
+ MCI_GPM_COEX_STATUS_QUERY,
+ MCI_GPM_COEX_HALT_BT_GPM,
+ MCI_GPM_COEX_WLAN_CHANNELS,
+ MCI_GPM_COEX_BT_PROFILE_INFO,
+ MCI_GPM_COEX_BT_STATUS_UPDATE,
+ MCI_GPM_COEX_BT_UPDATE_FLAGS
+};
+
+#define MCI_GPM_NOMORE 0
+#define MCI_GPM_MORE 1
+#define MCI_GPM_INVALID 0xffffffff
+
+#define MCI_GPM_RECYCLE(_p_gpm) do { \
+ *(((u32 *)_p_gpm) + MCI_GPM_COEX_W_GPM_PAYLOAD) = \
+ MCI_GPM_RSVD_PATTERN32; \
+} while (0)
+
+#define MCI_GPM_TYPE(_p_gpm) \
+ (*(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_GPM_TYPE) & 0xff)
+
+#define MCI_GPM_OPCODE(_p_gpm) \
+ (*(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_GPM_OPCODE) & 0xff)
+
+#define MCI_GPM_SET_CAL_TYPE(_p_gpm, _cal_type) do { \
+ *(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_GPM_TYPE) = (_cal_type) & 0xff;\
+} while (0)
+
+#define MCI_GPM_SET_TYPE_OPCODE(_p_gpm, _type, _opcode) do { \
+ *(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_GPM_TYPE) = (_type) & 0xff; \
+ *(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_GPM_OPCODE) = (_opcode) & 0xff;\
+} while (0)
+
+#define MCI_GPM_IS_CAL_TYPE(_type) ((_type) <= MCI_GPM_WLAN_CAL_DONE)
+
struct ath9k_beacon_state {
u32 bs_nexttbtt;
u32 bs_nextdtim;
@@ -791,8 +959,6 @@ struct ath_hw {
/* Bluetooth coexistence */
struct ath_btcoex_hw btcoex_hw;
- u32 bt_coex_bt_weight[AR9300_NUM_BT_WEIGHTS];
- u32 bt_coex_wlan_weight[AR9300_NUM_WLAN_WEIGHTS];
u32 intr_txqs;
u8 txchainmask;
@@ -850,7 +1016,7 @@ struct ath_hw {
u32 ts_paddr_start;
u32 ts_paddr_end;
u16 ts_tail;
- u8 ts_size;
+ u16 ts_size;
u32 bb_watchdog_last_status;
u32 bb_watchdog_timeout_ms; /* in ms, 0 to disable */
@@ -948,7 +1114,6 @@ bool ath9k_hw_disable(struct ath_hw *ah);
void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit, bool test);
void ath9k_hw_setopmode(struct ath_hw *ah);
void ath9k_hw_setmcastfilter(struct ath_hw *ah, u32 filter0, u32 filter1);
-void ath9k_hw_setbssidmask(struct ath_hw *ah);
void ath9k_hw_write_associd(struct ath_hw *ah);
u32 ath9k_hw_gettsf32(struct ath_hw *ah);
u64 ath9k_hw_gettsf64(struct ath_hw *ah);
@@ -1041,6 +1206,42 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning);
void ath9k_hw_proc_mib_event(struct ath_hw *ah);
void ath9k_hw_ani_monitor(struct ath_hw *ah, struct ath9k_channel *chan);
+bool ar9003_mci_send_message(struct ath_hw *ah, u8 header, u32 flag,
+ u32 *payload, u8 len, bool wait_done,
+ bool check_bt);
+void ar9003_mci_mute_bt(struct ath_hw *ah);
+u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data);
+void ar9003_mci_setup(struct ath_hw *ah, u32 gpm_addr, void *gpm_buf,
+ u16 len, u32 sched_addr);
+void ar9003_mci_cleanup(struct ath_hw *ah);
+void ar9003_mci_send_coex_halt_bt_gpm(struct ath_hw *ah, bool halt,
+ bool wait_done);
+u32 ar9003_mci_wait_for_gpm(struct ath_hw *ah, u8 gpm_type,
+ u8 gpm_opcode, int time_out);
+void ar9003_mci_2g5g_changed(struct ath_hw *ah, bool is_2g);
+void ar9003_mci_disable_interrupt(struct ath_hw *ah);
+void ar9003_mci_enable_interrupt(struct ath_hw *ah);
+void ar9003_mci_2g5g_switch(struct ath_hw *ah, bool wait_done);
+void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
+ bool is_full_sleep);
+bool ar9003_mci_check_int(struct ath_hw *ah, u32 ints);
+void ar9003_mci_remote_reset(struct ath_hw *ah, bool wait_done);
+void ar9003_mci_send_sys_waking(struct ath_hw *ah, bool wait_done);
+void ar9003_mci_send_lna_transfer(struct ath_hw *ah, bool wait_done);
+void ar9003_mci_sync_bt_state(struct ath_hw *ah);
+void ar9003_mci_get_interrupt(struct ath_hw *ah, u32 *raw_intr,
+ u32 *rx_msg_intr);
+
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+static inline enum ath_btcoex_scheme
+ath9k_hw_get_btcoex_scheme(struct ath_hw *ah)
+{
+ return ah->btcoex_hw.scheme;
+}
+#else
+#define ath9k_hw_get_btcoex_scheme(...) ATH_BTCOEX_CFG_NONE
+#endif
+
#define ATH9K_CLOCK_RATE_CCK 22
#define ATH9K_CLOCK_RATE_5GHZ_OFDM 40
#define ATH9K_CLOCK_RATE_2GHZ_OFDM 44
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index d4c909f8e474..abf943557dee 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -258,6 +258,8 @@ static void setup_ht_cap(struct ath_softc *sc,
if (AR_SREV_9330(ah) || AR_SREV_9485(ah))
max_streams = 1;
+ else if (AR_SREV_9462(ah))
+ max_streams = 2;
else if (AR_SREV_9300_20_OR_LATER(ah))
max_streams = 3;
else
@@ -274,8 +276,7 @@ static void setup_ht_cap(struct ath_softc *sc,
tx_streams = ath9k_cmn_count_streams(ah->txchainmask, max_streams);
rx_streams = ath9k_cmn_count_streams(ah->rxchainmask, max_streams);
- ath_dbg(common, ATH_DBG_CONFIG,
- "TX streams %d, RX streams: %d\n",
+ ath_dbg(common, CONFIG, "TX streams %d, RX streams: %d\n",
tx_streams, rx_streams);
if (tx_streams != rx_streams) {
@@ -295,9 +296,22 @@ static int ath9k_reg_notifier(struct wiphy *wiphy,
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct ath_softc *sc = hw->priv;
- struct ath_regulatory *reg = ath9k_hw_regulatory(sc->sc_ah);
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
+ int ret;
+
+ ret = ath_reg_notifier_apply(wiphy, request, reg);
+
+ /* Set tx power */
+ if (ah->curchan) {
+ sc->config.txpowlimit = 2 * ah->curchan->chan->max_power;
+ ath9k_ps_wakeup(sc);
+ ath9k_hw_set_txpowerlimit(ah, sc->config.txpowlimit, false);
+ sc->curtxpow = ath9k_hw_regulatory(ah)->power_limit;
+ ath9k_ps_restore(sc);
+ }
- return ath_reg_notifier_apply(wiphy, request, reg);
+ return ret;
}
/*
@@ -314,7 +328,7 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
struct ath_buf *bf;
int i, bsize, error, desc_len;
- ath_dbg(common, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n",
+ ath_dbg(common, CONFIG, "%s DMA: %u buffers %u desc/buf\n",
name, nbuf, ndesc);
INIT_LIST_HEAD(head);
@@ -360,7 +374,7 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
goto fail;
}
ds = (u8 *) dd->dd_desc;
- ath_dbg(common, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
+ ath_dbg(common, CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
name, ds, (u32) dd->dd_desc_len,
ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);
@@ -408,9 +422,10 @@ fail:
static int ath9k_init_btcoex(struct ath_softc *sc)
{
struct ath_txq *txq;
+ struct ath_hw *ah = sc->sc_ah;
int r;
- switch (sc->sc_ah->btcoex_hw.scheme) {
+ switch (ath9k_hw_get_btcoex_scheme(sc->sc_ah)) {
case ATH_BTCOEX_CFG_NONE:
break;
case ATH_BTCOEX_CFG_2WIRE:
@@ -425,6 +440,37 @@ static int ath9k_init_btcoex(struct ath_softc *sc)
ath9k_hw_init_btcoex_hw(sc->sc_ah, txq->axq_qnum);
sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
break;
+ case ATH_BTCOEX_CFG_MCI:
+ sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
+ sc->btcoex.duty_cycle = ATH_BTCOEX_DEF_DUTY_CYCLE;
+ INIT_LIST_HEAD(&sc->btcoex.mci.info);
+
+ r = ath_mci_setup(sc);
+ if (r)
+ return r;
+
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI) {
+ ah->btcoex_hw.mci.ready = false;
+ ah->btcoex_hw.mci.bt_state = 0;
+ ah->btcoex_hw.mci.bt_ver_major = 3;
+ ah->btcoex_hw.mci.bt_ver_minor = 0;
+ ah->btcoex_hw.mci.bt_version_known = false;
+ ah->btcoex_hw.mci.update_2g5g = true;
+ ah->btcoex_hw.mci.is_2g = true;
+ ah->btcoex_hw.mci.wlan_channels_update = false;
+ ah->btcoex_hw.mci.wlan_channels[0] = 0x00000000;
+ ah->btcoex_hw.mci.wlan_channels[1] = 0xffffffff;
+ ah->btcoex_hw.mci.wlan_channels[2] = 0xffffffff;
+ ah->btcoex_hw.mci.wlan_channels[3] = 0x7fffffff;
+ ah->btcoex_hw.mci.query_bt = true;
+ ah->btcoex_hw.mci.unhalt_bt_gpm = true;
+ ah->btcoex_hw.mci.halted_bt_gpm = false;
+ ah->btcoex_hw.mci.need_flush_btinfo = false;
+ ah->btcoex_hw.mci.wlan_cal_seq = 0;
+ ah->btcoex_hw.mci.wlan_cal_done = 0;
+ ah->btcoex_hw.mci.config = 0x2201;
+ }
+ break;
default:
WARN_ON(1);
break;
@@ -695,6 +741,7 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+ hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
hw->queues = 4;
hw->max_rates = 4;
@@ -833,9 +880,12 @@ static void ath9k_deinit_softc(struct ath_softc *sc)
kfree(sc->sbands[IEEE80211_BAND_5GHZ].channels);
if ((sc->btcoex.no_stomp_timer) &&
- sc->sc_ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
+ ath9k_hw_get_btcoex_scheme(sc->sc_ah) == ATH_BTCOEX_CFG_3WIRE)
ath_gen_timer_free(sc->sc_ah, sc->btcoex.no_stomp_timer);
+ if (ath9k_hw_get_btcoex_scheme(sc->sc_ah) == ATH_BTCOEX_CFG_MCI)
+ ath_mci_cleanup(sc);
+
for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
if (ATH_TXQ_SETUP(sc, i))
ath_tx_cleanupq(sc, &sc->tx.txq[i]);
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index ecdb6fd29079..e196aba77acf 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -21,7 +21,7 @@
static void ath9k_hw_set_txq_interrupts(struct ath_hw *ah,
struct ath9k_tx_queue_info *qi)
{
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_INTERRUPT,
+ ath_dbg(ath9k_hw_common(ah), INTERRUPT,
"tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n",
ah->txok_interrupt_mask, ah->txerr_interrupt_mask,
ah->txdesc_interrupt_mask, ah->txeol_interrupt_mask,
@@ -57,8 +57,7 @@ EXPORT_SYMBOL(ath9k_hw_puttxbuf);
void ath9k_hw_txstart(struct ath_hw *ah, u32 q)
{
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_QUEUE,
- "Enable TXE on queue: %u\n", q);
+ ath_dbg(ath9k_hw_common(ah), QUEUE, "Enable TXE on queue: %u\n", q);
REG_WRITE(ah, AR_Q_TXE, 1 << q);
}
EXPORT_SYMBOL(ath9k_hw_txstart);
@@ -202,12 +201,12 @@ bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q,
qi = &ah->txq[q];
if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
- ath_dbg(common, ATH_DBG_QUEUE,
+ ath_dbg(common, QUEUE,
"Set TXQ properties, inactive queue: %u\n", q);
return false;
}
- ath_dbg(common, ATH_DBG_QUEUE, "Set queue properties for: %u\n", q);
+ ath_dbg(common, QUEUE, "Set queue properties for: %u\n", q);
qi->tqi_ver = qinfo->tqi_ver;
qi->tqi_subtype = qinfo->tqi_subtype;
@@ -266,7 +265,7 @@ bool ath9k_hw_get_txq_props(struct ath_hw *ah, int q,
qi = &ah->txq[q];
if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
- ath_dbg(common, ATH_DBG_QUEUE,
+ ath_dbg(common, QUEUE,
"Get TXQ properties, inactive queue: %u\n", q);
return false;
}
@@ -325,7 +324,7 @@ int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type,
return -1;
}
- ath_dbg(common, ATH_DBG_QUEUE, "Setup TX queue: %u\n", q);
+ ath_dbg(common, QUEUE, "Setup TX queue: %u\n", q);
qi = &ah->txq[q];
if (qi->tqi_type != ATH9K_TX_QUEUE_INACTIVE) {
@@ -348,12 +347,11 @@ bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q)
qi = &ah->txq[q];
if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
- ath_dbg(common, ATH_DBG_QUEUE,
- "Release TXQ, inactive queue: %u\n", q);
+ ath_dbg(common, QUEUE, "Release TXQ, inactive queue: %u\n", q);
return false;
}
- ath_dbg(common, ATH_DBG_QUEUE, "Release TX queue: %u\n", q);
+ ath_dbg(common, QUEUE, "Release TX queue: %u\n", q);
qi->tqi_type = ATH9K_TX_QUEUE_INACTIVE;
ah->txok_interrupt_mask &= ~(1 << q);
@@ -376,12 +374,11 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
qi = &ah->txq[q];
if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
- ath_dbg(common, ATH_DBG_QUEUE,
- "Reset TXQ, inactive queue: %u\n", q);
+ ath_dbg(common, QUEUE, "Reset TXQ, inactive queue: %u\n", q);
return true;
}
- ath_dbg(common, ATH_DBG_QUEUE, "Reset TX queue: %u\n", q);
+ ath_dbg(common, QUEUE, "Reset TX queue: %u\n", q);
if (qi->tqi_cwmin == ATH9K_TXQ_USEDEFAULT) {
if (chan && IS_CHAN_B(chan))
@@ -621,10 +618,11 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
rs->rs_status |= ATH9K_RXERR_DECRYPT;
else if (ads.ds_rxstatus8 & AR_MichaelErr)
rs->rs_status |= ATH9K_RXERR_MIC;
- if (ads.ds_rxstatus8 & AR_KeyMiss)
- rs->rs_status |= ATH9K_RXERR_KEYMISS;
}
+ if (ads.ds_rxstatus8 & AR_KeyMiss)
+ rs->rs_status |= ATH9K_RXERR_KEYMISS;
+
return 0;
}
EXPORT_SYMBOL(ath9k_hw_rxprocdesc);
@@ -760,7 +758,10 @@ bool ath9k_hw_intrpend(struct ath_hw *ah)
return true;
host_isr = REG_READ(ah, AR_INTR_ASYNC_CAUSE);
- if ((host_isr & AR_INTR_MAC_IRQ) && (host_isr != AR_INTR_SPURIOUS))
+
+ if (((host_isr & AR_INTR_MAC_IRQ) ||
+ (host_isr & AR_INTR_ASYNC_MASK_MCI)) &&
+ (host_isr != AR_INTR_SPURIOUS))
return true;
host_isr = REG_READ(ah, AR_INTR_SYNC_CAUSE);
@@ -781,7 +782,7 @@ void ath9k_hw_disable_interrupts(struct ath_hw *ah)
else
atomic_dec(&ah->intr_ref_cnt);
- ath_dbg(common, ATH_DBG_INTERRUPT, "disable IER\n");
+ ath_dbg(common, INTERRUPT, "disable IER\n");
REG_WRITE(ah, AR_IER, AR_IER_DISABLE);
(void) REG_READ(ah, AR_IER);
if (!AR_SREV_9100(ah)) {
@@ -798,13 +799,13 @@ void ath9k_hw_enable_interrupts(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
u32 sync_default = AR_INTR_SYNC_DEFAULT;
+ u32 async_mask;
if (!(ah->imask & ATH9K_INT_GLOBAL))
return;
if (!atomic_inc_and_test(&ah->intr_ref_cnt)) {
- ath_dbg(common, ATH_DBG_INTERRUPT,
- "Do not enable IER ref count %d\n",
+ ath_dbg(common, INTERRUPT, "Do not enable IER ref count %d\n",
atomic_read(&ah->intr_ref_cnt));
return;
}
@@ -812,18 +813,21 @@ void ath9k_hw_enable_interrupts(struct ath_hw *ah)
if (AR_SREV_9340(ah))
sync_default &= ~AR_INTR_SYNC_HOST1_FATAL;
- ath_dbg(common, ATH_DBG_INTERRUPT, "enable IER\n");
+ async_mask = AR_INTR_MAC_IRQ;
+
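+ /* Also enable the MCI bit in the async interrupt enable/mask registers */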
+ if (ah->imask & ATH9K_INT_MCI)
+ async_mask |= AR_INTR_ASYNC_MASK_MCI;
+
+ ath_dbg(common, INTERRUPT, "enable IER\n");
REG_WRITE(ah, AR_IER, AR_IER_ENABLE);
if (!AR_SREV_9100(ah)) {
- REG_WRITE(ah, AR_INTR_ASYNC_ENABLE,
- AR_INTR_MAC_IRQ);
- REG_WRITE(ah, AR_INTR_ASYNC_MASK, AR_INTR_MAC_IRQ);
-
+ REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, async_mask);
+ REG_WRITE(ah, AR_INTR_ASYNC_MASK, async_mask);
REG_WRITE(ah, AR_INTR_SYNC_ENABLE, sync_default);
REG_WRITE(ah, AR_INTR_SYNC_MASK, sync_default);
}
- ath_dbg(common, ATH_DBG_INTERRUPT, "AR_IMR 0x%x IER 0x%x\n",
+ ath_dbg(common, INTERRUPT, "AR_IMR 0x%x IER 0x%x\n",
REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER));
}
EXPORT_SYMBOL(ath9k_hw_enable_interrupts);
@@ -838,7 +842,7 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah)
if (!(ints & ATH9K_INT_GLOBAL))
ath9k_hw_disable_interrupts(ah);
- ath_dbg(common, ATH_DBG_INTERRUPT, "New interrupt mask 0x%x\n", ints);
+ ath_dbg(common, INTERRUPT, "New interrupt mask 0x%x\n", ints);
mask = ints & ATH9K_INT_COMMON;
mask2 = 0;
@@ -901,7 +905,7 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah)
mask2 |= AR_IMR_S2_CST;
}
- ath_dbg(common, ATH_DBG_INTERRUPT, "new IMR 0x%x\n", mask);
+ ath_dbg(common, INTERRUPT, "new IMR 0x%x\n", mask);
REG_WRITE(ah, AR_IMR, mask);
ah->imrs2_reg &= ~(AR_IMR_S2_TIM | AR_IMR_S2_DTIM | AR_IMR_S2_DTIMSYNC |
AR_IMR_S2_CABEND | AR_IMR_S2_CABTO |
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 93fbe6f40898..4a00806e2852 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -118,7 +118,7 @@ void ath9k_ps_restore(struct ath_softc *sc)
if (--sc->ps_usecount != 0)
goto unlock;
- if (sc->ps_idle)
+ if (sc->ps_idle && (sc->ps_flags & PS_WAIT_FOR_TX_ACK))
mode = ATH9K_PM_FULL_SLEEP;
else if (sc->ps_enabled &&
!(sc->ps_flags & (PS_WAIT_FOR_BEACON |
@@ -286,7 +286,7 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
ath_start_ani(common);
}
- if (ath9k_hw_ops(ah)->antdiv_comb_conf_get && sc->ant_rx != 3) {
+ if ((ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) && sc->ant_rx != 3) {
struct ath_hw_antcomb_conf div_ant_conf;
u8 lna_conf;
@@ -332,14 +332,14 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan,
hchan = ah->curchan;
}
- if (fastcc && !ath9k_hw_check_alive(ah))
+ if (fastcc && (ah->chip_fullsleep ||
+ !ath9k_hw_check_alive(ah)))
fastcc = false;
if (!ath_prepare_reset(sc, retry_tx, flush))
fastcc = false;
- ath_dbg(common, ATH_DBG_CONFIG,
- "Reset to %u MHz, HT40: %d fastcc: %d\n",
+ ath_dbg(common, CONFIG, "Reset to %u MHz, HT40: %d fastcc: %d\n",
hchan->channel, !!(hchan->channelFlags & (CHANNEL_HT40MINUS |
CHANNEL_HT40PLUS)),
fastcc);
@@ -428,7 +428,7 @@ static bool ath_paprd_send_frame(struct ath_softc *sc, struct sk_buff *skb, int
txctl.paprd = BIT(chain);
if (ath_tx_start(hw, skb, &txctl) != 0) {
- ath_dbg(common, ATH_DBG_CALIBRATE, "PAPRD TX failed\n");
+ ath_dbg(common, CALIBRATE, "PAPRD TX failed\n");
dev_kfree_skb_any(skb);
return false;
}
@@ -437,7 +437,7 @@ static bool ath_paprd_send_frame(struct ath_softc *sc, struct sk_buff *skb, int
msecs_to_jiffies(ATH_PAPRD_TIMEOUT));
if (!time_left)
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"Timeout waiting for paprd training on TX chain %d\n",
chain);
@@ -486,27 +486,27 @@ void ath_paprd_calibrate(struct work_struct *work)
chain_ok = 0;
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Sending PAPRD frame for thermal measurement "
- "on chain %d\n", chain);
+ ath_dbg(common, CALIBRATE,
+ "Sending PAPRD frame for thermal measurement on chain %d\n",
+ chain);
if (!ath_paprd_send_frame(sc, skb, chain))
goto fail_paprd;
ar9003_paprd_setup_gain_table(ah, chain);
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"Sending PAPRD training frame on chain %d\n", chain);
if (!ath_paprd_send_frame(sc, skb, chain))
goto fail_paprd;
if (!ar9003_paprd_is_done(ah)) {
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"PAPRD not yet done on chain %d\n", chain);
break;
}
if (ar9003_paprd_create_curve(ah, caldata, chain)) {
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"PAPRD create curve failed on chain %d\n",
chain);
break;
@@ -561,7 +561,6 @@ void ath_ani_calibrate(unsigned long data)
/* Long calibration runs independently of short calibration. */
if ((timestamp - common->ani.longcal_timer) >= long_cal_interval) {
longcal = true;
- ath_dbg(common, ATH_DBG_ANI, "longcal @%lu\n", jiffies);
common->ani.longcal_timer = timestamp;
}
@@ -569,8 +568,6 @@ void ath_ani_calibrate(unsigned long data)
if (!common->ani.caldone) {
if ((timestamp - common->ani.shortcal_timer) >= short_cal_interval) {
shortcal = true;
- ath_dbg(common, ATH_DBG_ANI,
- "shortcal @%lu\n", jiffies);
common->ani.shortcal_timer = timestamp;
common->ani.resetcal_timer = timestamp;
}
@@ -584,8 +581,9 @@ void ath_ani_calibrate(unsigned long data)
}
/* Verify whether we must check ANI */
- if ((timestamp - common->ani.checkani_timer) >=
- ah->config.ani_poll_interval) {
+ if (sc->sc_ah->config.enable_ani &&
+ (timestamp - common->ani.checkani_timer) >=
+ ah->config.ani_poll_interval) {
aniflag = true;
common->ani.checkani_timer = timestamp;
}
@@ -605,6 +603,12 @@ void ath_ani_calibrate(unsigned long data)
ah->rxchainmask, longcal);
}
+ ath_dbg(common, ANI,
+ "Calibration @%lu finished: %s %s %s, caldone: %s\n",
+ jiffies,
+ longcal ? "long" : "", shortcal ? "short" : "",
+ aniflag ? "ani" : "", common->ani.caldone ? "true" : "false");
+
ath9k_ps_restore(sc);
set_timer:
@@ -630,7 +634,8 @@ set_timer:
}
}
-static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta)
+static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta,
+ struct ieee80211_vif *vif)
{
struct ath_node *an;
an = (struct ath_node *)sta->drv_priv;
@@ -639,8 +644,9 @@ static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta)
spin_lock(&sc->nodes_lock);
list_add(&an->list, &sc->nodes);
spin_unlock(&sc->nodes_lock);
- an->sta = sta;
#endif
+ an->sta = sta;
+ an->vif = vif;
if (sc->sc_flags & SC_OP_TXAGGR) {
ath_tx_node_init(sc, an);
an->maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
@@ -709,8 +715,7 @@ void ath9k_tasklet(unsigned long data)
* TSF sync does not look correct; remain awake to sync with
* the next Beacon.
*/
- ath_dbg(common, ATH_DBG_PS,
- "TSFOOR - Sync with next Beacon\n");
+ ath_dbg(common, PS, "TSFOOR - Sync with next Beacon\n");
sc->ps_flags |= PS_WAIT_FOR_BEACON | PS_BEACON_SYNC;
}
@@ -736,10 +741,13 @@ void ath9k_tasklet(unsigned long data)
ath_tx_tasklet(sc);
}
- if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
+ if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE)
if (status & ATH9K_INT_GENTIMER)
ath_gen_timer_isr(sc->sc_ah);
+ if ((status & ATH9K_INT_MCI) && (ah->caps.hw_caps & ATH9K_HW_CAP_MCI))
+ ath_mci_intr(sc);
+
out:
/* re-enable hardware interrupt */
ath9k_hw_enable_interrupts(ah);
@@ -762,7 +770,8 @@ irqreturn_t ath_isr(int irq, void *dev)
ATH9K_INT_BMISS | \
ATH9K_INT_CST | \
ATH9K_INT_TSFOOR | \
- ATH9K_INT_GENTIMER)
+ ATH9K_INT_GENTIMER | \
+ ATH9K_INT_MCI)
struct ath_softc *sc = dev;
struct ath_hw *ah = sc->sc_ah;
@@ -880,82 +889,6 @@ chip_reset:
#undef SCHED_INTR
}
-static void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
-{
- struct ath_hw *ah = sc->sc_ah;
- struct ath_common *common = ath9k_hw_common(ah);
- struct ieee80211_channel *channel = hw->conf.channel;
- int r;
-
- ath9k_ps_wakeup(sc);
- spin_lock_bh(&sc->sc_pcu_lock);
- atomic_set(&ah->intr_ref_cnt, -1);
-
- ath9k_hw_configpcipowersave(ah, false);
-
- if (!ah->curchan)
- ah->curchan = ath9k_cmn_get_curchannel(sc->hw, ah);
-
- r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
- if (r) {
- ath_err(common,
- "Unable to reset channel (%u MHz), reset status %d\n",
- channel->center_freq, r);
- }
-
- ath_complete_reset(sc, true);
-
- /* Enable LED */
- ath9k_hw_cfg_output(ah, ah->led_pin,
- AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
- ath9k_hw_set_gpio(ah, ah->led_pin, 0);
-
- spin_unlock_bh(&sc->sc_pcu_lock);
-
- ath9k_ps_restore(sc);
-}
-
-void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw)
-{
- struct ath_hw *ah = sc->sc_ah;
- struct ieee80211_channel *channel = hw->conf.channel;
- int r;
-
- ath9k_ps_wakeup(sc);
-
- ath_cancel_work(sc);
-
- spin_lock_bh(&sc->sc_pcu_lock);
-
- /*
- * Keep the LED on when the radio is disabled
- * during idle unassociated state.
- */
- if (!sc->ps_idle) {
- ath9k_hw_set_gpio(ah, ah->led_pin, 1);
- ath9k_hw_cfg_gpio_input(ah, ah->led_pin);
- }
-
- ath_prepare_reset(sc, false, true);
-
- if (!ah->curchan)
- ah->curchan = ath9k_cmn_get_curchannel(hw, ah);
-
- r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
- if (r) {
- ath_err(ath9k_hw_common(sc->sc_ah),
- "Unable to reset channel (%u MHz), reset status %d\n",
- channel->center_freq, r);
- }
-
- ath9k_hw_phy_disable(ah);
-
- ath9k_hw_configpcipowersave(ah, true);
-
- spin_unlock_bh(&sc->sc_pcu_lock);
- ath9k_ps_restore(sc);
-}
-
static int ath_reset(struct ath_softc *sc, bool retry_tx)
{
int r;
@@ -1002,8 +935,8 @@ void ath_hw_check(struct work_struct *work)
busy = ath_update_survey_stats(sc);
spin_unlock_irqrestore(&common->cc_lock, flags);
- ath_dbg(common, ATH_DBG_RESET, "Possible baseband hang, "
- "busy=%d (try %d)\n", busy, sc->hw_busy_count + 1);
+ ath_dbg(common, RESET, "Possible baseband hang, busy=%d (try %d)\n",
+ busy, sc->hw_busy_count + 1);
if (busy >= 99) {
if (++sc->hw_busy_count >= 3) {
RESET_STAT_INC(sc, RESET_TYPE_BB_HANG);
@@ -1026,8 +959,7 @@ static void ath_hw_pll_rx_hang_check(struct ath_softc *sc, u32 pll_sqsum)
count++;
if (count == 3) {
/* Rx is hung for more than 500ms. Reset it */
- ath_dbg(common, ATH_DBG_RESET,
- "Possible RX hang, resetting");
+ ath_dbg(common, RESET, "Possible RX hang, resetting\n");
RESET_STAT_INC(sc, RESET_TYPE_PLL_HANG);
ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
count = 0;
@@ -1067,7 +999,7 @@ static int ath9k_start(struct ieee80211_hw *hw)
struct ath9k_channel *init_channel;
int r;
- ath_dbg(common, ATH_DBG_CONFIG,
+ ath_dbg(common, CONFIG,
"Starting driver with initial channel: %d MHz\n",
curchan->center_freq);
@@ -1091,6 +1023,9 @@ static int ath9k_start(struct ieee80211_hw *hw)
* and then setup of the interrupt mask.
*/
spin_lock_bh(&sc->sc_pcu_lock);
+
+ atomic_set(&ah->intr_ref_cnt, -1);
+
r = ath9k_hw_reset(ah, init_channel, ah->caldata, false);
if (r) {
ath_err(common,
@@ -1117,6 +1052,9 @@ static int ath9k_start(struct ieee80211_hw *hw)
if (ah->caps.hw_caps & ATH9K_HW_CAP_HT)
ah->imask |= ATH9K_INT_CST;
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
+ ah->imask |= ATH9K_INT_MCI;
+
sc->sc_flags &= ~SC_OP_INVALID;
sc->sc_ah->is_monitoring = false;
@@ -1129,15 +1067,28 @@ static int ath9k_start(struct ieee80211_hw *hw)
goto mutex_unlock;
}
+ if (ah->led_pin >= 0) {
+ ath9k_hw_cfg_output(ah, ah->led_pin,
+ AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+ ath9k_hw_set_gpio(ah, ah->led_pin, 0);
+ }
+
+ /*
+ * Reset key cache to sane defaults (all entries cleared) instead of
+ * semi-random values after suspend/resume.
+ */
+ ath9k_cmn_init_crypto(sc->sc_ah);
+
spin_unlock_bh(&sc->sc_pcu_lock);
- if ((ah->btcoex_hw.scheme != ATH_BTCOEX_CFG_NONE) &&
+ if ((ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) &&
!ah->btcoex_hw.enabled) {
- ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
- AR_STOMP_LOW_WLAN_WGHT);
+ if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI))
+ ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
+ AR_STOMP_LOW_WLAN_WGHT);
ath9k_hw_btcoex_enable(ah);
- if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
+ if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE)
ath9k_btcoex_timer_resume(sc);
}
@@ -1167,12 +1118,19 @@ static void ath9k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
if (ieee80211_is_data(hdr->frame_control) &&
!ieee80211_is_nullfunc(hdr->frame_control) &&
!ieee80211_has_pm(hdr->frame_control)) {
- ath_dbg(common, ATH_DBG_PS,
+ ath_dbg(common, PS,
"Add PM=1 for a TX frame while in PS mode\n");
hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
}
}
+ /*
+ * Cannot transmit while the hardware is in full sleep; it first needs
+ * a full chip reset to recover from that state.
+ */
+ if (unlikely(sc->sc_ah->power_mode == ATH9K_PM_FULL_SLEEP))
+ goto exit;
+
if (unlikely(sc->sc_ah->power_mode != ATH9K_PM_AWAKE)) {
/*
* We are using PS-Poll and mac80211 can request TX while in
@@ -1183,12 +1141,11 @@ static void ath9k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
ath9k_hw_setrxabort(sc->sc_ah, 0);
if (ieee80211_is_pspoll(hdr->frame_control)) {
- ath_dbg(common, ATH_DBG_PS,
+ ath_dbg(common, PS,
"Sending PS-Poll to pick a buffered frame\n");
sc->ps_flags |= PS_WAIT_FOR_PSPOLL_DATA;
} else {
- ath_dbg(common, ATH_DBG_PS,
- "Wake up to complete TX\n");
+ ath_dbg(common, PS, "Wake up to complete TX\n");
sc->ps_flags |= PS_WAIT_FOR_TX_ACK;
}
/*
@@ -1202,10 +1159,10 @@ static void ath9k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
memset(&txctl, 0, sizeof(struct ath_tx_control));
txctl.txq = sc->tx.txq_map[skb_get_queue_mapping(skb)];
- ath_dbg(common, ATH_DBG_XMIT, "transmitting packet, skb: %p\n", skb);
+ ath_dbg(common, XMIT, "transmitting packet, skb: %p\n", skb);
if (ath_tx_start(hw, skb, &txctl) != 0) {
- ath_dbg(common, ATH_DBG_XMIT, "TX failed\n");
+ ath_dbg(common, XMIT, "TX failed\n");
goto exit;
}
@@ -1219,13 +1176,14 @@ static void ath9k_stop(struct ieee80211_hw *hw)
struct ath_softc *sc = hw->priv;
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
+ bool prev_idle;
mutex_lock(&sc->mutex);
ath_cancel_work(sc);
if (sc->sc_flags & SC_OP_INVALID) {
- ath_dbg(common, ATH_DBG_ANY, "Device not present\n");
+ ath_dbg(common, ANY, "Device not present\n");
mutex_unlock(&sc->mutex);
return;
}
@@ -1233,10 +1191,12 @@ static void ath9k_stop(struct ieee80211_hw *hw)
/* Ensure HW is awake when we try to shut it down. */
ath9k_ps_wakeup(sc);
- if (ah->btcoex_hw.enabled) {
+ if (ah->btcoex_hw.enabled &&
+ ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) {
ath9k_hw_btcoex_disable(ah);
- if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
+ if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE)
ath9k_btcoex_timer_pause(sc);
+ ath_mci_flush_profile(&sc->btcoex.mci);
}
spin_lock_bh(&sc->sc_pcu_lock);
@@ -1248,39 +1208,49 @@ static void ath9k_stop(struct ieee80211_hw *hw)
* before setting the invalid flag. */
ath9k_hw_disable_interrupts(ah);
- if (!(sc->sc_flags & SC_OP_INVALID)) {
- ath_drain_all_txq(sc, false);
- ath_stoprecv(sc);
- ath9k_hw_phy_disable(ah);
- } else
- sc->rx.rxlink = NULL;
+ spin_unlock_bh(&sc->sc_pcu_lock);
+
+ /* we can now sync irq and kill any running tasklets, since we already
+ * disabled interrupts and not holding a spin lock */
+ synchronize_irq(sc->irq);
+ tasklet_kill(&sc->intr_tq);
+ tasklet_kill(&sc->bcon_tasklet);
+
+ prev_idle = sc->ps_idle;
+ sc->ps_idle = true;
+
+ spin_lock_bh(&sc->sc_pcu_lock);
+
+ if (ah->led_pin >= 0) {
+ ath9k_hw_set_gpio(ah, ah->led_pin, 1);
+ ath9k_hw_cfg_gpio_input(ah, ah->led_pin);
+ }
+
+ ath_prepare_reset(sc, false, true);
if (sc->rx.frag) {
dev_kfree_skb_any(sc->rx.frag);
sc->rx.frag = NULL;
}
- /* disable HAL and put h/w to sleep */
- ath9k_hw_disable(ah);
+ if (!ah->curchan)
+ ah->curchan = ath9k_cmn_get_curchannel(hw, ah);
- spin_unlock_bh(&sc->sc_pcu_lock);
+ ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
+ ath9k_hw_phy_disable(ah);
- /* we can now sync irq and kill any running tasklets, since we already
- * disabled interrupts and not holding a spin lock */
- synchronize_irq(sc->irq);
- tasklet_kill(&sc->intr_tq);
- tasklet_kill(&sc->bcon_tasklet);
+ ath9k_hw_configpcipowersave(ah, true);
- ath9k_ps_restore(sc);
+ spin_unlock_bh(&sc->sc_pcu_lock);
- sc->ps_idle = true;
- ath_radio_disable(sc, hw);
+ ath9k_ps_restore(sc);
sc->sc_flags |= SC_OP_INVALID;
+ sc->ps_idle = prev_idle;
mutex_unlock(&sc->mutex);
- ath_dbg(common, ATH_DBG_CONFIG, "Driver halt\n");
+ ath_dbg(common, CONFIG, "Driver halt\n");
}
bool ath9k_uses_beacons(int type)
@@ -1495,8 +1465,7 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
goto out;
}
- ath_dbg(common, ATH_DBG_CONFIG,
- "Attach a VIF of type: %d\n", vif->type);
+ ath_dbg(common, CONFIG, "Attach a VIF of type: %d\n", vif->type);
sc->nvifs++;
@@ -1516,7 +1485,7 @@ static int ath9k_change_interface(struct ieee80211_hw *hw,
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
int ret = 0;
- ath_dbg(common, ATH_DBG_CONFIG, "Change Interface\n");
+ ath_dbg(common, CONFIG, "Change Interface\n");
mutex_lock(&sc->mutex);
ath9k_ps_wakeup(sc);
@@ -1559,7 +1528,7 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
struct ath_softc *sc = hw->priv;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- ath_dbg(common, ATH_DBG_CONFIG, "Detach Interface\n");
+ ath_dbg(common, CONFIG, "Detach Interface\n");
ath9k_ps_wakeup(sc);
mutex_lock(&sc->mutex);
@@ -1616,8 +1585,8 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
struct ieee80211_conf *conf = &hw->conf;
- bool disable_radio = false;
+ ath9k_ps_wakeup(sc);
mutex_lock(&sc->mutex);
/*
@@ -1628,13 +1597,8 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
*/
if (changed & IEEE80211_CONF_CHANGE_IDLE) {
sc->ps_idle = !!(conf->flags & IEEE80211_CONF_IDLE);
- if (!sc->ps_idle) {
- ath_radio_enable(sc, hw);
- ath_dbg(common, ATH_DBG_CONFIG,
- "not-idle: enabling radio\n");
- } else {
- disable_radio = true;
- }
+ if (sc->ps_idle)
+ ath_cancel_work(sc);
}
/*
@@ -1655,19 +1619,16 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
if (conf->flags & IEEE80211_CONF_MONITOR) {
- ath_dbg(common, ATH_DBG_CONFIG,
- "Monitor mode is enabled\n");
+ ath_dbg(common, CONFIG, "Monitor mode is enabled\n");
sc->sc_ah->is_monitoring = true;
} else {
- ath_dbg(common, ATH_DBG_CONFIG,
- "Monitor mode is disabled\n");
+ ath_dbg(common, CONFIG, "Monitor mode is disabled\n");
sc->sc_ah->is_monitoring = false;
}
}
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
struct ieee80211_channel *curchan = hw->conf.channel;
- struct ath9k_channel old_chan;
int pos = curchan->hw_value;
int old_pos = -1;
unsigned long flags;
@@ -1680,8 +1641,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
else
sc->sc_flags &= ~SC_OP_OFFCHANNEL;
- ath_dbg(common, ATH_DBG_CONFIG,
- "Set channel: %d MHz type: %d\n",
+ ath_dbg(common, CONFIG, "Set channel: %d MHz type: %d\n",
curchan->center_freq, conf->channel_type);
/* update survey stats for the old channel before switching */
@@ -1693,11 +1653,8 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
* Preserve the current channel values, before updating
* the same channel
*/
- if (old_pos == pos) {
- memcpy(&old_chan, &sc->sc_ah->channels[pos],
- sizeof(struct ath9k_channel));
- ah->curchan = &old_chan;
- }
+ if (ah->curchan && (old_pos == pos))
+ ath9k_hw_getnf(ah, ah->curchan);
ath9k_cmn_update_ichannel(&sc->sc_ah->channels[pos],
curchan, conf->channel_type);
@@ -1738,21 +1695,14 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
}
if (changed & IEEE80211_CONF_CHANGE_POWER) {
- ath_dbg(common, ATH_DBG_CONFIG,
- "Set power: %d\n", conf->power_level);
+ ath_dbg(common, CONFIG, "Set power: %d\n", conf->power_level);
sc->config.txpowlimit = 2 * conf->power_level;
- ath9k_ps_wakeup(sc);
ath9k_cmn_update_txpow(ah, sc->curtxpow,
sc->config.txpowlimit, &sc->curtxpow);
- ath9k_ps_restore(sc);
- }
-
- if (disable_radio) {
- ath_dbg(common, ATH_DBG_CONFIG, "idle: disabling radio\n");
- ath_radio_disable(sc, hw);
}
mutex_unlock(&sc->mutex);
+ ath9k_ps_restore(sc);
return 0;
}
@@ -1785,8 +1735,8 @@ static void ath9k_configure_filter(struct ieee80211_hw *hw,
ath9k_hw_setrxfilter(sc->sc_ah, rfilt);
ath9k_ps_restore(sc);
- ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_CONFIG,
- "Set HW RX filter: 0x%x\n", rfilt);
+ ath_dbg(ath9k_hw_common(sc->sc_ah), CONFIG, "Set HW RX filter: 0x%x\n",
+ rfilt);
}
static int ath9k_sta_add(struct ieee80211_hw *hw,
@@ -1798,7 +1748,7 @@ static int ath9k_sta_add(struct ieee80211_hw *hw,
struct ath_node *an = (struct ath_node *) sta->drv_priv;
struct ieee80211_key_conf ps_key = { };
- ath_node_attach(sc, sta);
+ ath_node_attach(sc, sta, vif);
if (vif->type != NL80211_IFTYPE_AP &&
vif->type != NL80211_IFTYPE_AP_VLAN)
@@ -1843,6 +1793,9 @@ static void ath9k_sta_notify(struct ieee80211_hw *hw,
struct ath_softc *sc = hw->priv;
struct ath_node *an = (struct ath_node *) sta->drv_priv;
+ if (!(sc->sc_flags & SC_OP_TXAGGR))
+ return;
+
switch (cmd) {
case STA_NOTIFY_SLEEP:
an->sleeping = true;
@@ -1880,7 +1833,7 @@ static int ath9k_conf_tx(struct ieee80211_hw *hw,
qi.tqi_cwmax = params->cw_max;
qi.tqi_burstTime = params->txop;
- ath_dbg(common, ATH_DBG_CONFIG,
+ ath_dbg(common, CONFIG,
"Configure tx [queue/halq] [%d/%d], aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n",
queue, txq->axq_qnum, params->aifs, params->cw_min,
params->cw_max, params->txop);
@@ -1912,7 +1865,8 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
if (ath9k_modparam_nohwcrypt)
return -ENOSPC;
- if (vif->type == NL80211_IFTYPE_ADHOC &&
+ if ((vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_MESH_POINT) &&
(key->cipher == WLAN_CIPHER_SUITE_TKIP ||
key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
@@ -1928,7 +1882,7 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
mutex_lock(&sc->mutex);
ath9k_ps_wakeup(sc);
- ath_dbg(common, ATH_DBG_CONFIG, "Set HW Key\n");
+ ath_dbg(common, CONFIG, "Set HW Key\n");
switch (cmd) {
case SET_KEY:
@@ -1980,9 +1934,8 @@ static void ath9k_bss_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
common->curaid = bss_conf->aid;
ath9k_hw_write_associd(sc->sc_ah);
- ath_dbg(common, ATH_DBG_CONFIG,
- "Bss Info ASSOC %d, bssid: %pM\n",
- bss_conf->aid, common->curbssid);
+ ath_dbg(common, CONFIG, "Bss Info ASSOC %d, bssid: %pM\n",
+ bss_conf->aid, common->curbssid);
ath_beacon_config(sc, vif);
/*
* Request a re-configuration of Beacon related timers
@@ -2013,8 +1966,7 @@ static void ath9k_config_bss(struct ath_softc *sc, struct ieee80211_vif *vif)
/* Reconfigure bss info */
if (avp->primary_sta_vif && !bss_conf->assoc) {
- ath_dbg(common, ATH_DBG_CONFIG,
- "Bss Info DISASSOC %d, bssid %pM\n",
+ ath_dbg(common, CONFIG, "Bss Info DISASSOC %d, bssid %pM\n",
common->curaid, common->curbssid);
sc->sc_flags &= ~(SC_OP_PRIM_STA_VIF | SC_OP_BEACONS);
avp->primary_sta_vif = false;
@@ -2056,7 +2008,7 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_BSSID) {
ath9k_config_bss(sc, vif);
- ath_dbg(common, ATH_DBG_CONFIG, "BSSID: %pM aid: 0x%x\n",
+ ath_dbg(common, CONFIG, "BSSID: %pM aid: 0x%x\n",
common->curbssid, common->curaid);
}
@@ -2134,7 +2086,7 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
}
if (changed & BSS_CHANGED_ERP_PREAMBLE) {
- ath_dbg(common, ATH_DBG_CONFIG, "BSS Changed PREAMBLE %d\n",
+ ath_dbg(common, CONFIG, "BSS Changed PREAMBLE %d\n",
bss_conf->use_short_preamble);
if (bss_conf->use_short_preamble)
sc->sc_flags |= SC_OP_PREAMBLE_SHORT;
@@ -2143,7 +2095,7 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
}
if (changed & BSS_CHANGED_ERP_CTS_PROT) {
- ath_dbg(common, ATH_DBG_CONFIG, "BSS Changed CTS PROT %d\n",
+ ath_dbg(common, CONFIG, "BSS Changed CTS PROT %d\n",
bss_conf->use_cts_prot);
if (bss_conf->use_cts_prot &&
hw->conf.channel->band != IEEE80211_BAND_5GHZ)
@@ -2309,20 +2261,17 @@ static void ath9k_flush(struct ieee80211_hw *hw, bool drop)
cancel_delayed_work_sync(&sc->tx_complete_work);
if (ah->ah_flags & AH_UNPLUGGED) {
- ath_dbg(common, ATH_DBG_ANY, "Device has been unplugged!\n");
+ ath_dbg(common, ANY, "Device has been unplugged!\n");
mutex_unlock(&sc->mutex);
return;
}
if (sc->sc_flags & SC_OP_INVALID) {
- ath_dbg(common, ATH_DBG_ANY, "Device not present\n");
+ ath_dbg(common, ANY, "Device not present\n");
mutex_unlock(&sc->mutex);
return;
}
- if (drop)
- timeout = 1;
-
for (j = 0; j < timeout; j++) {
bool npend = false;
@@ -2340,21 +2289,22 @@ static void ath9k_flush(struct ieee80211_hw *hw, bool drop)
}
if (!npend)
- goto out;
+ break;
}
- ath9k_ps_wakeup(sc);
- spin_lock_bh(&sc->sc_pcu_lock);
- drain_txq = ath_drain_all_txq(sc, false);
- spin_unlock_bh(&sc->sc_pcu_lock);
+ if (drop) {
+ ath9k_ps_wakeup(sc);
+ spin_lock_bh(&sc->sc_pcu_lock);
+ drain_txq = ath_drain_all_txq(sc, false);
+ spin_unlock_bh(&sc->sc_pcu_lock);
- if (!drain_txq)
- ath_reset(sc, false);
+ if (!drain_txq)
+ ath_reset(sc, false);
- ath9k_ps_restore(sc);
- ieee80211_wake_queues(hw);
+ ath9k_ps_restore(sc);
+ ieee80211_wake_queues(hw);
+ }
-out:
ieee80211_queue_delayed_work(hw, &sc->tx_complete_work, 0);
mutex_unlock(&sc->mutex);
}
diff --git a/drivers/net/wireless/ath/ath9k/mci.c b/drivers/net/wireless/ath/ath9k/mci.c
new file mode 100644
index 000000000000..05c23ea4c633
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/mci.c
@@ -0,0 +1,668 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+
+#include "ath9k.h"
+#include "mci.h"
+
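+/* Share of the coex period (in %) given to BT, indexed by the number of active BT profiles */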
+static const u8 ath_mci_duty_cycle[] = { 0, 50, 60, 70, 80, 85, 90, 95, 98 };
+
+static struct ath_mci_profile_info*
+ath_mci_find_profile(struct ath_mci_profile *mci,
+ struct ath_mci_profile_info *info)
+{
+ struct ath_mci_profile_info *entry;
+
+ list_for_each_entry(entry, &mci->info, list) {
+ if (entry->conn_handle == info->conn_handle)
+ return entry;
+ }
+
+ /* Not found: callers rely on getting NULL back here */
+ return NULL;
+}
+
+static bool ath_mci_add_profile(struct ath_common *common,
+ struct ath_mci_profile *mci,
+ struct ath_mci_profile_info *info)
+{
+ struct ath_mci_profile_info *entry;
+
+ if ((mci->num_sco == ATH_MCI_MAX_SCO_PROFILE) &&
+ (info->type == MCI_GPM_COEX_PROFILE_VOICE)) {
+ ath_dbg(common, MCI,
+ "Too many SCO profile, failed to add new profile\n");
+ return false;
+ }
+
+ if (((NUM_PROF(mci) - mci->num_sco) == ATH_MCI_MAX_ACL_PROFILE) &&
+ (info->type != MCI_GPM_COEX_PROFILE_VOICE)) {
+ ath_dbg(common, MCI,
+ "Too many ACL profile, failed to add new profile\n");
+ return false;
+ }
+
+ entry = ath_mci_find_profile(mci, info);
+
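+ /*
+ * Only the first 10 bytes (the profile fields as received over GPM)
+ * are copied; the embedded list_head is left untouched.
+ */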
+ if (entry)
+ memcpy(entry, info, 10);
+ else {
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return false;
+
+ memcpy(entry, info, 10);
+ INC_PROF(mci, info);
+ list_add_tail(&entry->list, &mci->info);
+ }
+ return true;
+}
+
+static void ath_mci_del_profile(struct ath_common *common,
+ struct ath_mci_profile *mci,
+ struct ath_mci_profile_info *info)
+{
+ struct ath_mci_profile_info *entry;
+
+ entry = ath_mci_find_profile(mci, info);
+
+ if (!entry) {
+ ath_dbg(common, MCI, "Profile to be deleted not found\n");
+ return;
+ }
+ DEC_PROF(mci, entry);
+ list_del(&entry->list);
+ kfree(entry);
+}
+
+void ath_mci_flush_profile(struct ath_mci_profile *mci)
+{
+ struct ath_mci_profile_info *info, *tinfo;
+
+ list_for_each_entry_safe(info, tinfo, &mci->info, list) {
+ list_del(&info->list);
+ DEC_PROF(mci, info);
+ kfree(info);
+ }
+ mci->aggr_limit = 0;
+}
+
+static void ath_mci_adjust_aggr_limit(struct ath_btcoex *btcoex)
+{
+ struct ath_mci_profile *mci = &btcoex->mci;
+ u32 wlan_airtime = btcoex->btcoex_period *
+ (100 - btcoex->duty_cycle) / 100;
+
+ /*
+ * Scale: wlan_airtime is in ms, aggr_limit is in 0.25 ms.
+ * When wlan_airtime is less than 4 ms, the aggregation limit has to be
+ * adjusted to half of wlan_airtime to ensure that the aggregation can
+ * fit without colliding with BT traffic.
+ */
+ if ((wlan_airtime <= 4) &&
+ (!mci->aggr_limit || (mci->aggr_limit > (2 * wlan_airtime))))
+ mci->aggr_limit = 2 * wlan_airtime;
+}
+
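+/*
+ * Re-tune the BT coex period, duty cycle and A-MPDU aggregation limit
+ * according to the set of BT profiles currently reported over MCI.
+ */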
+static void ath_mci_update_scheme(struct ath_softc *sc)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_btcoex *btcoex = &sc->btcoex;
+ struct ath_mci_profile *mci = &btcoex->mci;
+ struct ath_mci_profile_info *info;
+ u32 num_profile = NUM_PROF(mci);
+
+ if (num_profile == 1) {
+ info = list_first_entry(&mci->info,
+ struct ath_mci_profile_info,
+ list);
+ if (mci->num_sco && info->T == 12) {
+ mci->aggr_limit = 8;
+ ath_dbg(common, MCI,
+ "Single SCO, aggregation limit 2 ms\n");
+ } else if ((info->type == MCI_GPM_COEX_PROFILE_BNEP) &&
+ !info->master) {
+ btcoex->btcoex_period = 60;
+ ath_dbg(common, MCI,
+ "Single slave PAN/FTP, bt period 60 ms\n");
+ } else if ((info->type == MCI_GPM_COEX_PROFILE_HID) &&
+ (info->T > 0 && info->T < 50) &&
+ (info->A > 1 || info->W > 1)) {
+ btcoex->duty_cycle = 30;
+ mci->aggr_limit = 8;
+ ath_dbg(common, MCI,
+ "Multiple attempt/timeout single HID "
+ "aggregation limit 2 ms dutycycle 30%%\n");
+ }
+ } else if ((num_profile == 2) && (mci->num_hid == 2)) {
+ btcoex->duty_cycle = 30;
+ mci->aggr_limit = 8;
+ ath_dbg(common, MCI,
+ "Two HIDs aggregation limit 2 ms dutycycle 30%%\n");
+ } else if (num_profile > 3) {
+ mci->aggr_limit = 6;
+ ath_dbg(common, MCI,
+ "Three or more profiles aggregation limit 1.5 ms\n");
+ }
+
+ if (IS_CHAN_2GHZ(sc->sc_ah->curchan)) {
+ if (IS_CHAN_HT(sc->sc_ah->curchan))
+ ath_mci_adjust_aggr_limit(btcoex);
+ else
+ btcoex->btcoex_period >>= 1;
+ }
+
+ ath9k_hw_btcoex_disable(sc->sc_ah);
+ ath9k_btcoex_timer_pause(sc);
+
+ if (IS_CHAN_5GHZ(sc->sc_ah->curchan))
+ return;
+
+ btcoex->duty_cycle += (mci->num_bdr ? ATH_MCI_MAX_DUTY_CYCLE : 0);
+ if (btcoex->duty_cycle > ATH_MCI_MAX_DUTY_CYCLE)
+ btcoex->duty_cycle = ATH_MCI_MAX_DUTY_CYCLE;
+
+ btcoex->btcoex_period *= 1000;
+ btcoex->btcoex_no_stomp = btcoex->btcoex_period *
+ (100 - btcoex->duty_cycle) / 100;
+
+ ath9k_hw_btcoex_enable(sc->sc_ah);
+ ath9k_btcoex_timer_resume(sc);
+}
+
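+/* Handle the BT calibration handshake (REQ/DONE/GRANT) received as GPM messages */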
+static void ath_mci_cal_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 payload[4] = {0, 0, 0, 0};
+
+ switch (opcode) {
+ case MCI_GPM_BT_CAL_REQ:
+
+ ath_dbg(common, MCI, "MCI received BT_CAL_REQ\n");
+
+ if (ar9003_mci_state(ah, MCI_STATE_BT, NULL) == MCI_BT_AWAKE) {
+ ar9003_mci_state(ah, MCI_STATE_SET_BT_CAL_START, NULL);
+ ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
+ } else
+ ath_dbg(common, MCI, "MCI State mismatches: %d\n",
+ ar9003_mci_state(ah, MCI_STATE_BT, NULL));
+
+ break;
+
+ case MCI_GPM_BT_CAL_DONE:
+
+ ath_dbg(common, MCI, "MCI received BT_CAL_DONE\n");
+
+ if (ar9003_mci_state(ah, MCI_STATE_BT, NULL) == MCI_BT_CAL)
+ ath_dbg(common, MCI, "MCI error illegal!\n");
+ else
+ ath_dbg(common, MCI, "MCI BT not in CAL state\n");
+
+ break;
+
+ case MCI_GPM_BT_CAL_GRANT:
+
+ ath_dbg(common, MCI, "MCI received BT_CAL_GRANT\n");
+
+ /* Send WLAN_CAL_DONE for now */
+ ath_dbg(common, MCI, "MCI send WLAN_CAL_DONE\n");
+ MCI_GPM_SET_CAL_TYPE(payload, MCI_GPM_WLAN_CAL_DONE);
+ ar9003_mci_send_message(sc->sc_ah, MCI_GPM, 0, payload,
+ 16, false, true);
+ break;
+
+ default:
+ ath_dbg(common, MCI, "MCI Unknown GPM CAL message\n");
+ break;
+ }
+}
+
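+/* Apply a BT profile add/remove notification and recompute the coex scheme */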
+static void ath_mci_process_profile(struct ath_softc *sc,
+ struct ath_mci_profile_info *info)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_btcoex *btcoex = &sc->btcoex;
+ struct ath_mci_profile *mci = &btcoex->mci;
+
+ if (info->start) {
+ if (!ath_mci_add_profile(common, mci, info))
+ return;
+ } else
+ ath_mci_del_profile(common, mci, info);
+
+ btcoex->btcoex_period = ATH_MCI_DEF_BT_PERIOD;
+ mci->aggr_limit = mci->num_sco ? 6 : 0;
+ if (NUM_PROF(mci)) {
+ btcoex->bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
+ btcoex->duty_cycle = ath_mci_duty_cycle[NUM_PROF(mci)];
+ } else {
+ btcoex->bt_stomp_type = mci->num_mgmt ? ATH_BTCOEX_STOMP_ALL :
+ ATH_BTCOEX_STOMP_LOW;
+ btcoex->duty_cycle = ATH_BTCOEX_DEF_DUTY_CYCLE;
+ }
+
+ ath_mci_update_scheme(sc);
+}
+
+static void ath_mci_process_status(struct ath_softc *sc,
+ struct ath_mci_profile_status *status)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_btcoex *btcoex = &sc->btcoex;
+ struct ath_mci_profile *mci = &btcoex->mci;
+ struct ath_mci_profile_info info;
+ int i = 0, old_num_mgmt = mci->num_mgmt;
+
+ /* Link status types are not handled */
+ if (status->is_link) {
+ ath_dbg(common, MCI, "Skip link type status update\n");
+ return;
+ }
+
+ memset(&info, 0, sizeof(struct ath_mci_profile_info));
+
+ info.conn_handle = status->conn_handle;
+ if (ath_mci_find_profile(mci, &info)) {
+ ath_dbg(common, MCI,
+ "Skip non link state update for existing profile %d\n",
+ status->conn_handle);
+ return;
+ }
+ if (status->conn_handle >= ATH_MCI_MAX_PROFILE) {
+ ath_dbg(common, MCI, "Ignore too many non-link update\n");
+ return;
+ }
+ if (status->is_critical)
+ __set_bit(status->conn_handle, mci->status);
+ else
+ __clear_bit(status->conn_handle, mci->status);
+
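+ /* Recount how many connections are currently flagged as critical */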
+ mci->num_mgmt = 0;
+ do {
+ if (test_bit(i, mci->status))
+ mci->num_mgmt++;
+ } while (++i < ATH_MCI_MAX_PROFILE);
+
+ if (old_num_mgmt != mci->num_mgmt)
+ ath_mci_update_scheme(sc);
+}
+
+static void ath_mci_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_mci_profile_info profile_info;
+ struct ath_mci_profile_status profile_status;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ u32 version;
+ u8 major;
+ u8 minor;
+ u32 seq_num;
+
+ switch (opcode) {
+
+ case MCI_GPM_COEX_VERSION_QUERY:
+ ath_dbg(common, MCI, "MCI Recv GPM COEX Version Query\n");
+ version = ar9003_mci_state(ah,
+ MCI_STATE_SEND_WLAN_COEX_VERSION, NULL);
+ break;
+
+ case MCI_GPM_COEX_VERSION_RESPONSE:
+ ath_dbg(common, MCI, "MCI Recv GPM COEX Version Response\n");
+ major = *(rx_payload + MCI_GPM_COEX_B_MAJOR_VERSION);
+ minor = *(rx_payload + MCI_GPM_COEX_B_MINOR_VERSION);
+ ath_dbg(common, MCI, "MCI BT Coex version: %d.%d\n",
+ major, minor);
+ version = (major << 8) + minor;
+ version = ar9003_mci_state(ah,
+ MCI_STATE_SET_BT_COEX_VERSION, &version);
+ break;
+
+ case MCI_GPM_COEX_STATUS_QUERY:
+ ath_dbg(common, MCI,
+ "MCI Recv GPM COEX Status Query = 0x%02x\n",
+ *(rx_payload + MCI_GPM_COEX_B_WLAN_BITMAP));
+ ar9003_mci_state(ah,
+ MCI_STATE_SEND_WLAN_CHANNELS, NULL);
+ break;
+
+ case MCI_GPM_COEX_BT_PROFILE_INFO:
+ ath_dbg(common, MCI, "MCI Recv GPM Coex BT profile info\n");
+ memcpy(&profile_info,
+ (rx_payload + MCI_GPM_COEX_B_PROFILE_TYPE), 10);
+
+ if ((profile_info.type == MCI_GPM_COEX_PROFILE_UNKNOWN)
+ || (profile_info.type >=
+ MCI_GPM_COEX_PROFILE_MAX)) {
+
+ ath_dbg(common, MCI,
+ "illegal profile type = %d, state = %d\n",
+ profile_info.type,
+ profile_info.start);
+ break;
+ }
+
+ ath_mci_process_profile(sc, &profile_info);
+ break;
+
+ case MCI_GPM_COEX_BT_STATUS_UPDATE:
+ profile_status.is_link = *(rx_payload +
+ MCI_GPM_COEX_B_STATUS_TYPE);
+ profile_status.conn_handle = *(rx_payload +
+ MCI_GPM_COEX_B_STATUS_LINKID);
+ profile_status.is_critical = *(rx_payload +
+ MCI_GPM_COEX_B_STATUS_STATE);
+
+ seq_num = *((u32 *)(rx_payload + 12));
+ ath_dbg(common, MCI,
+ "MCI Recv GPM COEX BT_Status_Update: is_link=%d, linkId=%d, state=%d, SEQ=%d\n",
+ profile_status.is_link, profile_status.conn_handle,
+ profile_status.is_critical, seq_num);
+
+ ath_mci_process_status(sc, &profile_status);
+ break;
+
+ default:
+ ath_dbg(common, MCI, "MCI Unknown GPM COEX message = 0x%02x\n",
+ opcode);
+ break;
+ }
+}
+
+static int ath_mci_buf_alloc(struct ath_softc *sc, struct ath_mci_buf *buf)
+{
+ int error = 0;
+
+ buf->bf_addr = dma_alloc_coherent(sc->dev, buf->bf_len,
+ &buf->bf_paddr, GFP_KERNEL);
+
+ if (buf->bf_addr == NULL) {
+ error = -ENOMEM;
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ memset(buf, 0, sizeof(*buf));
+ return error;
+}
+
+static void ath_mci_buf_free(struct ath_softc *sc, struct ath_mci_buf *buf)
+{
+ if (buf->bf_addr) {
+ dma_free_coherent(sc->dev, buf->bf_len, buf->bf_addr,
+ buf->bf_paddr);
+ memset(buf, 0, sizeof(*buf));
+ }
+}
+
+int ath_mci_setup(struct ath_softc *sc)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_mci_coex *mci = &sc->mci_coex;
+ int error = 0;
+
+ if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI))
+ return 0;
+
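+ /*
+ * The scheduler and GPM buffers share a single coherent DMA allocation;
+ * the GPM buffer starts right after the scheduler buffer.
+ */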
+ mci->sched_buf.bf_len = ATH_MCI_SCHED_BUF_SIZE + ATH_MCI_GPM_BUF_SIZE;
+
+ if (ath_mci_buf_alloc(sc, &mci->sched_buf)) {
+ ath_dbg(common, FATAL, "MCI buffer alloc failed\n");
+ error = -ENOMEM;
+ goto fail;
+ }
+
+ mci->sched_buf.bf_len = ATH_MCI_SCHED_BUF_SIZE;
+
+ memset(mci->sched_buf.bf_addr, MCI_GPM_RSVD_PATTERN,
+ mci->sched_buf.bf_len);
+
+ mci->gpm_buf.bf_len = ATH_MCI_GPM_BUF_SIZE;
+ mci->gpm_buf.bf_addr = (u8 *)mci->sched_buf.bf_addr +
+ mci->sched_buf.bf_len;
+ mci->gpm_buf.bf_paddr = mci->sched_buf.bf_paddr + mci->sched_buf.bf_len;
+
+ /* initialize the buffer */
+ memset(mci->gpm_buf.bf_addr, MCI_GPM_RSVD_PATTERN, mci->gpm_buf.bf_len);
+
+ ar9003_mci_setup(sc->sc_ah, mci->gpm_buf.bf_paddr,
+ mci->gpm_buf.bf_addr, (mci->gpm_buf.bf_len >> 4),
+ mci->sched_buf.bf_paddr);
+fail:
+ return error;
+}
+
+void ath_mci_cleanup(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_mci_coex *mci = &sc->mci_coex;
+
+ if (!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI))
+ return;
+
+ /*
+ * Freeing the scheduler buffer releases both the scheduler and GPM
+ * buffers, since they share a single DMA allocation.
+ */
+ ath_mci_buf_free(sc, &mci->sched_buf);
+ ar9003_mci_cleanup(ah);
+}
+
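+/*
+ * Main MCI interrupt handler: performs the wake/sleep handshake with BT,
+ * drains pending GPM messages and acknowledges the remaining RX messages.
+ */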
+void ath_mci_intr(struct ath_softc *sc)
+{
+ struct ath_mci_coex *mci = &sc->mci_coex;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 mci_int, mci_int_rxmsg;
+ u32 offset, subtype, opcode;
+ u32 *pgpm;
+ u32 more_data = MCI_GPM_MORE;
+ bool skip_gpm = false;
+
+ if (!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI))
+ return;
+
+ ar9003_mci_get_interrupt(sc->sc_ah, &mci_int, &mci_int_rxmsg);
+
+ if (ar9003_mci_state(ah, MCI_STATE_ENABLE, NULL) == 0) {
+
+ ar9003_mci_state(sc->sc_ah, MCI_STATE_INIT_GPM_OFFSET, NULL);
+ ath_dbg(common, MCI, "MCI interrupt but MCI disabled\n");
+
+ ath_dbg(common, MCI,
+ "MCI interrupt: intr = 0x%x, intr_rxmsg = 0x%x\n",
+ mci_int, mci_int_rxmsg);
+ return;
+ }
+
+ if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE) {
+ u32 payload[4] = { 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffff00};
+
+ /*
+ * The following REMOTE_RESET and SYS_WAKING messages used to be sent
+ * only when BT woke up. Now they are always sent, as a
+ * recovery method to reset BT MCI's RX alignment.
+ */
+ ath_dbg(common, MCI, "MCI interrupt send REMOTE_RESET\n");
+
+ ar9003_mci_send_message(ah, MCI_REMOTE_RESET, 0,
+ payload, 16, true, false);
+ ath_dbg(common, MCI, "MCI interrupt send SYS_WAKING\n");
+ ar9003_mci_send_message(ah, MCI_SYS_WAKING, 0,
+ NULL, 0, true, false);
+
+ mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE;
+ ar9003_mci_state(ah, MCI_STATE_RESET_REQ_WAKE, NULL);
+
+ /*
+ * Always do this for recovery, for 2G/5G toggling and for LNA_TRANS.
+ */
+ ath_dbg(common, MCI, "MCI Set BT state to AWAKE\n");
+ ar9003_mci_state(ah, MCI_STATE_SET_BT_AWAKE, NULL);
+ }
+
+ /* Processing SYS_WAKING/SYS_SLEEPING */
+ if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING) {
+ mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING;
+
+ if (ar9003_mci_state(ah, MCI_STATE_BT, NULL) == MCI_BT_SLEEP) {
+
+ if (ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP, NULL)
+ == MCI_BT_SLEEP)
+ ath_dbg(common, MCI,
+ "MCI BT stays in sleep mode\n");
+ else {
+ ath_dbg(common, MCI,
+ "MCI Set BT state to AWAKE\n");
+ ar9003_mci_state(ah,
+ MCI_STATE_SET_BT_AWAKE, NULL);
+ }
+ } else
+ ath_dbg(common, MCI, "MCI BT stays in AWAKE mode\n");
+ }
+
+ if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING) {
+
+ mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING;
+
+ if (ar9003_mci_state(ah, MCI_STATE_BT, NULL) == MCI_BT_AWAKE) {
+
+ if (ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP, NULL)
+ == MCI_BT_AWAKE)
+ ath_dbg(common, MCI,
+ "MCI BT stays in AWAKE mode\n");
+ else {
+ ath_dbg(common, MCI,
+ "MCI SetBT state to SLEEP\n");
+ ar9003_mci_state(ah, MCI_STATE_SET_BT_SLEEP,
+ NULL);
+ }
+ } else
+ ath_dbg(common, MCI, "MCI BT stays in SLEEP mode\n");
+ }
+
+ if ((mci_int & AR_MCI_INTERRUPT_RX_INVALID_HDR) ||
+ (mci_int & AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT)) {
+
+ ath_dbg(common, MCI, "MCI RX broken, skip GPM msgs\n");
+ ar9003_mci_state(ah, MCI_STATE_RECOVER_RX, NULL);
+ skip_gpm = true;
+ }
+
+ if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO) {
+
+ mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO;
+ offset = ar9003_mci_state(ah, MCI_STATE_LAST_SCHD_MSG_OFFSET,
+ NULL);
+ }
+
+ if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_GPM) {
+
+ mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_GPM;
+
+ while (more_data == MCI_GPM_MORE) {
+
+ pgpm = mci->gpm_buf.bf_addr;
+ offset = ar9003_mci_state(ah,
+ MCI_STATE_NEXT_GPM_OFFSET, &more_data);
+
+ if (offset == MCI_GPM_INVALID)
+ break;
+
+ pgpm += (offset >> 2);
+
+ /*
+ * The first dword is a timer value.
+ * The real data starts from the 2nd dword.
+ */
+
+ subtype = MCI_GPM_TYPE(pgpm);
+ opcode = MCI_GPM_OPCODE(pgpm);
+
+ if (!skip_gpm) {
+
+ if (MCI_GPM_IS_CAL_TYPE(subtype))
+ ath_mci_cal_msg(sc, subtype,
+ (u8 *) pgpm);
+ else {
+ switch (subtype) {
+ case MCI_GPM_COEX_AGENT:
+ ath_mci_msg(sc, opcode,
+ (u8 *) pgpm);
+ break;
+ default:
+ break;
+ }
+ }
+ }
+ MCI_GPM_RECYCLE(pgpm);
+ }
+ }
+
+ if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_HW_MSG_MASK) {
+
+ if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL)
+ mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL;
+
+ if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_LNA_INFO) {
+ mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_LNA_INFO;
+ ath_dbg(common, MCI, "MCI LNA_INFO\n");
+ }
+
+ if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_INFO) {
+
+ int value_dbm = ar9003_mci_state(ah,
+ MCI_STATE_CONT_RSSI_POWER, NULL);
+
+ mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_CONT_INFO;
+
+ if (ar9003_mci_state(ah, MCI_STATE_CONT_TXRX, NULL))
+ ath_dbg(common, MCI,
+ "MCI CONT_INFO: (tx) pri = %d, pwr = %d dBm\n",
+ ar9003_mci_state(ah,
+ MCI_STATE_CONT_PRIORITY, NULL),
+ value_dbm);
+ else
+ ath_dbg(common, MCI,
+ "MCI CONT_INFO: (rx) pri = %d,pwr = %d dBm\n",
+ ar9003_mci_state(ah,
+ MCI_STATE_CONT_PRIORITY, NULL),
+ value_dbm);
+ }
+
+ if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_NACK) {
+ mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_CONT_NACK;
+ ath_dbg(common, MCI, "MCI CONT_NACK\n");
+ }
+
+ if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_RST) {
+ mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_CONT_RST;
+ ath_dbg(common, MCI, "MCI CONT_RST\n");
+ }
+ }
+
+ if ((mci_int & AR_MCI_INTERRUPT_RX_INVALID_HDR) ||
+ (mci_int & AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT))
+ mci_int &= ~(AR_MCI_INTERRUPT_RX_INVALID_HDR |
+ AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT);
+
+ if (mci_int_rxmsg & 0xfffffffe)
+ ath_dbg(common, MCI, "MCI not processed mci_int_rxmsg = 0x%x\n",
+ mci_int_rxmsg);
+}
diff --git a/drivers/net/wireless/ath/ath9k/mci.h b/drivers/net/wireless/ath/ath9k/mci.h
new file mode 100644
index 000000000000..29e3e51d078f
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/mci.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef MCI_H
+#define MCI_H
+
+#define ATH_MCI_SCHED_BUF_SIZE (16 * 16) /* 16 entries, 4 dwords each */
+#define ATH_MCI_GPM_MAX_ENTRY 16
+#define ATH_MCI_GPM_BUF_SIZE (ATH_MCI_GPM_MAX_ENTRY * 16)
+#define ATH_MCI_DEF_BT_PERIOD 40
+#define ATH_MCI_BDR_DUTY_CYCLE 20
+#define ATH_MCI_MAX_DUTY_CYCLE 90
+
+#define ATH_MCI_DEF_AGGR_LIMIT 6 /* in 0.25 ms */
+#define ATH_MCI_MAX_ACL_PROFILE 7
+#define ATH_MCI_MAX_SCO_PROFILE 1
+#define ATH_MCI_MAX_PROFILE (ATH_MCI_MAX_ACL_PROFILE +\
+ ATH_MCI_MAX_SCO_PROFILE)
+
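+/* Update the per-profile-type counters when a BT profile is added or removed */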
+#define INC_PROF(_mci, _info) do { \
+ switch (_info->type) { \
+ case MCI_GPM_COEX_PROFILE_RFCOMM:\
+ _mci->num_other_acl++; \
+ break; \
+ case MCI_GPM_COEX_PROFILE_A2DP: \
+ _mci->num_a2dp++; \
+ if (!_info->edr) \
+ _mci->num_bdr++; \
+ break; \
+ case MCI_GPM_COEX_PROFILE_HID: \
+ _mci->num_hid++; \
+ break; \
+ case MCI_GPM_COEX_PROFILE_BNEP: \
+ _mci->num_pan++; \
+ break; \
+ case MCI_GPM_COEX_PROFILE_VOICE: \
+ _mci->num_sco++; \
+ break; \
+ default: \
+ break; \
+ } \
+ } while (0)
+
+#define DEC_PROF(_mci, _info) do { \
+ switch (_info->type) { \
+ case MCI_GPM_COEX_PROFILE_RFCOMM:\
+ _mci->num_other_acl--; \
+ break; \
+ case MCI_GPM_COEX_PROFILE_A2DP: \
+ _mci->num_a2dp--; \
+ if (!_info->edr) \
+ _mci->num_bdr--; \
+ break; \
+ case MCI_GPM_COEX_PROFILE_HID: \
+ _mci->num_hid--; \
+ break; \
+ case MCI_GPM_COEX_PROFILE_BNEP: \
+ _mci->num_pan--; \
+ break; \
+ case MCI_GPM_COEX_PROFILE_VOICE: \
+ _mci->num_sco--; \
+ break; \
+ default: \
+ break; \
+ } \
+ } while (0)
+
+#define NUM_PROF(_mci) (_mci->num_other_acl + _mci->num_a2dp + \
+ _mci->num_hid + _mci->num_pan + _mci->num_sco)
+
+struct ath_mci_profile_info {
+ u8 type;
+ u8 conn_handle;
+ bool start;
+ bool master;
+ bool edr;
+ u8 voice_type;
+ u16 T; /* Voice: Tvoice, HID: Tsniff, in slots */
+ u8 W; /* Voice: Wvoice, HID: Sniff timeout, in slots */
+ u8 A; /* HID: Sniff attempt, in slots */
+ struct list_head list;
+};
+
+struct ath_mci_profile_status {
+ bool is_critical;
+ bool is_link;
+ u8 conn_handle;
+};
+
+struct ath_mci_profile {
+ struct list_head info;
+ DECLARE_BITMAP(status, ATH_MCI_MAX_PROFILE);
+ u16 aggr_limit;
+ u8 num_mgmt;
+ u8 num_sco;
+ u8 num_a2dp;
+ u8 num_hid;
+ u8 num_pan;
+ u8 num_other_acl;
+ u8 num_bdr;
+};
+
+
+struct ath_mci_buf {
+ void *bf_addr; /* virtual addr of buffer */
+ dma_addr_t bf_paddr; /* physical addr of buffer */
+ u32 bf_len; /* len of data */
+};
+
+struct ath_mci_coex {
+ atomic_t mci_cal_flag;
+ struct ath_mci_buf sched_buf;
+ struct ath_mci_buf gpm_buf;
+ u32 bt_cal_start;
+};
+
+void ath_mci_flush_profile(struct ath_mci_profile *mci);
+int ath_mci_setup(struct ath_softc *sc);
+void ath_mci_cleanup(struct ath_softc *sc);
+void ath_mci_intr(struct ath_softc *sc);
+#endif
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index 2dcdf63cb390..77dc327def8d 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -121,7 +121,7 @@ static void ath_pci_aspm_init(struct ath_common *common)
if (!parent)
return;
- if (ah->btcoex_hw.scheme != ATH_BTCOEX_CFG_NONE) {
+ if (ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) {
/* Bluetooth coexistance requires disabling ASPM. */
pci_read_config_byte(pdev, pos + PCI_EXP_LNKCTL, &aspm);
aspm &= ~(PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
@@ -307,12 +307,11 @@ static int ath_pci_suspend(struct device *device)
struct ieee80211_hw *hw = pci_get_drvdata(pdev);
struct ath_softc *sc = hw->priv;
- ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
-
/* The device has to be moved to FULLSLEEP forcibly.
* Otherwise the chip never moved to full sleep,
* when no interface is up.
*/
+ ath9k_hw_disable(sc->sc_ah);
ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_FULL_SLEEP);
return 0;
@@ -321,8 +320,6 @@ static int ath_pci_suspend(struct device *device)
static int ath_pci_resume(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
- struct ieee80211_hw *hw = pci_get_drvdata(pdev);
- struct ath_softc *sc = hw->priv;
u32 val;
/*
@@ -334,22 +331,6 @@ static int ath_pci_resume(struct device *device)
if ((val & 0x0000ff00) != 0)
pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
- ath9k_ps_wakeup(sc);
- /* Enable LED */
- ath9k_hw_cfg_output(sc->sc_ah, sc->sc_ah->led_pin,
- AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
- ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 0);
-
- /*
- * Reset key cache to sane defaults (all entries cleared) instead of
- * semi-random values after suspend/resume.
- */
- ath9k_cmn_init_crypto(sc->sc_ah);
- ath9k_ps_restore(sc);
-
- sc->ps_idle = true;
- ath_radio_disable(sc, hw);
-
return 0;
}
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index 888abc2be3a5..b3c3798fe513 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -1199,7 +1199,7 @@ struct ath_rate_table *ath_choose_rate_table(struct ath_softc *sc,
return &ar5416_11na_ratetable;
return &ar5416_11a_ratetable;
default:
- ath_dbg(common, ATH_DBG_CONFIG, "Invalid band\n");
+ ath_dbg(common, CONFIG, "Invalid band\n");
return NULL;
}
}
@@ -1271,11 +1271,12 @@ static void ath_rc_init(struct ath_softc *sc,
ath_rc_priv->max_valid_rate = k;
ath_rc_sort_validrates(rate_table, ath_rc_priv);
- ath_rc_priv->rate_max_phy = ath_rc_priv->valid_rate_index[k-4];
+ ath_rc_priv->rate_max_phy = (k > 4) ?
+ ath_rc_priv->valid_rate_index[k-4] :
+ ath_rc_priv->valid_rate_index[k-1];
ath_rc_priv->rate_table = rate_table;
- ath_dbg(common, ATH_DBG_CONFIG,
- "RC Initialized with capabilities: 0x%x\n",
+ ath_dbg(common, CONFIG, "RC Initialized with capabilities: 0x%x\n",
ath_rc_priv->ht_cap);
}
@@ -1472,7 +1473,7 @@ static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband,
oper_cw40, oper_sgi);
ath_rc_init(sc, priv_sta, sband, sta, rate_table);
- ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_CONFIG,
+ ath_dbg(ath9k_hw_common(sc->sc_ah), CONFIG,
"Operating HT Bandwidth changed to: %d\n",
sc->hw->conf.channel_type);
}
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 67b862cdae6d..0e666fbe0842 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -172,7 +172,7 @@ static void ath_rx_addbuffer_edma(struct ath_softc *sc,
u32 nbuf = 0;
if (list_empty(&sc->rx.rxbuf)) {
- ath_dbg(common, ATH_DBG_QUEUE, "No free rx buf available\n");
+ ath_dbg(common, QUEUE, "No free rx buf available\n");
return;
}
@@ -337,7 +337,7 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
return ath_rx_edma_init(sc, nbufs);
} else {
- ath_dbg(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
+ ath_dbg(common, CONFIG, "cachelsz %u rxbufsize %u\n",
common->cachelsz, common->rx_bufsize);
/* Initialize rx descriptors */
@@ -475,7 +475,6 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
return rfilt;
-#undef RX_FILTER_PRESERVE
}
int ath_startrecv(struct ath_softc *sc)
@@ -592,7 +591,7 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
if (sc->ps_flags & PS_BEACON_SYNC) {
sc->ps_flags &= ~PS_BEACON_SYNC;
- ath_dbg(common, ATH_DBG_PS,
+ ath_dbg(common, PS,
"Reconfigure Beacon timers based on timestamp from the AP\n");
ath_set_beacon(sc);
}
@@ -605,7 +604,7 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
* a backup trigger for returning into NETWORK SLEEP state,
* so we are waiting for it as well.
*/
- ath_dbg(common, ATH_DBG_PS,
+ ath_dbg(common, PS,
"Received DTIM beacon indicating buffered broadcast/multicast frame(s)\n");
sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON;
return;
@@ -618,8 +617,7 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
* been delivered.
*/
sc->ps_flags &= ~PS_WAIT_FOR_CAB;
- ath_dbg(common, ATH_DBG_PS,
- "PS wait for CAB frames timed out\n");
+ ath_dbg(common, PS, "PS wait for CAB frames timed out\n");
}
}
@@ -644,13 +642,13 @@ static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb, bool mybeacon)
* point.
*/
sc->ps_flags &= ~(PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON);
- ath_dbg(common, ATH_DBG_PS,
+ ath_dbg(common, PS,
"All PS CAB frames received, back to sleep\n");
} else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) &&
!is_multicast_ether_addr(hdr->addr1) &&
!ieee80211_has_morefrags(hdr->frame_control)) {
sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA;
- ath_dbg(common, ATH_DBG_PS,
+ ath_dbg(common, PS,
"Going back to sleep after having received PS-Poll data (0x%lx)\n",
sc->ps_flags & (PS_WAIT_FOR_BEACON |
PS_WAIT_FOR_CAB |
@@ -933,7 +931,7 @@ static int ath9k_process_rate(struct ath_common *common,
* No valid hardware bitrate found -- we should not get here
* because hardware has already validated this frame as OK.
*/
- ath_dbg(common, ATH_DBG_ANY,
+ ath_dbg(common, ANY,
"unsupported hw bitrate detected 0x%02x using 1 Mbit\n",
rx_stats->rs_rate);
@@ -1824,6 +1822,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
hdr = (struct ieee80211_hdr *) (hdr_skb->data + rx_status_len);
rxs = IEEE80211_SKB_RXCB(hdr_skb);
if (ieee80211_is_beacon(hdr->frame_control) &&
+ !is_zero_ether_addr(common->curbssid) &&
!compare_ether_addr(hdr->addr3, common->curbssid))
rs.is_mybeacon = true;
else
@@ -1838,11 +1837,6 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
if (sc->sc_flags & SC_OP_RXFLUSH)
goto requeue_drop_frag;
- retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs,
- rxs, &decrypt_error);
- if (retval)
- goto requeue_drop_frag;
-
rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp;
if (rs.rs_tstamp > tsf_lower &&
unlikely(rs.rs_tstamp - tsf_lower > 0x10000000))
@@ -1852,6 +1846,11 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
unlikely(tsf_lower - rs.rs_tstamp > 0x10000000))
rxs->mactime += 0x100000000ULL;
+ retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs,
+ rxs, &decrypt_error);
+ if (retval)
+ goto requeue_drop_frag;
+
/* Ensure we always have an skb to requeue once we are done
* processing the current buffer's skb */
requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC);
@@ -1923,15 +1922,20 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
skb = hdr_skb;
}
- /*
- * change the default rx antenna if rx diversity chooses the
- * other antenna 3 times in a row.
- */
- if (sc->rx.defant != rs.rs_antenna) {
- if (++sc->rx.rxotherant >= 3)
- ath_setdefantenna(sc, rs.rs_antenna);
- } else {
- sc->rx.rxotherant = 0;
+
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) {
+
+ /*
+ * change the default rx antenna if rx diversity
+ * chooses the other antenna 3 times in a row.
+ */
+ if (sc->rx.defant != rs.rs_antenna) {
+ if (++sc->rx.rxotherant >= 3)
+ ath_setdefantenna(sc, rs.rs_antenna);
+ } else {
+ sc->rx.rxotherant = 0;
+ }
+
}
if (rxs->flag & RX_FLAG_MMIC_STRIPPED)
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index 8fcb7e9e8399..6e2f18861f5d 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -1006,6 +1006,8 @@ enum {
#define AR_INTR_ASYNC_MASK (AR_SREV_9340(ah) ? 0x4018 : 0x4030)
#define AR_INTR_ASYNC_MASK_GPIO 0xFFFC0000
#define AR_INTR_ASYNC_MASK_GPIO_S 18
+#define AR_INTR_ASYNC_MASK_MCI 0x00000080
+#define AR_INTR_ASYNC_MASK_MCI_S 7
#define AR_INTR_SYNC_MASK (AR_SREV_9340(ah) ? 0x401c : 0x4034)
#define AR_INTR_SYNC_MASK_GPIO 0xFFFC0000
@@ -1013,6 +1015,14 @@ enum {
#define AR_INTR_ASYNC_CAUSE_CLR (AR_SREV_9340(ah) ? 0x4020 : 0x4038)
#define AR_INTR_ASYNC_CAUSE (AR_SREV_9340(ah) ? 0x4020 : 0x4038)
+#define AR_INTR_ASYNC_CAUSE_MCI 0x00000080
+#define AR_INTR_ASYNC_USED (AR_INTR_MAC_IRQ | \
+ AR_INTR_ASYNC_CAUSE_MCI)
+
+/* Asynchronous Interrupt Enable Register */
+#define AR_INTR_ASYNC_ENABLE_MCI 0x00000080
+#define AR_INTR_ASYNC_ENABLE_MCI_S 7
+
#define AR_INTR_ASYNC_ENABLE (AR_SREV_9340(ah) ? 0x4024 : 0x403c)
#define AR_INTR_ASYNC_ENABLE_GPIO 0xFFFC0000
@@ -1269,6 +1279,8 @@ enum {
#define AR_RTC_INTR_MASK \
((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0058) : 0x7058)
+#define AR_RTC_KEEP_AWAKE 0x7034
+
/* RTC_DERIVED_* - only for AR9100 */
#define AR_RTC_DERIVED_CLK \
@@ -1555,6 +1567,8 @@ enum {
#define AR_DIAG_FRAME_NV0 0x00020000
#define AR_DIAG_OBS_PT_SEL1 0x000C0000
#define AR_DIAG_OBS_PT_SEL1_S 18
+#define AR_DIAG_OBS_PT_SEL2 0x08000000
+#define AR_DIAG_OBS_PT_SEL2_S 27
#define AR_DIAG_FORCE_RX_CLEAR 0x00100000 /* force rx_clear high */
#define AR_DIAG_IGNORE_VIRT_CS 0x00200000
#define AR_DIAG_FORCE_CH_IDLE_HIGH 0x00400000
@@ -1752,19 +1766,10 @@ enum {
#define AR_BT_COEX_WL_WEIGHTS0 0x8174
#define AR_BT_COEX_WL_WEIGHTS1 0x81c4
+#define AR_MCI_COEX_WL_WEIGHTS(_i) (0x18b0 + (_i << 2))
+#define AR_BT_COEX_BT_WEIGHTS(_i) (0x83ac + (_i << 2))
-#define AR_BT_COEX_BT_WEIGHTS0 0x83ac
-#define AR_BT_COEX_BT_WEIGHTS1 0x83b0
-#define AR_BT_COEX_BT_WEIGHTS2 0x83b4
-#define AR_BT_COEX_BT_WEIGHTS3 0x83b8
-
-#define AR9300_BT_WGHT 0xcccc4444
-#define AR9300_STOMP_ALL_WLAN_WGHT0 0xfffffff0
-#define AR9300_STOMP_ALL_WLAN_WGHT1 0xfffffff0
-#define AR9300_STOMP_LOW_WLAN_WGHT0 0x88888880
-#define AR9300_STOMP_LOW_WLAN_WGHT1 0x88888880
-#define AR9300_STOMP_NONE_WLAN_WGHT0 0x00000000
-#define AR9300_STOMP_NONE_WLAN_WGHT1 0x00000000
+#define AR9300_BT_WGHT 0xcccc4444
#define AR_BT_COEX_MODE2 0x817c
#define AR_BT_BCN_MISS_THRESH 0x000000ff
@@ -1938,37 +1943,277 @@ enum {
#define AR_PHY_AGC_CONTROL_YCOK_MAX_S 6
/* MCI Registers */
-#define AR_MCI_INTERRUPT_RX_MSG_EN 0x183c
-#define AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET 0x00000001
-#define AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET_S 0
-#define AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL 0x00000002
-#define AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL_S 1
-#define AR_MCI_INTERRUPT_RX_MSG_CONT_NACK 0x00000004
-#define AR_MCI_INTERRUPT_RX_MSG_CONT_NACK_S 2
-#define AR_MCI_INTERRUPT_RX_MSG_CONT_INFO 0x00000008
-#define AR_MCI_INTERRUPT_RX_MSG_CONT_INFO_S 3
-#define AR_MCI_INTERRUPT_RX_MSG_CONT_RST 0x00000010
-#define AR_MCI_INTERRUPT_RX_MSG_CONT_RST_S 4
-#define AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO 0x00000020
-#define AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO_S 5
-#define AR_MCI_INTERRUPT_RX_MSG_CPU_INT 0x00000040
-#define AR_MCI_INTERRUPT_RX_MSG_CPU_INT_S 6
-#define AR_MCI_INTERRUPT_RX_MSG_GPM 0x00000100
-#define AR_MCI_INTERRUPT_RX_MSG_GPM_S 8
-#define AR_MCI_INTERRUPT_RX_MSG_LNA_INFO 0x00000200
-#define AR_MCI_INTERRUPT_RX_MSG_LNA_INFO_S 9
-#define AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING 0x00000400
-#define AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING_S 10
-#define AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING 0x00000800
-#define AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING_S 11
-#define AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE 0x00001000
-#define AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE_S 12
-#define AR_MCI_INTERRUPT_RX_HW_MSG_MASK (AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO | \
+
+#define AR_MCI_COMMAND0 0x1800
+#define AR_MCI_COMMAND0_HEADER 0xFF
+#define AR_MCI_COMMAND0_HEADER_S 0
+#define AR_MCI_COMMAND0_LEN 0x1f00
+#define AR_MCI_COMMAND0_LEN_S 8
+#define AR_MCI_COMMAND0_DISABLE_TIMESTAMP 0x2000
+#define AR_MCI_COMMAND0_DISABLE_TIMESTAMP_S 13
+
+#define AR_MCI_COMMAND1 0x1804
+
+#define AR_MCI_COMMAND2 0x1808
+#define AR_MCI_COMMAND2_RESET_TX 0x01
+#define AR_MCI_COMMAND2_RESET_TX_S 0
+#define AR_MCI_COMMAND2_RESET_RX 0x02
+#define AR_MCI_COMMAND2_RESET_RX_S 1
+#define AR_MCI_COMMAND2_RESET_RX_NUM_CYCLES 0x3FC
+#define AR_MCI_COMMAND2_RESET_RX_NUM_CYCLES_S 2
+#define AR_MCI_COMMAND2_RESET_REQ_WAKEUP 0x400
+#define AR_MCI_COMMAND2_RESET_REQ_WAKEUP_S 10
+
+#define AR_MCI_RX_CTRL 0x180c
+
+#define AR_MCI_TX_CTRL 0x1810
+/* 0 = no division, 1 = divide by 2, 2 = divide by 4, 3 = divide by 8 */
+#define AR_MCI_TX_CTRL_CLK_DIV 0x03
+#define AR_MCI_TX_CTRL_CLK_DIV_S 0
+#define AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE 0x04
+#define AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE_S 2
+#define AR_MCI_TX_CTRL_GAIN_UPDATE_FREQ 0xFFFFF8
+#define AR_MCI_TX_CTRL_GAIN_UPDATE_FREQ_S 3
+#define AR_MCI_TX_CTRL_GAIN_UPDATE_NUM 0xF000000
+#define AR_MCI_TX_CTRL_GAIN_UPDATE_NUM_S 24
+
+#define AR_MCI_MSG_ATTRIBUTES_TABLE 0x1814
+#define AR_MCI_MSG_ATTRIBUTES_TABLE_CHECKSUM 0xFFFF
+#define AR_MCI_MSG_ATTRIBUTES_TABLE_CHECKSUM_S 0
+#define AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR 0xFFFF0000
+#define AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR_S 16
+
+#define AR_MCI_SCHD_TABLE_0 0x1818
+#define AR_MCI_SCHD_TABLE_1 0x181c
+#define AR_MCI_GPM_0 0x1820
+#define AR_MCI_GPM_1 0x1824
+#define AR_MCI_GPM_WRITE_PTR 0xFFFF0000
+#define AR_MCI_GPM_WRITE_PTR_S 16
+#define AR_MCI_GPM_BUF_LEN 0x0000FFFF
+#define AR_MCI_GPM_BUF_LEN_S 0
+
+#define AR_MCI_INTERRUPT_RAW 0x1828
+#define AR_MCI_INTERRUPT_EN 0x182c
+#define AR_MCI_INTERRUPT_SW_MSG_DONE 0x00000001
+#define AR_MCI_INTERRUPT_SW_MSG_DONE_S 0
+#define AR_MCI_INTERRUPT_CPU_INT_MSG 0x00000002
+#define AR_MCI_INTERRUPT_CPU_INT_MSG_S 1
+#define AR_MCI_INTERRUPT_RX_CKSUM_FAIL 0x00000004
+#define AR_MCI_INTERRUPT_RX_CKSUM_FAIL_S 2
+#define AR_MCI_INTERRUPT_RX_INVALID_HDR 0x00000008
+#define AR_MCI_INTERRUPT_RX_INVALID_HDR_S 3
+#define AR_MCI_INTERRUPT_RX_HW_MSG_FAIL 0x00000010
+#define AR_MCI_INTERRUPT_RX_HW_MSG_FAIL_S 4
+#define AR_MCI_INTERRUPT_RX_SW_MSG_FAIL 0x00000020
+#define AR_MCI_INTERRUPT_RX_SW_MSG_FAIL_S 5
+#define AR_MCI_INTERRUPT_TX_HW_MSG_FAIL 0x00000080
+#define AR_MCI_INTERRUPT_TX_HW_MSG_FAIL_S 7
+#define AR_MCI_INTERRUPT_TX_SW_MSG_FAIL 0x00000100
+#define AR_MCI_INTERRUPT_TX_SW_MSG_FAIL_S 8
+#define AR_MCI_INTERRUPT_RX_MSG 0x00000200
+#define AR_MCI_INTERRUPT_RX_MSG_S 9
+#define AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE 0x00000400
+#define AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE_S 10
+#define AR_MCI_INTERRUPT_BT_PRI 0x07fff800
+#define AR_MCI_INTERRUPT_BT_PRI_S 11
+#define AR_MCI_INTERRUPT_BT_PRI_THRESH 0x08000000
+#define AR_MCI_INTERRUPT_BT_PRI_THRESH_S 27
+#define AR_MCI_INTERRUPT_BT_FREQ 0x10000000
+#define AR_MCI_INTERRUPT_BT_FREQ_S 28
+#define AR_MCI_INTERRUPT_BT_STOMP 0x20000000
+#define AR_MCI_INTERRUPT_BT_STOMP_S 29
+#define AR_MCI_INTERRUPT_BB_AIC_IRQ 0x40000000
+#define AR_MCI_INTERRUPT_BB_AIC_IRQ_S 30
+#define AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT 0x80000000
+#define AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT_S 31
+
+#define AR_MCI_INTERRUPT_DEFAULT (AR_MCI_INTERRUPT_SW_MSG_DONE | \
+ AR_MCI_INTERRUPT_RX_INVALID_HDR | \
+ AR_MCI_INTERRUPT_RX_HW_MSG_FAIL | \
+ AR_MCI_INTERRUPT_RX_SW_MSG_FAIL | \
+ AR_MCI_INTERRUPT_TX_HW_MSG_FAIL | \
+ AR_MCI_INTERRUPT_TX_SW_MSG_FAIL | \
+ AR_MCI_INTERRUPT_RX_MSG | \
+ AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE | \
+ AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT)
+
+#define AR_MCI_INTERRUPT_MSG_FAIL_MASK (AR_MCI_INTERRUPT_RX_HW_MSG_FAIL | \
+ AR_MCI_INTERRUPT_RX_SW_MSG_FAIL | \
+ AR_MCI_INTERRUPT_TX_HW_MSG_FAIL | \
+ AR_MCI_INTERRUPT_TX_SW_MSG_FAIL)
+
+#define AR_MCI_REMOTE_CPU_INT 0x1830
+#define AR_MCI_REMOTE_CPU_INT_EN 0x1834
+#define AR_MCI_INTERRUPT_RX_MSG_RAW 0x1838
+#define AR_MCI_INTERRUPT_RX_MSG_EN 0x183c
+#define AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET 0x00000001
+#define AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET_S 0
+#define AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL 0x00000002
+#define AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL_S 1
+#define AR_MCI_INTERRUPT_RX_MSG_CONT_NACK 0x00000004
+#define AR_MCI_INTERRUPT_RX_MSG_CONT_NACK_S 2
+#define AR_MCI_INTERRUPT_RX_MSG_CONT_INFO 0x00000008
+#define AR_MCI_INTERRUPT_RX_MSG_CONT_INFO_S 3
+#define AR_MCI_INTERRUPT_RX_MSG_CONT_RST 0x00000010
+#define AR_MCI_INTERRUPT_RX_MSG_CONT_RST_S 4
+#define AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO 0x00000020
+#define AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO_S 5
+#define AR_MCI_INTERRUPT_RX_MSG_CPU_INT 0x00000040
+#define AR_MCI_INTERRUPT_RX_MSG_CPU_INT_S 6
+#define AR_MCI_INTERRUPT_RX_MSG_GPM 0x00000100
+#define AR_MCI_INTERRUPT_RX_MSG_GPM_S 8
+#define AR_MCI_INTERRUPT_RX_MSG_LNA_INFO 0x00000200
+#define AR_MCI_INTERRUPT_RX_MSG_LNA_INFO_S 9
+#define AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING 0x00000400
+#define AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING_S 10
+#define AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING 0x00000800
+#define AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING_S 11
+#define AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE 0x00001000
+#define AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE_S 12
+#define AR_MCI_INTERRUPT_RX_HW_MSG_MASK (AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO | \
AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL| \
AR_MCI_INTERRUPT_RX_MSG_LNA_INFO | \
AR_MCI_INTERRUPT_RX_MSG_CONT_NACK | \
AR_MCI_INTERRUPT_RX_MSG_CONT_INFO | \
AR_MCI_INTERRUPT_RX_MSG_CONT_RST)
+#define AR_MCI_INTERRUPT_RX_MSG_DEFAULT (AR_MCI_INTERRUPT_RX_MSG_GPM | \
+ AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET| \
+ AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING | \
+ AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING| \
+ AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO | \
+ AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL | \
+ AR_MCI_INTERRUPT_RX_MSG_LNA_INFO | \
+ AR_MCI_INTERRUPT_RX_MSG_CONT_NACK | \
+ AR_MCI_INTERRUPT_RX_MSG_CONT_INFO | \
+ AR_MCI_INTERRUPT_RX_MSG_CONT_RST | \
+ AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE)
+
+#define AR_MCI_CPU_INT 0x1840
+
+#define AR_MCI_RX_STATUS 0x1844
+#define AR_MCI_RX_LAST_SCHD_MSG_INDEX 0x00000F00
+#define AR_MCI_RX_LAST_SCHD_MSG_INDEX_S 8
+#define AR_MCI_RX_REMOTE_SLEEP 0x00001000
+#define AR_MCI_RX_REMOTE_SLEEP_S 12
+#define AR_MCI_RX_MCI_CLK_REQ 0x00002000
+#define AR_MCI_RX_MCI_CLK_REQ_S 13
+
+#define AR_MCI_CONT_STATUS 0x1848
+#define AR_MCI_CONT_RSSI_POWER 0x000000FF
+#define AR_MCI_CONT_RSSI_POWER_S 0
+#define AR_MCI_CONT_RRIORITY 0x0000FF00
+#define AR_MCI_CONT_RRIORITY_S 8
+#define AR_MCI_CONT_TXRX 0x00010000
+#define AR_MCI_CONT_TXRX_S 16
+
+#define AR_MCI_BT_PRI0 0x184c
+#define AR_MCI_BT_PRI1 0x1850
+#define AR_MCI_BT_PRI2 0x1854
+#define AR_MCI_BT_PRI3 0x1858
+#define AR_MCI_BT_PRI 0x185c
+#define AR_MCI_WL_FREQ0 0x1860
+#define AR_MCI_WL_FREQ1 0x1864
+#define AR_MCI_WL_FREQ2 0x1868
+#define AR_MCI_GAIN 0x186c
+#define AR_MCI_WBTIMER1 0x1870
+#define AR_MCI_WBTIMER2 0x1874
+#define AR_MCI_WBTIMER3 0x1878
+#define AR_MCI_WBTIMER4 0x187c
+#define AR_MCI_MAXGAIN 0x1880
+#define AR_MCI_HW_SCHD_TBL_CTL 0x1884
+#define AR_MCI_HW_SCHD_TBL_D0 0x1888
+#define AR_MCI_HW_SCHD_TBL_D1 0x188c
+#define AR_MCI_HW_SCHD_TBL_D2 0x1890
+#define AR_MCI_HW_SCHD_TBL_D3 0x1894
+#define AR_MCI_TX_PAYLOAD0 0x1898
+#define AR_MCI_TX_PAYLOAD1 0x189c
+#define AR_MCI_TX_PAYLOAD2 0x18a0
+#define AR_MCI_TX_PAYLOAD3 0x18a4
+#define AR_BTCOEX_WBTIMER 0x18a8
+
+#define AR_BTCOEX_CTRL 0x18ac
+#define AR_BTCOEX_CTRL_AR9462_MODE 0x00000001
+#define AR_BTCOEX_CTRL_AR9462_MODE_S 0
+#define AR_BTCOEX_CTRL_WBTIMER_EN 0x00000002
+#define AR_BTCOEX_CTRL_WBTIMER_EN_S 1
+#define AR_BTCOEX_CTRL_MCI_MODE_EN 0x00000004
+#define AR_BTCOEX_CTRL_MCI_MODE_EN_S 2
+#define AR_BTCOEX_CTRL_LNA_SHARED 0x00000008
+#define AR_BTCOEX_CTRL_LNA_SHARED_S 3
+#define AR_BTCOEX_CTRL_PA_SHARED 0x00000010
+#define AR_BTCOEX_CTRL_PA_SHARED_S 4
+#define AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN 0x00000020
+#define AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN_S 5
+#define AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN 0x00000040
+#define AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN_S 6
+#define AR_BTCOEX_CTRL_NUM_ANTENNAS 0x00000180
+#define AR_BTCOEX_CTRL_NUM_ANTENNAS_S 7
+#define AR_BTCOEX_CTRL_RX_CHAIN_MASK 0x00000E00
+#define AR_BTCOEX_CTRL_RX_CHAIN_MASK_S 9
+#define AR_BTCOEX_CTRL_AGGR_THRESH 0x00007000
+#define AR_BTCOEX_CTRL_AGGR_THRESH_S 12
+#define AR_BTCOEX_CTRL_1_CHAIN_BCN 0x00080000
+#define AR_BTCOEX_CTRL_1_CHAIN_BCN_S 19
+#define AR_BTCOEX_CTRL_1_CHAIN_ACK 0x00100000
+#define AR_BTCOEX_CTRL_1_CHAIN_ACK_S 20
+#define AR_BTCOEX_CTRL_WAIT_BA_MARGIN 0x1FE00000
+#define AR_BTCOEX_CTRL_WAIT_BA_MARGIN_S 28
+#define AR_BTCOEX_CTRL_REDUCE_TXPWR 0x20000000
+#define AR_BTCOEX_CTRL_REDUCE_TXPWR_S 29
+#define AR_BTCOEX_CTRL_SPDT_ENABLE_10 0x40000000
+#define AR_BTCOEX_CTRL_SPDT_ENABLE_10_S 30
+#define AR_BTCOEX_CTRL_SPDT_POLARITY 0x80000000
+#define AR_BTCOEX_CTRL_SPDT_POLARITY_S 31
+
+#define AR_BTCOEX_WL_WEIGHTS0 0x18b0
+#define AR_BTCOEX_WL_WEIGHTS1 0x18b4
+#define AR_BTCOEX_WL_WEIGHTS2 0x18b8
+#define AR_BTCOEX_WL_WEIGHTS3 0x18bc
+#define AR_BTCOEX_MAX_TXPWR(_x) (0x18c0 + ((_x) << 2))
+#define AR_BTCOEX_WL_LNA 0x1940
+#define AR_BTCOEX_RFGAIN_CTRL 0x1944
+
+#define AR_BTCOEX_CTRL2 0x1948
+#define AR_BTCOEX_CTRL2_TXPWR_THRESH 0x0007F800
+#define AR_BTCOEX_CTRL2_TXPWR_THRESH_S 11
+#define AR_BTCOEX_CTRL2_TX_CHAIN_MASK 0x00380000
+#define AR_BTCOEX_CTRL2_TX_CHAIN_MASK_S 19
+#define AR_BTCOEX_CTRL2_RX_DEWEIGHT 0x00400000
+#define AR_BTCOEX_CTRL2_RX_DEWEIGHT_S 22
+#define AR_BTCOEX_CTRL2_GPIO_OBS_SEL 0x00800000
+#define AR_BTCOEX_CTRL2_GPIO_OBS_SEL_S 23
+#define AR_BTCOEX_CTRL2_MAC_BB_OBS_SEL 0x01000000
+#define AR_BTCOEX_CTRL2_MAC_BB_OBS_SEL_S 24
+#define AR_BTCOEX_CTRL2_DESC_BASED_TXPWR_ENABLE 0x02000000
+#define AR_BTCOEX_CTRL2_DESC_BASED_TXPWR_ENABLE_S 25
+
+#define AR_BTCOEX_CTRL_SPDT_ENABLE 0x00000001
+#define AR_BTCOEX_CTRL_SPDT_ENABLE_S 0
+#define AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL 0x00000002
+#define AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL_S 1
+#define AR_BTCOEX_CTRL_USE_LATCHED_BT_ANT 0x00000004
+#define AR_BTCOEX_CTRL_USE_LATCHED_BT_ANT_S 2
+#define AR_GLB_WLAN_UART_INTF_EN 0x00020000
+#define AR_GLB_WLAN_UART_INTF_EN_S 17
+#define AR_GLB_DS_JTAG_DISABLE 0x00040000
+#define AR_GLB_DS_JTAG_DISABLE_S 18
+
+#define AR_BTCOEX_RC 0x194c
+#define AR_BTCOEX_MAX_RFGAIN(_x) (0x1950 + ((_x) << 2))
+#define AR_BTCOEX_DBG 0x1a50
+#define AR_MCI_LAST_HW_MSG_HDR 0x1a54
+#define AR_MCI_LAST_HW_MSG_BDY 0x1a58
+
+#define AR_MCI_SCHD_TABLE_2 0x1a5c
+#define AR_MCI_SCHD_TABLE_2_MEM_BASED 0x00000001
+#define AR_MCI_SCHD_TABLE_2_MEM_BASED_S 0
+#define AR_MCI_SCHD_TABLE_2_HW_BASED 0x00000002
+#define AR_MCI_SCHD_TABLE_2_HW_BASED_S 1
+
+#define AR_BTCOEX_CTRL3 0x1a60
+#define AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT 0x00000fff
+#define AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT_S 0
+
#endif
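The new MCI and BTCOEX definitions above keep ath9k's usual convention of pairing each field mask with a matching _S shift constant, so fields can be packed and extracted with generic helpers. The MS()/SM() names below follow the common ath9k idiom but are shown here only as a sketch, and the REG_READ() call is assumed:

  #define MS(_v, _f)      (((_v) & (_f)) >> (_f##_S))   /* mask, then shift out */
  #define SM(_v, _f)      (((_v) << (_f##_S)) & (_f))   /* shift in, then mask  */

  u32 cmd0 = REG_READ(ah, AR_MCI_COMMAND0);
  u8 hdr = MS(cmd0, AR_MCI_COMMAND0_HEADER);            /* bits 7:0  */
  u8 len = MS(cmd0, AR_MCI_COMMAND0_LEN);               /* bits 12:8 */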
diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c
index 35422fc1f2ce..65c8894c5f81 100644
--- a/drivers/net/wireless/ath/ath9k/wmi.c
+++ b/drivers/net/wireless/ath/ath9k/wmi.c
@@ -187,7 +187,7 @@ void ath9k_fatal_work(struct work_struct *work)
fatal_work);
struct ath_common *common = ath9k_hw_common(priv->ah);
- ath_dbg(common, ATH_DBG_FATAL, "FATAL Event received, resetting device\n");
+ ath_dbg(common, FATAL, "FATAL Event received, resetting device\n");
ath9k_htc_reset(priv);
}
@@ -330,8 +330,7 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
time_left = wait_for_completion_timeout(&wmi->cmd_wait, timeout);
if (!time_left) {
- ath_dbg(common, ATH_DBG_WMI,
- "Timeout waiting for WMI command: %s\n",
+ ath_dbg(common, WMI, "Timeout waiting for WMI command: %s\n",
wmi_cmd_to_name(cmd_id));
mutex_unlock(&wmi->op_mutex);
return -ETIMEDOUT;
@@ -342,8 +341,7 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
return 0;
out:
- ath_dbg(common, ATH_DBG_WMI,
- "WMI failure for: %s\n", wmi_cmd_to_name(cmd_id));
+ ath_dbg(common, WMI, "WMI failure for: %s\n", wmi_cmd_to_name(cmd_id));
mutex_unlock(&wmi->op_mutex);
kfree_skb(skb);
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 03b0a651a591..3182408ffe35 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -53,7 +53,7 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
int tx_flags, struct ath_txq *txq);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
struct ath_txq *txq, struct list_head *bf_q,
- struct ath_tx_status *ts, int txok, int sendbar);
+ struct ath_tx_status *ts, int txok);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
struct list_head *head, bool internal);
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
@@ -104,6 +104,32 @@ static int ath_max_4ms_framelen[4][32] = {
/* Aggregation logic */
/*********************/
+static void ath_txq_lock(struct ath_softc *sc, struct ath_txq *txq)
+ __acquires(&txq->axq_lock)
+{
+ spin_lock_bh(&txq->axq_lock);
+}
+
+static void ath_txq_unlock(struct ath_softc *sc, struct ath_txq *txq)
+ __releases(&txq->axq_lock)
+{
+ spin_unlock_bh(&txq->axq_lock);
+}
+
+static void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq)
+ __releases(&txq->axq_lock)
+{
+ struct sk_buff_head q;
+ struct sk_buff *skb;
+
+ __skb_queue_head_init(&q);
+ skb_queue_splice_init(&txq->complete_q, &q);
+ spin_unlock_bh(&txq->axq_lock);
+
+ while ((skb = __skb_dequeue(&q)))
+ ieee80211_tx_status(sc->hw, skb);
+}
+
static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
struct ath_atx_ac *ac = tid->ac;
@@ -130,7 +156,7 @@ static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
WARN_ON(!tid->paused);
- spin_lock_bh(&txq->axq_lock);
+ ath_txq_lock(sc, txq);
tid->paused = false;
if (skb_queue_empty(&tid->buf_q))
@@ -139,7 +165,7 @@ static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
ath_tx_queue_tid(txq, tid);
ath_txq_schedule(sc, txq);
unlock:
- spin_unlock_bh(&txq->axq_lock);
+ ath_txq_unlock_complete(sc, txq);
}
static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
@@ -150,6 +176,12 @@ static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
}
+static void ath_send_bar(struct ath_atx_tid *tid, u16 seqno)
+{
+ ieee80211_send_bar(tid->an->vif, tid->an->sta->addr, tid->tidno,
+ seqno << IEEE80211_SEQ_SEQ_SHIFT);
+}
+
static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
struct ath_txq *txq = tid->ac->txq;
@@ -158,28 +190,36 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
struct list_head bf_head;
struct ath_tx_status ts;
struct ath_frame_info *fi;
+ bool sendbar = false;
INIT_LIST_HEAD(&bf_head);
memset(&ts, 0, sizeof(ts));
- spin_lock_bh(&txq->axq_lock);
while ((skb = __skb_dequeue(&tid->buf_q))) {
fi = get_frame_info(skb);
bf = fi->bf;
- spin_unlock_bh(&txq->axq_lock);
if (bf && fi->retries) {
list_add_tail(&bf->list, &bf_head);
ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
- ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 1);
+ ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
+ sendbar = true;
} else {
ath_tx_send_normal(sc, txq, NULL, skb);
}
- spin_lock_bh(&txq->axq_lock);
}
- spin_unlock_bh(&txq->axq_lock);
+ if (tid->baw_head == tid->baw_tail) {
+ tid->state &= ~AGGR_ADDBA_COMPLETE;
+ tid->state &= ~AGGR_CLEANUP;
+ }
+
+ if (sendbar) {
+ ath_txq_unlock(sc, txq);
+ ath_send_bar(tid, tid->seq_start);
+ ath_txq_lock(sc, txq);
+ }
}
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
@@ -195,6 +235,8 @@ static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
INCR(tid->seq_start, IEEE80211_SEQ_MAX);
INCR(tid->baw_head, ATH_TID_MAX_BUFS);
+ if (tid->bar_index >= 0)
+ tid->bar_index--;
}
}
@@ -238,9 +280,7 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
bf = fi->bf;
if (!bf) {
- spin_unlock(&txq->axq_lock);
ath_tx_complete(sc, skb, ATH_TX_ERROR, txq);
- spin_lock(&txq->axq_lock);
continue;
}
@@ -249,24 +289,26 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
if (fi->retries)
ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
- spin_unlock(&txq->axq_lock);
- ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
- spin_lock(&txq->axq_lock);
+ ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
}
tid->seq_next = tid->seq_start;
tid->baw_tail = tid->baw_head;
+ tid->bar_index = -1;
}
static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
- struct sk_buff *skb)
+ struct sk_buff *skb, int count)
{
struct ath_frame_info *fi = get_frame_info(skb);
struct ath_buf *bf = fi->bf;
struct ieee80211_hdr *hdr;
+ int prev = fi->retries;
TX_STAT_INC(txq->axq_qnum, a_retries);
- if (fi->retries++ > 0)
+ fi->retries += count;
+
+ if (prev > 0)
return;
hdr = (struct ieee80211_hdr *)skb->data;
@@ -365,7 +407,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
struct list_head bf_head;
struct sk_buff_head bf_pending;
- u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
+ u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0, seq_first;
u32 ba[WME_BA_BMP_SIZE >> 5];
int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
bool rc_update = true;
@@ -374,6 +416,8 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
int nframes;
u8 tidno;
bool flush = !!(ts->ts_status & ATH9K_TX_FLUSH);
+ int i, retries;
+ int bar_index = -1;
skb = bf->bf_mpdu;
hdr = (struct ieee80211_hdr *)skb->data;
@@ -382,6 +426,10 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
memcpy(rates, tx_info->control.rates, sizeof(rates));
+ retries = ts->ts_longretry + 1;
+ for (i = 0; i < ts->ts_rateindex; i++)
+ retries += rates[i].count;
+
rcu_read_lock();
sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
@@ -395,8 +443,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
if (!bf->bf_stale || bf_next != NULL)
list_move_tail(&bf->list, &bf_head);
- ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
- 0, 0);
+ ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 0);
bf = bf_next;
}
@@ -406,6 +453,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
an = (struct ath_node *)sta->drv_priv;
tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
tid = ATH_AN_2_TID(an, tidno);
+ seq_first = tid->seq_start;
/*
* The hardware occasionally sends a tx status for the wrong TID.
@@ -455,25 +503,25 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
} else if (!isaggr && txok) {
/* transmit completion */
acked_cnt++;
+ } else if ((tid->state & AGGR_CLEANUP) || !retry) {
+ /*
+ * cleanup in progress, just fail
+ * the un-acked sub-frames
+ */
+ txfail = 1;
+ } else if (flush) {
+ txpending = 1;
+ } else if (fi->retries < ATH_MAX_SW_RETRIES) {
+ if (txok || !an->sleeping)
+ ath_tx_set_retry(sc, txq, bf->bf_mpdu,
+ retries);
+
+ txpending = 1;
} else {
- if ((tid->state & AGGR_CLEANUP) || !retry) {
- /*
- * cleanup in progress, just fail
- * the un-acked sub-frames
- */
- txfail = 1;
- } else if (flush) {
- txpending = 1;
- } else if (fi->retries < ATH_MAX_SW_RETRIES) {
- if (txok || !an->sleeping)
- ath_tx_set_retry(sc, txq, bf->bf_mpdu);
-
- txpending = 1;
- } else {
- txfail = 1;
- sendbar = 1;
- txfail_cnt++;
- }
+ txfail = 1;
+ txfail_cnt++;
+ bar_index = max_t(int, bar_index,
+ ATH_BA_INDEX(seq_first, seqno));
}
/*
@@ -490,9 +538,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
* complete the acked-ones/xretried ones; update
* block-ack window
*/
- spin_lock_bh(&txq->axq_lock);
ath_tx_update_baw(sc, tid, seqno);
- spin_unlock_bh(&txq->axq_lock);
if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
memcpy(tx_info->control.rates, rates, sizeof(rates));
@@ -501,33 +547,30 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
}
ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
- !txfail, sendbar);
+ !txfail);
} else {
/* retry the un-acked ones */
- if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
- if (bf->bf_next == NULL && bf_last->bf_stale) {
- struct ath_buf *tbf;
-
- tbf = ath_clone_txbuf(sc, bf_last);
- /*
- * Update tx baw and complete the
- * frame with failed status if we
- * run out of tx buf.
- */
- if (!tbf) {
- spin_lock_bh(&txq->axq_lock);
- ath_tx_update_baw(sc, tid, seqno);
- spin_unlock_bh(&txq->axq_lock);
-
- ath_tx_complete_buf(sc, bf, txq,
- &bf_head,
- ts, 0,
- !flush);
- break;
- }
-
- fi->bf = tbf;
+ if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
+ bf->bf_next == NULL && bf_last->bf_stale) {
+ struct ath_buf *tbf;
+
+ tbf = ath_clone_txbuf(sc, bf_last);
+ /*
+ * Update tx baw and complete the
+ * frame with failed status if we
+ * run out of tx buf.
+ */
+ if (!tbf) {
+ ath_tx_update_baw(sc, tid, seqno);
+
+ ath_tx_complete_buf(sc, bf, txq,
+ &bf_head, ts, 0);
+ bar_index = max_t(int, bar_index,
+ ATH_BA_INDEX(seq_first, seqno));
+ break;
}
+
+ fi->bf = tbf;
}
/*
@@ -545,7 +588,6 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
if (an->sleeping)
ieee80211_sta_set_buffered(sta, tid->tidno, true);
- spin_lock_bh(&txq->axq_lock);
skb_queue_splice(&bf_pending, &tid->buf_q);
if (!an->sleeping) {
ath_tx_queue_tid(txq, tid);
@@ -553,18 +595,22 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
if (ts->ts_status & ATH9K_TXERR_FILT)
tid->ac->clear_ps_filter = true;
}
- spin_unlock_bh(&txq->axq_lock);
}
- if (tid->state & AGGR_CLEANUP) {
- ath_tx_flush_tid(sc, tid);
+ if (bar_index >= 0) {
+ u16 bar_seq = ATH_BA_INDEX2SEQ(seq_first, bar_index);
- if (tid->baw_head == tid->baw_tail) {
- tid->state &= ~AGGR_ADDBA_COMPLETE;
- tid->state &= ~AGGR_CLEANUP;
- }
+ if (BAW_WITHIN(tid->seq_start, tid->baw_size, bar_seq))
+ tid->bar_index = ATH_BA_INDEX(tid->seq_start, bar_seq);
+
+ ath_txq_unlock(sc, txq);
+ ath_send_bar(tid, ATH_BA_INDEX2SEQ(seq_first, bar_index + 1));
+ ath_txq_lock(sc, txq);
}
+ if (tid->state & AGGR_CLEANUP)
+ ath_tx_flush_tid(sc, tid);
+
rcu_read_unlock();
if (needreset) {
@@ -601,6 +647,7 @@ static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
struct sk_buff *skb;
struct ieee80211_tx_info *tx_info;
struct ieee80211_tx_rate *rates;
+ struct ath_mci_profile *mci = &sc->btcoex.mci;
u32 max_4ms_framelen, frmlen;
u16 aggr_limit, legacy = 0;
int i;
@@ -617,24 +664,26 @@ static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;
for (i = 0; i < 4; i++) {
- if (rates[i].count) {
- int modeidx;
- if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
- legacy = 1;
- break;
- }
-
- if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
- modeidx = MCS_HT40;
- else
- modeidx = MCS_HT20;
+ int modeidx;
- if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
- modeidx++;
+ if (!rates[i].count)
+ continue;
- frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
- max_4ms_framelen = min(max_4ms_framelen, frmlen);
+ if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
+ legacy = 1;
+ break;
}
+
+ if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ modeidx = MCS_HT40;
+ else
+ modeidx = MCS_HT20;
+
+ if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
+ modeidx++;
+
+ frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
+ max_4ms_framelen = min(max_4ms_framelen, frmlen);
}
/*
@@ -645,7 +694,9 @@ static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
return 0;
- if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
+ if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI) && mci->aggr_limit)
+ aggr_limit = (max_4ms_framelen * mci->aggr_limit) >> 4;
+ else if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
aggr_limit = min((max_4ms_framelen * 3) / 8,
(u32)ATH_AMPDU_LIMIT_MAX);
else
@@ -768,8 +819,6 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR;
seqno = bf->bf_state.seqno;
- if (!bf_first)
- bf_first = bf;
/* do not step over block-ack window */
if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) {
@@ -777,6 +826,21 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
break;
}
+ if (tid->bar_index > ATH_BA_INDEX(tid->seq_start, seqno)) {
+ struct ath_tx_status ts = {};
+ struct list_head bf_head;
+
+ INIT_LIST_HEAD(&bf_head);
+ list_add(&bf->list, &bf_head);
+ __skb_unlink(skb, &tid->buf_q);
+ ath_tx_update_baw(sc, tid, seqno);
+ ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
+ continue;
+ }
+
+ if (!bf_first)
+ bf_first = bf;
+
if (!rl) {
aggr_limit = ath_lookup_rate(sc, bf, tid);
rl = 1;
@@ -1119,6 +1183,7 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
txtid->state |= AGGR_ADDBA_PROGRESS;
txtid->paused = true;
*ssn = txtid->seq_start = txtid->seq_next;
+ txtid->bar_index = -1;
memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
txtid->baw_head = txtid->baw_tail = 0;
@@ -1140,7 +1205,7 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
return;
}
- spin_lock_bh(&txq->axq_lock);
+ ath_txq_lock(sc, txq);
txtid->paused = true;
/*
@@ -1153,9 +1218,9 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
txtid->state |= AGGR_CLEANUP;
else
txtid->state &= ~AGGR_ADDBA_COMPLETE;
- spin_unlock_bh(&txq->axq_lock);
ath_tx_flush_tid(sc, txtid);
+ ath_txq_unlock_complete(sc, txq);
}
void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
@@ -1176,7 +1241,7 @@ void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
ac = tid->ac;
txq = ac->txq;
- spin_lock_bh(&txq->axq_lock);
+ ath_txq_lock(sc, txq);
buffered = !skb_queue_empty(&tid->buf_q);
@@ -1188,7 +1253,7 @@ void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
list_del(&ac->list);
}
- spin_unlock_bh(&txq->axq_lock);
+ ath_txq_unlock(sc, txq);
ieee80211_sta_set_buffered(sta, tidno, buffered);
}
@@ -1207,7 +1272,7 @@ void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
ac = tid->ac;
txq = ac->txq;
- spin_lock_bh(&txq->axq_lock);
+ ath_txq_lock(sc, txq);
ac->clear_ps_filter = true;
if (!skb_queue_empty(&tid->buf_q) && !tid->paused) {
@@ -1215,7 +1280,7 @@ void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
ath_txq_schedule(sc, txq);
}
- spin_unlock_bh(&txq->axq_lock);
+ ath_txq_unlock_complete(sc, txq);
}
}
@@ -1315,6 +1380,7 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
txq->axq_qnum = axq_qnum;
txq->mac80211_qnum = -1;
txq->axq_link = NULL;
+ __skb_queue_head_init(&txq->complete_q);
INIT_LIST_HEAD(&txq->axq_q);
INIT_LIST_HEAD(&txq->axq_acq);
spin_lock_init(&txq->axq_lock);
@@ -1397,8 +1463,6 @@ static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
struct list_head *list, bool retry_tx)
- __releases(txq->axq_lock)
- __acquires(txq->axq_lock)
{
struct ath_buf *bf, *lastbf;
struct list_head bf_head;
@@ -1425,13 +1489,11 @@ static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
if (bf_is_ampdu_not_probing(bf))
txq->axq_ampdu_depth--;
- spin_unlock_bh(&txq->axq_lock);
if (bf_isampdu(bf))
ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
retry_tx);
else
- ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
- spin_lock_bh(&txq->axq_lock);
+ ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
}
}
@@ -1443,7 +1505,8 @@ static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
*/
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
{
- spin_lock_bh(&txq->axq_lock);
+ ath_txq_lock(sc, txq);
+
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
int idx = txq->txq_tailidx;
@@ -1464,7 +1527,7 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
if ((sc->sc_flags & SC_OP_TXAGGR) && !retry_tx)
ath_txq_drain_pending_buffers(sc, txq);
- spin_unlock_bh(&txq->axq_lock);
+ ath_txq_unlock_complete(sc, txq);
}
bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
@@ -1558,11 +1621,9 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
break;
}
- if (!list_empty(&ac->tid_q)) {
- if (!ac->sched) {
- ac->sched = true;
- list_add_tail(&ac->list, &txq->axq_acq);
- }
+ if (!list_empty(&ac->tid_q) && !ac->sched) {
+ ac->sched = true;
+ list_add_tail(&ac->list, &txq->axq_acq);
}
if (ac == last_ac ||
@@ -1600,8 +1661,8 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
bf = list_first_entry(head, struct ath_buf, list);
bf_last = list_entry(head->prev, struct ath_buf, list);
- ath_dbg(common, ATH_DBG_QUEUE,
- "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);
+ ath_dbg(common, QUEUE, "qnum: %d, txq depth: %d\n",
+ txq->axq_qnum, txq->axq_depth);
if (edma && list_empty(&txq->txq_fifo[txq->txq_headidx])) {
list_splice_tail_init(head, &txq->txq_fifo[txq->txq_headidx]);
@@ -1612,8 +1673,7 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
if (txq->axq_link) {
ath9k_hw_set_desc_link(ah, txq->axq_link, bf->bf_daddr);
- ath_dbg(common, ATH_DBG_XMIT,
- "link[%u] (%p)=%llx (%p)\n",
+ ath_dbg(common, XMIT, "link[%u] (%p)=%llx (%p)\n",
txq->axq_qnum, txq->axq_link,
ito64(bf->bf_daddr), bf->bf_desc);
} else if (!edma)
@@ -1625,7 +1685,7 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
if (puttxbuf) {
TX_STAT_INC(txq->axq_qnum, puttxbuf);
ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
- ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
+ ath_dbg(common, XMIT, "TXDP[%u] = %llx (%p)\n",
txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
}
@@ -1705,10 +1765,6 @@ static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
list_add_tail(&bf->list, &bf_head);
bf->bf_state.bf_type = 0;
- /* update starting sequence number for subsequent ADDBA request */
- if (tid)
- INCR(tid->seq_start, IEEE80211_SEQ_MAX);
-
bf->bf_lastbf = bf;
ath_tx_fill_desc(sc, bf, txq, fi->framelen);
ath_tx_txqaddbuf(sc, txq, &bf_head, false);
@@ -1771,7 +1827,7 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
bf = ath_tx_get_buffer(sc);
if (!bf) {
- ath_dbg(common, ATH_DBG_XMIT, "TX buffers are full\n");
+ ath_dbg(common, XMIT, "TX buffers are full\n");
goto error;
}
@@ -1816,7 +1872,6 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
struct ath_buf *bf;
u8 tidno;
- spin_lock_bh(&txctl->txq->axq_lock);
if ((sc->sc_flags & SC_OP_TXAGGR) && txctl->an &&
ieee80211_is_data_qos(hdr->frame_control)) {
tidno = ieee80211_get_qos_ctl(hdr)[0] &
@@ -1835,7 +1890,7 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
} else {
bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
if (!bf)
- goto out;
+ return;
bf->bf_state.bfs_paprd = txctl->paprd;
@@ -1844,9 +1899,6 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
ath_tx_send_normal(sc, txctl->txq, tid, skb);
}
-
-out:
- spin_unlock_bh(&txctl->txq->axq_lock);
}
/* Upon failure caller should free skb */
@@ -1907,15 +1959,18 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
*/
q = skb_get_queue_mapping(skb);
- spin_lock_bh(&txq->axq_lock);
+
+ ath_txq_lock(sc, txq);
if (txq == sc->tx.txq_map[q] &&
++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
ieee80211_stop_queue(sc->hw, q);
- txq->stopped = 1;
+ txq->stopped = true;
}
- spin_unlock_bh(&txq->axq_lock);
ath_tx_start_dma(sc, skb, txctl);
+
+ ath_txq_unlock(sc, txq);
+
return 0;
}
@@ -1926,16 +1981,12 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
int tx_flags, struct ath_txq *txq)
{
- struct ieee80211_hw *hw = sc->hw;
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
int q, padpos, padsize;
- ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
-
- if (tx_flags & ATH_TX_BAR)
- tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
+ ath_dbg(common, XMIT, "TX complete: skb: %p\n", skb);
if (!(tx_flags & ATH_TX_ERROR))
/* Frame was ACKed */
@@ -1952,9 +2003,9 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
skb_pull(skb, padsize);
}
- if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
+ if ((sc->ps_flags & PS_WAIT_FOR_TX_ACK) && !txq->axq_depth) {
sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
- ath_dbg(common, ATH_DBG_PS,
+ ath_dbg(common, PS,
"Going back to sleep after having received TX status (0x%lx)\n",
sc->ps_flags & (PS_WAIT_FOR_BEACON |
PS_WAIT_FOR_CAB |
@@ -1964,32 +2015,27 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
q = skb_get_queue_mapping(skb);
if (txq == sc->tx.txq_map[q]) {
- spin_lock_bh(&txq->axq_lock);
if (WARN_ON(--txq->pending_frames < 0))
txq->pending_frames = 0;
if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
ieee80211_wake_queue(sc->hw, q);
- txq->stopped = 0;
+ txq->stopped = false;
}
- spin_unlock_bh(&txq->axq_lock);
}
- ieee80211_tx_status(hw, skb);
+ __skb_queue_tail(&txq->complete_q, skb);
}
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
struct ath_txq *txq, struct list_head *bf_q,
- struct ath_tx_status *ts, int txok, int sendbar)
+ struct ath_tx_status *ts, int txok)
{
struct sk_buff *skb = bf->bf_mpdu;
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
unsigned long flags;
int tx_flags = 0;
- if (sendbar)
- tx_flags = ATH_TX_BAR;
-
if (!txok)
tx_flags |= ATH_TX_ERROR;
@@ -2081,8 +2127,6 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
struct ath_tx_status *ts, struct ath_buf *bf,
struct list_head *bf_head)
- __releases(txq->axq_lock)
- __acquires(txq->axq_lock)
{
int txok;
@@ -2092,16 +2136,12 @@ static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
if (bf_is_ampdu_not_probing(bf))
txq->axq_ampdu_depth--;
- spin_unlock_bh(&txq->axq_lock);
-
if (!bf_isampdu(bf)) {
ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok);
- ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok, 0);
+ ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok);
} else
ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok, true);
- spin_lock_bh(&txq->axq_lock);
-
if (sc->sc_flags & SC_OP_TXAGGR)
ath_txq_schedule(sc, txq);
}
@@ -2116,11 +2156,11 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
struct ath_tx_status ts;
int status;
- ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
+ ath_dbg(common, QUEUE, "tx queue %d (%x), link %p\n",
txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
txq->axq_link);
- spin_lock_bh(&txq->axq_lock);
+ ath_txq_lock(sc, txq);
for (;;) {
if (work_pending(&sc->hw_reset_work))
break;
@@ -2179,7 +2219,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
}
- spin_unlock_bh(&txq->axq_lock);
+ ath_txq_unlock_complete(sc, txq);
}
static void ath_tx_complete_poll_work(struct work_struct *work)
@@ -2196,21 +2236,21 @@ static void ath_tx_complete_poll_work(struct work_struct *work)
for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
if (ATH_TXQ_SETUP(sc, i)) {
txq = &sc->tx.txq[i];
- spin_lock_bh(&txq->axq_lock);
+ ath_txq_lock(sc, txq);
if (txq->axq_depth) {
if (txq->axq_tx_inprogress) {
needreset = true;
- spin_unlock_bh(&txq->axq_lock);
+ ath_txq_unlock(sc, txq);
break;
} else {
txq->axq_tx_inprogress = true;
}
}
- spin_unlock_bh(&txq->axq_lock);
+ ath_txq_unlock_complete(sc, txq);
}
if (needreset) {
- ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
+ ath_dbg(ath9k_hw_common(sc->sc_ah), RESET,
"tx hung, resetting the chip\n");
RESET_STAT_INC(sc, RESET_TYPE_TX_HANG);
ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
@@ -2253,8 +2293,7 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
if (status == -EINPROGRESS)
break;
if (status == -EIO) {
- ath_dbg(common, ATH_DBG_XMIT,
- "Error processing tx status\n");
+ ath_dbg(common, XMIT, "Error processing tx status\n");
break;
}
@@ -2264,10 +2303,10 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
txq = &sc->tx.txq[ts.qid];
- spin_lock_bh(&txq->axq_lock);
+ ath_txq_lock(sc, txq);
if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
- spin_unlock_bh(&txq->axq_lock);
+ ath_txq_unlock(sc, txq);
return;
}
@@ -2293,7 +2332,7 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
}
ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
- spin_unlock_bh(&txq->axq_lock);
+ ath_txq_unlock_complete(sc, txq);
}
}
@@ -2431,7 +2470,7 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
ac = tid->ac;
txq = ac->txq;
- spin_lock_bh(&txq->axq_lock);
+ ath_txq_lock(sc, txq);
if (tid->sched) {
list_del(&tid->list);
@@ -2447,6 +2486,6 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
tid->state &= ~AGGR_ADDBA_COMPLETE;
tid->state &= ~AGGR_CLEANUP;
- spin_unlock_bh(&txq->axq_lock);
+ ath_txq_unlock(sc, txq);
}
}
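The ath_txq_lock()/ath_txq_unlock_complete() helpers introduced at the top of this file collect finished frames on txq->complete_q while axq_lock is held and only hand them to mac80211 after the lock is dropped, so ieee80211_tx_status() is never called under the queue spinlock. The same deferred-completion idiom, reduced to a sketch with placeholder names (my_txq and report_tx_status() are not real ath9k symbols):

  static void txq_unlock_complete(struct my_txq *txq)
  {
          struct sk_buff_head q;
          struct sk_buff *skb;

          __skb_queue_head_init(&q);
          /* Steal everything queued for completion while still locked. */
          skb_queue_splice_init(&txq->complete_q, &q);
          spin_unlock_bh(&txq->lock);

          /* Report status outside the lock. */
          while ((skb = __skb_dequeue(&q)))
                  report_tx_status(skb);          /* e.g. ieee80211_tx_status() */
  }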
diff --git a/drivers/net/wireless/ath/carl9170/debug.c b/drivers/net/wireless/ath/carl9170/debug.c
index de57f90e1d5f..3c164226687f 100644
--- a/drivers/net/wireless/ath/carl9170/debug.c
+++ b/drivers/net/wireless/ath/carl9170/debug.c
@@ -56,7 +56,7 @@ static int carl9170_debugfs_open(struct inode *inode, struct file *file)
struct carl9170_debugfs_fops {
unsigned int read_bufsize;
- mode_t attr;
+ umode_t attr;
char *(*read)(struct ar9170 *ar, char *buf, size_t bufsize,
ssize_t *len);
ssize_t (*write)(struct ar9170 *aru, const char *buf, size_t size);
diff --git a/drivers/net/wireless/ath/carl9170/fw.c b/drivers/net/wireless/ath/carl9170/fw.c
index cba9d0435dc4..3de61adacd34 100644
--- a/drivers/net/wireless/ath/carl9170/fw.c
+++ b/drivers/net/wireless/ath/carl9170/fw.c
@@ -146,13 +146,15 @@ static bool valid_cpu_addr(const u32 address)
return false;
}
-static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
+static int carl9170_fw_checksum(struct ar9170 *ar, const __u8 *data,
+ size_t len)
{
const struct carl9170fw_otus_desc *otus_desc;
- const struct carl9170fw_chk_desc *chk_desc;
const struct carl9170fw_last_desc *last_desc;
- const struct carl9170fw_txsq_desc *txsq_desc;
- u16 if_comb_types;
+ const struct carl9170fw_chk_desc *chk_desc;
+ unsigned long fin, diff;
+ unsigned int dsc_len;
+ u32 crc32;
last_desc = carl9170_fw_find_desc(ar, LAST_MAGIC,
sizeof(*last_desc), CARL9170FW_LAST_DESC_CUR_VER);
@@ -170,36 +172,68 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
chk_desc = carl9170_fw_find_desc(ar, CHK_MAGIC,
sizeof(*chk_desc), CARL9170FW_CHK_DESC_CUR_VER);
- if (chk_desc) {
- unsigned long fin, diff;
- unsigned int dsc_len;
- u32 crc32;
+ if (!chk_desc) {
+ dev_warn(&ar->udev->dev, "Unprotected firmware image.\n");
+ return 0;
+ }
- dsc_len = min_t(unsigned int, len,
+ dsc_len = min_t(unsigned int, len,
(unsigned long)chk_desc - (unsigned long)otus_desc);
- fin = (unsigned long) last_desc + sizeof(*last_desc);
- diff = fin - (unsigned long) otus_desc;
+ fin = (unsigned long) last_desc + sizeof(*last_desc);
+ diff = fin - (unsigned long) otus_desc;
- if (diff < len)
- len -= diff;
+ if (diff < len)
+ len -= diff;
- if (len < 256)
- return -EIO;
+ if (len < 256)
+ return -EIO;
- crc32 = crc32_le(~0, data, len);
- if (cpu_to_le32(crc32) != chk_desc->fw_crc32) {
- dev_err(&ar->udev->dev, "fw checksum test failed.\n");
- return -ENOEXEC;
- }
+ crc32 = crc32_le(~0, data, len);
+ if (cpu_to_le32(crc32) != chk_desc->fw_crc32) {
+ dev_err(&ar->udev->dev, "fw checksum test failed.\n");
+ return -ENOEXEC;
+ }
+
+ crc32 = crc32_le(crc32, (void *)otus_desc, dsc_len);
+ if (cpu_to_le32(crc32) != chk_desc->hdr_crc32) {
+ dev_err(&ar->udev->dev, "descriptor check failed.\n");
+ return -EINVAL;
+ }
+ return 0;
+}
- crc32 = crc32_le(crc32, (void *)otus_desc, dsc_len);
- if (cpu_to_le32(crc32) != chk_desc->hdr_crc32) {
- dev_err(&ar->udev->dev, "descriptor check failed.\n");
+static int carl9170_fw_tx_sequence(struct ar9170 *ar)
+{
+ const struct carl9170fw_txsq_desc *txsq_desc;
+
+ txsq_desc = carl9170_fw_find_desc(ar, TXSQ_MAGIC, sizeof(*txsq_desc),
+ CARL9170FW_TXSQ_DESC_CUR_VER);
+ if (txsq_desc) {
+ ar->fw.tx_seq_table = le32_to_cpu(txsq_desc->seq_table_addr);
+ if (!valid_cpu_addr(ar->fw.tx_seq_table))
return -EINVAL;
- }
} else {
- dev_warn(&ar->udev->dev, "Unprotected firmware image.\n");
+ ar->fw.tx_seq_table = 0;
+ }
+
+ return 0;
+}
+
+static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
+{
+ const struct carl9170fw_otus_desc *otus_desc;
+ int err;
+ u16 if_comb_types;
+
+ err = carl9170_fw_checksum(ar, data, len);
+ if (err)
+ return err;
+
+ otus_desc = carl9170_fw_find_desc(ar, OTUS_MAGIC,
+ sizeof(*otus_desc), CARL9170FW_OTUS_DESC_CUR_VER);
+ if (!otus_desc) {
+ return -ENODATA;
}
#define SUPP(feat) \
@@ -321,19 +355,8 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
ar->hw->wiphy->interface_modes |= if_comb_types;
- txsq_desc = carl9170_fw_find_desc(ar, TXSQ_MAGIC,
- sizeof(*txsq_desc), CARL9170FW_TXSQ_DESC_CUR_VER);
-
- if (txsq_desc) {
- ar->fw.tx_seq_table = le32_to_cpu(txsq_desc->seq_table_addr);
- if (!valid_cpu_addr(ar->fw.tx_seq_table))
- return -EINVAL;
- } else {
- ar->fw.tx_seq_table = 0;
- }
-
#undef SUPPORTED
- return 0;
+ return carl9170_fw_tx_sequence(ar);
}
static struct carl9170fw_desc_head *
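carl9170_fw_checksum() chains two crc32_le() passes -- one over the firmware payload, one continued over the descriptor area -- and compares each result against a little-endian value stored in the CHK descriptor. The endian handling is the noteworthy part: the CPU-order CRC is converted with cpu_to_le32() before the comparison, so the check behaves the same on big- and little-endian hosts. A minimal standalone sketch of the idiom; the struct layout here is made up for illustration:

  #include <linux/kernel.h>
  #include <linux/crc32.h>
  #include <linux/errno.h>

  struct chk_desc_example {               /* illustrative layout only */
          __le32 fw_crc32;
          __le32 hdr_crc32;
  };

  static int check_image(const u8 *payload, size_t plen,
                         const u8 *hdr, size_t hlen,
                         const struct chk_desc_example *chk)
  {
          u32 crc = crc32_le(~0, payload, plen);  /* seed with all-ones */

          if (cpu_to_le32(crc) != chk->fw_crc32)
                  return -ENOEXEC;

          crc = crc32_le(crc, hdr, hlen);         /* continue over the header */
          if (cpu_to_le32(crc) != chk->hdr_crc32)
                  return -EINVAL;

          return 0;
  }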
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index f06e0695d412..db774212161b 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -48,7 +48,7 @@
#include "carl9170.h"
#include "cmd.h"
-static int modparam_nohwcrypt;
+static bool modparam_nohwcrypt;
module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware crypto offload.");
@@ -446,7 +446,7 @@ static void carl9170_op_stop(struct ieee80211_hw *hw)
mutex_lock(&ar->mutex);
if (IS_ACCEPTING_CMD(ar)) {
- rcu_assign_pointer(ar->beacon_iter, NULL);
+ RCU_INIT_POINTER(ar->beacon_iter, NULL);
carl9170_led_set_state(ar, 0);
@@ -678,7 +678,7 @@ unlock:
vif_priv->active = false;
bitmap_release_region(&ar->vif_bitmap, vif_id, 0);
ar->vifs--;
- rcu_assign_pointer(ar->vif_priv[vif_id].vif, NULL);
+ RCU_INIT_POINTER(ar->vif_priv[vif_id].vif, NULL);
list_del_rcu(&vif_priv->list);
mutex_unlock(&ar->mutex);
synchronize_rcu();
@@ -716,7 +716,7 @@ static void carl9170_op_remove_interface(struct ieee80211_hw *hw,
WARN_ON(vif_priv->enable_beacon);
vif_priv->enable_beacon = false;
list_del_rcu(&vif_priv->list);
- rcu_assign_pointer(ar->vif_priv[id].vif, NULL);
+ RCU_INIT_POINTER(ar->vif_priv[id].vif, NULL);
if (vif == main_vif) {
rcu_read_unlock();
@@ -1258,7 +1258,7 @@ static int carl9170_op_sta_add(struct ieee80211_hw *hw,
}
for (i = 0; i < CARL9170_NUM_TID; i++)
- rcu_assign_pointer(sta_info->agg[i], NULL);
+ RCU_INIT_POINTER(sta_info->agg[i], NULL);
sta_info->ampdu_max_len = 1 << (3 + sta->ht_cap.ampdu_factor);
sta_info->ht_sta = true;
@@ -1285,7 +1285,7 @@ static int carl9170_op_sta_remove(struct ieee80211_hw *hw,
struct carl9170_sta_tid *tid_info;
tid_info = rcu_dereference(sta_info->agg[i]);
- rcu_assign_pointer(sta_info->agg[i], NULL);
+ RCU_INIT_POINTER(sta_info->agg[i], NULL);
if (!tid_info)
continue;
@@ -1398,7 +1398,7 @@ static int carl9170_op_ampdu_action(struct ieee80211_hw *hw,
spin_unlock_bh(&ar->tx_ampdu_list_lock);
}
- rcu_assign_pointer(sta_info->agg[tid], NULL);
+ RCU_INIT_POINTER(sta_info->agg[tid], NULL);
rcu_read_unlock();
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
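The rcu_assign_pointer() to RCU_INIT_POINTER() conversions in this file are all at sites that store NULL. rcu_assign_pointer() includes a write barrier so a newly published object is fully initialized before readers can dereference it; when the stored value is NULL, or the pointer is not yet visible to any reader, that barrier buys nothing and the lighter-weight initializer is the appropriate choice. Roughly (new_vif is a placeholder):

  /* Publishing a new, fully built object to RCU readers: barrier needed. */
  rcu_assign_pointer(ar->vif_priv[id].vif, new_vif);

  /* Tearing the pointer down, or initializing it before readers exist:
   * no ordering against earlier stores is required.
   */
  RCU_INIT_POINTER(ar->vif_priv[id].vif, NULL);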
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
index 59472e1605cd..d19a9ee9d057 100644
--- a/drivers/net/wireless/ath/carl9170/tx.c
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -314,7 +314,7 @@ static void carl9170_tx_release(struct kref *ref)
* feedback either [CTL_REQ_TX_STATUS not set]
*/
- dev_kfree_skb_any(skb);
+ ieee80211_free_txskb(ar->hw, skb);
return;
} else {
/*
@@ -1432,7 +1432,7 @@ void carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
err_free:
ar->tx_dropped++;
- dev_kfree_skb_any(skb);
+ ieee80211_free_txskb(ar->hw, skb);
}
void carl9170_tx_scheduler(struct ar9170 *ar)
diff --git a/drivers/net/wireless/ath/key.c b/drivers/net/wireless/ath/key.c
index 4cf7c5eb4813..0e81904956cf 100644
--- a/drivers/net/wireless/ath/key.c
+++ b/drivers/net/wireless/ath/key.c
@@ -143,7 +143,7 @@ static bool ath_hw_set_keycache_entry(struct ath_common *common, u16 entry,
break;
case ATH_CIPHER_AES_CCM:
if (!(common->crypt_caps & ATH_CRYPT_CAP_CIPHER_AESCCM)) {
- ath_dbg(common, ATH_DBG_ANY,
+ ath_dbg(common, ANY,
"AES-CCM not supported by this mac rev\n");
return false;
}
@@ -152,15 +152,15 @@ static bool ath_hw_set_keycache_entry(struct ath_common *common, u16 entry,
case ATH_CIPHER_TKIP:
keyType = AR_KEYTABLE_TYPE_TKIP;
if (entry + 64 >= common->keymax) {
- ath_dbg(common, ATH_DBG_ANY,
+ ath_dbg(common, ANY,
"entry %u inappropriate for TKIP\n", entry);
return false;
}
break;
case ATH_CIPHER_WEP:
if (k->kv_len < WLAN_KEY_LEN_WEP40) {
- ath_dbg(common, ATH_DBG_ANY,
- "WEP key length %u too small\n", k->kv_len);
+ ath_dbg(common, ANY, "WEP key length %u too small\n",
+ k->kv_len);
return false;
}
if (k->kv_len <= WLAN_KEY_LEN_WEP40)
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index 65ecb5bab25a..10dea37431b3 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -21,6 +21,8 @@
#include "regd.h"
#include "regd_common.h"
+static int __ath_regd_init(struct ath_regulatory *reg);
+
/*
* This is a set of common rules used by our world regulatory domains.
* We have 12 world regulatory domains. To save space we consolidate
@@ -347,10 +349,26 @@ static void ath_reg_apply_world_flags(struct wiphy *wiphy,
}
}
+static u16 ath_regd_find_country_by_name(char *alpha2)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(allCountries); i++) {
+ if (!memcmp(allCountries[i].isoName, alpha2, 2))
+ return allCountries[i].countryCode;
+ }
+
+ return -1;
+}
+
int ath_reg_notifier_apply(struct wiphy *wiphy,
struct regulatory_request *request,
struct ath_regulatory *reg)
{
+ struct ath_common *common = container_of(reg, struct ath_common,
+ regulatory);
+ u16 country_code;
+
/* We always apply this */
ath_reg_apply_radar_flags(wiphy);
@@ -363,14 +381,37 @@ int ath_reg_notifier_apply(struct wiphy *wiphy,
return 0;
switch (request->initiator) {
- case NL80211_REGDOM_SET_BY_DRIVER:
case NL80211_REGDOM_SET_BY_CORE:
+ /*
+ * If common->reg_world_copy is world roaming it means we *were*
+ * world roaming... so we now have to restore that data.
+ */
+ if (!ath_is_world_regd(&common->reg_world_copy))
+ break;
+
+ memcpy(reg, &common->reg_world_copy,
+ sizeof(struct ath_regulatory));
+ break;
+ case NL80211_REGDOM_SET_BY_DRIVER:
case NL80211_REGDOM_SET_BY_USER:
break;
case NL80211_REGDOM_SET_BY_COUNTRY_IE:
- if (ath_is_world_regd(reg))
- ath_reg_apply_world_flags(wiphy, request->initiator,
- reg);
+ if (!ath_is_world_regd(reg))
+ break;
+
+ country_code = ath_regd_find_country_by_name(request->alpha2);
+ if (country_code == (u16) -1)
+ break;
+
+ reg->current_rd = COUNTRY_ERD_FLAG;
+ reg->current_rd |= country_code;
+
+ printk(KERN_DEBUG "ath: regdomain 0x%0x updated by CountryIE\n",
+ reg->current_rd);
+ __ath_regd_init(reg);
+
+ ath_reg_apply_world_flags(wiphy, request->initiator, reg);
+
break;
}
@@ -508,11 +549,7 @@ static void ath_regd_sanitize(struct ath_regulatory *reg)
reg->current_rd = 0x64;
}
-int
-ath_regd_init(struct ath_regulatory *reg,
- struct wiphy *wiphy,
- int (*reg_notifier)(struct wiphy *wiphy,
- struct regulatory_request *request))
+static int __ath_regd_init(struct ath_regulatory *reg)
{
struct country_code_to_enum_rd *country = NULL;
u16 regdmn;
@@ -583,7 +620,29 @@ ath_regd_init(struct ath_regulatory *reg,
printk(KERN_DEBUG "ath: Regpair used: 0x%0x\n",
reg->regpair->regDmnEnum);
+ return 0;
+}
+
+int
+ath_regd_init(struct ath_regulatory *reg,
+ struct wiphy *wiphy,
+ int (*reg_notifier)(struct wiphy *wiphy,
+ struct regulatory_request *request))
+{
+ struct ath_common *common = container_of(reg, struct ath_common,
+ regulatory);
+ int r;
+
+ r = __ath_regd_init(reg);
+ if (r)
+ return r;
+
+ if (ath_is_world_regd(reg))
+ memcpy(&common->reg_world_copy, reg,
+ sizeof(struct ath_regulatory));
+
ath_regd_init_wiphy(reg, wiphy, reg_notifier);
+
return 0;
}
EXPORT_SYMBOL(ath_regd_init);
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index 37110dfd2c96..16e8f8058155 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -191,6 +191,9 @@
#define B43_BFH_BUCKBOOST 0x0020 /* has buck/booster */
#define B43_BFH_FEM_BT 0x0040 /* has FEM and switch to share antenna
* with bluetooth */
+#define B43_BFH_NOCBUCK 0x0080
+#define B43_BFH_PALDO 0x0200
+#define B43_BFH_EXTLNA_5GHZ 0x1000 /* has an external LNA (5GHz mode) */
/* SPROM boardflags2_lo values */
#define B43_BFL2_RXBB_INT_REG_DIS 0x0001 /* external RX BB regulator present */
@@ -204,6 +207,14 @@
#define B43_BFL2_SKWRKFEM_BRD 0x0100 /* 4321mcm93 uses Skyworks FEM */
#define B43_BFL2_SPUR_WAR 0x0200 /* has a workaround for clock-harmonic spurs */
#define B43_BFL2_GPLL_WAR 0x0400 /* alternative G-band PLL settings implemented */
+#define B43_BFL2_SINGLEANT_CCK 0x1000
+#define B43_BFL2_2G_SPUR_WAR 0x2000
+
+/* SPROM boardflags2_hi values */
+#define B43_BFH2_GPLL_WAR2 0x0001
+#define B43_BFH2_IPALVLSHIFT_3P3 0x0002
+#define B43_BFH2_INTERNDET_TXIQCAL 0x0004
+#define B43_BFH2_XTALBUFOUTEN 0x0008
/* GPIO register offset, in both ChipCommon and PCI core. */
#define B43_GPIO_CONTROL 0x6c
@@ -667,6 +678,7 @@ struct b43_key {
};
/* SHM offsets to the QOS data structures for the 4 different queues. */
+#define B43_QOS_QUEUE_NUM 4
#define B43_QOS_PARAMS(queue) (B43_SHM_SH_EDCFQ + \
(B43_NR_QOSPARAMS * sizeof(u16) * (queue)))
#define B43_QOS_BACKGROUND B43_QOS_PARAMS(0)
@@ -904,7 +916,7 @@ struct b43_wl {
struct work_struct beacon_update_trigger;
/* The current QOS parameters for the 4 queues. */
- struct b43_qos_params qos_params[4];
+ struct b43_qos_params qos_params[B43_QOS_QUEUE_NUM];
/* Work for adjustment of the transmission power.
* This is scheduled when we determine that the actual TX output
@@ -913,8 +925,12 @@ struct b43_wl {
/* Packet transmit work */
struct work_struct tx_work;
+
/* Queue of packets to be transmitted. */
- struct sk_buff_head tx_queue;
+ struct sk_buff_head tx_queue[B43_QOS_QUEUE_NUM];
+
+ /* Flags that implement stopping of the queues. */
+ bool tx_queue_stopped[B43_QOS_QUEUE_NUM];
/* The device LEDs. */
struct b43_leds leds;
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index 5e45604f0f5d..b5f1b91002bb 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -890,7 +890,7 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
else
ring->ops = &dma32_ops;
if (for_tx) {
- ring->tx = 1;
+ ring->tx = true;
ring->current_slot = -1;
} else {
if (ring->index == 0) {
@@ -1061,7 +1061,7 @@ void b43_dma_free(struct b43_wldev *dev)
static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask)
{
u64 orig_mask = mask;
- bool fallback = 0;
+ bool fallback = false;
int err;
/* Try to set the DMA mask. If it fails, try falling back to a
@@ -1075,12 +1075,12 @@ static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask)
}
if (mask == DMA_BIT_MASK(64)) {
mask = DMA_BIT_MASK(32);
- fallback = 1;
+ fallback = true;
continue;
}
if (mask == DMA_BIT_MASK(32)) {
mask = DMA_BIT_MASK(30);
- fallback = 1;
+ fallback = true;
continue;
}
b43err(dev->wl, "The machine/kernel does not support "
@@ -1307,7 +1307,7 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
memset(meta, 0, sizeof(*meta));
meta->skb = skb;
- meta->is_last_fragment = 1;
+ meta->is_last_fragment = true;
priv_info->bouncebuffer = NULL;
meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
@@ -1465,8 +1465,10 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
if ((free_slots(ring) < TX_SLOTS_PER_FRAME) ||
should_inject_overflow(ring)) {
/* This TX ring is full. */
- ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
- ring->stopped = 1;
+ unsigned int skb_mapping = skb_get_queue_mapping(skb);
+ ieee80211_stop_queue(dev->wl->hw, skb_mapping);
+ dev->wl->tx_queue_stopped[skb_mapping] = 1;
+ ring->stopped = true;
if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index);
}
@@ -1584,12 +1586,21 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
}
if (ring->stopped) {
B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME);
+ ring->stopped = false;
+ }
+
+ if (dev->wl->tx_queue_stopped[ring->queue_prio]) {
+ dev->wl->tx_queue_stopped[ring->queue_prio] = 0;
+ } else {
+ /* If the driver queue is running wake the corresponding
+ * mac80211 queue. */
ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
- ring->stopped = 0;
if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
}
}
+ /* Add work to the queue. */
+ ieee80211_queue_work(dev->wl->hw, &dev->wl->tx_work);
}
static void dma_rx(struct b43_dmaring *ring, int *slot)
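
The txstatus hunk above wakes a mac80211 queue only when the driver-level backlog for that priority is already empty; otherwise it just clears the backlog flag and lets the TX worker drain the soft queue first. Roughly, the decision looks like the following sketch (demo_handle_tx_done is an illustrative name; the real logic lives at the end of b43_dma_handle_txstatus):

static void demo_handle_tx_done(struct b43_wldev *dev, struct b43_dmaring *ring)
{
	if (ring->stopped)
		ring->stopped = false;	/* the DMA ring has free slots again */

	if (dev->wl->tx_queue_stopped[ring->queue_prio]) {
		/* A software backlog exists for this priority: clear the
		 * flag so tx_work may refill the ring, but keep mac80211
		 * stopped until the backlog has drained. */
		dev->wl->tx_queue_stopped[ring->queue_prio] = false;
	} else {
		/* No backlog, so mac80211 may hand us new frames again. */
		ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
	}

	/* In both cases, kick the TX worker to push out queued frames. */
	ieee80211_queue_work(dev->wl->hw, &dev->wl->tx_work);
}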
diff --git a/drivers/net/wireless/b43/leds.c b/drivers/net/wireless/b43/leds.c
index a38c1c6446ad..d79ab2a227e1 100644
--- a/drivers/net/wireless/b43/leds.c
+++ b/drivers/net/wireless/b43/leds.c
@@ -74,7 +74,7 @@ static void b43_led_update(struct b43_wldev *dev,
if (radio_enabled)
turn_on = atomic_read(&led->state) != LED_OFF;
else
- turn_on = 0;
+ turn_on = false;
if (turn_on == led->hw_state)
return;
led->hw_state = turn_on;
@@ -225,11 +225,11 @@ static void b43_led_get_sprominfo(struct b43_wldev *dev,
if (sprom[led_index] == 0xFF) {
/* There is no LED information in the SPROM
* for this LED. Hardcode it here. */
- *activelow = 0;
+ *activelow = false;
switch (led_index) {
case 0:
*behaviour = B43_LED_ACTIVITY;
- *activelow = 1;
+ *activelow = true;
if (dev->dev->board_vendor == PCI_VENDOR_ID_COMPAQ)
*behaviour = B43_LED_RADIO_ALL;
break;
@@ -267,11 +267,11 @@ void b43_leds_init(struct b43_wldev *dev)
if (led->wl) {
if (dev->phy.radio_on && b43_is_hw_radio_enabled(dev)) {
b43_led_turn_on(dev, led->index, led->activelow);
- led->hw_state = 1;
+ led->hw_state = true;
atomic_set(&led->state, 1);
} else {
b43_led_turn_off(dev, led->index, led->activelow);
- led->hw_state = 0;
+ led->hw_state = false;
atomic_set(&led->state, 0);
}
}
@@ -280,19 +280,19 @@ void b43_leds_init(struct b43_wldev *dev)
led = &dev->wl->leds.led_tx;
if (led->wl) {
b43_led_turn_off(dev, led->index, led->activelow);
- led->hw_state = 0;
+ led->hw_state = false;
atomic_set(&led->state, 0);
}
led = &dev->wl->leds.led_rx;
if (led->wl) {
b43_led_turn_off(dev, led->index, led->activelow);
- led->hw_state = 0;
+ led->hw_state = false;
atomic_set(&led->state, 0);
}
led = &dev->wl->leds.led_assoc;
if (led->wl) {
b43_led_turn_off(dev, led->index, led->activelow);
- led->hw_state = 0;
+ led->hw_state = false;
atomic_set(&led->state, 0);
}
diff --git a/drivers/net/wireless/b43/lo.c b/drivers/net/wireless/b43/lo.c
index 4c82d582a524..916123a3d74e 100644
--- a/drivers/net/wireless/b43/lo.c
+++ b/drivers/net/wireless/b43/lo.c
@@ -826,7 +826,7 @@ void b43_gphy_dc_lt_init(struct b43_wldev *dev, bool update_all)
const struct b43_rfatt *rfatt;
const struct b43_bbatt *bbatt;
u64 power_vector;
- bool table_changed = 0;
+ bool table_changed = false;
BUILD_BUG_ON(B43_DC_LT_SIZE != 32);
B43_WARN_ON(lo->rfatt_list.len * lo->bbatt_list.len > 64);
@@ -876,7 +876,7 @@ void b43_gphy_dc_lt_init(struct b43_wldev *dev, bool update_all)
lo->dc_lt[idx] = (lo->dc_lt[idx] & 0xFF00)
| (val & 0x00FF);
}
- table_changed = 1;
+ table_changed = true;
}
if (table_changed) {
/* The table changed in memory. Update the hardware table. */
@@ -938,7 +938,7 @@ void b43_lo_g_maintanance_work(struct b43_wldev *dev)
unsigned long now;
unsigned long expire;
struct b43_lo_calib *cal, *tmp;
- bool current_item_expired = 0;
+ bool current_item_expired = false;
bool hwpctl;
if (!lo)
@@ -968,7 +968,7 @@ void b43_lo_g_maintanance_work(struct b43_wldev *dev)
if (b43_compare_bbatt(&cal->bbatt, &gphy->bbatt) &&
b43_compare_rfatt(&cal->rfatt, &gphy->rfatt)) {
B43_WARN_ON(current_item_expired);
- current_item_expired = 1;
+ current_item_expired = true;
}
if (b43_debug(dev, B43_DBG_LO)) {
b43dbg(dev->wl, "LO: Item BB(%u), RF(%u,%u), "
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 5634d9a9965b..b91f28ef1032 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -1122,17 +1122,17 @@ void b43_power_saving_ctl_bits(struct b43_wldev *dev, unsigned int ps_flags)
B43_WARN_ON((ps_flags & B43_PS_AWAKE) && (ps_flags & B43_PS_ASLEEP));
if (ps_flags & B43_PS_ENABLED) {
- hwps = 1;
+ hwps = true;
} else if (ps_flags & B43_PS_DISABLED) {
- hwps = 0;
+ hwps = false;
} else {
//TODO: If powersave is not off and FIXME is not set and we are not in adhoc
// and thus is not an AP and we are associated, set bit 25
}
if (ps_flags & B43_PS_AWAKE) {
- awake = 1;
+ awake = true;
} else if (ps_flags & B43_PS_ASLEEP) {
- awake = 0;
+ awake = false;
} else {
//TODO: If the device is awake or this is an AP, or we are scanning, or FIXME,
// or we are associated, or FIXME, or the latest PS-Poll packet sent was
@@ -1140,8 +1140,8 @@ void b43_power_saving_ctl_bits(struct b43_wldev *dev, unsigned int ps_flags)
}
/* FIXME: For now we force awake-on and hwps-off */
- hwps = 0;
- awake = 1;
+ hwps = false;
+ awake = true;
macctl = b43_read32(dev, B43_MMIO_MACCTL);
if (hwps)
@@ -1339,7 +1339,7 @@ static void b43_calculate_link_quality(struct b43_wldev *dev)
return;
if (dev->noisecalc.calculation_running)
return;
- dev->noisecalc.calculation_running = 1;
+ dev->noisecalc.calculation_running = true;
dev->noisecalc.nr_samples = 0;
b43_generate_noise_sample(dev);
@@ -1408,7 +1408,7 @@ static void handle_irq_noise(struct b43_wldev *dev)
average -= 48;
dev->stats.link_noise = average;
- dev->noisecalc.calculation_running = 0;
+ dev->noisecalc.calculation_running = false;
return;
}
generate_new:
@@ -1424,7 +1424,7 @@ static void handle_irq_tbtt_indication(struct b43_wldev *dev)
b43_power_saving_ctl_bits(dev, 0);
}
if (b43_is_mode(dev->wl, NL80211_IFTYPE_ADHOC))
- dev->dfq_valid = 1;
+ dev->dfq_valid = true;
}
static void handle_irq_atim_end(struct b43_wldev *dev)
@@ -1433,7 +1433,7 @@ static void handle_irq_atim_end(struct b43_wldev *dev)
b43_write32(dev, B43_MMIO_MACCMD,
b43_read32(dev, B43_MMIO_MACCMD)
| B43_MACCMD_DFQ_VALID);
- dev->dfq_valid = 0;
+ dev->dfq_valid = false;
}
}
@@ -1539,7 +1539,7 @@ static void b43_write_beacon_template(struct b43_wldev *dev,
unsigned int i, len, variable_len;
const struct ieee80211_mgmt *bcn;
const u8 *ie;
- bool tim_found = 0;
+ bool tim_found = false;
unsigned int rate;
u16 ctl;
int antenna;
@@ -1588,7 +1588,7 @@ static void b43_write_beacon_template(struct b43_wldev *dev,
/* A valid TIM is at least 4 bytes long. */
if (ie_len < 4)
break;
- tim_found = 1;
+ tim_found = true;
tim_position = sizeof(struct b43_plcp_hdr6);
tim_position += offsetof(struct ieee80211_mgmt, u.beacon.variable);
@@ -1625,7 +1625,7 @@ static void b43_upload_beacon0(struct b43_wldev *dev)
if (wl->beacon0_uploaded)
return;
b43_write_beacon_template(dev, 0x68, 0x18);
- wl->beacon0_uploaded = 1;
+ wl->beacon0_uploaded = true;
}
static void b43_upload_beacon1(struct b43_wldev *dev)
@@ -1635,7 +1635,7 @@ static void b43_upload_beacon1(struct b43_wldev *dev)
if (wl->beacon1_uploaded)
return;
b43_write_beacon_template(dev, 0x468, 0x1A);
- wl->beacon1_uploaded = 1;
+ wl->beacon1_uploaded = true;
}
static void handle_irq_beacon(struct b43_wldev *dev)
@@ -1667,7 +1667,7 @@ static void handle_irq_beacon(struct b43_wldev *dev)
if (unlikely(wl->beacon_templates_virgin)) {
/* We never uploaded a beacon before.
* Upload both templates now, but only mark one valid. */
- wl->beacon_templates_virgin = 0;
+ wl->beacon_templates_virgin = false;
b43_upload_beacon0(dev);
b43_upload_beacon1(dev);
cmd = b43_read32(dev, B43_MMIO_MACCMD);
@@ -1755,8 +1755,8 @@ static void b43_update_templates(struct b43_wl *wl)
if (wl->current_beacon)
dev_kfree_skb_any(wl->current_beacon);
wl->current_beacon = beacon;
- wl->beacon0_uploaded = 0;
- wl->beacon1_uploaded = 0;
+ wl->beacon0_uploaded = false;
+ wl->beacon1_uploaded = false;
ieee80211_queue_work(wl->hw, &wl->beacon_update_trigger);
}
@@ -1913,7 +1913,7 @@ static void b43_do_interrupt_thread(struct b43_wldev *dev)
b43err(dev->wl, "This device does not support DMA "
"on your system. It will now be switched to PIO.\n");
/* Fall back to PIO transfers if we get fatal DMA errors! */
- dev->use_pio = 1;
+ dev->use_pio = true;
b43_controller_restart(dev, "DMA error");
return;
}
@@ -2240,12 +2240,12 @@ static int b43_try_request_fw(struct b43_request_fw_context *ctx)
filename = NULL;
else
goto err_no_pcm;
- fw->pcm_request_failed = 0;
+ fw->pcm_request_failed = false;
err = b43_do_request_fw(ctx, filename, &fw->pcm);
if (err == -ENOENT) {
/* We did not find a PCM file? Not fatal, but
* core rev <= 10 must do without hwcrypto then. */
- fw->pcm_request_failed = 1;
+ fw->pcm_request_failed = true;
} else if (err)
goto err_load;
@@ -2535,7 +2535,7 @@ static int b43_upload_microcode(struct b43_wldev *dev)
dev->wl->hw->queues = dev->wl->mac80211_initially_registered_queues;
dev->qos_enabled = !!modparam_qos;
/* Default to firmware/hardware crypto acceleration. */
- dev->hwcrypto_enabled = 1;
+ dev->hwcrypto_enabled = true;
if (dev->fw.opensource) {
u16 fwcapa;
@@ -2549,7 +2549,7 @@ static int b43_upload_microcode(struct b43_wldev *dev)
if (!(fwcapa & B43_FWCAPA_HWCRYPTO) || dev->fw.pcm_request_failed) {
b43info(dev->wl, "Hardware crypto acceleration not supported by firmware\n");
/* Disable hardware crypto and fall back to software crypto. */
- dev->hwcrypto_enabled = 0;
+ dev->hwcrypto_enabled = false;
}
if (!(fwcapa & B43_FWCAPA_QOS)) {
b43info(dev->wl, "QoS not supported by firmware\n");
@@ -2557,7 +2557,7 @@ static int b43_upload_microcode(struct b43_wldev *dev)
* ieee80211_unregister to make sure the networking core can
* properly free possible resources. */
dev->wl->hw->queues = 1;
- dev->qos_enabled = 0;
+ dev->qos_enabled = false;
}
} else {
b43info(dev->wl, "Loading firmware version %u.%u "
@@ -3361,10 +3361,10 @@ static int b43_rng_init(struct b43_wl *wl)
wl->rng.name = wl->rng_name;
wl->rng.data_read = b43_rng_read;
wl->rng.priv = (unsigned long)wl;
- wl->rng_initialized = 1;
+ wl->rng_initialized = true;
err = hwrng_register(&wl->rng);
if (err) {
- wl->rng_initialized = 0;
+ wl->rng_initialized = false;
b43err(wl, "Failed to register the random "
"number generator (%d)\n", err);
}
@@ -3378,6 +3378,7 @@ static void b43_tx_work(struct work_struct *work)
struct b43_wl *wl = container_of(work, struct b43_wl, tx_work);
struct b43_wldev *dev;
struct sk_buff *skb;
+ int queue_num;
int err = 0;
mutex_lock(&wl->mutex);
@@ -3387,15 +3388,26 @@ static void b43_tx_work(struct work_struct *work)
return;
}
- while (skb_queue_len(&wl->tx_queue)) {
- skb = skb_dequeue(&wl->tx_queue);
+ for (queue_num = 0; queue_num < B43_QOS_QUEUE_NUM; queue_num++) {
+ while (skb_queue_len(&wl->tx_queue[queue_num])) {
+ skb = skb_dequeue(&wl->tx_queue[queue_num]);
+ if (b43_using_pio_transfers(dev))
+ err = b43_pio_tx(dev, skb);
+ else
+ err = b43_dma_tx(dev, skb);
+ if (err == -ENOSPC) {
+ wl->tx_queue_stopped[queue_num] = 1;
+ ieee80211_stop_queue(wl->hw, queue_num);
+ skb_queue_head(&wl->tx_queue[queue_num], skb);
+ break;
+ }
+ if (unlikely(err))
+ dev_kfree_skb(skb); /* Drop it */
+ err = 0;
+ }
- if (b43_using_pio_transfers(dev))
- err = b43_pio_tx(dev, skb);
- else
- err = b43_dma_tx(dev, skb);
- if (unlikely(err))
- dev_kfree_skb(skb); /* Drop it */
+ if (!err)
+ wl->tx_queue_stopped[queue_num] = 0;
}
#if B43_DEBUG
@@ -3416,8 +3428,12 @@ static void b43_op_tx(struct ieee80211_hw *hw,
}
B43_WARN_ON(skb_shinfo(skb)->nr_frags);
- skb_queue_tail(&wl->tx_queue, skb);
- ieee80211_queue_work(wl->hw, &wl->tx_work);
+ skb_queue_tail(&wl->tx_queue[skb->queue_mapping], skb);
+ if (!wl->tx_queue_stopped[skb->queue_mapping]) {
+ ieee80211_queue_work(wl->hw, &wl->tx_work);
+ } else {
+ ieee80211_stop_queue(wl->hw, skb->queue_mapping);
+ }
}
static void b43_qos_params_upload(struct b43_wldev *dev,
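
With the two hunks above, b43_op_tx() only enqueues the frame into the per-priority soft queue and defers the actual DMA/PIO submission to tx_work, which drains each queue until the hardware reports -ENOSPC. A condensed sketch of that drain loop, using the helpers visible elsewhere in this diff (demo_drain_one_queue is an illustrative name, not driver API, and the real worker also checks device state and iterates over all queues):

static void demo_drain_one_queue(struct b43_wl *wl, struct b43_wldev *dev,
				 int queue_num)
{
	struct sk_buff *skb;
	int err;

	while ((skb = skb_dequeue(&wl->tx_queue[queue_num]))) {
		err = b43_using_pio_transfers(dev) ?
			b43_pio_tx(dev, skb) : b43_dma_tx(dev, skb);
		if (err == -ENOSPC) {
			/* Hardware queue is full: remember the backlog,
			 * stop mac80211 from feeding us more frames and
			 * keep the frame for the next drain attempt. */
			wl->tx_queue_stopped[queue_num] = true;
			ieee80211_stop_queue(wl->hw, queue_num);
			skb_queue_head(&wl->tx_queue[queue_num], skb);
			return;
		}
		if (unlikely(err))
			dev_kfree_skb(skb);	/* unrecoverable, drop it */
	}
	wl->tx_queue_stopped[queue_num] = false;
}

Pushing the frame back with skb_queue_head() on -ENOSPC preserves ordering, so the next drain attempt retries the same frame first.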
@@ -3702,13 +3718,13 @@ static int b43_switch_band(struct b43_wl *wl, struct ieee80211_channel *chan)
case IEEE80211_BAND_5GHZ:
if (d->phy.supports_5ghz) {
up_dev = d;
- gmode = 0;
+ gmode = false;
}
break;
case IEEE80211_BAND_2GHZ:
if (d->phy.supports_2ghz) {
up_dev = d;
- gmode = 1;
+ gmode = true;
}
break;
default:
@@ -4147,6 +4163,7 @@ static struct b43_wldev * b43_wireless_core_stop(struct b43_wldev *dev)
struct b43_wl *wl;
struct b43_wldev *orig_dev;
u32 mask;
+ int queue_num;
if (!dev)
return NULL;
@@ -4199,9 +4216,11 @@ redo:
mask = b43_read32(dev, B43_MMIO_GEN_IRQ_MASK);
B43_WARN_ON(mask != 0xFFFFFFFF && mask);
- /* Drain the TX queue */
- while (skb_queue_len(&wl->tx_queue))
- dev_kfree_skb(skb_dequeue(&wl->tx_queue));
+ /* Drain all TX queues. */
+ for (queue_num = 0; queue_num < B43_QOS_QUEUE_NUM; queue_num++) {
+ while (skb_queue_len(&wl->tx_queue[queue_num]))
+ dev_kfree_skb(skb_dequeue(&wl->tx_queue[queue_num]));
+ }
b43_mac_suspend(dev);
b43_leds_exit(dev);
@@ -4425,18 +4444,18 @@ static void setup_struct_phy_for_init(struct b43_wldev *dev,
atomic_set(&phy->txerr_cnt, B43_PHY_TX_BADNESS_LIMIT);
#if B43_DEBUG
- phy->phy_locked = 0;
- phy->radio_locked = 0;
+ phy->phy_locked = false;
+ phy->radio_locked = false;
#endif
}
static void setup_struct_wldev_for_init(struct b43_wldev *dev)
{
- dev->dfq_valid = 0;
+ dev->dfq_valid = false;
/* Assume the radio is enabled. If it's not enabled, the state will
* immediately get fixed on the first periodic work run. */
- dev->radio_hw_enable = 1;
+ dev->radio_hw_enable = true;
/* Stats */
memset(&dev->stats, 0, sizeof(dev->stats));
@@ -4670,16 +4689,16 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
if (b43_bus_host_is_pcmcia(dev->dev) ||
b43_bus_host_is_sdio(dev->dev)) {
- dev->__using_pio_transfers = 1;
+ dev->__using_pio_transfers = true;
err = b43_pio_init(dev);
} else if (dev->use_pio) {
b43warn(dev->wl, "Forced PIO by use_pio module parameter. "
"This should not be needed and will result in lower "
"performance.\n");
- dev->__using_pio_transfers = 1;
+ dev->__using_pio_transfers = true;
err = b43_pio_init(dev);
} else {
- dev->__using_pio_transfers = 0;
+ dev->__using_pio_transfers = false;
err = b43_dma_init(dev);
}
if (err)
@@ -4733,7 +4752,7 @@ static int b43_op_add_interface(struct ieee80211_hw *hw,
b43dbg(wl, "Adding Interface type %d\n", vif->type);
dev = wl->current_dev;
- wl->operating = 1;
+ wl->operating = true;
wl->vif = vif;
wl->if_type = vif->type;
memcpy(wl->mac_addr, vif->addr, ETH_ALEN);
@@ -4767,7 +4786,7 @@ static void b43_op_remove_interface(struct ieee80211_hw *hw,
B43_WARN_ON(wl->vif != vif);
wl->vif = NULL;
- wl->operating = 0;
+ wl->operating = false;
b43_adjust_opmode(dev);
memset(wl->mac_addr, 0, ETH_ALEN);
@@ -4789,12 +4808,12 @@ static int b43_op_start(struct ieee80211_hw *hw)
memset(wl->bssid, 0, ETH_ALEN);
memset(wl->mac_addr, 0, ETH_ALEN);
wl->filter_flags = 0;
- wl->radiotap_enabled = 0;
+ wl->radiotap_enabled = false;
b43_qos_clear(wl);
- wl->beacon0_uploaded = 0;
- wl->beacon1_uploaded = 0;
- wl->beacon_templates_virgin = 1;
- wl->radio_enabled = 1;
+ wl->beacon0_uploaded = false;
+ wl->beacon1_uploaded = false;
+ wl->beacon_templates_virgin = true;
+ wl->radio_enabled = true;
mutex_lock(&wl->mutex);
@@ -4833,6 +4852,9 @@ static void b43_op_stop(struct ieee80211_hw *hw)
cancel_work_sync(&(wl->beacon_update_trigger));
+ if (!dev)
+ goto out;
+
mutex_lock(&wl->mutex);
if (b43_status(dev) >= B43_STAT_STARTED) {
dev = b43_wireless_core_stop(dev);
@@ -4840,11 +4862,11 @@ static void b43_op_stop(struct ieee80211_hw *hw)
goto out_unlock;
}
b43_wireless_core_exit(dev);
- wl->radio_enabled = 0;
+ wl->radio_enabled = false;
out_unlock:
mutex_unlock(&wl->mutex);
-
+out:
cancel_work_sync(&(wl->txpower_adjust_work));
}
@@ -5028,7 +5050,7 @@ static int b43_wireless_core_attach(struct b43_wldev *dev)
struct pci_dev *pdev = NULL;
int err;
u32 tmp;
- bool have_2ghz_phy = 0, have_5ghz_phy = 0;
+ bool have_2ghz_phy = false, have_5ghz_phy = false;
/* Do NOT do any device initialization here.
* Do it in wireless_core_init() instead.
@@ -5071,7 +5093,7 @@ static int b43_wireless_core_attach(struct b43_wldev *dev)
}
dev->phy.gmode = have_2ghz_phy;
- dev->phy.radio_on = 1;
+ dev->phy.radio_on = true;
b43_wireless_core_reset(dev, dev->phy.gmode);
err = b43_phy_versioning(dev);
@@ -5082,11 +5104,11 @@ static int b43_wireless_core_attach(struct b43_wldev *dev)
(pdev->device != 0x4312 &&
pdev->device != 0x4319 && pdev->device != 0x4324)) {
/* No multiband support. */
- have_2ghz_phy = 0;
- have_5ghz_phy = 0;
+ have_2ghz_phy = false;
+ have_5ghz_phy = false;
switch (dev->phy.type) {
case B43_PHYTYPE_A:
- have_5ghz_phy = 1;
+ have_5ghz_phy = true;
break;
case B43_PHYTYPE_LP: //FIXME not always!
#if 0 //FIXME enabling 5GHz causes a NULL pointer dereference
@@ -5096,7 +5118,7 @@ static int b43_wireless_core_attach(struct b43_wldev *dev)
case B43_PHYTYPE_N:
case B43_PHYTYPE_HT:
case B43_PHYTYPE_LCN:
- have_2ghz_phy = 1;
+ have_2ghz_phy = true;
break;
default:
B43_WARN_ON(1);
@@ -5112,8 +5134,8 @@ static int b43_wireless_core_attach(struct b43_wldev *dev)
/* FIXME: For now we disable the A-PHY on multi-PHY devices. */
if (dev->phy.type != B43_PHYTYPE_N &&
dev->phy.type != B43_PHYTYPE_LP) {
- have_2ghz_phy = 1;
- have_5ghz_phy = 0;
+ have_2ghz_phy = true;
+ have_5ghz_phy = false;
}
}
@@ -5245,6 +5267,7 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev)
struct ieee80211_hw *hw;
struct b43_wl *wl;
char chip_name[6];
+ int queue_num;
hw = ieee80211_alloc_hw(sizeof(*wl), &b43_hw_ops);
if (!hw) {
@@ -5264,7 +5287,7 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev)
BIT(NL80211_IFTYPE_WDS) |
BIT(NL80211_IFTYPE_ADHOC);
- hw->queues = modparam_qos ? 4 : 1;
+ hw->queues = modparam_qos ? B43_QOS_QUEUE_NUM : 1;
wl->mac80211_initially_registered_queues = hw->queues;
hw->max_rates = 2;
SET_IEEE80211_DEV(hw, dev->dev);
@@ -5281,7 +5304,12 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev)
INIT_WORK(&wl->beacon_update_trigger, b43_beacon_update_trigger_work);
INIT_WORK(&wl->txpower_adjust_work, b43_phy_txpower_adjust_work);
INIT_WORK(&wl->tx_work, b43_tx_work);
- skb_queue_head_init(&wl->tx_queue);
+
+ /* Initialize queues and flags. */
+ for (queue_num = 0; queue_num < B43_QOS_QUEUE_NUM; queue_num++) {
+ skb_queue_head_init(&wl->tx_queue[queue_num]);
+ wl->tx_queue_stopped[queue_num] = 0;
+ }
snprintf(chip_name, ARRAY_SIZE(chip_name),
(dev->chip_id > 0x9999) ? "%d" : "%04X", dev->chip_id);
diff --git a/drivers/net/wireless/b43/phy_common.c b/drivers/net/wireless/b43/phy_common.c
index 3ea44bb03684..3f8883b14d9c 100644
--- a/drivers/net/wireless/b43/phy_common.c
+++ b/drivers/net/wireless/b43/phy_common.c
@@ -145,7 +145,7 @@ void b43_radio_lock(struct b43_wldev *dev)
#if B43_DEBUG
B43_WARN_ON(dev->phy.radio_locked);
- dev->phy.radio_locked = 1;
+ dev->phy.radio_locked = true;
#endif
macctl = b43_read32(dev, B43_MMIO_MACCTL);
@@ -163,7 +163,7 @@ void b43_radio_unlock(struct b43_wldev *dev)
#if B43_DEBUG
B43_WARN_ON(!dev->phy.radio_locked);
- dev->phy.radio_locked = 0;
+ dev->phy.radio_locked = false;
#endif
/* Commit any write */
@@ -178,7 +178,7 @@ void b43_phy_lock(struct b43_wldev *dev)
{
#if B43_DEBUG
B43_WARN_ON(dev->phy.phy_locked);
- dev->phy.phy_locked = 1;
+ dev->phy.phy_locked = true;
#endif
B43_WARN_ON(dev->dev->core_rev < 3);
@@ -190,7 +190,7 @@ void b43_phy_unlock(struct b43_wldev *dev)
{
#if B43_DEBUG
B43_WARN_ON(!dev->phy.phy_locked);
- dev->phy.phy_locked = 0;
+ dev->phy.phy_locked = false;
#endif
B43_WARN_ON(dev->dev->core_rev < 3);
diff --git a/drivers/net/wireless/b43/phy_g.c b/drivers/net/wireless/b43/phy_g.c
index 8e157bc213f3..12f467b8d564 100644
--- a/drivers/net/wireless/b43/phy_g.c
+++ b/drivers/net/wireless/b43/phy_g.c
@@ -897,7 +897,7 @@ b43_radio_interference_mitigation_enable(struct b43_wldev *dev, int mode)
if (b43_phy_read(dev, 0x0033) & 0x0800)
break;
- gphy->aci_enable = 1;
+ gphy->aci_enable = true;
phy_stacksave(B43_PHY_RADIO_BITFIELD);
phy_stacksave(B43_PHY_G_CRS);
@@ -1038,7 +1038,7 @@ b43_radio_interference_mitigation_disable(struct b43_wldev *dev, int mode)
if (!(b43_phy_read(dev, 0x0033) & 0x0800))
break;
- gphy->aci_enable = 0;
+ gphy->aci_enable = false;
phy_stackrestore(B43_PHY_RADIO_BITFIELD);
phy_stackrestore(B43_PHY_G_CRS);
@@ -1956,10 +1956,10 @@ static void b43_phy_init_pctl(struct b43_wldev *dev)
bbatt.att = 11;
if (phy->radio_rev == 8) {
rfatt.att = 15;
- rfatt.with_padmix = 1;
+ rfatt.with_padmix = true;
} else {
rfatt.att = 9;
- rfatt.with_padmix = 0;
+ rfatt.with_padmix = false;
}
b43_set_txpower_g(dev, &bbatt, &rfatt, 0);
}
@@ -2137,7 +2137,7 @@ static void default_radio_attenuation(struct b43_wldev *dev,
struct b43_bus_dev *bdev = dev->dev;
struct b43_phy *phy = &dev->phy;
- rf->with_padmix = 0;
+ rf->with_padmix = false;
if (dev->dev->board_vendor == SSB_BOARDVENDOR_BCM &&
dev->dev->board_type == SSB_BOARD_BCM4309G) {
@@ -2221,7 +2221,7 @@ static void default_radio_attenuation(struct b43_wldev *dev,
return;
case 8:
rf->att = 0xA;
- rf->with_padmix = 1;
+ rf->with_padmix = true;
return;
case 9:
default:
@@ -2389,7 +2389,7 @@ static int b43_gphy_init_tssi2dbm_table(struct b43_wldev *dev)
B43_WARN_ON((dev->dev->chip_id == 0x4301) &&
(phy->radio_ver != 0x2050)); /* Not supported anymore */
- gphy->dyn_tssi_tbl = 0;
+ gphy->dyn_tssi_tbl = false;
if (pab0 != 0 && pab1 != 0 && pab2 != 0 &&
pab0 != -1 && pab1 != -1 && pab2 != -1) {
@@ -2404,7 +2404,7 @@ static int b43_gphy_init_tssi2dbm_table(struct b43_wldev *dev)
pab1, pab2);
if (!gphy->tssi2dbm)
return -ENOMEM;
- gphy->dyn_tssi_tbl = 1;
+ gphy->dyn_tssi_tbl = true;
} else {
/* pabX values not set in SPROM. */
gphy->tgt_idle_tssi = 52;
@@ -2504,7 +2504,7 @@ static void b43_gphy_op_free(struct b43_wldev *dev)
if (gphy->dyn_tssi_tbl)
kfree(gphy->tssi2dbm);
- gphy->dyn_tssi_tbl = 0;
+ gphy->dyn_tssi_tbl = false;
gphy->tssi2dbm = NULL;
kfree(gphy);
@@ -2531,10 +2531,10 @@ static int b43_gphy_op_prepare_hardware(struct b43_wldev *dev)
if (phy->rev == 1) {
/* Workaround: Temporarly disable gmode through the early init
* phase, as the gmode stuff is not needed for phy rev 1 */
- phy->gmode = 0;
+ phy->gmode = false;
b43_wireless_core_reset(dev, 0);
b43_phy_initg(dev);
- phy->gmode = 1;
+ phy->gmode = true;
b43_wireless_core_reset(dev, 1);
}
@@ -2613,7 +2613,7 @@ static void b43_gphy_op_software_rfkill(struct b43_wldev *dev,
gphy->radio_off_context.rfover);
b43_phy_write(dev, B43_PHY_RFOVERVAL,
gphy->radio_off_context.rfoverval);
- gphy->radio_off_context.valid = 0;
+ gphy->radio_off_context.valid = false;
}
channel = phy->channel;
b43_gphy_channel_switch(dev, 6, 1);
@@ -2626,7 +2626,7 @@ static void b43_gphy_op_software_rfkill(struct b43_wldev *dev,
rfoverval = b43_phy_read(dev, B43_PHY_RFOVERVAL);
gphy->radio_off_context.rfover = rfover;
gphy->radio_off_context.rfoverval = rfoverval;
- gphy->radio_off_context.valid = 1;
+ gphy->radio_off_context.valid = true;
b43_phy_write(dev, B43_PHY_RFOVER, rfover | 0x008C);
b43_phy_write(dev, B43_PHY_RFOVERVAL, rfoverval & 0xFF73);
}
@@ -2711,10 +2711,10 @@ static int b43_gphy_op_interf_mitigation(struct b43_wldev *dev,
if ((phy->rev == 0) || (!phy->gmode))
return -ENODEV;
- gphy->aci_wlan_automatic = 0;
+ gphy->aci_wlan_automatic = false;
switch (mode) {
case B43_INTERFMODE_AUTOWLAN:
- gphy->aci_wlan_automatic = 1;
+ gphy->aci_wlan_automatic = true;
if (gphy->aci_enable)
mode = B43_INTERFMODE_MANUALWLAN;
else
@@ -2735,8 +2735,8 @@ static int b43_gphy_op_interf_mitigation(struct b43_wldev *dev,
b43_radio_interference_mitigation_disable(dev, currentmode);
if (mode == B43_INTERFMODE_NONE) {
- gphy->aci_enable = 0;
- gphy->aci_hw_rssi = 0;
+ gphy->aci_enable = false;
+ gphy->aci_hw_rssi = false;
} else
b43_radio_interference_mitigation_enable(dev, mode);
gphy->interfmode = mode;
diff --git a/drivers/net/wireless/b43/phy_lp.c b/drivers/net/wireless/b43/phy_lp.c
index f93d66b1817b..3ae28561f7a4 100644
--- a/drivers/net/wireless/b43/phy_lp.c
+++ b/drivers/net/wireless/b43/phy_lp.c
@@ -736,9 +736,9 @@ static void lpphy_set_deaf(struct b43_wldev *dev, bool user)
struct b43_phy_lp *lpphy = dev->phy.lp;
if (user)
- lpphy->crs_usr_disable = 1;
+ lpphy->crs_usr_disable = true;
else
- lpphy->crs_sys_disable = 1;
+ lpphy->crs_sys_disable = true;
b43_phy_maskset(dev, B43_LPPHY_CRSGAIN_CTL, 0xFF1F, 0x80);
}
@@ -747,9 +747,9 @@ static void lpphy_clear_deaf(struct b43_wldev *dev, bool user)
struct b43_phy_lp *lpphy = dev->phy.lp;
if (user)
- lpphy->crs_usr_disable = 0;
+ lpphy->crs_usr_disable = false;
else
- lpphy->crs_sys_disable = 0;
+ lpphy->crs_sys_disable = false;
if (!lpphy->crs_usr_disable && !lpphy->crs_sys_disable) {
if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
index b17d9b6c33a5..bf5a43855358 100644
--- a/drivers/net/wireless/b43/phy_n.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -78,19 +78,6 @@ enum b43_nphy_rssi_type {
B43_NPHY_RSSI_TBD,
};
-/* TODO: reorder functions */
-static void b43_nphy_stay_in_carrier_search(struct b43_wldev *dev,
- bool enable);
-static void b43_nphy_set_rf_sequence(struct b43_wldev *dev, u8 cmd,
- u8 *events, u8 *delays, u8 length);
-static void b43_nphy_force_rf_sequence(struct b43_wldev *dev,
- enum b43_nphy_rf_sequence seq);
-static void b43_nphy_rf_control_override(struct b43_wldev *dev, u16 field,
- u16 value, u8 core, bool off);
-static void b43_nphy_rf_control_intc_override(struct b43_wldev *dev, u8 field,
- u16 value, u8 core);
-static const u32 *b43_nphy_get_ipa_gain_table(struct b43_wldev *dev);
-
static inline bool b43_nphy_ipa(struct b43_wldev *dev)
{
enum ieee80211_band band = b43_current_band(dev->wl);
@@ -98,57 +85,394 @@ static inline bool b43_nphy_ipa(struct b43_wldev *dev)
(dev->phy.n->ipa5g_on && band == IEEE80211_BAND_5GHZ));
}
-void b43_nphy_set_rxantenna(struct b43_wldev *dev, int antenna)
-{//TODO
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/GetIpaGainTbl */
+static const u32 *b43_nphy_get_ipa_gain_table(struct b43_wldev *dev)
+{
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (dev->phy.rev >= 6) {
+ if (dev->dev->chip_id == 47162)
+ return txpwrctrl_tx_gain_ipa_rev5;
+ return txpwrctrl_tx_gain_ipa_rev6;
+ } else if (dev->phy.rev >= 5) {
+ return txpwrctrl_tx_gain_ipa_rev5;
+ } else {
+ return txpwrctrl_tx_gain_ipa;
+ }
+ } else {
+ return txpwrctrl_tx_gain_ipa_5g;
+ }
}
-static void b43_nphy_op_adjust_txpower(struct b43_wldev *dev)
-{//TODO
+/**************************************************
+ * RF (just without b43_nphy_rf_control_intc_override)
+ **************************************************/
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ForceRFSeq */
+static void b43_nphy_force_rf_sequence(struct b43_wldev *dev,
+ enum b43_nphy_rf_sequence seq)
+{
+ static const u16 trigger[] = {
+ [B43_RFSEQ_RX2TX] = B43_NPHY_RFSEQTR_RX2TX,
+ [B43_RFSEQ_TX2RX] = B43_NPHY_RFSEQTR_TX2RX,
+ [B43_RFSEQ_RESET2RX] = B43_NPHY_RFSEQTR_RST2RX,
+ [B43_RFSEQ_UPDATE_GAINH] = B43_NPHY_RFSEQTR_UPGH,
+ [B43_RFSEQ_UPDATE_GAINL] = B43_NPHY_RFSEQTR_UPGL,
+ [B43_RFSEQ_UPDATE_GAINU] = B43_NPHY_RFSEQTR_UPGU,
+ };
+ int i;
+ u16 seq_mode = b43_phy_read(dev, B43_NPHY_RFSEQMODE);
+
+ B43_WARN_ON(seq >= ARRAY_SIZE(trigger));
+
+ b43_phy_set(dev, B43_NPHY_RFSEQMODE,
+ B43_NPHY_RFSEQMODE_CAOVER | B43_NPHY_RFSEQMODE_TROVER);
+ b43_phy_set(dev, B43_NPHY_RFSEQTR, trigger[seq]);
+ for (i = 0; i < 200; i++) {
+ if (!(b43_phy_read(dev, B43_NPHY_RFSEQST) & trigger[seq]))
+ goto ok;
+ msleep(1);
+ }
+ b43err(dev->wl, "RF sequence status timeout\n");
+ok:
+ b43_phy_write(dev, B43_NPHY_RFSEQMODE, seq_mode);
}
-static enum b43_txpwr_result b43_nphy_op_recalc_txpower(struct b43_wldev *dev,
- bool ignore_tssi)
-{//TODO
- return B43_TXPWR_RES_DONE;
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlOverride */
+static void b43_nphy_rf_control_override(struct b43_wldev *dev, u16 field,
+ u16 value, u8 core, bool off)
+{
+ int i;
+ u8 index = fls(field);
+ u8 addr, en_addr, val_addr;
+ /* we expect only one bit set */
+ B43_WARN_ON(field & (~(1 << (index - 1))));
+
+ if (dev->phy.rev >= 3) {
+ const struct nphy_rf_control_override_rev3 *rf_ctrl;
+ for (i = 0; i < 2; i++) {
+ if (index == 0 || index == 16) {
+ b43err(dev->wl,
+ "Unsupported RF Ctrl Override call\n");
+ return;
+ }
+
+ rf_ctrl = &tbl_rf_control_override_rev3[index - 1];
+ en_addr = B43_PHY_N((i == 0) ?
+ rf_ctrl->en_addr0 : rf_ctrl->en_addr1);
+ val_addr = B43_PHY_N((i == 0) ?
+ rf_ctrl->val_addr0 : rf_ctrl->val_addr1);
+
+ if (off) {
+ b43_phy_mask(dev, en_addr, ~(field));
+ b43_phy_mask(dev, val_addr,
+ ~(rf_ctrl->val_mask));
+ } else {
+ if (core == 0 || ((1 << i) & core)) {
+ b43_phy_set(dev, en_addr, field);
+ b43_phy_maskset(dev, val_addr,
+ ~(rf_ctrl->val_mask),
+ (value << rf_ctrl->val_shift));
+ }
+ }
+ }
+ } else {
+ const struct nphy_rf_control_override_rev2 *rf_ctrl;
+ if (off) {
+ b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, ~(field));
+ value = 0;
+ } else {
+ b43_phy_set(dev, B43_NPHY_RFCTL_OVER, field);
+ }
+
+ for (i = 0; i < 2; i++) {
+ if (index <= 1 || index == 16) {
+ b43err(dev->wl,
+ "Unsupported RF Ctrl Override call\n");
+ return;
+ }
+
+ if (index == 2 || index == 10 ||
+ (index >= 13 && index <= 15)) {
+ core = 1;
+ }
+
+ rf_ctrl = &tbl_rf_control_override_rev2[index - 2];
+ addr = B43_PHY_N((i == 0) ?
+ rf_ctrl->addr0 : rf_ctrl->addr1);
+
+ if ((1 << i) & core)
+ b43_phy_maskset(dev, addr, ~(rf_ctrl->bmask),
+ (value << rf_ctrl->shift));
+
+ b43_phy_set(dev, B43_NPHY_RFCTL_OVER, 0x1);
+ b43_phy_set(dev, B43_NPHY_RFCTL_CMD,
+ B43_NPHY_RFCTL_CMD_START);
+ udelay(1);
+ b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, 0xFFFE);
+ }
+ }
}
-static void b43_chantab_radio_upload(struct b43_wldev *dev,
- const struct b43_nphy_channeltab_entry_rev2 *e)
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlIntcOverride */
+static void b43_nphy_rf_control_intc_override(struct b43_wldev *dev, u8 field,
+ u16 value, u8 core)
{
- b43_radio_write(dev, B2055_PLL_REF, e->radio_pll_ref);
- b43_radio_write(dev, B2055_RF_PLLMOD0, e->radio_rf_pllmod0);
- b43_radio_write(dev, B2055_RF_PLLMOD1, e->radio_rf_pllmod1);
- b43_radio_write(dev, B2055_VCO_CAPTAIL, e->radio_vco_captail);
- b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */
+ u8 i, j;
+ u16 reg, tmp, val;
- b43_radio_write(dev, B2055_VCO_CAL1, e->radio_vco_cal1);
- b43_radio_write(dev, B2055_VCO_CAL2, e->radio_vco_cal2);
- b43_radio_write(dev, B2055_PLL_LFC1, e->radio_pll_lfc1);
- b43_radio_write(dev, B2055_PLL_LFR1, e->radio_pll_lfr1);
- b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */
+ B43_WARN_ON(dev->phy.rev < 3);
+ B43_WARN_ON(field > 4);
- b43_radio_write(dev, B2055_PLL_LFC2, e->radio_pll_lfc2);
- b43_radio_write(dev, B2055_LGBUF_CENBUF, e->radio_lgbuf_cenbuf);
- b43_radio_write(dev, B2055_LGEN_TUNE1, e->radio_lgen_tune1);
- b43_radio_write(dev, B2055_LGEN_TUNE2, e->radio_lgen_tune2);
- b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */
+ for (i = 0; i < 2; i++) {
+ if ((core == 1 && i == 1) || (core == 2 && !i))
+ continue;
- b43_radio_write(dev, B2055_C1_LGBUF_ATUNE, e->radio_c1_lgbuf_atune);
- b43_radio_write(dev, B2055_C1_LGBUF_GTUNE, e->radio_c1_lgbuf_gtune);
- b43_radio_write(dev, B2055_C1_RX_RFR1, e->radio_c1_rx_rfr1);
- b43_radio_write(dev, B2055_C1_TX_PGAPADTN, e->radio_c1_tx_pgapadtn);
- b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */
+ reg = (i == 0) ?
+ B43_NPHY_RFCTL_INTC1 : B43_NPHY_RFCTL_INTC2;
+ b43_phy_mask(dev, reg, 0xFBFF);
- b43_radio_write(dev, B2055_C1_TX_MXBGTRIM, e->radio_c1_tx_mxbgtrim);
- b43_radio_write(dev, B2055_C2_LGBUF_ATUNE, e->radio_c2_lgbuf_atune);
- b43_radio_write(dev, B2055_C2_LGBUF_GTUNE, e->radio_c2_lgbuf_gtune);
- b43_radio_write(dev, B2055_C2_RX_RFR1, e->radio_c2_rx_rfr1);
- b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */
+ switch (field) {
+ case 0:
+ b43_phy_write(dev, reg, 0);
+ b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX);
+ break;
+ case 1:
+ if (!i) {
+ b43_phy_maskset(dev, B43_NPHY_RFCTL_INTC1,
+ 0xFC3F, (value << 6));
+ b43_phy_maskset(dev, B43_NPHY_TXF_40CO_B1S1,
+ 0xFFFE, 1);
+ b43_phy_set(dev, B43_NPHY_RFCTL_CMD,
+ B43_NPHY_RFCTL_CMD_START);
+ for (j = 0; j < 100; j++) {
+ if (b43_phy_read(dev, B43_NPHY_RFCTL_CMD) & B43_NPHY_RFCTL_CMD_START) {
+ j = 0;
+ break;
+ }
+ udelay(10);
+ }
+ if (j)
+ b43err(dev->wl,
+ "intc override timeout\n");
+ b43_phy_mask(dev, B43_NPHY_TXF_40CO_B1S1,
+ 0xFFFE);
+ } else {
+ b43_phy_maskset(dev, B43_NPHY_RFCTL_INTC2,
+ 0xFC3F, (value << 6));
+ b43_phy_maskset(dev, B43_NPHY_RFCTL_OVER,
+ 0xFFFE, 1);
+ b43_phy_set(dev, B43_NPHY_RFCTL_CMD,
+ B43_NPHY_RFCTL_CMD_RXTX);
+ for (j = 0; j < 100; j++) {
+ if (b43_phy_read(dev, B43_NPHY_RFCTL_CMD) & B43_NPHY_RFCTL_CMD_RXTX) {
+ j = 0;
+ break;
+ }
+ udelay(10);
+ }
+ if (j)
+ b43err(dev->wl,
+ "intc override timeout\n");
+ b43_phy_mask(dev, B43_NPHY_RFCTL_OVER,
+ 0xFFFE);
+ }
+ break;
+ case 2:
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ tmp = 0x0020;
+ val = value << 5;
+ } else {
+ tmp = 0x0010;
+ val = value << 4;
+ }
+ b43_phy_maskset(dev, reg, ~tmp, val);
+ break;
+ case 3:
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ tmp = 0x0001;
+ val = value;
+ } else {
+ tmp = 0x0004;
+ val = value << 2;
+ }
+ b43_phy_maskset(dev, reg, ~tmp, val);
+ break;
+ case 4:
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ tmp = 0x0002;
+ val = value << 1;
+ } else {
+ tmp = 0x0008;
+ val = value << 3;
+ }
+ b43_phy_maskset(dev, reg, ~tmp, val);
+ break;
+ }
+ }
+}
- b43_radio_write(dev, B2055_C2_TX_PGAPADTN, e->radio_c2_tx_pgapadtn);
- b43_radio_write(dev, B2055_C2_TX_MXBGTRIM, e->radio_c2_tx_mxbgtrim);
+/**************************************************
+ * Various PHY ops
+ **************************************************/
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/clip-detection */
+static void b43_nphy_write_clip_detection(struct b43_wldev *dev,
+ const u16 *clip_st)
+{
+ b43_phy_write(dev, B43_NPHY_C1_CLIP1THRES, clip_st[0]);
+ b43_phy_write(dev, B43_NPHY_C2_CLIP1THRES, clip_st[1]);
}
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/clip-detection */
+static void b43_nphy_read_clip_detection(struct b43_wldev *dev, u16 *clip_st)
+{
+ clip_st[0] = b43_phy_read(dev, B43_NPHY_C1_CLIP1THRES);
+ clip_st[1] = b43_phy_read(dev, B43_NPHY_C2_CLIP1THRES);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/classifier */
+static u16 b43_nphy_classifier(struct b43_wldev *dev, u16 mask, u16 val)
+{
+ u16 tmp;
+
+ if (dev->dev->core_rev == 16)
+ b43_mac_suspend(dev);
+
+ tmp = b43_phy_read(dev, B43_NPHY_CLASSCTL);
+ tmp &= (B43_NPHY_CLASSCTL_CCKEN | B43_NPHY_CLASSCTL_OFDMEN |
+ B43_NPHY_CLASSCTL_WAITEDEN);
+ tmp &= ~mask;
+ tmp |= (val & mask);
+ b43_phy_maskset(dev, B43_NPHY_CLASSCTL, 0xFFF8, tmp);
+
+ if (dev->dev->core_rev == 16)
+ b43_mac_enable(dev);
+
+ return tmp;
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CCA */
+static void b43_nphy_reset_cca(struct b43_wldev *dev)
+{
+ u16 bbcfg;
+
+ b43_phy_force_clock(dev, 1);
+ bbcfg = b43_phy_read(dev, B43_NPHY_BBCFG);
+ b43_phy_write(dev, B43_NPHY_BBCFG, bbcfg | B43_NPHY_BBCFG_RSTCCA);
+ udelay(1);
+ b43_phy_write(dev, B43_NPHY_BBCFG, bbcfg & ~B43_NPHY_BBCFG_RSTCCA);
+ b43_phy_force_clock(dev, 0);
+ b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/carriersearch */
+static void b43_nphy_stay_in_carrier_search(struct b43_wldev *dev, bool enable)
+{
+ struct b43_phy *phy = &dev->phy;
+ struct b43_phy_n *nphy = phy->n;
+
+ if (enable) {
+ static const u16 clip[] = { 0xFFFF, 0xFFFF };
+ if (nphy->deaf_count++ == 0) {
+ nphy->classifier_state = b43_nphy_classifier(dev, 0, 0);
+ b43_nphy_classifier(dev, 0x7, 0);
+ b43_nphy_read_clip_detection(dev, nphy->clip_state);
+ b43_nphy_write_clip_detection(dev, clip);
+ }
+ b43_nphy_reset_cca(dev);
+ } else {
+ if (--nphy->deaf_count == 0) {
+ b43_nphy_classifier(dev, 0x7, nphy->classifier_state);
+ b43_nphy_write_clip_detection(dev, nphy->clip_state);
+ }
+ }
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/AdjustLnaGainTbl */
+static void b43_nphy_adjust_lna_gain_table(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+
+ u8 i;
+ s16 tmp;
+ u16 data[4];
+ s16 gain[2];
+ u16 minmax[2];
+ static const u16 lna_gain[4] = { -2, 10, 19, 25 };
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 1);
+
+ if (nphy->gain_boost) {
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ gain[0] = 6;
+ gain[1] = 6;
+ } else {
+ tmp = 40370 - 315 * dev->phy.channel;
+ gain[0] = ((tmp >> 13) + ((tmp >> 12) & 1));
+ tmp = 23242 - 224 * dev->phy.channel;
+ gain[1] = ((tmp >> 13) + ((tmp >> 12) & 1));
+ }
+ } else {
+ gain[0] = 0;
+ gain[1] = 0;
+ }
+
+ for (i = 0; i < 2; i++) {
+ if (nphy->elna_gain_config) {
+ data[0] = 19 + gain[i];
+ data[1] = 25 + gain[i];
+ data[2] = 25 + gain[i];
+ data[3] = 25 + gain[i];
+ } else {
+ data[0] = lna_gain[0] + gain[i];
+ data[1] = lna_gain[1] + gain[i];
+ data[2] = lna_gain[2] + gain[i];
+ data[3] = lna_gain[3] + gain[i];
+ }
+ b43_ntab_write_bulk(dev, B43_NTAB16(i, 8), 4, data);
+
+ minmax[i] = 23 + gain[i];
+ }
+
+ b43_phy_maskset(dev, B43_NPHY_C1_MINMAX_GAIN, ~B43_NPHY_C1_MINGAIN,
+ minmax[0] << B43_NPHY_C1_MINGAIN_SHIFT);
+ b43_phy_maskset(dev, B43_NPHY_C2_MINMAX_GAIN, ~B43_NPHY_C2_MINGAIN,
+ minmax[1] << B43_NPHY_C2_MINGAIN_SHIFT);
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 0);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SetRfSeq */
+static void b43_nphy_set_rf_sequence(struct b43_wldev *dev, u8 cmd,
+ u8 *events, u8 *delays, u8 length)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ u8 i;
+ u8 end = (dev->phy.rev >= 3) ? 0x1F : 0x0F;
+ u16 offset1 = cmd << 4;
+ u16 offset2 = offset1 + 0x80;
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, true);
+
+ b43_ntab_write_bulk(dev, B43_NTAB8(7, offset1), length, events);
+ b43_ntab_write_bulk(dev, B43_NTAB8(7, offset2), length, delays);
+
+ for (i = length; i < 16; i++) {
+ b43_ntab_write(dev, B43_NTAB8(7, offset1 + i), end);
+ b43_ntab_write(dev, B43_NTAB8(7, offset2 + i), 1);
+ }
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, false);
+}
+
+/**************************************************
+ * Radio 0x2056
+ **************************************************/
+
static void b43_chantab_radio_2056_upload(struct b43_wldev *dev,
const struct b43_nphy_channeltab_entry_rev3 *e)
{
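
In b43_nphy_adjust_lna_gain_table() above, the 5 GHz gain terms use ((tmp >> 13) + ((tmp >> 12) & 1)), i.e. division by 8192 rounded to the nearest integer, since bit 12 carries the half step. A small illustrative helper (demo_* naming, not driver code):

/* Round-to-nearest division by 8192 (illustration only). */
static inline s16 demo_round_div_8192(s16 tmp)
{
	/* tmp >> 13 truncates tmp / 8192; bit 12 has weight 4096, half of
	 * the divisor, so adding it rounds to the nearest value. */
	return (tmp >> 13) + ((tmp >> 12) & 1);
}

/* Example: on 5 GHz channel 36, tmp = 40370 - 315 * 36 = 29030;
 * 29030 / 8192 = 3.54..., and demo_round_div_8192(29030) = 3 + 1 = 4. */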
@@ -228,10 +552,98 @@ static void b43_chantab_radio_2056_upload(struct b43_wldev *dev,
static void b43_radio_2056_setup(struct b43_wldev *dev,
const struct b43_nphy_channeltab_entry_rev3 *e)
{
+ struct ssb_sprom *sprom = dev->dev->bus_sprom;
+ enum ieee80211_band band = b43_current_band(dev->wl);
+ u16 offset;
+ u8 i;
+ u16 bias, cbias, pag_boost, pgag_boost, mixg_boost, padg_boost;
+
B43_WARN_ON(dev->phy.rev < 3);
b43_chantab_radio_2056_upload(dev, e);
- /* TODO */
+ b2056_upload_syn_pll_cp2(dev, band == IEEE80211_BAND_5GHZ);
+
+ if (sprom->boardflags2_lo & B43_BFL2_GPLL_WAR &&
+ b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, 0x1F);
+ b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER2, 0x1F);
+ if (dev->dev->chip_id == 0x4716) {
+ b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER4, 0x14);
+ b43_radio_write(dev, B2056_SYN_PLL_CP2, 0);
+ } else {
+ b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER4, 0x0B);
+ b43_radio_write(dev, B2056_SYN_PLL_CP2, 0x14);
+ }
+ }
+ if (sprom->boardflags2_lo & B43_BFL2_APLL_WAR &&
+ b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, 0x1F);
+ b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER2, 0x1F);
+ b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER4, 0x05);
+ b43_radio_write(dev, B2056_SYN_PLL_CP2, 0x0C);
+ }
+
+ if (dev->phy.n->ipa2g_on && band == IEEE80211_BAND_2GHZ) {
+ for (i = 0; i < 2; i++) {
+ offset = i ? B2056_TX1 : B2056_TX0;
+ if (dev->phy.rev >= 5) {
+ b43_radio_write(dev,
+ offset | B2056_TX_PADG_IDAC, 0xcc);
+
+ if (dev->dev->chip_id == 0x4716) {
+ bias = 0x40;
+ cbias = 0x45;
+ pag_boost = 0x5;
+ pgag_boost = 0x33;
+ mixg_boost = 0x55;
+ } else {
+ bias = 0x25;
+ cbias = 0x20;
+ pag_boost = 0x4;
+ pgag_boost = 0x03;
+ mixg_boost = 0x65;
+ }
+ padg_boost = 0x77;
+
+ b43_radio_write(dev,
+ offset | B2056_TX_INTPAG_IMAIN_STAT,
+ bias);
+ b43_radio_write(dev,
+ offset | B2056_TX_INTPAG_IAUX_STAT,
+ bias);
+ b43_radio_write(dev,
+ offset | B2056_TX_INTPAG_CASCBIAS,
+ cbias);
+ b43_radio_write(dev,
+ offset | B2056_TX_INTPAG_BOOST_TUNE,
+ pag_boost);
+ b43_radio_write(dev,
+ offset | B2056_TX_PGAG_BOOST_TUNE,
+ pgag_boost);
+ b43_radio_write(dev,
+ offset | B2056_TX_PADG_BOOST_TUNE,
+ padg_boost);
+ b43_radio_write(dev,
+ offset | B2056_TX_MIXG_BOOST_TUNE,
+ mixg_boost);
+ } else {
+ bias = dev->phy.is_40mhz ? 0x40 : 0x20;
+ b43_radio_write(dev,
+ offset | B2056_TX_INTPAG_IMAIN_STAT,
+ bias);
+ b43_radio_write(dev,
+ offset | B2056_TX_INTPAG_IAUX_STAT,
+ bias);
+ b43_radio_write(dev,
+ offset | B2056_TX_INTPAG_CASCBIAS,
+ 0x30);
+ }
+ b43_radio_write(dev, offset | B2056_TX_PA_SPARE1, 0xee);
+ }
+ } else if (dev->phy.n->ipa5g_on && band == IEEE80211_BAND_5GHZ) {
+ /* TODO */
+ }
+
udelay(50);
/* VCO calibration */
b43_radio_write(dev, B2056_SYN_PLL_VCOCAL12, 0x00);
@@ -242,285 +654,84 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
udelay(300);
}
-static void b43_chantab_phy_upload(struct b43_wldev *dev,
- const struct b43_phy_n_sfo_cfg *e)
-{
- b43_phy_write(dev, B43_NPHY_BW1A, e->phy_bw1a);
- b43_phy_write(dev, B43_NPHY_BW2, e->phy_bw2);
- b43_phy_write(dev, B43_NPHY_BW3, e->phy_bw3);
- b43_phy_write(dev, B43_NPHY_BW4, e->phy_bw4);
- b43_phy_write(dev, B43_NPHY_BW5, e->phy_bw5);
- b43_phy_write(dev, B43_NPHY_BW6, e->phy_bw6);
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlEnable */
-static void b43_nphy_tx_power_ctrl(struct b43_wldev *dev, bool enable)
+static void b43_radio_init2056_pre(struct b43_wldev *dev)
{
- struct b43_phy_n *nphy = dev->phy.n;
- u8 i;
- u16 bmask, val, tmp;
- enum ieee80211_band band = b43_current_band(dev->wl);
-
- if (nphy->hang_avoid)
- b43_nphy_stay_in_carrier_search(dev, 1);
-
- nphy->txpwrctrl = enable;
- if (!enable) {
- if (dev->phy.rev >= 3 &&
- (b43_phy_read(dev, B43_NPHY_TXPCTL_CMD) &
- (B43_NPHY_TXPCTL_CMD_COEFF |
- B43_NPHY_TXPCTL_CMD_HWPCTLEN |
- B43_NPHY_TXPCTL_CMD_PCTLEN))) {
- /* We disable enabled TX pwr ctl, save it's state */
- nphy->tx_pwr_idx[0] = b43_phy_read(dev,
- B43_NPHY_C1_TXPCTL_STAT) & 0x7f;
- nphy->tx_pwr_idx[1] = b43_phy_read(dev,
- B43_NPHY_C2_TXPCTL_STAT) & 0x7f;
- }
-
- b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x6840);
- for (i = 0; i < 84; i++)
- b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0);
-
- b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x6C40);
- for (i = 0; i < 84; i++)
- b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0);
-
- tmp = B43_NPHY_TXPCTL_CMD_COEFF | B43_NPHY_TXPCTL_CMD_HWPCTLEN;
- if (dev->phy.rev >= 3)
- tmp |= B43_NPHY_TXPCTL_CMD_PCTLEN;
- b43_phy_mask(dev, B43_NPHY_TXPCTL_CMD, ~tmp);
-
- if (dev->phy.rev >= 3) {
- b43_phy_set(dev, B43_NPHY_AFECTL_OVER1, 0x0100);
- b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x0100);
- } else {
- b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x4000);
- }
-
- if (dev->phy.rev == 2)
- b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3,
- ~B43_NPHY_BPHY_CTL3_SCALE, 0x53);
- else if (dev->phy.rev < 2)
- b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3,
- ~B43_NPHY_BPHY_CTL3_SCALE, 0x5A);
-
- if (dev->phy.rev < 2 && dev->phy.is_40mhz)
- b43_hf_write(dev, b43_hf_read(dev) | B43_HF_TSSIRPSMW);
- } else {
- b43_ntab_write_bulk(dev, B43_NTAB16(26, 64), 84,
- nphy->adj_pwr_tbl);
- b43_ntab_write_bulk(dev, B43_NTAB16(27, 64), 84,
- nphy->adj_pwr_tbl);
-
- bmask = B43_NPHY_TXPCTL_CMD_COEFF |
- B43_NPHY_TXPCTL_CMD_HWPCTLEN;
- /* wl does useless check for "enable" param here */
- val = B43_NPHY_TXPCTL_CMD_COEFF | B43_NPHY_TXPCTL_CMD_HWPCTLEN;
- if (dev->phy.rev >= 3) {
- bmask |= B43_NPHY_TXPCTL_CMD_PCTLEN;
- if (val)
- val |= B43_NPHY_TXPCTL_CMD_PCTLEN;
- }
- b43_phy_maskset(dev, B43_NPHY_TXPCTL_CMD, ~(bmask), val);
-
- if (band == IEEE80211_BAND_5GHZ) {
- b43_phy_maskset(dev, B43_NPHY_TXPCTL_CMD,
- ~B43_NPHY_TXPCTL_CMD_INIT, 0x64);
- if (dev->phy.rev > 1)
- b43_phy_maskset(dev, B43_NPHY_TXPCTL_INIT,
- ~B43_NPHY_TXPCTL_INIT_PIDXI1,
- 0x64);
- }
-
- if (dev->phy.rev >= 3) {
- if (nphy->tx_pwr_idx[0] != 128 &&
- nphy->tx_pwr_idx[1] != 128) {
- /* Recover TX pwr ctl state */
- b43_phy_maskset(dev, B43_NPHY_TXPCTL_CMD,
- ~B43_NPHY_TXPCTL_CMD_INIT,
- nphy->tx_pwr_idx[0]);
- if (dev->phy.rev > 1)
- b43_phy_maskset(dev,
- B43_NPHY_TXPCTL_INIT,
- ~0xff, nphy->tx_pwr_idx[1]);
- }
- }
-
- if (dev->phy.rev >= 3) {
- b43_phy_mask(dev, B43_NPHY_AFECTL_OVER1, ~0x100);
- b43_phy_mask(dev, B43_NPHY_AFECTL_OVER, ~0x100);
- } else {
- b43_phy_mask(dev, B43_NPHY_AFECTL_OVER, ~0x4000);
- }
-
- if (dev->phy.rev == 2)
- b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3, ~0xFF, 0x3b);
- else if (dev->phy.rev < 2)
- b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3, ~0xFF, 0x40);
-
- if (dev->phy.rev < 2 && dev->phy.is_40mhz)
- b43_hf_write(dev, b43_hf_read(dev) & ~B43_HF_TSSIRPSMW);
-
- if (b43_nphy_ipa(dev)) {
- b43_phy_mask(dev, B43_NPHY_PAPD_EN0, ~0x4);
- b43_phy_mask(dev, B43_NPHY_PAPD_EN1, ~0x4);
- }
- }
-
- if (nphy->hang_avoid)
- b43_nphy_stay_in_carrier_search(dev, 0);
+ b43_phy_mask(dev, B43_NPHY_RFCTL_CMD,
+ ~B43_NPHY_RFCTL_CMD_CHIP0PU);
+ /* Maybe wl meant to reset and set (order?) RFCTL_CMD_OEPORFORCE? */
+ b43_phy_mask(dev, B43_NPHY_RFCTL_CMD,
+ B43_NPHY_RFCTL_CMD_OEPORFORCE);
+ b43_phy_set(dev, B43_NPHY_RFCTL_CMD,
+ ~B43_NPHY_RFCTL_CMD_OEPORFORCE);
+ b43_phy_set(dev, B43_NPHY_RFCTL_CMD,
+ B43_NPHY_RFCTL_CMD_CHIP0PU);
}
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrFix */
-static void b43_nphy_tx_power_fix(struct b43_wldev *dev)
+static void b43_radio_init2056_post(struct b43_wldev *dev)
{
- struct b43_phy_n *nphy = dev->phy.n;
- struct ssb_sprom *sprom = dev->dev->bus_sprom;
-
- u8 txpi[2], bbmult, i;
- u16 tmp, radio_gain, dac_gain;
- u16 freq = dev->phy.channel_freq;
- u32 txgain;
- /* u32 gaintbl; rev3+ */
-
- if (nphy->hang_avoid)
- b43_nphy_stay_in_carrier_search(dev, 1);
-
- if (dev->phy.rev >= 3) {
- txpi[0] = 40;
- txpi[1] = 40;
- } else if (sprom->revision < 4) {
- txpi[0] = 72;
- txpi[1] = 72;
- } else {
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
- txpi[0] = sprom->txpid2g[0];
- txpi[1] = sprom->txpid2g[1];
- } else if (freq >= 4900 && freq < 5100) {
- txpi[0] = sprom->txpid5gl[0];
- txpi[1] = sprom->txpid5gl[1];
- } else if (freq >= 5100 && freq < 5500) {
- txpi[0] = sprom->txpid5g[0];
- txpi[1] = sprom->txpid5g[1];
- } else if (freq >= 5500) {
- txpi[0] = sprom->txpid5gh[0];
- txpi[1] = sprom->txpid5gh[1];
- } else {
- txpi[0] = 91;
- txpi[1] = 91;
- }
- }
-
+ b43_radio_set(dev, B2056_SYN_COM_CTRL, 0xB);
+ b43_radio_set(dev, B2056_SYN_COM_PU, 0x2);
+ b43_radio_set(dev, B2056_SYN_COM_RESET, 0x2);
+ msleep(1);
+ b43_radio_mask(dev, B2056_SYN_COM_RESET, ~0x2);
+ b43_radio_mask(dev, B2056_SYN_PLL_MAST2, ~0xFC);
+ b43_radio_mask(dev, B2056_SYN_RCCAL_CTRL0, ~0x1);
/*
- for (i = 0; i < 2; i++) {
- nphy->txpwrindex[i].index_internal = txpi[i];
- nphy->txpwrindex[i].index_internal_save = txpi[i];
- }
+ if (nphy->init_por)
+ Call Radio 2056 Recalibrate
*/
+}
- for (i = 0; i < 2; i++) {
- if (dev->phy.rev >= 3) {
- /* FIXME: support 5GHz */
- txgain = b43_ntab_tx_gain_rev3plus_2ghz[txpi[i]];
- radio_gain = (txgain >> 16) & 0x1FFFF;
- } else {
- txgain = b43_ntab_tx_gain_rev0_1_2[txpi[i]];
- radio_gain = (txgain >> 16) & 0x1FFF;
- }
-
- dac_gain = (txgain >> 8) & 0x3F;
- bbmult = txgain & 0xFF;
-
- if (dev->phy.rev >= 3) {
- if (i == 0)
- b43_phy_set(dev, B43_NPHY_AFECTL_OVER1, 0x0100);
- else
- b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x0100);
- } else {
- b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x4000);
- }
-
- if (i == 0)
- b43_phy_write(dev, B43_NPHY_AFECTL_DACGAIN1, dac_gain);
- else
- b43_phy_write(dev, B43_NPHY_AFECTL_DACGAIN2, dac_gain);
-
- b43_ntab_write(dev, B43_NTAB16(0x7, 0x110 + i), radio_gain);
-
- tmp = b43_ntab_read(dev, B43_NTAB16(0xF, 0x57));
- if (i == 0)
- tmp = (tmp & 0x00FF) | (bbmult << 8);
- else
- tmp = (tmp & 0xFF00) | bbmult;
- b43_ntab_write(dev, B43_NTAB16(0xF, 0x57), tmp);
-
- if (b43_nphy_ipa(dev)) {
- u32 tmp32;
- u16 reg = (i == 0) ?
- B43_NPHY_PAPD_EN0 : B43_NPHY_PAPD_EN1;
- tmp32 = b43_ntab_read(dev, B43_NTAB32(26 + i, txpi[i]));
- b43_phy_maskset(dev, reg, 0xE00F, (u32) tmp32 << 4);
- b43_phy_set(dev, reg, 0x4);
- }
- }
-
- b43_phy_mask(dev, B43_NPHY_BPHY_CTL2, ~B43_NPHY_BPHY_CTL2_LUT);
-
- if (nphy->hang_avoid)
- b43_nphy_stay_in_carrier_search(dev, 0);
+/*
+ * Initialize a Broadcom 2056 N-radio
+ * http://bcm-v4.sipsolutions.net/802.11/Radio/2056/Init
+ */
+static void b43_radio_init2056(struct b43_wldev *dev)
+{
+ b43_radio_init2056_pre(dev);
+ b2056_upload_inittabs(dev, 0, 0);
+ b43_radio_init2056_post(dev);
}
-static void b43_nphy_tx_gain_table_upload(struct b43_wldev *dev)
+/**************************************************
+ * Radio 0x2055
+ **************************************************/
+
+static void b43_chantab_radio_upload(struct b43_wldev *dev,
+ const struct b43_nphy_channeltab_entry_rev2 *e)
{
- struct b43_phy *phy = &dev->phy;
+ b43_radio_write(dev, B2055_PLL_REF, e->radio_pll_ref);
+ b43_radio_write(dev, B2055_RF_PLLMOD0, e->radio_rf_pllmod0);
+ b43_radio_write(dev, B2055_RF_PLLMOD1, e->radio_rf_pllmod1);
+ b43_radio_write(dev, B2055_VCO_CAPTAIL, e->radio_vco_captail);
+ b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */
- const u32 *table = NULL;
-#if 0
- TODO: b43_ntab_papd_pga_gain_delta_ipa_2*
- u32 rfpwr_offset;
- u8 pga_gain;
- int i;
-#endif
+ b43_radio_write(dev, B2055_VCO_CAL1, e->radio_vco_cal1);
+ b43_radio_write(dev, B2055_VCO_CAL2, e->radio_vco_cal2);
+ b43_radio_write(dev, B2055_PLL_LFC1, e->radio_pll_lfc1);
+ b43_radio_write(dev, B2055_PLL_LFR1, e->radio_pll_lfr1);
+ b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */
- if (phy->rev >= 3) {
- if (b43_nphy_ipa(dev)) {
- table = b43_nphy_get_ipa_gain_table(dev);
- } else {
- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
- if (phy->rev == 3)
- table = b43_ntab_tx_gain_rev3_5ghz;
- if (phy->rev == 4)
- table = b43_ntab_tx_gain_rev4_5ghz;
- else
- table = b43_ntab_tx_gain_rev5plus_5ghz;
- } else {
- table = b43_ntab_tx_gain_rev3plus_2ghz;
- }
- }
- } else {
- table = b43_ntab_tx_gain_rev0_1_2;
- }
- b43_ntab_write_bulk(dev, B43_NTAB32(26, 192), 128, table);
- b43_ntab_write_bulk(dev, B43_NTAB32(27, 192), 128, table);
+ b43_radio_write(dev, B2055_PLL_LFC2, e->radio_pll_lfc2);
+ b43_radio_write(dev, B2055_LGBUF_CENBUF, e->radio_lgbuf_cenbuf);
+ b43_radio_write(dev, B2055_LGEN_TUNE1, e->radio_lgen_tune1);
+ b43_radio_write(dev, B2055_LGEN_TUNE2, e->radio_lgen_tune2);
+ b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */
- if (phy->rev >= 3) {
-#if 0
- nphy->gmval = (table[0] >> 16) & 0x7000;
+ b43_radio_write(dev, B2055_C1_LGBUF_ATUNE, e->radio_c1_lgbuf_atune);
+ b43_radio_write(dev, B2055_C1_LGBUF_GTUNE, e->radio_c1_lgbuf_gtune);
+ b43_radio_write(dev, B2055_C1_RX_RFR1, e->radio_c1_rx_rfr1);
+ b43_radio_write(dev, B2055_C1_TX_PGAPADTN, e->radio_c1_tx_pgapadtn);
+ b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */
- for (i = 0; i < 128; i++) {
- pga_gain = (table[i] >> 24) & 0xF;
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
- rfpwr_offset = b43_ntab_papd_pga_gain_delta_ipa_2g[pga_gain];
- else
- rfpwr_offset = b43_ntab_papd_pga_gain_delta_ipa_5g[pga_gain];
- b43_ntab_write(dev, B43_NTAB32(26, 576 + i),
- rfpwr_offset);
- b43_ntab_write(dev, B43_NTAB32(27, 576 + i),
- rfpwr_offset);
- }
-#endif
- }
+ b43_radio_write(dev, B2055_C1_TX_MXBGTRIM, e->radio_c1_tx_mxbgtrim);
+ b43_radio_write(dev, B2055_C2_LGBUF_ATUNE, e->radio_c2_lgbuf_atune);
+ b43_radio_write(dev, B2055_C2_LGBUF_GTUNE, e->radio_c2_lgbuf_gtune);
+ b43_radio_write(dev, B2055_C2_RX_RFR1, e->radio_c2_rx_rfr1);
+ b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */
+
+ b43_radio_write(dev, B2055_C2_TX_PGAPADTN, e->radio_c2_tx_pgapadtn);
+ b43_radio_write(dev, B2055_C2_TX_MXBGTRIM, e->radio_c2_tx_mxbgtrim);
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/Radio/2055Setup */
@@ -622,1089 +833,9 @@ static void b43_radio_init2055(struct b43_wldev *dev)
b43_radio_init2055_post(dev);
}
-static void b43_radio_init2056_pre(struct b43_wldev *dev)
-{
- b43_phy_mask(dev, B43_NPHY_RFCTL_CMD,
- ~B43_NPHY_RFCTL_CMD_CHIP0PU);
- /* Maybe wl meant to reset and set (order?) RFCTL_CMD_OEPORFORCE? */
- b43_phy_mask(dev, B43_NPHY_RFCTL_CMD,
- B43_NPHY_RFCTL_CMD_OEPORFORCE);
- b43_phy_set(dev, B43_NPHY_RFCTL_CMD,
- ~B43_NPHY_RFCTL_CMD_OEPORFORCE);
- b43_phy_set(dev, B43_NPHY_RFCTL_CMD,
- B43_NPHY_RFCTL_CMD_CHIP0PU);
-}
-
-static void b43_radio_init2056_post(struct b43_wldev *dev)
-{
- b43_radio_set(dev, B2056_SYN_COM_CTRL, 0xB);
- b43_radio_set(dev, B2056_SYN_COM_PU, 0x2);
- b43_radio_set(dev, B2056_SYN_COM_RESET, 0x2);
- msleep(1);
- b43_radio_mask(dev, B2056_SYN_COM_RESET, ~0x2);
- b43_radio_mask(dev, B2056_SYN_PLL_MAST2, ~0xFC);
- b43_radio_mask(dev, B2056_SYN_RCCAL_CTRL0, ~0x1);
- /*
- if (nphy->init_por)
- Call Radio 2056 Recalibrate
- */
-}
-
-/*
- * Initialize a Broadcom 2056 N-radio
- * http://bcm-v4.sipsolutions.net/802.11/Radio/2056/Init
- */
-static void b43_radio_init2056(struct b43_wldev *dev)
-{
- b43_radio_init2056_pre(dev);
- b2056_upload_inittabs(dev, 0, 0);
- b43_radio_init2056_post(dev);
-}
-
-/*
- * Upload the N-PHY tables.
- * http://bcm-v4.sipsolutions.net/802.11/PHY/N/InitTables
- */
-static void b43_nphy_tables_init(struct b43_wldev *dev)
-{
- if (dev->phy.rev < 3)
- b43_nphy_rev0_1_2_tables_init(dev);
- else
- b43_nphy_rev3plus_tables_init(dev);
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/PA%20override */
-static void b43_nphy_pa_override(struct b43_wldev *dev, bool enable)
-{
- struct b43_phy_n *nphy = dev->phy.n;
- enum ieee80211_band band;
- u16 tmp;
-
- if (!enable) {
- nphy->rfctrl_intc1_save = b43_phy_read(dev,
- B43_NPHY_RFCTL_INTC1);
- nphy->rfctrl_intc2_save = b43_phy_read(dev,
- B43_NPHY_RFCTL_INTC2);
- band = b43_current_band(dev->wl);
- if (dev->phy.rev >= 3) {
- if (band == IEEE80211_BAND_5GHZ)
- tmp = 0x600;
- else
- tmp = 0x480;
- } else {
- if (band == IEEE80211_BAND_5GHZ)
- tmp = 0x180;
- else
- tmp = 0x120;
- }
- b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, tmp);
- b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, tmp);
- } else {
- b43_phy_write(dev, B43_NPHY_RFCTL_INTC1,
- nphy->rfctrl_intc1_save);
- b43_phy_write(dev, B43_NPHY_RFCTL_INTC2,
- nphy->rfctrl_intc2_save);
- }
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxLpFbw */
-static void b43_nphy_tx_lp_fbw(struct b43_wldev *dev)
-{
- u16 tmp;
-
- if (dev->phy.rev >= 3) {
- if (b43_nphy_ipa(dev)) {
- tmp = 4;
- b43_phy_write(dev, B43_NPHY_TXF_40CO_B32S2,
- (((((tmp << 3) | tmp) << 3) | tmp) << 3) | tmp);
- }
-
- tmp = 1;
- b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S2,
- (((((tmp << 3) | tmp) << 3) | tmp) << 3) | tmp);
- }
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CCA */
-static void b43_nphy_reset_cca(struct b43_wldev *dev)
-{
- u16 bbcfg;
-
- b43_phy_force_clock(dev, 1);
- bbcfg = b43_phy_read(dev, B43_NPHY_BBCFG);
- b43_phy_write(dev, B43_NPHY_BBCFG, bbcfg | B43_NPHY_BBCFG_RSTCCA);
- udelay(1);
- b43_phy_write(dev, B43_NPHY_BBCFG, bbcfg & ~B43_NPHY_BBCFG_RSTCCA);
- b43_phy_force_clock(dev, 0);
- b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX);
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/MIMOConfig */
-static void b43_nphy_update_mimo_config(struct b43_wldev *dev, s32 preamble)
-{
- u16 mimocfg = b43_phy_read(dev, B43_NPHY_MIMOCFG);
-
- mimocfg |= B43_NPHY_MIMOCFG_AUTO;
- if (preamble == 1)
- mimocfg |= B43_NPHY_MIMOCFG_GFMIX;
- else
- mimocfg &= ~B43_NPHY_MIMOCFG_GFMIX;
-
- b43_phy_write(dev, B43_NPHY_MIMOCFG, mimocfg);
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/Chains */
-static void b43_nphy_update_txrx_chain(struct b43_wldev *dev)
-{
- struct b43_phy_n *nphy = dev->phy.n;
-
- bool override = false;
- u16 chain = 0x33;
-
- if (nphy->txrx_chain == 0) {
- chain = 0x11;
- override = true;
- } else if (nphy->txrx_chain == 1) {
- chain = 0x22;
- override = true;
- }
-
- b43_phy_maskset(dev, B43_NPHY_RFSEQCA,
- ~(B43_NPHY_RFSEQCA_TXEN | B43_NPHY_RFSEQCA_RXEN),
- chain);
-
- if (override)
- b43_phy_set(dev, B43_NPHY_RFSEQMODE,
- B43_NPHY_RFSEQMODE_CAOVER);
- else
- b43_phy_mask(dev, B43_NPHY_RFSEQMODE,
- ~B43_NPHY_RFSEQMODE_CAOVER);
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxIqEst */
-static void b43_nphy_rx_iq_est(struct b43_wldev *dev, struct nphy_iq_est *est,
- u16 samps, u8 time, bool wait)
-{
- int i;
- u16 tmp;
-
- b43_phy_write(dev, B43_NPHY_IQEST_SAMCNT, samps);
- b43_phy_maskset(dev, B43_NPHY_IQEST_WT, ~B43_NPHY_IQEST_WT_VAL, time);
- if (wait)
- b43_phy_set(dev, B43_NPHY_IQEST_CMD, B43_NPHY_IQEST_CMD_MODE);
- else
- b43_phy_mask(dev, B43_NPHY_IQEST_CMD, ~B43_NPHY_IQEST_CMD_MODE);
-
- b43_phy_set(dev, B43_NPHY_IQEST_CMD, B43_NPHY_IQEST_CMD_START);
-
- for (i = 1000; i; i--) {
- tmp = b43_phy_read(dev, B43_NPHY_IQEST_CMD);
- if (!(tmp & B43_NPHY_IQEST_CMD_START)) {
- est->i0_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_IPACC_HI0) << 16) |
- b43_phy_read(dev, B43_NPHY_IQEST_IPACC_LO0);
- est->q0_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_QPACC_HI0) << 16) |
- b43_phy_read(dev, B43_NPHY_IQEST_QPACC_LO0);
- est->iq0_prod = (b43_phy_read(dev, B43_NPHY_IQEST_IQACC_HI0) << 16) |
- b43_phy_read(dev, B43_NPHY_IQEST_IQACC_LO0);
-
- est->i1_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_IPACC_HI1) << 16) |
- b43_phy_read(dev, B43_NPHY_IQEST_IPACC_LO1);
- est->q1_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_QPACC_HI1) << 16) |
- b43_phy_read(dev, B43_NPHY_IQEST_QPACC_LO1);
- est->iq1_prod = (b43_phy_read(dev, B43_NPHY_IQEST_IQACC_HI1) << 16) |
- b43_phy_read(dev, B43_NPHY_IQEST_IQACC_LO1);
- return;
- }
- udelay(10);
- }
- memset(est, 0, sizeof(*est));
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxIqCoeffs */
-static void b43_nphy_rx_iq_coeffs(struct b43_wldev *dev, bool write,
- struct b43_phy_n_iq_comp *pcomp)
-{
- if (write) {
- b43_phy_write(dev, B43_NPHY_C1_RXIQ_COMPA0, pcomp->a0);
- b43_phy_write(dev, B43_NPHY_C1_RXIQ_COMPB0, pcomp->b0);
- b43_phy_write(dev, B43_NPHY_C2_RXIQ_COMPA1, pcomp->a1);
- b43_phy_write(dev, B43_NPHY_C2_RXIQ_COMPB1, pcomp->b1);
- } else {
- pcomp->a0 = b43_phy_read(dev, B43_NPHY_C1_RXIQ_COMPA0);
- pcomp->b0 = b43_phy_read(dev, B43_NPHY_C1_RXIQ_COMPB0);
- pcomp->a1 = b43_phy_read(dev, B43_NPHY_C2_RXIQ_COMPA1);
- pcomp->b1 = b43_phy_read(dev, B43_NPHY_C2_RXIQ_COMPB1);
- }
-}
-
-#if 0
-/* Ready but not used anywhere */
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCalPhyCleanup */
-static void b43_nphy_rx_cal_phy_cleanup(struct b43_wldev *dev, u8 core)
-{
- u16 *regs = dev->phy.n->tx_rx_cal_phy_saveregs;
-
- b43_phy_write(dev, B43_NPHY_RFSEQCA, regs[0]);
- if (core == 0) {
- b43_phy_write(dev, B43_NPHY_AFECTL_C1, regs[1]);
- b43_phy_write(dev, B43_NPHY_AFECTL_OVER1, regs[2]);
- } else {
- b43_phy_write(dev, B43_NPHY_AFECTL_C2, regs[1]);
- b43_phy_write(dev, B43_NPHY_AFECTL_OVER, regs[2]);
- }
- b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, regs[3]);
- b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, regs[4]);
- b43_phy_write(dev, B43_NPHY_RFCTL_RSSIO1, regs[5]);
- b43_phy_write(dev, B43_NPHY_RFCTL_RSSIO2, regs[6]);
- b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S1, regs[7]);
- b43_phy_write(dev, B43_NPHY_RFCTL_OVER, regs[8]);
- b43_phy_write(dev, B43_NPHY_PAPD_EN0, regs[9]);
- b43_phy_write(dev, B43_NPHY_PAPD_EN1, regs[10]);
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCalPhySetup */
-static void b43_nphy_rx_cal_phy_setup(struct b43_wldev *dev, u8 core)
-{
- u8 rxval, txval;
- u16 *regs = dev->phy.n->tx_rx_cal_phy_saveregs;
-
- regs[0] = b43_phy_read(dev, B43_NPHY_RFSEQCA);
- if (core == 0) {
- regs[1] = b43_phy_read(dev, B43_NPHY_AFECTL_C1);
- regs[2] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER1);
- } else {
- regs[1] = b43_phy_read(dev, B43_NPHY_AFECTL_C2);
- regs[2] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER);
- }
- regs[3] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC1);
- regs[4] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC2);
- regs[5] = b43_phy_read(dev, B43_NPHY_RFCTL_RSSIO1);
- regs[6] = b43_phy_read(dev, B43_NPHY_RFCTL_RSSIO2);
- regs[7] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B1S1);
- regs[8] = b43_phy_read(dev, B43_NPHY_RFCTL_OVER);
- regs[9] = b43_phy_read(dev, B43_NPHY_PAPD_EN0);
- regs[10] = b43_phy_read(dev, B43_NPHY_PAPD_EN1);
-
- b43_phy_mask(dev, B43_NPHY_PAPD_EN0, ~0x0001);
- b43_phy_mask(dev, B43_NPHY_PAPD_EN1, ~0x0001);
-
- b43_phy_maskset(dev, B43_NPHY_RFSEQCA,
- ~B43_NPHY_RFSEQCA_RXDIS & 0xFFFF,
- ((1 - core) << B43_NPHY_RFSEQCA_RXDIS_SHIFT));
- b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_TXEN,
- ((1 - core) << B43_NPHY_RFSEQCA_TXEN_SHIFT));
- b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_RXEN,
- (core << B43_NPHY_RFSEQCA_RXEN_SHIFT));
- b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_TXDIS,
- (core << B43_NPHY_RFSEQCA_TXDIS_SHIFT));
-
- if (core == 0) {
- b43_phy_mask(dev, B43_NPHY_AFECTL_C1, ~0x0007);
- b43_phy_set(dev, B43_NPHY_AFECTL_OVER1, 0x0007);
- } else {
- b43_phy_mask(dev, B43_NPHY_AFECTL_C2, ~0x0007);
- b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x0007);
- }
-
- b43_nphy_rf_control_intc_override(dev, 2, 0, 3);
- b43_nphy_rf_control_override(dev, 8, 0, 3, false);
- b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RX2TX);
-
- if (core == 0) {
- rxval = 1;
- txval = 8;
- } else {
- rxval = 4;
- txval = 2;
- }
- b43_nphy_rf_control_intc_override(dev, 1, rxval, (core + 1));
- b43_nphy_rf_control_intc_override(dev, 1, txval, (2 - core));
-}
-#endif
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CalcRxIqComp */
-static void b43_nphy_calc_rx_iq_comp(struct b43_wldev *dev, u8 mask)
-{
- int i;
- s32 iq;
- u32 ii;
- u32 qq;
- int iq_nbits, qq_nbits;
- int arsh, brsh;
- u16 tmp, a, b;
-
- struct nphy_iq_est est;
- struct b43_phy_n_iq_comp old;
- struct b43_phy_n_iq_comp new = { };
- bool error = false;
-
- if (mask == 0)
- return;
-
- b43_nphy_rx_iq_coeffs(dev, false, &old);
- b43_nphy_rx_iq_coeffs(dev, true, &new);
- b43_nphy_rx_iq_est(dev, &est, 0x4000, 32, false);
- new = old;
-
- for (i = 0; i < 2; i++) {
- if (i == 0 && (mask & 1)) {
- iq = est.iq0_prod;
- ii = est.i0_pwr;
- qq = est.q0_pwr;
- } else if (i == 1 && (mask & 2)) {
- iq = est.iq1_prod;
- ii = est.i1_pwr;
- qq = est.q1_pwr;
- } else {
- continue;
- }
-
- if (ii + qq < 2) {
- error = true;
- break;
- }
-
- iq_nbits = fls(abs(iq));
- qq_nbits = fls(qq);
-
- arsh = iq_nbits - 20;
- if (arsh >= 0) {
- a = -((iq << (30 - iq_nbits)) + (ii >> (1 + arsh)));
- tmp = ii >> arsh;
- } else {
- a = -((iq << (30 - iq_nbits)) + (ii << (-1 - arsh)));
- tmp = ii << -arsh;
- }
- if (tmp == 0) {
- error = true;
- break;
- }
- a /= tmp;
-
- brsh = qq_nbits - 11;
- if (brsh >= 0) {
- b = (qq << (31 - qq_nbits));
- tmp = ii >> brsh;
- } else {
- b = (qq << (31 - qq_nbits));
- tmp = ii << -brsh;
- }
- if (tmp == 0) {
- error = true;
- break;
- }
- b = int_sqrt(b / tmp - a * a) - (1 << 10);
-
- if (i == 0 && (mask & 0x1)) {
- if (dev->phy.rev >= 3) {
- new.a0 = a & 0x3FF;
- new.b0 = b & 0x3FF;
- } else {
- new.a0 = b & 0x3FF;
- new.b0 = a & 0x3FF;
- }
- } else if (i == 1 && (mask & 0x2)) {
- if (dev->phy.rev >= 3) {
- new.a1 = a & 0x3FF;
- new.b1 = b & 0x3FF;
- } else {
- new.a1 = b & 0x3FF;
- new.b1 = a & 0x3FF;
- }
- }
- }
-
- if (error)
- new = old;
-
- b43_nphy_rx_iq_coeffs(dev, true, &new);
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxIqWar */
-static void b43_nphy_tx_iq_workaround(struct b43_wldev *dev)
-{
- u16 array[4];
- b43_ntab_read_bulk(dev, B43_NTAB16(0xF, 0x50), 4, array);
-
- b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW0, array[0]);
- b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW1, array[1]);
- b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW2, array[2]);
- b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW3, array[3]);
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/clip-detection */
-static void b43_nphy_write_clip_detection(struct b43_wldev *dev,
- const u16 *clip_st)
-{
- b43_phy_write(dev, B43_NPHY_C1_CLIP1THRES, clip_st[0]);
- b43_phy_write(dev, B43_NPHY_C2_CLIP1THRES, clip_st[1]);
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/clip-detection */
-static void b43_nphy_read_clip_detection(struct b43_wldev *dev, u16 *clip_st)
-{
- clip_st[0] = b43_phy_read(dev, B43_NPHY_C1_CLIP1THRES);
- clip_st[1] = b43_phy_read(dev, B43_NPHY_C2_CLIP1THRES);
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SuperSwitchInit */
-static void b43_nphy_superswitch_init(struct b43_wldev *dev, bool init)
-{
- if (dev->phy.rev >= 3) {
- if (!init)
- return;
- if (0 /* FIXME */) {
- b43_ntab_write(dev, B43_NTAB16(9, 2), 0x211);
- b43_ntab_write(dev, B43_NTAB16(9, 3), 0x222);
- b43_ntab_write(dev, B43_NTAB16(9, 8), 0x144);
- b43_ntab_write(dev, B43_NTAB16(9, 12), 0x188);
- }
- } else {
- b43_phy_write(dev, B43_NPHY_GPIO_LOOEN, 0);
- b43_phy_write(dev, B43_NPHY_GPIO_HIOEN, 0);
-
- switch (dev->dev->bus_type) {
-#ifdef CONFIG_B43_BCMA
- case B43_BUS_BCMA:
- bcma_chipco_gpio_control(&dev->dev->bdev->bus->drv_cc,
- 0xFC00, 0xFC00);
- break;
-#endif
-#ifdef CONFIG_B43_SSB
- case B43_BUS_SSB:
- ssb_chipco_gpio_control(&dev->dev->sdev->bus->chipco,
- 0xFC00, 0xFC00);
- break;
-#endif
- }
-
- b43_write32(dev, B43_MMIO_MACCTL,
- b43_read32(dev, B43_MMIO_MACCTL) &
- ~B43_MACCTL_GPOUTSMSK);
- b43_write16(dev, B43_MMIO_GPIO_MASK,
- b43_read16(dev, B43_MMIO_GPIO_MASK) | 0xFC00);
- b43_write16(dev, B43_MMIO_GPIO_CONTROL,
- b43_read16(dev, B43_MMIO_GPIO_CONTROL) & ~0xFC00);
-
- if (init) {
- b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO1, 0x2D8);
- b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP1, 0x301);
- b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO2, 0x2D8);
- b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2, 0x301);
- }
- }
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/classifier */
-static u16 b43_nphy_classifier(struct b43_wldev *dev, u16 mask, u16 val)
-{
- u16 tmp;
-
- if (dev->dev->core_rev == 16)
- b43_mac_suspend(dev);
-
- tmp = b43_phy_read(dev, B43_NPHY_CLASSCTL);
- tmp &= (B43_NPHY_CLASSCTL_CCKEN | B43_NPHY_CLASSCTL_OFDMEN |
- B43_NPHY_CLASSCTL_WAITEDEN);
- tmp &= ~mask;
- tmp |= (val & mask);
- b43_phy_maskset(dev, B43_NPHY_CLASSCTL, 0xFFF8, tmp);
-
- if (dev->dev->core_rev == 16)
- b43_mac_enable(dev);
-
- return tmp;
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/carriersearch */
-static void b43_nphy_stay_in_carrier_search(struct b43_wldev *dev, bool enable)
-{
- struct b43_phy *phy = &dev->phy;
- struct b43_phy_n *nphy = phy->n;
-
- if (enable) {
- static const u16 clip[] = { 0xFFFF, 0xFFFF };
- if (nphy->deaf_count++ == 0) {
- nphy->classifier_state = b43_nphy_classifier(dev, 0, 0);
- b43_nphy_classifier(dev, 0x7, 0);
- b43_nphy_read_clip_detection(dev, nphy->clip_state);
- b43_nphy_write_clip_detection(dev, clip);
- }
- b43_nphy_reset_cca(dev);
- } else {
- if (--nphy->deaf_count == 0) {
- b43_nphy_classifier(dev, 0x7, nphy->classifier_state);
- b43_nphy_write_clip_detection(dev, nphy->clip_state);
- }
- }
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/stop-playback */
-static void b43_nphy_stop_playback(struct b43_wldev *dev)
-{
- struct b43_phy_n *nphy = dev->phy.n;
- u16 tmp;
-
- if (nphy->hang_avoid)
- b43_nphy_stay_in_carrier_search(dev, 1);
-
- tmp = b43_phy_read(dev, B43_NPHY_SAMP_STAT);
- if (tmp & 0x1)
- b43_phy_set(dev, B43_NPHY_SAMP_CMD, B43_NPHY_SAMP_CMD_STOP);
- else if (tmp & 0x2)
- b43_phy_mask(dev, B43_NPHY_IQLOCAL_CMDGCTL, 0x7FFF);
-
- b43_phy_mask(dev, B43_NPHY_SAMP_CMD, ~0x0004);
-
- if (nphy->bb_mult_save & 0x80000000) {
- tmp = nphy->bb_mult_save & 0xFFFF;
- b43_ntab_write(dev, B43_NTAB16(15, 87), tmp);
- nphy->bb_mult_save = 0;
- }
-
- if (nphy->hang_avoid)
- b43_nphy_stay_in_carrier_search(dev, 0);
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SpurWar */
-static void b43_nphy_spur_workaround(struct b43_wldev *dev)
-{
- struct b43_phy_n *nphy = dev->phy.n;
-
- u8 channel = dev->phy.channel;
- int tone[2] = { 57, 58 };
- u32 noise[2] = { 0x3FF, 0x3FF };
-
- B43_WARN_ON(dev->phy.rev < 3);
-
- if (nphy->hang_avoid)
- b43_nphy_stay_in_carrier_search(dev, 1);
-
- if (nphy->gband_spurwar_en) {
- /* TODO: N PHY Adjust Analog Pfbw (7) */
- if (channel == 11 && dev->phy.is_40mhz)
- ; /* TODO: N PHY Adjust Min Noise Var(2, tone, noise)*/
- else
- ; /* TODO: N PHY Adjust Min Noise Var(0, NULL, NULL)*/
- /* TODO: N PHY Adjust CRS Min Power (0x1E) */
- }
-
- if (nphy->aband_spurwar_en) {
- if (channel == 54) {
- tone[0] = 0x20;
- noise[0] = 0x25F;
- } else if (channel == 38 || channel == 102 || channel == 118) {
- if (0 /* FIXME */) {
- tone[0] = 0x20;
- noise[0] = 0x21F;
- } else {
- tone[0] = 0;
- noise[0] = 0;
- }
- } else if (channel == 134) {
- tone[0] = 0x20;
- noise[0] = 0x21F;
- } else if (channel == 151) {
- tone[0] = 0x10;
- noise[0] = 0x23F;
- } else if (channel == 153 || channel == 161) {
- tone[0] = 0x30;
- noise[0] = 0x23F;
- } else {
- tone[0] = 0;
- noise[0] = 0;
- }
-
- if (!tone[0] && !noise[0])
- ; /* TODO: N PHY Adjust Min Noise Var(1, tone, noise)*/
- else
- ; /* TODO: N PHY Adjust Min Noise Var(0, NULL, NULL)*/
- }
-
- if (nphy->hang_avoid)
- b43_nphy_stay_in_carrier_search(dev, 0);
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/AdjustLnaGainTbl */
-static void b43_nphy_adjust_lna_gain_table(struct b43_wldev *dev)
-{
- struct b43_phy_n *nphy = dev->phy.n;
-
- u8 i;
- s16 tmp;
- u16 data[4];
- s16 gain[2];
- u16 minmax[2];
- static const u16 lna_gain[4] = { -2, 10, 19, 25 };
-
- if (nphy->hang_avoid)
- b43_nphy_stay_in_carrier_search(dev, 1);
-
- if (nphy->gain_boost) {
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
- gain[0] = 6;
- gain[1] = 6;
- } else {
- tmp = 40370 - 315 * dev->phy.channel;
- gain[0] = ((tmp >> 13) + ((tmp >> 12) & 1));
- tmp = 23242 - 224 * dev->phy.channel;
- gain[1] = ((tmp >> 13) + ((tmp >> 12) & 1));
- }
- } else {
- gain[0] = 0;
- gain[1] = 0;
- }
-
- for (i = 0; i < 2; i++) {
- if (nphy->elna_gain_config) {
- data[0] = 19 + gain[i];
- data[1] = 25 + gain[i];
- data[2] = 25 + gain[i];
- data[3] = 25 + gain[i];
- } else {
- data[0] = lna_gain[0] + gain[i];
- data[1] = lna_gain[1] + gain[i];
- data[2] = lna_gain[2] + gain[i];
- data[3] = lna_gain[3] + gain[i];
- }
- b43_ntab_write_bulk(dev, B43_NTAB16(i, 8), 4, data);
-
- minmax[i] = 23 + gain[i];
- }
-
- b43_phy_maskset(dev, B43_NPHY_C1_MINMAX_GAIN, ~B43_NPHY_C1_MINGAIN,
- minmax[0] << B43_NPHY_C1_MINGAIN_SHIFT);
- b43_phy_maskset(dev, B43_NPHY_C2_MINMAX_GAIN, ~B43_NPHY_C2_MINGAIN,
- minmax[1] << B43_NPHY_C2_MINGAIN_SHIFT);
-
- if (nphy->hang_avoid)
- b43_nphy_stay_in_carrier_search(dev, 0);
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/WorkaroundsGainCtrl */
-static void b43_nphy_gain_ctrl_workarounds(struct b43_wldev *dev)
-{
- struct b43_phy_n *nphy = dev->phy.n;
- struct ssb_sprom *sprom = dev->dev->bus_sprom;
-
- /* PHY rev 0, 1, 2 */
- u8 i, j;
- u8 code;
- u16 tmp;
- u8 rfseq_events[3] = { 6, 8, 7 };
- u8 rfseq_delays[3] = { 10, 30, 1 };
-
- /* PHY rev >= 3 */
- bool ghz5;
- bool ext_lna;
- u16 rssi_gain;
- struct nphy_gain_ctl_workaround_entry *e;
- u8 lpf_gain[6] = { 0x00, 0x06, 0x0C, 0x12, 0x12, 0x12 };
- u8 lpf_bits[6] = { 0, 1, 2, 3, 3, 3 };
-
- if (dev->phy.rev >= 3) {
- /* Prepare values */
- ghz5 = b43_phy_read(dev, B43_NPHY_BANDCTL)
- & B43_NPHY_BANDCTL_5GHZ;
- ext_lna = sprom->boardflags_lo & B43_BFL_EXTLNA;
- e = b43_nphy_get_gain_ctl_workaround_ent(dev, ghz5, ext_lna);
- if (ghz5 && dev->phy.rev >= 5)
- rssi_gain = 0x90;
- else
- rssi_gain = 0x50;
-
- b43_phy_set(dev, B43_NPHY_RXCTL, 0x0040);
-
- /* Set Clip 2 detect */
- b43_phy_set(dev, B43_NPHY_C1_CGAINI,
- B43_NPHY_C1_CGAINI_CL2DETECT);
- b43_phy_set(dev, B43_NPHY_C2_CGAINI,
- B43_NPHY_C2_CGAINI_CL2DETECT);
-
- b43_radio_write(dev, B2056_RX0 | B2056_RX_BIASPOLE_LNAG1_IDAC,
- 0x17);
- b43_radio_write(dev, B2056_RX1 | B2056_RX_BIASPOLE_LNAG1_IDAC,
- 0x17);
- b43_radio_write(dev, B2056_RX0 | B2056_RX_LNAG2_IDAC, 0xF0);
- b43_radio_write(dev, B2056_RX1 | B2056_RX_LNAG2_IDAC, 0xF0);
- b43_radio_write(dev, B2056_RX0 | B2056_RX_RSSI_POLE, 0x00);
- b43_radio_write(dev, B2056_RX1 | B2056_RX_RSSI_POLE, 0x00);
- b43_radio_write(dev, B2056_RX0 | B2056_RX_RSSI_GAIN,
- rssi_gain);
- b43_radio_write(dev, B2056_RX1 | B2056_RX_RSSI_GAIN,
- rssi_gain);
- b43_radio_write(dev, B2056_RX0 | B2056_RX_BIASPOLE_LNAA1_IDAC,
- 0x17);
- b43_radio_write(dev, B2056_RX1 | B2056_RX_BIASPOLE_LNAA1_IDAC,
- 0x17);
- b43_radio_write(dev, B2056_RX0 | B2056_RX_LNAA2_IDAC, 0xFF);
- b43_radio_write(dev, B2056_RX1 | B2056_RX_LNAA2_IDAC, 0xFF);
-
- b43_ntab_write_bulk(dev, B43_NTAB8(0, 8), 4, e->lna1_gain);
- b43_ntab_write_bulk(dev, B43_NTAB8(1, 8), 4, e->lna1_gain);
- b43_ntab_write_bulk(dev, B43_NTAB8(0, 16), 4, e->lna2_gain);
- b43_ntab_write_bulk(dev, B43_NTAB8(1, 16), 4, e->lna2_gain);
- b43_ntab_write_bulk(dev, B43_NTAB8(0, 32), 10, e->gain_db);
- b43_ntab_write_bulk(dev, B43_NTAB8(1, 32), 10, e->gain_db);
- b43_ntab_write_bulk(dev, B43_NTAB8(2, 32), 10, e->gain_bits);
- b43_ntab_write_bulk(dev, B43_NTAB8(3, 32), 10, e->gain_bits);
- b43_ntab_write_bulk(dev, B43_NTAB8(0, 0x40), 6, lpf_gain);
- b43_ntab_write_bulk(dev, B43_NTAB8(1, 0x40), 6, lpf_gain);
- b43_ntab_write_bulk(dev, B43_NTAB8(2, 0x40), 6, lpf_bits);
- b43_ntab_write_bulk(dev, B43_NTAB8(3, 0x40), 6, lpf_bits);
-
- b43_phy_write(dev, B43_NPHY_C1_INITGAIN, e->init_gain);
- b43_phy_write(dev, 0x2A7, e->init_gain);
- b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x106), 2,
- e->rfseq_init);
- b43_phy_write(dev, B43_NPHY_C1_INITGAIN, e->init_gain);
-
- /* TODO: check defines. Do not match variables names */
- b43_phy_write(dev, B43_NPHY_C1_CLIP1_MEDGAIN, e->cliphi_gain);
- b43_phy_write(dev, 0x2A9, e->cliphi_gain);
- b43_phy_write(dev, B43_NPHY_C1_CLIP2_GAIN, e->clipmd_gain);
- b43_phy_write(dev, 0x2AB, e->clipmd_gain);
- b43_phy_write(dev, B43_NPHY_C2_CLIP1_HIGAIN, e->cliplo_gain);
- b43_phy_write(dev, 0x2AD, e->cliplo_gain);
-
- b43_phy_maskset(dev, 0x27D, 0xFF00, e->crsmin);
- b43_phy_maskset(dev, 0x280, 0xFF00, e->crsminl);
- b43_phy_maskset(dev, 0x283, 0xFF00, e->crsminu);
- b43_phy_write(dev, B43_NPHY_C1_NBCLIPTHRES, e->nbclip);
- b43_phy_write(dev, B43_NPHY_C2_NBCLIPTHRES, e->nbclip);
- b43_phy_maskset(dev, B43_NPHY_C1_CLIPWBTHRES,
- ~B43_NPHY_C1_CLIPWBTHRES_CLIP2, e->wlclip);
- b43_phy_maskset(dev, B43_NPHY_C2_CLIPWBTHRES,
- ~B43_NPHY_C2_CLIPWBTHRES_CLIP2, e->wlclip);
- b43_phy_write(dev, B43_NPHY_CCK_SHIFTB_REF, 0x809C);
- } else {
- /* Set Clip 2 detect */
- b43_phy_set(dev, B43_NPHY_C1_CGAINI,
- B43_NPHY_C1_CGAINI_CL2DETECT);
- b43_phy_set(dev, B43_NPHY_C2_CGAINI,
- B43_NPHY_C2_CGAINI_CL2DETECT);
-
- /* Set narrowband clip threshold */
- b43_phy_write(dev, B43_NPHY_C1_NBCLIPTHRES, 0x84);
- b43_phy_write(dev, B43_NPHY_C2_NBCLIPTHRES, 0x84);
-
- if (!dev->phy.is_40mhz) {
- /* Set dwell lengths */
- b43_phy_write(dev, B43_NPHY_CLIP1_NBDWELL_LEN, 0x002B);
- b43_phy_write(dev, B43_NPHY_CLIP2_NBDWELL_LEN, 0x002B);
- b43_phy_write(dev, B43_NPHY_W1CLIP1_DWELL_LEN, 0x0009);
- b43_phy_write(dev, B43_NPHY_W1CLIP2_DWELL_LEN, 0x0009);
- }
-
- /* Set wideband clip 2 threshold */
- b43_phy_maskset(dev, B43_NPHY_C1_CLIPWBTHRES,
- ~B43_NPHY_C1_CLIPWBTHRES_CLIP2,
- 21);
- b43_phy_maskset(dev, B43_NPHY_C2_CLIPWBTHRES,
- ~B43_NPHY_C2_CLIPWBTHRES_CLIP2,
- 21);
-
- if (!dev->phy.is_40mhz) {
- b43_phy_maskset(dev, B43_NPHY_C1_CGAINI,
- ~B43_NPHY_C1_CGAINI_GAINBKOFF, 0x1);
- b43_phy_maskset(dev, B43_NPHY_C2_CGAINI,
- ~B43_NPHY_C2_CGAINI_GAINBKOFF, 0x1);
- b43_phy_maskset(dev, B43_NPHY_C1_CCK_CGAINI,
- ~B43_NPHY_C1_CCK_CGAINI_GAINBKOFF, 0x1);
- b43_phy_maskset(dev, B43_NPHY_C2_CCK_CGAINI,
- ~B43_NPHY_C2_CCK_CGAINI_GAINBKOFF, 0x1);
- }
-
- b43_phy_write(dev, B43_NPHY_CCK_SHIFTB_REF, 0x809C);
-
- if (nphy->gain_boost) {
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ &&
- dev->phy.is_40mhz)
- code = 4;
- else
- code = 5;
- } else {
- code = dev->phy.is_40mhz ? 6 : 7;
- }
-
- /* Set HPVGA2 index */
- b43_phy_maskset(dev, B43_NPHY_C1_INITGAIN,
- ~B43_NPHY_C1_INITGAIN_HPVGA2,
- code << B43_NPHY_C1_INITGAIN_HPVGA2_SHIFT);
- b43_phy_maskset(dev, B43_NPHY_C2_INITGAIN,
- ~B43_NPHY_C2_INITGAIN_HPVGA2,
- code << B43_NPHY_C2_INITGAIN_HPVGA2_SHIFT);
-
- b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x1D06);
- /* specs say about 2 loops, but wl does 4 */
- for (i = 0; i < 4; i++)
- b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
- (code << 8 | 0x7C));
-
- b43_nphy_adjust_lna_gain_table(dev);
-
- if (nphy->elna_gain_config) {
- b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x0808);
- b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x0);
- b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
- b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
- b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
-
- b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x0C08);
- b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x0);
- b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
- b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
- b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
-
- b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x1D06);
- /* specs say about 2 loops, but wl does 4 */
- for (i = 0; i < 4; i++)
- b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
- (code << 8 | 0x74));
- }
-
- if (dev->phy.rev == 2) {
- for (i = 0; i < 4; i++) {
- b43_phy_write(dev, B43_NPHY_TABLE_ADDR,
- (0x0400 * i) + 0x0020);
- for (j = 0; j < 21; j++) {
- tmp = j * (i < 2 ? 3 : 1);
- b43_phy_write(dev,
- B43_NPHY_TABLE_DATALO, tmp);
- }
- }
- }
-
- b43_nphy_set_rf_sequence(dev, 5,
- rfseq_events, rfseq_delays, 3);
- b43_phy_maskset(dev, B43_NPHY_OVER_DGAIN1,
- ~B43_NPHY_OVER_DGAIN_CCKDGECV & 0xFFFF,
- 0x5A << B43_NPHY_OVER_DGAIN_CCKDGECV_SHIFT);
-
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
- b43_phy_maskset(dev, B43_PHY_N(0xC5D),
- 0xFF80, 4);
- }
-}
-
-static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
-{
- struct b43_phy_n *nphy = dev->phy.n;
- struct ssb_sprom *sprom = dev->dev->bus_sprom;
-
- /* TX to RX */
- u8 tx2rx_events[9] = { 0x4, 0x3, 0x6, 0x5, 0x2, 0x1, 0x8, 0x1F };
- u8 tx2rx_delays[9] = { 8, 4, 2, 2, 4, 4, 6, 1 };
- /* RX to TX */
- u8 rx2tx_events_ipa[9] = { 0x0, 0x1, 0x2, 0x8, 0x5, 0x6, 0xF, 0x3,
- 0x1F };
- u8 rx2tx_delays_ipa[9] = { 8, 6, 6, 4, 4, 16, 43, 1, 1 };
- u8 rx2tx_events[9] = { 0x0, 0x1, 0x2, 0x8, 0x5, 0x6, 0x3, 0x4, 0x1F };
- u8 rx2tx_delays[9] = { 8, 6, 6, 4, 4, 18, 42, 1, 1 };
-
- u16 tmp16;
- u32 tmp32;
-
- tmp32 = b43_ntab_read(dev, B43_NTAB32(30, 0));
- tmp32 &= 0xffffff;
- b43_ntab_write(dev, B43_NTAB32(30, 0), tmp32);
-
- b43_phy_write(dev, B43_NPHY_PHASETR_A0, 0x0125);
- b43_phy_write(dev, B43_NPHY_PHASETR_A1, 0x01B3);
- b43_phy_write(dev, B43_NPHY_PHASETR_A2, 0x0105);
- b43_phy_write(dev, B43_NPHY_PHASETR_B0, 0x016E);
- b43_phy_write(dev, B43_NPHY_PHASETR_B1, 0x00CD);
- b43_phy_write(dev, B43_NPHY_PHASETR_B2, 0x0020);
-
- b43_phy_write(dev, B43_NPHY_C2_CLIP1_MEDGAIN, 0x000C);
- b43_phy_write(dev, 0x2AE, 0x000C);
-
- /* TX to RX */
- b43_nphy_set_rf_sequence(dev, 1, tx2rx_events, tx2rx_delays, 9);
-
- /* RX to TX */
- if (b43_nphy_ipa(dev))
- b43_nphy_set_rf_sequence(dev, 1, rx2tx_events_ipa,
- rx2tx_delays_ipa, 9);
- if (nphy->hw_phyrxchain != 3 &&
- nphy->hw_phyrxchain != nphy->hw_phytxchain) {
- if (b43_nphy_ipa(dev)) {
- rx2tx_delays[5] = 59;
- rx2tx_delays[6] = 1;
- rx2tx_events[7] = 0x1F;
- }
- b43_nphy_set_rf_sequence(dev, 1, rx2tx_events, rx2tx_delays, 9);
- }
-
- tmp16 = (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) ?
- 0x2 : 0x9C40;
- b43_phy_write(dev, B43_NPHY_ENDROP_TLEN, tmp16);
-
- b43_phy_maskset(dev, 0x294, 0xF0FF, 0x0700);
-
- b43_ntab_write(dev, B43_NTAB32(16, 3), 0x18D);
- b43_ntab_write(dev, B43_NTAB32(16, 127), 0x18D);
-
- b43_nphy_gain_ctrl_workarounds(dev);
-
- b43_ntab_write(dev, B43_NTAB32(8, 0), 2);
- b43_ntab_write(dev, B43_NTAB32(8, 16), 2);
-
- /* TODO */
-
- b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_MAST_BIAS, 0x00);
- b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_MAST_BIAS, 0x00);
- b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_BIAS_MAIN, 0x06);
- b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_BIAS_MAIN, 0x06);
- b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_BIAS_AUX, 0x07);
- b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_BIAS_AUX, 0x07);
- b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_LOB_BIAS, 0x88);
- b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_LOB_BIAS, 0x88);
- b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXG_CMFB_IDAC, 0x00);
- b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXG_CMFB_IDAC, 0x00);
-
- /* N PHY WAR TX Chain Update with hw_phytxchain as argument */
-
- if ((sprom->boardflags2_lo & B43_BFL2_APLL_WAR &&
- b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ||
- (sprom->boardflags2_lo & B43_BFL2_GPLL_WAR &&
- b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ))
- tmp32 = 0x00088888;
- else
- tmp32 = 0x88888888;
- b43_ntab_write(dev, B43_NTAB32(30, 1), tmp32);
- b43_ntab_write(dev, B43_NTAB32(30, 2), tmp32);
- b43_ntab_write(dev, B43_NTAB32(30, 3), tmp32);
-
- if (dev->phy.rev == 4 &&
- b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
- b43_radio_write(dev, B2056_TX0 | B2056_TX_GMBB_IDAC,
- 0x70);
- b43_radio_write(dev, B2056_TX1 | B2056_TX_GMBB_IDAC,
- 0x70);
- }
-
- b43_phy_write(dev, 0x224, 0x039C);
- b43_phy_write(dev, 0x225, 0x0357);
- b43_phy_write(dev, 0x226, 0x0317);
- b43_phy_write(dev, 0x227, 0x02D7);
- b43_phy_write(dev, 0x228, 0x039C);
- b43_phy_write(dev, 0x229, 0x0357);
- b43_phy_write(dev, 0x22A, 0x0317);
- b43_phy_write(dev, 0x22B, 0x02D7);
- b43_phy_write(dev, 0x22C, 0x039C);
- b43_phy_write(dev, 0x22D, 0x0357);
- b43_phy_write(dev, 0x22E, 0x0317);
- b43_phy_write(dev, 0x22F, 0x02D7);
-}
-
-static void b43_nphy_workarounds_rev1_2(struct b43_wldev *dev)
-{
- struct ssb_sprom *sprom = dev->dev->bus_sprom;
- struct b43_phy *phy = &dev->phy;
- struct b43_phy_n *nphy = phy->n;
-
- u8 events1[7] = { 0x0, 0x1, 0x2, 0x8, 0x4, 0x5, 0x3 };
- u8 delays1[7] = { 0x8, 0x6, 0x6, 0x2, 0x4, 0x3C, 0x1 };
-
- u8 events2[7] = { 0x0, 0x3, 0x5, 0x4, 0x2, 0x1, 0x8 };
- u8 delays2[7] = { 0x8, 0x6, 0x2, 0x4, 0x4, 0x6, 0x1 };
-
- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ &&
- nphy->band5g_pwrgain) {
- b43_radio_mask(dev, B2055_C1_TX_RF_SPARE, ~0x8);
- b43_radio_mask(dev, B2055_C2_TX_RF_SPARE, ~0x8);
- } else {
- b43_radio_set(dev, B2055_C1_TX_RF_SPARE, 0x8);
- b43_radio_set(dev, B2055_C2_TX_RF_SPARE, 0x8);
- }
-
- b43_ntab_write(dev, B43_NTAB16(8, 0x00), 0x000A);
- b43_ntab_write(dev, B43_NTAB16(8, 0x10), 0x000A);
- b43_ntab_write(dev, B43_NTAB16(8, 0x02), 0xCDAA);
- b43_ntab_write(dev, B43_NTAB16(8, 0x12), 0xCDAA);
-
- if (dev->phy.rev < 2) {
- b43_ntab_write(dev, B43_NTAB16(8, 0x08), 0x0000);
- b43_ntab_write(dev, B43_NTAB16(8, 0x18), 0x0000);
- b43_ntab_write(dev, B43_NTAB16(8, 0x07), 0x7AAB);
- b43_ntab_write(dev, B43_NTAB16(8, 0x17), 0x7AAB);
- b43_ntab_write(dev, B43_NTAB16(8, 0x06), 0x0800);
- b43_ntab_write(dev, B43_NTAB16(8, 0x16), 0x0800);
- }
-
- b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO1, 0x2D8);
- b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP1, 0x301);
- b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO2, 0x2D8);
- b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2, 0x301);
-
- if (sprom->boardflags2_lo & B43_BFL2_SKWRKFEM_BRD &&
- dev->dev->board_type == 0x8B) {
- delays1[0] = 0x1;
- delays1[5] = 0x14;
- }
- b43_nphy_set_rf_sequence(dev, 0, events1, delays1, 7);
- b43_nphy_set_rf_sequence(dev, 1, events2, delays2, 7);
-
- b43_nphy_gain_ctrl_workarounds(dev);
-
- if (dev->phy.rev < 2) {
- if (b43_phy_read(dev, B43_NPHY_RXCTL) & 0x2)
- b43_hf_write(dev, b43_hf_read(dev) |
- B43_HF_MLADVW);
- } else if (dev->phy.rev == 2) {
- b43_phy_write(dev, B43_NPHY_CRSCHECK2, 0);
- b43_phy_write(dev, B43_NPHY_CRSCHECK3, 0);
- }
-
- if (dev->phy.rev < 2)
- b43_phy_mask(dev, B43_NPHY_SCRAM_SIGCTL,
- ~B43_NPHY_SCRAM_SIGCTL_SCM);
-
- /* Set phase track alpha and beta */
- b43_phy_write(dev, B43_NPHY_PHASETR_A0, 0x125);
- b43_phy_write(dev, B43_NPHY_PHASETR_A1, 0x1B3);
- b43_phy_write(dev, B43_NPHY_PHASETR_A2, 0x105);
- b43_phy_write(dev, B43_NPHY_PHASETR_B0, 0x16E);
- b43_phy_write(dev, B43_NPHY_PHASETR_B1, 0xCD);
- b43_phy_write(dev, B43_NPHY_PHASETR_B2, 0x20);
-
- b43_phy_mask(dev, B43_NPHY_PIL_DW1,
- ~B43_NPHY_PIL_DW_64QAM & 0xFFFF);
- b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B1, 0xB5);
- b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B2, 0xA4);
- b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B3, 0x00);
-
- if (dev->phy.rev == 2)
- b43_phy_set(dev, B43_NPHY_FINERX2_CGC,
- B43_NPHY_FINERX2_CGC_DECGC);
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/Workarounds */
-static void b43_nphy_workarounds(struct b43_wldev *dev)
-{
- struct b43_phy *phy = &dev->phy;
- struct b43_phy_n *nphy = phy->n;
-
- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
- b43_nphy_classifier(dev, 1, 0);
- else
- b43_nphy_classifier(dev, 1, 1);
-
- if (nphy->hang_avoid)
- b43_nphy_stay_in_carrier_search(dev, 1);
-
- b43_phy_set(dev, B43_NPHY_IQFLIP,
- B43_NPHY_IQFLIP_ADC1 | B43_NPHY_IQFLIP_ADC2);
-
- if (dev->phy.rev >= 3)
- b43_nphy_workarounds_rev3plus(dev);
- else
- b43_nphy_workarounds_rev1_2(dev);
-
- if (nphy->hang_avoid)
- b43_nphy_stay_in_carrier_search(dev, 0);
-}
+/**************************************************
+ * Samples
+ **************************************************/
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/LoadSampleTable */
static int b43_nphy_load_samples(struct b43_wldev *dev,
@@ -1825,7 +956,7 @@ static void b43_nphy_run_samples(struct b43_wldev *dev, u16 samps, u16 loops,
b43_phy_write(dev, B43_NPHY_SAMP_CMD, 1);
}
for (i = 0; i < 100; i++) {
- if (b43_phy_read(dev, B43_NPHY_RFSEQST) & 1) {
+ if (!(b43_phy_read(dev, B43_NPHY_RFSEQST) & 1)) {
i = 0;
break;
}
@@ -1837,333 +968,9 @@ static void b43_nphy_run_samples(struct b43_wldev *dev, u16 samps, u16 loops,
b43_phy_write(dev, B43_NPHY_RFSEQMODE, seq_mode);
}
-/*
- * Transmits a known value for LO calibration
- * http://bcm-v4.sipsolutions.net/802.11/PHY/N/TXTone
- */
-static int b43_nphy_tx_tone(struct b43_wldev *dev, u32 freq, u16 max_val,
- bool iqmode, bool dac_test)
-{
- u16 samp = b43_nphy_gen_load_samples(dev, freq, max_val, dac_test);
- if (samp == 0)
- return -1;
- b43_nphy_run_samples(dev, samp, 0xFFFF, 0, iqmode, dac_test);
- return 0;
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlCoefSetup */
-static void b43_nphy_tx_pwr_ctrl_coef_setup(struct b43_wldev *dev)
-{
- struct b43_phy_n *nphy = dev->phy.n;
- int i, j;
- u32 tmp;
- u32 cur_real, cur_imag, real_part, imag_part;
-
- u16 buffer[7];
-
- if (nphy->hang_avoid)
- b43_nphy_stay_in_carrier_search(dev, true);
-
- b43_ntab_read_bulk(dev, B43_NTAB16(15, 80), 7, buffer);
-
- for (i = 0; i < 2; i++) {
- tmp = ((buffer[i * 2] & 0x3FF) << 10) |
- (buffer[i * 2 + 1] & 0x3FF);
- b43_phy_write(dev, B43_NPHY_TABLE_ADDR,
- (((i + 26) << 10) | 320));
- for (j = 0; j < 128; j++) {
- b43_phy_write(dev, B43_NPHY_TABLE_DATAHI,
- ((tmp >> 16) & 0xFFFF));
- b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
- (tmp & 0xFFFF));
- }
- }
-
- for (i = 0; i < 2; i++) {
- tmp = buffer[5 + i];
- real_part = (tmp >> 8) & 0xFF;
- imag_part = (tmp & 0xFF);
- b43_phy_write(dev, B43_NPHY_TABLE_ADDR,
- (((i + 26) << 10) | 448));
-
- if (dev->phy.rev >= 3) {
- cur_real = real_part;
- cur_imag = imag_part;
- tmp = ((cur_real & 0xFF) << 8) | (cur_imag & 0xFF);
- }
-
- for (j = 0; j < 128; j++) {
- if (dev->phy.rev < 3) {
- cur_real = (real_part * loscale[j] + 128) >> 8;
- cur_imag = (imag_part * loscale[j] + 128) >> 8;
- tmp = ((cur_real & 0xFF) << 8) |
- (cur_imag & 0xFF);
- }
- b43_phy_write(dev, B43_NPHY_TABLE_DATAHI,
- ((tmp >> 16) & 0xFFFF));
- b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
- (tmp & 0xFFFF));
- }
- }
-
- if (dev->phy.rev >= 3) {
- b43_shm_write16(dev, B43_SHM_SHARED,
- B43_SHM_SH_NPHY_TXPWR_INDX0, 0xFFFF);
- b43_shm_write16(dev, B43_SHM_SHARED,
- B43_SHM_SH_NPHY_TXPWR_INDX1, 0xFFFF);
- }
-
- if (nphy->hang_avoid)
- b43_nphy_stay_in_carrier_search(dev, false);
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SetRfSeq */
-static void b43_nphy_set_rf_sequence(struct b43_wldev *dev, u8 cmd,
- u8 *events, u8 *delays, u8 length)
-{
- struct b43_phy_n *nphy = dev->phy.n;
- u8 i;
- u8 end = (dev->phy.rev >= 3) ? 0x1F : 0x0F;
- u16 offset1 = cmd << 4;
- u16 offset2 = offset1 + 0x80;
-
- if (nphy->hang_avoid)
- b43_nphy_stay_in_carrier_search(dev, true);
-
- b43_ntab_write_bulk(dev, B43_NTAB8(7, offset1), length, events);
- b43_ntab_write_bulk(dev, B43_NTAB8(7, offset2), length, delays);
-
- for (i = length; i < 16; i++) {
- b43_ntab_write(dev, B43_NTAB8(7, offset1 + i), end);
- b43_ntab_write(dev, B43_NTAB8(7, offset2 + i), 1);
- }
-
- if (nphy->hang_avoid)
- b43_nphy_stay_in_carrier_search(dev, false);
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ForceRFSeq */
-static void b43_nphy_force_rf_sequence(struct b43_wldev *dev,
- enum b43_nphy_rf_sequence seq)
-{
- static const u16 trigger[] = {
- [B43_RFSEQ_RX2TX] = B43_NPHY_RFSEQTR_RX2TX,
- [B43_RFSEQ_TX2RX] = B43_NPHY_RFSEQTR_TX2RX,
- [B43_RFSEQ_RESET2RX] = B43_NPHY_RFSEQTR_RST2RX,
- [B43_RFSEQ_UPDATE_GAINH] = B43_NPHY_RFSEQTR_UPGH,
- [B43_RFSEQ_UPDATE_GAINL] = B43_NPHY_RFSEQTR_UPGL,
- [B43_RFSEQ_UPDATE_GAINU] = B43_NPHY_RFSEQTR_UPGU,
- };
- int i;
- u16 seq_mode = b43_phy_read(dev, B43_NPHY_RFSEQMODE);
-
- B43_WARN_ON(seq >= ARRAY_SIZE(trigger));
-
- b43_phy_set(dev, B43_NPHY_RFSEQMODE,
- B43_NPHY_RFSEQMODE_CAOVER | B43_NPHY_RFSEQMODE_TROVER);
- b43_phy_set(dev, B43_NPHY_RFSEQTR, trigger[seq]);
- for (i = 0; i < 200; i++) {
- if (!(b43_phy_read(dev, B43_NPHY_RFSEQST) & trigger[seq]))
- goto ok;
- msleep(1);
- }
- b43err(dev->wl, "RF sequence status timeout\n");
-ok:
- b43_phy_write(dev, B43_NPHY_RFSEQMODE, seq_mode);
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlOverride */
-static void b43_nphy_rf_control_override(struct b43_wldev *dev, u16 field,
- u16 value, u8 core, bool off)
-{
- int i;
- u8 index = fls(field);
- u8 addr, en_addr, val_addr;
- /* we expect only one bit set */
- B43_WARN_ON(field & (~(1 << (index - 1))));
-
- if (dev->phy.rev >= 3) {
- const struct nphy_rf_control_override_rev3 *rf_ctrl;
- for (i = 0; i < 2; i++) {
- if (index == 0 || index == 16) {
- b43err(dev->wl,
- "Unsupported RF Ctrl Override call\n");
- return;
- }
-
- rf_ctrl = &tbl_rf_control_override_rev3[index - 1];
- en_addr = B43_PHY_N((i == 0) ?
- rf_ctrl->en_addr0 : rf_ctrl->en_addr1);
- val_addr = B43_PHY_N((i == 0) ?
- rf_ctrl->val_addr0 : rf_ctrl->val_addr1);
-
- if (off) {
- b43_phy_mask(dev, en_addr, ~(field));
- b43_phy_mask(dev, val_addr,
- ~(rf_ctrl->val_mask));
- } else {
- if (core == 0 || ((1 << core) & i) != 0) {
- b43_phy_set(dev, en_addr, field);
- b43_phy_maskset(dev, val_addr,
- ~(rf_ctrl->val_mask),
- (value << rf_ctrl->val_shift));
- }
- }
- }
- } else {
- const struct nphy_rf_control_override_rev2 *rf_ctrl;
- if (off) {
- b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, ~(field));
- value = 0;
- } else {
- b43_phy_set(dev, B43_NPHY_RFCTL_OVER, field);
- }
-
- for (i = 0; i < 2; i++) {
- if (index <= 1 || index == 16) {
- b43err(dev->wl,
- "Unsupported RF Ctrl Override call\n");
- return;
- }
-
- if (index == 2 || index == 10 ||
- (index >= 13 && index <= 15)) {
- core = 1;
- }
-
- rf_ctrl = &tbl_rf_control_override_rev2[index - 2];
- addr = B43_PHY_N((i == 0) ?
- rf_ctrl->addr0 : rf_ctrl->addr1);
-
- if ((core & (1 << i)) != 0)
- b43_phy_maskset(dev, addr, ~(rf_ctrl->bmask),
- (value << rf_ctrl->shift));
-
- b43_phy_set(dev, B43_NPHY_RFCTL_OVER, 0x1);
- b43_phy_set(dev, B43_NPHY_RFCTL_CMD,
- B43_NPHY_RFCTL_CMD_START);
- udelay(1);
- b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, 0xFFFE);
- }
- }
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlIntcOverride */
-static void b43_nphy_rf_control_intc_override(struct b43_wldev *dev, u8 field,
- u16 value, u8 core)
-{
- u8 i, j;
- u16 reg, tmp, val;
-
- B43_WARN_ON(dev->phy.rev < 3);
- B43_WARN_ON(field > 4);
-
- for (i = 0; i < 2; i++) {
- if ((core == 1 && i == 1) || (core == 2 && !i))
- continue;
-
- reg = (i == 0) ?
- B43_NPHY_RFCTL_INTC1 : B43_NPHY_RFCTL_INTC2;
- b43_phy_mask(dev, reg, 0xFBFF);
-
- switch (field) {
- case 0:
- b43_phy_write(dev, reg, 0);
- b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX);
- break;
- case 1:
- if (!i) {
- b43_phy_maskset(dev, B43_NPHY_RFCTL_INTC1,
- 0xFC3F, (value << 6));
- b43_phy_maskset(dev, B43_NPHY_TXF_40CO_B1S1,
- 0xFFFE, 1);
- b43_phy_set(dev, B43_NPHY_RFCTL_CMD,
- B43_NPHY_RFCTL_CMD_START);
- for (j = 0; j < 100; j++) {
- if (b43_phy_read(dev, B43_NPHY_RFCTL_CMD) & B43_NPHY_RFCTL_CMD_START) {
- j = 0;
- break;
- }
- udelay(10);
- }
- if (j)
- b43err(dev->wl,
- "intc override timeout\n");
- b43_phy_mask(dev, B43_NPHY_TXF_40CO_B1S1,
- 0xFFFE);
- } else {
- b43_phy_maskset(dev, B43_NPHY_RFCTL_INTC2,
- 0xFC3F, (value << 6));
- b43_phy_maskset(dev, B43_NPHY_RFCTL_OVER,
- 0xFFFE, 1);
- b43_phy_set(dev, B43_NPHY_RFCTL_CMD,
- B43_NPHY_RFCTL_CMD_RXTX);
- for (j = 0; j < 100; j++) {
- if (b43_phy_read(dev, B43_NPHY_RFCTL_CMD) & B43_NPHY_RFCTL_CMD_RXTX) {
- j = 0;
- break;
- }
- udelay(10);
- }
- if (j)
- b43err(dev->wl,
- "intc override timeout\n");
- b43_phy_mask(dev, B43_NPHY_RFCTL_OVER,
- 0xFFFE);
- }
- break;
- case 2:
- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
- tmp = 0x0020;
- val = value << 5;
- } else {
- tmp = 0x0010;
- val = value << 4;
- }
- b43_phy_maskset(dev, reg, ~tmp, val);
- break;
- case 3:
- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
- tmp = 0x0001;
- val = value;
- } else {
- tmp = 0x0004;
- val = value << 2;
- }
- b43_phy_maskset(dev, reg, ~tmp, val);
- break;
- case 4:
- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
- tmp = 0x0002;
- val = value << 1;
- } else {
- tmp = 0x0008;
- val = value << 3;
- }
- b43_phy_maskset(dev, reg, ~tmp, val);
- break;
- }
- }
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/BPHYInit */
-static void b43_nphy_bphy_init(struct b43_wldev *dev)
-{
- unsigned int i;
- u16 val;
-
- val = 0x1E1F;
- for (i = 0; i < 16; i++) {
- b43_phy_write(dev, B43_PHY_N_BMODE(0x88 + i), val);
- val -= 0x202;
- }
- val = 0x3E3F;
- for (i = 0; i < 16; i++) {
- b43_phy_write(dev, B43_PHY_N_BMODE(0x98 + i), val);
- val -= 0x202;
- }
- b43_phy_write(dev, B43_PHY_N_BMODE(0x38), 0x668);
-}
+/**************************************************
+ * RSSI
+ **************************************************/
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ScaleOffsetRssi */
static void b43_nphy_scale_offset_rssi(struct b43_wldev *dev, u16 scale,
@@ -2233,67 +1040,6 @@ static void b43_nphy_scale_offset_rssi(struct b43_wldev *dev, u16 scale,
b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_TSSI, tmp);
}
-static void b43_nphy_rev2_rssi_select(struct b43_wldev *dev, u8 code, u8 type)
-{
- u16 val;
-
- if (type < 3)
- val = 0;
- else if (type == 6)
- val = 1;
- else if (type == 3)
- val = 2;
- else
- val = 3;
-
- val = (val << 12) | (val << 14);
- b43_phy_maskset(dev, B43_NPHY_AFECTL_C1, 0x0FFF, val);
- b43_phy_maskset(dev, B43_NPHY_AFECTL_C2, 0x0FFF, val);
-
- if (type < 3) {
- b43_phy_maskset(dev, B43_NPHY_RFCTL_RSSIO1, 0xFFCF,
- (type + 1) << 4);
- b43_phy_maskset(dev, B43_NPHY_RFCTL_RSSIO2, 0xFFCF,
- (type + 1) << 4);
- }
-
- if (code == 0) {
- b43_phy_mask(dev, B43_NPHY_AFECTL_OVER, ~0x3000);
- if (type < 3) {
- b43_phy_mask(dev, B43_NPHY_RFCTL_CMD,
- ~(B43_NPHY_RFCTL_CMD_RXEN |
- B43_NPHY_RFCTL_CMD_CORESEL));
- b43_phy_mask(dev, B43_NPHY_RFCTL_OVER,
- ~(0x1 << 12 |
- 0x1 << 5 |
- 0x1 << 1 |
- 0x1));
- b43_phy_mask(dev, B43_NPHY_RFCTL_CMD,
- ~B43_NPHY_RFCTL_CMD_START);
- udelay(20);
- b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, ~0x1);
- }
- } else {
- b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x3000);
- if (type < 3) {
- b43_phy_maskset(dev, B43_NPHY_RFCTL_CMD,
- ~(B43_NPHY_RFCTL_CMD_RXEN |
- B43_NPHY_RFCTL_CMD_CORESEL),
- (B43_NPHY_RFCTL_CMD_RXEN |
- code << B43_NPHY_RFCTL_CMD_CORESEL_SHIFT));
- b43_phy_set(dev, B43_NPHY_RFCTL_OVER,
- (0x1 << 12 |
- 0x1 << 5 |
- 0x1 << 1 |
- 0x1));
- b43_phy_set(dev, B43_NPHY_RFCTL_CMD,
- B43_NPHY_RFCTL_CMD_START);
- udelay(20);
- b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, ~0x1);
- }
- }
-}
-
static void b43_nphy_rev3_rssi_select(struct b43_wldev *dev, u8 code, u8 type)
{
u8 i;
@@ -2377,6 +1123,67 @@ static void b43_nphy_rev3_rssi_select(struct b43_wldev *dev, u8 code, u8 type)
}
}
+static void b43_nphy_rev2_rssi_select(struct b43_wldev *dev, u8 code, u8 type)
+{
+ u16 val;
+
+ if (type < 3)
+ val = 0;
+ else if (type == 6)
+ val = 1;
+ else if (type == 3)
+ val = 2;
+ else
+ val = 3;
+
+ val = (val << 12) | (val << 14);
+ b43_phy_maskset(dev, B43_NPHY_AFECTL_C1, 0x0FFF, val);
+ b43_phy_maskset(dev, B43_NPHY_AFECTL_C2, 0x0FFF, val);
+
+ if (type < 3) {
+ b43_phy_maskset(dev, B43_NPHY_RFCTL_RSSIO1, 0xFFCF,
+ (type + 1) << 4);
+ b43_phy_maskset(dev, B43_NPHY_RFCTL_RSSIO2, 0xFFCF,
+ (type + 1) << 4);
+ }
+
+ if (code == 0) {
+ b43_phy_mask(dev, B43_NPHY_AFECTL_OVER, ~0x3000);
+ if (type < 3) {
+ b43_phy_mask(dev, B43_NPHY_RFCTL_CMD,
+ ~(B43_NPHY_RFCTL_CMD_RXEN |
+ B43_NPHY_RFCTL_CMD_CORESEL));
+ b43_phy_mask(dev, B43_NPHY_RFCTL_OVER,
+ ~(0x1 << 12 |
+ 0x1 << 5 |
+ 0x1 << 1 |
+ 0x1));
+ b43_phy_mask(dev, B43_NPHY_RFCTL_CMD,
+ ~B43_NPHY_RFCTL_CMD_START);
+ udelay(20);
+ b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, ~0x1);
+ }
+ } else {
+ b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x3000);
+ if (type < 3) {
+ b43_phy_maskset(dev, B43_NPHY_RFCTL_CMD,
+ ~(B43_NPHY_RFCTL_CMD_RXEN |
+ B43_NPHY_RFCTL_CMD_CORESEL),
+ (B43_NPHY_RFCTL_CMD_RXEN |
+ code << B43_NPHY_RFCTL_CMD_CORESEL_SHIFT));
+ b43_phy_set(dev, B43_NPHY_RFCTL_OVER,
+ (0x1 << 12 |
+ 0x1 << 5 |
+ 0x1 << 1 |
+ 0x1));
+ b43_phy_set(dev, B43_NPHY_RFCTL_CMD,
+ B43_NPHY_RFCTL_CMD_START);
+ udelay(20);
+ b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, ~0x1);
+ }
+ }
+}
+
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSISel */
static void b43_nphy_rssi_select(struct b43_wldev *dev, u8 code, u8 type)
{
@@ -2686,6 +1493,1413 @@ static void b43_nphy_rssi_cal(struct b43_wldev *dev)
}
}
+/**************************************************
+ * Workarounds
+ **************************************************/
+
+static void b43_nphy_gain_ctl_workarounds_rev3plus(struct b43_wldev *dev)
+{
+ struct ssb_sprom *sprom = dev->dev->bus_sprom;
+
+ bool ghz5;
+ bool ext_lna;
+ u16 rssi_gain;
+ struct nphy_gain_ctl_workaround_entry *e;
+ u8 lpf_gain[6] = { 0x00, 0x06, 0x0C, 0x12, 0x12, 0x12 };
+ u8 lpf_bits[6] = { 0, 1, 2, 3, 3, 3 };
+
+ /* Prepare values */
+ ghz5 = b43_phy_read(dev, B43_NPHY_BANDCTL)
+ & B43_NPHY_BANDCTL_5GHZ;
+ ext_lna = ghz5 ? sprom->boardflags_hi & B43_BFH_EXTLNA_5GHZ :
+ sprom->boardflags_lo & B43_BFL_EXTLNA;
+ e = b43_nphy_get_gain_ctl_workaround_ent(dev, ghz5, ext_lna);
+ if (ghz5 && dev->phy.rev >= 5)
+ rssi_gain = 0x90;
+ else
+ rssi_gain = 0x50;
+
+ b43_phy_set(dev, B43_NPHY_RXCTL, 0x0040);
+
+ /* Set Clip 2 detect */
+ b43_phy_set(dev, B43_NPHY_C1_CGAINI,
+ B43_NPHY_C1_CGAINI_CL2DETECT);
+ b43_phy_set(dev, B43_NPHY_C2_CGAINI,
+ B43_NPHY_C2_CGAINI_CL2DETECT);
+
+ b43_radio_write(dev, B2056_RX0 | B2056_RX_BIASPOLE_LNAG1_IDAC,
+ 0x17);
+ b43_radio_write(dev, B2056_RX1 | B2056_RX_BIASPOLE_LNAG1_IDAC,
+ 0x17);
+ b43_radio_write(dev, B2056_RX0 | B2056_RX_LNAG2_IDAC, 0xF0);
+ b43_radio_write(dev, B2056_RX1 | B2056_RX_LNAG2_IDAC, 0xF0);
+ b43_radio_write(dev, B2056_RX0 | B2056_RX_RSSI_POLE, 0x00);
+ b43_radio_write(dev, B2056_RX1 | B2056_RX_RSSI_POLE, 0x00);
+ b43_radio_write(dev, B2056_RX0 | B2056_RX_RSSI_GAIN,
+ rssi_gain);
+ b43_radio_write(dev, B2056_RX1 | B2056_RX_RSSI_GAIN,
+ rssi_gain);
+ b43_radio_write(dev, B2056_RX0 | B2056_RX_BIASPOLE_LNAA1_IDAC,
+ 0x17);
+ b43_radio_write(dev, B2056_RX1 | B2056_RX_BIASPOLE_LNAA1_IDAC,
+ 0x17);
+ b43_radio_write(dev, B2056_RX0 | B2056_RX_LNAA2_IDAC, 0xFF);
+ b43_radio_write(dev, B2056_RX1 | B2056_RX_LNAA2_IDAC, 0xFF);
+
+ b43_ntab_write_bulk(dev, B43_NTAB8(0, 8), 4, e->lna1_gain);
+ b43_ntab_write_bulk(dev, B43_NTAB8(1, 8), 4, e->lna1_gain);
+ b43_ntab_write_bulk(dev, B43_NTAB8(0, 16), 4, e->lna2_gain);
+ b43_ntab_write_bulk(dev, B43_NTAB8(1, 16), 4, e->lna2_gain);
+ b43_ntab_write_bulk(dev, B43_NTAB8(0, 32), 10, e->gain_db);
+ b43_ntab_write_bulk(dev, B43_NTAB8(1, 32), 10, e->gain_db);
+ b43_ntab_write_bulk(dev, B43_NTAB8(2, 32), 10, e->gain_bits);
+ b43_ntab_write_bulk(dev, B43_NTAB8(3, 32), 10, e->gain_bits);
+ b43_ntab_write_bulk(dev, B43_NTAB8(0, 0x40), 6, lpf_gain);
+ b43_ntab_write_bulk(dev, B43_NTAB8(1, 0x40), 6, lpf_gain);
+ b43_ntab_write_bulk(dev, B43_NTAB8(2, 0x40), 6, lpf_bits);
+ b43_ntab_write_bulk(dev, B43_NTAB8(3, 0x40), 6, lpf_bits);
+
+ b43_phy_write(dev, B43_NPHY_C1_INITGAIN, e->init_gain);
+ b43_phy_write(dev, 0x2A7, e->init_gain);
+ b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x106), 2,
+ e->rfseq_init);
+
+	/* TODO: check defines. They do not match the variable names */
+ b43_phy_write(dev, B43_NPHY_C1_CLIP1_MEDGAIN, e->cliphi_gain);
+ b43_phy_write(dev, 0x2A9, e->cliphi_gain);
+ b43_phy_write(dev, B43_NPHY_C1_CLIP2_GAIN, e->clipmd_gain);
+ b43_phy_write(dev, 0x2AB, e->clipmd_gain);
+ b43_phy_write(dev, B43_NPHY_C2_CLIP1_HIGAIN, e->cliplo_gain);
+ b43_phy_write(dev, 0x2AD, e->cliplo_gain);
+
+ b43_phy_maskset(dev, 0x27D, 0xFF00, e->crsmin);
+ b43_phy_maskset(dev, 0x280, 0xFF00, e->crsminl);
+ b43_phy_maskset(dev, 0x283, 0xFF00, e->crsminu);
+ b43_phy_write(dev, B43_NPHY_C1_NBCLIPTHRES, e->nbclip);
+ b43_phy_write(dev, B43_NPHY_C2_NBCLIPTHRES, e->nbclip);
+ b43_phy_maskset(dev, B43_NPHY_C1_CLIPWBTHRES,
+ ~B43_NPHY_C1_CLIPWBTHRES_CLIP2, e->wlclip);
+ b43_phy_maskset(dev, B43_NPHY_C2_CLIPWBTHRES,
+ ~B43_NPHY_C2_CLIPWBTHRES_CLIP2, e->wlclip);
+ b43_phy_write(dev, B43_NPHY_CCK_SHIFTB_REF, 0x809C);
+}
+
+static void b43_nphy_gain_ctl_workarounds_rev1_2(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+
+ u8 i, j;
+ u8 code;
+ u16 tmp;
+ u8 rfseq_events[3] = { 6, 8, 7 };
+ u8 rfseq_delays[3] = { 10, 30, 1 };
+
+ /* Set Clip 2 detect */
+ b43_phy_set(dev, B43_NPHY_C1_CGAINI, B43_NPHY_C1_CGAINI_CL2DETECT);
+ b43_phy_set(dev, B43_NPHY_C2_CGAINI, B43_NPHY_C2_CGAINI_CL2DETECT);
+
+ /* Set narrowband clip threshold */
+ b43_phy_write(dev, B43_NPHY_C1_NBCLIPTHRES, 0x84);
+ b43_phy_write(dev, B43_NPHY_C2_NBCLIPTHRES, 0x84);
+
+ if (!dev->phy.is_40mhz) {
+ /* Set dwell lengths */
+ b43_phy_write(dev, B43_NPHY_CLIP1_NBDWELL_LEN, 0x002B);
+ b43_phy_write(dev, B43_NPHY_CLIP2_NBDWELL_LEN, 0x002B);
+ b43_phy_write(dev, B43_NPHY_W1CLIP1_DWELL_LEN, 0x0009);
+ b43_phy_write(dev, B43_NPHY_W1CLIP2_DWELL_LEN, 0x0009);
+ }
+
+ /* Set wideband clip 2 threshold */
+ b43_phy_maskset(dev, B43_NPHY_C1_CLIPWBTHRES,
+ ~B43_NPHY_C1_CLIPWBTHRES_CLIP2, 21);
+ b43_phy_maskset(dev, B43_NPHY_C2_CLIPWBTHRES,
+ ~B43_NPHY_C2_CLIPWBTHRES_CLIP2, 21);
+
+ if (!dev->phy.is_40mhz) {
+ b43_phy_maskset(dev, B43_NPHY_C1_CGAINI,
+ ~B43_NPHY_C1_CGAINI_GAINBKOFF, 0x1);
+ b43_phy_maskset(dev, B43_NPHY_C2_CGAINI,
+ ~B43_NPHY_C2_CGAINI_GAINBKOFF, 0x1);
+ b43_phy_maskset(dev, B43_NPHY_C1_CCK_CGAINI,
+ ~B43_NPHY_C1_CCK_CGAINI_GAINBKOFF, 0x1);
+ b43_phy_maskset(dev, B43_NPHY_C2_CCK_CGAINI,
+ ~B43_NPHY_C2_CCK_CGAINI_GAINBKOFF, 0x1);
+ }
+
+ b43_phy_write(dev, B43_NPHY_CCK_SHIFTB_REF, 0x809C);
+
+ if (nphy->gain_boost) {
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ &&
+ dev->phy.is_40mhz)
+ code = 4;
+ else
+ code = 5;
+ } else {
+ code = dev->phy.is_40mhz ? 6 : 7;
+ }
+
+ /* Set HPVGA2 index */
+ b43_phy_maskset(dev, B43_NPHY_C1_INITGAIN, ~B43_NPHY_C1_INITGAIN_HPVGA2,
+ code << B43_NPHY_C1_INITGAIN_HPVGA2_SHIFT);
+ b43_phy_maskset(dev, B43_NPHY_C2_INITGAIN, ~B43_NPHY_C2_INITGAIN_HPVGA2,
+ code << B43_NPHY_C2_INITGAIN_HPVGA2_SHIFT);
+
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x1D06);
+ /* specs say about 2 loops, but wl does 4 */
+ for (i = 0; i < 4; i++)
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, (code << 8 | 0x7C));
+
+ b43_nphy_adjust_lna_gain_table(dev);
+
+ if (nphy->elna_gain_config) {
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x0808);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x0);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
+
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x0C08);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x0);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
+
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x1D06);
+ /* specs say about 2 loops, but wl does 4 */
+ for (i = 0; i < 4; i++)
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
+ (code << 8 | 0x74));
+ }
+
+ if (dev->phy.rev == 2) {
+ for (i = 0; i < 4; i++) {
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR,
+ (0x0400 * i) + 0x0020);
+ for (j = 0; j < 21; j++) {
+ tmp = j * (i < 2 ? 3 : 1);
+ b43_phy_write(dev,
+ B43_NPHY_TABLE_DATALO, tmp);
+ }
+ }
+ }
+
+ b43_nphy_set_rf_sequence(dev, 5, rfseq_events, rfseq_delays, 3);
+ b43_phy_maskset(dev, B43_NPHY_OVER_DGAIN1,
+ ~B43_NPHY_OVER_DGAIN_CCKDGECV & 0xFFFF,
+ 0x5A << B43_NPHY_OVER_DGAIN_CCKDGECV_SHIFT);
+
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ b43_phy_maskset(dev, B43_PHY_N(0xC5D), 0xFF80, 4);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/WorkaroundsGainCtrl */
+static void b43_nphy_gain_ctl_workarounds(struct b43_wldev *dev)
+{
+ if (dev->phy.rev >= 3)
+ b43_nphy_gain_ctl_workarounds_rev3plus(dev);
+ else
+ b43_nphy_gain_ctl_workarounds_rev1_2(dev);
+}
+
+static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ struct ssb_sprom *sprom = dev->dev->bus_sprom;
+
+ /* TX to RX */
+ u8 tx2rx_events[8] = { 0x4, 0x3, 0x6, 0x5, 0x2, 0x1, 0x8, 0x1F };
+ u8 tx2rx_delays[8] = { 8, 4, 2, 2, 4, 4, 6, 1 };
+ /* RX to TX */
+ u8 rx2tx_events_ipa[9] = { 0x0, 0x1, 0x2, 0x8, 0x5, 0x6, 0xF, 0x3,
+ 0x1F };
+ u8 rx2tx_delays_ipa[9] = { 8, 6, 6, 4, 4, 16, 43, 1, 1 };
+ u8 rx2tx_events[9] = { 0x0, 0x1, 0x2, 0x8, 0x5, 0x6, 0x3, 0x4, 0x1F };
+ u8 rx2tx_delays[9] = { 8, 6, 6, 4, 4, 18, 42, 1, 1 };
+
+ u16 tmp16;
+ u32 tmp32;
+
+ b43_phy_write(dev, 0x23f, 0x1f8);
+ b43_phy_write(dev, 0x240, 0x1f8);
+
+ tmp32 = b43_ntab_read(dev, B43_NTAB32(30, 0));
+ tmp32 &= 0xffffff;
+ b43_ntab_write(dev, B43_NTAB32(30, 0), tmp32);
+
+ b43_phy_write(dev, B43_NPHY_PHASETR_A0, 0x0125);
+ b43_phy_write(dev, B43_NPHY_PHASETR_A1, 0x01B3);
+ b43_phy_write(dev, B43_NPHY_PHASETR_A2, 0x0105);
+ b43_phy_write(dev, B43_NPHY_PHASETR_B0, 0x016E);
+ b43_phy_write(dev, B43_NPHY_PHASETR_B1, 0x00CD);
+ b43_phy_write(dev, B43_NPHY_PHASETR_B2, 0x0020);
+
+ b43_phy_write(dev, B43_NPHY_C2_CLIP1_MEDGAIN, 0x000C);
+ b43_phy_write(dev, 0x2AE, 0x000C);
+
+ /* TX to RX */
+ b43_nphy_set_rf_sequence(dev, 1, tx2rx_events, tx2rx_delays,
+ ARRAY_SIZE(tx2rx_events));
+
+ /* RX to TX */
+ if (b43_nphy_ipa(dev))
+ b43_nphy_set_rf_sequence(dev, 0, rx2tx_events_ipa,
+ rx2tx_delays_ipa, ARRAY_SIZE(rx2tx_events_ipa));
+ if (nphy->hw_phyrxchain != 3 &&
+ nphy->hw_phyrxchain != nphy->hw_phytxchain) {
+ if (b43_nphy_ipa(dev)) {
+ rx2tx_delays[5] = 59;
+ rx2tx_delays[6] = 1;
+ rx2tx_events[7] = 0x1F;
+ }
+ b43_nphy_set_rf_sequence(dev, 1, rx2tx_events, rx2tx_delays,
+ ARRAY_SIZE(rx2tx_events));
+ }
+
+ tmp16 = (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) ?
+ 0x2 : 0x9C40;
+ b43_phy_write(dev, B43_NPHY_ENDROP_TLEN, tmp16);
+
+ b43_phy_maskset(dev, 0x294, 0xF0FF, 0x0700);
+
+ b43_ntab_write(dev, B43_NTAB32(16, 3), 0x18D);
+ b43_ntab_write(dev, B43_NTAB32(16, 127), 0x18D);
+
+ b43_nphy_gain_ctl_workarounds(dev);
+
+ b43_ntab_write(dev, B43_NTAB16(8, 0), 2);
+ b43_ntab_write(dev, B43_NTAB16(8, 16), 2);
+
+ /* TODO */
+
+ b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_MAST_BIAS, 0x00);
+ b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_MAST_BIAS, 0x00);
+ b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_BIAS_MAIN, 0x06);
+ b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_BIAS_MAIN, 0x06);
+ b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_BIAS_AUX, 0x07);
+ b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_BIAS_AUX, 0x07);
+ b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_LOB_BIAS, 0x88);
+ b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_LOB_BIAS, 0x88);
+ b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_CMFB_IDAC, 0x00);
+ b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_CMFB_IDAC, 0x00);
+ b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXG_CMFB_IDAC, 0x00);
+ b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXG_CMFB_IDAC, 0x00);
+
+ /* N PHY WAR TX Chain Update with hw_phytxchain as argument */
+
+ if ((sprom->boardflags2_lo & B43_BFL2_APLL_WAR &&
+ b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ||
+ (sprom->boardflags2_lo & B43_BFL2_GPLL_WAR &&
+ b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ))
+ tmp32 = 0x00088888;
+ else
+ tmp32 = 0x88888888;
+ b43_ntab_write(dev, B43_NTAB32(30, 1), tmp32);
+ b43_ntab_write(dev, B43_NTAB32(30, 2), tmp32);
+ b43_ntab_write(dev, B43_NTAB32(30, 3), tmp32);
+
+ if (dev->phy.rev == 4 &&
+ b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ b43_radio_write(dev, B2056_TX0 | B2056_TX_GMBB_IDAC,
+ 0x70);
+ b43_radio_write(dev, B2056_TX1 | B2056_TX_GMBB_IDAC,
+ 0x70);
+ }
+
+ b43_phy_write(dev, 0x224, 0x03eb);
+ b43_phy_write(dev, 0x225, 0x03eb);
+ b43_phy_write(dev, 0x226, 0x0341);
+ b43_phy_write(dev, 0x227, 0x0341);
+ b43_phy_write(dev, 0x228, 0x042b);
+ b43_phy_write(dev, 0x229, 0x042b);
+ b43_phy_write(dev, 0x22a, 0x0381);
+ b43_phy_write(dev, 0x22b, 0x0381);
+ b43_phy_write(dev, 0x22c, 0x042b);
+ b43_phy_write(dev, 0x22d, 0x042b);
+ b43_phy_write(dev, 0x22e, 0x0381);
+ b43_phy_write(dev, 0x22f, 0x0381);
+}
+
+static void b43_nphy_workarounds_rev1_2(struct b43_wldev *dev)
+{
+ struct ssb_sprom *sprom = dev->dev->bus_sprom;
+ struct b43_phy *phy = &dev->phy;
+ struct b43_phy_n *nphy = phy->n;
+
+ u8 events1[7] = { 0x0, 0x1, 0x2, 0x8, 0x4, 0x5, 0x3 };
+ u8 delays1[7] = { 0x8, 0x6, 0x6, 0x2, 0x4, 0x3C, 0x1 };
+
+ u8 events2[7] = { 0x0, 0x3, 0x5, 0x4, 0x2, 0x1, 0x8 };
+ u8 delays2[7] = { 0x8, 0x6, 0x2, 0x4, 0x4, 0x6, 0x1 };
+
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ &&
+ nphy->band5g_pwrgain) {
+ b43_radio_mask(dev, B2055_C1_TX_RF_SPARE, ~0x8);
+ b43_radio_mask(dev, B2055_C2_TX_RF_SPARE, ~0x8);
+ } else {
+ b43_radio_set(dev, B2055_C1_TX_RF_SPARE, 0x8);
+ b43_radio_set(dev, B2055_C2_TX_RF_SPARE, 0x8);
+ }
+
+ b43_ntab_write(dev, B43_NTAB16(8, 0x00), 0x000A);
+ b43_ntab_write(dev, B43_NTAB16(8, 0x10), 0x000A);
+ b43_ntab_write(dev, B43_NTAB16(8, 0x02), 0xCDAA);
+ b43_ntab_write(dev, B43_NTAB16(8, 0x12), 0xCDAA);
+
+ if (dev->phy.rev < 2) {
+ b43_ntab_write(dev, B43_NTAB16(8, 0x08), 0x0000);
+ b43_ntab_write(dev, B43_NTAB16(8, 0x18), 0x0000);
+ b43_ntab_write(dev, B43_NTAB16(8, 0x07), 0x7AAB);
+ b43_ntab_write(dev, B43_NTAB16(8, 0x17), 0x7AAB);
+ b43_ntab_write(dev, B43_NTAB16(8, 0x06), 0x0800);
+ b43_ntab_write(dev, B43_NTAB16(8, 0x16), 0x0800);
+ }
+
+ b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO1, 0x2D8);
+ b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP1, 0x301);
+ b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO2, 0x2D8);
+ b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2, 0x301);
+
+ if (sprom->boardflags2_lo & B43_BFL2_SKWRKFEM_BRD &&
+ dev->dev->board_type == 0x8B) {
+ delays1[0] = 0x1;
+ delays1[5] = 0x14;
+ }
+ b43_nphy_set_rf_sequence(dev, 0, events1, delays1, 7);
+ b43_nphy_set_rf_sequence(dev, 1, events2, delays2, 7);
+
+ b43_nphy_gain_ctl_workarounds(dev);
+
+ if (dev->phy.rev < 2) {
+ if (b43_phy_read(dev, B43_NPHY_RXCTL) & 0x2)
+ b43_hf_write(dev, b43_hf_read(dev) |
+ B43_HF_MLADVW);
+ } else if (dev->phy.rev == 2) {
+ b43_phy_write(dev, B43_NPHY_CRSCHECK2, 0);
+ b43_phy_write(dev, B43_NPHY_CRSCHECK3, 0);
+ }
+
+ if (dev->phy.rev < 2)
+ b43_phy_mask(dev, B43_NPHY_SCRAM_SIGCTL,
+ ~B43_NPHY_SCRAM_SIGCTL_SCM);
+
+ /* Set phase track alpha and beta */
+ b43_phy_write(dev, B43_NPHY_PHASETR_A0, 0x125);
+ b43_phy_write(dev, B43_NPHY_PHASETR_A1, 0x1B3);
+ b43_phy_write(dev, B43_NPHY_PHASETR_A2, 0x105);
+ b43_phy_write(dev, B43_NPHY_PHASETR_B0, 0x16E);
+ b43_phy_write(dev, B43_NPHY_PHASETR_B1, 0xCD);
+ b43_phy_write(dev, B43_NPHY_PHASETR_B2, 0x20);
+
+ b43_phy_mask(dev, B43_NPHY_PIL_DW1,
+ ~B43_NPHY_PIL_DW_64QAM & 0xFFFF);
+ b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B1, 0xB5);
+ b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B2, 0xA4);
+ b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B3, 0x00);
+
+ if (dev->phy.rev == 2)
+ b43_phy_set(dev, B43_NPHY_FINERX2_CGC,
+ B43_NPHY_FINERX2_CGC_DECGC);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/Workarounds */
+static void b43_nphy_workarounds(struct b43_wldev *dev)
+{
+ struct b43_phy *phy = &dev->phy;
+ struct b43_phy_n *nphy = phy->n;
+
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
+ b43_nphy_classifier(dev, 1, 0);
+ else
+ b43_nphy_classifier(dev, 1, 1);
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 1);
+
+ b43_phy_set(dev, B43_NPHY_IQFLIP,
+ B43_NPHY_IQFLIP_ADC1 | B43_NPHY_IQFLIP_ADC2);
+
+ if (dev->phy.rev >= 3)
+ b43_nphy_workarounds_rev3plus(dev);
+ else
+ b43_nphy_workarounds_rev1_2(dev);
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 0);
+}
+
+/**************************************************
+ * Tx/Rx common
+ **************************************************/
+
+/*
+ * Transmits a known value for LO calibration
+ * http://bcm-v4.sipsolutions.net/802.11/PHY/N/TXTone
+ */
+static int b43_nphy_tx_tone(struct b43_wldev *dev, u32 freq, u16 max_val,
+ bool iqmode, bool dac_test)
+{
+ u16 samp = b43_nphy_gen_load_samples(dev, freq, max_val, dac_test);
+ if (samp == 0)
+ return -1;
+ b43_nphy_run_samples(dev, samp, 0xFFFF, 0, iqmode, dac_test);
+ return 0;
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/Chains */
+static void b43_nphy_update_txrx_chain(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+
+ bool override = false;
+ u16 chain = 0x33;
+
+ if (nphy->txrx_chain == 0) {
+ chain = 0x11;
+ override = true;
+ } else if (nphy->txrx_chain == 1) {
+ chain = 0x22;
+ override = true;
+ }
+
+ b43_phy_maskset(dev, B43_NPHY_RFSEQCA,
+ ~(B43_NPHY_RFSEQCA_TXEN | B43_NPHY_RFSEQCA_RXEN),
+ chain);
+
+ if (override)
+ b43_phy_set(dev, B43_NPHY_RFSEQMODE,
+ B43_NPHY_RFSEQMODE_CAOVER);
+ else
+ b43_phy_mask(dev, B43_NPHY_RFSEQMODE,
+ ~B43_NPHY_RFSEQMODE_CAOVER);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/stop-playback */
+static void b43_nphy_stop_playback(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ u16 tmp;
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 1);
+
+ tmp = b43_phy_read(dev, B43_NPHY_SAMP_STAT);
+ if (tmp & 0x1)
+ b43_phy_set(dev, B43_NPHY_SAMP_CMD, B43_NPHY_SAMP_CMD_STOP);
+ else if (tmp & 0x2)
+ b43_phy_mask(dev, B43_NPHY_IQLOCAL_CMDGCTL, 0x7FFF);
+
+ b43_phy_mask(dev, B43_NPHY_SAMP_CMD, ~0x0004);
+
+ if (nphy->bb_mult_save & 0x80000000) {
+ tmp = nphy->bb_mult_save & 0xFFFF;
+ b43_ntab_write(dev, B43_NTAB16(15, 87), tmp);
+ nphy->bb_mult_save = 0;
+ }
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 0);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/IqCalGainParams */
+static void b43_nphy_iq_cal_gain_params(struct b43_wldev *dev, u16 core,
+ struct nphy_txgains target,
+ struct nphy_iqcal_params *params)
+{
+ int i, j, indx;
+ u16 gain;
+
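+	/*
+	 * Pack the per-core TX gain stages (txgm/pga/pad and, on rev3+, ipa)
+	 * into a calibration gain word. Older PHYs instead look the gain up in
+	 * tbl_iqcal_gainparams[] for the current band and take the parameters
+	 * from the matching row.
+	 */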
+ if (dev->phy.rev >= 3) {
+ params->txgm = target.txgm[core];
+ params->pga = target.pga[core];
+ params->pad = target.pad[core];
+ params->ipa = target.ipa[core];
+ params->cal_gain = (params->txgm << 12) | (params->pga << 8) |
+ (params->pad << 4) | (params->ipa);
+ for (j = 0; j < 5; j++)
+ params->ncorr[j] = 0x79;
+ } else {
+ gain = (target.pad[core]) | (target.pga[core] << 4) |
+ (target.txgm[core] << 8);
+
+ indx = (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ?
+ 1 : 0;
+ for (i = 0; i < 9; i++)
+ if (tbl_iqcal_gainparams[indx][i][0] == gain)
+ break;
+ i = min(i, 8);
+
+ params->txgm = tbl_iqcal_gainparams[indx][i][1];
+ params->pga = tbl_iqcal_gainparams[indx][i][2];
+ params->pad = tbl_iqcal_gainparams[indx][i][3];
+ params->cal_gain = (params->txgm << 7) | (params->pga << 4) |
+ (params->pad << 2);
+ for (j = 0; j < 4; j++)
+ params->ncorr[j] = tbl_iqcal_gainparams[indx][i][4 + j];
+ }
+}
+
+/**************************************************
+ * Tx and Rx
+ **************************************************/
+
+void b43_nphy_set_rxantenna(struct b43_wldev *dev, int antenna)
+{//TODO
+}
+
+static void b43_nphy_op_adjust_txpower(struct b43_wldev *dev)
+{//TODO
+}
+
+static enum b43_txpwr_result b43_nphy_op_recalc_txpower(struct b43_wldev *dev,
+ bool ignore_tssi)
+{//TODO
+ return B43_TXPWR_RES_DONE;
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlEnable */
+static void b43_nphy_tx_power_ctrl(struct b43_wldev *dev, bool enable)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ u8 i;
+ u16 bmask, val, tmp;
+ enum ieee80211_band band = b43_current_band(dev->wl);
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 1);
+
+ nphy->txpwrctrl = enable;
+ if (!enable) {
+ if (dev->phy.rev >= 3 &&
+ (b43_phy_read(dev, B43_NPHY_TXPCTL_CMD) &
+ (B43_NPHY_TXPCTL_CMD_COEFF |
+ B43_NPHY_TXPCTL_CMD_HWPCTLEN |
+ B43_NPHY_TXPCTL_CMD_PCTLEN))) {
+			/* TX pwr ctl is enabled; disable it and save its state */
+ nphy->tx_pwr_idx[0] = b43_phy_read(dev,
+ B43_NPHY_C1_TXPCTL_STAT) & 0x7f;
+ nphy->tx_pwr_idx[1] = b43_phy_read(dev,
+ B43_NPHY_C2_TXPCTL_STAT) & 0x7f;
+ }
+
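+		/* Zero the per-core adjusted power tables; 0x6840 and 0x6C40
+		 * address tables 26 and 27 at offset 64, the same entries the
+		 * enable path below fills from adj_pwr_tbl. */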
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x6840);
+ for (i = 0; i < 84; i++)
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0);
+
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x6C40);
+ for (i = 0; i < 84; i++)
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0);
+
+ tmp = B43_NPHY_TXPCTL_CMD_COEFF | B43_NPHY_TXPCTL_CMD_HWPCTLEN;
+ if (dev->phy.rev >= 3)
+ tmp |= B43_NPHY_TXPCTL_CMD_PCTLEN;
+ b43_phy_mask(dev, B43_NPHY_TXPCTL_CMD, ~tmp);
+
+ if (dev->phy.rev >= 3) {
+ b43_phy_set(dev, B43_NPHY_AFECTL_OVER1, 0x0100);
+ b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x0100);
+ } else {
+ b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x4000);
+ }
+
+ if (dev->phy.rev == 2)
+ b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3,
+ ~B43_NPHY_BPHY_CTL3_SCALE, 0x53);
+ else if (dev->phy.rev < 2)
+ b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3,
+ ~B43_NPHY_BPHY_CTL3_SCALE, 0x5A);
+
+ if (dev->phy.rev < 2 && dev->phy.is_40mhz)
+ b43_hf_write(dev, b43_hf_read(dev) | B43_HF_TSSIRPSMW);
+ } else {
+ b43_ntab_write_bulk(dev, B43_NTAB16(26, 64), 84,
+ nphy->adj_pwr_tbl);
+ b43_ntab_write_bulk(dev, B43_NTAB16(27, 64), 84,
+ nphy->adj_pwr_tbl);
+
+ bmask = B43_NPHY_TXPCTL_CMD_COEFF |
+ B43_NPHY_TXPCTL_CMD_HWPCTLEN;
+ /* wl does useless check for "enable" param here */
+ val = B43_NPHY_TXPCTL_CMD_COEFF | B43_NPHY_TXPCTL_CMD_HWPCTLEN;
+ if (dev->phy.rev >= 3) {
+ bmask |= B43_NPHY_TXPCTL_CMD_PCTLEN;
+ if (val)
+ val |= B43_NPHY_TXPCTL_CMD_PCTLEN;
+ }
+ b43_phy_maskset(dev, B43_NPHY_TXPCTL_CMD, ~(bmask), val);
+
+ if (band == IEEE80211_BAND_5GHZ) {
+ b43_phy_maskset(dev, B43_NPHY_TXPCTL_CMD,
+ ~B43_NPHY_TXPCTL_CMD_INIT, 0x64);
+ if (dev->phy.rev > 1)
+ b43_phy_maskset(dev, B43_NPHY_TXPCTL_INIT,
+ ~B43_NPHY_TXPCTL_INIT_PIDXI1,
+ 0x64);
+ }
+
+ if (dev->phy.rev >= 3) {
+ if (nphy->tx_pwr_idx[0] != 128 &&
+ nphy->tx_pwr_idx[1] != 128) {
+ /* Recover TX pwr ctl state */
+ b43_phy_maskset(dev, B43_NPHY_TXPCTL_CMD,
+ ~B43_NPHY_TXPCTL_CMD_INIT,
+ nphy->tx_pwr_idx[0]);
+ if (dev->phy.rev > 1)
+ b43_phy_maskset(dev,
+ B43_NPHY_TXPCTL_INIT,
+ ~0xff, nphy->tx_pwr_idx[1]);
+ }
+ }
+
+ if (dev->phy.rev >= 3) {
+ b43_phy_mask(dev, B43_NPHY_AFECTL_OVER1, ~0x100);
+ b43_phy_mask(dev, B43_NPHY_AFECTL_OVER, ~0x100);
+ } else {
+ b43_phy_mask(dev, B43_NPHY_AFECTL_OVER, ~0x4000);
+ }
+
+ if (dev->phy.rev == 2)
+ b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3, ~0xFF, 0x3b);
+ else if (dev->phy.rev < 2)
+ b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3, ~0xFF, 0x40);
+
+ if (dev->phy.rev < 2 && dev->phy.is_40mhz)
+ b43_hf_write(dev, b43_hf_read(dev) & ~B43_HF_TSSIRPSMW);
+
+ if (b43_nphy_ipa(dev)) {
+ b43_phy_mask(dev, B43_NPHY_PAPD_EN0, ~0x4);
+ b43_phy_mask(dev, B43_NPHY_PAPD_EN1, ~0x4);
+ }
+ }
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 0);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrFix */
+static void b43_nphy_tx_power_fix(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ struct ssb_sprom *sprom = dev->dev->bus_sprom;
+
+ u8 txpi[2], bbmult, i;
+ u16 tmp, radio_gain, dac_gain;
+ u16 freq = dev->phy.channel_freq;
+ u32 txgain;
+ /* u32 gaintbl; rev3+ */
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 1);
+
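+	/* Pick the per-core TX power index: fixed values for newer PHYs,
+	 * otherwise the SPROM TXPID entry for the current band/sub-band. */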
+ if (dev->phy.rev >= 7) {
+ txpi[0] = txpi[1] = 30;
+ } else if (dev->phy.rev >= 3) {
+ txpi[0] = 40;
+ txpi[1] = 40;
+ } else if (sprom->revision < 4) {
+ txpi[0] = 72;
+ txpi[1] = 72;
+ } else {
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ txpi[0] = sprom->txpid2g[0];
+ txpi[1] = sprom->txpid2g[1];
+ } else if (freq >= 4900 && freq < 5100) {
+ txpi[0] = sprom->txpid5gl[0];
+ txpi[1] = sprom->txpid5gl[1];
+ } else if (freq >= 5100 && freq < 5500) {
+ txpi[0] = sprom->txpid5g[0];
+ txpi[1] = sprom->txpid5g[1];
+ } else if (freq >= 5500) {
+ txpi[0] = sprom->txpid5gh[0];
+ txpi[1] = sprom->txpid5gh[1];
+ } else {
+ txpi[0] = 91;
+ txpi[1] = 91;
+ }
+ }
+ if (dev->phy.rev < 7 &&
+ (txpi[0] < 40 || txpi[0] > 100 || txpi[1] < 40 || txpi[1] > 100))
+ txpi[0] = txpi[1] = 91;
+
+ /*
+ for (i = 0; i < 2; i++) {
+ nphy->txpwrindex[i].index_internal = txpi[i];
+ nphy->txpwrindex[i].index_internal_save = txpi[i];
+ }
+ */
+
+ for (i = 0; i < 2; i++) {
+ if (dev->phy.rev >= 3) {
+ if (b43_nphy_ipa(dev)) {
+ txgain = *(b43_nphy_get_ipa_gain_table(dev) +
+ txpi[i]);
+ } else if (b43_current_band(dev->wl) ==
+ IEEE80211_BAND_5GHZ) {
+ /* FIXME: use 5GHz tables */
+ txgain =
+ b43_ntab_tx_gain_rev3plus_2ghz[txpi[i]];
+ } else {
+ if (dev->phy.rev >= 5 &&
+ sprom->fem.ghz5.extpa_gain == 3)
+ ; /* FIXME: 5GHz_txgain_HiPwrEPA */
+ txgain =
+ b43_ntab_tx_gain_rev3plus_2ghz[txpi[i]];
+ }
+ radio_gain = (txgain >> 16) & 0x1FFFF;
+ } else {
+ txgain = b43_ntab_tx_gain_rev0_1_2[txpi[i]];
+ radio_gain = (txgain >> 16) & 0x1FFF;
+ }
+
+ if (dev->phy.rev >= 7)
+ dac_gain = (txgain >> 8) & 0x7;
+ else
+ dac_gain = (txgain >> 8) & 0x3F;
+ bbmult = txgain & 0xFF;
+
+ if (dev->phy.rev >= 3) {
+ if (i == 0)
+ b43_phy_set(dev, B43_NPHY_AFECTL_OVER1, 0x0100);
+ else
+ b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x0100);
+ } else {
+ b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x4000);
+ }
+
+ if (i == 0)
+ b43_phy_write(dev, B43_NPHY_AFECTL_DACGAIN1, dac_gain);
+ else
+ b43_phy_write(dev, B43_NPHY_AFECTL_DACGAIN2, dac_gain);
+
+ b43_ntab_write(dev, B43_NTAB16(0x7, 0x110 + i), radio_gain);
+
+ tmp = b43_ntab_read(dev, B43_NTAB16(0xF, 0x57));
+ if (i == 0)
+ tmp = (tmp & 0x00FF) | (bbmult << 8);
+ else
+ tmp = (tmp & 0xFF00) | bbmult;
+ b43_ntab_write(dev, B43_NTAB16(0xF, 0x57), tmp);
+
+ if (b43_nphy_ipa(dev)) {
+ u32 tmp32;
+ u16 reg = (i == 0) ?
+ B43_NPHY_PAPD_EN0 : B43_NPHY_PAPD_EN1;
+ tmp32 = b43_ntab_read(dev, B43_NTAB32(26 + i,
+ 576 + txpi[i]));
+ b43_phy_maskset(dev, reg, 0xE00F, (u32) tmp32 << 4);
+ b43_phy_set(dev, reg, 0x4);
+ }
+ }
+
+ b43_phy_mask(dev, B43_NPHY_BPHY_CTL2, ~B43_NPHY_BPHY_CTL2_LUT);
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 0);
+}
+
+static void b43_nphy_ipa_internal_tssi_setup(struct b43_wldev *dev)
+{
+ struct b43_phy *phy = &dev->phy;
+
+ u8 core;
+ u16 r; /* routing */
+
+ if (phy->rev >= 7) {
+ for (core = 0; core < 2; core++) {
+ r = core ? 0x190 : 0x170;
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ b43_radio_write(dev, r + 0x5, 0x5);
+ b43_radio_write(dev, r + 0x9, 0xE);
+ if (phy->rev != 5)
+ b43_radio_write(dev, r + 0xA, 0);
+ if (phy->rev != 7)
+ b43_radio_write(dev, r + 0xB, 1);
+ else
+ b43_radio_write(dev, r + 0xB, 0x31);
+ } else {
+ b43_radio_write(dev, r + 0x5, 0x9);
+ b43_radio_write(dev, r + 0x9, 0xC);
+ b43_radio_write(dev, r + 0xB, 0x0);
+ if (phy->rev != 5)
+ b43_radio_write(dev, r + 0xA, 1);
+ else
+ b43_radio_write(dev, r + 0xA, 0x31);
+ }
+ b43_radio_write(dev, r + 0x6, 0);
+ b43_radio_write(dev, r + 0x7, 0);
+ b43_radio_write(dev, r + 0x8, 3);
+ b43_radio_write(dev, r + 0xC, 0);
+ }
+ } else {
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ b43_radio_write(dev, B2056_SYN_RESERVED_ADDR31, 0x128);
+ else
+ b43_radio_write(dev, B2056_SYN_RESERVED_ADDR31, 0x80);
+ b43_radio_write(dev, B2056_SYN_RESERVED_ADDR30, 0);
+ b43_radio_write(dev, B2056_SYN_GPIO_MASTER1, 0x29);
+
+ for (core = 0; core < 2; core++) {
+ r = core ? B2056_TX1 : B2056_TX0;
+
+ b43_radio_write(dev, r | B2056_TX_IQCAL_VCM_HG, 0);
+ b43_radio_write(dev, r | B2056_TX_IQCAL_IDAC, 0);
+ b43_radio_write(dev, r | B2056_TX_TSSI_VCM, 3);
+ b43_radio_write(dev, r | B2056_TX_TX_AMP_DET, 0);
+ b43_radio_write(dev, r | B2056_TX_TSSI_MISC1, 8);
+ b43_radio_write(dev, r | B2056_TX_TSSI_MISC2, 0);
+ b43_radio_write(dev, r | B2056_TX_TSSI_MISC3, 0);
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ b43_radio_write(dev, r | B2056_TX_TX_SSI_MASTER,
+ 0x5);
+ if (phy->rev != 5)
+ b43_radio_write(dev, r | B2056_TX_TSSIA,
+ 0x00);
+ if (phy->rev >= 5)
+ b43_radio_write(dev, r | B2056_TX_TSSIG,
+ 0x31);
+ else
+ b43_radio_write(dev, r | B2056_TX_TSSIG,
+ 0x11);
+ b43_radio_write(dev, r | B2056_TX_TX_SSI_MUX,
+ 0xE);
+ } else {
+ b43_radio_write(dev, r | B2056_TX_TX_SSI_MASTER,
+ 0x9);
+ b43_radio_write(dev, r | B2056_TX_TSSIA, 0x31);
+ b43_radio_write(dev, r | B2056_TX_TSSIG, 0x0);
+ b43_radio_write(dev, r | B2056_TX_TX_SSI_MUX,
+ 0xC);
+ }
+ }
+ }
+}
+
+/*
+ * Stop sample playback, transmit a known tone and read back the signal
+ * strength to obtain the idle TSSI (Transmit Signal Strength Indicator).
+ * http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlIdleTssi
+ */
+static void b43_nphy_tx_power_ctl_idle_tssi(struct b43_wldev *dev)
+{
+ struct b43_phy *phy = &dev->phy;
+ struct b43_phy_n *nphy = dev->phy.n;
+
+ u32 tmp;
+ s32 rssi[4] = { };
+
+ /* TODO: check if we can transmit */
+
+ if (b43_nphy_ipa(dev))
+ b43_nphy_ipa_internal_tssi_setup(dev);
+
+ if (phy->rev >= 7)
+ ; /* TODO: Override Rev7 with 0x2000, 0, 3, 0, 0 as arguments */
+ else if (phy->rev >= 3)
+ b43_nphy_rf_control_override(dev, 0x2000, 0, 3, false);
+
+ b43_nphy_stop_playback(dev);
+ b43_nphy_tx_tone(dev, 0xFA0, 0, false, false);
+ udelay(20);
+ tmp = b43_nphy_poll_rssi(dev, 4, rssi, 1);
+ b43_nphy_stop_playback(dev);
+ b43_nphy_rssi_select(dev, 0, 0);
+
+ if (phy->rev >= 7)
+ ; /* TODO: Override Rev7 with 0x2000, 0, 3, 1, 0 as arguments */
+ else if (phy->rev >= 3)
+ b43_nphy_rf_control_override(dev, 0x2000, 0, 3, true);
+
+ if (phy->rev >= 3) {
+ nphy->pwr_ctl_info[0].idle_tssi_5g = (tmp >> 24) & 0xFF;
+ nphy->pwr_ctl_info[1].idle_tssi_5g = (tmp >> 8) & 0xFF;
+ } else {
+ nphy->pwr_ctl_info[0].idle_tssi_5g = (tmp >> 16) & 0xFF;
+ nphy->pwr_ctl_info[1].idle_tssi_5g = tmp & 0xFF;
+ }
+ nphy->pwr_ctl_info[0].idle_tssi_2g = (tmp >> 24) & 0xFF;
+ nphy->pwr_ctl_info[1].idle_tssi_2g = (tmp >> 8) & 0xFF;
+}
+
+static void b43_nphy_tx_gain_table_upload(struct b43_wldev *dev)
+{
+ struct b43_phy *phy = &dev->phy;
+
+ const u32 *table = NULL;
+#if 0
+ TODO: b43_ntab_papd_pga_gain_delta_ipa_2*
+ u32 rfpwr_offset;
+ u8 pga_gain;
+ int i;
+#endif
+
+ if (phy->rev >= 3) {
+ if (b43_nphy_ipa(dev)) {
+ table = b43_nphy_get_ipa_gain_table(dev);
+ } else {
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ if (phy->rev == 3)
+ table = b43_ntab_tx_gain_rev3_5ghz;
+				else if (phy->rev == 4)
+ table = b43_ntab_tx_gain_rev4_5ghz;
+ else
+ table = b43_ntab_tx_gain_rev5plus_5ghz;
+ } else {
+ table = b43_ntab_tx_gain_rev3plus_2ghz;
+ }
+ }
+ } else {
+ table = b43_ntab_tx_gain_rev0_1_2;
+ }
+ b43_ntab_write_bulk(dev, B43_NTAB32(26, 192), 128, table);
+ b43_ntab_write_bulk(dev, B43_NTAB32(27, 192), 128, table);
+
+ if (phy->rev >= 3) {
+#if 0
+ nphy->gmval = (table[0] >> 16) & 0x7000;
+
+ for (i = 0; i < 128; i++) {
+ pga_gain = (table[i] >> 24) & 0xF;
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ rfpwr_offset = b43_ntab_papd_pga_gain_delta_ipa_2g[pga_gain];
+ else
+ rfpwr_offset = b43_ntab_papd_pga_gain_delta_ipa_5g[pga_gain];
+ b43_ntab_write(dev, B43_NTAB32(26, 576 + i),
+ rfpwr_offset);
+ b43_ntab_write(dev, B43_NTAB32(27, 576 + i),
+ rfpwr_offset);
+ }
+#endif
+ }
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/PA%20override */
+static void b43_nphy_pa_override(struct b43_wldev *dev, bool enable)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ enum ieee80211_band band;
+ u16 tmp;
+
+ if (!enable) {
+ nphy->rfctrl_intc1_save = b43_phy_read(dev,
+ B43_NPHY_RFCTL_INTC1);
+ nphy->rfctrl_intc2_save = b43_phy_read(dev,
+ B43_NPHY_RFCTL_INTC2);
+ band = b43_current_band(dev->wl);
+ if (dev->phy.rev >= 3) {
+ if (band == IEEE80211_BAND_5GHZ)
+ tmp = 0x600;
+ else
+ tmp = 0x480;
+ } else {
+ if (band == IEEE80211_BAND_5GHZ)
+ tmp = 0x180;
+ else
+ tmp = 0x120;
+ }
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, tmp);
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, tmp);
+ } else {
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC1,
+ nphy->rfctrl_intc1_save);
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC2,
+ nphy->rfctrl_intc2_save);
+ }
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxLpFbw */
+static void b43_nphy_tx_lp_fbw(struct b43_wldev *dev)
+{
+ u16 tmp;
+
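+	/* The register writes below replicate a 3-bit value into four
+	 * consecutive 3-bit fields. */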
+ if (dev->phy.rev >= 3) {
+ if (b43_nphy_ipa(dev)) {
+ tmp = 4;
+ b43_phy_write(dev, B43_NPHY_TXF_40CO_B32S2,
+ (((((tmp << 3) | tmp) << 3) | tmp) << 3) | tmp);
+ }
+
+ tmp = 1;
+ b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S2,
+ (((((tmp << 3) | tmp) << 3) | tmp) << 3) | tmp);
+ }
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxIqEst */
+static void b43_nphy_rx_iq_est(struct b43_wldev *dev, struct nphy_iq_est *est,
+ u16 samps, u8 time, bool wait)
+{
+ int i;
+ u16 tmp;
+
+ b43_phy_write(dev, B43_NPHY_IQEST_SAMCNT, samps);
+ b43_phy_maskset(dev, B43_NPHY_IQEST_WT, ~B43_NPHY_IQEST_WT_VAL, time);
+ if (wait)
+ b43_phy_set(dev, B43_NPHY_IQEST_CMD, B43_NPHY_IQEST_CMD_MODE);
+ else
+ b43_phy_mask(dev, B43_NPHY_IQEST_CMD, ~B43_NPHY_IQEST_CMD_MODE);
+
+ b43_phy_set(dev, B43_NPHY_IQEST_CMD, B43_NPHY_IQEST_CMD_START);
+
+ for (i = 1000; i; i--) {
+ tmp = b43_phy_read(dev, B43_NPHY_IQEST_CMD);
+ if (!(tmp & B43_NPHY_IQEST_CMD_START)) {
+ est->i0_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_IPACC_HI0) << 16) |
+ b43_phy_read(dev, B43_NPHY_IQEST_IPACC_LO0);
+ est->q0_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_QPACC_HI0) << 16) |
+ b43_phy_read(dev, B43_NPHY_IQEST_QPACC_LO0);
+ est->iq0_prod = (b43_phy_read(dev, B43_NPHY_IQEST_IQACC_HI0) << 16) |
+ b43_phy_read(dev, B43_NPHY_IQEST_IQACC_LO0);
+
+ est->i1_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_IPACC_HI1) << 16) |
+ b43_phy_read(dev, B43_NPHY_IQEST_IPACC_LO1);
+ est->q1_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_QPACC_HI1) << 16) |
+ b43_phy_read(dev, B43_NPHY_IQEST_QPACC_LO1);
+ est->iq1_prod = (b43_phy_read(dev, B43_NPHY_IQEST_IQACC_HI1) << 16) |
+ b43_phy_read(dev, B43_NPHY_IQEST_IQACC_LO1);
+ return;
+ }
+ udelay(10);
+ }
+ memset(est, 0, sizeof(*est));
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxIqCoeffs */
+static void b43_nphy_rx_iq_coeffs(struct b43_wldev *dev, bool write,
+ struct b43_phy_n_iq_comp *pcomp)
+{
+ if (write) {
+ b43_phy_write(dev, B43_NPHY_C1_RXIQ_COMPA0, pcomp->a0);
+ b43_phy_write(dev, B43_NPHY_C1_RXIQ_COMPB0, pcomp->b0);
+ b43_phy_write(dev, B43_NPHY_C2_RXIQ_COMPA1, pcomp->a1);
+ b43_phy_write(dev, B43_NPHY_C2_RXIQ_COMPB1, pcomp->b1);
+ } else {
+ pcomp->a0 = b43_phy_read(dev, B43_NPHY_C1_RXIQ_COMPA0);
+ pcomp->b0 = b43_phy_read(dev, B43_NPHY_C1_RXIQ_COMPB0);
+ pcomp->a1 = b43_phy_read(dev, B43_NPHY_C2_RXIQ_COMPA1);
+ pcomp->b1 = b43_phy_read(dev, B43_NPHY_C2_RXIQ_COMPB1);
+ }
+}
+
+#if 0
+/* Ready but not used anywhere */
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCalPhyCleanup */
+static void b43_nphy_rx_cal_phy_cleanup(struct b43_wldev *dev, u8 core)
+{
+ u16 *regs = dev->phy.n->tx_rx_cal_phy_saveregs;
+
+ b43_phy_write(dev, B43_NPHY_RFSEQCA, regs[0]);
+ if (core == 0) {
+ b43_phy_write(dev, B43_NPHY_AFECTL_C1, regs[1]);
+ b43_phy_write(dev, B43_NPHY_AFECTL_OVER1, regs[2]);
+ } else {
+ b43_phy_write(dev, B43_NPHY_AFECTL_C2, regs[1]);
+ b43_phy_write(dev, B43_NPHY_AFECTL_OVER, regs[2]);
+ }
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, regs[3]);
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, regs[4]);
+ b43_phy_write(dev, B43_NPHY_RFCTL_RSSIO1, regs[5]);
+ b43_phy_write(dev, B43_NPHY_RFCTL_RSSIO2, regs[6]);
+ b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S1, regs[7]);
+ b43_phy_write(dev, B43_NPHY_RFCTL_OVER, regs[8]);
+ b43_phy_write(dev, B43_NPHY_PAPD_EN0, regs[9]);
+ b43_phy_write(dev, B43_NPHY_PAPD_EN1, regs[10]);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCalPhySetup */
+static void b43_nphy_rx_cal_phy_setup(struct b43_wldev *dev, u8 core)
+{
+ u8 rxval, txval;
+ u16 *regs = dev->phy.n->tx_rx_cal_phy_saveregs;
+
+ regs[0] = b43_phy_read(dev, B43_NPHY_RFSEQCA);
+ if (core == 0) {
+ regs[1] = b43_phy_read(dev, B43_NPHY_AFECTL_C1);
+ regs[2] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER1);
+ } else {
+ regs[1] = b43_phy_read(dev, B43_NPHY_AFECTL_C2);
+ regs[2] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER);
+ }
+ regs[3] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC1);
+ regs[4] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC2);
+ regs[5] = b43_phy_read(dev, B43_NPHY_RFCTL_RSSIO1);
+ regs[6] = b43_phy_read(dev, B43_NPHY_RFCTL_RSSIO2);
+ regs[7] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B1S1);
+ regs[8] = b43_phy_read(dev, B43_NPHY_RFCTL_OVER);
+ regs[9] = b43_phy_read(dev, B43_NPHY_PAPD_EN0);
+ regs[10] = b43_phy_read(dev, B43_NPHY_PAPD_EN1);
+
+ b43_phy_mask(dev, B43_NPHY_PAPD_EN0, ~0x0001);
+ b43_phy_mask(dev, B43_NPHY_PAPD_EN1, ~0x0001);
+
+ b43_phy_maskset(dev, B43_NPHY_RFSEQCA,
+ ~B43_NPHY_RFSEQCA_RXDIS & 0xFFFF,
+ ((1 - core) << B43_NPHY_RFSEQCA_RXDIS_SHIFT));
+ b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_TXEN,
+ ((1 - core) << B43_NPHY_RFSEQCA_TXEN_SHIFT));
+ b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_RXEN,
+ (core << B43_NPHY_RFSEQCA_RXEN_SHIFT));
+ b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_TXDIS,
+ (core << B43_NPHY_RFSEQCA_TXDIS_SHIFT));
+
+ if (core == 0) {
+ b43_phy_mask(dev, B43_NPHY_AFECTL_C1, ~0x0007);
+ b43_phy_set(dev, B43_NPHY_AFECTL_OVER1, 0x0007);
+ } else {
+ b43_phy_mask(dev, B43_NPHY_AFECTL_C2, ~0x0007);
+ b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x0007);
+ }
+
+ b43_nphy_rf_control_intc_override(dev, 2, 0, 3);
+ b43_nphy_rf_control_override(dev, 8, 0, 3, false);
+ b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RX2TX);
+
+ if (core == 0) {
+ rxval = 1;
+ txval = 8;
+ } else {
+ rxval = 4;
+ txval = 2;
+ }
+ b43_nphy_rf_control_intc_override(dev, 1, rxval, (core + 1));
+ b43_nphy_rf_control_intc_override(dev, 1, txval, (2 - core));
+}
+#endif
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CalcRxIqComp */
+static void b43_nphy_calc_rx_iq_comp(struct b43_wldev *dev, u8 mask)
+{
+ int i;
+ s32 iq;
+ u32 ii;
+ u32 qq;
+ int iq_nbits, qq_nbits;
+ int arsh, brsh;
+ u16 tmp, a, b;
+
+ struct nphy_iq_est est;
+ struct b43_phy_n_iq_comp old;
+ struct b43_phy_n_iq_comp new = { };
+ bool error = false;
+
+ if (mask == 0)
+ return;
+
+ b43_nphy_rx_iq_coeffs(dev, false, &old);
+ b43_nphy_rx_iq_coeffs(dev, true, &new);
+ b43_nphy_rx_iq_est(dev, &est, 0x4000, 32, false);
+ new = old;
+
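+	/*
+	 * For each requested core derive the compensation coefficients from
+	 * the I/Q power estimate, roughly (in Q10 fixed point):
+	 *   a ~= -1024 * iq_prod / i_pwr
+	 *   b ~=  1024 * (sqrt(q_pwr / i_pwr - (iq_prod / i_pwr)^2) - 1)
+	 */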
+ for (i = 0; i < 2; i++) {
+ if (i == 0 && (mask & 1)) {
+ iq = est.iq0_prod;
+ ii = est.i0_pwr;
+ qq = est.q0_pwr;
+ } else if (i == 1 && (mask & 2)) {
+ iq = est.iq1_prod;
+ ii = est.i1_pwr;
+ qq = est.q1_pwr;
+ } else {
+ continue;
+ }
+
+ if (ii + qq < 2) {
+ error = true;
+ break;
+ }
+
+ iq_nbits = fls(abs(iq));
+ qq_nbits = fls(qq);
+
+ arsh = iq_nbits - 20;
+ if (arsh >= 0) {
+ a = -((iq << (30 - iq_nbits)) + (ii >> (1 + arsh)));
+ tmp = ii >> arsh;
+ } else {
+ a = -((iq << (30 - iq_nbits)) + (ii << (-1 - arsh)));
+ tmp = ii << -arsh;
+ }
+ if (tmp == 0) {
+ error = true;
+ break;
+ }
+ a /= tmp;
+
+ brsh = qq_nbits - 11;
+ if (brsh >= 0) {
+ b = (qq << (31 - qq_nbits));
+ tmp = ii >> brsh;
+ } else {
+ b = (qq << (31 - qq_nbits));
+ tmp = ii << -brsh;
+ }
+ if (tmp == 0) {
+ error = true;
+ break;
+ }
+ b = int_sqrt(b / tmp - a * a) - (1 << 10);
+
+ if (i == 0 && (mask & 0x1)) {
+ if (dev->phy.rev >= 3) {
+ new.a0 = a & 0x3FF;
+ new.b0 = b & 0x3FF;
+ } else {
+ new.a0 = b & 0x3FF;
+ new.b0 = a & 0x3FF;
+ }
+ } else if (i == 1 && (mask & 0x2)) {
+ if (dev->phy.rev >= 3) {
+ new.a1 = a & 0x3FF;
+ new.b1 = b & 0x3FF;
+ } else {
+ new.a1 = b & 0x3FF;
+ new.b1 = a & 0x3FF;
+ }
+ }
+ }
+
+ if (error)
+ new = old;
+
+ b43_nphy_rx_iq_coeffs(dev, true, &new);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxIqWar */
+static void b43_nphy_tx_iq_workaround(struct b43_wldev *dev)
+{
+ u16 array[4];
+ b43_ntab_read_bulk(dev, B43_NTAB16(0xF, 0x50), 4, array);
+
+ b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW0, array[0]);
+ b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW1, array[1]);
+ b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW2, array[2]);
+ b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW3, array[3]);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SpurWar */
+static void b43_nphy_spur_workaround(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+
+ u8 channel = dev->phy.channel;
+ int tone[2] = { 57, 58 };
+ u32 noise[2] = { 0x3FF, 0x3FF };
+
+ B43_WARN_ON(dev->phy.rev < 3);
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 1);
+
+ if (nphy->gband_spurwar_en) {
+ /* TODO: N PHY Adjust Analog Pfbw (7) */
+ if (channel == 11 && dev->phy.is_40mhz)
+ ; /* TODO: N PHY Adjust Min Noise Var(2, tone, noise)*/
+ else
+ ; /* TODO: N PHY Adjust Min Noise Var(0, NULL, NULL)*/
+ /* TODO: N PHY Adjust CRS Min Power (0x1E) */
+ }
+
+ if (nphy->aband_spurwar_en) {
+ if (channel == 54) {
+ tone[0] = 0x20;
+ noise[0] = 0x25F;
+ } else if (channel == 38 || channel == 102 || channel == 118) {
+ if (0 /* FIXME */) {
+ tone[0] = 0x20;
+ noise[0] = 0x21F;
+ } else {
+ tone[0] = 0;
+ noise[0] = 0;
+ }
+ } else if (channel == 134) {
+ tone[0] = 0x20;
+ noise[0] = 0x21F;
+ } else if (channel == 151) {
+ tone[0] = 0x10;
+ noise[0] = 0x23F;
+ } else if (channel == 153 || channel == 161) {
+ tone[0] = 0x30;
+ noise[0] = 0x23F;
+ } else {
+ tone[0] = 0;
+ noise[0] = 0;
+ }
+
+ if (!tone[0] && !noise[0])
+ ; /* TODO: N PHY Adjust Min Noise Var(1, tone, noise)*/
+ else
+ ; /* TODO: N PHY Adjust Min Noise Var(0, NULL, NULL)*/
+ }
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 0);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlCoefSetup */
+static void b43_nphy_tx_pwr_ctrl_coef_setup(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ int i, j;
+ u32 tmp;
+ u32 cur_real, cur_imag, real_part, imag_part;
+
+ u16 buffer[7];
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, true);
+
+ b43_ntab_read_bulk(dev, B43_NTAB16(15, 80), 7, buffer);
+
+ for (i = 0; i < 2; i++) {
+ tmp = ((buffer[i * 2] & 0x3FF) << 10) |
+ (buffer[i * 2 + 1] & 0x3FF);
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR,
+ (((i + 26) << 10) | 320));
+ for (j = 0; j < 128; j++) {
+ b43_phy_write(dev, B43_NPHY_TABLE_DATAHI,
+ ((tmp >> 16) & 0xFFFF));
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
+ (tmp & 0xFFFF));
+ }
+ }
+
+ for (i = 0; i < 2; i++) {
+ tmp = buffer[5 + i];
+ real_part = (tmp >> 8) & 0xFF;
+ imag_part = (tmp & 0xFF);
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR,
+ (((i + 26) << 10) | 448));
+
+ if (dev->phy.rev >= 3) {
+ cur_real = real_part;
+ cur_imag = imag_part;
+ tmp = ((cur_real & 0xFF) << 8) | (cur_imag & 0xFF);
+ }
+
+ for (j = 0; j < 128; j++) {
+ if (dev->phy.rev < 3) {
+ cur_real = (real_part * loscale[j] + 128) >> 8;
+ cur_imag = (imag_part * loscale[j] + 128) >> 8;
+ tmp = ((cur_real & 0xFF) << 8) |
+ (cur_imag & 0xFF);
+ }
+ b43_phy_write(dev, B43_NPHY_TABLE_DATAHI,
+ ((tmp >> 16) & 0xFFFF));
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
+ (tmp & 0xFFFF));
+ }
+ }
+
+ if (dev->phy.rev >= 3) {
+ b43_shm_write16(dev, B43_SHM_SHARED,
+ B43_SHM_SH_NPHY_TXPWR_INDX0, 0xFFFF);
+ b43_shm_write16(dev, B43_SHM_SHARED,
+ B43_SHM_SH_NPHY_TXPWR_INDX1, 0xFFFF);
+ }
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, false);
+}
+
/*
* Restore RSSI Calibration
* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RestoreRssiCal
@@ -2729,24 +2943,6 @@ static void b43_nphy_restore_rssi_cal(struct b43_wldev *dev)
b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_Y, rssical_phy_regs[11]);
}
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/GetIpaGainTbl */
-static const u32 *b43_nphy_get_ipa_gain_table(struct b43_wldev *dev)
-{
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
- if (dev->phy.rev >= 6) {
- if (dev->dev->chip_id == 47162)
- return txpwrctrl_tx_gain_ipa_rev5;
- return txpwrctrl_tx_gain_ipa_rev6;
- } else if (dev->phy.rev >= 5) {
- return txpwrctrl_tx_gain_ipa_rev5;
- } else {
- return txpwrctrl_tx_gain_ipa;
- }
- } else {
- return txpwrctrl_tx_gain_ipa_5g;
- }
-}
-
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxCalRadioSetup */
static void b43_nphy_tx_cal_radio_setup(struct b43_wldev *dev)
{
@@ -2841,44 +3037,6 @@ static void b43_nphy_tx_cal_radio_setup(struct b43_wldev *dev)
}
}
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/IqCalGainParams */
-static void b43_nphy_iq_cal_gain_params(struct b43_wldev *dev, u16 core,
- struct nphy_txgains target,
- struct nphy_iqcal_params *params)
-{
- int i, j, indx;
- u16 gain;
-
- if (dev->phy.rev >= 3) {
- params->txgm = target.txgm[core];
- params->pga = target.pga[core];
- params->pad = target.pad[core];
- params->ipa = target.ipa[core];
- params->cal_gain = (params->txgm << 12) | (params->pga << 8) |
- (params->pad << 4) | (params->ipa);
- for (j = 0; j < 5; j++)
- params->ncorr[j] = 0x79;
- } else {
- gain = (target.pad[core]) | (target.pga[core] << 4) |
- (target.txgm[core] << 8);
-
- indx = (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ?
- 1 : 0;
- for (i = 0; i < 9; i++)
- if (tbl_iqcal_gainparams[indx][i][0] == gain)
- break;
- i = min(i, 8);
-
- params->txgm = tbl_iqcal_gainparams[indx][i][1];
- params->pga = tbl_iqcal_gainparams[indx][i][2];
- params->pad = tbl_iqcal_gainparams[indx][i][3];
- params->cal_gain = (params->txgm << 7) | (params->pga << 4) |
- (params->pad << 2);
- for (j = 0; j < 4; j++)
- params->ncorr[j] = tbl_iqcal_gainparams[indx][i][4 + j];
- }
-}
-
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/UpdateTxCalLadder */
static void b43_nphy_update_tx_cal_ladder(struct b43_wldev *dev, u16 core)
{
@@ -3258,7 +3416,7 @@ static int b43_nphy_cal_tx_iq_lo(struct b43_wldev *dev,
if (dev->phy.rev >= 4) {
avoid = nphy->hang_avoid;
- nphy->hang_avoid = 0;
+ nphy->hang_avoid = false;
}
b43_ntab_read_bulk(dev, B43_NTAB16(7, 0x110), 2, save);
@@ -3368,7 +3526,7 @@ static int b43_nphy_cal_tx_iq_lo(struct b43_wldev *dev,
if (phy6or5x && updated[core] == 0) {
b43_nphy_update_tx_cal_ladder(dev, core);
- updated[core] = 1;
+ updated[core] = true;
}
tmp = (params[core].ncorr[type] << 8) | 0x66;
@@ -3730,10 +3888,104 @@ static void b43_nphy_set_rx_core_state(struct b43_wldev *dev, u8 mask)
b43_mac_enable(dev);
}
+/**************************************************
+ * N-PHY init
+ **************************************************/
+
/*
- * Init N-PHY
- * http://bcm-v4.sipsolutions.net/802.11/PHY/Init/N
+ * Upload the N-PHY tables.
+ * http://bcm-v4.sipsolutions.net/802.11/PHY/N/InitTables
*/
+static void b43_nphy_tables_init(struct b43_wldev *dev)
+{
+ if (dev->phy.rev < 3)
+ b43_nphy_rev0_1_2_tables_init(dev);
+ else
+ b43_nphy_rev3plus_tables_init(dev);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/MIMOConfig */
+static void b43_nphy_update_mimo_config(struct b43_wldev *dev, s32 preamble)
+{
+ u16 mimocfg = b43_phy_read(dev, B43_NPHY_MIMOCFG);
+
+ mimocfg |= B43_NPHY_MIMOCFG_AUTO;
+ if (preamble == 1)
+ mimocfg |= B43_NPHY_MIMOCFG_GFMIX;
+ else
+ mimocfg &= ~B43_NPHY_MIMOCFG_GFMIX;
+
+ b43_phy_write(dev, B43_NPHY_MIMOCFG, mimocfg);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/BPHYInit */
+static void b43_nphy_bphy_init(struct b43_wldev *dev)
+{
+ unsigned int i;
+ u16 val;
+
+ val = 0x1E1F;
+ for (i = 0; i < 16; i++) {
+ b43_phy_write(dev, B43_PHY_N_BMODE(0x88 + i), val);
+ val -= 0x202;
+ }
+ val = 0x3E3F;
+ for (i = 0; i < 16; i++) {
+ b43_phy_write(dev, B43_PHY_N_BMODE(0x98 + i), val);
+ val -= 0x202;
+ }
+ b43_phy_write(dev, B43_PHY_N_BMODE(0x38), 0x668);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SuperSwitchInit */
+static void b43_nphy_superswitch_init(struct b43_wldev *dev, bool init)
+{
+ if (dev->phy.rev >= 3) {
+ if (!init)
+ return;
+ if (0 /* FIXME */) {
+ b43_ntab_write(dev, B43_NTAB16(9, 2), 0x211);
+ b43_ntab_write(dev, B43_NTAB16(9, 3), 0x222);
+ b43_ntab_write(dev, B43_NTAB16(9, 8), 0x144);
+ b43_ntab_write(dev, B43_NTAB16(9, 12), 0x188);
+ }
+ } else {
+ b43_phy_write(dev, B43_NPHY_GPIO_LOOEN, 0);
+ b43_phy_write(dev, B43_NPHY_GPIO_HIOEN, 0);
+
+ switch (dev->dev->bus_type) {
+#ifdef CONFIG_B43_BCMA
+ case B43_BUS_BCMA:
+ bcma_chipco_gpio_control(&dev->dev->bdev->bus->drv_cc,
+ 0xFC00, 0xFC00);
+ break;
+#endif
+#ifdef CONFIG_B43_SSB
+ case B43_BUS_SSB:
+ ssb_chipco_gpio_control(&dev->dev->sdev->bus->chipco,
+ 0xFC00, 0xFC00);
+ break;
+#endif
+ }
+
+ b43_write32(dev, B43_MMIO_MACCTL,
+ b43_read32(dev, B43_MMIO_MACCTL) &
+ ~B43_MACCTL_GPOUTSMSK);
+ b43_write16(dev, B43_MMIO_GPIO_MASK,
+ b43_read16(dev, B43_MMIO_GPIO_MASK) | 0xFC00);
+ b43_write16(dev, B43_MMIO_GPIO_CONTROL,
+ b43_read16(dev, B43_MMIO_GPIO_CONTROL) & ~0xFC00);
+
+ if (init) {
+ b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO1, 0x2D8);
+ b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP1, 0x301);
+ b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO2, 0x2D8);
+ b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2, 0x301);
+ }
+ }
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/Init/N */
int b43_phy_initn(struct b43_wldev *dev)
{
struct ssb_sprom *sprom = dev->dev->bus_sprom;
@@ -3857,7 +4109,7 @@ int b43_phy_initn(struct b43_wldev *dev)
tx_pwr_state = nphy->txpwrctrl;
b43_nphy_tx_power_ctrl(dev, false);
b43_nphy_tx_power_fix(dev);
- /* TODO N PHY TX Power Control Idle TSSI */
+ b43_nphy_tx_power_ctl_idle_tssi(dev);
/* TODO N PHY TX Power Control Setup */
b43_nphy_tx_gain_table_upload(dev);
@@ -3928,6 +4180,91 @@ int b43_phy_initn(struct b43_wldev *dev)
return 0;
}
+/**************************************************
+ * Channel switching ops.
+ **************************************************/
+
+static void b43_chantab_phy_upload(struct b43_wldev *dev,
+ const struct b43_phy_n_sfo_cfg *e)
+{
+ b43_phy_write(dev, B43_NPHY_BW1A, e->phy_bw1a);
+ b43_phy_write(dev, B43_NPHY_BW2, e->phy_bw2);
+ b43_phy_write(dev, B43_NPHY_BW3, e->phy_bw3);
+ b43_phy_write(dev, B43_NPHY_BW4, e->phy_bw4);
+ b43_phy_write(dev, B43_NPHY_BW5, e->phy_bw5);
+ b43_phy_write(dev, B43_NPHY_BW6, e->phy_bw6);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PmuSpurAvoid */
+static void b43_nphy_pmu_spur_avoid(struct b43_wldev *dev, bool avoid)
+{
+ struct bcma_drv_cc __maybe_unused *cc;
+ u32 __maybe_unused pmu_ctl;
+
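+	/*
+	 * Spur avoidance reprograms the chipcommon PLL so the reference clock
+	 * (and its harmonics) moves away from the tuned channel; the
+	 * BCMA_CC_PMU_CTL_PLL_UPD bit set afterwards makes the PMU apply the
+	 * new PLL configuration.
+	 */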
+ switch (dev->dev->bus_type) {
+#ifdef CONFIG_B43_BCMA
+ case B43_BUS_BCMA:
+ cc = &dev->dev->bdev->bus->drv_cc;
+ if (dev->dev->chip_id == 43224 || dev->dev->chip_id == 43225) {
+ if (avoid) {
+ bcma_chipco_pll_write(cc, 0x0, 0x11500010);
+ bcma_chipco_pll_write(cc, 0x1, 0x000C0C06);
+ bcma_chipco_pll_write(cc, 0x2, 0x0F600a08);
+ bcma_chipco_pll_write(cc, 0x3, 0x00000000);
+ bcma_chipco_pll_write(cc, 0x4, 0x2001E920);
+ bcma_chipco_pll_write(cc, 0x5, 0x88888815);
+ } else {
+ bcma_chipco_pll_write(cc, 0x0, 0x11100010);
+ bcma_chipco_pll_write(cc, 0x1, 0x000c0c06);
+ bcma_chipco_pll_write(cc, 0x2, 0x03000a08);
+ bcma_chipco_pll_write(cc, 0x3, 0x00000000);
+ bcma_chipco_pll_write(cc, 0x4, 0x200005c0);
+ bcma_chipco_pll_write(cc, 0x5, 0x88888815);
+ }
+ pmu_ctl = BCMA_CC_PMU_CTL_PLL_UPD;
+ } else if (dev->dev->chip_id == 0x4716) {
+ if (avoid) {
+ bcma_chipco_pll_write(cc, 0x0, 0x11500060);
+ bcma_chipco_pll_write(cc, 0x1, 0x080C0C06);
+ bcma_chipco_pll_write(cc, 0x2, 0x0F600000);
+ bcma_chipco_pll_write(cc, 0x3, 0x00000000);
+ bcma_chipco_pll_write(cc, 0x4, 0x2001E924);
+ bcma_chipco_pll_write(cc, 0x5, 0x88888815);
+ } else {
+ bcma_chipco_pll_write(cc, 0x0, 0x11100060);
+ bcma_chipco_pll_write(cc, 0x1, 0x080c0c06);
+ bcma_chipco_pll_write(cc, 0x2, 0x03000000);
+ bcma_chipco_pll_write(cc, 0x3, 0x00000000);
+ bcma_chipco_pll_write(cc, 0x4, 0x200005c0);
+ bcma_chipco_pll_write(cc, 0x5, 0x88888815);
+ }
+ pmu_ctl = BCMA_CC_PMU_CTL_PLL_UPD |
+ BCMA_CC_PMU_CTL_NOILPONW;
+ } else if (dev->dev->chip_id == 0x4322 ||
+ dev->dev->chip_id == 0x4340 ||
+ dev->dev->chip_id == 0x4341) {
+ bcma_chipco_pll_write(cc, 0x0, 0x11100070);
+ bcma_chipco_pll_write(cc, 0x1, 0x1014140a);
+ bcma_chipco_pll_write(cc, 0x5, 0x88888854);
+ if (avoid)
+ bcma_chipco_pll_write(cc, 0x2, 0x05201828);
+ else
+ bcma_chipco_pll_write(cc, 0x2, 0x05001828);
+ pmu_ctl = BCMA_CC_PMU_CTL_PLL_UPD;
+ } else {
+ return;
+ }
+ bcma_cc_set32(cc, BCMA_CC_PMU_CTL, pmu_ctl);
+ break;
+#endif
+#ifdef CONFIG_B43_SSB
+ case B43_BUS_SSB:
+ /* FIXME */
+ break;
+#endif
+ }
+}
+
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ChanspecSetup */
static void b43_nphy_channel_setup(struct b43_wldev *dev,
const struct b43_phy_n_sfo_cfg *e,
@@ -3935,6 +4272,7 @@ static void b43_nphy_channel_setup(struct b43_wldev *dev,
{
struct b43_phy *phy = &dev->phy;
struct b43_phy_n *nphy = dev->phy.n;
+ int ch = new_channel->hw_value;
u16 old_band_5ghz;
u32 tmp32;
@@ -3974,8 +4312,41 @@ static void b43_nphy_channel_setup(struct b43_wldev *dev,
b43_nphy_tx_lp_fbw(dev);
- if (dev->phy.rev >= 3 && 0) {
- /* TODO */
+ if (dev->phy.rev >= 3 &&
+ dev->phy.n->spur_avoid != B43_SPUR_AVOID_DISABLE) {
+ bool avoid = false;
+ if (dev->phy.n->spur_avoid == B43_SPUR_AVOID_FORCE) {
+ avoid = true;
+ } else if (!b43_channel_type_is_40mhz(phy->channel_type)) {
+ if ((ch >= 5 && ch <= 8) || ch == 13 || ch == 14)
+ avoid = true;
+ } else { /* 40MHz */
+ if (nphy->aband_spurwar_en &&
+ (ch == 38 || ch == 102 || ch == 118))
+ avoid = dev->dev->chip_id == 0x4716;
+ }
+
+ b43_nphy_pmu_spur_avoid(dev, avoid);
+
+ if (dev->dev->chip_id == 43222 || dev->dev->chip_id == 43224 ||
+ dev->dev->chip_id == 43225) {
+ b43_write16(dev, B43_MMIO_TSF_CLK_FRAC_LOW,
+ avoid ? 0x5341 : 0x8889);
+ b43_write16(dev, B43_MMIO_TSF_CLK_FRAC_HIGH, 0x8);
+ }
+
+ if (dev->phy.rev == 3 || dev->phy.rev == 4)
+ ; /* TODO: reset PLL */
+
+ if (avoid)
+ b43_phy_set(dev, B43_NPHY_BBCFG, B43_NPHY_BBCFG_RSTRX);
+ else
+ b43_phy_mask(dev, B43_NPHY_BBCFG,
+ ~B43_NPHY_BBCFG_RSTRX & 0xFFFF);
+
+ b43_nphy_reset_cca(dev);
+
+ /* wl sets useless phy_isspuravoid here */
}
b43_phy_write(dev, B43_NPHY_NDATAT_DUP40, 0x3830);
@@ -4039,6 +4410,10 @@ static int b43_nphy_set_channel(struct b43_wldev *dev,
return 0;
}
+/**************************************************
+ * Basic PHY ops.
+ **************************************************/
+
static int b43_nphy_op_allocate(struct b43_wldev *dev)
{
struct b43_phy_n *nphy;
@@ -4055,10 +4430,13 @@ static void b43_nphy_op_prepare_structs(struct b43_wldev *dev)
{
struct b43_phy *phy = &dev->phy;
struct b43_phy_n *nphy = phy->n;
+ struct ssb_sprom *sprom = dev->dev->bus_sprom;
memset(nphy, 0, sizeof(*nphy));
nphy->hang_avoid = (phy->rev == 3 || phy->rev == 4);
+ nphy->spur_avoid = (phy->rev >= 3) ?
+ B43_SPUR_AVOID_AUTO : B43_SPUR_AVOID_DISABLE;
nphy->gain_boost = true; /* this way we follow wl, assume it is true */
nphy->txrx_chain = 2; /* sth different than 0 and 1 for now */
nphy->phyrxchain = 3; /* to avoid b43_nphy_set_rx_core_state like wl */
@@ -4067,6 +4445,38 @@ static void b43_nphy_op_prepare_structs(struct b43_wldev *dev)
* 0x7f == 127 and we check for 128 when restoring TX pwr ctl. */
nphy->tx_pwr_idx[0] = 128;
nphy->tx_pwr_idx[1] = 128;
+
+ /* Hardware TX power control and 5GHz power gain */
+ nphy->txpwrctrl = false;
+ nphy->pwg_gain_5ghz = false;
+ if (dev->phy.rev >= 3 ||
+ (dev->dev->board_vendor == PCI_VENDOR_ID_APPLE &&
+ (dev->dev->core_rev == 11 || dev->dev->core_rev == 12))) {
+ nphy->txpwrctrl = true;
+ nphy->pwg_gain_5ghz = true;
+ } else if (sprom->revision >= 4) {
+ if (dev->phy.rev >= 2 &&
+ (sprom->boardflags2_lo & B43_BFL2_TXPWRCTRL_EN)) {
+ nphy->txpwrctrl = true;
+#ifdef CONFIG_B43_SSB
+ if (dev->dev->bus_type == B43_BUS_SSB &&
+ dev->dev->sdev->bus->bustype == SSB_BUSTYPE_PCI) {
+ struct pci_dev *pdev =
+ dev->dev->sdev->bus->host_pci;
+ if (pdev->device == 0x4328 ||
+ pdev->device == 0x432a)
+ nphy->pwg_gain_5ghz = true;
+ }
+#endif
+ } else if (sprom->boardflags2_lo & B43_BFL2_5G_PWRGAIN) {
+ nphy->pwg_gain_5ghz = true;
+ }
+ }
+
+ if (dev->phy.rev >= 3) {
+ nphy->ipa2g_on = sprom->fem.ghz2.extpa_gain == 2;
+ nphy->ipa5g_on = sprom->fem.ghz5.extpa_gain == 2;
+ }
}
static void b43_nphy_op_free(struct b43_wldev *dev)
diff --git a/drivers/net/wireless/b43/phy_n.h b/drivers/net/wireless/b43/phy_n.h
index fbf520285bd1..5de8f74cc02f 100644
--- a/drivers/net/wireless/b43/phy_n.h
+++ b/drivers/net/wireless/b43/phy_n.h
@@ -716,6 +716,12 @@
struct b43_wldev;
+enum b43_nphy_spur_avoid {
+ B43_SPUR_AVOID_DISABLE,
+ B43_SPUR_AVOID_AUTO,
+ B43_SPUR_AVOID_FORCE,
+};
+
struct b43_chanspec {
u16 center_freq;
enum nl80211_channel_type channel_type;
@@ -759,6 +765,11 @@ struct b43_phy_n_txpwrindex {
u16 locomp;
};
+struct b43_phy_n_pwr_ctl_info {
+ u8 idle_tssi_2g;
+ u8 idle_tssi_5g;
+};
+
struct b43_phy_n {
u8 antsel_type;
u8 cal_orig_pwr_idx[2];
@@ -785,12 +796,14 @@ struct b43_phy_n {
u16 mphase_txcal_bestcoeffs[11];
bool txpwrctrl;
+ bool pwg_gain_5ghz;
u8 tx_pwr_idx[2];
u16 adj_pwr_tbl[84];
u16 txcal_bbmult;
u16 txiqlocal_bestc[11];
bool txiqlocal_coeffsvalid;
struct b43_phy_n_txpwrindex txpwrindex[2];
+ struct b43_phy_n_pwr_ctl_info pwr_ctl_info[2];
struct b43_chanspec txiqlocal_chanspec;
u8 txrx_chain;
@@ -803,6 +816,7 @@ struct b43_phy_n {
u16 classifier_state;
u16 clip_state[2];
+ enum b43_nphy_spur_avoid spur_avoid;
bool aband_spurwar_en;
bool gband_spurwar_en;
diff --git a/drivers/net/wireless/b43/pio.c b/drivers/net/wireless/b43/pio.c
index fcff923b3c18..3533ab86bd36 100644
--- a/drivers/net/wireless/b43/pio.c
+++ b/drivers/net/wireless/b43/pio.c
@@ -539,7 +539,7 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb)
/* Not enough memory on the queue. */
err = -EBUSY;
ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
- q->stopped = 1;
+ q->stopped = true;
goto out;
}
@@ -566,7 +566,7 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb)
(q->free_packet_slots == 0)) {
/* The queue is full. */
ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
- q->stopped = 1;
+ q->stopped = true;
}
out:
@@ -601,7 +601,7 @@ void b43_pio_handle_txstatus(struct b43_wldev *dev,
if (q->stopped) {
ieee80211_wake_queue(dev->wl->hw, q->queue_prio);
- q->stopped = 0;
+ q->stopped = false;
}
}
@@ -617,9 +617,19 @@ static bool pio_rx_frame(struct b43_pio_rxqueue *q)
const char *err_msg = NULL;
struct b43_rxhdr_fw4 *rxhdr =
(struct b43_rxhdr_fw4 *)wl->pio_scratchspace;
+ size_t rxhdr_size = sizeof(*rxhdr);
BUILD_BUG_ON(sizeof(wl->pio_scratchspace) < sizeof(*rxhdr));
- memset(rxhdr, 0, sizeof(*rxhdr));
+ switch (dev->fw.hdr_format) {
+ case B43_FW_HDR_410:
+ case B43_FW_HDR_351:
+ rxhdr_size -= sizeof(rxhdr->format_598) -
+ sizeof(rxhdr->format_351);
+ break;
+ case B43_FW_HDR_598:
+ break;
+ }
+ memset(rxhdr, 0, rxhdr_size);
/* Check if we have data and wait for it to get ready. */
if (q->rev >= 8) {
@@ -657,11 +667,11 @@ data_ready:
/* Get the preamble (RX header) */
if (q->rev >= 8) {
- b43_block_read(dev, rxhdr, sizeof(*rxhdr),
+ b43_block_read(dev, rxhdr, rxhdr_size,
q->mmio_base + B43_PIO8_RXDATA,
sizeof(u32));
} else {
- b43_block_read(dev, rxhdr, sizeof(*rxhdr),
+ b43_block_read(dev, rxhdr, rxhdr_size,
q->mmio_base + B43_PIO_RXDATA,
sizeof(u16));
}
diff --git a/drivers/net/wireless/b43/radio_2056.c b/drivers/net/wireless/b43/radio_2056.c
index a01f776ca4de..ce037fb6789a 100644
--- a/drivers/net/wireless/b43/radio_2056.c
+++ b/drivers/net/wireless/b43/radio_2056.c
@@ -1572,14 +1572,14 @@ static const struct b2056_inittab_entry b2056_inittab_rev6_syn[] = {
[B2056_SYN_PLL_XTAL5] = { .ghz5 = 0x0077, .ghz2 = 0x0077, NOUPLOAD, },
[B2056_SYN_PLL_XTAL6] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
[B2056_SYN_PLL_REFDIV] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
- [B2056_SYN_PLL_PFD] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
+ [B2056_SYN_PLL_PFD] = { .ghz5 = 0x0006, .ghz2 = 0x0006, UPLOAD, },
[B2056_SYN_PLL_CP1] = { .ghz5 = 0x000f, .ghz2 = 0x000f, NOUPLOAD, },
- [B2056_SYN_PLL_CP2] = { .ghz5 = 0x0030, .ghz2 = 0x0030, NOUPLOAD, },
+ [B2056_SYN_PLL_CP2] = { .ghz5 = 0x003f, .ghz2 = 0x003f, UPLOAD, },
[B2056_SYN_PLL_CP3] = { .ghz5 = 0x0032, .ghz2 = 0x0032, NOUPLOAD, },
- [B2056_SYN_PLL_LOOPFILTER1] = { .ghz5 = 0x000d, .ghz2 = 0x000d, NOUPLOAD, },
- [B2056_SYN_PLL_LOOPFILTER2] = { .ghz5 = 0x000d, .ghz2 = 0x000d, NOUPLOAD, },
+ [B2056_SYN_PLL_LOOPFILTER1] = { .ghz5 = 0x0006, .ghz2 = 0x0006, UPLOAD, },
+ [B2056_SYN_PLL_LOOPFILTER2] = { .ghz5 = 0x0006, .ghz2 = 0x0006, UPLOAD, },
[B2056_SYN_PLL_LOOPFILTER3] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
- [B2056_SYN_PLL_LOOPFILTER4] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
+ [B2056_SYN_PLL_LOOPFILTER4] = { .ghz5 = 0x002b, .ghz2 = 0x002b, UPLOAD, },
[B2056_SYN_PLL_LOOPFILTER5] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
[B2056_SYN_PLL_MMD1] = { .ghz5 = 0x001c, .ghz2 = 0x001c, NOUPLOAD, },
[B2056_SYN_PLL_MMD2] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
@@ -9055,6 +9055,21 @@ void b2056_upload_inittabs(struct b43_wldev *dev,
B2056_RX1, pts->rx, pts->rx_length);
}
+void b2056_upload_syn_pll_cp2(struct b43_wldev *dev, bool ghz5)
+{
+ struct b2056_inittabs_pts *pts;
+ const struct b2056_inittab_entry *e;
+
+ if (dev->phy.rev >= ARRAY_SIZE(b2056_inittabs)) {
+ B43_WARN_ON(1);
+ return;
+ }
+ pts = &b2056_inittabs[dev->phy.rev];
+ e = &pts->syn[B2056_SYN_PLL_CP2];
+
+ b43_radio_write(dev, B2056_SYN_PLL_CP2, ghz5 ? e->ghz5 : e->ghz2);
+}
+
const struct b43_nphy_channeltab_entry_rev3 *
b43_nphy_get_chantabent_rev3(struct b43_wldev *dev, u16 freq)
{
diff --git a/drivers/net/wireless/b43/radio_2056.h b/drivers/net/wireless/b43/radio_2056.h
index a7159d8578be..5b86673459fa 100644
--- a/drivers/net/wireless/b43/radio_2056.h
+++ b/drivers/net/wireless/b43/radio_2056.h
@@ -1090,6 +1090,7 @@ struct b43_nphy_channeltab_entry_rev3 {
void b2056_upload_inittabs(struct b43_wldev *dev,
bool ghz5, bool ignore_uploadflag);
+void b2056_upload_syn_pll_cp2(struct b43_wldev *dev, bool ghz5);
/* Get the NPHY Channel Switch Table entry for a channel.
* Returns NULL on failure to find an entry. */
diff --git a/drivers/net/wireless/b43/tables_nphy.c b/drivers/net/wireless/b43/tables_nphy.c
index 7b326f2efdc9..f7def13524dd 100644
--- a/drivers/net/wireless/b43/tables_nphy.c
+++ b/drivers/net/wireless/b43/tables_nphy.c
@@ -2171,6 +2171,48 @@ static const u16 b43_ntab_loftlt1_r3[] = {
0x0000, 0x0000,
};
+/* volatile tables, PHY revision >= 3 */
+
+/* indexed by antswctl2g */
+static const u16 b43_ntab_antswctl2g_r3[4][32] = {
+ {
+ 0x0082, 0x0082, 0x0211, 0x0222, 0x0328,
+ 0x0000, 0x0000, 0x0000, 0x0144, 0x0000,
+ 0x0000, 0x0000, 0x0188, 0x0000, 0x0000,
+ 0x0000, 0x0082, 0x0082, 0x0211, 0x0222,
+ 0x0328, 0x0000, 0x0000, 0x0000, 0x0144,
+ 0x0000, 0x0000, 0x0000, 0x0188, 0x0000,
+ 0x0000, 0x0000,
+ },
+ {
+ 0x0022, 0x0022, 0x0011, 0x0022, 0x0022,
+ 0x0000, 0x0000, 0x0000, 0x0011, 0x0000,
+ 0x0000, 0x0000, 0x0022, 0x0000, 0x0000,
+ 0x0000, 0x0022, 0x0022, 0x0011, 0x0022,
+ 0x0022, 0x0000, 0x0000, 0x0000, 0x0011,
+ 0x0000, 0x0000, 0x0000, 0x0022, 0x0000,
+ 0x0000, 0x0000,
+ },
+ {
+ 0x0088, 0x0088, 0x0044, 0x0088, 0x0088,
+ 0x0000, 0x0000, 0x0000, 0x0044, 0x0000,
+ 0x0000, 0x0000, 0x0088, 0x0000, 0x0000,
+ 0x0000, 0x0088, 0x0088, 0x0044, 0x0088,
+ 0x0088, 0x0000, 0x0000, 0x0000, 0x0044,
+ 0x0000, 0x0000, 0x0000, 0x0088, 0x0000,
+ 0x0000, 0x0000,
+ },
+ {
+ 0x0022, 0x0022, 0x0011, 0x0022, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0011, 0x0000,
+ 0x0000, 0x0000, 0x0022, 0x0000, 0x0000,
+ 0x03cc, 0x0022, 0x0022, 0x0011, 0x0022,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0011,
+ 0x0000, 0x0000, 0x0000, 0x0022, 0x0000,
+ 0x0000, 0x03cc,
+ }
+};
+
/* TX gain tables */
const u32 b43_ntab_tx_gain_rev0_1_2[] = {
0x03cc2b44, 0x03cc2b42, 0x03cc2a44, 0x03cc2a42,
@@ -2652,7 +2694,7 @@ const u16 tbl_tx_iqlo_cal_cmds_fullcal_nphyrev3[] = {
const s16 tbl_tx_filter_coef_rev4[7][15] = {
{ -377, 137, -407, 208, -1527,
956, 93, 186, 93, 230,
- -44, 230, 20, -191, 201 },
+ -44, 230, 201, -191, 201 },
{ -77, 20, -98, 49, -93,
60, 56, 111, 56, 26,
-5, 26, 34, -32, 34 },
@@ -2710,7 +2752,18 @@ const struct nphy_rf_control_override_rev3 tbl_rf_control_override_rev3[] = {
{ 0x00C0, 6, 0xE7, 0xF9, 0xEC, 0xFB } /* field == 0x4000 (fls 15) */
};
-struct nphy_gain_ctl_workaround_entry nphy_gain_ctl_workaround[2][3] = {
+struct nphy_gain_ctl_workaround_entry nphy_gain_ctl_wa_phy6_radio11_ghz2 = {
+ { 10, 14, 19, 27 },
+ { -5, 6, 10, 15 },
+ { 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA },
+ { 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 },
+ 0x427E,
+ { 0x413F, 0x413F, 0x413F, 0x413F },
+ 0x007E, 0x0066, 0x1074,
+ 0x18, 0x18, 0x18,
+ 0x01D0, 0x5,
+};
+struct nphy_gain_ctl_workaround_entry nphy_gain_ctl_workaround[2][4] = {
{ /* 2GHz */
{ /* PHY rev 3 */
{ 7, 11, 16, 23 },
@@ -2734,15 +2787,26 @@ struct nphy_gain_ctl_workaround_entry nphy_gain_ctl_workaround[2][3] = {
0x18, 0x18, 0x18,
0x01A1, 0x5,
},
- { /* PHY rev 5+ */
+ { /* PHY rev 5 */
{ 9, 13, 18, 26 },
{ -3, 7, 11, 16 },
{ 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA },
{ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 },
0x427E, /* invalid for external LNA! */
{ 0x413F, 0x413F, 0x413F, 0x413F }, /* invalid for external LNA! */
- 0x1076, 0x0066, 0x106A,
- 0xC, 0xC, 0xC,
+ 0x1076, 0x0066, 0x0000, /* low is invalid (the last one) */
+ 0x18, 0x18, 0x18,
+ 0x01D0, 0x9,
+ },
+ { /* PHY rev 6+ */
+ { 8, 13, 18, 25 },
+ { -5, 6, 10, 14 },
+ { 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA },
+ { 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 },
+ 0x527E, /* invalid for external LNA! */
+ { 0x513F, 0x513F, 0x513F, 0x513F }, /* invalid for external LNA! */
+ 0x1076, 0x0066, 0x0000, /* low is invalid (the last one) */
+ 0x18, 0x18, 0x18,
0x01D0, 0x5,
},
},
@@ -2769,7 +2833,7 @@ struct nphy_gain_ctl_workaround_entry nphy_gain_ctl_workaround[2][3] = {
0x24, 0x24, 0x24,
0x0107, 25,
},
- { /* PHY rev 5+ */
+ { /* PHY rev 5 */
{ 6, 10, 16, 21 },
{ -7, 0, 4, 8 },
{ 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD },
@@ -2780,6 +2844,17 @@ struct nphy_gain_ctl_workaround_entry nphy_gain_ctl_workaround[2][3] = {
0x24, 0x24, 0x24,
0x00A9, 25,
},
+ { /* PHY rev 6+ */
+ { 6, 10, 16, 21 },
+ { -7, 0, 4, 8 },
+ { 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD },
+ { 4, 4, 4, 4, 4, 4, 4, 4, 4, 4 },
+ 0x729E,
+ { 0x714F, 0x714F, 0x714F, 0x714F },
+ 0x029E, 0x2084, 0x2086,
+ 0x24, 0x24, 0x24, /* low is invalid for radio rev 11! */
+ 0x00F0, 25,
+ },
},
};
@@ -2838,9 +2913,8 @@ u32 b43_ntab_read(struct b43_wldev *dev, u32 offset)
break;
case B43_NTAB_32BIT:
b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset);
- value = b43_phy_read(dev, B43_NPHY_TABLE_DATAHI);
- value <<= 16;
- value |= b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
+ value = b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
+ value |= b43_phy_read(dev, B43_NPHY_TABLE_DATAHI) << 16;
break;
default:
B43_WARN_ON(1);
@@ -2864,6 +2938,12 @@ void b43_ntab_read_bulk(struct b43_wldev *dev, u32 offset,
b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset);
for (i = 0; i < nr_elements; i++) {
+ /* Auto increment broken + caching issue on BCM43224? */
+ if (dev->dev->chip_id == 43224 && dev->dev->chip_rev == 1) {
+ b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset + i);
+ }
+
switch (type) {
case B43_NTAB_8BIT:
*data = b43_phy_read(dev, B43_NPHY_TABLE_DATALO) & 0xFF;
@@ -2874,9 +2954,10 @@ void b43_ntab_read_bulk(struct b43_wldev *dev, u32 offset,
data += 2;
break;
case B43_NTAB_32BIT:
- *((u32 *)data) = b43_phy_read(dev, B43_NPHY_TABLE_DATAHI);
- *((u32 *)data) <<= 16;
- *((u32 *)data) |= b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
+ *((u32 *)data) =
+ b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
+ *((u32 *)data) |=
+ b43_phy_read(dev, B43_NPHY_TABLE_DATAHI) << 16;
data += 4;
break;
default:
@@ -2932,6 +3013,13 @@ void b43_ntab_write_bulk(struct b43_wldev *dev, u32 offset,
b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset);
for (i = 0; i < nr_elements; i++) {
+ /* Auto increment broken + caching issue on BCM43224? */
+ if ((offset >> 10) == 9 && dev->dev->chip_id == 43224 &&
+ dev->dev->chip_rev == 1) {
+ b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset + i);
+ }
+
switch (type) {
case B43_NTAB_8BIT:
value = *data;
@@ -2999,6 +3087,8 @@ void b43_nphy_rev0_1_2_tables_init(struct b43_wldev *dev)
} while (0)
void b43_nphy_rev3plus_tables_init(struct b43_wldev *dev)
{
+ struct ssb_sprom *sprom = dev->dev->bus_sprom;
+
/* Static tables */
ntab_upload_r3(dev, B43_NTAB_FRAMESTRUCT_R3, b43_ntab_framestruct_r3);
ntab_upload_r3(dev, B43_NTAB_PILOT_R3, b43_ntab_pilot_r3);
@@ -3029,7 +3119,11 @@ void b43_nphy_rev3plus_tables_init(struct b43_wldev *dev)
ntab_upload_r3(dev, B43_NTAB_C1_LOFEEDTH_R3, b43_ntab_loftlt1_r3);
/* Volatile tables */
- /* TODO */
+ if (sprom->fem.ghz2.antswlut < ARRAY_SIZE(b43_ntab_antswctl2g_r3))
+ ntab_upload_r3(dev, B43_NTAB_ANT_SW_CTL_R3,
+ b43_ntab_antswctl2g_r3[sprom->fem.ghz2.antswlut]);
+ else
+ B43_WARN_ON(1);
}
struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent(
@@ -3037,26 +3131,67 @@ struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent(
{
struct nphy_gain_ctl_workaround_entry *e;
u8 phy_idx;
+ u8 tr_iso = ghz5 ? dev->dev->bus_sprom->fem.ghz5.tr_iso :
+ dev->dev->bus_sprom->fem.ghz2.tr_iso;
+
+ if (!ghz5 && dev->phy.rev >= 6 && dev->phy.radio_rev == 11)
+ return &nphy_gain_ctl_wa_phy6_radio11_ghz2;
B43_WARN_ON(dev->phy.rev < 3);
- if (dev->phy.rev >= 5)
+ if (dev->phy.rev >= 6)
+ phy_idx = 3;
+ else if (dev->phy.rev == 5)
phy_idx = 2;
else if (dev->phy.rev == 4)
phy_idx = 1;
else
phy_idx = 0;
-
e = &nphy_gain_ctl_workaround[ghz5][phy_idx];
- /* Only one entry differs for external LNA, so instead making whole
- * table 2 times bigger, hack is here
- */
- if (!ghz5 && dev->phy.rev >= 5 && ext_lna) {
- e->rfseq_init[0] &= 0x0FFF;
- e->rfseq_init[1] &= 0x0FFF;
- e->rfseq_init[2] &= 0x0FFF;
- e->rfseq_init[3] &= 0x0FFF;
- e->init_gain &= 0x0FFF;
+ /* Some workarounds to the workarounds... */
+ if (ghz5 && dev->phy.rev >= 6) {
+ if (dev->phy.radio_rev == 11 &&
+ !b43_channel_type_is_40mhz(dev->phy.channel_type))
+ e->cliplo_gain = 0x2d;
+ } else if (!ghz5 && dev->phy.rev >= 5) {
+ if (ext_lna) {
+ e->rfseq_init[0] &= ~0x4000;
+ e->rfseq_init[1] &= ~0x4000;
+ e->rfseq_init[2] &= ~0x4000;
+ e->rfseq_init[3] &= ~0x4000;
+ e->init_gain &= ~0x4000;
+ }
+		switch (tr_iso) {
+		case 0:
+			e->cliplo_gain = 0x0062;
+			break;
+		case 1:
+			e->cliplo_gain = 0x0064;
+			break;
+		case 2:
+			e->cliplo_gain = 0x006a;
+			break;
+		case 3:
+			e->cliplo_gain = 0x106a;
+			break;
+		case 4:
+			e->cliplo_gain = 0x106c;
+			break;
+		case 5:
+			e->cliplo_gain = 0x1074;
+			break;
+		case 6:
+			e->cliplo_gain = 0x107c;
+			break;
+		case 7:
+			e->cliplo_gain = 0x207c;
+			break;
+		default:
+			e->cliplo_gain = 0x106a;
+			break;
+		}
+ } else if (ghz5 && dev->phy.rev == 4 && ext_lna) {
+ e->rfseq_init[0] &= ~0x4000;
+ e->rfseq_init[1] &= ~0x4000;
+ e->rfseq_init[2] &= ~0x4000;
+ e->rfseq_init[3] &= ~0x4000;
+ e->init_gain &= ~0x4000;
+ e->rfseq_init[0] |= 0x1000;
+ e->rfseq_init[1] |= 0x1000;
+ e->rfseq_init[2] |= 0x1000;
+ e->rfseq_init[3] |= 0x1000;
+ e->init_gain |= 0x1000;
}
return e;
diff --git a/drivers/net/wireless/b43/tables_nphy.h b/drivers/net/wireless/b43/tables_nphy.h
index a81696bff0ed..97038c481930 100644
--- a/drivers/net/wireless/b43/tables_nphy.h
+++ b/drivers/net/wireless/b43/tables_nphy.h
@@ -126,26 +126,29 @@ struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent(
#define B43_NTAB_C1_LOFEEDTH B43_NTAB16(0x1B, 0x1C0) /* Local Oscillator Feed Through Lookup Table Core 1 */
#define B43_NTAB_C1_LOFEEDTH_SIZE 128
+/* Volatile N-PHY tables, PHY revision >= 3 */
+#define B43_NTAB_ANT_SW_CTL_R3 B43_NTAB16( 9, 0) /* antenna software control */
+
/* Static N-PHY tables, PHY revision >= 3 */
-#define B43_NTAB_FRAMESTRUCT_R3 B43_NTAB32(10, 000) /* frame struct */
-#define B43_NTAB_PILOT_R3 B43_NTAB16(11, 000) /* pilot */
-#define B43_NTAB_TMAP_R3 B43_NTAB32(12, 000) /* TM AP */
-#define B43_NTAB_INTLEVEL_R3 B43_NTAB32(13, 000) /* INT LV */
-#define B43_NTAB_TDTRN_R3 B43_NTAB32(14, 000) /* TD TRN */
-#define B43_NTAB_NOISEVAR0_R3 B43_NTAB32(16, 000) /* noise variance 0 */
+#define B43_NTAB_FRAMESTRUCT_R3 B43_NTAB32(10, 0) /* frame struct */
+#define B43_NTAB_PILOT_R3 B43_NTAB16(11, 0) /* pilot */
+#define B43_NTAB_TMAP_R3 B43_NTAB32(12, 0) /* TM AP */
+#define B43_NTAB_INTLEVEL_R3 B43_NTAB32(13, 0) /* INT LV */
+#define B43_NTAB_TDTRN_R3 B43_NTAB32(14, 0) /* TD TRN */
+#define B43_NTAB_NOISEVAR0_R3 B43_NTAB32(16, 0) /* noise variance 0 */
#define B43_NTAB_NOISEVAR1_R3 B43_NTAB32(16, 128) /* noise variance 1 */
-#define B43_NTAB_MCS_R3 B43_NTAB16(18, 000) /* MCS */
+#define B43_NTAB_MCS_R3 B43_NTAB16(18, 0) /* MCS */
#define B43_NTAB_TDI20A0_R3 B43_NTAB32(19, 128) /* TDI 20/0 */
#define B43_NTAB_TDI20A1_R3 B43_NTAB32(19, 256) /* TDI 20/1 */
#define B43_NTAB_TDI40A0_R3 B43_NTAB32(19, 640) /* TDI 40/0 */
#define B43_NTAB_TDI40A1_R3 B43_NTAB32(19, 768) /* TDI 40/1 */
-#define B43_NTAB_PILOTLT_R3 B43_NTAB32(20, 000) /* PLT lookup */
-#define B43_NTAB_CHANEST_R3 B43_NTAB32(22, 000) /* channel estimate */
-#define B43_NTAB_FRAMELT_R3 B43_NTAB8 (24, 000) /* frame lookup */
-#define B43_NTAB_C0_ESTPLT_R3 B43_NTAB8 (26, 000) /* estimated power lookup 0 */
-#define B43_NTAB_C1_ESTPLT_R3 B43_NTAB8 (27, 000) /* estimated power lookup 1 */
-#define B43_NTAB_C0_ADJPLT_R3 B43_NTAB8 (26, 064) /* adjusted power lookup 0 */
-#define B43_NTAB_C1_ADJPLT_R3 B43_NTAB8 (27, 064) /* adjusted power lookup 1 */
+#define B43_NTAB_PILOTLT_R3 B43_NTAB32(20, 0) /* PLT lookup */
+#define B43_NTAB_CHANEST_R3 B43_NTAB32(22, 0) /* channel estimate */
+#define B43_NTAB_FRAMELT_R3 B43_NTAB8(24, 0) /* frame lookup */
+#define B43_NTAB_C0_ESTPLT_R3 B43_NTAB8(26, 0) /* estimated power lookup 0 */
+#define B43_NTAB_C1_ESTPLT_R3 B43_NTAB8(27, 0) /* estimated power lookup 1 */
+#define B43_NTAB_C0_ADJPLT_R3 B43_NTAB8(26, 64) /* adjusted power lookup 0 */
+#define B43_NTAB_C1_ADJPLT_R3 B43_NTAB8(27, 64) /* adjusted power lookup 1 */
#define B43_NTAB_C0_GAINCTL_R3 B43_NTAB32(26, 192) /* gain control lookup 0 */
#define B43_NTAB_C1_GAINCTL_R3 B43_NTAB32(27, 192) /* gain control lookup 1 */
#define B43_NTAB_C0_IQLT_R3 B43_NTAB32(26, 320) /* I/Q lookup 0 */
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index 5f77cbe0b6aa..2c5367884b3f 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -874,7 +874,7 @@ bool b43_fill_txstatus_report(struct b43_wldev *dev,
struct ieee80211_tx_info *report,
const struct b43_txstatus *status)
{
- bool frame_success = 1;
+ bool frame_success = true;
int retry_limit;
	/* preserve the configured retry limit before clearing the status
@@ -890,7 +890,7 @@ bool b43_fill_txstatus_report(struct b43_wldev *dev,
/* The frame was not ACKed... */
if (!(report->flags & IEEE80211_TX_CTL_NO_ACK)) {
/* ...but we expected an ACK. */
- frame_success = 0;
+ frame_success = false;
}
}
if (status->frame_count == 0) {
diff --git a/drivers/net/wireless/b43legacy/b43legacy.h b/drivers/net/wireless/b43legacy/b43legacy.h
index 1d4fc9db7f5e..98e3d44400c6 100644
--- a/drivers/net/wireless/b43legacy/b43legacy.h
+++ b/drivers/net/wireless/b43legacy/b43legacy.h
@@ -560,8 +560,16 @@ struct b43legacy_key {
u8 algorithm;
};
+#define B43legacy_QOS_QUEUE_NUM 4
+
struct b43legacy_wldev;
+/* QOS parameters for a queue. */
+struct b43legacy_qos_params {
+ /* The QOS parameters */
+ struct ieee80211_tx_queue_params p;
+};
+
/* Data structure for the WLAN parts (802.11 cores) of the b43legacy chip. */
struct b43legacy_wl {
/* Pointer to the active wireless device on this chip */
@@ -611,6 +619,18 @@ struct b43legacy_wl {
bool beacon1_uploaded;
bool beacon_templates_virgin; /* Never wrote the templates? */
struct work_struct beacon_update_trigger;
+ /* The current QOS parameters for the 4 queues. */
+ struct b43legacy_qos_params qos_params[B43legacy_QOS_QUEUE_NUM];
+
+ /* Packet transmit work */
+ struct work_struct tx_work;
+
+ /* Queue of packets to be transmitted. */
+ struct sk_buff_head tx_queue[B43legacy_QOS_QUEUE_NUM];
+
+ /* Flags that implement the stopping of the queues. */
+ bool tx_queue_stopped[B43legacy_QOS_QUEUE_NUM];
+
};
/* Pointers to the firmware data and meta information about it. */
diff --git a/drivers/net/wireless/b43legacy/dma.c b/drivers/net/wireless/b43legacy/dma.c
index c5535adf6991..f1f8bd09bd87 100644
--- a/drivers/net/wireless/b43legacy/dma.c
+++ b/drivers/net/wireless/b43legacy/dma.c
@@ -715,7 +715,7 @@ struct b43legacy_dmaring *b43legacy_setup_dmaring(struct b43legacy_wldev *dev,
ring->mmio_base = b43legacy_dmacontroller_base(type, controller_index);
ring->index = controller_index;
if (for_tx) {
- ring->tx = 1;
+ ring->tx = true;
ring->current_slot = -1;
} else {
if (ring->index == 0) {
@@ -727,7 +727,6 @@ struct b43legacy_dmaring *b43legacy_setup_dmaring(struct b43legacy_wldev *dev,
} else
B43legacy_WARN_ON(1);
}
- spin_lock_init(&ring->lock);
#ifdef CONFIG_B43LEGACY_DEBUG
ring->last_injected_overflow = jiffies;
#endif
@@ -806,7 +805,7 @@ void b43legacy_dma_free(struct b43legacy_wldev *dev)
static int b43legacy_dma_set_mask(struct b43legacy_wldev *dev, u64 mask)
{
u64 orig_mask = mask;
- bool fallback = 0;
+ bool fallback = false;
int err;
/* Try to set the DMA mask. If it fails, try falling back to a
@@ -820,12 +819,12 @@ static int b43legacy_dma_set_mask(struct b43legacy_wldev *dev, u64 mask)
}
if (mask == DMA_BIT_MASK(64)) {
mask = DMA_BIT_MASK(32);
- fallback = 1;
+ fallback = true;
continue;
}
if (mask == DMA_BIT_MASK(32)) {
mask = DMA_BIT_MASK(30);
- fallback = 1;
+ fallback = true;
continue;
}
b43legacyerr(dev->wl, "The machine/kernel does not support "
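The hunk above keeps the usual progressive-fallback idiom: try the widest DMA mask first and step down until the platform accepts one, remembering whether a fallback happened. A compact sketch of the same idiom on a bare struct device (illustration only, not the driver's code):

	static int example_pick_dma_mask(struct device *dev)
	{
		/* Widest first; stop at the first mask the platform accepts. */
		static const u64 masks[] = {
			DMA_BIT_MASK(64), DMA_BIT_MASK(32), DMA_BIT_MASK(30),
		};
		int i;

		for (i = 0; i < ARRAY_SIZE(masks); i++)
			if (!dma_set_mask(dev, masks[i]))
				return 0;

		return -EOPNOTSUPP;
	}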
@@ -858,7 +857,7 @@ int b43legacy_dma_init(struct b43legacy_wldev *dev)
#ifdef CONFIG_B43LEGACY_PIO
b43legacywarn(dev->wl, "DMA for this device not supported. "
"Falling back to PIO\n");
- dev->__using_pio = 1;
+ dev->__using_pio = true;
return -EAGAIN;
#else
b43legacyerr(dev->wl, "DMA for this device not supported and "
@@ -1068,7 +1067,7 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring,
memset(meta, 0, sizeof(*meta));
meta->skb = skb;
- meta->is_last_fragment = 1;
+ meta->is_last_fragment = true;
meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
/* create a bounce buffer in zone_dma on mapping failure. */
@@ -1144,10 +1143,8 @@ int b43legacy_dma_tx(struct b43legacy_wldev *dev,
{
struct b43legacy_dmaring *ring;
int err = 0;
- unsigned long flags;
ring = priority_to_txring(dev, skb_get_queue_mapping(skb));
- spin_lock_irqsave(&ring->lock, flags);
B43legacy_WARN_ON(!ring->tx);
if (unlikely(ring->stopped)) {
@@ -1157,16 +1154,14 @@ int b43legacy_dma_tx(struct b43legacy_wldev *dev,
* For now, just refuse the transmit. */
if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
b43legacyerr(dev->wl, "Packet after queue stopped\n");
- err = -ENOSPC;
- goto out_unlock;
+ return -ENOSPC;
}
if (unlikely(WARN_ON(free_slots(ring) < SLOTS_PER_PACKET))) {
/* If we get here, we have a real error with the queue
* full, but queues not stopped. */
b43legacyerr(dev->wl, "DMA queue overflow\n");
- err = -ENOSPC;
- goto out_unlock;
+ return -ENOSPC;
}
/* dma_tx_fragment might reallocate the skb, so invalidate pointers pointing
@@ -1176,25 +1171,23 @@ int b43legacy_dma_tx(struct b43legacy_wldev *dev,
/* Drop this packet, as we don't have the encryption key
* anymore and must not transmit it unencrypted. */
dev_kfree_skb_any(skb);
- err = 0;
- goto out_unlock;
+ return 0;
}
if (unlikely(err)) {
b43legacyerr(dev->wl, "DMA tx mapping failure\n");
- goto out_unlock;
+ return err;
}
if ((free_slots(ring) < SLOTS_PER_PACKET) ||
should_inject_overflow(ring)) {
/* This TX ring is full. */
- ieee80211_stop_queue(dev->wl->hw, txring_to_priority(ring));
- ring->stopped = 1;
+ unsigned int skb_mapping = skb_get_queue_mapping(skb);
+ ieee80211_stop_queue(dev->wl->hw, skb_mapping);
+ dev->wl->tx_queue_stopped[skb_mapping] = 1;
+ ring->stopped = true;
if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
b43legacydbg(dev->wl, "Stopped TX ring %d\n",
ring->index);
}
-out_unlock:
- spin_unlock_irqrestore(&ring->lock, flags);
-
return err;
}
@@ -1205,14 +1198,29 @@ void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev,
struct b43legacy_dmadesc_meta *meta;
int retry_limit;
int slot;
+ int firstused;
ring = parse_cookie(dev, status->cookie, &slot);
if (unlikely(!ring))
return;
- B43legacy_WARN_ON(!irqs_disabled());
- spin_lock(&ring->lock);
-
B43legacy_WARN_ON(!ring->tx);
+
+ /* Sanity check: TX packets are processed in-order on one ring.
+ * Check if the slot deduced from the cookie really is the first
+ * used slot. */
+ firstused = ring->current_slot - ring->used_slots + 1;
+ if (firstused < 0)
+ firstused = ring->nr_slots + firstused;
+ if (unlikely(slot != firstused)) {
+ /* This possibly is a firmware bug and will result in
+ * malfunction, memory leaks and/or stall of DMA functionality.
+ */
+ b43legacydbg(dev->wl, "Out of order TX status report on DMA "
+ "ring %d. Expected %d, but got %d\n",
+ ring->index, firstused, slot);
+ return;
+ }
+
while (1) {
B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
op32_idx2desc(ring, slot, &meta);
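The firstused computation above is ordinary ring-buffer arithmetic: the oldest in-flight slot trails current_slot by used_slots - 1 and wraps modulo nr_slots. A small worked example with made-up numbers:

	/* Illustration only: 256-slot ring, current_slot = 3, used_slots = 10. */
	int nr_slots = 256, current_slot = 3, used_slots = 10;
	int firstused = current_slot - used_slots + 1;	/* -6 */

	if (firstused < 0)
		firstused += nr_slots;			/* 250 */

	/* A TX status whose cookie decodes to any slot other than 250 is
	 * reported as out of order and ignored, as in the hunk above. */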
@@ -1285,14 +1293,21 @@ void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev,
dev->stats.last_tx = jiffies;
if (ring->stopped) {
B43legacy_WARN_ON(free_slots(ring) < SLOTS_PER_PACKET);
- ieee80211_wake_queue(dev->wl->hw, txring_to_priority(ring));
- ring->stopped = 0;
+ ring->stopped = false;
+ }
+
+ if (dev->wl->tx_queue_stopped[ring->queue_prio]) {
+ dev->wl->tx_queue_stopped[ring->queue_prio] = 0;
+ } else {
+ /* If the driver queue is running wake the corresponding
+ * mac80211 queue. */
+ ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
b43legacydbg(dev->wl, "Woke up TX ring %d\n",
- ring->index);
+ ring->index);
}
-
- spin_unlock(&ring->lock);
+ /* Add work to the queue. */
+ ieee80211_queue_work(dev->wl->hw, &dev->wl->tx_work);
}
static void dma_rx(struct b43legacy_dmaring *ring,
@@ -1415,22 +1430,14 @@ void b43legacy_dma_rx(struct b43legacy_dmaring *ring)
static void b43legacy_dma_tx_suspend_ring(struct b43legacy_dmaring *ring)
{
- unsigned long flags;
-
- spin_lock_irqsave(&ring->lock, flags);
B43legacy_WARN_ON(!ring->tx);
op32_tx_suspend(ring);
- spin_unlock_irqrestore(&ring->lock, flags);
}
static void b43legacy_dma_tx_resume_ring(struct b43legacy_dmaring *ring)
{
- unsigned long flags;
-
- spin_lock_irqsave(&ring->lock, flags);
B43legacy_WARN_ON(!ring->tx);
op32_tx_resume(ring);
- spin_unlock_irqrestore(&ring->lock, flags);
}
void b43legacy_dma_tx_suspend(struct b43legacy_wldev *dev)
diff --git a/drivers/net/wireless/b43legacy/dma.h b/drivers/net/wireless/b43legacy/dma.h
index 504a58767e95..c3282f906bc7 100644
--- a/drivers/net/wireless/b43legacy/dma.h
+++ b/drivers/net/wireless/b43legacy/dma.h
@@ -150,8 +150,9 @@ struct b43legacy_dmaring {
enum b43legacy_dmatype type;
/* Boolean. Is this ring stopped at ieee80211 level? */
bool stopped;
- /* Lock, only used for TX. */
- spinlock_t lock;
+ /* The QOS priority assigned to this ring. Only used for TX rings.
+ * This is the mac80211 "queue" value. */
+ u8 queue_prio;
struct b43legacy_wldev *dev;
#ifdef CONFIG_B43LEGACY_DEBUG
/* Maximum number of used slots. */
diff --git a/drivers/net/wireless/b43legacy/leds.c b/drivers/net/wireless/b43legacy/leds.c
index 2f1bfdc44f94..fd4565389c77 100644
--- a/drivers/net/wireless/b43legacy/leds.c
+++ b/drivers/net/wireless/b43legacy/leds.c
@@ -203,11 +203,11 @@ void b43legacy_leds_init(struct b43legacy_wldev *dev)
if (sprom[i] == 0xFF) {
/* There is no LED information in the SPROM
* for this LED. Hardcode it here. */
- activelow = 0;
+ activelow = false;
switch (i) {
case 0:
behaviour = B43legacy_LED_ACTIVITY;
- activelow = 1;
+ activelow = true;
if (bus->boardinfo.vendor == PCI_VENDOR_ID_COMPAQ)
behaviour = B43legacy_LED_RADIO_ALL;
break;
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index 20f02437af8c..75e70bce40f6 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -722,9 +722,9 @@ void b43legacy_wireless_core_reset(struct b43legacy_wldev *dev, u32 flags)
macctl &= ~B43legacy_MACCTL_GMODE;
if (flags & B43legacy_TMSLOW_GMODE) {
macctl |= B43legacy_MACCTL_GMODE;
- dev->phy.gmode = 1;
+ dev->phy.gmode = true;
} else
- dev->phy.gmode = 0;
+ dev->phy.gmode = false;
macctl |= B43legacy_MACCTL_IHR_ENABLED;
b43legacy_write32(dev, B43legacy_MMIO_MACCTL, macctl);
}
@@ -811,7 +811,7 @@ static void b43legacy_calculate_link_quality(struct b43legacy_wldev *dev)
if (dev->noisecalc.calculation_running)
return;
dev->noisecalc.channel_at_start = dev->phy.channel;
- dev->noisecalc.calculation_running = 1;
+ dev->noisecalc.calculation_running = true;
dev->noisecalc.nr_samples = 0;
b43legacy_generate_noise_sample(dev);
@@ -873,7 +873,7 @@ static void handle_irq_noise(struct b43legacy_wldev *dev)
dev->stats.link_noise = average;
drop_calculation:
- dev->noisecalc.calculation_running = 0;
+ dev->noisecalc.calculation_running = false;
return;
}
generate_new:
@@ -889,7 +889,7 @@ static void handle_irq_tbtt_indication(struct b43legacy_wldev *dev)
b43legacy_power_saving_ctl_bits(dev, -1, -1);
}
if (b43legacy_is_mode(dev->wl, NL80211_IFTYPE_ADHOC))
- dev->dfq_valid = 1;
+ dev->dfq_valid = true;
}
static void handle_irq_atim_end(struct b43legacy_wldev *dev)
@@ -898,7 +898,7 @@ static void handle_irq_atim_end(struct b43legacy_wldev *dev)
b43legacy_write32(dev, B43legacy_MMIO_MACCMD,
b43legacy_read32(dev, B43legacy_MMIO_MACCMD)
| B43legacy_MACCMD_DFQ_VALID);
- dev->dfq_valid = 0;
+ dev->dfq_valid = false;
}
}
@@ -971,7 +971,7 @@ static void b43legacy_write_beacon_template(struct b43legacy_wldev *dev,
unsigned int i, len, variable_len;
const struct ieee80211_mgmt *bcn;
const u8 *ie;
- bool tim_found = 0;
+ bool tim_found = false;
unsigned int rate;
u16 ctl;
int antenna;
@@ -1019,7 +1019,7 @@ static void b43legacy_write_beacon_template(struct b43legacy_wldev *dev,
/* A valid TIM is at least 4 bytes long. */
if (ie_len < 4)
break;
- tim_found = 1;
+ tim_found = true;
tim_position = sizeof(struct b43legacy_plcp_hdr6);
tim_position += offsetof(struct ieee80211_mgmt,
@@ -1172,7 +1172,7 @@ static void b43legacy_upload_beacon0(struct b43legacy_wldev *dev)
* but we don't use that feature anyway. */
b43legacy_write_probe_resp_template(dev, 0x268, 0x4A,
&__b43legacy_ratetable[3]);
- wl->beacon0_uploaded = 1;
+ wl->beacon0_uploaded = true;
}
static void b43legacy_upload_beacon1(struct b43legacy_wldev *dev)
@@ -1182,7 +1182,7 @@ static void b43legacy_upload_beacon1(struct b43legacy_wldev *dev)
if (wl->beacon1_uploaded)
return;
b43legacy_write_beacon_template(dev, 0x468, 0x1A);
- wl->beacon1_uploaded = 1;
+ wl->beacon1_uploaded = true;
}
static void handle_irq_beacon(struct b43legacy_wldev *dev)
@@ -1212,7 +1212,7 @@ static void handle_irq_beacon(struct b43legacy_wldev *dev)
if (unlikely(wl->beacon_templates_virgin)) {
/* We never uploaded a beacon before.
* Upload both templates now, but only mark one valid. */
- wl->beacon_templates_virgin = 0;
+ wl->beacon_templates_virgin = false;
b43legacy_upload_beacon0(dev);
b43legacy_upload_beacon1(dev);
cmd = b43legacy_read32(dev, B43legacy_MMIO_MACCMD);
@@ -1275,8 +1275,8 @@ static void b43legacy_update_templates(struct b43legacy_wl *wl)
if (wl->current_beacon)
dev_kfree_skb_any(wl->current_beacon);
wl->current_beacon = beacon;
- wl->beacon0_uploaded = 0;
- wl->beacon1_uploaded = 0;
+ wl->beacon0_uploaded = false;
+ wl->beacon1_uploaded = false;
ieee80211_queue_work(wl->hw, &wl->beacon_update_trigger);
}
@@ -2440,30 +2440,64 @@ static int b43legacy_rng_init(struct b43legacy_wl *wl)
return err;
}
+static void b43legacy_tx_work(struct work_struct *work)
+{
+ struct b43legacy_wl *wl = container_of(work, struct b43legacy_wl,
+ tx_work);
+ struct b43legacy_wldev *dev;
+ struct sk_buff *skb;
+ int queue_num;
+ int err = 0;
+
+ mutex_lock(&wl->mutex);
+ dev = wl->current_dev;
+ if (unlikely(!dev || b43legacy_status(dev) < B43legacy_STAT_STARTED)) {
+ mutex_unlock(&wl->mutex);
+ return;
+ }
+
+ for (queue_num = 0; queue_num < B43legacy_QOS_QUEUE_NUM; queue_num++) {
+ while (skb_queue_len(&wl->tx_queue[queue_num])) {
+ skb = skb_dequeue(&wl->tx_queue[queue_num]);
+ if (b43legacy_using_pio(dev))
+ err = b43legacy_pio_tx(dev, skb);
+ else
+ err = b43legacy_dma_tx(dev, skb);
+ if (err == -ENOSPC) {
+ wl->tx_queue_stopped[queue_num] = 1;
+ ieee80211_stop_queue(wl->hw, queue_num);
+ skb_queue_head(&wl->tx_queue[queue_num], skb);
+ break;
+ }
+ if (unlikely(err))
+ dev_kfree_skb(skb); /* Drop it */
+ err = 0;
+ }
+
+ if (!err)
+ wl->tx_queue_stopped[queue_num] = 0;
+ }
+
+ mutex_unlock(&wl->mutex);
+}
+
static void b43legacy_op_tx(struct ieee80211_hw *hw,
struct sk_buff *skb)
{
struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
- struct b43legacy_wldev *dev = wl->current_dev;
- int err = -ENODEV;
- unsigned long flags;
- if (unlikely(!dev))
- goto out;
- if (unlikely(b43legacy_status(dev) < B43legacy_STAT_STARTED))
- goto out;
- /* DMA-TX is done without a global lock. */
- if (b43legacy_using_pio(dev)) {
- spin_lock_irqsave(&wl->irq_lock, flags);
- err = b43legacy_pio_tx(dev, skb);
- spin_unlock_irqrestore(&wl->irq_lock, flags);
- } else
- err = b43legacy_dma_tx(dev, skb);
-out:
- if (unlikely(err)) {
- /* Drop the packet. */
+ if (unlikely(skb->len < 2 + 2 + 6)) {
+ /* Too short, this can't be a valid frame. */
dev_kfree_skb_any(skb);
+ return;
}
+ B43legacy_WARN_ON(skb_shinfo(skb)->nr_frags);
+
+ skb_queue_tail(&wl->tx_queue[skb->queue_mapping], skb);
+ if (!wl->tx_queue_stopped[skb->queue_mapping])
+ ieee80211_queue_work(wl->hw, &wl->tx_work);
+ else
+ ieee80211_stop_queue(wl->hw, skb->queue_mapping);
}
static int b43legacy_op_conf_tx(struct ieee80211_hw *hw,
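The reworked TX path defers transmission to a workqueue, which serves two purposes visible in the hunks above: the DMA path no longer needs its ring spinlock, and a frame rejected with -ENOSPC can simply be requeued instead of dropped. A condensed sketch of the handshake (names taken from the patch; error handling and the PIO branch simplified; wl, dev and q are assumed to be in scope):

	/* mac80211 .tx callback: never sleeps, just queues and kicks the worker. */
	skb_queue_tail(&wl->tx_queue[skb->queue_mapping], skb);
	ieee80211_queue_work(wl->hw, &wl->tx_work);

	/* Worker (runs under wl->mutex): drain each queue until the ring is full. */
	while ((skb = skb_dequeue(&wl->tx_queue[q]))) {
		if (b43legacy_dma_tx(dev, skb) == -ENOSPC) {
			wl->tx_queue_stopped[q] = 1;
			ieee80211_stop_queue(wl->hw, q);
			skb_queue_head(&wl->tx_queue[q], skb);	/* retry later */
			break;
		}
	}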
@@ -2510,7 +2544,7 @@ static int find_wldev_for_phymode(struct b43legacy_wl *wl,
if (d->phy.possible_phymodes & phymode) {
/* Ok, this device supports the PHY-mode.
* Set the gmode bit. */
- *gmode = 1;
+ *gmode = true;
*dev = d;
return 0;
@@ -2546,7 +2580,7 @@ static int b43legacy_switch_phymode(struct b43legacy_wl *wl,
struct b43legacy_wldev *uninitialized_var(up_dev);
struct b43legacy_wldev *down_dev;
int err;
- bool gmode = 0;
+ bool gmode = false;
int prev_status;
err = find_wldev_for_phymode(wl, new_mode, &up_dev, &gmode);
@@ -2879,6 +2913,7 @@ static void b43legacy_wireless_core_stop(struct b43legacy_wldev *dev)
{
struct b43legacy_wl *wl = dev->wl;
unsigned long flags;
+ int queue_num;
if (b43legacy_status(dev) < B43legacy_STAT_STARTED)
return;
@@ -2898,11 +2933,16 @@ static void b43legacy_wireless_core_stop(struct b43legacy_wldev *dev)
/* Must unlock as it would otherwise deadlock. No races here.
* Cancel the possibly running self-rearming periodic work. */
cancel_delayed_work_sync(&dev->periodic_work);
+ cancel_work_sync(&wl->tx_work);
mutex_lock(&wl->mutex);
- ieee80211_stop_queues(wl->hw); /* FIXME this could cause a deadlock */
+ /* Drain all TX queues. */
+ for (queue_num = 0; queue_num < B43legacy_QOS_QUEUE_NUM; queue_num++) {
+ while (skb_queue_len(&wl->tx_queue[queue_num]))
+ dev_kfree_skb(skb_dequeue(&wl->tx_queue[queue_num]));
+ }
- b43legacy_mac_suspend(dev);
+b43legacy_mac_suspend(dev);
free_irq(dev->dev->irq, dev);
b43legacydbg(wl, "Wireless interface stopped\n");
}
@@ -3044,12 +3084,12 @@ static void setup_struct_phy_for_init(struct b43legacy_wldev *dev,
/* Assume the radio is enabled. If it's not enabled, the state will
* immediately get fixed on the first periodic work run. */
- dev->radio_hw_enable = 1;
+ dev->radio_hw_enable = true;
phy->savedpctlreg = 0xFFFF;
- phy->aci_enable = 0;
- phy->aci_wlan_automatic = 0;
- phy->aci_hw_rssi = 0;
+ phy->aci_enable = false;
+ phy->aci_wlan_automatic = false;
+ phy->aci_hw_rssi = false;
lo = phy->_lo_pairs;
if (lo)
@@ -3081,7 +3121,7 @@ static void setup_struct_phy_for_init(struct b43legacy_wldev *dev,
static void setup_struct_wldev_for_init(struct b43legacy_wldev *dev)
{
/* Flags */
- dev->dfq_valid = 0;
+ dev->dfq_valid = false;
/* Stats */
memset(&dev->stats, 0, sizeof(dev->stats));
@@ -3187,9 +3227,9 @@ static void prepare_phy_data_for_init(struct b43legacy_wldev *dev)
phy->lofcal = 0xFFFF;
phy->initval = 0xFFFF;
- phy->aci_enable = 0;
- phy->aci_wlan_automatic = 0;
- phy->aci_hw_rssi = 0;
+ phy->aci_enable = false;
+ phy->aci_wlan_automatic = false;
+ phy->aci_hw_rssi = false;
phy->antenna_diversity = 0xFFFF;
memset(phy->minlowsig, 0xFF, sizeof(phy->minlowsig));
@@ -3355,7 +3395,7 @@ static int b43legacy_op_add_interface(struct ieee80211_hw *hw,
b43legacydbg(wl, "Adding Interface type %d\n", vif->type);
dev = wl->current_dev;
- wl->operating = 1;
+ wl->operating = true;
wl->vif = vif;
wl->if_type = vif->type;
memcpy(wl->mac_addr, vif->addr, ETH_ALEN);
@@ -3389,7 +3429,7 @@ static void b43legacy_op_remove_interface(struct ieee80211_hw *hw,
B43legacy_WARN_ON(wl->vif != vif);
wl->vif = NULL;
- wl->operating = 0;
+ wl->operating = false;
spin_lock_irqsave(&wl->irq_lock, flags);
b43legacy_adjust_opmode(dev);
@@ -3413,10 +3453,10 @@ static int b43legacy_op_start(struct ieee80211_hw *hw)
memset(wl->bssid, 0, ETH_ALEN);
memset(wl->mac_addr, 0, ETH_ALEN);
wl->filter_flags = 0;
- wl->beacon0_uploaded = 0;
- wl->beacon1_uploaded = 0;
- wl->beacon_templates_virgin = 1;
- wl->radio_enabled = 1;
+ wl->beacon0_uploaded = false;
+ wl->beacon1_uploaded = false;
+ wl->beacon_templates_virgin = true;
+ wl->radio_enabled = true;
mutex_lock(&wl->mutex);
@@ -3455,7 +3495,7 @@ static void b43legacy_op_stop(struct ieee80211_hw *hw)
if (b43legacy_status(dev) >= B43legacy_STAT_STARTED)
b43legacy_wireless_core_stop(dev);
b43legacy_wireless_core_exit(dev);
- wl->radio_enabled = 0;
+ wl->radio_enabled = false;
mutex_unlock(&wl->mutex);
}
@@ -3614,7 +3654,7 @@ static int b43legacy_wireless_core_attach(struct b43legacy_wldev *dev)
have_bphy = 1;
dev->phy.gmode = (have_gphy || have_bphy);
- dev->phy.radio_on = 1;
+ dev->phy.radio_on = true;
tmp = dev->phy.gmode ? B43legacy_TMSLOW_GMODE : 0;
b43legacy_wireless_core_reset(dev, tmp);
@@ -3705,7 +3745,7 @@ static int b43legacy_one_core_attach(struct ssb_device *dev,
(void (*)(unsigned long))b43legacy_interrupt_tasklet,
(unsigned long)wldev);
if (modparam_pio)
- wldev->__using_pio = 1;
+ wldev->__using_pio = true;
INIT_LIST_HEAD(&wldev->list);
err = b43legacy_wireless_core_attach(wldev);
@@ -3748,6 +3788,7 @@ static int b43legacy_wireless_init(struct ssb_device *dev)
struct ieee80211_hw *hw;
struct b43legacy_wl *wl;
int err = -ENOMEM;
+ int queue_num;
b43legacy_sprom_fixup(dev->bus);
@@ -3782,6 +3823,13 @@ static int b43legacy_wireless_init(struct ssb_device *dev)
mutex_init(&wl->mutex);
INIT_LIST_HEAD(&wl->devlist);
INIT_WORK(&wl->beacon_update_trigger, b43legacy_beacon_update_trigger_work);
+ INIT_WORK(&wl->tx_work, b43legacy_tx_work);
+
+ /* Initialize queues and flags. */
+ for (queue_num = 0; queue_num < B43legacy_QOS_QUEUE_NUM; queue_num++) {
+ skb_queue_head_init(&wl->tx_queue[queue_num]);
+ wl->tx_queue_stopped[queue_num] = 0;
+ }
ssb_set_devtypedata(dev, wl);
b43legacyinfo(wl, "Broadcom %04X WLAN found (core revision %u)\n",
diff --git a/drivers/net/wireless/b43legacy/radio.c b/drivers/net/wireless/b43legacy/radio.c
index 475eb14e665b..fcbafcd603cc 100644
--- a/drivers/net/wireless/b43legacy/radio.c
+++ b/drivers/net/wireless/b43legacy/radio.c
@@ -1067,7 +1067,7 @@ b43legacy_radio_interference_mitigation_enable(struct b43legacy_wldev *dev,
if (b43legacy_phy_read(dev, 0x0033) & 0x0800)
break;
- phy->aci_enable = 1;
+ phy->aci_enable = true;
phy_stacksave(B43legacy_PHY_RADIO_BITFIELD);
phy_stacksave(B43legacy_PHY_G_CRS);
@@ -1279,7 +1279,7 @@ b43legacy_radio_interference_mitigation_disable(struct b43legacy_wldev *dev,
if (!(b43legacy_phy_read(dev, 0x0033) & 0x0800))
break;
- phy->aci_enable = 0;
+ phy->aci_enable = false;
phy_stackrestore(B43legacy_PHY_RADIO_BITFIELD);
phy_stackrestore(B43legacy_PHY_G_CRS);
@@ -1346,10 +1346,10 @@ int b43legacy_radio_set_interference_mitigation(struct b43legacy_wldev *dev,
(phy->rev == 0) || (!phy->gmode))
return -ENODEV;
- phy->aci_wlan_automatic = 0;
+ phy->aci_wlan_automatic = false;
switch (mode) {
case B43legacy_RADIO_INTERFMODE_AUTOWLAN:
- phy->aci_wlan_automatic = 1;
+ phy->aci_wlan_automatic = true;
if (phy->aci_enable)
mode = B43legacy_RADIO_INTERFMODE_MANUALWLAN;
else
@@ -1371,8 +1371,8 @@ int b43legacy_radio_set_interference_mitigation(struct b43legacy_wldev *dev,
currentmode);
if (mode == B43legacy_RADIO_INTERFMODE_NONE) {
- phy->aci_enable = 0;
- phy->aci_hw_rssi = 0;
+ phy->aci_enable = false;
+ phy->aci_hw_rssi = false;
} else
b43legacy_radio_interference_mitigation_enable(dev, mode);
phy->interfmode = mode;
@@ -2102,7 +2102,7 @@ void b43legacy_radio_turn_on(struct b43legacy_wldev *dev)
phy->radio_off_context.rfover);
b43legacy_phy_write(dev, B43legacy_PHY_RFOVERVAL,
phy->radio_off_context.rfoverval);
- phy->radio_off_context.valid = 0;
+ phy->radio_off_context.valid = false;
}
channel = phy->channel;
err = b43legacy_radio_selectchannel(dev,
@@ -2113,7 +2113,7 @@ void b43legacy_radio_turn_on(struct b43legacy_wldev *dev)
default:
B43legacy_BUG_ON(1);
}
- phy->radio_on = 1;
+ phy->radio_on = true;
}
void b43legacy_radio_turn_off(struct b43legacy_wldev *dev, bool force)
@@ -2131,14 +2131,14 @@ void b43legacy_radio_turn_off(struct b43legacy_wldev *dev, bool force)
if (!force) {
phy->radio_off_context.rfover = rfover;
phy->radio_off_context.rfoverval = rfoverval;
- phy->radio_off_context.valid = 1;
+ phy->radio_off_context.valid = true;
}
b43legacy_phy_write(dev, B43legacy_PHY_RFOVER, rfover | 0x008C);
b43legacy_phy_write(dev, B43legacy_PHY_RFOVERVAL,
rfoverval & 0xFF73);
} else
b43legacy_phy_write(dev, 0x0015, 0xAA00);
- phy->radio_on = 0;
+ phy->radio_on = false;
b43legacydbg(dev->wl, "Radio initialized\n");
}
diff --git a/drivers/net/wireless/brcm80211/Kconfig b/drivers/net/wireless/brcm80211/Kconfig
index 2069fc8f7ad1..cd6375de2a60 100644
--- a/drivers/net/wireless/brcm80211/Kconfig
+++ b/drivers/net/wireless/brcm80211/Kconfig
@@ -3,9 +3,8 @@ config BRCMUTIL
config BRCMSMAC
tristate "Broadcom IEEE802.11n PCIe SoftMAC WLAN driver"
- depends on PCI
depends on MAC80211
- depends on BCMA=n
+ depends on BCMA
select BRCMUTIL
select FW_LOADER
select CRC_CCITT
@@ -18,16 +17,26 @@ config BRCMSMAC
config BRCMFMAC
tristate "Broadcom IEEE802.11n embedded FullMAC WLAN driver"
- depends on MMC
depends on CFG80211
select BRCMUTIL
- select FW_LOADER
---help---
This module adds support for embedded wireless adapters based on
- Broadcom IEEE802.11n FullMAC chipsets. This driver uses the kernel's
- wireless extensions subsystem. If you choose to build a module,
+ Broadcom IEEE802.11n FullMAC chipsets. It has to be used with at least
+ one of the supported bus interfaces. If you choose to build a module,
it'll be called brcmfmac.ko.
+config BRCMFMAC_SDIO
+ bool "SDIO bus interface support for FullMAC"
+ depends on MMC
+ depends on BRCMFMAC
+ select FW_LOADER
+ default y
+ ---help---
+ This option enables the SDIO bus interface support for the Broadcom
+ FullMAC WLAN driver.
+ Say Y if you want to use brcmfmac for a compatible SDIO interface
+ wireless card.
+
config BRCMDBG
bool "Broadcom driver debug functions"
depends on BRCMSMAC || BRCMFMAC
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/Makefile b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
index b44e3094588a..9ca9ea1135ea 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/Makefile
+++ b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
@@ -19,15 +19,16 @@ ccflags-y += \
-Idrivers/net/wireless/brcm80211/brcmfmac \
-Idrivers/net/wireless/brcm80211/include
-DHDOFILES = \
- wl_cfg80211.o \
- dhd_cdc.o \
- dhd_common.o \
- dhd_sdio.o \
- dhd_linux.o \
- bcmsdh.o \
- bcmsdh_sdmmc.o
-
obj-$(CONFIG_BRCMFMAC) += brcmfmac.o
-brcmfmac-objs += $(DHDOFILES)
+brcmfmac-objs += \
+ wl_cfg80211.o \
+ dhd_cdc.o \
+ dhd_common.o \
+ dhd_linux.o
+brcmfmac-$(CONFIG_BRCMFMAC_SDIO) += \
+ dhd_sdio.o \
+ bcmsdh.o \
+ bcmsdh_sdmmc.o \
+ sdio_chip.o
+
ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmchip.h b/drivers/net/wireless/brcm80211/brcmfmac/bcmchip.h
deleted file mode 100644
index d7d3afd5a10f..000000000000
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmchip.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (c) 2011 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _bcmchip_h_
-#define _bcmchip_h_
-
-/* bcm4329 */
-/* SDIO device core, ID 0x829 */
-#define BCM4329_CORE_BUS_BASE 0x18011000
-/* internal memory core, ID 0x80e */
-#define BCM4329_CORE_SOCRAM_BASE 0x18003000
-/* ARM Cortex M3 core, ID 0x82a */
-#define BCM4329_CORE_ARM_BASE 0x18002000
-#define BCM4329_RAMSIZE 0x48000
-/* firmware name */
-#define BCM4329_FW_NAME "brcm/bcm4329-fullmac-4.bin"
-#define BCM4329_NV_NAME "brcm/bcm4329-fullmac-4.txt"
-
-#endif /* _bcmchip_h_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
index 89ff94da556a..4bc8d251acf8 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
@@ -31,7 +31,6 @@
#include <brcmu_utils.h>
#include <brcmu_wifi.h>
#include <soc.h>
-#include "dhd.h"
#include "dhd_bus.h"
#include "dhd_dbg.h"
#include "sdio_host.h"
@@ -51,12 +50,18 @@ static void brcmf_sdioh_irqhandler(struct sdio_func *func)
sdio_claim_host(func);
}
+/* dummy handler for SDIO function 2 interrupt */
+static void brcmf_sdioh_dummy_irq_handler(struct sdio_func *func)
+{
+}
+
int brcmf_sdcard_intr_reg(struct brcmf_sdio_dev *sdiodev)
{
brcmf_dbg(TRACE, "Entering\n");
sdio_claim_host(sdiodev->func[1]);
sdio_claim_irq(sdiodev->func[1], brcmf_sdioh_irqhandler);
+ sdio_claim_irq(sdiodev->func[2], brcmf_sdioh_dummy_irq_handler);
sdio_release_host(sdiodev->func[1]);
return 0;
@@ -67,6 +72,7 @@ int brcmf_sdcard_intr_dereg(struct brcmf_sdio_dev *sdiodev)
brcmf_dbg(TRACE, "Entering\n");
sdio_claim_host(sdiodev->func[1]);
+ sdio_release_irq(sdiodev->func[2]);
sdio_release_irq(sdiodev->func[1]);
sdio_release_host(sdiodev->func[1]);
@@ -222,19 +228,12 @@ bool brcmf_sdcard_regfail(struct brcmf_sdio_dev *sdiodev)
return sdiodev->regfail;
}
-int
-brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
- uint flags,
- u8 *buf, uint nbytes, struct sk_buff *pkt)
+static int brcmf_sdcard_recv_prepare(struct brcmf_sdio_dev *sdiodev, uint fn,
+ uint flags, uint width, u32 *addr)
{
- int status;
- uint incr_fix;
- uint width;
- uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK;
+ uint bar0 = *addr & ~SBSDIO_SB_OFT_ADDR_MASK;
int err = 0;
- brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n", fn, addr, nbytes);
-
/* Async not implemented yet */
if (flags & SDIO_REQ_ASYNC)
return -ENOTSUPP;
@@ -247,29 +246,114 @@ brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
sdiodev->sbwad = bar0;
}
- addr &= SBSDIO_SB_OFT_ADDR_MASK;
+ *addr &= SBSDIO_SB_OFT_ADDR_MASK;
+
+ if (width == 4)
+ *addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+ return 0;
+}
+
+int
+brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+ uint flags, u8 *buf, uint nbytes)
+{
+ struct sk_buff *mypkt;
+ int err;
+
+ mypkt = brcmu_pkt_buf_get_skb(nbytes);
+ if (!mypkt) {
+ brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: len %d\n",
+ nbytes);
+ return -EIO;
+ }
+
+ err = brcmf_sdcard_recv_pkt(sdiodev, addr, fn, flags, mypkt);
+ if (!err)
+ memcpy(buf, mypkt->data, nbytes);
+
+ brcmu_pkt_buf_free_skb(mypkt);
+ return err;
+}
+
+int
+brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+ uint flags, struct sk_buff *pkt)
+{
+ uint incr_fix;
+ uint width;
+ int err = 0;
+
+ brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n",
+ fn, addr, pkt->len);
+
+ width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
+ err = brcmf_sdcard_recv_prepare(sdiodev, fn, flags, width, &addr);
+ if (err)
+ return err;
incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
+ err = brcmf_sdioh_request_buffer(sdiodev, incr_fix, SDIOH_READ,
+ fn, addr, pkt);
+
+ return err;
+}
+
+int brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+ uint flags, struct sk_buff_head *pktq)
+{
+ uint incr_fix;
+ uint width;
+ int err = 0;
+
+ brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n",
+ fn, addr, pktq->qlen);
+
width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
- if (width == 4)
- addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+ err = brcmf_sdcard_recv_prepare(sdiodev, fn, flags, width, &addr);
+ if (err)
+ return err;
- status = brcmf_sdioh_request_buffer(sdiodev, incr_fix, SDIOH_READ,
- fn, addr, width, nbytes, buf, pkt);
+ incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
+ err = brcmf_sdioh_request_chain(sdiodev, incr_fix, SDIOH_READ, fn, addr,
+ pktq);
- return status;
+ return err;
}
int
brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
- uint flags, u8 *buf, uint nbytes, struct sk_buff *pkt)
+ uint flags, u8 *buf, uint nbytes)
+{
+ struct sk_buff *mypkt;
+ int err;
+
+ mypkt = brcmu_pkt_buf_get_skb(nbytes);
+ if (!mypkt) {
+ brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: len %d\n",
+ nbytes);
+ return -EIO;
+ }
+
+ memcpy(mypkt->data, buf, nbytes);
+ err = brcmf_sdcard_send_pkt(sdiodev, addr, fn, flags, mypkt);
+
+ brcmu_pkt_buf_free_skb(mypkt);
+ return err;
+
+}
+
+int
+brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+ uint flags, struct sk_buff *pkt)
{
uint incr_fix;
uint width;
uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK;
int err = 0;
- brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n", fn, addr, nbytes);
+ brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n",
+ fn, addr, pkt->len);
/* Async not implemented yet */
if (flags & SDIO_REQ_ASYNC)
@@ -291,18 +375,39 @@ brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
return brcmf_sdioh_request_buffer(sdiodev, incr_fix, SDIOH_WRITE, fn,
- addr, width, nbytes, buf, pkt);
+ addr, pkt);
}
int brcmf_sdcard_rwdata(struct brcmf_sdio_dev *sdiodev, uint rw, u32 addr,
u8 *buf, uint nbytes)
{
+ struct sk_buff *mypkt;
+ bool write = rw ? SDIOH_WRITE : SDIOH_READ;
+ int err;
+
addr &= SBSDIO_SB_OFT_ADDR_MASK;
addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
- return brcmf_sdioh_request_buffer(sdiodev, SDIOH_DATA_INC,
- (rw ? SDIOH_WRITE : SDIOH_READ), SDIO_FUNC_1,
- addr, 4, nbytes, buf, NULL);
+ mypkt = brcmu_pkt_buf_get_skb(nbytes);
+ if (!mypkt) {
+ brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: len %d\n",
+ nbytes);
+ return -EIO;
+ }
+
+ /* For a write, copy the buffer data into the packet. */
+ if (write)
+ memcpy(mypkt->data, buf, nbytes);
+
+ err = brcmf_sdioh_request_buffer(sdiodev, SDIOH_DATA_INC, write,
+ SDIO_FUNC_1, addr, mypkt);
+
+ /* For a read, copy the packet data back to the buffer. */
+ if (!err && !write)
+ memcpy(buf, mypkt->data, nbytes);
+
+ brcmu_pkt_buf_free_skb(mypkt);
+ return err;
}
int brcmf_sdcard_abort(struct brcmf_sdio_dev *sdiodev, uint fn)
@@ -333,7 +438,7 @@ int brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
sdiodev->sbwad = SI_ENUM_BASE;
/* try to attach to the target device */
- sdiodev->bus = brcmf_sdbrcm_probe(0, 0, 0, 0, regs, sdiodev);
+ sdiodev->bus = brcmf_sdbrcm_probe(regs, sdiodev);
if (!sdiodev->bus) {
brcmf_dbg(ERROR, "device attach failed\n");
ret = -ENODEV;
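With this refactor the old dual-purpose brcmf_sdcard_recv_buf() is split into three entry points: recv_buf() bounces through a freshly allocated skb, recv_pkt() transfers one DMA-able skb, and recv_chain() walks an skb queue. A hedged usage sketch (sdiodev is assumed to come from probe; the address and sizes are made up):

	u8 regs[64];
	u32 addr = 0x18000000;	/* made-up backplane address */
	struct sk_buff *pkt = brcmu_pkt_buf_get_skb(512);
	int err;

	/* Plain buffer: the wrapper allocates and frees a bounce skb itself. */
	err = brcmf_sdcard_recv_buf(sdiodev, addr, SDIO_FUNC_1,
				    SDIO_REQ_4BYTE, regs, sizeof(regs));

	/* Pre-allocated DMA-able skb: no extra copy, the caller owns the skb. */
	if (!err && pkt)
		err = brcmf_sdcard_recv_pkt(sdiodev, addr, SDIO_FUNC_2,
					    SDIO_REQ_4BYTE | SDIO_REQ_FIXED, pkt);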
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
index bbaeb2d5c93a..9b8c0ed833d4 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
@@ -31,15 +31,15 @@
#include <brcmu_utils.h>
#include <brcmu_wifi.h>
#include "sdio_host.h"
-#include "dhd.h"
#include "dhd_dbg.h"
-#include "wl_cfg80211.h"
+#include "dhd_bus.h"
#define SDIO_VENDOR_ID_BROADCOM 0x02d0
#define DMA_ALIGN_MASK 0x03
#define SDIO_DEVICE_ID_BROADCOM_4329 0x4329
+#define SDIO_DEVICE_ID_BROADCOM_4330 0x4330
#define SDIO_FUNC1_BLOCKSIZE 64
#define SDIO_FUNC2_BLOCKSIZE 512
@@ -47,6 +47,7 @@
/* devices we support, null terminated */
static const struct sdio_device_id brcmf_sdmmc_ids[] = {
{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4329)},
+ {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4330)},
{ /* end: all zeroes */ },
};
MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
@@ -204,62 +205,75 @@ int brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev,
return err_ret;
}
+/* precondition: host controller is claimed */
static int
-brcmf_sdioh_request_packet(struct brcmf_sdio_dev *sdiodev, uint fix_inc,
- uint write, uint func, uint addr,
- struct sk_buff *pkt)
+brcmf_sdioh_request_data(struct brcmf_sdio_dev *sdiodev, uint write, bool fifo,
+ uint func, uint addr, struct sk_buff *pkt, uint pktlen)
+{
+ int err_ret = 0;
+
+ if ((write) && (!fifo)) {
+ err_ret = sdio_memcpy_toio(sdiodev->func[func], addr,
+ ((u8 *) (pkt->data)), pktlen);
+ } else if (write) {
+ err_ret = sdio_memcpy_toio(sdiodev->func[func], addr,
+ ((u8 *) (pkt->data)), pktlen);
+ } else if (fifo) {
+ err_ret = sdio_readsb(sdiodev->func[func],
+ ((u8 *) (pkt->data)), addr, pktlen);
+ } else {
+ err_ret = sdio_memcpy_fromio(sdiodev->func[func],
+ ((u8 *) (pkt->data)),
+ addr, pktlen);
+ }
+
+ return err_ret;
+}
+
+/*
+ * This function takes a queue of packets. The packets on the queue
+ * are assumed to be properly aligned by the caller.
+ */
+int
+brcmf_sdioh_request_chain(struct brcmf_sdio_dev *sdiodev, uint fix_inc,
+ uint write, uint func, uint addr,
+ struct sk_buff_head *pktq)
{
bool fifo = (fix_inc == SDIOH_DATA_FIX);
u32 SGCount = 0;
int err_ret = 0;
- struct sk_buff *pnext;
+ struct sk_buff *pkt;
brcmf_dbg(TRACE, "Enter\n");
- brcmf_pm_resume_wait(sdiodev, &sdiodev->request_packet_wait);
+ brcmf_pm_resume_wait(sdiodev, &sdiodev->request_chain_wait);
if (brcmf_pm_resume_error(sdiodev))
return -EIO;
/* Claim host controller */
sdio_claim_host(sdiodev->func[func]);
- for (pnext = pkt; pnext; pnext = pnext->next) {
- uint pkt_len = pnext->len;
+
+ skb_queue_walk(pktq, pkt) {
+ uint pkt_len = pkt->len;
pkt_len += 3;
pkt_len &= 0xFFFFFFFC;
- if ((write) && (!fifo)) {
- err_ret = sdio_memcpy_toio(sdiodev->func[func], addr,
- ((u8 *) (pnext->data)),
- pkt_len);
- } else if (write) {
- err_ret = sdio_memcpy_toio(sdiodev->func[func], addr,
- ((u8 *) (pnext->data)),
- pkt_len);
- } else if (fifo) {
- err_ret = sdio_readsb(sdiodev->func[func],
- ((u8 *) (pnext->data)),
- addr, pkt_len);
- } else {
- err_ret = sdio_memcpy_fromio(sdiodev->func[func],
- ((u8 *) (pnext->data)),
- addr, pkt_len);
- }
-
+ err_ret = brcmf_sdioh_request_data(sdiodev, write, fifo, func,
+ addr, pkt, pkt_len);
if (err_ret) {
brcmf_dbg(ERROR, "%s FAILED %p[%d], addr=0x%05x, pkt_len=%d, ERR=0x%08x\n",
- write ? "TX" : "RX", pnext, SGCount, addr,
+ write ? "TX" : "RX", pkt, SGCount, addr,
pkt_len, err_ret);
} else {
brcmf_dbg(TRACE, "%s xfr'd %p[%d], addr=0x%05x, len=%d\n",
- write ? "TX" : "RX", pnext, SGCount, addr,
+ write ? "TX" : "RX", pkt, SGCount, addr,
pkt_len);
}
-
if (!fifo)
addr += pkt_len;
- SGCount++;
+ SGCount++;
}
/* Release host controller */
@@ -270,91 +284,45 @@ brcmf_sdioh_request_packet(struct brcmf_sdio_dev *sdiodev, uint fix_inc,
}
/*
- * This function takes a buffer or packet, and fixes everything up
- * so that in the end, a DMA-able packet is created.
- *
- * A buffer does not have an associated packet pointer,
- * and may or may not be aligned.
- * A packet may consist of a single packet, or a packet chain.
- * If it is a packet chain, then all the packets in the chain
- * must be properly aligned.
- *
- * If the packet data is not aligned, then there may only be
- * one packet, and in this case, it is copied to a new
- * aligned packet.
- *
+ * This function takes a single DMA-able packet.
*/
int brcmf_sdioh_request_buffer(struct brcmf_sdio_dev *sdiodev,
uint fix_inc, uint write, uint func, uint addr,
- uint reg_width, uint buflen_u, u8 *buffer,
struct sk_buff *pkt)
{
- int Status;
- struct sk_buff *mypkt = NULL;
+ int status;
+ uint pkt_len = pkt->len;
+ bool fifo = (fix_inc == SDIOH_DATA_FIX);
brcmf_dbg(TRACE, "Enter\n");
+ if (pkt == NULL)
+ return -EINVAL;
+
brcmf_pm_resume_wait(sdiodev, &sdiodev->request_buffer_wait);
if (brcmf_pm_resume_error(sdiodev))
return -EIO;
- /* Case 1: we don't have a packet. */
- if (pkt == NULL) {
- brcmf_dbg(DATA, "Creating new %s Packet, len=%d\n",
- write ? "TX" : "RX", buflen_u);
- mypkt = brcmu_pkt_buf_get_skb(buflen_u);
- if (!mypkt) {
- brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: len %d\n",
- buflen_u);
- return -EIO;
- }
-
- /* For a write, copy the buffer data into the packet. */
- if (write)
- memcpy(mypkt->data, buffer, buflen_u);
-
- Status = brcmf_sdioh_request_packet(sdiodev, fix_inc, write,
- func, addr, mypkt);
-
- /* For a read, copy the packet data back to the buffer. */
- if (!write)
- memcpy(buffer, mypkt->data, buflen_u);
-
- brcmu_pkt_buf_free_skb(mypkt);
- } else if (((ulong) (pkt->data) & DMA_ALIGN_MASK) != 0) {
- /*
- * Case 2: We have a packet, but it is unaligned.
- * In this case, we cannot have a chain (pkt->next == NULL)
- */
- brcmf_dbg(DATA, "Creating aligned %s Packet, len=%d\n",
- write ? "TX" : "RX", pkt->len);
- mypkt = brcmu_pkt_buf_get_skb(pkt->len);
- if (!mypkt) {
- brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: len %d\n",
- pkt->len);
- return -EIO;
- }
- /* For a write, copy the buffer data into the packet. */
- if (write)
- memcpy(mypkt->data, pkt->data, pkt->len);
-
- Status = brcmf_sdioh_request_packet(sdiodev, fix_inc, write,
- func, addr, mypkt);
+ /* Claim host controller */
+ sdio_claim_host(sdiodev->func[func]);
- /* For a read, copy the packet data back to the buffer. */
- if (!write)
- memcpy(pkt->data, mypkt->data, mypkt->len);
+ pkt_len += 3;
+ pkt_len &= (uint)~3;
- brcmu_pkt_buf_free_skb(mypkt);
- } else { /* case 3: We have a packet and
- it is aligned. */
- brcmf_dbg(DATA, "Aligned %s Packet, direct DMA\n",
- write ? "Tx" : "Rx");
- Status = brcmf_sdioh_request_packet(sdiodev, fix_inc, write,
- func, addr, pkt);
+ status = brcmf_sdioh_request_data(sdiodev, write, fifo, func,
+ addr, pkt, pkt_len);
+ if (status) {
+ brcmf_dbg(ERROR, "%s FAILED %p, addr=0x%05x, pkt_len=%d, ERR=0x%08x\n",
+ write ? "TX" : "RX", pkt, addr, pkt_len, status);
+ } else {
+ brcmf_dbg(TRACE, "%s xfr'd %p, addr=0x%05x, len=%d\n",
+ write ? "TX" : "RX", pkt, addr, pkt_len);
}
- return Status;
+ /* Release host controller */
+ sdio_release_host(sdiodev->func[func]);
+
+ return status;
}
/* Read client card reg */
@@ -494,6 +462,7 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
{
int ret = 0;
struct brcmf_sdio_dev *sdiodev;
+ struct brcmf_bus *bus_if;
brcmf_dbg(TRACE, "Enter\n");
brcmf_dbg(TRACE, "func->class=%x\n", func->class);
brcmf_dbg(TRACE, "sdio_vendor: 0x%04x\n", func->vendor);
@@ -505,17 +474,26 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
brcmf_dbg(ERROR, "card private drvdata occupied\n");
return -ENXIO;
}
+ bus_if = kzalloc(sizeof(struct brcmf_bus), GFP_KERNEL);
+ if (!bus_if)
+ return -ENOMEM;
sdiodev = kzalloc(sizeof(struct brcmf_sdio_dev), GFP_KERNEL);
- if (!sdiodev)
+ if (!sdiodev) {
+ kfree(bus_if);
return -ENOMEM;
+ }
sdiodev->func[0] = func->card->sdio_func[0];
sdiodev->func[1] = func;
+ sdiodev->bus_if = bus_if;
+ bus_if->bus_priv = sdiodev;
+ bus_if->type = SDIO_BUS;
+ bus_if->align = BRCMF_SDALIGN;
dev_set_drvdata(&func->card->dev, sdiodev);
atomic_set(&sdiodev->suspend, false);
init_waitqueue_head(&sdiodev->request_byte_wait);
init_waitqueue_head(&sdiodev->request_word_wait);
- init_waitqueue_head(&sdiodev->request_packet_wait);
+ init_waitqueue_head(&sdiodev->request_chain_wait);
init_waitqueue_head(&sdiodev->request_buffer_wait);
}
@@ -525,6 +503,10 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
return -ENODEV;
sdiodev->func[2] = func;
+ bus_if = sdiodev->bus_if;
+ sdiodev->dev = &func->dev;
+ dev_set_drvdata(&func->dev, bus_if);
+
brcmf_dbg(TRACE, "F2 found, calling brcmf_sdio_probe...\n");
ret = brcmf_sdio_probe(sdiodev);
}
@@ -534,6 +516,7 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
static void brcmf_ops_sdio_remove(struct sdio_func *func)
{
+ struct brcmf_bus *bus_if;
struct brcmf_sdio_dev *sdiodev;
brcmf_dbg(TRACE, "Enter\n");
brcmf_dbg(INFO, "func->class=%x\n", func->class);
@@ -542,10 +525,13 @@ static void brcmf_ops_sdio_remove(struct sdio_func *func)
brcmf_dbg(INFO, "Function#: 0x%04x\n", func->num);
if (func->num == 2) {
- sdiodev = dev_get_drvdata(&func->card->dev);
+ bus_if = dev_get_drvdata(&func->dev);
+ sdiodev = bus_if->bus_priv;
brcmf_dbg(TRACE, "F2 found, calling brcmf_sdio_remove...\n");
brcmf_sdio_remove(sdiodev);
dev_set_drvdata(&func->card->dev, NULL);
+ dev_set_drvdata(&func->dev, NULL);
+ kfree(bus_if);
kfree(sdiodev);
}
}
@@ -554,14 +540,12 @@ static void brcmf_ops_sdio_remove(struct sdio_func *func)
static int brcmf_sdio_suspend(struct device *dev)
{
mmc_pm_flag_t sdio_flags;
- struct brcmf_sdio_dev *sdiodev;
struct sdio_func *func = dev_to_sdio_func(dev);
+ struct brcmf_sdio_dev *sdiodev = dev_get_drvdata(&func->card->dev);
int ret = 0;
brcmf_dbg(TRACE, "\n");
- sdiodev = dev_get_drvdata(&func->card->dev);
-
atomic_set(&sdiodev->suspend, true);
sdio_flags = sdio_get_host_pm_caps(sdiodev->func[1]);
@@ -583,10 +567,9 @@ static int brcmf_sdio_suspend(struct device *dev)
static int brcmf_sdio_resume(struct device *dev)
{
- struct brcmf_sdio_dev *sdiodev;
struct sdio_func *func = dev_to_sdio_func(dev);
+ struct brcmf_sdio_dev *sdiodev = dev_get_drvdata(&func->card->dev);
- sdiodev = dev_get_drvdata(&func->card->dev);
brcmf_sdio_wdtmr_enable(sdiodev, true);
atomic_set(&sdiodev->suspend, false);
return 0;
@@ -610,17 +593,26 @@ static struct sdio_driver brcmf_sdmmc_driver = {
#endif /* CONFIG_PM_SLEEP */
};
-/* bus register interface */
-int brcmf_bus_register(void)
+static void __exit brcmf_sdio_exit(void)
{
brcmf_dbg(TRACE, "Enter\n");
- return sdio_register_driver(&brcmf_sdmmc_driver);
+ sdio_unregister_driver(&brcmf_sdmmc_driver);
}
-void brcmf_bus_unregister(void)
+static int __init brcmf_sdio_init(void)
{
+ int ret;
+
brcmf_dbg(TRACE, "Enter\n");
- sdio_unregister_driver(&brcmf_sdmmc_driver);
+ ret = sdio_register_driver(&brcmf_sdmmc_driver);
+
+ if (ret)
+ brcmf_dbg(ERROR, "sdio_register_driver failed: %d\n", ret);
+
+ return ret;
}
+
+module_init(brcmf_sdio_init);
+module_exit(brcmf_sdio_exit);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
index 4645766b4070..e58ea40a75b0 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
@@ -87,7 +87,7 @@
#define TOE_TX_CSUM_OL 0x00000001
#define TOE_RX_CSUM_OL 0x00000002
-#define BRCMF_BSS_INFO_VERSION 108 /* current ver of brcmf_bss_info struct */
+#define BRCMF_BSS_INFO_VERSION 109 /* curr ver of brcmf_bss_info_le struct */
/* size of brcmf_scan_params not including variable length array */
#define BRCMF_SCAN_PARAMS_FIXED_SIZE 64
@@ -122,8 +122,6 @@
/* For supporting multiple interfaces */
#define BRCMF_MAX_IFS 16
-#define BRCMF_DEL_IF -0xe
-#define BRCMF_BAD_IF -0xf
#define DOT11_BSSTYPE_ANY 2
#define DOT11_MAX_DEFAULT_KEYS 4
@@ -158,18 +156,6 @@ struct brcmf_event {
struct brcmf_event_msg msg;
} __packed;
-struct dngl_stats {
- unsigned long rx_packets; /* total packets received */
- unsigned long tx_packets; /* total packets transmitted */
- unsigned long rx_bytes; /* total bytes received */
- unsigned long tx_bytes; /* total bytes transmitted */
- unsigned long rx_errors; /* bad packets received */
- unsigned long tx_errors; /* packet transmit problems */
- unsigned long rx_dropped; /* packets dropped by dongle */
- unsigned long tx_dropped; /* packets dropped by dongle */
- unsigned long multicast; /* multicast packets received */
-};
-
/* event codes sent by the dongle to this driver */
#define BRCMF_E_SET_SSID 0
#define BRCMF_E_JOIN 1
@@ -319,13 +305,6 @@ struct dngl_stats {
#define BRCMF_E_LINK_ASSOC_REC 3
#define BRCMF_E_LINK_BSSCFG_DIS 4
-/* The level of bus communication with the dongle */
-enum brcmf_bus_state {
- BRCMF_BUS_DOWN, /* Not ready for frame transfers */
- BRCMF_BUS_LOAD, /* Download access only (CPU reset) */
- BRCMF_BUS_DATA /* Ready for frame transfers */
-};
-
/* Pattern matching filter. Specifies an offset within received packets to
* start matching, the pattern to match, the size of the pattern, and a bitmask
* that indicates which bits within the pattern should be matched.
@@ -365,7 +344,7 @@ struct brcmf_pkt_filter_enable_le {
* Applications MUST CHECK ie_offset field and length field to access IEs and
* next bss_info structure in a vector (in struct brcmf_scan_results)
*/
-struct brcmf_bss_info {
+struct brcmf_bss_info_le {
__le32 version; /* version field */
__le32 length; /* byte length of data in this record,
* starting at version and including IEs
@@ -466,14 +445,13 @@ struct brcmf_scan_results {
u32 buflen;
u32 version;
u32 count;
- struct brcmf_bss_info bss_info[1];
+ struct brcmf_bss_info_le bss_info_le[];
};
struct brcmf_scan_results_le {
__le32 buflen;
__le32 version;
__le32 count;
- struct brcmf_bss_info bss_info[1];
};
/* used for association with a specific BSSID and chanspec list */
@@ -493,10 +471,6 @@ struct brcmf_join_params {
struct brcmf_assoc_params_le params_le;
};
-/* size of brcmf_scan_results not including variable length array */
-#define BRCMF_SCAN_RESULTS_FIXED_SIZE \
- (sizeof(struct brcmf_scan_results) - sizeof(struct brcmf_bss_info))
-
/* incremental scan results struct */
struct brcmf_iscan_results {
union {
@@ -511,7 +485,7 @@ struct brcmf_iscan_results {
/* size of brcmf_iscan_results not including variable length array */
#define BRCMF_ISCAN_RESULTS_FIXED_SIZE \
- (BRCMF_SCAN_RESULTS_FIXED_SIZE + \
+ (sizeof(struct brcmf_scan_results) + \
offsetof(struct brcmf_iscan_results, results))
struct brcmf_wsec_key {
@@ -579,25 +553,19 @@ struct brcmf_dcmd {
};
/* Forward decls for struct brcmf_pub (see below) */
-struct brcmf_bus; /* device bus info */
struct brcmf_proto; /* device communication protocol info */
-struct brcmf_info; /* device driver info */
struct brcmf_cfg80211_dev; /* cfg80211 device info */
/* Common structure for module and instance linkage */
struct brcmf_pub {
/* Linkage ponters */
- struct brcmf_bus *bus;
+ struct brcmf_bus *bus_if;
struct brcmf_proto *prot;
- struct brcmf_info *info;
struct brcmf_cfg80211_dev *config;
+ struct device *dev; /* fullmac dongle device pointer */
/* Internal brcmf items */
- bool up; /* Driver up/down (to OS) */
- bool txoff; /* Transmit flow-controlled */
- enum brcmf_bus_state busstate;
uint hdrlen; /* Total BRCMF header length (proto + bus) */
- uint maxctl; /* Max size rxctl request from proto to bus */
uint rxsz; /* Rx buffer size bus module should use */
u8 wme_dp; /* wme discard priority */
@@ -605,48 +573,21 @@ struct brcmf_pub {
bool iswl; /* Dongle-resident driver is wl */
unsigned long drv_version; /* Version of dongle-resident driver */
u8 mac[ETH_ALEN]; /* MAC address obtained from dongle */
- struct dngl_stats dstats; /* Stats for dongle-based data */
/* Additional stats for the bus level */
- /* Data packets sent to dongle */
- unsigned long tx_packets;
/* Multicast data packets sent to dongle */
unsigned long tx_multicast;
- /* Errors in sending data to dongle */
- unsigned long tx_errors;
- /* Control packets sent to dongle */
- unsigned long tx_ctlpkts;
- /* Errors sending control frames to dongle */
- unsigned long tx_ctlerrs;
- /* Packets sent up the network interface */
- unsigned long rx_packets;
- /* Multicast packets sent up the network interface */
- unsigned long rx_multicast;
- /* Errors processing rx data packets */
- unsigned long rx_errors;
- /* Control frames processed from dongle */
- unsigned long rx_ctlpkts;
-
- /* Errors in processing rx control frames */
- unsigned long rx_ctlerrs;
- /* Packets dropped locally (no memory) */
- unsigned long rx_dropped;
/* Packets flushed due to unscheduled sendup thread */
unsigned long rx_flushed;
/* Number of times dpc scheduled by watchdog timer */
unsigned long wd_dpc_sched;
- /* Number of packets where header read-ahead was used. */
- unsigned long rx_readahead_cnt;
- /* Number of tx packets we had to realloc for headroom */
- unsigned long tx_realloc;
/* Number of flow control pkts recvd */
unsigned long fc_packets;
/* Last error return */
int bcmerror;
- uint tickcnt;
/* Last error from dongle */
int dongle_error;
@@ -664,6 +605,14 @@ struct brcmf_pub {
u8 country_code[BRCM_CNTRY_BUF_SZ];
char eventmask[BRCMF_EVENTING_MASK_LEN];
+ struct brcmf_if *iflist[BRCMF_MAX_IFS];
+
+ struct mutex proto_block;
+
+ struct work_struct setmacaddr_work;
+ struct work_struct multicast_work;
+ u8 macvalue[ETH_ALEN];
+ atomic_t pend_8021x_cnt;
};
struct brcmf_if_event {
@@ -683,67 +632,33 @@ extern const struct bcmevent_name bcmevent_names[];
extern uint brcmf_c_mkiovar(char *name, char *data, uint datalen,
char *buf, uint len);
-/* Indication from bus module regarding presence/insertion of dongle.
- * Return struct brcmf_pub pointer, used as handle to OS module in later calls.
- * Returned structure should have bus and prot pointers filled in.
- * bus_hdrlen specifies required headroom for bus module header.
- */
-extern struct brcmf_pub *brcmf_attach(struct brcmf_bus *bus,
- uint bus_hdrlen);
extern int brcmf_net_attach(struct brcmf_pub *drvr, int idx);
extern int brcmf_netdev_wait_pend8021x(struct net_device *ndev);
extern s32 brcmf_exec_dcmd(struct net_device *dev, u32 cmd, void *arg, u32 len);
-/* Indication from bus module regarding removal/absence of dongle */
-extern void brcmf_detach(struct brcmf_pub *drvr);
-
-/* Indication from bus module to change flow-control state */
-extern void brcmf_txflowcontrol(struct brcmf_pub *drvr, int ifidx, bool on);
-
-extern bool brcmf_c_prec_enq(struct brcmf_pub *drvr, struct pktq *q,
- struct sk_buff *pkt, int prec);
-
-/* Receive frame for delivery to OS. Callee disposes of rxp. */
-extern void brcmf_rx_frame(struct brcmf_pub *drvr, int ifidx,
- struct sk_buff *rxp, int numpkt);
-
/* Return pointer to interface name */
extern char *brcmf_ifname(struct brcmf_pub *drvr, int idx);
-/* Notify tx completion */
-extern void brcmf_txcomplete(struct brcmf_pub *drvr, struct sk_buff *txp,
- bool success);
-
/* Query dongle */
extern int brcmf_proto_cdc_query_dcmd(struct brcmf_pub *drvr, int ifidx,
uint cmd, void *buf, uint len);
-/* OS independent layer functions */
-extern int brcmf_os_proto_block(struct brcmf_pub *drvr);
-extern int brcmf_os_proto_unblock(struct brcmf_pub *drvr);
#ifdef BCMDBG
extern int brcmf_write_to_file(struct brcmf_pub *drvr, const u8 *buf, int size);
#endif /* BCMDBG */
-extern int brcmf_ifname2idx(struct brcmf_info *drvr_priv, char *name);
-extern int brcmf_c_host_event(struct brcmf_info *drvr_priv, int *idx,
+extern int brcmf_ifname2idx(struct brcmf_pub *drvr, char *name);
+extern int brcmf_c_host_event(struct brcmf_pub *drvr, int *idx,
void *pktdata, struct brcmf_event_msg *,
void **data_ptr);
-extern void brcmf_c_init(void);
-
-extern int brcmf_add_if(struct brcmf_info *drvr_priv, int ifidx,
- struct net_device *ndev, char *name, u8 *mac_addr,
- u32 flags, u8 bssidx);
-extern void brcmf_del_if(struct brcmf_info *drvr_priv, int ifidx);
+extern void brcmf_del_if(struct brcmf_pub *drvr, int ifidx);
/* Send packet to dongle via data channel */
extern int brcmf_sendpkt(struct brcmf_pub *drvr, int ifidx,\
struct sk_buff *pkt);
-extern int brcmf_bus_start(struct brcmf_pub *drvr);
-
extern void brcmf_c_pktfilter_offload_set(struct brcmf_pub *drvr, char *arg);
extern void brcmf_c_pktfilter_offload_enable(struct brcmf_pub *drvr, char *arg,
int enable, int master_mode);
@@ -752,25 +667,4 @@ extern void brcmf_c_pktfilter_offload_enable(struct brcmf_pub *drvr, char *arg,
#define BRCMF_DCMD_MEDLEN 1536 /* "med" cmd buffer required */
#define BRCMF_DCMD_MAXLEN 8192 /* max length cmd buffer required */
-/* message levels */
-#define BRCMF_ERROR_VAL 0x0001
-#define BRCMF_TRACE_VAL 0x0002
-#define BRCMF_INFO_VAL 0x0004
-#define BRCMF_DATA_VAL 0x0008
-#define BRCMF_CTL_VAL 0x0010
-#define BRCMF_TIMER_VAL 0x0020
-#define BRCMF_HDRS_VAL 0x0040
-#define BRCMF_BYTES_VAL 0x0080
-#define BRCMF_INTR_VAL 0x0100
-#define BRCMF_GLOM_VAL 0x0400
-#define BRCMF_EVENT_VAL 0x0800
-#define BRCMF_BTA_VAL 0x1000
-#define BRCMF_ISCAN_VAL 0x2000
-
-/* Enter idle immediately (no timeout) */
-#define BRCMF_IDLE_IMMEDIATE (-1)
-#define BRCMF_IDLE_ACTIVE 0 /* Do not request any SD clock change
- when idle */
-#define BRCMF_IDLE_INTERVAL 1
-
#endif /* _BRCMF_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
index a249407c9a1b..ad9be2410b59 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
@@ -17,41 +17,89 @@
#ifndef _BRCMF_BUS_H_
#define _BRCMF_BUS_H_
-/* Packet alignment for most efficient SDIO (can change based on platform) */
-#define BRCMF_SDALIGN (1 << 6)
+/* The level of bus communication with the dongle */
+enum brcmf_bus_state {
+ BRCMF_BUS_DOWN, /* Not ready for frame transfers */
+ BRCMF_BUS_LOAD, /* Download access only (CPU reset) */
+ BRCMF_BUS_DATA /* Ready for frame transfers */
+};
-/* watchdog polling interval in ms */
-#define BRCMF_WD_POLL_MS 10
+struct dngl_stats {
+ unsigned long rx_packets; /* total packets received */
+ unsigned long tx_packets; /* total packets transmitted */
+ unsigned long rx_bytes; /* total bytes received */
+ unsigned long tx_bytes; /* total bytes transmitted */
+ unsigned long rx_errors; /* bad packets received */
+ unsigned long tx_errors; /* packet transmit problems */
+ unsigned long rx_dropped; /* packets dropped by dongle */
+ unsigned long tx_dropped; /* packets dropped by dongle */
+ unsigned long multicast; /* multicast packets received */
+};
+
+/* interface structure between common and bus layer */
+struct brcmf_bus {
+ u8 type; /* bus type */
+ void *bus_priv; /* pointer to bus private structure */
+ void *drvr; /* pointer to driver pub structure brcmf_pub */
+ enum brcmf_bus_state state;
+ uint maxctl; /* Max size rxctl request from proto to bus */
+ bool drvr_up; /* Status flag of driver up/down */
+ unsigned long tx_realloc; /* Tx packets realloced for headroom */
+ struct dngl_stats dstats; /* Stats for dongle-based data */
+ u8 align; /* bus alignment requirement */
+
+ /* interface functions pointers */
+ /* Stop bus module: clear pending frames, disable data flow */
+ void (*brcmf_bus_stop)(struct device *);
+ /* Initialize bus module: prepare for communication w/dongle */
+ int (*brcmf_bus_init)(struct device *);
+ /* Send a data frame to the dongle. Callee disposes of txp. */
+ int (*brcmf_bus_txdata)(struct device *, struct sk_buff *);
+ /* Send/receive a control message to/from the dongle.
+ * Expects caller to enforce a single outstanding transaction.
+ */
+ int (*brcmf_bus_txctl)(struct device *, unsigned char *, uint);
+ int (*brcmf_bus_rxctl)(struct device *, unsigned char *, uint);
+};
/*
- * Exported from brcmf bus module (brcmf_usb, brcmf_sdio)
+ * interface functions from common layer
*/
-/* Indicate (dis)interest in finding dongles. */
-extern int brcmf_bus_register(void);
-extern void brcmf_bus_unregister(void);
+/* Remove any protocol-specific data header. */
+extern int brcmf_proto_hdrpull(struct device *dev, int *ifidx,
+ struct sk_buff *rxp);
-/* obtain linux device object providing bus function */
-extern struct device *brcmf_bus_get_device(struct brcmf_bus *bus);
+extern bool brcmf_c_prec_enq(struct device *dev, struct pktq *q,
+ struct sk_buff *pkt, int prec);
-/* Stop bus module: clear pending frames, disable data flow */
-extern void brcmf_sdbrcm_bus_stop(struct brcmf_bus *bus);
+/* Receive frame for delivery to OS. Callee disposes of rxp. */
+extern void brcmf_rx_frame(struct device *dev, int ifidx,
+ struct sk_buff_head *rxlist);
+static inline void brcmf_rx_packet(struct device *dev, int ifidx,
+ struct sk_buff *pkt)
+{
+ struct sk_buff_head q;
-/* Initialize bus module: prepare for communication w/dongle */
-extern int brcmf_sdbrcm_bus_init(struct brcmf_pub *drvr);
+ skb_queue_head_init(&q);
+ skb_queue_tail(&q, pkt);
+ brcmf_rx_frame(dev, ifidx, &q);
+}
-/* Send a data frame to the dongle. Callee disposes of txp. */
-extern int brcmf_sdbrcm_bus_txdata(struct brcmf_bus *bus, struct sk_buff *txp);
+/* Indication from bus module regarding presence/insertion of dongle. */
+extern int brcmf_attach(uint bus_hdrlen, struct device *dev);
+/* Indication from bus module regarding removal/absence of dongle */
+extern void brcmf_detach(struct device *dev);
-/* Send/receive a control message to/from the dongle.
- * Expects caller to enforce a single outstanding transaction.
- */
-extern int
-brcmf_sdbrcm_bus_txctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen);
+/* Indication from bus module to change flow-control state */
+extern void brcmf_txflowcontrol(struct device *dev, int ifidx, bool on);
-extern int
-brcmf_sdbrcm_bus_rxctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen);
+/* Notify tx completion */
+extern void brcmf_txcomplete(struct device *dev, struct sk_buff *txp,
+ bool success);
-extern void brcmf_sdbrcm_wd_timer(struct brcmf_bus *bus, uint wdtick);
+extern int brcmf_bus_start(struct device *dev);
+extern int brcmf_add_if(struct device *dev, int ifidx,
+ char *name, u8 *mac_addr);
#endif /* _BRCMF_BUS_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
index e34c5c3d1d55..ac8d1f437888 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
@@ -58,7 +58,7 @@ struct brcmf_proto_cdc_dcmd {
* Used on data packets to convey priority across USB.
*/
#define BDC_HEADER_LEN 4
-#define BDC_PROTO_VER 1 /* Protocol version */
+#define BDC_PROTO_VER 2 /* Protocol version */
#define BDC_FLAG_VER_MASK 0xf0 /* Protocol version mask */
#define BDC_FLAG_VER_SHIFT 4 /* Protocol version shift */
#define BDC_FLAG_SUM_GOOD 0x04 /* Good RX checksums */
@@ -77,18 +77,19 @@ struct brcmf_proto_bdc_header {
u8 flags;
u8 priority; /* 802.1d Priority, 4:7 flow control info for usb */
u8 flags2;
- u8 rssi;
+ u8 data_offset;
};
#define RETRIES 2 /* # of retries to retrieve matching dcmd response */
-#define BUS_HEADER_LEN (16+BRCMF_SDALIGN) /* Must be atleast SDPCM_RESERVE
+#define BUS_HEADER_LEN (16+64) /* Must be atleast SDPCM_RESERVE
* (amount of header tha might be added)
* plus any space that might be needed
- * for alignment padding.
+ * for bus alignment padding.
*/
-#define ROUND_UP_MARGIN 2048 /* Biggest SDIO block size possible for
+#define ROUND_UP_MARGIN 2048 /* Biggest bus block size possible for
* round off at the end of buffer
+ * Currently is SDIO
*/
struct brcmf_proto {
@@ -116,8 +117,9 @@ static int brcmf_proto_cdc_msg(struct brcmf_pub *drvr)
len = CDC_MAX_MSG_SIZE;
/* Send request */
- return brcmf_sdbrcm_bus_txctl(drvr->bus, (unsigned char *)&prot->msg,
- len);
+ return drvr->bus_if->brcmf_bus_txctl(drvr->dev,
+ (unsigned char *)&prot->msg,
+ len);
}
static int brcmf_proto_cdc_cmplt(struct brcmf_pub *drvr, u32 id, u32 len)
@@ -128,7 +130,7 @@ static int brcmf_proto_cdc_cmplt(struct brcmf_pub *drvr, u32 id, u32 len)
brcmf_dbg(TRACE, "Enter\n");
do {
- ret = brcmf_sdbrcm_bus_rxctl(drvr->bus,
+ ret = drvr->bus_if->brcmf_bus_rxctl(drvr->dev,
(unsigned char *)&prot->msg,
len + sizeof(struct brcmf_proto_cdc_dcmd));
if (ret < 0)
@@ -280,11 +282,11 @@ brcmf_proto_dcmd(struct brcmf_pub *drvr, int ifidx, struct brcmf_dcmd *dcmd,
struct brcmf_proto *prot = drvr->prot;
int ret = -1;
- if (drvr->busstate == BRCMF_BUS_DOWN) {
+ if (drvr->bus_if->state == BRCMF_BUS_DOWN) {
brcmf_dbg(ERROR, "bus is down. we have nothing to do.\n");
return ret;
}
- brcmf_os_proto_block(drvr);
+ mutex_lock(&drvr->proto_block);
brcmf_dbg(TRACE, "Enter\n");
@@ -338,7 +340,7 @@ brcmf_proto_dcmd(struct brcmf_pub *drvr, int ifidx, struct brcmf_dcmd *dcmd,
prot->pending = false;
done:
- brcmf_os_proto_unblock(drvr);
+ mutex_unlock(&drvr->proto_block);
return ret;
}
@@ -372,14 +374,16 @@ void brcmf_proto_hdrpush(struct brcmf_pub *drvr, int ifidx,
h->priority = (pktbuf->priority & BDC_PRIORITY_MASK);
h->flags2 = 0;
- h->rssi = 0;
+ h->data_offset = 0;
BDC_SET_IF_IDX(h, ifidx);
}
-int brcmf_proto_hdrpull(struct brcmf_pub *drvr, int *ifidx,
+int brcmf_proto_hdrpull(struct device *dev, int *ifidx,
struct sk_buff *pktbuf)
{
struct brcmf_proto_bdc_header *h;
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_pub *drvr = bus_if->drvr;
brcmf_dbg(TRACE, "Enter\n");
@@ -435,7 +439,7 @@ int brcmf_proto_attach(struct brcmf_pub *drvr)
drvr->prot = cdc;
drvr->hdrlen += BDC_HEADER_LEN;
- drvr->maxctl = BRCMF_DCMD_MAXLEN +
+ drvr->bus_if->maxctl = BRCMF_DCMD_MAXLEN +
sizeof(struct brcmf_proto_cdc_dcmd) + ROUND_UP_MARGIN;
return 0;
@@ -451,18 +455,6 @@ void brcmf_proto_detach(struct brcmf_pub *drvr)
drvr->prot = NULL;
}
-void brcmf_proto_dstats(struct brcmf_pub *drvr)
-{
- /* No stats from dongle added yet, copy bus stats */
- drvr->dstats.tx_packets = drvr->tx_packets;
- drvr->dstats.tx_errors = drvr->tx_errors;
- drvr->dstats.rx_packets = drvr->rx_packets;
- drvr->dstats.rx_errors = drvr->rx_errors;
- drvr->dstats.rx_dropped = drvr->rx_dropped;
- drvr->dstats.multicast = drvr->rx_multicast;
- return;
-}
-
int brcmf_proto_init(struct brcmf_pub *drvr)
{
int ret = 0;
@@ -470,19 +462,19 @@ int brcmf_proto_init(struct brcmf_pub *drvr)
brcmf_dbg(TRACE, "Enter\n");
- brcmf_os_proto_block(drvr);
+ mutex_lock(&drvr->proto_block);
/* Get the device MAC address */
strcpy(buf, "cur_etheraddr");
ret = brcmf_proto_cdc_query_dcmd(drvr, 0, BRCMF_C_GET_VAR,
buf, sizeof(buf));
if (ret < 0) {
- brcmf_os_proto_unblock(drvr);
+ mutex_unlock(&drvr->proto_block);
return ret;
}
memcpy(drvr->mac, buf, ETH_ALEN);
- brcmf_os_proto_unblock(drvr);
+ mutex_unlock(&drvr->proto_block);
ret = brcmf_c_preinit_dcmds(drvr);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
index 891826197f96..a51d8f5d36fc 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
@@ -32,8 +32,6 @@
#define PKTFILTER_BUF_SIZE 2048
#define BRCMF_ARPOL_MODE 0xb /* agent|snoop|peer_autoreply */
-int brcmf_msg_level;
-
#define MSGTRACE_VERSION 1
#define BRCMF_PKT_FILTER_FIXED_LEN offsetof(struct brcmf_pkt_filter_le, u)
@@ -85,25 +83,14 @@ brcmf_c_mkiovar(char *name, char *data, uint datalen, char *buf, uint buflen)
return len;
}
-void brcmf_c_init(void)
-{
- /* Init global variables at run-time, not as part of the declaration.
- * This is required to support init/de-init of the driver.
- * Initialization
- * of globals as part of the declaration results in non-deterministic
- * behaviour since the value of the globals may be different on the
- * first time that the driver is initialized vs subsequent
- * initializations.
- */
- brcmf_msg_level = BRCMF_ERROR_VAL;
-}
-
-bool brcmf_c_prec_enq(struct brcmf_pub *drvr, struct pktq *q,
+bool brcmf_c_prec_enq(struct device *dev, struct pktq *q,
struct sk_buff *pkt, int prec)
{
struct sk_buff *p;
int eprec = -1; /* precedence to evict from */
bool discard_oldest;
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_pub *drvr = bus_if->drvr;
/* Fast case, precedence queue is not full and we are also not
* exceeding total queue length
@@ -446,7 +433,7 @@ brcmf_c_show_host_event(struct brcmf_event_msg *event, void *event_data)
#endif /* BCMDBG */
int
-brcmf_c_host_event(struct brcmf_info *drvr_priv, int *ifidx, void *pktdata,
+brcmf_c_host_event(struct brcmf_pub *drvr, int *ifidx, void *pktdata,
struct brcmf_event_msg *event, void **data_ptr)
{
/* check whether packet is a BRCM event pkt */
@@ -488,19 +475,18 @@ brcmf_c_host_event(struct brcmf_info *drvr_priv, int *ifidx, void *pktdata,
if (ifevent->ifidx > 0 && ifevent->ifidx < BRCMF_MAX_IFS) {
if (ifevent->action == BRCMF_E_IF_ADD)
- brcmf_add_if(drvr_priv, ifevent->ifidx, NULL,
+ brcmf_add_if(drvr->dev, ifevent->ifidx,
event->ifname,
- pvt_data->eth.h_dest,
- ifevent->flags, ifevent->bssidx);
+ pvt_data->eth.h_dest);
else
- brcmf_del_if(drvr_priv, ifevent->ifidx);
+ brcmf_del_if(drvr, ifevent->ifidx);
} else {
brcmf_dbg(ERROR, "Invalid ifidx %d for %s\n",
ifevent->ifidx, event->ifname);
}
/* send up the if event: btamp user needs it */
- *ifidx = brcmf_ifname2idx(drvr_priv, event->ifname);
+ *ifidx = brcmf_ifname2idx(drvr, event->ifname);
break;
/* These are what external supplicant/authenticator wants */
@@ -512,7 +498,7 @@ brcmf_c_host_event(struct brcmf_info *drvr_priv, int *ifidx, void *pktdata,
default:
/* Fall through: this should get _everything_ */
- *ifidx = brcmf_ifname2idx(drvr_priv, event->ifname);
+ *ifidx = brcmf_ifname2idx(drvr, event->ifname);
brcmf_dbg(TRACE, "MAC event %d, flags %x, status %x\n",
type, flags, status);
@@ -812,7 +798,7 @@ int brcmf_c_preinit_dcmds(struct brcmf_pub *drvr)
"event_msgs" + '\0' + bitvec */
uint up = 0;
char buf[128], *ptr;
- u32 dongle_align = BRCMF_SDALIGN;
+ u32 dongle_align = drvr->bus_if->align;
u32 glom = 0;
u32 roaming = 1;
uint bcn_timeout = 3;
@@ -820,7 +806,7 @@ int brcmf_c_preinit_dcmds(struct brcmf_pub *drvr)
int scan_unassoc_time = 40;
int i;
- brcmf_os_proto_block(drvr);
+ mutex_lock(&drvr->proto_block);
/* Set Country code */
if (drvr->country_code[0] != 0) {
@@ -889,7 +875,7 @@ int brcmf_c_preinit_dcmds(struct brcmf_pub *drvr)
0, true);
}
- brcmf_os_proto_unblock(drvr);
+ mutex_unlock(&drvr->proto_block);
return 0;
}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
index 7467922f0536..bb26ee36bc68 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
@@ -17,6 +17,21 @@
#ifndef _BRCMF_DBG_H_
#define _BRCMF_DBG_H_
+/* message levels */
+#define BRCMF_ERROR_VAL 0x0001
+#define BRCMF_TRACE_VAL 0x0002
+#define BRCMF_INFO_VAL 0x0004
+#define BRCMF_DATA_VAL 0x0008
+#define BRCMF_CTL_VAL 0x0010
+#define BRCMF_TIMER_VAL 0x0020
+#define BRCMF_HDRS_VAL 0x0040
+#define BRCMF_BYTES_VAL 0x0080
+#define BRCMF_INTR_VAL 0x0100
+#define BRCMF_GLOM_VAL 0x0400
+#define BRCMF_EVENT_VAL 0x0800
+#define BRCMF_BTA_VAL 0x1000
+#define BRCMF_ISCAN_VAL 0x2000
+
#if defined(BCMDBG)
#define brcmf_dbg(level, fmt, ...) \
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
index 4acbac5a74c6..eb9eb766ac27 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
@@ -43,7 +43,6 @@
#include "dhd_proto.h"
#include "dhd_dbg.h"
#include "wl_cfg80211.h"
-#include "bcmchip.h"
MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Broadcom 802.11n wireless LAN fullmac driver.");
@@ -53,48 +52,19 @@ MODULE_LICENSE("Dual BSD/GPL");
/* Interface control information */
struct brcmf_if {
- struct brcmf_info *info; /* back pointer to brcmf_info */
+ struct brcmf_pub *drvr; /* back pointer to brcmf_pub */
/* OS/stack specifics */
struct net_device *ndev;
struct net_device_stats stats;
int idx; /* iface idx in dongle */
- int state; /* interface state */
u8 mac_addr[ETH_ALEN]; /* assigned MAC address */
};
-/* Local private structure (extension of pub) */
-struct brcmf_info {
- struct brcmf_pub pub;
-
- /* OS/stack specifics */
- struct brcmf_if *iflist[BRCMF_MAX_IFS];
-
- struct mutex proto_block;
-
- struct work_struct setmacaddr_work;
- struct work_struct multicast_work;
- u8 macvalue[ETH_ALEN];
- atomic_t pend_8021x_cnt;
-};
-
/* Error bits */
+int brcmf_msg_level = BRCMF_ERROR_VAL;
module_param(brcmf_msg_level, int, 0);
-
-static int brcmf_net2idx(struct brcmf_info *drvr_priv, struct net_device *ndev)
-{
- int i = 0;
-
- while (i < BRCMF_MAX_IFS) {
- if (drvr_priv->iflist[i] && drvr_priv->iflist[i]->ndev == ndev)
- return i;
- i++;
- }
-
- return BRCMF_BAD_IF;
-}
-
-int brcmf_ifname2idx(struct brcmf_info *drvr_priv, char *name)
+int brcmf_ifname2idx(struct brcmf_pub *drvr, char *name)
{
int i = BRCMF_MAX_IFS;
struct brcmf_if *ifp;
@@ -103,7 +73,7 @@ int brcmf_ifname2idx(struct brcmf_info *drvr_priv, char *name)
return 0;
while (--i > 0) {
- ifp = drvr_priv->iflist[i];
+ ifp = drvr->iflist[i];
if (ifp && !strncmp(ifp->ndev->name, name, IFNAMSIZ))
break;
}
@@ -115,20 +85,18 @@ int brcmf_ifname2idx(struct brcmf_info *drvr_priv, char *name)
char *brcmf_ifname(struct brcmf_pub *drvr, int ifidx)
{
- struct brcmf_info *drvr_priv = drvr->info;
-
if (ifidx < 0 || ifidx >= BRCMF_MAX_IFS) {
brcmf_dbg(ERROR, "ifidx %d out of range\n", ifidx);
return "<if_bad>";
}
- if (drvr_priv->iflist[ifidx] == NULL) {
+ if (drvr->iflist[ifidx] == NULL) {
brcmf_dbg(ERROR, "null i/f %d\n", ifidx);
return "<if_null>";
}
- if (drvr_priv->iflist[ifidx]->ndev)
- return drvr_priv->iflist[ifidx]->ndev->name;
+ if (drvr->iflist[ifidx]->ndev)
+ return drvr->iflist[ifidx]->ndev->name;
return "<if_none>";
}
@@ -146,10 +114,10 @@ static void _brcmf_set_multicast_list(struct work_struct *work)
uint buflen;
int ret;
- struct brcmf_info *drvr_priv = container_of(work, struct brcmf_info,
+ struct brcmf_pub *drvr = container_of(work, struct brcmf_pub,
multicast_work);
- ndev = drvr_priv->iflist[0]->ndev;
+ ndev = drvr->iflist[0]->ndev;
cnt = netdev_mc_count(ndev);
/* Determine initial value of allmulti flag */
@@ -183,10 +151,10 @@ static void _brcmf_set_multicast_list(struct work_struct *work)
dcmd.len = buflen;
dcmd.set = true;
- ret = brcmf_proto_dcmd(&drvr_priv->pub, 0, &dcmd, dcmd.len);
+ ret = brcmf_proto_dcmd(drvr, 0, &dcmd, dcmd.len);
if (ret < 0) {
brcmf_dbg(ERROR, "%s: set mcast_list failed, cnt %d\n",
- brcmf_ifname(&drvr_priv->pub, 0), cnt);
+ brcmf_ifname(drvr, 0), cnt);
dcmd_value = cnt ? true : dcmd_value;
}
@@ -208,7 +176,7 @@ static void _brcmf_set_multicast_list(struct work_struct *work)
("allmulti", (void *)&dcmd_le_value,
sizeof(dcmd_le_value), buf, buflen)) {
brcmf_dbg(ERROR, "%s: mkiovar failed for allmulti, datalen %d buflen %u\n",
- brcmf_ifname(&drvr_priv->pub, 0),
+ brcmf_ifname(drvr, 0),
(int)sizeof(dcmd_value), buflen);
kfree(buf);
return;
@@ -220,10 +188,10 @@ static void _brcmf_set_multicast_list(struct work_struct *work)
dcmd.len = buflen;
dcmd.set = true;
- ret = brcmf_proto_dcmd(&drvr_priv->pub, 0, &dcmd, dcmd.len);
+ ret = brcmf_proto_dcmd(drvr, 0, &dcmd, dcmd.len);
if (ret < 0) {
brcmf_dbg(ERROR, "%s: set allmulti %d failed\n",
- brcmf_ifname(&drvr_priv->pub, 0),
+ brcmf_ifname(drvr, 0),
le32_to_cpu(dcmd_le_value));
}
@@ -241,10 +209,10 @@ static void _brcmf_set_multicast_list(struct work_struct *work)
dcmd.len = sizeof(dcmd_le_value);
dcmd.set = true;
- ret = brcmf_proto_dcmd(&drvr_priv->pub, 0, &dcmd, dcmd.len);
+ ret = brcmf_proto_dcmd(drvr, 0, &dcmd, dcmd.len);
if (ret < 0) {
brcmf_dbg(ERROR, "%s: set promisc %d failed\n",
- brcmf_ifname(&drvr_priv->pub, 0),
+ brcmf_ifname(drvr, 0),
le32_to_cpu(dcmd_le_value));
}
}
@@ -256,14 +224,14 @@ _brcmf_set_mac_address(struct work_struct *work)
struct brcmf_dcmd dcmd;
int ret;
- struct brcmf_info *drvr_priv = container_of(work, struct brcmf_info,
+ struct brcmf_pub *drvr = container_of(work, struct brcmf_pub,
setmacaddr_work);
brcmf_dbg(TRACE, "enter\n");
- if (!brcmf_c_mkiovar("cur_etheraddr", (char *)drvr_priv->macvalue,
+ if (!brcmf_c_mkiovar("cur_etheraddr", (char *)drvr->macvalue,
ETH_ALEN, buf, 32)) {
brcmf_dbg(ERROR, "%s: mkiovar failed for cur_etheraddr\n",
- brcmf_ifname(&drvr_priv->pub, 0));
+ brcmf_ifname(drvr, 0));
return;
}
memset(&dcmd, 0, sizeof(dcmd));
@@ -272,52 +240,40 @@ _brcmf_set_mac_address(struct work_struct *work)
dcmd.len = 32;
dcmd.set = true;
- ret = brcmf_proto_dcmd(&drvr_priv->pub, 0, &dcmd, dcmd.len);
+ ret = brcmf_proto_dcmd(drvr, 0, &dcmd, dcmd.len);
if (ret < 0)
brcmf_dbg(ERROR, "%s: set cur_etheraddr failed\n",
- brcmf_ifname(&drvr_priv->pub, 0));
+ brcmf_ifname(drvr, 0));
else
- memcpy(drvr_priv->iflist[0]->ndev->dev_addr,
- drvr_priv->macvalue, ETH_ALEN);
+ memcpy(drvr->iflist[0]->ndev->dev_addr,
+ drvr->macvalue, ETH_ALEN);
return;
}
static int brcmf_netdev_set_mac_address(struct net_device *ndev, void *addr)
{
- struct brcmf_info *drvr_priv = *(struct brcmf_info **)
- netdev_priv(ndev);
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_pub *drvr = ifp->drvr;
struct sockaddr *sa = (struct sockaddr *)addr;
- int ifidx;
-
- ifidx = brcmf_net2idx(drvr_priv, ndev);
- if (ifidx == BRCMF_BAD_IF)
- return -1;
- memcpy(&drvr_priv->macvalue, sa->sa_data, ETH_ALEN);
- schedule_work(&drvr_priv->setmacaddr_work);
+ memcpy(&drvr->macvalue, sa->sa_data, ETH_ALEN);
+ schedule_work(&drvr->setmacaddr_work);
return 0;
}
static void brcmf_netdev_set_multicast_list(struct net_device *ndev)
{
- struct brcmf_info *drvr_priv = *(struct brcmf_info **)
- netdev_priv(ndev);
- int ifidx;
-
- ifidx = brcmf_net2idx(drvr_priv, ndev);
- if (ifidx == BRCMF_BAD_IF)
- return;
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_pub *drvr = ifp->drvr;
- schedule_work(&drvr_priv->multicast_work);
+ schedule_work(&drvr->multicast_work);
}
int brcmf_sendpkt(struct brcmf_pub *drvr, int ifidx, struct sk_buff *pktbuf)
{
- struct brcmf_info *drvr_priv = drvr->info;
-
/* Reject if down */
- if (!drvr->up || (drvr->busstate == BRCMF_BUS_DOWN))
+ if (!drvr->bus_if->drvr_up || (drvr->bus_if->state == BRCMF_BUS_DOWN))
return -ENODEV;
/* Update multicast statistic */
@@ -328,122 +284,118 @@ int brcmf_sendpkt(struct brcmf_pub *drvr, int ifidx, struct sk_buff *pktbuf)
if (is_multicast_ether_addr(eh->h_dest))
drvr->tx_multicast++;
if (ntohs(eh->h_proto) == ETH_P_PAE)
- atomic_inc(&drvr_priv->pend_8021x_cnt);
+ atomic_inc(&drvr->pend_8021x_cnt);
}
/* If the protocol uses a data header, apply it */
brcmf_proto_hdrpush(drvr, ifidx, pktbuf);
/* Use bus module to send data frame */
- return brcmf_sdbrcm_bus_txdata(drvr->bus, pktbuf);
+ return drvr->bus_if->brcmf_bus_txdata(drvr->dev, pktbuf);
}
static int brcmf_netdev_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
int ret;
- struct brcmf_info *drvr_priv = *(struct brcmf_info **)
- netdev_priv(ndev);
- int ifidx;
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_pub *drvr = ifp->drvr;
brcmf_dbg(TRACE, "Enter\n");
/* Reject if down */
- if (!drvr_priv->pub.up || (drvr_priv->pub.busstate == BRCMF_BUS_DOWN)) {
- brcmf_dbg(ERROR, "xmit rejected pub.up=%d busstate=%d\n",
- drvr_priv->pub.up, drvr_priv->pub.busstate);
+ if (!drvr->bus_if->drvr_up ||
+ (drvr->bus_if->state == BRCMF_BUS_DOWN)) {
+ brcmf_dbg(ERROR, "xmit rejected drvup=%d state=%d\n",
+ drvr->bus_if->drvr_up,
+ drvr->bus_if->state);
netif_stop_queue(ndev);
return -ENODEV;
}
- ifidx = brcmf_net2idx(drvr_priv, ndev);
- if (ifidx == BRCMF_BAD_IF) {
- brcmf_dbg(ERROR, "bad ifidx %d\n", ifidx);
+ if (!drvr->iflist[ifp->idx]) {
+ brcmf_dbg(ERROR, "bad ifidx %d\n", ifp->idx);
netif_stop_queue(ndev);
return -ENODEV;
}
/* Make sure there's enough room for any header */
- if (skb_headroom(skb) < drvr_priv->pub.hdrlen) {
+ if (skb_headroom(skb) < drvr->hdrlen) {
struct sk_buff *skb2;
brcmf_dbg(INFO, "%s: insufficient headroom\n",
- brcmf_ifname(&drvr_priv->pub, ifidx));
- drvr_priv->pub.tx_realloc++;
- skb2 = skb_realloc_headroom(skb, drvr_priv->pub.hdrlen);
+ brcmf_ifname(drvr, ifp->idx));
+ drvr->bus_if->tx_realloc++;
+ skb2 = skb_realloc_headroom(skb, drvr->hdrlen);
dev_kfree_skb(skb);
skb = skb2;
if (skb == NULL) {
brcmf_dbg(ERROR, "%s: skb_realloc_headroom failed\n",
- brcmf_ifname(&drvr_priv->pub, ifidx));
+ brcmf_ifname(drvr, ifp->idx));
ret = -ENOMEM;
goto done;
}
}
- ret = brcmf_sendpkt(&drvr_priv->pub, ifidx, skb);
+ ret = brcmf_sendpkt(drvr, ifp->idx, skb);
done:
if (ret)
- drvr_priv->pub.dstats.tx_dropped++;
+ drvr->bus_if->dstats.tx_dropped++;
else
- drvr_priv->pub.tx_packets++;
+ drvr->bus_if->dstats.tx_packets++;
/* Return ok: we always eat the packet */
return 0;
}
-void brcmf_txflowcontrol(struct brcmf_pub *drvr, int ifidx, bool state)
+void brcmf_txflowcontrol(struct device *dev, int ifidx, bool state)
{
struct net_device *ndev;
- struct brcmf_info *drvr_priv = drvr->info;
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_pub *drvr = bus_if->drvr;
brcmf_dbg(TRACE, "Enter\n");
- drvr->txoff = state;
- ndev = drvr_priv->iflist[ifidx]->ndev;
+ ndev = drvr->iflist[ifidx]->ndev;
if (state == ON)
netif_stop_queue(ndev);
else
netif_wake_queue(ndev);
}
-static int brcmf_host_event(struct brcmf_info *drvr_priv, int *ifidx,
+static int brcmf_host_event(struct brcmf_pub *drvr, int *ifidx,
void *pktdata, struct brcmf_event_msg *event,
void **data)
{
int bcmerror = 0;
- bcmerror = brcmf_c_host_event(drvr_priv, ifidx, pktdata, event, data);
+ bcmerror = brcmf_c_host_event(drvr, ifidx, pktdata, event, data);
if (bcmerror != 0)
return bcmerror;
- if (drvr_priv->iflist[*ifidx]->ndev)
- brcmf_cfg80211_event(drvr_priv->iflist[*ifidx]->ndev,
+ if (drvr->iflist[*ifidx]->ndev)
+ brcmf_cfg80211_event(drvr->iflist[*ifidx]->ndev,
event, *data);
return bcmerror;
}
-void brcmf_rx_frame(struct brcmf_pub *drvr, int ifidx, struct sk_buff *skb,
- int numpkt)
+void brcmf_rx_frame(struct device *dev, int ifidx,
+ struct sk_buff_head *skb_list)
{
- struct brcmf_info *drvr_priv = drvr->info;
unsigned char *eth;
uint len;
void *data;
- struct sk_buff *pnext, *save_pktbuf;
- int i;
+ struct sk_buff *skb, *pnext;
struct brcmf_if *ifp;
struct brcmf_event_msg event;
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_pub *drvr = bus_if->drvr;
brcmf_dbg(TRACE, "Enter\n");
- save_pktbuf = skb;
-
- for (i = 0; skb && i < numpkt; i++, skb = pnext) {
-
- pnext = skb->next;
- skb->next = NULL;
+ skb_queue_walk_safe(skb_list, skb, pnext) {
+ skb_unlink(skb, skb_list);
/* Get the protocol, maintain skb around eth_type_trans()
* The main reason for this hack is for the limitation of
@@ -460,15 +412,21 @@ void brcmf_rx_frame(struct brcmf_pub *drvr, int ifidx, struct sk_buff *skb,
eth = skb->data;
len = skb->len;
- ifp = drvr_priv->iflist[ifidx];
+ ifp = drvr->iflist[ifidx];
if (ifp == NULL)
- ifp = drvr_priv->iflist[0];
+ ifp = drvr->iflist[0];
+
+ if (!ifp || !ifp->ndev ||
+ ifp->ndev->reg_state != NETREG_REGISTERED) {
+ brcmu_pkt_buf_free_skb(skb);
+ continue;
+ }
skb->dev = ifp->ndev;
skb->protocol = eth_type_trans(skb, skb->dev);
if (skb->pkt_type == PACKET_MULTICAST)
- drvr_priv->pub.rx_multicast++;
+ bus_if->dstats.multicast++;
skb->data = eth;
skb->len = len;
@@ -478,19 +436,17 @@ void brcmf_rx_frame(struct brcmf_pub *drvr, int ifidx, struct sk_buff *skb,
/* Process special event packets and then discard them */
if (ntohs(skb->protocol) == ETH_P_LINK_CTL)
- brcmf_host_event(drvr_priv, &ifidx,
+ brcmf_host_event(drvr, &ifidx,
skb_mac_header(skb),
&event, &data);
- if (drvr_priv->iflist[ifidx] &&
- !drvr_priv->iflist[ifidx]->state)
- ifp = drvr_priv->iflist[ifidx];
-
- if (ifp->ndev)
+ if (drvr->iflist[ifidx]) {
+ ifp = drvr->iflist[ifidx];
ifp->ndev->last_rx = jiffies;
+ }
- drvr->dstats.rx_bytes += skb->len;
- drvr->rx_packets++; /* Local count */
+ bus_if->dstats.rx_bytes += skb->len;
+ bus_if->dstats.rx_packets++; /* Local count */
if (in_interrupt())
netif_rx(skb);
@@ -505,59 +461,48 @@ void brcmf_rx_frame(struct brcmf_pub *drvr, int ifidx, struct sk_buff *skb,
}
}
-void brcmf_txcomplete(struct brcmf_pub *drvr, struct sk_buff *txp, bool success)
+void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success)
{
uint ifidx;
- struct brcmf_info *drvr_priv = drvr->info;
struct ethhdr *eh;
u16 type;
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_pub *drvr = bus_if->drvr;
- brcmf_proto_hdrpull(drvr, &ifidx, txp);
+ brcmf_proto_hdrpull(dev, &ifidx, txp);
eh = (struct ethhdr *)(txp->data);
type = ntohs(eh->h_proto);
if (type == ETH_P_PAE)
- atomic_dec(&drvr_priv->pend_8021x_cnt);
+ atomic_dec(&drvr->pend_8021x_cnt);
}
static struct net_device_stats *brcmf_netdev_get_stats(struct net_device *ndev)
{
- struct brcmf_info *drvr_priv = *(struct brcmf_info **)
- netdev_priv(ndev);
- struct brcmf_if *ifp;
- int ifidx;
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_bus *bus_if = ifp->drvr->bus_if;
brcmf_dbg(TRACE, "Enter\n");
- ifidx = brcmf_net2idx(drvr_priv, ndev);
- if (ifidx == BRCMF_BAD_IF)
- return NULL;
-
- ifp = drvr_priv->iflist[ifidx];
-
- if (drvr_priv->pub.up)
- /* Use the protocol to get dongle stats */
- brcmf_proto_dstats(&drvr_priv->pub);
-
/* Copy dongle stats to net device stats */
- ifp->stats.rx_packets = drvr_priv->pub.dstats.rx_packets;
- ifp->stats.tx_packets = drvr_priv->pub.dstats.tx_packets;
- ifp->stats.rx_bytes = drvr_priv->pub.dstats.rx_bytes;
- ifp->stats.tx_bytes = drvr_priv->pub.dstats.tx_bytes;
- ifp->stats.rx_errors = drvr_priv->pub.dstats.rx_errors;
- ifp->stats.tx_errors = drvr_priv->pub.dstats.tx_errors;
- ifp->stats.rx_dropped = drvr_priv->pub.dstats.rx_dropped;
- ifp->stats.tx_dropped = drvr_priv->pub.dstats.tx_dropped;
- ifp->stats.multicast = drvr_priv->pub.dstats.multicast;
+ ifp->stats.rx_packets = bus_if->dstats.rx_packets;
+ ifp->stats.tx_packets = bus_if->dstats.tx_packets;
+ ifp->stats.rx_bytes = bus_if->dstats.rx_bytes;
+ ifp->stats.tx_bytes = bus_if->dstats.tx_bytes;
+ ifp->stats.rx_errors = bus_if->dstats.rx_errors;
+ ifp->stats.tx_errors = bus_if->dstats.tx_errors;
+ ifp->stats.rx_dropped = bus_if->dstats.rx_dropped;
+ ifp->stats.tx_dropped = bus_if->dstats.tx_dropped;
+ ifp->stats.multicast = bus_if->dstats.multicast;
return &ifp->stats;
}
/* Retrieve current toe component enables, which are kept
as a bitmap in toe_ol iovar */
-static int brcmf_toe_get(struct brcmf_info *drvr_priv, int ifidx, u32 *toe_ol)
+static int brcmf_toe_get(struct brcmf_pub *drvr, int ifidx, u32 *toe_ol)
{
struct brcmf_dcmd dcmd;
__le32 toe_le;
@@ -572,17 +517,17 @@ static int brcmf_toe_get(struct brcmf_info *drvr_priv, int ifidx, u32 *toe_ol)
dcmd.set = false;
strcpy(buf, "toe_ol");
- ret = brcmf_proto_dcmd(&drvr_priv->pub, ifidx, &dcmd, dcmd.len);
+ ret = brcmf_proto_dcmd(drvr, ifidx, &dcmd, dcmd.len);
if (ret < 0) {
/* Check for older dongle image that doesn't support toe_ol */
if (ret == -EIO) {
brcmf_dbg(ERROR, "%s: toe not supported by device\n",
- brcmf_ifname(&drvr_priv->pub, ifidx));
+ brcmf_ifname(drvr, ifidx));
return -EOPNOTSUPP;
}
brcmf_dbg(INFO, "%s: could not get toe_ol: ret=%d\n",
- brcmf_ifname(&drvr_priv->pub, ifidx), ret);
+ brcmf_ifname(drvr, ifidx), ret);
return ret;
}
@@ -593,7 +538,7 @@ static int brcmf_toe_get(struct brcmf_info *drvr_priv, int ifidx, u32 *toe_ol)
/* Set current toe component enables in toe_ol iovar,
and set toe global enable iovar */
-static int brcmf_toe_set(struct brcmf_info *drvr_priv, int ifidx, u32 toe_ol)
+static int brcmf_toe_set(struct brcmf_pub *drvr, int ifidx, u32 toe_ol)
{
struct brcmf_dcmd dcmd;
char buf[32];
@@ -611,10 +556,10 @@ static int brcmf_toe_set(struct brcmf_info *drvr_priv, int ifidx, u32 toe_ol)
strcpy(buf, "toe_ol");
memcpy(&buf[sizeof("toe_ol")], &toe_le, sizeof(u32));
- ret = brcmf_proto_dcmd(&drvr_priv->pub, ifidx, &dcmd, dcmd.len);
+ ret = brcmf_proto_dcmd(drvr, ifidx, &dcmd, dcmd.len);
if (ret < 0) {
brcmf_dbg(ERROR, "%s: could not set toe_ol: ret=%d\n",
- brcmf_ifname(&drvr_priv->pub, ifidx), ret);
+ brcmf_ifname(drvr, ifidx), ret);
return ret;
}
@@ -624,10 +569,10 @@ static int brcmf_toe_set(struct brcmf_info *drvr_priv, int ifidx, u32 toe_ol)
strcpy(buf, "toe");
memcpy(&buf[sizeof("toe")], &toe_le, sizeof(u32));
- ret = brcmf_proto_dcmd(&drvr_priv->pub, ifidx, &dcmd, dcmd.len);
+ ret = brcmf_proto_dcmd(drvr, ifidx, &dcmd, dcmd.len);
if (ret < 0) {
brcmf_dbg(ERROR, "%s: could not set toe: ret=%d\n",
- brcmf_ifname(&drvr_priv->pub, ifidx), ret);
+ brcmf_ifname(drvr, ifidx), ret);
return ret;
}
@@ -637,21 +582,19 @@ static int brcmf_toe_set(struct brcmf_info *drvr_priv, int ifidx, u32 toe_ol)
static void brcmf_ethtool_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *info)
{
- struct brcmf_info *drvr_priv = *(struct brcmf_info **)
- netdev_priv(ndev);
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_pub *drvr = ifp->drvr;
sprintf(info->driver, KBUILD_MODNAME);
- sprintf(info->version, "%lu", drvr_priv->pub.drv_version);
- sprintf(info->fw_version, "%s", BCM4329_FW_NAME);
- sprintf(info->bus_info, "%s",
- dev_name(brcmf_bus_get_device(drvr_priv->pub.bus)));
+ sprintf(info->version, "%lu", drvr->drv_version);
+ sprintf(info->bus_info, "%s", dev_name(drvr->dev));
}
static struct ethtool_ops brcmf_ethtool_ops = {
.get_drvinfo = brcmf_ethtool_get_drvinfo
};
-static int brcmf_ethtool(struct brcmf_info *drvr_priv, void __user *uaddr)
+static int brcmf_ethtool(struct brcmf_pub *drvr, void __user *uaddr)
{
struct ethtool_drvinfo info;
char drvname[sizeof(info.driver)];
@@ -685,18 +628,18 @@ static int brcmf_ethtool(struct brcmf_info *drvr_priv, void __user *uaddr)
}
/* otherwise, require dongle to be up */
- else if (!drvr_priv->pub.up) {
+ else if (!drvr->bus_if->drvr_up) {
brcmf_dbg(ERROR, "dongle is not up\n");
return -ENODEV;
}
/* finally, report dongle driver type */
- else if (drvr_priv->pub.iswl)
+ else if (drvr->iswl)
sprintf(info.driver, "wl");
else
sprintf(info.driver, "xx");
- sprintf(info.version, "%lu", drvr_priv->pub.drv_version);
+ sprintf(info.version, "%lu", drvr->drv_version);
if (copy_to_user(uaddr, &info, sizeof(info)))
return -EFAULT;
brcmf_dbg(CTL, "given %*s, returning %s\n",
@@ -706,7 +649,7 @@ static int brcmf_ethtool(struct brcmf_info *drvr_priv, void __user *uaddr)
/* Get toe offload components from dongle */
case ETHTOOL_GRXCSUM:
case ETHTOOL_GTXCSUM:
- ret = brcmf_toe_get(drvr_priv, 0, &toe_cmpnt);
+ ret = brcmf_toe_get(drvr, 0, &toe_cmpnt);
if (ret < 0)
return ret;
@@ -727,7 +670,7 @@ static int brcmf_ethtool(struct brcmf_info *drvr_priv, void __user *uaddr)
return -EFAULT;
/* Read the current settings, update and write back */
- ret = brcmf_toe_get(drvr_priv, 0, &toe_cmpnt);
+ ret = brcmf_toe_get(drvr, 0, &toe_cmpnt);
if (ret < 0)
return ret;
@@ -739,17 +682,17 @@ static int brcmf_ethtool(struct brcmf_info *drvr_priv, void __user *uaddr)
else
toe_cmpnt &= ~csum_dir;
- ret = brcmf_toe_set(drvr_priv, 0, toe_cmpnt);
+ ret = brcmf_toe_set(drvr, 0, toe_cmpnt);
if (ret < 0)
return ret;
/* If setting TX checksum mode, tell Linux the new mode */
if (cmd == ETHTOOL_STXCSUM) {
if (edata.data)
- drvr_priv->iflist[0]->ndev->features |=
+ drvr->iflist[0]->ndev->features |=
NETIF_F_IP_CSUM;
else
- drvr_priv->iflist[0]->ndev->features &=
+ drvr->iflist[0]->ndev->features &=
~NETIF_F_IP_CSUM;
}
@@ -765,18 +708,16 @@ static int brcmf_ethtool(struct brcmf_info *drvr_priv, void __user *uaddr)
static int brcmf_netdev_ioctl_entry(struct net_device *ndev, struct ifreq *ifr,
int cmd)
{
- struct brcmf_info *drvr_priv = *(struct brcmf_info **)
- netdev_priv(ndev);
- int ifidx;
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_pub *drvr = ifp->drvr;
- ifidx = brcmf_net2idx(drvr_priv, ndev);
- brcmf_dbg(TRACE, "ifidx %d, cmd 0x%04x\n", ifidx, cmd);
+ brcmf_dbg(TRACE, "ifidx %d, cmd 0x%04x\n", ifp->idx, cmd);
- if (ifidx == BRCMF_BAD_IF)
+ if (!drvr->iflist[ifp->idx])
return -1;
if (cmd == SIOCETHTOOL)
- return brcmf_ethtool(drvr_priv, ifr->ifr_data);
+ return brcmf_ethtool(drvr, ifr->ifr_data);
return -EOPNOTSUPP;
}
@@ -788,28 +729,25 @@ s32 brcmf_exec_dcmd(struct net_device *ndev, u32 cmd, void *arg, u32 len)
s32 err = 0;
int buflen = 0;
bool is_set_key_cmd;
- struct brcmf_info *drvr_priv = *(struct brcmf_info **)
- netdev_priv(ndev);
- int ifidx;
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_pub *drvr = ifp->drvr;
memset(&dcmd, 0, sizeof(dcmd));
dcmd.cmd = cmd;
dcmd.buf = arg;
dcmd.len = len;
- ifidx = brcmf_net2idx(drvr_priv, ndev);
-
if (dcmd.buf != NULL)
buflen = min_t(uint, dcmd.len, BRCMF_DCMD_MAXLEN);
/* send to dongle (must be up, and wl) */
- if ((drvr_priv->pub.busstate != BRCMF_BUS_DATA)) {
+ if ((drvr->bus_if->state != BRCMF_BUS_DATA)) {
brcmf_dbg(ERROR, "DONGLE_DOWN\n");
err = -EIO;
goto done;
}
- if (!drvr_priv->pub.iswl) {
+ if (!drvr->iswl) {
err = -EIO;
goto done;
}
@@ -826,7 +764,7 @@ s32 brcmf_exec_dcmd(struct net_device *ndev, u32 cmd, void *arg, u32 len)
if (is_set_key_cmd)
brcmf_netdev_wait_pend8021x(ndev);
- err = brcmf_proto_dcmd(&drvr_priv->pub, ifidx, &dcmd, buflen);
+ err = brcmf_proto_dcmd(drvr, ifp->idx, &dcmd, buflen);
done:
if (err > 0)
@@ -837,15 +775,16 @@ done:
static int brcmf_netdev_stop(struct net_device *ndev)
{
- struct brcmf_pub *drvr = *(struct brcmf_pub **) netdev_priv(ndev);
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_pub *drvr = ifp->drvr;
brcmf_dbg(TRACE, "Enter\n");
brcmf_cfg80211_down(drvr->config);
- if (drvr->up == 0)
+ if (drvr->bus_if->drvr_up == 0)
return 0;
/* Set state and stop OS transmissions */
- drvr->up = 0;
+ drvr->bus_if->drvr_up = false;
netif_stop_queue(ndev);
return 0;
@@ -853,39 +792,37 @@ static int brcmf_netdev_stop(struct net_device *ndev)
static int brcmf_netdev_open(struct net_device *ndev)
{
- struct brcmf_info *drvr_priv = *(struct brcmf_info **)
- netdev_priv(ndev);
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_pub *drvr = ifp->drvr;
u32 toe_ol;
- int ifidx = brcmf_net2idx(drvr_priv, ndev);
s32 ret = 0;
- brcmf_dbg(TRACE, "ifidx %d\n", ifidx);
-
- if (ifidx == 0) { /* do it only for primary eth0 */
+ brcmf_dbg(TRACE, "ifidx %d\n", ifp->idx);
+ if (ifp->idx == 0) { /* do it only for primary eth0 */
/* try to bring up bus */
- ret = brcmf_bus_start(&drvr_priv->pub);
+ ret = brcmf_bus_start(drvr->dev);
if (ret != 0) {
brcmf_dbg(ERROR, "failed with code %d\n", ret);
return -1;
}
- atomic_set(&drvr_priv->pend_8021x_cnt, 0);
+ atomic_set(&drvr->pend_8021x_cnt, 0);
- memcpy(ndev->dev_addr, drvr_priv->pub.mac, ETH_ALEN);
+ memcpy(ndev->dev_addr, drvr->mac, ETH_ALEN);
/* Get current TOE mode from dongle */
- if (brcmf_toe_get(drvr_priv, ifidx, &toe_ol) >= 0
+ if (brcmf_toe_get(drvr, ifp->idx, &toe_ol) >= 0
&& (toe_ol & TOE_TX_CSUM_OL) != 0)
- drvr_priv->iflist[ifidx]->ndev->features |=
+ drvr->iflist[ifp->idx]->ndev->features |=
NETIF_F_IP_CSUM;
else
- drvr_priv->iflist[ifidx]->ndev->features &=
+ drvr->iflist[ifp->idx]->ndev->features &=
~NETIF_F_IP_CSUM;
}
/* Allow transmit calls */
netif_start_queue(ndev);
- drvr_priv->pub.up = 1;
- if (brcmf_cfg80211_up(drvr_priv->pub.config)) {
+ drvr->bus_if->drvr_up = true;
+ if (brcmf_cfg80211_up(drvr->config)) {
brcmf_dbg(ERROR, "failed to bring up cfg80211\n");
return -1;
}
@@ -893,193 +830,155 @@ static int brcmf_netdev_open(struct net_device *ndev)
return ret;
}
+static const struct net_device_ops brcmf_netdev_ops_pri = {
+ .ndo_open = brcmf_netdev_open,
+ .ndo_stop = brcmf_netdev_stop,
+ .ndo_get_stats = brcmf_netdev_get_stats,
+ .ndo_do_ioctl = brcmf_netdev_ioctl_entry,
+ .ndo_start_xmit = brcmf_netdev_start_xmit,
+ .ndo_set_mac_address = brcmf_netdev_set_mac_address,
+ .ndo_set_rx_mode = brcmf_netdev_set_multicast_list
+};
+
int
-brcmf_add_if(struct brcmf_info *drvr_priv, int ifidx, struct net_device *ndev,
- char *name, u8 *mac_addr, u32 flags, u8 bssidx)
+brcmf_add_if(struct device *dev, int ifidx, char *name, u8 *mac_addr)
{
struct brcmf_if *ifp;
- int ret = 0, err = 0;
+ struct net_device *ndev;
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_pub *drvr = bus_if->drvr;
- brcmf_dbg(TRACE, "idx %d, handle->%p\n", ifidx, ndev);
+ brcmf_dbg(TRACE, "idx %d\n", ifidx);
- ifp = drvr_priv->iflist[ifidx];
- if (!ifp) {
- ifp = kmalloc(sizeof(struct brcmf_if), GFP_ATOMIC);
- if (!ifp)
- return -ENOMEM;
+ ifp = drvr->iflist[ifidx];
+ /*
+ * Delete the existing interface before overwriting it
+ * in case we missed the BRCMF_E_IF_DEL event.
+ */
+ if (ifp) {
+ brcmf_dbg(ERROR, "ERROR: netdev:%s already exists, try free & unregister\n",
+ ifp->ndev->name);
+ netif_stop_queue(ifp->ndev);
+ unregister_netdev(ifp->ndev);
+ free_netdev(ifp->ndev);
+ drvr->iflist[ifidx] = NULL;
}
- memset(ifp, 0, sizeof(struct brcmf_if));
- ifp->info = drvr_priv;
- drvr_priv->iflist[ifidx] = ifp;
+ /* Allocate netdev, including space for private structure */
+ ndev = alloc_netdev(sizeof(struct brcmf_if), name, ether_setup);
+ if (!ndev) {
+ brcmf_dbg(ERROR, "OOM - alloc_netdev\n");
+ return -ENOMEM;
+ }
+
+ ifp = netdev_priv(ndev);
+ ifp->ndev = ndev;
+ ifp->drvr = drvr;
+ drvr->iflist[ifidx] = ifp;
+ ifp->idx = ifidx;
if (mac_addr != NULL)
memcpy(&ifp->mac_addr, mac_addr, ETH_ALEN);
- if (ndev == NULL) {
- ifp->state = BRCMF_E_IF_ADD;
- ifp->idx = ifidx;
- /*
- * Delete the existing interface before overwriting it
- * in case we missed the BRCMF_E_IF_DEL event.
- */
- if (ifp->ndev != NULL) {
- brcmf_dbg(ERROR, "ERROR: netdev:%s already exists, try free & unregister\n",
- ifp->ndev->name);
- netif_stop_queue(ifp->ndev);
- unregister_netdev(ifp->ndev);
- free_netdev(ifp->ndev);
- }
-
- /* Allocate netdev, including space for private structure */
- ifp->ndev = alloc_netdev(sizeof(drvr_priv), "wlan%d",
- ether_setup);
- if (!ifp->ndev) {
- brcmf_dbg(ERROR, "OOM - alloc_netdev\n");
- ret = -ENOMEM;
- }
-
- if (ret == 0) {
- memcpy(netdev_priv(ifp->ndev), &drvr_priv,
- sizeof(drvr_priv));
- err = brcmf_net_attach(&drvr_priv->pub, ifp->idx);
- if (err != 0) {
- brcmf_dbg(ERROR, "brcmf_net_attach failed, err %d\n",
- err);
- ret = -EOPNOTSUPP;
- } else {
- brcmf_dbg(TRACE, " ==== pid:%x, net_device for if:%s created ===\n",
- current->pid, ifp->ndev->name);
- ifp->state = 0;
- }
- }
-
- if (ret < 0) {
- if (ifp->ndev)
- free_netdev(ifp->ndev);
+ if (brcmf_net_attach(drvr, ifp->idx)) {
+ brcmf_dbg(ERROR, "brcmf_net_attach failed");
+ free_netdev(ifp->ndev);
+ drvr->iflist[ifidx] = NULL;
+ return -EOPNOTSUPP;
+ }
- drvr_priv->iflist[ifp->idx] = NULL;
- kfree(ifp);
- }
- } else
- ifp->ndev = ndev;
+ brcmf_dbg(TRACE, " ==== pid:%x, net_device for if:%s created ===\n",
+ current->pid, ifp->ndev->name);
return 0;
}
-void brcmf_del_if(struct brcmf_info *drvr_priv, int ifidx)
+void brcmf_del_if(struct brcmf_pub *drvr, int ifidx)
{
struct brcmf_if *ifp;
brcmf_dbg(TRACE, "idx %d\n", ifidx);
- ifp = drvr_priv->iflist[ifidx];
+ ifp = drvr->iflist[ifidx];
if (!ifp) {
brcmf_dbg(ERROR, "Null interface\n");
return;
}
+ if (ifp->ndev) {
+ if (ifidx == 0) {
+ if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) {
+ rtnl_lock();
+ brcmf_netdev_stop(ifp->ndev);
+ rtnl_unlock();
+ }
+ } else {
+ netif_stop_queue(ifp->ndev);
+ }
- ifp->state = BRCMF_E_IF_DEL;
- ifp->idx = ifidx;
- if (ifp->ndev != NULL) {
- netif_stop_queue(ifp->ndev);
unregister_netdev(ifp->ndev);
+ drvr->iflist[ifidx] = NULL;
+ if (ifidx == 0)
+ brcmf_cfg80211_detach(drvr->config);
free_netdev(ifp->ndev);
- drvr_priv->iflist[ifidx] = NULL;
- kfree(ifp);
}
}
-struct brcmf_pub *brcmf_attach(struct brcmf_bus *bus, uint bus_hdrlen)
+int brcmf_attach(uint bus_hdrlen, struct device *dev)
{
- struct brcmf_info *drvr_priv = NULL;
- struct net_device *ndev;
+ struct brcmf_pub *drvr = NULL;
+ int ret = 0;
brcmf_dbg(TRACE, "Enter\n");
- /* Allocate netdev, including space for private structure */
- ndev = alloc_netdev(sizeof(drvr_priv), "wlan%d", ether_setup);
- if (!ndev) {
- brcmf_dbg(ERROR, "OOM - alloc_netdev\n");
- goto fail;
- }
-
/* Allocate primary brcmf_info */
- drvr_priv = kzalloc(sizeof(struct brcmf_info), GFP_ATOMIC);
- if (!drvr_priv)
- goto fail;
-
- /*
- * Save the brcmf_info into the priv
- */
- memcpy(netdev_priv(ndev), &drvr_priv, sizeof(drvr_priv));
-
- if (brcmf_add_if(drvr_priv, 0, ndev, ndev->name, NULL, 0, 0) ==
- BRCMF_BAD_IF)
- goto fail;
-
- ndev->netdev_ops = NULL;
- mutex_init(&drvr_priv->proto_block);
+ drvr = kzalloc(sizeof(struct brcmf_pub), GFP_ATOMIC);
+ if (!drvr)
+ return -ENOMEM;
- /* Link to info module */
- drvr_priv->pub.info = drvr_priv;
+ mutex_init(&drvr->proto_block);
/* Link to bus module */
- drvr_priv->pub.bus = bus;
- drvr_priv->pub.hdrlen = bus_hdrlen;
+ drvr->hdrlen = bus_hdrlen;
+ drvr->bus_if = dev_get_drvdata(dev);
+ drvr->bus_if->drvr = drvr;
+ drvr->dev = dev;
/* Attach and link in the protocol */
- if (brcmf_proto_attach(&drvr_priv->pub) != 0) {
+ ret = brcmf_proto_attach(drvr);
+ if (ret != 0) {
brcmf_dbg(ERROR, "brcmf_prot_attach failed\n");
goto fail;
}
- /* Attach and link in the cfg80211 */
- drvr_priv->pub.config =
- brcmf_cfg80211_attach(ndev,
- brcmf_bus_get_device(bus),
- &drvr_priv->pub);
- if (drvr_priv->pub.config == NULL) {
- brcmf_dbg(ERROR, "wl_cfg80211_attach failed\n");
- goto fail;
- }
-
- INIT_WORK(&drvr_priv->setmacaddr_work, _brcmf_set_mac_address);
- INIT_WORK(&drvr_priv->multicast_work, _brcmf_set_multicast_list);
+ INIT_WORK(&drvr->setmacaddr_work, _brcmf_set_mac_address);
+ INIT_WORK(&drvr->multicast_work, _brcmf_set_multicast_list);
- /*
- * Save the brcmf_info into the priv
- */
- memcpy(netdev_priv(ndev), &drvr_priv, sizeof(drvr_priv));
-
- return &drvr_priv->pub;
+ return ret;
fail:
- if (ndev)
- free_netdev(ndev);
- if (drvr_priv)
- brcmf_detach(&drvr_priv->pub);
+ brcmf_detach(dev);
- return NULL;
+ return ret;
}
-int brcmf_bus_start(struct brcmf_pub *drvr)
+int brcmf_bus_start(struct device *dev)
{
int ret = -1;
- struct brcmf_info *drvr_priv = drvr->info;
/* Room for "event_msgs" + '\0' + bitvec */
char iovbuf[BRCMF_EVENTING_MASK_LEN + 12];
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_pub *drvr = bus_if->drvr;
brcmf_dbg(TRACE, "\n");
/* Bring up the bus */
- ret = brcmf_sdbrcm_bus_init(&drvr_priv->pub);
+ ret = bus_if->brcmf_bus_init(dev);
if (ret != 0) {
brcmf_dbg(ERROR, "brcmf_sdbrcm_bus_init failed %d\n", ret);
return ret;
}
/* If bus is not ready, can't come up */
- if (drvr_priv->pub.busstate != BRCMF_BUS_DATA) {
+ if (bus_if->state != BRCMF_BUS_DATA) {
brcmf_dbg(ERROR, "failed bus is not ready\n");
return -ENODEV;
}
@@ -1116,33 +1015,22 @@ int brcmf_bus_start(struct brcmf_pub *drvr)
drvr->pktfilter[0] = "100 0 0 0 0x01 0x00";
/* Bus is ready, do any protocol initialization */
- ret = brcmf_proto_init(&drvr_priv->pub);
+ ret = brcmf_proto_init(drvr);
if (ret < 0)
return ret;
return 0;
}
-static struct net_device_ops brcmf_netdev_ops_pri = {
- .ndo_open = brcmf_netdev_open,
- .ndo_stop = brcmf_netdev_stop,
- .ndo_get_stats = brcmf_netdev_get_stats,
- .ndo_do_ioctl = brcmf_netdev_ioctl_entry,
- .ndo_start_xmit = brcmf_netdev_start_xmit,
- .ndo_set_mac_address = brcmf_netdev_set_mac_address,
- .ndo_set_rx_mode = brcmf_netdev_set_multicast_list
-};
-
int brcmf_net_attach(struct brcmf_pub *drvr, int ifidx)
{
- struct brcmf_info *drvr_priv = drvr->info;
struct net_device *ndev;
u8 temp_addr[ETH_ALEN] = {
0x00, 0x90, 0x4c, 0x11, 0x22, 0x33};
brcmf_dbg(TRACE, "ifidx %d\n", ifidx);
- ndev = drvr_priv->iflist[ifidx]->ndev;
+ ndev = drvr->iflist[ifidx]->ndev;
ndev->netdev_ops = &brcmf_netdev_ops_pri;
/*
@@ -1150,7 +1038,7 @@ int brcmf_net_attach(struct brcmf_pub *drvr, int ifidx)
*/
if (ifidx != 0) {
/* for virtual interfaces use the primary MAC */
- memcpy(temp_addr, drvr_priv->pub.mac, ETH_ALEN);
+ memcpy(temp_addr, drvr->mac, ETH_ALEN);
}
@@ -1161,14 +1049,23 @@ int brcmf_net_attach(struct brcmf_pub *drvr, int ifidx)
- Locally Administered address */
}
- ndev->hard_header_len = ETH_HLEN + drvr_priv->pub.hdrlen;
+ ndev->hard_header_len = ETH_HLEN + drvr->hdrlen;
ndev->ethtool_ops = &brcmf_ethtool_ops;
- drvr_priv->pub.rxsz = ndev->mtu + ndev->hard_header_len +
- drvr_priv->pub.hdrlen;
+ drvr->rxsz = ndev->mtu + ndev->hard_header_len +
+ drvr->hdrlen;
memcpy(ndev->dev_addr, temp_addr, ETH_ALEN);
+ /* attach to cfg80211 for primary interface */
+ if (!ifidx) {
+ drvr->config = brcmf_cfg80211_attach(ndev, drvr->dev, drvr);
+ if (drvr->config == NULL) {
+ brcmf_dbg(ERROR, "wl_cfg80211_attach failed\n");
+ goto fail;
+ }
+ }
+
if (register_netdev(ndev) != 0) {
brcmf_dbg(ERROR, "couldn't register the net device\n");
goto fail;
@@ -1185,127 +1082,57 @@ fail:
static void brcmf_bus_detach(struct brcmf_pub *drvr)
{
- struct brcmf_info *drvr_priv;
-
- brcmf_dbg(TRACE, "Enter\n");
-
- if (drvr) {
- drvr_priv = drvr->info;
- if (drvr_priv) {
- /* Stop the protocol module */
- brcmf_proto_stop(&drvr_priv->pub);
-
- /* Stop the bus module */
- brcmf_sdbrcm_bus_stop(drvr_priv->pub.bus);
- }
- }
-}
-
-void brcmf_detach(struct brcmf_pub *drvr)
-{
- struct brcmf_info *drvr_priv;
-
brcmf_dbg(TRACE, "Enter\n");
if (drvr) {
- drvr_priv = drvr->info;
- if (drvr_priv) {
- struct brcmf_if *ifp;
- int i;
-
- for (i = 1; i < BRCMF_MAX_IFS; i++)
- if (drvr_priv->iflist[i])
- brcmf_del_if(drvr_priv, i);
-
- ifp = drvr_priv->iflist[0];
- if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) {
- rtnl_lock();
- brcmf_netdev_stop(ifp->ndev);
- rtnl_unlock();
- unregister_netdev(ifp->ndev);
- }
-
- cancel_work_sync(&drvr_priv->setmacaddr_work);
- cancel_work_sync(&drvr_priv->multicast_work);
-
- brcmf_bus_detach(drvr);
-
- if (drvr->prot)
- brcmf_proto_detach(drvr);
-
- brcmf_cfg80211_detach(drvr->config);
+ /* Stop the protocol module */
+ brcmf_proto_stop(drvr);
- free_netdev(ifp->ndev);
- kfree(ifp);
- kfree(drvr_priv);
- }
+ /* Stop the bus module */
+ drvr->bus_if->brcmf_bus_stop(drvr->dev);
}
}
-static void __exit brcmf_module_cleanup(void)
-{
- brcmf_dbg(TRACE, "Enter\n");
-
- brcmf_bus_unregister();
-}
-
-static int __init brcmf_module_init(void)
+void brcmf_detach(struct device *dev)
{
- int error;
+ int i;
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_pub *drvr = bus_if->drvr;
brcmf_dbg(TRACE, "Enter\n");
- error = brcmf_bus_register();
-
- if (error) {
- brcmf_dbg(ERROR, "brcmf_bus_register failed\n");
- goto failed;
- }
- return 0;
-
-failed:
- return -EINVAL;
-}
-module_init(brcmf_module_init);
-module_exit(brcmf_module_cleanup);
+ /* make sure primary interface removed last */
+ for (i = BRCMF_MAX_IFS-1; i > -1; i--)
+ if (drvr->iflist[i])
+ brcmf_del_if(drvr, i);
-int brcmf_os_proto_block(struct brcmf_pub *drvr)
-{
- struct brcmf_info *drvr_priv = drvr->info;
-
- if (drvr_priv) {
- mutex_lock(&drvr_priv->proto_block);
- return 1;
- }
- return 0;
-}
+ cancel_work_sync(&drvr->setmacaddr_work);
+ cancel_work_sync(&drvr->multicast_work);
-int brcmf_os_proto_unblock(struct brcmf_pub *drvr)
-{
- struct brcmf_info *drvr_priv = drvr->info;
+ brcmf_bus_detach(drvr);
- if (drvr_priv) {
- mutex_unlock(&drvr_priv->proto_block);
- return 1;
- }
+ if (drvr->prot)
+ brcmf_proto_detach(drvr);
- return 0;
+ bus_if->drvr = NULL;
+ kfree(drvr);
}
-static int brcmf_get_pend_8021x_cnt(struct brcmf_info *drvr_priv)
+static int brcmf_get_pend_8021x_cnt(struct brcmf_pub *drvr)
{
- return atomic_read(&drvr_priv->pend_8021x_cnt);
+ return atomic_read(&drvr->pend_8021x_cnt);
}
#define MAX_WAIT_FOR_8021X_TX 10
int brcmf_netdev_wait_pend8021x(struct net_device *ndev)
{
- struct brcmf_info *drvr_priv = *(struct brcmf_info **)netdev_priv(ndev);
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_pub *drvr = ifp->drvr;
int timeout = 10 * HZ / 1000;
int ntimes = MAX_WAIT_FOR_8021X_TX;
- int pend = brcmf_get_pend_8021x_cnt(drvr_priv);
+ int pend = brcmf_get_pend_8021x_cnt(drvr);
while (ntimes && pend) {
if (pend) {
@@ -1314,7 +1141,7 @@ int brcmf_netdev_wait_pend8021x(struct net_device *ndev)
set_current_state(TASK_RUNNING);
ntimes--;
}
- pend = brcmf_get_pend_8021x_cnt(drvr_priv);
+ pend = brcmf_get_pend_8021x_cnt(drvr);
}
return pend;
}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h
index 4ee1ea846f6d..6bc4425a8b0f 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h
@@ -41,17 +41,10 @@ extern void brcmf_proto_stop(struct brcmf_pub *drvr);
extern void brcmf_proto_hdrpush(struct brcmf_pub *, int ifidx,
struct sk_buff *txp);
-/* Remove any protocol-specific data header. */
-extern int brcmf_proto_hdrpull(struct brcmf_pub *, int *ifidx,
- struct sk_buff *rxp);
-
/* Use protocol to issue command to dongle */
extern int brcmf_proto_dcmd(struct brcmf_pub *drvr, int ifidx,
struct brcmf_dcmd *dcmd, int len);
-/* Update local copy of dongle statistics */
-extern void brcmf_proto_dstats(struct brcmf_pub *drvr);
-
extern int brcmf_c_preinit_dcmds(struct brcmf_pub *drvr);
extern int brcmf_proto_cdc_set_dcmd(struct brcmf_pub *drvr, int ifidx,
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
index 313b8bf592d1..f7eeee1dcdb6 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
@@ -28,6 +28,7 @@
#include <linux/semaphore.h>
#include <linux/firmware.h>
#include <linux/module.h>
+#include <linux/bcma/bcma.h>
#include <asm/unaligned.h>
#include <defs.h>
#include <brcmu_wifi.h>
@@ -35,6 +36,7 @@
#include <brcm_hw_ids.h>
#include <soc.h>
#include "sdio_host.h"
+#include "sdio_chip.h"
#define DCMD_RESP_TIMEOUT 2000 /* In milli second */
@@ -85,11 +87,8 @@ struct rte_console {
#endif /* BCMDBG */
#include <chipcommon.h>
-#include "dhd.h"
#include "dhd_bus.h"
-#include "dhd_proto.h"
#include "dhd_dbg.h"
-#include <bcmchip.h>
#define TXQLEN 2048 /* bulk tx queue length */
#define TXHI (TXQLEN - 256) /* turn on flow control above TXHI */
@@ -134,33 +133,6 @@ struct rte_console {
/* Force no backplane reset */
#define SBSDIO_DEVCTL_RST_NOBPRESET 0x20
-/* SBSDIO_FUNC1_CHIPCLKCSR */
-
-/* Force ALP request to backplane */
-#define SBSDIO_FORCE_ALP 0x01
-/* Force HT request to backplane */
-#define SBSDIO_FORCE_HT 0x02
-/* Force ILP request to backplane */
-#define SBSDIO_FORCE_ILP 0x04
-/* Make ALP ready (power up xtal) */
-#define SBSDIO_ALP_AVAIL_REQ 0x08
-/* Make HT ready (power up PLL) */
-#define SBSDIO_HT_AVAIL_REQ 0x10
-/* Squelch clock requests from HW */
-#define SBSDIO_FORCE_HW_CLKREQ_OFF 0x20
-/* Status: ALP is ready */
-#define SBSDIO_ALP_AVAIL 0x40
-/* Status: HT is ready */
-#define SBSDIO_HT_AVAIL 0x80
-
-#define SBSDIO_AVBITS (SBSDIO_HT_AVAIL | SBSDIO_ALP_AVAIL)
-#define SBSDIO_ALPAV(regval) ((regval) & SBSDIO_AVBITS)
-#define SBSDIO_HTAV(regval) (((regval) & SBSDIO_AVBITS) == SBSDIO_AVBITS)
-#define SBSDIO_ALPONLY(regval) (SBSDIO_ALPAV(regval) && !SBSDIO_HTAV(regval))
-
-#define SBSDIO_CLKAV(regval, alponly) \
- (SBSDIO_ALPAV(regval) && (alponly ? 1 : SBSDIO_HTAV(regval)))
-
/* direct(mapped) cis space */
/* MAPPED common CIS address */
@@ -335,49 +307,16 @@ struct rte_console {
/* Flags for SDH calls */
#define F2SYNC (SDIO_REQ_4BYTE | SDIO_REQ_FIXED)
-/* sbimstate */
-#define SBIM_IBE 0x20000 /* inbanderror */
-#define SBIM_TO 0x40000 /* timeout */
-#define SBIM_BY 0x01800000 /* busy (sonics >= 2.3) */
-#define SBIM_RJ 0x02000000 /* reject (sonics >= 2.3) */
-
-/* sbtmstatelow */
-
-/* reset */
-#define SBTML_RESET 0x0001
-/* reject field */
-#define SBTML_REJ_MASK 0x0006
-/* reject */
-#define SBTML_REJ 0x0002
-/* temporary reject, for error recovery */
-#define SBTML_TMPREJ 0x0004
-
-/* Shift to locate the SI control flags in sbtml */
-#define SBTML_SICF_SHIFT 16
-
-/* sbtmstatehigh */
-#define SBTMH_SERR 0x0001 /* serror */
-#define SBTMH_INT 0x0002 /* interrupt */
-#define SBTMH_BUSY 0x0004 /* busy */
-#define SBTMH_TO 0x0020 /* timeout (sonics >= 2.3) */
-
-/* Shift to locate the SI status flags in sbtmh */
-#define SBTMH_SISF_SHIFT 16
-
-/* sbidlow */
-#define SBIDL_INIT 0x80 /* initiator */
-
-/* sbidhigh */
-#define SBIDH_RC_MASK 0x000f /* revision code */
-#define SBIDH_RCE_MASK 0x7000 /* revision code extension field */
-#define SBIDH_RCE_SHIFT 8
-#define SBCOREREV(sbidh) \
- ((((sbidh) & SBIDH_RCE_MASK) >> SBIDH_RCE_SHIFT) | \
- ((sbidh) & SBIDH_RC_MASK))
-#define SBIDH_CC_MASK 0x8ff0 /* core code */
-#define SBIDH_CC_SHIFT 4
-#define SBIDH_VC_MASK 0xffff0000 /* vendor code */
-#define SBIDH_VC_SHIFT 16
+#define BRCMFMAC_FW_NAME "brcm/brcmfmac.bin"
+#define BRCMFMAC_NV_NAME "brcm/brcmfmac.txt"
+MODULE_FIRMWARE(BRCMFMAC_FW_NAME);
+MODULE_FIRMWARE(BRCMFMAC_NV_NAME);
+
+#define BRCMF_IDLE_IMMEDIATE (-1) /* Enter idle immediately */
+#define BRCMF_IDLE_ACTIVE 0 /* Do not request any SD clock change
+ * when idle
+ */
+#define BRCMF_IDLE_INTERVAL 1
/*
* Conversion of 802.1D priority to precedence level
@@ -388,17 +327,6 @@ static uint prio2prec(u32 prio)
(prio^2) : prio;
}
-/*
- * Core reg address translation.
- * Both macro's returns a 32 bits byte address on the backplane bus.
- */
-#define CORE_CC_REG(base, field) \
- (base + offsetof(struct chipcregs, field))
-#define CORE_BUS_REG(base, field) \
- (base + offsetof(struct sdpcmd_regs, field))
-#define CORE_SB(base, field) \
- (base + SBCONFIGOFF + offsetof(struct sbconfig, field))
-
/* core registers */
struct sdpcmd_regs {
u32 corecontrol; /* 0x00, rev8 */
@@ -524,25 +452,8 @@ struct sdpcm_shared_le {
/* misc chip info needed by some of the routines */
-struct chip_info {
- u32 chip;
- u32 chiprev;
- u32 cccorebase;
- u32 ccrev;
- u32 cccaps;
- u32 buscorebase; /* 32 bits backplane bus address */
- u32 buscorerev;
- u32 buscoretype;
- u32 ramcorebase;
- u32 armcorebase;
- u32 pmurev;
- u32 ramsize;
-};
-
/* Private data for SDIO bus interaction */
-struct brcmf_bus {
- struct brcmf_pub *drvr;
-
+struct brcmf_sdio {
struct brcmf_sdio_dev *sdiodev; /* sdio device handler */
struct chip_info *ci; /* Chip info struct */
char *vars; /* Variables (from CIS and/or other) */
@@ -574,7 +485,7 @@ struct brcmf_bus {
uint txminmax;
struct sk_buff *glomd; /* Packet containing glomming descriptor */
- struct sk_buff *glom; /* Packet chain for glommed superframe */
+ struct sk_buff_head glom; /* Packet list for glommed superframe */
uint glomerr; /* Glom packet read errors */
u8 *rxbuf; /* Buffer for receiving control packets */
@@ -637,6 +548,13 @@ struct brcmf_bus {
uint f2rxdata; /* Number of frame data reads */
uint f2txdata; /* Number of f2 frame writes */
uint f1regdata; /* Number of f1 register accesses */
+ uint tickcnt; /* Number of watchdog been schedule */
+ unsigned long tx_ctlerrs; /* Err of sending ctrl frames */
+ unsigned long tx_ctlpkts; /* Ctrl frames sent to dongle */
+ unsigned long rx_ctlerrs; /* Err of processing rx ctrl frames */
+ unsigned long rx_ctlpkts; /* Ctrl frames processed from dongle */
+ unsigned long rx_readahead_cnt; /* Number of packets where header
+ * read-ahead was used. */
u8 *ctrl_frame_buf;
u32 ctrl_frame_len;
@@ -657,50 +575,10 @@ struct brcmf_bus {
struct semaphore sdsem;
- const char *fw_name;
const struct firmware *firmware;
- const char *nv_name;
u32 fw_ptr;
-};
-struct sbconfig {
- u32 PAD[2];
- u32 sbipsflag; /* initiator port ocp slave flag */
- u32 PAD[3];
- u32 sbtpsflag; /* target port ocp slave flag */
- u32 PAD[11];
- u32 sbtmerrloga; /* (sonics >= 2.3) */
- u32 PAD;
- u32 sbtmerrlog; /* (sonics >= 2.3) */
- u32 PAD[3];
- u32 sbadmatch3; /* address match3 */
- u32 PAD;
- u32 sbadmatch2; /* address match2 */
- u32 PAD;
- u32 sbadmatch1; /* address match1 */
- u32 PAD[7];
- u32 sbimstate; /* initiator agent state */
- u32 sbintvec; /* interrupt mask */
- u32 sbtmstatelow; /* target state */
- u32 sbtmstatehigh; /* target state */
- u32 sbbwa0; /* bandwidth allocation table0 */
- u32 PAD;
- u32 sbimconfiglow; /* initiator configuration */
- u32 sbimconfighigh; /* initiator configuration */
- u32 sbadmatch0; /* address match0 */
- u32 PAD;
- u32 sbtmconfiglow; /* target configuration */
- u32 sbtmconfighigh; /* target configuration */
- u32 sbbconfig; /* broadcast configuration */
- u32 PAD;
- u32 sbbstate; /* broadcast state */
- u32 PAD[3];
- u32 sbactcnfg; /* activate configuration */
- u32 PAD[3];
- u32 sbflagst; /* current sbflags */
- u32 PAD[3];
- u32 sbidlow; /* identification */
- u32 sbidhigh; /* identification */
+ bool txoff; /* Transmit flow-controlled */
};
/* clkstate */
@@ -737,7 +615,7 @@ static void pkt_align(struct sk_buff *p, int len, int align)
}
/* To check if there's window offered */
-static bool data_ok(struct brcmf_bus *bus)
+static bool data_ok(struct brcmf_sdio *bus)
{
return (u8)(bus->tx_max - bus->tx_seq) != 0 &&
((u8)(bus->tx_max - bus->tx_seq) & 0x80) == 0;
@@ -748,12 +626,14 @@ static bool data_ok(struct brcmf_bus *bus)
* adresses on the 32 bit backplane bus.
*/
static void
-r_sdreg32(struct brcmf_bus *bus, u32 *regvar, u32 reg_offset, u32 *retryvar)
+r_sdreg32(struct brcmf_sdio *bus, u32 *regvar, u32 reg_offset, u32 *retryvar)
{
+ u8 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
*retryvar = 0;
do {
*regvar = brcmf_sdcard_reg_read(bus->sdiodev,
- bus->ci->buscorebase + reg_offset, sizeof(u32));
+ bus->ci->c_inf[idx].base + reg_offset,
+ sizeof(u32));
} while (brcmf_sdcard_regfail(bus->sdiodev) &&
(++(*retryvar) <= retry_limit));
if (*retryvar) {
@@ -766,12 +646,13 @@ r_sdreg32(struct brcmf_bus *bus, u32 *regvar, u32 reg_offset, u32 *retryvar)
}
static void
-w_sdreg32(struct brcmf_bus *bus, u32 regval, u32 reg_offset, u32 *retryvar)
+w_sdreg32(struct brcmf_sdio *bus, u32 regval, u32 reg_offset, u32 *retryvar)
{
+ u8 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
*retryvar = 0;
do {
brcmf_sdcard_reg_write(bus->sdiodev,
- bus->ci->buscorebase + reg_offset,
+ bus->ci->c_inf[idx].base + reg_offset,
sizeof(u32), regval);
} while (brcmf_sdcard_regfail(bus->sdiodev) &&
(++(*retryvar) <= retry_limit));
@@ -790,14 +671,14 @@ w_sdreg32(struct brcmf_bus *bus, u32 regval, u32 reg_offset, u32 *retryvar)
/* Packet free applicable unconditionally for sdio and sdspi.
* Conditional if bufpool was present for gspi bus.
*/
-static void brcmf_sdbrcm_pktfree2(struct brcmf_bus *bus, struct sk_buff *pkt)
+static void brcmf_sdbrcm_pktfree2(struct brcmf_sdio *bus, struct sk_buff *pkt)
{
if (bus->usebufpool)
brcmu_pkt_buf_free_skb(pkt);
}
/* Turn backplane clock on or off */
-static int brcmf_sdbrcm_htclk(struct brcmf_bus *bus, bool on, bool pendok)
+static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
{
int err;
u8 clkctl, clkreq, devctl;
@@ -812,10 +693,6 @@ static int brcmf_sdbrcm_htclk(struct brcmf_bus *bus, bool on, bool pendok)
clkreq =
bus->alp_only ? SBSDIO_ALP_AVAIL_REQ : SBSDIO_HT_AVAIL_REQ;
- if ((bus->ci->chip == BCM4329_CHIP_ID)
- && (bus->ci->chiprev == 0))
- clkreq |= SBSDIO_FORCE_ALP;
-
brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
SBSDIO_FUNC1_CHIPCLKCSR, clkreq, &err);
if (err) {
@@ -823,14 +700,6 @@ static int brcmf_sdbrcm_htclk(struct brcmf_bus *bus, bool on, bool pendok)
return -EBADE;
}
- if (pendok && ((bus->ci->buscoretype == PCMCIA_CORE_ID)
- && (bus->ci->buscorerev == 9))) {
- u32 dummy, retries;
- r_sdreg32(bus, &dummy,
- offsetof(struct sdpcmd_regs, clockctlstatus),
- &retries);
- }
-
/* Check current status */
clkctl = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
SBSDIO_FUNC1_CHIPCLKCSR, &err);
@@ -930,7 +799,7 @@ static int brcmf_sdbrcm_htclk(struct brcmf_bus *bus, bool on, bool pendok)
}
/* Change idle/active SD state */
-static int brcmf_sdbrcm_sdclk(struct brcmf_bus *bus, bool on)
+static int brcmf_sdbrcm_sdclk(struct brcmf_sdio *bus, bool on)
{
brcmf_dbg(TRACE, "Enter\n");
@@ -943,7 +812,7 @@ static int brcmf_sdbrcm_sdclk(struct brcmf_bus *bus, bool on)
}
/* Transition SD and backplane clock readiness */
-static int brcmf_sdbrcm_clkctl(struct brcmf_bus *bus, uint target, bool pendok)
+static int brcmf_sdbrcm_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
{
#ifdef BCMDBG
uint oldstate = bus->clkstate;
@@ -999,7 +868,7 @@ static int brcmf_sdbrcm_clkctl(struct brcmf_bus *bus, uint target, bool pendok)
return 0;
}
-static int brcmf_sdbrcm_bussleep(struct brcmf_bus *bus, bool sleep)
+static int brcmf_sdbrcm_bussleep(struct brcmf_sdio *bus, bool sleep)
{
uint retries = 0;
@@ -1034,11 +903,9 @@ static int brcmf_sdbrcm_bussleep(struct brcmf_bus *bus, bool sleep)
SBSDIO_FORCE_HW_CLKREQ_OFF, NULL);
/* Isolate the bus */
- if (bus->ci->chip != BCM4329_CHIP_ID) {
- brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_DEVICE_CTL,
- SBSDIO_DEVCTL_PADS_ISO, NULL);
- }
+ brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_DEVICE_CTL,
+ SBSDIO_DEVCTL_PADS_ISO, NULL);
/* Change state */
bus->sleeping = true;
@@ -1049,13 +916,6 @@ static int brcmf_sdbrcm_bussleep(struct brcmf_bus *bus, bool sleep)
brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);
- /* Force pad isolation off if possible
- (in case power never toggled) */
- if ((bus->ci->buscoretype == PCMCIA_CORE_ID)
- && (bus->ci->buscorerev >= 10))
- brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_DEVICE_CTL, 0, NULL);
-
/* Make sure the controller has the bus up */
brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
@@ -1080,13 +940,13 @@ static int brcmf_sdbrcm_bussleep(struct brcmf_bus *bus, bool sleep)
return 0;
}
-static void bus_wake(struct brcmf_bus *bus)
+static void bus_wake(struct brcmf_sdio *bus)
{
if (bus->sleeping)
brcmf_sdbrcm_bussleep(bus, false);
}
-static u32 brcmf_sdbrcm_hostmail(struct brcmf_bus *bus)
+static u32 brcmf_sdbrcm_hostmail(struct brcmf_sdio *bus)
{
u32 intstatus = 0;
u32 hmb_data;
@@ -1162,7 +1022,7 @@ static u32 brcmf_sdbrcm_hostmail(struct brcmf_bus *bus)
return intstatus;
}
-static void brcmf_sdbrcm_rxfail(struct brcmf_bus *bus, bool abort, bool rtx)
+static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
{
uint retries = 0;
u16 lastrbc;
@@ -1219,16 +1079,61 @@ static void brcmf_sdbrcm_rxfail(struct brcmf_bus *bus, bool abort, bool rtx)
/* If we can't reach the device, signal failure */
if (err || brcmf_sdcard_regfail(bus->sdiodev))
- bus->drvr->busstate = BRCMF_BUS_DOWN;
+ bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
+}
+
+/* copy a buffer into a pkt buffer chain */
+static uint brcmf_sdbrcm_glom_from_buf(struct brcmf_sdio *bus, uint len)
+{
+ uint n, ret = 0;
+ struct sk_buff *p;
+ u8 *buf;
+
+ buf = bus->dataptr;
+
+ /* copy the data */
+ skb_queue_walk(&bus->glom, p) {
+ n = min_t(uint, p->len, len);
+ memcpy(p->data, buf, n);
+ buf += n;
+ len -= n;
+ ret += n;
+ if (!len)
+ break;
+ }
+
+ return ret;
+}
+
+/* return total length of buffer chain */
+static uint brcmf_sdbrcm_glom_len(struct brcmf_sdio *bus)
+{
+ struct sk_buff *p;
+ uint total;
+
+ total = 0;
+ skb_queue_walk(&bus->glom, p)
+ total += p->len;
+ return total;
}
-static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
+static void brcmf_sdbrcm_free_glom(struct brcmf_sdio *bus)
+{
+ struct sk_buff *cur, *next;
+
+ skb_queue_walk_safe(&bus->glom, cur, next) {
+ skb_unlink(cur, &bus->glom);
+ brcmu_pkt_buf_free_skb(cur);
+ }
+}
+
+static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
{
u16 dlen, totlen;
u8 *dptr, num = 0;
u16 sublen, check;
- struct sk_buff *pfirst, *plast, *pnext, *save_pfirst;
+ struct sk_buff *pfirst, *pnext;
int errcode;
u8 chan, seq, doff, sfdoff;
@@ -1240,11 +1145,12 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
/* If packets, issue read(s) and send up packet chain */
/* Return sequence numbers consumed? */
- brcmf_dbg(TRACE, "start: glomd %p glom %p\n", bus->glomd, bus->glom);
+ brcmf_dbg(TRACE, "start: glomd %p glom %p\n",
+ bus->glomd, skb_peek(&bus->glom));
/* If there's a descriptor, generate the packet chain */
if (bus->glomd) {
- pfirst = plast = pnext = NULL;
+ pfirst = pnext = NULL;
dlen = (u16) (bus->glomd->len);
dptr = bus->glomd->data;
if (!dlen || (dlen & 1)) {
@@ -1287,12 +1193,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
num, sublen);
break;
}
- if (!pfirst) {
- pfirst = plast = pnext;
- } else {
- plast->next = pnext;
- plast = pnext;
- }
+ skb_queue_tail(&bus->glom, pnext);
/* Adhere to start alignment requirements */
pkt_align(pnext, sublen, BRCMF_SDALIGN);
@@ -1308,12 +1209,9 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
brcmf_dbg(GLOM, "glomdesc mismatch: nextlen %d glomdesc %d rxseq %d\n",
bus->nextlen, totlen, rxseq);
}
- bus->glom = pfirst;
pfirst = pnext = NULL;
} else {
- if (pfirst)
- brcmu_pkt_buf_free_skb(pfirst);
- bus->glom = NULL;
+ brcmf_sdbrcm_free_glom(bus);
num = 0;
}
@@ -1325,37 +1223,33 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
/* Ok -- either we just generated a packet chain,
or had one from before */
- if (bus->glom) {
+ if (!skb_queue_empty(&bus->glom)) {
if (BRCMF_GLOM_ON()) {
brcmf_dbg(GLOM, "try superframe read, packet chain:\n");
- for (pnext = bus->glom; pnext; pnext = pnext->next) {
+ skb_queue_walk(&bus->glom, pnext) {
brcmf_dbg(GLOM, " %p: %p len 0x%04x (%d)\n",
pnext, (u8 *) (pnext->data),
pnext->len, pnext->len);
}
}
- pfirst = bus->glom;
- dlen = (u16) brcmu_pkttotlen(pfirst);
+ pfirst = skb_peek(&bus->glom);
+ dlen = (u16) brcmf_sdbrcm_glom_len(bus);
/* Do an SDIO read for the superframe. Configurable iovar to
* read directly into the chained packet, or allocate a large
 * packet and copy into the chain.
*/
if (usechain) {
- errcode = brcmf_sdcard_recv_buf(bus->sdiodev,
+ errcode = brcmf_sdcard_recv_chain(bus->sdiodev,
bus->sdiodev->sbwad,
- SDIO_FUNC_2,
- F2SYNC, (u8 *) pfirst->data, dlen,
- pfirst);
+ SDIO_FUNC_2, F2SYNC, &bus->glom);
} else if (bus->dataptr) {
errcode = brcmf_sdcard_recv_buf(bus->sdiodev,
bus->sdiodev->sbwad,
- SDIO_FUNC_2,
- F2SYNC, bus->dataptr, dlen,
- NULL);
- sublen = (u16) brcmu_pktfrombuf(pfirst, 0, dlen,
- bus->dataptr);
+ SDIO_FUNC_2, F2SYNC,
+ bus->dataptr, dlen);
+ sublen = (u16) brcmf_sdbrcm_glom_from_buf(bus, dlen);
if (sublen != dlen) {
brcmf_dbg(ERROR, "FAILED TO COPY, dlen %d sublen %d\n",
dlen, sublen);
@@ -1373,16 +1267,15 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
if (errcode < 0) {
brcmf_dbg(ERROR, "glom read of %d bytes failed: %d\n",
dlen, errcode);
- bus->drvr->rx_errors++;
+ bus->sdiodev->bus_if->dstats.rx_errors++;
if (bus->glomerr++ < 3) {
brcmf_sdbrcm_rxfail(bus, true, true);
} else {
bus->glomerr = 0;
brcmf_sdbrcm_rxfail(bus, true, false);
- brcmu_pkt_buf_free_skb(bus->glom);
bus->rxglomfail++;
- bus->glom = NULL;
+ brcmf_sdbrcm_free_glom(bus);
}
return 0;
}
@@ -1455,10 +1348,14 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
/* Remove superframe header, remember offset */
skb_pull(pfirst, doff);
sfdoff = doff;
+ num = 0;
/* Validate all the subframe headers */
- for (num = 0, pnext = pfirst; pnext && !errcode;
- num++, pnext = pnext->next) {
+ skb_queue_walk(&bus->glom, pnext) {
+ /* leave when an invalid subframe is found */
+ if (errcode)
+ break;
+
dptr = (u8 *) (pnext->data);
dlen = (u16) (pnext->len);
sublen = get_unaligned_le16(dptr);
@@ -1491,6 +1388,8 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
num, doff, sublen, SDPCM_HDRLEN);
errcode = -1;
}
+ /* increase the subframe count */
+ num++;
}
if (errcode) {
@@ -1503,23 +1402,16 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
} else {
bus->glomerr = 0;
brcmf_sdbrcm_rxfail(bus, true, false);
- brcmu_pkt_buf_free_skb(bus->glom);
bus->rxglomfail++;
- bus->glom = NULL;
+ brcmf_sdbrcm_free_glom(bus);
}
bus->nextlen = 0;
return 0;
}
/* Basic SD framing looks ok - process each packet (header) */
- save_pfirst = pfirst;
- bus->glom = NULL;
- plast = NULL;
-
- for (num = 0; pfirst; rxseq++, pfirst = pnext) {
- pnext = pfirst->next;
- pfirst->next = NULL;
+ skb_queue_walk_safe(&bus->glom, pfirst, pnext) {
dptr = (u8 *) (pfirst->data);
sublen = get_unaligned_le16(dptr);
chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
@@ -1539,6 +1431,8 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
bus->rx_badseq++;
rxseq = seq;
}
+ rxseq++;
+
#ifdef BCMDBG
if (BRCMF_BYTES_ON() && BRCMF_DATA_ON()) {
printk(KERN_DEBUG "Rx Subframe Data:\n");
@@ -1551,36 +1445,22 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
skb_pull(pfirst, doff);
if (pfirst->len == 0) {
+ skb_unlink(pfirst, &bus->glom);
brcmu_pkt_buf_free_skb(pfirst);
- if (plast)
- plast->next = pnext;
- else
- save_pfirst = pnext;
-
continue;
- } else if (brcmf_proto_hdrpull(bus->drvr, &ifidx,
- pfirst) != 0) {
+ } else if (brcmf_proto_hdrpull(bus->sdiodev->dev,
+ &ifidx, pfirst) != 0) {
brcmf_dbg(ERROR, "rx protocol error\n");
- bus->drvr->rx_errors++;
+ bus->sdiodev->bus_if->dstats.rx_errors++;
+ skb_unlink(pfirst, &bus->glom);
brcmu_pkt_buf_free_skb(pfirst);
- if (plast)
- plast->next = pnext;
- else
- save_pfirst = pnext;
-
continue;
}
- /* this packet will go up, link back into
- chain and count it */
- pfirst->next = pnext;
- plast = pfirst;
- num++;
-
#ifdef BCMDBG
if (BRCMF_GLOM_ON()) {
brcmf_dbg(GLOM, "subframe %d to stack, %p (%p/%d) nxt/lnk %p/%p\n",
- num, pfirst, pfirst->data,
+ bus->glom.qlen, pfirst, pfirst->data,
pfirst->len, pfirst->next,
pfirst->prev);
print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
@@ -1589,19 +1469,20 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
}
#endif /* BCMDBG */
}
- if (num) {
+ /* send any remaining packets up */
+ if (bus->glom.qlen) {
up(&bus->sdsem);
- brcmf_rx_frame(bus->drvr, ifidx, save_pfirst, num);
+ brcmf_rx_frame(bus->sdiodev->dev, ifidx, &bus->glom);
down(&bus->sdsem);
}
bus->rxglomframes++;
- bus->rxglompkts += num;
+ bus->rxglompkts += bus->glom.qlen;
}
return num;
}
-static int brcmf_sdbrcm_dcmd_resp_wait(struct brcmf_bus *bus, uint *condition,
+static int brcmf_sdbrcm_dcmd_resp_wait(struct brcmf_sdio *bus, uint *condition,
bool *pending)
{
DECLARE_WAITQUEUE(wait, current);
@@ -1623,7 +1504,7 @@ static int brcmf_sdbrcm_dcmd_resp_wait(struct brcmf_bus *bus, uint *condition,
return timeout;
}
-static int brcmf_sdbrcm_dcmd_resp_wake(struct brcmf_bus *bus)
+static int brcmf_sdbrcm_dcmd_resp_wake(struct brcmf_sdio *bus)
{
if (waitqueue_active(&bus->dcmd_resp_wait))
wake_up_interruptible(&bus->dcmd_resp_wait);
@@ -1631,7 +1512,7 @@ static int brcmf_sdbrcm_dcmd_resp_wake(struct brcmf_bus *bus)
return 0;
}
static void
-brcmf_sdbrcm_read_control(struct brcmf_bus *bus, u8 *hdr, uint len, uint doff)
+brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
{
uint rdlen, pad;
@@ -1657,7 +1538,7 @@ brcmf_sdbrcm_read_control(struct brcmf_bus *bus, u8 *hdr, uint len, uint doff)
if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) {
pad = bus->blocksize - (rdlen % bus->blocksize);
if ((pad <= bus->roundup) && (pad < bus->blocksize) &&
- ((len + pad) < bus->drvr->maxctl))
+ ((len + pad) < bus->sdiodev->bus_if->maxctl))
rdlen += pad;
} else if (rdlen % BRCMF_SDALIGN) {
rdlen += BRCMF_SDALIGN - (rdlen % BRCMF_SDALIGN);
@@ -1668,18 +1549,18 @@ brcmf_sdbrcm_read_control(struct brcmf_bus *bus, u8 *hdr, uint len, uint doff)
rdlen = roundup(rdlen, ALIGNMENT);
/* Drop if the read is too big or it exceeds our maximum */
- if ((rdlen + BRCMF_FIRSTREAD) > bus->drvr->maxctl) {
+ if ((rdlen + BRCMF_FIRSTREAD) > bus->sdiodev->bus_if->maxctl) {
brcmf_dbg(ERROR, "%d-byte control read exceeds %d-byte buffer\n",
- rdlen, bus->drvr->maxctl);
- bus->drvr->rx_errors++;
+ rdlen, bus->sdiodev->bus_if->maxctl);
+ bus->sdiodev->bus_if->dstats.rx_errors++;
brcmf_sdbrcm_rxfail(bus, false, false);
goto done;
}
- if ((len - doff) > bus->drvr->maxctl) {
+ if ((len - doff) > bus->sdiodev->bus_if->maxctl) {
brcmf_dbg(ERROR, "%d-byte ctl frame (%d-byte ctl data) exceeds %d-byte limit\n",
- len, len - doff, bus->drvr->maxctl);
- bus->drvr->rx_errors++;
+ len, len - doff, bus->sdiodev->bus_if->maxctl);
+ bus->sdiodev->bus_if->dstats.rx_errors++;
bus->rx_toolong++;
brcmf_sdbrcm_rxfail(bus, false, false);
goto done;
@@ -1689,8 +1570,7 @@ brcmf_sdbrcm_read_control(struct brcmf_bus *bus, u8 *hdr, uint len, uint doff)
sdret = brcmf_sdcard_recv_buf(bus->sdiodev,
bus->sdiodev->sbwad,
SDIO_FUNC_2,
- F2SYNC, (bus->rxctl + BRCMF_FIRSTREAD), rdlen,
- NULL);
+ F2SYNC, (bus->rxctl + BRCMF_FIRSTREAD), rdlen);
bus->f2rxdata++;
/* Control frame failures need retransmission */
@@ -1721,7 +1601,7 @@ done:
}
/* Pad read to blocksize for efficiency */
-static void brcmf_pad(struct brcmf_bus *bus, u16 *pad, u16 *rdlen)
+static void brcmf_pad(struct brcmf_sdio *bus, u16 *pad, u16 *rdlen)
{
if (bus->roundup && bus->blocksize && *rdlen > bus->blocksize) {
*pad = bus->blocksize - (*rdlen % bus->blocksize);
@@ -1734,7 +1614,7 @@ static void brcmf_pad(struct brcmf_bus *bus, u16 *pad, u16 *rdlen)
}
static void
-brcmf_alloc_pkt_and_read(struct brcmf_bus *bus, u16 rdlen,
+brcmf_alloc_pkt_and_read(struct brcmf_sdio *bus, u16 rdlen,
struct sk_buff **pkt, u8 **rxbuf)
{
int sdret; /* Return code from calls */
@@ -1746,16 +1626,15 @@ brcmf_alloc_pkt_and_read(struct brcmf_bus *bus, u16 rdlen,
pkt_align(*pkt, rdlen, BRCMF_SDALIGN);
*rxbuf = (u8 *) ((*pkt)->data);
/* Read the entire frame */
- sdret = brcmf_sdcard_recv_buf(bus->sdiodev, bus->sdiodev->sbwad,
- SDIO_FUNC_2, F2SYNC,
- *rxbuf, rdlen, *pkt);
+ sdret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad,
+ SDIO_FUNC_2, F2SYNC, *pkt);
bus->f2rxdata++;
if (sdret < 0) {
brcmf_dbg(ERROR, "(nextlen): read %d bytes failed: %d\n",
rdlen, sdret);
brcmu_pkt_buf_free_skb(*pkt);
- bus->drvr->rx_errors++;
+ bus->sdiodev->bus_if->dstats.rx_errors++;
/* Force retry w/normal header read.
* Don't attempt NAK for
* gSPI
@@ -1767,7 +1646,7 @@ brcmf_alloc_pkt_and_read(struct brcmf_bus *bus, u16 rdlen,
/* Checks the header */
static int
-brcmf_check_rxbuf(struct brcmf_bus *bus, struct sk_buff *pkt, u8 *rxbuf,
+brcmf_check_rxbuf(struct brcmf_sdio *bus, struct sk_buff *pkt, u8 *rxbuf,
u8 rxseq, u16 nextlen, u16 *len)
{
u16 check;
@@ -1823,7 +1702,7 @@ fail:
/* Return true if there may be more frames to read */
static uint
-brcmf_sdbrcm_readframes(struct brcmf_bus *bus, uint maxframes, bool *finished)
+brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished)
{
u16 len, check; /* Extracted hardware header fields */
u8 chan, seq, doff; /* Extracted software header fields */
@@ -1846,14 +1725,15 @@ brcmf_sdbrcm_readframes(struct brcmf_bus *bus, uint maxframes, bool *finished)
*finished = false;
for (rxseq = bus->rx_seq, rxleft = maxframes;
- !bus->rxskip && rxleft && bus->drvr->busstate != BRCMF_BUS_DOWN;
+ !bus->rxskip && rxleft &&
+ bus->sdiodev->bus_if->state != BRCMF_BUS_DOWN;
rxseq++, rxleft--) {
/* Handle glomming separately */
- if (bus->glom || bus->glomd) {
+ if (bus->glomd || !skb_queue_empty(&bus->glom)) {
u8 cnt;
brcmf_dbg(GLOM, "calling rxglom: glomd %p, glom %p\n",
- bus->glomd, bus->glom);
+ bus->glomd, skb_peek(&bus->glom));
cnt = brcmf_sdbrcm_rxglom(bus, rxseq);
brcmf_dbg(GLOM, "rxglom returned %d\n", cnt);
rxseq += cnt - 1;
@@ -1905,7 +1785,7 @@ brcmf_sdbrcm_readframes(struct brcmf_bus *bus, uint maxframes, bool *finished)
bus->nextlen = 0;
}
- bus->drvr->rx_readahead_cnt++;
+ bus->rx_readahead_cnt++;
/* Handle Flow Control */
fcbits = SDPCM_FCMASK_VALUE(
@@ -1976,7 +1856,7 @@ brcmf_sdbrcm_readframes(struct brcmf_bus *bus, uint maxframes, bool *finished)
/* Read frame header (hardware and software) */
sdret = brcmf_sdcard_recv_buf(bus->sdiodev, bus->sdiodev->sbwad,
SDIO_FUNC_2, F2SYNC, bus->rxhdr,
- BRCMF_FIRSTREAD, NULL);
+ BRCMF_FIRSTREAD);
bus->f2rxhdrs++;
if (sdret < 0) {
@@ -2103,7 +1983,7 @@ brcmf_sdbrcm_readframes(struct brcmf_bus *bus, uint maxframes, bool *finished)
/* Too long -- skip this frame */
brcmf_dbg(ERROR, "too long: len %d rdlen %d\n",
len, rdlen);
- bus->drvr->rx_errors++;
+ bus->sdiodev->bus_if->dstats.rx_errors++;
bus->rx_toolong++;
brcmf_sdbrcm_rxfail(bus, false, false);
continue;
@@ -2115,7 +1995,7 @@ brcmf_sdbrcm_readframes(struct brcmf_bus *bus, uint maxframes, bool *finished)
/* Give up on data, request rtx of events */
brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: rdlen %d chan %d\n",
rdlen, chan);
- bus->drvr->rx_dropped++;
+ bus->sdiodev->bus_if->dstats.rx_dropped++;
brcmf_sdbrcm_rxfail(bus, false, RETRYCHAN(chan));
continue;
}
@@ -2125,9 +2005,8 @@ brcmf_sdbrcm_readframes(struct brcmf_bus *bus, uint maxframes, bool *finished)
pkt_align(pkt, rdlen, BRCMF_SDALIGN);
/* Read the remaining frame data */
- sdret = brcmf_sdcard_recv_buf(bus->sdiodev, bus->sdiodev->sbwad,
- SDIO_FUNC_2, F2SYNC, ((u8 *) (pkt->data)),
- rdlen, pkt);
+ sdret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad,
+ SDIO_FUNC_2, F2SYNC, pkt);
bus->f2rxdata++;
if (sdret < 0) {
@@ -2136,7 +2015,7 @@ brcmf_sdbrcm_readframes(struct brcmf_bus *bus, uint maxframes, bool *finished)
: ((chan == SDPCM_DATA_CHANNEL) ? "data"
: "test")), sdret);
brcmu_pkt_buf_free_skb(pkt);
- bus->drvr->rx_errors++;
+ bus->sdiodev->bus_if->dstats.rx_errors++;
brcmf_sdbrcm_rxfail(bus, true, RETRYCHAN(chan));
continue;
}
@@ -2185,16 +2064,17 @@ deliver:
if (pkt->len == 0) {
brcmu_pkt_buf_free_skb(pkt);
continue;
- } else if (brcmf_proto_hdrpull(bus->drvr, &ifidx, pkt) != 0) {
+ } else if (brcmf_proto_hdrpull(bus->sdiodev->dev, &ifidx,
+ pkt) != 0) {
brcmf_dbg(ERROR, "rx protocol error\n");
brcmu_pkt_buf_free_skb(pkt);
- bus->drvr->rx_errors++;
+ bus->sdiodev->bus_if->dstats.rx_errors++;
continue;
}
/* Unlock during rx call */
up(&bus->sdsem);
- brcmf_rx_frame(bus->drvr, ifidx, pkt, 1);
+ brcmf_rx_packet(bus->sdiodev->dev, ifidx, pkt);
down(&bus->sdsem);
}
rxcount = maxframes - rxleft;
@@ -2214,16 +2094,8 @@ deliver:
return rxcount;
}
-static int
-brcmf_sdbrcm_send_buf(struct brcmf_bus *bus, u32 addr, uint fn, uint flags,
- u8 *buf, uint nbytes, struct sk_buff *pkt)
-{
- return brcmf_sdcard_send_buf
- (bus->sdiodev, addr, fn, flags, buf, nbytes, pkt);
-}
-
static void
-brcmf_sdbrcm_wait_for_event(struct brcmf_bus *bus, bool *lockvar)
+brcmf_sdbrcm_wait_for_event(struct brcmf_sdio *bus, bool *lockvar)
{
up(&bus->sdsem);
wait_event_interruptible_timeout(bus->ctrl_wait,
@@ -2233,7 +2105,7 @@ brcmf_sdbrcm_wait_for_event(struct brcmf_bus *bus, bool *lockvar)
}
static void
-brcmf_sdbrcm_wait_event_wakeup(struct brcmf_bus *bus)
+brcmf_sdbrcm_wait_event_wakeup(struct brcmf_sdio *bus)
{
if (waitqueue_active(&bus->ctrl_wait))
wake_up_interruptible(&bus->ctrl_wait);
@@ -2242,7 +2114,7 @@ brcmf_sdbrcm_wait_event_wakeup(struct brcmf_bus *bus)
/* Writes a HW/SW header into the packet and sends it. */
/* Assumes: (a) header space already there, (b) caller holds lock */
-static int brcmf_sdbrcm_txpkt(struct brcmf_bus *bus, struct sk_buff *pkt,
+static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt,
uint chan, bool free_pkt)
{
int ret;
@@ -2262,7 +2134,7 @@ static int brcmf_sdbrcm_txpkt(struct brcmf_bus *bus, struct sk_buff *pkt,
if (skb_headroom(pkt) < pad) {
brcmf_dbg(INFO, "insufficient headroom %d for %d pad\n",
skb_headroom(pkt), pad);
- bus->drvr->tx_realloc++;
+ bus->sdiodev->bus_if->tx_realloc++;
new = brcmu_pkt_buf_get_skb(pkt->len + BRCMF_SDALIGN);
if (!new) {
brcmf_dbg(ERROR, "couldn't allocate new %d-byte packet\n",
@@ -2331,9 +2203,8 @@ static int brcmf_sdbrcm_txpkt(struct brcmf_bus *bus, struct sk_buff *pkt,
if (len & (ALIGNMENT - 1))
len = roundup(len, ALIGNMENT);
- ret = brcmf_sdbrcm_send_buf(bus, bus->sdiodev->sbwad,
- SDIO_FUNC_2, F2SYNC, frame,
- len, pkt);
+ ret = brcmf_sdcard_send_pkt(bus->sdiodev, bus->sdiodev->sbwad,
+ SDIO_FUNC_2, F2SYNC, pkt);
bus->f2txdata++;
if (ret < 0) {
@@ -2371,7 +2242,7 @@ done:
/* restore pkt buffer pointer before calling tx complete routine */
skb_pull(pkt, SDPCM_HDRLEN + pad);
up(&bus->sdsem);
- brcmf_txcomplete(bus->drvr, pkt, ret != 0);
+ brcmf_txcomplete(bus->sdiodev->dev, pkt, ret != 0);
down(&bus->sdsem);
if (free_pkt)
@@ -2380,7 +2251,7 @@ done:
return ret;
}
-static uint brcmf_sdbrcm_sendfromq(struct brcmf_bus *bus, uint maxframes)
+static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes)
{
struct sk_buff *pkt;
u32 intstatus = 0;
@@ -2390,8 +2261,6 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_bus *bus, uint maxframes)
uint datalen;
u8 tx_prec_map;
- struct brcmf_pub *drvr = bus->drvr;
-
brcmf_dbg(TRACE, "Enter\n");
tx_prec_map = ~bus->flowcontrol;
@@ -2409,9 +2278,9 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_bus *bus, uint maxframes)
ret = brcmf_sdbrcm_txpkt(bus, pkt, SDPCM_DATA_CHANNEL, true);
if (ret)
- bus->drvr->tx_errors++;
+ bus->sdiodev->bus_if->dstats.tx_errors++;
else
- bus->drvr->dstats.tx_bytes += datalen;
+ bus->sdiodev->bus_if->dstats.tx_bytes += datalen;
/* In poll mode, need to check for other events */
if (!bus->intr && cnt) {
@@ -2428,14 +2297,98 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_bus *bus, uint maxframes)
}
/* Deflow-control stack if needed */
- if (drvr->up && (drvr->busstate == BRCMF_BUS_DATA) &&
- drvr->txoff && (pktq_len(&bus->txq) < TXLOW))
- brcmf_txflowcontrol(drvr, 0, OFF);
+ if (bus->sdiodev->bus_if->drvr_up &&
+ (bus->sdiodev->bus_if->state == BRCMF_BUS_DATA) &&
+ bus->txoff && (pktq_len(&bus->txq) < TXLOW)) {
+ bus->txoff = OFF;
+ brcmf_txflowcontrol(bus->sdiodev->dev, 0, OFF);
+ }
return cnt;
}
-static bool brcmf_sdbrcm_dpc(struct brcmf_bus *bus)
+static void brcmf_sdbrcm_bus_stop(struct device *dev)
+{
+ u32 local_hostintmask;
+ u8 saveclk;
+ uint retries;
+ int err;
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv;
+ struct brcmf_sdio *bus = sdiodev->bus;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ if (bus->watchdog_tsk) {
+ send_sig(SIGTERM, bus->watchdog_tsk, 1);
+ kthread_stop(bus->watchdog_tsk);
+ bus->watchdog_tsk = NULL;
+ }
+
+ if (bus->dpc_tsk && bus->dpc_tsk != current) {
+ send_sig(SIGTERM, bus->dpc_tsk, 1);
+ kthread_stop(bus->dpc_tsk);
+ bus->dpc_tsk = NULL;
+ }
+
+ down(&bus->sdsem);
+
+ bus_wake(bus);
+
+ /* Enable clock for device interrupts */
+ brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
+
+ /* Disable and clear interrupts at the chip level also */
+ w_sdreg32(bus, 0, offsetof(struct sdpcmd_regs, hostintmask), &retries);
+ local_hostintmask = bus->hostintmask;
+ bus->hostintmask = 0;
+
+ /* Change our idea of bus state */
+ bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
+
+ /* Force clocks on backplane to be sure F2 interrupt propagates */
+ saveclk = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, &err);
+ if (!err) {
+ brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR,
+ (saveclk | SBSDIO_FORCE_HT), &err);
+ }
+ if (err)
+ brcmf_dbg(ERROR, "Failed to force clock for F2: err %d\n", err);
+
+ /* Turn off the bus (F2), free any pending packets */
+ brcmf_dbg(INTR, "disable SDIO interrupts\n");
+ brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_0, SDIO_CCCR_IOEx,
+ SDIO_FUNC_ENABLE_1, NULL);
+
+ /* Clear any pending interrupts now that F2 is disabled */
+ w_sdreg32(bus, local_hostintmask,
+ offsetof(struct sdpcmd_regs, intstatus), &retries);
+
+ /* Turn off the backplane clock (only) */
+ brcmf_sdbrcm_clkctl(bus, CLK_SDONLY, false);
+
+ /* Clear the data packet queues */
+ brcmu_pktq_flush(&bus->txq, true, NULL, NULL);
+
+ /* Clear any held glomming stuff */
+ if (bus->glomd)
+ brcmu_pkt_buf_free_skb(bus->glomd);
+ brcmf_sdbrcm_free_glom(bus);
+
+ /* Clear rx control and wake any waiters */
+ bus->rxlen = 0;
+ brcmf_sdbrcm_dcmd_resp_wake(bus);
+
+ /* Reset some F2 state stuff */
+ bus->rxskip = false;
+ bus->tx_seq = bus->rx_seq = 0;
+
+ up(&bus->sdsem);
+}
+
+static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
{
u32 intstatus, newstatus = 0;
uint retries = 0;
@@ -2463,7 +2416,7 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_bus *bus)
SBSDIO_DEVICE_CTL, &err);
if (err) {
brcmf_dbg(ERROR, "error reading DEVCTL: %d\n", err);
- bus->drvr->busstate = BRCMF_BUS_DOWN;
+ bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
}
#endif /* BCMDBG */
@@ -2473,7 +2426,7 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_bus *bus)
if (err) {
brcmf_dbg(ERROR, "error reading CSR: %d\n",
err);
- bus->drvr->busstate = BRCMF_BUS_DOWN;
+ bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
}
brcmf_dbg(INFO, "DPC: PENDING, devctl 0x%02x clkctl 0x%02x\n",
@@ -2486,7 +2439,7 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_bus *bus)
if (err) {
brcmf_dbg(ERROR, "error reading DEVCTL: %d\n",
err);
- bus->drvr->busstate = BRCMF_BUS_DOWN;
+ bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
}
devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
@@ -2494,7 +2447,7 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_bus *bus)
if (err) {
brcmf_dbg(ERROR, "error writing DEVCTL: %d\n",
err);
- bus->drvr->busstate = BRCMF_BUS_DOWN;
+ bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
}
bus->clkstate = CLK_AVAIL;
} else {
@@ -2596,9 +2549,9 @@ clkwait:
(bus->clkstate == CLK_AVAIL)) {
int ret, i;
- ret = brcmf_sdbrcm_send_buf(bus, bus->sdiodev->sbwad,
+ ret = brcmf_sdcard_send_buf(bus->sdiodev, bus->sdiodev->sbwad,
SDIO_FUNC_2, F2SYNC, (u8 *) bus->ctrl_frame_buf,
- (u32) bus->ctrl_frame_len, NULL);
+ (u32) bus->ctrl_frame_len);
if (ret < 0) {
/* On failure, abort the command and
@@ -2650,11 +2603,11 @@ clkwait:
else await next interrupt */
/* On failed register access, all bets are off:
no resched or interrupts */
- if ((bus->drvr->busstate == BRCMF_BUS_DOWN) ||
+ if ((bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) ||
brcmf_sdcard_regfail(bus->sdiodev)) {
brcmf_dbg(ERROR, "failed backplane access over SDIO, halting operation %d\n",
brcmf_sdcard_regfail(bus->sdiodev));
- bus->drvr->busstate = BRCMF_BUS_DOWN;
+ bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
bus->intstatus = 0;
} else if (bus->clkstate == CLK_PENDING) {
brcmf_dbg(INFO, "rescheduled due to CLK_PENDING awaiting I_CHIPACTIVE interrupt\n");
@@ -2681,7 +2634,7 @@ clkwait:
static int brcmf_sdbrcm_dpc_thread(void *data)
{
- struct brcmf_bus *bus = (struct brcmf_bus *) data;
+ struct brcmf_sdio *bus = (struct brcmf_sdio *) data;
allow_signal(SIGTERM);
/* Run until signal received */
@@ -2691,12 +2644,12 @@ static int brcmf_sdbrcm_dpc_thread(void *data)
if (!wait_for_completion_interruptible(&bus->dpc_wait)) {
/* Call bus dpc unless it indicated down
(then clean stop) */
- if (bus->drvr->busstate != BRCMF_BUS_DOWN) {
+ if (bus->sdiodev->bus_if->state != BRCMF_BUS_DOWN) {
if (brcmf_sdbrcm_dpc(bus))
complete(&bus->dpc_wait);
} else {
/* after stopping the bus, exit thread */
- brcmf_sdbrcm_bus_stop(bus);
+ brcmf_sdbrcm_bus_stop(bus->sdiodev->dev);
bus->dpc_tsk = NULL;
break;
}
@@ -2706,10 +2659,13 @@ static int brcmf_sdbrcm_dpc_thread(void *data)
return 0;
}
-int brcmf_sdbrcm_bus_txdata(struct brcmf_bus *bus, struct sk_buff *pkt)
+static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
{
int ret = -EBADE;
uint datalen, prec;
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv;
+ struct brcmf_sdio *bus = sdiodev->bus;
brcmf_dbg(TRACE, "Enter\n");
@@ -2728,9 +2684,10 @@ int brcmf_sdbrcm_bus_txdata(struct brcmf_bus *bus, struct sk_buff *pkt)
/* Priority based enq */
spin_lock_bh(&bus->txqlock);
- if (brcmf_c_prec_enq(bus->drvr, &bus->txq, pkt, prec) == false) {
+ if (brcmf_c_prec_enq(bus->sdiodev->dev, &bus->txq, pkt, prec) ==
+ false) {
skb_pull(pkt, SDPCM_HDRLEN);
- brcmf_txcomplete(bus->drvr, pkt, false);
+ brcmf_txcomplete(bus->sdiodev->dev, pkt, false);
brcmu_pkt_buf_free_skb(pkt);
brcmf_dbg(ERROR, "out of bus->txq !!!\n");
ret = -ENOSR;
@@ -2739,8 +2696,10 @@ int brcmf_sdbrcm_bus_txdata(struct brcmf_bus *bus, struct sk_buff *pkt)
}
spin_unlock_bh(&bus->txqlock);
- if (pktq_len(&bus->txq) >= TXHI)
- brcmf_txflowcontrol(bus->drvr, 0, ON);
+ if (pktq_len(&bus->txq) >= TXHI) {
+ bus->txoff = ON;
+ brcmf_txflowcontrol(bus->sdiodev->dev, 0, ON);
+ }
#ifdef BCMDBG
if (pktq_plen(&bus->txq, prec) > qcount[prec])
@@ -2757,7 +2716,7 @@ int brcmf_sdbrcm_bus_txdata(struct brcmf_bus *bus, struct sk_buff *pkt)
}
static int
-brcmf_sdbrcm_membytes(struct brcmf_bus *bus, bool write, u32 address, u8 *data,
+brcmf_sdbrcm_membytes(struct brcmf_sdio *bus, bool write, u32 address, u8 *data,
uint size)
{
int bcmerror = 0;
@@ -2818,7 +2777,7 @@ xfer_done:
#ifdef BCMDBG
#define CONSOLE_LINE_MAX 192
-static int brcmf_sdbrcm_readconsole(struct brcmf_bus *bus)
+static int brcmf_sdbrcm_readconsole(struct brcmf_sdio *bus)
{
struct brcmf_console *c = &bus->console;
u8 line[CONSOLE_LINE_MAX], ch;
@@ -2895,14 +2854,14 @@ break2:
}
#endif /* BCMDBG */
-static int brcmf_tx_frame(struct brcmf_bus *bus, u8 *frame, u16 len)
+static int brcmf_tx_frame(struct brcmf_sdio *bus, u8 *frame, u16 len)
{
int i;
int ret;
bus->ctrl_frame_stat = false;
- ret = brcmf_sdbrcm_send_buf(bus, bus->sdiodev->sbwad,
- SDIO_FUNC_2, F2SYNC, frame, len, NULL);
+ ret = brcmf_sdcard_send_buf(bus->sdiodev, bus->sdiodev->sbwad,
+ SDIO_FUNC_2, F2SYNC, frame, len);
if (ret < 0) {
/* On failure, abort the command and terminate the frame */
@@ -2937,8 +2896,8 @@ static int brcmf_tx_frame(struct brcmf_bus *bus, u8 *frame, u16 len)
return ret;
}
-int
-brcmf_sdbrcm_bus_txctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen)
+static int
+brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
{
u8 *frame;
u16 len;
@@ -2946,6 +2905,9 @@ brcmf_sdbrcm_bus_txctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen)
uint retries = 0;
u8 doff = 0;
int ret = -1;
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv;
+ struct brcmf_sdio *bus = sdiodev->bus;
brcmf_dbg(TRACE, "Enter\n");
@@ -3045,19 +3007,22 @@ brcmf_sdbrcm_bus_txctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen)
up(&bus->sdsem);
if (ret)
- bus->drvr->tx_ctlerrs++;
+ bus->tx_ctlerrs++;
else
- bus->drvr->tx_ctlpkts++;
+ bus->tx_ctlpkts++;
return ret ? -EIO : 0;
}
-int
-brcmf_sdbrcm_bus_rxctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen)
+static int
+brcmf_sdbrcm_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen)
{
int timeleft;
uint rxlen = 0;
bool pending;
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv;
+ struct brcmf_sdio *bus = sdiodev->bus;
brcmf_dbg(TRACE, "Enter\n");
@@ -3083,21 +3048,21 @@ brcmf_sdbrcm_bus_rxctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen)
}
if (rxlen)
- bus->drvr->rx_ctlpkts++;
+ bus->rx_ctlpkts++;
else
- bus->drvr->rx_ctlerrs++;
+ bus->rx_ctlerrs++;
return rxlen ? (int)rxlen : -ETIMEDOUT;
}
-static int brcmf_sdbrcm_downloadvars(struct brcmf_bus *bus, void *arg, int len)
+static int brcmf_sdbrcm_downloadvars(struct brcmf_sdio *bus, void *arg, int len)
{
int bcmerror = 0;
brcmf_dbg(TRACE, "Enter\n");
/* Basic sanity checks */
- if (bus->drvr->up) {
+ if (bus->sdiodev->bus_if->drvr_up) {
bcmerror = -EISCONN;
goto err;
}
@@ -3123,7 +3088,7 @@ err:
return bcmerror;
}
-static int brcmf_sdbrcm_write_vars(struct brcmf_bus *bus)
+static int brcmf_sdbrcm_write_vars(struct brcmf_sdio *bus)
{
int bcmerror = 0;
u32 varsize;
@@ -3154,8 +3119,10 @@ static int brcmf_sdbrcm_write_vars(struct brcmf_bus *bus)
/* Verify NVRAM bytes */
brcmf_dbg(INFO, "Compare NVRAM dl & ul; varsize=%d\n", varsize);
nvram_ularray = kmalloc(varsize, GFP_ATOMIC);
- if (!nvram_ularray)
+ if (!nvram_ularray) {
+ kfree(vbuffer);
return -ENOMEM;
+ }
/* Upload image to verify downloaded contents. */
memset(nvram_ularray, 0xaa, varsize);
@@ -3210,135 +3177,11 @@ static int brcmf_sdbrcm_write_vars(struct brcmf_bus *bus)
return bcmerror;
}
-static void
-brcmf_sdbrcm_chip_disablecore(struct brcmf_sdio_dev *sdiodev, u32 corebase)
-{
- u32 regdata;
-
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(corebase, sbtmstatelow), 4);
- if (regdata & SBTML_RESET)
- return;
-
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(corebase, sbtmstatelow), 4);
- if ((regdata & (SICF_CLOCK_EN << SBTML_SICF_SHIFT)) != 0) {
- /*
- * set target reject and spin until busy is clear
- * (preserve core-specific bits)
- */
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(corebase, sbtmstatelow), 4);
- brcmf_sdcard_reg_write(sdiodev, CORE_SB(corebase, sbtmstatelow),
- 4, regdata | SBTML_REJ);
-
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(corebase, sbtmstatelow), 4);
- udelay(1);
- SPINWAIT((brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(corebase, sbtmstatehigh), 4) &
- SBTMH_BUSY), 100000);
-
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(corebase, sbtmstatehigh), 4);
- if (regdata & SBTMH_BUSY)
- brcmf_dbg(ERROR, "ARM core still busy\n");
-
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(corebase, sbidlow), 4);
- if (regdata & SBIDL_INIT) {
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(corebase, sbimstate), 4) |
- SBIM_RJ;
- brcmf_sdcard_reg_write(sdiodev,
- CORE_SB(corebase, sbimstate), 4,
- regdata);
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(corebase, sbimstate), 4);
- udelay(1);
- SPINWAIT((brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(corebase, sbimstate), 4) &
- SBIM_BY), 100000);
- }
-
- /* set reset and reject while enabling the clocks */
- brcmf_sdcard_reg_write(sdiodev,
- CORE_SB(corebase, sbtmstatelow), 4,
- (((SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
- SBTML_REJ | SBTML_RESET));
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(corebase, sbtmstatelow), 4);
- udelay(10);
-
- /* clear the initiator reject bit */
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(corebase, sbidlow), 4);
- if (regdata & SBIDL_INIT) {
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(corebase, sbimstate), 4) &
- ~SBIM_RJ;
- brcmf_sdcard_reg_write(sdiodev,
- CORE_SB(corebase, sbimstate), 4,
- regdata);
- }
- }
-
- /* leave reset and reject asserted */
- brcmf_sdcard_reg_write(sdiodev, CORE_SB(corebase, sbtmstatelow), 4,
- (SBTML_REJ | SBTML_RESET));
- udelay(1);
-}
-
-static void
-brcmf_sdbrcm_chip_resetcore(struct brcmf_sdio_dev *sdiodev, u32 corebase)
-{
- u32 regdata;
-
- /*
- * Must do the disable sequence first to work for
- * arbitrary current core state.
- */
- brcmf_sdbrcm_chip_disablecore(sdiodev, corebase);
-
- /*
- * Now do the initialization sequence.
- * set reset while enabling the clock and
- * forcing them on throughout the core
- */
- brcmf_sdcard_reg_write(sdiodev, CORE_SB(corebase, sbtmstatelow), 4,
- ((SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
- SBTML_RESET);
- udelay(1);
-
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(corebase, sbtmstatehigh), 4);
- if (regdata & SBTMH_SERR)
- brcmf_sdcard_reg_write(sdiodev,
- CORE_SB(corebase, sbtmstatehigh), 4, 0);
-
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(corebase, sbimstate), 4);
- if (regdata & (SBIM_IBE | SBIM_TO))
- brcmf_sdcard_reg_write(sdiodev, CORE_SB(corebase, sbimstate), 4,
- regdata & ~(SBIM_IBE | SBIM_TO));
-
- /* clear reset and allow it to propagate throughout the core */
- brcmf_sdcard_reg_write(sdiodev, CORE_SB(corebase, sbtmstatelow), 4,
- (SICF_FGC << SBTML_SICF_SHIFT) |
- (SICF_CLOCK_EN << SBTML_SICF_SHIFT));
- udelay(1);
-
- /* leave clock enabled */
- brcmf_sdcard_reg_write(sdiodev, CORE_SB(corebase, sbtmstatelow), 4,
- (SICF_CLOCK_EN << SBTML_SICF_SHIFT));
- udelay(1);
-}
-
-static int brcmf_sdbrcm_download_state(struct brcmf_bus *bus, bool enter)
+static int brcmf_sdbrcm_download_state(struct brcmf_sdio *bus, bool enter)
{
uint retries;
- u32 regdata;
int bcmerror = 0;
+ struct chip_info *ci = bus->ci;
/* To enter download state, disable ARM and reset SOCRAM.
* To exit download state, simply reset ARM (default is RAM boot).
@@ -3346,10 +3189,9 @@ static int brcmf_sdbrcm_download_state(struct brcmf_bus *bus, bool enter)
if (enter) {
bus->alp_only = true;
- brcmf_sdbrcm_chip_disablecore(bus->sdiodev,
- bus->ci->armcorebase);
+ ci->coredisable(bus->sdiodev, ci, BCMA_CORE_ARM_CM3);
- brcmf_sdbrcm_chip_resetcore(bus->sdiodev, bus->ci->ramcorebase);
+ ci->resetcore(bus->sdiodev, ci, BCMA_CORE_INTERNAL_MEM);
/* Clear the top bit of memory */
if (bus->ramsize) {
@@ -3358,11 +3200,7 @@ static int brcmf_sdbrcm_download_state(struct brcmf_bus *bus, bool enter)
(u8 *)&zeros, 4);
}
} else {
- regdata = brcmf_sdcard_reg_read(bus->sdiodev,
- CORE_SB(bus->ci->ramcorebase, sbtmstatelow), 4);
- regdata &= (SBTML_RESET | SBTML_REJ_MASK |
- (SICF_CLOCK_EN << SBTML_SICF_SHIFT));
- if ((SICF_CLOCK_EN << SBTML_SICF_SHIFT) != regdata) {
+ if (!ci->iscoreup(bus->sdiodev, ci, BCMA_CORE_INTERNAL_MEM)) {
brcmf_dbg(ERROR, "SOCRAM core is down after reset?\n");
bcmerror = -EBADE;
goto fail;
@@ -3377,18 +3215,18 @@ static int brcmf_sdbrcm_download_state(struct brcmf_bus *bus, bool enter)
w_sdreg32(bus, 0xFFFFFFFF,
offsetof(struct sdpcmd_regs, intstatus), &retries);
- brcmf_sdbrcm_chip_resetcore(bus->sdiodev, bus->ci->armcorebase);
+ ci->resetcore(bus->sdiodev, ci, BCMA_CORE_ARM_CM3);
/* Allow HT Clock now that the ARM is running. */
bus->alp_only = false;
- bus->drvr->busstate = BRCMF_BUS_LOAD;
+ bus->sdiodev->bus_if->state = BRCMF_BUS_LOAD;
}
fail:
return bcmerror;
}
-static int brcmf_sdbrcm_get_image(char *buf, int len, struct brcmf_bus *bus)
+static int brcmf_sdbrcm_get_image(char *buf, int len, struct brcmf_sdio *bus)
{
if (bus->firmware->size < bus->fw_ptr + len)
len = bus->firmware->size - bus->fw_ptr;
@@ -3398,10 +3236,7 @@ static int brcmf_sdbrcm_get_image(char *buf, int len, struct brcmf_bus *bus)
return len;
}
-MODULE_FIRMWARE(BCM4329_FW_NAME);
-MODULE_FIRMWARE(BCM4329_NV_NAME);
-
-static int brcmf_sdbrcm_download_code_file(struct brcmf_bus *bus)
+static int brcmf_sdbrcm_download_code_file(struct brcmf_sdio *bus)
{
int offset = 0;
uint len;
@@ -3410,8 +3245,7 @@ static int brcmf_sdbrcm_download_code_file(struct brcmf_bus *bus)
brcmf_dbg(INFO, "Enter\n");
- bus->fw_name = BCM4329_FW_NAME;
- ret = request_firmware(&bus->firmware, bus->fw_name,
+ ret = request_firmware(&bus->firmware, BRCMFMAC_FW_NAME,
&bus->sdiodev->func[2]->dev);
if (ret) {
brcmf_dbg(ERROR, "Fail to request firmware %d\n", ret);
@@ -3501,15 +3335,14 @@ static uint brcmf_process_nvram_vars(char *varbuf, uint len)
return buf_len;
}
-static int brcmf_sdbrcm_download_nvram(struct brcmf_bus *bus)
+static int brcmf_sdbrcm_download_nvram(struct brcmf_sdio *bus)
{
uint len;
char *memblock = NULL;
char *bufp;
int ret;
- bus->nv_name = BCM4329_NV_NAME;
- ret = request_firmware(&bus->firmware, bus->nv_name,
+ ret = request_firmware(&bus->firmware, BRCMFMAC_NV_NAME,
&bus->sdiodev->func[2]->dev);
if (ret) {
brcmf_dbg(ERROR, "Fail to request nvram %d\n", ret);
@@ -3549,7 +3382,7 @@ err:
return ret;
}
-static int _brcmf_sdbrcm_download_firmware(struct brcmf_bus *bus)
+static int _brcmf_sdbrcm_download_firmware(struct brcmf_sdio *bus)
{
int bcmerror = -1;
@@ -3582,7 +3415,7 @@ err:
}
static bool
-brcmf_sdbrcm_download_firmware(struct brcmf_bus *bus)
+brcmf_sdbrcm_download_firmware(struct brcmf_sdio *bus)
{
bool ret;
@@ -3596,91 +3429,11 @@ brcmf_sdbrcm_download_firmware(struct brcmf_bus *bus)
return ret;
}
-void brcmf_sdbrcm_bus_stop(struct brcmf_bus *bus)
-{
- u32 local_hostintmask;
- u8 saveclk;
- uint retries;
- int err;
-
- brcmf_dbg(TRACE, "Enter\n");
-
- if (bus->watchdog_tsk) {
- send_sig(SIGTERM, bus->watchdog_tsk, 1);
- kthread_stop(bus->watchdog_tsk);
- bus->watchdog_tsk = NULL;
- }
-
- if (bus->dpc_tsk && bus->dpc_tsk != current) {
- send_sig(SIGTERM, bus->dpc_tsk, 1);
- kthread_stop(bus->dpc_tsk);
- bus->dpc_tsk = NULL;
- }
-
- down(&bus->sdsem);
-
- bus_wake(bus);
-
- /* Enable clock for device interrupts */
- brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
-
- /* Disable and clear interrupts at the chip level also */
- w_sdreg32(bus, 0, offsetof(struct sdpcmd_regs, hostintmask), &retries);
- local_hostintmask = bus->hostintmask;
- bus->hostintmask = 0;
-
- /* Change our idea of bus state */
- bus->drvr->busstate = BRCMF_BUS_DOWN;
-
- /* Force clocks on backplane to be sure F2 interrupt propagates */
- saveclk = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR, &err);
- if (!err) {
- brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR,
- (saveclk | SBSDIO_FORCE_HT), &err);
- }
- if (err)
- brcmf_dbg(ERROR, "Failed to force clock for F2: err %d\n", err);
-
- /* Turn off the bus (F2), free any pending packets */
- brcmf_dbg(INTR, "disable SDIO interrupts\n");
- brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_0, SDIO_CCCR_IOEx,
- SDIO_FUNC_ENABLE_1, NULL);
-
- /* Clear any pending interrupts now that F2 is disabled */
- w_sdreg32(bus, local_hostintmask,
- offsetof(struct sdpcmd_regs, intstatus), &retries);
-
- /* Turn off the backplane clock (only) */
- brcmf_sdbrcm_clkctl(bus, CLK_SDONLY, false);
-
- /* Clear the data packet queues */
- brcmu_pktq_flush(&bus->txq, true, NULL, NULL);
-
- /* Clear any held glomming stuff */
- if (bus->glomd)
- brcmu_pkt_buf_free_skb(bus->glomd);
-
- if (bus->glom)
- brcmu_pkt_buf_free_skb(bus->glom);
-
- bus->glom = bus->glomd = NULL;
-
- /* Clear rx control and wake any waiters */
- bus->rxlen = 0;
- brcmf_sdbrcm_dcmd_resp_wake(bus);
-
- /* Reset some F2 state stuff */
- bus->rxskip = false;
- bus->tx_seq = bus->rx_seq = 0;
-
- up(&bus->sdsem);
-}
-
-int brcmf_sdbrcm_bus_init(struct brcmf_pub *drvr)
+static int brcmf_sdbrcm_bus_init(struct device *dev)
{
- struct brcmf_bus *bus = drvr->bus;
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv;
+ struct brcmf_sdio *bus = sdiodev->bus;
unsigned long timeout;
uint retries = 0;
u8 ready, enable;
@@ -3690,16 +3443,16 @@ int brcmf_sdbrcm_bus_init(struct brcmf_pub *drvr)
brcmf_dbg(TRACE, "Enter\n");
/* try to download image and nvram to the dongle */
- if (drvr->busstate == BRCMF_BUS_DOWN) {
+ if (bus_if->state == BRCMF_BUS_DOWN) {
if (!(brcmf_sdbrcm_download_firmware(bus)))
return -1;
}
- if (!bus->drvr)
+ if (!bus->sdiodev->bus_if->drvr)
return 0;
/* Start the watchdog timer */
- bus->drvr->tickcnt = 0;
+ bus->tickcnt = 0;
brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
down(&bus->sdsem);
@@ -3756,7 +3509,7 @@ int brcmf_sdbrcm_bus_init(struct brcmf_pub *drvr)
SBSDIO_WATERMARK, 8, &err);
/* Set bus state according to enable result */
- drvr->busstate = BRCMF_BUS_DATA;
+ bus_if->state = BRCMF_BUS_DATA;
}
else {
@@ -3771,7 +3524,7 @@ int brcmf_sdbrcm_bus_init(struct brcmf_pub *drvr)
SBSDIO_FUNC1_CHIPCLKCSR, saveclk, &err);
/* If we didn't come up, turn off backplane clock */
- if (drvr->busstate != BRCMF_BUS_DATA)
+ if (bus_if->state != BRCMF_BUS_DATA)
brcmf_sdbrcm_clkctl(bus, CLK_NONE, false);
exit:
@@ -3782,7 +3535,7 @@ exit:
void brcmf_sdbrcm_isr(void *arg)
{
- struct brcmf_bus *bus = (struct brcmf_bus *) arg;
+ struct brcmf_sdio *bus = (struct brcmf_sdio *) arg;
brcmf_dbg(TRACE, "Enter\n");
@@ -3791,7 +3544,7 @@ void brcmf_sdbrcm_isr(void *arg)
return;
}
- if (bus->drvr->busstate == BRCMF_BUS_DOWN) {
+ if (bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) {
brcmf_dbg(ERROR, "bus is down. we have nothing to do\n");
return;
}
@@ -3814,14 +3567,14 @@ void brcmf_sdbrcm_isr(void *arg)
complete(&bus->dpc_wait);
}
-static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_pub *drvr)
+static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
{
- struct brcmf_bus *bus;
+#ifdef BCMDBG
+ struct brcmf_bus *bus_if = dev_get_drvdata(bus->sdiodev->dev);
+#endif /* BCMDBG */
brcmf_dbg(TIMER, "Enter\n");
- bus = drvr->bus;
-
/* Ignore the timer if simulating bus down */
if (bus->sleeping)
return false;
@@ -3865,7 +3618,8 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_pub *drvr)
}
#ifdef BCMDBG
/* Poll for console output periodically */
- if (drvr->busstate == BRCMF_BUS_DATA && bus->console_interval != 0) {
+ if (bus_if->state == BRCMF_BUS_DATA &&
+ bus->console_interval != 0) {
bus->console.count += BRCMF_WD_POLL_MS;
if (bus->console.count >= bus->console_interval) {
bus->console.count -= bus->console_interval;
@@ -3900,10 +3654,12 @@ static bool brcmf_sdbrcm_chipmatch(u16 chipid)
{
if (chipid == BCM4329_CHIP_ID)
return true;
+ if (chipid == BCM4330_CHIP_ID)
+ return true;
return false;
}
-static void brcmf_sdbrcm_release_malloc(struct brcmf_bus *bus)
+static void brcmf_sdbrcm_release_malloc(struct brcmf_sdio *bus)
{
brcmf_dbg(TRACE, "Enter\n");
@@ -3915,13 +3671,13 @@ static void brcmf_sdbrcm_release_malloc(struct brcmf_bus *bus)
bus->databuf = NULL;
}
-static bool brcmf_sdbrcm_probe_malloc(struct brcmf_bus *bus)
+static bool brcmf_sdbrcm_probe_malloc(struct brcmf_sdio *bus)
{
brcmf_dbg(TRACE, "Enter\n");
- if (bus->drvr->maxctl) {
+ if (bus->sdiodev->bus_if->maxctl) {
bus->rxblen =
- roundup((bus->drvr->maxctl + SDPCM_HDRLEN),
+ roundup((bus->sdiodev->bus_if->maxctl + SDPCM_HDRLEN),
ALIGNMENT) + BRCMF_SDALIGN;
bus->rxbuf = kmalloc(bus->rxblen, GFP_ATOMIC);
if (!(bus->rxbuf))
@@ -3950,276 +3706,14 @@ fail:
return false;
}
-/* SDIO Pad drive strength to select value mappings */
-struct sdiod_drive_str {
- u8 strength; /* Pad Drive Strength in mA */
- u8 sel; /* Chip-specific select value */
-};
-
-/* SDIO Drive Strength to sel value table for PMU Rev 1 */
-static const struct sdiod_drive_str sdiod_drive_strength_tab1[] = {
- {
- 4, 0x2}, {
- 2, 0x3}, {
- 1, 0x0}, {
- 0, 0x0}
- };
-
-/* SDIO Drive Strength to sel value table for PMU Rev 2, 3 */
-static const struct sdiod_drive_str sdiod_drive_strength_tab2[] = {
- {
- 12, 0x7}, {
- 10, 0x6}, {
- 8, 0x5}, {
- 6, 0x4}, {
- 4, 0x2}, {
- 2, 0x1}, {
- 0, 0x0}
- };
-
-/* SDIO Drive Strength to sel value table for PMU Rev 8 (1.8V) */
-static const struct sdiod_drive_str sdiod_drive_strength_tab3[] = {
- {
- 32, 0x7}, {
- 26, 0x6}, {
- 22, 0x5}, {
- 16, 0x4}, {
- 12, 0x3}, {
- 8, 0x2}, {
- 4, 0x1}, {
- 0, 0x0}
- };
-
-#define SDIOD_DRVSTR_KEY(chip, pmu) (((chip) << 16) | (pmu))
-
-static char *brcmf_chipname(uint chipid, char *buf, uint len)
-{
- const char *fmt;
-
- fmt = ((chipid > 0xa000) || (chipid < 0x4000)) ? "%d" : "%x";
- snprintf(buf, len, fmt, chipid);
- return buf;
-}
-
-static void brcmf_sdbrcm_sdiod_drive_strength_init(struct brcmf_bus *bus,
- u32 drivestrength) {
- struct sdiod_drive_str *str_tab = NULL;
- u32 str_mask = 0;
- u32 str_shift = 0;
- char chn[8];
-
- if (!(bus->ci->cccaps & CC_CAP_PMU))
- return;
-
- switch (SDIOD_DRVSTR_KEY(bus->ci->chip, bus->ci->pmurev)) {
- case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 1):
- str_tab = (struct sdiod_drive_str *)&sdiod_drive_strength_tab1;
- str_mask = 0x30000000;
- str_shift = 28;
- break;
- case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 2):
- case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 3):
- str_tab = (struct sdiod_drive_str *)&sdiod_drive_strength_tab2;
- str_mask = 0x00003800;
- str_shift = 11;
- break;
- case SDIOD_DRVSTR_KEY(BCM4336_CHIP_ID, 8):
- str_tab = (struct sdiod_drive_str *)&sdiod_drive_strength_tab3;
- str_mask = 0x00003800;
- str_shift = 11;
- break;
- default:
- brcmf_dbg(ERROR, "No SDIO Drive strength init done for chip %s rev %d pmurev %d\n",
- brcmf_chipname(bus->ci->chip, chn, 8),
- bus->ci->chiprev, bus->ci->pmurev);
- break;
- }
-
- if (str_tab != NULL) {
- u32 drivestrength_sel = 0;
- u32 cc_data_temp;
- int i;
-
- for (i = 0; str_tab[i].strength != 0; i++) {
- if (drivestrength >= str_tab[i].strength) {
- drivestrength_sel = str_tab[i].sel;
- break;
- }
- }
-
- brcmf_sdcard_reg_write(bus->sdiodev,
- CORE_CC_REG(bus->ci->cccorebase, chipcontrol_addr),
- 4, 1);
- cc_data_temp = brcmf_sdcard_reg_read(bus->sdiodev,
- CORE_CC_REG(bus->ci->cccorebase, chipcontrol_addr), 4);
- cc_data_temp &= ~str_mask;
- drivestrength_sel <<= str_shift;
- cc_data_temp |= drivestrength_sel;
- brcmf_sdcard_reg_write(bus->sdiodev,
- CORE_CC_REG(bus->ci->cccorebase, chipcontrol_addr),
- 4, cc_data_temp);
-
- brcmf_dbg(INFO, "SDIO: %dmA drive strength selected, set to 0x%08x\n",
- drivestrength, cc_data_temp);
- }
-}
-
-static int
-brcmf_sdbrcm_chip_recognition(struct brcmf_sdio_dev *sdiodev,
- struct chip_info *ci, u32 regs)
-{
- u32 regdata;
-
- /*
- * Get CC core rev
- * Chipid is assume to be at offset 0 from regs arg
- * For different chiptypes or old sdio hosts w/o chipcommon,
- * other ways of recognition should be added here.
- */
- ci->cccorebase = regs;
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_CC_REG(ci->cccorebase, chipid), 4);
- ci->chip = regdata & CID_ID_MASK;
- ci->chiprev = (regdata & CID_REV_MASK) >> CID_REV_SHIFT;
-
- brcmf_dbg(INFO, "chipid=0x%x chiprev=%d\n", ci->chip, ci->chiprev);
-
- /* Address of cores for new chips should be added here */
- switch (ci->chip) {
- case BCM4329_CHIP_ID:
- ci->buscorebase = BCM4329_CORE_BUS_BASE;
- ci->ramcorebase = BCM4329_CORE_SOCRAM_BASE;
- ci->armcorebase = BCM4329_CORE_ARM_BASE;
- ci->ramsize = BCM4329_RAMSIZE;
- break;
- default:
- brcmf_dbg(ERROR, "chipid 0x%x is not supported\n", ci->chip);
- return -ENODEV;
- }
-
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(ci->cccorebase, sbidhigh), 4);
- ci->ccrev = SBCOREREV(regdata);
-
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_CC_REG(ci->cccorebase, pmucapabilities), 4);
- ci->pmurev = regdata & PCAP_REV_MASK;
-
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(ci->buscorebase, sbidhigh), 4);
- ci->buscorerev = SBCOREREV(regdata);
- ci->buscoretype = (regdata & SBIDH_CC_MASK) >> SBIDH_CC_SHIFT;
-
- brcmf_dbg(INFO, "ccrev=%d, pmurev=%d, buscore rev/type=%d/0x%x\n",
- ci->ccrev, ci->pmurev, ci->buscorerev, ci->buscoretype);
-
- /* get chipcommon capabilites */
- ci->cccaps = brcmf_sdcard_reg_read(sdiodev,
- CORE_CC_REG(ci->cccorebase, capabilities), 4);
-
- return 0;
-}
-
-static int
-brcmf_sdbrcm_chip_attach(struct brcmf_bus *bus, u32 regs)
-{
- struct chip_info *ci;
- int err;
- u8 clkval, clkset;
-
- brcmf_dbg(TRACE, "Enter\n");
-
- /* alloc chip_info_t */
- ci = kzalloc(sizeof(struct chip_info), GFP_ATOMIC);
- if (NULL == ci)
- return -ENOMEM;
-
- /* bus/core/clk setup for register access */
- /* Try forcing SDIO core to do ALPAvail request only */
- clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ;
- brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
- if (err) {
- brcmf_dbg(ERROR, "error writing for HT off\n");
- goto fail;
- }
-
- /* If register supported, wait for ALPAvail and then force ALP */
- /* This may take up to 15 milliseconds */
- clkval = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR, NULL);
- if ((clkval & ~SBSDIO_AVBITS) == clkset) {
- SPINWAIT(((clkval =
- brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR,
- NULL)),
- !SBSDIO_ALPAV(clkval)),
- PMU_MAX_TRANSITION_DLY);
- if (!SBSDIO_ALPAV(clkval)) {
- brcmf_dbg(ERROR, "timeout on ALPAV wait, clkval 0x%02x\n",
- clkval);
- err = -EBUSY;
- goto fail;
- }
- clkset = SBSDIO_FORCE_HW_CLKREQ_OFF |
- SBSDIO_FORCE_ALP;
- brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR,
- clkset, &err);
- udelay(65);
- } else {
- brcmf_dbg(ERROR, "ChipClkCSR access: wrote 0x%02x read 0x%02x\n",
- clkset, clkval);
- err = -EACCES;
- goto fail;
- }
-
- /* Also, disable the extra SDIO pull-ups */
- brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);
-
- err = brcmf_sdbrcm_chip_recognition(bus->sdiodev, ci, regs);
- if (err)
- goto fail;
-
- /*
- * Make sure any on-chip ARM is off (in case strapping is wrong),
- * or downloaded code was already running.
- */
- brcmf_sdbrcm_chip_disablecore(bus->sdiodev, ci->armcorebase);
-
- brcmf_sdcard_reg_write(bus->sdiodev,
- CORE_CC_REG(ci->cccorebase, gpiopullup), 4, 0);
- brcmf_sdcard_reg_write(bus->sdiodev,
- CORE_CC_REG(ci->cccorebase, gpiopulldown), 4, 0);
-
- /* Disable F2 to clear any intermediate frame state on the dongle */
- brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_0, SDIO_CCCR_IOEx,
- SDIO_FUNC_ENABLE_1, NULL);
-
- /* WAR: cmd52 backplane read so core HW will drop ALPReq */
- clkval = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
- 0, NULL);
-
- /* Done with backplane-dependent accesses, can drop clock... */
- brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);
-
- bus->ci = ci;
- return 0;
-fail:
- bus->ci = NULL;
- kfree(ci);
- return err;
-}
-
static bool
-brcmf_sdbrcm_probe_attach(struct brcmf_bus *bus, u32 regsva)
+brcmf_sdbrcm_probe_attach(struct brcmf_sdio *bus, u32 regsva)
{
u8 clkctl = 0;
int err = 0;
int reg_addr;
u32 reg_val;
+ u8 idx;
bus->alp_only = true;
@@ -4234,7 +3728,7 @@ brcmf_sdbrcm_probe_attach(struct brcmf_bus *bus, u32 regsva)
#endif /* BCMDBG */
/*
- * Force PLL off until brcmf_sdbrcm_chip_attach()
+ * Force PLL off until brcmf_sdio_chip_attach()
* programs PLL control regs
*/
@@ -4252,8 +3746,8 @@ brcmf_sdbrcm_probe_attach(struct brcmf_bus *bus, u32 regsva)
goto fail;
}
- if (brcmf_sdbrcm_chip_attach(bus, regsva)) {
- brcmf_dbg(ERROR, "brcmf_sdbrcm_chip_attach failed!\n");
+ if (brcmf_sdio_chip_attach(bus->sdiodev, &bus->ci, regsva)) {
+ brcmf_dbg(ERROR, "brcmf_sdio_chip_attach failed!\n");
goto fail;
}
@@ -4262,11 +3756,10 @@ brcmf_sdbrcm_probe_attach(struct brcmf_bus *bus, u32 regsva)
goto fail;
}
- brcmf_sdbrcm_sdiod_drive_strength_init(bus, SDIO_DRIVE_STRENGTH);
+ brcmf_sdio_chip_drivestrengthinit(bus->sdiodev, bus->ci,
+ SDIO_DRIVE_STRENGTH);
- /* Get info on the ARM and SOCRAM cores... */
- brcmf_sdcard_reg_read(bus->sdiodev,
- CORE_SB(bus->ci->armcorebase, sbidhigh), 4);
+ /* Get info on the SOCRAM cores... */
bus->ramsize = bus->ci->ramsize;
if (!(bus->ramsize)) {
brcmf_dbg(ERROR, "failed to find SOCRAM memory!\n");
@@ -4274,7 +3767,8 @@ brcmf_sdbrcm_probe_attach(struct brcmf_bus *bus, u32 regsva)
}
/* Set core control so an SDIO reset does a backplane reset */
- reg_addr = bus->ci->buscorebase +
+ idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
+ reg_addr = bus->ci->c_inf[idx].base +
offsetof(struct sdpcmd_regs, corecontrol);
reg_val = brcmf_sdcard_reg_read(bus->sdiodev, reg_addr, sizeof(u32));
brcmf_sdcard_reg_write(bus->sdiodev, reg_addr, sizeof(u32),
@@ -4298,7 +3792,7 @@ fail:
return false;
}
-static bool brcmf_sdbrcm_probe_init(struct brcmf_bus *bus)
+static bool brcmf_sdbrcm_probe_init(struct brcmf_sdio *bus)
{
brcmf_dbg(TRACE, "Enter\n");
@@ -4306,7 +3800,7 @@ static bool brcmf_sdbrcm_probe_init(struct brcmf_bus *bus)
brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_0, SDIO_CCCR_IOEx,
SDIO_FUNC_ENABLE_1, NULL);
- bus->drvr->busstate = BRCMF_BUS_DOWN;
+ bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
bus->sleeping = false;
bus->rxflow = false;
@@ -4333,7 +3827,7 @@ static bool brcmf_sdbrcm_probe_init(struct brcmf_bus *bus)
static int
brcmf_sdbrcm_watchdog_thread(void *data)
{
- struct brcmf_bus *bus = (struct brcmf_bus *)data;
+ struct brcmf_sdio *bus = (struct brcmf_sdio *)data;
allow_signal(SIGTERM);
/* Run until signal received */
@@ -4341,9 +3835,9 @@ brcmf_sdbrcm_watchdog_thread(void *data)
if (kthread_should_stop())
break;
if (!wait_for_completion_interruptible(&bus->watchdog_wait)) {
- brcmf_sdbrcm_bus_watchdog(bus->drvr);
+ brcmf_sdbrcm_bus_watchdog(bus);
/* Count the tick for reference */
- bus->drvr->tickcnt++;
+ bus->tickcnt++;
} else
break;
}
@@ -4353,7 +3847,7 @@ brcmf_sdbrcm_watchdog_thread(void *data)
static void
brcmf_sdbrcm_watchdog(unsigned long data)
{
- struct brcmf_bus *bus = (struct brcmf_bus *)data;
+ struct brcmf_sdio *bus = (struct brcmf_sdio *)data;
if (bus->watchdog_tsk) {
complete(&bus->watchdog_wait);
@@ -4364,23 +3858,14 @@ brcmf_sdbrcm_watchdog(unsigned long data)
}
}
-static void
-brcmf_sdbrcm_chip_detach(struct brcmf_bus *bus)
-{
- brcmf_dbg(TRACE, "Enter\n");
-
- kfree(bus->ci);
- bus->ci = NULL;
-}
-
-static void brcmf_sdbrcm_release_dongle(struct brcmf_bus *bus)
+static void brcmf_sdbrcm_release_dongle(struct brcmf_sdio *bus)
{
brcmf_dbg(TRACE, "Enter\n");
if (bus->ci) {
brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
brcmf_sdbrcm_clkctl(bus, CLK_NONE, false);
- brcmf_sdbrcm_chip_detach(bus);
+ brcmf_sdio_chip_detach(&bus->ci);
if (bus->vars && bus->varsz)
kfree(bus->vars);
bus->vars = NULL;
@@ -4390,7 +3875,7 @@ static void brcmf_sdbrcm_release_dongle(struct brcmf_bus *bus)
}
/* Detach and free everything */
-static void brcmf_sdbrcm_release(struct brcmf_bus *bus)
+static void brcmf_sdbrcm_release(struct brcmf_sdio *bus)
{
brcmf_dbg(TRACE, "Enter\n");
@@ -4398,10 +3883,9 @@ static void brcmf_sdbrcm_release(struct brcmf_bus *bus)
/* De-register interrupt handler */
brcmf_sdcard_intr_dereg(bus->sdiodev);
- if (bus->drvr) {
- brcmf_detach(bus->drvr);
+ if (bus->sdiodev->bus_if->drvr) {
+ brcmf_detach(bus->sdiodev->dev);
brcmf_sdbrcm_release_dongle(bus);
- bus->drvr = NULL;
}
brcmf_sdbrcm_release_malloc(bus);
@@ -4412,21 +3896,10 @@ static void brcmf_sdbrcm_release(struct brcmf_bus *bus)
brcmf_dbg(TRACE, "Disconnected\n");
}
-void *brcmf_sdbrcm_probe(u16 bus_no, u16 slot, u16 func, uint bustype,
- u32 regsva, struct brcmf_sdio_dev *sdiodev)
+void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
{
int ret;
- struct brcmf_bus *bus;
-
- /* Init global variables at run-time, not as part of the declaration.
- * This is required to support init/de-init of the driver.
- * Initialization
- * of globals as part of the declaration results in non-deterministic
- * behavior since the value of the globals may be different on the
- * first time that the driver is initialized vs subsequent
- * initializations.
- */
- brcmf_c_init();
+ struct brcmf_sdio *bus;
brcmf_dbg(TRACE, "Enter\n");
@@ -4434,12 +3907,13 @@ void *brcmf_sdbrcm_probe(u16 bus_no, u16 slot, u16 func, uint bustype,
* regsva == SI_ENUM_BASE*/
/* Allocate private bus interface state */
- bus = kzalloc(sizeof(struct brcmf_bus), GFP_ATOMIC);
+ bus = kzalloc(sizeof(struct brcmf_sdio), GFP_ATOMIC);
if (!bus)
goto fail;
bus->sdiodev = sdiodev;
sdiodev->bus = bus;
+ skb_queue_head_init(&bus->glom);
bus->txbound = BRCMF_TXBOUND;
bus->rxbound = BRCMF_RXBOUND;
bus->txminmax = BRCMF_TXMINMAX;
@@ -4484,9 +3958,15 @@ void *brcmf_sdbrcm_probe(u16 bus_no, u16 slot, u16 func, uint bustype,
bus->dpc_tsk = NULL;
}
+ /* Assign bus interface call back */
+ bus->sdiodev->bus_if->brcmf_bus_stop = brcmf_sdbrcm_bus_stop;
+ bus->sdiodev->bus_if->brcmf_bus_init = brcmf_sdbrcm_bus_init;
+ bus->sdiodev->bus_if->brcmf_bus_txdata = brcmf_sdbrcm_bus_txdata;
+ bus->sdiodev->bus_if->brcmf_bus_txctl = brcmf_sdbrcm_bus_txctl;
+ bus->sdiodev->bus_if->brcmf_bus_rxctl = brcmf_sdbrcm_bus_rxctl;
/* Attach to the brcmf/OS/network interface */
- bus->drvr = brcmf_attach(bus, SDPCM_RESERVE);
- if (!bus->drvr) {
+ ret = brcmf_attach(SDPCM_RESERVE, bus->sdiodev->dev);
+ if (ret != 0) {
brcmf_dbg(ERROR, "brcmf_attach failed\n");
goto fail;
}
@@ -4514,16 +3994,17 @@ void *brcmf_sdbrcm_probe(u16 bus_no, u16 slot, u16 func, uint bustype,
brcmf_dbg(INFO, "completed!!\n");
/* if firmware path present try to download and bring up bus */
- ret = brcmf_bus_start(bus->drvr);
+ ret = brcmf_bus_start(bus->sdiodev->dev);
if (ret != 0) {
if (ret == -ENOLINK) {
brcmf_dbg(ERROR, "dongle is not responding\n");
goto fail;
}
}
- /* Ok, have the per-port tell the stack we're open for business */
- if (brcmf_net_attach(bus->drvr, 0) != 0) {
- brcmf_dbg(ERROR, "Net attach failed!!\n");
+
+ /* add interface and open for business */
+ if (brcmf_add_if(bus->sdiodev->dev, 0, "wlan%d", NULL)) {
+ brcmf_dbg(ERROR, "Add primary net device interface failed!!\n");
goto fail;
}
@@ -4536,7 +4017,7 @@ fail:
void brcmf_sdbrcm_disconnect(void *ptr)
{
- struct brcmf_bus *bus = (struct brcmf_bus *)ptr;
+ struct brcmf_sdio *bus = (struct brcmf_sdio *)ptr;
brcmf_dbg(TRACE, "Enter\n");
@@ -4546,18 +4027,9 @@ void brcmf_sdbrcm_disconnect(void *ptr)
brcmf_dbg(TRACE, "Disconnected\n");
}
-struct device *brcmf_bus_get_device(struct brcmf_bus *bus)
-{
- return &bus->sdiodev->func[2]->dev;
-}
-
void
-brcmf_sdbrcm_wd_timer(struct brcmf_bus *bus, uint wdtick)
+brcmf_sdbrcm_wd_timer(struct brcmf_sdio *bus, uint wdtick)
{
- /* don't start the wd until fw is loaded */
- if (bus->drvr->busstate == BRCMF_BUS_DOWN)
- return;
-
/* Totally stop the timer */
if (!wdtick && bus->wd_timer_valid == true) {
del_timer_sync(&bus->timer);
@@ -4566,6 +4038,10 @@ brcmf_sdbrcm_wd_timer(struct brcmf_bus *bus, uint wdtick)
return;
}
+ /* don't start the wd until fw is loaded */
+ if (bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN)
+ return;
+
if (wdtick) {
if (bus->save_ms != BRCMF_WD_POLL_MS) {
if (bus->wd_timer_valid == true)
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
new file mode 100644
index 000000000000..11b2d7c97ba2
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
@@ -0,0 +1,607 @@
+/*
+ * Copyright (c) 2011 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+/* ***** SDIO interface chip backplane handle functions ***** */
+
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/mmc/card.h>
+#include <linux/ssb/ssb_regs.h>
+#include <linux/bcma/bcma.h>
+
+#include <chipcommon.h>
+#include <brcm_hw_ids.h>
+#include <brcmu_wifi.h>
+#include <brcmu_utils.h>
+#include <soc.h>
+#include "dhd_dbg.h"
+#include "sdio_host.h"
+#include "sdio_chip.h"
+
+/* chip core base & ramsize */
+/* bcm4329 */
+/* SDIO device core, ID 0x829 */
+#define BCM4329_CORE_BUS_BASE 0x18011000
+/* internal memory core, ID 0x80e */
+#define BCM4329_CORE_SOCRAM_BASE 0x18003000
+/* ARM Cortex M3 core, ID 0x82a */
+#define BCM4329_CORE_ARM_BASE 0x18002000
+#define BCM4329_RAMSIZE 0x48000
+
+#define SBCOREREV(sbidh) \
+ ((((sbidh) & SSB_IDHIGH_RCHI) >> SSB_IDHIGH_RCHI_SHIFT) | \
+ ((sbidh) & SSB_IDHIGH_RCLO))
+
+/* SOC Interconnect types (aka chip types) */
+#define SOCI_SB 0
+#define SOCI_AI 1
+
+/* EROM CompIdentB */
+#define CIB_REV_MASK 0xff000000
+#define CIB_REV_SHIFT 24
+
+#define SDIOD_DRVSTR_KEY(chip, pmu) (((chip) << 16) | (pmu))
+/* SDIO Pad drive strength to select value mappings */
+struct sdiod_drive_str {
+ u8 strength; /* Pad Drive Strength in mA */
+ u8 sel; /* Chip-specific select value */
+};
+/* SDIO Drive Strength to sel value table for PMU Rev 11 (1.8V) */
+static const struct sdiod_drive_str sdiod_drvstr_tab1_1v8[] = {
+ {32, 0x6},
+ {26, 0x7},
+ {22, 0x4},
+ {16, 0x5},
+ {12, 0x2},
+ {8, 0x3},
+ {4, 0x0},
+ {0, 0x1}
+};
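As an aside, a minimal standalone sketch of how the SDIOD_DRVSTR_KEY() value matched by the switch in brcmf_sdio_chip_drivestrengthinit() further down is packed; the 0x4330 value is an assumption standing in for BCM4330_CHIP_ID, and PMU revision 12 matches the only table entry wired up so far.

#include <stdio.h>

#define SDIOD_DRVSTR_KEY(chip, pmu)	(((chip) << 16) | (pmu))	/* as defined above */

int main(void)
{
	/* assumed: BCM4330_CHIP_ID == 0x4330, PMU rev 12 -> key 0x4330000c */
	printf("key=0x%08x\n", SDIOD_DRVSTR_KEY(0x4330u, 12));
	return 0;
}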
+
+u8
+brcmf_sdio_chip_getinfidx(struct chip_info *ci, u16 coreid)
+{
+ u8 idx;
+
+ for (idx = 0; idx < BRCMF_MAX_CORENUM; idx++)
+ if (coreid == ci->c_inf[idx].id)
+ return idx;
+
+ return BRCMF_MAX_CORENUM;
+}
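A short sketch (driver context, not standalone) of the calling pattern for this lookup: translate a core id to an index into ci->c_inf[], then build a backplane address from that core's base, as brcmf_sdbrcm_probe_attach() does for the corecontrol register elsewhere in this patch. The explicit BRCMF_MAX_CORENUM check is an extra defensive step added here for illustration.

/* Sketch only: address the SDIO device core's corecontrol register */
u8 idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_SDIO_DEV);

if (idx == BRCMF_MAX_CORENUM)
	return -ENODEV;		/* core not present on this chip */

reg_addr = ci->c_inf[idx].base +
	   offsetof(struct sdpcmd_regs, corecontrol);
reg_val = brcmf_sdcard_reg_read(sdiodev, reg_addr, sizeof(u32));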
+
+static u32
+brcmf_sdio_sb_corerev(struct brcmf_sdio_dev *sdiodev,
+ struct chip_info *ci, u16 coreid)
+{
+ u32 regdata;
+ u8 idx;
+
+ idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbidhigh), 4);
+ return SBCOREREV(regdata);
+}
+
+static u32
+brcmf_sdio_ai_corerev(struct brcmf_sdio_dev *sdiodev,
+ struct chip_info *ci, u16 coreid)
+{
+ u8 idx;
+
+ idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+
+ return (ci->c_inf[idx].cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
+}
+
+static bool
+brcmf_sdio_sb_iscoreup(struct brcmf_sdio_dev *sdiodev,
+ struct chip_info *ci, u16 coreid)
+{
+ u32 regdata;
+ u8 idx;
+
+ idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4);
+ regdata &= (SSB_TMSLOW_RESET | SSB_TMSLOW_REJECT |
+ SSB_IMSTATE_REJECT | SSB_TMSLOW_CLOCK);
+ return (SSB_TMSLOW_CLOCK == regdata);
+}
+
+static bool
+brcmf_sdio_ai_iscoreup(struct brcmf_sdio_dev *sdiodev,
+ struct chip_info *ci, u16 coreid)
+{
+ u32 regdata;
+ u8 idx;
+ bool ret;
+
+ idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ ci->c_inf[idx].wrapbase+BCMA_IOCTL, 4);
+ ret = (regdata & (BCMA_IOCTL_FGC | BCMA_IOCTL_CLK)) == BCMA_IOCTL_CLK;
+
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
+ 4);
+ ret = ret && ((regdata & BCMA_RESET_CTL_RESET) == 0);
+
+ return ret;
+}
+
+static void
+brcmf_sdio_sb_coredisable(struct brcmf_sdio_dev *sdiodev,
+ struct chip_info *ci, u16 coreid)
+{
+ u32 regdata;
+ u8 idx;
+
+ idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4);
+ if (regdata & SSB_TMSLOW_RESET)
+ return;
+
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4);
+ if ((regdata & SSB_TMSLOW_CLOCK) != 0) {
+ /*
+ * set target reject and spin until busy is clear
+ * (preserve core-specific bits)
+ */
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4);
+ brcmf_sdcard_reg_write(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
+ 4, regdata | SSB_TMSLOW_REJECT);
+
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4);
+ udelay(1);
+ SPINWAIT((brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatehigh), 4) &
+ SSB_TMSHIGH_BUSY), 100000);
+
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatehigh), 4);
+ if (regdata & SSB_TMSHIGH_BUSY)
+ brcmf_dbg(ERROR, "core state still busy\n");
+
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbidlow), 4);
+ if (regdata & SSB_IDLOW_INITIATOR) {
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbimstate), 4) |
+ SSB_IMSTATE_REJECT;
+ brcmf_sdcard_reg_write(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbimstate), 4,
+ regdata);
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbimstate), 4);
+ udelay(1);
+ SPINWAIT((brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbimstate), 4) &
+ SSB_IMSTATE_BUSY), 100000);
+ }
+
+ /* set reset and reject while enabling the clocks */
+ brcmf_sdcard_reg_write(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4,
+ (SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK |
+ SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET));
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4);
+ udelay(10);
+
+ /* clear the initiator reject bit */
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbidlow), 4);
+ if (regdata & SSB_IDLOW_INITIATOR) {
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbimstate), 4) &
+ ~SSB_IMSTATE_REJECT;
+ brcmf_sdcard_reg_write(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbimstate), 4,
+ regdata);
+ }
+ }
+
+ /* leave reset and reject asserted */
+ brcmf_sdcard_reg_write(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4,
+ (SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET));
+ udelay(1);
+}
+
+static void
+brcmf_sdio_ai_coredisable(struct brcmf_sdio_dev *sdiodev,
+ struct chip_info *ci, u16 coreid)
+{
+ u8 idx;
+ u32 regdata;
+
+ idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+
+ /* if core is already in reset, just return */
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
+ 4);
+ if ((regdata & BCMA_RESET_CTL_RESET) != 0)
+ return;
+
+ brcmf_sdcard_reg_write(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
+ 4, 0);
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ ci->c_inf[idx].wrapbase+BCMA_IOCTL, 4);
+ udelay(10);
+
+ brcmf_sdcard_reg_write(sdiodev, ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
+ 4, BCMA_RESET_CTL_RESET);
+ udelay(1);
+}
+
+static void
+brcmf_sdio_sb_resetcore(struct brcmf_sdio_dev *sdiodev,
+ struct chip_info *ci, u16 coreid)
+{
+ u32 regdata;
+ u8 idx;
+
+ idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+
+ /*
+ * Must do the disable sequence first to work for
+ * arbitrary current core state.
+ */
+ brcmf_sdio_sb_coredisable(sdiodev, ci, coreid);
+
+ /*
+ * Now do the initialization sequence.
+ * set reset while enabling the clock and
+ * forcing them on throughout the core
+ */
+ brcmf_sdcard_reg_write(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4,
+ SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK | SSB_TMSLOW_RESET);
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4);
+ udelay(1);
+
+ /* clear any serror */
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatehigh), 4);
+ if (regdata & SSB_TMSHIGH_SERR)
+ brcmf_sdcard_reg_write(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatehigh), 4, 0);
+
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbimstate), 4);
+ if (regdata & (SSB_IMSTATE_IBE | SSB_IMSTATE_TO))
+ brcmf_sdcard_reg_write(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbimstate), 4,
+ regdata & ~(SSB_IMSTATE_IBE | SSB_IMSTATE_TO));
+
+ /* clear reset and allow it to propagate throughout the core */
+ brcmf_sdcard_reg_write(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4,
+ SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK);
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4);
+ udelay(1);
+
+ /* leave clock enabled */
+ brcmf_sdcard_reg_write(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
+ 4, SSB_TMSLOW_CLOCK);
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4);
+ udelay(1);
+}
+
+static void
+brcmf_sdio_ai_resetcore(struct brcmf_sdio_dev *sdiodev,
+ struct chip_info *ci, u16 coreid)
+{
+ u8 idx;
+ u32 regdata;
+
+ idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+
+ /* must disable first to work for arbitrary current core state */
+ brcmf_sdio_ai_coredisable(sdiodev, ci, coreid);
+
+ /* now do initialization sequence */
+ brcmf_sdcard_reg_write(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
+ 4, BCMA_IOCTL_FGC | BCMA_IOCTL_CLK);
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ ci->c_inf[idx].wrapbase+BCMA_IOCTL, 4);
+ brcmf_sdcard_reg_write(sdiodev, ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
+ 4, 0);
+ udelay(1);
+
+ brcmf_sdcard_reg_write(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
+ 4, BCMA_IOCTL_CLK);
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ ci->c_inf[idx].wrapbase+BCMA_IOCTL, 4);
+ udelay(1);
+}
+
+static int brcmf_sdio_chip_recognition(struct brcmf_sdio_dev *sdiodev,
+ struct chip_info *ci, u32 regs)
+{
+ u32 regdata;
+
+ /*
+ * Get CC core rev
+	 * Chipid is assumed to be at offset 0 from regs arg
+ * For different chiptypes or old sdio hosts w/o chipcommon,
+ * other ways of recognition should be added here.
+ */
+ ci->c_inf[0].id = BCMA_CORE_CHIPCOMMON;
+ ci->c_inf[0].base = regs;
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_CC_REG(ci->c_inf[0].base, chipid), 4);
+ ci->chip = regdata & CID_ID_MASK;
+ ci->chiprev = (regdata & CID_REV_MASK) >> CID_REV_SHIFT;
+ ci->socitype = (regdata & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
+
+ brcmf_dbg(INFO, "chipid=0x%x chiprev=%d\n", ci->chip, ci->chiprev);
+
+ /* Address of cores for new chips should be added here */
+ switch (ci->chip) {
+ case BCM4329_CHIP_ID:
+ ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
+ ci->c_inf[1].base = BCM4329_CORE_BUS_BASE;
+ ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
+ ci->c_inf[2].base = BCM4329_CORE_SOCRAM_BASE;
+ ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
+ ci->c_inf[3].base = BCM4329_CORE_ARM_BASE;
+ ci->ramsize = BCM4329_RAMSIZE;
+ break;
+ case BCM4330_CHIP_ID:
+ ci->c_inf[0].wrapbase = 0x18100000;
+ ci->c_inf[0].cib = 0x27004211;
+ ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
+ ci->c_inf[1].base = 0x18002000;
+ ci->c_inf[1].wrapbase = 0x18102000;
+ ci->c_inf[1].cib = 0x07004211;
+ ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
+ ci->c_inf[2].base = 0x18004000;
+ ci->c_inf[2].wrapbase = 0x18104000;
+ ci->c_inf[2].cib = 0x0d080401;
+ ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
+ ci->c_inf[3].base = 0x18003000;
+ ci->c_inf[3].wrapbase = 0x18103000;
+ ci->c_inf[3].cib = 0x03004211;
+ ci->ramsize = 0x48000;
+ break;
+ default:
+ brcmf_dbg(ERROR, "chipid 0x%x is not supported\n", ci->chip);
+ return -ENODEV;
+ }
+
+ switch (ci->socitype) {
+ case SOCI_SB:
+ ci->iscoreup = brcmf_sdio_sb_iscoreup;
+ ci->corerev = brcmf_sdio_sb_corerev;
+ ci->coredisable = brcmf_sdio_sb_coredisable;
+ ci->resetcore = brcmf_sdio_sb_resetcore;
+ break;
+ case SOCI_AI:
+ ci->iscoreup = brcmf_sdio_ai_iscoreup;
+ ci->corerev = brcmf_sdio_ai_corerev;
+ ci->coredisable = brcmf_sdio_ai_coredisable;
+ ci->resetcore = brcmf_sdio_ai_resetcore;
+ break;
+ default:
+ brcmf_dbg(ERROR, "socitype %u not supported\n", ci->socitype);
+ return -ENODEV;
+ }
+
+ return 0;
+}
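A standalone sketch of the chipid decode done at the top of this function. The mask/shift values are assumptions modelled on the driver's usual soc.h definitions (chip id in bits 0-15, revision in bits 16-19, interconnect type in bits 28-31) and the register value is invented for illustration.

#include <stdio.h>

#define CID_ID_MASK	0x0000ffff	/* assumed layout, see note above */
#define CID_REV_MASK	0x000f0000
#define CID_REV_SHIFT	16
#define CID_TYPE_MASK	0xf0000000
#define CID_TYPE_SHIFT	28

int main(void)
{
	unsigned int regdata = 0x10004330;	/* hypothetical chipid readback */

	printf("chip=0x%x rev=%u type=%u\n",
	       regdata & CID_ID_MASK,
	       (regdata & CID_REV_MASK) >> CID_REV_SHIFT,
	       (regdata & CID_TYPE_MASK) >> CID_TYPE_SHIFT);	/* type 1 == SOCI_AI */
	return 0;
}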
+
+static int
+brcmf_sdio_chip_buscoreprep(struct brcmf_sdio_dev *sdiodev)
+{
+ int err = 0;
+ u8 clkval, clkset;
+
+ /* Try forcing SDIO core to do ALPAvail request only */
+ clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ;
+ brcmf_sdcard_cfg_write(sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
+ if (err) {
+ brcmf_dbg(ERROR, "error writing for HT off\n");
+ return err;
+ }
+
+ /* If register supported, wait for ALPAvail and then force ALP */
+ /* This may take up to 15 milliseconds */
+ clkval = brcmf_sdcard_cfg_read(sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, NULL);
+
+ if ((clkval & ~SBSDIO_AVBITS) != clkset) {
+ brcmf_dbg(ERROR, "ChipClkCSR access: wrote 0x%02x read 0x%02x\n",
+ clkset, clkval);
+ return -EACCES;
+ }
+
+ SPINWAIT(((clkval = brcmf_sdcard_cfg_read(sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, NULL)),
+ !SBSDIO_ALPAV(clkval)),
+ PMU_MAX_TRANSITION_DLY);
+ if (!SBSDIO_ALPAV(clkval)) {
+ brcmf_dbg(ERROR, "timeout on ALPAV wait, clkval 0x%02x\n",
+ clkval);
+ return -EBUSY;
+ }
+
+ clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP;
+ brcmf_sdcard_cfg_write(sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
+ udelay(65);
+
+ /* Also, disable the extra SDIO pull-ups */
+ brcmf_sdcard_cfg_write(sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);
+
+ return 0;
+}
+
+static void
+brcmf_sdio_chip_buscoresetup(struct brcmf_sdio_dev *sdiodev,
+ struct chip_info *ci)
+{
+ /* get chipcommon rev */
+ ci->c_inf[0].rev = ci->corerev(sdiodev, ci, ci->c_inf[0].id);
+
+	/* get chipcommon capabilities */
+ ci->c_inf[0].caps =
+ brcmf_sdcard_reg_read(sdiodev,
+ CORE_CC_REG(ci->c_inf[0].base, capabilities), 4);
+
+ /* get pmu caps & rev */
+ if (ci->c_inf[0].caps & CC_CAP_PMU) {
+ ci->pmucaps = brcmf_sdcard_reg_read(sdiodev,
+ CORE_CC_REG(ci->c_inf[0].base, pmucapabilities), 4);
+ ci->pmurev = ci->pmucaps & PCAP_REV_MASK;
+ }
+
+ ci->c_inf[1].rev = ci->corerev(sdiodev, ci, ci->c_inf[1].id);
+
+ brcmf_dbg(INFO, "ccrev=%d, pmurev=%d, buscore rev/type=%d/0x%x\n",
+ ci->c_inf[0].rev, ci->pmurev,
+ ci->c_inf[1].rev, ci->c_inf[1].id);
+
+ /*
+ * Make sure any on-chip ARM is off (in case strapping is wrong),
+ * or downloaded code was already running.
+ */
+ ci->coredisable(sdiodev, ci, BCMA_CORE_ARM_CM3);
+}
+
+int brcmf_sdio_chip_attach(struct brcmf_sdio_dev *sdiodev,
+ struct chip_info **ci_ptr, u32 regs)
+{
+ int ret;
+ struct chip_info *ci;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ /* alloc chip_info_t */
+ ci = kzalloc(sizeof(struct chip_info), GFP_ATOMIC);
+ if (!ci)
+ return -ENOMEM;
+
+ ret = brcmf_sdio_chip_buscoreprep(sdiodev);
+ if (ret != 0)
+ goto err;
+
+ ret = brcmf_sdio_chip_recognition(sdiodev, ci, regs);
+ if (ret != 0)
+ goto err;
+
+ brcmf_sdio_chip_buscoresetup(sdiodev, ci);
+
+ brcmf_sdcard_reg_write(sdiodev,
+ CORE_CC_REG(ci->c_inf[0].base, gpiopullup), 4, 0);
+ brcmf_sdcard_reg_write(sdiodev,
+ CORE_CC_REG(ci->c_inf[0].base, gpiopulldown), 4, 0);
+
+ *ci_ptr = ci;
+ return 0;
+
+err:
+ kfree(ci);
+ return ret;
+}
+
+void
+brcmf_sdio_chip_detach(struct chip_info **ci_ptr)
+{
+ brcmf_dbg(TRACE, "Enter\n");
+
+ kfree(*ci_ptr);
+ *ci_ptr = NULL;
+}
+
+static char *brcmf_sdio_chip_name(uint chipid, char *buf, uint len)
+{
+ const char *fmt;
+
+ fmt = ((chipid > 0xa000) || (chipid < 0x4000)) ? "%d" : "%x";
+ snprintf(buf, len, fmt, chipid);
+ return buf;
+}
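A standalone sketch of the naming rule above: ids inside the 0x4000-0xa000 window read naturally as hex part numbers (BCM4329, BCM4330), everything else is printed in decimal (BCM43224). The values below are just examples.

#include <stdio.h>

static void chip_name(unsigned int chipid, char *buf, unsigned int len)
{
	const char *fmt;

	/* same rule as brcmf_sdio_chip_name() above */
	fmt = ((chipid > 0xa000) || (chipid < 0x4000)) ? "%d" : "%x";
	snprintf(buf, len, fmt, chipid);
}

int main(void)
{
	char buf[16];

	chip_name(0x4330, buf, sizeof(buf));
	printf("%s\n", buf);		/* "4330" (hex digits) */
	chip_name(43224, buf, sizeof(buf));
	printf("%s\n", buf);		/* "43224" (decimal, 43224 > 0xa000) */
	return 0;
}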
+
+void
+brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
+ struct chip_info *ci, u32 drivestrength)
+{
+ struct sdiod_drive_str *str_tab = NULL;
+ u32 str_mask = 0;
+ u32 str_shift = 0;
+ char chn[8];
+
+ if (!(ci->c_inf[0].caps & CC_CAP_PMU))
+ return;
+
+ switch (SDIOD_DRVSTR_KEY(ci->chip, ci->pmurev)) {
+ case SDIOD_DRVSTR_KEY(BCM4330_CHIP_ID, 12):
+ str_tab = (struct sdiod_drive_str *)&sdiod_drvstr_tab1_1v8;
+ str_mask = 0x00003800;
+ str_shift = 11;
+ break;
+ default:
+ brcmf_dbg(ERROR, "No SDIO Drive strength init done for chip %s rev %d pmurev %d\n",
+ brcmf_sdio_chip_name(ci->chip, chn, 8),
+ ci->chiprev, ci->pmurev);
+ break;
+ }
+
+ if (str_tab != NULL) {
+ u32 drivestrength_sel = 0;
+ u32 cc_data_temp;
+ int i;
+
+ for (i = 0; str_tab[i].strength != 0; i++) {
+ if (drivestrength >= str_tab[i].strength) {
+ drivestrength_sel = str_tab[i].sel;
+ break;
+ }
+ }
+
+ brcmf_sdcard_reg_write(sdiodev,
+ CORE_CC_REG(ci->c_inf[0].base, chipcontrol_addr),
+ 4, 1);
+ cc_data_temp = brcmf_sdcard_reg_read(sdiodev,
+ CORE_CC_REG(ci->c_inf[0].base, chipcontrol_addr), 4);
+ cc_data_temp &= ~str_mask;
+ drivestrength_sel <<= str_shift;
+ cc_data_temp |= drivestrength_sel;
+ brcmf_sdcard_reg_write(sdiodev,
+ CORE_CC_REG(ci->c_inf[0].base, chipcontrol_addr),
+ 4, cc_data_temp);
+
+ brcmf_dbg(INFO, "SDIO: %dmA drive strength selected, set to 0x%08x\n",
+ drivestrength, cc_data_temp);
+ }
+}
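A standalone sketch of the selection arithmetic above for the BCM4330/PMU rev 12 case: pick the first table entry whose strength does not exceed the request, then merge its select value into the chipcontrol field behind mask 0x3800 at shift 11. The indirect chipcontrol_addr/data register access is deliberately left out, and the readback value is made up.

#include <stdio.h>
#include <stdint.h>

struct sdiod_drive_str {
	uint8_t strength;	/* pad drive strength in mA */
	uint8_t sel;		/* chip-specific select value */
};

/* same table as sdiod_drvstr_tab1_1v8 above */
static const struct sdiod_drive_str tab[] = {
	{32, 0x6}, {26, 0x7}, {22, 0x4}, {16, 0x5},
	{12, 0x2}, {8, 0x3}, {4, 0x0}, {0, 0x1},
};

int main(void)
{
	uint32_t drivestrength = 12;		/* requested, in mA */
	uint32_t str_mask = 0x00003800;
	uint32_t str_shift = 11;
	uint32_t cc_data = 0x00002800;		/* pretend chipcontrol readback */
	uint32_t sel = 0;
	int i;

	for (i = 0; tab[i].strength != 0; i++) {
		if (drivestrength >= tab[i].strength) {
			sel = tab[i].sel;
			break;
		}
	}

	cc_data = (cc_data & ~str_mask) | (sel << str_shift);
	printf("sel=0x%x chipcontrol=0x%08x\n", sel, cc_data);	/* 0x2, 0x00001000 */
	return 0;
}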
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h
new file mode 100644
index 000000000000..ce974d76bd92
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h
@@ -0,0 +1,136 @@
+/*
+ * Copyright (c) 2011 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _BRCMFMAC_SDIO_CHIP_H_
+#define _BRCMFMAC_SDIO_CHIP_H_
+
+/*
+ * Core reg address translation.
+ * Each macro returns a 32-bit byte address on the backplane bus.
+ */
+#define CORE_CC_REG(base, field) \
+ (base + offsetof(struct chipcregs, field))
+#define CORE_BUS_REG(base, field) \
+ (base + offsetof(struct sdpcmd_regs, field))
+#define CORE_SB(base, field) \
+ (base + SBCONFIGOFF + offsetof(struct sbconfig, field))
+
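A minimal standalone sketch of what the translation macros above evaluate to. The struct here is an illustrative stand-in for the first few chipcommon registers (chipid at offset 0x0, capabilities at 0x4); the real layouts are struct chipcregs in chipcommon.h and struct sdpcmd_regs for the bus core, and 0x18000000 is the usual chipcommon enumeration base on these chips.

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* illustrative stand-in for the start of struct chipcregs */
struct chipcregs_sketch {
	uint32_t chipid;		/* 0x000 */
	uint32_t capabilities;		/* 0x004 */
	uint32_t corecontrol;		/* 0x008 */
};

#define CORE_CC_REG(base, field) \
	((base) + offsetof(struct chipcregs_sketch, field))

int main(void)
{
	uint32_t cc_base = 0x18000000;	/* assumed chipcommon backplane base */

	printf("chipid       @ 0x%08lx\n",
	       (unsigned long)CORE_CC_REG(cc_base, chipid));
	printf("capabilities @ 0x%08lx\n",
	       (unsigned long)CORE_CC_REG(cc_base, capabilities));
	return 0;
}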
+/* SDIO function 1 register CHIPCLKCSR */
+/* Force ALP request to backplane */
+#define SBSDIO_FORCE_ALP 0x01
+/* Force HT request to backplane */
+#define SBSDIO_FORCE_HT 0x02
+/* Force ILP request to backplane */
+#define SBSDIO_FORCE_ILP 0x04
+/* Make ALP ready (power up xtal) */
+#define SBSDIO_ALP_AVAIL_REQ 0x08
+/* Make HT ready (power up PLL) */
+#define SBSDIO_HT_AVAIL_REQ 0x10
+/* Squelch clock requests from HW */
+#define SBSDIO_FORCE_HW_CLKREQ_OFF 0x20
+/* Status: ALP is ready */
+#define SBSDIO_ALP_AVAIL 0x40
+/* Status: HT is ready */
+#define SBSDIO_HT_AVAIL 0x80
+#define SBSDIO_AVBITS (SBSDIO_HT_AVAIL | SBSDIO_ALP_AVAIL)
+#define SBSDIO_ALPAV(regval) ((regval) & SBSDIO_AVBITS)
+#define SBSDIO_HTAV(regval) (((regval) & SBSDIO_AVBITS) == SBSDIO_AVBITS)
+#define SBSDIO_ALPONLY(regval) (SBSDIO_ALPAV(regval) && !SBSDIO_HTAV(regval))
+#define SBSDIO_CLKAV(regval, alponly) \
+ (SBSDIO_ALPAV(regval) && (alponly ? 1 : SBSDIO_HTAV(regval)))
+
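A standalone sketch of how the CHIPCLKCSR status bits above are read back: the availability macros are copied verbatim from this header, and the readback value is hypothetical (ALP up, HT still down), which is the state the ALP-only bring-up path expects.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define SBSDIO_ALP_AVAIL	0x40
#define SBSDIO_HT_AVAIL		0x80
#define SBSDIO_AVBITS		(SBSDIO_HT_AVAIL | SBSDIO_ALP_AVAIL)
#define SBSDIO_ALPAV(regval)	((regval) & SBSDIO_AVBITS)
#define SBSDIO_HTAV(regval)	(((regval) & SBSDIO_AVBITS) == SBSDIO_AVBITS)
#define SBSDIO_CLKAV(regval, alponly) \
	(SBSDIO_ALPAV(regval) && ((alponly) ? 1 : SBSDIO_HTAV(regval)))

int main(void)
{
	uint8_t clkval = 0x48;	/* hypothetical readback: ALP available, HT not */

	printf("ALP available:            %s\n", SBSDIO_ALPAV(clkval) ? "yes" : "no");
	printf("clock ok (ALP-only mode): %s\n", SBSDIO_CLKAV(clkval, true) ? "yes" : "no");
	printf("clock ok (HT required):   %s\n", SBSDIO_CLKAV(clkval, false) ? "yes" : "no");
	return 0;
}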
+#define BRCMF_MAX_CORENUM 6
+
+struct chip_core_info {
+ u16 id;
+ u16 rev;
+ u32 base;
+ u32 wrapbase;
+ u32 caps;
+ u32 cib;
+};
+
+struct chip_info {
+ u32 chip;
+ u32 chiprev;
+ u32 socitype;
+ /* core info */
+ /* always put chipcommon core at 0, bus core at 1 */
+ struct chip_core_info c_inf[BRCMF_MAX_CORENUM];
+ u32 pmurev;
+ u32 pmucaps;
+ u32 ramsize;
+
+ bool (*iscoreup)(struct brcmf_sdio_dev *sdiodev, struct chip_info *ci,
+ u16 coreid);
+ u32 (*corerev)(struct brcmf_sdio_dev *sdiodev, struct chip_info *ci,
+ u16 coreid);
+ void (*coredisable)(struct brcmf_sdio_dev *sdiodev,
+ struct chip_info *ci, u16 coreid);
+ void (*resetcore)(struct brcmf_sdio_dev *sdiodev,
+ struct chip_info *ci, u16 coreid);
+};
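The four function pointers are what keeps the rest of the driver interconnect-agnostic: brcmf_sdio_chip_recognition() binds them to either the sb_* (Sonics) or ai_* (AXI) variants and callers only ever go through the struct. A sketch of the calling pattern (driver context, not standalone):

/* Sketch only: reset the ARM core and check it, whichever backplane
 * flavour this chip has; the function pointers were bound during
 * brcmf_sdio_chip_recognition().
 */
ci->resetcore(sdiodev, ci, BCMA_CORE_ARM_CM3);
if (!ci->iscoreup(sdiodev, ci, BCMA_CORE_ARM_CM3))
	brcmf_dbg(ERROR, "ARM core did not come back up\n");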
+
+struct sbconfig {
+ u32 PAD[2];
+ u32 sbipsflag; /* initiator port ocp slave flag */
+ u32 PAD[3];
+ u32 sbtpsflag; /* target port ocp slave flag */
+ u32 PAD[11];
+ u32 sbtmerrloga; /* (sonics >= 2.3) */
+ u32 PAD;
+ u32 sbtmerrlog; /* (sonics >= 2.3) */
+ u32 PAD[3];
+ u32 sbadmatch3; /* address match3 */
+ u32 PAD;
+ u32 sbadmatch2; /* address match2 */
+ u32 PAD;
+ u32 sbadmatch1; /* address match1 */
+ u32 PAD[7];
+ u32 sbimstate; /* initiator agent state */
+ u32 sbintvec; /* interrupt mask */
+ u32 sbtmstatelow; /* target state */
+ u32 sbtmstatehigh; /* target state */
+ u32 sbbwa0; /* bandwidth allocation table0 */
+ u32 PAD;
+ u32 sbimconfiglow; /* initiator configuration */
+ u32 sbimconfighigh; /* initiator configuration */
+ u32 sbadmatch0; /* address match0 */
+ u32 PAD;
+ u32 sbtmconfiglow; /* target configuration */
+ u32 sbtmconfighigh; /* target configuration */
+ u32 sbbconfig; /* broadcast configuration */
+ u32 PAD;
+ u32 sbbstate; /* broadcast state */
+ u32 PAD[3];
+ u32 sbactcnfg; /* activate configuration */
+ u32 PAD[3];
+ u32 sbflagst; /* current sbflags */
+ u32 PAD[3];
+ u32 sbidlow; /* identification */
+ u32 sbidhigh; /* identification */
+};
+
+extern int brcmf_sdio_chip_attach(struct brcmf_sdio_dev *sdiodev,
+ struct chip_info **ci_ptr, u32 regs);
+extern void brcmf_sdio_chip_detach(struct chip_info **ci_ptr);
+extern void brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
+ struct chip_info *ci,
+ u32 drivestrength);
+extern u8 brcmf_sdio_chip_getinfidx(struct chip_info *ci, u16 coreid);
+
+
+#endif /* _BRCMFMAC_SDIO_CHIP_H_ */
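Taken together, the exported interface is a simple attach/use/detach lifecycle around a caller-owned struct chip_info pointer, as used by brcmf_sdbrcm_probe_attach() and brcmf_sdbrcm_release_dongle() earlier in this patch. A condensed sketch (driver context, error handling trimmed):

struct chip_info *ci = NULL;

if (brcmf_sdio_chip_attach(sdiodev, &ci, regsva))
	goto fail;			/* ci is left NULL on failure */

brcmf_sdio_chip_drivestrengthinit(sdiodev, ci, SDIO_DRIVE_STRENGTH);
/* ... download firmware, run ... */

brcmf_sdio_chip_detach(&ci);		/* frees ci and NULLs the pointer */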
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
index 726fa8981113..0281d207d998 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
@@ -116,12 +116,20 @@
#define SUCCESS 0
#define ERROR 1
+/* Packet alignment for most efficient SDIO (can change based on platform) */
+#define BRCMF_SDALIGN (1 << 6)
+
+/* watchdog polling interval in ms */
+#define BRCMF_WD_POLL_MS 10
+
struct brcmf_sdreg {
int func;
int offset;
int value;
};
+struct brcmf_sdio;
+
struct brcmf_sdio_dev {
struct sdio_func *func[SDIO_MAX_FUNCS];
u8 num_funcs; /* Supported funcs on client */
@@ -132,9 +140,10 @@ struct brcmf_sdio_dev {
atomic_t suspend; /* suspend flag */
wait_queue_head_t request_byte_wait;
wait_queue_head_t request_word_wait;
- wait_queue_head_t request_packet_wait;
+ wait_queue_head_t request_chain_wait;
wait_queue_head_t request_buffer_wait;
-
+ struct device *dev;
+ struct brcmf_bus *bus_if;
};
/* Register/deregister device interrupt handler. */
@@ -182,11 +191,21 @@ extern bool brcmf_sdcard_regfail(struct brcmf_sdio_dev *sdiodev);
* NOTE: Async operation is not currently supported.
*/
extern int
+brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+ uint flags, struct sk_buff *pkt);
+extern int
brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
- uint flags, u8 *buf, uint nbytes, struct sk_buff *pkt);
+ uint flags, u8 *buf, uint nbytes);
+
+extern int
+brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+ uint flags, struct sk_buff *pkt);
extern int
brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
- uint flags, u8 *buf, uint nbytes, struct sk_buff *pkt);
+ uint flags, u8 *buf, uint nbytes);
+extern int
+brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+ uint flags, struct sk_buff_head *pktq);
/* Flags bits */
@@ -237,16 +256,20 @@ brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev,
/* read or write any buffer using cmd53 */
extern int
brcmf_sdioh_request_buffer(struct brcmf_sdio_dev *sdiodev,
- uint fix_inc, uint rw, uint fnc_num,
- u32 addr, uint regwidth,
- u32 buflen, u8 *buffer, struct sk_buff *pkt);
+ uint fix_inc, uint rw, uint fnc_num, u32 addr,
+ struct sk_buff *pkt);
+extern int
+brcmf_sdioh_request_chain(struct brcmf_sdio_dev *sdiodev, uint fix_inc,
+ uint write, uint func, uint addr,
+ struct sk_buff_head *pktq);
/* Watchdog timer interface for pm ops */
extern void brcmf_sdio_wdtmr_enable(struct brcmf_sdio_dev *sdiodev,
bool enable);
-extern void *brcmf_sdbrcm_probe(u16 bus_no, u16 slot, u16 func, uint bustype,
- u32 regsva, struct brcmf_sdio_dev *sdiodev);
+extern void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev);
extern void brcmf_sdbrcm_disconnect(void *ptr);
extern void brcmf_sdbrcm_isr(void *arg);
+
+extern void brcmf_sdbrcm_wd_timer(struct brcmf_sdio *bus, uint wdtick);
#endif /* _BRCM_SDH_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index 5eddabe5228a..bf11850a20f1 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -1429,7 +1429,7 @@ brcmf_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *ndev,
static s32
brcmf_cfg80211_set_tx_power(struct wiphy *wiphy,
- enum nl80211_tx_power_setting type, s32 dbm)
+ enum nl80211_tx_power_setting type, s32 mbm)
{
struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
@@ -1437,6 +1437,7 @@ brcmf_cfg80211_set_tx_power(struct wiphy *wiphy,
u16 txpwrmw;
s32 err = 0;
s32 disable = 0;
+ s32 dbm = MBM_TO_DBM(mbm);
WL_TRACE("Enter\n");
if (!check_sys_up(wiphy))
@@ -1446,12 +1447,6 @@ brcmf_cfg80211_set_tx_power(struct wiphy *wiphy,
case NL80211_TX_POWER_AUTOMATIC:
break;
case NL80211_TX_POWER_LIMITED:
- if (dbm < 0) {
- WL_ERR("TX_POWER_LIMITED - dbm is negative\n");
- err = -EINVAL;
- goto done;
- }
- break;
case NL80211_TX_POWER_FIXED:
if (dbm < 0) {
WL_ERR("TX_POWER_FIXED - dbm is negative\n");
@@ -1997,7 +1992,7 @@ done:
}
static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_priv *cfg_priv,
- struct brcmf_bss_info *bi)
+ struct brcmf_bss_info_le *bi)
{
struct wiphy *wiphy = cfg_to_wiphy(cfg_priv);
struct ieee80211_channel *notify_channel;
@@ -2049,18 +2044,27 @@ static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_priv *cfg_priv,
notify_timestamp, notify_capability, notify_interval, notify_ie,
notify_ielen, notify_signal, GFP_KERNEL);
- if (!bss) {
- WL_ERR("cfg80211_inform_bss_frame error\n");
- return -EINVAL;
- }
+ if (!bss)
+ return -ENOMEM;
+
+ cfg80211_put_bss(bss);
return err;
}
+static struct brcmf_bss_info_le *
+next_bss_le(struct brcmf_scan_results *list, struct brcmf_bss_info_le *bss)
+{
+ if (bss == NULL)
+ return list->bss_info_le;
+ return (struct brcmf_bss_info_le *)((unsigned long)bss +
+ le32_to_cpu(bss->length));
+}
+
static s32 brcmf_inform_bss(struct brcmf_cfg80211_priv *cfg_priv)
{
struct brcmf_scan_results *bss_list;
- struct brcmf_bss_info *bi = NULL; /* must be initialized */
+ struct brcmf_bss_info_le *bi = NULL; /* must be initialized */
s32 err = 0;
int i;
@@ -2072,7 +2076,7 @@ static s32 brcmf_inform_bss(struct brcmf_cfg80211_priv *cfg_priv)
}
WL_SCAN("scanned AP count (%d)\n", bss_list->count);
for (i = 0; i < bss_list->count && i < WL_AP_MAX; i++) {
- bi = next_bss(bss_list, bi);
+ bi = next_bss_le(bss_list, bi);
err = brcmf_inform_single_bss(cfg_priv, bi);
if (err)
break;
@@ -2085,8 +2089,9 @@ static s32 wl_inform_ibss(struct brcmf_cfg80211_priv *cfg_priv,
{
struct wiphy *wiphy = cfg_to_wiphy(cfg_priv);
struct ieee80211_channel *notify_channel;
- struct brcmf_bss_info *bi = NULL;
+ struct brcmf_bss_info_le *bi = NULL;
struct ieee80211_supported_band *band;
+ struct cfg80211_bss *bss;
u8 *buf = NULL;
s32 err = 0;
u16 channel;
@@ -2114,7 +2119,7 @@ static s32 wl_inform_ibss(struct brcmf_cfg80211_priv *cfg_priv,
goto CleanUp;
}
- bi = (struct brcmf_bss_info *)(buf + 4);
+ bi = (struct brcmf_bss_info_le *)(buf + 4);
channel = bi->ctl_ch ? bi->ctl_ch :
CHSPEC_CHANNEL(le16_to_cpu(bi->chanspec));
@@ -2140,10 +2145,17 @@ static s32 wl_inform_ibss(struct brcmf_cfg80211_priv *cfg_priv,
WL_CONN("signal: %d\n", notify_signal);
WL_CONN("notify_timestamp: %#018llx\n", notify_timestamp);
- cfg80211_inform_bss(wiphy, notify_channel, bssid,
+ bss = cfg80211_inform_bss(wiphy, notify_channel, bssid,
notify_timestamp, notify_capability, notify_interval,
notify_ie, notify_ielen, notify_signal, GFP_KERNEL);
+ if (!bss) {
+ err = -ENOMEM;
+ goto CleanUp;
+ }
+
+ cfg80211_put_bss(bss);
+
CleanUp:
kfree(buf);
@@ -2188,7 +2200,7 @@ static struct brcmf_tlv *brcmf_parse_tlvs(void *buf, int buflen, uint key)
static s32 brcmf_update_bss_info(struct brcmf_cfg80211_priv *cfg_priv)
{
- struct brcmf_bss_info *bi;
+ struct brcmf_bss_info_le *bi;
struct brcmf_ssid *ssid;
struct brcmf_tlv *tim;
u16 beacon_interval;
@@ -2211,7 +2223,7 @@ static s32 brcmf_update_bss_info(struct brcmf_cfg80211_priv *cfg_priv)
goto update_bss_info_out;
}
- bi = (struct brcmf_bss_info *)(cfg_priv->extra_buf + 4);
+ bi = (struct brcmf_bss_info_le *)(cfg_priv->extra_buf + 4);
err = brcmf_inform_single_bss(cfg_priv, bi);
if (err)
goto update_bss_info_out;
@@ -2463,7 +2475,7 @@ static s32 brcmf_init_iscan(struct brcmf_cfg80211_priv *cfg_priv)
return err;
}
-static void brcmf_delay(u32 ms)
+static __always_inline void brcmf_delay(u32 ms)
{
if (ms < 1000 / HZ) {
cond_resched();
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
index 62dc46144ede..a613b49cb13f 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
@@ -352,15 +352,6 @@ brcmf_cfg80211_connect_info *cfg_to_conn(struct brcmf_cfg80211_priv *cfg)
return &cfg->conn_info;
}
-static inline struct brcmf_bss_info *next_bss(struct brcmf_scan_results *list,
- struct brcmf_bss_info *bss)
-{
- return bss = bss ?
- (struct brcmf_bss_info *)((unsigned long)bss +
- le32_to_cpu(bss->length)) :
- list->bss_info;
-}
-
extern struct brcmf_cfg80211_dev *brcmf_cfg80211_attach(struct net_device *ndev,
struct device *busdev,
void *data);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
index 025fa0eb6f47..ab9bb11abfbb 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
@@ -16,6 +16,8 @@
* File contents: support functions for PCI/PCIe
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/delay.h>
#include <linux/pci.h>
@@ -316,51 +318,24 @@
#define BADIDX (SI_MAXCORES + 1)
-/* Newer chips can access PCI/PCIE and CC core without requiring to change
- * PCI BAR0 WIN
- */
-#define SI_FAST(si) (((si)->pub.buscoretype == PCIE_CORE_ID) || \
- (((si)->pub.buscoretype == PCI_CORE_ID) && \
- (si)->pub.buscorerev >= 13))
-
-#define CCREGS_FAST(si) (((char __iomem *)((si)->curmap) + \
- PCI_16KB0_CCREGS_OFFSET))
-
#define IS_SIM(chippkg) \
((chippkg == HDLSIM_PKG_ID) || (chippkg == HWSIM_PKG_ID))
-/*
- * Macros to disable/restore function core(D11, ENET, ILINE20, etc) interrupts
- * before after core switching to avoid invalid register accesss inside ISR.
- */
-#define INTR_OFF(si, intr_val) \
- if ((si)->intrsoff_fn && \
- (si)->coreid[(si)->curidx] == (si)->dev_coreid) \
- intr_val = (*(si)->intrsoff_fn)((si)->intr_arg)
+#define PCI(sih) (ai_get_buscoretype(sih) == PCI_CORE_ID)
+#define PCIE(sih) (ai_get_buscoretype(sih) == PCIE_CORE_ID)
-#define INTR_RESTORE(si, intr_val) \
- if ((si)->intrsrestore_fn && \
- (si)->coreid[(si)->curidx] == (si)->dev_coreid) \
- (*(si)->intrsrestore_fn)((si)->intr_arg, intr_val)
-
-#define PCI(si) ((si)->pub.buscoretype == PCI_CORE_ID)
-#define PCIE(si) ((si)->pub.buscoretype == PCIE_CORE_ID)
-
-#define PCI_FORCEHT(si) (PCIE(si) && (si->pub.chip == BCM4716_CHIP_ID))
+#define PCI_FORCEHT(sih) (PCIE(sih) && (ai_get_chip_id(sih) == BCM4716_CHIP_ID))
#ifdef BCMDBG
-#define SI_MSG(args) printk args
+#define SI_MSG(fmt, ...) pr_debug(fmt, ##__VA_ARGS__)
#else
-#define SI_MSG(args)
+#define SI_MSG(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
#endif /* BCMDBG */
#define GOODCOREADDR(x, b) \
(((x) >= (b)) && ((x) < ((b) + SI_MAXCORES * SI_CORE_SIZE)) && \
IS_ALIGNED((x), SI_CORE_SIZE))
-#define PCIEREGS(si) ((__iomem char *)((si)->curmap) + \
- PCI_16KB0_PCIREGS_OFFSET)
-
struct aidmp {
u32 oobselina30; /* 0x000 */
u32 oobselina74; /* 0x004 */
@@ -479,406 +454,13 @@ struct aidmp {
u32 componentid3; /* 0xffc */
};
-/* EROM parsing */
-
-static u32
-get_erom_ent(struct si_pub *sih, u32 __iomem **eromptr, u32 mask, u32 match)
-{
- u32 ent;
- uint inv = 0, nom = 0;
-
- while (true) {
- ent = R_REG(*eromptr);
- (*eromptr)++;
-
- if (mask == 0)
- break;
-
- if ((ent & ER_VALID) == 0) {
- inv++;
- continue;
- }
-
- if (ent == (ER_END | ER_VALID))
- break;
-
- if ((ent & mask) == match)
- break;
-
- nom++;
- }
-
- return ent;
-}
-
-static u32
-get_asd(struct si_pub *sih, u32 __iomem **eromptr, uint sp, uint ad, uint st,
- u32 *addrl, u32 *addrh, u32 *sizel, u32 *sizeh)
-{
- u32 asd, sz, szd;
-
- asd = get_erom_ent(sih, eromptr, ER_VALID, ER_VALID);
- if (((asd & ER_TAG1) != ER_ADD) ||
- (((asd & AD_SP_MASK) >> AD_SP_SHIFT) != sp) ||
- ((asd & AD_ST_MASK) != st)) {
- /* This is not what we want, "push" it back */
- (*eromptr)--;
- return 0;
- }
- *addrl = asd & AD_ADDR_MASK;
- if (asd & AD_AG32)
- *addrh = get_erom_ent(sih, eromptr, 0, 0);
- else
- *addrh = 0;
- *sizeh = 0;
- sz = asd & AD_SZ_MASK;
- if (sz == AD_SZ_SZD) {
- szd = get_erom_ent(sih, eromptr, 0, 0);
- *sizel = szd & SD_SZ_MASK;
- if (szd & SD_SG32)
- *sizeh = get_erom_ent(sih, eromptr, 0, 0);
- } else
- *sizel = AD_SZ_BASE << (sz >> AD_SZ_SHIFT);
-
- return asd;
-}
-
-static void ai_hwfixup(struct si_info *sii)
-{
-}
-
-/* parse the enumeration rom to identify all cores */
-static void ai_scan(struct si_pub *sih, struct chipcregs __iomem *cc)
-{
- struct si_info *sii = (struct si_info *)sih;
-
- u32 erombase;
- u32 __iomem *eromptr, *eromlim;
- void __iomem *regs = cc;
-
- erombase = R_REG(&cc->eromptr);
-
- /* Set wrappers address */
- sii->curwrap = (void *)((unsigned long)cc + SI_CORE_SIZE);
-
- /* Now point the window at the erom */
- pci_write_config_dword(sii->pbus, PCI_BAR0_WIN, erombase);
- eromptr = regs;
- eromlim = eromptr + (ER_REMAPCONTROL / sizeof(u32));
-
- while (eromptr < eromlim) {
- u32 cia, cib, cid, mfg, crev, nmw, nsw, nmp, nsp;
- u32 mpd, asd, addrl, addrh, sizel, sizeh;
- u32 __iomem *base;
- uint i, j, idx;
- bool br;
-
- br = false;
-
- /* Grok a component */
- cia = get_erom_ent(sih, &eromptr, ER_TAG, ER_CI);
- if (cia == (ER_END | ER_VALID)) {
- /* Found END of erom */
- ai_hwfixup(sii);
- return;
- }
- base = eromptr - 1;
- cib = get_erom_ent(sih, &eromptr, 0, 0);
-
- if ((cib & ER_TAG) != ER_CI) {
- /* CIA not followed by CIB */
- goto error;
- }
-
- cid = (cia & CIA_CID_MASK) >> CIA_CID_SHIFT;
- mfg = (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT;
- crev = (cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
- nmw = (cib & CIB_NMW_MASK) >> CIB_NMW_SHIFT;
- nsw = (cib & CIB_NSW_MASK) >> CIB_NSW_SHIFT;
- nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT;
- nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT;
-
- if (((mfg == MFGID_ARM) && (cid == DEF_AI_COMP)) || (nsp == 0))
- continue;
- if ((nmw + nsw == 0)) {
- /* A component which is not a core */
- if (cid == OOB_ROUTER_CORE_ID) {
- asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE,
- &addrl, &addrh, &sizel, &sizeh);
- if (asd != 0)
- sii->oob_router = addrl;
- }
- continue;
- }
-
- idx = sii->numcores;
-/* sii->eromptr[idx] = base; */
- sii->cia[idx] = cia;
- sii->cib[idx] = cib;
- sii->coreid[idx] = cid;
-
- for (i = 0; i < nmp; i++) {
- mpd = get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID);
- if ((mpd & ER_TAG) != ER_MP) {
- /* Not enough MP entries for component */
- goto error;
- }
- }
-
- /* First Slave Address Descriptor should be port 0:
- * the main register space for the core
- */
- asd =
- get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh,
- &sizel, &sizeh);
- if (asd == 0) {
- /* Try again to see if it is a bridge */
- asd =
- get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl,
- &addrh, &sizel, &sizeh);
- if (asd != 0)
- br = true;
- else if ((addrh != 0) || (sizeh != 0)
- || (sizel != SI_CORE_SIZE)) {
- /* First Slave ASD for core malformed */
- goto error;
- }
- }
- sii->coresba[idx] = addrl;
- sii->coresba_size[idx] = sizel;
- /* Get any more ASDs in port 0 */
- j = 1;
- do {
- asd =
- get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl,
- &addrh, &sizel, &sizeh);
- if ((asd != 0) && (j == 1) && (sizel == SI_CORE_SIZE)) {
- sii->coresba2[idx] = addrl;
- sii->coresba2_size[idx] = sizel;
- }
- j++;
- } while (asd != 0);
-
- /* Go through the ASDs for other slave ports */
- for (i = 1; i < nsp; i++) {
- j = 0;
- do {
- asd =
- get_asd(sih, &eromptr, i, j++, AD_ST_SLAVE,
- &addrl, &addrh, &sizel, &sizeh);
- } while (asd != 0);
- if (j == 0) {
- /* SP has no address descriptors */
- goto error;
- }
- }
-
- /* Now get master wrappers */
- for (i = 0; i < nmw; i++) {
- asd =
- get_asd(sih, &eromptr, i, 0, AD_ST_MWRAP, &addrl,
- &addrh, &sizel, &sizeh);
- if (asd == 0) {
- /* Missing descriptor for MW */
- goto error;
- }
- if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
- /* Master wrapper %d is not 4KB */
- goto error;
- }
- if (i == 0)
- sii->wrapba[idx] = addrl;
- }
-
- /* And finally slave wrappers */
- for (i = 0; i < nsw; i++) {
- uint fwp = (nsp == 1) ? 0 : 1;
- asd =
- get_asd(sih, &eromptr, fwp + i, 0, AD_ST_SWRAP,
- &addrl, &addrh, &sizel, &sizeh);
- if (asd == 0) {
- /* Missing descriptor for SW */
- goto error;
- }
- if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
- /* Slave wrapper is not 4KB */
- goto error;
- }
- if ((nmw == 0) && (i == 0))
- sii->wrapba[idx] = addrl;
- }
-
- /* Don't record bridges */
- if (br)
- continue;
-
- /* Done with core */
- sii->numcores++;
- }
-
- error:
- /* Reached end of erom without finding END */
- sii->numcores = 0;
- return;
-}
-
-/*
- * This function changes the logical "focus" to the indicated core.
- * Return the current core's virtual address. Since each core starts with the
- * same set of registers (BIST, clock control, etc), the returned address
- * contains the first register of this 'common' register block (not to be
- * confused with 'common core').
- */
-void __iomem *ai_setcoreidx(struct si_pub *sih, uint coreidx)
-{
- struct si_info *sii = (struct si_info *)sih;
- u32 addr = sii->coresba[coreidx];
- u32 wrap = sii->wrapba[coreidx];
-
- if (coreidx >= sii->numcores)
- return NULL;
-
- /* point bar0 window */
- pci_write_config_dword(sii->pbus, PCI_BAR0_WIN, addr);
- /* point bar0 2nd 4KB window */
- pci_write_config_dword(sii->pbus, PCI_BAR0_WIN2, wrap);
- sii->curidx = coreidx;
-
- return sii->curmap;
-}
-
-/* Return the number of address spaces in current core */
-int ai_numaddrspaces(struct si_pub *sih)
-{
- return 2;
-}
-
-/* Return the address of the nth address space in the current core */
-u32 ai_addrspace(struct si_pub *sih, uint asidx)
-{
- struct si_info *sii;
- uint cidx;
-
- sii = (struct si_info *)sih;
- cidx = sii->curidx;
-
- if (asidx == 0)
- return sii->coresba[cidx];
- else if (asidx == 1)
- return sii->coresba2[cidx];
- else {
- /* Need to parse the erom again to find addr space */
- return 0;
- }
-}
-
-/* Return the size of the nth address space in the current core */
-u32 ai_addrspacesize(struct si_pub *sih, uint asidx)
-{
- struct si_info *sii;
- uint cidx;
-
- sii = (struct si_info *)sih;
- cidx = sii->curidx;
-
- if (asidx == 0)
- return sii->coresba_size[cidx];
- else if (asidx == 1)
- return sii->coresba2_size[cidx];
- else {
- /* Need to parse the erom again to find addr */
- return 0;
- }
-}
-
-uint ai_flag(struct si_pub *sih)
-{
- struct si_info *sii;
- struct aidmp *ai;
-
- sii = (struct si_info *)sih;
- ai = sii->curwrap;
-
- return R_REG(&ai->oobselouta30) & 0x1f;
-}
-
-void ai_setint(struct si_pub *sih, int siflag)
-{
-}
-
-uint ai_corevendor(struct si_pub *sih)
-{
- struct si_info *sii;
- u32 cia;
-
- sii = (struct si_info *)sih;
- cia = sii->cia[sii->curidx];
- return (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT;
-}
-
-uint ai_corerev(struct si_pub *sih)
-{
- struct si_info *sii;
- u32 cib;
-
- sii = (struct si_info *)sih;
- cib = sii->cib[sii->curidx];
- return (cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
-}
-
-bool ai_iscoreup(struct si_pub *sih)
-{
- struct si_info *sii;
- struct aidmp *ai;
-
- sii = (struct si_info *)sih;
- ai = sii->curwrap;
-
- return (((R_REG(&ai->ioctrl) & (SICF_FGC | SICF_CLOCK_EN)) ==
- SICF_CLOCK_EN)
- && ((R_REG(&ai->resetctrl) & AIRC_RESET) == 0));
-}
-
-void ai_core_cflags_wo(struct si_pub *sih, u32 mask, u32 val)
-{
- struct si_info *sii;
- struct aidmp *ai;
- u32 w;
-
- sii = (struct si_info *)sih;
-
- ai = sii->curwrap;
-
- if (mask || val) {
- w = ((R_REG(&ai->ioctrl) & ~mask) | val);
- W_REG(&ai->ioctrl, w);
- }
-}
-
-u32 ai_core_cflags(struct si_pub *sih, u32 mask, u32 val)
-{
- struct si_info *sii;
- struct aidmp *ai;
- u32 w;
-
- sii = (struct si_info *)sih;
- ai = sii->curwrap;
-
- if (mask || val) {
- w = ((R_REG(&ai->ioctrl) & ~mask) | val);
- W_REG(&ai->ioctrl, w);
- }
-
- return R_REG(&ai->ioctrl);
-}
-
/* return true if PCIE capability exists in the pci config space */
static bool ai_ispcie(struct si_info *sii)
{
u8 cap_ptr;
cap_ptr =
- pcicore_find_pci_capability(sii->pbus, PCI_CAP_ID_EXP, NULL,
+ pcicore_find_pci_capability(sii->pcibus, PCI_CAP_ID_EXP, NULL,
NULL);
if (!cap_ptr)
return false;
@@ -894,117 +476,69 @@ static bool ai_buscore_prep(struct si_info *sii)
return true;
}
-u32 ai_core_sflags(struct si_pub *sih, u32 mask, u32 val)
-{
- struct si_info *sii;
- struct aidmp *ai;
- u32 w;
-
- sii = (struct si_info *)sih;
- ai = sii->curwrap;
-
- if (mask || val) {
- w = ((R_REG(&ai->iostatus) & ~mask) | val);
- W_REG(&ai->iostatus, w);
- }
-
- return R_REG(&ai->iostatus);
-}
-
static bool
-ai_buscore_setup(struct si_info *sii, u32 savewin, uint *origidx)
+ai_buscore_setup(struct si_info *sii, struct bcma_device *cc)
{
- bool pci, pcie;
- uint i;
- uint pciidx, pcieidx, pcirev, pcierev;
- struct chipcregs __iomem *cc;
+ struct bcma_device *pci = NULL;
+ struct bcma_device *pcie = NULL;
+ struct bcma_device *core;
- cc = ai_setcoreidx(&sii->pub, SI_CC_IDX);
+
+ /* no cores found, bail out */
+ if (cc->bus->nr_cores == 0)
+ return false;
/* get chipcommon rev */
- sii->pub.ccrev = (int)ai_corerev(&sii->pub);
+ sii->pub.ccrev = cc->id.rev;
/* get chipcommon chipstatus */
- if (sii->pub.ccrev >= 11)
- sii->pub.chipst = R_REG(&cc->chipstatus);
+ if (ai_get_ccrev(&sii->pub) >= 11)
+ sii->chipst = bcma_read32(cc, CHIPCREGOFFS(chipstatus));
	/* get chipcommon capabilities */
- sii->pub.cccaps = R_REG(&cc->capabilities);
- /* get chipcommon extended capabilities */
-
- if (sii->pub.ccrev >= 35)
- sii->pub.cccaps_ext = R_REG(&cc->capabilities_ext);
+ sii->pub.cccaps = bcma_read32(cc, CHIPCREGOFFS(capabilities));
/* get pmu rev and caps */
- if (sii->pub.cccaps & CC_CAP_PMU) {
- sii->pub.pmucaps = R_REG(&cc->pmucapabilities);
+ if (ai_get_cccaps(&sii->pub) & CC_CAP_PMU) {
+ sii->pub.pmucaps = bcma_read32(cc,
+ CHIPCREGOFFS(pmucapabilities));
sii->pub.pmurev = sii->pub.pmucaps & PCAP_REV_MASK;
}
- /* figure out bus/orignal core idx */
- sii->pub.buscoretype = NODEV_CORE_ID;
- sii->pub.buscorerev = NOREV;
- sii->pub.buscoreidx = BADIDX;
-
- pci = pcie = false;
- pcirev = pcierev = NOREV;
- pciidx = pcieidx = BADIDX;
-
- for (i = 0; i < sii->numcores; i++) {
+ /* figure out buscore */
+ list_for_each_entry(core, &cc->bus->cores, list) {
uint cid, crev;
- ai_setcoreidx(&sii->pub, i);
- cid = ai_coreid(&sii->pub);
- crev = ai_corerev(&sii->pub);
+ cid = core->id.id;
+ crev = core->id.rev;
if (cid == PCI_CORE_ID) {
- pciidx = i;
- pcirev = crev;
- pci = true;
+ pci = core;
} else if (cid == PCIE_CORE_ID) {
- pcieidx = i;
- pcierev = crev;
- pcie = true;
+ pcie = core;
}
-
- /* find the core idx before entering this func. */
- if ((savewin && (savewin == sii->coresba[i])) ||
- (cc == sii->regs[i]))
- *origidx = i;
}
if (pci && pcie) {
if (ai_ispcie(sii))
- pci = false;
+ pci = NULL;
else
- pcie = false;
+ pcie = NULL;
}
if (pci) {
- sii->pub.buscoretype = PCI_CORE_ID;
- sii->pub.buscorerev = pcirev;
- sii->pub.buscoreidx = pciidx;
+ sii->buscore = pci;
} else if (pcie) {
- sii->pub.buscoretype = PCIE_CORE_ID;
- sii->pub.buscorerev = pcierev;
- sii->pub.buscoreidx = pcieidx;
+ sii->buscore = pcie;
}
/* fixup necessary chip/core configurations */
- if (SI_FAST(sii)) {
- if (!sii->pch) {
- sii->pch = pcicore_init(&sii->pub, sii->pbus,
- (__iomem void *)PCIEREGS(sii));
- if (sii->pch == NULL)
- return false;
- }
+ if (!sii->pch) {
+ sii->pch = pcicore_init(&sii->pub, sii->icbus->drv_pci.core);
+ if (sii->pch == NULL)
+ return false;
}
- if (ai_pci_fixcfg(&sii->pub)) {
- /* si_doattach: si_pci_fixcfg failed */
+ if (ai_pci_fixcfg(&sii->pub))
return false;
- }
-
- /* return to the original core */
- ai_setcoreidx(&sii->pub, *origidx);
return true;
}
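A sketch (driver context) of the enumeration pattern that replaces the removed EROM scan: the bcma bus driver has already discovered every core, so finding one is just a list walk keyed on core->id.id, exactly as the loops in ai_buscore_setup() above and ai_findcore() below do.

/* Sketch only: locate the PCIe bus core on an already-scanned bcma bus */
struct bcma_device *core, *pcie = NULL;

list_for_each_entry(core, &sii->icbus->cores, list)
	if (core->id.id == PCIE_CORE_ID)
		pcie = core;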
@@ -1017,39 +551,27 @@ static __used void ai_nvram_process(struct si_info *sii)
uint w = 0;
/* do a pci config read to get subsystem id and subvendor id */
- pci_read_config_dword(sii->pbus, PCI_SUBSYSTEM_VENDOR_ID, &w);
+ pci_read_config_dword(sii->pcibus, PCI_SUBSYSTEM_VENDOR_ID, &w);
sii->pub.boardvendor = w & 0xffff;
sii->pub.boardtype = (w >> 16) & 0xffff;
- sii->pub.boardflags = getintvar(&sii->pub, BRCMS_SROM_BOARDFLAGS);
}
static struct si_info *ai_doattach(struct si_info *sii,
- void __iomem *regs, struct pci_dev *pbus)
+ struct bcma_bus *pbus)
{
struct si_pub *sih = &sii->pub;
u32 w, savewin;
- struct chipcregs __iomem *cc;
+ struct bcma_device *cc;
uint socitype;
- uint origidx;
-
- memset((unsigned char *) sii, 0, sizeof(struct si_info));
savewin = 0;
- sih->buscoreidx = BADIDX;
-
- sii->curmap = regs;
- sii->pbus = pbus;
-
- /* find Chipcommon address */
- pci_read_config_dword(sii->pbus, PCI_BAR0_WIN, &savewin);
- if (!GOODCOREADDR(savewin, SI_ENUM_BASE))
- savewin = SI_ENUM_BASE;
+ sii->icbus = pbus;
+ sii->pcibus = pbus->host_pci;
- pci_write_config_dword(sii->pbus, PCI_BAR0_WIN,
- SI_ENUM_BASE);
- cc = (struct chipcregs __iomem *) regs;
+ /* switch to Chipcommon core */
+ cc = pbus->drv_cc.core;
/* bus/core/clk setup for register access */
if (!ai_buscore_prep(sii))
@@ -1062,94 +584,74 @@ static struct si_info *ai_doattach(struct si_info *sii,
* hosts w/o chipcommon), some way of recognizing them needs to
* be added here.
*/
- w = R_REG(&cc->chipid);
+ w = bcma_read32(cc, CHIPCREGOFFS(chipid));
socitype = (w & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
	/* Might as well fill in chip id rev & pkg */
sih->chip = w & CID_ID_MASK;
sih->chiprev = (w & CID_REV_MASK) >> CID_REV_SHIFT;
sih->chippkg = (w & CID_PKG_MASK) >> CID_PKG_SHIFT;
- sih->issim = false;
-
/* scan for cores */
- if (socitype == SOCI_AI) {
- SI_MSG(("Found chip type AI (0x%08x)\n", w));
- /* pass chipc address instead of original core base */
- ai_scan(&sii->pub, cc);
- } else {
- /* Found chip of unknown type */
- return NULL;
- }
- /* no cores found, bail out */
- if (sii->numcores == 0)
+ if (socitype != SOCI_AI)
return NULL;
- /* bus/core/clk setup */
- origidx = SI_CC_IDX;
- if (!ai_buscore_setup(sii, savewin, &origidx))
+ SI_MSG("Found chip type AI (0x%08x)\n", w);
+ if (!ai_buscore_setup(sii, cc))
goto exit;
/* Init nvram from sprom/otp if they exist */
- if (srom_var_init(&sii->pub, cc))
+ if (srom_var_init(&sii->pub))
goto exit;
ai_nvram_process(sii);
/* === NVRAM, clock is ready === */
- cc = (struct chipcregs __iomem *) ai_setcore(sih, CC_CORE_ID, 0);
- W_REG(&cc->gpiopullup, 0);
- W_REG(&cc->gpiopulldown, 0);
- ai_setcoreidx(sih, origidx);
+ bcma_write32(cc, CHIPCREGOFFS(gpiopullup), 0);
+ bcma_write32(cc, CHIPCREGOFFS(gpiopulldown), 0);
/* PMU specific initializations */
- if (sih->cccaps & CC_CAP_PMU) {
- u32 xtalfreq;
+ if (ai_get_cccaps(sih) & CC_CAP_PMU) {
si_pmu_init(sih);
- si_pmu_chip_init(sih);
-
- xtalfreq = si_pmu_measure_alpclk(sih);
- si_pmu_pll_init(sih, xtalfreq);
+ (void)si_pmu_measure_alpclk(sih);
si_pmu_res_init(sih);
- si_pmu_swreg_init(sih);
}
/* setup the GPIO based LED powersave register */
w = getintvar(sih, BRCMS_SROM_LEDDC);
if (w == 0)
w = DEFAULT_GPIOTIMERVAL;
- ai_corereg(sih, SI_CC_IDX, offsetof(struct chipcregs, gpiotimerval),
- ~0, w);
+ ai_cc_reg(sih, offsetof(struct chipcregs, gpiotimerval),
+ ~0, w);
- if (PCIE(sii))
+ if (PCIE(sih))
pcicore_attach(sii->pch, SI_DOATTACH);
- if (sih->chip == BCM43224_CHIP_ID) {
+ if (ai_get_chip_id(sih) == BCM43224_CHIP_ID) {
/*
	 * enable 12 mA drive strength for 43224 and
* set chipControl register bit 15
*/
- if (sih->chiprev == 0) {
- SI_MSG(("Applying 43224A0 WARs\n"));
- ai_corereg(sih, SI_CC_IDX,
- offsetof(struct chipcregs, chipcontrol),
- CCTRL43224_GPIO_TOGGLE,
- CCTRL43224_GPIO_TOGGLE);
+ if (ai_get_chiprev(sih) == 0) {
+ SI_MSG("Applying 43224A0 WARs\n");
+ ai_cc_reg(sih, offsetof(struct chipcregs, chipcontrol),
+ CCTRL43224_GPIO_TOGGLE,
+ CCTRL43224_GPIO_TOGGLE);
si_pmu_chipcontrol(sih, 0, CCTRL_43224A0_12MA_LED_DRIVE,
CCTRL_43224A0_12MA_LED_DRIVE);
}
- if (sih->chiprev >= 1) {
- SI_MSG(("Applying 43224B0+ WARs\n"));
+ if (ai_get_chiprev(sih) >= 1) {
+ SI_MSG("Applying 43224B0+ WARs\n");
si_pmu_chipcontrol(sih, 0, CCTRL_43224B0_12MA_LED_DRIVE,
CCTRL_43224B0_12MA_LED_DRIVE);
}
}
- if (sih->chip == BCM4313_CHIP_ID) {
+ if (ai_get_chip_id(sih) == BCM4313_CHIP_ID) {
/*
	 * enable 12 mA drive strength for 4313 and
* set chipControl register bit 1
*/
- SI_MSG(("Applying 4313 WARs\n"));
+ SI_MSG("Applying 4313 WARs\n");
si_pmu_chipcontrol(sih, 0, CCTRL_4313_12MA_LED_DRIVE,
CCTRL_4313_12MA_LED_DRIVE);
}
@@ -1165,22 +667,19 @@ static struct si_info *ai_doattach(struct si_info *sii,
}
/*
- * Allocate a si handle.
- * devid - pci device id (used to determine chip#)
- * osh - opaque OS handle
- * regs - virtual address of initial core registers
+ * Allocate a si handle and do the attach.
*/
struct si_pub *
-ai_attach(void __iomem *regs, struct pci_dev *sdh)
+ai_attach(struct bcma_bus *pbus)
{
struct si_info *sii;
/* alloc struct si_info */
- sii = kmalloc(sizeof(struct si_info), GFP_ATOMIC);
+ sii = kzalloc(sizeof(struct si_info), GFP_ATOMIC);
if (sii == NULL)
return NULL;
- if (ai_doattach(sii, regs, sdh) == NULL) {
+ if (ai_doattach(sii, pbus) == NULL) {
kfree(sii);
return NULL;
}
@@ -1209,292 +708,66 @@ void ai_detach(struct si_pub *sih)
kfree(sii);
}
-/* register driver interrupt disabling and restoring callback functions */
-void
-ai_register_intr_callback(struct si_pub *sih, void *intrsoff_fn,
- void *intrsrestore_fn,
- void *intrsenabled_fn, void *intr_arg)
-{
- struct si_info *sii;
-
- sii = (struct si_info *)sih;
- sii->intr_arg = intr_arg;
- sii->intrsoff_fn = (u32 (*)(void *)) intrsoff_fn;
- sii->intrsrestore_fn = (void (*) (void *, u32)) intrsrestore_fn;
- sii->intrsenabled_fn = (bool (*)(void *)) intrsenabled_fn;
- /* save current core id. when this function called, the current core
- * must be the core which provides driver functions(il, et, wl, etc.)
- */
- sii->dev_coreid = sii->coreid[sii->curidx];
-}
-
-void ai_deregister_intr_callback(struct si_pub *sih)
-{
- struct si_info *sii;
-
- sii = (struct si_info *)sih;
- sii->intrsoff_fn = NULL;
-}
-
-uint ai_coreid(struct si_pub *sih)
-{
- struct si_info *sii;
-
- sii = (struct si_info *)sih;
- return sii->coreid[sii->curidx];
-}
-
-uint ai_coreidx(struct si_pub *sih)
-{
- struct si_info *sii;
-
- sii = (struct si_info *)sih;
- return sii->curidx;
-}
-
-bool ai_backplane64(struct si_pub *sih)
-{
- return (sih->cccaps & CC_CAP_BKPLN64) != 0;
-}
-
/* return the core matching coreid and coreunit, or NULL if not found */
-uint ai_findcoreidx(struct si_pub *sih, uint coreid, uint coreunit)
+struct bcma_device *ai_findcore(struct si_pub *sih, u16 coreid, u16 coreunit)
{
+ struct bcma_device *core;
struct si_info *sii;
uint found;
- uint i;
sii = (struct si_info *)sih;
found = 0;
- for (i = 0; i < sii->numcores; i++)
- if (sii->coreid[i] == coreid) {
+ list_for_each_entry(core, &sii->icbus->cores, list)
+ if (core->id.id == coreid) {
if (found == coreunit)
- return i;
+ return core;
found++;
}
- return BADIDX;
-}
-
-/*
- * This function changes logical "focus" to the indicated core;
- * must be called with interrupts off.
- * Moreover, callers should keep interrupts off during switching
- * out of and back to d11 core.
- */
-void __iomem *ai_setcore(struct si_pub *sih, uint coreid, uint coreunit)
-{
- uint idx;
-
- idx = ai_findcoreidx(sih, coreid, coreunit);
- if (idx >= SI_MAXCORES)
- return NULL;
-
- return ai_setcoreidx(sih, idx);
-}
-
-/* Turn off interrupt as required by ai_setcore, before switch core */
-void __iomem *ai_switch_core(struct si_pub *sih, uint coreid, uint *origidx,
- uint *intr_val)
-{
- void __iomem *cc;
- struct si_info *sii;
-
- sii = (struct si_info *)sih;
-
- if (SI_FAST(sii)) {
- /* Overloading the origidx variable to remember the coreid,
- * this works because the core ids cannot be confused with
- * core indices.
- */
- *origidx = coreid;
- if (coreid == CC_CORE_ID)
- return CCREGS_FAST(sii);
- else if (coreid == sih->buscoretype)
- return PCIEREGS(sii);
- }
- INTR_OFF(sii, *intr_val);
- *origidx = sii->curidx;
- cc = ai_setcore(sih, coreid, 0);
- return cc;
-}
-
-/* restore coreidx and restore interrupt */
-void ai_restore_core(struct si_pub *sih, uint coreid, uint intr_val)
-{
- struct si_info *sii;
-
- sii = (struct si_info *)sih;
- if (SI_FAST(sii)
- && ((coreid == CC_CORE_ID) || (coreid == sih->buscoretype)))
- return;
-
- ai_setcoreidx(sih, coreid);
- INTR_RESTORE(sii, intr_val);
-}
-
-void ai_write_wrapperreg(struct si_pub *sih, u32 offset, u32 val)
-{
- struct si_info *sii = (struct si_info *)sih;
- u32 *w = (u32 *) sii->curwrap;
- W_REG(w + (offset / 4), val);
- return;
+ return NULL;
}
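Instead of returning a core index, the lookup now yields the bcma_device itself; a condensed example of the pattern used by the chipcommon helpers later in this patch (NULL must be handled, since the core may be absent):
        struct bcma_device *cc;
        cc = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
        if (cc == NULL)
                return;
        bcma_set32(cc, CHIPCREGOFFS(gpiocontrol), GPIO_CTRL_EPA_EN_MASK);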
/*
- * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set
- * operation, switch back to the original core, and return the new value.
- *
- * When using the silicon backplane, no fiddling with interrupts or core
- * switches is needed.
- *
- * Also, when using pci/pcie, we can optimize away the core switching for pci
- * registers and (on newer pci cores) chipcommon registers.
+ * read/modify chipcommon core register.
*/
-uint ai_corereg(struct si_pub *sih, uint coreidx, uint regoff, uint mask,
- uint val)
+uint ai_cc_reg(struct si_pub *sih, uint regoff, u32 mask, u32 val)
{
- uint origidx = 0;
- u32 __iomem *r = NULL;
- uint w;
- uint intr_val = 0;
- bool fast = false;
+ struct bcma_device *cc;
+ u32 w;
struct si_info *sii;
sii = (struct si_info *)sih;
-
- if (coreidx >= SI_MAXCORES)
- return 0;
-
- /*
- * If pci/pcie, we can get at pci/pcie regs
- * and on newer cores to chipc
- */
- if ((sii->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
- /* Chipc registers are mapped at 12KB */
- fast = true;
- r = (u32 __iomem *)((__iomem char *)sii->curmap +
- PCI_16KB0_CCREGS_OFFSET + regoff);
- } else if (sii->pub.buscoreidx == coreidx) {
- /*
- * pci registers are at either in the last 2KB of
- * an 8KB window or, in pcie and pci rev 13 at 8KB
- */
- fast = true;
- if (SI_FAST(sii))
- r = (u32 __iomem *)((__iomem char *)sii->curmap +
- PCI_16KB0_PCIREGS_OFFSET + regoff);
- else
- r = (u32 __iomem *)((__iomem char *)sii->curmap +
- ((regoff >= SBCONFIGOFF) ?
- PCI_BAR0_PCISBR_OFFSET :
- PCI_BAR0_PCIREGS_OFFSET) + regoff);
- }
-
- if (!fast) {
- INTR_OFF(sii, intr_val);
-
- /* save current core index */
- origidx = ai_coreidx(&sii->pub);
-
- /* switch core */
- r = (u32 __iomem *) ((unsigned char __iomem *)
- ai_setcoreidx(&sii->pub, coreidx) + regoff);
- }
+ cc = sii->icbus->drv_cc.core;
/* mask and set */
if (mask || val) {
- w = (R_REG(r) & ~mask) | val;
- W_REG(r, w);
+ bcma_maskset32(cc, regoff, ~mask, val);
}
/* readback */
- w = R_REG(r);
-
- if (!fast) {
- /* restore core index */
- if (origidx != coreidx)
- ai_setcoreidx(&sii->pub, origidx);
-
- INTR_RESTORE(sii, intr_val);
- }
+ w = bcma_read32(cc, regoff);
return w;
}
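The mask/set convention is unchanged from the removed ai_corereg(): bits set in mask are cleared, bits in val are set, and the register value after the update is returned. A self-contained worked example of the arithmetic (the register value is illustrative):
        u32 cur = 0x0000f00f, mask = 0x000000ff, val = 0x00000055;
        u32 upd = (cur & ~mask) | val;  /* what bcma_maskset32(cc, regoff, ~mask, val) writes */
        /* upd == 0x0000f055; ai_cc_reg() then returns the readback of the register */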
-void ai_core_disable(struct si_pub *sih, u32 bits)
-{
- struct si_info *sii;
- u32 dummy;
- struct aidmp *ai;
-
- sii = (struct si_info *)sih;
-
- ai = sii->curwrap;
-
- /* if core is already in reset, just return */
- if (R_REG(&ai->resetctrl) & AIRC_RESET)
- return;
-
- W_REG(&ai->ioctrl, bits);
- dummy = R_REG(&ai->ioctrl);
- udelay(10);
-
- W_REG(&ai->resetctrl, AIRC_RESET);
- udelay(1);
-}
-
-/* reset and re-enable a core
- * inputs:
- * bits - core specific bits that are set during and after reset sequence
- * resetbits - core specific bits that are set only during reset sequence
- */
-void ai_core_reset(struct si_pub *sih, u32 bits, u32 resetbits)
-{
- struct si_info *sii;
- struct aidmp *ai;
- u32 dummy;
-
- sii = (struct si_info *)sih;
- ai = sii->curwrap;
-
- /*
- * Must do the disable sequence first to work
- * for arbitrary current core state.
- */
- ai_core_disable(sih, (bits | resetbits));
-
- /*
- * Now do the initialization sequence.
- */
- W_REG(&ai->ioctrl, (bits | SICF_FGC | SICF_CLOCK_EN));
- dummy = R_REG(&ai->ioctrl);
- W_REG(&ai->resetctrl, 0);
- udelay(1);
-
- W_REG(&ai->ioctrl, (bits | SICF_CLOCK_EN));
- dummy = R_REG(&ai->ioctrl);
- udelay(1);
-}
-
/* return the slow clock source - LPO, XTAL, or PCI */
-static uint ai_slowclk_src(struct si_info *sii)
+static uint ai_slowclk_src(struct si_pub *sih, struct bcma_device *cc)
{
- struct chipcregs __iomem *cc;
+ struct si_info *sii;
u32 val;
- if (sii->pub.ccrev < 6) {
- pci_read_config_dword(sii->pbus, PCI_GPIO_OUT,
+ sii = (struct si_info *)sih;
+ if (ai_get_ccrev(&sii->pub) < 6) {
+ pci_read_config_dword(sii->pcibus, PCI_GPIO_OUT,
&val);
if (val & PCI_CFG_GPIO_SCS)
return SCC_SS_PCI;
return SCC_SS_XTAL;
- } else if (sii->pub.ccrev < 10) {
- cc = (struct chipcregs __iomem *)
- ai_setcoreidx(&sii->pub, sii->curidx);
- return R_REG(&cc->slow_clk_ctl) & SCC_SS_MASK;
+ } else if (ai_get_ccrev(&sii->pub) < 10) {
+ return bcma_read32(cc, CHIPCREGOFFS(slow_clk_ctl)) &
+ SCC_SS_MASK;
} else /* Insta-clock */
return SCC_SS_XTAL;
}
@@ -1503,24 +776,24 @@ static uint ai_slowclk_src(struct si_info *sii)
* return the ILP (slowclock) min or max frequency
* precondition: we've established the chip has dynamic clk control
*/
-static uint ai_slowclk_freq(struct si_info *sii, bool max_freq,
- struct chipcregs __iomem *cc)
+static uint ai_slowclk_freq(struct si_pub *sih, bool max_freq,
+ struct bcma_device *cc)
{
u32 slowclk;
uint div;
- slowclk = ai_slowclk_src(sii);
- if (sii->pub.ccrev < 6) {
+ slowclk = ai_slowclk_src(sih, cc);
+ if (ai_get_ccrev(sih) < 6) {
if (slowclk == SCC_SS_PCI)
return max_freq ? (PCIMAXFREQ / 64)
: (PCIMINFREQ / 64);
else
return max_freq ? (XTALMAXFREQ / 32)
: (XTALMINFREQ / 32);
- } else if (sii->pub.ccrev < 10) {
+ } else if (ai_get_ccrev(sih) < 10) {
div = 4 *
- (((R_REG(&cc->slow_clk_ctl) & SCC_CD_MASK) >>
- SCC_CD_SHIFT) + 1);
+ (((bcma_read32(cc, CHIPCREGOFFS(slow_clk_ctl)) &
+ SCC_CD_MASK) >> SCC_CD_SHIFT) + 1);
if (slowclk == SCC_SS_LPO)
return max_freq ? LPOMAXFREQ : LPOMINFREQ;
else if (slowclk == SCC_SS_XTAL)
@@ -1531,15 +804,15 @@ static uint ai_slowclk_freq(struct si_info *sii, bool max_freq,
: (PCIMINFREQ / div);
} else {
/* Chipc rev 10 is InstaClock */
- div = R_REG(&cc->system_clk_ctl) >> SYCC_CD_SHIFT;
- div = 4 * (div + 1);
+ div = bcma_read32(cc, CHIPCREGOFFS(system_clk_ctl));
+ div = 4 * ((div >> SYCC_CD_SHIFT) + 1);
return max_freq ? XTALMAXFREQ : (XTALMINFREQ / div);
}
return 0;
}
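A worked example of the rev 6..9 branch with an illustrative SCC_CD field value of 3: div = 4 * (3 + 1) = 16, so with the crystal as slow-clock source the function returns XTALMAXFREQ / 16 when max_freq is true and XTALMINFREQ / 16 otherwise.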
static void
-ai_clkctl_setdelay(struct si_info *sii, struct chipcregs __iomem *cc)
+ai_clkctl_setdelay(struct si_pub *sih, struct bcma_device *cc)
{
uint slowmaxfreq, pll_delay, slowclk;
uint pll_on_delay, fref_sel_delay;
@@ -1552,55 +825,40 @@ ai_clkctl_setdelay(struct si_info *sii, struct chipcregs __iomem *cc)
* powered down by dynamic clk control logic.
*/
- slowclk = ai_slowclk_src(sii);
+ slowclk = ai_slowclk_src(sih, cc);
if (slowclk != SCC_SS_XTAL)
pll_delay += XTAL_ON_DELAY;
/* Starting with 4318 it is ILP that is used for the delays */
slowmaxfreq =
- ai_slowclk_freq(sii, (sii->pub.ccrev >= 10) ? false : true, cc);
+ ai_slowclk_freq(sih,
+ (ai_get_ccrev(sih) >= 10) ? false : true, cc);
pll_on_delay = ((slowmaxfreq * pll_delay) + 999999) / 1000000;
fref_sel_delay = ((slowmaxfreq * FREF_DELAY) + 999999) / 1000000;
- W_REG(&cc->pll_on_delay, pll_on_delay);
- W_REG(&cc->fref_sel_delay, fref_sel_delay);
+ bcma_write32(cc, CHIPCREGOFFS(pll_on_delay), pll_on_delay);
+ bcma_write32(cc, CHIPCREGOFFS(fref_sel_delay), fref_sel_delay);
}
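The delay registers are programmed in slow-clock ticks, and the + 999999 term makes the microsecond-to-tick division round up. With illustrative numbers, slowmaxfreq = 32768 Hz and pll_delay = 2000 us give pll_on_delay = (32768 * 2000 + 999999) / 1000000 = 66 ticks.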
/* initialize power control delay registers */
void ai_clkctl_init(struct si_pub *sih)
{
- struct si_info *sii;
- uint origidx = 0;
- struct chipcregs __iomem *cc;
- bool fast;
+ struct bcma_device *cc;
- if (!(sih->cccaps & CC_CAP_PWR_CTL))
+ if (!(ai_get_cccaps(sih) & CC_CAP_PWR_CTL))
return;
- sii = (struct si_info *)sih;
- fast = SI_FAST(sii);
- if (!fast) {
- origidx = sii->curidx;
- cc = (struct chipcregs __iomem *)
- ai_setcore(sih, CC_CORE_ID, 0);
- if (cc == NULL)
- return;
- } else {
- cc = (struct chipcregs __iomem *) CCREGS_FAST(sii);
- if (cc == NULL)
- return;
- }
+ cc = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
+ if (cc == NULL)
+ return;
/* set all Instaclk chip ILP to 1 MHz */
- if (sih->ccrev >= 10)
- SET_REG(&cc->system_clk_ctl, SYCC_CD_MASK,
- (ILP_DIV_1MHZ << SYCC_CD_SHIFT));
+ if (ai_get_ccrev(sih) >= 10)
+ bcma_maskset32(cc, CHIPCREGOFFS(system_clk_ctl), SYCC_CD_MASK,
+ (ILP_DIV_1MHZ << SYCC_CD_SHIFT));
- ai_clkctl_setdelay(sii, cc);
-
- if (!fast)
- ai_setcoreidx(sih, origidx);
+ ai_clkctl_setdelay(sih, cc);
}
/*
@@ -1610,47 +868,25 @@ void ai_clkctl_init(struct si_pub *sih)
u16 ai_clkctl_fast_pwrup_delay(struct si_pub *sih)
{
struct si_info *sii;
- uint origidx = 0;
- struct chipcregs __iomem *cc;
+ struct bcma_device *cc;
uint slowminfreq;
u16 fpdelay;
- uint intr_val = 0;
- bool fast;
sii = (struct si_info *)sih;
- if (sih->cccaps & CC_CAP_PMU) {
- INTR_OFF(sii, intr_val);
+ if (ai_get_cccaps(sih) & CC_CAP_PMU) {
fpdelay = si_pmu_fast_pwrup_delay(sih);
- INTR_RESTORE(sii, intr_val);
return fpdelay;
}
- if (!(sih->cccaps & CC_CAP_PWR_CTL))
+ if (!(ai_get_cccaps(sih) & CC_CAP_PWR_CTL))
return 0;
- fast = SI_FAST(sii);
fpdelay = 0;
- if (!fast) {
- origidx = sii->curidx;
- INTR_OFF(sii, intr_val);
- cc = (struct chipcregs __iomem *)
- ai_setcore(sih, CC_CORE_ID, 0);
- if (cc == NULL)
- goto done;
- } else {
- cc = (struct chipcregs __iomem *) CCREGS_FAST(sii);
- if (cc == NULL)
- goto done;
- }
-
- slowminfreq = ai_slowclk_freq(sii, false, cc);
- fpdelay = (((R_REG(&cc->pll_on_delay) + 2) * 1000000) +
- (slowminfreq - 1)) / slowminfreq;
-
- done:
- if (!fast) {
- ai_setcoreidx(sih, origidx);
- INTR_RESTORE(sii, intr_val);
+ cc = ai_findcore(sih, CC_CORE_ID, 0);
+ if (cc) {
+ slowminfreq = ai_slowclk_freq(sih, false, cc);
+ fpdelay = (((bcma_read32(cc, CHIPCREGOFFS(pll_on_delay)) + 2)
+ * 1000000) + (slowminfreq - 1)) / slowminfreq;
}
return fpdelay;
}
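The same rounding idiom runs in the other direction here: pll_on_delay is in slow-clock ticks and the result is in microseconds, i.e. fpdelay = ceil((pll_on_delay + 2) * 1000000 / slowminfreq). With illustrative values pll_on_delay = 66 and slowminfreq = 25000 Hz, that yields 2720 us.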
@@ -1664,12 +900,12 @@ int ai_clkctl_xtal(struct si_pub *sih, uint what, bool on)
sii = (struct si_info *)sih;
/* pcie core doesn't have any mapping to control the xtal pu */
- if (PCIE(sii))
+ if (PCIE(sih))
return -1;
- pci_read_config_dword(sii->pbus, PCI_GPIO_IN, &in);
- pci_read_config_dword(sii->pbus, PCI_GPIO_OUT, &out);
- pci_read_config_dword(sii->pbus, PCI_GPIO_OUTEN, &outen);
+ pci_read_config_dword(sii->pcibus, PCI_GPIO_IN, &in);
+ pci_read_config_dword(sii->pcibus, PCI_GPIO_OUT, &out);
+ pci_read_config_dword(sii->pcibus, PCI_GPIO_OUTEN, &outen);
/*
* Avoid glitching the clock if GPRS is already using it.
@@ -1690,9 +926,9 @@ int ai_clkctl_xtal(struct si_pub *sih, uint what, bool on)
out |= PCI_CFG_GPIO_XTAL;
if (what & PLL)
out |= PCI_CFG_GPIO_PLL;
- pci_write_config_dword(sii->pbus,
+ pci_write_config_dword(sii->pcibus,
PCI_GPIO_OUT, out);
- pci_write_config_dword(sii->pbus,
+ pci_write_config_dword(sii->pcibus,
PCI_GPIO_OUTEN, outen);
udelay(XTAL_ON_DELAY);
}
@@ -1700,7 +936,7 @@ int ai_clkctl_xtal(struct si_pub *sih, uint what, bool on)
/* turn pll on */
if (what & PLL) {
out &= ~PCI_CFG_GPIO_PLL;
- pci_write_config_dword(sii->pbus,
+ pci_write_config_dword(sii->pcibus,
PCI_GPIO_OUT, out);
mdelay(2);
}
@@ -1709,9 +945,9 @@ int ai_clkctl_xtal(struct si_pub *sih, uint what, bool on)
out &= ~PCI_CFG_GPIO_XTAL;
if (what & PLL)
out |= PCI_CFG_GPIO_PLL;
- pci_write_config_dword(sii->pbus,
+ pci_write_config_dword(sii->pcibus,
PCI_GPIO_OUT, out);
- pci_write_config_dword(sii->pbus,
+ pci_write_config_dword(sii->pcibus,
PCI_GPIO_OUTEN, outen);
}
@@ -1721,63 +957,52 @@ int ai_clkctl_xtal(struct si_pub *sih, uint what, bool on)
/* clk control mechanism through chipcommon, no policy checking */
static bool _ai_clkctl_cc(struct si_info *sii, uint mode)
{
- uint origidx = 0;
- struct chipcregs __iomem *cc;
+ struct bcma_device *cc;
u32 scc;
- uint intr_val = 0;
- bool fast = SI_FAST(sii);
/* chipcommon cores prior to rev6 don't support dynamic clock control */
- if (sii->pub.ccrev < 6)
+ if (ai_get_ccrev(&sii->pub) < 6)
return false;
- if (!fast) {
- INTR_OFF(sii, intr_val);
- origidx = sii->curidx;
- cc = (struct chipcregs __iomem *)
- ai_setcore(&sii->pub, CC_CORE_ID, 0);
- } else {
- cc = (struct chipcregs __iomem *) CCREGS_FAST(sii);
- if (cc == NULL)
- goto done;
- }
+ cc = ai_findcore(&sii->pub, BCMA_CORE_CHIPCOMMON, 0);
- if (!(sii->pub.cccaps & CC_CAP_PWR_CTL) && (sii->pub.ccrev < 20))
- goto done;
+ if (!(ai_get_cccaps(&sii->pub) & CC_CAP_PWR_CTL) &&
+ (ai_get_ccrev(&sii->pub) < 20))
+ return mode == CLK_FAST;
switch (mode) {
case CLK_FAST: /* FORCEHT, fast (pll) clock */
- if (sii->pub.ccrev < 10) {
+ if (ai_get_ccrev(&sii->pub) < 10) {
/*
* don't forget to force xtal back
* on before we clear SCC_DYN_XTAL..
*/
ai_clkctl_xtal(&sii->pub, XTAL, ON);
- SET_REG(&cc->slow_clk_ctl,
- (SCC_XC | SCC_FS | SCC_IP), SCC_IP);
- } else if (sii->pub.ccrev < 20) {
- OR_REG(&cc->system_clk_ctl, SYCC_HR);
+ bcma_maskset32(cc, CHIPCREGOFFS(slow_clk_ctl),
+ (SCC_XC | SCC_FS | SCC_IP), SCC_IP);
+ } else if (ai_get_ccrev(&sii->pub) < 20) {
+ bcma_set32(cc, CHIPCREGOFFS(system_clk_ctl), SYCC_HR);
} else {
- OR_REG(&cc->clk_ctl_st, CCS_FORCEHT);
+ bcma_set32(cc, CHIPCREGOFFS(clk_ctl_st), CCS_FORCEHT);
}
/* wait for the PLL */
- if (sii->pub.cccaps & CC_CAP_PMU) {
+ if (ai_get_cccaps(&sii->pub) & CC_CAP_PMU) {
u32 htavail = CCS_HTAVAIL;
- SPINWAIT(((R_REG(&cc->clk_ctl_st) & htavail)
- == 0), PMU_MAX_TRANSITION_DLY);
+ SPINWAIT(((bcma_read32(cc, CHIPCREGOFFS(clk_ctl_st)) &
+ htavail) == 0), PMU_MAX_TRANSITION_DLY);
} else {
udelay(PLL_DELAY);
}
break;
case CLK_DYNAMIC: /* enable dynamic clock control */
- if (sii->pub.ccrev < 10) {
- scc = R_REG(&cc->slow_clk_ctl);
+ if (ai_get_ccrev(&sii->pub) < 10) {
+ scc = bcma_read32(cc, CHIPCREGOFFS(slow_clk_ctl));
scc &= ~(SCC_FS | SCC_IP | SCC_XC);
if ((scc & SCC_SS_MASK) != SCC_SS_XTAL)
scc |= SCC_XC;
- W_REG(&cc->slow_clk_ctl, scc);
+ bcma_write32(cc, CHIPCREGOFFS(slow_clk_ctl), scc);
/*
* for dynamic control, we have to
@@ -1785,11 +1010,11 @@ static bool _ai_clkctl_cc(struct si_info *sii, uint mode)
*/
if (scc & SCC_XC)
ai_clkctl_xtal(&sii->pub, XTAL, OFF);
- } else if (sii->pub.ccrev < 20) {
+ } else if (ai_get_ccrev(&sii->pub) < 20) {
/* Instaclock */
- AND_REG(&cc->system_clk_ctl, ~SYCC_HR);
+ bcma_mask32(cc, CHIPCREGOFFS(system_clk_ctl), ~SYCC_HR);
} else {
- AND_REG(&cc->clk_ctl_st, ~CCS_FORCEHT);
+ bcma_mask32(cc, CHIPCREGOFFS(clk_ctl_st), ~CCS_FORCEHT);
}
break;
@@ -1797,11 +1022,6 @@ static bool _ai_clkctl_cc(struct si_info *sii, uint mode)
break;
}
- done:
- if (!fast) {
- ai_setcoreidx(&sii->pub, origidx);
- INTR_RESTORE(sii, intr_val);
- }
return mode == CLK_FAST;
}
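As the ai_pci_up()/ai_pci_down() hunks below show, the driver forces the fast (HT) clock while the bus is brought up and drops back to dynamic clocking on the way down:
        if (PCI_FORCEHT(sih))
                _ai_clkctl_cc(sii, CLK_FAST);     /* ai_pci_up() */
        if (PCI_FORCEHT(sih))
                _ai_clkctl_cc(sii, CLK_DYNAMIC);  /* ai_pci_down() */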
@@ -1820,46 +1040,25 @@ bool ai_clkctl_cc(struct si_pub *sih, uint mode)
sii = (struct si_info *)sih;
/* chipcommon cores prior to rev6 don't support dynamic clock control */
- if (sih->ccrev < 6)
+ if (ai_get_ccrev(sih) < 6)
return false;
- if (PCI_FORCEHT(sii))
+ if (PCI_FORCEHT(sih))
return mode == CLK_FAST;
return _ai_clkctl_cc(sii, mode);
}
-/* Build device path */
-int ai_devpath(struct si_pub *sih, char *path, int size)
-{
- int slen;
-
- if (!path || size <= 0)
- return -1;
-
- slen = snprintf(path, (size_t) size, "pci/%u/%u/",
- ((struct si_info *)sih)->pbus->bus->number,
- PCI_SLOT(((struct pci_dev *)
- (((struct si_info *)(sih))->pbus))->devfn));
-
- if (slen < 0 || slen >= size) {
- path[0] = '\0';
- return -1;
- }
-
- return 0;
-}
-
void ai_pci_up(struct si_pub *sih)
{
struct si_info *sii;
sii = (struct si_info *)sih;
- if (PCI_FORCEHT(sii))
+ if (PCI_FORCEHT(sih))
_ai_clkctl_cc(sii, CLK_FAST);
- if (PCIE(sii))
+ if (PCIE(sih))
pcicore_up(sii->pch, SI_PCIUP);
}
@@ -1882,7 +1081,7 @@ void ai_pci_down(struct si_pub *sih)
sii = (struct si_info *)sih;
/* release FORCEHT since chip is going to "down" state */
- if (PCI_FORCEHT(sii))
+ if (PCI_FORCEHT(sih))
_ai_clkctl_cc(sii, CLK_DYNAMIC);
pcicore_down(sii->pch, SI_PCIDOWN);
@@ -1895,42 +1094,23 @@ void ai_pci_down(struct si_pub *sih)
void ai_pci_setup(struct si_pub *sih, uint coremask)
{
struct si_info *sii;
- struct sbpciregs __iomem *regs = NULL;
- u32 siflag = 0, w;
- uint idx = 0;
+ u32 w;
sii = (struct si_info *)sih;
- if (PCI(sii)) {
- /* get current core index */
- idx = sii->curidx;
-
- /* we interrupt on this backplane flag number */
- siflag = ai_flag(sih);
-
- /* switch over to pci core */
- regs = ai_setcoreidx(sih, sii->pub.buscoreidx);
- }
-
/*
* Enable sb->pci interrupts. Assume
* PCI rev 2.3 support was added in pci core rev 6 and things changed..
*/
- if (PCIE(sii) || (PCI(sii) && ((sii->pub.buscorerev) >= 6))) {
+ if (PCIE(sih) || (PCI(sih) && (ai_get_buscorerev(sih) >= 6))) {
/* pci config write to set this core bit in PCIIntMask */
- pci_read_config_dword(sii->pbus, PCI_INT_MASK, &w);
+ pci_read_config_dword(sii->pcibus, PCI_INT_MASK, &w);
w |= (coremask << PCI_SBIM_SHIFT);
- pci_write_config_dword(sii->pbus, PCI_INT_MASK, w);
- } else {
- /* set sbintvec bit for our flag number */
- ai_setint(sih, siflag);
+ pci_write_config_dword(sii->pcibus, PCI_INT_MASK, w);
}
- if (PCI(sii)) {
- pcicore_pci_setup(sii->pch, regs);
-
- /* switch back to previous core */
- ai_setcoreidx(sih, idx);
+ if (PCI(sih)) {
+ pcicore_pci_setup(sii->pch);
}
}
@@ -1940,25 +1120,11 @@ void ai_pci_setup(struct si_pub *sih, uint coremask)
*/
int ai_pci_fixcfg(struct si_pub *sih)
{
- uint origidx;
- void __iomem *regs = NULL;
struct si_info *sii = (struct si_info *)sih;
/* Fixup PI in SROM shadow area to enable the correct PCI core access */
- /* save the current index */
- origidx = ai_coreidx(&sii->pub);
-
/* check 'pi' is correct and fix it if not */
- regs = ai_setcore(&sii->pub, sii->pub.buscoretype, 0);
- if (sii->pub.buscoretype == PCIE_CORE_ID)
- pcicore_fixcfg_pcie(sii->pch,
- (struct sbpcieregs __iomem *)regs);
- else if (sii->pub.buscoretype == PCI_CORE_ID)
- pcicore_fixcfg_pci(sii->pch, (struct sbpciregs __iomem *)regs);
-
- /* restore the original index */
- ai_setcoreidx(&sii->pub, origidx);
-
+ pcicore_fixcfg(sii->pch);
pcicore_hwup(sii->pch);
return 0;
}
@@ -1969,58 +1135,42 @@ u32 ai_gpiocontrol(struct si_pub *sih, u32 mask, u32 val, u8 priority)
uint regoff;
regoff = offsetof(struct chipcregs, gpiocontrol);
- return ai_corereg(sih, SI_CC_IDX, regoff, mask, val);
+ return ai_cc_reg(sih, regoff, mask, val);
}
void ai_chipcontrl_epa4331(struct si_pub *sih, bool on)
{
- struct si_info *sii;
- struct chipcregs __iomem *cc;
- uint origidx;
+ struct bcma_device *cc;
u32 val;
- sii = (struct si_info *)sih;
- origidx = ai_coreidx(sih);
-
- cc = (struct chipcregs __iomem *) ai_setcore(sih, CC_CORE_ID, 0);
-
- val = R_REG(&cc->chipcontrol);
+ cc = ai_findcore(sih, CC_CORE_ID, 0);
if (on) {
- if (sih->chippkg == 9 || sih->chippkg == 0xb)
+ if (ai_get_chippkg(sih) == 9 || ai_get_chippkg(sih) == 0xb)
/* Ext PA Controls for 4331 12x9 Package */
- W_REG(&cc->chipcontrol, val |
- CCTRL4331_EXTPA_EN |
- CCTRL4331_EXTPA_ON_GPIO2_5);
+ bcma_set32(cc, CHIPCREGOFFS(chipcontrol),
+ CCTRL4331_EXTPA_EN |
+ CCTRL4331_EXTPA_ON_GPIO2_5);
else
/* Ext PA Controls for 4331 12x12 Package */
- W_REG(&cc->chipcontrol,
- val | CCTRL4331_EXTPA_EN);
+ bcma_set32(cc, CHIPCREGOFFS(chipcontrol),
+ CCTRL4331_EXTPA_EN);
} else {
val &= ~(CCTRL4331_EXTPA_EN | CCTRL4331_EXTPA_ON_GPIO2_5);
- W_REG(&cc->chipcontrol, val);
+ bcma_mask32(cc, CHIPCREGOFFS(chipcontrol),
+ ~(CCTRL4331_EXTPA_EN | CCTRL4331_EXTPA_ON_GPIO2_5));
}
-
- ai_setcoreidx(sih, origidx);
}
/* Enable BT-COEX & Ex-PA for 4313 */
void ai_epa_4313war(struct si_pub *sih)
{
- struct si_info *sii;
- struct chipcregs __iomem *cc;
- uint origidx;
+ struct bcma_device *cc;
- sii = (struct si_info *)sih;
- origidx = ai_coreidx(sih);
-
- cc = ai_setcore(sih, CC_CORE_ID, 0);
+ cc = ai_findcore(sih, CC_CORE_ID, 0);
/* EPA Fix */
- W_REG(&cc->gpiocontrol,
- R_REG(&cc->gpiocontrol) | GPIO_CTRL_EPA_EN_MASK);
-
- ai_setcoreidx(sih, origidx);
+ bcma_set32(cc, CHIPCREGOFFS(gpiocontrol), GPIO_CTRL_EPA_EN_MASK);
}
/* check if the device is removed */
@@ -2031,7 +1181,7 @@ bool ai_deviceremoved(struct si_pub *sih)
sii = (struct si_info *)sih;
- pci_read_config_dword(sii->pbus, PCI_VENDOR_ID, &w);
+ pci_read_config_dword(sii->pcibus, PCI_VENDOR_ID, &w);
if ((w & 0xFFFF) != PCI_VENDOR_ID_BROADCOM)
return true;
@@ -2040,26 +1190,23 @@ bool ai_deviceremoved(struct si_pub *sih)
bool ai_is_sprom_available(struct si_pub *sih)
{
- if (sih->ccrev >= 31) {
- struct si_info *sii;
- uint origidx;
- struct chipcregs __iomem *cc;
+ struct si_info *sii = (struct si_info *)sih;
+
+ if (ai_get_ccrev(sih) >= 31) {
+ struct bcma_device *cc;
u32 sromctrl;
- if ((sih->cccaps & CC_CAP_SROM) == 0)
+ if ((ai_get_cccaps(sih) & CC_CAP_SROM) == 0)
return false;
- sii = (struct si_info *)sih;
- origidx = sii->curidx;
- cc = ai_setcoreidx(sih, SI_CC_IDX);
- sromctrl = R_REG(&cc->sromcontrol);
- ai_setcoreidx(sih, origidx);
+ cc = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
+ sromctrl = bcma_read32(cc, CHIPCREGOFFS(sromcontrol));
return sromctrl & SRC_PRESENT;
}
- switch (sih->chip) {
+ switch (ai_get_chip_id(sih)) {
case BCM4313_CHIP_ID:
- return (sih->chipst & CST4313_SPROM_PRESENT) != 0;
+ return (sii->chipst & CST4313_SPROM_PRESENT) != 0;
default:
return true;
}
@@ -2067,9 +1214,11 @@ bool ai_is_sprom_available(struct si_pub *sih)
bool ai_is_otp_disabled(struct si_pub *sih)
{
- switch (sih->chip) {
+ struct si_info *sii = (struct si_info *)sih;
+
+ switch (ai_get_chip_id(sih)) {
case BCM4313_CHIP_ID:
- return (sih->chipst & CST4313_OTP_PRESENT) == 0;
+ return (sii->chipst & CST4313_OTP_PRESENT) == 0;
/* These chips always have their OTP on */
case BCM43224_CHIP_ID:
case BCM43225_CHIP_ID:
@@ -2077,3 +1226,15 @@ bool ai_is_otp_disabled(struct si_pub *sih)
return false;
}
}
+
+uint ai_get_buscoretype(struct si_pub *sih)
+{
+ struct si_info *sii = (struct si_info *)sih;
+ return sii->buscore->id.id;
+}
+
+uint ai_get_buscorerev(struct si_pub *sih)
+{
+ struct si_info *sii = (struct si_info *)sih;
+ return sii->buscore->id.rev;
+}
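Direct field reads on struct si_pub elsewhere in the driver are converted to these accessors; condensed from the ai_doattach() hunk above:
        if (ai_get_chip_id(sih) == BCM43224_CHIP_ID &&
            ai_get_chiprev(sih) == 0)
                SI_MSG("Applying 43224A0 WARs\n");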
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h
index 106a7424a7cd..f84c6f781692 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h
@@ -17,6 +17,8 @@
#ifndef _BRCM_AIUTILS_H_
#define _BRCM_AIUTILS_H_
+#include <linux/bcma/bcma.h>
+
#include "types.h"
/*
@@ -38,88 +40,12 @@
/* PCIE Client Mode sb2pcitranslation2 (2 ZettaBytes), high 32 bits */
#define SI_PCIE_DMA_H32 0x80000000
-/* core codes */
-#define NODEV_CORE_ID 0x700 /* Invalid coreid */
-#define CC_CORE_ID 0x800 /* chipcommon core */
-#define ILINE20_CORE_ID 0x801 /* iline20 core */
-#define SRAM_CORE_ID 0x802 /* sram core */
-#define SDRAM_CORE_ID 0x803 /* sdram core */
-#define PCI_CORE_ID 0x804 /* pci core */
-#define MIPS_CORE_ID 0x805 /* mips core */
-#define ENET_CORE_ID 0x806 /* enet mac core */
-#define CODEC_CORE_ID 0x807 /* v90 codec core */
-#define USB_CORE_ID 0x808 /* usb 1.1 host/device core */
-#define ADSL_CORE_ID 0x809 /* ADSL core */
-#define ILINE100_CORE_ID 0x80a /* iline100 core */
-#define IPSEC_CORE_ID 0x80b /* ipsec core */
-#define UTOPIA_CORE_ID 0x80c /* utopia core */
-#define PCMCIA_CORE_ID 0x80d /* pcmcia core */
-#define SOCRAM_CORE_ID 0x80e /* internal memory core */
-#define MEMC_CORE_ID 0x80f /* memc sdram core */
-#define OFDM_CORE_ID 0x810 /* OFDM phy core */
-#define EXTIF_CORE_ID 0x811 /* external interface core */
-#define D11_CORE_ID 0x812 /* 802.11 MAC core */
-#define APHY_CORE_ID 0x813 /* 802.11a phy core */
-#define BPHY_CORE_ID 0x814 /* 802.11b phy core */
-#define GPHY_CORE_ID 0x815 /* 802.11g phy core */
-#define MIPS33_CORE_ID 0x816 /* mips3302 core */
-#define USB11H_CORE_ID 0x817 /* usb 1.1 host core */
-#define USB11D_CORE_ID 0x818 /* usb 1.1 device core */
-#define USB20H_CORE_ID 0x819 /* usb 2.0 host core */
-#define USB20D_CORE_ID 0x81a /* usb 2.0 device core */
-#define SDIOH_CORE_ID 0x81b /* sdio host core */
-#define ROBO_CORE_ID 0x81c /* roboswitch core */
-#define ATA100_CORE_ID 0x81d /* parallel ATA core */
-#define SATAXOR_CORE_ID 0x81e /* serial ATA & XOR DMA core */
-#define GIGETH_CORE_ID 0x81f /* gigabit ethernet core */
-#define PCIE_CORE_ID 0x820 /* pci express core */
-#define NPHY_CORE_ID 0x821 /* 802.11n 2x2 phy core */
-#define SRAMC_CORE_ID 0x822 /* SRAM controller core */
-#define MINIMAC_CORE_ID 0x823 /* MINI MAC/phy core */
-#define ARM11_CORE_ID 0x824 /* ARM 1176 core */
-#define ARM7S_CORE_ID 0x825 /* ARM7tdmi-s core */
-#define LPPHY_CORE_ID 0x826 /* 802.11a/b/g phy core */
-#define PMU_CORE_ID 0x827 /* PMU core */
-#define SSNPHY_CORE_ID 0x828 /* 802.11n single-stream phy core */
-#define SDIOD_CORE_ID 0x829 /* SDIO device core */
-#define ARMCM3_CORE_ID 0x82a /* ARM Cortex M3 core */
-#define HTPHY_CORE_ID 0x82b /* 802.11n 4x4 phy core */
-#define MIPS74K_CORE_ID 0x82c /* mips 74k core */
-#define GMAC_CORE_ID 0x82d /* Gigabit MAC core */
-#define DMEMC_CORE_ID 0x82e /* DDR1/2 memory controller core */
-#define PCIERC_CORE_ID 0x82f /* PCIE Root Complex core */
-#define OCP_CORE_ID 0x830 /* OCP2OCP bridge core */
-#define SC_CORE_ID 0x831 /* shared common core */
-#define AHB_CORE_ID 0x832 /* OCP2AHB bridge core */
-#define SPIH_CORE_ID 0x833 /* SPI host core */
-#define I2S_CORE_ID 0x834 /* I2S core */
-#define DMEMS_CORE_ID 0x835 /* SDR/DDR1 memory controller core */
-#define DEF_SHIM_COMP 0x837 /* SHIM component in ubus/6362 */
-#define OOB_ROUTER_CORE_ID 0x367 /* OOB router core ID */
-#define DEF_AI_COMP 0xfff /* Default component, in ai chips it
- * maps all unused address ranges
- */
-
/* chipcommon being the first core: */
#define SI_CC_IDX 0
/* SOC Interconnect types (aka chip types) */
#define SOCI_AI 1
-/* Common core control flags */
-#define SICF_BIST_EN 0x8000
-#define SICF_PME_EN 0x4000
-#define SICF_CORE_BITS 0x3ffc
-#define SICF_FGC 0x0002
-#define SICF_CLOCK_EN 0x0001
-
-/* Common core status flags */
-#define SISF_BIST_DONE 0x8000
-#define SISF_BIST_ERROR 0x4000
-#define SISF_GATED_CLK 0x2000
-#define SISF_DMA64 0x1000
-#define SISF_CORE_BITS 0x0fff
-
/* A register that is common to all cores to
* communicate w/PMU regarding clock control.
*/
@@ -220,26 +146,15 @@
* public (read-only) portion of aiutils handle returned by si_attach()
*/
struct si_pub {
- uint buscoretype; /* PCI_CORE_ID, PCIE_CORE_ID, PCMCIA_CORE_ID */
- uint buscorerev; /* buscore rev */
- uint buscoreidx; /* buscore index */
int ccrev; /* chip common core rev */
u32 cccaps; /* chip common capabilities */
- u32 cccaps_ext; /* chip common capabilities extension */
int pmurev; /* pmu core rev */
u32 pmucaps; /* pmu capabilities */
uint boardtype; /* board type */
uint boardvendor; /* board vendor */
- uint boardflags; /* board flags */
- uint boardflags2; /* board flags2 */
uint chip; /* chip number */
uint chiprev; /* chip revision */
uint chippkg; /* chip package option */
- u32 chipst; /* chip status */
- bool issim; /* chip is in simulation or emulation */
- uint socirev; /* SOC interconnect rev */
- bool pci_pr32414;
-
};
struct pci_dev;
@@ -255,38 +170,13 @@ struct gpioh_item {
/* misc si info needed by some of the routines */
struct si_info {
struct si_pub pub; /* back plane public state (must be first) */
- struct pci_dev *pbus; /* handle to pci bus */
- uint dev_coreid; /* the core provides driver functions */
- void *intr_arg; /* interrupt callback function arg */
- u32 (*intrsoff_fn) (void *intr_arg); /* turns chip interrupts off */
- /* restore chip interrupts */
- void (*intrsrestore_fn) (void *intr_arg, u32 arg);
- /* check if interrupts are enabled */
- bool (*intrsenabled_fn) (void *intr_arg);
-
+ struct bcma_bus *icbus; /* handle to soc interconnect bus */
+ struct pci_dev *pcibus; /* handle to pci bus */
struct pcicore_info *pch; /* PCI/E core handle */
-
+ struct bcma_device *buscore;
struct list_head var_list; /* list of srom variables */
- void __iomem *curmap; /* current regs va */
- void __iomem *regs[SI_MAXCORES]; /* other regs va */
-
- uint curidx; /* current core index */
- uint numcores; /* # discovered cores */
- uint coreid[SI_MAXCORES]; /* id of each core */
- u32 coresba[SI_MAXCORES]; /* backplane address of each core */
- void *regs2[SI_MAXCORES]; /* 2nd virtual address per core (usbh20) */
- u32 coresba2[SI_MAXCORES]; /* 2nd phys address per core (usbh20) */
- u32 coresba_size[SI_MAXCORES]; /* backplane address space size */
- u32 coresba2_size[SI_MAXCORES]; /* second address space size */
-
- void *curwrap; /* current wrapper va */
- void *wrappers[SI_MAXCORES]; /* other cores wrapper va */
- u32 wrapba[SI_MAXCORES]; /* address of controlling wrapper */
-
- u32 cia[SI_MAXCORES]; /* erom cia entry for each core */
- u32 cib[SI_MAXCORES]; /* erom cia entry for each core */
- u32 oob_router; /* oob router registers for axi */
+ u32 chipst; /* chip status */
};
/*
@@ -299,52 +189,15 @@ struct si_info {
/* AMBA Interconnect exported externs */
-extern uint ai_flag(struct si_pub *sih);
-extern void ai_setint(struct si_pub *sih, int siflag);
-extern uint ai_coreidx(struct si_pub *sih);
-extern uint ai_corevendor(struct si_pub *sih);
-extern uint ai_corerev(struct si_pub *sih);
-extern bool ai_iscoreup(struct si_pub *sih);
-extern u32 ai_core_cflags(struct si_pub *sih, u32 mask, u32 val);
-extern void ai_core_cflags_wo(struct si_pub *sih, u32 mask, u32 val);
-extern u32 ai_core_sflags(struct si_pub *sih, u32 mask, u32 val);
-extern uint ai_corereg(struct si_pub *sih, uint coreidx, uint regoff, uint mask,
- uint val);
-extern void ai_core_reset(struct si_pub *sih, u32 bits, u32 resetbits);
-extern void ai_core_disable(struct si_pub *sih, u32 bits);
-extern int ai_numaddrspaces(struct si_pub *sih);
-extern u32 ai_addrspace(struct si_pub *sih, uint asidx);
-extern u32 ai_addrspacesize(struct si_pub *sih, uint asidx);
-extern void ai_write_wrap_reg(struct si_pub *sih, u32 offset, u32 val);
+extern struct bcma_device *ai_findcore(struct si_pub *sih,
+ u16 coreid, u16 coreunit);
+extern u32 ai_core_cflags(struct bcma_device *core, u32 mask, u32 val);
/* === exported functions === */
-extern struct si_pub *ai_attach(void __iomem *regs, struct pci_dev *sdh);
+extern struct si_pub *ai_attach(struct bcma_bus *pbus);
extern void ai_detach(struct si_pub *sih);
-extern uint ai_coreid(struct si_pub *sih);
-extern uint ai_corerev(struct si_pub *sih);
-extern uint ai_corereg(struct si_pub *sih, uint coreidx, uint regoff, uint mask,
- uint val);
-extern void ai_write_wrapperreg(struct si_pub *sih, u32 offset, u32 val);
-extern u32 ai_core_cflags(struct si_pub *sih, u32 mask, u32 val);
-extern u32 ai_core_sflags(struct si_pub *sih, u32 mask, u32 val);
-extern bool ai_iscoreup(struct si_pub *sih);
-extern uint ai_findcoreidx(struct si_pub *sih, uint coreid, uint coreunit);
-extern void __iomem *ai_setcoreidx(struct si_pub *sih, uint coreidx);
-extern void __iomem *ai_setcore(struct si_pub *sih, uint coreid, uint coreunit);
-extern void __iomem *ai_switch_core(struct si_pub *sih, uint coreid,
- uint *origidx, uint *intr_val);
-extern void ai_restore_core(struct si_pub *sih, uint coreid, uint intr_val);
-extern void ai_core_reset(struct si_pub *sih, u32 bits, u32 resetbits);
-extern void ai_core_disable(struct si_pub *sih, u32 bits);
-extern u32 ai_alp_clock(struct si_pub *sih);
-extern u32 ai_ilp_clock(struct si_pub *sih);
+extern uint ai_cc_reg(struct si_pub *sih, uint regoff, u32 mask, u32 val);
extern void ai_pci_setup(struct si_pub *sih, uint coremask);
-extern void ai_setint(struct si_pub *sih, int siflag);
-extern bool ai_backplane64(struct si_pub *sih);
-extern void ai_register_intr_callback(struct si_pub *sih, void *intrsoff_fn,
- void *intrsrestore_fn,
- void *intrsenabled_fn, void *intr_arg);
-extern void ai_deregister_intr_callback(struct si_pub *sih);
extern void ai_clkctl_init(struct si_pub *sih);
extern u16 ai_clkctl_fast_pwrup_delay(struct si_pub *sih);
extern bool ai_clkctl_cc(struct si_pub *sih, uint mode);
@@ -359,13 +212,6 @@ extern bool ai_is_otp_disabled(struct si_pub *sih);
/* SPROM availability */
extern bool ai_is_sprom_available(struct si_pub *sih);
-/*
- * Build device path. Path size must be >= SI_DEVPATH_BUFSZ.
- * The returned path is NULL terminated and has trailing '/'.
- * Return 0 on success, nonzero otherwise.
- */
-extern int ai_devpath(struct si_pub *sih, char *path, int size);
-
extern void ai_pci_sleep(struct si_pub *sih);
extern void ai_pci_down(struct si_pub *sih);
extern void ai_pci_up(struct si_pub *sih);
@@ -375,4 +221,52 @@ extern void ai_chipcontrl_epa4331(struct si_pub *sih, bool on);
/* Enable Ex-PA for 4313 */
extern void ai_epa_4313war(struct si_pub *sih);
+extern uint ai_get_buscoretype(struct si_pub *sih);
+extern uint ai_get_buscorerev(struct si_pub *sih);
+
+static inline int ai_get_ccrev(struct si_pub *sih)
+{
+ return sih->ccrev;
+}
+
+static inline u32 ai_get_cccaps(struct si_pub *sih)
+{
+ return sih->cccaps;
+}
+
+static inline int ai_get_pmurev(struct si_pub *sih)
+{
+ return sih->pmurev;
+}
+
+static inline u32 ai_get_pmucaps(struct si_pub *sih)
+{
+ return sih->pmucaps;
+}
+
+static inline uint ai_get_boardtype(struct si_pub *sih)
+{
+ return sih->boardtype;
+}
+
+static inline uint ai_get_boardvendor(struct si_pub *sih)
+{
+ return sih->boardvendor;
+}
+
+static inline uint ai_get_chip_id(struct si_pub *sih)
+{
+ return sih->chip;
+}
+
+static inline uint ai_get_chiprev(struct si_pub *sih)
+{
+ return sih->chiprev;
+}
+
+static inline uint ai_get_chippkg(struct si_pub *sih)
+{
+ return sih->chippkg;
+}
+
#endif /* _BRCM_AIUTILS_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
index 7f27dbdb6b60..90911eec0cf5 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
@@ -649,7 +649,7 @@ brcms_c_sendampdu(struct ampdu_info *ampdu, struct brcms_txq_info *qi,
len = roundup(len, 4);
ampdu_len += (len + (ndelim + 1) * AMPDU_DELIMITER_LEN);
- dma_len += (u16) brcmu_pkttotlen(p);
+ dma_len += (u16) p->len;
BCMMSG(wlc->wiphy, "wl%d: ampdu_len %d"
" seg_cnt %d null delim %d\n",
@@ -741,9 +741,7 @@ brcms_c_sendampdu(struct ampdu_info *ampdu, struct brcms_txq_info *qi,
if (p) {
if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) &&
((u8) (p->priority) == tid)) {
-
- plen = brcmu_pkttotlen(p) +
- AMPDU_MAX_MPDU_OVERHEAD;
+ plen = p->len + AMPDU_MAX_MPDU_OVERHEAD;
plen = max(scb_ampdu->min_len, plen);
if ((plen + ampdu_len) > max_ampdu_bytes) {
@@ -1120,14 +1118,17 @@ brcms_c_ampdu_dotxstatus(struct ampdu_info *ampdu, struct scb *scb,
u8 status_delay = 0;
/* wait till the next 8 bytes of txstatus is available */
- while (((s1 = R_REG(&wlc->regs->frmtxstatus)) & TXS_V) == 0) {
+ s1 = bcma_read32(wlc->hw->d11core, D11REGOFFS(frmtxstatus));
+ while ((s1 & TXS_V) == 0) {
udelay(1);
status_delay++;
if (status_delay > 10)
return; /* error condition */
+ s1 = bcma_read32(wlc->hw->d11core,
+ D11REGOFFS(frmtxstatus));
}
- s2 = R_REG(&wlc->regs->frmtxstatus2);
+ s2 = bcma_read32(wlc->hw->d11core, D11REGOFFS(frmtxstatus2));
}
if (scb) {
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/channel.c b/drivers/net/wireless/brcm80211/brcmsmac/channel.c
index 89ad1b7dab8f..55e9f45fce22 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/channel.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/channel.c
@@ -1153,121 +1153,6 @@ brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm, u16 chanspec,
&txpwr);
}
-#ifdef POWER_DBG
-static void wlc_phy_txpower_limits_dump(struct txpwr_limits *txpwr)
-{
- int i;
- char buf[80];
- char fraction[4][4] = { " ", ".25", ".5 ", ".75" };
-
- sprintf(buf, "CCK ");
- for (i = 0; i < BRCMS_NUM_RATES_CCK; i++)
- sprintf(buf[strlen(buf)], " %2d%s",
- txpwr->cck[i] / BRCMS_TXPWR_DB_FACTOR,
- fraction[txpwr->cck[i] % BRCMS_TXPWR_DB_FACTOR]);
- printk(KERN_DEBUG "%s\n", buf);
-
- sprintf(buf, "20 MHz OFDM SISO ");
- for (i = 0; i < BRCMS_NUM_RATES_OFDM; i++)
- sprintf(buf[strlen(buf)], " %2d%s",
- txpwr->ofdm[i] / BRCMS_TXPWR_DB_FACTOR,
- fraction[txpwr->ofdm[i] % BRCMS_TXPWR_DB_FACTOR]);
- printk(KERN_DEBUG "%s\n", buf);
-
- sprintf(buf, "20 MHz OFDM CDD ");
- for (i = 0; i < BRCMS_NUM_RATES_OFDM; i++)
- sprintf(buf[strlen(buf)], " %2d%s",
- txpwr->ofdm_cdd[i] / BRCMS_TXPWR_DB_FACTOR,
- fraction[txpwr->ofdm_cdd[i] % BRCMS_TXPWR_DB_FACTOR]);
- printk(KERN_DEBUG "%s\n", buf);
-
- sprintf(buf, "40 MHz OFDM SISO ");
- for (i = 0; i < BRCMS_NUM_RATES_OFDM; i++)
- sprintf(buf[strlen(buf)], " %2d%s",
- txpwr->ofdm_40_siso[i] / BRCMS_TXPWR_DB_FACTOR,
- fraction[txpwr->ofdm_40_siso[i] %
- BRCMS_TXPWR_DB_FACTOR]);
- printk(KERN_DEBUG "%s\n", buf);
-
- sprintf(buf, "40 MHz OFDM CDD ");
- for (i = 0; i < BRCMS_NUM_RATES_OFDM; i++)
- sprintf(buf[strlen(buf)], " %2d%s",
- txpwr->ofdm_40_cdd[i] / BRCMS_TXPWR_DB_FACTOR,
- fraction[txpwr->ofdm_40_cdd[i] %
- BRCMS_TXPWR_DB_FACTOR]);
- printk(KERN_DEBUG "%s\n", buf);
-
- sprintf(buf, "20 MHz MCS0-7 SISO ");
- for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++)
- sprintf(buf[strlen(buf)], " %2d%s",
- txpwr->mcs_20_siso[i] / BRCMS_TXPWR_DB_FACTOR,
- fraction[txpwr->mcs_20_siso[i] %
- BRCMS_TXPWR_DB_FACTOR]);
- printk(KERN_DEBUG "%s\n", buf);
-
- sprintf(buf, "20 MHz MCS0-7 CDD ");
- for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++)
- sprintf(buf[strlen(buf)], " %2d%s",
- txpwr->mcs_20_cdd[i] / BRCMS_TXPWR_DB_FACTOR,
- fraction[txpwr->mcs_20_cdd[i] %
- BRCMS_TXPWR_DB_FACTOR]);
- printk(KERN_DEBUG "%s\n", buf);
-
- sprintf(buf, "20 MHz MCS0-7 STBC ");
- for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++)
- sprintf(buf[strlen(buf)], " %2d%s",
- txpwr->mcs_20_stbc[i] / BRCMS_TXPWR_DB_FACTOR,
- fraction[txpwr->mcs_20_stbc[i] %
- BRCMS_TXPWR_DB_FACTOR]);
- printk(KERN_DEBUG "%s\n", buf);
-
- sprintf(buf, "20 MHz MCS8-15 SDM ");
- for (i = 0; i < BRCMS_NUM_RATES_MCS_2_STREAM; i++)
- sprintf(buf[strlen(buf)], " %2d%s",
- txpwr->mcs_20_mimo[i] / BRCMS_TXPWR_DB_FACTOR,
- fraction[txpwr->mcs_20_mimo[i] %
- BRCMS_TXPWR_DB_FACTOR]);
- printk(KERN_DEBUG "%s\n", buf);
-
- sprintf(buf, "40 MHz MCS0-7 SISO ");
- for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++)
- sprintf(buf[strlen(buf)], " %2d%s",
- txpwr->mcs_40_siso[i] / BRCMS_TXPWR_DB_FACTOR,
- fraction[txpwr->mcs_40_siso[i] %
- BRCMS_TXPWR_DB_FACTOR]);
- printk(KERN_DEBUG "%s\n", buf);
-
- sprintf(buf, "40 MHz MCS0-7 CDD ");
- for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++)
- sprintf(buf[strlen(buf)], " %2d%s",
- txpwr->mcs_40_cdd[i] / BRCMS_TXPWR_DB_FACTOR,
- fraction[txpwr->mcs_40_cdd[i] %
- BRCMS_TXPWR_DB_FACTOR]);
- printk(KERN_DEBUG "%s\n", buf);
-
- sprintf(buf, "40 MHz MCS0-7 STBC ");
- for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++)
- sprintf(buf[strlen(buf)], " %2d%s",
- txpwr->mcs_40_stbc[i] / BRCMS_TXPWR_DB_FACTOR,
- fraction[txpwr->mcs_40_stbc[i] %
- BRCMS_TXPWR_DB_FACTOR]);
- printk(KERN_DEBUG "%s\n", buf);
-
- sprintf(buf, "40 MHz MCS8-15 SDM ");
- for (i = 0; i < BRCMS_NUM_RATES_MCS_2_STREAM; i++)
- sprintf(buf[strlen(buf)], " %2d%s",
- txpwr->mcs_40_mimo[i] / BRCMS_TXPWR_DB_FACTOR,
- fraction[txpwr->mcs_40_mimo[i] %
- BRCMS_TXPWR_DB_FACTOR]);
- }
- printk(KERN_DEBUG "%s\n", buf);
-
- printk(KERN_DEBUG "MCS32 %2d%s\n",
- txpwr->mcs32 / BRCMS_TXPWR_DB_FACTOR,
- fraction[txpwr->mcs32 % BRCMS_TXPWR_DB_FACTOR]);
-}
-#endif /* POWER_DBG */
-
void
brcms_c_channel_reg_limits(struct brcms_cm_info *wlc_cm, u16 chanspec,
struct txpwr_limits *txpwr)
@@ -1478,9 +1363,6 @@ brcms_c_channel_reg_limits(struct brcms_cm_info *wlc_cm, u16 chanspec,
txpwr->mcs_40_stbc[i] = txpwr->mcs_40_cdd[i];
}
-#ifdef POWER_DBG
- wlc_phy_txpower_limits_dump(txpwr);
-#endif
return;
}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/d11.h b/drivers/net/wireless/brcm80211/brcmsmac/d11.h
index ed51616abc85..1948cb2771e9 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/d11.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/d11.h
@@ -430,6 +430,9 @@ struct d11regs {
u16 PAD[0x380]; /* 0x800 - 0xEFE */
};
+/* d11 register field offset */
+#define D11REGOFFS(field) offsetof(struct d11regs, field)
+
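D11 register accesses now go through the bcma core handle by byte offset rather than through a mapped struct pointer; the ampdu.c hunk earlier in this patch uses the macro as:
        s1 = bcma_read32(wlc->hw->d11core, D11REGOFFS(frmtxstatus));
        s2 = bcma_read32(wlc->hw->d11core, D11REGOFFS(frmtxstatus2));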
#define PIHR_BASE 0x0400 /* byte address of packed IHR region */
/* biststatus */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/dma.c b/drivers/net/wireless/brcm80211/brcmsmac/dma.c
index 6ebec8f42846..2e90a9a16ed6 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/dma.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/dma.c
@@ -13,8 +13,10 @@
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/slab.h>
-#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/pci.h>
@@ -22,6 +24,14 @@
#include <aiutils.h>
#include "types.h"
#include "dma.h"
+#include "soc.h"
+
+/*
+ * dma register field offset calculation
+ */
+#define DMA64REGOFFS(field) offsetof(struct dma64regs, field)
+#define DMA64TXREGOFFS(di, field) (di->d64txregbase + DMA64REGOFFS(field))
+#define DMA64RXREGOFFS(di, field) (di->d64rxregbase + DMA64REGOFFS(field))
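Each DMA engine now keeps only a base offset into the core's register space, so a register address is the base plus offsetof() into struct dma64regs; for example, the tx control register is addressed as:
        control = bcma_read32(di->core, DMA64TXREGOFFS(di, control));
        /* expands to bcma_read32(di->core,
         *   di->d64txregbase + offsetof(struct dma64regs, control)) */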
/*
* DMA hardware requires each descriptor ring to be 8kB aligned, and fit within
@@ -168,26 +178,25 @@
/* debug/trace */
#ifdef BCMDBG
-#define DMA_ERROR(args) \
- do { \
- if (!(*di->msg_level & 1)) \
- ; \
- else \
- printk args; \
- } while (0)
-#define DMA_TRACE(args) \
- do { \
- if (!(*di->msg_level & 2)) \
- ; \
- else \
- printk args; \
- } while (0)
+#define DMA_ERROR(fmt, ...) \
+do { \
+ if (*di->msg_level & 1) \
+ pr_debug("%s: " fmt, __func__, ##__VA_ARGS__); \
+} while (0)
+#define DMA_TRACE(fmt, ...) \
+do { \
+ if (*di->msg_level & 2) \
+ pr_debug("%s: " fmt, __func__, ##__VA_ARGS__); \
+} while (0)
#else
-#define DMA_ERROR(args)
-#define DMA_TRACE(args)
+#define DMA_ERROR(fmt, ...) \
+ no_printk(fmt, ##__VA_ARGS__)
+#define DMA_TRACE(fmt, ...) \
+ no_printk(fmt, ##__VA_ARGS__)
#endif /* BCMDBG */
-#define DMA_NONE(args)
+#define DMA_NONE(fmt, ...) \
+ no_printk(fmt, ##__VA_ARGS__)
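Call sites drop the old double-parenthesis form and pass a printf-style format directly, as the converted hunks below show:
        DMA_ERROR("%s: DMA64 tx doesn't have AE set\n", di->name);
        DMA_TRACE("%s: dma_rx len %d\n", di->name, len);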
#define MAXNAMEL 8 /* 8 char names */
@@ -218,15 +227,16 @@ struct dma_info {
uint *msg_level; /* message level pointer */
char name[MAXNAMEL]; /* callers name for diag msgs */
- struct pci_dev *pbus; /* bus handle */
+ struct bcma_device *core;
+ struct device *dmadev;
bool dma64; /* this dma engine is operating in 64-bit mode */
bool addrext; /* this dma engine supports DmaExtendedAddrChanges */
/* 64-bit dma tx engine registers */
- struct dma64regs __iomem *d64txregs;
+ uint d64txregbase;
/* 64-bit dma rx engine registers */
- struct dma64regs __iomem *d64rxregs;
+ uint d64rxregbase;
/* pointer to dma64 tx descriptor ring */
struct dma64desc *txd64;
/* pointer to dma64 rx descriptor ring */
@@ -361,7 +371,7 @@ static uint _dma_ctrlflags(struct dma_info *di, uint mask, uint flags)
uint dmactrlflags;
if (di == NULL) {
- DMA_ERROR(("_dma_ctrlflags: NULL dma handle\n"));
+ DMA_ERROR("NULL dma handle\n");
return 0;
}
@@ -373,15 +383,16 @@ static uint _dma_ctrlflags(struct dma_info *di, uint mask, uint flags)
if (dmactrlflags & DMA_CTRL_PEN) {
u32 control;
- control = R_REG(&di->d64txregs->control);
- W_REG(&di->d64txregs->control,
+ control = bcma_read32(di->core, DMA64TXREGOFFS(di, control));
+ bcma_write32(di->core, DMA64TXREGOFFS(di, control),
control | D64_XC_PD);
- if (R_REG(&di->d64txregs->control) & D64_XC_PD)
+ if (bcma_read32(di->core, DMA64TXREGOFFS(di, control)) &
+ D64_XC_PD)
/* We *can* disable it so it is supported,
* restore control register
*/
- W_REG(&di->d64txregs->control,
- control);
+ bcma_write32(di->core, DMA64TXREGOFFS(di, control),
+ control);
else
/* Not supported, don't allow it to be enabled */
dmactrlflags &= ~DMA_CTRL_PEN;
@@ -392,12 +403,12 @@ static uint _dma_ctrlflags(struct dma_info *di, uint mask, uint flags)
return dmactrlflags;
}
-static bool _dma64_addrext(struct dma64regs __iomem *dma64regs)
+static bool _dma64_addrext(struct dma_info *di, uint ctrl_offset)
{
u32 w;
- OR_REG(&dma64regs->control, D64_XC_AE);
- w = R_REG(&dma64regs->control);
- AND_REG(&dma64regs->control, ~D64_XC_AE);
+ bcma_set32(di->core, ctrl_offset, D64_XC_AE);
+ w = bcma_read32(di->core, ctrl_offset);
+ bcma_mask32(di->core, ctrl_offset, ~D64_XC_AE);
return (w & D64_XC_AE) == D64_XC_AE;
}
@@ -410,15 +421,15 @@ static bool _dma_isaddrext(struct dma_info *di)
/* DMA64 supports full 32- or 64-bit operation. AE is always valid */
/* not all tx or rx channel are available */
- if (di->d64txregs != NULL) {
- if (!_dma64_addrext(di->d64txregs))
- DMA_ERROR(("%s: _dma_isaddrext: DMA64 tx doesn't have "
- "AE set\n", di->name));
+ if (di->d64txregbase != 0) {
+ if (!_dma64_addrext(di, DMA64TXREGOFFS(di, control)))
+ DMA_ERROR("%s: DMA64 tx doesn't have AE set\n",
+ di->name);
return true;
- } else if (di->d64rxregs != NULL) {
- if (!_dma64_addrext(di->d64rxregs))
- DMA_ERROR(("%s: _dma_isaddrext: DMA64 rx doesn't have "
- "AE set\n", di->name));
+ } else if (di->d64rxregbase != 0) {
+ if (!_dma64_addrext(di, DMA64RXREGOFFS(di, control)))
+ DMA_ERROR("%s: DMA64 rx doesn't have AE set\n",
+ di->name);
return true;
}
@@ -430,14 +441,14 @@ static bool _dma_descriptor_align(struct dma_info *di)
u32 addrl;
/* Check to see if the descriptors need to be aligned on 4K/8K or not */
- if (di->d64txregs != NULL) {
- W_REG(&di->d64txregs->addrlow, 0xff0);
- addrl = R_REG(&di->d64txregs->addrlow);
+ if (di->d64txregbase != 0) {
+ bcma_write32(di->core, DMA64TXREGOFFS(di, addrlow), 0xff0);
+ addrl = bcma_read32(di->core, DMA64TXREGOFFS(di, addrlow));
if (addrl != 0)
return false;
- } else if (di->d64rxregs != NULL) {
- W_REG(&di->d64rxregs->addrlow, 0xff0);
- addrl = R_REG(&di->d64rxregs->addrlow);
+ } else if (di->d64rxregbase != 0) {
+ bcma_write32(di->core, DMA64RXREGOFFS(di, addrlow), 0xff0);
+ addrl = bcma_read32(di->core, DMA64RXREGOFFS(di, addrlow));
if (addrl != 0)
return false;
}
@@ -448,7 +459,7 @@ static bool _dma_descriptor_align(struct dma_info *di)
* Descriptor table must start at the DMA hardware dictated alignment, so
* allocated memory must be large enough to support this requirement.
*/
-static void *dma_alloc_consistent(struct pci_dev *pdev, uint size,
+static void *dma_alloc_consistent(struct dma_info *di, uint size,
u16 align_bits, uint *alloced,
dma_addr_t *pap)
{
@@ -458,7 +469,7 @@ static void *dma_alloc_consistent(struct pci_dev *pdev, uint size,
size += align;
*alloced = size;
}
- return pci_alloc_consistent(pdev, size, pap);
+ return dma_alloc_coherent(di->dmadev, size, pap, GFP_ATOMIC);
}
static
@@ -484,7 +495,7 @@ static void *dma_ringalloc(struct dma_info *di, u32 boundary, uint size,
u32 desc_strtaddr;
u32 alignbytes = 1 << *alignbits;
- va = dma_alloc_consistent(di->pbus, size, *alignbits, alloced, descpa);
+ va = dma_alloc_consistent(di, size, *alignbits, alloced, descpa);
if (NULL == va)
return NULL;
@@ -493,8 +504,8 @@ static void *dma_ringalloc(struct dma_info *di, u32 boundary, uint size,
if (((desc_strtaddr + size - 1) & boundary) != (desc_strtaddr
& boundary)) {
*alignbits = dma_align_sizetobits(size);
- pci_free_consistent(di->pbus, size, va, *descpa);
- va = dma_alloc_consistent(di->pbus, size, *alignbits,
+ dma_free_coherent(di->dmadev, size, va, *descpa);
+ va = dma_alloc_consistent(di, size, *alignbits,
alloced, descpa);
}
return va;
@@ -519,8 +530,8 @@ static bool dma64_alloc(struct dma_info *di, uint direction)
va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits,
&alloced, &di->txdpaorig);
if (va == NULL) {
- DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(ntxd)"
- " failed\n", di->name));
+ DMA_ERROR("%s: DMA_ALLOC_CONSISTENT(ntxd) failed\n",
+ di->name);
return false;
}
align = (1 << align_bits);
@@ -533,8 +544,8 @@ static bool dma64_alloc(struct dma_info *di, uint direction)
va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits,
&alloced, &di->rxdpaorig);
if (va == NULL) {
- DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(nrxd)"
- " failed\n", di->name));
+ DMA_ERROR("%s: DMA_ALLOC_CONSISTENT(nrxd) failed\n",
+ di->name);
return false;
}
align = (1 << align_bits);
@@ -554,12 +565,13 @@ static bool _dma_alloc(struct dma_info *di, uint direction)
}
struct dma_pub *dma_attach(char *name, struct si_pub *sih,
- void __iomem *dmaregstx, void __iomem *dmaregsrx,
- uint ntxd, uint nrxd,
- uint rxbufsize, int rxextheadroom,
- uint nrxpost, uint rxoffset, uint *msg_level)
+ struct bcma_device *core,
+ uint txregbase, uint rxregbase, uint ntxd, uint nrxd,
+ uint rxbufsize, int rxextheadroom,
+ uint nrxpost, uint rxoffset, uint *msg_level)
{
struct dma_info *di;
+ u8 rev = core->id.rev;
uint size;
/* allocate private info structure */
@@ -570,11 +582,13 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
di->msg_level = msg_level ? msg_level : &dma_msg_level;
- di->dma64 = ((ai_core_sflags(sih, 0, 0) & SISF_DMA64) == SISF_DMA64);
+ di->dma64 =
+ ((bcma_aread32(core, BCMA_IOST) & SISF_DMA64) == SISF_DMA64);
- /* init dma reg pointer */
- di->d64txregs = (struct dma64regs __iomem *) dmaregstx;
- di->d64rxregs = (struct dma64regs __iomem *) dmaregsrx;
+ /* init dma reg info */
+ di->core = core;
+ di->d64txregbase = txregbase;
+ di->d64rxregbase = rxregbase;
/*
* Default flags (which can be changed by the driver calling
@@ -583,17 +597,17 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
*/
_dma_ctrlflags(di, DMA_CTRL_ROC | DMA_CTRL_PEN, 0);
- DMA_TRACE(("%s: dma_attach: %s flags 0x%x ntxd %d nrxd %d "
- "rxbufsize %d rxextheadroom %d nrxpost %d rxoffset %d "
- "dmaregstx %p dmaregsrx %p\n", name, "DMA64",
- di->dma.dmactrlflags, ntxd, nrxd, rxbufsize,
- rxextheadroom, nrxpost, rxoffset, dmaregstx, dmaregsrx));
+ DMA_TRACE("%s: %s flags 0x%x ntxd %d nrxd %d "
+ "rxbufsize %d rxextheadroom %d nrxpost %d rxoffset %d "
+ "txregbase %u rxregbase %u\n", name, "DMA64",
+ di->dma.dmactrlflags, ntxd, nrxd, rxbufsize,
+ rxextheadroom, nrxpost, rxoffset, txregbase, rxregbase);
/* make a private copy of our callers name */
strncpy(di->name, name, MAXNAMEL);
di->name[MAXNAMEL - 1] = '\0';
- di->pbus = ((struct si_info *)sih)->pbus;
+ di->dmadev = core->dma_dev;
/* save tunables */
di->ntxd = (u16) ntxd;
@@ -625,12 +639,12 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
di->dataoffsetlow = di->ddoffsetlow;
di->dataoffsethigh = di->ddoffsethigh;
/* WAR64450 : DMACtl.Addr ext fields are not supported in SDIOD core. */
- if ((ai_coreid(sih) == SDIOD_CORE_ID)
- && ((ai_corerev(sih) > 0) && (ai_corerev(sih) <= 2)))
- di->addrext = 0;
- else if ((ai_coreid(sih) == I2S_CORE_ID) &&
- ((ai_corerev(sih) == 0) || (ai_corerev(sih) == 1)))
- di->addrext = 0;
+ if ((core->id.id == SDIOD_CORE_ID)
+ && ((rev > 0) && (rev <= 2)))
+ di->addrext = false;
+ else if ((core->id.id == I2S_CORE_ID) &&
+ ((rev == 0) || (rev == 1)))
+ di->addrext = false;
else
di->addrext = _dma_isaddrext(di);
@@ -645,8 +659,8 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
di->dmadesc_align = 4; /* 16 byte alignment */
}
- DMA_NONE(("DMA descriptor align_needed %d, align %d\n",
- di->aligndesc_4k, di->dmadesc_align));
+ DMA_NONE("DMA descriptor align_needed %d, align %d\n",
+ di->aligndesc_4k, di->dmadesc_align);
/* allocate tx packet pointer vector */
if (ntxd) {
@@ -684,21 +698,21 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
if ((di->ddoffsetlow != 0) && !di->addrext) {
if (di->txdpa > SI_PCI_DMA_SZ) {
- DMA_ERROR(("%s: dma_attach: txdpa 0x%x: addrext not "
- "supported\n", di->name, (u32)di->txdpa));
+ DMA_ERROR("%s: txdpa 0x%x: addrext not supported\n",
+ di->name, (u32)di->txdpa);
goto fail;
}
if (di->rxdpa > SI_PCI_DMA_SZ) {
- DMA_ERROR(("%s: dma_attach: rxdpa 0x%x: addrext not "
- "supported\n", di->name, (u32)di->rxdpa));
+ DMA_ERROR("%s: rxdpa 0x%x: addrext not supported\n",
+ di->name, (u32)di->rxdpa);
goto fail;
}
}
- DMA_TRACE(("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x "
- "dataoffsethigh " "0x%x addrext %d\n", di->ddoffsetlow,
- di->ddoffsethigh, di->dataoffsetlow, di->dataoffsethigh,
- di->addrext));
+ DMA_TRACE("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x dataoffsethigh 0x%x addrext %d\n",
+ di->ddoffsetlow, di->ddoffsethigh,
+ di->dataoffsetlow, di->dataoffsethigh,
+ di->addrext);
return (struct dma_pub *) di;
@@ -744,17 +758,17 @@ void dma_detach(struct dma_pub *pub)
{
struct dma_info *di = (struct dma_info *)pub;
- DMA_TRACE(("%s: dma_detach\n", di->name));
+ DMA_TRACE("%s:\n", di->name);
/* free dma descriptor rings */
if (di->txd64)
- pci_free_consistent(di->pbus, di->txdalloc,
- ((s8 *)di->txd64 - di->txdalign),
- (di->txdpaorig));
+ dma_free_coherent(di->dmadev, di->txdalloc,
+ ((s8 *)di->txd64 - di->txdalign),
+ (di->txdpaorig));
if (di->rxd64)
- pci_free_consistent(di->pbus, di->rxdalloc,
- ((s8 *)di->rxd64 - di->rxdalign),
- (di->rxdpaorig));
+ dma_free_coherent(di->dmadev, di->rxdalloc,
+ ((s8 *)di->rxd64 - di->rxdalign),
+ (di->rxdpaorig));
/* free packet pointer vectors */
kfree(di->txp);
@@ -779,11 +793,15 @@ _dma_ddtable_init(struct dma_info *di, uint direction, dma_addr_t pa)
if ((di->ddoffsetlow == 0)
|| !(pa & PCI32ADDR_HIGH)) {
if (direction == DMA_TX) {
- W_REG(&di->d64txregs->addrlow, pa + di->ddoffsetlow);
- W_REG(&di->d64txregs->addrhigh, di->ddoffsethigh);
+ bcma_write32(di->core, DMA64TXREGOFFS(di, addrlow),
+ pa + di->ddoffsetlow);
+ bcma_write32(di->core, DMA64TXREGOFFS(di, addrhigh),
+ di->ddoffsethigh);
} else {
- W_REG(&di->d64rxregs->addrlow, pa + di->ddoffsetlow);
- W_REG(&di->d64rxregs->addrhigh, di->ddoffsethigh);
+ bcma_write32(di->core, DMA64RXREGOFFS(di, addrlow),
+ pa + di->ddoffsetlow);
+ bcma_write32(di->core, DMA64RXREGOFFS(di, addrhigh),
+ di->ddoffsethigh);
}
} else {
/* DMA64 32bits address extension */
@@ -794,15 +812,19 @@ _dma_ddtable_init(struct dma_info *di, uint direction, dma_addr_t pa)
pa &= ~PCI32ADDR_HIGH;
if (direction == DMA_TX) {
- W_REG(&di->d64txregs->addrlow, pa + di->ddoffsetlow);
- W_REG(&di->d64txregs->addrhigh, di->ddoffsethigh);
- SET_REG(&di->d64txregs->control,
- D64_XC_AE, (ae << D64_XC_AE_SHIFT));
+ bcma_write32(di->core, DMA64TXREGOFFS(di, addrlow),
+ pa + di->ddoffsetlow);
+ bcma_write32(di->core, DMA64TXREGOFFS(di, addrhigh),
+ di->ddoffsethigh);
+ bcma_maskset32(di->core, DMA64TXREGOFFS(di, control),
+ D64_XC_AE, (ae << D64_XC_AE_SHIFT));
} else {
- W_REG(&di->d64rxregs->addrlow, pa + di->ddoffsetlow);
- W_REG(&di->d64rxregs->addrhigh, di->ddoffsethigh);
- SET_REG(&di->d64rxregs->control,
- D64_RC_AE, (ae << D64_RC_AE_SHIFT));
+ bcma_write32(di->core, DMA64RXREGOFFS(di, addrlow),
+ pa + di->ddoffsetlow);
+ bcma_write32(di->core, DMA64RXREGOFFS(di, addrhigh),
+ di->ddoffsethigh);
+ bcma_maskset32(di->core, DMA64RXREGOFFS(di, control),
+ D64_RC_AE, (ae << D64_RC_AE_SHIFT));
}
}
}
@@ -812,11 +834,11 @@ static void _dma_rxenable(struct dma_info *di)
uint dmactrlflags = di->dma.dmactrlflags;
u32 control;
- DMA_TRACE(("%s: dma_rxenable\n", di->name));
+ DMA_TRACE("%s:\n", di->name);
- control =
- (R_REG(&di->d64rxregs->control) & D64_RC_AE) |
- D64_RC_RE;
+ control = D64_RC_RE | (bcma_read32(di->core,
+ DMA64RXREGOFFS(di, control)) &
+ D64_RC_AE);
if ((dmactrlflags & DMA_CTRL_PEN) == 0)
control |= D64_RC_PD;
@@ -824,7 +846,7 @@ static void _dma_rxenable(struct dma_info *di)
if (dmactrlflags & DMA_CTRL_ROC)
control |= D64_RC_OC;
- W_REG(&di->d64rxregs->control,
+ bcma_write32(di->core, DMA64RXREGOFFS(di, control),
((di->rxoffset << D64_RC_RO_SHIFT) | control));
}
@@ -832,7 +854,7 @@ void dma_rxinit(struct dma_pub *pub)
{
struct dma_info *di = (struct dma_info *)pub;
- DMA_TRACE(("%s: dma_rxinit\n", di->name));
+ DMA_TRACE("%s:\n", di->name);
if (di->nrxd == 0)
return;
@@ -867,7 +889,8 @@ static struct sk_buff *dma64_getnextrxp(struct dma_info *di, bool forceall)
return NULL;
curr =
- B2I(((R_REG(&di->d64rxregs->status0) & D64_RS0_CD_MASK) -
+ B2I(((bcma_read32(di->core,
+ DMA64RXREGOFFS(di, status0)) & D64_RS0_CD_MASK) -
di->rcvptrbase) & D64_RS0_CD_MASK, struct dma64desc);
/* ignore curr if forceall */
@@ -881,7 +904,7 @@ static struct sk_buff *dma64_getnextrxp(struct dma_info *di, bool forceall)
pa = le32_to_cpu(di->rxd64[i].addrlow) - di->dataoffsetlow;
/* clear this packet from the descriptor ring */
- pci_unmap_single(di->pbus, pa, di->rxbufsize, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(di->dmadev, pa, di->rxbufsize, DMA_FROM_DEVICE);
di->rxd64[i].addrlow = cpu_to_le32(0xdeadbeef);
di->rxd64[i].addrhigh = cpu_to_le32(0xdeadbeef);
@@ -901,7 +924,7 @@ static struct sk_buff *_dma_getnextrxp(struct dma_info *di, bool forceall)
/*
* !! rx entry routine
- * returns a pointer to the next frame received, or NULL if there are no more
+ * returns the number of packets in the next frame, or 0 if there are no more
* if DMA_CTRL_RXMULTI is defined, DMA scattering(multiple buffers) is
* supported with pkts chain
* otherwise, it's treated as giant pkt and will be tossed.
@@ -909,74 +932,83 @@ static struct sk_buff *_dma_getnextrxp(struct dma_info *di, bool forceall)
* buffer data. After it reaches the max size of buffer, the data continues
* in next DMA descriptor buffer WITHOUT DMA header
*/
-struct sk_buff *dma_rx(struct dma_pub *pub)
+int dma_rx(struct dma_pub *pub, struct sk_buff_head *skb_list)
{
struct dma_info *di = (struct dma_info *)pub;
- struct sk_buff *p, *head, *tail;
+ struct sk_buff_head dma_frames;
+ struct sk_buff *p, *next;
uint len;
uint pkt_len;
int resid = 0;
+ int pktcnt = 1;
+ skb_queue_head_init(&dma_frames);
next_frame:
- head = _dma_getnextrxp(di, false);
- if (head == NULL)
- return NULL;
+ p = _dma_getnextrxp(di, false);
+ if (p == NULL)
+ return 0;
- len = le16_to_cpu(*(__le16 *) (head->data));
- DMA_TRACE(("%s: dma_rx len %d\n", di->name, len));
- dma_spin_for_len(len, head);
+ len = le16_to_cpu(*(__le16 *) (p->data));
+ DMA_TRACE("%s: dma_rx len %d\n", di->name, len);
+ dma_spin_for_len(len, p);
/* set actual length */
pkt_len = min((di->rxoffset + len), di->rxbufsize);
- __skb_trim(head, pkt_len);
+ __skb_trim(p, pkt_len);
+ skb_queue_tail(&dma_frames, p);
resid = len - (di->rxbufsize - di->rxoffset);
/* check for single or multi-buffer rx */
if (resid > 0) {
- tail = head;
while ((resid > 0) && (p = _dma_getnextrxp(di, false))) {
- tail->next = p;
pkt_len = min_t(uint, resid, di->rxbufsize);
__skb_trim(p, pkt_len);
-
- tail = p;
+ skb_queue_tail(&dma_frames, p);
resid -= di->rxbufsize;
+ pktcnt++;
}
#ifdef BCMDBG
if (resid > 0) {
uint cur;
cur =
- B2I(((R_REG(&di->d64rxregs->status0) &
- D64_RS0_CD_MASK) -
- di->rcvptrbase) & D64_RS0_CD_MASK,
- struct dma64desc);
- DMA_ERROR(("dma_rx, rxin %d rxout %d, hw_curr %d\n",
- di->rxin, di->rxout, cur));
+ B2I(((bcma_read32(di->core,
+ DMA64RXREGOFFS(di, status0)) &
+ D64_RS0_CD_MASK) - di->rcvptrbase) &
+ D64_RS0_CD_MASK, struct dma64desc);
+ DMA_ERROR("rxin %d rxout %d, hw_curr %d\n",
+ di->rxin, di->rxout, cur);
}
#endif /* BCMDBG */
if ((di->dma.dmactrlflags & DMA_CTRL_RXMULTI) == 0) {
- DMA_ERROR(("%s: dma_rx: bad frame length (%d)\n",
- di->name, len));
- brcmu_pkt_buf_free_skb(head);
+ DMA_ERROR("%s: bad frame length (%d)\n",
+ di->name, len);
+ skb_queue_walk_safe(&dma_frames, p, next) {
+ skb_unlink(p, &dma_frames);
+ brcmu_pkt_buf_free_skb(p);
+ }
di->dma.rxgiants++;
+ pktcnt = 1;
goto next_frame;
}
}
- return head;
+ skb_queue_splice_tail(&dma_frames, skb_list);
+ return pktcnt;
}
static bool dma64_rxidle(struct dma_info *di)
{
- DMA_TRACE(("%s: dma_rxidle\n", di->name));
+ DMA_TRACE("%s:\n", di->name);
if (di->nrxd == 0)
return true;
- return ((R_REG(&di->d64rxregs->status0) & D64_RS0_CD_MASK) ==
- (R_REG(&di->d64rxregs->ptr) & D64_RS0_CD_MASK));
+ return ((bcma_read32(di->core,
+ DMA64RXREGOFFS(di, status0)) & D64_RS0_CD_MASK) ==
+ (bcma_read32(di->core, DMA64RXREGOFFS(di, ptr)) &
+ D64_RS0_CD_MASK));
}
/*
@@ -1010,7 +1042,7 @@ bool dma_rxfill(struct dma_pub *pub)
n = di->nrxpost - nrxdactive(di, rxin, rxout);
- DMA_TRACE(("%s: dma_rxfill: post %d\n", di->name, n));
+ DMA_TRACE("%s: post %d\n", di->name, n);
if (di->rxbufsize > BCMEXTRAHDROOM)
extra_offset = di->rxextrahdrroom;
@@ -1023,11 +1055,9 @@ bool dma_rxfill(struct dma_pub *pub)
p = brcmu_pkt_buf_get_skb(di->rxbufsize + extra_offset);
if (p == NULL) {
- DMA_ERROR(("%s: dma_rxfill: out of rxbufs\n",
- di->name));
+ DMA_ERROR("%s: out of rxbufs\n", di->name);
if (i == 0 && dma64_rxidle(di)) {
- DMA_ERROR(("%s: rxfill64: ring is empty !\n",
- di->name));
+ DMA_ERROR("%s: ring is empty !\n", di->name);
ring_empty = true;
}
di->dma.rxnobuf++;
@@ -1042,8 +1072,8 @@ bool dma_rxfill(struct dma_pub *pub)
*/
*(u32 *) (p->data) = 0;
- pa = pci_map_single(di->pbus, p->data,
- di->rxbufsize, PCI_DMA_FROMDEVICE);
+ pa = dma_map_single(di->dmadev, p->data, di->rxbufsize,
+ DMA_FROM_DEVICE);
/* save the free packet pointer */
di->rxp[rxout] = p;
@@ -1061,7 +1091,7 @@ bool dma_rxfill(struct dma_pub *pub)
di->rxout = rxout;
/* update the chip lastdscr pointer */
- W_REG(&di->d64rxregs->ptr,
+ bcma_write32(di->core, DMA64RXREGOFFS(di, ptr),
di->rcvptrbase + I2B(rxout, struct dma64desc));
return ring_empty;
@@ -1072,7 +1102,7 @@ void dma_rxreclaim(struct dma_pub *pub)
struct dma_info *di = (struct dma_info *)pub;
struct sk_buff *p;
- DMA_TRACE(("%s: dma_rxreclaim\n", di->name));
+ DMA_TRACE("%s:\n", di->name);
while ((p = _dma_getnextrxp(di, true)))
brcmu_pkt_buf_free_skb(p);
@@ -1103,7 +1133,7 @@ void dma_txinit(struct dma_pub *pub)
struct dma_info *di = (struct dma_info *)pub;
u32 control = D64_XC_XE;
- DMA_TRACE(("%s: dma_txinit\n", di->name));
+ DMA_TRACE("%s:\n", di->name);
if (di->ntxd == 0)
return;
@@ -1122,7 +1152,7 @@ void dma_txinit(struct dma_pub *pub)
if ((di->dma.dmactrlflags & DMA_CTRL_PEN) == 0)
control |= D64_XC_PD;
- OR_REG(&di->d64txregs->control, control);
+ bcma_set32(di->core, DMA64TXREGOFFS(di, control), control);
/* DMA engine with alignment requirement requires table to be inited
* before enabling the engine
@@ -1135,24 +1165,24 @@ void dma_txsuspend(struct dma_pub *pub)
{
struct dma_info *di = (struct dma_info *)pub;
- DMA_TRACE(("%s: dma_txsuspend\n", di->name));
+ DMA_TRACE("%s:\n", di->name);
if (di->ntxd == 0)
return;
- OR_REG(&di->d64txregs->control, D64_XC_SE);
+ bcma_set32(di->core, DMA64TXREGOFFS(di, control), D64_XC_SE);
}
void dma_txresume(struct dma_pub *pub)
{
struct dma_info *di = (struct dma_info *)pub;
- DMA_TRACE(("%s: dma_txresume\n", di->name));
+ DMA_TRACE("%s:\n", di->name);
if (di->ntxd == 0)
return;
- AND_REG(&di->d64txregs->control, ~D64_XC_SE);
+ bcma_mask32(di->core, DMA64TXREGOFFS(di, control), ~D64_XC_SE);
}
bool dma_txsuspended(struct dma_pub *pub)
@@ -1160,8 +1190,9 @@ bool dma_txsuspended(struct dma_pub *pub)
struct dma_info *di = (struct dma_info *)pub;
return (di->ntxd == 0) ||
- ((R_REG(&di->d64txregs->control) & D64_XC_SE) ==
- D64_XC_SE);
+ ((bcma_read32(di->core,
+ DMA64TXREGOFFS(di, control)) & D64_XC_SE) ==
+ D64_XC_SE);
}
void dma_txreclaim(struct dma_pub *pub, enum txd_range range)
@@ -1169,11 +1200,11 @@ void dma_txreclaim(struct dma_pub *pub, enum txd_range range)
struct dma_info *di = (struct dma_info *)pub;
struct sk_buff *p;
- DMA_TRACE(("%s: dma_txreclaim %s\n", di->name,
- (range == DMA_RANGE_ALL) ? "all" :
- ((range ==
- DMA_RANGE_TRANSMITTED) ? "transmitted" :
- "transferred")));
+ DMA_TRACE("%s: %s\n",
+ di->name,
+ range == DMA_RANGE_ALL ? "all" :
+ range == DMA_RANGE_TRANSMITTED ? "transmitted" :
+ "transferred");
if (di->txin == di->txout)
return;
@@ -1194,16 +1225,17 @@ bool dma_txreset(struct dma_pub *pub)
return true;
/* suspend tx DMA first */
- W_REG(&di->d64txregs->control, D64_XC_SE);
+ bcma_write32(di->core, DMA64TXREGOFFS(di, control), D64_XC_SE);
SPINWAIT(((status =
- (R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK))
- != D64_XS0_XS_DISABLED) && (status != D64_XS0_XS_IDLE)
- && (status != D64_XS0_XS_STOPPED), 10000);
+ (bcma_read32(di->core, DMA64TXREGOFFS(di, status0)) &
+ D64_XS0_XS_MASK)) != D64_XS0_XS_DISABLED) &&
+ (status != D64_XS0_XS_IDLE) && (status != D64_XS0_XS_STOPPED),
+ 10000);
- W_REG(&di->d64txregs->control, 0);
+ bcma_write32(di->core, DMA64TXREGOFFS(di, control), 0);
SPINWAIT(((status =
- (R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK))
- != D64_XS0_XS_DISABLED), 10000);
+ (bcma_read32(di->core, DMA64TXREGOFFS(di, status0)) &
+ D64_XS0_XS_MASK)) != D64_XS0_XS_DISABLED), 10000);
/* wait for the last transaction to complete */
udelay(300);
@@ -1219,10 +1251,10 @@ bool dma_rxreset(struct dma_pub *pub)
if (di->nrxd == 0)
return true;
- W_REG(&di->d64rxregs->control, 0);
+ bcma_write32(di->core, DMA64RXREGOFFS(di, control), 0);
SPINWAIT(((status =
- (R_REG(&di->d64rxregs->status0) & D64_RS0_RS_MASK))
- != D64_RS0_RS_DISABLED), 10000);
+ (bcma_read32(di->core, DMA64RXREGOFFS(di, status0)) &
+ D64_RS0_RS_MASK)) != D64_RS0_RS_DISABLED), 10000);
return status == D64_RS0_RS_DISABLED;
}
@@ -1233,72 +1265,58 @@ bool dma_rxreset(struct dma_pub *pub)
* the error(toss frames) could be fatal and cause many subsequent hard
* to debug problems
*/
-int dma_txfast(struct dma_pub *pub, struct sk_buff *p0, bool commit)
+int dma_txfast(struct dma_pub *pub, struct sk_buff *p, bool commit)
{
struct dma_info *di = (struct dma_info *)pub;
- struct sk_buff *p, *next;
unsigned char *data;
uint len;
u16 txout;
u32 flags = 0;
dma_addr_t pa;
- DMA_TRACE(("%s: dma_txfast\n", di->name));
+ DMA_TRACE("%s:\n", di->name);
txout = di->txout;
/*
- * Walk the chain of packet buffers
- * allocating and initializing transmit descriptor entries.
+ * obtain and initialize transmit descriptor entry.
*/
- for (p = p0; p; p = next) {
- data = p->data;
- len = p->len;
- next = p->next;
+ data = p->data;
+ len = p->len;
- /* return nonzero if out of tx descriptors */
- if (nexttxd(di, txout) == di->txin)
- goto outoftxd;
+ /* no use to transmit a zero length packet */
+ if (len == 0)
+ return 0;
- if (len == 0)
- continue;
+ /* return nonzero if out of tx descriptors */
+ if (nexttxd(di, txout) == di->txin)
+ goto outoftxd;
- /* get physical address of buffer start */
- pa = pci_map_single(di->pbus, data, len, PCI_DMA_TODEVICE);
+ /* get physical address of buffer start */
+ pa = dma_map_single(di->dmadev, data, len, DMA_TO_DEVICE);
- flags = 0;
- if (p == p0)
- flags |= D64_CTRL1_SOF;
-
- /* With a DMA segment list, Descriptor table is filled
- * using the segment list instead of looping over
- * buffers in multi-chain DMA. Therefore, EOF for SGLIST
- * is when end of segment list is reached.
- */
- if (next == NULL)
- flags |= (D64_CTRL1_IOC | D64_CTRL1_EOF);
- if (txout == (di->ntxd - 1))
- flags |= D64_CTRL1_EOT;
-
- dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);
+ /* With a DMA segment list, Descriptor table is filled
+ * using the segment list instead of looping over
+ * buffers in multi-chain DMA. Therefore, EOF for SGLIST
+ * is when end of segment list is reached.
+ */
+ flags = D64_CTRL1_SOF | D64_CTRL1_IOC | D64_CTRL1_EOF;
+ if (txout == (di->ntxd - 1))
+ flags |= D64_CTRL1_EOT;
- txout = nexttxd(di, txout);
- }
+ dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);
- /* if last txd eof not set, fix it */
- if (!(flags & D64_CTRL1_EOF))
- di->txd64[prevtxd(di, txout)].ctrl1 =
- cpu_to_le32(flags | D64_CTRL1_IOC | D64_CTRL1_EOF);
+ txout = nexttxd(di, txout);
/* save the packet */
- di->txp[prevtxd(di, txout)] = p0;
+ di->txp[prevtxd(di, txout)] = p;
/* bump the tx descriptor index */
di->txout = txout;
/* kick the chip */
if (commit)
- W_REG(&di->d64txregs->ptr,
+ bcma_write32(di->core, DMA64TXREGOFFS(di, ptr),
di->xmtptrbase + I2B(txout, struct dma64desc));
/* tx flow control */
@@ -1307,8 +1325,8 @@ int dma_txfast(struct dma_pub *pub, struct sk_buff *p0, bool commit)
return 0;
outoftxd:
- DMA_ERROR(("%s: dma_txfast: out of txds !!!\n", di->name));
- brcmu_pkt_buf_free_skb(p0);
+ DMA_ERROR("%s: out of txds !!!\n", di->name);
+ brcmu_pkt_buf_free_skb(p);
di->dma.txavail = 0;
di->dma.txnobuf++;
return -1;
@@ -1331,11 +1349,11 @@ struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range)
u16 active_desc;
struct sk_buff *txp;
- DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name,
- (range == DMA_RANGE_ALL) ? "all" :
- ((range ==
- DMA_RANGE_TRANSMITTED) ? "transmitted" :
- "transferred")));
+ DMA_TRACE("%s: %s\n",
+ di->name,
+ range == DMA_RANGE_ALL ? "all" :
+ range == DMA_RANGE_TRANSMITTED ? "transmitted" :
+ "transferred");
if (di->ntxd == 0)
return NULL;
@@ -1346,16 +1364,15 @@ struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range)
if (range == DMA_RANGE_ALL)
end = di->txout;
else {
- struct dma64regs __iomem *dregs = di->d64txregs;
-
- end = (u16) (B2I(((R_REG(&dregs->status0) &
- D64_XS0_CD_MASK) -
- di->xmtptrbase) & D64_XS0_CD_MASK,
- struct dma64desc));
+ end = (u16) (B2I(((bcma_read32(di->core,
+ DMA64TXREGOFFS(di, status0)) &
+ D64_XS0_CD_MASK) - di->xmtptrbase) &
+ D64_XS0_CD_MASK, struct dma64desc));
if (range == DMA_RANGE_TRANSFERED) {
active_desc =
- (u16) (R_REG(&dregs->status1) &
+ (u16)(bcma_read32(di->core,
+ DMA64TXREGOFFS(di, status1)) &
D64_XS1_AD_MASK);
active_desc =
(active_desc - di->xmtptrbase) & D64_XS0_CD_MASK;
@@ -1384,7 +1401,7 @@ struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range)
txp = di->txp[i];
di->txp[i] = NULL;
- pci_unmap_single(di->pbus, pa, size, PCI_DMA_TODEVICE);
+ dma_unmap_single(di->dmadev, pa, size, DMA_TO_DEVICE);
}
di->txin = i;
@@ -1395,8 +1412,8 @@ struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range)
return txp;
bogus:
- DMA_NONE(("dma_getnexttxp: bogus curr: start %d end %d txout %d "
- "force %d\n", start, end, di->txout, forceall));
+ DMA_NONE("bogus curr: start %d end %d txout %d\n",
+ start, end, di->txout);
return NULL;
}
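
[Editorial sketch, not part of the patch] Two conversions recur throughout the dma.c hunks above: register access moves from W_REG()/R_REG() on an ioremapped struct dma64regs to bcma_read32()/bcma_write32() with byte offsets (DMA64TXREGOFFS()/DMA64RXREGOFFS()) against the 802.11 bcma core, and buffer mapping moves from the PCI DMA API to the generic DMA API keyed on di->dmadev. A condensed illustration of the new-style access, assuming the dma_info fields introduced by this series; the function itself is hypothetical:

static void example_dma64_access(struct dma_info *di, struct sk_buff *p)
{
	dma_addr_t pa;
	u32 status;

	/* generic DMA API replaces pci_map_single()/PCI_DMA_TODEVICE */
	pa = dma_map_single(di->dmadev, p->data, p->len, DMA_TO_DEVICE);

	/* bcma accessors with register offsets replace W_REG()/R_REG()
	 * on the ioremapped struct dma64regs */
	bcma_write32(di->core, DMA64TXREGOFFS(di, ptr),
		     di->xmtptrbase + I2B(di->txout, struct dma64desc));
	status = bcma_read32(di->core, DMA64TXREGOFFS(di, status0));

	/* real code keeps the mapping until tx completion; unmapped here
	 * only to keep the sketch self-contained */
	dma_unmap_single(di->dmadev, pa, p->len, DMA_TO_DEVICE);
	(void)status;
}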
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/dma.h b/drivers/net/wireless/brcm80211/brcmsmac/dma.h
index ebc5bc546f3b..cc269ee5c499 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/dma.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/dma.h
@@ -18,6 +18,7 @@
#define _BRCM_DMA_H_
#include <linux/delay.h>
+#include <linux/skbuff.h>
#include "types.h" /* forward structure declarations */
/* map/unmap direction */
@@ -74,13 +75,14 @@ struct dma_pub {
};
extern struct dma_pub *dma_attach(char *name, struct si_pub *sih,
- void __iomem *dmaregstx, void __iomem *dmaregsrx,
- uint ntxd, uint nrxd,
- uint rxbufsize, int rxextheadroom,
- uint nrxpost, uint rxoffset, uint *msg_level);
+ struct bcma_device *d11core,
+ uint txregbase, uint rxregbase,
+ uint ntxd, uint nrxd,
+ uint rxbufsize, int rxextheadroom,
+ uint nrxpost, uint rxoffset, uint *msg_level);
void dma_rxinit(struct dma_pub *pub);
-struct sk_buff *dma_rx(struct dma_pub *pub);
+int dma_rx(struct dma_pub *pub, struct sk_buff_head *skb_list);
bool dma_rxfill(struct dma_pub *pub);
bool dma_rxreset(struct dma_pub *pub);
bool dma_txreset(struct dma_pub *pub);
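
[Editorial sketch, not part of the patch] The dma.h hunk above captures the interface change made in dma.c: dma_rx() no longer returns a single ->next-chained sk_buff; it appends the buffers of one complete frame to a caller-supplied sk_buff_head and returns how many buffers it queued (0 when the ring is empty). A hypothetical caller, only to show the new calling convention:

static void example_rx_poll(struct dma_pub *dma)
{
	struct sk_buff_head frames;
	struct sk_buff *skb;

	skb_queue_head_init(&frames);

	/* each call drains the buffers of one received frame into 'frames' */
	while (dma_rx(dma, &frames) > 0) {
		while ((skb = skb_dequeue(&frames)) != NULL)
			brcmu_pkt_buf_free_skb(skb); /* stand-in for real frame processing */
	}
}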
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index 0d8a9cdf897a..448ab9c4eb47 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -17,11 +17,11 @@
#define __UNDEF_NO_VERSION__
#include <linux/etherdevice.h>
-#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/module.h>
+#include <linux/bcma/bcma.h>
#include <net/mac80211.h>
#include <defs.h>
#include "nicpci.h"
@@ -40,10 +40,10 @@
#define MAC_FILTERS (FIF_PROMISC_IN_BSS | \
FIF_ALLMULTI | \
FIF_FCSFAIL | \
- FIF_PLCPFAIL | \
FIF_CONTROL | \
FIF_OTHER_BSS | \
- FIF_BCN_PRBRESP_PROMISC)
+ FIF_BCN_PRBRESP_PROMISC | \
+ FIF_PSPOLL)
#define CHAN2GHZ(channel, freqency, chflags) { \
.band = IEEE80211_BAND_2GHZ, \
@@ -87,16 +87,14 @@ MODULE_DESCRIPTION("Broadcom 802.11n wireless LAN driver.");
MODULE_SUPPORTED_DEVICE("Broadcom 802.11n WLAN cards");
MODULE_LICENSE("Dual BSD/GPL");
-/* recognized PCI IDs */
-static DEFINE_PCI_DEVICE_TABLE(brcms_pci_id_table) = {
- { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4357) }, /* 43225 2G */
- { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4353) }, /* 43224 DUAL */
- { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) }, /* 4313 DUAL */
- { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x0576) }, /* 43224 Ven */
- {0}
-};
-MODULE_DEVICE_TABLE(pci, brcms_pci_id_table);
+/* recognized BCMA Core IDs */
+static struct bcma_device_id brcms_coreid_table[] = {
+ BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 23, BCMA_ANY_CLASS),
+ BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 24, BCMA_ANY_CLASS),
+ BCMA_CORETABLE_END
+};
+MODULE_DEVICE_TABLE(bcma, brcms_coreid_table);
#ifdef BCMDBG
static int msglevel = 0xdeadbeef;
@@ -216,8 +214,7 @@ static const struct ieee80211_supported_band brcms_band_2GHz_nphy_template = {
.ht_cap = {
/* from include/linux/ieee80211.h */
.cap = IEEE80211_HT_CAP_GRN_FLD |
- IEEE80211_HT_CAP_SGI_20 |
- IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_40MHZ_INTOLERANT,
+ IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40,
.ht_supported = true,
.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K,
.ampdu_density = AMPDU_DEF_MPDU_DENSITY,
@@ -238,8 +235,7 @@ static const struct ieee80211_supported_band brcms_band_5GHz_nphy_template = {
BRCMS_LEGACY_5G_RATE_OFFSET,
.ht_cap = {
.cap = IEEE80211_HT_CAP_GRN_FLD | IEEE80211_HT_CAP_SGI_20 |
- IEEE80211_HT_CAP_SGI_40 |
- IEEE80211_HT_CAP_40MHZ_INTOLERANT, /* No 40 mhz yet */
+ IEEE80211_HT_CAP_SGI_40,
.ht_supported = true,
.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K,
.ampdu_density = AMPDU_DEF_MPDU_DENSITY,
@@ -287,6 +283,7 @@ static int brcms_ops_start(struct ieee80211_hw *hw)
{
struct brcms_info *wl = hw->priv;
bool blocked;
+ int err;
ieee80211_wake_queues(hw);
spin_lock_bh(&wl->lock);
@@ -295,57 +292,69 @@ static int brcms_ops_start(struct ieee80211_hw *hw)
if (!blocked)
wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy);
- return 0;
+ spin_lock_bh(&wl->lock);
+ /* avoid acknowledging frames before a non-monitor device is added */
+ wl->mute_tx = true;
+
+ if (!wl->pub->up)
+ err = brcms_up(wl);
+ else
+ err = -ENODEV;
+ spin_unlock_bh(&wl->lock);
+
+ if (err != 0)
+ wiphy_err(hw->wiphy, "%s: brcms_up() returned %d\n", __func__,
+ err);
+ return err;
}
static void brcms_ops_stop(struct ieee80211_hw *hw)
{
+ struct brcms_info *wl = hw->priv;
+ int status;
+
ieee80211_stop_queues(hw);
+
+ if (wl->wlc == NULL)
+ return;
+
+ spin_lock_bh(&wl->lock);
+ status = brcms_c_chipmatch(wl->wlc->hw->vendorid,
+ wl->wlc->hw->deviceid);
+ spin_unlock_bh(&wl->lock);
+ if (!status) {
+ wiphy_err(wl->wiphy,
+ "wl: brcms_ops_stop: chipmatch failed\n");
+ return;
+ }
+
+ /* put driver in down state */
+ spin_lock_bh(&wl->lock);
+ brcms_down(wl);
+ spin_unlock_bh(&wl->lock);
}
static int
brcms_ops_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
- struct brcms_info *wl;
- int err;
+ struct brcms_info *wl = hw->priv;
/* Just STA for now */
- if (vif->type != NL80211_IFTYPE_AP &&
- vif->type != NL80211_IFTYPE_MESH_POINT &&
- vif->type != NL80211_IFTYPE_STATION &&
- vif->type != NL80211_IFTYPE_WDS &&
- vif->type != NL80211_IFTYPE_ADHOC) {
+ if (vif->type != NL80211_IFTYPE_STATION) {
wiphy_err(hw->wiphy, "%s: Attempt to add type %d, only"
" STA for now\n", __func__, vif->type);
return -EOPNOTSUPP;
}
- wl = hw->priv;
- spin_lock_bh(&wl->lock);
- if (!wl->pub->up)
- err = brcms_up(wl);
- else
- err = -ENODEV;
- spin_unlock_bh(&wl->lock);
-
- if (err != 0)
- wiphy_err(hw->wiphy, "%s: brcms_up() returned %d\n", __func__,
- err);
+ wl->mute_tx = false;
+ brcms_c_mute(wl->wlc, false);
- return err;
+ return 0;
}
static void
brcms_ops_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
- struct brcms_info *wl;
-
- wl = hw->priv;
-
- /* put driver in down state */
- spin_lock_bh(&wl->lock);
- brcms_down(wl);
- spin_unlock_bh(&wl->lock);
}
static int brcms_ops_config(struct ieee80211_hw *hw, u32 changed)
@@ -362,7 +371,7 @@ static int brcms_ops_config(struct ieee80211_hw *hw, u32 changed)
conf->listen_interval);
}
if (changed & IEEE80211_CONF_CHANGE_MONITOR)
- wiphy_err(wiphy, "%s: change monitor mode: %s (implement)\n",
+ wiphy_dbg(wiphy, "%s: change monitor mode: %s\n",
__func__, conf->flags & IEEE80211_CONF_MONITOR ?
"true" : "false");
if (changed & IEEE80211_CONF_CHANGE_PS)
@@ -539,29 +548,25 @@ brcms_ops_configure_filter(struct ieee80211_hw *hw,
changed_flags &= MAC_FILTERS;
*total_flags &= MAC_FILTERS;
+
if (changed_flags & FIF_PROMISC_IN_BSS)
- wiphy_err(wiphy, "FIF_PROMISC_IN_BSS\n");
+ wiphy_dbg(wiphy, "FIF_PROMISC_IN_BSS\n");
if (changed_flags & FIF_ALLMULTI)
- wiphy_err(wiphy, "FIF_ALLMULTI\n");
+ wiphy_dbg(wiphy, "FIF_ALLMULTI\n");
if (changed_flags & FIF_FCSFAIL)
- wiphy_err(wiphy, "FIF_FCSFAIL\n");
- if (changed_flags & FIF_PLCPFAIL)
- wiphy_err(wiphy, "FIF_PLCPFAIL\n");
+ wiphy_dbg(wiphy, "FIF_FCSFAIL\n");
if (changed_flags & FIF_CONTROL)
- wiphy_err(wiphy, "FIF_CONTROL\n");
+ wiphy_dbg(wiphy, "FIF_CONTROL\n");
if (changed_flags & FIF_OTHER_BSS)
- wiphy_err(wiphy, "FIF_OTHER_BSS\n");
- if (changed_flags & FIF_BCN_PRBRESP_PROMISC) {
- spin_lock_bh(&wl->lock);
- if (*total_flags & FIF_BCN_PRBRESP_PROMISC) {
- wl->pub->mac80211_state |= MAC80211_PROMISC_BCNS;
- brcms_c_mac_bcn_promisc_change(wl->wlc, 1);
- } else {
- brcms_c_mac_bcn_promisc_change(wl->wlc, 0);
- wl->pub->mac80211_state &= ~MAC80211_PROMISC_BCNS;
- }
- spin_unlock_bh(&wl->lock);
- }
+ wiphy_dbg(wiphy, "FIF_OTHER_BSS\n");
+ if (changed_flags & FIF_PSPOLL)
+ wiphy_dbg(wiphy, "FIF_PSPOLL\n");
+ if (changed_flags & FIF_BCN_PRBRESP_PROMISC)
+ wiphy_dbg(wiphy, "FIF_BCN_PRBRESP_PROMISC\n");
+
+ spin_lock_bh(&wl->lock);
+ brcms_c_mac_promisc(wl->wlc, *total_flags);
+ spin_unlock_bh(&wl->lock);
return;
}
@@ -609,13 +614,6 @@ brcms_ops_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
wl->pub->global_ampdu->scb = scb;
wl->pub->global_ampdu->max_pdu = 16;
- sta->ht_cap.ht_supported = true;
- sta->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
- sta->ht_cap.ampdu_density = AMPDU_DEF_MPDU_DENSITY;
- sta->ht_cap.cap = IEEE80211_HT_CAP_GRN_FLD |
- IEEE80211_HT_CAP_SGI_20 |
- IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_40MHZ_INTOLERANT;
-
/*
* minstrel_ht initiates addBA on our behalf by calling
* ieee80211_start_tx_ba_session()
@@ -724,7 +722,7 @@ static const struct ieee80211_ops brcms_ops = {
};
/*
- * is called in brcms_pci_probe() context, therefore no locking required.
+ * is called in brcms_bcma_probe() context, therefore no locking required.
*/
static int brcms_set_hint(struct brcms_info *wl, char *abbrev)
{
@@ -864,56 +862,26 @@ static void brcms_free(struct brcms_info *wl)
#endif
kfree(t);
}
-
- /*
- * unregister_netdev() calls get_stats() which may read chip
- * registers so we cannot unmap the chip registers until
- * after calling unregister_netdev() .
- */
- if (wl->regsva)
- iounmap(wl->regsva);
-
- wl->regsva = NULL;
}
/*
-* called from both kernel as from this kernel module.
+* called both from the kernel and from this kernel module (error flow on attach)
* precondition: perimeter lock is not acquired.
*/
-static void brcms_remove(struct pci_dev *pdev)
+static void brcms_remove(struct bcma_device *pdev)
{
- struct brcms_info *wl;
- struct ieee80211_hw *hw;
- int status;
-
- hw = pci_get_drvdata(pdev);
- wl = hw->priv;
- if (!wl) {
- pr_err("wl: brcms_remove: pci_get_drvdata failed\n");
- return;
- }
+ struct ieee80211_hw *hw = bcma_get_drvdata(pdev);
+ struct brcms_info *wl = hw->priv;
- spin_lock_bh(&wl->lock);
- status = brcms_c_chipmatch(pdev->vendor, pdev->device);
- spin_unlock_bh(&wl->lock);
- if (!status) {
- wiphy_err(wl->wiphy, "wl: brcms_remove: chipmatch "
- "failed\n");
- return;
- }
if (wl->wlc) {
wiphy_rfkill_set_hw_state(wl->pub->ieee_hw->wiphy, false);
wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy);
ieee80211_unregister_hw(hw);
- spin_lock_bh(&wl->lock);
- brcms_down(wl);
- spin_unlock_bh(&wl->lock);
}
- pci_disable_device(pdev);
brcms_free(wl);
- pci_set_drvdata(pdev, NULL);
+ bcma_set_drvdata(pdev, NULL);
ieee80211_free_hw(hw);
}
@@ -1021,11 +989,9 @@ static int ieee_hw_init(struct ieee80211_hw *hw)
* it as static.
*
*
- * is called in brcms_pci_probe() context, therefore no locking required.
+ * is called in brcms_bcma_probe() context, therefore no locking required.
*/
-static struct brcms_info *brcms_attach(u16 vendor, u16 device,
- resource_size_t regs,
- struct pci_dev *btparam, uint irq)
+static struct brcms_info *brcms_attach(struct bcma_device *pdev)
{
struct brcms_info *wl = NULL;
int unit, err;
@@ -1039,7 +1005,7 @@ static struct brcms_info *brcms_attach(u16 vendor, u16 device,
return NULL;
/* allocate private info */
- hw = pci_get_drvdata(btparam); /* btparam == pdev */
+ hw = bcma_get_drvdata(pdev);
if (hw != NULL)
wl = hw->priv;
if (WARN_ON(hw == NULL) || WARN_ON(wl == NULL))
@@ -1051,26 +1017,20 @@ static struct brcms_info *brcms_attach(u16 vendor, u16 device,
/* setup the bottom half handler */
tasklet_init(&wl->tasklet, brcms_dpc, (unsigned long) wl);
- wl->regsva = ioremap_nocache(regs, PCI_BAR0_WINSZ);
- if (wl->regsva == NULL) {
- wiphy_err(wl->wiphy, "wl%d: ioremap() failed\n", unit);
- goto fail;
- }
spin_lock_init(&wl->lock);
spin_lock_init(&wl->isr_lock);
/* prepare ucode */
- if (brcms_request_fw(wl, btparam) < 0) {
+ if (brcms_request_fw(wl, pdev->bus->host_pci) < 0) {
wiphy_err(wl->wiphy, "%s: Failed to find firmware usually in "
"%s\n", KBUILD_MODNAME, "/lib/firmware/brcm");
brcms_release_fw(wl);
- brcms_remove(btparam);
+ brcms_remove(pdev);
return NULL;
}
/* common load-time initialization */
- wl->wlc = brcms_c_attach(wl, vendor, device, unit, false,
- wl->regsva, btparam, &err);
+ wl->wlc = brcms_c_attach((void *)wl, pdev, unit, false, &err);
brcms_release_fw(wl);
if (!wl->wlc) {
wiphy_err(wl->wiphy, "%s: attach() failed with code %d\n",
@@ -1081,15 +1041,13 @@ static struct brcms_info *brcms_attach(u16 vendor, u16 device,
wl->pub->ieee_hw = hw;
- /* disable mpc */
- brcms_c_set_radio_mpc(wl->wlc, false);
-
/* register our interrupt handler */
- if (request_irq(irq, brcms_isr, IRQF_SHARED, KBUILD_MODNAME, wl)) {
+ if (request_irq(pdev->bus->host_pci->irq, brcms_isr,
+ IRQF_SHARED, KBUILD_MODNAME, wl)) {
wiphy_err(wl->wiphy, "wl%d: request_irq() failed\n", unit);
goto fail;
}
- wl->irq = irq;
+ wl->irq = pdev->bus->host_pci->irq;
/* register module */
brcms_c_module_register(wl->pub, "linux", wl, NULL);
@@ -1136,37 +1094,18 @@ fail:
*
* Perimeter lock is initialized in the course of this function.
*/
-static int __devinit
-brcms_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int __devinit brcms_bcma_probe(struct bcma_device *pdev)
{
- int rc;
struct brcms_info *wl;
struct ieee80211_hw *hw;
- u32 val;
-
- dev_info(&pdev->dev, "bus %d slot %d func %d irq %d\n",
- pdev->bus->number, PCI_SLOT(pdev->devfn),
- PCI_FUNC(pdev->devfn), pdev->irq);
- if ((pdev->vendor != PCI_VENDOR_ID_BROADCOM) ||
- ((pdev->device != 0x0576) &&
- ((pdev->device & 0xff00) != 0x4300) &&
- ((pdev->device & 0xff00) != 0x4700) &&
- ((pdev->device < 43000) || (pdev->device > 43999))))
- return -ENODEV;
+ dev_info(&pdev->dev, "mfg %x core %x rev %d class %d irq %d\n",
+ pdev->id.manuf, pdev->id.id, pdev->id.rev, pdev->id.class,
+ pdev->bus->host_pci->irq);
- rc = pci_enable_device(pdev);
- if (rc) {
- pr_err("%s: Cannot enable device %d-%d_%d\n",
- __func__, pdev->bus->number, PCI_SLOT(pdev->devfn),
- PCI_FUNC(pdev->devfn));
+ if ((pdev->id.manuf != BCMA_MANUF_BCM) ||
+ (pdev->id.id != BCMA_CORE_80211))
return -ENODEV;
- }
- pci_set_master(pdev);
-
- pci_read_config_dword(pdev, 0x40, &val);
- if ((val & 0x0000ff00) != 0)
- pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
hw = ieee80211_alloc_hw(sizeof(struct brcms_info), &brcms_ops);
if (!hw) {
@@ -1176,14 +1115,11 @@ brcms_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
SET_IEEE80211_DEV(hw, &pdev->dev);
- pci_set_drvdata(pdev, hw);
+ bcma_set_drvdata(pdev, hw);
memset(hw->priv, 0, sizeof(*wl));
- wl = brcms_attach(pdev->vendor, pdev->device,
- pci_resource_start(pdev, 0), pdev,
- pdev->irq);
-
+ wl = brcms_attach(pdev);
if (!wl) {
pr_err("%s: %s: brcms_attach failed!\n", KBUILD_MODNAME,
__func__);
@@ -1192,16 +1128,16 @@ brcms_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
}
-static int brcms_suspend(struct pci_dev *pdev, pm_message_t state)
+static int brcms_suspend(struct bcma_device *pdev)
{
struct brcms_info *wl;
struct ieee80211_hw *hw;
- hw = pci_get_drvdata(pdev);
+ hw = bcma_get_drvdata(pdev);
wl = hw->priv;
if (!wl) {
wiphy_err(wl->wiphy,
- "brcms_suspend: pci_get_drvdata failed\n");
+ "brcms_suspend: bcma_get_drvdata failed\n");
return -ENODEV;
}
@@ -1210,60 +1146,28 @@ static int brcms_suspend(struct pci_dev *pdev, pm_message_t state)
wl->pub->hw_up = false;
spin_unlock_bh(&wl->lock);
- pci_save_state(pdev);
- pci_disable_device(pdev);
- return pci_set_power_state(pdev, PCI_D3hot);
+ pr_debug("brcms_suspend ok\n");
+
+ return 0;
}
-static int brcms_resume(struct pci_dev *pdev)
+static int brcms_resume(struct bcma_device *pdev)
{
- struct brcms_info *wl;
- struct ieee80211_hw *hw;
- int err = 0;
- u32 val;
-
- hw = pci_get_drvdata(pdev);
- wl = hw->priv;
- if (!wl) {
- wiphy_err(wl->wiphy,
- "wl: brcms_resume: pci_get_drvdata failed\n");
- return -ENODEV;
- }
-
- err = pci_set_power_state(pdev, PCI_D0);
- if (err)
- return err;
-
- pci_restore_state(pdev);
-
- err = pci_enable_device(pdev);
- if (err)
- return err;
-
- pci_set_master(pdev);
-
- pci_read_config_dword(pdev, 0x40, &val);
- if ((val & 0x0000ff00) != 0)
- pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
-
- /*
- * done. driver will be put in up state
- * in brcms_ops_add_interface() call.
- */
- return err;
+ pr_debug("brcms_resume ok\n");
+ return 0;
}
-static struct pci_driver brcms_pci_driver = {
+static struct bcma_driver brcms_bcma_driver = {
.name = KBUILD_MODNAME,
- .probe = brcms_pci_probe,
+ .probe = brcms_bcma_probe,
.suspend = brcms_suspend,
.resume = brcms_resume,
.remove = __devexit_p(brcms_remove),
- .id_table = brcms_pci_id_table,
+ .id_table = brcms_coreid_table,
};
/**
- * This is the main entry point for the WL driver.
+ * This is the main entry point for the brcmsmac driver.
*
* This function determines if a device pointed to by pdev is a WL device,
* and if so, performs a brcms_attach() on it.
@@ -1278,26 +1182,24 @@ static int __init brcms_module_init(void)
brcm_msg_level = msglevel;
#endif /* BCMDBG */
- error = pci_register_driver(&brcms_pci_driver);
+ error = bcma_driver_register(&brcms_bcma_driver);
+ printk(KERN_ERR "%s: register returned %d\n", __func__, error);
if (!error)
return 0;
-
-
return error;
}
/**
- * This function unloads the WL driver from the system.
+ * This function unloads the brcmsmac driver from the system.
*
- * This function unconditionally unloads the WL driver module from the
+ * This function unconditionally unloads the brcmsmac driver module from the
* system.
*
*/
static void __exit brcms_module_exit(void)
{
- pci_unregister_driver(&brcms_pci_driver);
-
+ bcma_driver_unregister(&brcms_bcma_driver);
}
module_init(brcms_module_init);
@@ -1319,8 +1221,7 @@ void brcms_init(struct brcms_info *wl)
{
BCMMSG(wl->pub->ieee_hw->wiphy, "wl%d\n", wl->pub->unit);
brcms_reset(wl);
-
- brcms_c_init(wl->wlc);
+ brcms_c_init(wl->wlc, wl->mute_tx);
}
/*
@@ -1332,11 +1233,19 @@ uint brcms_reset(struct brcms_info *wl)
brcms_c_reset(wl->wlc);
/* dpc will not be rescheduled */
- wl->resched = 0;
+ wl->resched = false;
return 0;
}
+void brcms_fatal_error(struct brcms_info *wl)
+{
+ wiphy_err(wl->wlc->wiphy, "wl%d: fatal error, reinitializing\n",
+ wl->wlc->pub->unit);
+ brcms_reset(wl);
+ ieee80211_restart_hw(wl->pub->ieee_hw);
+}
+
/*
* These are interrupt on/off entry points. Disable interrupts
* during interrupt state transition.
@@ -1561,11 +1470,10 @@ int brcms_ucode_init_buf(struct brcms_info *wl, void **pbuf, u32 idx)
if (le32_to_cpu(hdr->idx) == idx) {
pdata = wl->fw.fw_bin[i]->data +
le32_to_cpu(hdr->offset);
- *pbuf = kmalloc(len, GFP_ATOMIC);
+ *pbuf = kmemdup(pdata, len, GFP_ATOMIC);
if (*pbuf == NULL)
goto fail;
- memcpy(*pbuf, pdata, len);
return 0;
}
}
@@ -1578,7 +1486,7 @@ fail:
}
/*
- * Precondition: Since this function is called in brcms_pci_probe() context,
+ * Precondition: Since this function is called in brcms_bcma_probe() context,
* no locking is required.
*/
int brcms_ucode_init_uint(struct brcms_info *wl, size_t *n_bytes, u32 idx)
@@ -1618,7 +1526,7 @@ void brcms_ucode_free_buf(void *p)
/*
* checks validity of all firmware images loaded from user space
*
- * Precondition: Since this function is called in brcms_pci_probe() context,
+ * Precondition: Since this function is called in brcms_bcma_probe() context,
* no locking is required.
*/
int brcms_check_firmwares(struct brcms_info *wl)
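
[Editorial sketch, not part of the patch] The mac80211_if.c changes above move the driver off the PCI bus: the pci_driver and PCI ID table become a bcma_driver matched on the 802.11 core, probe/remove/suspend/resume take a struct bcma_device, and driver data moves to bcma_get_drvdata()/bcma_set_drvdata(). A stripped-down skeleton of that shape; everything other than the bcma/mac80211 API names is illustrative:

static struct bcma_device_id example_id_table[] = {
	BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 23, BCMA_ANY_CLASS),
	BCMA_CORETABLE_END
};
MODULE_DEVICE_TABLE(bcma, example_id_table);

static int example_probe(struct bcma_device *core)
{
	struct ieee80211_hw *hw;

	hw = ieee80211_alloc_hw(sizeof(struct brcms_info), &brcms_ops);
	if (!hw)
		return -ENOMEM;
	SET_IEEE80211_DEV(hw, &core->dev);
	bcma_set_drvdata(core, hw);	/* replaces pci_set_drvdata() */
	return 0;
}

static void example_remove(struct bcma_device *core)
{
	struct ieee80211_hw *hw = bcma_get_drvdata(core);

	bcma_set_drvdata(core, NULL);
	ieee80211_free_hw(hw);
}

static struct bcma_driver example_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= example_id_table,
	.probe		= example_probe,
	.remove		= example_remove,
};

Registration then goes through bcma_driver_register()/bcma_driver_unregister(), as in brcms_module_init()/brcms_module_exit() above.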
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
index 177f0e44e4b6..8f60419c37bf 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
@@ -68,8 +68,6 @@ struct brcms_info {
spinlock_t lock; /* per-device perimeter lock */
spinlock_t isr_lock; /* per-device ISR synchronization lock */
- /* regsva for unmap in brcms_free() */
- void __iomem *regsva; /* opaque chip registers virtual address */
/* timer related fields */
atomic_t callbacks; /* # outstanding callback functions */
@@ -80,6 +78,7 @@ struct brcms_info {
struct brcms_firmware fw;
struct wiphy *wiphy;
struct brcms_ucode ucode;
+ bool mute_tx;
};
/* misc callbacks */
@@ -104,5 +103,6 @@ extern bool brcms_del_timer(struct brcms_timer *timer);
extern void brcms_msleep(struct brcms_info *wl, uint ms);
extern void brcms_dpc(unsigned long data);
extern void brcms_timer(struct brcms_timer *t);
+extern void brcms_fatal_error(struct brcms_info *wl);
#endif /* _BRCM_MAC80211_IF_H_ */
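
[Editorial sketch, not part of the patch] The new mute_tx member and the brcms_fatal_error() declaration support the reworked start/stop flow in mac80211_if.c: transmission stays muted from brcms_ops_start() until a station interface is added, and brcms_init() forwards the flag to brcms_c_init(). A condensed sketch of that flow, locking and error handling elided, with illustrative function names:

static int example_ops_start(struct ieee80211_hw *hw)
{
	struct brcms_info *wl = hw->priv;

	/* no frames are acknowledged before a non-monitor interface exists */
	wl->mute_tx = true;
	return brcms_up(wl);	/* brcms_init() hands mute_tx to brcms_c_init() */
}

static int example_add_interface(struct ieee80211_hw *hw,
				 struct ieee80211_vif *vif)
{
	struct brcms_info *wl = hw->priv;

	if (vif->type != NL80211_IFTYPE_STATION)
		return -EOPNOTSUPP;

	wl->mute_tx = false;
	brcms_c_mute(wl->wlc, false);
	return 0;
}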
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
index 510e9bb52287..f7ed34034f88 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -30,44 +30,21 @@
#include "mac80211_if.h"
#include "ucode_loader.h"
#include "main.h"
+#include "soc.h"
/*
* Indication for txflowcontrol that all priority bits in
* TXQ_STOP_FOR_PRIOFC_MASK are to be considered.
*/
-#define ALLPRIO -1
-
-/*
- * 32 SSID chars, max of 4 chars for each SSID char "\xFF", plus NULL.
- */
-#define SSID_FMT_BUF_LEN ((4 * IEEE80211_MAX_SSID_LEN) + 1)
+#define ALLPRIO -1
/* watchdog timer, in unit of ms */
-#define TIMER_INTERVAL_WATCHDOG 1000
+#define TIMER_INTERVAL_WATCHDOG 1000
/* radio monitor timer, in unit of ms */
-#define TIMER_INTERVAL_RADIOCHK 800
-
-/* Max MPC timeout, in unit of watchdog */
-#ifndef BRCMS_MPC_MAX_DELAYCNT
-#define BRCMS_MPC_MAX_DELAYCNT 10
-#endif
-
-/* Min MPC timeout, in unit of watchdog */
-#define BRCMS_MPC_MIN_DELAYCNT 1
-#define BRCMS_MPC_THRESHOLD 3 /* MPC count threshold level */
+#define TIMER_INTERVAL_RADIOCHK 800
/* beacon interval, in unit of 1024TU */
-#define BEACON_INTERVAL_DEFAULT 100
-/* DTIM interval, in unit of beacon interval */
-#define DTIM_INTERVAL_DEFAULT 3
-
-/* Scale down delays to accommodate QT slow speed */
-/* beacon interval, in unit of 1024TU */
-#define BEACON_INTERVAL_DEF_QT 20
-/* DTIM interval, in unit of beacon interval */
-#define DTIM_INTERVAL_DEF_QT 1
-
-#define TBTT_ALIGN_LEEWAY_US 100 /* min leeway before first TBTT in us */
+#define BEACON_INTERVAL_DEFAULT 100
/* n-mode support capability */
/* 2x2 includes both 1x1 & 2x2 devices
@@ -78,113 +55,71 @@
#define WL_11N_3x3 3
#define WL_11N_4x4 4
-/* define 11n feature disable flags */
-#define WLFEATURE_DISABLE_11N 0x00000001
-#define WLFEATURE_DISABLE_11N_STBC_TX 0x00000002
-#define WLFEATURE_DISABLE_11N_STBC_RX 0x00000004
-#define WLFEATURE_DISABLE_11N_SGI_TX 0x00000008
-#define WLFEATURE_DISABLE_11N_SGI_RX 0x00000010
-#define WLFEATURE_DISABLE_11N_AMPDU_TX 0x00000020
-#define WLFEATURE_DISABLE_11N_AMPDU_RX 0x00000040
-#define WLFEATURE_DISABLE_11N_GF 0x00000080
-
-#define EDCF_ACI_MASK 0x60
-#define EDCF_ACI_SHIFT 5
-#define EDCF_ECWMIN_MASK 0x0f
-#define EDCF_ECWMAX_SHIFT 4
-#define EDCF_AIFSN_MASK 0x0f
-#define EDCF_AIFSN_MAX 15
-#define EDCF_ECWMAX_MASK 0xf0
-
-#define EDCF_AC_BE_TXOP_STA 0x0000
-#define EDCF_AC_BK_TXOP_STA 0x0000
-#define EDCF_AC_VO_ACI_STA 0x62
-#define EDCF_AC_VO_ECW_STA 0x32
-#define EDCF_AC_VI_ACI_STA 0x42
-#define EDCF_AC_VI_ECW_STA 0x43
-#define EDCF_AC_BK_ECW_STA 0xA4
-#define EDCF_AC_VI_TXOP_STA 0x005e
-#define EDCF_AC_VO_TXOP_STA 0x002f
-#define EDCF_AC_BE_ACI_STA 0x03
-#define EDCF_AC_BE_ECW_STA 0xA4
-#define EDCF_AC_BK_ACI_STA 0x27
-#define EDCF_AC_VO_TXOP_AP 0x002f
-
-#define EDCF_TXOP2USEC(txop) ((txop) << 5)
-#define EDCF_ECW2CW(exp) ((1 << (exp)) - 1)
-
-#define APHY_SYMBOL_TIME 4
-#define APHY_PREAMBLE_TIME 16
-#define APHY_SIGNAL_TIME 4
-#define APHY_SIFS_TIME 16
-#define APHY_SERVICE_NBITS 16
-#define APHY_TAIL_NBITS 6
-#define BPHY_SIFS_TIME 10
-#define BPHY_PLCP_SHORT_TIME 96
-
-#define PREN_PREAMBLE 24
-#define PREN_MM_EXT 12
-#define PREN_PREAMBLE_EXT 4
+#define EDCF_ACI_MASK 0x60
+#define EDCF_ACI_SHIFT 5
+#define EDCF_ECWMIN_MASK 0x0f
+#define EDCF_ECWMAX_SHIFT 4
+#define EDCF_AIFSN_MASK 0x0f
+#define EDCF_AIFSN_MAX 15
+#define EDCF_ECWMAX_MASK 0xf0
+
+#define EDCF_AC_BE_TXOP_STA 0x0000
+#define EDCF_AC_BK_TXOP_STA 0x0000
+#define EDCF_AC_VO_ACI_STA 0x62
+#define EDCF_AC_VO_ECW_STA 0x32
+#define EDCF_AC_VI_ACI_STA 0x42
+#define EDCF_AC_VI_ECW_STA 0x43
+#define EDCF_AC_BK_ECW_STA 0xA4
+#define EDCF_AC_VI_TXOP_STA 0x005e
+#define EDCF_AC_VO_TXOP_STA 0x002f
+#define EDCF_AC_BE_ACI_STA 0x03
+#define EDCF_AC_BE_ECW_STA 0xA4
+#define EDCF_AC_BK_ACI_STA 0x27
+#define EDCF_AC_VO_TXOP_AP 0x002f
+
+#define EDCF_TXOP2USEC(txop) ((txop) << 5)
+#define EDCF_ECW2CW(exp) ((1 << (exp)) - 1)
+
+#define APHY_SYMBOL_TIME 4
+#define APHY_PREAMBLE_TIME 16
+#define APHY_SIGNAL_TIME 4
+#define APHY_SIFS_TIME 16
+#define APHY_SERVICE_NBITS 16
+#define APHY_TAIL_NBITS 6
+#define BPHY_SIFS_TIME 10
+#define BPHY_PLCP_SHORT_TIME 96
+
+#define PREN_PREAMBLE 24
+#define PREN_MM_EXT 12
+#define PREN_PREAMBLE_EXT 4
#define DOT11_MAC_HDR_LEN 24
-#define DOT11_ACK_LEN 10
-#define DOT11_BA_LEN 4
+#define DOT11_ACK_LEN 10
+#define DOT11_BA_LEN 4
#define DOT11_OFDM_SIGNAL_EXTENSION 6
#define DOT11_MIN_FRAG_LEN 256
-#define DOT11_RTS_LEN 16
-#define DOT11_CTS_LEN 10
+#define DOT11_RTS_LEN 16
+#define DOT11_CTS_LEN 10
#define DOT11_BA_BITMAP_LEN 128
#define DOT11_MIN_BEACON_PERIOD 1
#define DOT11_MAX_BEACON_PERIOD 0xFFFF
-#define DOT11_MAXNUMFRAGS 16
+#define DOT11_MAXNUMFRAGS 16
#define DOT11_MAX_FRAG_LEN 2346
-#define BPHY_PLCP_TIME 192
-#define RIFS_11N_TIME 2
-
-#define WME_VER 1
-#define WME_SUBTYPE_PARAM_IE 1
-#define WME_TYPE 2
-#define WME_OUI "\x00\x50\xf2"
+#define BPHY_PLCP_TIME 192
+#define RIFS_11N_TIME 2
-#define AC_BE 0
-#define AC_BK 1
-#define AC_VI 2
-#define AC_VO 3
-
-#define BCN_TMPL_LEN 512 /* length of the BCN template area */
+/* length of the BCN template area */
+#define BCN_TMPL_LEN 512
/* brcms_bss_info flag bit values */
-#define BRCMS_BSS_HT 0x0020 /* BSS is HT (MIMO) capable */
-
-/* Flags used in brcms_c_txq_info.stopped */
-/* per prio flow control bits */
-#define TXQ_STOP_FOR_PRIOFC_MASK 0x000000FF
-/* stop txq enqueue for packet drain */
-#define TXQ_STOP_FOR_PKT_DRAIN 0x00000100
-/* stop txq enqueue for ampdu flow control */
-#define TXQ_STOP_FOR_AMPDU_FLOW_CNTRL 0x00000200
-
-#define BRCMS_HWRXOFF 38 /* chip rx buffer offset */
+#define BRCMS_BSS_HT 0x0020 /* BSS is HT (MIMO) capable */
-/* Find basic rate for a given rate */
-static u8 brcms_basic_rate(struct brcms_c_info *wlc, u32 rspec)
-{
- if (is_mcs_rate(rspec))
- return wlc->band->basic_rate[mcs_table[rspec & RSPEC_RATE_MASK]
- .leg_ofdm];
- return wlc->band->basic_rate[rspec & RSPEC_RATE_MASK];
-}
-
-static u16 frametype(u32 rspec, u8 mimoframe)
-{
- if (is_mcs_rate(rspec))
- return mimoframe;
- return is_cck_rate(rspec) ? FT_CCK : FT_OFDM;
-}
+/* chip rx buffer offset */
+#define BRCMS_HWRXOFF 38
/* rfdisable delay timer 500 ms, runs of ALP clock */
-#define RFDISABLE_DEFAULT 10000000
+#define RFDISABLE_DEFAULT 10000000
#define BRCMS_TEMPSENSE_PERIOD 10 /* 10 second timeout */
@@ -194,87 +129,83 @@ static u16 frametype(u32 rspec, u8 mimoframe)
* These constants are used ONLY by wlc_prio2prec_map. Do not use them
* elsewhere.
*/
-#define _BRCMS_PREC_NONE 0 /* None = - */
-#define _BRCMS_PREC_BK 2 /* BK - Background */
-#define _BRCMS_PREC_BE 4 /* BE - Best-effort */
-#define _BRCMS_PREC_EE 6 /* EE - Excellent-effort */
-#define _BRCMS_PREC_CL 8 /* CL - Controlled Load */
-#define _BRCMS_PREC_VI 10 /* Vi - Video */
-#define _BRCMS_PREC_VO 12 /* Vo - Voice */
-#define _BRCMS_PREC_NC 14 /* NC - Network Control */
-
-/* The BSS is generating beacons in HW */
-#define BRCMS_BSSCFG_HW_BCN 0x20
-
-#define SYNTHPU_DLY_APHY_US 3700 /* a phy synthpu_dly time in us */
-#define SYNTHPU_DLY_BPHY_US 1050 /* b/g phy synthpu_dly time in us */
-#define SYNTHPU_DLY_NPHY_US 2048 /* n phy REV3 synthpu_dly time in us */
-#define SYNTHPU_DLY_LPPHY_US 300 /* lpphy synthpu_dly time in us */
-
-#define SYNTHPU_DLY_PHY_US_QT 100 /* QT synthpu_dly time in us */
-
-#define ANTCNT 10 /* vanilla M_MAX_ANTCNT value */
+#define _BRCMS_PREC_NONE 0 /* None = - */
+#define _BRCMS_PREC_BK 2 /* BK - Background */
+#define _BRCMS_PREC_BE 4 /* BE - Best-effort */
+#define _BRCMS_PREC_EE 6 /* EE - Excellent-effort */
+#define _BRCMS_PREC_CL 8 /* CL - Controlled Load */
+#define _BRCMS_PREC_VI 10 /* Vi - Video */
+#define _BRCMS_PREC_VO 12 /* Vo - Voice */
+#define _BRCMS_PREC_NC 14 /* NC - Network Control */
+
+/* synthpu_dly times in us */
+#define SYNTHPU_DLY_APHY_US 3700
+#define SYNTHPU_DLY_BPHY_US 1050
+#define SYNTHPU_DLY_NPHY_US 2048
+#define SYNTHPU_DLY_LPPHY_US 300
+
+#define ANTCNT 10 /* vanilla M_MAX_ANTCNT val */
/* Per-AC retry limit register definitions; uses defs.h bitfield macros */
-#define EDCF_SHORT_S 0
-#define EDCF_SFB_S 4
-#define EDCF_LONG_S 8
-#define EDCF_LFB_S 12
-#define EDCF_SHORT_M BITFIELD_MASK(4)
-#define EDCF_SFB_M BITFIELD_MASK(4)
-#define EDCF_LONG_M BITFIELD_MASK(4)
-#define EDCF_LFB_M BITFIELD_MASK(4)
+#define EDCF_SHORT_S 0
+#define EDCF_SFB_S 4
+#define EDCF_LONG_S 8
+#define EDCF_LFB_S 12
+#define EDCF_SHORT_M BITFIELD_MASK(4)
+#define EDCF_SFB_M BITFIELD_MASK(4)
+#define EDCF_LONG_M BITFIELD_MASK(4)
+#define EDCF_LFB_M BITFIELD_MASK(4)
-#define RETRY_SHORT_DEF 7 /* Default Short retry Limit */
-#define RETRY_SHORT_MAX 255 /* Maximum Short retry Limit */
-#define RETRY_LONG_DEF 4 /* Default Long retry count */
-#define RETRY_SHORT_FB 3 /* Short count for fallback rate */
-#define RETRY_LONG_FB 2 /* Long count for fallback rate */
+#define RETRY_SHORT_DEF 7 /* Default Short retry Limit */
+#define RETRY_SHORT_MAX 255 /* Maximum Short retry Limit */
+#define RETRY_LONG_DEF 4 /* Default Long retry count */
+#define RETRY_SHORT_FB 3 /* Short count for fb rate */
+#define RETRY_LONG_FB 2 /* Long count for fb rate */
-#define APHY_CWMIN 15
-#define PHY_CWMAX 1023
+#define APHY_CWMIN 15
+#define PHY_CWMAX 1023
-#define EDCF_AIFSN_MIN 1
+#define EDCF_AIFSN_MIN 1
-#define FRAGNUM_MASK 0xF
+#define FRAGNUM_MASK 0xF
-#define APHY_SLOT_TIME 9
-#define BPHY_SLOT_TIME 20
+#define APHY_SLOT_TIME 9
+#define BPHY_SLOT_TIME 20
-#define WL_SPURAVOID_OFF 0
-#define WL_SPURAVOID_ON1 1
-#define WL_SPURAVOID_ON2 2
+#define WL_SPURAVOID_OFF 0
+#define WL_SPURAVOID_ON1 1
+#define WL_SPURAVOID_ON2 2
/* invalid core flags, use the saved coreflags */
-#define BRCMS_USE_COREFLAGS 0xffffffff
+#define BRCMS_USE_COREFLAGS 0xffffffff
/* values for PLCPHdr_override */
-#define BRCMS_PLCP_AUTO -1
-#define BRCMS_PLCP_SHORT 0
-#define BRCMS_PLCP_LONG 1
+#define BRCMS_PLCP_AUTO -1
+#define BRCMS_PLCP_SHORT 0
+#define BRCMS_PLCP_LONG 1
/* values for g_protection_override and n_protection_override */
#define BRCMS_PROTECTION_AUTO -1
#define BRCMS_PROTECTION_OFF 0
#define BRCMS_PROTECTION_ON 1
#define BRCMS_PROTECTION_MMHDR_ONLY 2
-#define BRCMS_PROTECTION_CTS_ONLY 3
+#define BRCMS_PROTECTION_CTS_ONLY 3
/* values for g_protection_control and n_protection_control */
-#define BRCMS_PROTECTION_CTL_OFF 0
+#define BRCMS_PROTECTION_CTL_OFF 0
#define BRCMS_PROTECTION_CTL_LOCAL 1
#define BRCMS_PROTECTION_CTL_OVERLAP 2
/* values for n_protection */
#define BRCMS_N_PROTECTION_OFF 0
#define BRCMS_N_PROTECTION_OPTIONAL 1
-#define BRCMS_N_PROTECTION_20IN40 2
+#define BRCMS_N_PROTECTION_20IN40 2
#define BRCMS_N_PROTECTION_MIXEDMODE 3
/* values for band specific 40MHz capabilities */
-#define BRCMS_N_BW_20ALL 0
-#define BRCMS_N_BW_40ALL 1
-#define BRCMS_N_BW_20IN2G_40IN5G 2
+#define BRCMS_N_BW_20ALL 0
+#define BRCMS_N_BW_40ALL 1
+#define BRCMS_N_BW_20IN2G_40IN5G 2
/* bitflags for SGI support (sgi_rx iovar) */
#define BRCMS_N_SGI_20 0x01
@@ -282,48 +213,42 @@ static u16 frametype(u32 rspec, u8 mimoframe)
/* defines used by the nrate iovar */
/* MSC in use,indicates b0-6 holds an mcs */
-#define NRATE_MCS_INUSE 0x00000080
+#define NRATE_MCS_INUSE 0x00000080
/* rate/mcs value */
-#define NRATE_RATE_MASK 0x0000007f
+#define NRATE_RATE_MASK 0x0000007f
/* stf mode mask: siso, cdd, stbc, sdm */
-#define NRATE_STF_MASK 0x0000ff00
+#define NRATE_STF_MASK 0x0000ff00
/* stf mode shift */
-#define NRATE_STF_SHIFT 8
-/* bit indicates override both rate & mode */
-#define NRATE_OVERRIDE 0x80000000
+#define NRATE_STF_SHIFT 8
/* bit indicate to override mcs only */
-#define NRATE_OVERRIDE_MCS_ONLY 0x40000000
-#define NRATE_SGI_MASK 0x00800000 /* sgi mode */
-#define NRATE_SGI_SHIFT 23 /* sgi mode */
-#define NRATE_LDPC_CODING 0x00400000 /* bit indicates adv coding in use */
-#define NRATE_LDPC_SHIFT 22 /* ldpc shift */
+#define NRATE_OVERRIDE_MCS_ONLY 0x40000000
+#define NRATE_SGI_MASK 0x00800000 /* sgi mode */
+#define NRATE_SGI_SHIFT 23 /* sgi mode */
+#define NRATE_LDPC_CODING 0x00400000 /* adv coding in use */
+#define NRATE_LDPC_SHIFT 22 /* ldpc shift */
-#define NRATE_STF_SISO 0 /* stf mode SISO */
-#define NRATE_STF_CDD 1 /* stf mode CDD */
-#define NRATE_STF_STBC 2 /* stf mode STBC */
-#define NRATE_STF_SDM 3 /* stf mode SDM */
+#define NRATE_STF_SISO 0 /* stf mode SISO */
+#define NRATE_STF_CDD 1 /* stf mode CDD */
+#define NRATE_STF_STBC 2 /* stf mode STBC */
+#define NRATE_STF_SDM 3 /* stf mode SDM */
-#define MAX_DMA_SEGS 4
+#define MAX_DMA_SEGS 4
/* Max # of entries in Tx FIFO based on 4kb page size */
-#define NTXD 256
+#define NTXD 256
/* Max # of entries in Rx FIFO based on 4kb page size */
-#define NRXD 256
+#define NRXD 256
/* try to keep this # rbufs posted to the chip */
-#define NRXBUFPOST 32
+#define NRXBUFPOST 32
/* data msg txq hiwat mark */
-#define BRCMS_DATAHIWAT 50
+#define BRCMS_DATAHIWAT 50
-/* bounded rx loops */
-#define RXBND 8 /* max # frames to process in brcms_c_recv() */
-#define TXSBND 8 /* max # tx status to process in wlc_txstatus() */
-
-/*
- * 32 SSID chars, max of 4 chars for each SSID char "\xFF", plus NULL.
- */
-#define SSID_FMT_BUF_LEN ((4 * IEEE80211_MAX_SSID_LEN) + 1)
+/* max # frames to process in brcms_c_recv() */
+#define RXBND 8
+/* max # tx status to process in wlc_txstatus() */
+#define TXSBND 8
/* brcmu_format_flags() bit description structure */
struct brcms_c_bit_desc {
@@ -375,10 +300,22 @@ uint brcm_msg_level =
#endif /* BCMDBG */
/* TX FIFO number to WME/802.1E Access Category */
-static const u8 wme_fifo2ac[] = { AC_BK, AC_BE, AC_VI, AC_VO, AC_BE, AC_BE };
+static const u8 wme_fifo2ac[] = {
+ IEEE80211_AC_BK,
+ IEEE80211_AC_BE,
+ IEEE80211_AC_VI,
+ IEEE80211_AC_VO,
+ IEEE80211_AC_BE,
+ IEEE80211_AC_BE
+};
-/* WME/802.1E Access Category to TX FIFO number */
-static const u8 wme_ac2fifo[] = { 1, 0, 2, 3 };
+/* ieee80211 Access Category to TX FIFO number */
+static const u8 wme_ac2fifo[] = {
+ TX_AC_VO_FIFO,
+ TX_AC_VI_FIFO,
+ TX_AC_BE_FIFO,
+ TX_AC_BK_FIFO
+};
/* 802.1D Priority to precedence queue mapping */
const u8 wlc_prio2prec_map[] = {
@@ -405,13 +342,6 @@ static const u16 xmtfifo_sz[][NFIFO] = {
{9, 58, 22, 14, 14, 5},
};
-static const u8 acbitmap2maxprio[] = {
- PRIO_8021D_BE, PRIO_8021D_BE, PRIO_8021D_BK, PRIO_8021D_BK,
- PRIO_8021D_VI, PRIO_8021D_VI, PRIO_8021D_VI, PRIO_8021D_VI,
- PRIO_8021D_VO, PRIO_8021D_VO, PRIO_8021D_VO, PRIO_8021D_VO,
- PRIO_8021D_VO, PRIO_8021D_VO, PRIO_8021D_VO, PRIO_8021D_VO
-};
-
#ifdef BCMDBG
static const char * const fifo_names[] = {
"AC_BK", "AC_BE", "AC_VI", "AC_VO", "BCMC", "ATIM" };
@@ -424,6 +354,22 @@ static const char fifo_names[6][0];
static struct brcms_c_info *wlc_info_dbg = (struct brcms_c_info *) (NULL);
#endif
+/* Find basic rate for a given rate */
+static u8 brcms_basic_rate(struct brcms_c_info *wlc, u32 rspec)
+{
+ if (is_mcs_rate(rspec))
+ return wlc->band->basic_rate[mcs_table[rspec & RSPEC_RATE_MASK]
+ .leg_ofdm];
+ return wlc->band->basic_rate[rspec & RSPEC_RATE_MASK];
+}
+
+static u16 frametype(u32 rspec, u8 mimoframe)
+{
+ if (is_mcs_rate(rspec))
+ return mimoframe;
+ return is_cck_rate(rspec) ? FT_CCK : FT_OFDM;
+}
+
/* currently the best mechanism for determining SIFS is the band in use */
static u16 get_sifs(struct brcms_band *band)
{
@@ -442,10 +388,13 @@ static u16 get_sifs(struct brcms_band *band)
*/
static bool brcms_deviceremoved(struct brcms_c_info *wlc)
{
+ u32 macctrl;
+
if (!wlc->hw->clk)
return ai_deviceremoved(wlc->hw->sih);
- return (R_REG(&wlc->hw->regs->maccontrol) &
- (MCTL_PSM_JMP_0 | MCTL_IHR_EN)) != MCTL_IHR_EN;
+ macctrl = bcma_read32(wlc->hw->d11core,
+ D11REGOFFS(maccontrol));
+ return (macctrl & (MCTL_PSM_JMP_0 | MCTL_IHR_EN)) != MCTL_IHR_EN;
}
/* sum the individual fifo tx pending packet counts */
@@ -470,20 +419,6 @@ static int brcms_chspec_bw(u16 chanspec)
return BRCMS_10_MHZ;
}
-/*
- * return true if Minimum Power Consumption should
- * be entered, false otherwise
- */
-static bool brcms_c_is_non_delay_mpc(struct brcms_c_info *wlc)
-{
- return false;
-}
-
-static bool brcms_c_ismpc(struct brcms_c_info *wlc)
-{
- return (wlc->mpc_delay_off == 0) && (brcms_c_is_non_delay_mpc(wlc));
-}
-
static void brcms_c_bsscfg_mfree(struct brcms_bss_cfg *cfg)
{
if (cfg == NULL)
@@ -650,17 +585,15 @@ brcms_c_attach_malloc(uint unit, uint *err, uint devid)
static void brcms_b_update_slot_timing(struct brcms_hardware *wlc_hw,
bool shortslot)
{
- struct d11regs __iomem *regs;
-
- regs = wlc_hw->regs;
+ struct bcma_device *core = wlc_hw->d11core;
if (shortslot) {
/* 11g short slot: 11a timing */
- W_REG(&regs->ifs_slot, 0x0207); /* APHY_SLOT_TIME */
+ bcma_write16(core, D11REGOFFS(ifs_slot), 0x0207);
brcms_b_write_shm(wlc_hw, M_DOT11_SLOT, APHY_SLOT_TIME);
} else {
/* 11g long slot: 11b timing */
- W_REG(&regs->ifs_slot, 0x0212); /* BPHY_SLOT_TIME */
+ bcma_write16(core, D11REGOFFS(ifs_slot), 0x0212);
brcms_b_write_shm(wlc_hw, M_DOT11_SLOT, BPHY_SLOT_TIME);
}
}
@@ -669,9 +602,8 @@ static void brcms_b_update_slot_timing(struct brcms_hardware *wlc_hw,
* calculate frame duration of a given rate and length, return
* time in usec unit
*/
-uint
-brcms_c_calc_frame_time(struct brcms_c_info *wlc, u32 ratespec,
- u8 preamble_type, uint mac_len)
+static uint brcms_c_calc_frame_time(struct brcms_c_info *wlc, u32 ratespec,
+ u8 preamble_type, uint mac_len)
{
uint nsyms, dur = 0, Ndps, kNdps;
uint rate = rspec2rate(ratespec);
@@ -741,24 +673,22 @@ brcms_c_calc_frame_time(struct brcms_c_info *wlc, u32 ratespec,
static void brcms_c_write_inits(struct brcms_hardware *wlc_hw,
const struct d11init *inits)
{
+ struct bcma_device *core = wlc_hw->d11core;
int i;
- u8 __iomem *base;
- u8 __iomem *addr;
+ uint offset;
u16 size;
u32 value;
BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
- base = (u8 __iomem *)wlc_hw->regs;
-
for (i = 0; inits[i].addr != cpu_to_le16(0xffff); i++) {
size = le16_to_cpu(inits[i].size);
- addr = base + le16_to_cpu(inits[i].addr);
+ offset = le16_to_cpu(inits[i].addr);
value = le32_to_cpu(inits[i].value);
if (size == 2)
- W_REG((u16 __iomem *)addr, value);
+ bcma_write16(core, offset, value);
else if (size == 4)
- W_REG((u32 __iomem *)addr, value);
+ bcma_write32(core, offset, value);
else
break;
}
@@ -808,6 +738,14 @@ static void brcms_c_ucode_bsinit(struct brcms_hardware *wlc_hw)
}
}
+static void brcms_b_core_ioctl(struct brcms_hardware *wlc_hw, u32 m, u32 v)
+{
+ struct bcma_device *core = wlc_hw->d11core;
+ u32 ioctl = bcma_aread32(core, BCMA_IOCTL) & ~m;
+
+ bcma_awrite32(core, BCMA_IOCTL, ioctl | v);
+}
+
static void brcms_b_core_phy_clk(struct brcms_hardware *wlc_hw, bool clk)
{
BCMMSG(wlc_hw->wlc->wiphy, "wl%d: clk %d\n", wlc_hw->unit, clk);
@@ -816,17 +754,17 @@ static void brcms_b_core_phy_clk(struct brcms_hardware *wlc_hw, bool clk)
if (OFF == clk) { /* clear gmode bit, put phy into reset */
- ai_core_cflags(wlc_hw->sih, (SICF_PRST | SICF_FGC | SICF_GMODE),
- (SICF_PRST | SICF_FGC));
+ brcms_b_core_ioctl(wlc_hw, (SICF_PRST | SICF_FGC | SICF_GMODE),
+ (SICF_PRST | SICF_FGC));
udelay(1);
- ai_core_cflags(wlc_hw->sih, (SICF_PRST | SICF_FGC), SICF_PRST);
+ brcms_b_core_ioctl(wlc_hw, (SICF_PRST | SICF_FGC), SICF_PRST);
udelay(1);
} else { /* take phy out of reset */
- ai_core_cflags(wlc_hw->sih, (SICF_PRST | SICF_FGC), SICF_FGC);
+ brcms_b_core_ioctl(wlc_hw, (SICF_PRST | SICF_FGC), SICF_FGC);
udelay(1);
- ai_core_cflags(wlc_hw->sih, (SICF_FGC), 0);
+ brcms_b_core_ioctl(wlc_hw, SICF_FGC, 0);
udelay(1);
}
@@ -847,9 +785,14 @@ static void brcms_c_setxband(struct brcms_hardware *wlc_hw, uint bandunit)
wlc_hw->wlc->band = wlc_hw->wlc->bandstate[bandunit];
/* set gmode core flag */
- if (wlc_hw->sbclk && !wlc_hw->noreset)
- ai_core_cflags(wlc_hw->sih, SICF_GMODE,
- ((bandunit == 0) ? SICF_GMODE : 0));
+ if (wlc_hw->sbclk && !wlc_hw->noreset) {
+ u32 gmode = 0;
+
+ if (bandunit == 0)
+ gmode = SICF_GMODE;
+
+ brcms_b_core_ioctl(wlc_hw, SICF_GMODE, gmode);
+ }
}
/* switch to new band but leave it inactive */
@@ -857,10 +800,12 @@ static u32 brcms_c_setband_inact(struct brcms_c_info *wlc, uint bandunit)
{
struct brcms_hardware *wlc_hw = wlc->hw;
u32 macintmask;
+ u32 macctrl;
BCMMSG(wlc->wiphy, "wl%d\n", wlc_hw->unit);
-
- WARN_ON((R_REG(&wlc_hw->regs->maccontrol) & MCTL_EN_MAC) != 0);
+ macctrl = bcma_read32(wlc_hw->d11core,
+ D11REGOFFS(maccontrol));
+ WARN_ON((macctrl & MCTL_EN_MAC) != 0);
/* disable interrupts */
macintmask = brcms_intrsoff(wlc->wl);
@@ -969,7 +914,7 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
lfbl, /* Long Frame Rate Fallback Limit */
fbl;
- if (queue < AC_COUNT) {
+ if (queue < IEEE80211_NUM_ACS) {
sfbl = GFIELD(wlc->wme_retries[wme_fifo2ac[queue]],
EDCF_SFB);
lfbl = GFIELD(wlc->wme_retries[wme_fifo2ac[queue]],
@@ -1018,14 +963,12 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
tx_info->flags |= IEEE80211_TX_STAT_ACK;
}
- totlen = brcmu_pkttotlen(p);
+ totlen = p->len;
free_pdu = true;
brcms_c_txfifo_complete(wlc, queue, 1);
if (lastframe) {
- p->next = NULL;
- p->prev = NULL;
/* remove PLCP & Broadcom tx descriptor header */
skb_pull(p, D11_PHY_HDR_LEN);
skb_pull(p, D11_TXH_LEN);
@@ -1053,7 +996,7 @@ brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)
{
bool morepending = false;
struct brcms_c_info *wlc = wlc_hw->wlc;
- struct d11regs __iomem *regs;
+ struct bcma_device *core;
struct tx_status txstatus, *txs;
u32 s1, s2;
uint n = 0;
@@ -1066,18 +1009,18 @@ brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)
BCMMSG(wlc->wiphy, "wl%d\n", wlc_hw->unit);
txs = &txstatus;
- regs = wlc_hw->regs;
+ core = wlc_hw->d11core;
*fatal = false;
+ s1 = bcma_read32(core, D11REGOFFS(frmtxstatus));
while (!(*fatal)
- && (s1 = R_REG(&regs->frmtxstatus)) & TXS_V) {
+ && (s1 & TXS_V)) {
if (s1 == 0xffffffff) {
wiphy_err(wlc->wiphy, "wl%d: %s: dead chip\n",
wlc_hw->unit, __func__);
return morepending;
}
-
- s2 = R_REG(&regs->frmtxstatus2);
+ s2 = bcma_read32(core, D11REGOFFS(frmtxstatus2));
txs->status = s1 & TXS_STATUS_MASK;
txs->frameid = (s1 & TXS_FID_MASK) >> TXS_FID_SHIFT;
@@ -1090,6 +1033,7 @@ brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)
/* !give others some time to run! */
if (++n >= max_tx_num)
break;
+ s1 = bcma_read32(core, D11REGOFFS(frmtxstatus));
}
if (*fatal)
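The rewrite above moves the frmtxstatus read out of the while condition: one priming bcma_read32() before the loop and a re-read at the bottom before the next iteration, with the TXS_V valid bit still gating the body. A sketch of the resulting loop shape, with core, fatal, n and max_tx_num standing in for the surrounding function's locals:

/* Poll frmtxstatus until the valid bit clears, a fatal condition is
 * seen, or the per-call budget is used up (register and bit names as
 * in the hunk above).
 */
u32 s1 = bcma_read32(core, D11REGOFFS(frmtxstatus));

while (!fatal && (s1 & TXS_V)) {
        u32 s2 = bcma_read32(core, D11REGOFFS(frmtxstatus2));

        /* ...decode s1/s2 into a struct tx_status and process it... */

        if (++n >= max_tx_num)
                break;          /* give other work a chance to run */

        s1 = bcma_read32(core, D11REGOFFS(frmtxstatus));
}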
@@ -1134,12 +1078,12 @@ brcms_c_mhfdef(struct brcms_c_info *wlc, u16 *mhfs, u16 mhf2_init)
}
}
-static struct dma64regs __iomem *
-dmareg(struct brcms_hardware *hw, uint direction, uint fifonum)
+static uint
+dmareg(uint direction, uint fifonum)
{
if (direction == DMA_TX)
- return &(hw->regs->fifo64regs[fifonum].dmaxmt);
- return &(hw->regs->fifo64regs[fifonum].dmarcv);
+ return offsetof(struct d11regs, fifo64regs[fifonum].dmaxmt);
+ return offsetof(struct d11regs, fifo64regs[fifonum].dmarcv);
}
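dmareg() now returns a byte offset into struct d11regs computed with offsetof() instead of a mapped __iomem pointer, so dma_attach() can be handed the bcma core plus plain offsets. The idiom itself is easy to check in isolation; the structure below is a made-up stand-in, not the real d11regs layout:

#include <stddef.h>
#include <stdio.h>

struct demo_fifo {                      /* illustrative only */
        unsigned int dmaxmt[8];
        unsigned int dmarcv[8];
};

struct demo_regs {
        unsigned int pad[16];
        struct demo_fifo fifo64regs[6];
};

int main(void)
{
        /* offsetof() is a compile-time constant byte offset, suitable
         * for offset-based accessors such as bcma_read32()/write32().
         */
        printf("fifo 2 tx block starts at byte %zu\n",
               offsetof(struct demo_regs, fifo64regs[2].dmaxmt));
        return 0;
}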
static bool brcms_b_attach_dmapio(struct brcms_c_info *wlc, uint j, bool wme)
@@ -1165,9 +1109,9 @@ static bool brcms_b_attach_dmapio(struct brcms_c_info *wlc, uint j, bool wme)
* TX: TX_AC_BK_FIFO (TX AC Background data packets)
* RX: RX_FIFO (RX data packets)
*/
- wlc_hw->di[0] = dma_attach(name, wlc_hw->sih,
- (wme ? dmareg(wlc_hw, DMA_TX, 0) :
- NULL), dmareg(wlc_hw, DMA_RX, 0),
+ wlc_hw->di[0] = dma_attach(name, wlc_hw->sih, wlc_hw->d11core,
+ (wme ? dmareg(DMA_TX, 0) : 0),
+ dmareg(DMA_RX, 0),
(wme ? NTXD : 0), NRXD,
RXBUFSZ, -1, NRXBUFPOST,
BRCMS_HWRXOFF, &brcm_msg_level);
@@ -1179,8 +1123,8 @@ static bool brcms_b_attach_dmapio(struct brcms_c_info *wlc, uint j, bool wme)
* (legacy) TX_DATA_FIFO (TX data packets)
* RX: UNUSED
*/
- wlc_hw->di[1] = dma_attach(name, wlc_hw->sih,
- dmareg(wlc_hw, DMA_TX, 1), NULL,
+ wlc_hw->di[1] = dma_attach(name, wlc_hw->sih, wlc_hw->d11core,
+ dmareg(DMA_TX, 1), 0,
NTXD, 0, 0, -1, 0, 0,
&brcm_msg_level);
dma_attach_err |= (NULL == wlc_hw->di[1]);
@@ -1190,8 +1134,8 @@ static bool brcms_b_attach_dmapio(struct brcms_c_info *wlc, uint j, bool wme)
* TX: TX_AC_VI_FIFO (TX AC Video data packets)
* RX: UNUSED
*/
- wlc_hw->di[2] = dma_attach(name, wlc_hw->sih,
- dmareg(wlc_hw, DMA_TX, 2), NULL,
+ wlc_hw->di[2] = dma_attach(name, wlc_hw->sih, wlc_hw->d11core,
+ dmareg(DMA_TX, 2), 0,
NTXD, 0, 0, -1, 0, 0,
&brcm_msg_level);
dma_attach_err |= (NULL == wlc_hw->di[2]);
@@ -1200,9 +1144,9 @@ static bool brcms_b_attach_dmapio(struct brcms_c_info *wlc, uint j, bool wme)
* TX: TX_AC_VO_FIFO (TX AC Voice data packets)
* (legacy) TX_CTL_FIFO (TX control & mgmt packets)
*/
- wlc_hw->di[3] = dma_attach(name, wlc_hw->sih,
- dmareg(wlc_hw, DMA_TX, 3),
- NULL, NTXD, 0, 0, -1,
+ wlc_hw->di[3] = dma_attach(name, wlc_hw->sih, wlc_hw->d11core,
+ dmareg(DMA_TX, 3),
+ 0, NTXD, 0, 0, -1,
0, 0, &brcm_msg_level);
dma_attach_err |= (NULL == wlc_hw->di[3]);
/* Cleaner to leave this as if with AP defined */
@@ -1276,7 +1220,7 @@ static void brcms_b_wait_for_wake(struct brcms_hardware *wlc_hw)
/* control chip clock to save power, enable dynamic clock or force fast clock */
static void brcms_b_clkctl_clk(struct brcms_hardware *wlc_hw, uint mode)
{
- if (wlc_hw->sih->cccaps & CC_CAP_PMU) {
+ if (ai_get_cccaps(wlc_hw->sih) & CC_CAP_PMU) {
/* new chips with PMU, CCS_FORCEHT will distribute the HT clock
* on backplane, but mac core will still run on ALP(not HT) when
* it enters powersave mode, which means the FCA bit may not be
@@ -1285,29 +1229,33 @@ static void brcms_b_clkctl_clk(struct brcms_hardware *wlc_hw, uint mode)
if (wlc_hw->clk) {
if (mode == CLK_FAST) {
- OR_REG(&wlc_hw->regs->clk_ctl_st,
- CCS_FORCEHT);
+ bcma_set32(wlc_hw->d11core,
+ D11REGOFFS(clk_ctl_st),
+ CCS_FORCEHT);
udelay(64);
- SPINWAIT(((R_REG
- (&wlc_hw->regs->
- clk_ctl_st) & CCS_HTAVAIL) == 0),
- PMU_MAX_TRANSITION_DLY);
- WARN_ON(!(R_REG
- (&wlc_hw->regs->
- clk_ctl_st) & CCS_HTAVAIL));
+ SPINWAIT(
+ ((bcma_read32(wlc_hw->d11core,
+ D11REGOFFS(clk_ctl_st)) &
+ CCS_HTAVAIL) == 0),
+ PMU_MAX_TRANSITION_DLY);
+ WARN_ON(!(bcma_read32(wlc_hw->d11core,
+ D11REGOFFS(clk_ctl_st)) &
+ CCS_HTAVAIL));
} else {
- if ((wlc_hw->sih->pmurev == 0) &&
- (R_REG
- (&wlc_hw->regs->
- clk_ctl_st) & (CCS_FORCEHT | CCS_HTAREQ)))
- SPINWAIT(((R_REG
- (&wlc_hw->regs->
- clk_ctl_st) & CCS_HTAVAIL)
- == 0),
- PMU_MAX_TRANSITION_DLY);
- AND_REG(&wlc_hw->regs->clk_ctl_st,
+ if ((ai_get_pmurev(wlc_hw->sih) == 0) &&
+ (bcma_read32(wlc_hw->d11core,
+ D11REGOFFS(clk_ctl_st)) &
+ (CCS_FORCEHT | CCS_HTAREQ)))
+ SPINWAIT(
+ ((bcma_read32(wlc_hw->d11core,
+ offsetof(struct d11regs,
+ clk_ctl_st)) &
+ CCS_HTAVAIL) == 0),
+ PMU_MAX_TRANSITION_DLY);
+ bcma_mask32(wlc_hw->d11core,
+ D11REGOFFS(clk_ctl_st),
~CCS_FORCEHT);
}
}
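In the CLK_FAST branch the driver sets CCS_FORCEHT via bcma_set32() and then busy-waits on CCS_HTAVAIL with the driver's SPINWAIT() macro; the dynamic-clock branch keeps the same wait before dropping CCS_FORCEHT again with bcma_mask32(). One small inconsistency worth noting is that the second SPINWAIT spells the offset as offsetof(struct d11regs, clk_ctl_st) rather than D11REGOFFS(clk_ctl_st); the two should expand to the same value if D11REGOFFS is the usual offsetof() wrapper. The same poll-with-timeout idea written as an explicit helper (hypothetical, not part of the driver; SPINWAIT semantics assumed to be roughly this):

/* Wait up to max_us microseconds for the HT clock to become
 * available, polling clk_ctl_st in small steps.
 */
static bool wait_for_ht_clock(struct bcma_device *core, u32 max_us)
{
        u32 spent = 0;

        while (!(bcma_read32(core, D11REGOFFS(clk_ctl_st)) & CCS_HTAVAIL)) {
                if (spent >= max_us)
                        return false;
                udelay(10);
                spent += 10;
        }
        return true;
}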
@@ -1322,7 +1270,7 @@ static void brcms_b_clkctl_clk(struct brcms_hardware *wlc_hw, uint mode)
/* check fast clock is available (if core is not in reset) */
if (wlc_hw->forcefastclk && wlc_hw->clk)
- WARN_ON(!(ai_core_sflags(wlc_hw->sih, 0, 0) &
+ WARN_ON(!(bcma_aread32(wlc_hw->d11core, BCMA_IOST) &
SISF_FCLKA));
/*
@@ -1439,7 +1387,8 @@ static void brcms_c_mctrl_write(struct brcms_hardware *wlc_hw)
maccontrol |= MCTL_INFRA;
}
- W_REG(&wlc_hw->regs->maccontrol, maccontrol);
+ bcma_write32(wlc_hw->d11core, D11REGOFFS(maccontrol),
+ maccontrol);
}
/* set or clear maccontrol bits */
@@ -1533,7 +1482,7 @@ static void
brcms_b_set_addrmatch(struct brcms_hardware *wlc_hw, int match_reg_offset,
const u8 *addr)
{
- struct d11regs __iomem *regs;
+ struct bcma_device *core = wlc_hw->d11core;
u16 mac_l;
u16 mac_m;
u16 mac_h;
@@ -1541,38 +1490,36 @@ brcms_b_set_addrmatch(struct brcms_hardware *wlc_hw, int match_reg_offset,
BCMMSG(wlc_hw->wlc->wiphy, "wl%d: brcms_b_set_addrmatch\n",
wlc_hw->unit);
- regs = wlc_hw->regs;
mac_l = addr[0] | (addr[1] << 8);
mac_m = addr[2] | (addr[3] << 8);
mac_h = addr[4] | (addr[5] << 8);
/* enter the MAC addr into the RXE match registers */
- W_REG(&regs->rcm_ctl, RCM_INC_DATA | match_reg_offset);
- W_REG(&regs->rcm_mat_data, mac_l);
- W_REG(&regs->rcm_mat_data, mac_m);
- W_REG(&regs->rcm_mat_data, mac_h);
-
+ bcma_write16(core, D11REGOFFS(rcm_ctl),
+ RCM_INC_DATA | match_reg_offset);
+ bcma_write16(core, D11REGOFFS(rcm_mat_data), mac_l);
+ bcma_write16(core, D11REGOFFS(rcm_mat_data), mac_m);
+ bcma_write16(core, D11REGOFFS(rcm_mat_data), mac_h);
}
void
brcms_b_write_template_ram(struct brcms_hardware *wlc_hw, int offset, int len,
void *buf)
{
- struct d11regs __iomem *regs;
+ struct bcma_device *core = wlc_hw->d11core;
u32 word;
__le32 word_le;
__be32 word_be;
bool be_bit;
BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
- regs = wlc_hw->regs;
- W_REG(&regs->tplatewrptr, offset);
+ bcma_write32(core, D11REGOFFS(tplatewrptr), offset);
/* if MCTL_BIGEND bit set in mac control register,
* the chip swaps data in fifo, as well as data in
* template ram
*/
- be_bit = (R_REG(&regs->maccontrol) & MCTL_BIGEND) != 0;
+ be_bit = (bcma_read32(core, D11REGOFFS(maccontrol)) & MCTL_BIGEND) != 0;
while (len > 0) {
memcpy(&word, buf, sizeof(u32));
@@ -1585,7 +1532,7 @@ brcms_b_write_template_ram(struct brcms_hardware *wlc_hw, int offset, int len,
word = *(u32 *)&word_le;
}
- W_REG(&regs->tplatewrdata, word);
+ bcma_write32(core, D11REGOFFS(tplatewrdata), word);
buf = (u8 *) buf + sizeof(u32);
len -= sizeof(u32);
@@ -1596,18 +1543,20 @@ static void brcms_b_set_cwmin(struct brcms_hardware *wlc_hw, u16 newmin)
{
wlc_hw->band->CWmin = newmin;
- W_REG(&wlc_hw->regs->objaddr, OBJADDR_SCR_SEL | S_DOT11_CWMIN);
- (void)R_REG(&wlc_hw->regs->objaddr);
- W_REG(&wlc_hw->regs->objdata, newmin);
+ bcma_write32(wlc_hw->d11core, D11REGOFFS(objaddr),
+ OBJADDR_SCR_SEL | S_DOT11_CWMIN);
+ (void)bcma_read32(wlc_hw->d11core, D11REGOFFS(objaddr));
+ bcma_write32(wlc_hw->d11core, D11REGOFFS(objdata), newmin);
}
static void brcms_b_set_cwmax(struct brcms_hardware *wlc_hw, u16 newmax)
{
wlc_hw->band->CWmax = newmax;
- W_REG(&wlc_hw->regs->objaddr, OBJADDR_SCR_SEL | S_DOT11_CWMAX);
- (void)R_REG(&wlc_hw->regs->objaddr);
- W_REG(&wlc_hw->regs->objdata, newmax);
+ bcma_write32(wlc_hw->d11core, D11REGOFFS(objaddr),
+ OBJADDR_SCR_SEL | S_DOT11_CWMAX);
+ (void)bcma_read32(wlc_hw->d11core, D11REGOFFS(objaddr));
+ bcma_write32(wlc_hw->d11core, D11REGOFFS(objdata), newmax);
}
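brcms_b_set_cwmin() and brcms_b_set_cwmax() both use the indirect objmem window: the selector goes into objaddr, objaddr is read back once so the posted write has definitely landed, and only then is the value pushed through objdata. A hedged sketch of that three-step pattern as a helper (the function name is made up; register and selector names come from the hunk):

/* Hypothetical wrapper for a single indirect SCR register write.
 * The throwaway read of objaddr flushes the posted write before
 * objdata is touched.
 */
static void d11_scr_write32(struct bcma_device *core, u32 scr_reg, u32 val)
{
        bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SCR_SEL | scr_reg);
        (void)bcma_read32(core, D11REGOFFS(objaddr));
        bcma_write32(core, D11REGOFFS(objdata), val);
}

/* usage, mirroring the code above:
 *      d11_scr_write32(wlc_hw->d11core, S_DOT11_CWMIN, newmin);
 */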
void brcms_b_bw_set(struct brcms_hardware *wlc_hw, u16 bw)
@@ -1773,17 +1722,17 @@ void brcms_b_core_phypll_reset(struct brcms_hardware *wlc_hw)
{
BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
- ai_corereg(wlc_hw->sih, SI_CC_IDX,
- offsetof(struct chipcregs, chipcontrol_addr), ~0, 0);
+ ai_cc_reg(wlc_hw->sih, offsetof(struct chipcregs, chipcontrol_addr),
+ ~0, 0);
udelay(1);
- ai_corereg(wlc_hw->sih, SI_CC_IDX,
- offsetof(struct chipcregs, chipcontrol_data), 0x4, 0);
+ ai_cc_reg(wlc_hw->sih, offsetof(struct chipcregs, chipcontrol_data),
+ 0x4, 0);
udelay(1);
- ai_corereg(wlc_hw->sih, SI_CC_IDX,
- offsetof(struct chipcregs, chipcontrol_data), 0x4, 4);
+ ai_cc_reg(wlc_hw->sih, offsetof(struct chipcregs, chipcontrol_data),
+ 0x4, 4);
udelay(1);
- ai_corereg(wlc_hw->sih, SI_CC_IDX,
- offsetof(struct chipcregs, chipcontrol_data), 0x4, 0);
+ ai_cc_reg(wlc_hw->sih, offsetof(struct chipcregs, chipcontrol_data),
+ 0x4, 0);
udelay(1);
}
@@ -1797,18 +1746,18 @@ void brcms_b_phyclk_fgc(struct brcms_hardware *wlc_hw, bool clk)
return;
if (ON == clk)
- ai_core_cflags(wlc_hw->sih, SICF_FGC, SICF_FGC);
+ brcms_b_core_ioctl(wlc_hw, SICF_FGC, SICF_FGC);
else
- ai_core_cflags(wlc_hw->sih, SICF_FGC, 0);
+ brcms_b_core_ioctl(wlc_hw, SICF_FGC, 0);
}
void brcms_b_macphyclk_set(struct brcms_hardware *wlc_hw, bool clk)
{
if (ON == clk)
- ai_core_cflags(wlc_hw->sih, SICF_MPCLKE, SICF_MPCLKE);
+ brcms_b_core_ioctl(wlc_hw, SICF_MPCLKE, SICF_MPCLKE);
else
- ai_core_cflags(wlc_hw->sih, SICF_MPCLKE, 0);
+ brcms_b_core_ioctl(wlc_hw, SICF_MPCLKE, 0);
}
void brcms_b_phy_reset(struct brcms_hardware *wlc_hw)
@@ -1828,7 +1777,7 @@ void brcms_b_phy_reset(struct brcms_hardware *wlc_hw)
if (BRCMS_ISNPHY(wlc_hw->band) && NREV_GE(wlc_hw->band->phyrev, 3) &&
NREV_LE(wlc_hw->band->phyrev, 4)) {
/* Set the PHY bandwidth */
- ai_core_cflags(wlc_hw->sih, SICF_BWMASK, phy_bw_clkbits);
+ brcms_b_core_ioctl(wlc_hw, SICF_BWMASK, phy_bw_clkbits);
udelay(1);
@@ -1836,13 +1785,13 @@ void brcms_b_phy_reset(struct brcms_hardware *wlc_hw)
brcms_b_core_phypll_reset(wlc_hw);
/* reset the PHY */
- ai_core_cflags(wlc_hw->sih, (SICF_PRST | SICF_PCLKE),
- (SICF_PRST | SICF_PCLKE));
+ brcms_b_core_ioctl(wlc_hw, (SICF_PRST | SICF_PCLKE),
+ (SICF_PRST | SICF_PCLKE));
phy_in_reset = true;
} else {
- ai_core_cflags(wlc_hw->sih,
- (SICF_PRST | SICF_PCLKE | SICF_BWMASK),
- (SICF_PRST | SICF_PCLKE | phy_bw_clkbits));
+ brcms_b_core_ioctl(wlc_hw,
+ (SICF_PRST | SICF_PCLKE | SICF_BWMASK),
+ (SICF_PRST | SICF_PCLKE | phy_bw_clkbits));
}
udelay(2);
@@ -1859,8 +1808,8 @@ static void brcms_b_setband(struct brcms_hardware *wlc_hw, uint bandunit,
u32 macintmask;
/* Enable the d11 core before accessing it */
- if (!ai_iscoreup(wlc_hw->sih)) {
- ai_core_reset(wlc_hw->sih, 0, 0);
+ if (!bcma_core_is_enabled(wlc_hw->d11core)) {
+ bcma_core_enable(wlc_hw->d11core, 0);
brcms_c_mctrl_reset(wlc_hw);
}
@@ -1886,7 +1835,8 @@ static void brcms_b_setband(struct brcms_hardware *wlc_hw, uint bandunit,
brcms_intrsrestore(wlc->wl, macintmask);
/* ucode should still be suspended.. */
- WARN_ON((R_REG(&wlc_hw->regs->maccontrol) & MCTL_EN_MAC) != 0);
+ WARN_ON((bcma_read32(wlc_hw->d11core, D11REGOFFS(maccontrol)) &
+ MCTL_EN_MAC) != 0);
}
static bool brcms_c_isgoodchip(struct brcms_hardware *wlc_hw)
@@ -1914,7 +1864,7 @@ static bool brcms_c_validboardtype(struct brcms_hardware *wlc_hw)
uint b2 = boardrev & 0xf;
/* boards from other vendors are always considered valid */
- if (wlc_hw->sih->boardvendor != PCI_VENDOR_ID_BROADCOM)
+ if (ai_get_boardvendor(wlc_hw->sih) != PCI_VENDOR_ID_BROADCOM)
return true;
/* do some boardrev sanity checks when boardvendor is Broadcom */
@@ -1986,7 +1936,7 @@ static void brcms_b_xtal(struct brcms_hardware *wlc_hw, bool want)
static bool brcms_b_radio_read_hwdisabled(struct brcms_hardware *wlc_hw)
{
bool v, clk, xtal;
- u32 resetbits = 0, flags = 0;
+ u32 flags = 0;
xtal = wlc_hw->sbclk;
if (!xtal)
@@ -2003,22 +1953,22 @@ static bool brcms_b_radio_read_hwdisabled(struct brcms_hardware *wlc_hw)
flags |= SICF_PCLKE;
/*
+ * TODO: test suspend/resume
+ *
* AI chip doesn't restore bar0win2 on
* hibernation/resume, need sw fixup
*/
- if ((wlc_hw->sih->chip == BCM43224_CHIP_ID) ||
- (wlc_hw->sih->chip == BCM43225_CHIP_ID))
- wlc_hw->regs = (struct d11regs __iomem *)
- ai_setcore(wlc_hw->sih, D11_CORE_ID, 0);
- ai_core_reset(wlc_hw->sih, flags, resetbits);
+
+ bcma_core_enable(wlc_hw->d11core, flags);
brcms_c_mctrl_reset(wlc_hw);
}
- v = ((R_REG(&wlc_hw->regs->phydebug) & PDBG_RFD) != 0);
+ v = ((bcma_read32(wlc_hw->d11core,
+ D11REGOFFS(phydebug)) & PDBG_RFD) != 0);
/* put core back into reset */
if (!clk)
- ai_core_disable(wlc_hw->sih, 0);
+ bcma_core_disable(wlc_hw->d11core, 0);
if (!xtal)
brcms_b_xtal(wlc_hw, OFF);
@@ -2042,25 +1992,21 @@ static bool wlc_dma_rxreset(struct brcms_hardware *wlc_hw, uint fifo)
*/
void brcms_b_corereset(struct brcms_hardware *wlc_hw, u32 flags)
{
- struct d11regs __iomem *regs;
uint i;
bool fastclk;
- u32 resetbits = 0;
if (flags == BRCMS_USE_COREFLAGS)
flags = (wlc_hw->band->pi ? wlc_hw->band->core_flags : 0);
BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
- regs = wlc_hw->regs;
-
/* request FAST clock if not on */
fastclk = wlc_hw->forcefastclk;
if (!fastclk)
brcms_b_clkctl_clk(wlc_hw, CLK_FAST);
/* reset the dma engines except first time thru */
- if (ai_iscoreup(wlc_hw->sih)) {
+ if (bcma_core_is_enabled(wlc_hw->d11core)) {
for (i = 0; i < NFIFO; i++)
if ((wlc_hw->di[i]) && (!dma_txreset(wlc_hw->di[i])))
wiphy_err(wlc_hw->wlc->wiphy, "wl%d: %s: "
@@ -2098,14 +2044,14 @@ void brcms_b_corereset(struct brcms_hardware *wlc_hw, u32 flags)
* they may touch chipcommon as well.
*/
wlc_hw->clk = false;
- ai_core_reset(wlc_hw->sih, flags, resetbits);
+ bcma_core_enable(wlc_hw->d11core, flags);
wlc_hw->clk = true;
if (wlc_hw->band && wlc_hw->band->pi)
wlc_phy_hw_clk_state_upd(wlc_hw->band->pi, true);
brcms_c_mctrl_reset(wlc_hw);
- if (wlc_hw->sih->cccaps & CC_CAP_PMU)
+ if (ai_get_cccaps(wlc_hw->sih) & CC_CAP_PMU)
brcms_b_clkctl_clk(wlc_hw, CLK_FAST);
brcms_b_phy_reset(wlc_hw);
@@ -2126,7 +2072,7 @@ void brcms_b_corereset(struct brcms_hardware *wlc_hw, u32 flags)
*/
static void brcms_b_corerev_fifofixup(struct brcms_hardware *wlc_hw)
{
- struct d11regs __iomem *regs = wlc_hw->regs;
+ struct bcma_device *core = wlc_hw->d11core;
u16 fifo_nu;
u16 txfifo_startblk = TXFIFO_START_BLK, txfifo_endblk;
u16 txfifo_def, txfifo_def1;
@@ -2147,11 +2093,11 @@ static void brcms_b_corerev_fifofixup(struct brcms_hardware *wlc_hw)
txfifo_cmd =
TXFIFOCMD_RESET_MASK | (fifo_nu << TXFIFOCMD_FIFOSEL_SHIFT);
- W_REG(&regs->xmtfifocmd, txfifo_cmd);
- W_REG(&regs->xmtfifodef, txfifo_def);
- W_REG(&regs->xmtfifodef1, txfifo_def1);
+ bcma_write16(core, D11REGOFFS(xmtfifocmd), txfifo_cmd);
+ bcma_write16(core, D11REGOFFS(xmtfifodef), txfifo_def);
+ bcma_write16(core, D11REGOFFS(xmtfifodef1), txfifo_def1);
- W_REG(&regs->xmtfifocmd, txfifo_cmd);
+ bcma_write16(core, D11REGOFFS(xmtfifocmd), txfifo_cmd);
txfifo_startblk += wlc_hw->xmtfifo_sz[fifo_nu];
}
@@ -2186,27 +2132,27 @@ static void brcms_b_corerev_fifofixup(struct brcms_hardware *wlc_hw)
void brcms_b_switch_macfreq(struct brcms_hardware *wlc_hw, u8 spurmode)
{
- struct d11regs __iomem *regs = wlc_hw->regs;
+ struct bcma_device *core = wlc_hw->d11core;
- if ((wlc_hw->sih->chip == BCM43224_CHIP_ID) ||
- (wlc_hw->sih->chip == BCM43225_CHIP_ID)) {
+ if ((ai_get_chip_id(wlc_hw->sih) == BCM43224_CHIP_ID) ||
+ (ai_get_chip_id(wlc_hw->sih) == BCM43225_CHIP_ID)) {
if (spurmode == WL_SPURAVOID_ON2) { /* 126Mhz */
- W_REG(&regs->tsf_clk_frac_l, 0x2082);
- W_REG(&regs->tsf_clk_frac_h, 0x8);
+ bcma_write16(core, D11REGOFFS(tsf_clk_frac_l), 0x2082);
+ bcma_write16(core, D11REGOFFS(tsf_clk_frac_h), 0x8);
} else if (spurmode == WL_SPURAVOID_ON1) { /* 123Mhz */
- W_REG(&regs->tsf_clk_frac_l, 0x5341);
- W_REG(&regs->tsf_clk_frac_h, 0x8);
+ bcma_write16(core, D11REGOFFS(tsf_clk_frac_l), 0x5341);
+ bcma_write16(core, D11REGOFFS(tsf_clk_frac_h), 0x8);
} else { /* 120Mhz */
- W_REG(&regs->tsf_clk_frac_l, 0x8889);
- W_REG(&regs->tsf_clk_frac_h, 0x8);
+ bcma_write16(core, D11REGOFFS(tsf_clk_frac_l), 0x8889);
+ bcma_write16(core, D11REGOFFS(tsf_clk_frac_h), 0x8);
}
} else if (BRCMS_ISLCNPHY(wlc_hw->band)) {
if (spurmode == WL_SPURAVOID_ON1) { /* 82Mhz */
- W_REG(&regs->tsf_clk_frac_l, 0x7CE0);
- W_REG(&regs->tsf_clk_frac_h, 0xC);
+ bcma_write16(core, D11REGOFFS(tsf_clk_frac_l), 0x7CE0);
+ bcma_write16(core, D11REGOFFS(tsf_clk_frac_h), 0xC);
} else { /* 80Mhz */
- W_REG(&regs->tsf_clk_frac_l, 0xCCCD);
- W_REG(&regs->tsf_clk_frac_h, 0xC);
+ bcma_write16(core, D11REGOFFS(tsf_clk_frac_l), 0xCCCD);
+ bcma_write16(core, D11REGOFFS(tsf_clk_frac_h), 0xC);
}
}
}
@@ -2215,11 +2161,8 @@ void brcms_b_switch_macfreq(struct brcms_hardware *wlc_hw, u8 spurmode)
static void brcms_c_gpio_init(struct brcms_c_info *wlc)
{
struct brcms_hardware *wlc_hw = wlc->hw;
- struct d11regs __iomem *regs;
u32 gc, gm;
- regs = wlc_hw->regs;
-
/* use GPIO select 0 to get all gpio signals from the gpio out reg */
brcms_b_mctrl(wlc_hw, MCTL_GPOUT_SEL_MASK, 0);
@@ -2250,10 +2193,10 @@ static void brcms_c_gpio_init(struct brcms_c_info *wlc)
* The board itself is powered by these GPIOs
* (when not sending pattern) so set them high
*/
- OR_REG(&regs->psm_gpio_oe,
- (BOARD_GPIO_12 | BOARD_GPIO_13));
- OR_REG(&regs->psm_gpio_out,
- (BOARD_GPIO_12 | BOARD_GPIO_13));
+ bcma_set16(wlc_hw->d11core, D11REGOFFS(psm_gpio_oe),
+ (BOARD_GPIO_12 | BOARD_GPIO_13));
+ bcma_set16(wlc_hw->d11core, D11REGOFFS(psm_gpio_out),
+ (BOARD_GPIO_12 | BOARD_GPIO_13));
/* Enable antenna diversity, use 2x4 mode */
brcms_b_mhf(wlc_hw, MHF3, MHF3_ANTSEL_EN,
@@ -2280,7 +2223,7 @@ static void brcms_c_gpio_init(struct brcms_c_info *wlc)
static void brcms_ucode_write(struct brcms_hardware *wlc_hw,
const __le32 ucode[], const size_t nbytes)
{
- struct d11regs __iomem *regs = wlc_hw->regs;
+ struct bcma_device *core = wlc_hw->d11core;
uint i;
uint count;
@@ -2288,10 +2231,11 @@ static void brcms_ucode_write(struct brcms_hardware *wlc_hw,
count = (nbytes / sizeof(u32));
- W_REG(&regs->objaddr, (OBJADDR_AUTO_INC | OBJADDR_UCM_SEL));
- (void)R_REG(&regs->objaddr);
+ bcma_write32(core, D11REGOFFS(objaddr),
+ OBJADDR_AUTO_INC | OBJADDR_UCM_SEL);
+ (void)bcma_read32(core, D11REGOFFS(objaddr));
for (i = 0; i < count; i++)
- W_REG(&regs->objdata, le32_to_cpu(ucode[i]));
+ bcma_write32(core, D11REGOFFS(objdata), le32_to_cpu(ucode[i]));
}
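brcms_ucode_write() programs objaddr a single time with OBJADDR_AUTO_INC | OBJADDR_UCM_SEL and then streams every 32-bit firmware word through objdata, letting the hardware advance the address after each write. A compact sketch of that download loop, assuming a little-endian firmware image as the driver uses:

/* Auto-increment object-memory download: one address setup, then a
 * straight run of data writes (count = nbytes / sizeof(u32)).
 */
static void ucode_download(struct bcma_device *core,
                           const __le32 *image, size_t count)
{
        size_t i;

        bcma_write32(core, D11REGOFFS(objaddr),
                     OBJADDR_AUTO_INC | OBJADDR_UCM_SEL);
        (void)bcma_read32(core, D11REGOFFS(objaddr));   /* flush */

        for (i = 0; i < count; i++)
                bcma_write32(core, D11REGOFFS(objdata),
                             le32_to_cpu(image[i]));
}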
@@ -2352,19 +2296,12 @@ void brcms_b_antsel_type_set(struct brcms_hardware *wlc_hw, u8 antsel_type)
wlc_phy_antsel_type_set(wlc_hw->band->pi, antsel_type);
}
-static void brcms_c_fatal_error(struct brcms_c_info *wlc)
-{
- wiphy_err(wlc->wiphy, "wl%d: fatal error, reinitializing\n",
- wlc->pub->unit);
- brcms_init(wlc->wl);
-}
-
static void brcms_b_fifoerrors(struct brcms_hardware *wlc_hw)
{
bool fatal = false;
uint unit;
uint intstatus, idx;
- struct d11regs __iomem *regs = wlc_hw->regs;
+ struct bcma_device *core = wlc_hw->d11core;
struct wiphy *wiphy = wlc_hw->wlc->wiphy;
unit = wlc_hw->unit;
@@ -2372,7 +2309,9 @@ static void brcms_b_fifoerrors(struct brcms_hardware *wlc_hw)
for (idx = 0; idx < NFIFO; idx++) {
/* read intstatus register and ignore any non-error bits */
intstatus =
- R_REG(&regs->intctrlregs[idx].intstatus) & I_ERRORS;
+ bcma_read32(core,
+ D11REGOFFS(intctrlregs[idx].intstatus)) &
+ I_ERRORS;
if (!intstatus)
continue;
@@ -2414,11 +2353,12 @@ static void brcms_b_fifoerrors(struct brcms_hardware *wlc_hw)
}
if (fatal) {
- brcms_c_fatal_error(wlc_hw->wlc); /* big hammer */
+ brcms_fatal_error(wlc_hw->wlc->wl); /* big hammer */
break;
} else
- W_REG(&regs->intctrlregs[idx].intstatus,
- intstatus);
+ bcma_write32(core,
+ D11REGOFFS(intctrlregs[idx].intstatus),
+ intstatus);
}
}
@@ -2426,28 +2366,7 @@ void brcms_c_intrson(struct brcms_c_info *wlc)
{
struct brcms_hardware *wlc_hw = wlc->hw;
wlc->macintmask = wlc->defmacintmask;
- W_REG(&wlc_hw->regs->macintmask, wlc->macintmask);
-}
-
-/*
- * callback for siutils.c, which has only wlc handler, no wl they both check
- * up, not only because there is no need to off/restore d11 interrupt but also
- * because per-port code may require sync with valid interrupt.
- */
-static u32 brcms_c_wlintrsoff(struct brcms_c_info *wlc)
-{
- if (!wlc->hw->up)
- return 0;
-
- return brcms_intrsoff(wlc->wl);
-}
-
-static void brcms_c_wlintrsrestore(struct brcms_c_info *wlc, u32 macintmask)
-{
- if (!wlc->hw->up)
- return;
-
- brcms_intrsrestore(wlc->wl, macintmask);
+ bcma_write32(wlc_hw->d11core, D11REGOFFS(macintmask), wlc->macintmask);
}
u32 brcms_c_intrsoff(struct brcms_c_info *wlc)
@@ -2460,8 +2379,8 @@ u32 brcms_c_intrsoff(struct brcms_c_info *wlc)
macintmask = wlc->macintmask; /* isr can still happen */
- W_REG(&wlc_hw->regs->macintmask, 0);
- (void)R_REG(&wlc_hw->regs->macintmask); /* sync readback */
+ bcma_write32(wlc_hw->d11core, D11REGOFFS(macintmask), 0);
+ (void)bcma_read32(wlc_hw->d11core, D11REGOFFS(macintmask));
udelay(1); /* ensure int line is no longer driven */
wlc->macintmask = 0;
@@ -2476,9 +2395,10 @@ void brcms_c_intrsrestore(struct brcms_c_info *wlc, u32 macintmask)
return;
wlc->macintmask = macintmask;
- W_REG(&wlc_hw->regs->macintmask, wlc->macintmask);
+ bcma_write32(wlc_hw->d11core, D11REGOFFS(macintmask), wlc->macintmask);
}
+/* assumes that the d11 MAC is enabled */
static void brcms_b_tx_fifo_suspend(struct brcms_hardware *wlc_hw,
uint tx_fifo)
{
@@ -2535,11 +2455,12 @@ static void brcms_b_tx_fifo_resume(struct brcms_hardware *wlc_hw,
}
}
-static void brcms_b_mute(struct brcms_hardware *wlc_hw, bool on, u32 flags)
+/* precondition: requires the mac core to be enabled */
+static void brcms_b_mute(struct brcms_hardware *wlc_hw, bool mute_tx)
{
static const u8 null_ether_addr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
- if (on) {
+ if (mute_tx) {
/* suspend tx fifos */
brcms_b_tx_fifo_suspend(wlc_hw, TX_DATA_FIFO);
brcms_b_tx_fifo_suspend(wlc_hw, TX_CTL_FIFO);
@@ -2561,14 +2482,20 @@ static void brcms_b_mute(struct brcms_hardware *wlc_hw, bool on, u32 flags)
wlc_hw->etheraddr);
}
- wlc_phy_mute_upd(wlc_hw->band->pi, on, flags);
+ wlc_phy_mute_upd(wlc_hw->band->pi, mute_tx, 0);
- if (on)
+ if (mute_tx)
brcms_c_ucode_mute_override_set(wlc_hw);
else
brcms_c_ucode_mute_override_clear(wlc_hw);
}
+void
+brcms_c_mute(struct brcms_c_info *wlc, bool mute_tx)
+{
+ brcms_b_mute(wlc->hw, mute_tx);
+}
+
/*
* Read and clear macintmask and macintstatus and intstatus registers.
* This routine should be called with interrupts off
@@ -2580,11 +2507,11 @@ static void brcms_b_mute(struct brcms_hardware *wlc_hw, bool on, u32 flags)
static inline u32 wlc_intstatus(struct brcms_c_info *wlc, bool in_isr)
{
struct brcms_hardware *wlc_hw = wlc->hw;
- struct d11regs __iomem *regs = wlc_hw->regs;
+ struct bcma_device *core = wlc_hw->d11core;
u32 macintstatus;
/* macintstatus includes a DMA interrupt summary bit */
- macintstatus = R_REG(&regs->macintstatus);
+ macintstatus = bcma_read32(core, D11REGOFFS(macintstatus));
BCMMSG(wlc->wiphy, "wl%d: macintstatus: 0x%x\n", wlc_hw->unit,
macintstatus);
@@ -2611,12 +2538,12 @@ static inline u32 wlc_intstatus(struct brcms_c_info *wlc, bool in_isr)
* consequences
*/
/* turn off the interrupts */
- W_REG(&regs->macintmask, 0);
- (void)R_REG(&regs->macintmask); /* sync readback */
+ bcma_write32(core, D11REGOFFS(macintmask), 0);
+ (void)bcma_read32(core, D11REGOFFS(macintmask));
wlc->macintmask = 0;
/* clear device interrupts */
- W_REG(&regs->macintstatus, macintstatus);
+ bcma_write32(core, D11REGOFFS(macintstatus), macintstatus);
/* MI_DMAINT is indication of non-zero intstatus */
if (macintstatus & MI_DMAINT)
@@ -2625,8 +2552,8 @@ static inline u32 wlc_intstatus(struct brcms_c_info *wlc, bool in_isr)
* RX_FIFO. If MI_DMAINT is set, assume it
* is set and clear the interrupt.
*/
- W_REG(&regs->intctrlregs[RX_FIFO].intstatus,
- DEF_RXINTMASK);
+ bcma_write32(core, D11REGOFFS(intctrlregs[RX_FIFO].intstatus),
+ DEF_RXINTMASK);
return macintstatus;
}
@@ -2689,7 +2616,7 @@ bool brcms_c_isr(struct brcms_c_info *wlc, bool *wantdpc)
void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc)
{
struct brcms_hardware *wlc_hw = wlc->hw;
- struct d11regs __iomem *regs = wlc_hw->regs;
+ struct bcma_device *core = wlc_hw->d11core;
u32 mc, mi;
struct wiphy *wiphy = wlc->wiphy;
@@ -2706,7 +2633,7 @@ void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc)
/* force the core awake */
brcms_c_ucode_wake_override_set(wlc_hw, BRCMS_WAKE_OVERRIDE_MACSUSPEND);
- mc = R_REG(&regs->maccontrol);
+ mc = bcma_read32(core, D11REGOFFS(maccontrol));
if (mc == 0xffffffff) {
wiphy_err(wiphy, "wl%d: %s: dead chip\n", wlc_hw->unit,
@@ -2718,7 +2645,7 @@ void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc)
WARN_ON(!(mc & MCTL_PSM_RUN));
WARN_ON(!(mc & MCTL_EN_MAC));
- mi = R_REG(&regs->macintstatus);
+ mi = bcma_read32(core, D11REGOFFS(macintstatus));
if (mi == 0xffffffff) {
wiphy_err(wiphy, "wl%d: %s: dead chip\n", wlc_hw->unit,
__func__);
@@ -2729,21 +2656,21 @@ void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc)
brcms_b_mctrl(wlc_hw, MCTL_EN_MAC, 0);
- SPINWAIT(!(R_REG(&regs->macintstatus) & MI_MACSSPNDD),
+ SPINWAIT(!(bcma_read32(core, D11REGOFFS(macintstatus)) & MI_MACSSPNDD),
BRCMS_MAX_MAC_SUSPEND);
- if (!(R_REG(&regs->macintstatus) & MI_MACSSPNDD)) {
+ if (!(bcma_read32(core, D11REGOFFS(macintstatus)) & MI_MACSSPNDD)) {
wiphy_err(wiphy, "wl%d: wlc_suspend_mac_and_wait: waited %d uS"
" and MI_MACSSPNDD is still not on.\n",
wlc_hw->unit, BRCMS_MAX_MAC_SUSPEND);
wiphy_err(wiphy, "wl%d: psmdebug 0x%08x, phydebug 0x%08x, "
"psm_brc 0x%04x\n", wlc_hw->unit,
- R_REG(&regs->psmdebug),
- R_REG(&regs->phydebug),
- R_REG(&regs->psm_brc));
+ bcma_read32(core, D11REGOFFS(psmdebug)),
+ bcma_read32(core, D11REGOFFS(phydebug)),
+ bcma_read16(core, D11REGOFFS(psm_brc)));
}
- mc = R_REG(&regs->maccontrol);
+ mc = bcma_read32(core, D11REGOFFS(maccontrol));
if (mc == 0xffffffff) {
wiphy_err(wiphy, "wl%d: %s: dead chip\n", wlc_hw->unit,
__func__);
@@ -2758,7 +2685,7 @@ void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc)
void brcms_c_enable_mac(struct brcms_c_info *wlc)
{
struct brcms_hardware *wlc_hw = wlc->hw;
- struct d11regs __iomem *regs = wlc_hw->regs;
+ struct bcma_device *core = wlc_hw->d11core;
u32 mc, mi;
BCMMSG(wlc->wiphy, "wl%d: bandunit %d\n", wlc_hw->unit,
@@ -2771,20 +2698,20 @@ void brcms_c_enable_mac(struct brcms_c_info *wlc)
if (wlc_hw->mac_suspend_depth > 0)
return;
- mc = R_REG(&regs->maccontrol);
+ mc = bcma_read32(core, D11REGOFFS(maccontrol));
WARN_ON(mc & MCTL_PSM_JMP_0);
WARN_ON(mc & MCTL_EN_MAC);
WARN_ON(!(mc & MCTL_PSM_RUN));
brcms_b_mctrl(wlc_hw, MCTL_EN_MAC, MCTL_EN_MAC);
- W_REG(&regs->macintstatus, MI_MACSSPNDD);
+ bcma_write32(core, D11REGOFFS(macintstatus), MI_MACSSPNDD);
- mc = R_REG(&regs->maccontrol);
+ mc = bcma_read32(core, D11REGOFFS(maccontrol));
WARN_ON(mc & MCTL_PSM_JMP_0);
WARN_ON(!(mc & MCTL_EN_MAC));
WARN_ON(!(mc & MCTL_PSM_RUN));
- mi = R_REG(&regs->macintstatus);
+ mi = bcma_read32(core, D11REGOFFS(macintstatus));
WARN_ON(mi & MI_MACSSPNDD);
brcms_c_ucode_wake_override_clear(wlc_hw,
@@ -2801,55 +2728,53 @@ void brcms_b_band_stf_ss_set(struct brcms_hardware *wlc_hw, u8 stf_mode)
static bool brcms_b_validate_chip_access(struct brcms_hardware *wlc_hw)
{
- struct d11regs __iomem *regs;
+ struct bcma_device *core = wlc_hw->d11core;
u32 w, val;
struct wiphy *wiphy = wlc_hw->wlc->wiphy;
BCMMSG(wiphy, "wl%d\n", wlc_hw->unit);
- regs = wlc_hw->regs;
-
/* Validate dchip register access */
- W_REG(&regs->objaddr, OBJADDR_SHM_SEL | 0);
- (void)R_REG(&regs->objaddr);
- w = R_REG(&regs->objdata);
+ bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0);
+ (void)bcma_read32(core, D11REGOFFS(objaddr));
+ w = bcma_read32(core, D11REGOFFS(objdata));
/* Can we write and read back a 32bit register? */
- W_REG(&regs->objaddr, OBJADDR_SHM_SEL | 0);
- (void)R_REG(&regs->objaddr);
- W_REG(&regs->objdata, (u32) 0xaa5555aa);
+ bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0);
+ (void)bcma_read32(core, D11REGOFFS(objaddr));
+ bcma_write32(core, D11REGOFFS(objdata), (u32) 0xaa5555aa);
- W_REG(&regs->objaddr, OBJADDR_SHM_SEL | 0);
- (void)R_REG(&regs->objaddr);
- val = R_REG(&regs->objdata);
+ bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0);
+ (void)bcma_read32(core, D11REGOFFS(objaddr));
+ val = bcma_read32(core, D11REGOFFS(objdata));
if (val != (u32) 0xaa5555aa) {
wiphy_err(wiphy, "wl%d: validate_chip_access: SHM = 0x%x, "
"expected 0xaa5555aa\n", wlc_hw->unit, val);
return false;
}
- W_REG(&regs->objaddr, OBJADDR_SHM_SEL | 0);
- (void)R_REG(&regs->objaddr);
- W_REG(&regs->objdata, (u32) 0x55aaaa55);
+ bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0);
+ (void)bcma_read32(core, D11REGOFFS(objaddr));
+ bcma_write32(core, D11REGOFFS(objdata), (u32) 0x55aaaa55);
- W_REG(&regs->objaddr, OBJADDR_SHM_SEL | 0);
- (void)R_REG(&regs->objaddr);
- val = R_REG(&regs->objdata);
+ bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0);
+ (void)bcma_read32(core, D11REGOFFS(objaddr));
+ val = bcma_read32(core, D11REGOFFS(objdata));
if (val != (u32) 0x55aaaa55) {
wiphy_err(wiphy, "wl%d: validate_chip_access: SHM = 0x%x, "
"expected 0x55aaaa55\n", wlc_hw->unit, val);
return false;
}
- W_REG(&regs->objaddr, OBJADDR_SHM_SEL | 0);
- (void)R_REG(&regs->objaddr);
- W_REG(&regs->objdata, w);
+ bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0);
+ (void)bcma_read32(core, D11REGOFFS(objaddr));
+ bcma_write32(core, D11REGOFFS(objdata), w);
/* clear CFPStart */
- W_REG(&regs->tsf_cfpstart, 0);
+ bcma_write32(core, D11REGOFFS(tsf_cfpstart), 0);
- w = R_REG(&regs->maccontrol);
+ w = bcma_read32(core, D11REGOFFS(maccontrol));
if ((w != (MCTL_IHR_EN | MCTL_WAKE)) &&
(w != (MCTL_IHR_EN | MCTL_GMODE | MCTL_WAKE))) {
wiphy_err(wiphy, "wl%d: validate_chip_access: maccontrol = "
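brcms_b_validate_chip_access() saves the first shared-memory word, writes the complementary test patterns 0xaa5555aa and 0x55aaaa55 through the objmem window, reads each back, and restores the original word before clearing tsf_cfpstart and sanity-checking maccontrol; any mismatch means the host cannot reliably talk to the d11 core. The save/test/restore idea condensed into a hypothetical helper (real bcma accessors, made-up function name):

/* Hypothetical read-back self-test over SHM word 0. */
static bool shm_word_selftest(struct bcma_device *core)
{
        static const u32 patterns[] = { 0xaa5555aa, 0x55aaaa55 };
        u32 saved, val;
        bool ok = true;
        int i;

        bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0);
        (void)bcma_read32(core, D11REGOFFS(objaddr));
        saved = bcma_read32(core, D11REGOFFS(objdata));

        for (i = 0; i < ARRAY_SIZE(patterns); i++) {
                bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0);
                (void)bcma_read32(core, D11REGOFFS(objaddr));
                bcma_write32(core, D11REGOFFS(objdata), patterns[i]);

                bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0);
                (void)bcma_read32(core, D11REGOFFS(objaddr));
                val = bcma_read32(core, D11REGOFFS(objdata));
                if (val != patterns[i])
                        ok = false;
        }

        /* put the original contents back whatever the outcome */
        bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0);
        (void)bcma_read32(core, D11REGOFFS(objaddr));
        bcma_write32(core, D11REGOFFS(objdata), saved);

        return ok;
}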
@@ -2866,38 +2791,38 @@ static bool brcms_b_validate_chip_access(struct brcms_hardware *wlc_hw)
void brcms_b_core_phypll_ctl(struct brcms_hardware *wlc_hw, bool on)
{
- struct d11regs __iomem *regs;
+ struct bcma_device *core = wlc_hw->d11core;
u32 tmp;
BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
tmp = 0;
- regs = wlc_hw->regs;
if (on) {
- if ((wlc_hw->sih->chip == BCM4313_CHIP_ID)) {
- OR_REG(&regs->clk_ctl_st,
- (CCS_ERSRC_REQ_HT | CCS_ERSRC_REQ_D11PLL |
- CCS_ERSRC_REQ_PHYPLL));
- SPINWAIT((R_REG(&regs->clk_ctl_st) &
- (CCS_ERSRC_AVAIL_HT)) != (CCS_ERSRC_AVAIL_HT),
+ if ((ai_get_chip_id(wlc_hw->sih) == BCM4313_CHIP_ID)) {
+ bcma_set32(core, D11REGOFFS(clk_ctl_st),
+ CCS_ERSRC_REQ_HT |
+ CCS_ERSRC_REQ_D11PLL |
+ CCS_ERSRC_REQ_PHYPLL);
+ SPINWAIT((bcma_read32(core, D11REGOFFS(clk_ctl_st)) &
+ CCS_ERSRC_AVAIL_HT) != CCS_ERSRC_AVAIL_HT,
PHYPLL_WAIT_US);
- tmp = R_REG(&regs->clk_ctl_st);
- if ((tmp & (CCS_ERSRC_AVAIL_HT)) !=
- (CCS_ERSRC_AVAIL_HT))
+ tmp = bcma_read32(core, D11REGOFFS(clk_ctl_st));
+ if ((tmp & CCS_ERSRC_AVAIL_HT) != CCS_ERSRC_AVAIL_HT)
wiphy_err(wlc_hw->wlc->wiphy, "%s: turn on PHY"
" PLL failed\n", __func__);
} else {
- OR_REG(&regs->clk_ctl_st,
- (CCS_ERSRC_REQ_D11PLL | CCS_ERSRC_REQ_PHYPLL));
- SPINWAIT((R_REG(&regs->clk_ctl_st) &
+ bcma_set32(core, D11REGOFFS(clk_ctl_st),
+ tmp | CCS_ERSRC_REQ_D11PLL |
+ CCS_ERSRC_REQ_PHYPLL);
+ SPINWAIT((bcma_read32(core, D11REGOFFS(clk_ctl_st)) &
(CCS_ERSRC_AVAIL_D11PLL |
CCS_ERSRC_AVAIL_PHYPLL)) !=
(CCS_ERSRC_AVAIL_D11PLL |
CCS_ERSRC_AVAIL_PHYPLL), PHYPLL_WAIT_US);
- tmp = R_REG(&regs->clk_ctl_st);
+ tmp = bcma_read32(core, D11REGOFFS(clk_ctl_st));
if ((tmp &
(CCS_ERSRC_AVAIL_D11PLL | CCS_ERSRC_AVAIL_PHYPLL))
!=
@@ -2911,8 +2836,9 @@ void brcms_b_core_phypll_ctl(struct brcms_hardware *wlc_hw, bool on)
* be requesting it; so we'll deassert the request but
* not wait for status to comply.
*/
- AND_REG(&regs->clk_ctl_st, ~CCS_ERSRC_REQ_PHYPLL);
- tmp = R_REG(&regs->clk_ctl_st);
+ bcma_mask32(core, D11REGOFFS(clk_ctl_st),
+ ~CCS_ERSRC_REQ_PHYPLL);
+ (void)bcma_read32(core, D11REGOFFS(clk_ctl_st));
}
}
@@ -2940,7 +2866,7 @@ static void brcms_c_coredisable(struct brcms_hardware *wlc_hw)
brcms_b_core_phypll_ctl(wlc_hw, false);
wlc_hw->clk = false;
- ai_core_disable(wlc_hw->sih, 0);
+ bcma_core_disable(wlc_hw->d11core, 0);
wlc_phy_hw_clk_state_upd(wlc_hw->band->pi, false);
}
@@ -2964,35 +2890,31 @@ static void brcms_c_flushqueues(struct brcms_c_info *wlc)
static u16
brcms_b_read_objmem(struct brcms_hardware *wlc_hw, uint offset, u32 sel)
{
- struct d11regs __iomem *regs = wlc_hw->regs;
- u16 __iomem *objdata_lo = (u16 __iomem *)&regs->objdata;
- u16 __iomem *objdata_hi = objdata_lo + 1;
- u16 v;
+ struct bcma_device *core = wlc_hw->d11core;
+ u16 objoff = D11REGOFFS(objdata);
- W_REG(&regs->objaddr, sel | (offset >> 2));
- (void)R_REG(&regs->objaddr);
+ bcma_write32(core, D11REGOFFS(objaddr), sel | (offset >> 2));
+ (void)bcma_read32(core, D11REGOFFS(objaddr));
if (offset & 2)
- v = R_REG(objdata_hi);
- else
- v = R_REG(objdata_lo);
+ objoff += 2;
- return v;
+ return bcma_read16(core, objoff);
}
static void
brcms_b_write_objmem(struct brcms_hardware *wlc_hw, uint offset, u16 v,
u32 sel)
{
- struct d11regs __iomem *regs = wlc_hw->regs;
- u16 __iomem *objdata_lo = (u16 __iomem *)&regs->objdata;
- u16 __iomem *objdata_hi = objdata_lo + 1;
+ struct bcma_device *core = wlc_hw->d11core;
+ u16 objoff = D11REGOFFS(objdata);
- W_REG(&regs->objaddr, sel | (offset >> 2));
- (void)R_REG(&regs->objaddr);
+ bcma_write32(core, D11REGOFFS(objaddr), sel | (offset >> 2));
+ (void)bcma_read32(core, D11REGOFFS(objaddr));
if (offset & 2)
- W_REG(objdata_hi, v);
- else
- W_REG(objdata_lo, v);
+ objoff += 2;
+
+ bcma_write16(core, objoff, v);
}
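The rewritten objmem accessors put the 32-bit word index (offset >> 2) into objaddr and then use bit 1 of the byte offset to pick the low or high 16-bit half of objdata, expressed as a +2 on the register offset instead of a second __iomem pointer. A hedged example of how a full 32-bit shared-memory word could be assembled from two such half-word reads, assuming a brcms_b_read_objmem()-style helper exactly as in the hunk:

/* Hypothetical 32-bit SHM read built from two 16-bit objmem reads.
 * byte_offset must be 4-byte aligned so that +0 hits the low half
 * and +2 hits the high half of the same objdata word.
 */
static u32 shm_read32(struct brcms_hardware *wlc_hw, uint byte_offset)
{
        u16 lo = brcms_b_read_objmem(wlc_hw, byte_offset, OBJADDR_SHM_SEL);
        u16 hi = brcms_b_read_objmem(wlc_hw, byte_offset + 2, OBJADDR_SHM_SEL);

        return lo | ((u32)hi << 16);
}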
/*
@@ -3078,14 +3000,14 @@ static void brcms_b_retrylimit_upd(struct brcms_hardware *wlc_hw,
/* write retry limit to SCR, shouldn't need to suspend */
if (wlc_hw->up) {
- W_REG(&wlc_hw->regs->objaddr,
- OBJADDR_SCR_SEL | S_DOT11_SRC_LMT);
- (void)R_REG(&wlc_hw->regs->objaddr);
- W_REG(&wlc_hw->regs->objdata, wlc_hw->SRL);
- W_REG(&wlc_hw->regs->objaddr,
- OBJADDR_SCR_SEL | S_DOT11_LRC_LMT);
- (void)R_REG(&wlc_hw->regs->objaddr);
- W_REG(&wlc_hw->regs->objdata, wlc_hw->LRL);
+ bcma_write32(wlc_hw->d11core, D11REGOFFS(objaddr),
+ OBJADDR_SCR_SEL | S_DOT11_SRC_LMT);
+ (void)bcma_read32(wlc_hw->d11core, D11REGOFFS(objaddr));
+ bcma_write32(wlc_hw->d11core, D11REGOFFS(objdata), wlc_hw->SRL);
+ bcma_write32(wlc_hw->d11core, D11REGOFFS(objaddr),
+ OBJADDR_SCR_SEL | S_DOT11_LRC_LMT);
+ (void)bcma_read32(wlc_hw->d11core, D11REGOFFS(objaddr));
+ bcma_write32(wlc_hw->d11core, D11REGOFFS(objdata), wlc_hw->LRL);
}
}
@@ -3132,7 +3054,7 @@ static bool brcms_c_ps_allowed(struct brcms_c_info *wlc)
return false;
/* disallow PS when one of these meets when not scanning */
- if (wlc->monitor)
+ if (wlc->filter_flags & FIF_PROMISC_IN_BSS)
return false;
if (cfg->associated) {
@@ -3267,9 +3189,9 @@ void brcms_c_init_scb(struct scb *scb)
static void brcms_b_coreinit(struct brcms_c_info *wlc)
{
struct brcms_hardware *wlc_hw = wlc->hw;
- struct d11regs __iomem *regs;
+ struct bcma_device *core = wlc_hw->d11core;
u32 sflags;
- uint bcnint_us;
+ u32 bcnint_us;
uint i = 0;
bool fifosz_fixup = false;
int err = 0;
@@ -3277,8 +3199,6 @@ static void brcms_b_coreinit(struct brcms_c_info *wlc)
struct wiphy *wiphy = wlc->wiphy;
struct brcms_ucode *ucode = &wlc_hw->wlc->wl->ucode;
- regs = wlc_hw->regs;
-
BCMMSG(wlc->wiphy, "wl%d\n", wlc_hw->unit);
/* reset PSM */
@@ -3291,20 +3211,20 @@ static void brcms_b_coreinit(struct brcms_c_info *wlc)
fifosz_fixup = true;
/* let the PSM run to the suspended state, set mode to BSS STA */
- W_REG(&regs->macintstatus, -1);
+ bcma_write32(core, D11REGOFFS(macintstatus), -1);
brcms_b_mctrl(wlc_hw, ~0,
(MCTL_IHR_EN | MCTL_INFRA | MCTL_PSM_RUN | MCTL_WAKE));
/* wait for ucode to self-suspend after auto-init */
- SPINWAIT(((R_REG(&regs->macintstatus) & MI_MACSSPNDD) == 0),
- 1000 * 1000);
- if ((R_REG(&regs->macintstatus) & MI_MACSSPNDD) == 0)
+ SPINWAIT(((bcma_read32(core, D11REGOFFS(macintstatus)) &
+ MI_MACSSPNDD) == 0), 1000 * 1000);
+ if ((bcma_read32(core, D11REGOFFS(macintstatus)) & MI_MACSSPNDD) == 0)
wiphy_err(wiphy, "wl%d: wlc_coreinit: ucode did not self-"
"suspend!\n", wlc_hw->unit);
brcms_c_gpio_init(wlc);
- sflags = ai_core_sflags(wlc_hw->sih, 0, 0);
+ sflags = bcma_aread32(core, BCMA_IOST);
if (D11REV_IS(wlc_hw->corerev, 23)) {
if (BRCMS_ISNPHY(wlc_hw->band))
@@ -3368,7 +3288,7 @@ static void brcms_b_coreinit(struct brcms_c_info *wlc)
wlc_hw->xmtfifo_sz[i], i);
/* make sure we can still talk to the mac */
- WARN_ON(R_REG(&regs->maccontrol) == 0xffffffff);
+ WARN_ON(bcma_read32(core, D11REGOFFS(maccontrol)) == 0xffffffff);
/* band-specific inits done by wlc_bsinit() */
@@ -3377,7 +3297,7 @@ static void brcms_b_coreinit(struct brcms_c_info *wlc)
brcms_b_write_shm(wlc_hw, M_MAX_ANTCNT, ANTCNT);
/* enable one rx interrupt per received frame */
- W_REG(&regs->intrcvlazy[0], (1 << IRL_FC_SHIFT));
+ bcma_write32(core, D11REGOFFS(intrcvlazy[0]), (1 << IRL_FC_SHIFT));
/* set the station mode (BSS STA) */
brcms_b_mctrl(wlc_hw,
@@ -3386,19 +3306,21 @@ static void brcms_b_coreinit(struct brcms_c_info *wlc)
/* set up Beacon interval */
bcnint_us = 0x8000 << 10;
- W_REG(&regs->tsf_cfprep, (bcnint_us << CFPREP_CBI_SHIFT));
- W_REG(&regs->tsf_cfpstart, bcnint_us);
- W_REG(&regs->macintstatus, MI_GP1);
+ bcma_write32(core, D11REGOFFS(tsf_cfprep),
+ (bcnint_us << CFPREP_CBI_SHIFT));
+ bcma_write32(core, D11REGOFFS(tsf_cfpstart), bcnint_us);
+ bcma_write32(core, D11REGOFFS(macintstatus), MI_GP1);
/* write interrupt mask */
- W_REG(&regs->intctrlregs[RX_FIFO].intmask, DEF_RXINTMASK);
+ bcma_write32(core, D11REGOFFS(intctrlregs[RX_FIFO].intmask),
+ DEF_RXINTMASK);
/* allow the MAC to control the PHY clock (dynamic on/off) */
brcms_b_macphyclk_set(wlc_hw, ON);
/* program dynamic clock control fast powerup delay register */
wlc->fastpwrup_dly = ai_clkctl_fast_pwrup_delay(wlc_hw->sih);
- W_REG(&regs->scc_fastpwrup_dly, wlc->fastpwrup_dly);
+ bcma_write16(core, D11REGOFFS(scc_fastpwrup_dly), wlc->fastpwrup_dly);
/* tell the ucode the corerev */
brcms_b_write_shm(wlc_hw, M_MACHW_VER, (u16) wlc_hw->corerev);
@@ -3411,19 +3333,21 @@ static void brcms_b_coreinit(struct brcms_c_info *wlc)
machwcap >> 16) & 0xffff));
/* write retry limits to SCR, this done after PSM init */
- W_REG(&regs->objaddr, OBJADDR_SCR_SEL | S_DOT11_SRC_LMT);
- (void)R_REG(&regs->objaddr);
- W_REG(&regs->objdata, wlc_hw->SRL);
- W_REG(&regs->objaddr, OBJADDR_SCR_SEL | S_DOT11_LRC_LMT);
- (void)R_REG(&regs->objaddr);
- W_REG(&regs->objdata, wlc_hw->LRL);
+ bcma_write32(core, D11REGOFFS(objaddr),
+ OBJADDR_SCR_SEL | S_DOT11_SRC_LMT);
+ (void)bcma_read32(core, D11REGOFFS(objaddr));
+ bcma_write32(core, D11REGOFFS(objdata), wlc_hw->SRL);
+ bcma_write32(core, D11REGOFFS(objaddr),
+ OBJADDR_SCR_SEL | S_DOT11_LRC_LMT);
+ (void)bcma_read32(core, D11REGOFFS(objaddr));
+ bcma_write32(core, D11REGOFFS(objdata), wlc_hw->LRL);
/* write rate fallback retry limits */
brcms_b_write_shm(wlc_hw, M_SFRMTXCNTFBRTHSD, wlc_hw->SFBL);
brcms_b_write_shm(wlc_hw, M_LFRMTXCNTFBRTHSD, wlc_hw->LFBL);
- AND_REG(&regs->ifs_ctl, 0x0FFF);
- W_REG(&regs->ifs_aifsn, EDCF_AIFSN_MIN);
+ bcma_mask16(core, D11REGOFFS(ifs_ctl), 0x0FFF);
+ bcma_write16(core, D11REGOFFS(ifs_aifsn), EDCF_AIFSN_MIN);
/* init the tx dma engines */
for (i = 0; i < NFIFO; i++) {
@@ -3437,8 +3361,7 @@ static void brcms_b_coreinit(struct brcms_c_info *wlc)
}
void
-static brcms_b_init(struct brcms_hardware *wlc_hw, u16 chanspec,
- bool mute) {
+static brcms_b_init(struct brcms_hardware *wlc_hw, u16 chanspec) {
u32 macintmask;
bool fastclk;
struct brcms_c_info *wlc = wlc_hw->wlc;
@@ -3463,10 +3386,6 @@ static brcms_b_init(struct brcms_hardware *wlc_hw, u16 chanspec,
/* core-specific initialization */
brcms_b_coreinit(wlc);
- /* suspend the tx fifos and mute the phy for preism cac time */
- if (mute)
- brcms_b_mute(wlc_hw, ON, PHY_MUTE_FOR_PREISM);
-
/* band-specific inits */
brcms_b_bsinit(wlc, chanspec);
@@ -3656,42 +3575,32 @@ static void brcms_c_bandinit_ordered(struct brcms_c_info *wlc,
brcms_c_set_phy_chanspec(wlc, chanspec);
}
-static void brcms_c_mac_bcn_promisc(struct brcms_c_info *wlc)
-{
- if (wlc->bcnmisc_monitor)
- brcms_b_mctrl(wlc->hw, MCTL_BCNS_PROMISC, MCTL_BCNS_PROMISC);
- else
- brcms_b_mctrl(wlc->hw, MCTL_BCNS_PROMISC, 0);
-}
-
-void brcms_c_mac_bcn_promisc_change(struct brcms_c_info *wlc, bool promisc)
-{
- wlc->bcnmisc_monitor = promisc;
- brcms_c_mac_bcn_promisc(wlc);
-}
-
-/* set or clear maccontrol bits MCTL_PROMISC and MCTL_KEEPCONTROL */
-static void brcms_c_mac_promisc(struct brcms_c_info *wlc)
+/*
+ * Set or clear filtering related maccontrol bits based on
+ * specified filter flags
+ */
+void brcms_c_mac_promisc(struct brcms_c_info *wlc, uint filter_flags)
{
u32 promisc_bits = 0;
- /*
- * promiscuous mode just sets MCTL_PROMISC
- * Note: APs get all BSS traffic without the need to set
- * the MCTL_PROMISC bit since all BSS data traffic is
- * directed at the AP
- */
- if (wlc->pub->promisc)
+ wlc->filter_flags = filter_flags;
+
+ if (filter_flags & (FIF_PROMISC_IN_BSS | FIF_OTHER_BSS))
promisc_bits |= MCTL_PROMISC;
- /* monitor mode needs both MCTL_PROMISC and MCTL_KEEPCONTROL
- * Note: monitor mode also needs MCTL_BCNS_PROMISC, but that is
- * handled in brcms_c_mac_bcn_promisc()
- */
- if (wlc->monitor)
- promisc_bits |= MCTL_PROMISC | MCTL_KEEPCONTROL;
+ if (filter_flags & FIF_BCN_PRBRESP_PROMISC)
+ promisc_bits |= MCTL_BCNS_PROMISC;
+
+ if (filter_flags & FIF_FCSFAIL)
+ promisc_bits |= MCTL_KEEPBADFCS;
- brcms_b_mctrl(wlc->hw, MCTL_PROMISC | MCTL_KEEPCONTROL, promisc_bits);
+ if (filter_flags & (FIF_CONTROL | FIF_PSPOLL))
+ promisc_bits |= MCTL_KEEPCONTROL;
+
+ brcms_b_mctrl(wlc->hw,
+ MCTL_PROMISC | MCTL_BCNS_PROMISC |
+ MCTL_KEEPCONTROL | MCTL_KEEPBADFCS,
+ promisc_bits);
}
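brcms_c_mac_promisc() now takes mac80211's FIF_* receive-filter flags directly and maps them onto maccontrol bits in a single brcms_b_mctrl() call; because the full mask of affected bits is passed along with the new values, bits are cleared again when the corresponding flags are dropped. A hedged sketch of how a mac80211 configure_filter callback might drive it (illustrative only, not the driver's actual mac80211_if.c implementation; the priv layout and supported-flag mask are assumptions):

static void demo_configure_filter(struct ieee80211_hw *hw,
                                  unsigned int changed_flags,
                                  unsigned int *total_flags,
                                  u64 multicast)
{
        struct brcms_c_info *wlc = hw->priv;   /* assumed priv layout */
        unsigned int supported = FIF_PROMISC_IN_BSS | FIF_OTHER_BSS |
                                 FIF_BCN_PRBRESP_PROMISC | FIF_FCSFAIL |
                                 FIF_CONTROL | FIF_PSPOLL;

        /* keep only the flags this hardware can honour, then let the
         * core translate them into maccontrol bits
         */
        *total_flags &= supported;
        brcms_c_mac_promisc(wlc, *total_flags);
}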
/*
@@ -3721,10 +3630,6 @@ static void brcms_c_ucode_mac_upd(struct brcms_c_info *wlc)
} else {
/* disable an active IBSS if we are not on the home channel */
}
-
- /* update the various promisc bits */
- brcms_c_mac_bcn_promisc(wlc);
- brcms_c_mac_promisc(wlc);
}
static void brcms_c_write_rate_shm(struct brcms_c_info *wlc, u8 rate,
@@ -3899,7 +3804,7 @@ static void brcms_c_set_ps_ctrl(struct brcms_c_info *wlc)
BCMMSG(wlc->wiphy, "wl%d: hps %d\n", wlc->pub->unit, hps);
- v1 = R_REG(&wlc->regs->maccontrol);
+ v1 = bcma_read32(wlc->hw->d11core, D11REGOFFS(maccontrol));
v2 = MCTL_WAKE;
if (hps)
v2 |= MCTL_HPS;
@@ -3979,7 +3884,7 @@ static void brcms_c_set_home_chanspec(struct brcms_c_info *wlc, u16 chanspec)
void
brcms_b_set_chanspec(struct brcms_hardware *wlc_hw, u16 chanspec,
- bool mute, struct txpwr_limits *txpwr)
+ bool mute_tx, struct txpwr_limits *txpwr)
{
uint bandunit;
@@ -4005,7 +3910,7 @@ brcms_b_set_chanspec(struct brcms_hardware *wlc_hw, u16 chanspec,
}
}
- wlc_phy_initcal_enable(wlc_hw->band->pi, !mute);
+ wlc_phy_initcal_enable(wlc_hw->band->pi, !mute_tx);
if (!wlc_hw->up) {
if (wlc_hw->clk)
@@ -4017,7 +3922,7 @@ brcms_b_set_chanspec(struct brcms_hardware *wlc_hw, u16 chanspec,
wlc_phy_txpower_limit_set(wlc_hw->band->pi, txpwr, chanspec);
/* Update muting of the channel */
- brcms_b_mute(wlc_hw, mute, 0);
+ brcms_b_mute(wlc_hw, mute_tx);
}
}
@@ -4205,7 +4110,7 @@ void brcms_c_wme_setparams(struct brcms_c_info *wlc, u16 aci,
EDCF_TXOP2USEC(acp_shm.txop);
acp_shm.aifs = (params->aifs & EDCF_AIFSN_MASK);
- if (aci == AC_VI && acp_shm.txop == 0
+ if (aci == IEEE80211_AC_VI && acp_shm.txop == 0
&& acp_shm.aifs < EDCF_AIFSN_MAX)
acp_shm.aifs++;
@@ -4218,7 +4123,8 @@ void brcms_c_wme_setparams(struct brcms_c_info *wlc, u16 aci,
acp_shm.cwmax = params->cw_max;
acp_shm.cwcur = acp_shm.cwmin;
acp_shm.bslots =
- R_REG(&wlc->regs->tsf_random) & acp_shm.cwcur;
+ bcma_read16(wlc->hw->d11core, D11REGOFFS(tsf_random)) &
+ acp_shm.cwcur;
acp_shm.reggap = acp_shm.bslots + acp_shm.aifs;
/* Indicate the new params to the ucode */
acp_shm.status = brcms_b_read_shm(wlc->hw, (M_EDCF_QINFO +
@@ -4242,7 +4148,7 @@ void brcms_c_wme_setparams(struct brcms_c_info *wlc, u16 aci,
}
}
-void brcms_c_edcf_setparams(struct brcms_c_info *wlc, bool suspend)
+static void brcms_c_edcf_setparams(struct brcms_c_info *wlc, bool suspend)
{
u16 aci;
int i_ac;
@@ -4255,7 +4161,7 @@ void brcms_c_edcf_setparams(struct brcms_c_info *wlc, bool suspend)
}; /* ucode needs these parameters during its initialization */
const struct edcf_acparam *edcf_acp = &default_edcf_acparams[0];
- for (i_ac = 0; i_ac < AC_COUNT; i_ac++, edcf_acp++) {
+ for (i_ac = 0; i_ac < IEEE80211_NUM_ACS; i_ac++, edcf_acp++) {
/* find out which ac this set of params applies to */
aci = (edcf_acp->ACI & EDCF_ACI_MASK) >> EDCF_ACI_SHIFT;
@@ -4277,17 +4183,6 @@ void brcms_c_edcf_setparams(struct brcms_c_info *wlc, bool suspend)
}
}
-/* maintain LED behavior in down state */
-static void brcms_c_down_led_upd(struct brcms_c_info *wlc)
-{
- /*
- * maintain LEDs while in down state, turn on sbclk if
- * not available yet. Turn on sbclk if necessary
- */
- brcms_b_pllreq(wlc->hw, true, BRCMS_PLLREQ_FLIP);
- brcms_b_pllreq(wlc->hw, false, BRCMS_PLLREQ_FLIP);
-}
-
static void brcms_c_radio_monitor_start(struct brcms_c_info *wlc)
{
/* Don't start the timer if HWRADIO feature is disabled */
@@ -4299,28 +4194,6 @@ static void brcms_c_radio_monitor_start(struct brcms_c_info *wlc)
brcms_add_timer(wlc->radio_timer, TIMER_INTERVAL_RADIOCHK, true);
}
-static void brcms_c_radio_disable(struct brcms_c_info *wlc)
-{
- if (!wlc->pub->up) {
- brcms_c_down_led_upd(wlc);
- return;
- }
-
- brcms_c_radio_monitor_start(wlc);
- brcms_down(wlc->wl);
-}
-
-static void brcms_c_radio_enable(struct brcms_c_info *wlc)
-{
- if (wlc->pub->up)
- return;
-
- if (brcms_deviceremoved(wlc))
- return;
-
- brcms_up(wlc->wl);
-}
-
static bool brcms_c_radio_monitor_stop(struct brcms_c_info *wlc)
{
if (!wlc->radio_monitor)
@@ -4343,18 +4216,6 @@ static void brcms_c_radio_hwdisable_upd(struct brcms_c_info *wlc)
mboolclr(wlc->pub->radio_disabled, WL_RADIO_HW_DISABLE);
}
-/*
- * centralized radio disable/enable function,
- * invoke radio enable/disable after updating hwradio status
- */
-static void brcms_c_radio_upd(struct brcms_c_info *wlc)
-{
- if (wlc->pub->radio_disabled)
- brcms_c_radio_disable(wlc);
- else
- brcms_c_radio_enable(wlc);
-}
-
/* update hwradio status and return it */
bool brcms_c_check_radio_disabled(struct brcms_c_info *wlc)
{
@@ -4376,12 +4237,7 @@ static void brcms_c_radio_timer(void *arg)
return;
}
- /* cap mpc off count */
- if (wlc->mpc_offcnt < BRCMS_MPC_MAX_DELAYCNT)
- wlc->mpc_offcnt++;
-
brcms_c_radio_hwdisable_upd(wlc);
- brcms_c_radio_upd(wlc);
}
/* common low-level watchdog code */
@@ -4407,60 +4263,6 @@ static void brcms_b_watchdog(void *arg)
wlc_phy_watchdog(wlc_hw->band->pi);
}
-static void brcms_c_radio_mpc_upd(struct brcms_c_info *wlc)
-{
- bool mpc_radio, radio_state;
-
- /*
- * Clear the WL_RADIO_MPC_DISABLE bit when mpc feature is disabled
- * in case the WL_RADIO_MPC_DISABLE bit was set. Stop the radio
- * monitor also when WL_RADIO_MPC_DISABLE is the only reason that
- * the radio is going down.
- */
- if (!wlc->mpc) {
- if (!wlc->pub->radio_disabled)
- return;
- mboolclr(wlc->pub->radio_disabled, WL_RADIO_MPC_DISABLE);
- brcms_c_radio_upd(wlc);
- if (!wlc->pub->radio_disabled)
- brcms_c_radio_monitor_stop(wlc);
- return;
- }
-
- /*
- * sync ismpc logic with WL_RADIO_MPC_DISABLE bit in
- * wlc->pub->radio_disabled to go ON, always call radio_upd
- * synchronously to go OFF, postpone radio_upd to later when
- * context is safe(e.g. watchdog)
- */
- radio_state =
- (mboolisset(wlc->pub->radio_disabled, WL_RADIO_MPC_DISABLE) ? OFF :
- ON);
- mpc_radio = (brcms_c_ismpc(wlc) == true) ? OFF : ON;
-
- if (radio_state == ON && mpc_radio == OFF)
- wlc->mpc_delay_off = wlc->mpc_dlycnt;
- else if (radio_state == OFF && mpc_radio == ON) {
- mboolclr(wlc->pub->radio_disabled, WL_RADIO_MPC_DISABLE);
- brcms_c_radio_upd(wlc);
- if (wlc->mpc_offcnt < BRCMS_MPC_THRESHOLD)
- wlc->mpc_dlycnt = BRCMS_MPC_MAX_DELAYCNT;
- else
- wlc->mpc_dlycnt = BRCMS_MPC_MIN_DELAYCNT;
- }
- /*
- * Below logic is meant to capture the transition from mpc off
- * to mpc on for reasons other than wlc->mpc_delay_off keeping
- * the mpc off. In that case reset wlc->mpc_delay_off to
- * wlc->mpc_dlycnt, so that we restart the countdown of mpc_delay_off
- */
- if ((wlc->prev_non_delay_mpc == false) &&
- (brcms_c_is_non_delay_mpc(wlc) == true) && wlc->mpc_delay_off)
- wlc->mpc_delay_off = wlc->mpc_dlycnt;
-
- wlc->prev_non_delay_mpc = brcms_c_is_non_delay_mpc(wlc);
-}
-
/* common watchdog code */
static void brcms_c_watchdog(void *arg)
{
@@ -4481,21 +4283,7 @@ static void brcms_c_watchdog(void *arg)
/* increment second count */
wlc->pub->now++;
- /* delay radio disable */
- if (wlc->mpc_delay_off) {
- if (--wlc->mpc_delay_off == 0) {
- mboolset(wlc->pub->radio_disabled,
- WL_RADIO_MPC_DISABLE);
- if (wlc->mpc && brcms_c_ismpc(wlc))
- wlc->mpc_offcnt = 0;
- }
- }
-
- /* mpc sync */
- brcms_c_radio_mpc_upd(wlc);
- /* radio sync: sw/hw/mpc --> radio_disable/radio_enable */
brcms_c_radio_hwdisable_upd(wlc);
- brcms_c_radio_upd(wlc);
/* if radio is disable, driver may be down, quit here */
if (wlc->pub->radio_disabled)
return;
@@ -4599,9 +4387,6 @@ static void brcms_c_info_init(struct brcms_c_info *wlc, int unit)
/* WME QoS mode is Auto by default */
wlc->pub->_ampdu = AMPDU_AGG_HOST;
wlc->pub->bcmerror = 0;
-
- /* initialize mpc delay */
- wlc->mpc_delay_off = wlc->mpc_dlycnt = BRCMS_MPC_MIN_DELAYCNT;
}
static uint brcms_c_attach_module(struct brcms_c_info *wlc)
@@ -4647,21 +4432,21 @@ struct brcms_pub *brcms_c_pub(struct brcms_c_info *wlc)
* initialize software state for each core and band
* put the whole chip in reset(driver down state), no clock
*/
-static int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device,
- uint unit, bool piomode, void __iomem *regsva,
- struct pci_dev *btparam)
+static int brcms_b_attach(struct brcms_c_info *wlc, struct bcma_device *core,
+ uint unit, bool piomode)
{
struct brcms_hardware *wlc_hw;
- struct d11regs __iomem *regs;
char *macaddr = NULL;
uint err = 0;
uint j;
bool wme = false;
struct shared_phy_params sha_params;
struct wiphy *wiphy = wlc->wiphy;
+ struct pci_dev *pcidev = core->bus->host_pci;
- BCMMSG(wlc->wiphy, "wl%d: vendor 0x%x device 0x%x\n", unit, vendor,
- device);
+ BCMMSG(wlc->wiphy, "wl%d: vendor 0x%x device 0x%x\n", unit,
+ pcidev->vendor,
+ pcidev->device);
wme = true;
@@ -4678,7 +4463,7 @@ static int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device,
* Do the hardware portion of the attach. Also initialize software
* state that depends on the particular hardware we are running.
*/
- wlc_hw->sih = ai_attach(regsva, btparam);
+ wlc_hw->sih = ai_attach(core->bus);
if (wlc_hw->sih == NULL) {
wiphy_err(wiphy, "wl%d: brcms_b_attach: si_attach failed\n",
unit);
@@ -4687,25 +4472,19 @@ static int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device,
}
/* verify again the device is supported */
- if (!brcms_c_chipmatch(vendor, device)) {
+ if (!brcms_c_chipmatch(pcidev->vendor, pcidev->device)) {
wiphy_err(wiphy, "wl%d: brcms_b_attach: Unsupported "
"vendor/device (0x%x/0x%x)\n",
- unit, vendor, device);
+ unit, pcidev->vendor, pcidev->device);
err = 12;
goto fail;
}
- wlc_hw->vendorid = vendor;
- wlc_hw->deviceid = device;
-
- /* set bar0 window to point at D11 core */
- wlc_hw->regs = (struct d11regs __iomem *)
- ai_setcore(wlc_hw->sih, D11_CORE_ID, 0);
- wlc_hw->corerev = ai_corerev(wlc_hw->sih);
+ wlc_hw->vendorid = pcidev->vendor;
+ wlc_hw->deviceid = pcidev->device;
- regs = wlc_hw->regs;
-
- wlc->regs = wlc_hw->regs;
+ wlc_hw->d11core = core;
+ wlc_hw->corerev = core->id.rev;
/* validate chip, chiprev and corerev */
if (!brcms_c_isgoodchip(wlc_hw)) {
@@ -4740,8 +4519,9 @@ static int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device,
wlc_hw->boardrev = (u16) j;
if (!brcms_c_validboardtype(wlc_hw)) {
wiphy_err(wiphy, "wl%d: brcms_b_attach: Unsupported Broadcom "
- "board type (0x%x)" " or revision level (0x%x)\n",
- unit, wlc_hw->sih->boardtype, wlc_hw->boardrev);
+ "board type (0x%x)" " or revision level (0x%x)\n",
+ unit, ai_get_boardtype(wlc_hw->sih),
+ wlc_hw->boardrev);
err = 15;
goto fail;
}
@@ -4762,7 +4542,7 @@ static int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device,
else
wlc_hw->_nbands = 1;
- if ((wlc_hw->sih->chip == BCM43225_CHIP_ID))
+ if ((ai_get_chip_id(wlc_hw->sih) == BCM43225_CHIP_ID))
wlc_hw->_nbands = 1;
/* BMAC_NOTE: remove init of pub values when brcms_c_attach()
@@ -4794,16 +4574,14 @@ static int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device,
sha_params.corerev = wlc_hw->corerev;
sha_params.vid = wlc_hw->vendorid;
sha_params.did = wlc_hw->deviceid;
- sha_params.chip = wlc_hw->sih->chip;
- sha_params.chiprev = wlc_hw->sih->chiprev;
- sha_params.chippkg = wlc_hw->sih->chippkg;
+ sha_params.chip = ai_get_chip_id(wlc_hw->sih);
+ sha_params.chiprev = ai_get_chiprev(wlc_hw->sih);
+ sha_params.chippkg = ai_get_chippkg(wlc_hw->sih);
sha_params.sromrev = wlc_hw->sromrev;
- sha_params.boardtype = wlc_hw->sih->boardtype;
+ sha_params.boardtype = ai_get_boardtype(wlc_hw->sih);
sha_params.boardrev = wlc_hw->boardrev;
- sha_params.boardvendor = wlc_hw->sih->boardvendor;
sha_params.boardflags = wlc_hw->boardflags;
sha_params.boardflags2 = wlc_hw->boardflags2;
- sha_params.buscorerev = wlc_hw->sih->buscorerev;
/* alloc and save pointer to shared phy state area */
wlc_hw->phy_sh = wlc_phy_shared_attach(&sha_params);
@@ -4825,9 +4603,9 @@ static int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device,
wlc_hw->band->bandtype = j ? BRCM_BAND_5G : BRCM_BAND_2G;
wlc->band->bandunit = j;
wlc->band->bandtype = j ? BRCM_BAND_5G : BRCM_BAND_2G;
- wlc->core->coreidx = ai_coreidx(wlc_hw->sih);
+ wlc->core->coreidx = core->core_index;
- wlc_hw->machwcap = R_REG(&regs->machwcap);
+ wlc_hw->machwcap = bcma_read32(core, D11REGOFFS(machwcap));
wlc_hw->machwcap_backup = wlc_hw->machwcap;
/* init tx fifo size */
@@ -4836,7 +4614,7 @@ static int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device,
/* Get a phy for this band */
wlc_hw->band->pi =
- wlc_phy_attach(wlc_hw->phy_sh, regs,
+ wlc_phy_attach(wlc_hw->phy_sh, core,
wlc_hw->band->bandtype,
wlc->wiphy);
if (wlc_hw->band->pi == NULL) {
@@ -4910,10 +4688,6 @@ static int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device,
/* Match driver "down" state */
ai_pci_down(wlc_hw->sih);
- /* register sb interrupt callback functions */
- ai_register_intr_callback(wlc_hw->sih, (void *)brcms_c_wlintrsoff,
- (void *)brcms_c_wlintrsrestore, NULL, wlc);
-
/* turn off pll and xtal to match driver "down" state */
brcms_b_xtal(wlc_hw, OFF);
@@ -4944,10 +4718,9 @@ static int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device,
goto fail;
}
- BCMMSG(wlc->wiphy,
- "deviceid 0x%x nbands %d board 0x%x macaddr: %s\n",
- wlc_hw->deviceid, wlc_hw->_nbands,
- wlc_hw->sih->boardtype, macaddr);
+ BCMMSG(wlc->wiphy, "deviceid 0x%x nbands %d board 0x%x macaddr: %s\n",
+ wlc_hw->deviceid, wlc_hw->_nbands, ai_get_boardtype(wlc_hw->sih),
+ macaddr);
return err;
@@ -5185,7 +4958,6 @@ static int brcms_b_detach(struct brcms_c_info *wlc)
* and per-port interrupt object may have been freed. this must
* be done before sb core switch
*/
- ai_deregister_intr_callback(wlc_hw->sih);
ai_pci_sleep(wlc_hw->sih);
}
@@ -5259,9 +5031,6 @@ static void brcms_c_ap_upd(struct brcms_c_info *wlc)
{
/* STA-BSS; short capable */
wlc->PLCPHdr_override = BRCMS_PLCP_SHORT;
-
- /* fixup mpc */
- wlc->mpc = true;
}
/* Initialize just the hardware when coming out of POR or S3/S5 system states */
@@ -5283,13 +5052,11 @@ static void brcms_b_hw_up(struct brcms_hardware *wlc_hw)
ai_pci_fixcfg(wlc_hw->sih);
/*
+ * TODO: test suspend/resume
+ *
* AI chip doesn't restore bar0win2 on
* hibernation/resume, need sw fixup
*/
- if ((wlc_hw->sih->chip == BCM43224_CHIP_ID) ||
- (wlc_hw->sih->chip == BCM43225_CHIP_ID))
- wlc_hw->regs = (struct d11regs __iomem *)
- ai_setcore(wlc_hw->sih, D11_CORE_ID, 0);
/*
* Inform phy that a POR reset has occurred so
@@ -5301,7 +5068,7 @@ static void brcms_b_hw_up(struct brcms_hardware *wlc_hw)
wlc_hw->wlc->pub->hw_up = true;
if ((wlc_hw->boardflags & BFL_FEM)
- && (wlc_hw->sih->chip == BCM4313_CHIP_ID)) {
+ && (ai_get_chip_id(wlc_hw->sih) == BCM4313_CHIP_ID)) {
if (!
(wlc_hw->boardrev >= 0x1250
&& (wlc_hw->boardflags & BFL_FEM_BT)))
@@ -5376,7 +5143,7 @@ static void brcms_c_wme_retries_write(struct brcms_c_info *wlc)
if (!wlc->clk)
return;
- for (ac = 0; ac < AC_COUNT; ac++)
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
brcms_b_write_shm(wlc->hw, M_AC_TXLMT_ADDR(ac),
wlc->wme_retries[ac]);
}
@@ -5396,7 +5163,7 @@ int brcms_c_up(struct brcms_c_info *wlc)
}
if ((wlc->pub->boardflags & BFL_FEM)
- && (wlc->pub->sih->chip == BCM4313_CHIP_ID)) {
+ && (ai_get_chip_id(wlc->hw->sih) == BCM4313_CHIP_ID)) {
if (wlc->pub->boardrev >= 0x1250
&& (wlc->pub->boardflags & BFL_FEM_BT))
brcms_b_mhf(wlc->hw, MHF5, MHF5_4313_GPIOCTRL,
@@ -5533,9 +5300,9 @@ static int brcms_b_down_finish(struct brcms_hardware *wlc_hw)
} else {
/* Reset and disable the core */
- if (ai_iscoreup(wlc_hw->sih)) {
- if (R_REG(&wlc_hw->regs->maccontrol) &
- MCTL_EN_MAC)
+ if (bcma_core_is_enabled(wlc_hw->d11core)) {
+ if (bcma_read32(wlc_hw->d11core,
+ D11REGOFFS(maccontrol)) & MCTL_EN_MAC)
brcms_c_suspend_mac_and_wait(wlc_hw->wlc);
callbacks += brcms_reset(wlc_hw->wlc->wl);
brcms_c_coredisable(wlc_hw);
@@ -5575,7 +5342,6 @@ uint brcms_c_down(struct brcms_c_info *wlc)
if (!wlc->pub->up)
return callbacks;
- /* in between, mpc could try to bring down again.. */
wlc->going_down = true;
callbacks += brcms_b_bmac_down_prep(wlc->hw);
@@ -5852,7 +5618,7 @@ int brcms_c_set_rate_limit(struct brcms_c_info *wlc, u16 srl, u16 lrl)
brcms_b_retrylimit_upd(wlc->hw, wlc->SRL, wlc->LRL);
- for (ac = 0; ac < AC_COUNT; ac++) {
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
wlc->wme_retries[ac] = SFIELD(wlc->wme_retries[ac],
EDCF_SHORT, wlc->SRL);
wlc->wme_retries[ac] = SFIELD(wlc->wme_retries[ac],
@@ -6103,7 +5869,6 @@ void brcms_c_print_txdesc(struct d11txh *txh)
u8 *rtsph = txh->RTSPhyHeader;
struct ieee80211_rts rts = txh->rts_frame;
- char hexbuf[256];
/* add plcp header along with txh descriptor */
printk(KERN_DEBUG "Raw TxDesc + plcp header:\n");
@@ -6124,17 +5889,16 @@ void brcms_c_print_txdesc(struct d11txh *txh)
printk(KERN_DEBUG "XtraFrameTypes: %04x ", xtraft);
printk(KERN_DEBUG "\n");
- brcmu_format_hex(hexbuf, iv, sizeof(txh->IV));
- printk(KERN_DEBUG "SecIV: %s\n", hexbuf);
- brcmu_format_hex(hexbuf, ra, sizeof(txh->TxFrameRA));
- printk(KERN_DEBUG "RA: %s\n", hexbuf);
+ print_hex_dump_bytes("SecIV:", DUMP_PREFIX_OFFSET, iv, sizeof(txh->IV));
+ print_hex_dump_bytes("RA:", DUMP_PREFIX_OFFSET,
+ ra, sizeof(txh->TxFrameRA));
printk(KERN_DEBUG "Fb FES Time: %04x ", tfestfb);
- brcmu_format_hex(hexbuf, rtspfb, sizeof(txh->RTSPLCPFallback));
- printk(KERN_DEBUG "RTS PLCP: %s ", hexbuf);
+ print_hex_dump_bytes("Fb RTS PLCP:", DUMP_PREFIX_OFFSET,
+ rtspfb, sizeof(txh->RTSPLCPFallback));
printk(KERN_DEBUG "RTS DUR: %04x ", rtsdfb);
- brcmu_format_hex(hexbuf, fragpfb, sizeof(txh->FragPLCPFallback));
- printk(KERN_DEBUG "PLCP: %s ", hexbuf);
+ print_hex_dump_bytes("PLCP:", DUMP_PREFIX_OFFSET,
+ fragpfb, sizeof(txh->FragPLCPFallback));
printk(KERN_DEBUG "DUR: %04x", fragdfb);
printk(KERN_DEBUG "\n");
@@ -6149,18 +5913,18 @@ void brcms_c_print_txdesc(struct d11txh *txh)
printk(KERN_DEBUG "MaxAggbyte_fb: %04x\n", mabyte_f);
printk(KERN_DEBUG "MinByte: %04x\n", mmbyte);
- brcmu_format_hex(hexbuf, rtsph, sizeof(txh->RTSPhyHeader));
- printk(KERN_DEBUG "RTS PLCP: %s ", hexbuf);
- brcmu_format_hex(hexbuf, (u8 *) &rts, sizeof(txh->rts_frame));
- printk(KERN_DEBUG "RTS Frame: %s", hexbuf);
+ print_hex_dump_bytes("RTS PLCP:", DUMP_PREFIX_OFFSET,
+ rtsph, sizeof(txh->RTSPhyHeader));
+ print_hex_dump_bytes("RTS Frame:", DUMP_PREFIX_OFFSET,
+ (u8 *)&rts, sizeof(txh->rts_frame));
printk(KERN_DEBUG "\n");
}
#endif /* defined(BCMDBG) */
#if defined(BCMDBG)
-int
+static int
brcms_c_format_flags(const struct brcms_c_bit_desc *bd, u32 flags, char *buf,
- int len)
+ int len)
{
int i;
char *p = buf;
@@ -6916,7 +6680,7 @@ brcms_c_d11hdrs_mac80211(struct brcms_c_info *wlc, struct ieee80211_hw *hw,
qos = ieee80211_is_data_qos(h->frame_control);
/* compute length of frame in bytes for use in PLCP computations */
- len = brcmu_pkttotlen(p);
+ len = p->len;
phylen = len + FCS_LEN;
/* Get tx_info */
@@ -7695,11 +7459,11 @@ static void
brcms_b_read_tsf(struct brcms_hardware *wlc_hw, u32 *tsf_l_ptr,
u32 *tsf_h_ptr)
{
- struct d11regs __iomem *regs = wlc_hw->regs;
+ struct bcma_device *core = wlc_hw->d11core;
/* read the tsf timer low, then high to get an atomic read */
- *tsf_l_ptr = R_REG(&regs->tsf_timerlow);
- *tsf_h_ptr = R_REG(&regs->tsf_timerhigh);
+ *tsf_l_ptr = bcma_read32(core, D11REGOFFS(tsf_timerlow));
+ *tsf_h_ptr = bcma_read32(core, D11REGOFFS(tsf_timerhigh));
}
/*
@@ -8253,12 +8017,6 @@ int brcms_c_get_tx_power(struct brcms_c_info *wlc)
return (int)(qdbm / BRCMS_TXPWR_DB_FACTOR);
}
-void brcms_c_set_radio_mpc(struct brcms_c_info *wlc, bool mpc)
-{
- wlc->mpc = mpc;
- brcms_c_radio_mpc_upd(wlc);
-}
-
/* Process received frames */
/*
* Return true if more frames need to be processed. false otherwise.
@@ -8293,14 +8051,8 @@ static void brcms_c_recv(struct brcms_c_info *wlc, struct sk_buff *p)
len = p->len;
if (rxh->RxStatus1 & RXS_FCSERR) {
- if (wlc->pub->mac80211_state & MAC80211_PROMISC_BCNS) {
- wiphy_err(wlc->wiphy, "FCSERR while scanning******* -"
- " tossing\n");
+ if (!(wlc->filter_flags & FIF_FCSFAIL))
goto toss;
- } else {
- wiphy_err(wlc->wiphy, "RCSERR!!!\n");
- goto toss;
- }
}
/* check received pkt has at least frame control field */
@@ -8328,21 +8080,17 @@ static bool
brcms_b_recv(struct brcms_hardware *wlc_hw, uint fifo, bool bound)
{
struct sk_buff *p;
- struct sk_buff *head = NULL;
- struct sk_buff *tail = NULL;
+ struct sk_buff *next = NULL;
+ struct sk_buff_head recv_frames;
+
uint n = 0;
uint bound_limit = bound ? RXBND : -1;
BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
- /* gather received frames */
- while ((p = dma_rx(wlc_hw->di[fifo]))) {
+ skb_queue_head_init(&recv_frames);
- if (!tail)
- head = tail = p;
- else {
- tail->prev = p;
- tail = p;
- }
+ /* gather received frames */
+ while (dma_rx(wlc_hw->di[fifo], &recv_frames)) {
/* !give others some time to run! */
if (++n >= bound_limit)
@@ -8353,12 +8101,11 @@ brcms_b_recv(struct brcms_hardware *wlc_hw, uint fifo, bool bound)
dma_rxfill(wlc_hw->di[fifo]);
/* process each frame */
- while ((p = head) != NULL) {
+ skb_queue_walk_safe(&recv_frames, p, next) {
struct d11rxhdr_le *rxh_le;
struct d11rxhdr *rxh;
- head = head->prev;
- p->prev = NULL;
+ skb_unlink(p, &recv_frames);
rxh_le = (struct d11rxhdr_le *)p->data;
rxh = (struct d11rxhdr *)p->data;
@@ -8389,7 +8136,7 @@ bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded)
{
u32 macintstatus;
struct brcms_hardware *wlc_hw = wlc->hw;
- struct d11regs __iomem *regs = wlc_hw->regs;
+ struct bcma_device *core = wlc_hw->d11core;
struct wiphy *wiphy = wlc->wiphy;
if (brcms_deviceremoved(wlc)) {
@@ -8425,7 +8172,7 @@ bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded)
/* ATIM window end */
if (macintstatus & MI_ATIMWINEND) {
BCMMSG(wlc->wiphy, "end of ATIM window\n");
- OR_REG(&regs->maccommand, wlc->qvalid);
+ bcma_set32(core, D11REGOFFS(maccommand), wlc->qvalid);
wlc->qvalid = 0;
}
@@ -8443,18 +8190,17 @@ bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded)
if (macintstatus & MI_GP0) {
wiphy_err(wiphy, "wl%d: PSM microcode watchdog fired at %d "
- "(seconds). Resetting.\n", wlc_hw->unit, wlc_hw->now);
+ "(seconds). Resetting.\n", wlc_hw->unit, wlc_hw->now);
printk_once("%s : PSM Watchdog, chipid 0x%x, chiprev 0x%x\n",
- __func__, wlc_hw->sih->chip,
- wlc_hw->sih->chiprev);
- /* big hammer */
- brcms_init(wlc->wl);
+ __func__, ai_get_chip_id(wlc_hw->sih),
+ ai_get_chiprev(wlc_hw->sih));
+ brcms_fatal_error(wlc_hw->wlc->wl);
}
/* gptimer timeout */
if (macintstatus & MI_TO)
- W_REG(&regs->gptimer, 0);
+ bcma_write32(core, D11REGOFFS(gptimer), 0);
if (macintstatus & MI_RFDISABLE) {
BCMMSG(wlc->wiphy, "wl%d: BMAC Detected a change on the"
@@ -8470,20 +8216,17 @@ bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded)
return wlc->macintstatus != 0;
fatal:
- brcms_init(wlc->wl);
+ brcms_fatal_error(wlc_hw->wlc->wl);
return wlc->macintstatus != 0;
}
-void brcms_c_init(struct brcms_c_info *wlc)
+void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx)
{
- struct d11regs __iomem *regs;
+ struct bcma_device *core = wlc->hw->d11core;
u16 chanspec;
- bool mute = false;
BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
- regs = wlc->regs;
-
/*
* This will happen if a big-hammer was executed. In
* that case, we want to go back to the channel that
@@ -8494,7 +8237,7 @@ void brcms_c_init(struct brcms_c_info *wlc)
else
chanspec = brcms_c_init_chanspec(wlc);
- brcms_b_init(wlc->hw, chanspec, mute);
+ brcms_b_init(wlc->hw, chanspec);
/* update beacon listen interval */
brcms_c_bcn_li_upd(wlc);
@@ -8513,8 +8256,8 @@ void brcms_c_init(struct brcms_c_info *wlc)
* update since init path would reset
* to default value
*/
- W_REG(&regs->tsf_cfprep,
- (bi << CFPREP_CBI_SHIFT));
+ bcma_write32(core, D11REGOFFS(tsf_cfprep),
+ bi << CFPREP_CBI_SHIFT);
/* Update maccontrol PM related bits */
brcms_c_set_ps_ctrl(wlc);
@@ -8544,7 +8287,7 @@ void brcms_c_init(struct brcms_c_info *wlc)
brcms_c_bsinit(wlc);
/* Enable EDCF mode (while the MAC is suspended) */
- OR_REG(&regs->ifs_ctl, IFS_USEEDCF);
+ bcma_set16(core, D11REGOFFS(ifs_ctl), IFS_USEEDCF);
brcms_c_edcf_setparams(wlc, false);
/* Init precedence maps for empty FIFOs */
@@ -8560,14 +8303,15 @@ void brcms_c_init(struct brcms_c_info *wlc)
/* ..now really unleash hell (allow the MAC out of suspend) */
brcms_c_enable_mac(wlc);
+ /* suspend the tx fifos and mute the phy for preism cac time */
+ if (mute_tx)
+ brcms_b_mute(wlc->hw, true);
+
/* clear tx flow control */
brcms_c_txflowcontrol_reset(wlc);
/* enable the RF Disable Delay timer */
- W_REG(&wlc->regs->rfdisabledly, RFDISABLE_DEFAULT);
-
- /* initialize mpc delay */
- wlc->mpc_delay_off = wlc->mpc_dlycnt = BRCMS_MPC_MIN_DELAYCNT;
+ bcma_write32(core, D11REGOFFS(rfdisabledly), RFDISABLE_DEFAULT);
/*
* Initialize WME parameters; if they haven't been set by some other
@@ -8577,7 +8321,7 @@ void brcms_c_init(struct brcms_c_info *wlc)
/* Uninitialized; read from HW */
int ac;
- for (ac = 0; ac < AC_COUNT; ac++)
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
wlc->wme_retries[ac] =
brcms_b_read_shm(wlc->hw, M_AC_TXLMT_ADDR(ac));
}
@@ -8587,9 +8331,8 @@ void brcms_c_init(struct brcms_c_info *wlc)
* The common driver entry routine. Error codes should be unique
*/
struct brcms_c_info *
-brcms_c_attach(struct brcms_info *wl, u16 vendor, u16 device, uint unit,
- bool piomode, void __iomem *regsva, struct pci_dev *btparam,
- uint *perr)
+brcms_c_attach(struct brcms_info *wl, struct bcma_device *core, uint unit,
+ bool piomode, uint *perr)
{
struct brcms_c_info *wlc;
uint err = 0;
@@ -8597,7 +8340,7 @@ brcms_c_attach(struct brcms_info *wl, u16 vendor, u16 device, uint unit,
struct brcms_pub *pub;
/* allocate struct brcms_c_info state and its substructures */
- wlc = (struct brcms_c_info *) brcms_c_attach_malloc(unit, &err, device);
+ wlc = (struct brcms_c_info *) brcms_c_attach_malloc(unit, &err, 0);
if (wlc == NULL)
goto fail;
wlc->wiphy = wl->wiphy;
@@ -8624,8 +8367,7 @@ brcms_c_attach(struct brcms_info *wl, u16 vendor, u16 device, uint unit,
* low level attach steps(all hw accesses go
* inside, no more in rest of the attach)
*/
- err = brcms_b_attach(wlc, vendor, device, unit, piomode, regsva,
- btparam);
+ err = brcms_b_attach(wlc, core, unit, piomode);
if (err)
goto fail;
@@ -8754,8 +8496,6 @@ brcms_c_attach(struct brcms_info *wl, u16 vendor, u16 device, uint unit,
brcms_c_ht_update_sgi_rx(wlc, 0);
}
- /* initialize radio_mpc_disable according to wlc->mpc */
- brcms_c_radio_mpc_upd(wlc);
brcms_b_antsel_set(wlc->hw, wlc->asi->antsel_avail);
if (perr)
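
Note on the brcms_b_recv() rework above: the hand-rolled prev-linked frame list is replaced by the kernel's struct sk_buff_head helpers, with dma_rx() now filling a caller-supplied queue. A minimal sketch of that gather-then-consume pattern (dev_kfree_skb() stands in for the real d11 header conversion and hand-off to mac80211):

#include <linux/skbuff.h>

/* Sketch only: collect completed frames, then walk the queue with the
 * _safe variant so each entry can be unlinked while iterating.
 */
static void rx_gather_sketch(struct sk_buff_head *completed)
{
	struct sk_buff_head recv_frames;
	struct sk_buff *p, *next;

	skb_queue_head_init(&recv_frames);

	/* stand-in for dma_rx(di, &recv_frames) moving hardware-completed
	 * buffers onto the local queue
	 */
	skb_queue_splice_init(completed, &recv_frames);

	skb_queue_walk_safe(&recv_frames, p, next) {
		skb_unlink(p, &recv_frames);
		dev_kfree_skb(p);	/* per-frame processing in real code */
	}
}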
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.h b/drivers/net/wireless/brcm80211/brcmsmac/main.h
index c0e0fcfdfaf8..adb136ec1f04 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.h
@@ -44,8 +44,6 @@
/* transmit buffer max headroom for protocol headers */
#define TXOFF (D11_TXH_LEN + D11_PHY_HDR_LEN)
-#define AC_COUNT 4
-
/* Macros for doing definition and get/set of bitfields
* Usage example, e.g. a three-bit field (bits 4-6):
* #define <NAME>_M BITFIELD_MASK(3)
@@ -336,7 +334,7 @@ struct brcms_hardware {
u32 machwcap_backup; /* backup of machwcap */
struct si_pub *sih; /* SI handle (cookie for siutils calls) */
- struct d11regs __iomem *regs; /* pointer to device registers */
+ struct bcma_device *d11core; /* pointer to 802.11 core */
struct phy_shim_info *physhim; /* phy shim layer handler */
struct shared_phy *phy_sh; /* pointer to shared phy state */
struct brcms_hw_band *band;/* pointer to active per-band state */
@@ -402,7 +400,6 @@ struct brcms_txq_info {
*
* pub: pointer to driver public state.
* wl: pointer to specific private state.
- * regs: pointer to device registers.
* hw: HW related state.
* clkreq_override: setting for clkreq for PCIE : Auto, 0, 1.
* fastpwrup_dly: time in us needed to bring up d11 fast clock.
@@ -427,11 +424,6 @@ struct brcms_txq_info {
* bandinit_pending: track band init in auto band.
* radio_monitor: radio timer is running.
* going_down: down path intermediate variable.
- * mpc: enable minimum power consumption.
- * mpc_dlycnt: # of watchdog cnt before turn disable radio.
- * mpc_offcnt: # of watchdog cnt that radio is disabled.
- * mpc_delay_off: delay radio disable by # of watchdog cnt.
- * prev_non_delay_mpc: prev state brcms_c_is_non_delay_mpc.
* wdtimer: timer for watchdog routine.
* radio_timer: timer for hw radio button monitor routine.
* monitor: monitor (MPDU sniffing) mode.
@@ -441,7 +433,7 @@ struct brcms_txq_info {
* bcn_li_dtim: beacon listen interval in # dtims.
* WDarmed: watchdog timer is armed.
* WDlast: last time wlc_watchdog() was called.
- * edcf_txop[AC_COUNT]: current txop for each ac.
+ * edcf_txop[IEEE80211_NUM_ACS]: current txop for each ac.
* wme_retries: per-AC retry limits.
* tx_prec_map: Precedence map based on HW FIFO space.
* fifo2prec_map[NFIFO]: pointer to fifo2_prec map based on WME.
@@ -484,7 +476,6 @@ struct brcms_txq_info {
struct brcms_c_info {
struct brcms_pub *pub;
struct brcms_info *wl;
- struct d11regs __iomem *regs;
struct brcms_hardware *hw;
/* clock */
@@ -522,18 +513,11 @@ struct brcms_c_info {
bool radio_monitor;
bool going_down;
- bool mpc;
- u8 mpc_dlycnt;
- u8 mpc_offcnt;
- u8 mpc_delay_off;
- u8 prev_non_delay_mpc;
-
struct brcms_timer *wdtimer;
struct brcms_timer *radio_timer;
/* promiscuous */
- bool monitor;
- bool bcnmisc_monitor;
+ uint filter_flags;
/* driver feature */
bool _rifs;
@@ -546,9 +530,9 @@ struct brcms_c_info {
u32 WDlast;
/* WME */
- u16 edcf_txop[AC_COUNT];
+ u16 edcf_txop[IEEE80211_NUM_ACS];
- u16 wme_retries[AC_COUNT];
+ u16 wme_retries[IEEE80211_NUM_ACS];
u16 tx_prec_map;
u16 fifo2prec_map[NFIFO];
@@ -671,8 +655,7 @@ extern void brcms_c_print_txdesc(struct d11txh *txh);
#endif
extern int brcms_c_set_gmode(struct brcms_c_info *wlc, u8 gmode, bool config);
-extern void brcms_c_mac_bcn_promisc_change(struct brcms_c_info *wlc,
- bool promisc);
+extern void brcms_c_mac_promisc(struct brcms_c_info *wlc, uint filter_flags);
extern void brcms_c_send_q(struct brcms_c_info *wlc);
extern int brcms_c_prep_pdu(struct brcms_c_info *wlc, struct sk_buff *pdu,
uint *fifo);
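
Note on the main.h hunks above: the separate monitor/bcnmisc_monitor booleans become a single cached FIF_* bitmask (filter_flags), and the WME arrays are sized by mac80211's IEEE80211_NUM_ACS instead of the removed private AC_COUNT. A small hedged sketch of how such a bitmask is typically tested; the helper names are illustrative and not part of the driver:

#include <net/mac80211.h>

/* Sketch: filter decisions become plain tests on the cached bitmask. */
static bool keep_fcserr_frame(unsigned int filter_flags)
{
	/* mirrors the FCSERR check added in brcms_c_recv() */
	return filter_flags & FIF_FCSFAIL;
}

static bool rx_other_bss(unsigned int filter_flags)
{
	return filter_flags & (FIF_ALLMULTI | FIF_OTHER_BSS);
}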
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/nicpci.c b/drivers/net/wireless/brcm80211/brcmsmac/nicpci.c
index 0bcb26792046..7fad6dc19258 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/nicpci.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/nicpci.c
@@ -139,6 +139,9 @@
#define SRSH_PI_MASK 0xf000 /* bit 15:12 */
#define SRSH_PI_SHIFT 12 /* bit 15:12 */
+#define PCIREGOFFS(field) offsetof(struct sbpciregs, field)
+#define PCIEREGOFFS(field) offsetof(struct sbpcieregs, field)
+
/* Sonics side: PCI core and host control registers */
struct sbpciregs {
u32 control; /* PCI control */
@@ -205,11 +208,7 @@ struct sbpcieregs {
};
struct pcicore_info {
- union {
- struct sbpcieregs __iomem *pcieregs;
- struct sbpciregs __iomem *pciregs;
- } regs; /* Memory mapped register to the core */
-
+ struct bcma_device *core;
struct si_pub *sih; /* System interconnect handle */
struct pci_dev *dev;
u8 pciecap_lcreg_offset;/* PCIE capability LCreg offset
@@ -224,9 +223,9 @@ struct pcicore_info {
};
#define PCIE_ASPM(sih) \
- (((sih)->buscoretype == PCIE_CORE_ID) && \
- (((sih)->buscorerev >= 3) && \
- ((sih)->buscorerev <= 5)))
+ ((ai_get_buscoretype(sih) == PCIE_CORE_ID) && \
+ ((ai_get_buscorerev(sih) >= 3) && \
+ (ai_get_buscorerev(sih) <= 5)))
/* delay needed between the mdio control/ mdiodata register data access */
@@ -238,8 +237,7 @@ static void pr28829_delay(void)
/* Initialize the PCI core.
* It's caller's responsibility to make sure that this is done only once
*/
-struct pcicore_info *pcicore_init(struct si_pub *sih, struct pci_dev *pdev,
- void __iomem *regs)
+struct pcicore_info *pcicore_init(struct si_pub *sih, struct bcma_device *core)
{
struct pcicore_info *pi;
@@ -249,17 +247,15 @@ struct pcicore_info *pcicore_init(struct si_pub *sih, struct pci_dev *pdev,
return NULL;
pi->sih = sih;
- pi->dev = pdev;
+ pi->dev = core->bus->host_pci;
+ pi->core = core;
- if (sih->buscoretype == PCIE_CORE_ID) {
+ if (core->id.id == PCIE_CORE_ID) {
u8 cap_ptr;
- pi->regs.pcieregs = regs;
cap_ptr = pcicore_find_pci_capability(pi->dev, PCI_CAP_ID_EXP,
NULL, NULL);
pi->pciecap_lcreg_offset = cap_ptr + PCIE_CAP_LINKCTRL_OFFSET;
- } else
- pi->regs.pciregs = regs;
-
+ }
return pi;
}
@@ -334,37 +330,37 @@ end:
/* ***** Register Access API */
static uint
-pcie_readreg(struct sbpcieregs __iomem *pcieregs, uint addrtype, uint offset)
+pcie_readreg(struct bcma_device *core, uint addrtype, uint offset)
{
uint retval = 0xFFFFFFFF;
switch (addrtype) {
case PCIE_CONFIGREGS:
- W_REG(&pcieregs->configaddr, offset);
- (void)R_REG((&pcieregs->configaddr));
- retval = R_REG(&pcieregs->configdata);
+ bcma_write32(core, PCIEREGOFFS(configaddr), offset);
+ (void)bcma_read32(core, PCIEREGOFFS(configaddr));
+ retval = bcma_read32(core, PCIEREGOFFS(configdata));
break;
case PCIE_PCIEREGS:
- W_REG(&pcieregs->pcieindaddr, offset);
- (void)R_REG(&pcieregs->pcieindaddr);
- retval = R_REG(&pcieregs->pcieinddata);
+ bcma_write32(core, PCIEREGOFFS(pcieindaddr), offset);
+ (void)bcma_read32(core, PCIEREGOFFS(pcieindaddr));
+ retval = bcma_read32(core, PCIEREGOFFS(pcieinddata));
break;
}
return retval;
}
-static uint pcie_writereg(struct sbpcieregs __iomem *pcieregs, uint addrtype,
+static uint pcie_writereg(struct bcma_device *core, uint addrtype,
uint offset, uint val)
{
switch (addrtype) {
case PCIE_CONFIGREGS:
- W_REG((&pcieregs->configaddr), offset);
- W_REG((&pcieregs->configdata), val);
+ bcma_write32(core, PCIEREGOFFS(configaddr), offset);
+ bcma_write32(core, PCIEREGOFFS(configdata), val);
break;
case PCIE_PCIEREGS:
- W_REG((&pcieregs->pcieindaddr), offset);
- W_REG((&pcieregs->pcieinddata), val);
+ bcma_write32(core, PCIEREGOFFS(pcieindaddr), offset);
+ bcma_write32(core, PCIEREGOFFS(pcieinddata), val);
break;
default:
break;
@@ -374,7 +370,6 @@ static uint pcie_writereg(struct sbpcieregs __iomem *pcieregs, uint addrtype,
static bool pcie_mdiosetblock(struct pcicore_info *pi, uint blk)
{
- struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs;
uint mdiodata, i = 0;
uint pcie_serdes_spinwait = 200;
@@ -382,12 +377,13 @@ static bool pcie_mdiosetblock(struct pcicore_info *pi, uint blk)
(MDIODATA_DEV_ADDR << MDIODATA_DEVADDR_SHF) |
(MDIODATA_BLK_ADDR << MDIODATA_REGADDR_SHF) |
(blk << 4));
- W_REG(&pcieregs->mdiodata, mdiodata);
+ bcma_write32(pi->core, PCIEREGOFFS(mdiodata), mdiodata);
pr28829_delay();
/* retry till the transaction is complete */
while (i < pcie_serdes_spinwait) {
- if (R_REG(&pcieregs->mdiocontrol) & MDIOCTL_ACCESS_DONE)
+ if (bcma_read32(pi->core, PCIEREGOFFS(mdiocontrol)) &
+ MDIOCTL_ACCESS_DONE)
break;
udelay(1000);
@@ -404,15 +400,15 @@ static int
pcie_mdioop(struct pcicore_info *pi, uint physmedia, uint regaddr, bool write,
uint *val)
{
- struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs;
uint mdiodata;
uint i = 0;
uint pcie_serdes_spinwait = 10;
/* enable mdio access to SERDES */
- W_REG(&pcieregs->mdiocontrol, MDIOCTL_PREAM_EN | MDIOCTL_DIVISOR_VAL);
+ bcma_write32(pi->core, PCIEREGOFFS(mdiocontrol),
+ MDIOCTL_PREAM_EN | MDIOCTL_DIVISOR_VAL);
- if (pi->sih->buscorerev >= 10) {
+ if (ai_get_buscorerev(pi->sih) >= 10) {
/* new serdes is slower in rw,
* using two layers of reg address mapping
*/
@@ -432,20 +428,22 @@ pcie_mdioop(struct pcicore_info *pi, uint physmedia, uint regaddr, bool write,
mdiodata |= (MDIODATA_START | MDIODATA_WRITE | MDIODATA_TA |
*val);
- W_REG(&pcieregs->mdiodata, mdiodata);
+ bcma_write32(pi->core, PCIEREGOFFS(mdiodata), mdiodata);
pr28829_delay();
/* retry till the transaction is complete */
while (i < pcie_serdes_spinwait) {
- if (R_REG(&pcieregs->mdiocontrol) & MDIOCTL_ACCESS_DONE) {
+ if (bcma_read32(pi->core, PCIEREGOFFS(mdiocontrol)) &
+ MDIOCTL_ACCESS_DONE) {
if (!write) {
pr28829_delay();
- *val = (R_REG(&pcieregs->mdiodata) &
+ *val = (bcma_read32(pi->core,
+ PCIEREGOFFS(mdiodata)) &
MDIODATA_MASK);
}
/* Disable mdio access to SERDES */
- W_REG(&pcieregs->mdiocontrol, 0);
+ bcma_write32(pi->core, PCIEREGOFFS(mdiocontrol), 0);
return 0;
}
udelay(1000);
@@ -453,7 +451,7 @@ pcie_mdioop(struct pcicore_info *pi, uint physmedia, uint regaddr, bool write,
}
/* Timed out. Disable mdio access to SERDES. */
- W_REG(&pcieregs->mdiocontrol, 0);
+ bcma_write32(pi->core, PCIEREGOFFS(mdiocontrol), 0);
return 1;
}
@@ -502,18 +500,18 @@ static void pcie_extendL1timer(struct pcicore_info *pi, bool extend)
{
u32 w;
struct si_pub *sih = pi->sih;
- struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs;
- if (sih->buscoretype != PCIE_CORE_ID || sih->buscorerev < 7)
+ if (ai_get_buscoretype(sih) != PCIE_CORE_ID ||
+ ai_get_buscorerev(sih) < 7)
return;
- w = pcie_readreg(pcieregs, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG);
+ w = pcie_readreg(pi->core, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG);
if (extend)
w |= PCIE_ASPMTIMER_EXTEND;
else
w &= ~PCIE_ASPMTIMER_EXTEND;
- pcie_writereg(pcieregs, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG, w);
- w = pcie_readreg(pcieregs, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG);
+ pcie_writereg(pi->core, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG, w);
+ w = pcie_readreg(pi->core, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG);
}
/* centralized clkreq control policy */
@@ -527,25 +525,27 @@ static void pcie_clkreq_upd(struct pcicore_info *pi, uint state)
pcie_clkreq(pi, 1, 0);
break;
case SI_PCIDOWN:
- if (sih->buscorerev == 6) { /* turn on serdes PLL down */
- ai_corereg(sih, SI_CC_IDX,
- offsetof(struct chipcregs, chipcontrol_addr),
- ~0, 0);
- ai_corereg(sih, SI_CC_IDX,
- offsetof(struct chipcregs, chipcontrol_data),
- ~0x40, 0);
+ /* turn on serdes PLL down */
+ if (ai_get_buscorerev(sih) == 6) {
+ ai_cc_reg(sih,
+ offsetof(struct chipcregs, chipcontrol_addr),
+ ~0, 0);
+ ai_cc_reg(sih,
+ offsetof(struct chipcregs, chipcontrol_data),
+ ~0x40, 0);
} else if (pi->pcie_pr42767) {
pcie_clkreq(pi, 1, 1);
}
break;
case SI_PCIUP:
- if (sih->buscorerev == 6) { /* turn off serdes PLL down */
- ai_corereg(sih, SI_CC_IDX,
- offsetof(struct chipcregs, chipcontrol_addr),
- ~0, 0);
- ai_corereg(sih, SI_CC_IDX,
- offsetof(struct chipcregs, chipcontrol_data),
- ~0x40, 0x40);
+ /* turn off serdes PLL down */
+ if (ai_get_buscorerev(sih) == 6) {
+ ai_cc_reg(sih,
+ offsetof(struct chipcregs, chipcontrol_addr),
+ ~0, 0);
+ ai_cc_reg(sih,
+ offsetof(struct chipcregs, chipcontrol_data),
+ ~0x40, 0x40);
} else if (PCIE_ASPM(sih)) { /* disable clkreq */
pcie_clkreq(pi, 1, 0);
}
@@ -562,7 +562,7 @@ static void pcie_war_polarity(struct pcicore_info *pi)
if (pi->pcie_polarity != 0)
return;
- w = pcie_readreg(pi->regs.pcieregs, PCIE_PCIEREGS, PCIE_PLP_STATUSREG);
+ w = pcie_readreg(pi->core, PCIE_PCIEREGS, PCIE_PLP_STATUSREG);
/* Detect the current polarity at attach and force that polarity and
* disable changing the polarity
@@ -581,18 +581,15 @@ static void pcie_war_polarity(struct pcicore_info *pi)
*/
static void pcie_war_aspm_clkreq(struct pcicore_info *pi)
{
- struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs;
struct si_pub *sih = pi->sih;
u16 val16;
- u16 __iomem *reg16;
u32 w;
if (!PCIE_ASPM(sih))
return;
/* bypass this on QT or VSIM */
- reg16 = &pcieregs->sprom[SRSH_ASPM_OFFSET];
- val16 = R_REG(reg16);
+ val16 = bcma_read16(pi->core, PCIEREGOFFS(sprom[SRSH_ASPM_OFFSET]));
val16 &= ~SRSH_ASPM_ENB;
if (pi->pcie_war_aspm_ovr == PCIE_ASPM_ENAB)
@@ -602,15 +599,15 @@ static void pcie_war_aspm_clkreq(struct pcicore_info *pi)
else if (pi->pcie_war_aspm_ovr == PCIE_ASPM_L0s_ENAB)
val16 |= SRSH_ASPM_L0s_ENB;
- W_REG(reg16, val16);
+ bcma_write16(pi->core, PCIEREGOFFS(sprom[SRSH_ASPM_OFFSET]), val16);
pci_read_config_dword(pi->dev, pi->pciecap_lcreg_offset, &w);
w &= ~PCIE_ASPM_ENAB;
w |= pi->pcie_war_aspm_ovr;
pci_write_config_dword(pi->dev, pi->pciecap_lcreg_offset, w);
- reg16 = &pcieregs->sprom[SRSH_CLKREQ_OFFSET_REV5];
- val16 = R_REG(reg16);
+ val16 = bcma_read16(pi->core,
+ PCIEREGOFFS(sprom[SRSH_CLKREQ_OFFSET_REV5]));
if (pi->pcie_war_aspm_ovr != PCIE_ASPM_DISAB) {
val16 |= SRSH_CLKREQ_ENB;
@@ -618,7 +615,8 @@ static void pcie_war_aspm_clkreq(struct pcicore_info *pi)
} else
val16 &= ~SRSH_CLKREQ_ENB;
- W_REG(reg16, val16);
+ bcma_write16(pi->core, PCIEREGOFFS(sprom[SRSH_CLKREQ_OFFSET_REV5]),
+ val16);
}
/* Apply the polarity determined at the start */
@@ -642,16 +640,15 @@ static void pcie_war_serdes(struct pcicore_info *pi)
/* Needs to happen when coming out of 'standby'/'hibernate' */
static void pcie_misc_config_fixup(struct pcicore_info *pi)
{
- struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs;
u16 val16;
- u16 __iomem *reg16;
- reg16 = &pcieregs->sprom[SRSH_PCIE_MISC_CONFIG];
- val16 = R_REG(reg16);
+ val16 = bcma_read16(pi->core,
+ PCIEREGOFFS(sprom[SRSH_PCIE_MISC_CONFIG]));
if ((val16 & SRSH_L23READY_EXIT_NOPERST) == 0) {
val16 |= SRSH_L23READY_EXIT_NOPERST;
- W_REG(reg16, val16);
+ bcma_write16(pi->core,
+ PCIEREGOFFS(sprom[SRSH_PCIE_MISC_CONFIG]), val16);
}
}
@@ -659,62 +656,57 @@ static void pcie_misc_config_fixup(struct pcicore_info *pi)
/* Needs to happen when coming out of 'standby'/'hibernate' */
static void pcie_war_noplldown(struct pcicore_info *pi)
{
- struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs;
- u16 __iomem *reg16;
-
/* turn off serdes PLL down */
- ai_corereg(pi->sih, SI_CC_IDX, offsetof(struct chipcregs, chipcontrol),
- CHIPCTRL_4321_PLL_DOWN, CHIPCTRL_4321_PLL_DOWN);
+ ai_cc_reg(pi->sih, offsetof(struct chipcregs, chipcontrol),
+ CHIPCTRL_4321_PLL_DOWN, CHIPCTRL_4321_PLL_DOWN);
/* clear srom shadow backdoor */
- reg16 = &pcieregs->sprom[SRSH_BD_OFFSET];
- W_REG(reg16, 0);
+ bcma_write16(pi->core, PCIEREGOFFS(sprom[SRSH_BD_OFFSET]), 0);
}
/* Needs to happen when coming out of 'standby'/'hibernate' */
static void pcie_war_pci_setup(struct pcicore_info *pi)
{
struct si_pub *sih = pi->sih;
- struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs;
u32 w;
- if (sih->buscorerev == 0 || sih->buscorerev == 1) {
- w = pcie_readreg(pcieregs, PCIE_PCIEREGS,
+ if (ai_get_buscorerev(sih) == 0 || ai_get_buscorerev(sih) == 1) {
+ w = pcie_readreg(pi->core, PCIE_PCIEREGS,
PCIE_TLP_WORKAROUNDSREG);
w |= 0x8;
- pcie_writereg(pcieregs, PCIE_PCIEREGS,
+ pcie_writereg(pi->core, PCIE_PCIEREGS,
PCIE_TLP_WORKAROUNDSREG, w);
}
- if (sih->buscorerev == 1) {
- w = pcie_readreg(pcieregs, PCIE_PCIEREGS, PCIE_DLLP_LCREG);
+ if (ai_get_buscorerev(sih) == 1) {
+ w = pcie_readreg(pi->core, PCIE_PCIEREGS, PCIE_DLLP_LCREG);
w |= 0x40;
- pcie_writereg(pcieregs, PCIE_PCIEREGS, PCIE_DLLP_LCREG, w);
+ pcie_writereg(pi->core, PCIE_PCIEREGS, PCIE_DLLP_LCREG, w);
}
- if (sih->buscorerev == 0) {
+ if (ai_get_buscorerev(sih) == 0) {
pcie_mdiowrite(pi, MDIODATA_DEV_RX, SERDES_RX_TIMER1, 0x8128);
pcie_mdiowrite(pi, MDIODATA_DEV_RX, SERDES_RX_CDR, 0x0100);
pcie_mdiowrite(pi, MDIODATA_DEV_RX, SERDES_RX_CDRBW, 0x1466);
} else if (PCIE_ASPM(sih)) {
/* Change the L1 threshold for better performance */
- w = pcie_readreg(pcieregs, PCIE_PCIEREGS,
+ w = pcie_readreg(pi->core, PCIE_PCIEREGS,
PCIE_DLLP_PMTHRESHREG);
w &= ~PCIE_L1THRESHOLDTIME_MASK;
w |= PCIE_L1THRESHOLD_WARVAL << PCIE_L1THRESHOLDTIME_SHIFT;
- pcie_writereg(pcieregs, PCIE_PCIEREGS,
+ pcie_writereg(pi->core, PCIE_PCIEREGS,
PCIE_DLLP_PMTHRESHREG, w);
pcie_war_serdes(pi);
pcie_war_aspm_clkreq(pi);
- } else if (pi->sih->buscorerev == 7)
+ } else if (ai_get_buscorerev(pi->sih) == 7)
pcie_war_noplldown(pi);
/* Note that the fix is actually in the SROM,
* that's why this is open-ended
*/
- if (pi->sih->buscorerev >= 6)
+ if (ai_get_buscorerev(pi->sih) >= 6)
pcie_misc_config_fixup(pi);
}
@@ -745,7 +737,7 @@ void pcicore_attach(struct pcicore_info *pi, int state)
void pcicore_hwup(struct pcicore_info *pi)
{
- if (!pi || pi->sih->buscoretype != PCIE_CORE_ID)
+ if (!pi || ai_get_buscoretype(pi->sih) != PCIE_CORE_ID)
return;
pcie_war_pci_setup(pi);
@@ -753,7 +745,7 @@ void pcicore_hwup(struct pcicore_info *pi)
void pcicore_up(struct pcicore_info *pi, int state)
{
- if (!pi || pi->sih->buscoretype != PCIE_CORE_ID)
+ if (!pi || ai_get_buscoretype(pi->sih) != PCIE_CORE_ID)
return;
/* Restore L1 timer for better performance */
@@ -781,7 +773,7 @@ void pcicore_sleep(struct pcicore_info *pi)
void pcicore_down(struct pcicore_info *pi, int state)
{
- if (!pi || pi->sih->buscoretype != PCIE_CORE_ID)
+ if (!pi || ai_get_buscoretype(pi->sih) != PCIE_CORE_ID)
return;
pcie_clkreq_upd(pi, state);
@@ -790,46 +782,45 @@ void pcicore_down(struct pcicore_info *pi, int state)
pcie_extendL1timer(pi, false);
}
-/* precondition: current core is sii->buscoretype */
-static void pcicore_fixcfg(struct pcicore_info *pi, u16 __iomem *reg16)
+void pcicore_fixcfg(struct pcicore_info *pi)
{
- struct si_info *sii = (struct si_info *)(pi->sih);
+ struct bcma_device *core = pi->core;
u16 val16;
- uint pciidx;
+ uint regoff;
- pciidx = ai_coreidx(&sii->pub);
- val16 = R_REG(reg16);
- if (((val16 & SRSH_PI_MASK) >> SRSH_PI_SHIFT) != (u16)pciidx) {
- val16 = (u16)(pciidx << SRSH_PI_SHIFT) |
- (val16 & ~SRSH_PI_MASK);
- W_REG(reg16, val16);
- }
-}
+ switch (pi->core->id.id) {
+ case BCMA_CORE_PCI:
+ regoff = PCIREGOFFS(sprom[SRSH_PI_OFFSET]);
+ break;
-void
-pcicore_fixcfg_pci(struct pcicore_info *pi, struct sbpciregs __iomem *pciregs)
-{
- pcicore_fixcfg(pi, &pciregs->sprom[SRSH_PI_OFFSET]);
-}
+ case BCMA_CORE_PCIE:
+ regoff = PCIEREGOFFS(sprom[SRSH_PI_OFFSET]);
+ break;
-void pcicore_fixcfg_pcie(struct pcicore_info *pi,
- struct sbpcieregs __iomem *pcieregs)
-{
- pcicore_fixcfg(pi, &pcieregs->sprom[SRSH_PI_OFFSET]);
+ default:
+ return;
+ }
+
+ val16 = bcma_read16(pi->core, regoff);
+ if (((val16 & SRSH_PI_MASK) >> SRSH_PI_SHIFT) !=
+ (u16)core->core_index) {
+ val16 = ((u16)core->core_index << SRSH_PI_SHIFT) |
+ (val16 & ~SRSH_PI_MASK);
+ bcma_write16(pi->core, regoff, val16);
+ }
}
/* precondition: current core is pci core */
void
-pcicore_pci_setup(struct pcicore_info *pi, struct sbpciregs __iomem *pciregs)
+pcicore_pci_setup(struct pcicore_info *pi)
{
- u32 w;
-
- OR_REG(&pciregs->sbtopci2, SBTOPCI_PREF | SBTOPCI_BURST);
-
- if (((struct si_info *)(pi->sih))->pub.buscorerev >= 11) {
- OR_REG(&pciregs->sbtopci2, SBTOPCI_RC_READMULTI);
- w = R_REG(&pciregs->clkrun);
- W_REG(&pciregs->clkrun, w | PCI_CLKRUN_DSBL);
- w = R_REG(&pciregs->clkrun);
+ bcma_set32(pi->core, PCIREGOFFS(sbtopci2),
+ SBTOPCI_PREF | SBTOPCI_BURST);
+
+ if (pi->core->id.rev >= 11) {
+ bcma_set32(pi->core, PCIREGOFFS(sbtopci2),
+ SBTOPCI_RC_READMULTI);
+ bcma_set32(pi->core, PCIREGOFFS(clkrun), PCI_CLKRUN_DSBL);
+ (void)bcma_read32(pi->core, PCIREGOFFS(clkrun));
}
}
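
Note on nicpci.c above: the __iomem register structs are no longer mapped and dereferenced; the layout is kept only as a C struct whose offsetof() values feed the bcma accessors through PCIREGOFFS()/PCIEREGOFFS(). A minimal sketch of the indirect read used by pcie_readreg() for config space, assuming the sbpcieregs layout and PCIEREGOFFS() helper declared earlier in this file:

#include <linux/bcma/bcma.h>

/* Sketch: write the target offset to configaddr, read it back to flush
 * the posted write, then fetch the value from configdata.
 */
static u32 pcie_cfg_read_sketch(struct bcma_device *core, u32 offset)
{
	bcma_write32(core, PCIEREGOFFS(configaddr), offset);
	(void)bcma_read32(core, PCIEREGOFFS(configaddr));
	return bcma_read32(core, PCIEREGOFFS(configdata));
}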
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/nicpci.h b/drivers/net/wireless/brcm80211/brcmsmac/nicpci.h
index 58aa80dc3329..9fc3ead540a8 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/nicpci.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/nicpci.h
@@ -62,8 +62,7 @@ struct sbpciregs;
struct sbpcieregs;
extern struct pcicore_info *pcicore_init(struct si_pub *sih,
- struct pci_dev *pdev,
- void __iomem *regs);
+ struct bcma_device *core);
extern void pcicore_deinit(struct pcicore_info *pch);
extern void pcicore_attach(struct pcicore_info *pch, int state);
extern void pcicore_hwup(struct pcicore_info *pch);
@@ -72,11 +71,7 @@ extern void pcicore_sleep(struct pcicore_info *pch);
extern void pcicore_down(struct pcicore_info *pch, int state);
extern u8 pcicore_find_pci_capability(struct pci_dev *dev, u8 req_cap_id,
unsigned char *buf, u32 *buflen);
-extern void pcicore_fixcfg_pci(struct pcicore_info *pch,
- struct sbpciregs __iomem *pciregs);
-extern void pcicore_fixcfg_pcie(struct pcicore_info *pch,
- struct sbpcieregs __iomem *pciregs);
-extern void pcicore_pci_setup(struct pcicore_info *pch,
- struct sbpciregs __iomem *pciregs);
+extern void pcicore_fixcfg(struct pcicore_info *pch);
+extern void pcicore_pci_setup(struct pcicore_info *pch);
#endif /* _BRCM_NICPCI_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/otp.c b/drivers/net/wireless/brcm80211/brcmsmac/otp.c
index edf551561fd8..f1ca12625860 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/otp.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/otp.c
@@ -77,7 +77,7 @@ struct otp_fn_s {
};
struct otpinfo {
- uint ccrev; /* chipc revision */
+ struct bcma_device *core; /* chipc core */
const struct otp_fn_s *fn; /* OTP functions */
struct si_pub *sih; /* Saved sb handle */
@@ -133,9 +133,10 @@ struct otpinfo {
#define OTP_SZ_FU_144 (144/8) /* 144 bits */
static u16
-ipxotp_otpr(struct otpinfo *oi, struct chipcregs __iomem *cc, uint wn)
+ipxotp_otpr(struct otpinfo *oi, uint wn)
{
- return R_REG(&cc->sromotp[wn]);
+ return bcma_read16(oi->core,
+ CHIPCREGOFFS(sromotp[wn]));
}
/*
@@ -146,7 +147,7 @@ static int ipxotp_max_rgnsz(struct si_pub *sih, int osizew)
{
int ret = 0;
- switch (sih->chip) {
+ switch (ai_get_chip_id(sih)) {
case BCM43224_CHIP_ID:
case BCM43225_CHIP_ID:
ret = osizew * 2 - OTP_SZ_FU_72 - OTP_SZ_CHECKSUM;
@@ -161,19 +162,21 @@ static int ipxotp_max_rgnsz(struct si_pub *sih, int osizew)
return ret;
}
-static void _ipxotp_init(struct otpinfo *oi, struct chipcregs __iomem *cc)
+static void _ipxotp_init(struct otpinfo *oi)
{
uint k;
u32 otpp, st;
+ int ccrev = ai_get_ccrev(oi->sih);
+
/*
* record word offset of General Use Region
* for various chipcommon revs
*/
- if (oi->sih->ccrev == 21 || oi->sih->ccrev == 24
- || oi->sih->ccrev == 27) {
+ if (ccrev == 21 || ccrev == 24
+ || ccrev == 27) {
oi->otpgu_base = REVA4_OTPGU_BASE;
- } else if (oi->sih->ccrev == 36) {
+ } else if (ccrev == 36) {
/*
* OTP size greater than equal to 2KB (128 words),
* otpgu_base is similar to rev23
@@ -182,7 +185,7 @@ static void _ipxotp_init(struct otpinfo *oi, struct chipcregs __iomem *cc)
oi->otpgu_base = REVB8_OTPGU_BASE;
else
oi->otpgu_base = REV36_OTPGU_BASE;
- } else if (oi->sih->ccrev == 23 || oi->sih->ccrev >= 25) {
+ } else if (ccrev == 23 || ccrev >= 25) {
oi->otpgu_base = REVB8_OTPGU_BASE;
}
@@ -190,24 +193,21 @@ static void _ipxotp_init(struct otpinfo *oi, struct chipcregs __iomem *cc)
otpp =
OTPP_START_BUSY | ((OTPPOC_INIT << OTPP_OC_SHIFT) & OTPP_OC_MASK);
- W_REG(&cc->otpprog, otpp);
- for (k = 0;
- ((st = R_REG(&cc->otpprog)) & OTPP_START_BUSY)
- && (k < OTPP_TRIES); k++)
- ;
+ bcma_write32(oi->core, CHIPCREGOFFS(otpprog), otpp);
+ st = bcma_read32(oi->core, CHIPCREGOFFS(otpprog));
+ for (k = 0; (st & OTPP_START_BUSY) && (k < OTPP_TRIES); k++)
+ st = bcma_read32(oi->core, CHIPCREGOFFS(otpprog));
if (k >= OTPP_TRIES)
return;
/* Read OTP lock bits and subregion programmed indication bits */
- oi->status = R_REG(&cc->otpstatus);
+ oi->status = bcma_read32(oi->core, CHIPCREGOFFS(otpstatus));
- if ((oi->sih->chip == BCM43224_CHIP_ID)
- || (oi->sih->chip == BCM43225_CHIP_ID)) {
+ if ((ai_get_chip_id(oi->sih) == BCM43224_CHIP_ID)
+ || (ai_get_chip_id(oi->sih) == BCM43225_CHIP_ID)) {
u32 p_bits;
- p_bits =
- (ipxotp_otpr(oi, cc, oi->otpgu_base + OTPGU_P_OFF) &
- OTPGU_P_MSK)
- >> OTPGU_P_SHIFT;
+ p_bits = (ipxotp_otpr(oi, oi->otpgu_base + OTPGU_P_OFF) &
+ OTPGU_P_MSK) >> OTPGU_P_SHIFT;
oi->status |= (p_bits << OTPS_GUP_SHIFT);
}
@@ -220,7 +220,7 @@ static void _ipxotp_init(struct otpinfo *oi, struct chipcregs __iomem *cc)
oi->hwlim = oi->wsize;
if (oi->status & OTPS_GUP_HW) {
oi->hwlim =
- ipxotp_otpr(oi, cc, oi->otpgu_base + OTPGU_HSB_OFF) / 16;
+ ipxotp_otpr(oi, oi->otpgu_base + OTPGU_HSB_OFF) / 16;
oi->swbase = oi->hwlim;
} else
oi->swbase = oi->hwbase;
@@ -230,7 +230,7 @@ static void _ipxotp_init(struct otpinfo *oi, struct chipcregs __iomem *cc)
if (oi->status & OTPS_GUP_SW) {
oi->swlim =
- ipxotp_otpr(oi, cc, oi->otpgu_base + OTPGU_SFB_OFF) / 16;
+ ipxotp_otpr(oi, oi->otpgu_base + OTPGU_SFB_OFF) / 16;
oi->fbase = oi->swlim;
} else
oi->fbase = oi->swbase;
@@ -240,11 +240,8 @@ static void _ipxotp_init(struct otpinfo *oi, struct chipcregs __iomem *cc)
static int ipxotp_init(struct si_pub *sih, struct otpinfo *oi)
{
- uint idx;
- struct chipcregs __iomem *cc;
-
/* Make sure we're running IPX OTP */
- if (!OTPTYPE_IPX(sih->ccrev))
+ if (!OTPTYPE_IPX(ai_get_ccrev(sih)))
return -EBADE;
/* Make sure OTP is not disabled */
@@ -252,7 +249,7 @@ static int ipxotp_init(struct si_pub *sih, struct otpinfo *oi)
return -EBADE;
/* Check for otp size */
- switch ((sih->cccaps & CC_CAP_OTPSIZE) >> CC_CAP_OTPSIZE_SHIFT) {
+ switch ((ai_get_cccaps(sih) & CC_CAP_OTPSIZE) >> CC_CAP_OTPSIZE_SHIFT) {
case 0:
/* Nothing there */
return -EBADE;
@@ -282,21 +279,13 @@ static int ipxotp_init(struct si_pub *sih, struct otpinfo *oi)
}
/* Retrieve OTP region info */
- idx = ai_coreidx(sih);
- cc = ai_setcoreidx(sih, SI_CC_IDX);
-
- _ipxotp_init(oi, cc);
-
- ai_setcoreidx(sih, idx);
-
+ _ipxotp_init(oi);
return 0;
}
static int
ipxotp_read_region(struct otpinfo *oi, int region, u16 *data, uint *wlen)
{
- uint idx;
- struct chipcregs __iomem *cc;
uint base, i, sz;
/* Validate region selection */
@@ -365,14 +354,10 @@ ipxotp_read_region(struct otpinfo *oi, int region, u16 *data, uint *wlen)
return -EINVAL;
}
- idx = ai_coreidx(oi->sih);
- cc = ai_setcoreidx(oi->sih, SI_CC_IDX);
-
/* Read the data */
for (i = 0; i < sz; i++)
- data[i] = ipxotp_otpr(oi, cc, base + i);
+ data[i] = ipxotp_otpr(oi, base + i);
- ai_setcoreidx(oi->sih, idx);
*wlen = sz;
return 0;
}
@@ -384,14 +369,13 @@ static const struct otp_fn_s ipxotp_fn = {
static int otp_init(struct si_pub *sih, struct otpinfo *oi)
{
-
int ret;
memset(oi, 0, sizeof(struct otpinfo));
- oi->ccrev = sih->ccrev;
+ oi->core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
- if (OTPTYPE_IPX(oi->ccrev))
+ if (OTPTYPE_IPX(ai_get_ccrev(sih)))
oi->fn = &ipxotp_fn;
if (oi->fn == NULL)
@@ -399,7 +383,7 @@ static int otp_init(struct si_pub *sih, struct otpinfo *oi)
oi->sih = sih;
- ret = (oi->fn->init) (sih, oi);
+ ret = (oi->fn->init)(sih, oi);
return ret;
}
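
Note on _ipxotp_init() above: the empty-bodied busy-wait loop is rewritten as explicit bounded polling of the otpprog register. A sketch of that idiom, with the OTPP_* constants and CHIPCREGOFFS() (an offsetof(struct chipcregs, ...) helper) assumed from the driver headers:

#include <linux/bcma/bcma.h>

/* Sketch: poll until the controller clears START_BUSY or we give up
 * after OTPP_TRIES reads; return whether the command completed.
 */
static bool otp_cmd_done_sketch(struct bcma_device *cc)
{
	u32 st = bcma_read32(cc, CHIPCREGOFFS(otpprog));
	unsigned int k;

	for (k = 0; (st & OTPP_START_BUSY) && (k < OTPP_TRIES); k++)
		st = bcma_read32(cc, CHIPCREGOFFS(otpprog));

	return !(st & OTPP_START_BUSY);
}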
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c
index a3149254cbcd..264f8c4c703d 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c
@@ -109,10 +109,10 @@ static const struct chan_info_basic chan_info_all[] = {
{204, 5020},
{208, 5040},
{212, 5060},
- {216, 50800}
+ {216, 5080}
};
-const u8 ofdm_rate_lookup[] = {
+static const u8 ofdm_rate_lookup[] = {
BRCM_RATE_48M,
BRCM_RATE_24M,
@@ -149,9 +149,8 @@ void wlc_radioreg_enter(struct brcms_phy_pub *pih)
void wlc_radioreg_exit(struct brcms_phy_pub *pih)
{
struct brcms_phy *pi = (struct brcms_phy *) pih;
- u16 dummy;
- dummy = R_REG(&pi->regs->phyversion);
+ (void)bcma_read16(pi->d11core, D11REGOFFS(phyversion));
pi->phy_wreg = 0;
wlapi_bmac_mctrl(pi->sh->physhim, MCTL_LOCK_RADIO, 0);
}
@@ -186,19 +185,11 @@ u16 read_radio_reg(struct brcms_phy *pi, u16 addr)
if ((D11REV_GE(pi->sh->corerev, 24)) ||
(D11REV_IS(pi->sh->corerev, 22)
&& (pi->pubpi.phy_type != PHY_TYPE_SSN))) {
- W_REG_FLUSH(&pi->regs->radioregaddr, addr);
- data = R_REG(&pi->regs->radioregdata);
+ bcma_wflush16(pi->d11core, D11REGOFFS(radioregaddr), addr);
+ data = bcma_read16(pi->d11core, D11REGOFFS(radioregdata));
} else {
- W_REG_FLUSH(&pi->regs->phy4waddr, addr);
-
-#ifdef __ARM_ARCH_4T__
- __asm__(" .align 4 ");
- __asm__(" nop ");
- data = R_REG(&pi->regs->phy4wdatalo);
-#else
- data = R_REG(&pi->regs->phy4wdatalo);
-#endif
-
+ bcma_wflush16(pi->d11core, D11REGOFFS(phy4waddr), addr);
+ data = bcma_read16(pi->d11core, D11REGOFFS(phy4wdatalo));
}
pi->phy_wreg = 0;
@@ -211,15 +202,15 @@ void write_radio_reg(struct brcms_phy *pi, u16 addr, u16 val)
(D11REV_IS(pi->sh->corerev, 22)
&& (pi->pubpi.phy_type != PHY_TYPE_SSN))) {
- W_REG_FLUSH(&pi->regs->radioregaddr, addr);
- W_REG(&pi->regs->radioregdata, val);
+ bcma_wflush16(pi->d11core, D11REGOFFS(radioregaddr), addr);
+ bcma_write16(pi->d11core, D11REGOFFS(radioregdata), val);
} else {
- W_REG_FLUSH(&pi->regs->phy4waddr, addr);
- W_REG(&pi->regs->phy4wdatalo, val);
+ bcma_wflush16(pi->d11core, D11REGOFFS(phy4waddr), addr);
+ bcma_write16(pi->d11core, D11REGOFFS(phy4wdatalo), val);
}
if (++pi->phy_wreg >= pi->phy_wreg_limit) {
- (void)R_REG(&pi->regs->maccontrol);
+ (void)bcma_read32(pi->d11core, D11REGOFFS(maccontrol));
pi->phy_wreg = 0;
}
}
@@ -231,19 +222,20 @@ static u32 read_radio_id(struct brcms_phy *pi)
if (D11REV_GE(pi->sh->corerev, 24)) {
u32 b0, b1, b2;
- W_REG_FLUSH(&pi->regs->radioregaddr, 0);
- b0 = (u32) R_REG(&pi->regs->radioregdata);
- W_REG_FLUSH(&pi->regs->radioregaddr, 1);
- b1 = (u32) R_REG(&pi->regs->radioregdata);
- W_REG_FLUSH(&pi->regs->radioregaddr, 2);
- b2 = (u32) R_REG(&pi->regs->radioregdata);
+ bcma_wflush16(pi->d11core, D11REGOFFS(radioregaddr), 0);
+ b0 = (u32) bcma_read16(pi->d11core, D11REGOFFS(radioregdata));
+ bcma_wflush16(pi->d11core, D11REGOFFS(radioregaddr), 1);
+ b1 = (u32) bcma_read16(pi->d11core, D11REGOFFS(radioregdata));
+ bcma_wflush16(pi->d11core, D11REGOFFS(radioregaddr), 2);
+ b2 = (u32) bcma_read16(pi->d11core, D11REGOFFS(radioregdata));
id = ((b0 & 0xf) << 28) | (((b2 << 8) | b1) << 12) | ((b0 >> 4)
& 0xf);
} else {
- W_REG_FLUSH(&pi->regs->phy4waddr, RADIO_IDCODE);
- id = (u32) R_REG(&pi->regs->phy4wdatalo);
- id |= (u32) R_REG(&pi->regs->phy4wdatahi) << 16;
+ bcma_wflush16(pi->d11core, D11REGOFFS(phy4waddr), RADIO_IDCODE);
+ id = (u32) bcma_read16(pi->d11core, D11REGOFFS(phy4wdatalo));
+ id |= (u32) bcma_read16(pi->d11core,
+ D11REGOFFS(phy4wdatahi)) << 16;
}
pi->phy_wreg = 0;
return id;
@@ -283,75 +275,52 @@ void mod_radio_reg(struct brcms_phy *pi, u16 addr, u16 mask, u16 val)
void write_phy_channel_reg(struct brcms_phy *pi, uint val)
{
- W_REG(&pi->regs->phychannel, val);
+ bcma_write16(pi->d11core, D11REGOFFS(phychannel), val);
}
u16 read_phy_reg(struct brcms_phy *pi, u16 addr)
{
- struct d11regs __iomem *regs;
-
- regs = pi->regs;
-
- W_REG_FLUSH(&regs->phyregaddr, addr);
+ bcma_wflush16(pi->d11core, D11REGOFFS(phyregaddr), addr);
pi->phy_wreg = 0;
- return R_REG(&regs->phyregdata);
+ return bcma_read16(pi->d11core, D11REGOFFS(phyregdata));
}
void write_phy_reg(struct brcms_phy *pi, u16 addr, u16 val)
{
- struct d11regs __iomem *regs;
-
- regs = pi->regs;
-
#ifdef CONFIG_BCM47XX
- W_REG_FLUSH(&regs->phyregaddr, addr);
- W_REG(&regs->phyregdata, val);
+ bcma_wflush16(pi->d11core, D11REGOFFS(phyregaddr), addr);
+ bcma_write16(pi->d11core, D11REGOFFS(phyregdata), val);
if (addr == 0x72)
- (void)R_REG(&regs->phyregdata);
+ (void)bcma_read16(pi->d11core, D11REGOFFS(phyversion));
#else
- W_REG((u32 __iomem *)(&regs->phyregaddr), addr | (val << 16));
+ bcma_write32(pi->d11core, D11REGOFFS(phyregaddr), addr | (val << 16));
if (++pi->phy_wreg >= pi->phy_wreg_limit) {
pi->phy_wreg = 0;
- (void)R_REG(&regs->phyversion);
+ (void)bcma_read16(pi->d11core, D11REGOFFS(phyversion));
}
#endif
}
void and_phy_reg(struct brcms_phy *pi, u16 addr, u16 val)
{
- struct d11regs __iomem *regs;
-
- regs = pi->regs;
-
- W_REG_FLUSH(&regs->phyregaddr, addr);
-
- W_REG(&regs->phyregdata, (R_REG(&regs->phyregdata) & val));
+ bcma_wflush16(pi->d11core, D11REGOFFS(phyregaddr), addr);
+ bcma_mask16(pi->d11core, D11REGOFFS(phyregdata), val);
pi->phy_wreg = 0;
}
void or_phy_reg(struct brcms_phy *pi, u16 addr, u16 val)
{
- struct d11regs __iomem *regs;
-
- regs = pi->regs;
-
- W_REG_FLUSH(&regs->phyregaddr, addr);
-
- W_REG(&regs->phyregdata, (R_REG(&regs->phyregdata) | val));
+ bcma_wflush16(pi->d11core, D11REGOFFS(phyregaddr), addr);
+ bcma_set16(pi->d11core, D11REGOFFS(phyregdata), val);
pi->phy_wreg = 0;
}
void mod_phy_reg(struct brcms_phy *pi, u16 addr, u16 mask, u16 val)
{
- struct d11regs __iomem *regs;
-
- regs = pi->regs;
-
- W_REG_FLUSH(&regs->phyregaddr, addr);
-
- W_REG(&regs->phyregdata,
- ((R_REG(&regs->phyregdata) & ~mask) | (val & mask)));
+ val &= mask;
+ bcma_wflush16(pi->d11core, D11REGOFFS(phyregaddr), addr);
+ bcma_maskset16(pi->d11core, D11REGOFFS(phyregdata), ~mask, val);
pi->phy_wreg = 0;
}
@@ -412,10 +381,8 @@ struct shared_phy *wlc_phy_shared_attach(struct shared_phy_params *shp)
sh->sromrev = shp->sromrev;
sh->boardtype = shp->boardtype;
sh->boardrev = shp->boardrev;
- sh->boardvendor = shp->boardvendor;
sh->boardflags = shp->boardflags;
sh->boardflags2 = shp->boardflags2;
- sh->buscorerev = shp->buscorerev;
sh->fast_timer = PHY_SW_TIMER_FAST;
sh->slow_timer = PHY_SW_TIMER_SLOW;
@@ -458,7 +425,7 @@ static u32 wlc_phy_get_radio_ver(struct brcms_phy *pi)
}
struct brcms_phy_pub *
-wlc_phy_attach(struct shared_phy *sh, struct d11regs __iomem *regs,
+wlc_phy_attach(struct shared_phy *sh, struct bcma_device *d11core,
int bandtype, struct wiphy *wiphy)
{
struct brcms_phy *pi;
@@ -470,7 +437,7 @@ wlc_phy_attach(struct shared_phy *sh, struct d11regs __iomem *regs,
if (D11REV_IS(sh->corerev, 4))
sflags = SISF_2G_PHY | SISF_5G_PHY;
else
- sflags = ai_core_sflags(sh->sih, 0, 0);
+ sflags = bcma_aread32(d11core, BCMA_IOST);
if (bandtype == BRCM_BAND_5G) {
if ((sflags & (SISF_5G_PHY | SISF_DB_PHY)) == 0)
@@ -488,7 +455,7 @@ wlc_phy_attach(struct shared_phy *sh, struct d11regs __iomem *regs,
if (pi == NULL)
return NULL;
pi->wiphy = wiphy;
- pi->regs = regs;
+ pi->d11core = d11core;
pi->sh = sh;
pi->phy_init_por = true;
pi->phy_wreg_limit = PHY_WREG_LIMIT;
@@ -503,7 +470,7 @@ wlc_phy_attach(struct shared_phy *sh, struct d11regs __iomem *regs,
pi->pubpi.coreflags = SICF_GMODE;
wlapi_bmac_corereset(pi->sh->physhim, pi->pubpi.coreflags);
- phyversion = R_REG(&pi->regs->phyversion);
+ phyversion = bcma_read16(pi->d11core, D11REGOFFS(phyversion));
pi->pubpi.phy_type = PHY_TYPE(phyversion);
pi->pubpi.phy_rev = phyversion & PV_PV_MASK;
@@ -515,8 +482,8 @@ wlc_phy_attach(struct shared_phy *sh, struct d11regs __iomem *regs,
pi->pubpi.phy_corenum = PHY_CORE_NUM_2;
pi->pubpi.ana_rev = (phyversion & PV_AV_MASK) >> PV_AV_SHIFT;
- if (!pi->pubpi.phy_type == PHY_TYPE_N &&
- !pi->pubpi.phy_type == PHY_TYPE_LCN)
+ if (pi->pubpi.phy_type != PHY_TYPE_N &&
+ pi->pubpi.phy_type != PHY_TYPE_LCN)
goto err;
if (bandtype == BRCM_BAND_5G) {
@@ -787,14 +754,14 @@ void wlc_phy_init(struct brcms_phy_pub *pih, u16 chanspec)
pi->radio_chanspec = chanspec;
- mc = R_REG(&pi->regs->maccontrol);
+ mc = bcma_read32(pi->d11core, D11REGOFFS(maccontrol));
if (WARN(mc & MCTL_EN_MAC, "HW error MAC running on init"))
return;
if (!(pi->measure_hold & PHY_HOLD_FOR_SCAN))
pi->measure_hold |= PHY_HOLD_FOR_NOT_ASSOC;
- if (WARN(!(ai_core_sflags(pi->sh->sih, 0, 0) & SISF_FCLKA),
+ if (WARN(!(bcma_aread32(pi->d11core, BCMA_IOST) & SISF_FCLKA),
"HW error SISF_FCLKA\n"))
return;
@@ -833,8 +800,8 @@ void wlc_phy_cal_init(struct brcms_phy_pub *pih)
struct brcms_phy *pi = (struct brcms_phy *) pih;
void (*cal_init)(struct brcms_phy *) = NULL;
- if (WARN((R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC) != 0,
- "HW error: MAC enabled during phy cal\n"))
+ if (WARN((bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
+ MCTL_EN_MAC) != 0, "HW error: MAC enabled during phy cal\n"))
return;
if (!pi->initialized) {
@@ -1025,7 +992,7 @@ wlc_phy_init_radio_regs(struct brcms_phy *pi,
void wlc_phy_do_dummy_tx(struct brcms_phy *pi, bool ofdm, bool pa_on)
{
#define DUMMY_PKT_LEN 20
- struct d11regs __iomem *regs = pi->regs;
+ struct bcma_device *core = pi->d11core;
int i, count;
u8 ofdmpkt[DUMMY_PKT_LEN] = {
0xcc, 0x01, 0x02, 0x00, 0x00, 0x00, 0xd4, 0x00, 0x00, 0x00,
@@ -1041,26 +1008,28 @@ void wlc_phy_do_dummy_tx(struct brcms_phy *pi, bool ofdm, bool pa_on)
wlapi_bmac_write_template_ram(pi->sh->physhim, 0, DUMMY_PKT_LEN,
dummypkt);
- W_REG(&regs->xmtsel, 0);
+ bcma_write16(core, D11REGOFFS(xmtsel), 0);
if (D11REV_GE(pi->sh->corerev, 11))
- W_REG(&regs->wepctl, 0x100);
+ bcma_write16(core, D11REGOFFS(wepctl), 0x100);
else
- W_REG(&regs->wepctl, 0);
+ bcma_write16(core, D11REGOFFS(wepctl), 0);
- W_REG(&regs->txe_phyctl, (ofdm ? 1 : 0) | PHY_TXC_ANT_0);
+ bcma_write16(core, D11REGOFFS(txe_phyctl),
+ (ofdm ? 1 : 0) | PHY_TXC_ANT_0);
if (ISNPHY(pi) || ISLCNPHY(pi))
- W_REG(&regs->txe_phyctl1, 0x1A02);
+ bcma_write16(core, D11REGOFFS(txe_phyctl1), 0x1A02);
- W_REG(&regs->txe_wm_0, 0);
- W_REG(&regs->txe_wm_1, 0);
+ bcma_write16(core, D11REGOFFS(txe_wm_0), 0);
+ bcma_write16(core, D11REGOFFS(txe_wm_1), 0);
- W_REG(&regs->xmttplatetxptr, 0);
- W_REG(&regs->xmttxcnt, DUMMY_PKT_LEN);
+ bcma_write16(core, D11REGOFFS(xmttplatetxptr), 0);
+ bcma_write16(core, D11REGOFFS(xmttxcnt), DUMMY_PKT_LEN);
- W_REG(&regs->xmtsel, ((8 << 8) | (1 << 5) | (1 << 2) | 2));
+ bcma_write16(core, D11REGOFFS(xmtsel),
+ ((8 << 8) | (1 << 5) | (1 << 2) | 2));
- W_REG(&regs->txe_ctl, 0);
+ bcma_write16(core, D11REGOFFS(txe_ctl), 0);
if (!pa_on) {
if (ISNPHY(pi))
@@ -1068,27 +1037,28 @@ void wlc_phy_do_dummy_tx(struct brcms_phy *pi, bool ofdm, bool pa_on)
}
if (ISNPHY(pi) || ISLCNPHY(pi))
- W_REG(&regs->txe_aux, 0xD0);
+ bcma_write16(core, D11REGOFFS(txe_aux), 0xD0);
else
- W_REG(&regs->txe_aux, ((1 << 5) | (1 << 4)));
+ bcma_write16(core, D11REGOFFS(txe_aux), ((1 << 5) | (1 << 4)));
- (void)R_REG(&regs->txe_aux);
+ (void)bcma_read16(core, D11REGOFFS(txe_aux));
i = 0;
count = ofdm ? 30 : 250;
while ((i++ < count)
- && (R_REG(&regs->txe_status) & (1 << 7)))
+ && (bcma_read16(core, D11REGOFFS(txe_status)) & (1 << 7)))
udelay(10);
i = 0;
- while ((i++ < 10)
- && ((R_REG(&regs->txe_status) & (1 << 10)) == 0))
+ while ((i++ < 10) &&
+ ((bcma_read16(core, D11REGOFFS(txe_status)) & (1 << 10)) == 0))
udelay(10);
i = 0;
- while ((i++ < 10) && ((R_REG(&regs->ifsstat) & (1 << 8))))
+ while ((i++ < 10) &&
+ ((bcma_read16(core, D11REGOFFS(ifsstat)) & (1 << 8))))
udelay(10);
if (!pa_on) {
@@ -1145,7 +1115,7 @@ static bool wlc_phy_cal_txpower_recalc_sw(struct brcms_phy *pi)
void wlc_phy_switch_radio(struct brcms_phy_pub *pih, bool on)
{
struct brcms_phy *pi = (struct brcms_phy *) pih;
- (void)R_REG(&pi->regs->maccontrol);
+ (void)bcma_read32(pi->d11core, D11REGOFFS(maccontrol));
if (ISNPHY(pi)) {
wlc_phy_switch_radio_nphy(pi, on);
@@ -1385,7 +1355,7 @@ void wlc_phy_txpower_target_set(struct brcms_phy_pub *ppi,
memcpy(&pi->tx_user_target[TXP_FIRST_MCS_40_SDM],
&txpwr->mcs_40_mimo[0], BRCMS_NUM_RATES_MCS_2_STREAM);
- if (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC)
+ if (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) & MCTL_EN_MAC)
mac_enabled = true;
if (mac_enabled)
@@ -1415,7 +1385,8 @@ int wlc_phy_txpower_set(struct brcms_phy_pub *ppi, uint qdbm, bool override)
if (!SCAN_INPROG_PHY(pi)) {
bool suspend;
- suspend = (0 == (R_REG(&pi->regs->maccontrol) &
+ suspend = (0 == (bcma_read32(pi->d11core,
+ D11REGOFFS(maccontrol)) &
MCTL_EN_MAC));
if (!suspend)
@@ -1868,18 +1839,17 @@ void wlc_phy_runbist_config(struct brcms_phy_pub *ppi, bool start_end)
if (NREV_IS(pi->pubpi.phy_rev, 3)
|| NREV_IS(pi->pubpi.phy_rev, 4)) {
- W_REG(&pi->regs->phyregaddr, 0xa0);
- (void)R_REG(&pi->regs->phyregaddr);
- rxc = R_REG(&pi->regs->phyregdata);
- W_REG(&pi->regs->phyregdata,
- (0x1 << 15) | rxc);
+ bcma_wflush16(pi->d11core, D11REGOFFS(phyregaddr),
+ 0xa0);
+ bcma_set16(pi->d11core, D11REGOFFS(phyregdata),
+ 0x1 << 15);
}
} else {
if (NREV_IS(pi->pubpi.phy_rev, 3)
|| NREV_IS(pi->pubpi.phy_rev, 4)) {
- W_REG(&pi->regs->phyregaddr, 0xa0);
- (void)R_REG(&pi->regs->phyregaddr);
- W_REG(&pi->regs->phyregdata, rxc);
+ bcma_wflush16(pi->d11core, D11REGOFFS(phyregaddr),
+ 0xa0);
+ bcma_write16(pi->d11core, D11REGOFFS(phyregdata), rxc);
}
wlc_phy_por_inform(ppi);
@@ -1999,7 +1969,9 @@ void wlc_phy_txpower_hw_ctrl_set(struct brcms_phy_pub *ppi, bool hwpwrctrl)
pi->txpwrctrl = hwpwrctrl;
if (ISNPHY(pi)) {
- suspend = (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+ suspend = (0 == (bcma_read32(pi->d11core,
+ D11REGOFFS(maccontrol)) &
+ MCTL_EN_MAC));
if (!suspend)
wlapi_suspend_mac_and_wait(pi->sh->physhim);
@@ -2201,7 +2173,8 @@ void wlc_phy_ant_rxdiv_set(struct brcms_phy_pub *ppi, u8 val)
if (!pi->sh->clk)
return;
- suspend = (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+ suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
+ MCTL_EN_MAC));
if (!suspend)
wlapi_suspend_mac_and_wait(pi->sh->physhim);
@@ -2419,8 +2392,8 @@ wlc_phy_noise_sample_request(struct brcms_phy_pub *pih, u8 reason, u8 ch)
wlapi_bmac_write_shm(pi->sh->physhim, M_PWRIND_MAP2, 0);
wlapi_bmac_write_shm(pi->sh->physhim, M_PWRIND_MAP3, 0);
- OR_REG(&pi->regs->maccommand,
- MCMD_BG_NOISE);
+ bcma_set32(pi->d11core, D11REGOFFS(maccommand),
+ MCMD_BG_NOISE);
} else {
wlapi_suspend_mac_and_wait(pi->sh->physhim);
wlc_lcnphy_deaf_mode(pi, (bool) 0);
@@ -2438,8 +2411,8 @@ wlc_phy_noise_sample_request(struct brcms_phy_pub *pih, u8 reason, u8 ch)
wlapi_bmac_write_shm(pi->sh->physhim, M_PWRIND_MAP2, 0);
wlapi_bmac_write_shm(pi->sh->physhim, M_PWRIND_MAP3, 0);
- OR_REG(&pi->regs->maccommand,
- MCMD_BG_NOISE);
+ bcma_set32(pi->d11core, D11REGOFFS(maccommand),
+ MCMD_BG_NOISE);
} else {
struct phy_iq_est est[PHY_CORE_MAX];
u32 cmplx_pwr[PHY_CORE_MAX];
@@ -2932,29 +2905,29 @@ void wlc_lcnphy_epa_switch(struct brcms_phy *pi, bool mode)
mod_phy_reg(pi, 0x44c, (0x1 << 2), (1) << 2);
}
- ai_corereg(pi->sh->sih, SI_CC_IDX,
- offsetof(struct chipcregs, gpiocontrol),
- ~0x0, 0x0);
- ai_corereg(pi->sh->sih, SI_CC_IDX,
- offsetof(struct chipcregs, gpioout), 0x40,
- 0x40);
- ai_corereg(pi->sh->sih, SI_CC_IDX,
- offsetof(struct chipcregs, gpioouten), 0x40,
- 0x40);
+ ai_cc_reg(pi->sh->sih,
+ offsetof(struct chipcregs, gpiocontrol),
+ ~0x0, 0x0);
+ ai_cc_reg(pi->sh->sih,
+ offsetof(struct chipcregs, gpioout),
+ 0x40, 0x40);
+ ai_cc_reg(pi->sh->sih,
+ offsetof(struct chipcregs, gpioouten),
+ 0x40, 0x40);
} else {
mod_phy_reg(pi, 0x44c, (0x1 << 2), (0) << 2);
mod_phy_reg(pi, 0x44d, (0x1 << 2), (0) << 2);
- ai_corereg(pi->sh->sih, SI_CC_IDX,
- offsetof(struct chipcregs, gpioout), 0x40,
- 0x00);
- ai_corereg(pi->sh->sih, SI_CC_IDX,
- offsetof(struct chipcregs, gpioouten), 0x40,
- 0x0);
- ai_corereg(pi->sh->sih, SI_CC_IDX,
- offsetof(struct chipcregs, gpiocontrol),
- ~0x0, 0x40);
+ ai_cc_reg(pi->sh->sih,
+ offsetof(struct chipcregs, gpioout),
+ 0x40, 0x00);
+ ai_cc_reg(pi->sh->sih,
+ offsetof(struct chipcregs, gpioouten),
+ 0x40, 0x0);
+ ai_cc_reg(pi->sh->sih,
+ offsetof(struct chipcregs, gpiocontrol),
+ ~0x0, 0x40);
}
}
}
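
Note on phy_cmn.c above: the open-coded phy register read/modify/write collapses into bcma_maskset16() after latching the register address, and the W_REG_FLUSH() idiom becomes bcma_wflush16() (a write followed by a readback). A generic sketch of that sequence, with the two register offsets passed in rather than taken from D11REGOFFS():

#include <linux/bcma/bcma.h>

/* Sketch: latch the indirect address, flush the posted write, then
 * read-modify-write the data port in one bcma_maskset16() call,
 * which writes (old & ~mask) | (val & mask).
 */
static void phy_maskset_sketch(struct bcma_device *d11core, u16 addr_off,
			       u16 data_off, u16 addr, u16 mask, u16 val)
{
	bcma_write16(d11core, addr_off, addr);
	(void)bcma_read16(d11core, addr_off);	/* as bcma_wflush16() */
	bcma_maskset16(d11core, data_off, ~mask, val & mask);
}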
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_hal.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_hal.h
index 96e15163222b..e34a71e7d242 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_hal.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_hal.h
@@ -166,7 +166,6 @@ struct shared_phy_params {
struct phy_shim_info *physhim;
uint unit;
uint corerev;
- uint buscorerev;
u16 vid;
u16 did;
uint chip;
@@ -175,7 +174,6 @@ struct shared_phy_params {
uint sromrev;
uint boardtype;
uint boardrev;
- uint boardvendor;
u32 boardflags;
u32 boardflags2;
};
@@ -183,7 +181,7 @@ struct shared_phy_params {
extern struct shared_phy *wlc_phy_shared_attach(struct shared_phy_params *shp);
extern struct brcms_phy_pub *wlc_phy_attach(struct shared_phy *sh,
- struct d11regs __iomem *regs,
+ struct bcma_device *d11core,
int bandtype, struct wiphy *wiphy);
extern void wlc_phy_detach(struct brcms_phy_pub *ppi);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
index bea85241a244..af00e2c2b266 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
@@ -503,10 +503,8 @@ struct shared_phy {
uint sromrev;
uint boardtype;
uint boardrev;
- uint boardvendor;
u32 boardflags;
u32 boardflags2;
- uint buscorerev;
uint fast_timer;
uint slow_timer;
uint glacial_timer;
@@ -559,7 +557,7 @@ struct brcms_phy {
} u;
bool user_txpwr_at_rfport;
- struct d11regs __iomem *regs;
+ struct bcma_device *d11core;
struct brcms_phy *next;
struct brcms_phy_pub pubpi;
@@ -774,11 +772,6 @@ struct brcms_phy {
s16 nphy_noise_win[PHY_CORE_MAX][PHY_NOISE_WINDOW_SZ];
u8 nphy_noise_index;
- u8 nphy_txpid2g[PHY_CORE_NUM_2];
- u8 nphy_txpid5g[PHY_CORE_NUM_2];
- u8 nphy_txpid5gl[PHY_CORE_NUM_2];
- u8 nphy_txpid5gh[PHY_CORE_NUM_2];
-
bool nphy_gain_boost;
bool nphy_elna_gain_config;
u16 old_bphy_test;
@@ -1095,7 +1088,7 @@ extern void wlc_phy_table_write_nphy(struct brcms_phy *pi, u32, u32, u32,
#define BRCMS_PHY_WAR_PR51571(pi) \
if (NREV_LT((pi)->pubpi.phy_rev, 3)) \
- (void)R_REG(&(pi)->regs->maccontrol)
+ (void)bcma_read32(pi->d11core, D11REGOFFS(maccontrol))
extern void wlc_phy_cal_perical_nphy_run(struct brcms_phy *pi, u8 caltype);
extern void wlc_phy_aci_reset_nphy(struct brcms_phy *pi);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
index a63aa99d9810..ce8562aa5db0 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
@@ -1603,7 +1603,7 @@ wlc_lcnphy_set_chanspec_tweaks(struct brcms_phy *pi, u16 chanspec)
si_pmu_pllupd(pi->sh->sih);
write_phy_reg(pi, 0x942, 0);
wlc_lcnphy_txrx_spur_avoidance_mode(pi, false);
- pi_lcn->lcnphy_spurmod = 0;
+ pi_lcn->lcnphy_spurmod = false;
mod_phy_reg(pi, 0x424, (0xff << 8), (0x1b) << 8);
write_phy_reg(pi, 0x425, 0x5907);
@@ -1616,7 +1616,7 @@ wlc_lcnphy_set_chanspec_tweaks(struct brcms_phy *pi, u16 chanspec)
write_phy_reg(pi, 0x942, 0);
wlc_lcnphy_txrx_spur_avoidance_mode(pi, true);
- pi_lcn->lcnphy_spurmod = 0;
+ pi_lcn->lcnphy_spurmod = false;
mod_phy_reg(pi, 0x424, (0xff << 8), (0x1f) << 8);
write_phy_reg(pi, 0x425, 0x590a);
@@ -2325,7 +2325,7 @@ static s8 wlc_lcnphy_tempcompensated_txpwrctrl(struct brcms_phy *pi)
{
s8 index, delta_brd, delta_temp, new_index, tempcorrx;
s16 manp, meas_temp, temp_diff;
- bool neg = 0;
+ bool neg = false;
u16 temp;
struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
@@ -2348,7 +2348,7 @@ static s8 wlc_lcnphy_tempcompensated_txpwrctrl(struct brcms_phy *pi)
manp = LCNPHY_TEMPSENSE(pi_lcn->lcnphy_rawtempsense);
temp_diff = manp - meas_temp;
if (temp_diff < 0) {
- neg = 1;
+ neg = true;
temp_diff = -temp_diff;
}
@@ -2813,10 +2813,8 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi)
u16 SAVE_jtag_auxpga = read_radio_reg(pi, RADIO_2064_REG0FF) & 0x10;
u16 SAVE_iqadc_aux_en = read_radio_reg(pi, RADIO_2064_REG11F) & 4;
idleTssi = read_phy_reg(pi, 0x4ab);
- suspend =
- (0 ==
- (R_REG(&((struct brcms_phy *) pi)->regs->maccontrol) &
- MCTL_EN_MAC));
+ suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
+ MCTL_EN_MAC));
if (!suspend)
wlapi_suspend_mac_and_wait(pi->sh->physhim);
wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF);
@@ -2890,7 +2888,8 @@ static void wlc_lcnphy_vbat_temp_sense_setup(struct brcms_phy *pi, u8 mode)
for (i = 0; i < 14; i++)
values_to_save[i] = read_phy_reg(pi, tempsense_phy_regs[i]);
- suspend = (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+ suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
+ MCTL_EN_MAC));
if (!suspend)
wlapi_suspend_mac_and_wait(pi->sh->physhim);
save_txpwrCtrlEn = read_radio_reg(pi, 0x4a4);
@@ -3016,8 +3015,8 @@ static void wlc_lcnphy_tx_pwr_ctrl_init(struct brcms_phy_pub *ppi)
bool suspend;
struct brcms_phy *pi = (struct brcms_phy *) ppi;
- suspend =
- (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+ suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
+ MCTL_EN_MAC));
if (!suspend)
wlapi_suspend_mac_and_wait(pi->sh->physhim);
@@ -3535,15 +3534,17 @@ wlc_lcnphy_samp_cap(struct brcms_phy *pi, int clip_detect_algo, u16 thresh,
timer = 0;
old_sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da);
- curval1 = R_REG(&pi->regs->psm_corectlsts);
+ curval1 = bcma_read16(pi->d11core, D11REGOFFS(psm_corectlsts));
ptr[130] = 0;
- W_REG(&pi->regs->psm_corectlsts, ((1 << 6) | curval1));
+ bcma_write16(pi->d11core, D11REGOFFS(psm_corectlsts),
+ ((1 << 6) | curval1));
- W_REG(&pi->regs->smpl_clct_strptr, 0x7E00);
- W_REG(&pi->regs->smpl_clct_stpptr, 0x8000);
+ bcma_write16(pi->d11core, D11REGOFFS(smpl_clct_strptr), 0x7E00);
+ bcma_write16(pi->d11core, D11REGOFFS(smpl_clct_stpptr), 0x8000);
udelay(20);
- curval2 = R_REG(&pi->regs->psm_phy_hdr_param);
- W_REG(&pi->regs->psm_phy_hdr_param, curval2 | 0x30);
+ curval2 = bcma_read16(pi->d11core, D11REGOFFS(psm_phy_hdr_param));
+ bcma_write16(pi->d11core, D11REGOFFS(psm_phy_hdr_param),
+ curval2 | 0x30);
write_phy_reg(pi, 0x555, 0x0);
write_phy_reg(pi, 0x5a6, 0x5);
@@ -3560,19 +3561,19 @@ wlc_lcnphy_samp_cap(struct brcms_phy *pi, int clip_detect_algo, u16 thresh,
sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da);
write_phy_reg(pi, 0x6da, (u32) (sslpnCalibClkEnCtrl | 0x2008));
- stpptr = R_REG(&pi->regs->smpl_clct_stpptr);
- curptr = R_REG(&pi->regs->smpl_clct_curptr);
+ stpptr = bcma_read16(pi->d11core, D11REGOFFS(smpl_clct_stpptr));
+ curptr = bcma_read16(pi->d11core, D11REGOFFS(smpl_clct_curptr));
do {
udelay(10);
- curptr = R_REG(&pi->regs->smpl_clct_curptr);
+ curptr = bcma_read16(pi->d11core, D11REGOFFS(smpl_clct_curptr));
timer++;
} while ((curptr != stpptr) && (timer < 500));
- W_REG(&pi->regs->psm_phy_hdr_param, 0x2);
+ bcma_write16(pi->d11core, D11REGOFFS(psm_phy_hdr_param), 0x2);
strptr = 0x7E00;
- W_REG(&pi->regs->tplatewrptr, strptr);
+ bcma_write32(pi->d11core, D11REGOFFS(tplatewrptr), strptr);
while (strptr < 0x8000) {
- val = R_REG(&pi->regs->tplatewrdata);
+ val = bcma_read32(pi->d11core, D11REGOFFS(tplatewrdata));
imag = ((val >> 16) & 0x3ff);
real = ((val) & 0x3ff);
if (imag > 511)
@@ -3597,8 +3598,8 @@ wlc_lcnphy_samp_cap(struct brcms_phy *pi, int clip_detect_algo, u16 thresh,
}
write_phy_reg(pi, 0x6da, old_sslpnCalibClkEnCtrl);
- W_REG(&pi->regs->psm_phy_hdr_param, curval2);
- W_REG(&pi->regs->psm_corectlsts, curval1);
+ bcma_write16(pi->d11core, D11REGOFFS(psm_phy_hdr_param), curval2);
+ bcma_write16(pi->d11core, D11REGOFFS(psm_corectlsts), curval1);
}
static void
@@ -3681,8 +3682,8 @@ wlc_lcnphy_a1(struct brcms_phy *pi, int cal_type, int num_levels,
wlc_lcnphy_set_cc(pi, cal_type, phy_c15, phy_c16);
udelay(20);
for (phy_c8 = 0; phy_c7 != 0 && phy_c8 < num_levels; phy_c8++) {
- phy_c23 = 1;
- phy_c22 = 0;
+ phy_c23 = true;
+ phy_c22 = false;
switch (cal_type) {
case 0:
phy_c10 = 511;
@@ -3700,18 +3701,18 @@ wlc_lcnphy_a1(struct brcms_phy *pi, int cal_type, int num_levels,
phy_c9 = read_phy_reg(pi, 0x93d);
phy_c9 = 2 * phy_c9;
- phy_c24 = 0;
+ phy_c24 = false;
phy_c5 = 7;
- phy_c25 = 1;
+ phy_c25 = true;
while (1) {
write_radio_reg(pi, RADIO_2064_REG026,
(phy_c5 & 0x7) | ((phy_c5 & 0x7) << 4));
udelay(50);
- phy_c22 = 0;
+ phy_c22 = false;
ptr[130] = 0;
wlc_lcnphy_samp_cap(pi, 1, phy_c9, &ptr[0], 2);
if (ptr[130] == 1)
- phy_c22 = 1;
+ phy_c22 = true;
if (phy_c22)
phy_c5 -= 1;
if ((phy_c22 != phy_c24) && (!phy_c25))
@@ -3721,7 +3722,7 @@ wlc_lcnphy_a1(struct brcms_phy *pi, int cal_type, int num_levels,
if (phy_c5 <= 0 || phy_c5 >= 7)
break;
phy_c24 = phy_c22;
- phy_c25 = 0;
+ phy_c25 = false;
}
if (phy_c5 < 0)
@@ -3772,10 +3773,10 @@ wlc_lcnphy_a1(struct brcms_phy *pi, int cal_type, int num_levels,
phy_c13 = phy_c11;
phy_c14 = phy_c12;
}
- phy_c23 = 0;
+ phy_c23 = false;
}
}
- phy_c23 = 1;
+ phy_c23 = true;
phy_c15 = phy_c13;
phy_c16 = phy_c14;
phy_c7 = phy_c7 >> 1;
@@ -3965,12 +3966,12 @@ s16 wlc_lcnphy_tempsense_new(struct brcms_phy *pi, bool mode)
{
u16 tempsenseval1, tempsenseval2;
s16 avg = 0;
- bool suspend = 0;
+ bool suspend = false;
if (mode == 1) {
- suspend =
- (0 ==
- (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+ suspend = (0 == (bcma_read32(pi->d11core,
+ D11REGOFFS(maccontrol)) &
+ MCTL_EN_MAC));
if (!suspend)
wlapi_suspend_mac_and_wait(pi->sh->physhim);
wlc_lcnphy_vbat_temp_sense_setup(pi, TEMPSENSE);
@@ -4007,14 +4008,14 @@ u16 wlc_lcnphy_tempsense(struct brcms_phy *pi, bool mode)
{
u16 tempsenseval1, tempsenseval2;
s32 avg = 0;
- bool suspend = 0;
+ bool suspend = false;
u16 SAVE_txpwrctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi);
struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
if (mode == 1) {
- suspend =
- (0 ==
- (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+ suspend = (0 == (bcma_read32(pi->d11core,
+ D11REGOFFS(maccontrol)) &
+ MCTL_EN_MAC));
if (!suspend)
wlapi_suspend_mac_and_wait(pi->sh->physhim);
wlc_lcnphy_vbat_temp_sense_setup(pi, TEMPSENSE);
@@ -4075,12 +4076,12 @@ s8 wlc_lcnphy_vbatsense(struct brcms_phy *pi, bool mode)
{
u16 vbatsenseval;
s32 avg = 0;
- bool suspend = 0;
+ bool suspend = false;
if (mode == 1) {
- suspend =
- (0 ==
- (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+ suspend = (0 == (bcma_read32(pi->d11core,
+ D11REGOFFS(maccontrol)) &
+ MCTL_EN_MAC));
if (!suspend)
wlapi_suspend_mac_and_wait(pi->sh->physhim);
wlc_lcnphy_vbat_temp_sense_setup(pi, VBATSENSE);
@@ -4127,8 +4128,8 @@ static void wlc_lcnphy_glacial_timer_based_cal(struct brcms_phy *pi)
s8 index;
u16 SAVE_pwrctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi);
struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
- suspend =
- (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+ suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
+ MCTL_EN_MAC));
if (!suspend)
wlapi_suspend_mac_and_wait(pi->sh->physhim);
wlc_lcnphy_deaf_mode(pi, true);
@@ -4166,8 +4167,8 @@ static void wlc_lcnphy_periodic_cal(struct brcms_phy *pi)
pi_lcn->lcnphy_full_cal_channel = CHSPEC_CHANNEL(pi->radio_chanspec);
index = pi_lcn->lcnphy_current_index;
- suspend =
- (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+ suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
+ MCTL_EN_MAC));
if (!suspend) {
wlapi_bmac_write_shm(pi->sh->physhim, M_CTS_DURATION, 10000);
wlapi_suspend_mac_and_wait(pi->sh->physhim);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c
index cd19c2f7a347..a16f1ab292fd 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c
@@ -29,6 +29,7 @@
#include "phy_radio.h"
#include "phyreg_n.h"
#include "phytbl_n.h"
+#include "soc.h"
#define READ_RADIO_REG2(pi, radio_type, jspace, core, reg_name) \
read_radio_reg(pi, radio_type##_##jspace##_##reg_name | \
@@ -14417,12 +14418,6 @@ static void wlc_phy_txpwr_srom_read_ppr_nphy(struct brcms_phy *pi)
switch (band_num) {
case 0:
- pi->nphy_txpid2g[PHY_CORE_0] =
- (u8) wlapi_getintvar(shim,
- BRCMS_SROM_TXPID2GA0);
- pi->nphy_txpid2g[PHY_CORE_1] =
- (u8) wlapi_getintvar(shim,
- BRCMS_SROM_TXPID2GA1);
pi->nphy_pwrctrl_info[PHY_CORE_0].max_pwr_2g =
(s8) wlapi_getintvar(shim,
BRCMS_SROM_MAXP2GA0);
@@ -14486,12 +14481,6 @@ static void wlc_phy_txpwr_srom_read_ppr_nphy(struct brcms_phy *pi)
break;
case 1:
- pi->nphy_txpid5g[PHY_CORE_0] =
- (u8) wlapi_getintvar(shim,
- BRCMS_SROM_TXPID5GA0);
- pi->nphy_txpid5g[PHY_CORE_1] =
- (u8) wlapi_getintvar(shim,
- BRCMS_SROM_TXPID5GA1);
pi->nphy_pwrctrl_info[PHY_CORE_0].max_pwr_5gm =
(s8) wlapi_getintvar(shim, BRCMS_SROM_MAXP5GA0);
pi->nphy_pwrctrl_info[PHY_CORE_1].max_pwr_5gm =
@@ -14551,12 +14540,6 @@ static void wlc_phy_txpwr_srom_read_ppr_nphy(struct brcms_phy *pi)
break;
case 2:
- pi->nphy_txpid5gl[0] =
- (u8) wlapi_getintvar(shim,
- BRCMS_SROM_TXPID5GLA0);
- pi->nphy_txpid5gl[1] =
- (u8) wlapi_getintvar(shim,
- BRCMS_SROM_TXPID5GLA1);
pi->nphy_pwrctrl_info[0].max_pwr_5gl =
(s8) wlapi_getintvar(shim,
BRCMS_SROM_MAXP5GLA0);
@@ -14615,12 +14598,6 @@ static void wlc_phy_txpwr_srom_read_ppr_nphy(struct brcms_phy *pi)
break;
case 3:
- pi->nphy_txpid5gh[0] =
- (u8) wlapi_getintvar(shim,
- BRCMS_SROM_TXPID5GHA0);
- pi->nphy_txpid5gh[1] =
- (u8) wlapi_getintvar(shim,
- BRCMS_SROM_TXPID5GHA1);
pi->nphy_pwrctrl_info[0].max_pwr_5gh =
(s8) wlapi_getintvar(shim,
BRCMS_SROM_MAXP5GHA0);
@@ -17825,7 +17802,7 @@ static void wlc_phy_txpwrctrl_pwr_setup_nphy(struct brcms_phy *pi)
if (D11REV_IS(pi->sh->corerev, 11) || D11REV_IS(pi->sh->corerev, 12)) {
wlapi_bmac_mctrl(pi->sh->physhim, MCTL_PHYLOCK, MCTL_PHYLOCK);
- (void)R_REG(&pi->regs->maccontrol);
+ (void)bcma_read32(pi->d11core, D11REGOFFS(maccontrol));
udelay(1);
}
@@ -17976,7 +17953,7 @@ static void wlc_phy_txpwrctrl_pwr_setup_nphy(struct brcms_phy *pi)
if (D11REV_IS(pi->sh->corerev, 11) || D11REV_IS(pi->sh->corerev, 12)) {
wlapi_bmac_mctrl(pi->sh->physhim, MCTL_PHYLOCK, MCTL_PHYLOCK);
- (void)R_REG(&pi->regs->maccontrol);
+ (void)bcma_read32(pi->d11core, D11REGOFFS(maccontrol));
udelay(1);
}
@@ -19470,8 +19447,6 @@ void wlc_phy_init_nphy(struct brcms_phy *pi)
u8 tx_pwr_ctrl_state;
bool do_nphy_cal = false;
uint core;
- uint origidx, intr_val;
- struct d11regs __iomem *regs;
u32 d11_clk_ctl_st;
bool do_rssi_cal = false;
@@ -19485,25 +19460,21 @@ void wlc_phy_init_nphy(struct brcms_phy *pi)
(pi->sh->chippkg == BCM4718_PKG_ID))) {
if ((pi->sh->boardflags & BFL_EXTLNA) &&
(CHSPEC_IS2G(pi->radio_chanspec)))
- ai_corereg(pi->sh->sih, SI_CC_IDX,
- offsetof(struct chipcregs, chipcontrol),
- 0x40, 0x40);
+ ai_cc_reg(pi->sh->sih,
+ offsetof(struct chipcregs, chipcontrol),
+ 0x40, 0x40);
}
if ((pi->nphy_gband_spurwar2_en) && CHSPEC_IS2G(pi->radio_chanspec) &&
CHSPEC_IS40(pi->radio_chanspec)) {
- regs = (struct d11regs __iomem *)
- ai_switch_core(pi->sh->sih,
- D11_CORE_ID, &origidx,
- &intr_val);
- d11_clk_ctl_st = R_REG(&regs->clk_ctl_st);
- AND_REG(&regs->clk_ctl_st,
- ~(CCS_FORCEHT | CCS_HTAREQ));
+ d11_clk_ctl_st = bcma_read32(pi->d11core,
+ D11REGOFFS(clk_ctl_st));
+ bcma_mask32(pi->d11core, D11REGOFFS(clk_ctl_st),
+ ~(CCS_FORCEHT | CCS_HTAREQ));
- W_REG(&regs->clk_ctl_st, d11_clk_ctl_st);
-
- ai_restore_core(pi->sh->sih, origidx, intr_val);
+ bcma_write32(pi->d11core, D11REGOFFS(clk_ctl_st),
+ d11_clk_ctl_st);
}
pi->use_int_tx_iqlo_cal_nphy =
@@ -19908,7 +19879,8 @@ void wlc_phy_rxcore_setstate_nphy(struct brcms_phy_pub *pih, u8 rxcore_bitmask)
if (!pi->sh->clk)
return;
- suspend = (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+ suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
+ MCTL_EN_MAC));
if (!suspend)
wlapi_suspend_mac_and_wait(pi->sh->physhim);
@@ -21286,28 +21258,28 @@ wlc_phy_chanspec_nphy_setup(struct brcms_phy *pi, u16 chanspec,
val = read_phy_reg(pi, 0x09) & NPHY_BandControl_currentBand;
if (CHSPEC_IS5G(chanspec) && !val) {
- val = R_REG(&pi->regs->psm_phy_hdr_param);
- W_REG(&pi->regs->psm_phy_hdr_param,
+ val = bcma_read16(pi->d11core, D11REGOFFS(psm_phy_hdr_param));
+ bcma_write16(pi->d11core, D11REGOFFS(psm_phy_hdr_param),
(val | MAC_PHY_FORCE_CLK));
or_phy_reg(pi, (NPHY_TO_BPHY_OFF + BPHY_BB_CONFIG),
(BBCFG_RESETCCA | BBCFG_RESETRX));
- W_REG(&pi->regs->psm_phy_hdr_param, val);
+ bcma_write16(pi->d11core, D11REGOFFS(psm_phy_hdr_param), val);
or_phy_reg(pi, 0x09, NPHY_BandControl_currentBand);
} else if (!CHSPEC_IS5G(chanspec) && val) {
and_phy_reg(pi, 0x09, ~NPHY_BandControl_currentBand);
- val = R_REG(&pi->regs->psm_phy_hdr_param);
- W_REG(&pi->regs->psm_phy_hdr_param,
+ val = bcma_read16(pi->d11core, D11REGOFFS(psm_phy_hdr_param));
+ bcma_write16(pi->d11core, D11REGOFFS(psm_phy_hdr_param),
(val | MAC_PHY_FORCE_CLK));
and_phy_reg(pi, (NPHY_TO_BPHY_OFF + BPHY_BB_CONFIG),
(u16) (~(BBCFG_RESETCCA | BBCFG_RESETRX)));
- W_REG(&pi->regs->psm_phy_hdr_param, val);
+ bcma_write16(pi->d11core, D11REGOFFS(psm_phy_hdr_param), val);
}
write_phy_reg(pi, 0x1ce, ci->PHY_BW1a);
@@ -21365,24 +21337,23 @@ wlc_phy_chanspec_nphy_setup(struct brcms_phy *pi, u16 chanspec,
spuravoid = 1;
wlapi_bmac_core_phypll_ctl(pi->sh->physhim, false);
- si_pmu_spuravoid(pi->sh->sih, spuravoid);
+ si_pmu_spuravoid_pllupdate(pi->sh->sih, spuravoid);
wlapi_bmac_core_phypll_ctl(pi->sh->physhim, true);
if ((pi->sh->chip == BCM43224_CHIP_ID) ||
(pi->sh->chip == BCM43225_CHIP_ID)) {
-
if (spuravoid == 1) {
-
- W_REG(&pi->regs->tsf_clk_frac_l,
- 0x5341);
- W_REG(&pi->regs->tsf_clk_frac_h,
- 0x8);
+ bcma_write16(pi->d11core,
+ D11REGOFFS(tsf_clk_frac_l),
+ 0x5341);
+ bcma_write16(pi->d11core,
+ D11REGOFFS(tsf_clk_frac_h), 0x8);
} else {
-
- W_REG(&pi->regs->tsf_clk_frac_l,
- 0x8889);
- W_REG(&pi->regs->tsf_clk_frac_h,
- 0x8);
+ bcma_write16(pi->d11core,
+ D11REGOFFS(tsf_clk_frac_l),
+ 0x8889);
+ bcma_write16(pi->d11core,
+ D11REGOFFS(tsf_clk_frac_h), 0x8);
}
}
@@ -21522,13 +21493,13 @@ void wlc_phy_antsel_init(struct brcms_phy_pub *ppi, bool lut_init)
ai_gpiocontrol(pi->sh->sih, mask, mask, GPIO_DRV_PRIORITY);
- mc = R_REG(&pi->regs->maccontrol);
+ mc = bcma_read32(pi->d11core, D11REGOFFS(maccontrol));
mc &= ~MCTL_GPOUT_SEL_MASK;
- W_REG(&pi->regs->maccontrol, mc);
+ bcma_write32(pi->d11core, D11REGOFFS(maccontrol), mc);
- OR_REG(&pi->regs->psm_gpio_oe, mask);
+ bcma_set16(pi->d11core, D11REGOFFS(psm_gpio_oe), mask);
- AND_REG(&pi->regs->psm_gpio_out, ~mask);
+ bcma_mask16(pi->d11core, D11REGOFFS(psm_gpio_out), ~mask);
if (lut_init) {
write_phy_reg(pi, 0xf8, 0x02d8);
@@ -21545,9 +21516,8 @@ u16 wlc_phy_classifier_nphy(struct brcms_phy *pi, u16 mask, u16 val)
bool suspended = false;
if (D11REV_IS(pi->sh->corerev, 16)) {
- suspended =
- (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC) ?
- false : true;
+ suspended = (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
+ MCTL_EN_MAC) ? false : true;
if (!suspended)
wlapi_suspend_mac_and_wait(pi->sh->physhim);
}
@@ -25406,7 +25376,8 @@ static void wlc_phy_a4(struct brcms_phy *pi, bool full_cal)
if (pi->nphy_papd_skip == 1)
return;
- phy_b3 = (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+ phy_b3 = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
+ MCTL_EN_MAC));
if (!phy_b3)
wlapi_suspend_mac_and_wait(pi->sh->physhim);
@@ -27994,20 +27965,11 @@ void wlc_phy_txpwr_fixpower_nphy(struct brcms_phy *pi)
chan_freq_range = wlc_phy_get_chan_freq_range_nphy(pi, 0);
switch (chan_freq_range) {
case WL_CHAN_FREQ_RANGE_2G:
- txpi[0] = pi->nphy_txpid2g[0];
- txpi[1] = pi->nphy_txpid2g[1];
- break;
case WL_CHAN_FREQ_RANGE_5GL:
- txpi[0] = pi->nphy_txpid5gl[0];
- txpi[1] = pi->nphy_txpid5gl[1];
- break;
case WL_CHAN_FREQ_RANGE_5GM:
- txpi[0] = pi->nphy_txpid5g[0];
- txpi[1] = pi->nphy_txpid5g[1];
- break;
case WL_CHAN_FREQ_RANGE_5GH:
- txpi[0] = pi->nphy_txpid5gh[0];
- txpi[1] = pi->nphy_txpid5gh[1];
+ txpi[0] = 0;
+ txpi[1] = 0;
break;
default:
txpi[0] = txpi[1] = 91;
@@ -28389,7 +28351,7 @@ void wlc_phy_txpower_recalc_target_nphy(struct brcms_phy *pi)
if (D11REV_IS(pi->sh->corerev, 11) || D11REV_IS(pi->sh->corerev, 12)) {
wlapi_bmac_mctrl(pi->sh->physhim, MCTL_PHYLOCK, MCTL_PHYLOCK);
- (void)R_REG(&pi->regs->maccontrol);
+ (void)bcma_read32(pi->d11core, D11REGOFFS(maccontrol));
udelay(1);
}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/pmu.c b/drivers/net/wireless/brcm80211/brcmsmac/pmu.c
index 3b36e3acfd74..4931d29d077b 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/pmu.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/pmu.c
@@ -23,6 +23,7 @@
#include "pub.h"
#include "aiutils.h"
#include "pmu.h"
+#include "soc.h"
/*
* external LPO crystal frequency
@@ -114,10 +115,10 @@ static void si_pmu_res_masks(struct si_pub *sih, u32 * pmin, u32 * pmax)
uint rsrcs;
/* # resources */
- rsrcs = (sih->pmucaps & PCAP_RC_MASK) >> PCAP_RC_SHIFT;
+ rsrcs = (ai_get_pmucaps(sih) & PCAP_RC_MASK) >> PCAP_RC_SHIFT;
/* determine min/max rsrc masks */
- switch (sih->chip) {
+ switch (ai_get_chip_id(sih)) {
case BCM43224_CHIP_ID:
case BCM43225_CHIP_ID:
/* ??? */
@@ -138,75 +139,84 @@ static void si_pmu_res_masks(struct si_pub *sih, u32 * pmin, u32 * pmax)
*pmax = max_mask;
}
-static void
-si_pmu_spuravoid_pllupdate(struct si_pub *sih, struct chipcregs __iomem *cc,
- u8 spuravoid)
+void si_pmu_spuravoid_pllupdate(struct si_pub *sih, u8 spuravoid)
{
u32 tmp = 0;
+ struct bcma_device *core;
- switch (sih->chip) {
+ /* switch to chipc */
+ core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
+
+ switch (ai_get_chip_id(sih)) {
case BCM43224_CHIP_ID:
case BCM43225_CHIP_ID:
if (spuravoid == 1) {
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL0);
- W_REG(&cc->pllcontrol_data, 0x11500010);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL1);
- W_REG(&cc->pllcontrol_data, 0x000C0C06);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL2);
- W_REG(&cc->pllcontrol_data, 0x0F600a08);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL3);
- W_REG(&cc->pllcontrol_data, 0x00000000);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL4);
- W_REG(&cc->pllcontrol_data, 0x2001E920);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL5);
- W_REG(&cc->pllcontrol_data, 0x88888815);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
+ PMU1_PLL0_PLLCTL0);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
+ 0x11500010);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
+ PMU1_PLL0_PLLCTL1);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
+ 0x000C0C06);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
+ PMU1_PLL0_PLLCTL2);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
+ 0x0F600a08);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
+ PMU1_PLL0_PLLCTL3);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
+ 0x00000000);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
+ PMU1_PLL0_PLLCTL4);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
+ 0x2001E920);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
+ PMU1_PLL0_PLLCTL5);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
+ 0x88888815);
} else {
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL0);
- W_REG(&cc->pllcontrol_data, 0x11100010);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL1);
- W_REG(&cc->pllcontrol_data, 0x000c0c06);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL2);
- W_REG(&cc->pllcontrol_data, 0x03000a08);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL3);
- W_REG(&cc->pllcontrol_data, 0x00000000);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL4);
- W_REG(&cc->pllcontrol_data, 0x200005c0);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL5);
- W_REG(&cc->pllcontrol_data, 0x88888815);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
+ PMU1_PLL0_PLLCTL0);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
+ 0x11100010);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
+ PMU1_PLL0_PLLCTL1);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
+ 0x000c0c06);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
+ PMU1_PLL0_PLLCTL2);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
+ 0x03000a08);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
+ PMU1_PLL0_PLLCTL3);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
+ 0x00000000);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
+ PMU1_PLL0_PLLCTL4);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
+ 0x200005c0);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
+ PMU1_PLL0_PLLCTL5);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
+ 0x88888815);
}
tmp = 1 << 10;
break;
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL0);
- W_REG(&cc->pllcontrol_data, 0x11100008);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL1);
- W_REG(&cc->pllcontrol_data, 0x0c000c06);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL2);
- W_REG(&cc->pllcontrol_data, 0x03000a08);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL3);
- W_REG(&cc->pllcontrol_data, 0x00000000);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL4);
- W_REG(&cc->pllcontrol_data, 0x200005c0);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL5);
- W_REG(&cc->pllcontrol_data, 0x88888855);
-
- tmp = 1 << 10;
- break;
-
default:
/* bail out */
return;
}
- tmp |= R_REG(&cc->pmucontrol);
- W_REG(&cc->pmucontrol, tmp);
+ bcma_set32(core, CHIPCREGOFFS(pmucontrol), tmp);
}
u16 si_pmu_fast_pwrup_delay(struct si_pub *sih)
{
uint delay = PMU_MAX_TRANSITION_DLY;
- switch (sih->chip) {
+ switch (ai_get_chip_id(sih)) {
case BCM43224_CHIP_ID:
case BCM43225_CHIP_ID:
case BCM4313_CHIP_ID:
@@ -219,54 +229,35 @@ u16 si_pmu_fast_pwrup_delay(struct si_pub *sih)
return (u16) delay;
}
-void si_pmu_sprom_enable(struct si_pub *sih, bool enable)
-{
- struct chipcregs __iomem *cc;
- uint origidx;
-
- /* Remember original core before switch to chipc */
- origidx = ai_coreidx(sih);
- cc = ai_setcoreidx(sih, SI_CC_IDX);
-
- /* Return to original core */
- ai_setcoreidx(sih, origidx);
-}
-
/* Read/write a chipcontrol reg */
u32 si_pmu_chipcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val)
{
- ai_corereg(sih, SI_CC_IDX, offsetof(struct chipcregs, chipcontrol_addr),
- ~0, reg);
- return ai_corereg(sih, SI_CC_IDX,
- offsetof(struct chipcregs, chipcontrol_data), mask,
- val);
+ ai_cc_reg(sih, offsetof(struct chipcregs, chipcontrol_addr), ~0, reg);
+ return ai_cc_reg(sih, offsetof(struct chipcregs, chipcontrol_data),
+ mask, val);
}
/* Read/write a regcontrol reg */
u32 si_pmu_regcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val)
{
- ai_corereg(sih, SI_CC_IDX, offsetof(struct chipcregs, regcontrol_addr),
- ~0, reg);
- return ai_corereg(sih, SI_CC_IDX,
- offsetof(struct chipcregs, regcontrol_data), mask,
- val);
+ ai_cc_reg(sih, offsetof(struct chipcregs, regcontrol_addr), ~0, reg);
+ return ai_cc_reg(sih, offsetof(struct chipcregs, regcontrol_data),
+ mask, val);
}
/* Read/write a pllcontrol reg */
u32 si_pmu_pllcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val)
{
- ai_corereg(sih, SI_CC_IDX, offsetof(struct chipcregs, pllcontrol_addr),
- ~0, reg);
- return ai_corereg(sih, SI_CC_IDX,
- offsetof(struct chipcregs, pllcontrol_data), mask,
- val);
+ ai_cc_reg(sih, offsetof(struct chipcregs, pllcontrol_addr), ~0, reg);
+ return ai_cc_reg(sih, offsetof(struct chipcregs, pllcontrol_data),
+ mask, val);
}
/* PMU PLL update */
void si_pmu_pllupd(struct si_pub *sih)
{
- ai_corereg(sih, SI_CC_IDX, offsetof(struct chipcregs, pmucontrol),
- PCTL_PLL_PLLCTL_UPD, PCTL_PLL_PLLCTL_UPD);
+ ai_cc_reg(sih, offsetof(struct chipcregs, pmucontrol),
+ PCTL_PLL_PLLCTL_UPD, PCTL_PLL_PLLCTL_UPD);
}
/* query alp/xtal clock frequency */
@@ -275,10 +266,10 @@ u32 si_pmu_alp_clock(struct si_pub *sih)
u32 clock = ALP_CLOCK;
/* bail out with default */
- if (!(sih->cccaps & CC_CAP_PMU))
+ if (!(ai_get_cccaps(sih) & CC_CAP_PMU))
return clock;
- switch (sih->chip) {
+ switch (ai_get_chip_id(sih)) {
case BCM43224_CHIP_ID:
case BCM43225_CHIP_ID:
case BCM4313_CHIP_ID:
@@ -292,95 +283,29 @@ u32 si_pmu_alp_clock(struct si_pub *sih)
return clock;
}
-void si_pmu_spuravoid(struct si_pub *sih, u8 spuravoid)
-{
- struct chipcregs __iomem *cc;
- uint origidx, intr_val;
-
- /* Remember original core before switch to chipc */
- cc = (struct chipcregs __iomem *)
- ai_switch_core(sih, CC_CORE_ID, &origidx, &intr_val);
-
- /* update the pll changes */
- si_pmu_spuravoid_pllupdate(sih, cc, spuravoid);
-
- /* Return to original core */
- ai_restore_core(sih, origidx, intr_val);
-}
-
/* initialize PMU */
void si_pmu_init(struct si_pub *sih)
{
- struct chipcregs __iomem *cc;
- uint origidx;
+ struct bcma_device *core;
- /* Remember original core before switch to chipc */
- origidx = ai_coreidx(sih);
- cc = ai_setcoreidx(sih, SI_CC_IDX);
-
- if (sih->pmurev == 1)
- AND_REG(&cc->pmucontrol, ~PCTL_NOILP_ON_WAIT);
- else if (sih->pmurev >= 2)
- OR_REG(&cc->pmucontrol, PCTL_NOILP_ON_WAIT);
+ /* select chipc */
+ core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
- /* Return to original core */
- ai_setcoreidx(sih, origidx);
-}
-
-/* initialize PMU chip controls and other chip level stuff */
-void si_pmu_chip_init(struct si_pub *sih)
-{
- uint origidx;
-
- /* Gate off SPROM clock and chip select signals */
- si_pmu_sprom_enable(sih, false);
-
- /* Remember original core */
- origidx = ai_coreidx(sih);
-
- /* Return to original core */
- ai_setcoreidx(sih, origidx);
-}
-
-/* initialize PMU switch/regulators */
-void si_pmu_swreg_init(struct si_pub *sih)
-{
-}
-
-/* initialize PLL */
-void si_pmu_pll_init(struct si_pub *sih, uint xtalfreq)
-{
- struct chipcregs __iomem *cc;
- uint origidx;
-
- /* Remember original core before switch to chipc */
- origidx = ai_coreidx(sih);
- cc = ai_setcoreidx(sih, SI_CC_IDX);
-
- switch (sih->chip) {
- case BCM4313_CHIP_ID:
- case BCM43224_CHIP_ID:
- case BCM43225_CHIP_ID:
- /* ??? */
- break;
- default:
- break;
- }
-
- /* Return to original core */
- ai_setcoreidx(sih, origidx);
+ if (ai_get_pmurev(sih) == 1)
+ bcma_mask32(core, CHIPCREGOFFS(pmucontrol),
+ ~PCTL_NOILP_ON_WAIT);
+ else if (ai_get_pmurev(sih) >= 2)
+ bcma_set32(core, CHIPCREGOFFS(pmucontrol), PCTL_NOILP_ON_WAIT);
}
/* initialize PMU resources */
void si_pmu_res_init(struct si_pub *sih)
{
- struct chipcregs __iomem *cc;
- uint origidx;
+ struct bcma_device *core;
u32 min_mask = 0, max_mask = 0;
- /* Remember original core before switch to chipc */
- origidx = ai_coreidx(sih);
- cc = ai_setcoreidx(sih, SI_CC_IDX);
+	/* select chipc */
+ core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
/* Determine min/max rsrc masks */
si_pmu_res_masks(sih, &min_mask, &max_mask);
@@ -390,55 +315,50 @@ void si_pmu_res_init(struct si_pub *sih)
/* Program max resource mask */
if (max_mask)
- W_REG(&cc->max_res_mask, max_mask);
+ bcma_write32(core, CHIPCREGOFFS(max_res_mask), max_mask);
/* Program min resource mask */
if (min_mask)
- W_REG(&cc->min_res_mask, min_mask);
+ bcma_write32(core, CHIPCREGOFFS(min_res_mask), min_mask);
/* Add some delay; allow resources to come up and settle. */
mdelay(2);
-
- /* Return to original core */
- ai_setcoreidx(sih, origidx);
}
u32 si_pmu_measure_alpclk(struct si_pub *sih)
{
- struct chipcregs __iomem *cc;
- uint origidx;
+ struct bcma_device *core;
u32 alp_khz;
- if (sih->pmurev < 10)
+ if (ai_get_pmurev(sih) < 10)
return 0;
/* Remember original core before switch to chipc */
- origidx = ai_coreidx(sih);
- cc = ai_setcoreidx(sih, SI_CC_IDX);
+ core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
- if (R_REG(&cc->pmustatus) & PST_EXTLPOAVAIL) {
+ if (bcma_read32(core, CHIPCREGOFFS(pmustatus)) & PST_EXTLPOAVAIL) {
u32 ilp_ctr, alp_hz;
/*
* Enable the reg to measure the freq,
* in case it was disabled before
*/
- W_REG(&cc->pmu_xtalfreq,
- 1U << PMU_XTALFREQ_REG_MEASURE_SHIFT);
+ bcma_write32(core, CHIPCREGOFFS(pmu_xtalfreq),
+ 1U << PMU_XTALFREQ_REG_MEASURE_SHIFT);
/* Delay for well over 4 ILP clocks */
udelay(1000);
/* Read the latched number of ALP ticks per 4 ILP ticks */
- ilp_ctr =
- R_REG(&cc->pmu_xtalfreq) & PMU_XTALFREQ_REG_ILPCTR_MASK;
+ ilp_ctr = bcma_read32(core, CHIPCREGOFFS(pmu_xtalfreq)) &
+ PMU_XTALFREQ_REG_ILPCTR_MASK;
/*
* Turn off the PMU_XTALFREQ_REG_MEASURE_SHIFT
* bit to save power
*/
- W_REG(&cc->pmu_xtalfreq, 0);
+ bcma_write32(core, CHIPCREGOFFS(pmu_xtalfreq), 0);
/* Calculate ALP frequency */
alp_hz = (ilp_ctr * EXT_ILP_HZ) / 4;
@@ -451,8 +371,5 @@ u32 si_pmu_measure_alpclk(struct si_pub *sih)
} else
alp_khz = 0;
- /* Return to original core */
- ai_setcoreidx(sih, origidx);
-
return alp_khz;
}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/pmu.h b/drivers/net/wireless/brcm80211/brcmsmac/pmu.h
index 3a08c620640e..3e39c5e0f9ff 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/pmu.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/pmu.h
@@ -26,13 +26,10 @@ extern u32 si_pmu_chipcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val);
extern u32 si_pmu_regcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val);
extern u32 si_pmu_alp_clock(struct si_pub *sih);
extern void si_pmu_pllupd(struct si_pub *sih);
-extern void si_pmu_spuravoid(struct si_pub *sih, u8 spuravoid);
+extern void si_pmu_spuravoid_pllupdate(struct si_pub *sih, u8 spuravoid);
extern u32 si_pmu_pllcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val);
extern void si_pmu_init(struct si_pub *sih);
-extern void si_pmu_chip_init(struct si_pub *sih);
-extern void si_pmu_pll_init(struct si_pub *sih, u32 xtalfreq);
extern void si_pmu_res_init(struct si_pub *sih);
-extern void si_pmu_swreg_init(struct si_pub *sih);
extern u32 si_pmu_measure_alpclk(struct si_pub *sih);
#endif /* _BRCM_PMU_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/pub.h b/drivers/net/wireless/brcm80211/brcmsmac/pub.h
index 37bb2dcc113f..f0038ad7d7bf 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/pub.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/pub.h
@@ -17,6 +17,7 @@
#ifndef _BRCM_PUB_H_
#define _BRCM_PUB_H_
+#include <linux/bcma/bcma.h>
#include <brcmu_wifi.h>
#include "types.h"
#include "defs.h"
@@ -170,22 +171,6 @@ enum brcms_srom_id {
BRCMS_SROM_TSSIPOS2G,
BRCMS_SROM_TSSIPOS5G,
BRCMS_SROM_TXCHAIN,
- BRCMS_SROM_TXPID2GA0,
- BRCMS_SROM_TXPID2GA1,
- BRCMS_SROM_TXPID2GA2,
- BRCMS_SROM_TXPID2GA3,
- BRCMS_SROM_TXPID5GA0,
- BRCMS_SROM_TXPID5GA1,
- BRCMS_SROM_TXPID5GA2,
- BRCMS_SROM_TXPID5GA3,
- BRCMS_SROM_TXPID5GHA0,
- BRCMS_SROM_TXPID5GHA1,
- BRCMS_SROM_TXPID5GHA2,
- BRCMS_SROM_TXPID5GHA3,
- BRCMS_SROM_TXPID5GLA0,
- BRCMS_SROM_TXPID5GLA1,
- BRCMS_SROM_TXPID5GLA2,
- BRCMS_SROM_TXPID5GLA3,
/*
* per-path identifiers (see srom.c)
*/
@@ -225,10 +210,6 @@ enum brcms_srom_id {
BRCMS_SROM_PA2GW2A1,
BRCMS_SROM_PA2GW2A2,
BRCMS_SROM_PA2GW2A3,
- BRCMS_SROM_PA2GW3A0,
- BRCMS_SROM_PA2GW3A1,
- BRCMS_SROM_PA2GW3A2,
- BRCMS_SROM_PA2GW3A3,
BRCMS_SROM_PA5GHW0A0,
BRCMS_SROM_PA5GHW0A1,
BRCMS_SROM_PA5GHW0A2,
@@ -241,10 +222,6 @@ enum brcms_srom_id {
BRCMS_SROM_PA5GHW2A1,
BRCMS_SROM_PA5GHW2A2,
BRCMS_SROM_PA5GHW2A3,
- BRCMS_SROM_PA5GHW3A0,
- BRCMS_SROM_PA5GHW3A1,
- BRCMS_SROM_PA5GHW3A2,
- BRCMS_SROM_PA5GHW3A3,
BRCMS_SROM_PA5GLW0A0,
BRCMS_SROM_PA5GLW0A1,
BRCMS_SROM_PA5GLW0A2,
@@ -257,10 +234,6 @@ enum brcms_srom_id {
BRCMS_SROM_PA5GLW2A1,
BRCMS_SROM_PA5GLW2A2,
BRCMS_SROM_PA5GLW2A3,
- BRCMS_SROM_PA5GLW3A0,
- BRCMS_SROM_PA5GLW3A1,
- BRCMS_SROM_PA5GLW3A2,
- BRCMS_SROM_PA5GLW3A3,
BRCMS_SROM_PA5GW0A0,
BRCMS_SROM_PA5GW0A1,
BRCMS_SROM_PA5GW0A2,
@@ -273,14 +246,9 @@ enum brcms_srom_id {
BRCMS_SROM_PA5GW2A1,
BRCMS_SROM_PA5GW2A2,
BRCMS_SROM_PA5GW2A3,
- BRCMS_SROM_PA5GW3A0,
- BRCMS_SROM_PA5GW3A1,
- BRCMS_SROM_PA5GW3A2,
- BRCMS_SROM_PA5GW3A3,
};
#define BRCMS_NUMRATES 16 /* max # of rates in a rateset */
-#define D11_PHY_HDR_LEN 6 /* Phy header length - 6 bytes */
/* phy types */
#define PHY_TYPE_A 0 /* Phy type A */
@@ -414,7 +382,6 @@ struct brcms_pub {
uint _nbands; /* # bands supported */
uint now; /* # elapsed seconds */
- bool promisc; /* promiscuous destination address */
bool delayed_down; /* down delayed */
bool associated; /* true:part of [I]BSS, false: not */
/* (union of stas_associated, aps_associated) */
@@ -564,15 +531,14 @@ struct brcms_antselcfg {
/* common functions for every port */
extern struct brcms_c_info *
-brcms_c_attach(struct brcms_info *wl, u16 vendor, u16 device, uint unit,
- bool piomode, void __iomem *regsva, struct pci_dev *btparam,
- uint *perr);
+brcms_c_attach(struct brcms_info *wl, struct bcma_device *core, uint unit,
+ bool piomode, uint *perr);
extern uint brcms_c_detach(struct brcms_c_info *wlc);
extern int brcms_c_up(struct brcms_c_info *wlc);
extern uint brcms_c_down(struct brcms_c_info *wlc);
extern bool brcms_c_chipmatch(u16 vendor, u16 device);
-extern void brcms_c_init(struct brcms_c_info *wlc);
+extern void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx);
extern void brcms_c_reset(struct brcms_c_info *wlc);
extern void brcms_c_intrson(struct brcms_c_info *wlc);
@@ -628,7 +594,7 @@ extern void brcms_c_set_beacon_listen_interval(struct brcms_c_info *wlc,
u8 interval);
extern int brcms_c_set_tx_power(struct brcms_c_info *wlc, int txpwr);
extern int brcms_c_get_tx_power(struct brcms_c_info *wlc);
-extern void brcms_c_set_radio_mpc(struct brcms_c_info *wlc, bool mpc);
extern bool brcms_c_check_radio_disabled(struct brcms_c_info *wlc);
+extern void brcms_c_mute(struct brcms_c_info *wlc, bool on);
#endif /* _BRCM_PUB_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/rate.h b/drivers/net/wireless/brcm80211/brcmsmac/rate.h
index e7b9dc2f2731..980d578825cc 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/rate.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/rate.h
@@ -19,6 +19,7 @@
#include "types.h"
#include "d11.h"
+#include "phy_hal.h"
extern const u8 rate_info[];
extern const struct brcms_c_rateset cck_ofdm_mimo_rates;
@@ -198,11 +199,9 @@ static inline u8 cck_rspec(u8 cck)
/* Convert encoded rate value in plcp header to numerical rates in 500 KHz
* increments */
-extern const u8 ofdm_rate_lookup[];
-
static inline u8 ofdm_phy2mac_rate(u8 rlpt)
{
- return ofdm_rate_lookup[rlpt & 0x7];
+ return wlc_phy_get_ofdm_rate_lookup()[rlpt & 0x7];
}
static inline u8 cck_phy2mac_rate(u8 signal)
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/srom.c b/drivers/net/wireless/brcm80211/brcmsmac/srom.c
index 99f791048e84..563743643038 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/srom.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/srom.c
@@ -28,6 +28,7 @@
#include "aiutils.h"
#include "otp.h"
#include "srom.h"
+#include "soc.h"
/*
* SROM CRC8 polynomial value:
@@ -62,9 +63,6 @@
#define SROM_MACHI_ET1 42
#define SROM_MACMID_ET1 43
#define SROM_MACLO_ET1 44
-#define SROM3_MACHI 37
-#define SROM3_MACMID 38
-#define SROM3_MACLO 39
#define SROM_BXARSSI2G 40
#define SROM_BXARSSI5G 41
@@ -101,7 +99,6 @@
#define SROM_BFL 57
#define SROM_BFL2 28
-#define SROM3_BFL2 61
#define SROM_AG10 58
@@ -109,99 +106,16 @@
#define SROM_OPO 60
-#define SROM3_LEDDC 62
-
#define SROM_CRCREV 63
-/* SROM Rev 4: Reallocate the software part of the srom to accommodate
- * MIMO features. It assumes up to two PCIE functions and 440 bytes
- * of usable srom i.e. the usable storage in chips with OTP that
- * implements hardware redundancy.
- */
-
#define SROM4_WORDS 220
-#define SROM4_SIGN 32
-#define SROM4_SIGNATURE 0x5372
-
-#define SROM4_BREV 33
-
-#define SROM4_BFL0 34
-#define SROM4_BFL1 35
-#define SROM4_BFL2 36
-#define SROM4_BFL3 37
-#define SROM5_BFL0 37
-#define SROM5_BFL1 38
-#define SROM5_BFL2 39
-#define SROM5_BFL3 40
-
-#define SROM4_MACHI 38
-#define SROM4_MACMID 39
-#define SROM4_MACLO 40
-#define SROM5_MACHI 41
-#define SROM5_MACMID 42
-#define SROM5_MACLO 43
-
-#define SROM4_CCODE 41
-#define SROM4_REGREV 42
-#define SROM5_CCODE 34
-#define SROM5_REGREV 35
-
-#define SROM4_LEDBH10 43
-#define SROM4_LEDBH32 44
-#define SROM5_LEDBH10 59
-#define SROM5_LEDBH32 60
-
-#define SROM4_LEDDC 45
-#define SROM5_LEDDC 45
-
-#define SROM4_AA 46
-
-#define SROM4_AG10 47
-#define SROM4_AG32 48
-
-#define SROM4_TXPID2G 49
-#define SROM4_TXPID5G 51
-#define SROM4_TXPID5GL 53
-#define SROM4_TXPID5GH 55
-
-#define SROM4_TXRXC 61
#define SROM4_TXCHAIN_MASK 0x000f
-#define SROM4_TXCHAIN_SHIFT 0
#define SROM4_RXCHAIN_MASK 0x00f0
-#define SROM4_RXCHAIN_SHIFT 4
#define SROM4_SWITCH_MASK 0xff00
-#define SROM4_SWITCH_SHIFT 8
/* Per-path fields */
#define MAX_PATH_SROM 4
-#define SROM4_PATH0 64
-#define SROM4_PATH1 87
-#define SROM4_PATH2 110
-#define SROM4_PATH3 133
-
-#define SROM4_2G_ITT_MAXP 0
-#define SROM4_2G_PA 1
-#define SROM4_5G_ITT_MAXP 5
-#define SROM4_5GLH_MAXP 6
-#define SROM4_5G_PA 7
-#define SROM4_5GL_PA 11
-#define SROM4_5GH_PA 15
-
-/* All the miriad power offsets */
-#define SROM4_2G_CCKPO 156
-#define SROM4_2G_OFDMPO 157
-#define SROM4_5G_OFDMPO 159
-#define SROM4_5GL_OFDMPO 161
-#define SROM4_5GH_OFDMPO 163
-#define SROM4_2G_MCSPO 165
-#define SROM4_5G_MCSPO 173
-#define SROM4_5GL_MCSPO 181
-#define SROM4_5GH_MCSPO 189
-#define SROM4_CDDPO 197
-#define SROM4_STBCPO 198
-#define SROM4_BW40PO 199
-#define SROM4_BWDUPPO 200
#define SROM4_CRCREV 219
@@ -424,103 +338,32 @@ struct brcms_varbuf {
static const struct brcms_sromvar pci_sromvars[] = {
{BRCMS_SROM_DEVID, 0xffffff00, SRFL_PRHEX | SRFL_NOVAR, PCI_F0DEVID,
0xffff},
- {BRCMS_SROM_BOARDREV, 0x0000000e, SRFL_PRHEX, SROM_AABREV,
- SROM_BR_MASK},
- {BRCMS_SROM_BOARDREV, 0x000000f0, SRFL_PRHEX, SROM4_BREV, 0xffff},
{BRCMS_SROM_BOARDREV, 0xffffff00, SRFL_PRHEX, SROM8_BREV, 0xffff},
- {BRCMS_SROM_BOARDFLAGS, 0x00000002, SRFL_PRHEX, SROM_BFL, 0xffff},
- {BRCMS_SROM_BOARDFLAGS, 0x00000004, SRFL_PRHEX | SRFL_MORE, SROM_BFL,
- 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM_BFL2, 0xffff},
- {BRCMS_SROM_BOARDFLAGS, 0x00000008, SRFL_PRHEX | SRFL_MORE, SROM_BFL,
- 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM3_BFL2, 0xffff},
- {BRCMS_SROM_BOARDFLAGS, 0x00000010, SRFL_PRHEX | SRFL_MORE, SROM4_BFL0,
- 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM4_BFL1, 0xffff},
- {BRCMS_SROM_BOARDFLAGS, 0x000000e0, SRFL_PRHEX | SRFL_MORE, SROM5_BFL0,
- 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM5_BFL1, 0xffff},
{BRCMS_SROM_BOARDFLAGS, 0xffffff00, SRFL_PRHEX | SRFL_MORE, SROM8_BFL0,
0xffff},
{BRCMS_SROM_CONT, 0, 0, SROM8_BFL1, 0xffff},
- {BRCMS_SROM_BOARDFLAGS2, 0x00000010, SRFL_PRHEX | SRFL_MORE, SROM4_BFL2,
- 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM4_BFL3, 0xffff},
- {BRCMS_SROM_BOARDFLAGS2, 0x000000e0, SRFL_PRHEX | SRFL_MORE, SROM5_BFL2,
- 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM5_BFL3, 0xffff},
{BRCMS_SROM_BOARDFLAGS2, 0xffffff00, SRFL_PRHEX | SRFL_MORE, SROM8_BFL2,
0xffff},
{BRCMS_SROM_CONT, 0, 0, SROM8_BFL3, 0xffff},
{BRCMS_SROM_BOARDTYPE, 0xfffffffc, SRFL_PRHEX, SROM_SSID, 0xffff},
- {BRCMS_SROM_BOARDNUM, 0x00000006, 0, SROM_MACLO_IL0, 0xffff},
- {BRCMS_SROM_BOARDNUM, 0x00000008, 0, SROM3_MACLO, 0xffff},
- {BRCMS_SROM_BOARDNUM, 0x00000010, 0, SROM4_MACLO, 0xffff},
- {BRCMS_SROM_BOARDNUM, 0x000000e0, 0, SROM5_MACLO, 0xffff},
{BRCMS_SROM_BOARDNUM, 0xffffff00, 0, SROM8_MACLO, 0xffff},
- {BRCMS_SROM_CC, 0x00000002, 0, SROM_AABREV, SROM_CC_MASK},
- {BRCMS_SROM_REGREV, 0x00000008, 0, SROM_OPO, 0xff00},
- {BRCMS_SROM_REGREV, 0x00000010, 0, SROM4_REGREV, 0x00ff},
- {BRCMS_SROM_REGREV, 0x000000e0, 0, SROM5_REGREV, 0x00ff},
{BRCMS_SROM_REGREV, 0xffffff00, 0, SROM8_REGREV, 0x00ff},
- {BRCMS_SROM_LEDBH0, 0x0000000e, SRFL_NOFFS, SROM_LEDBH10, 0x00ff},
- {BRCMS_SROM_LEDBH1, 0x0000000e, SRFL_NOFFS, SROM_LEDBH10, 0xff00},
- {BRCMS_SROM_LEDBH2, 0x0000000e, SRFL_NOFFS, SROM_LEDBH32, 0x00ff},
- {BRCMS_SROM_LEDBH3, 0x0000000e, SRFL_NOFFS, SROM_LEDBH32, 0xff00},
- {BRCMS_SROM_LEDBH0, 0x00000010, SRFL_NOFFS, SROM4_LEDBH10, 0x00ff},
- {BRCMS_SROM_LEDBH1, 0x00000010, SRFL_NOFFS, SROM4_LEDBH10, 0xff00},
- {BRCMS_SROM_LEDBH2, 0x00000010, SRFL_NOFFS, SROM4_LEDBH32, 0x00ff},
- {BRCMS_SROM_LEDBH3, 0x00000010, SRFL_NOFFS, SROM4_LEDBH32, 0xff00},
- {BRCMS_SROM_LEDBH0, 0x000000e0, SRFL_NOFFS, SROM5_LEDBH10, 0x00ff},
- {BRCMS_SROM_LEDBH1, 0x000000e0, SRFL_NOFFS, SROM5_LEDBH10, 0xff00},
- {BRCMS_SROM_LEDBH2, 0x000000e0, SRFL_NOFFS, SROM5_LEDBH32, 0x00ff},
- {BRCMS_SROM_LEDBH3, 0x000000e0, SRFL_NOFFS, SROM5_LEDBH32, 0xff00},
{BRCMS_SROM_LEDBH0, 0xffffff00, SRFL_NOFFS, SROM8_LEDBH10, 0x00ff},
{BRCMS_SROM_LEDBH1, 0xffffff00, SRFL_NOFFS, SROM8_LEDBH10, 0xff00},
{BRCMS_SROM_LEDBH2, 0xffffff00, SRFL_NOFFS, SROM8_LEDBH32, 0x00ff},
{BRCMS_SROM_LEDBH3, 0xffffff00, SRFL_NOFFS, SROM8_LEDBH32, 0xff00},
- {BRCMS_SROM_PA0B0, 0x0000000e, SRFL_PRHEX, SROM_WL0PAB0, 0xffff},
- {BRCMS_SROM_PA0B1, 0x0000000e, SRFL_PRHEX, SROM_WL0PAB1, 0xffff},
- {BRCMS_SROM_PA0B2, 0x0000000e, SRFL_PRHEX, SROM_WL0PAB2, 0xffff},
- {BRCMS_SROM_PA0ITSSIT, 0x0000000e, 0, SROM_ITT, 0x00ff},
- {BRCMS_SROM_PA0MAXPWR, 0x0000000e, 0, SROM_WL10MAXP, 0x00ff},
{BRCMS_SROM_PA0B0, 0xffffff00, SRFL_PRHEX, SROM8_W0_PAB0, 0xffff},
{BRCMS_SROM_PA0B1, 0xffffff00, SRFL_PRHEX, SROM8_W0_PAB1, 0xffff},
{BRCMS_SROM_PA0B2, 0xffffff00, SRFL_PRHEX, SROM8_W0_PAB2, 0xffff},
{BRCMS_SROM_PA0ITSSIT, 0xffffff00, 0, SROM8_W0_ITTMAXP, 0xff00},
{BRCMS_SROM_PA0MAXPWR, 0xffffff00, 0, SROM8_W0_ITTMAXP, 0x00ff},
- {BRCMS_SROM_OPO, 0x0000000c, 0, SROM_OPO, 0x00ff},
{BRCMS_SROM_OPO, 0xffffff00, 0, SROM8_2G_OFDMPO, 0x00ff},
- {BRCMS_SROM_AA2G, 0x0000000e, 0, SROM_AABREV, SROM_AA0_MASK},
- {BRCMS_SROM_AA2G, 0x000000f0, 0, SROM4_AA, 0x00ff},
{BRCMS_SROM_AA2G, 0xffffff00, 0, SROM8_AA, 0x00ff},
- {BRCMS_SROM_AA5G, 0x0000000e, 0, SROM_AABREV, SROM_AA1_MASK},
- {BRCMS_SROM_AA5G, 0x000000f0, 0, SROM4_AA, 0xff00},
{BRCMS_SROM_AA5G, 0xffffff00, 0, SROM8_AA, 0xff00},
- {BRCMS_SROM_AG0, 0x0000000e, 0, SROM_AG10, 0x00ff},
- {BRCMS_SROM_AG1, 0x0000000e, 0, SROM_AG10, 0xff00},
- {BRCMS_SROM_AG0, 0x000000f0, 0, SROM4_AG10, 0x00ff},
- {BRCMS_SROM_AG1, 0x000000f0, 0, SROM4_AG10, 0xff00},
- {BRCMS_SROM_AG2, 0x000000f0, 0, SROM4_AG32, 0x00ff},
- {BRCMS_SROM_AG3, 0x000000f0, 0, SROM4_AG32, 0xff00},
{BRCMS_SROM_AG0, 0xffffff00, 0, SROM8_AG10, 0x00ff},
{BRCMS_SROM_AG1, 0xffffff00, 0, SROM8_AG10, 0xff00},
{BRCMS_SROM_AG2, 0xffffff00, 0, SROM8_AG32, 0x00ff},
{BRCMS_SROM_AG3, 0xffffff00, 0, SROM8_AG32, 0xff00},
- {BRCMS_SROM_PA1B0, 0x0000000e, SRFL_PRHEX, SROM_WL1PAB0, 0xffff},
- {BRCMS_SROM_PA1B1, 0x0000000e, SRFL_PRHEX, SROM_WL1PAB1, 0xffff},
- {BRCMS_SROM_PA1B2, 0x0000000e, SRFL_PRHEX, SROM_WL1PAB2, 0xffff},
- {BRCMS_SROM_PA1LOB0, 0x0000000c, SRFL_PRHEX, SROM_WL1LPAB0, 0xffff},
- {BRCMS_SROM_PA1LOB1, 0x0000000c, SRFL_PRHEX, SROM_WL1LPAB1, 0xffff},
- {BRCMS_SROM_PA1LOB2, 0x0000000c, SRFL_PRHEX, SROM_WL1LPAB2, 0xffff},
- {BRCMS_SROM_PA1HIB0, 0x0000000c, SRFL_PRHEX, SROM_WL1HPAB0, 0xffff},
- {BRCMS_SROM_PA1HIB1, 0x0000000c, SRFL_PRHEX, SROM_WL1HPAB1, 0xffff},
- {BRCMS_SROM_PA1HIB2, 0x0000000c, SRFL_PRHEX, SROM_WL1HPAB2, 0xffff},
- {BRCMS_SROM_PA1ITSSIT, 0x0000000e, 0, SROM_ITT, 0xff00},
- {BRCMS_SROM_PA1MAXPWR, 0x0000000e, 0, SROM_WL10MAXP, 0xff00},
- {BRCMS_SROM_PA1LOMAXPWR, 0x0000000c, 0, SROM_WL1LHMAXP, 0xff00},
- {BRCMS_SROM_PA1HIMAXPWR, 0x0000000c, 0, SROM_WL1LHMAXP, 0x00ff},
{BRCMS_SROM_PA1B0, 0xffffff00, SRFL_PRHEX, SROM8_W1_PAB0, 0xffff},
{BRCMS_SROM_PA1B1, 0xffffff00, SRFL_PRHEX, SROM8_W1_PAB1, 0xffff},
{BRCMS_SROM_PA1B2, 0xffffff00, SRFL_PRHEX, SROM8_W1_PAB2, 0xffff},
@@ -534,40 +377,20 @@ static const struct brcms_sromvar pci_sromvars[] = {
{BRCMS_SROM_PA1MAXPWR, 0xffffff00, 0, SROM8_W1_ITTMAXP, 0x00ff},
{BRCMS_SROM_PA1LOMAXPWR, 0xffffff00, 0, SROM8_W1_MAXP_LCHC, 0xff00},
{BRCMS_SROM_PA1HIMAXPWR, 0xffffff00, 0, SROM8_W1_MAXP_LCHC, 0x00ff},
- {BRCMS_SROM_BXA2G, 0x00000008, 0, SROM_BXARSSI2G, 0x1800},
- {BRCMS_SROM_RSSISAV2G, 0x00000008, 0, SROM_BXARSSI2G, 0x0700},
- {BRCMS_SROM_RSSISMC2G, 0x00000008, 0, SROM_BXARSSI2G, 0x00f0},
- {BRCMS_SROM_RSSISMF2G, 0x00000008, 0, SROM_BXARSSI2G, 0x000f},
{BRCMS_SROM_BXA2G, 0xffffff00, 0, SROM8_BXARSSI2G, 0x1800},
{BRCMS_SROM_RSSISAV2G, 0xffffff00, 0, SROM8_BXARSSI2G, 0x0700},
{BRCMS_SROM_RSSISMC2G, 0xffffff00, 0, SROM8_BXARSSI2G, 0x00f0},
{BRCMS_SROM_RSSISMF2G, 0xffffff00, 0, SROM8_BXARSSI2G, 0x000f},
- {BRCMS_SROM_BXA5G, 0x00000008, 0, SROM_BXARSSI5G, 0x1800},
- {BRCMS_SROM_RSSISAV5G, 0x00000008, 0, SROM_BXARSSI5G, 0x0700},
- {BRCMS_SROM_RSSISMC5G, 0x00000008, 0, SROM_BXARSSI5G, 0x00f0},
- {BRCMS_SROM_RSSISMF5G, 0x00000008, 0, SROM_BXARSSI5G, 0x000f},
{BRCMS_SROM_BXA5G, 0xffffff00, 0, SROM8_BXARSSI5G, 0x1800},
{BRCMS_SROM_RSSISAV5G, 0xffffff00, 0, SROM8_BXARSSI5G, 0x0700},
{BRCMS_SROM_RSSISMC5G, 0xffffff00, 0, SROM8_BXARSSI5G, 0x00f0},
{BRCMS_SROM_RSSISMF5G, 0xffffff00, 0, SROM8_BXARSSI5G, 0x000f},
- {BRCMS_SROM_TRI2G, 0x00000008, 0, SROM_TRI52G, 0x00ff},
- {BRCMS_SROM_TRI5G, 0x00000008, 0, SROM_TRI52G, 0xff00},
- {BRCMS_SROM_TRI5GL, 0x00000008, 0, SROM_TRI5GHL, 0x00ff},
- {BRCMS_SROM_TRI5GH, 0x00000008, 0, SROM_TRI5GHL, 0xff00},
{BRCMS_SROM_TRI2G, 0xffffff00, 0, SROM8_TRI52G, 0x00ff},
{BRCMS_SROM_TRI5G, 0xffffff00, 0, SROM8_TRI52G, 0xff00},
{BRCMS_SROM_TRI5GL, 0xffffff00, 0, SROM8_TRI5GHL, 0x00ff},
{BRCMS_SROM_TRI5GH, 0xffffff00, 0, SROM8_TRI5GHL, 0xff00},
- {BRCMS_SROM_RXPO2G, 0x00000008, SRFL_PRSIGN, SROM_RXPO52G, 0x00ff},
- {BRCMS_SROM_RXPO5G, 0x00000008, SRFL_PRSIGN, SROM_RXPO52G, 0xff00},
{BRCMS_SROM_RXPO2G, 0xffffff00, SRFL_PRSIGN, SROM8_RXPO52G, 0x00ff},
{BRCMS_SROM_RXPO5G, 0xffffff00, SRFL_PRSIGN, SROM8_RXPO52G, 0xff00},
- {BRCMS_SROM_TXCHAIN, 0x000000f0, SRFL_NOFFS, SROM4_TXRXC,
- SROM4_TXCHAIN_MASK},
- {BRCMS_SROM_RXCHAIN, 0x000000f0, SRFL_NOFFS, SROM4_TXRXC,
- SROM4_RXCHAIN_MASK},
- {BRCMS_SROM_ANTSWITCH, 0x000000f0, SRFL_NOFFS, SROM4_TXRXC,
- SROM4_SWITCH_MASK},
{BRCMS_SROM_TXCHAIN, 0xffffff00, SRFL_NOFFS, SROM8_TXRXC,
SROM4_TXCHAIN_MASK},
{BRCMS_SROM_RXCHAIN, 0xffffff00, SRFL_NOFFS, SROM8_TXRXC,
@@ -594,43 +417,11 @@ static const struct brcms_sromvar pci_sromvars[] = {
SROM8_FEM_ANTSWLUT_MASK},
{BRCMS_SROM_TEMPTHRESH, 0xffffff00, 0, SROM8_THERMAL, 0xff00},
{BRCMS_SROM_TEMPOFFSET, 0xffffff00, 0, SROM8_THERMAL, 0x00ff},
- {BRCMS_SROM_TXPID2GA0, 0x000000f0, 0, SROM4_TXPID2G, 0x00ff},
- {BRCMS_SROM_TXPID2GA1, 0x000000f0, 0, SROM4_TXPID2G, 0xff00},
- {BRCMS_SROM_TXPID2GA2, 0x000000f0, 0, SROM4_TXPID2G + 1, 0x00ff},
- {BRCMS_SROM_TXPID2GA3, 0x000000f0, 0, SROM4_TXPID2G + 1, 0xff00},
- {BRCMS_SROM_TXPID5GA0, 0x000000f0, 0, SROM4_TXPID5G, 0x00ff},
- {BRCMS_SROM_TXPID5GA1, 0x000000f0, 0, SROM4_TXPID5G, 0xff00},
- {BRCMS_SROM_TXPID5GA2, 0x000000f0, 0, SROM4_TXPID5G + 1, 0x00ff},
- {BRCMS_SROM_TXPID5GA3, 0x000000f0, 0, SROM4_TXPID5G + 1, 0xff00},
- {BRCMS_SROM_TXPID5GLA0, 0x000000f0, 0, SROM4_TXPID5GL, 0x00ff},
- {BRCMS_SROM_TXPID5GLA1, 0x000000f0, 0, SROM4_TXPID5GL, 0xff00},
- {BRCMS_SROM_TXPID5GLA2, 0x000000f0, 0, SROM4_TXPID5GL + 1, 0x00ff},
- {BRCMS_SROM_TXPID5GLA3, 0x000000f0, 0, SROM4_TXPID5GL + 1, 0xff00},
- {BRCMS_SROM_TXPID5GHA0, 0x000000f0, 0, SROM4_TXPID5GH, 0x00ff},
- {BRCMS_SROM_TXPID5GHA1, 0x000000f0, 0, SROM4_TXPID5GH, 0xff00},
- {BRCMS_SROM_TXPID5GHA2, 0x000000f0, 0, SROM4_TXPID5GH + 1, 0x00ff},
- {BRCMS_SROM_TXPID5GHA3, 0x000000f0, 0, SROM4_TXPID5GH + 1, 0xff00},
-
- {BRCMS_SROM_CCODE, 0x0000000f, SRFL_CCODE, SROM_CCODE, 0xffff},
- {BRCMS_SROM_CCODE, 0x00000010, SRFL_CCODE, SROM4_CCODE, 0xffff},
- {BRCMS_SROM_CCODE, 0x000000e0, SRFL_CCODE, SROM5_CCODE, 0xffff},
+
{BRCMS_SROM_CCODE, 0xffffff00, SRFL_CCODE, SROM8_CCODE, 0xffff},
{BRCMS_SROM_MACADDR, 0xffffff00, SRFL_ETHADDR, SROM8_MACHI, 0xffff},
- {BRCMS_SROM_MACADDR, 0x000000e0, SRFL_ETHADDR, SROM5_MACHI, 0xffff},
- {BRCMS_SROM_MACADDR, 0x00000010, SRFL_ETHADDR, SROM4_MACHI, 0xffff},
- {BRCMS_SROM_MACADDR, 0x00000008, SRFL_ETHADDR, SROM3_MACHI, 0xffff},
- {BRCMS_SROM_IL0MACADDR, 0x00000007, SRFL_ETHADDR, SROM_MACHI_IL0,
- 0xffff},
- {BRCMS_SROM_ET1MACADDR, 0x00000007, SRFL_ETHADDR, SROM_MACHI_ET1,
- 0xffff},
{BRCMS_SROM_LEDDC, 0xffffff00, SRFL_NOFFS | SRFL_LEDDC, SROM8_LEDDC,
0xffff},
- {BRCMS_SROM_LEDDC, 0x000000e0, SRFL_NOFFS | SRFL_LEDDC, SROM5_LEDDC,
- 0xffff},
- {BRCMS_SROM_LEDDC, 0x00000010, SRFL_NOFFS | SRFL_LEDDC, SROM4_LEDDC,
- 0xffff},
- {BRCMS_SROM_LEDDC, 0x00000008, SRFL_NOFFS | SRFL_LEDDC, SROM3_LEDDC,
- 0xffff},
{BRCMS_SROM_RAWTEMPSENSE, 0xffffff00, SRFL_PRHEX, SROM8_MPWR_RAWTS,
0x01ff},
{BRCMS_SROM_MEASPOWER, 0xffffff00, SRFL_PRHEX, SROM8_MPWR_RAWTS,
@@ -650,16 +441,7 @@ static const struct brcms_sromvar pci_sromvars[] = {
{BRCMS_SROM_PHYCAL_TEMPDELTA, 0xffffff00, 0, SROM8_PHYCAL_TEMPDELTA,
0x00ff},
- {BRCMS_SROM_CCK2GPO, 0x000000f0, 0, SROM4_2G_CCKPO, 0xffff},
{BRCMS_SROM_CCK2GPO, 0x00000100, 0, SROM8_2G_CCKPO, 0xffff},
- {BRCMS_SROM_OFDM2GPO, 0x000000f0, SRFL_MORE, SROM4_2G_OFDMPO, 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM4_2G_OFDMPO + 1, 0xffff},
- {BRCMS_SROM_OFDM5GPO, 0x000000f0, SRFL_MORE, SROM4_5G_OFDMPO, 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM4_5G_OFDMPO + 1, 0xffff},
- {BRCMS_SROM_OFDM5GLPO, 0x000000f0, SRFL_MORE, SROM4_5GL_OFDMPO, 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM4_5GL_OFDMPO + 1, 0xffff},
- {BRCMS_SROM_OFDM5GHPO, 0x000000f0, SRFL_MORE, SROM4_5GH_OFDMPO, 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM4_5GH_OFDMPO + 1, 0xffff},
{BRCMS_SROM_OFDM2GPO, 0x00000100, SRFL_MORE, SROM8_2G_OFDMPO, 0xffff},
{BRCMS_SROM_CONT, 0, 0, SROM8_2G_OFDMPO + 1, 0xffff},
{BRCMS_SROM_OFDM5GPO, 0x00000100, SRFL_MORE, SROM8_5G_OFDMPO, 0xffff},
@@ -668,38 +450,6 @@ static const struct brcms_sromvar pci_sromvars[] = {
{BRCMS_SROM_CONT, 0, 0, SROM8_5GL_OFDMPO + 1, 0xffff},
{BRCMS_SROM_OFDM5GHPO, 0x00000100, SRFL_MORE, SROM8_5GH_OFDMPO, 0xffff},
{BRCMS_SROM_CONT, 0, 0, SROM8_5GH_OFDMPO + 1, 0xffff},
- {BRCMS_SROM_MCS2GPO0, 0x000000f0, 0, SROM4_2G_MCSPO, 0xffff},
- {BRCMS_SROM_MCS2GPO1, 0x000000f0, 0, SROM4_2G_MCSPO + 1, 0xffff},
- {BRCMS_SROM_MCS2GPO2, 0x000000f0, 0, SROM4_2G_MCSPO + 2, 0xffff},
- {BRCMS_SROM_MCS2GPO3, 0x000000f0, 0, SROM4_2G_MCSPO + 3, 0xffff},
- {BRCMS_SROM_MCS2GPO4, 0x000000f0, 0, SROM4_2G_MCSPO + 4, 0xffff},
- {BRCMS_SROM_MCS2GPO5, 0x000000f0, 0, SROM4_2G_MCSPO + 5, 0xffff},
- {BRCMS_SROM_MCS2GPO6, 0x000000f0, 0, SROM4_2G_MCSPO + 6, 0xffff},
- {BRCMS_SROM_MCS2GPO7, 0x000000f0, 0, SROM4_2G_MCSPO + 7, 0xffff},
- {BRCMS_SROM_MCS5GPO0, 0x000000f0, 0, SROM4_5G_MCSPO, 0xffff},
- {BRCMS_SROM_MCS5GPO1, 0x000000f0, 0, SROM4_5G_MCSPO + 1, 0xffff},
- {BRCMS_SROM_MCS5GPO2, 0x000000f0, 0, SROM4_5G_MCSPO + 2, 0xffff},
- {BRCMS_SROM_MCS5GPO3, 0x000000f0, 0, SROM4_5G_MCSPO + 3, 0xffff},
- {BRCMS_SROM_MCS5GPO4, 0x000000f0, 0, SROM4_5G_MCSPO + 4, 0xffff},
- {BRCMS_SROM_MCS5GPO5, 0x000000f0, 0, SROM4_5G_MCSPO + 5, 0xffff},
- {BRCMS_SROM_MCS5GPO6, 0x000000f0, 0, SROM4_5G_MCSPO + 6, 0xffff},
- {BRCMS_SROM_MCS5GPO7, 0x000000f0, 0, SROM4_5G_MCSPO + 7, 0xffff},
- {BRCMS_SROM_MCS5GLPO0, 0x000000f0, 0, SROM4_5GL_MCSPO, 0xffff},
- {BRCMS_SROM_MCS5GLPO1, 0x000000f0, 0, SROM4_5GL_MCSPO + 1, 0xffff},
- {BRCMS_SROM_MCS5GLPO2, 0x000000f0, 0, SROM4_5GL_MCSPO + 2, 0xffff},
- {BRCMS_SROM_MCS5GLPO3, 0x000000f0, 0, SROM4_5GL_MCSPO + 3, 0xffff},
- {BRCMS_SROM_MCS5GLPO4, 0x000000f0, 0, SROM4_5GL_MCSPO + 4, 0xffff},
- {BRCMS_SROM_MCS5GLPO5, 0x000000f0, 0, SROM4_5GL_MCSPO + 5, 0xffff},
- {BRCMS_SROM_MCS5GLPO6, 0x000000f0, 0, SROM4_5GL_MCSPO + 6, 0xffff},
- {BRCMS_SROM_MCS5GLPO7, 0x000000f0, 0, SROM4_5GL_MCSPO + 7, 0xffff},
- {BRCMS_SROM_MCS5GHPO0, 0x000000f0, 0, SROM4_5GH_MCSPO, 0xffff},
- {BRCMS_SROM_MCS5GHPO1, 0x000000f0, 0, SROM4_5GH_MCSPO + 1, 0xffff},
- {BRCMS_SROM_MCS5GHPO2, 0x000000f0, 0, SROM4_5GH_MCSPO + 2, 0xffff},
- {BRCMS_SROM_MCS5GHPO3, 0x000000f0, 0, SROM4_5GH_MCSPO + 3, 0xffff},
- {BRCMS_SROM_MCS5GHPO4, 0x000000f0, 0, SROM4_5GH_MCSPO + 4, 0xffff},
- {BRCMS_SROM_MCS5GHPO5, 0x000000f0, 0, SROM4_5GH_MCSPO + 5, 0xffff},
- {BRCMS_SROM_MCS5GHPO6, 0x000000f0, 0, SROM4_5GH_MCSPO + 6, 0xffff},
- {BRCMS_SROM_MCS5GHPO7, 0x000000f0, 0, SROM4_5GH_MCSPO + 7, 0xffff},
{BRCMS_SROM_MCS2GPO0, 0x00000100, 0, SROM8_2G_MCSPO, 0xffff},
{BRCMS_SROM_MCS2GPO1, 0x00000100, 0, SROM8_2G_MCSPO + 1, 0xffff},
{BRCMS_SROM_MCS2GPO2, 0x00000100, 0, SROM8_2G_MCSPO + 2, 0xffff},
@@ -732,10 +482,6 @@ static const struct brcms_sromvar pci_sromvars[] = {
{BRCMS_SROM_MCS5GHPO5, 0x00000100, 0, SROM8_5GH_MCSPO + 5, 0xffff},
{BRCMS_SROM_MCS5GHPO6, 0x00000100, 0, SROM8_5GH_MCSPO + 6, 0xffff},
{BRCMS_SROM_MCS5GHPO7, 0x00000100, 0, SROM8_5GH_MCSPO + 7, 0xffff},
- {BRCMS_SROM_CDDPO, 0x000000f0, 0, SROM4_CDDPO, 0xffff},
- {BRCMS_SROM_STBCPO, 0x000000f0, 0, SROM4_STBCPO, 0xffff},
- {BRCMS_SROM_BW40PO, 0x000000f0, 0, SROM4_BW40PO, 0xffff},
- {BRCMS_SROM_BWDUPPO, 0x000000f0, 0, SROM4_BWDUPPO, 0xffff},
{BRCMS_SROM_CDDPO, 0x00000100, 0, SROM8_CDDPO, 0xffff},
{BRCMS_SROM_STBCPO, 0x00000100, 0, SROM8_STBCPO, 0xffff},
{BRCMS_SROM_BW40PO, 0x00000100, 0, SROM8_BW40PO, 0xffff},
@@ -811,34 +557,6 @@ static const struct brcms_sromvar pci_sromvars[] = {
};
static const struct brcms_sromvar perpath_pci_sromvars[] = {
- {BRCMS_SROM_MAXP2GA0, 0x000000f0, 0, SROM4_2G_ITT_MAXP, 0x00ff},
- {BRCMS_SROM_ITT2GA0, 0x000000f0, 0, SROM4_2G_ITT_MAXP, 0xff00},
- {BRCMS_SROM_ITT5GA0, 0x000000f0, 0, SROM4_5G_ITT_MAXP, 0xff00},
- {BRCMS_SROM_PA2GW0A0, 0x000000f0, SRFL_PRHEX, SROM4_2G_PA, 0xffff},
- {BRCMS_SROM_PA2GW1A0, 0x000000f0, SRFL_PRHEX, SROM4_2G_PA + 1, 0xffff},
- {BRCMS_SROM_PA2GW2A0, 0x000000f0, SRFL_PRHEX, SROM4_2G_PA + 2, 0xffff},
- {BRCMS_SROM_PA2GW3A0, 0x000000f0, SRFL_PRHEX, SROM4_2G_PA + 3, 0xffff},
- {BRCMS_SROM_MAXP5GA0, 0x000000f0, 0, SROM4_5G_ITT_MAXP, 0x00ff},
- {BRCMS_SROM_MAXP5GHA0, 0x000000f0, 0, SROM4_5GLH_MAXP, 0x00ff},
- {BRCMS_SROM_MAXP5GLA0, 0x000000f0, 0, SROM4_5GLH_MAXP, 0xff00},
- {BRCMS_SROM_PA5GW0A0, 0x000000f0, SRFL_PRHEX, SROM4_5G_PA, 0xffff},
- {BRCMS_SROM_PA5GW1A0, 0x000000f0, SRFL_PRHEX, SROM4_5G_PA + 1, 0xffff},
- {BRCMS_SROM_PA5GW2A0, 0x000000f0, SRFL_PRHEX, SROM4_5G_PA + 2, 0xffff},
- {BRCMS_SROM_PA5GW3A0, 0x000000f0, SRFL_PRHEX, SROM4_5G_PA + 3, 0xffff},
- {BRCMS_SROM_PA5GLW0A0, 0x000000f0, SRFL_PRHEX, SROM4_5GL_PA, 0xffff},
- {BRCMS_SROM_PA5GLW1A0, 0x000000f0, SRFL_PRHEX, SROM4_5GL_PA + 1,
- 0xffff},
- {BRCMS_SROM_PA5GLW2A0, 0x000000f0, SRFL_PRHEX, SROM4_5GL_PA + 2,
- 0xffff},
- {BRCMS_SROM_PA5GLW3A0, 0x000000f0, SRFL_PRHEX, SROM4_5GL_PA + 3,
- 0xffff},
- {BRCMS_SROM_PA5GHW0A0, 0x000000f0, SRFL_PRHEX, SROM4_5GH_PA, 0xffff},
- {BRCMS_SROM_PA5GHW1A0, 0x000000f0, SRFL_PRHEX, SROM4_5GH_PA + 1,
- 0xffff},
- {BRCMS_SROM_PA5GHW2A0, 0x000000f0, SRFL_PRHEX, SROM4_5GH_PA + 2,
- 0xffff},
- {BRCMS_SROM_PA5GHW3A0, 0x000000f0, SRFL_PRHEX, SROM4_5GH_PA + 3,
- 0xffff},
{BRCMS_SROM_MAXP2GA0, 0xffffff00, 0, SROM8_2G_ITT_MAXP, 0x00ff},
{BRCMS_SROM_ITT2GA0, 0xffffff00, 0, SROM8_2G_ITT_MAXP, 0xff00},
{BRCMS_SROM_ITT5GA0, 0xffffff00, 0, SROM8_5G_ITT_MAXP, 0xff00},
@@ -868,24 +586,6 @@ static const struct brcms_sromvar perpath_pci_sromvars[] = {
* shared between devices. */
static u8 brcms_srom_crc8_table[CRC8_TABLE_SIZE];
-static u16 __iomem *
-srom_window_address(struct si_pub *sih, u8 __iomem *curmap)
-{
- if (sih->ccrev < 32)
- return (u16 __iomem *)(curmap + PCI_BAR0_SPROM_OFFSET);
- if (sih->cccaps & CC_CAP_SROM)
- return (u16 __iomem *)
- (curmap + PCI_16KB0_CCREGS_OFFSET + CC_SROM_OTP);
-
- return NULL;
-}
-
-/* Parse SROM and create name=value pairs. 'srom' points to
- * the SROM word array. 'off' specifies the offset of the
- * first word 'srom' points to, which should be either 0 or
- * SROM3_SWRG_OFF (full SROM or software region).
- */
-
static uint mask_shift(u16 mask)
{
uint i;
@@ -906,18 +606,16 @@ static uint mask_width(u16 mask)
return 0;
}
-static inline void ltoh16_buf(u16 *buf, unsigned int size)
+static inline void le16_to_cpu_buf(u16 *buf, uint nwords)
{
- size /= 2;
- while (size--)
- *(buf + size) = le16_to_cpu(*(__le16 *)(buf + size));
+ while (nwords--)
+ *(buf + nwords) = le16_to_cpu(*(__le16 *)(buf + nwords));
}
-static inline void htol16_buf(u16 *buf, unsigned int size)
+static inline void cpu_to_le16_buf(u16 *buf, uint nwords)
{
- size /= 2;
- while (size--)
- *(__le16 *)(buf + size) = cpu_to_le16(*(buf + size));
+ while (nwords--)
+ *(__le16 *)(buf + nwords) = cpu_to_le16(*(buf + nwords));
}
/*
@@ -929,11 +627,14 @@ _initvars_srom_pci(u8 sromrev, u16 *srom, struct list_head *var_list)
struct brcms_srom_list_head *entry;
enum brcms_srom_id id;
u16 w;
- u32 val;
+ u32 val = 0;
const struct brcms_sromvar *srv;
uint width;
uint flags;
u32 sr = (1 << sromrev);
+ uint p;
+ uint pb = SROM8_PATH0;
+ const uint psz = SROM8_PATH1 - SROM8_PATH0;
/* first store the srom revision */
entry = kzalloc(sizeof(struct brcms_srom_list_head), GFP_KERNEL);
@@ -1031,90 +732,93 @@ _initvars_srom_pci(u8 sromrev, u16 *srom, struct list_head *var_list)
list_add(&entry->var_list, var_list);
}
- if (sromrev >= 4) {
- /* Do per-path variables */
- uint p, pb, psz;
-
- if (sromrev >= 8) {
- pb = SROM8_PATH0;
- psz = SROM8_PATH1 - SROM8_PATH0;
- } else {
- pb = SROM4_PATH0;
- psz = SROM4_PATH1 - SROM4_PATH0;
- }
-
- for (p = 0; p < MAX_PATH_SROM; p++) {
- for (srv = perpath_pci_sromvars;
- srv->varid != BRCMS_SROM_NULL; srv++) {
- if ((srv->revmask & sr) == 0)
- continue;
+ for (p = 0; p < MAX_PATH_SROM; p++) {
+ for (srv = perpath_pci_sromvars;
+ srv->varid != BRCMS_SROM_NULL; srv++) {
+ if ((srv->revmask & sr) == 0)
+ continue;
- if (srv->flags & SRFL_NOVAR)
- continue;
+ if (srv->flags & SRFL_NOVAR)
+ continue;
- w = srom[pb + srv->off];
- val = (w & srv->mask) >> mask_shift(srv->mask);
- width = mask_width(srv->mask);
+ w = srom[pb + srv->off];
+ val = (w & srv->mask) >> mask_shift(srv->mask);
+ width = mask_width(srv->mask);
- /* Cheating: no per-path var is more than
- * 1 word */
- if ((srv->flags & SRFL_NOFFS)
- && ((int)val == (1 << width) - 1))
- continue;
+ /* Cheating: no per-path var is more than
+ * 1 word */
+ if ((srv->flags & SRFL_NOFFS)
+ && ((int)val == (1 << width) - 1))
+ continue;
- entry =
- kzalloc(sizeof(struct brcms_srom_list_head),
- GFP_KERNEL);
- entry->varid = srv->varid+p;
- entry->var_type = BRCMS_SROM_UNUMBER;
- entry->uval = val;
- list_add(&entry->var_list, var_list);
- }
- pb += psz;
+ entry =
+ kzalloc(sizeof(struct brcms_srom_list_head),
+ GFP_KERNEL);
+ entry->varid = srv->varid+p;
+ entry->var_type = BRCMS_SROM_UNUMBER;
+ entry->uval = val;
+ list_add(&entry->var_list, var_list);
}
+ pb += psz;
}
}
/*
+ * The crc check is done on a little-endian array, so we need
+ * to switch the bytes around before checking the crc (and
+ * then switch them back).
+ */
+static int do_crc_check(u16 *buf, unsigned nwords)
+{
+ u8 crc;
+
+ cpu_to_le16_buf(buf, nwords);
+ crc = crc8(brcms_srom_crc8_table, (void *)buf, nwords << 1, CRC8_INIT_VALUE);
+ le16_to_cpu_buf(buf, nwords);
+
+ return crc == CRC8_GOOD_VALUE(brcms_srom_crc8_table);
+}
+
+/*
* Read in and validate sprom.
* Return 0 on success, nonzero on error.
*/
static int
-sprom_read_pci(struct si_pub *sih, u16 __iomem *sprom, uint wordoff,
- u16 *buf, uint nwords, bool check_crc)
+sprom_read_pci(struct si_pub *sih, u16 *buf, uint nwords, bool check_crc)
{
int err = 0;
uint i;
+ struct bcma_device *core;
+ uint sprom_offset;
+
+ /* determine core to read */
+ if (ai_get_ccrev(sih) < 32) {
+ core = ai_findcore(sih, BCMA_CORE_80211, 0);
+ sprom_offset = PCI_BAR0_SPROM_OFFSET;
+ } else {
+ core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
+ sprom_offset = CHIPCREGOFFS(sromotp);
+ }
/* read the sprom */
for (i = 0; i < nwords; i++)
- buf[i] = R_REG(&sprom[wordoff + i]);
+ buf[i] = bcma_read16(core, sprom_offset+i*2);
- if (check_crc) {
-
- if (buf[0] == 0xffff)
- /*
- * The hardware thinks that an srom that starts with
- * 0xffff is blank, regardless of the rest of the
- * content, so declare it bad.
- */
- return -ENODATA;
+ if (buf[0] == 0xffff)
+ /*
+ * The hardware thinks that an srom that starts with
+ * 0xffff is blank, regardless of the rest of the
+ * content, so declare it bad.
+ */
+ return -ENODATA;
- /* fixup the endianness so crc8 will pass */
- htol16_buf(buf, nwords * 2);
- if (crc8(brcms_srom_crc8_table, (u8 *) buf, nwords * 2,
- CRC8_INIT_VALUE) !=
- CRC8_GOOD_VALUE(brcms_srom_crc8_table))
- /* DBG only pci always read srom4 first, then srom8/9 */
- err = -EIO;
+ if (check_crc && !do_crc_check(buf, nwords))
+ err = -EIO;
- /* now correct the endianness of the byte array */
- ltoh16_buf(buf, nwords * 2);
- }
return err;
}
-static int otp_read_pci(struct si_pub *sih, u16 *buf, uint bufsz)
+static int otp_read_pci(struct si_pub *sih, u16 *buf, uint nwords)
{
u8 *otp;
uint sz = OTP_SZ_MAX / 2; /* size in words */
@@ -1126,7 +830,8 @@ static int otp_read_pci(struct si_pub *sih, u16 *buf, uint bufsz)
err = otp_read_region(sih, OTP_HW_RGN, (u16 *) otp, &sz);
- memcpy(buf, otp, bufsz);
+ sz = min_t(uint, sz, nwords);
+ memcpy(buf, otp, sz * 2);
kfree(otp);
@@ -1139,13 +844,13 @@ static int otp_read_pci(struct si_pub *sih, u16 *buf, uint bufsz)
return -ENODATA;
/* fixup the endianness so crc8 will pass */
- htol16_buf(buf, bufsz);
- if (crc8(brcms_srom_crc8_table, (u8 *) buf, SROM4_WORDS * 2,
+ cpu_to_le16_buf(buf, sz);
+ if (crc8(brcms_srom_crc8_table, (u8 *) buf, sz * 2,
CRC8_INIT_VALUE) != CRC8_GOOD_VALUE(brcms_srom_crc8_table))
err = -EIO;
-
- /* now correct the endianness of the byte array */
- ltoh16_buf(buf, bufsz);
+ else
+ /* now correct the endianness of the byte array */
+ le16_to_cpu_buf(buf, sz);
return err;
}
@@ -1154,10 +859,9 @@ static int otp_read_pci(struct si_pub *sih, u16 *buf, uint bufsz)
* Initialize nonvolatile variable table from sprom.
* Return 0 on success, nonzero on error.
*/
-static int initvars_srom_pci(struct si_pub *sih, void __iomem *curmap)
+int srom_var_init(struct si_pub *sih)
{
u16 *srom;
- u16 __iomem *sromwindow;
u8 sromrev = 0;
u32 sr;
int err = 0;
@@ -1169,33 +873,17 @@ static int initvars_srom_pci(struct si_pub *sih, void __iomem *curmap)
if (!srom)
return -ENOMEM;
- sromwindow = srom_window_address(sih, curmap);
-
crc8_populate_lsb(brcms_srom_crc8_table, SROM_CRC8_POLY);
if (ai_is_sprom_available(sih)) {
- err = sprom_read_pci(sih, sromwindow, 0, srom, SROM_WORDS,
- true);
-
- if ((srom[SROM4_SIGN] == SROM4_SIGNATURE) ||
- (((sih->buscoretype == PCIE_CORE_ID)
- && (sih->buscorerev >= 6))
- || ((sih->buscoretype == PCI_CORE_ID)
- && (sih->buscorerev >= 0xe)))) {
- /* sromrev >= 4, read more */
- err = sprom_read_pci(sih, sromwindow, 0, srom,
- SROM4_WORDS, true);
- sromrev = srom[SROM4_CRCREV] & 0xff;
- } else if (err == 0) {
- /* srom is good and is rev < 4 */
+ err = sprom_read_pci(sih, srom, SROM4_WORDS, true);
+
+ if (err == 0)
+ /* srom read and passed crc */
/* top word of sprom contains version and crc8 */
- sromrev = srom[SROM_CRCREV] & 0xff;
- /* bcm4401 sroms misprogrammed */
- if (sromrev == 0x10)
- sromrev = 1;
- }
+ sromrev = srom[SROM4_CRCREV] & 0xff;
} else {
/* Use OTP if SPROM not available */
- err = otp_read_pci(sih, srom, SROM_MAX);
+ err = otp_read_pci(sih, srom, SROM4_WORDS);
if (err == 0)
/* OTP only contain SROM rev8/rev9 for now */
sromrev = srom[SROM4_CRCREV] & 0xff;
@@ -1208,10 +896,9 @@ static int initvars_srom_pci(struct si_pub *sih, void __iomem *curmap)
sr = 1 << sromrev;
/*
- * srom version check: Current valid versions: 1, 2, 3, 4, 5, 8,
- * 9
+ * srom version check: Current valid versions: 8, 9
*/
- if ((sr & 0x33e) == 0) {
+ if ((sr & 0x300) == 0) {
err = -EINVAL;
goto errout;
}
@@ -1238,21 +925,6 @@ void srom_free_vars(struct si_pub *sih)
kfree(entry);
}
}
-/*
- * Initialize local vars from the right source for this platform.
- * Return 0 on success, nonzero on error.
- */
-int srom_var_init(struct si_pub *sih, void __iomem *curmap)
-{
- uint len;
-
- len = 0;
-
- if (curmap != NULL)
- return initvars_srom_pci(sih, curmap);
-
- return -EINVAL;
-}
/*
* Search the name=value vars for a specific one and return its value.
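
As a rough, self-contained sketch of the swap-check-swap byte-order pattern that the new do_crc_check()/sprom_read_pci() pair above relies on; the table and function names below are illustrative only, and crc8_populate_lsb(table, SROM_CRC8_POLY) is assumed to have been called once beforehand, as srom_var_init() does:

    #include <linux/types.h>
    #include <linux/crc8.h>
    #include <asm/byteorder.h>

    static u8 example_crc_table[CRC8_TABLE_SIZE];

    /* Validate CPU-order u16 SROM words against the CRC-8 that was computed
     * over the little-endian image as it sits in hardware. */
    static bool example_srom_image_valid(u16 *words, uint nwords)
    {
            uint i;
            bool ok;

            /* put the words back into little-endian layout */
            for (i = 0; i < nwords; i++)
                    *(__le16 *)(words + i) = cpu_to_le16(words[i]);

            ok = crc8(example_crc_table, (u8 *)words, nwords * 2,
                      CRC8_INIT_VALUE) == CRC8_GOOD_VALUE(example_crc_table);

            /* restore CPU byte order before the caller parses the words */
            for (i = 0; i < nwords; i++)
                    words[i] = le16_to_cpu(*(__le16 *)(words + i));

            return ok;
    }
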
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/srom.h b/drivers/net/wireless/brcm80211/brcmsmac/srom.h
index 708c43ff51cc..f2a58f262c99 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/srom.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/srom.h
@@ -20,15 +20,10 @@
#include "types.h"
/* Prototypes */
-extern int srom_var_init(struct si_pub *sih, void __iomem *curmap);
+extern int srom_var_init(struct si_pub *sih);
extern void srom_free_vars(struct si_pub *sih);
extern int srom_read(struct si_pub *sih, uint bus, void *curmap,
uint byteoff, uint nbytes, u16 *buf, bool check_crc);
-/* parse standard PCMCIA cis, normally used by SB/PCMCIA/SDIO/SPI/OTP
- * and extract from it into name=value pairs
- */
-extern int srom_parsecis(u8 **pcis, uint ciscnt,
- char **vars, uint *count);
#endif /* _BRCM_SROM_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/types.h b/drivers/net/wireless/brcm80211/brcmsmac/types.h
index 27a814b07462..e11ae83111e4 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/types.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/types.h
@@ -250,66 +250,18 @@ do { \
wiphy_err(dev, "%s: " fmt, __func__, ##args); \
} while (0)
-/*
- * Register access macros.
- *
- * These macro's take a pointer to the address to read as one of their
- * arguments. The macro itself deduces the size of the IO transaction (u8, u16
- * or u32). Advantage of this approach in combination with using a struct to
- * define the registers in a register block, is that access size and access
- * location are defined in only one spot. This reduces the risk of the
- * programmer trying to use an unsupported transaction size on a register.
- *
- */
-
-#define R_REG(r) \
- ({ \
- __typeof(*(r)) __osl_v; \
- switch (sizeof(*(r))) { \
- case sizeof(u8): \
- __osl_v = readb((u8 __iomem *)(r)); \
- break; \
- case sizeof(u16): \
- __osl_v = readw((u16 __iomem *)(r)); \
- break; \
- case sizeof(u32): \
- __osl_v = readl((u32 __iomem *)(r)); \
- break; \
- } \
- __osl_v; \
- })
-
-#define W_REG(r, v) do { \
- switch (sizeof(*(r))) { \
- case sizeof(u8): \
- writeb((u8)((v) & 0xFF), (u8 __iomem *)(r)); \
- break; \
- case sizeof(u16): \
- writew((u16)((v) & 0xFFFF), (u16 __iomem *)(r)); \
- break; \
- case sizeof(u32): \
- writel((u32)(v), (u32 __iomem *)(r)); \
- break; \
- } \
- } while (0)
-
#ifdef CONFIG_BCM47XX
/*
* bcm4716 (which includes 4717 & 4718), plus 4706 on PCIe can reorder
* transactions. As a fix, a read after write is performed on certain places
* in the code. Older chips and the newer 5357 family don't require this fix.
*/
-#define W_REG_FLUSH(r, v) ({ W_REG((r), (v)); (void)R_REG(r); })
+#define bcma_wflush16(c, o, v) \
+ ({ bcma_write16(c, o, v); (void)bcma_read16(c, o); })
#else
-#define W_REG_FLUSH(r, v) W_REG((r), (v))
+#define bcma_wflush16(c, o, v) bcma_write16(c, o, v)
#endif /* CONFIG_BCM47XX */
-#define AND_REG(r, v) W_REG((r), R_REG(r) & (v))
-#define OR_REG(r, v) W_REG((r), R_REG(r) | (v))
-
-#define SET_REG(r, mask, val) \
- W_REG((r), ((R_REG(r) & ~(mask)) | (val)))
-
/* multi-bool data type: set of bools, mbool is true if any is set */
/* set one bool */
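
A short usage sketch of the posted-write flush introduced above; only bcma_wflush16()/bcma_write16()/bcma_read16() come from the patch and the bcma layer, while the function and register offset are hypothetical:

    /* Force a posted 16-bit write to reach the core before continuing; on the
     * affected chips the read-back defeats write reordering. */
    static void example_ack_int(struct bcma_device *core)
    {
            u16 int_ack_off = 0x120;        /* hypothetical register offset */

            bcma_wflush16(core, int_ack_off, 0xffff);
    }
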
diff --git a/drivers/net/wireless/brcm80211/brcmutil/utils.c b/drivers/net/wireless/brcm80211/brcmutil/utils.c
index f27c48910827..b7537f70a795 100644
--- a/drivers/net/wireless/brcm80211/brcmutil/utils.c
+++ b/drivers/net/wireless/brcm80211/brcmutil/utils.c
@@ -16,6 +16,7 @@
#include <linux/netdevice.h>
#include <linux/module.h>
+
#include <brcmu_utils.h>
MODULE_AUTHOR("Broadcom Corporation");
@@ -40,74 +41,20 @@ EXPORT_SYMBOL(brcmu_pkt_buf_get_skb);
/* Free the driver packet. Free the tag if present */
void brcmu_pkt_buf_free_skb(struct sk_buff *skb)
{
- struct sk_buff *nskb;
- int nest = 0;
-
- /* perversion: we use skb->next to chain multi-skb packets */
- while (skb) {
- nskb = skb->next;
- skb->next = NULL;
-
- if (skb->destructor)
- /* cannot kfree_skb() on hard IRQ (net/core/skbuff.c) if
- * destructor exists
- */
- dev_kfree_skb_any(skb);
- else
- /* can free immediately (even in_irq()) if destructor
- * does not exist
- */
- dev_kfree_skb(skb);
-
- nest++;
- skb = nskb;
- }
+ WARN_ON(skb->next);
+ if (skb->destructor)
+ /* cannot kfree_skb() on hard IRQ (net/core/skbuff.c) if
+ * destructor exists
+ */
+ dev_kfree_skb_any(skb);
+ else
+ /* can free immediately (even in_irq()) if destructor
+ * does not exist
+ */
+ dev_kfree_skb(skb);
}
EXPORT_SYMBOL(brcmu_pkt_buf_free_skb);
-
-/* copy a buffer into a pkt buffer chain */
-uint brcmu_pktfrombuf(struct sk_buff *p, uint offset, int len,
- unsigned char *buf)
-{
- uint n, ret = 0;
-
- /* skip 'offset' bytes */
- for (; p && offset; p = p->next) {
- if (offset < (uint) (p->len))
- break;
- offset -= p->len;
- }
-
- if (!p)
- return 0;
-
- /* copy the data */
- for (; p && len; p = p->next) {
- n = min((uint) (p->len) - offset, (uint) len);
- memcpy(p->data + offset, buf, n);
- buf += n;
- len -= n;
- ret += n;
- offset = 0;
- }
-
- return ret;
-}
-EXPORT_SYMBOL(brcmu_pktfrombuf);
-
-/* return total length of buffer chain */
-uint brcmu_pkttotlen(struct sk_buff *p)
-{
- uint total;
-
- total = 0;
- for (; p; p = p->next)
- total += p->len;
- return total;
-}
-EXPORT_SYMBOL(brcmu_pkttotlen);
-
/*
* osl multiple-precedence packet queue
* hi_prec is always >= the number of the highest non-empty precedence
@@ -115,21 +62,13 @@ EXPORT_SYMBOL(brcmu_pkttotlen);
struct sk_buff *brcmu_pktq_penq(struct pktq *pq, int prec,
struct sk_buff *p)
{
- struct pktq_prec *q;
+ struct sk_buff_head *q;
if (pktq_full(pq) || pktq_pfull(pq, prec))
return NULL;
- q = &pq->q[prec];
-
- if (q->head)
- q->tail->prev = p;
- else
- q->head = p;
-
- q->tail = p;
- q->len++;
-
+ q = &pq->q[prec].skblist;
+ skb_queue_tail(q, p);
pq->len++;
if (pq->hi_prec < prec)
@@ -142,20 +81,13 @@ EXPORT_SYMBOL(brcmu_pktq_penq);
struct sk_buff *brcmu_pktq_penq_head(struct pktq *pq, int prec,
struct sk_buff *p)
{
- struct pktq_prec *q;
+ struct sk_buff_head *q;
if (pktq_full(pq) || pktq_pfull(pq, prec))
return NULL;
- q = &pq->q[prec];
-
- if (q->head == NULL)
- q->tail = p;
-
- p->prev = q->head;
- q->head = p;
- q->len++;
-
+ q = &pq->q[prec].skblist;
+ skb_queue_head(q, p);
pq->len++;
if (pq->hi_prec < prec)
@@ -167,53 +99,30 @@ EXPORT_SYMBOL(brcmu_pktq_penq_head);
struct sk_buff *brcmu_pktq_pdeq(struct pktq *pq, int prec)
{
- struct pktq_prec *q;
+ struct sk_buff_head *q;
struct sk_buff *p;
- q = &pq->q[prec];
-
- p = q->head;
+ q = &pq->q[prec].skblist;
+ p = skb_dequeue(q);
if (p == NULL)
return NULL;
- q->head = p->prev;
- if (q->head == NULL)
- q->tail = NULL;
-
- q->len--;
-
pq->len--;
-
- p->prev = NULL;
-
return p;
}
EXPORT_SYMBOL(brcmu_pktq_pdeq);
struct sk_buff *brcmu_pktq_pdeq_tail(struct pktq *pq, int prec)
{
- struct pktq_prec *q;
- struct sk_buff *p, *prev;
-
- q = &pq->q[prec];
+ struct sk_buff_head *q;
+ struct sk_buff *p;
- p = q->head;
+ q = &pq->q[prec].skblist;
+ p = skb_dequeue_tail(q);
if (p == NULL)
return NULL;
- for (prev = NULL; p != q->tail; p = p->prev)
- prev = p;
-
- if (prev)
- prev->prev = NULL;
- else
- q->head = NULL;
-
- q->tail = prev;
- q->len--;
-
pq->len--;
-
return p;
}
EXPORT_SYMBOL(brcmu_pktq_pdeq_tail);
@@ -222,31 +131,17 @@ void
brcmu_pktq_pflush(struct pktq *pq, int prec, bool dir,
bool (*fn)(struct sk_buff *, void *), void *arg)
{
- struct pktq_prec *q;
- struct sk_buff *p, *prev = NULL;
+ struct sk_buff_head *q;
+ struct sk_buff *p, *next;
- q = &pq->q[prec];
- p = q->head;
- while (p) {
+ q = &pq->q[prec].skblist;
+ skb_queue_walk_safe(q, p, next) {
if (fn == NULL || (*fn) (p, arg)) {
- bool head = (p == q->head);
- if (head)
- q->head = p->prev;
- else
- prev->prev = p->prev;
- p->prev = NULL;
+ skb_unlink(p, q);
brcmu_pkt_buf_free_skb(p);
- q->len--;
pq->len--;
- p = (head ? q->head : prev->prev);
- } else {
- prev = p;
- p = p->prev;
}
}
-
- if (q->head == NULL)
- q->tail = NULL;
}
EXPORT_SYMBOL(brcmu_pktq_pflush);
@@ -271,8 +166,10 @@ void brcmu_pktq_init(struct pktq *pq, int num_prec, int max_len)
pq->max = (u16) max_len;
- for (prec = 0; prec < num_prec; prec++)
+ for (prec = 0; prec < num_prec; prec++) {
pq->q[prec].max = pq->max;
+ skb_queue_head_init(&pq->q[prec].skblist);
+ }
}
EXPORT_SYMBOL(brcmu_pktq_init);
@@ -284,13 +181,13 @@ struct sk_buff *brcmu_pktq_peek_tail(struct pktq *pq, int *prec_out)
return NULL;
for (prec = 0; prec < pq->hi_prec; prec++)
- if (pq->q[prec].head)
+ if (!skb_queue_empty(&pq->q[prec].skblist))
break;
if (prec_out)
*prec_out = prec;
- return pq->q[prec].tail;
+ return skb_peek_tail(&pq->q[prec].skblist);
}
EXPORT_SYMBOL(brcmu_pktq_peek_tail);
@@ -303,7 +200,7 @@ int brcmu_pktq_mlen(struct pktq *pq, uint prec_bmp)
for (prec = 0; prec <= pq->hi_prec; prec++)
if (prec_bmp & (1 << prec))
- len += pq->q[prec].len;
+ len += pq->q[prec].skblist.qlen;
return len;
}
@@ -313,39 +210,32 @@ EXPORT_SYMBOL(brcmu_pktq_mlen);
struct sk_buff *brcmu_pktq_mdeq(struct pktq *pq, uint prec_bmp,
int *prec_out)
{
- struct pktq_prec *q;
+ struct sk_buff_head *q;
struct sk_buff *p;
int prec;
if (pq->len == 0)
return NULL;
- while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
+ while ((prec = pq->hi_prec) > 0 &&
+ skb_queue_empty(&pq->q[prec].skblist))
pq->hi_prec--;
- while ((prec_bmp & (1 << prec)) == 0 || pq->q[prec].head == NULL)
+ while ((prec_bmp & (1 << prec)) == 0 ||
+ skb_queue_empty(&pq->q[prec].skblist))
if (prec-- == 0)
return NULL;
- q = &pq->q[prec];
-
- p = q->head;
+ q = &pq->q[prec].skblist;
+ p = skb_dequeue(q);
if (p == NULL)
return NULL;
- q->head = p->prev;
- if (q->head == NULL)
- q->tail = NULL;
-
- q->len--;
+ pq->len--;
if (prec_out)
*prec_out = prec;
- pq->len--;
-
- p->prev = NULL;
-
return p;
}
EXPORT_SYMBOL(brcmu_pktq_mdeq);
@@ -364,23 +254,3 @@ void brcmu_prpkt(const char *msg, struct sk_buff *p0)
}
EXPORT_SYMBOL(brcmu_prpkt);
#endif /* defined(BCMDBG) */
-
-#if defined(BCMDBG)
-/*
- * print bytes formatted as hex to a string. return the resulting
- * string length
- */
-int brcmu_format_hex(char *str, const void *bytes, int len)
-{
- int i;
- char *p = str;
- const u8 *src = (const u8 *)bytes;
-
- for (i = 0; i < len; i++) {
- p += snprintf(p, 3, "%02X", *src);
- src++;
- }
- return (int)(p - str);
-}
-EXPORT_SYMBOL(brcmu_format_hex);
-#endif /* defined(BCMDBG) */
diff --git a/drivers/net/wireless/brcm80211/include/brcmu_utils.h b/drivers/net/wireless/brcm80211/include/brcmu_utils.h
index 7d0f46e0eb95..ad249a0b4730 100644
--- a/drivers/net/wireless/brcm80211/include/brcmu_utils.h
+++ b/drivers/net/wireless/brcm80211/include/brcmu_utils.h
@@ -65,9 +65,7 @@
#define ETHER_ADDR_STR_LEN 18
struct pktq_prec {
- struct sk_buff *head; /* first packet to dequeue */
- struct sk_buff *tail; /* last packet to dequeue */
- u16 len; /* number of queued packets */
+ struct sk_buff_head skblist;
u16 max; /* maximum number of queued packets */
};
@@ -88,32 +86,32 @@ struct pktq {
static inline int pktq_plen(struct pktq *pq, int prec)
{
- return pq->q[prec].len;
+ return pq->q[prec].skblist.qlen;
}
static inline int pktq_pavail(struct pktq *pq, int prec)
{
- return pq->q[prec].max - pq->q[prec].len;
+ return pq->q[prec].max - pq->q[prec].skblist.qlen;
}
static inline bool pktq_pfull(struct pktq *pq, int prec)
{
- return pq->q[prec].len >= pq->q[prec].max;
+ return pq->q[prec].skblist.qlen >= pq->q[prec].max;
}
static inline bool pktq_pempty(struct pktq *pq, int prec)
{
- return pq->q[prec].len == 0;
+ return skb_queue_empty(&pq->q[prec].skblist);
}
static inline struct sk_buff *pktq_ppeek(struct pktq *pq, int prec)
{
- return pq->q[prec].head;
+ return skb_peek(&pq->q[prec].skblist);
}
static inline struct sk_buff *pktq_ppeek_tail(struct pktq *pq, int prec)
{
- return pq->q[prec].tail;
+ return skb_peek_tail(&pq->q[prec].skblist);
}
extern struct sk_buff *brcmu_pktq_penq(struct pktq *pq, int prec,
@@ -172,24 +170,16 @@ extern void brcmu_pktq_flush(struct pktq *pq, bool dir,
bool (*fn)(struct sk_buff *, void *), void *arg);
/* externs */
-/* packet */
-extern uint brcmu_pktfrombuf(struct sk_buff *p,
- uint offset, int len, unsigned char *buf);
-extern uint brcmu_pkttotlen(struct sk_buff *p);
-
/* ip address */
struct ipv4_addr;
+
+/* externs */
+/* format/print */
#ifdef BCMDBG
extern void brcmu_prpkt(const char *msg, struct sk_buff *p0);
#else
#define brcmu_prpkt(a, b)
#endif /* BCMDBG */
-/* externs */
-/* format/print */
-#if defined(BCMDBG)
-extern int brcmu_format_hex(char *str, const void *bytes, int len);
-#endif
-
#endif /* _BRCMU_UTILS_H_ */
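
For context, a minimal sketch of the <linux/skbuff.h> queue primitives that the precedence queue above now builds on; the demo function is hypothetical, the calls are the standard API used by the reworked brcmu_pktq_* helpers:

    #include <linux/skbuff.h>

    static void example_skb_queue_usage(struct sk_buff *pkt)
    {
            struct sk_buff_head q;
            struct sk_buff *p;

            skb_queue_head_init(&q);        /* replaces head/tail/len fields */
            skb_queue_tail(&q, pkt);        /* enqueue, as brcmu_pktq_penq() */

            p = skb_dequeue(&q);            /* dequeue, as brcmu_pktq_pdeq() */
            if (p)
                    dev_kfree_skb(p);

            WARN_ON(!skb_queue_empty(&q));  /* qlen bookkeeping is implicit */
    }
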
diff --git a/drivers/net/wireless/brcm80211/include/chipcommon.h b/drivers/net/wireless/brcm80211/include/chipcommon.h
index fefabc39e646..f96834a7c055 100644
--- a/drivers/net/wireless/brcm80211/include/chipcommon.h
+++ b/drivers/net/wireless/brcm80211/include/chipcommon.h
@@ -19,6 +19,8 @@
#include "defs.h" /* for PAD macro */
+#define CHIPCREGOFFS(field) offsetof(struct chipcregs, field)
+
struct chipcregs {
u32 chipid; /* 0x0 */
u32 capabilities;
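
A brief sketch, under the same assumptions as the patch, of how CHIPCREGOFFS() pairs with the bcma accessors; the wrapper function is hypothetical, the fields are ones the patch itself references:

    static void example_chipc_reads(struct bcma_device *cc)
    {
            /* offsetof() turns a struct chipcregs field into the bus offset
             * that bcma_read16()/bcma_read32() expect, as sprom_read_pci()
             * does with CHIPCREGOFFS(sromotp) above. */
            u32 chipid = bcma_read32(cc, CHIPCREGOFFS(chipid));
            u16 word0 = bcma_read16(cc, CHIPCREGOFFS(sromotp));

            (void)chipid;
            (void)word0;
    }
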
diff --git a/drivers/net/wireless/brcm80211/include/defs.h b/drivers/net/wireless/brcm80211/include/defs.h
index 1e5f310af1e7..f0d8c04a9c8c 100644
--- a/drivers/net/wireless/brcm80211/include/defs.h
+++ b/drivers/net/wireless/brcm80211/include/defs.h
@@ -62,7 +62,6 @@
#define WL_RADIO_SW_DISABLE (1<<0)
#define WL_RADIO_HW_DISABLE (1<<1)
-#define WL_RADIO_MPC_DISABLE (1<<2)
/* some countries don't support any channel */
#define WL_RADIO_COUNTRY_DISABLE (1<<3)
diff --git a/drivers/net/wireless/brcm80211/include/soc.h b/drivers/net/wireless/brcm80211/include/soc.h
index 4fcb956ad9e0..4e9b7e4827ea 100644
--- a/drivers/net/wireless/brcm80211/include/soc.h
+++ b/drivers/net/wireless/brcm80211/include/soc.h
@@ -77,8 +77,9 @@
#define DMEMS_CORE_ID 0x835 /* SDR/DDR1 memory controller core */
#define DEF_SHIM_COMP 0x837 /* SHIM component in ubus/6362 */
#define OOB_ROUTER_CORE_ID 0x367 /* OOB router core ID */
-/* Default component, in ai chips it maps all unused address ranges */
-#define DEF_AI_COMP 0xfff
+#define DEF_AI_COMP 0xfff /* Default component, in ai chips it
+ * maps all unused address ranges
+ */
/* Common core control flags */
#define SICF_BIST_EN 0x8000
@@ -87,4 +88,11 @@
#define SICF_FGC 0x0002
#define SICF_CLOCK_EN 0x0001
+/* Common core status flags */
+#define SISF_BIST_DONE 0x8000
+#define SISF_BIST_ERROR 0x4000
+#define SISF_GATED_CLK 0x2000
+#define SISF_DMA64 0x1000
+#define SISF_CORE_BITS 0x0fff
+
#endif /* _BRCM_SOC_H */
diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c
index 5441ad195119..89e9d3a78c3c 100644
--- a/drivers/net/wireless/hostap/hostap_cs.c
+++ b/drivers/net/wireless/hostap/hostap_cs.c
@@ -656,6 +656,9 @@ static const struct pcmcia_device_id hostap_cs_ids[] = {
"Addtron", "AWP-100 Wireless PCMCIA", "Version 01.02",
0xe6ec52ce, 0x08649af2, 0x4b74baa0),
PCMCIA_DEVICE_PROD_ID123(
+ "Canon", "Wireless LAN CF Card K30225", "Version 01.00",
+ 0x96ef6fe2, 0x263fcbab, 0xa57adb8c),
+ PCMCIA_DEVICE_PROD_ID123(
"D", "Link DWL-650 11Mbps WLAN Card", "Version 01.02",
0x71b18589, 0xb6f1b0ab, 0x4b74baa0),
PCMCIA_DEVICE_PROD_ID123(
diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/hostap/hostap_ioctl.c
index 045a93645a3d..18054d9c6688 100644
--- a/drivers/net/wireless/hostap/hostap_ioctl.c
+++ b/drivers/net/wireless/hostap/hostap_ioctl.c
@@ -3872,8 +3872,8 @@ static void prism2_get_drvinfo(struct net_device *dev,
iface = netdev_priv(dev);
local = iface->local;
- strncpy(info->driver, "hostap", sizeof(info->driver) - 1);
- snprintf(info->fw_version, sizeof(info->fw_version) - 1,
+ strlcpy(info->driver, "hostap", sizeof(info->driver));
+ snprintf(info->fw_version, sizeof(info->fw_version),
"%d.%d.%d", (local->sta_fw_ver >> 16) & 0xff,
(local->sta_fw_ver >> 8) & 0xff,
local->sta_fw_ver & 0xff);
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 127e9c63beaf..a0e5c21d3657 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -5981,8 +5981,8 @@ static void ipw_ethtool_get_drvinfo(struct net_device *dev,
struct ipw2100_priv *priv = libipw_priv(dev);
char fw_ver[64], ucode_ver[64];
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
ipw2100_get_fwversion(priv, fw_ver, sizeof(fw_ver));
ipw2100_get_ucodeversion(priv, ucode_ver, sizeof(ucode_ver));
@@ -5990,7 +5990,8 @@ static void ipw_ethtool_get_drvinfo(struct net_device *dev,
snprintf(info->fw_version, sizeof(info->fw_version), "%s:%d:%s",
fw_ver, priv->eeprom_version, ucode_ver);
- strcpy(info->bus_info, pci_name(priv->pci_dev));
+ strlcpy(info->bus_info, pci_name(priv->pci_dev),
+ sizeof(info->bus_info));
}
static u32 ipw2100_ethtool_get_link(struct net_device *dev)
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 99a710dfe771..4fcdac63a300 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -131,6 +131,14 @@ static struct ieee80211_rate ipw2200_rates[] = {
#define ipw2200_bg_rates (ipw2200_rates + 0)
#define ipw2200_num_bg_rates 12
+/* Ugly macro to convert literal channel numbers into their MHz equivalents.
+ * There are certainly some conditions that will break this (like feeding it '30'),
+ * but they shouldn't arise since nothing talks on channel 30. */
+#define ieee80211chan2mhz(x) \
+ (((x) <= 14) ? \
+ (((x) == 14) ? 2484 : ((x) * 5) + 2407) : \
+ ((x) + 1000) * 5)
+
#ifdef CONFIG_IPW2200_QOS
static int qos_enable = 0;
static int qos_burst_enable = 0;
@@ -7840,7 +7848,7 @@ static void ipw_handle_data_packet_monitor(struct ipw_priv *priv,
* more efficiently than we can parse it. ORDER MATTERS HERE */
struct ipw_rt_hdr *ipw_rt;
- short len = le16_to_cpu(pkt->u.frame.length);
+ unsigned short len = le16_to_cpu(pkt->u.frame.length);
/* We received data from the HW, so stop the watchdog */
dev->trans_start = jiffies;
@@ -8015,7 +8023,7 @@ static void ipw_handle_promiscuous_rx(struct ipw_priv *priv,
s8 signal = frame->rssi_dbm - IPW_RSSI_TO_DBM;
s8 noise = (s8) le16_to_cpu(frame->noise);
u8 rate = frame->rate;
- short len = le16_to_cpu(pkt->u.frame.length);
+ unsigned short len = le16_to_cpu(pkt->u.frame.length);
struct sk_buff *skb;
int hdr_only = 0;
u16 filter = priv->prom_priv->filter;
@@ -10540,8 +10548,8 @@ static void ipw_ethtool_get_drvinfo(struct net_device *dev,
char date[32];
u32 len;
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
len = sizeof(vers);
ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len);
@@ -10550,7 +10558,8 @@ static void ipw_ethtool_get_drvinfo(struct net_device *dev,
snprintf(info->fw_version, sizeof(info->fw_version), "%s (%s)",
vers, date);
- strcpy(info->bus_info, pci_name(p->pci_dev));
+ strlcpy(info->bus_info, pci_name(p->pci_dev),
+ sizeof(info->bus_info));
info->eedump_len = IPW_EEPROM_IMAGE_SIZE;
}
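
As a quick sanity check on the ieee80211chan2mhz() macro added earlier in this file: channel 6 maps to (6 * 5) + 2407 = 2437 MHz, channel 14 is special-cased to 2484 MHz, and a 5 GHz channel such as 36 maps to (36 + 1000) * 5 = 5180 MHz.
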
diff --git a/drivers/net/wireless/ipw2x00/libipw.h b/drivers/net/wireless/ipw2x00/libipw.h
index 70f5586d96bd..8874588fb929 100644
--- a/drivers/net/wireless/ipw2x00/libipw.h
+++ b/drivers/net/wireless/ipw2x00/libipw.h
@@ -66,16 +66,8 @@ extern u32 libipw_debug_level;
do { if (libipw_debug_level & (level)) \
printk(KERN_DEBUG "libipw: %c %s " fmt, \
in_interrupt() ? 'I' : 'U', __func__ , ## args); } while (0)
-static inline bool libipw_ratelimit_debug(u32 level)
-{
- return (libipw_debug_level & level) && net_ratelimit();
-}
#else
#define LIBIPW_DEBUG(level, fmt, args...) do {} while (0)
-static inline bool libipw_ratelimit_debug(u32 level)
-{
- return false;
-}
#endif /* CONFIG_LIBIPW_DEBUG */
/*
@@ -813,9 +805,6 @@ struct libipw_device {
/* WEP and other encryption related settings at the device level */
int open_wep; /* Set to 1 to allow unencrypted frames */
- int reset_on_keychange; /* Set to 1 if the HW needs to be reset on
- * WEP key changes */
-
/* If the host performs {en,de}cryption, then set to 1 */
int host_encrypt;
int host_encrypt_msdu;
@@ -868,7 +857,6 @@ struct libipw_device {
struct libipw_security * sec);
netdev_tx_t (*hard_start_xmit) (struct libipw_txb * txb,
struct net_device * dev, int pri);
- int (*reset_port) (struct net_device * dev);
int (*is_queue_full) (struct net_device * dev, int pri);
int (*handle_management) (struct net_device * dev,
diff --git a/drivers/net/wireless/ipw2x00/libipw_wx.c b/drivers/net/wireless/ipw2x00/libipw_wx.c
index 6623e5052254..1571505b1a38 100644
--- a/drivers/net/wireless/ipw2x00/libipw_wx.c
+++ b/drivers/net/wireless/ipw2x00/libipw_wx.c
@@ -474,17 +474,6 @@ int libipw_wx_set_encode(struct libipw_device *ieee,
if (ieee->set_security)
ieee->set_security(dev, &sec);
- /* Do not reset port if card is in Managed mode since resetting will
- * generate new IEEE 802.11 authentication which may end up in looping
- * with IEEE 802.1X. If your hardware requires a reset after WEP
- * configuration (for example... Prism2), implement the reset_port in
- * the callbacks structures used to initialize the 802.11 stack. */
- if (ieee->reset_on_keychange &&
- ieee->iw_mode != IW_MODE_INFRA &&
- ieee->reset_port && ieee->reset_port(dev)) {
- printk(KERN_DEBUG "%s: reset_port failed\n", dev->name);
- return -EINVAL;
- }
return 0;
}
@@ -688,20 +677,6 @@ int libipw_wx_set_encodeext(struct libipw_device *ieee,
if (ieee->set_security)
ieee->set_security(ieee->dev, &sec);
- /*
- * Do not reset port if card is in Managed mode since resetting will
- * generate new IEEE 802.11 authentication which may end up in looping
- * with IEEE 802.1X. If your hardware requires a reset after WEP
- * configuration (for example... Prism2), implement the reset_port in
- * the callbacks structures used to initialize the 802.11 stack.
- */
- if (ieee->reset_on_keychange &&
- ieee->iw_mode != IW_MODE_INFRA &&
- ieee->reset_port && ieee->reset_port(dev)) {
- LIBIPW_DEBUG_WX("%s: reset_port failed\n", dev->name);
- return -EINVAL;
- }
-
return ret;
}
diff --git a/drivers/net/wireless/iwlegacy/3945-debug.c b/drivers/net/wireless/iwlegacy/3945-debug.c
new file mode 100644
index 000000000000..5e1a19fd354d
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/3945-debug.c
@@ -0,0 +1,505 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *****************************************************************************/
+
+#include "common.h"
+#include "3945.h"
+
+static int
+il3945_stats_flag(struct il_priv *il, char *buf, int bufsz)
+{
+ int p = 0;
+
+ p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n",
+ le32_to_cpu(il->_3945.stats.flag));
+ if (le32_to_cpu(il->_3945.stats.flag) & UCODE_STATS_CLEAR_MSK)
+ p += scnprintf(buf + p, bufsz - p,
+ "\tStatistics have been cleared\n");
+ p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
+ (le32_to_cpu(il->_3945.stats.flag) &
+ UCODE_STATS_FREQUENCY_MSK) ? "2.4 GHz" : "5.2 GHz");
+ p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
+ (le32_to_cpu(il->_3945.stats.flag) &
+ UCODE_STATS_NARROW_BAND_MSK) ? "enabled" : "disabled");
+ return p;
+}
+
+ssize_t
+il3945_ucode_rx_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ int pos = 0;
+ char *buf;
+ int bufsz =
+ sizeof(struct iwl39_stats_rx_phy) * 40 +
+ sizeof(struct iwl39_stats_rx_non_phy) * 40 + 400;
+ ssize_t ret;
+ struct iwl39_stats_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
+ struct iwl39_stats_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
+ struct iwl39_stats_rx_non_phy *general, *accum_general;
+ struct iwl39_stats_rx_non_phy *delta_general, *max_general;
+
+ if (!il_is_alive(il))
+ return -EAGAIN;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IL_ERR("Can not allocate Buffer\n");
+ return -ENOMEM;
+ }
+
+ /*
+	 * The statistic information displayed here is based on
+	 * the last stats notification from uCode and
+	 * might not reflect the current uCode activity.
+ */
+ ofdm = &il->_3945.stats.rx.ofdm;
+ cck = &il->_3945.stats.rx.cck;
+ general = &il->_3945.stats.rx.general;
+ accum_ofdm = &il->_3945.accum_stats.rx.ofdm;
+ accum_cck = &il->_3945.accum_stats.rx.cck;
+ accum_general = &il->_3945.accum_stats.rx.general;
+ delta_ofdm = &il->_3945.delta_stats.rx.ofdm;
+ delta_cck = &il->_3945.delta_stats.rx.cck;
+ delta_general = &il->_3945.delta_stats.rx.general;
+ max_ofdm = &il->_3945.max_delta.rx.ofdm;
+ max_cck = &il->_3945.max_delta.rx.cck;
+ max_general = &il->_3945.max_delta.rx.general;
+
+ pos += il3945_stats_flag(il, buf, bufsz);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "%-32s current"
+ "acumulative delta max\n",
+ "Statistics_Rx - OFDM:");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "ina_cnt:",
+ le32_to_cpu(ofdm->ina_cnt), accum_ofdm->ina_cnt,
+ delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "fina_cnt:",
+ le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
+ delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "plcp_err:",
+ le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
+ delta_ofdm->plcp_err, max_ofdm->plcp_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "crc32_err:",
+ le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
+ delta_ofdm->crc32_err, max_ofdm->crc32_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "overrun_err:",
+ le32_to_cpu(ofdm->overrun_err), accum_ofdm->overrun_err,
+ delta_ofdm->overrun_err, max_ofdm->overrun_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "early_overrun_err:",
+ le32_to_cpu(ofdm->early_overrun_err),
+ accum_ofdm->early_overrun_err,
+ delta_ofdm->early_overrun_err,
+ max_ofdm->early_overrun_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "crc32_good:",
+ le32_to_cpu(ofdm->crc32_good), accum_ofdm->crc32_good,
+ delta_ofdm->crc32_good, max_ofdm->crc32_good);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "false_alarm_cnt:",
+ le32_to_cpu(ofdm->false_alarm_cnt),
+ accum_ofdm->false_alarm_cnt, delta_ofdm->false_alarm_cnt,
+ max_ofdm->false_alarm_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "fina_sync_err_cnt:",
+ le32_to_cpu(ofdm->fina_sync_err_cnt),
+ accum_ofdm->fina_sync_err_cnt,
+ delta_ofdm->fina_sync_err_cnt,
+ max_ofdm->fina_sync_err_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "sfd_timeout:",
+ le32_to_cpu(ofdm->sfd_timeout), accum_ofdm->sfd_timeout,
+ delta_ofdm->sfd_timeout, max_ofdm->sfd_timeout);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "fina_timeout:",
+ le32_to_cpu(ofdm->fina_timeout), accum_ofdm->fina_timeout,
+ delta_ofdm->fina_timeout, max_ofdm->fina_timeout);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "unresponded_rts:",
+ le32_to_cpu(ofdm->unresponded_rts),
+ accum_ofdm->unresponded_rts, delta_ofdm->unresponded_rts,
+ max_ofdm->unresponded_rts);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "rxe_frame_lmt_ovrun:",
+ le32_to_cpu(ofdm->rxe_frame_limit_overrun),
+ accum_ofdm->rxe_frame_limit_overrun,
+ delta_ofdm->rxe_frame_limit_overrun,
+ max_ofdm->rxe_frame_limit_overrun);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "sent_ack_cnt:",
+ le32_to_cpu(ofdm->sent_ack_cnt), accum_ofdm->sent_ack_cnt,
+ delta_ofdm->sent_ack_cnt, max_ofdm->sent_ack_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "sent_cts_cnt:",
+ le32_to_cpu(ofdm->sent_cts_cnt), accum_ofdm->sent_cts_cnt,
+ delta_ofdm->sent_cts_cnt, max_ofdm->sent_cts_cnt);
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "%-32s current"
+ "acumulative delta max\n",
+ "Statistics_Rx - CCK:");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "ina_cnt:",
+ le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
+ delta_cck->ina_cnt, max_cck->ina_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "fina_cnt:",
+ le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
+ delta_cck->fina_cnt, max_cck->fina_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "plcp_err:",
+ le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
+ delta_cck->plcp_err, max_cck->plcp_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "crc32_err:",
+ le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
+ delta_cck->crc32_err, max_cck->crc32_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "overrun_err:",
+ le32_to_cpu(cck->overrun_err), accum_cck->overrun_err,
+ delta_cck->overrun_err, max_cck->overrun_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "early_overrun_err:",
+ le32_to_cpu(cck->early_overrun_err),
+ accum_cck->early_overrun_err,
+ delta_cck->early_overrun_err, max_cck->early_overrun_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "crc32_good:",
+ le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
+ delta_cck->crc32_good, max_cck->crc32_good);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "false_alarm_cnt:",
+ le32_to_cpu(cck->false_alarm_cnt),
+ accum_cck->false_alarm_cnt, delta_cck->false_alarm_cnt,
+ max_cck->false_alarm_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "fina_sync_err_cnt:",
+ le32_to_cpu(cck->fina_sync_err_cnt),
+ accum_cck->fina_sync_err_cnt,
+ delta_cck->fina_sync_err_cnt, max_cck->fina_sync_err_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "sfd_timeout:",
+ le32_to_cpu(cck->sfd_timeout), accum_cck->sfd_timeout,
+ delta_cck->sfd_timeout, max_cck->sfd_timeout);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "fina_timeout:",
+ le32_to_cpu(cck->fina_timeout), accum_cck->fina_timeout,
+ delta_cck->fina_timeout, max_cck->fina_timeout);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "unresponded_rts:",
+ le32_to_cpu(cck->unresponded_rts),
+ accum_cck->unresponded_rts, delta_cck->unresponded_rts,
+ max_cck->unresponded_rts);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "rxe_frame_lmt_ovrun:",
+ le32_to_cpu(cck->rxe_frame_limit_overrun),
+ accum_cck->rxe_frame_limit_overrun,
+ delta_cck->rxe_frame_limit_overrun,
+ max_cck->rxe_frame_limit_overrun);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "sent_ack_cnt:",
+ le32_to_cpu(cck->sent_ack_cnt), accum_cck->sent_ack_cnt,
+ delta_cck->sent_ack_cnt, max_cck->sent_ack_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "sent_cts_cnt:",
+ le32_to_cpu(cck->sent_cts_cnt), accum_cck->sent_cts_cnt,
+ delta_cck->sent_cts_cnt, max_cck->sent_cts_cnt);
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "%-32s current"
+ "acumulative delta max\n",
+ "Statistics_Rx - GENERAL:");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "bogus_cts:",
+ le32_to_cpu(general->bogus_cts), accum_general->bogus_cts,
+ delta_general->bogus_cts, max_general->bogus_cts);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "bogus_ack:",
+ le32_to_cpu(general->bogus_ack), accum_general->bogus_ack,
+ delta_general->bogus_ack, max_general->bogus_ack);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "non_bssid_frames:",
+ le32_to_cpu(general->non_bssid_frames),
+ accum_general->non_bssid_frames,
+ delta_general->non_bssid_frames,
+ max_general->non_bssid_frames);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "filtered_frames:",
+ le32_to_cpu(general->filtered_frames),
+ accum_general->filtered_frames,
+ delta_general->filtered_frames,
+ max_general->filtered_frames);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "non_channel_beacons:",
+ le32_to_cpu(general->non_channel_beacons),
+ accum_general->non_channel_beacons,
+ delta_general->non_channel_beacons,
+ max_general->non_channel_beacons);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+ssize_t
+il3945_ucode_tx_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ int pos = 0;
+ char *buf;
+ int bufsz = (sizeof(struct iwl39_stats_tx) * 48) + 250;
+ ssize_t ret;
+ struct iwl39_stats_tx *tx, *accum_tx, *delta_tx, *max_tx;
+
+ if (!il_is_alive(il))
+ return -EAGAIN;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IL_ERR("Can not allocate Buffer\n");
+ return -ENOMEM;
+ }
+
+ /*
+	 * The statistic information displayed here is based on
+	 * the last stats notification from uCode and
+	 * might not reflect the current uCode activity.
+ */
+ tx = &il->_3945.stats.tx;
+ accum_tx = &il->_3945.accum_stats.tx;
+ delta_tx = &il->_3945.delta_stats.tx;
+ max_tx = &il->_3945.max_delta.tx;
+ pos += il3945_stats_flag(il, buf, bufsz);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "%-32s current"
+ "acumulative delta max\n",
+ "Statistics_Tx:");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "preamble:",
+ le32_to_cpu(tx->preamble_cnt), accum_tx->preamble_cnt,
+ delta_tx->preamble_cnt, max_tx->preamble_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "rx_detected_cnt:",
+ le32_to_cpu(tx->rx_detected_cnt),
+ accum_tx->rx_detected_cnt, delta_tx->rx_detected_cnt,
+ max_tx->rx_detected_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "bt_prio_defer_cnt:",
+ le32_to_cpu(tx->bt_prio_defer_cnt),
+ accum_tx->bt_prio_defer_cnt, delta_tx->bt_prio_defer_cnt,
+ max_tx->bt_prio_defer_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "bt_prio_kill_cnt:",
+ le32_to_cpu(tx->bt_prio_kill_cnt),
+ accum_tx->bt_prio_kill_cnt, delta_tx->bt_prio_kill_cnt,
+ max_tx->bt_prio_kill_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "few_bytes_cnt:",
+ le32_to_cpu(tx->few_bytes_cnt), accum_tx->few_bytes_cnt,
+ delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "cts_timeout:",
+ le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
+ delta_tx->cts_timeout, max_tx->cts_timeout);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "ack_timeout:",
+ le32_to_cpu(tx->ack_timeout), accum_tx->ack_timeout,
+ delta_tx->ack_timeout, max_tx->ack_timeout);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "expected_ack_cnt:",
+ le32_to_cpu(tx->expected_ack_cnt),
+ accum_tx->expected_ack_cnt, delta_tx->expected_ack_cnt,
+ max_tx->expected_ack_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "actual_ack_cnt:",
+ le32_to_cpu(tx->actual_ack_cnt), accum_tx->actual_ack_cnt,
+ delta_tx->actual_ack_cnt, max_tx->actual_ack_cnt);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+ssize_t
+il3945_ucode_general_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ int pos = 0;
+ char *buf;
+ int bufsz = sizeof(struct iwl39_stats_general) * 10 + 300;
+ ssize_t ret;
+ struct iwl39_stats_general *general, *accum_general;
+ struct iwl39_stats_general *delta_general, *max_general;
+ struct stats_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
+ struct iwl39_stats_div *div, *accum_div, *delta_div, *max_div;
+
+ if (!il_is_alive(il))
+ return -EAGAIN;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IL_ERR("Can not allocate Buffer\n");
+ return -ENOMEM;
+ }
+
+ /*
+	 * The statistic information displayed here is based on
+	 * the last stats notification from uCode and
+	 * might not reflect the current uCode activity.
+ */
+ general = &il->_3945.stats.general;
+ dbg = &il->_3945.stats.general.dbg;
+ div = &il->_3945.stats.general.div;
+ accum_general = &il->_3945.accum_stats.general;
+ delta_general = &il->_3945.delta_stats.general;
+ max_general = &il->_3945.max_delta.general;
+ accum_dbg = &il->_3945.accum_stats.general.dbg;
+ delta_dbg = &il->_3945.delta_stats.general.dbg;
+ max_dbg = &il->_3945.max_delta.general.dbg;
+ accum_div = &il->_3945.accum_stats.general.div;
+ delta_div = &il->_3945.delta_stats.general.div;
+ max_div = &il->_3945.max_delta.general.div;
+ pos += il3945_stats_flag(il, buf, bufsz);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "%-32s current"
+ "acumulative delta max\n",
+ "Statistics_General:");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "burst_check:",
+ le32_to_cpu(dbg->burst_check), accum_dbg->burst_check,
+ delta_dbg->burst_check, max_dbg->burst_check);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "burst_count:",
+ le32_to_cpu(dbg->burst_count), accum_dbg->burst_count,
+ delta_dbg->burst_count, max_dbg->burst_count);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "sleep_time:",
+ le32_to_cpu(general->sleep_time),
+ accum_general->sleep_time, delta_general->sleep_time,
+ max_general->sleep_time);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "slots_out:",
+ le32_to_cpu(general->slots_out), accum_general->slots_out,
+ delta_general->slots_out, max_general->slots_out);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "slots_idle:",
+ le32_to_cpu(general->slots_idle),
+ accum_general->slots_idle, delta_general->slots_idle,
+ max_general->slots_idle);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "ttl_timestamp:\t\t\t%u\n",
+ le32_to_cpu(general->ttl_timestamp));
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "tx_on_a:",
+ le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
+ delta_div->tx_on_a, max_div->tx_on_a);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "tx_on_b:",
+ le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
+ delta_div->tx_on_b, max_div->tx_on_b);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "exec_time:",
+ le32_to_cpu(div->exec_time), accum_div->exec_time,
+ delta_div->exec_time, max_div->exec_time);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "probe_time:",
+ le32_to_cpu(div->probe_time), accum_div->probe_time,
+ delta_div->probe_time, max_div->probe_time);
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
new file mode 100644
index 000000000000..54b2d391e91a
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
@@ -0,0 +1,3976 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci-aspm.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/firmware.h>
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+
+#include <net/ieee80211_radiotap.h>
+#include <net/mac80211.h>
+
+#include <asm/div64.h>
+
+#define DRV_NAME "iwl3945"
+
+#include "commands.h"
+#include "common.h"
+#include "3945.h"
+#include "iwl-spectrum.h"
+
+/*
+ * module name, copyright, version, etc.
+ */
+
+#define DRV_DESCRIPTION \
+"Intel(R) PRO/Wireless 3945ABG/BG Network Connection driver for Linux"
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+#define VD "d"
+#else
+#define VD
+#endif
+
+/*
+ * add "s" to indicate spectrum measurement included.
+ * we add it here to be consistent with previous releases in which
+ * this was configurable.
+ */
+#define DRV_VERSION IWLWIFI_VERSION VD "s"
+#define DRV_COPYRIGHT "Copyright(c) 2003-2011 Intel Corporation"
+#define DRV_AUTHOR "<ilw@linux.intel.com>"
+
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_VERSION(DRV_VERSION);
+MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
+MODULE_LICENSE("GPL");
+
+ /* module parameters */
+struct il_mod_params il3945_mod_params = {
+ .sw_crypto = 1,
+ .restart_fw = 1,
+ .disable_hw_scan = 1,
+ /* the rest are 0 by default */
+};
+
+/**
+ * il3945_get_antenna_flags - Get antenna flags for RXON command
+ * @il: eeprom and antenna fields are used to determine antenna flags
+ *
+ * il->eeprom39 is used to determine if antenna AUX/MAIN are reversed
+ * il3945_mod_params.antenna specifies the antenna diversity mode:
+ *
+ * IL_ANTENNA_DIVERSITY - NIC selects best antenna by itself
+ * IL_ANTENNA_MAIN - Force MAIN antenna
+ * IL_ANTENNA_AUX - Force AUX antenna
+ */
+__le32
+il3945_get_antenna_flags(const struct il_priv *il)
+{
+ struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
+
+ switch (il3945_mod_params.antenna) {
+ case IL_ANTENNA_DIVERSITY:
+ return 0;
+
+ case IL_ANTENNA_MAIN:
+ if (eeprom->antenna_switch_type)
+ return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK;
+ return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK;
+
+ case IL_ANTENNA_AUX:
+ if (eeprom->antenna_switch_type)
+ return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK;
+ return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK;
+ }
+
+ /* bad antenna selector value */
+ IL_ERR("Bad antenna selector value (0x%x)\n",
+ il3945_mod_params.antenna);
+
+ return 0; /* "diversity" is default if error */
+}
+
+static int
+il3945_set_ccmp_dynamic_key_info(struct il_priv *il,
+ struct ieee80211_key_conf *keyconf, u8 sta_id)
+{
+ unsigned long flags;
+ __le16 key_flags = 0;
+ int ret;
+
+ key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
+ key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
+
+ if (sta_id == il->ctx.bcast_sta_id)
+ key_flags |= STA_KEY_MULTICAST_MSK;
+
+ keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+ keyconf->hw_key_idx = keyconf->keyidx;
+ key_flags &= ~STA_KEY_FLG_INVALID;
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
+ il->stations[sta_id].keyinfo.keylen = keyconf->keylen;
+ memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, keyconf->keylen);
+
+ memcpy(il->stations[sta_id].sta.key.key, keyconf->key, keyconf->keylen);
+
+ if ((il->stations[sta_id].sta.key.
+ key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC)
+ il->stations[sta_id].sta.key.key_offset =
+ il_get_free_ucode_key_idx(il);
+	/* else, we are overriding an existing key => no need to allocate room
+	 * in uCode. */
+
+ WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
+ "no space for a new key");
+
+ il->stations[sta_id].sta.key.key_flags = key_flags;
+ il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
+ il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+
+ D_INFO("hwcrypto: modify ucode station key info\n");
+
+ ret = il_send_add_sta(il, &il->stations[sta_id].sta, CMD_ASYNC);
+
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ return ret;
+}
+
+static int
+il3945_set_tkip_dynamic_key_info(struct il_priv *il,
+ struct ieee80211_key_conf *keyconf, u8 sta_id)
+{
+ return -EOPNOTSUPP;
+}
+
+static int
+il3945_set_wep_dynamic_key_info(struct il_priv *il,
+ struct ieee80211_key_conf *keyconf, u8 sta_id)
+{
+ return -EOPNOTSUPP;
+}
+
+static int
+il3945_clear_sta_key_info(struct il_priv *il, u8 sta_id)
+{
+ unsigned long flags;
+ struct il_addsta_cmd sta_cmd;
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ memset(&il->stations[sta_id].keyinfo, 0, sizeof(struct il_hw_key));
+ memset(&il->stations[sta_id].sta.key, 0, sizeof(struct il4965_keyinfo));
+ il->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC;
+ il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
+ il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+ memcpy(&sta_cmd, &il->stations[sta_id].sta,
+ sizeof(struct il_addsta_cmd));
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ D_INFO("hwcrypto: clear ucode station key info\n");
+ return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
+}
+
+static int
+il3945_set_dynamic_key(struct il_priv *il, struct ieee80211_key_conf *keyconf,
+ u8 sta_id)
+{
+ int ret = 0;
+
+ keyconf->hw_key_idx = HW_KEY_DYNAMIC;
+
+ switch (keyconf->cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ ret = il3945_set_ccmp_dynamic_key_info(il, keyconf, sta_id);
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ ret = il3945_set_tkip_dynamic_key_info(il, keyconf, sta_id);
+ break;
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ ret = il3945_set_wep_dynamic_key_info(il, keyconf, sta_id);
+ break;
+ default:
+ IL_ERR("Unknown alg: %s alg=%x\n", __func__, keyconf->cipher);
+ ret = -EINVAL;
+ }
+
+ D_WEP("Set dynamic key: alg=%x len=%d idx=%d sta=%d ret=%d\n",
+ keyconf->cipher, keyconf->keylen, keyconf->keyidx, sta_id, ret);
+
+ return ret;
+}
+
+static int
+il3945_remove_static_key(struct il_priv *il)
+{
+ int ret = -EOPNOTSUPP;
+
+ return ret;
+}
+
+static int
+il3945_set_static_key(struct il_priv *il, struct ieee80211_key_conf *key)
+{
+ if (key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+ key->cipher == WLAN_CIPHER_SUITE_WEP104)
+ return -EOPNOTSUPP;
+
+ IL_ERR("Static key invalid: cipher %x\n", key->cipher);
+ return -EINVAL;
+}
+
+static void
+il3945_clear_free_frames(struct il_priv *il)
+{
+ struct list_head *element;
+
+ D_INFO("%d frames on pre-allocated heap on clear.\n", il->frames_count);
+
+ while (!list_empty(&il->free_frames)) {
+ element = il->free_frames.next;
+ list_del(element);
+ kfree(list_entry(element, struct il3945_frame, list));
+ il->frames_count--;
+ }
+
+ if (il->frames_count) {
+ IL_WARN("%d frames still in use. Did we lose one?\n",
+ il->frames_count);
+ il->frames_count = 0;
+ }
+}
+
+static struct il3945_frame *
+il3945_get_free_frame(struct il_priv *il)
+{
+ struct il3945_frame *frame;
+ struct list_head *element;
+ if (list_empty(&il->free_frames)) {
+ frame = kzalloc(sizeof(*frame), GFP_KERNEL);
+ if (!frame) {
+ IL_ERR("Could not allocate frame!\n");
+ return NULL;
+ }
+
+ il->frames_count++;
+ return frame;
+ }
+
+ element = il->free_frames.next;
+ list_del(element);
+ return list_entry(element, struct il3945_frame, list);
+}
+
+static void
+il3945_free_frame(struct il_priv *il, struct il3945_frame *frame)
+{
+ memset(frame, 0, sizeof(*frame));
+ list_add(&frame->list, &il->free_frames);
+}
+
+unsigned int
+il3945_fill_beacon_frame(struct il_priv *il, struct ieee80211_hdr *hdr,
+ int left)
+{
+
+ if (!il_is_associated(il) || !il->beacon_skb)
+ return 0;
+
+ if (il->beacon_skb->len > left)
+ return 0;
+
+ memcpy(hdr, il->beacon_skb->data, il->beacon_skb->len);
+
+ return il->beacon_skb->len;
+}
+
+static int
+il3945_send_beacon_cmd(struct il_priv *il)
+{
+ struct il3945_frame *frame;
+ unsigned int frame_size;
+ int rc;
+ u8 rate;
+
+ frame = il3945_get_free_frame(il);
+
+ if (!frame) {
+ IL_ERR("Could not obtain free frame buffer for beacon "
+ "command.\n");
+ return -ENOMEM;
+ }
+
+ rate = il_get_lowest_plcp(il, &il->ctx);
+
+ frame_size = il3945_hw_get_beacon_cmd(il, frame, rate);
+
+ rc = il_send_cmd_pdu(il, C_TX_BEACON, frame_size, &frame->u.cmd[0]);
+
+ il3945_free_frame(il, frame);
+
+ return rc;
+}
+
+static void
+il3945_unset_hw_params(struct il_priv *il)
+{
+ if (il->_3945.shared_virt)
+ dma_free_coherent(&il->pci_dev->dev,
+ sizeof(struct il3945_shared),
+ il->_3945.shared_virt, il->_3945.shared_phys);
+}
+
+static void
+il3945_build_tx_cmd_hwcrypto(struct il_priv *il, struct ieee80211_tx_info *info,
+ struct il_device_cmd *cmd,
+ struct sk_buff *skb_frag, int sta_id)
+{
+ struct il3945_tx_cmd *tx_cmd = (struct il3945_tx_cmd *)cmd->cmd.payload;
+ struct il_hw_key *keyinfo = &il->stations[sta_id].keyinfo;
+
+ tx_cmd->sec_ctl = 0;
+
+ switch (keyinfo->cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
+ memcpy(tx_cmd->key, keyinfo->key, keyinfo->keylen);
+ D_TX("tx_cmd with AES hwcrypto\n");
+ break;
+
+ case WLAN_CIPHER_SUITE_TKIP:
+ break;
+
+ case WLAN_CIPHER_SUITE_WEP104:
+ tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
+ /* fall through */
+ case WLAN_CIPHER_SUITE_WEP40:
+ tx_cmd->sec_ctl |= TX_CMD_SEC_WEP |
+ (info->control.hw_key->hw_key_idx & TX_CMD_SEC_MSK) <<
+ TX_CMD_SEC_SHIFT;
+
+ memcpy(&tx_cmd->key[3], keyinfo->key, keyinfo->keylen);
+
+ D_TX("Configuring packet for WEP encryption " "with key %d\n",
+ info->control.hw_key->hw_key_idx);
+ break;
+
+ default:
+ IL_ERR("Unknown encode cipher %x\n", keyinfo->cipher);
+ break;
+ }
+}
+
+/*
+ * handle build C_TX command notification.
+ */
+static void
+il3945_build_tx_cmd_basic(struct il_priv *il, struct il_device_cmd *cmd,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_hdr *hdr, u8 std_id)
+{
+ struct il3945_tx_cmd *tx_cmd = (struct il3945_tx_cmd *)cmd->cmd.payload;
+ __le32 tx_flags = tx_cmd->tx_flags;
+ __le16 fc = hdr->frame_control;
+
+ tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
+ tx_flags |= TX_CMD_FLG_ACK_MSK;
+ if (ieee80211_is_mgmt(fc))
+ tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+ if (ieee80211_is_probe_resp(fc) &&
+ !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
+ tx_flags |= TX_CMD_FLG_TSF_MSK;
+ } else {
+ tx_flags &= (~TX_CMD_FLG_ACK_MSK);
+ tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+ }
+
+ tx_cmd->sta_id = std_id;
+ if (ieee80211_has_morefrags(fc))
+ tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
+
+ if (ieee80211_is_data_qos(fc)) {
+ u8 *qc = ieee80211_get_qos_ctl(hdr);
+ tx_cmd->tid_tspec = qc[0] & 0xf;
+ tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
+ } else {
+ tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+ }
+
+ il_tx_cmd_protection(il, info, fc, &tx_flags);
+
+ tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
+ if (ieee80211_is_mgmt(fc)) {
+ if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
+ tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
+ else
+ tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
+ } else {
+ tx_cmd->timeout.pm_frame_timeout = 0;
+ }
+
+ tx_cmd->driver_txop = 0;
+ tx_cmd->tx_flags = tx_flags;
+ tx_cmd->next_frame_len = 0;
+}
+
+/*
+ * start C_TX command process
+ */
+static int
+il3945_tx_skb(struct il_priv *il, struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct il3945_tx_cmd *tx_cmd;
+ struct il_tx_queue *txq = NULL;
+ struct il_queue *q = NULL;
+ struct il_device_cmd *out_cmd;
+ struct il_cmd_meta *out_meta;
+ dma_addr_t phys_addr;
+ dma_addr_t txcmd_phys;
+ int txq_id = skb_get_queue_mapping(skb);
+ u16 len, idx, hdr_len;
+ u8 id;
+ u8 unicast;
+ u8 sta_id;
+ u8 tid = 0;
+ __le16 fc;
+ u8 wait_write_ptr = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&il->lock, flags);
+ if (il_is_rfkill(il)) {
+ D_DROP("Dropping - RF KILL\n");
+ goto drop_unlock;
+ }
+
+ if ((ieee80211_get_tx_rate(il->hw, info)->hw_value & 0xFF) ==
+ IL_INVALID_RATE) {
+ IL_ERR("ERROR: No TX rate available.\n");
+ goto drop_unlock;
+ }
+
+ unicast = !is_multicast_ether_addr(hdr->addr1);
+ id = 0;
+
+ fc = hdr->frame_control;
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+ if (ieee80211_is_auth(fc))
+ D_TX("Sending AUTH frame\n");
+ else if (ieee80211_is_assoc_req(fc))
+ D_TX("Sending ASSOC frame\n");
+ else if (ieee80211_is_reassoc_req(fc))
+ D_TX("Sending REASSOC frame\n");
+#endif
+
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ hdr_len = ieee80211_hdrlen(fc);
+
+ /* Find idx into station table for destination station */
+ sta_id = il_sta_id_or_broadcast(il, &il->ctx, info->control.sta);
+ if (sta_id == IL_INVALID_STATION) {
+ D_DROP("Dropping - INVALID STATION: %pM\n", hdr->addr1);
+ goto drop;
+ }
+
+ D_RATE("station Id %d\n", sta_id);
+
+ if (ieee80211_is_data_qos(fc)) {
+ u8 *qc = ieee80211_get_qos_ctl(hdr);
+ tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
+ if (unlikely(tid >= MAX_TID_COUNT))
+ goto drop;
+ }
+
+ /* Descriptor for chosen Tx queue */
+ txq = &il->txq[txq_id];
+ q = &txq->q;
+
+ if ((il_queue_space(q) < q->high_mark))
+ goto drop;
+
+ spin_lock_irqsave(&il->lock, flags);
+
+ idx = il_get_cmd_idx(q, q->write_ptr, 0);
+
+ /* Set up driver data for this TFD */
+ memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct il_tx_info));
+ txq->txb[q->write_ptr].skb = skb;
+ txq->txb[q->write_ptr].ctx = &il->ctx;
+
+ /* Init first empty entry in queue's array of Tx/cmd buffers */
+ out_cmd = txq->cmd[idx];
+ out_meta = &txq->meta[idx];
+ tx_cmd = (struct il3945_tx_cmd *)out_cmd->cmd.payload;
+ memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
+ memset(tx_cmd, 0, sizeof(*tx_cmd));
+
+ /*
+ * Set up the Tx-command (not MAC!) header.
+ * Store the chosen Tx queue and TFD idx within the sequence field;
+ * after Tx, uCode's Tx response will return this value so driver can
+ * locate the frame within the tx queue and do post-tx processing.
+ */
+ out_cmd->hdr.cmd = C_TX;
+ out_cmd->hdr.sequence =
+ cpu_to_le16((u16)
+ (QUEUE_TO_SEQ(txq_id) | IDX_TO_SEQ(q->write_ptr)));
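+ /*
+ * Illustrative sketch only -- the exact bit layout lives in the
+ * QUEUE_TO_SEQ()/IDX_TO_SEQ() macros, assumed here to pack the queue id
+ * into the upper bits and the TFD idx into the lower bits:
+ *
+ * hypothetically, with txq_id = 2 and write_ptr = 5:
+ * seq = QUEUE_TO_SEQ(2) | IDX_TO_SEQ(5);
+ *
+ * When the Tx response comes back carrying this sequence, the driver can
+ * mask the two halves back out to find the queue and TFD to reclaim.
+ */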
+
+ /* Copy MAC header from skb into command buffer */
+ memcpy(tx_cmd->hdr, hdr, hdr_len);
+
+ if (info->control.hw_key)
+ il3945_build_tx_cmd_hwcrypto(il, info, out_cmd, skb, sta_id);
+
+ /* TODO need this for burst mode later on */
+ il3945_build_tx_cmd_basic(il, out_cmd, info, hdr, sta_id);
+
+ il3945_hw_build_tx_cmd_rate(il, out_cmd, info, hdr, sta_id);
+
+ /* Total # bytes to be transmitted */
+ len = (u16) skb->len;
+ tx_cmd->len = cpu_to_le16(len);
+
+ il_dbg_log_tx_data_frame(il, len, hdr);
+ il_update_stats(il, true, fc, len);
+ tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK;
+ tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK;
+
+ if (!ieee80211_has_morefrags(hdr->frame_control)) {
+ txq->need_update = 1;
+ } else {
+ wait_write_ptr = 1;
+ txq->need_update = 0;
+ }
+
+ D_TX("sequence nr = 0X%x\n", le16_to_cpu(out_cmd->hdr.sequence));
+ D_TX("tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
+ il_print_hex_dump(il, IL_DL_TX, tx_cmd, sizeof(*tx_cmd));
+ il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd->hdr,
+ ieee80211_hdrlen(fc));
+
+ /*
+ * Use the first empty entry in this queue's command buffer array
+ * to contain the Tx command and MAC header concatenated together
+ * (payload data will be in another buffer).
+ * Size of this varies, due to varying MAC header length.
+ * If end is not dword aligned, we'll have 2 extra bytes at the end
+ * of the MAC header (device reads on dword boundaries).
+ * We'll tell device about this padding later.
+ */
+ len =
+ sizeof(struct il3945_tx_cmd) + sizeof(struct il_cmd_header) +
+ hdr_len;
+ len = (len + 3) & ~3;
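+ /*
+ * Worked example of the round-up above: if the Tx command, the command
+ * header and the MAC header add up to, say, 62 bytes, then
+ * (62 + 3) & ~3 == 64, i.e. 2 padding bytes that the device is told
+ * about later, exactly as the block comment above describes.
+ */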
+
+ /* Physical address of this Tx command's header (not MAC header!),
+ * within command buffer array. */
+ txcmd_phys =
+ pci_map_single(il->pci_dev, &out_cmd->hdr, len, PCI_DMA_TODEVICE);
+ /* we do not map meta data ... so we can safely access address to
+ * provide to unmap command*/
+ dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
+ dma_unmap_len_set(out_meta, len, len);
+
+ /* Add buffer containing Tx command and MAC(!) header to TFD's
+ * first entry */
+ il->cfg->ops->lib->txq_attach_buf_to_tfd(il, txq, txcmd_phys, len, 1,
+ 0);
+
+ /* Set up TFD's 2nd entry to point directly to remainder of skb,
+ * if any (802.11 null frames have no payload). */
+ len = skb->len - hdr_len;
+ if (len) {
+ phys_addr =
+ pci_map_single(il->pci_dev, skb->data + hdr_len, len,
+ PCI_DMA_TODEVICE);
+ il->cfg->ops->lib->txq_attach_buf_to_tfd(il, txq, phys_addr,
+ len, 0, U32_PAD(len));
+ }
+
+ /* Tell device the write idx *just past* this latest filled TFD */
+ q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
+ il_txq_update_write_ptr(il, txq);
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ if (il_queue_space(q) < q->high_mark && il->mac80211_registered) {
+ if (wait_write_ptr) {
+ spin_lock_irqsave(&il->lock, flags);
+ txq->need_update = 1;
+ il_txq_update_write_ptr(il, txq);
+ spin_unlock_irqrestore(&il->lock, flags);
+ }
+
+ il_stop_queue(il, txq);
+ }
+
+ return 0;
+
+drop_unlock:
+ spin_unlock_irqrestore(&il->lock, flags);
+drop:
+ return -1;
+}
+
+static int
+il3945_get_measurement(struct il_priv *il,
+ struct ieee80211_measurement_params *params, u8 type)
+{
+ struct il_spectrum_cmd spectrum;
+ struct il_rx_pkt *pkt;
+ struct il_host_cmd cmd = {
+ .id = C_SPECTRUM_MEASUREMENT,
+ .data = (void *)&spectrum,
+ .flags = CMD_WANT_SKB,
+ };
+ u32 add_time = le64_to_cpu(params->start_time);
+ int rc;
+ int spectrum_resp_status;
+ int duration = le16_to_cpu(params->duration);
+ struct il_rxon_context *ctx = &il->ctx;
+
+ if (il_is_associated(il))
+ add_time =
+ il_usecs_to_beacons(il,
+ le64_to_cpu(params->start_time) - il->_3945.last_tsf,
+ le16_to_cpu(ctx->timing.beacon_interval));
+
+ memset(&spectrum, 0, sizeof(spectrum));
+
+ spectrum.channel_count = cpu_to_le16(1);
+ spectrum.flags =
+ RXON_FLG_TSF2HOST_MSK | RXON_FLG_ANT_A_MSK | RXON_FLG_DIS_DIV_MSK;
+ spectrum.filter_flags = MEASUREMENT_FILTER_FLAG;
+ cmd.len = sizeof(spectrum);
+ spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len));
+
+ if (il_is_associated(il))
+ spectrum.start_time =
+ il_add_beacon_time(il, il->_3945.last_beacon_time, add_time,
+ le16_to_cpu(ctx->timing.beacon_interval));
+ else
+ spectrum.start_time = 0;
+
+ spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT);
+ spectrum.channels[0].channel = params->channel;
+ spectrum.channels[0].type = type;
+ if (ctx->active.flags & RXON_FLG_BAND_24G_MSK)
+ spectrum.flags |=
+ RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK |
+ RXON_FLG_TGG_PROTECT_MSK;
+
+ rc = il_send_cmd_sync(il, &cmd);
+ if (rc)
+ return rc;
+
+ pkt = (struct il_rx_pkt *)cmd.reply_page;
+ if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
+ IL_ERR("Bad return from N_RX_ON_ASSOC command\n");
+ rc = -EIO;
+ }
+
+ spectrum_resp_status = le16_to_cpu(pkt->u.spectrum.status);
+ switch (spectrum_resp_status) {
+ case 0: /* Command will be handled */
+ if (pkt->u.spectrum.id != 0xff) {
+ D_INFO("Replaced existing measurement: %d\n",
+ pkt->u.spectrum.id);
+ il->measurement_status &= ~MEASUREMENT_READY;
+ }
+ il->measurement_status |= MEASUREMENT_ACTIVE;
+ rc = 0;
+ break;
+
+ case 1: /* Command will not be handled */
+ rc = -EAGAIN;
+ break;
+ }
+
+ il_free_pages(il, cmd.reply_page);
+
+ return rc;
+}
+
+static void
+il3945_hdl_alive(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ struct il_alive_resp *palive;
+ struct delayed_work *pwork;
+
+ palive = &pkt->u.alive_frame;
+
+ D_INFO("Alive ucode status 0x%08X revision " "0x%01X 0x%01X\n",
+ palive->is_valid, palive->ver_type, palive->ver_subtype);
+
+ if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
+ D_INFO("Initialization Alive received.\n");
+ memcpy(&il->card_alive_init, &pkt->u.alive_frame,
+ sizeof(struct il_alive_resp));
+ pwork = &il->init_alive_start;
+ } else {
+ D_INFO("Runtime Alive received.\n");
+ memcpy(&il->card_alive, &pkt->u.alive_frame,
+ sizeof(struct il_alive_resp));
+ pwork = &il->alive_start;
+ il3945_disable_events(il);
+ }
+
+ /* We delay the ALIVE response by 5ms to
+ * give the HW RF Kill time to activate... */
+ if (palive->is_valid == UCODE_VALID_OK)
+ queue_delayed_work(il->workqueue, pwork, msecs_to_jiffies(5));
+ else
+ IL_WARN("uCode did not respond OK.\n");
+}
+
+static void
+il3945_hdl_add_sta(struct il_priv *il, struct il_rx_buf *rxb)
+{
+#ifdef CONFIG_IWLEGACY_DEBUG
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+#endif
+
+ D_RX("Received C_ADD_STA: 0x%02X\n", pkt->u.status);
+}
+
+static void
+il3945_hdl_beacon(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ struct il3945_beacon_notif *beacon = &(pkt->u.beacon_status);
+#ifdef CONFIG_IWLEGACY_DEBUG
+ u8 rate = beacon->beacon_notify_hdr.rate;
+
+ D_RX("beacon status %x retries %d iss %d " "tsf %d %d rate %d\n",
+ le32_to_cpu(beacon->beacon_notify_hdr.status) & TX_STATUS_MSK,
+ beacon->beacon_notify_hdr.failure_frame,
+ le32_to_cpu(beacon->ibss_mgr_status),
+ le32_to_cpu(beacon->high_tsf), le32_to_cpu(beacon->low_tsf), rate);
+#endif
+
+ il->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
+
+}
+
+/* Handle notification from uCode that card's power state is changing
+ * due to software, hardware, or critical temperature RFKILL */
+static void
+il3945_hdl_card_state(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
+ unsigned long status = il->status;
+
+ IL_WARN("Card state received: HW:%s SW:%s\n",
+ (flags & HW_CARD_DISABLED) ? "Kill" : "On",
+ (flags & SW_CARD_DISABLED) ? "Kill" : "On");
+
+ _il_wr(il, CSR_UCODE_DRV_GP1_SET, CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+
+ if (flags & HW_CARD_DISABLED)
+ set_bit(S_RF_KILL_HW, &il->status);
+ else
+ clear_bit(S_RF_KILL_HW, &il->status);
+
+ il_scan_cancel(il);
+
+ if ((test_bit(S_RF_KILL_HW, &status) !=
+ test_bit(S_RF_KILL_HW, &il->status)))
+ wiphy_rfkill_set_hw_state(il->hw->wiphy,
+ test_bit(S_RF_KILL_HW, &il->status));
+ else
+ wake_up(&il->wait_command_queue);
+}
+
+/**
+ * il3945_setup_handlers - Initialize Rx handler callbacks
+ *
+ * Set up the RX handlers for each of the reply types sent from the uCode
+ * to the host.
+ *
+ * This function chains into the hardware specific files for them to set up
+ * any hardware specific handlers as well.
+ */
+static void
+il3945_setup_handlers(struct il_priv *il)
+{
+ il->handlers[N_ALIVE] = il3945_hdl_alive;
+ il->handlers[C_ADD_STA] = il3945_hdl_add_sta;
+ il->handlers[N_ERROR] = il_hdl_error;
+ il->handlers[N_CHANNEL_SWITCH] = il_hdl_csa;
+ il->handlers[N_SPECTRUM_MEASUREMENT] = il_hdl_spectrum_measurement;
+ il->handlers[N_PM_SLEEP] = il_hdl_pm_sleep;
+ il->handlers[N_PM_DEBUG_STATS] = il_hdl_pm_debug_stats;
+ il->handlers[N_BEACON] = il3945_hdl_beacon;
+
+ /*
+ * The same handler is used for both the REPLY to a discrete
+ * stats request from the host as well as for the periodic
+ * stats notifications (after received beacons) from the uCode.
+ */
+ il->handlers[C_STATS] = il3945_hdl_c_stats;
+ il->handlers[N_STATS] = il3945_hdl_stats;
+
+ il_setup_rx_scan_handlers(il);
+ il->handlers[N_CARD_STATE] = il3945_hdl_card_state;
+
+ /* Set up hardware specific Rx handlers */
+ il3945_hw_handler_setup(il);
+}
+
+/************************** RX-FUNCTIONS ****************************/
+/*
+ * Rx theory of operation
+ *
+ * The host allocates 32 DMA target addresses and passes the host address
+ * to the firmware at register IL_RFDS_TBL_LOWER + N * RFD_SIZE where N is
+ * 0 to 31
+ *
+ * Rx Queue Indexes
+ * The host/firmware share two idx registers for managing the Rx buffers.
+ *
+ * The READ idx maps to the first position that the firmware may be writing
+ * to -- the driver can read up to (but not including) this position and get
+ * good data.
+ * The READ idx is managed by the firmware once the card is enabled.
+ *
+ * The WRITE idx maps to the last position the driver has read from -- the
+ * position preceding WRITE is the last slot the firmware can place a packet.
+ *
+ * The queue is empty (no good data) if WRITE = READ - 1, and is full if
+ * WRITE = READ.
+ *
+ * During initialization, the host sets up the READ queue position to the first
+ * IDX position, and WRITE to the last (READ - 1 wrapped)
+ *
+ * When the firmware places a packet in a buffer, it will advance the READ idx
+ * and fire the RX interrupt. The driver can then query the READ idx and
+ * process as many packets as possible, moving the WRITE idx forward as it
+ * resets the Rx queue buffers with new memory.
+ *
+ * The management in the driver is as follows:
+ * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
+ * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
+ * to replenish the iwl->rxq->rx_free.
+ * + In il3945_rx_replenish (scheduled) if 'processed' != 'read' then the
+ * iwl->rxq is replenished and the READ IDX is updated (updating the
+ * 'processed' and 'read' driver idxes as well)
+ * + A received packet is processed and handed to the kernel network stack,
+ * detached from the iwl->rxq. The driver 'processed' idx is updated.
+ * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
+ * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
+ * IDX is not incremented and iwl->status(RX_STALLED) is set. If there
+ * were enough free buffers and RX_STALLED is set it is cleared.
+ *
+ *
+ * Driver sequence:
+ *
+ * il3945_rx_replenish() Replenishes rx_free list from rx_used, and calls
+ * il3945_rx_queue_restock
+ * il3945_rx_queue_restock() Moves available buffers from rx_free into Rx
+ * queue, updates firmware pointers, and updates
+ * the WRITE idx. If insufficient rx_free buffers
+ * are available, schedules il3945_rx_replenish
+ *
+ * -- enable interrupts --
+ * ISR - il3945_rx() Detach il_rx_bufs from pool up to the
+ * READ IDX, detaching the SKB from the pool.
+ * Moves the packet buffer from queue to rx_used.
+ * Calls il3945_rx_queue_restock to refill any empty
+ * slots.
+ * ...
+ *
+ */
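+
+/*
+ * A minimal sketch of the wrap-around arithmetic the scheme above relies
+ * on (RX_QUEUE_SIZE is a power of two, so masking implements the wrap):
+ *
+ * write = (write + 1) & RX_QUEUE_MASK;
+ * empty = r - write_actual;
+ * if (empty < 0)
+ * empty += RX_QUEUE_SIZE;
+ *
+ * which is how il3945_rx_queue_restock() advances rxq->write and how
+ * il3945_rx_handle() computes total_empty further below.
+ */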
+
+/**
+ * il3945_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
+ */
+static inline __le32
+il3945_dma_addr2rbd_ptr(struct il_priv *il, dma_addr_t dma_addr)
+{
+ return cpu_to_le32((u32) dma_addr);
+}
+
+/**
+ * il3945_rx_queue_restock - refill RX queue from pre-allocated pool
+ *
+ * If there are slots in the RX queue that need to be restocked,
+ * and we have free pre-allocated buffers, fill the ranks as much
+ * as we can, pulling from rx_free.
+ *
+ * This moves the 'write' idx forward to catch up with 'processed', and
+ * also updates the memory address in the firmware to reference the new
+ * target buffer.
+ */
+static void
+il3945_rx_queue_restock(struct il_priv *il)
+{
+ struct il_rx_queue *rxq = &il->rxq;
+ struct list_head *element;
+ struct il_rx_buf *rxb;
+ unsigned long flags;
+ int write;
+
+ spin_lock_irqsave(&rxq->lock, flags);
+ write = rxq->write & ~0x7;
+ while (il_rx_queue_space(rxq) > 0 && rxq->free_count) {
+ /* Get next free Rx buffer, remove from free list */
+ element = rxq->rx_free.next;
+ rxb = list_entry(element, struct il_rx_buf, list);
+ list_del(element);
+
+ /* Point to Rx buffer via next RBD in circular buffer */
+ rxq->bd[rxq->write] =
+ il3945_dma_addr2rbd_ptr(il, rxb->page_dma);
+ rxq->queue[rxq->write] = rxb;
+ rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
+ rxq->free_count--;
+ }
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ /* If the pre-allocated buffer pool is dropping low, schedule to
+ * refill it */
+ if (rxq->free_count <= RX_LOW_WATERMARK)
+ queue_work(il->workqueue, &il->rx_replenish);
+
+ /* If we've added more space for the firmware to place data, tell it.
+ * Increment device's write pointer in multiples of 8. */
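+ /*
+ * For example (values illustrative only): with write_actual == 8 and
+ * rxq->write advanced to 13, (rxq->write & ~0x7) is still 8, so the
+ * device pointer is left alone until write reaches 16 or the queue
+ * drifts more than 7 entries away from read.
+ */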
+ if (rxq->write_actual != (rxq->write & ~0x7) ||
+ abs(rxq->write - rxq->read) > 7) {
+ spin_lock_irqsave(&rxq->lock, flags);
+ rxq->need_update = 1;
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ il_rx_queue_update_write_ptr(il, rxq);
+ }
+}
+
+/**
+ * il3945_rx_replenish - Move all used packets from rx_used to rx_free
+ *
+ * When moving to rx_free an SKB is allocated for the slot.
+ *
+ * Also restock the Rx queue via il3945_rx_queue_restock.
+ * This is called as a scheduled work item (except for during initialization)
+ */
+static void
+il3945_rx_allocate(struct il_priv *il, gfp_t priority)
+{
+ struct il_rx_queue *rxq = &il->rxq;
+ struct list_head *element;
+ struct il_rx_buf *rxb;
+ struct page *page;
+ unsigned long flags;
+ gfp_t gfp_mask = priority;
+
+ while (1) {
+ spin_lock_irqsave(&rxq->lock, flags);
+
+ if (list_empty(&rxq->rx_used)) {
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&rxq->lock, flags);
+
+ if (rxq->free_count > RX_LOW_WATERMARK)
+ gfp_mask |= __GFP_NOWARN;
+
+ if (il->hw_params.rx_page_order > 0)
+ gfp_mask |= __GFP_COMP;
+
+ /* Alloc a new receive buffer */
+ page = alloc_pages(gfp_mask, il->hw_params.rx_page_order);
+ if (!page) {
+ if (net_ratelimit())
+ D_INFO("Failed to allocate SKB buffer.\n");
+ if (rxq->free_count <= RX_LOW_WATERMARK &&
+ net_ratelimit())
+ IL_ERR("Failed to allocate SKB buffer with %0x."
+ "Only %u free buffers remaining.\n",
+ priority, rxq->free_count);
+ /* We don't reschedule replenish work here -- we will
+ * call the restock method and if it still needs
+ * more buffers it will schedule replenish */
+ break;
+ }
+
+ spin_lock_irqsave(&rxq->lock, flags);
+ if (list_empty(&rxq->rx_used)) {
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ __free_pages(page, il->hw_params.rx_page_order);
+ return;
+ }
+ element = rxq->rx_used.next;
+ rxb = list_entry(element, struct il_rx_buf, list);
+ list_del(element);
+ spin_unlock_irqrestore(&rxq->lock, flags);
+
+ rxb->page = page;
+ /* Get physical address of RB/SKB */
+ rxb->page_dma =
+ pci_map_page(il->pci_dev, page, 0,
+ PAGE_SIZE << il->hw_params.rx_page_order,
+ PCI_DMA_FROMDEVICE);
+
+ spin_lock_irqsave(&rxq->lock, flags);
+
+ list_add_tail(&rxb->list, &rxq->rx_free);
+ rxq->free_count++;
+ il->alloc_rxb_page++;
+
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ }
+}
+
+void
+il3945_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq)
+{
+ unsigned long flags;
+ int i;
+ spin_lock_irqsave(&rxq->lock, flags);
+ INIT_LIST_HEAD(&rxq->rx_free);
+ INIT_LIST_HEAD(&rxq->rx_used);
+ /* Fill the rx_used queue with _all_ of the Rx buffers */
+ for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
+ /* In the reset function, these buffers may have been allocated
+ * to an SKB, so we need to unmap and free potential storage */
+ if (rxq->pool[i].page != NULL) {
+ pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
+ PAGE_SIZE << il->hw_params.rx_page_order,
+ PCI_DMA_FROMDEVICE);
+ __il_free_pages(il, rxq->pool[i].page);
+ rxq->pool[i].page = NULL;
+ }
+ list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
+ }
+
+ /* Set us so that we have processed and used all buffers, but have
+ * not restocked the Rx queue with fresh buffers */
+ rxq->read = rxq->write = 0;
+ rxq->write_actual = 0;
+ rxq->free_count = 0;
+ spin_unlock_irqrestore(&rxq->lock, flags);
+}
+
+void
+il3945_rx_replenish(void *data)
+{
+ struct il_priv *il = data;
+ unsigned long flags;
+
+ il3945_rx_allocate(il, GFP_KERNEL);
+
+ spin_lock_irqsave(&il->lock, flags);
+ il3945_rx_queue_restock(il);
+ spin_unlock_irqrestore(&il->lock, flags);
+}
+
+static void
+il3945_rx_replenish_now(struct il_priv *il)
+{
+ il3945_rx_allocate(il, GFP_ATOMIC);
+
+ il3945_rx_queue_restock(il);
+}
+
+/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
+ * If an SKB has been detached, the POOL needs to have its SKB set to NULL.
+ * This free routine walks the list of POOL entries and, if the SKB is
+ * non-NULL, it is unmapped and freed.
+ */
+static void
+il3945_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq)
+{
+ int i;
+ for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
+ if (rxq->pool[i].page != NULL) {
+ pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
+ PAGE_SIZE << il->hw_params.rx_page_order,
+ PCI_DMA_FROMDEVICE);
+ __il_free_pages(il, rxq->pool[i].page);
+ rxq->pool[i].page = NULL;
+ }
+ }
+
+ dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
+ rxq->bd_dma);
+ dma_free_coherent(&il->pci_dev->dev, sizeof(struct il_rb_status),
+ rxq->rb_stts, rxq->rb_stts_dma);
+ rxq->bd = NULL;
+ rxq->rb_stts = NULL;
+}
+
+/* Convert linear signal-to-noise ratio into dB */
+static u8 ratio2dB[100] = {
+/* 0 1 2 3 4 5 6 7 8 9 */
+ 0, 0, 6, 10, 12, 14, 16, 17, 18, 19, /* 00 - 09 */
+ 20, 21, 22, 22, 23, 23, 24, 25, 26, 26, /* 10 - 19 */
+ 26, 26, 26, 27, 27, 28, 28, 28, 29, 29, /* 20 - 29 */
+ 29, 30, 30, 30, 31, 31, 31, 31, 32, 32, /* 30 - 39 */
+ 32, 32, 32, 33, 33, 33, 33, 33, 34, 34, /* 40 - 49 */
+ 34, 34, 34, 34, 35, 35, 35, 35, 35, 35, /* 50 - 59 */
+ 36, 36, 36, 36, 36, 36, 36, 37, 37, 37, /* 60 - 69 */
+ 37, 37, 37, 37, 37, 38, 38, 38, 38, 38, /* 70 - 79 */
+ 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, /* 80 - 89 */
+ 39, 39, 39, 39, 39, 40, 40, 40, 40, 40 /* 90 - 99 */
+};
+
+/* Calculates a relative dB value from a ratio of linear
+ * (i.e. not dB) signal levels.
+ * Conversion assumes that levels are voltages (20*log), not powers (10*log). */
+int
+il3945_calc_db_from_ratio(int sig_ratio)
+{
+ /* 1000:1 or higher just report as 60 dB */
+ if (sig_ratio >= 1000)
+ return 60;
+
+ /* 100:1 or higher, divide by 10 and use table,
+ * add 20 dB to make up for divide by 10 */
+ if (sig_ratio >= 100)
+ return 20 + (int)ratio2dB[sig_ratio / 10];
+
+ /* We shouldn't see this */
+ if (sig_ratio < 1)
+ return 0;
+
+ /* Use table for ratios 1:1 - 99:1 */
+ return (int)ratio2dB[sig_ratio];
+}
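+
+/*
+ * Worked examples of the mapping above, using values straight from the
+ * ratio2dB table: a 10:1 voltage ratio reports ratio2dB[10] = 20 dB, a
+ * 250:1 ratio takes the "divide by 10, add 20 dB" path and reports
+ * 20 + ratio2dB[25] = 20 + 28 = 48 dB, and anything from 1000:1 up is
+ * clamped to 60 dB.
+ */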
+
+/**
+ * il3945_rx_handle - Main entry function for receiving responses from uCode
+ *
+ * Uses the il->handlers callback function array to invoke
+ * the appropriate handlers, including command responses,
+ * frame-received notifications, and other notifications.
+ */
+static void
+il3945_rx_handle(struct il_priv *il)
+{
+ struct il_rx_buf *rxb;
+ struct il_rx_pkt *pkt;
+ struct il_rx_queue *rxq = &il->rxq;
+ u32 r, i;
+ int reclaim;
+ unsigned long flags;
+ u8 fill_rx = 0;
+ u32 count = 8;
+ int total_empty = 0;
+
+ /* uCode's read idx (stored in shared DRAM) indicates the last Rx
+ * buffer that the driver may process (last buffer filled by ucode). */
+ r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
+ i = rxq->read;
+
+ /* calculate how many frames need to be restocked after handling RX */
+ total_empty = r - rxq->write_actual;
+ if (total_empty < 0)
+ total_empty += RX_QUEUE_SIZE;
+
+ if (total_empty > (RX_QUEUE_SIZE / 2))
+ fill_rx = 1;
+ /* Rx interrupt, but nothing sent from uCode */
+ if (i == r)
+ D_RX("r = %d, i = %d\n", r, i);
+
+ while (i != r) {
+ int len;
+
+ rxb = rxq->queue[i];
+
+ /* If an RXB doesn't have a Rx queue slot associated with it,
+ * then a bug has been introduced in the queue refilling
+ * routines -- catch it here */
+ BUG_ON(rxb == NULL);
+
+ rxq->queue[i] = NULL;
+
+ pci_unmap_page(il->pci_dev, rxb->page_dma,
+ PAGE_SIZE << il->hw_params.rx_page_order,
+ PCI_DMA_FROMDEVICE);
+ pkt = rxb_addr(rxb);
+
+ len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
+ len += sizeof(u32); /* account for status word */
+
+ /* Reclaim a command buffer only if this packet is a response
+ * to a (driver-originated) command.
+ * If the packet (e.g. Rx frame) originated from uCode,
+ * there is no command buffer to reclaim.
+ * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
+ * but apparently a few don't get set; catch them here. */
+ reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
+ pkt->hdr.cmd != N_STATS && pkt->hdr.cmd != C_TX;
+
+ /* Based on type of command response or notification,
+ * handle those that need handling via function in
+ * handlers table. See il3945_setup_handlers() */
+ if (il->handlers[pkt->hdr.cmd]) {
+ D_RX("r = %d, i = %d, %s, 0x%02x\n", r, i,
+ il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
+ il->isr_stats.handlers[pkt->hdr.cmd]++;
+ il->handlers[pkt->hdr.cmd] (il, rxb);
+ } else {
+ /* No handling needed */
+ D_RX("r %d i %d No handler needed for %s, 0x%02x\n", r,
+ i, il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
+ }
+
+ /*
+ * XXX: After here, we should always check rxb->page
+ * against NULL before touching it or its virtual
+ * memory (pkt). Because some handler might have
+ * already taken or freed the pages.
+ */
+
+ if (reclaim) {
+ /* Invoke any callbacks, transfer the buffer to caller,
+ * and fire off the (possibly) blocking il_send_cmd()
+ * as we reclaim the driver command queue */
+ if (rxb->page)
+ il_tx_cmd_complete(il, rxb);
+ else
+ IL_WARN("Claim null rxb?\n");
+ }
+
+ /* Reuse the page if possible. For notification packets and
+ * SKBs that fail to Rx correctly, add them back into the
+ * rx_free list for reuse later. */
+ spin_lock_irqsave(&rxq->lock, flags);
+ if (rxb->page != NULL) {
+ rxb->page_dma =
+ pci_map_page(il->pci_dev, rxb->page, 0,
+ PAGE_SIZE << il->hw_params.rx_page_order,
+ PCI_DMA_FROMDEVICE);
+ list_add_tail(&rxb->list, &rxq->rx_free);
+ rxq->free_count++;
+ } else
+ list_add_tail(&rxb->list, &rxq->rx_used);
+
+ spin_unlock_irqrestore(&rxq->lock, flags);
+
+ i = (i + 1) & RX_QUEUE_MASK;
+ /* If there are a lot of unused frames,
+ * restock the Rx queue so ucode won't assert. */
+ if (fill_rx) {
+ count++;
+ if (count >= 8) {
+ rxq->read = i;
+ il3945_rx_replenish_now(il);
+ count = 0;
+ }
+ }
+ }
+
+ /* Backtrack one entry */
+ rxq->read = i;
+ if (fill_rx)
+ il3945_rx_replenish_now(il);
+ else
+ il3945_rx_queue_restock(il);
+}
+
+/* call this function to flush any scheduled tasklet */
+static inline void
+il3945_synchronize_irq(struct il_priv *il)
+{
+ /* wait to make sure we flush pending tasklet */
+ synchronize_irq(il->pci_dev->irq);
+ tasklet_kill(&il->irq_tasklet);
+}
+
+static const char *
+il3945_desc_lookup(int i)
+{
+ switch (i) {
+ case 1:
+ return "FAIL";
+ case 2:
+ return "BAD_PARAM";
+ case 3:
+ return "BAD_CHECKSUM";
+ case 4:
+ return "NMI_INTERRUPT";
+ case 5:
+ return "SYSASSERT";
+ case 6:
+ return "FATAL_ERROR";
+ }
+
+ return "UNKNOWN";
+}
+
+#define ERROR_START_OFFSET (1 * sizeof(u32))
+#define ERROR_ELEM_SIZE (7 * sizeof(u32))
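+
+/*
+ * Layout assumed by il3945_dump_nic_error_log() below: the word at 'base'
+ * holds the number of records, followed by 'count' records of
+ * ERROR_ELEM_SIZE (7 u32s) each, read in this order:
+ *
+ * desc, time, blink1, blink2, ilink1, ilink2, data1
+ *
+ * where desc is decoded by il3945_desc_lookup() and the remaining words
+ * are printed raw under the column header below.
+ */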
+
+void
+il3945_dump_nic_error_log(struct il_priv *il)
+{
+ u32 i;
+ u32 desc, time, count, base, data1;
+ u32 blink1, blink2, ilink1, ilink2;
+
+ base = le32_to_cpu(il->card_alive.error_event_table_ptr);
+
+ if (!il3945_hw_valid_rtc_data_addr(base)) {
+ IL_ERR("Not valid error log pointer 0x%08X\n", base);
+ return;
+ }
+
+ count = il_read_targ_mem(il, base);
+
+ if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
+ IL_ERR("Start IWL Error Log Dump:\n");
+ IL_ERR("Status: 0x%08lX, count: %d\n", il->status, count);
+ }
+
+ IL_ERR("Desc Time asrtPC blink2 "
+ "ilink1 nmiPC Line\n");
+ for (i = ERROR_START_OFFSET;
+ i < (count * ERROR_ELEM_SIZE) + ERROR_START_OFFSET;
+ i += ERROR_ELEM_SIZE) {
+ desc = il_read_targ_mem(il, base + i);
+ time = il_read_targ_mem(il, base + i + 1 * sizeof(u32));
+ blink1 = il_read_targ_mem(il, base + i + 2 * sizeof(u32));
+ blink2 = il_read_targ_mem(il, base + i + 3 * sizeof(u32));
+ ilink1 = il_read_targ_mem(il, base + i + 4 * sizeof(u32));
+ ilink2 = il_read_targ_mem(il, base + i + 5 * sizeof(u32));
+ data1 = il_read_targ_mem(il, base + i + 6 * sizeof(u32));
+
+ IL_ERR("%-13s (0x%X) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n",
+ il3945_desc_lookup(desc), desc, time, blink1, blink2,
+ ilink1, ilink2, data1);
+ }
+}
+
+static void
+il3945_irq_tasklet(struct il_priv *il)
+{
+ u32 inta, handled = 0;
+ u32 inta_fh;
+ unsigned long flags;
+#ifdef CONFIG_IWLEGACY_DEBUG
+ u32 inta_mask;
+#endif
+
+ spin_lock_irqsave(&il->lock, flags);
+
+ /* Ack/clear/reset pending uCode interrupts.
+ * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
+ * and will clear only when CSR_FH_INT_STATUS gets cleared. */
+ inta = _il_rd(il, CSR_INT);
+ _il_wr(il, CSR_INT, inta);
+
+ /* Ack/clear/reset pending flow-handler (DMA) interrupts.
+ * Any new interrupts that happen after this, either while we're
+ * in this tasklet, or later, will show up in next ISR/tasklet. */
+ inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
+ _il_wr(il, CSR_FH_INT_STATUS, inta_fh);
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+ if (il_get_debug_level(il) & IL_DL_ISR) {
+ /* just for debug */
+ inta_mask = _il_rd(il, CSR_INT_MASK);
+ D_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", inta,
+ inta_mask, inta_fh);
+ }
+#endif
+
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
+ * atomic, make sure that inta covers all the interrupts that
+ * we've discovered, even if FH interrupt came in just after
+ * reading CSR_INT. */
+ if (inta_fh & CSR39_FH_INT_RX_MASK)
+ inta |= CSR_INT_BIT_FH_RX;
+ if (inta_fh & CSR39_FH_INT_TX_MASK)
+ inta |= CSR_INT_BIT_FH_TX;
+
+ /* Now service all interrupt bits discovered above. */
+ if (inta & CSR_INT_BIT_HW_ERR) {
+ IL_ERR("Hardware error detected. Restarting.\n");
+
+ /* Tell the device to stop sending interrupts */
+ il_disable_interrupts(il);
+
+ il->isr_stats.hw++;
+ il_irq_handle_error(il);
+
+ handled |= CSR_INT_BIT_HW_ERR;
+
+ return;
+ }
+#ifdef CONFIG_IWLEGACY_DEBUG
+ if (il_get_debug_level(il) & (IL_DL_ISR)) {
+ /* NIC fires this, but we don't use it, redundant with WAKEUP */
+ if (inta & CSR_INT_BIT_SCD) {
+ D_ISR("Scheduler finished to transmit "
+ "the frame/frames.\n");
+ il->isr_stats.sch++;
+ }
+
+ /* Alive notification via Rx interrupt will do the real work */
+ if (inta & CSR_INT_BIT_ALIVE) {
+ D_ISR("Alive interrupt\n");
+ il->isr_stats.alive++;
+ }
+ }
+#endif
+ /* Safely ignore these bits for debug checks below */
+ inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
+
+ /* Error detected by uCode */
+ if (inta & CSR_INT_BIT_SW_ERR) {
+ IL_ERR("Microcode SW error detected. " "Restarting 0x%X.\n",
+ inta);
+ il->isr_stats.sw++;
+ il_irq_handle_error(il);
+ handled |= CSR_INT_BIT_SW_ERR;
+ }
+
+ /* uCode wakes up after power-down sleep */
+ if (inta & CSR_INT_BIT_WAKEUP) {
+ D_ISR("Wakeup interrupt\n");
+ il_rx_queue_update_write_ptr(il, &il->rxq);
+ il_txq_update_write_ptr(il, &il->txq[0]);
+ il_txq_update_write_ptr(il, &il->txq[1]);
+ il_txq_update_write_ptr(il, &il->txq[2]);
+ il_txq_update_write_ptr(il, &il->txq[3]);
+ il_txq_update_write_ptr(il, &il->txq[4]);
+ il_txq_update_write_ptr(il, &il->txq[5]);
+
+ il->isr_stats.wakeup++;
+ handled |= CSR_INT_BIT_WAKEUP;
+ }
+
+ /* All uCode command responses, including Tx command responses,
+ * Rx "responses" (frame-received notification), and other
+ * notifications from uCode come through here*/
+ if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
+ il3945_rx_handle(il);
+ il->isr_stats.rx++;
+ handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
+ }
+
+ if (inta & CSR_INT_BIT_FH_TX) {
+ D_ISR("Tx interrupt\n");
+ il->isr_stats.tx++;
+
+ _il_wr(il, CSR_FH_INT_STATUS, (1 << 6));
+ il_wr(il, FH39_TCSR_CREDIT(FH39_SRVC_CHNL), 0x0);
+ handled |= CSR_INT_BIT_FH_TX;
+ }
+
+ if (inta & ~handled) {
+ IL_ERR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
+ il->isr_stats.unhandled++;
+ }
+
+ if (inta & ~il->inta_mask) {
+ IL_WARN("Disabled INTA bits 0x%08x were pending\n",
+ inta & ~il->inta_mask);
+ IL_WARN(" with inta_fh = 0x%08x\n", inta_fh);
+ }
+
+ /* Re-enable all interrupts */
+ /* only Re-enable if disabled by irq */
+ if (test_bit(S_INT_ENABLED, &il->status))
+ il_enable_interrupts(il);
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+ if (il_get_debug_level(il) & (IL_DL_ISR)) {
+ inta = _il_rd(il, CSR_INT);
+ inta_mask = _il_rd(il, CSR_INT_MASK);
+ inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
+ D_ISR("End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
+ "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
+ }
+#endif
+}
+
+static int
+il3945_get_channels_for_scan(struct il_priv *il, enum ieee80211_band band,
+ u8 is_active, u8 n_probes,
+ struct il3945_scan_channel *scan_ch,
+ struct ieee80211_vif *vif)
+{
+ struct ieee80211_channel *chan;
+ const struct ieee80211_supported_band *sband;
+ const struct il_channel_info *ch_info;
+ u16 passive_dwell = 0;
+ u16 active_dwell = 0;
+ int added, i;
+
+ sband = il_get_hw_mode(il, band);
+ if (!sband)
+ return 0;
+
+ active_dwell = il_get_active_dwell_time(il, band, n_probes);
+ passive_dwell = il_get_passive_dwell_time(il, band, vif);
+
+ if (passive_dwell <= active_dwell)
+ passive_dwell = active_dwell + 1;
+
+ for (i = 0, added = 0; i < il->scan_request->n_channels; i++) {
+ chan = il->scan_request->channels[i];
+
+ if (chan->band != band)
+ continue;
+
+ scan_ch->channel = chan->hw_value;
+
+ ch_info = il_get_channel_info(il, band, scan_ch->channel);
+ if (!il_is_channel_valid(ch_info)) {
+ D_SCAN("Channel %d is INVALID for this band.\n",
+ scan_ch->channel);
+ continue;
+ }
+
+ scan_ch->active_dwell = cpu_to_le16(active_dwell);
+ scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
+ /* If passive, set up for auto-switch
+ * and use long active_dwell time.
+ */
+ if (!is_active || il_is_channel_passive(ch_info) ||
+ (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)) {
+ scan_ch->type = 0; /* passive */
+ if (IL_UCODE_API(il->ucode_ver) == 1)
+ scan_ch->active_dwell =
+ cpu_to_le16(passive_dwell - 1);
+ } else {
+ scan_ch->type = 1; /* active */
+ }
+
+ /* Set direct probe bits. These may be used both for active
+ * scan channels (probes get sent right away),
+ * or for passive channels (probes get sent only after
+ * hearing a clear Rx packet). */
+ if (IL_UCODE_API(il->ucode_ver) >= 2) {
+ if (n_probes)
+ scan_ch->type |= IL39_SCAN_PROBE_MASK(n_probes);
+ } else {
+ /* uCode v1 does not allow setting direct probe bits on
+ * passive channel. */
+ if ((scan_ch->type & 1) && n_probes)
+ scan_ch->type |= IL39_SCAN_PROBE_MASK(n_probes);
+ }
+
+ /* Set txpower levels to defaults */
+ scan_ch->tpc.dsp_atten = 110;
+ /* scan_pwr_info->tpc.dsp_atten; */
+
+ /*scan_pwr_info->tpc.tx_gain; */
+ if (band == IEEE80211_BAND_5GHZ)
+ scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3;
+ else {
+ scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3));
+ /* NOTE: if we were doing 6Mb OFDM for scans we'd use
+ * power level:
+ * scan_ch->tpc.tx_gain = ((1 << 5) | (2 << 3)) | 3;
+ */
+ }
+
+ D_SCAN("Scanning %d [%s %d]\n", scan_ch->channel,
+ (scan_ch->type & 1) ? "ACTIVE" : "PASSIVE",
+ (scan_ch->type & 1) ? active_dwell : passive_dwell);
+
+ scan_ch++;
+ added++;
+ }
+
+ D_SCAN("total channels to scan %d\n", added);
+ return added;
+}
+
+static void
+il3945_init_hw_rates(struct il_priv *il, struct ieee80211_rate *rates)
+{
+ int i;
+
+ for (i = 0; i < RATE_COUNT_LEGACY; i++) {
+ rates[i].bitrate = il3945_rates[i].ieee * 5;
+ rates[i].hw_value = i; /* Rate scaling will work on idxes */
+ rates[i].hw_value_short = i;
+ rates[i].flags = 0;
+ if (i > IL39_LAST_OFDM_RATE || i < IL_FIRST_OFDM_RATE) {
+ /*
+ * If CCK != 1M then set short preamble rate flag.
+ */
+ rates[i].flags |=
+ (il3945_rates[i].plcp ==
+ 10) ? 0 : IEEE80211_RATE_SHORT_PREAMBLE;
+ }
+ }
+}
+
+/******************************************************************************
+ *
+ * uCode download functions
+ *
+ ******************************************************************************/
+
+static void
+il3945_dealloc_ucode_pci(struct il_priv *il)
+{
+ il_free_fw_desc(il->pci_dev, &il->ucode_code);
+ il_free_fw_desc(il->pci_dev, &il->ucode_data);
+ il_free_fw_desc(il->pci_dev, &il->ucode_data_backup);
+ il_free_fw_desc(il->pci_dev, &il->ucode_init);
+ il_free_fw_desc(il->pci_dev, &il->ucode_init_data);
+ il_free_fw_desc(il->pci_dev, &il->ucode_boot);
+}
+
+/**
+ * il3945_verify_inst_full - verify runtime uCode image in card vs. host,
+ * looking at all data.
+ */
+static int
+il3945_verify_inst_full(struct il_priv *il, __le32 * image, u32 len)
+{
+ u32 val;
+ u32 save_len = len;
+ int rc = 0;
+ u32 errcnt;
+
+ D_INFO("ucode inst image size is %u\n", len);
+
+ il_wr(il, HBUS_TARG_MEM_RADDR, IL39_RTC_INST_LOWER_BOUND);
+
+ errcnt = 0;
+ for (; len > 0; len -= sizeof(u32), image++) {
+ /* read data comes through single port, auto-incr addr */
+ /* NOTE: Use the debugless read so we don't flood kernel log
+ * if IL_DL_IO is set */
+ val = _il_rd(il, HBUS_TARG_MEM_RDAT);
+ if (val != le32_to_cpu(*image)) {
+ IL_ERR("uCode INST section is invalid at "
+ "offset 0x%x, is 0x%x, s/b 0x%x\n",
+ save_len - len, val, le32_to_cpu(*image));
+ rc = -EIO;
+ errcnt++;
+ if (errcnt >= 20)
+ break;
+ }
+ }
+
+ if (!errcnt)
+ D_INFO("ucode image in INSTRUCTION memory is good\n");
+
+ return rc;
+}
+
+/**
+ * il3945_verify_inst_sparse - verify runtime uCode image in card vs. host,
+ * using sample data 100 bytes apart. If these sample points are good,
+ * it's a pretty good bet that everything between them is good, too.
+ */
+static int
+il3945_verify_inst_sparse(struct il_priv *il, __le32 * image, u32 len)
+{
+ u32 val;
+ int rc = 0;
+ u32 errcnt = 0;
+ u32 i;
+
+ D_INFO("ucode inst image size is %u\n", len);
+
+ for (i = 0; i < len; i += 100, image += 100 / sizeof(u32)) {
+ /* read data comes through single port, auto-incr addr */
+ /* NOTE: Use the debugless read so we don't flood kernel log
+ * if IL_DL_IO is set */
+ il_wr(il, HBUS_TARG_MEM_RADDR, i + IL39_RTC_INST_LOWER_BOUND);
+ val = _il_rd(il, HBUS_TARG_MEM_RDAT);
+ if (val != le32_to_cpu(*image)) {
+#if 0 /* Enable this if you want to see details */
+ IL_ERR("uCode INST section is invalid at "
+ "offset 0x%x, is 0x%x, s/b 0x%x\n", i, val,
+ *image);
+#endif
+ rc = -EIO;
+ errcnt++;
+ if (errcnt >= 3)
+ break;
+ }
+ }
+
+ return rc;
+}
+
+/**
+ * il3945_verify_ucode - determine which instruction image is in SRAM,
+ * and verify its contents
+ */
+static int
+il3945_verify_ucode(struct il_priv *il)
+{
+ __le32 *image;
+ u32 len;
+ int rc = 0;
+
+ /* Try bootstrap */
+ image = (__le32 *) il->ucode_boot.v_addr;
+ len = il->ucode_boot.len;
+ rc = il3945_verify_inst_sparse(il, image, len);
+ if (rc == 0) {
+ D_INFO("Bootstrap uCode is good in inst SRAM\n");
+ return 0;
+ }
+
+ /* Try initialize */
+ image = (__le32 *) il->ucode_init.v_addr;
+ len = il->ucode_init.len;
+ rc = il3945_verify_inst_sparse(il, image, len);
+ if (rc == 0) {
+ D_INFO("Initialize uCode is good in inst SRAM\n");
+ return 0;
+ }
+
+ /* Try runtime/protocol */
+ image = (__le32 *) il->ucode_code.v_addr;
+ len = il->ucode_code.len;
+ rc = il3945_verify_inst_sparse(il, image, len);
+ if (rc == 0) {
+ D_INFO("Runtime uCode is good in inst SRAM\n");
+ return 0;
+ }
+
+ IL_ERR("NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
+
+ /* Since nothing seems to match, show first several data entries in
+ * instruction SRAM, so maybe visual inspection will give a clue.
+ * Selection of bootstrap image (vs. other images) is arbitrary. */
+ image = (__le32 *) il->ucode_boot.v_addr;
+ len = il->ucode_boot.len;
+ rc = il3945_verify_inst_full(il, image, len);
+
+ return rc;
+}
+
+static void
+il3945_nic_start(struct il_priv *il)
+{
+ /* Remove all resets to allow NIC to operate */
+ _il_wr(il, CSR_RESET, 0);
+}
+
+#define IL3945_UCODE_GET(item) \
+static u32 il3945_ucode_get_##item(const struct il_ucode_header *ucode)\
+{ \
+ return le32_to_cpu(ucode->v1.item); \
+}
+
+static u32
+il3945_ucode_get_header_size(u32 api_ver)
+{
+ return 24;
+}
+
+static u8 *
+il3945_ucode_get_data(const struct il_ucode_header *ucode)
+{
+ return (u8 *) ucode->v1.data;
+}
+
+IL3945_UCODE_GET(inst_size);
+IL3945_UCODE_GET(data_size);
+IL3945_UCODE_GET(init_size);
+IL3945_UCODE_GET(init_data_size);
+IL3945_UCODE_GET(boot_size);
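+
+/*
+ * For reference, each IL3945_UCODE_GET() use above expands to a trivial
+ * accessor of the v1 header, e.g. IL3945_UCODE_GET(inst_size) becomes:
+ *
+ * static u32 il3945_ucode_get_inst_size(const struct il_ucode_header *ucode)
+ * {
+ * return le32_to_cpu(ucode->v1.inst_size);
+ * }
+ */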
+
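+/*
+ * On-disk layout handled by il3945_read_ucode() below (all sizes come
+ * from the v1 header, which il3945_ucode_get_header_size() reports as
+ * 24 bytes):
+ *
+ * [header][runtime inst][runtime data][init inst][init data][boot inst]
+ *
+ * The total file size must equal the header size plus the five block
+ * sizes, which is exactly what the check against ucode_raw->size
+ * verifies.
+ */
+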
+/**
+ * il3945_read_ucode - Read uCode images from disk file.
+ *
+ * Copy into buffers for card to fetch via bus-mastering
+ */
+static int
+il3945_read_ucode(struct il_priv *il)
+{
+ const struct il_ucode_header *ucode;
+ int ret = -EINVAL, idx;
+ const struct firmware *ucode_raw;
+ /* firmware file name contains uCode/driver compatibility version */
+ const char *name_pre = il->cfg->fw_name_pre;
+ const unsigned int api_max = il->cfg->ucode_api_max;
+ const unsigned int api_min = il->cfg->ucode_api_min;
+ char buf[25];
+ u8 *src;
+ size_t len;
+ u32 api_ver, inst_size, data_size, init_size, init_data_size, boot_size;
+
+ /* Ask kernel firmware_class module to get the boot firmware off disk.
+ * request_firmware() is synchronous, file is in memory on return. */
+ for (idx = api_max; idx >= api_min; idx--) {
+ sprintf(buf, "%s%u%s", name_pre, idx, ".ucode");
+ ret = request_firmware(&ucode_raw, buf, &il->pci_dev->dev);
+ if (ret < 0) {
+ IL_ERR("%s firmware file req failed: %d\n", buf, ret);
+ if (ret == -ENOENT)
+ continue;
+ else
+ goto error;
+ } else {
+ if (idx < api_max)
+ IL_ERR("Loaded firmware %s, "
+ "which is deprecated. "
+ " Please use API v%u instead.\n", buf,
+ api_max);
+ D_INFO("Got firmware '%s' file "
+ "(%zd bytes) from disk\n", buf, ucode_raw->size);
+ break;
+ }
+ }
+
+ if (ret < 0)
+ goto error;
+
+ /* Make sure that we got at least our header! */
+ if (ucode_raw->size < il3945_ucode_get_header_size(1)) {
+ IL_ERR("File size way too small!\n");
+ ret = -EINVAL;
+ goto err_release;
+ }
+
+ /* Data from ucode file: header followed by uCode images */
+ ucode = (struct il_ucode_header *)ucode_raw->data;
+
+ il->ucode_ver = le32_to_cpu(ucode->ver);
+ api_ver = IL_UCODE_API(il->ucode_ver);
+ inst_size = il3945_ucode_get_inst_size(ucode);
+ data_size = il3945_ucode_get_data_size(ucode);
+ init_size = il3945_ucode_get_init_size(ucode);
+ init_data_size = il3945_ucode_get_init_data_size(ucode);
+ boot_size = il3945_ucode_get_boot_size(ucode);
+ src = il3945_ucode_get_data(ucode);
+
+ /* api_ver should match the api version forming part of the
+ * firmware filename ... but we don't check for that and only rely
+ * on the API version read from firmware header from here on forward */
+
+ if (api_ver < api_min || api_ver > api_max) {
+ IL_ERR("Driver unable to support your firmware API. "
+ "Driver supports v%u, firmware is v%u.\n", api_max,
+ api_ver);
+ il->ucode_ver = 0;
+ ret = -EINVAL;
+ goto err_release;
+ }
+ if (api_ver != api_max)
+ IL_ERR("Firmware has old API version. Expected %u, "
+ "got %u. New firmware can be obtained "
+ "from http://www.intellinuxwireless.org.\n", api_max,
+ api_ver);
+
+ IL_INFO("loaded firmware version %u.%u.%u.%u\n",
+ IL_UCODE_MAJOR(il->ucode_ver), IL_UCODE_MINOR(il->ucode_ver),
+ IL_UCODE_API(il->ucode_ver), IL_UCODE_SERIAL(il->ucode_ver));
+
+ snprintf(il->hw->wiphy->fw_version, sizeof(il->hw->wiphy->fw_version),
+ "%u.%u.%u.%u", IL_UCODE_MAJOR(il->ucode_ver),
+ IL_UCODE_MINOR(il->ucode_ver), IL_UCODE_API(il->ucode_ver),
+ IL_UCODE_SERIAL(il->ucode_ver));
+
+ D_INFO("f/w package hdr ucode version raw = 0x%x\n", il->ucode_ver);
+ D_INFO("f/w package hdr runtime inst size = %u\n", inst_size);
+ D_INFO("f/w package hdr runtime data size = %u\n", data_size);
+ D_INFO("f/w package hdr init inst size = %u\n", init_size);
+ D_INFO("f/w package hdr init data size = %u\n", init_data_size);
+ D_INFO("f/w package hdr boot inst size = %u\n", boot_size);
+
+ /* Verify size of file vs. image size info in file's header */
+ if (ucode_raw->size !=
+ il3945_ucode_get_header_size(api_ver) + inst_size + data_size +
+ init_size + init_data_size + boot_size) {
+
+ D_INFO("uCode file size %zd does not match expected size\n",
+ ucode_raw->size);
+ ret = -EINVAL;
+ goto err_release;
+ }
+
+ /* Verify that uCode images will fit in card's SRAM */
+ if (inst_size > IL39_MAX_INST_SIZE) {
+ D_INFO("uCode instr len %d too large to fit in\n", inst_size);
+ ret = -EINVAL;
+ goto err_release;
+ }
+
+ if (data_size > IL39_MAX_DATA_SIZE) {
+ D_INFO("uCode data len %d too large to fit in\n", data_size);
+ ret = -EINVAL;
+ goto err_release;
+ }
+ if (init_size > IL39_MAX_INST_SIZE) {
+ D_INFO("uCode init instr len %d too large to fit in\n",
+ init_size);
+ ret = -EINVAL;
+ goto err_release;
+ }
+ if (init_data_size > IL39_MAX_DATA_SIZE) {
+ D_INFO("uCode init data len %d too large to fit in\n",
+ init_data_size);
+ ret = -EINVAL;
+ goto err_release;
+ }
+ if (boot_size > IL39_MAX_BSM_SIZE) {
+ D_INFO("uCode boot instr len %d too large to fit in\n",
+ boot_size);
+ ret = -EINVAL;
+ goto err_release;
+ }
+
+ /* Allocate ucode buffers for card's bus-master loading ... */
+
+ /* Runtime instructions and 2 copies of data:
+ * 1) unmodified from disk
+ * 2) backup cache for save/restore during power-downs */
+ il->ucode_code.len = inst_size;
+ il_alloc_fw_desc(il->pci_dev, &il->ucode_code);
+
+ il->ucode_data.len = data_size;
+ il_alloc_fw_desc(il->pci_dev, &il->ucode_data);
+
+ il->ucode_data_backup.len = data_size;
+ il_alloc_fw_desc(il->pci_dev, &il->ucode_data_backup);
+
+ if (!il->ucode_code.v_addr || !il->ucode_data.v_addr ||
+ !il->ucode_data_backup.v_addr)
+ goto err_pci_alloc;
+
+ /* Initialization instructions and data */
+ if (init_size && init_data_size) {
+ il->ucode_init.len = init_size;
+ il_alloc_fw_desc(il->pci_dev, &il->ucode_init);
+
+ il->ucode_init_data.len = init_data_size;
+ il_alloc_fw_desc(il->pci_dev, &il->ucode_init_data);
+
+ if (!il->ucode_init.v_addr || !il->ucode_init_data.v_addr)
+ goto err_pci_alloc;
+ }
+
+ /* Bootstrap (instructions only, no data) */
+ if (boot_size) {
+ il->ucode_boot.len = boot_size;
+ il_alloc_fw_desc(il->pci_dev, &il->ucode_boot);
+
+ if (!il->ucode_boot.v_addr)
+ goto err_pci_alloc;
+ }
+
+ /* Copy images into buffers for card's bus-master reads ... */
+
+ /* Runtime instructions (first block of data in file) */
+ len = inst_size;
+ D_INFO("Copying (but not loading) uCode instr len %zd\n", len);
+ memcpy(il->ucode_code.v_addr, src, len);
+ src += len;
+
+ D_INFO("uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
+ il->ucode_code.v_addr, (u32) il->ucode_code.p_addr);
+
+ /* Runtime data (2nd block)
+ * NOTE: Copy into backup buffer will be done in il3945_up() */
+ len = data_size;
+ D_INFO("Copying (but not loading) uCode data len %zd\n", len);
+ memcpy(il->ucode_data.v_addr, src, len);
+ memcpy(il->ucode_data_backup.v_addr, src, len);
+ src += len;
+
+ /* Initialization instructions (3rd block) */
+ if (init_size) {
+ len = init_size;
+ D_INFO("Copying (but not loading) init instr len %zd\n", len);
+ memcpy(il->ucode_init.v_addr, src, len);
+ src += len;
+ }
+
+ /* Initialization data (4th block) */
+ if (init_data_size) {
+ len = init_data_size;
+ D_INFO("Copying (but not loading) init data len %zd\n", len);
+ memcpy(il->ucode_init_data.v_addr, src, len);
+ src += len;
+ }
+
+ /* Bootstrap instructions (5th block) */
+ len = boot_size;
+ D_INFO("Copying (but not loading) boot instr len %zd\n", len);
+ memcpy(il->ucode_boot.v_addr, src, len);
+
+ /* We have our copies now, allow OS release its copies */
+ release_firmware(ucode_raw);
+ return 0;
+
+err_pci_alloc:
+ IL_ERR("failed to allocate pci memory\n");
+ ret = -ENOMEM;
+ il3945_dealloc_ucode_pci(il);
+
+err_release:
+ release_firmware(ucode_raw);
+
+error:
+ return ret;
+}
+
+/**
+ * il3945_set_ucode_ptrs - Set uCode address location
+ *
+ * Tell initialization uCode where to find runtime uCode.
+ *
+ * BSM registers initially contain pointers to initialization uCode.
+ * We need to replace them to load runtime uCode inst and data,
+ * and to save runtime data when powering down.
+ */
+static int
+il3945_set_ucode_ptrs(struct il_priv *il)
+{
+ dma_addr_t pinst;
+ dma_addr_t pdata;
+
+ /* bits 31:0 for 3945 */
+ pinst = il->ucode_code.p_addr;
+ pdata = il->ucode_data_backup.p_addr;
+
+ /* Tell bootstrap uCode where to find image to load */
+ il_wr_prph(il, BSM_DRAM_INST_PTR_REG, pinst);
+ il_wr_prph(il, BSM_DRAM_DATA_PTR_REG, pdata);
+ il_wr_prph(il, BSM_DRAM_DATA_BYTECOUNT_REG, il->ucode_data.len);
+
+ /* Inst byte count must be last to set up, bit 31 signals uCode
+ * that all new ptr/size info is in place */
+ il_wr_prph(il, BSM_DRAM_INST_BYTECOUNT_REG,
+ il->ucode_code.len | BSM_DRAM_INST_LOAD);
+
+ D_INFO("Runtime uCode pointers are set.\n");
+
+ return 0;
+}
+
+/**
+ * il3945_init_alive_start - Called after N_ALIVE notification received
+ *
+ * Called after N_ALIVE notification received from "initialize" uCode.
+ *
+ * Tell "initialize" uCode to go ahead and load the runtime uCode.
+ */
+static void
+il3945_init_alive_start(struct il_priv *il)
+{
+ /* Check alive response for "valid" sign from uCode */
+ if (il->card_alive_init.is_valid != UCODE_VALID_OK) {
+ /* We had an error bringing up the hardware, so take it
+ * all the way back down so we can try again */
+ D_INFO("Initialize Alive failed.\n");
+ goto restart;
+ }
+
+ /* Bootstrap uCode has loaded initialize uCode ... verify inst image.
+ * This is a paranoid check, because we would not have gotten the
+ * "initialize" alive if code weren't properly loaded. */
+ if (il3945_verify_ucode(il)) {
+ /* Runtime instruction load was bad;
+ * take it all the way back down so we can try again */
+ D_INFO("Bad \"initialize\" uCode load.\n");
+ goto restart;
+ }
+
+ /* Send pointers to protocol/runtime uCode image ... init code will
+ * load and launch runtime uCode, which will send us another "Alive"
+ * notification. */
+ D_INFO("Initialization Alive received.\n");
+ if (il3945_set_ucode_ptrs(il)) {
+ /* Runtime instruction load won't happen;
+ * take it all the way back down so we can try again */
+ D_INFO("Couldn't set up uCode pointers.\n");
+ goto restart;
+ }
+ return;
+
+restart:
+ queue_work(il->workqueue, &il->restart);
+}
+
+/**
+ * il3945_alive_start - called after N_ALIVE notification received
+ * from protocol/runtime uCode (initialization uCode's
+ * Alive gets handled by il3945_init_alive_start()).
+ */
+static void
+il3945_alive_start(struct il_priv *il)
+{
+ int thermal_spin = 0;
+ u32 rfkill;
+ struct il_rxon_context *ctx = &il->ctx;
+
+ D_INFO("Runtime Alive received.\n");
+
+ if (il->card_alive.is_valid != UCODE_VALID_OK) {
+ /* We had an error bringing up the hardware, so take it
+ * all the way back down so we can try again */
+ D_INFO("Alive failed.\n");
+ goto restart;
+ }
+
+ /* Initialize uCode has loaded Runtime uCode ... verify inst image.
+ * This is a paranoid check, because we would not have gotten the
+ * "runtime" alive if code weren't properly loaded. */
+ if (il3945_verify_ucode(il)) {
+ /* Runtime instruction load was bad;
+ * take it all the way back down so we can try again */
+ D_INFO("Bad runtime uCode load.\n");
+ goto restart;
+ }
+
+ rfkill = il_rd_prph(il, APMG_RFKILL_REG);
+ D_INFO("RFKILL status: 0x%x\n", rfkill);
+
+ if (rfkill & 0x1) {
+ clear_bit(S_RF_KILL_HW, &il->status);
+ /* if RFKILL is not on, then wait for thermal
+ * sensor in adapter to kick in */
+ while (il3945_hw_get_temperature(il) == 0) {
+ thermal_spin++;
+ udelay(10);
+ }
+
+ if (thermal_spin)
+ D_INFO("Thermal calibration took %dus\n",
+ thermal_spin * 10);
+ } else
+ set_bit(S_RF_KILL_HW, &il->status);
+
+ /* After the ALIVE response, we can send commands to 3945 uCode */
+ set_bit(S_ALIVE, &il->status);
+
+ /* Enable watchdog to monitor the driver tx queues */
+ il_setup_watchdog(il);
+
+ if (il_is_rfkill(il))
+ return;
+
+ ieee80211_wake_queues(il->hw);
+
+ il->active_rate = RATES_MASK_3945;
+
+ il_power_update_mode(il, true);
+
+ if (il_is_associated(il)) {
+ struct il3945_rxon_cmd *active_rxon =
+ (struct il3945_rxon_cmd *)(&ctx->active);
+
+ ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
+ active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ } else {
+ /* Initialize our rx_config data */
+ il_connection_init_rx_config(il, ctx);
+ }
+
+ /* Configure Bluetooth device coexistence support */
+ il_send_bt_config(il);
+
+ set_bit(S_READY, &il->status);
+
+ /* Configure the adapter for unassociated operation */
+ il3945_commit_rxon(il, ctx);
+
+ il3945_reg_txpower_periodic(il);
+
+ D_INFO("ALIVE processing complete.\n");
+ wake_up(&il->wait_command_queue);
+
+ return;
+
+restart:
+ queue_work(il->workqueue, &il->restart);
+}
+
+static void il3945_cancel_deferred_work(struct il_priv *il);
+
+static void
+__il3945_down(struct il_priv *il)
+{
+ unsigned long flags;
+ int exit_pending;
+
+ D_INFO(DRV_NAME " is going down\n");
+
+ il_scan_cancel_timeout(il, 200);
+
+ exit_pending = test_and_set_bit(S_EXIT_PENDING, &il->status);
+
+ /* Stop TX queues watchdog. We need to have S_EXIT_PENDING bit set
+ * to prevent rearm timer */
+ del_timer_sync(&il->watchdog);
+
+ /* Station information will now be cleared in device */
+ il_clear_ucode_stations(il, NULL);
+ il_dealloc_bcast_stations(il);
+ il_clear_driver_stations(il);
+
+ /* Unblock any waiting calls */
+ wake_up_all(&il->wait_command_queue);
+
+ /* Wipe out the EXIT_PENDING status bit if we are not actually
+ * exiting the module */
+ if (!exit_pending)
+ clear_bit(S_EXIT_PENDING, &il->status);
+
+ /* stop and reset the on-board processor */
+ _il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
+
+ /* tell the device to stop sending interrupts */
+ spin_lock_irqsave(&il->lock, flags);
+ il_disable_interrupts(il);
+ spin_unlock_irqrestore(&il->lock, flags);
+ il3945_synchronize_irq(il);
+
+ if (il->mac80211_registered)
+ ieee80211_stop_queues(il->hw);
+
+ /* If we have not previously called il3945_init() then
+ * clear all bits but the RF Kill bits and return */
+ if (!il_is_init(il)) {
+ il->status =
+     test_bit(S_RF_KILL_HW, &il->status) << S_RF_KILL_HW |
+     test_bit(S_GEO_CONFIGURED, &il->status) << S_GEO_CONFIGURED |
+     test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING;
+ goto exit;
+ }
+
+ /* ...otherwise clear out all the status bits but the RF Kill
+ * bit and continue taking the NIC down. */
+ il->status &=
+     test_bit(S_RF_KILL_HW, &il->status) << S_RF_KILL_HW |
+     test_bit(S_GEO_CONFIGURED, &il->status) << S_GEO_CONFIGURED |
+     test_bit(S_FW_ERROR, &il->status) << S_FW_ERROR |
+     test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING;
+
+ il3945_hw_txq_ctx_stop(il);
+ il3945_hw_rxq_stop(il);
+
+ /* Power-down device's busmaster DMA clocks */
+ il_wr_prph(il, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
+ udelay(5);
+
+ /* Stop the device, and put it in low power state */
+ il_apm_stop(il);
+
+exit:
+ memset(&il->card_alive, 0, sizeof(struct il_alive_resp));
+
+ if (il->beacon_skb)
+ dev_kfree_skb(il->beacon_skb);
+ il->beacon_skb = NULL;
+
+ /* clear out any free frames */
+ il3945_clear_free_frames(il);
+}
+
+static void
+il3945_down(struct il_priv *il)
+{
+ mutex_lock(&il->mutex);
+ __il3945_down(il);
+ mutex_unlock(&il->mutex);
+
+ il3945_cancel_deferred_work(il);
+}
+
+#define MAX_HW_RESTARTS 5
+
+static int
+il3945_alloc_bcast_station(struct il_priv *il)
+{
+ struct il_rxon_context *ctx = &il->ctx;
+ unsigned long flags;
+ u8 sta_id;
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ sta_id = il_prep_station(il, ctx, il_bcast_addr, false, NULL);
+ if (sta_id == IL_INVALID_STATION) {
+ IL_ERR("Unable to prepare broadcast station\n");
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ return -EINVAL;
+ }
+
+ il->stations[sta_id].used |= IL_STA_DRIVER_ACTIVE;
+ il->stations[sta_id].used |= IL_STA_BCAST;
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ return 0;
+}
+
+static int
+__il3945_up(struct il_priv *il)
+{
+ int rc, i;
+
+ rc = il3945_alloc_bcast_station(il);
+ if (rc)
+ return rc;
+
+ if (test_bit(S_EXIT_PENDING, &il->status)) {
+ IL_WARN("Exit pending; will not bring the NIC up\n");
+ return -EIO;
+ }
+
+ if (!il->ucode_data_backup.v_addr || !il->ucode_data.v_addr) {
+ IL_ERR("ucode not available for device bring up\n");
+ return -EIO;
+ }
+
+ /* If platform's RF_KILL switch is NOT set to KILL */
+ if (_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
+ clear_bit(S_RF_KILL_HW, &il->status);
+ else {
+ set_bit(S_RF_KILL_HW, &il->status);
+ IL_WARN("Radio disabled by HW RF Kill switch\n");
+ return -ENODEV;
+ }
+
+ _il_wr(il, CSR_INT, 0xFFFFFFFF);
+
+ rc = il3945_hw_nic_init(il);
+ if (rc) {
+ IL_ERR("Unable to int nic\n");
+ return rc;
+ }
+
+ /* make sure rfkill handshake bits are cleared */
+ _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+ _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+
+ /* clear (again), then enable host interrupts */
+ _il_wr(il, CSR_INT, 0xFFFFFFFF);
+ il_enable_interrupts(il);
+
+ /* really make sure rfkill handshake bits are cleared */
+ _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+ _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+
+ /* Copy original ucode data image from disk into backup cache.
+ * This will be used to initialize the on-board processor's
+ * data SRAM for a clean start when the runtime program first loads. */
+ memcpy(il->ucode_data_backup.v_addr, il->ucode_data.v_addr,
+ il->ucode_data.len);
+
+ /* We return success when we resume from suspend and rf_kill is on. */
+ if (test_bit(S_RF_KILL_HW, &il->status))
+ return 0;
+
+ for (i = 0; i < MAX_HW_RESTARTS; i++) {
+
+ /* load bootstrap state machine,
+ * load bootstrap program into processor's memory,
+ * prepare to load the "initialize" uCode */
+ rc = il->cfg->ops->lib->load_ucode(il);
+
+ if (rc) {
+ IL_ERR("Unable to set up bootstrap uCode: %d\n", rc);
+ continue;
+ }
+
+ /* start card; "initialize" will load runtime ucode */
+ il3945_nic_start(il);
+
+ D_INFO(DRV_NAME " is coming up\n");
+
+ return 0;
+ }
+
+ set_bit(S_EXIT_PENDING, &il->status);
+ __il3945_down(il);
+ clear_bit(S_EXIT_PENDING, &il->status);
+
+ /* tried to restart and config the device for as long as our
+ * patience could withstand */
+ IL_ERR("Unable to initialize device after %d attempts.\n", i);
+ return -EIO;
+}
+
+/*****************************************************************************
+ *
+ * Workqueue callbacks
+ *
+ *****************************************************************************/
+
+static void
+il3945_bg_init_alive_start(struct work_struct *data)
+{
+ struct il_priv *il =
+ container_of(data, struct il_priv, init_alive_start.work);
+
+ mutex_lock(&il->mutex);
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ goto out;
+
+ il3945_init_alive_start(il);
+out:
+ mutex_unlock(&il->mutex);
+}
+
+static void
+il3945_bg_alive_start(struct work_struct *data)
+{
+ struct il_priv *il =
+ container_of(data, struct il_priv, alive_start.work);
+
+ mutex_lock(&il->mutex);
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ goto out;
+
+ il3945_alive_start(il);
+out:
+ mutex_unlock(&il->mutex);
+}
+
+/*
+ * 3945 cannot interrupt driver when hardware rf kill switch toggles;
+ * driver must poll CSR_GP_CNTRL_REG register for change. This register
+ * *is* readable even when device has been SW_RESET into low power mode
+ * (e.g. during RF KILL).
+ */
+static void
+il3945_rfkill_poll(struct work_struct *data)
+{
+ struct il_priv *il =
+ container_of(data, struct il_priv, _3945.rfkill_poll.work);
+ bool old_rfkill = test_bit(S_RF_KILL_HW, &il->status);
+ bool new_rfkill =
+ !(_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
+
+ if (new_rfkill != old_rfkill) {
+ if (new_rfkill)
+ set_bit(S_RF_KILL_HW, &il->status);
+ else
+ clear_bit(S_RF_KILL_HW, &il->status);
+
+ wiphy_rfkill_set_hw_state(il->hw->wiphy, new_rfkill);
+
+ D_RF_KILL("RF_KILL bit toggled to %s.\n",
+ new_rfkill ? "disable radio" : "enable radio");
+ }
+
+ /* Keep this running, even if radio now enabled. This will be
+ * cancelled in mac_start() if system decides to start again */
+ queue_delayed_work(il->workqueue, &il->_3945.rfkill_poll,
+ round_jiffies_relative(2 * HZ));
+
+}
+
+int
+il3945_request_scan(struct il_priv *il, struct ieee80211_vif *vif)
+{
+ struct il_host_cmd cmd = {
+ .id = C_SCAN,
+ .len = sizeof(struct il3945_scan_cmd),
+ .flags = CMD_SIZE_HUGE,
+ };
+ struct il3945_scan_cmd *scan;
+ u8 n_probes = 0;
+ enum ieee80211_band band;
+ bool is_active = false;
+ int ret;
+ u16 len;
+
+ lockdep_assert_held(&il->mutex);
+
+ if (!il->scan_cmd) {
+ il->scan_cmd =
+ kmalloc(sizeof(struct il3945_scan_cmd) + IL_MAX_SCAN_SIZE,
+ GFP_KERNEL);
+ if (!il->scan_cmd) {
+ D_SCAN("Fail to allocate scan memory\n");
+ return -ENOMEM;
+ }
+ }
+ scan = il->scan_cmd;
+ memset(scan, 0, sizeof(struct il3945_scan_cmd) + IL_MAX_SCAN_SIZE);
+
+ scan->quiet_plcp_th = IL_PLCP_QUIET_THRESH;
+ scan->quiet_time = IL_ACTIVE_QUIET_TIME;
+
+ if (il_is_associated(il)) {
+ u16 interval;
+ u32 extra;
+ u32 suspend_time = 100;
+ u32 scan_suspend_time = 100;
+
+ D_INFO("Scanning while associated...\n");
+
+ interval = vif->bss_conf.beacon_int;
+
+ scan->suspend_time = 0;
+ scan->max_out_time = cpu_to_le32(200 * 1024);
+ if (!interval)
+ interval = suspend_time;
+ /*
+ * suspend time format:
+ * 0-19: beacon interval in usec (time before exec.)
+ * 20-23: 0
+ * 24-31: number of beacons (suspend between channels)
+ */
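+ /*
+ * Illustrative example of the computation below: with suspend_time = 100
+ * and a beacon interval of 100, extra = (100 / 100) << 24 = 0x01000000
+ * and the remainder term is (100 % 100) * 1024 = 0, so scan_suspend_time
+ * becomes 0xFF0FFFFF & 0x01000000 = 0x01000000, i.e. suspend for one
+ * beacon between channels with no extra usec.
+ */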
+
+ extra = (suspend_time / interval) << 24;
+ scan_suspend_time =
+ 0xFF0FFFFF & (extra | ((suspend_time % interval) * 1024));
+
+ scan->suspend_time = cpu_to_le32(scan_suspend_time);
+ D_SCAN("suspend_time 0x%X beacon interval %d\n",
+ scan_suspend_time, interval);
+ }
+
+ if (il->scan_request->n_ssids) {
+ int i, p = 0;
+ D_SCAN("Kicking off active scan\n");
+ for (i = 0; i < il->scan_request->n_ssids; i++) {
+ /* always does wildcard anyway */
+ if (!il->scan_request->ssids[i].ssid_len)
+ continue;
+ scan->direct_scan[p].id = WLAN_EID_SSID;
+ scan->direct_scan[p].len =
+ il->scan_request->ssids[i].ssid_len;
+ memcpy(scan->direct_scan[p].ssid,
+ il->scan_request->ssids[i].ssid,
+ il->scan_request->ssids[i].ssid_len);
+ n_probes++;
+ p++;
+ }
+ is_active = true;
+ } else
+ D_SCAN("Kicking off passive scan.\n");
+
+ /* We don't build a direct scan probe request; the uCode will do
+ * that based on the direct_mask added to each channel entry */
+ scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
+ scan->tx_cmd.sta_id = il->ctx.bcast_sta_id;
+ scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+
+ /* flags + rate selection */
+
+ switch (il->scan_band) {
+ case IEEE80211_BAND_2GHZ:
+ scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
+ scan->tx_cmd.rate = RATE_1M_PLCP;
+ band = IEEE80211_BAND_2GHZ;
+ break;
+ case IEEE80211_BAND_5GHZ:
+ scan->tx_cmd.rate = RATE_6M_PLCP;
+ band = IEEE80211_BAND_5GHZ;
+ break;
+ default:
+ IL_WARN("Invalid scan band\n");
+ return -EIO;
+ }
+
+ /*
+ * If active scanning is requested but a certain channel is marked
+ * passive, we can do active scanning on it if we detect transmissions.
+ * For passive-only scanning, disable switching to active on any channel.
+ */
+ scan->good_CRC_th =
+ is_active ? IL_GOOD_CRC_TH_DEFAULT : IL_GOOD_CRC_TH_NEVER;
+
+ len =
+ il_fill_probe_req(il, (struct ieee80211_mgmt *)scan->data,
+ vif->addr, il->scan_request->ie,
+ il->scan_request->ie_len,
+ IL_MAX_SCAN_SIZE - sizeof(*scan));
+ scan->tx_cmd.len = cpu_to_le16(len);
+
+ /* select Rx antennas */
+ scan->flags |= il3945_get_antenna_flags(il);
+
+ scan->channel_count =
+ il3945_get_channels_for_scan(il, band, is_active, n_probes,
+ (void *)&scan->data[len], vif);
+ if (scan->channel_count == 0) {
+ D_SCAN("channel count %d\n", scan->channel_count);
+ return -EIO;
+ }
+
+ cmd.len +=
+ le16_to_cpu(scan->tx_cmd.len) +
+ scan->channel_count * sizeof(struct il3945_scan_channel);
+ cmd.data = scan;
+ scan->len = cpu_to_le16(cmd.len);
+
+ set_bit(S_SCAN_HW, &il->status);
+ ret = il_send_cmd_sync(il, &cmd);
+ if (ret)
+ clear_bit(S_SCAN_HW, &il->status);
+ return ret;
+}
+
+void
+il3945_post_scan(struct il_priv *il)
+{
+ struct il_rxon_context *ctx = &il->ctx;
+
+ /*
+ * Since setting the RXON may have been deferred while
+ * performing the scan, fire one off if needed
+ */
+ if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
+ il3945_commit_rxon(il, ctx);
+}
+
+static void
+il3945_bg_restart(struct work_struct *data)
+{
+ struct il_priv *il = container_of(data, struct il_priv, restart);
+
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ return;
+
+ if (test_and_clear_bit(S_FW_ERROR, &il->status)) {
+ mutex_lock(&il->mutex);
+ il->ctx.vif = NULL;
+ il->is_open = 0;
+ mutex_unlock(&il->mutex);
+ il3945_down(il);
+ ieee80211_restart_hw(il->hw);
+ } else {
+ il3945_down(il);
+
+ mutex_lock(&il->mutex);
+ if (test_bit(S_EXIT_PENDING, &il->status)) {
+ mutex_unlock(&il->mutex);
+ return;
+ }
+
+ __il3945_up(il);
+ mutex_unlock(&il->mutex);
+ }
+}
+
+static void
+il3945_bg_rx_replenish(struct work_struct *data)
+{
+ struct il_priv *il = container_of(data, struct il_priv, rx_replenish);
+
+ mutex_lock(&il->mutex);
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ goto out;
+
+ il3945_rx_replenish(il);
+out:
+ mutex_unlock(&il->mutex);
+}
+
+void
+il3945_post_associate(struct il_priv *il)
+{
+ int rc = 0;
+ struct ieee80211_conf *conf = NULL;
+ struct il_rxon_context *ctx = &il->ctx;
+
+ if (!ctx->vif || !il->is_open)
+ return;
+
+ D_ASSOC("Associated as %d to: %pM\n", ctx->vif->bss_conf.aid,
+ ctx->active.bssid_addr);
+
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ return;
+
+ il_scan_cancel_timeout(il, 200);
+
+ conf = &il->hw->conf;
+
+ ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ il3945_commit_rxon(il, ctx);
+
+ rc = il_send_rxon_timing(il, ctx);
+ if (rc)
+ IL_WARN("C_RXON_TIMING failed - " "Attempting to continue.\n");
+
+ ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
+
+ ctx->staging.assoc_id = cpu_to_le16(ctx->vif->bss_conf.aid);
+
+ D_ASSOC("assoc id %d beacon interval %d\n", ctx->vif->bss_conf.aid,
+ ctx->vif->bss_conf.beacon_int);
+
+ if (ctx->vif->bss_conf.use_short_preamble)
+ ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
+ else
+ ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
+
+ if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
+ if (ctx->vif->bss_conf.use_short_slot)
+ ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
+ else
+ ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
+ }
+
+ il3945_commit_rxon(il, ctx);
+
+ switch (ctx->vif->type) {
+ case NL80211_IFTYPE_STATION:
+ il3945_rate_scale_init(il->hw, IL_AP_ID);
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ il3945_send_beacon_cmd(il);
+ break;
+ default:
+ IL_ERR("%s Should not be called in %d mode\n", __func__,
+ ctx->vif->type);
+ break;
+ }
+}
+
+/*****************************************************************************
+ *
+ * mac80211 entry point functions
+ *
+ *****************************************************************************/
+
+#define UCODE_READY_TIMEOUT (2 * HZ)
+
+static int
+il3945_mac_start(struct ieee80211_hw *hw)
+{
+ struct il_priv *il = hw->priv;
+ int ret;
+
+ D_MAC80211("enter\n");
+
+ /* we should be verifying the device is ready to be opened */
+ mutex_lock(&il->mutex);
+
+ /* fetch ucode file from disk, alloc and copy to bus-master buffers ...
+ * ucode filename and max sizes are card-specific. */
+
+ if (!il->ucode_code.len) {
+ ret = il3945_read_ucode(il);
+ if (ret) {
+ IL_ERR("Could not read microcode: %d\n", ret);
+ mutex_unlock(&il->mutex);
+ goto out_release_irq;
+ }
+ }
+
+ ret = __il3945_up(il);
+
+ mutex_unlock(&il->mutex);
+
+ if (ret)
+ goto out_release_irq;
+
+ D_INFO("Start UP work.\n");
+
+ /* Wait for START_ALIVE from ucode. Otherwise callbacks from
+ * mac80211 will not be run successfully. */
+ ret = wait_event_timeout(il->wait_command_queue,
+ test_bit(S_READY, &il->status),
+ UCODE_READY_TIMEOUT);
+ if (!ret) {
+ if (!test_bit(S_READY, &il->status)) {
+ IL_ERR("Wait for START_ALIVE timeout after %dms.\n",
+ jiffies_to_msecs(UCODE_READY_TIMEOUT));
+ ret = -ETIMEDOUT;
+ goto out_release_irq;
+ }
+ }
+
+ /* ucode is running and will send rfkill notifications,
+ * no need to poll the killswitch state anymore */
+ cancel_delayed_work(&il->_3945.rfkill_poll);
+
+ il->is_open = 1;
+ D_MAC80211("leave\n");
+ return 0;
+
+out_release_irq:
+ il->is_open = 0;
+ D_MAC80211("leave - failed\n");
+ return ret;
+}
+
+static void
+il3945_mac_stop(struct ieee80211_hw *hw)
+{
+ struct il_priv *il = hw->priv;
+
+ D_MAC80211("enter\n");
+
+ if (!il->is_open) {
+ D_MAC80211("leave - skip\n");
+ return;
+ }
+
+ il->is_open = 0;
+
+ il3945_down(il);
+
+ flush_workqueue(il->workqueue);
+
+ /* start polling the killswitch state again */
+ queue_delayed_work(il->workqueue, &il->_3945.rfkill_poll,
+ round_jiffies_relative(2 * HZ));
+
+ D_MAC80211("leave\n");
+}
+
+static void
+il3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+ struct il_priv *il = hw->priv;
+
+ D_MAC80211("enter\n");
+
+ D_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
+ ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
+
+ if (il3945_tx_skb(il, skb))
+ dev_kfree_skb_any(skb);
+
+ D_MAC80211("leave\n");
+}
+
+void
+il3945_config_ap(struct il_priv *il)
+{
+ struct il_rxon_context *ctx = &il->ctx;
+ struct ieee80211_vif *vif = ctx->vif;
+ int rc = 0;
+
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ return;
+
+ /* The following should be done only at AP bring up */
+ if (!(il_is_associated(il))) {
+
+ /* RXON - unassoc (to set timing command) */
+ ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ il3945_commit_rxon(il, ctx);
+
+ /* RXON Timing */
+ rc = il_send_rxon_timing(il, ctx);
+ if (rc)
+ IL_WARN("C_RXON_TIMING failed - "
+ "Attempting to continue.\n");
+
+ ctx->staging.assoc_id = 0;
+
+ if (vif->bss_conf.use_short_preamble)
+ ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
+ else
+ ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
+
+ if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
+ if (vif->bss_conf.use_short_slot)
+ ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
+ else
+ ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
+ }
+ /* restore RXON assoc */
+ ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
+ il3945_commit_rxon(il, ctx);
+ }
+ il3945_send_beacon_cmd(il);
+}
+
+static int
+il3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct il_priv *il = hw->priv;
+ int ret = 0;
+ u8 sta_id = IL_INVALID_STATION;
+ u8 static_key;
+
+ D_MAC80211("enter\n");
+
+ if (il3945_mod_params.sw_crypto) {
+ D_MAC80211("leave - hwcrypto disabled\n");
+ return -EOPNOTSUPP;
+ }
+
+ /*
+ * To support IBSS RSN, don't program group keys in IBSS; the
+ * hardware will then not attempt to decrypt the frames.
+ */
+ if (vif->type == NL80211_IFTYPE_ADHOC &&
+ !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ return -EOPNOTSUPP;
+
+ static_key = !il_is_associated(il);
+
+ if (!static_key) {
+ sta_id = il_sta_id_or_broadcast(il, &il->ctx, sta);
+ if (sta_id == IL_INVALID_STATION)
+ return -EINVAL;
+ }
+
+ mutex_lock(&il->mutex);
+ il_scan_cancel_timeout(il, 100);
+
+ switch (cmd) {
+ case SET_KEY:
+ if (static_key)
+ ret = il3945_set_static_key(il, key);
+ else
+ ret = il3945_set_dynamic_key(il, key, sta_id);
+ D_MAC80211("enable hwcrypto key\n");
+ break;
+ case DISABLE_KEY:
+ if (static_key)
+ ret = il3945_remove_static_key(il);
+ else
+ ret = il3945_clear_sta_key_info(il, sta_id);
+ D_MAC80211("disable hwcrypto key\n");
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&il->mutex);
+ D_MAC80211("leave\n");
+
+ return ret;
+}
+
+static int
+il3945_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct il_priv *il = hw->priv;
+ struct il3945_sta_priv *sta_priv = (void *)sta->drv_priv;
+ int ret;
+ bool is_ap = vif->type == NL80211_IFTYPE_STATION;
+ u8 sta_id;
+
+ D_INFO("received request to add station %pM\n", sta->addr);
+ mutex_lock(&il->mutex);
+ D_INFO("proceeding to add station %pM\n", sta->addr);
+ sta_priv->common.sta_id = IL_INVALID_STATION;
+
+ ret =
+ il_add_station_common(il, &il->ctx, sta->addr, is_ap, sta, &sta_id);
+ if (ret) {
+ IL_ERR("Unable to add station %pM (%d)\n", sta->addr, ret);
+ /* Should we return success if return code is EEXIST ? */
+ mutex_unlock(&il->mutex);
+ return ret;
+ }
+
+ sta_priv->common.sta_id = sta_id;
+
+ /* Initialize rate scaling */
+ D_INFO("Initializing rate scaling for station %pM\n", sta->addr);
+ il3945_rs_rate_init(il, sta, sta_id);
+ mutex_unlock(&il->mutex);
+
+ return 0;
+}
+
+static void
+il3945_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
+ unsigned int *total_flags, u64 multicast)
+{
+ struct il_priv *il = hw->priv;
+ __le32 filter_or = 0, filter_nand = 0;
+ struct il_rxon_context *ctx = &il->ctx;
+
+#define CHK(test, flag) do { \
+ if (*total_flags & (test)) \
+ filter_or |= (flag); \
+ else \
+ filter_nand |= (flag); \
+ } while (0)
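+ /*
+ * For example, when FIF_CONTROL is set in *total_flags, CHK() below adds
+ * RXON_FILTER_CTL2HOST_MSK to filter_or; otherwise that bit is collected
+ * in filter_nand and cleared from the staging filter flags.
+ */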
+
+ D_MAC80211("Enter: changed: 0x%x, total: 0x%x\n", changed_flags,
+ *total_flags);
+
+ CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
+ CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK);
+ CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
+
+#undef CHK
+
+ mutex_lock(&il->mutex);
+
+ ctx->staging.filter_flags &= ~filter_nand;
+ ctx->staging.filter_flags |= filter_or;
+
+ /*
+ * Not committing directly because the hardware may be performing a
+ * scan, and even when hw is ready, committing here breaks for some
+ * reason; we'll eventually commit the filter flags change anyway.
+ */
+
+ mutex_unlock(&il->mutex);
+
+ /*
+ * Receiving all multicast frames is always enabled by the
+ * default flags setup in il_connection_init_rx_config()
+ * since we currently do not support programming multicast
+ * filters into the device.
+ */
+ *total_flags &=
+ FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
+ FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
+}
+
+/*****************************************************************************
+ *
+ * sysfs attributes
+ *
+ *****************************************************************************/
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+
+/*
+ * The following adds a new attribute to the sysfs representation
+ * of this device driver (i.e. a new file in /sys/bus/pci/drivers/iwl/)
+ * used for controlling the debug level.
+ *
+ * See the level definitions in iwl for details.
+ *
+ * The debug_level being managed using sysfs below is a per device debug
+ * level that is used instead of the global debug level if it (the per
+ * device debug level) is set.
+ */
+static ssize_t
+il3945_show_debug_level(struct device *d, struct device_attribute *attr,
+ char *buf)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+ return sprintf(buf, "0x%08X\n", il_get_debug_level(il));
+}
+
+static ssize_t
+il3945_store_debug_level(struct device *d, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+ unsigned long val;
+ int ret;
+
+ ret = strict_strtoul(buf, 0, &val);
+ if (ret)
+ IL_INFO("%s is not in hex or decimal form.\n", buf);
+ else {
+ il->debug_level = val;
+ if (il_alloc_traffic_mem(il))
+ IL_ERR("Not enough memory to generate traffic log\n");
+ }
+ return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO, il3945_show_debug_level,
+ il3945_store_debug_level);
+
+#endif /* CONFIG_IWLEGACY_DEBUG */
+
+static ssize_t
+il3945_show_temperature(struct device *d, struct device_attribute *attr,
+ char *buf)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+
+ if (!il_is_alive(il))
+ return -EAGAIN;
+
+ return sprintf(buf, "%d\n", il3945_hw_get_temperature(il));
+}
+
+static DEVICE_ATTR(temperature, S_IRUGO, il3945_show_temperature, NULL);
+
+static ssize_t
+il3945_show_tx_power(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+ return sprintf(buf, "%d\n", il->tx_power_user_lmt);
+}
+
+static ssize_t
+il3945_store_tx_power(struct device *d, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+ char *p = (char *)buf;
+ u32 val;
+
+ val = simple_strtoul(p, &p, 10);
+ if (p == buf)
+ IL_INFO(": %s is not in decimal form.\n", buf);
+ else
+ il3945_hw_reg_set_txpower(il, val);
+
+ return count;
+}
+
+static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, il3945_show_tx_power,
+ il3945_store_tx_power);
+
+static ssize_t
+il3945_show_flags(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+ struct il_rxon_context *ctx = &il->ctx;
+
+ return sprintf(buf, "0x%04X\n", ctx->active.flags);
+}
+
+static ssize_t
+il3945_store_flags(struct device *d, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+ u32 flags = simple_strtoul(buf, NULL, 0);
+ struct il_rxon_context *ctx = &il->ctx;
+
+ mutex_lock(&il->mutex);
+ if (le32_to_cpu(ctx->staging.flags) != flags) {
+ /* Cancel any currently running scans... */
+ if (il_scan_cancel_timeout(il, 100))
+ IL_WARN("Could not cancel scan.\n");
+ else {
+ D_INFO("Committing rxon.flags = 0x%04X\n", flags);
+ ctx->staging.flags = cpu_to_le32(flags);
+ il3945_commit_rxon(il, ctx);
+ }
+ }
+ mutex_unlock(&il->mutex);
+
+ return count;
+}
+
+static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, il3945_show_flags,
+ il3945_store_flags);
+
+static ssize_t
+il3945_show_filter_flags(struct device *d, struct device_attribute *attr,
+ char *buf)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+ struct il_rxon_context *ctx = &il->ctx;
+
+ return sprintf(buf, "0x%04X\n", le32_to_cpu(ctx->active.filter_flags));
+}
+
+static ssize_t
+il3945_store_filter_flags(struct device *d, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+ struct il_rxon_context *ctx = &il->ctx;
+ u32 filter_flags = simple_strtoul(buf, NULL, 0);
+
+ mutex_lock(&il->mutex);
+ if (le32_to_cpu(ctx->staging.filter_flags) != filter_flags) {
+ /* Cancel any currently running scans... */
+ if (il_scan_cancel_timeout(il, 100))
+ IL_WARN("Could not cancel scan.\n");
+ else {
+ D_INFO("Committing rxon.filter_flags = " "0x%04X\n",
+ filter_flags);
+ ctx->staging.filter_flags = cpu_to_le32(filter_flags);
+ il3945_commit_rxon(il, ctx);
+ }
+ }
+ mutex_unlock(&il->mutex);
+
+ return count;
+}
+
+static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, il3945_show_filter_flags,
+ il3945_store_filter_flags);
+
+static ssize_t
+il3945_show_measurement(struct device *d, struct device_attribute *attr,
+ char *buf)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+ struct il_spectrum_notification measure_report;
+ u32 size = sizeof(measure_report), len = 0, ofs = 0;
+ u8 *data = (u8 *) &measure_report;
+ unsigned long flags;
+
+ spin_lock_irqsave(&il->lock, flags);
+ if (!(il->measurement_status & MEASUREMENT_READY)) {
+ spin_unlock_irqrestore(&il->lock, flags);
+ return 0;
+ }
+ memcpy(&measure_report, &il->measure_report, size);
+ il->measurement_status = 0;
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ while (size && PAGE_SIZE - len) {
+ hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
+ PAGE_SIZE - len, 1);
+ len = strlen(buf);
+ if (PAGE_SIZE - len)
+ buf[len++] = '\n';
+
+ ofs += 16;
+ size -= min(size, 16U);
+ }
+
+ return len;
+}
+
+static ssize_t
+il3945_store_measurement(struct device *d, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+ struct il_rxon_context *ctx = &il->ctx;
+ struct ieee80211_measurement_params params = {
+ .channel = le16_to_cpu(ctx->active.channel),
+ .start_time = cpu_to_le64(il->_3945.last_tsf),
+ .duration = cpu_to_le16(1),
+ };
+ u8 type = IL_MEASURE_BASIC;
+ u8 buffer[32];
+ u8 channel;
+
+ if (count) {
+ char *p = buffer;
+ strncpy(buffer, buf, min(sizeof(buffer), count));
+ channel = simple_strtoul(p, NULL, 0);
+ if (channel)
+ params.channel = channel;
+
+ p = buffer;
+ while (*p && *p != ' ')
+ p++;
+ if (*p)
+ type = simple_strtoul(p + 1, NULL, 0);
+ }
+
+ D_INFO("Invoking measurement of type %d on " "channel %d (for '%s')\n",
+ type, params.channel, buf);
+ il3945_get_measurement(il, &params, type);
+
+ return count;
+}
+
+static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR, il3945_show_measurement,
+ il3945_store_measurement);
+
+static ssize_t
+il3945_store_retry_rate(struct device *d, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+
+ il->retry_rate = simple_strtoul(buf, NULL, 0);
+ if (il->retry_rate <= 0)
+ il->retry_rate = 1;
+
+ return count;
+}
+
+static ssize_t
+il3945_show_retry_rate(struct device *d, struct device_attribute *attr,
+ char *buf)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+ return sprintf(buf, "%d", il->retry_rate);
+}
+
+static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, il3945_show_retry_rate,
+ il3945_store_retry_rate);
+
+static ssize_t
+il3945_show_channels(struct device *d, struct device_attribute *attr, char *buf)
+{
+ /* all this shit doesn't belong into sysfs anyway */
+ return 0;
+}
+
+static DEVICE_ATTR(channels, S_IRUSR, il3945_show_channels, NULL);
+
+static ssize_t
+il3945_show_antenna(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+
+ if (!il_is_alive(il))
+ return -EAGAIN;
+
+ return sprintf(buf, "%d\n", il3945_mod_params.antenna);
+}
+
+static ssize_t
+il3945_store_antenna(struct device *d, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct il_priv *il __maybe_unused = dev_get_drvdata(d);
+ int ant;
+
+ if (count == 0)
+ return 0;
+
+ if (sscanf(buf, "%1i", &ant) != 1) {
+ D_INFO("not in hex or decimal form.\n");
+ return count;
+ }
+
+ if (ant >= 0 && ant <= 2) {
+ D_INFO("Setting antenna select to %d.\n", ant);
+ il3945_mod_params.antenna = (enum il3945_antenna)ant;
+ } else
+ D_INFO("Bad antenna select value %d.\n", ant);
+
+ return count;
+}
+
+static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, il3945_show_antenna,
+ il3945_store_antenna);
+
+static ssize_t
+il3945_show_status(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+ if (!il_is_alive(il))
+ return -EAGAIN;
+ return sprintf(buf, "0x%08x\n", (int)il->status);
+}
+
+static DEVICE_ATTR(status, S_IRUGO, il3945_show_status, NULL);
+
+static ssize_t
+il3945_dump_error_log(struct device *d, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+ char *p = (char *)buf;
+
+ if (p[0] == '1')
+ il3945_dump_nic_error_log(il);
+
+ return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, il3945_dump_error_log);
+
+/*****************************************************************************
+ *
+ * driver setup and tear down
+ *
+ *****************************************************************************/
+
+static void
+il3945_setup_deferred_work(struct il_priv *il)
+{
+ il->workqueue = create_singlethread_workqueue(DRV_NAME);
+
+ init_waitqueue_head(&il->wait_command_queue);
+
+ INIT_WORK(&il->restart, il3945_bg_restart);
+ INIT_WORK(&il->rx_replenish, il3945_bg_rx_replenish);
+ INIT_DELAYED_WORK(&il->init_alive_start, il3945_bg_init_alive_start);
+ INIT_DELAYED_WORK(&il->alive_start, il3945_bg_alive_start);
+ INIT_DELAYED_WORK(&il->_3945.rfkill_poll, il3945_rfkill_poll);
+
+ il_setup_scan_deferred_work(il);
+
+ il3945_hw_setup_deferred_work(il);
+
+ init_timer(&il->watchdog);
+ il->watchdog.data = (unsigned long)il;
+ il->watchdog.function = il_bg_watchdog;
+
+ tasklet_init(&il->irq_tasklet,
+ (void (*)(unsigned long))il3945_irq_tasklet,
+ (unsigned long)il);
+}
+
+static void
+il3945_cancel_deferred_work(struct il_priv *il)
+{
+ il3945_hw_cancel_deferred_work(il);
+
+ cancel_delayed_work_sync(&il->init_alive_start);
+ cancel_delayed_work(&il->alive_start);
+
+ il_cancel_scan_deferred_work(il);
+}
+
+static struct attribute *il3945_sysfs_entries[] = {
+ &dev_attr_antenna.attr,
+ &dev_attr_channels.attr,
+ &dev_attr_dump_errors.attr,
+ &dev_attr_flags.attr,
+ &dev_attr_filter_flags.attr,
+ &dev_attr_measurement.attr,
+ &dev_attr_retry_rate.attr,
+ &dev_attr_status.attr,
+ &dev_attr_temperature.attr,
+ &dev_attr_tx_power.attr,
+#ifdef CONFIG_IWLEGACY_DEBUG
+ &dev_attr_debug_level.attr,
+#endif
+ NULL
+};
+
+static struct attribute_group il3945_attribute_group = {
+ .name = NULL, /* put in device directory */
+ .attrs = il3945_sysfs_entries,
+};
+
+struct ieee80211_ops il3945_hw_ops = {
+ .tx = il3945_mac_tx,
+ .start = il3945_mac_start,
+ .stop = il3945_mac_stop,
+ .add_interface = il_mac_add_interface,
+ .remove_interface = il_mac_remove_interface,
+ .change_interface = il_mac_change_interface,
+ .config = il_mac_config,
+ .configure_filter = il3945_configure_filter,
+ .set_key = il3945_mac_set_key,
+ .conf_tx = il_mac_conf_tx,
+ .reset_tsf = il_mac_reset_tsf,
+ .bss_info_changed = il_mac_bss_info_changed,
+ .hw_scan = il_mac_hw_scan,
+ .sta_add = il3945_mac_sta_add,
+ .sta_remove = il_mac_sta_remove,
+ .tx_last_beacon = il_mac_tx_last_beacon,
+};
+
+static int
+il3945_init_drv(struct il_priv *il)
+{
+ int ret;
+ struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
+
+ il->retry_rate = 1;
+ il->beacon_skb = NULL;
+
+ spin_lock_init(&il->sta_lock);
+ spin_lock_init(&il->hcmd_lock);
+
+ INIT_LIST_HEAD(&il->free_frames);
+
+ mutex_init(&il->mutex);
+
+ il->ieee_channels = NULL;
+ il->ieee_rates = NULL;
+ il->band = IEEE80211_BAND_2GHZ;
+
+ il->iw_mode = NL80211_IFTYPE_STATION;
+ il->missed_beacon_threshold = IL_MISSED_BEACON_THRESHOLD_DEF;
+
+ /* initialize force reset */
+ il->force_reset.reset_duration = IL_DELAY_NEXT_FORCE_FW_RELOAD;
+
+ if (eeprom->version < EEPROM_3945_EEPROM_VERSION) {
+ IL_WARN("Unsupported EEPROM version: 0x%04X\n",
+ eeprom->version);
+ ret = -EINVAL;
+ goto err;
+ }
+ ret = il_init_channel_map(il);
+ if (ret) {
+ IL_ERR("initializing regulatory failed: %d\n", ret);
+ goto err;
+ }
+
+ /* Set up txpower settings in driver for all channels */
+ if (il3945_txpower_set_from_eeprom(il)) {
+ ret = -EIO;
+ goto err_free_channel_map;
+ }
+
+ ret = il_init_geos(il);
+ if (ret) {
+ IL_ERR("initializing geos failed: %d\n", ret);
+ goto err_free_channel_map;
+ }
+ il3945_init_hw_rates(il, il->ieee_rates);
+
+ return 0;
+
+err_free_channel_map:
+ il_free_channel_map(il);
+err:
+ return ret;
+}
+
+#define IL3945_MAX_PROBE_REQUEST 200
+
+static int
+il3945_setup_mac(struct il_priv *il)
+{
+ int ret;
+ struct ieee80211_hw *hw = il->hw;
+
+ hw->rate_control_algorithm = "iwl-3945-rs";
+ hw->sta_data_size = sizeof(struct il3945_sta_priv);
+ hw->vif_data_size = sizeof(struct il_vif_priv);
+
+ /* Tell mac80211 our characteristics */
+ hw->flags = IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_SPECTRUM_MGMT;
+
+ hw->wiphy->interface_modes = il->ctx.interface_modes;
+
+ hw->wiphy->flags |=
+ WIPHY_FLAG_CUSTOM_REGULATORY | WIPHY_FLAG_DISABLE_BEACON_HINTS |
+ WIPHY_FLAG_IBSS_RSN;
+
+ hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945;
+ /* we create the 802.11 header and a zero-length SSID element */
+ hw->wiphy->max_scan_ie_len = IL3945_MAX_PROBE_REQUEST - 24 - 2;
+
+ /* Default value; 4 EDCA QOS priorities */
+ hw->queues = 4;
+
+ if (il->bands[IEEE80211_BAND_2GHZ].n_channels)
+ il->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
+ &il->bands[IEEE80211_BAND_2GHZ];
+
+ if (il->bands[IEEE80211_BAND_5GHZ].n_channels)
+ il->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
+ &il->bands[IEEE80211_BAND_5GHZ];
+
+ il_leds_init(il);
+
+ ret = ieee80211_register_hw(il->hw);
+ if (ret) {
+ IL_ERR("Failed to register hw (error %d)\n", ret);
+ return ret;
+ }
+ il->mac80211_registered = 1;
+
+ return 0;
+}
+
+static int
+il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ int err = 0;
+ struct il_priv *il;
+ struct ieee80211_hw *hw;
+ struct il_cfg *cfg = (struct il_cfg *)(ent->driver_data);
+ struct il3945_eeprom *eeprom;
+ unsigned long flags;
+
+ /***********************
+ * 1. Allocating HW data
+ * ********************/
+
+ /* mac80211 allocates memory for this device instance, including
+ * space for this driver's private structure */
+ hw = il_alloc_all(cfg);
+ if (hw == NULL) {
+ pr_err("Can not allocate network device\n");
+ err = -ENOMEM;
+ goto out;
+ }
+ il = hw->priv;
+ SET_IEEE80211_DEV(hw, &pdev->dev);
+
+ il->cmd_queue = IL39_CMD_QUEUE_NUM;
+
+ il->ctx.ctxid = 0;
+
+ il->ctx.rxon_cmd = C_RXON;
+ il->ctx.rxon_timing_cmd = C_RXON_TIMING;
+ il->ctx.rxon_assoc_cmd = C_RXON_ASSOC;
+ il->ctx.qos_cmd = C_QOS_PARAM;
+ il->ctx.ap_sta_id = IL_AP_ID;
+ il->ctx.wep_key_cmd = C_WEPKEY;
+ il->ctx.interface_modes =
+ BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC);
+ il->ctx.ibss_devtype = RXON_DEV_TYPE_IBSS;
+ il->ctx.station_devtype = RXON_DEV_TYPE_ESS;
+ il->ctx.unused_devtype = RXON_DEV_TYPE_ESS;
+
+ /*
+ * Disabling hardware scan means that mac80211 will perform scans
+ * "the hard way", rather than using device's scan.
+ */
+ if (il3945_mod_params.disable_hw_scan) {
+ D_INFO("Disabling hw_scan\n");
+ il3945_hw_ops.hw_scan = NULL;
+ }
+
+ D_INFO("*** LOAD DRIVER ***\n");
+ il->cfg = cfg;
+ il->pci_dev = pdev;
+ il->inta_mask = CSR_INI_SET_MASK;
+
+ if (il_alloc_traffic_mem(il))
+ IL_ERR("Not enough memory to generate traffic log\n");
+
+ /***************************
+ * 2. Initializing PCI bus
+ * *************************/
+ pci_disable_link_state(pdev,
+ PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
+ PCIE_LINK_STATE_CLKPM);
+
+ if (pci_enable_device(pdev)) {
+ err = -ENODEV;
+ goto out_ieee80211_free_hw;
+ }
+
+ pci_set_master(pdev);
+
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (!err)
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err) {
+ IL_WARN("No suitable DMA available.\n");
+ goto out_pci_disable_device;
+ }
+
+ pci_set_drvdata(pdev, il);
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err)
+ goto out_pci_disable_device;
+
+ /***********************
+ * 3. Read REV Register
+ * ********************/
+ il->hw_base = pci_iomap(pdev, 0, 0);
+ if (!il->hw_base) {
+ err = -ENODEV;
+ goto out_pci_release_regions;
+ }
+
+ D_INFO("pci_resource_len = 0x%08llx\n",
+ (unsigned long long)pci_resource_len(pdev, 0));
+ D_INFO("pci_resource_base = %p\n", il->hw_base);
+
+ /* We disable the RETRY_TIMEOUT register (0x41) to keep
+ * PCI Tx retries from interfering with C3 CPU state */
+ pci_write_config_byte(pdev, 0x41, 0x00);
+
+ /* These spin locks will be used in apm_ops.init and EEPROM access,
+ * so we should init them now.
+ */
+ spin_lock_init(&il->reg_lock);
+ spin_lock_init(&il->lock);
+
+ /*
+ * stop and reset the on-board processor just in case it is in a
+ * strange state ... like being left stranded by a primary kernel
+ * and this is now the kdump kernel trying to start up
+ */
+ _il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
+
+ /***********************
+ * 4. Read EEPROM
+ * ********************/
+
+ /* Read the EEPROM */
+ err = il_eeprom_init(il);
+ if (err) {
+ IL_ERR("Unable to init EEPROM\n");
+ goto out_iounmap;
+ }
+ /* MAC Address location in EEPROM same for 3945/4965 */
+ eeprom = (struct il3945_eeprom *)il->eeprom;
+ D_INFO("MAC address: %pM\n", eeprom->mac_address);
+ SET_IEEE80211_PERM_ADDR(il->hw, eeprom->mac_address);
+
+ /***********************
+ * 5. Setup HW Constants
+ * ********************/
+ /* Device-specific setup */
+ if (il3945_hw_set_hw_params(il)) {
+ IL_ERR("failed to set hw settings\n");
+ goto out_eeprom_free;
+ }
+
+ /***********************
+ * 6. Setup il
+ * ********************/
+
+ err = il3945_init_drv(il);
+ if (err) {
+ IL_ERR("initializing driver failed\n");
+ goto out_unset_hw_params;
+ }
+
+ IL_INFO("Detected Intel Wireless WiFi Link %s\n", il->cfg->name);
+
+ /***********************
+ * 7. Setup Services
+ * ********************/
+
+ spin_lock_irqsave(&il->lock, flags);
+ il_disable_interrupts(il);
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ pci_enable_msi(il->pci_dev);
+
+ err = request_irq(il->pci_dev->irq, il_isr, IRQF_SHARED, DRV_NAME, il);
+ if (err) {
+ IL_ERR("Error allocating IRQ %d\n", il->pci_dev->irq);
+ goto out_disable_msi;
+ }
+
+ err = sysfs_create_group(&pdev->dev.kobj, &il3945_attribute_group);
+ if (err) {
+ IL_ERR("failed to create sysfs device attributes\n");
+ goto out_release_irq;
+ }
+
+ il_set_rxon_channel(il, &il->bands[IEEE80211_BAND_2GHZ].channels[5],
+ &il->ctx);
+ il3945_setup_deferred_work(il);
+ il3945_setup_handlers(il);
+ il_power_initialize(il);
+
+ /*********************************
+ * 8. Setup and Register mac80211
+ * *******************************/
+
+ il_enable_interrupts(il);
+
+ err = il3945_setup_mac(il);
+ if (err)
+ goto out_remove_sysfs;
+
+ err = il_dbgfs_register(il, DRV_NAME);
+ if (err)
+ IL_ERR("failed to create debugfs files. Ignoring error: %d\n",
+ err);
+
+ /* Start monitoring the killswitch */
+ queue_delayed_work(il->workqueue, &il->_3945.rfkill_poll, 2 * HZ);
+
+ return 0;
+
+out_remove_sysfs:
+ destroy_workqueue(il->workqueue);
+ il->workqueue = NULL;
+ sysfs_remove_group(&pdev->dev.kobj, &il3945_attribute_group);
+out_release_irq:
+ free_irq(il->pci_dev->irq, il);
+out_disable_msi:
+ pci_disable_msi(il->pci_dev);
+ il_free_geos(il);
+ il_free_channel_map(il);
+out_unset_hw_params:
+ il3945_unset_hw_params(il);
+out_eeprom_free:
+ il_eeprom_free(il);
+out_iounmap:
+ pci_iounmap(pdev, il->hw_base);
+out_pci_release_regions:
+ pci_release_regions(pdev);
+out_pci_disable_device:
+ pci_set_drvdata(pdev, NULL);
+ pci_disable_device(pdev);
+out_ieee80211_free_hw:
+ il_free_traffic_mem(il);
+ ieee80211_free_hw(il->hw);
+out:
+ return err;
+}
+
+static void __devexit
+il3945_pci_remove(struct pci_dev *pdev)
+{
+ struct il_priv *il = pci_get_drvdata(pdev);
+ unsigned long flags;
+
+ if (!il)
+ return;
+
+ D_INFO("*** UNLOAD DRIVER ***\n");
+
+ il_dbgfs_unregister(il);
+
+ set_bit(S_EXIT_PENDING, &il->status);
+
+ il_leds_exit(il);
+
+ if (il->mac80211_registered) {
+ ieee80211_unregister_hw(il->hw);
+ il->mac80211_registered = 0;
+ } else {
+ il3945_down(il);
+ }
+
+ /*
+ * Make sure device is reset to low power before unloading driver.
+ * This may be redundant with il_down(), but there are paths to
+ * run il_down() without calling apm_ops.stop(), and there are
+ * paths to avoid running il_down() at all before leaving driver.
+ * This (inexpensive) call *makes sure* device is reset.
+ */
+ il_apm_stop(il);
+
+ /* make sure we flush any pending irq or
+ * tasklet for the driver
+ */
+ spin_lock_irqsave(&il->lock, flags);
+ il_disable_interrupts(il);
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ il3945_synchronize_irq(il);
+
+ sysfs_remove_group(&pdev->dev.kobj, &il3945_attribute_group);
+
+ cancel_delayed_work_sync(&il->_3945.rfkill_poll);
+
+ il3945_dealloc_ucode_pci(il);
+
+ if (il->rxq.bd)
+ il3945_rx_queue_free(il, &il->rxq);
+ il3945_hw_txq_ctx_free(il);
+
+ il3945_unset_hw_params(il);
+
+ /*netif_stop_queue(dev); */
+ flush_workqueue(il->workqueue);
+
+ /* ieee80211_unregister_hw calls il3945_mac_stop, which flushes
+ * il->workqueue... so we can't take down the workqueue
+ * until now... */
+ destroy_workqueue(il->workqueue);
+ il->workqueue = NULL;
+ il_free_traffic_mem(il);
+
+ free_irq(pdev->irq, il);
+ pci_disable_msi(pdev);
+
+ pci_iounmap(pdev, il->hw_base);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+
+ il_free_channel_map(il);
+ il_free_geos(il);
+ kfree(il->scan_cmd);
+ if (il->beacon_skb)
+ dev_kfree_skb(il->beacon_skb);
+
+ ieee80211_free_hw(il->hw);
+}
+
+/*****************************************************************************
+ *
+ * driver and module entry point
+ *
+ *****************************************************************************/
+
+static struct pci_driver il3945_driver = {
+ .name = DRV_NAME,
+ .id_table = il3945_hw_card_ids,
+ .probe = il3945_pci_probe,
+ .remove = __devexit_p(il3945_pci_remove),
+ .driver.pm = IL_LEGACY_PM_OPS,
+};
+
+static int __init
+il3945_init(void)
+{
+
+ int ret;
+ pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
+ pr_info(DRV_COPYRIGHT "\n");
+
+ ret = il3945_rate_control_register();
+ if (ret) {
+ pr_err("Unable to register rate control algorithm: %d\n", ret);
+ return ret;
+ }
+
+ ret = pci_register_driver(&il3945_driver);
+ if (ret) {
+ pr_err("Unable to initialize PCI module\n");
+ goto error_register;
+ }
+
+ return ret;
+
+error_register:
+ il3945_rate_control_unregister();
+ return ret;
+}
+
+static void __exit
+il3945_exit(void)
+{
+ pci_unregister_driver(&il3945_driver);
+ il3945_rate_control_unregister();
+}
+
+MODULE_FIRMWARE(IL3945_MODULE_FIRMWARE(IL3945_UCODE_API_MAX));
+
+module_param_named(antenna, il3945_mod_params.antenna, int, S_IRUGO);
+MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
+module_param_named(swcrypto, il3945_mod_params.sw_crypto, int, S_IRUGO);
+MODULE_PARM_DESC(swcrypto, "using software crypto (default 1 [software])");
+module_param_named(disable_hw_scan, il3945_mod_params.disable_hw_scan, int,
+ S_IRUGO);
+MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 1)");
+#ifdef CONFIG_IWLEGACY_DEBUG
+module_param_named(debug, il_debug_level, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "debug output mask");
+#endif
+module_param_named(fw_restart, il3945_mod_params.restart_fw, int, S_IRUGO);
+MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
+
+module_exit(il3945_exit);
+module_init(il3945_init);
diff --git a/drivers/net/wireless/iwlegacy/3945-rs.c b/drivers/net/wireless/iwlegacy/3945-rs.c
new file mode 100644
index 000000000000..d7a83f229190
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/3945-rs.c
@@ -0,0 +1,986 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <net/mac80211.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+
+#include <linux/workqueue.h>
+
+#include "commands.h"
+#include "3945.h"
+
+#define RS_NAME "iwl-3945-rs"
+
+static s32 il3945_expected_tpt_g[RATE_COUNT_3945] = {
+ 7, 13, 35, 58, 0, 0, 76, 104, 130, 168, 191, 202
+};
+
+static s32 il3945_expected_tpt_g_prot[RATE_COUNT_3945] = {
+ 7, 13, 35, 58, 0, 0, 0, 80, 93, 113, 123, 125
+};
+
+static s32 il3945_expected_tpt_a[RATE_COUNT_3945] = {
+ 0, 0, 0, 0, 40, 57, 72, 98, 121, 154, 177, 186
+};
+
+static s32 il3945_expected_tpt_b[RATE_COUNT_3945] = {
+ 7, 13, 35, 58, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+struct il3945_tpt_entry {
+ s8 min_rssi;
+ u8 idx;
+};
+
+static struct il3945_tpt_entry il3945_tpt_table_a[] = {
+ {-60, RATE_54M_IDX},
+ {-64, RATE_48M_IDX},
+ {-72, RATE_36M_IDX},
+ {-80, RATE_24M_IDX},
+ {-84, RATE_18M_IDX},
+ {-85, RATE_12M_IDX},
+ {-87, RATE_9M_IDX},
+ {-89, RATE_6M_IDX}
+};
+
+static struct il3945_tpt_entry il3945_tpt_table_g[] = {
+ {-60, RATE_54M_IDX},
+ {-64, RATE_48M_IDX},
+ {-68, RATE_36M_IDX},
+ {-80, RATE_24M_IDX},
+ {-84, RATE_18M_IDX},
+ {-85, RATE_12M_IDX},
+ {-86, RATE_11M_IDX},
+ {-88, RATE_5M_IDX},
+ {-90, RATE_2M_IDX},
+ {-92, RATE_1M_IDX}
+};
+
+#define RATE_MAX_WINDOW 62
+#define RATE_FLUSH (3*HZ)
+#define RATE_WIN_FLUSH (HZ/2)
+#define IL39_RATE_HIGH_TH 11520
+#define IL_SUCCESS_UP_TH 8960
+#define IL_SUCCESS_DOWN_TH 10880
+#define RATE_MIN_FAILURE_TH 6
+#define RATE_MIN_SUCCESS_TH 8
+#define RATE_DECREASE_TH 1920
+#define RATE_RETRY_TH 15
+
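+/*
+ * Map a measured RSSI to a starting rate idx by walking the per-band
+ * table until an entry's min_rssi is satisfied.  For example, on 2.4 GHz
+ * an RSSI of -70 skips the -60, -64 and -68 entries and selects
+ * RATE_24M_IDX (the -80 entry).
+ */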
+static u8
+il3945_get_rate_idx_by_rssi(s32 rssi, enum ieee80211_band band)
+{
+ u32 idx = 0;
+ u32 table_size = 0;
+ struct il3945_tpt_entry *tpt_table = NULL;
+
+ if (rssi < IL_MIN_RSSI_VAL || rssi > IL_MAX_RSSI_VAL)
+ rssi = IL_MIN_RSSI_VAL;
+
+ switch (band) {
+ case IEEE80211_BAND_2GHZ:
+ tpt_table = il3945_tpt_table_g;
+ table_size = ARRAY_SIZE(il3945_tpt_table_g);
+ break;
+ case IEEE80211_BAND_5GHZ:
+ tpt_table = il3945_tpt_table_a;
+ table_size = ARRAY_SIZE(il3945_tpt_table_a);
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ while (idx < table_size && rssi < tpt_table[idx].min_rssi)
+ idx++;
+
+ idx = min(idx, table_size - 1);
+
+ return tpt_table[idx].idx;
+}
+
+static void
+il3945_clear_win(struct il3945_rate_scale_data *win)
+{
+ win->data = 0;
+ win->success_counter = 0;
+ win->success_ratio = -1;
+ win->counter = 0;
+ win->average_tpt = IL_INVALID_VALUE;
+ win->stamp = 0;
+}
+
+/**
+ * il3945_rate_scale_flush_wins - flush out the rate scale wins
+ *
+ * Returns the number of wins that have gathered data but were
+ * not flushed. If there were any that were not flushed, then
+ * reschedule the rate flushing routine.
+ */
+static int
+il3945_rate_scale_flush_wins(struct il3945_rs_sta *rs_sta)
+{
+ int unflushed = 0;
+ int i;
+ unsigned long flags;
+ struct il_priv *il __maybe_unused = rs_sta->il;
+
+ /*
+ * For each rate, if we have collected data on that rate
+ * and it has been more than RATE_WIN_FLUSH
+ * since we flushed, clear out the gathered stats
+ */
+ for (i = 0; i < RATE_COUNT_3945; i++) {
+ if (!rs_sta->win[i].counter)
+ continue;
+
+ spin_lock_irqsave(&rs_sta->lock, flags);
+ if (time_after(jiffies, rs_sta->win[i].stamp + RATE_WIN_FLUSH)) {
+ D_RATE("flushing %d samples of rate " "idx %d\n",
+ rs_sta->win[i].counter, i);
+ il3945_clear_win(&rs_sta->win[i]);
+ } else
+ unflushed++;
+ spin_unlock_irqrestore(&rs_sta->lock, flags);
+ }
+
+ return unflushed;
+}
+
+#define RATE_FLUSH_MAX 5000 /* msec */
+#define RATE_FLUSH_MIN 50 /* msec */
+#define IL_AVERAGE_PACKETS 1500
+
+static void
+il3945_bg_rate_scale_flush(unsigned long data)
+{
+ struct il3945_rs_sta *rs_sta = (void *)data;
+ struct il_priv *il __maybe_unused = rs_sta->il;
+ int unflushed = 0;
+ unsigned long flags;
+ u32 packet_count, duration, pps;
+
+ D_RATE("enter\n");
+
+ unflushed = il3945_rate_scale_flush_wins(rs_sta);
+
+ spin_lock_irqsave(&rs_sta->lock, flags);
+
+ /* Number of packets Tx'd since last time this timer ran */
+ packet_count = (rs_sta->tx_packets - rs_sta->last_tx_packets) + 1;
+
+ rs_sta->last_tx_packets = rs_sta->tx_packets + 1;
+
+ if (unflushed) {
+ duration =
+ jiffies_to_msecs(jiffies - rs_sta->last_partial_flush);
+
+ D_RATE("Tx'd %d packets in %dms\n", packet_count, duration);
+
+ /* Determine packets per second */
+ if (duration)
+ pps = (packet_count * 1000) / duration;
+ else
+ pps = 0;
+
+ if (pps) {
+ duration = (IL_AVERAGE_PACKETS * 1000) / pps;
+ if (duration < RATE_FLUSH_MIN)
+ duration = RATE_FLUSH_MIN;
+ else if (duration > RATE_FLUSH_MAX)
+ duration = RATE_FLUSH_MAX;
+ } else
+ duration = RATE_FLUSH_MAX;
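+ /*
+ * Example: at 3000 packets per second the next flush is scheduled in
+ * (IL_AVERAGE_PACKETS * 1000) / 3000 = 500 ms, clamped to the
+ * RATE_FLUSH_MIN..RATE_FLUSH_MAX range of 50..5000 ms.
+ */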
+
+ rs_sta->flush_time = msecs_to_jiffies(duration);
+
+ D_RATE("new flush period: %d msec ave %d\n", duration,
+ packet_count);
+
+ mod_timer(&rs_sta->rate_scale_flush,
+ jiffies + rs_sta->flush_time);
+
+ rs_sta->last_partial_flush = jiffies;
+ } else {
+ rs_sta->flush_time = RATE_FLUSH;
+ rs_sta->flush_pending = 0;
+ }
+ /* If there weren't any unflushed entries, we don't schedule the timer
+ * to run again */
+
+ rs_sta->last_flush = jiffies;
+
+ spin_unlock_irqrestore(&rs_sta->lock, flags);
+
+ D_RATE("leave\n");
+}
+
+/**
+ * il3945_collect_tx_data - Update the success/failure sliding win
+ *
+ * We keep a sliding win of the last 62 packets transmitted
+ * at this rate. win->data contains the bitmask of successful
+ * packets.
+ */
+static void
+il3945_collect_tx_data(struct il3945_rs_sta *rs_sta,
+ struct il3945_rate_scale_data *win, int success,
+ int retries, int idx)
+{
+ unsigned long flags;
+ s32 fail_count;
+ struct il_priv *il __maybe_unused = rs_sta->il;
+
+ if (!retries) {
+ D_RATE("leave: retries == 0 -- should be at least 1\n");
+ return;
+ }
+
+ spin_lock_irqsave(&rs_sta->lock, flags);
+
+ /*
+ * Keep track of only the latest 62 tx frame attempts in this rate's
+ * history win; anything older isn't really relevant any more.
+ * If we have filled up the sliding win, drop the oldest attempt;
+ * if the oldest attempt (highest bit in bitmap) shows "success",
+ * subtract "1" from the success counter (this is the main reason
+ * we keep these bitmaps!).
+ */
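+ /*
+ * Example: a call with success = 1 and retries = 3 adds three attempts
+ * to the win: the success bit is ORed in first and then shifted toward
+ * older history by the two failing attempts, so counter grows by 3 and
+ * success_counter by 1.
+ */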
+ while (retries > 0) {
+ if (win->counter >= RATE_MAX_WINDOW) {
+
+ /* remove earliest */
+ win->counter = RATE_MAX_WINDOW - 1;
+
+ if (win->data & (1ULL << (RATE_MAX_WINDOW - 1))) {
+ win->data &= ~(1ULL << (RATE_MAX_WINDOW - 1));
+ win->success_counter--;
+ }
+ }
+
+ /* Increment frames-attempted counter */
+ win->counter++;
+
+ /* Shift bitmap by one frame (throw away oldest history),
+ * OR in "1", and increment "success" if this
+ * frame was successful. */
+ win->data <<= 1;
+ if (success > 0) {
+ win->success_counter++;
+ win->data |= 0x1;
+ success--;
+ }
+
+ retries--;
+ }
+
+ /* Calculate current success ratio, avoid divide-by-0! */
+ if (win->counter > 0)
+ win->success_ratio =
+ 128 * (100 * win->success_counter) / win->counter;
+ else
+ win->success_ratio = IL_INVALID_VALUE;
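+ /*
+ * success_ratio is a percentage scaled by 128: e.g. 31 successes out of
+ * 62 attempts gives 128 * (100 * 31) / 62 = 6400, i.e. 50%.
+ */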
+
+ fail_count = win->counter - win->success_counter;
+
+ /* Calculate average throughput, if we have enough history. */
+ if (fail_count >= RATE_MIN_FAILURE_TH ||
+ win->success_counter >= RATE_MIN_SUCCESS_TH)
+ win->average_tpt =
+ ((win->success_ratio * rs_sta->expected_tpt[idx] +
+ 64) / 128);
+ else
+ win->average_tpt = IL_INVALID_VALUE;
+
+ /* Tag this win as having been updated */
+ win->stamp = jiffies;
+
+ spin_unlock_irqrestore(&rs_sta->lock, flags);
+}
+
+/*
+ * Called after adding a new station to initialize rate scaling
+ */
+void
+il3945_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta, u8 sta_id)
+{
+ struct ieee80211_hw *hw = il->hw;
+ struct ieee80211_conf *conf = &il->hw->conf;
+ struct il3945_sta_priv *psta;
+ struct il3945_rs_sta *rs_sta;
+ struct ieee80211_supported_band *sband;
+ int i;
+
+ D_INFO("enter\n");
+ if (sta_id == il->ctx.bcast_sta_id)
+ goto out;
+
+ psta = (struct il3945_sta_priv *)sta->drv_priv;
+ rs_sta = &psta->rs_sta;
+ sband = hw->wiphy->bands[conf->channel->band];
+
+ rs_sta->il = il;
+
+ rs_sta->start_rate = RATE_INVALID;
+
+ /* default to just 802.11b */
+ rs_sta->expected_tpt = il3945_expected_tpt_b;
+
+ rs_sta->last_partial_flush = jiffies;
+ rs_sta->last_flush = jiffies;
+ rs_sta->flush_time = RATE_FLUSH;
+ rs_sta->last_tx_packets = 0;
+
+ rs_sta->rate_scale_flush.data = (unsigned long)rs_sta;
+ rs_sta->rate_scale_flush.function = il3945_bg_rate_scale_flush;
+
+ for (i = 0; i < RATE_COUNT_3945; i++)
+ il3945_clear_win(&rs_sta->win[i]);
+
+ /* TODO: what is a good starting rate for STA? About middle? Maybe not
+ * the lowest or the highest rate.. Could consider using RSSI from
+ * previous packets? Need to have IEEE 802.1X auth succeed immediately
+ * after assoc.. */
+
+ for (i = sband->n_bitrates - 1; i >= 0; i--) {
+ if (sta->supp_rates[sband->band] & (1 << i)) {
+ rs_sta->last_txrate_idx = i;
+ break;
+ }
+ }
+
+ il->_3945.sta_supp_rates = sta->supp_rates[sband->band];
+ /* For the 5 GHz band, rates start at IL_FIRST_OFDM_RATE */
+ if (sband->band == IEEE80211_BAND_5GHZ) {
+ rs_sta->last_txrate_idx += IL_FIRST_OFDM_RATE;
+ il->_3945.sta_supp_rates <<= IL_FIRST_OFDM_RATE;
+ }
+
+out:
+ il->stations[sta_id].used &= ~IL_STA_UCODE_INPROGRESS;
+
+ D_INFO("leave\n");
+}
+
+static void *
+il3945_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
+{
+ return hw->priv;
+}
+
+/* rate scale requires free function to be implemented */
+static void
+il3945_rs_free(void *il)
+{
+}
+
+static void *
+il3945_rs_alloc_sta(void *il_priv, struct ieee80211_sta *sta, gfp_t gfp)
+{
+ struct il3945_rs_sta *rs_sta;
+ struct il3945_sta_priv *psta = (void *)sta->drv_priv;
+ struct il_priv *il __maybe_unused = il_priv;
+
+ D_RATE("enter\n");
+
+ rs_sta = &psta->rs_sta;
+
+ spin_lock_init(&rs_sta->lock);
+ init_timer(&rs_sta->rate_scale_flush);
+
+ D_RATE("leave\n");
+
+ return rs_sta;
+}
+
+static void
+il3945_rs_free_sta(void *il_priv, struct ieee80211_sta *sta, void *il_sta)
+{
+ struct il3945_rs_sta *rs_sta = il_sta;
+
+ /*
+ * Be careful not to use any members of il3945_rs_sta (like trying
+ * to use il_priv to print out debugging) since it may not be fully
+ * initialized at this point.
+ */
+ del_timer_sync(&rs_sta->rate_scale_flush);
+}
+
+/**
+ * il3945_rs_tx_status - Update rate control values based on Tx results
+ *
+ * NOTE: Uses il_priv->retry_rate for the # of retries attempted by
+ * the hardware for each rate.
+ */
+static void
+il3945_rs_tx_status(void *il_rate, struct ieee80211_supported_band *sband,
+ struct ieee80211_sta *sta, void *il_sta,
+ struct sk_buff *skb)
+{
+ s8 retries = 0, current_count;
+ int scale_rate_idx, first_idx, last_idx;
+ unsigned long flags;
+ struct il_priv *il = (struct il_priv *)il_rate;
+ struct il3945_rs_sta *rs_sta = il_sta;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ D_RATE("enter\n");
+
+ retries = info->status.rates[0].count;
+ /* Sanity Check for retries */
+ if (retries > RATE_RETRY_TH)
+ retries = RATE_RETRY_TH;
+
+ first_idx = sband->bitrates[info->status.rates[0].idx].hw_value;
+ if (first_idx < 0 || first_idx >= RATE_COUNT_3945) {
+ D_RATE("leave: Rate out of bounds: %d\n", first_idx);
+ return;
+ }
+
+ if (!il_sta) {
+ D_RATE("leave: No STA il data to update!\n");
+ return;
+ }
+
+ /* Treat uninitialized rate scaling data same as non-existing. */
+ if (!rs_sta->il) {
+ D_RATE("leave: STA il data uninitialized!\n");
+ return;
+ }
+
+ rs_sta->tx_packets++;
+
+ scale_rate_idx = first_idx;
+ last_idx = first_idx;
+
+ /*
+ * Update the win for each rate. We determine which rates
+ * were Tx'd based on the total number of retries vs. the number
+ * of retries configured for each rate -- currently set to the
+ * il-wide value 'retry_rate' rather than a rate-specific count.
+ *
+ * On exit from this while loop last_idx indicates the rate
+ * at which the frame was finally transmitted (or failed if no
+ * ACK)
+ */
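+ /*
+ * Worked example with il->retry_rate = 2 and retries = 4: the first pass
+ * charges two failed attempts to first_idx and steps last_idx to the
+ * fallback rate from il3945_rs_next_rate(); the second pass charges one
+ * failed attempt there; the frame's final attempt at last_idx is then
+ * credited below according to the ACK status.
+ */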
+ while (retries > 1) {
+ if ((retries - 1) < il->retry_rate) {
+ current_count = (retries - 1);
+ last_idx = scale_rate_idx;
+ } else {
+ current_count = il->retry_rate;
+ last_idx = il3945_rs_next_rate(il, scale_rate_idx);
+ }
+
+ /* Update this rate accounting for as many retries
+ * as was used for it (per current_count) */
+ il3945_collect_tx_data(rs_sta, &rs_sta->win[scale_rate_idx], 0,
+ current_count, scale_rate_idx);
+ D_RATE("Update rate %d for %d retries.\n", scale_rate_idx,
+ current_count);
+
+ retries -= current_count;
+
+ scale_rate_idx = last_idx;
+ }
+
+ /* Update the last idx win with success/failure based on ACK */
+ D_RATE("Update rate %d with %s.\n", last_idx,
+ (info->flags & IEEE80211_TX_STAT_ACK) ? "success" : "failure");
+ il3945_collect_tx_data(rs_sta, &rs_sta->win[last_idx],
+ info->flags & IEEE80211_TX_STAT_ACK, 1,
+ last_idx);
+
+ /* We updated the rate scale win -- if it's been more than
+ * flush_time since the last run, schedule the flush
+ * again */
+ spin_lock_irqsave(&rs_sta->lock, flags);
+
+ if (!rs_sta->flush_pending &&
+ time_after(jiffies, rs_sta->last_flush + rs_sta->flush_time)) {
+
+ rs_sta->last_partial_flush = jiffies;
+ rs_sta->flush_pending = 1;
+ mod_timer(&rs_sta->rate_scale_flush,
+ jiffies + rs_sta->flush_time);
+ }
+
+ spin_unlock_irqrestore(&rs_sta->lock, flags);
+
+ D_RATE("leave\n");
+}
+
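+/*
+ * Find the nearest usable rates below and above @idx within @rate_mask.
+ * The result is packed into a u16: the higher rate idx in the upper byte,
+ * the lower rate idx in the lower byte; RATE_INVALID marks a missing
+ * neighbour.
+ */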
+static u16
+il3945_get_adjacent_rate(struct il3945_rs_sta *rs_sta, u8 idx, u16 rate_mask,
+ enum ieee80211_band band)
+{
+ u8 high = RATE_INVALID;
+ u8 low = RATE_INVALID;
+ struct il_priv *il __maybe_unused = rs_sta->il;
+
+ /* 802.11A walks to the next literal adjacent rate in
+ * the rate table */
+ if (unlikely(band == IEEE80211_BAND_5GHZ)) {
+ int i;
+ u32 mask;
+
+ /* Find the previous rate that is in the rate mask */
+ i = idx - 1;
+ for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
+ if (rate_mask & mask) {
+ low = i;
+ break;
+ }
+ }
+
+ /* Find the next rate that is in the rate mask */
+ i = idx + 1;
+ for (mask = (1 << i); i < RATE_COUNT_3945; i++, mask <<= 1) {
+ if (rate_mask & mask) {
+ high = i;
+ break;
+ }
+ }
+
+ return (high << 8) | low;
+ }
+
+ low = idx;
+ while (low != RATE_INVALID) {
+ if (rs_sta->tgg)
+ low = il3945_rates[low].prev_rs_tgg;
+ else
+ low = il3945_rates[low].prev_rs;
+ if (low == RATE_INVALID)
+ break;
+ if (rate_mask & (1 << low))
+ break;
+ D_RATE("Skipping masked lower rate: %d\n", low);
+ }
+
+ high = idx;
+ while (high != RATE_INVALID) {
+ if (rs_sta->tgg)
+ high = il3945_rates[high].next_rs_tgg;
+ else
+ high = il3945_rates[high].next_rs;
+ if (high == RATE_INVALID)
+ break;
+ if (rate_mask & (1 << high))
+ break;
+ D_RATE("Skipping masked higher rate: %d\n", high);
+ }
+
+ return (high << 8) | low;
+}
+
+/**
+ * il3945_rs_get_rate - find the rate for the requested packet
+ *
+ * Returns the ieee80211_rate structure allocated by the driver.
+ *
+ * The rate control algorithm has no internal mapping between hw_mode's
+ * rate ordering and the rate ordering used by the rate control algorithm.
+ *
+ * The rate control algorithm uses a single table of rates that goes across
+ * the entire A/B/G spectrum vs. being limited to just one particular
+ * hw_mode.
+ *
+ * As such, we can't convert the idx obtained below into the hw_mode's
+ * rate table and must reference the driver allocated rate table
+ *
+ */
+static void
+il3945_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta,
+ struct ieee80211_tx_rate_control *txrc)
+{
+ struct ieee80211_supported_band *sband = txrc->sband;
+ struct sk_buff *skb = txrc->skb;
+ u8 low = RATE_INVALID;
+ u8 high = RATE_INVALID;
+ u16 high_low;
+ int idx;
+ struct il3945_rs_sta *rs_sta = il_sta;
+ struct il3945_rate_scale_data *win = NULL;
+ int current_tpt = IL_INVALID_VALUE;
+ int low_tpt = IL_INVALID_VALUE;
+ int high_tpt = IL_INVALID_VALUE;
+ u32 fail_count;
+ s8 scale_action = 0;
+ unsigned long flags;
+ u16 rate_mask;
+ s8 max_rate_idx = -1;
+ struct il_priv *il __maybe_unused = (struct il_priv *)il_r;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ D_RATE("enter\n");
+
+ /* Treat uninitialized rate scaling data same as non-existing. */
+ if (rs_sta && !rs_sta->il) {
+ D_RATE("Rate scaling information not initialized yet.\n");
+ il_sta = NULL;
+ }
+
+ if (rate_control_send_low(sta, il_sta, txrc))
+ return;
+
+ rate_mask = sta->supp_rates[sband->band];
+
+ /* get user max rate if set */
+ max_rate_idx = txrc->max_rate_idx;
+ if (sband->band == IEEE80211_BAND_5GHZ && max_rate_idx != -1)
+ max_rate_idx += IL_FIRST_OFDM_RATE;
+ if (max_rate_idx < 0 || max_rate_idx >= RATE_COUNT)
+ max_rate_idx = -1;
+
+ idx = min(rs_sta->last_txrate_idx & 0xffff, RATE_COUNT_3945 - 1);
+
+ if (sband->band == IEEE80211_BAND_5GHZ)
+ rate_mask = rate_mask << IL_FIRST_OFDM_RATE;
+
+ spin_lock_irqsave(&rs_sta->lock, flags);
+
+ /* for a recent assoc, choose the best rate based on
+ * the RSSI value
+ */
+ if (rs_sta->start_rate != RATE_INVALID) {
+ if (rs_sta->start_rate < idx &&
+ (rate_mask & (1 << rs_sta->start_rate)))
+ idx = rs_sta->start_rate;
+ rs_sta->start_rate = RATE_INVALID;
+ }
+
+ /* force user max rate if set by user */
+ if (max_rate_idx != -1 && max_rate_idx < idx) {
+ if (rate_mask & (1 << max_rate_idx))
+ idx = max_rate_idx;
+ }
+
+ win = &(rs_sta->win[idx]);
+
+ fail_count = win->counter - win->success_counter;
+
+ if (fail_count < RATE_MIN_FAILURE_TH &&
+ win->success_counter < RATE_MIN_SUCCESS_TH) {
+ spin_unlock_irqrestore(&rs_sta->lock, flags);
+
+ D_RATE("Invalid average_tpt on rate %d: "
+ "counter: %d, success_counter: %d, "
+ "expected_tpt is %sNULL\n", idx, win->counter,
+ win->success_counter,
+ rs_sta->expected_tpt ? "not " : "");
+
+ /* Can't calculate this yet; not enough history */
+ win->average_tpt = IL_INVALID_VALUE;
+ goto out;
+
+ }
+
+ current_tpt = win->average_tpt;
+
+ high_low =
+ il3945_get_adjacent_rate(rs_sta, idx, rate_mask, sband->band);
+ low = high_low & 0xff;
+ high = (high_low >> 8) & 0xff;
+
+ /* If the user set a max rate, don't allow anything higher than that constraint */
+ if (max_rate_idx != -1 && max_rate_idx < high)
+ high = RATE_INVALID;
+
+ /* Collect Measured throughputs of adjacent rates */
+ if (low != RATE_INVALID)
+ low_tpt = rs_sta->win[low].average_tpt;
+
+ if (high != RATE_INVALID)
+ high_tpt = rs_sta->win[high].average_tpt;
+
+ spin_unlock_irqrestore(&rs_sta->lock, flags);
+
+ scale_action = 0;
+
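+ /*
+ * Decide whether to raise, lower or keep the current rate based on
+ * the success ratio and the measured throughput of the current and
+ * adjacent rates.
+ */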
+ /* Low success ratio, need to drop the rate */
+ if (win->success_ratio < RATE_DECREASE_TH || !current_tpt) {
+ D_RATE("decrease rate because of low success_ratio\n");
+ scale_action = -1;
+ /* No throughput measured yet for adjacent rates,
+ * try increase */
+ } else if (low_tpt == IL_INVALID_VALUE && high_tpt == IL_INVALID_VALUE) {
+
+ if (high != RATE_INVALID &&
+ win->success_ratio >= RATE_INCREASE_TH)
+ scale_action = 1;
+ else if (low != RATE_INVALID)
+ scale_action = 0;
+
+ /* Both adjacent throughputs are measured, but neither one has
+ * better throughput; we're using the best rate, don't change
+ * it! */
+ } else if (low_tpt != IL_INVALID_VALUE && high_tpt != IL_INVALID_VALUE
+ && low_tpt < current_tpt && high_tpt < current_tpt) {
+
+ D_RATE("No action -- low [%d] & high [%d] < "
+ "current_tpt [%d]\n", low_tpt, high_tpt, current_tpt);
+ scale_action = 0;
+
+ /* At least one of the rates has better throughput */
+ } else {
+ if (high_tpt != IL_INVALID_VALUE) {
+
+ /* High rate has better throughput, Increase
+ * rate */
+ if (high_tpt > current_tpt &&
+ win->success_ratio >= RATE_INCREASE_TH)
+ scale_action = 1;
+ else {
+ D_RATE("decrease rate because of high tpt\n");
+ scale_action = 0;
+ }
+ } else if (low_tpt != IL_INVALID_VALUE) {
+ if (low_tpt > current_tpt) {
+ D_RATE("decrease rate because of low tpt\n");
+ scale_action = -1;
+ } else if (win->success_ratio >= RATE_INCREASE_TH) {
+ /* Lower rate is no better and the success ratio
+ * is good, so increase the rate */
+ scale_action = 1;
+ }
+ }
+ }
+
+ /* Sanity check; asked for decrease, but success rate or throughput
+ * has been good at old rate. Don't change it. */
+ if (scale_action == -1 && low != RATE_INVALID &&
+ (win->success_ratio > RATE_HIGH_TH ||
+ current_tpt > 100 * rs_sta->expected_tpt[low]))
+ scale_action = 0;
+
+ switch (scale_action) {
+ case -1:
+ /* Decrease rate */
+ if (low != RATE_INVALID)
+ idx = low;
+ break;
+ case 1:
+ /* Increase rate */
+ if (high != RATE_INVALID)
+ idx = high;
+
+ break;
+ case 0:
+ default:
+ /* No change */
+ break;
+ }
+
+ D_RATE("Selected %d (action %d) - low %d high %d\n", idx, scale_action,
+ low, high);
+
+out:
+
+ if (sband->band == IEEE80211_BAND_5GHZ) {
+ if (WARN_ON_ONCE(idx < IL_FIRST_OFDM_RATE))
+ idx = IL_FIRST_OFDM_RATE;
+ rs_sta->last_txrate_idx = idx;
+ info->control.rates[0].idx = idx - IL_FIRST_OFDM_RATE;
+ } else {
+ rs_sta->last_txrate_idx = idx;
+ info->control.rates[0].idx = rs_sta->last_txrate_idx;
+ }
+
+ D_RATE("leave: %d\n", idx);
+}
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+static int
+il3945_open_file_generic(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t
+il3945_sta_dbgfs_stats_table_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ char *buff;
+ int desc = 0;
+ int j;
+ ssize_t ret;
+ struct il3945_rs_sta *lq_sta = file->private_data;
+
+ buff = kmalloc(1024, GFP_KERNEL);
+ if (!buff)
+ return -ENOMEM;
+
+ desc +=
+ sprintf(buff + desc,
+ "tx packets=%d last rate idx=%d\n"
+ "rate=0x%X flush time %d\n", lq_sta->tx_packets,
+ lq_sta->last_txrate_idx, lq_sta->start_rate,
+ jiffies_to_msecs(lq_sta->flush_time));
+ for (j = 0; j < RATE_COUNT_3945; j++) {
+ desc +=
+ sprintf(buff + desc, "counter=%d success=%d %%=%d\n",
+ lq_sta->win[j].counter,
+ lq_sta->win[j].success_counter,
+ lq_sta->win[j].success_ratio);
+ }
+ ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
+ kfree(buff);
+ return ret;
+}
+
+static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
+ .read = il3945_sta_dbgfs_stats_table_read,
+ .open = il3945_open_file_generic,
+ .llseek = default_llseek,
+};
+
+static void
+il3945_add_debugfs(void *il, void *il_sta, struct dentry *dir)
+{
+ struct il3945_rs_sta *lq_sta = il_sta;
+
+ lq_sta->rs_sta_dbgfs_stats_table_file =
+ debugfs_create_file("rate_stats_table", 0600, dir, lq_sta,
+ &rs_sta_dbgfs_stats_table_ops);
+
+}
+
+static void
+il3945_remove_debugfs(void *il, void *il_sta)
+{
+ struct il3945_rs_sta *lq_sta = il_sta;
+ debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
+}
+#endif
+
+/*
+ * Initialization of rate scaling information is done by driver after
+ * the station is added. Since mac80211 calls this function before a
+ * station is added we ignore it.
+ */
+static void
+il3945_rs_rate_init_stub(void *il_r, struct ieee80211_supported_band *sband,
+ struct ieee80211_sta *sta, void *il_sta)
+{
+}
+
+static struct rate_control_ops rs_ops = {
+ .module = NULL,
+ .name = RS_NAME,
+ .tx_status = il3945_rs_tx_status,
+ .get_rate = il3945_rs_get_rate,
+ .rate_init = il3945_rs_rate_init_stub,
+ .alloc = il3945_rs_alloc,
+ .free = il3945_rs_free,
+ .alloc_sta = il3945_rs_alloc_sta,
+ .free_sta = il3945_rs_free_sta,
+#ifdef CONFIG_MAC80211_DEBUGFS
+ .add_sta_debugfs = il3945_add_debugfs,
+ .remove_sta_debugfs = il3945_remove_debugfs,
+#endif
+
+};
+
+void
+il3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
+{
+ struct il_priv *il = hw->priv;
+ s32 rssi = 0;
+ unsigned long flags;
+ struct il3945_rs_sta *rs_sta;
+ struct ieee80211_sta *sta;
+ struct il3945_sta_priv *psta;
+
+ D_RATE("enter\n");
+
+ rcu_read_lock();
+
+ sta =
+ ieee80211_find_sta(il->ctx.vif, il->stations[sta_id].sta.sta.addr);
+ if (!sta) {
+ D_RATE("Unable to find station to initialize rate scaling.\n");
+ rcu_read_unlock();
+ return;
+ }
+
+ psta = (void *)sta->drv_priv;
+ rs_sta = &psta->rs_sta;
+
+ spin_lock_irqsave(&rs_sta->lock, flags);
+
+ rs_sta->tgg = 0;
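+ /* Pick the expected-throughput table for this band; on 2.4 GHz use
+ * the protection variant when TGG (11g) protection is enabled. */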
+ switch (il->band) {
+ case IEEE80211_BAND_2GHZ:
+ /* TODO: this always does G, not a regression */
+ if (il->ctx.active.flags & RXON_FLG_TGG_PROTECT_MSK) {
+ rs_sta->tgg = 1;
+ rs_sta->expected_tpt = il3945_expected_tpt_g_prot;
+ } else
+ rs_sta->expected_tpt = il3945_expected_tpt_g;
+ break;
+ case IEEE80211_BAND_5GHZ:
+ rs_sta->expected_tpt = il3945_expected_tpt_a;
+ break;
+ case IEEE80211_NUM_BANDS:
+ BUG();
+ break;
+ }
+
+ spin_unlock_irqrestore(&rs_sta->lock, flags);
+
+ rssi = il->_3945.last_rx_rssi;
+ if (rssi == 0)
+ rssi = IL_MIN_RSSI_VAL;
+
+ D_RATE("Network RSSI: %d\n", rssi);
+
+ rs_sta->start_rate = il3945_get_rate_idx_by_rssi(rssi, il->band);
+
+ D_RATE("leave: rssi %d assign rate idx: " "%d (plcp 0x%x)\n", rssi,
+ rs_sta->start_rate, il3945_rates[rs_sta->start_rate].plcp);
+ rcu_read_unlock();
+}
+
+int
+il3945_rate_control_register(void)
+{
+ return ieee80211_rate_control_register(&rs_ops);
+}
+
+void
+il3945_rate_control_unregister(void)
+{
+ ieee80211_rate_control_unregister(&rs_ops);
+}
diff --git a/drivers/net/wireless/iwlegacy/3945.c b/drivers/net/wireless/iwlegacy/3945.c
new file mode 100644
index 000000000000..1489b1573a6a
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/3945.c
@@ -0,0 +1,2743 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/firmware.h>
+#include <linux/etherdevice.h>
+#include <asm/unaligned.h>
+#include <net/mac80211.h>
+
+#include "common.h"
+#include "3945.h"
+
+/* Send led command */
+static int
+il3945_send_led_cmd(struct il_priv *il, struct il_led_cmd *led_cmd)
+{
+ struct il_host_cmd cmd = {
+ .id = C_LEDS,
+ .len = sizeof(struct il_led_cmd),
+ .data = led_cmd,
+ .flags = CMD_ASYNC,
+ .callback = NULL,
+ };
+
+ return il_send_cmd(il, &cmd);
+}
+
+const struct il_led_ops il3945_led_ops = {
+ .cmd = il3945_send_led_cmd,
+};
+
+#define IL_DECLARE_RATE_INFO(r, ip, in, rp, rn, pp, np) \
+ [RATE_##r##M_IDX] = { RATE_##r##M_PLCP, \
+ RATE_##r##M_IEEE, \
+ RATE_##ip##M_IDX, \
+ RATE_##in##M_IDX, \
+ RATE_##rp##M_IDX, \
+ RATE_##rn##M_IDX, \
+ RATE_##pp##M_IDX, \
+ RATE_##np##M_IDX, \
+ RATE_##r##M_IDX_TBL, \
+ RATE_##ip##M_IDX_TBL }
+
+/*
+ * Parameter order:
+ * rate, prev ieee rate, next ieee rate, prev rate-scale rate,
+ * next rate-scale rate, prev tgg rate-scale rate, next tgg rate-scale rate
+ *
+ * If there isn't a valid next or previous rate then INV is used, which
+ * maps to RATE_INVALID
+ *
+ */
+const struct il3945_rate_info il3945_rates[RATE_COUNT_3945] = {
+ IL_DECLARE_RATE_INFO(1, INV, 2, INV, 2, INV, 2), /* 1mbps */
+ IL_DECLARE_RATE_INFO(2, 1, 5, 1, 5, 1, 5), /* 2mbps */
+ IL_DECLARE_RATE_INFO(5, 2, 6, 2, 11, 2, 11), /*5.5mbps */
+ IL_DECLARE_RATE_INFO(11, 9, 12, 5, 12, 5, 18), /* 11mbps */
+ IL_DECLARE_RATE_INFO(6, 5, 9, 5, 11, 5, 11), /* 6mbps */
+ IL_DECLARE_RATE_INFO(9, 6, 11, 5, 11, 5, 11), /* 9mbps */
+ IL_DECLARE_RATE_INFO(12, 11, 18, 11, 18, 11, 18), /* 12mbps */
+ IL_DECLARE_RATE_INFO(18, 12, 24, 12, 24, 11, 24), /* 18mbps */
+ IL_DECLARE_RATE_INFO(24, 18, 36, 18, 36, 18, 36), /* 24mbps */
+ IL_DECLARE_RATE_INFO(36, 24, 48, 24, 48, 24, 48), /* 36mbps */
+ IL_DECLARE_RATE_INFO(48, 36, 54, 36, 54, 36, 54), /* 48mbps */
+ IL_DECLARE_RATE_INFO(54, 48, INV, 48, INV, 48, INV), /* 54mbps */
+};
+
+static inline u8
+il3945_get_prev_ieee_rate(u8 rate_idx)
+{
+ u8 rate = il3945_rates[rate_idx].prev_ieee;
+
+ if (rate == RATE_INVALID)
+ rate = rate_idx;
+ return rate;
+}
+
+/* 1 = enable the il3945_disable_events() function */
+#define IL_EVT_DISABLE (0)
+#define IL_EVT_DISABLE_SIZE (1532/32)
+
+/**
+ * il3945_disable_events - Disable selected events in uCode event log
+ *
+ * Disable an event by writing "1"s into "disable"
+ * bitmap in SRAM. Bit position corresponds to Event # (id/type).
+ * Default values of 0 enable uCode events to be logged.
+ * Use only for special debugging. This function is just a placeholder as-is,
+ * you'll need to provide the special bits! ...
+ * ... and set IL_EVT_DISABLE to 1. */
+void
+il3945_disable_events(struct il_priv *il)
+{
+ int i;
+ u32 base; /* SRAM address of event log header */
+ u32 disable_ptr; /* SRAM address of event-disable bitmap array */
+ u32 array_size; /* # of u32 entries in array */
+ static const u32 evt_disable[IL_EVT_DISABLE_SIZE] = {
+ 0x00000000, /* 31 - 0 Event id numbers */
+ 0x00000000, /* 63 - 32 */
+ 0x00000000, /* 95 - 64 */
+ 0x00000000, /* 127 - 96 */
+ 0x00000000, /* 159 - 128 */
+ 0x00000000, /* 191 - 160 */
+ 0x00000000, /* 223 - 192 */
+ 0x00000000, /* 255 - 224 */
+ 0x00000000, /* 287 - 256 */
+ 0x00000000, /* 319 - 288 */
+ 0x00000000, /* 351 - 320 */
+ 0x00000000, /* 383 - 352 */
+ 0x00000000, /* 415 - 384 */
+ 0x00000000, /* 447 - 416 */
+ 0x00000000, /* 479 - 448 */
+ 0x00000000, /* 511 - 480 */
+ 0x00000000, /* 543 - 512 */
+ 0x00000000, /* 575 - 544 */
+ 0x00000000, /* 607 - 576 */
+ 0x00000000, /* 639 - 608 */
+ 0x00000000, /* 671 - 640 */
+ 0x00000000, /* 703 - 672 */
+ 0x00000000, /* 735 - 704 */
+ 0x00000000, /* 767 - 736 */
+ 0x00000000, /* 799 - 768 */
+ 0x00000000, /* 831 - 800 */
+ 0x00000000, /* 863 - 832 */
+ 0x00000000, /* 895 - 864 */
+ 0x00000000, /* 927 - 896 */
+ 0x00000000, /* 959 - 928 */
+ 0x00000000, /* 991 - 960 */
+ 0x00000000, /* 1023 - 992 */
+ 0x00000000, /* 1055 - 1024 */
+ 0x00000000, /* 1087 - 1056 */
+ 0x00000000, /* 1119 - 1088 */
+ 0x00000000, /* 1151 - 1120 */
+ 0x00000000, /* 1183 - 1152 */
+ 0x00000000, /* 1215 - 1184 */
+ 0x00000000, /* 1247 - 1216 */
+ 0x00000000, /* 1279 - 1248 */
+ 0x00000000, /* 1311 - 1280 */
+ 0x00000000, /* 1343 - 1312 */
+ 0x00000000, /* 1375 - 1344 */
+ 0x00000000, /* 1407 - 1376 */
+ 0x00000000, /* 1439 - 1408 */
+ 0x00000000, /* 1471 - 1440 */
+ 0x00000000, /* 1503 - 1472 */
+ };
+
+ base = le32_to_cpu(il->card_alive.log_event_table_ptr);
+ if (!il3945_hw_valid_rtc_data_addr(base)) {
+ IL_ERR("Invalid event log pointer 0x%08X\n", base);
+ return;
+ }
+
+ disable_ptr = il_read_targ_mem(il, base + (4 * sizeof(u32)));
+ array_size = il_read_targ_mem(il, base + (5 * sizeof(u32)));
+
+ if (IL_EVT_DISABLE && array_size == IL_EVT_DISABLE_SIZE) {
+ D_INFO("Disabling selected uCode log events at 0x%x\n",
+ disable_ptr);
+ for (i = 0; i < IL_EVT_DISABLE_SIZE; i++)
+ il_write_targ_mem(il, disable_ptr + (i * sizeof(u32)),
+ evt_disable[i]);
+
+ } else {
+ D_INFO("Selected uCode log events may be disabled\n");
+ D_INFO(" by writing \"1\"s into disable bitmap\n");
+ D_INFO(" in SRAM at 0x%x, size %d u32s\n", disable_ptr,
+ array_size);
+ }
+
+}
+
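+/* Translate an over-the-air PLCP rate code into an il3945_rates[] idx,
+ * or -1 if the code is unknown. */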
+static int
+il3945_hwrate_to_plcp_idx(u8 plcp)
+{
+ int idx;
+
+ for (idx = 0; idx < RATE_COUNT_3945; idx++)
+ if (il3945_rates[idx].plcp == plcp)
+ return idx;
+ return -1;
+}
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+#define TX_STATUS_ENTRY(x) case TX_3945_STATUS_FAIL_ ## x: return #x
+
+static const char *
+il3945_get_tx_fail_reason(u32 status)
+{
+ switch (status & TX_STATUS_MSK) {
+ case TX_3945_STATUS_SUCCESS:
+ return "SUCCESS";
+ TX_STATUS_ENTRY(SHORT_LIMIT);
+ TX_STATUS_ENTRY(LONG_LIMIT);
+ TX_STATUS_ENTRY(FIFO_UNDERRUN);
+ TX_STATUS_ENTRY(MGMNT_ABORT);
+ TX_STATUS_ENTRY(NEXT_FRAG);
+ TX_STATUS_ENTRY(LIFE_EXPIRE);
+ TX_STATUS_ENTRY(DEST_PS);
+ TX_STATUS_ENTRY(ABORTED);
+ TX_STATUS_ENTRY(BT_RETRY);
+ TX_STATUS_ENTRY(STA_INVALID);
+ TX_STATUS_ENTRY(FRAG_DROPPED);
+ TX_STATUS_ENTRY(TID_DISABLE);
+ TX_STATUS_ENTRY(FRAME_FLUSHED);
+ TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL);
+ TX_STATUS_ENTRY(TX_LOCKED);
+ TX_STATUS_ENTRY(NO_BEACON_ON_RADAR);
+ }
+
+ return "UNKNOWN";
+}
+#else
+static inline const char *
+il3945_get_tx_fail_reason(u32 status)
+{
+ return "";
+}
+#endif
+
+/*
+ * Get the previous IEEE rate from the rate-scale table.
+ * For A and B modes we need to override the previous
+ * value.
+ */
+int
+il3945_rs_next_rate(struct il_priv *il, int rate)
+{
+ int next_rate = il3945_get_prev_ieee_rate(rate);
+
+ switch (il->band) {
+ case IEEE80211_BAND_5GHZ:
+ if (rate == RATE_12M_IDX)
+ next_rate = RATE_9M_IDX;
+ else if (rate == RATE_6M_IDX)
+ next_rate = RATE_6M_IDX;
+ break;
+ case IEEE80211_BAND_2GHZ:
+ if (!(il->_3945.sta_supp_rates & IL_OFDM_RATES_MASK) &&
+ il_is_associated(il)) {
+ if (rate == RATE_11M_IDX)
+ next_rate = RATE_5M_IDX;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return next_rate;
+}
+
+/**
+ * il3945_tx_queue_reclaim - Reclaim Tx queue entries already Tx'd
+ *
+ * When FW advances 'R' idx, all entries between old and new 'R' idx
+ * need to be reclaimed. As a result, some free space forms. If there is
+ * enough free space (> low mark), wake the stack that feeds us.
+ */
+static void
+il3945_tx_queue_reclaim(struct il_priv *il, int txq_id, int idx)
+{
+ struct il_tx_queue *txq = &il->txq[txq_id];
+ struct il_queue *q = &txq->q;
+ struct il_tx_info *tx_info;
+
+ BUG_ON(txq_id == IL39_CMD_QUEUE_NUM);
+
+ for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
+ q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+
+ tx_info = &txq->txb[txq->q.read_ptr];
+ ieee80211_tx_status_irqsafe(il->hw, tx_info->skb);
+ tx_info->skb = NULL;
+ il->cfg->ops->lib->txq_free_tfd(il, txq);
+ }
+
+ if (il_queue_space(q) > q->low_mark && txq_id >= 0 &&
+ txq_id != IL39_CMD_QUEUE_NUM && il->mac80211_registered)
+ il_wake_queue(il, txq);
+}
+
+/**
+ * il3945_hdl_tx - Handle Tx response
+ */
+static void
+il3945_hdl_tx(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ u16 sequence = le16_to_cpu(pkt->hdr.sequence);
+ int txq_id = SEQ_TO_QUEUE(sequence);
+ int idx = SEQ_TO_IDX(sequence);
+ struct il_tx_queue *txq = &il->txq[txq_id];
+ struct ieee80211_tx_info *info;
+ struct il3945_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
+ u32 status = le32_to_cpu(tx_resp->status);
+ int rate_idx;
+ int fail;
+
+ if (idx >= txq->q.n_bd || il_queue_used(&txq->q, idx) == 0) {
+ IL_ERR("Read idx for DMA queue txq_id (%d) idx %d "
+ "is out of range [0-%d] %d %d\n", txq_id, idx,
+ txq->q.n_bd, txq->q.write_ptr, txq->q.read_ptr);
+ return;
+ }
+
+ txq->time_stamp = jiffies;
+ info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
+ ieee80211_tx_info_clear_status(info);
+
+ /* Fill the MRR chain with some info about on-chip retransmissions */
+ rate_idx = il3945_hwrate_to_plcp_idx(tx_resp->rate);
+ if (info->band == IEEE80211_BAND_5GHZ)
+ rate_idx -= IL_FIRST_OFDM_RATE;
+
+ fail = tx_resp->failure_frame;
+
+ info->status.rates[0].idx = rate_idx;
+ info->status.rates[0].count = fail + 1; /* add final attempt */
+
+ /* tx_status->rts_retry_count = tx_resp->failure_rts; */
+ info->flags |=
+ ((status & TX_STATUS_MSK) ==
+ TX_STATUS_SUCCESS) ? IEEE80211_TX_STAT_ACK : 0;
+
+ D_TX("Tx queue %d Status %s (0x%08x) plcp rate %d retries %d\n", txq_id,
+ il3945_get_tx_fail_reason(status), status, tx_resp->rate,
+ tx_resp->failure_frame);
+
+ D_TX_REPLY("Tx queue reclaim %d\n", idx);
+ il3945_tx_queue_reclaim(il, txq_id, idx);
+
+ if (status & TX_ABORT_REQUIRED_MSK)
+ IL_ERR("TODO: Implement Tx ABORT REQUIRED!!!\n");
+}
+
+/*****************************************************************************
+ *
+ * Intel PRO/Wireless 3945ABG/BG Network Connection
+ *
+ * RX handler implementations
+ *
+ *****************************************************************************/
+#ifdef CONFIG_IWLEGACY_DEBUGFS
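+/*
+ * Accumulate per-field deltas of the uCode statistics notification so
+ * debugfs can expose running totals and maximum deltas.
+ */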
+static void
+il3945_accumulative_stats(struct il_priv *il, __le32 * stats)
+{
+ int i;
+ __le32 *prev_stats;
+ u32 *accum_stats;
+ u32 *delta, *max_delta;
+
+ prev_stats = (__le32 *) &il->_3945.stats;
+ accum_stats = (u32 *) &il->_3945.accum_stats;
+ delta = (u32 *) &il->_3945.delta_stats;
+ max_delta = (u32 *) &il->_3945.max_delta;
+
+ for (i = sizeof(__le32); i < sizeof(struct il3945_notif_stats);
+ i +=
+ sizeof(__le32), stats++, prev_stats++, delta++, max_delta++,
+ accum_stats++) {
+ if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
+ *delta =
+ (le32_to_cpu(*stats) - le32_to_cpu(*prev_stats));
+ *accum_stats += *delta;
+ if (*delta > *max_delta)
+ *max_delta = *delta;
+ }
+ }
+
+ /* reset accumulative stats for "no-counter" type stats */
+ il->_3945.accum_stats.general.temperature =
+ il->_3945.stats.general.temperature;
+ il->_3945.accum_stats.general.ttl_timestamp =
+ il->_3945.stats.general.ttl_timestamp;
+}
+#endif
+
+void
+il3945_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+
+ D_RX("Statistics notification received (%d vs %d).\n",
+ (int)sizeof(struct il3945_notif_stats),
+ le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK);
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+ il3945_accumulative_stats(il, (__le32 *) &pkt->u.raw);
+#endif
+
+ memcpy(&il->_3945.stats, pkt->u.raw, sizeof(il->_3945.stats));
+}
+
+void
+il3945_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ __le32 *flag = (__le32 *) &pkt->u.raw;
+
+ if (le32_to_cpu(*flag) & UCODE_STATS_CLEAR_MSK) {
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+ memset(&il->_3945.accum_stats, 0,
+ sizeof(struct il3945_notif_stats));
+ memset(&il->_3945.delta_stats, 0,
+ sizeof(struct il3945_notif_stats));
+ memset(&il->_3945.max_delta, 0,
+ sizeof(struct il3945_notif_stats));
+#endif
+ D_RX("Statistics have been cleared\n");
+ }
+ il3945_hdl_stats(il, rxb);
+}
+
+/******************************************************************************
+ *
+ * Misc. internal state and helper functions
+ *
+ ******************************************************************************/
+
+/* This is necessary only for a number of stats, see the caller. */
+static int
+il3945_is_network_packet(struct il_priv *il, struct ieee80211_hdr *header)
+{
+ /* Filter incoming packets to determine if they are targeted toward
+ * this network, discarding packets coming from ourselves */
+ switch (il->iw_mode) {
+ case NL80211_IFTYPE_ADHOC: /* Header: Dest. | Source | BSSID */
+ /* packets to our IBSS update information */
+ return !compare_ether_addr(header->addr3, il->bssid);
+ case NL80211_IFTYPE_STATION: /* Header: Dest. | AP{BSSID} | Source */
+ /* packets from our AP (matching BSSID) update information */
+ return !compare_ether_addr(header->addr2, il->bssid);
+ default:
+ return 1;
+ }
+}
+
+static void
+il3945_pass_packet_to_mac80211(struct il_priv *il, struct il_rx_buf *rxb,
+ struct ieee80211_rx_status *stats)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)IL_RX_DATA(pkt);
+ struct il3945_rx_frame_hdr *rx_hdr = IL_RX_HDR(pkt);
+ struct il3945_rx_frame_end *rx_end = IL_RX_END(pkt);
+ u16 len = le16_to_cpu(rx_hdr->len);
+ struct sk_buff *skb;
+ __le16 fc = hdr->frame_control;
+
+ /* We received data from the HW, so stop the watchdog */
+ if (unlikely
+ (len + IL39_RX_FRAME_SIZE >
+ PAGE_SIZE << il->hw_params.rx_page_order)) {
+ D_DROP("Corruption detected!\n");
+ return;
+ }
+
+ /* We only process data packets if the interface is open */
+ if (unlikely(!il->is_open)) {
+ D_DROP("Dropping packet while interface is not open.\n");
+ return;
+ }
+
+ skb = dev_alloc_skb(128);
+ if (!skb) {
+ IL_ERR("dev_alloc_skb failed\n");
+ return;
+ }
+
+ if (!il3945_mod_params.sw_crypto)
+ il_set_decrypted_flag(il, (struct ieee80211_hdr *)rxb_addr(rxb),
+ le32_to_cpu(rx_end->status), stats);
+
+ skb_add_rx_frag(skb, 0, rxb->page,
+ (void *)rx_hdr->payload - (void *)pkt, len);
+
+ il_update_stats(il, false, fc, len);
+ memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
+
+ ieee80211_rx(il->hw, skb);
+ il->alloc_rxb_page--;
+ rxb->page = NULL;
+}
+
+#define IL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)
+
+static void
+il3945_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct ieee80211_hdr *header;
+ struct ieee80211_rx_status rx_status;
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ struct il3945_rx_frame_stats *rx_stats = IL_RX_STATS(pkt);
+ struct il3945_rx_frame_hdr *rx_hdr = IL_RX_HDR(pkt);
+ struct il3945_rx_frame_end *rx_end = IL_RX_END(pkt);
+ u16 rx_stats_sig_avg __maybe_unused = le16_to_cpu(rx_stats->sig_avg);
+ u16 rx_stats_noise_diff __maybe_unused =
+ le16_to_cpu(rx_stats->noise_diff);
+ u8 network_packet;
+
+ rx_status.flag = 0;
+ rx_status.mactime = le64_to_cpu(rx_end->timestamp);
+ rx_status.band =
+ (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
+ IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+ rx_status.freq =
+ ieee80211_channel_to_frequency(le16_to_cpu(rx_hdr->channel),
+ rx_status.band);
+
+ rx_status.rate_idx = il3945_hwrate_to_plcp_idx(rx_hdr->rate);
+ if (rx_status.band == IEEE80211_BAND_5GHZ)
+ rx_status.rate_idx -= IL_FIRST_OFDM_RATE;
+
+ rx_status.antenna =
+ (le16_to_cpu(rx_hdr->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK) >>
+ 4;
+
+ /* set the preamble flag if appropriate */
+ if (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
+ rx_status.flag |= RX_FLAG_SHORTPRE;
+
+ if (unlikely(rx_stats->phy_count > 20)) {
+ D_DROP("dsp size out of range [0,20]: %d\n",
+ rx_stats->phy_count);
+ return;
+ }
+
+ if (!(rx_end->status & RX_RES_STATUS_NO_CRC32_ERROR) ||
+ !(rx_end->status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
+ D_RX("Bad CRC or FIFO: 0x%08X.\n", rx_end->status);
+ return;
+ }
+
+ /* Convert 3945's rssi indicator to dBm */
+ rx_status.signal = rx_stats->rssi - IL39_RSSI_OFFSET;
+
+ D_STATS("Rssi %d sig_avg %d noise_diff %d\n", rx_status.signal,
+ rx_stats_sig_avg, rx_stats_noise_diff);
+
+ header = (struct ieee80211_hdr *)IL_RX_DATA(pkt);
+
+ network_packet = il3945_is_network_packet(il, header);
+
+ D_STATS("[%c] %d RSSI:%d Signal:%u, Rate:%u\n",
+ network_packet ? '*' : ' ', le16_to_cpu(rx_hdr->channel),
+ rx_status.signal, rx_status.signal, rx_status.rate_idx);
+
+ il_dbg_log_rx_data_frame(il, le16_to_cpu(rx_hdr->len), header);
+
+ if (network_packet) {
+ il->_3945.last_beacon_time =
+ le32_to_cpu(rx_end->beacon_timestamp);
+ il->_3945.last_tsf = le64_to_cpu(rx_end->timestamp);
+ il->_3945.last_rx_rssi = rx_status.signal;
+ }
+
+ il3945_pass_packet_to_mac80211(il, rxb, &rx_status);
+}
+
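+/*
+ * Attach one DMA buffer (addr/len) to the next free chunk of the TFD at
+ * the queue's write_ptr; a TFD holds at most NUM_TFD_CHUNKS buffers.
+ */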
+int
+il3945_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq,
+ dma_addr_t addr, u16 len, u8 reset, u8 pad)
+{
+ int count;
+ struct il_queue *q;
+ struct il3945_tfd *tfd, *tfd_tmp;
+
+ q = &txq->q;
+ tfd_tmp = (struct il3945_tfd *)txq->tfds;
+ tfd = &tfd_tmp[q->write_ptr];
+
+ if (reset)
+ memset(tfd, 0, sizeof(*tfd));
+
+ count = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags));
+
+ if (count >= NUM_TFD_CHUNKS || count < 0) {
+ IL_ERR("Error can not send more than %d chunks\n",
+ NUM_TFD_CHUNKS);
+ return -EINVAL;
+ }
+
+ tfd->tbs[count].addr = cpu_to_le32(addr);
+ tfd->tbs[count].len = cpu_to_le32(len);
+
+ count++;
+
+ tfd->control_flags =
+ cpu_to_le32(TFD_CTL_COUNT_SET(count) | TFD_CTL_PAD_SET(pad));
+
+ return 0;
+}
+
+/**
+ * il3945_hw_txq_free_tfd - Free one TFD, the one at idx [txq->q.read_ptr]
+ *
+ * Does NOT advance any idxes
+ */
+void
+il3945_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq)
+{
+ struct il3945_tfd *tfd_tmp = (struct il3945_tfd *)txq->tfds;
+ int idx = txq->q.read_ptr;
+ struct il3945_tfd *tfd = &tfd_tmp[idx];
+ struct pci_dev *dev = il->pci_dev;
+ int i;
+ int counter;
+
+ /* sanity check */
+ counter = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags));
+ if (counter > NUM_TFD_CHUNKS) {
+ IL_ERR("Too many chunks: %i\n", counter);
+ /* @todo issue fatal error, it is quite serious situation */
+ return;
+ }
+
+ /* Unmap tx_cmd */
+ if (counter)
+ pci_unmap_single(dev, dma_unmap_addr(&txq->meta[idx], mapping),
+ dma_unmap_len(&txq->meta[idx], len),
+ PCI_DMA_TODEVICE);
+
+ /* unmap chunks if any */
+
+ for (i = 1; i < counter; i++)
+ pci_unmap_single(dev, le32_to_cpu(tfd->tbs[i].addr),
+ le32_to_cpu(tfd->tbs[i].len),
+ PCI_DMA_TODEVICE);
+
+ /* free SKB */
+ if (txq->txb) {
+ struct sk_buff *skb;
+
+ skb = txq->txb[txq->q.read_ptr].skb;
+
+ /* can be called from irqs-disabled context */
+ if (skb) {
+ dev_kfree_skb_any(skb);
+ txq->txb[txq->q.read_ptr].skb = NULL;
+ }
+ }
+}
+
+/**
+ * il3945_hw_build_tx_cmd_rate - Add rate portion to TX_CMD:
+ *
+ */
+void
+il3945_hw_build_tx_cmd_rate(struct il_priv *il, struct il_device_cmd *cmd,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_hdr *hdr, int sta_id)
+{
+ u16 hw_value = ieee80211_get_tx_rate(il->hw, info)->hw_value;
+ u16 rate_idx = min(hw_value & 0xffff, RATE_COUNT_3945 - 1);
+ u16 rate_mask;
+ int rate;
+ const u8 rts_retry_limit = 7;
+ u8 data_retry_limit;
+ __le32 tx_flags;
+ __le16 fc = hdr->frame_control;
+ struct il3945_tx_cmd *tx_cmd = (struct il3945_tx_cmd *)cmd->cmd.payload;
+
+ rate = il3945_rates[rate_idx].plcp;
+ tx_flags = tx_cmd->tx_flags;
+
+ /* We need to figure out how to get the sta->supp_rates while
+ * in this running context */
+ rate_mask = RATES_MASK_3945;
+
+ /* Set retry limit on DATA packets and Probe Responses */
+ if (ieee80211_is_probe_resp(fc))
+ data_retry_limit = 3;
+ else
+ data_retry_limit = IL_DEFAULT_TX_RETRY;
+ tx_cmd->data_retry_limit = data_retry_limit;
+ /* Set retry limit on RTS packets */
+ tx_cmd->rts_retry_limit = min(data_retry_limit, rts_retry_limit);
+
+ tx_cmd->rate = rate;
+ tx_cmd->tx_flags = tx_flags;
+
+ /* OFDM */
+ tx_cmd->supp_rates[0] =
+ ((rate_mask & IL_OFDM_RATES_MASK) >> IL_FIRST_OFDM_RATE) & 0xFF;
+
+ /* CCK */
+ tx_cmd->supp_rates[1] = (rate_mask & 0xF);
+
+ D_RATE("Tx sta id: %d, rate: %d (plcp), flags: 0x%4X "
+ "cck/ofdm mask: 0x%x/0x%x\n", sta_id, tx_cmd->rate,
+ le32_to_cpu(tx_cmd->tx_flags), tx_cmd->supp_rates[1],
+ tx_cmd->supp_rates[0]);
+}
+
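+/* Tell the uCode, via an asynchronous ADD_STA command, to switch this
+ * station's Tx rate to @tx_rate. */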
+static u8
+il3945_sync_sta(struct il_priv *il, int sta_id, u16 tx_rate)
+{
+ unsigned long flags_spin;
+ struct il_station_entry *station;
+
+ if (sta_id == IL_INVALID_STATION)
+ return IL_INVALID_STATION;
+
+ spin_lock_irqsave(&il->sta_lock, flags_spin);
+ station = &il->stations[sta_id];
+
+ station->sta.sta.modify_mask = STA_MODIFY_TX_RATE_MSK;
+ station->sta.rate_n_flags = cpu_to_le16(tx_rate);
+ station->sta.mode = STA_CONTROL_MODIFY_MSK;
+ il_send_add_sta(il, &station->sta, CMD_ASYNC);
+ spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+
+ D_RATE("SCALE sync station %d to rate %d\n", sta_id, tx_rate);
+ return sta_id;
+}
+
+static void
+il3945_set_pwr_vmain(struct il_priv *il)
+{
+/*
+ * (for documentation purposes)
+ * to set power to V_AUX, do
+
+ if (pci_pme_capable(il->pci_dev, PCI_D3cold)) {
+ il_set_bits_mask_prph(il, APMG_PS_CTRL_REG,
+ APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
+ ~APMG_PS_CTRL_MSK_PWR_SRC);
+
+ _il_poll_bit(il, CSR_GPIO_IN,
+ CSR_GPIO_IN_VAL_VAUX_PWR_SRC,
+ CSR_GPIO_IN_BIT_AUX_POWER, 5000);
+ }
+ */
+
+ il_set_bits_mask_prph(il, APMG_PS_CTRL_REG,
+ APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
+ ~APMG_PS_CTRL_MSK_PWR_SRC);
+
+ _il_poll_bit(il, CSR_GPIO_IN, CSR_GPIO_IN_VAL_VMAIN_PWR_SRC,
+ CSR_GPIO_IN_BIT_AUX_POWER, 5000);
+}
+
+static int
+il3945_rx_init(struct il_priv *il, struct il_rx_queue *rxq)
+{
+ il_wr(il, FH39_RCSR_RBD_BASE(0), rxq->bd_dma);
+ il_wr(il, FH39_RCSR_RPTR_ADDR(0), rxq->rb_stts_dma);
+ il_wr(il, FH39_RCSR_WPTR(0), 0);
+ il_wr(il, FH39_RCSR_CONFIG(0),
+ FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE |
+ FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE |
+ FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN |
+ FH39_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128 |
+ (RX_QUEUE_SIZE_LOG << FH39_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE) |
+ FH39_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST |
+ (1 << FH39_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH) |
+ FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH);
+
+ /* fake read to flush all prev I/O */
+ il_rd(il, FH39_RSSR_CTRL);
+
+ return 0;
+}
+
+static int
+il3945_tx_reset(struct il_priv *il)
+{
+
+ /* bypass mode */
+ il_wr_prph(il, ALM_SCD_MODE_REG, 0x2);
+
+ /* RA 0 is active */
+ il_wr_prph(il, ALM_SCD_ARASTAT_REG, 0x01);
+
+ /* all 6 fifo are active */
+ il_wr_prph(il, ALM_SCD_TXFACT_REG, 0x3f);
+
+ il_wr_prph(il, ALM_SCD_SBYP_MODE_1_REG, 0x010000);
+ il_wr_prph(il, ALM_SCD_SBYP_MODE_2_REG, 0x030002);
+ il_wr_prph(il, ALM_SCD_TXF4MF_REG, 0x000004);
+ il_wr_prph(il, ALM_SCD_TXF5MF_REG, 0x000005);
+
+ il_wr(il, FH39_TSSR_CBB_BASE, il->_3945.shared_phys);
+
+ il_wr(il, FH39_TSSR_MSG_CONFIG,
+ FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON |
+ FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON |
+ FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B |
+ FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON |
+ FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON |
+ FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH |
+ FH39_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH);
+
+ return 0;
+}
+
+/**
+ * il3945_txq_ctx_reset - Reset TX queue context
+ *
+ * Destroys all DMA structures and initializes them again
+ */
+static int
+il3945_txq_ctx_reset(struct il_priv *il)
+{
+ int rc;
+ int txq_id, slots_num;
+
+ il3945_hw_txq_ctx_free(il);
+
+ /* allocate tx queue structure */
+ rc = il_alloc_txq_mem(il);
+ if (rc)
+ return rc;
+
+ /* Tx CMD queue */
+ rc = il3945_tx_reset(il);
+ if (rc)
+ goto error;
+
+ /* Tx queue(s) */
+ for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) {
+ slots_num =
+ (txq_id ==
+ IL39_CMD_QUEUE_NUM) ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+ rc = il_tx_queue_init(il, &il->txq[txq_id], slots_num, txq_id);
+ if (rc) {
+ IL_ERR("Tx %d queue init failed\n", txq_id);
+ goto error;
+ }
+ }
+
+ return rc;
+
+error:
+ il3945_hw_txq_ctx_free(il);
+ return rc;
+}
+
+/*
+ * Start up 3945's basic functionality after it has been reset
+ * (e.g. after platform boot, or shutdown via il_apm_stop())
+ * NOTE: This does not load uCode nor start the embedded processor
+ */
+static int
+il3945_apm_init(struct il_priv *il)
+{
+ int ret = il_apm_init(il);
+
+ /* Clear APMG (NIC's internal power management) interrupts */
+ il_wr_prph(il, APMG_RTC_INT_MSK_REG, 0x0);
+ il_wr_prph(il, APMG_RTC_INT_STT_REG, 0xFFFFFFFF);
+
+ /* Reset radio chip */
+ il_set_bits_prph(il, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ);
+ udelay(5);
+ il_clear_bits_prph(il, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ);
+
+ return ret;
+}
+
+static void
+il3945_nic_config(struct il_priv *il)
+{
+ struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
+ unsigned long flags;
+ u8 rev_id = il->pci_dev->revision;
+
+ spin_lock_irqsave(&il->lock, flags);
+
+ /* Determine HW type */
+ D_INFO("HW Revision ID = 0x%X\n", rev_id);
+
+ if (rev_id & PCI_CFG_REV_ID_BIT_RTP)
+ D_INFO("RTP type\n");
+ else if (rev_id & PCI_CFG_REV_ID_BIT_BASIC_SKU) {
+ D_INFO("3945 RADIO-MB type\n");
+ il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+ CSR39_HW_IF_CONFIG_REG_BIT_3945_MB);
+ } else {
+ D_INFO("3945 RADIO-MM type\n");
+ il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+ CSR39_HW_IF_CONFIG_REG_BIT_3945_MM);
+ }
+
+ if (EEPROM_SKU_CAP_OP_MODE_MRC == eeprom->sku_cap) {
+ D_INFO("SKU OP mode is mrc\n");
+ il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+ CSR39_HW_IF_CONFIG_REG_BIT_SKU_MRC);
+ } else
+ D_INFO("SKU OP mode is basic\n");
+
+ if ((eeprom->board_revision & 0xF0) == 0xD0) {
+ D_INFO("3945ABG revision is 0x%X\n", eeprom->board_revision);
+ il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+ CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE);
+ } else {
+ D_INFO("3945ABG revision is 0x%X\n", eeprom->board_revision);
+ il_clear_bit(il, CSR_HW_IF_CONFIG_REG,
+ CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE);
+ }
+
+ if (eeprom->almgor_m_version <= 1) {
+ il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+ CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A);
+ D_INFO("Card M type A version is 0x%X\n",
+ eeprom->almgor_m_version);
+ } else {
+ D_INFO("Card M type B version is 0x%X\n",
+ eeprom->almgor_m_version);
+ il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+ CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B);
+ }
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ if (eeprom->sku_cap & EEPROM_SKU_CAP_SW_RF_KILL_ENABLE)
+ D_RF_KILL("SW RF KILL supported in EEPROM.\n");
+
+ if (eeprom->sku_cap & EEPROM_SKU_CAP_HW_RF_KILL_ENABLE)
+ D_RF_KILL("HW RF KILL supported in EEPROM.\n");
+}
+
+int
+il3945_hw_nic_init(struct il_priv *il)
+{
+ int rc;
+ unsigned long flags;
+ struct il_rx_queue *rxq = &il->rxq;
+
+ spin_lock_irqsave(&il->lock, flags);
+ il->cfg->ops->lib->apm_ops.init(il);
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ il3945_set_pwr_vmain(il);
+
+ il->cfg->ops->lib->apm_ops.config(il);
+
+ /* Allocate the RX queue, or reset if it is already allocated */
+ if (!rxq->bd) {
+ rc = il_rx_queue_alloc(il);
+ if (rc) {
+ IL_ERR("Unable to initialize Rx queue\n");
+ return -ENOMEM;
+ }
+ } else
+ il3945_rx_queue_reset(il, rxq);
+
+ il3945_rx_replenish(il);
+
+ il3945_rx_init(il, rxq);
+
+ /* Look at using this instead:
+ rxq->need_update = 1;
+ il_rx_queue_update_write_ptr(il, rxq);
+ */
+
+ il_wr(il, FH39_RCSR_WPTR(0), rxq->write & ~7);
+
+ rc = il3945_txq_ctx_reset(il);
+ if (rc)
+ return rc;
+
+ set_bit(S_INIT, &il->status);
+
+ return 0;
+}
+
+/**
+ * il3945_hw_txq_ctx_free - Free TXQ Context
+ *
+ * Destroy all TX DMA queues and structures
+ */
+void
+il3945_hw_txq_ctx_free(struct il_priv *il)
+{
+ int txq_id;
+
+ /* Tx queues */
+ if (il->txq)
+ for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
+ if (txq_id == IL39_CMD_QUEUE_NUM)
+ il_cmd_queue_free(il);
+ else
+ il_tx_queue_free(il, txq_id);
+
+ /* free tx queue structure */
+ il_free_txq_mem(il);
+}
+
+void
+il3945_hw_txq_ctx_stop(struct il_priv *il)
+{
+ int txq_id;
+
+ /* stop SCD */
+ il_wr_prph(il, ALM_SCD_MODE_REG, 0);
+ il_wr_prph(il, ALM_SCD_TXFACT_REG, 0);
+
+ /* reset TFD queues */
+ for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) {
+ il_wr(il, FH39_TCSR_CONFIG(txq_id), 0x0);
+ il_poll_bit(il, FH39_TSSR_TX_STATUS,
+ FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(txq_id),
+ 1000);
+ }
+
+ il3945_hw_txq_ctx_free(il);
+}
+
+/**
+ * il3945_hw_reg_adjust_power_by_temp
+ * return idx delta into power gain settings table
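+ * (gain-table entries are 1/2-dB steps; a warmer reading yields a
+ * negative delta, i.e. a lower idx and thus more output power)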
+ */
+static int
+il3945_hw_reg_adjust_power_by_temp(int new_reading, int old_reading)
+{
+ return (new_reading - old_reading) * (-11) / 100;
+}
+
+/**
+ * il3945_hw_reg_temp_out_of_range - Keep temperature in sane range
+ */
+static inline int
+il3945_hw_reg_temp_out_of_range(int temperature)
+{
+ return (temperature < -260 || temperature > 25) ? 1 : 0;
+}
+
+int
+il3945_hw_get_temperature(struct il_priv *il)
+{
+ return _il_rd(il, CSR_UCODE_DRV_GP2);
+}
+
+/**
+ * il3945_hw_reg_txpower_get_temperature
+ * get the current temperature by reading from NIC
+ */
+static int
+il3945_hw_reg_txpower_get_temperature(struct il_priv *il)
+{
+ struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
+ int temperature;
+
+ temperature = il3945_hw_get_temperature(il);
+
+ /* driver's okay range is -260 to +25.
+ * human readable okay range is 0 to +285 */
+ D_INFO("Temperature: %d\n", temperature + IL_TEMP_CONVERT);
+
+ /* handle insane temp reading */
+ if (il3945_hw_reg_temp_out_of_range(temperature)) {
+ IL_ERR("Error bad temperature value %d\n", temperature);
+
+ /* if really really hot(?),
+ * substitute the 3rd band/group's temp measured at factory */
+ if (il->last_temperature > 100)
+ temperature = eeprom->groups[2].temperature;
+ else /* else use most recent "sane" value from driver */
+ temperature = il->last_temperature;
+ }
+
+ return temperature; /* raw, not "human readable" */
+}
+
+/* Adjust Txpower only if temperature variance is greater than threshold.
+ *
+ * This threshold is lower than older versions' 9 degrees */
+#define IL_TEMPERATURE_LIMIT_TIMER 6
+
+/**
+ * il3945_is_temp_calib_needed - determines if new calibration is needed
+ *
+ * Records the new temperature in il->temperature and replaces
+ * il->last_temperature *only* if calibration is needed
+ * (assumes the caller will actually do the calibration!). */
+static int
+il3945_is_temp_calib_needed(struct il_priv *il)
+{
+ int temp_diff;
+
+ il->temperature = il3945_hw_reg_txpower_get_temperature(il);
+ temp_diff = il->temperature - il->last_temperature;
+
+ /* get absolute value */
+ if (temp_diff < 0) {
+ D_POWER("Getting cooler, delta %d,\n", temp_diff);
+ temp_diff = -temp_diff;
+ } else if (temp_diff == 0)
+ D_POWER("Same temp,\n");
+ else
+ D_POWER("Getting warmer, delta %d,\n", temp_diff);
+
+ /* if we don't need calibration, *don't* update last_temperature */
+ if (temp_diff < IL_TEMPERATURE_LIMIT_TIMER) {
+ D_POWER("Timed thermal calib not needed\n");
+ return 0;
+ }
+
+ D_POWER("Timed thermal calib needed\n");
+
+ /* assume that caller will actually do calib ...
+ * update the "last temperature" value */
+ il->last_temperature = il->temperature;
+ return 1;
+}
+
+#define IL_MAX_GAIN_ENTRIES 78
+#define IL_CCK_FROM_OFDM_POWER_DIFF -5
+#define IL_CCK_FROM_OFDM_IDX_DIFF (10)
+
+/* radio and DSP power table, each step is 1/2 dB.
+ * 1st number is for RF analog gain, 2nd number is for DSP pre-DAC gain. */
+static struct il3945_tx_power power_gain_table[2][IL_MAX_GAIN_ENTRIES] = {
+ {
+ {251, 127}, /* 2.4 GHz, highest power */
+ {251, 127},
+ {251, 127},
+ {251, 127},
+ {251, 125},
+ {251, 110},
+ {251, 105},
+ {251, 98},
+ {187, 125},
+ {187, 115},
+ {187, 108},
+ {187, 99},
+ {243, 119},
+ {243, 111},
+ {243, 105},
+ {243, 97},
+ {243, 92},
+ {211, 106},
+ {211, 100},
+ {179, 120},
+ {179, 113},
+ {179, 107},
+ {147, 125},
+ {147, 119},
+ {147, 112},
+ {147, 106},
+ {147, 101},
+ {147, 97},
+ {147, 91},
+ {115, 107},
+ {235, 121},
+ {235, 115},
+ {235, 109},
+ {203, 127},
+ {203, 121},
+ {203, 115},
+ {203, 108},
+ {203, 102},
+ {203, 96},
+ {203, 92},
+ {171, 110},
+ {171, 104},
+ {171, 98},
+ {139, 116},
+ {227, 125},
+ {227, 119},
+ {227, 113},
+ {227, 107},
+ {227, 101},
+ {227, 96},
+ {195, 113},
+ {195, 106},
+ {195, 102},
+ {195, 95},
+ {163, 113},
+ {163, 106},
+ {163, 102},
+ {163, 95},
+ {131, 113},
+ {131, 106},
+ {131, 102},
+ {131, 95},
+ {99, 113},
+ {99, 106},
+ {99, 102},
+ {99, 95},
+ {67, 113},
+ {67, 106},
+ {67, 102},
+ {67, 95},
+ {35, 113},
+ {35, 106},
+ {35, 102},
+ {35, 95},
+ {3, 113},
+ {3, 106},
+ {3, 102},
+ {3, 95} /* 2.4 GHz, lowest power */
+ },
+ {
+ {251, 127}, /* 5.x GHz, highest power */
+ {251, 120},
+ {251, 114},
+ {219, 119},
+ {219, 101},
+ {187, 113},
+ {187, 102},
+ {155, 114},
+ {155, 103},
+ {123, 117},
+ {123, 107},
+ {123, 99},
+ {123, 92},
+ {91, 108},
+ {59, 125},
+ {59, 118},
+ {59, 109},
+ {59, 102},
+ {59, 96},
+ {59, 90},
+ {27, 104},
+ {27, 98},
+ {27, 92},
+ {115, 118},
+ {115, 111},
+ {115, 104},
+ {83, 126},
+ {83, 121},
+ {83, 113},
+ {83, 105},
+ {83, 99},
+ {51, 118},
+ {51, 111},
+ {51, 104},
+ {51, 98},
+ {19, 116},
+ {19, 109},
+ {19, 102},
+ {19, 98},
+ {19, 93},
+ {171, 113},
+ {171, 107},
+ {171, 99},
+ {139, 120},
+ {139, 113},
+ {139, 107},
+ {139, 99},
+ {107, 120},
+ {107, 113},
+ {107, 107},
+ {107, 99},
+ {75, 120},
+ {75, 113},
+ {75, 107},
+ {75, 99},
+ {43, 120},
+ {43, 113},
+ {43, 107},
+ {43, 99},
+ {11, 120},
+ {11, 113},
+ {11, 107},
+ {11, 99},
+ {131, 107},
+ {131, 99},
+ {99, 120},
+ {99, 113},
+ {99, 107},
+ {99, 99},
+ {67, 120},
+ {67, 113},
+ {67, 107},
+ {67, 99},
+ {35, 120},
+ {35, 113},
+ {35, 107},
+ {35, 99},
+ {3, 120} /* 5.x GHz, lowest power */
+ }
+};
+
+static inline u8
+il3945_hw_reg_fix_power_idx(int idx)
+{
+ if (idx < 0)
+ return 0;
+ if (idx >= IL_MAX_GAIN_ENTRIES)
+ return IL_MAX_GAIN_ENTRIES - 1;
+ return (u8) idx;
+}
+
+/* Kick off thermal recalibration check every 60 seconds */
+#define REG_RECALIB_PERIOD (60)
+
+/**
+ * il3945_hw_reg_set_scan_power - Set Tx power for scan probe requests
+ *
+ * Set (in our channel info database) the direct scan Tx power for 1 Mbit (CCK)
+ * or 6 Mbit (OFDM) rates.
+ */
+static void
+il3945_hw_reg_set_scan_power(struct il_priv *il, u32 scan_tbl_idx, s32 rate_idx,
+ const s8 *clip_pwrs,
+ struct il_channel_info *ch_info, int band_idx)
+{
+ struct il3945_scan_power_info *scan_power_info;
+ s8 power;
+ u8 power_idx;
+
+ scan_power_info = &ch_info->scan_pwr_info[scan_tbl_idx];
+
+ /* use this channel group's 6Mbit clipping/saturation pwr,
+ * but cap at regulatory scan power restriction (set during init
+ * based on eeprom channel data) for this channel. */
+ power = min(ch_info->scan_power, clip_pwrs[RATE_6M_IDX_TBL]);
+
+ power = min(power, il->tx_power_user_lmt);
+ scan_power_info->requested_power = power;
+
+ /* find difference between new scan *power* and current "normal"
+ * Tx *power* for 6Mb. Use this difference (x2) to adjust the
+ * current "normal" temperature-compensated Tx power *idx* for
+ * this rate (1Mb or 6Mb) to yield new temp-compensated scan power
+ * *idx*. */
+ power_idx = ch_info->power_info[rate_idx].power_table_idx -
+ (power - ch_info->power_info[RATE_6M_IDX_TBL].requested_power) * 2;
+
+ /* store reference idx that we use when adjusting *all* scan
+ * powers. So we can accommodate user (all channel) or spectrum
+ * management (single channel) power changes "between" temperature
+ * feedback compensation procedures.
+ * don't force fit this reference idx into gain table; it may be a
+ * negative number. This will help avoid errors when we're at
+ * the lower bounds (highest gains, for warmest temperatures)
+ * of the table. */
+
+ /* don't exceed table bounds for "real" setting */
+ power_idx = il3945_hw_reg_fix_power_idx(power_idx);
+
+ scan_power_info->power_table_idx = power_idx;
+ scan_power_info->tpc.tx_gain =
+ power_gain_table[band_idx][power_idx].tx_gain;
+ scan_power_info->tpc.dsp_atten =
+ power_gain_table[band_idx][power_idx].dsp_atten;
+}
+
+/**
+ * il3945_send_tx_power - fill in Tx Power command with gain settings
+ *
+ * Configures power settings for all rates for the current channel,
+ * using values from the channel info struct, and sends them to the NIC
+ */
+static int
+il3945_send_tx_power(struct il_priv *il)
+{
+ int rate_idx, i;
+ const struct il_channel_info *ch_info = NULL;
+ struct il3945_txpowertable_cmd txpower = {
+ .channel = il->ctx.active.channel,
+ };
+ u16 chan;
+
+ if (WARN_ONCE
+ (test_bit(S_SCAN_HW, &il->status),
+ "TX Power requested while scanning!\n"))
+ return -EAGAIN;
+
+ chan = le16_to_cpu(il->ctx.active.channel);
+
+ txpower.band = (il->band == IEEE80211_BAND_5GHZ) ? 0 : 1;
+ ch_info = il_get_channel_info(il, il->band, chan);
+ if (!ch_info) {
+ IL_ERR("Failed to get channel info for channel %d [%d]\n", chan,
+ il->band);
+ return -EINVAL;
+ }
+
+ if (!il_is_channel_valid(ch_info)) {
+ D_POWER("Not calling TX_PWR_TBL_CMD on " "non-Tx channel.\n");
+ return 0;
+ }
+
+ /* fill cmd with power settings for all rates for current channel */
+ /* Fill OFDM rate */
+ for (rate_idx = IL_FIRST_OFDM_RATE, i = 0;
+ rate_idx <= IL39_LAST_OFDM_RATE; rate_idx++, i++) {
+
+ txpower.power[i].tpc = ch_info->power_info[i].tpc;
+ txpower.power[i].rate = il3945_rates[rate_idx].plcp;
+
+ D_POWER("ch %d:%d rf %d dsp %3d rate code 0x%02x\n",
+ le16_to_cpu(txpower.channel), txpower.band,
+ txpower.power[i].tpc.tx_gain,
+ txpower.power[i].tpc.dsp_atten, txpower.power[i].rate);
+ }
+ /* Fill CCK rates */
+ for (rate_idx = IL_FIRST_CCK_RATE; rate_idx <= IL_LAST_CCK_RATE;
+ rate_idx++, i++) {
+ txpower.power[i].tpc = ch_info->power_info[i].tpc;
+ txpower.power[i].rate = il3945_rates[rate_idx].plcp;
+
+ D_POWER("ch %d:%d rf %d dsp %3d rate code 0x%02x\n",
+ le16_to_cpu(txpower.channel), txpower.band,
+ txpower.power[i].tpc.tx_gain,
+ txpower.power[i].tpc.dsp_atten, txpower.power[i].rate);
+ }
+
+ return il_send_cmd_pdu(il, C_TX_PWR_TBL,
+ sizeof(struct il3945_txpowertable_cmd),
+ &txpower);
+
+}
+
+/**
+ * il3945_hw_reg_set_new_power - Configures power tables at new levels
+ * @ch_info: Channel to update. Uses power_info.requested_power.
+ *
+ * Replace requested_power and base_power_idx ch_info fields for
+ * one channel.
+ *
+ * Called if user or spectrum management changes power preferences.
+ * Takes into account h/w and modulation limitations (clip power).
+ *
+ * This does *not* send anything to NIC, just sets up ch_info for one channel.
+ *
+ * NOTE: reg_compensate_for_temperature_dif() *must* be run after this to
+ * properly fill out the scan powers, and actual h/w gain settings,
+ * and send changes to NIC
+ */
+static int
+il3945_hw_reg_set_new_power(struct il_priv *il, struct il_channel_info *ch_info)
+{
+ struct il3945_channel_power_info *power_info;
+ int power_changed = 0;
+ int i;
+ const s8 *clip_pwrs;
+ int power;
+
+ /* Get this chnlgrp's rate-to-max/clip-powers table */
+ clip_pwrs = il->_3945.clip_groups[ch_info->group_idx].clip_powers;
+
+ /* Get this channel's rate-to-current-power settings table */
+ power_info = ch_info->power_info;
+
+ /* update OFDM Txpower settings */
+ for (i = RATE_6M_IDX_TBL; i <= RATE_54M_IDX_TBL; i++, ++power_info) {
+ int delta_idx;
+
+ /* limit new power to be no more than h/w capability */
+ power = min(ch_info->curr_txpow, clip_pwrs[i]);
+ if (power == power_info->requested_power)
+ continue;
+
+ /* find difference between old and new requested powers,
+ * update base (non-temp-compensated) power idx */
+ delta_idx = (power - power_info->requested_power) * 2;
+ power_info->base_power_idx -= delta_idx;
+
+ /* save new requested power value */
+ power_info->requested_power = power;
+
+ power_changed = 1;
+ }
+
+ /* update CCK Txpower settings, based on OFDM 12M setting ...
+ * ... all CCK power settings for a given channel are the *same*. */
+ if (power_changed) {
+ power =
+ ch_info->power_info[RATE_12M_IDX_TBL].requested_power +
+ IL_CCK_FROM_OFDM_POWER_DIFF;
+
+ /* do all CCK rates' il3945_channel_power_info structures */
+ for (i = RATE_1M_IDX_TBL; i <= RATE_11M_IDX_TBL; i++) {
+ power_info->requested_power = power;
+ power_info->base_power_idx =
+ ch_info->power_info[RATE_12M_IDX_TBL].
+ base_power_idx + IL_CCK_FROM_OFDM_IDX_DIFF;
+ ++power_info;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * il3945_hw_reg_get_ch_txpower_limit - returns new power limit for channel
+ *
+ * NOTE: Returned power limit may be less (but not more) than requested,
+ * based strictly on regulatory (eeprom and spectrum mgt) limitations
+ * (no consideration for h/w clipping limitations).
+ */
+static int
+il3945_hw_reg_get_ch_txpower_limit(struct il_channel_info *ch_info)
+{
+ s8 max_power;
+
+#if 0
+ /* if we're using TGd limits, use lower of TGd or EEPROM */
+ if (ch_info->tgd_data.max_power != 0)
+ max_power =
+ min(ch_info->tgd_data.max_power,
+ ch_info->eeprom.max_power_avg);
+
+ /* else just use EEPROM limits */
+ else
+#endif
+ max_power = ch_info->eeprom.max_power_avg;
+
+ return min(max_power, ch_info->max_power_avg);
+}
+
+/**
+ * il3945_hw_reg_comp_txpower_temp - Compensate for temperature
+ *
+ * Compensate txpower settings of *all* channels for temperature.
+ * This only accounts for the difference between current temperature
+ * and the factory calibration temperatures, and bases the new settings
+ * on the channel's base_power_idx.
+ *
+ * If RxOn is "associated", this sends the new Txpower to NIC!
+ */
+static int
+il3945_hw_reg_comp_txpower_temp(struct il_priv *il)
+{
+ struct il_channel_info *ch_info = NULL;
+ struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
+ int delta_idx;
+ const s8 *clip_pwrs; /* array of h/w max power levels for each rate */
+ u8 a_band;
+ u8 rate_idx;
+ u8 scan_tbl_idx;
+ u8 i;
+ int ref_temp;
+ int temperature = il->temperature;
+
+ if (il->disable_tx_power_cal || test_bit(S_SCANNING, &il->status)) {
+ /* do not perform tx power calibration */
+ return 0;
+ }
+ /* set up new Tx power info for each and every channel, 2.4 and 5.x */
+ for (i = 0; i < il->channel_count; i++) {
+ ch_info = &il->channel_info[i];
+ a_band = il_is_channel_a_band(ch_info);
+
+ /* Get this chnlgrp's factory calibration temperature */
+ ref_temp = (s16) eeprom->groups[ch_info->group_idx].temperature;
+
+ /* get power idx adjustment based on current and factory
+ * temps */
+ delta_idx =
+ il3945_hw_reg_adjust_power_by_temp(temperature, ref_temp);
+
+ /* set tx power value for all rates, OFDM and CCK */
+ for (rate_idx = 0; rate_idx < RATE_COUNT_3945; rate_idx++) {
+ int power_idx =
+ ch_info->power_info[rate_idx].base_power_idx;
+
+ /* temperature compensate */
+ power_idx += delta_idx;
+
+ /* stay within table range */
+ power_idx = il3945_hw_reg_fix_power_idx(power_idx);
+ ch_info->power_info[rate_idx].power_table_idx =
+ (u8) power_idx;
+ ch_info->power_info[rate_idx].tpc =
+ power_gain_table[a_band][power_idx];
+ }
+
+ /* Get this chnlgrp's rate-to-max/clip-powers table */
+ clip_pwrs =
+ il->_3945.clip_groups[ch_info->group_idx].clip_powers;
+
+ /* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */
+ for (scan_tbl_idx = 0; scan_tbl_idx < IL_NUM_SCAN_RATES;
+ scan_tbl_idx++) {
+ s32 actual_idx =
+ (scan_tbl_idx ==
+ 0) ? RATE_1M_IDX_TBL : RATE_6M_IDX_TBL;
+ il3945_hw_reg_set_scan_power(il, scan_tbl_idx,
+ actual_idx, clip_pwrs,
+ ch_info, a_band);
+ }
+ }
+
+ /* send Txpower command for current channel to ucode */
+ return il->cfg->ops->lib->send_tx_power(il);
+}
+
+int
+il3945_hw_reg_set_txpower(struct il_priv *il, s8 power)
+{
+ struct il_channel_info *ch_info;
+ s8 max_power;
+ u8 a_band;
+ u8 i;
+
+ if (il->tx_power_user_lmt == power) {
+ D_POWER("Requested Tx power same as current " "limit: %ddBm.\n",
+ power);
+ return 0;
+ }
+
+ D_POWER("Setting upper limit clamp to %ddBm.\n", power);
+ il->tx_power_user_lmt = power;
+
+ /* set up new Tx powers for each and every channel, 2.4 and 5.x */
+
+ for (i = 0; i < il->channel_count; i++) {
+ ch_info = &il->channel_info[i];
+ a_band = il_is_channel_a_band(ch_info);
+
+ /* find minimum power of all user and regulatory constraints
+ * (does not consider h/w clipping limitations) */
+ max_power = il3945_hw_reg_get_ch_txpower_limit(ch_info);
+ max_power = min(power, max_power);
+ if (max_power != ch_info->curr_txpow) {
+ ch_info->curr_txpow = max_power;
+
+ /* this considers the h/w clipping limitations */
+ il3945_hw_reg_set_new_power(il, ch_info);
+ }
+ }
+
+ /* update txpower settings for all channels,
+ * send to NIC if associated. */
+ il3945_is_temp_calib_needed(il);
+ il3945_hw_reg_comp_txpower_temp(il);
+
+ return 0;
+}
+
+static int
+il3945_send_rxon_assoc(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ int rc = 0;
+ struct il_rx_pkt *pkt;
+ struct il3945_rxon_assoc_cmd rxon_assoc;
+ struct il_host_cmd cmd = {
+ .id = C_RXON_ASSOC,
+ .len = sizeof(rxon_assoc),
+ .flags = CMD_WANT_SKB,
+ .data = &rxon_assoc,
+ };
+ const struct il_rxon_cmd *rxon1 = &ctx->staging;
+ const struct il_rxon_cmd *rxon2 = &ctx->active;
+
+ if (rxon1->flags == rxon2->flags &&
+ rxon1->filter_flags == rxon2->filter_flags &&
+ rxon1->cck_basic_rates == rxon2->cck_basic_rates &&
+ rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates) {
+ D_INFO("Using current RXON_ASSOC. Not resending.\n");
+ return 0;
+ }
+
+ rxon_assoc.flags = ctx->staging.flags;
+ rxon_assoc.filter_flags = ctx->staging.filter_flags;
+ rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates;
+ rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
+ rxon_assoc.reserved = 0;
+
+ rc = il_send_cmd_sync(il, &cmd);
+ if (rc)
+ return rc;
+
+ pkt = (struct il_rx_pkt *)cmd.reply_page;
+ if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
+ IL_ERR("Bad return from C_RXON_ASSOC command\n");
+ rc = -EIO;
+ }
+
+ il_free_pages(il, cmd.reply_page);
+
+ return rc;
+}
+
+/**
+ * il3945_commit_rxon - commit staging_rxon to hardware
+ *
+ * The RXON command in staging_rxon is committed to the hardware and
+ * the active_rxon structure is updated with the new data. This
+ * function correctly transitions out of the RXON_ASSOC_MSK state if
+ * a HW tune is required based on the RXON structure changes.
+ */
+int
+il3945_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ /* cast away the const for active_rxon in this function */
+ struct il3945_rxon_cmd *active_rxon = (void *)&ctx->active;
+ struct il3945_rxon_cmd *staging_rxon = (void *)&ctx->staging;
+ int rc = 0;
+ bool new_assoc = !!(staging_rxon->filter_flags & RXON_FILTER_ASSOC_MSK);
+
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ return -EINVAL;
+
+ if (!il_is_alive(il))
+ return -1;
+
+ /* always get timestamp with Rx frame */
+ staging_rxon->flags |= RXON_FLG_TSF2HOST_MSK;
+
+ /* select antenna */
+ staging_rxon->flags &= ~(RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_SEL_MSK);
+ staging_rxon->flags |= il3945_get_antenna_flags(il);
+
+ rc = il_check_rxon_cmd(il, ctx);
+ if (rc) {
+ IL_ERR("Invalid RXON configuration. Not committing.\n");
+ return -EINVAL;
+ }
+
+ /* If we don't need to send a full RXON, we can use
+ * il3945_rxon_assoc_cmd which is used to reconfigure filter
+ * and other flags for the current radio configuration. */
+ if (!il_full_rxon_required(il, &il->ctx)) {
+ rc = il_send_rxon_assoc(il, &il->ctx);
+ if (rc) {
+ IL_ERR("Error setting RXON_ASSOC "
+ "configuration (%d).\n", rc);
+ return rc;
+ }
+
+ memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
+ /*
+ * We do not commit tx power settings while channel changing,
+ * do it now if tx power changed.
+ */
+ il_set_tx_power(il, il->tx_power_next, false);
+ return 0;
+ }
+
+ /* If we are currently associated and the new config also wants the
+ * associated mask enabled, we must clear the associated bit from the
+ * active configuration before we apply the new config */
+ if (il_is_associated(il) && new_assoc) {
+ D_INFO("Toggling associated bit on current RXON\n");
+ active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+
+ /*
+ * reserved4 and 5 could have been filled by the iwlcore code.
+ * Let's clear them before pushing to the 3945.
+ */
+ active_rxon->reserved4 = 0;
+ active_rxon->reserved5 = 0;
+ rc = il_send_cmd_pdu(il, C_RXON, sizeof(struct il3945_rxon_cmd),
+ &il->ctx.active);
+
+ /* If the mask clearing failed then we set
+ * active_rxon back to what it was previously */
+ if (rc) {
+ active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
+ IL_ERR("Error clearing ASSOC_MSK on current "
+ "configuration (%d).\n", rc);
+ return rc;
+ }
+ il_clear_ucode_stations(il, &il->ctx);
+ il_restore_stations(il, &il->ctx);
+ }
+
+ D_INFO("Sending RXON\n" "* with%s RXON_FILTER_ASSOC_MSK\n"
+ "* channel = %d\n" "* bssid = %pM\n", (new_assoc ? "" : "out"),
+ le16_to_cpu(staging_rxon->channel), staging_rxon->bssid_addr);
+
+ /*
+ * reserved4 and 5 could have been filled by the iwlcore code.
+ * Let's clear them before pushing to the 3945.
+ */
+ staging_rxon->reserved4 = 0;
+ staging_rxon->reserved5 = 0;
+
+ il_set_rxon_hwcrypto(il, ctx, !il3945_mod_params.sw_crypto);
+
+ /* Apply the new configuration */
+ rc = il_send_cmd_pdu(il, C_RXON, sizeof(struct il3945_rxon_cmd),
+ staging_rxon);
+ if (rc) {
+ IL_ERR("Error setting new configuration (%d).\n", rc);
+ return rc;
+ }
+
+ memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
+
+ if (!new_assoc) {
+ il_clear_ucode_stations(il, &il->ctx);
+ il_restore_stations(il, &il->ctx);
+ }
+
+ /* If we issue a new RXON command which required a tune then we must
+ * send a new TXPOWER command or we won't be able to Tx any frames */
+ rc = il_set_tx_power(il, il->tx_power_next, true);
+ if (rc) {
+ IL_ERR("Error setting Tx power (%d).\n", rc);
+ return rc;
+ }
+
+ /* Init the hardware's rate fallback order based on the band */
+ rc = il3945_init_hw_rate_table(il);
+ if (rc) {
+ IL_ERR("Error setting HW rate table: %02X\n", rc);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * il3945_reg_txpower_periodic - called when it is time to check our temperature.
+ *
+ * -- reset periodic timer
+ * -- see if temp has changed enough to warrant re-calibration ... if so:
+ * -- correct coeffs for temp (can reset temp timer)
+ * -- save this temp as "last",
+ * -- send new set of gain settings to NIC
+ * NOTE: This should continue working, even when we're not associated,
+ * so we can keep our internal table of scan powers current. */
+void
+il3945_reg_txpower_periodic(struct il_priv *il)
+{
+ /* Only fall through to the "brute force"
+ * il3945_hw_reg_comp_txpower_temp() below when the temperature
+ * has drifted enough to warrant recalibration */
+ if (!il3945_is_temp_calib_needed(il))
+ goto reschedule;
+
+ /* Set up a new set of temp-adjusted TxPowers, send to NIC.
+ * This is based *only* on current temperature,
+ * ignoring any previous power measurements */
+ il3945_hw_reg_comp_txpower_temp(il);
+
+reschedule:
+ queue_delayed_work(il->workqueue, &il->_3945.thermal_periodic,
+ REG_RECALIB_PERIOD * HZ);
+}
+
+static void
+il3945_bg_reg_txpower_periodic(struct work_struct *work)
+{
+ struct il_priv *il = container_of(work, struct il_priv,
+ _3945.thermal_periodic.work);
+
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ return;
+
+ mutex_lock(&il->mutex);
+ il3945_reg_txpower_periodic(il);
+ mutex_unlock(&il->mutex);
+}
+
+/**
+ * il3945_hw_reg_get_ch_grp_idx - find the channel-group idx (0-4) for channel.
+ *
+ * This function is used when initializing channel-info structs.
+ *
+ * NOTE: These channel groups do *NOT* match the bands above!
+ * These channel groups are based on factory-tested channels;
+ * on A-band, EEPROM's "group frequency" entries represent the top
+ * channel in each of groups 1-4 (channels above group 4's top channel
+ * also map to group 4). All B/G channels are in group 0.
+ */
+static u16
+il3945_hw_reg_get_ch_grp_idx(struct il_priv *il,
+ const struct il_channel_info *ch_info)
+{
+ struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
+ struct il3945_eeprom_txpower_group *ch_grp = &eeprom->groups[0];
+ u8 group;
+ u16 group_idx = 0; /* based on factory calib frequencies */
+ u8 grp_channel;
+
+ /* Find the group idx for the channel ... don't use idx 1(?) */
+ if (il_is_channel_a_band(ch_info)) {
+ for (group = 1; group < 5; group++) {
+ grp_channel = ch_grp[group].group_channel;
+ if (ch_info->channel <= grp_channel) {
+ group_idx = group;
+ break;
+ }
+ }
+ /* group 4 has a few channels *above* its factory cal freq */
+ if (group == 5)
+ group_idx = 4;
+ } else
+ group_idx = 0; /* 2.4 GHz, group 0 */
+
+ D_POWER("Chnl %d mapped to grp %d\n", ch_info->channel, group_idx);
+ return group_idx;
+}
+
+/**
+ * il3945_hw_reg_get_matched_power_idx - Interpolate to get nominal idx
+ *
+ * Interpolate to get nominal (i.e. at factory calibration temperature) idx
+ * into radio/DSP gain settings table for requested power.
+ */
+static int
+il3945_hw_reg_get_matched_power_idx(struct il_priv *il, s8 requested_power,
+ s32 setting_idx, s32 *new_idx)
+{
+ const struct il3945_eeprom_txpower_group *chnl_grp = NULL;
+ struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
+ s32 idx0, idx1;
+ s32 power = 2 * requested_power;
+ s32 i;
+ const struct il3945_eeprom_txpower_sample *samples;
+ s32 gains0, gains1;
+ s32 res;
+ s32 denominator;
+
+ chnl_grp = &eeprom->groups[setting_idx];
+ samples = chnl_grp->samples;
+ for (i = 0; i < 5; i++) {
+ if (power == samples[i].power) {
+ *new_idx = samples[i].gain_idx;
+ return 0;
+ }
+ }
+
+ if (power > samples[1].power) {
+ idx0 = 0;
+ idx1 = 1;
+ } else if (power > samples[2].power) {
+ idx0 = 1;
+ idx1 = 2;
+ } else if (power > samples[3].power) {
+ idx0 = 2;
+ idx1 = 3;
+ } else {
+ idx0 = 3;
+ idx1 = 4;
+ }
+
+ denominator = (s32) samples[idx1].power - (s32) samples[idx0].power;
+ if (denominator == 0)
+ return -EINVAL;
+ gains0 = (s32) samples[idx0].gain_idx * (1 << 19);
+ gains1 = (s32) samples[idx1].gain_idx * (1 << 19);
+ res =
+ gains0 + (gains1 - gains0) * ((s32) power -
+ (s32) samples[idx0].power) /
+ denominator + (1 << 18);
+ *new_idx = res >> 19;
+ return 0;
+}
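+/*
+ * Worked example (hypothetical EEPROM samples, for illustration only):
+ * with sample powers {24, 20, 16, 12, 8} and gain idxs {8, 16, 24, 32, 40},
+ * a requested_power of 9 gives power = 18, which falls between samples 1
+ * and 2, so the fixed-point math above computes
+ *	res = (16 << 19) + ((24 - 16) << 19) * (18 - 20) / (16 - 20) + (1 << 18)
+ * and *new_idx = res >> 19 = 20, i.e. halfway between the two gain idxs.
+ */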
+
+static void
+il3945_hw_reg_init_channel_groups(struct il_priv *il)
+{
+ u32 i;
+ s32 rate_idx;
+ struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
+ const struct il3945_eeprom_txpower_group *group;
+
+ D_POWER("Initializing factory calib info from EEPROM\n");
+
+ for (i = 0; i < IL_NUM_TX_CALIB_GROUPS; i++) {
+ s8 *clip_pwrs; /* table of power levels for each rate */
+ s8 satur_pwr; /* saturation power for each chnl group */
+ group = &eeprom->groups[i];
+
+ /* sanity check on factory saturation power value */
+ if (group->saturation_power < 40) {
+ IL_WARN("Error: saturation power is %d, "
+ "less than minimum expected 40\n",
+ group->saturation_power);
+ return;
+ }
+
+ /*
+ * Derive requested power levels for each rate, based on
+ * hardware capabilities (saturation power for band).
+ * Basic value is 3dB down from saturation, with further
+ * power reductions for highest 3 data rates. These
+ * backoffs provide headroom for high rate modulation
+ * power peaks, without too much distortion (clipping).
+ */
+ /* we'll fill in this array with h/w max power levels */
+ clip_pwrs = (s8 *) il->_3945.clip_groups[i].clip_powers;
+
+ /* divide factory saturation power by 2 to find -3dB level */
+ satur_pwr = (s8) (group->saturation_power >> 1);
+
+ /* fill in channel group's nominal powers for each rate */
+ for (rate_idx = 0; rate_idx < RATE_COUNT_3945;
+ rate_idx++, clip_pwrs++) {
+ switch (rate_idx) {
+ case RATE_36M_IDX_TBL:
+ if (i == 0) /* B/G */
+ *clip_pwrs = satur_pwr;
+ else /* A */
+ *clip_pwrs = satur_pwr - 5;
+ break;
+ case RATE_48M_IDX_TBL:
+ if (i == 0)
+ *clip_pwrs = satur_pwr - 7;
+ else
+ *clip_pwrs = satur_pwr - 10;
+ break;
+ case RATE_54M_IDX_TBL:
+ if (i == 0)
+ *clip_pwrs = satur_pwr - 9;
+ else
+ *clip_pwrs = satur_pwr - 12;
+ break;
+ default:
+ *clip_pwrs = satur_pwr;
+ break;
+ }
+ }
+ }
+}
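+/*
+ * Illustration (hypothetical saturation_power of 54, so satur_pwr = 27):
+ * the B/G group then gets a clip power of 27 for every rate except 48M
+ * (20) and 54M (18), while an A-band group gets 22 for 36M, 17 for 48M,
+ * 15 for 54M and 27 for the remaining rates.
+ */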
+
+/**
+ * il3945_txpower_set_from_eeprom - Set channel power info based on EEPROM
+ *
+ * Second pass (during init) to set up il->channel_info
+ *
+ * Set up Tx-power settings in our channel info database for each VALID
+ * (for this geo/SKU) channel, at all Tx data rates, based on eeprom values
+ * and current temperature.
+ *
+ * Since this is based on current temperature (at init time), these values may
+ * not be valid for very long, but it gives us a starting/default point,
+ * and allows us to perform active (i.e. Tx-based) scans.
+ *
+ * This does *not* write values to NIC, just sets up our internal table.
+ */
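+/*
+ * In outline (per the code below): for each valid channel we pick its
+ * factory calibration group, interpolate a base gain idx for every OFDM
+ * rate, apply the current-temperature delta, derive the CCK entries from
+ * the OFDM 12M entry plus the fixed IL_CCK_FROM_OFDM_* offsets, and
+ * finally fill in the scan powers (1M for CCK, 6M for OFDM).
+ */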
+int
+il3945_txpower_set_from_eeprom(struct il_priv *il)
+{
+ struct il_channel_info *ch_info = NULL;
+ struct il3945_channel_power_info *pwr_info;
+ struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
+ int delta_idx;
+ u8 rate_idx;
+ u8 scan_tbl_idx;
+ const s8 *clip_pwrs; /* array of power levels for each rate */
+ u8 gain, dsp_atten;
+ s8 power;
+ u8 pwr_idx, base_pwr_idx, a_band;
+ u8 i;
+ int temperature;
+
+ /* save temperature reference,
+ * so we can determine next time to calibrate */
+ temperature = il3945_hw_reg_txpower_get_temperature(il);
+ il->last_temperature = temperature;
+
+ il3945_hw_reg_init_channel_groups(il);
+
+ /* initialize Tx power info for each and every channel, 2.4 and 5.x */
+ for (i = 0, ch_info = il->channel_info; i < il->channel_count;
+ i++, ch_info++) {
+ a_band = il_is_channel_a_band(ch_info);
+ if (!il_is_channel_valid(ch_info))
+ continue;
+
+ /* find this channel's channel group (*not* "band") idx */
+ ch_info->group_idx = il3945_hw_reg_get_ch_grp_idx(il, ch_info);
+
+ /* Get this chnlgrp's rate->max/clip-powers table */
+ clip_pwrs =
+ il->_3945.clip_groups[ch_info->group_idx].clip_powers;
+
+ /* calculate power idx *adjustment* value according to
+ * diff between current temperature and factory temperature */
+ delta_idx =
+ il3945_hw_reg_adjust_power_by_temp(temperature,
+ eeprom->groups[ch_info->
+ group_idx].
+ temperature);
+
+ D_POWER("Delta idx for channel %d: %d [%d]\n", ch_info->channel,
+ delta_idx, temperature + IL_TEMP_CONVERT);
+
+ /* set tx power value for all OFDM rates */
+ for (rate_idx = 0; rate_idx < IL_OFDM_RATES; rate_idx++) {
+ s32 uninitialized_var(power_idx);
+ int rc;
+
+ /* use channel group's clip-power table,
+ * but don't exceed channel's max power */
+ s8 pwr = min(ch_info->max_power_avg,
+ clip_pwrs[rate_idx]);
+
+ pwr_info = &ch_info->power_info[rate_idx];
+
+ /* get base (i.e. at factory-measured temperature)
+ * power table idx for this rate's power */
+ rc = il3945_hw_reg_get_matched_power_idx(il, pwr,
+ ch_info->
+ group_idx,
+ &power_idx);
+ if (rc) {
+ IL_ERR("Invalid power idx\n");
+ return rc;
+ }
+ pwr_info->base_power_idx = (u8) power_idx;
+
+ /* temperature compensate */
+ power_idx += delta_idx;
+
+ /* stay within range of gain table */
+ power_idx = il3945_hw_reg_fix_power_idx(power_idx);
+
+ /* fill 1 OFDM rate's il3945_channel_power_info struct */
+ pwr_info->requested_power = pwr;
+ pwr_info->power_table_idx = (u8) power_idx;
+ pwr_info->tpc.tx_gain =
+ power_gain_table[a_band][power_idx].tx_gain;
+ pwr_info->tpc.dsp_atten =
+ power_gain_table[a_band][power_idx].dsp_atten;
+ }
+
+ /* set tx power for CCK rates, based on OFDM 12 Mbit settings */
+ pwr_info = &ch_info->power_info[RATE_12M_IDX_TBL];
+ power = pwr_info->requested_power + IL_CCK_FROM_OFDM_POWER_DIFF;
+ pwr_idx = pwr_info->power_table_idx + IL_CCK_FROM_OFDM_IDX_DIFF;
+ base_pwr_idx =
+ pwr_info->base_power_idx + IL_CCK_FROM_OFDM_IDX_DIFF;
+
+ /* stay within table range */
+ pwr_idx = il3945_hw_reg_fix_power_idx(pwr_idx);
+ gain = power_gain_table[a_band][pwr_idx].tx_gain;
+ dsp_atten = power_gain_table[a_band][pwr_idx].dsp_atten;
+
+ /* fill each CCK rate's il3945_channel_power_info structure
+ * NOTE: All CCK-rate Txpwrs are the same for a given chnl!
+ * NOTE: CCK rates start at end of OFDM rates! */
+ for (rate_idx = 0; rate_idx < IL_CCK_RATES; rate_idx++) {
+ pwr_info =
+ &ch_info->power_info[rate_idx + IL_OFDM_RATES];
+ pwr_info->requested_power = power;
+ pwr_info->power_table_idx = pwr_idx;
+ pwr_info->base_power_idx = base_pwr_idx;
+ pwr_info->tpc.tx_gain = gain;
+ pwr_info->tpc.dsp_atten = dsp_atten;
+ }
+
+ /* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */
+ for (scan_tbl_idx = 0; scan_tbl_idx < IL_NUM_SCAN_RATES;
+ scan_tbl_idx++) {
+ s32 actual_idx =
+ (scan_tbl_idx ==
+ 0) ? RATE_1M_IDX_TBL : RATE_6M_IDX_TBL;
+ il3945_hw_reg_set_scan_power(il, scan_tbl_idx,
+ actual_idx, clip_pwrs,
+ ch_info, a_band);
+ }
+ }
+
+ return 0;
+}
+
+int
+il3945_hw_rxq_stop(struct il_priv *il)
+{
+ int rc;
+
+ il_wr(il, FH39_RCSR_CONFIG(0), 0);
+ rc = il_poll_bit(il, FH39_RSSR_STATUS,
+ FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
+ if (rc < 0)
+ IL_ERR("Can't stop Rx DMA.\n");
+
+ return 0;
+}
+
+int
+il3945_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq)
+{
+ int txq_id = txq->q.id;
+
+ struct il3945_shared *shared_data = il->_3945.shared_virt;
+
+ shared_data->tx_base_ptr[txq_id] = cpu_to_le32((u32) txq->q.dma_addr);
+
+ il_wr(il, FH39_CBCC_CTRL(txq_id), 0);
+ il_wr(il, FH39_CBCC_BASE(txq_id), 0);
+
+ il_wr(il, FH39_TCSR_CONFIG(txq_id),
+ FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT |
+ FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF |
+ FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD |
+ FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL |
+ FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE);
+
+ /* fake read to flush all prev. writes */
+ _il_rd(il, FH39_TSSR_CBB_BASE);
+
+ return 0;
+}
+
+/*
+ * HCMD utils
+ */
+static u16
+il3945_get_hcmd_size(u8 cmd_id, u16 len)
+{
+ switch (cmd_id) {
+ case C_RXON:
+ return sizeof(struct il3945_rxon_cmd);
+ case C_POWER_TBL:
+ return sizeof(struct il3945_powertable_cmd);
+ default:
+ return len;
+ }
+}
+
+static u16
+il3945_build_addsta_hcmd(const struct il_addsta_cmd *cmd, u8 * data)
+{
+ struct il3945_addsta_cmd *addsta = (struct il3945_addsta_cmd *)data;
+ addsta->mode = cmd->mode;
+ memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify));
+ memcpy(&addsta->key, &cmd->key, sizeof(struct il4965_keyinfo));
+ addsta->station_flags = cmd->station_flags;
+ addsta->station_flags_msk = cmd->station_flags_msk;
+ addsta->tid_disable_tx = cpu_to_le16(0);
+ addsta->rate_n_flags = cmd->rate_n_flags;
+ addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid;
+ addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid;
+ addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn;
+
+ return (u16) sizeof(struct il3945_addsta_cmd);
+}
+
+static int
+il3945_add_bssid_station(struct il_priv *il, const u8 * addr, u8 * sta_id_r)
+{
+ struct il_rxon_context *ctx = &il->ctx;
+ int ret;
+ u8 sta_id;
+ unsigned long flags;
+
+ if (sta_id_r)
+ *sta_id_r = IL_INVALID_STATION;
+
+ ret = il_add_station_common(il, ctx, addr, 0, NULL, &sta_id);
+ if (ret) {
+ IL_ERR("Unable to add station %pM\n", addr);
+ return ret;
+ }
+
+ if (sta_id_r)
+ *sta_id_r = sta_id;
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ il->stations[sta_id].used |= IL_STA_LOCAL;
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ return 0;
+}
+
+static int
+il3945_manage_ibss_station(struct il_priv *il, struct ieee80211_vif *vif,
+ bool add)
+{
+ struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
+ int ret;
+
+ if (add) {
+ ret =
+ il3945_add_bssid_station(il, vif->bss_conf.bssid,
+ &vif_priv->ibss_bssid_sta_id);
+ if (ret)
+ return ret;
+
+ il3945_sync_sta(il, vif_priv->ibss_bssid_sta_id,
+ (il->band ==
+ IEEE80211_BAND_5GHZ) ? RATE_6M_PLCP :
+ RATE_1M_PLCP);
+ il3945_rate_scale_init(il->hw, vif_priv->ibss_bssid_sta_id);
+
+ return 0;
+ }
+
+ return il_remove_station(il, vif_priv->ibss_bssid_sta_id,
+ vif->bss_conf.bssid);
+}
+
+/**
+ * il3945_init_hw_rate_table - Initialize the hardware rate fallback table
+ */
+int
+il3945_init_hw_rate_table(struct il_priv *il)
+{
+ int rc, i, idx, prev_idx;
+ struct il3945_rate_scaling_cmd rate_cmd = {
+ .reserved = {0, 0, 0},
+ };
+ struct il3945_rate_scaling_info *table = rate_cmd.table;
+
+ for (i = 0; i < ARRAY_SIZE(il3945_rates); i++) {
+ idx = il3945_rates[i].table_rs_idx;
+
+ table[idx].rate_n_flags = cpu_to_le16(il3945_rates[i].plcp);
+ table[idx].try_cnt = il->retry_rate;
+ prev_idx = il3945_get_prev_ieee_rate(i);
+ table[idx].next_rate_idx = il3945_rates[prev_idx].table_rs_idx;
+ }
+
+ switch (il->band) {
+ case IEEE80211_BAND_5GHZ:
+ D_RATE("Select A mode rate scale\n");
+ /* If one of the following CCK rates is used,
+ * have it fall back to the 6M OFDM rate */
+ for (i = RATE_1M_IDX_TBL; i <= RATE_11M_IDX_TBL; i++)
+ table[i].next_rate_idx =
+ il3945_rates[IL_FIRST_OFDM_RATE].table_rs_idx;
+
+ /* Don't fall back to CCK rates */
+ table[RATE_12M_IDX_TBL].next_rate_idx = RATE_9M_IDX_TBL;
+
+ /* Don't drop out of OFDM rates */
+ table[RATE_6M_IDX_TBL].next_rate_idx =
+ il3945_rates[IL_FIRST_OFDM_RATE].table_rs_idx;
+ break;
+
+ case IEEE80211_BAND_2GHZ:
+ D_RATE("Select B/G mode rate scale\n");
+ /* If an OFDM rate is used, have it fall back to the
+ * 1M CCK rates */
+
+ if (!(il->_3945.sta_supp_rates & IL_OFDM_RATES_MASK) &&
+ il_is_associated(il)) {
+
+ idx = IL_FIRST_CCK_RATE;
+ for (i = RATE_6M_IDX_TBL; i <= RATE_54M_IDX_TBL; i++)
+ table[i].next_rate_idx =
+ il3945_rates[idx].table_rs_idx;
+
+ idx = RATE_11M_IDX_TBL;
+ /* CCK shouldn't fall back to OFDM... */
+ table[idx].next_rate_idx = RATE_5M_IDX_TBL;
+ }
+ break;
+
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ /* Update the rate scaling for control frame Tx */
+ rate_cmd.table_id = 0;
+ rc = il_send_cmd_pdu(il, C_RATE_SCALE, sizeof(rate_cmd), &rate_cmd);
+ if (rc)
+ return rc;
+
+ /* Update the rate scaling for data frame Tx */
+ rate_cmd.table_id = 1;
+ return il_send_cmd_pdu(il, C_RATE_SCALE, sizeof(rate_cmd), &rate_cmd);
+}
+
+/* Called when initializing driver */
+int
+il3945_hw_set_hw_params(struct il_priv *il)
+{
+ memset((void *)&il->hw_params, 0, sizeof(struct il_hw_params));
+
+ il->_3945.shared_virt =
+ dma_alloc_coherent(&il->pci_dev->dev, sizeof(struct il3945_shared),
+ &il->_3945.shared_phys, GFP_KERNEL);
+ if (!il->_3945.shared_virt) {
+ IL_ERR("failed to allocate pci memory\n");
+ return -ENOMEM;
+ }
+
+ /* Assign number of Usable TX queues */
+ il->hw_params.max_txq_num = il->cfg->base_params->num_of_queues;
+
+ il->hw_params.tfd_size = sizeof(struct il3945_tfd);
+ il->hw_params.rx_page_order = get_order(IL_RX_BUF_SIZE_3K);
+ il->hw_params.max_rxq_size = RX_QUEUE_SIZE;
+ il->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
+ il->hw_params.max_stations = IL3945_STATION_COUNT;
+ il->ctx.bcast_sta_id = IL3945_BROADCAST_ID;
+
+ il->sta_key_max_num = STA_KEY_MAX_NUM;
+
+ il->hw_params.rx_wrt_ptr_reg = FH39_RSCSR_CHNL0_WPTR;
+ il->hw_params.max_beacon_itrvl = IL39_MAX_UCODE_BEACON_INTERVAL;
+ il->hw_params.beacon_time_tsf_bits = IL3945_EXT_BEACON_TIME_POS;
+
+ return 0;
+}
+
+unsigned int
+il3945_hw_get_beacon_cmd(struct il_priv *il, struct il3945_frame *frame,
+ u8 rate)
+{
+ struct il3945_tx_beacon_cmd *tx_beacon_cmd;
+ unsigned int frame_size;
+
+ tx_beacon_cmd = (struct il3945_tx_beacon_cmd *)&frame->u;
+ memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
+
+ tx_beacon_cmd->tx.sta_id = il->ctx.bcast_sta_id;
+ tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+
+ frame_size =
+ il3945_fill_beacon_frame(il, tx_beacon_cmd->frame,
+ sizeof(frame->u) - sizeof(*tx_beacon_cmd));
+
+ BUG_ON(frame_size > MAX_MPDU_SIZE);
+ tx_beacon_cmd->tx.len = cpu_to_le16((u16) frame_size);
+
+ tx_beacon_cmd->tx.rate = rate;
+ tx_beacon_cmd->tx.tx_flags =
+ (TX_CMD_FLG_SEQ_CTL_MSK | TX_CMD_FLG_TSF_MSK);
+
+ /* supp_rates[0] == OFDM start at IL_FIRST_OFDM_RATE */
+ tx_beacon_cmd->tx.supp_rates[0] =
+ (IL_OFDM_BASIC_RATES_MASK >> IL_FIRST_OFDM_RATE) & 0xFF;
+
+ tx_beacon_cmd->tx.supp_rates[1] = (IL_CCK_BASIC_RATES_MASK & 0xF);
+
+ return sizeof(struct il3945_tx_beacon_cmd) + frame_size;
+}
+
+void
+il3945_hw_handler_setup(struct il_priv *il)
+{
+ il->handlers[C_TX] = il3945_hdl_tx;
+ il->handlers[N_3945_RX] = il3945_hdl_rx;
+}
+
+void
+il3945_hw_setup_deferred_work(struct il_priv *il)
+{
+ INIT_DELAYED_WORK(&il->_3945.thermal_periodic,
+ il3945_bg_reg_txpower_periodic);
+}
+
+void
+il3945_hw_cancel_deferred_work(struct il_priv *il)
+{
+ cancel_delayed_work(&il->_3945.thermal_periodic);
+}
+
+/* check contents of special bootstrap uCode SRAM */
+static int
+il3945_verify_bsm(struct il_priv *il)
+{
+ __le32 *image = il->ucode_boot.v_addr;
+ u32 len = il->ucode_boot.len;
+ u32 reg;
+ u32 val;
+
+ D_INFO("Begin verify bsm\n");
+
+ /* verify BSM SRAM contents */
+ val = il_rd_prph(il, BSM_WR_DWCOUNT_REG);
+ for (reg = BSM_SRAM_LOWER_BOUND; reg < BSM_SRAM_LOWER_BOUND + len;
+ reg += sizeof(u32), image++) {
+ val = il_rd_prph(il, reg);
+ if (val != le32_to_cpu(*image)) {
+ IL_ERR("BSM uCode verification failed at "
+ "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
+ BSM_SRAM_LOWER_BOUND, reg - BSM_SRAM_LOWER_BOUND,
+ len, val, le32_to_cpu(*image));
+ return -EIO;
+ }
+ }
+
+ D_INFO("BSM bootstrap uCode image OK\n");
+
+ return 0;
+}
+
+/******************************************************************************
+ *
+ * EEPROM related functions
+ *
+ ******************************************************************************/
+
+/*
+ * Clear the OWNER_MSK, to establish driver (instead of uCode running on
+ * embedded controller) as EEPROM reader; each read is a series of pulses
+ * to/from the EEPROM chip, not a single event, so even reads could conflict
+ * if they weren't arbitrated by some ownership mechanism. Here, the driver
+ * simply claims ownership, which should be safe when this function is called
+ * (i.e. before loading uCode!).
+ */
+static int
+il3945_eeprom_acquire_semaphore(struct il_priv *il)
+{
+ _il_clear_bit(il, CSR_EEPROM_GP, CSR_EEPROM_GP_IF_OWNER_MSK);
+ return 0;
+}
+
+static void
+il3945_eeprom_release_semaphore(struct il_priv *il)
+{
+ return;
+}
+
+/**
+ * il3945_load_bsm - Load bootstrap instructions
+ *
+ * BSM operation:
+ *
+ * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
+ * in special SRAM that does not power down during RFKILL. When powering back
+ * up after power-saving sleeps (or during initial uCode load), the BSM loads
+ * the bootstrap program into the on-board processor, and starts it.
+ *
+ * The bootstrap program loads (via DMA) instructions and data for a new
+ * program from host DRAM locations indicated by the host driver in the
+ * BSM_DRAM_* registers. Once the new program is loaded, it starts
+ * automatically.
+ *
+ * When initializing the NIC, the host driver points the BSM to the
+ * "initialize" uCode image. This uCode sets up some internal data, then
+ * notifies host via "initialize alive" that it is complete.
+ *
+ * The host then replaces the BSM_DRAM_* pointer values to point to the
+ * normal runtime uCode instructions and a backup uCode data cache buffer
+ * (filled initially with starting data values for the on-board processor),
+ * then triggers the "initialize" uCode to load and launch the runtime uCode,
+ * which begins normal operation.
+ *
+ * When doing a power-save shutdown, runtime uCode saves data SRAM into
+ * the backup data cache in DRAM before SRAM is powered down.
+ *
+ * When powering back up, the BSM loads the bootstrap program. This reloads
+ * the runtime uCode instructions and the backup data cache into SRAM,
+ * and re-launches the runtime uCode from where it left off.
+ */
+static int
+il3945_load_bsm(struct il_priv *il)
+{
+ __le32 *image = il->ucode_boot.v_addr;
+ u32 len = il->ucode_boot.len;
+ dma_addr_t pinst;
+ dma_addr_t pdata;
+ u32 inst_len;
+ u32 data_len;
+ int rc;
+ int i;
+ u32 done;
+ u32 reg_offset;
+
+ D_INFO("Begin load bsm\n");
+
+ /* make sure bootstrap program is no larger than BSM's SRAM size */
+ if (len > IL39_MAX_BSM_SIZE)
+ return -EINVAL;
+
+ /* Tell bootstrap uCode where to find the "Initialize" uCode
+ * in host DRAM ... host DRAM physical address bits 31:0 for 3945.
+ * NOTE: il3945_initialize_alive_start() will replace these values,
+ * after the "initialize" uCode has run, to point to
+ * runtime/protocol instructions and backup data cache. */
+ pinst = il->ucode_init.p_addr;
+ pdata = il->ucode_init_data.p_addr;
+ inst_len = il->ucode_init.len;
+ data_len = il->ucode_init_data.len;
+
+ il_wr_prph(il, BSM_DRAM_INST_PTR_REG, pinst);
+ il_wr_prph(il, BSM_DRAM_DATA_PTR_REG, pdata);
+ il_wr_prph(il, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
+ il_wr_prph(il, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
+
+ /* Fill BSM memory with bootstrap instructions */
+ for (reg_offset = BSM_SRAM_LOWER_BOUND;
+ reg_offset < BSM_SRAM_LOWER_BOUND + len;
+ reg_offset += sizeof(u32), image++)
+ _il_wr_prph(il, reg_offset, le32_to_cpu(*image));
+
+ rc = il3945_verify_bsm(il);
+ if (rc)
+ return rc;
+
+ /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
+ il_wr_prph(il, BSM_WR_MEM_SRC_REG, 0x0);
+ il_wr_prph(il, BSM_WR_MEM_DST_REG, IL39_RTC_INST_LOWER_BOUND);
+ il_wr_prph(il, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
+
+ /* Load bootstrap code into instruction SRAM now,
+ * to prepare to load "initialize" uCode */
+ il_wr_prph(il, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);
+
+ /* Wait for load of bootstrap uCode to finish */
+ for (i = 0; i < 100; i++) {
+ done = il_rd_prph(il, BSM_WR_CTRL_REG);
+ if (!(done & BSM_WR_CTRL_REG_BIT_START))
+ break;
+ udelay(10);
+ }
+ if (i < 100)
+ D_INFO("BSM write complete, poll %d iterations\n", i);
+ else {
+ IL_ERR("BSM write did not complete!\n");
+ return -EIO;
+ }
+
+ /* Enable future boot loads whenever power management unit triggers it
+ * (e.g. when powering back up after power-save shutdown) */
+ il_wr_prph(il, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);
+
+ return 0;
+}
+
+static struct il_hcmd_ops il3945_hcmd = {
+ .rxon_assoc = il3945_send_rxon_assoc,
+ .commit_rxon = il3945_commit_rxon,
+};
+
+static struct il_lib_ops il3945_lib = {
+ .txq_attach_buf_to_tfd = il3945_hw_txq_attach_buf_to_tfd,
+ .txq_free_tfd = il3945_hw_txq_free_tfd,
+ .txq_init = il3945_hw_tx_queue_init,
+ .load_ucode = il3945_load_bsm,
+ .dump_nic_error_log = il3945_dump_nic_error_log,
+ .apm_ops = {
+ .init = il3945_apm_init,
+ .config = il3945_nic_config,
+ },
+ .eeprom_ops = {
+ .regulatory_bands = {
+ EEPROM_REGULATORY_BAND_1_CHANNELS,
+ EEPROM_REGULATORY_BAND_2_CHANNELS,
+ EEPROM_REGULATORY_BAND_3_CHANNELS,
+ EEPROM_REGULATORY_BAND_4_CHANNELS,
+ EEPROM_REGULATORY_BAND_5_CHANNELS,
+ EEPROM_REGULATORY_BAND_NO_HT40,
+ EEPROM_REGULATORY_BAND_NO_HT40,
+ },
+ .acquire_semaphore = il3945_eeprom_acquire_semaphore,
+ .release_semaphore = il3945_eeprom_release_semaphore,
+ },
+ .send_tx_power = il3945_send_tx_power,
+ .is_valid_rtc_data_addr = il3945_hw_valid_rtc_data_addr,
+
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+ .debugfs_ops = {
+ .rx_stats_read = il3945_ucode_rx_stats_read,
+ .tx_stats_read = il3945_ucode_tx_stats_read,
+ .general_stats_read = il3945_ucode_general_stats_read,
+ },
+#endif
+};
+
+static const struct il_legacy_ops il3945_legacy_ops = {
+ .post_associate = il3945_post_associate,
+ .config_ap = il3945_config_ap,
+ .manage_ibss_station = il3945_manage_ibss_station,
+};
+
+static struct il_hcmd_utils_ops il3945_hcmd_utils = {
+ .get_hcmd_size = il3945_get_hcmd_size,
+ .build_addsta_hcmd = il3945_build_addsta_hcmd,
+ .request_scan = il3945_request_scan,
+ .post_scan = il3945_post_scan,
+};
+
+static const struct il_ops il3945_ops = {
+ .lib = &il3945_lib,
+ .hcmd = &il3945_hcmd,
+ .utils = &il3945_hcmd_utils,
+ .led = &il3945_led_ops,
+ .legacy = &il3945_legacy_ops,
+ .ieee80211_ops = &il3945_hw_ops,
+};
+
+static struct il_base_params il3945_base_params = {
+ .eeprom_size = IL3945_EEPROM_IMG_SIZE,
+ .num_of_queues = IL39_NUM_QUEUES,
+ .pll_cfg_val = CSR39_ANA_PLL_CFG_VAL,
+ .set_l0s = false,
+ .use_bsm = true,
+ .led_compensation = 64,
+ .wd_timeout = IL_DEF_WD_TIMEOUT,
+};
+
+static struct il_cfg il3945_bg_cfg = {
+ .name = "3945BG",
+ .fw_name_pre = IL3945_FW_PRE,
+ .ucode_api_max = IL3945_UCODE_API_MAX,
+ .ucode_api_min = IL3945_UCODE_API_MIN,
+ .sku = IL_SKU_G,
+ .eeprom_ver = EEPROM_3945_EEPROM_VERSION,
+ .ops = &il3945_ops,
+ .mod_params = &il3945_mod_params,
+ .base_params = &il3945_base_params,
+ .led_mode = IL_LED_BLINK,
+};
+
+static struct il_cfg il3945_abg_cfg = {
+ .name = "3945ABG",
+ .fw_name_pre = IL3945_FW_PRE,
+ .ucode_api_max = IL3945_UCODE_API_MAX,
+ .ucode_api_min = IL3945_UCODE_API_MIN,
+ .sku = IL_SKU_A | IL_SKU_G,
+ .eeprom_ver = EEPROM_3945_EEPROM_VERSION,
+ .ops = &il3945_ops,
+ .mod_params = &il3945_mod_params,
+ .base_params = &il3945_base_params,
+ .led_mode = IL_LED_BLINK,
+};
+
+DEFINE_PCI_DEVICE_TABLE(il3945_hw_card_ids) = {
+ {IL_PCI_DEVICE(0x4222, 0x1005, il3945_bg_cfg)},
+ {IL_PCI_DEVICE(0x4222, 0x1034, il3945_bg_cfg)},
+ {IL_PCI_DEVICE(0x4222, 0x1044, il3945_bg_cfg)},
+ {IL_PCI_DEVICE(0x4227, 0x1014, il3945_bg_cfg)},
+ {IL_PCI_DEVICE(0x4222, PCI_ANY_ID, il3945_abg_cfg)},
+ {IL_PCI_DEVICE(0x4227, PCI_ANY_ID, il3945_abg_cfg)},
+ {0}
+};
+
+MODULE_DEVICE_TABLE(pci, il3945_hw_card_ids);
diff --git a/drivers/net/wireless/iwlegacy/3945.h b/drivers/net/wireless/iwlegacy/3945.h
new file mode 100644
index 000000000000..9f42f79f8778
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/3945.h
@@ -0,0 +1,607 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#ifndef __il_3945_h__
+#define __il_3945_h__
+
+#include <linux/pci.h> /* for struct pci_device_id */
+#include <linux/kernel.h>
+#include <net/ieee80211_radiotap.h>
+
+/* Hardware specific file defines the PCI IDs table for that hardware module */
+extern const struct pci_device_id il3945_hw_card_ids[];
+
+#include "common.h"
+
+/* Highest firmware API version supported */
+#define IL3945_UCODE_API_MAX 2
+
+/* Lowest firmware API version supported */
+#define IL3945_UCODE_API_MIN 1
+
+#define IL3945_FW_PRE "iwlwifi-3945-"
+#define _IL3945_MODULE_FIRMWARE(api) IL3945_FW_PRE #api ".ucode"
+#define IL3945_MODULE_FIRMWARE(api) _IL3945_MODULE_FIRMWARE(api)
+
+/* Default noise level to report when noise measurement is not available.
+ * This may be because we're:
+ * 1) Not associated (4965, no beacon stats being sent to driver)
+ * 2) Scanning (noise measurement does not apply to associated channel)
+ * 3) Receiving CCK (3945 delivers noise info only for OFDM frames)
+ * Use default noise value of -127 ... this is below the range of measurable
+ * Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user.
+ * Also, -127 works better than 0 when averaging frames with/without
+ * noise info (e.g. averaging might be done in app); measured dBm values are
+ * always negative ... using a negative value as the default keeps all
+ * averages within an s8's (used in some apps) range of negative values. */
+#define IL_NOISE_MEAS_NOT_AVAILABLE (-127)
+
+/* Module parameters accessible from iwl-*.c */
+extern struct il_mod_params il3945_mod_params;
+
+struct il3945_rate_scale_data {
+ u64 data;
+ s32 success_counter;
+ s32 success_ratio;
+ s32 counter;
+ s32 average_tpt;
+ unsigned long stamp;
+};
+
+struct il3945_rs_sta {
+ spinlock_t lock;
+ struct il_priv *il;
+ s32 *expected_tpt;
+ unsigned long last_partial_flush;
+ unsigned long last_flush;
+ u32 flush_time;
+ u32 last_tx_packets;
+ u32 tx_packets;
+ u8 tgg;
+ u8 flush_pending;
+ u8 start_rate;
+ struct timer_list rate_scale_flush;
+ struct il3945_rate_scale_data win[RATE_COUNT_3945];
+#ifdef CONFIG_MAC80211_DEBUGFS
+ struct dentry *rs_sta_dbgfs_stats_table_file;
+#endif
+
+ /* used to be in sta_info */
+ int last_txrate_idx;
+};
+
+/*
+ * The common struct MUST be first because it is shared between
+ * 3945 and 4965!
+ */
+struct il3945_sta_priv {
+ struct il_station_priv_common common;
+ struct il3945_rs_sta rs_sta;
+};
+
+enum il3945_antenna {
+ IL_ANTENNA_DIVERSITY,
+ IL_ANTENNA_MAIN,
+ IL_ANTENNA_AUX
+};
+
+/*
+ * RTS threshold here is total size [2347] minus 4 FCS bytes
+ * Per spec:
+ * a value of 0 means RTS on all data/management packets
+ * a value > max MSDU size means no RTS
+ * else RTS for data/management frames where MPDU is larger
+ * than RTS value.
+ */
+#define DEFAULT_RTS_THRESHOLD 2347U
+#define MIN_RTS_THRESHOLD 0U
+#define MAX_RTS_THRESHOLD 2347U
+#define MAX_MSDU_SIZE 2304U
+#define MAX_MPDU_SIZE 2346U
+#define DEFAULT_BEACON_INTERVAL 100U
+#define DEFAULT_SHORT_RETRY_LIMIT 7U
+#define DEFAULT_LONG_RETRY_LIMIT 4U
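+/* Note that the default threshold (2347) is above MAX_MSDU_SIZE, so by the
+ * rule above RTS/CTS is effectively disabled until the threshold is lowered. */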
+
+#define IL_TX_FIFO_AC0 0
+#define IL_TX_FIFO_AC1 1
+#define IL_TX_FIFO_AC2 2
+#define IL_TX_FIFO_AC3 3
+#define IL_TX_FIFO_HCCA_1 5
+#define IL_TX_FIFO_HCCA_2 6
+#define IL_TX_FIFO_NONE 7
+
+#define IEEE80211_DATA_LEN 2304
+#define IEEE80211_4ADDR_LEN 30
+#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN)
+#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN)
+
+struct il3945_frame {
+ union {
+ struct ieee80211_hdr frame;
+ struct il3945_tx_beacon_cmd beacon;
+ u8 raw[IEEE80211_FRAME_LEN];
+ u8 cmd[360];
+ } u;
+ struct list_head list;
+};
+
+#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
+#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
+#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
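+/* For example, SEQ_TO_SN(0x01b4) == 0x1b: bits 15:4 of the sequence control
+ * field carry the sequence number, bits 3:0 the fragment number. */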
+
+#define SUP_RATE_11A_MAX_NUM_CHANNELS 8
+#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
+#define SUP_RATE_11G_MAX_NUM_CHANNELS 12
+
+#define IL_SUPPORTED_RATES_IE_LEN 8
+
+#define SCAN_INTERVAL 100
+
+#define MAX_TID_COUNT 9
+
+#define IL_INVALID_RATE 0xFF
+#define IL_INVALID_VALUE -1
+
+#define STA_PS_STATUS_WAKE 0
+#define STA_PS_STATUS_SLEEP 1
+
+struct il3945_ibss_seq {
+ u8 mac[ETH_ALEN];
+ u16 seq_num;
+ u16 frag_num;
+ unsigned long packet_time;
+ struct list_head list;
+};
+
+#define IL_RX_HDR(x) ((struct il3945_rx_frame_hdr *)(\
+ x->u.rx_frame.stats.payload + \
+ x->u.rx_frame.stats.phy_count))
+#define IL_RX_END(x) ((struct il3945_rx_frame_end *)(\
+ IL_RX_HDR(x)->payload + \
+ le16_to_cpu(IL_RX_HDR(x)->len)))
+#define IL_RX_STATS(x) (&x->u.rx_frame.stats)
+#define IL_RX_DATA(x) (IL_RX_HDR(x)->payload)
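+/* These helpers walk a received 3945 frame in place: the stats block's
+ * payload holds phy_count bytes of PHY data, followed by the Rx frame
+ * header, whose payload holds the len-byte frame, followed by the
+ * end-of-frame status block. */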
+
+/******************************************************************************
+ *
+ * Functions implemented in iwl3945-base.c which are forward declared here
+ * for use by iwl-*.c
+ *
+ *****************************************************************************/
+extern int il3945_calc_db_from_ratio(int sig_ratio);
+extern void il3945_rx_replenish(void *data);
+extern void il3945_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq);
+extern unsigned int il3945_fill_beacon_frame(struct il_priv *il,
+ struct ieee80211_hdr *hdr,
+ int left);
+extern int il3945_dump_nic_event_log(struct il_priv *il, bool full_log,
+ char **buf, bool display);
+extern void il3945_dump_nic_error_log(struct il_priv *il);
+
+/******************************************************************************
+ *
+ * Functions implemented in iwl-[34]*.c which are forward declared here
+ * for use by iwl3945-base.c
+ *
+ * NOTE: The implementation of these functions are hardware specific
+ * which is why they are in the hardware specific files (vs. iwl-base.c)
+ *
+ * Naming convention --
+ * il3945_ <-- It's part of the iwlegacy 3945 driver
+ * il3945_hw_ <-- Hardware specific (implemented in iwl-XXXX.c by all HW)
+ * iwlXXXX_ <-- Hardware specific (implemented in iwl-XXXX.c for XXXX)
+ * il3945_bg_ <-- Called from work queue context
+ * il3945_mac_ <-- mac80211 callback
+ *
+ ****************************************************************************/
+extern void il3945_hw_handler_setup(struct il_priv *il);
+extern void il3945_hw_setup_deferred_work(struct il_priv *il);
+extern void il3945_hw_cancel_deferred_work(struct il_priv *il);
+extern int il3945_hw_rxq_stop(struct il_priv *il);
+extern int il3945_hw_set_hw_params(struct il_priv *il);
+extern int il3945_hw_nic_init(struct il_priv *il);
+extern int il3945_hw_nic_stop_master(struct il_priv *il);
+extern void il3945_hw_txq_ctx_free(struct il_priv *il);
+extern void il3945_hw_txq_ctx_stop(struct il_priv *il);
+extern int il3945_hw_nic_reset(struct il_priv *il);
+extern int il3945_hw_txq_attach_buf_to_tfd(struct il_priv *il,
+ struct il_tx_queue *txq,
+ dma_addr_t addr, u16 len, u8 reset,
+ u8 pad);
+extern void il3945_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq);
+extern int il3945_hw_get_temperature(struct il_priv *il);
+extern int il3945_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq);
+extern unsigned int il3945_hw_get_beacon_cmd(struct il_priv *il,
+ struct il3945_frame *frame,
+ u8 rate);
+void il3945_hw_build_tx_cmd_rate(struct il_priv *il, struct il_device_cmd *cmd,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_hdr *hdr, int sta_id);
+extern int il3945_hw_reg_send_txpower(struct il_priv *il);
+extern int il3945_hw_reg_set_txpower(struct il_priv *il, s8 power);
+extern void il3945_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb);
+void il3945_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb);
+extern void il3945_disable_events(struct il_priv *il);
+extern int il4965_get_temperature(const struct il_priv *il);
+extern void il3945_post_associate(struct il_priv *il);
+extern void il3945_config_ap(struct il_priv *il);
+
+extern int il3945_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx);
+
+/**
+ * il3945_hw_find_station - Find station id for a given BSSID
+ * @bssid: MAC address of station ID to find
+ *
+ * NOTE: This should not be hardware specific but the code has
+ * not yet been merged into a single common layer for managing the
+ * station tables.
+ */
+extern u8 il3945_hw_find_station(struct il_priv *il, const u8 * bssid);
+
+extern struct ieee80211_ops il3945_hw_ops;
+
+extern __le32 il3945_get_antenna_flags(const struct il_priv *il);
+extern int il3945_init_hw_rate_table(struct il_priv *il);
+extern void il3945_reg_txpower_periodic(struct il_priv *il);
+extern int il3945_txpower_set_from_eeprom(struct il_priv *il);
+
+extern int il3945_rs_next_rate(struct il_priv *il, int rate);
+
+/* scanning */
+int il3945_request_scan(struct il_priv *il, struct ieee80211_vif *vif);
+void il3945_post_scan(struct il_priv *il);
+
+/* rates */
+extern const struct il3945_rate_info il3945_rates[RATE_COUNT_3945];
+
+/* RSSI to dBm */
+#define IL39_RSSI_OFFSET 95
+
+/*
+ * EEPROM related constants, enums, and structures.
+ */
+#define EEPROM_SKU_CAP_OP_MODE_MRC (1 << 7)
+
+/*
+ * Mapping of a Tx power level, at factory calibration temperature,
+ * to a radio/DSP gain table idx.
+ * One for each of 5 "sample" power levels in each band.
+ * v_det is measured at the factory, using the 3945's built-in power amplifier
+ * (PA) output voltage detector. This same detector is used during Tx of
+ * long packets in normal operation to provide feedback as to proper output
+ * level.
+ * Data copied from EEPROM.
+ * DO NOT ALTER THIS STRUCTURE!!!
+ */
+struct il3945_eeprom_txpower_sample {
+ u8 gain_idx; /* idx into power (gain) setup table ... */
+ s8 power; /* ... for this pwr level for this chnl group */
+ u16 v_det; /* PA output voltage */
+} __packed;
+
+/*
+ * Mappings of Tx power levels -> nominal radio/DSP gain table idxes.
+ * One for each channel group (a.k.a. "band") (1 for BG, 4 for A).
+ * Tx power setup code interpolates between the 5 "sample" power levels
+ * to determine the nominal setup for a requested power level.
+ * Data copied from EEPROM.
+ * DO NOT ALTER THIS STRUCTURE!!!
+ */
+struct il3945_eeprom_txpower_group {
+ struct il3945_eeprom_txpower_sample samples[5]; /* 5 power levels */
+ s32 a, b, c, d, e; /* coefficients for voltage->power
+ * formula (signed) */
+ s32 Fa, Fb, Fc, Fd, Fe; /* these modify coeffs based on
+ * frequency (signed) */
+ s8 saturation_power; /* highest power possible by h/w in this
+ * band */
+ u8 group_channel; /* "representative" channel # in this band */
+ s16 temperature; /* h/w temperature at factory calib this band
+ * (signed) */
+} __packed;
+
+/*
+ * Temperature-based Tx-power compensation data, not band-specific.
+ * These coefficients are used to modify the a/b/c/d/e coeffs based on
+ * difference between current temperature and factory calib temperature.
+ * Data copied from EEPROM.
+ */
+struct il3945_eeprom_temperature_corr {
+ u32 Ta;
+ u32 Tb;
+ u32 Tc;
+ u32 Td;
+ u32 Te;
+} __packed;
+
+/*
+ * EEPROM map
+ */
+struct il3945_eeprom {
+ u8 reserved0[16];
+ u16 device_id; /* abs.ofs: 16 */
+ u8 reserved1[2];
+ u16 pmc; /* abs.ofs: 20 */
+ u8 reserved2[20];
+ u8 mac_address[6]; /* abs.ofs: 42 */
+ u8 reserved3[58];
+ u16 board_revision; /* abs.ofs: 106 */
+ u8 reserved4[11];
+ u8 board_pba_number[9]; /* abs.ofs: 119 */
+ u8 reserved5[8];
+ u16 version; /* abs.ofs: 136 */
+ u8 sku_cap; /* abs.ofs: 138 */
+ u8 leds_mode; /* abs.ofs: 139 */
+ u16 oem_mode;
+ u16 wowlan_mode; /* abs.ofs: 142 */
+ u16 leds_time_interval; /* abs.ofs: 144 */
+ u8 leds_off_time; /* abs.ofs: 146 */
+ u8 leds_on_time; /* abs.ofs: 147 */
+ u8 almgor_m_version; /* abs.ofs: 148 */
+ u8 antenna_switch_type; /* abs.ofs: 149 */
+ u8 reserved6[42];
+ u8 sku_id[4]; /* abs.ofs: 192 */
+
+/*
+ * Per-channel regulatory data.
+ *
+ * Each channel that *might* be supported by 3945 has a fixed location
+ * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory
+ * txpower (MSB).
+ *
+ * Entries immediately below are for 20 MHz channel width.
+ *
+ * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
+ */
+ u16 band_1_count; /* abs.ofs: 196 */
+ struct il_eeprom_channel band_1_channels[14]; /* abs.ofs: 198 */
+
+/*
+ * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196,
+ * 5.0 GHz channels 7, 8, 11, 12, 16
+ * (4915-5080MHz) (none of these is ever supported)
+ */
+ u16 band_2_count; /* abs.ofs: 226 */
+ struct il_eeprom_channel band_2_channels[13]; /* abs.ofs: 228 */
+
+/*
+ * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
+ * (5170-5320MHz)
+ */
+ u16 band_3_count; /* abs.ofs: 254 */
+ struct il_eeprom_channel band_3_channels[12]; /* abs.ofs: 256 */
+
+/*
+ * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
+ * (5500-5700MHz)
+ */
+ u16 band_4_count; /* abs.ofs: 280 */
+ struct il_eeprom_channel band_4_channels[11]; /* abs.ofs: 282 */
+
+/*
+ * 5.7 GHz channels 145, 149, 153, 157, 161, 165
+ * (5725-5825MHz)
+ */
+ u16 band_5_count; /* abs.ofs: 304 */
+ struct il_eeprom_channel band_5_channels[6]; /* abs.ofs: 306 */
+
+ u8 reserved9[194];
+
+/*
+ * 3945 Txpower calibration data.
+ */
+#define IL_NUM_TX_CALIB_GROUPS 5
+ struct il3945_eeprom_txpower_group groups[IL_NUM_TX_CALIB_GROUPS];
+/* abs.ofs: 512 */
+ struct il3945_eeprom_temperature_corr corrections; /* abs.ofs: 832 */
+ u8 reserved16[172]; /* fill out to full 1024 byte block */
+} __packed;
+
+#define IL3945_EEPROM_IMG_SIZE 1024
+
+/* End of EEPROM */
+
+#define PCI_CFG_REV_ID_BIT_BASIC_SKU (0x40) /* bit 6 */
+#define PCI_CFG_REV_ID_BIT_RTP (0x80) /* bit 7 */
+
+/* 4 DATA + 1 CMD. There are 2 HCCA queues that are not used. */
+#define IL39_NUM_QUEUES 5
+#define IL39_CMD_QUEUE_NUM 4
+
+#define IL_DEFAULT_TX_RETRY 15
+
+/*********************************************/
+
+#define RFD_SIZE 4
+#define NUM_TFD_CHUNKS 4
+
+#define TFD_CTL_COUNT_SET(n) (n << 24)
+#define TFD_CTL_COUNT_GET(ctl) ((ctl >> 24) & 7)
+#define TFD_CTL_PAD_SET(n) (n << 28)
+#define TFD_CTL_PAD_GET(ctl) (ctl >> 28)
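+/*
+ * Usage sketch (illustrative only; num_tbs and pad stand in for whatever
+ * values the Tx path computes):
+ *
+ *   tfd->control_flags = cpu_to_le32(TFD_CTL_COUNT_SET(num_tbs) |
+ *                                    TFD_CTL_PAD_SET(pad));
+ *   num_tbs = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags));
+ */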
+
+/* Sizes and addresses for instruction and data memory (SRAM) in
+ * 3945's embedded processor. Driver access is via HBUS_TARG_MEM_* regs. */
+#define IL39_RTC_INST_LOWER_BOUND (0x000000)
+#define IL39_RTC_INST_UPPER_BOUND (0x014000)
+
+#define IL39_RTC_DATA_LOWER_BOUND (0x800000)
+#define IL39_RTC_DATA_UPPER_BOUND (0x808000)
+
+#define IL39_RTC_INST_SIZE (IL39_RTC_INST_UPPER_BOUND - \
+ IL39_RTC_INST_LOWER_BOUND)
+#define IL39_RTC_DATA_SIZE (IL39_RTC_DATA_UPPER_BOUND - \
+ IL39_RTC_DATA_LOWER_BOUND)
+
+#define IL39_MAX_INST_SIZE IL39_RTC_INST_SIZE
+#define IL39_MAX_DATA_SIZE IL39_RTC_DATA_SIZE
+
+/* Size of uCode instruction memory in bootstrap state machine */
+#define IL39_MAX_BSM_SIZE IL39_RTC_INST_SIZE
+
+static inline int
+il3945_hw_valid_rtc_data_addr(u32 addr)
+{
+ return (addr >= IL39_RTC_DATA_LOWER_BOUND &&
+ addr < IL39_RTC_DATA_UPPER_BOUND);
+}
+
+/* Base physical address of il3945_shared is provided to FH39_TSSR_CBB_BASE
+ * and &il3945_shared.rx_read_ptr[0] is provided to FH39_RCSR_RPTR_ADDR(0) */
+struct il3945_shared {
+ __le32 tx_base_ptr[8];
+} __packed;
+
+/************************************/
+/* iwl3945 Flow Handler Definitions */
+/************************************/
+
+/**
+ * This I/O area is directly read/writable by driver (e.g. Linux uses writel())
+ * Addresses are offsets from device's PCI hardware base address.
+ */
+#define FH39_MEM_LOWER_BOUND (0x0800)
+#define FH39_MEM_UPPER_BOUND (0x1000)
+
+#define FH39_CBCC_TBL (FH39_MEM_LOWER_BOUND + 0x140)
+#define FH39_TFDB_TBL (FH39_MEM_LOWER_BOUND + 0x180)
+#define FH39_RCSR_TBL (FH39_MEM_LOWER_BOUND + 0x400)
+#define FH39_RSSR_TBL (FH39_MEM_LOWER_BOUND + 0x4c0)
+#define FH39_TCSR_TBL (FH39_MEM_LOWER_BOUND + 0x500)
+#define FH39_TSSR_TBL (FH39_MEM_LOWER_BOUND + 0x680)
+
+/* TFDB (Transmit Frame Buffer Descriptor) */
+#define FH39_TFDB(_ch, buf) (FH39_TFDB_TBL + \
+ ((_ch) * 2 + (buf)) * 0x28)
+#define FH39_TFDB_CHNL_BUF_CTRL_REG(_ch) (FH39_TFDB_TBL + 0x50 * (_ch))
+
+/* CBCC channel is [0,2] */
+#define FH39_CBCC(_ch) (FH39_CBCC_TBL + (_ch) * 0x8)
+#define FH39_CBCC_CTRL(_ch) (FH39_CBCC(_ch) + 0x00)
+#define FH39_CBCC_BASE(_ch) (FH39_CBCC(_ch) + 0x04)
+
+/* RCSR channel is [0,2] */
+#define FH39_RCSR(_ch) (FH39_RCSR_TBL + (_ch) * 0x40)
+#define FH39_RCSR_CONFIG(_ch) (FH39_RCSR(_ch) + 0x00)
+#define FH39_RCSR_RBD_BASE(_ch) (FH39_RCSR(_ch) + 0x04)
+#define FH39_RCSR_WPTR(_ch) (FH39_RCSR(_ch) + 0x20)
+#define FH39_RCSR_RPTR_ADDR(_ch) (FH39_RCSR(_ch) + 0x24)
+
+#define FH39_RSCSR_CHNL0_WPTR (FH39_RCSR_WPTR(0))
+
+/* RSSR */
+#define FH39_RSSR_CTRL (FH39_RSSR_TBL + 0x000)
+#define FH39_RSSR_STATUS (FH39_RSSR_TBL + 0x004)
+
+/* TCSR */
+#define FH39_TCSR(_ch) (FH39_TCSR_TBL + (_ch) * 0x20)
+#define FH39_TCSR_CONFIG(_ch) (FH39_TCSR(_ch) + 0x00)
+#define FH39_TCSR_CREDIT(_ch) (FH39_TCSR(_ch) + 0x04)
+#define FH39_TCSR_BUFF_STTS(_ch) (FH39_TCSR(_ch) + 0x08)
+
+/* TSSR */
+#define FH39_TSSR_CBB_BASE (FH39_TSSR_TBL + 0x000)
+#define FH39_TSSR_MSG_CONFIG (FH39_TSSR_TBL + 0x008)
+#define FH39_TSSR_TX_STATUS (FH39_TSSR_TBL + 0x010)
+
+/* DBM */
+
+#define FH39_SRVC_CHNL (6)
+
+#define FH39_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE (20)
+#define FH39_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH (4)
+
+#define FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN (0x08000000)
+
+#define FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE (0x80000000)
+
+#define FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE (0x20000000)
+
+#define FH39_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128 (0x01000000)
+
+#define FH39_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST (0x00001000)
+
+#define FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH (0x00000000)
+
+#define FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF (0x00000000)
+#define FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRIVER (0x00000001)
+
+#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE_VAL (0x00000000)
+#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL (0x00000008)
+
+#define FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000)
+
+#define FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT (0x00000000)
+
+#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000)
+#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000)
+
+#define FH39_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00004000)
+
+#define FH39_TCSR_CHNL_TX_BUF_STS_REG_BIT_TFDB_WPTR (0x00000001)
+
+#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON (0xFF000000)
+#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON (0x00FF0000)
+
+#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B (0x00000400)
+
+#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON (0x00000100)
+#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON (0x00000080)
+
+#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH (0x00000020)
+#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH (0x00000005)
+
+#define FH39_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_ch) (BIT(_ch) << 24)
+#define FH39_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_ch) (BIT(_ch) << 16)
+
+#define FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_ch) \
+ (FH39_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_ch) | \
+ FH39_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_ch))
+
+#define FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000)
+
+struct il3945_tfd_tb {
+ __le32 addr;
+ __le32 len;
+} __packed;
+
+struct il3945_tfd {
+ __le32 control_flags;
+ struct il3945_tfd_tb tbs[4];
+ u8 __pad[28];
+} __packed;
+
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+ssize_t il3945_ucode_rx_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos);
+ssize_t il3945_ucode_tx_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos);
+ssize_t il3945_ucode_general_stats_read(struct file *file,
+ char __user *user_buf, size_t count,
+ loff_t *ppos);
+#endif
+
+#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-calib.c b/drivers/net/wireless/iwlegacy/4965-calib.c
index 162d877e6869..d3248e3ef23b 100644
--- a/drivers/net/wireless/iwlegacy/iwl-4965-calib.c
+++ b/drivers/net/wireless/iwlegacy/4965-calib.c
@@ -63,15 +63,14 @@
#include <linux/slab.h>
#include <net/mac80211.h>
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-4965-calib.h"
+#include "common.h"
+#include "4965.h"
/*****************************************************************************
* INIT calibrations framework
*****************************************************************************/
-struct statistics_general_data {
+struct stats_general_data {
u32 beacon_silence_rssi_a;
u32 beacon_silence_rssi_b;
u32 beacon_silence_rssi_c;
@@ -80,14 +79,15 @@ struct statistics_general_data {
u32 beacon_energy_c;
};
-void iwl4965_calib_free_results(struct iwl_priv *priv)
+void
+il4965_calib_free_results(struct il_priv *il)
{
int i;
- for (i = 0; i < IWL_CALIB_MAX; i++) {
- kfree(priv->calib_results[i].buf);
- priv->calib_results[i].buf = NULL;
- priv->calib_results[i].buf_len = 0;
+ for (i = 0; i < IL_CALIB_MAX; i++) {
+ kfree(il->calib_results[i].buf);
+ il->calib_results[i].buf = NULL;
+ il->calib_results[i].buf_len = 0;
}
}
@@ -103,10 +103,9 @@ void iwl4965_calib_free_results(struct iwl_priv *priv)
* enough to receive all of our own network traffic, but not so
* high that our DSP gets too busy trying to lock onto non-network
* activity/noise. */
-static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
- u32 norm_fa,
- u32 rx_enable_time,
- struct statistics_general_data *rx_info)
+static int
+il4965_sens_energy_cck(struct il_priv *il, u32 norm_fa, u32 rx_enable_time,
+ struct stats_general_data *rx_info)
{
u32 max_nrg_cck = 0;
int i = 0;
@@ -129,22 +128,22 @@ static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
u32 false_alarms = norm_fa * 200 * 1024;
u32 max_false_alarms = MAX_FA_CCK * rx_enable_time;
u32 min_false_alarms = MIN_FA_CCK * rx_enable_time;
- struct iwl_sensitivity_data *data = NULL;
- const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
+ struct il_sensitivity_data *data = NULL;
+ const struct il_sensitivity_ranges *ranges = il->hw_params.sens;
- data = &(priv->sensitivity_data);
+ data = &(il->sensitivity_data);
data->nrg_auto_corr_silence_diff = 0;
/* Find max silence rssi among all 3 receivers.
* This is background noise, which may include transmissions from other
* networks, measured during silence before our network's beacon */
- silence_rssi_a = (u8)((rx_info->beacon_silence_rssi_a &
- ALL_BAND_FILTER) >> 8);
- silence_rssi_b = (u8)((rx_info->beacon_silence_rssi_b &
- ALL_BAND_FILTER) >> 8);
- silence_rssi_c = (u8)((rx_info->beacon_silence_rssi_c &
- ALL_BAND_FILTER) >> 8);
+ silence_rssi_a =
+ (u8) ((rx_info->beacon_silence_rssi_a & ALL_BAND_FILTER) >> 8);
+ silence_rssi_b =
+ (u8) ((rx_info->beacon_silence_rssi_b & ALL_BAND_FILTER) >> 8);
+ silence_rssi_c =
+ (u8) ((rx_info->beacon_silence_rssi_c & ALL_BAND_FILTER) >> 8);
val = max(silence_rssi_b, silence_rssi_c);
max_silence_rssi = max(silence_rssi_a, (u8) val);
@@ -160,9 +159,8 @@ static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
val = data->nrg_silence_rssi[i];
silence_ref = max(silence_ref, val);
}
- IWL_DEBUG_CALIB(priv, "silence a %u, b %u, c %u, 20-bcn max %u\n",
- silence_rssi_a, silence_rssi_b, silence_rssi_c,
- silence_ref);
+ D_CALIB("silence a %u, b %u, c %u, 20-bcn max %u\n", silence_rssi_a,
+ silence_rssi_b, silence_rssi_c, silence_ref);
/* Find max rx energy (min value!) among all 3 receivers,
* measured during beacon frame.
@@ -184,9 +182,9 @@ static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
max_nrg_cck = (u32) max(max_nrg_cck, (data->nrg_value[i]));
max_nrg_cck += 6;
- IWL_DEBUG_CALIB(priv, "rx energy a %u, b %u, c %u, 10-bcn max/min %u\n",
- rx_info->beacon_energy_a, rx_info->beacon_energy_b,
- rx_info->beacon_energy_c, max_nrg_cck - 6);
+ D_CALIB("rx energy a %u, b %u, c %u, 10-bcn max/min %u\n",
+ rx_info->beacon_energy_a, rx_info->beacon_energy_b,
+ rx_info->beacon_energy_c, max_nrg_cck - 6);
/* Count number of consecutive beacons with fewer-than-desired
* false alarms. */
@@ -194,35 +192,34 @@ static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
data->num_in_cck_no_fa++;
else
data->num_in_cck_no_fa = 0;
- IWL_DEBUG_CALIB(priv, "consecutive bcns with few false alarms = %u\n",
- data->num_in_cck_no_fa);
+ D_CALIB("consecutive bcns with few false alarms = %u\n",
+ data->num_in_cck_no_fa);
/* If we got too many false alarms this time, reduce sensitivity */
- if ((false_alarms > max_false_alarms) &&
- (data->auto_corr_cck > AUTO_CORR_MAX_TH_CCK)) {
- IWL_DEBUG_CALIB(priv, "norm FA %u > max FA %u\n",
- false_alarms, max_false_alarms);
- IWL_DEBUG_CALIB(priv, "... reducing sensitivity\n");
- data->nrg_curr_state = IWL_FA_TOO_MANY;
+ if (false_alarms > max_false_alarms &&
+ data->auto_corr_cck > AUTO_CORR_MAX_TH_CCK) {
+ D_CALIB("norm FA %u > max FA %u\n", false_alarms,
+ max_false_alarms);
+ D_CALIB("... reducing sensitivity\n");
+ data->nrg_curr_state = IL_FA_TOO_MANY;
/* Store for "fewer than desired" on later beacon */
data->nrg_silence_ref = silence_ref;
/* increase energy threshold (reduce nrg value)
* to decrease sensitivity */
data->nrg_th_cck = data->nrg_th_cck - NRG_STEP_CCK;
- /* Else if we got fewer than desired, increase sensitivity */
+ /* Else if we got fewer than desired, increase sensitivity */
} else if (false_alarms < min_false_alarms) {
- data->nrg_curr_state = IWL_FA_TOO_FEW;
+ data->nrg_curr_state = IL_FA_TOO_FEW;
/* Compare silence level with silence level for most recent
* healthy number or too many false alarms */
- data->nrg_auto_corr_silence_diff = (s32)data->nrg_silence_ref -
- (s32)silence_ref;
+ data->nrg_auto_corr_silence_diff =
+ (s32) data->nrg_silence_ref - (s32) silence_ref;
- IWL_DEBUG_CALIB(priv,
- "norm FA %u < min FA %u, silence diff %d\n",
- false_alarms, min_false_alarms,
- data->nrg_auto_corr_silence_diff);
+ D_CALIB("norm FA %u < min FA %u, silence diff %d\n",
+ false_alarms, min_false_alarms,
+ data->nrg_auto_corr_silence_diff);
/* Increase value to increase sensitivity, but only if:
* 1a) previous beacon did *not* have *too many* false alarms
@@ -230,23 +227,22 @@ static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
* from a previous beacon with too many, or healthy # FAs
* OR 2) We've seen a lot of beacons (100) with too few
* false alarms */
- if ((data->nrg_prev_state != IWL_FA_TOO_MANY) &&
- ((data->nrg_auto_corr_silence_diff > NRG_DIFF) ||
- (data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) {
+ if (data->nrg_prev_state != IL_FA_TOO_MANY &&
+ (data->nrg_auto_corr_silence_diff > NRG_DIFF ||
+ data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA)) {
- IWL_DEBUG_CALIB(priv, "... increasing sensitivity\n");
+ D_CALIB("... increasing sensitivity\n");
/* Increase nrg value to increase sensitivity */
val = data->nrg_th_cck + NRG_STEP_CCK;
- data->nrg_th_cck = min((u32)ranges->min_nrg_cck, val);
+ data->nrg_th_cck = min((u32) ranges->min_nrg_cck, val);
} else {
- IWL_DEBUG_CALIB(priv,
- "... but not changing sensitivity\n");
+ D_CALIB("... but not changing sensitivity\n");
}
- /* Else we got a healthy number of false alarms, keep status quo */
+ /* Else we got a healthy number of false alarms, keep status quo */
} else {
- IWL_DEBUG_CALIB(priv, " FA in safe zone\n");
- data->nrg_curr_state = IWL_FA_GOOD_RANGE;
+ D_CALIB(" FA in safe zone\n");
+ data->nrg_curr_state = IL_FA_GOOD_RANGE;
/* Store for use in "fewer than desired" with later beacon */
data->nrg_silence_ref = silence_ref;
@@ -254,8 +250,8 @@ static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
/* If previous beacon had too many false alarms,
* give it some extra margin by reducing sensitivity again
* (but don't go below measured energy of desired Rx) */
- if (IWL_FA_TOO_MANY == data->nrg_prev_state) {
- IWL_DEBUG_CALIB(priv, "... increasing margin\n");
+ if (IL_FA_TOO_MANY == data->nrg_prev_state) {
+ D_CALIB("... increasing margin\n");
if (data->nrg_th_cck > (max_nrg_cck + NRG_MARGIN))
data->nrg_th_cck -= NRG_MARGIN;
else
@@ -269,7 +265,7 @@ static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
* Lower value is higher energy, so we use max()!
*/
data->nrg_th_cck = max(max_nrg_cck, data->nrg_th_cck);
- IWL_DEBUG_CALIB(priv, "new nrg_th_cck %u\n", data->nrg_th_cck);
+ D_CALIB("new nrg_th_cck %u\n", data->nrg_th_cck);
data->nrg_prev_state = data->nrg_curr_state;
@@ -284,190 +280,187 @@ static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
else {
val = data->auto_corr_cck + AUTO_CORR_STEP_CCK;
data->auto_corr_cck =
- min((u32)ranges->auto_corr_max_cck, val);
+ min((u32) ranges->auto_corr_max_cck, val);
}
val = data->auto_corr_cck_mrc + AUTO_CORR_STEP_CCK;
data->auto_corr_cck_mrc =
- min((u32)ranges->auto_corr_max_cck_mrc, val);
- } else if ((false_alarms < min_false_alarms) &&
- ((data->nrg_auto_corr_silence_diff > NRG_DIFF) ||
- (data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) {
+ min((u32) ranges->auto_corr_max_cck_mrc, val);
+ } else if (false_alarms < min_false_alarms &&
+ (data->nrg_auto_corr_silence_diff > NRG_DIFF ||
+ data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA)) {
/* Decrease auto_corr values to increase sensitivity */
val = data->auto_corr_cck - AUTO_CORR_STEP_CCK;
- data->auto_corr_cck =
- max((u32)ranges->auto_corr_min_cck, val);
+ data->auto_corr_cck = max((u32) ranges->auto_corr_min_cck, val);
val = data->auto_corr_cck_mrc - AUTO_CORR_STEP_CCK;
data->auto_corr_cck_mrc =
- max((u32)ranges->auto_corr_min_cck_mrc, val);
+ max((u32) ranges->auto_corr_min_cck_mrc, val);
}
return 0;
}
-
-static int iwl4965_sens_auto_corr_ofdm(struct iwl_priv *priv,
- u32 norm_fa,
- u32 rx_enable_time)
+static int
+il4965_sens_auto_corr_ofdm(struct il_priv *il, u32 norm_fa, u32 rx_enable_time)
{
u32 val;
u32 false_alarms = norm_fa * 200 * 1024;
u32 max_false_alarms = MAX_FA_OFDM * rx_enable_time;
u32 min_false_alarms = MIN_FA_OFDM * rx_enable_time;
- struct iwl_sensitivity_data *data = NULL;
- const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
+ struct il_sensitivity_data *data = NULL;
+ const struct il_sensitivity_ranges *ranges = il->hw_params.sens;
- data = &(priv->sensitivity_data);
+ data = &(il->sensitivity_data);
/* If we got too many false alarms this time, reduce sensitivity */
if (false_alarms > max_false_alarms) {
- IWL_DEBUG_CALIB(priv, "norm FA %u > max FA %u)\n",
- false_alarms, max_false_alarms);
+ D_CALIB("norm FA %u > max FA %u)\n", false_alarms,
+ max_false_alarms);
val = data->auto_corr_ofdm + AUTO_CORR_STEP_OFDM;
data->auto_corr_ofdm =
- min((u32)ranges->auto_corr_max_ofdm, val);
+ min((u32) ranges->auto_corr_max_ofdm, val);
val = data->auto_corr_ofdm_mrc + AUTO_CORR_STEP_OFDM;
data->auto_corr_ofdm_mrc =
- min((u32)ranges->auto_corr_max_ofdm_mrc, val);
+ min((u32) ranges->auto_corr_max_ofdm_mrc, val);
val = data->auto_corr_ofdm_x1 + AUTO_CORR_STEP_OFDM;
data->auto_corr_ofdm_x1 =
- min((u32)ranges->auto_corr_max_ofdm_x1, val);
+ min((u32) ranges->auto_corr_max_ofdm_x1, val);
val = data->auto_corr_ofdm_mrc_x1 + AUTO_CORR_STEP_OFDM;
data->auto_corr_ofdm_mrc_x1 =
- min((u32)ranges->auto_corr_max_ofdm_mrc_x1, val);
+ min((u32) ranges->auto_corr_max_ofdm_mrc_x1, val);
}
/* Else if we got fewer than desired, increase sensitivity */
else if (false_alarms < min_false_alarms) {
- IWL_DEBUG_CALIB(priv, "norm FA %u < min FA %u\n",
- false_alarms, min_false_alarms);
+ D_CALIB("norm FA %u < min FA %u\n", false_alarms,
+ min_false_alarms);
val = data->auto_corr_ofdm - AUTO_CORR_STEP_OFDM;
data->auto_corr_ofdm =
- max((u32)ranges->auto_corr_min_ofdm, val);
+ max((u32) ranges->auto_corr_min_ofdm, val);
val = data->auto_corr_ofdm_mrc - AUTO_CORR_STEP_OFDM;
data->auto_corr_ofdm_mrc =
- max((u32)ranges->auto_corr_min_ofdm_mrc, val);
+ max((u32) ranges->auto_corr_min_ofdm_mrc, val);
val = data->auto_corr_ofdm_x1 - AUTO_CORR_STEP_OFDM;
data->auto_corr_ofdm_x1 =
- max((u32)ranges->auto_corr_min_ofdm_x1, val);
+ max((u32) ranges->auto_corr_min_ofdm_x1, val);
val = data->auto_corr_ofdm_mrc_x1 - AUTO_CORR_STEP_OFDM;
data->auto_corr_ofdm_mrc_x1 =
- max((u32)ranges->auto_corr_min_ofdm_mrc_x1, val);
+ max((u32) ranges->auto_corr_min_ofdm_mrc_x1, val);
} else {
- IWL_DEBUG_CALIB(priv, "min FA %u < norm FA %u < max FA %u OK\n",
- min_false_alarms, false_alarms, max_false_alarms);
+ D_CALIB("min FA %u < norm FA %u < max FA %u OK\n",
+ min_false_alarms, false_alarms, max_false_alarms);
}
return 0;
}
-static void iwl4965_prepare_legacy_sensitivity_tbl(struct iwl_priv *priv,
- struct iwl_sensitivity_data *data,
- __le16 *tbl)
+static void
+il4965_prepare_legacy_sensitivity_tbl(struct il_priv *il,
+ struct il_sensitivity_data *data,
+ __le16 *tbl)
{
- tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX] =
- cpu_to_le16((u16)data->auto_corr_ofdm);
- tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX] =
- cpu_to_le16((u16)data->auto_corr_ofdm_mrc);
- tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX] =
- cpu_to_le16((u16)data->auto_corr_ofdm_x1);
- tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX] =
- cpu_to_le16((u16)data->auto_corr_ofdm_mrc_x1);
-
- tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX] =
- cpu_to_le16((u16)data->auto_corr_cck);
- tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX] =
- cpu_to_le16((u16)data->auto_corr_cck_mrc);
-
- tbl[HD_MIN_ENERGY_CCK_DET_INDEX] =
- cpu_to_le16((u16)data->nrg_th_cck);
- tbl[HD_MIN_ENERGY_OFDM_DET_INDEX] =
- cpu_to_le16((u16)data->nrg_th_ofdm);
-
- tbl[HD_BARKER_CORR_TH_ADD_MIN_INDEX] =
- cpu_to_le16(data->barker_corr_th_min);
- tbl[HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX] =
- cpu_to_le16(data->barker_corr_th_min_mrc);
- tbl[HD_OFDM_ENERGY_TH_IN_INDEX] =
- cpu_to_le16(data->nrg_th_cca);
-
- IWL_DEBUG_CALIB(priv, "ofdm: ac %u mrc %u x1 %u mrc_x1 %u thresh %u\n",
- data->auto_corr_ofdm, data->auto_corr_ofdm_mrc,
- data->auto_corr_ofdm_x1, data->auto_corr_ofdm_mrc_x1,
- data->nrg_th_ofdm);
-
- IWL_DEBUG_CALIB(priv, "cck: ac %u mrc %u thresh %u\n",
- data->auto_corr_cck, data->auto_corr_cck_mrc,
- data->nrg_th_cck);
+ tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_IDX] =
+ cpu_to_le16((u16) data->auto_corr_ofdm);
+ tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_IDX] =
+ cpu_to_le16((u16) data->auto_corr_ofdm_mrc);
+ tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_IDX] =
+ cpu_to_le16((u16) data->auto_corr_ofdm_x1);
+ tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_IDX] =
+ cpu_to_le16((u16) data->auto_corr_ofdm_mrc_x1);
+
+ tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX] =
+ cpu_to_le16((u16) data->auto_corr_cck);
+ tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX] =
+ cpu_to_le16((u16) data->auto_corr_cck_mrc);
+
+ tbl[HD_MIN_ENERGY_CCK_DET_IDX] = cpu_to_le16((u16) data->nrg_th_cck);
+ tbl[HD_MIN_ENERGY_OFDM_DET_IDX] = cpu_to_le16((u16) data->nrg_th_ofdm);
+
+ tbl[HD_BARKER_CORR_TH_ADD_MIN_IDX] =
+ cpu_to_le16(data->barker_corr_th_min);
+ tbl[HD_BARKER_CORR_TH_ADD_MIN_MRC_IDX] =
+ cpu_to_le16(data->barker_corr_th_min_mrc);
+ tbl[HD_OFDM_ENERGY_TH_IN_IDX] = cpu_to_le16(data->nrg_th_cca);
+
+ D_CALIB("ofdm: ac %u mrc %u x1 %u mrc_x1 %u thresh %u\n",
+ data->auto_corr_ofdm, data->auto_corr_ofdm_mrc,
+ data->auto_corr_ofdm_x1, data->auto_corr_ofdm_mrc_x1,
+ data->nrg_th_ofdm);
+
+ D_CALIB("cck: ac %u mrc %u thresh %u\n", data->auto_corr_cck,
+ data->auto_corr_cck_mrc, data->nrg_th_cck);
}
-/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */
-static int iwl4965_sensitivity_write(struct iwl_priv *priv)
+/* Prepare a C_SENSITIVITY, send to uCode if values have changed */
+static int
+il4965_sensitivity_write(struct il_priv *il)
{
- struct iwl_sensitivity_cmd cmd;
- struct iwl_sensitivity_data *data = NULL;
- struct iwl_host_cmd cmd_out = {
- .id = SENSITIVITY_CMD,
- .len = sizeof(struct iwl_sensitivity_cmd),
+ struct il_sensitivity_cmd cmd;
+ struct il_sensitivity_data *data = NULL;
+ struct il_host_cmd cmd_out = {
+ .id = C_SENSITIVITY,
+ .len = sizeof(struct il_sensitivity_cmd),
.flags = CMD_ASYNC,
.data = &cmd,
};
- data = &(priv->sensitivity_data);
+ data = &(il->sensitivity_data);
memset(&cmd, 0, sizeof(cmd));
- iwl4965_prepare_legacy_sensitivity_tbl(priv, data, &cmd.table[0]);
+ il4965_prepare_legacy_sensitivity_tbl(il, data, &cmd.table[0]);
/* Update uCode's "work" table, and copy it to DSP */
- cmd.control = SENSITIVITY_CMD_CONTROL_WORK_TABLE;
+ cmd.control = C_SENSITIVITY_CONTROL_WORK_TBL;
/* Don't send command to uCode if nothing has changed */
- if (!memcmp(&cmd.table[0], &(priv->sensitivity_tbl[0]),
- sizeof(u16)*HD_TABLE_SIZE)) {
- IWL_DEBUG_CALIB(priv, "No change in SENSITIVITY_CMD\n");
+ if (!memcmp
+ (&cmd.table[0], &(il->sensitivity_tbl[0]),
+ sizeof(u16) * HD_TBL_SIZE)) {
+ D_CALIB("No change in C_SENSITIVITY\n");
return 0;
}
/* Copy table for comparison next time */
- memcpy(&(priv->sensitivity_tbl[0]), &(cmd.table[0]),
- sizeof(u16)*HD_TABLE_SIZE);
+ memcpy(&(il->sensitivity_tbl[0]), &(cmd.table[0]),
+ sizeof(u16) * HD_TBL_SIZE);
- return iwl_legacy_send_cmd(priv, &cmd_out);
+ return il_send_cmd(il, &cmd_out);
}
-void iwl4965_init_sensitivity(struct iwl_priv *priv)
+void
+il4965_init_sensitivity(struct il_priv *il)
{
int ret = 0;
int i;
- struct iwl_sensitivity_data *data = NULL;
- const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
+ struct il_sensitivity_data *data = NULL;
+ const struct il_sensitivity_ranges *ranges = il->hw_params.sens;
- if (priv->disable_sens_cal)
+ if (il->disable_sens_cal)
return;
- IWL_DEBUG_CALIB(priv, "Start iwl4965_init_sensitivity\n");
+ D_CALIB("Start il4965_init_sensitivity\n");
/* Clear driver's sensitivity algo data */
- data = &(priv->sensitivity_data);
+ data = &(il->sensitivity_data);
if (ranges == NULL)
return;
- memset(data, 0, sizeof(struct iwl_sensitivity_data));
+ memset(data, 0, sizeof(struct il_sensitivity_data));
data->num_in_cck_no_fa = 0;
- data->nrg_curr_state = IWL_FA_TOO_MANY;
- data->nrg_prev_state = IWL_FA_TOO_MANY;
+ data->nrg_curr_state = IL_FA_TOO_MANY;
+ data->nrg_prev_state = IL_FA_TOO_MANY;
data->nrg_silence_ref = 0;
data->nrg_silence_idx = 0;
data->nrg_energy_idx = 0;
@@ -478,9 +471,9 @@ void iwl4965_init_sensitivity(struct iwl_priv *priv)
for (i = 0; i < NRG_NUM_PREV_STAT_L; i++)
data->nrg_silence_rssi[i] = 0;
- data->auto_corr_ofdm = ranges->auto_corr_min_ofdm;
+ data->auto_corr_ofdm = ranges->auto_corr_min_ofdm;
data->auto_corr_ofdm_mrc = ranges->auto_corr_min_ofdm_mrc;
- data->auto_corr_ofdm_x1 = ranges->auto_corr_min_ofdm_x1;
+ data->auto_corr_ofdm_x1 = ranges->auto_corr_min_ofdm_x1;
data->auto_corr_ofdm_mrc_x1 = ranges->auto_corr_min_ofdm_mrc_x1;
data->auto_corr_cck = AUTO_CORR_CCK_MIN_VAL_DEF;
data->auto_corr_cck_mrc = ranges->auto_corr_min_cck_mrc;
@@ -495,11 +488,12 @@ void iwl4965_init_sensitivity(struct iwl_priv *priv)
data->last_bad_plcp_cnt_cck = 0;
data->last_fa_cnt_cck = 0;
- ret |= iwl4965_sensitivity_write(priv);
- IWL_DEBUG_CALIB(priv, "<<return 0x%X\n", ret);
+ ret |= il4965_sensitivity_write(il);
+ D_CALIB("<<return 0x%X\n", ret);
}
-void iwl4965_sensitivity_calibration(struct iwl_priv *priv, void *resp)
+void
+il4965_sensitivity_calibration(struct il_priv *il, void *resp)
{
u32 rx_enable_time;
u32 fa_cck;
@@ -508,31 +502,31 @@ void iwl4965_sensitivity_calibration(struct iwl_priv *priv, void *resp)
u32 bad_plcp_ofdm;
u32 norm_fa_ofdm;
u32 norm_fa_cck;
- struct iwl_sensitivity_data *data = NULL;
- struct statistics_rx_non_phy *rx_info;
- struct statistics_rx_phy *ofdm, *cck;
+ struct il_sensitivity_data *data = NULL;
+ struct stats_rx_non_phy *rx_info;
+ struct stats_rx_phy *ofdm, *cck;
unsigned long flags;
- struct statistics_general_data statis;
+ struct stats_general_data statis;
- if (priv->disable_sens_cal)
+ if (il->disable_sens_cal)
return;
- data = &(priv->sensitivity_data);
+ data = &(il->sensitivity_data);
- if (!iwl_legacy_is_any_associated(priv)) {
- IWL_DEBUG_CALIB(priv, "<< - not associated\n");
+ if (!il_is_any_associated(il)) {
+ D_CALIB("<< - not associated\n");
return;
}
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock_irqsave(&il->lock, flags);
- rx_info = &(((struct iwl_notif_statistics *)resp)->rx.general);
- ofdm = &(((struct iwl_notif_statistics *)resp)->rx.ofdm);
- cck = &(((struct iwl_notif_statistics *)resp)->rx.cck);
+ rx_info = &(((struct il_notif_stats *)resp)->rx.general);
+ ofdm = &(((struct il_notif_stats *)resp)->rx.ofdm);
+ cck = &(((struct il_notif_stats *)resp)->rx.cck);
if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
- IWL_DEBUG_CALIB(priv, "<< invalid data.\n");
- spin_unlock_irqrestore(&priv->lock, flags);
+ D_CALIB("<< invalid data.\n");
+ spin_unlock_irqrestore(&il->lock, flags);
return;
}
@@ -544,30 +538,27 @@ void iwl4965_sensitivity_calibration(struct iwl_priv *priv, void *resp)
bad_plcp_ofdm = le32_to_cpu(ofdm->plcp_err);
statis.beacon_silence_rssi_a =
- le32_to_cpu(rx_info->beacon_silence_rssi_a);
+ le32_to_cpu(rx_info->beacon_silence_rssi_a);
statis.beacon_silence_rssi_b =
- le32_to_cpu(rx_info->beacon_silence_rssi_b);
+ le32_to_cpu(rx_info->beacon_silence_rssi_b);
statis.beacon_silence_rssi_c =
- le32_to_cpu(rx_info->beacon_silence_rssi_c);
- statis.beacon_energy_a =
- le32_to_cpu(rx_info->beacon_energy_a);
- statis.beacon_energy_b =
- le32_to_cpu(rx_info->beacon_energy_b);
- statis.beacon_energy_c =
- le32_to_cpu(rx_info->beacon_energy_c);
+ le32_to_cpu(rx_info->beacon_silence_rssi_c);
+ statis.beacon_energy_a = le32_to_cpu(rx_info->beacon_energy_a);
+ statis.beacon_energy_b = le32_to_cpu(rx_info->beacon_energy_b);
+ statis.beacon_energy_c = le32_to_cpu(rx_info->beacon_energy_c);
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irqrestore(&il->lock, flags);
- IWL_DEBUG_CALIB(priv, "rx_enable_time = %u usecs\n", rx_enable_time);
+ D_CALIB("rx_enable_time = %u usecs\n", rx_enable_time);
if (!rx_enable_time) {
- IWL_DEBUG_CALIB(priv, "<< RX Enable Time == 0!\n");
+ D_CALIB("<< RX Enable Time == 0!\n");
return;
}
- /* These statistics increase monotonically, and do not reset
+ /* These stats increase monotonically, and do not reset
* at each beacon. Calculate difference from last value, or just
- * use the new statistics value if it has reset or wrapped around. */
+ * use the new stats value if it has reset or wrapped around. */
if (data->last_bad_plcp_cnt_cck > bad_plcp_cck)
data->last_bad_plcp_cnt_cck = bad_plcp_cck;
else {
@@ -600,17 +591,17 @@ void iwl4965_sensitivity_calibration(struct iwl_priv *priv, void *resp)
norm_fa_ofdm = fa_ofdm + bad_plcp_ofdm;
norm_fa_cck = fa_cck + bad_plcp_cck;
- IWL_DEBUG_CALIB(priv,
- "cck: fa %u badp %u ofdm: fa %u badp %u\n", fa_cck,
- bad_plcp_cck, fa_ofdm, bad_plcp_ofdm);
+ D_CALIB("cck: fa %u badp %u ofdm: fa %u badp %u\n", fa_cck,
+ bad_plcp_cck, fa_ofdm, bad_plcp_ofdm);
- iwl4965_sens_auto_corr_ofdm(priv, norm_fa_ofdm, rx_enable_time);
- iwl4965_sens_energy_cck(priv, norm_fa_cck, rx_enable_time, &statis);
+ il4965_sens_auto_corr_ofdm(il, norm_fa_ofdm, rx_enable_time);
+ il4965_sens_energy_cck(il, norm_fa_cck, rx_enable_time, &statis);
- iwl4965_sensitivity_write(priv);
+ il4965_sensitivity_write(il);
}
-static inline u8 iwl4965_find_first_chain(u8 mask)
+static inline u8
+il4965_find_first_chain(u8 mask)
{
if (mask & ANT_A)
return CHAIN_A;
@@ -624,8 +615,8 @@ static inline u8 iwl4965_find_first_chain(u8 mask)
* disconnected.
*/
static void
-iwl4965_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
- struct iwl_chain_noise_data *data)
+il4965_find_disconn_antenna(struct il_priv *il, u32 * average_sig,
+ struct il_chain_noise_data *data)
{
u32 active_chains = 0;
u32 max_average_sig;
@@ -634,12 +625,15 @@ iwl4965_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
u8 first_chain;
u16 i = 0;
- average_sig[0] = data->chain_signal_a /
- priv->cfg->base_params->chain_noise_num_beacons;
- average_sig[1] = data->chain_signal_b /
- priv->cfg->base_params->chain_noise_num_beacons;
- average_sig[2] = data->chain_signal_c /
- priv->cfg->base_params->chain_noise_num_beacons;
+ average_sig[0] =
+ data->chain_signal_a /
+ il->cfg->base_params->chain_noise_num_beacons;
+ average_sig[1] =
+ data->chain_signal_b /
+ il->cfg->base_params->chain_noise_num_beacons;
+ average_sig[2] =
+ data->chain_signal_c /
+ il->cfg->base_params->chain_noise_num_beacons;
if (average_sig[0] >= average_sig[1]) {
max_average_sig = average_sig[0];
@@ -657,10 +651,10 @@ iwl4965_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
active_chains = (1 << max_average_sig_antenna_i);
}
- IWL_DEBUG_CALIB(priv, "average_sig: a %d b %d c %d\n",
- average_sig[0], average_sig[1], average_sig[2]);
- IWL_DEBUG_CALIB(priv, "max_average_sig = %d, antenna %d\n",
- max_average_sig, max_average_sig_antenna_i);
+ D_CALIB("average_sig: a %d b %d c %d\n", average_sig[0], average_sig[1],
+ average_sig[2]);
+ D_CALIB("max_average_sig = %d, antenna %d\n", max_average_sig,
+ max_average_sig_antenna_i);
/* Compare signal strengths for all 3 receivers. */
for (i = 0; i < NUM_RX_CHAINS; i++) {
@@ -673,9 +667,9 @@ iwl4965_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
data->disconn_array[i] = 1;
else
active_chains |= (1 << i);
- IWL_DEBUG_CALIB(priv, "i = %d rssiDelta = %d "
- "disconn_array[i] = %d\n",
- i, rssi_delta, data->disconn_array[i]);
+ D_CALIB("i = %d rssiDelta = %d "
+ "disconn_array[i] = %d\n", i, rssi_delta,
+ data->disconn_array[i]);
}
}
@@ -689,119 +683,110 @@ iwl4965_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
* To be safe, simply mask out any chains that we know
* are not on the device.
*/
- active_chains &= priv->hw_params.valid_rx_ant;
+ active_chains &= il->hw_params.valid_rx_ant;
num_tx_chains = 0;
for (i = 0; i < NUM_RX_CHAINS; i++) {
/* loops on all the bits of
- * priv->hw_setting.valid_tx_ant */
+ * il->hw_setting.valid_tx_ant */
u8 ant_msk = (1 << i);
- if (!(priv->hw_params.valid_tx_ant & ant_msk))
+ if (!(il->hw_params.valid_tx_ant & ant_msk))
continue;
num_tx_chains++;
if (data->disconn_array[i] == 0)
/* there is a Tx antenna connected */
break;
- if (num_tx_chains == priv->hw_params.tx_chains_num &&
+ if (num_tx_chains == il->hw_params.tx_chains_num &&
data->disconn_array[i]) {
/*
* If all chains are disconnected
* connect the first valid tx chain
*/
first_chain =
- iwl4965_find_first_chain(priv->cfg->valid_tx_ant);
+ il4965_find_first_chain(il->cfg->valid_tx_ant);
data->disconn_array[first_chain] = 0;
active_chains |= BIT(first_chain);
- IWL_DEBUG_CALIB(priv,
- "All Tx chains are disconnected W/A - declare %d as connected\n",
- first_chain);
+ D_CALIB("All Tx chains are disconnected"
+ "- declare %d as connected\n", first_chain);
break;
}
}
- if (active_chains != priv->hw_params.valid_rx_ant &&
- active_chains != priv->chain_noise_data.active_chains)
- IWL_DEBUG_CALIB(priv,
- "Detected that not all antennas are connected! "
- "Connected: %#x, valid: %#x.\n",
- active_chains, priv->hw_params.valid_rx_ant);
+ if (active_chains != il->hw_params.valid_rx_ant &&
+ active_chains != il->chain_noise_data.active_chains)
+ D_CALIB("Detected that not all antennas are connected! "
+ "Connected: %#x, valid: %#x.\n", active_chains,
+ il->hw_params.valid_rx_ant);
/* Save for use within RXON, TX, SCAN commands, etc. */
data->active_chains = active_chains;
- IWL_DEBUG_CALIB(priv, "active_chains (bitwise) = 0x%x\n",
- active_chains);
+ D_CALIB("active_chains (bitwise) = 0x%x\n", active_chains);
}
-static void iwl4965_gain_computation(struct iwl_priv *priv,
- u32 *average_noise,
- u16 min_average_noise_antenna_i,
- u32 min_average_noise,
- u8 default_chain)
+static void
+il4965_gain_computation(struct il_priv *il, u32 * average_noise,
+ u16 min_average_noise_antenna_i, u32 min_average_noise,
+ u8 default_chain)
{
int i, ret;
- struct iwl_chain_noise_data *data = &priv->chain_noise_data;
+ struct il_chain_noise_data *data = &il->chain_noise_data;
data->delta_gain_code[min_average_noise_antenna_i] = 0;
for (i = default_chain; i < NUM_RX_CHAINS; i++) {
s32 delta_g = 0;
- if (!(data->disconn_array[i]) &&
- (data->delta_gain_code[i] ==
- CHAIN_NOISE_DELTA_GAIN_INIT_VAL)) {
+ if (!data->disconn_array[i] &&
+ data->delta_gain_code[i] ==
+ CHAIN_NOISE_DELTA_GAIN_INIT_VAL) {
delta_g = average_noise[i] - min_average_noise;
- data->delta_gain_code[i] = (u8)((delta_g * 10) / 15);
+ data->delta_gain_code[i] = (u8) ((delta_g * 10) / 15);
data->delta_gain_code[i] =
- min(data->delta_gain_code[i],
+ min(data->delta_gain_code[i],
(u8) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
data->delta_gain_code[i] =
- (data->delta_gain_code[i] | (1 << 2));
+ (data->delta_gain_code[i] | (1 << 2));
} else {
data->delta_gain_code[i] = 0;
}
}
- IWL_DEBUG_CALIB(priv, "delta_gain_codes: a %d b %d c %d\n",
- data->delta_gain_code[0],
- data->delta_gain_code[1],
- data->delta_gain_code[2]);
+ D_CALIB("delta_gain_codes: a %d b %d c %d\n", data->delta_gain_code[0],
+ data->delta_gain_code[1], data->delta_gain_code[2]);
/* Differential gain gets sent to uCode only once */
if (!data->radio_write) {
- struct iwl_calib_diff_gain_cmd cmd;
+ struct il_calib_diff_gain_cmd cmd;
data->radio_write = 1;
memset(&cmd, 0, sizeof(cmd));
- cmd.hdr.op_code = IWL_PHY_CALIBRATE_DIFF_GAIN_CMD;
+ cmd.hdr.op_code = IL_PHY_CALIBRATE_DIFF_GAIN_CMD;
cmd.diff_gain_a = data->delta_gain_code[0];
cmd.diff_gain_b = data->delta_gain_code[1];
cmd.diff_gain_c = data->delta_gain_code[2];
- ret = iwl_legacy_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
- sizeof(cmd), &cmd);
+ ret = il_send_cmd_pdu(il, C_PHY_CALIBRATION, sizeof(cmd), &cmd);
if (ret)
- IWL_DEBUG_CALIB(priv, "fail sending cmd "
- "REPLY_PHY_CALIBRATION_CMD\n");
+ D_CALIB("fail sending cmd " "C_PHY_CALIBRATION\n");
/* TODO we might want recalculate
* rx_chain in rxon cmd */
/* Mark so we run this algo only once! */
- data->state = IWL_CHAIN_NOISE_CALIBRATED;
+ data->state = IL_CHAIN_NOISE_CALIBRATED;
}
}
-
-
/*
- * Accumulate 16 beacons of signal and noise statistics for each of
+ * Accumulate 16 beacons of signal and noise stats for each of
* 3 receivers/antennas/rx-chains, then figure out:
* 1) Which antennas are connected.
* 2) Differential rx gain settings to balance the 3 receivers.
*/
-void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
+void
+il4965_chain_noise_calibration(struct il_priv *il, void *stat_resp)
{
- struct iwl_chain_noise_data *data = NULL;
+ struct il_chain_noise_data *data = NULL;
u32 chain_noise_a;
u32 chain_noise_b;
@@ -809,8 +794,8 @@ void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
u32 chain_sig_a;
u32 chain_sig_b;
u32 chain_sig_c;
- u32 average_sig[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
- u32 average_noise[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
+ u32 average_sig[NUM_RX_CHAINS] = { INITIALIZATION_VALUE };
+ u32 average_noise[NUM_RX_CHAINS] = { INITIALIZATION_VALUE };
u32 min_average_noise = MIN_AVERAGE_NOISE_MAX_VALUE;
u16 min_average_noise_antenna_i = INITIALIZATION_VALUE;
u16 i = 0;
@@ -819,70 +804,69 @@ void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
u8 rxon_band24;
u8 stat_band24;
unsigned long flags;
- struct statistics_rx_non_phy *rx_info;
+ struct stats_rx_non_phy *rx_info;
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+ struct il_rxon_context *ctx = &il->ctx;
- if (priv->disable_chain_noise_cal)
+ if (il->disable_chain_noise_cal)
return;
- data = &(priv->chain_noise_data);
+ data = &(il->chain_noise_data);
/*
* Accumulate just the first "chain_noise_num_beacons" after
* the first association, then we're done forever.
*/
- if (data->state != IWL_CHAIN_NOISE_ACCUMULATE) {
- if (data->state == IWL_CHAIN_NOISE_ALIVE)
- IWL_DEBUG_CALIB(priv, "Wait for noise calib reset\n");
+ if (data->state != IL_CHAIN_NOISE_ACCUMULATE) {
+ if (data->state == IL_CHAIN_NOISE_ALIVE)
+ D_CALIB("Wait for noise calib reset\n");
return;
}
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock_irqsave(&il->lock, flags);
- rx_info = &(((struct iwl_notif_statistics *)stat_resp)->
- rx.general);
+ rx_info = &(((struct il_notif_stats *)stat_resp)->rx.general);
if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
- IWL_DEBUG_CALIB(priv, " << Interference data unavailable\n");
- spin_unlock_irqrestore(&priv->lock, flags);
+ D_CALIB(" << Interference data unavailable\n");
+ spin_unlock_irqrestore(&il->lock, flags);
return;
}
rxon_band24 = !!(ctx->staging.flags & RXON_FLG_BAND_24G_MSK);
rxon_chnum = le16_to_cpu(ctx->staging.channel);
- stat_band24 = !!(((struct iwl_notif_statistics *)
- stat_resp)->flag &
- STATISTICS_REPLY_FLG_BAND_24G_MSK);
- stat_chnum = le32_to_cpu(((struct iwl_notif_statistics *)
- stat_resp)->flag) >> 16;
+ stat_band24 =
+ !!(((struct il_notif_stats *)stat_resp)->
+ flag & STATS_REPLY_FLG_BAND_24G_MSK);
+ stat_chnum =
+ le32_to_cpu(((struct il_notif_stats *)stat_resp)->flag) >> 16;
/* Make sure we accumulate data for just the associated channel
* (even if scanning). */
- if ((rxon_chnum != stat_chnum) || (rxon_band24 != stat_band24)) {
- IWL_DEBUG_CALIB(priv, "Stats not from chan=%d, band24=%d\n",
- rxon_chnum, rxon_band24);
- spin_unlock_irqrestore(&priv->lock, flags);
+ if (rxon_chnum != stat_chnum || rxon_band24 != stat_band24) {
+ D_CALIB("Stats not from chan=%d, band24=%d\n", rxon_chnum,
+ rxon_band24);
+ spin_unlock_irqrestore(&il->lock, flags);
return;
}
/*
- * Accumulate beacon statistics values across
+ * Accumulate beacon stats values across
* "chain_noise_num_beacons"
*/
- chain_noise_a = le32_to_cpu(rx_info->beacon_silence_rssi_a) &
- IN_BAND_FILTER;
- chain_noise_b = le32_to_cpu(rx_info->beacon_silence_rssi_b) &
- IN_BAND_FILTER;
- chain_noise_c = le32_to_cpu(rx_info->beacon_silence_rssi_c) &
- IN_BAND_FILTER;
+ chain_noise_a =
+ le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
+ chain_noise_b =
+ le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
+ chain_noise_c =
+ le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
chain_sig_a = le32_to_cpu(rx_info->beacon_rssi_a) & IN_BAND_FILTER;
chain_sig_b = le32_to_cpu(rx_info->beacon_rssi_b) & IN_BAND_FILTER;
chain_sig_c = le32_to_cpu(rx_info->beacon_rssi_c) & IN_BAND_FILTER;
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irqrestore(&il->lock, flags);
data->beacon_count++;
@@ -894,34 +878,33 @@ void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
data->chain_signal_b = (chain_sig_b + data->chain_signal_b);
data->chain_signal_c = (chain_sig_c + data->chain_signal_c);
- IWL_DEBUG_CALIB(priv, "chan=%d, band24=%d, beacon=%d\n",
- rxon_chnum, rxon_band24, data->beacon_count);
- IWL_DEBUG_CALIB(priv, "chain_sig: a %d b %d c %d\n",
- chain_sig_a, chain_sig_b, chain_sig_c);
- IWL_DEBUG_CALIB(priv, "chain_noise: a %d b %d c %d\n",
- chain_noise_a, chain_noise_b, chain_noise_c);
+ D_CALIB("chan=%d, band24=%d, beacon=%d\n", rxon_chnum, rxon_band24,
+ data->beacon_count);
+ D_CALIB("chain_sig: a %d b %d c %d\n", chain_sig_a, chain_sig_b,
+ chain_sig_c);
+ D_CALIB("chain_noise: a %d b %d c %d\n", chain_noise_a, chain_noise_b,
+ chain_noise_c);
/* If this is the "chain_noise_num_beacons", determine:
* 1) Disconnected antennas (using signal strengths)
* 2) Differential gain (using silence noise) to balance receivers */
- if (data->beacon_count !=
- priv->cfg->base_params->chain_noise_num_beacons)
+ if (data->beacon_count != il->cfg->base_params->chain_noise_num_beacons)
return;
/* Analyze signal for disconnected antenna */
- iwl4965_find_disconn_antenna(priv, average_sig, data);
+ il4965_find_disconn_antenna(il, average_sig, data);
/* Analyze noise for rx balance */
- average_noise[0] = data->chain_noise_a /
- priv->cfg->base_params->chain_noise_num_beacons;
- average_noise[1] = data->chain_noise_b /
- priv->cfg->base_params->chain_noise_num_beacons;
- average_noise[2] = data->chain_noise_c /
- priv->cfg->base_params->chain_noise_num_beacons;
+ average_noise[0] =
+ data->chain_noise_a / il->cfg->base_params->chain_noise_num_beacons;
+ average_noise[1] =
+ data->chain_noise_b / il->cfg->base_params->chain_noise_num_beacons;
+ average_noise[2] =
+ data->chain_noise_c / il->cfg->base_params->chain_noise_num_beacons;
for (i = 0; i < NUM_RX_CHAINS; i++) {
- if (!(data->disconn_array[i]) &&
- (average_noise[i] <= min_average_noise)) {
+ if (!data->disconn_array[i] &&
+ average_noise[i] <= min_average_noise) {
/* This means that chain i is active and has
* lower noise values so far: */
min_average_noise = average_noise[i];
@@ -929,39 +912,37 @@ void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
}
}
- IWL_DEBUG_CALIB(priv, "average_noise: a %d b %d c %d\n",
- average_noise[0], average_noise[1],
- average_noise[2]);
+ D_CALIB("average_noise: a %d b %d c %d\n", average_noise[0],
+ average_noise[1], average_noise[2]);
- IWL_DEBUG_CALIB(priv, "min_average_noise = %d, antenna %d\n",
- min_average_noise, min_average_noise_antenna_i);
+ D_CALIB("min_average_noise = %d, antenna %d\n", min_average_noise,
+ min_average_noise_antenna_i);
- iwl4965_gain_computation(priv, average_noise,
- min_average_noise_antenna_i, min_average_noise,
- iwl4965_find_first_chain(priv->cfg->valid_rx_ant));
+ il4965_gain_computation(il, average_noise, min_average_noise_antenna_i,
+ min_average_noise,
+ il4965_find_first_chain(il->cfg->valid_rx_ant));
/* Some power changes may have been made during the calibration.
* Update and commit the RXON
*/
- if (priv->cfg->ops->lib->update_chain_flags)
- priv->cfg->ops->lib->update_chain_flags(priv);
+ if (il->cfg->ops->lib->update_chain_flags)
+ il->cfg->ops->lib->update_chain_flags(il);
- data->state = IWL_CHAIN_NOISE_DONE;
- iwl_legacy_power_update_mode(priv, false);
+ data->state = IL_CHAIN_NOISE_DONE;
+ il_power_update_mode(il, false);
}
-void iwl4965_reset_run_time_calib(struct iwl_priv *priv)
+void
+il4965_reset_run_time_calib(struct il_priv *il)
{
int i;
- memset(&(priv->sensitivity_data), 0,
- sizeof(struct iwl_sensitivity_data));
- memset(&(priv->chain_noise_data), 0,
- sizeof(struct iwl_chain_noise_data));
+ memset(&(il->sensitivity_data), 0, sizeof(struct il_sensitivity_data));
+ memset(&(il->chain_noise_data), 0, sizeof(struct il_chain_noise_data));
for (i = 0; i < NUM_RX_CHAINS; i++)
- priv->chain_noise_data.delta_gain_code[i] =
- CHAIN_NOISE_DELTA_GAIN_INIT_VAL;
+ il->chain_noise_data.delta_gain_code[i] =
+ CHAIN_NOISE_DELTA_GAIN_INIT_VAL;
- /* Ask for statistics now, the uCode will send notification
+ /* Ask for stats now, the uCode will send notification
* periodically after association */
- iwl_legacy_send_statistics_request(priv, CMD_ASYNC, true);
+ il_send_stats_request(il, CMD_ASYNC, true);
}
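
The calibration code above tunes receiver sensitivity from uCode statistics: false-alarm counts are scaled and compared against the MIN_FA_*/MAX_FA_* limits multiplied by rx_enable_time (so the comparison is effectively against a rate rather than an absolute count), the auto-correlation and energy thresholds are stepped toward or away from sensitivity and clamped to the per-hardware ranges in il->hw_params.sens, and il4965_sensitivity_write() only sends C_SENSITIVITY when the resulting table differs from the one last sent. A minimal standalone sketch of the clamped stepping, with simplified names that are not the driver's real il_sensitivity_data layout:

/*
 * Minimal sketch of the clamped threshold stepping used by
 * il4965_sens_auto_corr_ofdm()/il4965_sens_energy_cck() above.
 * Struct and parameter names are illustrative stand-ins.
 */
#include <linux/kernel.h>
#include <linux/types.h>

struct sens_range {
	u32 lo;		/* most sensitive allowed value */
	u32 hi;		/* least sensitive allowed value */
};

/*
 * Too many false alarms in the interval -> step the threshold up (less
 * sensitive); too few -> step it down (more sensitive); otherwise leave
 * it alone.  Always clamp to the hardware-specific range.
 */
static u32 step_auto_corr(u32 cur, u32 step, u32 false_alarms,
			  u32 max_fa, u32 min_fa,
			  const struct sens_range *r)
{
	if (false_alarms > max_fa)
		return min_t(u32, r->hi, cur + step);

	if (false_alarms < min_fa) {
		u32 val = (cur > step) ? cur - step : 0;

		return max_t(u32, r->lo, val);
	}

	return cur;
}
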
diff --git a/drivers/net/wireless/iwlegacy/4965-debug.c b/drivers/net/wireless/iwlegacy/4965-debug.c
new file mode 100644
index 000000000000..98ec39f56ba3
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/4965-debug.c
@@ -0,0 +1,746 @@
+/******************************************************************************
+*
+* GPL LICENSE SUMMARY
+*
+* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of version 2 of the GNU General Public License as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful, but
+* WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+* USA
+*
+* The full GNU General Public License is included in this distribution
+* in the file called LICENSE.GPL.
+*
+* Contact Information:
+* Intel Linux Wireless <ilw@linux.intel.com>
+* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+*****************************************************************************/
+#include "common.h"
+#include "4965.h"
+
+static const char *fmt_value = " %-30s %10u\n";
+static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
+static const char *fmt_header =
+ "%-32s current cumulative delta max\n";
+
+static int
+il4965_stats_flag(struct il_priv *il, char *buf, int bufsz)
+{
+ int p = 0;
+ u32 flag;
+
+ flag = le32_to_cpu(il->_4965.stats.flag);
+
+ p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n", flag);
+ if (flag & UCODE_STATS_CLEAR_MSK)
+ p += scnprintf(buf + p, bufsz - p,
+ "\tStatistics have been cleared\n");
+ p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
+ (flag & UCODE_STATS_FREQUENCY_MSK) ? "2.4 GHz" :
+ "5.2 GHz");
+ p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
+ (flag & UCODE_STATS_NARROW_BAND_MSK) ? "enabled" :
+ "disabled");
+
+ return p;
+}
+
+ssize_t
+il4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ int pos = 0;
+ char *buf;
+ int bufsz =
+ sizeof(struct stats_rx_phy) * 40 +
+ sizeof(struct stats_rx_non_phy) * 40 +
+ sizeof(struct stats_rx_ht_phy) * 40 + 400;
+ ssize_t ret;
+ struct stats_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
+ struct stats_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
+ struct stats_rx_non_phy *general, *accum_general;
+ struct stats_rx_non_phy *delta_general, *max_general;
+ struct stats_rx_ht_phy *ht, *accum_ht, *delta_ht, *max_ht;
+
+ if (!il_is_alive(il))
+ return -EAGAIN;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IL_ERR("Can not allocate Buffer\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * the statistic information display here is based on
+ * the last stats notification from uCode
+ * might not reflect the current uCode activity
+ */
+ ofdm = &il->_4965.stats.rx.ofdm;
+ cck = &il->_4965.stats.rx.cck;
+ general = &il->_4965.stats.rx.general;
+ ht = &il->_4965.stats.rx.ofdm_ht;
+ accum_ofdm = &il->_4965.accum_stats.rx.ofdm;
+ accum_cck = &il->_4965.accum_stats.rx.cck;
+ accum_general = &il->_4965.accum_stats.rx.general;
+ accum_ht = &il->_4965.accum_stats.rx.ofdm_ht;
+ delta_ofdm = &il->_4965.delta_stats.rx.ofdm;
+ delta_cck = &il->_4965.delta_stats.rx.cck;
+ delta_general = &il->_4965.delta_stats.rx.general;
+ delta_ht = &il->_4965.delta_stats.rx.ofdm_ht;
+ max_ofdm = &il->_4965.max_delta.rx.ofdm;
+ max_cck = &il->_4965.max_delta.rx.cck;
+ max_general = &il->_4965.max_delta.rx.general;
+ max_ht = &il->_4965.max_delta.rx.ofdm_ht;
+
+ pos += il4965_stats_flag(il, buf, bufsz);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_header,
+ "Statistics_Rx - OFDM:");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "ina_cnt:",
+ le32_to_cpu(ofdm->ina_cnt), accum_ofdm->ina_cnt,
+ delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_cnt:",
+ le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
+ delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "plcp_err:",
+ le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
+ delta_ofdm->plcp_err, max_ofdm->plcp_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_err:",
+ le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
+ delta_ofdm->crc32_err, max_ofdm->crc32_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "overrun_err:",
+ le32_to_cpu(ofdm->overrun_err), accum_ofdm->overrun_err,
+ delta_ofdm->overrun_err, max_ofdm->overrun_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "early_overrun_err:",
+ le32_to_cpu(ofdm->early_overrun_err),
+ accum_ofdm->early_overrun_err,
+ delta_ofdm->early_overrun_err,
+ max_ofdm->early_overrun_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_good:",
+ le32_to_cpu(ofdm->crc32_good), accum_ofdm->crc32_good,
+ delta_ofdm->crc32_good, max_ofdm->crc32_good);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "false_alarm_cnt:",
+ le32_to_cpu(ofdm->false_alarm_cnt),
+ accum_ofdm->false_alarm_cnt, delta_ofdm->false_alarm_cnt,
+ max_ofdm->false_alarm_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_sync_err_cnt:",
+ le32_to_cpu(ofdm->fina_sync_err_cnt),
+ accum_ofdm->fina_sync_err_cnt,
+ delta_ofdm->fina_sync_err_cnt,
+ max_ofdm->fina_sync_err_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "sfd_timeout:",
+ le32_to_cpu(ofdm->sfd_timeout), accum_ofdm->sfd_timeout,
+ delta_ofdm->sfd_timeout, max_ofdm->sfd_timeout);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_timeout:",
+ le32_to_cpu(ofdm->fina_timeout), accum_ofdm->fina_timeout,
+ delta_ofdm->fina_timeout, max_ofdm->fina_timeout);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "unresponded_rts:",
+ le32_to_cpu(ofdm->unresponded_rts),
+ accum_ofdm->unresponded_rts, delta_ofdm->unresponded_rts,
+ max_ofdm->unresponded_rts);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "rxe_frame_lmt_ovrun:",
+ le32_to_cpu(ofdm->rxe_frame_limit_overrun),
+ accum_ofdm->rxe_frame_limit_overrun,
+ delta_ofdm->rxe_frame_limit_overrun,
+ max_ofdm->rxe_frame_limit_overrun);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_ack_cnt:",
+ le32_to_cpu(ofdm->sent_ack_cnt), accum_ofdm->sent_ack_cnt,
+ delta_ofdm->sent_ack_cnt, max_ofdm->sent_ack_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_cts_cnt:",
+ le32_to_cpu(ofdm->sent_cts_cnt), accum_ofdm->sent_cts_cnt,
+ delta_ofdm->sent_cts_cnt, max_ofdm->sent_cts_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_ba_rsp_cnt:",
+ le32_to_cpu(ofdm->sent_ba_rsp_cnt),
+ accum_ofdm->sent_ba_rsp_cnt, delta_ofdm->sent_ba_rsp_cnt,
+ max_ofdm->sent_ba_rsp_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "dsp_self_kill:",
+ le32_to_cpu(ofdm->dsp_self_kill),
+ accum_ofdm->dsp_self_kill, delta_ofdm->dsp_self_kill,
+ max_ofdm->dsp_self_kill);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "mh_format_err:",
+ le32_to_cpu(ofdm->mh_format_err),
+ accum_ofdm->mh_format_err, delta_ofdm->mh_format_err,
+ max_ofdm->mh_format_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table,
+ "re_acq_main_rssi_sum:",
+ le32_to_cpu(ofdm->re_acq_main_rssi_sum),
+ accum_ofdm->re_acq_main_rssi_sum,
+ delta_ofdm->re_acq_main_rssi_sum,
+ max_ofdm->re_acq_main_rssi_sum);
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_header,
+ "Statistics_Rx - CCK:");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "ina_cnt:",
+ le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
+ delta_cck->ina_cnt, max_cck->ina_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_cnt:",
+ le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
+ delta_cck->fina_cnt, max_cck->fina_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "plcp_err:",
+ le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
+ delta_cck->plcp_err, max_cck->plcp_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_err:",
+ le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
+ delta_cck->crc32_err, max_cck->crc32_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "overrun_err:",
+ le32_to_cpu(cck->overrun_err), accum_cck->overrun_err,
+ delta_cck->overrun_err, max_cck->overrun_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "early_overrun_err:",
+ le32_to_cpu(cck->early_overrun_err),
+ accum_cck->early_overrun_err,
+ delta_cck->early_overrun_err, max_cck->early_overrun_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_good:",
+ le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
+ delta_cck->crc32_good, max_cck->crc32_good);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "false_alarm_cnt:",
+ le32_to_cpu(cck->false_alarm_cnt),
+ accum_cck->false_alarm_cnt, delta_cck->false_alarm_cnt,
+ max_cck->false_alarm_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_sync_err_cnt:",
+ le32_to_cpu(cck->fina_sync_err_cnt),
+ accum_cck->fina_sync_err_cnt,
+ delta_cck->fina_sync_err_cnt, max_cck->fina_sync_err_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "sfd_timeout:",
+ le32_to_cpu(cck->sfd_timeout), accum_cck->sfd_timeout,
+ delta_cck->sfd_timeout, max_cck->sfd_timeout);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_timeout:",
+ le32_to_cpu(cck->fina_timeout), accum_cck->fina_timeout,
+ delta_cck->fina_timeout, max_cck->fina_timeout);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "unresponded_rts:",
+ le32_to_cpu(cck->unresponded_rts),
+ accum_cck->unresponded_rts, delta_cck->unresponded_rts,
+ max_cck->unresponded_rts);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "rxe_frame_lmt_ovrun:",
+ le32_to_cpu(cck->rxe_frame_limit_overrun),
+ accum_cck->rxe_frame_limit_overrun,
+ delta_cck->rxe_frame_limit_overrun,
+ max_cck->rxe_frame_limit_overrun);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_ack_cnt:",
+ le32_to_cpu(cck->sent_ack_cnt), accum_cck->sent_ack_cnt,
+ delta_cck->sent_ack_cnt, max_cck->sent_ack_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_cts_cnt:",
+ le32_to_cpu(cck->sent_cts_cnt), accum_cck->sent_cts_cnt,
+ delta_cck->sent_cts_cnt, max_cck->sent_cts_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_ba_rsp_cnt:",
+ le32_to_cpu(cck->sent_ba_rsp_cnt),
+ accum_cck->sent_ba_rsp_cnt, delta_cck->sent_ba_rsp_cnt,
+ max_cck->sent_ba_rsp_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "dsp_self_kill:",
+ le32_to_cpu(cck->dsp_self_kill), accum_cck->dsp_self_kill,
+ delta_cck->dsp_self_kill, max_cck->dsp_self_kill);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "mh_format_err:",
+ le32_to_cpu(cck->mh_format_err), accum_cck->mh_format_err,
+ delta_cck->mh_format_err, max_cck->mh_format_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table,
+ "re_acq_main_rssi_sum:",
+ le32_to_cpu(cck->re_acq_main_rssi_sum),
+ accum_cck->re_acq_main_rssi_sum,
+ delta_cck->re_acq_main_rssi_sum,
+ max_cck->re_acq_main_rssi_sum);
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_header,
+ "Statistics_Rx - GENERAL:");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "bogus_cts:",
+ le32_to_cpu(general->bogus_cts), accum_general->bogus_cts,
+ delta_general->bogus_cts, max_general->bogus_cts);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "bogus_ack:",
+ le32_to_cpu(general->bogus_ack), accum_general->bogus_ack,
+ delta_general->bogus_ack, max_general->bogus_ack);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "non_bssid_frames:",
+ le32_to_cpu(general->non_bssid_frames),
+ accum_general->non_bssid_frames,
+ delta_general->non_bssid_frames,
+ max_general->non_bssid_frames);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "filtered_frames:",
+ le32_to_cpu(general->filtered_frames),
+ accum_general->filtered_frames,
+ delta_general->filtered_frames,
+ max_general->filtered_frames);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "non_channel_beacons:",
+ le32_to_cpu(general->non_channel_beacons),
+ accum_general->non_channel_beacons,
+ delta_general->non_channel_beacons,
+ max_general->non_channel_beacons);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "channel_beacons:",
+ le32_to_cpu(general->channel_beacons),
+ accum_general->channel_beacons,
+ delta_general->channel_beacons,
+ max_general->channel_beacons);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "num_missed_bcon:",
+ le32_to_cpu(general->num_missed_bcon),
+ accum_general->num_missed_bcon,
+ delta_general->num_missed_bcon,
+ max_general->num_missed_bcon);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table,
+ "adc_rx_saturation_time:",
+ le32_to_cpu(general->adc_rx_saturation_time),
+ accum_general->adc_rx_saturation_time,
+ delta_general->adc_rx_saturation_time,
+ max_general->adc_rx_saturation_time);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table,
+ "ina_detect_search_tm:",
+ le32_to_cpu(general->ina_detection_search_time),
+ accum_general->ina_detection_search_time,
+ delta_general->ina_detection_search_time,
+ max_general->ina_detection_search_time);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table,
+ "beacon_silence_rssi_a:",
+ le32_to_cpu(general->beacon_silence_rssi_a),
+ accum_general->beacon_silence_rssi_a,
+ delta_general->beacon_silence_rssi_a,
+ max_general->beacon_silence_rssi_a);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table,
+ "beacon_silence_rssi_b:",
+ le32_to_cpu(general->beacon_silence_rssi_b),
+ accum_general->beacon_silence_rssi_b,
+ delta_general->beacon_silence_rssi_b,
+ max_general->beacon_silence_rssi_b);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table,
+ "beacon_silence_rssi_c:",
+ le32_to_cpu(general->beacon_silence_rssi_c),
+ accum_general->beacon_silence_rssi_c,
+ delta_general->beacon_silence_rssi_c,
+ max_general->beacon_silence_rssi_c);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table,
+ "interference_data_flag:",
+ le32_to_cpu(general->interference_data_flag),
+ accum_general->interference_data_flag,
+ delta_general->interference_data_flag,
+ max_general->interference_data_flag);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "channel_load:",
+ le32_to_cpu(general->channel_load),
+ accum_general->channel_load, delta_general->channel_load,
+ max_general->channel_load);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "dsp_false_alarms:",
+ le32_to_cpu(general->dsp_false_alarms),
+ accum_general->dsp_false_alarms,
+ delta_general->dsp_false_alarms,
+ max_general->dsp_false_alarms);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_rssi_a:",
+ le32_to_cpu(general->beacon_rssi_a),
+ accum_general->beacon_rssi_a,
+ delta_general->beacon_rssi_a, max_general->beacon_rssi_a);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_rssi_b:",
+ le32_to_cpu(general->beacon_rssi_b),
+ accum_general->beacon_rssi_b,
+ delta_general->beacon_rssi_b, max_general->beacon_rssi_b);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_rssi_c:",
+ le32_to_cpu(general->beacon_rssi_c),
+ accum_general->beacon_rssi_c,
+ delta_general->beacon_rssi_c, max_general->beacon_rssi_c);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_energy_a:",
+ le32_to_cpu(general->beacon_energy_a),
+ accum_general->beacon_energy_a,
+ delta_general->beacon_energy_a,
+ max_general->beacon_energy_a);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_energy_b:",
+ le32_to_cpu(general->beacon_energy_b),
+ accum_general->beacon_energy_b,
+ delta_general->beacon_energy_b,
+ max_general->beacon_energy_b);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_energy_c:",
+ le32_to_cpu(general->beacon_energy_c),
+ accum_general->beacon_energy_c,
+ delta_general->beacon_energy_c,
+ max_general->beacon_energy_c);
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_header,
+ "Statistics_Rx - OFDM_HT:");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "plcp_err:",
+ le32_to_cpu(ht->plcp_err), accum_ht->plcp_err,
+ delta_ht->plcp_err, max_ht->plcp_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "overrun_err:",
+ le32_to_cpu(ht->overrun_err), accum_ht->overrun_err,
+ delta_ht->overrun_err, max_ht->overrun_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "early_overrun_err:",
+ le32_to_cpu(ht->early_overrun_err),
+ accum_ht->early_overrun_err, delta_ht->early_overrun_err,
+ max_ht->early_overrun_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_good:",
+ le32_to_cpu(ht->crc32_good), accum_ht->crc32_good,
+ delta_ht->crc32_good, max_ht->crc32_good);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_err:",
+ le32_to_cpu(ht->crc32_err), accum_ht->crc32_err,
+ delta_ht->crc32_err, max_ht->crc32_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "mh_format_err:",
+ le32_to_cpu(ht->mh_format_err), accum_ht->mh_format_err,
+ delta_ht->mh_format_err, max_ht->mh_format_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "agg_crc32_good:",
+ le32_to_cpu(ht->agg_crc32_good), accum_ht->agg_crc32_good,
+ delta_ht->agg_crc32_good, max_ht->agg_crc32_good);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "agg_mpdu_cnt:",
+ le32_to_cpu(ht->agg_mpdu_cnt), accum_ht->agg_mpdu_cnt,
+ delta_ht->agg_mpdu_cnt, max_ht->agg_mpdu_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "agg_cnt:",
+ le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt,
+ delta_ht->agg_cnt, max_ht->agg_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "unsupport_mcs:",
+ le32_to_cpu(ht->unsupport_mcs), accum_ht->unsupport_mcs,
+ delta_ht->unsupport_mcs, max_ht->unsupport_mcs);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+ssize_t
+il4965_ucode_tx_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ int pos = 0;
+ char *buf;
+ int bufsz = (sizeof(struct stats_tx) * 48) + 250;
+ ssize_t ret;
+ struct stats_tx *tx, *accum_tx, *delta_tx, *max_tx;
+
+ if (!il_is_alive(il))
+ return -EAGAIN;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IL_ERR("Can not allocate Buffer\n");
+ return -ENOMEM;
+ }
+
+ /* the statistic information display here is based on
+ * the last stats notification from uCode
+ * might not reflect the current uCode activity
+ */
+ tx = &il->_4965.stats.tx;
+ accum_tx = &il->_4965.accum_stats.tx;
+ delta_tx = &il->_4965.delta_stats.tx;
+ max_tx = &il->_4965.max_delta.tx;
+
+ pos += il4965_stats_flag(il, buf, bufsz);
+ pos += scnprintf(buf + pos, bufsz - pos, fmt_header, "Statistics_Tx:");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "preamble:",
+ le32_to_cpu(tx->preamble_cnt), accum_tx->preamble_cnt,
+ delta_tx->preamble_cnt, max_tx->preamble_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "rx_detected_cnt:",
+ le32_to_cpu(tx->rx_detected_cnt),
+ accum_tx->rx_detected_cnt, delta_tx->rx_detected_cnt,
+ max_tx->rx_detected_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "bt_prio_defer_cnt:",
+ le32_to_cpu(tx->bt_prio_defer_cnt),
+ accum_tx->bt_prio_defer_cnt, delta_tx->bt_prio_defer_cnt,
+ max_tx->bt_prio_defer_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "bt_prio_kill_cnt:",
+ le32_to_cpu(tx->bt_prio_kill_cnt),
+ accum_tx->bt_prio_kill_cnt, delta_tx->bt_prio_kill_cnt,
+ max_tx->bt_prio_kill_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "few_bytes_cnt:",
+ le32_to_cpu(tx->few_bytes_cnt), accum_tx->few_bytes_cnt,
+ delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "cts_timeout:",
+ le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
+ delta_tx->cts_timeout, max_tx->cts_timeout);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "ack_timeout:",
+ le32_to_cpu(tx->ack_timeout), accum_tx->ack_timeout,
+ delta_tx->ack_timeout, max_tx->ack_timeout);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "expected_ack_cnt:",
+ le32_to_cpu(tx->expected_ack_cnt),
+ accum_tx->expected_ack_cnt, delta_tx->expected_ack_cnt,
+ max_tx->expected_ack_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "actual_ack_cnt:",
+ le32_to_cpu(tx->actual_ack_cnt), accum_tx->actual_ack_cnt,
+ delta_tx->actual_ack_cnt, max_tx->actual_ack_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "dump_msdu_cnt:",
+ le32_to_cpu(tx->dump_msdu_cnt), accum_tx->dump_msdu_cnt,
+ delta_tx->dump_msdu_cnt, max_tx->dump_msdu_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table,
+ "abort_nxt_frame_mismatch:",
+ le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt),
+ accum_tx->burst_abort_next_frame_mismatch_cnt,
+ delta_tx->burst_abort_next_frame_mismatch_cnt,
+ max_tx->burst_abort_next_frame_mismatch_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table,
+ "abort_missing_nxt_frame:",
+ le32_to_cpu(tx->burst_abort_missing_next_frame_cnt),
+ accum_tx->burst_abort_missing_next_frame_cnt,
+ delta_tx->burst_abort_missing_next_frame_cnt,
+ max_tx->burst_abort_missing_next_frame_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table,
+ "cts_timeout_collision:",
+ le32_to_cpu(tx->cts_timeout_collision),
+ accum_tx->cts_timeout_collision,
+ delta_tx->cts_timeout_collision,
+ max_tx->cts_timeout_collision);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table,
+ "ack_ba_timeout_collision:",
+ le32_to_cpu(tx->ack_or_ba_timeout_collision),
+ accum_tx->ack_or_ba_timeout_collision,
+ delta_tx->ack_or_ba_timeout_collision,
+ max_tx->ack_or_ba_timeout_collision);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "agg ba_timeout:",
+ le32_to_cpu(tx->agg.ba_timeout), accum_tx->agg.ba_timeout,
+ delta_tx->agg.ba_timeout, max_tx->agg.ba_timeout);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table,
+ "agg ba_resched_frames:",
+ le32_to_cpu(tx->agg.ba_reschedule_frames),
+ accum_tx->agg.ba_reschedule_frames,
+ delta_tx->agg.ba_reschedule_frames,
+ max_tx->agg.ba_reschedule_frames);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table,
+ "agg scd_query_agg_frame:",
+ le32_to_cpu(tx->agg.scd_query_agg_frame_cnt),
+ accum_tx->agg.scd_query_agg_frame_cnt,
+ delta_tx->agg.scd_query_agg_frame_cnt,
+ max_tx->agg.scd_query_agg_frame_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table,
+ "agg scd_query_no_agg:",
+ le32_to_cpu(tx->agg.scd_query_no_agg),
+ accum_tx->agg.scd_query_no_agg,
+ delta_tx->agg.scd_query_no_agg,
+ max_tx->agg.scd_query_no_agg);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "agg scd_query_agg:",
+ le32_to_cpu(tx->agg.scd_query_agg),
+ accum_tx->agg.scd_query_agg, delta_tx->agg.scd_query_agg,
+ max_tx->agg.scd_query_agg);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table,
+ "agg scd_query_mismatch:",
+ le32_to_cpu(tx->agg.scd_query_mismatch),
+ accum_tx->agg.scd_query_mismatch,
+ delta_tx->agg.scd_query_mismatch,
+ max_tx->agg.scd_query_mismatch);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "agg frame_not_ready:",
+ le32_to_cpu(tx->agg.frame_not_ready),
+ accum_tx->agg.frame_not_ready,
+ delta_tx->agg.frame_not_ready,
+ max_tx->agg.frame_not_ready);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "agg underrun:",
+ le32_to_cpu(tx->agg.underrun), accum_tx->agg.underrun,
+ delta_tx->agg.underrun, max_tx->agg.underrun);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "agg bt_prio_kill:",
+ le32_to_cpu(tx->agg.bt_prio_kill),
+ accum_tx->agg.bt_prio_kill, delta_tx->agg.bt_prio_kill,
+ max_tx->agg.bt_prio_kill);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "agg rx_ba_rsp_cnt:",
+ le32_to_cpu(tx->agg.rx_ba_rsp_cnt),
+ accum_tx->agg.rx_ba_rsp_cnt, delta_tx->agg.rx_ba_rsp_cnt,
+ max_tx->agg.rx_ba_rsp_cnt);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+ssize_t
+il4965_ucode_general_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ int pos = 0;
+ char *buf;
+ int bufsz = sizeof(struct stats_general) * 10 + 300;
+ ssize_t ret;
+ struct stats_general_common *general, *accum_general;
+ struct stats_general_common *delta_general, *max_general;
+ struct stats_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
+ struct stats_div *div, *accum_div, *delta_div, *max_div;
+
+ if (!il_is_alive(il))
+ return -EAGAIN;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IL_ERR("Can not allocate Buffer\n");
+ return -ENOMEM;
+ }
+
+	/* The statistics displayed here are based on the last stats
+	 * notification from the uCode and might not reflect the current
+	 * uCode activity.
+	 */
+ general = &il->_4965.stats.general.common;
+ dbg = &il->_4965.stats.general.common.dbg;
+ div = &il->_4965.stats.general.common.div;
+ accum_general = &il->_4965.accum_stats.general.common;
+ accum_dbg = &il->_4965.accum_stats.general.common.dbg;
+ accum_div = &il->_4965.accum_stats.general.common.div;
+ delta_general = &il->_4965.delta_stats.general.common;
+ max_general = &il->_4965.max_delta.general.common;
+ delta_dbg = &il->_4965.delta_stats.general.common.dbg;
+ max_dbg = &il->_4965.max_delta.general.common.dbg;
+ delta_div = &il->_4965.delta_stats.general.common.div;
+ max_div = &il->_4965.max_delta.general.common.div;
+
+ pos += il4965_stats_flag(il, buf, bufsz);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_header,
+ "Statistics_General:");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_value, "temperature:",
+ le32_to_cpu(general->temperature));
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_value, "ttl_timestamp:",
+ le32_to_cpu(general->ttl_timestamp));
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "burst_check:",
+ le32_to_cpu(dbg->burst_check), accum_dbg->burst_check,
+ delta_dbg->burst_check, max_dbg->burst_check);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "burst_count:",
+ le32_to_cpu(dbg->burst_count), accum_dbg->burst_count,
+ delta_dbg->burst_count, max_dbg->burst_count);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table,
+ "wait_for_silence_timeout_count:",
+ le32_to_cpu(dbg->wait_for_silence_timeout_cnt),
+ accum_dbg->wait_for_silence_timeout_cnt,
+ delta_dbg->wait_for_silence_timeout_cnt,
+ max_dbg->wait_for_silence_timeout_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "sleep_time:",
+ le32_to_cpu(general->sleep_time),
+ accum_general->sleep_time, delta_general->sleep_time,
+ max_general->sleep_time);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "slots_out:",
+ le32_to_cpu(general->slots_out), accum_general->slots_out,
+ delta_general->slots_out, max_general->slots_out);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "slots_idle:",
+ le32_to_cpu(general->slots_idle),
+ accum_general->slots_idle, delta_general->slots_idle,
+ max_general->slots_idle);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "tx_on_a:",
+ le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
+ delta_div->tx_on_a, max_div->tx_on_a);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "tx_on_b:",
+ le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
+ delta_div->tx_on_b, max_div->tx_on_b);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "exec_time:",
+ le32_to_cpu(div->exec_time), accum_div->exec_time,
+ delta_div->exec_time, max_div->exec_time);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "probe_time:",
+ le32_to_cpu(div->probe_time), accum_div->probe_time,
+ delta_div->probe_time, max_div->probe_time);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "rx_enable_counter:",
+ le32_to_cpu(general->rx_enable_counter),
+ accum_general->rx_enable_counter,
+ delta_general->rx_enable_counter,
+ max_general->rx_enable_counter);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "num_of_sos_states:",
+ le32_to_cpu(general->num_of_sos_states),
+ accum_general->num_of_sos_states,
+ delta_general->num_of_sos_states,
+ max_general->num_of_sos_states);
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
new file mode 100644
index 000000000000..1667232af647
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -0,0 +1,6515 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci-aspm.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/firmware.h>
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+
+#include <net/mac80211.h>
+
+#include <asm/div64.h>
+
+#define DRV_NAME "iwl4965"
+
+#include "common.h"
+#include "4965.h"
+
+/******************************************************************************
+ *
+ * module boilerplate
+ *
+ ******************************************************************************/
+
+/*
+ * module name, copyright, version, etc.
+ */
+#define DRV_DESCRIPTION "Intel(R) Wireless WiFi 4965 driver for Linux"
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+#define VD "d"
+#else
+#define VD
+#endif
+
+#define DRV_VERSION IWLWIFI_VERSION VD
+
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_VERSION(DRV_VERSION);
+MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("iwl4965");
+
+void
+il4965_check_abort_status(struct il_priv *il, u8 frame_count, u32 status)
+{
+ if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
+ IL_ERR("Tx flush command to flush out all frames\n");
+ if (!test_bit(S_EXIT_PENDING, &il->status))
+ queue_work(il->workqueue, &il->tx_flush);
+ }
+}
+
+/*
+ * EEPROM
+ */
+struct il_mod_params il4965_mod_params = {
+ .amsdu_size_8K = 1,
+ .restart_fw = 1,
+ /* the rest are 0 by default */
+};
+
+void
+il4965_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq)
+{
+ unsigned long flags;
+ int i;
+ spin_lock_irqsave(&rxq->lock, flags);
+ INIT_LIST_HEAD(&rxq->rx_free);
+ INIT_LIST_HEAD(&rxq->rx_used);
+ /* Fill the rx_used queue with _all_ of the Rx buffers */
+ for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
+ /* In the reset function, these buffers may have been allocated
+ * to an SKB, so we need to unmap and free potential storage */
+ if (rxq->pool[i].page != NULL) {
+ pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
+ PAGE_SIZE << il->hw_params.rx_page_order,
+ PCI_DMA_FROMDEVICE);
+ __il_free_pages(il, rxq->pool[i].page);
+ rxq->pool[i].page = NULL;
+ }
+ list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
+ }
+
+ for (i = 0; i < RX_QUEUE_SIZE; i++)
+ rxq->queue[i] = NULL;
+
+	/* Mark the queue as if we have processed and used all buffers, but
+	 * have not restocked the Rx queue with fresh buffers */
+ rxq->read = rxq->write = 0;
+ rxq->write_actual = 0;
+ rxq->free_count = 0;
+ spin_unlock_irqrestore(&rxq->lock, flags);
+}
+
+int
+il4965_rx_init(struct il_priv *il, struct il_rx_queue *rxq)
+{
+ u32 rb_size;
+ const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
+ u32 rb_timeout = 0;
+
+ if (il->cfg->mod_params->amsdu_size_8K)
+ rb_size = FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
+ else
+ rb_size = FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
+
+ /* Stop Rx DMA */
+ il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+
+ /* Reset driver's Rx queue write idx */
+ il_wr(il, FH49_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
+
+ /* Tell device where to find RBD circular buffer in DRAM */
+ il_wr(il, FH49_RSCSR_CHNL0_RBDCB_BASE_REG, (u32) (rxq->bd_dma >> 8));
+
+ /* Tell device where in DRAM to update its Rx status */
+ il_wr(il, FH49_RSCSR_CHNL0_STTS_WPTR_REG, rxq->rb_stts_dma >> 4);
+
+ /* Enable Rx DMA
+ * Direct rx interrupts to hosts
+ * Rx buffer size 4 or 8k
+ * RB timeout 0x10
+ * 256 RBDs
+ */
+ il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG,
+ FH49_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
+ FH49_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
+ FH49_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
+ rb_size |
+ (rb_timeout << FH49_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
+ (rfdnlog << FH49_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
+
+ /* Set interrupt coalescing timer to default (2048 usecs) */
+ il_write8(il, CSR_INT_COALESCING, IL_HOST_INT_TIMEOUT_DEF);
+
+ return 0;
+}
+
+static void
+il4965_set_pwr_vmain(struct il_priv *il)
+{
+/*
+ * (for documentation purposes)
+ * to set power to V_AUX, do:
+
+ if (pci_pme_capable(il->pci_dev, PCI_D3cold))
+ il_set_bits_mask_prph(il, APMG_PS_CTRL_REG,
+ APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
+ ~APMG_PS_CTRL_MSK_PWR_SRC);
+ */
+
+ il_set_bits_mask_prph(il, APMG_PS_CTRL_REG,
+ APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
+ ~APMG_PS_CTRL_MSK_PWR_SRC);
+}
+
+int
+il4965_hw_nic_init(struct il_priv *il)
+{
+ unsigned long flags;
+ struct il_rx_queue *rxq = &il->rxq;
+ int ret;
+
+ /* nic_init */
+ spin_lock_irqsave(&il->lock, flags);
+ il->cfg->ops->lib->apm_ops.init(il);
+
+ /* Set interrupt coalescing calibration timer to default (512 usecs) */
+ il_write8(il, CSR_INT_COALESCING, IL_HOST_INT_CALIB_TIMEOUT_DEF);
+
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ il4965_set_pwr_vmain(il);
+
+ il->cfg->ops->lib->apm_ops.config(il);
+
+ /* Allocate the RX queue, or reset if it is already allocated */
+ if (!rxq->bd) {
+ ret = il_rx_queue_alloc(il);
+ if (ret) {
+ IL_ERR("Unable to initialize Rx queue\n");
+ return -ENOMEM;
+ }
+ } else
+ il4965_rx_queue_reset(il, rxq);
+
+ il4965_rx_replenish(il);
+
+ il4965_rx_init(il, rxq);
+
+ spin_lock_irqsave(&il->lock, flags);
+
+ rxq->need_update = 1;
+ il_rx_queue_update_write_ptr(il, rxq);
+
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ /* Allocate or reset and init all Tx and Command queues */
+ if (!il->txq) {
+ ret = il4965_txq_ctx_alloc(il);
+ if (ret)
+ return ret;
+ } else
+ il4965_txq_ctx_reset(il);
+
+ set_bit(S_INIT, &il->status);
+
+ return 0;
+}
+
+/**
+ * il4965_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
+ */
+static inline __le32
+il4965_dma_addr2rbd_ptr(struct il_priv *il, dma_addr_t dma_addr)
+{
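+	/* The receive page is 256-byte aligned, so shifting right by 8 loses
+	 * nothing and lets a DMA address of up to 36 bits fit in the 32-bit
+	 * RBD word (cf. the alignment checks in il4965_rx_allocate()). */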
+ return cpu_to_le32((u32) (dma_addr >> 8));
+}
+
+/**
+ * il4965_rx_queue_restock - refill RX queue from pre-allocated pool
+ *
+ * If there are slots in the RX queue that need to be restocked,
+ * and we have free pre-allocated buffers, fill the ranks as much
+ * as we can, pulling from rx_free.
+ *
+ * This moves the 'write' idx forward to catch up with 'processed', and
+ * also updates the memory address in the firmware to reference the new
+ * target buffer.
+ */
+void
+il4965_rx_queue_restock(struct il_priv *il)
+{
+ struct il_rx_queue *rxq = &il->rxq;
+ struct list_head *element;
+ struct il_rx_buf *rxb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rxq->lock, flags);
+ while (il_rx_queue_space(rxq) > 0 && rxq->free_count) {
+ /* The overwritten rxb must be a used one */
+ rxb = rxq->queue[rxq->write];
+ BUG_ON(rxb && rxb->page);
+
+ /* Get next free Rx buffer, remove from free list */
+ element = rxq->rx_free.next;
+ rxb = list_entry(element, struct il_rx_buf, list);
+ list_del(element);
+
+ /* Point to Rx buffer via next RBD in circular buffer */
+ rxq->bd[rxq->write] =
+ il4965_dma_addr2rbd_ptr(il, rxb->page_dma);
+ rxq->queue[rxq->write] = rxb;
+ rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
+ rxq->free_count--;
+ }
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ /* If the pre-allocated buffer pool is dropping low, schedule to
+ * refill it */
+ if (rxq->free_count <= RX_LOW_WATERMARK)
+ queue_work(il->workqueue, &il->rx_replenish);
+
+ /* If we've added more space for the firmware to place data, tell it.
+ * Increment device's write pointer in multiples of 8. */
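+	/* I.e. only notify the device once the write index has advanced past
+	 * another multiple of 8. */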
+ if (rxq->write_actual != (rxq->write & ~0x7)) {
+ spin_lock_irqsave(&rxq->lock, flags);
+ rxq->need_update = 1;
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ il_rx_queue_update_write_ptr(il, rxq);
+ }
+}
+
+/**
+ * il4965_rx_allocate - Move used buffers from rx_used to rx_free
+ *
+ * When a buffer is moved to rx_free, a receive page is allocated for it.
+ *
+ * The caller then restocks the Rx queue via il4965_rx_queue_restock().
+ * This is called as a scheduled work item (except for during initialization).
+ */
+static void
+il4965_rx_allocate(struct il_priv *il, gfp_t priority)
+{
+ struct il_rx_queue *rxq = &il->rxq;
+ struct list_head *element;
+ struct il_rx_buf *rxb;
+ struct page *page;
+ unsigned long flags;
+ gfp_t gfp_mask = priority;
+
+ while (1) {
+ spin_lock_irqsave(&rxq->lock, flags);
+ if (list_empty(&rxq->rx_used)) {
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&rxq->lock, flags);
+
+ if (rxq->free_count > RX_LOW_WATERMARK)
+ gfp_mask |= __GFP_NOWARN;
+
+ if (il->hw_params.rx_page_order > 0)
+ gfp_mask |= __GFP_COMP;
+
+ /* Alloc a new receive buffer */
+ page = alloc_pages(gfp_mask, il->hw_params.rx_page_order);
+ if (!page) {
+ if (net_ratelimit())
+ D_INFO("alloc_pages failed, " "order: %d\n",
+ il->hw_params.rx_page_order);
+
+ if (rxq->free_count <= RX_LOW_WATERMARK &&
+ net_ratelimit())
+ IL_ERR("Failed to alloc_pages with %s. "
+ "Only %u free buffers remaining.\n",
+ priority ==
+ GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL",
+ rxq->free_count);
+ /* We don't reschedule replenish work here -- we will
+ * call the restock method and if it still needs
+ * more buffers it will schedule replenish */
+ return;
+ }
+
+ spin_lock_irqsave(&rxq->lock, flags);
+
+ if (list_empty(&rxq->rx_used)) {
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ __free_pages(page, il->hw_params.rx_page_order);
+ return;
+ }
+ element = rxq->rx_used.next;
+ rxb = list_entry(element, struct il_rx_buf, list);
+ list_del(element);
+
+ spin_unlock_irqrestore(&rxq->lock, flags);
+
+ BUG_ON(rxb->page);
+ rxb->page = page;
+ /* Get physical address of the RB */
+ rxb->page_dma =
+ pci_map_page(il->pci_dev, page, 0,
+ PAGE_SIZE << il->hw_params.rx_page_order,
+ PCI_DMA_FROMDEVICE);
+ /* dma address must be no more than 36 bits */
+ BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
+ /* and also 256 byte aligned! */
+ BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
+
+ spin_lock_irqsave(&rxq->lock, flags);
+
+ list_add_tail(&rxb->list, &rxq->rx_free);
+ rxq->free_count++;
+ il->alloc_rxb_page++;
+
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ }
+}
+
+void
+il4965_rx_replenish(struct il_priv *il)
+{
+ unsigned long flags;
+
+ il4965_rx_allocate(il, GFP_KERNEL);
+
+ spin_lock_irqsave(&il->lock, flags);
+ il4965_rx_queue_restock(il);
+ spin_unlock_irqrestore(&il->lock, flags);
+}
+
+void
+il4965_rx_replenish_now(struct il_priv *il)
+{
+ il4965_rx_allocate(il, GFP_ATOMIC);
+
+ il4965_rx_queue_restock(il);
+}
+
+/* Assumes that the page field of the buffers in 'pool' is kept accurate.
+ * If a page has been detached, the pool entry must have its page set to NULL.
+ * This free routine walks the list of pool entries and, if the page is
+ * non-NULL, unmaps and frees it.
+ */
+void
+il4965_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq)
+{
+ int i;
+ for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
+ if (rxq->pool[i].page != NULL) {
+ pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
+ PAGE_SIZE << il->hw_params.rx_page_order,
+ PCI_DMA_FROMDEVICE);
+ __il_free_pages(il, rxq->pool[i].page);
+ rxq->pool[i].page = NULL;
+ }
+ }
+
+ dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
+ rxq->bd_dma);
+ dma_free_coherent(&il->pci_dev->dev, sizeof(struct il_rb_status),
+ rxq->rb_stts, rxq->rb_stts_dma);
+ rxq->bd = NULL;
+ rxq->rb_stts = NULL;
+}
+
+int
+il4965_rxq_stop(struct il_priv *il)
+{
+
+ /* stop Rx DMA */
+ il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+ il_poll_bit(il, FH49_MEM_RSSR_RX_STATUS_REG,
+ FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
+
+ return 0;
+}
+
+int
+il4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
+{
+ int idx = 0;
+ int band_offset = 0;
+
+ /* HT rate format: mac80211 wants an MCS number, which is just LSB */
+ if (rate_n_flags & RATE_MCS_HT_MSK) {
+ idx = (rate_n_flags & 0xff);
+ return idx;
+ /* Legacy rate format, search for match in table */
+ } else {
+ if (band == IEEE80211_BAND_5GHZ)
+ band_offset = IL_FIRST_OFDM_RATE;
+ for (idx = band_offset; idx < RATE_COUNT_LEGACY; idx++)
+ if (il_rates[idx].plcp == (rate_n_flags & 0xFF))
+ return idx - band_offset;
+ }
+
+ return -1;
+}
+
+static int
+il4965_calc_rssi(struct il_priv *il, struct il_rx_phy_res *rx_resp)
+{
+ /* data from PHY/DSP regarding signal strength, etc.,
+ * contents are always there, not configurable by host. */
+ struct il4965_rx_non_cfg_phy *ncphy =
+ (struct il4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
+ u32 agc =
+ (le16_to_cpu(ncphy->agc_info) & IL49_AGC_DB_MASK) >>
+ IL49_AGC_DB_POS;
+
+ u32 valid_antennae =
+ (le16_to_cpu(rx_resp->phy_flags) & IL49_RX_PHY_FLAGS_ANTENNAE_MASK)
+ >> IL49_RX_PHY_FLAGS_ANTENNAE_OFFSET;
+ u8 max_rssi = 0;
+ u32 i;
+
+ /* Find max rssi among 3 possible receivers.
+ * These values are measured by the digital signal processor (DSP).
+ * They should stay fairly constant even as the signal strength varies,
+ * if the radio's automatic gain control (AGC) is working right.
+ * AGC value (see below) will provide the "interesting" info. */
+ for (i = 0; i < 3; i++)
+ if (valid_antennae & (1 << i))
+ max_rssi = max(ncphy->rssi_info[i << 1], max_rssi);
+
+ D_STATS("Rssi In A %d B %d C %d Max %d AGC dB %d\n",
+ ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4],
+ max_rssi, agc);
+
+ /* dBm = max_rssi dB - agc dB - constant.
+ * Higher AGC (higher radio gain) means lower signal. */
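+	/* Illustrative example only: with max_rssi = 70, agc = 50 and an
+	 * offset of 44, this would report 70 - 50 - 44 = -24 dBm. */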
+ return max_rssi - agc - IL4965_RSSI_OFFSET;
+}
+
+static u32
+il4965_translate_rx_status(struct il_priv *il, u32 decrypt_in)
+{
+ u32 decrypt_out = 0;
+
+ if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
+ RX_RES_STATUS_STATION_FOUND)
+ decrypt_out |=
+ (RX_RES_STATUS_STATION_FOUND |
+ RX_RES_STATUS_NO_STATION_INFO_MISMATCH);
+
+ decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);
+
+ /* packet was not encrypted */
+ if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
+ RX_RES_STATUS_SEC_TYPE_NONE)
+ return decrypt_out;
+
+ /* packet was encrypted with unknown alg */
+ if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
+ RX_RES_STATUS_SEC_TYPE_ERR)
+ return decrypt_out;
+
+ /* decryption was not done in HW */
+ if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
+ RX_MPDU_RES_STATUS_DEC_DONE_MSK)
+ return decrypt_out;
+
+ switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {
+
+ case RX_RES_STATUS_SEC_TYPE_CCMP:
+ /* alg is CCM: check MIC only */
+ if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
+ /* Bad MIC */
+ decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
+ else
+ decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
+
+ break;
+
+ case RX_RES_STATUS_SEC_TYPE_TKIP:
+ if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
+ /* Bad TTAK */
+ decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
+ break;
+ }
+ /* fall through if TTAK OK */
+ default:
+ if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
+ decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
+ else
+ decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
+ break;
+ }
+
+ D_RX("decrypt_in:0x%x decrypt_out = 0x%x\n", decrypt_in, decrypt_out);
+
+ return decrypt_out;
+}
+
+static void
+il4965_pass_packet_to_mac80211(struct il_priv *il, struct ieee80211_hdr *hdr,
+ u16 len, u32 ampdu_status, struct il_rx_buf *rxb,
+ struct ieee80211_rx_status *stats)
+{
+ struct sk_buff *skb;
+ __le16 fc = hdr->frame_control;
+
+ /* We only process data packets if the interface is open */
+ if (unlikely(!il->is_open)) {
+ D_DROP("Dropping packet while interface is not open.\n");
+ return;
+ }
+
+ /* In case of HW accelerated crypto and bad decryption, drop */
+ if (!il->cfg->mod_params->sw_crypto &&
+ il_set_decrypted_flag(il, hdr, ampdu_status, stats))
+ return;
+
+ skb = dev_alloc_skb(128);
+ if (!skb) {
+ IL_ERR("dev_alloc_skb failed\n");
+ return;
+ }
+
+ skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);
+
+ il_update_stats(il, false, fc, len);
+ memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
+
+ ieee80211_rx(il->hw, skb);
+ il->alloc_rxb_page--;
+ rxb->page = NULL;
+}
+
+/* Called for N_RX (legacy ABG frames), or
+ * N_RX_MPDU (HT high-throughput N frames). */
+void
+il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct ieee80211_hdr *header;
+ struct ieee80211_rx_status rx_status;
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ struct il_rx_phy_res *phy_res;
+ __le32 rx_pkt_status;
+ struct il_rx_mpdu_res_start *amsdu;
+ u32 len;
+ u32 ampdu_status;
+ u32 rate_n_flags;
+
+ /**
+ * N_RX and N_RX_MPDU are handled differently.
+ * N_RX: physical layer info is in this buffer
+ * N_RX_MPDU: physical layer info was sent in separate
+ * command and cached in il->last_phy_res
+ *
+ * Here we set up local variables depending on which command is
+ * received.
+ */
+ if (pkt->hdr.cmd == N_RX) {
+ phy_res = (struct il_rx_phy_res *)pkt->u.raw;
+ header =
+ (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res) +
+ phy_res->cfg_phy_cnt);
+
+ len = le16_to_cpu(phy_res->byte_count);
+ rx_pkt_status =
+ *(__le32 *) (pkt->u.raw + sizeof(*phy_res) +
+ phy_res->cfg_phy_cnt + len);
+ ampdu_status = le32_to_cpu(rx_pkt_status);
+ } else {
+ if (!il->_4965.last_phy_res_valid) {
+ IL_ERR("MPDU frame without cached PHY data\n");
+ return;
+ }
+ phy_res = &il->_4965.last_phy_res;
+ amsdu = (struct il_rx_mpdu_res_start *)pkt->u.raw;
+ header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
+ len = le16_to_cpu(amsdu->byte_count);
+ rx_pkt_status = *(__le32 *) (pkt->u.raw + sizeof(*amsdu) + len);
+ ampdu_status =
+ il4965_translate_rx_status(il, le32_to_cpu(rx_pkt_status));
+ }
+
+ if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
+ D_DROP("dsp size out of range [0,20]: %d/n",
+ phy_res->cfg_phy_cnt);
+ return;
+ }
+
+ if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
+ !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
+ D_RX("Bad CRC or FIFO: 0x%08X.\n", le32_to_cpu(rx_pkt_status));
+ return;
+ }
+
+ /* This will be used in several places later */
+ rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
+
+ /* rx_status carries information about the packet to mac80211 */
+ rx_status.mactime = le64_to_cpu(phy_res->timestamp);
+ rx_status.band =
+ (phy_res->
+ phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? IEEE80211_BAND_2GHZ :
+ IEEE80211_BAND_5GHZ;
+ rx_status.freq =
+ ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
+ rx_status.band);
+ rx_status.rate_idx =
+ il4965_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
+ rx_status.flag = 0;
+
+	/* TSF isn't reliable. As a workaround, to allow a smooth user
+	 * experience, it is not propagated to mac80211 */
+ /*rx_status.flag |= RX_FLAG_MACTIME_MPDU; */
+
+ il->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
+
+ /* Find max signal strength (dBm) among 3 antenna/receiver chains */
+ rx_status.signal = il4965_calc_rssi(il, phy_res);
+
+ il_dbg_log_rx_data_frame(il, len, header);
+ D_STATS("Rssi %d, TSF %llu\n", rx_status.signal,
+ (unsigned long long)rx_status.mactime);
+
+ /*
+ * "antenna number"
+ *
+ * It seems that the antenna field in the phy flags value
+ * is actually a bit field. This is undefined by radiotap,
+ * it wants an actual antenna number but I always get "7"
+ * for most legacy frames I receive indicating that the
+ * same frame was received on all three RX chains.
+ *
+ * I think this field should be removed in favor of a
+ * new 802.11n radiotap field "RX chains" that is defined
+ * as a bitmask.
+ */
+ rx_status.antenna =
+ (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK) >>
+ RX_RES_PHY_FLAGS_ANTENNA_POS;
+
+ /* set the preamble flag if appropriate */
+ if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
+ rx_status.flag |= RX_FLAG_SHORTPRE;
+
+ /* Set up the HT phy flags */
+ if (rate_n_flags & RATE_MCS_HT_MSK)
+ rx_status.flag |= RX_FLAG_HT;
+ if (rate_n_flags & RATE_MCS_HT40_MSK)
+ rx_status.flag |= RX_FLAG_40MHZ;
+ if (rate_n_flags & RATE_MCS_SGI_MSK)
+ rx_status.flag |= RX_FLAG_SHORT_GI;
+
+ il4965_pass_packet_to_mac80211(il, header, len, ampdu_status, rxb,
+ &rx_status);
+}
+
+/* Cache phy data (Rx signal strength, etc) for HT frame (N_RX_PHY).
+ * This will be used later in il4965_hdl_rx() for N_RX_MPDU. */
+void
+il4965_hdl_rx_phy(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ il->_4965.last_phy_res_valid = true;
+ memcpy(&il->_4965.last_phy_res, pkt->u.raw,
+ sizeof(struct il_rx_phy_res));
+}
+
+static int
+il4965_get_channels_for_scan(struct il_priv *il, struct ieee80211_vif *vif,
+ enum ieee80211_band band, u8 is_active,
+ u8 n_probes, struct il_scan_channel *scan_ch)
+{
+ struct ieee80211_channel *chan;
+ const struct ieee80211_supported_band *sband;
+ const struct il_channel_info *ch_info;
+ u16 passive_dwell = 0;
+ u16 active_dwell = 0;
+ int added, i;
+ u16 channel;
+
+ sband = il_get_hw_mode(il, band);
+ if (!sband)
+ return 0;
+
+ active_dwell = il_get_active_dwell_time(il, band, n_probes);
+ passive_dwell = il_get_passive_dwell_time(il, band, vif);
+
+ if (passive_dwell <= active_dwell)
+ passive_dwell = active_dwell + 1;
+
+ for (i = 0, added = 0; i < il->scan_request->n_channels; i++) {
+ chan = il->scan_request->channels[i];
+
+ if (chan->band != band)
+ continue;
+
+ channel = chan->hw_value;
+ scan_ch->channel = cpu_to_le16(channel);
+
+ ch_info = il_get_channel_info(il, band, channel);
+ if (!il_is_channel_valid(ch_info)) {
+ D_SCAN("Channel %d is INVALID for this band.\n",
+ channel);
+ continue;
+ }
+
+ if (!is_active || il_is_channel_passive(ch_info) ||
+ (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN))
+ scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
+ else
+ scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
+
+ if (n_probes)
+ scan_ch->type |= IL_SCAN_PROBE_MASK(n_probes);
+
+ scan_ch->active_dwell = cpu_to_le16(active_dwell);
+ scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
+
+ /* Set txpower levels to defaults */
+ scan_ch->dsp_atten = 110;
+
+ /* NOTE: if we were doing 6Mb OFDM for scans we'd use
+ * power level:
+ * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
+ */
+ if (band == IEEE80211_BAND_5GHZ)
+ scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
+ else
+ scan_ch->tx_gain = ((1 << 5) | (5 << 3));
+
+ D_SCAN("Scanning ch=%d prob=0x%X [%s %d]\n", channel,
+ le32_to_cpu(scan_ch->type),
+ (scan_ch->
+ type & SCAN_CHANNEL_TYPE_ACTIVE) ? "ACTIVE" : "PASSIVE",
+ (scan_ch->
+ type & SCAN_CHANNEL_TYPE_ACTIVE) ? active_dwell :
+ passive_dwell);
+
+ scan_ch++;
+ added++;
+ }
+
+ D_SCAN("total channels to scan %d\n", added);
+ return added;
+}
+
+static void
+il4965_toggle_tx_ant(struct il_priv *il, u8 *ant, u8 valid)
+{
+ int i;
+ u8 ind = *ant;
+
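+	/* Advance to the next antenna that is marked valid, wrapping around
+	 * if needed. For example, with *ant == 0 and only bits 1 and 2 set in
+	 * 'valid', antenna 1 would be selected. */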
+ for (i = 0; i < RATE_ANT_NUM - 1; i++) {
+ ind = (ind + 1) < RATE_ANT_NUM ? ind + 1 : 0;
+ if (valid & BIT(ind)) {
+ *ant = ind;
+ return;
+ }
+ }
+}
+
+int
+il4965_request_scan(struct il_priv *il, struct ieee80211_vif *vif)
+{
+ struct il_host_cmd cmd = {
+ .id = C_SCAN,
+ .len = sizeof(struct il_scan_cmd),
+ .flags = CMD_SIZE_HUGE,
+ };
+ struct il_scan_cmd *scan;
+ struct il_rxon_context *ctx = &il->ctx;
+ u32 rate_flags = 0;
+ u16 cmd_len;
+ u16 rx_chain = 0;
+ enum ieee80211_band band;
+ u8 n_probes = 0;
+ u8 rx_ant = il->hw_params.valid_rx_ant;
+ u8 rate;
+ bool is_active = false;
+ int chan_mod;
+ u8 active_chains;
+ u8 scan_tx_antennas = il->hw_params.valid_tx_ant;
+ int ret;
+
+ lockdep_assert_held(&il->mutex);
+
+ ctx = il_rxon_ctx_from_vif(vif);
+
+ if (!il->scan_cmd) {
+ il->scan_cmd =
+ kmalloc(sizeof(struct il_scan_cmd) + IL_MAX_SCAN_SIZE,
+ GFP_KERNEL);
+ if (!il->scan_cmd) {
+ D_SCAN("fail to allocate memory for scan\n");
+ return -ENOMEM;
+ }
+ }
+ scan = il->scan_cmd;
+ memset(scan, 0, sizeof(struct il_scan_cmd) + IL_MAX_SCAN_SIZE);
+
+ scan->quiet_plcp_th = IL_PLCP_QUIET_THRESH;
+ scan->quiet_time = IL_ACTIVE_QUIET_TIME;
+
+ if (il_is_any_associated(il)) {
+ u16 interval;
+ u32 extra;
+ u32 suspend_time = 100;
+ u32 scan_suspend_time = 100;
+
+ D_INFO("Scanning while associated...\n");
+ interval = vif->bss_conf.beacon_int;
+
+ scan->suspend_time = 0;
+ scan->max_out_time = cpu_to_le32(200 * 1024);
+ if (!interval)
+ interval = suspend_time;
+
+ extra = (suspend_time / interval) << 22;
+ scan_suspend_time =
+ (extra | ((suspend_time % interval) * 1024));
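+		/* Encode the suspend time relative to the beacon interval:
+		 * the number of whole beacon intervals goes into bits 22 and
+		 * up, and the remainder is scaled by 1024 (TU to usec).
+		 * E.g. suspend_time = 100 and interval = 70 would give
+		 * (1 << 22) | 30720. */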
+ scan->suspend_time = cpu_to_le32(scan_suspend_time);
+ D_SCAN("suspend_time 0x%X beacon interval %d\n",
+ scan_suspend_time, interval);
+ }
+
+ if (il->scan_request->n_ssids) {
+ int i, p = 0;
+ D_SCAN("Kicking off active scan\n");
+ for (i = 0; i < il->scan_request->n_ssids; i++) {
+ /* always does wildcard anyway */
+ if (!il->scan_request->ssids[i].ssid_len)
+ continue;
+ scan->direct_scan[p].id = WLAN_EID_SSID;
+ scan->direct_scan[p].len =
+ il->scan_request->ssids[i].ssid_len;
+ memcpy(scan->direct_scan[p].ssid,
+ il->scan_request->ssids[i].ssid,
+ il->scan_request->ssids[i].ssid_len);
+ n_probes++;
+ p++;
+ }
+ is_active = true;
+ } else
+ D_SCAN("Start passive scan.\n");
+
+ scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
+ scan->tx_cmd.sta_id = ctx->bcast_sta_id;
+ scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+
+ switch (il->scan_band) {
+ case IEEE80211_BAND_2GHZ:
+ scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
+ chan_mod =
+ le32_to_cpu(il->ctx.active.
+ flags & RXON_FLG_CHANNEL_MODE_MSK) >>
+ RXON_FLG_CHANNEL_MODE_POS;
+ if (chan_mod == CHANNEL_MODE_PURE_40) {
+ rate = RATE_6M_PLCP;
+ } else {
+ rate = RATE_1M_PLCP;
+ rate_flags = RATE_MCS_CCK_MSK;
+ }
+ break;
+ case IEEE80211_BAND_5GHZ:
+ rate = RATE_6M_PLCP;
+ break;
+ default:
+ IL_WARN("Invalid scan band\n");
+ return -EIO;
+ }
+
+ /*
+ * If active scanning is requested but a certain channel is
+ * marked passive, we can do active scanning if we detect
+ * transmissions.
+ *
+ * There is an issue with some firmware versions that triggers
+ * a sysassert on a "good CRC threshold" of zero (== disabled),
+ * on a radar channel even though this means that we should NOT
+ * send probes.
+ *
+ * The "good CRC threshold" is the number of frames that we
+ * need to receive during our dwell time on a channel before
+ * sending out probes -- setting this to a huge value will
+ * mean we never reach it, but at the same time work around
+ * the aforementioned issue. Thus use IL_GOOD_CRC_TH_NEVER
+ * here instead of IL_GOOD_CRC_TH_DISABLED.
+ */
+ scan->good_CRC_th =
+ is_active ? IL_GOOD_CRC_TH_DEFAULT : IL_GOOD_CRC_TH_NEVER;
+
+ band = il->scan_band;
+
+ if (il->cfg->scan_rx_antennas[band])
+ rx_ant = il->cfg->scan_rx_antennas[band];
+
+ il4965_toggle_tx_ant(il, &il->scan_tx_ant[band], scan_tx_antennas);
+ rate_flags |= BIT(il->scan_tx_ant[band]) << RATE_MCS_ANT_POS;
+ scan->tx_cmd.rate_n_flags = cpu_to_le32(rate | rate_flags);
+
+ /* In power save mode use one chain, otherwise use all chains */
+ if (test_bit(S_POWER_PMI, &il->status)) {
+ /* rx_ant has been set to all valid chains previously */
+ active_chains =
+ rx_ant & ((u8) (il->chain_noise_data.active_chains));
+ if (!active_chains)
+ active_chains = rx_ant;
+
+ D_SCAN("chain_noise_data.active_chains: %u\n",
+ il->chain_noise_data.active_chains);
+
+ rx_ant = il4965_first_antenna(active_chains);
+ }
+
+ /* MIMO is not used here, but value is required */
+ rx_chain |= il->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
+ rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
+ rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
+ rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
+ scan->rx_chain = cpu_to_le16(rx_chain);
+
+ cmd_len =
+ il_fill_probe_req(il, (struct ieee80211_mgmt *)scan->data,
+ vif->addr, il->scan_request->ie,
+ il->scan_request->ie_len,
+ IL_MAX_SCAN_SIZE - sizeof(*scan));
+ scan->tx_cmd.len = cpu_to_le16(cmd_len);
+
+ scan->filter_flags |=
+ (RXON_FILTER_ACCEPT_GRP_MSK | RXON_FILTER_BCON_AWARE_MSK);
+
+ scan->channel_count =
+ il4965_get_channels_for_scan(il, vif, band, is_active, n_probes,
+ (void *)&scan->data[cmd_len]);
+ if (scan->channel_count == 0) {
+ D_SCAN("channel count %d\n", scan->channel_count);
+ return -EIO;
+ }
+
+ cmd.len +=
+ le16_to_cpu(scan->tx_cmd.len) +
+ scan->channel_count * sizeof(struct il_scan_channel);
+ cmd.data = scan;
+ scan->len = cpu_to_le16(cmd.len);
+
+ set_bit(S_SCAN_HW, &il->status);
+
+ ret = il_send_cmd_sync(il, &cmd);
+ if (ret)
+ clear_bit(S_SCAN_HW, &il->status);
+
+ return ret;
+}
+
+int
+il4965_manage_ibss_station(struct il_priv *il, struct ieee80211_vif *vif,
+ bool add)
+{
+ struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
+
+ if (add)
+ return il4965_add_bssid_station(il, vif_priv->ctx,
+ vif->bss_conf.bssid,
+ &vif_priv->ibss_bssid_sta_id);
+ return il_remove_station(il, vif_priv->ibss_bssid_sta_id,
+ vif->bss_conf.bssid);
+}
+
+void
+il4965_free_tfds_in_queue(struct il_priv *il, int sta_id, int tid, int freed)
+{
+ lockdep_assert_held(&il->sta_lock);
+
+ if (il->stations[sta_id].tid[tid].tfds_in_queue >= freed)
+ il->stations[sta_id].tid[tid].tfds_in_queue -= freed;
+ else {
+ D_TX("free more than tfds_in_queue (%u:%d)\n",
+ il->stations[sta_id].tid[tid].tfds_in_queue, freed);
+ il->stations[sta_id].tid[tid].tfds_in_queue = 0;
+ }
+}
+
+#define IL_TX_QUEUE_MSK 0xfffff
+
+static bool
+il4965_is_single_rx_stream(struct il_priv *il)
+{
+ return il->current_ht_config.smps == IEEE80211_SMPS_STATIC ||
+ il->current_ht_config.single_chain_sufficient;
+}
+
+#define IL_NUM_RX_CHAINS_MULTIPLE 3
+#define IL_NUM_RX_CHAINS_SINGLE 2
+#define IL_NUM_IDLE_CHAINS_DUAL 2
+#define IL_NUM_IDLE_CHAINS_SINGLE 1
+
+/*
+ * Determine how many receiver/antenna chains to use.
+ *
+ * More provides better reception via diversity. Fewer saves power
+ * at the expense of throughput, but only when not in powersave to
+ * start with.
+ *
+ * MIMO (dual stream) requires at least 2, but works better with 3.
+ * This does not determine *which* chains to use, just how many.
+ */
+static int
+il4965_get_active_rx_chain_count(struct il_priv *il)
+{
+ /* # of Rx chains to use when expecting MIMO. */
+ if (il4965_is_single_rx_stream(il))
+ return IL_NUM_RX_CHAINS_SINGLE;
+ else
+ return IL_NUM_RX_CHAINS_MULTIPLE;
+}
+
+/*
+ * When we are in power saving mode, unless the device supports spatial
+ * multiplexing power save, use the active count as the rx chain count.
+ */
+static int
+il4965_get_idle_rx_chain_count(struct il_priv *il, int active_cnt)
+{
+ /* # Rx chains when idling, depending on SMPS mode */
+ switch (il->current_ht_config.smps) {
+ case IEEE80211_SMPS_STATIC:
+ case IEEE80211_SMPS_DYNAMIC:
+ return IL_NUM_IDLE_CHAINS_SINGLE;
+ case IEEE80211_SMPS_OFF:
+ return active_cnt;
+ default:
+ WARN(1, "invalid SMPS mode %d", il->current_ht_config.smps);
+ return active_cnt;
+ }
+}
+
+/* up to 4 chains */
+static u8
+il4965_count_chain_bitmap(u32 chain_bitmap)
+{
+ u8 res;
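+	/* Popcount of the low four bits: e.g. a bitmap of 0x5 (chains 0 and 2)
+	 * yields 2. */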
+ res = (chain_bitmap & BIT(0)) >> 0;
+ res += (chain_bitmap & BIT(1)) >> 1;
+ res += (chain_bitmap & BIT(2)) >> 2;
+ res += (chain_bitmap & BIT(3)) >> 3;
+ return res;
+}
+
+/**
+ * il4965_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
+ *
+ * Selects how many and which Rx receivers/antennas/chains to use.
+ * This should not be used for the scan command ... it puts data in the wrong place.
+ */
+void
+il4965_set_rxon_chain(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ bool is_single = il4965_is_single_rx_stream(il);
+ bool is_cam = !test_bit(S_POWER_PMI, &il->status);
+ u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt;
+ u32 active_chains;
+ u16 rx_chain;
+
+ /* Tell uCode which antennas are actually connected.
+ * Before first association, we assume all antennas are connected.
+ * Just after first association, il4965_chain_noise_calibration()
+ * checks which antennas actually *are* connected. */
+ if (il->chain_noise_data.active_chains)
+ active_chains = il->chain_noise_data.active_chains;
+ else
+ active_chains = il->hw_params.valid_rx_ant;
+
+ rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS;
+
+ /* How many receivers should we use? */
+ active_rx_cnt = il4965_get_active_rx_chain_count(il);
+ idle_rx_cnt = il4965_get_idle_rx_chain_count(il, active_rx_cnt);
+
+	/* correct rx chain count according to hw settings
+	 * and chain noise calibration
+	 */
+ valid_rx_cnt = il4965_count_chain_bitmap(active_chains);
+ if (valid_rx_cnt < active_rx_cnt)
+ active_rx_cnt = valid_rx_cnt;
+
+ if (valid_rx_cnt < idle_rx_cnt)
+ idle_rx_cnt = valid_rx_cnt;
+
+ rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
+ rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS;
+
+ ctx->staging.rx_chain = cpu_to_le16(rx_chain);
+
+ if (!is_single && active_rx_cnt >= IL_NUM_RX_CHAINS_SINGLE && is_cam)
+ ctx->staging.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
+ else
+ ctx->staging.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;
+
+ D_ASSOC("rx_chain=0x%X active=%d idle=%d\n", ctx->staging.rx_chain,
+ active_rx_cnt, idle_rx_cnt);
+
+ WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 ||
+ active_rx_cnt < idle_rx_cnt);
+}
+
+static const char *
+il4965_get_fh_string(int cmd)
+{
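+	/* IL_CMD() presumably expands to "case x: return #x;", mapping each
+	 * FH register offset to its name for the register dump below. */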
+ switch (cmd) {
+ IL_CMD(FH49_RSCSR_CHNL0_STTS_WPTR_REG);
+ IL_CMD(FH49_RSCSR_CHNL0_RBDCB_BASE_REG);
+ IL_CMD(FH49_RSCSR_CHNL0_WPTR);
+ IL_CMD(FH49_MEM_RCSR_CHNL0_CONFIG_REG);
+ IL_CMD(FH49_MEM_RSSR_SHARED_CTRL_REG);
+ IL_CMD(FH49_MEM_RSSR_RX_STATUS_REG);
+ IL_CMD(FH49_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
+ IL_CMD(FH49_TSSR_TX_STATUS_REG);
+ IL_CMD(FH49_TSSR_TX_ERROR_REG);
+ default:
+ return "UNKNOWN";
+ }
+}
+
+int
+il4965_dump_fh(struct il_priv *il, char **buf, bool display)
+{
+ int i;
+#ifdef CONFIG_IWLEGACY_DEBUG
+ int pos = 0;
+ size_t bufsz = 0;
+#endif
+ static const u32 fh_tbl[] = {
+ FH49_RSCSR_CHNL0_STTS_WPTR_REG,
+ FH49_RSCSR_CHNL0_RBDCB_BASE_REG,
+ FH49_RSCSR_CHNL0_WPTR,
+ FH49_MEM_RCSR_CHNL0_CONFIG_REG,
+ FH49_MEM_RSSR_SHARED_CTRL_REG,
+ FH49_MEM_RSSR_RX_STATUS_REG,
+ FH49_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
+ FH49_TSSR_TX_STATUS_REG,
+ FH49_TSSR_TX_ERROR_REG
+ };
+#ifdef CONFIG_IWLEGACY_DEBUG
+ if (display) {
+ bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
+ *buf = kmalloc(bufsz, GFP_KERNEL);
+ if (!*buf)
+ return -ENOMEM;
+ pos +=
+ scnprintf(*buf + pos, bufsz - pos, "FH register values:\n");
+ for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
+ pos +=
+ scnprintf(*buf + pos, bufsz - pos,
+ " %34s: 0X%08x\n",
+ il4965_get_fh_string(fh_tbl[i]),
+ il_rd(il, fh_tbl[i]));
+ }
+ return pos;
+ }
+#endif
+ IL_ERR("FH register values:\n");
+ for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
+ IL_ERR(" %34s: 0X%08x\n", il4965_get_fh_string(fh_tbl[i]),
+ il_rd(il, fh_tbl[i]));
+ }
+ return 0;
+}
+
+void
+il4965_hdl_missed_beacon(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ struct il_missed_beacon_notif *missed_beacon;
+
+ missed_beacon = &pkt->u.missed_beacon;
+ if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) >
+ il->missed_beacon_threshold) {
+ D_CALIB("missed bcn cnsq %d totl %d rcd %d expctd %d\n",
+ le32_to_cpu(missed_beacon->consecutive_missed_beacons),
+ le32_to_cpu(missed_beacon->total_missed_becons),
+ le32_to_cpu(missed_beacon->num_recvd_beacons),
+ le32_to_cpu(missed_beacon->num_expected_beacons));
+ if (!test_bit(S_SCANNING, &il->status))
+ il4965_init_sensitivity(il);
+ }
+}
+
+/* Calculate noise level, based on measurements during network silence just
+ * before arriving beacon. This measurement can be done only if we know
+ * exactly when to expect beacons, therefore only when we're associated. */
+static void
+il4965_rx_calc_noise(struct il_priv *il)
+{
+ struct stats_rx_non_phy *rx_info;
+ int num_active_rx = 0;
+ int total_silence = 0;
+ int bcn_silence_a, bcn_silence_b, bcn_silence_c;
+ int last_rx_noise;
+
+ rx_info = &(il->_4965.stats.rx.general);
+ bcn_silence_a =
+ le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
+ bcn_silence_b =
+ le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
+ bcn_silence_c =
+ le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
+
+ if (bcn_silence_a) {
+ total_silence += bcn_silence_a;
+ num_active_rx++;
+ }
+ if (bcn_silence_b) {
+ total_silence += bcn_silence_b;
+ num_active_rx++;
+ }
+ if (bcn_silence_c) {
+ total_silence += bcn_silence_c;
+ num_active_rx++;
+ }
+
+ /* Average among active antennas */
+ if (num_active_rx)
+ last_rx_noise = (total_silence / num_active_rx) - 107;
+ else
+ last_rx_noise = IL_NOISE_MEAS_NOT_AVAILABLE;
+
+ D_CALIB("inband silence a %u, b %u, c %u, dBm %d\n", bcn_silence_a,
+ bcn_silence_b, bcn_silence_c, last_rx_noise);
+}
+
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+/*
+ * Based on the assumption that all stats counters are DWORDs.
+ * FIXME: This function is for debugging only and does not handle
+ * counter roll-over.
+ */
+static void
+il4965_accumulative_stats(struct il_priv *il, __le32 * stats)
+{
+ int i, size;
+ __le32 *prev_stats;
+ u32 *accum_stats;
+ u32 *delta, *max_delta;
+ struct stats_general_common *general, *accum_general;
+ struct stats_tx *tx, *accum_tx;
+
+ prev_stats = (__le32 *) &il->_4965.stats;
+ accum_stats = (u32 *) &il->_4965.accum_stats;
+ size = sizeof(struct il_notif_stats);
+ general = &il->_4965.stats.general.common;
+ accum_general = &il->_4965.accum_stats.general.common;
+ tx = &il->_4965.stats.tx;
+ accum_tx = &il->_4965.accum_stats.tx;
+ delta = (u32 *) &il->_4965.delta_stats;
+ max_delta = (u32 *) &il->_4965.max_delta;
+
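+	/* For each 32-bit counter that increased since the previous
+	 * notification, accumulate the increase and track the largest
+	 * single-interval delta. E.g. a counter going from 10 to 15 adds 5
+	 * to the accumulated value and updates max_delta if 5 exceeds the
+	 * previous maximum. */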
+ for (i = sizeof(__le32); i < size;
+ i +=
+ sizeof(__le32), stats++, prev_stats++, delta++, max_delta++,
+ accum_stats++) {
+ if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
+ *delta =
+ (le32_to_cpu(*stats) - le32_to_cpu(*prev_stats));
+ *accum_stats += *delta;
+ if (*delta > *max_delta)
+ *max_delta = *delta;
+ }
+ }
+
+ /* reset accumulative stats for "no-counter" type stats */
+ accum_general->temperature = general->temperature;
+ accum_general->ttl_timestamp = general->ttl_timestamp;
+}
+#endif
+
+#define REG_RECALIB_PERIOD (60)
+
+void
+il4965_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ int change;
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+
+ D_RX("Statistics notification received (%d vs %d).\n",
+ (int)sizeof(struct il_notif_stats),
+ le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK);
+
+ change =
+ ((il->_4965.stats.general.common.temperature !=
+ pkt->u.stats.general.common.temperature) ||
+ ((il->_4965.stats.flag & STATS_REPLY_FLG_HT40_MODE_MSK) !=
+ (pkt->u.stats.flag & STATS_REPLY_FLG_HT40_MODE_MSK)));
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+ il4965_accumulative_stats(il, (__le32 *) &pkt->u.stats);
+#endif
+
+ /* TODO: reading some of stats is unneeded */
+ memcpy(&il->_4965.stats, &pkt->u.stats, sizeof(il->_4965.stats));
+
+ set_bit(S_STATS, &il->status);
+
+ /* Reschedule the stats timer to occur in
+ * REG_RECALIB_PERIOD seconds to ensure we get a
+ * thermal update even if the uCode doesn't give
+ * us one */
+ mod_timer(&il->stats_periodic,
+ jiffies + msecs_to_jiffies(REG_RECALIB_PERIOD * 1000));
+
+ if (unlikely(!test_bit(S_SCANNING, &il->status)) &&
+ (pkt->hdr.cmd == N_STATS)) {
+ il4965_rx_calc_noise(il);
+ queue_work(il->workqueue, &il->run_time_calib_work);
+ }
+ if (il->cfg->ops->lib->temp_ops.temperature && change)
+ il->cfg->ops->lib->temp_ops.temperature(il);
+}
+
+void
+il4965_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+
+ if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATS_CLEAR_MSK) {
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+ memset(&il->_4965.accum_stats, 0,
+ sizeof(struct il_notif_stats));
+ memset(&il->_4965.delta_stats, 0,
+ sizeof(struct il_notif_stats));
+ memset(&il->_4965.max_delta, 0, sizeof(struct il_notif_stats));
+#endif
+ D_RX("Statistics have been cleared\n");
+ }
+ il4965_hdl_stats(il, rxb);
+}
+
+
+/*
+ * mac80211 queues, ACs, hardware queues, FIFOs.
+ *
+ * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues
+ *
+ * Mac80211 uses the following numbers, which we get from it
+ * by way of skb_get_queue_mapping(skb):
+ *
+ * VO 0
+ * VI 1
+ * BE 2
+ * BK 3
+ *
+ *
+ * Regular (not A-MPDU) frames are put into hardware queues corresponding
+ * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
+ * own queue per aggregation session (RA/TID combination), such queues are
+ * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
+ * order to map frames to the right queue, we also need an AC->hw queue
+ * mapping. This is implemented here.
+ *
+ * Due to the way hw queues are set up (by the hw specific modules like
+ * 4965.c), the AC->hw queue mapping is the identity
+ * mapping.
+ */
+
+static const u8 tid_to_ac[] = {
+ IEEE80211_AC_BE,
+ IEEE80211_AC_BK,
+ IEEE80211_AC_BK,
+ IEEE80211_AC_BE,
+ IEEE80211_AC_VI,
+ IEEE80211_AC_VI,
+ IEEE80211_AC_VO,
+ IEEE80211_AC_VO
+};
+
+static inline int
+il4965_get_ac_from_tid(u16 tid)
+{
+ if (likely(tid < ARRAY_SIZE(tid_to_ac)))
+ return tid_to_ac[tid];
+
+ /* no support for TIDs 8-15 yet */
+ return -EINVAL;
+}
+
+static inline int
+il4965_get_fifo_from_tid(struct il_rxon_context *ctx, u16 tid)
+{
+ if (likely(tid < ARRAY_SIZE(tid_to_ac)))
+ return ctx->ac_to_fifo[tid_to_ac[tid]];
+
+ /* no support for TIDs 8-15 yet */
+ return -EINVAL;
+}
+
+/*
+ * Build the basic portion of the C_TX command.
+ */
+static void
+il4965_tx_cmd_build_basic(struct il_priv *il, struct sk_buff *skb,
+ struct il_tx_cmd *tx_cmd,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_hdr *hdr, u8 std_id)
+{
+ __le16 fc = hdr->frame_control;
+ __le32 tx_flags = tx_cmd->tx_flags;
+
+ tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
+ tx_flags |= TX_CMD_FLG_ACK_MSK;
+ if (ieee80211_is_mgmt(fc))
+ tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+ if (ieee80211_is_probe_resp(fc) &&
+ !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
+ tx_flags |= TX_CMD_FLG_TSF_MSK;
+ } else {
+ tx_flags &= (~TX_CMD_FLG_ACK_MSK);
+ tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+ }
+
+ if (ieee80211_is_back_req(fc))
+ tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
+
+ tx_cmd->sta_id = std_id;
+ if (ieee80211_has_morefrags(fc))
+ tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
+
+ if (ieee80211_is_data_qos(fc)) {
+ u8 *qc = ieee80211_get_qos_ctl(hdr);
+ tx_cmd->tid_tspec = qc[0] & 0xf;
+ tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
+ } else {
+ tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+ }
+
+ il_tx_cmd_protection(il, info, fc, &tx_flags);
+
+ tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
+ if (ieee80211_is_mgmt(fc)) {
+ if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
+ tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
+ else
+ tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
+ } else {
+ tx_cmd->timeout.pm_frame_timeout = 0;
+ }
+
+ tx_cmd->driver_txop = 0;
+ tx_cmd->tx_flags = tx_flags;
+ tx_cmd->next_frame_len = 0;
+}
+
+static void
+il4965_tx_cmd_build_rate(struct il_priv *il, struct il_tx_cmd *tx_cmd,
+ struct ieee80211_tx_info *info, __le16 fc)
+{
+ const u8 rts_retry_limit = 60;
+ u32 rate_flags;
+ int rate_idx;
+ u8 data_retry_limit;
+ u8 rate_plcp;
+
+ /* Set retry limit on DATA packets and Probe Responses */
+ if (ieee80211_is_probe_resp(fc))
+ data_retry_limit = 3;
+ else
+ data_retry_limit = IL4965_DEFAULT_TX_RETRY;
+ tx_cmd->data_retry_limit = data_retry_limit;
+ /* Set retry limit on RTS packets */
+ tx_cmd->rts_retry_limit = min(data_retry_limit, rts_retry_limit);
+
+ /* DATA packets will use the uCode station table for rate/antenna
+ * selection */
+ if (ieee80211_is_data(fc)) {
+ tx_cmd->initial_rate_idx = 0;
+ tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
+ return;
+ }
+
+ /**
+ * If the current TX rate stored in mac80211 has the MCS bit set, it's
+ * not really a TX rate. Thus, we use the lowest supported rate for
+ * this band. Also use the lowest supported rate if the stored rate
+ * idx is invalid.
+ */
+ rate_idx = info->control.rates[0].idx;
+ if ((info->control.rates[0].flags & IEEE80211_TX_RC_MCS) || rate_idx < 0
+ || rate_idx > RATE_COUNT_LEGACY)
+ rate_idx =
+ rate_lowest_index(&il->bands[info->band],
+ info->control.sta);
+ /* For 5 GHZ band, remap mac80211 rate indices into driver indices */
+ if (info->band == IEEE80211_BAND_5GHZ)
+ rate_idx += IL_FIRST_OFDM_RATE;
+ /* Get PLCP rate for tx_cmd->rate_n_flags */
+ rate_plcp = il_rates[rate_idx].plcp;
+ /* Zero out flags for this packet */
+ rate_flags = 0;
+
+ /* Set CCK flag as needed */
+ if (rate_idx >= IL_FIRST_CCK_RATE && rate_idx <= IL_LAST_CCK_RATE)
+ rate_flags |= RATE_MCS_CCK_MSK;
+
+ /* Set up antennas */
+ il4965_toggle_tx_ant(il, &il->mgmt_tx_ant, il->hw_params.valid_tx_ant);
+ rate_flags |= BIT(il->mgmt_tx_ant) << RATE_MCS_ANT_POS;
+
+ /* Set the rate in the TX cmd */
+ tx_cmd->rate_n_flags = cpu_to_le32(rate_plcp | rate_flags);
+}
+
+static void
+il4965_tx_cmd_build_hwcrypto(struct il_priv *il, struct ieee80211_tx_info *info,
+ struct il_tx_cmd *tx_cmd, struct sk_buff *skb_frag,
+ int sta_id)
+{
+ struct ieee80211_key_conf *keyconf = info->control.hw_key;
+
+ switch (keyconf->cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
+ memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
+ if (info->flags & IEEE80211_TX_CTL_AMPDU)
+ tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
+ D_TX("tx_cmd with AES hwcrypto\n");
+ break;
+
+ case WLAN_CIPHER_SUITE_TKIP:
+ tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
+ ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
+ D_TX("tx_cmd with tkip hwcrypto\n");
+ break;
+
+ case WLAN_CIPHER_SUITE_WEP104:
+ tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
+ /* fall through */
+ case WLAN_CIPHER_SUITE_WEP40:
+ tx_cmd->sec_ctl |=
+ (TX_CMD_SEC_WEP | (keyconf->keyidx & TX_CMD_SEC_MSK) <<
+ TX_CMD_SEC_SHIFT);
+
+ memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
+
+ D_TX("Configuring packet for WEP encryption " "with key %d\n",
+ keyconf->keyidx);
+ break;
+
+ default:
+ IL_ERR("Unknown encode cipher %x\n", keyconf->cipher);
+ break;
+ }
+}
+
+/*
+ * Start the C_TX command process.
+ */
+int
+il4965_tx_skb(struct il_priv *il, struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_sta *sta = info->control.sta;
+ struct il_station_priv *sta_priv = NULL;
+ struct il_tx_queue *txq;
+ struct il_queue *q;
+ struct il_device_cmd *out_cmd;
+ struct il_cmd_meta *out_meta;
+ struct il_tx_cmd *tx_cmd;
+ struct il_rxon_context *ctx = &il->ctx;
+ int txq_id;
+ dma_addr_t phys_addr;
+ dma_addr_t txcmd_phys;
+ dma_addr_t scratch_phys;
+ u16 len, firstlen, secondlen;
+ u16 seq_number = 0;
+ __le16 fc;
+ u8 hdr_len;
+ u8 sta_id;
+ u8 wait_write_ptr = 0;
+ u8 tid = 0;
+ u8 *qc = NULL;
+ unsigned long flags;
+ bool is_agg = false;
+
+ if (info->control.vif)
+ ctx = il_rxon_ctx_from_vif(info->control.vif);
+
+ spin_lock_irqsave(&il->lock, flags);
+ if (il_is_rfkill(il)) {
+ D_DROP("Dropping - RF KILL\n");
+ goto drop_unlock;
+ }
+
+ fc = hdr->frame_control;
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+ if (ieee80211_is_auth(fc))
+ D_TX("Sending AUTH frame\n");
+ else if (ieee80211_is_assoc_req(fc))
+ D_TX("Sending ASSOC frame\n");
+ else if (ieee80211_is_reassoc_req(fc))
+ D_TX("Sending REASSOC frame\n");
+#endif
+
+ hdr_len = ieee80211_hdrlen(fc);
+
+	/* For management frames use the broadcast id so as not to break aggregation */
+ if (!ieee80211_is_data(fc))
+ sta_id = ctx->bcast_sta_id;
+ else {
+ /* Find idx into station table for destination station */
+ sta_id = il_sta_id_or_broadcast(il, ctx, info->control.sta);
+
+ if (sta_id == IL_INVALID_STATION) {
+ D_DROP("Dropping - INVALID STATION: %pM\n", hdr->addr1);
+ goto drop_unlock;
+ }
+ }
+
+ D_TX("station Id %d\n", sta_id);
+
+ if (sta)
+ sta_priv = (void *)sta->drv_priv;
+
+ if (sta_priv && sta_priv->asleep &&
+ (info->flags & IEEE80211_TX_CTL_POLL_RESPONSE)) {
+ /*
+ * This sends an asynchronous command to the device,
+ * but we can rely on it being processed before the
+ * next frame is processed -- and the next frame to
+ * this station is the one that will consume this
+ * counter.
+ * For now set the counter to just 1 since we do not
+ * support uAPSD yet.
+ */
+ il4965_sta_modify_sleep_tx_count(il, sta_id, 1);
+ }
+
+ /*
+ * Send this frame after DTIM -- there's a special queue
+ * reserved for this for contexts that support AP mode.
+ */
+ if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
+ txq_id = ctx->mcast_queue;
+ /*
+ * The microcode will clear the more data
+ * bit in the last frame it transmits.
+ */
+ hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
+ } else
+ txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)];
+
+ /* irqs already disabled/saved above when locking il->lock */
+ spin_lock(&il->sta_lock);
+
+ if (ieee80211_is_data_qos(fc)) {
+ qc = ieee80211_get_qos_ctl(hdr);
+ tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
+ if (WARN_ON_ONCE(tid >= MAX_TID_COUNT)) {
+ spin_unlock(&il->sta_lock);
+ goto drop_unlock;
+ }
+ seq_number = il->stations[sta_id].tid[tid].seq_number;
+ seq_number &= IEEE80211_SCTL_SEQ;
+ hdr->seq_ctrl =
+ hdr->seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG);
+ hdr->seq_ctrl |= cpu_to_le16(seq_number);
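+		/* low 4 bits of seq_ctrl are the fragment number, so adding
+		 * 0x10 advances the sequence number by one for the next frame */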
+ seq_number += 0x10;
+ /* aggregation is on for this <sta,tid> */
+ if (info->flags & IEEE80211_TX_CTL_AMPDU &&
+ il->stations[sta_id].tid[tid].agg.state == IL_AGG_ON) {
+ txq_id = il->stations[sta_id].tid[tid].agg.txq_id;
+ is_agg = true;
+ }
+ }
+
+ txq = &il->txq[txq_id];
+ q = &txq->q;
+
+ if (unlikely(il_queue_space(q) < q->high_mark)) {
+ spin_unlock(&il->sta_lock);
+ goto drop_unlock;
+ }
+
+ if (ieee80211_is_data_qos(fc)) {
+ il->stations[sta_id].tid[tid].tfds_in_queue++;
+ if (!ieee80211_has_morefrags(fc))
+ il->stations[sta_id].tid[tid].seq_number = seq_number;
+ }
+
+ spin_unlock(&il->sta_lock);
+
+ /* Set up driver data for this TFD */
+ memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct il_tx_info));
+ txq->txb[q->write_ptr].skb = skb;
+ txq->txb[q->write_ptr].ctx = ctx;
+
+ /* Set up first empty entry in queue's array of Tx/cmd buffers */
+ out_cmd = txq->cmd[q->write_ptr];
+ out_meta = &txq->meta[q->write_ptr];
+ tx_cmd = &out_cmd->cmd.tx;
+ memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
+ memset(tx_cmd, 0, sizeof(struct il_tx_cmd));
+
+ /*
+ * Set up the Tx-command (not MAC!) header.
+ * Store the chosen Tx queue and TFD idx within the sequence field;
+ * after Tx, uCode's Tx response will return this value so driver can
+ * locate the frame within the tx queue and do post-tx processing.
+ */
+ out_cmd->hdr.cmd = C_TX;
+ out_cmd->hdr.sequence =
+ cpu_to_le16((u16)
+ (QUEUE_TO_SEQ(txq_id) | IDX_TO_SEQ(q->write_ptr)));
+
+ /* Copy MAC header from skb into command buffer */
+ memcpy(tx_cmd->hdr, hdr, hdr_len);
+
+ /* Total # bytes to be transmitted */
+ len = (u16) skb->len;
+ tx_cmd->len = cpu_to_le16(len);
+
+ if (info->control.hw_key)
+ il4965_tx_cmd_build_hwcrypto(il, info, tx_cmd, skb, sta_id);
+
+ /* TODO need this for burst mode later on */
+ il4965_tx_cmd_build_basic(il, skb, tx_cmd, info, hdr, sta_id);
+ il_dbg_log_tx_data_frame(il, len, hdr);
+
+ il4965_tx_cmd_build_rate(il, tx_cmd, info, fc);
+
+ il_update_stats(il, true, fc, len);
+ /*
+ * Use the first empty entry in this queue's command buffer array
+ * to contain the Tx command and MAC header concatenated together
+ * (payload data will be in another buffer).
+ * Size of this varies, due to varying MAC header length.
+ * If end is not dword aligned, we'll have 2 extra bytes at the end
+ * of the MAC header (device reads on dword boundaries).
+ * We'll tell device about this padding later.
+ */
+ len = sizeof(struct il_tx_cmd) + sizeof(struct il_cmd_header) + hdr_len;
+ firstlen = (len + 3) & ~3;
+
+ /* Tell NIC about any 2-byte padding after MAC header */
+ if (firstlen != len)
+ tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
+
+ /* Physical address of this Tx command's header (not MAC header!),
+ * within command buffer array. */
+ txcmd_phys =
+ pci_map_single(il->pci_dev, &out_cmd->hdr, firstlen,
+ PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
+ dma_unmap_len_set(out_meta, len, firstlen);
+ /* Add buffer containing Tx command and MAC(!) header to TFD's
+ * first entry */
+ il->cfg->ops->lib->txq_attach_buf_to_tfd(il, txq, txcmd_phys, firstlen,
+ 1, 0);
+
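+	/* For a fragment burst, defer updating the device write pointer
+	 * until the last fragment has been queued */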
+ if (!ieee80211_has_morefrags(hdr->frame_control)) {
+ txq->need_update = 1;
+ } else {
+ wait_write_ptr = 1;
+ txq->need_update = 0;
+ }
+
+ /* Set up TFD's 2nd entry to point directly to remainder of skb,
+ * if any (802.11 null frames have no payload). */
+ secondlen = skb->len - hdr_len;
+ if (secondlen > 0) {
+ phys_addr =
+ pci_map_single(il->pci_dev, skb->data + hdr_len, secondlen,
+ PCI_DMA_TODEVICE);
+ il->cfg->ops->lib->txq_attach_buf_to_tfd(il, txq, phys_addr,
+ secondlen, 0, 0);
+ }
+
+ scratch_phys =
+ txcmd_phys + sizeof(struct il_cmd_header) +
+ offsetof(struct il_tx_cmd, scratch);
+
+ /* take back ownership of DMA buffer to enable update */
+ pci_dma_sync_single_for_cpu(il->pci_dev, txcmd_phys, firstlen,
+ PCI_DMA_BIDIRECTIONAL);
+ tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
+ tx_cmd->dram_msb_ptr = il_get_dma_hi_addr(scratch_phys);
+
+ D_TX("sequence nr = 0X%x\n", le16_to_cpu(out_cmd->hdr.sequence));
+ D_TX("tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
+ il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd, sizeof(*tx_cmd));
+ il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd->hdr, hdr_len);
+
+ /* Set up entry for this TFD in Tx byte-count array */
+ if (info->flags & IEEE80211_TX_CTL_AMPDU)
+ il->cfg->ops->lib->txq_update_byte_cnt_tbl(il, txq,
+ le16_to_cpu(tx_cmd->
+ len));
+
+ pci_dma_sync_single_for_device(il->pci_dev, txcmd_phys, firstlen,
+ PCI_DMA_BIDIRECTIONAL);
+
+ /* Tell device the write idx *just past* this latest filled TFD */
+ q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
+ il_txq_update_write_ptr(il, txq);
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ /*
+ * At this point the frame is "transmitted" successfully
+ * and we will get a TX status notification eventually,
+	 * regardless of what happens below; the code that follows only
+	 * decides whether or not we should update the write pointer.
+ */
+
+ /*
+ * Avoid atomic ops if it isn't an associated client.
+ * Also, if this is a packet for aggregation, don't
+ * increase the counter because the ucode will stop
+ * aggregation queues when their respective station
+ * goes to sleep.
+ */
+ if (sta_priv && sta_priv->client && !is_agg)
+ atomic_inc(&sta_priv->pending_frames);
+
+ if (il_queue_space(q) < q->high_mark && il->mac80211_registered) {
+ if (wait_write_ptr) {
+ spin_lock_irqsave(&il->lock, flags);
+ txq->need_update = 1;
+ il_txq_update_write_ptr(il, txq);
+ spin_unlock_irqrestore(&il->lock, flags);
+ } else {
+ il_stop_queue(il, txq);
+ }
+ }
+
+ return 0;
+
+drop_unlock:
+ spin_unlock_irqrestore(&il->lock, flags);
+ return -1;
+}
+
+static inline int
+il4965_alloc_dma_ptr(struct il_priv *il, struct il_dma_ptr *ptr, size_t size)
+{
+ ptr->addr =
+ dma_alloc_coherent(&il->pci_dev->dev, size, &ptr->dma, GFP_KERNEL);
+ if (!ptr->addr)
+ return -ENOMEM;
+ ptr->size = size;
+ return 0;
+}
+
+static inline void
+il4965_free_dma_ptr(struct il_priv *il, struct il_dma_ptr *ptr)
+{
+ if (unlikely(!ptr->addr))
+ return;
+
+ dma_free_coherent(&il->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
+ memset(ptr, 0, sizeof(*ptr));
+}
+
+/**
+ * il4965_hw_txq_ctx_free - Free TXQ Context
+ *
+ * Destroy all TX DMA queues and structures
+ */
+void
+il4965_hw_txq_ctx_free(struct il_priv *il)
+{
+ int txq_id;
+
+ /* Tx queues */
+ if (il->txq) {
+ for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
+ if (txq_id == il->cmd_queue)
+ il_cmd_queue_free(il);
+ else
+ il_tx_queue_free(il, txq_id);
+ }
+ il4965_free_dma_ptr(il, &il->kw);
+
+ il4965_free_dma_ptr(il, &il->scd_bc_tbls);
+
+ /* free tx queue structure */
+ il_txq_mem(il);
+}
+
+/**
+ * il4965_txq_ctx_alloc - allocate TX queue context
+ *
+ * Allocate all Tx DMA structures and initialize them.
+ *
+ * Returns 0 on success, or an error code on failure.
+ */
+int
+il4965_txq_ctx_alloc(struct il_priv *il)
+{
+ int ret;
+ int txq_id, slots_num;
+ unsigned long flags;
+
+ /* Free all tx/cmd queues and keep-warm buffer */
+ il4965_hw_txq_ctx_free(il);
+
+ ret =
+ il4965_alloc_dma_ptr(il, &il->scd_bc_tbls,
+ il->hw_params.scd_bc_tbls_size);
+ if (ret) {
+ IL_ERR("Scheduler BC Table allocation failed\n");
+ goto error_bc_tbls;
+ }
+ /* Alloc keep-warm buffer */
+ ret = il4965_alloc_dma_ptr(il, &il->kw, IL_KW_SIZE);
+ if (ret) {
+ IL_ERR("Keep Warm allocation failed\n");
+ goto error_kw;
+ }
+
+ /* allocate tx queue structure */
+ ret = il_alloc_txq_mem(il);
+ if (ret)
+ goto error;
+
+ spin_lock_irqsave(&il->lock, flags);
+
+ /* Turn off all Tx DMA fifos */
+ il4965_txq_set_sched(il, 0);
+
+ /* Tell NIC where to find the "keep warm" buffer */
+ il_wr(il, FH49_KW_MEM_ADDR_REG, il->kw.dma >> 4);
+
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ /* Alloc and init all Tx queues, including the command queue (#4/#9) */
+ for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) {
+ slots_num =
+ (txq_id ==
+ il->cmd_queue) ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+ ret = il_tx_queue_init(il, &il->txq[txq_id], slots_num, txq_id);
+ if (ret) {
+ IL_ERR("Tx %d queue init failed\n", txq_id);
+ goto error;
+ }
+ }
+
+ return ret;
+
+error:
+ il4965_hw_txq_ctx_free(il);
+ il4965_free_dma_ptr(il, &il->kw);
+error_kw:
+ il4965_free_dma_ptr(il, &il->scd_bc_tbls);
+error_bc_tbls:
+ return ret;
+}
+
+void
+il4965_txq_ctx_reset(struct il_priv *il)
+{
+ int txq_id, slots_num;
+ unsigned long flags;
+
+ spin_lock_irqsave(&il->lock, flags);
+
+ /* Turn off all Tx DMA fifos */
+ il4965_txq_set_sched(il, 0);
+
+ /* Tell NIC where to find the "keep warm" buffer */
+ il_wr(il, FH49_KW_MEM_ADDR_REG, il->kw.dma >> 4);
+
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ /* Alloc and init all Tx queues, including the command queue (#4) */
+ for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) {
+ slots_num =
+ txq_id == il->cmd_queue ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+ il_tx_queue_reset(il, &il->txq[txq_id], slots_num, txq_id);
+ }
+}
+
+/**
+ * il4965_txq_ctx_stop - Stop all Tx DMA channels
+ */
+void
+il4965_txq_ctx_stop(struct il_priv *il)
+{
+ int ch, txq_id;
+ unsigned long flags;
+
+ /* Turn off all Tx DMA fifos */
+ spin_lock_irqsave(&il->lock, flags);
+
+ il4965_txq_set_sched(il, 0);
+
+ /* Stop each Tx DMA channel, and wait for it to be idle */
+ for (ch = 0; ch < il->hw_params.dma_chnl_num; ch++) {
+ il_wr(il, FH49_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
+ if (il_poll_bit
+ (il, FH49_TSSR_TX_STATUS_REG,
+ FH49_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), 1000))
+ IL_ERR("Failing on timeout while stopping"
+ " DMA channel %d [0x%08x]", ch,
+ il_rd(il, FH49_TSSR_TX_STATUS_REG));
+ }
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ if (!il->txq)
+ return;
+
+ /* Unmap DMA from host system and free skb's */
+ for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
+ if (txq_id == il->cmd_queue)
+ il_cmd_queue_unmap(il);
+ else
+ il_tx_queue_unmap(il, txq_id);
+}
+
+/*
+ * Find first available (lowest unused) Tx Queue, mark it "active".
+ * Called only when finding queue for aggregation.
+ * Should never return anything < 7, because they should already
+ * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
+ */
+static int
+il4965_txq_ctx_activate_free(struct il_priv *il)
+{
+ int txq_id;
+
+ for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
+ if (!test_and_set_bit(txq_id, &il->txq_ctx_active_msk))
+ return txq_id;
+ return -1;
+}
+
+/**
+ * il4965_tx_queue_stop_scheduler - Stop queue, but keep configuration
+ */
+static void
+il4965_tx_queue_stop_scheduler(struct il_priv *il, u16 txq_id)
+{
+ /* Simply stop the queue, but don't change any configuration;
+ * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
+ il_wr_prph(il, IL49_SCD_QUEUE_STATUS_BITS(txq_id),
+ (0 << IL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
+ (1 << IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
+}
+
+/**
+ * il4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue
+ */
+static int
+il4965_tx_queue_set_q2ratid(struct il_priv *il, u16 ra_tid, u16 txq_id)
+{
+ u32 tbl_dw_addr;
+ u32 tbl_dw;
+ u16 scd_q2ratid;
+
+ scd_q2ratid = ra_tid & IL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
+
+ tbl_dw_addr =
+ il->scd_base_addr + IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
+
+ tbl_dw = il_read_targ_mem(il, tbl_dw_addr);
+
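+	/* Each 32-bit word in the translation table covers two queues:
+	 * odd queues use the upper 16 bits, even queues the lower 16 */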
+ if (txq_id & 0x1)
+ tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
+ else
+ tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
+
+ il_write_targ_mem(il, tbl_dw_addr, tbl_dw);
+
+ return 0;
+}
+
+/**
+ * il4965_txq_agg_enable - Set up & enable aggregation for selected queue
+ *
+ * NOTE: txq_id must be greater than IL49_FIRST_AMPDU_QUEUE,
+ * i.e. it must be one of the higher queues used for aggregation
+ */
+static int
+il4965_txq_agg_enable(struct il_priv *il, int txq_id, int tx_fifo, int sta_id,
+ int tid, u16 ssn_idx)
+{
+ unsigned long flags;
+ u16 ra_tid;
+ int ret;
+
+ if ((IL49_FIRST_AMPDU_QUEUE > txq_id) ||
+ (IL49_FIRST_AMPDU_QUEUE +
+ il->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
+ IL_WARN("queue number out of range: %d, must be %d to %d\n",
+ txq_id, IL49_FIRST_AMPDU_QUEUE,
+ IL49_FIRST_AMPDU_QUEUE +
+ il->cfg->base_params->num_of_ampdu_queues - 1);
+ return -EINVAL;
+ }
+
+ ra_tid = BUILD_RAxTID(sta_id, tid);
+
+ /* Modify device's station table to Tx this TID */
+ ret = il4965_sta_tx_modify_enable_tid(il, sta_id, tid);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&il->lock, flags);
+
+ /* Stop this Tx queue before configuring it */
+ il4965_tx_queue_stop_scheduler(il, txq_id);
+
+ /* Map receiver-address / traffic-ID to this queue */
+ il4965_tx_queue_set_q2ratid(il, ra_tid, txq_id);
+
+ /* Set this queue as a chain-building queue */
+ il_set_bits_prph(il, IL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
+
+ /* Place first TFD at idx corresponding to start sequence number.
+ * Assumes that ssn_idx is valid (!= 0xFFF) */
+ il->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
+ il->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
+ il4965_set_wr_ptrs(il, txq_id, ssn_idx);
+
+ /* Set up Tx win size and frame limit for this queue */
+ il_write_targ_mem(il,
+ il->scd_base_addr +
+ IL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id),
+ (SCD_WIN_SIZE << IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS)
+ & IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
+
+ il_write_targ_mem(il,
+ il->scd_base_addr +
+ IL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
+ (SCD_FRAME_LIMIT <<
+ IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
+ IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
+
+ il_set_bits_prph(il, IL49_SCD_INTERRUPT_MASK, (1 << txq_id));
+
+ /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
+ il4965_tx_queue_set_status(il, &il->txq[txq_id], tx_fifo, 1);
+
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ return 0;
+}
+
+int
+il4965_tx_agg_start(struct il_priv *il, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid, u16 * ssn)
+{
+ int sta_id;
+ int tx_fifo;
+ int txq_id;
+ int ret;
+ unsigned long flags;
+ struct il_tid_data *tid_data;
+
+ tx_fifo = il4965_get_fifo_from_tid(il_rxon_ctx_from_vif(vif), tid);
+ if (unlikely(tx_fifo < 0))
+ return tx_fifo;
+
+ D_HT("%s on ra = %pM tid = %d\n", __func__, sta->addr, tid);
+
+ sta_id = il_sta_id(sta);
+ if (sta_id == IL_INVALID_STATION) {
+ IL_ERR("Start AGG on invalid station\n");
+ return -ENXIO;
+ }
+ if (unlikely(tid >= MAX_TID_COUNT))
+ return -EINVAL;
+
+ if (il->stations[sta_id].tid[tid].agg.state != IL_AGG_OFF) {
+ IL_ERR("Start AGG when state is not IL_AGG_OFF !\n");
+ return -ENXIO;
+ }
+
+ txq_id = il4965_txq_ctx_activate_free(il);
+ if (txq_id == -1) {
+ IL_ERR("No free aggregation queue available\n");
+ return -ENXIO;
+ }
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ tid_data = &il->stations[sta_id].tid[tid];
+ *ssn = SEQ_TO_SN(tid_data->seq_number);
+ tid_data->agg.txq_id = txq_id;
+ il_set_swq_id(&il->txq[txq_id], il4965_get_ac_from_tid(tid), txq_id);
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ ret = il4965_txq_agg_enable(il, txq_id, tx_fifo, sta_id, tid, *ssn);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ tid_data = &il->stations[sta_id].tid[tid];
+ if (tid_data->tfds_in_queue == 0) {
+ D_HT("HW queue is empty\n");
+ tid_data->agg.state = IL_AGG_ON;
+ ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ } else {
+ D_HT("HW queue is NOT empty: %d packets in HW queue\n",
+ tid_data->tfds_in_queue);
+ tid_data->agg.state = IL_EMPTYING_HW_QUEUE_ADDBA;
+ }
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+ return ret;
+}
+
+/**
+ * il4965_txq_agg_disable - deactivate the aggregation queue
+ *
+ * txq_id must be greater than IL49_FIRST_AMPDU_QUEUE;
+ * il->lock must be held by the caller.
+ */
+static int
+il4965_txq_agg_disable(struct il_priv *il, u16 txq_id, u16 ssn_idx, u8 tx_fifo)
+{
+ if ((IL49_FIRST_AMPDU_QUEUE > txq_id) ||
+ (IL49_FIRST_AMPDU_QUEUE +
+ il->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
+ IL_WARN("queue number out of range: %d, must be %d to %d\n",
+ txq_id, IL49_FIRST_AMPDU_QUEUE,
+ IL49_FIRST_AMPDU_QUEUE +
+ il->cfg->base_params->num_of_ampdu_queues - 1);
+ return -EINVAL;
+ }
+
+ il4965_tx_queue_stop_scheduler(il, txq_id);
+
+ il_clear_bits_prph(il, IL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
+
+ il->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
+ il->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
+	/* assumes that ssn_idx is valid (!= 0xFFF) */
+ il4965_set_wr_ptrs(il, txq_id, ssn_idx);
+
+ il_clear_bits_prph(il, IL49_SCD_INTERRUPT_MASK, (1 << txq_id));
+ il_txq_ctx_deactivate(il, txq_id);
+ il4965_tx_queue_set_status(il, &il->txq[txq_id], tx_fifo, 0);
+
+ return 0;
+}
+
+int
+il4965_tx_agg_stop(struct il_priv *il, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid)
+{
+ int tx_fifo_id, txq_id, sta_id, ssn;
+ struct il_tid_data *tid_data;
+ int write_ptr, read_ptr;
+ unsigned long flags;
+
+ tx_fifo_id = il4965_get_fifo_from_tid(il_rxon_ctx_from_vif(vif), tid);
+ if (unlikely(tx_fifo_id < 0))
+ return tx_fifo_id;
+
+ sta_id = il_sta_id(sta);
+
+ if (sta_id == IL_INVALID_STATION) {
+ IL_ERR("Invalid station for AGG tid %d\n", tid);
+ return -ENXIO;
+ }
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+
+ tid_data = &il->stations[sta_id].tid[tid];
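+	/* strip the fragment bits to get the starting sequence number */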
+ ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
+ txq_id = tid_data->agg.txq_id;
+
+ switch (il->stations[sta_id].tid[tid].agg.state) {
+ case IL_EMPTYING_HW_QUEUE_ADDBA:
+ /*
+ * This can happen if the peer stops aggregation
+ * again before we've had a chance to drain the
+ * queue we selected previously, i.e. before the
+ * session was really started completely.
+ */
+ D_HT("AGG stop before setup done\n");
+ goto turn_off;
+ case IL_AGG_ON:
+ break;
+ default:
+ IL_WARN("Stopping AGG while state not ON or starting\n");
+ }
+
+ write_ptr = il->txq[txq_id].q.write_ptr;
+ read_ptr = il->txq[txq_id].q.read_ptr;
+
+ /* The queue is not empty */
+ if (write_ptr != read_ptr) {
+ D_HT("Stopping a non empty AGG HW QUEUE\n");
+ il->stations[sta_id].tid[tid].agg.state =
+ IL_EMPTYING_HW_QUEUE_DELBA;
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+ return 0;
+ }
+
+ D_HT("HW queue is empty\n");
+turn_off:
+ il->stations[sta_id].tid[tid].agg.state = IL_AGG_OFF;
+
+ /* do not restore/save irqs */
+ spin_unlock(&il->sta_lock);
+ spin_lock(&il->lock);
+
+ /*
+	 * The only reason this call can fail is a queue number out of range,
+	 * which can happen if the uCode is reloaded and all the station
+	 * information is lost. If it is outside the range, there is no need
+	 * to deactivate the uCode queue; just return "success" to allow
+	 * mac80211 to clean up its own data.
+ */
+ il4965_txq_agg_disable(il, txq_id, ssn, tx_fifo_id);
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+
+ return 0;
+}
+
+int
+il4965_txq_check_empty(struct il_priv *il, int sta_id, u8 tid, int txq_id)
+{
+ struct il_queue *q = &il->txq[txq_id].q;
+ u8 *addr = il->stations[sta_id].sta.sta.addr;
+ struct il_tid_data *tid_data = &il->stations[sta_id].tid[tid];
+ struct il_rxon_context *ctx;
+
+ ctx = &il->ctx;
+
+ lockdep_assert_held(&il->sta_lock);
+
+ switch (il->stations[sta_id].tid[tid].agg.state) {
+ case IL_EMPTYING_HW_QUEUE_DELBA:
+		/* We are reclaiming the last packet of the aggregated HW queue */
+ if (txq_id == tid_data->agg.txq_id &&
+ q->read_ptr == q->write_ptr) {
+ u16 ssn = SEQ_TO_SN(tid_data->seq_number);
+ int tx_fifo = il4965_get_fifo_from_tid(ctx, tid);
+ D_HT("HW queue empty: continue DELBA flow\n");
+ il4965_txq_agg_disable(il, txq_id, ssn, tx_fifo);
+ tid_data->agg.state = IL_AGG_OFF;
+ ieee80211_stop_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
+ }
+ break;
+ case IL_EMPTYING_HW_QUEUE_ADDBA:
+ /* We are reclaiming the last packet of the queue */
+ if (tid_data->tfds_in_queue == 0) {
+ D_HT("HW queue empty: continue ADDBA flow\n");
+ tid_data->agg.state = IL_AGG_ON;
+ ieee80211_start_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
+ }
+ break;
+ }
+
+ return 0;
+}
+
+static void
+il4965_non_agg_tx_status(struct il_priv *il, struct il_rxon_context *ctx,
+ const u8 *addr1)
+{
+ struct ieee80211_sta *sta;
+ struct il_station_priv *sta_priv;
+
+ rcu_read_lock();
+ sta = ieee80211_find_sta(ctx->vif, addr1);
+ if (sta) {
+ sta_priv = (void *)sta->drv_priv;
+ /* avoid atomic ops if this isn't a client */
+ if (sta_priv->client &&
+ atomic_dec_return(&sta_priv->pending_frames) == 0)
+ ieee80211_sta_block_awake(il->hw, sta, false);
+ }
+ rcu_read_unlock();
+}
+
+static void
+il4965_tx_status(struct il_priv *il, struct il_tx_info *tx_info, bool is_agg)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
+
+ if (!is_agg)
+ il4965_non_agg_tx_status(il, tx_info->ctx, hdr->addr1);
+
+ ieee80211_tx_status_irqsafe(il->hw, tx_info->skb);
+}
+
+int
+il4965_tx_queue_reclaim(struct il_priv *il, int txq_id, int idx)
+{
+ struct il_tx_queue *txq = &il->txq[txq_id];
+ struct il_queue *q = &txq->q;
+ struct il_tx_info *tx_info;
+ int nfreed = 0;
+ struct ieee80211_hdr *hdr;
+
+ if (idx >= q->n_bd || il_queue_used(q, idx) == 0) {
+ IL_ERR("Read idx for DMA queue txq id (%d), idx %d, "
+ "is out of range [0-%d] %d %d.\n", txq_id, idx, q->n_bd,
+ q->write_ptr, q->read_ptr);
+ return 0;
+ }
+
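+	/* Advance read_ptr to one past the reclaimed idx, handing each
+	 * freed frame's status back to mac80211 on the way */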
+ for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
+ q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+
+ tx_info = &txq->txb[txq->q.read_ptr];
+
+ if (WARN_ON_ONCE(tx_info->skb == NULL))
+ continue;
+
+ hdr = (struct ieee80211_hdr *)tx_info->skb->data;
+ if (ieee80211_is_data_qos(hdr->frame_control))
+ nfreed++;
+
+ il4965_tx_status(il, tx_info,
+ txq_id >= IL4965_FIRST_AMPDU_QUEUE);
+ tx_info->skb = NULL;
+
+ il->cfg->ops->lib->txq_free_tfd(il, txq);
+ }
+ return nfreed;
+}
+
+/**
+ * il4965_tx_status_reply_compressed_ba - Update tx status from block-ack
+ *
+ * Go through block-ack's bitmap of ACK'd frames, update driver's record of
+ * ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
+ */
+static int
+il4965_tx_status_reply_compressed_ba(struct il_priv *il, struct il_ht_agg *agg,
+ struct il_compressed_ba_resp *ba_resp)
+{
+ int i, sh, ack;
+ u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
+ u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
+ int successes = 0;
+ struct ieee80211_tx_info *info;
+ u64 bitmap, sent_bitmap;
+
+ if (unlikely(!agg->wait_for_ba)) {
+ if (unlikely(ba_resp->bitmap))
+ IL_ERR("Received BA when not expected\n");
+ return -EINVAL;
+ }
+
+ /* Mark that the expected block-ack response arrived */
+ agg->wait_for_ba = 0;
+ D_TX_REPLY("BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);
+
+ /* Calculate shift to align block-ack bits with our Tx win bits */
+ sh = agg->start_idx - SEQ_TO_IDX(seq_ctl >> 4);
+	if (sh < 0)		/* something is wrong with the indices; wrap around */
+ sh += 0x100;
+
+ if (agg->frame_count > (64 - sh)) {
+ D_TX_REPLY("more frames than bitmap size");
+ return -1;
+ }
+
+ /* don't use 64-bit values for now */
+ bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
+
+ /* check for success or failure according to the
+ * transmitted bitmap and block-ack bitmap */
+ sent_bitmap = bitmap & agg->bitmap;
+
+ /* For each frame attempted in aggregation,
+ * update driver's record of tx frame's status. */
+ i = 0;
+ while (sent_bitmap) {
+ ack = sent_bitmap & 1ULL;
+ successes += ack;
+ D_TX_REPLY("%s ON i=%d idx=%d raw=%d\n", ack ? "ACK" : "NACK",
+ i, (agg->start_idx + i) & 0xff, agg->start_idx + i);
+ sent_bitmap >>= 1;
+ ++i;
+ }
+
+ D_TX_REPLY("Bitmap %llx\n", (unsigned long long)bitmap);
+
+ info = IEEE80211_SKB_CB(il->txq[scd_flow].txb[agg->start_idx].skb);
+ memset(&info->status, 0, sizeof(info->status));
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ info->flags |= IEEE80211_TX_STAT_AMPDU;
+ info->status.ampdu_ack_len = successes;
+ info->status.ampdu_len = agg->frame_count;
+ il4965_hwrate_to_tx_control(il, agg->rate_n_flags, info);
+
+ return 0;
+}
+
+/**
+ * il4965_hwrate_to_tx_control - translate uCode response to mac80211 tx status control values
+ */
+void
+il4965_hwrate_to_tx_control(struct il_priv *il, u32 rate_n_flags,
+ struct ieee80211_tx_info *info)
+{
+ struct ieee80211_tx_rate *r = &info->control.rates[0];
+
+ info->antenna_sel_tx =
+ ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
+ if (rate_n_flags & RATE_MCS_HT_MSK)
+ r->flags |= IEEE80211_TX_RC_MCS;
+ if (rate_n_flags & RATE_MCS_GF_MSK)
+ r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
+ if (rate_n_flags & RATE_MCS_HT40_MSK)
+ r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+ if (rate_n_flags & RATE_MCS_DUP_MSK)
+ r->flags |= IEEE80211_TX_RC_DUP_DATA;
+ if (rate_n_flags & RATE_MCS_SGI_MSK)
+ r->flags |= IEEE80211_TX_RC_SHORT_GI;
+ r->idx = il4965_hwrate_to_mac80211_idx(rate_n_flags, info->band);
+}
+
+/**
+ * il4965_hdl_compressed_ba - Handler for N_COMPRESSED_BA
+ *
+ * Handles block-acknowledge notification from device, which reports success
+ * of frames sent via aggregation.
+ */
+void
+il4965_hdl_compressed_ba(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ struct il_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
+ struct il_tx_queue *txq = NULL;
+ struct il_ht_agg *agg;
+ int idx;
+ int sta_id;
+ int tid;
+ unsigned long flags;
+
+ /* "flow" corresponds to Tx queue */
+ u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
+
+ /* "ssn" is start of block-ack Tx win, corresponds to idx
+ * (in Tx queue's circular buffer) of first TFD/frame in win */
+ u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
+
+ if (scd_flow >= il->hw_params.max_txq_num) {
+ IL_ERR("BUG_ON scd_flow is bigger than number of queues\n");
+ return;
+ }
+
+ txq = &il->txq[scd_flow];
+ sta_id = ba_resp->sta_id;
+ tid = ba_resp->tid;
+ agg = &il->stations[sta_id].tid[tid].agg;
+ if (unlikely(agg->txq_id != scd_flow)) {
+ /*
+		 * FIXME: this is a uCode bug which needs to be addressed;
+		 * log the information and return for now.
+		 * Since it can happen very often, and in order not to fill
+		 * the syslog, don't enable the logging by default.
+ */
+ D_TX_REPLY("BA scd_flow %d does not match txq_id %d\n",
+ scd_flow, agg->txq_id);
+ return;
+ }
+
+ /* Find idx just before block-ack win */
+ idx = il_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+
+ D_TX_REPLY("N_COMPRESSED_BA [%d] Received from %pM, " "sta_id = %d\n",
+ agg->wait_for_ba, (u8 *) &ba_resp->sta_addr_lo32,
+ ba_resp->sta_id);
+ D_TX_REPLY("TID = %d, SeqCtl = %d, bitmap = 0x%llx," "scd_flow = "
+ "%d, scd_ssn = %d\n", ba_resp->tid, ba_resp->seq_ctl,
+ (unsigned long long)le64_to_cpu(ba_resp->bitmap),
+ ba_resp->scd_flow, ba_resp->scd_ssn);
+ D_TX_REPLY("DAT start_idx = %d, bitmap = 0x%llx\n", agg->start_idx,
+ (unsigned long long)agg->bitmap);
+
+ /* Update driver's record of ACK vs. not for each frame in win */
+ il4965_tx_status_reply_compressed_ba(il, agg, ba_resp);
+
+ /* Release all TFDs before the SSN, i.e. all TFDs in front of
+ * block-ack win (we assume that they've been successfully
+ * transmitted ... if not, it's too late anyway). */
+ if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
+ /* calculate mac80211 ampdu sw queue to wake */
+ int freed = il4965_tx_queue_reclaim(il, scd_flow, idx);
+ il4965_free_tfds_in_queue(il, sta_id, tid, freed);
+
+ if (il_queue_space(&txq->q) > txq->q.low_mark &&
+ il->mac80211_registered &&
+ agg->state != IL_EMPTYING_HW_QUEUE_DELBA)
+ il_wake_queue(il, txq);
+
+ il4965_txq_check_empty(il, sta_id, tid, scd_flow);
+ }
+
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+}
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+const char *
+il4965_get_tx_fail_reason(u32 status)
+{
+#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
+#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x
+
+ switch (status & TX_STATUS_MSK) {
+ case TX_STATUS_SUCCESS:
+ return "SUCCESS";
+ TX_STATUS_POSTPONE(DELAY);
+ TX_STATUS_POSTPONE(FEW_BYTES);
+ TX_STATUS_POSTPONE(QUIET_PERIOD);
+ TX_STATUS_POSTPONE(CALC_TTAK);
+ TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
+ TX_STATUS_FAIL(SHORT_LIMIT);
+ TX_STATUS_FAIL(LONG_LIMIT);
+ TX_STATUS_FAIL(FIFO_UNDERRUN);
+ TX_STATUS_FAIL(DRAIN_FLOW);
+ TX_STATUS_FAIL(RFKILL_FLUSH);
+ TX_STATUS_FAIL(LIFE_EXPIRE);
+ TX_STATUS_FAIL(DEST_PS);
+ TX_STATUS_FAIL(HOST_ABORTED);
+ TX_STATUS_FAIL(BT_RETRY);
+ TX_STATUS_FAIL(STA_INVALID);
+ TX_STATUS_FAIL(FRAG_DROPPED);
+ TX_STATUS_FAIL(TID_DISABLE);
+ TX_STATUS_FAIL(FIFO_FLUSHED);
+ TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
+ TX_STATUS_FAIL(PASSIVE_NO_RX);
+ TX_STATUS_FAIL(NO_BEACON_ON_RADAR);
+ }
+
+ return "UNKNOWN";
+
+#undef TX_STATUS_FAIL
+#undef TX_STATUS_POSTPONE
+}
+#endif /* CONFIG_IWLEGACY_DEBUG */
+
+static struct il_link_quality_cmd *
+il4965_sta_alloc_lq(struct il_priv *il, u8 sta_id)
+{
+ int i, r;
+ struct il_link_quality_cmd *link_cmd;
+ u32 rate_flags = 0;
+ __le32 rate_n_flags;
+
+ link_cmd = kzalloc(sizeof(struct il_link_quality_cmd), GFP_KERNEL);
+ if (!link_cmd) {
+ IL_ERR("Unable to allocate memory for LQ cmd.\n");
+ return NULL;
+ }
+ /* Set up the rate scaling to start at selected rate, fall back
+ * all the way down to 1M in IEEE order, and then spin on 1M */
+ if (il->band == IEEE80211_BAND_5GHZ)
+ r = RATE_6M_IDX;
+ else
+ r = RATE_1M_IDX;
+
+ if (r >= IL_FIRST_CCK_RATE && r <= IL_LAST_CCK_RATE)
+ rate_flags |= RATE_MCS_CCK_MSK;
+
+ rate_flags |=
+ il4965_first_antenna(il->hw_params.
+ valid_tx_ant) << RATE_MCS_ANT_POS;
+ rate_n_flags = cpu_to_le32(il_rates[r].plcp | rate_flags);
+ for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
+ link_cmd->rs_table[i].rate_n_flags = rate_n_flags;
+
+ link_cmd->general_params.single_stream_ant_msk =
+ il4965_first_antenna(il->hw_params.valid_tx_ant);
+
+ link_cmd->general_params.dual_stream_ant_msk =
+ il->hw_params.valid_tx_ant & ~il4965_first_antenna(il->hw_params.
+ valid_tx_ant);
+ if (!link_cmd->general_params.dual_stream_ant_msk) {
+ link_cmd->general_params.dual_stream_ant_msk = ANT_AB;
+ } else if (il4965_num_of_ant(il->hw_params.valid_tx_ant) == 2) {
+ link_cmd->general_params.dual_stream_ant_msk =
+ il->hw_params.valid_tx_ant;
+ }
+
+ link_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
+ link_cmd->agg_params.agg_time_limit =
+ cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
+
+ link_cmd->sta_id = sta_id;
+
+ return link_cmd;
+}
+
+/*
+ * il4965_add_bssid_station - Add the special IBSS BSSID station
+ *
+ * Function sleeps.
+ */
+int
+il4965_add_bssid_station(struct il_priv *il, struct il_rxon_context *ctx,
+ const u8 *addr, u8 *sta_id_r)
+{
+ int ret;
+ u8 sta_id;
+ struct il_link_quality_cmd *link_cmd;
+ unsigned long flags;
+
+ if (sta_id_r)
+ *sta_id_r = IL_INVALID_STATION;
+
+ ret = il_add_station_common(il, ctx, addr, 0, NULL, &sta_id);
+ if (ret) {
+ IL_ERR("Unable to add station %pM\n", addr);
+ return ret;
+ }
+
+ if (sta_id_r)
+ *sta_id_r = sta_id;
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ il->stations[sta_id].used |= IL_STA_LOCAL;
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ /* Set up default rate scaling table in device's station table */
+ link_cmd = il4965_sta_alloc_lq(il, sta_id);
+ if (!link_cmd) {
+ IL_ERR("Unable to initialize rate scaling for station %pM.\n",
+ addr);
+ return -ENOMEM;
+ }
+
+ ret = il_send_lq_cmd(il, ctx, link_cmd, CMD_SYNC, true);
+ if (ret)
+ IL_ERR("Link quality command failed (%d)\n", ret);
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ il->stations[sta_id].lq = link_cmd;
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ return 0;
+}
+
+static int
+il4965_static_wepkey_cmd(struct il_priv *il, struct il_rxon_context *ctx,
+ bool send_if_empty)
+{
+ int i, not_empty = 0;
+ u8 buff[sizeof(struct il_wep_cmd) +
+ sizeof(struct il_wep_key) * WEP_KEYS_MAX];
+ struct il_wep_cmd *wep_cmd = (struct il_wep_cmd *)buff;
+ size_t cmd_size = sizeof(struct il_wep_cmd);
+ struct il_host_cmd cmd = {
+ .id = ctx->wep_key_cmd,
+ .data = wep_cmd,
+ .flags = CMD_SYNC,
+ };
+
+ might_sleep();
+
+ memset(wep_cmd, 0,
+ cmd_size + (sizeof(struct il_wep_key) * WEP_KEYS_MAX));
+
+ for (i = 0; i < WEP_KEYS_MAX; i++) {
+ wep_cmd->key[i].key_idx = i;
+ if (ctx->wep_keys[i].key_size) {
+ wep_cmd->key[i].key_offset = i;
+ not_empty = 1;
+ } else {
+ wep_cmd->key[i].key_offset = WEP_INVALID_OFFSET;
+ }
+
+ wep_cmd->key[i].key_size = ctx->wep_keys[i].key_size;
+ memcpy(&wep_cmd->key[i].key[3], ctx->wep_keys[i].key,
+ ctx->wep_keys[i].key_size);
+ }
+
+ wep_cmd->global_key_type = WEP_KEY_WEP_TYPE;
+ wep_cmd->num_keys = WEP_KEYS_MAX;
+
+ cmd_size += sizeof(struct il_wep_key) * WEP_KEYS_MAX;
+
+ cmd.len = cmd_size;
+
+ if (not_empty || send_if_empty)
+ return il_send_cmd(il, &cmd);
+ else
+ return 0;
+}
+
+int
+il4965_restore_default_wep_keys(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ lockdep_assert_held(&il->mutex);
+
+ return il4965_static_wepkey_cmd(il, ctx, false);
+}
+
+int
+il4965_remove_default_wep_key(struct il_priv *il, struct il_rxon_context *ctx,
+ struct ieee80211_key_conf *keyconf)
+{
+ int ret;
+
+ lockdep_assert_held(&il->mutex);
+
+ D_WEP("Removing default WEP key: idx=%d\n", keyconf->keyidx);
+
+ memset(&ctx->wep_keys[keyconf->keyidx], 0, sizeof(ctx->wep_keys[0]));
+ if (il_is_rfkill(il)) {
+ D_WEP("Not sending C_WEPKEY command due to RFKILL.\n");
+ /* but keys in device are clear anyway so return success */
+ return 0;
+ }
+	ret = il4965_static_wepkey_cmd(il, ctx, true);
+ D_WEP("Remove default WEP key: idx=%d ret=%d\n", keyconf->keyidx, ret);
+
+ return ret;
+}
+
+int
+il4965_set_default_wep_key(struct il_priv *il, struct il_rxon_context *ctx,
+ struct ieee80211_key_conf *keyconf)
+{
+ int ret;
+
+ lockdep_assert_held(&il->mutex);
+
+ if (keyconf->keylen != WEP_KEY_LEN_128 &&
+ keyconf->keylen != WEP_KEY_LEN_64) {
+ D_WEP("Bad WEP key length %d\n", keyconf->keylen);
+ return -EINVAL;
+ }
+
+ keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
+ keyconf->hw_key_idx = HW_KEY_DEFAULT;
+ il->stations[ctx->ap_sta_id].keyinfo.cipher = keyconf->cipher;
+
+ ctx->wep_keys[keyconf->keyidx].key_size = keyconf->keylen;
+ memcpy(&ctx->wep_keys[keyconf->keyidx].key, &keyconf->key,
+ keyconf->keylen);
+
+ ret = il4965_static_wepkey_cmd(il, ctx, false);
+ D_WEP("Set default WEP key: len=%d idx=%d ret=%d\n", keyconf->keylen,
+ keyconf->keyidx, ret);
+
+ return ret;
+}
+
+static int
+il4965_set_wep_dynamic_key_info(struct il_priv *il, struct il_rxon_context *ctx,
+ struct ieee80211_key_conf *keyconf, u8 sta_id)
+{
+ unsigned long flags;
+ __le16 key_flags = 0;
+ struct il_addsta_cmd sta_cmd;
+
+ lockdep_assert_held(&il->mutex);
+
+ keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
+
+ key_flags |= (STA_KEY_FLG_WEP | STA_KEY_FLG_MAP_KEY_MSK);
+ key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
+ key_flags &= ~STA_KEY_FLG_INVALID;
+
+ if (keyconf->keylen == WEP_KEY_LEN_128)
+ key_flags |= STA_KEY_FLG_KEY_SIZE_MSK;
+
+ if (sta_id == ctx->bcast_sta_id)
+ key_flags |= STA_KEY_MULTICAST_MSK;
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+
+ il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
+ il->stations[sta_id].keyinfo.keylen = keyconf->keylen;
+ il->stations[sta_id].keyinfo.keyidx = keyconf->keyidx;
+
+ memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, keyconf->keylen);
+
+ memcpy(&il->stations[sta_id].sta.key.key[3], keyconf->key,
+ keyconf->keylen);
+
+ if ((il->stations[sta_id].sta.key.
+ key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC)
+ il->stations[sta_id].sta.key.key_offset =
+ il_get_free_ucode_key_idx(il);
+	/* else, we are overriding an existing key => no need to allocate room
+ * in uCode. */
+
+ WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
+ "no space for a new key");
+
+ il->stations[sta_id].sta.key.key_flags = key_flags;
+ il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
+ il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+
+ memcpy(&sta_cmd, &il->stations[sta_id].sta,
+ sizeof(struct il_addsta_cmd));
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
+}
+
+static int
+il4965_set_ccmp_dynamic_key_info(struct il_priv *il,
+ struct il_rxon_context *ctx,
+ struct ieee80211_key_conf *keyconf, u8 sta_id)
+{
+ unsigned long flags;
+ __le16 key_flags = 0;
+ struct il_addsta_cmd sta_cmd;
+
+ lockdep_assert_held(&il->mutex);
+
+ key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
+ key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
+ key_flags &= ~STA_KEY_FLG_INVALID;
+
+ if (sta_id == ctx->bcast_sta_id)
+ key_flags |= STA_KEY_MULTICAST_MSK;
+
+ keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
+ il->stations[sta_id].keyinfo.keylen = keyconf->keylen;
+
+ memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, keyconf->keylen);
+
+ memcpy(il->stations[sta_id].sta.key.key, keyconf->key, keyconf->keylen);
+
+ if ((il->stations[sta_id].sta.key.
+ key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC)
+ il->stations[sta_id].sta.key.key_offset =
+ il_get_free_ucode_key_idx(il);
+	/* else, we are overriding an existing key => no need to allocate room
+ * in uCode. */
+
+ WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
+ "no space for a new key");
+
+ il->stations[sta_id].sta.key.key_flags = key_flags;
+ il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
+ il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+
+ memcpy(&sta_cmd, &il->stations[sta_id].sta,
+ sizeof(struct il_addsta_cmd));
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
+}
+
+static int
+il4965_set_tkip_dynamic_key_info(struct il_priv *il,
+ struct il_rxon_context *ctx,
+ struct ieee80211_key_conf *keyconf, u8 sta_id)
+{
+ unsigned long flags;
+ int ret = 0;
+ __le16 key_flags = 0;
+
+ key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK);
+ key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
+ key_flags &= ~STA_KEY_FLG_INVALID;
+
+ if (sta_id == ctx->bcast_sta_id)
+ key_flags |= STA_KEY_MULTICAST_MSK;
+
+ keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+ keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+
+ il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
+ il->stations[sta_id].keyinfo.keylen = 16;
+
+ if ((il->stations[sta_id].sta.key.
+ key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC)
+ il->stations[sta_id].sta.key.key_offset =
+ il_get_free_ucode_key_idx(il);
+	/* else, we are overriding an existing key => no need to allocate room
+ * in uCode. */
+
+ WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
+ "no space for a new key");
+
+ il->stations[sta_id].sta.key.key_flags = key_flags;
+
+	/* This copy is actually not needed: we get the key with each TX */
+ memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, 16);
+
+ memcpy(il->stations[sta_id].sta.key.key, keyconf->key, 16);
+
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ return ret;
+}
+
+void
+il4965_update_tkip_key(struct il_priv *il, struct il_rxon_context *ctx,
+ struct ieee80211_key_conf *keyconf,
+ struct ieee80211_sta *sta, u32 iv32, u16 * phase1key)
+{
+ u8 sta_id;
+ unsigned long flags;
+ int i;
+
+ if (il_scan_cancel(il)) {
+ /* cancel scan failed, just live w/ bad key and rely
+ briefly on SW decryption */
+ return;
+ }
+
+ sta_id = il_sta_id_or_broadcast(il, ctx, sta);
+ if (sta_id == IL_INVALID_STATION)
+ return;
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+
+ il->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32;
+
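+	/* the TKIP phase-1 key (TTAK) is five 16-bit words */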
+ for (i = 0; i < 5; i++)
+ il->stations[sta_id].sta.key.tkip_rx_ttak[i] =
+ cpu_to_le16(phase1key[i]);
+
+ il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
+ il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+
+ il_send_add_sta(il, &il->stations[sta_id].sta, CMD_ASYNC);
+
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+}
+
+int
+il4965_remove_dynamic_key(struct il_priv *il, struct il_rxon_context *ctx,
+ struct ieee80211_key_conf *keyconf, u8 sta_id)
+{
+ unsigned long flags;
+ u16 key_flags;
+ u8 keyidx;
+ struct il_addsta_cmd sta_cmd;
+
+ lockdep_assert_held(&il->mutex);
+
+ ctx->key_mapping_keys--;
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ key_flags = le16_to_cpu(il->stations[sta_id].sta.key.key_flags);
+ keyidx = (key_flags >> STA_KEY_FLG_KEYID_POS) & 0x3;
+
+ D_WEP("Remove dynamic key: idx=%d sta=%d\n", keyconf->keyidx, sta_id);
+
+ if (keyconf->keyidx != keyidx) {
+		/* We need to remove a key with an idx different from the one
+		 * in the uCode. This means that the key we need to remove has
+		 * been replaced by another one with a different idx.
+		 * Don't do anything and return OK.
+ */
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+ return 0;
+ }
+
+ if (il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) {
+ IL_WARN("Removing wrong key %d 0x%x\n", keyconf->keyidx,
+ key_flags);
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+ return 0;
+ }
+
+ if (!test_and_clear_bit
+ (il->stations[sta_id].sta.key.key_offset, &il->ucode_key_table))
+ IL_ERR("idx %d not used in uCode key table.\n",
+ il->stations[sta_id].sta.key.key_offset);
+ memset(&il->stations[sta_id].keyinfo, 0, sizeof(struct il_hw_key));
+ memset(&il->stations[sta_id].sta.key, 0, sizeof(struct il4965_keyinfo));
+ il->stations[sta_id].sta.key.key_flags =
+ STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
+ il->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET;
+ il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
+ il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+
+ if (il_is_rfkill(il)) {
+ D_WEP
+ ("Not sending C_ADD_STA command because RFKILL enabled.\n");
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+ return 0;
+ }
+ memcpy(&sta_cmd, &il->stations[sta_id].sta,
+ sizeof(struct il_addsta_cmd));
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
+}
+
+int
+il4965_set_dynamic_key(struct il_priv *il, struct il_rxon_context *ctx,
+ struct ieee80211_key_conf *keyconf, u8 sta_id)
+{
+ int ret;
+
+ lockdep_assert_held(&il->mutex);
+
+ ctx->key_mapping_keys++;
+ keyconf->hw_key_idx = HW_KEY_DYNAMIC;
+
+ switch (keyconf->cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ ret =
+ il4965_set_ccmp_dynamic_key_info(il, ctx, keyconf, sta_id);
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ ret =
+ il4965_set_tkip_dynamic_key_info(il, ctx, keyconf, sta_id);
+ break;
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ ret = il4965_set_wep_dynamic_key_info(il, ctx, keyconf, sta_id);
+ break;
+ default:
+ IL_ERR("Unknown alg: %s cipher = %x\n", __func__,
+ keyconf->cipher);
+ ret = -EINVAL;
+ }
+
+ D_WEP("Set dynamic key: cipher=%x len=%d idx=%d sta=%d ret=%d\n",
+ keyconf->cipher, keyconf->keylen, keyconf->keyidx, sta_id, ret);
+
+ return ret;
+}
+
+/**
+ * il4965_alloc_bcast_station - add broadcast station into driver's station table.
+ *
+ * This adds the broadcast station into the driver's station table
+ * and marks it driver active, so that it will be restored to the
+ * device at the next best time.
+ */
+int
+il4965_alloc_bcast_station(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ struct il_link_quality_cmd *link_cmd;
+ unsigned long flags;
+ u8 sta_id;
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ sta_id = il_prep_station(il, ctx, il_bcast_addr, false, NULL);
+ if (sta_id == IL_INVALID_STATION) {
+ IL_ERR("Unable to prepare broadcast station\n");
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ return -EINVAL;
+ }
+
+ il->stations[sta_id].used |= IL_STA_DRIVER_ACTIVE;
+ il->stations[sta_id].used |= IL_STA_BCAST;
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ link_cmd = il4965_sta_alloc_lq(il, sta_id);
+ if (!link_cmd) {
+ IL_ERR
+ ("Unable to initialize rate scaling for bcast station.\n");
+ return -ENOMEM;
+ }
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ il->stations[sta_id].lq = link_cmd;
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ return 0;
+}
+
+/**
+ * il4965_update_bcast_station - update broadcast station's LQ command
+ *
+ * Only used by iwl4965. Placed here to have all bcast station management
+ * code together.
+ */
+static int
+il4965_update_bcast_station(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ unsigned long flags;
+ struct il_link_quality_cmd *link_cmd;
+ u8 sta_id = ctx->bcast_sta_id;
+
+ link_cmd = il4965_sta_alloc_lq(il, sta_id);
+ if (!link_cmd) {
+ IL_ERR("Unable to initialize rate scaling for bcast sta.\n");
+ return -ENOMEM;
+ }
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ if (il->stations[sta_id].lq)
+ kfree(il->stations[sta_id].lq);
+ else
+ D_INFO("Bcast sta rate scaling has not been initialized.\n");
+ il->stations[sta_id].lq = link_cmd;
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ return 0;
+}
+
+int
+il4965_update_bcast_stations(struct il_priv *il)
+{
+ return il4965_update_bcast_station(il, &il->ctx);
+}
+
+/**
+ * il4965_sta_tx_modify_enable_tid - Enable Tx for this TID in station table
+ */
+int
+il4965_sta_tx_modify_enable_tid(struct il_priv *il, int sta_id, int tid)
+{
+ unsigned long flags;
+ struct il_addsta_cmd sta_cmd;
+
+ lockdep_assert_held(&il->mutex);
+
+ /* Remove "disable" flag, to enable Tx for this TID */
+ spin_lock_irqsave(&il->sta_lock, flags);
+ il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
+ il->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
+ il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+ memcpy(&sta_cmd, &il->stations[sta_id].sta,
+ sizeof(struct il_addsta_cmd));
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
+}
+
+int
+il4965_sta_rx_agg_start(struct il_priv *il, struct ieee80211_sta *sta, int tid,
+ u16 ssn)
+{
+ unsigned long flags;
+ int sta_id;
+ struct il_addsta_cmd sta_cmd;
+
+ lockdep_assert_held(&il->mutex);
+
+ sta_id = il_sta_id(sta);
+ if (sta_id == IL_INVALID_STATION)
+ return -ENXIO;
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ il->stations[sta_id].sta.station_flags_msk = 0;
+ il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK;
+ il->stations[sta_id].sta.add_immediate_ba_tid = (u8) tid;
+ il->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn);
+ il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+ memcpy(&sta_cmd, &il->stations[sta_id].sta,
+ sizeof(struct il_addsta_cmd));
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
+}
+
+int
+il4965_sta_rx_agg_stop(struct il_priv *il, struct ieee80211_sta *sta, int tid)
+{
+ unsigned long flags;
+ int sta_id;
+ struct il_addsta_cmd sta_cmd;
+
+ lockdep_assert_held(&il->mutex);
+
+ sta_id = il_sta_id(sta);
+ if (sta_id == IL_INVALID_STATION) {
+ IL_ERR("Invalid station for AGG tid %d\n", tid);
+ return -ENXIO;
+ }
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ il->stations[sta_id].sta.station_flags_msk = 0;
+ il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
+ il->stations[sta_id].sta.remove_immediate_ba_tid = (u8) tid;
+ il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+ memcpy(&sta_cmd, &il->stations[sta_id].sta,
+ sizeof(struct il_addsta_cmd));
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
+}
+
+void
+il4965_sta_modify_sleep_tx_count(struct il_priv *il, int sta_id, int cnt)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ il->stations[sta_id].sta.station_flags |= STA_FLG_PWR_SAVE_MSK;
+ il->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
+ il->stations[sta_id].sta.sta.modify_mask =
+ STA_MODIFY_SLEEP_TX_COUNT_MSK;
+ il->stations[sta_id].sta.sleep_tx_count = cpu_to_le16(cnt);
+ il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+ il_send_add_sta(il, &il->stations[sta_id].sta, CMD_ASYNC);
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+}
+
+void
+il4965_update_chain_flags(struct il_priv *il)
+{
+ if (il->cfg->ops->hcmd->set_rxon_chain) {
+ il->cfg->ops->hcmd->set_rxon_chain(il, &il->ctx);
+ if (il->ctx.active.rx_chain != il->ctx.staging.rx_chain)
+ il_commit_rxon(il, &il->ctx);
+ }
+}
+
+static void
+il4965_clear_free_frames(struct il_priv *il)
+{
+ struct list_head *element;
+
+ D_INFO("%d frames on pre-allocated heap on clear.\n", il->frames_count);
+
+ while (!list_empty(&il->free_frames)) {
+ element = il->free_frames.next;
+ list_del(element);
+ kfree(list_entry(element, struct il_frame, list));
+ il->frames_count--;
+ }
+
+ if (il->frames_count) {
+ IL_WARN("%d frames still in use. Did we lose one?\n",
+ il->frames_count);
+ il->frames_count = 0;
+ }
+}
+
+static struct il_frame *
+il4965_get_free_frame(struct il_priv *il)
+{
+ struct il_frame *frame;
+ struct list_head *element;
+ if (list_empty(&il->free_frames)) {
+ frame = kzalloc(sizeof(*frame), GFP_KERNEL);
+ if (!frame) {
+ IL_ERR("Could not allocate frame!\n");
+ return NULL;
+ }
+
+ il->frames_count++;
+ return frame;
+ }
+
+ element = il->free_frames.next;
+ list_del(element);
+ return list_entry(element, struct il_frame, list);
+}
+
+static void
+il4965_free_frame(struct il_priv *il, struct il_frame *frame)
+{
+ memset(frame, 0, sizeof(*frame));
+ list_add(&frame->list, &il->free_frames);
+}
+
+static u32
+il4965_fill_beacon_frame(struct il_priv *il, struct ieee80211_hdr *hdr,
+ int left)
+{
+ lockdep_assert_held(&il->mutex);
+
+ if (!il->beacon_skb)
+ return 0;
+
+ if (il->beacon_skb->len > left)
+ return 0;
+
+ memcpy(hdr, il->beacon_skb->data, il->beacon_skb->len);
+
+ return il->beacon_skb->len;
+}
+
+/* Parse the beacon frame to find the TIM element and set tim_idx & tim_size */
+static void
+il4965_set_beacon_tim(struct il_priv *il,
+ struct il_tx_beacon_cmd *tx_beacon_cmd, u8 * beacon,
+ u32 frame_size)
+{
+ u16 tim_idx;
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon;
+
+ /*
+ * The idx is relative to frame start but we start looking at the
+ * variable-length part of the beacon.
+ */
+ tim_idx = mgmt->u.beacon.variable - beacon;
+
+ /* Parse variable-length elements of beacon to find WLAN_EID_TIM */
+ while ((tim_idx < (frame_size - 2)) &&
+ (beacon[tim_idx] != WLAN_EID_TIM))
+ tim_idx += beacon[tim_idx + 1] + 2;
+
+ /* If TIM field was found, set variables */
+ if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
+ tx_beacon_cmd->tim_idx = cpu_to_le16(tim_idx);
+ tx_beacon_cmd->tim_size = beacon[tim_idx + 1];
+ } else
+ IL_WARN("Unable to find TIM Element in beacon\n");
+}
+
+static unsigned int
+il4965_hw_get_beacon_cmd(struct il_priv *il, struct il_frame *frame)
+{
+ struct il_tx_beacon_cmd *tx_beacon_cmd;
+ u32 frame_size;
+ u32 rate_flags;
+ u32 rate;
+ /*
+ * We have to set up the TX command, the TX Beacon command, and the
+ * beacon contents.
+ */
+
+ lockdep_assert_held(&il->mutex);
+
+ if (!il->beacon_ctx) {
+ IL_ERR("trying to build beacon w/o beacon context!\n");
+ return 0;
+ }
+
+ /* Initialize memory */
+ tx_beacon_cmd = &frame->u.beacon;
+ memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
+
+ /* Set up TX beacon contents */
+ frame_size =
+ il4965_fill_beacon_frame(il, tx_beacon_cmd->frame,
+ sizeof(frame->u) - sizeof(*tx_beacon_cmd));
+ if (WARN_ON_ONCE(frame_size > MAX_MPDU_SIZE))
+ return 0;
+ if (!frame_size)
+ return 0;
+
+ /* Set up TX command fields */
+ tx_beacon_cmd->tx.len = cpu_to_le16((u16) frame_size);
+ tx_beacon_cmd->tx.sta_id = il->beacon_ctx->bcast_sta_id;
+ tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+ tx_beacon_cmd->tx.tx_flags =
+ TX_CMD_FLG_SEQ_CTL_MSK | TX_CMD_FLG_TSF_MSK |
+ TX_CMD_FLG_STA_RATE_MSK;
+
+ /* Set up TX beacon command fields */
+ il4965_set_beacon_tim(il, tx_beacon_cmd, (u8 *) tx_beacon_cmd->frame,
+ frame_size);
+
+ /* Set up packet rate and flags */
+ rate = il_get_lowest_plcp(il, il->beacon_ctx);
+ il4965_toggle_tx_ant(il, &il->mgmt_tx_ant, il->hw_params.valid_tx_ant);
+ rate_flags = BIT(il->mgmt_tx_ant) << RATE_MCS_ANT_POS;
+ if ((rate >= IL_FIRST_CCK_RATE) && (rate <= IL_LAST_CCK_RATE))
+ rate_flags |= RATE_MCS_CCK_MSK;
+ tx_beacon_cmd->tx.rate_n_flags = cpu_to_le32(rate | rate_flags);
+
+ return sizeof(*tx_beacon_cmd) + frame_size;
+}
+
+int
+il4965_send_beacon_cmd(struct il_priv *il)
+{
+ struct il_frame *frame;
+ unsigned int frame_size;
+ int rc;
+
+ frame = il4965_get_free_frame(il);
+ if (!frame) {
+ IL_ERR("Could not obtain free frame buffer for beacon "
+ "command.\n");
+ return -ENOMEM;
+ }
+
+ frame_size = il4965_hw_get_beacon_cmd(il, frame);
+ if (!frame_size) {
+ IL_ERR("Error configuring the beacon command\n");
+ il4965_free_frame(il, frame);
+ return -EINVAL;
+ }
+
+ rc = il_send_cmd_pdu(il, C_TX_BEACON, frame_size, &frame->u.cmd[0]);
+
+ il4965_free_frame(il, frame);
+
+ return rc;
+}
+
+static inline dma_addr_t
+il4965_tfd_tb_get_addr(struct il_tfd *tfd, u8 idx)
+{
+ struct il_tfd_tb *tb = &tfd->tbs[idx];
+
+ dma_addr_t addr = get_unaligned_le32(&tb->lo);
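+	/* on platforms with >32-bit DMA addresses, bits 35:32 are
+	 * recovered from the low nibble of hi_n_len */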
+ if (sizeof(dma_addr_t) > sizeof(u32))
+ addr |=
+ ((dma_addr_t) (le16_to_cpu(tb->hi_n_len) & 0xF) << 16) <<
+ 16;
+
+ return addr;
+}
+
+static inline u16
+il4965_tfd_tb_get_len(struct il_tfd *tfd, u8 idx)
+{
+ struct il_tfd_tb *tb = &tfd->tbs[idx];
+
+ return le16_to_cpu(tb->hi_n_len) >> 4;
+}
+
+static inline void
+il4965_tfd_set_tb(struct il_tfd *tfd, u8 idx, dma_addr_t addr, u16 len)
+{
+ struct il_tfd_tb *tb = &tfd->tbs[idx];
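+	/* hi_n_len packs the 12-bit length in its upper bits and the
+	 * high 4 address bits (35:32) in its low nibble */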
+ u16 hi_n_len = len << 4;
+
+ put_unaligned_le32(addr, &tb->lo);
+ if (sizeof(dma_addr_t) > sizeof(u32))
+ hi_n_len |= ((addr >> 16) >> 16) & 0xF;
+
+ tb->hi_n_len = cpu_to_le16(hi_n_len);
+
+ tfd->num_tbs = idx + 1;
+}
+
+static inline u8
+il4965_tfd_get_num_tbs(struct il_tfd *tfd)
+{
+ return tfd->num_tbs & 0x1f;
+}
+
+/**
+ * il4965_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
+ * @il - driver private data
+ * @txq - tx queue
+ *
+ * Does NOT advance any TFD circular buffer read/write idxes
+ * Does NOT free the TFD itself (which is within circular buffer)
+ */
+void
+il4965_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq)
+{
+ struct il_tfd *tfd_tmp = (struct il_tfd *)txq->tfds;
+ struct il_tfd *tfd;
+ struct pci_dev *dev = il->pci_dev;
+ int idx = txq->q.read_ptr;
+ int i;
+ int num_tbs;
+
+ tfd = &tfd_tmp[idx];
+
+ /* Sanity check on number of chunks */
+ num_tbs = il4965_tfd_get_num_tbs(tfd);
+
+ if (num_tbs >= IL_NUM_OF_TBS) {
+ IL_ERR("Too many chunks: %i\n", num_tbs);
+		/* @todo issue fatal error, it is quite a serious situation */
+ return;
+ }
+
+ /* Unmap tx_cmd */
+ if (num_tbs)
+ pci_unmap_single(dev, dma_unmap_addr(&txq->meta[idx], mapping),
+ dma_unmap_len(&txq->meta[idx], len),
+ PCI_DMA_BIDIRECTIONAL);
+
+ /* Unmap chunks, if any. */
+ for (i = 1; i < num_tbs; i++)
+ pci_unmap_single(dev, il4965_tfd_tb_get_addr(tfd, i),
+ il4965_tfd_tb_get_len(tfd, i),
+ PCI_DMA_TODEVICE);
+
+ /* free SKB */
+ if (txq->txb) {
+ struct sk_buff *skb;
+
+ skb = txq->txb[txq->q.read_ptr].skb;
+
+ /* can be called from irqs-disabled context */
+ if (skb) {
+ dev_kfree_skb_any(skb);
+ txq->txb[txq->q.read_ptr].skb = NULL;
+ }
+ }
+}
+
+int
+il4965_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq,
+ dma_addr_t addr, u16 len, u8 reset, u8 pad)
+{
+ struct il_queue *q;
+ struct il_tfd *tfd, *tfd_tmp;
+ u32 num_tbs;
+
+ q = &txq->q;
+ tfd_tmp = (struct il_tfd *)txq->tfds;
+ tfd = &tfd_tmp[q->write_ptr];
+
+ if (reset)
+ memset(tfd, 0, sizeof(*tfd));
+
+ num_tbs = il4965_tfd_get_num_tbs(tfd);
+
+	/* Each TFD can point to a maximum of 20 Tx buffers */
+ if (num_tbs >= IL_NUM_OF_TBS) {
+ IL_ERR("Error can not send more than %d chunks\n",
+ IL_NUM_OF_TBS);
+ return -EINVAL;
+ }
+
+ BUG_ON(addr & ~DMA_BIT_MASK(36));
+ if (unlikely(addr & ~IL_TX_DMA_MASK))
+ IL_ERR("Unaligned address = %llx\n", (unsigned long long)addr);
+
+ il4965_tfd_set_tb(tfd, num_tbs, addr, len);
+
+ return 0;
+}
+
+/*
+ * Tell nic where to find circular buffer of Tx Frame Descriptors for
+ * given Tx queue, and enable the DMA channel used for that queue.
+ *
+ * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
+ * channels supported in hardware.
+ */
+int
+il4965_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq)
+{
+ int txq_id = txq->q.id;
+
+ /* Circular buffer (TFD queue in DRAM) physical base address */
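+ /* The register takes the address in 256-byte units, hence the >> 8 */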
+ il_wr(il, FH49_MEM_CBBC_QUEUE(txq_id), txq->q.dma_addr >> 8);
+
+ return 0;
+}
+
+/******************************************************************************
+ *
+ * Generic RX handler implementations
+ *
+ ******************************************************************************/
+static void
+il4965_hdl_alive(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ struct il_alive_resp *palive;
+ struct delayed_work *pwork;
+
+ palive = &pkt->u.alive_frame;
+
+ D_INFO("Alive ucode status 0x%08X revision " "0x%01X 0x%01X\n",
+ palive->is_valid, palive->ver_type, palive->ver_subtype);
+
+ if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
+ D_INFO("Initialization Alive received.\n");
+ memcpy(&il->card_alive_init, &pkt->u.alive_frame,
+ sizeof(struct il_init_alive_resp));
+ pwork = &il->init_alive_start;
+ } else {
+ D_INFO("Runtime Alive received.\n");
+ memcpy(&il->card_alive, &pkt->u.alive_frame,
+ sizeof(struct il_alive_resp));
+ pwork = &il->alive_start;
+ }
+
+ /* We delay the ALIVE response by 5ms to
+ * give the HW RF Kill time to activate... */
+ if (palive->is_valid == UCODE_VALID_OK)
+ queue_delayed_work(il->workqueue, pwork, msecs_to_jiffies(5));
+ else
+ IL_WARN("uCode did not respond OK.\n");
+}
+
+/**
+ * il4965_bg_stats_periodic - Timer callback to queue stats
+ *
+ * This callback is provided in order to send a stats request.
+ *
+ * This timer function is continually reset to execute within
+ * REG_RECALIB_PERIOD seconds since the last N_STATS
+ * was received. We need to ensure we receive the stats in order
+ * to update the temperature used for calibrating the TXPOWER.
+ */
+static void
+il4965_bg_stats_periodic(unsigned long data)
+{
+ struct il_priv *il = (struct il_priv *)data;
+
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ return;
+
+ /* don't send host command if rf-kill is on */
+ if (!il_is_ready_rf(il))
+ return;
+
+ il_send_stats_request(il, CMD_ASYNC, false);
+}
+
+static void
+il4965_hdl_beacon(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ struct il4965_beacon_notif *beacon =
+ (struct il4965_beacon_notif *)pkt->u.raw;
+#ifdef CONFIG_IWLEGACY_DEBUG
+ u8 rate = il4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
+
+ D_RX("beacon status %x retries %d iss %d tsf:0x%.8x%.8x rate %d\n",
+ le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
+ beacon->beacon_notify_hdr.failure_frame,
+ le32_to_cpu(beacon->ibss_mgr_status),
+ le32_to_cpu(beacon->high_tsf), le32_to_cpu(beacon->low_tsf), rate);
+#endif
+ il->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
+}
+
+static void
+il4965_perform_ct_kill_task(struct il_priv *il)
+{
+ unsigned long flags;
+
+ D_POWER("Stop all queues\n");
+
+ if (il->mac80211_registered)
+ ieee80211_stop_queues(il->hw);
+
+ _il_wr(il, CSR_UCODE_DRV_GP1_SET,
+ CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
+ _il_rd(il, CSR_UCODE_DRV_GP1);
+
+ spin_lock_irqsave(&il->reg_lock, flags);
+ if (!_il_grab_nic_access(il))
+ _il_release_nic_access(il);
+ spin_unlock_irqrestore(&il->reg_lock, flags);
+}
+
+/* Handle notification from uCode that card's power state is changing
+ * due to software, hardware, or critical temperature RFKILL */
+static void
+il4965_hdl_card_state(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
+ unsigned long status = il->status;
+
+ D_RF_KILL("Card state received: HW:%s SW:%s CT:%s\n",
+ (flags & HW_CARD_DISABLED) ? "Kill" : "On",
+ (flags & SW_CARD_DISABLED) ? "Kill" : "On",
+ (flags & CT_CARD_DISABLED) ? "Reached" : "Not reached");
+
+ if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED | CT_CARD_DISABLED)) {
+
+ _il_wr(il, CSR_UCODE_DRV_GP1_SET,
+ CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+
+ il_wr(il, HBUS_TARG_MBX_C, HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
+
+ if (!(flags & RXON_CARD_DISABLED)) {
+ _il_wr(il, CSR_UCODE_DRV_GP1_CLR,
+ CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+ il_wr(il, HBUS_TARG_MBX_C,
+ HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
+ }
+ }
+
+ if (flags & CT_CARD_DISABLED)
+ il4965_perform_ct_kill_task(il);
+
+ if (flags & HW_CARD_DISABLED)
+ set_bit(S_RF_KILL_HW, &il->status);
+ else
+ clear_bit(S_RF_KILL_HW, &il->status);
+
+ if (!(flags & RXON_CARD_DISABLED))
+ il_scan_cancel(il);
+
+ if ((test_bit(S_RF_KILL_HW, &status) !=
+ test_bit(S_RF_KILL_HW, &il->status)))
+ wiphy_rfkill_set_hw_state(il->hw->wiphy,
+ test_bit(S_RF_KILL_HW, &il->status));
+ else
+ wake_up(&il->wait_command_queue);
+}
+
+/**
+ * il4965_setup_handlers - Initialize Rx handler callbacks
+ *
+ * Setup the RX handlers for each of the reply types sent from the uCode
+ * to the host.
+ *
+ * This function chains into the hardware specific files for them to setup
+ * any hardware specific handlers as well.
+ */
+static void
+il4965_setup_handlers(struct il_priv *il)
+{
+ il->handlers[N_ALIVE] = il4965_hdl_alive;
+ il->handlers[N_ERROR] = il_hdl_error;
+ il->handlers[N_CHANNEL_SWITCH] = il_hdl_csa;
+ il->handlers[N_SPECTRUM_MEASUREMENT] = il_hdl_spectrum_measurement;
+ il->handlers[N_PM_SLEEP] = il_hdl_pm_sleep;
+ il->handlers[N_PM_DEBUG_STATS] = il_hdl_pm_debug_stats;
+ il->handlers[N_BEACON] = il4965_hdl_beacon;
+
+ /*
+ * The same handler is used both for the REPLY to a discrete
+ * stats request from the host and for the periodic
+ * stats notifications (after received beacons) from the uCode.
+ */
+ il->handlers[C_STATS] = il4965_hdl_c_stats;
+ il->handlers[N_STATS] = il4965_hdl_stats;
+
+ il_setup_rx_scan_handlers(il);
+
+ /* status change handler */
+ il->handlers[N_CARD_STATE] = il4965_hdl_card_state;
+
+ il->handlers[N_MISSED_BEACONS] = il4965_hdl_missed_beacon;
+ /* Rx handlers */
+ il->handlers[N_RX_PHY] = il4965_hdl_rx_phy;
+ il->handlers[N_RX_MPDU] = il4965_hdl_rx;
+ /* block ack */
+ il->handlers[N_COMPRESSED_BA] = il4965_hdl_compressed_ba;
+ /* Set up hardware specific Rx handlers */
+ il->cfg->ops->lib->handler_setup(il);
+}
+
+/**
+ * il4965_rx_handle - Main entry function for receiving responses from uCode
+ *
+ * Uses the il->handlers callback function array to invoke
+ * the appropriate handlers, including command responses,
+ * frame-received notifications, and other notifications.
+ */
+void
+il4965_rx_handle(struct il_priv *il)
+{
+ struct il_rx_buf *rxb;
+ struct il_rx_pkt *pkt;
+ struct il_rx_queue *rxq = &il->rxq;
+ u32 r, i;
+ int reclaim;
+ unsigned long flags;
+ u8 fill_rx = 0;
+ u32 count = 8;
+ int total_empty;
+
+ /* uCode's read idx (stored in shared DRAM) indicates the last Rx
+ * buffer that the driver may process (last buffer filled by ucode). */
+ r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
+ i = rxq->read;
+
+ /* Rx interrupt, but nothing sent from uCode */
+ if (i == r)
+ D_RX("r = %d, i = %d\n", r, i);
+
+ /* calculate how many frames need to be restocked after handling RX */
+ total_empty = r - rxq->write_actual;
+ if (total_empty < 0)
+ total_empty += RX_QUEUE_SIZE;
+
+ if (total_empty > (RX_QUEUE_SIZE / 2))
+ fill_rx = 1;
+
+ while (i != r) {
+ int len;
+
+ rxb = rxq->queue[i];
+
+ /* If an RXB doesn't have a Rx queue slot associated with it,
+ * then a bug has been introduced in the queue refilling
+ * routines -- catch it here */
+ BUG_ON(rxb == NULL);
+
+ rxq->queue[i] = NULL;
+
+ pci_unmap_page(il->pci_dev, rxb->page_dma,
+ PAGE_SIZE << il->hw_params.rx_page_order,
+ PCI_DMA_FROMDEVICE);
+ pkt = rxb_addr(rxb);
+
+ len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
+ len += sizeof(u32); /* account for status word */
+
+ /* Reclaim a command buffer only if this packet is a response
+ * to a (driver-originated) command.
+ * If the packet (e.g. Rx frame) originated from uCode,
+ * there is no command buffer to reclaim.
+ * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
+ * but apparently a few don't get set; catch them here. */
+ reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
+ (pkt->hdr.cmd != N_RX_PHY) && (pkt->hdr.cmd != N_RX) &&
+ (pkt->hdr.cmd != N_RX_MPDU) &&
+ (pkt->hdr.cmd != N_COMPRESSED_BA) &&
+ (pkt->hdr.cmd != N_STATS) && (pkt->hdr.cmd != C_TX);
+
+ /* Based on type of command response or notification,
+ * handle those that need handling via function in
+ * handlers table. See il4965_setup_handlers() */
+ if (il->handlers[pkt->hdr.cmd]) {
+ D_RX("r = %d, i = %d, %s, 0x%02x\n", r, i,
+ il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
+ il->isr_stats.handlers[pkt->hdr.cmd]++;
+ il->handlers[pkt->hdr.cmd] (il, rxb);
+ } else {
+ /* No handling needed */
+ D_RX("r %d i %d No handler needed for %s, 0x%02x\n", r,
+ i, il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
+ }
+
+ /*
+ * XXX: After here, we should always check rxb->page
+ * against NULL before touching it or its virtual
+ * memory (pkt). Because some handler might have
+ * already taken or freed the pages.
+ */
+
+ if (reclaim) {
+ /* Invoke any callbacks, transfer the buffer to caller,
+ * and fire off the (possibly) blocking il_send_cmd()
+ * as we reclaim the driver command queue */
+ if (rxb->page)
+ il_tx_cmd_complete(il, rxb);
+ else
+ IL_WARN("Claim null rxb?\n");
+ }
+
+ /* Reuse the page if possible. For notification packets and
+ * SKBs that fail to Rx correctly, add them back into the
+ * rx_free list for reuse later. */
+ spin_lock_irqsave(&rxq->lock, flags);
+ if (rxb->page != NULL) {
+ rxb->page_dma =
+     pci_map_page(il->pci_dev, rxb->page, 0,
+                  PAGE_SIZE << il->hw_params.rx_page_order,
+                  PCI_DMA_FROMDEVICE);
+ list_add_tail(&rxb->list, &rxq->rx_free);
+ rxq->free_count++;
+ } else
+ list_add_tail(&rxb->list, &rxq->rx_used);
+
+ spin_unlock_irqrestore(&rxq->lock, flags);
+
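+ /* Advance the read index, wrapping around the circular Rx queue */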
+ i = (i + 1) & RX_QUEUE_MASK;
+ /* If there are a lot of unused frames,
+ * restock the Rx queue so ucode won't assert. */
+ if (fill_rx) {
+ count++;
+ if (count >= 8) {
+ rxq->read = i;
+ il4965_rx_replenish_now(il);
+ count = 0;
+ }
+ }
+ }
+
+ /* Backtrack one entry */
+ rxq->read = i;
+ if (fill_rx)
+ il4965_rx_replenish_now(il);
+ else
+ il4965_rx_queue_restock(il);
+}
+
+/* call this function to flush any scheduled tasklet */
+static inline void
+il4965_synchronize_irq(struct il_priv *il)
+{
+ /* wait to make sure we flush pending tasklet */
+ synchronize_irq(il->pci_dev->irq);
+ tasklet_kill(&il->irq_tasklet);
+}
+
+static void
+il4965_irq_tasklet(struct il_priv *il)
+{
+ u32 inta, handled = 0;
+ u32 inta_fh;
+ unsigned long flags;
+ u32 i;
+#ifdef CONFIG_IWLEGACY_DEBUG
+ u32 inta_mask;
+#endif
+
+ spin_lock_irqsave(&il->lock, flags);
+
+ /* Ack/clear/reset pending uCode interrupts.
+ * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
+ * and will clear only when CSR_FH_INT_STATUS gets cleared. */
+ inta = _il_rd(il, CSR_INT);
+ _il_wr(il, CSR_INT, inta);
+
+ /* Ack/clear/reset pending flow-handler (DMA) interrupts.
+ * Any new interrupts that happen after this, either while we're
+ * in this tasklet, or later, will show up in next ISR/tasklet. */
+ inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
+ _il_wr(il, CSR_FH_INT_STATUS, inta_fh);
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+ if (il_get_debug_level(il) & IL_DL_ISR) {
+ /* just for debug */
+ inta_mask = _il_rd(il, CSR_INT_MASK);
+ D_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", inta,
+ inta_mask, inta_fh);
+ }
+#endif
+
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
+ * atomic, make sure that inta covers all the interrupts that
+ * we've discovered, even if FH interrupt came in just after
+ * reading CSR_INT. */
+ if (inta_fh & CSR49_FH_INT_RX_MASK)
+ inta |= CSR_INT_BIT_FH_RX;
+ if (inta_fh & CSR49_FH_INT_TX_MASK)
+ inta |= CSR_INT_BIT_FH_TX;
+
+ /* Now service all interrupt bits discovered above. */
+ if (inta & CSR_INT_BIT_HW_ERR) {
+ IL_ERR("Hardware error detected. Restarting.\n");
+
+ /* Tell the device to stop sending interrupts */
+ il_disable_interrupts(il);
+
+ il->isr_stats.hw++;
+ il_irq_handle_error(il);
+
+ handled |= CSR_INT_BIT_HW_ERR;
+
+ return;
+ }
+#ifdef CONFIG_IWLEGACY_DEBUG
+ if (il_get_debug_level(il) & (IL_DL_ISR)) {
+ /* NIC fires this, but we don't use it, redundant with WAKEUP */
+ if (inta & CSR_INT_BIT_SCD) {
+ D_ISR("Scheduler finished to transmit "
+ "the frame/frames.\n");
+ il->isr_stats.sch++;
+ }
+
+ /* Alive notification via Rx interrupt will do the real work */
+ if (inta & CSR_INT_BIT_ALIVE) {
+ D_ISR("Alive interrupt\n");
+ il->isr_stats.alive++;
+ }
+ }
+#endif
+ /* Safely ignore these bits for debug checks below */
+ inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
+
+ /* HW RF KILL switch toggled */
+ if (inta & CSR_INT_BIT_RF_KILL) {
+ int hw_rf_kill = 0;
+ if (!(_il_rd(il, CSR_GP_CNTRL) &
+       CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
+ 	hw_rf_kill = 1;
+
+ IL_WARN("RF_KILL bit toggled to %s.\n",
+ hw_rf_kill ? "disable radio" : "enable radio");
+
+ il->isr_stats.rfkill++;
+
+ /* The driver only loads the ucode once, when the interface is
+ * set up. Loading is allowed even if the radio is killed, so
+ * update the killswitch state here; the rfkill handler will
+ * take care of restarting if needed.
+ */
+ if (!test_bit(S_ALIVE, &il->status)) {
+ if (hw_rf_kill)
+ set_bit(S_RF_KILL_HW, &il->status);
+ else
+ clear_bit(S_RF_KILL_HW, &il->status);
+ wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rf_kill);
+ }
+
+ handled |= CSR_INT_BIT_RF_KILL;
+ }
+
+ /* Chip got too hot and stopped itself */
+ if (inta & CSR_INT_BIT_CT_KILL) {
+ IL_ERR("Microcode CT kill error detected.\n");
+ il->isr_stats.ctkill++;
+ handled |= CSR_INT_BIT_CT_KILL;
+ }
+
+ /* Error detected by uCode */
+ if (inta & CSR_INT_BIT_SW_ERR) {
+ IL_ERR("Microcode SW error detected. " " Restarting 0x%X.\n",
+ inta);
+ il->isr_stats.sw++;
+ il_irq_handle_error(il);
+ handled |= CSR_INT_BIT_SW_ERR;
+ }
+
+ /*
+ * uCode wakes up after power-down sleep.
+ * Tell device about any new tx or host commands enqueued,
+ * and about any Rx buffers made available while asleep.
+ */
+ if (inta & CSR_INT_BIT_WAKEUP) {
+ D_ISR("Wakeup interrupt\n");
+ il_rx_queue_update_write_ptr(il, &il->rxq);
+ for (i = 0; i < il->hw_params.max_txq_num; i++)
+ il_txq_update_write_ptr(il, &il->txq[i]);
+ il->isr_stats.wakeup++;
+ handled |= CSR_INT_BIT_WAKEUP;
+ }
+
+ /* All uCode command responses, including Tx command responses,
+ * Rx "responses" (frame-received notification), and other
+ * notifications from uCode come through here*/
+ if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
+ il4965_rx_handle(il);
+ il->isr_stats.rx++;
+ handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
+ }
+
+ /* This "Tx" DMA channel is used only for loading uCode */
+ if (inta & CSR_INT_BIT_FH_TX) {
+ D_ISR("uCode load interrupt\n");
+ il->isr_stats.tx++;
+ handled |= CSR_INT_BIT_FH_TX;
+ /* Wake up uCode load routine, now that load is complete */
+ il->ucode_write_complete = 1;
+ wake_up(&il->wait_command_queue);
+ }
+
+ if (inta & ~handled) {
+ IL_ERR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
+ il->isr_stats.unhandled++;
+ }
+
+ if (inta & ~(il->inta_mask)) {
+ IL_WARN("Disabled INTA bits 0x%08x were pending\n",
+ inta & ~il->inta_mask);
+ IL_WARN(" with FH49_INT = 0x%08x\n", inta_fh);
+ }
+
+ /* Re-enable all interrupts */
+ /* only Re-enable if disabled by irq */
+ if (test_bit(S_INT_ENABLED, &il->status))
+ il_enable_interrupts(il);
+ /* Re-enable RF_KILL if it occurred */
+ else if (handled & CSR_INT_BIT_RF_KILL)
+ il_enable_rfkill_int(il);
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+ if (il_get_debug_level(il) & (IL_DL_ISR)) {
+ inta = _il_rd(il, CSR_INT);
+ inta_mask = _il_rd(il, CSR_INT_MASK);
+ inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
+ D_ISR("End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
+ "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
+ }
+#endif
+}
+
+/*****************************************************************************
+ *
+ * sysfs attributes
+ *
+ *****************************************************************************/
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+
+/*
+ * The following adds a new attribute to the sysfs representation
+ * of this device driver (i.e. a new file in /sys/class/net/wlan0/device/)
+ * used for controlling the debug level.
+ *
+ * See the level definitions in iwl for details.
+ *
+ * The debug_level managed via sysfs below is a per-device debug level
+ * that overrides the global debug level when set.
+ */
+static ssize_t
+il4965_show_debug_level(struct device *d, struct device_attribute *attr,
+ char *buf)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+ return sprintf(buf, "0x%08X\n", il_get_debug_level(il));
+}
+
+static ssize_t
+il4965_store_debug_level(struct device *d, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+ unsigned long val;
+ int ret;
+
+ ret = strict_strtoul(buf, 0, &val);
+ if (ret)
+ IL_ERR("%s is not in hex or decimal form.\n", buf);
+ else {
+ il->debug_level = val;
+ if (il_alloc_traffic_mem(il))
+ IL_ERR("Not enough memory to generate traffic log\n");
+ }
+ return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO, il4965_show_debug_level,
+ il4965_store_debug_level);
+
+#endif /* CONFIG_IWLEGACY_DEBUG */
+
+static ssize_t
+il4965_show_temperature(struct device *d, struct device_attribute *attr,
+ char *buf)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+
+ if (!il_is_alive(il))
+ return -EAGAIN;
+
+ return sprintf(buf, "%d\n", il->temperature);
+}
+
+static DEVICE_ATTR(temperature, S_IRUGO, il4965_show_temperature, NULL);
+
+static ssize_t
+il4965_show_tx_power(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+
+ if (!il_is_ready_rf(il))
+ return sprintf(buf, "off\n");
+ else
+ return sprintf(buf, "%d\n", il->tx_power_user_lmt);
+}
+
+static ssize_t
+il4965_store_tx_power(struct device *d, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+ unsigned long val;
+ int ret;
+
+ ret = strict_strtoul(buf, 10, &val);
+ if (ret)
+ IL_INFO("%s is not in decimal form.\n", buf);
+ else {
+ ret = il_set_tx_power(il, val, false);
+ if (ret)
+ IL_ERR("failed setting tx power (0x%d).\n", ret);
+ else
+ ret = count;
+ }
+ return ret;
+}
+
+static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, il4965_show_tx_power,
+ il4965_store_tx_power);
+
+static struct attribute *il_sysfs_entries[] = {
+ &dev_attr_temperature.attr,
+ &dev_attr_tx_power.attr,
+#ifdef CONFIG_IWLEGACY_DEBUG
+ &dev_attr_debug_level.attr,
+#endif
+ NULL
+};
+
+static struct attribute_group il_attribute_group = {
+ .name = NULL, /* put in device directory */
+ .attrs = il_sysfs_entries,
+};
+
+/******************************************************************************
+ *
+ * uCode download functions
+ *
+ ******************************************************************************/
+
+static void
+il4965_dealloc_ucode_pci(struct il_priv *il)
+{
+ il_free_fw_desc(il->pci_dev, &il->ucode_code);
+ il_free_fw_desc(il->pci_dev, &il->ucode_data);
+ il_free_fw_desc(il->pci_dev, &il->ucode_data_backup);
+ il_free_fw_desc(il->pci_dev, &il->ucode_init);
+ il_free_fw_desc(il->pci_dev, &il->ucode_init_data);
+ il_free_fw_desc(il->pci_dev, &il->ucode_boot);
+}
+
+static void
+il4965_nic_start(struct il_priv *il)
+{
+ /* Remove all resets to allow NIC to operate */
+ _il_wr(il, CSR_RESET, 0);
+}
+
+static void il4965_ucode_callback(const struct firmware *ucode_raw,
+ void *context);
+static int il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length);
+
+static int __must_check
+il4965_request_firmware(struct il_priv *il, bool first)
+{
+ const char *name_pre = il->cfg->fw_name_pre;
+ char tag[8];
+
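+ /* Start with the highest supported ucode API version and step down
+ * one version on each failed attempt. */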
+ if (first) {
+ il->fw_idx = il->cfg->ucode_api_max;
+ sprintf(tag, "%d", il->fw_idx);
+ } else {
+ il->fw_idx--;
+ sprintf(tag, "%d", il->fw_idx);
+ }
+
+ if (il->fw_idx < il->cfg->ucode_api_min) {
+ IL_ERR("no suitable firmware found!\n");
+ return -ENOENT;
+ }
+
+ sprintf(il->firmware_name, "%s%s%s", name_pre, tag, ".ucode");
+
+ D_INFO("attempting to load firmware '%s'\n", il->firmware_name);
+
+ return request_firmware_nowait(THIS_MODULE, 1, il->firmware_name,
+ &il->pci_dev->dev, GFP_KERNEL, il,
+ il4965_ucode_callback);
+}
+
+struct il4965_firmware_pieces {
+ const void *inst, *data, *init, *init_data, *boot;
+ size_t inst_size, data_size, init_size, init_data_size, boot_size;
+};
+
+static int
+il4965_load_firmware(struct il_priv *il, const struct firmware *ucode_raw,
+ struct il4965_firmware_pieces *pieces)
+{
+ struct il_ucode_header *ucode = (void *)ucode_raw->data;
+ u32 api_ver, hdr_size;
+ const u8 *src;
+
+ il->ucode_ver = le32_to_cpu(ucode->ver);
+ api_ver = IL_UCODE_API(il->ucode_ver);
+
+ switch (api_ver) {
+ default:
+ case 0:
+ case 1:
+ case 2:
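+ /* Legacy header: version word plus five image size fields, 24 bytes */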
+ hdr_size = 24;
+ if (ucode_raw->size < hdr_size) {
+ IL_ERR("File size too small!\n");
+ return -EINVAL;
+ }
+ pieces->inst_size = le32_to_cpu(ucode->v1.inst_size);
+ pieces->data_size = le32_to_cpu(ucode->v1.data_size);
+ pieces->init_size = le32_to_cpu(ucode->v1.init_size);
+ pieces->init_data_size = le32_to_cpu(ucode->v1.init_data_size);
+ pieces->boot_size = le32_to_cpu(ucode->v1.boot_size);
+ src = ucode->v1.data;
+ break;
+ }
+
+ /* Verify size of file vs. image size info in file's header */
+ if (ucode_raw->size !=
+ hdr_size + pieces->inst_size + pieces->data_size +
+ pieces->init_size + pieces->init_data_size + pieces->boot_size) {
+
+ IL_ERR("uCode file size %d does not match expected size\n",
+ (int)ucode_raw->size);
+ return -EINVAL;
+ }
+
+ pieces->inst = src;
+ src += pieces->inst_size;
+ pieces->data = src;
+ src += pieces->data_size;
+ pieces->init = src;
+ src += pieces->init_size;
+ pieces->init_data = src;
+ src += pieces->init_data_size;
+ pieces->boot = src;
+ src += pieces->boot_size;
+
+ return 0;
+}
+
+/**
+ * il4965_ucode_callback - callback when firmware was loaded
+ *
+ * If loaded successfully, copies the firmware into buffers
+ * for the card to fetch (via DMA).
+ */
+static void
+il4965_ucode_callback(const struct firmware *ucode_raw, void *context)
+{
+ struct il_priv *il = context;
+ struct il_ucode_header *ucode;
+ int err;
+ struct il4965_firmware_pieces pieces;
+ const unsigned int api_max = il->cfg->ucode_api_max;
+ const unsigned int api_min = il->cfg->ucode_api_min;
+ u32 api_ver;
+
+ u32 max_probe_length = 200;
+ u32 standard_phy_calibration_size =
+ IL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;
+
+ memset(&pieces, 0, sizeof(pieces));
+
+ if (!ucode_raw) {
+ if (il->fw_idx <= il->cfg->ucode_api_max)
+ IL_ERR("request for firmware file '%s' failed.\n",
+ il->firmware_name);
+ goto try_again;
+ }
+
+ D_INFO("Loaded firmware file '%s' (%zd bytes).\n", il->firmware_name,
+ ucode_raw->size);
+
+ /* Make sure that we got at least the API version number */
+ if (ucode_raw->size < 4) {
+ IL_ERR("File size way too small!\n");
+ goto try_again;
+ }
+
+ /* Data from ucode file: header followed by uCode images */
+ ucode = (struct il_ucode_header *)ucode_raw->data;
+
+ err = il4965_load_firmware(il, ucode_raw, &pieces);
+
+ if (err)
+ goto try_again;
+
+ api_ver = IL_UCODE_API(il->ucode_ver);
+
+ /*
+ * api_ver should match the api version forming part of the
+ * firmware filename ... but we don't check for that and only rely
+ * on the API version read from firmware header from here on forward
+ */
+ if (api_ver < api_min || api_ver > api_max) {
+ IL_ERR("Driver unable to support your firmware API. "
+ "Driver supports v%u, firmware is v%u.\n", api_max,
+ api_ver);
+ goto try_again;
+ }
+
+ if (api_ver != api_max)
+ IL_ERR("Firmware has old API version. Expected v%u, "
+ "got v%u. New firmware can be obtained "
+ "from http://www.intellinuxwireless.org.\n", api_max,
+ api_ver);
+
+ IL_INFO("loaded firmware version %u.%u.%u.%u\n",
+ IL_UCODE_MAJOR(il->ucode_ver), IL_UCODE_MINOR(il->ucode_ver),
+ IL_UCODE_API(il->ucode_ver), IL_UCODE_SERIAL(il->ucode_ver));
+
+ snprintf(il->hw->wiphy->fw_version, sizeof(il->hw->wiphy->fw_version),
+ "%u.%u.%u.%u", IL_UCODE_MAJOR(il->ucode_ver),
+ IL_UCODE_MINOR(il->ucode_ver), IL_UCODE_API(il->ucode_ver),
+ IL_UCODE_SERIAL(il->ucode_ver));
+
+ /*
+ * For any of the failures below (before allocating pci memory)
+ * we will try to load a version with a smaller API -- maybe the
+ * user just got a corrupted version of the latest API.
+ */
+
+ D_INFO("f/w package hdr ucode version raw = 0x%x\n", il->ucode_ver);
+ D_INFO("f/w package hdr runtime inst size = %Zd\n", pieces.inst_size);
+ D_INFO("f/w package hdr runtime data size = %Zd\n", pieces.data_size);
+ D_INFO("f/w package hdr init inst size = %Zd\n", pieces.init_size);
+ D_INFO("f/w package hdr init data size = %Zd\n", pieces.init_data_size);
+ D_INFO("f/w package hdr boot inst size = %Zd\n", pieces.boot_size);
+
+ /* Verify that uCode images will fit in card's SRAM */
+ if (pieces.inst_size > il->hw_params.max_inst_size) {
+ IL_ERR("uCode instr len %Zd too large to fit in\n",
+ pieces.inst_size);
+ goto try_again;
+ }
+
+ if (pieces.data_size > il->hw_params.max_data_size) {
+ IL_ERR("uCode data len %Zd too large to fit in\n",
+ pieces.data_size);
+ goto try_again;
+ }
+
+ if (pieces.init_size > il->hw_params.max_inst_size) {
+ IL_ERR("uCode init instr len %Zd too large to fit in\n",
+ pieces.init_size);
+ goto try_again;
+ }
+
+ if (pieces.init_data_size > il->hw_params.max_data_size) {
+ IL_ERR("uCode init data len %Zd too large to fit in\n",
+ pieces.init_data_size);
+ goto try_again;
+ }
+
+ if (pieces.boot_size > il->hw_params.max_bsm_size) {
+ IL_ERR("uCode boot instr len %Zd too large to fit in\n",
+ pieces.boot_size);
+ goto try_again;
+ }
+
+ /* Allocate ucode buffers for card's bus-master loading ... */
+
+ /* Runtime instructions and 2 copies of data:
+ * 1) unmodified from disk
+ * 2) backup cache for save/restore during power-downs */
+ il->ucode_code.len = pieces.inst_size;
+ il_alloc_fw_desc(il->pci_dev, &il->ucode_code);
+
+ il->ucode_data.len = pieces.data_size;
+ il_alloc_fw_desc(il->pci_dev, &il->ucode_data);
+
+ il->ucode_data_backup.len = pieces.data_size;
+ il_alloc_fw_desc(il->pci_dev, &il->ucode_data_backup);
+
+ if (!il->ucode_code.v_addr || !il->ucode_data.v_addr ||
+ !il->ucode_data_backup.v_addr)
+ goto err_pci_alloc;
+
+ /* Initialization instructions and data */
+ if (pieces.init_size && pieces.init_data_size) {
+ il->ucode_init.len = pieces.init_size;
+ il_alloc_fw_desc(il->pci_dev, &il->ucode_init);
+
+ il->ucode_init_data.len = pieces.init_data_size;
+ il_alloc_fw_desc(il->pci_dev, &il->ucode_init_data);
+
+ if (!il->ucode_init.v_addr || !il->ucode_init_data.v_addr)
+ goto err_pci_alloc;
+ }
+
+ /* Bootstrap (instructions only, no data) */
+ if (pieces.boot_size) {
+ il->ucode_boot.len = pieces.boot_size;
+ il_alloc_fw_desc(il->pci_dev, &il->ucode_boot);
+
+ if (!il->ucode_boot.v_addr)
+ goto err_pci_alloc;
+ }
+
+ /* Now that we can no longer fail, copy information */
+
+ il->sta_key_max_num = STA_KEY_MAX_NUM;
+
+ /* Copy images into buffers for card's bus-master reads ... */
+
+ /* Runtime instructions (first block of data in file) */
+ D_INFO("Copying (but not loading) uCode instr len %Zd\n",
+ pieces.inst_size);
+ memcpy(il->ucode_code.v_addr, pieces.inst, pieces.inst_size);
+
+ D_INFO("uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
+ il->ucode_code.v_addr, (u32) il->ucode_code.p_addr);
+
+ /*
+ * Runtime data
+ * NOTE: Copy into backup buffer will be done in il_up()
+ */
+ D_INFO("Copying (but not loading) uCode data len %Zd\n",
+ pieces.data_size);
+ memcpy(il->ucode_data.v_addr, pieces.data, pieces.data_size);
+ memcpy(il->ucode_data_backup.v_addr, pieces.data, pieces.data_size);
+
+ /* Initialization instructions */
+ if (pieces.init_size) {
+ D_INFO("Copying (but not loading) init instr len %Zd\n",
+ pieces.init_size);
+ memcpy(il->ucode_init.v_addr, pieces.init, pieces.init_size);
+ }
+
+ /* Initialization data */
+ if (pieces.init_data_size) {
+ D_INFO("Copying (but not loading) init data len %Zd\n",
+ pieces.init_data_size);
+ memcpy(il->ucode_init_data.v_addr, pieces.init_data,
+ pieces.init_data_size);
+ }
+
+ /* Bootstrap instructions */
+ D_INFO("Copying (but not loading) boot instr len %Zd\n",
+ pieces.boot_size);
+ memcpy(il->ucode_boot.v_addr, pieces.boot, pieces.boot_size);
+
+ /*
+ * Figure out the offsets of the chain noise reset and gain commands
+ * based on the size of the standard phy calibration command table.
+ */
+ il->_4965.phy_calib_chain_noise_reset_cmd =
+ standard_phy_calibration_size;
+ il->_4965.phy_calib_chain_noise_gain_cmd =
+ standard_phy_calibration_size + 1;
+
+ /**************************************************
+ * This is still part of probe() in a sense...
+ *
+ * 9. Setup and register with mac80211 and debugfs
+ **************************************************/
+ err = il4965_mac_setup_register(il, max_probe_length);
+ if (err)
+ goto out_unbind;
+
+ err = il_dbgfs_register(il, DRV_NAME);
+ if (err)
+ IL_ERR("failed to create debugfs files. Ignoring error: %d\n",
+ err);
+
+ err = sysfs_create_group(&il->pci_dev->dev.kobj, &il_attribute_group);
+ if (err) {
+ IL_ERR("failed to create sysfs device attributes\n");
+ goto out_unbind;
+ }
+
+ /* We have our copies now, allow the OS to release its copies */
+ release_firmware(ucode_raw);
+ complete(&il->_4965.firmware_loading_complete);
+ return;
+
+try_again:
+ /* try next, if any */
+ if (il4965_request_firmware(il, false))
+ goto out_unbind;
+ release_firmware(ucode_raw);
+ return;
+
+err_pci_alloc:
+ IL_ERR("failed to allocate pci memory\n");
+ il4965_dealloc_ucode_pci(il);
+out_unbind:
+ complete(&il->_4965.firmware_loading_complete);
+ device_release_driver(&il->pci_dev->dev);
+ release_firmware(ucode_raw);
+}
+
+static const char *const desc_lookup_text[] = {
+ "OK",
+ "FAIL",
+ "BAD_PARAM",
+ "BAD_CHECKSUM",
+ "NMI_INTERRUPT_WDG",
+ "SYSASSERT",
+ "FATAL_ERROR",
+ "BAD_COMMAND",
+ "HW_ERROR_TUNE_LOCK",
+ "HW_ERROR_TEMPERATURE",
+ "ILLEGAL_CHAN_FREQ",
+ "VCC_NOT_STBL",
+ "FH49_ERROR",
+ "NMI_INTERRUPT_HOST",
+ "NMI_INTERRUPT_ACTION_PT",
+ "NMI_INTERRUPT_UNKNOWN",
+ "UCODE_VERSION_MISMATCH",
+ "HW_ERROR_ABS_LOCK",
+ "HW_ERROR_CAL_LOCK_FAIL",
+ "NMI_INTERRUPT_INST_ACTION_PT",
+ "NMI_INTERRUPT_DATA_ACTION_PT",
+ "NMI_TRM_HW_ER",
+ "NMI_INTERRUPT_TRM",
+ "NMI_INTERRUPT_BREAK_POINT",
+ "DEBUG_0",
+ "DEBUG_1",
+ "DEBUG_2",
+ "DEBUG_3",
+};
+
+static struct {
+ char *name;
+ u8 num;
+ } advanced_lookup[] = {
+ 	{"NMI_INTERRUPT_WDG", 0x34},
+ 	{"SYSASSERT", 0x35},
+ 	{"UCODE_VERSION_MISMATCH", 0x37},
+ 	{"BAD_COMMAND", 0x38},
+ 	{"NMI_INTERRUPT_DATA_ACTION_PT", 0x3C},
+ 	{"FATAL_ERROR", 0x3D},
+ 	{"NMI_TRM_HW_ERR", 0x46},
+ 	{"NMI_INTERRUPT_TRM", 0x4C},
+ 	{"NMI_INTERRUPT_BREAK_POINT", 0x54},
+ 	{"NMI_INTERRUPT_WDG_RXF_FULL", 0x5C},
+ 	{"NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64},
+ 	{"NMI_INTERRUPT_HOST", 0x66},
+ 	{"NMI_INTERRUPT_ACTION_PT", 0x7C},
+ 	{"NMI_INTERRUPT_UNKNOWN", 0x84},
+ 	{"NMI_INTERRUPT_INST_ACTION_PT", 0x86},
+ 	{"ADVANCED_SYSASSERT", 0},
+ };
+
+static const char *
+il4965_desc_lookup(u32 num)
+{
+ int i;
+ int max = ARRAY_SIZE(desc_lookup_text);
+
+ if (num < max)
+ return desc_lookup_text[num];
+
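+ /* Not in the basic table; search the advanced table. The final
+ * entry ("ADVANCED_SYSASSERT") serves as the catch-all. */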
+ max = ARRAY_SIZE(advanced_lookup) - 1;
+ for (i = 0; i < max; i++) {
+ if (advanced_lookup[i].num == num)
+ break;
+ }
+ return advanced_lookup[i].name;
+}
+
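+ /* uCode error log layout in SRAM: a count word at the base address,
+ * followed by fixed-size error records. */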
+#define ERROR_START_OFFSET (1 * sizeof(u32))
+#define ERROR_ELEM_SIZE (7 * sizeof(u32))
+
+void
+il4965_dump_nic_error_log(struct il_priv *il)
+{
+ u32 data2, line;
+ u32 desc, time, count, base, data1;
+ u32 blink1, blink2, ilink1, ilink2;
+ u32 pc, hcmd;
+
+ if (il->ucode_type == UCODE_INIT)
+ base = le32_to_cpu(il->card_alive_init.error_event_table_ptr);
+ else
+ base = le32_to_cpu(il->card_alive.error_event_table_ptr);
+
+ if (!il->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
+ IL_ERR("Not valid error log pointer 0x%08X for %s uCode\n",
+ base, (il->ucode_type == UCODE_INIT) ? "Init" : "RT");
+ return;
+ }
+
+ count = il_read_targ_mem(il, base);
+
+ if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
+ IL_ERR("Start IWL Error Log Dump:\n");
+ IL_ERR("Status: 0x%08lX, count: %d\n", il->status, count);
+ }
+
+ desc = il_read_targ_mem(il, base + 1 * sizeof(u32));
+ il->isr_stats.err_code = desc;
+ pc = il_read_targ_mem(il, base + 2 * sizeof(u32));
+ blink1 = il_read_targ_mem(il, base + 3 * sizeof(u32));
+ blink2 = il_read_targ_mem(il, base + 4 * sizeof(u32));
+ ilink1 = il_read_targ_mem(il, base + 5 * sizeof(u32));
+ ilink2 = il_read_targ_mem(il, base + 6 * sizeof(u32));
+ data1 = il_read_targ_mem(il, base + 7 * sizeof(u32));
+ data2 = il_read_targ_mem(il, base + 8 * sizeof(u32));
+ line = il_read_targ_mem(il, base + 9 * sizeof(u32));
+ time = il_read_targ_mem(il, base + 11 * sizeof(u32));
+ hcmd = il_read_targ_mem(il, base + 22 * sizeof(u32));
+
+ IL_ERR("Desc Time "
+ "data1 data2 line\n");
+ IL_ERR("%-28s (0x%04X) %010u 0x%08X 0x%08X %u\n",
+ il4965_desc_lookup(desc), desc, time, data1, data2, line);
+ IL_ERR("pc blink1 blink2 ilink1 ilink2 hcmd\n");
+ IL_ERR("0x%05X 0x%05X 0x%05X 0x%05X 0x%05X 0x%05X\n", pc, blink1,
+ blink2, ilink1, ilink2, hcmd);
+}
+
+static void
+il4965_rf_kill_ct_config(struct il_priv *il)
+{
+ struct il_ct_kill_config cmd;
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&il->lock, flags);
+ _il_wr(il, CSR_UCODE_DRV_GP1_CLR,
+ CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ cmd.critical_temperature_R =
+ cpu_to_le32(il->hw_params.ct_kill_threshold);
+
+ ret = il_send_cmd_pdu(il, C_CT_KILL_CONFIG, sizeof(cmd), &cmd);
+ if (ret)
+ IL_ERR("C_CT_KILL_CONFIG failed\n");
+ else
+ D_INFO("C_CT_KILL_CONFIG " "succeeded, "
+ "critical temperature is %d\n",
+ il->hw_params.ct_kill_threshold);
+}
+
+static const s8 default_queue_to_tx_fifo[] = {
+ IL_TX_FIFO_VO,
+ IL_TX_FIFO_VI,
+ IL_TX_FIFO_BE,
+ IL_TX_FIFO_BK,
+ IL49_CMD_FIFO_NUM,
+ IL_TX_FIFO_UNUSED,
+ IL_TX_FIFO_UNUSED,
+};
+
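+ /* Build a bitmask with bits lo..hi (inclusive) set */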
+#define IL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
+
+static int
+il4965_alive_notify(struct il_priv *il)
+{
+ u32 a;
+ unsigned long flags;
+ int i, chan;
+ u32 reg_val;
+
+ spin_lock_irqsave(&il->lock, flags);
+
+ /* Clear 4965's internal Tx Scheduler data base */
+ il->scd_base_addr = il_rd_prph(il, IL49_SCD_SRAM_BASE_ADDR);
+ a = il->scd_base_addr + IL49_SCD_CONTEXT_DATA_OFFSET;
+ for (; a < il->scd_base_addr + IL49_SCD_TX_STTS_BITMAP_OFFSET; a += 4)
+ il_write_targ_mem(il, a, 0);
+ for (; a < il->scd_base_addr + IL49_SCD_TRANSLATE_TBL_OFFSET; a += 4)
+ il_write_targ_mem(il, a, 0);
+ for (; a < il->scd_base_addr +
+        IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(il->hw_params.max_txq_num);
+      a += 4)
+ 	il_write_targ_mem(il, a, 0);
+
+ /* Tell 4965 where to find Tx byte count tables */
+ il_wr_prph(il, IL49_SCD_DRAM_BASE_ADDR, il->scd_bc_tbls.dma >> 10);
+
+ /* Enable DMA channel */
+ for (chan = 0; chan < FH49_TCSR_CHNL_NUM; chan++)
+ il_wr(il, FH49_TCSR_CHNL_TX_CONFIG_REG(chan),
+ FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
+ FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
+
+ /* Update FH chicken bits */
+ reg_val = il_rd(il, FH49_TX_CHICKEN_BITS_REG);
+ il_wr(il, FH49_TX_CHICKEN_BITS_REG,
+ reg_val | FH49_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
+
+ /* Disable chain mode for all queues */
+ il_wr_prph(il, IL49_SCD_QUEUECHAIN_SEL, 0);
+
+ /* Initialize each Tx queue (including the command queue) */
+ for (i = 0; i < il->hw_params.max_txq_num; i++) {
+
+ /* TFD circular buffer read/write indexes */
+ il_wr_prph(il, IL49_SCD_QUEUE_RDPTR(i), 0);
+ il_wr(il, HBUS_TARG_WRPTR, 0 | (i << 8));
+
+ /* Max Tx Window size for Scheduler-ACK mode */
+ il_write_targ_mem(il,
+ il->scd_base_addr +
+ IL49_SCD_CONTEXT_QUEUE_OFFSET(i),
+ (SCD_WIN_SIZE <<
+ IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
+ IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
+
+ /* Frame limit */
+ il_write_targ_mem(il,
+ il->scd_base_addr +
+ IL49_SCD_CONTEXT_QUEUE_OFFSET(i) +
+ sizeof(u32),
+ (SCD_FRAME_LIMIT <<
+ IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
+ IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
+
+ }
+ il_wr_prph(il, IL49_SCD_INTERRUPT_MASK,
+ (1 << il->hw_params.max_txq_num) - 1);
+
+ /* Activate all Tx DMA/FIFO channels */
+ il4965_txq_set_sched(il, IL_MASK(0, 6));
+
+ il4965_set_wr_ptrs(il, IL_DEFAULT_CMD_QUEUE_NUM, 0);
+
+ /* make sure all queues are not stopped */
+ memset(&il->queue_stopped[0], 0, sizeof(il->queue_stopped));
+ for (i = 0; i < 4; i++)
+ atomic_set(&il->queue_stop_count[i], 0);
+
+ /* reset to 0 to enable all the queues first */
+ il->txq_ctx_active_msk = 0;
+ /* Map each Tx/cmd queue to its corresponding fifo */
+ BUILD_BUG_ON(ARRAY_SIZE(default_queue_to_tx_fifo) != 7);
+
+ for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
+ int ac = default_queue_to_tx_fifo[i];
+
+ il_txq_ctx_activate(il, i);
+
+ if (ac == IL_TX_FIFO_UNUSED)
+ continue;
+
+ il4965_tx_queue_set_status(il, &il->txq[i], ac, 0);
+ }
+
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ return 0;
+}
+
+/**
+ * il4965_alive_start - called after N_ALIVE notification received
+ * from protocol/runtime uCode (initialization uCode's
+ * Alive gets handled by il_init_alive_start()).
+ */
+static void
+il4965_alive_start(struct il_priv *il)
+{
+ int ret = 0;
+ struct il_rxon_context *ctx = &il->ctx;
+
+ D_INFO("Runtime Alive received.\n");
+
+ if (il->card_alive.is_valid != UCODE_VALID_OK) {
+ /* We had an error bringing up the hardware, so take it
+ * all the way back down so we can try again */
+ D_INFO("Alive failed.\n");
+ goto restart;
+ }
+
+ /* The initialization uCode has loaded the runtime uCode ... verify the
+ * inst image. This is a paranoid check, because we would not have gotten
+ * the "runtime" alive if the code weren't properly loaded. */
+ if (il4965_verify_ucode(il)) {
+ /* Runtime instruction load was bad;
+ * take it all the way back down so we can try again */
+ D_INFO("Bad runtime uCode load.\n");
+ goto restart;
+ }
+
+ ret = il4965_alive_notify(il);
+ if (ret) {
+ IL_WARN("Could not complete ALIVE transition [ntf]: %d\n", ret);
+ goto restart;
+ }
+
+ /* After the ALIVE response, we can send host commands to the uCode */
+ set_bit(S_ALIVE, &il->status);
+
+ /* Enable watchdog to monitor the driver tx queues */
+ il_setup_watchdog(il);
+
+ if (il_is_rfkill(il))
+ return;
+
+ ieee80211_wake_queues(il->hw);
+
+ il->active_rate = RATES_MASK;
+
+ if (il_is_associated_ctx(ctx)) {
+ struct il_rxon_cmd *active_rxon =
+ (struct il_rxon_cmd *)&ctx->active;
+ /* apply any changes in staging */
+ ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
+ active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ } else {
+ /* Initialize our rx_config data */
+ il_connection_init_rx_config(il, &il->ctx);
+
+ if (il->cfg->ops->hcmd->set_rxon_chain)
+ il->cfg->ops->hcmd->set_rxon_chain(il, ctx);
+ }
+
+ /* Configure bluetooth coexistence if enabled */
+ il_send_bt_config(il);
+
+ il4965_reset_run_time_calib(il);
+
+ set_bit(S_READY, &il->status);
+
+ /* Configure the adapter for unassociated operation */
+ il_commit_rxon(il, ctx);
+
+ /* At this point, the NIC is initialized and operational */
+ il4965_rf_kill_ct_config(il);
+
+ D_INFO("ALIVE processing complete.\n");
+ wake_up(&il->wait_command_queue);
+
+ il_power_update_mode(il, true);
+ D_INFO("Updated power mode\n");
+
+ return;
+
+restart:
+ queue_work(il->workqueue, &il->restart);
+}
+
+static void il4965_cancel_deferred_work(struct il_priv *il);
+
+static void
+__il4965_down(struct il_priv *il)
+{
+ unsigned long flags;
+ int exit_pending;
+
+ D_INFO(DRV_NAME " is going down\n");
+
+ il_scan_cancel_timeout(il, 200);
+
+ exit_pending = test_and_set_bit(S_EXIT_PENDING, &il->status);
+
+ /* Stop the TX queue watchdog. The S_EXIT_PENDING bit must be set
+ * to prevent the timer from being rearmed. */
+ del_timer_sync(&il->watchdog);
+
+ il_clear_ucode_stations(il, NULL);
+ il_dealloc_bcast_stations(il);
+ il_clear_driver_stations(il);
+
+ /* Unblock any waiting calls */
+ wake_up_all(&il->wait_command_queue);
+
+ /* Wipe out the EXIT_PENDING status bit if we are not actually
+ * exiting the module */
+ if (!exit_pending)
+ clear_bit(S_EXIT_PENDING, &il->status);
+
+ /* stop and reset the on-board processor */
+ _il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
+
+ /* tell the device to stop sending interrupts */
+ spin_lock_irqsave(&il->lock, flags);
+ il_disable_interrupts(il);
+ spin_unlock_irqrestore(&il->lock, flags);
+ il4965_synchronize_irq(il);
+
+ if (il->mac80211_registered)
+ ieee80211_stop_queues(il->hw);
+
+ /* If we have not previously called il_init() then
+ * clear all bits but the RF Kill bit and return */
+ if (!il_is_init(il)) {
+ il->status =
+     test_bit(S_RF_KILL_HW, &il->status) << S_RF_KILL_HW |
+     test_bit(S_GEO_CONFIGURED, &il->status) << S_GEO_CONFIGURED |
+     test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING;
+ goto exit;
+ }
+
+ /* ...otherwise clear out all the status bits but the RF Kill
+ * bit and continue taking the NIC down. */
+ il->status &=
+     test_bit(S_RF_KILL_HW, &il->status) << S_RF_KILL_HW |
+     test_bit(S_GEO_CONFIGURED, &il->status) << S_GEO_CONFIGURED |
+     test_bit(S_FW_ERROR, &il->status) << S_FW_ERROR |
+     test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING;
+
+ il4965_txq_ctx_stop(il);
+ il4965_rxq_stop(il);
+
+ /* Power-down device's busmaster DMA clocks */
+ il_wr_prph(il, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
+ udelay(5);
+
+ /* Make sure (redundant) we've released our request to stay awake */
+ il_clear_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+
+ /* Stop the device, and put it in low power state */
+ il_apm_stop(il);
+
+exit:
+ memset(&il->card_alive, 0, sizeof(struct il_alive_resp));
+
+ dev_kfree_skb(il->beacon_skb);
+ il->beacon_skb = NULL;
+
+ /* clear out any free frames */
+ il4965_clear_free_frames(il);
+}
+
+static void
+il4965_down(struct il_priv *il)
+{
+ mutex_lock(&il->mutex);
+ __il4965_down(il);
+ mutex_unlock(&il->mutex);
+
+ il4965_cancel_deferred_work(il);
+}
+
+#define HW_READY_TIMEOUT (50)
+
+static int
+il4965_set_hw_ready(struct il_priv *il)
+{
+ int ret = 0;
+
+ il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
+
+ /* See if we got it */
+ ret =
+ _il_poll_bit(il, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
+ CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, HW_READY_TIMEOUT);
+ if (ret != -ETIMEDOUT)
+ il->hw_ready = true;
+ else
+ il->hw_ready = false;
+
+ D_INFO("hardware %s\n", (il->hw_ready == 1) ? "ready" : "not ready");
+ return ret;
+}
+
+static int
+il4965_prepare_card_hw(struct il_priv *il)
+{
+ int ret = 0;
+
+ D_INFO("il4965_prepare_card_hw enter\n");
+
+ ret = il4965_set_hw_ready(il);
+ if (il->hw_ready)
+ return ret;
+
+ /* If HW is not ready, prepare the conditions to check again */
+ il_set_bit(il, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_PREPARE);
+
+ ret =
+ _il_poll_bit(il, CSR_HW_IF_CONFIG_REG,
+ ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
+ CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);
+
+ /* HW should be ready by now, check again. */
+ if (ret != -ETIMEDOUT)
+ il4965_set_hw_ready(il);
+
+ return ret;
+}
+
+#define MAX_HW_RESTARTS 5
+
+static int
+__il4965_up(struct il_priv *il)
+{
+ int i;
+ int ret;
+
+ if (test_bit(S_EXIT_PENDING, &il->status)) {
+ IL_WARN("Exit pending; will not bring the NIC up\n");
+ return -EIO;
+ }
+
+ if (!il->ucode_data_backup.v_addr || !il->ucode_data.v_addr) {
+ IL_ERR("ucode not available for device bringup\n");
+ return -EIO;
+ }
+
+ ret = il4965_alloc_bcast_station(il, &il->ctx);
+ if (ret) {
+ il_dealloc_bcast_stations(il);
+ return ret;
+ }
+
+ il4965_prepare_card_hw(il);
+
+ if (!il->hw_ready) {
+ IL_WARN("Exit HW not ready\n");
+ return -EIO;
+ }
+
+ /* If platform's RF_KILL switch is NOT set to KILL */
+ if (_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
+ clear_bit(S_RF_KILL_HW, &il->status);
+ else
+ set_bit(S_RF_KILL_HW, &il->status);
+
+ if (il_is_rfkill(il)) {
+ wiphy_rfkill_set_hw_state(il->hw->wiphy, true);
+
+ il_enable_interrupts(il);
+ IL_WARN("Radio disabled by HW RF Kill switch\n");
+ return 0;
+ }
+
+ _il_wr(il, CSR_INT, 0xFFFFFFFF);
+
+ /* must be initialised before il_hw_nic_init */
+ il->cmd_queue = IL_DEFAULT_CMD_QUEUE_NUM;
+
+ ret = il4965_hw_nic_init(il);
+ if (ret) {
+ IL_ERR("Unable to init nic\n");
+ return ret;
+ }
+
+ /* make sure rfkill handshake bits are cleared */
+ _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+ _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+
+ /* clear (again), then enable host interrupts */
+ _il_wr(il, CSR_INT, 0xFFFFFFFF);
+ il_enable_interrupts(il);
+
+ /* really make sure rfkill handshake bits are cleared */
+ _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+ _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+
+ /* Copy original ucode data image from disk into backup cache.
+ * This will be used to initialize the on-board processor's
+ * data SRAM for a clean start when the runtime program first loads. */
+ memcpy(il->ucode_data_backup.v_addr, il->ucode_data.v_addr,
+ il->ucode_data.len);
+
+ for (i = 0; i < MAX_HW_RESTARTS; i++) {
+
+ /* load bootstrap state machine,
+ * load bootstrap program into processor's memory,
+ * prepare to load the "initialize" uCode */
+ ret = il->cfg->ops->lib->load_ucode(il);
+
+ if (ret) {
+ IL_ERR("Unable to set up bootstrap uCode: %d\n", ret);
+ continue;
+ }
+
+ /* start card; "initialize" will load runtime ucode */
+ il4965_nic_start(il);
+
+ D_INFO(DRV_NAME " is coming up\n");
+
+ return 0;
+ }
+
+ set_bit(S_EXIT_PENDING, &il->status);
+ __il4965_down(il);
+ clear_bit(S_EXIT_PENDING, &il->status);
+
+ /* tried to restart and configure the device for as long as our
+ * patience could withstand */
+ IL_ERR("Unable to initialize device after %d attempts.\n", i);
+ return -EIO;
+}
+
+/*****************************************************************************
+ *
+ * Workqueue callbacks
+ *
+ *****************************************************************************/
+
+static void
+il4965_bg_init_alive_start(struct work_struct *data)
+{
+ struct il_priv *il =
+ container_of(data, struct il_priv, init_alive_start.work);
+
+ mutex_lock(&il->mutex);
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ goto out;
+
+ il->cfg->ops->lib->init_alive_start(il);
+out:
+ mutex_unlock(&il->mutex);
+}
+
+static void
+il4965_bg_alive_start(struct work_struct *data)
+{
+ struct il_priv *il =
+ container_of(data, struct il_priv, alive_start.work);
+
+ mutex_lock(&il->mutex);
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ goto out;
+
+ il4965_alive_start(il);
+out:
+ mutex_unlock(&il->mutex);
+}
+
+static void
+il4965_bg_run_time_calib_work(struct work_struct *work)
+{
+ struct il_priv *il = container_of(work, struct il_priv,
+ run_time_calib_work);
+
+ mutex_lock(&il->mutex);
+
+ if (test_bit(S_EXIT_PENDING, &il->status) ||
+ test_bit(S_SCANNING, &il->status)) {
+ mutex_unlock(&il->mutex);
+ return;
+ }
+
+ if (il->start_calib) {
+ il4965_chain_noise_calibration(il, (void *)&il->_4965.stats);
+ il4965_sensitivity_calibration(il, (void *)&il->_4965.stats);
+ }
+
+ mutex_unlock(&il->mutex);
+}
+
+static void
+il4965_bg_restart(struct work_struct *data)
+{
+ struct il_priv *il = container_of(data, struct il_priv, restart);
+
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ return;
+
+ if (test_and_clear_bit(S_FW_ERROR, &il->status)) {
+ mutex_lock(&il->mutex);
+ il->ctx.vif = NULL;
+ il->is_open = 0;
+
+ __il4965_down(il);
+
+ mutex_unlock(&il->mutex);
+ il4965_cancel_deferred_work(il);
+ ieee80211_restart_hw(il->hw);
+ } else {
+ il4965_down(il);
+
+ mutex_lock(&il->mutex);
+ if (test_bit(S_EXIT_PENDING, &il->status)) {
+ mutex_unlock(&il->mutex);
+ return;
+ }
+
+ __il4965_up(il);
+ mutex_unlock(&il->mutex);
+ }
+}
+
+static void
+il4965_bg_rx_replenish(struct work_struct *data)
+{
+ struct il_priv *il = container_of(data, struct il_priv, rx_replenish);
+
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ return;
+
+ mutex_lock(&il->mutex);
+ il4965_rx_replenish(il);
+ mutex_unlock(&il->mutex);
+}
+
+/*****************************************************************************
+ *
+ * mac80211 entry point functions
+ *
+ *****************************************************************************/
+
+#define UCODE_READY_TIMEOUT (4 * HZ)
+
+/*
+ * Not a mac80211 entry point function, but it fits in with all the
+ * other mac80211 functions grouped here.
+ */
+static int
+il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length)
+{
+ int ret;
+ struct ieee80211_hw *hw = il->hw;
+
+ hw->rate_control_algorithm = "iwl-4965-rs";
+
+ /* Tell mac80211 our characteristics */
+ hw->flags =
+ IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_AMPDU_AGGREGATION |
+ IEEE80211_HW_NEED_DTIM_PERIOD | IEEE80211_HW_SPECTRUM_MGMT |
+ IEEE80211_HW_REPORTS_TX_ACK_STATUS;
+
+ if (il->cfg->sku & IL_SKU_N)
+ hw->flags |=
+ IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
+ IEEE80211_HW_SUPPORTS_STATIC_SMPS;
+
+ hw->sta_data_size = sizeof(struct il_station_priv);
+ hw->vif_data_size = sizeof(struct il_vif_priv);
+
+ hw->wiphy->interface_modes |= il->ctx.interface_modes;
+ hw->wiphy->interface_modes |= il->ctx.exclusive_interface_modes;
+
+ hw->wiphy->flags |=
+ WIPHY_FLAG_CUSTOM_REGULATORY | WIPHY_FLAG_DISABLE_BEACON_HINTS;
+
+ /*
+ * For now, disable PS by default because it affects
+ * RX performance significantly.
+ */
+ hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+
+ hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
+ /* we create the 802.11 header and a zero-length SSID element */
+ hw->wiphy->max_scan_ie_len = max_probe_length - 24 - 2;
+
+ /* Default value; 4 EDCA QOS priorities */
+ hw->queues = 4;
+
+ hw->max_listen_interval = IL_CONN_MAX_LISTEN_INTERVAL;
+
+ if (il->bands[IEEE80211_BAND_2GHZ].n_channels)
+ il->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
+ &il->bands[IEEE80211_BAND_2GHZ];
+ if (il->bands[IEEE80211_BAND_5GHZ].n_channels)
+ il->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
+ &il->bands[IEEE80211_BAND_5GHZ];
+
+ il_leds_init(il);
+
+ ret = ieee80211_register_hw(il->hw);
+ if (ret) {
+ IL_ERR("Failed to register hw (error %d)\n", ret);
+ return ret;
+ }
+ il->mac80211_registered = 1;
+
+ return 0;
+}
+
+int
+il4965_mac_start(struct ieee80211_hw *hw)
+{
+ struct il_priv *il = hw->priv;
+ int ret;
+
+ D_MAC80211("enter\n");
+
+ /* we should be verifying the device is ready to be opened */
+ mutex_lock(&il->mutex);
+ ret = __il4965_up(il);
+ mutex_unlock(&il->mutex);
+
+ if (ret)
+ return ret;
+
+ if (il_is_rfkill(il))
+ goto out;
+
+ D_INFO("Start UP work done.\n");
+
+ /* Wait for START_ALIVE from Run Time ucode. Otherwise callbacks from
+ * mac80211 will not be run successfully. */
+ ret = wait_event_timeout(il->wait_command_queue,
+ test_bit(S_READY, &il->status),
+ UCODE_READY_TIMEOUT);
+ if (!ret) {
+ if (!test_bit(S_READY, &il->status)) {
+ IL_ERR("START_ALIVE timeout after %dms.\n",
+ jiffies_to_msecs(UCODE_READY_TIMEOUT));
+ return -ETIMEDOUT;
+ }
+ }
+
+ il4965_led_enable(il);
+
+out:
+ il->is_open = 1;
+ D_MAC80211("leave\n");
+ return 0;
+}
+
+void
+il4965_mac_stop(struct ieee80211_hw *hw)
+{
+ struct il_priv *il = hw->priv;
+
+ D_MAC80211("enter\n");
+
+ if (!il->is_open)
+ return;
+
+ il->is_open = 0;
+
+ il4965_down(il);
+
+ flush_workqueue(il->workqueue);
+
+ /* User space software may expect to get rfkill changes
+ * even if the interface is down */
+ _il_wr(il, CSR_INT, 0xFFFFFFFF);
+ il_enable_rfkill_int(il);
+
+ D_MAC80211("leave\n");
+}
+
+void
+il4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+ struct il_priv *il = hw->priv;
+
+ D_MACDUMP("enter\n");
+
+ D_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
+ ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
+
+ if (il4965_tx_skb(il, skb))
+ dev_kfree_skb_any(skb);
+
+ D_MACDUMP("leave\n");
+}
+
+void
+il4965_mac_update_tkip_key(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_key_conf *keyconf,
+ struct ieee80211_sta *sta, u32 iv32, u16 * phase1key)
+{
+ struct il_priv *il = hw->priv;
+ struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
+
+ D_MAC80211("enter\n");
+
+ il4965_update_tkip_key(il, vif_priv->ctx, keyconf, sta, iv32,
+ phase1key);
+
+ D_MAC80211("leave\n");
+}
+
+int
+il4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct il_priv *il = hw->priv;
+ struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
+ struct il_rxon_context *ctx = vif_priv->ctx;
+ int ret;
+ u8 sta_id;
+ bool is_default_wep_key = false;
+
+ D_MAC80211("enter\n");
+
+ if (il->cfg->mod_params->sw_crypto) {
+ D_MAC80211("leave - hwcrypto disabled\n");
+ return -EOPNOTSUPP;
+ }
+
+ sta_id = il_sta_id_or_broadcast(il, vif_priv->ctx, sta);
+ if (sta_id == IL_INVALID_STATION)
+ return -EINVAL;
+
+ mutex_lock(&il->mutex);
+ il_scan_cancel_timeout(il, 100);
+
+ /*
+ * If we are setting a WEP group key and we haven't received any key
+ * mapping keys so far, we are in legacy WEP mode (group key only);
+ * otherwise we are in 1X mode.
+ * In legacy WEP mode, we use a different host command to the uCode.
+ */
+ if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+ key->cipher == WLAN_CIPHER_SUITE_WEP104) && !sta) {
+ if (cmd == SET_KEY)
+ is_default_wep_key = !ctx->key_mapping_keys;
+ else
+ is_default_wep_key =
+ (key->hw_key_idx == HW_KEY_DEFAULT);
+ }
+
+ switch (cmd) {
+ case SET_KEY:
+ if (is_default_wep_key)
+ ret =
+ il4965_set_default_wep_key(il, vif_priv->ctx, key);
+ else
+ ret =
+ il4965_set_dynamic_key(il, vif_priv->ctx, key,
+ sta_id);
+
+ D_MAC80211("enable hwcrypto key\n");
+ break;
+ case DISABLE_KEY:
+ if (is_default_wep_key)
+ ret = il4965_remove_default_wep_key(il, ctx, key);
+ else
+ ret = il4965_remove_dynamic_key(il, ctx, key, sta_id);
+
+ D_MAC80211("disable hwcrypto key\n");
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&il->mutex);
+ D_MAC80211("leave\n");
+
+ return ret;
+}
+
+int
+il4965_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ enum ieee80211_ampdu_mlme_action action,
+ struct ieee80211_sta *sta, u16 tid, u16 * ssn,
+ u8 buf_size)
+{
+ struct il_priv *il = hw->priv;
+ int ret = -EINVAL;
+
+ D_HT("A-MPDU action on addr %pM tid %d\n", sta->addr, tid);
+
+ if (!(il->cfg->sku & IL_SKU_N))
+ return -EACCES;
+
+ mutex_lock(&il->mutex);
+
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+ D_HT("start Rx\n");
+ ret = il4965_sta_rx_agg_start(il, sta, tid, *ssn);
+ break;
+ case IEEE80211_AMPDU_RX_STOP:
+ D_HT("stop Rx\n");
+ ret = il4965_sta_rx_agg_stop(il, sta, tid);
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ ret = 0;
+ break;
+ case IEEE80211_AMPDU_TX_START:
+ D_HT("start Tx\n");
+ ret = il4965_tx_agg_start(il, vif, sta, tid, ssn);
+ break;
+ case IEEE80211_AMPDU_TX_STOP:
+ D_HT("stop Tx\n");
+ ret = il4965_tx_agg_stop(il, vif, sta, tid);
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ ret = 0;
+ break;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ ret = 0;
+ break;
+ }
+ mutex_unlock(&il->mutex);
+
+ return ret;
+}
+
+int
+il4965_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct il_priv *il = hw->priv;
+ struct il_station_priv *sta_priv = (void *)sta->drv_priv;
+ struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
+ bool is_ap = vif->type == NL80211_IFTYPE_STATION;
+ int ret;
+ u8 sta_id;
+
+ D_INFO("received request to add station %pM\n", sta->addr);
+ mutex_lock(&il->mutex);
+ D_INFO("proceeding to add station %pM\n", sta->addr);
+ sta_priv->common.sta_id = IL_INVALID_STATION;
+
+ atomic_set(&sta_priv->pending_frames, 0);
+
+ ret =
+ il_add_station_common(il, vif_priv->ctx, sta->addr, is_ap, sta,
+ &sta_id);
+ if (ret) {
+ IL_ERR("Unable to add station %pM (%d)\n", sta->addr, ret);
+ /* Should we return success if return code is EEXIST ? */
+ mutex_unlock(&il->mutex);
+ return ret;
+ }
+
+ sta_priv->common.sta_id = sta_id;
+
+ /* Initialize rate scaling */
+ D_INFO("Initializing rate scaling for station %pM\n", sta->addr);
+ il4965_rs_rate_init(il, sta, sta_id);
+ mutex_unlock(&il->mutex);
+
+ return 0;
+}
+
+void
+il4965_mac_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_channel_switch *ch_switch)
+{
+ struct il_priv *il = hw->priv;
+ const struct il_channel_info *ch_info;
+ struct ieee80211_conf *conf = &hw->conf;
+ struct ieee80211_channel *channel = ch_switch->channel;
+ struct il_ht_config *ht_conf = &il->current_ht_config;
+
+ struct il_rxon_context *ctx = &il->ctx;
+ u16 ch;
+
+ D_MAC80211("enter\n");
+
+ mutex_lock(&il->mutex);
+
+ if (il_is_rfkill(il))
+ goto out;
+
+ if (test_bit(S_EXIT_PENDING, &il->status) ||
+ test_bit(S_SCANNING, &il->status) ||
+ test_bit(S_CHANNEL_SWITCH_PENDING, &il->status))
+ goto out;
+
+ if (!il_is_associated_ctx(ctx))
+ goto out;
+
+ if (!il->cfg->ops->lib->set_channel_switch)
+ goto out;
+
+ ch = channel->hw_value;
+ if (le16_to_cpu(ctx->active.channel) == ch)
+ goto out;
+
+ ch_info = il_get_channel_info(il, channel->band, ch);
+ if (!il_is_channel_valid(ch_info)) {
+ D_MAC80211("invalid channel\n");
+ goto out;
+ }
+
+ spin_lock_irq(&il->lock);
+
+ il->current_ht_config.smps = conf->smps_mode;
+
+ /* Configure HT40 channels */
+ ctx->ht.enabled = conf_is_ht(conf);
+ if (ctx->ht.enabled) {
+ if (conf_is_ht40_minus(conf)) {
+ ctx->ht.extension_chan_offset =
+ IEEE80211_HT_PARAM_CHA_SEC_BELOW;
+ ctx->ht.is_40mhz = true;
+ } else if (conf_is_ht40_plus(conf)) {
+ ctx->ht.extension_chan_offset =
+ IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+ ctx->ht.is_40mhz = true;
+ } else {
+ ctx->ht.extension_chan_offset =
+ IEEE80211_HT_PARAM_CHA_SEC_NONE;
+ ctx->ht.is_40mhz = false;
+ }
+ } else
+ ctx->ht.is_40mhz = false;
+
+ if ((le16_to_cpu(ctx->staging.channel) != ch))
+ ctx->staging.flags = 0;
+
+ il_set_rxon_channel(il, channel, ctx);
+ il_set_rxon_ht(il, ht_conf);
+ il_set_flags_for_band(il, ctx, channel->band, ctx->vif);
+
+ spin_unlock_irq(&il->lock);
+
+ il_set_rate(il);
+ /*
+ * at this point, staging_rxon has the
+ * configuration for channel switch
+ */
+ set_bit(S_CHANNEL_SWITCH_PENDING, &il->status);
+ il->switch_channel = cpu_to_le16(ch);
+ if (il->cfg->ops->lib->set_channel_switch(il, ch_switch)) {
+ clear_bit(S_CHANNEL_SWITCH_PENDING, &il->status);
+ il->switch_channel = 0;
+ ieee80211_chswitch_done(ctx->vif, false);
+ }
+
+out:
+ mutex_unlock(&il->mutex);
+ D_MAC80211("leave\n");
+}
+
+void
+il4965_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
+ unsigned int *total_flags, u64 multicast)
+{
+ struct il_priv *il = hw->priv;
+ __le32 filter_or = 0, filter_nand = 0;
+
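+ /* Sort each filter flag under test into bits to set (filter_or) or
+ * bits to clear (filter_nand) in the RXON filter_flags. */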
+#define CHK(test, flag) do { \
+ if (*total_flags & (test)) \
+ filter_or |= (flag); \
+ else \
+ filter_nand |= (flag); \
+ } while (0)
+
+ D_MAC80211("Enter: changed: 0x%x, total: 0x%x\n", changed_flags,
+ *total_flags);
+
+ CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
+ /* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */
+ CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
+ CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
+
+#undef CHK
+
+ mutex_lock(&il->mutex);
+
+ il->ctx.staging.filter_flags &= ~filter_nand;
+ il->ctx.staging.filter_flags |= filter_or;
+
+ /*
+ * Not committing directly because hardware can perform a scan,
+ * but we'll eventually commit the filter flags change anyway.
+ */
+
+ mutex_unlock(&il->mutex);
+
+ /*
+ * Receiving all multicast frames is always enabled by the
+ * default flags setup in il_connection_init_rx_config()
+ * since we currently do not support programming multicast
+ * filters into the device.
+ */
+ *total_flags &=
+ FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
+ FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
+}
+
+/*****************************************************************************
+ *
+ * driver setup and teardown
+ *
+ *****************************************************************************/
+
+static void
+il4965_bg_txpower_work(struct work_struct *work)
+{
+ struct il_priv *il = container_of(work, struct il_priv,
+ txpower_work);
+
+ mutex_lock(&il->mutex);
+
+ /* If a scan happened to start before we got here
+ * then just return; the stats notification will
+ * kick off another scheduled work to compensate for
+ * any temperature delta we missed here. */
+ if (test_bit(S_EXIT_PENDING, &il->status) ||
+ test_bit(S_SCANNING, &il->status))
+ goto out;
+
+ /* Regardless of whether we are associated, we must reconfigure the
+ * TX power since frames can be sent on non-radar channels while
+ * not associated */
+ il->cfg->ops->lib->send_tx_power(il);
+
+ /* Update last_temperature to keep is_calib_needed from running
+ * when it isn't needed... */
+ il->last_temperature = il->temperature;
+out:
+ mutex_unlock(&il->mutex);
+}
+
+static void
+il4965_setup_deferred_work(struct il_priv *il)
+{
+ il->workqueue = create_singlethread_workqueue(DRV_NAME);
+
+ init_waitqueue_head(&il->wait_command_queue);
+
+ INIT_WORK(&il->restart, il4965_bg_restart);
+ INIT_WORK(&il->rx_replenish, il4965_bg_rx_replenish);
+ INIT_WORK(&il->run_time_calib_work, il4965_bg_run_time_calib_work);
+ INIT_DELAYED_WORK(&il->init_alive_start, il4965_bg_init_alive_start);
+ INIT_DELAYED_WORK(&il->alive_start, il4965_bg_alive_start);
+
+ il_setup_scan_deferred_work(il);
+
+ INIT_WORK(&il->txpower_work, il4965_bg_txpower_work);
+
+ init_timer(&il->stats_periodic);
+ il->stats_periodic.data = (unsigned long)il;
+ il->stats_periodic.function = il4965_bg_stats_periodic;
+
+ init_timer(&il->watchdog);
+ il->watchdog.data = (unsigned long)il;
+ il->watchdog.function = il_bg_watchdog;
+
+ tasklet_init(&il->irq_tasklet,
+ (void (*)(unsigned long))il4965_irq_tasklet,
+ (unsigned long)il);
+}
+
+static void
+il4965_cancel_deferred_work(struct il_priv *il)
+{
+ cancel_work_sync(&il->txpower_work);
+ cancel_delayed_work_sync(&il->init_alive_start);
+ cancel_delayed_work(&il->alive_start);
+ cancel_work_sync(&il->run_time_calib_work);
+
+ il_cancel_scan_deferred_work(il);
+
+ del_timer_sync(&il->stats_periodic);
+}
+
+static void
+il4965_init_hw_rates(struct il_priv *il, struct ieee80211_rate *rates)
+{
+ int i;
+
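+ /* mac80211 bitrates are in units of 100 kb/s, while il_rates[].ieee
+ * is in units of 500 kb/s, hence the multiplication by 5. */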
+ for (i = 0; i < RATE_COUNT_LEGACY; i++) {
+ rates[i].bitrate = il_rates[i].ieee * 5;
+ rates[i].hw_value = i; /* Rate scaling will work on idxes */
+ rates[i].hw_value_short = i;
+ rates[i].flags = 0;
+ if ((i >= IL_FIRST_CCK_RATE) && (i <= IL_LAST_CCK_RATE)) {
+ /*
+ * If CCK != 1M then set short preamble rate flag.
+ */
+ rates[i].flags |=
+ (il_rates[i].plcp ==
+ RATE_1M_PLCP) ? 0 : IEEE80211_RATE_SHORT_PREAMBLE;
+ }
+ }
+}
+
+/*
+ * Acquire il->lock before calling this function !
+ */
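+/*
+ * Point both the HBUS write pointer and the 4965 scheduler read pointer
+ * for Tx queue txq_id at index idx.
+ */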
+void
+il4965_set_wr_ptrs(struct il_priv *il, int txq_id, u32 idx)
+{
+ il_wr(il, HBUS_TARG_WRPTR, (idx & 0xff) | (txq_id << 8));
+ il_wr_prph(il, IL49_SCD_QUEUE_RDPTR(txq_id), idx);
+}
+
+void
+il4965_tx_queue_set_status(struct il_priv *il, struct il_tx_queue *txq,
+ int tx_fifo_id, int scd_retry)
+{
+ int txq_id = txq->q.id;
+
+ /* Find out whether to activate Tx queue */
+ int active = test_bit(txq_id, &il->txq_ctx_active_msk) ? 1 : 0;
+
+ /* Set up and activate */
+ il_wr_prph(il, IL49_SCD_QUEUE_STATUS_BITS(txq_id),
+ (active << IL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
+ (tx_fifo_id << IL49_SCD_QUEUE_STTS_REG_POS_TXF) |
+ (scd_retry << IL49_SCD_QUEUE_STTS_REG_POS_WSL) |
+ (scd_retry << IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
+ IL49_SCD_QUEUE_STTS_REG_MSK);
+
+ txq->sched_retry = scd_retry;
+
+ D_INFO("%s %s Queue %d on AC %d\n", active ? "Activate" : "Deactivate",
+ scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
+}
+
+static int
+il4965_init_drv(struct il_priv *il)
+{
+ int ret;
+
+ spin_lock_init(&il->sta_lock);
+ spin_lock_init(&il->hcmd_lock);
+
+ INIT_LIST_HEAD(&il->free_frames);
+
+ mutex_init(&il->mutex);
+
+ il->ieee_channels = NULL;
+ il->ieee_rates = NULL;
+ il->band = IEEE80211_BAND_2GHZ;
+
+ il->iw_mode = NL80211_IFTYPE_STATION;
+ il->current_ht_config.smps = IEEE80211_SMPS_STATIC;
+ il->missed_beacon_threshold = IL_MISSED_BEACON_THRESHOLD_DEF;
+
+ /* initialize force reset */
+ il->force_reset.reset_duration = IL_DELAY_NEXT_FORCE_FW_RELOAD;
+
+ /* Choose which receivers/antennas to use */
+ if (il->cfg->ops->hcmd->set_rxon_chain)
+ il->cfg->ops->hcmd->set_rxon_chain(il, &il->ctx);
+
+ il_init_scan_params(il);
+
+ ret = il_init_channel_map(il);
+ if (ret) {
+ IL_ERR("initializing regulatory failed: %d\n", ret);
+ goto err;
+ }
+
+ ret = il_init_geos(il);
+ if (ret) {
+ IL_ERR("initializing geos failed: %d\n", ret);
+ goto err_free_channel_map;
+ }
+ il4965_init_hw_rates(il, il->ieee_rates);
+
+ return 0;
+
+err_free_channel_map:
+ il_free_channel_map(il);
+err:
+ return ret;
+}
+
+static void
+il4965_uninit_drv(struct il_priv *il)
+{
+ il4965_calib_free_results(il);
+ il_free_geos(il);
+ il_free_channel_map(il);
+ kfree(il->scan_cmd);
+}
+
+static void
+il4965_hw_detect(struct il_priv *il)
+{
+ il->hw_rev = _il_rd(il, CSR_HW_REV);
+ il->hw_wa_rev = _il_rd(il, CSR_HW_REV_WA_REG);
+ il->rev_id = il->pci_dev->revision;
+ D_INFO("HW Revision ID = 0x%X\n", il->rev_id);
+}
+
+static int
+il4965_set_hw_params(struct il_priv *il)
+{
+ il->hw_params.max_rxq_size = RX_QUEUE_SIZE;
+ il->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
+ if (il->cfg->mod_params->amsdu_size_8K)
+ il->hw_params.rx_page_order = get_order(IL_RX_BUF_SIZE_8K);
+ else
+ il->hw_params.rx_page_order = get_order(IL_RX_BUF_SIZE_4K);
+
+ il->hw_params.max_beacon_itrvl = IL_MAX_UCODE_BEACON_INTERVAL;
+
+ if (il->cfg->mod_params->disable_11n)
+ il->cfg->sku &= ~IL_SKU_N;
+
+ /* Device-specific setup */
+ return il->cfg->ops->lib->set_hw_params(il);
+}
+
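+/* mac80211 AC ordering (VO, VI, BE, BK) mapped to uCode Tx FIFOs / queues */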
+static const u8 il4965_bss_ac_to_fifo[] = {
+ IL_TX_FIFO_VO,
+ IL_TX_FIFO_VI,
+ IL_TX_FIFO_BE,
+ IL_TX_FIFO_BK,
+};
+
+static const u8 il4965_bss_ac_to_queue[] = {
+ 0, 1, 2, 3,
+};
+
+static int
+il4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ int err = 0;
+ struct il_priv *il;
+ struct ieee80211_hw *hw;
+ struct il_cfg *cfg = (struct il_cfg *)(ent->driver_data);
+ unsigned long flags;
+ u16 pci_cmd;
+
+ /************************
+ * 1. Allocating HW data
+ ************************/
+
+ hw = il_alloc_all(cfg);
+ if (!hw) {
+ err = -ENOMEM;
+ goto out;
+ }
+ il = hw->priv;
+ /* At this point both hw and il are allocated. */
+
+ il->ctx.ctxid = 0;
+
+ il->ctx.always_active = true;
+ il->ctx.is_active = true;
+ il->ctx.rxon_cmd = C_RXON;
+ il->ctx.rxon_timing_cmd = C_RXON_TIMING;
+ il->ctx.rxon_assoc_cmd = C_RXON_ASSOC;
+ il->ctx.qos_cmd = C_QOS_PARAM;
+ il->ctx.ap_sta_id = IL_AP_ID;
+ il->ctx.wep_key_cmd = C_WEPKEY;
+ il->ctx.ac_to_fifo = il4965_bss_ac_to_fifo;
+ il->ctx.ac_to_queue = il4965_bss_ac_to_queue;
+ il->ctx.exclusive_interface_modes = BIT(NL80211_IFTYPE_ADHOC);
+ il->ctx.interface_modes = BIT(NL80211_IFTYPE_STATION);
+ il->ctx.ap_devtype = RXON_DEV_TYPE_AP;
+ il->ctx.ibss_devtype = RXON_DEV_TYPE_IBSS;
+ il->ctx.station_devtype = RXON_DEV_TYPE_ESS;
+ il->ctx.unused_devtype = RXON_DEV_TYPE_ESS;
+
+ SET_IEEE80211_DEV(hw, &pdev->dev);
+
+ D_INFO("*** LOAD DRIVER ***\n");
+ il->cfg = cfg;
+ il->pci_dev = pdev;
+ il->inta_mask = CSR_INI_SET_MASK;
+
+ if (il_alloc_traffic_mem(il))
+ IL_ERR("Not enough memory to generate traffic log\n");
+
+ /**************************
+ * 2. Initializing PCI bus
+ **************************/
+ pci_disable_link_state(pdev,
+ PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
+ PCIE_LINK_STATE_CLKPM);
+
+ if (pci_enable_device(pdev)) {
+ err = -ENODEV;
+ goto out_ieee80211_free_hw;
+ }
+
+ pci_set_master(pdev);
+
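+ /* Try 36-bit DMA addressing first (supported by 4965), then fall
+ * back to 32-bit if the platform cannot provide it. */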
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
+ if (!err)
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
+ if (err) {
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (!err)
+ err =
+ pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ /* both attempts failed: */
+ if (err) {
+ IL_WARN("No suitable DMA available.\n");
+ goto out_pci_disable_device;
+ }
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err)
+ goto out_pci_disable_device;
+
+ pci_set_drvdata(pdev, il);
+
+ /***********************
+ * 3. Read REV register
+ ***********************/
+ il->hw_base = pci_iomap(pdev, 0, 0);
+ if (!il->hw_base) {
+ err = -ENODEV;
+ goto out_pci_release_regions;
+ }
+
+ D_INFO("pci_resource_len = 0x%08llx\n",
+ (unsigned long long)pci_resource_len(pdev, 0));
+ D_INFO("pci_resource_base = %p\n", il->hw_base);
+
+ /* These spin locks will be used in apm_ops.init and EEPROM access,
+ * so they must be initialized now.
+ */
+ spin_lock_init(&il->reg_lock);
+ spin_lock_init(&il->lock);
+
+ /*
+ * stop and reset the on-board processor just in case it is in a
+ * strange state ... like being left stranded by a primary kernel
+ * and this is now the kdump kernel trying to start up
+ */
+ _il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
+
+ il4965_hw_detect(il);
+ IL_INFO("Detected %s, REV=0x%X\n", il->cfg->name, il->hw_rev);
+
+ /* We disable the RETRY_TIMEOUT register (0x41) to keep
+ * PCI Tx retries from interfering with C3 CPU state */
+ pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
+
+ il4965_prepare_card_hw(il);
+ if (!il->hw_ready) {
+ IL_WARN("Failed, HW not ready\n");
+ goto out_iounmap;
+ }
+
+ /*****************
+ * 4. Read EEPROM
+ *****************/
+ /* Read the EEPROM */
+ err = il_eeprom_init(il);
+ if (err) {
+ IL_ERR("Unable to init EEPROM\n");
+ goto out_iounmap;
+ }
+ err = il4965_eeprom_check_version(il);
+ if (err)
+ goto out_free_eeprom;
+
+ /* extract MAC Address */
+ il4965_eeprom_get_mac(il, il->addresses[0].addr);
+ D_INFO("MAC address: %pM\n", il->addresses[0].addr);
+ il->hw->wiphy->addresses = il->addresses;
+ il->hw->wiphy->n_addresses = 1;
+
+ /************************
+ * 5. Setup HW constants
+ ************************/
+ if (il4965_set_hw_params(il)) {
+ IL_ERR("failed to set hw parameters\n");
+ goto out_free_eeprom;
+ }
+
+ /*******************
+ * 6. Setup il
+ *******************/
+
+ err = il4965_init_drv(il);
+ if (err)
+ goto out_free_eeprom;
+ /* At this point both hw and il are initialized. */
+
+ /********************
+ * 7. Setup services
+ ********************/
+ spin_lock_irqsave(&il->lock, flags);
+ il_disable_interrupts(il);
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ pci_enable_msi(il->pci_dev);
+
+ err = request_irq(il->pci_dev->irq, il_isr, IRQF_SHARED, DRV_NAME, il);
+ if (err) {
+ IL_ERR("Error allocating IRQ %d\n", il->pci_dev->irq);
+ goto out_disable_msi;
+ }
+
+ il4965_setup_deferred_work(il);
+ il4965_setup_handlers(il);
+
+ /*********************************************
+ * 8. Enable interrupts and read RFKILL state
+ *********************************************/
+
+ /* enable rfkill interrupt: HW bug workaround */
+ pci_read_config_word(il->pci_dev, PCI_COMMAND, &pci_cmd);
+ if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
+ pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
+ pci_write_config_word(il->pci_dev, PCI_COMMAND, pci_cmd);
+ }
+
+ il_enable_rfkill_int(il);
+
+ /* If platform's RF_KILL switch is NOT set to KILL */
+ if (_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
+ clear_bit(S_RF_KILL_HW, &il->status);
+ else
+ set_bit(S_RF_KILL_HW, &il->status);
+
+ wiphy_rfkill_set_hw_state(il->hw->wiphy,
+ test_bit(S_RF_KILL_HW, &il->status));
+
+ il_power_initialize(il);
+
+ init_completion(&il->_4965.firmware_loading_complete);
+
+ err = il4965_request_firmware(il, true);
+ if (err)
+ goto out_destroy_workqueue;
+
+ return 0;
+
+out_destroy_workqueue:
+ destroy_workqueue(il->workqueue);
+ il->workqueue = NULL;
+ free_irq(il->pci_dev->irq, il);
+out_disable_msi:
+ pci_disable_msi(il->pci_dev);
+ il4965_uninit_drv(il);
+out_free_eeprom:
+ il_eeprom_free(il);
+out_iounmap:
+ pci_iounmap(pdev, il->hw_base);
+out_pci_release_regions:
+ pci_set_drvdata(pdev, NULL);
+ pci_release_regions(pdev);
+out_pci_disable_device:
+ pci_disable_device(pdev);
+out_ieee80211_free_hw:
+ il_free_traffic_mem(il);
+ ieee80211_free_hw(il->hw);
+out:
+ return err;
+}
+
+static void __devexit
+il4965_pci_remove(struct pci_dev *pdev)
+{
+ struct il_priv *il = pci_get_drvdata(pdev);
+ unsigned long flags;
+
+ if (!il)
+ return;
+
+ wait_for_completion(&il->_4965.firmware_loading_complete);
+
+ D_INFO("*** UNLOAD DRIVER ***\n");
+
+ il_dbgfs_unregister(il);
+ sysfs_remove_group(&pdev->dev.kobj, &il_attribute_group);
+
+ /* The ieee80211_unregister_hw() call will cause il_mac_stop() and
+ * il4965_down() to be called. Since we are removing the device we
+ * need to set the S_EXIT_PENDING bit first.
+ */
+ set_bit(S_EXIT_PENDING, &il->status);
+
+ il_leds_exit(il);
+
+ if (il->mac80211_registered) {
+ ieee80211_unregister_hw(il->hw);
+ il->mac80211_registered = 0;
+ } else {
+ il4965_down(il);
+ }
+
+ /*
+ * Make sure device is reset to low power before unloading driver.
+ * This may be redundant with il4965_down(), but there are paths to
+ * run il4965_down() without calling apm_ops.stop(), and there are
+ * paths to avoid running il4965_down() at all before leaving driver.
+ * This (inexpensive) call *makes sure* device is reset.
+ */
+ il_apm_stop(il);
+
+ /* make sure we flush any pending irq or
+ * tasklet for the driver
+ */
+ spin_lock_irqsave(&il->lock, flags);
+ il_disable_interrupts(il);
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ il4965_synchronize_irq(il);
+
+ il4965_dealloc_ucode_pci(il);
+
+ if (il->rxq.bd)
+ il4965_rx_queue_free(il, &il->rxq);
+ il4965_hw_txq_ctx_free(il);
+
+ il_eeprom_free(il);
+
+ /*netif_stop_queue(dev); */
+ flush_workqueue(il->workqueue);
+
+ /* ieee80211_unregister_hw calls il_mac_stop, which flushes
+ * il->workqueue... so we can't take down the workqueue
+ * until now... */
+ destroy_workqueue(il->workqueue);
+ il->workqueue = NULL;
+ il_free_traffic_mem(il);
+
+ free_irq(il->pci_dev->irq, il);
+ pci_disable_msi(il->pci_dev);
+ pci_iounmap(pdev, il->hw_base);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+
+ il4965_uninit_drv(il);
+
+ dev_kfree_skb(il->beacon_skb);
+
+ ieee80211_free_hw(il->hw);
+}
+
+/*
+ * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask
+ * must be called under il->lock and mac access
+ */
+void
+il4965_txq_set_sched(struct il_priv *il, u32 mask)
+{
+ il_wr_prph(il, IL49_SCD_TXFACT, mask);
+}
+
+/*****************************************************************************
+ *
+ * driver and module entry point
+ *
+ *****************************************************************************/
+
+/* Hardware specific file defines the PCI IDs table for that hardware module */
+static DEFINE_PCI_DEVICE_TABLE(il4965_hw_card_ids) = {
+ {IL_PCI_DEVICE(0x4229, PCI_ANY_ID, il4965_cfg)},
+ {IL_PCI_DEVICE(0x4230, PCI_ANY_ID, il4965_cfg)},
+ {0}
+};
+MODULE_DEVICE_TABLE(pci, il4965_hw_card_ids);
+
+static struct pci_driver il4965_driver = {
+ .name = DRV_NAME,
+ .id_table = il4965_hw_card_ids,
+ .probe = il4965_pci_probe,
+ .remove = __devexit_p(il4965_pci_remove),
+ .driver.pm = IL_LEGACY_PM_OPS,
+};
+
+static int __init
+il4965_init(void)
+{
+ int ret;
+ pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
+ pr_info(DRV_COPYRIGHT "\n");
+
+ ret = il4965_rate_control_register();
+ if (ret) {
+ pr_err("Unable to register rate control algorithm: %d\n", ret);
+ return ret;
+ }
+
+ ret = pci_register_driver(&il4965_driver);
+ if (ret) {
+ pr_err("Unable to initialize PCI module\n");
+ goto error_register;
+ }
+
+ return ret;
+
+error_register:
+ il4965_rate_control_unregister();
+ return ret;
+}
+
+static void __exit
+il4965_exit(void)
+{
+ pci_unregister_driver(&il4965_driver);
+ il4965_rate_control_unregister();
+}
+
+module_exit(il4965_exit);
+module_init(il4965_init);
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+module_param_named(debug, il_debug_level, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "debug output mask");
+#endif
+
+module_param_named(swcrypto, il4965_mod_params.sw_crypto, int, S_IRUGO);
+MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
+module_param_named(queues_num, il4965_mod_params.num_of_queues, int, S_IRUGO);
+MODULE_PARM_DESC(queues_num, "number of hw queues.");
+module_param_named(11n_disable, il4965_mod_params.disable_11n, int, S_IRUGO);
+MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
+module_param_named(amsdu_size_8K, il4965_mod_params.amsdu_size_8K, int,
+ S_IRUGO);
+MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
+module_param_named(fw_restart, il4965_mod_params.restart_fw, int, S_IRUGO);
+MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
diff --git a/drivers/net/wireless/iwlegacy/4965-rs.c b/drivers/net/wireless/iwlegacy/4965-rs.c
new file mode 100644
index 000000000000..467d0cb14ecd
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/4965-rs.c
@@ -0,0 +1,2860 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <net/mac80211.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+
+#include <linux/workqueue.h>
+
+#include "common.h"
+#include "4965.h"
+
+#define IL4965_RS_NAME "iwl-4965-rs"
+
+#define NUM_TRY_BEFORE_ANT_TOGGLE 1
+#define IL_NUMBER_TRY 1
+#define IL_HT_NUMBER_TRY 3
+
+#define RATE_MAX_WINDOW 62 /* # tx in history win */
+#define RATE_MIN_FAILURE_TH 6 /* min failures to calc tpt */
+#define RATE_MIN_SUCCESS_TH 8 /* min successes to calc tpt */
+
+/* max allowed rate miss before sync LQ cmd */
+#define IL_MISSED_RATE_MAX 15
+/* max time to accumulate history, 3 seconds */
+#define RATE_SCALE_FLUSH_INTVL (3*HZ)
+
+static u8 rs_ht_to_legacy[] = {
+ RATE_6M_IDX, RATE_6M_IDX,
+ RATE_6M_IDX, RATE_6M_IDX,
+ RATE_6M_IDX,
+ RATE_6M_IDX, RATE_9M_IDX,
+ RATE_12M_IDX, RATE_18M_IDX,
+ RATE_24M_IDX, RATE_36M_IDX,
+ RATE_48M_IDX, RATE_54M_IDX
+};
+
+static const u8 ant_toggle_lookup[] = {
+ /*ANT_NONE -> */ ANT_NONE,
+ /*ANT_A -> */ ANT_B,
+ /*ANT_B -> */ ANT_C,
+ /*ANT_AB -> */ ANT_BC,
+ /*ANT_C -> */ ANT_A,
+ /*ANT_AC -> */ ANT_AB,
+ /*ANT_BC -> */ ANT_AC,
+ /*ANT_ABC -> */ ANT_ABC,
+};
+
+#define IL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \
+ [RATE_##r##M_IDX] = { RATE_##r##M_PLCP, \
+ RATE_SISO_##s##M_PLCP, \
+ RATE_MIMO2_##s##M_PLCP,\
+ RATE_##r##M_IEEE, \
+ RATE_##ip##M_IDX, \
+ RATE_##in##M_IDX, \
+ RATE_##rp##M_IDX, \
+ RATE_##rn##M_IDX, \
+ RATE_##pp##M_IDX, \
+ RATE_##np##M_IDX }
+
+/*
+ * Parameter order:
+ * rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate
+ *
+ * If there isn't a valid next or previous rate then INV is used which
+ * maps to RATE_INVALID
+ *
+ */
+const struct il_rate_info il_rates[RATE_COUNT] = {
+ IL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2), /* 1mbps */
+ IL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5), /* 2mbps */
+ IL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11), /*5.5mbps */
+ IL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18), /* 11mbps */
+ IL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11), /* 6mbps */
+ IL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11), /* 9mbps */
+ IL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18), /* 12mbps */
+ IL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24), /* 18mbps */
+ IL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36), /* 24mbps */
+ IL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48), /* 36mbps */
+ IL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54), /* 48mbps */
+ IL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */
+ IL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */
+};
+
+static int
+il4965_hwrate_to_plcp_idx(u32 rate_n_flags)
+{
+ int idx = 0;
+
+ /* HT rate format */
+ if (rate_n_flags & RATE_MCS_HT_MSK) {
+ idx = (rate_n_flags & 0xff);
+
+ if (idx >= RATE_MIMO2_6M_PLCP)
+ idx = idx - RATE_MIMO2_6M_PLCP;
+
+ idx += IL_FIRST_OFDM_RATE;
+ /* skip 9M, which is not supported in HT */
+ if (idx >= RATE_9M_IDX)
+ idx += 1;
+ if (idx >= IL_FIRST_OFDM_RATE && idx <= IL_LAST_OFDM_RATE)
+ return idx;
+
+ /* legacy rate format, search for match in table */
+ } else {
+ for (idx = 0; idx < ARRAY_SIZE(il_rates); idx++)
+ if (il_rates[idx].plcp == (rate_n_flags & 0xFF))
+ return idx;
+ }
+
+ return -1;
+}
+
+static void il4965_rs_rate_scale_perform(struct il_priv *il,
+ struct sk_buff *skb,
+ struct ieee80211_sta *sta,
+ struct il_lq_sta *lq_sta);
+static void il4965_rs_fill_link_cmd(struct il_priv *il,
+ struct il_lq_sta *lq_sta, u32 rate_n_flags);
+static void il4965_rs_stay_in_table(struct il_lq_sta *lq_sta,
+ bool force_search);
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+static void il4965_rs_dbgfs_set_mcs(struct il_lq_sta *lq_sta,
+ u32 *rate_n_flags, int idx);
+#else
+static void
+il4965_rs_dbgfs_set_mcs(struct il_lq_sta *lq_sta, u32 * rate_n_flags, int idx)
+{
+}
+#endif
+
+/**
+ * The following tables contain the expected throughput metrics for all rates
+ *
+ * 1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits
+ *
+ * where invalid entries are zeros.
+ *
+ * CCK rates are only valid in legacy table and will only be used in G
+ * (2.4 GHz) band.
+ */
+
+static s32 expected_tpt_legacy[RATE_COUNT] = {
+ 7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0
+};
+
+static s32 expected_tpt_siso20MHz[4][RATE_COUNT] = {
+ {0, 0, 0, 0, 42, 0, 76, 102, 124, 158, 183, 193, 202}, /* Norm */
+ {0, 0, 0, 0, 46, 0, 82, 110, 132, 167, 192, 202, 210}, /* SGI */
+ {0, 0, 0, 0, 48, 0, 93, 135, 176, 251, 319, 351, 381}, /* AGG */
+ {0, 0, 0, 0, 53, 0, 102, 149, 193, 275, 348, 381, 413}, /* AGG+SGI */
+};
+
+static s32 expected_tpt_siso40MHz[4][RATE_COUNT] = {
+ {0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257}, /* Norm */
+ {0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264}, /* SGI */
+ {0, 0, 0, 0, 96, 0, 182, 259, 328, 451, 553, 598, 640}, /* AGG */
+ {0, 0, 0, 0, 106, 0, 199, 282, 357, 487, 593, 640, 683}, /* AGG+SGI */
+};
+
+static s32 expected_tpt_mimo2_20MHz[4][RATE_COUNT] = {
+ {0, 0, 0, 0, 74, 0, 123, 155, 179, 213, 235, 243, 250}, /* Norm */
+ {0, 0, 0, 0, 81, 0, 131, 164, 187, 221, 242, 250, 256}, /* SGI */
+ {0, 0, 0, 0, 92, 0, 175, 250, 317, 436, 534, 578, 619}, /* AGG */
+ {0, 0, 0, 0, 102, 0, 192, 273, 344, 470, 573, 619, 660}, /* AGG+SGI */
+};
+
+static s32 expected_tpt_mimo2_40MHz[4][RATE_COUNT] = {
+ {0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289}, /* Norm */
+ {0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293}, /* SGI */
+ {0, 0, 0, 0, 180, 0, 327, 446, 545, 708, 828, 878, 922}, /* AGG */
+ {0, 0, 0, 0, 197, 0, 355, 481, 584, 752, 872, 922, 966}, /* AGG+SGI */
+};
+
+/* mbps, mcs */
+static const struct il_rate_mcs_info il_rate_mcs[RATE_COUNT] = {
+ {"1", "BPSK DSSS"},
+ {"2", "QPSK DSSS"},
+ {"5.5", "BPSK CCK"},
+ {"11", "QPSK CCK"},
+ {"6", "BPSK 1/2"},
+ {"9", "BPSK 1/2"},
+ {"12", "QPSK 1/2"},
+ {"18", "QPSK 3/4"},
+ {"24", "16QAM 1/2"},
+ {"36", "16QAM 3/4"},
+ {"48", "64QAM 2/3"},
+ {"54", "64QAM 3/4"},
+ {"60", "64QAM 5/6"},
+};
+
+#define MCS_IDX_PER_STREAM (8)
+
+static inline u8
+il4965_rs_extract_rate(u32 rate_n_flags)
+{
+ return (u8) (rate_n_flags & 0xFF);
+}
+
+static void
+il4965_rs_rate_scale_clear_win(struct il_rate_scale_data *win)
+{
+ win->data = 0;
+ win->success_counter = 0;
+ win->success_ratio = IL_INVALID_VALUE;
+ win->counter = 0;
+ win->average_tpt = IL_INVALID_VALUE;
+ win->stamp = 0;
+}
+
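+/* Check that every antenna in ant_type is present in the valid_antenna mask */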
+static inline u8
+il4965_rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
+{
+ return (ant_type & valid_antenna) == ant_type;
+}
+
+/*
+ * removes the old data from the stats. All data that is older than
+ * TID_MAX_TIME_DIFF, will be deleted.
+ */
+static void
+il4965_rs_tl_rm_old_stats(struct il_traffic_load *tl, u32 curr_time)
+{
+ /* The oldest age we want to keep */
+ u32 oldest_time = curr_time - TID_MAX_TIME_DIFF;
+
+ while (tl->queue_count && tl->time_stamp < oldest_time) {
+ tl->total -= tl->packet_count[tl->head];
+ tl->packet_count[tl->head] = 0;
+ tl->time_stamp += TID_QUEUE_CELL_SPACING;
+ tl->queue_count--;
+ tl->head++;
+ if (tl->head >= TID_QUEUE_MAX_SIZE)
+ tl->head = 0;
+ }
+}
+
+/*
+ * increment traffic load value for tid and also remove
+ * any old values if passed the certain time period
+ */
+static u8
+il4965_rs_tl_add_packet(struct il_lq_sta *lq_data, struct ieee80211_hdr *hdr)
+{
+ u32 curr_time = jiffies_to_msecs(jiffies);
+ u32 time_diff;
+ s32 idx;
+ struct il_traffic_load *tl = NULL;
+ u8 tid;
+
+ if (ieee80211_is_data_qos(hdr->frame_control)) {
+ u8 *qc = ieee80211_get_qos_ctl(hdr);
+ tid = qc[0] & 0xf;
+ } else
+ return MAX_TID_COUNT;
+
+ if (unlikely(tid >= TID_MAX_LOAD_COUNT))
+ return MAX_TID_COUNT;
+
+ tl = &lq_data->load[tid];
+
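+ /* Round the timestamp down to the traffic load accounting interval */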
+ curr_time -= curr_time % TID_ROUND_VALUE;
+
+ /* Happens only for the first packet. Initialize the data */
+ if (!(tl->queue_count)) {
+ tl->total = 1;
+ tl->time_stamp = curr_time;
+ tl->queue_count = 1;
+ tl->head = 0;
+ tl->packet_count[0] = 1;
+ return MAX_TID_COUNT;
+ }
+
+ time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
+ idx = time_diff / TID_QUEUE_CELL_SPACING;
+
+ /* The history is too long: remove data older than TID_MAX_TIME_DIFF */
+ if (idx >= TID_QUEUE_MAX_SIZE)
+ il4965_rs_tl_rm_old_stats(tl, curr_time);
+
+ idx = (tl->head + idx) % TID_QUEUE_MAX_SIZE;
+ tl->packet_count[idx] = tl->packet_count[idx] + 1;
+ tl->total = tl->total + 1;
+
+ if ((idx + 1) > tl->queue_count)
+ tl->queue_count = idx + 1;
+
+ return tid;
+}
+
+/*
+ * get the traffic load value for tid
+ */
+static u32
+il4965_rs_tl_get_load(struct il_lq_sta *lq_data, u8 tid)
+{
+ u32 curr_time = jiffies_to_msecs(jiffies);
+ u32 time_diff;
+ s32 idx;
+ struct il_traffic_load *tl = NULL;
+
+ if (tid >= TID_MAX_LOAD_COUNT)
+ return 0;
+
+ tl = &(lq_data->load[tid]);
+
+ curr_time -= curr_time % TID_ROUND_VALUE;
+
+ if (!(tl->queue_count))
+ return 0;
+
+ time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
+ idx = time_diff / TID_QUEUE_CELL_SPACING;
+
+ /* The history is too long: remove data older than TID_MAX_TIME_DIFF */
+ if (idx >= TID_QUEUE_MAX_SIZE)
+ il4965_rs_tl_rm_old_stats(tl, curr_time);
+
+ return tl->total;
+}
+
+static int
+il4965_rs_tl_turn_on_agg_for_tid(struct il_priv *il, struct il_lq_sta *lq_data,
+ u8 tid, struct ieee80211_sta *sta)
+{
+ int ret = -EAGAIN;
+ u32 load;
+
+ load = il4965_rs_tl_get_load(lq_data, tid);
+
+ if (load > IL_AGG_LOAD_THRESHOLD) {
+ D_HT("Starting Tx agg: STA: %pM tid: %d\n", sta->addr, tid);
+ ret = ieee80211_start_tx_ba_session(sta, tid, 5000);
+ if (ret == -EAGAIN) {
+ /*
+ * The driver and mac80211 are out of sync; this might be
+ * caused by reloading the firmware. Stop the Tx BA session here.
+ */
+ IL_ERR("Fail start Tx agg on tid: %d\n", tid);
+ ieee80211_stop_tx_ba_session(sta, tid);
+ }
+ } else
+ D_HT("Aggregation not enabled for tid %d because load = %u\n",
+ tid, load);
+
+ return ret;
+}
+
+static void
+il4965_rs_tl_turn_on_agg(struct il_priv *il, u8 tid, struct il_lq_sta *lq_data,
+ struct ieee80211_sta *sta)
+{
+ if (tid < TID_MAX_LOAD_COUNT)
+ il4965_rs_tl_turn_on_agg_for_tid(il, lq_data, tid, sta);
+ else
+ IL_ERR("tid exceeds max load count: %d/%d\n", tid,
+ TID_MAX_LOAD_COUNT);
+}
+
+static inline int
+il4965_get_il4965_num_of_ant_from_rate(u32 rate_n_flags)
+{
+ return !!(rate_n_flags & RATE_MCS_ANT_A_MSK) +
+ !!(rate_n_flags & RATE_MCS_ANT_B_MSK) +
+ !!(rate_n_flags & RATE_MCS_ANT_C_MSK);
+}
+
+/*
+ * Get the expected throughput entry for rs_idx from an il_scale_tbl_info,
+ * guarding against a NULL expected_tpt table.
+ */
+static s32
+il4965_get_expected_tpt(struct il_scale_tbl_info *tbl, int rs_idx)
+{
+ if (tbl->expected_tpt)
+ return tbl->expected_tpt[rs_idx];
+ return 0;
+}
+
+/**
+ * il4965_rs_collect_tx_data - Update the success/failure sliding win
+ *
+ * We keep a sliding win of the last 62 packets transmitted
+ * at this rate. win->data contains the bitmask of successful
+ * packets.
+ */
+static int
+il4965_rs_collect_tx_data(struct il_scale_tbl_info *tbl, int scale_idx,
+ int attempts, int successes)
+{
+ struct il_rate_scale_data *win = NULL;
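+ /* Bit for the oldest slot in the RATE_MAX_WINDOW-deep history */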
+ static const u64 mask = (((u64) 1) << (RATE_MAX_WINDOW - 1));
+ s32 fail_count, tpt;
+
+ if (scale_idx < 0 || scale_idx >= RATE_COUNT)
+ return -EINVAL;
+
+ /* Select win for current tx bit rate */
+ win = &(tbl->win[scale_idx]);
+
+ /* Get expected throughput */
+ tpt = il4965_get_expected_tpt(tbl, scale_idx);
+
+ /*
+ * Keep track of only the latest 62 tx frame attempts in this rate's
+ * history win; anything older isn't really relevant any more.
+ * If we have filled up the sliding win, drop the oldest attempt;
+ * if the oldest attempt (highest bit in bitmap) shows "success",
+ * subtract "1" from the success counter (this is the main reason
+ * we keep these bitmaps!).
+ */
+ while (attempts > 0) {
+ if (win->counter >= RATE_MAX_WINDOW) {
+
+ /* remove earliest */
+ win->counter = RATE_MAX_WINDOW - 1;
+
+ if (win->data & mask) {
+ win->data &= ~mask;
+ win->success_counter--;
+ }
+ }
+
+ /* Increment frames-attempted counter */
+ win->counter++;
+
+ /* Shift bitmap by one frame to throw away oldest history */
+ win->data <<= 1;
+
+ /* Mark the most recent #successes attempts as successful */
+ if (successes > 0) {
+ win->success_counter++;
+ win->data |= 0x1;
+ successes--;
+ }
+
+ attempts--;
+ }
+
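+ /* success_ratio is a percentage scaled by 128 for integer precision */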
+ /* Calculate current success ratio, avoid divide-by-0! */
+ if (win->counter > 0)
+ win->success_ratio =
+ 128 * (100 * win->success_counter) / win->counter;
+ else
+ win->success_ratio = IL_INVALID_VALUE;
+
+ fail_count = win->counter - win->success_counter;
+
+ /* Calculate average throughput, if we have enough history. */
+ if (fail_count >= RATE_MIN_FAILURE_TH ||
+ win->success_counter >= RATE_MIN_SUCCESS_TH)
+ win->average_tpt = (win->success_ratio * tpt + 64) / 128;
+ else
+ win->average_tpt = IL_INVALID_VALUE;
+
+ /* Tag this win as having been updated */
+ win->stamp = jiffies;
+
+ return 0;
+}
+
+/*
+ * Fill uCode API rate_n_flags field, based on "search" or "active" table.
+ */
+static u32
+il4965_rate_n_flags_from_tbl(struct il_priv *il, struct il_scale_tbl_info *tbl,
+ int idx, u8 use_green)
+{
+ u32 rate_n_flags = 0;
+
+ if (is_legacy(tbl->lq_type)) {
+ rate_n_flags = il_rates[idx].plcp;
+ if (idx >= IL_FIRST_CCK_RATE && idx <= IL_LAST_CCK_RATE)
+ rate_n_flags |= RATE_MCS_CCK_MSK;
+
+ } else if (is_Ht(tbl->lq_type)) {
+ if (idx > IL_LAST_OFDM_RATE) {
+ IL_ERR("Invalid HT rate idx %d\n", idx);
+ idx = IL_LAST_OFDM_RATE;
+ }
+ rate_n_flags = RATE_MCS_HT_MSK;
+
+ if (is_siso(tbl->lq_type))
+ rate_n_flags |= il_rates[idx].plcp_siso;
+ else
+ rate_n_flags |= il_rates[idx].plcp_mimo2;
+ } else {
+ IL_ERR("Invalid tbl->lq_type %d\n", tbl->lq_type);
+ }
+
+ rate_n_flags |=
+ ((tbl->ant_type << RATE_MCS_ANT_POS) & RATE_MCS_ANT_ABC_MSK);
+
+ if (is_Ht(tbl->lq_type)) {
+ if (tbl->is_ht40) {
+ if (tbl->is_dup)
+ rate_n_flags |= RATE_MCS_DUP_MSK;
+ else
+ rate_n_flags |= RATE_MCS_HT40_MSK;
+ }
+ if (tbl->is_SGI)
+ rate_n_flags |= RATE_MCS_SGI_MSK;
+
+ if (use_green) {
+ rate_n_flags |= RATE_MCS_GF_MSK;
+ if (is_siso(tbl->lq_type) && tbl->is_SGI) {
+ rate_n_flags &= ~RATE_MCS_SGI_MSK;
+ IL_ERR("GF was set with SGI:SISO\n");
+ }
+ }
+ }
+ return rate_n_flags;
+}
+
+/*
+ * Interpret uCode API's rate_n_flags format,
+ * fill "search" or "active" tx mode table.
+ */
+static int
+il4965_rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
+ enum ieee80211_band band,
+ struct il_scale_tbl_info *tbl, int *rate_idx)
+{
+ u32 ant_msk = (rate_n_flags & RATE_MCS_ANT_ABC_MSK);
+ u8 il4965_num_of_ant =
+ il4965_get_il4965_num_of_ant_from_rate(rate_n_flags);
+ u8 mcs;
+
+ memset(tbl, 0, sizeof(struct il_scale_tbl_info));
+ *rate_idx = il4965_hwrate_to_plcp_idx(rate_n_flags);
+
+ if (*rate_idx == RATE_INVALID) {
+ *rate_idx = -1;
+ return -EINVAL;
+ }
+ tbl->is_SGI = 0; /* default legacy setup */
+ tbl->is_ht40 = 0;
+ tbl->is_dup = 0;
+ tbl->ant_type = (ant_msk >> RATE_MCS_ANT_POS);
+ tbl->lq_type = LQ_NONE;
+ tbl->max_search = IL_MAX_SEARCH;
+
+ /* legacy rate format */
+ if (!(rate_n_flags & RATE_MCS_HT_MSK)) {
+ if (il4965_num_of_ant == 1) {
+ if (band == IEEE80211_BAND_5GHZ)
+ tbl->lq_type = LQ_A;
+ else
+ tbl->lq_type = LQ_G;
+ }
+ /* HT rate format */
+ } else {
+ if (rate_n_flags & RATE_MCS_SGI_MSK)
+ tbl->is_SGI = 1;
+
+ if ((rate_n_flags & RATE_MCS_HT40_MSK) ||
+ (rate_n_flags & RATE_MCS_DUP_MSK))
+ tbl->is_ht40 = 1;
+
+ if (rate_n_flags & RATE_MCS_DUP_MSK)
+ tbl->is_dup = 1;
+
+ mcs = il4965_rs_extract_rate(rate_n_flags);
+
+ /* SISO */
+ if (mcs <= RATE_SISO_60M_PLCP) {
+ if (il4965_num_of_ant == 1)
+ tbl->lq_type = LQ_SISO; /*else NONE */
+ /* MIMO2 */
+ } else {
+ if (il4965_num_of_ant == 2)
+ tbl->lq_type = LQ_MIMO2;
+ }
+ }
+ return 0;
+}
+
+/* switch to another antenna/antennas and return 1 */
+/* if no other valid antenna found, return 0 */
+static int
+il4965_rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
+ struct il_scale_tbl_info *tbl)
+{
+ u8 new_ant_type;
+
+ if (!tbl->ant_type || tbl->ant_type > ANT_ABC)
+ return 0;
+
+ if (!il4965_rs_is_valid_ant(valid_ant, tbl->ant_type))
+ return 0;
+
+ new_ant_type = ant_toggle_lookup[tbl->ant_type];
+
+ while (new_ant_type != tbl->ant_type &&
+ !il4965_rs_is_valid_ant(valid_ant, new_ant_type))
+ new_ant_type = ant_toggle_lookup[new_ant_type];
+
+ if (new_ant_type == tbl->ant_type)
+ return 0;
+
+ tbl->ant_type = new_ant_type;
+ *rate_n_flags &= ~RATE_MCS_ANT_ABC_MSK;
+ *rate_n_flags |= new_ant_type << RATE_MCS_ANT_POS;
+ return 1;
+}
+
+/**
+ * Green-field mode is valid if the station supports it and
+ * there are no non-GF stations present in the BSS.
+ */
+static bool
+il4965_rs_use_green(struct ieee80211_sta *sta)
+{
+ struct il_station_priv *sta_priv = (void *)sta->drv_priv;
+ struct il_rxon_context *ctx = sta_priv->common.ctx;
+
+ return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) &&
+ !(ctx->ht.non_gf_sta_present);
+}
+
+/**
+ * il4965_rs_get_supported_rates - get the available rates
+ *
+ * If this is a management or broadcast frame, only the basic
+ * available rates are returned.
+ *
+ */
+static u16
+il4965_rs_get_supported_rates(struct il_lq_sta *lq_sta,
+ struct ieee80211_hdr *hdr,
+ enum il_table_type rate_type)
+{
+ if (is_legacy(rate_type)) {
+ return lq_sta->active_legacy_rate;
+ } else {
+ if (is_siso(rate_type))
+ return lq_sta->active_siso_rate;
+ else
+ return lq_sta->active_mimo2_rate;
+ }
+}
+
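+/*
+ * Find the next lower and next higher rate in rate_mask around idx.
+ * The result is packed as (high << 8) | low, with RATE_INVALID in
+ * either byte when no such neighbor exists.
+ */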
+static u16
+il4965_rs_get_adjacent_rate(struct il_priv *il, u8 idx, u16 rate_mask,
+ int rate_type)
+{
+ u8 high = RATE_INVALID;
+ u8 low = RATE_INVALID;
+
+ /* For 802.11a or HT, walk to the next literally adjacent rate in
+ * the rate table */
+ if (is_a_band(rate_type) || !is_legacy(rate_type)) {
+ int i;
+ u32 mask;
+
+ /* Find the previous rate that is in the rate mask */
+ i = idx - 1;
+ for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
+ if (rate_mask & mask) {
+ low = i;
+ break;
+ }
+ }
+
+ /* Find the next rate that is in the rate mask */
+ i = idx + 1;
+ for (mask = (1 << i); i < RATE_COUNT; i++, mask <<= 1) {
+ if (rate_mask & mask) {
+ high = i;
+ break;
+ }
+ }
+
+ return (high << 8) | low;
+ }
+
+ low = idx;
+ while (low != RATE_INVALID) {
+ low = il_rates[low].prev_rs;
+ if (low == RATE_INVALID)
+ break;
+ if (rate_mask & (1 << low))
+ break;
+ D_RATE("Skipping masked lower rate: %d\n", low);
+ }
+
+ high = idx;
+ while (high != RATE_INVALID) {
+ high = il_rates[high].next_rs;
+ if (high == RATE_INVALID)
+ break;
+ if (rate_mask & (1 << high))
+ break;
+ D_RATE("Skipping masked higher rate: %d\n", high);
+ }
+
+ return (high << 8) | low;
+}
+
+static u32
+il4965_rs_get_lower_rate(struct il_lq_sta *lq_sta,
+ struct il_scale_tbl_info *tbl, u8 scale_idx,
+ u8 ht_possible)
+{
+ s32 low;
+ u16 rate_mask;
+ u16 high_low;
+ u8 switch_to_legacy = 0;
+ u8 is_green = lq_sta->is_green;
+ struct il_priv *il = lq_sta->drv;
+
+ /* check if we need to switch from HT to legacy rates.
+ * assumption is that mandatory rates (1Mbps or 6Mbps)
+ * are always supported (spec demand) */
+ if (!is_legacy(tbl->lq_type) && (!ht_possible || !scale_idx)) {
+ switch_to_legacy = 1;
+ scale_idx = rs_ht_to_legacy[scale_idx];
+ if (lq_sta->band == IEEE80211_BAND_5GHZ)
+ tbl->lq_type = LQ_A;
+ else
+ tbl->lq_type = LQ_G;
+
+ if (il4965_num_of_ant(tbl->ant_type) > 1)
+ tbl->ant_type =
+ il4965_first_antenna(il->hw_params.valid_tx_ant);
+
+ tbl->is_ht40 = 0;
+ tbl->is_SGI = 0;
+ tbl->max_search = IL_MAX_SEARCH;
+ }
+
+ rate_mask = il4965_rs_get_supported_rates(lq_sta, NULL, tbl->lq_type);
+
+ /* Mask with station rate restriction */
+ if (is_legacy(tbl->lq_type)) {
+ /* supp_rates has no CCK bits in A mode */
+ if (lq_sta->band == IEEE80211_BAND_5GHZ)
+ rate_mask =
+ (u16) (rate_mask &
+ (lq_sta->supp_rates << IL_FIRST_OFDM_RATE));
+ else
+ rate_mask = (u16) (rate_mask & lq_sta->supp_rates);
+ }
+
+ /* If we switched from HT to legacy, check current rate */
+ if (switch_to_legacy && (rate_mask & (1 << scale_idx))) {
+ low = scale_idx;
+ goto out;
+ }
+
+ high_low =
+ il4965_rs_get_adjacent_rate(lq_sta->drv, scale_idx, rate_mask,
+ tbl->lq_type);
+ low = high_low & 0xff;
+
+ if (low == RATE_INVALID)
+ low = scale_idx;
+
+out:
+ return il4965_rate_n_flags_from_tbl(lq_sta->drv, tbl, low, is_green);
+}
+
+/*
+ * Simple function to compare two rate scale table types
+ */
+static bool
+il4965_table_type_matches(struct il_scale_tbl_info *a,
+ struct il_scale_tbl_info *b)
+{
+ return (a->lq_type == b->lq_type && a->ant_type == b->ant_type &&
+ a->is_SGI == b->is_SGI);
+}
+
+/*
+ * mac80211 sends us Tx status
+ */
+static void
+il4965_rs_tx_status(void *il_r, struct ieee80211_supported_band *sband,
+ struct ieee80211_sta *sta, void *il_sta,
+ struct sk_buff *skb)
+{
+ int legacy_success;
+ int retries;
+ int rs_idx, mac_idx, i;
+ struct il_lq_sta *lq_sta = il_sta;
+ struct il_link_quality_cmd *table;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct il_priv *il = (struct il_priv *)il_r;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ enum mac80211_rate_control_flags mac_flags;
+ u32 tx_rate;
+ struct il_scale_tbl_info tbl_type;
+ struct il_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
+ struct il_station_priv *sta_priv = (void *)sta->drv_priv;
+ struct il_rxon_context *ctx = sta_priv->common.ctx;
+
+ D_RATE("get frame ack response, update rate scale win\n");
+
+ /* Treat uninitialized rate scaling data same as non-existing. */
+ if (!lq_sta) {
+ D_RATE("Station rate scaling not created yet.\n");
+ return;
+ } else if (!lq_sta->drv) {
+ D_RATE("Rate scaling not initialized yet.\n");
+ return;
+ }
+
+ if (!ieee80211_is_data(hdr->frame_control) ||
+ (info->flags & IEEE80211_TX_CTL_NO_ACK))
+ return;
+
+ /* This packet was aggregated but doesn't carry status info */
+ if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
+ !(info->flags & IEEE80211_TX_STAT_AMPDU))
+ return;
+
+ /*
+ * Ignore this Tx frame response if its initial rate doesn't match
+ * that of latest Link Quality command. There may be stragglers
+ * from a previous Link Quality command, but we're no longer interested
+ * in those; they're either from the "active" mode while we're trying
+ * to check "search" mode, or a prior "search" mode after we've moved
+ * to a new "search" mode (which might become the new "active" mode).
+ */
+ table = &lq_sta->lq;
+ tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
+ il4965_rs_get_tbl_info_from_mcs(tx_rate, il->band, &tbl_type, &rs_idx);
+ if (il->band == IEEE80211_BAND_5GHZ)
+ rs_idx -= IL_FIRST_OFDM_RATE;
+ mac_flags = info->status.rates[0].flags;
+ mac_idx = info->status.rates[0].idx;
+ /* For HT packets, map MCS to PLCP */
+ if (mac_flags & IEEE80211_TX_RC_MCS) {
+ mac_idx &= RATE_MCS_CODE_MSK; /* Remove # of streams */
+ if (mac_idx >= (RATE_9M_IDX - IL_FIRST_OFDM_RATE))
+ mac_idx++;
+ /*
+ * mac80211 HT idx is always zero-based; we need to move
+ * HT OFDM rates after CCK rates in 2.4 GHz band
+ */
+ if (il->band == IEEE80211_BAND_2GHZ)
+ mac_idx += IL_FIRST_OFDM_RATE;
+ }
+ /* Here we actually compare this rate to the latest LQ command */
+ if (mac_idx < 0 ||
+ tbl_type.is_SGI != !!(mac_flags & IEEE80211_TX_RC_SHORT_GI) ||
+ tbl_type.is_ht40 != !!(mac_flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ||
+ tbl_type.is_dup != !!(mac_flags & IEEE80211_TX_RC_DUP_DATA) ||
+ tbl_type.ant_type != info->antenna_sel_tx ||
+ !!(tx_rate & RATE_MCS_HT_MSK) != !!(mac_flags & IEEE80211_TX_RC_MCS)
+ || !!(tx_rate & RATE_MCS_GF_MSK) !=
+ !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD) || rs_idx != mac_idx) {
+ D_RATE("initial rate %d does not match %d (0x%x)\n", mac_idx,
+ rs_idx, tx_rate);
+ /*
+ * Since rates mis-match, the last LQ command may have failed.
+ * After IL_MISSED_RATE_MAX mis-matches, resync the uCode with
+ * ... driver.
+ */
+ lq_sta->missed_rate_counter++;
+ if (lq_sta->missed_rate_counter > IL_MISSED_RATE_MAX) {
+ lq_sta->missed_rate_counter = 0;
+ il_send_lq_cmd(il, ctx, &lq_sta->lq, CMD_ASYNC, false);
+ }
+ /* Regardless, ignore this status info for outdated rate */
+ return;
+ } else
+ /* Rate did match, so reset the missed_rate_counter */
+ lq_sta->missed_rate_counter = 0;
+
+ /* Figure out if rate scale algorithm is in active or search table */
+ if (il4965_table_type_matches
+ (&tbl_type, &(lq_sta->lq_info[lq_sta->active_tbl]))) {
+ curr_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ other_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
+ } else
+ if (il4965_table_type_matches
+ (&tbl_type, &lq_sta->lq_info[1 - lq_sta->active_tbl])) {
+ curr_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
+ other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ } else {
+ D_RATE("Neither active nor search matches tx rate\n");
+ tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ D_RATE("active- lq:%x, ant:%x, SGI:%d\n", tmp_tbl->lq_type,
+ tmp_tbl->ant_type, tmp_tbl->is_SGI);
+ tmp_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
+ D_RATE("search- lq:%x, ant:%x, SGI:%d\n", tmp_tbl->lq_type,
+ tmp_tbl->ant_type, tmp_tbl->is_SGI);
+ D_RATE("actual- lq:%x, ant:%x, SGI:%d\n", tbl_type.lq_type,
+ tbl_type.ant_type, tbl_type.is_SGI);
+ /*
+ * no matching table found, let's by-pass the data collection
+ * and continue to perform rate scale to find the rate table
+ */
+ il4965_rs_stay_in_table(lq_sta, true);
+ goto done;
+ }
+
+ /*
+ * Updating the frame history depends on whether packets were
+ * aggregated.
+ *
+ * For aggregation, all packets were transmitted at the same rate, the
+ * first idx into rate scale table.
+ */
+ if (info->flags & IEEE80211_TX_STAT_AMPDU) {
+ tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
+ il4965_rs_get_tbl_info_from_mcs(tx_rate, il->band, &tbl_type,
+ &rs_idx);
+ il4965_rs_collect_tx_data(curr_tbl, rs_idx,
+ info->status.ampdu_len,
+ info->status.ampdu_ack_len);
+
+ /* Update success/fail counts if not searching for new mode */
+ if (lq_sta->stay_in_tbl) {
+ lq_sta->total_success += info->status.ampdu_ack_len;
+ lq_sta->total_failed +=
+ (info->status.ampdu_len -
+ info->status.ampdu_ack_len);
+ }
+ } else {
+ /*
+ * For legacy, update frame history for each Tx retry.
+ */
+ retries = info->status.rates[0].count - 1;
+ /* HW doesn't send more than 15 retries */
+ retries = min(retries, 15);
+
+ /* The last transmission may have been successful */
+ legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK);
+ /* Collect data for each rate used during failed TX attempts */
+ for (i = 0; i <= retries; ++i) {
+ tx_rate = le32_to_cpu(table->rs_table[i].rate_n_flags);
+ il4965_rs_get_tbl_info_from_mcs(tx_rate, il->band,
+ &tbl_type, &rs_idx);
+ /*
+ * Only collect stats if retried rate is in the same RS
+ * table as active/search.
+ */
+ if (il4965_table_type_matches(&tbl_type, curr_tbl))
+ tmp_tbl = curr_tbl;
+ else if (il4965_table_type_matches
+ (&tbl_type, other_tbl))
+ tmp_tbl = other_tbl;
+ else
+ continue;
+ il4965_rs_collect_tx_data(tmp_tbl, rs_idx, 1,
+ i < retries ? 0 : legacy_success);
+ }
+
+ /* Update success/fail counts if not searching for new mode */
+ if (lq_sta->stay_in_tbl) {
+ lq_sta->total_success += legacy_success;
+ lq_sta->total_failed += retries + (1 - legacy_success);
+ }
+ }
+ /* The last TX rate is cached in lq_sta; it's set in if/else above */
+ lq_sta->last_rate_n_flags = tx_rate;
+done:
+ /* See if there's a better rate or modulation mode to try. */
+ if (sta->supp_rates[sband->band])
+ il4965_rs_rate_scale_perform(il, skb, sta, lq_sta);
+}
+
+/*
+ * Begin a period of staying with a selected modulation mode.
+ * Set "stay_in_tbl" flag to prevent any mode switches.
+ * Set frame tx success limits according to legacy vs. high-throughput,
+ * and reset overall (spanning all rates) tx success history stats.
+ * These control how long we stay using same modulation mode before
+ * searching for a new mode.
+ */
+static void
+il4965_rs_set_stay_in_table(struct il_priv *il, u8 is_legacy,
+ struct il_lq_sta *lq_sta)
+{
+ D_RATE("we are staying in the same table\n");
+ lq_sta->stay_in_tbl = 1; /* only place this gets set */
+ if (is_legacy) {
+ lq_sta->table_count_limit = IL_LEGACY_TBL_COUNT;
+ lq_sta->max_failure_limit = IL_LEGACY_FAILURE_LIMIT;
+ lq_sta->max_success_limit = IL_LEGACY_SUCCESS_LIMIT;
+ } else {
+ lq_sta->table_count_limit = IL_NONE_LEGACY_TBL_COUNT;
+ lq_sta->max_failure_limit = IL_NONE_LEGACY_FAILURE_LIMIT;
+ lq_sta->max_success_limit = IL_NONE_LEGACY_SUCCESS_LIMIT;
+ }
+ lq_sta->table_count = 0;
+ lq_sta->total_failed = 0;
+ lq_sta->total_success = 0;
+ lq_sta->flush_timer = jiffies;
+ lq_sta->action_counter = 0;
+}
+
+/*
+ * Find correct throughput table for given mode of modulation
+ */
+static void
+il4965_rs_set_expected_tpt_table(struct il_lq_sta *lq_sta,
+ struct il_scale_tbl_info *tbl)
+{
+ /* Used to choose among HT tables */
+ s32(*ht_tbl_pointer)[RATE_COUNT];
+
+ /* Check for invalid LQ type */
+ if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_Ht(tbl->lq_type))) {
+ tbl->expected_tpt = expected_tpt_legacy;
+ return;
+ }
+
+ /* Legacy rates have only one table */
+ if (is_legacy(tbl->lq_type)) {
+ tbl->expected_tpt = expected_tpt_legacy;
+ return;
+ }
+
+ /* Choose among many HT tables depending on number of streams
+ * (SISO/MIMO2), channel width (20/40), SGI, and aggregation
+ * status */
+ if (is_siso(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
+ ht_tbl_pointer = expected_tpt_siso20MHz;
+ else if (is_siso(tbl->lq_type))
+ ht_tbl_pointer = expected_tpt_siso40MHz;
+ else if (is_mimo2(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
+ ht_tbl_pointer = expected_tpt_mimo2_20MHz;
+ else /* if (is_mimo2(tbl->lq_type)) <-- must be true */
+ ht_tbl_pointer = expected_tpt_mimo2_40MHz;
+
+ if (!tbl->is_SGI && !lq_sta->is_agg) /* Normal */
+ tbl->expected_tpt = ht_tbl_pointer[0];
+ else if (tbl->is_SGI && !lq_sta->is_agg) /* SGI */
+ tbl->expected_tpt = ht_tbl_pointer[1];
+ else if (!tbl->is_SGI && lq_sta->is_agg) /* AGG */
+ tbl->expected_tpt = ht_tbl_pointer[2];
+ else /* AGG+SGI */
+ tbl->expected_tpt = ht_tbl_pointer[3];
+}
+
+/*
+ * Find starting rate for new "search" high-throughput mode of modulation.
+ * Goal is to find lowest expected rate (under perfect conditions) that is
+ * above the current measured throughput of "active" mode, to give new mode
+ * a fair chance to prove itself without too many challenges.
+ *
+ * This gets called when transitioning to more aggressive modulation
+ * (i.e. legacy to SISO or MIMO, or SISO to MIMO), as well as less aggressive
+ * (i.e. MIMO to SISO). When moving to MIMO, bit rate will typically need
+ * to decrease to match "active" throughput. When moving from MIMO to SISO,
+ * bit rate will typically need to increase, but not if performance was bad.
+ */
+static s32
+il4965_rs_get_best_rate(struct il_priv *il, struct il_lq_sta *lq_sta,
+ struct il_scale_tbl_info *tbl, /* "search" */
+ u16 rate_mask, s8 idx)
+{
+ /* "active" values */
+ struct il_scale_tbl_info *active_tbl =
+ &(lq_sta->lq_info[lq_sta->active_tbl]);
+ s32 active_sr = active_tbl->win[idx].success_ratio;
+ s32 active_tpt = active_tbl->expected_tpt[idx];
+
+ /* expected "search" throughput */
+ s32 *tpt_tbl = tbl->expected_tpt;
+
+ s32 new_rate, high, low, start_hi;
+ u16 high_low;
+ s8 rate = idx;
+
+ new_rate = high = low = start_hi = RATE_INVALID;
+
+ for (;;) {
+ high_low =
+ il4965_rs_get_adjacent_rate(il, rate, rate_mask,
+ tbl->lq_type);
+
+ low = high_low & 0xff;
+ high = (high_low >> 8) & 0xff;
+
+ /*
+ * Lower the "search" bit rate, to give new "search" mode
+ * approximately the same throughput as "active" if:
+ *
+ * 1) "Active" mode has been working modestly well (but not
+ * great), and expected "search" throughput (under perfect
+ * conditions) at candidate rate is above the actual
+ * measured "active" throughput (but less than expected
+ * "active" throughput under perfect conditions).
+ * OR
+ * 2) "Active" mode has been working perfectly or very well
+ * and expected "search" throughput (under perfect
+ * conditions) at candidate rate is above expected
+ * "active" throughput (under perfect conditions).
+ */
+ if ((100 * tpt_tbl[rate] > lq_sta->last_tpt &&
+ (active_sr > RATE_DECREASE_TH && active_sr <= RATE_HIGH_TH
+ && tpt_tbl[rate] <= active_tpt)) ||
+ (active_sr >= RATE_SCALE_SWITCH &&
+ tpt_tbl[rate] > active_tpt)) {
+
+ /* (2nd or later pass)
+ * If we've already tried to raise the rate, and are
+ * now trying to lower it, use the higher rate. */
+ if (start_hi != RATE_INVALID) {
+ new_rate = start_hi;
+ break;
+ }
+
+ new_rate = rate;
+
+ /* Loop again with lower rate */
+ if (low != RATE_INVALID)
+ rate = low;
+
+ /* Lower rate not available, use the original */
+ else
+ break;
+
+ /* Else try to raise the "search" rate to match "active" */
+ } else {
+ /* (2nd or later pass)
+ * If we've already tried to lower the rate, and are
+ * now trying to raise it, use the lower rate. */
+ if (new_rate != RATE_INVALID)
+ break;
+
+ /* Loop again with higher rate */
+ else if (high != RATE_INVALID) {
+ start_hi = high;
+ rate = high;
+
+ /* Higher rate not available, use the original */
+ } else {
+ new_rate = rate;
+ break;
+ }
+ }
+ }
+
+ return new_rate;
+}
+
+/*
+ * Set up search table for MIMO2
+ */
+static int
+il4965_rs_switch_to_mimo2(struct il_priv *il, struct il_lq_sta *lq_sta,
+ struct ieee80211_conf *conf,
+ struct ieee80211_sta *sta,
+ struct il_scale_tbl_info *tbl, int idx)
+{
+ u16 rate_mask;
+ s32 rate;
+ s8 is_green = lq_sta->is_green;
+ struct il_station_priv *sta_priv = (void *)sta->drv_priv;
+ struct il_rxon_context *ctx = sta_priv->common.ctx;
+
+ if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
+ return -1;
+
+ if (((sta->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >> 2) ==
+ WLAN_HT_CAP_SM_PS_STATIC)
+ return -1;
+
+ /* Need both Tx chains/antennas to support MIMO */
+ if (il->hw_params.tx_chains_num < 2)
+ return -1;
+
+ D_RATE("LQ: try to switch to MIMO2\n");
+
+ tbl->lq_type = LQ_MIMO2;
+ tbl->is_dup = lq_sta->is_dup;
+ tbl->action = 0;
+ tbl->max_search = IL_MAX_SEARCH;
+ rate_mask = lq_sta->active_mimo2_rate;
+
+ if (il_is_ht40_tx_allowed(il, ctx, &sta->ht_cap))
+ tbl->is_ht40 = 1;
+ else
+ tbl->is_ht40 = 0;
+
+ il4965_rs_set_expected_tpt_table(lq_sta, tbl);
+
+ rate = il4965_rs_get_best_rate(il, lq_sta, tbl, rate_mask, idx);
+
+ D_RATE("LQ: MIMO2 best rate %d mask %X\n", rate, rate_mask);
+ if (rate == RATE_INVALID || !((1 << rate) & rate_mask)) {
+ D_RATE("Can't switch with idx %d rate mask %x\n", rate,
+ rate_mask);
+ return -1;
+ }
+ tbl->current_rate =
+ il4965_rate_n_flags_from_tbl(il, tbl, rate, is_green);
+
+ D_RATE("LQ: Switch to new mcs %X idx is green %X\n", tbl->current_rate,
+ is_green);
+ return 0;
+}
+
+/*
+ * Set up search table for SISO
+ */
+static int
+il4965_rs_switch_to_siso(struct il_priv *il, struct il_lq_sta *lq_sta,
+ struct ieee80211_conf *conf, struct ieee80211_sta *sta,
+ struct il_scale_tbl_info *tbl, int idx)
+{
+ u16 rate_mask;
+ u8 is_green = lq_sta->is_green;
+ s32 rate;
+ struct il_station_priv *sta_priv = (void *)sta->drv_priv;
+ struct il_rxon_context *ctx = sta_priv->common.ctx;
+
+ if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
+ return -1;
+
+ D_RATE("LQ: try to switch to SISO\n");
+
+ tbl->is_dup = lq_sta->is_dup;
+ tbl->lq_type = LQ_SISO;
+ tbl->action = 0;
+ tbl->max_search = IL_MAX_SEARCH;
+ rate_mask = lq_sta->active_siso_rate;
+
+ if (il_is_ht40_tx_allowed(il, ctx, &sta->ht_cap))
+ tbl->is_ht40 = 1;
+ else
+ tbl->is_ht40 = 0;
+
+ if (is_green)
+ tbl->is_SGI = 0; /*11n spec: no SGI in SISO+Greenfield */
+
+ il4965_rs_set_expected_tpt_table(lq_sta, tbl);
+ rate = il4965_rs_get_best_rate(il, lq_sta, tbl, rate_mask, idx);
+
+ D_RATE("LQ: get best rate %d mask %X\n", rate, rate_mask);
+ if (rate == RATE_INVALID || !((1 << rate) & rate_mask)) {
+ D_RATE("can not switch with idx %d rate mask %x\n", rate,
+ rate_mask);
+ return -1;
+ }
+ tbl->current_rate =
+ il4965_rate_n_flags_from_tbl(il, tbl, rate, is_green);
+ D_RATE("LQ: Switch to new mcs %X idx is green %X\n", tbl->current_rate,
+ is_green);
+ return 0;
+}
+
+/*
+ * Try to switch to new modulation mode from legacy
+ */
+static int
+il4965_rs_move_legacy_other(struct il_priv *il, struct il_lq_sta *lq_sta,
+ struct ieee80211_conf *conf,
+ struct ieee80211_sta *sta, int idx)
+{
+ struct il_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ struct il_scale_tbl_info *search_tbl =
+ &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+ struct il_rate_scale_data *win = &(tbl->win[idx]);
+ u32 sz =
+ (sizeof(struct il_scale_tbl_info) -
+ (sizeof(struct il_rate_scale_data) * RATE_COUNT));
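+	/* sz stops short of the per-rate history win[] array kept at the
+	 * end of the table, so the memcpy()s below copy table parameters
+	 * without the per-rate statistics windows. */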
+ u8 start_action;
+ u8 valid_tx_ant = il->hw_params.valid_tx_ant;
+ u8 tx_chains_num = il->hw_params.tx_chains_num;
+ int ret = 0;
+ u8 update_search_tbl_counter = 0;
+
+ tbl->action = IL_LEGACY_SWITCH_SISO;
+
+ start_action = tbl->action;
+ for (;;) {
+ lq_sta->action_counter++;
+ switch (tbl->action) {
+ case IL_LEGACY_SWITCH_ANTENNA1:
+ case IL_LEGACY_SWITCH_ANTENNA2:
+ D_RATE("LQ: Legacy toggle Antenna\n");
+
+ if ((tbl->action == IL_LEGACY_SWITCH_ANTENNA1 &&
+ tx_chains_num <= 1) ||
+ (tbl->action == IL_LEGACY_SWITCH_ANTENNA2 &&
+ tx_chains_num <= 2))
+ break;
+
+ /* Don't change antenna if success has been great */
+ if (win->success_ratio >= IL_RS_GOOD_RATIO)
+ break;
+
+ /* Set up search table to try other antenna */
+ memcpy(search_tbl, tbl, sz);
+
+ if (il4965_rs_toggle_antenna
+ (valid_tx_ant, &search_tbl->current_rate,
+ search_tbl)) {
+ update_search_tbl_counter = 1;
+ il4965_rs_set_expected_tpt_table(lq_sta,
+ search_tbl);
+ goto out;
+ }
+ break;
+ case IL_LEGACY_SWITCH_SISO:
+ D_RATE("LQ: Legacy switch to SISO\n");
+
+ /* Set up search table to try SISO */
+ memcpy(search_tbl, tbl, sz);
+ search_tbl->is_SGI = 0;
+ ret =
+ il4965_rs_switch_to_siso(il, lq_sta, conf, sta,
+ search_tbl, idx);
+ if (!ret) {
+ lq_sta->action_counter = 0;
+ goto out;
+ }
+
+ break;
+ case IL_LEGACY_SWITCH_MIMO2_AB:
+ case IL_LEGACY_SWITCH_MIMO2_AC:
+ case IL_LEGACY_SWITCH_MIMO2_BC:
+ D_RATE("LQ: Legacy switch to MIMO2\n");
+
+ /* Set up search table to try MIMO */
+ memcpy(search_tbl, tbl, sz);
+ search_tbl->is_SGI = 0;
+
+ if (tbl->action == IL_LEGACY_SWITCH_MIMO2_AB)
+ search_tbl->ant_type = ANT_AB;
+ else if (tbl->action == IL_LEGACY_SWITCH_MIMO2_AC)
+ search_tbl->ant_type = ANT_AC;
+ else
+ search_tbl->ant_type = ANT_BC;
+
+ if (!il4965_rs_is_valid_ant
+ (valid_tx_ant, search_tbl->ant_type))
+ break;
+
+ ret =
+ il4965_rs_switch_to_mimo2(il, lq_sta, conf, sta,
+ search_tbl, idx);
+ if (!ret) {
+ lq_sta->action_counter = 0;
+ goto out;
+ }
+ break;
+ }
+ tbl->action++;
+ if (tbl->action > IL_LEGACY_SWITCH_MIMO2_BC)
+ tbl->action = IL_LEGACY_SWITCH_ANTENNA1;
+
+ if (tbl->action == start_action)
+ break;
+
+ }
+ search_tbl->lq_type = LQ_NONE;
+ return 0;
+
+out:
+ lq_sta->search_better_tbl = 1;
+ tbl->action++;
+ if (tbl->action > IL_LEGACY_SWITCH_MIMO2_BC)
+ tbl->action = IL_LEGACY_SWITCH_ANTENNA1;
+ if (update_search_tbl_counter)
+ search_tbl->action = tbl->action;
+ return 0;
+
+}
+
+/*
+ * Try to switch to new modulation mode from SISO
+ */
+static int
+il4965_rs_move_siso_to_other(struct il_priv *il, struct il_lq_sta *lq_sta,
+ struct ieee80211_conf *conf,
+ struct ieee80211_sta *sta, int idx)
+{
+ u8 is_green = lq_sta->is_green;
+ struct il_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ struct il_scale_tbl_info *search_tbl =
+ &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+ struct il_rate_scale_data *win = &(tbl->win[idx]);
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ u32 sz =
+ (sizeof(struct il_scale_tbl_info) -
+ (sizeof(struct il_rate_scale_data) * RATE_COUNT));
+ u8 start_action;
+ u8 valid_tx_ant = il->hw_params.valid_tx_ant;
+ u8 tx_chains_num = il->hw_params.tx_chains_num;
+ u8 update_search_tbl_counter = 0;
+ int ret;
+
+ start_action = tbl->action;
+
+ for (;;) {
+ lq_sta->action_counter++;
+ switch (tbl->action) {
+ case IL_SISO_SWITCH_ANTENNA1:
+ case IL_SISO_SWITCH_ANTENNA2:
+ D_RATE("LQ: SISO toggle Antenna\n");
+ if ((tbl->action == IL_SISO_SWITCH_ANTENNA1 &&
+ tx_chains_num <= 1) ||
+ (tbl->action == IL_SISO_SWITCH_ANTENNA2 &&
+ tx_chains_num <= 2))
+ break;
+
+ if (win->success_ratio >= IL_RS_GOOD_RATIO)
+ break;
+
+ memcpy(search_tbl, tbl, sz);
+ if (il4965_rs_toggle_antenna
+ (valid_tx_ant, &search_tbl->current_rate,
+ search_tbl)) {
+ update_search_tbl_counter = 1;
+ goto out;
+ }
+ break;
+ case IL_SISO_SWITCH_MIMO2_AB:
+ case IL_SISO_SWITCH_MIMO2_AC:
+ case IL_SISO_SWITCH_MIMO2_BC:
+ D_RATE("LQ: SISO switch to MIMO2\n");
+ memcpy(search_tbl, tbl, sz);
+ search_tbl->is_SGI = 0;
+
+ if (tbl->action == IL_SISO_SWITCH_MIMO2_AB)
+ search_tbl->ant_type = ANT_AB;
+ else if (tbl->action == IL_SISO_SWITCH_MIMO2_AC)
+ search_tbl->ant_type = ANT_AC;
+ else
+ search_tbl->ant_type = ANT_BC;
+
+ if (!il4965_rs_is_valid_ant
+ (valid_tx_ant, search_tbl->ant_type))
+ break;
+
+ ret =
+ il4965_rs_switch_to_mimo2(il, lq_sta, conf, sta,
+ search_tbl, idx);
+ if (!ret)
+ goto out;
+ break;
+ case IL_SISO_SWITCH_GI:
+ if (!tbl->is_ht40 &&
+ !(ht_cap->cap & IEEE80211_HT_CAP_SGI_20))
+ break;
+ if (tbl->is_ht40 &&
+ !(ht_cap->cap & IEEE80211_HT_CAP_SGI_40))
+ break;
+
+ D_RATE("LQ: SISO toggle SGI/NGI\n");
+
+ memcpy(search_tbl, tbl, sz);
+ if (is_green) {
+ if (!tbl->is_SGI)
+ break;
+ else
+ IL_ERR("SGI was set in GF+SISO\n");
+ }
+ search_tbl->is_SGI = !tbl->is_SGI;
+ il4965_rs_set_expected_tpt_table(lq_sta, search_tbl);
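+			/*
+			 * If the active table already uses SGI and its
+			 * measured throughput is at least what the
+			 * long-GI table is expected to deliver, don't
+			 * bother toggling the guard interval.
+			 */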
+ if (tbl->is_SGI) {
+ s32 tpt = lq_sta->last_tpt / 100;
+ if (tpt >= search_tbl->expected_tpt[idx])
+ break;
+ }
+ search_tbl->current_rate =
+ il4965_rate_n_flags_from_tbl(il, search_tbl, idx,
+ is_green);
+ update_search_tbl_counter = 1;
+ goto out;
+ }
+ tbl->action++;
+ if (tbl->action > IL_SISO_SWITCH_GI)
+ tbl->action = IL_SISO_SWITCH_ANTENNA1;
+
+ if (tbl->action == start_action)
+ break;
+ }
+ search_tbl->lq_type = LQ_NONE;
+ return 0;
+
+out:
+ lq_sta->search_better_tbl = 1;
+ tbl->action++;
+ if (tbl->action > IL_SISO_SWITCH_GI)
+ tbl->action = IL_SISO_SWITCH_ANTENNA1;
+ if (update_search_tbl_counter)
+ search_tbl->action = tbl->action;
+
+ return 0;
+}
+
+/*
+ * Try to switch to new modulation mode from MIMO2
+ */
+static int
+il4965_rs_move_mimo2_to_other(struct il_priv *il, struct il_lq_sta *lq_sta,
+ struct ieee80211_conf *conf,
+ struct ieee80211_sta *sta, int idx)
+{
+ s8 is_green = lq_sta->is_green;
+ struct il_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ struct il_scale_tbl_info *search_tbl =
+ &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+ struct il_rate_scale_data *win = &(tbl->win[idx]);
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ u32 sz =
+ (sizeof(struct il_scale_tbl_info) -
+ (sizeof(struct il_rate_scale_data) * RATE_COUNT));
+ u8 start_action;
+ u8 valid_tx_ant = il->hw_params.valid_tx_ant;
+ u8 tx_chains_num = il->hw_params.tx_chains_num;
+ u8 update_search_tbl_counter = 0;
+ int ret;
+
+ start_action = tbl->action;
+ for (;;) {
+ lq_sta->action_counter++;
+ switch (tbl->action) {
+ case IL_MIMO2_SWITCH_ANTENNA1:
+ case IL_MIMO2_SWITCH_ANTENNA2:
+ D_RATE("LQ: MIMO2 toggle Antennas\n");
+
+ if (tx_chains_num <= 2)
+ break;
+
+ if (win->success_ratio >= IL_RS_GOOD_RATIO)
+ break;
+
+ memcpy(search_tbl, tbl, sz);
+ if (il4965_rs_toggle_antenna
+ (valid_tx_ant, &search_tbl->current_rate,
+ search_tbl)) {
+ update_search_tbl_counter = 1;
+ goto out;
+ }
+ break;
+ case IL_MIMO2_SWITCH_SISO_A:
+ case IL_MIMO2_SWITCH_SISO_B:
+ case IL_MIMO2_SWITCH_SISO_C:
+ D_RATE("LQ: MIMO2 switch to SISO\n");
+
+ /* Set up new search table for SISO */
+ memcpy(search_tbl, tbl, sz);
+
+ if (tbl->action == IL_MIMO2_SWITCH_SISO_A)
+ search_tbl->ant_type = ANT_A;
+ else if (tbl->action == IL_MIMO2_SWITCH_SISO_B)
+ search_tbl->ant_type = ANT_B;
+ else
+ search_tbl->ant_type = ANT_C;
+
+ if (!il4965_rs_is_valid_ant
+ (valid_tx_ant, search_tbl->ant_type))
+ break;
+
+ ret =
+ il4965_rs_switch_to_siso(il, lq_sta, conf, sta,
+ search_tbl, idx);
+ if (!ret)
+ goto out;
+
+ break;
+
+ case IL_MIMO2_SWITCH_GI:
+ if (!tbl->is_ht40 &&
+ !(ht_cap->cap & IEEE80211_HT_CAP_SGI_20))
+ break;
+ if (tbl->is_ht40 &&
+ !(ht_cap->cap & IEEE80211_HT_CAP_SGI_40))
+ break;
+
+ D_RATE("LQ: MIMO2 toggle SGI/NGI\n");
+
+ /* Set up new search table for MIMO2 */
+ memcpy(search_tbl, tbl, sz);
+ search_tbl->is_SGI = !tbl->is_SGI;
+ il4965_rs_set_expected_tpt_table(lq_sta, search_tbl);
+ /*
+ * If active table already uses the fastest possible
+ * modulation (dual stream with short guard interval),
+ * and it's working well, there's no need to look
+ * for a better type of modulation!
+ */
+ if (tbl->is_SGI) {
+ s32 tpt = lq_sta->last_tpt / 100;
+ if (tpt >= search_tbl->expected_tpt[idx])
+ break;
+ }
+ search_tbl->current_rate =
+ il4965_rate_n_flags_from_tbl(il, search_tbl, idx,
+ is_green);
+ update_search_tbl_counter = 1;
+ goto out;
+
+ }
+ tbl->action++;
+ if (tbl->action > IL_MIMO2_SWITCH_GI)
+ tbl->action = IL_MIMO2_SWITCH_ANTENNA1;
+
+ if (tbl->action == start_action)
+ break;
+ }
+ search_tbl->lq_type = LQ_NONE;
+ return 0;
+out:
+ lq_sta->search_better_tbl = 1;
+ tbl->action++;
+ if (tbl->action > IL_MIMO2_SWITCH_GI)
+ tbl->action = IL_MIMO2_SWITCH_ANTENNA1;
+ if (update_search_tbl_counter)
+ search_tbl->action = tbl->action;
+
+ return 0;
+
+}
+
+/*
+ * Check whether we should continue using same modulation mode, or
+ * begin search for a new mode, based on:
+ * 1) # tx successes or failures while using this mode
+ * 2) # times calling this function
+ * 3) elapsed time in this mode (not used, for now)
+ */
+static void
+il4965_rs_stay_in_table(struct il_lq_sta *lq_sta, bool force_search)
+{
+ struct il_scale_tbl_info *tbl;
+ int i;
+ int active_tbl;
+ int flush_interval_passed = 0;
+ struct il_priv *il;
+
+ il = lq_sta->drv;
+ active_tbl = lq_sta->active_tbl;
+
+ tbl = &(lq_sta->lq_info[active_tbl]);
+
+ /* If we've been disallowing search, see if we should now allow it */
+ if (lq_sta->stay_in_tbl) {
+
+ /* Elapsed time using current modulation mode */
+ if (lq_sta->flush_timer)
+ flush_interval_passed =
+ time_after(jiffies,
+ (unsigned long)(lq_sta->flush_timer +
+ RATE_SCALE_FLUSH_INTVL));
+
+ /*
+ * Check if we should allow search for new modulation mode.
+ * If many frames have failed or succeeded, or we've used
+ * this same modulation for a long time, allow search, and
+ * reset history stats that keep track of whether we should
+ * allow a new search. Also (below) reset all bitmaps and
+ * stats in active history.
+ */
+ if (force_search ||
+ lq_sta->total_failed > lq_sta->max_failure_limit ||
+ lq_sta->total_success > lq_sta->max_success_limit ||
+ (!lq_sta->search_better_tbl && lq_sta->flush_timer &&
+ flush_interval_passed)) {
+			D_RATE("LQ: stay is expired %d %d %d\n",
+ lq_sta->total_failed, lq_sta->total_success,
+ flush_interval_passed);
+
+ /* Allow search for new mode */
+ lq_sta->stay_in_tbl = 0; /* only place reset */
+ lq_sta->total_failed = 0;
+ lq_sta->total_success = 0;
+ lq_sta->flush_timer = 0;
+
+ /*
+ * Else if we've used this modulation mode enough repetitions
+ * (regardless of elapsed time or success/failure), reset
+ * history bitmaps and rate-specific stats for all rates in
+ * active table.
+ */
+ } else {
+ lq_sta->table_count++;
+ if (lq_sta->table_count >= lq_sta->table_count_limit) {
+ lq_sta->table_count = 0;
+
+ D_RATE("LQ: stay in table clear win\n");
+ for (i = 0; i < RATE_COUNT; i++)
+					il4965_rs_rate_scale_clear_win(
+						&(tbl->win[i]));
+ }
+ }
+
+ /* If transitioning to allow "search", reset all history
+ * bitmaps and stats in active table (this will become the new
+ * "search" table). */
+ if (!lq_sta->stay_in_tbl) {
+ for (i = 0; i < RATE_COUNT; i++)
+ il4965_rs_rate_scale_clear_win(&(tbl->win[i]));
+ }
+ }
+}
+
+/*
+ * Set up the rate table in uCode
+ */
+static void
+il4965_rs_update_rate_tbl(struct il_priv *il, struct il_rxon_context *ctx,
+ struct il_lq_sta *lq_sta,
+ struct il_scale_tbl_info *tbl, int idx, u8 is_green)
+{
+ u32 rate;
+
+ /* Update uCode's rate table. */
+ rate = il4965_rate_n_flags_from_tbl(il, tbl, idx, is_green);
+ il4965_rs_fill_link_cmd(il, lq_sta, rate);
+ il_send_lq_cmd(il, ctx, &lq_sta->lq, CMD_ASYNC, false);
+}
+
+/*
+ * Do rate scaling and search for new modulation mode.
+ */
+static void
+il4965_rs_rate_scale_perform(struct il_priv *il, struct sk_buff *skb,
+ struct ieee80211_sta *sta,
+ struct il_lq_sta *lq_sta)
+{
+ struct ieee80211_hw *hw = il->hw;
+ struct ieee80211_conf *conf = &hw->conf;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ int low = RATE_INVALID;
+ int high = RATE_INVALID;
+ int idx;
+ int i;
+ struct il_rate_scale_data *win = NULL;
+ int current_tpt = IL_INVALID_VALUE;
+ int low_tpt = IL_INVALID_VALUE;
+ int high_tpt = IL_INVALID_VALUE;
+ u32 fail_count;
+ s8 scale_action = 0;
+ u16 rate_mask;
+ u8 update_lq = 0;
+ struct il_scale_tbl_info *tbl, *tbl1;
+ u16 rate_scale_idx_msk = 0;
+ u8 is_green = 0;
+ u8 active_tbl = 0;
+ u8 done_search = 0;
+ u16 high_low;
+ s32 sr;
+ u8 tid = MAX_TID_COUNT;
+ struct il_tid_data *tid_data;
+ struct il_station_priv *sta_priv = (void *)sta->drv_priv;
+ struct il_rxon_context *ctx = sta_priv->common.ctx;
+
+ D_RATE("rate scale calculate new rate for skb\n");
+
+ /* Send management frames and NO_ACK data using lowest rate. */
+	/* TODO: this could probably be improved. */
+ if (!ieee80211_is_data(hdr->frame_control) ||
+ (info->flags & IEEE80211_TX_CTL_NO_ACK))
+ return;
+
+ lq_sta->supp_rates = sta->supp_rates[lq_sta->band];
+
+ tid = il4965_rs_tl_add_packet(lq_sta, hdr);
+ if (tid != MAX_TID_COUNT && (lq_sta->tx_agg_tid_en & (1 << tid))) {
+ tid_data = &il->stations[lq_sta->lq.sta_id].tid[tid];
+ if (tid_data->agg.state == IL_AGG_OFF)
+ lq_sta->is_agg = 0;
+ else
+ lq_sta->is_agg = 1;
+ } else
+ lq_sta->is_agg = 0;
+
+ /*
+ * Select rate-scale / modulation-mode table to work with in
+ * the rest of this function: "search" if searching for better
+ * modulation mode, or "active" if doing rate scaling within a mode.
+ */
+ if (!lq_sta->search_better_tbl)
+ active_tbl = lq_sta->active_tbl;
+ else
+ active_tbl = 1 - lq_sta->active_tbl;
+
+ tbl = &(lq_sta->lq_info[active_tbl]);
+ if (is_legacy(tbl->lq_type))
+ lq_sta->is_green = 0;
+ else
+ lq_sta->is_green = il4965_rs_use_green(sta);
+ is_green = lq_sta->is_green;
+
+ /* current tx rate */
+ idx = lq_sta->last_txrate_idx;
+
+ D_RATE("Rate scale idx %d for type %d\n", idx, tbl->lq_type);
+
+ /* rates available for this association, and for modulation mode */
+ rate_mask = il4965_rs_get_supported_rates(lq_sta, hdr, tbl->lq_type);
+
+ D_RATE("mask 0x%04X\n", rate_mask);
+
+ /* mask with station rate restriction */
+ if (is_legacy(tbl->lq_type)) {
+ if (lq_sta->band == IEEE80211_BAND_5GHZ)
+ /* supp_rates has no CCK bits in A mode */
+ rate_scale_idx_msk =
+ (u16) (rate_mask &
+ (lq_sta->supp_rates << IL_FIRST_OFDM_RATE));
+ else
+ rate_scale_idx_msk =
+ (u16) (rate_mask & lq_sta->supp_rates);
+
+ } else
+ rate_scale_idx_msk = rate_mask;
+
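+	/* If masking with the station's supported rates left nothing,
+	 * fall back to the unrestricted mask for this modulation mode. */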
+ if (!rate_scale_idx_msk)
+ rate_scale_idx_msk = rate_mask;
+
+ if (!((1 << idx) & rate_scale_idx_msk)) {
+ IL_ERR("Current Rate is not valid\n");
+ if (lq_sta->search_better_tbl) {
+ /* revert to active table if search table is not valid */
+ tbl->lq_type = LQ_NONE;
+ lq_sta->search_better_tbl = 0;
+ tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ /* get "active" rate info */
+ idx = il4965_hwrate_to_plcp_idx(tbl->current_rate);
+ il4965_rs_update_rate_tbl(il, ctx, lq_sta, tbl, idx,
+ is_green);
+ }
+ return;
+ }
+
+ /* Get expected throughput table and history win for current rate */
+ if (!tbl->expected_tpt) {
+ IL_ERR("tbl->expected_tpt is NULL\n");
+ return;
+ }
+
+ /* force user max rate if set by user */
+ if (lq_sta->max_rate_idx != -1 && lq_sta->max_rate_idx < idx) {
+ idx = lq_sta->max_rate_idx;
+ update_lq = 1;
+ win = &(tbl->win[idx]);
+ goto lq_update;
+ }
+
+ win = &(tbl->win[idx]);
+
+ /*
+ * If there is not enough history to calculate actual average
+ * throughput, keep analyzing results of more tx frames, without
+ * changing rate or mode (bypass most of the rest of this function).
+ * Set up new rate table in uCode only if old rate is not supported
+ * in current association (use new rate found above).
+ */
+ fail_count = win->counter - win->success_counter;
+ if (fail_count < RATE_MIN_FAILURE_TH &&
+ win->success_counter < RATE_MIN_SUCCESS_TH) {
+ D_RATE("LQ: still below TH. succ=%d total=%d " "for idx %d\n",
+ win->success_counter, win->counter, idx);
+
+ /* Can't calculate this yet; not enough history */
+ win->average_tpt = IL_INVALID_VALUE;
+
+ /* Should we stay with this modulation mode,
+ * or search for a new one? */
+ il4965_rs_stay_in_table(lq_sta, false);
+
+ goto out;
+ }
+ /* Else we have enough samples; calculate estimate of
+ * actual average throughput */
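+	/* success_ratio is a percentage scaled by 128, so
+	 * (ratio * expected_tpt + 64) / 128 is the expected throughput
+	 * weighted by the observed success percentage, rounded to nearest. */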
+ if (win->average_tpt !=
+ ((win->success_ratio * tbl->expected_tpt[idx] + 64) / 128)) {
+ IL_ERR("expected_tpt should have been calculated by now\n");
+ win->average_tpt =
+ ((win->success_ratio * tbl->expected_tpt[idx] + 64) / 128);
+ }
+
+ /* If we are searching for better modulation mode, check success. */
+ if (lq_sta->search_better_tbl) {
+ /* If good success, continue using the "search" mode;
+ * no need to send new link quality command, since we're
+ * continuing to use the setup that we've been trying. */
+ if (win->average_tpt > lq_sta->last_tpt) {
+
+ D_RATE("LQ: SWITCHING TO NEW TBL "
+ "suc=%d cur-tpt=%d old-tpt=%d\n",
+ win->success_ratio, win->average_tpt,
+ lq_sta->last_tpt);
+
+ if (!is_legacy(tbl->lq_type))
+ lq_sta->enable_counter = 1;
+
+ /* Swap tables; "search" becomes "active" */
+ lq_sta->active_tbl = active_tbl;
+ current_tpt = win->average_tpt;
+
+ /* Else poor success; go back to mode in "active" table */
+ } else {
+
+ D_RATE("LQ: GOING BACK TO THE OLD TBL "
+ "suc=%d cur-tpt=%d old-tpt=%d\n",
+ win->success_ratio, win->average_tpt,
+ lq_sta->last_tpt);
+
+ /* Nullify "search" table */
+ tbl->lq_type = LQ_NONE;
+
+ /* Revert to "active" table */
+ active_tbl = lq_sta->active_tbl;
+ tbl = &(lq_sta->lq_info[active_tbl]);
+
+ /* Revert to "active" rate and throughput info */
+ idx = il4965_hwrate_to_plcp_idx(tbl->current_rate);
+ current_tpt = lq_sta->last_tpt;
+
+ /* Need to set up a new rate table in uCode */
+ update_lq = 1;
+ }
+
+ /* Either way, we've made a decision; modulation mode
+ * search is done, allow rate adjustment next time. */
+ lq_sta->search_better_tbl = 0;
+ done_search = 1; /* Don't switch modes below! */
+ goto lq_update;
+ }
+
+ /* (Else) not in search of better modulation mode, try for better
+ * starting rate, while staying in this mode. */
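+	/* Adjacent rates come back packed in one u16: next lower rate idx
+	 * in the low byte, next higher in the high byte (RATE_INVALID if
+	 * there is no such rate). */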
+ high_low =
+ il4965_rs_get_adjacent_rate(il, idx, rate_scale_idx_msk,
+ tbl->lq_type);
+ low = high_low & 0xff;
+ high = (high_low >> 8) & 0xff;
+
+	/* If the user set a max rate, don't go above that constraint */
+ if (lq_sta->max_rate_idx != -1 && lq_sta->max_rate_idx < high)
+ high = RATE_INVALID;
+
+ sr = win->success_ratio;
+
+ /* Collect measured throughputs for current and adjacent rates */
+ current_tpt = win->average_tpt;
+ if (low != RATE_INVALID)
+ low_tpt = tbl->win[low].average_tpt;
+ if (high != RATE_INVALID)
+ high_tpt = tbl->win[high].average_tpt;
+
+ scale_action = 0;
+
+ /* Too many failures, decrease rate */
+ if (sr <= RATE_DECREASE_TH || current_tpt == 0) {
+ D_RATE("decrease rate because of low success_ratio\n");
+ scale_action = -1;
+
+ /* No throughput measured yet for adjacent rates; try increase. */
+ } else if (low_tpt == IL_INVALID_VALUE && high_tpt == IL_INVALID_VALUE) {
+
+ if (high != RATE_INVALID && sr >= RATE_INCREASE_TH)
+ scale_action = 1;
+ else if (low != RATE_INVALID)
+ scale_action = 0;
+ }
+
+ /* Both adjacent throughputs are measured, but neither one has better
+ * throughput; we're using the best rate, don't change it! */
+ else if (low_tpt != IL_INVALID_VALUE && high_tpt != IL_INVALID_VALUE &&
+ low_tpt < current_tpt && high_tpt < current_tpt)
+ scale_action = 0;
+
+ /* At least one adjacent rate's throughput is measured,
+ * and may have better performance. */
+ else {
+ /* Higher adjacent rate's throughput is measured */
+ if (high_tpt != IL_INVALID_VALUE) {
+ /* Higher rate has better throughput */
+ if (high_tpt > current_tpt && sr >= RATE_INCREASE_TH)
+ scale_action = 1;
+ else
+ scale_action = 0;
+
+ /* Lower adjacent rate's throughput is measured */
+ } else if (low_tpt != IL_INVALID_VALUE) {
+ /* Lower rate has better throughput */
+ if (low_tpt > current_tpt) {
+ D_RATE("decrease rate because of low tpt\n");
+ scale_action = -1;
+ } else if (sr >= RATE_INCREASE_TH) {
+ scale_action = 1;
+ }
+ }
+ }
+
+ /* Sanity check; asked for decrease, but success rate or throughput
+ * has been good at old rate. Don't change it. */
+ if (scale_action == -1 && low != RATE_INVALID &&
+ (sr > RATE_HIGH_TH || current_tpt > 100 * tbl->expected_tpt[low]))
+ scale_action = 0;
+
+ switch (scale_action) {
+ case -1:
+ /* Decrease starting rate, update uCode's rate table */
+ if (low != RATE_INVALID) {
+ update_lq = 1;
+ idx = low;
+ }
+
+ break;
+ case 1:
+ /* Increase starting rate, update uCode's rate table */
+ if (high != RATE_INVALID) {
+ update_lq = 1;
+ idx = high;
+ }
+
+ break;
+ case 0:
+ /* No change */
+ default:
+ break;
+ }
+
+ D_RATE("choose rate scale idx %d action %d low %d " "high %d type %d\n",
+ idx, scale_action, low, high, tbl->lq_type);
+
+lq_update:
+ /* Replace uCode's rate table for the destination station. */
+ if (update_lq)
+ il4965_rs_update_rate_tbl(il, ctx, lq_sta, tbl, idx,
+ is_green);
+
+ /* Should we stay with this modulation mode,
+ * or search for a new one? */
+ il4965_rs_stay_in_table(lq_sta, false);
+
+ /*
+ * Search for new modulation mode if we're:
+ * 1) Not changing rates right now
+ * 2) Not just finishing up a search
+ * 3) Allowing a new search
+ */
+ if (!update_lq && !done_search && !lq_sta->stay_in_tbl && win->counter) {
+ /* Save current throughput to compare with "search" throughput */
+ lq_sta->last_tpt = current_tpt;
+
+ /* Select a new "search" modulation mode to try.
+ * If one is found, set up the new "search" table. */
+ if (is_legacy(tbl->lq_type))
+ il4965_rs_move_legacy_other(il, lq_sta, conf, sta, idx);
+ else if (is_siso(tbl->lq_type))
+ il4965_rs_move_siso_to_other(il, lq_sta, conf, sta,
+ idx);
+ else /* (is_mimo2(tbl->lq_type)) */
+ il4965_rs_move_mimo2_to_other(il, lq_sta, conf, sta,
+ idx);
+
+ /* If new "search" mode was selected, set up in uCode table */
+ if (lq_sta->search_better_tbl) {
+ /* Access the "search" table, clear its history. */
+ tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+ for (i = 0; i < RATE_COUNT; i++)
+ il4965_rs_rate_scale_clear_win(&(tbl->win[i]));
+
+ /* Use new "search" start rate */
+ idx = il4965_hwrate_to_plcp_idx(tbl->current_rate);
+
+ D_RATE("Switch current mcs: %X idx: %d\n",
+ tbl->current_rate, idx);
+ il4965_rs_fill_link_cmd(il, lq_sta, tbl->current_rate);
+ il_send_lq_cmd(il, ctx, &lq_sta->lq, CMD_ASYNC, false);
+ } else
+ done_search = 1;
+ }
+
+ if (done_search && !lq_sta->stay_in_tbl) {
+ /* If the "active" (non-search) mode was legacy,
+ * and we've tried switching antennas,
+ * but we haven't been able to try HT modes (not available),
+ * stay with best antenna legacy modulation for a while
+ * before next round of mode comparisons. */
+ tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ if (is_legacy(tbl1->lq_type) && !conf_is_ht(conf) &&
+ lq_sta->action_counter > tbl1->max_search) {
+ D_RATE("LQ: STAY in legacy table\n");
+ il4965_rs_set_stay_in_table(il, 1, lq_sta);
+ }
+
+ /* If we're in an HT mode, and all 3 mode switch actions
+ * have been tried and compared, stay in this best modulation
+ * mode for a while before next round of mode comparisons. */
+ if (lq_sta->enable_counter &&
+ lq_sta->action_counter >= tbl1->max_search) {
+ if (lq_sta->last_tpt > IL_AGG_TPT_THREHOLD &&
+ (lq_sta->tx_agg_tid_en & (1 << tid)) &&
+ tid != MAX_TID_COUNT) {
+ tid_data =
+ &il->stations[lq_sta->lq.sta_id].tid[tid];
+ if (tid_data->agg.state == IL_AGG_OFF) {
+ D_RATE("try to aggregate tid %d\n",
+ tid);
+ il4965_rs_tl_turn_on_agg(il, tid,
+ lq_sta, sta);
+ }
+ }
+ il4965_rs_set_stay_in_table(il, 0, lq_sta);
+ }
+ }
+
+out:
+ tbl->current_rate =
+ il4965_rate_n_flags_from_tbl(il, tbl, idx, is_green);
+ i = idx;
+ lq_sta->last_txrate_idx = i;
+}
+
+/**
+ * il4965_rs_initialize_lq - Initialize a station's hardware rate table
+ *
+ * The uCode's station table contains a table of fallback rates
+ * for automatic fallback during transmission.
+ *
+ * NOTE: This sets up a default set of values. These will be replaced later
+ * if the driver's iwl-4965-rs rate scaling algorithm is used, instead of
+ * rc80211_simple.
+ *
+ * NOTE: Run C_ADD_STA command to set up station table entry, before
+ * calling this function (which runs C_TX_LINK_QUALITY_CMD,
+ * which requires station table entry to exist).
+ */
+static void
+il4965_rs_initialize_lq(struct il_priv *il, struct ieee80211_conf *conf,
+ struct ieee80211_sta *sta, struct il_lq_sta *lq_sta)
+{
+ struct il_scale_tbl_info *tbl;
+ int rate_idx;
+ int i;
+ u32 rate;
+ u8 use_green = il4965_rs_use_green(sta);
+ u8 active_tbl = 0;
+ u8 valid_tx_ant;
+ struct il_station_priv *sta_priv;
+ struct il_rxon_context *ctx;
+
+ if (!sta || !lq_sta)
+ return;
+
+ sta_priv = (void *)sta->drv_priv;
+ ctx = sta_priv->common.ctx;
+
+ i = lq_sta->last_txrate_idx;
+
+ valid_tx_ant = il->hw_params.valid_tx_ant;
+
+ if (!lq_sta->search_better_tbl)
+ active_tbl = lq_sta->active_tbl;
+ else
+ active_tbl = 1 - lq_sta->active_tbl;
+
+ tbl = &(lq_sta->lq_info[active_tbl]);
+
+ if (i < 0 || i >= RATE_COUNT)
+ i = 0;
+
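+	/* Build an initial rate_n_flags from the last tx rate idx: PLCP
+	 * code, first valid tx antenna, and the CCK flag for CCK rates. */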
+ rate = il_rates[i].plcp;
+ tbl->ant_type = il4965_first_antenna(valid_tx_ant);
+ rate |= tbl->ant_type << RATE_MCS_ANT_POS;
+
+ if (i >= IL_FIRST_CCK_RATE && i <= IL_LAST_CCK_RATE)
+ rate |= RATE_MCS_CCK_MSK;
+
+ il4965_rs_get_tbl_info_from_mcs(rate, il->band, tbl, &rate_idx);
+ if (!il4965_rs_is_valid_ant(valid_tx_ant, tbl->ant_type))
+ il4965_rs_toggle_antenna(valid_tx_ant, &rate, tbl);
+
+ rate = il4965_rate_n_flags_from_tbl(il, tbl, rate_idx, use_green);
+ tbl->current_rate = rate;
+ il4965_rs_set_expected_tpt_table(lq_sta, tbl);
+ il4965_rs_fill_link_cmd(NULL, lq_sta, rate);
+ il->stations[lq_sta->lq.sta_id].lq = &lq_sta->lq;
+ il_send_lq_cmd(il, ctx, &lq_sta->lq, CMD_SYNC, true);
+}
+
+static void
+il4965_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta,
+ struct ieee80211_tx_rate_control *txrc)
+{
+
+ struct sk_buff *skb = txrc->skb;
+ struct ieee80211_supported_band *sband = txrc->sband;
+ struct il_priv *il __maybe_unused = (struct il_priv *)il_r;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct il_lq_sta *lq_sta = il_sta;
+ int rate_idx;
+
+ D_RATE("rate scale calculate new rate for skb\n");
+
+ /* Get max rate if user set max rate */
+ if (lq_sta) {
+ lq_sta->max_rate_idx = txrc->max_rate_idx;
+ if (sband->band == IEEE80211_BAND_5GHZ &&
+ lq_sta->max_rate_idx != -1)
+ lq_sta->max_rate_idx += IL_FIRST_OFDM_RATE;
+ if (lq_sta->max_rate_idx < 0 ||
+ lq_sta->max_rate_idx >= RATE_COUNT)
+ lq_sta->max_rate_idx = -1;
+ }
+
+ /* Treat uninitialized rate scaling data same as non-existing. */
+ if (lq_sta && !lq_sta->drv) {
+ D_RATE("Rate scaling not initialized yet.\n");
+ il_sta = NULL;
+ }
+
+ /* Send management frames and NO_ACK data using lowest rate. */
+ if (rate_control_send_low(sta, il_sta, txrc))
+ return;
+
+ if (!lq_sta)
+ return;
+
+ rate_idx = lq_sta->last_txrate_idx;
+
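+	/* HT rate: convert the driver's internal rate idx to a mac80211
+	 * MCS idx (drop the legacy OFDM offset, collapse the shared 6M/9M
+	 * slot, add the per-stream offset for MIMO2 rates). */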
+ if (lq_sta->last_rate_n_flags & RATE_MCS_HT_MSK) {
+ rate_idx -= IL_FIRST_OFDM_RATE;
+		/* 6M and 9M share the same MCS idx */
+ rate_idx = (rate_idx > 0) ? (rate_idx - 1) : 0;
+ if (il4965_rs_extract_rate(lq_sta->last_rate_n_flags) >=
+ RATE_MIMO2_6M_PLCP)
+ rate_idx = rate_idx + MCS_IDX_PER_STREAM;
+ info->control.rates[0].flags = IEEE80211_TX_RC_MCS;
+ if (lq_sta->last_rate_n_flags & RATE_MCS_SGI_MSK)
+ info->control.rates[0].flags |=
+ IEEE80211_TX_RC_SHORT_GI;
+ if (lq_sta->last_rate_n_flags & RATE_MCS_DUP_MSK)
+ info->control.rates[0].flags |=
+ IEEE80211_TX_RC_DUP_DATA;
+ if (lq_sta->last_rate_n_flags & RATE_MCS_HT40_MSK)
+ info->control.rates[0].flags |=
+ IEEE80211_TX_RC_40_MHZ_WIDTH;
+ if (lq_sta->last_rate_n_flags & RATE_MCS_GF_MSK)
+ info->control.rates[0].flags |=
+ IEEE80211_TX_RC_GREEN_FIELD;
+ } else {
+ /* Check for invalid rates */
+ if (rate_idx < 0 || rate_idx >= RATE_COUNT_LEGACY ||
+ (sband->band == IEEE80211_BAND_5GHZ &&
+ rate_idx < IL_FIRST_OFDM_RATE))
+ rate_idx = rate_lowest_index(sband, sta);
+ /* On valid 5 GHz rate, adjust idx */
+ else if (sband->band == IEEE80211_BAND_5GHZ)
+ rate_idx -= IL_FIRST_OFDM_RATE;
+ info->control.rates[0].flags = 0;
+ }
+ info->control.rates[0].idx = rate_idx;
+
+}
+
+static void *
+il4965_rs_alloc_sta(void *il_rate, struct ieee80211_sta *sta, gfp_t gfp)
+{
+ struct il_station_priv *sta_priv =
+ (struct il_station_priv *)sta->drv_priv;
+ struct il_priv *il;
+
+ il = (struct il_priv *)il_rate;
+ D_RATE("create station rate scale win\n");
+
+ return &sta_priv->lq_sta;
+}
+
+/*
+ * Called after adding a new station to initialize rate scaling
+ */
+void
+il4965_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta, u8 sta_id)
+{
+ int i, j;
+ struct ieee80211_hw *hw = il->hw;
+ struct ieee80211_conf *conf = &il->hw->conf;
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ struct il_station_priv *sta_priv;
+ struct il_lq_sta *lq_sta;
+ struct ieee80211_supported_band *sband;
+
+ sta_priv = (struct il_station_priv *)sta->drv_priv;
+ lq_sta = &sta_priv->lq_sta;
+ sband = hw->wiphy->bands[conf->channel->band];
+
+ lq_sta->lq.sta_id = sta_id;
+
+ for (j = 0; j < LQ_SIZE; j++)
+ for (i = 0; i < RATE_COUNT; i++)
+ il4965_rs_rate_scale_clear_win(&lq_sta->lq_info[j].
+ win[i]);
+
+ lq_sta->flush_timer = 0;
+ lq_sta->supp_rates = sta->supp_rates[sband->band];
+
+ D_RATE("LQ:" "*** rate scale station global init for station %d ***\n",
+ sta_id);
+	/* TODO: what is a good starting rate for STA? About middle? Maybe not
+	 * the lowest or the highest rate. Could consider using RSSI from
+	 * previous packets? Need to have IEEE 802.1X auth succeed immediately
+	 * after assoc. */
+
+ lq_sta->is_dup = 0;
+ lq_sta->max_rate_idx = -1;
+ lq_sta->missed_rate_counter = IL_MISSED_RATE_MAX;
+ lq_sta->is_green = il4965_rs_use_green(sta);
+ lq_sta->active_legacy_rate = il->active_rate & ~(0x1000);
+ lq_sta->band = il->band;
+ /*
+ * active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3),
+ * supp_rates[] does not; shift to convert format, force 9 MBits off.
+ */
+ lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1;
+ lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1;
+ lq_sta->active_siso_rate &= ~((u16) 0x2);
+ lq_sta->active_siso_rate <<= IL_FIRST_OFDM_RATE;
+
+ /* Same here */
+ lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1;
+ lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1;
+ lq_sta->active_mimo2_rate &= ~((u16) 0x2);
+ lq_sta->active_mimo2_rate <<= IL_FIRST_OFDM_RATE;
+
+ /* These values will be overridden later */
+ lq_sta->lq.general_params.single_stream_ant_msk =
+ il4965_first_antenna(il->hw_params.valid_tx_ant);
+ lq_sta->lq.general_params.dual_stream_ant_msk =
+ il->hw_params.valid_tx_ant & ~il4965_first_antenna(il->hw_params.
+ valid_tx_ant);
+ if (!lq_sta->lq.general_params.dual_stream_ant_msk) {
+ lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB;
+ } else if (il4965_num_of_ant(il->hw_params.valid_tx_ant) == 2) {
+ lq_sta->lq.general_params.dual_stream_ant_msk =
+ il->hw_params.valid_tx_ant;
+ }
+
+ /* as default allow aggregation for all tids */
+ lq_sta->tx_agg_tid_en = IL_AGG_ALL_TID;
+ lq_sta->drv = il;
+
+ /* Set last_txrate_idx to lowest rate */
+ lq_sta->last_txrate_idx = rate_lowest_index(sband, sta);
+ if (sband->band == IEEE80211_BAND_5GHZ)
+ lq_sta->last_txrate_idx += IL_FIRST_OFDM_RATE;
+ lq_sta->is_agg = 0;
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+ lq_sta->dbg_fixed_rate = 0;
+#endif
+
+ il4965_rs_initialize_lq(il, conf, sta, lq_sta);
+}
+
+static void
+il4965_rs_fill_link_cmd(struct il_priv *il, struct il_lq_sta *lq_sta,
+ u32 new_rate)
+{
+ struct il_scale_tbl_info tbl_type;
+ int idx = 0;
+ int rate_idx;
+ int repeat_rate = 0;
+ u8 ant_toggle_cnt = 0;
+ u8 use_ht_possible = 1;
+ u8 valid_tx_ant = 0;
+ struct il_link_quality_cmd *lq_cmd = &lq_sta->lq;
+
+ /* Override starting rate (idx 0) if needed for debug purposes */
+ il4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, idx);
+
+ /* Interpret new_rate (rate_n_flags) */
+ il4965_rs_get_tbl_info_from_mcs(new_rate, lq_sta->band, &tbl_type,
+ &rate_idx);
+
+ /* How many times should we repeat the initial rate? */
+ if (is_legacy(tbl_type.lq_type)) {
+ ant_toggle_cnt = 1;
+ repeat_rate = IL_NUMBER_TRY;
+ } else {
+ repeat_rate = IL_HT_NUMBER_TRY;
+ }
+
+ lq_cmd->general_params.mimo_delimiter =
+ is_mimo(tbl_type.lq_type) ? 1 : 0;
+
+ /* Fill 1st table entry (idx 0) */
+ lq_cmd->rs_table[idx].rate_n_flags = cpu_to_le32(new_rate);
+
+ if (il4965_num_of_ant(tbl_type.ant_type) == 1) {
+ lq_cmd->general_params.single_stream_ant_msk =
+ tbl_type.ant_type;
+ } else if (il4965_num_of_ant(tbl_type.ant_type) == 2) {
+ lq_cmd->general_params.dual_stream_ant_msk = tbl_type.ant_type;
+ }
+ /* otherwise we don't modify the existing value */
+ idx++;
+ repeat_rate--;
+ if (il)
+ valid_tx_ant = il->hw_params.valid_tx_ant;
+
+ /* Fill rest of rate table */
+ while (idx < LINK_QUAL_MAX_RETRY_NUM) {
+ /* Repeat initial/next rate.
+ * For legacy IL_NUMBER_TRY == 1, this loop will not execute.
+ * For HT IL_HT_NUMBER_TRY == 3, this executes twice. */
+ while (repeat_rate > 0 && idx < LINK_QUAL_MAX_RETRY_NUM) {
+ if (is_legacy(tbl_type.lq_type)) {
+ if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
+ ant_toggle_cnt++;
+ else if (il &&
+ il4965_rs_toggle_antenna(valid_tx_ant,
+ &new_rate,
+ &tbl_type))
+ ant_toggle_cnt = 1;
+ }
+
+ /* Override next rate if needed for debug purposes */
+ il4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, idx);
+
+ /* Fill next table entry */
+ lq_cmd->rs_table[idx].rate_n_flags =
+ cpu_to_le32(new_rate);
+ repeat_rate--;
+ idx++;
+ }
+
+ il4965_rs_get_tbl_info_from_mcs(new_rate, lq_sta->band,
+ &tbl_type, &rate_idx);
+
+ /* Indicate to uCode which entries might be MIMO.
+ * If initial rate was MIMO, this will finally end up
+ * as (IL_HT_NUMBER_TRY * 2), after 2nd pass, otherwise 0. */
+ if (is_mimo(tbl_type.lq_type))
+ lq_cmd->general_params.mimo_delimiter = idx;
+
+ /* Get next rate */
+ new_rate =
+ il4965_rs_get_lower_rate(lq_sta, &tbl_type, rate_idx,
+ use_ht_possible);
+
+ /* How many times should we repeat the next rate? */
+ if (is_legacy(tbl_type.lq_type)) {
+ if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
+ ant_toggle_cnt++;
+ else if (il &&
+ il4965_rs_toggle_antenna(valid_tx_ant,
+ &new_rate, &tbl_type))
+ ant_toggle_cnt = 1;
+
+ repeat_rate = IL_NUMBER_TRY;
+ } else {
+ repeat_rate = IL_HT_NUMBER_TRY;
+ }
+
+ /* Don't allow HT rates after next pass.
+ * il4965_rs_get_lower_rate() will change type to LQ_A or LQ_G. */
+ use_ht_possible = 0;
+
+ /* Override next rate if needed for debug purposes */
+ il4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, idx);
+
+ /* Fill next table entry */
+ lq_cmd->rs_table[idx].rate_n_flags = cpu_to_le32(new_rate);
+
+ idx++;
+ repeat_rate--;
+ }
+
+ lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
+ lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
+
+ lq_cmd->agg_params.agg_time_limit =
+ cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
+}
+
+static void *
+il4965_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
+{
+ return hw->priv;
+}
+
+/* The rate-scaling framework requires a free function to be implemented */
+static void
+il4965_rs_free(void *il_rate)
+{
+ return;
+}
+
+static void
+il4965_rs_free_sta(void *il_r, struct ieee80211_sta *sta, void *il_sta)
+{
+ struct il_priv *il __maybe_unused = il_r;
+
+ D_RATE("enter\n");
+ D_RATE("leave\n");
+}
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+static int
+il4965_open_file_generic(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static void
+il4965_rs_dbgfs_set_mcs(struct il_lq_sta *lq_sta, u32 * rate_n_flags, int idx)
+{
+ struct il_priv *il;
+ u8 valid_tx_ant;
+ u8 ant_sel_tx;
+
+ il = lq_sta->drv;
+ valid_tx_ant = il->hw_params.valid_tx_ant;
+ if (lq_sta->dbg_fixed_rate) {
+ ant_sel_tx =
+		    ((lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK) >>
+		     RATE_MCS_ANT_POS);
+ if ((valid_tx_ant & ant_sel_tx) == ant_sel_tx) {
+ *rate_n_flags = lq_sta->dbg_fixed_rate;
+ D_RATE("Fixed rate ON\n");
+ } else {
+ lq_sta->dbg_fixed_rate = 0;
+ IL_ERR
+ ("Invalid antenna selection 0x%X, Valid is 0x%X\n",
+ ant_sel_tx, valid_tx_ant);
+ D_RATE("Fixed rate OFF\n");
+ }
+ } else {
+ D_RATE("Fixed rate OFF\n");
+ }
+}
+
+static ssize_t
+il4965_rs_sta_dbgfs_scale_table_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_lq_sta *lq_sta = file->private_data;
+ struct il_priv *il;
+ char buf[64];
+ size_t buf_size;
+ u32 parsed_rate;
+ struct il_station_priv *sta_priv =
+ container_of(lq_sta, struct il_station_priv, lq_sta);
+ struct il_rxon_context *ctx = sta_priv->common.ctx;
+
+ il = lq_sta->drv;
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+
+ if (sscanf(buf, "%x", &parsed_rate) == 1)
+ lq_sta->dbg_fixed_rate = parsed_rate;
+ else
+ lq_sta->dbg_fixed_rate = 0;
+
+ lq_sta->active_legacy_rate = 0x0FFF; /* 1 - 54 MBits, includes CCK */
+ lq_sta->active_siso_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
+ lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
+
+ D_RATE("sta_id %d rate 0x%X\n", lq_sta->lq.sta_id,
+ lq_sta->dbg_fixed_rate);
+
+ if (lq_sta->dbg_fixed_rate) {
+ il4965_rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate);
+ il_send_lq_cmd(lq_sta->drv, ctx, &lq_sta->lq, CMD_ASYNC, false);
+ }
+
+ return count;
+}
+
+static ssize_t
+il4965_rs_sta_dbgfs_scale_table_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ char *buff;
+ int desc = 0;
+ int i = 0;
+ int idx = 0;
+ ssize_t ret;
+
+ struct il_lq_sta *lq_sta = file->private_data;
+ struct il_priv *il;
+ struct il_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+
+ il = lq_sta->drv;
+ buff = kmalloc(1024, GFP_KERNEL);
+ if (!buff)
+ return -ENOMEM;
+
+ desc += sprintf(buff + desc, "sta_id %d\n", lq_sta->lq.sta_id);
+ desc +=
+	    sprintf(buff + desc, "failed=%d success=%d rate=0x%X\n",
+ lq_sta->total_failed, lq_sta->total_success,
+ lq_sta->active_legacy_rate);
+ desc +=
+ sprintf(buff + desc, "fixed rate 0x%X\n", lq_sta->dbg_fixed_rate);
+ desc +=
+ sprintf(buff + desc, "valid_tx_ant %s%s%s\n",
+ (il->hw_params.valid_tx_ant & ANT_A) ? "ANT_A," : "",
+ (il->hw_params.valid_tx_ant & ANT_B) ? "ANT_B," : "",
+ (il->hw_params.valid_tx_ant & ANT_C) ? "ANT_C" : "");
+ desc +=
+ sprintf(buff + desc, "lq type %s\n",
+ (is_legacy(tbl->lq_type)) ? "legacy" : "HT");
+ if (is_Ht(tbl->lq_type)) {
+ desc +=
+ sprintf(buff + desc, " %s",
+ (is_siso(tbl->lq_type)) ? "SISO" : "MIMO2");
+ desc +=
+ sprintf(buff + desc, " %s",
+ (tbl->is_ht40) ? "40MHz" : "20MHz");
+ desc +=
+ sprintf(buff + desc, " %s %s %s\n",
+ (tbl->is_SGI) ? "SGI" : "",
+ (lq_sta->is_green) ? "GF enabled" : "",
+ (lq_sta->is_agg) ? "AGG on" : "");
+ }
+ desc +=
+ sprintf(buff + desc, "last tx rate=0x%X\n",
+ lq_sta->last_rate_n_flags);
+ desc +=
+ sprintf(buff + desc,
+		    "general:" "flags=0x%X mimo-d=%d s-ant=0x%x d-ant=0x%x\n",
+ lq_sta->lq.general_params.flags,
+ lq_sta->lq.general_params.mimo_delimiter,
+ lq_sta->lq.general_params.single_stream_ant_msk,
+ lq_sta->lq.general_params.dual_stream_ant_msk);
+
+ desc +=
+ sprintf(buff + desc,
+ "agg:"
+ "time_limit=%d dist_start_th=%d frame_cnt_limit=%d\n",
+ le16_to_cpu(lq_sta->lq.agg_params.agg_time_limit),
+ lq_sta->lq.agg_params.agg_dis_start_th,
+ lq_sta->lq.agg_params.agg_frame_cnt_limit);
+
+ desc +=
+ sprintf(buff + desc,
+ "Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n",
+ lq_sta->lq.general_params.start_rate_idx[0],
+ lq_sta->lq.general_params.start_rate_idx[1],
+ lq_sta->lq.general_params.start_rate_idx[2],
+ lq_sta->lq.general_params.start_rate_idx[3]);
+
+ for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
+		idx = il4965_hwrate_to_plcp_idx(
+			le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags));
+ if (is_legacy(tbl->lq_type)) {
+ desc +=
+ sprintf(buff + desc, " rate[%d] 0x%X %smbps\n", i,
+ le32_to_cpu(lq_sta->lq.rs_table[i].
+ rate_n_flags),
+ il_rate_mcs[idx].mbps);
+ } else {
+ desc +=
+ sprintf(buff + desc, " rate[%d] 0x%X %smbps (%s)\n",
+ i,
+ le32_to_cpu(lq_sta->lq.rs_table[i].
+ rate_n_flags),
+ il_rate_mcs[idx].mbps,
+ il_rate_mcs[idx].mcs);
+ }
+ }
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
+ kfree(buff);
+ return ret;
+}
+
+static const struct file_operations rs_sta_dbgfs_scale_table_ops = {
+ .write = il4965_rs_sta_dbgfs_scale_table_write,
+ .read = il4965_rs_sta_dbgfs_scale_table_read,
+ .open = il4965_open_file_generic,
+ .llseek = default_llseek,
+};
+
+static ssize_t
+il4965_rs_sta_dbgfs_stats_table_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ char *buff;
+ int desc = 0;
+ int i, j;
+ ssize_t ret;
+
+ struct il_lq_sta *lq_sta = file->private_data;
+
+ buff = kmalloc(1024, GFP_KERNEL);
+ if (!buff)
+ return -ENOMEM;
+
+ for (i = 0; i < LQ_SIZE; i++) {
+ desc +=
+ sprintf(buff + desc,
+ "%s type=%d SGI=%d HT40=%d DUP=%d GF=%d\n"
+ "rate=0x%X\n", lq_sta->active_tbl == i ? "*" : "x",
+ lq_sta->lq_info[i].lq_type,
+ lq_sta->lq_info[i].is_SGI,
+ lq_sta->lq_info[i].is_ht40,
+ lq_sta->lq_info[i].is_dup, lq_sta->is_green,
+ lq_sta->lq_info[i].current_rate);
+ for (j = 0; j < RATE_COUNT; j++) {
+ desc +=
+ sprintf(buff + desc,
+ "counter=%d success=%d %%=%d\n",
+ lq_sta->lq_info[i].win[j].counter,
+ lq_sta->lq_info[i].win[j].success_counter,
+ lq_sta->lq_info[i].win[j].success_ratio);
+ }
+ }
+ ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
+ kfree(buff);
+ return ret;
+}
+
+static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
+ .read = il4965_rs_sta_dbgfs_stats_table_read,
+ .open = il4965_open_file_generic,
+ .llseek = default_llseek,
+};
+
+static ssize_t
+il4965_rs_sta_dbgfs_rate_scale_data_read(struct file *file,
+ char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ char buff[120];
+ int desc = 0;
+ struct il_lq_sta *lq_sta = file->private_data;
+ struct il_scale_tbl_info *tbl = &lq_sta->lq_info[lq_sta->active_tbl];
+
+ if (is_Ht(tbl->lq_type))
+ desc +=
+ sprintf(buff + desc, "Bit Rate= %d Mb/s\n",
+ tbl->expected_tpt[lq_sta->last_txrate_idx]);
+ else
+ desc +=
+ sprintf(buff + desc, "Bit Rate= %d Mb/s\n",
+ il_rates[lq_sta->last_txrate_idx].ieee >> 1);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buff, desc);
+}
+
+static const struct file_operations rs_sta_dbgfs_rate_scale_data_ops = {
+ .read = il4965_rs_sta_dbgfs_rate_scale_data_read,
+ .open = il4965_open_file_generic,
+ .llseek = default_llseek,
+};
+
+static void
+il4965_rs_add_debugfs(void *il, void *il_sta, struct dentry *dir)
+{
+ struct il_lq_sta *lq_sta = il_sta;
+ lq_sta->rs_sta_dbgfs_scale_table_file =
+ debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir,
+ lq_sta, &rs_sta_dbgfs_scale_table_ops);
+ lq_sta->rs_sta_dbgfs_stats_table_file =
+ debugfs_create_file("rate_stats_table", S_IRUSR, dir, lq_sta,
+ &rs_sta_dbgfs_stats_table_ops);
+ lq_sta->rs_sta_dbgfs_rate_scale_data_file =
+ debugfs_create_file("rate_scale_data", S_IRUSR, dir, lq_sta,
+ &rs_sta_dbgfs_rate_scale_data_ops);
+ lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file =
+ debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir,
+ &lq_sta->tx_agg_tid_en);
+
+}
+
+static void
+il4965_rs_remove_debugfs(void *il, void *il_sta)
+{
+ struct il_lq_sta *lq_sta = il_sta;
+ debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file);
+ debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
+ debugfs_remove(lq_sta->rs_sta_dbgfs_rate_scale_data_file);
+ debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file);
+}
+#endif
+
+/*
+ * Initialization of rate scaling information is done by driver after
+ * the station is added. Since mac80211 calls this function before a
+ * station is added we ignore it.
+ */
+static void
+il4965_rs_rate_init_stub(void *il_r, struct ieee80211_supported_band *sband,
+ struct ieee80211_sta *sta, void *il_sta)
+{
+}
+
+static struct rate_control_ops rs_4965_ops = {
+ .module = NULL,
+ .name = IL4965_RS_NAME,
+ .tx_status = il4965_rs_tx_status,
+ .get_rate = il4965_rs_get_rate,
+ .rate_init = il4965_rs_rate_init_stub,
+ .alloc = il4965_rs_alloc,
+ .free = il4965_rs_free,
+ .alloc_sta = il4965_rs_alloc_sta,
+ .free_sta = il4965_rs_free_sta,
+#ifdef CONFIG_MAC80211_DEBUGFS
+ .add_sta_debugfs = il4965_rs_add_debugfs,
+ .remove_sta_debugfs = il4965_rs_remove_debugfs,
+#endif
+};
+
+int
+il4965_rate_control_register(void)
+{
+ return ieee80211_rate_control_register(&rs_4965_ops);
+}
+
+void
+il4965_rate_control_unregister(void)
+{
+ ieee80211_rate_control_unregister(&rs_4965_ops);
+}
diff --git a/drivers/net/wireless/iwlegacy/4965.c b/drivers/net/wireless/iwlegacy/4965.c
new file mode 100644
index 000000000000..cacbc03880b0
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/4965.c
@@ -0,0 +1,2402 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <net/mac80211.h>
+#include <linux/etherdevice.h>
+#include <asm/unaligned.h>
+
+#include "common.h"
+#include "4965.h"
+
+/**
+ * il4965_verify_inst_sparse - verify runtime uCode image in card vs. host,
+ * using sample data 100 bytes apart. If these sample points are good,
+ * it's a pretty good bet that everything between them is good, too.
+ */
+static int
+il4965_verify_inst_sparse(struct il_priv *il, __le32 * image, u32 len)
+{
+ u32 val;
+ int ret = 0;
+ u32 errcnt = 0;
+ u32 i;
+
+ D_INFO("ucode inst image size is %u\n", len);
+
+ for (i = 0; i < len; i += 100, image += 100 / sizeof(u32)) {
+ /* read data comes through single port, auto-incr addr */
+ /* NOTE: Use the debugless read so we don't flood kernel log
+ * if IL_DL_IO is set */
+ il_wr(il, HBUS_TARG_MEM_RADDR, i + IL4965_RTC_INST_LOWER_BOUND);
+ val = _il_rd(il, HBUS_TARG_MEM_RDAT);
+ if (val != le32_to_cpu(*image)) {
+ ret = -EIO;
+ errcnt++;
+ if (errcnt >= 3)
+ break;
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * il4965_verify_inst_full - verify runtime uCode image in card vs. host,
+ * looking at all data.
+ */
+static int
+il4965_verify_inst_full(struct il_priv *il, __le32 * image, u32 len)
+{
+ u32 val;
+ u32 save_len = len;
+ int ret = 0;
+ u32 errcnt;
+
+ D_INFO("ucode inst image size is %u\n", len);
+
+ il_wr(il, HBUS_TARG_MEM_RADDR, IL4965_RTC_INST_LOWER_BOUND);
+
+ errcnt = 0;
+ for (; len > 0; len -= sizeof(u32), image++) {
+ /* read data comes through single port, auto-incr addr */
+ /* NOTE: Use the debugless read so we don't flood kernel log
+ * if IL_DL_IO is set */
+ val = _il_rd(il, HBUS_TARG_MEM_RDAT);
+ if (val != le32_to_cpu(*image)) {
+ IL_ERR("uCode INST section is invalid at "
+ "offset 0x%x, is 0x%x, s/b 0x%x\n",
+ save_len - len, val, le32_to_cpu(*image));
+ ret = -EIO;
+ errcnt++;
+ if (errcnt >= 20)
+ break;
+ }
+ }
+
+ if (!errcnt)
+ D_INFO("ucode image in INSTRUCTION memory is good\n");
+
+ return ret;
+}
+
+/**
+ * il4965_verify_ucode - determine which instruction image is in SRAM,
+ * and verify its contents
+ */
+int
+il4965_verify_ucode(struct il_priv *il)
+{
+ __le32 *image;
+ u32 len;
+ int ret;
+
+ /* Try bootstrap */
+ image = (__le32 *) il->ucode_boot.v_addr;
+ len = il->ucode_boot.len;
+ ret = il4965_verify_inst_sparse(il, image, len);
+ if (!ret) {
+ D_INFO("Bootstrap uCode is good in inst SRAM\n");
+ return 0;
+ }
+
+ /* Try initialize */
+ image = (__le32 *) il->ucode_init.v_addr;
+ len = il->ucode_init.len;
+ ret = il4965_verify_inst_sparse(il, image, len);
+ if (!ret) {
+ D_INFO("Initialize uCode is good in inst SRAM\n");
+ return 0;
+ }
+
+ /* Try runtime/protocol */
+ image = (__le32 *) il->ucode_code.v_addr;
+ len = il->ucode_code.len;
+ ret = il4965_verify_inst_sparse(il, image, len);
+ if (!ret) {
+ D_INFO("Runtime uCode is good in inst SRAM\n");
+ return 0;
+ }
+
+ IL_ERR("NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
+
+ /* Since nothing seems to match, show first several data entries in
+ * instruction SRAM, so maybe visual inspection will give a clue.
+ * Selection of bootstrap image (vs. other images) is arbitrary. */
+ image = (__le32 *) il->ucode_boot.v_addr;
+ len = il->ucode_boot.len;
+ ret = il4965_verify_inst_full(il, image, len);
+
+ return ret;
+}
+
+/******************************************************************************
+ *
+ * EEPROM related functions
+ *
+******************************************************************************/
+
+/*
+ * The device's EEPROM semaphore prevents conflicts between driver and uCode
+ * when accessing the EEPROM; each access is a series of pulses to/from the
+ * EEPROM chip, not a single event, so even reads could conflict if they
+ * weren't arbitrated by the semaphore.
+ */
+int
+il4965_eeprom_acquire_semaphore(struct il_priv *il)
+{
+ u16 count;
+ int ret;
+
+ for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) {
+ /* Request semaphore */
+ il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
+
+ /* See if we got it */
+ ret =
+ _il_poll_bit(il, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
+ CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
+ EEPROM_SEM_TIMEOUT);
+ if (ret >= 0)
+ return ret;
+ }
+
+ return ret;
+}
+
+void
+il4965_eeprom_release_semaphore(struct il_priv *il)
+{
+ il_clear_bit(il, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
+
+}
+
+int
+il4965_eeprom_check_version(struct il_priv *il)
+{
+ u16 eeprom_ver;
+ u16 calib_ver;
+
+ eeprom_ver = il_eeprom_query16(il, EEPROM_VERSION);
+ calib_ver = il_eeprom_query16(il, EEPROM_4965_CALIB_VERSION_OFFSET);
+
+ if (eeprom_ver < il->cfg->eeprom_ver ||
+ calib_ver < il->cfg->eeprom_calib_ver)
+ goto err;
+
+ IL_INFO("device EEPROM VER=0x%x, CALIB=0x%x\n", eeprom_ver, calib_ver);
+
+ return 0;
+err:
+ IL_ERR("Unsupported (too old) EEPROM VER=0x%x < 0x%x "
+ "CALIB=0x%x < 0x%x\n", eeprom_ver, il->cfg->eeprom_ver,
+ calib_ver, il->cfg->eeprom_calib_ver);
+ return -EINVAL;
+
+}
+
+void
+il4965_eeprom_get_mac(const struct il_priv *il, u8 * mac)
+{
+ const u8 *addr = il_eeprom_query_addr(il,
+ EEPROM_MAC_ADDRESS);
+ memcpy(mac, addr, ETH_ALEN);
+}
+
+/* Send led command */
+static int
+il4965_send_led_cmd(struct il_priv *il, struct il_led_cmd *led_cmd)
+{
+ struct il_host_cmd cmd = {
+ .id = C_LEDS,
+ .len = sizeof(struct il_led_cmd),
+ .data = led_cmd,
+ .flags = CMD_ASYNC,
+ .callback = NULL,
+ };
+ u32 reg;
+
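+	/* Keep only the CSR_LED_BSM_CTRL_MSK bits in the LED register
+	 * before sending the LED command to the uCode. */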
+ reg = _il_rd(il, CSR_LED_REG);
+ if (reg != (reg & CSR_LED_BSM_CTRL_MSK))
+ _il_wr(il, CSR_LED_REG, reg & CSR_LED_BSM_CTRL_MSK);
+
+ return il_send_cmd(il, &cmd);
+}
+
+/* Set led register off */
+void
+il4965_led_enable(struct il_priv *il)
+{
+ _il_wr(il, CSR_LED_REG, CSR_LED_REG_TRUN_ON);
+}
+
+const struct il_led_ops il4965_led_ops = {
+ .cmd = il4965_send_led_cmd,
+};
+
+static int il4965_send_tx_power(struct il_priv *il);
+static int il4965_hw_get_temperature(struct il_priv *il);
+
+/* Highest firmware API version supported */
+#define IL4965_UCODE_API_MAX 2
+
+/* Lowest firmware API version supported */
+#define IL4965_UCODE_API_MIN 2
+
+#define IL4965_FW_PRE "iwlwifi-4965-"
+#define _IL4965_MODULE_FIRMWARE(api) IL4965_FW_PRE #api ".ucode"
+#define IL4965_MODULE_FIRMWARE(api) _IL4965_MODULE_FIRMWARE(api)
+
+/* check contents of special bootstrap uCode SRAM */
+static int
+il4965_verify_bsm(struct il_priv *il)
+{
+ __le32 *image = il->ucode_boot.v_addr;
+ u32 len = il->ucode_boot.len;
+ u32 reg;
+ u32 val;
+
+ D_INFO("Begin verify bsm\n");
+
+ /* verify BSM SRAM contents */
+ val = il_rd_prph(il, BSM_WR_DWCOUNT_REG);
+ for (reg = BSM_SRAM_LOWER_BOUND; reg < BSM_SRAM_LOWER_BOUND + len;
+ reg += sizeof(u32), image++) {
+ val = il_rd_prph(il, reg);
+ if (val != le32_to_cpu(*image)) {
+ IL_ERR("BSM uCode verification failed at "
+ "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
+ BSM_SRAM_LOWER_BOUND, reg - BSM_SRAM_LOWER_BOUND,
+ len, val, le32_to_cpu(*image));
+ return -EIO;
+ }
+ }
+
+ D_INFO("BSM bootstrap uCode image OK\n");
+
+ return 0;
+}
+
+/**
+ * il4965_load_bsm - Load bootstrap instructions
+ *
+ * BSM operation:
+ *
+ * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
+ * in special SRAM that does not power down during RFKILL. When powering back
+ * up after power-saving sleeps (or during initial uCode load), the BSM loads
+ * the bootstrap program into the on-board processor, and starts it.
+ *
+ * The bootstrap program loads (via DMA) instructions and data for a new
+ * program from host DRAM locations indicated by the host driver in the
+ * BSM_DRAM_* registers. Once the new program is loaded, it starts
+ * automatically.
+ *
+ * When initializing the NIC, the host driver points the BSM to the
+ * "initialize" uCode image. This uCode sets up some internal data, then
+ * notifies host via "initialize alive" that it is complete.
+ *
+ * The host then replaces the BSM_DRAM_* pointer values to point to the
+ * normal runtime uCode instructions and a backup uCode data cache buffer
+ * (filled initially with starting data values for the on-board processor),
+ * then triggers the "initialize" uCode to load and launch the runtime uCode,
+ * which begins normal operation.
+ *
+ * When doing a power-save shutdown, runtime uCode saves data SRAM into
+ * the backup data cache in DRAM before SRAM is powered down.
+ *
+ * When powering back up, the BSM loads the bootstrap program. This reloads
+ * the runtime uCode instructions and the backup data cache into SRAM,
+ * and re-launches the runtime uCode from where it left off.
+ */
+static int
+il4965_load_bsm(struct il_priv *il)
+{
+ __le32 *image = il->ucode_boot.v_addr;
+ u32 len = il->ucode_boot.len;
+ dma_addr_t pinst;
+ dma_addr_t pdata;
+ u32 inst_len;
+ u32 data_len;
+ int i;
+ u32 done;
+ u32 reg_offset;
+ int ret;
+
+ D_INFO("Begin load bsm\n");
+
+ il->ucode_type = UCODE_RT;
+
+ /* make sure bootstrap program is no larger than BSM's SRAM size */
+ if (len > IL49_MAX_BSM_SIZE)
+ return -EINVAL;
+
+ /* Tell bootstrap uCode where to find the "Initialize" uCode
+ * in host DRAM ... host DRAM physical address bits 35:4 for 4965.
+ * NOTE: il_init_alive_start() will replace these values,
+ * after the "initialize" uCode has run, to point to
+ * runtime/protocol instructions and backup data cache.
+ */
+ pinst = il->ucode_init.p_addr >> 4;
+ pdata = il->ucode_init_data.p_addr >> 4;
+ inst_len = il->ucode_init.len;
+ data_len = il->ucode_init_data.len;
+
+ il_wr_prph(il, BSM_DRAM_INST_PTR_REG, pinst);
+ il_wr_prph(il, BSM_DRAM_DATA_PTR_REG, pdata);
+ il_wr_prph(il, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
+ il_wr_prph(il, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
+
+ /* Fill BSM memory with bootstrap instructions */
+ for (reg_offset = BSM_SRAM_LOWER_BOUND;
+ reg_offset < BSM_SRAM_LOWER_BOUND + len;
+ reg_offset += sizeof(u32), image++)
+ _il_wr_prph(il, reg_offset, le32_to_cpu(*image));
+
+ ret = il4965_verify_bsm(il);
+ if (ret)
+ return ret;
+
+ /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
+ il_wr_prph(il, BSM_WR_MEM_SRC_REG, 0x0);
+ il_wr_prph(il, BSM_WR_MEM_DST_REG, IL49_RTC_INST_LOWER_BOUND);
+ il_wr_prph(il, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
+
+ /* Load bootstrap code into instruction SRAM now,
+ * to prepare to load "initialize" uCode */
+ il_wr_prph(il, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);
+
+ /* Wait for load of bootstrap uCode to finish */
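+ /* (poll for up to ~1 ms: 100 iterations of 10 usec) */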
+ for (i = 0; i < 100; i++) {
+ done = il_rd_prph(il, BSM_WR_CTRL_REG);
+ if (!(done & BSM_WR_CTRL_REG_BIT_START))
+ break;
+ udelay(10);
+ }
+ if (i < 100)
+ D_INFO("BSM write complete, poll %d iterations\n", i);
+ else {
+ IL_ERR("BSM write did not complete!\n");
+ return -EIO;
+ }
+
+ /* Enable future boot loads whenever power management unit triggers it
+ * (e.g. when powering back up after power-save shutdown) */
+ il_wr_prph(il, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);
+
+ return 0;
+}
+
+/**
+ * il4965_set_ucode_ptrs - Set uCode address location
+ *
+ * Tell initialization uCode where to find runtime uCode.
+ *
+ * BSM registers initially contain pointers to initialization uCode.
+ * We need to replace them to load runtime uCode inst and data,
+ * and to save runtime data when powering down.
+ */
+static int
+il4965_set_ucode_ptrs(struct il_priv *il)
+{
+ dma_addr_t pinst;
+ dma_addr_t pdata;
+ int ret = 0;
+
+ /* bits 35:4 for 4965 */
+ pinst = il->ucode_code.p_addr >> 4;
+ pdata = il->ucode_data_backup.p_addr >> 4;
+
+ /* Tell bootstrap uCode where to find image to load */
+ il_wr_prph(il, BSM_DRAM_INST_PTR_REG, pinst);
+ il_wr_prph(il, BSM_DRAM_DATA_PTR_REG, pdata);
+ il_wr_prph(il, BSM_DRAM_DATA_BYTECOUNT_REG, il->ucode_data.len);
+
+ /* Inst byte count must be last to set up, bit 31 signals uCode
+ * that all new ptr/size info is in place */
+ il_wr_prph(il, BSM_DRAM_INST_BYTECOUNT_REG,
+ il->ucode_code.len | BSM_DRAM_INST_LOAD);
+ D_INFO("Runtime uCode pointers are set.\n");
+
+ return ret;
+}
+
+/**
+ * il4965_init_alive_start - Called after N_ALIVE notification received
+ *
+ * Called after N_ALIVE notification received from "initialize" uCode.
+ *
+ * The 4965 "initialize" ALIVE reply contains calibration data for:
+ * Voltage, temperature, and MIMO tx gain correction, now stored in il
+ * (3945 does not contain this data).
+ *
+ * Tell "initialize" uCode to go ahead and load the runtime uCode.
+*/
+static void
+il4965_init_alive_start(struct il_priv *il)
+{
+ /* Bootstrap uCode has loaded initialize uCode ... verify inst image.
+ * This is a paranoid check, because we would not have gotten the
+ * "initialize" alive if code weren't properly loaded. */
+ if (il4965_verify_ucode(il)) {
+ /* Runtime instruction load was bad;
+ * take it all the way back down so we can try again */
+ D_INFO("Bad \"initialize\" uCode load.\n");
+ goto restart;
+ }
+
+ /* Calculate temperature */
+ il->temperature = il4965_hw_get_temperature(il);
+
+ /* Send pointers to protocol/runtime uCode image ... init code will
+ * load and launch runtime uCode, which will send us another "Alive"
+ * notification. */
+ D_INFO("Initialization Alive received.\n");
+ if (il4965_set_ucode_ptrs(il)) {
+ /* Runtime instruction load won't happen;
+ * take it all the way back down so we can try again */
+ D_INFO("Couldn't set up uCode pointers.\n");
+ goto restart;
+ }
+ return;
+
+restart:
+ queue_work(il->workqueue, &il->restart);
+}
+
+static bool
+iw4965_is_ht40_channel(__le32 rxon_flags)
+{
+ int chan_mod =
+ le32_to_cpu(rxon_flags & RXON_FLG_CHANNEL_MODE_MSK) >>
+ RXON_FLG_CHANNEL_MODE_POS;
+ return (chan_mod == CHANNEL_MODE_PURE_40 ||
+ chan_mod == CHANNEL_MODE_MIXED);
+}
+
+static void
+il4965_nic_config(struct il_priv *il)
+{
+ unsigned long flags;
+ u16 radio_cfg;
+
+ spin_lock_irqsave(&il->lock, flags);
+
+ radio_cfg = il_eeprom_query16(il, EEPROM_RADIO_CONFIG);
+
+ /* write radio config values to register */
+ if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) == EEPROM_4965_RF_CFG_TYPE_MAX)
+ il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+ EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
+ EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
+ EEPROM_RF_CFG_DASH_MSK(radio_cfg));
+
+ /* set CSR_HW_CONFIG_REG for uCode use */
+ il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
+ CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
+
+ il->calib_info =
+ (struct il_eeprom_calib_info *)
+ il_eeprom_query_addr(il, EEPROM_4965_CALIB_TXPOWER_OFFSET);
+
+ spin_unlock_irqrestore(&il->lock, flags);
+}
+
+/* Reset differential Rx gains in NIC to prepare for chain noise calibration.
+ * Called after every association, but this runs only once!
+ * ... once chain noise is calibrated the first time, it's good forever. */
+static void
+il4965_chain_noise_reset(struct il_priv *il)
+{
+ struct il_chain_noise_data *data = &(il->chain_noise_data);
+
+ if (data->state == IL_CHAIN_NOISE_ALIVE && il_is_any_associated(il)) {
+ struct il_calib_diff_gain_cmd cmd;
+
+ /* clear data for chain noise calibration algorithm */
+ data->chain_noise_a = 0;
+ data->chain_noise_b = 0;
+ data->chain_noise_c = 0;
+ data->chain_signal_a = 0;
+ data->chain_signal_b = 0;
+ data->chain_signal_c = 0;
+ data->beacon_count = 0;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.hdr.op_code = IL_PHY_CALIBRATE_DIFF_GAIN_CMD;
+ cmd.diff_gain_a = 0;
+ cmd.diff_gain_b = 0;
+ cmd.diff_gain_c = 0;
+ if (il_send_cmd_pdu(il, C_PHY_CALIBRATION, sizeof(cmd), &cmd))
+ IL_ERR("Could not send C_PHY_CALIBRATION\n");
+ data->state = IL_CHAIN_NOISE_ACCUMULATE;
+ D_CALIB("Run chain_noise_calibrate\n");
+ }
+}
+
+static struct il_sensitivity_ranges il4965_sensitivity = {
+ .min_nrg_cck = 97,
+ .max_nrg_cck = 0, /* not used, set to 0 */
+
+ .auto_corr_min_ofdm = 85,
+ .auto_corr_min_ofdm_mrc = 170,
+ .auto_corr_min_ofdm_x1 = 105,
+ .auto_corr_min_ofdm_mrc_x1 = 220,
+
+ .auto_corr_max_ofdm = 120,
+ .auto_corr_max_ofdm_mrc = 210,
+ .auto_corr_max_ofdm_x1 = 140,
+ .auto_corr_max_ofdm_mrc_x1 = 270,
+
+ .auto_corr_min_cck = 125,
+ .auto_corr_max_cck = 200,
+ .auto_corr_min_cck_mrc = 200,
+ .auto_corr_max_cck_mrc = 400,
+
+ .nrg_th_cck = 100,
+ .nrg_th_ofdm = 100,
+
+ .barker_corr_th_min = 190,
+ .barker_corr_th_min_mrc = 390,
+ .nrg_th_cca = 62,
+};
+
+static void
+il4965_set_ct_threshold(struct il_priv *il)
+{
+ /* want Kelvin */
+ il->hw_params.ct_kill_threshold =
+ CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY);
+}
+
+/**
+ * il4965_hw_set_hw_params
+ *
+ * Called when initializing driver
+ */
+static int
+il4965_hw_set_hw_params(struct il_priv *il)
+{
+ if (il->cfg->mod_params->num_of_queues >= IL_MIN_NUM_QUEUES &&
+ il->cfg->mod_params->num_of_queues <= IL49_NUM_QUEUES)
+ il->cfg->base_params->num_of_queues =
+ il->cfg->mod_params->num_of_queues;
+
+ il->hw_params.max_txq_num = il->cfg->base_params->num_of_queues;
+ il->hw_params.dma_chnl_num = FH49_TCSR_CHNL_NUM;
+ il->hw_params.scd_bc_tbls_size =
+ il->cfg->base_params->num_of_queues *
+ sizeof(struct il4965_scd_bc_tbl);
+ il->hw_params.tfd_size = sizeof(struct il_tfd);
+ il->hw_params.max_stations = IL4965_STATION_COUNT;
+ il->ctx.bcast_sta_id = IL4965_BROADCAST_ID;
+ il->hw_params.max_data_size = IL49_RTC_DATA_SIZE;
+ il->hw_params.max_inst_size = IL49_RTC_INST_SIZE;
+ il->hw_params.max_bsm_size = BSM_SRAM_SIZE;
+ il->hw_params.ht40_channel = BIT(IEEE80211_BAND_5GHZ);
+
+ il->hw_params.rx_wrt_ptr_reg = FH49_RSCSR_CHNL0_WPTR;
+
+ il->hw_params.tx_chains_num = il4965_num_of_ant(il->cfg->valid_tx_ant);
+ il->hw_params.rx_chains_num = il4965_num_of_ant(il->cfg->valid_rx_ant);
+ il->hw_params.valid_tx_ant = il->cfg->valid_tx_ant;
+ il->hw_params.valid_rx_ant = il->cfg->valid_rx_ant;
+
+ il4965_set_ct_threshold(il);
+
+ il->hw_params.sens = &il4965_sensitivity;
+ il->hw_params.beacon_time_tsf_bits = IL4965_EXT_BEACON_TIME_POS;
+
+ return 0;
+}
+
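+/* Round num/denom to the nearest integer, keeping the sign of the quotient;
+ * e.g. 7/2 rounds to 4 and -7/2 rounds to -4. Always returns 1. */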
+static s32
+il4965_math_div_round(s32 num, s32 denom, s32 * res)
+{
+ s32 sign = 1;
+
+ if (num < 0) {
+ sign = -sign;
+ num = -num;
+ }
+ if (denom < 0) {
+ sign = -sign;
+ denom = -denom;
+ }
+ *res = 1;
+ *res = ((num * 2 + denom) / (denom * 2)) * sign;
+
+ return 1;
+}
+
+/**
+ * il4965_get_voltage_compensation - Power supply voltage comp for txpower
+ *
+ * Determines power supply voltage compensation for txpower calculations.
+ * Returns number of 1/2-dB steps to subtract from gain table idx,
+ * to compensate for difference between power supply voltage during
+ * factory measurements, vs. current power supply voltage.
+ *
+ * Voltage indication is higher for lower voltage.
+ * Lower voltage requires more gain (lower gain table idx).
+ */
+static s32
+il4965_get_voltage_compensation(s32 eeprom_voltage, s32 current_voltage)
+{
+ s32 comp = 0;
+
+ if (TX_POWER_IL_ILLEGAL_VOLTAGE == eeprom_voltage ||
+ TX_POWER_IL_ILLEGAL_VOLTAGE == current_voltage)
+ return 0;
+
+ il4965_math_div_round(current_voltage - eeprom_voltage,
+ TX_POWER_IL_VOLTAGE_CODES_PER_03V, &comp);
+
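+	/* A higher indication than the factory value means a lower supply
+	 * voltage (see above), so apply twice the compensation. */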
+ if (current_voltage > eeprom_voltage)
+ comp *= 2;
+ if ((comp < -2) || (comp > 2))
+ comp = 0;
+
+ return comp;
+}
+
+static s32
+il4965_get_tx_atten_grp(u16 channel)
+{
+ if (channel >= CALIB_IL_TX_ATTEN_GR5_FCH &&
+ channel <= CALIB_IL_TX_ATTEN_GR5_LCH)
+ return CALIB_CH_GROUP_5;
+
+ if (channel >= CALIB_IL_TX_ATTEN_GR1_FCH &&
+ channel <= CALIB_IL_TX_ATTEN_GR1_LCH)
+ return CALIB_CH_GROUP_1;
+
+ if (channel >= CALIB_IL_TX_ATTEN_GR2_FCH &&
+ channel <= CALIB_IL_TX_ATTEN_GR2_LCH)
+ return CALIB_CH_GROUP_2;
+
+ if (channel >= CALIB_IL_TX_ATTEN_GR3_FCH &&
+ channel <= CALIB_IL_TX_ATTEN_GR3_LCH)
+ return CALIB_CH_GROUP_3;
+
+ if (channel >= CALIB_IL_TX_ATTEN_GR4_FCH &&
+ channel <= CALIB_IL_TX_ATTEN_GR4_LCH)
+ return CALIB_CH_GROUP_4;
+
+ return -EINVAL;
+}
+
+static u32
+il4965_get_sub_band(const struct il_priv *il, u32 channel)
+{
+ s32 b = -1;
+
+ for (b = 0; b < EEPROM_TX_POWER_BANDS; b++) {
+ if (il->calib_info->band_info[b].ch_from == 0)
+ continue;
+
+ if (channel >= il->calib_info->band_info[b].ch_from &&
+ channel <= il->calib_info->band_info[b].ch_to)
+ break;
+ }
+
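+	/* b == EEPROM_TX_POWER_BANDS here means the channel was not found
+	 * in any sub-band; the caller treats that as an error. */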
+ return b;
+}
+
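+/* Linear interpolation between the points (x1, y1) and (x2, y2), rounded to
+ * the nearest integer; e.g. x1=36/y1=10, x2=48/y2=4 gives 8 for x=40. */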
+static s32
+il4965_interpolate_value(s32 x, s32 x1, s32 y1, s32 x2, s32 y2)
+{
+ s32 val;
+
+ if (x2 == x1)
+ return y1;
+ else {
+ il4965_math_div_round((x2 - x) * (y1 - y2), (x2 - x1), &val);
+ return val + y2;
+ }
+}
+
+/**
+ * il4965_interpolate_chan - Interpolate factory measurements for one channel
+ *
+ * Interpolates factory measurements from the two sample channels within a
+ * sub-band, to apply to channel of interest. Interpolation is proportional to
+ * differences in channel frequencies, which is proportional to differences
+ * in channel number.
+ */
+static int
+il4965_interpolate_chan(struct il_priv *il, u32 channel,
+ struct il_eeprom_calib_ch_info *chan_info)
+{
+ s32 s = -1;
+ u32 c;
+ u32 m;
+ const struct il_eeprom_calib_measure *m1;
+ const struct il_eeprom_calib_measure *m2;
+ struct il_eeprom_calib_measure *omeas;
+ u32 ch_i1;
+ u32 ch_i2;
+
+ s = il4965_get_sub_band(il, channel);
+ if (s >= EEPROM_TX_POWER_BANDS) {
+ IL_ERR("Tx Power can not find channel %d\n", channel);
+ return -1;
+ }
+
+ ch_i1 = il->calib_info->band_info[s].ch1.ch_num;
+ ch_i2 = il->calib_info->band_info[s].ch2.ch_num;
+ chan_info->ch_num = (u8) channel;
+
+ D_TXPOWER("channel %d subband %d factory cal ch %d & %d\n", channel, s,
+ ch_i1, ch_i2);
+
+ for (c = 0; c < EEPROM_TX_POWER_TX_CHAINS; c++) {
+ for (m = 0; m < EEPROM_TX_POWER_MEASUREMENTS; m++) {
+ m1 = &(il->calib_info->band_info[s].ch1.
+ measurements[c][m]);
+ m2 = &(il->calib_info->band_info[s].ch2.
+ measurements[c][m]);
+ omeas = &(chan_info->measurements[c][m]);
+
+ omeas->actual_pow =
+ (u8) il4965_interpolate_value(channel, ch_i1,
+ m1->actual_pow, ch_i2,
+ m2->actual_pow);
+ omeas->gain_idx =
+ (u8) il4965_interpolate_value(channel, ch_i1,
+ m1->gain_idx, ch_i2,
+ m2->gain_idx);
+ omeas->temperature =
+ (u8) il4965_interpolate_value(channel, ch_i1,
+ m1->temperature,
+ ch_i2,
+ m2->temperature);
+ omeas->pa_det =
+ (s8) il4965_interpolate_value(channel, ch_i1,
+ m1->pa_det, ch_i2,
+ m2->pa_det);
+
+ D_TXPOWER("chain %d meas %d AP1=%d AP2=%d AP=%d\n", c,
+ m, m1->actual_pow, m2->actual_pow,
+ omeas->actual_pow);
+ D_TXPOWER("chain %d meas %d NI1=%d NI2=%d NI=%d\n", c,
+ m, m1->gain_idx, m2->gain_idx,
+ omeas->gain_idx);
+ D_TXPOWER("chain %d meas %d PA1=%d PA2=%d PA=%d\n", c,
+ m, m1->pa_det, m2->pa_det, omeas->pa_det);
+ D_TXPOWER("chain %d meas %d T1=%d T2=%d T=%d\n", c,
+ m, m1->temperature, m2->temperature,
+ omeas->temperature);
+ }
+ }
+
+ return 0;
+}
+
+/* bit-rate-dependent table to prevent Tx distortion, in half-dB units,
+ * for OFDM 6, 12, 18, 24, 36, 48, 54, 60 MBit, and CCK all rates. */
+static s32 back_off_table[] = {
+ 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 20 MHz */
+ 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 20 MHz */
+ 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 40 MHz */
+ 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 40 MHz */
+ 10 /* CCK */
+};
+
+/* Thermal compensation values for txpower for various frequency ranges ...
+ * ratios from 3:1 to 4.5:1 of degrees (Celsius) per half-dB gain adjust */
+static struct il4965_txpower_comp_entry {
+ s32 degrees_per_05db_a;
+ s32 degrees_per_05db_a_denom;
+} tx_power_cmp_tble[CALIB_CH_GROUP_MAX] = {
+	{9, 2},			/* group 0 5.2, ch 34-43 */
+	{4, 1},			/* group 1 5.2, ch 44-70 */
+	{4, 1},			/* group 2 5.2, ch 71-124 */
+	{4, 1},			/* group 3 5.2, ch 125-200 */
+	{3, 1}			/* group 4 2.4, ch all */
+};
+
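+/* Lowest gain-table idx (i.e. highest txpower) allowed for a given rate; the
+ * lower OFDM rates on the 5.2 GHz band (band == 0) may use the extended
+ * minimum. */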
+static s32
+get_min_power_idx(s32 rate_power_idx, u32 band)
+{
+ if (!band) {
+ if ((rate_power_idx & 7) <= 4)
+ return MIN_TX_GAIN_IDX_52GHZ_EXT;
+ }
+ return MIN_TX_GAIN_IDX;
+}
+
+struct gain_entry {
+ u8 dsp;
+ u8 radio;
+};
+
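+/* Gain settings, idxed as gain_table[band][power_idx]: band 0 is 5.2 GHz,
+ * band 1 is 2.4 GHz, and a higher power_idx means lower txpower. */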
+static const struct gain_entry gain_table[2][108] = {
+ /* 5.2GHz power gain idx table */
+ {
+ {123, 0x3F}, /* highest txpower */
+ {117, 0x3F},
+ {110, 0x3F},
+ {104, 0x3F},
+ {98, 0x3F},
+ {110, 0x3E},
+ {104, 0x3E},
+ {98, 0x3E},
+ {110, 0x3D},
+ {104, 0x3D},
+ {98, 0x3D},
+ {110, 0x3C},
+ {104, 0x3C},
+ {98, 0x3C},
+ {110, 0x3B},
+ {104, 0x3B},
+ {98, 0x3B},
+ {110, 0x3A},
+ {104, 0x3A},
+ {98, 0x3A},
+ {110, 0x39},
+ {104, 0x39},
+ {98, 0x39},
+ {110, 0x38},
+ {104, 0x38},
+ {98, 0x38},
+ {110, 0x37},
+ {104, 0x37},
+ {98, 0x37},
+ {110, 0x36},
+ {104, 0x36},
+ {98, 0x36},
+ {110, 0x35},
+ {104, 0x35},
+ {98, 0x35},
+ {110, 0x34},
+ {104, 0x34},
+ {98, 0x34},
+ {110, 0x33},
+ {104, 0x33},
+ {98, 0x33},
+ {110, 0x32},
+ {104, 0x32},
+ {98, 0x32},
+ {110, 0x31},
+ {104, 0x31},
+ {98, 0x31},
+ {110, 0x30},
+ {104, 0x30},
+ {98, 0x30},
+ {110, 0x25},
+ {104, 0x25},
+ {98, 0x25},
+ {110, 0x24},
+ {104, 0x24},
+ {98, 0x24},
+ {110, 0x23},
+ {104, 0x23},
+ {98, 0x23},
+ {110, 0x22},
+ {104, 0x18},
+ {98, 0x18},
+ {110, 0x17},
+ {104, 0x17},
+ {98, 0x17},
+ {110, 0x16},
+ {104, 0x16},
+ {98, 0x16},
+ {110, 0x15},
+ {104, 0x15},
+ {98, 0x15},
+ {110, 0x14},
+ {104, 0x14},
+ {98, 0x14},
+ {110, 0x13},
+ {104, 0x13},
+ {98, 0x13},
+ {110, 0x12},
+ {104, 0x08},
+ {98, 0x08},
+ {110, 0x07},
+ {104, 0x07},
+ {98, 0x07},
+ {110, 0x06},
+ {104, 0x06},
+ {98, 0x06},
+ {110, 0x05},
+ {104, 0x05},
+ {98, 0x05},
+ {110, 0x04},
+ {104, 0x04},
+ {98, 0x04},
+ {110, 0x03},
+ {104, 0x03},
+ {98, 0x03},
+ {110, 0x02},
+ {104, 0x02},
+ {98, 0x02},
+ {110, 0x01},
+ {104, 0x01},
+ {98, 0x01},
+ {110, 0x00},
+ {104, 0x00},
+ {98, 0x00},
+ {93, 0x00},
+ {88, 0x00},
+ {83, 0x00},
+ {78, 0x00},
+ },
+ /* 2.4GHz power gain idx table */
+ {
+ {110, 0x3f}, /* highest txpower */
+ {104, 0x3f},
+ {98, 0x3f},
+ {110, 0x3e},
+ {104, 0x3e},
+ {98, 0x3e},
+ {110, 0x3d},
+ {104, 0x3d},
+ {98, 0x3d},
+ {110, 0x3c},
+ {104, 0x3c},
+ {98, 0x3c},
+ {110, 0x3b},
+ {104, 0x3b},
+ {98, 0x3b},
+ {110, 0x3a},
+ {104, 0x3a},
+ {98, 0x3a},
+ {110, 0x39},
+ {104, 0x39},
+ {98, 0x39},
+ {110, 0x38},
+ {104, 0x38},
+ {98, 0x38},
+ {110, 0x37},
+ {104, 0x37},
+ {98, 0x37},
+ {110, 0x36},
+ {104, 0x36},
+ {98, 0x36},
+ {110, 0x35},
+ {104, 0x35},
+ {98, 0x35},
+ {110, 0x34},
+ {104, 0x34},
+ {98, 0x34},
+ {110, 0x33},
+ {104, 0x33},
+ {98, 0x33},
+ {110, 0x32},
+ {104, 0x32},
+ {98, 0x32},
+ {110, 0x31},
+ {104, 0x31},
+ {98, 0x31},
+ {110, 0x30},
+ {104, 0x30},
+ {98, 0x30},
+ {110, 0x6},
+ {104, 0x6},
+ {98, 0x6},
+ {110, 0x5},
+ {104, 0x5},
+ {98, 0x5},
+ {110, 0x4},
+ {104, 0x4},
+ {98, 0x4},
+ {110, 0x3},
+ {104, 0x3},
+ {98, 0x3},
+ {110, 0x2},
+ {104, 0x2},
+ {98, 0x2},
+ {110, 0x1},
+ {104, 0x1},
+ {98, 0x1},
+ {110, 0x0},
+ {104, 0x0},
+ {98, 0x0},
+ {97, 0},
+ {96, 0},
+ {95, 0},
+ {94, 0},
+ {93, 0},
+ {92, 0},
+ {91, 0},
+ {90, 0},
+ {89, 0},
+ {88, 0},
+ {87, 0},
+ {86, 0},
+ {85, 0},
+ {84, 0},
+ {83, 0},
+ {82, 0},
+ {81, 0},
+ {80, 0},
+ {79, 0},
+ {78, 0},
+ {77, 0},
+ {76, 0},
+ {75, 0},
+ {74, 0},
+ {73, 0},
+ {72, 0},
+ {71, 0},
+ {70, 0},
+ {69, 0},
+ {68, 0},
+ {67, 0},
+ {66, 0},
+ {65, 0},
+ {64, 0},
+ {63, 0},
+ {62, 0},
+ {61, 0},
+ {60, 0},
+ {59, 0},
+ }
+};
+
+static int
+il4965_fill_txpower_tbl(struct il_priv *il, u8 band, u16 channel, u8 is_ht40,
+ u8 ctrl_chan_high,
+ struct il4965_tx_power_db *tx_power_tbl)
+{
+ u8 saturation_power;
+ s32 target_power;
+ s32 user_target_power;
+ s32 power_limit;
+ s32 current_temp;
+ s32 reg_limit;
+ s32 current_regulatory;
+ s32 txatten_grp = CALIB_CH_GROUP_MAX;
+ int i;
+ int c;
+ const struct il_channel_info *ch_info = NULL;
+ struct il_eeprom_calib_ch_info ch_eeprom_info;
+ const struct il_eeprom_calib_measure *measurement;
+ s16 voltage;
+ s32 init_voltage;
+ s32 voltage_compensation;
+ s32 degrees_per_05db_num;
+ s32 degrees_per_05db_denom;
+ s32 factory_temp;
+ s32 temperature_comp[2];
+ s32 factory_gain_idx[2];
+ s32 factory_actual_pwr[2];
+ s32 power_idx;
+
+ /* tx_power_user_lmt is in dBm, convert to half-dBm (half-dB units
+ * are used for idxing into txpower table) */
+ user_target_power = 2 * il->tx_power_user_lmt;
+
+ /* Get current (RXON) channel, band, width */
+ D_TXPOWER("chan %d band %d is_ht40 %d\n", channel, band, is_ht40);
+
+ ch_info = il_get_channel_info(il, il->band, channel);
+
+ if (!il_is_channel_valid(ch_info))
+ return -EINVAL;
+
+ /* get txatten group, used to select 1) thermal txpower adjustment
+ * and 2) mimo txpower balance between Tx chains. */
+ txatten_grp = il4965_get_tx_atten_grp(channel);
+ if (txatten_grp < 0) {
+ IL_ERR("Can't find txatten group for channel %d.\n", channel);
+ return txatten_grp;
+ }
+
+ D_TXPOWER("channel %d belongs to txatten group %d\n", channel,
+ txatten_grp);
+
+ if (is_ht40) {
+ if (ctrl_chan_high)
+ channel -= 2;
+ else
+ channel += 2;
+ }
+
+ /* hardware txpower limits ...
+ * saturation (clipping distortion) txpowers are in half-dBm */
+ if (band)
+ saturation_power = il->calib_info->saturation_power24;
+ else
+ saturation_power = il->calib_info->saturation_power52;
+
+ if (saturation_power < IL_TX_POWER_SATURATION_MIN ||
+ saturation_power > IL_TX_POWER_SATURATION_MAX) {
+ if (band)
+ saturation_power = IL_TX_POWER_DEFAULT_SATURATION_24;
+ else
+ saturation_power = IL_TX_POWER_DEFAULT_SATURATION_52;
+ }
+
+	/* regulatory txpower limits ... reg_limit values are in half-dBm,
+	 * max_power_avg values are in dBm; multiply by 2 to convert */
+ if (is_ht40)
+ reg_limit = ch_info->ht40_max_power_avg * 2;
+ else
+ reg_limit = ch_info->max_power_avg * 2;
+
+ if ((reg_limit < IL_TX_POWER_REGULATORY_MIN) ||
+ (reg_limit > IL_TX_POWER_REGULATORY_MAX)) {
+ if (band)
+ reg_limit = IL_TX_POWER_DEFAULT_REGULATORY_24;
+ else
+ reg_limit = IL_TX_POWER_DEFAULT_REGULATORY_52;
+ }
+
+ /* Interpolate txpower calibration values for this channel,
+ * based on factory calibration tests on spaced channels. */
+ il4965_interpolate_chan(il, channel, &ch_eeprom_info);
+
+ /* calculate tx gain adjustment based on power supply voltage */
+ voltage = le16_to_cpu(il->calib_info->voltage);
+ init_voltage = (s32) le32_to_cpu(il->card_alive_init.voltage);
+ voltage_compensation =
+ il4965_get_voltage_compensation(voltage, init_voltage);
+
+ D_TXPOWER("curr volt %d eeprom volt %d volt comp %d\n", init_voltage,
+ voltage, voltage_compensation);
+
+ /* get current temperature (Celsius) */
+	current_temp = max(il->temperature, IL_TX_POWER_TEMPERATURE_MIN);
+	current_temp = min(current_temp, IL_TX_POWER_TEMPERATURE_MAX);
+ current_temp = KELVIN_TO_CELSIUS(current_temp);
+
+ /* select thermal txpower adjustment params, based on channel group
+ * (same frequency group used for mimo txatten adjustment) */
+ degrees_per_05db_num =
+ tx_power_cmp_tble[txatten_grp].degrees_per_05db_a;
+ degrees_per_05db_denom =
+ tx_power_cmp_tble[txatten_grp].degrees_per_05db_a_denom;
+
+ /* get per-chain txpower values from factory measurements */
+ for (c = 0; c < 2; c++) {
+ measurement = &ch_eeprom_info.measurements[c][1];
+
+ /* txgain adjustment (in half-dB steps) based on difference
+ * between factory and current temperature */
+ factory_temp = measurement->temperature;
+ il4965_math_div_round((current_temp -
+ factory_temp) * degrees_per_05db_denom,
+ degrees_per_05db_num,
+ &temperature_comp[c]);
+
+ factory_gain_idx[c] = measurement->gain_idx;
+ factory_actual_pwr[c] = measurement->actual_pow;
+
+ D_TXPOWER("chain = %d\n", c);
+ D_TXPOWER("fctry tmp %d, " "curr tmp %d, comp %d steps\n",
+ factory_temp, current_temp, temperature_comp[c]);
+
+ D_TXPOWER("fctry idx %d, fctry pwr %d\n", factory_gain_idx[c],
+ factory_actual_pwr[c]);
+ }
+
+ /* for each of 33 bit-rates (including 1 for CCK) */
+ for (i = 0; i < POWER_TBL_NUM_ENTRIES; i++) {
+ u8 is_mimo_rate;
+ union il4965_tx_power_dual_stream tx_power;
+
+ /* for mimo, reduce each chain's txpower by half
+ * (3dB, 6 steps), so total output power is regulatory
+ * compliant. */
+ if (i & 0x8) {
+ current_regulatory =
+ reg_limit -
+ IL_TX_POWER_MIMO_REGULATORY_COMPENSATION;
+ is_mimo_rate = 1;
+ } else {
+ current_regulatory = reg_limit;
+ is_mimo_rate = 0;
+ }
+
+ /* find txpower limit, either hardware or regulatory */
+ power_limit = saturation_power - back_off_table[i];
+ if (power_limit > current_regulatory)
+ power_limit = current_regulatory;
+
+ /* reduce user's txpower request if necessary
+ * for this rate on this channel */
+ target_power = user_target_power;
+ if (target_power > power_limit)
+ target_power = power_limit;
+
+ D_TXPOWER("rate %d sat %d reg %d usr %d tgt %d\n", i,
+ saturation_power - back_off_table[i],
+ current_regulatory, user_target_power, target_power);
+
+ /* for each of 2 Tx chains (radio transmitters) */
+ for (c = 0; c < 2; c++) {
+ s32 atten_value;
+
+ if (is_mimo_rate)
+ atten_value =
+ (s32) le32_to_cpu(il->card_alive_init.
+ tx_atten[txatten_grp][c]);
+ else
+ atten_value = 0;
+
+			/* calculate idx; higher idx means lower txpower;
+			 * keep the value signed so the range checks below work */
+			power_idx = factory_gain_idx[c] -
+			    (target_power - factory_actual_pwr[c]) -
+			    temperature_comp[c] - voltage_compensation +
+			    atten_value;
+
+/* D_TXPOWER("calculated txpower idx %d\n",
+ power_idx); */
+
+ if (power_idx < get_min_power_idx(i, band))
+ power_idx = get_min_power_idx(i, band);
+
+ /* adjust 5 GHz idx to support negative idxes */
+ if (!band)
+ power_idx += 9;
+
+ /* CCK, rate 32, reduce txpower for CCK */
+ if (i == POWER_TBL_CCK_ENTRY)
+ power_idx +=
+ IL_TX_POWER_CCK_COMPENSATION_C_STEP;
+
+ /* stay within the table! */
+ if (power_idx > 107) {
+ IL_WARN("txpower idx %d > 107\n", power_idx);
+ power_idx = 107;
+ }
+ if (power_idx < 0) {
+ IL_WARN("txpower idx %d < 0\n", power_idx);
+ power_idx = 0;
+ }
+
+ /* fill txpower command for this rate/chain */
+ tx_power.s.radio_tx_gain[c] =
+ gain_table[band][power_idx].radio;
+ tx_power.s.dsp_predis_atten[c] =
+ gain_table[band][power_idx].dsp;
+
+ D_TXPOWER("chain %d mimo %d idx %d "
+ "gain 0x%02x dsp %d\n", c, atten_value,
+ power_idx, tx_power.s.radio_tx_gain[c],
+ tx_power.s.dsp_predis_atten[c]);
+ } /* for each chain */
+
+ tx_power_tbl->power_tbl[i].dw = cpu_to_le32(tx_power.dw);
+
+ } /* for each rate */
+
+ return 0;
+}
+
+/**
+ * il4965_send_tx_power - Configure the TXPOWER level user limit
+ *
+ * Uses the active RXON for channel, band, and characteristics (ht40, high)
+ * The power limit is taken from il->tx_power_user_lmt.
+ */
+static int
+il4965_send_tx_power(struct il_priv *il)
+{
+ struct il4965_txpowertable_cmd cmd = { 0 };
+ int ret;
+ u8 band = 0;
+ bool is_ht40 = false;
+ u8 ctrl_chan_high = 0;
+ struct il_rxon_context *ctx = &il->ctx;
+
+ if (WARN_ONCE
+ (test_bit(S_SCAN_HW, &il->status),
+ "TX Power requested while scanning!\n"))
+ return -EAGAIN;
+
+ band = il->band == IEEE80211_BAND_2GHZ;
+
+ is_ht40 = iw4965_is_ht40_channel(ctx->active.flags);
+
+ if (is_ht40 && (ctx->active.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
+ ctrl_chan_high = 1;
+
+ cmd.band = band;
+ cmd.channel = ctx->active.channel;
+
+ ret =
+ il4965_fill_txpower_tbl(il, band, le16_to_cpu(ctx->active.channel),
+ is_ht40, ctrl_chan_high, &cmd.tx_power);
+ if (ret)
+ goto out;
+
+ ret = il_send_cmd_pdu(il, C_TX_PWR_TBL, sizeof(cmd), &cmd);
+
+out:
+ return ret;
+}
+
+static int
+il4965_send_rxon_assoc(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ int ret = 0;
+ struct il4965_rxon_assoc_cmd rxon_assoc;
+ const struct il_rxon_cmd *rxon1 = &ctx->staging;
+ const struct il_rxon_cmd *rxon2 = &ctx->active;
+
+ if (rxon1->flags == rxon2->flags &&
+ rxon1->filter_flags == rxon2->filter_flags &&
+ rxon1->cck_basic_rates == rxon2->cck_basic_rates &&
+ rxon1->ofdm_ht_single_stream_basic_rates ==
+ rxon2->ofdm_ht_single_stream_basic_rates &&
+ rxon1->ofdm_ht_dual_stream_basic_rates ==
+ rxon2->ofdm_ht_dual_stream_basic_rates &&
+ rxon1->rx_chain == rxon2->rx_chain &&
+ rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates) {
+ D_INFO("Using current RXON_ASSOC. Not resending.\n");
+ return 0;
+ }
+
+ rxon_assoc.flags = ctx->staging.flags;
+ rxon_assoc.filter_flags = ctx->staging.filter_flags;
+ rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates;
+ rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
+ rxon_assoc.reserved = 0;
+ rxon_assoc.ofdm_ht_single_stream_basic_rates =
+ ctx->staging.ofdm_ht_single_stream_basic_rates;
+ rxon_assoc.ofdm_ht_dual_stream_basic_rates =
+ ctx->staging.ofdm_ht_dual_stream_basic_rates;
+ rxon_assoc.rx_chain_select_flags = ctx->staging.rx_chain;
+
+ ret =
+ il_send_cmd_pdu_async(il, C_RXON_ASSOC, sizeof(rxon_assoc),
+ &rxon_assoc, NULL);
+
+ return ret;
+}
+
+static int
+il4965_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ /* cast away the const for active_rxon in this function */
+ struct il_rxon_cmd *active_rxon = (void *)&ctx->active;
+ int ret;
+ bool new_assoc = !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK);
+
+ if (!il_is_alive(il))
+ return -EBUSY;
+
+ if (!ctx->is_active)
+ return 0;
+
+ /* always get timestamp with Rx frame */
+ ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;
+
+ ret = il_check_rxon_cmd(il, ctx);
+ if (ret) {
+ IL_ERR("Invalid RXON configuration. Not committing.\n");
+ return -EINVAL;
+ }
+
+	/*
+	 * A new RXON is being committed; abort any previous channel
+	 * switch that is still in progress.
+	 */
+ if (test_bit(S_CHANNEL_SWITCH_PENDING, &il->status) &&
+ il->switch_channel != ctx->staging.channel) {
+ D_11H("abort channel switch on %d\n",
+ le16_to_cpu(il->switch_channel));
+ il_chswitch_done(il, false);
+ }
+
+ /* If we don't need to send a full RXON, we can use
+ * il_rxon_assoc_cmd which is used to reconfigure filter
+ * and other flags for the current radio configuration. */
+ if (!il_full_rxon_required(il, ctx)) {
+ ret = il_send_rxon_assoc(il, ctx);
+ if (ret) {
+ IL_ERR("Error setting RXON_ASSOC (%d)\n", ret);
+ return ret;
+ }
+
+ memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
+ il_print_rx_config_cmd(il, ctx);
+ /*
+ * We do not commit tx power settings while channel changing,
+ * do it now if tx power changed.
+ */
+ il_set_tx_power(il, il->tx_power_next, false);
+ return 0;
+ }
+
+	/* If we are currently associated and the new config has the
+	 * association mask enabled, we must first clear the ASSOC bit
+	 * from the active configuration before we apply the new config */
+ if (il_is_associated_ctx(ctx) && new_assoc) {
+ D_INFO("Toggling associated bit on current RXON\n");
+ active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+
+ ret =
+ il_send_cmd_pdu(il, ctx->rxon_cmd,
+ sizeof(struct il_rxon_cmd), active_rxon);
+
+ /* If the mask clearing failed then we set
+ * active_rxon back to what it was previously */
+ if (ret) {
+ active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
+ IL_ERR("Error clearing ASSOC_MSK (%d)\n", ret);
+ return ret;
+ }
+ il_clear_ucode_stations(il, ctx);
+ il_restore_stations(il, ctx);
+ ret = il4965_restore_default_wep_keys(il, ctx);
+ if (ret) {
+ IL_ERR("Failed to restore WEP keys (%d)\n", ret);
+ return ret;
+ }
+ }
+
+ D_INFO("Sending RXON\n" "* with%s RXON_FILTER_ASSOC_MSK\n"
+ "* channel = %d\n" "* bssid = %pM\n", (new_assoc ? "" : "out"),
+ le16_to_cpu(ctx->staging.channel), ctx->staging.bssid_addr);
+
+ il_set_rxon_hwcrypto(il, ctx, !il->cfg->mod_params->sw_crypto);
+
+ /* Apply the new configuration
+ * RXON unassoc clears the station table in uCode so restoration of
+ * stations is needed after it (the RXON command) completes
+ */
+ if (!new_assoc) {
+ ret =
+ il_send_cmd_pdu(il, ctx->rxon_cmd,
+ sizeof(struct il_rxon_cmd), &ctx->staging);
+ if (ret) {
+ IL_ERR("Error setting new RXON (%d)\n", ret);
+ return ret;
+ }
+ D_INFO("Return from !new_assoc RXON.\n");
+ memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
+ il_clear_ucode_stations(il, ctx);
+ il_restore_stations(il, ctx);
+ ret = il4965_restore_default_wep_keys(il, ctx);
+ if (ret) {
+ IL_ERR("Failed to restore WEP keys (%d)\n", ret);
+ return ret;
+ }
+ }
+ if (new_assoc) {
+ il->start_calib = 0;
+ /* Apply the new configuration
+ * RXON assoc doesn't clear the station table in uCode,
+ */
+ ret =
+ il_send_cmd_pdu(il, ctx->rxon_cmd,
+ sizeof(struct il_rxon_cmd), &ctx->staging);
+ if (ret) {
+ IL_ERR("Error setting new RXON (%d)\n", ret);
+ return ret;
+ }
+ memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
+ }
+ il_print_rx_config_cmd(il, ctx);
+
+ il4965_init_sensitivity(il);
+
+ /* If we issue a new RXON command which required a tune then we must
+ * send a new TXPOWER command or we won't be able to Tx any frames */
+ ret = il_set_tx_power(il, il->tx_power_next, true);
+ if (ret) {
+ IL_ERR("Error sending TX power (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+il4965_hw_channel_switch(struct il_priv *il,
+ struct ieee80211_channel_switch *ch_switch)
+{
+ struct il_rxon_context *ctx = &il->ctx;
+ int rc;
+ u8 band = 0;
+ bool is_ht40 = false;
+ u8 ctrl_chan_high = 0;
+ struct il4965_channel_switch_cmd cmd;
+ const struct il_channel_info *ch_info;
+ u32 switch_time_in_usec, ucode_switch_time;
+ u16 ch;
+ u32 tsf_low;
+ u8 switch_count;
+ u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval);
+ struct ieee80211_vif *vif = ctx->vif;
+ band = il->band == IEEE80211_BAND_2GHZ;
+
+ is_ht40 = iw4965_is_ht40_channel(ctx->staging.flags);
+
+ if (is_ht40 && (ctx->staging.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
+ ctrl_chan_high = 1;
+
+ cmd.band = band;
+ cmd.expect_beacon = 0;
+ ch = ch_switch->channel->hw_value;
+ cmd.channel = cpu_to_le16(ch);
+ cmd.rxon_flags = ctx->staging.flags;
+ cmd.rxon_filter_flags = ctx->staging.filter_flags;
+ switch_count = ch_switch->count;
+ tsf_low = ch_switch->timestamp & 0x0ffffffff;
+ /*
+ * calculate the ucode channel switch time
+ * adding TSF as one of the factor for when to switch
+ */
+ if (il->ucode_beacon_time > tsf_low && beacon_interval) {
+ if (switch_count >
+ ((il->ucode_beacon_time - tsf_low) / beacon_interval)) {
+ switch_count -=
+ (il->ucode_beacon_time - tsf_low) / beacon_interval;
+ } else
+ switch_count = 0;
+ }
+ if (switch_count <= 1)
+ cmd.switch_time = cpu_to_le32(il->ucode_beacon_time);
+ else {
+ switch_time_in_usec =
+ vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
+ ucode_switch_time =
+ il_usecs_to_beacons(il, switch_time_in_usec,
+ beacon_interval);
+ cmd.switch_time =
+ il_add_beacon_time(il, il->ucode_beacon_time,
+ ucode_switch_time, beacon_interval);
+ }
+ D_11H("uCode time for the switch is 0x%x\n", cmd.switch_time);
+ ch_info = il_get_channel_info(il, il->band, ch);
+ if (ch_info)
+ cmd.expect_beacon = il_is_channel_radar(ch_info);
+ else {
+ IL_ERR("invalid channel switch from %u to %u\n",
+ ctx->active.channel, ch);
+ return -EFAULT;
+ }
+
+ rc = il4965_fill_txpower_tbl(il, band, ch, is_ht40, ctrl_chan_high,
+ &cmd.tx_power);
+ if (rc) {
+ D_11H("error:%d fill txpower_tbl\n", rc);
+ return rc;
+ }
+
+ return il_send_cmd_pdu(il, C_CHANNEL_SWITCH, sizeof(cmd), &cmd);
+}
+
+/**
+ * il4965_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
+ */
+static void
+il4965_txq_update_byte_cnt_tbl(struct il_priv *il, struct il_tx_queue *txq,
+ u16 byte_cnt)
+{
+ struct il4965_scd_bc_tbl *scd_bc_tbl = il->scd_bc_tbls.addr;
+ int txq_id = txq->q.id;
+ int write_ptr = txq->q.write_ptr;
+ int len = byte_cnt + IL_TX_CRC_SIZE + IL_TX_DELIMITER_SIZE;
+ __le16 bc_ent;
+
+ WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
+
+ bc_ent = cpu_to_le16(len & 0xFFF);
+ /* Set up byte count within first 256 entries */
+ scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
+
+ /* If within first 64 entries, duplicate at end */
+ if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
+ scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] =
+ bc_ent;
+}
+
+/**
+ * il4965_hw_get_temperature - return the calibrated temperature (in Kelvin)
+ * @il: driver private data; provides the temperature reading from the uCode
+ *
+ * A return of <0 indicates bogus data in the stats
+ */
+static int
+il4965_hw_get_temperature(struct il_priv *il)
+{
+ s32 temperature;
+ s32 vt;
+ s32 R1, R2, R3;
+ u32 R4;
+
+ if (test_bit(S_TEMPERATURE, &il->status) &&
+ (il->_4965.stats.flag & STATS_REPLY_FLG_HT40_MODE_MSK)) {
+ D_TEMP("Running HT40 temperature calibration\n");
+ R1 = (s32) le32_to_cpu(il->card_alive_init.therm_r1[1]);
+ R2 = (s32) le32_to_cpu(il->card_alive_init.therm_r2[1]);
+ R3 = (s32) le32_to_cpu(il->card_alive_init.therm_r3[1]);
+ R4 = le32_to_cpu(il->card_alive_init.therm_r4[1]);
+ } else {
+ D_TEMP("Running temperature calibration\n");
+ R1 = (s32) le32_to_cpu(il->card_alive_init.therm_r1[0]);
+ R2 = (s32) le32_to_cpu(il->card_alive_init.therm_r2[0]);
+ R3 = (s32) le32_to_cpu(il->card_alive_init.therm_r3[0]);
+ R4 = le32_to_cpu(il->card_alive_init.therm_r4[0]);
+ }
+
+ /*
+ * Temperature is only 23 bits, so sign extend out to 32.
+ *
+ * NOTE If we haven't received a stats notification yet
+ * with an updated temperature, use R4 provided to us in the
+ * "initialize" ALIVE response.
+ */
+ if (!test_bit(S_TEMPERATURE, &il->status))
+ vt = sign_extend32(R4, 23);
+ else
+ vt = sign_extend32(le32_to_cpu
+ (il->_4965.stats.general.common.temperature),
+ 23);
+
+ D_TEMP("Calib values R[1-3]: %d %d %d R4: %d\n", R1, R2, R3, vt);
+
+ if (R3 == R1) {
+ IL_ERR("Calibration conflict R1 == R3\n");
+ return -1;
+ }
+
+ /* Calculate temperature in degrees Kelvin, adjust by 97%.
+ * Add offset to center the adjustment around 0 degrees Centigrade. */
+ temperature = TEMPERATURE_CALIB_A_VAL * (vt - R2);
+ temperature /= (R3 - R1);
+ temperature =
+ (temperature * 97) / 100 + TEMPERATURE_CALIB_KELVIN_OFFSET;
+
+ D_TEMP("Calibrated temperature: %dK, %dC\n", temperature,
+ KELVIN_TO_CELSIUS(temperature));
+
+ return temperature;
+}
+
+/* Adjust Txpower only if temperature variance is greater than threshold. */
+#define IL_TEMPERATURE_THRESHOLD 3
+
+/**
+ * il4965_is_temp_calib_needed - determines if new calibration is needed
+ *
+ * If the temperature has changed sufficiently, then a recalibration
+ * is needed.
+ *
+ * Assumes caller will replace il->last_temperature once calibration
+ * executed.
+ */
+static int
+il4965_is_temp_calib_needed(struct il_priv *il)
+{
+ int temp_diff;
+
+ if (!test_bit(S_STATS, &il->status)) {
+ D_TEMP("Temperature not updated -- no stats.\n");
+ return 0;
+ }
+
+ temp_diff = il->temperature - il->last_temperature;
+
+ /* get absolute value */
+ if (temp_diff < 0) {
+ D_POWER("Getting cooler, delta %d\n", temp_diff);
+ temp_diff = -temp_diff;
+ } else if (temp_diff == 0)
+ D_POWER("Temperature unchanged\n");
+ else
+ D_POWER("Getting warmer, delta %d\n", temp_diff);
+
+ if (temp_diff < IL_TEMPERATURE_THRESHOLD) {
+ D_POWER(" => thermal txpower calib not needed\n");
+ return 0;
+ }
+
+ D_POWER(" => thermal txpower calib needed\n");
+
+ return 1;
+}
+
+static void
+il4965_temperature_calib(struct il_priv *il)
+{
+ s32 temp;
+
+ temp = il4965_hw_get_temperature(il);
+ if (IL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(temp))
+ return;
+
+ if (il->temperature != temp) {
+ if (il->temperature)
+ D_TEMP("Temperature changed " "from %dC to %dC\n",
+ KELVIN_TO_CELSIUS(il->temperature),
+ KELVIN_TO_CELSIUS(temp));
+ else
+ D_TEMP("Temperature " "initialized to %dC\n",
+ KELVIN_TO_CELSIUS(temp));
+ }
+
+ il->temperature = temp;
+ set_bit(S_TEMPERATURE, &il->status);
+
+ if (!il->disable_tx_power_cal &&
+ unlikely(!test_bit(S_SCANNING, &il->status)) &&
+ il4965_is_temp_calib_needed(il))
+ queue_work(il->workqueue, &il->txpower_work);
+}
+
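+/* RXON uses the 4965-specific command size; all other host commands keep
+ * the caller-supplied length. */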
+static u16
+il4965_get_hcmd_size(u8 cmd_id, u16 len)
+{
+ switch (cmd_id) {
+ case C_RXON:
+ return (u16) sizeof(struct il4965_rxon_cmd);
+ default:
+ return len;
+ }
+}
+
+static u16
+il4965_build_addsta_hcmd(const struct il_addsta_cmd *cmd, u8 * data)
+{
+ struct il4965_addsta_cmd *addsta = (struct il4965_addsta_cmd *)data;
+ addsta->mode = cmd->mode;
+ memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify));
+ memcpy(&addsta->key, &cmd->key, sizeof(struct il4965_keyinfo));
+ addsta->station_flags = cmd->station_flags;
+ addsta->station_flags_msk = cmd->station_flags_msk;
+ addsta->tid_disable_tx = cmd->tid_disable_tx;
+ addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid;
+ addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid;
+ addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn;
+ addsta->sleep_tx_count = cmd->sleep_tx_count;
+ addsta->reserved1 = cpu_to_le16(0);
+ addsta->reserved2 = cpu_to_le16(0);
+
+ return (u16) sizeof(struct il4965_addsta_cmd);
+}
+
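+/* The scheduler SSN is the __le32 that immediately follows the frame_count
+ * per-frame status entries in the Tx response. */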
+static inline u32
+il4965_get_scd_ssn(struct il4965_tx_resp *tx_resp)
+{
+ return le32_to_cpup(&tx_resp->u.status + tx_resp->frame_count) & MAX_SN;
+}
+
+static inline u32
+il4965_tx_status_to_mac80211(u32 status)
+{
+ status &= TX_STATUS_MSK;
+
+ switch (status) {
+ case TX_STATUS_SUCCESS:
+ case TX_STATUS_DIRECT_DONE:
+ return IEEE80211_TX_STAT_ACK;
+ case TX_STATUS_FAIL_DEST_PS:
+ return IEEE80211_TX_STAT_TX_FILTERED;
+ default:
+ return 0;
+ }
+}
+
+static inline bool
+il4965_is_tx_success(u32 status)
+{
+ status &= TX_STATUS_MSK;
+ return (status == TX_STATUS_SUCCESS || status == TX_STATUS_DIRECT_DONE);
+}
+
+/**
+ * il4965_tx_status_reply_tx - Handle Tx response for frames in aggregation queue
+ */
+static int
+il4965_tx_status_reply_tx(struct il_priv *il, struct il_ht_agg *agg,
+ struct il4965_tx_resp *tx_resp, int txq_id,
+ u16 start_idx)
+{
+ u16 status;
+ struct agg_tx_status *frame_status = tx_resp->u.agg_status;
+ struct ieee80211_tx_info *info = NULL;
+ struct ieee80211_hdr *hdr = NULL;
+ u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
+ int i, sh, idx;
+ u16 seq;
+ if (agg->wait_for_ba)
+ D_TX_REPLY("got tx response w/o block-ack\n");
+
+ agg->frame_count = tx_resp->frame_count;
+ agg->start_idx = start_idx;
+ agg->rate_n_flags = rate_n_flags;
+ agg->bitmap = 0;
+
+ /* num frames attempted by Tx command */
+ if (agg->frame_count == 1) {
+ /* Only one frame was attempted; no block-ack will arrive */
+ status = le16_to_cpu(frame_status[0].status);
+ idx = start_idx;
+
+ D_TX_REPLY("FrameCnt = %d, StartIdx=%d idx=%d\n",
+ agg->frame_count, agg->start_idx, idx);
+
+ info = IEEE80211_SKB_CB(il->txq[txq_id].txb[idx].skb);
+ info->status.rates[0].count = tx_resp->failure_frame + 1;
+ info->flags &= ~IEEE80211_TX_CTL_AMPDU;
+ info->flags |= il4965_tx_status_to_mac80211(status);
+ il4965_hwrate_to_tx_control(il, rate_n_flags, info);
+
+ D_TX_REPLY("1 Frame 0x%x failure :%d\n", status & 0xff,
+ tx_resp->failure_frame);
+ D_TX_REPLY("Rate Info rate_n_flags=%x\n", rate_n_flags);
+
+ agg->wait_for_ba = 0;
+ } else {
+ /* Two or more frames were attempted; expect block-ack */
+ u64 bitmap = 0;
+ int start = agg->start_idx;
+
+ /* Construct bit-map of pending frames within Tx win */
+ for (i = 0; i < agg->frame_count; i++) {
+ u16 sc;
+ status = le16_to_cpu(frame_status[i].status);
+ seq = le16_to_cpu(frame_status[i].sequence);
+ idx = SEQ_TO_IDX(seq);
+ txq_id = SEQ_TO_QUEUE(seq);
+
+ if (status &
+ (AGG_TX_STATE_FEW_BYTES_MSK |
+ AGG_TX_STATE_ABORT_MSK))
+ continue;
+
+ D_TX_REPLY("FrameCnt = %d, txq_id=%d idx=%d\n",
+ agg->frame_count, txq_id, idx);
+
+ hdr = il_tx_queue_get_hdr(il, txq_id, idx);
+ if (!hdr) {
+ IL_ERR("BUG_ON idx doesn't point to valid skb"
+ " idx=%d, txq_id=%d\n", idx, txq_id);
+ return -1;
+ }
+
+ sc = le16_to_cpu(hdr->seq_ctrl);
+ if (idx != (SEQ_TO_SN(sc) & 0xff)) {
+ IL_ERR("BUG_ON idx doesn't match seq control"
+ " idx=%d, seq_idx=%d, seq=%d\n", idx,
+ SEQ_TO_SN(sc), hdr->seq_ctrl);
+ return -1;
+ }
+
+ D_TX_REPLY("AGG Frame i=%d idx %d seq=%d\n", i, idx,
+ SEQ_TO_SN(sc));
+
+ sh = idx - start;
+ if (sh > 64) {
+ sh = (start - idx) + 0xff;
+ bitmap = bitmap << sh;
+ sh = 0;
+ start = idx;
+ } else if (sh < -64)
+ sh = 0xff - (start - idx);
+ else if (sh < 0) {
+ sh = start - idx;
+ start = idx;
+ bitmap = bitmap << sh;
+ sh = 0;
+ }
+ bitmap |= 1ULL << sh;
+ D_TX_REPLY("start=%d bitmap=0x%llx\n", start,
+ (unsigned long long)bitmap);
+ }
+
+ agg->bitmap = bitmap;
+ agg->start_idx = start;
+ D_TX_REPLY("Frames %d start_idx=%d bitmap=0x%llx\n",
+ agg->frame_count, agg->start_idx,
+ (unsigned long long)agg->bitmap);
+
+ if (bitmap)
+ agg->wait_for_ba = 1;
+ }
+ return 0;
+}
+
+static u8
+il4965_find_station(struct il_priv *il, const u8 * addr)
+{
+ int i;
+ int start = 0;
+ int ret = IL_INVALID_STATION;
+ unsigned long flags;
+
+ if ((il->iw_mode == NL80211_IFTYPE_ADHOC))
+ start = IL_STA_ID;
+
+ if (is_broadcast_ether_addr(addr))
+ return il->ctx.bcast_sta_id;
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ for (i = start; i < il->hw_params.max_stations; i++)
+ if (il->stations[i].used &&
+ (!compare_ether_addr(il->stations[i].sta.sta.addr, addr))) {
+ ret = i;
+ goto out;
+ }
+
+ D_ASSOC("can not find STA %pM total %d\n", addr, il->num_stations);
+
+out:
+	/*
+	 * More commands interacting with stations may arrive before we
+	 * have finished processing the addition of this station.
+	 */
+ if (ret != IL_INVALID_STATION &&
+ (!(il->stations[ret].used & IL_STA_UCODE_ACTIVE) ||
+ ((il->stations[ret].used & IL_STA_UCODE_ACTIVE) &&
+ (il->stations[ret].used & IL_STA_UCODE_INPROGRESS)))) {
+ IL_ERR("Requested station info for sta %d before ready.\n",
+ ret);
+ ret = IL_INVALID_STATION;
+ }
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+ return ret;
+}
+
+static int
+il4965_get_ra_sta_id(struct il_priv *il, struct ieee80211_hdr *hdr)
+{
+ if (il->iw_mode == NL80211_IFTYPE_STATION) {
+ return IL_AP_ID;
+ } else {
+ u8 *da = ieee80211_get_DA(hdr);
+ return il4965_find_station(il, da);
+ }
+}
+
+/**
+ * il4965_hdl_tx - Handle standard (non-aggregation) Tx response
+ */
+static void
+il4965_hdl_tx(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ u16 sequence = le16_to_cpu(pkt->hdr.sequence);
+ int txq_id = SEQ_TO_QUEUE(sequence);
+ int idx = SEQ_TO_IDX(sequence);
+ struct il_tx_queue *txq = &il->txq[txq_id];
+ struct ieee80211_hdr *hdr;
+ struct ieee80211_tx_info *info;
+ struct il4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
+ u32 status = le32_to_cpu(tx_resp->u.status);
+ int uninitialized_var(tid);
+ int sta_id;
+ int freed;
+ u8 *qc = NULL;
+ unsigned long flags;
+
+ if (idx >= txq->q.n_bd || il_queue_used(&txq->q, idx) == 0) {
+ IL_ERR("Read idx for DMA queue txq_id (%d) idx %d "
+ "is out of range [0-%d] %d %d\n", txq_id, idx,
+ txq->q.n_bd, txq->q.write_ptr, txq->q.read_ptr);
+ return;
+ }
+
+ txq->time_stamp = jiffies;
+ info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
+ memset(&info->status, 0, sizeof(info->status));
+
+ hdr = il_tx_queue_get_hdr(il, txq_id, idx);
+ if (ieee80211_is_data_qos(hdr->frame_control)) {
+ qc = ieee80211_get_qos_ctl(hdr);
+ tid = qc[0] & 0xf;
+ }
+
+ sta_id = il4965_get_ra_sta_id(il, hdr);
+ if (txq->sched_retry && unlikely(sta_id == IL_INVALID_STATION)) {
+ IL_ERR("Station not known\n");
+ return;
+ }
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ if (txq->sched_retry) {
+ const u32 scd_ssn = il4965_get_scd_ssn(tx_resp);
+ struct il_ht_agg *agg = NULL;
+ WARN_ON(!qc);
+
+ agg = &il->stations[sta_id].tid[tid].agg;
+
+ il4965_tx_status_reply_tx(il, agg, tx_resp, txq_id, idx);
+
+ /* check if BAR is needed */
+ if ((tx_resp->frame_count == 1) &&
+ !il4965_is_tx_success(status))
+ info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
+
+ if (txq->q.read_ptr != (scd_ssn & 0xff)) {
+ idx = il_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
+ D_TX_REPLY("Retry scheduler reclaim scd_ssn "
+ "%d idx %d\n", scd_ssn, idx);
+ freed = il4965_tx_queue_reclaim(il, txq_id, idx);
+ if (qc)
+ il4965_free_tfds_in_queue(il, sta_id, tid,
+ freed);
+
+ if (il->mac80211_registered &&
+ il_queue_space(&txq->q) > txq->q.low_mark &&
+ agg->state != IL_EMPTYING_HW_QUEUE_DELBA)
+ il_wake_queue(il, txq);
+ }
+ } else {
+ info->status.rates[0].count = tx_resp->failure_frame + 1;
+ info->flags |= il4965_tx_status_to_mac80211(status);
+ il4965_hwrate_to_tx_control(il,
+ le32_to_cpu(tx_resp->rate_n_flags),
+ info);
+
+ D_TX_REPLY("TXQ %d status %s (0x%08x) "
+ "rate_n_flags 0x%x retries %d\n", txq_id,
+ il4965_get_tx_fail_reason(status), status,
+ le32_to_cpu(tx_resp->rate_n_flags),
+ tx_resp->failure_frame);
+
+ freed = il4965_tx_queue_reclaim(il, txq_id, idx);
+ if (qc && likely(sta_id != IL_INVALID_STATION))
+ il4965_free_tfds_in_queue(il, sta_id, tid, freed);
+ else if (sta_id == IL_INVALID_STATION)
+ D_TX_REPLY("Station not known\n");
+
+ if (il->mac80211_registered &&
+ il_queue_space(&txq->q) > txq->q.low_mark)
+ il_wake_queue(il, txq);
+ }
+ if (qc && likely(sta_id != IL_INVALID_STATION))
+ il4965_txq_check_empty(il, sta_id, tid, txq_id);
+
+ il4965_check_abort_status(il, tx_resp->frame_count, status);
+
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+}
+
+/* Set up 4965-specific Rx frame reply handlers */
+static void
+il4965_handler_setup(struct il_priv *il)
+{
+ /* Legacy Rx frames */
+ il->handlers[N_RX] = il4965_hdl_rx;
+ /* Tx response */
+ il->handlers[C_TX] = il4965_hdl_tx;
+}
+
+static struct il_hcmd_ops il4965_hcmd = {
+ .rxon_assoc = il4965_send_rxon_assoc,
+ .commit_rxon = il4965_commit_rxon,
+ .set_rxon_chain = il4965_set_rxon_chain,
+};
+
+static void
+il4965_post_scan(struct il_priv *il)
+{
+ struct il_rxon_context *ctx = &il->ctx;
+
+ /*
+ * Since setting the RXON may have been deferred while
+ * performing the scan, fire one off if needed
+ */
+ if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
+ il_commit_rxon(il, ctx);
+}
+
+static void
+il4965_post_associate(struct il_priv *il)
+{
+ struct il_rxon_context *ctx = &il->ctx;
+ struct ieee80211_vif *vif = ctx->vif;
+ struct ieee80211_conf *conf = NULL;
+ int ret = 0;
+
+ if (!vif || !il->is_open)
+ return;
+
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ return;
+
+ il_scan_cancel_timeout(il, 200);
+
+ conf = &il->hw->conf;
+
+ ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ il_commit_rxon(il, ctx);
+
+ ret = il_send_rxon_timing(il, ctx);
+ if (ret)
+ IL_WARN("RXON timing - " "Attempting to continue.\n");
+
+ ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
+
+ il_set_rxon_ht(il, &il->current_ht_config);
+
+ if (il->cfg->ops->hcmd->set_rxon_chain)
+ il->cfg->ops->hcmd->set_rxon_chain(il, ctx);
+
+ ctx->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid);
+
+ D_ASSOC("assoc id %d beacon interval %d\n", vif->bss_conf.aid,
+ vif->bss_conf.beacon_int);
+
+ if (vif->bss_conf.use_short_preamble)
+ ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
+ else
+ ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
+
+ if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
+ if (vif->bss_conf.use_short_slot)
+ ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
+ else
+ ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
+ }
+
+ il_commit_rxon(il, ctx);
+
+ D_ASSOC("Associated as %d to: %pM\n", vif->bss_conf.aid,
+ ctx->active.bssid_addr);
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ il4965_send_beacon_cmd(il);
+ break;
+ default:
+ IL_ERR("%s Should not be called in %d mode\n", __func__,
+ vif->type);
+ break;
+ }
+
+	/* the chain noise calibration will enable PM upon completion.
+ * If chain noise has already been run, then we need to enable
+ * power management here */
+ if (il->chain_noise_data.state == IL_CHAIN_NOISE_DONE)
+ il_power_update_mode(il, false);
+
+ /* Enable Rx differential gain and sensitivity calibrations */
+ il4965_chain_noise_reset(il);
+ il->start_calib = 1;
+}
+
+static void
+il4965_config_ap(struct il_priv *il)
+{
+ struct il_rxon_context *ctx = &il->ctx;
+ struct ieee80211_vif *vif = ctx->vif;
+ int ret = 0;
+
+ lockdep_assert_held(&il->mutex);
+
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ return;
+
+ /* The following should be done only at AP bring up */
+ if (!il_is_associated_ctx(ctx)) {
+
+ /* RXON - unassoc (to set timing command) */
+ ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ il_commit_rxon(il, ctx);
+
+ /* RXON Timing */
+ ret = il_send_rxon_timing(il, ctx);
+ if (ret)
+ IL_WARN("RXON timing failed - "
+ "Attempting to continue.\n");
+
+ /* AP has all antennas */
+ il->chain_noise_data.active_chains = il->hw_params.valid_rx_ant;
+ il_set_rxon_ht(il, &il->current_ht_config);
+ if (il->cfg->ops->hcmd->set_rxon_chain)
+ il->cfg->ops->hcmd->set_rxon_chain(il, ctx);
+
+ ctx->staging.assoc_id = 0;
+
+ if (vif->bss_conf.use_short_preamble)
+ ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
+ else
+ ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
+
+ if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
+ if (vif->bss_conf.use_short_slot)
+ ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
+ else
+ ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
+ }
+ /* need to send beacon cmd before committing assoc RXON! */
+ il4965_send_beacon_cmd(il);
+ /* restore RXON assoc */
+ ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
+ il_commit_rxon(il, ctx);
+ }
+ il4965_send_beacon_cmd(il);
+}
+
+static struct il_hcmd_utils_ops il4965_hcmd_utils = {
+ .get_hcmd_size = il4965_get_hcmd_size,
+ .build_addsta_hcmd = il4965_build_addsta_hcmd,
+ .request_scan = il4965_request_scan,
+ .post_scan = il4965_post_scan,
+};
+
+static struct il_lib_ops il4965_lib = {
+ .set_hw_params = il4965_hw_set_hw_params,
+ .txq_update_byte_cnt_tbl = il4965_txq_update_byte_cnt_tbl,
+ .txq_attach_buf_to_tfd = il4965_hw_txq_attach_buf_to_tfd,
+ .txq_free_tfd = il4965_hw_txq_free_tfd,
+ .txq_init = il4965_hw_tx_queue_init,
+ .handler_setup = il4965_handler_setup,
+ .is_valid_rtc_data_addr = il4965_hw_valid_rtc_data_addr,
+ .init_alive_start = il4965_init_alive_start,
+ .load_ucode = il4965_load_bsm,
+ .dump_nic_error_log = il4965_dump_nic_error_log,
+ .dump_fh = il4965_dump_fh,
+ .set_channel_switch = il4965_hw_channel_switch,
+ .apm_ops = {
+ .init = il_apm_init,
+ .config = il4965_nic_config,
+ },
+ .eeprom_ops = {
+ .regulatory_bands = {
+ EEPROM_REGULATORY_BAND_1_CHANNELS,
+ EEPROM_REGULATORY_BAND_2_CHANNELS,
+ EEPROM_REGULATORY_BAND_3_CHANNELS,
+ EEPROM_REGULATORY_BAND_4_CHANNELS,
+ EEPROM_REGULATORY_BAND_5_CHANNELS,
+ EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS,
+ EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS},
+ .acquire_semaphore = il4965_eeprom_acquire_semaphore,
+ .release_semaphore = il4965_eeprom_release_semaphore,
+ },
+ .send_tx_power = il4965_send_tx_power,
+ .update_chain_flags = il4965_update_chain_flags,
+ .temp_ops = {
+ .temperature = il4965_temperature_calib,
+ },
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+ .debugfs_ops = {
+ .rx_stats_read = il4965_ucode_rx_stats_read,
+ .tx_stats_read = il4965_ucode_tx_stats_read,
+ .general_stats_read = il4965_ucode_general_stats_read,
+ },
+#endif
+};
+
+static const struct il_legacy_ops il4965_legacy_ops = {
+ .post_associate = il4965_post_associate,
+ .config_ap = il4965_config_ap,
+ .manage_ibss_station = il4965_manage_ibss_station,
+ .update_bcast_stations = il4965_update_bcast_stations,
+};
+
+struct ieee80211_ops il4965_hw_ops = {
+ .tx = il4965_mac_tx,
+ .start = il4965_mac_start,
+ .stop = il4965_mac_stop,
+ .add_interface = il_mac_add_interface,
+ .remove_interface = il_mac_remove_interface,
+ .change_interface = il_mac_change_interface,
+ .config = il_mac_config,
+ .configure_filter = il4965_configure_filter,
+ .set_key = il4965_mac_set_key,
+ .update_tkip_key = il4965_mac_update_tkip_key,
+ .conf_tx = il_mac_conf_tx,
+ .reset_tsf = il_mac_reset_tsf,
+ .bss_info_changed = il_mac_bss_info_changed,
+ .ampdu_action = il4965_mac_ampdu_action,
+ .hw_scan = il_mac_hw_scan,
+ .sta_add = il4965_mac_sta_add,
+ .sta_remove = il_mac_sta_remove,
+ .channel_switch = il4965_mac_channel_switch,
+ .tx_last_beacon = il_mac_tx_last_beacon,
+};
+
+static const struct il_ops il4965_ops = {
+ .lib = &il4965_lib,
+ .hcmd = &il4965_hcmd,
+ .utils = &il4965_hcmd_utils,
+ .led = &il4965_led_ops,
+ .legacy = &il4965_legacy_ops,
+ .ieee80211_ops = &il4965_hw_ops,
+};
+
+static struct il_base_params il4965_base_params = {
+ .eeprom_size = IL4965_EEPROM_IMG_SIZE,
+ .num_of_queues = IL49_NUM_QUEUES,
+ .num_of_ampdu_queues = IL49_NUM_AMPDU_QUEUES,
+ .pll_cfg_val = 0,
+ .set_l0s = true,
+ .use_bsm = true,
+ .led_compensation = 61,
+ .chain_noise_num_beacons = IL4965_CAL_NUM_BEACONS,
+ .wd_timeout = IL_DEF_WD_TIMEOUT,
+ .temperature_kelvin = true,
+ .ucode_tracing = true,
+ .sensitivity_calib_by_driver = true,
+ .chain_noise_calib_by_driver = true,
+};
+
+struct il_cfg il4965_cfg = {
+ .name = "Intel(R) Wireless WiFi Link 4965AGN",
+ .fw_name_pre = IL4965_FW_PRE,
+ .ucode_api_max = IL4965_UCODE_API_MAX,
+ .ucode_api_min = IL4965_UCODE_API_MIN,
+ .sku = IL_SKU_A | IL_SKU_G | IL_SKU_N,
+ .valid_tx_ant = ANT_AB,
+ .valid_rx_ant = ANT_ABC,
+ .eeprom_ver = EEPROM_4965_EEPROM_VERSION,
+ .eeprom_calib_ver = EEPROM_4965_TX_POWER_VERSION,
+ .ops = &il4965_ops,
+ .mod_params = &il4965_mod_params,
+ .base_params = &il4965_base_params,
+ .led_mode = IL_LED_BLINK,
+ /*
+ * Force use of chains B and C for scan RX on 5 GHz band
+ * because the device has off-channel reception on chain A.
+ */
+ .scan_rx_antennas[IEEE80211_BAND_5GHZ] = ANT_BC,
+};
+
+/* Module firmware */
+MODULE_FIRMWARE(IL4965_MODULE_FIRMWARE(IL4965_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlegacy/4965.h b/drivers/net/wireless/iwlegacy/4965.h
new file mode 100644
index 000000000000..f280e0161b17
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/4965.h
@@ -0,0 +1,1301 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#ifndef __il_4965_h__
+#define __il_4965_h__
+
+struct il_rx_queue;
+struct il_rx_buf;
+struct il_rx_pkt;
+struct il_tx_queue;
+struct il_rxon_context;
+
+/* configuration for the _4965 devices */
+extern struct il_cfg il4965_cfg;
+
+extern struct il_mod_params il4965_mod_params;
+
+extern struct ieee80211_ops il4965_hw_ops;
+
+/* tx queue */
+void il4965_free_tfds_in_queue(struct il_priv *il, int sta_id, int tid,
+ int freed);
+
+/* RXON */
+void il4965_set_rxon_chain(struct il_priv *il, struct il_rxon_context *ctx);
+
+/* uCode */
+int il4965_verify_ucode(struct il_priv *il);
+
+/* lib */
+void il4965_check_abort_status(struct il_priv *il, u8 frame_count, u32 status);
+
+void il4965_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq);
+int il4965_rx_init(struct il_priv *il, struct il_rx_queue *rxq);
+int il4965_hw_nic_init(struct il_priv *il);
+int il4965_dump_fh(struct il_priv *il, char **buf, bool display);
+
+/* rx */
+void il4965_rx_queue_restock(struct il_priv *il);
+void il4965_rx_replenish(struct il_priv *il);
+void il4965_rx_replenish_now(struct il_priv *il);
+void il4965_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq);
+int il4965_rxq_stop(struct il_priv *il);
+int il4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
+void il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb);
+void il4965_hdl_rx_phy(struct il_priv *il, struct il_rx_buf *rxb);
+void il4965_rx_handle(struct il_priv *il);
+
+/* tx */
+void il4965_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq);
+int il4965_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq,
+ dma_addr_t addr, u16 len, u8 reset, u8 pad);
+int il4965_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq);
+void il4965_hwrate_to_tx_control(struct il_priv *il, u32 rate_n_flags,
+ struct ieee80211_tx_info *info);
+int il4965_tx_skb(struct il_priv *il, struct sk_buff *skb);
+int il4965_tx_agg_start(struct il_priv *il, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid, u16 * ssn);
+int il4965_tx_agg_stop(struct il_priv *il, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid);
+int il4965_txq_check_empty(struct il_priv *il, int sta_id, u8 tid, int txq_id);
+void il4965_hdl_compressed_ba(struct il_priv *il, struct il_rx_buf *rxb);
+int il4965_tx_queue_reclaim(struct il_priv *il, int txq_id, int idx);
+void il4965_hw_txq_ctx_free(struct il_priv *il);
+int il4965_txq_ctx_alloc(struct il_priv *il);
+void il4965_txq_ctx_reset(struct il_priv *il);
+void il4965_txq_ctx_stop(struct il_priv *il);
+void il4965_txq_set_sched(struct il_priv *il, u32 mask);
+
+/*
+ * Acquire il->lock before calling this function !
+ */
+void il4965_set_wr_ptrs(struct il_priv *il, int txq_id, u32 idx);
+/**
+ * il4965_tx_queue_set_status - (optionally) start Tx/Cmd queue
+ * @tx_fifo_id: Tx DMA/FIFO channel (range 0-7) that the queue will feed
+ * @scd_retry: (1) Indicates queue will be used in aggregation mode
+ *
+ * NOTE: Acquire il->lock before calling this function !
+ */
+void il4965_tx_queue_set_status(struct il_priv *il, struct il_tx_queue *txq,
+ int tx_fifo_id, int scd_retry);
+
+/* rx */
+void il4965_hdl_missed_beacon(struct il_priv *il, struct il_rx_buf *rxb);
+bool il4965_good_plcp_health(struct il_priv *il, struct il_rx_pkt *pkt);
+void il4965_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb);
+void il4965_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb);
+
+/* scan */
+int il4965_request_scan(struct il_priv *il, struct ieee80211_vif *vif);
+
+/* station mgmt */
+int il4965_manage_ibss_station(struct il_priv *il, struct ieee80211_vif *vif,
+ bool add);
+
+/* hcmd */
+int il4965_send_beacon_cmd(struct il_priv *il);
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+const char *il4965_get_tx_fail_reason(u32 status);
+#else
+static inline const char *
+il4965_get_tx_fail_reason(u32 status)
+{
+ return "";
+}
+#endif
+
+/* station management */
+int il4965_alloc_bcast_station(struct il_priv *il, struct il_rxon_context *ctx);
+int il4965_add_bssid_station(struct il_priv *il, struct il_rxon_context *ctx,
+ const u8 *addr, u8 *sta_id_r);
+int il4965_remove_default_wep_key(struct il_priv *il,
+ struct il_rxon_context *ctx,
+ struct ieee80211_key_conf *key);
+int il4965_set_default_wep_key(struct il_priv *il, struct il_rxon_context *ctx,
+ struct ieee80211_key_conf *key);
+int il4965_restore_default_wep_keys(struct il_priv *il,
+ struct il_rxon_context *ctx);
+int il4965_set_dynamic_key(struct il_priv *il, struct il_rxon_context *ctx,
+ struct ieee80211_key_conf *key, u8 sta_id);
+int il4965_remove_dynamic_key(struct il_priv *il, struct il_rxon_context *ctx,
+ struct ieee80211_key_conf *key, u8 sta_id);
+void il4965_update_tkip_key(struct il_priv *il, struct il_rxon_context *ctx,
+ struct ieee80211_key_conf *keyconf,
+ struct ieee80211_sta *sta, u32 iv32,
+ u16 *phase1key);
+int il4965_sta_tx_modify_enable_tid(struct il_priv *il, int sta_id, int tid);
+int il4965_sta_rx_agg_start(struct il_priv *il, struct ieee80211_sta *sta,
+ int tid, u16 ssn);
+int il4965_sta_rx_agg_stop(struct il_priv *il, struct ieee80211_sta *sta,
+ int tid);
+void il4965_sta_modify_sleep_tx_count(struct il_priv *il, int sta_id, int cnt);
+int il4965_update_bcast_stations(struct il_priv *il);
+
+/* rate */
+static inline u8
+il4965_hw_get_rate(__le32 rate_n_flags)
+{
+ return le32_to_cpu(rate_n_flags) & 0xFF;
+}
+
+/* eeprom */
+void il4965_eeprom_get_mac(const struct il_priv *il, u8 * mac);
+int il4965_eeprom_acquire_semaphore(struct il_priv *il);
+void il4965_eeprom_release_semaphore(struct il_priv *il);
+int il4965_eeprom_check_version(struct il_priv *il);
+
+/* mac80211 handlers (for 4965) */
+void il4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
+int il4965_mac_start(struct ieee80211_hw *hw);
+void il4965_mac_stop(struct ieee80211_hw *hw);
+void il4965_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags, u64 multicast);
+int il4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key);
+void il4965_mac_update_tkip_key(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_key_conf *keyconf,
+ struct ieee80211_sta *sta, u32 iv32,
+ u16 *phase1key);
+int il4965_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ enum ieee80211_ampdu_mlme_action action,
+ struct ieee80211_sta *sta, u16 tid, u16 * ssn,
+ u8 buf_size);
+int il4965_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+void il4965_mac_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_channel_switch *ch_switch);
+
+void il4965_led_enable(struct il_priv *il);
+
+/* EEPROM */
+#define IL4965_EEPROM_IMG_SIZE 1024
+
+/*
+ * uCode queue management definitions ...
+ * The first queue used for block-ack aggregation is #7 (4965 only).
+ * All block-ack aggregation queues should map to Tx DMA/FIFO channel 7.
+ */
+#define IL49_FIRST_AMPDU_QUEUE 7
+
+/* Sizes and addresses for instruction and data memory (SRAM) in
+ * 4965's embedded processor. Driver access is via HBUS_TARG_MEM_* regs. */
+#define IL49_RTC_INST_LOWER_BOUND (0x000000)
+#define IL49_RTC_INST_UPPER_BOUND (0x018000)
+
+#define IL49_RTC_DATA_LOWER_BOUND (0x800000)
+#define IL49_RTC_DATA_UPPER_BOUND (0x80A000)
+
+#define IL49_RTC_INST_SIZE (IL49_RTC_INST_UPPER_BOUND - \
+ IL49_RTC_INST_LOWER_BOUND)
+#define IL49_RTC_DATA_SIZE (IL49_RTC_DATA_UPPER_BOUND - \
+ IL49_RTC_DATA_LOWER_BOUND)
+
+#define IL49_MAX_INST_SIZE IL49_RTC_INST_SIZE
+#define IL49_MAX_DATA_SIZE IL49_RTC_DATA_SIZE
+
+/* Size of uCode instruction memory in bootstrap state machine */
+#define IL49_MAX_BSM_SIZE BSM_SRAM_SIZE
+
+static inline int
+il4965_hw_valid_rtc_data_addr(u32 addr)
+{
+ return (addr >= IL49_RTC_DATA_LOWER_BOUND &&
+ addr < IL49_RTC_DATA_UPPER_BOUND);
+}
+
+/********************* START TEMPERATURE *************************************/
+
+/**
+ * 4965 temperature calculation.
+ *
+ * The driver must calculate the device temperature before calculating
+ * a txpower setting (amplifier gain is temperature dependent). The
+ * calculation uses 4 measurements, 3 of which (R1, R2, R3) are calibration
+ * values used for the life of the driver, and one of which (R4) is the
+ * real-time temperature indicator.
+ *
+ * uCode provides all 4 values to the driver via the "initialize alive"
+ * notification (see struct il4965_init_alive_resp). After the runtime uCode
+ * image loads, uCode updates the R4 value via stats notifications
+ * (see N_STATS), which occur after each received beacon
+ * when associated, or can be requested via C_STATS.
+ *
+ * NOTE: uCode provides the R4 value as a 23-bit signed value. Driver
+ * must sign-extend to 32 bits before applying formula below.
+ *
+ * Formula:
+ *
+ * degrees Kelvin = ((97 * 259 * (R4 - R2) / (R3 - R1)) / 100) + 8
+ *
+ * NOTE: The basic formula is 259 * (R4-R2) / (R3-R1). The 97/100 is
+ * an additional correction, which should be centered around 0 degrees
+ * Celsius (273 degrees Kelvin). The 8 (3 percent of 273) compensates for
+ * centering the 97/100 correction around 0 degrees K.
+ *
+ * Subtract 273 from the Kelvin value to find degrees Celsius, for comparing
+ * current temperature with factory-measured temperatures when calculating
+ * txpower settings.
+ */
+#define TEMPERATURE_CALIB_KELVIN_OFFSET 8
+#define TEMPERATURE_CALIB_A_VAL 259
+
+/* Limit range of calculated temperature to be between these Kelvin values */
+#define IL_TX_POWER_TEMPERATURE_MIN (263)
+#define IL_TX_POWER_TEMPERATURE_MAX (410)
+
+#define IL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(t) \
+ ((t) < IL_TX_POWER_TEMPERATURE_MIN || \
+ (t) > IL_TX_POWER_TEMPERATURE_MAX)
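+
+/*
+ * Illustrative sketch (not part of the driver): evaluating the Kelvin
+ * formula above, assuming r1..r4 are the sign-extended 32-bit calibration
+ * and real-time values taken from the "initialize alive" / stats
+ * notifications.
+ */
+static inline s32
+il4965_example_temp_kelvin(s32 r1, s32 r2, s32 r3, s32 r4)
+{
+	if (r3 == r1)	/* guard against bogus calibration data */
+		return 0;
+	return (97 * TEMPERATURE_CALIB_A_VAL * (r4 - r2) / (r3 - r1)) / 100 +
+	       TEMPERATURE_CALIB_KELVIN_OFFSET;
+}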
+
+/********************* END TEMPERATURE ***************************************/
+
+/********************* START TXPOWER *****************************************/
+
+/**
+ * 4965 txpower calculations rely on information from three sources:
+ *
+ * 1) EEPROM
+ * 2) "initialize" alive notification
+ * 3) stats notifications
+ *
+ * EEPROM data consists of:
+ *
+ * 1) Regulatory information (max txpower and channel usage flags) is provided
+ * separately for each channel that can possibly be supported by the 4965.
+ * 40 MHz wide (.11n HT40) channels are listed separately from 20 MHz
+ * (legacy) channels.
+ *
+ * See struct il4965_eeprom_channel for format, and struct il4965_eeprom
+ * for locations in EEPROM.
+ *
+ * 2) Factory txpower calibration information is provided separately for
+ * sub-bands of contiguous channels. 2.4 GHz has just one sub-band,
+ * but 5 GHz has several sub-bands.
+ *
+ * In addition, per-band (2.4 and 5 GHz) saturation txpowers are provided.
+ *
+ * See struct il4965_eeprom_calib_info (and the tree of structures
+ * contained within it) for format, and struct il4965_eeprom for
+ * locations in EEPROM.
+ *
+ * "Initialization alive" notification (see struct il4965_init_alive_resp)
+ * consists of:
+ *
+ * 1) Temperature calculation parameters.
+ *
+ * 2) Power supply voltage measurement.
+ *
+ * 3) Tx gain compensation to balance 2 transmitters for MIMO use.
+ *
+ * Statistics notifications deliver:
+ *
+ * 1) Current values for temperature param R4.
+ */
+
+/**
+ * To calculate a txpower setting for a given desired target txpower, channel,
+ * modulation bit rate, and transmitter chain (4965 has 2 transmitters to
+ * support MIMO and transmit diversity), driver must do the following:
+ *
+ * 1) Compare desired txpower vs. (EEPROM) regulatory limit for this channel.
+ * Do not exceed regulatory limit; reduce target txpower if necessary.
+ *
+ * If setting up txpowers for MIMO rates (rate idxes 8-15, 24-31),
+ * 2 transmitters will be used simultaneously; driver must reduce the
+ * regulatory limit by 3 dB (half-power) for each transmitter, so the
+ * combined total output of the 2 transmitters is within regulatory limits.
+ *
+ *
+ * 2) Compare target txpower vs. (EEPROM) saturation txpower *reduced by
+ * backoff for this bit rate*. Do not exceed (saturation - backoff[rate]);
+ * reduce target txpower if necessary.
+ *
+ * Backoff values below are in 1/2 dB units (equivalent to steps in
+ * txpower gain tables):
+ *
+ * OFDM 6 - 36 MBit: 10 steps (5 dB)
+ * OFDM 48 MBit: 15 steps (7.5 dB)
+ * OFDM 54 MBit: 17 steps (8.5 dB)
+ * OFDM 60 MBit: 20 steps (10 dB)
+ * CCK all rates: 10 steps (5 dB)
+ *
+ * Backoff values apply to saturation txpower on a per-transmitter basis;
+ * when using MIMO (2 transmitters), each transmitter uses the same
+ * saturation level provided in EEPROM, and the same backoff values;
+ * no reduction (such as with regulatory txpower limits) is required.
+ *
+ * Saturation and Backoff values apply equally to 20 MHz (legacy) channel
+ * widths and 40 MHz (.11n HT40) channel widths; there is no separate
+ * factory measurement for HT40 channels.
+ *
+ * The result of this step is the final target txpower. The rest of
+ * the steps figure out the proper settings for the device to achieve
+ * that target txpower.
+ *
+ *
+ * 3) Determine (EEPROM) calibration sub band for the target channel, by
+ * comparing against first and last channels in each sub band
+ * (see struct il4965_eeprom_calib_subband_info).
+ *
+ *
+ * 4) Linearly interpolate (EEPROM) factory calibration measurement sets,
+ * referencing the 2 factory-measured (sample) channels within the sub band.
+ *
+ * Interpolation is based on difference between target channel's frequency
+ * and the sample channels' frequencies. Since channel numbers are based
+ * on frequency (5 MHz between each channel number), this is equivalent
+ * to interpolating based on channel number differences.
+ *
+ * Note that the sample channels may or may not be the channels at the
+ * edges of the sub band. The target channel may be "outside" of the
+ * span of the sampled channels.
+ *
+ * Driver may choose the pair (for 2 Tx chains) of measurements (see
+ * struct il4965_eeprom_calib_ch_info) for which the actual measured
+ * txpower comes closest to the desired txpower. Usually, though,
+ * the middle set of measurements is closest to the regulatory limits,
+ * and is therefore a good choice for all txpower calculations (this
+ * assumes that high accuracy is needed for maximizing legal txpower,
+ * while lower txpower configurations do not need as much accuracy).
+ *
+ * Driver should interpolate both members of the chosen measurement pair,
+ * i.e. for both Tx chains (radio transmitters), unless the driver knows
+ * that only one of the chains will be used (e.g. only one tx antenna
+ * connected, but this should be unusual). The rate scaling algorithm
+ * switches antennas to find best performance, so both Tx chains will
+ * be used (although only one at a time) even for non-MIMO transmissions.
+ *
+ * Driver should interpolate factory values for temperature, gain table
+ * idx, and actual power. The power amplifier detector values are
+ * not used by the driver.
+ *
+ * Sanity check: If the target channel happens to be one of the sample
+ * channels, the results should agree with the sample channel's
+ * measurements!
+ *
+ *
+ * 5) Find difference between desired txpower and (interpolated)
+ * factory-measured txpower. Using (interpolated) factory gain table idx
+ * (shown elsewhere) as a starting point, adjust this idx lower to
+ * increase txpower, or higher to decrease txpower, until the target
+ * txpower is reached. Each step in the gain table is 1/2 dB.
+ *
+ * For example, if factory measured txpower is 16 dBm, and target txpower
+ * is 13 dBm, add 6 steps to the factory gain idx to reduce txpower
+ * by 3 dB.
+ *
+ *
+ * 6) Find difference between current device temperature and (interpolated)
+ * factory-measured temperature for sub-band. Factory values are in
+ * degrees Celsius. To calculate current temperature, see comments for
+ * "4965 temperature calculation".
+ *
+ * If current temperature is higher than factory temperature, driver must
+ * increase gain (lower gain table idx), and vice versa.
+ *
+ * Temperature affects gain differently for different channels:
+ *
+ * 2.4 GHz all channels: 3.5 degrees per half-dB step
+ * 5 GHz channels 34-43: 4.5 degrees per half-dB step
+ * 5 GHz channels >= 44: 4.0 degrees per half-dB step
+ *
+ * NOTE: Temperature can increase rapidly when transmitting, especially
+ * with heavy traffic at high txpowers. Driver should update
+ * temperature calculations often under these conditions to
+ * maintain strong txpower in the face of rising temperature.
+ *
+ *
+ * 7) Find difference between current power supply voltage indicator
+ * (from "initialize alive") and factory-measured power supply voltage
+ * indicator (EEPROM).
+ *
+ * If the current voltage is higher (indicator is lower) than factory
+ * voltage, gain should be reduced (gain table idx increased) by:
+ *
+ * (eeprom - current) / 7
+ *
+ * If the current voltage is lower (indicator is higher) than factory
+ * voltage, gain should be increased (gain table idx decreased) by:
+ *
+ * 2 * (current - eeprom) / 7
+ *
+ * If number of idx steps in either direction turns out to be > 2,
+ * something is wrong ... just use 0.
+ *
+ * NOTE: Voltage compensation is independent of band/channel.
+ *
+ * NOTE: "Initialize" uCode measures current voltage, which is assumed
+ * to be constant after this initial measurement. Voltage
+ * compensation for txpower (number of steps in gain table)
+ * may be calculated once and used until the next uCode bootload.
+ *
+ *
+ * 8) If setting up txpowers for MIMO rates (rate idxes 8-15, 24-31),
+ * adjust txpower for each transmitter chain, so txpower is balanced
+ * between the two chains. There are 5 pairs of tx_atten[group][chain]
+ * values in "initialize alive", one pair for each of 5 channel ranges:
+ *
+ * Group 0: 5 GHz channel 34-43
+ * Group 1: 5 GHz channel 44-70
+ * Group 2: 5 GHz channel 71-124
+ * Group 3: 5 GHz channel 125-200
+ * Group 4: 2.4 GHz all channels
+ *
+ * Add the tx_atten[group][chain] value to the idx for the target chain.
+ * The values are signed, but are in pairs of 0 and a non-negative number,
+ * so as to reduce gain (if necessary) of the "hotter" channel. This
+ * avoids any need to double-check for regulatory compliance after
+ * this step.
+ *
+ *
+ * 9) If setting up for a CCK rate, lower the gain by adding a CCK compensation
+ * value to the idx:
+ *
+ * Hardware rev B: 9 steps (4.5 dB)
+ * Hardware rev C: 5 steps (2.5 dB)
+ *
+ * Hardware rev for 4965 can be determined by reading CSR_HW_REV_WA_REG,
+ * bits [3:2], 1 = B, 2 = C.
+ *
+ * NOTE: This compensation is in addition to any saturation backoff that
+ * might have been applied in an earlier step.
+ *
+ *
+ * 10) Select the gain table, based on band (2.4 vs 5 GHz).
+ *
+ * Limit the adjusted idx to stay within the table!
+ *
+ *
+ * 11) Read gain table entries for DSP and radio gain, place into appropriate
+ * location(s) in command (struct il4965_txpowertable_cmd).
+ */
+
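+/*
+ * Illustrative sketch (not part of the driver): step 5 above as arithmetic.
+ * Powers are assumed to be expressed in half-dB units, so one gain-table
+ * step corresponds to one unit; a lower idx means more gain/txpower.
+ */
+static inline int
+il4965_example_adjust_gain_idx(int factory_idx, int factory_pwr_half_db,
+			       int target_pwr_half_db)
+{
+	/* e.g. factory 32 (16 dBm), target 26 (13 dBm) -> factory_idx + 6 */
+	return factory_idx - (target_pwr_half_db - factory_pwr_half_db);
+}
+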
+/**
+ * When MIMO is used (2 transmitters operating simultaneously), driver should
+ * limit each transmitter to deliver a max of 3 dB below the regulatory limit
+ * for the device. That is, use half power for each transmitter, so total
+ * txpower is within regulatory limits.
+ *
+ * The value "6" represents number of steps in gain table to reduce power 3 dB.
+ * Each step is 1/2 dB.
+ */
+#define IL_TX_POWER_MIMO_REGULATORY_COMPENSATION (6)
+
+/**
+ * CCK gain compensation.
+ *
+ * When calculating txpowers for CCK, after making sure that the target power
+ * is within regulatory and saturation limits, driver must additionally
+ * back off gain by adding these values to the gain table idx.
+ *
+ * Hardware rev for 4965 can be determined by reading CSR_HW_REV_WA_REG,
+ * bits [3:2], 1 = B, 2 = C.
+ */
+#define IL_TX_POWER_CCK_COMPENSATION_B_STEP (9)
+#define IL_TX_POWER_CCK_COMPENSATION_C_STEP (5)
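+
+/*
+ * Illustrative sketch (not part of the driver): selecting the CCK backoff
+ * from the hardware rev described above; 'rev' is assumed to already hold
+ * CSR_HW_REV_WA_REG bits [3:2] (1 = rev B, 2 = rev C).
+ */
+static inline int
+il4965_example_cck_backoff(u32 rev)
+{
+	return (rev == 1) ? IL_TX_POWER_CCK_COMPENSATION_B_STEP :
+			    IL_TX_POWER_CCK_COMPENSATION_C_STEP;
+}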
+
+/*
+ * 4965 power supply voltage compensation for txpower
+ */
+#define TX_POWER_IL_VOLTAGE_CODES_PER_03V (7)
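+
+/*
+ * Illustrative sketch (not part of the driver): the voltage compensation
+ * from step 7 of the txpower procedure above. 'eeprom' and 'init' are
+ * assumed to be the factory and "initialize alive" voltage indicators; a
+ * positive result is added to the gain table idx (reduces gain).
+ */
+static inline int
+il4965_example_voltage_comp(s32 eeprom, s32 init)
+{
+	int comp;
+
+	if (init > eeprom)	/* indicator higher -> supply voltage lower */
+		comp = -2 * (init - eeprom) / TX_POWER_IL_VOLTAGE_CODES_PER_03V;
+	else			/* indicator lower -> supply voltage higher */
+		comp = (eeprom - init) / TX_POWER_IL_VOLTAGE_CODES_PER_03V;
+
+	if (comp < -2 || comp > 2)	/* ">2 steps: something is wrong" */
+		comp = 0;
+	return comp;
+}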
+
+/**
+ * Gain tables.
+ *
+ * The following tables contain pair of values for setting txpower, i.e.
+ * gain settings for the output of the device's digital signal processor (DSP),
+ * and for the analog gain structure of the transmitter.
+ *
+ * Each entry in the gain tables represents a step of 1/2 dB. Note that these
+ * are *relative* steps, not indications of absolute output power. Output
+ * power varies with temperature, voltage, and channel frequency, and also
+ * requires consideration of average power (to satisfy regulatory constraints),
+ * and peak power (to avoid distortion of the output signal).
+ *
+ * Each entry contains two values:
+ * 1) DSP gain (or sometimes called DSP attenuation). This is a fine-grained
+ * linear value that multiplies the output of the digital signal processor,
+ * before being sent to the analog radio.
+ * 2) Radio gain. This sets the analog gain of the radio Tx path.
+ * It is a coarser setting, and behaves in a logarithmic (dB) fashion.
+ *
+ * EEPROM contains factory calibration data for txpower. This maps actual
+ * measured txpower levels to gain settings in the "well known" tables
+ * below ("well-known" means here that both factory calibration *and* the
+ * driver work with the same table).
+ *
+ * There are separate tables for 2.4 GHz and 5 GHz bands. The 5 GHz table
+ * has an extension (into negative idxes), in case the driver needs to
+ * boost power setting for high device temperatures (higher than would be
+ * present during factory calibration). A 5 GHz EEPROM idx of "40"
+ * corresponds to the 49th entry in the table used by the driver.
+ */
+#define MIN_TX_GAIN_IDX (0) /* highest gain, lowest idx, 2.4 */
+#define MIN_TX_GAIN_IDX_52GHZ_EXT (-9) /* highest gain, lowest idx, 5 */
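+
+/*
+ * Illustrative sketch (not part of the driver): mapping a 5 GHz EEPROM
+ * gain idx onto an offset in the driver's extended table, per the note
+ * above (EEPROM idx 40 lands 49 entries past the -9 extension).
+ */
+static inline int
+il4965_example_5ghz_tbl_offset(int eeprom_idx)
+{
+	return eeprom_idx - MIN_TX_GAIN_IDX_52GHZ_EXT;	/* 40 -> 49 */
+}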
+
+/**
+ * 2.4 GHz gain table
+ *
+ * Index Dsp gain Radio gain
+ * 0 110 0x3f (highest gain)
+ * 1 104 0x3f
+ * 2 98 0x3f
+ * 3 110 0x3e
+ * 4 104 0x3e
+ * 5 98 0x3e
+ * 6 110 0x3d
+ * 7 104 0x3d
+ * 8 98 0x3d
+ * 9 110 0x3c
+ * 10 104 0x3c
+ * 11 98 0x3c
+ * 12 110 0x3b
+ * 13 104 0x3b
+ * 14 98 0x3b
+ * 15 110 0x3a
+ * 16 104 0x3a
+ * 17 98 0x3a
+ * 18 110 0x39
+ * 19 104 0x39
+ * 20 98 0x39
+ * 21 110 0x38
+ * 22 104 0x38
+ * 23 98 0x38
+ * 24 110 0x37
+ * 25 104 0x37
+ * 26 98 0x37
+ * 27 110 0x36
+ * 28 104 0x36
+ * 29 98 0x36
+ * 30 110 0x35
+ * 31 104 0x35
+ * 32 98 0x35
+ * 33 110 0x34
+ * 34 104 0x34
+ * 35 98 0x34
+ * 36 110 0x33
+ * 37 104 0x33
+ * 38 98 0x33
+ * 39 110 0x32
+ * 40 104 0x32
+ * 41 98 0x32
+ * 42 110 0x31
+ * 43 104 0x31
+ * 44 98 0x31
+ * 45 110 0x30
+ * 46 104 0x30
+ * 47 98 0x30
+ * 48 110 0x6
+ * 49 104 0x6
+ * 50 98 0x6
+ * 51 110 0x5
+ * 52 104 0x5
+ * 53 98 0x5
+ * 54 110 0x4
+ * 55 104 0x4
+ * 56 98 0x4
+ * 57 110 0x3
+ * 58 104 0x3
+ * 59 98 0x3
+ * 60 110 0x2
+ * 61 104 0x2
+ * 62 98 0x2
+ * 63 110 0x1
+ * 64 104 0x1
+ * 65 98 0x1
+ * 66 110 0x0
+ * 67 104 0x0
+ * 68 98 0x0
+ * 69 97 0
+ * 70 96 0
+ * 71 95 0
+ * 72 94 0
+ * 73 93 0
+ * 74 92 0
+ * 75 91 0
+ * 76 90 0
+ * 77 89 0
+ * 78 88 0
+ * 79 87 0
+ * 80 86 0
+ * 81 85 0
+ * 82 84 0
+ * 83 83 0
+ * 84 82 0
+ * 85 81 0
+ * 86 80 0
+ * 87 79 0
+ * 88 78 0
+ * 89 77 0
+ * 90 76 0
+ * 91 75 0
+ * 92 74 0
+ * 93 73 0
+ * 94 72 0
+ * 95 71 0
+ * 96 70 0
+ * 97 69 0
+ * 98 68 0
+ */
+
+/**
+ * 5 GHz gain table
+ *
+ * Index Dsp gain Radio gain
+ * -9 123 0x3F (highest gain)
+ * -8 117 0x3F
+ * -7 110 0x3F
+ * -6 104 0x3F
+ * -5 98 0x3F
+ * -4 110 0x3E
+ * -3 104 0x3E
+ * -2 98 0x3E
+ * -1 110 0x3D
+ * 0 104 0x3D
+ * 1 98 0x3D
+ * 2 110 0x3C
+ * 3 104 0x3C
+ * 4 98 0x3C
+ * 5 110 0x3B
+ * 6 104 0x3B
+ * 7 98 0x3B
+ * 8 110 0x3A
+ * 9 104 0x3A
+ * 10 98 0x3A
+ * 11 110 0x39
+ * 12 104 0x39
+ * 13 98 0x39
+ * 14 110 0x38
+ * 15 104 0x38
+ * 16 98 0x38
+ * 17 110 0x37
+ * 18 104 0x37
+ * 19 98 0x37
+ * 20 110 0x36
+ * 21 104 0x36
+ * 22 98 0x36
+ * 23 110 0x35
+ * 24 104 0x35
+ * 25 98 0x35
+ * 26 110 0x34
+ * 27 104 0x34
+ * 28 98 0x34
+ * 29 110 0x33
+ * 30 104 0x33
+ * 31 98 0x33
+ * 32 110 0x32
+ * 33 104 0x32
+ * 34 98 0x32
+ * 35 110 0x31
+ * 36 104 0x31
+ * 37 98 0x31
+ * 38 110 0x30
+ * 39 104 0x30
+ * 40 98 0x30
+ * 41 110 0x25
+ * 42 104 0x25
+ * 43 98 0x25
+ * 44 110 0x24
+ * 45 104 0x24
+ * 46 98 0x24
+ * 47 110 0x23
+ * 48 104 0x23
+ * 49 98 0x23
+ * 50 110 0x22
+ * 51 104 0x18
+ * 52 98 0x18
+ * 53 110 0x17
+ * 54 104 0x17
+ * 55 98 0x17
+ * 56 110 0x16
+ * 57 104 0x16
+ * 58 98 0x16
+ * 59 110 0x15
+ * 60 104 0x15
+ * 61 98 0x15
+ * 62 110 0x14
+ * 63 104 0x14
+ * 64 98 0x14
+ * 65 110 0x13
+ * 66 104 0x13
+ * 67 98 0x13
+ * 68 110 0x12
+ * 69 104 0x08
+ * 70 98 0x08
+ * 71 110 0x07
+ * 72 104 0x07
+ * 73 98 0x07
+ * 74 110 0x06
+ * 75 104 0x06
+ * 76 98 0x06
+ * 77 110 0x05
+ * 78 104 0x05
+ * 79 98 0x05
+ * 80 110 0x04
+ * 81 104 0x04
+ * 82 98 0x04
+ * 83 110 0x03
+ * 84 104 0x03
+ * 85 98 0x03
+ * 86 110 0x02
+ * 87 104 0x02
+ * 88 98 0x02
+ * 89 110 0x01
+ * 90 104 0x01
+ * 91 98 0x01
+ * 92 110 0x00
+ * 93 104 0x00
+ * 94 98 0x00
+ * 95 93 0x00
+ * 96 88 0x00
+ * 97 83 0x00
+ * 98 78 0x00
+ */
+
+/**
+ * Sanity checks and default values for EEPROM regulatory levels.
+ * If EEPROM values fall outside MIN/MAX range, use default values.
+ *
+ * Regulatory limits refer to the maximum average txpower allowed by
+ * regulatory agencies in the geographies in which the device is meant
+ * to be operated. These limits are SKU-specific (i.e. geography-specific),
+ * and channel-specific; each channel has an individual regulatory limit
+ * listed in the EEPROM.
+ *
+ * Units are in half-dBm (i.e. "34" means 17 dBm).
+ */
+#define IL_TX_POWER_DEFAULT_REGULATORY_24 (34)
+#define IL_TX_POWER_DEFAULT_REGULATORY_52 (34)
+#define IL_TX_POWER_REGULATORY_MIN (0)
+#define IL_TX_POWER_REGULATORY_MAX (34)
+
+/**
+ * Sanity checks and default values for EEPROM saturation levels.
+ * If EEPROM values fall outside MIN/MAX range, use default values.
+ *
+ * Saturation is the highest level that the output power amplifier can produce
+ * without significant clipping distortion. This is a "peak" power level.
+ * Different types of modulation (i.e. various "rates", and OFDM vs. CCK)
+ * require differing amounts of backoff, relative to their average power output,
+ * in order to avoid clipping distortion.
+ *
+ * Driver must make sure that it violates neither the saturation limit
+ * nor the regulatory limit when calculating Tx power settings for various
+ * rates.
+ *
+ * Units are in half-dBm (i.e. "38" means 19 dBm).
+ */
+#define IL_TX_POWER_DEFAULT_SATURATION_24 (38)
+#define IL_TX_POWER_DEFAULT_SATURATION_52 (38)
+#define IL_TX_POWER_SATURATION_MIN (20)
+#define IL_TX_POWER_SATURATION_MAX (50)
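+
+/*
+ * Illustrative sketch (not part of the driver): applying the sanity check
+ * above to a raw EEPROM saturation value (half-dBm); 'is_52ghz' selects
+ * which band's default to fall back to.
+ */
+static inline s32
+il4965_example_sat_txpower(s32 eeprom_val, bool is_52ghz)
+{
+	if (eeprom_val < IL_TX_POWER_SATURATION_MIN ||
+	    eeprom_val > IL_TX_POWER_SATURATION_MAX)
+		return is_52ghz ? IL_TX_POWER_DEFAULT_SATURATION_52 :
+				  IL_TX_POWER_DEFAULT_SATURATION_24;
+	return eeprom_val;
+}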
+
+/**
+ * Channel groups used for Tx Attenuation calibration (MIMO tx channel balance)
+ * and thermal Txpower calibration.
+ *
+ * When calculating txpower, driver must compensate for current device
+ * temperature; higher temperature requires higher gain. Driver must calculate
+ * current temperature (see "4965 temperature calculation"), then compare vs.
+ * factory calibration temperature in EEPROM; if current temperature is higher
+ * than factory temperature, driver must *increase* gain by proportions shown
+ * in table below. If current temperature is lower than factory, driver must
+ * *decrease* gain.
+ *
+ * Different frequency ranges require different compensation, as shown below.
+ */
+/* Group 0, 5.2 GHz ch 34-43: 4.5 degrees per 1/2 dB. */
+#define CALIB_IL_TX_ATTEN_GR1_FCH 34
+#define CALIB_IL_TX_ATTEN_GR1_LCH 43
+
+/* Group 1, 5.3 GHz ch 44-70: 4.0 degrees per 1/2 dB. */
+#define CALIB_IL_TX_ATTEN_GR2_FCH 44
+#define CALIB_IL_TX_ATTEN_GR2_LCH 70
+
+/* Group 2, 5.5 GHz ch 71-124: 4.0 degrees per 1/2 dB. */
+#define CALIB_IL_TX_ATTEN_GR3_FCH 71
+#define CALIB_IL_TX_ATTEN_GR3_LCH 124
+
+/* Group 3, 5.7 GHz ch 125-200: 4.0 degrees per 1/2 dB. */
+#define CALIB_IL_TX_ATTEN_GR4_FCH 125
+#define CALIB_IL_TX_ATTEN_GR4_LCH 200
+
+/* Group 4, 2.4 GHz all channels: 3.5 degrees per 1/2 dB. */
+#define CALIB_IL_TX_ATTEN_GR5_FCH 1
+#define CALIB_IL_TX_ATTEN_GR5_LCH 20
+
+enum {
+ CALIB_CH_GROUP_1 = 0,
+ CALIB_CH_GROUP_2 = 1,
+ CALIB_CH_GROUP_3 = 2,
+ CALIB_CH_GROUP_4 = 3,
+ CALIB_CH_GROUP_5 = 4,
+ CALIB_CH_GROUP_MAX
+};
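+
+/*
+ * Illustrative sketch (not part of the driver): mapping a channel number
+ * onto one of the calibration groups above, using the FCH/LCH bounds.
+ */
+static inline int
+il4965_example_calib_group(int channel)
+{
+	if (channel >= CALIB_IL_TX_ATTEN_GR1_FCH &&
+	    channel <= CALIB_IL_TX_ATTEN_GR1_LCH)
+		return CALIB_CH_GROUP_1;
+	if (channel >= CALIB_IL_TX_ATTEN_GR2_FCH &&
+	    channel <= CALIB_IL_TX_ATTEN_GR2_LCH)
+		return CALIB_CH_GROUP_2;
+	if (channel >= CALIB_IL_TX_ATTEN_GR3_FCH &&
+	    channel <= CALIB_IL_TX_ATTEN_GR3_LCH)
+		return CALIB_CH_GROUP_3;
+	if (channel >= CALIB_IL_TX_ATTEN_GR4_FCH &&
+	    channel <= CALIB_IL_TX_ATTEN_GR4_LCH)
+		return CALIB_CH_GROUP_4;
+	return CALIB_CH_GROUP_5;	/* 2.4 GHz channels */
+}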
+
+/********************* END TXPOWER *****************************************/
+
+/**
+ * Tx/Rx Queues
+ *
+ * Most communication between driver and 4965 is via queues of data buffers.
+ * For example, all commands that the driver issues to device's embedded
+ * controller (uCode) are via the command queue (one of the Tx queues). All
+ * uCode command responses/replies/notifications, including Rx frames, are
+ * conveyed from uCode to driver via the Rx queue.
+ *
+ * Most support for these queues, including handshake support, resides in
+ * structures in host DRAM, shared between the driver and the device. When
+ * allocating this memory, the driver must make sure that data written by
+ * the host CPU updates DRAM immediately (and does not get "stuck" in CPU's
+ * cache memory), so DRAM and cache are consistent, and the device can
+ * immediately see changes made by the driver.
+ *
+ * 4965 supports up to 16 DRAM-based Tx queues, and services these queues via
+ * up to 7 DMA channels (FIFOs). Each Tx queue is supported by a circular array
+ * in DRAM containing 256 Transmit Frame Descriptors (TFDs).
+ */
+#define IL49_NUM_FIFOS 7
+#define IL49_CMD_FIFO_NUM 4
+#define IL49_NUM_QUEUES 16
+#define IL49_NUM_AMPDU_QUEUES 8
+
+/**
+ * struct il4965_schedq_bc_tbl
+ *
+ * Byte Count table
+ *
+ * Each Tx queue uses a byte-count table containing 320 entries:
+ * one 16-bit entry for each of 256 TFDs, plus an additional 64 entries that
+ * duplicate the first 64 entries (to avoid wrap-around within a Tx win;
+ * max Tx win is 64 TFDs).
+ *
+ * When driver sets up a new TFD, it must also enter the total byte count
+ * of the frame to be transmitted into the corresponding entry in the byte
+ * count table for the chosen Tx queue. If the TFD idx is 0-63, the driver
+ * must duplicate the byte count entry in corresponding idx 256-319.
+ *
+ * Padding puts each byte count table on a 1024-byte boundary;
+ * 4965 assumes tables are separated by 1024 bytes.
+ */
+struct il4965_scd_bc_tbl {
+ __le16 tfd_offset[TFD_QUEUE_BC_SIZE];
+ u8 pad[1024 - (TFD_QUEUE_BC_SIZE) * sizeof(__le16)];
+} __packed;
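+
+/*
+ * Illustrative sketch (not part of the driver): filling one entry per the
+ * duplication rule above, assuming a 256-entry queue with a 64-TFD win.
+ */
+static inline void
+il4965_example_set_bc(struct il4965_scd_bc_tbl *bc, int tfd_idx, u16 byte_cnt)
+{
+	bc->tfd_offset[tfd_idx] = cpu_to_le16(byte_cnt);
+	if (tfd_idx < 64)	/* duplicate into the 256-319 range */
+		bc->tfd_offset[tfd_idx + 256] = cpu_to_le16(byte_cnt);
+}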
+
+#define IL4965_RTC_INST_LOWER_BOUND (0x000000)
+
+/* RSSI to dBm */
+#define IL4965_RSSI_OFFSET 44
+
+/* PCI registers */
+#define PCI_CFG_RETRY_TIMEOUT 0x041
+
+/* PCI register values */
+#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01
+#define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02
+
+#define IL4965_DEFAULT_TX_RETRY 15
+
+/* Aggregation queues */
+#define IL4965_FIRST_AMPDU_QUEUE 10
+
+/* Calibration */
+void il4965_chain_noise_calibration(struct il_priv *il, void *stat_resp);
+void il4965_sensitivity_calibration(struct il_priv *il, void *resp);
+void il4965_init_sensitivity(struct il_priv *il);
+void il4965_reset_run_time_calib(struct il_priv *il);
+void il4965_calib_free_results(struct il_priv *il);
+
+/* Debug */
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+ssize_t il4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos);
+ssize_t il4965_ucode_tx_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos);
+ssize_t il4965_ucode_general_stats_read(struct file *file,
+ char __user *user_buf, size_t count,
+ loff_t *ppos);
+#endif
+
+/****************************/
+/* Flow Handler Definitions */
+/****************************/
+
+/**
+ * This I/O area is directly read/writable by driver (e.g. Linux uses writel())
+ * Addresses are offsets from device's PCI hardware base address.
+ */
+#define FH49_MEM_LOWER_BOUND (0x1000)
+#define FH49_MEM_UPPER_BOUND (0x2000)
+
+/**
+ * Keep-Warm (KW) buffer base address.
+ *
+ * Driver must allocate a 4KByte buffer that is used by 4965 for keeping the
+ * host DRAM powered on (via dummy accesses to DRAM) to maintain low-latency
+ * DRAM access when 4965 is Txing or Rxing. The dummy accesses prevent host
+ * from going into a power-savings mode that would cause higher DRAM latency,
+ * and possible data over/under-runs, before all Tx/Rx is complete.
+ *
+ * Driver loads FH49_KW_MEM_ADDR_REG with the physical address (bits 35:4)
+ * of the buffer, which must be 4K aligned. Once this is set up, the 4965
+ * automatically invokes keep-warm accesses when normal accesses might not
+ * be sufficient to maintain fast DRAM response.
+ *
+ * Bit fields:
+ * 31-0: Keep-warm buffer physical base address [35:4], must be 4K aligned
+ */
+#define FH49_KW_MEM_ADDR_REG (FH49_MEM_LOWER_BOUND + 0x97C)
+
+/**
+ * TFD Circular Buffers Base (CBBC) addresses
+ *
+ * 4965 has 16 base pointer registers, one for each of 16 host-DRAM-resident
+ * circular buffers (CBs/queues) containing Transmit Frame Descriptors (TFDs)
+ * (see struct il_tfd_frame). These 16 pointer registers are offset by 0x04
+ * bytes from one another. Each TFD circular buffer in DRAM must be 256-byte
+ * aligned (address bits 0-7 must be 0).
+ *
+ * Bit fields in each pointer register:
+ * 27-0: TFD CB physical base address [35:8], must be 256-byte aligned
+ */
+#define FH49_MEM_CBBC_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0x9D0)
+#define FH49_MEM_CBBC_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0xA10)
+
+/* Find TFD CB base pointer for given queue (range 0-15). */
+#define FH49_MEM_CBBC_QUEUE(x) (FH49_MEM_CBBC_LOWER_BOUND + (x) * 0x4)
+
+/**
+ * Rx SRAM Control and Status Registers (RSCSR)
+ *
+ * These registers provide handshake between driver and 4965 for the Rx queue
+ * (this queue handles *all* command responses, notifications, Rx data, etc.
+ * sent from 4965 uCode to host driver). Unlike Tx, there is only one Rx
+ * queue, and only one Rx DMA/FIFO channel. Also unlike Tx, which can
+ * concatenate up to 20 DRAM buffers to form a Tx frame, each Receive Buffer
+ * Descriptor (RBD) points to only one Rx Buffer (RB); there is a 1:1
+ * mapping between RBDs and RBs.
+ *
+ * Driver must allocate host DRAM memory for the following, and set the
+ * physical address of each into 4965 registers:
+ *
+ * 1) Receive Buffer Descriptor (RBD) circular buffer (CB), typically with 256
+ * entries (although any power of 2, up to 4096, is selectable by driver).
+ * Each entry (1 dword) points to a receive buffer (RB) of consistent size
+ * (typically 4K, although 8K or 16K are also selectable by driver).
+ * Driver sets up RB size and number of RBDs in the CB via Rx config
+ * register FH49_MEM_RCSR_CHNL0_CONFIG_REG.
+ *
+ * Bit fields within one RBD:
+ * 27-0: Receive Buffer physical address bits [35:8], 256-byte aligned
+ *
+ * Driver sets physical address [35:8] of base of RBD circular buffer
+ * into FH49_RSCSR_CHNL0_RBDCB_BASE_REG [27:0].
+ *
+ * 2) Rx status buffer, 8 bytes, in which 4965 indicates which Rx Buffers
+ * (RBs) have been filled, via a "write pointer", actually the idx of
+ * the RB's corresponding RBD within the circular buffer. Driver sets
+ * physical address [35:4] into FH49_RSCSR_CHNL0_STTS_WPTR_REG [31:0].
+ *
+ * Bit fields in lower dword of Rx status buffer (upper dword not used
+ * by driver; see struct il4965_shared, val0):
+ * 31-12: Not used by driver
+ * 11- 0: Index of last filled Rx buffer descriptor
+ * (4965 writes, driver reads this value)
+ *
+ * As the driver prepares Receive Buffers (RBs) for 4965 to fill, driver must
+ * enter pointers to these RBs into contiguous RBD circular buffer entries,
+ * and update the 4965's "write" idx register,
+ * FH49_RSCSR_CHNL0_RBDCB_WPTR_REG.
+ *
+ * This "write" idx corresponds to the *next* RBD that the driver will make
+ * available, i.e. one RBD past the tail of the ready-to-fill RBDs within
+ * the circular buffer. This value should initially be 0 (before preparing any
+ * RBs), should be 8 after preparing the first 8 RBs (for example), and must
+ * wrap back to 0 at the end of the circular buffer (but don't wrap before
+ * "read" idx has advanced past 1! See below).
+ * NOTE: 4965 EXPECTS THE WRITE IDX TO BE INCREMENTED IN MULTIPLES OF 8.
+ *
+ * As the 4965 fills RBs (referenced from contiguous RBDs within the circular
+ * buffer), it updates the Rx status buffer in host DRAM, 2) described above,
+ * to tell the driver the idx of the latest filled RBD. The driver must
+ * read this "read" idx from DRAM after receiving an Rx interrupt from 4965.
+ *
+ * The driver must also internally keep track of a third idx, which is the
+ * next RBD to process. When receiving an Rx interrupt, driver should process
+ * all filled but unprocessed RBs up to, but not including, the RB
+ * corresponding to the "read" idx. For example, if "read" idx becomes "1",
+ * driver may process the RB pointed to by RBD 0. Depending on volume of
+ * traffic, there may be many RBs to process.
+ *
+ * If read idx == write idx, 4965 thinks there is no room to put new data.
+ * Due to this, the maximum number of filled RBs is 255, instead of 256. To
+ * be safe, make sure that there is a gap of at least 2 RBDs between "write"
+ * and "read" idxes; that is, make sure that there are no more than 254
+ * buffers waiting to be filled.
+ */
+#define FH49_MEM_RSCSR_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0xBC0)
+#define FH49_MEM_RSCSR_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0xC00)
+#define FH49_MEM_RSCSR_CHNL0 (FH49_MEM_RSCSR_LOWER_BOUND)
+
+/**
+ * Physical base address of 8-byte Rx Status buffer.
+ * Bit fields:
+ * 31-0: Rx status buffer physical base address [35:4], must be 16-byte aligned.
+ */
+#define FH49_RSCSR_CHNL0_STTS_WPTR_REG (FH49_MEM_RSCSR_CHNL0)
+
+/**
+ * Physical base address of Rx Buffer Descriptor Circular Buffer.
+ * Bit fields:
+ * 27-0: RBD CB physical base address [35:8], must be 256-byte aligned.
+ */
+#define FH49_RSCSR_CHNL0_RBDCB_BASE_REG (FH49_MEM_RSCSR_CHNL0 + 0x004)
+
+/**
+ * Rx write pointer (idx, really!).
+ * Bit fields:
+ * 11-0: Index of driver's most recent prepared-to-be-filled RBD, + 1.
+ * NOTE: For 256-entry circular buffer, use only bits [7:0].
+ */
+#define FH49_RSCSR_CHNL0_RBDCB_WPTR_REG (FH49_MEM_RSCSR_CHNL0 + 0x008)
+#define FH49_RSCSR_CHNL0_WPTR (FH49_RSCSR_CHNL0_RBDCB_WPTR_REG)
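+
+/*
+ * Illustrative sketch (not part of the driver): deriving the value to
+ * write into FH49_RSCSR_CHNL0_RBDCB_WPTR_REG, following the multiple-of-8
+ * rule above and assuming a 256-entry RBD circular buffer.
+ */
+static inline u32
+il4965_example_rx_write_ptr(u32 next_free_rbd)
+{
+	return (next_free_rbd & ~7) & 0xFF;	/* round down to x8, wrap at 256 */
+}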
+
+/**
+ * Rx Config/Status Registers (RCSR)
+ * Rx Config Reg for channel 0 (only channel used)
+ *
+ * Driver must initialize FH49_MEM_RCSR_CHNL0_CONFIG_REG as follows for
+ * normal operation (see bit fields).
+ *
+ * Clearing FH49_MEM_RCSR_CHNL0_CONFIG_REG to 0 turns off Rx DMA.
+ * Driver should poll FH49_MEM_RSSR_RX_STATUS_REG for
+ * FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (bit 24) before continuing.
+ *
+ * Bit fields:
+ * 31-30: Rx DMA channel enable: '00' off/pause, '01' pause at end of frame,
+ * '10' operate normally
+ * 29-24: reserved
+ * 23-20: # RBDs in circular buffer = 2^value; use "8" for 256 RBDs (normal),
+ * min "5" for 32 RBDs, max "12" for 4096 RBDs.
+ * 19-18: reserved
+ * 17-16: size of each receive buffer; '00' 4K (normal), '01' 8K,
+ * '10' 12K, '11' 16K.
+ * 15-14: reserved
+ * 13-12: IRQ destination; '00' none, '01' host driver (normal operation)
+ * 11- 4: timeout for closing Rx buffer and interrupting host (units 32 usec)
+ * typical value 0x10 (about 1/2 msec)
+ * 3- 0: reserved
+ */
+#define FH49_MEM_RCSR_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0xC00)
+#define FH49_MEM_RCSR_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0xCC0)
+#define FH49_MEM_RCSR_CHNL0 (FH49_MEM_RCSR_LOWER_BOUND)
+
+#define FH49_MEM_RCSR_CHNL0_CONFIG_REG (FH49_MEM_RCSR_CHNL0)
+
+#define FH49_RCSR_CHNL0_RX_CONFIG_RB_TIMEOUT_MSK (0x00000FF0) /* bits 4-11 */
+#define FH49_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_MSK (0x00001000) /* bit 12 */
+#define FH49_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK (0x00008000) /* bit 15 */
+#define FH49_RCSR_CHNL0_RX_CONFIG_RB_SIZE_MSK (0x00030000) /* bits 16-17 */
+#define FH49_RCSR_CHNL0_RX_CONFIG_RBDBC_SIZE_MSK (0x00F00000) /* bits 20-23 */
+#define FH49_RCSR_CHNL0_RX_CONFIG_DMA_CHNL_EN_MSK (0xC0000000) /* bits 30-31 */
+
+#define FH49_RCSR_RX_CONFIG_RBDCB_SIZE_POS (20)
+#define FH49_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS (4)
+#define RX_RB_TIMEOUT (0x10)
+
+#define FH49_RCSR_RX_CONFIG_CHNL_EN_PAUSE_VAL (0x00000000)
+#define FH49_RCSR_RX_CONFIG_CHNL_EN_PAUSE_EOF_VAL (0x40000000)
+#define FH49_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL (0x80000000)
+
+#define FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K (0x00000000)
+#define FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K (0x00010000)
+#define FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K (0x00020000)
+#define FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_16K (0x00030000)
+
+#define FH49_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY (0x00000004)
+#define FH49_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL (0x00000000)
+#define FH49_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL (0x00001000)
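+
+/*
+ * Illustrative sketch (not part of the driver): one plausible channel-0 Rx
+ * config value built from the fields above: DMA enabled, 256 RBDs (2^8),
+ * 4K buffers, host interrupt, default RB timeout.
+ */
+#define IL4965_EXAMPLE_RX_CONFIG					\
+	(FH49_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |			\
+	 FH49_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |		\
+	 FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |			\
+	 (8 << FH49_RCSR_RX_CONFIG_RBDCB_SIZE_POS) |			\
+	 (RX_RB_TIMEOUT << FH49_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS))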
+
+/**
+ * Rx Shared Status Registers (RSSR)
+ *
+ * After stopping Rx DMA channel (writing 0 to
+ * FH49_MEM_RCSR_CHNL0_CONFIG_REG), driver must poll
+ * FH49_MEM_RSSR_RX_STATUS_REG until Rx channel is idle.
+ *
+ * Bit fields:
+ * 24: 1 = Channel 0 is idle
+ *
+ * FH49_MEM_RSSR_SHARED_CTRL_REG and FH49_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV
+ * contain default values that should not be altered by the driver.
+ */
+#define FH49_MEM_RSSR_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0xC40)
+#define FH49_MEM_RSSR_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0xD00)
+
+#define FH49_MEM_RSSR_SHARED_CTRL_REG (FH49_MEM_RSSR_LOWER_BOUND)
+#define FH49_MEM_RSSR_RX_STATUS_REG (FH49_MEM_RSSR_LOWER_BOUND + 0x004)
+#define FH49_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV\
+ (FH49_MEM_RSSR_LOWER_BOUND + 0x008)
+
+#define FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000)
+
+#define FH49_MEM_TFDIB_REG1_ADDR_BITSHIFT 28
+
+/* TFDB Area - TFDs buffer table */
+#define FH49_MEM_TFDIB_DRAM_ADDR_LSB_MSK (0xFFFFFFFF)
+#define FH49_TFDIB_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0x900)
+#define FH49_TFDIB_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0x958)
+#define FH49_TFDIB_CTRL0_REG(_chnl) (FH49_TFDIB_LOWER_BOUND + 0x8 * (_chnl))
+#define FH49_TFDIB_CTRL1_REG(_chnl) (FH49_TFDIB_LOWER_BOUND + 0x8 * (_chnl) + 0x4)
+
+/**
+ * Transmit DMA Channel Control/Status Registers (TCSR)
+ *
+ * 4965 has one configuration register for each of 8 Tx DMA/FIFO channels
+ * supported in hardware (don't confuse these with the 16 Tx queues in DRAM,
+ * which feed the DMA/FIFO channels); config regs are separated by 0x20 bytes.
+ *
+ * To use a Tx DMA channel, driver must initialize its
+ * FH49_TCSR_CHNL_TX_CONFIG_REG(chnl) with:
+ *
+ * FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
+ * FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE
+ *
+ * All other bits should be 0.
+ *
+ * Bit fields:
+ * 31-30: Tx DMA channel enable: '00' off/pause, '01' pause at end of frame,
+ * '10' operate normally
+ * 29- 4: Reserved, set to "0"
+ * 3: Enable internal DMA requests (1, normal operation), disable (0)
+ * 2- 0: Reserved, set to "0"
+ */
+#define FH49_TCSR_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0xD00)
+#define FH49_TCSR_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0xE60)
+
+/* Find Control/Status reg for given Tx DMA/FIFO channel */
+#define FH49_TCSR_CHNL_NUM (7)
+#define FH50_TCSR_CHNL_NUM (8)
+
+/* TCSR: tx_config register values */
+#define FH49_TCSR_CHNL_TX_CONFIG_REG(_chnl) \
+ (FH49_TCSR_LOWER_BOUND + 0x20 * (_chnl))
+#define FH49_TCSR_CHNL_TX_CREDIT_REG(_chnl) \
+ (FH49_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x4)
+#define FH49_TCSR_CHNL_TX_BUF_STS_REG(_chnl) \
+ (FH49_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x8)
+
+#define FH49_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF (0x00000000)
+#define FH49_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRV (0x00000001)
+
+#define FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE (0x00000000)
+#define FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE (0x00000008)
+
+#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_NOINT (0x00000000)
+#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD (0x00100000)
+#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000)
+
+#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT (0x00000000)
+#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_ENDTFD (0x00400000)
+#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_IFTFD (0x00800000)
+
+#define FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000)
+#define FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE_EOF (0x40000000)
+#define FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000)
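+
+/*
+ * Illustrative sketch (not part of the driver): the channel-enable value
+ * described in the TCSR comment above, composed from the defines just
+ * listed.
+ */
+#define IL4965_EXAMPLE_TX_CONFIG					\
+	(FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |			\
+	 FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE)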
+
+#define FH49_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_EMPTY (0x00000000)
+#define FH49_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_WAIT (0x00002000)
+#define FH49_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00000003)
+
+#define FH49_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM (20)
+#define FH49_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX (12)
+
+/**
+ * Tx Shared Status Registers (TSSR)
+ *
+ * After stopping Tx DMA channel (writing 0 to
+ * FH49_TCSR_CHNL_TX_CONFIG_REG(chnl)), driver must poll
+ * FH49_TSSR_TX_STATUS_REG until selected Tx channel is idle
+ * (channel's buffers empty | no pending requests).
+ *
+ * Bit fields:
+ * 31-24: 1 = Channel buffers empty (channel 7:0)
+ * 23-16: 1 = No pending requests (channel 7:0)
+ */
+#define FH49_TSSR_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0xEA0)
+#define FH49_TSSR_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0xEC0)
+
+#define FH49_TSSR_TX_STATUS_REG (FH49_TSSR_LOWER_BOUND + 0x010)
+
+/**
+ * Bit fields for TSSR(Tx Shared Status & Control) error status register:
+ * 31: Indicates an address error when accessing internal memory
+ * uCode/driver must write "1" in order to clear this flag
+ * 30: Indicates that Host did not send the expected number of dwords to FH
+ * uCode/driver must write "1" in order to clear this flag
+ * 16-9: Each status bit is for one channel. Indicates that an (Error) ActDMA
+ * command was received from the scheduler while the TRB was already full
+ * with the previous command
+ * uCode/driver must write "1" in order to clear this flag
+ * 7-0: Each status bit indicates a channel's TxCredit error. When an error
+ * bit is set, it indicates that the FH has received a full indication
+ * from the RTC TxFIFO and the current value of the TxCredit counter was
+ * not equal to zero. This means that the credit mechanism was not
+ * synchronized to the TxFIFO status
+ * uCode/driver must write "1" in order to clear this flag
+ */
+#define FH49_TSSR_TX_ERROR_REG (FH49_TSSR_LOWER_BOUND + 0x018)
+
+#define FH49_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_chnl) ((1 << (_chnl)) << 16)
+
+/* Tx service channels */
+#define FH49_SRVC_CHNL (9)
+#define FH49_SRVC_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0x9C8)
+#define FH49_SRVC_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0x9D0)
+#define FH49_SRVC_CHNL_SRAM_ADDR_REG(_chnl) \
+ (FH49_SRVC_LOWER_BOUND + ((_chnl) - 9) * 0x4)
+
+#define FH49_TX_CHICKEN_BITS_REG (FH49_MEM_LOWER_BOUND + 0xE98)
+/* Instruct FH to increment the retry count of a packet when
+ * it is brought from memory to the TX FIFO
+ */
+#define FH49_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN (0x00000002)
+
+/* Keep Warm Size */
+#define IL_KW_SIZE 0x1000 /* 4k */
+
+#endif /* __il_4965_h__ */
diff --git a/drivers/net/wireless/iwlegacy/Kconfig b/drivers/net/wireless/iwlegacy/Kconfig
index aef65cd47661..05bd375cb845 100644
--- a/drivers/net/wireless/iwlegacy/Kconfig
+++ b/drivers/net/wireless/iwlegacy/Kconfig
@@ -1,4 +1,4 @@
-config IWLWIFI_LEGACY
+config IWLEGACY
tristate
select FW_LOADER
select NEW_LEDS
@@ -7,13 +7,13 @@ config IWLWIFI_LEGACY
select MAC80211_LEDS
menu "Debugging Options"
- depends on IWLWIFI_LEGACY
+ depends on IWLEGACY
-config IWLWIFI_LEGACY_DEBUG
- bool "Enable full debugging output in 4965 and 3945 drivers"
- depends on IWLWIFI_LEGACY
+config IWLEGACY_DEBUG
+ bool "Enable full debugging output in iwlegacy (iwl 3945/4965) drivers"
+ depends on IWLEGACY
---help---
- This option will enable debug tracing output for the iwlwifilegacy
+ This option will enable debug tracing output for the iwlegacy
drivers.
This will result in the kernel module being ~100k larger. You can
@@ -29,43 +29,26 @@ config IWLWIFI_LEGACY_DEBUG
% echo 0x43fff > /sys/class/net/wlan0/device/debug_level
You can find the list of debug mask values in:
- drivers/net/wireless/iwlwifilegacy/iwl-debug.h
+ drivers/net/wireless/iwlegacy/common.h
If this is your first time using this driver, you should say Y here
as the debug information can assist others in helping you resolve
any problems you may encounter.
-config IWLWIFI_LEGACY_DEBUGFS
- bool "4965 and 3945 debugfs support"
- depends on IWLWIFI_LEGACY && MAC80211_DEBUGFS
+config IWLEGACY_DEBUGFS
+ bool "iwlegacy (iwl 3945/4965) debugfs support"
+ depends on IWLEGACY && MAC80211_DEBUGFS
---help---
- Enable creation of debugfs files for the iwlwifilegacy drivers. This
+ Enable creation of debugfs files for the iwlegacy drivers. This
is a low-impact option that allows getting insight into the
driver's state at runtime.
-config IWLWIFI_LEGACY_DEVICE_TRACING
- bool "iwlwifilegacy legacy device access tracing"
- depends on IWLWIFI_LEGACY
- depends on EVENT_TRACING
- help
- Say Y here to trace all commands, including TX frames and IO
- accesses, sent to the device. If you say yes, iwlwifilegacy will
- register with the ftrace framework for event tracing and dump
- all this information to the ringbuffer, you may need to
- increase the ringbuffer size. See the ftrace documentation
- for more information.
-
- When tracing is not enabled, this option still has some
- (though rather small) overhead.
-
- If unsure, say Y so we can help you better when problems
- occur.
endmenu
config IWL4965
tristate "Intel Wireless WiFi 4965AGN (iwl4965)"
depends on PCI && MAC80211
- select IWLWIFI_LEGACY
+ select IWLEGACY
---help---
This option enables support for
@@ -93,7 +76,7 @@ config IWL4965
config IWL3945
tristate "Intel PRO/Wireless 3945ABG/BG Network Connection (iwl3945)"
depends on PCI && MAC80211
- select IWLWIFI_LEGACY
+ select IWLEGACY
---help---
Select to build the driver supporting the:
diff --git a/drivers/net/wireless/iwlegacy/Makefile b/drivers/net/wireless/iwlegacy/Makefile
index d56aeb38c211..c985a01a0731 100644
--- a/drivers/net/wireless/iwlegacy/Makefile
+++ b/drivers/net/wireless/iwlegacy/Makefile
@@ -1,25 +1,17 @@
-obj-$(CONFIG_IWLWIFI_LEGACY) += iwl-legacy.o
-iwl-legacy-objs := iwl-core.o iwl-eeprom.o iwl-hcmd.o iwl-power.o
-iwl-legacy-objs += iwl-rx.o iwl-tx.o iwl-sta.o
-iwl-legacy-objs += iwl-scan.o iwl-led.o
-iwl-legacy-$(CONFIG_IWLWIFI_LEGACY_DEBUGFS) += iwl-debugfs.o
-iwl-legacy-$(CONFIG_IWLWIFI_LEGACY_DEVICE_TRACING) += iwl-devtrace.o
+obj-$(CONFIG_IWLEGACY) += iwlegacy.o
+iwlegacy-objs := common.o
+iwlegacy-$(CONFIG_IWLEGACY_DEBUGFS) += debug.o
-iwl-legacy-objs += $(iwl-legacy-m)
-
-CFLAGS_iwl-devtrace.o := -I$(src)
+iwlegacy-objs += $(iwlegacy-m)
# 4965
obj-$(CONFIG_IWL4965) += iwl4965.o
-iwl4965-objs := iwl-4965.o iwl4965-base.o iwl-4965-rs.o iwl-4965-led.o
-iwl4965-objs += iwl-4965-ucode.o iwl-4965-tx.o
-iwl4965-objs += iwl-4965-lib.o iwl-4965-rx.o iwl-4965-calib.o
-iwl4965-objs += iwl-4965-sta.o iwl-4965-eeprom.o
-iwl4965-$(CONFIG_IWLWIFI_LEGACY_DEBUGFS) += iwl-4965-debugfs.o
+iwl4965-objs := 4965.o 4965-mac.o 4965-rs.o 4965-calib.o
+iwl4965-$(CONFIG_IWLEGACY_DEBUGFS) += 4965-debug.o
# 3945
obj-$(CONFIG_IWL3945) += iwl3945.o
-iwl3945-objs := iwl3945-base.o iwl-3945.o iwl-3945-rs.o iwl-3945-led.o
-iwl3945-$(CONFIG_IWLWIFI_LEGACY_DEBUGFS) += iwl-3945-debugfs.o
+iwl3945-objs := 3945-mac.o 3945.o 3945-rs.o
+iwl3945-$(CONFIG_IWLEGACY_DEBUGFS) += 3945-debug.o
ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/iwlegacy/iwl-commands.h b/drivers/net/wireless/iwlegacy/commands.h
index 89904054473f..25dd7d28d022 100644
--- a/drivers/net/wireless/iwlegacy/iwl-commands.h
+++ b/drivers/net/wireless/iwlegacy/commands.h
@@ -60,100 +60,96 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
-/*
- * Please use this file (iwl-commands.h) only for uCode API definitions.
- * Please use iwl-xxxx-hw.h for hardware-related definitions.
- * Please use iwl-dev.h for driver implementation definitions.
- */
-#ifndef __iwl_legacy_commands_h__
-#define __iwl_legacy_commands_h__
+#ifndef __il_commands_h__
+#define __il_commands_h__
-struct iwl_priv;
+#include <linux/ieee80211.h>
-/* uCode version contains 4 values: Major/Minor/API/Serial */
-#define IWL_UCODE_MAJOR(ver) (((ver) & 0xFF000000) >> 24)
-#define IWL_UCODE_MINOR(ver) (((ver) & 0x00FF0000) >> 16)
-#define IWL_UCODE_API(ver) (((ver) & 0x0000FF00) >> 8)
-#define IWL_UCODE_SERIAL(ver) ((ver) & 0x000000FF)
+struct il_priv;
+/* uCode version contains 4 values: Major/Minor/API/Serial */
+#define IL_UCODE_MAJOR(ver) (((ver) & 0xFF000000) >> 24)
+#define IL_UCODE_MINOR(ver) (((ver) & 0x00FF0000) >> 16)
+#define IL_UCODE_API(ver) (((ver) & 0x0000FF00) >> 8)
+#define IL_UCODE_SERIAL(ver) ((ver) & 0x000000FF)
/* Tx rates */
-#define IWL_CCK_RATES 4
-#define IWL_OFDM_RATES 8
-#define IWL_MAX_RATES (IWL_CCK_RATES + IWL_OFDM_RATES)
+#define IL_CCK_RATES 4
+#define IL_OFDM_RATES 8
+#define IL_MAX_RATES (IL_CCK_RATES + IL_OFDM_RATES)
enum {
- REPLY_ALIVE = 0x1,
- REPLY_ERROR = 0x2,
+ N_ALIVE = 0x1,
+ N_ERROR = 0x2,
/* RXON and QOS commands */
- REPLY_RXON = 0x10,
- REPLY_RXON_ASSOC = 0x11,
- REPLY_QOS_PARAM = 0x13,
- REPLY_RXON_TIMING = 0x14,
+ C_RXON = 0x10,
+ C_RXON_ASSOC = 0x11,
+ C_QOS_PARAM = 0x13,
+ C_RXON_TIMING = 0x14,
/* Multi-Station support */
- REPLY_ADD_STA = 0x18,
- REPLY_REMOVE_STA = 0x19,
+ C_ADD_STA = 0x18,
+ C_REM_STA = 0x19,
/* Security */
- REPLY_WEPKEY = 0x20,
+ C_WEPKEY = 0x20,
/* RX, TX, LEDs */
- REPLY_3945_RX = 0x1b, /* 3945 only */
- REPLY_TX = 0x1c,
- REPLY_RATE_SCALE = 0x47, /* 3945 only */
- REPLY_LEDS_CMD = 0x48,
- REPLY_TX_LINK_QUALITY_CMD = 0x4e, /* for 4965 and up */
+ N_3945_RX = 0x1b, /* 3945 only */
+ C_TX = 0x1c,
+ C_RATE_SCALE = 0x47, /* 3945 only */
+ C_LEDS = 0x48,
+ C_TX_LINK_QUALITY_CMD = 0x4e, /* for 4965 */
/* 802.11h related */
- REPLY_CHANNEL_SWITCH = 0x72,
- CHANNEL_SWITCH_NOTIFICATION = 0x73,
- REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74,
- SPECTRUM_MEASURE_NOTIFICATION = 0x75,
+ C_CHANNEL_SWITCH = 0x72,
+ N_CHANNEL_SWITCH = 0x73,
+ C_SPECTRUM_MEASUREMENT = 0x74,
+ N_SPECTRUM_MEASUREMENT = 0x75,
/* Power Management */
- POWER_TABLE_CMD = 0x77,
- PM_SLEEP_NOTIFICATION = 0x7A,
- PM_DEBUG_STATISTIC_NOTIFIC = 0x7B,
+ C_POWER_TBL = 0x77,
+ N_PM_SLEEP = 0x7A,
+ N_PM_DEBUG_STATS = 0x7B,
/* Scan commands and notifications */
- REPLY_SCAN_CMD = 0x80,
- REPLY_SCAN_ABORT_CMD = 0x81,
- SCAN_START_NOTIFICATION = 0x82,
- SCAN_RESULTS_NOTIFICATION = 0x83,
- SCAN_COMPLETE_NOTIFICATION = 0x84,
+ C_SCAN = 0x80,
+ C_SCAN_ABORT = 0x81,
+ N_SCAN_START = 0x82,
+ N_SCAN_RESULTS = 0x83,
+ N_SCAN_COMPLETE = 0x84,
/* IBSS/AP commands */
- BEACON_NOTIFICATION = 0x90,
- REPLY_TX_BEACON = 0x91,
+ N_BEACON = 0x90,
+ C_TX_BEACON = 0x91,
/* Miscellaneous commands */
- REPLY_TX_PWR_TABLE_CMD = 0x97,
+ C_TX_PWR_TBL = 0x97,
/* Bluetooth device coexistence config command */
- REPLY_BT_CONFIG = 0x9b,
+ C_BT_CONFIG = 0x9b,
/* Statistics */
- REPLY_STATISTICS_CMD = 0x9c,
- STATISTICS_NOTIFICATION = 0x9d,
+ C_STATS = 0x9c,
+ N_STATS = 0x9d,
/* RF-KILL commands and notifications */
- CARD_STATE_NOTIFICATION = 0xa1,
+ N_CARD_STATE = 0xa1,
/* Missed beacons notification */
- MISSED_BEACONS_NOTIFICATION = 0xa2,
+ N_MISSED_BEACONS = 0xa2,
- REPLY_CT_KILL_CONFIG_CMD = 0xa4,
- SENSITIVITY_CMD = 0xa8,
- REPLY_PHY_CALIBRATION_CMD = 0xb0,
- REPLY_RX_PHY_CMD = 0xc0,
- REPLY_RX_MPDU_CMD = 0xc1,
- REPLY_RX = 0xc3,
- REPLY_COMPRESSED_BA = 0xc5,
+ C_CT_KILL_CONFIG = 0xa4,
+ C_SENSITIVITY = 0xa8,
+ C_PHY_CALIBRATION = 0xb0,
+ N_RX_PHY = 0xc0,
+ N_RX_MPDU = 0xc1,
+ N_RX = 0xc3,
+ N_COMPRESSED_BA = 0xc5,
- REPLY_MAX = 0xff
+ IL_CN_MAX = 0xff
};
/******************************************************************************
@@ -163,25 +159,25 @@ enum {
*
*****************************************************************************/
-/* iwl_cmd_header flags value */
-#define IWL_CMD_FAILED_MSK 0x40
+/* il_cmd_header flags value */
+#define IL_CMD_FAILED_MSK 0x40
#define SEQ_TO_QUEUE(s) (((s) >> 8) & 0x1f)
#define QUEUE_TO_SEQ(q) (((q) & 0x1f) << 8)
-#define SEQ_TO_INDEX(s) ((s) & 0xff)
-#define INDEX_TO_SEQ(i) ((i) & 0xff)
+#define SEQ_TO_IDX(s) ((s) & 0xff)
+#define IDX_TO_SEQ(i) ((i) & 0xff)
#define SEQ_HUGE_FRAME cpu_to_le16(0x4000)
#define SEQ_RX_FRAME cpu_to_le16(0x8000)
/**
- * struct iwl_cmd_header
+ * struct il_cmd_header
*
* This header format appears in the beginning of each command sent from the
* driver, and each response/notification received from uCode.
*/
-struct iwl_cmd_header {
- u8 cmd; /* Command ID: REPLY_RXON, etc. */
- u8 flags; /* 0:5 reserved, 6 abort, 7 internal */
+struct il_cmd_header {
+ u8 cmd; /* Command ID: C_RXON, etc. */
+ u8 flags; /* 0:5 reserved, 6 abort, 7 internal */
/*
* The driver sets up the sequence number to values of its choosing.
* uCode does not use this value, but passes it back to the driver
@@ -192,29 +188,28 @@ struct iwl_cmd_header {
* There is one exception: uCode sets bit 15 when it originates
* the response/notification, i.e. when the response/notification
* is not a direct response to a command sent by the driver. For
- * example, uCode issues REPLY_3945_RX when it sends a received frame
+ * example, uCode issues N_3945_RX when it sends a received frame
* to the driver; it is not a direct response to any driver command.
*
* The Linux driver uses the following format:
*
- * 0:7 tfd index - position within TX queue
- * 8:12 TX queue id
- * 13 reserved
- * 14 huge - driver sets this to indicate command is in the
- * 'huge' storage at the end of the command buffers
- * 15 unsolicited RX or uCode-originated notification
- */
+ * 0:7 tfd idx - position within TX queue
+ * 8:12 TX queue id
+ * 13 reserved
+ * 14 huge - driver sets this to indicate command is in the
+ * 'huge' storage at the end of the command buffers
+ * 15 unsolicited RX or uCode-originated notification
+ */
__le16 sequence;
/* command or response/notification data follows immediately */
u8 data[0];
} __packed;
-
/**
- * struct iwl3945_tx_power
+ * struct il3945_tx_power
*
- * Used in REPLY_TX_PWR_TABLE_CMD, REPLY_SCAN_CMD, REPLY_CHANNEL_SWITCH
+ * Used in C_TX_PWR_TBL, C_SCAN, C_CHANNEL_SWITCH
*
* Each entry contains two values:
* 1) DSP gain (or sometimes called DSP attenuation). This is a fine-grained
@@ -223,21 +218,21 @@ struct iwl_cmd_header {
* 2) Radio gain. This sets the analog gain of the radio Tx path.
* It is a coarser setting, and behaves in a logarithmic (dB) fashion.
*
- * Driver obtains values from struct iwl3945_tx_power power_gain_table[][].
+ * Driver obtains values from struct il3945_tx_power power_gain_table[][].
*/
-struct iwl3945_tx_power {
+struct il3945_tx_power {
u8 tx_gain; /* gain for analog radio */
u8 dsp_atten; /* gain for DSP */
} __packed;
/**
- * struct iwl3945_power_per_rate
+ * struct il3945_power_per_rate
*
- * Used in REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH
+ * Used in C_TX_PWR_TBL, C_CHANNEL_SWITCH
*/
-struct iwl3945_power_per_rate {
+struct il3945_power_per_rate {
u8 rate; /* plcp */
- struct iwl3945_tx_power tpc;
+ struct il3945_tx_power tpc;
u8 reserved;
} __packed;
@@ -245,10 +240,10 @@ struct iwl3945_power_per_rate {
* iwl4965 rate_n_flags bit fields
*
* rate_n_flags format is used in following iwl4965 commands:
- * REPLY_RX (response only)
- * REPLY_RX_MPDU (response only)
- * REPLY_TX (both command and response)
- * REPLY_TX_LINK_QUALITY_CMD
+ * N_RX (response only)
+ * N_RX_MPDU (response only)
+ * C_TX (both command and response)
+ * C_TX_LINK_QUALITY_CMD
*
* High-throughput (HT) rate format for bits 7:0 (bit 8 must be "1"):
* 2-0: 0) 6 Mbps
@@ -326,17 +321,17 @@ struct iwl3945_power_per_rate {
#define RATE_MCS_ANT_ABC_MSK (RATE_MCS_ANT_AB_MSK | RATE_MCS_ANT_C_MSK)
#define RATE_ANT_NUM 3
-#define POWER_TABLE_NUM_ENTRIES 33
-#define POWER_TABLE_NUM_HT_OFDM_ENTRIES 32
-#define POWER_TABLE_CCK_ENTRY 32
+#define POWER_TBL_NUM_ENTRIES 33
+#define POWER_TBL_NUM_HT_OFDM_ENTRIES 32
+#define POWER_TBL_CCK_ENTRY 32
-#define IWL_PWR_NUM_HT_OFDM_ENTRIES 24
-#define IWL_PWR_CCK_ENTRIES 2
+#define IL_PWR_NUM_HT_OFDM_ENTRIES 24
+#define IL_PWR_CCK_ENTRIES 2
/**
- * union iwl4965_tx_power_dual_stream
+ * union il4965_tx_power_dual_stream
*
- * Host format used for REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH
+ * Host format used for C_TX_PWR_TBL, C_CHANNEL_SWITCH
* Use __le32 version (struct tx_power_dual_stream) when building command.
*
* Driver provides radio gain and DSP attenuation settings to device in pairs,
@@ -347,9 +342,9 @@ struct iwl3945_power_per_rate {
* For MIMO rates, one value may be different from the other,
* in order to balance the Tx output between the two transmitters.
*
- * See more details in doc for TXPOWER in iwl-4965-hw.h.
+ * See more details in doc for TXPOWER in 4965.h.
*/
-union iwl4965_tx_power_dual_stream {
+union il4965_tx_power_dual_stream {
struct {
u8 radio_tx_gain[2];
u8 dsp_predis_atten[2];
@@ -360,21 +355,21 @@ union iwl4965_tx_power_dual_stream {
/**
* struct tx_power_dual_stream
*
- * Table entries in REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH
+ * Table entries in C_TX_PWR_TBL, C_CHANNEL_SWITCH
*
- * Same format as iwl_tx_power_dual_stream, but __le32
+ * Same format as il_tx_power_dual_stream, but __le32
*/
struct tx_power_dual_stream {
__le32 dw;
} __packed;
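
To illustrate the pairing described above, here is a small host-side sketch of how the two radio-gain / DSP-attenuation pairs from union il4965_tx_power_dual_stream could be overlaid onto the single 32-bit word carried by struct tx_power_dual_stream. This is an assumption-laden illustration only: it uses plain uint32_t in place of __le32, the struct and helper names are invented for the example, and a cpu_to_le32()-style conversion would still be needed on big-endian hosts.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Mirrors the u8[2]/u8[2] half of the union above (illustrative only). */
struct dual_stream_gains {
	uint8_t radio_tx_gain[2];
	uint8_t dsp_predis_atten[2];
};

/* Overlay the four gain bytes onto one 32-bit word, as the union does. */
static uint32_t pack_dual_stream(const struct dual_stream_gains *g)
{
	uint32_t dw;
	memcpy(&dw, g, sizeof(dw));
	return dw;
}

int main(void)
{
	/* Hypothetical gain settings for chains A and B. */
	struct dual_stream_gains g = { { 0x3f, 0x3f }, { 0x68, 0x68 } };

	printf("dw = 0x%08x\n", (unsigned)pack_dual_stream(&g));
	return 0;
}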
/**
- * struct iwl4965_tx_power_db
+ * struct il4965_tx_power_db
*
- * Entire table within REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH
+ * Entire table within C_TX_PWR_TBL, C_CHANNEL_SWITCH
*/
-struct iwl4965_tx_power_db {
- struct tx_power_dual_stream power_tbl[POWER_TABLE_NUM_ENTRIES];
+struct il4965_tx_power_db {
+ struct tx_power_dual_stream power_tbl[POWER_TBL_NUM_ENTRIES];
} __packed;
/******************************************************************************
@@ -387,7 +382,7 @@ struct iwl4965_tx_power_db {
#define INITIALIZE_SUBTYPE (9)
/*
- * ("Initialize") REPLY_ALIVE = 0x1 (response only, not a command)
+ * ("Initialize") N_ALIVE = 0x1 (response only, not a command)
*
* uCode issues this "initialize alive" notification once the initialization
* uCode image has completed its work, and is ready to load the runtime image.
@@ -410,7 +405,7 @@ struct iwl4965_tx_power_db {
* 3) Tx gain compensation to balance 4965's 2 Tx chains for MIMO operation,
* for each of 5 frequency ranges.
*/
-struct iwl_init_alive_resp {
+struct il_init_alive_resp {
u8 ucode_minor;
u8 ucode_major;
__le16 reserved1;
@@ -433,9 +428,8 @@ struct iwl_init_alive_resp {
* 2 Tx chains */
} __packed;
-
/**
- * REPLY_ALIVE = 0x1 (response only, not a command)
+ * N_ALIVE = 0x1 (response only, not a command)
*
* uCode issues this "alive" notification once the runtime image is ready
* to receive commands from the driver. This is the *second* "alive"
@@ -454,7 +448,7 @@ struct iwl_init_alive_resp {
* __le32 log_size; log capacity (in number of entries)
* __le32 type; (1) timestamp with each entry, (0) no timestamp
* __le32 wraps; # times uCode has wrapped to top of circular buffer
- * __le32 write_index; next circular buffer entry that uCode would fill
+ * __le32 write_idx; next circular buffer entry that uCode would fill
*
* The header is followed by the circular buffer of log entries. Entries
* with timestamps have the following format:
@@ -511,13 +505,13 @@ struct iwl_init_alive_resp {
* The Linux driver can print both logs to the system log when a uCode error
* occurs.
*/
-struct iwl_alive_resp {
+struct il_alive_resp {
u8 ucode_minor;
u8 ucode_major;
__le16 reserved1;
u8 sw_rev[8];
u8 ver_type;
- u8 ver_subtype; /* not "9" for runtime alive */
+ u8 ver_subtype; /* not "9" for runtime alive */
__le16 reserved2;
__le32 log_event_table_ptr; /* SRAM address for event log */
__le32 error_event_table_ptr; /* SRAM address for error log */
@@ -526,9 +520,9 @@ struct iwl_alive_resp {
} __packed;
/*
- * REPLY_ERROR = 0x2 (response only, not a command)
+ * N_ERROR = 0x2 (response only, not a command)
*/
-struct iwl_error_resp {
+struct il_error_resp {
__le32 error_type;
u8 cmd_id;
u8 reserved1;
@@ -554,7 +548,6 @@ enum {
RXON_DEV_TYPE_SNIFFER = 6,
};
-
#define RXON_RX_CHAIN_DRIVER_FORCE_MSK cpu_to_le16(0x1 << 0)
#define RXON_RX_CHAIN_DRIVER_FORCE_POS (0)
#define RXON_RX_CHAIN_VALID_MSK cpu_to_le16(0x7 << 1)
@@ -593,7 +586,6 @@ enum {
* (according to ON_AIR deassertion) */
#define RXON_FLG_TSF2HOST_MSK cpu_to_le32(1 << 15)
-
/* HT flags */
#define RXON_FLG_CTRL_CHANNEL_LOC_POS (22)
#define RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK cpu_to_le32(0x1 << 22)
@@ -640,7 +632,7 @@ enum {
#define RXON_FILTER_BCON_AWARE_MSK cpu_to_le32(1 << 6)
/**
- * REPLY_RXON = 0x10 (command, has simple generic response)
+ * C_RXON = 0x10 (command, has simple generic response)
*
* RXON tunes the radio tuner to a service channel, and sets up a number
* of parameters that are used primarily for Rx, but also for Tx operations.
@@ -653,11 +645,11 @@ enum {
* channel.
*
* NOTE: All RXONs wipe clean the internal txpower table. Driver must
- * issue a new REPLY_TX_PWR_TABLE_CMD after each REPLY_RXON (0x10),
+ * issue a new C_TX_PWR_TBL after each C_RXON (0x10),
* regardless of whether RXON_FILTER_ASSOC_MSK is set.
*/
-struct iwl3945_rxon_cmd {
+struct il3945_rxon_cmd {
u8 node_addr[6];
__le16 reserved1;
u8 bssid_addr[6];
@@ -676,7 +668,7 @@ struct iwl3945_rxon_cmd {
__le16 reserved5;
} __packed;
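
The note above makes tuning a strict two-step sequence: every C_RXON wipes the internal txpower table, so a C_TX_PWR_TBL must be sent before any Tx can occur. The sketch below only illustrates that ordering; the send helpers are hypothetical stand-ins, not the driver's real command API.

#include <stdio.h>

/* Hypothetical stand-ins for whatever command-submission path is used. */
static int send_rxon(void)		{ puts("C_RXON (0x10)");       return 0; }
static int send_tx_power_table(void)	{ puts("C_TX_PWR_TBL (0x97)"); return 0; }

static int retune(void)
{
	int ret = send_rxon();		/* wipes the internal txpower table */
	if (ret)
		return ret;
	/* Must always follow, regardless of RXON_FILTER_ASSOC_MSK. */
	return send_tx_power_table();
}

int main(void)
{
	return retune();
}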
-struct iwl4965_rxon_cmd {
+struct il4965_rxon_cmd {
u8 node_addr[6];
__le16 reserved1;
u8 bssid_addr[6];
@@ -699,7 +691,7 @@ struct iwl4965_rxon_cmd {
/* Create a common rxon cmd which will be typecast into the 3945 or 4965
* specific rxon cmd, depending on where it is called from.
*/
-struct iwl_legacy_rxon_cmd {
+struct il_rxon_cmd {
u8 node_addr[6];
__le16 reserved1;
u8 bssid_addr[6];
@@ -721,11 +713,10 @@ struct iwl_legacy_rxon_cmd {
u8 reserved5;
} __packed;
-
/*
- * REPLY_RXON_ASSOC = 0x11 (command, has simple generic response)
+ * C_RXON_ASSOC = 0x11 (command, has simple generic response)
*/
-struct iwl3945_rxon_assoc_cmd {
+struct il3945_rxon_assoc_cmd {
__le32 flags;
__le32 filter_flags;
u8 ofdm_basic_rates;
@@ -733,7 +724,7 @@ struct iwl3945_rxon_assoc_cmd {
__le16 reserved;
} __packed;
-struct iwl4965_rxon_assoc_cmd {
+struct il4965_rxon_assoc_cmd {
__le32 flags;
__le32 filter_flags;
u8 ofdm_basic_rates;
@@ -744,17 +735,17 @@ struct iwl4965_rxon_assoc_cmd {
__le16 reserved;
} __packed;
-#define IWL_CONN_MAX_LISTEN_INTERVAL 10
-#define IWL_MAX_UCODE_BEACON_INTERVAL 4 /* 4096 */
-#define IWL39_MAX_UCODE_BEACON_INTERVAL 1 /* 1024 */
+#define IL_CONN_MAX_LISTEN_INTERVAL 10
+#define IL_MAX_UCODE_BEACON_INTERVAL 4 /* 4096 */
+#define IL39_MAX_UCODE_BEACON_INTERVAL 1 /* 1024 */
/*
- * REPLY_RXON_TIMING = 0x14 (command, has simple generic response)
+ * C_RXON_TIMING = 0x14 (command, has simple generic response)
*/
-struct iwl_rxon_time_cmd {
+struct il_rxon_time_cmd {
__le64 timestamp;
__le16 beacon_interval;
- __le16 atim_window;
+ __le16 atim_win;
__le32 beacon_init_val;
__le16 listen_interval;
u8 dtim_period;
@@ -762,32 +753,32 @@ struct iwl_rxon_time_cmd {
} __packed;
/*
- * REPLY_CHANNEL_SWITCH = 0x72 (command, has simple generic response)
+ * C_CHANNEL_SWITCH = 0x72 (command, has simple generic response)
*/
-struct iwl3945_channel_switch_cmd {
+struct il3945_channel_switch_cmd {
u8 band;
u8 expect_beacon;
__le16 channel;
__le32 rxon_flags;
__le32 rxon_filter_flags;
__le32 switch_time;
- struct iwl3945_power_per_rate power[IWL_MAX_RATES];
+ struct il3945_power_per_rate power[IL_MAX_RATES];
} __packed;
-struct iwl4965_channel_switch_cmd {
+struct il4965_channel_switch_cmd {
u8 band;
u8 expect_beacon;
__le16 channel;
__le32 rxon_flags;
__le32 rxon_filter_flags;
__le32 switch_time;
- struct iwl4965_tx_power_db tx_power;
+ struct il4965_tx_power_db tx_power;
} __packed;
/*
- * CHANNEL_SWITCH_NOTIFICATION = 0x73 (notification only, not a command)
+ * N_CHANNEL_SWITCH = 0x73 (notification only, not a command)
*/
-struct iwl_csa_notification {
+struct il_csa_notification {
__le16 band;
__le16 channel;
__le32 status; /* 0 - OK, 1 - fail */
@@ -800,22 +791,22 @@ struct iwl_csa_notification {
*****************************************************************************/
/**
- * struct iwl_ac_qos -- QOS timing params for REPLY_QOS_PARAM
- * One for each of 4 EDCA access categories in struct iwl_qosparam_cmd
+ * struct il_ac_qos -- QOS timing params for C_QOS_PARAM
+ * One for each of 4 EDCA access categories in struct il_qosparam_cmd
*
- * @cw_min: Contention window, start value in numbers of slots.
+ * @cw_min: Contention win, start value in numbers of slots.
* Should be a power-of-2, minus 1. Device's default is 0x0f.
- * @cw_max: Contention window, max value in numbers of slots.
+ * @cw_max: Contention win, max value in numbers of slots.
* Should be a power-of-2, minus 1. Device's default is 0x3f.
* @aifsn: Number of slots in Arbitration Interframe Space (before
* performing random backoff timing prior to Tx). Device default 1.
* @edca_txop: Length of Tx opportunity, in uSecs. Device default is 0.
*
- * Device will automatically increase contention window by (2*CW) + 1 for each
+ * Device will automatically increase contention win by (2*CW) + 1 for each
* transmission retry. Device uses cw_max as a bit mask, ANDed with new CW
* value, to cap the CW value.
*/
-struct iwl_ac_qos {
+struct il_ac_qos {
__le16 cw_min;
__le16 cw_max;
u8 aifsn;
@@ -832,14 +823,14 @@ struct iwl_ac_qos {
#define AC_NUM 4
/*
- * REPLY_QOS_PARAM = 0x13 (command, has simple generic response)
+ * C_QOS_PARAM = 0x13 (command, has simple generic response)
*
* This command sets up timings for each of the 4 prioritized EDCA Tx FIFOs
* 0: Background, 1: Best Effort, 2: Video, 3: Voice.
*/
-struct iwl_qosparam_cmd {
+struct il_qosparam_cmd {
__le32 qos_flags;
- struct iwl_ac_qos ac[AC_NUM];
+ struct il_ac_qos ac[AC_NUM];
} __packed;
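
As a worked example of the retry behavior documented for struct il_ac_qos, assume the device defaults of cw_min = 0x0f and cw_max = 0x3f: the device grows CW by (2*CW) + 1 per retry and ANDs the result with cw_max, so the window steps 15 -> 31 -> 63 and then stays capped at 63. A minimal sketch of that arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned int cw = 0x0f;			/* cw_min, device default       */
	const unsigned int cw_max = 0x3f;	/* used as a bit mask to cap CW */

	for (int retry = 0; retry < 5; retry++) {
		printf("retry %d: CW = %u slots\n", retry, cw);
		cw = ((2 * cw) + 1) & cw_max;	/* growth rule from il_ac_qos doc */
	}
	return 0;
}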
/******************************************************************************
@@ -852,15 +843,15 @@ struct iwl_qosparam_cmd {
*/
/* Special, dedicated locations within device's station table */
-#define IWL_AP_ID 0
-#define IWL_STA_ID 2
-#define IWL3945_BROADCAST_ID 24
-#define IWL3945_STATION_COUNT 25
-#define IWL4965_BROADCAST_ID 31
-#define IWL4965_STATION_COUNT 32
+#define IL_AP_ID 0
+#define IL_STA_ID 2
+#define IL3945_BROADCAST_ID 24
+#define IL3945_STATION_COUNT 25
+#define IL4965_BROADCAST_ID 31
+#define IL4965_STATION_COUNT 32
-#define IWL_STATION_COUNT 32 /* MAX(3945,4965)*/
-#define IWL_INVALID_STATION 255
+#define IL_STATION_COUNT 32 /* MAX(3945,4965) */
+#define IL_INVALID_STATION 255
#define STA_FLG_TX_RATE_MSK cpu_to_le32(1 << 2)
#define STA_FLG_PWR_SAVE_MSK cpu_to_le32(1 << 8)
@@ -901,11 +892,11 @@ struct iwl_qosparam_cmd {
#define STA_MODIFY_DELBA_TID_MSK 0x10
#define STA_MODIFY_SLEEP_TX_COUNT_MSK 0x20
-/* Receiver address (actually, Rx station's index into station table),
+/* Receiver address (actually, Rx station's idx into station table),
* combined with Traffic ID (QOS priority), in format used by Tx Scheduler */
#define BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid))
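
A minimal illustration of the BUILD_RAxTID() packing (station table idx in the upper nibble, TID in the lower), using hypothetical values:

#include <assert.h>

#define BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid))

int main(void)
{
	/* Hypothetical example: station table idx 3, TID (QOS priority) 5. */
	assert(BUILD_RAxTID(3, 5) == 0x35);
	return 0;
}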
-struct iwl4965_keyinfo {
+struct il4965_keyinfo {
__le16 key_flags;
u8 tkip_rx_tsc_byte2; /* TSC[2] for key mix ph1 detection */
u8 reserved1;
@@ -918,12 +909,12 @@ struct iwl4965_keyinfo {
/**
* struct sta_id_modify
* @addr[ETH_ALEN]: station's MAC address
- * @sta_id: index of station in uCode's station table
+ * @sta_id: idx of station in uCode's station table
* @modify_mask: STA_MODIFY_*, 1: modify, 0: don't change
*
- * Driver selects unused table index when adding new station,
- * or the index to a pre-existing station entry when modifying that station.
- * Some indexes have special purposes (IWL_AP_ID, index 0, is for AP).
+ * Driver selects unused table idx when adding new station,
+ * or the idx to a pre-existing station entry when modifying that station.
+ * Some idxes have special purposes (IL_AP_ID, idx 0, is for AP).
*
* modify_mask flags select which parameters to modify vs. leave alone.
*/
@@ -936,15 +927,15 @@ struct sta_id_modify {
} __packed;
/*
- * REPLY_ADD_STA = 0x18 (command)
+ * C_ADD_STA = 0x18 (command)
*
* The device contains an internal table of per-station information,
* with info on security keys, aggregation parameters, and Tx rates for
* initial Tx attempt and any retries (4965 devices uses
- * REPLY_TX_LINK_QUALITY_CMD,
- * 3945 uses REPLY_RATE_SCALE to set up rate tables).
+ * C_TX_LINK_QUALITY_CMD,
+ * 3945 uses C_RATE_SCALE to set up rate tables).
*
- * REPLY_ADD_STA sets up the table entry for one station, either creating
+ * C_ADD_STA sets up the table entry for one station, either creating
* a new entry, or modifying a pre-existing one.
*
* NOTE: RXON command (without "associated" bit set) wipes the station table
@@ -954,20 +945,20 @@ struct sta_id_modify {
* their own txpower/rate setup data).
*
* When getting started on a new channel, driver must set up the
- * IWL_BROADCAST_ID entry (last entry in the table). For a client
+ * IL_BROADCAST_ID entry (last entry in the table). For a client
* station in a BSS, once an AP is selected, driver sets up the AP STA
- * in the IWL_AP_ID entry (1st entry in the table). BROADCAST and AP
+ * in the IL_AP_ID entry (1st entry in the table). BROADCAST and AP
* are all that are needed for a BSS client station. If the device is
* used as AP, or in an IBSS network, driver must set up station table
- * entries for all STAs in network, starting with index IWL_STA_ID.
+ * entries for all STAs in network, starting with idx IL_STA_ID.
*/
-struct iwl3945_addsta_cmd {
+struct il3945_addsta_cmd {
u8 mode; /* 1: modify existing, 0: add new station */
u8 reserved[3];
struct sta_id_modify sta;
- struct iwl4965_keyinfo key;
- __le32 station_flags; /* STA_FLG_* */
+ struct il4965_keyinfo key;
+ __le32 station_flags; /* STA_FLG_* */
__le32 station_flags_msk; /* STA_FLG_* */
/* bit field to disable (1) or enable (0) Tx for Traffic ID (TID)
@@ -990,12 +981,12 @@ struct iwl3945_addsta_cmd {
__le16 add_immediate_ba_ssn;
} __packed;
-struct iwl4965_addsta_cmd {
+struct il4965_addsta_cmd {
u8 mode; /* 1: modify existing, 0: add new station */
u8 reserved[3];
struct sta_id_modify sta;
- struct iwl4965_keyinfo key;
- __le32 station_flags; /* STA_FLG_* */
+ struct il4965_keyinfo key;
+ __le32 station_flags; /* STA_FLG_* */
__le32 station_flags_msk; /* STA_FLG_* */
/* bit field to disable (1) or enable (0) Tx for Traffic ID (TID)
@@ -1003,7 +994,7 @@ struct iwl4965_addsta_cmd {
* Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */
__le16 tid_disable_tx;
- __le16 reserved1;
+ __le16 reserved1;
/* TID for which to add block-ack support.
* Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
@@ -1028,12 +1019,12 @@ struct iwl4965_addsta_cmd {
} __packed;
/* Wrapper struct for 3945 and 4965 addsta_cmd structures */
-struct iwl_legacy_addsta_cmd {
+struct il_addsta_cmd {
u8 mode; /* 1: modify existing, 0: add new station */
u8 reserved[3];
struct sta_id_modify sta;
- struct iwl4965_keyinfo key;
- __le32 station_flags; /* STA_FLG_* */
+ struct il4965_keyinfo key;
+ __le32 station_flags; /* STA_FLG_* */
__le32 station_flags_msk; /* STA_FLG_* */
/* bit field to disable (1) or enable (0) Tx for Traffic ID (TID)
@@ -1041,7 +1032,7 @@ struct iwl_legacy_addsta_cmd {
* Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */
__le16 tid_disable_tx;
- __le16 rate_n_flags; /* 3945 only */
+ __le16 rate_n_flags; /* 3945 only */
/* TID for which to add block-ack support.
* Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
@@ -1065,51 +1056,50 @@ struct iwl_legacy_addsta_cmd {
__le16 reserved2;
} __packed;
-
#define ADD_STA_SUCCESS_MSK 0x1
-#define ADD_STA_NO_ROOM_IN_TABLE 0x2
+#define ADD_STA_NO_ROOM_IN_TBL 0x2
#define ADD_STA_NO_BLOCK_ACK_RESOURCE 0x4
#define ADD_STA_MODIFY_NON_EXIST_STA 0x8
/*
- * REPLY_ADD_STA = 0x18 (response)
+ * C_ADD_STA = 0x18 (response)
*/
-struct iwl_add_sta_resp {
- u8 status; /* ADD_STA_* */
+struct il_add_sta_resp {
+ u8 status; /* ADD_STA_* */
} __packed;
#define REM_STA_SUCCESS_MSK 0x1
/*
- * REPLY_REM_STA = 0x19 (response)
+ * C_REM_STA = 0x19 (response)
*/
-struct iwl_rem_sta_resp {
+struct il_rem_sta_resp {
u8 status;
} __packed;
/*
- * REPLY_REM_STA = 0x19 (command)
+ * C_REM_STA = 0x19 (command)
*/
-struct iwl_rem_sta_cmd {
- u8 num_sta; /* number of removed stations */
+struct il_rem_sta_cmd {
+ u8 num_sta; /* number of removed stations */
u8 reserved[3];
- u8 addr[ETH_ALEN]; /* MAC addr of the first station */
+ u8 addr[ETH_ALEN]; /* MAC addr of the first station */
u8 reserved2[2];
} __packed;
-#define IWL_TX_FIFO_BK_MSK cpu_to_le32(BIT(0))
-#define IWL_TX_FIFO_BE_MSK cpu_to_le32(BIT(1))
-#define IWL_TX_FIFO_VI_MSK cpu_to_le32(BIT(2))
-#define IWL_TX_FIFO_VO_MSK cpu_to_le32(BIT(3))
-#define IWL_AGG_TX_QUEUE_MSK cpu_to_le32(0xffc00)
+#define IL_TX_FIFO_BK_MSK cpu_to_le32(BIT(0))
+#define IL_TX_FIFO_BE_MSK cpu_to_le32(BIT(1))
+#define IL_TX_FIFO_VI_MSK cpu_to_le32(BIT(2))
+#define IL_TX_FIFO_VO_MSK cpu_to_le32(BIT(3))
+#define IL_AGG_TX_QUEUE_MSK cpu_to_le32(0xffc00)
-#define IWL_DROP_SINGLE 0
-#define IWL_DROP_SELECTED 1
-#define IWL_DROP_ALL 2
+#define IL_DROP_SINGLE 0
+#define IL_DROP_SELECTED 1
+#define IL_DROP_ALL 2
/*
* REPLY_WEP_KEY = 0x20
*/
-struct iwl_wep_key {
- u8 key_index;
+struct il_wep_key {
+ u8 key_idx;
u8 key_offset;
u8 reserved1[2];
u8 key_size;
@@ -1117,12 +1107,12 @@ struct iwl_wep_key {
u8 key[16];
} __packed;
-struct iwl_wep_cmd {
+struct il_wep_cmd {
u8 num_keys;
u8 global_key_type;
u8 flags;
u8 reserved;
- struct iwl_wep_key key[0];
+ struct il_wep_key key[0];
} __packed;
#define WEP_KEY_WEP_TYPE 1
@@ -1168,8 +1158,7 @@ struct iwl_wep_cmd {
#define RX_MPDU_RES_STATUS_TTAK_OK (1 << 7)
#define RX_MPDU_RES_STATUS_DEC_DONE_MSK (0x800)
-
-struct iwl3945_rx_frame_stats {
+struct il3945_rx_frame_stats {
u8 phy_count;
u8 id;
u8 rssi;
@@ -1179,7 +1168,7 @@ struct iwl3945_rx_frame_stats {
u8 payload[0];
} __packed;
-struct iwl3945_rx_frame_hdr {
+struct il3945_rx_frame_hdr {
__le16 channel;
__le16 phy_flags;
u8 reserved1;
@@ -1188,73 +1177,71 @@ struct iwl3945_rx_frame_hdr {
u8 payload[0];
} __packed;
-struct iwl3945_rx_frame_end {
+struct il3945_rx_frame_end {
__le32 status;
__le64 timestamp;
__le32 beacon_timestamp;
} __packed;
/*
- * REPLY_3945_RX = 0x1b (response only, not a command)
+ * N_3945_RX = 0x1b (response only, not a command)
*
* NOTE: DO NOT dereference from casts to this structure
* It is provided only for calculating minimum data set size.
* The actual offsets of the hdr and end are dynamic based on
* stats.phy_count
*/
-struct iwl3945_rx_frame {
- struct iwl3945_rx_frame_stats stats;
- struct iwl3945_rx_frame_hdr hdr;
- struct iwl3945_rx_frame_end end;
+struct il3945_rx_frame {
+ struct il3945_rx_frame_stats stats;
+ struct il3945_rx_frame_hdr hdr;
+ struct il3945_rx_frame_end end;
} __packed;
-#define IWL39_RX_FRAME_SIZE (4 + sizeof(struct iwl3945_rx_frame))
+#define IL39_RX_FRAME_SIZE (4 + sizeof(struct il3945_rx_frame))
/* Fixed (non-configurable) rx data from phy */
-#define IWL49_RX_RES_PHY_CNT 14
-#define IWL49_RX_PHY_FLAGS_ANTENNAE_OFFSET (4)
-#define IWL49_RX_PHY_FLAGS_ANTENNAE_MASK (0x70)
-#define IWL49_AGC_DB_MASK (0x3f80) /* MASK(7,13) */
-#define IWL49_AGC_DB_POS (7)
-struct iwl4965_rx_non_cfg_phy {
+#define IL49_RX_RES_PHY_CNT 14
+#define IL49_RX_PHY_FLAGS_ANTENNAE_OFFSET (4)
+#define IL49_RX_PHY_FLAGS_ANTENNAE_MASK (0x70)
+#define IL49_AGC_DB_MASK (0x3f80) /* MASK(7,13) */
+#define IL49_AGC_DB_POS (7)
+struct il4965_rx_non_cfg_phy {
__le16 ant_selection; /* ant A bit 4, ant B bit 5, ant C bit 6 */
__le16 agc_info; /* agc code 0:6, agc dB 7:13, reserved 14:15 */
u8 rssi_info[6]; /* we use even entries, 0/2/4 for A/B/C rssi */
u8 pad[0];
} __packed;
-
/*
- * REPLY_RX = 0xc3 (response only, not a command)
+ * N_RX = 0xc3 (response only, not a command)
* Used only for legacy (non 11n) frames.
*/
-struct iwl_rx_phy_res {
- u8 non_cfg_phy_cnt; /* non configurable DSP phy data byte count */
+struct il_rx_phy_res {
+ u8 non_cfg_phy_cnt; /* non configurable DSP phy data byte count */
u8 cfg_phy_cnt; /* configurable DSP phy data byte count */
u8 stat_id; /* configurable DSP phy data set ID */
u8 reserved1;
__le64 timestamp; /* TSF at on air rise */
- __le32 beacon_time_stamp; /* beacon at on-air rise */
+ __le32 beacon_time_stamp; /* beacon at on-air rise */
__le16 phy_flags; /* general phy flags: band, modulation, ... */
__le16 channel; /* channel number */
- u8 non_cfg_phy_buf[32]; /* for various implementations of non_cfg_phy */
+ u8 non_cfg_phy_buf[32]; /* for various implementations of non_cfg_phy */
__le32 rate_n_flags; /* RATE_MCS_* */
__le16 byte_count; /* frame's byte-count */
__le16 frame_time; /* frame's time on the air */
} __packed;
-struct iwl_rx_mpdu_res_start {
+struct il_rx_mpdu_res_start {
__le16 byte_count;
__le16 reserved;
} __packed;
-
/******************************************************************************
* (5)
* Tx Commands & Responses:
*
- * Driver must place each REPLY_TX command into one of the prioritized Tx
+ * Driver must place each C_TX command into one of the prioritized Tx
* queues in host DRAM, shared between driver and device (see comments for
* SCD registers and Tx/Rx Queues). When the device's Tx scheduler and uCode
* are preparing to transmit, the device pulls the Tx command over the PCI
@@ -1264,18 +1251,18 @@ struct iwl_rx_mpdu_res_start {
* uCode handles all timing and protocol related to control frames
* (RTS/CTS/ACK), based on flags in the Tx command. uCode and Tx scheduler
* handle reception of block-acks; uCode updates the host driver via
- * REPLY_COMPRESSED_BA.
+ * N_COMPRESSED_BA.
*
* uCode handles retrying Tx when an ACK is expected but not received.
* This includes trying lower data rates than the one requested in the Tx
- * command, as set up by the REPLY_RATE_SCALE (for 3945) or
- * REPLY_TX_LINK_QUALITY_CMD (4965).
+ * command, as set up by the C_RATE_SCALE (for 3945) or
+ * C_TX_LINK_QUALITY_CMD (4965).
*
- * Driver sets up transmit power for various rates via REPLY_TX_PWR_TABLE_CMD.
+ * Driver sets up transmit power for various rates via C_TX_PWR_TBL.
* This command must be executed after every RXON command, before Tx can occur.
*****************************************************************************/
-/* REPLY_TX Tx flags field */
+/* C_TX Tx flags field */
/*
* 1: Use Request-To-Send protocol before this frame.
@@ -1296,8 +1283,8 @@ struct iwl_rx_mpdu_res_start {
#define TX_CMD_FLG_ACK_MSK cpu_to_le32(1 << 3)
/* For 4965 devices:
- * 1: Use rate scale table (see REPLY_TX_LINK_QUALITY_CMD).
- * Tx command's initial_rate_index indicates first rate to try;
+ * 1: Use rate scale table (see C_TX_LINK_QUALITY_CMD).
+ * Tx command's initial_rate_idx indicates first rate to try;
* uCode walks through table for additional Tx attempts.
* 0: Use Tx rate/MCS from Tx command's rate_n_flags field.
* This rate will be used for all Tx attempts; it will not be scaled. */
@@ -1322,7 +1309,7 @@ struct iwl_rx_mpdu_res_start {
/* 1: uCode overrides sequence control field in MAC header.
* 0: Driver provides sequence control field in MAC header.
* Set this for management frames, non-QOS data frames, non-unicast frames,
- * and also in Tx command embedded in REPLY_SCAN_CMD for active scans. */
+ * and also in Tx command embedded in C_SCAN for active scans. */
#define TX_CMD_FLG_SEQ_CTL_MSK cpu_to_le32(1 << 13)
/* 1: This frame is non-last MPDU; more fragments are coming.
@@ -1349,7 +1336,6 @@ struct iwl_rx_mpdu_res_start {
/* HCCA-AP - disable duration overwriting. */
#define TX_CMD_FLG_DUR_MSK cpu_to_le32(1 << 25)
-
/*
* TX command security control
*/
@@ -1369,10 +1355,10 @@ struct iwl_rx_mpdu_res_start {
#define TKIP_ICV_LEN 4
/*
- * REPLY_TX = 0x1c (command)
+ * C_TX = 0x1c (command)
*/
-struct iwl3945_tx_cmd {
+struct il3945_tx_cmd {
/*
* MPDU byte count:
* MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size,
@@ -1434,9 +1420,9 @@ struct iwl3945_tx_cmd {
} __packed;
/*
- * REPLY_TX = 0x1c (response)
+ * C_TX = 0x1c (response)
*/
-struct iwl3945_tx_resp {
+struct il3945_tx_resp {
u8 failure_rts;
u8 failure_frame;
u8 bt_kill_count;
@@ -1445,19 +1431,18 @@ struct iwl3945_tx_resp {
__le32 status; /* TX status */
} __packed;
-
/*
* 4965 uCode updates these Tx attempt count values in host DRAM.
* Used for managing Tx retries when expecting block-acks.
* Driver should set these fields to 0.
*/
-struct iwl_dram_scratch {
+struct il_dram_scratch {
u8 try_cnt; /* Tx attempts */
u8 bt_kill_cnt; /* Tx attempts blocked by Bluetooth device */
__le16 reserved;
} __packed;
-struct iwl_tx_cmd {
+struct il_tx_cmd {
/*
* MPDU byte count:
* MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size,
@@ -1481,7 +1466,7 @@ struct iwl_tx_cmd {
/* uCode may modify this field of the Tx command (in host DRAM!).
* Driver must also set dram_lsb_ptr and dram_msb_ptr in this cmd. */
- struct iwl_dram_scratch scratch;
+ struct il_dram_scratch scratch;
/* Rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is cleared. */
__le32 rate_n_flags; /* RATE_MCS_* */
@@ -1493,13 +1478,13 @@ struct iwl_tx_cmd {
u8 sec_ctl; /* TX_CMD_SEC_* */
/*
- * Index into rate table (see REPLY_TX_LINK_QUALITY_CMD) for initial
+ * Index into rate table (see C_TX_LINK_QUALITY_CMD) for initial
* Tx attempt, if TX_CMD_FLG_STA_RATE_MSK is set. Normally "0" for
* data frames, this field may be used to selectively reduce initial
* rate (via non-0 value) for special frames (e.g. management), while
* still supporting rate scaling for all frames.
*/
- u8 initial_rate_index;
+ u8 initial_rate_idx;
u8 reserved;
u8 key[16];
__le16 next_frame_flags;
@@ -1628,12 +1613,12 @@ enum {
};
enum {
- TX_STATUS_MSK = 0x000000ff, /* bits 0:7 */
+ TX_STATUS_MSK = 0x000000ff, /* bits 0:7 */
TX_STATUS_DELAY_MSK = 0x00000040,
TX_STATUS_ABORT_MSK = 0x00000080,
TX_PACKET_MODE_MSK = 0x0000ff00, /* bits 8:15 */
TX_FIFO_NUMBER_MSK = 0x00070000, /* bits 16:18 */
- TX_RESERVED = 0x00780000, /* bits 19:22 */
+ TX_RESERVED = 0x00780000, /* bits 19:22 */
TX_POWER_PA_DETECT_MSK = 0x7f800000, /* bits 23:30 */
TX_ABORT_REQUIRED_MSK = 0x80000000, /* bits 31:31 */
};
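
Given the bit layout above, the 32-bit Tx response status word splits into its fields with the listed masks and simple shifts. A host-endian sketch (plain uint32_t and a hypothetical status value, rather than the little-endian word returned by the device):

#include <stdint.h>
#include <stdio.h>

#define TX_STATUS_MSK		0x000000ff	/* bits 0:7   */
#define TX_PACKET_MODE_MSK	0x0000ff00	/* bits 8:15  */
#define TX_FIFO_NUMBER_MSK	0x00070000	/* bits 16:18 */
#define TX_POWER_PA_DETECT_MSK	0x7f800000	/* bits 23:30 */

int main(void)
{
	uint32_t status = 0x12345607;	/* hypothetical response word */

	printf("tx status   = 0x%02x\n", (unsigned)(status & TX_STATUS_MSK));
	printf("packet mode = 0x%02x\n", (unsigned)((status & TX_PACKET_MODE_MSK) >> 8));
	printf("tx fifo     = %u\n",     (unsigned)((status & TX_FIFO_NUMBER_MSK) >> 16));
	printf("pa detect   = 0x%02x\n", (unsigned)((status & TX_POWER_PA_DETECT_MSK) >> 23));
	return 0;
}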
@@ -1671,7 +1656,7 @@ enum {
#define AGG_TX_STATE_SEQ_NUM_MSK 0xffff0000
/*
- * REPLY_TX = 0x1c (response)
+ * C_TX = 0x1c (response)
*
* This response may be in one of two slightly different formats, indicated
* by the frame_count field:
@@ -1697,7 +1682,7 @@ struct agg_tx_status {
__le16 sequence;
} __packed;
-struct iwl4965_tx_resp {
+struct il4965_tx_resp {
u8 frame_count; /* 1 no aggregation, >1 aggregation */
u8 bt_kill_count; /* # blocked by bluetooth (unused for agg) */
u8 failure_rts; /* # failures due to unsuccessful RTS */
@@ -1730,16 +1715,16 @@ struct iwl4965_tx_resp {
*/
union {
__le32 status;
- struct agg_tx_status agg_status[0]; /* for each agg frame */
+ struct agg_tx_status agg_status[0]; /* for each agg frame */
} u;
} __packed;
/*
- * REPLY_COMPRESSED_BA = 0xc5 (response only, not a command)
+ * N_COMPRESSED_BA = 0xc5 (response only, not a command)
*
* Reports Block-Acknowledge from recipient station
*/
-struct iwl_compressed_ba_resp {
+struct il_compressed_ba_resp {
__le32 sta_addr_lo32;
__le16 sta_addr_hi16;
__le16 reserved;
@@ -1754,30 +1739,29 @@ struct iwl_compressed_ba_resp {
} __packed;
/*
- * REPLY_TX_PWR_TABLE_CMD = 0x97 (command, has simple generic response)
+ * C_TX_PWR_TBL = 0x97 (command, has simple generic response)
*
- * See details under "TXPOWER" in iwl-4965-hw.h.
+ * See details under "TXPOWER" in 4965.h.
*/
-struct iwl3945_txpowertable_cmd {
+struct il3945_txpowertable_cmd {
u8 band; /* 0: 5 GHz, 1: 2.4 GHz */
u8 reserved;
__le16 channel;
- struct iwl3945_power_per_rate power[IWL_MAX_RATES];
+ struct il3945_power_per_rate power[IL_MAX_RATES];
} __packed;
-struct iwl4965_txpowertable_cmd {
+struct il4965_txpowertable_cmd {
u8 band; /* 0: 5 GHz, 1: 2.4 GHz */
u8 reserved;
__le16 channel;
- struct iwl4965_tx_power_db tx_power;
+ struct il4965_tx_power_db tx_power;
} __packed;
-
/**
- * struct iwl3945_rate_scaling_cmd - Rate Scaling Command & Response
+ * struct il3945_rate_scaling_cmd - Rate Scaling Command & Response
*
- * REPLY_RATE_SCALE = 0x47 (command, has simple generic response)
+ * C_RATE_SCALE = 0x47 (command, has simple generic response)
*
* NOTE: The table of rates passed to the uCode via the
* RATE_SCALE command sets up the corresponding order of
@@ -1786,22 +1770,21 @@ struct iwl4965_txpowertable_cmd {
*
* For example, if you set 9MB (PLCP 0x0f) as the first
* rate in the rate table, the bit mask for that rate
- * when passed through ofdm_basic_rates on the REPLY_RXON
+ * when passed through ofdm_basic_rates on the C_RXON
* command would be bit 0 (1 << 0)
*/
-struct iwl3945_rate_scaling_info {
+struct il3945_rate_scaling_info {
__le16 rate_n_flags;
u8 try_cnt;
- u8 next_rate_index;
+ u8 next_rate_idx;
} __packed;
-struct iwl3945_rate_scaling_cmd {
+struct il3945_rate_scaling_cmd {
u8 table_id;
u8 reserved[3];
- struct iwl3945_rate_scaling_info table[IWL_MAX_RATES];
+ struct il3945_rate_scaling_info table[IL_MAX_RATES];
} __packed;
-
/*RS_NEW_API: only TLC_RTS remains and moved to bit 0 */
#define LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK (1 << 0)
@@ -1816,28 +1799,27 @@ struct iwl3945_rate_scaling_cmd {
#define LINK_QUAL_ANT_B_MSK (1 << 1)
#define LINK_QUAL_ANT_MSK (LINK_QUAL_ANT_A_MSK|LINK_QUAL_ANT_B_MSK)
-
/**
- * struct iwl_link_qual_general_params
+ * struct il_link_qual_general_params
*
- * Used in REPLY_TX_LINK_QUALITY_CMD
+ * Used in C_TX_LINK_QUALITY_CMD
*/
-struct iwl_link_qual_general_params {
+struct il_link_qual_general_params {
u8 flags;
- /* No entries at or above this (driver chosen) index contain MIMO */
+ /* No entries at or above this (driver chosen) idx contain MIMO */
u8 mimo_delimiter;
/* Best single antenna to use for single stream (legacy, SISO). */
u8 single_stream_ant_msk; /* LINK_QUAL_ANT_* */
/* Best antennas to use for MIMO (unused for 4965, assumes both). */
- u8 dual_stream_ant_msk; /* LINK_QUAL_ANT_* */
+ u8 dual_stream_ant_msk; /* LINK_QUAL_ANT_* */
/*
* If driver needs to use different initial rates for different
* EDCA QOS access categories (as implemented by tx fifos 0-3),
- * this table will set that up, by indicating the indexes in the
+ * this table will set that up, by indicating the idxes in the
* rs_table[LINK_QUAL_MAX_RETRY_NUM] rate table at which to start.
* Otherwise, driver should set all entries to 0.
*
@@ -1845,10 +1827,10 @@ struct iwl_link_qual_general_params {
* 0 = Background, 1 = Best Effort (normal), 2 = Video, 3 = Voice
* TX FIFOs above 3 use same value (typically 0) as TX FIFO 3.
*/
- u8 start_rate_index[LINK_QUAL_AC_NUM];
+ u8 start_rate_idx[LINK_QUAL_AC_NUM];
} __packed;
-#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) /* 4 milliseconds */
+#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) /* 4 milliseconds */
#define LINK_QUAL_AGG_TIME_LIMIT_MAX (8000)
#define LINK_QUAL_AGG_TIME_LIMIT_MIN (100)
@@ -1861,11 +1843,11 @@ struct iwl_link_qual_general_params {
#define LINK_QUAL_AGG_FRAME_LIMIT_MIN (0)
/**
- * struct iwl_link_qual_agg_params
+ * struct il_link_qual_agg_params
*
- * Used in REPLY_TX_LINK_QUALITY_CMD
+ * Used in C_TX_LINK_QUALITY_CMD
*/
-struct iwl_link_qual_agg_params {
+struct il_link_qual_agg_params {
/*
 * Maximum number of uSec in aggregation.
@@ -1892,9 +1874,9 @@ struct iwl_link_qual_agg_params {
} __packed;
/*
- * REPLY_TX_LINK_QUALITY_CMD = 0x4e (command, has simple generic response)
+ * C_TX_LINK_QUALITY_CMD = 0x4e (command, has simple generic response)
*
- * For 4965 devices only; 3945 uses REPLY_RATE_SCALE.
+ * For 4965 devices only; 3945 uses C_RATE_SCALE.
*
* Each station in the 4965 device's internal station table has its own table
* of 16
@@ -1903,13 +1885,13 @@ struct iwl_link_qual_agg_params {
* one station.
*
* NOTE: Station must already be in 4965 device's station table.
- * Use REPLY_ADD_STA.
+ * Use C_ADD_STA.
*
* The rate scaling procedures described below work well. Of course, other
* procedures are possible, and may work better for particular environments.
*
*
- * FILLING THE RATE TABLE
+ * FILLING THE RATE TBL
*
* Given a particular initial rate and mode, as determined by the rate
* scaling algorithm described below, the Linux driver uses the following
@@ -1948,13 +1930,13 @@ struct iwl_link_qual_agg_params {
* speculative mode as the new current active mode.
*
* Each history set contains, separately for each possible rate, data for a
- * sliding window of the 62 most recent tx attempts at that rate. The data
+ * sliding win of the 62 most recent tx attempts at that rate. The data
* includes a shifting bitmap of success(1)/failure(0), and sums of successful
* and attempted frames, from which the driver can additionally calculate a
* success ratio (success / attempted) and number of failures
- * (attempted - success), and control the size of the window (attempted).
+ * (attempted - success), and control the size of the win (attempted).
* The driver uses the bit map to remove successes from the success sum, as
- * the oldest tx attempts fall out of the window.
+ * the oldest tx attempts fall out of the win.
*
* When the 4965 device makes multiple tx attempts for a given frame, each
* attempt might be at a different rate, and have different modulation
@@ -1966,7 +1948,7 @@ struct iwl_link_qual_agg_params {
*
* When using block-ack (aggregation), all frames are transmitted at the same
* rate, since there is no per-attempt acknowledgment from the destination
- * station. The Tx response struct iwl_tx_resp indicates the Tx rate in
+ * station. The Tx response struct il_tx_resp indicates the Tx rate in
* rate_n_flags field. After receiving a block-ack, the driver can update
* history for the entire block all at once.
*
@@ -2016,8 +1998,8 @@ struct iwl_link_qual_agg_params {
* good performance; higher rate is sure to have poorer success.
*
* 6) Re-evaluate the rate after each tx frame. If working with block-
- * acknowledge, history and statistics may be calculated for the entire
- * block (including prior history that fits within the history windows),
+ * acknowledge, history and stats may be calculated for the entire
+ * block (including prior history that fits within the history wins),
* before re-evaluation.
*
* FINDING BEST STARTING MODULATION MODE:
@@ -2079,22 +2061,22 @@ struct iwl_link_qual_agg_params {
* legacy), and then repeat the search process.
*
*/
-struct iwl_link_quality_cmd {
+struct il_link_quality_cmd {
/* Index of destination/recipient station in uCode's station table */
u8 sta_id;
u8 reserved1;
__le16 control; /* not used */
- struct iwl_link_qual_general_params general_params;
- struct iwl_link_qual_agg_params agg_params;
+ struct il_link_qual_general_params general_params;
+ struct il_link_qual_agg_params agg_params;
/*
- * Rate info; when using rate-scaling, Tx command's initial_rate_index
- * specifies 1st Tx rate attempted, via index into this table.
+ * Rate info; when using rate-scaling, Tx command's initial_rate_idx
+ * specifies 1st Tx rate attempted, via idx into this table.
* 4965 devices works its way through table when retrying Tx.
*/
struct {
- __le32 rate_n_flags; /* RATE_MCS_*, IWL_RATE_* */
+ __le32 rate_n_flags; /* RATE_MCS_*, RATE_* */
} rs_table[LINK_QUAL_MAX_RETRY_NUM];
__le32 reserved2;
} __packed;
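
The rate-scaling history described above (a sliding win of the 62 most recent tx attempts per rate, a success bitmap, and running sums from which a success ratio is derived) can be modeled with a small host-side structure. This is an illustrative model only, assuming a 64-bit bitmap holding up to 62 entries; the names and layout are invented for the example and are not the driver's implementation.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RATE_WIN_SIZE 62	/* most recent tx attempts tracked per rate */

struct rate_win {
	uint64_t bitmap;	/* bit i: success/failure of the i-th newest attempt */
	unsigned int attempted;	/* window size, capped at RATE_WIN_SIZE */
	unsigned int success;	/* number of successes currently in the window */
};

/* Push one tx attempt into the window, dropping the oldest when full. */
static void rate_win_add(struct rate_win *w, bool acked)
{
	if (w->attempted == RATE_WIN_SIZE) {
		/* Oldest attempt falls out of the win; remove it from the sums. */
		if (w->bitmap & (1ULL << (RATE_WIN_SIZE - 1)))
			w->success--;
		w->bitmap &= ~(1ULL << (RATE_WIN_SIZE - 1));
	} else {
		w->attempted++;
	}
	w->bitmap = (w->bitmap << 1) | (acked ? 1ULL : 0ULL);
	if (acked)
		w->success++;
}

/* Success ratio in percent (success / attempted), used to compare rates. */
static unsigned int rate_win_success_ratio(const struct rate_win *w)
{
	return w->attempted ? (100 * w->success) / w->attempted : 0;
}

int main(void)
{
	struct rate_win w = { 0 };

	for (int i = 0; i < 10; i++)
		rate_win_add(&w, i % 3 != 0);	/* hypothetical ack pattern */
	printf("success ratio: %u%%\n", rate_win_success_ratio(&w));
	return 0;
}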
@@ -2117,13 +2099,13 @@ struct iwl_link_quality_cmd {
#define BT_MAX_KILL_DEF (0x5)
/*
- * REPLY_BT_CONFIG = 0x9b (command, has simple generic response)
+ * C_BT_CONFIG = 0x9b (command, has simple generic response)
*
* 3945 and 4965 devices support hardware handshake with Bluetooth device on
* same platform. Bluetooth device alerts wireless device when it will Tx;
* wireless device can delay or kill its own Tx to accommodate.
*/
-struct iwl_bt_cmd {
+struct il_bt_cmd {
u8 flags;
u8 lead_time;
u8 max_kill;
@@ -2132,7 +2114,6 @@ struct iwl_bt_cmd {
__le32 kill_cts_mask;
} __packed;
-
/******************************************************************************
* (6)
* Spectrum Management (802.11h) Commands, Responses, Notifications:
@@ -2150,18 +2131,18 @@ struct iwl_bt_cmd {
RXON_FILTER_ASSOC_MSK | \
RXON_FILTER_BCON_AWARE_MSK)
-struct iwl_measure_channel {
+struct il_measure_channel {
__le32 duration; /* measurement duration in extended beacon
* format */
u8 channel; /* channel to measure */
- u8 type; /* see enum iwl_measure_type */
+ u8 type; /* see enum il_measure_type */
__le16 reserved;
} __packed;
/*
- * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (command)
+ * C_SPECTRUM_MEASUREMENT = 0x74 (command)
*/
-struct iwl_spectrum_cmd {
+struct il_spectrum_cmd {
__le16 len; /* number of bytes starting from token */
u8 token; /* token id */
u8 id; /* measurement id -- 0 or 1 */
@@ -2174,13 +2155,13 @@ struct iwl_spectrum_cmd {
__le32 filter_flags; /* rxon filter flags */
__le16 channel_count; /* minimum 1, maximum 10 */
__le16 reserved3;
- struct iwl_measure_channel channels[10];
+ struct il_measure_channel channels[10];
} __packed;
/*
- * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (response)
+ * C_SPECTRUM_MEASUREMENT = 0x74 (response)
*/
-struct iwl_spectrum_resp {
+struct il_spectrum_resp {
u8 token;
u8 id; /* id of the prior command replaced, or 0xff */
__le16 status; /* 0 - command will be handled
@@ -2188,57 +2169,57 @@ struct iwl_spectrum_resp {
* measurement) */
} __packed;
-enum iwl_measurement_state {
- IWL_MEASUREMENT_START = 0,
- IWL_MEASUREMENT_STOP = 1,
+enum il_measurement_state {
+ IL_MEASUREMENT_START = 0,
+ IL_MEASUREMENT_STOP = 1,
};
-enum iwl_measurement_status {
- IWL_MEASUREMENT_OK = 0,
- IWL_MEASUREMENT_CONCURRENT = 1,
- IWL_MEASUREMENT_CSA_CONFLICT = 2,
- IWL_MEASUREMENT_TGH_CONFLICT = 3,
+enum il_measurement_status {
+ IL_MEASUREMENT_OK = 0,
+ IL_MEASUREMENT_CONCURRENT = 1,
+ IL_MEASUREMENT_CSA_CONFLICT = 2,
+ IL_MEASUREMENT_TGH_CONFLICT = 3,
/* 4-5 reserved */
- IWL_MEASUREMENT_STOPPED = 6,
- IWL_MEASUREMENT_TIMEOUT = 7,
- IWL_MEASUREMENT_PERIODIC_FAILED = 8,
+ IL_MEASUREMENT_STOPPED = 6,
+ IL_MEASUREMENT_TIMEOUT = 7,
+ IL_MEASUREMENT_PERIODIC_FAILED = 8,
};
#define NUM_ELEMENTS_IN_HISTOGRAM 8
-struct iwl_measurement_histogram {
+struct il_measurement_histogram {
__le32 ofdm[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 0.8usec counts */
__le32 cck[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 1usec counts */
} __packed;
/* clear channel availability counters */
-struct iwl_measurement_cca_counters {
+struct il_measurement_cca_counters {
__le32 ofdm;
__le32 cck;
} __packed;
-enum iwl_measure_type {
- IWL_MEASURE_BASIC = (1 << 0),
- IWL_MEASURE_CHANNEL_LOAD = (1 << 1),
- IWL_MEASURE_HISTOGRAM_RPI = (1 << 2),
- IWL_MEASURE_HISTOGRAM_NOISE = (1 << 3),
- IWL_MEASURE_FRAME = (1 << 4),
+enum il_measure_type {
+ IL_MEASURE_BASIC = (1 << 0),
+ IL_MEASURE_CHANNEL_LOAD = (1 << 1),
+ IL_MEASURE_HISTOGRAM_RPI = (1 << 2),
+ IL_MEASURE_HISTOGRAM_NOISE = (1 << 3),
+ IL_MEASURE_FRAME = (1 << 4),
/* bits 5:6 are reserved */
- IWL_MEASURE_IDLE = (1 << 7),
+ IL_MEASURE_IDLE = (1 << 7),
};
/*
- * SPECTRUM_MEASURE_NOTIFICATION = 0x75 (notification only, not a command)
+ * N_SPECTRUM_MEASUREMENT = 0x75 (notification only, not a command)
*/
-struct iwl_spectrum_notification {
+struct il_spectrum_notification {
u8 id; /* measurement id -- 0 or 1 */
u8 token;
- u8 channel_index; /* index in measurement channel list */
+ u8 channel_idx; /* idx in measurement channel list */
u8 state; /* 0 - start, 1 - stop */
__le32 start_time; /* lower 32-bits of TSF */
u8 band; /* 0 - 5.2GHz, 1 - 2.4GHz */
u8 channel;
- u8 type; /* see enum iwl_measurement_type */
+ u8 type; /* see enum il_measurement_type */
u8 reserved1;
/* NOTE: cca_ofdm, cca_cck, basic_type, and histogram are only

* valid if applicable for measurement type requested. */
@@ -2248,9 +2229,9 @@ struct iwl_spectrum_notification {
u8 basic_type; /* 0 - bss, 1 - ofdm preamble, 2 -
* unidentified */
u8 reserved2[3];
- struct iwl_measurement_histogram histogram;
+ struct il_measurement_histogram histogram;
__le32 stop_time; /* lower 32-bits of TSF */
- __le32 status; /* see iwl_measurement_status */
+ __le32 status; /* see il_measurement_status */
} __packed;
/******************************************************************************
@@ -2260,10 +2241,10 @@ struct iwl_spectrum_notification {
*****************************************************************************/
/**
- * struct iwl_powertable_cmd - Power Table Command
+ * struct il_powertable_cmd - Power Table Command
* @flags: See below:
*
- * POWER_TABLE_CMD = 0x77 (command, has simple generic response)
+ * C_POWER_TBL = 0x77 (command, has simple generic response)
*
* PM allow:
* bit 0 - '0' Driver not allow power management
@@ -2290,38 +2271,38 @@ struct iwl_spectrum_notification {
* '10' force xtal sleep
* '11' Illegal set
*
- * NOTE: if sleep_interval[SLEEP_INTRVL_TABLE_SIZE-1] > DTIM period then
+ * NOTE: if sleep_interval[SLEEP_INTRVL_TBL_SIZE-1] > DTIM period then
* ucode assume sleep over DTIM is allowed and we don't need to wake up
* for every DTIM.
*/
-#define IWL_POWER_VEC_SIZE 5
+#define IL_POWER_VEC_SIZE 5
-#define IWL_POWER_DRIVER_ALLOW_SLEEP_MSK cpu_to_le16(BIT(0))
-#define IWL_POWER_PCI_PM_MSK cpu_to_le16(BIT(3))
+#define IL_POWER_DRIVER_ALLOW_SLEEP_MSK cpu_to_le16(BIT(0))
+#define IL_POWER_PCI_PM_MSK cpu_to_le16(BIT(3))
-struct iwl3945_powertable_cmd {
+struct il3945_powertable_cmd {
__le16 flags;
u8 reserved[2];
__le32 rx_data_timeout;
__le32 tx_data_timeout;
- __le32 sleep_interval[IWL_POWER_VEC_SIZE];
+ __le32 sleep_interval[IL_POWER_VEC_SIZE];
} __packed;
-struct iwl_powertable_cmd {
+struct il_powertable_cmd {
__le16 flags;
- u8 keep_alive_seconds; /* 3945 reserved */
- u8 debug_flags; /* 3945 reserved */
+ u8 keep_alive_seconds; /* 3945 reserved */
+ u8 debug_flags; /* 3945 reserved */
__le32 rx_data_timeout;
__le32 tx_data_timeout;
- __le32 sleep_interval[IWL_POWER_VEC_SIZE];
+ __le32 sleep_interval[IL_POWER_VEC_SIZE];
__le32 keep_alive_beacons;
} __packed;
/*
- * PM_SLEEP_NOTIFICATION = 0x7A (notification only, not a command)
+ * N_PM_SLEEP = 0x7A (notification only, not a command)
* all devices identical.
*/
-struct iwl_sleep_notification {
+struct il_sleep_notification {
u8 pm_sleep_mode;
u8 pm_wakeup_src;
__le16 reserved;
@@ -2332,23 +2313,23 @@ struct iwl_sleep_notification {
/* Sleep states. all devices identical. */
enum {
- IWL_PM_NO_SLEEP = 0,
- IWL_PM_SLP_MAC = 1,
- IWL_PM_SLP_FULL_MAC_UNASSOCIATE = 2,
- IWL_PM_SLP_FULL_MAC_CARD_STATE = 3,
- IWL_PM_SLP_PHY = 4,
- IWL_PM_SLP_REPENT = 5,
- IWL_PM_WAKEUP_BY_TIMER = 6,
- IWL_PM_WAKEUP_BY_DRIVER = 7,
- IWL_PM_WAKEUP_BY_RFKILL = 8,
+ IL_PM_NO_SLEEP = 0,
+ IL_PM_SLP_MAC = 1,
+ IL_PM_SLP_FULL_MAC_UNASSOCIATE = 2,
+ IL_PM_SLP_FULL_MAC_CARD_STATE = 3,
+ IL_PM_SLP_PHY = 4,
+ IL_PM_SLP_REPENT = 5,
+ IL_PM_WAKEUP_BY_TIMER = 6,
+ IL_PM_WAKEUP_BY_DRIVER = 7,
+ IL_PM_WAKEUP_BY_RFKILL = 8,
/* 3 reserved */
- IWL_PM_NUM_OF_MODES = 12,
+ IL_PM_NUM_OF_MODES = 12,
};
/*
- * CARD_STATE_NOTIFICATION = 0xa1 (notification only, not a command)
+ * N_CARD_STATE = 0xa1 (notification only, not a command)
*/
-struct iwl_card_state_notif {
+struct il_card_state_notif {
__le32 flags;
} __packed;
@@ -2357,11 +2338,11 @@ struct iwl_card_state_notif {
#define CT_CARD_DISABLED 0x04
#define RXON_CARD_DISABLED 0x10
-struct iwl_ct_kill_config {
- __le32 reserved;
- __le32 critical_temperature_M;
- __le32 critical_temperature_R;
-} __packed;
+struct il_ct_kill_config {
+ __le32 reserved;
+ __le32 critical_temperature_M;
+ __le32 critical_temperature_R;
+} __packed;
/******************************************************************************
* (8)
@@ -2373,7 +2354,7 @@ struct iwl_ct_kill_config {
#define SCAN_CHANNEL_TYPE_ACTIVE cpu_to_le32(1)
/**
- * struct iwl_scan_channel - entry in REPLY_SCAN_CMD channel table
+ * struct il_scan_channel - entry in C_SCAN channel table
*
* One for each channel in the scan list.
* Each channel can independently select:
@@ -2383,7 +2364,7 @@ struct iwl_ct_kill_config {
* quiet_plcp_th, good_CRC_th)
*
* To avoid uCode errors, make sure the following are true (see comments
- * under struct iwl_scan_cmd about max_out_time and quiet_time):
+ * under struct il_scan_cmd about max_out_time and quiet_time):
* 1) If using passive_dwell (i.e. passive_dwell != 0):
* active_dwell <= passive_dwell (< max_out_time if max_out_time != 0)
* 2) quiet_time <= active_dwell
@@ -2391,7 +2372,7 @@ struct iwl_ct_kill_config {
* passive_dwell < max_out_time
* active_dwell < max_out_time
*/
-struct iwl3945_scan_channel {
+struct il3945_scan_channel {
/*
* type is defined as:
* 0:0 1 = active, 0 = passive
@@ -2400,16 +2381,16 @@ struct iwl3945_scan_channel {
* 5:7 reserved
*/
u8 type;
- u8 channel; /* band is selected by iwl3945_scan_cmd "flags" field */
- struct iwl3945_tx_power tpc;
+ u8 channel; /* band is selected by il3945_scan_cmd "flags" field */
+ struct il3945_tx_power tpc;
__le16 active_dwell; /* in 1024-uSec TU (time units), typ 5-50 */
__le16 passive_dwell; /* in 1024-uSec TU (time units), typ 20-500 */
} __packed;
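
The timing restrictions listed above for struct il_scan_channel translate directly into a few comparisons. A hedged sketch of such a validity check follows; the helper is hypothetical (not a driver function) and uses host-endian uint16_t values in place of the __le16 fields.

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical check derived from the dwell-time rules documented above. */
static bool scan_dwell_ok(uint16_t active_dwell, uint16_t passive_dwell,
			  uint16_t quiet_time, uint16_t max_out_time)
{
	if (passive_dwell && active_dwell > passive_dwell)
		return false;	/* rule 1: active_dwell <= passive_dwell */
	if (quiet_time > active_dwell)
		return false;	/* rule 2: quiet_time <= active_dwell */
	if (max_out_time &&
	    (passive_dwell >= max_out_time || active_dwell >= max_out_time))
		return false;	/* rule 3: both dwells < max_out_time */
	return true;
}

int main(void)
{
	/* Hypothetical dwell settings, in 1024-uSec TU. */
	return scan_dwell_ok(30, 120, 10, 0) ? 0 : 1;
}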
/* set number of direct probes u8 type */
-#define IWL39_SCAN_PROBE_MASK(n) ((BIT(n) | (BIT(n) - BIT(1))))
+#define IL39_SCAN_PROBE_MASK(n) ((BIT(n) | (BIT(n) - BIT(1))))
-struct iwl_scan_channel {
+struct il_scan_channel {
/*
* type is defined as:
* 0:0 1 = active, 0 = passive
@@ -2418,7 +2399,7 @@ struct iwl_scan_channel {
* 21:31 reserved
*/
__le32 type;
- __le16 channel; /* band is selected by iwl_scan_cmd "flags" field */
+ __le16 channel; /* band is selected by il_scan_cmd "flags" field */
u8 tx_gain; /* gain for analog radio */
u8 dsp_atten; /* gain for DSP */
__le16 active_dwell; /* in 1024-uSec TU (time units), typ 5-50 */
@@ -2426,17 +2407,17 @@ struct iwl_scan_channel {
} __packed;
/* set number of direct probes __le32 type */
-#define IWL_SCAN_PROBE_MASK(n) cpu_to_le32((BIT(n) | (BIT(n) - BIT(1))))
+#define IL_SCAN_PROBE_MASK(n) cpu_to_le32((BIT(n) | (BIT(n) - BIT(1))))
/**
- * struct iwl_ssid_ie - directed scan network information element
+ * struct il_ssid_ie - directed scan network information element
*
- * Up to 20 of these may appear in REPLY_SCAN_CMD (Note: Only 4 are in
- * 3945 SCAN api), selected by "type" bit field in struct iwl_scan_channel;
+ * Up to 20 of these may appear in C_SCAN (Note: Only 4 are in
+ * 3945 SCAN api), selected by "type" bit field in struct il_scan_channel;
* each channel may select different ssids from among the 20 (4) entries.
* SSID IEs get transmitted in reverse order of entry.
*/
-struct iwl_ssid_ie {
+struct il_ssid_ie {
u8 id;
u8 len;
u8 ssid[32];
@@ -2445,14 +2426,14 @@ struct iwl_ssid_ie {
#define PROBE_OPTION_MAX_3945 4
#define PROBE_OPTION_MAX 20
#define TX_CMD_LIFE_TIME_INFINITE cpu_to_le32(0xFFFFFFFF)
-#define IWL_GOOD_CRC_TH_DISABLED 0
-#define IWL_GOOD_CRC_TH_DEFAULT cpu_to_le16(1)
-#define IWL_GOOD_CRC_TH_NEVER cpu_to_le16(0xffff)
-#define IWL_MAX_SCAN_SIZE 1024
-#define IWL_MAX_CMD_SIZE 4096
+#define IL_GOOD_CRC_TH_DISABLED 0
+#define IL_GOOD_CRC_TH_DEFAULT cpu_to_le16(1)
+#define IL_GOOD_CRC_TH_NEVER cpu_to_le16(0xffff)
+#define IL_MAX_SCAN_SIZE 1024
+#define IL_MAX_CMD_SIZE 4096
/*
- * REPLY_SCAN_CMD = 0x80 (command)
+ * C_SCAN = 0x80 (command)
*
* The hardware scan command is very powerful; the driver can set it up to
* maintain (relatively) normal network traffic while doing a scan in the
@@ -2501,10 +2482,10 @@ struct iwl_ssid_ie {
* Driver must use separate scan commands for 2.4 vs. 5 GHz bands.
*
* To avoid uCode errors, see timing restrictions described under
- * struct iwl_scan_channel.
+ * struct il_scan_channel.
*/
-struct iwl3945_scan_cmd {
+struct il3945_scan_cmd {
__le16 len;
u8 reserved0;
u8 channel_count; /* # channels in channel list */
@@ -2525,10 +2506,10 @@ struct iwl3945_scan_cmd {
/* For active scans (set to all-0s for passive scans).
* Does not include payload. Must specify Tx rate; no rate scaling. */
- struct iwl3945_tx_cmd tx_cmd;
+ struct il3945_tx_cmd tx_cmd;
/* For directed active scans (set to all-0s otherwise) */
- struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX_3945];
+ struct il_ssid_ie direct_scan[PROBE_OPTION_MAX_3945];
/*
* Probe request frame, followed by channel list.
@@ -2538,17 +2519,17 @@ struct iwl3945_scan_cmd {
* Number of channels in list is specified by channel_count.
* Each channel in list is of type:
*
- * struct iwl3945_scan_channel channels[0];
+ * struct il3945_scan_channel channels[0];
*
* NOTE: Only one band of channels can be scanned per pass. You
* must not mix 2.4GHz channels and 5.2GHz channels, and you must wait
- * for one scan to complete (i.e. receive SCAN_COMPLETE_NOTIFICATION)
+ * for one scan to complete (i.e. receive N_SCAN_COMPLETE)
* before requesting another scan.
*/
u8 data[0];
} __packed;
-struct iwl_scan_cmd {
+struct il_scan_cmd {
__le16 len;
u8 reserved0;
u8 channel_count; /* # channels in channel list */
@@ -2569,10 +2550,10 @@ struct iwl_scan_cmd {
/* For active scans (set to all-0s for passive scans).
* Does not include payload. Must specify Tx rate; no rate scaling. */
- struct iwl_tx_cmd tx_cmd;
+ struct il_tx_cmd tx_cmd;
/* For directed active scans (set to all-0s otherwise) */
- struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];
+ struct il_ssid_ie direct_scan[PROBE_OPTION_MAX];
/*
* Probe request frame, followed by channel list.
@@ -2582,11 +2563,11 @@ struct iwl_scan_cmd {
* Number of channels in list is specified by channel_count.
* Each channel in list is of type:
*
- * struct iwl_scan_channel channels[0];
+ * struct il_scan_channel channels[0];
*
* NOTE: Only one band of channels can be scanned per pass. You
* must not mix 2.4GHz channels and 5.2GHz channels, and you must wait
- * for one scan to complete (i.e. receive SCAN_COMPLETE_NOTIFICATION)
+ * for one scan to complete (i.e. receive N_SCAN_COMPLETE)
* before requesting another scan.
*/
u8 data[0];
@@ -2598,16 +2579,16 @@ struct iwl_scan_cmd {
#define ABORT_STATUS 0x2
/*
- * REPLY_SCAN_CMD = 0x80 (response)
+ * C_SCAN = 0x80 (response)
*/
-struct iwl_scanreq_notification {
+struct il_scanreq_notification {
__le32 status; /* 1: okay, 2: cannot fulfill request */
} __packed;
/*
- * SCAN_START_NOTIFICATION = 0x82 (notification only, not a command)
+ * N_SCAN_START = 0x82 (notification only, not a command)
*/
-struct iwl_scanstart_notification {
+struct il_scanstart_notification {
__le32 tsf_low;
__le32 tsf_high;
__le32 beacon_timer;
@@ -2620,30 +2601,30 @@ struct iwl_scanstart_notification {
#define SCAN_OWNER_STATUS 0x1
#define MEASURE_OWNER_STATUS 0x2
-#define IWL_PROBE_STATUS_OK 0
-#define IWL_PROBE_STATUS_TX_FAILED BIT(0)
+#define IL_PROBE_STATUS_OK 0
+#define IL_PROBE_STATUS_TX_FAILED BIT(0)
/* error statuses combined with TX_FAILED */
-#define IWL_PROBE_STATUS_FAIL_TTL BIT(1)
-#define IWL_PROBE_STATUS_FAIL_BT BIT(2)
+#define IL_PROBE_STATUS_FAIL_TTL BIT(1)
+#define IL_PROBE_STATUS_FAIL_BT BIT(2)
-#define NUMBER_OF_STATISTICS 1 /* first __le32 is good CRC */
+#define NUMBER_OF_STATS 1 /* first __le32 is good CRC */
/*
- * SCAN_RESULTS_NOTIFICATION = 0x83 (notification only, not a command)
+ * N_SCAN_RESULTS = 0x83 (notification only, not a command)
*/
-struct iwl_scanresults_notification {
+struct il_scanresults_notification {
u8 channel;
u8 band;
u8 probe_status;
- u8 num_probe_not_sent; /* not enough time to send */
+ u8 num_probe_not_sent; /* not enough time to send */
__le32 tsf_low;
__le32 tsf_high;
- __le32 statistics[NUMBER_OF_STATISTICS];
+ __le32 stats[NUMBER_OF_STATS];
} __packed;
/*
- * SCAN_COMPLETE_NOTIFICATION = 0x84 (notification only, not a command)
+ * N_SCAN_COMPLETE = 0x84 (notification only, not a command)
*/
-struct iwl_scancomplete_notification {
+struct il_scancomplete_notification {
u8 scanned_channels;
u8 status;
u8 last_channel;
@@ -2651,50 +2632,49 @@ struct iwl_scancomplete_notification {
__le32 tsf_high;
} __packed;
-
/******************************************************************************
* (9)
* IBSS/AP Commands and Notifications:
*
*****************************************************************************/
-enum iwl_ibss_manager {
- IWL_NOT_IBSS_MANAGER = 0,
- IWL_IBSS_MANAGER = 1,
+enum il_ibss_manager {
+ IL_NOT_IBSS_MANAGER = 0,
+ IL_IBSS_MANAGER = 1,
};
/*
- * BEACON_NOTIFICATION = 0x90 (notification only, not a command)
+ * N_BEACON = 0x90 (notification only, not a command)
*/
-struct iwl3945_beacon_notif {
- struct iwl3945_tx_resp beacon_notify_hdr;
+struct il3945_beacon_notif {
+ struct il3945_tx_resp beacon_notify_hdr;
__le32 low_tsf;
__le32 high_tsf;
__le32 ibss_mgr_status;
} __packed;
-struct iwl4965_beacon_notif {
- struct iwl4965_tx_resp beacon_notify_hdr;
+struct il4965_beacon_notif {
+ struct il4965_tx_resp beacon_notify_hdr;
__le32 low_tsf;
__le32 high_tsf;
__le32 ibss_mgr_status;
} __packed;
/*
- * REPLY_TX_BEACON = 0x91 (command, has simple generic response)
+ * C_TX_BEACON = 0x91 (command, has simple generic response)
*/
-struct iwl3945_tx_beacon_cmd {
- struct iwl3945_tx_cmd tx;
+struct il3945_tx_beacon_cmd {
+ struct il3945_tx_cmd tx;
__le16 tim_idx;
u8 tim_size;
u8 reserved1;
struct ieee80211_hdr frame[0]; /* beacon frame */
} __packed;
-struct iwl_tx_beacon_cmd {
- struct iwl_tx_cmd tx;
+struct il_tx_beacon_cmd {
+ struct il_tx_cmd tx;
__le16 tim_idx;
u8 tim_size;
u8 reserved1;
@@ -2707,7 +2687,7 @@ struct iwl_tx_beacon_cmd {
*
*****************************************************************************/
-#define IWL_TEMP_CONVERT 260
+#define IL_TEMP_CONVERT 260
#define SUP_RATE_11A_MAX_NUM_CHANNELS 8
#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
@@ -2727,9 +2707,9 @@ struct rate_histogram {
} failed;
} __packed;
-/* statistics command response */
+/* stats command response */
-struct iwl39_statistics_rx_phy {
+struct iwl39_stats_rx_phy {
__le32 ina_cnt;
__le32 fina_cnt;
__le32 plcp_err;
@@ -2747,7 +2727,7 @@ struct iwl39_statistics_rx_phy {
__le32 sent_cts_cnt;
} __packed;
-struct iwl39_statistics_rx_non_phy {
+struct iwl39_stats_rx_non_phy {
__le32 bogus_cts; /* CTS received when not expecting CTS */
__le32 bogus_ack; /* ACK received when not expecting ACK */
__le32 non_bssid_frames; /* number of frames with BSSID that
@@ -2758,13 +2738,13 @@ struct iwl39_statistics_rx_non_phy {
* our serving channel */
} __packed;
-struct iwl39_statistics_rx {
- struct iwl39_statistics_rx_phy ofdm;
- struct iwl39_statistics_rx_phy cck;
- struct iwl39_statistics_rx_non_phy general;
+struct iwl39_stats_rx {
+ struct iwl39_stats_rx_phy ofdm;
+ struct iwl39_stats_rx_phy cck;
+ struct iwl39_stats_rx_non_phy general;
} __packed;
-struct iwl39_statistics_tx {
+struct iwl39_stats_tx {
__le32 preamble_cnt;
__le32 rx_detected_cnt;
__le32 bt_prio_defer_cnt;
@@ -2776,31 +2756,31 @@ struct iwl39_statistics_tx {
__le32 actual_ack_cnt;
} __packed;
-struct statistics_dbg {
+struct stats_dbg {
__le32 burst_check;
__le32 burst_count;
__le32 wait_for_silence_timeout_cnt;
__le32 reserved[3];
} __packed;
-struct iwl39_statistics_div {
+struct iwl39_stats_div {
__le32 tx_on_a;
__le32 tx_on_b;
__le32 exec_time;
__le32 probe_time;
} __packed;
-struct iwl39_statistics_general {
+struct iwl39_stats_general {
__le32 temperature;
- struct statistics_dbg dbg;
+ struct stats_dbg dbg;
__le32 sleep_time;
__le32 slots_out;
__le32 slots_idle;
__le32 ttl_timestamp;
- struct iwl39_statistics_div div;
+ struct iwl39_stats_div div;
} __packed;
-struct statistics_rx_phy {
+struct stats_rx_phy {
__le32 ina_cnt;
__le32 fina_cnt;
__le32 plcp_err;
@@ -2823,7 +2803,7 @@ struct statistics_rx_phy {
__le32 reserved3;
} __packed;
-struct statistics_rx_ht_phy {
+struct stats_rx_ht_phy {
__le32 plcp_err;
__le32 overrun_err;
__le32 early_overrun_err;
@@ -2838,7 +2818,7 @@ struct statistics_rx_ht_phy {
#define INTERFERENCE_DATA_AVAILABLE cpu_to_le32(1)
-struct statistics_rx_non_phy {
+struct stats_rx_non_phy {
__le32 bogus_cts; /* CTS received when not expecting CTS */
__le32 bogus_ack; /* ACK received when not expecting ACK */
__le32 non_bssid_frames; /* number of frames with BSSID that
@@ -2852,15 +2832,15 @@ struct statistics_rx_non_phy {
__le32 num_missed_bcon; /* number of missed beacons */
__le32 adc_rx_saturation_time; /* count in 0.8us units the time the
* ADC was in saturation */
- __le32 ina_detection_search_time;/* total time (in 0.8us) searched
- * for INA */
+ __le32 ina_detection_search_time; /* total time (in 0.8us) searched
+ * for INA */
__le32 beacon_silence_rssi_a; /* RSSI silence after beacon frame */
__le32 beacon_silence_rssi_b; /* RSSI silence after beacon frame */
__le32 beacon_silence_rssi_c; /* RSSI silence after beacon frame */
__le32 interference_data_flag; /* flag for interference data
* availability. 1 when data is
* available. */
- __le32 channel_load; /* counts RX Enable time in uSec */
+ __le32 channel_load; /* counts RX Enable time in uSec */
__le32 dsp_false_alarms; /* DSP false alarm (both OFDM
* and CCK) counter */
__le32 beacon_rssi_a;
@@ -2871,28 +2851,28 @@ struct statistics_rx_non_phy {
__le32 beacon_energy_c;
} __packed;
-struct statistics_rx {
- struct statistics_rx_phy ofdm;
- struct statistics_rx_phy cck;
- struct statistics_rx_non_phy general;
- struct statistics_rx_ht_phy ofdm_ht;
+struct stats_rx {
+ struct stats_rx_phy ofdm;
+ struct stats_rx_phy cck;
+ struct stats_rx_non_phy general;
+ struct stats_rx_ht_phy ofdm_ht;
} __packed;
/**
- * struct statistics_tx_power - current tx power
+ * struct stats_tx_power - current tx power
*
* @ant_a: current tx power on chain a in 1/2 dB step
* @ant_b: current tx power on chain b in 1/2 dB step
* @ant_c: current tx power on chain c in 1/2 dB step
*/
-struct statistics_tx_power {
+struct stats_tx_power {
u8 ant_a;
u8 ant_b;
u8 ant_c;
u8 reserved;
} __packed;
-struct statistics_tx_non_phy_agg {
+struct stats_tx_non_phy_agg {
__le32 ba_timeout;
__le32 ba_reschedule_frames;
__le32 scd_query_agg_frame_cnt;
@@ -2905,7 +2885,7 @@ struct statistics_tx_non_phy_agg {
__le32 rx_ba_rsp_cnt;
} __packed;
-struct statistics_tx {
+struct stats_tx {
__le32 preamble_cnt;
__le32 rx_detected_cnt;
__le32 bt_prio_defer_cnt;
@@ -2920,13 +2900,12 @@ struct statistics_tx {
__le32 burst_abort_missing_next_frame_cnt;
__le32 cts_timeout_collision;
__le32 ack_or_ba_timeout_collision;
- struct statistics_tx_non_phy_agg agg;
+ struct stats_tx_non_phy_agg agg;
__le32 reserved1;
} __packed;
-
-struct statistics_div {
+struct stats_div {
__le32 tx_on_a;
__le32 tx_on_b;
__le32 exec_time;
@@ -2935,14 +2914,14 @@ struct statistics_div {
__le32 reserved2;
} __packed;
-struct statistics_general_common {
- __le32 temperature; /* radio temperature */
- struct statistics_dbg dbg;
+struct stats_general_common {
+ __le32 temperature; /* radio temperature */
+ struct stats_dbg dbg;
__le32 sleep_time;
__le32 slots_out;
__le32 slots_idle;
__le32 ttl_timestamp;
- struct statistics_div div;
+ struct stats_div div;
__le32 rx_enable_counter;
/*
* num_of_sos_states:
@@ -2952,73 +2931,73 @@ struct statistics_general_common {
__le32 num_of_sos_states;
} __packed;
-struct statistics_general {
- struct statistics_general_common common;
+struct stats_general {
+ struct stats_general_common common;
__le32 reserved2;
__le32 reserved3;
} __packed;
-#define UCODE_STATISTICS_CLEAR_MSK (0x1 << 0)
-#define UCODE_STATISTICS_FREQUENCY_MSK (0x1 << 1)
-#define UCODE_STATISTICS_NARROW_BAND_MSK (0x1 << 2)
+#define UCODE_STATS_CLEAR_MSK (0x1 << 0)
+#define UCODE_STATS_FREQUENCY_MSK (0x1 << 1)
+#define UCODE_STATS_NARROW_BAND_MSK (0x1 << 2)
/*
- * REPLY_STATISTICS_CMD = 0x9c,
+ * C_STATS = 0x9c,
* all devices identical.
*
- * This command triggers an immediate response containing uCode statistics.
- * The response is in the same format as STATISTICS_NOTIFICATION 0x9d, below.
+ * This command triggers an immediate response containing uCode stats.
+ * The response is in the same format as N_STATS 0x9d, below.
*
* If the CLEAR_STATS configuration flag is set, uCode will clear its
- * internal copy of the statistics (counters) after issuing the response.
- * This flag does not affect STATISTICS_NOTIFICATIONs after beacons (see below).
+ * internal copy of the stats (counters) after issuing the response.
+ * This flag does not affect N_STATSs after beacons (see below).
*
* If the DISABLE_NOTIF configuration flag is set, uCode will not issue
- * STATISTICS_NOTIFICATIONs after received beacons (see below). This flag
- * does not affect the response to the REPLY_STATISTICS_CMD 0x9c itself.
+ * N_STATSs after received beacons (see below). This flag
+ * does not affect the response to the C_STATS 0x9c itself.
*/
-#define IWL_STATS_CONF_CLEAR_STATS cpu_to_le32(0x1) /* see above */
-#define IWL_STATS_CONF_DISABLE_NOTIF cpu_to_le32(0x2)/* see above */
-struct iwl_statistics_cmd {
- __le32 configuration_flags; /* IWL_STATS_CONF_* */
+#define IL_STATS_CONF_CLEAR_STATS cpu_to_le32(0x1) /* see above */
+#define IL_STATS_CONF_DISABLE_NOTIF cpu_to_le32(0x2) /* see above */
+struct il_stats_cmd {
+ __le32 configuration_flags; /* IL_STATS_CONF_* */
} __packed;
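+/*
+ * Illustrative sketch: a driver can request an immediate stats dump and
+ * clear the uCode counters using the il_send_cmd_pdu() helper, e.g.:
+ *
+ *	struct il_stats_cmd cmd = {
+ *		.configuration_flags = IL_STATS_CONF_CLEAR_STATS,
+ *	};
+ *
+ *	il_send_cmd_pdu(il, C_STATS, sizeof(cmd), &cmd);
+ */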
/*
- * STATISTICS_NOTIFICATION = 0x9d (notification only, not a command)
+ * N_STATS = 0x9d (notification only, not a command)
*
* By default, uCode issues this notification after receiving a beacon
* while associated. To disable this behavior, set DISABLE_NOTIF flag in the
- * REPLY_STATISTICS_CMD 0x9c, above.
+ * C_STATS 0x9c, above.
*
* Statistics counters continue to increment beacon after beacon, but are
- * cleared when changing channels or when driver issues REPLY_STATISTICS_CMD
+ * cleared when changing channels or when driver issues C_STATS
* 0x9c with CLEAR_STATS bit set (see above).
*
- * uCode also issues this notification during scans. uCode clears statistics
- * appropriately so that each notification contains statistics for only the
+ * uCode also issues this notification during scans. uCode clears stats
+ * appropriately so that each notification contains stats for only the
* one channel that has just been scanned.
*/
-#define STATISTICS_REPLY_FLG_BAND_24G_MSK cpu_to_le32(0x2)
-#define STATISTICS_REPLY_FLG_HT40_MODE_MSK cpu_to_le32(0x8)
+#define STATS_REPLY_FLG_BAND_24G_MSK cpu_to_le32(0x2)
+#define STATS_REPLY_FLG_HT40_MODE_MSK cpu_to_le32(0x8)
-struct iwl3945_notif_statistics {
+struct il3945_notif_stats {
__le32 flag;
- struct iwl39_statistics_rx rx;
- struct iwl39_statistics_tx tx;
- struct iwl39_statistics_general general;
+ struct iwl39_stats_rx rx;
+ struct iwl39_stats_tx tx;
+ struct iwl39_stats_general general;
} __packed;
-struct iwl_notif_statistics {
+struct il_notif_stats {
__le32 flag;
- struct statistics_rx rx;
- struct statistics_tx tx;
- struct statistics_general general;
+ struct stats_rx rx;
+ struct stats_tx tx;
+ struct stats_general general;
} __packed;
/*
- * MISSED_BEACONS_NOTIFICATION = 0xa2 (notification only, not a command)
+ * N_MISSED_BEACONS = 0xa2 (notification only, not a command)
*
- * uCode send MISSED_BEACONS_NOTIFICATION to driver when detect beacon missed
+ * uCode sends N_MISSED_BEACONS to the driver whenever it detects that
 * beacons were missed, regardless of how many. When the driver receives the
 * notification, it can find all the beacon information inside the command:
 * the number of total missed beacons, the number of consecutive missed
@@ -3035,18 +3014,17 @@ struct iwl_notif_statistics {
*
*/
-#define IWL_MISSED_BEACON_THRESHOLD_MIN (1)
-#define IWL_MISSED_BEACON_THRESHOLD_DEF (5)
-#define IWL_MISSED_BEACON_THRESHOLD_MAX IWL_MISSED_BEACON_THRESHOLD_DEF
+#define IL_MISSED_BEACON_THRESHOLD_MIN (1)
+#define IL_MISSED_BEACON_THRESHOLD_DEF (5)
+#define IL_MISSED_BEACON_THRESHOLD_MAX IL_MISSED_BEACON_THRESHOLD_DEF
-struct iwl_missed_beacon_notif {
+struct il_missed_beacon_notif {
__le32 consecutive_missed_beacons;
__le32 total_missed_becons;
__le32 num_expected_beacons;
__le32 num_recvd_beacons;
} __packed;
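+/*
+ * Minimal consumer sketch (the real handler lives in the driver, not in
+ * this header; reacting with a recalibration is an assumption here):
+ *
+ *	struct il_missed_beacon_notif *mb = &pkt->u.missed_beacon;
+ *
+ *	if (le32_to_cpu(mb->consecutive_missed_beacons) >
+ *	    IL_MISSED_BEACON_THRESHOLD_DEF)
+ *		handle_missed_beacons(il);    (hypothetical helper)
+ */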
-
/******************************************************************************
* (11)
* Rx Calibration Commands:
@@ -3062,7 +3040,7 @@ struct iwl_missed_beacon_notif {
*****************************************************************************/
/**
- * SENSITIVITY_CMD = 0xa8 (command, has simple generic response)
+ * C_SENSITIVITY = 0xa8 (command, has simple generic response)
*
* This command sets up the Rx signal detector for a sensitivity level that
* is high enough to lock onto all signals within the associated network,
@@ -3076,12 +3054,12 @@ struct iwl_missed_beacon_notif {
* time listening, not transmitting). Driver must adjust sensitivity so that
* the ratio of actual false alarms to actual Rx time falls within this range.
*
- * While associated, uCode delivers STATISTICS_NOTIFICATIONs after each
+ * While associated, uCode delivers N_STATSs after each
* received beacon. These provide information to the driver to analyze the
- * sensitivity. Don't analyze statistics that come in from scanning, or any
- * other non-associated-network source. Pertinent statistics include:
+ * sensitivity. Don't analyze stats that come in from scanning, or any
+ * other non-associated-network source. Pertinent stats include:
*
- * From "general" statistics (struct statistics_rx_non_phy):
+ * From "general" stats (struct stats_rx_non_phy):
*
* (beacon_energy_[abc] & 0x0FF00) >> 8 (unsigned, higher value is lower level)
* Measure of energy of desired signal. Used for establishing a level
@@ -3094,7 +3072,7 @@ struct iwl_missed_beacon_notif {
* uSecs of actual Rx time during beacon period (varies according to
* how much time was spent transmitting).
*
- * From "cck" and "ofdm" statistics (struct statistics_rx_phy), separately:
+ * From "cck" and "ofdm" stats (struct stats_rx_phy), separately:
*
* false_alarm_cnt
* Signal locks abandoned early (before phy-level header).
@@ -3111,15 +3089,15 @@ struct iwl_missed_beacon_notif {
*
* Total number of false alarms = false_alarms + plcp_errs
*
- * For OFDM, adjust the following table entries in struct iwl_sensitivity_cmd
+ * For OFDM, adjust the following table entries in struct il_sensitivity_cmd
* (notice that the start points for OFDM are at or close to settings for
* maximum sensitivity):
*
* START / MIN / MAX
- * HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX 90 / 85 / 120
- * HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX 170 / 170 / 210
- * HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX 105 / 105 / 140
- * HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX 220 / 220 / 270
+ * HD_AUTO_CORR32_X1_TH_ADD_MIN_IDX 90 / 85 / 120
+ * HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_IDX 170 / 170 / 210
+ * HD_AUTO_CORR32_X4_TH_ADD_MIN_IDX 105 / 105 / 140
+ * HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_IDX 220 / 220 / 270
*
* If actual rate of OFDM false alarms (+ plcp_errors) is too high
* (greater than 50 for each 204.8 msecs listening), reduce sensitivity
@@ -3152,30 +3130,30 @@ struct iwl_missed_beacon_notif {
* Reset this to 0 at the first beacon period that falls within the
* "good" range (5 to 50 false alarms per 204.8 milliseconds rx).
*
- * Then, adjust the following CCK table entries in struct iwl_sensitivity_cmd
+ * Then, adjust the following CCK table entries in struct il_sensitivity_cmd
* (notice that the start points for CCK are at maximum sensitivity):
*
* START / MIN / MAX
- * HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX 125 / 125 / 200
- * HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX 200 / 200 / 400
- * HD_MIN_ENERGY_CCK_DET_INDEX 100 / 0 / 100
+ * HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX 125 / 125 / 200
+ * HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX 200 / 200 / 400
+ * HD_MIN_ENERGY_CCK_DET_IDX 100 / 0 / 100
*
* If actual rate of CCK false alarms (+ plcp_errors) is too high
* (greater than 50 for each 204.8 msecs listening), method for reducing
* sensitivity is:
*
- * 1) *Add* 3 to value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX,
+ * 1) *Add* 3 to value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX,
* up to max 400.
*
- * 2) If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX is < 160,
+ * 2) If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX is < 160,
* sensitivity has been reduced a significant amount; bring it up to
* a moderate 161. Otherwise, *add* 3, up to max 200.
*
- * 3) a) If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX is > 160,
+ * 3) a) If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX is > 160,
* sensitivity has been reduced only a moderate or small amount;
- * *subtract* 2 from value in HD_MIN_ENERGY_CCK_DET_INDEX,
+ * *subtract* 2 from value in HD_MIN_ENERGY_CCK_DET_IDX,
* down to min 0. Otherwise (if gain has been significantly reduced),
- * don't change the HD_MIN_ENERGY_CCK_DET_INDEX value.
+ * don't change the HD_MIN_ENERGY_CCK_DET_IDX value.
*
* b) Save a snapshot of the "silence reference".
*
@@ -3191,13 +3169,13 @@ struct iwl_missed_beacon_notif {
*
* Method for increasing sensitivity:
*
- * 1) *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX,
+ * 1) *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX,
* down to min 125.
*
- * 2) *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX,
+ * 2) *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX,
* down to min 200.
*
- * 3) *Add* 2 to value in HD_MIN_ENERGY_CCK_DET_INDEX, up to max 100.
+ * 3) *Add* 2 to value in HD_MIN_ENERGY_CCK_DET_IDX, up to max 100.
*
* If actual rate of CCK false alarms (+ plcp_errors) is within good range
* (between 5 and 50 for each 204.8 msecs listening):
@@ -3206,57 +3184,56 @@ struct iwl_missed_beacon_notif {
*
* 2) If previous beacon had too many CCK false alarms (+ plcp_errors),
* give some extra margin to energy threshold by *subtracting* 8
- * from value in HD_MIN_ENERGY_CCK_DET_INDEX.
+ * from value in HD_MIN_ENERGY_CCK_DET_IDX.
*
* For all cases (too few, too many, good range), make sure that the CCK
* detection threshold (energy) is below the energy level for robust
* detection over the past 10 beacon periods, the "Max cck energy".
* Lower values mean higher energy; this means making sure that the value
- * in HD_MIN_ENERGY_CCK_DET_INDEX is at or *above* "Max cck energy".
+ * in HD_MIN_ENERGY_CCK_DET_IDX is at or *above* "Max cck energy".
*
*/
/*
- * Table entries in SENSITIVITY_CMD (struct iwl_sensitivity_cmd)
- */
-#define HD_TABLE_SIZE (11) /* number of entries */
-#define HD_MIN_ENERGY_CCK_DET_INDEX (0) /* table indexes */
-#define HD_MIN_ENERGY_OFDM_DET_INDEX (1)
-#define HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX (2)
-#define HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX (3)
-#define HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX (4)
-#define HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX (5)
-#define HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX (6)
-#define HD_BARKER_CORR_TH_ADD_MIN_INDEX (7)
-#define HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX (8)
-#define HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX (9)
-#define HD_OFDM_ENERGY_TH_IN_INDEX (10)
-
-/* Control field in struct iwl_sensitivity_cmd */
-#define SENSITIVITY_CMD_CONTROL_DEFAULT_TABLE cpu_to_le16(0)
-#define SENSITIVITY_CMD_CONTROL_WORK_TABLE cpu_to_le16(1)
+ * Table entries in C_SENSITIVITY (struct il_sensitivity_cmd)
+ */
+#define HD_TBL_SIZE (11) /* number of entries */
+#define HD_MIN_ENERGY_CCK_DET_IDX (0) /* table idxes */
+#define HD_MIN_ENERGY_OFDM_DET_IDX (1)
+#define HD_AUTO_CORR32_X1_TH_ADD_MIN_IDX (2)
+#define HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_IDX (3)
+#define HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX (4)
+#define HD_AUTO_CORR32_X4_TH_ADD_MIN_IDX (5)
+#define HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_IDX (6)
+#define HD_BARKER_CORR_TH_ADD_MIN_IDX (7)
+#define HD_BARKER_CORR_TH_ADD_MIN_MRC_IDX (8)
+#define HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX (9)
+#define HD_OFDM_ENERGY_TH_IN_IDX (10)
+
+/* Control field in struct il_sensitivity_cmd */
+#define C_SENSITIVITY_CONTROL_DEFAULT_TBL cpu_to_le16(0)
+#define C_SENSITIVITY_CONTROL_WORK_TBL cpu_to_le16(1)
/**
- * struct iwl_sensitivity_cmd
+ * struct il_sensitivity_cmd
* @control: (1) updates working table, (0) updates default table
- * @table: energy threshold values, use HD_* as index into table
+ * @table: energy threshold values, use HD_* as idx into table
*
* Always use "1" in "control" to update uCode's working table and DSP.
*/
-struct iwl_sensitivity_cmd {
- __le16 control; /* always use "1" */
- __le16 table[HD_TABLE_SIZE]; /* use HD_* as index */
+struct il_sensitivity_cmd {
+ __le16 control; /* always use "1" */
+ __le16 table[HD_TBL_SIZE]; /* use HD_* as idx */
} __packed;
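+/*
+ * Illustrative sketch of CCK step 1) above (cur_mrc, the currently
+ * programmed value, is assumed to be tracked by the driver):
+ *
+ *	struct il_sensitivity_cmd cmd = {
+ *		.control = C_SENSITIVITY_CONTROL_WORK_TBL,
+ *	};
+ *
+ *	cmd.table[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX] =
+ *		cpu_to_le16(min(cur_mrc + 3, 400));
+ *	(fill the remaining HD_* entries with their current values)
+ *	il_send_cmd_pdu(il, C_SENSITIVITY, sizeof(cmd), &cmd);
+ */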
-
/**
- * REPLY_PHY_CALIBRATION_CMD = 0xb0 (command, has simple generic response)
+ * C_PHY_CALIBRATION = 0xb0 (command, has simple generic response)
*
* This command sets the relative gains of 4965 device's 3 radio receiver chains.
*
* After the first association, driver should accumulate signal and noise
- * statistics from the STATISTICS_NOTIFICATIONs that follow the first 20
- * beacons from the associated network (don't collect statistics that come
+ * stats from the N_STATSs that follow the first 20
+ * beacons from the associated network (don't collect stats that come
* in from scanning, or any other non-network source).
*
* DISCONNECTED ANTENNA:
@@ -3264,7 +3241,7 @@ struct iwl_sensitivity_cmd {
* Driver should determine which antennas are actually connected, by comparing
* average beacon signal levels for the 3 Rx chains. Accumulate (add) the
* following values over 20 beacons, one accumulator for each of the chains
- * a/b/c, from struct statistics_rx_non_phy:
+ * a/b/c, from struct stats_rx_non_phy:
*
* beacon_rssi_[abc] & 0x0FF (unsigned, units in dB)
*
@@ -3283,7 +3260,7 @@ struct iwl_sensitivity_cmd {
* to antennas, see above) for gain, by comparing the average signal levels
* detected during the silence after each beacon (background noise).
* Accumulate (add) the following values over 20 beacons, one accumulator for
- * each of the chains a/b/c, from struct statistics_rx_non_phy:
+ * each of the chains a/b/c, from struct stats_rx_non_phy:
*
* beacon_silence_rssi_[abc] & 0x0FF (unsigned, units in dB)
*
@@ -3294,7 +3271,7 @@ struct iwl_sensitivity_cmd {
* (accum_noise[i] - accum_noise[reference]) / 30
*
* The "30" adjusts the dB in the 20 accumulated samples to units of 1.5 dB.
- * For use in diff_gain_[abc] fields of struct iwl_calibration_cmd, the
+ * For use in diff_gain_[abc] fields of struct il_calibration_cmd, the
* driver should limit the difference results to a range of 0-3 (0-4.5 dB),
* and set bit 2 to indicate "reduce gain". The value for the reference
* (weakest) chain should be "0".
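+ *
+ * Worked example of the formula above (values are illustrative): with
+ * 20-beacon noise accumulators of 940 / 1220 / 1100 for chains a / b / c,
+ * chain a is the reference. Chain b: (1220 - 940) / 30 = 9, limited to 3,
+ * sent with bit 2 set to request "reduce gain"; chain c:
+ * (1100 - 940) / 30 = 5, also limited to 3; chain a itself is sent as 0.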
@@ -3306,24 +3283,24 @@ struct iwl_sensitivity_cmd {
/* Phy calibration command for series */
/* The default calibrate table size if not specified by firmware */
-#define IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE 18
+#define IL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE 18
enum {
- IWL_PHY_CALIBRATE_DIFF_GAIN_CMD = 7,
- IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE = 19,
+ IL_PHY_CALIBRATE_DIFF_GAIN_CMD = 7,
+ IL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE = 19,
};
-#define IWL_MAX_PHY_CALIBRATE_TBL_SIZE (253)
+#define IL_MAX_PHY_CALIBRATE_TBL_SIZE (253)
-struct iwl_calib_hdr {
+struct il_calib_hdr {
u8 op_code;
u8 first_group;
u8 groups_num;
u8 data_valid;
} __packed;
-/* IWL_PHY_CALIBRATE_DIFF_GAIN_CMD (7) */
-struct iwl_calib_diff_gain_cmd {
- struct iwl_calib_hdr hdr;
+/* IL_PHY_CALIBRATE_DIFF_GAIN_CMD (7) */
+struct il_calib_diff_gain_cmd {
+ struct il_calib_hdr hdr;
s8 diff_gain_a; /* see above */
s8 diff_gain_b;
s8 diff_gain_c;
@@ -3338,12 +3315,12 @@ struct iwl_calib_diff_gain_cmd {
/*
* LEDs Command & Response
- * REPLY_LEDS_CMD = 0x48 (command, has simple generic response)
+ * C_LEDS = 0x48 (command, has simple generic response)
*
* For each of 3 possible LEDs (Activity/Link/Tech, selected by "id" field),
* this command turns it on or off, or sets up a periodic blinking cycle.
*/
-struct iwl_led_cmd {
+struct il_led_cmd {
__le32 interval; /* "interval" in uSec */
u8 id; /* 1: Activity, 2: Link, 3: Tech */
u8 off; /* # intervals off while blinking;
@@ -3353,14 +3330,15 @@ struct iwl_led_cmd {
u8 reserved;
} __packed;
-
/******************************************************************************
* (13)
* Union of all expected notifications/responses:
*
*****************************************************************************/
-struct iwl_rx_packet {
+#define IL_RX_FRAME_SIZE_MSK 0x00003fff
+
+struct il_rx_pkt {
/*
* The first 4 bytes of the RX frame header contain both the RX frame
* size and some flags.
@@ -3372,27 +3350,27 @@ struct iwl_rx_packet {
* 13-00: RX frame size
*/
__le32 len_n_flags;
- struct iwl_cmd_header hdr;
+ struct il_cmd_header hdr;
union {
- struct iwl3945_rx_frame rx_frame;
- struct iwl3945_tx_resp tx_resp;
- struct iwl3945_beacon_notif beacon_status;
-
- struct iwl_alive_resp alive_frame;
- struct iwl_spectrum_notification spectrum_notif;
- struct iwl_csa_notification csa_notif;
- struct iwl_error_resp err_resp;
- struct iwl_card_state_notif card_state_notif;
- struct iwl_add_sta_resp add_sta;
- struct iwl_rem_sta_resp rem_sta;
- struct iwl_sleep_notification sleep_notif;
- struct iwl_spectrum_resp spectrum;
- struct iwl_notif_statistics stats;
- struct iwl_compressed_ba_resp compressed_ba;
- struct iwl_missed_beacon_notif missed_beacon;
+ struct il3945_rx_frame rx_frame;
+ struct il3945_tx_resp tx_resp;
+ struct il3945_beacon_notif beacon_status;
+
+ struct il_alive_resp alive_frame;
+ struct il_spectrum_notification spectrum_notif;
+ struct il_csa_notification csa_notif;
+ struct il_error_resp err_resp;
+ struct il_card_state_notif card_state_notif;
+ struct il_add_sta_resp add_sta;
+ struct il_rem_sta_resp rem_sta;
+ struct il_sleep_notification sleep_notif;
+ struct il_spectrum_resp spectrum;
+ struct il_notif_stats stats;
+ struct il_compressed_ba_resp compressed_ba;
+ struct il_missed_beacon_notif missed_beacon;
__le32 status;
u8 raw[0];
} u;
} __packed;
-#endif /* __iwl_legacy_commands_h__ */
+#endif /* __il_commands_h__ */
diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c
new file mode 100644
index 000000000000..36454d0bbeed
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/common.c
@@ -0,0 +1,5867 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/etherdevice.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/lockdep.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/skbuff.h>
+#include <net/mac80211.h>
+
+#include "common.h"
+
+int
+_il_poll_bit(struct il_priv *il, u32 addr, u32 bits, u32 mask, int timeout)
+{
+ const int interval = 10; /* microseconds */
+ int t = 0;
+
+ do {
+ if ((_il_rd(il, addr) & mask) == (bits & mask))
+ return t;
+ udelay(interval);
+ t += interval;
+ } while (t < timeout);
+
+ return -ETIMEDOUT;
+}
+EXPORT_SYMBOL(_il_poll_bit);
+
+void
+il_set_bit(struct il_priv *p, u32 r, u32 m)
+{
+ unsigned long reg_flags;
+
+ spin_lock_irqsave(&p->reg_lock, reg_flags);
+ _il_set_bit(p, r, m);
+ spin_unlock_irqrestore(&p->reg_lock, reg_flags);
+}
+EXPORT_SYMBOL(il_set_bit);
+
+void
+il_clear_bit(struct il_priv *p, u32 r, u32 m)
+{
+ unsigned long reg_flags;
+
+ spin_lock_irqsave(&p->reg_lock, reg_flags);
+ _il_clear_bit(p, r, m);
+ spin_unlock_irqrestore(&p->reg_lock, reg_flags);
+}
+EXPORT_SYMBOL(il_clear_bit);
+
+int
+_il_grab_nic_access(struct il_priv *il)
+{
+ int ret;
+ u32 val;
+
+ /* this bit wakes up the NIC */
+ _il_set_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+
+ /*
+ * These bits say the device is running, and should keep running for
+ * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
+ * but they do not indicate that embedded SRAM is restored yet;
+ * 3945 and 4965 have volatile SRAM, and must save/restore contents
+ * to/from host DRAM when sleeping/waking for power-saving.
+ * Each direction takes approximately 1/4 millisecond; with this
+ * overhead, it's a good idea to grab and hold MAC_ACCESS_REQ if a
+ * series of register accesses is expected (e.g. reading the Event Log),
+ * to keep the device from sleeping.
+ *
+ * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
+ * SRAM is okay/restored. We don't check that here because this call
+ * is just for hardware register access; but GP1 MAC_SLEEP check is a
+ * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
+ *
+ */
+ ret =
+ _il_poll_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
+ (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
+ CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
+ if (ret < 0) {
+ val = _il_rd(il, CSR_GP_CNTRL);
+ IL_ERR("MAC is in deep sleep!. CSR_GP_CNTRL = 0x%08X\n", val);
+ _il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
+ return -EIO;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(_il_grab_nic_access);
+
+int
+il_poll_bit(struct il_priv *il, u32 addr, u32 mask, int timeout)
+{
+ const int interval = 10; /* microseconds */
+ int t = 0;
+
+ do {
+ if ((il_rd(il, addr) & mask) == mask)
+ return t;
+ udelay(interval);
+ t += interval;
+ } while (t < timeout);
+
+ return -ETIMEDOUT;
+}
+EXPORT_SYMBOL(il_poll_bit);
+
+u32
+il_rd_prph(struct il_priv *il, u32 reg)
+{
+ unsigned long reg_flags;
+ u32 val;
+
+ spin_lock_irqsave(&il->reg_lock, reg_flags);
+ _il_grab_nic_access(il);
+ val = _il_rd_prph(il, reg);
+ _il_release_nic_access(il);
+ spin_unlock_irqrestore(&il->reg_lock, reg_flags);
+ return val;
+}
+EXPORT_SYMBOL(il_rd_prph);
+
+void
+il_wr_prph(struct il_priv *il, u32 addr, u32 val)
+{
+ unsigned long reg_flags;
+
+ spin_lock_irqsave(&il->reg_lock, reg_flags);
+ if (!_il_grab_nic_access(il)) {
+ _il_wr_prph(il, addr, val);
+ _il_release_nic_access(il);
+ }
+ spin_unlock_irqrestore(&il->reg_lock, reg_flags);
+}
+EXPORT_SYMBOL(il_wr_prph);
+
+u32
+il_read_targ_mem(struct il_priv *il, u32 addr)
+{
+ unsigned long reg_flags;
+ u32 value;
+
+ spin_lock_irqsave(&il->reg_lock, reg_flags);
+ _il_grab_nic_access(il);
+
+ _il_wr(il, HBUS_TARG_MEM_RADDR, addr);
+ rmb();
+ value = _il_rd(il, HBUS_TARG_MEM_RDAT);
+
+ _il_release_nic_access(il);
+ spin_unlock_irqrestore(&il->reg_lock, reg_flags);
+ return value;
+}
+EXPORT_SYMBOL(il_read_targ_mem);
+
+void
+il_write_targ_mem(struct il_priv *il, u32 addr, u32 val)
+{
+ unsigned long reg_flags;
+
+ spin_lock_irqsave(&il->reg_lock, reg_flags);
+ if (!_il_grab_nic_access(il)) {
+ _il_wr(il, HBUS_TARG_MEM_WADDR, addr);
+ wmb();
+ _il_wr(il, HBUS_TARG_MEM_WDAT, val);
+ _il_release_nic_access(il);
+ }
+ spin_unlock_irqrestore(&il->reg_lock, reg_flags);
+}
+EXPORT_SYMBOL(il_write_targ_mem);
+
+const char *
+il_get_cmd_string(u8 cmd)
+{
+ switch (cmd) {
+ IL_CMD(N_ALIVE);
+ IL_CMD(N_ERROR);
+ IL_CMD(C_RXON);
+ IL_CMD(C_RXON_ASSOC);
+ IL_CMD(C_QOS_PARAM);
+ IL_CMD(C_RXON_TIMING);
+ IL_CMD(C_ADD_STA);
+ IL_CMD(C_REM_STA);
+ IL_CMD(C_WEPKEY);
+ IL_CMD(N_3945_RX);
+ IL_CMD(C_TX);
+ IL_CMD(C_RATE_SCALE);
+ IL_CMD(C_LEDS);
+ IL_CMD(C_TX_LINK_QUALITY_CMD);
+ IL_CMD(C_CHANNEL_SWITCH);
+ IL_CMD(N_CHANNEL_SWITCH);
+ IL_CMD(C_SPECTRUM_MEASUREMENT);
+ IL_CMD(N_SPECTRUM_MEASUREMENT);
+ IL_CMD(C_POWER_TBL);
+ IL_CMD(N_PM_SLEEP);
+ IL_CMD(N_PM_DEBUG_STATS);
+ IL_CMD(C_SCAN);
+ IL_CMD(C_SCAN_ABORT);
+ IL_CMD(N_SCAN_START);
+ IL_CMD(N_SCAN_RESULTS);
+ IL_CMD(N_SCAN_COMPLETE);
+ IL_CMD(N_BEACON);
+ IL_CMD(C_TX_BEACON);
+ IL_CMD(C_TX_PWR_TBL);
+ IL_CMD(C_BT_CONFIG);
+ IL_CMD(C_STATS);
+ IL_CMD(N_STATS);
+ IL_CMD(N_CARD_STATE);
+ IL_CMD(N_MISSED_BEACONS);
+ IL_CMD(C_CT_KILL_CONFIG);
+ IL_CMD(C_SENSITIVITY);
+ IL_CMD(C_PHY_CALIBRATION);
+ IL_CMD(N_RX_PHY);
+ IL_CMD(N_RX_MPDU);
+ IL_CMD(N_RX);
+ IL_CMD(N_COMPRESSED_BA);
+ default:
+ return "UNKNOWN";
+
+ }
+}
+EXPORT_SYMBOL(il_get_cmd_string);
+
+#define HOST_COMPLETE_TIMEOUT (HZ / 2)
+
+static void
+il_generic_cmd_callback(struct il_priv *il, struct il_device_cmd *cmd,
+ struct il_rx_pkt *pkt)
+{
+ if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
+ IL_ERR("Bad return from %s (0x%08X)\n",
+ il_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
+ return;
+ }
+#ifdef CONFIG_IWLEGACY_DEBUG
+ switch (cmd->hdr.cmd) {
+ case C_TX_LINK_QUALITY_CMD:
+ case C_SENSITIVITY:
+ D_HC_DUMP("back from %s (0x%08X)\n",
+ il_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
+ break;
+ default:
+ D_HC("back from %s (0x%08X)\n", il_get_cmd_string(cmd->hdr.cmd),
+ pkt->hdr.flags);
+ }
+#endif
+}
+
+static int
+il_send_cmd_async(struct il_priv *il, struct il_host_cmd *cmd)
+{
+ int ret;
+
+ BUG_ON(!(cmd->flags & CMD_ASYNC));
+
+ /* An asynchronous command can not expect an SKB to be set. */
+ BUG_ON(cmd->flags & CMD_WANT_SKB);
+
+ /* Assign a generic callback if one is not provided */
+ if (!cmd->callback)
+ cmd->callback = il_generic_cmd_callback;
+
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ return -EBUSY;
+
+ ret = il_enqueue_hcmd(il, cmd);
+ if (ret < 0) {
+ IL_ERR("Error sending %s: enqueue_hcmd failed: %d\n",
+ il_get_cmd_string(cmd->id), ret);
+ return ret;
+ }
+ return 0;
+}
+
+int
+il_send_cmd_sync(struct il_priv *il, struct il_host_cmd *cmd)
+{
+ int cmd_idx;
+ int ret;
+
+ lockdep_assert_held(&il->mutex);
+
+ BUG_ON(cmd->flags & CMD_ASYNC);
+
+ /* A synchronous command can not have a callback set. */
+ BUG_ON(cmd->callback);
+
+ D_INFO("Attempting to send sync command %s\n",
+ il_get_cmd_string(cmd->id));
+
+ set_bit(S_HCMD_ACTIVE, &il->status);
+ D_INFO("Setting HCMD_ACTIVE for command %s\n",
+ il_get_cmd_string(cmd->id));
+
+ cmd_idx = il_enqueue_hcmd(il, cmd);
+ if (cmd_idx < 0) {
+ ret = cmd_idx;
+ IL_ERR("Error sending %s: enqueue_hcmd failed: %d\n",
+ il_get_cmd_string(cmd->id), ret);
+ goto out;
+ }
+
+ ret = wait_event_timeout(il->wait_command_queue,
+ !test_bit(S_HCMD_ACTIVE, &il->status),
+ HOST_COMPLETE_TIMEOUT);
+ if (!ret) {
+ if (test_bit(S_HCMD_ACTIVE, &il->status)) {
+ IL_ERR("Error sending %s: time out after %dms.\n",
+ il_get_cmd_string(cmd->id),
+ jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
+
+ clear_bit(S_HCMD_ACTIVE, &il->status);
+ D_INFO("Clearing HCMD_ACTIVE for command %s\n",
+ il_get_cmd_string(cmd->id));
+ ret = -ETIMEDOUT;
+ goto cancel;
+ }
+ }
+
+ if (test_bit(S_RF_KILL_HW, &il->status)) {
+ IL_ERR("Command %s aborted: RF KILL Switch\n",
+ il_get_cmd_string(cmd->id));
+ ret = -ECANCELED;
+ goto fail;
+ }
+ if (test_bit(S_FW_ERROR, &il->status)) {
+ IL_ERR("Command %s failed: FW Error\n",
+ il_get_cmd_string(cmd->id));
+ ret = -EIO;
+ goto fail;
+ }
+ if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
+ IL_ERR("Error: Response NULL in '%s'\n",
+ il_get_cmd_string(cmd->id));
+ ret = -EIO;
+ goto cancel;
+ }
+
+ ret = 0;
+ goto out;
+
+cancel:
+ if (cmd->flags & CMD_WANT_SKB) {
+ /*
+ * Cancel the CMD_WANT_SKB flag for the cmd in the
+ * TX cmd queue. Otherwise in case the cmd comes
+ * in later, it will possibly set an invalid
+ * address (cmd->meta.source).
+ */
+ il->txq[il->cmd_queue].meta[cmd_idx].flags &= ~CMD_WANT_SKB;
+ }
+fail:
+ if (cmd->reply_page) {
+ il_free_pages(il, cmd->reply_page);
+ cmd->reply_page = 0;
+ }
+out:
+ return ret;
+}
+EXPORT_SYMBOL(il_send_cmd_sync);
+
+int
+il_send_cmd(struct il_priv *il, struct il_host_cmd *cmd)
+{
+ if (cmd->flags & CMD_ASYNC)
+ return il_send_cmd_async(il, cmd);
+
+ return il_send_cmd_sync(il, cmd);
+}
+EXPORT_SYMBOL(il_send_cmd);
+
+int
+il_send_cmd_pdu(struct il_priv *il, u8 id, u16 len, const void *data)
+{
+ struct il_host_cmd cmd = {
+ .id = id,
+ .len = len,
+ .data = data,
+ };
+
+ return il_send_cmd_sync(il, &cmd);
+}
+EXPORT_SYMBOL(il_send_cmd_pdu);
+
+int
+il_send_cmd_pdu_async(struct il_priv *il, u8 id, u16 len, const void *data,
+ void (*callback) (struct il_priv *il,
+ struct il_device_cmd *cmd,
+ struct il_rx_pkt *pkt))
+{
+ struct il_host_cmd cmd = {
+ .id = id,
+ .len = len,
+ .data = data,
+ };
+
+ cmd.flags |= CMD_ASYNC;
+ cmd.callback = callback;
+
+ return il_send_cmd_async(il, &cmd);
+}
+EXPORT_SYMBOL(il_send_cmd_pdu_async);
+
+/* default: IL_LED_BLINK(0) using blinking idx table */
+static int led_mode;
+module_param(led_mode, int, S_IRUGO);
+MODULE_PARM_DESC(led_mode,
+ "0=system default, " "1=On(RF On)/Off(RF Off), 2=blinking");
+
+/* Throughput OFF time(ms) ON time (ms)
+ * >300 25 25
+ * >200 to 300 40 40
+ * >100 to 200 55 55
+ * >70 to 100 65 65
+ * >50 to 70 75 75
+ * >20 to 50 85 85
+ * >10 to 20 95 95
+ * >5 to 10 110 110
+ * >1 to 5 130 130
+ * >0 to 1 167 167
+ * <=0 SOLID ON
+ */
+static const struct ieee80211_tpt_blink il_blink[] = {
+ {.throughput = 0, .blink_time = 334},
+ {.throughput = 1 * 1024 - 1, .blink_time = 260},
+ {.throughput = 5 * 1024 - 1, .blink_time = 220},
+ {.throughput = 10 * 1024 - 1, .blink_time = 190},
+ {.throughput = 20 * 1024 - 1, .blink_time = 170},
+ {.throughput = 50 * 1024 - 1, .blink_time = 150},
+ {.throughput = 70 * 1024 - 1, .blink_time = 130},
+ {.throughput = 100 * 1024 - 1, .blink_time = 110},
+ {.throughput = 200 * 1024 - 1, .blink_time = 80},
+ {.throughput = 300 * 1024 - 1, .blink_time = 50},
+};
+
+/*
+ * Adjust the led blink rate to compensate for the MAC clock difference on
+ * each HW type. Led blink rate analysis showed an average deviation of
+ * 0% on 3945 and 5% on 4965 HW.
+ * The led on/off time per HW needs to be compensated according to the
+ * deviation to achieve the desired led frequency.
+ * The calculation is: (100-averageDeviation)/100 * blinkTime
+ * For code efficiency the calculation will be:
+ * compensation = (100 - averageDeviation) * 64 / 100
+ * NewBlinkTime = (compensation * BlinkTime) / 64
+ */
+static inline u8
+il_blink_compensation(struct il_priv *il, u8 time, u16 compensation)
+{
+ if (!compensation) {
+ IL_ERR("undefined blink compensation: "
+ "use pre-defined blinking time\n");
+ return time;
+ }
+
+ return (u8) ((time * compensation) >> 6);
+}
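+
+/*
+ * Worked example of the formula above: with the 5% deviation measured on
+ * 4965 HW, compensation = (100 - 5) * 64 / 100 = 60, so a nominal 110 ms
+ * blink time becomes (110 * 60) >> 6 = 103 ms.
+ */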
+
+/* Set led pattern command */
+static int
+il_led_cmd(struct il_priv *il, unsigned long on, unsigned long off)
+{
+ struct il_led_cmd led_cmd = {
+ .id = IL_LED_LINK,
+ .interval = IL_DEF_LED_INTRVL
+ };
+ int ret;
+
+ if (!test_bit(S_READY, &il->status))
+ return -EBUSY;
+
+ if (il->blink_on == on && il->blink_off == off)
+ return 0;
+
+ if (off == 0) {
+ /* led is SOLID_ON */
+ on = IL_LED_SOLID;
+ }
+
+ D_LED("Led blink time compensation=%u\n",
+ il->cfg->base_params->led_compensation);
+ led_cmd.on =
+ il_blink_compensation(il, on,
+ il->cfg->base_params->led_compensation);
+ led_cmd.off =
+ il_blink_compensation(il, off,
+ il->cfg->base_params->led_compensation);
+
+ ret = il->cfg->ops->led->cmd(il, &led_cmd);
+ if (!ret) {
+ il->blink_on = on;
+ il->blink_off = off;
+ }
+ return ret;
+}
+
+static void
+il_led_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct il_priv *il = container_of(led_cdev, struct il_priv, led);
+ unsigned long on = 0;
+
+ if (brightness > 0)
+ on = IL_LED_SOLID;
+
+ il_led_cmd(il, on, 0);
+}
+
+static int
+il_led_blink_set(struct led_classdev *led_cdev, unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ struct il_priv *il = container_of(led_cdev, struct il_priv, led);
+
+ return il_led_cmd(il, *delay_on, *delay_off);
+}
+
+void
+il_leds_init(struct il_priv *il)
+{
+ int mode = led_mode;
+ int ret;
+
+ if (mode == IL_LED_DEFAULT)
+ mode = il->cfg->led_mode;
+
+ il->led.name =
+ kasprintf(GFP_KERNEL, "%s-led", wiphy_name(il->hw->wiphy));
+ il->led.brightness_set = il_led_brightness_set;
+ il->led.blink_set = il_led_blink_set;
+ il->led.max_brightness = 1;
+
+ switch (mode) {
+ case IL_LED_DEFAULT:
+ WARN_ON(1);
+ break;
+ case IL_LED_BLINK:
+ il->led.default_trigger =
+ ieee80211_create_tpt_led_trigger(il->hw,
+ IEEE80211_TPT_LEDTRIG_FL_CONNECTED,
+ il_blink,
+ ARRAY_SIZE(il_blink));
+ break;
+ case IL_LED_RF_STATE:
+ il->led.default_trigger = ieee80211_get_radio_led_name(il->hw);
+ break;
+ }
+
+ ret = led_classdev_register(&il->pci_dev->dev, &il->led);
+ if (ret) {
+ kfree(il->led.name);
+ return;
+ }
+
+ il->led_registered = true;
+}
+EXPORT_SYMBOL(il_leds_init);
+
+void
+il_leds_exit(struct il_priv *il)
+{
+ if (!il->led_registered)
+ return;
+
+ led_classdev_unregister(&il->led);
+ kfree(il->led.name);
+}
+EXPORT_SYMBOL(il_leds_exit);
+
+/************************** EEPROM BANDS ****************************
+ *
+ * The il_eeprom_band definitions below provide the mapping from the
+ * EEPROM contents to the specific channel number supported for each
+ * band.
+ *
+ * For example, il_priv->eeprom.band_3_channels[4] from the band_3
+ * definition below maps to physical channel 42 in the 5.2GHz spectrum.
+ * The specific geography and calibration information for that channel
+ * is contained in the eeprom map itself.
+ *
+ * During init, we copy the eeprom information and channel map
+ * information into il->channel_info_24/52 and il->channel_map_24/52
+ *
+ * channel_map_24/52 provides the idx in the channel_info array for a
+ * given channel. We have to have two separate maps as there is channel
+ * overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and
+ * band_2
+ *
+ * A value of 0xff stored in the channel_map indicates that the channel
+ * is not supported by the hardware at all.
+ *
+ * A value of 0xfe in the channel_map indicates that the channel is not
+ * valid for Tx with the current hardware. This means that
+ * while the system can tune and receive on a given channel, it may not
+ * be able to associate or transmit any frames on that
+ * channel. There is no corresponding channel information for that
+ * entry.
+ *
+ *********************************************************************/
+
+/* 2.4 GHz */
+const u8 il_eeprom_band_1[14] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
+};
+
+/* 5.2 GHz bands */
+static const u8 il_eeprom_band_2[] = { /* 4915-5080MHz */
+ 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
+};
+
+static const u8 il_eeprom_band_3[] = { /* 5170-5320MHz */
+ 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
+};
+
+static const u8 il_eeprom_band_4[] = { /* 5500-5700MHz */
+ 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
+};
+
+static const u8 il_eeprom_band_5[] = { /* 5725-5825MHz */
+ 145, 149, 153, 157, 161, 165
+};
+
+static const u8 il_eeprom_band_6[] = { /* 2.4 ht40 channel */
+ 1, 2, 3, 4, 5, 6, 7
+};
+
+static const u8 il_eeprom_band_7[] = { /* 5.2 ht40 channel */
+ 36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
+};
+
+/******************************************************************************
+ *
+ * EEPROM related functions
+ *
+******************************************************************************/
+
+static int
+il_eeprom_verify_signature(struct il_priv *il)
+{
+ u32 gp = _il_rd(il, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
+ int ret = 0;
+
+ D_EEPROM("EEPROM signature=0x%08x\n", gp);
+ switch (gp) {
+ case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K:
+ case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K:
+ break;
+ default:
+ IL_ERR("bad EEPROM signature," "EEPROM_GP=0x%08x\n", gp);
+ ret = -ENOENT;
+ break;
+ }
+ return ret;
+}
+
+const u8 *
+il_eeprom_query_addr(const struct il_priv *il, size_t offset)
+{
+ BUG_ON(offset >= il->cfg->base_params->eeprom_size);
+ return &il->eeprom[offset];
+}
+EXPORT_SYMBOL(il_eeprom_query_addr);
+
+u16
+il_eeprom_query16(const struct il_priv *il, size_t offset)
+{
+ if (!il->eeprom)
+ return 0;
+ return (u16) il->eeprom[offset] | ((u16) il->eeprom[offset + 1] << 8);
+}
+EXPORT_SYMBOL(il_eeprom_query16);
+
+/**
+ * il_eeprom_init - read EEPROM contents
+ *
+ * Load the EEPROM contents from adapter into il->eeprom
+ *
+ * NOTE: This routine uses the non-debug IO access functions.
+ */
+int
+il_eeprom_init(struct il_priv *il)
+{
+ __le16 *e;
+ u32 gp = _il_rd(il, CSR_EEPROM_GP);
+ int sz;
+ int ret;
+ u16 addr;
+
+ /* allocate eeprom */
+ sz = il->cfg->base_params->eeprom_size;
+ D_EEPROM("NVM size = %d\n", sz);
+ il->eeprom = kzalloc(sz, GFP_KERNEL);
+ if (!il->eeprom) {
+ ret = -ENOMEM;
+ goto alloc_err;
+ }
+ e = (__le16 *) il->eeprom;
+
+ il->cfg->ops->lib->apm_ops.init(il);
+
+ ret = il_eeprom_verify_signature(il);
+ if (ret < 0) {
+ IL_ERR("EEPROM not found, EEPROM_GP=0x%08x\n", gp);
+ ret = -ENOENT;
+ goto err;
+ }
+
+ /* Make sure driver (instead of uCode) is allowed to read EEPROM */
+ ret = il->cfg->ops->lib->eeprom_ops.acquire_semaphore(il);
+ if (ret < 0) {
+ IL_ERR("Failed to acquire EEPROM semaphore.\n");
+ ret = -ENOENT;
+ goto err;
+ }
+
+ /* eeprom is an array of 16bit values */
+ for (addr = 0; addr < sz; addr += sizeof(u16)) {
+ u32 r;
+
+ _il_wr(il, CSR_EEPROM_REG,
+ CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
+
+ ret =
+ _il_poll_bit(il, CSR_EEPROM_REG,
+ CSR_EEPROM_REG_READ_VALID_MSK,
+ CSR_EEPROM_REG_READ_VALID_MSK,
+ IL_EEPROM_ACCESS_TIMEOUT);
+ if (ret < 0) {
+ IL_ERR("Time out reading EEPROM[%d]\n", addr);
+ goto done;
+ }
+ r = _il_rd(il, CSR_EEPROM_REG);
+ e[addr / 2] = cpu_to_le16(r >> 16);
+ }
+
+ D_EEPROM("NVM Type: %s, version: 0x%x\n", "EEPROM",
+ il_eeprom_query16(il, EEPROM_VERSION));
+
+ ret = 0;
+done:
+ il->cfg->ops->lib->eeprom_ops.release_semaphore(il);
+
+err:
+ if (ret)
+ il_eeprom_free(il);
+ /* Reset chip to save power until we load uCode during "up". */
+ il_apm_stop(il);
+alloc_err:
+ return ret;
+}
+EXPORT_SYMBOL(il_eeprom_init);
+
+void
+il_eeprom_free(struct il_priv *il)
+{
+ kfree(il->eeprom);
+ il->eeprom = NULL;
+}
+EXPORT_SYMBOL(il_eeprom_free);
+
+static void
+il_init_band_reference(const struct il_priv *il, int eep_band,
+ int *eeprom_ch_count,
+ const struct il_eeprom_channel **eeprom_ch_info,
+ const u8 **eeprom_ch_idx)
+{
+ u32 offset =
+ il->cfg->ops->lib->eeprom_ops.regulatory_bands[eep_band - 1];
+ switch (eep_band) {
+ case 1: /* 2.4GHz band */
+ *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_1);
+ *eeprom_ch_info =
+ (struct il_eeprom_channel *)il_eeprom_query_addr(il,
+ offset);
+ *eeprom_ch_idx = il_eeprom_band_1;
+ break;
+ case 2: /* 4.9GHz band */
+ *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_2);
+ *eeprom_ch_info =
+ (struct il_eeprom_channel *)il_eeprom_query_addr(il,
+ offset);
+ *eeprom_ch_idx = il_eeprom_band_2;
+ break;
+ case 3: /* 5.2GHz band */
+ *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_3);
+ *eeprom_ch_info =
+ (struct il_eeprom_channel *)il_eeprom_query_addr(il,
+ offset);
+ *eeprom_ch_idx = il_eeprom_band_3;
+ break;
+ case 4: /* 5.5GHz band */
+ *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_4);
+ *eeprom_ch_info =
+ (struct il_eeprom_channel *)il_eeprom_query_addr(il,
+ offset);
+ *eeprom_ch_idx = il_eeprom_band_4;
+ break;
+ case 5: /* 5.7GHz band */
+ *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_5);
+ *eeprom_ch_info =
+ (struct il_eeprom_channel *)il_eeprom_query_addr(il,
+ offset);
+ *eeprom_ch_idx = il_eeprom_band_5;
+ break;
+ case 6: /* 2.4GHz ht40 channels */
+ *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_6);
+ *eeprom_ch_info =
+ (struct il_eeprom_channel *)il_eeprom_query_addr(il,
+ offset);
+ *eeprom_ch_idx = il_eeprom_band_6;
+ break;
+ case 7: /* 5 GHz ht40 channels */
+ *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_7);
+ *eeprom_ch_info =
+ (struct il_eeprom_channel *)il_eeprom_query_addr(il,
+ offset);
+ *eeprom_ch_idx = il_eeprom_band_7;
+ break;
+ default:
+ BUG();
+ }
+}
+
+#define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) \
+ ? # x " " : "")
+/**
+ * il_mod_ht40_chan_info - Copy ht40 channel info into driver's il.
+ *
+ * Does not set up a command, or touch hardware.
+ */
+static int
+il_mod_ht40_chan_info(struct il_priv *il, enum ieee80211_band band, u16 channel,
+ const struct il_eeprom_channel *eeprom_ch,
+ u8 clear_ht40_extension_channel)
+{
+ struct il_channel_info *ch_info;
+
+ ch_info =
+ (struct il_channel_info *)il_get_channel_info(il, band, channel);
+
+ if (!il_is_channel_valid(ch_info))
+ return -1;
+
+ D_EEPROM("HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm):"
+ " Ad-Hoc %ssupported\n", ch_info->channel,
+ il_is_channel_a_band(ch_info) ? "5.2" : "2.4",
+ CHECK_AND_PRINT(IBSS), CHECK_AND_PRINT(ACTIVE),
+ CHECK_AND_PRINT(RADAR), CHECK_AND_PRINT(WIDE),
+ CHECK_AND_PRINT(DFS), eeprom_ch->flags,
+ eeprom_ch->max_power_avg,
+ ((eeprom_ch->flags & EEPROM_CHANNEL_IBSS) &&
+ !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ? "" : "not ");
+
+ ch_info->ht40_eeprom = *eeprom_ch;
+ ch_info->ht40_max_power_avg = eeprom_ch->max_power_avg;
+ ch_info->ht40_flags = eeprom_ch->flags;
+ if (eeprom_ch->flags & EEPROM_CHANNEL_VALID)
+ ch_info->ht40_extension_channel &=
+ ~clear_ht40_extension_channel;
+
+ return 0;
+}
+
+#define CHECK_AND_PRINT_I(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \
+ ? # x " " : "")
+
+/**
+ * il_init_channel_map - Set up driver's info for all possible channels
+ */
+int
+il_init_channel_map(struct il_priv *il)
+{
+ int eeprom_ch_count = 0;
+ const u8 *eeprom_ch_idx = NULL;
+ const struct il_eeprom_channel *eeprom_ch_info = NULL;
+ int band, ch;
+ struct il_channel_info *ch_info;
+
+ if (il->channel_count) {
+ D_EEPROM("Channel map already initialized.\n");
+ return 0;
+ }
+
+ D_EEPROM("Initializing regulatory info from EEPROM\n");
+
+ il->channel_count =
+ ARRAY_SIZE(il_eeprom_band_1) + ARRAY_SIZE(il_eeprom_band_2) +
+ ARRAY_SIZE(il_eeprom_band_3) + ARRAY_SIZE(il_eeprom_band_4) +
+ ARRAY_SIZE(il_eeprom_band_5);
+
+ D_EEPROM("Parsing data for %d channels.\n", il->channel_count);
+
+ il->channel_info =
+ kzalloc(sizeof(struct il_channel_info) * il->channel_count,
+ GFP_KERNEL);
+ if (!il->channel_info) {
+ IL_ERR("Could not allocate channel_info\n");
+ il->channel_count = 0;
+ return -ENOMEM;
+ }
+
+ ch_info = il->channel_info;
+
+ /* Loop through the 5 EEPROM bands, adding them in order to the
+ * channel map we maintain (which contains more information than
+ * just what is in the EEPROM) */
+ for (band = 1; band <= 5; band++) {
+
+ il_init_band_reference(il, band, &eeprom_ch_count,
+ &eeprom_ch_info, &eeprom_ch_idx);
+
+ /* Loop through each band adding each of the channels */
+ for (ch = 0; ch < eeprom_ch_count; ch++) {
+ ch_info->channel = eeprom_ch_idx[ch];
+ ch_info->band =
+ (band ==
+ 1) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+
+ /* permanently store EEPROM's channel regulatory flags
+ * and max power in channel info database. */
+ ch_info->eeprom = eeprom_ch_info[ch];
+
+ /* Copy the run-time flags so they are there even on
+ * invalid channels */
+ ch_info->flags = eeprom_ch_info[ch].flags;
+ /* First write that ht40 is not enabled, and then enable
+ * one by one */
+ ch_info->ht40_extension_channel =
+ IEEE80211_CHAN_NO_HT40;
+
+ if (!(il_is_channel_valid(ch_info))) {
+ D_EEPROM("Ch. %d Flags %x [%sGHz] - "
+ "No traffic\n", ch_info->channel,
+ ch_info->flags,
+ il_is_channel_a_band(ch_info) ? "5.2" :
+ "2.4");
+ ch_info++;
+ continue;
+ }
+
+ /* Initialize regulatory-based run-time data */
+ ch_info->max_power_avg = ch_info->curr_txpow =
+ eeprom_ch_info[ch].max_power_avg;
+ ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
+ ch_info->min_power = 0;
+
+ D_EEPROM("Ch. %d [%sGHz] " "%s%s%s%s%s%s(0x%02x %ddBm):"
+ " Ad-Hoc %ssupported\n", ch_info->channel,
+ il_is_channel_a_band(ch_info) ? "5.2" : "2.4",
+ CHECK_AND_PRINT_I(VALID),
+ CHECK_AND_PRINT_I(IBSS),
+ CHECK_AND_PRINT_I(ACTIVE),
+ CHECK_AND_PRINT_I(RADAR),
+ CHECK_AND_PRINT_I(WIDE),
+ CHECK_AND_PRINT_I(DFS),
+ eeprom_ch_info[ch].flags,
+ eeprom_ch_info[ch].max_power_avg,
+ ((eeprom_ch_info[ch].
+ flags & EEPROM_CHANNEL_IBSS) &&
+ !(eeprom_ch_info[ch].
+ flags & EEPROM_CHANNEL_RADAR)) ? "" :
+ "not ");
+
+ ch_info++;
+ }
+ }
+
+ /* Check if we do have HT40 channels */
+ if (il->cfg->ops->lib->eeprom_ops.regulatory_bands[5] ==
+ EEPROM_REGULATORY_BAND_NO_HT40 &&
+ il->cfg->ops->lib->eeprom_ops.regulatory_bands[6] ==
+ EEPROM_REGULATORY_BAND_NO_HT40)
+ return 0;
+
+ /* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */
+ for (band = 6; band <= 7; band++) {
+ enum ieee80211_band ieeeband;
+
+ il_init_band_reference(il, band, &eeprom_ch_count,
+ &eeprom_ch_info, &eeprom_ch_idx);
+
+ /* EEPROM band 6 is 2.4, band 7 is 5 GHz */
+ ieeeband =
+ (band == 6) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+
+ /* Loop through each band adding each of the channels */
+ for (ch = 0; ch < eeprom_ch_count; ch++) {
+ /* Set up driver's info for lower half */
+ il_mod_ht40_chan_info(il, ieeeband, eeprom_ch_idx[ch],
+ &eeprom_ch_info[ch],
+ IEEE80211_CHAN_NO_HT40PLUS);
+
+ /* Set up driver's info for upper half */
+ il_mod_ht40_chan_info(il, ieeeband,
+ eeprom_ch_idx[ch] + 4,
+ &eeprom_ch_info[ch],
+ IEEE80211_CHAN_NO_HT40MINUS);
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(il_init_channel_map);
+
+/*
+ * il_free_channel_map - undo allocations in il_init_channel_map
+ */
+void
+il_free_channel_map(struct il_priv *il)
+{
+ kfree(il->channel_info);
+ il->channel_count = 0;
+}
+EXPORT_SYMBOL(il_free_channel_map);
+
+/**
+ * il_get_channel_info - Find driver's private channel info
+ *
+ * Based on band and channel number.
+ */
+const struct il_channel_info *
+il_get_channel_info(const struct il_priv *il, enum ieee80211_band band,
+ u16 channel)
+{
+ int i;
+
+ switch (band) {
+ case IEEE80211_BAND_5GHZ:
+ for (i = 14; i < il->channel_count; i++) {
+ if (il->channel_info[i].channel == channel)
+ return &il->channel_info[i];
+ }
+ break;
+ case IEEE80211_BAND_2GHZ:
+ if (channel >= 1 && channel <= 14)
+ return &il->channel_info[channel - 1];
+ break;
+ default:
+ BUG();
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL(il_get_channel_info);
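+
+/*
+ * Typical use (sketch): look up the driver's info for 5 GHz channel 36
+ * and make sure the channel may be used before tuning to it:
+ *
+ *	const struct il_channel_info *ch_info =
+ *	    il_get_channel_info(il, IEEE80211_BAND_5GHZ, 36);
+ *
+ *	if (!il_is_channel_valid(ch_info))
+ *		return -EINVAL;
+ */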
+
+/*
+ * Setting power level allows the card to go to sleep when not busy.
+ *
+ * We calculate a sleep command based on the required latency, which
+ * we get from mac80211. In order to handle thermal throttling, we can
+ * also use pre-defined power levels.
+ */
+
+/*
+ * This defines the old power levels. They are still used by default
+ * (level 1) and for thermal throttle (levels 3 through 5)
+ */
+
+struct il_power_vec_entry {
+ struct il_powertable_cmd cmd;
+ u8 no_dtim; /* number of skip dtim */
+};
+
+static void
+il_power_sleep_cam_cmd(struct il_priv *il, struct il_powertable_cmd *cmd)
+{
+ memset(cmd, 0, sizeof(*cmd));
+
+ if (il->power_data.pci_pm)
+ cmd->flags |= IL_POWER_PCI_PM_MSK;
+
+ D_POWER("Sleep command for CAM\n");
+}
+
+static int
+il_set_power(struct il_priv *il, struct il_powertable_cmd *cmd)
+{
+ D_POWER("Sending power/sleep command\n");
+ D_POWER("Flags value = 0x%08X\n", cmd->flags);
+ D_POWER("Tx timeout = %u\n", le32_to_cpu(cmd->tx_data_timeout));
+ D_POWER("Rx timeout = %u\n", le32_to_cpu(cmd->rx_data_timeout));
+ D_POWER("Sleep interval vector = { %d , %d , %d , %d , %d }\n",
+ le32_to_cpu(cmd->sleep_interval[0]),
+ le32_to_cpu(cmd->sleep_interval[1]),
+ le32_to_cpu(cmd->sleep_interval[2]),
+ le32_to_cpu(cmd->sleep_interval[3]),
+ le32_to_cpu(cmd->sleep_interval[4]));
+
+ return il_send_cmd_pdu(il, C_POWER_TBL,
+ sizeof(struct il_powertable_cmd), cmd);
+}
+
+int
+il_power_set_mode(struct il_priv *il, struct il_powertable_cmd *cmd, bool force)
+{
+ int ret;
+ bool update_chains;
+
+ lockdep_assert_held(&il->mutex);
+
+ /* Don't update the RX chain when chain noise calibration is running */
+ update_chains = il->chain_noise_data.state == IL_CHAIN_NOISE_DONE ||
+ il->chain_noise_data.state == IL_CHAIN_NOISE_ALIVE;
+
+ if (!memcmp(&il->power_data.sleep_cmd, cmd, sizeof(*cmd)) && !force)
+ return 0;
+
+ if (!il_is_ready_rf(il))
+ return -EIO;
+
+ /* scan complete uses sleep_cmd_next, so it needs to be kept updated */
+ memcpy(&il->power_data.sleep_cmd_next, cmd, sizeof(*cmd));
+ if (test_bit(S_SCANNING, &il->status) && !force) {
+ D_INFO("Defer power set mode while scanning\n");
+ return 0;
+ }
+
+ if (cmd->flags & IL_POWER_DRIVER_ALLOW_SLEEP_MSK)
+ set_bit(S_POWER_PMI, &il->status);
+
+ ret = il_set_power(il, cmd);
+ if (!ret) {
+ if (!(cmd->flags & IL_POWER_DRIVER_ALLOW_SLEEP_MSK))
+ clear_bit(S_POWER_PMI, &il->status);
+
+ if (il->cfg->ops->lib->update_chain_flags && update_chains)
+ il->cfg->ops->lib->update_chain_flags(il);
+ else if (il->cfg->ops->lib->update_chain_flags)
+ D_POWER("Cannot update the power, chain noise "
+ "calibration running: %d\n",
+ il->chain_noise_data.state);
+
+ memcpy(&il->power_data.sleep_cmd, cmd, sizeof(*cmd));
+ } else
+ IL_ERR("set power fail, ret = %d", ret);
+
+ return ret;
+}
+
+int
+il_power_update_mode(struct il_priv *il, bool force)
+{
+ struct il_powertable_cmd cmd;
+
+ il_power_sleep_cam_cmd(il, &cmd);
+ return il_power_set_mode(il, &cmd, force);
+}
+EXPORT_SYMBOL(il_power_update_mode);
+
+/* initialize to default */
+void
+il_power_initialize(struct il_priv *il)
+{
+ u16 lctl = il_pcie_link_ctl(il);
+
+ il->power_data.pci_pm = !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN);
+
+ il->power_data.debug_sleep_level_override = -1;
+
+ memset(&il->power_data.sleep_cmd, 0, sizeof(il->power_data.sleep_cmd));
+}
+EXPORT_SYMBOL(il_power_initialize);
+
+/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after
+ * sending probe req. This should be set long enough to hear probe responses
+ * from more than one AP. */
+#define IL_ACTIVE_DWELL_TIME_24 (30) /* all times in msec */
+#define IL_ACTIVE_DWELL_TIME_52 (20)
+
+#define IL_ACTIVE_DWELL_FACTOR_24GHZ (3)
+#define IL_ACTIVE_DWELL_FACTOR_52GHZ (2)
+
+/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel.
+ * Must be set longer than active dwell time.
+ * For the most reliable scan, set > AP beacon interval (typically 100msec). */
+#define IL_PASSIVE_DWELL_TIME_24 (20) /* all times in msec */
+#define IL_PASSIVE_DWELL_TIME_52 (10)
+#define IL_PASSIVE_DWELL_BASE (100)
+#define IL_CHANNEL_TUNE_TIME 5
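+
+/*
+ * For example, an active scan on 2.4 GHz that carries two direct probe
+ * requests dwells 30 + 3 * (2 + 1) = 39 ms on each channel (see
+ * il_get_active_dwell_time() below).
+ */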
+
+static int
+il_send_scan_abort(struct il_priv *il)
+{
+ int ret;
+ struct il_rx_pkt *pkt;
+ struct il_host_cmd cmd = {
+ .id = C_SCAN_ABORT,
+ .flags = CMD_WANT_SKB,
+ };
+
+ /* Exit instantly with an error when the device is not ready
+ * to receive the scan abort command or is not currently
+ * performing a hardware scan */
+ if (!test_bit(S_READY, &il->status) ||
+ !test_bit(S_GEO_CONFIGURED, &il->status) ||
+ !test_bit(S_SCAN_HW, &il->status) ||
+ test_bit(S_FW_ERROR, &il->status) ||
+ test_bit(S_EXIT_PENDING, &il->status))
+ return -EIO;
+
+ ret = il_send_cmd_sync(il, &cmd);
+ if (ret)
+ return ret;
+
+ pkt = (struct il_rx_pkt *)cmd.reply_page;
+ if (pkt->u.status != CAN_ABORT_STATUS) {
+ /* The scan abort will return 1 for success or
+ * 2 for "failure". A failure condition can be
+ * due to simply not being in an active scan, which
+ * can occur if we send the scan abort before
+ * the microcode has notified us that a scan is
+ * completed. */
+ D_SCAN("SCAN_ABORT ret %d.\n", pkt->u.status);
+ ret = -EIO;
+ }
+
+ il_free_pages(il, cmd.reply_page);
+ return ret;
+}
+
+static void
+il_complete_scan(struct il_priv *il, bool aborted)
+{
+ /* check if scan was requested from mac80211 */
+ if (il->scan_request) {
+ D_SCAN("Complete scan in mac80211\n");
+ ieee80211_scan_completed(il->hw, aborted);
+ }
+
+ il->scan_vif = NULL;
+ il->scan_request = NULL;
+}
+
+void
+il_force_scan_end(struct il_priv *il)
+{
+ lockdep_assert_held(&il->mutex);
+
+ if (!test_bit(S_SCANNING, &il->status)) {
+ D_SCAN("Forcing scan end while not scanning\n");
+ return;
+ }
+
+ D_SCAN("Forcing scan end\n");
+ clear_bit(S_SCANNING, &il->status);
+ clear_bit(S_SCAN_HW, &il->status);
+ clear_bit(S_SCAN_ABORTING, &il->status);
+ il_complete_scan(il, true);
+}
+
+static void
+il_do_scan_abort(struct il_priv *il)
+{
+ int ret;
+
+ lockdep_assert_held(&il->mutex);
+
+ if (!test_bit(S_SCANNING, &il->status)) {
+ D_SCAN("Not performing scan to abort\n");
+ return;
+ }
+
+ if (test_and_set_bit(S_SCAN_ABORTING, &il->status)) {
+ D_SCAN("Scan abort in progress\n");
+ return;
+ }
+
+ ret = il_send_scan_abort(il);
+ if (ret) {
+ D_SCAN("Send scan abort failed %d\n", ret);
+ il_force_scan_end(il);
+ } else
+ D_SCAN("Successfully send scan abort\n");
+}
+
+/**
+ * il_scan_cancel - Cancel any currently executing HW scan
+ */
+int
+il_scan_cancel(struct il_priv *il)
+{
+ D_SCAN("Queuing abort scan\n");
+ queue_work(il->workqueue, &il->abort_scan);
+ return 0;
+}
+EXPORT_SYMBOL(il_scan_cancel);
+
+/**
+ * il_scan_cancel_timeout - Cancel any currently executing HW scan
+ * @ms: amount of time to wait (in milliseconds) for scan to abort
+ *
+ */
+int
+il_scan_cancel_timeout(struct il_priv *il, unsigned long ms)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(ms);
+
+ lockdep_assert_held(&il->mutex);
+
+ D_SCAN("Scan cancel timeout\n");
+
+ il_do_scan_abort(il);
+
+ while (time_before_eq(jiffies, timeout)) {
+ if (!test_bit(S_SCAN_HW, &il->status))
+ break;
+ msleep(20);
+ }
+
+ return test_bit(S_SCAN_HW, &il->status);
+}
+EXPORT_SYMBOL(il_scan_cancel_timeout);
+
+/* Service response to C_SCAN (0x80) */
+static void
+il_hdl_scan(struct il_priv *il, struct il_rx_buf *rxb)
+{
+#ifdef CONFIG_IWLEGACY_DEBUG
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ struct il_scanreq_notification *notif =
+ (struct il_scanreq_notification *)pkt->u.raw;
+
+ D_SCAN("Scan request status = 0x%x\n", notif->status);
+#endif
+}
+
+/* Service N_SCAN_START (0x82) */
+static void
+il_hdl_scan_start(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ struct il_scanstart_notification *notif =
+ (struct il_scanstart_notification *)pkt->u.raw;
+ il->scan_start_tsf = le32_to_cpu(notif->tsf_low);
+ D_SCAN("Scan start: " "%d [802.11%s] "
+ "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n", notif->channel,
+ notif->band ? "bg" : "a", le32_to_cpu(notif->tsf_high),
+ le32_to_cpu(notif->tsf_low), notif->status, notif->beacon_timer);
+}
+
+/* Service N_SCAN_RESULTS (0x83) */
+static void
+il_hdl_scan_results(struct il_priv *il, struct il_rx_buf *rxb)
+{
+#ifdef CONFIG_IWLEGACY_DEBUG
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ struct il_scanresults_notification *notif =
+ (struct il_scanresults_notification *)pkt->u.raw;
+
+ D_SCAN("Scan ch.res: " "%d [802.11%s] " "(TSF: 0x%08X:%08X) - %d "
+ "elapsed=%lu usec\n", notif->channel, notif->band ? "bg" : "a",
+ le32_to_cpu(notif->tsf_high), le32_to_cpu(notif->tsf_low),
+ le32_to_cpu(notif->stats[0]),
+ le32_to_cpu(notif->tsf_low) - il->scan_start_tsf);
+#endif
+}
+
+/* Service N_SCAN_COMPLETE (0x84) */
+static void
+il_hdl_scan_complete(struct il_priv *il, struct il_rx_buf *rxb)
+{
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ struct il_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
+#endif
+
+ D_SCAN("Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
+ scan_notif->scanned_channels, scan_notif->tsf_low,
+ scan_notif->tsf_high, scan_notif->status);
+
+ /* The HW is no longer scanning */
+ clear_bit(S_SCAN_HW, &il->status);
+
+ D_SCAN("Scan on %sGHz took %dms\n",
+ (il->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2",
+ jiffies_to_msecs(jiffies - il->scan_start));
+
+ queue_work(il->workqueue, &il->scan_completed);
+}
+
+void
+il_setup_rx_scan_handlers(struct il_priv *il)
+{
+ /* scan handlers */
+ il->handlers[C_SCAN] = il_hdl_scan;
+ il->handlers[N_SCAN_START] = il_hdl_scan_start;
+ il->handlers[N_SCAN_RESULTS] = il_hdl_scan_results;
+ il->handlers[N_SCAN_COMPLETE] = il_hdl_scan_complete;
+}
+EXPORT_SYMBOL(il_setup_rx_scan_handlers);
+
+inline u16
+il_get_active_dwell_time(struct il_priv *il, enum ieee80211_band band,
+ u8 n_probes)
+{
+ if (band == IEEE80211_BAND_5GHZ)
+ return IL_ACTIVE_DWELL_TIME_52 +
+ IL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1);
+ else
+ return IL_ACTIVE_DWELL_TIME_24 +
+ IL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1);
+}
+EXPORT_SYMBOL(il_get_active_dwell_time);
+
+u16
+il_get_passive_dwell_time(struct il_priv *il, enum ieee80211_band band,
+ struct ieee80211_vif *vif)
+{
+ struct il_rxon_context *ctx = &il->ctx;
+ u16 value;
+
+ u16 passive =
+ (band ==
+ IEEE80211_BAND_2GHZ) ? IL_PASSIVE_DWELL_BASE +
+ IL_PASSIVE_DWELL_TIME_24 : IL_PASSIVE_DWELL_BASE +
+ IL_PASSIVE_DWELL_TIME_52;
+
+ if (il_is_any_associated(il)) {
+ /*
+ * If we're associated, we clamp the maximum passive
+ * dwell time to be 98% of the smallest beacon interval
+ * (minus 2 * channel tune time)
+ */
+ value = ctx->vif ? ctx->vif->bss_conf.beacon_int : 0;
+ if (value > IL_PASSIVE_DWELL_BASE || !value)
+ value = IL_PASSIVE_DWELL_BASE;
+ value = (value * 98) / 100 - IL_CHANNEL_TUNE_TIME * 2;
+ passive = min(value, passive);
+ }
+
+ return passive;
+}
+EXPORT_SYMBOL(il_get_passive_dwell_time);
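+
+/* Worked example (illustrative only, based on il_get_passive_dwell_time()
+ * above): on 2.4 GHz the base passive dwell is 100 + 20 = 120 msec. When
+ * associated with an AP using a beacon interval of 100, the dwell is
+ * clamped to (100 * 98) / 100 - 2 * IL_CHANNEL_TUNE_TIME = 98 - 10 = 88
+ * msec, so passive scanning does not starve the association of beacons. */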
+
+void
+il_init_scan_params(struct il_priv *il)
+{
+ u8 ant_idx = fls(il->hw_params.valid_tx_ant) - 1;
+ if (!il->scan_tx_ant[IEEE80211_BAND_5GHZ])
+ il->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx;
+ if (!il->scan_tx_ant[IEEE80211_BAND_2GHZ])
+ il->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx;
+}
+EXPORT_SYMBOL(il_init_scan_params);
+
+static int
+il_scan_initiate(struct il_priv *il, struct ieee80211_vif *vif)
+{
+ int ret;
+
+ lockdep_assert_held(&il->mutex);
+
+ if (WARN_ON(!il->cfg->ops->utils->request_scan))
+ return -EOPNOTSUPP;
+
+ cancel_delayed_work(&il->scan_check);
+
+ if (!il_is_ready_rf(il)) {
+ IL_WARN("Request scan called when driver not ready.\n");
+ return -EIO;
+ }
+
+ if (test_bit(S_SCAN_HW, &il->status)) {
+		D_SCAN("Multiple concurrent scan requests.\n");
+ return -EBUSY;
+ }
+
+ if (test_bit(S_SCAN_ABORTING, &il->status)) {
+ D_SCAN("Scan request while abort pending.\n");
+ return -EBUSY;
+ }
+
+ D_SCAN("Starting scan...\n");
+
+ set_bit(S_SCANNING, &il->status);
+ il->scan_start = jiffies;
+
+ ret = il->cfg->ops->utils->request_scan(il, vif);
+ if (ret) {
+ clear_bit(S_SCANNING, &il->status);
+ return ret;
+ }
+
+ queue_delayed_work(il->workqueue, &il->scan_check,
+ IL_SCAN_CHECK_WATCHDOG);
+
+ return 0;
+}
+
+int
+il_mac_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct cfg80211_scan_request *req)
+{
+ struct il_priv *il = hw->priv;
+ int ret;
+
+ D_MAC80211("enter\n");
+
+ if (req->n_channels == 0)
+ return -EINVAL;
+
+ mutex_lock(&il->mutex);
+
+ if (test_bit(S_SCANNING, &il->status)) {
+ D_SCAN("Scan already in progress.\n");
+ ret = -EAGAIN;
+ goto out_unlock;
+ }
+
+ /* mac80211 will only ask for one band at a time */
+ il->scan_request = req;
+ il->scan_vif = vif;
+ il->scan_band = req->channels[0]->band;
+
+ ret = il_scan_initiate(il, vif);
+
+ D_MAC80211("leave\n");
+
+out_unlock:
+ mutex_unlock(&il->mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL(il_mac_hw_scan);
+
+static void
+il_bg_scan_check(struct work_struct *data)
+{
+ struct il_priv *il =
+ container_of(data, struct il_priv, scan_check.work);
+
+ D_SCAN("Scan check work\n");
+
+	/* Since we got here, the firmware did not finish the scan and
+	 * is most likely in bad shape, so don't bother sending an abort
+	 * command; just force scan complete to mac80211 */
+ mutex_lock(&il->mutex);
+ il_force_scan_end(il);
+ mutex_unlock(&il->mutex);
+}
+
+/**
+ * il_fill_probe_req - fill in all required fields and IE for probe request
+ */
+
+u16
+il_fill_probe_req(struct il_priv *il, struct ieee80211_mgmt *frame,
+ const u8 *ta, const u8 *ies, int ie_len, int left)
+{
+ int len = 0;
+ u8 *pos = NULL;
+
+ /* Make sure there is enough space for the probe request,
+ * two mandatory IEs and the data */
+ left -= 24;
+ if (left < 0)
+ return 0;
+
+ frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
+ memcpy(frame->da, il_bcast_addr, ETH_ALEN);
+ memcpy(frame->sa, ta, ETH_ALEN);
+ memcpy(frame->bssid, il_bcast_addr, ETH_ALEN);
+ frame->seq_ctrl = 0;
+
+ len += 24;
+
+ /* ...next IE... */
+ pos = &frame->u.probe_req.variable[0];
+
+ /* fill in our indirect SSID IE */
+ left -= 2;
+ if (left < 0)
+ return 0;
+ *pos++ = WLAN_EID_SSID;
+ *pos++ = 0;
+
+ len += 2;
+
+ if (WARN_ON(left < ie_len))
+ return len;
+
+ if (ies && ie_len) {
+ memcpy(pos, ies, ie_len);
+ len += ie_len;
+ }
+
+ return (u16) len;
+}
+EXPORT_SYMBOL(il_fill_probe_req);
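+
+/* Sketch of the buffer layout produced by il_fill_probe_req() above
+ * (illustrative only): a 24-byte management header (probe request with
+ * broadcast DA/BSSID), followed by a 2-byte wildcard SSID IE (WLAN_EID_SSID
+ * with zero length), followed by the caller-supplied IEs; with ie_len = 0
+ * the returned length is therefore 26 bytes. */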
+
+static void
+il_bg_abort_scan(struct work_struct *work)
+{
+ struct il_priv *il = container_of(work, struct il_priv, abort_scan);
+
+ D_SCAN("Abort scan work\n");
+
+	/* We keep the scan_check work queued in case the firmware does not
+	 * report back with a scan completed notification */
+ mutex_lock(&il->mutex);
+ il_scan_cancel_timeout(il, 200);
+ mutex_unlock(&il->mutex);
+}
+
+static void
+il_bg_scan_completed(struct work_struct *work)
+{
+ struct il_priv *il = container_of(work, struct il_priv, scan_completed);
+ bool aborted;
+
+ D_SCAN("Completed scan.\n");
+
+ cancel_delayed_work(&il->scan_check);
+
+ mutex_lock(&il->mutex);
+
+ aborted = test_and_clear_bit(S_SCAN_ABORTING, &il->status);
+ if (aborted)
+ D_SCAN("Aborted scan completed.\n");
+
+ if (!test_and_clear_bit(S_SCANNING, &il->status)) {
+ D_SCAN("Scan already completed.\n");
+ goto out_settings;
+ }
+
+ il_complete_scan(il, aborted);
+
+out_settings:
+ /* Can we still talk to firmware ? */
+ if (!il_is_ready_rf(il))
+ goto out;
+
+ /*
+ * We do not commit power settings while scan is pending,
+ * do it now if the settings changed.
+ */
+ il_power_set_mode(il, &il->power_data.sleep_cmd_next, false);
+ il_set_tx_power(il, il->tx_power_next, false);
+
+ il->cfg->ops->utils->post_scan(il);
+
+out:
+ mutex_unlock(&il->mutex);
+}
+
+void
+il_setup_scan_deferred_work(struct il_priv *il)
+{
+ INIT_WORK(&il->scan_completed, il_bg_scan_completed);
+ INIT_WORK(&il->abort_scan, il_bg_abort_scan);
+ INIT_DELAYED_WORK(&il->scan_check, il_bg_scan_check);
+}
+EXPORT_SYMBOL(il_setup_scan_deferred_work);
+
+void
+il_cancel_scan_deferred_work(struct il_priv *il)
+{
+ cancel_work_sync(&il->abort_scan);
+ cancel_work_sync(&il->scan_completed);
+
+ if (cancel_delayed_work_sync(&il->scan_check)) {
+ mutex_lock(&il->mutex);
+ il_force_scan_end(il);
+ mutex_unlock(&il->mutex);
+ }
+}
+EXPORT_SYMBOL(il_cancel_scan_deferred_work);
+
+/* il->sta_lock must be held */
+static void
+il_sta_ucode_activate(struct il_priv *il, u8 sta_id)
+{
+
+ if (!(il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE))
+ IL_ERR("ACTIVATE a non DRIVER active station id %u addr %pM\n",
+ sta_id, il->stations[sta_id].sta.sta.addr);
+
+ if (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) {
+ D_ASSOC("STA id %u addr %pM already present"
+ " in uCode (according to driver)\n", sta_id,
+ il->stations[sta_id].sta.sta.addr);
+ } else {
+ il->stations[sta_id].used |= IL_STA_UCODE_ACTIVE;
+ D_ASSOC("Added STA id %u addr %pM to uCode\n", sta_id,
+ il->stations[sta_id].sta.sta.addr);
+ }
+}
+
+static int
+il_process_add_sta_resp(struct il_priv *il, struct il_addsta_cmd *addsta,
+ struct il_rx_pkt *pkt, bool sync)
+{
+ u8 sta_id = addsta->sta.sta_id;
+ unsigned long flags;
+ int ret = -EIO;
+
+ if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
+ IL_ERR("Bad return from C_ADD_STA (0x%08X)\n", pkt->hdr.flags);
+ return ret;
+ }
+
+ D_INFO("Processing response for adding station %u\n", sta_id);
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+
+ switch (pkt->u.add_sta.status) {
+ case ADD_STA_SUCCESS_MSK:
+ D_INFO("C_ADD_STA PASSED\n");
+ il_sta_ucode_activate(il, sta_id);
+ ret = 0;
+ break;
+ case ADD_STA_NO_ROOM_IN_TBL:
+ IL_ERR("Adding station %d failed, no room in table.\n", sta_id);
+ break;
+ case ADD_STA_NO_BLOCK_ACK_RESOURCE:
+ IL_ERR("Adding station %d failed, no block ack resource.\n",
+ sta_id);
+ break;
+ case ADD_STA_MODIFY_NON_EXIST_STA:
+ IL_ERR("Attempting to modify non-existing station %d\n",
+ sta_id);
+ break;
+ default:
+ D_ASSOC("Received C_ADD_STA:(0x%08X)\n", pkt->u.add_sta.status);
+ break;
+ }
+
+ D_INFO("%s station id %u addr %pM\n",
+ il->stations[sta_id].sta.mode ==
+ STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", sta_id,
+ il->stations[sta_id].sta.sta.addr);
+
+ /*
+ * XXX: The MAC address in the command buffer is often changed from
+ * the original sent to the device. That is, the MAC address
+ * written to the command buffer often is not the same MAC address
+ * read from the command buffer when the command returns. This
+ * issue has not yet been resolved and this debugging is left to
+ * observe the problem.
+ */
+ D_INFO("%s station according to cmd buffer %pM\n",
+ il->stations[sta_id].sta.mode ==
+ STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", addsta->sta.addr);
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ return ret;
+}
+
+static void
+il_add_sta_callback(struct il_priv *il, struct il_device_cmd *cmd,
+ struct il_rx_pkt *pkt)
+{
+ struct il_addsta_cmd *addsta = (struct il_addsta_cmd *)cmd->cmd.payload;
+
+ il_process_add_sta_resp(il, addsta, pkt, false);
+
+}
+
+int
+il_send_add_sta(struct il_priv *il, struct il_addsta_cmd *sta, u8 flags)
+{
+ struct il_rx_pkt *pkt = NULL;
+ int ret = 0;
+ u8 data[sizeof(*sta)];
+ struct il_host_cmd cmd = {
+ .id = C_ADD_STA,
+ .flags = flags,
+ .data = data,
+ };
+ u8 sta_id __maybe_unused = sta->sta.sta_id;
+
+ D_INFO("Adding sta %u (%pM) %ssynchronously\n", sta_id, sta->sta.addr,
+ flags & CMD_ASYNC ? "a" : "");
+
+ if (flags & CMD_ASYNC)
+ cmd.callback = il_add_sta_callback;
+ else {
+ cmd.flags |= CMD_WANT_SKB;
+ might_sleep();
+ }
+
+ cmd.len = il->cfg->ops->utils->build_addsta_hcmd(sta, data);
+ ret = il_send_cmd(il, &cmd);
+
+ if (ret || (flags & CMD_ASYNC))
+ return ret;
+
+ if (ret == 0) {
+ pkt = (struct il_rx_pkt *)cmd.reply_page;
+ ret = il_process_add_sta_resp(il, sta, pkt, true);
+ }
+ il_free_pages(il, cmd.reply_page);
+
+ return ret;
+}
+EXPORT_SYMBOL(il_send_add_sta);
+
+static void
+il_set_ht_add_station(struct il_priv *il, u8 idx, struct ieee80211_sta *sta,
+ struct il_rxon_context *ctx)
+{
+ struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap;
+ __le32 sta_flags;
+ u8 mimo_ps_mode;
+
+ if (!sta || !sta_ht_inf->ht_supported)
+ goto done;
+
+ mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2;
+ D_ASSOC("spatial multiplexing power save mode: %s\n",
+ (mimo_ps_mode == WLAN_HT_CAP_SM_PS_STATIC) ? "static" :
+ (mimo_ps_mode == WLAN_HT_CAP_SM_PS_DYNAMIC) ? "dynamic" :
+ "disabled");
+
+ sta_flags = il->stations[idx].sta.station_flags;
+
+ sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);
+
+ switch (mimo_ps_mode) {
+ case WLAN_HT_CAP_SM_PS_STATIC:
+ sta_flags |= STA_FLG_MIMO_DIS_MSK;
+ break;
+ case WLAN_HT_CAP_SM_PS_DYNAMIC:
+ sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK;
+ break;
+ case WLAN_HT_CAP_SM_PS_DISABLED:
+ break;
+ default:
+ IL_WARN("Invalid MIMO PS mode %d\n", mimo_ps_mode);
+ break;
+ }
+
+ sta_flags |=
+ cpu_to_le32((u32) sta_ht_inf->
+ ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS);
+
+ sta_flags |=
+ cpu_to_le32((u32) sta_ht_inf->
+ ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);
+
+ if (il_is_ht40_tx_allowed(il, ctx, &sta->ht_cap))
+ sta_flags |= STA_FLG_HT40_EN_MSK;
+ else
+ sta_flags &= ~STA_FLG_HT40_EN_MSK;
+
+ il->stations[idx].sta.station_flags = sta_flags;
+done:
+ return;
+}
+
+/**
+ * il_prep_station - Prepare station information for addition
+ *
+ * should be called with sta_lock held
+ */
+u8
+il_prep_station(struct il_priv *il, struct il_rxon_context *ctx,
+ const u8 *addr, bool is_ap, struct ieee80211_sta *sta)
+{
+ struct il_station_entry *station;
+ int i;
+ u8 sta_id = IL_INVALID_STATION;
+ u16 rate;
+
+ if (is_ap)
+ sta_id = ctx->ap_sta_id;
+ else if (is_broadcast_ether_addr(addr))
+ sta_id = ctx->bcast_sta_id;
+ else
+ for (i = IL_STA_ID; i < il->hw_params.max_stations; i++) {
+ if (!compare_ether_addr
+ (il->stations[i].sta.sta.addr, addr)) {
+ sta_id = i;
+ break;
+ }
+
+ if (!il->stations[i].used &&
+ sta_id == IL_INVALID_STATION)
+ sta_id = i;
+ }
+
+ /*
+ * These two conditions have the same outcome, but keep them
+ * separate
+ */
+ if (unlikely(sta_id == IL_INVALID_STATION))
+ return sta_id;
+
+ /*
+ * uCode is not able to deal with multiple requests to add a
+ * station. Keep track if one is in progress so that we do not send
+ * another.
+ */
+ if (il->stations[sta_id].used & IL_STA_UCODE_INPROGRESS) {
+ D_INFO("STA %d already in process of being added.\n", sta_id);
+ return sta_id;
+ }
+
+ if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) &&
+ (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) &&
+ !compare_ether_addr(il->stations[sta_id].sta.sta.addr, addr)) {
+ D_ASSOC("STA %d (%pM) already added, not adding again.\n",
+ sta_id, addr);
+ return sta_id;
+ }
+
+ station = &il->stations[sta_id];
+ station->used = IL_STA_DRIVER_ACTIVE;
+ D_ASSOC("Add STA to driver ID %d: %pM\n", sta_id, addr);
+ il->num_stations++;
+
+ /* Set up the C_ADD_STA command to send to device */
+ memset(&station->sta, 0, sizeof(struct il_addsta_cmd));
+ memcpy(station->sta.sta.addr, addr, ETH_ALEN);
+ station->sta.mode = 0;
+ station->sta.sta.sta_id = sta_id;
+ station->sta.station_flags = ctx->station_flags;
+ station->ctxid = ctx->ctxid;
+
+ if (sta) {
+ struct il_station_priv_common *sta_priv;
+
+ sta_priv = (void *)sta->drv_priv;
+ sta_priv->ctx = ctx;
+ }
+
+ /*
+ * OK to call unconditionally, since local stations (IBSS BSSID
+ * STA and broadcast STA) pass in a NULL sta, and mac80211
+ * doesn't allow HT IBSS.
+ */
+ il_set_ht_add_station(il, sta_id, sta, ctx);
+
+ /* 3945 only */
+ rate = (il->band == IEEE80211_BAND_5GHZ) ? RATE_6M_PLCP : RATE_1M_PLCP;
+ /* Turn on both antennas for the station... */
+ station->sta.rate_n_flags = cpu_to_le16(rate | RATE_MCS_ANT_AB_MSK);
+
+ return sta_id;
+
+}
+EXPORT_SYMBOL_GPL(il_prep_station);
+
+#define STA_WAIT_TIMEOUT (HZ/2)
+
+/**
+ * il_add_station_common - add a station to the driver and uCode station tables
+ */
+int
+il_add_station_common(struct il_priv *il, struct il_rxon_context *ctx,
+ const u8 *addr, bool is_ap, struct ieee80211_sta *sta,
+ u8 *sta_id_r)
+{
+ unsigned long flags_spin;
+ int ret = 0;
+ u8 sta_id;
+ struct il_addsta_cmd sta_cmd;
+
+ *sta_id_r = 0;
+ spin_lock_irqsave(&il->sta_lock, flags_spin);
+ sta_id = il_prep_station(il, ctx, addr, is_ap, sta);
+ if (sta_id == IL_INVALID_STATION) {
+ IL_ERR("Unable to prepare station %pM for addition\n", addr);
+ spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+ return -EINVAL;
+ }
+
+ /*
+ * uCode is not able to deal with multiple requests to add a
+ * station. Keep track if one is in progress so that we do not send
+ * another.
+ */
+ if (il->stations[sta_id].used & IL_STA_UCODE_INPROGRESS) {
+ D_INFO("STA %d already in process of being added.\n", sta_id);
+ spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+ return -EEXIST;
+ }
+
+ if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) &&
+ (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE)) {
+ D_ASSOC("STA %d (%pM) already added, not adding again.\n",
+ sta_id, addr);
+ spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+ return -EEXIST;
+ }
+
+ il->stations[sta_id].used |= IL_STA_UCODE_INPROGRESS;
+ memcpy(&sta_cmd, &il->stations[sta_id].sta,
+ sizeof(struct il_addsta_cmd));
+ spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+
+ /* Add station to device's station table */
+ ret = il_send_add_sta(il, &sta_cmd, CMD_SYNC);
+ if (ret) {
+ spin_lock_irqsave(&il->sta_lock, flags_spin);
+ IL_ERR("Adding station %pM failed.\n",
+ il->stations[sta_id].sta.sta.addr);
+ il->stations[sta_id].used &= ~IL_STA_DRIVER_ACTIVE;
+ il->stations[sta_id].used &= ~IL_STA_UCODE_INPROGRESS;
+ spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+ }
+ *sta_id_r = sta_id;
+ return ret;
+}
+EXPORT_SYMBOL(il_add_station_common);
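+
+/* Station life-cycle sketch (illustrative summary of the flags used above):
+ * il_prep_station() marks a free slot IL_STA_DRIVER_ACTIVE,
+ * il_add_station_common() adds IL_STA_UCODE_INPROGRESS while the C_ADD_STA
+ * command is in flight, and il_sta_ucode_activate() sets IL_STA_UCODE_ACTIVE
+ * once the uCode acknowledges the station. il_remove_station() and
+ * il_sta_ucode_deactivate() clear the bits again on removal, while
+ * il_clear_ucode_stations() below drops only the uCode-active bits. */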
+
+/**
+ * il_sta_ucode_deactivate - deactivate ucode status for a station
+ *
+ * il->sta_lock must be held
+ */
+static void
+il_sta_ucode_deactivate(struct il_priv *il, u8 sta_id)
+{
+ /* Ucode must be active and driver must be non active */
+ if ((il->stations[sta_id].
+ used & (IL_STA_UCODE_ACTIVE | IL_STA_DRIVER_ACTIVE)) !=
+ IL_STA_UCODE_ACTIVE)
+ IL_ERR("removed non active STA %u\n", sta_id);
+
+ il->stations[sta_id].used &= ~IL_STA_UCODE_ACTIVE;
+
+ memset(&il->stations[sta_id], 0, sizeof(struct il_station_entry));
+ D_ASSOC("Removed STA %u\n", sta_id);
+}
+
+static int
+il_send_remove_station(struct il_priv *il, const u8 * addr, int sta_id,
+ bool temporary)
+{
+ struct il_rx_pkt *pkt;
+ int ret;
+
+ unsigned long flags_spin;
+ struct il_rem_sta_cmd rm_sta_cmd;
+
+ struct il_host_cmd cmd = {
+ .id = C_REM_STA,
+ .len = sizeof(struct il_rem_sta_cmd),
+ .flags = CMD_SYNC,
+ .data = &rm_sta_cmd,
+ };
+
+ memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
+ rm_sta_cmd.num_sta = 1;
+ memcpy(&rm_sta_cmd.addr, addr, ETH_ALEN);
+
+ cmd.flags |= CMD_WANT_SKB;
+
+ ret = il_send_cmd(il, &cmd);
+
+ if (ret)
+ return ret;
+
+ pkt = (struct il_rx_pkt *)cmd.reply_page;
+ if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
+ IL_ERR("Bad return from C_REM_STA (0x%08X)\n", pkt->hdr.flags);
+ ret = -EIO;
+ }
+
+ if (!ret) {
+ switch (pkt->u.rem_sta.status) {
+ case REM_STA_SUCCESS_MSK:
+ if (!temporary) {
+ spin_lock_irqsave(&il->sta_lock, flags_spin);
+ il_sta_ucode_deactivate(il, sta_id);
+ spin_unlock_irqrestore(&il->sta_lock,
+ flags_spin);
+ }
+ D_ASSOC("C_REM_STA PASSED\n");
+ break;
+ default:
+ ret = -EIO;
+ IL_ERR("C_REM_STA failed\n");
+ break;
+ }
+ }
+ il_free_pages(il, cmd.reply_page);
+
+ return ret;
+}
+
+/**
+ * il_remove_station - Remove driver's knowledge of station.
+ */
+int
+il_remove_station(struct il_priv *il, const u8 sta_id, const u8 * addr)
+{
+ unsigned long flags;
+
+ if (!il_is_ready(il)) {
+ D_INFO("Unable to remove station %pM, device not ready.\n",
+ addr);
+ /*
+ * It is typical for stations to be removed when we are
+ * going down. Return success since device will be down
+ * soon anyway
+ */
+ return 0;
+ }
+
+ D_ASSOC("Removing STA from driver:%d %pM\n", sta_id, addr);
+
+ if (WARN_ON(sta_id == IL_INVALID_STATION))
+ return -EINVAL;
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+
+ if (!(il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE)) {
+ D_INFO("Removing %pM but non DRIVER active\n", addr);
+ goto out_err;
+ }
+
+ if (!(il->stations[sta_id].used & IL_STA_UCODE_ACTIVE)) {
+ D_INFO("Removing %pM but non UCODE active\n", addr);
+ goto out_err;
+ }
+
+ if (il->stations[sta_id].used & IL_STA_LOCAL) {
+ kfree(il->stations[sta_id].lq);
+ il->stations[sta_id].lq = NULL;
+ }
+
+ il->stations[sta_id].used &= ~IL_STA_DRIVER_ACTIVE;
+
+ il->num_stations--;
+
+ BUG_ON(il->num_stations < 0);
+
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ return il_send_remove_station(il, addr, sta_id, false);
+out_err:
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(il_remove_station);
+
+/**
+ * il_clear_ucode_stations - clear ucode station table bits
+ *
+ * This function clears all the bits in the driver indicating
+ * which stations are active in the ucode. Call when something
+ * other than explicit station management would cause this in
+ * the ucode, e.g. unassociated RXON.
+ */
+void
+il_clear_ucode_stations(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ int i;
+ unsigned long flags_spin;
+ bool cleared = false;
+
+ D_INFO("Clearing ucode stations in driver\n");
+
+ spin_lock_irqsave(&il->sta_lock, flags_spin);
+ for (i = 0; i < il->hw_params.max_stations; i++) {
+ if (ctx && ctx->ctxid != il->stations[i].ctxid)
+ continue;
+
+ if (il->stations[i].used & IL_STA_UCODE_ACTIVE) {
+ D_INFO("Clearing ucode active for station %d\n", i);
+ il->stations[i].used &= ~IL_STA_UCODE_ACTIVE;
+ cleared = true;
+ }
+ }
+ spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+
+ if (!cleared)
+ D_INFO("No active stations found to be cleared\n");
+}
+EXPORT_SYMBOL(il_clear_ucode_stations);
+
+/**
+ * il_restore_stations() - Restore driver known stations to device
+ *
+ * All stations considered active by the driver, but not present in the
+ * ucode, are restored.
+ *
+ * Function sleeps.
+ */
+void
+il_restore_stations(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ struct il_addsta_cmd sta_cmd;
+ struct il_link_quality_cmd lq;
+ unsigned long flags_spin;
+ int i;
+ bool found = false;
+ int ret;
+ bool send_lq;
+
+ if (!il_is_ready(il)) {
+ D_INFO("Not ready yet, not restoring any stations.\n");
+ return;
+ }
+
+ D_ASSOC("Restoring all known stations ... start.\n");
+ spin_lock_irqsave(&il->sta_lock, flags_spin);
+ for (i = 0; i < il->hw_params.max_stations; i++) {
+ if (ctx->ctxid != il->stations[i].ctxid)
+ continue;
+ if ((il->stations[i].used & IL_STA_DRIVER_ACTIVE) &&
+ !(il->stations[i].used & IL_STA_UCODE_ACTIVE)) {
+ D_ASSOC("Restoring sta %pM\n",
+ il->stations[i].sta.sta.addr);
+ il->stations[i].sta.mode = 0;
+ il->stations[i].used |= IL_STA_UCODE_INPROGRESS;
+ found = true;
+ }
+ }
+
+ for (i = 0; i < il->hw_params.max_stations; i++) {
+ if ((il->stations[i].used & IL_STA_UCODE_INPROGRESS)) {
+ memcpy(&sta_cmd, &il->stations[i].sta,
+ sizeof(struct il_addsta_cmd));
+ send_lq = false;
+ if (il->stations[i].lq) {
+ memcpy(&lq, il->stations[i].lq,
+ sizeof(struct il_link_quality_cmd));
+ send_lq = true;
+ }
+ spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+ ret = il_send_add_sta(il, &sta_cmd, CMD_SYNC);
+ if (ret) {
+ spin_lock_irqsave(&il->sta_lock, flags_spin);
+ IL_ERR("Adding station %pM failed.\n",
+ il->stations[i].sta.sta.addr);
+ il->stations[i].used &= ~IL_STA_DRIVER_ACTIVE;
+ il->stations[i].used &=
+ ~IL_STA_UCODE_INPROGRESS;
+ spin_unlock_irqrestore(&il->sta_lock,
+ flags_spin);
+ }
+ /*
+ * Rate scaling has already been initialized, send
+ * current LQ command
+ */
+ if (send_lq)
+ il_send_lq_cmd(il, ctx, &lq, CMD_SYNC, true);
+ spin_lock_irqsave(&il->sta_lock, flags_spin);
+ il->stations[i].used &= ~IL_STA_UCODE_INPROGRESS;
+ }
+ }
+
+ spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+ if (!found)
+ D_INFO("Restoring all known stations"
+ " .... no stations to be restored.\n");
+ else
+ D_INFO("Restoring all known stations" " .... complete.\n");
+}
+EXPORT_SYMBOL(il_restore_stations);
+
+int
+il_get_free_ucode_key_idx(struct il_priv *il)
+{
+ int i;
+
+ for (i = 0; i < il->sta_key_max_num; i++)
+ if (!test_and_set_bit(i, &il->ucode_key_table))
+ return i;
+
+ return WEP_INVALID_OFFSET;
+}
+EXPORT_SYMBOL(il_get_free_ucode_key_idx);
+
+void
+il_dealloc_bcast_stations(struct il_priv *il)
+{
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ for (i = 0; i < il->hw_params.max_stations; i++) {
+ if (!(il->stations[i].used & IL_STA_BCAST))
+ continue;
+
+ il->stations[i].used &= ~IL_STA_UCODE_ACTIVE;
+ il->num_stations--;
+ BUG_ON(il->num_stations < 0);
+ kfree(il->stations[i].lq);
+ il->stations[i].lq = NULL;
+ }
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+}
+EXPORT_SYMBOL_GPL(il_dealloc_bcast_stations);
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+static void
+il_dump_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq)
+{
+ int i;
+ D_RATE("lq station id 0x%x\n", lq->sta_id);
+ D_RATE("lq ant 0x%X 0x%X\n", lq->general_params.single_stream_ant_msk,
+ lq->general_params.dual_stream_ant_msk);
+
+ for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
+ D_RATE("lq idx %d 0x%X\n", i, lq->rs_table[i].rate_n_flags);
+}
+#else
+static inline void
+il_dump_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq)
+{
+}
+#endif
+
+/**
+ * il_is_lq_table_valid() - Test one aspect of LQ cmd for validity
+ *
+ * It sometimes happens that when an HT rate has been in use and we
+ * lose connectivity with the AP, mac80211 will first tell us that the
+ * current channel is not HT anymore before removing the station. In such a
+ * scenario the RXON flags will be updated to indicate we are not
+ * communicating HT anymore, but the LQ command may still contain HT rates.
+ * Test for this to prevent the driver from sending the LQ command between
+ * the time the RXON flags are updated and the time the LQ command is updated.
+ */
+static bool
+il_is_lq_table_valid(struct il_priv *il, struct il_rxon_context *ctx,
+ struct il_link_quality_cmd *lq)
+{
+ int i;
+
+ if (ctx->ht.enabled)
+ return true;
+
+ D_INFO("Channel %u is not an HT channel\n", ctx->active.channel);
+ for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
+ if (le32_to_cpu(lq->rs_table[i].rate_n_flags) & RATE_MCS_HT_MSK) {
+ D_INFO("idx %d of LQ expects HT channel\n", i);
+ return false;
+ }
+ }
+ return true;
+}
+
+/**
+ * il_send_lq_cmd() - Send link quality command
+ * @init: This command is sent as part of station initialization right
+ * after station has been added.
+ *
+ * The link quality command is sent as the last step of station creation.
+ * This is the special case in which init is set and we call a callback in
+ * this case to clear the state indicating that station creation is in
+ * progress.
+ */
+int
+il_send_lq_cmd(struct il_priv *il, struct il_rxon_context *ctx,
+ struct il_link_quality_cmd *lq, u8 flags, bool init)
+{
+ int ret = 0;
+ unsigned long flags_spin;
+
+ struct il_host_cmd cmd = {
+ .id = C_TX_LINK_QUALITY_CMD,
+ .len = sizeof(struct il_link_quality_cmd),
+ .flags = flags,
+ .data = lq,
+ };
+
+ if (WARN_ON(lq->sta_id == IL_INVALID_STATION))
+ return -EINVAL;
+
+ spin_lock_irqsave(&il->sta_lock, flags_spin);
+ if (!(il->stations[lq->sta_id].used & IL_STA_DRIVER_ACTIVE)) {
+ spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+ return -EINVAL;
+ }
+ spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+
+ il_dump_lq_cmd(il, lq);
+ BUG_ON(init && (cmd.flags & CMD_ASYNC));
+
+ if (il_is_lq_table_valid(il, ctx, lq))
+ ret = il_send_cmd(il, &cmd);
+ else
+ ret = -EINVAL;
+
+ if (cmd.flags & CMD_ASYNC)
+ return ret;
+
+ if (init) {
+ D_INFO("init LQ command complete,"
+ " clearing sta addition status for sta %d\n",
+ lq->sta_id);
+ spin_lock_irqsave(&il->sta_lock, flags_spin);
+ il->stations[lq->sta_id].used &= ~IL_STA_UCODE_INPROGRESS;
+ spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+ }
+ return ret;
+}
+EXPORT_SYMBOL(il_send_lq_cmd);
+
+int
+il_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct il_priv *il = hw->priv;
+ struct il_station_priv_common *sta_common = (void *)sta->drv_priv;
+ int ret;
+
+ D_INFO("received request to remove station %pM\n", sta->addr);
+ mutex_lock(&il->mutex);
+ D_INFO("proceeding to remove station %pM\n", sta->addr);
+ ret = il_remove_station(il, sta_common->sta_id, sta->addr);
+ if (ret)
+ IL_ERR("Error removing station %pM\n", sta->addr);
+ mutex_unlock(&il->mutex);
+ return ret;
+}
+EXPORT_SYMBOL(il_mac_sta_remove);
+
+/************************** RX-FUNCTIONS ****************************/
+/*
+ * Rx theory of operation
+ *
+ * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
+ * each of which points to Receive Buffers to be filled by the NIC. These get
+ * used not only for Rx frames, but for any command response or notification
+ * from the NIC. The driver and NIC manage the Rx buffers by means
+ * of idxes into the circular buffer.
+ *
+ * Rx Queue Indexes
+ * The host/firmware share two idx registers for managing the Rx buffers.
+ *
+ * The READ idx maps to the first position that the firmware may be writing
+ * to -- the driver can read up to (but not including) this position and get
+ * good data.
+ * The READ idx is managed by the firmware once the card is enabled.
+ *
+ * The WRITE idx maps to the last position the driver has read from -- the
+ * position preceding WRITE is the last slot in which the firmware can place
+ * a packet.
+ *
+ * The queue is empty (no good data) if WRITE = READ - 1, and is full if
+ * WRITE = READ.
+ *
+ * During initialization, the host sets up the READ queue position to the first
+ * IDX position, and WRITE to the last (READ - 1 wrapped)
+ *
+ * When the firmware places a packet in a buffer, it will advance the READ idx
+ * and fire the RX interrupt. The driver can then query the READ idx and
+ * process as many packets as possible, moving the WRITE idx forward as it
+ * resets the Rx queue buffers with new memory.
+ *
+ * The management in the driver is as follows:
+ * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
+ * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
+ * to replenish the iwl->rxq->rx_free.
+ * + In il_rx_replenish (scheduled) if 'processed' != 'read' then the
+ * iwl->rxq is replenished and the READ IDX is updated (updating the
+ * 'processed' and 'read' driver idxes as well)
+ * + A received packet is processed and handed to the kernel network stack,
+ * detached from the iwl->rxq. The driver 'processed' idx is updated.
+ * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
+ * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
+ * IDX is not incremented and iwl->status(RX_STALLED) is set. If there
+ * were enough free buffers and RX_STALLED is set it is cleared.
+ *
+ *
+ * Driver sequence:
+ *
+ * il_rx_queue_alloc() Allocates rx_free
+ * il_rx_replenish() Replenishes rx_free list from rx_used, and calls
+ * il_rx_queue_restock
+ * il_rx_queue_restock() Moves available buffers from rx_free into Rx
+ * queue, updates firmware pointers, and updates
+ * the WRITE idx. If insufficient rx_free buffers
+ * are available, schedules il_rx_replenish
+ *
+ * -- enable interrupts --
+ * ISR - il_rx() Detach il_rx_bufs from pool up to the
+ * READ IDX, detaching the SKB from the pool.
+ * Moves the packet buffer from queue to rx_used.
+ * Calls il_rx_queue_restock to refill any empty
+ * slots.
+ * ...
+ *
+ */
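+
+/* Worked example (illustrative only): with a 256-entry RBD ring
+ * (RX_QUEUE_SIZE is assumed to be 256 here), after initialization READ = 0
+ * and WRITE = 255 (READ - 1 wrapped), i.e. the queue is empty. The firmware
+ * advances READ as it fills buffers; the driver advances WRITE as it
+ * restocks fresh pages, and il_rx_queue_space() below additionally reserves
+ * two slots so the full and empty states are never confused. */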
+
+/**
+ * il_rx_queue_space - Return number of free slots available in queue.
+ */
+int
+il_rx_queue_space(const struct il_rx_queue *q)
+{
+ int s = q->read - q->write;
+ if (s <= 0)
+ s += RX_QUEUE_SIZE;
+ /* keep some buffer to not confuse full and empty queue */
+ s -= 2;
+ if (s < 0)
+ s = 0;
+ return s;
+}
+EXPORT_SYMBOL(il_rx_queue_space);
+
+/**
+ * il_rx_queue_update_write_ptr - Update the write pointer for the RX queue
+ */
+void
+il_rx_queue_update_write_ptr(struct il_priv *il, struct il_rx_queue *q)
+{
+ unsigned long flags;
+ u32 rx_wrt_ptr_reg = il->hw_params.rx_wrt_ptr_reg;
+ u32 reg;
+
+ spin_lock_irqsave(&q->lock, flags);
+
+ if (q->need_update == 0)
+ goto exit_unlock;
+
+ /* If power-saving is in use, make sure device is awake */
+ if (test_bit(S_POWER_PMI, &il->status)) {
+ reg = _il_rd(il, CSR_UCODE_DRV_GP1);
+
+ if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
+ D_INFO("Rx queue requesting wakeup," " GP1 = 0x%x\n",
+ reg);
+ il_set_bit(il, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ goto exit_unlock;
+ }
+
+ q->write_actual = (q->write & ~0x7);
+ il_wr(il, rx_wrt_ptr_reg, q->write_actual);
+
+ /* Else device is assumed to be awake */
+ } else {
+ /* Device expects a multiple of 8 */
+ q->write_actual = (q->write & ~0x7);
+ il_wr(il, rx_wrt_ptr_reg, q->write_actual);
+ }
+
+ q->need_update = 0;
+
+exit_unlock:
+ spin_unlock_irqrestore(&q->lock, flags);
+}
+EXPORT_SYMBOL(il_rx_queue_update_write_ptr);
+
+int
+il_rx_queue_alloc(struct il_priv *il)
+{
+ struct il_rx_queue *rxq = &il->rxq;
+ struct device *dev = &il->pci_dev->dev;
+ int i;
+
+ spin_lock_init(&rxq->lock);
+ INIT_LIST_HEAD(&rxq->rx_free);
+ INIT_LIST_HEAD(&rxq->rx_used);
+
+ /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
+ rxq->bd =
+ dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma,
+ GFP_KERNEL);
+ if (!rxq->bd)
+ goto err_bd;
+
+ rxq->rb_stts =
+ dma_alloc_coherent(dev, sizeof(struct il_rb_status),
+ &rxq->rb_stts_dma, GFP_KERNEL);
+ if (!rxq->rb_stts)
+ goto err_rb;
+
+ /* Fill the rx_used queue with _all_ of the Rx buffers */
+ for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
+ list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
+
+ /* Set us so that we have processed and used all buffers, but have
+ * not restocked the Rx queue with fresh buffers */
+ rxq->read = rxq->write = 0;
+ rxq->write_actual = 0;
+ rxq->free_count = 0;
+ rxq->need_update = 0;
+ return 0;
+
+err_rb:
+ dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
+ rxq->bd_dma);
+err_bd:
+ return -ENOMEM;
+}
+EXPORT_SYMBOL(il_rx_queue_alloc);
+
+void
+il_hdl_spectrum_measurement(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ struct il_spectrum_notification *report = &(pkt->u.spectrum_notif);
+
+ if (!report->state) {
+ D_11H("Spectrum Measure Notification: Start\n");
+ return;
+ }
+
+ memcpy(&il->measure_report, report, sizeof(*report));
+ il->measurement_status |= MEASUREMENT_READY;
+}
+EXPORT_SYMBOL(il_hdl_spectrum_measurement);
+
+/*
+ * returns non-zero if packet should be dropped
+ */
+int
+il_set_decrypted_flag(struct il_priv *il, struct ieee80211_hdr *hdr,
+ u32 decrypt_res, struct ieee80211_rx_status *stats)
+{
+ u16 fc = le16_to_cpu(hdr->frame_control);
+
+ /*
+ * All contexts have the same setting here due to it being
+ * a module parameter, so OK to check any context.
+ */
+ if (il->ctx.active.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK)
+ return 0;
+
+ if (!(fc & IEEE80211_FCTL_PROTECTED))
+ return 0;
+
+ D_RX("decrypt_res:0x%x\n", decrypt_res);
+ switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
+ case RX_RES_STATUS_SEC_TYPE_TKIP:
+ /* The uCode has got a bad phase 1 Key, pushes the packet.
+ * Decryption will be done in SW. */
+ if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
+ RX_RES_STATUS_BAD_KEY_TTAK)
+ break;
+
+ case RX_RES_STATUS_SEC_TYPE_WEP:
+ if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
+ RX_RES_STATUS_BAD_ICV_MIC) {
+ /* bad ICV, the packet is destroyed since the
+ * decryption is inplace, drop it */
+ D_RX("Packet destroyed\n");
+ return -1;
+ }
+ case RX_RES_STATUS_SEC_TYPE_CCMP:
+ if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
+ RX_RES_STATUS_DECRYPT_OK) {
+ D_RX("hw decrypt successfully!!!\n");
+ stats->flag |= RX_FLAG_DECRYPTED;
+ }
+ break;
+
+ default:
+ break;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(il_set_decrypted_flag);
+
+/**
+ * il_txq_update_write_ptr - Send new write idx to hardware
+ */
+void
+il_txq_update_write_ptr(struct il_priv *il, struct il_tx_queue *txq)
+{
+ u32 reg = 0;
+ int txq_id = txq->q.id;
+
+ if (txq->need_update == 0)
+ return;
+
+ /* if we're trying to save power */
+ if (test_bit(S_POWER_PMI, &il->status)) {
+ /* wake up nic if it's powered down ...
+ * uCode will wake up, and interrupt us again, so next
+ * time we'll skip this part. */
+ reg = _il_rd(il, CSR_UCODE_DRV_GP1);
+
+ if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
+ D_INFO("Tx queue %d requesting wakeup," " GP1 = 0x%x\n",
+ txq_id, reg);
+ il_set_bit(il, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ return;
+ }
+
+ il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));
+
+ /*
+ * else not in power-save mode,
+ * uCode will never sleep when we're
+ * trying to tx (during RFKILL, we're not trying to tx).
+ */
+ } else
+ _il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));
+ txq->need_update = 0;
+}
+EXPORT_SYMBOL(il_txq_update_write_ptr);
+
+/**
+ * il_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
+ */
+void
+il_tx_queue_unmap(struct il_priv *il, int txq_id)
+{
+ struct il_tx_queue *txq = &il->txq[txq_id];
+ struct il_queue *q = &txq->q;
+
+ if (q->n_bd == 0)
+ return;
+
+ while (q->write_ptr != q->read_ptr) {
+ il->cfg->ops->lib->txq_free_tfd(il, txq);
+ q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd);
+ }
+}
+EXPORT_SYMBOL(il_tx_queue_unmap);
+
+/**
+ * il_tx_queue_free - Deallocate DMA queue.
+ * @txq: Transmit queue to deallocate.
+ *
+ * Empty queue by removing and destroying all BD's.
+ * Free all buffers.
+ * 0-fill, but do not free "txq" descriptor structure.
+ */
+void
+il_tx_queue_free(struct il_priv *il, int txq_id)
+{
+ struct il_tx_queue *txq = &il->txq[txq_id];
+ struct device *dev = &il->pci_dev->dev;
+ int i;
+
+ il_tx_queue_unmap(il, txq_id);
+
+ /* De-alloc array of command/tx buffers */
+ for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
+ kfree(txq->cmd[i]);
+
+ /* De-alloc circular buffer of TFDs */
+ if (txq->q.n_bd)
+ dma_free_coherent(dev, il->hw_params.tfd_size * txq->q.n_bd,
+ txq->tfds, txq->q.dma_addr);
+
+ /* De-alloc array of per-TFD driver data */
+ kfree(txq->txb);
+ txq->txb = NULL;
+
+ /* deallocate arrays */
+ kfree(txq->cmd);
+ kfree(txq->meta);
+ txq->cmd = NULL;
+ txq->meta = NULL;
+
+ /* 0-fill queue descriptor structure */
+ memset(txq, 0, sizeof(*txq));
+}
+EXPORT_SYMBOL(il_tx_queue_free);
+
+/**
+ * il_cmd_queue_unmap - Unmap any remaining DMA mappings from command queue
+ */
+void
+il_cmd_queue_unmap(struct il_priv *il)
+{
+ struct il_tx_queue *txq = &il->txq[il->cmd_queue];
+ struct il_queue *q = &txq->q;
+ int i;
+
+ if (q->n_bd == 0)
+ return;
+
+ while (q->read_ptr != q->write_ptr) {
+ i = il_get_cmd_idx(q, q->read_ptr, 0);
+
+ if (txq->meta[i].flags & CMD_MAPPED) {
+ pci_unmap_single(il->pci_dev,
+ dma_unmap_addr(&txq->meta[i], mapping),
+ dma_unmap_len(&txq->meta[i], len),
+ PCI_DMA_BIDIRECTIONAL);
+ txq->meta[i].flags = 0;
+ }
+
+ q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd);
+ }
+
+ i = q->n_win;
+ if (txq->meta[i].flags & CMD_MAPPED) {
+ pci_unmap_single(il->pci_dev,
+ dma_unmap_addr(&txq->meta[i], mapping),
+ dma_unmap_len(&txq->meta[i], len),
+ PCI_DMA_BIDIRECTIONAL);
+ txq->meta[i].flags = 0;
+ }
+}
+EXPORT_SYMBOL(il_cmd_queue_unmap);
+
+/**
+ * il_cmd_queue_free - Deallocate DMA queue.
+ * @txq: Transmit queue to deallocate.
+ *
+ * Empty queue by removing and destroying all BD's.
+ * Free all buffers.
+ * 0-fill, but do not free "txq" descriptor structure.
+ */
+void
+il_cmd_queue_free(struct il_priv *il)
+{
+ struct il_tx_queue *txq = &il->txq[il->cmd_queue];
+ struct device *dev = &il->pci_dev->dev;
+ int i;
+
+ il_cmd_queue_unmap(il);
+
+ /* De-alloc array of command/tx buffers */
+ for (i = 0; i <= TFD_CMD_SLOTS; i++)
+ kfree(txq->cmd[i]);
+
+ /* De-alloc circular buffer of TFDs */
+ if (txq->q.n_bd)
+ dma_free_coherent(dev, il->hw_params.tfd_size * txq->q.n_bd,
+ txq->tfds, txq->q.dma_addr);
+
+ /* deallocate arrays */
+ kfree(txq->cmd);
+ kfree(txq->meta);
+ txq->cmd = NULL;
+ txq->meta = NULL;
+
+ /* 0-fill queue descriptor structure */
+ memset(txq, 0, sizeof(*txq));
+}
+EXPORT_SYMBOL(il_cmd_queue_free);
+
+/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
+ * DMA services
+ *
+ * Theory of operation
+ *
+ * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
+ * of buffer descriptors, each of which points to one or more data buffers for
+ * the device to read from or fill. Driver and device exchange status of each
+ * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
+ * entries in each circular buffer, to protect against confusing empty and full
+ * queue states.
+ *
+ * The device reads or writes the data in the queues via the device's several
+ * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
+ *
+ * For the Tx queue, there are low mark and high mark limits. If, after queuing
+ * the packet for Tx, the free space becomes < low mark, the Tx queue is
+ * stopped. When reclaiming packets (on the 'tx done' IRQ), if the free space
+ * becomes > high mark, the Tx queue is resumed.
+ *
+ * See more detailed info in 4965.h.
+ ***************************************************/
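+
+/* Worked example (illustrative only, using il_queue_space() and
+ * il_queue_init() below with hypothetical values): a queue with n_bd = 256
+ * and n_win = 64 gets low_mark = 64 / 4 = 16 and high_mark = 64 / 8 = 8.
+ * With read_ptr = 10 and write_ptr = 60, il_queue_space() reports
+ * 10 - 60 + 64 - 2 = 12 free slots; the two-slot reserve keeps the empty
+ * and full cases distinct. */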
+
+int
+il_queue_space(const struct il_queue *q)
+{
+ int s = q->read_ptr - q->write_ptr;
+
+ if (q->read_ptr > q->write_ptr)
+ s -= q->n_bd;
+
+ if (s <= 0)
+ s += q->n_win;
+ /* keep some reserve to not confuse empty and full situations */
+ s -= 2;
+ if (s < 0)
+ s = 0;
+ return s;
+}
+EXPORT_SYMBOL(il_queue_space);
+
+
+/**
+ * il_queue_init - Initialize queue's high/low-water and read/write idxes
+ */
+static int
+il_queue_init(struct il_priv *il, struct il_queue *q, int count, int slots_num,
+ u32 id)
+{
+ q->n_bd = count;
+ q->n_win = slots_num;
+ q->id = id;
+
+ /* count must be power-of-two size, otherwise il_queue_inc_wrap
+ * and il_queue_dec_wrap are broken. */
+ BUG_ON(!is_power_of_2(count));
+
+ /* slots_num must be power-of-two size, otherwise
+ * il_get_cmd_idx is broken. */
+ BUG_ON(!is_power_of_2(slots_num));
+
+ q->low_mark = q->n_win / 4;
+ if (q->low_mark < 4)
+ q->low_mark = 4;
+
+ q->high_mark = q->n_win / 8;
+ if (q->high_mark < 2)
+ q->high_mark = 2;
+
+ q->write_ptr = q->read_ptr = 0;
+
+ return 0;
+}
+
+/**
+ * il_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
+ */
+static int
+il_tx_queue_alloc(struct il_priv *il, struct il_tx_queue *txq, u32 id)
+{
+ struct device *dev = &il->pci_dev->dev;
+ size_t tfd_sz = il->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;
+
+	/* Driver private data, only for Tx (not command) queues,
+	 * not shared with device. */
+ if (id != il->cmd_queue) {
+ txq->txb = kcalloc(TFD_QUEUE_SIZE_MAX, sizeof(txq->txb[0]),
+ GFP_KERNEL);
+ if (!txq->txb) {
+			IL_ERR("kcalloc for auxiliary BD "
+ "structures failed\n");
+ goto error;
+ }
+ } else {
+ txq->txb = NULL;
+ }
+
+ /* Circular buffer of transmit frame descriptors (TFDs),
+ * shared with device */
+ txq->tfds =
+ dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr, GFP_KERNEL);
+ if (!txq->tfds) {
+		IL_ERR("dma_alloc_coherent(%zd) failed\n", tfd_sz);
+ goto error;
+ }
+ txq->q.id = id;
+
+ return 0;
+
+error:
+ kfree(txq->txb);
+ txq->txb = NULL;
+
+ return -ENOMEM;
+}
+
+/**
+ * il_tx_queue_init - Allocate and initialize one tx/cmd queue
+ */
+int
+il_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq, int slots_num,
+ u32 txq_id)
+{
+ int i, len;
+ int ret;
+ int actual_slots = slots_num;
+
+ /*
+ * Alloc buffer array for commands (Tx or other types of commands).
+ * For the command queue (#4/#9), allocate command space + one big
+	 * command for scan, since the scan command is very large; the system will
+ * not have two scans at the same time, so only one is needed.
+ * For normal Tx queues (all other queues), no super-size command
+ * space is needed.
+ */
+ if (txq_id == il->cmd_queue)
+ actual_slots++;
+
+ txq->meta =
+ kzalloc(sizeof(struct il_cmd_meta) * actual_slots, GFP_KERNEL);
+ txq->cmd =
+ kzalloc(sizeof(struct il_device_cmd *) * actual_slots, GFP_KERNEL);
+
+ if (!txq->meta || !txq->cmd)
+ goto out_free_arrays;
+
+ len = sizeof(struct il_device_cmd);
+ for (i = 0; i < actual_slots; i++) {
+ /* only happens for cmd queue */
+ if (i == slots_num)
+ len = IL_MAX_CMD_SIZE;
+
+ txq->cmd[i] = kmalloc(len, GFP_KERNEL);
+ if (!txq->cmd[i])
+ goto err;
+ }
+
+ /* Alloc driver data array and TFD circular buffer */
+ ret = il_tx_queue_alloc(il, txq, txq_id);
+ if (ret)
+ goto err;
+
+ txq->need_update = 0;
+
+ /*
+ * For the default queues 0-3, set up the swq_id
+ * already -- all others need to get one later
+ * (if they need one at all).
+ */
+ if (txq_id < 4)
+ il_set_swq_id(txq, txq_id, txq_id);
+
+ /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
+ * il_queue_inc_wrap and il_queue_dec_wrap are broken. */
+ BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
+
+ /* Initialize queue's high/low-water marks, and head/tail idxes */
+ il_queue_init(il, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
+
+ /* Tell device where to find queue */
+ il->cfg->ops->lib->txq_init(il, txq);
+
+ return 0;
+err:
+ for (i = 0; i < actual_slots; i++)
+ kfree(txq->cmd[i]);
+out_free_arrays:
+ kfree(txq->meta);
+ kfree(txq->cmd);
+
+ return -ENOMEM;
+}
+EXPORT_SYMBOL(il_tx_queue_init);
+
+void
+il_tx_queue_reset(struct il_priv *il, struct il_tx_queue *txq, int slots_num,
+ u32 txq_id)
+{
+ int actual_slots = slots_num;
+
+ if (txq_id == il->cmd_queue)
+ actual_slots++;
+
+ memset(txq->meta, 0, sizeof(struct il_cmd_meta) * actual_slots);
+
+ txq->need_update = 0;
+
+ /* Initialize queue's high/low-water marks, and head/tail idxes */
+ il_queue_init(il, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
+
+ /* Tell device where to find queue */
+ il->cfg->ops->lib->txq_init(il, txq);
+}
+EXPORT_SYMBOL(il_tx_queue_reset);
+
+/*************** HOST COMMAND QUEUE FUNCTIONS *****/
+
+/**
+ * il_enqueue_hcmd - enqueue a uCode command
+ * @il: device private data pointer
+ * @cmd: a pointer to the ucode command structure
+ *
+ * The function returns < 0 values to indicate that the operation
+ * failed. On success, it returns the idx (> 0) of the command in the
+ * command queue.
+ */
+int
+il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd)
+{
+ struct il_tx_queue *txq = &il->txq[il->cmd_queue];
+ struct il_queue *q = &txq->q;
+ struct il_device_cmd *out_cmd;
+ struct il_cmd_meta *out_meta;
+ dma_addr_t phys_addr;
+ unsigned long flags;
+ int len;
+ u32 idx;
+ u16 fix_size;
+
+ cmd->len = il->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
+ fix_size = (u16) (cmd->len + sizeof(out_cmd->hdr));
+
+	/* If any of the command structures end up being larger than
+	 * the TFD_MAX_PAYLOAD_SIZE, and it is sent as a 'small' command, then
+	 * we will need to increase the size of the TFD entries.
+	 * Also, check that the command buffer does not exceed the size
+	 * of device_cmd and max_cmd_size. */
+ BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
+ !(cmd->flags & CMD_SIZE_HUGE));
+ BUG_ON(fix_size > IL_MAX_CMD_SIZE);
+
+ if (il_is_rfkill(il) || il_is_ctkill(il)) {
+ IL_WARN("Not sending command - %s KILL\n",
+ il_is_rfkill(il) ? "RF" : "CT");
+ return -EIO;
+ }
+
+ spin_lock_irqsave(&il->hcmd_lock, flags);
+
+ if (il_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
+ spin_unlock_irqrestore(&il->hcmd_lock, flags);
+
+ IL_ERR("Restarting adapter due to command queue full\n");
+ queue_work(il->workqueue, &il->restart);
+ return -ENOSPC;
+ }
+
+ idx = il_get_cmd_idx(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
+ out_cmd = txq->cmd[idx];
+ out_meta = &txq->meta[idx];
+
+ if (WARN_ON(out_meta->flags & CMD_MAPPED)) {
+ spin_unlock_irqrestore(&il->hcmd_lock, flags);
+ return -ENOSPC;
+ }
+
+ memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */
+ out_meta->flags = cmd->flags | CMD_MAPPED;
+ if (cmd->flags & CMD_WANT_SKB)
+ out_meta->source = cmd;
+ if (cmd->flags & CMD_ASYNC)
+ out_meta->callback = cmd->callback;
+
+ out_cmd->hdr.cmd = cmd->id;
+ memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);
+
+ /* At this point, the out_cmd now has all of the incoming cmd
+ * information */
+
+ out_cmd->hdr.flags = 0;
+ out_cmd->hdr.sequence =
+ cpu_to_le16(QUEUE_TO_SEQ(il->cmd_queue) | IDX_TO_SEQ(q->write_ptr));
+ if (cmd->flags & CMD_SIZE_HUGE)
+ out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
+ len = sizeof(struct il_device_cmd);
+ if (idx == TFD_CMD_SLOTS)
+ len = IL_MAX_CMD_SIZE;
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+ switch (out_cmd->hdr.cmd) {
+ case C_TX_LINK_QUALITY_CMD:
+ case C_SENSITIVITY:
+ D_HC_DUMP("Sending command %s (#%x), seq: 0x%04X, "
+ "%d bytes at %d[%d]:%d\n",
+ il_get_cmd_string(out_cmd->hdr.cmd), out_cmd->hdr.cmd,
+ le16_to_cpu(out_cmd->hdr.sequence), fix_size,
+ q->write_ptr, idx, il->cmd_queue);
+ break;
+ default:
+ D_HC("Sending command %s (#%x), seq: 0x%04X, "
+ "%d bytes at %d[%d]:%d\n",
+ il_get_cmd_string(out_cmd->hdr.cmd), out_cmd->hdr.cmd,
+ le16_to_cpu(out_cmd->hdr.sequence), fix_size, q->write_ptr,
+ idx, il->cmd_queue);
+ }
+#endif
+ txq->need_update = 1;
+
+ if (il->cfg->ops->lib->txq_update_byte_cnt_tbl)
+ /* Set up entry in queue's byte count circular buffer */
+ il->cfg->ops->lib->txq_update_byte_cnt_tbl(il, txq, 0);
+
+ phys_addr =
+ pci_map_single(il->pci_dev, &out_cmd->hdr, fix_size,
+ PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_addr_set(out_meta, mapping, phys_addr);
+ dma_unmap_len_set(out_meta, len, fix_size);
+
+ il->cfg->ops->lib->txq_attach_buf_to_tfd(il, txq, phys_addr, fix_size,
+ 1, U32_PAD(cmd->len));
+
+ /* Increment and update queue's write idx */
+ q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
+ il_txq_update_write_ptr(il, txq);
+
+ spin_unlock_irqrestore(&il->hcmd_lock, flags);
+ return idx;
+}
+
+/**
+ * il_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
+ *
+ * When FW advances 'R' idx, all entries between old and new 'R' idx
+ * need to be reclaimed. As a result, some free space forms. If there is
+ * enough free space (> low mark), wake the stack that feeds us.
+ */
+static void
+il_hcmd_queue_reclaim(struct il_priv *il, int txq_id, int idx, int cmd_idx)
+{
+ struct il_tx_queue *txq = &il->txq[txq_id];
+ struct il_queue *q = &txq->q;
+ int nfreed = 0;
+
+ if (idx >= q->n_bd || il_queue_used(q, idx) == 0) {
+ IL_ERR("Read idx for DMA queue txq id (%d), idx %d, "
+ "is out of range [0-%d] %d %d.\n", txq_id, idx, q->n_bd,
+ q->write_ptr, q->read_ptr);
+ return;
+ }
+
+ for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
+ q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+
+ if (nfreed++ > 0) {
+ IL_ERR("HCMD skipped: idx (%d) %d %d\n", idx,
+ q->write_ptr, q->read_ptr);
+ queue_work(il->workqueue, &il->restart);
+ }
+
+ }
+}
+
+/**
+ * il_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
+ * @rxb: Rx buffer to reclaim
+ *
+ * If an Rx buffer has an async callback associated with it the callback
+ * will be executed. The attached skb (if present) will only be freed
+ * if the callback returns 1
+ */
+void
+il_tx_cmd_complete(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ u16 sequence = le16_to_cpu(pkt->hdr.sequence);
+ int txq_id = SEQ_TO_QUEUE(sequence);
+ int idx = SEQ_TO_IDX(sequence);
+ int cmd_idx;
+ bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
+ struct il_device_cmd *cmd;
+ struct il_cmd_meta *meta;
+ struct il_tx_queue *txq = &il->txq[il->cmd_queue];
+ unsigned long flags;
+
+	/* If a Tx command is being handled and it isn't in the actual
+	 * command queue, then a command routing bug has been introduced
+	 * in the queue management code. */
+ if (WARN
+ (txq_id != il->cmd_queue,
+ "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
+ txq_id, il->cmd_queue, sequence, il->txq[il->cmd_queue].q.read_ptr,
+ il->txq[il->cmd_queue].q.write_ptr)) {
+ il_print_hex_error(il, pkt, 32);
+ return;
+ }
+
+ cmd_idx = il_get_cmd_idx(&txq->q, idx, huge);
+ cmd = txq->cmd[cmd_idx];
+ meta = &txq->meta[cmd_idx];
+
+ txq->time_stamp = jiffies;
+
+ pci_unmap_single(il->pci_dev, dma_unmap_addr(meta, mapping),
+ dma_unmap_len(meta, len), PCI_DMA_BIDIRECTIONAL);
+
+ /* Input error checking is done when commands are added to queue. */
+ if (meta->flags & CMD_WANT_SKB) {
+ meta->source->reply_page = (unsigned long)rxb_addr(rxb);
+ rxb->page = NULL;
+ } else if (meta->callback)
+ meta->callback(il, cmd, pkt);
+
+ spin_lock_irqsave(&il->hcmd_lock, flags);
+
+ il_hcmd_queue_reclaim(il, txq_id, idx, cmd_idx);
+
+ if (!(meta->flags & CMD_ASYNC)) {
+ clear_bit(S_HCMD_ACTIVE, &il->status);
+ D_INFO("Clearing HCMD_ACTIVE for command %s\n",
+ il_get_cmd_string(cmd->hdr.cmd));
+ wake_up(&il->wait_command_queue);
+ }
+
+ /* Mark as unmapped */
+ meta->flags = 0;
+
+ spin_unlock_irqrestore(&il->hcmd_lock, flags);
+}
+EXPORT_SYMBOL(il_tx_cmd_complete);
+
+MODULE_DESCRIPTION("iwl-legacy: common functions for 3945 and 4965");
+MODULE_VERSION(IWLWIFI_VERSION);
+MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
+MODULE_LICENSE("GPL");
+
+/*
+ * If bt_coex_active is set to true, the uCode will do kill/defer
+ * every time the priority line is asserted (BT is sending signals on the
+ * priority line in the PCIx).
+ * If bt_coex_active is set to false, the uCode will ignore the BT activity
+ * and perform normal operation.
+ *
+ * Users might experience transmit issues on some platforms due to a WiFi/BT
+ * co-existence problem. The possible behaviors are:
+ *   Able to scan and find all the available APs
+ *   Not able to associate with any AP
+ * On those platforms, WiFi communication can be restored by setting the
+ * "bt_coex_active" module parameter to "false".
+ *
+ * default: bt_coex_active = true (BT_COEX_ENABLE)
+ */
+static bool bt_coex_active = true;
+module_param(bt_coex_active, bool, S_IRUGO);
+MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist");
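+
+/* Usage note (illustrative only; the exact module name depends on how this
+ * common code is packaged): BT coexistence handling can be disabled at load
+ * time with a module parameter, e.g. "modprobe <module> bt_coex_active=0",
+ * and the current value can be read back through
+ * /sys/module/<module>/parameters/bt_coex_active since the parameter is
+ * registered with S_IRUGO. */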
+
+u32 il_debug_level;
+EXPORT_SYMBOL(il_debug_level);
+
+const u8 il_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+EXPORT_SYMBOL(il_bcast_addr);
+
+/* This function both allocates and initializes hw and il. */
+struct ieee80211_hw *
+il_alloc_all(struct il_cfg *cfg)
+{
+ struct il_priv *il;
+ /* mac80211 allocates memory for this device instance, including
+	 * space for this driver's private structure */
+ struct ieee80211_hw *hw;
+
+ hw = ieee80211_alloc_hw(sizeof(struct il_priv),
+ cfg->ops->ieee80211_ops);
+ if (hw == NULL) {
+ pr_err("%s: Can not allocate network device\n", cfg->name);
+ goto out;
+ }
+
+ il = hw->priv;
+ il->hw = hw;
+
+out:
+ return hw;
+}
+EXPORT_SYMBOL(il_alloc_all);
+
+#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
+#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
+static void
+il_init_ht_hw_capab(const struct il_priv *il,
+ struct ieee80211_sta_ht_cap *ht_info,
+ enum ieee80211_band band)
+{
+ u16 max_bit_rate = 0;
+ u8 rx_chains_num = il->hw_params.rx_chains_num;
+ u8 tx_chains_num = il->hw_params.tx_chains_num;
+
+ ht_info->cap = 0;
+ memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
+
+ ht_info->ht_supported = true;
+
+ ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
+ max_bit_rate = MAX_BIT_RATE_20_MHZ;
+ if (il->hw_params.ht40_channel & BIT(band)) {
+ ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+ ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
+ ht_info->mcs.rx_mask[4] = 0x01;
+ max_bit_rate = MAX_BIT_RATE_40_MHZ;
+ }
+
+ if (il->cfg->mod_params->amsdu_size_8K)
+ ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
+
+ ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
+ ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;
+
+ ht_info->mcs.rx_mask[0] = 0xFF;
+ if (rx_chains_num >= 2)
+ ht_info->mcs.rx_mask[1] = 0xFF;
+ if (rx_chains_num >= 3)
+ ht_info->mcs.rx_mask[2] = 0xFF;
+
+ /* Highest supported Rx data rate */
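+	/* e.g. two RX chains on an HT40 short-GI channel: 2 * 150 = 300 Mbps */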
+ max_bit_rate *= rx_chains_num;
+ WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
+ ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);
+
+ /* Tx MCS capabilities */
+ ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+ if (tx_chains_num != rx_chains_num) {
+ ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
+ ht_info->mcs.tx_params |=
+ ((tx_chains_num -
+ 1) << IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
+ }
+}
+
+/**
+ * il_init_geos - Initialize mac80211's geo/channel info based on EEPROM
+ */
+int
+il_init_geos(struct il_priv *il)
+{
+ struct il_channel_info *ch;
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_channel *channels;
+ struct ieee80211_channel *geo_ch;
+ struct ieee80211_rate *rates;
+ int i = 0;
+ s8 max_tx_power = 0;
+
+ if (il->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
+ il->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
+ D_INFO("Geography modes already initialized.\n");
+ set_bit(S_GEO_CONFIGURED, &il->status);
+ return 0;
+ }
+
+ channels =
+ kzalloc(sizeof(struct ieee80211_channel) * il->channel_count,
+ GFP_KERNEL);
+ if (!channels)
+ return -ENOMEM;
+
+ rates =
+ kzalloc((sizeof(struct ieee80211_rate) * RATE_COUNT_LEGACY),
+ GFP_KERNEL);
+ if (!rates) {
+ kfree(channels);
+ return -ENOMEM;
+ }
+
+ /* 5.2GHz channels start after the 2.4GHz channels */
+ sband = &il->bands[IEEE80211_BAND_5GHZ];
+ sband->channels = &channels[ARRAY_SIZE(il_eeprom_band_1)];
+ /* just OFDM */
+ sband->bitrates = &rates[IL_FIRST_OFDM_RATE];
+ sband->n_bitrates = RATE_COUNT_LEGACY - IL_FIRST_OFDM_RATE;
+
+ if (il->cfg->sku & IL_SKU_N)
+ il_init_ht_hw_capab(il, &sband->ht_cap, IEEE80211_BAND_5GHZ);
+
+ sband = &il->bands[IEEE80211_BAND_2GHZ];
+ sband->channels = channels;
+ /* OFDM & CCK */
+ sband->bitrates = rates;
+ sband->n_bitrates = RATE_COUNT_LEGACY;
+
+ if (il->cfg->sku & IL_SKU_N)
+ il_init_ht_hw_capab(il, &sband->ht_cap, IEEE80211_BAND_2GHZ);
+
+ il->ieee_channels = channels;
+ il->ieee_rates = rates;
+
+ for (i = 0; i < il->channel_count; i++) {
+ ch = &il->channel_info[i];
+
+ if (!il_is_channel_valid(ch))
+ continue;
+
+ sband = &il->bands[ch->band];
+
+ geo_ch = &sband->channels[sband->n_channels++];
+
+ geo_ch->center_freq =
+ ieee80211_channel_to_frequency(ch->channel, ch->band);
+ geo_ch->max_power = ch->max_power_avg;
+ geo_ch->max_antenna_gain = 0xff;
+ geo_ch->hw_value = ch->channel;
+
+ if (il_is_channel_valid(ch)) {
+ if (!(ch->flags & EEPROM_CHANNEL_IBSS))
+ geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;
+
+ if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
+ geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
+
+ if (ch->flags & EEPROM_CHANNEL_RADAR)
+ geo_ch->flags |= IEEE80211_CHAN_RADAR;
+
+ geo_ch->flags |= ch->ht40_extension_channel;
+
+ if (ch->max_power_avg > max_tx_power)
+ max_tx_power = ch->max_power_avg;
+ } else {
+ geo_ch->flags |= IEEE80211_CHAN_DISABLED;
+ }
+
+ D_INFO("Channel %d Freq=%d[%sGHz] %s flag=0x%X\n", ch->channel,
+ geo_ch->center_freq,
+ il_is_channel_a_band(ch) ? "5.2" : "2.4",
+ geo_ch->
+ flags & IEEE80211_CHAN_DISABLED ? "restricted" : "valid",
+ geo_ch->flags);
+ }
+
+ il->tx_power_device_lmt = max_tx_power;
+ il->tx_power_user_lmt = max_tx_power;
+ il->tx_power_next = max_tx_power;
+
+ if (il->bands[IEEE80211_BAND_5GHZ].n_channels == 0 &&
+ (il->cfg->sku & IL_SKU_A)) {
+ IL_INFO("Incorrectly detected BG card as ABG. "
+ "Please send your PCI ID 0x%04X:0x%04X to maintainer.\n",
+ il->pci_dev->device, il->pci_dev->subsystem_device);
+ il->cfg->sku &= ~IL_SKU_A;
+ }
+
+ IL_INFO("Tunable channels: %d 802.11bg, %d 802.11a channels\n",
+ il->bands[IEEE80211_BAND_2GHZ].n_channels,
+ il->bands[IEEE80211_BAND_5GHZ].n_channels);
+
+ set_bit(S_GEO_CONFIGURED, &il->status);
+
+ return 0;
+}
+EXPORT_SYMBOL(il_init_geos);
+
+/*
+ * il_free_geos - undo allocations in il_init_geos
+ */
+void
+il_free_geos(struct il_priv *il)
+{
+ kfree(il->ieee_channels);
+ kfree(il->ieee_rates);
+ clear_bit(S_GEO_CONFIGURED, &il->status);
+}
+EXPORT_SYMBOL(il_free_geos);
+
+static bool
+il_is_channel_extension(struct il_priv *il, enum ieee80211_band band,
+ u16 channel, u8 extension_chan_offset)
+{
+ const struct il_channel_info *ch_info;
+
+ ch_info = il_get_channel_info(il, band, channel);
+ if (!il_is_channel_valid(ch_info))
+ return false;
+
+ if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE)
+ return !(ch_info->
+ ht40_extension_channel & IEEE80211_CHAN_NO_HT40PLUS);
+ else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW)
+ return !(ch_info->
+ ht40_extension_channel & IEEE80211_CHAN_NO_HT40MINUS);
+
+ return false;
+}
+
+bool
+il_is_ht40_tx_allowed(struct il_priv *il, struct il_rxon_context *ctx,
+ struct ieee80211_sta_ht_cap *ht_cap)
+{
+ if (!ctx->ht.enabled || !ctx->ht.is_40mhz)
+ return false;
+
+ /*
+	 * We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40 because
+	 * the bit will not be set in the pure 40 MHz case.
+ */
+ if (ht_cap && !ht_cap->ht_supported)
+ return false;
+
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+ if (il->disable_ht40)
+ return false;
+#endif
+
+ return il_is_channel_extension(il, il->band,
+ le16_to_cpu(ctx->staging.channel),
+ ctx->ht.extension_chan_offset);
+}
+EXPORT_SYMBOL(il_is_ht40_tx_allowed);
+
+static u16
+il_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
+{
+ u16 new_val;
+ u16 beacon_factor;
+
+ /*
+ * If mac80211 hasn't given us a beacon interval, program
+ * the default into the device.
+ */
+ if (!beacon_val)
+ return DEFAULT_BEACON_INTERVAL;
+
+ /*
+ * If the beacon interval we obtained from the peer
+ * is too large, we'll have to wake up more often
+ * (and in IBSS case, we'll beacon too much)
+ *
+ * For example, if max_beacon_val is 4096, and the
+ * requested beacon interval is 7000, we'll have to
+ * use 3500 to be able to wake up on the beacons.
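+	 * (beacon_factor = (7000 + 4096) / 4096 = 2, so
+	 * new_val = 7000 / 2 = 3500.)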
+ *
+ * This could badly influence beacon detection stats.
+ */
+
+ beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val;
+ new_val = beacon_val / beacon_factor;
+
+ if (!new_val)
+ new_val = max_beacon_val;
+
+ return new_val;
+}
+
+int
+il_send_rxon_timing(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ u64 tsf;
+ s32 interval_tm, rem;
+ struct ieee80211_conf *conf = NULL;
+ u16 beacon_int;
+ struct ieee80211_vif *vif = ctx->vif;
+
+ conf = &il->hw->conf;
+
+ lockdep_assert_held(&il->mutex);
+
+ memset(&ctx->timing, 0, sizeof(struct il_rxon_time_cmd));
+
+ ctx->timing.timestamp = cpu_to_le64(il->timestamp);
+ ctx->timing.listen_interval = cpu_to_le16(conf->listen_interval);
+
+ beacon_int = vif ? vif->bss_conf.beacon_int : 0;
+
+ /*
+ * TODO: For IBSS we need to get atim_win from mac80211,
+ * for now just always use 0
+ */
+ ctx->timing.atim_win = 0;
+
+ beacon_int =
+ il_adjust_beacon_interval(beacon_int,
+ il->hw_params.max_beacon_itrvl *
+ TIME_UNIT);
+ ctx->timing.beacon_interval = cpu_to_le16(beacon_int);
+
+	tsf = il->timestamp;	/* tsf is modified by do_div: copy it */
+ interval_tm = beacon_int * TIME_UNIT;
+ rem = do_div(tsf, interval_tm);
+ ctx->timing.beacon_init_val = cpu_to_le32(interval_tm - rem);
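+	/* beacon_init_val is thus the time in usec remaining until the
+	 * next beacon boundary */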
+
+ ctx->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ? : 1) : 1;
+
+ D_ASSOC("beacon interval %d beacon timer %d beacon tim %d\n",
+ le16_to_cpu(ctx->timing.beacon_interval),
+ le32_to_cpu(ctx->timing.beacon_init_val),
+ le16_to_cpu(ctx->timing.atim_win));
+
+ return il_send_cmd_pdu(il, ctx->rxon_timing_cmd, sizeof(ctx->timing),
+ &ctx->timing);
+}
+EXPORT_SYMBOL(il_send_rxon_timing);
+
+void
+il_set_rxon_hwcrypto(struct il_priv *il, struct il_rxon_context *ctx,
+ int hw_decrypt)
+{
+ struct il_rxon_cmd *rxon = &ctx->staging;
+
+ if (hw_decrypt)
+ rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
+ else
+ rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
+
+}
+EXPORT_SYMBOL(il_set_rxon_hwcrypto);
+
+/* Check that the RXON structure is valid */
+int
+il_check_rxon_cmd(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ struct il_rxon_cmd *rxon = &ctx->staging;
+ bool error = false;
+
+ if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
+ if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) {
+ IL_WARN("check 2.4G: wrong narrow\n");
+ error = true;
+ }
+ if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) {
+ IL_WARN("check 2.4G: wrong radar\n");
+ error = true;
+ }
+ } else {
+ if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) {
+ IL_WARN("check 5.2G: not short slot!\n");
+ error = true;
+ }
+ if (rxon->flags & RXON_FLG_CCK_MSK) {
+ IL_WARN("check 5.2G: CCK!\n");
+ error = true;
+ }
+ }
+ if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) {
+ IL_WARN("mac/bssid mcast!\n");
+ error = true;
+ }
+
+	/* make sure at least one of the 1 Mbps (CCK) or 6 Mbps (OFDM) basic rates is set */
+ if ((rxon->ofdm_basic_rates & RATE_6M_MASK) == 0 &&
+ (rxon->cck_basic_rates & RATE_1M_MASK) == 0) {
+ IL_WARN("neither 1 nor 6 are basic\n");
+ error = true;
+ }
+
+ if (le16_to_cpu(rxon->assoc_id) > 2007) {
+ IL_WARN("aid > 2007\n");
+ error = true;
+ }
+
+ if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) ==
+ (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) {
+ IL_WARN("CCK and short slot\n");
+ error = true;
+ }
+
+ if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) ==
+ (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) {
+		IL_WARN("CCK and auto detect\n");
+ error = true;
+ }
+
+ if ((rxon->
+ flags & (RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK)) ==
+ RXON_FLG_TGG_PROTECT_MSK) {
+ IL_WARN("TGg but no auto-detect\n");
+ error = true;
+ }
+
+	if (error) {
+		IL_WARN("Tuning to channel %d\n", le16_to_cpu(rxon->channel));
+		IL_ERR("Invalid RXON\n");
+		return -EINVAL;
+	}
+ return 0;
+}
+EXPORT_SYMBOL(il_check_rxon_cmd);
+
+/**
+ * il_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
+ * @il: staging_rxon is compared to active_rxon
+ *
+ * If the RXON structure is changing enough to require a new tune,
+ * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
+ * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
+ */
+int
+il_full_rxon_required(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ const struct il_rxon_cmd *staging = &ctx->staging;
+ const struct il_rxon_cmd *active = &ctx->active;
+
+#define CHK(cond) \
+ if ((cond)) { \
+ D_INFO("need full RXON - " #cond "\n"); \
+ return 1; \
+ }
+
+#define CHK_NEQ(c1, c2) \
+ if ((c1) != (c2)) { \
+ D_INFO("need full RXON - " \
+ #c1 " != " #c2 " - %d != %d\n", \
+ (c1), (c2)); \
+ return 1; \
+ }
+
+ /* These items are only settable from the full RXON command */
+ CHK(!il_is_associated_ctx(ctx));
+ CHK(compare_ether_addr(staging->bssid_addr, active->bssid_addr));
+ CHK(compare_ether_addr(staging->node_addr, active->node_addr));
+ CHK(compare_ether_addr
+ (staging->wlap_bssid_addr, active->wlap_bssid_addr));
+ CHK_NEQ(staging->dev_type, active->dev_type);
+ CHK_NEQ(staging->channel, active->channel);
+ CHK_NEQ(staging->air_propagation, active->air_propagation);
+ CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates,
+ active->ofdm_ht_single_stream_basic_rates);
+ CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates,
+ active->ofdm_ht_dual_stream_basic_rates);
+ CHK_NEQ(staging->assoc_id, active->assoc_id);
+
+ /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
+ * be updated with the RXON_ASSOC command -- however only some
+ * flag transitions are allowed using RXON_ASSOC */
+
+ /* Check if we are not switching bands */
+ CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK,
+ active->flags & RXON_FLG_BAND_24G_MSK);
+
+ /* Check if we are switching association toggle */
+ CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK,
+ active->filter_flags & RXON_FILTER_ASSOC_MSK);
+
+#undef CHK
+#undef CHK_NEQ
+
+ return 0;
+}
+EXPORT_SYMBOL(il_full_rxon_required);
+
+u8
+il_get_lowest_plcp(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ /*
+ * Assign the lowest rate -- should really get this from
+ * the beacon skb from mac80211.
+ */
+ if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK)
+ return RATE_1M_PLCP;
+ else
+ return RATE_6M_PLCP;
+}
+EXPORT_SYMBOL(il_get_lowest_plcp);
+
+static void
+_il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf,
+ struct il_rxon_context *ctx)
+{
+ struct il_rxon_cmd *rxon = &ctx->staging;
+
+ if (!ctx->ht.enabled) {
+ rxon->flags &=
+ ~(RXON_FLG_CHANNEL_MODE_MSK |
+ RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK | RXON_FLG_HT40_PROT_MSK
+ | RXON_FLG_HT_PROT_MSK);
+ return;
+ }
+
+ rxon->flags |=
+ cpu_to_le32(ctx->ht.protection << RXON_FLG_HT_OPERATING_MODE_POS);
+
+ /* Set up channel bandwidth:
+ * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */
+	/* clear the HT channel mode before setting the mode */
+ rxon->flags &=
+ ~(RXON_FLG_CHANNEL_MODE_MSK | RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
+ if (il_is_ht40_tx_allowed(il, ctx, NULL)) {
+ /* pure ht40 */
+ if (ctx->ht.protection == IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
+ rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40;
+ /* Note: control channel is opposite of extension channel */
+ switch (ctx->ht.extension_chan_offset) {
+ case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
+ rxon->flags &=
+ ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
+ break;
+ case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
+ rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
+ break;
+ }
+ } else {
+ /* Note: control channel is opposite of extension channel */
+ switch (ctx->ht.extension_chan_offset) {
+ case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
+ rxon->flags &=
+ ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
+ rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
+ break;
+ case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
+ rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
+ rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
+ break;
+ case IEEE80211_HT_PARAM_CHA_SEC_NONE:
+ default:
+ /* channel location only valid if in Mixed mode */
+ IL_ERR("invalid extension channel offset\n");
+ break;
+ }
+ }
+ } else {
+ rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY;
+ }
+
+ if (il->cfg->ops->hcmd->set_rxon_chain)
+ il->cfg->ops->hcmd->set_rxon_chain(il, ctx);
+
+ D_ASSOC("rxon flags 0x%X operation mode :0x%X "
+ "extension channel offset 0x%x\n", le32_to_cpu(rxon->flags),
+ ctx->ht.protection, ctx->ht.extension_chan_offset);
+}
+
+void
+il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf)
+{
+ _il_set_rxon_ht(il, ht_conf, &il->ctx);
+}
+EXPORT_SYMBOL(il_set_rxon_ht);
+
+/* Return valid, unused, channel for a passive scan to reset the RF */
+u8
+il_get_single_channel_number(struct il_priv *il, enum ieee80211_band band)
+{
+ const struct il_channel_info *ch_info;
+ int i;
+ u8 channel = 0;
+ u8 min, max;
+
+ if (band == IEEE80211_BAND_5GHZ) {
+ min = 14;
+ max = il->channel_count;
+ } else {
+ min = 0;
+ max = 14;
+ }
+
+ for (i = min; i < max; i++) {
+ channel = il->channel_info[i].channel;
+ if (channel == le16_to_cpu(il->ctx.staging.channel))
+ continue;
+
+ ch_info = il_get_channel_info(il, band, channel);
+ if (il_is_channel_valid(ch_info))
+ break;
+ }
+
+ return channel;
+}
+EXPORT_SYMBOL(il_get_single_channel_number);
+
+/**
+ * il_set_rxon_channel - Set the band and channel values in staging RXON
+ * @ch: requested channel as a pointer to struct ieee80211_channel
+ *
+ * NOTE: Does not commit to the hardware; it sets appropriate bit fields
+ * in the staging RXON flag structure based on the ch->band
+ */
+int
+il_set_rxon_channel(struct il_priv *il, struct ieee80211_channel *ch,
+ struct il_rxon_context *ctx)
+{
+ enum ieee80211_band band = ch->band;
+ u16 channel = ch->hw_value;
+
+ if (le16_to_cpu(ctx->staging.channel) == channel && il->band == band)
+ return 0;
+
+ ctx->staging.channel = cpu_to_le16(channel);
+ if (band == IEEE80211_BAND_5GHZ)
+ ctx->staging.flags &= ~RXON_FLG_BAND_24G_MSK;
+ else
+ ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
+
+ il->band = band;
+
+ D_INFO("Staging channel set to %d [%d]\n", channel, band);
+
+ return 0;
+}
+EXPORT_SYMBOL(il_set_rxon_channel);
+
+void
+il_set_flags_for_band(struct il_priv *il, struct il_rxon_context *ctx,
+ enum ieee80211_band band, struct ieee80211_vif *vif)
+{
+ if (band == IEEE80211_BAND_5GHZ) {
+ ctx->staging.flags &=
+ ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK |
+ RXON_FLG_CCK_MSK);
+ ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
+ } else {
+ /* Copied from il_post_associate() */
+ if (vif && vif->bss_conf.use_short_slot)
+ ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
+ else
+ ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
+
+ ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
+ ctx->staging.flags |= RXON_FLG_AUTO_DETECT_MSK;
+ ctx->staging.flags &= ~RXON_FLG_CCK_MSK;
+ }
+}
+EXPORT_SYMBOL(il_set_flags_for_band);
+
+/*
+ * initialize rxon structure with default values from eeprom
+ */
+void
+il_connection_init_rx_config(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ const struct il_channel_info *ch_info;
+
+ memset(&ctx->staging, 0, sizeof(ctx->staging));
+
+ if (!ctx->vif) {
+ ctx->staging.dev_type = ctx->unused_devtype;
+ } else
+ switch (ctx->vif->type) {
+
+ case NL80211_IFTYPE_STATION:
+ ctx->staging.dev_type = ctx->station_devtype;
+ ctx->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
+ break;
+
+ case NL80211_IFTYPE_ADHOC:
+ ctx->staging.dev_type = ctx->ibss_devtype;
+ ctx->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
+ ctx->staging.filter_flags =
+ RXON_FILTER_BCON_AWARE_MSK |
+ RXON_FILTER_ACCEPT_GRP_MSK;
+ break;
+
+ default:
+ IL_ERR("Unsupported interface type %d\n",
+ ctx->vif->type);
+ break;
+ }
+
+#if 0
+ /* TODO: Figure out when short_preamble would be set and cache from
+ * that */
+ if (!hw_to_local(il->hw)->short_preamble)
+ ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
+ else
+ ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
+#endif
+
+ ch_info =
+ il_get_channel_info(il, il->band, le16_to_cpu(ctx->active.channel));
+
+ if (!ch_info)
+ ch_info = &il->channel_info[0];
+
+ ctx->staging.channel = cpu_to_le16(ch_info->channel);
+ il->band = ch_info->band;
+
+ il_set_flags_for_band(il, ctx, il->band, ctx->vif);
+
+ ctx->staging.ofdm_basic_rates =
+ (IL_OFDM_RATES_MASK >> IL_FIRST_OFDM_RATE) & 0xFF;
+ ctx->staging.cck_basic_rates =
+ (IL_CCK_RATES_MASK >> IL_FIRST_CCK_RATE) & 0xF;
+
+ /* clear both MIX and PURE40 mode flag */
+ ctx->staging.flags &=
+ ~(RXON_FLG_CHANNEL_MODE_MIXED | RXON_FLG_CHANNEL_MODE_PURE_40);
+ if (ctx->vif)
+ memcpy(ctx->staging.node_addr, ctx->vif->addr, ETH_ALEN);
+
+ ctx->staging.ofdm_ht_single_stream_basic_rates = 0xff;
+ ctx->staging.ofdm_ht_dual_stream_basic_rates = 0xff;
+}
+EXPORT_SYMBOL(il_connection_init_rx_config);
+
+void
+il_set_rate(struct il_priv *il)
+{
+ const struct ieee80211_supported_band *hw = NULL;
+ struct ieee80211_rate *rate;
+ int i;
+
+ hw = il_get_hw_mode(il, il->band);
+ if (!hw) {
+ IL_ERR("Failed to set rate: unable to get hw mode\n");
+ return;
+ }
+
+ il->active_rate = 0;
+
+ for (i = 0; i < hw->n_bitrates; i++) {
+ rate = &(hw->bitrates[i]);
+ if (rate->hw_value < RATE_COUNT_LEGACY)
+ il->active_rate |= (1 << rate->hw_value);
+ }
+
+ D_RATE("Set active_rate = %0x\n", il->active_rate);
+
+ il->ctx.staging.cck_basic_rates =
+ (IL_CCK_BASIC_RATES_MASK >> IL_FIRST_CCK_RATE) & 0xF;
+
+ il->ctx.staging.ofdm_basic_rates =
+ (IL_OFDM_BASIC_RATES_MASK >> IL_FIRST_OFDM_RATE) & 0xFF;
+}
+EXPORT_SYMBOL(il_set_rate);
+
+void
+il_chswitch_done(struct il_priv *il, bool is_success)
+{
+ struct il_rxon_context *ctx = &il->ctx;
+
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ return;
+
+ if (test_and_clear_bit(S_CHANNEL_SWITCH_PENDING, &il->status))
+ ieee80211_chswitch_done(ctx->vif, is_success);
+}
+EXPORT_SYMBOL(il_chswitch_done);
+
+void
+il_hdl_csa(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ struct il_csa_notification *csa = &(pkt->u.csa_notif);
+
+ struct il_rxon_context *ctx = &il->ctx;
+ struct il_rxon_cmd *rxon = (void *)&ctx->active;
+
+ if (!test_bit(S_CHANNEL_SWITCH_PENDING, &il->status))
+ return;
+
+ if (!le32_to_cpu(csa->status) && csa->channel == il->switch_channel) {
+ rxon->channel = csa->channel;
+ ctx->staging.channel = csa->channel;
+ D_11H("CSA notif: channel %d\n", le16_to_cpu(csa->channel));
+ il_chswitch_done(il, true);
+ } else {
+ IL_ERR("CSA notif (fail) : channel %d\n",
+ le16_to_cpu(csa->channel));
+ il_chswitch_done(il, false);
+ }
+}
+EXPORT_SYMBOL(il_hdl_csa);
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+void
+il_print_rx_config_cmd(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ struct il_rxon_cmd *rxon = &ctx->staging;
+
+ D_RADIO("RX CONFIG:\n");
+ il_print_hex_dump(il, IL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
+ D_RADIO("u16 channel: 0x%x\n", le16_to_cpu(rxon->channel));
+ D_RADIO("u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
+ D_RADIO("u32 filter_flags: 0x%08x\n", le32_to_cpu(rxon->filter_flags));
+ D_RADIO("u8 dev_type: 0x%x\n", rxon->dev_type);
+ D_RADIO("u8 ofdm_basic_rates: 0x%02x\n", rxon->ofdm_basic_rates);
+ D_RADIO("u8 cck_basic_rates: 0x%02x\n", rxon->cck_basic_rates);
+ D_RADIO("u8[6] node_addr: %pM\n", rxon->node_addr);
+ D_RADIO("u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
+ D_RADIO("u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id));
+}
+EXPORT_SYMBOL(il_print_rx_config_cmd);
+#endif
+/**
+ * il_irq_handle_error - called for HW or SW error interrupt from card
+ */
+void
+il_irq_handle_error(struct il_priv *il)
+{
+ /* Set the FW error flag -- cleared on il_down */
+ set_bit(S_FW_ERROR, &il->status);
+
+ /* Cancel currently queued command. */
+ clear_bit(S_HCMD_ACTIVE, &il->status);
+
+ IL_ERR("Loaded firmware version: %s\n", il->hw->wiphy->fw_version);
+
+ il->cfg->ops->lib->dump_nic_error_log(il);
+ if (il->cfg->ops->lib->dump_fh)
+ il->cfg->ops->lib->dump_fh(il, NULL, false);
+#ifdef CONFIG_IWLEGACY_DEBUG
+ if (il_get_debug_level(il) & IL_DL_FW_ERRORS)
+ il_print_rx_config_cmd(il, &il->ctx);
+#endif
+
+ wake_up(&il->wait_command_queue);
+
+ /* Keep the restart process from trying to send host
+ * commands by clearing the INIT status bit */
+ clear_bit(S_READY, &il->status);
+
+ if (!test_bit(S_EXIT_PENDING, &il->status)) {
+ IL_DBG(IL_DL_FW_ERRORS,
+ "Restarting adapter due to uCode error.\n");
+
+ if (il->cfg->mod_params->restart_fw)
+ queue_work(il->workqueue, &il->restart);
+ }
+}
+EXPORT_SYMBOL(il_irq_handle_error);
+
+static int
+il_apm_stop_master(struct il_priv *il)
+{
+ int ret = 0;
+
+ /* stop device's busmaster DMA activity */
+ il_set_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
+
+ ret =
+ _il_poll_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED,
+ CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
+ if (ret)
+ IL_WARN("Master Disable Timed Out, 100 usec\n");
+
+ D_INFO("stop master\n");
+
+ return ret;
+}
+
+void
+il_apm_stop(struct il_priv *il)
+{
+ D_INFO("Stop card, put in low power state\n");
+
+ /* Stop device's DMA activity */
+ il_apm_stop_master(il);
+
+ /* Reset the entire device */
+ il_set_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+
+ udelay(10);
+
+ /*
+ * Clear "initialization complete" bit to move adapter from
+ * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
+ */
+ il_clear_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+}
+EXPORT_SYMBOL(il_apm_stop);
+
+/*
+ * Start up NIC's basic functionality after it has been reset
+ * (e.g. after platform boot, or shutdown via il_apm_stop())
+ * NOTE: This does not load uCode nor start the embedded processor
+ */
+int
+il_apm_init(struct il_priv *il)
+{
+ int ret = 0;
+ u16 lctl;
+
+ D_INFO("Init card's basic functions\n");
+
+ /*
+ * Use "set_bit" below rather than "write", to preserve any hardware
+ * bits already set by default after reset.
+ */
+
+ /* Disable L0S exit timer (platform NMI Work/Around) */
+ il_set_bit(il, CSR_GIO_CHICKEN_BITS,
+ CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
+
+ /*
+ * Disable L0s without affecting L1;
+ * don't wait for ICH L0s (ICH bug W/A)
+ */
+ il_set_bit(il, CSR_GIO_CHICKEN_BITS,
+ CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
+
+ /* Set FH wait threshold to maximum (HW error during stress W/A) */
+ il_set_bit(il, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);
+
+ /*
+ * Enable HAP INTA (interrupt from management bus) to
+ * wake device's PCI Express link L1a -> L0s
+	 * NOTE: This is a no-op for 3945 (non-existent bit)
+ */
+ il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
+
+ /*
+ * HW bug W/A for instability in PCIe bus L0->L0S->L1 transition.
+ * Check if BIOS (or OS) enabled L1-ASPM on this device.
+ * If so (likely), disable L0S, so device moves directly L0->L1;
+ * costs negligible amount of power savings.
+ * If not (unlikely), enable L0S, so there is at least some
+ * power savings, even without L1.
+ */
+ if (il->cfg->base_params->set_l0s) {
+ lctl = il_pcie_link_ctl(il);
+ if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
+ PCI_CFG_LINK_CTRL_VAL_L1_EN) {
+ /* L1-ASPM enabled; disable(!) L0S */
+ il_set_bit(il, CSR_GIO_REG,
+ CSR_GIO_REG_VAL_L0S_ENABLED);
+ D_POWER("L1 Enabled; Disabling L0S\n");
+ } else {
+ /* L1-ASPM disabled; enable(!) L0S */
+ il_clear_bit(il, CSR_GIO_REG,
+ CSR_GIO_REG_VAL_L0S_ENABLED);
+ D_POWER("L1 Disabled; Enabling L0S\n");
+ }
+ }
+
+ /* Configure analog phase-lock-loop before activating to D0A */
+ if (il->cfg->base_params->pll_cfg_val)
+ il_set_bit(il, CSR_ANA_PLL_CFG,
+ il->cfg->base_params->pll_cfg_val);
+
+ /*
+ * Set "initialization complete" bit to move adapter from
+ * D0U* --> D0A* (powered-up active) state.
+ */
+ il_set_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+
+ /*
+ * Wait for clock stabilization; once stabilized, access to
+ * device-internal resources is supported, e.g. il_wr_prph()
+ * and accesses to uCode SRAM.
+ */
+ ret =
+ _il_poll_bit(il, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+ CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
+ if (ret < 0) {
+ D_INFO("Failed to init the card\n");
+ goto out;
+ }
+
+ /*
+ * Enable DMA and BSM (if used) clocks, wait for them to stabilize.
+	 * BSM (Bootstrap State Machine) is only in 3945 and 4965.
+ *
+ * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
+ * do not disable clocks. This preserves any hardware bits already
+ * set by default in "CLK_CTRL_REG" after reset.
+ */
+ if (il->cfg->base_params->use_bsm)
+ il_wr_prph(il, APMG_CLK_EN_REG,
+ APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT);
+ else
+ il_wr_prph(il, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
+ udelay(20);
+
+ /* Disable L1-Active */
+ il_set_bits_prph(il, APMG_PCIDEV_STT_REG,
+ APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
+
+out:
+ return ret;
+}
+EXPORT_SYMBOL(il_apm_init);
+
+int
+il_set_tx_power(struct il_priv *il, s8 tx_power, bool force)
+{
+ int ret;
+ s8 prev_tx_power;
+ bool defer;
+ struct il_rxon_context *ctx = &il->ctx;
+
+ lockdep_assert_held(&il->mutex);
+
+ if (il->tx_power_user_lmt == tx_power && !force)
+ return 0;
+
+ if (!il->cfg->ops->lib->send_tx_power)
+ return -EOPNOTSUPP;
+
+	/* 0 dBm means 1 milliwatt */
+ if (tx_power < 0) {
+ IL_WARN("Requested user TXPOWER %d below 1 mW.\n", tx_power);
+ return -EINVAL;
+ }
+
+ if (tx_power > il->tx_power_device_lmt) {
+ IL_WARN("Requested user TXPOWER %d above upper limit %d.\n",
+ tx_power, il->tx_power_device_lmt);
+ return -EINVAL;
+ }
+
+ if (!il_is_ready_rf(il))
+ return -EIO;
+
+	/* scan complete and commit_rxon use the tx_power_next value,
+	 * so it always needs to be updated with the newest request */
+ il->tx_power_next = tx_power;
+
+ /* do not set tx power when scanning or channel changing */
+ defer = test_bit(S_SCANNING, &il->status) ||
+ memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging));
+ if (defer && !force) {
+ D_INFO("Deferring tx power set\n");
+ return 0;
+ }
+
+ prev_tx_power = il->tx_power_user_lmt;
+ il->tx_power_user_lmt = tx_power;
+
+ ret = il->cfg->ops->lib->send_tx_power(il);
+
+	/* if setting tx_power fails, restore the original tx power */
+ if (ret) {
+ il->tx_power_user_lmt = prev_tx_power;
+ il->tx_power_next = prev_tx_power;
+ }
+ return ret;
+}
+EXPORT_SYMBOL(il_set_tx_power);
+
+void
+il_send_bt_config(struct il_priv *il)
+{
+ struct il_bt_cmd bt_cmd = {
+ .lead_time = BT_LEAD_TIME_DEF,
+ .max_kill = BT_MAX_KILL_DEF,
+ .kill_ack_mask = 0,
+ .kill_cts_mask = 0,
+ };
+
+ if (!bt_coex_active)
+ bt_cmd.flags = BT_COEX_DISABLE;
+ else
+ bt_cmd.flags = BT_COEX_ENABLE;
+
+ D_INFO("BT coex %s\n",
+ (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
+
+ if (il_send_cmd_pdu(il, C_BT_CONFIG, sizeof(struct il_bt_cmd), &bt_cmd))
+ IL_ERR("failed to send BT Coex Config\n");
+}
+EXPORT_SYMBOL(il_send_bt_config);
+
+int
+il_send_stats_request(struct il_priv *il, u8 flags, bool clear)
+{
+ struct il_stats_cmd stats_cmd = {
+ .configuration_flags = clear ? IL_STATS_CONF_CLEAR_STATS : 0,
+ };
+
+ if (flags & CMD_ASYNC)
+ return il_send_cmd_pdu_async(il, C_STATS, sizeof(struct il_stats_cmd),
+ &stats_cmd, NULL);
+ else
+ return il_send_cmd_pdu(il, C_STATS, sizeof(struct il_stats_cmd),
+ &stats_cmd);
+}
+EXPORT_SYMBOL(il_send_stats_request);
+
+void
+il_hdl_pm_sleep(struct il_priv *il, struct il_rx_buf *rxb)
+{
+#ifdef CONFIG_IWLEGACY_DEBUG
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ struct il_sleep_notification *sleep = &(pkt->u.sleep_notif);
+ D_RX("sleep mode: %d, src: %d\n",
+ sleep->pm_sleep_mode, sleep->pm_wakeup_src);
+#endif
+}
+EXPORT_SYMBOL(il_hdl_pm_sleep);
+
+void
+il_hdl_pm_debug_stats(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ u32 len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
+ D_RADIO("Dumping %d bytes of unhandled notification for %s:\n", len,
+ il_get_cmd_string(pkt->hdr.cmd));
+ il_print_hex_dump(il, IL_DL_RADIO, pkt->u.raw, len);
+}
+EXPORT_SYMBOL(il_hdl_pm_debug_stats);
+
+void
+il_hdl_error(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+
+ IL_ERR("Error Reply type 0x%08X cmd %s (0x%02X) "
+ "seq 0x%04X ser 0x%08X\n",
+ le32_to_cpu(pkt->u.err_resp.error_type),
+ il_get_cmd_string(pkt->u.err_resp.cmd_id),
+ pkt->u.err_resp.cmd_id,
+ le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
+ le32_to_cpu(pkt->u.err_resp.error_info));
+}
+EXPORT_SYMBOL(il_hdl_error);
+
+void
+il_clear_isr_stats(struct il_priv *il)
+{
+ memset(&il->isr_stats, 0, sizeof(il->isr_stats));
+}
+
+int
+il_mac_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct il_priv *il = hw->priv;
+ unsigned long flags;
+ int q;
+
+ D_MAC80211("enter\n");
+
+ if (!il_is_ready_rf(il)) {
+ D_MAC80211("leave - RF not ready\n");
+ return -EIO;
+ }
+
+ if (queue >= AC_NUM) {
+ D_MAC80211("leave - queue >= AC_NUM %d\n", queue);
+ return 0;
+ }
+
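+	/*
+	 * mac80211 numbers its queues from highest priority down (VO = 0);
+	 * the def_qos_parm.ac[] array is filled in the reverse order, hence
+	 * the index flip below.
+	 */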
+ q = AC_NUM - 1 - queue;
+
+ spin_lock_irqsave(&il->lock, flags);
+
+ il->ctx.qos_data.def_qos_parm.ac[q].cw_min =
+ cpu_to_le16(params->cw_min);
+ il->ctx.qos_data.def_qos_parm.ac[q].cw_max =
+ cpu_to_le16(params->cw_max);
+ il->ctx.qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
+ il->ctx.qos_data.def_qos_parm.ac[q].edca_txop =
+ cpu_to_le16((params->txop * 32));
+
+ il->ctx.qos_data.def_qos_parm.ac[q].reserved1 = 0;
+
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ D_MAC80211("leave\n");
+ return 0;
+}
+EXPORT_SYMBOL(il_mac_conf_tx);
+
+int
+il_mac_tx_last_beacon(struct ieee80211_hw *hw)
+{
+ struct il_priv *il = hw->priv;
+
+ return il->ibss_manager == IL_IBSS_MANAGER;
+}
+EXPORT_SYMBOL_GPL(il_mac_tx_last_beacon);
+
+static int
+il_set_mode(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ il_connection_init_rx_config(il, ctx);
+
+ if (il->cfg->ops->hcmd->set_rxon_chain)
+ il->cfg->ops->hcmd->set_rxon_chain(il, ctx);
+
+ return il_commit_rxon(il, ctx);
+}
+
+static int
+il_setup_interface(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ struct ieee80211_vif *vif = ctx->vif;
+ int err;
+
+ lockdep_assert_held(&il->mutex);
+
+ /*
+ * This variable will be correct only when there's just
+ * a single context, but all code using it is for hardware
+ * that supports only one context.
+ */
+ il->iw_mode = vif->type;
+
+ ctx->is_active = true;
+
+ err = il_set_mode(il, ctx);
+ if (err) {
+ if (!ctx->always_active)
+ ctx->is_active = false;
+ return err;
+ }
+
+ return 0;
+}
+
+int
+il_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct il_priv *il = hw->priv;
+ struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
+ int err;
+ u32 modes;
+
+ D_MAC80211("enter: type %d, addr %pM\n", vif->type, vif->addr);
+
+ mutex_lock(&il->mutex);
+
+ if (!il_is_ready_rf(il)) {
+ IL_WARN("Try to add interface when device not ready\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ /* check if busy context is exclusive */
+ if (il->ctx.vif &&
+ (il->ctx.exclusive_interface_modes & BIT(il->ctx.vif->type))) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ modes = il->ctx.interface_modes | il->ctx.exclusive_interface_modes;
+ if (!(modes & BIT(vif->type))) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ vif_priv->ctx = &il->ctx;
+ il->ctx.vif = vif;
+
+ err = il_setup_interface(il, &il->ctx);
+ if (err) {
+ il->ctx.vif = NULL;
+ il->iw_mode = NL80211_IFTYPE_STATION;
+ }
+
+out:
+ mutex_unlock(&il->mutex);
+
+ D_MAC80211("leave\n");
+ return err;
+}
+EXPORT_SYMBOL(il_mac_add_interface);
+
+static void
+il_teardown_interface(struct il_priv *il, struct ieee80211_vif *vif,
+ bool mode_change)
+{
+ struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif);
+
+ lockdep_assert_held(&il->mutex);
+
+ if (il->scan_vif == vif) {
+ il_scan_cancel_timeout(il, 200);
+ il_force_scan_end(il);
+ }
+
+ if (!mode_change) {
+ il_set_mode(il, ctx);
+ if (!ctx->always_active)
+ ctx->is_active = false;
+ }
+}
+
+void
+il_mac_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct il_priv *il = hw->priv;
+ struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif);
+
+ D_MAC80211("enter\n");
+
+ mutex_lock(&il->mutex);
+
+ WARN_ON(ctx->vif != vif);
+ ctx->vif = NULL;
+
+ il_teardown_interface(il, vif, false);
+
+ memset(il->bssid, 0, ETH_ALEN);
+ mutex_unlock(&il->mutex);
+
+ D_MAC80211("leave\n");
+
+}
+EXPORT_SYMBOL(il_mac_remove_interface);
+
+int
+il_alloc_txq_mem(struct il_priv *il)
+{
+ if (!il->txq)
+ il->txq =
+ kzalloc(sizeof(struct il_tx_queue) *
+ il->cfg->base_params->num_of_queues, GFP_KERNEL);
+ if (!il->txq) {
+ IL_ERR("Not enough memory for txq\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(il_alloc_txq_mem);
+
+void
+il_free_txq_mem(struct il_priv *il)
+{
+ kfree(il->txq);
+ il->txq = NULL;
+}
+EXPORT_SYMBOL(il_free_txq_mem);
+
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+
+#define IL_TRAFFIC_DUMP_SIZE (IL_TRAFFIC_ENTRY_SIZE * IL_TRAFFIC_ENTRIES)
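+/*
+ * The tx/rx traffic logs are circular buffers of IL_TRAFFIC_ENTRIES slots,
+ * each holding up to IL_TRAFFIC_ENTRY_SIZE bytes of a logged frame;
+ * tx/rx_traffic_idx point at the next slot to overwrite.
+ */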
+
+void
+il_reset_traffic_log(struct il_priv *il)
+{
+ il->tx_traffic_idx = 0;
+ il->rx_traffic_idx = 0;
+ if (il->tx_traffic)
+ memset(il->tx_traffic, 0, IL_TRAFFIC_DUMP_SIZE);
+ if (il->rx_traffic)
+ memset(il->rx_traffic, 0, IL_TRAFFIC_DUMP_SIZE);
+}
+
+int
+il_alloc_traffic_mem(struct il_priv *il)
+{
+ u32 traffic_size = IL_TRAFFIC_DUMP_SIZE;
+
+ if (il_debug_level & IL_DL_TX) {
+ if (!il->tx_traffic) {
+ il->tx_traffic = kzalloc(traffic_size, GFP_KERNEL);
+ if (!il->tx_traffic)
+ return -ENOMEM;
+ }
+ }
+ if (il_debug_level & IL_DL_RX) {
+ if (!il->rx_traffic) {
+ il->rx_traffic = kzalloc(traffic_size, GFP_KERNEL);
+ if (!il->rx_traffic)
+ return -ENOMEM;
+ }
+ }
+ il_reset_traffic_log(il);
+ return 0;
+}
+EXPORT_SYMBOL(il_alloc_traffic_mem);
+
+void
+il_free_traffic_mem(struct il_priv *il)
+{
+ kfree(il->tx_traffic);
+ il->tx_traffic = NULL;
+
+ kfree(il->rx_traffic);
+ il->rx_traffic = NULL;
+}
+EXPORT_SYMBOL(il_free_traffic_mem);
+
+void
+il_dbg_log_tx_data_frame(struct il_priv *il, u16 length,
+ struct ieee80211_hdr *header)
+{
+ __le16 fc;
+ u16 len;
+
+ if (likely(!(il_debug_level & IL_DL_TX)))
+ return;
+
+ if (!il->tx_traffic)
+ return;
+
+ fc = header->frame_control;
+ if (ieee80211_is_data(fc)) {
+ len =
+ (length >
+ IL_TRAFFIC_ENTRY_SIZE) ? IL_TRAFFIC_ENTRY_SIZE : length;
+ memcpy((il->tx_traffic +
+ (il->tx_traffic_idx * IL_TRAFFIC_ENTRY_SIZE)), header,
+ len);
+ il->tx_traffic_idx =
+ (il->tx_traffic_idx + 1) % IL_TRAFFIC_ENTRIES;
+ }
+}
+EXPORT_SYMBOL(il_dbg_log_tx_data_frame);
+
+void
+il_dbg_log_rx_data_frame(struct il_priv *il, u16 length,
+ struct ieee80211_hdr *header)
+{
+ __le16 fc;
+ u16 len;
+
+ if (likely(!(il_debug_level & IL_DL_RX)))
+ return;
+
+ if (!il->rx_traffic)
+ return;
+
+ fc = header->frame_control;
+ if (ieee80211_is_data(fc)) {
+ len =
+ (length >
+ IL_TRAFFIC_ENTRY_SIZE) ? IL_TRAFFIC_ENTRY_SIZE : length;
+ memcpy((il->rx_traffic +
+ (il->rx_traffic_idx * IL_TRAFFIC_ENTRY_SIZE)), header,
+ len);
+ il->rx_traffic_idx =
+ (il->rx_traffic_idx + 1) % IL_TRAFFIC_ENTRIES;
+ }
+}
+EXPORT_SYMBOL(il_dbg_log_rx_data_frame);
+
+const char *
+il_get_mgmt_string(int cmd)
+{
+ switch (cmd) {
+ IL_CMD(MANAGEMENT_ASSOC_REQ);
+ IL_CMD(MANAGEMENT_ASSOC_RESP);
+ IL_CMD(MANAGEMENT_REASSOC_REQ);
+ IL_CMD(MANAGEMENT_REASSOC_RESP);
+ IL_CMD(MANAGEMENT_PROBE_REQ);
+ IL_CMD(MANAGEMENT_PROBE_RESP);
+ IL_CMD(MANAGEMENT_BEACON);
+ IL_CMD(MANAGEMENT_ATIM);
+ IL_CMD(MANAGEMENT_DISASSOC);
+ IL_CMD(MANAGEMENT_AUTH);
+ IL_CMD(MANAGEMENT_DEAUTH);
+ IL_CMD(MANAGEMENT_ACTION);
+ default:
+ return "UNKNOWN";
+
+ }
+}
+
+const char *
+il_get_ctrl_string(int cmd)
+{
+ switch (cmd) {
+ IL_CMD(CONTROL_BACK_REQ);
+ IL_CMD(CONTROL_BACK);
+ IL_CMD(CONTROL_PSPOLL);
+ IL_CMD(CONTROL_RTS);
+ IL_CMD(CONTROL_CTS);
+ IL_CMD(CONTROL_ACK);
+ IL_CMD(CONTROL_CFEND);
+ IL_CMD(CONTROL_CFENDACK);
+ default:
+ return "UNKNOWN";
+
+ }
+}
+
+void
+il_clear_traffic_stats(struct il_priv *il)
+{
+ memset(&il->tx_stats, 0, sizeof(struct traffic_stats));
+ memset(&il->rx_stats, 0, sizeof(struct traffic_stats));
+}
+
+/*
+ * If CONFIG_IWLEGACY_DEBUGFS is defined, il_update_stats() records all
+ * MGMT, CTRL and DATA packets for both the TX and RX paths; use debugfs
+ * to display the tx/rx_stats.
+ *
+ * If CONFIG_IWLEGACY_DEBUGFS is not defined, no MGMT and CTRL information
+ * is recorded, but DATA packets are still counted because il_led.c needs
+ * to control the LED blinking based on the number of tx and rx frames.
+ */
+void
+il_update_stats(struct il_priv *il, bool is_tx, __le16 fc, u16 len)
+{
+ struct traffic_stats *stats;
+
+ if (is_tx)
+ stats = &il->tx_stats;
+ else
+ stats = &il->rx_stats;
+
+ if (ieee80211_is_mgmt(fc)) {
+ switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
+ case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
+ stats->mgmt[MANAGEMENT_ASSOC_REQ]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
+ stats->mgmt[MANAGEMENT_ASSOC_RESP]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
+ stats->mgmt[MANAGEMENT_REASSOC_REQ]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
+ stats->mgmt[MANAGEMENT_REASSOC_RESP]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
+ stats->mgmt[MANAGEMENT_PROBE_REQ]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
+ stats->mgmt[MANAGEMENT_PROBE_RESP]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_BEACON):
+ stats->mgmt[MANAGEMENT_BEACON]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_ATIM):
+ stats->mgmt[MANAGEMENT_ATIM]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
+ stats->mgmt[MANAGEMENT_DISASSOC]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_AUTH):
+ stats->mgmt[MANAGEMENT_AUTH]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
+ stats->mgmt[MANAGEMENT_DEAUTH]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_ACTION):
+ stats->mgmt[MANAGEMENT_ACTION]++;
+ break;
+ }
+ } else if (ieee80211_is_ctl(fc)) {
+ switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
+ case cpu_to_le16(IEEE80211_STYPE_BACK_REQ):
+ stats->ctrl[CONTROL_BACK_REQ]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_BACK):
+ stats->ctrl[CONTROL_BACK]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_PSPOLL):
+ stats->ctrl[CONTROL_PSPOLL]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_RTS):
+ stats->ctrl[CONTROL_RTS]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_CTS):
+ stats->ctrl[CONTROL_CTS]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_ACK):
+ stats->ctrl[CONTROL_ACK]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_CFEND):
+ stats->ctrl[CONTROL_CFEND]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_CFENDACK):
+ stats->ctrl[CONTROL_CFENDACK]++;
+ break;
+ }
+ } else {
+ /* data */
+ stats->data_cnt++;
+ stats->data_bytes += len;
+ }
+}
+EXPORT_SYMBOL(il_update_stats);
+#endif
+
+int
+il_force_reset(struct il_priv *il, bool external)
+{
+ struct il_force_reset *force_reset;
+
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ return -EINVAL;
+
+ force_reset = &il->force_reset;
+ force_reset->reset_request_count++;
+ if (!external) {
+ if (force_reset->last_force_reset_jiffies &&
+ time_after(force_reset->last_force_reset_jiffies +
+ force_reset->reset_duration, jiffies)) {
+ D_INFO("force reset rejected\n");
+ force_reset->reset_reject_count++;
+ return -EAGAIN;
+ }
+ }
+ force_reset->reset_success_count++;
+ force_reset->last_force_reset_jiffies = jiffies;
+
+ /*
+	 * If the request is external (e.g. from debugfs), always perform it
+	 * regardless of the module parameter setting.
+	 * If the request is internal (uCode error or driver-detected
+	 * failure), the restart_fw module parameter needs to be checked
+	 * before performing the firmware reload.
+ */
+
+ if (!external && !il->cfg->mod_params->restart_fw) {
+ D_INFO("Cancel firmware reload based on "
+ "module parameter setting\n");
+ return 0;
+ }
+
+ IL_ERR("On demand firmware reload\n");
+
+ /* Set the FW error flag -- cleared on il_down */
+ set_bit(S_FW_ERROR, &il->status);
+ wake_up(&il->wait_command_queue);
+ /*
+ * Keep the restart process from trying to send host
+ * commands by clearing the INIT status bit
+ */
+ clear_bit(S_READY, &il->status);
+ queue_work(il->workqueue, &il->restart);
+
+ return 0;
+}
+
+int
+il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ enum nl80211_iftype newtype, bool newp2p)
+{
+ struct il_priv *il = hw->priv;
+ struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif);
+ u32 modes;
+ int err;
+
+ newtype = ieee80211_iftype_p2p(newtype, newp2p);
+
+ mutex_lock(&il->mutex);
+
+ if (!ctx->vif || !il_is_ready_rf(il)) {
+ /*
+ * Huh? But wait ... this can maybe happen when
+ * we're in the middle of a firmware restart!
+ */
+ err = -EBUSY;
+ goto out;
+ }
+
+ modes = ctx->interface_modes | ctx->exclusive_interface_modes;
+ if (!(modes & BIT(newtype))) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if ((il->ctx.exclusive_interface_modes & BIT(il->ctx.vif->type)) ||
+ (il->ctx.exclusive_interface_modes & BIT(newtype))) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ /* success */
+ il_teardown_interface(il, vif, true);
+ vif->type = newtype;
+ vif->p2p = newp2p;
+ err = il_setup_interface(il, ctx);
+ WARN_ON(err);
+ /*
+ * We've switched internally, but submitting to the
+ * device may have failed for some reason. Mask this
+ * error, because otherwise mac80211 will not switch
+ * (and set the interface type back) and we'll be
+ * out of sync with it.
+ */
+ err = 0;
+
+out:
+ mutex_unlock(&il->mutex);
+ return err;
+}
+EXPORT_SYMBOL(il_mac_change_interface);
+
+/*
+ * On every watchdog tick we check (latest) time stamp. If it does not
+ * change during the timeout period and the queue is not empty, we reset the firmware.
+ */
+static int
+il_check_stuck_queue(struct il_priv *il, int cnt)
+{
+ struct il_tx_queue *txq = &il->txq[cnt];
+ struct il_queue *q = &txq->q;
+ unsigned long timeout;
+ int ret;
+
+ if (q->read_ptr == q->write_ptr) {
+ txq->time_stamp = jiffies;
+ return 0;
+ }
+
+ timeout =
+ txq->time_stamp +
+ msecs_to_jiffies(il->cfg->base_params->wd_timeout);
+
+ if (time_after(jiffies, timeout)) {
+ IL_ERR("Queue %d stuck for %u ms.\n", q->id,
+ il->cfg->base_params->wd_timeout);
+ ret = il_force_reset(il, false);
+ return (ret == -EAGAIN) ? 0 : 1;
+ }
+
+ return 0;
+}
+
+/*
+ * Making the watchdog tick a quarter of the timeout ensures we will
+ * discover a hung queue between timeout and 1.25 * timeout
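+ * (e.g. a 2000 ms wd_timeout gives a 500 ms tick, so a queue that hangs
+ * right after being checked is flagged between 2000 and 2500 ms later).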
+ */
+#define IL_WD_TICK(timeout) ((timeout) / 4)
+
+/*
+ * Watchdog timer callback: we check each tx queue for being stuck; if one
+ * is hung we reset the firmware. If everything is fine, just rearm the timer.
+ */
+void
+il_bg_watchdog(unsigned long data)
+{
+ struct il_priv *il = (struct il_priv *)data;
+ int cnt;
+ unsigned long timeout;
+
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ return;
+
+ timeout = il->cfg->base_params->wd_timeout;
+ if (timeout == 0)
+ return;
+
+ /* monitor and check for stuck cmd queue */
+ if (il_check_stuck_queue(il, il->cmd_queue))
+ return;
+
+ /* monitor and check for other stuck queues */
+ if (il_is_any_associated(il)) {
+ for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) {
+ /* skip as we already checked the command queue */
+ if (cnt == il->cmd_queue)
+ continue;
+ if (il_check_stuck_queue(il, cnt))
+ return;
+ }
+ }
+
+ mod_timer(&il->watchdog,
+ jiffies + msecs_to_jiffies(IL_WD_TICK(timeout)));
+}
+EXPORT_SYMBOL(il_bg_watchdog);
+
+void
+il_setup_watchdog(struct il_priv *il)
+{
+ unsigned int timeout = il->cfg->base_params->wd_timeout;
+
+ if (timeout)
+ mod_timer(&il->watchdog,
+ jiffies + msecs_to_jiffies(IL_WD_TICK(timeout)));
+ else
+ del_timer(&il->watchdog);
+}
+EXPORT_SYMBOL(il_setup_watchdog);
+
+/*
+ * Extended beacon time format:
+ * the time in usec is converted into a 32-bit value in extended:internal
+ * format, where the extended part is the beacon count and the internal
+ * part is the time in usec within one beacon interval.
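+ * For example, with a 100 TU beacon interval (assuming the usual
+ * TIME_UNIT of 1024 usec per TU), 250000 usec becomes 2 full beacon
+ * intervals in the extended part plus 45200 usec in the internal part.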
+ */
+u32
+il_usecs_to_beacons(struct il_priv *il, u32 usec, u32 beacon_interval)
+{
+ u32 quot;
+ u32 rem;
+ u32 interval = beacon_interval * TIME_UNIT;
+
+ if (!interval || !usec)
+ return 0;
+
+ quot =
+ (usec /
+ interval) & (il_beacon_time_mask_high(il,
+ il->hw_params.
+ beacon_time_tsf_bits) >> il->
+ hw_params.beacon_time_tsf_bits);
+ rem =
+ (usec % interval) & il_beacon_time_mask_low(il,
+ il->hw_params.
+ beacon_time_tsf_bits);
+
+ return (quot << il->hw_params.beacon_time_tsf_bits) + rem;
+}
+EXPORT_SYMBOL(il_usecs_to_beacons);
+
+/* base is usually what we get from ucode with each received frame,
+ * the same as HW timer counter counting down
+ */
+__le32
+il_add_beacon_time(struct il_priv *il, u32 base, u32 addon,
+ u32 beacon_interval)
+{
+ u32 base_low = base & il_beacon_time_mask_low(il,
+ il->hw_params.
+ beacon_time_tsf_bits);
+ u32 addon_low = addon & il_beacon_time_mask_low(il,
+ il->hw_params.
+ beacon_time_tsf_bits);
+ u32 interval = beacon_interval * TIME_UNIT;
+ u32 res = (base & il_beacon_time_mask_high(il,
+ il->hw_params.
+ beacon_time_tsf_bits)) +
+ (addon & il_beacon_time_mask_high(il,
+ il->hw_params.
+ beacon_time_tsf_bits));
+
+ if (base_low > addon_low)
+ res += base_low - addon_low;
+ else if (base_low < addon_low) {
+ res += interval + base_low - addon_low;
+ res += (1 << il->hw_params.beacon_time_tsf_bits);
+ } else
+ res += (1 << il->hw_params.beacon_time_tsf_bits);
+
+ return cpu_to_le32(res);
+}
+EXPORT_SYMBOL(il_add_beacon_time);
+
+#ifdef CONFIG_PM
+
+int
+il_pci_suspend(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct il_priv *il = pci_get_drvdata(pdev);
+
+ /*
+	 * This function is called when the system goes into the suspend state.
+	 * mac80211 will call il_mac_stop() from its suspend function first,
+	 * but since il_mac_stop() has no knowledge of who the caller is,
+	 * it will not call apm_ops.stop() to stop the DMA operation.
+	 * Call apm_ops.stop() here to make sure we stop the DMA.
+ */
+ il_apm_stop(il);
+
+ return 0;
+}
+EXPORT_SYMBOL(il_pci_suspend);
+
+int
+il_pci_resume(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct il_priv *il = pci_get_drvdata(pdev);
+ bool hw_rfkill = false;
+
+ /*
+ * We disable the RETRY_TIMEOUT register (0x41) to keep
+ * PCI Tx retries from interfering with C3 CPU state.
+ */
+ pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
+
+ il_enable_interrupts(il);
+
+ if (!(_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
+ hw_rfkill = true;
+
+ if (hw_rfkill)
+ set_bit(S_RF_KILL_HW, &il->status);
+ else
+ clear_bit(S_RF_KILL_HW, &il->status);
+
+ wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rfkill);
+
+ return 0;
+}
+EXPORT_SYMBOL(il_pci_resume);
+
+const struct dev_pm_ops il_pm_ops = {
+ .suspend = il_pci_suspend,
+ .resume = il_pci_resume,
+ .freeze = il_pci_suspend,
+ .thaw = il_pci_resume,
+ .poweroff = il_pci_suspend,
+ .restore = il_pci_resume,
+};
+EXPORT_SYMBOL(il_pm_ops);
+
+#endif /* CONFIG_PM */
+
+static void
+il_update_qos(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ return;
+
+ if (!ctx->is_active)
+ return;
+
+ ctx->qos_data.def_qos_parm.qos_flags = 0;
+
+ if (ctx->qos_data.qos_active)
+ ctx->qos_data.def_qos_parm.qos_flags |=
+ QOS_PARAM_FLG_UPDATE_EDCA_MSK;
+
+ if (ctx->ht.enabled)
+ ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
+
+ D_QOS("send QoS cmd with Qos active=%d FLAGS=0x%X\n",
+ ctx->qos_data.qos_active, ctx->qos_data.def_qos_parm.qos_flags);
+
+ il_send_cmd_pdu_async(il, ctx->qos_cmd, sizeof(struct il_qosparam_cmd),
+ &ctx->qos_data.def_qos_parm, NULL);
+}
+
+/**
+ * il_mac_config - mac80211 config callback
+ */
+int
+il_mac_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct il_priv *il = hw->priv;
+ const struct il_channel_info *ch_info;
+ struct ieee80211_conf *conf = &hw->conf;
+ struct ieee80211_channel *channel = conf->channel;
+ struct il_ht_config *ht_conf = &il->current_ht_config;
+ struct il_rxon_context *ctx = &il->ctx;
+ unsigned long flags = 0;
+ int ret = 0;
+ u16 ch;
+ int scan_active = 0;
+ bool ht_changed = false;
+
+ if (WARN_ON(!il->cfg->ops->legacy))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&il->mutex);
+
+ D_MAC80211("enter to channel %d changed 0x%X\n", channel->hw_value,
+ changed);
+
+ if (unlikely(test_bit(S_SCANNING, &il->status))) {
+ scan_active = 1;
+ D_MAC80211("scan active\n");
+ }
+
+ if (changed &
+ (IEEE80211_CONF_CHANGE_SMPS | IEEE80211_CONF_CHANGE_CHANNEL)) {
+		/* mac80211 uses static SM PS for non-HT, which is what we want */
+ il->current_ht_config.smps = conf->smps_mode;
+
+ /*
+ * Recalculate chain counts.
+ *
+ * If monitor mode is enabled then mac80211 will
+ * set up the SM PS mode to OFF if an HT channel is
+ * configured.
+ */
+ if (il->cfg->ops->hcmd->set_rxon_chain)
+ il->cfg->ops->hcmd->set_rxon_chain(il, &il->ctx);
+ }
+
+ /* during scanning mac80211 will delay channel setting until
+	 * the scan finishes, with changed = 0
+ */
+ if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) {
+
+ if (scan_active)
+ goto set_ch_out;
+
+ ch = channel->hw_value;
+ ch_info = il_get_channel_info(il, channel->band, ch);
+ if (!il_is_channel_valid(ch_info)) {
+ D_MAC80211("leave - invalid channel\n");
+ ret = -EINVAL;
+ goto set_ch_out;
+ }
+
+ if (il->iw_mode == NL80211_IFTYPE_ADHOC &&
+ !il_is_channel_ibss(ch_info)) {
+ D_MAC80211("leave - not IBSS channel\n");
+ ret = -EINVAL;
+ goto set_ch_out;
+ }
+
+ spin_lock_irqsave(&il->lock, flags);
+
+ /* Configure HT40 channels */
+ if (ctx->ht.enabled != conf_is_ht(conf)) {
+ ctx->ht.enabled = conf_is_ht(conf);
+ ht_changed = true;
+ }
+ if (ctx->ht.enabled) {
+ if (conf_is_ht40_minus(conf)) {
+ ctx->ht.extension_chan_offset =
+ IEEE80211_HT_PARAM_CHA_SEC_BELOW;
+ ctx->ht.is_40mhz = true;
+ } else if (conf_is_ht40_plus(conf)) {
+ ctx->ht.extension_chan_offset =
+ IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+ ctx->ht.is_40mhz = true;
+ } else {
+ ctx->ht.extension_chan_offset =
+ IEEE80211_HT_PARAM_CHA_SEC_NONE;
+ ctx->ht.is_40mhz = false;
+ }
+ } else
+ ctx->ht.is_40mhz = false;
+
+ /*
+ * Default to no protection. Protection mode will
+ * later be set from BSS config in il_ht_conf
+ */
+ ctx->ht.protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE;
+
+ /* if we are switching from ht to 2.4 clear flags
+ * from any ht related info since 2.4 does not
+ * support ht */
+ if ((le16_to_cpu(ctx->staging.channel) != ch))
+ ctx->staging.flags = 0;
+
+ il_set_rxon_channel(il, channel, ctx);
+ il_set_rxon_ht(il, ht_conf);
+
+ il_set_flags_for_band(il, ctx, channel->band, ctx->vif);
+
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ if (il->cfg->ops->legacy->update_bcast_stations)
+ ret = il->cfg->ops->legacy->update_bcast_stations(il);
+
+set_ch_out:
+ /* The list of supported rates and rate mask can be different
+ * for each band; since the band may have changed, reset
+ * the rate mask to what mac80211 lists */
+ il_set_rate(il);
+ }
+
+ if (changed & (IEEE80211_CONF_CHANGE_PS | IEEE80211_CONF_CHANGE_IDLE)) {
+ ret = il_power_update_mode(il, false);
+ if (ret)
+ D_MAC80211("Error setting sleep level\n");
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_POWER) {
+ D_MAC80211("TX Power old=%d new=%d\n", il->tx_power_user_lmt,
+ conf->power_level);
+
+ il_set_tx_power(il, conf->power_level, false);
+ }
+
+ if (!il_is_ready(il)) {
+ D_MAC80211("leave - not ready\n");
+ goto out;
+ }
+
+ if (scan_active)
+ goto out;
+
+ if (memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging)))
+ il_commit_rxon(il, ctx);
+ else
+ D_INFO("Not re-sending same RXON configuration.\n");
+ if (ht_changed)
+ il_update_qos(il, ctx);
+
+out:
+ D_MAC80211("leave\n");
+ mutex_unlock(&il->mutex);
+ return ret;
+}
+EXPORT_SYMBOL(il_mac_config);
+
+void
+il_mac_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct il_priv *il = hw->priv;
+ unsigned long flags;
+ struct il_rxon_context *ctx = &il->ctx;
+
+ if (WARN_ON(!il->cfg->ops->legacy))
+ return;
+
+ mutex_lock(&il->mutex);
+ D_MAC80211("enter\n");
+
+ spin_lock_irqsave(&il->lock, flags);
+ memset(&il->current_ht_config, 0, sizeof(struct il_ht_config));
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ spin_lock_irqsave(&il->lock, flags);
+
+	/* new association: get rid of the IBSS beacon skb */
+ if (il->beacon_skb)
+ dev_kfree_skb(il->beacon_skb);
+
+ il->beacon_skb = NULL;
+
+ il->timestamp = 0;
+
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ il_scan_cancel_timeout(il, 100);
+ if (!il_is_ready_rf(il)) {
+ D_MAC80211("leave - not ready\n");
+ mutex_unlock(&il->mutex);
+ return;
+ }
+
+	/* we are restarting the association process,
+	 * so clear the RXON_FILTER_ASSOC_MSK bit
+ */
+ ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ il_commit_rxon(il, ctx);
+
+ il_set_rate(il);
+
+ mutex_unlock(&il->mutex);
+
+ D_MAC80211("leave\n");
+}
+EXPORT_SYMBOL(il_mac_reset_tsf);
+
+static void
+il_ht_conf(struct il_priv *il, struct ieee80211_vif *vif)
+{
+ struct il_ht_config *ht_conf = &il->current_ht_config;
+ struct ieee80211_sta *sta;
+ struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+ struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif);
+
+ D_ASSOC("enter:\n");
+
+ if (!ctx->ht.enabled)
+ return;
+
+ ctx->ht.protection =
+ bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
+ ctx->ht.non_gf_sta_present =
+ !!(bss_conf->
+ ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
+
+ ht_conf->single_chain_sufficient = false;
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ rcu_read_lock();
+ sta = ieee80211_find_sta(vif, bss_conf->bssid);
+ if (sta) {
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ int maxstreams;
+
+ maxstreams =
+ (ht_cap->mcs.
+ tx_params & IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
+ >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
+ maxstreams += 1;
+
+ if (ht_cap->mcs.rx_mask[1] == 0 &&
+ ht_cap->mcs.rx_mask[2] == 0)
+ ht_conf->single_chain_sufficient = true;
+ if (maxstreams <= 1)
+ ht_conf->single_chain_sufficient = true;
+ } else {
+ /*
+			 * If this happens at all, it can only be through a
+			 * race when the AP disconnects us while we're still
+			 * setting up the connection; in that case mac80211
+			 * will soon tell us about it.
+ */
+ ht_conf->single_chain_sufficient = true;
+ }
+ rcu_read_unlock();
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ ht_conf->single_chain_sufficient = true;
+ break;
+ default:
+ break;
+ }
+
+ D_ASSOC("leave\n");
+}
+
+static inline void
+il_set_no_assoc(struct il_priv *il, struct ieee80211_vif *vif)
+{
+ struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif);
+
+ /*
+ * inform the ucode that there is no longer an
+ * association and that no more packets should be
+ * sent
+ */
+ ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ ctx->staging.assoc_id = 0;
+ il_commit_rxon(il, ctx);
+}
+
+static void
+il_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct il_priv *il = hw->priv;
+ unsigned long flags;
+ __le64 timestamp;
+ struct sk_buff *skb = ieee80211_beacon_get(hw, vif);
+
+ if (!skb)
+ return;
+
+ D_MAC80211("enter\n");
+
+ lockdep_assert_held(&il->mutex);
+
+ if (!il->beacon_ctx) {
+ IL_ERR("update beacon but no beacon context!\n");
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ spin_lock_irqsave(&il->lock, flags);
+
+ if (il->beacon_skb)
+ dev_kfree_skb(il->beacon_skb);
+
+ il->beacon_skb = skb;
+
+ timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
+ il->timestamp = le64_to_cpu(timestamp);
+
+ D_MAC80211("leave\n");
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ if (!il_is_ready_rf(il)) {
+ D_MAC80211("leave - RF not ready\n");
+ return;
+ }
+
+ il->cfg->ops->legacy->post_associate(il);
+}
+
+void
+il_mac_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf, u32 changes)
+{
+ struct il_priv *il = hw->priv;
+ struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif);
+ int ret;
+
+ if (WARN_ON(!il->cfg->ops->legacy))
+ return;
+
+ D_MAC80211("changes = 0x%X\n", changes);
+
+ mutex_lock(&il->mutex);
+
+ if (!il_is_alive(il)) {
+ mutex_unlock(&il->mutex);
+ return;
+ }
+
+ if (changes & BSS_CHANGED_QOS) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&il->lock, flags);
+ ctx->qos_data.qos_active = bss_conf->qos;
+ il_update_qos(il, ctx);
+ spin_unlock_irqrestore(&il->lock, flags);
+ }
+
+ if (changes & BSS_CHANGED_BEACON_ENABLED) {
+ /*
+ * the add_interface code must make sure we only ever
+ * have a single interface that could be beaconing at
+ * any time.
+ */
+ if (vif->bss_conf.enable_beacon)
+ il->beacon_ctx = ctx;
+ else
+ il->beacon_ctx = NULL;
+ }
+
+ if (changes & BSS_CHANGED_BSSID) {
+ D_MAC80211("BSSID %pM\n", bss_conf->bssid);
+
+ /*
+ * If there is currently a HW scan going on in the
+ * background then we need to cancel it else the RXON
+ * below/in post_associate will fail.
+ */
+ if (il_scan_cancel_timeout(il, 100)) {
+ IL_WARN("Aborted scan still in progress after 100ms\n");
+ D_MAC80211("leaving - scan abort failed.\n");
+ mutex_unlock(&il->mutex);
+ return;
+ }
+
+ /* mac80211 only sets assoc when in STATION mode */
+ if (vif->type == NL80211_IFTYPE_ADHOC || bss_conf->assoc) {
+ memcpy(ctx->staging.bssid_addr, bss_conf->bssid,
+ ETH_ALEN);
+
+ /* currently needed in a few places */
+ memcpy(il->bssid, bss_conf->bssid, ETH_ALEN);
+ } else {
+ ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ }
+
+ }
+
+ /*
+ * This needs to be after setting the BSSID in case
+ * mac80211 decides to do both changes at once because
+ * it will invoke post_associate.
+ */
+ if (vif->type == NL80211_IFTYPE_ADHOC && (changes & BSS_CHANGED_BEACON))
+ il_beacon_update(hw, vif);
+
+ if (changes & BSS_CHANGED_ERP_PREAMBLE) {
+ D_MAC80211("ERP_PREAMBLE %d\n", bss_conf->use_short_preamble);
+ if (bss_conf->use_short_preamble)
+ ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
+ else
+ ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
+ }
+
+ if (changes & BSS_CHANGED_ERP_CTS_PROT) {
+ D_MAC80211("ERP_CTS %d\n", bss_conf->use_cts_prot);
+ if (bss_conf->use_cts_prot && il->band != IEEE80211_BAND_5GHZ)
+ ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
+ else
+ ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
+ if (bss_conf->use_cts_prot)
+ ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
+ else
+ ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
+ }
+
+ if (changes & BSS_CHANGED_BASIC_RATES) {
+ /* XXX use this information
+ *
+ * To do that, remove code from il_set_rate() and put something
+ * like this here:
+ *
+ if (A-band)
+ ctx->staging.ofdm_basic_rates =
+ bss_conf->basic_rates;
+ else
+ ctx->staging.ofdm_basic_rates =
+ bss_conf->basic_rates >> 4;
+ ctx->staging.cck_basic_rates =
+ bss_conf->basic_rates & 0xF;
+ */
+ }
+
+ if (changes & BSS_CHANGED_HT) {
+ il_ht_conf(il, vif);
+
+ if (il->cfg->ops->hcmd->set_rxon_chain)
+ il->cfg->ops->hcmd->set_rxon_chain(il, ctx);
+ }
+
+ if (changes & BSS_CHANGED_ASSOC) {
+ D_MAC80211("ASSOC %d\n", bss_conf->assoc);
+ if (bss_conf->assoc) {
+ il->timestamp = bss_conf->timestamp;
+
+ if (!il_is_rfkill(il))
+ il->cfg->ops->legacy->post_associate(il);
+ } else
+ il_set_no_assoc(il, vif);
+ }
+
+ if (changes && il_is_associated_ctx(ctx) && bss_conf->aid) {
+ D_MAC80211("Changes (%#x) while associated\n", changes);
+ ret = il_send_rxon_assoc(il, ctx);
+ if (!ret) {
+ /* Sync active_rxon with latest change. */
+ memcpy((void *)&ctx->active, &ctx->staging,
+ sizeof(struct il_rxon_cmd));
+ }
+ }
+
+ if (changes & BSS_CHANGED_BEACON_ENABLED) {
+ if (vif->bss_conf.enable_beacon) {
+ memcpy(ctx->staging.bssid_addr, bss_conf->bssid,
+ ETH_ALEN);
+ memcpy(il->bssid, bss_conf->bssid, ETH_ALEN);
+ il->cfg->ops->legacy->config_ap(il);
+ } else
+ il_set_no_assoc(il, vif);
+ }
+
+ if (changes & BSS_CHANGED_IBSS) {
+ ret = il->cfg->ops->legacy->manage_ibss_station(il, vif,
+                                                 bss_conf->ibss_joined);
+ if (ret)
+ IL_ERR("failed to %s IBSS station %pM\n",
+ bss_conf->ibss_joined ? "add" : "remove",
+ bss_conf->bssid);
+ }
+
+ mutex_unlock(&il->mutex);
+
+ D_MAC80211("leave\n");
+}
+EXPORT_SYMBOL(il_mac_bss_info_changed);
+
+irqreturn_t
+il_isr(int irq, void *data)
+{
+ struct il_priv *il = data;
+ u32 inta, inta_mask;
+ u32 inta_fh;
+ unsigned long flags;
+ if (!il)
+ return IRQ_NONE;
+
+ spin_lock_irqsave(&il->lock, flags);
+
+ /* Disable (but don't clear!) interrupts here to avoid
+ * back-to-back ISRs and sporadic interrupts from our NIC.
+ * If we have something to service, the tasklet will re-enable ints.
+ * If we *don't* have something, we'll re-enable before leaving here. */
+ inta_mask = _il_rd(il, CSR_INT_MASK); /* just for debug */
+ _il_wr(il, CSR_INT_MASK, 0x00000000);
+
+ /* Discover which interrupts are active/pending */
+ inta = _il_rd(il, CSR_INT);
+ inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
+
+ /* Ignore interrupt if there's nothing in NIC to service.
+ * This may be due to IRQ shared with another device,
+ * or due to sporadic interrupts thrown from our NIC. */
+ if (!inta && !inta_fh) {
+ D_ISR("Ignore interrupt, inta == 0, inta_fh == 0\n");
+ goto none;
+ }
+
+ if (inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0) {
+ /* Hardware disappeared. It might have already raised
+ * an interrupt */
+ IL_WARN("HARDWARE GONE?? INTA == 0x%08x\n", inta);
+ goto unplugged;
+ }
+
+ D_ISR("ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", inta, inta_mask,
+ inta_fh);
+
+ inta &= ~CSR_INT_BIT_SCD;
+
+ /* il_irq_tasklet() will service interrupts and re-enable them */
+ if (likely(inta || inta_fh))
+ tasklet_schedule(&il->irq_tasklet);
+
+unplugged:
+ spin_unlock_irqrestore(&il->lock, flags);
+ return IRQ_HANDLED;
+
+none:
+ /* re-enable interrupts here since we don't have anything to service. */
+ /* only Re-enable if disabled by irq */
+ if (test_bit(S_INT_ENABLED, &il->status))
+ il_enable_interrupts(il);
+ spin_unlock_irqrestore(&il->lock, flags);
+ return IRQ_NONE;
+}
+EXPORT_SYMBOL(il_isr);
+
+/*
+ * il_tx_cmd_protection: Set RTS/CTS protection flags. This is the only
+ * Tx command setup code shared by 3945 and 4965.
+ */
+void
+il_tx_cmd_protection(struct il_priv *il, struct ieee80211_tx_info *info,
+ __le16 fc, __le32 *tx_flags)
+{
+ if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
+ *tx_flags |= TX_CMD_FLG_RTS_MSK;
+ *tx_flags &= ~TX_CMD_FLG_CTS_MSK;
+ *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
+
+ if (!ieee80211_is_mgmt(fc))
+ return;
+
+ switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
+ case cpu_to_le16(IEEE80211_STYPE_AUTH):
+ case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
+ case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
+ case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
+ *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
+ *tx_flags |= TX_CMD_FLG_CTS_MSK;
+ break;
+ }
+ } else if (info->control.rates[0].flags &
+            IEEE80211_TX_RC_USE_CTS_PROTECT) {
+ *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
+ *tx_flags |= TX_CMD_FLG_CTS_MSK;
+ *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
+ }
+}
+EXPORT_SYMBOL(il_tx_cmd_protection);
diff --git a/drivers/net/wireless/iwlegacy/common.h b/drivers/net/wireless/iwlegacy/common.h
new file mode 100644
index 000000000000..abfa388588be
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/common.h
@@ -0,0 +1,3246 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+#ifndef __il_core_h__
+#define __il_core_h__
+
+#include <linux/interrupt.h>
+#include <linux/pci.h> /* for struct pci_device_id */
+#include <linux/kernel.h>
+#include <linux/leds.h>
+#include <linux/wait.h>
+#include <linux/io.h>
+#include <net/mac80211.h>
+#include <net/ieee80211_radiotap.h>
+
+#include "commands.h"
+#include "csr.h"
+#include "prph.h"
+
+struct il_host_cmd;
+struct il_cmd;
+struct il_tx_queue;
+
+#define IL_ERR(f, a...) dev_err(&il->pci_dev->dev, f, ## a)
+#define IL_WARN(f, a...) dev_warn(&il->pci_dev->dev, f, ## a)
+#define IL_INFO(f, a...) dev_info(&il->pci_dev->dev, f, ## a)
+
+#define RX_QUEUE_SIZE 256
+#define RX_QUEUE_MASK 255
+#define RX_QUEUE_SIZE_LOG 8
+
+/*
+ * RX related structures and functions
+ */
+#define RX_FREE_BUFFERS 64
+#define RX_LOW_WATERMARK 8
+
+#define U32_PAD(n) ((4-(n))&0x3)
+
+/* CT-KILL constants */
+#define CT_KILL_THRESHOLD_LEGACY 110 /* in Celsius */
+
+/* Default noise level to report when noise measurement is not available.
+ * This may be because we're:
+ * 1) Not associated (4965, no beacon stats being sent to driver)
+ * 2) Scanning (noise measurement does not apply to associated channel)
+ * 3) Receiving CCK (3945 delivers noise info only for OFDM frames)
+ * Use default noise value of -127 ... this is below the range of measurable
+ * Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user.
+ * Also, -127 works better than 0 when averaging frames with/without
+ * noise info (e.g. averaging might be done in app); measured dBm values are
+ * always negative ... using a negative value as the default keeps all
+ * averages within an s8's (used in some apps) range of negative values. */
+#define IL_NOISE_MEAS_NOT_AVAILABLE (-127)
+
+/*
+ * RTS threshold here is total size [2347] minus 4 FCS bytes
+ * Per spec:
+ * a value of 0 means RTS on all data/management packets
+ * a value > max MSDU size means no RTS
+ * else RTS for data/management frames where MPDU is larger
+ * than RTS value.
+ */
+#define DEFAULT_RTS_THRESHOLD 2347U
+#define MIN_RTS_THRESHOLD 0U
+#define MAX_RTS_THRESHOLD 2347U
+#define MAX_MSDU_SIZE 2304U
+#define MAX_MPDU_SIZE 2346U
+#define DEFAULT_BEACON_INTERVAL 100U
+#define DEFAULT_SHORT_RETRY_LIMIT 7U
+#define DEFAULT_LONG_RETRY_LIMIT 4U
+
+struct il_rx_buf {
+ dma_addr_t page_dma;
+ struct page *page;
+ struct list_head list;
+};
+
+#define rxb_addr(r) page_address(r->page)
+
+/* defined below */
+struct il_device_cmd;
+
+struct il_cmd_meta {
+ /* only for SYNC commands, iff the reply skb is wanted */
+ struct il_host_cmd *source;
+ /*
+ * only for ASYNC commands
+ * (which is somewhat stupid -- look at common.c, for instance,
+ * which duplicates a bunch of code because the callback isn't
+ * invoked for SYNC commands; if it were, and its result were
+ * passed through, this would be simpler...)
+ */
+ void (*callback) (struct il_priv *il, struct il_device_cmd *cmd,
+ struct il_rx_pkt *pkt);
+
+ /* The CMD_SIZE_HUGE flag bit indicates that the command
+ * structure is stored at the end of the shared queue memory. */
+ u32 flags;
+
+ DEFINE_DMA_UNMAP_ADDR(mapping);
+ DEFINE_DMA_UNMAP_LEN(len);
+};
+
+/*
+ * Generic queue structure
+ *
+ * Contains common data for Rx and Tx queues
+ */
+struct il_queue {
+ int n_bd; /* number of BDs in this queue */
+ int write_ptr; /* first empty entry (idx) host_w */
+ int read_ptr; /* last used entry (idx) host_r */
+ /* use for monitoring and recovering the stuck queue */
+ dma_addr_t dma_addr; /* physical addr for BD's */
+ int n_win; /* safe queue win */
+ u32 id;
+ int low_mark; /* low watermark, resume queue if free
+ * space more than this */
+ int high_mark; /* high watermark, stop queue if free
+ * space less than this */
+};
+
+/* One for each TFD */
+struct il_tx_info {
+ struct sk_buff *skb;
+ struct il_rxon_context *ctx;
+};
+
+/**
+ * struct il_tx_queue - Tx Queue for DMA
+ * @q: generic Rx/Tx queue descriptor
+ * @bd: base of circular buffer of TFDs
+ * @cmd: array of command/TX buffer pointers
+ * @meta: array of meta data for each command/tx buffer
+ * @dma_addr_cmd: physical address of cmd/tx buffer array
+ * @txb: array of per-TFD driver data
+ * @time_stamp: time (in jiffies) of last read_ptr change
+ * @need_update: indicates need to update read/write idx
+ * @sched_retry: indicates the queue is high-throughput aggregation (HT AGG) enabled
+ *
+ * A Tx queue consists of a circular buffer of BDs (a.k.a. TFDs, transmit frame
+ * descriptors) and required locking structures.
+ */
+#define TFD_TX_CMD_SLOTS 256
+#define TFD_CMD_SLOTS 32
+
+struct il_tx_queue {
+ struct il_queue q;
+ void *tfds;
+ struct il_device_cmd **cmd;
+ struct il_cmd_meta *meta;
+ struct il_tx_info *txb;
+ unsigned long time_stamp;
+ u8 need_update;
+ u8 sched_retry;
+ u8 active;
+ u8 swq_id;
+};
+
+/*
+ * EEPROM access time values:
+ *
+ * Driver initiates EEPROM read by writing byte address << 1 to CSR_EEPROM_REG.
+ * Driver then polls CSR_EEPROM_REG for CSR_EEPROM_REG_READ_VALID_MSK (0x1).
+ * When polling, wait 10 uSec between polling loops, up to a maximum 5000 uSec.
+ * Driver reads 16-bit value from bits 31-16 of CSR_EEPROM_REG.
+ */
+#define IL_EEPROM_ACCESS_TIMEOUT 5000 /* uSec */
+
+#define IL_EEPROM_SEM_TIMEOUT 10 /* microseconds */
+#define IL_EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
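+
+/*
+ * A minimal sketch of the read sequence described above, assuming the
+ * CSR_EEPROM_REG bit layout from csr.h (address written as "addr << 1",
+ * valid bit CSR_EEPROM_REG_READ_VALID_MSK, data in bits 31-16); the
+ * driver's real helpers additionally handle the EEPROM semaphore and
+ * error reporting:
+ *
+ *	_il_wr(il, CSR_EEPROM_REG, CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
+ *	for (t = 0; t < IL_EEPROM_ACCESS_TIMEOUT; t += 10) {
+ *		r = _il_rd(il, CSR_EEPROM_REG);
+ *		if (r & CSR_EEPROM_REG_READ_VALID_MSK)
+ *			return (u16)(r >> 16);
+ *		udelay(10);
+ *	}
+ */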
+
+/*
+ * Regulatory channel usage flags in EEPROM struct il4965_eeprom_channel.flags.
+ *
+ * IBSS and/or AP operation is allowed *only* on those channels with
+ * (VALID && IBSS && ACTIVE && !RADAR). This restriction is in place because
+ * RADAR detection is not supported by the 4965 driver, but is a
+ * requirement for establishing a new network for legal operation on channels
+ * requiring RADAR detection or restricting ACTIVE scanning.
+ *
+ * NOTE: "WIDE" flag does not indicate anything about "HT40" 40 MHz channels.
+ * It only indicates that 20 MHz channel use is supported; HT40 channel
+ * usage is indicated by a separate set of regulatory flags for each
+ * HT40 channel pair.
+ *
+ * NOTE: Using a channel inappropriately will result in a uCode error!
+ */
+#define IL_NUM_TX_CALIB_GROUPS 5
+enum {
+ EEPROM_CHANNEL_VALID = (1 << 0), /* usable for this SKU/geo */
+ EEPROM_CHANNEL_IBSS = (1 << 1), /* usable as an IBSS channel */
+ /* Bit 2 Reserved */
+ EEPROM_CHANNEL_ACTIVE = (1 << 3), /* active scanning allowed */
+ EEPROM_CHANNEL_RADAR = (1 << 4), /* radar detection required */
+ EEPROM_CHANNEL_WIDE = (1 << 5), /* 20 MHz channel okay */
+ /* Bit 6 Reserved (was Narrow Channel) */
+ EEPROM_CHANNEL_DFS = (1 << 7), /* dynamic freq selection candidate */
+};
+
+/* SKU Capabilities */
+/* 3945 only */
+#define EEPROM_SKU_CAP_SW_RF_KILL_ENABLE (1 << 0)
+#define EEPROM_SKU_CAP_HW_RF_KILL_ENABLE (1 << 1)
+
+/* *regulatory* channel data format in eeprom, one for each channel.
+ * There are separate entries for HT40 (40 MHz) vs. normal (20 MHz) channels. */
+struct il_eeprom_channel {
+ u8 flags; /* EEPROM_CHANNEL_* flags copied from EEPROM */
+ s8 max_power_avg; /* max power (dBm) on this chnl, limit 31 */
+} __packed;
+
+/* 3945 Specific */
+#define EEPROM_3945_EEPROM_VERSION (0x2f)
+
+/* 4965 has two radio transmitters (and 3 radio receivers) */
+#define EEPROM_TX_POWER_TX_CHAINS (2)
+
+/* 4965 has room for up to 8 sets of txpower calibration data */
+#define EEPROM_TX_POWER_BANDS (8)
+
+/* 4965 factory calibration measures txpower gain settings for
+ * each of 3 target output levels */
+#define EEPROM_TX_POWER_MEASUREMENTS (3)
+
+/* 4965 Specific */
+/* 4965 driver does not work with txpower calibration version < 5 */
+#define EEPROM_4965_TX_POWER_VERSION (5)
+#define EEPROM_4965_EEPROM_VERSION (0x2f)
+#define EEPROM_4965_CALIB_VERSION_OFFSET (2*0xB6) /* 2 bytes */
+#define EEPROM_4965_CALIB_TXPOWER_OFFSET (2*0xE8) /* 48 bytes */
+#define EEPROM_4965_BOARD_REVISION (2*0x4F) /* 2 bytes */
+#define EEPROM_4965_BOARD_PBA (2*0x56+1) /* 9 bytes */
+
+/* 2.4 GHz */
+extern const u8 il_eeprom_band_1[14];
+
+/*
+ * factory calibration data for one txpower level, on one channel,
+ * measured on one of the 2 tx chains (radio transmitter and associated
+ * antenna). EEPROM contains:
+ *
+ * 1) Temperature (degrees Celsius) of device when measurement was made.
+ *
+ * 2) Gain table idx used to achieve the target measurement power.
+ * This refers to the "well-known" gain tables (see 4965.h).
+ *
+ * 3) Actual measured output power, in half-dBm ("34" = 17 dBm).
+ *
+ * 4) RF power amplifier detector level measurement (not used).
+ */
+struct il_eeprom_calib_measure {
+ u8 temperature; /* Device temperature (Celsius) */
+ u8 gain_idx; /* Index into gain table */
+ u8 actual_pow; /* Measured RF output power, half-dBm */
+ s8 pa_det; /* Power amp detector level (not used) */
+} __packed;
+
+/*
+ * measurement set for one channel. EEPROM contains:
+ *
+ * 1) Channel number measured
+ *
+ * 2) Measurements for each of 3 power levels for each of 2 radio transmitters
+ * (a.k.a. "tx chains") (6 measurements altogether)
+ */
+struct il_eeprom_calib_ch_info {
+ u8 ch_num;
+ struct il_eeprom_calib_measure
+ measurements[EEPROM_TX_POWER_TX_CHAINS]
+ [EEPROM_TX_POWER_MEASUREMENTS];
+} __packed;
+
+/*
+ * txpower subband info.
+ *
+ * For each frequency subband, EEPROM contains the following:
+ *
+ * 1) First and last channels within range of the subband. "0" values
+ * indicate that this sample set is not being used.
+ *
+ * 2) Sample measurement sets for 2 channels close to the range endpoints.
+ */
+struct il_eeprom_calib_subband_info {
+ u8 ch_from; /* channel number of lowest channel in subband */
+ u8 ch_to; /* channel number of highest channel in subband */
+ struct il_eeprom_calib_ch_info ch1;
+ struct il_eeprom_calib_ch_info ch2;
+} __packed;
+
+/*
+ * txpower calibration info. EEPROM contains:
+ *
+ * 1) Factory-measured saturation power levels (maximum levels at which
+ * tx power amplifier can output a signal without too much distortion).
+ * There is one level for 2.4 GHz band and one for 5 GHz band. These
+ * values apply to all channels within each of the bands.
+ *
+ * 2) Factory-measured power supply voltage level. This is assumed to be
+ * constant (i.e. same value applies to all channels/bands) while the
+ * factory measurements are being made.
+ *
+ * 3) Up to 8 sets of factory-measured txpower calibration values.
+ * These are for different frequency ranges, since txpower gain
+ * characteristics of the analog radio circuitry vary with frequency.
+ *
+ * Not all sets need to be filled with data;
+ * struct il_eeprom_calib_subband_info contains range of channels
+ * (0 if unused) for each set of data.
+ */
+struct il_eeprom_calib_info {
+ u8 saturation_power24; /* half-dBm (e.g. "34" = 17 dBm) */
+ u8 saturation_power52; /* half-dBm */
+ __le16 voltage; /* signed */
+ struct il_eeprom_calib_subband_info band_info[EEPROM_TX_POWER_BANDS];
+} __packed;
+
+/* General */
+#define EEPROM_DEVICE_ID (2*0x08) /* 2 bytes */
+#define EEPROM_MAC_ADDRESS (2*0x15) /* 6 bytes */
+#define EEPROM_BOARD_REVISION (2*0x35) /* 2 bytes */
+#define EEPROM_BOARD_PBA_NUMBER (2*0x3B+1) /* 9 bytes */
+#define EEPROM_VERSION (2*0x44) /* 2 bytes */
+#define EEPROM_SKU_CAP (2*0x45) /* 2 bytes */
+#define EEPROM_OEM_MODE (2*0x46) /* 2 bytes */
+#define EEPROM_WOWLAN_MODE (2*0x47) /* 2 bytes */
+#define EEPROM_RADIO_CONFIG (2*0x48) /* 2 bytes */
+#define EEPROM_NUM_MAC_ADDRESS (2*0x4C) /* 2 bytes */
+
+/* The following masks are to be applied on EEPROM_RADIO_CONFIG */
+#define EEPROM_RF_CFG_TYPE_MSK(x) (x & 0x3) /* bits 0-1 */
+#define EEPROM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */
+#define EEPROM_RF_CFG_DASH_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */
+#define EEPROM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */
+#define EEPROM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
+#define EEPROM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
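+
+/*
+ * Usage sketch: the masks above operate on the 16-bit word read from
+ * EEPROM_RADIO_CONFIG (il_eeprom_query16() is declared later in this
+ * header; the variable names are illustrative):
+ *
+ *	u16 radio_cfg = il_eeprom_query16(il, EEPROM_RADIO_CONFIG);
+ *	u8 valid_tx_ant = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg);
+ *	u8 valid_rx_ant = EEPROM_RF_CFG_RX_ANT_MSK(radio_cfg);
+ */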
+
+#define EEPROM_3945_RF_CFG_TYPE_MAX 0x0
+#define EEPROM_4965_RF_CFG_TYPE_MAX 0x1
+
+/*
+ * Per-channel regulatory data.
+ *
+ * Each channel that *might* be supported by iwl has a fixed location
+ * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory
+ * txpower (MSB).
+ *
+ * Entries immediately below are for 20 MHz channel width. HT40 (40 MHz)
+ * channels (only for 4965, not supported by 3945) appear later in the EEPROM.
+ *
+ * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
+ */
+#define EEPROM_REGULATORY_SKU_ID (2*0x60) /* 4 bytes */
+#define EEPROM_REGULATORY_BAND_1 (2*0x62) /* 2 bytes */
+#define EEPROM_REGULATORY_BAND_1_CHANNELS (2*0x63) /* 28 bytes */
+
+/*
+ * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196,
+ * 5.0 GHz channels 7, 8, 11, 12, 16
+ * (4915-5080MHz) (none of these is ever supported)
+ */
+#define EEPROM_REGULATORY_BAND_2 (2*0x71) /* 2 bytes */
+#define EEPROM_REGULATORY_BAND_2_CHANNELS (2*0x72) /* 26 bytes */
+
+/*
+ * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
+ * (5170-5320MHz)
+ */
+#define EEPROM_REGULATORY_BAND_3 (2*0x7F) /* 2 bytes */
+#define EEPROM_REGULATORY_BAND_3_CHANNELS (2*0x80) /* 24 bytes */
+
+/*
+ * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
+ * (5500-5700MHz)
+ */
+#define EEPROM_REGULATORY_BAND_4 (2*0x8C) /* 2 bytes */
+#define EEPROM_REGULATORY_BAND_4_CHANNELS (2*0x8D) /* 22 bytes */
+
+/*
+ * 5.7 GHz channels 145, 149, 153, 157, 161, 165
+ * (5725-5825MHz)
+ */
+#define EEPROM_REGULATORY_BAND_5 (2*0x98) /* 2 bytes */
+#define EEPROM_REGULATORY_BAND_5_CHANNELS (2*0x99) /* 12 bytes */
+
+/*
+ * 2.4 GHz HT40 channels 1 (5), 2 (6), 3 (7), 4 (8), 5 (9), 6 (10), 7 (11)
+ *
+ * The channel listed is the center of the lower 20 MHz half of the channel.
+ * The overall center frequency is actually 2 channels (10 MHz) above that,
+ * and the upper half of each HT40 channel is centered 4 channels (20 MHz) away
+ * from the lower half; e.g. the upper half of HT40 channel 1 is channel 5,
+ * and the overall HT40 channel width centers on channel 3.
+ *
+ * NOTE: The RXON command uses 20 MHz channel numbers to specify the
+ * control channel to which to tune. RXON also specifies whether the
+ * control channel is the upper or lower half of a HT40 channel.
+ *
+ * NOTE: 4965 does not support HT40 channels on 2.4 GHz.
+ */
+#define EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS (2*0xA0) /* 14 bytes */
+
+/*
+ * 5.2 GHz HT40 channels 36 (40), 44 (48), 52 (56), 60 (64),
+ * 100 (104), 108 (112), 116 (120), 124 (128), 132 (136), 149 (153), 157 (161)
+ */
+#define EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS (2*0xA8) /* 22 bytes */
+
+#define EEPROM_REGULATORY_BAND_NO_HT40 (0)
+
+struct il_eeprom_ops {
+ const u32 regulatory_bands[7];
+ int (*acquire_semaphore) (struct il_priv *il);
+ void (*release_semaphore) (struct il_priv *il);
+};
+
+int il_eeprom_init(struct il_priv *il);
+void il_eeprom_free(struct il_priv *il);
+const u8 *il_eeprom_query_addr(const struct il_priv *il, size_t offset);
+u16 il_eeprom_query16(const struct il_priv *il, size_t offset);
+int il_init_channel_map(struct il_priv *il);
+void il_free_channel_map(struct il_priv *il);
+const struct il_channel_info *il_get_channel_info(const struct il_priv *il,
+ enum ieee80211_band band,
+ u16 channel);
+
+#define IL_NUM_SCAN_RATES (2)
+
+struct il4965_channel_tgd_info {
+ u8 type;
+ s8 max_power;
+};
+
+struct il4965_channel_tgh_info {
+ s64 last_radar_time;
+};
+
+#define IL4965_MAX_RATE (33)
+
+struct il3945_clip_group {
+ /* maximum power level to prevent clipping for each rate, derived by
+ * us from this band's saturation power in EEPROM */
+ const s8 clip_powers[IL_MAX_RATES];
+};
+
+/* current Tx power values to use, one for each rate for each channel.
+ * requested power is limited by:
+ * -- regulatory EEPROM limits for this channel
+ * -- hardware capabilities (clip-powers)
+ * -- spectrum management
+ * -- user preference (e.g. iwconfig)
+ * when requested power is set, base power idx must also be set. */
+struct il3945_channel_power_info {
+ struct il3945_tx_power tpc; /* actual radio and DSP gain settings */
+ s8 power_table_idx; /* actual (compensated) idx into gain table */
+ s8 base_power_idx; /* gain idx for power at factory temp. */
+ s8 requested_power; /* power (dBm) requested for this chnl/rate */
+};
+
+/* current scan Tx power values to use, one for each scan rate for each
+ * channel. */
+struct il3945_scan_power_info {
+ struct il3945_tx_power tpc; /* actual radio and DSP gain settings */
+ s8 power_table_idx; /* actual (compensated) idx into gain table */
+ s8 requested_power; /* scan pwr (dBm) requested for chnl/rate */
+};
+
+/*
+ * One for each channel, holds all channel setup data
+ * Some of the fields (e.g. eeprom and flags/max_power_avg) are redundant
+ * with one another!
+ */
+struct il_channel_info {
+ struct il4965_channel_tgd_info tgd;
+ struct il4965_channel_tgh_info tgh;
+ struct il_eeprom_channel eeprom; /* EEPROM regulatory limit */
+ struct il_eeprom_channel ht40_eeprom; /* EEPROM regulatory limit for
+ * HT40 channel */
+
+ u8 channel; /* channel number */
+ u8 flags; /* flags copied from EEPROM */
+ s8 max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */
+ s8 curr_txpow; /* (dBm) regulatory/spectrum/user (not h/w) limit */
+ s8 min_power; /* always 0 */
+ s8 scan_power; /* (dBm) regul. eeprom, direct scans, any rate */
+
+ u8 group_idx; /* 0-4, maps channel to group1/2/3/4/5 */
+ u8 band_idx; /* 0-4, maps channel to band1/2/3/4/5 */
+ enum ieee80211_band band;
+
+ /* HT40 channel info */
+ s8 ht40_max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */
+ u8 ht40_flags; /* flags copied from EEPROM */
+ u8 ht40_extension_channel; /* HT_IE_EXT_CHANNEL_* */
+
+ /* Radio/DSP gain settings for each "normal" data Tx rate.
+ * These include, in addition to RF and DSP gain, a few fields for
+ * remembering/modifying gain settings (idxes). */
+ struct il3945_channel_power_info power_info[IL4965_MAX_RATE];
+
+ /* Radio/DSP gain settings for each scan rate, for directed scans. */
+ struct il3945_scan_power_info scan_pwr_info[IL_NUM_SCAN_RATES];
+};
+
+#define IL_TX_FIFO_BK 0 /* shared */
+#define IL_TX_FIFO_BE 1
+#define IL_TX_FIFO_VI 2 /* shared */
+#define IL_TX_FIFO_VO 3
+#define IL_TX_FIFO_UNUSED -1
+
+/* Minimum number of queues. MAX_NUM is defined in hw specific files.
+ * Set the minimum to accommodate the 4 standard TX queues, 1 command
+ * queue, 2 (unused) HCCA queues, and 4 HT queues (one for each AC) */
+#define IL_MIN_NUM_QUEUES 10
+
+#define IL_DEFAULT_CMD_QUEUE_NUM 4
+
+#define IEEE80211_DATA_LEN 2304
+#define IEEE80211_4ADDR_LEN 30
+#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN)
+#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN)
+
+struct il_frame {
+ union {
+ struct ieee80211_hdr frame;
+ struct il_tx_beacon_cmd beacon;
+ u8 raw[IEEE80211_FRAME_LEN];
+ u8 cmd[360];
+ } u;
+ struct list_head list;
+};
+
+#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
+#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
+#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
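+
+/*
+ * Example: a sequence-control field of 0x01b4 (fragment 4 of sequence
+ * number 27) gives SEQ_TO_SN(0x01b4) == 27, and SN_TO_SEQ(27) == 0x01b0.
+ */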
+
+enum {
+ CMD_SYNC = 0,
+ CMD_SIZE_NORMAL = 0,
+ CMD_NO_SKB = 0,
+ CMD_SIZE_HUGE = (1 << 0),
+ CMD_ASYNC = (1 << 1),
+ CMD_WANT_SKB = (1 << 2),
+ CMD_MAPPED = (1 << 3),
+};
+
+#define DEF_CMD_PAYLOAD_SIZE 320
+
+/**
+ * struct il_device_cmd
+ *
+ * For allocation of the command and tx queues, this establishes the overall
+ * size of the largest command we send to uCode, except for a scan command
+ * (which is relatively huge; space is allocated separately).
+ */
+struct il_device_cmd {
+ struct il_cmd_header hdr; /* uCode API */
+ union {
+ u32 flags;
+ u8 val8;
+ u16 val16;
+ u32 val32;
+ struct il_tx_cmd tx;
+ u8 payload[DEF_CMD_PAYLOAD_SIZE];
+ } __packed cmd;
+} __packed;
+
+#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct il_device_cmd))
+
+struct il_host_cmd {
+ const void *data;
+ unsigned long reply_page;
+ void (*callback) (struct il_priv *il, struct il_device_cmd *cmd,
+ struct il_rx_pkt *pkt);
+ u32 flags;
+ u16 len;
+ u8 id;
+};
+
+#define SUP_RATE_11A_MAX_NUM_CHANNELS 8
+#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
+#define SUP_RATE_11G_MAX_NUM_CHANNELS 12
+
+/**
+ * struct il_rx_queue - Rx queue
+ * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
+ * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
+ * @read: Shared idx to newest available Rx buffer
+ * @write: Shared idx to oldest written Rx packet
+ * @free_count: Number of pre-allocated buffers in rx_free
+ * @rx_free: list of free SKBs for use
+ * @rx_used: List of Rx buffers with no SKB
+ * @need_update: flag to indicate we need to update read/write idx
+ * @rb_stts: driver's pointer to receive buffer status
+ * @rb_stts_dma: bus address of receive buffer status
+ *
+ * NOTE: rx_free and rx_used are used as a FIFO for il_rx_bufs
+ */
+struct il_rx_queue {
+ __le32 *bd;
+ dma_addr_t bd_dma;
+ struct il_rx_buf pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
+ struct il_rx_buf *queue[RX_QUEUE_SIZE];
+ u32 read;
+ u32 write;
+ u32 free_count;
+ u32 write_actual;
+ struct list_head rx_free;
+ struct list_head rx_used;
+ int need_update;
+ struct il_rb_status *rb_stts;
+ dma_addr_t rb_stts_dma;
+ spinlock_t lock;
+};
+
+#define IL_SUPPORTED_RATES_IE_LEN 8
+
+#define MAX_TID_COUNT 9
+
+#define IL_INVALID_RATE 0xFF
+#define IL_INVALID_VALUE -1
+
+/**
+ * struct il_ht_agg -- aggregation status while waiting for block-ack
+ * @txq_id: Tx queue used for Tx attempt
+ * @frame_count: # frames attempted by Tx command
+ * @wait_for_ba: Expect block-ack before next Tx reply
+ * @start_idx: Index of 1st Transmit Frame Descriptor (TFD) in Tx win
+ * @bitmap: Bitmap of frames pending ACK in the Tx win, one bit per frame
+ * @rate_n_flags: Rate at which Tx was attempted
+ *
+ * If C_TX indicates that aggregation was attempted, driver must wait
+ * for block ack (N_COMPRESSED_BA). This struct stores tx reply info
+ * until block ack arrives.
+ */
+struct il_ht_agg {
+ u16 txq_id;
+ u16 frame_count;
+ u16 wait_for_ba;
+ u16 start_idx;
+ u64 bitmap;
+ u32 rate_n_flags;
+#define IL_AGG_OFF 0
+#define IL_AGG_ON 1
+#define IL_EMPTYING_HW_QUEUE_ADDBA 2
+#define IL_EMPTYING_HW_QUEUE_DELBA 3
+ u8 state;
+};
+
+struct il_tid_data {
+ u16 seq_number; /* 4965 only */
+ u16 tfds_in_queue;
+ struct il_ht_agg agg;
+};
+
+struct il_hw_key {
+ u32 cipher;
+ int keylen;
+ u8 keyidx;
+ u8 key[32];
+};
+
+union il_ht_rate_supp {
+ u16 rates;
+ struct {
+ u8 siso_rate;
+ u8 mimo_rate;
+ };
+};
+
+#define CFG_HT_RX_AMPDU_FACTOR_8K (0x0)
+#define CFG_HT_RX_AMPDU_FACTOR_16K (0x1)
+#define CFG_HT_RX_AMPDU_FACTOR_32K (0x2)
+#define CFG_HT_RX_AMPDU_FACTOR_64K (0x3)
+#define CFG_HT_RX_AMPDU_FACTOR_DEF CFG_HT_RX_AMPDU_FACTOR_64K
+#define CFG_HT_RX_AMPDU_FACTOR_MAX CFG_HT_RX_AMPDU_FACTOR_64K
+#define CFG_HT_RX_AMPDU_FACTOR_MIN CFG_HT_RX_AMPDU_FACTOR_8K
+
+/*
+ * Maximal MPDU density for TX aggregation
+ * 4 - 2us density
+ * 5 - 4us density
+ * 6 - 8us density
+ * 7 - 16us density
+ */
+#define CFG_HT_MPDU_DENSITY_2USEC (0x4)
+#define CFG_HT_MPDU_DENSITY_4USEC (0x5)
+#define CFG_HT_MPDU_DENSITY_8USEC (0x6)
+#define CFG_HT_MPDU_DENSITY_16USEC (0x7)
+#define CFG_HT_MPDU_DENSITY_DEF CFG_HT_MPDU_DENSITY_4USEC
+#define CFG_HT_MPDU_DENSITY_MAX CFG_HT_MPDU_DENSITY_16USEC
+#define CFG_HT_MPDU_DENSITY_MIN (0x1)
+
+struct il_ht_config {
+ bool single_chain_sufficient;
+ enum ieee80211_smps_mode smps; /* current smps mode */
+};
+
+/* QoS structures */
+struct il_qos_info {
+ int qos_active;
+ struct il_qosparam_cmd def_qos_parm;
+};
+
+/*
+ * Structure should be accessed with sta_lock held. When station addition
+ * is in progress (IL_STA_UCODE_INPROGRESS) it is possible to access only
+ * the commands (il_addsta_cmd and il_link_quality_cmd) without
+ * sta_lock held.
+ */
+struct il_station_entry {
+ struct il_addsta_cmd sta;
+ struct il_tid_data tid[MAX_TID_COUNT];
+ u8 used, ctxid;
+ struct il_hw_key keyinfo;
+ struct il_link_quality_cmd *lq;
+};
+
+struct il_station_priv_common {
+ struct il_rxon_context *ctx;
+ u8 sta_id;
+};
+
+/**
+ * struct il_vif_priv - driver's private per-interface information
+ *
+ * When mac80211 allocates a virtual interface, it can allocate
+ * space for us to put data into.
+ */
+struct il_vif_priv {
+ struct il_rxon_context *ctx;
+ u8 ibss_bssid_sta_id;
+};
+
+/* one for each uCode image (inst/data, boot/init/runtime) */
+struct fw_desc {
+ void *v_addr; /* access by driver */
+ dma_addr_t p_addr; /* access by card's busmaster DMA */
+ u32 len; /* bytes */
+};
+
+/* uCode file layout */
+struct il_ucode_header {
+ __le32 ver; /* major/minor/API/serial */
+ struct {
+ __le32 inst_size; /* bytes of runtime code */
+ __le32 data_size; /* bytes of runtime data */
+ __le32 init_size; /* bytes of init code */
+ __le32 init_data_size; /* bytes of init data */
+ __le32 boot_size; /* bytes of bootstrap code */
+ u8 data[0]; /* in same order as sizes */
+ } v1;
+};
+
+struct il4965_ibss_seq {
+ u8 mac[ETH_ALEN];
+ u16 seq_num;
+ u16 frag_num;
+ unsigned long packet_time;
+ struct list_head list;
+};
+
+struct il_sensitivity_ranges {
+ u16 min_nrg_cck;
+ u16 max_nrg_cck;
+
+ u16 nrg_th_cck;
+ u16 nrg_th_ofdm;
+
+ u16 auto_corr_min_ofdm;
+ u16 auto_corr_min_ofdm_mrc;
+ u16 auto_corr_min_ofdm_x1;
+ u16 auto_corr_min_ofdm_mrc_x1;
+
+ u16 auto_corr_max_ofdm;
+ u16 auto_corr_max_ofdm_mrc;
+ u16 auto_corr_max_ofdm_x1;
+ u16 auto_corr_max_ofdm_mrc_x1;
+
+ u16 auto_corr_max_cck;
+ u16 auto_corr_max_cck_mrc;
+ u16 auto_corr_min_cck;
+ u16 auto_corr_min_cck_mrc;
+
+ u16 barker_corr_th_min;
+ u16 barker_corr_th_min_mrc;
+ u16 nrg_th_cca;
+};
+
+#define KELVIN_TO_CELSIUS(x) ((x)-273)
+#define CELSIUS_TO_KELVIN(x) ((x)+273)
+
+/**
+ * struct il_hw_params
+ * @max_txq_num: Max # Tx queues supported
+ * @dma_chnl_num: Number of Tx DMA/FIFO channels
+ * @scd_bc_tbls_size: size of scheduler byte count tables
+ * @tfd_size: TFD size
+ * @tx/rx_chains_num: Number of TX/RX chains
+ * @valid_tx/rx_ant: usable antennas
+ * @max_rxq_size: Max # Rx frames in Rx queue (must be power-of-2)
+ * @max_rxq_log: Log-base-2 of max_rxq_size
+ * @rx_page_order: Rx buffer page order
+ * @rx_wrt_ptr_reg: FH{39}_RSCSR_CHNL0_WPTR
+ * @max_stations:
+ * @ht40_channel: bitmap of bands where 40 MHz channels are possible:
+ *	BIT(IEEE80211_BAND_2GHZ) and/or BIT(IEEE80211_BAND_5GHZ)
+ * @sw_crypto: 0 for hw, 1 for sw
+ * @max_xxx_size: for ucode uses
+ * @ct_kill_threshold: temperature threshold
+ * @beacon_time_tsf_bits: number of valid tsf bits for beacon time
+ * @sens: range of sensitivity values (struct il_sensitivity_ranges)
+ */
+struct il_hw_params {
+ u8 max_txq_num;
+ u8 dma_chnl_num;
+ u16 scd_bc_tbls_size;
+ u32 tfd_size;
+ u8 tx_chains_num;
+ u8 rx_chains_num;
+ u8 valid_tx_ant;
+ u8 valid_rx_ant;
+ u16 max_rxq_size;
+ u16 max_rxq_log;
+ u32 rx_page_order;
+ u32 rx_wrt_ptr_reg;
+ u8 max_stations;
+ u8 ht40_channel;
+ u8 max_beacon_itrvl; /* in 1024 ms */
+ u32 max_inst_size;
+ u32 max_data_size;
+ u32 max_bsm_size;
+ u32 ct_kill_threshold; /* value in hw-dependent units */
+ u16 beacon_time_tsf_bits;
+ const struct il_sensitivity_ranges *sens;
+};
+
+/******************************************************************************
+ *
+ * Functions implemented in core module which are forward declared here
+ * for use by 3945.c and 4965.c
+ *
+ * NOTE: The implementation of these functions are not hardware specific
+ * which is why they are in the core module files.
+ *
+ * Naming convention --
+ * il_ <-- Is part of the iwlegacy core (this module)
+ * ilXXXX_ <-- Hardware specific (implemented in XXXX.c, e.g. il4965_ in 4965.c)
+ * il4965_bg_ <-- Called from work queue context
+ * il4965_mac_ <-- mac80211 callback
+ *
+ ****************************************************************************/
+extern void il4965_update_chain_flags(struct il_priv *il);
+extern const u8 il_bcast_addr[ETH_ALEN];
+extern int il_queue_space(const struct il_queue *q);
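+
+/*
+ * il_queue_used - return whether entry idx i lies in the used region of
+ * the circular queue, i.e. between read_ptr and write_ptr, taking
+ * wrap-around into account.
+ */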
+static inline int
+il_queue_used(const struct il_queue *q, int i)
+{
+ return q->write_ptr >= q->read_ptr ?
+     (i >= q->read_ptr && i < q->write_ptr) :
+     !(i < q->read_ptr && i >= q->write_ptr);
+}
+
+static inline u8
+il_get_cmd_idx(struct il_queue *q, u32 idx, int is_huge)
+{
+ /*
+ * This is for the init calibration result and scan command, which
+ * require a buffer > TFD_MAX_PAYLOAD_SIZE: use the big buffer at
+ * the end of the command array
+ */
+ if (is_huge)
+ return q->n_win; /* must be power of 2 */
+
+ /* Otherwise, use normal size buffers */
+ return idx & (q->n_win - 1);
+}
+
+struct il_dma_ptr {
+ dma_addr_t dma;
+ void *addr;
+ size_t size;
+};
+
+#define IL_OPERATION_MODE_AUTO 0
+#define IL_OPERATION_MODE_HT_ONLY 1
+#define IL_OPERATION_MODE_MIXED 2
+#define IL_OPERATION_MODE_20MHZ 3
+
+#define IL_TX_CRC_SIZE 4
+#define IL_TX_DELIMITER_SIZE 4
+
+#define TX_POWER_IL_ILLEGAL_VOLTAGE -10000
+
+/* Sensitivity and chain noise calibration */
+#define INITIALIZATION_VALUE 0xFFFF
+#define IL4965_CAL_NUM_BEACONS 20
+#define IL_CAL_NUM_BEACONS 16
+#define MAXIMUM_ALLOWED_PATHLOSS 15
+
+#define CHAIN_NOISE_MAX_DELTA_GAIN_CODE 3
+
+#define MAX_FA_OFDM 50
+#define MIN_FA_OFDM 5
+#define MAX_FA_CCK 50
+#define MIN_FA_CCK 5
+
+#define AUTO_CORR_STEP_OFDM 1
+
+#define AUTO_CORR_STEP_CCK 3
+#define AUTO_CORR_MAX_TH_CCK 160
+
+#define NRG_DIFF 2
+#define NRG_STEP_CCK 2
+#define NRG_MARGIN 8
+#define MAX_NUMBER_CCK_NO_FA 100
+
+#define AUTO_CORR_CCK_MIN_VAL_DEF (125)
+
+#define CHAIN_A 0
+#define CHAIN_B 1
+#define CHAIN_C 2
+#define CHAIN_NOISE_DELTA_GAIN_INIT_VAL 4
+#define ALL_BAND_FILTER 0xFF00
+#define IN_BAND_FILTER 0xFF
+#define MIN_AVERAGE_NOISE_MAX_VALUE 0xFFFFFFFF
+
+#define NRG_NUM_PREV_STAT_L 20
+#define NUM_RX_CHAINS 3
+
+enum il4965_false_alarm_state {
+ IL_FA_TOO_MANY = 0,
+ IL_FA_TOO_FEW = 1,
+ IL_FA_GOOD_RANGE = 2,
+};
+
+enum il4965_chain_noise_state {
+ IL_CHAIN_NOISE_ALIVE = 0, /* must be 0 */
+ IL_CHAIN_NOISE_ACCUMULATE,
+ IL_CHAIN_NOISE_CALIBRATED,
+ IL_CHAIN_NOISE_DONE,
+};
+
+enum il4965_calib_enabled_state {
+ IL_CALIB_DISABLED = 0, /* must be 0 */
+ IL_CALIB_ENABLED = 1,
+};
+
+/*
+ * enum il_calib
+ * defines the order in which results of initial calibrations
+ * should be sent to the runtime uCode
+ */
+enum il_calib {
+ IL_CALIB_MAX,
+};
+
+/* Opaque calibration results */
+struct il_calib_result {
+ void *buf;
+ size_t buf_len;
+};
+
+enum ucode_type {
+ UCODE_NONE = 0,
+ UCODE_INIT,
+ UCODE_RT
+};
+
+/* Sensitivity calib data */
+struct il_sensitivity_data {
+ u32 auto_corr_ofdm;
+ u32 auto_corr_ofdm_mrc;
+ u32 auto_corr_ofdm_x1;
+ u32 auto_corr_ofdm_mrc_x1;
+ u32 auto_corr_cck;
+ u32 auto_corr_cck_mrc;
+
+ u32 last_bad_plcp_cnt_ofdm;
+ u32 last_fa_cnt_ofdm;
+ u32 last_bad_plcp_cnt_cck;
+ u32 last_fa_cnt_cck;
+
+ u32 nrg_curr_state;
+ u32 nrg_prev_state;
+ u32 nrg_value[10];
+ u8 nrg_silence_rssi[NRG_NUM_PREV_STAT_L];
+ u32 nrg_silence_ref;
+ u32 nrg_energy_idx;
+ u32 nrg_silence_idx;
+ u32 nrg_th_cck;
+ s32 nrg_auto_corr_silence_diff;
+ u32 num_in_cck_no_fa;
+ u32 nrg_th_ofdm;
+
+ u16 barker_corr_th_min;
+ u16 barker_corr_th_min_mrc;
+ u16 nrg_th_cca;
+};
+
+/* Chain noise (differential Rx gain) calib data */
+struct il_chain_noise_data {
+ u32 active_chains;
+ u32 chain_noise_a;
+ u32 chain_noise_b;
+ u32 chain_noise_c;
+ u32 chain_signal_a;
+ u32 chain_signal_b;
+ u32 chain_signal_c;
+ u16 beacon_count;
+ u8 disconn_array[NUM_RX_CHAINS];
+ u8 delta_gain_code[NUM_RX_CHAINS];
+ u8 radio_write;
+ u8 state;
+};
+
+#define EEPROM_SEM_TIMEOUT 10 /* milliseconds */
+#define EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
+
+#define IL_TRAFFIC_ENTRIES (256)
+#define IL_TRAFFIC_ENTRY_SIZE (64)
+
+enum {
+ MEASUREMENT_READY = (1 << 0),
+ MEASUREMENT_ACTIVE = (1 << 1),
+};
+
+/* interrupt stats */
+struct isr_stats {
+ u32 hw;
+ u32 sw;
+ u32 err_code;
+ u32 sch;
+ u32 alive;
+ u32 rfkill;
+ u32 ctkill;
+ u32 wakeup;
+ u32 rx;
+ u32 handlers[IL_CN_MAX];
+ u32 tx;
+ u32 unhandled;
+};
+
+/* management stats */
+enum il_mgmt_stats {
+ MANAGEMENT_ASSOC_REQ = 0,
+ MANAGEMENT_ASSOC_RESP,
+ MANAGEMENT_REASSOC_REQ,
+ MANAGEMENT_REASSOC_RESP,
+ MANAGEMENT_PROBE_REQ,
+ MANAGEMENT_PROBE_RESP,
+ MANAGEMENT_BEACON,
+ MANAGEMENT_ATIM,
+ MANAGEMENT_DISASSOC,
+ MANAGEMENT_AUTH,
+ MANAGEMENT_DEAUTH,
+ MANAGEMENT_ACTION,
+ MANAGEMENT_MAX,
+};
+/* control stats */
+enum il_ctrl_stats {
+ CONTROL_BACK_REQ = 0,
+ CONTROL_BACK,
+ CONTROL_PSPOLL,
+ CONTROL_RTS,
+ CONTROL_CTS,
+ CONTROL_ACK,
+ CONTROL_CFEND,
+ CONTROL_CFENDACK,
+ CONTROL_MAX,
+};
+
+struct traffic_stats {
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+ u32 mgmt[MANAGEMENT_MAX];
+ u32 ctrl[CONTROL_MAX];
+ u32 data_cnt;
+ u64 data_bytes;
+#endif
+};
+
+/*
+ * host interrupt timeout value
+ * used when setting the interrupt coalescing timer
+ * CSR_INT_COALESCING is an 8-bit register in 32-usec units
+ *
+ * default interrupt coalescing timer is 64 x 32 = 2048 usecs
+ * default interrupt coalescing calibration timer is 16 x 32 = 512 usecs
+ */
+#define IL_HOST_INT_TIMEOUT_MAX (0xFF)
+#define IL_HOST_INT_TIMEOUT_DEF (0x40)
+#define IL_HOST_INT_TIMEOUT_MIN (0x0)
+#define IL_HOST_INT_CALIB_TIMEOUT_MAX (0xFF)
+#define IL_HOST_INT_CALIB_TIMEOUT_DEF (0x10)
+#define IL_HOST_INT_CALIB_TIMEOUT_MIN (0x0)
+
+#define IL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5)
+
+/* TX queue watchdog timeouts in mSecs */
+#define IL_DEF_WD_TIMEOUT (2000)
+#define IL_LONG_WD_TIMEOUT (10000)
+#define IL_MAX_WD_TIMEOUT (120000)
+
+struct il_force_reset {
+ int reset_request_count;
+ int reset_success_count;
+ int reset_reject_count;
+ unsigned long reset_duration;
+ unsigned long last_force_reset_jiffies;
+};
+
+/* extend beacon time format bit shifting */
+/*
+ * for _3945 devices
+ * bits 31:24 - extended
+ * bits 23:0 - interval
+ */
+#define IL3945_EXT_BEACON_TIME_POS 24
+/*
+ * for _4965 devices
+ * bits 31:22 - extended
+ * bits 21:0 - interval
+ */
+#define IL4965_EXT_BEACON_TIME_POS 22
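+
+/*
+ * Sketch of how a packed beacon time value splits into the two fields
+ * described above (shown with the _3945 position; the _4965 layout is
+ * the same with the 22-bit position):
+ *
+ *	extended = time >> IL3945_EXT_BEACON_TIME_POS;
+ *	interval = time & ((1 << IL3945_EXT_BEACON_TIME_POS) - 1);
+ */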
+
+struct il_rxon_context {
+ struct ieee80211_vif *vif;
+
+ const u8 *ac_to_fifo;
+ const u8 *ac_to_queue;
+ u8 mcast_queue;
+
+ /*
+ * We could use the vif to indicate active, but we
+ * also need it to be active during disabling when
+ * we already removed the vif for type setting.
+ */
+ bool always_active, is_active;
+
+ bool ht_need_multiple_chains;
+
+ int ctxid;
+
+ u32 interface_modes, exclusive_interface_modes;
+ u8 unused_devtype, ap_devtype, ibss_devtype, station_devtype;
+
+ /*
+ * We declare this const so it can only be
+ * changed via explicit cast within the
+ * routines that actually update the physical
+ * hardware.
+ */
+ const struct il_rxon_cmd active;
+ struct il_rxon_cmd staging;
+
+ struct il_rxon_time_cmd timing;
+
+ struct il_qos_info qos_data;
+
+ u8 bcast_sta_id, ap_sta_id;
+
+ u8 rxon_cmd, rxon_assoc_cmd, rxon_timing_cmd;
+ u8 qos_cmd;
+ u8 wep_key_cmd;
+
+ struct il_wep_key wep_keys[WEP_KEYS_MAX];
+ u8 key_mapping_keys;
+
+ __le32 station_flags;
+
+ struct {
+ bool non_gf_sta_present;
+ u8 protection;
+ bool enabled, is_40mhz;
+ u8 extension_chan_offset;
+ } ht;
+};
+
+struct il_power_mgr {
+ struct il_powertable_cmd sleep_cmd;
+ struct il_powertable_cmd sleep_cmd_next;
+ int debug_sleep_level_override;
+ bool pci_pm;
+};
+
+struct il_priv {
+
+ /* ieee device used by generic ieee processing code */
+ struct ieee80211_hw *hw;
+ struct ieee80211_channel *ieee_channels;
+ struct ieee80211_rate *ieee_rates;
+ struct il_cfg *cfg;
+
+ /* temporary frame storage list */
+ struct list_head free_frames;
+ int frames_count;
+
+ enum ieee80211_band band;
+ int alloc_rxb_page;
+
+ void (*handlers[IL_CN_MAX]) (struct il_priv *il,
+ struct il_rx_buf *rxb);
+
+ struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
+
+ /* spectrum measurement report caching */
+ struct il_spectrum_notification measure_report;
+ u8 measurement_status;
+
+ /* ucode beacon time */
+ u32 ucode_beacon_time;
+ int missed_beacon_threshold;
+
+ /* track IBSS manager (last beacon) status */
+ u32 ibss_manager;
+
+ /* force reset */
+ struct il_force_reset force_reset;
+
+ /* we allocate array of il_channel_info for NIC's valid channels.
+ * Access via channel # using indirect idx array */
+ struct il_channel_info *channel_info; /* channel info array */
+ u8 channel_count; /* # of channels */
+
+ /* thermal calibration */
+ s32 temperature; /* degrees Kelvin */
+ s32 last_temperature;
+
+ /* init calibration results */
+ struct il_calib_result calib_results[IL_CALIB_MAX];
+
+ /* Scan related variables */
+ unsigned long scan_start;
+ unsigned long scan_start_tsf;
+ void *scan_cmd;
+ enum ieee80211_band scan_band;
+ struct cfg80211_scan_request *scan_request;
+ struct ieee80211_vif *scan_vif;
+ u8 scan_tx_ant[IEEE80211_NUM_BANDS];
+ u8 mgmt_tx_ant;
+
+ /* spinlock */
+ spinlock_t lock; /* protect general shared data */
+ spinlock_t hcmd_lock; /* protect hcmd */
+ spinlock_t reg_lock; /* protect hw register access */
+ struct mutex mutex;
+
+ /* basic pci-network driver stuff */
+ struct pci_dev *pci_dev;
+
+ /* pci hardware address support */
+ void __iomem *hw_base;
+ u32 hw_rev;
+ u32 hw_wa_rev;
+ u8 rev_id;
+
+ /* command queue number */
+ u8 cmd_queue;
+
+ /* max number of station keys */
+ u8 sta_key_max_num;
+
+ /* EEPROM MAC addresses */
+ struct mac_address addresses[1];
+
+ /* uCode images, save to reload in case of failure */
+ int fw_idx; /* firmware we're trying to load */
+ u32 ucode_ver; /* version of ucode, copy of
+ il_ucode.ver */
+ struct fw_desc ucode_code; /* runtime inst */
+ struct fw_desc ucode_data; /* runtime data original */
+ struct fw_desc ucode_data_backup; /* runtime data save/restore */
+ struct fw_desc ucode_init; /* initialization inst */
+ struct fw_desc ucode_init_data; /* initialization data */
+ struct fw_desc ucode_boot; /* bootstrap inst */
+ enum ucode_type ucode_type;
+ u8 ucode_write_complete; /* the image write is complete */
+ char firmware_name[25];
+
+ struct il_rxon_context ctx;
+
+ __le16 switch_channel;
+
+ /* 1st responses from initialize and runtime uCode images.
+ * _4965's initialize alive response contains some calibration data. */
+ struct il_init_alive_resp card_alive_init;
+ struct il_alive_resp card_alive;
+
+ u16 active_rate;
+
+ u8 start_calib;
+ struct il_sensitivity_data sensitivity_data;
+ struct il_chain_noise_data chain_noise_data;
+ __le16 sensitivity_tbl[HD_TBL_SIZE];
+
+ struct il_ht_config current_ht_config;
+
+ /* Rate scaling data */
+ u8 retry_rate;
+
+ wait_queue_head_t wait_command_queue;
+
+ int activity_timer_active;
+
+ /* Rx and Tx DMA processing queues */
+ struct il_rx_queue rxq;
+ struct il_tx_queue *txq;
+ unsigned long txq_ctx_active_msk;
+ struct il_dma_ptr kw; /* keep warm address */
+ struct il_dma_ptr scd_bc_tbls;
+
+ u32 scd_base_addr; /* scheduler sram base address */
+
+ unsigned long status;
+
+ /* counts mgmt, ctl, and data packets */
+ struct traffic_stats tx_stats;
+ struct traffic_stats rx_stats;
+
+ /* counts interrupts */
+ struct isr_stats isr_stats;
+
+ struct il_power_mgr power_data;
+
+ /* context information */
+ u8 bssid[ETH_ALEN]; /* used only on 3945 but filled by core */
+
+ /* station table variables */
+
+ /* Note: if lock and sta_lock are needed, lock must be acquired first */
+ spinlock_t sta_lock;
+ int num_stations;
+ struct il_station_entry stations[IL_STATION_COUNT];
+ unsigned long ucode_key_table;
+
+ /* queue refcounts */
+#define IL_MAX_HW_QUEUES 32
+ unsigned long queue_stopped[BITS_TO_LONGS(IL_MAX_HW_QUEUES)];
+ /* for each AC */
+ atomic_t queue_stop_count[4];
+
+ /* Indication if ieee80211_ops->open has been called */
+ u8 is_open;
+
+ u8 mac80211_registered;
+
+ /* eeprom -- this is in the card's little endian byte order */
+ u8 *eeprom;
+ struct il_eeprom_calib_info *calib_info;
+
+ enum nl80211_iftype iw_mode;
+
+ /* Last Rx'd beacon timestamp */
+ u64 timestamp;
+
+ union {
+#if defined(CONFIG_IWL3945) || defined(CONFIG_IWL3945_MODULE)
+ struct {
+ void *shared_virt;
+ dma_addr_t shared_phys;
+
+ struct delayed_work thermal_periodic;
+ struct delayed_work rfkill_poll;
+
+ struct il3945_notif_stats stats;
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+ struct il3945_notif_stats accum_stats;
+ struct il3945_notif_stats delta_stats;
+ struct il3945_notif_stats max_delta;
+#endif
+
+ u32 sta_supp_rates;
+ int last_rx_rssi; /* From Rx packet stats */
+
+ /* Rx'd packet timing information */
+ u32 last_beacon_time;
+ u64 last_tsf;
+
+ /*
+ * each calibration channel group in the
+ * EEPROM has a derived clip setting for
+ * each rate.
+ */
+ const struct il3945_clip_group clip_groups[5];
+
+ } _3945;
+#endif
+#if defined(CONFIG_IWL4965) || defined(CONFIG_IWL4965_MODULE)
+ struct {
+ struct il_rx_phy_res last_phy_res;
+ bool last_phy_res_valid;
+
+ struct completion firmware_loading_complete;
+
+ /*
+ * chain noise reset and gain commands are the
+ * two extra calibration commands that follow the
+ * standard phy calibration commands
+ */
+ u8 phy_calib_chain_noise_reset_cmd;
+ u8 phy_calib_chain_noise_gain_cmd;
+
+ struct il_notif_stats stats;
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+ struct il_notif_stats accum_stats;
+ struct il_notif_stats delta_stats;
+ struct il_notif_stats max_delta;
+#endif
+
+ } _4965;
+#endif
+ };
+
+ struct il_hw_params hw_params;
+
+ u32 inta_mask;
+
+ struct workqueue_struct *workqueue;
+
+ struct work_struct restart;
+ struct work_struct scan_completed;
+ struct work_struct rx_replenish;
+ struct work_struct abort_scan;
+
+ struct il_rxon_context *beacon_ctx;
+ struct sk_buff *beacon_skb;
+
+ struct work_struct tx_flush;
+
+ struct tasklet_struct irq_tasklet;
+
+ struct delayed_work init_alive_start;
+ struct delayed_work alive_start;
+ struct delayed_work scan_check;
+
+ /* TX Power */
+ s8 tx_power_user_lmt;
+ s8 tx_power_device_lmt;
+ s8 tx_power_next;
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+ /* debugging info */
+ u32 debug_level; /* per device debugging will override global
+ il_debug_level if set */
+#endif /* CONFIG_IWLEGACY_DEBUG */
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+ /* debugfs */
+ u16 tx_traffic_idx;
+ u16 rx_traffic_idx;
+ u8 *tx_traffic;
+ u8 *rx_traffic;
+ struct dentry *debugfs_dir;
+ u32 dbgfs_sram_offset, dbgfs_sram_len;
+ bool disable_ht40;
+#endif /* CONFIG_IWLEGACY_DEBUGFS */
+
+ struct work_struct txpower_work;
+ u32 disable_sens_cal;
+ u32 disable_chain_noise_cal;
+ u32 disable_tx_power_cal;
+ struct work_struct run_time_calib_work;
+ struct timer_list stats_periodic;
+ struct timer_list watchdog;
+ bool hw_ready;
+
+ struct led_classdev led;
+ unsigned long blink_on, blink_off;
+ bool led_registered;
+}; /*il_priv */
+
+static inline void
+il_txq_ctx_activate(struct il_priv *il, int txq_id)
+{
+ set_bit(txq_id, &il->txq_ctx_active_msk);
+}
+
+static inline void
+il_txq_ctx_deactivate(struct il_priv *il, int txq_id)
+{
+ clear_bit(txq_id, &il->txq_ctx_active_msk);
+}
+
+static inline struct ieee80211_hdr *
+il_tx_queue_get_hdr(struct il_priv *il, int txq_id, int idx)
+{
+ if (il->txq[txq_id].txb[idx].skb)
+ return (struct ieee80211_hdr *)
+        il->txq[txq_id].txb[idx].skb->data;
+ return NULL;
+}
+
+static inline struct il_rxon_context *
+il_rxon_ctx_from_vif(struct ieee80211_vif *vif)
+{
+ struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
+
+ return vif_priv->ctx;
+}
+
+#define for_each_context(il, _ctx) \
+ for (_ctx = &il->ctx; _ctx == &il->ctx; _ctx++)
+
+static inline int
+il_is_associated(struct il_priv *il)
+{
+ return (il->ctx.active.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0;
+}
+
+static inline int
+il_is_any_associated(struct il_priv *il)
+{
+ return il_is_associated(il);
+}
+
+static inline int
+il_is_associated_ctx(struct il_rxon_context *ctx)
+{
+ return (ctx->active.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0;
+}
+
+static inline int
+il_is_channel_valid(const struct il_channel_info *ch_info)
+{
+ if (ch_info == NULL)
+ return 0;
+ return (ch_info->flags & EEPROM_CHANNEL_VALID) ? 1 : 0;
+}
+
+static inline int
+il_is_channel_radar(const struct il_channel_info *ch_info)
+{
+ return (ch_info->flags & EEPROM_CHANNEL_RADAR) ? 1 : 0;
+}
+
+static inline u8
+il_is_channel_a_band(const struct il_channel_info *ch_info)
+{
+ return ch_info->band == IEEE80211_BAND_5GHZ;
+}
+
+static inline int
+il_is_channel_passive(const struct il_channel_info *ch)
+{
+ return (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) ? 1 : 0;
+}
+
+static inline int
+il_is_channel_ibss(const struct il_channel_info *ch)
+{
+ return (ch->flags & EEPROM_CHANNEL_IBSS) ? 1 : 0;
+}
+
+static inline void
+__il_free_pages(struct il_priv *il, struct page *page)
+{
+ __free_pages(page, il->hw_params.rx_page_order);
+ il->alloc_rxb_page--;
+}
+
+static inline void
+il_free_pages(struct il_priv *il, unsigned long page)
+{
+ free_pages(page, il->hw_params.rx_page_order);
+ il->alloc_rxb_page--;
+}
+
+#define IWLWIFI_VERSION "in-tree:"
+#define DRV_COPYRIGHT "Copyright(c) 2003-2011 Intel Corporation"
+#define DRV_AUTHOR "<ilw@linux.intel.com>"
+
+#define IL_PCI_DEVICE(dev, subdev, cfg) \
+ .vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \
+ .subvendor = PCI_ANY_ID, .subdevice = (subdev), \
+ .driver_data = (kernel_ulong_t)&(cfg)
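+
+/*
+ * Usage sketch for a PCI device table entry (the device ID and cfg
+ * symbol below are purely illustrative):
+ *
+ *	static const struct pci_device_id il_hw_card_ids[] = {
+ *		{IL_PCI_DEVICE(0x4229, PCI_ANY_ID, il4965_cfg)},
+ *		{0}
+ *	};
+ */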
+
+#define TIME_UNIT 1024
+
+#define IL_SKU_G 0x1
+#define IL_SKU_A 0x2
+#define IL_SKU_N 0x8
+
+#define IL_CMD(x) case x: return #x
+
+/* Size of one Rx buffer in host DRAM */
+#define IL_RX_BUF_SIZE_3K (3 * 1000) /* 3945 only */
+#define IL_RX_BUF_SIZE_4K (4 * 1024)
+#define IL_RX_BUF_SIZE_8K (8 * 1024)
+
+struct il_hcmd_ops {
+ int (*rxon_assoc) (struct il_priv *il, struct il_rxon_context *ctx);
+ int (*commit_rxon) (struct il_priv *il, struct il_rxon_context *ctx);
+ void (*set_rxon_chain) (struct il_priv *il,
+ struct il_rxon_context *ctx);
+};
+
+struct il_hcmd_utils_ops {
+ u16(*get_hcmd_size) (u8 cmd_id, u16 len);
+ u16(*build_addsta_hcmd) (const struct il_addsta_cmd *cmd, u8 *data);
+ int (*request_scan) (struct il_priv *il, struct ieee80211_vif *vif);
+ void (*post_scan) (struct il_priv *il);
+};
+
+struct il_apm_ops {
+ int (*init) (struct il_priv *il);
+ void (*config) (struct il_priv *il);
+};
+
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+struct il_debugfs_ops {
+ ssize_t(*rx_stats_read) (struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos);
+ ssize_t(*tx_stats_read) (struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos);
+ ssize_t(*general_stats_read) (struct file *file,
+ char __user *user_buf, size_t count,
+ loff_t *ppos);
+};
+#endif
+
+struct il_temp_ops {
+ void (*temperature) (struct il_priv *il);
+};
+
+struct il_lib_ops {
+ /* set hw dependent parameters */
+ int (*set_hw_params) (struct il_priv *il);
+ /* Handling TX */
+ void (*txq_update_byte_cnt_tbl) (struct il_priv *il,
+ struct il_tx_queue *txq,
+ u16 byte_cnt);
+ int (*txq_attach_buf_to_tfd) (struct il_priv *il,
+ struct il_tx_queue *txq, dma_addr_t addr,
+ u16 len, u8 reset, u8 pad);
+ void (*txq_free_tfd) (struct il_priv *il, struct il_tx_queue *txq);
+ int (*txq_init) (struct il_priv *il, struct il_tx_queue *txq);
+ /* setup Rx handler */
+ void (*handler_setup) (struct il_priv *il);
+ /* alive notification after init uCode load */
+ void (*init_alive_start) (struct il_priv *il);
+ /* check validity of rtc data address */
+ int (*is_valid_rtc_data_addr) (u32 addr);
+ /* 1st ucode load */
+ int (*load_ucode) (struct il_priv *il);
+
+ void (*dump_nic_error_log) (struct il_priv *il);
+ int (*dump_fh) (struct il_priv *il, char **buf, bool display);
+ int (*set_channel_switch) (struct il_priv *il,
+ struct ieee80211_channel_switch *ch_switch);
+ /* power management */
+ struct il_apm_ops apm_ops;
+
+ /* power */
+ int (*send_tx_power) (struct il_priv *il);
+ void (*update_chain_flags) (struct il_priv *il);
+
+ /* eeprom operations */
+ struct il_eeprom_ops eeprom_ops;
+
+ /* temperature */
+ struct il_temp_ops temp_ops;
+
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+ struct il_debugfs_ops debugfs_ops;
+#endif
+
+};
+
+struct il_led_ops {
+ int (*cmd) (struct il_priv *il, struct il_led_cmd *led_cmd);
+};
+
+struct il_legacy_ops {
+ void (*post_associate) (struct il_priv *il);
+ void (*config_ap) (struct il_priv *il);
+ /* station management */
+ int (*update_bcast_stations) (struct il_priv *il);
+ int (*manage_ibss_station) (struct il_priv *il,
+ struct ieee80211_vif *vif, bool add);
+};
+
+struct il_ops {
+ const struct il_lib_ops *lib;
+ const struct il_hcmd_ops *hcmd;
+ const struct il_hcmd_utils_ops *utils;
+ const struct il_led_ops *led;
+ const struct il_nic_ops *nic;
+ const struct il_legacy_ops *legacy;
+ const struct ieee80211_ops *ieee80211_ops;
+};
+
+struct il_mod_params {
+ int sw_crypto; /* def: 0 = using hardware encryption */
+ int disable_hw_scan; /* def: 0 = use h/w scan */
+ int num_of_queues; /* def: HW dependent */
+ int disable_11n; /* def: 0 = 11n capabilities enabled */
+ int amsdu_size_8K; /* def: 1 = enable 8K amsdu size */
+ int antenna; /* def: 0 = both antennas (use diversity) */
+ int restart_fw; /* def: 1 = restart firmware */
+};
+
+/*
+ * @led_compensation: compensate the led on/off time per HW according
+ * to the deviation to achieve the desired led frequency.
+ * The detailed algorithm is described in common.c
+ * @chain_noise_num_beacons: number of beacons used to compute chain noise
+ * @wd_timeout: TX queues watchdog timeout
+ * @temperature_kelvin: temperature reported by uCode in kelvin
+ * @ucode_tracing: support ucode continuous tracing
+ * @sensitivity_calib_by_driver: driver has the capability to perform
+ * sensitivity calibration operation
+ * @chain_noise_calib_by_driver: driver has the capability to perform
+ * chain noise calibration operation
+ */
+struct il_base_params {
+ int eeprom_size;
+ int num_of_queues; /* def: HW dependent */
+ int num_of_ampdu_queues; /* def: HW dependent */
+ /* for il_apm_init() */
+ u32 pll_cfg_val;
+ bool set_l0s;
+ bool use_bsm;
+
+ u16 led_compensation;
+ int chain_noise_num_beacons;
+ unsigned int wd_timeout;
+ bool temperature_kelvin;
+ const bool ucode_tracing;
+ const bool sensitivity_calib_by_driver;
+ const bool chain_noise_calib_by_driver;
+};
+
+#define IL_LED_SOLID 11
+#define IL_DEF_LED_INTRVL cpu_to_le32(1000)
+
+#define IL_LED_ACTIVITY (0<<1)
+#define IL_LED_LINK (1<<1)
+
+/*
+ * LED mode
+ * IL_LED_DEFAULT: use device default
+ * IL_LED_RF_STATE: turn LED on/off based on RF state
+ * LED ON = RF ON
+ * LED OFF = RF OFF
+ * IL_LED_BLINK: adjust led blink rate based on blink table
+ */
+enum il_led_mode {
+ IL_LED_DEFAULT,
+ IL_LED_RF_STATE,
+ IL_LED_BLINK,
+};
+
+void il_leds_init(struct il_priv *il);
+void il_leds_exit(struct il_priv *il);
+
+/**
+ * struct il_cfg
+ * @fw_name_pre: Firmware filename prefix. The API version and extension
+ * (.ucode) will be added to the filename before loading from disk. The
+ * filename is constructed as fw_name_pre<api>.ucode.
+ * @ucode_api_max: Highest version of uCode API supported by driver.
+ * @ucode_api_min: Lowest version of uCode API supported by driver.
+ * @scan_antennas: available antenna for scan operation
+ * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off)
+ *
+ * We enable the driver to be backward compatible wrt API version. The
+ * driver specifies which APIs it supports (with @ucode_api_max being the
+ * highest and @ucode_api_min the lowest). Firmware will only be loaded if
+ * it has a supported API version. The firmware's API version will be
+ * stored in @il_priv, enabling the driver to make runtime changes based
+ * on firmware version used.
+ *
+ * For example,
+ * if (IL_UCODE_API(il->ucode_ver) >= 2) {
+ * Driver interacts with Firmware API version >= 2.
+ * } else {
+ * Driver interacts with Firmware API version 1.
+ * }
+ *
+ * The ideal usage of this infrastructure is to treat a new ucode API
+ * release as a new hardware revision. That is, through utilizing the
+ * il_hcmd_utils_ops etc. we accommodate different command structures
+ * and flows between hardware versions as well as their API
+ * versions.
+ *
+ */
+struct il_cfg {
+ /* params specific to an individual device within a device family */
+ const char *name;
+ const char *fw_name_pre;
+ const unsigned int ucode_api_max;
+ const unsigned int ucode_api_min;
+ u8 valid_tx_ant;
+ u8 valid_rx_ant;
+ unsigned int sku;
+ u16 eeprom_ver;
+ u16 eeprom_calib_ver;
+ const struct il_ops *ops;
+ /* module based parameters which can be set from modprobe cmd */
+ const struct il_mod_params *mod_params;
+ /* params not likely to change within a device family */
+ struct il_base_params *base_params;
+ /* params likely to change within a device family */
+ u8 scan_rx_antennas[IEEE80211_NUM_BANDS];
+ enum il_led_mode led_mode;
+};
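+
+/*
+ * Illustrative sketch (not part of the driver API): per the il_cfg comment
+ * above, a firmware file name could be assembled from fw_name_pre and an
+ * API version roughly like this:
+ *
+ *	char fw_name[64];
+ *	snprintf(fw_name, sizeof(fw_name), "%s%u%s",
+ *		 cfg->fw_name_pre, api_ver, ".ucode");
+ *
+ * where api_ver is an API version picked between ucode_api_min and
+ * ucode_api_max.
+ */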
+
+/***************************
+ * L i b *
+ ***************************/
+
+struct ieee80211_hw *il_alloc_all(struct il_cfg *cfg);
+int il_mac_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u16 queue, const struct ieee80211_tx_queue_params *params);
+int il_mac_tx_last_beacon(struct ieee80211_hw *hw);
+
+void il_set_rxon_hwcrypto(struct il_priv *il, struct il_rxon_context *ctx,
+ int hw_decrypt);
+int il_check_rxon_cmd(struct il_priv *il, struct il_rxon_context *ctx);
+int il_full_rxon_required(struct il_priv *il, struct il_rxon_context *ctx);
+int il_set_rxon_channel(struct il_priv *il, struct ieee80211_channel *ch,
+ struct il_rxon_context *ctx);
+void il_set_flags_for_band(struct il_priv *il, struct il_rxon_context *ctx,
+ enum ieee80211_band band, struct ieee80211_vif *vif);
+u8 il_get_single_channel_number(struct il_priv *il, enum ieee80211_band band);
+void il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf);
+bool il_is_ht40_tx_allowed(struct il_priv *il, struct il_rxon_context *ctx,
+ struct ieee80211_sta_ht_cap *ht_cap);
+void il_connection_init_rx_config(struct il_priv *il,
+ struct il_rxon_context *ctx);
+void il_set_rate(struct il_priv *il);
+int il_set_decrypted_flag(struct il_priv *il, struct ieee80211_hdr *hdr,
+ u32 decrypt_res, struct ieee80211_rx_status *stats);
+void il_irq_handle_error(struct il_priv *il);
+int il_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
+void il_mac_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+int il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ enum nl80211_iftype newtype, bool newp2p);
+int il_alloc_txq_mem(struct il_priv *il);
+void il_txq_mem(struct il_priv *il);
+
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+int il_alloc_traffic_mem(struct il_priv *il);
+void il_free_traffic_mem(struct il_priv *il);
+void il_reset_traffic_log(struct il_priv *il);
+void il_dbg_log_tx_data_frame(struct il_priv *il, u16 length,
+ struct ieee80211_hdr *header);
+void il_dbg_log_rx_data_frame(struct il_priv *il, u16 length,
+ struct ieee80211_hdr *header);
+const char *il_get_mgmt_string(int cmd);
+const char *il_get_ctrl_string(int cmd);
+void il_clear_traffic_stats(struct il_priv *il);
+void il_update_stats(struct il_priv *il, bool is_tx, __le16 fc, u16 len);
+#else
+static inline int
+il_alloc_traffic_mem(struct il_priv *il)
+{
+ return 0;
+}
+
+static inline void
+il_free_traffic_mem(struct il_priv *il)
+{
+}
+
+static inline void
+il_reset_traffic_log(struct il_priv *il)
+{
+}
+
+static inline void
+il_dbg_log_tx_data_frame(struct il_priv *il, u16 length,
+ struct ieee80211_hdr *header)
+{
+}
+
+static inline void
+il_dbg_log_rx_data_frame(struct il_priv *il, u16 length,
+ struct ieee80211_hdr *header)
+{
+}
+
+static inline void
+il_update_stats(struct il_priv *il, bool is_tx, __le16 fc, u16 len)
+{
+}
+#endif
+/*****************************************************
+ * RX handlers.
+ * **************************************************/
+void il_hdl_pm_sleep(struct il_priv *il, struct il_rx_buf *rxb);
+void il_hdl_pm_debug_stats(struct il_priv *il, struct il_rx_buf *rxb);
+void il_hdl_error(struct il_priv *il, struct il_rx_buf *rxb);
+
+/*****************************************************
+* RX
+******************************************************/
+void il_cmd_queue_unmap(struct il_priv *il);
+void il_cmd_queue_free(struct il_priv *il);
+int il_rx_queue_alloc(struct il_priv *il);
+void il_rx_queue_update_write_ptr(struct il_priv *il, struct il_rx_queue *q);
+int il_rx_queue_space(const struct il_rx_queue *q);
+void il_tx_cmd_complete(struct il_priv *il, struct il_rx_buf *rxb);
+/* Handlers */
+void il_hdl_spectrum_measurement(struct il_priv *il, struct il_rx_buf *rxb);
+void il_recover_from_stats(struct il_priv *il, struct il_rx_pkt *pkt);
+void il_chswitch_done(struct il_priv *il, bool is_success);
+void il_hdl_csa(struct il_priv *il, struct il_rx_buf *rxb);
+
+/* TX helpers */
+
+/*****************************************************
+* TX
+******************************************************/
+void il_txq_update_write_ptr(struct il_priv *il, struct il_tx_queue *txq);
+int il_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq, int slots_num,
+ u32 txq_id);
+void il_tx_queue_reset(struct il_priv *il, struct il_tx_queue *txq,
+ int slots_num, u32 txq_id);
+void il_tx_queue_unmap(struct il_priv *il, int txq_id);
+void il_tx_queue_free(struct il_priv *il, int txq_id);
+void il_setup_watchdog(struct il_priv *il);
+/*****************************************************
+ * TX power
+ ****************************************************/
+int il_set_tx_power(struct il_priv *il, s8 tx_power, bool force);
+
+/*******************************************************************************
+ * Rate
+ ******************************************************************************/
+
+u8 il_get_lowest_plcp(struct il_priv *il, struct il_rxon_context *ctx);
+
+/*******************************************************************************
+ * Scanning
+ ******************************************************************************/
+void il_init_scan_params(struct il_priv *il);
+int il_scan_cancel(struct il_priv *il);
+int il_scan_cancel_timeout(struct il_priv *il, unsigned long ms);
+void il_force_scan_end(struct il_priv *il);
+int il_mac_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct cfg80211_scan_request *req);
+void il_internal_short_hw_scan(struct il_priv *il);
+int il_force_reset(struct il_priv *il, bool external);
+u16 il_fill_probe_req(struct il_priv *il, struct ieee80211_mgmt *frame,
+ const u8 *ta, const u8 *ie, int ie_len, int left);
+void il_setup_rx_scan_handlers(struct il_priv *il);
+u16 il_get_active_dwell_time(struct il_priv *il, enum ieee80211_band band,
+ u8 n_probes);
+u16 il_get_passive_dwell_time(struct il_priv *il, enum ieee80211_band band,
+ struct ieee80211_vif *vif);
+void il_setup_scan_deferred_work(struct il_priv *il);
+void il_cancel_scan_deferred_work(struct il_priv *il);
+
+/* For faster active scanning, scan will move to the next channel if fewer than
+ * PLCP_QUIET_THRESH packets are heard on this channel within
+ * ACTIVE_QUIET_TIME after sending probe request. This shortens the dwell
+ * time if it's a quiet channel (nothing responded to our probe, and there's
+ * no other traffic).
+ * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */
+#define IL_ACTIVE_QUIET_TIME cpu_to_le16(10) /* msec */
+#define IL_PLCP_QUIET_THRESH cpu_to_le16(1) /* packets */
+
+#define IL_SCAN_CHECK_WATCHDOG (HZ * 7)
+
+/*****************************************************
+ * S e n d i n g H o s t C o m m a n d s *
+ *****************************************************/
+
+const char *il_get_cmd_string(u8 cmd);
+int __must_check il_send_cmd_sync(struct il_priv *il, struct il_host_cmd *cmd);
+int il_send_cmd(struct il_priv *il, struct il_host_cmd *cmd);
+int __must_check il_send_cmd_pdu(struct il_priv *il, u8 id, u16 len,
+ const void *data);
+int il_send_cmd_pdu_async(struct il_priv *il, u8 id, u16 len, const void *data,
+ void (*callback) (struct il_priv *il,
+ struct il_device_cmd *cmd,
+ struct il_rx_pkt *pkt));
+
+int il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd);
+
+/*****************************************************
+ * PCI *
+ *****************************************************/
+
+static inline u16
+il_pcie_link_ctl(struct il_priv *il)
+{
+ int pos;
+ u16 pci_lnk_ctl;
+ pos = pci_pcie_cap(il->pci_dev);
+ pci_read_config_word(il->pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl);
+ return pci_lnk_ctl;
+}
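+
+/*
+ * Illustrative usage (sketch, not mandated by this header): the link
+ * control word returned above can be tested against the
+ * PCI_CFG_LINK_CTRL_VAL_L0S_EN / PCI_CFG_LINK_CTRL_VAL_L1_EN values
+ * defined later in this file, e.g.
+ *
+ *	u16 lctl = il_pcie_link_ctl(il);
+ *	if (lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN)
+ *		... ASPM L0s was enabled by platform firmware ...
+ */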
+
+void il_bg_watchdog(unsigned long data);
+u32 il_usecs_to_beacons(struct il_priv *il, u32 usec, u32 beacon_interval);
+__le32 il_add_beacon_time(struct il_priv *il, u32 base, u32 addon,
+ u32 beacon_interval);
+
+#ifdef CONFIG_PM
+int il_pci_suspend(struct device *device);
+int il_pci_resume(struct device *device);
+extern const struct dev_pm_ops il_pm_ops;
+
+#define IL_LEGACY_PM_OPS (&il_pm_ops)
+
+#else /* !CONFIG_PM */
+
+#define IL_LEGACY_PM_OPS NULL
+
+#endif /* !CONFIG_PM */
+
+/*****************************************************
+* Error Handling Debugging
+******************************************************/
+void il4965_dump_nic_error_log(struct il_priv *il);
+#ifdef CONFIG_IWLEGACY_DEBUG
+void il_print_rx_config_cmd(struct il_priv *il, struct il_rxon_context *ctx);
+#else
+static inline void
+il_print_rx_config_cmd(struct il_priv *il, struct il_rxon_context *ctx)
+{
+}
+#endif
+
+void il_clear_isr_stats(struct il_priv *il);
+
+/*****************************************************
+* GEOS
+******************************************************/
+int il_init_geos(struct il_priv *il);
+void il_free_geos(struct il_priv *il);
+
+/*************** DRIVER STATUS FUNCTIONS *****/
+
+#define S_HCMD_ACTIVE 0 /* host command in progress */
+/* 1 is unused (used to be S_HCMD_SYNC_ACTIVE) */
+#define S_INT_ENABLED 2
+#define S_RF_KILL_HW 3
+#define S_CT_KILL 4
+#define S_INIT 5
+#define S_ALIVE 6
+#define S_READY 7
+#define S_TEMPERATURE 8
+#define S_GEO_CONFIGURED 9
+#define S_EXIT_PENDING 10
+#define S_STATS 12
+#define S_SCANNING 13
+#define S_SCAN_ABORTING 14
+#define S_SCAN_HW 15
+#define S_POWER_PMI 16
+#define S_FW_ERROR 17
+#define S_CHANNEL_SWITCH_PENDING 18
+
+static inline int
+il_is_ready(struct il_priv *il)
+{
+ /* The adapter is 'ready' if READY and GEO_CONFIGURED bits are
+ * set but EXIT_PENDING is not */
+ return test_bit(S_READY, &il->status) &&
+ test_bit(S_GEO_CONFIGURED, &il->status) &&
+ !test_bit(S_EXIT_PENDING, &il->status);
+}
+
+static inline int
+il_is_alive(struct il_priv *il)
+{
+ return test_bit(S_ALIVE, &il->status);
+}
+
+static inline int
+il_is_init(struct il_priv *il)
+{
+ return test_bit(S_INIT, &il->status);
+}
+
+static inline int
+il_is_rfkill_hw(struct il_priv *il)
+{
+ return test_bit(S_RF_KILL_HW, &il->status);
+}
+
+static inline int
+il_is_rfkill(struct il_priv *il)
+{
+ return il_is_rfkill_hw(il);
+}
+
+static inline int
+il_is_ctkill(struct il_priv *il)
+{
+ return test_bit(S_CT_KILL, &il->status);
+}
+
+static inline int
+il_is_ready_rf(struct il_priv *il)
+{
+
+ if (il_is_rfkill(il))
+ return 0;
+
+ return il_is_ready(il);
+}
+
+extern void il_send_bt_config(struct il_priv *il);
+extern int il_send_stats_request(struct il_priv *il, u8 flags, bool clear);
+void il_apm_stop(struct il_priv *il);
+int il_apm_init(struct il_priv *il);
+
+int il_send_rxon_timing(struct il_priv *il, struct il_rxon_context *ctx);
+static inline int
+il_send_rxon_assoc(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ return il->cfg->ops->hcmd->rxon_assoc(il, ctx);
+}
+
+static inline int
+il_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ return il->cfg->ops->hcmd->commit_rxon(il, ctx);
+}
+
+static inline const struct ieee80211_supported_band *
+il_get_hw_mode(struct il_priv *il, enum ieee80211_band band)
+{
+ return il->hw->wiphy->bands[band];
+}
+
+/* mac80211 handlers */
+int il_mac_config(struct ieee80211_hw *hw, u32 changed);
+void il_mac_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
+void il_mac_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf, u32 changes);
+void il_tx_cmd_protection(struct il_priv *il, struct ieee80211_tx_info *info,
+ __le16 fc, __le32 *tx_flags);
+
+irqreturn_t il_isr(int irq, void *data);
+
+extern void il_set_bit(struct il_priv *p, u32 r, u32 m);
+extern void il_clear_bit(struct il_priv *p, u32 r, u32 m);
+extern int _il_grab_nic_access(struct il_priv *il);
+extern int _il_poll_bit(struct il_priv *il, u32 addr, u32 bits, u32 mask, int timeout);
+extern int il_poll_bit(struct il_priv *il, u32 addr, u32 mask, int timeout);
+extern u32 il_rd_prph(struct il_priv *il, u32 reg);
+extern void il_wr_prph(struct il_priv *il, u32 addr, u32 val);
+extern u32 il_read_targ_mem(struct il_priv *il, u32 addr);
+extern void il_write_targ_mem(struct il_priv *il, u32 addr, u32 val);
+
+static inline void
+_il_write8(struct il_priv *il, u32 ofs, u8 val)
+{
+ iowrite8(val, il->hw_base + ofs);
+}
+#define il_write8(il, ofs, val) _il_write8(il, ofs, val)
+
+static inline void
+_il_wr(struct il_priv *il, u32 ofs, u32 val)
+{
+ iowrite32(val, il->hw_base + ofs);
+}
+
+static inline u32
+_il_rd(struct il_priv *il, u32 ofs)
+{
+ return ioread32(il->hw_base + ofs);
+}
+
+static inline void
+_il_clear_bit(struct il_priv *il, u32 reg, u32 mask)
+{
+ _il_wr(il, reg, _il_rd(il, reg) & ~mask);
+}
+
+static inline void
+_il_set_bit(struct il_priv *il, u32 reg, u32 mask)
+{
+ _il_wr(il, reg, _il_rd(il, reg) | mask);
+}
+
+static inline void
+_il_release_nic_access(struct il_priv *il)
+{
+ _il_clear_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+}
+
+static inline u32
+il_rd(struct il_priv *il, u32 reg)
+{
+ u32 value;
+ unsigned long reg_flags;
+
+ spin_lock_irqsave(&il->reg_lock, reg_flags);
+ _il_grab_nic_access(il);
+ value = _il_rd(il, reg);
+ _il_release_nic_access(il);
+ spin_unlock_irqrestore(&il->reg_lock, reg_flags);
+ return value;
+}
+
+static inline void
+il_wr(struct il_priv *il, u32 reg, u32 value)
+{
+ unsigned long reg_flags;
+
+ spin_lock_irqsave(&il->reg_lock, reg_flags);
+ if (!_il_grab_nic_access(il)) {
+ _il_wr(il, reg, value);
+ _il_release_nic_access(il);
+ }
+ spin_unlock_irqrestore(&il->reg_lock, reg_flags);
+}
+
+static inline u32
+_il_rd_prph(struct il_priv *il, u32 reg)
+{
+ _il_wr(il, HBUS_TARG_PRPH_RADDR, reg | (3 << 24));
+ rmb();
+ return _il_rd(il, HBUS_TARG_PRPH_RDAT);
+}
+
+static inline void
+_il_wr_prph(struct il_priv *il, u32 addr, u32 val)
+{
+ _il_wr(il, HBUS_TARG_PRPH_WADDR, ((addr & 0x0000FFFF) | (3 << 24)));
+ wmb();
+ _il_wr(il, HBUS_TARG_PRPH_WDAT, val);
+}
+
+static inline void
+il_set_bits_prph(struct il_priv *il, u32 reg, u32 mask)
+{
+ unsigned long reg_flags;
+
+ spin_lock_irqsave(&il->reg_lock, reg_flags);
+ _il_grab_nic_access(il);
+ _il_wr_prph(il, reg, (_il_rd_prph(il, reg) | mask));
+ _il_release_nic_access(il);
+ spin_unlock_irqrestore(&il->reg_lock, reg_flags);
+}
+
+static inline void
+il_set_bits_mask_prph(struct il_priv *il, u32 reg, u32 bits, u32 mask)
+{
+ unsigned long reg_flags;
+
+ spin_lock_irqsave(&il->reg_lock, reg_flags);
+ _il_grab_nic_access(il);
+ _il_wr_prph(il, reg, ((_il_rd_prph(il, reg) & mask) | bits));
+ _il_release_nic_access(il);
+ spin_unlock_irqrestore(&il->reg_lock, reg_flags);
+}
+
+static inline void
+il_clear_bits_prph(struct il_priv *il, u32 reg, u32 mask)
+{
+ unsigned long reg_flags;
+ u32 val;
+
+ spin_lock_irqsave(&il->reg_lock, reg_flags);
+ _il_grab_nic_access(il);
+ val = _il_rd_prph(il, reg);
+ _il_wr_prph(il, reg, (val & ~mask));
+ _il_release_nic_access(il);
+ spin_unlock_irqrestore(&il->reg_lock, reg_flags);
+}
+
+#define HW_KEY_DYNAMIC 0
+#define HW_KEY_DEFAULT 1
+
+#define IL_STA_DRIVER_ACTIVE BIT(0) /* driver entry is active */
+#define IL_STA_UCODE_ACTIVE BIT(1) /* ucode entry is active */
+#define IL_STA_UCODE_INPROGRESS BIT(2) /* ucode entry is in process of
+ being activated */
+#define IL_STA_LOCAL BIT(3) /* station state not directed by mac80211;
+ (this is for the IBSS BSSID stations) */
+#define IL_STA_BCAST BIT(4) /* this station is the special bcast station */
+
+void il_restore_stations(struct il_priv *il, struct il_rxon_context *ctx);
+void il_clear_ucode_stations(struct il_priv *il, struct il_rxon_context *ctx);
+void il_dealloc_bcast_stations(struct il_priv *il);
+int il_get_free_ucode_key_idx(struct il_priv *il);
+int il_send_add_sta(struct il_priv *il, struct il_addsta_cmd *sta, u8 flags);
+int il_add_station_common(struct il_priv *il, struct il_rxon_context *ctx,
+ const u8 *addr, bool is_ap,
+ struct ieee80211_sta *sta, u8 *sta_id_r);
+int il_remove_station(struct il_priv *il, const u8 sta_id, const u8 * addr);
+int il_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+
+u8 il_prep_station(struct il_priv *il, struct il_rxon_context *ctx,
+ const u8 *addr, bool is_ap, struct ieee80211_sta *sta);
+
+int il_send_lq_cmd(struct il_priv *il, struct il_rxon_context *ctx,
+ struct il_link_quality_cmd *lq, u8 flags, bool init);
+
+/**
+ * il_clear_driver_stations - clear knowledge of all stations from driver
+ * @il: pointer to the il_priv data structure
+ *
+ * This is called during il_down() to make sure that, in case we got
+ * here from a hardware restart, mac80211 will be able to reconfigure
+ * stations -- if we got here through the normal down flow, the
+ * stations will already be cleared.
+ */
+static inline void
+il_clear_driver_stations(struct il_priv *il)
+{
+ unsigned long flags;
+ struct il_rxon_context *ctx = &il->ctx;
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ memset(il->stations, 0, sizeof(il->stations));
+ il->num_stations = 0;
+
+ il->ucode_key_table = 0;
+
+ /*
+ * Remove all key information that is not stored as part
+ * of station information since mac80211 may not have had
+ * a chance to remove all the keys. When device is
+ * reconfigured by mac80211 after an error all keys will
+ * be reconfigured.
+ */
+ memset(ctx->wep_keys, 0, sizeof(ctx->wep_keys));
+ ctx->key_mapping_keys = 0;
+
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+}
+
+static inline int
+il_sta_id(struct ieee80211_sta *sta)
+{
+ if (WARN_ON(!sta))
+ return IL_INVALID_STATION;
+
+ return ((struct il_station_priv_common *)sta->drv_priv)->sta_id;
+}
+
+/**
+ * il_sta_id_or_broadcast - return sta_id or broadcast sta
+ * @il: pointer to the il_priv data structure
+ * @context: the current context
+ * @sta: mac80211 station
+ *
+ * In certain circumstances mac80211 passes a station pointer
+ * that may be %NULL, for example during TX or key setup. In
+ * that case, we need to use the broadcast station, so this
+ * inline wraps that pattern.
+ */
+static inline int
+il_sta_id_or_broadcast(struct il_priv *il, struct il_rxon_context *context,
+ struct ieee80211_sta *sta)
+{
+ int sta_id;
+
+ if (!sta)
+ return context->bcast_sta_id;
+
+ sta_id = il_sta_id(sta);
+
+ /*
+ * mac80211 should not be passing a partially
+ * initialised station!
+ */
+ WARN_ON(sta_id == IL_INVALID_STATION);
+
+ return sta_id;
+}
+
+/**
+ * il_queue_inc_wrap - increment queue idx, wrap back to beginning
+ * @idx -- current idx
+ * @n_bd -- total number of entries in queue (must be power of 2)
+ */
+static inline int
+il_queue_inc_wrap(int idx, int n_bd)
+{
+ return ++idx & (n_bd - 1);
+}
+
+/**
+ * il_queue_dec_wrap - decrement queue idx, wrap back to end
+ * @idx -- current idx
+ * @n_bd -- total number of entries in queue (must be power of 2)
+ */
+static inline int
+il_queue_dec_wrap(int idx, int n_bd)
+{
+ return --idx & (n_bd - 1);
+}
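+
+/*
+ * Worked example (illustrative): with n_bd = 256,
+ * il_queue_inc_wrap(255, 256) == 0 and il_queue_dec_wrap(0, 256) == 255;
+ * masking with (n_bd - 1) only wraps correctly because n_bd is a power of 2.
+ */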
+
+/* TODO: Move fw_desc functions to iwl-pci.ko */
+static inline void
+il_free_fw_desc(struct pci_dev *pci_dev, struct fw_desc *desc)
+{
+ if (desc->v_addr)
+ dma_free_coherent(&pci_dev->dev, desc->len, desc->v_addr,
+ desc->p_addr);
+ desc->v_addr = NULL;
+ desc->len = 0;
+}
+
+static inline int
+il_alloc_fw_desc(struct pci_dev *pci_dev, struct fw_desc *desc)
+{
+ if (!desc->len) {
+ desc->v_addr = NULL;
+ return -EINVAL;
+ }
+
+ desc->v_addr =
+ dma_alloc_coherent(&pci_dev->dev, desc->len, &desc->p_addr,
+ GFP_KERNEL);
+ return (desc->v_addr != NULL) ? 0 : -ENOMEM;
+}
+
+/*
+ * we have 8 bits used like this:
+ *
+ * 7 6 5 4 3 2 1 0
+ * | | | | | | | |
+ * | | | | | | +-+-------- AC queue (0-3)
+ * | | | | | |
+ * | +-+-+-+-+------------ HW queue ID
+ * |
+ * +---------------------- unused
+ */
+static inline void
+il_set_swq_id(struct il_tx_queue *txq, u8 ac, u8 hwq)
+{
+ BUG_ON(ac > 3); /* only have 2 bits */
+ BUG_ON(hwq > 31); /* only use 5 bits */
+
+ txq->swq_id = (hwq << 2) | ac;
+}
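+
+/*
+ * Worked example (illustrative): ac = 2, hwq = 5 gives
+ * swq_id = (5 << 2) | 2 = 0x16; il_wake_queue()/il_stop_queue() below
+ * recover ac and hwq again with "& 3" and ">> 2" respectively.
+ */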
+
+static inline void
+il_wake_queue(struct il_priv *il, struct il_tx_queue *txq)
+{
+ u8 queue = txq->swq_id;
+ u8 ac = queue & 3;
+ u8 hwq = (queue >> 2) & 0x1f;
+
+ if (test_and_clear_bit(hwq, il->queue_stopped))
+ if (atomic_dec_return(&il->queue_stop_count[ac]) <= 0)
+ ieee80211_wake_queue(il->hw, ac);
+}
+
+static inline void
+il_stop_queue(struct il_priv *il, struct il_tx_queue *txq)
+{
+ u8 queue = txq->swq_id;
+ u8 ac = queue & 3;
+ u8 hwq = (queue >> 2) & 0x1f;
+
+ if (!test_and_set_bit(hwq, il->queue_stopped))
+ if (atomic_inc_return(&il->queue_stop_count[ac]) > 0)
+ ieee80211_stop_queue(il->hw, ac);
+}
+
+#ifdef ieee80211_stop_queue
+#undef ieee80211_stop_queue
+#endif
+
+#define ieee80211_stop_queue DO_NOT_USE_ieee80211_stop_queue
+
+#ifdef ieee80211_wake_queue
+#undef ieee80211_wake_queue
+#endif
+
+#define ieee80211_wake_queue DO_NOT_USE_ieee80211_wake_queue
+
+static inline void
+il_disable_interrupts(struct il_priv *il)
+{
+ clear_bit(S_INT_ENABLED, &il->status);
+
+ /* disable interrupts from uCode/NIC to host */
+ _il_wr(il, CSR_INT_MASK, 0x00000000);
+
+ /* acknowledge/clear/reset any interrupts still pending
+ * from uCode or flow handler (Rx/Tx DMA) */
+ _il_wr(il, CSR_INT, 0xffffffff);
+ _il_wr(il, CSR_FH_INT_STATUS, 0xffffffff);
+}
+
+static inline void
+il_enable_rfkill_int(struct il_priv *il)
+{
+ _il_wr(il, CSR_INT_MASK, CSR_INT_BIT_RF_KILL);
+}
+
+static inline void
+il_enable_interrupts(struct il_priv *il)
+{
+ set_bit(S_INT_ENABLED, &il->status);
+ _il_wr(il, CSR_INT_MASK, il->inta_mask);
+}
+
+/**
+ * il_beacon_time_mask_low - mask of lower 32 bit of beacon time
+ * @il -- pointer to il_priv data structure
+ * @tsf_bits -- number of bits needed to shift for masking
+ */
+static inline u32
+il_beacon_time_mask_low(struct il_priv *il, u16 tsf_bits)
+{
+ return (1 << tsf_bits) - 1;
+}
+
+/**
+ * il_beacon_time_mask_high - mask of higher 32 bit of beacon time
+ * @il -- pointer to il_priv data structure
+ * @tsf_bits -- number of bits needed to shift for masking
+ */
+static inline u32
+il_beacon_time_mask_high(struct il_priv *il, u16 tsf_bits)
+{
+ return ((1 << (32 - tsf_bits)) - 1) << tsf_bits;
+}
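+
+/*
+ * Worked example (illustrative, tsf_bits chosen arbitrarily): with
+ * tsf_bits = 22, il_beacon_time_mask_low() returns 0x003FFFFF and
+ * il_beacon_time_mask_high() returns 0xFFC00000; together the two masks
+ * cover all 32 bits of the beacon time word.
+ */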
+
+/**
+ * struct il_rb_status - receive buffer status, host memory mapped FH registers
+ *
+ * @closed_rb_num [0:11] - Indicates the idx of the RB which was closed
+ * @closed_fr_num [0:11] - Indicates the idx of the RX Frame which was closed
+ * @finished_rb_num [0:11] - Indicates the idx of the current RB
+ * in which the last frame was written to
+ * @finished_fr_num [0:11] - Indicates the idx of the RX Frame
+ * which was transferred
+ */
+struct il_rb_status {
+ __le16 closed_rb_num;
+ __le16 closed_fr_num;
+ __le16 finished_rb_num;
+ __le16 finished_fr_nam;
+ __le32 __unused; /* 3945 only */
+} __packed;
+
+#define TFD_QUEUE_SIZE_MAX (256)
+#define TFD_QUEUE_SIZE_BC_DUP (64)
+#define TFD_QUEUE_BC_SIZE (TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP)
+#define IL_TX_DMA_MASK DMA_BIT_MASK(36)
+#define IL_NUM_OF_TBS 20
+
+static inline u8
+il_get_dma_hi_addr(dma_addr_t addr)
+{
+ return (sizeof(addr) > sizeof(u32) ? (addr >> 16) >> 16 : 0) & 0xF;
+}
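+
+/*
+ * Worked example (illustrative): on configurations where dma_addr_t is
+ * wider than 32 bits, a 36-bit DMA address such as 0x380000000ULL yields
+ * (0x380000000 >> 32) & 0xF = 0x3, i.e. bits [35:32] as allowed by
+ * IL_TX_DMA_MASK above.
+ */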
+
+/**
+ * struct il_tfd_tb - transmit buffer descriptor within transmit frame descriptor
+ *
+ * This structure contains the dma address and length of one transmit buffer
+ *
+ * @lo: low [31:0] portion of the dma address of the TX buffer; every even
+ * entry is unaligned on a 16 bit boundary
+ * @hi_n_len: 0-3 [35:32] portion of the dma address
+ * 4-15 length of the tx buffer
+ */
+struct il_tfd_tb {
+ __le32 lo;
+ __le16 hi_n_len;
+} __packed;
+
+/**
+ * struct il_tfd
+ *
+ * Transmit Frame Descriptor (TFD)
+ *
+ * @ __reserved1[3] reserved
+ * @ num_tbs 0-4 number of active tbs
+ * 5 reserved
+ * 6-7 padding (not used)
+ * @ tbs[20] transmit frame buffer descriptors
+ * @ __pad padding
+ *
+ * Each Tx queue uses a circular buffer of 256 TFDs stored in host DRAM.
+ * Both driver and device share these circular buffers, each of which must be
+ * contiguous 256 TFDs x 128 bytes-per-TFD = 32 KBytes
+ *
+ * Driver must indicate the physical address of the base of each
+ * circular buffer via the FH49_MEM_CBBC_QUEUE registers.
+ *
+ * Each TFD contains pointer/size information for up to 20 data buffers
+ * in host DRAM. These buffers collectively contain the (one) frame described
+ * by the TFD. Each buffer must be a single contiguous block of memory within
+ * itself, but buffers may be scattered in host DRAM. Each buffer has max size
+ * of (4K - 4). The device concatenates all of a TFD's buffers into a
+ * single Tx frame, up to 8 KBytes in size.
+ *
+ * A maximum of 255 (not 256!) TFDs may be on a queue waiting for Tx.
+ */
+struct il_tfd {
+ u8 __reserved1[3];
+ u8 num_tbs;
+ struct il_tfd_tb tbs[IL_NUM_OF_TBS];
+ __le32 __pad;
+} __packed;
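+
+/*
+ * Sanity check (illustrative): since struct il_tfd is __packed, its size
+ * works out to 3 + 1 + 20 * sizeof(struct il_tfd_tb) + 4 =
+ * 3 + 1 + 120 + 4 = 128 bytes, matching the "128 bytes-per-TFD" figure
+ * in the comment above.
+ */
+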
+/* PCI registers */
+#define PCI_CFG_RETRY_TIMEOUT 0x041
+
+/* PCI register values */
+#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01
+#define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02
+
+struct il_rate_info {
+ u8 plcp; /* uCode API: RATE_6M_PLCP, etc. */
+ u8 plcp_siso; /* uCode API: RATE_SISO_6M_PLCP, etc. */
+ u8 plcp_mimo2; /* uCode API: RATE_MIMO2_6M_PLCP, etc. */
+ u8 ieee; /* MAC header: RATE_6M_IEEE, etc. */
+ u8 prev_ieee; /* previous rate in IEEE speeds */
+ u8 next_ieee; /* next rate in IEEE speeds */
+ u8 prev_rs; /* previous rate used in rs algo */
+ u8 next_rs; /* next rate used in rs algo */
+ u8 prev_rs_tgg; /* previous rate used in TGG rs algo */
+ u8 next_rs_tgg; /* next rate used in TGG rs algo */
+};
+
+struct il3945_rate_info {
+ u8 plcp; /* uCode API: RATE_6M_PLCP, etc. */
+ u8 ieee; /* MAC header: RATE_6M_IEEE, etc. */
+ u8 prev_ieee; /* previous rate in IEEE speeds */
+ u8 next_ieee; /* next rate in IEEE speeds */
+ u8 prev_rs; /* previous rate used in rs algo */
+ u8 next_rs; /* next rate used in rs algo */
+ u8 prev_rs_tgg; /* previous rate used in TGG rs algo */
+ u8 next_rs_tgg; /* next rate used in TGG rs algo */
+ u8 table_rs_idx; /* idx in rate scale table cmd */
+ u8 prev_table_rs; /* prev in rate table cmd */
+};
+
+/*
+ * These serve as idxes into
+ * struct il_rate_info il_rates[RATE_COUNT];
+ */
+enum {
+ RATE_1M_IDX = 0,
+ RATE_2M_IDX,
+ RATE_5M_IDX,
+ RATE_11M_IDX,
+ RATE_6M_IDX,
+ RATE_9M_IDX,
+ RATE_12M_IDX,
+ RATE_18M_IDX,
+ RATE_24M_IDX,
+ RATE_36M_IDX,
+ RATE_48M_IDX,
+ RATE_54M_IDX,
+ RATE_60M_IDX,
+ RATE_COUNT,
+ RATE_COUNT_LEGACY = RATE_COUNT - 1, /* Excluding 60M */
+ RATE_COUNT_3945 = RATE_COUNT - 1,
+ RATE_INVM_IDX = RATE_COUNT,
+ RATE_INVALID = RATE_COUNT,
+};
+
+enum {
+ RATE_6M_IDX_TBL = 0,
+ RATE_9M_IDX_TBL,
+ RATE_12M_IDX_TBL,
+ RATE_18M_IDX_TBL,
+ RATE_24M_IDX_TBL,
+ RATE_36M_IDX_TBL,
+ RATE_48M_IDX_TBL,
+ RATE_54M_IDX_TBL,
+ RATE_1M_IDX_TBL,
+ RATE_2M_IDX_TBL,
+ RATE_5M_IDX_TBL,
+ RATE_11M_IDX_TBL,
+ RATE_INVM_IDX_TBL = RATE_INVM_IDX - 1,
+};
+
+enum {
+ IL_FIRST_OFDM_RATE = RATE_6M_IDX,
+ IL39_LAST_OFDM_RATE = RATE_54M_IDX,
+ IL_LAST_OFDM_RATE = RATE_60M_IDX,
+ IL_FIRST_CCK_RATE = RATE_1M_IDX,
+ IL_LAST_CCK_RATE = RATE_11M_IDX,
+};
+
+/* #define vs. enum to keep from defaulting to 'large integer' */
+#define RATE_6M_MASK (1 << RATE_6M_IDX)
+#define RATE_9M_MASK (1 << RATE_9M_IDX)
+#define RATE_12M_MASK (1 << RATE_12M_IDX)
+#define RATE_18M_MASK (1 << RATE_18M_IDX)
+#define RATE_24M_MASK (1 << RATE_24M_IDX)
+#define RATE_36M_MASK (1 << RATE_36M_IDX)
+#define RATE_48M_MASK (1 << RATE_48M_IDX)
+#define RATE_54M_MASK (1 << RATE_54M_IDX)
+#define RATE_60M_MASK (1 << RATE_60M_IDX)
+#define RATE_1M_MASK (1 << RATE_1M_IDX)
+#define RATE_2M_MASK (1 << RATE_2M_IDX)
+#define RATE_5M_MASK (1 << RATE_5M_IDX)
+#define RATE_11M_MASK (1 << RATE_11M_IDX)
+
+/* uCode API values for legacy bit rates, both OFDM and CCK */
+enum {
+ RATE_6M_PLCP = 13,
+ RATE_9M_PLCP = 15,
+ RATE_12M_PLCP = 5,
+ RATE_18M_PLCP = 7,
+ RATE_24M_PLCP = 9,
+ RATE_36M_PLCP = 11,
+ RATE_48M_PLCP = 1,
+ RATE_54M_PLCP = 3,
+ RATE_60M_PLCP = 3, /*FIXME:RS:should be removed */
+ RATE_1M_PLCP = 10,
+ RATE_2M_PLCP = 20,
+ RATE_5M_PLCP = 55,
+ RATE_11M_PLCP = 110,
+ /*FIXME:RS:add RATE_LEGACY_INVM_PLCP = 0, */
+};
+
+/* uCode API values for OFDM high-throughput (HT) bit rates */
+enum {
+ RATE_SISO_6M_PLCP = 0,
+ RATE_SISO_12M_PLCP = 1,
+ RATE_SISO_18M_PLCP = 2,
+ RATE_SISO_24M_PLCP = 3,
+ RATE_SISO_36M_PLCP = 4,
+ RATE_SISO_48M_PLCP = 5,
+ RATE_SISO_54M_PLCP = 6,
+ RATE_SISO_60M_PLCP = 7,
+ RATE_MIMO2_6M_PLCP = 0x8,
+ RATE_MIMO2_12M_PLCP = 0x9,
+ RATE_MIMO2_18M_PLCP = 0xa,
+ RATE_MIMO2_24M_PLCP = 0xb,
+ RATE_MIMO2_36M_PLCP = 0xc,
+ RATE_MIMO2_48M_PLCP = 0xd,
+ RATE_MIMO2_54M_PLCP = 0xe,
+ RATE_MIMO2_60M_PLCP = 0xf,
+ RATE_SISO_INVM_PLCP,
+ RATE_MIMO2_INVM_PLCP = RATE_SISO_INVM_PLCP,
+};
+
+/* MAC header values for bit rates */
+enum {
+ RATE_6M_IEEE = 12,
+ RATE_9M_IEEE = 18,
+ RATE_12M_IEEE = 24,
+ RATE_18M_IEEE = 36,
+ RATE_24M_IEEE = 48,
+ RATE_36M_IEEE = 72,
+ RATE_48M_IEEE = 96,
+ RATE_54M_IEEE = 108,
+ RATE_60M_IEEE = 120,
+ RATE_1M_IEEE = 2,
+ RATE_2M_IEEE = 4,
+ RATE_5M_IEEE = 11,
+ RATE_11M_IEEE = 22,
+};
+
+#define IL_CCK_BASIC_RATES_MASK \
+ (RATE_1M_MASK | \
+ RATE_2M_MASK)
+
+#define IL_CCK_RATES_MASK \
+ (IL_CCK_BASIC_RATES_MASK | \
+ RATE_5M_MASK | \
+ RATE_11M_MASK)
+
+#define IL_OFDM_BASIC_RATES_MASK \
+ (RATE_6M_MASK | \
+ RATE_12M_MASK | \
+ RATE_24M_MASK)
+
+#define IL_OFDM_RATES_MASK \
+ (IL_OFDM_BASIC_RATES_MASK | \
+ RATE_9M_MASK | \
+ RATE_18M_MASK | \
+ RATE_36M_MASK | \
+ RATE_48M_MASK | \
+ RATE_54M_MASK)
+
+#define IL_BASIC_RATES_MASK \
+ (IL_OFDM_BASIC_RATES_MASK | \
+ IL_CCK_BASIC_RATES_MASK)
+
+#define RATES_MASK ((1 << RATE_COUNT) - 1)
+#define RATES_MASK_3945 ((1 << RATE_COUNT_3945) - 1)
+
+#define IL_INVALID_VALUE -1
+
+#define IL_MIN_RSSI_VAL -100
+#define IL_MAX_RSSI_VAL 0
+
+/* These values specify how many Tx frame attempts before
+ * searching for a new modulation mode */
+#define IL_LEGACY_FAILURE_LIMIT 160
+#define IL_LEGACY_SUCCESS_LIMIT 480
+#define IL_LEGACY_TBL_COUNT 160
+
+#define IL_NONE_LEGACY_FAILURE_LIMIT 400
+#define IL_NONE_LEGACY_SUCCESS_LIMIT 4500
+#define IL_NONE_LEGACY_TBL_COUNT 1500
+
+/* Success ratio (ACKed / attempted tx frames) values (perfect is 128 * 100) */
+#define IL_RS_GOOD_RATIO 12800 /* 100% */
+#define RATE_SCALE_SWITCH 10880 /* 85% */
+#define RATE_HIGH_TH 10880 /* 85% */
+#define RATE_INCREASE_TH 6400 /* 50% */
+#define RATE_DECREASE_TH 1920 /* 15% */
+
+/* possible actions when in legacy mode */
+#define IL_LEGACY_SWITCH_ANTENNA1 0
+#define IL_LEGACY_SWITCH_ANTENNA2 1
+#define IL_LEGACY_SWITCH_SISO 2
+#define IL_LEGACY_SWITCH_MIMO2_AB 3
+#define IL_LEGACY_SWITCH_MIMO2_AC 4
+#define IL_LEGACY_SWITCH_MIMO2_BC 5
+
+/* possible actions when in siso mode */
+#define IL_SISO_SWITCH_ANTENNA1 0
+#define IL_SISO_SWITCH_ANTENNA2 1
+#define IL_SISO_SWITCH_MIMO2_AB 2
+#define IL_SISO_SWITCH_MIMO2_AC 3
+#define IL_SISO_SWITCH_MIMO2_BC 4
+#define IL_SISO_SWITCH_GI 5
+
+/* possible actions when in mimo mode */
+#define IL_MIMO2_SWITCH_ANTENNA1 0
+#define IL_MIMO2_SWITCH_ANTENNA2 1
+#define IL_MIMO2_SWITCH_SISO_A 2
+#define IL_MIMO2_SWITCH_SISO_B 3
+#define IL_MIMO2_SWITCH_SISO_C 4
+#define IL_MIMO2_SWITCH_GI 5
+
+#define IL_MAX_SEARCH IL_MIMO2_SWITCH_GI
+
+#define IL_ACTION_LIMIT 3 /* # possible actions */
+
+#define LQ_SIZE 2 /* 2 mode tables: "Active" and "Search" */
+
+/* load per tid defines for A-MPDU activation */
+#define IL_AGG_TPT_THREHOLD 0
+#define IL_AGG_LOAD_THRESHOLD 10
+#define IL_AGG_ALL_TID 0xff
+#define TID_QUEUE_CELL_SPACING 50 /*mS */
+#define TID_QUEUE_MAX_SIZE 20
+#define TID_ROUND_VALUE 5 /* mS */
+#define TID_MAX_LOAD_COUNT 8
+
+#define TID_MAX_TIME_DIFF ((TID_QUEUE_MAX_SIZE - 1) * TID_QUEUE_CELL_SPACING)
+#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? (y) - (x) : (0-(x)) + (y))
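+
+/*
+ * Worked example (illustrative): TIME_WRAP_AROUND() assumes unsigned time
+ * stamps; if x = ULONG_MAX - 15 and y = 16, the result is
+ * (0 - x) + y = 16 + 16 = 32, i.e. the elapsed time across the wrap-around.
+ */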
+
+extern const struct il_rate_info il_rates[RATE_COUNT];
+
+enum il_table_type {
+ LQ_NONE,
+ LQ_G, /* legacy types */
+ LQ_A,
+ LQ_SISO, /* high-throughput types */
+ LQ_MIMO2,
+ LQ_MAX,
+};
+
+#define is_legacy(tbl) ((tbl) == LQ_G || (tbl) == LQ_A)
+#define is_siso(tbl) ((tbl) == LQ_SISO)
+#define is_mimo2(tbl) ((tbl) == LQ_MIMO2)
+#define is_mimo(tbl) (is_mimo2(tbl))
+#define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl))
+#define is_a_band(tbl) ((tbl) == LQ_A)
+#define is_g_and(tbl) ((tbl) == LQ_G)
+
+#define ANT_NONE 0x0
+#define ANT_A BIT(0)
+#define ANT_B BIT(1)
+#define ANT_AB (ANT_A | ANT_B)
+#define ANT_C BIT(2)
+#define ANT_AC (ANT_A | ANT_C)
+#define ANT_BC (ANT_B | ANT_C)
+#define ANT_ABC (ANT_AB | ANT_C)
+
+#define IL_MAX_MCS_DISPLAY_SIZE 12
+
+struct il_rate_mcs_info {
+ char mbps[IL_MAX_MCS_DISPLAY_SIZE];
+ char mcs[IL_MAX_MCS_DISPLAY_SIZE];
+};
+
+/**
+ * struct il_rate_scale_data -- tx success history for one rate
+ */
+struct il_rate_scale_data {
+ u64 data; /* bitmap of successful frames */
+ s32 success_counter; /* number of frames successful */
+ s32 success_ratio; /* per-cent * 128 */
+ s32 counter; /* number of frames attempted */
+ s32 average_tpt; /* success ratio * expected throughput */
+ unsigned long stamp;
+};
+
+/**
+ * struct il_scale_tbl_info -- tx params and success history for all rates
+ *
+ * There are two of these in struct il_lq_sta,
+ * one for "active", and one for "search".
+ */
+struct il_scale_tbl_info {
+ enum il_table_type lq_type;
+ u8 ant_type;
+ u8 is_SGI; /* 1 = short guard interval */
+ u8 is_ht40; /* 1 = 40 MHz channel width */
+ u8 is_dup; /* 1 = duplicated data streams */
+ u8 action; /* change modulation; IL_[LEGACY/SISO/MIMO]_SWITCH_* */
+ u8 max_search; /* maximum number of tables we can search */
+ s32 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */
+ u32 current_rate; /* rate_n_flags, uCode API format */
+ struct il_rate_scale_data win[RATE_COUNT]; /* rate histories */
+};
+
+struct il_traffic_load {
+ unsigned long time_stamp; /* age of the oldest stats */
+ u32 packet_count[TID_QUEUE_MAX_SIZE]; /* packet count in this time
+ * slice */
+ u32 total; /* total num of packets during the
+ * last TID_MAX_TIME_DIFF */
+ u8 queue_count; /* number of queues that have
+ * been used since the last cleanup */
+ u8 head; /* start of the circular buffer */
+};
+
+/**
+ * struct il_lq_sta -- driver's rate scaling private structure
+ *
+ * Pointer to this gets passed back and forth between driver and mac80211.
+ */
+struct il_lq_sta {
+ u8 active_tbl; /* idx of active table, range 0-1 */
+ u8 enable_counter; /* indicates HT mode */
+ u8 stay_in_tbl; /* 1: disallow, 0: allow search for new mode */
+ u8 search_better_tbl; /* 1: currently trying alternate mode */
+ s32 last_tpt;
+
+ /* The following determine when to search for a new mode */
+ u32 table_count_limit;
+ u32 max_failure_limit; /* # failed frames before new search */
+ u32 max_success_limit; /* # successful frames before new search */
+ u32 table_count;
+ u32 total_failed; /* total failed frames, any/all rates */
+ u32 total_success; /* total successful frames, any/all rates */
+ u64 flush_timer; /* time staying in mode before new search */
+
+ u8 action_counter; /* # mode-switch actions tried */
+ u8 is_green;
+ u8 is_dup;
+ enum ieee80211_band band;
+
+ /* The following are bitmaps of rates; RATE_6M_MASK, etc. */
+ u32 supp_rates;
+ u16 active_legacy_rate;
+ u16 active_siso_rate;
+ u16 active_mimo2_rate;
+ s8 max_rate_idx; /* Max rate set by user */
+ u8 missed_rate_counter;
+
+ struct il_link_quality_cmd lq;
+ struct il_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */
+ struct il_traffic_load load[TID_MAX_LOAD_COUNT];
+ u8 tx_agg_tid_en;
+#ifdef CONFIG_MAC80211_DEBUGFS
+ struct dentry *rs_sta_dbgfs_scale_table_file;
+ struct dentry *rs_sta_dbgfs_stats_table_file;
+ struct dentry *rs_sta_dbgfs_rate_scale_data_file;
+ struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file;
+ u32 dbg_fixed_rate;
+#endif
+ struct il_priv *drv;
+
+ /* used to be in sta_info */
+ int last_txrate_idx;
+ /* last tx rate_n_flags */
+ u32 last_rate_n_flags;
+ /* packets destined for this STA are aggregated */
+ u8 is_agg;
+};
+
+/*
+ * il_station_priv: Driver's private station information
+ *
+ * When mac80211 creates a station it reserves some space (hw->sta_data_size)
+ * in the structure for use by the driver. This structure is placed in that
+ * space.
+ *
+ * The common struct MUST be first because it is shared between
+ * 3945 and 4965!
+ */
+struct il_station_priv {
+ struct il_station_priv_common common;
+ struct il_lq_sta lq_sta;
+ atomic_t pending_frames;
+ bool client;
+ bool asleep;
+};
+
+static inline u8
+il4965_num_of_ant(u8 m)
+{
+ return !!(m & ANT_A) + !!(m & ANT_B) + !!(m & ANT_C);
+}
+
+static inline u8
+il4965_first_antenna(u8 mask)
+{
+ if (mask & ANT_A)
+ return ANT_A;
+ if (mask & ANT_B)
+ return ANT_B;
+ return ANT_C;
+}
+
+/**
+ * il3945_rate_scale_init - Initialize the rate scale table based on assoc info
+ *
+ * The specific throughput table used is based on the type of network
+ * the station is associated with, including A, B, G, and G w/ TGG protection
+ */
+extern void il3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id);
+
+/* Initialize station's rate scaling information after adding station */
+extern void il4965_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta,
+ u8 sta_id);
+extern void il3945_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta,
+ u8 sta_id);
+
+/**
+ * il_rate_control_register - Register the rate control algorithm callbacks
+ *
+ * Since the rate control algorithm is hardware specific, there is no need
+ * or reason to place it as a stand-alone module. The driver can call
+ * il_rate_control_register in order to register the rate control callbacks
+ * with the mac80211 subsystem. This should be performed prior to calling
+ * ieee80211_register_hw.
+ *
+ */
+extern int il4965_rate_control_register(void);
+extern int il3945_rate_control_register(void);
+
+/**
+ * il_rate_control_unregister - Unregister the rate control callbacks
+ *
+ * This should be called after calling ieee80211_unregister_hw, but before
+ * the driver is unloaded.
+ */
+extern void il4965_rate_control_unregister(void);
+extern void il3945_rate_control_unregister(void);
+
+extern int il_power_update_mode(struct il_priv *il, bool force);
+extern void il_power_initialize(struct il_priv *il);
+
+extern u32 il_debug_level;
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+/*
+ * il_get_debug_level: Return active debug level for device
+ *
+ * Using sysfs it is possible to set a per-device debug level. This debug
+ * level will be used if set; otherwise the global debug level, which can
+ * be set via module parameter, is used.
+ */
+static inline u32
+il_get_debug_level(struct il_priv *il)
+{
+ if (il->debug_level)
+ return il->debug_level;
+ else
+ return il_debug_level;
+}
+#else
+static inline u32
+il_get_debug_level(struct il_priv *il)
+{
+ return il_debug_level;
+}
+#endif
+
+#define il_print_hex_error(il, p, len) \
+do { \
+ print_hex_dump(KERN_ERR, "iwl data: ", \
+ DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
+} while (0)
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+#define IL_DBG(level, fmt, args...) \
+do { \
+ if (il_get_debug_level(il) & level) \
+ dev_printk(KERN_ERR, &il->hw->wiphy->dev, \
+ "%c %s " fmt, in_interrupt() ? 'I' : 'U', \
+ __func__ , ## args); \
+} while (0)
+
+#define il_print_hex_dump(il, level, p, len) \
+do { \
+ if (il_get_debug_level(il) & level) \
+ print_hex_dump(KERN_DEBUG, "iwl data: ", \
+ DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
+} while (0)
+
+#else
+#define IL_DBG(level, fmt, args...)
+static inline void
+il_print_hex_dump(struct il_priv *il, int level, const void *p, u32 len)
+{
+}
+#endif /* CONFIG_IWLEGACY_DEBUG */
+
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+int il_dbgfs_register(struct il_priv *il, const char *name);
+void il_dbgfs_unregister(struct il_priv *il);
+#else
+static inline int
+il_dbgfs_register(struct il_priv *il, const char *name)
+{
+ return 0;
+}
+
+static inline void
+il_dbgfs_unregister(struct il_priv *il)
+{
+}
+#endif /* CONFIG_IWLEGACY_DEBUGFS */
+
+/*
+ * To use the debug system:
+ *
+ * If you are defining a new debug classification, simply add it to the #define
+ * list here in the form of
+ *
+ * #define IL_DL_xxxx VALUE
+ *
+ * where xxxx should be the name of the classification (for example, WEP).
+ *
+ * You then need to either add a IL_xxxx_DEBUG() macro definition for your
+ * classification, or use IL_DBG(IL_DL_xxxx, ...) whenever you want
+ * to send output to that classification.
+ *
+ * The active debug levels can be accessed via files
+ *
+ * /sys/module/iwl4965/parameters/debug
+ * /sys/module/iwl3945/parameters/debug
+ * /sys/class/net/wlan0/device/debug_level
+ *
+ * when CONFIG_IWLEGACY_DEBUG=y.
+ */
+
+/* 0x0000000F - 0x00000001 */
+#define IL_DL_INFO (1 << 0)
+#define IL_DL_MAC80211 (1 << 1)
+#define IL_DL_HCMD (1 << 2)
+#define IL_DL_STATE (1 << 3)
+/* 0x000000F0 - 0x00000010 */
+#define IL_DL_MACDUMP (1 << 4)
+#define IL_DL_HCMD_DUMP (1 << 5)
+#define IL_DL_EEPROM (1 << 6)
+#define IL_DL_RADIO (1 << 7)
+/* 0x00000F00 - 0x00000100 */
+#define IL_DL_POWER (1 << 8)
+#define IL_DL_TEMP (1 << 9)
+#define IL_DL_NOTIF (1 << 10)
+#define IL_DL_SCAN (1 << 11)
+/* 0x0000F000 - 0x00001000 */
+#define IL_DL_ASSOC (1 << 12)
+#define IL_DL_DROP (1 << 13)
+#define IL_DL_TXPOWER (1 << 14)
+#define IL_DL_AP (1 << 15)
+/* 0x000F0000 - 0x00010000 */
+#define IL_DL_FW (1 << 16)
+#define IL_DL_RF_KILL (1 << 17)
+#define IL_DL_FW_ERRORS (1 << 18)
+#define IL_DL_LED (1 << 19)
+/* 0x00F00000 - 0x00100000 */
+#define IL_DL_RATE (1 << 20)
+#define IL_DL_CALIB (1 << 21)
+#define IL_DL_WEP (1 << 22)
+#define IL_DL_TX (1 << 23)
+/* 0x0F000000 - 0x01000000 */
+#define IL_DL_RX (1 << 24)
+#define IL_DL_ISR (1 << 25)
+#define IL_DL_HT (1 << 26)
+/* 0xF0000000 - 0x10000000 */
+#define IL_DL_11H (1 << 28)
+#define IL_DL_STATS (1 << 29)
+#define IL_DL_TX_REPLY (1 << 30)
+#define IL_DL_QOS (1 << 31)
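+
+/*
+ * Example (illustrative): writing 0x801 to one of the debug files listed
+ * earlier selects IL_DL_INFO (1 << 0) together with IL_DL_SCAN (1 << 11);
+ * any combination of the IL_DL_* bits above can be OR-ed together the
+ * same way.
+ */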
+
+#define D_INFO(f, a...) IL_DBG(IL_DL_INFO, f, ## a)
+#define D_MAC80211(f, a...) IL_DBG(IL_DL_MAC80211, f, ## a)
+#define D_MACDUMP(f, a...) IL_DBG(IL_DL_MACDUMP, f, ## a)
+#define D_TEMP(f, a...) IL_DBG(IL_DL_TEMP, f, ## a)
+#define D_SCAN(f, a...) IL_DBG(IL_DL_SCAN, f, ## a)
+#define D_RX(f, a...) IL_DBG(IL_DL_RX, f, ## a)
+#define D_TX(f, a...) IL_DBG(IL_DL_TX, f, ## a)
+#define D_ISR(f, a...) IL_DBG(IL_DL_ISR, f, ## a)
+#define D_LED(f, a...) IL_DBG(IL_DL_LED, f, ## a)
+#define D_WEP(f, a...) IL_DBG(IL_DL_WEP, f, ## a)
+#define D_HC(f, a...) IL_DBG(IL_DL_HCMD, f, ## a)
+#define D_HC_DUMP(f, a...) IL_DBG(IL_DL_HCMD_DUMP, f, ## a)
+#define D_EEPROM(f, a...) IL_DBG(IL_DL_EEPROM, f, ## a)
+#define D_CALIB(f, a...) IL_DBG(IL_DL_CALIB, f, ## a)
+#define D_FW(f, a...) IL_DBG(IL_DL_FW, f, ## a)
+#define D_RF_KILL(f, a...) IL_DBG(IL_DL_RF_KILL, f, ## a)
+#define D_DROP(f, a...) IL_DBG(IL_DL_DROP, f, ## a)
+#define D_AP(f, a...) IL_DBG(IL_DL_AP, f, ## a)
+#define D_TXPOWER(f, a...) IL_DBG(IL_DL_TXPOWER, f, ## a)
+#define D_RATE(f, a...) IL_DBG(IL_DL_RATE, f, ## a)
+#define D_NOTIF(f, a...) IL_DBG(IL_DL_NOTIF, f, ## a)
+#define D_ASSOC(f, a...) IL_DBG(IL_DL_ASSOC, f, ## a)
+#define D_HT(f, a...) IL_DBG(IL_DL_HT, f, ## a)
+#define D_STATS(f, a...) IL_DBG(IL_DL_STATS, f, ## a)
+#define D_TX_REPLY(f, a...) IL_DBG(IL_DL_TX_REPLY, f, ## a)
+#define D_QOS(f, a...) IL_DBG(IL_DL_QOS, f, ## a)
+#define D_RADIO(f, a...) IL_DBG(IL_DL_RADIO, f, ## a)
+#define D_POWER(f, a...) IL_DBG(IL_DL_POWER, f, ## a)
+#define D_11H(f, a...) IL_DBG(IL_DL_11H, f, ## a)
+
+#endif /* __il_core_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-csr.h b/drivers/net/wireless/iwlegacy/csr.h
index 668a9616c269..9138e15004fa 100644
--- a/drivers/net/wireless/iwlegacy/iwl-csr.h
+++ b/drivers/net/wireless/iwlegacy/csr.h
@@ -60,8 +60,8 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
-#ifndef __iwl_legacy_csr_h__
-#define __iwl_legacy_csr_h__
+#ifndef __il_csr_h__
+#define __il_csr_h__
/*
* CSR (control and status registers)
*
@@ -70,9 +70,9 @@
* low power states due to driver-invoked device resets
* (e.g. CSR_RESET_REG_FLAG_SW_RESET) or uCode-driven power-saving modes.
*
- * Use iwl_write32() and iwl_read32() family to access these registers;
+ * Use _il_wr() and _il_rd() family to access these registers;
* these provide simple PCI bus access, without waking up the MAC.
- * Do not use iwl_legacy_write_direct32() family for these registers;
+ * Do not use il_wr() family for these registers;
* no need to "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ.
* The MAC (uCode processor, etc.) does not need to be powered up for accessing
* the CSR registers.
@@ -82,16 +82,16 @@
*/
#define CSR_BASE (0x000)
-#define CSR_HW_IF_CONFIG_REG (CSR_BASE+0x000) /* hardware interface config */
-#define CSR_INT_COALESCING (CSR_BASE+0x004) /* accum ints, 32-usec units */
-#define CSR_INT (CSR_BASE+0x008) /* host interrupt status/ack */
-#define CSR_INT_MASK (CSR_BASE+0x00c) /* host interrupt enable */
-#define CSR_FH_INT_STATUS (CSR_BASE+0x010) /* busmaster int status/ack*/
-#define CSR_GPIO_IN (CSR_BASE+0x018) /* read external chip pins */
-#define CSR_RESET (CSR_BASE+0x020) /* busmaster enable, NMI, etc*/
+#define CSR_HW_IF_CONFIG_REG (CSR_BASE+0x000) /* hardware interface config */
+#define CSR_INT_COALESCING (CSR_BASE+0x004) /* accum ints, 32-usec units */
+#define CSR_INT (CSR_BASE+0x008) /* host interrupt status/ack */
+#define CSR_INT_MASK (CSR_BASE+0x00c) /* host interrupt enable */
+#define CSR_FH_INT_STATUS (CSR_BASE+0x010) /* busmaster int status/ack */
+#define CSR_GPIO_IN (CSR_BASE+0x018) /* read external chip pins */
+#define CSR_RESET (CSR_BASE+0x020) /* busmaster enable, NMI, etc */
#define CSR_GP_CNTRL (CSR_BASE+0x024)
-/* 2nd byte of CSR_INT_COALESCING, not accessible via iwl_write32()! */
+/* 2nd byte of CSR_INT_COALESCING, not accessible via _il_wr()! */
#define CSR_INT_PERIODIC_REG (CSR_BASE+0x005)
/*
@@ -166,26 +166,26 @@
#define CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A (0x00080000)
#define CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM (0x00200000)
-#define CSR_HW_IF_CONFIG_REG_BIT_NIC_READY (0x00400000) /* PCI_OWN_SEM */
-#define CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE (0x02000000) /* ME_OWN */
-#define CSR_HW_IF_CONFIG_REG_PREPARE (0x08000000) /* WAKE_ME */
+#define CSR_HW_IF_CONFIG_REG_BIT_NIC_READY (0x00400000) /* PCI_OWN_SEM */
+#define CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE (0x02000000) /* ME_OWN */
+#define CSR_HW_IF_CONFIG_REG_PREPARE (0x08000000) /* WAKE_ME */
-#define CSR_INT_PERIODIC_DIS (0x00) /* disable periodic int*/
-#define CSR_INT_PERIODIC_ENA (0xFF) /* 255*32 usec ~ 8 msec*/
+#define CSR_INT_PERIODIC_DIS (0x00) /* disable periodic int */
+#define CSR_INT_PERIODIC_ENA (0xFF) /* 255*32 usec ~ 8 msec */
/* interrupt flags in INTA, set by uCode or hardware (e.g. dma),
* acknowledged (reset) by host writing "1" to flagged bits. */
-#define CSR_INT_BIT_FH_RX (1 << 31) /* Rx DMA, cmd responses, FH_INT[17:16] */
-#define CSR_INT_BIT_HW_ERR (1 << 29) /* DMA hardware error FH_INT[31] */
-#define CSR_INT_BIT_RX_PERIODIC (1 << 28) /* Rx periodic */
-#define CSR_INT_BIT_FH_TX (1 << 27) /* Tx DMA FH_INT[1:0] */
-#define CSR_INT_BIT_SCD (1 << 26) /* TXQ pointer advanced */
-#define CSR_INT_BIT_SW_ERR (1 << 25) /* uCode error */
-#define CSR_INT_BIT_RF_KILL (1 << 7) /* HW RFKILL switch GP_CNTRL[27] toggled */
-#define CSR_INT_BIT_CT_KILL (1 << 6) /* Critical temp (chip too hot) rfkill */
-#define CSR_INT_BIT_SW_RX (1 << 3) /* Rx, command responses, 3945 */
-#define CSR_INT_BIT_WAKEUP (1 << 1) /* NIC controller waking up (pwr mgmt) */
-#define CSR_INT_BIT_ALIVE (1 << 0) /* uCode interrupts once it initializes */
+#define CSR_INT_BIT_FH_RX (1 << 31) /* Rx DMA, cmd responses, FH_INT[17:16] */
+#define CSR_INT_BIT_HW_ERR (1 << 29) /* DMA hardware error FH_INT[31] */
+#define CSR_INT_BIT_RX_PERIODIC (1 << 28) /* Rx periodic */
+#define CSR_INT_BIT_FH_TX (1 << 27) /* Tx DMA FH_INT[1:0] */
+#define CSR_INT_BIT_SCD (1 << 26) /* TXQ pointer advanced */
+#define CSR_INT_BIT_SW_ERR (1 << 25) /* uCode error */
+#define CSR_INT_BIT_RF_KILL (1 << 7) /* HW RFKILL switch GP_CNTRL[27] toggled */
+#define CSR_INT_BIT_CT_KILL (1 << 6) /* Critical temp (chip too hot) rfkill */
+#define CSR_INT_BIT_SW_RX (1 << 3) /* Rx, command responses, 3945 */
+#define CSR_INT_BIT_WAKEUP (1 << 1) /* NIC controller waking up (pwr mgmt) */
+#define CSR_INT_BIT_ALIVE (1 << 0) /* uCode interrupts once it initializes */
#define CSR_INI_SET_MASK (CSR_INT_BIT_FH_RX | \
CSR_INT_BIT_HW_ERR | \
@@ -197,21 +197,20 @@
CSR_INT_BIT_ALIVE)
/* interrupt flags in FH (flow handler) (PCI busmaster DMA) */
-#define CSR_FH_INT_BIT_ERR (1 << 31) /* Error */
-#define CSR_FH_INT_BIT_HI_PRIOR (1 << 30) /* High priority Rx, bypass coalescing */
-#define CSR39_FH_INT_BIT_RX_CHNL2 (1 << 18) /* Rx channel 2 (3945 only) */
-#define CSR_FH_INT_BIT_RX_CHNL1 (1 << 17) /* Rx channel 1 */
-#define CSR_FH_INT_BIT_RX_CHNL0 (1 << 16) /* Rx channel 0 */
-#define CSR39_FH_INT_BIT_TX_CHNL6 (1 << 6) /* Tx channel 6 (3945 only) */
-#define CSR_FH_INT_BIT_TX_CHNL1 (1 << 1) /* Tx channel 1 */
-#define CSR_FH_INT_BIT_TX_CHNL0 (1 << 0) /* Tx channel 0 */
+#define CSR_FH_INT_BIT_ERR (1 << 31) /* Error */
+#define CSR_FH_INT_BIT_HI_PRIOR (1 << 30) /* High priority Rx, bypass coalescing */
+#define CSR39_FH_INT_BIT_RX_CHNL2 (1 << 18) /* Rx channel 2 (3945 only) */
+#define CSR_FH_INT_BIT_RX_CHNL1 (1 << 17) /* Rx channel 1 */
+#define CSR_FH_INT_BIT_RX_CHNL0 (1 << 16) /* Rx channel 0 */
+#define CSR39_FH_INT_BIT_TX_CHNL6 (1 << 6) /* Tx channel 6 (3945 only) */
+#define CSR_FH_INT_BIT_TX_CHNL1 (1 << 1) /* Tx channel 1 */
+#define CSR_FH_INT_BIT_TX_CHNL0 (1 << 0) /* Tx channel 0 */
#define CSR39_FH_INT_RX_MASK (CSR_FH_INT_BIT_HI_PRIOR | \
CSR39_FH_INT_BIT_RX_CHNL2 | \
CSR_FH_INT_BIT_RX_CHNL1 | \
CSR_FH_INT_BIT_RX_CHNL0)
-
#define CSR39_FH_INT_TX_MASK (CSR39_FH_INT_BIT_TX_CHNL6 | \
CSR_FH_INT_BIT_TX_CHNL1 | \
CSR_FH_INT_BIT_TX_CHNL0)
@@ -285,7 +284,6 @@
#define CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE (0x04000000)
#define CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW (0x08000000)
-
/* EEPROM REG */
#define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001)
#define CSR_EEPROM_REG_BIT_CMD (0x00000002)
@@ -293,19 +291,18 @@
#define CSR_EEPROM_REG_MSK_DATA (0xFFFF0000)
/* EEPROM GP */
-#define CSR_EEPROM_GP_VALID_MSK (0x00000007) /* signature */
+#define CSR_EEPROM_GP_VALID_MSK (0x00000007) /* signature */
#define CSR_EEPROM_GP_IF_OWNER_MSK (0x00000180)
#define CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K (0x00000002)
#define CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K (0x00000004)
/* GP REG */
-#define CSR_GP_REG_POWER_SAVE_STATUS_MSK (0x03000000) /* bit 24/25 */
+#define CSR_GP_REG_POWER_SAVE_STATUS_MSK (0x03000000) /* bit 24/25 */
#define CSR_GP_REG_NO_POWER_SAVE (0x00000000)
#define CSR_GP_REG_MAC_POWER_SAVE (0x01000000)
#define CSR_GP_REG_PHY_POWER_SAVE (0x02000000)
#define CSR_GP_REG_POWER_SAVE_ERROR (0x03000000)
-
/* CSR GIO */
#define CSR_GIO_REG_VAL_L0S_ENABLED (0x00000002)
@@ -357,7 +354,7 @@
/* HPET MEM debug */
#define CSR_DBG_HPET_MEM_REG_VAL (0xFFFF0000)
-/* DRAM INT TABLE */
+/* DRAM INT TBL */
#define CSR_DRAM_INT_TBL_ENABLE (1 << 31)
#define CSR_DRAM_INIT_TBL_WRAP_CHECK (1 << 27)
@@ -368,13 +365,13 @@
* to indirectly access device's internal memory or registers that
* may be powered-down.
*
- * Use iwl_legacy_write_direct32()/iwl_legacy_read_direct32() family
+ * Use il_wr()/il_rd() family
* for these registers;
* host must "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ
* to make sure the MAC (uCode processor, etc.) is powered up for accessing
* internal resources.
*
- * Do not use iwl_write32()/iwl_read32() family to access these registers;
+ * Do not use _il_wr()/_il_rd() family to access these registers;
* these provide only simple PCI bus access, without waking up the MAC.
*/
#define HBUS_BASE (0x400)
@@ -411,12 +408,12 @@
#define HBUS_TARG_PRPH_RDAT (HBUS_BASE+0x050)
/*
- * Per-Tx-queue write pointer (index, really!)
- * Indicates index to next TFD that driver will fill (1 past latest filled).
+ * Per-Tx-queue write pointer (idx, really!)
+ * Indicates idx to next TFD that driver will fill (1 past latest filled).
* Bit usage:
- * 0-7: queue write index
+ * 0-7: queue write idx
* 11-8: queue selector
*/
#define HBUS_TARG_WRPTR (HBUS_BASE+0x060)
-#endif /* !__iwl_legacy_csr_h__ */
+#endif /* !__il_csr_h__ */
diff --git a/drivers/net/wireless/iwlegacy/debug.c b/drivers/net/wireless/iwlegacy/debug.c
new file mode 100644
index 000000000000..b1b8926a9c7b
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/debug.c
@@ -0,0 +1,1411 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *****************************************************************************/
+#include <linux/ieee80211.h>
+#include <linux/export.h>
+#include <net/mac80211.h>
+
+#include "common.h"
+
+/* creation and removal of debugfs files */
+#define DEBUGFS_ADD_FILE(name, parent, mode) do { \
+ if (!debugfs_create_file(#name, mode, parent, il, \
+ &il_dbgfs_##name##_ops)) \
+ goto err; \
+} while (0)
+
+#define DEBUGFS_ADD_BOOL(name, parent, ptr) do { \
+ struct dentry *__tmp; \
+ __tmp = debugfs_create_bool(#name, S_IWUSR | S_IRUSR, \
+ parent, ptr); \
+ if (IS_ERR(__tmp) || !__tmp) \
+ goto err; \
+} while (0)
+
+#define DEBUGFS_ADD_X32(name, parent, ptr) do { \
+ struct dentry *__tmp; \
+ __tmp = debugfs_create_x32(#name, S_IWUSR | S_IRUSR, \
+ parent, ptr); \
+ if (IS_ERR(__tmp) || !__tmp) \
+ goto err; \
+} while (0)
+
+/* file operations */
+#define DEBUGFS_READ_FUNC(name) \
+static ssize_t il_dbgfs_##name##_read(struct file *file, \
+ char __user *user_buf, \
+ size_t count, loff_t *ppos);
+
+#define DEBUGFS_WRITE_FUNC(name) \
+static ssize_t il_dbgfs_##name##_write(struct file *file, \
+ const char __user *user_buf, \
+ size_t count, loff_t *ppos);
+
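+/* generic open: stash il_priv from the inode for the read/write handlers */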
+static int
+il_dbgfs_open_file_generic(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+#define DEBUGFS_READ_FILE_OPS(name) \
+ DEBUGFS_READ_FUNC(name); \
+static const struct file_operations il_dbgfs_##name##_ops = { \
+ .read = il_dbgfs_##name##_read, \
+ .open = il_dbgfs_open_file_generic, \
+ .llseek = generic_file_llseek, \
+};
+
+#define DEBUGFS_WRITE_FILE_OPS(name) \
+ DEBUGFS_WRITE_FUNC(name); \
+static const struct file_operations il_dbgfs_##name##_ops = { \
+ .write = il_dbgfs_##name##_write, \
+ .open = il_dbgfs_open_file_generic, \
+ .llseek = generic_file_llseek, \
+};
+
+#define DEBUGFS_READ_WRITE_FILE_OPS(name) \
+ DEBUGFS_READ_FUNC(name); \
+ DEBUGFS_WRITE_FUNC(name); \
+static const struct file_operations il_dbgfs_##name##_ops = { \
+ .write = il_dbgfs_##name##_write, \
+ .read = il_dbgfs_##name##_read, \
+ .open = il_dbgfs_open_file_generic, \
+ .llseek = generic_file_llseek, \
+};
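
For reference, one instantiation of these helpers (sram) expands roughly as follows; this is only the mechanical expansion of the macros above plus DEBUGFS_ADD_FILE from the top of the file, not extra code in the patch:

static ssize_t il_dbgfs_sram_read(struct file *file, char __user *user_buf,
				  size_t count, loff_t *ppos);
static ssize_t il_dbgfs_sram_write(struct file *file,
				   const char __user *user_buf,
				   size_t count, loff_t *ppos);
static const struct file_operations il_dbgfs_sram_ops = {
	.write = il_dbgfs_sram_write,
	.read = il_dbgfs_sram_read,
	.open = il_dbgfs_open_file_generic,
	.llseek = generic_file_llseek,
};

/* and DEBUGFS_ADD_FILE(sram, dir_data, S_IWUSR | S_IRUSR) becomes: */
if (!debugfs_create_file("sram", S_IWUSR | S_IRUSR, dir_data, il,
			 &il_dbgfs_sram_ops))
	goto err;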
+
+static ssize_t
+il_dbgfs_tx_stats_read(struct file *file, char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+
+ struct il_priv *il = file->private_data;
+ char *buf;
+ int pos = 0;
+
+ int cnt;
+ ssize_t ret;
+ const size_t bufsz =
+ 100 + sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX);
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ pos += scnprintf(buf + pos, bufsz - pos, "Management:\n");
+ for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) {
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "\t%25s\t\t: %u\n",
+ il_get_mgmt_string(cnt), il->tx_stats.mgmt[cnt]);
+ }
+ pos += scnprintf(buf + pos, bufsz - pos, "Control\n");
+ for (cnt = 0; cnt < CONTROL_MAX; cnt++) {
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "\t%25s\t\t: %u\n",
+ il_get_ctrl_string(cnt), il->tx_stats.ctrl[cnt]);
+ }
+ pos += scnprintf(buf + pos, bufsz - pos, "Data:\n");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n",
+ il->tx_stats.data_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n",
+ il->tx_stats.data_bytes);
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t
+il_dbgfs_clear_traffic_stats_write(struct file *file,
+ const char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ u32 clear_flag;
+ char buf[8];
+ int buf_size;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%x", &clear_flag) != 1)
+ return -EFAULT;
+ il_clear_traffic_stats(il);
+
+ return count;
+}
+
+static ssize_t
+il_dbgfs_rx_stats_read(struct file *file, char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+
+ struct il_priv *il = file->private_data;
+ char *buf;
+ int pos = 0;
+ int cnt;
+ ssize_t ret;
+ const size_t bufsz =
+ 100 + sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX);
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ pos += scnprintf(buf + pos, bufsz - pos, "Management:\n");
+ for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) {
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "\t%25s\t\t: %u\n",
+ il_get_mgmt_string(cnt), il->rx_stats.mgmt[cnt]);
+ }
+ pos += scnprintf(buf + pos, bufsz - pos, "Control:\n");
+ for (cnt = 0; cnt < CONTROL_MAX; cnt++) {
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "\t%25s\t\t: %u\n",
+ il_get_ctrl_string(cnt), il->rx_stats.ctrl[cnt]);
+ }
+ pos += scnprintf(buf + pos, bufsz - pos, "Data:\n");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n",
+ il->rx_stats.data_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n",
+ il->rx_stats.data_bytes);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+#define BYTE1_MASK 0x000000ff;
+#define BYTE2_MASK 0x0000ffff;
+#define BYTE3_MASK 0x00ffffff;
+static ssize_t
+il_dbgfs_sram_read(struct file *file, char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ u32 val;
+ char *buf;
+ ssize_t ret;
+ int i;
+ int pos = 0;
+ struct il_priv *il = file->private_data;
+ size_t bufsz;
+
+ /* default is to dump the entire data segment */
+ if (!il->dbgfs_sram_offset && !il->dbgfs_sram_len) {
+ il->dbgfs_sram_offset = 0x800000;
+ if (il->ucode_type == UCODE_INIT)
+ il->dbgfs_sram_len = il->ucode_init_data.len;
+ else
+ il->dbgfs_sram_len = il->ucode_data.len;
+ }
+ bufsz = 30 + il->dbgfs_sram_len * sizeof(char) * 10;
+ buf = kmalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n",
+ il->dbgfs_sram_len);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n",
+ il->dbgfs_sram_offset);
+ for (i = il->dbgfs_sram_len; i > 0; i -= 4) {
+ val =
+ il_read_targ_mem(il,
+ il->dbgfs_sram_offset +
+ il->dbgfs_sram_len - i);
+ if (i < 4) {
+ switch (i) {
+ case 1:
+ val &= BYTE1_MASK;
+ break;
+ case 2:
+ val &= BYTE2_MASK;
+ break;
+ case 3:
+ val &= BYTE3_MASK;
+ break;
+ }
+ }
+ if (!(i % 16))
+ pos += scnprintf(buf + pos, bufsz - pos, "\n");
+ pos += scnprintf(buf + pos, bufsz - pos, "0x%08x ", val);
+ }
+ pos += scnprintf(buf + pos, bufsz - pos, "\n");
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t
+il_dbgfs_sram_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ char buf[64];
+ int buf_size;
+ u32 offset, len;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+
+ if (sscanf(buf, "%x,%x", &offset, &len) == 2) {
+ il->dbgfs_sram_offset = offset;
+ il->dbgfs_sram_len = len;
+ } else {
+ il->dbgfs_sram_offset = 0;
+ il->dbgfs_sram_len = 0;
+ }
+
+ return count;
+}
+
+static ssize_t
+il_dbgfs_stations_read(struct file *file, char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ struct il_station_entry *station;
+ int max_sta = il->hw_params.max_stations;
+ char *buf;
+ int i, j, pos = 0;
+ ssize_t ret;
+ /* Add 30 for initial string */
+ const size_t bufsz = 30 + sizeof(char) * 500 * (il->num_stations);
+
+ buf = kmalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "num of stations: %d\n\n",
+ il->num_stations);
+
+ for (i = 0; i < max_sta; i++) {
+ station = &il->stations[i];
+ if (!station->used)
+ continue;
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "station %d - addr: %pM, flags: %#x\n", i,
+ station->sta.sta.addr,
+ station->sta.station_flags_msk);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "TID\tseq_num\ttxq_id\tframes\ttfds\t");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "start_idx\tbitmap\t\t\trate_n_flags\n");
+
+ for (j = 0; j < MAX_TID_COUNT; j++) {
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "%d:\t%#x\t%#x\t%u\t%u\t%u\t\t%#.16llx\t%#x",
+ j, station->tid[j].seq_number,
+ station->tid[j].agg.txq_id,
+ station->tid[j].agg.frame_count,
+ station->tid[j].tfds_in_queue,
+ station->tid[j].agg.start_idx,
+ station->tid[j].agg.bitmap,
+ station->tid[j].agg.rate_n_flags);
+
+ if (station->tid[j].agg.wait_for_ba)
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " - waitforba");
+ pos += scnprintf(buf + pos, bufsz - pos, "\n");
+ }
+
+ pos += scnprintf(buf + pos, bufsz - pos, "\n");
+ }
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t
+il_dbgfs_nvm_read(struct file *file, char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ ssize_t ret;
+ struct il_priv *il = file->private_data;
+ int pos = 0, ofs = 0, buf_size = 0;
+ const u8 *ptr;
+ char *buf;
+ u16 eeprom_ver;
+ size_t eeprom_len = il->cfg->base_params->eeprom_size;
+ buf_size = 4 * eeprom_len + 256;
+
+ if (eeprom_len % 16) {
+ IL_ERR("NVM size is not multiple of 16.\n");
+ return -ENODATA;
+ }
+
+ ptr = il->eeprom;
+ if (!ptr) {
+ IL_ERR("Invalid EEPROM memory\n");
+ return -ENOMEM;
+ }
+
+ /* 4 characters for byte 0xYY */
+ buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!buf) {
+ IL_ERR("Can not allocate Buffer\n");
+ return -ENOMEM;
+ }
+ eeprom_ver = il_eeprom_query16(il, EEPROM_VERSION);
+ pos +=
+ scnprintf(buf + pos, buf_size - pos, "EEPROM " "version: 0x%x\n",
+ eeprom_ver);
+ for (ofs = 0; ofs < eeprom_len; ofs += 16) {
+ pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs);
+ hex_dump_to_buffer(ptr + ofs, 16, 16, 2, buf + pos,
+ buf_size - pos, 0);
+ pos += strlen(buf + pos);
+ if (buf_size - pos > 0)
+ buf[pos++] = '\n';
+ }
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t
+il_dbgfs_channels_read(struct file *file, char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ struct ieee80211_channel *channels = NULL;
+ const struct ieee80211_supported_band *supp_band = NULL;
+ int pos = 0, i, bufsz = PAGE_SIZE;
+ char *buf;
+ ssize_t ret;
+
+ if (!test_bit(S_GEO_CONFIGURED, &il->status))
+ return -EAGAIN;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IL_ERR("Can not allocate Buffer\n");
+ return -ENOMEM;
+ }
+
+ supp_band = il_get_hw_mode(il, IEEE80211_BAND_2GHZ);
+ if (supp_band) {
+ channels = supp_band->channels;
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+			      "Displaying %d channels in 2.4GHz band (802.11bg):\n",
+ supp_band->n_channels);
+
+ for (i = 0; i < supp_band->n_channels; i++)
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "%d: %ddBm: BSS%s%s, %s.\n",
+ channels[i].hw_value,
+ channels[i].max_power,
+ channels[i].
+ flags & IEEE80211_CHAN_RADAR ?
+ " (IEEE 802.11h required)" : "",
+ ((channels[i].
+ flags & IEEE80211_CHAN_NO_IBSS) ||
+ (channels[i].
+ flags & IEEE80211_CHAN_RADAR)) ? "" :
+ ", IBSS",
+ channels[i].
+ flags & IEEE80211_CHAN_PASSIVE_SCAN ?
+ "passive only" : "active/passive");
+ }
+ supp_band = il_get_hw_mode(il, IEEE80211_BAND_5GHZ);
+ if (supp_band) {
+ channels = supp_band->channels;
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "Displaying %d channels in 5.2GHz band (802.11a)\n",
+ supp_band->n_channels);
+
+ for (i = 0; i < supp_band->n_channels; i++)
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "%d: %ddBm: BSS%s%s, %s.\n",
+ channels[i].hw_value,
+ channels[i].max_power,
+ channels[i].
+ flags & IEEE80211_CHAN_RADAR ?
+ " (IEEE 802.11h required)" : "",
+ ((channels[i].
+ flags & IEEE80211_CHAN_NO_IBSS) ||
+ (channels[i].
+ flags & IEEE80211_CHAN_RADAR)) ? "" :
+ ", IBSS",
+ channels[i].
+ flags & IEEE80211_CHAN_PASSIVE_SCAN ?
+ "passive only" : "active/passive");
+ }
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t
+il_dbgfs_status_read(struct file *file, char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+
+ struct il_priv *il = file->private_data;
+ char buf[512];
+ int pos = 0;
+ const size_t bufsz = sizeof(buf);
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "S_HCMD_ACTIVE:\t %d\n",
+ test_bit(S_HCMD_ACTIVE, &il->status));
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "S_INT_ENABLED:\t %d\n",
+ test_bit(S_INT_ENABLED, &il->status));
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "S_RF_KILL_HW:\t %d\n",
+ test_bit(S_RF_KILL_HW, &il->status));
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "S_CT_KILL:\t\t %d\n",
+ test_bit(S_CT_KILL, &il->status));
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "S_INIT:\t\t %d\n",
+ test_bit(S_INIT, &il->status));
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "S_ALIVE:\t\t %d\n",
+ test_bit(S_ALIVE, &il->status));
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "S_READY:\t\t %d\n",
+ test_bit(S_READY, &il->status));
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "S_TEMPERATURE:\t %d\n",
+ test_bit(S_TEMPERATURE, &il->status));
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "S_GEO_CONFIGURED:\t %d\n",
+ test_bit(S_GEO_CONFIGURED, &il->status));
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "S_EXIT_PENDING:\t %d\n",
+ test_bit(S_EXIT_PENDING, &il->status));
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "S_STATS:\t %d\n",
+ test_bit(S_STATS, &il->status));
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "S_SCANNING:\t %d\n",
+ test_bit(S_SCANNING, &il->status));
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "S_SCAN_ABORTING:\t %d\n",
+ test_bit(S_SCAN_ABORTING, &il->status));
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "S_SCAN_HW:\t\t %d\n",
+ test_bit(S_SCAN_HW, &il->status));
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "S_POWER_PMI:\t %d\n",
+ test_bit(S_POWER_PMI, &il->status));
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "S_FW_ERROR:\t %d\n",
+ test_bit(S_FW_ERROR, &il->status));
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t
+il_dbgfs_interrupt_read(struct file *file, char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+
+ struct il_priv *il = file->private_data;
+ int pos = 0;
+ int cnt = 0;
+ char *buf;
+ int bufsz = 24 * 64; /* 24 items * 64 char per item */
+ ssize_t ret;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IL_ERR("Can not allocate Buffer\n");
+ return -ENOMEM;
+ }
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "Interrupt Statistics Report:\n");
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
+ il->isr_stats.hw);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
+ il->isr_stats.sw);
+ if (il->isr_stats.sw || il->isr_stats.hw) {
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "\tLast Restarting Code: 0x%X\n",
+ il->isr_stats.err_code);
+ }
+#ifdef CONFIG_IWLEGACY_DEBUG
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
+ il->isr_stats.sch);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
+ il->isr_stats.alive);
+#endif
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "HW RF KILL switch toggled:\t %u\n",
+ il->isr_stats.rfkill);
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
+ il->isr_stats.ctkill);
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
+ il->isr_stats.wakeup);
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "Rx command responses:\t\t %u\n",
+ il->isr_stats.rx);
+ for (cnt = 0; cnt < IL_CN_MAX; cnt++) {
+ if (il->isr_stats.handlers[cnt] > 0)
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "\tRx handler[%36s]:\t\t %u\n",
+ il_get_cmd_string(cnt),
+ il->isr_stats.handlers[cnt]);
+ }
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
+ il->isr_stats.tx);
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
+ il->isr_stats.unhandled);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t
+il_dbgfs_interrupt_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ char buf[8];
+ int buf_size;
+ u32 reset_flag;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%x", &reset_flag) != 1)
+ return -EFAULT;
+ if (reset_flag == 0)
+ il_clear_isr_stats(il);
+
+ return count;
+}
+
+static ssize_t
+il_dbgfs_qos_read(struct file *file, char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ struct il_rxon_context *ctx = &il->ctx;
+ int pos = 0, i;
+ char buf[256];
+ const size_t bufsz = sizeof(buf);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n", ctx->ctxid);
+ for (i = 0; i < AC_NUM; i++) {
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "\tcw_min\tcw_max\taifsn\ttxop\n");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "AC[%d]\t%u\t%u\t%u\t%u\n", i,
+ ctx->qos_data.def_qos_parm.ac[i].cw_min,
+ ctx->qos_data.def_qos_parm.ac[i].cw_max,
+ ctx->qos_data.def_qos_parm.ac[i].aifsn,
+ ctx->qos_data.def_qos_parm.ac[i].edca_txop);
+ }
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t
+il_dbgfs_disable_ht40_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ char buf[8];
+ int buf_size;
+ int ht40;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%d", &ht40) != 1)
+ return -EFAULT;
+ if (!il_is_any_associated(il))
+ il->disable_ht40 = ht40 ? true : false;
+ else {
+ IL_ERR("Sta associated with AP - "
+ "Change to 40MHz channel support is not allowed\n");
+ return -EINVAL;
+ }
+
+ return count;
+}
+
+static ssize_t
+il_dbgfs_disable_ht40_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ char buf[100];
+ int pos = 0;
+ const size_t bufsz = sizeof(buf);
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "11n 40MHz Mode: %s\n",
+ il->disable_ht40 ? "Disabled" : "Enabled");
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+DEBUGFS_READ_WRITE_FILE_OPS(sram);
+DEBUGFS_READ_FILE_OPS(nvm);
+DEBUGFS_READ_FILE_OPS(stations);
+DEBUGFS_READ_FILE_OPS(channels);
+DEBUGFS_READ_FILE_OPS(status);
+DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
+DEBUGFS_READ_FILE_OPS(qos);
+DEBUGFS_READ_WRITE_FILE_OPS(disable_ht40);
+
+static ssize_t
+il_dbgfs_traffic_log_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ int pos = 0, ofs = 0;
+ int cnt = 0, entry;
+ struct il_tx_queue *txq;
+ struct il_queue *q;
+ struct il_rx_queue *rxq = &il->rxq;
+ char *buf;
+ int bufsz =
+ ((IL_TRAFFIC_ENTRIES * IL_TRAFFIC_ENTRY_SIZE * 64) * 2) +
+ (il->cfg->base_params->num_of_queues * 32 * 8) + 400;
+ const u8 *ptr;
+ ssize_t ret;
+
+ if (!il->txq) {
+ IL_ERR("txq not ready\n");
+ return -EAGAIN;
+ }
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IL_ERR("Can not allocate buffer\n");
+ return -ENOMEM;
+ }
+ pos += scnprintf(buf + pos, bufsz - pos, "Tx Queue\n");
+ for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) {
+ txq = &il->txq[cnt];
+ q = &txq->q;
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "q[%d]: read_ptr: %u, write_ptr: %u\n", cnt,
+ q->read_ptr, q->write_ptr);
+ }
+ if (il->tx_traffic && (il_debug_level & IL_DL_TX)) {
+ ptr = il->tx_traffic;
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "Tx Traffic idx: %u\n",
+ il->tx_traffic_idx);
+ for (cnt = 0, ofs = 0; cnt < IL_TRAFFIC_ENTRIES; cnt++) {
+ for (entry = 0; entry < IL_TRAFFIC_ENTRY_SIZE / 16;
+ entry++, ofs += 16) {
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "0x%.4x ",
+ ofs);
+ hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
+ buf + pos, bufsz - pos, 0);
+ pos += strlen(buf + pos);
+ if (bufsz - pos > 0)
+ buf[pos++] = '\n';
+ }
+ }
+ }
+
+ pos += scnprintf(buf + pos, bufsz - pos, "Rx Queue\n");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "read: %u, write: %u\n",
+ rxq->read, rxq->write);
+
+ if (il->rx_traffic && (il_debug_level & IL_DL_RX)) {
+ ptr = il->rx_traffic;
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "Rx Traffic idx: %u\n",
+ il->rx_traffic_idx);
+ for (cnt = 0, ofs = 0; cnt < IL_TRAFFIC_ENTRIES; cnt++) {
+ for (entry = 0; entry < IL_TRAFFIC_ENTRY_SIZE / 16;
+ entry++, ofs += 16) {
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "0x%.4x ",
+ ofs);
+ hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
+ buf + pos, bufsz - pos, 0);
+ pos += strlen(buf + pos);
+ if (bufsz - pos > 0)
+ buf[pos++] = '\n';
+ }
+ }
+ }
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t
+il_dbgfs_traffic_log_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ char buf[8];
+ int buf_size;
+ int traffic_log;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%d", &traffic_log) != 1)
+ return -EFAULT;
+ if (traffic_log == 0)
+ il_reset_traffic_log(il);
+
+ return count;
+}
+
+static ssize_t
+il_dbgfs_tx_queue_read(struct file *file, char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+
+ struct il_priv *il = file->private_data;
+ struct il_tx_queue *txq;
+ struct il_queue *q;
+ char *buf;
+ int pos = 0;
+ int cnt;
+ int ret;
+ const size_t bufsz =
+ sizeof(char) * 64 * il->cfg->base_params->num_of_queues;
+
+ if (!il->txq) {
+ IL_ERR("txq not ready\n");
+ return -EAGAIN;
+ }
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) {
+ txq = &il->txq[cnt];
+ q = &txq->q;
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "hwq %.2d: read=%u write=%u stop=%d"
+ " swq_id=%#.2x (ac %d/hwq %d)\n", cnt,
+ q->read_ptr, q->write_ptr,
+ !!test_bit(cnt, il->queue_stopped),
+ txq->swq_id, txq->swq_id & 3,
+ (txq->swq_id >> 2) & 0x1f);
+ if (cnt >= 4)
+ continue;
+ /* for the ACs, display the stop count too */
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " stop-count: %d\n",
+ atomic_read(&il->queue_stop_count[cnt]));
+ }
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t
+il_dbgfs_rx_queue_read(struct file *file, char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+
+ struct il_priv *il = file->private_data;
+ struct il_rx_queue *rxq = &il->rxq;
+ char buf[256];
+ int pos = 0;
+ const size_t bufsz = sizeof(buf);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n", rxq->read);
+ pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n", rxq->write);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
+ rxq->free_count);
+ if (rxq->rb_stts) {
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
+ le16_to_cpu(rxq->rb_stts->
+ closed_rb_num) & 0x0FFF);
+ } else {
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "closed_rb_num: Not Allocated\n");
+ }
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t
+il_dbgfs_ucode_rx_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ return il->cfg->ops->lib->debugfs_ops.rx_stats_read(file, user_buf,
+ count, ppos);
+}
+
+static ssize_t
+il_dbgfs_ucode_tx_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ return il->cfg->ops->lib->debugfs_ops.tx_stats_read(file, user_buf,
+ count, ppos);
+}
+
+static ssize_t
+il_dbgfs_ucode_general_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ return il->cfg->ops->lib->debugfs_ops.general_stats_read(file, user_buf,
+ count, ppos);
+}
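
These three handlers only forward to il->cfg->ops->lib->debugfs_ops, so each hardware flavour plugs in its own statistics formatters. A hedged sketch of such wiring follows; struct il_debugfs_ops and the il3945_* handler names are assumptions here, only the three field names come from the calls above:

static const struct il_debugfs_ops il3945_debugfs_ops = {
	.rx_stats_read = il3945_ucode_rx_stats_read,
	.tx_stats_read = il3945_ucode_tx_stats_read,
	.general_stats_read = il3945_ucode_general_stats_read,
};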
+
+static ssize_t
+il_dbgfs_sensitivity_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+
+ struct il_priv *il = file->private_data;
+ int pos = 0;
+ int cnt = 0;
+ char *buf;
+ int bufsz = sizeof(struct il_sensitivity_data) * 4 + 100;
+ ssize_t ret;
+ struct il_sensitivity_data *data;
+
+ data = &il->sensitivity_data;
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IL_ERR("Can not allocate Buffer\n");
+ return -ENOMEM;
+ }
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm:\t\t\t %u\n",
+ data->auto_corr_ofdm);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm_mrc:\t\t %u\n",
+ data->auto_corr_ofdm_mrc);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm_x1:\t\t %u\n",
+ data->auto_corr_ofdm_x1);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm_mrc_x1:\t\t %u\n",
+ data->auto_corr_ofdm_mrc_x1);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "auto_corr_cck:\t\t\t %u\n",
+ data->auto_corr_cck);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "auto_corr_cck_mrc:\t\t %u\n",
+ data->auto_corr_cck_mrc);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "last_bad_plcp_cnt_ofdm:\t\t %u\n",
+ data->last_bad_plcp_cnt_ofdm);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_ofdm:\t\t %u\n",
+ data->last_fa_cnt_ofdm);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "last_bad_plcp_cnt_cck:\t\t %u\n",
+ data->last_bad_plcp_cnt_cck);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_cck:\t\t %u\n",
+ data->last_fa_cnt_cck);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "nrg_curr_state:\t\t\t %u\n",
+ data->nrg_curr_state);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "nrg_prev_state:\t\t\t %u\n",
+ data->nrg_prev_state);
+ pos += scnprintf(buf + pos, bufsz - pos, "nrg_value:\t\t\t");
+ for (cnt = 0; cnt < 10; cnt++) {
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, " %u",
+ data->nrg_value[cnt]);
+ }
+ pos += scnprintf(buf + pos, bufsz - pos, "\n");
+ pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_rssi:\t\t");
+ for (cnt = 0; cnt < NRG_NUM_PREV_STAT_L; cnt++) {
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, " %u",
+ data->nrg_silence_rssi[cnt]);
+ }
+ pos += scnprintf(buf + pos, bufsz - pos, "\n");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "nrg_silence_ref:\t\t %u\n",
+ data->nrg_silence_ref);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "nrg_energy_idx:\t\t\t %u\n",
+ data->nrg_energy_idx);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "nrg_silence_idx:\t\t %u\n",
+ data->nrg_silence_idx);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "nrg_th_cck:\t\t\t %u\n",
+ data->nrg_th_cck);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "nrg_auto_corr_silence_diff:\t %u\n",
+ data->nrg_auto_corr_silence_diff);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "num_in_cck_no_fa:\t\t %u\n",
+ data->num_in_cck_no_fa);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "nrg_th_ofdm:\t\t\t %u\n",
+ data->nrg_th_ofdm);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t
+il_dbgfs_chain_noise_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+
+ struct il_priv *il = file->private_data;
+ int pos = 0;
+ int cnt = 0;
+ char *buf;
+ int bufsz = sizeof(struct il_chain_noise_data) * 4 + 100;
+ ssize_t ret;
+ struct il_chain_noise_data *data;
+
+ data = &il->chain_noise_data;
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IL_ERR("Can not allocate Buffer\n");
+ return -ENOMEM;
+ }
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "active_chains:\t\t\t %u\n",
+ data->active_chains);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "chain_noise_a:\t\t\t %u\n",
+ data->chain_noise_a);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "chain_noise_b:\t\t\t %u\n",
+ data->chain_noise_b);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "chain_noise_c:\t\t\t %u\n",
+ data->chain_noise_c);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "chain_signal_a:\t\t\t %u\n",
+ data->chain_signal_a);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "chain_signal_b:\t\t\t %u\n",
+ data->chain_signal_b);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "chain_signal_c:\t\t\t %u\n",
+ data->chain_signal_c);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "beacon_count:\t\t\t %u\n",
+ data->beacon_count);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "disconn_array:\t\t\t");
+ for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) {
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, " %u",
+ data->disconn_array[cnt]);
+ }
+ pos += scnprintf(buf + pos, bufsz - pos, "\n");
+ pos += scnprintf(buf + pos, bufsz - pos, "delta_gain_code:\t\t");
+ for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) {
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, " %u",
+ data->delta_gain_code[cnt]);
+ }
+ pos += scnprintf(buf + pos, bufsz - pos, "\n");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "radio_write:\t\t\t %u\n",
+ data->radio_write);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "state:\t\t\t\t %u\n",
+ data->state);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t
+il_dbgfs_power_save_status_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ char buf[60];
+ int pos = 0;
+ const size_t bufsz = sizeof(buf);
+ u32 pwrsave_status;
+
+ pwrsave_status =
+ _il_rd(il, CSR_GP_CNTRL) & CSR_GP_REG_POWER_SAVE_STATUS_MSK;
+
+ pos += scnprintf(buf + pos, bufsz - pos, "Power Save Status: ");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "%s\n",
+ (pwrsave_status == CSR_GP_REG_NO_POWER_SAVE) ? "none" :
+ (pwrsave_status == CSR_GP_REG_MAC_POWER_SAVE) ? "MAC" :
+ (pwrsave_status == CSR_GP_REG_PHY_POWER_SAVE) ? "PHY" :
+ "error");
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t
+il_dbgfs_clear_ucode_stats_write(struct file *file,
+ const char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ char buf[8];
+ int buf_size;
+ int clear;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%d", &clear) != 1)
+ return -EFAULT;
+
+ /* make request to uCode to retrieve stats information */
+ mutex_lock(&il->mutex);
+ il_send_stats_request(il, CMD_SYNC, true);
+ mutex_unlock(&il->mutex);
+
+ return count;
+}
+
+static ssize_t
+il_dbgfs_rxon_flags_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+
+ struct il_priv *il = file->private_data;
+ int len = 0;
+ char buf[20];
+
+ len = sprintf(buf, "0x%04X\n", le32_to_cpu(il->ctx.active.flags));
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t
+il_dbgfs_rxon_filter_flags_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+
+ struct il_priv *il = file->private_data;
+ int len = 0;
+ char buf[20];
+
+ len =
+ sprintf(buf, "0x%04X\n", le32_to_cpu(il->ctx.active.filter_flags));
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t
+il_dbgfs_fh_reg_read(struct file *file, char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ char *buf;
+ int pos = 0;
+ ssize_t ret = -EFAULT;
+
+ if (il->cfg->ops->lib->dump_fh) {
+ ret = pos = il->cfg->ops->lib->dump_fh(il, &buf, true);
+ if (buf) {
+ ret =
+ simple_read_from_buffer(user_buf, count, ppos, buf,
+ pos);
+ kfree(buf);
+ }
+ }
+
+ return ret;
+}
+
+static ssize_t
+il_dbgfs_missed_beacon_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+
+ struct il_priv *il = file->private_data;
+ int pos = 0;
+ char buf[12];
+ const size_t bufsz = sizeof(buf);
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "%d\n",
+ il->missed_beacon_threshold);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t
+il_dbgfs_missed_beacon_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ char buf[8];
+ int buf_size;
+ int missed;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%d", &missed) != 1)
+ return -EINVAL;
+
+ if (missed < IL_MISSED_BEACON_THRESHOLD_MIN ||
+ missed > IL_MISSED_BEACON_THRESHOLD_MAX)
+ il->missed_beacon_threshold = IL_MISSED_BEACON_THRESHOLD_DEF;
+ else
+ il->missed_beacon_threshold = missed;
+
+ return count;
+}
+
+static ssize_t
+il_dbgfs_force_reset_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+
+ struct il_priv *il = file->private_data;
+ int pos = 0;
+ char buf[300];
+ const size_t bufsz = sizeof(buf);
+ struct il_force_reset *force_reset;
+
+ force_reset = &il->force_reset;
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "\tnumber of reset request: %d\n",
+ force_reset->reset_request_count);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "\tnumber of reset request success: %d\n",
+ force_reset->reset_success_count);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "\tnumber of reset request reject: %d\n",
+ force_reset->reset_reject_count);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "\treset duration: %lu\n",
+ force_reset->reset_duration);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t
+il_dbgfs_force_reset_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+
+ int ret;
+ struct il_priv *il = file->private_data;
+
+ ret = il_force_reset(il, true);
+
+ return ret ? ret : count;
+}
+
+static ssize_t
+il_dbgfs_wd_timeout_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+
+ struct il_priv *il = file->private_data;
+ char buf[8];
+ int buf_size;
+ int timeout;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%d", &timeout) != 1)
+ return -EINVAL;
+ if (timeout < 0 || timeout > IL_MAX_WD_TIMEOUT)
+ timeout = IL_DEF_WD_TIMEOUT;
+
+ il->cfg->base_params->wd_timeout = timeout;
+ il_setup_watchdog(il);
+ return count;
+}
+
+DEBUGFS_READ_FILE_OPS(rx_stats);
+DEBUGFS_READ_FILE_OPS(tx_stats);
+DEBUGFS_READ_WRITE_FILE_OPS(traffic_log);
+DEBUGFS_READ_FILE_OPS(rx_queue);
+DEBUGFS_READ_FILE_OPS(tx_queue);
+DEBUGFS_READ_FILE_OPS(ucode_rx_stats);
+DEBUGFS_READ_FILE_OPS(ucode_tx_stats);
+DEBUGFS_READ_FILE_OPS(ucode_general_stats);
+DEBUGFS_READ_FILE_OPS(sensitivity);
+DEBUGFS_READ_FILE_OPS(chain_noise);
+DEBUGFS_READ_FILE_OPS(power_save_status);
+DEBUGFS_WRITE_FILE_OPS(clear_ucode_stats);
+DEBUGFS_WRITE_FILE_OPS(clear_traffic_stats);
+DEBUGFS_READ_FILE_OPS(fh_reg);
+DEBUGFS_READ_WRITE_FILE_OPS(missed_beacon);
+DEBUGFS_READ_WRITE_FILE_OPS(force_reset);
+DEBUGFS_READ_FILE_OPS(rxon_flags);
+DEBUGFS_READ_FILE_OPS(rxon_filter_flags);
+DEBUGFS_WRITE_FILE_OPS(wd_timeout);
+
+/*
+ * Create the debugfs files and directories
+ */
+int
+il_dbgfs_register(struct il_priv *il, const char *name)
+{
+ struct dentry *phyd = il->hw->wiphy->debugfsdir;
+ struct dentry *dir_drv, *dir_data, *dir_rf, *dir_debug;
+
+ dir_drv = debugfs_create_dir(name, phyd);
+ if (!dir_drv)
+ return -ENOMEM;
+
+ il->debugfs_dir = dir_drv;
+
+ dir_data = debugfs_create_dir("data", dir_drv);
+ if (!dir_data)
+ goto err;
+ dir_rf = debugfs_create_dir("rf", dir_drv);
+ if (!dir_rf)
+ goto err;
+ dir_debug = debugfs_create_dir("debug", dir_drv);
+ if (!dir_debug)
+ goto err;
+
+ DEBUGFS_ADD_FILE(nvm, dir_data, S_IRUSR);
+ DEBUGFS_ADD_FILE(sram, dir_data, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(stations, dir_data, S_IRUSR);
+ DEBUGFS_ADD_FILE(channels, dir_data, S_IRUSR);
+ DEBUGFS_ADD_FILE(status, dir_data, S_IRUSR);
+ DEBUGFS_ADD_FILE(interrupt, dir_data, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR);
+ DEBUGFS_ADD_FILE(disable_ht40, dir_data, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(rx_stats, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(tx_stats, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(traffic_log, dir_debug, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(rx_queue, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(tx_queue, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(power_save_status, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(clear_ucode_stats, dir_debug, S_IWUSR);
+ DEBUGFS_ADD_FILE(clear_traffic_stats, dir_debug, S_IWUSR);
+ DEBUGFS_ADD_FILE(fh_reg, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(missed_beacon, dir_debug, S_IWUSR);
+ DEBUGFS_ADD_FILE(force_reset, dir_debug, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR);
+
+ if (il->cfg->base_params->sensitivity_calib_by_driver)
+ DEBUGFS_ADD_FILE(sensitivity, dir_debug, S_IRUSR);
+ if (il->cfg->base_params->chain_noise_calib_by_driver)
+ DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR);
+ DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR);
+ DEBUGFS_ADD_FILE(wd_timeout, dir_debug, S_IWUSR);
+ if (il->cfg->base_params->sensitivity_calib_by_driver)
+ DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf,
+ &il->disable_sens_cal);
+ if (il->cfg->base_params->chain_noise_calib_by_driver)
+ DEBUGFS_ADD_BOOL(disable_chain_noise, dir_rf,
+ &il->disable_chain_noise_cal);
+ DEBUGFS_ADD_BOOL(disable_tx_power, dir_rf, &il->disable_tx_power_cal);
+ return 0;
+
+err:
+ IL_ERR("Can't create the debugfs directory\n");
+ il_dbgfs_unregister(il);
+ return -ENOMEM;
+}
+EXPORT_SYMBOL(il_dbgfs_register);
+
+/*
+ * Remove the debugfs files and directories
+ */
+void
+il_dbgfs_unregister(struct il_priv *il)
+{
+ if (!il->debugfs_dir)
+ return;
+
+ debugfs_remove_recursive(il->debugfs_dir);
+ il->debugfs_dir = NULL;
+}
+EXPORT_SYMBOL(il_dbgfs_unregister);
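
Both entry points are exported, so the per-device modules can call them when the wiphy comes up and goes down. A minimal usage sketch; the calling context and DRV_NAME are assumptions, not part of this patch:

static int il_example_setup_debugfs(struct il_priv *il)
{
	int ret = il_dbgfs_register(il, DRV_NAME);
	if (ret)
		IL_ERR("failed to create debugfs files, error %d\n", ret);
	return ret;
}

static void il_example_teardown_debugfs(struct il_priv *il)
{
	il_dbgfs_unregister(il);	/* safe even if registration failed */
}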
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.c b/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.c
deleted file mode 100644
index cfabb38793ab..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.c
+++ /dev/null
@@ -1,523 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *****************************************************************************/
-
-#include "iwl-3945-debugfs.h"
-
-
-static int iwl3945_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
-{
- int p = 0;
-
- p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n",
- le32_to_cpu(priv->_3945.statistics.flag));
- if (le32_to_cpu(priv->_3945.statistics.flag) &
- UCODE_STATISTICS_CLEAR_MSK)
- p += scnprintf(buf + p, bufsz - p,
- "\tStatistics have been cleared\n");
- p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
- (le32_to_cpu(priv->_3945.statistics.flag) &
- UCODE_STATISTICS_FREQUENCY_MSK)
- ? "2.4 GHz" : "5.2 GHz");
- p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
- (le32_to_cpu(priv->_3945.statistics.flag) &
- UCODE_STATISTICS_NARROW_BAND_MSK)
- ? "enabled" : "disabled");
- return p;
-}
-
-ssize_t iwl3945_ucode_rx_stats_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- int pos = 0;
- char *buf;
- int bufsz = sizeof(struct iwl39_statistics_rx_phy) * 40 +
- sizeof(struct iwl39_statistics_rx_non_phy) * 40 + 400;
- ssize_t ret;
- struct iwl39_statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm,
- *max_ofdm;
- struct iwl39_statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
- struct iwl39_statistics_rx_non_phy *general, *accum_general;
- struct iwl39_statistics_rx_non_phy *delta_general, *max_general;
-
- if (!iwl_legacy_is_alive(priv))
- return -EAGAIN;
-
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf) {
- IWL_ERR(priv, "Can not allocate Buffer\n");
- return -ENOMEM;
- }
-
- /*
- * The statistic information display here is based on
- * the last statistics notification from uCode
- * might not reflect the current uCode activity
- */
- ofdm = &priv->_3945.statistics.rx.ofdm;
- cck = &priv->_3945.statistics.rx.cck;
- general = &priv->_3945.statistics.rx.general;
- accum_ofdm = &priv->_3945.accum_statistics.rx.ofdm;
- accum_cck = &priv->_3945.accum_statistics.rx.cck;
- accum_general = &priv->_3945.accum_statistics.rx.general;
- delta_ofdm = &priv->_3945.delta_statistics.rx.ofdm;
- delta_cck = &priv->_3945.delta_statistics.rx.cck;
- delta_general = &priv->_3945.delta_statistics.rx.general;
- max_ofdm = &priv->_3945.max_delta.rx.ofdm;
- max_cck = &priv->_3945.max_delta.rx.cck;
- max_general = &priv->_3945.max_delta.rx.general;
-
- pos += iwl3945_statistics_flag(priv, buf, bufsz);
- pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
- "acumulative delta max\n",
- "Statistics_Rx - OFDM:");
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "ina_cnt:", le32_to_cpu(ofdm->ina_cnt),
- accum_ofdm->ina_cnt,
- delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "fina_cnt:",
- le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
- delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n", "plcp_err:",
- le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
- delta_ofdm->plcp_err, max_ofdm->plcp_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n", "crc32_err:",
- le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
- delta_ofdm->crc32_err, max_ofdm->crc32_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n", "overrun_err:",
- le32_to_cpu(ofdm->overrun_err),
- accum_ofdm->overrun_err, delta_ofdm->overrun_err,
- max_ofdm->overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "early_overrun_err:",
- le32_to_cpu(ofdm->early_overrun_err),
- accum_ofdm->early_overrun_err,
- delta_ofdm->early_overrun_err,
- max_ofdm->early_overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "crc32_good:", le32_to_cpu(ofdm->crc32_good),
- accum_ofdm->crc32_good, delta_ofdm->crc32_good,
- max_ofdm->crc32_good);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n", "false_alarm_cnt:",
- le32_to_cpu(ofdm->false_alarm_cnt),
- accum_ofdm->false_alarm_cnt,
- delta_ofdm->false_alarm_cnt,
- max_ofdm->false_alarm_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "fina_sync_err_cnt:",
- le32_to_cpu(ofdm->fina_sync_err_cnt),
- accum_ofdm->fina_sync_err_cnt,
- delta_ofdm->fina_sync_err_cnt,
- max_ofdm->fina_sync_err_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "sfd_timeout:",
- le32_to_cpu(ofdm->sfd_timeout),
- accum_ofdm->sfd_timeout,
- delta_ofdm->sfd_timeout,
- max_ofdm->sfd_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "fina_timeout:",
- le32_to_cpu(ofdm->fina_timeout),
- accum_ofdm->fina_timeout,
- delta_ofdm->fina_timeout,
- max_ofdm->fina_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "unresponded_rts:",
- le32_to_cpu(ofdm->unresponded_rts),
- accum_ofdm->unresponded_rts,
- delta_ofdm->unresponded_rts,
- max_ofdm->unresponded_rts);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "rxe_frame_lmt_ovrun:",
- le32_to_cpu(ofdm->rxe_frame_limit_overrun),
- accum_ofdm->rxe_frame_limit_overrun,
- delta_ofdm->rxe_frame_limit_overrun,
- max_ofdm->rxe_frame_limit_overrun);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "sent_ack_cnt:",
- le32_to_cpu(ofdm->sent_ack_cnt),
- accum_ofdm->sent_ack_cnt,
- delta_ofdm->sent_ack_cnt,
- max_ofdm->sent_ack_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "sent_cts_cnt:",
- le32_to_cpu(ofdm->sent_cts_cnt),
- accum_ofdm->sent_cts_cnt,
- delta_ofdm->sent_cts_cnt, max_ofdm->sent_cts_cnt);
-
- pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
- "acumulative delta max\n",
- "Statistics_Rx - CCK:");
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "ina_cnt:",
- le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
- delta_cck->ina_cnt, max_cck->ina_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "fina_cnt:",
- le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
- delta_cck->fina_cnt, max_cck->fina_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "plcp_err:",
- le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
- delta_cck->plcp_err, max_cck->plcp_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "crc32_err:",
- le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
- delta_cck->crc32_err, max_cck->crc32_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "overrun_err:",
- le32_to_cpu(cck->overrun_err),
- accum_cck->overrun_err,
- delta_cck->overrun_err, max_cck->overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "early_overrun_err:",
- le32_to_cpu(cck->early_overrun_err),
- accum_cck->early_overrun_err,
- delta_cck->early_overrun_err,
- max_cck->early_overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "crc32_good:",
- le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
- delta_cck->crc32_good,
- max_cck->crc32_good);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "false_alarm_cnt:",
- le32_to_cpu(cck->false_alarm_cnt),
- accum_cck->false_alarm_cnt,
- delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "fina_sync_err_cnt:",
- le32_to_cpu(cck->fina_sync_err_cnt),
- accum_cck->fina_sync_err_cnt,
- delta_cck->fina_sync_err_cnt,
- max_cck->fina_sync_err_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "sfd_timeout:",
- le32_to_cpu(cck->sfd_timeout),
- accum_cck->sfd_timeout,
- delta_cck->sfd_timeout, max_cck->sfd_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "fina_timeout:",
- le32_to_cpu(cck->fina_timeout),
- accum_cck->fina_timeout,
- delta_cck->fina_timeout, max_cck->fina_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "unresponded_rts:",
- le32_to_cpu(cck->unresponded_rts),
- accum_cck->unresponded_rts,
- delta_cck->unresponded_rts,
- max_cck->unresponded_rts);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "rxe_frame_lmt_ovrun:",
- le32_to_cpu(cck->rxe_frame_limit_overrun),
- accum_cck->rxe_frame_limit_overrun,
- delta_cck->rxe_frame_limit_overrun,
- max_cck->rxe_frame_limit_overrun);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "sent_ack_cnt:",
- le32_to_cpu(cck->sent_ack_cnt),
- accum_cck->sent_ack_cnt,
- delta_cck->sent_ack_cnt,
- max_cck->sent_ack_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "sent_cts_cnt:",
- le32_to_cpu(cck->sent_cts_cnt),
- accum_cck->sent_cts_cnt,
- delta_cck->sent_cts_cnt,
- max_cck->sent_cts_cnt);
-
- pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
- "acumulative delta max\n",
- "Statistics_Rx - GENERAL:");
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "bogus_cts:",
- le32_to_cpu(general->bogus_cts),
- accum_general->bogus_cts,
- delta_general->bogus_cts, max_general->bogus_cts);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "bogus_ack:",
- le32_to_cpu(general->bogus_ack),
- accum_general->bogus_ack,
- delta_general->bogus_ack, max_general->bogus_ack);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "non_bssid_frames:",
- le32_to_cpu(general->non_bssid_frames),
- accum_general->non_bssid_frames,
- delta_general->non_bssid_frames,
- max_general->non_bssid_frames);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "filtered_frames:",
- le32_to_cpu(general->filtered_frames),
- accum_general->filtered_frames,
- delta_general->filtered_frames,
- max_general->filtered_frames);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "non_channel_beacons:",
- le32_to_cpu(general->non_channel_beacons),
- accum_general->non_channel_beacons,
- delta_general->non_channel_beacons,
- max_general->non_channel_beacons);
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-ssize_t iwl3945_ucode_tx_stats_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- int pos = 0;
- char *buf;
- int bufsz = (sizeof(struct iwl39_statistics_tx) * 48) + 250;
- ssize_t ret;
- struct iwl39_statistics_tx *tx, *accum_tx, *delta_tx, *max_tx;
-
- if (!iwl_legacy_is_alive(priv))
- return -EAGAIN;
-
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf) {
- IWL_ERR(priv, "Can not allocate Buffer\n");
- return -ENOMEM;
- }
-
- /*
- * The statistic information display here is based on
- * the last statistics notification from uCode
- * might not reflect the current uCode activity
- */
- tx = &priv->_3945.statistics.tx;
- accum_tx = &priv->_3945.accum_statistics.tx;
- delta_tx = &priv->_3945.delta_statistics.tx;
- max_tx = &priv->_3945.max_delta.tx;
- pos += iwl3945_statistics_flag(priv, buf, bufsz);
- pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
- "acumulative delta max\n",
- "Statistics_Tx:");
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "preamble:",
- le32_to_cpu(tx->preamble_cnt),
- accum_tx->preamble_cnt,
- delta_tx->preamble_cnt, max_tx->preamble_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "rx_detected_cnt:",
- le32_to_cpu(tx->rx_detected_cnt),
- accum_tx->rx_detected_cnt,
- delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "bt_prio_defer_cnt:",
- le32_to_cpu(tx->bt_prio_defer_cnt),
- accum_tx->bt_prio_defer_cnt,
- delta_tx->bt_prio_defer_cnt,
- max_tx->bt_prio_defer_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "bt_prio_kill_cnt:",
- le32_to_cpu(tx->bt_prio_kill_cnt),
- accum_tx->bt_prio_kill_cnt,
- delta_tx->bt_prio_kill_cnt,
- max_tx->bt_prio_kill_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "few_bytes_cnt:",
- le32_to_cpu(tx->few_bytes_cnt),
- accum_tx->few_bytes_cnt,
- delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "cts_timeout:",
- le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
- delta_tx->cts_timeout, max_tx->cts_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "ack_timeout:",
- le32_to_cpu(tx->ack_timeout),
- accum_tx->ack_timeout,
- delta_tx->ack_timeout, max_tx->ack_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "expected_ack_cnt:",
- le32_to_cpu(tx->expected_ack_cnt),
- accum_tx->expected_ack_cnt,
- delta_tx->expected_ack_cnt,
- max_tx->expected_ack_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "actual_ack_cnt:",
- le32_to_cpu(tx->actual_ack_cnt),
- accum_tx->actual_ack_cnt,
- delta_tx->actual_ack_cnt,
- max_tx->actual_ack_cnt);
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-ssize_t iwl3945_ucode_general_stats_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- int pos = 0;
- char *buf;
- int bufsz = sizeof(struct iwl39_statistics_general) * 10 + 300;
- ssize_t ret;
- struct iwl39_statistics_general *general, *accum_general;
- struct iwl39_statistics_general *delta_general, *max_general;
- struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
- struct iwl39_statistics_div *div, *accum_div, *delta_div, *max_div;
-
- if (!iwl_legacy_is_alive(priv))
- return -EAGAIN;
-
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf) {
- IWL_ERR(priv, "Can not allocate Buffer\n");
- return -ENOMEM;
- }
-
- /*
- * The statistic information display here is based on
- * the last statistics notification from uCode
- * might not reflect the current uCode activity
- */
- general = &priv->_3945.statistics.general;
- dbg = &priv->_3945.statistics.general.dbg;
- div = &priv->_3945.statistics.general.div;
- accum_general = &priv->_3945.accum_statistics.general;
- delta_general = &priv->_3945.delta_statistics.general;
- max_general = &priv->_3945.max_delta.general;
- accum_dbg = &priv->_3945.accum_statistics.general.dbg;
- delta_dbg = &priv->_3945.delta_statistics.general.dbg;
- max_dbg = &priv->_3945.max_delta.general.dbg;
- accum_div = &priv->_3945.accum_statistics.general.div;
- delta_div = &priv->_3945.delta_statistics.general.div;
- max_div = &priv->_3945.max_delta.general.div;
- pos += iwl3945_statistics_flag(priv, buf, bufsz);
- pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
- "acumulative delta max\n",
- "Statistics_General:");
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "burst_check:",
- le32_to_cpu(dbg->burst_check),
- accum_dbg->burst_check,
- delta_dbg->burst_check, max_dbg->burst_check);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "burst_count:",
- le32_to_cpu(dbg->burst_count),
- accum_dbg->burst_count,
- delta_dbg->burst_count, max_dbg->burst_count);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "sleep_time:",
- le32_to_cpu(general->sleep_time),
- accum_general->sleep_time,
- delta_general->sleep_time, max_general->sleep_time);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "slots_out:",
- le32_to_cpu(general->slots_out),
- accum_general->slots_out,
- delta_general->slots_out, max_general->slots_out);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "slots_idle:",
- le32_to_cpu(general->slots_idle),
- accum_general->slots_idle,
- delta_general->slots_idle, max_general->slots_idle);
- pos += scnprintf(buf + pos, bufsz - pos, "ttl_timestamp:\t\t\t%u\n",
- le32_to_cpu(general->ttl_timestamp));
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "tx_on_a:",
- le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
- delta_div->tx_on_a, max_div->tx_on_a);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "tx_on_b:",
- le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
- delta_div->tx_on_b, max_div->tx_on_b);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "exec_time:",
- le32_to_cpu(div->exec_time), accum_div->exec_time,
- delta_div->exec_time, max_div->exec_time);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "probe_time:",
- le32_to_cpu(div->probe_time), accum_div->probe_time,
- delta_div->probe_time, max_div->probe_time);
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.h b/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.h
deleted file mode 100644
index 8fef4b32b447..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *****************************************************************************/
-
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-debug.h"
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
-ssize_t iwl3945_ucode_rx_stats_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos);
-ssize_t iwl3945_ucode_tx_stats_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos);
-ssize_t iwl3945_ucode_general_stats_read(struct file *file,
- char __user *user_buf, size_t count,
- loff_t *ppos);
-#else
-static ssize_t iwl3945_ucode_rx_stats_read(struct file *file,
- char __user *user_buf, size_t count,
- loff_t *ppos)
-{
- return 0;
-}
-static ssize_t iwl3945_ucode_tx_stats_read(struct file *file,
- char __user *user_buf, size_t count,
- loff_t *ppos)
-{
- return 0;
-}
-static ssize_t iwl3945_ucode_general_stats_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- return 0;
-}
-#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-fh.h b/drivers/net/wireless/iwlegacy/iwl-3945-fh.h
deleted file mode 100644
index 836c9919f82e..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-3945-fh.h
+++ /dev/null
@@ -1,187 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-#ifndef __iwl_3945_fh_h__
-#define __iwl_3945_fh_h__
-
-/************************************/
-/* iwl3945 Flow Handler Definitions */
-/************************************/
-
-/**
- * This I/O area is directly read/writable by driver (e.g. Linux uses writel())
- * Addresses are offsets from device's PCI hardware base address.
- */
-#define FH39_MEM_LOWER_BOUND (0x0800)
-#define FH39_MEM_UPPER_BOUND (0x1000)
-
-#define FH39_CBCC_TABLE (FH39_MEM_LOWER_BOUND + 0x140)
-#define FH39_TFDB_TABLE (FH39_MEM_LOWER_BOUND + 0x180)
-#define FH39_RCSR_TABLE (FH39_MEM_LOWER_BOUND + 0x400)
-#define FH39_RSSR_TABLE (FH39_MEM_LOWER_BOUND + 0x4c0)
-#define FH39_TCSR_TABLE (FH39_MEM_LOWER_BOUND + 0x500)
-#define FH39_TSSR_TABLE (FH39_MEM_LOWER_BOUND + 0x680)
-
-/* TFDB (Transmit Frame Buffer Descriptor) */
-#define FH39_TFDB(_ch, buf) (FH39_TFDB_TABLE + \
- ((_ch) * 2 + (buf)) * 0x28)
-#define FH39_TFDB_CHNL_BUF_CTRL_REG(_ch) (FH39_TFDB_TABLE + 0x50 * (_ch))
-
-/* CBCC channel is [0,2] */
-#define FH39_CBCC(_ch) (FH39_CBCC_TABLE + (_ch) * 0x8)
-#define FH39_CBCC_CTRL(_ch) (FH39_CBCC(_ch) + 0x00)
-#define FH39_CBCC_BASE(_ch) (FH39_CBCC(_ch) + 0x04)
-
-/* RCSR channel is [0,2] */
-#define FH39_RCSR(_ch) (FH39_RCSR_TABLE + (_ch) * 0x40)
-#define FH39_RCSR_CONFIG(_ch) (FH39_RCSR(_ch) + 0x00)
-#define FH39_RCSR_RBD_BASE(_ch) (FH39_RCSR(_ch) + 0x04)
-#define FH39_RCSR_WPTR(_ch) (FH39_RCSR(_ch) + 0x20)
-#define FH39_RCSR_RPTR_ADDR(_ch) (FH39_RCSR(_ch) + 0x24)
-
-#define FH39_RSCSR_CHNL0_WPTR (FH39_RCSR_WPTR(0))
-
-/* RSSR */
-#define FH39_RSSR_CTRL (FH39_RSSR_TABLE + 0x000)
-#define FH39_RSSR_STATUS (FH39_RSSR_TABLE + 0x004)
-
-/* TCSR */
-#define FH39_TCSR(_ch) (FH39_TCSR_TABLE + (_ch) * 0x20)
-#define FH39_TCSR_CONFIG(_ch) (FH39_TCSR(_ch) + 0x00)
-#define FH39_TCSR_CREDIT(_ch) (FH39_TCSR(_ch) + 0x04)
-#define FH39_TCSR_BUFF_STTS(_ch) (FH39_TCSR(_ch) + 0x08)
-
-/* TSSR */
-#define FH39_TSSR_CBB_BASE (FH39_TSSR_TABLE + 0x000)
-#define FH39_TSSR_MSG_CONFIG (FH39_TSSR_TABLE + 0x008)
-#define FH39_TSSR_TX_STATUS (FH39_TSSR_TABLE + 0x010)
-
-
-/* DBM */
-
-#define FH39_SRVC_CHNL (6)
-
-#define FH39_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE (20)
-#define FH39_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH (4)
-
-#define FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN (0x08000000)
-
-#define FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE (0x80000000)
-
-#define FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE (0x20000000)
-
-#define FH39_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128 (0x01000000)
-
-#define FH39_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST (0x00001000)
-
-#define FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH (0x00000000)
-
-#define FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF (0x00000000)
-#define FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRIVER (0x00000001)
-
-#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE_VAL (0x00000000)
-#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL (0x00000008)
-
-#define FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000)
-
-#define FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT (0x00000000)
-
-#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000)
-#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000)
-
-#define FH39_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00004000)
-
-#define FH39_TCSR_CHNL_TX_BUF_STS_REG_BIT_TFDB_WPTR (0x00000001)
-
-#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON (0xFF000000)
-#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON (0x00FF0000)
-
-#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B (0x00000400)
-
-#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON (0x00000100)
-#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON (0x00000080)
-
-#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH (0x00000020)
-#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH (0x00000005)
-
-#define FH39_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_ch) (BIT(_ch) << 24)
-#define FH39_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_ch) (BIT(_ch) << 16)
-
-#define FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_ch) \
- (FH39_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_ch) | \
- FH39_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_ch))
-
-#define FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000)
-
-struct iwl3945_tfd_tb {
- __le32 addr;
- __le32 len;
-} __packed;
-
-struct iwl3945_tfd {
- __le32 control_flags;
- struct iwl3945_tfd_tb tbs[4];
- u8 __pad[28];
-} __packed;
-
-
-#endif /* __iwl_3945_fh_h__ */
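
The FH39 macros in this removed header compose fixed offsets from the device's PCI BAR: FH39_RCSR_TABLE is 0x0800 + 0x400 = 0x0C00, so channel 0's RX write pointer (FH39_RSCSR_CHNL0_WPTR) resolves to 0x0C20. The snippet below is a sketch of how a driver would use such an offset, assuming hw_base is the ioremapped BAR (the field name here is illustrative, not taken from the driver).

#include <linux/io.h>

/* Illustrative only: update the channel-0 RX write pointer register. */
static void example_update_rx_wptr(void __iomem *hw_base, u32 wptr)
{
	/* The offset comes straight from the macro arithmetic above. */
	writel(wptr, hw_base + FH39_RSCSR_CHNL0_WPTR);
}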
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-hw.h b/drivers/net/wireless/iwlegacy/iwl-3945-hw.h
deleted file mode 100644
index 5c3a68d3af12..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-3945-hw.h
+++ /dev/null
@@ -1,291 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-/*
- * Please use this file (iwl-3945-hw.h) only for hardware-related definitions.
- * Please use iwl-commands.h for uCode API definitions.
- * Please use iwl-3945.h for driver implementation definitions.
- */
-
-#ifndef __iwl_3945_hw__
-#define __iwl_3945_hw__
-
-#include "iwl-eeprom.h"
-
-/* RSSI to dBm */
-#define IWL39_RSSI_OFFSET 95
-
-/*
- * EEPROM related constants, enums, and structures.
- */
-#define EEPROM_SKU_CAP_OP_MODE_MRC (1 << 7)
-
-/*
- * Mapping of a Tx power level, at factory calibration temperature,
- * to a radio/DSP gain table index.
- * One for each of 5 "sample" power levels in each band.
- * v_det is measured at the factory, using the 3945's built-in power amplifier
- * (PA) output voltage detector. This same detector is used during Tx of
- * long packets in normal operation to provide feedback as to proper output
- * level.
- * Data copied from EEPROM.
- * DO NOT ALTER THIS STRUCTURE!!!
- */
-struct iwl3945_eeprom_txpower_sample {
- u8 gain_index; /* index into power (gain) setup table ... */
- s8 power; /* ... for this pwr level for this chnl group */
- u16 v_det; /* PA output voltage */
-} __packed;
-
-/*
- * Mappings of Tx power levels -> nominal radio/DSP gain table indexes.
- * One for each channel group (a.k.a. "band") (1 for BG, 4 for A).
- * Tx power setup code interpolates between the 5 "sample" power levels
- * to determine the nominal setup for a requested power level.
- * Data copied from EEPROM.
- * DO NOT ALTER THIS STRUCTURE!!!
- */
-struct iwl3945_eeprom_txpower_group {
- struct iwl3945_eeprom_txpower_sample samples[5]; /* 5 power levels */
- s32 a, b, c, d, e; /* coefficients for voltage->power
- * formula (signed) */
- s32 Fa, Fb, Fc, Fd, Fe; /* these modify coeffs based on
- * frequency (signed) */
- s8 saturation_power; /* highest power possible by h/w in this
- * band */
- u8 group_channel; /* "representative" channel # in this band */
- s16 temperature; /* h/w temperature at factory calib this band
- * (signed) */
-} __packed;
-
-/*
- * Temperature-based Tx-power compensation data, not band-specific.
- * These coefficients are use to modify a/b/c/d/e coeffs based on
- * difference between current temperature and factory calib temperature.
- * Data copied from EEPROM.
- */
-struct iwl3945_eeprom_temperature_corr {
- u32 Ta;
- u32 Tb;
- u32 Tc;
- u32 Td;
- u32 Te;
-} __packed;
-
-/*
- * EEPROM map
- */
-struct iwl3945_eeprom {
- u8 reserved0[16];
- u16 device_id; /* abs.ofs: 16 */
- u8 reserved1[2];
- u16 pmc; /* abs.ofs: 20 */
- u8 reserved2[20];
- u8 mac_address[6]; /* abs.ofs: 42 */
- u8 reserved3[58];
- u16 board_revision; /* abs.ofs: 106 */
- u8 reserved4[11];
- u8 board_pba_number[9]; /* abs.ofs: 119 */
- u8 reserved5[8];
- u16 version; /* abs.ofs: 136 */
- u8 sku_cap; /* abs.ofs: 138 */
- u8 leds_mode; /* abs.ofs: 139 */
- u16 oem_mode;
- u16 wowlan_mode; /* abs.ofs: 142 */
- u16 leds_time_interval; /* abs.ofs: 144 */
- u8 leds_off_time; /* abs.ofs: 146 */
- u8 leds_on_time; /* abs.ofs: 147 */
- u8 almgor_m_version; /* abs.ofs: 148 */
- u8 antenna_switch_type; /* abs.ofs: 149 */
- u8 reserved6[42];
- u8 sku_id[4]; /* abs.ofs: 192 */
-
-/*
- * Per-channel regulatory data.
- *
- * Each channel that *might* be supported by 3945 has a fixed location
- * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory
- * txpower (MSB).
- *
- * Entries immediately below are for 20 MHz channel width.
- *
- * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
- */
- u16 band_1_count; /* abs.ofs: 196 */
- struct iwl_eeprom_channel band_1_channels[14]; /* abs.ofs: 198 */
-
-/*
- * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196,
- * 5.0 GHz channels 7, 8, 11, 12, 16
- * (4915-5080MHz) (none of these is ever supported)
- */
- u16 band_2_count; /* abs.ofs: 226 */
- struct iwl_eeprom_channel band_2_channels[13]; /* abs.ofs: 228 */
-
-/*
- * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
- * (5170-5320MHz)
- */
- u16 band_3_count; /* abs.ofs: 254 */
- struct iwl_eeprom_channel band_3_channels[12]; /* abs.ofs: 256 */
-
-/*
- * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
- * (5500-5700MHz)
- */
- u16 band_4_count; /* abs.ofs: 280 */
- struct iwl_eeprom_channel band_4_channels[11]; /* abs.ofs: 282 */
-
-/*
- * 5.7 GHz channels 145, 149, 153, 157, 161, 165
- * (5725-5825MHz)
- */
- u16 band_5_count; /* abs.ofs: 304 */
- struct iwl_eeprom_channel band_5_channels[6]; /* abs.ofs: 306 */
-
- u8 reserved9[194];
-
-/*
- * 3945 Txpower calibration data.
- */
-#define IWL_NUM_TX_CALIB_GROUPS 5
- struct iwl3945_eeprom_txpower_group groups[IWL_NUM_TX_CALIB_GROUPS];
-/* abs.ofs: 512 */
- struct iwl3945_eeprom_temperature_corr corrections; /* abs.ofs: 832 */
- u8 reserved16[172]; /* fill out to full 1024 byte block */
-} __packed;
-
-#define IWL3945_EEPROM_IMG_SIZE 1024
-
-/* End of EEPROM */
-
-#define PCI_CFG_REV_ID_BIT_BASIC_SKU (0x40) /* bit 6 */
-#define PCI_CFG_REV_ID_BIT_RTP (0x80) /* bit 7 */
-
-/* 4 DATA + 1 CMD. There are 2 HCCA queues that are not used. */
-#define IWL39_NUM_QUEUES 5
-#define IWL39_CMD_QUEUE_NUM 4
-
-#define IWL_DEFAULT_TX_RETRY 15
-
-/*********************************************/
-
-#define RFD_SIZE 4
-#define NUM_TFD_CHUNKS 4
-
-#define RX_QUEUE_SIZE 256
-#define RX_QUEUE_MASK 255
-#define RX_QUEUE_SIZE_LOG 8
-
-#define U32_PAD(n) ((4-(n))&0x3)
-
-#define TFD_CTL_COUNT_SET(n) (n << 24)
-#define TFD_CTL_COUNT_GET(ctl) ((ctl >> 24) & 7)
-#define TFD_CTL_PAD_SET(n) (n << 28)
-#define TFD_CTL_PAD_GET(ctl) (ctl >> 28)
-
-/* Sizes and addresses for instruction and data memory (SRAM) in
- * 3945's embedded processor. Driver access is via HBUS_TARG_MEM_* regs. */
-#define IWL39_RTC_INST_LOWER_BOUND (0x000000)
-#define IWL39_RTC_INST_UPPER_BOUND (0x014000)
-
-#define IWL39_RTC_DATA_LOWER_BOUND (0x800000)
-#define IWL39_RTC_DATA_UPPER_BOUND (0x808000)
-
-#define IWL39_RTC_INST_SIZE (IWL39_RTC_INST_UPPER_BOUND - \
- IWL39_RTC_INST_LOWER_BOUND)
-#define IWL39_RTC_DATA_SIZE (IWL39_RTC_DATA_UPPER_BOUND - \
- IWL39_RTC_DATA_LOWER_BOUND)
-
-#define IWL39_MAX_INST_SIZE IWL39_RTC_INST_SIZE
-#define IWL39_MAX_DATA_SIZE IWL39_RTC_DATA_SIZE
-
-/* Size of uCode instruction memory in bootstrap state machine */
-#define IWL39_MAX_BSM_SIZE IWL39_RTC_INST_SIZE
-
-static inline int iwl3945_hw_valid_rtc_data_addr(u32 addr)
-{
- return (addr >= IWL39_RTC_DATA_LOWER_BOUND) &&
- (addr < IWL39_RTC_DATA_UPPER_BOUND);
-}
-
-/* Base physical address of iwl3945_shared is provided to FH_TSSR_CBB_BASE
- * and &iwl3945_shared.rx_read_ptr[0] is provided to FH_RCSR_RPTR_ADDR(0) */
-struct iwl3945_shared {
- __le32 tx_base_ptr[8];
-} __packed;
-
-static inline u8 iwl3945_hw_get_rate(__le16 rate_n_flags)
-{
- return le16_to_cpu(rate_n_flags) & 0xFF;
-}
-
-static inline u16 iwl3945_hw_get_rate_n_flags(__le16 rate_n_flags)
-{
- return le16_to_cpu(rate_n_flags);
-}
-
-static inline __le16 iwl3945_hw_set_rate_n_flags(u8 rate, u16 flags)
-{
- return cpu_to_le16((u16)rate|flags);
-}
-#endif
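
The TFD_CTL_* and U32_PAD macros above pack the buffer count (bits 26:24) and the pad-to-4-bytes value (bits 31:28) into a TFD's control word; struct iwl3945_tfd itself is defined in the removed iwl-3945-fh.h. The following is a sketch under those definitions, not code from the deleted files.

/* Illustrative: pack count and padding into a 3945 TFD control word. */
static void example_fill_tfd_ctl(struct iwl3945_tfd *tfd, int num_tbs, u16 len)
{
	u32 ctl = 0;

	ctl |= TFD_CTL_COUNT_SET(num_tbs);	/* number of buffers in use */
	ctl |= TFD_CTL_PAD_SET(U32_PAD(len));	/* bytes needed to reach a
						 * 4-byte boundary */
	tfd->control_flags = cpu_to_le32(ctl);
}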
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-led.c b/drivers/net/wireless/iwlegacy/iwl-3945-led.c
deleted file mode 100644
index 7a7f0f38c8ab..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-3945-led.c
+++ /dev/null
@@ -1,63 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/delay.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <net/mac80211.h>
-#include <linux/etherdevice.h>
-#include <asm/unaligned.h>
-
-#include "iwl-commands.h"
-#include "iwl-3945.h"
-#include "iwl-core.h"
-#include "iwl-dev.h"
-#include "iwl-3945-led.h"
-
-
-/* Send led command */
-static int iwl3945_send_led_cmd(struct iwl_priv *priv,
- struct iwl_led_cmd *led_cmd)
-{
- struct iwl_host_cmd cmd = {
- .id = REPLY_LEDS_CMD,
- .len = sizeof(struct iwl_led_cmd),
- .data = led_cmd,
- .flags = CMD_ASYNC,
- .callback = NULL,
- };
-
- return iwl_legacy_send_cmd(priv, &cmd);
-}
-
-const struct iwl_led_ops iwl3945_led_ops = {
- .cmd = iwl3945_send_led_cmd,
-};
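
The ops struct above exposes a single callback that sends REPLY_LEDS_CMD asynchronously via iwl_legacy_send_cmd(). A caller in the shared legacy LED code would invoke it roughly as sketched here; the iwl_led_cmd contents are defined in iwl-commands.h and are deliberately left zeroed, and the function name is illustrative.

/* Illustrative only: invoking the 3945 LED ops callback. */
static int example_blink(struct iwl_priv *priv)
{
	struct iwl_led_cmd led_cmd = {};	/* fields elided on purpose */

	return iwl3945_led_ops.cmd(priv, &led_cmd);
}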
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-led.h b/drivers/net/wireless/iwlegacy/iwl-3945-led.h
deleted file mode 100644
index 96716276eb0d..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-3945-led.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#ifndef __iwl_3945_led_h__
-#define __iwl_3945_led_h__
-
-extern const struct iwl_led_ops iwl3945_led_ops;
-
-#endif /* __iwl_3945_led_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-rs.c b/drivers/net/wireless/iwlegacy/iwl-3945-rs.c
deleted file mode 100644
index 8faeaf2dddec..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-3945-rs.c
+++ /dev/null
@@ -1,996 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/skbuff.h>
-#include <linux/slab.h>
-#include <net/mac80211.h>
-
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/delay.h>
-
-#include <linux/workqueue.h>
-
-#include "iwl-commands.h"
-#include "iwl-3945.h"
-#include "iwl-sta.h"
-
-#define RS_NAME "iwl-3945-rs"
-
-static s32 iwl3945_expected_tpt_g[IWL_RATE_COUNT_3945] = {
- 7, 13, 35, 58, 0, 0, 76, 104, 130, 168, 191, 202
-};
-
-static s32 iwl3945_expected_tpt_g_prot[IWL_RATE_COUNT_3945] = {
- 7, 13, 35, 58, 0, 0, 0, 80, 93, 113, 123, 125
-};
-
-static s32 iwl3945_expected_tpt_a[IWL_RATE_COUNT_3945] = {
- 0, 0, 0, 0, 40, 57, 72, 98, 121, 154, 177, 186
-};
-
-static s32 iwl3945_expected_tpt_b[IWL_RATE_COUNT_3945] = {
- 7, 13, 35, 58, 0, 0, 0, 0, 0, 0, 0, 0
-};
-
-struct iwl3945_tpt_entry {
- s8 min_rssi;
- u8 index;
-};
-
-static struct iwl3945_tpt_entry iwl3945_tpt_table_a[] = {
- {-60, IWL_RATE_54M_INDEX},
- {-64, IWL_RATE_48M_INDEX},
- {-72, IWL_RATE_36M_INDEX},
- {-80, IWL_RATE_24M_INDEX},
- {-84, IWL_RATE_18M_INDEX},
- {-85, IWL_RATE_12M_INDEX},
- {-87, IWL_RATE_9M_INDEX},
- {-89, IWL_RATE_6M_INDEX}
-};
-
-static struct iwl3945_tpt_entry iwl3945_tpt_table_g[] = {
- {-60, IWL_RATE_54M_INDEX},
- {-64, IWL_RATE_48M_INDEX},
- {-68, IWL_RATE_36M_INDEX},
- {-80, IWL_RATE_24M_INDEX},
- {-84, IWL_RATE_18M_INDEX},
- {-85, IWL_RATE_12M_INDEX},
- {-86, IWL_RATE_11M_INDEX},
- {-88, IWL_RATE_5M_INDEX},
- {-90, IWL_RATE_2M_INDEX},
- {-92, IWL_RATE_1M_INDEX}
-};
-
-#define IWL_RATE_MAX_WINDOW 62
-#define IWL_RATE_FLUSH (3*HZ)
-#define IWL_RATE_WIN_FLUSH (HZ/2)
-#define IWL39_RATE_HIGH_TH 11520
-#define IWL_SUCCESS_UP_TH 8960
-#define IWL_SUCCESS_DOWN_TH 10880
-#define IWL_RATE_MIN_FAILURE_TH 6
-#define IWL_RATE_MIN_SUCCESS_TH 8
-#define IWL_RATE_DECREASE_TH 1920
-#define IWL_RATE_RETRY_TH 15
-
-static u8 iwl3945_get_rate_index_by_rssi(s32 rssi, enum ieee80211_band band)
-{
- u32 index = 0;
- u32 table_size = 0;
- struct iwl3945_tpt_entry *tpt_table = NULL;
-
- if ((rssi < IWL_MIN_RSSI_VAL) || (rssi > IWL_MAX_RSSI_VAL))
- rssi = IWL_MIN_RSSI_VAL;
-
- switch (band) {
- case IEEE80211_BAND_2GHZ:
- tpt_table = iwl3945_tpt_table_g;
- table_size = ARRAY_SIZE(iwl3945_tpt_table_g);
- break;
-
- case IEEE80211_BAND_5GHZ:
- tpt_table = iwl3945_tpt_table_a;
- table_size = ARRAY_SIZE(iwl3945_tpt_table_a);
- break;
-
- default:
- BUG();
- break;
- }
-
- while ((index < table_size) && (rssi < tpt_table[index].min_rssi))
- index++;
-
- index = min(index, (table_size - 1));
-
- return tpt_table[index].index;
-}
-
-static void iwl3945_clear_window(struct iwl3945_rate_scale_data *window)
-{
- window->data = 0;
- window->success_counter = 0;
- window->success_ratio = -1;
- window->counter = 0;
- window->average_tpt = IWL_INVALID_VALUE;
- window->stamp = 0;
-}
-
-/**
- * iwl3945_rate_scale_flush_windows - flush out the rate scale windows
- *
- * Returns the number of windows that have gathered data but were
- * not flushed. If there were any that were not flushed, then
- * reschedule the rate flushing routine.
- */
-static int iwl3945_rate_scale_flush_windows(struct iwl3945_rs_sta *rs_sta)
-{
- int unflushed = 0;
- int i;
- unsigned long flags;
- struct iwl_priv *priv __maybe_unused = rs_sta->priv;
-
- /*
- * For each rate, if we have collected data on that rate
- * and it has been more than IWL_RATE_WIN_FLUSH
- * since we flushed, clear out the gathered statistics
- */
- for (i = 0; i < IWL_RATE_COUNT_3945; i++) {
- if (!rs_sta->win[i].counter)
- continue;
-
- spin_lock_irqsave(&rs_sta->lock, flags);
- if (time_after(jiffies, rs_sta->win[i].stamp +
- IWL_RATE_WIN_FLUSH)) {
- IWL_DEBUG_RATE(priv, "flushing %d samples of rate "
- "index %d\n",
- rs_sta->win[i].counter, i);
- iwl3945_clear_window(&rs_sta->win[i]);
- } else
- unflushed++;
- spin_unlock_irqrestore(&rs_sta->lock, flags);
- }
-
- return unflushed;
-}
-
-#define IWL_RATE_FLUSH_MAX 5000 /* msec */
-#define IWL_RATE_FLUSH_MIN 50 /* msec */
-#define IWL_AVERAGE_PACKETS 1500
-
-static void iwl3945_bg_rate_scale_flush(unsigned long data)
-{
- struct iwl3945_rs_sta *rs_sta = (void *)data;
- struct iwl_priv *priv __maybe_unused = rs_sta->priv;
- int unflushed = 0;
- unsigned long flags;
- u32 packet_count, duration, pps;
-
- IWL_DEBUG_RATE(priv, "enter\n");
-
- unflushed = iwl3945_rate_scale_flush_windows(rs_sta);
-
- spin_lock_irqsave(&rs_sta->lock, flags);
-
- /* Number of packets Rx'd since last time this timer ran */
- packet_count = (rs_sta->tx_packets - rs_sta->last_tx_packets) + 1;
-
- rs_sta->last_tx_packets = rs_sta->tx_packets + 1;
-
- if (unflushed) {
- duration =
- jiffies_to_msecs(jiffies - rs_sta->last_partial_flush);
-
- IWL_DEBUG_RATE(priv, "Tx'd %d packets in %dms\n",
- packet_count, duration);
-
- /* Determine packets per second */
- if (duration)
- pps = (packet_count * 1000) / duration;
- else
- pps = 0;
-
- if (pps) {
- duration = (IWL_AVERAGE_PACKETS * 1000) / pps;
- if (duration < IWL_RATE_FLUSH_MIN)
- duration = IWL_RATE_FLUSH_MIN;
- else if (duration > IWL_RATE_FLUSH_MAX)
- duration = IWL_RATE_FLUSH_MAX;
- } else
- duration = IWL_RATE_FLUSH_MAX;
-
- rs_sta->flush_time = msecs_to_jiffies(duration);
-
- IWL_DEBUG_RATE(priv, "new flush period: %d msec ave %d\n",
- duration, packet_count);
-
- mod_timer(&rs_sta->rate_scale_flush, jiffies +
- rs_sta->flush_time);
-
- rs_sta->last_partial_flush = jiffies;
- } else {
- rs_sta->flush_time = IWL_RATE_FLUSH;
- rs_sta->flush_pending = 0;
- }
- /* If there weren't any unflushed entries, we don't schedule the timer
- * to run again */
-
- rs_sta->last_flush = jiffies;
-
- spin_unlock_irqrestore(&rs_sta->lock, flags);
-
- IWL_DEBUG_RATE(priv, "leave\n");
-}
-
-/**
- * iwl3945_collect_tx_data - Update the success/failure sliding window
- *
- * We keep a sliding window of the last 64 packets transmitted
- * at this rate. window->data contains the bitmask of successful
- * packets.
- */
-static void iwl3945_collect_tx_data(struct iwl3945_rs_sta *rs_sta,
- struct iwl3945_rate_scale_data *window,
- int success, int retries, int index)
-{
- unsigned long flags;
- s32 fail_count;
- struct iwl_priv *priv __maybe_unused = rs_sta->priv;
-
- if (!retries) {
- IWL_DEBUG_RATE(priv, "leave: retries == 0 -- should be at least 1\n");
- return;
- }
-
- spin_lock_irqsave(&rs_sta->lock, flags);
-
- /*
- * Keep track of only the latest 62 tx frame attempts in this rate's
- * history window; anything older isn't really relevant any more.
- * If we have filled up the sliding window, drop the oldest attempt;
- * if the oldest attempt (highest bit in bitmap) shows "success",
- * subtract "1" from the success counter (this is the main reason
- * we keep these bitmaps!).
-	 */
- while (retries > 0) {
- if (window->counter >= IWL_RATE_MAX_WINDOW) {
-
- /* remove earliest */
- window->counter = IWL_RATE_MAX_WINDOW - 1;
-
- if (window->data & (1ULL << (IWL_RATE_MAX_WINDOW - 1))) {
- window->data &= ~(1ULL << (IWL_RATE_MAX_WINDOW - 1));
- window->success_counter--;
- }
- }
-
- /* Increment frames-attempted counter */
- window->counter++;
-
- /* Shift bitmap by one frame (throw away oldest history),
- * OR in "1", and increment "success" if this
- * frame was successful. */
- window->data <<= 1;
- if (success > 0) {
- window->success_counter++;
- window->data |= 0x1;
- success--;
- }
-
- retries--;
- }
-
- /* Calculate current success ratio, avoid divide-by-0! */
- if (window->counter > 0)
- window->success_ratio = 128 * (100 * window->success_counter)
- / window->counter;
- else
- window->success_ratio = IWL_INVALID_VALUE;
-
- fail_count = window->counter - window->success_counter;
-
- /* Calculate average throughput, if we have enough history. */
- if ((fail_count >= IWL_RATE_MIN_FAILURE_TH) ||
- (window->success_counter >= IWL_RATE_MIN_SUCCESS_TH))
- window->average_tpt = ((window->success_ratio *
- rs_sta->expected_tpt[index] + 64) / 128);
- else
- window->average_tpt = IWL_INVALID_VALUE;
-
- /* Tag this window as having been updated */
- window->stamp = jiffies;
-
- spin_unlock_irqrestore(&rs_sta->lock, flags);
-
-}
-
-/*
- * Called after adding a new station to initialize rate scaling
- */
-void iwl3945_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_id)
-{
- struct ieee80211_hw *hw = priv->hw;
- struct ieee80211_conf *conf = &priv->hw->conf;
- struct iwl3945_sta_priv *psta;
- struct iwl3945_rs_sta *rs_sta;
- struct ieee80211_supported_band *sband;
- int i;
-
- IWL_DEBUG_INFO(priv, "enter\n");
- if (sta_id == priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id)
- goto out;
-
- psta = (struct iwl3945_sta_priv *) sta->drv_priv;
- rs_sta = &psta->rs_sta;
- sband = hw->wiphy->bands[conf->channel->band];
-
- rs_sta->priv = priv;
-
- rs_sta->start_rate = IWL_RATE_INVALID;
-
- /* default to just 802.11b */
- rs_sta->expected_tpt = iwl3945_expected_tpt_b;
-
- rs_sta->last_partial_flush = jiffies;
- rs_sta->last_flush = jiffies;
- rs_sta->flush_time = IWL_RATE_FLUSH;
- rs_sta->last_tx_packets = 0;
-
- rs_sta->rate_scale_flush.data = (unsigned long)rs_sta;
- rs_sta->rate_scale_flush.function = iwl3945_bg_rate_scale_flush;
-
- for (i = 0; i < IWL_RATE_COUNT_3945; i++)
- iwl3945_clear_window(&rs_sta->win[i]);
-
- /* TODO: what is a good starting rate for STA? About middle? Maybe not
- * the lowest or the highest rate.. Could consider using RSSI from
- * previous packets? Need to have IEEE 802.1X auth succeed immediately
- * after assoc.. */
-
- for (i = sband->n_bitrates - 1; i >= 0; i--) {
- if (sta->supp_rates[sband->band] & (1 << i)) {
- rs_sta->last_txrate_idx = i;
- break;
- }
- }
-
- priv->_3945.sta_supp_rates = sta->supp_rates[sband->band];
-	/* For the 5 GHz band it starts at IWL_FIRST_OFDM_RATE */
- if (sband->band == IEEE80211_BAND_5GHZ) {
- rs_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
- priv->_3945.sta_supp_rates = priv->_3945.sta_supp_rates <<
- IWL_FIRST_OFDM_RATE;
- }
-
-out:
- priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
-
- IWL_DEBUG_INFO(priv, "leave\n");
-}
-
-static void *iwl3945_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
-{
- return hw->priv;
-}
-
-/* rate scale requires free function to be implemented */
-static void iwl3945_rs_free(void *priv)
-{
- return;
-}
-
-static void *iwl3945_rs_alloc_sta(void *iwl_priv, struct ieee80211_sta *sta, gfp_t gfp)
-{
- struct iwl3945_rs_sta *rs_sta;
- struct iwl3945_sta_priv *psta = (void *) sta->drv_priv;
- struct iwl_priv *priv __maybe_unused = iwl_priv;
-
- IWL_DEBUG_RATE(priv, "enter\n");
-
- rs_sta = &psta->rs_sta;
-
- spin_lock_init(&rs_sta->lock);
- init_timer(&rs_sta->rate_scale_flush);
-
- IWL_DEBUG_RATE(priv, "leave\n");
-
- return rs_sta;
-}
-
-static void iwl3945_rs_free_sta(void *iwl_priv, struct ieee80211_sta *sta,
- void *priv_sta)
-{
- struct iwl3945_rs_sta *rs_sta = priv_sta;
-
- /*
- * Be careful not to use any members of iwl3945_rs_sta (like trying
- * to use iwl_priv to print out debugging) since it may not be fully
- * initialized at this point.
- */
- del_timer_sync(&rs_sta->rate_scale_flush);
-}
-
-
-/**
- * iwl3945_rs_tx_status - Update rate control values based on Tx results
- *
- * NOTE: Uses iwl_priv->retry_rate for the # of retries attempted by
- * the hardware for each rate.
- */
-static void iwl3945_rs_tx_status(void *priv_rate, struct ieee80211_supported_band *sband,
- struct ieee80211_sta *sta, void *priv_sta,
- struct sk_buff *skb)
-{
- s8 retries = 0, current_count;
- int scale_rate_index, first_index, last_index;
- unsigned long flags;
- struct iwl_priv *priv = (struct iwl_priv *)priv_rate;
- struct iwl3945_rs_sta *rs_sta = priv_sta;
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-
- IWL_DEBUG_RATE(priv, "enter\n");
-
- retries = info->status.rates[0].count;
- /* Sanity Check for retries */
- if (retries > IWL_RATE_RETRY_TH)
- retries = IWL_RATE_RETRY_TH;
-
- first_index = sband->bitrates[info->status.rates[0].idx].hw_value;
- if ((first_index < 0) || (first_index >= IWL_RATE_COUNT_3945)) {
- IWL_DEBUG_RATE(priv, "leave: Rate out of bounds: %d\n", first_index);
- return;
- }
-
- if (!priv_sta) {
- IWL_DEBUG_RATE(priv, "leave: No STA priv data to update!\n");
- return;
- }
-
- /* Treat uninitialized rate scaling data same as non-existing. */
- if (!rs_sta->priv) {
- IWL_DEBUG_RATE(priv, "leave: STA priv data uninitialized!\n");
- return;
- }
-
-
- rs_sta->tx_packets++;
-
- scale_rate_index = first_index;
- last_index = first_index;
-
- /*
- * Update the window for each rate. We determine which rates
- * were Tx'd based on the total number of retries vs. the number
- * of retries configured for each rate -- currently set to the
- * priv value 'retry_rate' vs. rate specific
- *
- * On exit from this while loop last_index indicates the rate
- * at which the frame was finally transmitted (or failed if no
- * ACK)
- */
- while (retries > 1) {
- if ((retries - 1) < priv->retry_rate) {
- current_count = (retries - 1);
- last_index = scale_rate_index;
- } else {
- current_count = priv->retry_rate;
- last_index = iwl3945_rs_next_rate(priv,
- scale_rate_index);
- }
-
- /* Update this rate accounting for as many retries
- * as was used for it (per current_count) */
- iwl3945_collect_tx_data(rs_sta,
- &rs_sta->win[scale_rate_index],
- 0, current_count, scale_rate_index);
- IWL_DEBUG_RATE(priv, "Update rate %d for %d retries.\n",
- scale_rate_index, current_count);
-
- retries -= current_count;
-
- scale_rate_index = last_index;
- }
-
-
- /* Update the last index window with success/failure based on ACK */
- IWL_DEBUG_RATE(priv, "Update rate %d with %s.\n",
- last_index,
- (info->flags & IEEE80211_TX_STAT_ACK) ?
- "success" : "failure");
- iwl3945_collect_tx_data(rs_sta,
- &rs_sta->win[last_index],
- info->flags & IEEE80211_TX_STAT_ACK, 1, last_index);
-
- /* We updated the rate scale window -- if its been more than
- * flush_time since the last run, schedule the flush
- * again */
- spin_lock_irqsave(&rs_sta->lock, flags);
-
- if (!rs_sta->flush_pending &&
- time_after(jiffies, rs_sta->last_flush +
- rs_sta->flush_time)) {
-
- rs_sta->last_partial_flush = jiffies;
- rs_sta->flush_pending = 1;
- mod_timer(&rs_sta->rate_scale_flush,
- jiffies + rs_sta->flush_time);
- }
-
- spin_unlock_irqrestore(&rs_sta->lock, flags);
-
- IWL_DEBUG_RATE(priv, "leave\n");
-}
-
-static u16 iwl3945_get_adjacent_rate(struct iwl3945_rs_sta *rs_sta,
- u8 index, u16 rate_mask, enum ieee80211_band band)
-{
- u8 high = IWL_RATE_INVALID;
- u8 low = IWL_RATE_INVALID;
- struct iwl_priv *priv __maybe_unused = rs_sta->priv;
-
- /* 802.11A walks to the next literal adjacent rate in
- * the rate table */
- if (unlikely(band == IEEE80211_BAND_5GHZ)) {
- int i;
- u32 mask;
-
- /* Find the previous rate that is in the rate mask */
- i = index - 1;
- for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
- if (rate_mask & mask) {
- low = i;
- break;
- }
- }
-
- /* Find the next rate that is in the rate mask */
- i = index + 1;
- for (mask = (1 << i); i < IWL_RATE_COUNT_3945;
- i++, mask <<= 1) {
- if (rate_mask & mask) {
- high = i;
- break;
- }
- }
-
- return (high << 8) | low;
- }
-
- low = index;
- while (low != IWL_RATE_INVALID) {
- if (rs_sta->tgg)
- low = iwl3945_rates[low].prev_rs_tgg;
- else
- low = iwl3945_rates[low].prev_rs;
- if (low == IWL_RATE_INVALID)
- break;
- if (rate_mask & (1 << low))
- break;
- IWL_DEBUG_RATE(priv, "Skipping masked lower rate: %d\n", low);
- }
-
- high = index;
- while (high != IWL_RATE_INVALID) {
- if (rs_sta->tgg)
- high = iwl3945_rates[high].next_rs_tgg;
- else
- high = iwl3945_rates[high].next_rs;
- if (high == IWL_RATE_INVALID)
- break;
- if (rate_mask & (1 << high))
- break;
- IWL_DEBUG_RATE(priv, "Skipping masked higher rate: %d\n", high);
- }
-
- return (high << 8) | low;
-}
-
-/**
- * iwl3945_rs_get_rate - find the rate for the requested packet
- *
- * Returns the ieee80211_rate structure allocated by the driver.
- *
- * The rate control algorithm has no internal mapping between hw_mode's
- * rate ordering and the rate ordering used by the rate control algorithm.
- *
- * The rate control algorithm uses a single table of rates that goes across
- * the entire A/B/G spectrum vs. being limited to just one particular
- * hw_mode.
- *
- * As such, we can't convert the index obtained below into the hw_mode's
- * rate table and must reference the driver allocated rate table
- *
- */
-static void iwl3945_rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
- void *priv_sta, struct ieee80211_tx_rate_control *txrc)
-{
- struct ieee80211_supported_band *sband = txrc->sband;
- struct sk_buff *skb = txrc->skb;
- u8 low = IWL_RATE_INVALID;
- u8 high = IWL_RATE_INVALID;
- u16 high_low;
- int index;
- struct iwl3945_rs_sta *rs_sta = priv_sta;
- struct iwl3945_rate_scale_data *window = NULL;
- int current_tpt = IWL_INVALID_VALUE;
- int low_tpt = IWL_INVALID_VALUE;
- int high_tpt = IWL_INVALID_VALUE;
- u32 fail_count;
- s8 scale_action = 0;
- unsigned long flags;
- u16 rate_mask;
- s8 max_rate_idx = -1;
- struct iwl_priv *priv __maybe_unused = (struct iwl_priv *)priv_r;
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-
- IWL_DEBUG_RATE(priv, "enter\n");
-
- /* Treat uninitialized rate scaling data same as non-existing. */
- if (rs_sta && !rs_sta->priv) {
- IWL_DEBUG_RATE(priv, "Rate scaling information not initialized yet.\n");
- priv_sta = NULL;
- }
-
- if (rate_control_send_low(sta, priv_sta, txrc))
- return;
-
- rate_mask = sta->supp_rates[sband->band];
-
- /* get user max rate if set */
- max_rate_idx = txrc->max_rate_idx;
- if ((sband->band == IEEE80211_BAND_5GHZ) && (max_rate_idx != -1))
- max_rate_idx += IWL_FIRST_OFDM_RATE;
- if ((max_rate_idx < 0) || (max_rate_idx >= IWL_RATE_COUNT))
- max_rate_idx = -1;
-
- index = min(rs_sta->last_txrate_idx & 0xffff, IWL_RATE_COUNT_3945 - 1);
-
- if (sband->band == IEEE80211_BAND_5GHZ)
- rate_mask = rate_mask << IWL_FIRST_OFDM_RATE;
-
- spin_lock_irqsave(&rs_sta->lock, flags);
-
-	/* for a recent assoc, choose the best rate based on
-	 * the rssi value
-	 */
- if (rs_sta->start_rate != IWL_RATE_INVALID) {
- if (rs_sta->start_rate < index &&
- (rate_mask & (1 << rs_sta->start_rate)))
- index = rs_sta->start_rate;
- rs_sta->start_rate = IWL_RATE_INVALID;
- }
-
- /* force user max rate if set by user */
- if ((max_rate_idx != -1) && (max_rate_idx < index)) {
- if (rate_mask & (1 << max_rate_idx))
- index = max_rate_idx;
- }
-
- window = &(rs_sta->win[index]);
-
- fail_count = window->counter - window->success_counter;
-
- if (((fail_count < IWL_RATE_MIN_FAILURE_TH) &&
- (window->success_counter < IWL_RATE_MIN_SUCCESS_TH))) {
- spin_unlock_irqrestore(&rs_sta->lock, flags);
-
- IWL_DEBUG_RATE(priv, "Invalid average_tpt on rate %d: "
- "counter: %d, success_counter: %d, "
- "expected_tpt is %sNULL\n",
- index,
- window->counter,
- window->success_counter,
- rs_sta->expected_tpt ? "not " : "");
-
- /* Can't calculate this yet; not enough history */
- window->average_tpt = IWL_INVALID_VALUE;
- goto out;
-
- }
-
- current_tpt = window->average_tpt;
-
- high_low = iwl3945_get_adjacent_rate(rs_sta, index, rate_mask,
- sband->band);
- low = high_low & 0xff;
- high = (high_low >> 8) & 0xff;
-
-	/* If the user set a max rate, don't allow rates higher than that constraint */
- if ((max_rate_idx != -1) && (max_rate_idx < high))
- high = IWL_RATE_INVALID;
-
- /* Collect Measured throughputs of adjacent rates */
- if (low != IWL_RATE_INVALID)
- low_tpt = rs_sta->win[low].average_tpt;
-
- if (high != IWL_RATE_INVALID)
- high_tpt = rs_sta->win[high].average_tpt;
-
- spin_unlock_irqrestore(&rs_sta->lock, flags);
-
- scale_action = 0;
-
-	/* Low success ratio, need to drop the rate */
- if ((window->success_ratio < IWL_RATE_DECREASE_TH) || !current_tpt) {
- IWL_DEBUG_RATE(priv, "decrease rate because of low success_ratio\n");
- scale_action = -1;
- /* No throughput measured yet for adjacent rates,
- * try increase */
- } else if ((low_tpt == IWL_INVALID_VALUE) &&
- (high_tpt == IWL_INVALID_VALUE)) {
-
- if (high != IWL_RATE_INVALID && window->success_ratio >= IWL_RATE_INCREASE_TH)
- scale_action = 1;
- else if (low != IWL_RATE_INVALID)
- scale_action = 0;
-
- /* Both adjacent throughputs are measured, but neither one has
- * better throughput; we're using the best rate, don't change
- * it! */
- } else if ((low_tpt != IWL_INVALID_VALUE) &&
- (high_tpt != IWL_INVALID_VALUE) &&
- (low_tpt < current_tpt) && (high_tpt < current_tpt)) {
-
- IWL_DEBUG_RATE(priv, "No action -- low [%d] & high [%d] < "
- "current_tpt [%d]\n",
- low_tpt, high_tpt, current_tpt);
- scale_action = 0;
-
- /* At least one of the rates has better throughput */
- } else {
- if (high_tpt != IWL_INVALID_VALUE) {
-
- /* High rate has better throughput, Increase
- * rate */
- if (high_tpt > current_tpt &&
- window->success_ratio >= IWL_RATE_INCREASE_TH)
- scale_action = 1;
- else {
- IWL_DEBUG_RATE(priv,
- "decrease rate because of high tpt\n");
- scale_action = 0;
- }
- } else if (low_tpt != IWL_INVALID_VALUE) {
- if (low_tpt > current_tpt) {
- IWL_DEBUG_RATE(priv,
- "decrease rate because of low tpt\n");
- scale_action = -1;
- } else if (window->success_ratio >= IWL_RATE_INCREASE_TH) {
-				/* Lower rate is not better, but the success
-				 * ratio is high enough to try a higher rate */
- scale_action = 1;
- }
- }
- }
-
- /* Sanity check; asked for decrease, but success rate or throughput
- * has been good at old rate. Don't change it. */
- if ((scale_action == -1) && (low != IWL_RATE_INVALID) &&
- ((window->success_ratio > IWL_RATE_HIGH_TH) ||
- (current_tpt > (100 * rs_sta->expected_tpt[low]))))
- scale_action = 0;
-
- switch (scale_action) {
- case -1:
-
-		/* Decrease rate */
- if (low != IWL_RATE_INVALID)
- index = low;
- break;
-
- case 1:
- /* Increase rate */
- if (high != IWL_RATE_INVALID)
- index = high;
-
- break;
-
- case 0:
- default:
- /* No change */
- break;
- }
-
- IWL_DEBUG_RATE(priv, "Selected %d (action %d) - low %d high %d\n",
- index, scale_action, low, high);
-
- out:
-
- if (sband->band == IEEE80211_BAND_5GHZ) {
- if (WARN_ON_ONCE(index < IWL_FIRST_OFDM_RATE))
- index = IWL_FIRST_OFDM_RATE;
- rs_sta->last_txrate_idx = index;
- info->control.rates[0].idx = index - IWL_FIRST_OFDM_RATE;
- } else {
- rs_sta->last_txrate_idx = index;
- info->control.rates[0].idx = rs_sta->last_txrate_idx;
- }
-
- IWL_DEBUG_RATE(priv, "leave: %d\n", index);
-}
-
-#ifdef CONFIG_MAC80211_DEBUGFS
-static int iwl3945_open_file_generic(struct inode *inode, struct file *file)
-{
- file->private_data = inode->i_private;
- return 0;
-}
-
-static ssize_t iwl3945_sta_dbgfs_stats_table_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- char *buff;
- int desc = 0;
- int j;
- ssize_t ret;
- struct iwl3945_rs_sta *lq_sta = file->private_data;
-
- buff = kmalloc(1024, GFP_KERNEL);
- if (!buff)
- return -ENOMEM;
-
- desc += sprintf(buff + desc, "tx packets=%d last rate index=%d\n"
- "rate=0x%X flush time %d\n",
- lq_sta->tx_packets,
- lq_sta->last_txrate_idx,
- lq_sta->start_rate, jiffies_to_msecs(lq_sta->flush_time));
- for (j = 0; j < IWL_RATE_COUNT_3945; j++) {
- desc += sprintf(buff+desc,
- "counter=%d success=%d %%=%d\n",
- lq_sta->win[j].counter,
- lq_sta->win[j].success_counter,
- lq_sta->win[j].success_ratio);
- }
- ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
- kfree(buff);
- return ret;
-}
-
-static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
- .read = iwl3945_sta_dbgfs_stats_table_read,
- .open = iwl3945_open_file_generic,
- .llseek = default_llseek,
-};
-
-static void iwl3945_add_debugfs(void *priv, void *priv_sta,
- struct dentry *dir)
-{
- struct iwl3945_rs_sta *lq_sta = priv_sta;
-
- lq_sta->rs_sta_dbgfs_stats_table_file =
- debugfs_create_file("rate_stats_table", 0600, dir,
- lq_sta, &rs_sta_dbgfs_stats_table_ops);
-
-}
-
-static void iwl3945_remove_debugfs(void *priv, void *priv_sta)
-{
- struct iwl3945_rs_sta *lq_sta = priv_sta;
- debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
-}
-#endif
-
-/*
- * Initialization of rate scaling information is done by driver after
- * the station is added. Since mac80211 calls this function before a
- * station is added we ignore it.
- */
-static void iwl3945_rs_rate_init_stub(void *priv_r,
- struct ieee80211_supported_band *sband,
- struct ieee80211_sta *sta, void *priv_sta)
-{
-}
-
-static struct rate_control_ops rs_ops = {
- .module = NULL,
- .name = RS_NAME,
- .tx_status = iwl3945_rs_tx_status,
- .get_rate = iwl3945_rs_get_rate,
- .rate_init = iwl3945_rs_rate_init_stub,
- .alloc = iwl3945_rs_alloc,
- .free = iwl3945_rs_free,
- .alloc_sta = iwl3945_rs_alloc_sta,
- .free_sta = iwl3945_rs_free_sta,
-#ifdef CONFIG_MAC80211_DEBUGFS
- .add_sta_debugfs = iwl3945_add_debugfs,
- .remove_sta_debugfs = iwl3945_remove_debugfs,
-#endif
-
-};
-void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
-{
- struct iwl_priv *priv = hw->priv;
- s32 rssi = 0;
- unsigned long flags;
- struct iwl3945_rs_sta *rs_sta;
- struct ieee80211_sta *sta;
- struct iwl3945_sta_priv *psta;
-
- IWL_DEBUG_RATE(priv, "enter\n");
-
- rcu_read_lock();
-
- sta = ieee80211_find_sta(priv->contexts[IWL_RXON_CTX_BSS].vif,
- priv->stations[sta_id].sta.sta.addr);
- if (!sta) {
- IWL_DEBUG_RATE(priv, "Unable to find station to initialize rate scaling.\n");
- rcu_read_unlock();
- return;
- }
-
- psta = (void *) sta->drv_priv;
- rs_sta = &psta->rs_sta;
-
- spin_lock_irqsave(&rs_sta->lock, flags);
-
- rs_sta->tgg = 0;
- switch (priv->band) {
- case IEEE80211_BAND_2GHZ:
- /* TODO: this always does G, not a regression */
- if (priv->contexts[IWL_RXON_CTX_BSS].active.flags &
- RXON_FLG_TGG_PROTECT_MSK) {
- rs_sta->tgg = 1;
- rs_sta->expected_tpt = iwl3945_expected_tpt_g_prot;
- } else
- rs_sta->expected_tpt = iwl3945_expected_tpt_g;
- break;
-
- case IEEE80211_BAND_5GHZ:
- rs_sta->expected_tpt = iwl3945_expected_tpt_a;
- break;
- case IEEE80211_NUM_BANDS:
- BUG();
- break;
- }
-
- spin_unlock_irqrestore(&rs_sta->lock, flags);
-
- rssi = priv->_3945.last_rx_rssi;
- if (rssi == 0)
- rssi = IWL_MIN_RSSI_VAL;
-
- IWL_DEBUG_RATE(priv, "Network RSSI: %d\n", rssi);
-
- rs_sta->start_rate = iwl3945_get_rate_index_by_rssi(rssi, priv->band);
-
- IWL_DEBUG_RATE(priv, "leave: rssi %d assign rate index: "
- "%d (plcp 0x%x)\n", rssi, rs_sta->start_rate,
- iwl3945_rates[rs_sta->start_rate].plcp);
- rcu_read_unlock();
-}
-
-int iwl3945_rate_control_register(void)
-{
- return ieee80211_rate_control_register(&rs_ops);
-}
-
-void iwl3945_rate_control_unregister(void)
-{
- ieee80211_rate_control_unregister(&rs_ops);
-}
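
In the removed iwl3945_collect_tx_data() above, success_ratio is a percentage scaled by 128 (so IWL_RATE_DECREASE_TH, 1920, is 15% and IWL39_RATE_HIGH_TH, 11520, is 90%), and average_tpt works out to the rate's expected_tpt scaled by the success percentage, which is why iwl3945_rs_get_rate() compares it against 100 * expected_tpt. The helper below is an illustrative re-statement of that fixed-point math, not code from the deleted file.

/* Worked example of the per-window fixed-point math. */
static s32 example_window_tpt(s32 counter, s32 success_counter, s32 expected_tpt)
{
	/* e.g. 50 ACKs out of 62 attempts at 54M (expected_tpt 202):
	 * ratio = 128 * (100 * 50) / 62 = 10322  (~80.6%)
	 * tpt   = (10322 * 202 + 64) / 128 = 16289 */
	s32 ratio = 128 * (100 * success_counter) / counter;

	return (ratio * expected_tpt + 64) / 128;
}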
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945.c b/drivers/net/wireless/iwlegacy/iwl-3945.c
deleted file mode 100644
index f7c0a7438476..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-3945.c
+++ /dev/null
@@ -1,2741 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/firmware.h>
-#include <linux/etherdevice.h>
-#include <asm/unaligned.h>
-#include <net/mac80211.h>
-
-#include "iwl-fh.h"
-#include "iwl-3945-fh.h"
-#include "iwl-commands.h"
-#include "iwl-sta.h"
-#include "iwl-3945.h"
-#include "iwl-eeprom.h"
-#include "iwl-core.h"
-#include "iwl-helpers.h"
-#include "iwl-led.h"
-#include "iwl-3945-led.h"
-#include "iwl-3945-debugfs.h"
-
-#define IWL_DECLARE_RATE_INFO(r, ip, in, rp, rn, pp, np) \
- [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
- IWL_RATE_##r##M_IEEE, \
- IWL_RATE_##ip##M_INDEX, \
- IWL_RATE_##in##M_INDEX, \
- IWL_RATE_##rp##M_INDEX, \
- IWL_RATE_##rn##M_INDEX, \
- IWL_RATE_##pp##M_INDEX, \
- IWL_RATE_##np##M_INDEX, \
- IWL_RATE_##r##M_INDEX_TABLE, \
- IWL_RATE_##ip##M_INDEX_TABLE }
-
-/*
- * Parameter order:
- * rate, prev rate, next rate, prev tgg rate, next tgg rate
- *
- * If there isn't a valid next or previous rate then INV is used which
- * maps to IWL_RATE_INVALID
- *
- */
-const struct iwl3945_rate_info iwl3945_rates[IWL_RATE_COUNT_3945] = {
- IWL_DECLARE_RATE_INFO(1, INV, 2, INV, 2, INV, 2), /* 1mbps */
- IWL_DECLARE_RATE_INFO(2, 1, 5, 1, 5, 1, 5), /* 2mbps */
-	IWL_DECLARE_RATE_INFO(5, 2, 6, 2, 11, 2, 11),    /* 5.5mbps */
- IWL_DECLARE_RATE_INFO(11, 9, 12, 5, 12, 5, 18), /* 11mbps */
- IWL_DECLARE_RATE_INFO(6, 5, 9, 5, 11, 5, 11), /* 6mbps */
- IWL_DECLARE_RATE_INFO(9, 6, 11, 5, 11, 5, 11), /* 9mbps */
- IWL_DECLARE_RATE_INFO(12, 11, 18, 11, 18, 11, 18), /* 12mbps */
- IWL_DECLARE_RATE_INFO(18, 12, 24, 12, 24, 11, 24), /* 18mbps */
- IWL_DECLARE_RATE_INFO(24, 18, 36, 18, 36, 18, 36), /* 24mbps */
- IWL_DECLARE_RATE_INFO(36, 24, 48, 24, 48, 24, 48), /* 36mbps */
- IWL_DECLARE_RATE_INFO(48, 36, 54, 36, 54, 36, 54), /* 48mbps */
- IWL_DECLARE_RATE_INFO(54, 48, INV, 48, INV, 48, INV),/* 54mbps */
-};
-
-static inline u8 iwl3945_get_prev_ieee_rate(u8 rate_index)
-{
- u8 rate = iwl3945_rates[rate_index].prev_ieee;
-
- if (rate == IWL_RATE_INVALID)
- rate = rate_index;
- return rate;
-}
-
-/* 1 = enable the iwl3945_disable_events() function */
-#define IWL_EVT_DISABLE (0)
-#define IWL_EVT_DISABLE_SIZE (1532/32)
-
-/**
- * iwl3945_disable_events - Disable selected events in uCode event log
- *
- * Disable an event by writing "1"s into "disable"
- * bitmap in SRAM. Bit position corresponds to Event # (id/type).
- * Default values of 0 enable uCode events to be logged.
- * Use only for special debugging.  This function is just a placeholder as-is,
- * you'll need to provide the special bits! ...
- * ... and set IWL_EVT_DISABLE to 1. */
-void iwl3945_disable_events(struct iwl_priv *priv)
-{
- int i;
- u32 base; /* SRAM address of event log header */
- u32 disable_ptr; /* SRAM address of event-disable bitmap array */
- u32 array_size; /* # of u32 entries in array */
- static const u32 evt_disable[IWL_EVT_DISABLE_SIZE] = {
- 0x00000000, /* 31 - 0 Event id numbers */
- 0x00000000, /* 63 - 32 */
- 0x00000000, /* 95 - 64 */
- 0x00000000, /* 127 - 96 */
- 0x00000000, /* 159 - 128 */
- 0x00000000, /* 191 - 160 */
- 0x00000000, /* 223 - 192 */
- 0x00000000, /* 255 - 224 */
- 0x00000000, /* 287 - 256 */
- 0x00000000, /* 319 - 288 */
- 0x00000000, /* 351 - 320 */
- 0x00000000, /* 383 - 352 */
- 0x00000000, /* 415 - 384 */
- 0x00000000, /* 447 - 416 */
- 0x00000000, /* 479 - 448 */
- 0x00000000, /* 511 - 480 */
- 0x00000000, /* 543 - 512 */
- 0x00000000, /* 575 - 544 */
- 0x00000000, /* 607 - 576 */
- 0x00000000, /* 639 - 608 */
- 0x00000000, /* 671 - 640 */
- 0x00000000, /* 703 - 672 */
- 0x00000000, /* 735 - 704 */
- 0x00000000, /* 767 - 736 */
- 0x00000000, /* 799 - 768 */
- 0x00000000, /* 831 - 800 */
- 0x00000000, /* 863 - 832 */
- 0x00000000, /* 895 - 864 */
- 0x00000000, /* 927 - 896 */
- 0x00000000, /* 959 - 928 */
- 0x00000000, /* 991 - 960 */
- 0x00000000, /* 1023 - 992 */
- 0x00000000, /* 1055 - 1024 */
- 0x00000000, /* 1087 - 1056 */
- 0x00000000, /* 1119 - 1088 */
- 0x00000000, /* 1151 - 1120 */
- 0x00000000, /* 1183 - 1152 */
- 0x00000000, /* 1215 - 1184 */
- 0x00000000, /* 1247 - 1216 */
- 0x00000000, /* 1279 - 1248 */
- 0x00000000, /* 1311 - 1280 */
- 0x00000000, /* 1343 - 1312 */
- 0x00000000, /* 1375 - 1344 */
- 0x00000000, /* 1407 - 1376 */
- 0x00000000, /* 1439 - 1408 */
- 0x00000000, /* 1471 - 1440 */
- 0x00000000, /* 1503 - 1472 */
- };
-
- base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
- if (!iwl3945_hw_valid_rtc_data_addr(base)) {
- IWL_ERR(priv, "Invalid event log pointer 0x%08X\n", base);
- return;
- }
-
- disable_ptr = iwl_legacy_read_targ_mem(priv, base + (4 * sizeof(u32)));
- array_size = iwl_legacy_read_targ_mem(priv, base + (5 * sizeof(u32)));
-
- if (IWL_EVT_DISABLE && (array_size == IWL_EVT_DISABLE_SIZE)) {
- IWL_DEBUG_INFO(priv, "Disabling selected uCode log events at 0x%x\n",
- disable_ptr);
- for (i = 0; i < IWL_EVT_DISABLE_SIZE; i++)
- iwl_legacy_write_targ_mem(priv,
- disable_ptr + (i * sizeof(u32)),
- evt_disable[i]);
-
- } else {
- IWL_DEBUG_INFO(priv, "Selected uCode log events may be disabled\n");
- IWL_DEBUG_INFO(priv, " by writing \"1\"s into disable bitmap\n");
- IWL_DEBUG_INFO(priv, " in SRAM at 0x%x, size %d u32s\n",
- disable_ptr, array_size);
- }
-
-}
-
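/*
 * Editorial sketch (not part of the driver): the bitmap above is laid out
 * as 32 event IDs per u32 word, so disabling event N means setting bit
 * (N % 32) in word (N / 32). A minimal, standalone helper illustrating
 * that indexing, assuming that layout; the helper name and example values
 * below are made up for illustration.
 */
#include <stdint.h>
#include <stddef.h>

static void evt_bitmap_disable(uint32_t *bitmap, size_t n_words,
			       unsigned int event_id)
{
	size_t word = event_id / 32;		/* which u32 holds this event */
	unsigned int bit = event_id % 32;	/* bit position inside that u32 */

	if (word < n_words)
		bitmap[word] |= 1u << bit;	/* "1" disables the event */
}

/* e.g. evt_bitmap_disable(my_bitmap, 48, 100) sets bit 4 of word 3 */
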
-static int iwl3945_hwrate_to_plcp_idx(u8 plcp)
-{
- int idx;
-
- for (idx = 0; idx < IWL_RATE_COUNT_3945; idx++)
- if (iwl3945_rates[idx].plcp == plcp)
- return idx;
- return -1;
-}
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-#define TX_STATUS_ENTRY(x) case TX_3945_STATUS_FAIL_ ## x: return #x
-
-static const char *iwl3945_get_tx_fail_reason(u32 status)
-{
- switch (status & TX_STATUS_MSK) {
- case TX_3945_STATUS_SUCCESS:
- return "SUCCESS";
- TX_STATUS_ENTRY(SHORT_LIMIT);
- TX_STATUS_ENTRY(LONG_LIMIT);
- TX_STATUS_ENTRY(FIFO_UNDERRUN);
- TX_STATUS_ENTRY(MGMNT_ABORT);
- TX_STATUS_ENTRY(NEXT_FRAG);
- TX_STATUS_ENTRY(LIFE_EXPIRE);
- TX_STATUS_ENTRY(DEST_PS);
- TX_STATUS_ENTRY(ABORTED);
- TX_STATUS_ENTRY(BT_RETRY);
- TX_STATUS_ENTRY(STA_INVALID);
- TX_STATUS_ENTRY(FRAG_DROPPED);
- TX_STATUS_ENTRY(TID_DISABLE);
- TX_STATUS_ENTRY(FRAME_FLUSHED);
- TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL);
- TX_STATUS_ENTRY(TX_LOCKED);
- TX_STATUS_ENTRY(NO_BEACON_ON_RADAR);
- }
-
- return "UNKNOWN";
-}
-#else
-static inline const char *iwl3945_get_tx_fail_reason(u32 status)
-{
- return "";
-}
-#endif
-
-/*
- * Get the previous IEEE rate from the rate scale table.
- * For A and B mode we need to override the previous
- * value.
- */
-int iwl3945_rs_next_rate(struct iwl_priv *priv, int rate)
-{
- int next_rate = iwl3945_get_prev_ieee_rate(rate);
-
- switch (priv->band) {
- case IEEE80211_BAND_5GHZ:
- if (rate == IWL_RATE_12M_INDEX)
- next_rate = IWL_RATE_9M_INDEX;
- else if (rate == IWL_RATE_6M_INDEX)
- next_rate = IWL_RATE_6M_INDEX;
- break;
- case IEEE80211_BAND_2GHZ:
- if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) &&
- iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
- if (rate == IWL_RATE_11M_INDEX)
- next_rate = IWL_RATE_5M_INDEX;
- }
- break;
-
- default:
- break;
- }
-
- return next_rate;
-}
-
-
-/**
- * iwl3945_tx_queue_reclaim - Reclaim Tx queue entries already Tx'd
- *
- * When the FW advances the 'R' index, all entries between the old and new
- * 'R' index need to be reclaimed. As a result, free space becomes
- * available. If there is enough free space (> low mark), wake the stack
- * that feeds us.
- */
-static void iwl3945_tx_queue_reclaim(struct iwl_priv *priv,
- int txq_id, int index)
-{
- struct iwl_tx_queue *txq = &priv->txq[txq_id];
- struct iwl_queue *q = &txq->q;
- struct iwl_tx_info *tx_info;
-
- BUG_ON(txq_id == IWL39_CMD_QUEUE_NUM);
-
- for (index = iwl_legacy_queue_inc_wrap(index, q->n_bd);
- q->read_ptr != index;
- q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) {
-
- tx_info = &txq->txb[txq->q.read_ptr];
- ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb);
- tx_info->skb = NULL;
- priv->cfg->ops->lib->txq_free_tfd(priv, txq);
- }
-
- if (iwl_legacy_queue_space(q) > q->low_mark && (txq_id >= 0) &&
- (txq_id != IWL39_CMD_QUEUE_NUM) &&
- priv->mac80211_registered)
- iwl_legacy_wake_queue(priv, txq);
-}
-
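/*
 * Editorial sketch (not part of the driver): the wrap-around walk used by
 * iwl3945_tx_queue_reclaim() above, reduced to plain integers. inc_wrap()
 * is a stand-in modeled on iwl_legacy_queue_inc_wrap(); the names and the
 * counting variant are illustrative only.
 */
static int inc_wrap(int index, int n_bd)
{
	return ++index == n_bd ? 0 : index;	/* wrap at ring size */
}

/* count how many entries would be reclaimed up to and including fw_index */
static int reclaim_count(int read_ptr, int fw_index, int n_bd)
{
	int freed = 0;

	for (fw_index = inc_wrap(fw_index, n_bd);
	     read_ptr != fw_index;
	     read_ptr = inc_wrap(read_ptr, n_bd))
		freed++;

	return freed;
}
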
-/**
- * iwl3945_rx_reply_tx - Handle Tx response
- */
-static void iwl3945_rx_reply_tx(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- u16 sequence = le16_to_cpu(pkt->hdr.sequence);
- int txq_id = SEQ_TO_QUEUE(sequence);
- int index = SEQ_TO_INDEX(sequence);
- struct iwl_tx_queue *txq = &priv->txq[txq_id];
- struct ieee80211_tx_info *info;
- struct iwl3945_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
- u32 status = le32_to_cpu(tx_resp->status);
- int rate_idx;
- int fail;
-
- if ((index >= txq->q.n_bd) || (iwl_legacy_queue_used(&txq->q, index) == 0)) {
- IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
- "is out of range [0-%d] %d %d\n", txq_id,
- index, txq->q.n_bd, txq->q.write_ptr,
- txq->q.read_ptr);
- return;
- }
-
- txq->time_stamp = jiffies;
- info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
- ieee80211_tx_info_clear_status(info);
-
- /* Fill the MRR chain with some info about on-chip retransmissions */
- rate_idx = iwl3945_hwrate_to_plcp_idx(tx_resp->rate);
- if (info->band == IEEE80211_BAND_5GHZ)
- rate_idx -= IWL_FIRST_OFDM_RATE;
-
- fail = tx_resp->failure_frame;
-
- info->status.rates[0].idx = rate_idx;
- info->status.rates[0].count = fail + 1; /* add final attempt */
-
- /* tx_status->rts_retry_count = tx_resp->failure_rts; */
- info->flags |= ((status & TX_STATUS_MSK) == TX_STATUS_SUCCESS) ?
- IEEE80211_TX_STAT_ACK : 0;
-
- IWL_DEBUG_TX(priv, "Tx queue %d Status %s (0x%08x) plcp rate %d retries %d\n",
- txq_id, iwl3945_get_tx_fail_reason(status), status,
- tx_resp->rate, tx_resp->failure_frame);
-
- IWL_DEBUG_TX_REPLY(priv, "Tx queue reclaim %d\n", index);
- iwl3945_tx_queue_reclaim(priv, txq_id, index);
-
- if (status & TX_ABORT_REQUIRED_MSK)
- IWL_ERR(priv, "TODO: Implement Tx ABORT REQUIRED!!!\n");
-}
-
-
-
-/*****************************************************************************
- *
- * Intel PRO/Wireless 3945ABG/BG Network Connection
- *
- * RX handler implementations
- *
- *****************************************************************************/
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
-static void iwl3945_accumulative_statistics(struct iwl_priv *priv,
- __le32 *stats)
-{
- int i;
- __le32 *prev_stats;
- u32 *accum_stats;
- u32 *delta, *max_delta;
-
- prev_stats = (__le32 *)&priv->_3945.statistics;
- accum_stats = (u32 *)&priv->_3945.accum_statistics;
- delta = (u32 *)&priv->_3945.delta_statistics;
- max_delta = (u32 *)&priv->_3945.max_delta;
-
- for (i = sizeof(__le32); i < sizeof(struct iwl3945_notif_statistics);
- i += sizeof(__le32), stats++, prev_stats++, delta++,
- max_delta++, accum_stats++) {
- if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
- *delta = (le32_to_cpu(*stats) -
- le32_to_cpu(*prev_stats));
- *accum_stats += *delta;
- if (*delta > *max_delta)
- *max_delta = *delta;
- }
- }
-
- /* reset accumulative statistics for "no-counter" type statistics */
- priv->_3945.accum_statistics.general.temperature =
- priv->_3945.statistics.general.temperature;
- priv->_3945.accum_statistics.general.ttl_timestamp =
- priv->_3945.statistics.general.ttl_timestamp;
-}
-#endif
-
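/*
 * Editorial sketch (not part of the driver): the delta/accumulate/max walk
 * performed by iwl3945_accumulative_statistics() above, shown over plain
 * host-order u32 arrays so the arithmetic is easy to follow. The driver
 * refreshes its "previous" snapshot afterwards (via the memcpy() in
 * iwl3945_hw_rx_statistics()); this sketch leaves that to the caller too.
 * Names are illustrative only.
 */
#include <stdint.h>
#include <stddef.h>

static void accumulate_counters(const uint32_t *now, const uint32_t *prev,
				uint32_t *accum, uint32_t *delta,
				uint32_t *max_delta, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++) {
		if (now[i] > prev[i]) {			/* counter moved forward */
			delta[i] = now[i] - prev[i];
			accum[i] += delta[i];
			if (delta[i] > max_delta[i])
				max_delta[i] = delta[i];
		}
	}
}
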
-void iwl3945_hw_rx_statistics(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
-
- IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n",
- (int)sizeof(struct iwl3945_notif_statistics),
- le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
- iwl3945_accumulative_statistics(priv, (__le32 *)&pkt->u.raw);
-#endif
-
- memcpy(&priv->_3945.statistics, pkt->u.raw, sizeof(priv->_3945.statistics));
-}
-
-void iwl3945_reply_statistics(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- __le32 *flag = (__le32 *)&pkt->u.raw;
-
- if (le32_to_cpu(*flag) & UCODE_STATISTICS_CLEAR_MSK) {
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
- memset(&priv->_3945.accum_statistics, 0,
- sizeof(struct iwl3945_notif_statistics));
- memset(&priv->_3945.delta_statistics, 0,
- sizeof(struct iwl3945_notif_statistics));
- memset(&priv->_3945.max_delta, 0,
- sizeof(struct iwl3945_notif_statistics));
-#endif
- IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
- }
- iwl3945_hw_rx_statistics(priv, rxb);
-}
-
-
-/******************************************************************************
- *
- * Misc. internal state and helper functions
- *
- ******************************************************************************/
-
-/* This is necessary only for a number of statistics, see the caller. */
-static int iwl3945_is_network_packet(struct iwl_priv *priv,
- struct ieee80211_hdr *header)
-{
- /* Filter incoming packets to determine if they are targeted toward
- * this network, discarding packets coming from ourselves */
- switch (priv->iw_mode) {
- case NL80211_IFTYPE_ADHOC: /* Header: Dest. | Source | BSSID */
- /* packets to our IBSS update information */
- return !compare_ether_addr(header->addr3, priv->bssid);
- case NL80211_IFTYPE_STATION: /* Header: Dest. | AP{BSSID} | Source */
- /* packets from our AP update information */
- return !compare_ether_addr(header->addr2, priv->bssid);
- default:
- return 1;
- }
-}
-
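/*
 * Editorial note (not part of the driver): compare_ether_addr() returns 0
 * when the two addresses match, so the "!" above means "addresses equal".
 * A standalone equivalent, assuming 6-byte MAC addresses:
 */
#include <string.h>
#include <stdbool.h>

static bool ether_addr_equal_sketch(const unsigned char a[6],
				    const unsigned char b[6])
{
	return memcmp(a, b, 6) == 0;
}
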
-static void iwl3945_pass_packet_to_mac80211(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb,
- struct ieee80211_rx_status *stats)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)IWL_RX_DATA(pkt);
- struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
- struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt);
- u16 len = le16_to_cpu(rx_hdr->len);
- struct sk_buff *skb;
- __le16 fc = hdr->frame_control;
-
- /* Sanity check: the reported frame must fit within the Rx page */
- if (unlikely(len + IWL39_RX_FRAME_SIZE >
- PAGE_SIZE << priv->hw_params.rx_page_order)) {
- IWL_DEBUG_DROP(priv, "Corruption detected!\n");
- return;
- }
-
- /* We only process data packets if the interface is open */
- if (unlikely(!priv->is_open)) {
- IWL_DEBUG_DROP_LIMIT(priv,
- "Dropping packet while interface is not open.\n");
- return;
- }
-
- skb = dev_alloc_skb(128);
- if (!skb) {
- IWL_ERR(priv, "dev_alloc_skb failed\n");
- return;
- }
-
- if (!iwl3945_mod_params.sw_crypto)
- iwl_legacy_set_decrypted_flag(priv,
- (struct ieee80211_hdr *)rxb_addr(rxb),
- le32_to_cpu(rx_end->status), stats);
-
- skb_add_rx_frag(skb, 0, rxb->page,
- (void *)rx_hdr->payload - (void *)pkt, len);
-
- iwl_legacy_update_stats(priv, false, fc, len);
- memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
-
- ieee80211_rx(priv->hw, skb);
- priv->alloc_rxb_page--;
- rxb->page = NULL;
-}
-
-#define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)
-
-static void iwl3945_rx_reply_rx(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct ieee80211_hdr *header;
- struct ieee80211_rx_status rx_status;
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl3945_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt);
- struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
- struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt);
- u16 rx_stats_sig_avg __maybe_unused = le16_to_cpu(rx_stats->sig_avg);
- u16 rx_stats_noise_diff __maybe_unused = le16_to_cpu(rx_stats->noise_diff);
- u8 network_packet;
-
- rx_status.flag = 0;
- rx_status.mactime = le64_to_cpu(rx_end->timestamp);
- rx_status.band = (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
- IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
- rx_status.freq =
- ieee80211_channel_to_frequency(le16_to_cpu(rx_hdr->channel),
- rx_status.band);
-
- rx_status.rate_idx = iwl3945_hwrate_to_plcp_idx(rx_hdr->rate);
- if (rx_status.band == IEEE80211_BAND_5GHZ)
- rx_status.rate_idx -= IWL_FIRST_OFDM_RATE;
-
- rx_status.antenna = (le16_to_cpu(rx_hdr->phy_flags) &
- RX_RES_PHY_FLAGS_ANTENNA_MSK) >> 4;
-
- /* set the preamble flag if appropriate */
- if (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
- rx_status.flag |= RX_FLAG_SHORTPRE;
-
- if (unlikely(rx_stats->phy_count > 20)) {
- IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d\n",
- rx_stats->phy_count);
- return;
- }
-
- if (!(rx_end->status & RX_RES_STATUS_NO_CRC32_ERROR)
- || !(rx_end->status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
- IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n", rx_end->status);
- return;
- }
-
-
-
- /* Convert 3945's rssi indicator to dBm */
- rx_status.signal = rx_stats->rssi - IWL39_RSSI_OFFSET;
-
- IWL_DEBUG_STATS(priv, "Rssi %d sig_avg %d noise_diff %d\n",
- rx_status.signal, rx_stats_sig_avg,
- rx_stats_noise_diff);
-
- header = (struct ieee80211_hdr *)IWL_RX_DATA(pkt);
-
- network_packet = iwl3945_is_network_packet(priv, header);
-
- IWL_DEBUG_STATS_LIMIT(priv, "[%c] %d RSSI:%d Signal:%u, Rate:%u\n",
- network_packet ? '*' : ' ',
- le16_to_cpu(rx_hdr->channel),
- rx_status.signal, rx_status.signal,
- rx_status.rate_idx);
-
- iwl_legacy_dbg_log_rx_data_frame(priv, le16_to_cpu(rx_hdr->len),
- header);
-
- if (network_packet) {
- priv->_3945.last_beacon_time =
- le32_to_cpu(rx_end->beacon_timestamp);
- priv->_3945.last_tsf = le64_to_cpu(rx_end->timestamp);
- priv->_3945.last_rx_rssi = rx_status.signal;
- }
-
- iwl3945_pass_packet_to_mac80211(priv, rxb, &rx_status);
-}
-
-int iwl3945_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
- struct iwl_tx_queue *txq,
- dma_addr_t addr, u16 len, u8 reset, u8 pad)
-{
- int count;
- struct iwl_queue *q;
- struct iwl3945_tfd *tfd, *tfd_tmp;
-
- q = &txq->q;
- tfd_tmp = (struct iwl3945_tfd *)txq->tfds;
- tfd = &tfd_tmp[q->write_ptr];
-
- if (reset)
- memset(tfd, 0, sizeof(*tfd));
-
- count = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags));
-
- if ((count >= NUM_TFD_CHUNKS) || (count < 0)) {
- IWL_ERR(priv, "Error can not send more than %d chunks\n",
- NUM_TFD_CHUNKS);
- return -EINVAL;
- }
-
- tfd->tbs[count].addr = cpu_to_le32(addr);
- tfd->tbs[count].len = cpu_to_le32(len);
-
- count++;
-
- tfd->control_flags = cpu_to_le32(TFD_CTL_COUNT_SET(count) |
- TFD_CTL_PAD_SET(pad));
-
- return 0;
-}
-
-/**
- * iwl3945_hw_txq_free_tfd - Free one TFD, the one at index [txq->q.read_ptr]
- *
- * Does NOT advance any indexes.
- */
-void iwl3945_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
-{
- struct iwl3945_tfd *tfd_tmp = (struct iwl3945_tfd *)txq->tfds;
- int index = txq->q.read_ptr;
- struct iwl3945_tfd *tfd = &tfd_tmp[index];
- struct pci_dev *dev = priv->pci_dev;
- int i;
- int counter;
-
- /* sanity check */
- counter = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags));
- if (counter > NUM_TFD_CHUNKS) {
- IWL_ERR(priv, "Too many chunks: %i\n", counter);
- /* @todo issue fatal error, it is quite a serious situation */
- return;
- }
-
- /* Unmap tx_cmd */
- if (counter)
- pci_unmap_single(dev,
- dma_unmap_addr(&txq->meta[index], mapping),
- dma_unmap_len(&txq->meta[index], len),
- PCI_DMA_TODEVICE);
-
- /* unmap chunks if any */
-
- for (i = 1; i < counter; i++)
- pci_unmap_single(dev, le32_to_cpu(tfd->tbs[i].addr),
- le32_to_cpu(tfd->tbs[i].len), PCI_DMA_TODEVICE);
-
- /* free SKB */
- if (txq->txb) {
- struct sk_buff *skb;
-
- skb = txq->txb[txq->q.read_ptr].skb;
-
- /* can be called from irqs-disabled context */
- if (skb) {
- dev_kfree_skb_any(skb);
- txq->txb[txq->q.read_ptr].skb = NULL;
- }
- }
-}
-
-/**
- * iwl3945_hw_build_tx_cmd_rate - Add rate portion to TX_CMD
- */
-void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv,
- struct iwl_device_cmd *cmd,
- struct ieee80211_tx_info *info,
- struct ieee80211_hdr *hdr,
- int sta_id, int tx_id)
-{
- u16 hw_value = ieee80211_get_tx_rate(priv->hw, info)->hw_value;
- u16 rate_index = min(hw_value & 0xffff, IWL_RATE_COUNT_3945 - 1);
- u16 rate_mask;
- int rate;
- u8 rts_retry_limit;
- u8 data_retry_limit;
- __le32 tx_flags;
- __le16 fc = hdr->frame_control;
- struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
-
- rate = iwl3945_rates[rate_index].plcp;
- tx_flags = tx_cmd->tx_flags;
-
- /* We need to figure out how to get the sta->supp_rates while
- * in this running context */
- rate_mask = IWL_RATES_MASK_3945;
-
- /* Set retry limit on DATA packets and Probe Responses*/
- if (ieee80211_is_probe_resp(fc))
- data_retry_limit = 3;
- else
- data_retry_limit = IWL_DEFAULT_TX_RETRY;
- tx_cmd->data_retry_limit = data_retry_limit;
-
- if (tx_id >= IWL39_CMD_QUEUE_NUM)
- rts_retry_limit = 3;
- else
- rts_retry_limit = 7;
-
- if (data_retry_limit < rts_retry_limit)
- rts_retry_limit = data_retry_limit;
- tx_cmd->rts_retry_limit = rts_retry_limit;
-
- tx_cmd->rate = rate;
- tx_cmd->tx_flags = tx_flags;
-
- /* OFDM */
- tx_cmd->supp_rates[0] =
- ((rate_mask & IWL_OFDM_RATES_MASK) >> IWL_FIRST_OFDM_RATE) & 0xFF;
-
- /* CCK */
- tx_cmd->supp_rates[1] = (rate_mask & 0xF);
-
- IWL_DEBUG_RATE(priv, "Tx sta id: %d, rate: %d (plcp), flags: 0x%4X "
- "cck/ofdm mask: 0x%x/0x%x\n", sta_id,
- tx_cmd->rate, le32_to_cpu(tx_cmd->tx_flags),
- tx_cmd->supp_rates[1], tx_cmd->supp_rates[0]);
-}
-
-static u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id, u16 tx_rate)
-{
- unsigned long flags_spin;
- struct iwl_station_entry *station;
-
- if (sta_id == IWL_INVALID_STATION)
- return IWL_INVALID_STATION;
-
- spin_lock_irqsave(&priv->sta_lock, flags_spin);
- station = &priv->stations[sta_id];
-
- station->sta.sta.modify_mask = STA_MODIFY_TX_RATE_MSK;
- station->sta.rate_n_flags = cpu_to_le16(tx_rate);
- station->sta.mode = STA_CONTROL_MODIFY_MSK;
- iwl_legacy_send_add_sta(priv, &station->sta, CMD_ASYNC);
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
-
- IWL_DEBUG_RATE(priv, "SCALE sync station %d to rate %d\n",
- sta_id, tx_rate);
- return sta_id;
-}
-
-static void iwl3945_set_pwr_vmain(struct iwl_priv *priv)
-{
-/*
- * (for documentation purposes)
- * to set power to V_AUX, do
-
- if (pci_pme_capable(priv->pci_dev, PCI_D3cold)) {
- iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
- APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
- ~APMG_PS_CTRL_MSK_PWR_SRC);
-
- iwl_poll_bit(priv, CSR_GPIO_IN,
- CSR_GPIO_IN_VAL_VAUX_PWR_SRC,
- CSR_GPIO_IN_BIT_AUX_POWER, 5000);
- }
- */
-
- iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
- APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
- ~APMG_PS_CTRL_MSK_PWR_SRC);
-
- iwl_poll_bit(priv, CSR_GPIO_IN, CSR_GPIO_IN_VAL_VMAIN_PWR_SRC,
- CSR_GPIO_IN_BIT_AUX_POWER, 5000); /* uS */
-}
-
-static int iwl3945_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
-{
- iwl_legacy_write_direct32(priv, FH39_RCSR_RBD_BASE(0), rxq->bd_dma);
- iwl_legacy_write_direct32(priv, FH39_RCSR_RPTR_ADDR(0),
- rxq->rb_stts_dma);
- iwl_legacy_write_direct32(priv, FH39_RCSR_WPTR(0), 0);
- iwl_legacy_write_direct32(priv, FH39_RCSR_CONFIG(0),
- FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE |
- FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE |
- FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN |
- FH39_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128 |
- (RX_QUEUE_SIZE_LOG << FH39_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE) |
- FH39_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST |
- (1 << FH39_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH) |
- FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH);
-
- /* fake read to flush all prev I/O */
- iwl_legacy_read_direct32(priv, FH39_RSSR_CTRL);
-
- return 0;
-}
-
-static int iwl3945_tx_reset(struct iwl_priv *priv)
-{
-
- /* bypass mode */
- iwl_legacy_write_prph(priv, ALM_SCD_MODE_REG, 0x2);
-
- /* RA 0 is active */
- iwl_legacy_write_prph(priv, ALM_SCD_ARASTAT_REG, 0x01);
-
- /* all 6 fifo are active */
- iwl_legacy_write_prph(priv, ALM_SCD_TXFACT_REG, 0x3f);
-
- iwl_legacy_write_prph(priv, ALM_SCD_SBYP_MODE_1_REG, 0x010000);
- iwl_legacy_write_prph(priv, ALM_SCD_SBYP_MODE_2_REG, 0x030002);
- iwl_legacy_write_prph(priv, ALM_SCD_TXF4MF_REG, 0x000004);
- iwl_legacy_write_prph(priv, ALM_SCD_TXF5MF_REG, 0x000005);
-
- iwl_legacy_write_direct32(priv, FH39_TSSR_CBB_BASE,
- priv->_3945.shared_phys);
-
- iwl_legacy_write_direct32(priv, FH39_TSSR_MSG_CONFIG,
- FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON |
- FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON |
- FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B |
- FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON |
- FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON |
- FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH |
- FH39_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH);
-
-
- return 0;
-}
-
-/**
- * iwl3945_txq_ctx_reset - Reset TX queue context
- *
- * Destroys all DMA structures and initializes them again.
- */
-static int iwl3945_txq_ctx_reset(struct iwl_priv *priv)
-{
- int rc;
- int txq_id, slots_num;
-
- iwl3945_hw_txq_ctx_free(priv);
-
- /* allocate tx queue structure */
- rc = iwl_legacy_alloc_txq_mem(priv);
- if (rc)
- return rc;
-
- /* Tx CMD queue */
- rc = iwl3945_tx_reset(priv);
- if (rc)
- goto error;
-
- /* Tx queue(s) */
- for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
- slots_num = (txq_id == IWL39_CMD_QUEUE_NUM) ?
- TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
- rc = iwl_legacy_tx_queue_init(priv, &priv->txq[txq_id],
- slots_num, txq_id);
- if (rc) {
- IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
- goto error;
- }
- }
-
- return rc;
-
- error:
- iwl3945_hw_txq_ctx_free(priv);
- return rc;
-}
-
-
-/*
- * Start up 3945's basic functionality after it has been reset
- * (e.g. after platform boot, or shutdown via iwl_legacy_apm_stop())
- * NOTE: This does not load uCode nor start the embedded processor
- */
-static int iwl3945_apm_init(struct iwl_priv *priv)
-{
- int ret = iwl_legacy_apm_init(priv);
-
- /* Clear APMG (NIC's internal power management) interrupts */
- iwl_legacy_write_prph(priv, APMG_RTC_INT_MSK_REG, 0x0);
- iwl_legacy_write_prph(priv, APMG_RTC_INT_STT_REG, 0xFFFFFFFF);
-
- /* Reset radio chip */
- iwl_legacy_set_bits_prph(priv, APMG_PS_CTRL_REG,
- APMG_PS_CTRL_VAL_RESET_REQ);
- udelay(5);
- iwl_legacy_clear_bits_prph(priv, APMG_PS_CTRL_REG,
- APMG_PS_CTRL_VAL_RESET_REQ);
-
- return ret;
-}
-
-static void iwl3945_nic_config(struct iwl_priv *priv)
-{
- struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
- unsigned long flags;
- u8 rev_id = priv->pci_dev->revision;
-
- spin_lock_irqsave(&priv->lock, flags);
-
- /* Determine HW type */
- IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", rev_id);
-
- if (rev_id & PCI_CFG_REV_ID_BIT_RTP)
- IWL_DEBUG_INFO(priv, "RTP type\n");
- else if (rev_id & PCI_CFG_REV_ID_BIT_BASIC_SKU) {
- IWL_DEBUG_INFO(priv, "3945 RADIO-MB type\n");
- iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
- CSR39_HW_IF_CONFIG_REG_BIT_3945_MB);
- } else {
- IWL_DEBUG_INFO(priv, "3945 RADIO-MM type\n");
- iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
- CSR39_HW_IF_CONFIG_REG_BIT_3945_MM);
- }
-
- if (EEPROM_SKU_CAP_OP_MODE_MRC == eeprom->sku_cap) {
- IWL_DEBUG_INFO(priv, "SKU OP mode is mrc\n");
- iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
- CSR39_HW_IF_CONFIG_REG_BIT_SKU_MRC);
- } else
- IWL_DEBUG_INFO(priv, "SKU OP mode is basic\n");
-
- if ((eeprom->board_revision & 0xF0) == 0xD0) {
- IWL_DEBUG_INFO(priv, "3945ABG revision is 0x%X\n",
- eeprom->board_revision);
- iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
- CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE);
- } else {
- IWL_DEBUG_INFO(priv, "3945ABG revision is 0x%X\n",
- eeprom->board_revision);
- iwl_legacy_clear_bit(priv, CSR_HW_IF_CONFIG_REG,
- CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE);
- }
-
- if (eeprom->almgor_m_version <= 1) {
- iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
- CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A);
- IWL_DEBUG_INFO(priv, "Card M type A version is 0x%X\n",
- eeprom->almgor_m_version);
- } else {
- IWL_DEBUG_INFO(priv, "Card M type B version is 0x%X\n",
- eeprom->almgor_m_version);
- iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
- CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B);
- }
- spin_unlock_irqrestore(&priv->lock, flags);
-
- if (eeprom->sku_cap & EEPROM_SKU_CAP_SW_RF_KILL_ENABLE)
- IWL_DEBUG_RF_KILL(priv, "SW RF KILL supported in EEPROM.\n");
-
- if (eeprom->sku_cap & EEPROM_SKU_CAP_HW_RF_KILL_ENABLE)
- IWL_DEBUG_RF_KILL(priv, "HW RF KILL supported in EEPROM.\n");
-}
-
-int iwl3945_hw_nic_init(struct iwl_priv *priv)
-{
- int rc;
- unsigned long flags;
- struct iwl_rx_queue *rxq = &priv->rxq;
-
- spin_lock_irqsave(&priv->lock, flags);
- priv->cfg->ops->lib->apm_ops.init(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- iwl3945_set_pwr_vmain(priv);
-
- priv->cfg->ops->lib->apm_ops.config(priv);
-
- /* Allocate the RX queue, or reset if it is already allocated */
- if (!rxq->bd) {
- rc = iwl_legacy_rx_queue_alloc(priv);
- if (rc) {
- IWL_ERR(priv, "Unable to initialize Rx queue\n");
- return -ENOMEM;
- }
- } else
- iwl3945_rx_queue_reset(priv, rxq);
-
- iwl3945_rx_replenish(priv);
-
- iwl3945_rx_init(priv, rxq);
-
-
- /* Look at using this instead:
- rxq->need_update = 1;
- iwl_legacy_rx_queue_update_write_ptr(priv, rxq);
- */
-
- iwl_legacy_write_direct32(priv, FH39_RCSR_WPTR(0), rxq->write & ~7);
-
- rc = iwl3945_txq_ctx_reset(priv);
- if (rc)
- return rc;
-
- set_bit(STATUS_INIT, &priv->status);
-
- return 0;
-}
-
-/**
- * iwl3945_hw_txq_ctx_free - Free TXQ Context
- *
- * Destroy all TX DMA queues and structures
- */
-void iwl3945_hw_txq_ctx_free(struct iwl_priv *priv)
-{
- int txq_id;
-
- /* Tx queues */
- if (priv->txq)
- for (txq_id = 0; txq_id < priv->hw_params.max_txq_num;
- txq_id++)
- if (txq_id == IWL39_CMD_QUEUE_NUM)
- iwl_legacy_cmd_queue_free(priv);
- else
- iwl_legacy_tx_queue_free(priv, txq_id);
-
- /* free tx queue structure */
- iwl_legacy_txq_mem(priv);
-}
-
-void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv)
-{
- int txq_id;
-
- /* stop SCD */
- iwl_legacy_write_prph(priv, ALM_SCD_MODE_REG, 0);
- iwl_legacy_write_prph(priv, ALM_SCD_TXFACT_REG, 0);
-
- /* reset TFD queues */
- for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
- iwl_legacy_write_direct32(priv, FH39_TCSR_CONFIG(txq_id), 0x0);
- iwl_poll_direct_bit(priv, FH39_TSSR_TX_STATUS,
- FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(txq_id),
- 1000);
- }
-
- iwl3945_hw_txq_ctx_free(priv);
-}
-
-/**
- * iwl3945_hw_reg_adjust_power_by_temp - return index delta into power
- * gain settings table
- */
-static int iwl3945_hw_reg_adjust_power_by_temp(int new_reading, int old_reading)
-{
- return (new_reading - old_reading) * (-11) / 100;
-}
-
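/*
 * Editorial sketch: a worked example of the formula above. The gain table
 * moves in half-dB steps, and the -11/100 coefficient works out to roughly
 * 0.11 index steps per unit of temperature reading (C integer division
 * truncates toward zero). The sample readings below are made up.
 */
#include <stdio.h>

static int adjust_power_by_temp(int new_reading, int old_reading)
{
	return (new_reading - old_reading) * (-11) / 100;
}

int main(void)
{
	/* 20 units warmer than the factory reference: (20 * -11) / 100 = -2,
	 * i.e. two steps toward the higher-gain end of the table */
	printf("%d\n", adjust_power_by_temp(120, 100));	/* prints -2 */

	/* 20 units cooler: two steps toward the lower-gain end */
	printf("%d\n", adjust_power_by_temp(80, 100));	/* prints 2 */
	return 0;
}
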
-/**
- * iwl3945_hw_reg_temp_out_of_range - Keep temperature in sane range
- */
-static inline int iwl3945_hw_reg_temp_out_of_range(int temperature)
-{
- return ((temperature < -260) || (temperature > 25)) ? 1 : 0;
-}
-
-int iwl3945_hw_get_temperature(struct iwl_priv *priv)
-{
- return iwl_read32(priv, CSR_UCODE_DRV_GP2);
-}
-
-/**
- * iwl3945_hw_reg_txpower_get_temperature - get the current temperature
- * by reading from the NIC
- */
-static int iwl3945_hw_reg_txpower_get_temperature(struct iwl_priv *priv)
-{
- struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
- int temperature;
-
- temperature = iwl3945_hw_get_temperature(priv);
-
- /* driver's okay range is -260 to +25.
- * human readable okay range is 0 to +285 */
- IWL_DEBUG_INFO(priv, "Temperature: %d\n", temperature + IWL_TEMP_CONVERT);
-
- /* handle insane temp reading */
- if (iwl3945_hw_reg_temp_out_of_range(temperature)) {
- IWL_ERR(priv, "Error bad temperature value %d\n", temperature);
-
- /* if really really hot(?),
- * substitute the 3rd band/group's temp measured at factory */
- if (priv->last_temperature > 100)
- temperature = eeprom->groups[2].temperature;
- else /* else use most recent "sane" value from driver */
- temperature = priv->last_temperature;
- }
-
- return temperature; /* raw, not "human readable" */
-}
-
-/* Adjust Txpower only if temperature variance is greater than threshold.
- *
- * This threshold is lower than the 9 degrees used by older versions. */
-#define IWL_TEMPERATURE_LIMIT_TIMER 6
-
-/**
- * iwl3945_is_temp_calib_needed - determines if new calibration is needed
- *
- * Records the new temperature in priv->temperature.
- * Replaces priv->last_temperature *only* if calibration is needed
- * (assumes the caller will actually do the calibration!). */
-static int iwl3945_is_temp_calib_needed(struct iwl_priv *priv)
-{
- int temp_diff;
-
- priv->temperature = iwl3945_hw_reg_txpower_get_temperature(priv);
- temp_diff = priv->temperature - priv->last_temperature;
-
- /* get absolute value */
- if (temp_diff < 0) {
- IWL_DEBUG_POWER(priv, "Getting cooler, delta %d,\n", temp_diff);
- temp_diff = -temp_diff;
- } else if (temp_diff == 0)
- IWL_DEBUG_POWER(priv, "Same temp,\n");
- else
- IWL_DEBUG_POWER(priv, "Getting warmer, delta %d,\n", temp_diff);
-
- /* if we don't need calibration, *don't* update last_temperature */
- if (temp_diff < IWL_TEMPERATURE_LIMIT_TIMER) {
- IWL_DEBUG_POWER(priv, "Timed thermal calib not needed\n");
- return 0;
- }
-
- IWL_DEBUG_POWER(priv, "Timed thermal calib needed\n");
-
- /* assume that caller will actually do calib ...
- * update the "last temperature" value */
- priv->last_temperature = priv->temperature;
- return 1;
-}
-
-#define IWL_MAX_GAIN_ENTRIES 78
-#define IWL_CCK_FROM_OFDM_POWER_DIFF -5
-#define IWL_CCK_FROM_OFDM_INDEX_DIFF (10)
-
-/* radio and DSP power table, each step is 1/2 dB.
- * 1st number is for RF analog gain, 2nd number is for DSP pre-DAC gain. */
-static struct iwl3945_tx_power power_gain_table[2][IWL_MAX_GAIN_ENTRIES] = {
- {
- {251, 127}, /* 2.4 GHz, highest power */
- {251, 127},
- {251, 127},
- {251, 127},
- {251, 125},
- {251, 110},
- {251, 105},
- {251, 98},
- {187, 125},
- {187, 115},
- {187, 108},
- {187, 99},
- {243, 119},
- {243, 111},
- {243, 105},
- {243, 97},
- {243, 92},
- {211, 106},
- {211, 100},
- {179, 120},
- {179, 113},
- {179, 107},
- {147, 125},
- {147, 119},
- {147, 112},
- {147, 106},
- {147, 101},
- {147, 97},
- {147, 91},
- {115, 107},
- {235, 121},
- {235, 115},
- {235, 109},
- {203, 127},
- {203, 121},
- {203, 115},
- {203, 108},
- {203, 102},
- {203, 96},
- {203, 92},
- {171, 110},
- {171, 104},
- {171, 98},
- {139, 116},
- {227, 125},
- {227, 119},
- {227, 113},
- {227, 107},
- {227, 101},
- {227, 96},
- {195, 113},
- {195, 106},
- {195, 102},
- {195, 95},
- {163, 113},
- {163, 106},
- {163, 102},
- {163, 95},
- {131, 113},
- {131, 106},
- {131, 102},
- {131, 95},
- {99, 113},
- {99, 106},
- {99, 102},
- {99, 95},
- {67, 113},
- {67, 106},
- {67, 102},
- {67, 95},
- {35, 113},
- {35, 106},
- {35, 102},
- {35, 95},
- {3, 113},
- {3, 106},
- {3, 102},
- {3, 95} }, /* 2.4 GHz, lowest power */
- {
- {251, 127}, /* 5.x GHz, highest power */
- {251, 120},
- {251, 114},
- {219, 119},
- {219, 101},
- {187, 113},
- {187, 102},
- {155, 114},
- {155, 103},
- {123, 117},
- {123, 107},
- {123, 99},
- {123, 92},
- {91, 108},
- {59, 125},
- {59, 118},
- {59, 109},
- {59, 102},
- {59, 96},
- {59, 90},
- {27, 104},
- {27, 98},
- {27, 92},
- {115, 118},
- {115, 111},
- {115, 104},
- {83, 126},
- {83, 121},
- {83, 113},
- {83, 105},
- {83, 99},
- {51, 118},
- {51, 111},
- {51, 104},
- {51, 98},
- {19, 116},
- {19, 109},
- {19, 102},
- {19, 98},
- {19, 93},
- {171, 113},
- {171, 107},
- {171, 99},
- {139, 120},
- {139, 113},
- {139, 107},
- {139, 99},
- {107, 120},
- {107, 113},
- {107, 107},
- {107, 99},
- {75, 120},
- {75, 113},
- {75, 107},
- {75, 99},
- {43, 120},
- {43, 113},
- {43, 107},
- {43, 99},
- {11, 120},
- {11, 113},
- {11, 107},
- {11, 99},
- {131, 107},
- {131, 99},
- {99, 120},
- {99, 113},
- {99, 107},
- {99, 99},
- {67, 120},
- {67, 113},
- {67, 107},
- {67, 99},
- {35, 120},
- {35, 113},
- {35, 107},
- {35, 99},
- {3, 120} } /* 5.x GHz, lowest power */
-};
-
-static inline u8 iwl3945_hw_reg_fix_power_index(int index)
-{
- if (index < 0)
- return 0;
- if (index >= IWL_MAX_GAIN_ENTRIES)
- return IWL_MAX_GAIN_ENTRIES - 1;
- return (u8) index;
-}
-
-/* Kick off thermal recalibration check every 60 seconds */
-#define REG_RECALIB_PERIOD (60)
-
-/**
- * iwl3945_hw_reg_set_scan_power - Set Tx power for scan probe requests
- *
- * Set (in our channel info database) the direct scan Tx power for 1 Mbit (CCK)
- * or 6 Mbit (OFDM) rates.
- */
-static void iwl3945_hw_reg_set_scan_power(struct iwl_priv *priv, u32 scan_tbl_index,
- s32 rate_index, const s8 *clip_pwrs,
- struct iwl_channel_info *ch_info,
- int band_index)
-{
- struct iwl3945_scan_power_info *scan_power_info;
- s8 power;
- u8 power_index;
-
- scan_power_info = &ch_info->scan_pwr_info[scan_tbl_index];
-
- /* use this channel group's 6Mbit clipping/saturation pwr,
- * but cap at regulatory scan power restriction (set during init
- * based on eeprom channel data) for this channel. */
- power = min(ch_info->scan_power, clip_pwrs[IWL_RATE_6M_INDEX_TABLE]);
-
- power = min(power, priv->tx_power_user_lmt);
- scan_power_info->requested_power = power;
-
- /* find difference between new scan *power* and current "normal"
- * Tx *power* for 6Mb. Use this difference (x2) to adjust the
- * current "normal" temperature-compensated Tx power *index* for
- * this rate (1Mb or 6Mb) to yield new temp-compensated scan power
- * *index*. */
- power_index = ch_info->power_info[rate_index].power_table_index
- - (power - ch_info->power_info
- [IWL_RATE_6M_INDEX_TABLE].requested_power) * 2;
-
- /* store reference index that we use when adjusting *all* scan
- * powers. So we can accommodate user (all channel) or spectrum
- * management (single channel) power changes "between" temperature
- * feedback compensation procedures.
- * don't force fit this reference index into gain table; it may be a
- * negative number. This will help avoid errors when we're at
- * the lower bounds (highest gains, for warmest temperatures)
- * of the table. */
-
- /* don't exceed table bounds for "real" setting */
- power_index = iwl3945_hw_reg_fix_power_index(power_index);
-
- scan_power_info->power_table_index = power_index;
- scan_power_info->tpc.tx_gain =
- power_gain_table[band_index][power_index].tx_gain;
- scan_power_info->tpc.dsp_atten =
- power_gain_table[band_index][power_index].dsp_atten;
-}
-
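/*
 * Editorial sketch: the "* 2" in iwl3945_hw_reg_set_scan_power() above
 * converts a power difference in dBm into gain-table index steps, since
 * each table entry is 1/2 dB apart. Lower index = more gain, so reducing
 * power moves the index up. The helper name is made up; the clamping to
 * table bounds (iwl3945_hw_reg_fix_power_index) is omitted here.
 */
static int scan_power_index(int base_index, int base_power_dbm,
			    int scan_power_dbm)
{
	return base_index - (scan_power_dbm - base_power_dbm) * 2;
}

/* e.g. base index 20 at 14 dBm, scanning at 11 dBm -> index 26 (less gain) */
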
-/**
- * iwl3945_send_tx_power - fill in Tx Power command with gain settings
- *
- * Configures power settings for all rates for the current channel,
- * using values from channel info struct, and send to NIC
- */
-static int iwl3945_send_tx_power(struct iwl_priv *priv)
-{
- int rate_idx, i;
- const struct iwl_channel_info *ch_info = NULL;
- struct iwl3945_txpowertable_cmd txpower = {
- .channel = priv->contexts[IWL_RXON_CTX_BSS].active.channel,
- };
- u16 chan;
-
- if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->status),
- "TX Power requested while scanning!\n"))
- return -EAGAIN;
-
- chan = le16_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.channel);
-
- txpower.band = (priv->band == IEEE80211_BAND_5GHZ) ? 0 : 1;
- ch_info = iwl_legacy_get_channel_info(priv, priv->band, chan);
- if (!ch_info) {
- IWL_ERR(priv,
- "Failed to get channel info for channel %d [%d]\n",
- chan, priv->band);
- return -EINVAL;
- }
-
- if (!iwl_legacy_is_channel_valid(ch_info)) {
- IWL_DEBUG_POWER(priv, "Not calling TX_PWR_TABLE_CMD on "
- "non-Tx channel.\n");
- return 0;
- }
-
- /* fill cmd with power settings for all rates for current channel */
- /* Fill OFDM rate */
- for (rate_idx = IWL_FIRST_OFDM_RATE, i = 0;
- rate_idx <= IWL39_LAST_OFDM_RATE; rate_idx++, i++) {
-
- txpower.power[i].tpc = ch_info->power_info[i].tpc;
- txpower.power[i].rate = iwl3945_rates[rate_idx].plcp;
-
- IWL_DEBUG_POWER(priv, "ch %d:%d rf %d dsp %3d rate code 0x%02x\n",
- le16_to_cpu(txpower.channel),
- txpower.band,
- txpower.power[i].tpc.tx_gain,
- txpower.power[i].tpc.dsp_atten,
- txpower.power[i].rate);
- }
- /* Fill CCK rates */
- for (rate_idx = IWL_FIRST_CCK_RATE;
- rate_idx <= IWL_LAST_CCK_RATE; rate_idx++, i++) {
- txpower.power[i].tpc = ch_info->power_info[i].tpc;
- txpower.power[i].rate = iwl3945_rates[rate_idx].plcp;
-
- IWL_DEBUG_POWER(priv, "ch %d:%d rf %d dsp %3d rate code 0x%02x\n",
- le16_to_cpu(txpower.channel),
- txpower.band,
- txpower.power[i].tpc.tx_gain,
- txpower.power[i].tpc.dsp_atten,
- txpower.power[i].rate);
- }
-
- return iwl_legacy_send_cmd_pdu(priv, REPLY_TX_PWR_TABLE_CMD,
- sizeof(struct iwl3945_txpowertable_cmd),
- &txpower);
-
-}
-
-/**
- * iwl3945_hw_reg_set_new_power - Configures power tables at new levels
- * @ch_info: Channel to update. Uses power_info.requested_power.
- *
- * Replace requested_power and base_power_index ch_info fields for
- * one channel.
- *
- * Called if user or spectrum management changes power preferences.
- * Takes into account h/w and modulation limitations (clip power).
- *
- * This does *not* send anything to NIC, just sets up ch_info for one channel.
- *
- * NOTE: iwl3945_hw_reg_comp_txpower_temp() *must* be run after this to
- * properly fill out the scan powers and actual h/w gain settings,
- * and send the changes to the NIC.
- */
-static int iwl3945_hw_reg_set_new_power(struct iwl_priv *priv,
- struct iwl_channel_info *ch_info)
-{
- struct iwl3945_channel_power_info *power_info;
- int power_changed = 0;
- int i;
- const s8 *clip_pwrs;
- int power;
-
- /* Get this chnlgrp's rate-to-max/clip-powers table */
- clip_pwrs = priv->_3945.clip_groups[ch_info->group_index].clip_powers;
-
- /* Get this channel's rate-to-current-power settings table */
- power_info = ch_info->power_info;
-
- /* update OFDM Txpower settings */
- for (i = IWL_RATE_6M_INDEX_TABLE; i <= IWL_RATE_54M_INDEX_TABLE;
- i++, ++power_info) {
- int delta_idx;
-
- /* limit new power to be no more than h/w capability */
- power = min(ch_info->curr_txpow, clip_pwrs[i]);
- if (power == power_info->requested_power)
- continue;
-
- /* find difference between old and new requested powers,
- * update base (non-temp-compensated) power index */
- delta_idx = (power - power_info->requested_power) * 2;
- power_info->base_power_index -= delta_idx;
-
- /* save new requested power value */
- power_info->requested_power = power;
-
- power_changed = 1;
- }
-
- /* update CCK Txpower settings, based on OFDM 12M setting ...
- * ... all CCK power settings for a given channel are the *same*. */
- if (power_changed) {
- power =
- ch_info->power_info[IWL_RATE_12M_INDEX_TABLE].
- requested_power + IWL_CCK_FROM_OFDM_POWER_DIFF;
-
- /* do all CCK rates' iwl3945_channel_power_info structures */
- for (i = IWL_RATE_1M_INDEX_TABLE; i <= IWL_RATE_11M_INDEX_TABLE; i++) {
- power_info->requested_power = power;
- power_info->base_power_index =
- ch_info->power_info[IWL_RATE_12M_INDEX_TABLE].
- base_power_index + IWL_CCK_FROM_OFDM_INDEX_DIFF;
- ++power_info;
- }
- }
-
- return 0;
-}
-
-/**
- * iwl3945_hw_reg_get_ch_txpower_limit - returns new power limit for channel
- *
- * NOTE: Returned power limit may be less (but not more) than requested,
- * based strictly on regulatory (eeprom and spectrum mgt) limitations
- * (no consideration for h/w clipping limitations).
- */
-static int iwl3945_hw_reg_get_ch_txpower_limit(struct iwl_channel_info *ch_info)
-{
- s8 max_power;
-
-#if 0
- /* if we're using TGd limits, use lower of TGd or EEPROM */
- if (ch_info->tgd_data.max_power != 0)
- max_power = min(ch_info->tgd_data.max_power,
- ch_info->eeprom.max_power_avg);
-
- /* else just use EEPROM limits */
- else
-#endif
- max_power = ch_info->eeprom.max_power_avg;
-
- return min(max_power, ch_info->max_power_avg);
-}
-
-/**
- * iwl3945_hw_reg_comp_txpower_temp - Compensate for temperature
- *
- * Compensate txpower settings of *all* channels for temperature.
- * This only accounts for the difference between current temperature
- * and the factory calibration temperatures, and bases the new settings
- * on the channel's base_power_index.
- *
- * If RxOn is "associated", this sends the new Txpower to NIC!
- */
-static int iwl3945_hw_reg_comp_txpower_temp(struct iwl_priv *priv)
-{
- struct iwl_channel_info *ch_info = NULL;
- struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
- int delta_index;
- const s8 *clip_pwrs; /* array of h/w max power levels for each rate */
- u8 a_band;
- u8 rate_index;
- u8 scan_tbl_index;
- u8 i;
- int ref_temp;
- int temperature = priv->temperature;
-
- if (priv->disable_tx_power_cal ||
- test_bit(STATUS_SCANNING, &priv->status)) {
- /* do not perform tx power calibration */
- return 0;
- }
- /* set up new Tx power info for each and every channel, 2.4 and 5.x */
- for (i = 0; i < priv->channel_count; i++) {
- ch_info = &priv->channel_info[i];
- a_band = iwl_legacy_is_channel_a_band(ch_info);
-
- /* Get this chnlgrp's factory calibration temperature */
- ref_temp = (s16)eeprom->groups[ch_info->group_index].
- temperature;
-
- /* get power index adjustment based on current and factory
- * temps */
- delta_index = iwl3945_hw_reg_adjust_power_by_temp(temperature,
- ref_temp);
-
- /* set tx power value for all rates, OFDM and CCK */
- for (rate_index = 0; rate_index < IWL_RATE_COUNT_3945;
- rate_index++) {
- int power_idx =
- ch_info->power_info[rate_index].base_power_index;
-
- /* temperature compensate */
- power_idx += delta_index;
-
- /* stay within table range */
- power_idx = iwl3945_hw_reg_fix_power_index(power_idx);
- ch_info->power_info[rate_index].
- power_table_index = (u8) power_idx;
- ch_info->power_info[rate_index].tpc =
- power_gain_table[a_band][power_idx];
- }
-
- /* Get this chnlgrp's rate-to-max/clip-powers table */
- clip_pwrs = priv->_3945.clip_groups[ch_info->group_index].clip_powers;
-
- /* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */
- for (scan_tbl_index = 0;
- scan_tbl_index < IWL_NUM_SCAN_RATES; scan_tbl_index++) {
- s32 actual_index = (scan_tbl_index == 0) ?
- IWL_RATE_1M_INDEX_TABLE : IWL_RATE_6M_INDEX_TABLE;
- iwl3945_hw_reg_set_scan_power(priv, scan_tbl_index,
- actual_index, clip_pwrs,
- ch_info, a_band);
- }
- }
-
- /* send Txpower command for current channel to ucode */
- return priv->cfg->ops->lib->send_tx_power(priv);
-}
-
-int iwl3945_hw_reg_set_txpower(struct iwl_priv *priv, s8 power)
-{
- struct iwl_channel_info *ch_info;
- s8 max_power;
- u8 a_band;
- u8 i;
-
- if (priv->tx_power_user_lmt == power) {
- IWL_DEBUG_POWER(priv, "Requested Tx power same as current "
- "limit: %ddBm.\n", power);
- return 0;
- }
-
- IWL_DEBUG_POWER(priv, "Setting upper limit clamp to %ddBm.\n", power);
- priv->tx_power_user_lmt = power;
-
- /* set up new Tx powers for each and every channel, 2.4 and 5.x */
-
- for (i = 0; i < priv->channel_count; i++) {
- ch_info = &priv->channel_info[i];
- a_band = iwl_legacy_is_channel_a_band(ch_info);
-
- /* find minimum power of all user and regulatory constraints
- * (does not consider h/w clipping limitations) */
- max_power = iwl3945_hw_reg_get_ch_txpower_limit(ch_info);
- max_power = min(power, max_power);
- if (max_power != ch_info->curr_txpow) {
- ch_info->curr_txpow = max_power;
-
- /* this considers the h/w clipping limitations */
- iwl3945_hw_reg_set_new_power(priv, ch_info);
- }
- }
-
- /* update txpower settings for all channels,
- * send to NIC if associated. */
- iwl3945_is_temp_calib_needed(priv);
- iwl3945_hw_reg_comp_txpower_temp(priv);
-
- return 0;
-}
-
-static int iwl3945_send_rxon_assoc(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
-{
- int rc = 0;
- struct iwl_rx_packet *pkt;
- struct iwl3945_rxon_assoc_cmd rxon_assoc;
- struct iwl_host_cmd cmd = {
- .id = REPLY_RXON_ASSOC,
- .len = sizeof(rxon_assoc),
- .flags = CMD_WANT_SKB,
- .data = &rxon_assoc,
- };
- const struct iwl_legacy_rxon_cmd *rxon1 = &ctx->staging;
- const struct iwl_legacy_rxon_cmd *rxon2 = &ctx->active;
-
- if ((rxon1->flags == rxon2->flags) &&
- (rxon1->filter_flags == rxon2->filter_flags) &&
- (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
- (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
- IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC. Not resending.\n");
- return 0;
- }
-
- rxon_assoc.flags = ctx->staging.flags;
- rxon_assoc.filter_flags = ctx->staging.filter_flags;
- rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates;
- rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
- rxon_assoc.reserved = 0;
-
- rc = iwl_legacy_send_cmd_sync(priv, &cmd);
- if (rc)
- return rc;
-
- pkt = (struct iwl_rx_packet *)cmd.reply_page;
- if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
- IWL_ERR(priv, "Bad return from REPLY_RXON_ASSOC command\n");
- rc = -EIO;
- }
-
- iwl_legacy_free_pages(priv, cmd.reply_page);
-
- return rc;
-}
-
-/**
- * iwl3945_commit_rxon - commit staging_rxon to hardware
- *
- * The RXON command in staging_rxon is committed to the hardware and
- * the active_rxon structure is updated with the new data. This
- * function correctly transitions out of the RXON_ASSOC_MSK state if
- * a HW tune is required based on the RXON structure changes.
- */
-int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
-{
- /* cast away the const for active_rxon in this function */
- struct iwl3945_rxon_cmd *active_rxon = (void *)&ctx->active;
- struct iwl3945_rxon_cmd *staging_rxon = (void *)&ctx->staging;
- int rc = 0;
- bool new_assoc = !!(staging_rxon->filter_flags & RXON_FILTER_ASSOC_MSK);
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return -EINVAL;
-
- if (!iwl_legacy_is_alive(priv))
- return -1;
-
- /* always get timestamp with Rx frame */
- staging_rxon->flags |= RXON_FLG_TSF2HOST_MSK;
-
- /* select antenna */
- staging_rxon->flags &=
- ~(RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_SEL_MSK);
- staging_rxon->flags |= iwl3945_get_antenna_flags(priv);
-
- rc = iwl_legacy_check_rxon_cmd(priv, ctx);
- if (rc) {
- IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n");
- return -EINVAL;
- }
-
- /* If we don't need to send a full RXON, we can use
- * iwl3945_rxon_assoc_cmd which is used to reconfigure filter
- * and other flags for the current radio configuration. */
- if (!iwl_legacy_full_rxon_required(priv,
- &priv->contexts[IWL_RXON_CTX_BSS])) {
- rc = iwl_legacy_send_rxon_assoc(priv,
- &priv->contexts[IWL_RXON_CTX_BSS]);
- if (rc) {
- IWL_ERR(priv, "Error setting RXON_ASSOC "
- "configuration (%d).\n", rc);
- return rc;
- }
-
- memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
- /*
- * We do not commit tx power settings while channel changing,
- * do it now if tx power changed.
- */
- iwl_legacy_set_tx_power(priv, priv->tx_power_next, false);
- return 0;
- }
-
- /* If we are currently associated and the new config requires
- * an RXON_ASSOC and the new config wants the associated mask enabled,
- * we must clear the associated from the active configuration
- * before we apply the new config */
- if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS) && new_assoc) {
- IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n");
- active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
-
- /*
- * reserved4 and 5 could have been filled by the iwlcore code.
- * Let's clear them before pushing to the 3945.
- */
- active_rxon->reserved4 = 0;
- active_rxon->reserved5 = 0;
- rc = iwl_legacy_send_cmd_pdu(priv, REPLY_RXON,
- sizeof(struct iwl3945_rxon_cmd),
- &priv->contexts[IWL_RXON_CTX_BSS].active);
-
- /* If the mask clearing failed then we set
- * active_rxon back to what it was previously */
- if (rc) {
- active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
- IWL_ERR(priv, "Error clearing ASSOC_MSK on current "
- "configuration (%d).\n", rc);
- return rc;
- }
- iwl_legacy_clear_ucode_stations(priv,
- &priv->contexts[IWL_RXON_CTX_BSS]);
- iwl_legacy_restore_stations(priv,
- &priv->contexts[IWL_RXON_CTX_BSS]);
- }
-
- IWL_DEBUG_INFO(priv, "Sending RXON\n"
- "* with%s RXON_FILTER_ASSOC_MSK\n"
- "* channel = %d\n"
- "* bssid = %pM\n",
- (new_assoc ? "" : "out"),
- le16_to_cpu(staging_rxon->channel),
- staging_rxon->bssid_addr);
-
- /*
- * reserved4 and 5 could have been filled by the iwlcore code.
- * Let's clear them before pushing to the 3945.
- */
- staging_rxon->reserved4 = 0;
- staging_rxon->reserved5 = 0;
-
- iwl_legacy_set_rxon_hwcrypto(priv, ctx, !iwl3945_mod_params.sw_crypto);
-
- /* Apply the new configuration */
- rc = iwl_legacy_send_cmd_pdu(priv, REPLY_RXON,
- sizeof(struct iwl3945_rxon_cmd),
- staging_rxon);
- if (rc) {
- IWL_ERR(priv, "Error setting new configuration (%d).\n", rc);
- return rc;
- }
-
- memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
-
- if (!new_assoc) {
- iwl_legacy_clear_ucode_stations(priv,
- &priv->contexts[IWL_RXON_CTX_BSS]);
- iwl_legacy_restore_stations(priv,
- &priv->contexts[IWL_RXON_CTX_BSS]);
- }
-
- /* If we issue a new RXON command which required a tune then we must
- * send a new TXPOWER command or we won't be able to Tx any frames */
- rc = iwl_legacy_set_tx_power(priv, priv->tx_power_next, true);
- if (rc) {
- IWL_ERR(priv, "Error setting Tx power (%d).\n", rc);
- return rc;
- }
-
- /* Init the hardware's rate fallback order based on the band */
- rc = iwl3945_init_hw_rate_table(priv);
- if (rc) {
- IWL_ERR(priv, "Error setting HW rate table: %02X\n", rc);
- return -EIO;
- }
-
- return 0;
-}
-
-/**
- * iwl3945_reg_txpower_periodic - called when it is time to check our temperature.
- *
- * -- reset periodic timer
- * -- see if temp has changed enough to warrant re-calibration ... if so:
- * -- correct coeffs for temp (can reset temp timer)
- * -- save this temp as "last",
- * -- send new set of gain settings to NIC
- * NOTE: This should continue working, even when we're not associated,
- * so we can keep our internal table of scan powers current. */
-void iwl3945_reg_txpower_periodic(struct iwl_priv *priv)
-{
- /* This will kick in the "brute force"
- * iwl3945_hw_reg_comp_txpower_temp() below */
- if (!iwl3945_is_temp_calib_needed(priv))
- goto reschedule;
-
- /* Set up a new set of temp-adjusted TxPowers, send to NIC.
- * This is based *only* on current temperature,
- * ignoring any previous power measurements */
- iwl3945_hw_reg_comp_txpower_temp(priv);
-
- reschedule:
- queue_delayed_work(priv->workqueue,
- &priv->_3945.thermal_periodic, REG_RECALIB_PERIOD * HZ);
-}
-
-static void iwl3945_bg_reg_txpower_periodic(struct work_struct *work)
-{
- struct iwl_priv *priv = container_of(work, struct iwl_priv,
- _3945.thermal_periodic.work);
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return;
-
- mutex_lock(&priv->mutex);
- iwl3945_reg_txpower_periodic(priv);
- mutex_unlock(&priv->mutex);
-}
-
-/**
- * iwl3945_hw_reg_get_ch_grp_index - find the channel-group index (0-4)
- * for the channel.
- *
- * This function is used when initializing channel-info structs.
- *
- * NOTE: These channel groups do *NOT* match the bands above!
- * These channel groups are based on factory-tested channels;
- * on A-band, EEPROM's "group frequency" entries represent the top
- * channel in each of groups 1-4. All B/G channels are in group 0.
- */
-static u16 iwl3945_hw_reg_get_ch_grp_index(struct iwl_priv *priv,
- const struct iwl_channel_info *ch_info)
-{
- struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
- struct iwl3945_eeprom_txpower_group *ch_grp = &eeprom->groups[0];
- u8 group;
- u16 group_index = 0; /* based on factory calib frequencies */
- u8 grp_channel;
-
- /* Find the group index for the channel ... don't use index 1(?) */
- if (iwl_legacy_is_channel_a_band(ch_info)) {
- for (group = 1; group < 5; group++) {
- grp_channel = ch_grp[group].group_channel;
- if (ch_info->channel <= grp_channel) {
- group_index = group;
- break;
- }
- }
- /* group 4 has a few channels *above* its factory cal freq */
- if (group == 5)
- group_index = 4;
- } else
- group_index = 0; /* 2.4 GHz, group 0 */
-
- IWL_DEBUG_POWER(priv, "Chnl %d mapped to grp %d\n", ch_info->channel,
- group_index);
- return group_index;
-}
-
-/**
- * iwl3945_hw_reg_get_matched_power_index - Interpolate to get nominal index
- *
- * Interpolate to get nominal (i.e. at factory calibration temperature) index
- * into radio/DSP gain settings table for requested power.
- */
-static int iwl3945_hw_reg_get_matched_power_index(struct iwl_priv *priv,
- s8 requested_power,
- s32 setting_index, s32 *new_index)
-{
- const struct iwl3945_eeprom_txpower_group *chnl_grp = NULL;
- struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
- s32 index0, index1;
- s32 power = 2 * requested_power;
- s32 i;
- const struct iwl3945_eeprom_txpower_sample *samples;
- s32 gains0, gains1;
- s32 res;
- s32 denominator;
-
- chnl_grp = &eeprom->groups[setting_index];
- samples = chnl_grp->samples;
- for (i = 0; i < 5; i++) {
- if (power == samples[i].power) {
- *new_index = samples[i].gain_index;
- return 0;
- }
- }
-
- if (power > samples[1].power) {
- index0 = 0;
- index1 = 1;
- } else if (power > samples[2].power) {
- index0 = 1;
- index1 = 2;
- } else if (power > samples[3].power) {
- index0 = 2;
- index1 = 3;
- } else {
- index0 = 3;
- index1 = 4;
- }
-
- denominator = (s32) samples[index1].power - (s32) samples[index0].power;
- if (denominator == 0)
- return -EINVAL;
- gains0 = (s32) samples[index0].gain_index * (1 << 19);
- gains1 = (s32) samples[index1].gain_index * (1 << 19);
- res = gains0 + (gains1 - gains0) *
- ((s32) power - (s32) samples[index0].power) / denominator +
- (1 << 18);
- *new_index = res >> 19;
- return 0;
-}
-
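/*
 * Editorial sketch (not part of the driver): the fixed-point interpolation
 * used by iwl3945_hw_reg_get_matched_power_index() above, over a stand-in
 * two-sample bracket. Gain indices are scaled by 2^19 so the division keeps
 * fractional precision, and (1 << 18) rounds to nearest before shifting
 * back down. Struct and function names are illustrative only.
 */
#include <stdint.h>

struct gain_sample {
	int32_t power;		/* in half-dBm, as in the EEPROM samples */
	int32_t gain_index;	/* index into the power gain table */
};

static int interpolate_gain_index(const struct gain_sample *s0,
				  const struct gain_sample *s1,
				  int32_t power, int32_t *index)
{
	int32_t gains0, gains1, res;
	int32_t denom = s1->power - s0->power;

	if (denom == 0)		/* degenerate bracket, cannot interpolate */
		return -1;

	gains0 = s0->gain_index * (1 << 19);
	gains1 = s1->gain_index * (1 << 19);
	res = gains0 + (gains1 - gains0) * (power - s0->power) / denom
	      + (1 << 18);
	*index = res >> 19;
	return 0;
}
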
-static void iwl3945_hw_reg_init_channel_groups(struct iwl_priv *priv)
-{
- u32 i;
- s32 rate_index;
- struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
- const struct iwl3945_eeprom_txpower_group *group;
-
- IWL_DEBUG_POWER(priv, "Initializing factory calib info from EEPROM\n");
-
- for (i = 0; i < IWL_NUM_TX_CALIB_GROUPS; i++) {
- s8 *clip_pwrs; /* table of power levels for each rate */
- s8 satur_pwr; /* saturation power for each chnl group */
- group = &eeprom->groups[i];
-
- /* sanity check on factory saturation power value */
- if (group->saturation_power < 40) {
- IWL_WARN(priv, "Error: saturation power is %d, "
- "less than minimum expected 40\n",
- group->saturation_power);
- return;
- }
-
- /*
- * Derive requested power levels for each rate, based on
- * hardware capabilities (saturation power for band).
- * Basic value is 3dB down from saturation, with further
- * power reductions for highest 3 data rates. These
- * backoffs provide headroom for high rate modulation
- * power peaks, without too much distortion (clipping).
- */
- /* we'll fill in this array with h/w max power levels */
- clip_pwrs = (s8 *) priv->_3945.clip_groups[i].clip_powers;
-
- /* divide factory saturation power by 2 to find -3dB level */
- satur_pwr = (s8) (group->saturation_power >> 1);
-
- /* fill in channel group's nominal powers for each rate */
- for (rate_index = 0;
- rate_index < IWL_RATE_COUNT_3945; rate_index++, clip_pwrs++) {
- switch (rate_index) {
- case IWL_RATE_36M_INDEX_TABLE:
- if (i == 0) /* B/G */
- *clip_pwrs = satur_pwr;
- else /* A */
- *clip_pwrs = satur_pwr - 5;
- break;
- case IWL_RATE_48M_INDEX_TABLE:
- if (i == 0)
- *clip_pwrs = satur_pwr - 7;
- else
- *clip_pwrs = satur_pwr - 10;
- break;
- case IWL_RATE_54M_INDEX_TABLE:
- if (i == 0)
- *clip_pwrs = satur_pwr - 9;
- else
- *clip_pwrs = satur_pwr - 12;
- break;
- default:
- *clip_pwrs = satur_pwr;
- break;
- }
- }
- }
-}
-
-/**
- * iwl3945_txpower_set_from_eeprom - Set channel power info based on EEPROM
- *
- * Second pass (during init) to set up priv->channel_info
- *
- * Set up Tx-power settings in our channel info database for each VALID
- * (for this geo/SKU) channel, at all Tx data rates, based on eeprom values
- * and current temperature.
- *
- * Since this is based on current temperature (at init time), these values may
- * not be valid for very long, but it gives us a starting/default point,
- * and allows us to do active (i.e. Tx-enabled) scanning.
- *
- * This does *not* write values to NIC, just sets up our internal table.
- */
-int iwl3945_txpower_set_from_eeprom(struct iwl_priv *priv)
-{
- struct iwl_channel_info *ch_info = NULL;
- struct iwl3945_channel_power_info *pwr_info;
- struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
- int delta_index;
- u8 rate_index;
- u8 scan_tbl_index;
- const s8 *clip_pwrs; /* array of power levels for each rate */
- u8 gain, dsp_atten;
- s8 power;
- u8 pwr_index, base_pwr_index, a_band;
- u8 i;
- int temperature;
-
- /* save temperature reference,
- * so we can determine next time to calibrate */
- temperature = iwl3945_hw_reg_txpower_get_temperature(priv);
- priv->last_temperature = temperature;
-
- iwl3945_hw_reg_init_channel_groups(priv);
-
- /* initialize Tx power info for each and every channel, 2.4 and 5.x */
- for (i = 0, ch_info = priv->channel_info; i < priv->channel_count;
- i++, ch_info++) {
- a_band = iwl_legacy_is_channel_a_band(ch_info);
- if (!iwl_legacy_is_channel_valid(ch_info))
- continue;
-
- /* find this channel's channel group (*not* "band") index */
- ch_info->group_index =
- iwl3945_hw_reg_get_ch_grp_index(priv, ch_info);
-
- /* Get this chnlgrp's rate->max/clip-powers table */
- clip_pwrs = priv->_3945.clip_groups[ch_info->group_index].clip_powers;
-
- /* calculate power index *adjustment* value according to
- * diff between current temperature and factory temperature */
- delta_index = iwl3945_hw_reg_adjust_power_by_temp(temperature,
- eeprom->groups[ch_info->group_index].
- temperature);
-
- IWL_DEBUG_POWER(priv, "Delta index for channel %d: %d [%d]\n",
- ch_info->channel, delta_index, temperature +
- IWL_TEMP_CONVERT);
-
- /* set tx power value for all OFDM rates */
- for (rate_index = 0; rate_index < IWL_OFDM_RATES;
- rate_index++) {
- s32 uninitialized_var(power_idx);
- int rc;
-
- /* use channel group's clip-power table,
- * but don't exceed channel's max power */
- s8 pwr = min(ch_info->max_power_avg,
- clip_pwrs[rate_index]);
-
- pwr_info = &ch_info->power_info[rate_index];
-
- /* get base (i.e. at factory-measured temperature)
- * power table index for this rate's power */
- rc = iwl3945_hw_reg_get_matched_power_index(priv, pwr,
- ch_info->group_index,
- &power_idx);
- if (rc) {
- IWL_ERR(priv, "Invalid power index\n");
- return rc;
- }
- pwr_info->base_power_index = (u8) power_idx;
-
- /* temperature compensate */
- power_idx += delta_index;
-
- /* stay within range of gain table */
- power_idx = iwl3945_hw_reg_fix_power_index(power_idx);
-
- /* fill 1 OFDM rate's iwl3945_channel_power_info struct */
- pwr_info->requested_power = pwr;
- pwr_info->power_table_index = (u8) power_idx;
- pwr_info->tpc.tx_gain =
- power_gain_table[a_band][power_idx].tx_gain;
- pwr_info->tpc.dsp_atten =
- power_gain_table[a_band][power_idx].dsp_atten;
- }
-
- /* set tx power for CCK rates, based on OFDM 12 Mbit settings*/
- pwr_info = &ch_info->power_info[IWL_RATE_12M_INDEX_TABLE];
- power = pwr_info->requested_power +
- IWL_CCK_FROM_OFDM_POWER_DIFF;
- pwr_index = pwr_info->power_table_index +
- IWL_CCK_FROM_OFDM_INDEX_DIFF;
- base_pwr_index = pwr_info->base_power_index +
- IWL_CCK_FROM_OFDM_INDEX_DIFF;
-
- /* stay within table range */
- pwr_index = iwl3945_hw_reg_fix_power_index(pwr_index);
- gain = power_gain_table[a_band][pwr_index].tx_gain;
- dsp_atten = power_gain_table[a_band][pwr_index].dsp_atten;
-
- /* fill each CCK rate's iwl3945_channel_power_info structure
- * NOTE: All CCK-rate Txpwrs are the same for a given chnl!
- * NOTE: CCK rates start at end of OFDM rates! */
- for (rate_index = 0;
- rate_index < IWL_CCK_RATES; rate_index++) {
- pwr_info = &ch_info->power_info[rate_index+IWL_OFDM_RATES];
- pwr_info->requested_power = power;
- pwr_info->power_table_index = pwr_index;
- pwr_info->base_power_index = base_pwr_index;
- pwr_info->tpc.tx_gain = gain;
- pwr_info->tpc.dsp_atten = dsp_atten;
- }
-
- /* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */
- for (scan_tbl_index = 0;
- scan_tbl_index < IWL_NUM_SCAN_RATES; scan_tbl_index++) {
- s32 actual_index = (scan_tbl_index == 0) ?
- IWL_RATE_1M_INDEX_TABLE : IWL_RATE_6M_INDEX_TABLE;
- iwl3945_hw_reg_set_scan_power(priv, scan_tbl_index,
- actual_index, clip_pwrs, ch_info, a_band);
- }
- }
-
- return 0;
-}
-
-int iwl3945_hw_rxq_stop(struct iwl_priv *priv)
-{
- int rc;
-
- iwl_legacy_write_direct32(priv, FH39_RCSR_CONFIG(0), 0);
- rc = iwl_poll_direct_bit(priv, FH39_RSSR_STATUS,
- FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
- if (rc < 0)
- IWL_ERR(priv, "Can't stop Rx DMA.\n");
-
- return 0;
-}
-
-int iwl3945_hw_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq)
-{
- int txq_id = txq->q.id;
-
- struct iwl3945_shared *shared_data = priv->_3945.shared_virt;
-
- shared_data->tx_base_ptr[txq_id] = cpu_to_le32((u32)txq->q.dma_addr);
-
- iwl_legacy_write_direct32(priv, FH39_CBCC_CTRL(txq_id), 0);
- iwl_legacy_write_direct32(priv, FH39_CBCC_BASE(txq_id), 0);
-
- iwl_legacy_write_direct32(priv, FH39_TCSR_CONFIG(txq_id),
- FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT |
- FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF |
- FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD |
- FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL |
- FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE);
-
- /* fake read to flush all prev. writes */
- iwl_read32(priv, FH39_TSSR_CBB_BASE);
-
- return 0;
-}
-
-/*
- * HCMD utils
- */
-static u16 iwl3945_get_hcmd_size(u8 cmd_id, u16 len)
-{
- switch (cmd_id) {
- case REPLY_RXON:
- return sizeof(struct iwl3945_rxon_cmd);
- case POWER_TABLE_CMD:
- return sizeof(struct iwl3945_powertable_cmd);
- default:
- return len;
- }
-}
-
-
-static u16 iwl3945_build_addsta_hcmd(const struct iwl_legacy_addsta_cmd *cmd,
- u8 *data)
-{
- struct iwl3945_addsta_cmd *addsta = (struct iwl3945_addsta_cmd *)data;
- addsta->mode = cmd->mode;
- memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify));
- memcpy(&addsta->key, &cmd->key, sizeof(struct iwl4965_keyinfo));
- addsta->station_flags = cmd->station_flags;
- addsta->station_flags_msk = cmd->station_flags_msk;
- addsta->tid_disable_tx = cpu_to_le16(0);
- addsta->rate_n_flags = cmd->rate_n_flags;
- addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid;
- addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid;
- addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn;
-
- return (u16)sizeof(struct iwl3945_addsta_cmd);
-}
-
-static int iwl3945_add_bssid_station(struct iwl_priv *priv,
- const u8 *addr, u8 *sta_id_r)
-{
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- int ret;
- u8 sta_id;
- unsigned long flags;
-
- if (sta_id_r)
- *sta_id_r = IWL_INVALID_STATION;
-
- ret = iwl_legacy_add_station_common(priv, ctx, addr, 0, NULL, &sta_id);
- if (ret) {
- IWL_ERR(priv, "Unable to add station %pM\n", addr);
- return ret;
- }
-
- if (sta_id_r)
- *sta_id_r = sta_id;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- priv->stations[sta_id].used |= IWL_STA_LOCAL;
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- return 0;
-}
-static int iwl3945_manage_ibss_station(struct iwl_priv *priv,
- struct ieee80211_vif *vif, bool add)
-{
- struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
- int ret;
-
- if (add) {
- ret = iwl3945_add_bssid_station(priv, vif->bss_conf.bssid,
- &vif_priv->ibss_bssid_sta_id);
- if (ret)
- return ret;
-
- iwl3945_sync_sta(priv, vif_priv->ibss_bssid_sta_id,
- (priv->band == IEEE80211_BAND_5GHZ) ?
- IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP);
- iwl3945_rate_scale_init(priv->hw, vif_priv->ibss_bssid_sta_id);
-
- return 0;
- }
-
- return iwl_legacy_remove_station(priv, vif_priv->ibss_bssid_sta_id,
- vif->bss_conf.bssid);
-}
-
-/**
- * iwl3945_init_hw_rate_table - Initialize the hardware rate fallback table
- */
-int iwl3945_init_hw_rate_table(struct iwl_priv *priv)
-{
- int rc, i, index, prev_index;
- struct iwl3945_rate_scaling_cmd rate_cmd = {
- .reserved = {0, 0, 0},
- };
- struct iwl3945_rate_scaling_info *table = rate_cmd.table;
-
- for (i = 0; i < ARRAY_SIZE(iwl3945_rates); i++) {
- index = iwl3945_rates[i].table_rs_index;
-
- table[index].rate_n_flags =
- iwl3945_hw_set_rate_n_flags(iwl3945_rates[i].plcp, 0);
- table[index].try_cnt = priv->retry_rate;
- prev_index = iwl3945_get_prev_ieee_rate(i);
- table[index].next_rate_index =
- iwl3945_rates[prev_index].table_rs_index;
- }
-
- switch (priv->band) {
- case IEEE80211_BAND_5GHZ:
- IWL_DEBUG_RATE(priv, "Select A mode rate scale\n");
- /* If one of the following CCK rates is used,
- * have it fall back to the 6M OFDM rate */
- for (i = IWL_RATE_1M_INDEX_TABLE;
- i <= IWL_RATE_11M_INDEX_TABLE; i++)
- table[i].next_rate_index =
- iwl3945_rates[IWL_FIRST_OFDM_RATE].table_rs_index;
-
- /* Don't fall back to CCK rates */
- table[IWL_RATE_12M_INDEX_TABLE].next_rate_index =
- IWL_RATE_9M_INDEX_TABLE;
-
- /* Don't drop out of OFDM rates */
- table[IWL_RATE_6M_INDEX_TABLE].next_rate_index =
- iwl3945_rates[IWL_FIRST_OFDM_RATE].table_rs_index;
- break;
-
- case IEEE80211_BAND_2GHZ:
- IWL_DEBUG_RATE(priv, "Select B/G mode rate scale\n");
- /* If an OFDM rate is used, have it fall back to the
- * 1M CCK rates */
-
- if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) &&
- iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
-
- index = IWL_FIRST_CCK_RATE;
- for (i = IWL_RATE_6M_INDEX_TABLE;
- i <= IWL_RATE_54M_INDEX_TABLE; i++)
- table[i].next_rate_index =
- iwl3945_rates[index].table_rs_index;
-
- index = IWL_RATE_11M_INDEX_TABLE;
- /* CCK shouldn't fall back to OFDM... */
- table[index].next_rate_index = IWL_RATE_5M_INDEX_TABLE;
- }
- break;
-
- default:
- WARN_ON(1);
- break;
- }
-
- /* Update the rate scaling for control frame Tx */
- rate_cmd.table_id = 0;
- rc = iwl_legacy_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd),
- &rate_cmd);
- if (rc)
- return rc;
-
- /* Update the rate scaling for data frame Tx */
- rate_cmd.table_id = 1;
- return iwl_legacy_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd),
- &rate_cmd);
-}
-
-/* Called when initializing driver */
-int iwl3945_hw_set_hw_params(struct iwl_priv *priv)
-{
- memset((void *)&priv->hw_params, 0,
- sizeof(struct iwl_hw_params));
-
- priv->_3945.shared_virt =
- dma_alloc_coherent(&priv->pci_dev->dev,
- sizeof(struct iwl3945_shared),
- &priv->_3945.shared_phys, GFP_KERNEL);
- if (!priv->_3945.shared_virt) {
- IWL_ERR(priv, "failed to allocate pci memory\n");
- return -ENOMEM;
- }
-
- /* Assign number of Usable TX queues */
- priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
-
- priv->hw_params.tfd_size = sizeof(struct iwl3945_tfd);
- priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_3K);
- priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
- priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
- priv->hw_params.max_stations = IWL3945_STATION_COUNT;
- priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWL3945_BROADCAST_ID;
-
- priv->sta_key_max_num = STA_KEY_MAX_NUM;
-
- priv->hw_params.rx_wrt_ptr_reg = FH39_RSCSR_CHNL0_WPTR;
- priv->hw_params.max_beacon_itrvl = IWL39_MAX_UCODE_BEACON_INTERVAL;
- priv->hw_params.beacon_time_tsf_bits = IWL3945_EXT_BEACON_TIME_POS;
-
- return 0;
-}
-
-unsigned int iwl3945_hw_get_beacon_cmd(struct iwl_priv *priv,
- struct iwl3945_frame *frame, u8 rate)
-{
- struct iwl3945_tx_beacon_cmd *tx_beacon_cmd;
- unsigned int frame_size;
-
- tx_beacon_cmd = (struct iwl3945_tx_beacon_cmd *)&frame->u;
- memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
-
- tx_beacon_cmd->tx.sta_id =
- priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id;
- tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
-
- frame_size = iwl3945_fill_beacon_frame(priv,
- tx_beacon_cmd->frame,
- sizeof(frame->u) - sizeof(*tx_beacon_cmd));
-
- BUG_ON(frame_size > MAX_MPDU_SIZE);
- tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);
-
- tx_beacon_cmd->tx.rate = rate;
- tx_beacon_cmd->tx.tx_flags = (TX_CMD_FLG_SEQ_CTL_MSK |
- TX_CMD_FLG_TSF_MSK);
-
- /* supp_rates[0] == OFDM rates, starting at IWL_FIRST_OFDM_RATE */
- tx_beacon_cmd->tx.supp_rates[0] =
- (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
-
- tx_beacon_cmd->tx.supp_rates[1] =
- (IWL_CCK_BASIC_RATES_MASK & 0xF);
-
- return sizeof(struct iwl3945_tx_beacon_cmd) + frame_size;
-}
-
-void iwl3945_hw_rx_handler_setup(struct iwl_priv *priv)
-{
- priv->rx_handlers[REPLY_TX] = iwl3945_rx_reply_tx;
- priv->rx_handlers[REPLY_3945_RX] = iwl3945_rx_reply_rx;
-}
-
-void iwl3945_hw_setup_deferred_work(struct iwl_priv *priv)
-{
- INIT_DELAYED_WORK(&priv->_3945.thermal_periodic,
- iwl3945_bg_reg_txpower_periodic);
-}
-
-void iwl3945_hw_cancel_deferred_work(struct iwl_priv *priv)
-{
- cancel_delayed_work(&priv->_3945.thermal_periodic);
-}
-
-/* check contents of special bootstrap uCode SRAM */
-static int iwl3945_verify_bsm(struct iwl_priv *priv)
-{
- __le32 *image = priv->ucode_boot.v_addr;
- u32 len = priv->ucode_boot.len;
- u32 reg;
- u32 val;
-
- IWL_DEBUG_INFO(priv, "Begin verify bsm\n");
-
- /* verify BSM SRAM contents */
- val = iwl_legacy_read_prph(priv, BSM_WR_DWCOUNT_REG);
- for (reg = BSM_SRAM_LOWER_BOUND;
- reg < BSM_SRAM_LOWER_BOUND + len;
- reg += sizeof(u32), image++) {
- val = iwl_legacy_read_prph(priv, reg);
- if (val != le32_to_cpu(*image)) {
- IWL_ERR(priv, "BSM uCode verification failed at "
- "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
- BSM_SRAM_LOWER_BOUND,
- reg - BSM_SRAM_LOWER_BOUND, len,
- val, le32_to_cpu(*image));
- return -EIO;
- }
- }
-
- IWL_DEBUG_INFO(priv, "BSM bootstrap uCode image OK\n");
-
- return 0;
-}
-
-
-/******************************************************************************
- *
- * EEPROM related functions
- *
- ******************************************************************************/
-
-/*
- * Clear the OWNER_MSK, to establish driver (instead of uCode running on
- * embedded controller) as EEPROM reader; each read is a series of pulses
- * to/from the EEPROM chip, not a single event, so even reads could conflict
- * if they weren't arbitrated by some ownership mechanism. Here, the driver
- * simply claims ownership, which should be safe when this function is called
- * (i.e. before loading uCode!).
- */
-static int iwl3945_eeprom_acquire_semaphore(struct iwl_priv *priv)
-{
- _iwl_legacy_clear_bit(priv, CSR_EEPROM_GP, CSR_EEPROM_GP_IF_OWNER_MSK);
- return 0;
-}
-
-
-static void iwl3945_eeprom_release_semaphore(struct iwl_priv *priv)
-{
- return;
-}
-
-/**
- * iwl3945_load_bsm - Load bootstrap instructions
- *
- * BSM operation:
- *
- * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
- * in special SRAM that does not power down during RFKILL. When powering back
- * up after power-saving sleeps (or during initial uCode load), the BSM loads
- * the bootstrap program into the on-board processor, and starts it.
- *
- * The bootstrap program loads (via DMA) instructions and data for a new
- * program from host DRAM locations indicated by the host driver in the
- * BSM_DRAM_* registers. Once the new program is loaded, it starts
- * automatically.
- *
- * When initializing the NIC, the host driver points the BSM to the
- * "initialize" uCode image. This uCode sets up some internal data, then
- * notifies host via "initialize alive" that it is complete.
- *
- * The host then replaces the BSM_DRAM_* pointer values to point to the
- * normal runtime uCode instructions and a backup uCode data cache buffer
- * (filled initially with starting data values for the on-board processor),
- * then triggers the "initialize" uCode to load and launch the runtime uCode,
- * which begins normal operation.
- *
- * When doing a power-save shutdown, runtime uCode saves data SRAM into
- * the backup data cache in DRAM before SRAM is powered down.
- *
- * When powering back up, the BSM loads the bootstrap program. This reloads
- * the runtime uCode instructions and the backup data cache into SRAM,
- * and re-launches the runtime uCode from where it left off.
- */
-static int iwl3945_load_bsm(struct iwl_priv *priv)
-{
- __le32 *image = priv->ucode_boot.v_addr;
- u32 len = priv->ucode_boot.len;
- dma_addr_t pinst;
- dma_addr_t pdata;
- u32 inst_len;
- u32 data_len;
- int rc;
- int i;
- u32 done;
- u32 reg_offset;
-
- IWL_DEBUG_INFO(priv, "Begin load bsm\n");
-
- /* make sure bootstrap program is no larger than BSM's SRAM size */
- if (len > IWL39_MAX_BSM_SIZE)
- return -EINVAL;
-
- /* Tell bootstrap uCode where to find the "Initialize" uCode
- * in host DRAM ... host DRAM physical address bits 31:0 for 3945.
- * NOTE: iwl3945_initialize_alive_start() will replace these values,
- * after the "initialize" uCode has run, to point to
- * runtime/protocol instructions and backup data cache. */
- pinst = priv->ucode_init.p_addr;
- pdata = priv->ucode_init_data.p_addr;
- inst_len = priv->ucode_init.len;
- data_len = priv->ucode_init_data.len;
-
- iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
- iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
- iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
- iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
-
- /* Fill BSM memory with bootstrap instructions */
- for (reg_offset = BSM_SRAM_LOWER_BOUND;
- reg_offset < BSM_SRAM_LOWER_BOUND + len;
- reg_offset += sizeof(u32), image++)
- _iwl_legacy_write_prph(priv, reg_offset,
- le32_to_cpu(*image));
-
- rc = iwl3945_verify_bsm(priv);
- if (rc)
- return rc;
-
- /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
- iwl_legacy_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
- iwl_legacy_write_prph(priv, BSM_WR_MEM_DST_REG,
- IWL39_RTC_INST_LOWER_BOUND);
- iwl_legacy_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
-
- /* Load bootstrap code into instruction SRAM now,
- * to prepare to load "initialize" uCode */
- iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG,
- BSM_WR_CTRL_REG_BIT_START);
-
- /* Wait for load of bootstrap uCode to finish */
- for (i = 0; i < 100; i++) {
- done = iwl_legacy_read_prph(priv, BSM_WR_CTRL_REG);
- if (!(done & BSM_WR_CTRL_REG_BIT_START))
- break;
- udelay(10);
- }
- if (i < 100) {
- IWL_DEBUG_INFO(priv, "BSM write complete, poll %d iterations\n", i);
- } else {
- IWL_ERR(priv, "BSM write did not complete!\n");
- return -EIO;
- }
-
- /* Enable future boot loads whenever power management unit triggers it
- * (e.g. when powering back up after power-save shutdown) */
- iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG,
- BSM_WR_CTRL_REG_BIT_START_EN);
-
- return 0;
-}
-
-static struct iwl_hcmd_ops iwl3945_hcmd = {
- .rxon_assoc = iwl3945_send_rxon_assoc,
- .commit_rxon = iwl3945_commit_rxon,
-};
-
-static struct iwl_lib_ops iwl3945_lib = {
- .txq_attach_buf_to_tfd = iwl3945_hw_txq_attach_buf_to_tfd,
- .txq_free_tfd = iwl3945_hw_txq_free_tfd,
- .txq_init = iwl3945_hw_tx_queue_init,
- .load_ucode = iwl3945_load_bsm,
- .dump_nic_error_log = iwl3945_dump_nic_error_log,
- .apm_ops = {
- .init = iwl3945_apm_init,
- .config = iwl3945_nic_config,
- },
- .eeprom_ops = {
- .regulatory_bands = {
- EEPROM_REGULATORY_BAND_1_CHANNELS,
- EEPROM_REGULATORY_BAND_2_CHANNELS,
- EEPROM_REGULATORY_BAND_3_CHANNELS,
- EEPROM_REGULATORY_BAND_4_CHANNELS,
- EEPROM_REGULATORY_BAND_5_CHANNELS,
- EEPROM_REGULATORY_BAND_NO_HT40,
- EEPROM_REGULATORY_BAND_NO_HT40,
- },
- .acquire_semaphore = iwl3945_eeprom_acquire_semaphore,
- .release_semaphore = iwl3945_eeprom_release_semaphore,
- },
- .send_tx_power = iwl3945_send_tx_power,
- .is_valid_rtc_data_addr = iwl3945_hw_valid_rtc_data_addr,
-
- .debugfs_ops = {
- .rx_stats_read = iwl3945_ucode_rx_stats_read,
- .tx_stats_read = iwl3945_ucode_tx_stats_read,
- .general_stats_read = iwl3945_ucode_general_stats_read,
- },
-};
-
-static const struct iwl_legacy_ops iwl3945_legacy_ops = {
- .post_associate = iwl3945_post_associate,
- .config_ap = iwl3945_config_ap,
- .manage_ibss_station = iwl3945_manage_ibss_station,
-};
-
-static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
- .get_hcmd_size = iwl3945_get_hcmd_size,
- .build_addsta_hcmd = iwl3945_build_addsta_hcmd,
- .request_scan = iwl3945_request_scan,
- .post_scan = iwl3945_post_scan,
-};
-
-static const struct iwl_ops iwl3945_ops = {
- .lib = &iwl3945_lib,
- .hcmd = &iwl3945_hcmd,
- .utils = &iwl3945_hcmd_utils,
- .led = &iwl3945_led_ops,
- .legacy = &iwl3945_legacy_ops,
- .ieee80211_ops = &iwl3945_hw_ops,
-};
-
-static struct iwl_base_params iwl3945_base_params = {
- .eeprom_size = IWL3945_EEPROM_IMG_SIZE,
- .num_of_queues = IWL39_NUM_QUEUES,
- .pll_cfg_val = CSR39_ANA_PLL_CFG_VAL,
- .set_l0s = false,
- .use_bsm = true,
- .led_compensation = 64,
- .wd_timeout = IWL_DEF_WD_TIMEOUT,
-};
-
-static struct iwl_cfg iwl3945_bg_cfg = {
- .name = "3945BG",
- .fw_name_pre = IWL3945_FW_PRE,
- .ucode_api_max = IWL3945_UCODE_API_MAX,
- .ucode_api_min = IWL3945_UCODE_API_MIN,
- .sku = IWL_SKU_G,
- .eeprom_ver = EEPROM_3945_EEPROM_VERSION,
- .ops = &iwl3945_ops,
- .mod_params = &iwl3945_mod_params,
- .base_params = &iwl3945_base_params,
- .led_mode = IWL_LED_BLINK,
-};
-
-static struct iwl_cfg iwl3945_abg_cfg = {
- .name = "3945ABG",
- .fw_name_pre = IWL3945_FW_PRE,
- .ucode_api_max = IWL3945_UCODE_API_MAX,
- .ucode_api_min = IWL3945_UCODE_API_MIN,
- .sku = IWL_SKU_A|IWL_SKU_G,
- .eeprom_ver = EEPROM_3945_EEPROM_VERSION,
- .ops = &iwl3945_ops,
- .mod_params = &iwl3945_mod_params,
- .base_params = &iwl3945_base_params,
- .led_mode = IWL_LED_BLINK,
-};
-
-DEFINE_PCI_DEVICE_TABLE(iwl3945_hw_card_ids) = {
- {IWL_PCI_DEVICE(0x4222, 0x1005, iwl3945_bg_cfg)},
- {IWL_PCI_DEVICE(0x4222, 0x1034, iwl3945_bg_cfg)},
- {IWL_PCI_DEVICE(0x4222, 0x1044, iwl3945_bg_cfg)},
- {IWL_PCI_DEVICE(0x4227, 0x1014, iwl3945_bg_cfg)},
- {IWL_PCI_DEVICE(0x4222, PCI_ANY_ID, iwl3945_abg_cfg)},
- {IWL_PCI_DEVICE(0x4227, PCI_ANY_ID, iwl3945_abg_cfg)},
- {0}
-};
-
-MODULE_DEVICE_TABLE(pci, iwl3945_hw_card_ids);
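As an aside on the device table above: each IWL_PCI_DEVICE() entry stashes a pointer to the matching iwl_cfg in the pci_device_id .driver_data field (the usual iwlwifi convention; the macro itself is defined elsewhere in the driver), and the PCI probe routine casts it back to select per-device parameters. A minimal sketch of that pattern, using placeholder names rather than iwlegacy symbols:

#include <linux/module.h>
#include <linux/pci.h>

struct my_cfg {
	const char *name;		/* stand-in for struct iwl_cfg */
};

static const struct my_cfg my_bg_cfg = { .name = "3945BG" };

static const struct pci_device_id my_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4222),
	  .driver_data = (kernel_ulong_t)&my_bg_cfg },
	{ }				/* all-zero terminator, as in the table above */
};
MODULE_DEVICE_TABLE(pci, my_ids);

static int my_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	/* recover the configuration stashed when the table was built */
	const struct my_cfg *cfg = (const struct my_cfg *)ent->driver_data;

	dev_info(&pdev->dev, "probing a %s device\n", cfg->name);
	return 0;
}

static struct pci_driver my_driver = {
	.name		= "my3945",
	.id_table	= my_ids,
	.probe		= my_probe,
};
module_pci_driver(my_driver);
MODULE_LICENSE("GPL");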
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945.h b/drivers/net/wireless/iwlegacy/iwl-3945.h
deleted file mode 100644
index b118b59b71de..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-3945.h
+++ /dev/null
@@ -1,308 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-/*
- * Please use this file (iwl-3945.h) for driver implementation definitions.
- * Please use iwl-3945-commands.h for uCode API definitions.
- * Please use iwl-3945-hw.h for hardware-related definitions.
- */
-
-#ifndef __iwl_3945_h__
-#define __iwl_3945_h__
-
-#include <linux/pci.h> /* for struct pci_device_id */
-#include <linux/kernel.h>
-#include <net/ieee80211_radiotap.h>
-
-/* Hardware specific file defines the PCI IDs table for that hardware module */
-extern const struct pci_device_id iwl3945_hw_card_ids[];
-
-#include "iwl-csr.h"
-#include "iwl-prph.h"
-#include "iwl-fh.h"
-#include "iwl-3945-hw.h"
-#include "iwl-debug.h"
-#include "iwl-power.h"
-#include "iwl-dev.h"
-#include "iwl-led.h"
-
-/* Highest firmware API version supported */
-#define IWL3945_UCODE_API_MAX 2
-
-/* Lowest firmware API version supported */
-#define IWL3945_UCODE_API_MIN 1
-
-#define IWL3945_FW_PRE "iwlwifi-3945-"
-#define _IWL3945_MODULE_FIRMWARE(api) IWL3945_FW_PRE #api ".ucode"
-#define IWL3945_MODULE_FIRMWARE(api) _IWL3945_MODULE_FIRMWARE(api)
-
-/* Default noise level to report when noise measurement is not available.
- * This may be because we're:
- * 1) Not associated (4965, no beacon statistics being sent to driver)
- * 2) Scanning (noise measurement does not apply to associated channel)
- * 3) Receiving CCK (3945 delivers noise info only for OFDM frames)
- * Use default noise value of -127 ... this is below the range of measurable
- * Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user.
- * Also, -127 works better than 0 when averaging frames with/without
- * noise info (e.g. averaging might be done in app); measured dBm values are
- * always negative ... using a negative value as the default keeps all
- * averages within an s8's (used in some apps) range of negative values. */
-#define IWL_NOISE_MEAS_NOT_AVAILABLE (-127)
-
-/* Module parameters accessible from iwl-*.c */
-extern struct iwl_mod_params iwl3945_mod_params;
-
-struct iwl3945_rate_scale_data {
- u64 data;
- s32 success_counter;
- s32 success_ratio;
- s32 counter;
- s32 average_tpt;
- unsigned long stamp;
-};
-
-struct iwl3945_rs_sta {
- spinlock_t lock;
- struct iwl_priv *priv;
- s32 *expected_tpt;
- unsigned long last_partial_flush;
- unsigned long last_flush;
- u32 flush_time;
- u32 last_tx_packets;
- u32 tx_packets;
- u8 tgg;
- u8 flush_pending;
- u8 start_rate;
- struct timer_list rate_scale_flush;
- struct iwl3945_rate_scale_data win[IWL_RATE_COUNT_3945];
-#ifdef CONFIG_MAC80211_DEBUGFS
- struct dentry *rs_sta_dbgfs_stats_table_file;
-#endif
-
- /* used to be in sta_info */
- int last_txrate_idx;
-};
-
-
-/*
- * The common struct MUST be first because it is shared between
- * 3945 and 4965!
- */
-struct iwl3945_sta_priv {
- struct iwl_station_priv_common common;
- struct iwl3945_rs_sta rs_sta;
-};
-
-enum iwl3945_antenna {
- IWL_ANTENNA_DIVERSITY,
- IWL_ANTENNA_MAIN,
- IWL_ANTENNA_AUX
-};
-
-/*
- * RTS threshold here is total size [2347] minus 4 FCS bytes
- * Per spec:
- * a value of 0 means RTS on all data/management packets
- * a value > max MSDU size means no RTS
- * else RTS for data/management frames where MPDU is larger
- * than RTS value.
- */
-#define DEFAULT_RTS_THRESHOLD 2347U
-#define MIN_RTS_THRESHOLD 0U
-#define MAX_RTS_THRESHOLD 2347U
-#define MAX_MSDU_SIZE 2304U
-#define MAX_MPDU_SIZE 2346U
-#define DEFAULT_BEACON_INTERVAL 100U
-#define DEFAULT_SHORT_RETRY_LIMIT 7U
-#define DEFAULT_LONG_RETRY_LIMIT 4U
-
-#define IWL_TX_FIFO_AC0 0
-#define IWL_TX_FIFO_AC1 1
-#define IWL_TX_FIFO_AC2 2
-#define IWL_TX_FIFO_AC3 3
-#define IWL_TX_FIFO_HCCA_1 5
-#define IWL_TX_FIFO_HCCA_2 6
-#define IWL_TX_FIFO_NONE 7
-
-#define IEEE80211_DATA_LEN 2304
-#define IEEE80211_4ADDR_LEN 30
-#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN)
-#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN)
-
-struct iwl3945_frame {
- union {
- struct ieee80211_hdr frame;
- struct iwl3945_tx_beacon_cmd beacon;
- u8 raw[IEEE80211_FRAME_LEN];
- u8 cmd[360];
- } u;
- struct list_head list;
-};
-
-#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
-#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
-#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
-
-#define SUP_RATE_11A_MAX_NUM_CHANNELS 8
-#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
-#define SUP_RATE_11G_MAX_NUM_CHANNELS 12
-
-#define IWL_SUPPORTED_RATES_IE_LEN 8
-
-#define SCAN_INTERVAL 100
-
-#define MAX_TID_COUNT 9
-
-#define IWL_INVALID_RATE 0xFF
-#define IWL_INVALID_VALUE -1
-
-#define STA_PS_STATUS_WAKE 0
-#define STA_PS_STATUS_SLEEP 1
-
-struct iwl3945_ibss_seq {
- u8 mac[ETH_ALEN];
- u16 seq_num;
- u16 frag_num;
- unsigned long packet_time;
- struct list_head list;
-};
-
-#define IWL_RX_HDR(x) ((struct iwl3945_rx_frame_hdr *)(\
- x->u.rx_frame.stats.payload + \
- x->u.rx_frame.stats.phy_count))
-#define IWL_RX_END(x) ((struct iwl3945_rx_frame_end *)(\
- IWL_RX_HDR(x)->payload + \
- le16_to_cpu(IWL_RX_HDR(x)->len)))
-#define IWL_RX_STATS(x) (&x->u.rx_frame.stats)
-#define IWL_RX_DATA(x) (IWL_RX_HDR(x)->payload)
-
-
-/******************************************************************************
- *
- * Functions implemented in iwl3945-base.c which are forward declared here
- * for use by iwl-*.c
- *
- *****************************************************************************/
-extern int iwl3945_calc_db_from_ratio(int sig_ratio);
-extern void iwl3945_rx_replenish(void *data);
-extern void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
-extern unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv,
- struct ieee80211_hdr *hdr, int left);
-extern int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
- char **buf, bool display);
-extern void iwl3945_dump_nic_error_log(struct iwl_priv *priv);
-
-/******************************************************************************
- *
- * Functions implemented in iwl-[34]*.c which are forward declared here
- * for use by iwl3945-base.c
- *
- * NOTE: The implementations of these functions are hardware specific
- * which is why they are in the hardware specific files (vs. iwl-base.c)
- *
- * Naming convention --
- * iwl3945_ <-- It's part of iwlwifi (should be changed to iwl3945_)
- * iwl3945_hw_ <-- Hardware specific (implemented in iwl-XXXX.c by all HW)
- * iwlXXXX_ <-- Hardware specific (implemented in iwl-XXXX.c for XXXX)
- * iwl3945_bg_ <-- Called from work queue context
- * iwl3945_mac_ <-- mac80211 callback
- *
- ****************************************************************************/
-extern void iwl3945_hw_rx_handler_setup(struct iwl_priv *priv);
-extern void iwl3945_hw_setup_deferred_work(struct iwl_priv *priv);
-extern void iwl3945_hw_cancel_deferred_work(struct iwl_priv *priv);
-extern int iwl3945_hw_rxq_stop(struct iwl_priv *priv);
-extern int iwl3945_hw_set_hw_params(struct iwl_priv *priv);
-extern int iwl3945_hw_nic_init(struct iwl_priv *priv);
-extern int iwl3945_hw_nic_stop_master(struct iwl_priv *priv);
-extern void iwl3945_hw_txq_ctx_free(struct iwl_priv *priv);
-extern void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv);
-extern int iwl3945_hw_nic_reset(struct iwl_priv *priv);
-extern int iwl3945_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
- struct iwl_tx_queue *txq,
- dma_addr_t addr, u16 len,
- u8 reset, u8 pad);
-extern void iwl3945_hw_txq_free_tfd(struct iwl_priv *priv,
- struct iwl_tx_queue *txq);
-extern int iwl3945_hw_get_temperature(struct iwl_priv *priv);
-extern int iwl3945_hw_tx_queue_init(struct iwl_priv *priv,
- struct iwl_tx_queue *txq);
-extern unsigned int iwl3945_hw_get_beacon_cmd(struct iwl_priv *priv,
- struct iwl3945_frame *frame, u8 rate);
-void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv,
- struct iwl_device_cmd *cmd,
- struct ieee80211_tx_info *info,
- struct ieee80211_hdr *hdr,
- int sta_id, int tx_id);
-extern int iwl3945_hw_reg_send_txpower(struct iwl_priv *priv);
-extern int iwl3945_hw_reg_set_txpower(struct iwl_priv *priv, s8 power);
-extern void iwl3945_hw_rx_statistics(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
-void iwl3945_reply_statistics(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
-extern void iwl3945_disable_events(struct iwl_priv *priv);
-extern int iwl4965_get_temperature(const struct iwl_priv *priv);
-extern void iwl3945_post_associate(struct iwl_priv *priv);
-extern void iwl3945_config_ap(struct iwl_priv *priv);
-
-extern int iwl3945_commit_rxon(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx);
-
-/**
- * iwl3945_hw_find_station - Find station id for a given BSSID
- * @bssid: MAC address of the station whose ID to find
- *
- * NOTE: This should not be hardware specific but the code has
- * not yet been merged into a single common layer for managing the
- * station tables.
- */
-extern u8 iwl3945_hw_find_station(struct iwl_priv *priv, const u8 *bssid);
-
-extern struct ieee80211_ops iwl3945_hw_ops;
-
-/*
- * Forward declare iwl-3945.c functions for iwl3945-base.c
- */
-extern __le32 iwl3945_get_antenna_flags(const struct iwl_priv *priv);
-extern int iwl3945_init_hw_rate_table(struct iwl_priv *priv);
-extern void iwl3945_reg_txpower_periodic(struct iwl_priv *priv);
-extern int iwl3945_txpower_set_from_eeprom(struct iwl_priv *priv);
-
-extern const struct iwl_channel_info *iwl3945_get_channel_info(
- const struct iwl_priv *priv, enum ieee80211_band band, u16 channel);
-
-extern int iwl3945_rs_next_rate(struct iwl_priv *priv, int rate);
-
-/* scanning */
-int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif);
-void iwl3945_post_scan(struct iwl_priv *priv);
-
-/* rates */
-extern const struct iwl3945_rate_info iwl3945_rates[IWL_RATE_COUNT_3945];
-
-/* Requires full declaration of iwl_priv before including */
-#include "iwl-io.h"
-
-#endif
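A small note on the firmware-name macros near the top of the header just removed: IWL3945_MODULE_FIRMWARE() goes through an extra macro level so that its argument is expanded before '#' stringifies it, after which the adjacent string literals are concatenated by the compiler. A stand-alone illustration with renamed macros (not the originals):

#include <stdio.h>

#define FW_PRE		"iwlwifi-3945-"
#define _FW_NAME(api)	FW_PRE #api ".ucode"	/* stringify, then literals concatenate */
#define FW_NAME(api)	_FW_NAME(api)		/* extra level: expand 'api' first */

#define UCODE_API_MAX	2

int main(void)
{
	/* prints "iwlwifi-3945-2.ucode"; calling _FW_NAME(UCODE_API_MAX)
	 * directly would instead yield "iwlwifi-3945-UCODE_API_MAX.ucode" */
	printf("%s\n", FW_NAME(UCODE_API_MAX));
	return 0;
}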
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c b/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c
deleted file mode 100644
index 1c93665766e4..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c
+++ /dev/null
@@ -1,774 +0,0 @@
-/******************************************************************************
-*
-* GPL LICENSE SUMMARY
-*
-* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
-*
-* This program is free software; you can redistribute it and/or modify
-* it under the terms of version 2 of the GNU General Public License as
-* published by the Free Software Foundation.
-*
-* This program is distributed in the hope that it will be useful, but
-* WITHOUT ANY WARRANTY; without even the implied warranty of
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-* General Public License for more details.
-*
-* You should have received a copy of the GNU General Public License
-* along with this program; if not, write to the Free Software
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
-* USA
-*
-* The full GNU General Public License is included in this distribution
-* in the file called LICENSE.GPL.
-*
-* Contact Information:
-* Intel Linux Wireless <ilw@linux.intel.com>
-* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-*****************************************************************************/
-#include "iwl-4965.h"
-#include "iwl-4965-debugfs.h"
-
-static const char *fmt_value = " %-30s %10u\n";
-static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
-static const char *fmt_header =
- "%-32s current cumulative delta max\n";
-
-static int iwl4965_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
-{
- int p = 0;
- u32 flag;
-
- flag = le32_to_cpu(priv->_4965.statistics.flag);
-
- p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n", flag);
- if (flag & UCODE_STATISTICS_CLEAR_MSK)
- p += scnprintf(buf + p, bufsz - p,
- "\tStatistics have been cleared\n");
- p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
- (flag & UCODE_STATISTICS_FREQUENCY_MSK)
- ? "2.4 GHz" : "5.2 GHz");
- p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
- (flag & UCODE_STATISTICS_NARROW_BAND_MSK)
- ? "enabled" : "disabled");
-
- return p;
-}
-
-ssize_t iwl4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- int pos = 0;
- char *buf;
- int bufsz = sizeof(struct statistics_rx_phy) * 40 +
- sizeof(struct statistics_rx_non_phy) * 40 +
- sizeof(struct statistics_rx_ht_phy) * 40 + 400;
- ssize_t ret;
- struct statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
- struct statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
- struct statistics_rx_non_phy *general, *accum_general;
- struct statistics_rx_non_phy *delta_general, *max_general;
- struct statistics_rx_ht_phy *ht, *accum_ht, *delta_ht, *max_ht;
-
- if (!iwl_legacy_is_alive(priv))
- return -EAGAIN;
-
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf) {
- IWL_ERR(priv, "Cannot allocate buffer\n");
- return -ENOMEM;
- }
-
- /*
- * The statistics displayed here are based on the last
- * statistics notification from the uCode and might not
- * reflect the current uCode activity.
- */
- ofdm = &priv->_4965.statistics.rx.ofdm;
- cck = &priv->_4965.statistics.rx.cck;
- general = &priv->_4965.statistics.rx.general;
- ht = &priv->_4965.statistics.rx.ofdm_ht;
- accum_ofdm = &priv->_4965.accum_statistics.rx.ofdm;
- accum_cck = &priv->_4965.accum_statistics.rx.cck;
- accum_general = &priv->_4965.accum_statistics.rx.general;
- accum_ht = &priv->_4965.accum_statistics.rx.ofdm_ht;
- delta_ofdm = &priv->_4965.delta_statistics.rx.ofdm;
- delta_cck = &priv->_4965.delta_statistics.rx.cck;
- delta_general = &priv->_4965.delta_statistics.rx.general;
- delta_ht = &priv->_4965.delta_statistics.rx.ofdm_ht;
- max_ofdm = &priv->_4965.max_delta.rx.ofdm;
- max_cck = &priv->_4965.max_delta.rx.cck;
- max_general = &priv->_4965.max_delta.rx.general;
- max_ht = &priv->_4965.max_delta.rx.ofdm_ht;
-
- pos += iwl4965_statistics_flag(priv, buf, bufsz);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_header, "Statistics_Rx - OFDM:");
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "ina_cnt:",
- le32_to_cpu(ofdm->ina_cnt),
- accum_ofdm->ina_cnt,
- delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "fina_cnt:",
- le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
- delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "plcp_err:",
- le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
- delta_ofdm->plcp_err, max_ofdm->plcp_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "crc32_err:",
- le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
- delta_ofdm->crc32_err, max_ofdm->crc32_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "overrun_err:",
- le32_to_cpu(ofdm->overrun_err),
- accum_ofdm->overrun_err, delta_ofdm->overrun_err,
- max_ofdm->overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "early_overrun_err:",
- le32_to_cpu(ofdm->early_overrun_err),
- accum_ofdm->early_overrun_err,
- delta_ofdm->early_overrun_err,
- max_ofdm->early_overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "crc32_good:",
- le32_to_cpu(ofdm->crc32_good),
- accum_ofdm->crc32_good, delta_ofdm->crc32_good,
- max_ofdm->crc32_good);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "false_alarm_cnt:",
- le32_to_cpu(ofdm->false_alarm_cnt),
- accum_ofdm->false_alarm_cnt,
- delta_ofdm->false_alarm_cnt,
- max_ofdm->false_alarm_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "fina_sync_err_cnt:",
- le32_to_cpu(ofdm->fina_sync_err_cnt),
- accum_ofdm->fina_sync_err_cnt,
- delta_ofdm->fina_sync_err_cnt,
- max_ofdm->fina_sync_err_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "sfd_timeout:",
- le32_to_cpu(ofdm->sfd_timeout),
- accum_ofdm->sfd_timeout, delta_ofdm->sfd_timeout,
- max_ofdm->sfd_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "fina_timeout:",
- le32_to_cpu(ofdm->fina_timeout),
- accum_ofdm->fina_timeout, delta_ofdm->fina_timeout,
- max_ofdm->fina_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "unresponded_rts:",
- le32_to_cpu(ofdm->unresponded_rts),
- accum_ofdm->unresponded_rts,
- delta_ofdm->unresponded_rts,
- max_ofdm->unresponded_rts);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "rxe_frame_lmt_ovrun:",
- le32_to_cpu(ofdm->rxe_frame_limit_overrun),
- accum_ofdm->rxe_frame_limit_overrun,
- delta_ofdm->rxe_frame_limit_overrun,
- max_ofdm->rxe_frame_limit_overrun);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "sent_ack_cnt:",
- le32_to_cpu(ofdm->sent_ack_cnt),
- accum_ofdm->sent_ack_cnt, delta_ofdm->sent_ack_cnt,
- max_ofdm->sent_ack_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "sent_cts_cnt:",
- le32_to_cpu(ofdm->sent_cts_cnt),
- accum_ofdm->sent_cts_cnt, delta_ofdm->sent_cts_cnt,
- max_ofdm->sent_cts_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "sent_ba_rsp_cnt:",
- le32_to_cpu(ofdm->sent_ba_rsp_cnt),
- accum_ofdm->sent_ba_rsp_cnt,
- delta_ofdm->sent_ba_rsp_cnt,
- max_ofdm->sent_ba_rsp_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "dsp_self_kill:",
- le32_to_cpu(ofdm->dsp_self_kill),
- accum_ofdm->dsp_self_kill,
- delta_ofdm->dsp_self_kill,
- max_ofdm->dsp_self_kill);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "mh_format_err:",
- le32_to_cpu(ofdm->mh_format_err),
- accum_ofdm->mh_format_err,
- delta_ofdm->mh_format_err,
- max_ofdm->mh_format_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "re_acq_main_rssi_sum:",
- le32_to_cpu(ofdm->re_acq_main_rssi_sum),
- accum_ofdm->re_acq_main_rssi_sum,
- delta_ofdm->re_acq_main_rssi_sum,
- max_ofdm->re_acq_main_rssi_sum);
-
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_header, "Statistics_Rx - CCK:");
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "ina_cnt:",
- le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
- delta_cck->ina_cnt, max_cck->ina_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "fina_cnt:",
- le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
- delta_cck->fina_cnt, max_cck->fina_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "plcp_err:",
- le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
- delta_cck->plcp_err, max_cck->plcp_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "crc32_err:",
- le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
- delta_cck->crc32_err, max_cck->crc32_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "overrun_err:",
- le32_to_cpu(cck->overrun_err),
- accum_cck->overrun_err, delta_cck->overrun_err,
- max_cck->overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "early_overrun_err:",
- le32_to_cpu(cck->early_overrun_err),
- accum_cck->early_overrun_err,
- delta_cck->early_overrun_err,
- max_cck->early_overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "crc32_good:",
- le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
- delta_cck->crc32_good, max_cck->crc32_good);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "false_alarm_cnt:",
- le32_to_cpu(cck->false_alarm_cnt),
- accum_cck->false_alarm_cnt,
- delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "fina_sync_err_cnt:",
- le32_to_cpu(cck->fina_sync_err_cnt),
- accum_cck->fina_sync_err_cnt,
- delta_cck->fina_sync_err_cnt,
- max_cck->fina_sync_err_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "sfd_timeout:",
- le32_to_cpu(cck->sfd_timeout),
- accum_cck->sfd_timeout, delta_cck->sfd_timeout,
- max_cck->sfd_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "fina_timeout:",
- le32_to_cpu(cck->fina_timeout),
- accum_cck->fina_timeout, delta_cck->fina_timeout,
- max_cck->fina_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "unresponded_rts:",
- le32_to_cpu(cck->unresponded_rts),
- accum_cck->unresponded_rts, delta_cck->unresponded_rts,
- max_cck->unresponded_rts);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "rxe_frame_lmt_ovrun:",
- le32_to_cpu(cck->rxe_frame_limit_overrun),
- accum_cck->rxe_frame_limit_overrun,
- delta_cck->rxe_frame_limit_overrun,
- max_cck->rxe_frame_limit_overrun);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "sent_ack_cnt:",
- le32_to_cpu(cck->sent_ack_cnt),
- accum_cck->sent_ack_cnt, delta_cck->sent_ack_cnt,
- max_cck->sent_ack_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "sent_cts_cnt:",
- le32_to_cpu(cck->sent_cts_cnt),
- accum_cck->sent_cts_cnt, delta_cck->sent_cts_cnt,
- max_cck->sent_cts_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "sent_ba_rsp_cnt:",
- le32_to_cpu(cck->sent_ba_rsp_cnt),
- accum_cck->sent_ba_rsp_cnt,
- delta_cck->sent_ba_rsp_cnt,
- max_cck->sent_ba_rsp_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "dsp_self_kill:",
- le32_to_cpu(cck->dsp_self_kill),
- accum_cck->dsp_self_kill, delta_cck->dsp_self_kill,
- max_cck->dsp_self_kill);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "mh_format_err:",
- le32_to_cpu(cck->mh_format_err),
- accum_cck->mh_format_err, delta_cck->mh_format_err,
- max_cck->mh_format_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "re_acq_main_rssi_sum:",
- le32_to_cpu(cck->re_acq_main_rssi_sum),
- accum_cck->re_acq_main_rssi_sum,
- delta_cck->re_acq_main_rssi_sum,
- max_cck->re_acq_main_rssi_sum);
-
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_header, "Statistics_Rx - GENERAL:");
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "bogus_cts:",
- le32_to_cpu(general->bogus_cts),
- accum_general->bogus_cts, delta_general->bogus_cts,
- max_general->bogus_cts);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "bogus_ack:",
- le32_to_cpu(general->bogus_ack),
- accum_general->bogus_ack, delta_general->bogus_ack,
- max_general->bogus_ack);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "non_bssid_frames:",
- le32_to_cpu(general->non_bssid_frames),
- accum_general->non_bssid_frames,
- delta_general->non_bssid_frames,
- max_general->non_bssid_frames);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "filtered_frames:",
- le32_to_cpu(general->filtered_frames),
- accum_general->filtered_frames,
- delta_general->filtered_frames,
- max_general->filtered_frames);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "non_channel_beacons:",
- le32_to_cpu(general->non_channel_beacons),
- accum_general->non_channel_beacons,
- delta_general->non_channel_beacons,
- max_general->non_channel_beacons);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "channel_beacons:",
- le32_to_cpu(general->channel_beacons),
- accum_general->channel_beacons,
- delta_general->channel_beacons,
- max_general->channel_beacons);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "num_missed_bcon:",
- le32_to_cpu(general->num_missed_bcon),
- accum_general->num_missed_bcon,
- delta_general->num_missed_bcon,
- max_general->num_missed_bcon);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "adc_rx_saturation_time:",
- le32_to_cpu(general->adc_rx_saturation_time),
- accum_general->adc_rx_saturation_time,
- delta_general->adc_rx_saturation_time,
- max_general->adc_rx_saturation_time);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "ina_detect_search_tm:",
- le32_to_cpu(general->ina_detection_search_time),
- accum_general->ina_detection_search_time,
- delta_general->ina_detection_search_time,
- max_general->ina_detection_search_time);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "beacon_silence_rssi_a:",
- le32_to_cpu(general->beacon_silence_rssi_a),
- accum_general->beacon_silence_rssi_a,
- delta_general->beacon_silence_rssi_a,
- max_general->beacon_silence_rssi_a);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "beacon_silence_rssi_b:",
- le32_to_cpu(general->beacon_silence_rssi_b),
- accum_general->beacon_silence_rssi_b,
- delta_general->beacon_silence_rssi_b,
- max_general->beacon_silence_rssi_b);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "beacon_silence_rssi_c:",
- le32_to_cpu(general->beacon_silence_rssi_c),
- accum_general->beacon_silence_rssi_c,
- delta_general->beacon_silence_rssi_c,
- max_general->beacon_silence_rssi_c);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "interference_data_flag:",
- le32_to_cpu(general->interference_data_flag),
- accum_general->interference_data_flag,
- delta_general->interference_data_flag,
- max_general->interference_data_flag);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "channel_load:",
- le32_to_cpu(general->channel_load),
- accum_general->channel_load,
- delta_general->channel_load,
- max_general->channel_load);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "dsp_false_alarms:",
- le32_to_cpu(general->dsp_false_alarms),
- accum_general->dsp_false_alarms,
- delta_general->dsp_false_alarms,
- max_general->dsp_false_alarms);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "beacon_rssi_a:",
- le32_to_cpu(general->beacon_rssi_a),
- accum_general->beacon_rssi_a,
- delta_general->beacon_rssi_a,
- max_general->beacon_rssi_a);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "beacon_rssi_b:",
- le32_to_cpu(general->beacon_rssi_b),
- accum_general->beacon_rssi_b,
- delta_general->beacon_rssi_b,
- max_general->beacon_rssi_b);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "beacon_rssi_c:",
- le32_to_cpu(general->beacon_rssi_c),
- accum_general->beacon_rssi_c,
- delta_general->beacon_rssi_c,
- max_general->beacon_rssi_c);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "beacon_energy_a:",
- le32_to_cpu(general->beacon_energy_a),
- accum_general->beacon_energy_a,
- delta_general->beacon_energy_a,
- max_general->beacon_energy_a);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "beacon_energy_b:",
- le32_to_cpu(general->beacon_energy_b),
- accum_general->beacon_energy_b,
- delta_general->beacon_energy_b,
- max_general->beacon_energy_b);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "beacon_energy_c:",
- le32_to_cpu(general->beacon_energy_c),
- accum_general->beacon_energy_c,
- delta_general->beacon_energy_c,
- max_general->beacon_energy_c);
-
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_header, "Statistics_Rx - OFDM_HT:");
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "plcp_err:",
- le32_to_cpu(ht->plcp_err), accum_ht->plcp_err,
- delta_ht->plcp_err, max_ht->plcp_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "overrun_err:",
- le32_to_cpu(ht->overrun_err), accum_ht->overrun_err,
- delta_ht->overrun_err, max_ht->overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "early_overrun_err:",
- le32_to_cpu(ht->early_overrun_err),
- accum_ht->early_overrun_err,
- delta_ht->early_overrun_err,
- max_ht->early_overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "crc32_good:",
- le32_to_cpu(ht->crc32_good), accum_ht->crc32_good,
- delta_ht->crc32_good, max_ht->crc32_good);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "crc32_err:",
- le32_to_cpu(ht->crc32_err), accum_ht->crc32_err,
- delta_ht->crc32_err, max_ht->crc32_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "mh_format_err:",
- le32_to_cpu(ht->mh_format_err),
- accum_ht->mh_format_err,
- delta_ht->mh_format_err, max_ht->mh_format_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "agg_crc32_good:",
- le32_to_cpu(ht->agg_crc32_good),
- accum_ht->agg_crc32_good,
- delta_ht->agg_crc32_good, max_ht->agg_crc32_good);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "agg_mpdu_cnt:",
- le32_to_cpu(ht->agg_mpdu_cnt),
- accum_ht->agg_mpdu_cnt,
- delta_ht->agg_mpdu_cnt, max_ht->agg_mpdu_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "agg_cnt:",
- le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt,
- delta_ht->agg_cnt, max_ht->agg_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "unsupport_mcs:",
- le32_to_cpu(ht->unsupport_mcs),
- accum_ht->unsupport_mcs,
- delta_ht->unsupport_mcs, max_ht->unsupport_mcs);
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-ssize_t iwl4965_ucode_tx_stats_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- int pos = 0;
- char *buf;
- int bufsz = (sizeof(struct statistics_tx) * 48) + 250;
- ssize_t ret;
- struct statistics_tx *tx, *accum_tx, *delta_tx, *max_tx;
-
- if (!iwl_legacy_is_alive(priv))
- return -EAGAIN;
-
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf) {
- IWL_ERR(priv, "Cannot allocate buffer\n");
- return -ENOMEM;
- }
-
- /* The statistics displayed here are based on the last
- * statistics notification from the uCode and might not
- * reflect the current uCode activity.
- */
- tx = &priv->_4965.statistics.tx;
- accum_tx = &priv->_4965.accum_statistics.tx;
- delta_tx = &priv->_4965.delta_statistics.tx;
- max_tx = &priv->_4965.max_delta.tx;
-
- pos += iwl4965_statistics_flag(priv, buf, bufsz);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_header, "Statistics_Tx:");
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "preamble:",
- le32_to_cpu(tx->preamble_cnt),
- accum_tx->preamble_cnt,
- delta_tx->preamble_cnt, max_tx->preamble_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "rx_detected_cnt:",
- le32_to_cpu(tx->rx_detected_cnt),
- accum_tx->rx_detected_cnt,
- delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "bt_prio_defer_cnt:",
- le32_to_cpu(tx->bt_prio_defer_cnt),
- accum_tx->bt_prio_defer_cnt,
- delta_tx->bt_prio_defer_cnt,
- max_tx->bt_prio_defer_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "bt_prio_kill_cnt:",
- le32_to_cpu(tx->bt_prio_kill_cnt),
- accum_tx->bt_prio_kill_cnt,
- delta_tx->bt_prio_kill_cnt,
- max_tx->bt_prio_kill_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "few_bytes_cnt:",
- le32_to_cpu(tx->few_bytes_cnt),
- accum_tx->few_bytes_cnt,
- delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "cts_timeout:",
- le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
- delta_tx->cts_timeout, max_tx->cts_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "ack_timeout:",
- le32_to_cpu(tx->ack_timeout),
- accum_tx->ack_timeout,
- delta_tx->ack_timeout, max_tx->ack_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "expected_ack_cnt:",
- le32_to_cpu(tx->expected_ack_cnt),
- accum_tx->expected_ack_cnt,
- delta_tx->expected_ack_cnt,
- max_tx->expected_ack_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "actual_ack_cnt:",
- le32_to_cpu(tx->actual_ack_cnt),
- accum_tx->actual_ack_cnt,
- delta_tx->actual_ack_cnt,
- max_tx->actual_ack_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "dump_msdu_cnt:",
- le32_to_cpu(tx->dump_msdu_cnt),
- accum_tx->dump_msdu_cnt,
- delta_tx->dump_msdu_cnt,
- max_tx->dump_msdu_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "abort_nxt_frame_mismatch:",
- le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt),
- accum_tx->burst_abort_next_frame_mismatch_cnt,
- delta_tx->burst_abort_next_frame_mismatch_cnt,
- max_tx->burst_abort_next_frame_mismatch_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "abort_missing_nxt_frame:",
- le32_to_cpu(tx->burst_abort_missing_next_frame_cnt),
- accum_tx->burst_abort_missing_next_frame_cnt,
- delta_tx->burst_abort_missing_next_frame_cnt,
- max_tx->burst_abort_missing_next_frame_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "cts_timeout_collision:",
- le32_to_cpu(tx->cts_timeout_collision),
- accum_tx->cts_timeout_collision,
- delta_tx->cts_timeout_collision,
- max_tx->cts_timeout_collision);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "ack_ba_timeout_collision:",
- le32_to_cpu(tx->ack_or_ba_timeout_collision),
- accum_tx->ack_or_ba_timeout_collision,
- delta_tx->ack_or_ba_timeout_collision,
- max_tx->ack_or_ba_timeout_collision);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "agg ba_timeout:",
- le32_to_cpu(tx->agg.ba_timeout),
- accum_tx->agg.ba_timeout,
- delta_tx->agg.ba_timeout,
- max_tx->agg.ba_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "agg ba_resched_frames:",
- le32_to_cpu(tx->agg.ba_reschedule_frames),
- accum_tx->agg.ba_reschedule_frames,
- delta_tx->agg.ba_reschedule_frames,
- max_tx->agg.ba_reschedule_frames);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "agg scd_query_agg_frame:",
- le32_to_cpu(tx->agg.scd_query_agg_frame_cnt),
- accum_tx->agg.scd_query_agg_frame_cnt,
- delta_tx->agg.scd_query_agg_frame_cnt,
- max_tx->agg.scd_query_agg_frame_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "agg scd_query_no_agg:",
- le32_to_cpu(tx->agg.scd_query_no_agg),
- accum_tx->agg.scd_query_no_agg,
- delta_tx->agg.scd_query_no_agg,
- max_tx->agg.scd_query_no_agg);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "agg scd_query_agg:",
- le32_to_cpu(tx->agg.scd_query_agg),
- accum_tx->agg.scd_query_agg,
- delta_tx->agg.scd_query_agg,
- max_tx->agg.scd_query_agg);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "agg scd_query_mismatch:",
- le32_to_cpu(tx->agg.scd_query_mismatch),
- accum_tx->agg.scd_query_mismatch,
- delta_tx->agg.scd_query_mismatch,
- max_tx->agg.scd_query_mismatch);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "agg frame_not_ready:",
- le32_to_cpu(tx->agg.frame_not_ready),
- accum_tx->agg.frame_not_ready,
- delta_tx->agg.frame_not_ready,
- max_tx->agg.frame_not_ready);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "agg underrun:",
- le32_to_cpu(tx->agg.underrun),
- accum_tx->agg.underrun,
- delta_tx->agg.underrun, max_tx->agg.underrun);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "agg bt_prio_kill:",
- le32_to_cpu(tx->agg.bt_prio_kill),
- accum_tx->agg.bt_prio_kill,
- delta_tx->agg.bt_prio_kill,
- max_tx->agg.bt_prio_kill);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "agg rx_ba_rsp_cnt:",
- le32_to_cpu(tx->agg.rx_ba_rsp_cnt),
- accum_tx->agg.rx_ba_rsp_cnt,
- delta_tx->agg.rx_ba_rsp_cnt,
- max_tx->agg.rx_ba_rsp_cnt);
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-ssize_t
-iwl4965_ucode_general_stats_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- int pos = 0;
- char *buf;
- int bufsz = sizeof(struct statistics_general) * 10 + 300;
- ssize_t ret;
- struct statistics_general_common *general, *accum_general;
- struct statistics_general_common *delta_general, *max_general;
- struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
- struct statistics_div *div, *accum_div, *delta_div, *max_div;
-
- if (!iwl_legacy_is_alive(priv))
- return -EAGAIN;
-
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf) {
- IWL_ERR(priv, "Cannot allocate buffer\n");
- return -ENOMEM;
- }
-
- /* The statistics displayed here are based on the last
- * statistics notification from the uCode and might not
- * reflect the current uCode activity.
- */
- general = &priv->_4965.statistics.general.common;
- dbg = &priv->_4965.statistics.general.common.dbg;
- div = &priv->_4965.statistics.general.common.div;
- accum_general = &priv->_4965.accum_statistics.general.common;
- accum_dbg = &priv->_4965.accum_statistics.general.common.dbg;
- accum_div = &priv->_4965.accum_statistics.general.common.div;
- delta_general = &priv->_4965.delta_statistics.general.common;
- max_general = &priv->_4965.max_delta.general.common;
- delta_dbg = &priv->_4965.delta_statistics.general.common.dbg;
- max_dbg = &priv->_4965.max_delta.general.common.dbg;
- delta_div = &priv->_4965.delta_statistics.general.common.div;
- max_div = &priv->_4965.max_delta.general.common.div;
-
- pos += iwl4965_statistics_flag(priv, buf, bufsz);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_header, "Statistics_General:");
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_value, "temperature:",
- le32_to_cpu(general->temperature));
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_value, "ttl_timestamp:",
- le32_to_cpu(general->ttl_timestamp));
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "burst_check:",
- le32_to_cpu(dbg->burst_check),
- accum_dbg->burst_check,
- delta_dbg->burst_check, max_dbg->burst_check);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "burst_count:",
- le32_to_cpu(dbg->burst_count),
- accum_dbg->burst_count,
- delta_dbg->burst_count, max_dbg->burst_count);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "wait_for_silence_timeout_count:",
- le32_to_cpu(dbg->wait_for_silence_timeout_cnt),
- accum_dbg->wait_for_silence_timeout_cnt,
- delta_dbg->wait_for_silence_timeout_cnt,
- max_dbg->wait_for_silence_timeout_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "sleep_time:",
- le32_to_cpu(general->sleep_time),
- accum_general->sleep_time,
- delta_general->sleep_time, max_general->sleep_time);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "slots_out:",
- le32_to_cpu(general->slots_out),
- accum_general->slots_out,
- delta_general->slots_out, max_general->slots_out);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "slots_idle:",
- le32_to_cpu(general->slots_idle),
- accum_general->slots_idle,
- delta_general->slots_idle, max_general->slots_idle);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "tx_on_a:",
- le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
- delta_div->tx_on_a, max_div->tx_on_a);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "tx_on_b:",
- le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
- delta_div->tx_on_b, max_div->tx_on_b);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "exec_time:",
- le32_to_cpu(div->exec_time), accum_div->exec_time,
- delta_div->exec_time, max_div->exec_time);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "probe_time:",
- le32_to_cpu(div->probe_time), accum_div->probe_time,
- delta_div->probe_time, max_div->probe_time);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "rx_enable_counter:",
- le32_to_cpu(general->rx_enable_counter),
- accum_general->rx_enable_counter,
- delta_general->rx_enable_counter,
- max_general->rx_enable_counter);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "num_of_sos_states:",
- le32_to_cpu(general->num_of_sos_states),
- accum_general->num_of_sos_states,
- delta_general->num_of_sos_states,
- max_general->num_of_sos_states);
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h b/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h
deleted file mode 100644
index 6c8e35361a9e..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *****************************************************************************/
-
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-debug.h"
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
-ssize_t iwl4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos);
-ssize_t iwl4965_ucode_tx_stats_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos);
-ssize_t iwl4965_ucode_general_stats_read(struct file *file,
- char __user *user_buf, size_t count, loff_t *ppos);
-#else
-static ssize_t
-iwl4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- return 0;
-}
-static ssize_t
-iwl4965_ucode_tx_stats_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- return 0;
-}
-static ssize_t
-iwl4965_ucode_general_stats_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- return 0;
-}
-#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c b/drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c
deleted file mode 100644
index cb9baab1ff7d..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c
+++ /dev/null
@@ -1,154 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *****************************************************************************/
-
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-
-#include <net/mac80211.h>
-
-#include "iwl-commands.h"
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-debug.h"
-#include "iwl-4965.h"
-#include "iwl-io.h"
-
-/******************************************************************************
- *
- * EEPROM related functions
- *
-******************************************************************************/
-
-/*
- * The device's EEPROM semaphore prevents conflicts between driver and uCode
- * when accessing the EEPROM; each access is a series of pulses to/from the
- * EEPROM chip, not a single event, so even reads could conflict if they
- * weren't arbitrated by the semaphore.
- */
-int iwl4965_eeprom_acquire_semaphore(struct iwl_priv *priv)
-{
- u16 count;
- int ret;
-
- for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) {
- /* Request semaphore */
- iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
-
- /* See if we got it */
- ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
- CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
- EEPROM_SEM_TIMEOUT);
- if (ret >= 0) {
- IWL_DEBUG_IO(priv,
- "Acquired semaphore after %d tries.\n",
- count+1);
- return ret;
- }
- }
-
- return ret;
-}
-
-void iwl4965_eeprom_release_semaphore(struct iwl_priv *priv)
-{
- iwl_legacy_clear_bit(priv, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
-
-}
-
-int iwl4965_eeprom_check_version(struct iwl_priv *priv)
-{
- u16 eeprom_ver;
- u16 calib_ver;
-
- eeprom_ver = iwl_legacy_eeprom_query16(priv, EEPROM_VERSION);
- calib_ver = iwl_legacy_eeprom_query16(priv,
- EEPROM_4965_CALIB_VERSION_OFFSET);
-
- if (eeprom_ver < priv->cfg->eeprom_ver ||
- calib_ver < priv->cfg->eeprom_calib_ver)
- goto err;
-
- IWL_INFO(priv, "device EEPROM VER=0x%x, CALIB=0x%x\n",
- eeprom_ver, calib_ver);
-
- return 0;
-err:
- IWL_ERR(priv, "Unsupported (too old) EEPROM VER=0x%x < 0x%x "
- "CALIB=0x%x < 0x%x\n",
- eeprom_ver, priv->cfg->eeprom_ver,
- calib_ver, priv->cfg->eeprom_calib_ver);
- return -EINVAL;
-
-}
-
-void iwl4965_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac)
-{
- const u8 *addr = iwl_legacy_eeprom_query_addr(priv,
- EEPROM_MAC_ADDRESS);
- memcpy(mac, addr, ETH_ALEN);
-}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-hw.h b/drivers/net/wireless/iwlegacy/iwl-4965-hw.h
deleted file mode 100644
index fc6fa2886d9c..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-hw.h
+++ /dev/null
@@ -1,811 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-/*
- * Please use this file (iwl-4965-hw.h) only for hardware-related definitions.
- * Use iwl-commands.h for uCode API definitions.
- * Use iwl-dev.h for driver implementation definitions.
- */
-
-#ifndef __iwl_4965_hw_h__
-#define __iwl_4965_hw_h__
-
-#include "iwl-fh.h"
-
-/* EEPROM */
-#define IWL4965_EEPROM_IMG_SIZE 1024
-
-/*
- * uCode queue management definitions ...
- * The first queue used for block-ack aggregation is #7 (4965 only).
- * All block-ack aggregation queues should map to Tx DMA/FIFO channel 7.
- */
-#define IWL49_FIRST_AMPDU_QUEUE 7
-
-/* Sizes and addresses for instruction and data memory (SRAM) in
- * 4965's embedded processor. Driver access is via HBUS_TARG_MEM_* regs. */
-#define IWL49_RTC_INST_LOWER_BOUND (0x000000)
-#define IWL49_RTC_INST_UPPER_BOUND (0x018000)
-
-#define IWL49_RTC_DATA_LOWER_BOUND (0x800000)
-#define IWL49_RTC_DATA_UPPER_BOUND (0x80A000)
-
-#define IWL49_RTC_INST_SIZE (IWL49_RTC_INST_UPPER_BOUND - \
- IWL49_RTC_INST_LOWER_BOUND)
-#define IWL49_RTC_DATA_SIZE (IWL49_RTC_DATA_UPPER_BOUND - \
- IWL49_RTC_DATA_LOWER_BOUND)
-
-#define IWL49_MAX_INST_SIZE IWL49_RTC_INST_SIZE
-#define IWL49_MAX_DATA_SIZE IWL49_RTC_DATA_SIZE
-
-/* Size of uCode instruction memory in bootstrap state machine */
-#define IWL49_MAX_BSM_SIZE BSM_SRAM_SIZE
-
-static inline int iwl4965_hw_valid_rtc_data_addr(u32 addr)
-{
- return (addr >= IWL49_RTC_DATA_LOWER_BOUND) &&
- (addr < IWL49_RTC_DATA_UPPER_BOUND);
-}
-
-/********************* START TEMPERATURE *************************************/
-
-/**
- * 4965 temperature calculation.
- *
- * The driver must calculate the device temperature before calculating
- * a txpower setting (amplifier gain is temperature dependent). The
- * calculation uses 4 measurements, 3 of which (R1, R2, R3) are calibration
- * values used for the life of the driver, and one of which (R4) is the
- * real-time temperature indicator.
- *
- * uCode provides all 4 values to the driver via the "initialize alive"
- * notification (see struct iwl4965_init_alive_resp). After the runtime uCode
- * image loads, uCode updates the R4 value via statistics notifications
- * (see STATISTICS_NOTIFICATION), which occur after each received beacon
- * when associated, or can be requested via REPLY_STATISTICS_CMD.
- *
- * NOTE: uCode provides the R4 value as a 23-bit signed value. Driver
- * must sign-extend to 32 bits before applying formula below.
- *
- * Formula:
- *
- * degrees Kelvin = ((97 * 259 * (R4 - R2) / (R3 - R1)) / 100) + 8
- *
- * NOTE: The basic formula is 259 * (R4-R2) / (R3-R1). The 97/100 is
- * an additional correction, which should be centered around 0 degrees
- * Celsius (273 degrees Kelvin). The 8 (3 percent of 273) compensates for
- * centering the 97/100 correction around 0 degrees K.
- *
- * Subtract 273 from the Kelvin value to find degrees Celsius, for comparing current
- * temperature with factory-measured temperatures when calculating txpower
- * settings.
- */
-#define TEMPERATURE_CALIB_KELVIN_OFFSET 8
-#define TEMPERATURE_CALIB_A_VAL 259
-
-/* Limit range of calculated temperature to be between these Kelvin values */
-#define IWL_TX_POWER_TEMPERATURE_MIN (263)
-#define IWL_TX_POWER_TEMPERATURE_MAX (410)
-
-#define IWL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(t) \
- (((t) < IWL_TX_POWER_TEMPERATURE_MIN) || \
- ((t) > IWL_TX_POWER_TEMPERATURE_MAX))
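
For illustration only, here is a minimal sketch of the formula above; the helper name is hypothetical, R1-R4 are assumed to be already sign-extended 32-bit values in the normal operating range, and the operation order is chosen to keep intermediates within 32 bits (so rounding may differ slightly from the literal formula):

/*
 * Illustration only: apply
 *   temperature(K) = ((97 * 259 * (R4 - R2) / (R3 - R1)) / 100) + 8
 * using the constants defined above.
 */
static inline s32 iwl4965_calc_temp_sketch(s32 r1, s32 r2, s32 r3, s32 r4)
{
	s32 temp;

	if (r3 == r1)		/* bogus calibration data */
		return 0;

	temp = TEMPERATURE_CALIB_A_VAL * (r4 - r2);
	temp /= (r3 - r1);
	temp = (temp * 97) / 100 + TEMPERATURE_CALIB_KELVIN_OFFSET;

	/* reject values outside the sane Kelvin range defined above */
	return IWL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(temp) ? 0 : temp;
}
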
-
-/********************* END TEMPERATURE ***************************************/
-
-/********************* START TXPOWER *****************************************/
-
-/**
- * 4965 txpower calculations rely on information from three sources:
- *
- * 1) EEPROM
- * 2) "initialize" alive notification
- * 3) statistics notifications
- *
- * EEPROM data consists of:
- *
- * 1) Regulatory information (max txpower and channel usage flags) is provided
- *    separately for each channel that can possibly be supported by 4965.
- * 40 MHz wide (.11n HT40) channels are listed separately from 20 MHz
- * (legacy) channels.
- *
- * See struct iwl4965_eeprom_channel for format, and struct iwl4965_eeprom
- * for locations in EEPROM.
- *
- * 2) Factory txpower calibration information is provided separately for
- *    sub-bands of contiguous channels.  2.4 GHz has just one sub-band,
- *    but 5 GHz has several sub-bands.
- *
- *    In addition, per-band (2.4 and 5 GHz) saturation txpowers are provided.
- *
- * See struct iwl4965_eeprom_calib_info (and the tree of structures
- * contained within it) for format, and struct iwl4965_eeprom for
- * locations in EEPROM.
- *
- * "Initialization alive" notification (see struct iwl4965_init_alive_resp)
- * consists of:
- *
- * 1) Temperature calculation parameters.
- *
- * 2) Power supply voltage measurement.
- *
- * 3) Tx gain compensation to balance 2 transmitters for MIMO use.
- *
- * Statistics notifications deliver:
- *
- * 1) Current values for temperature param R4.
- */
-
-/**
- * To calculate a txpower setting for a given desired target txpower, channel,
- * modulation bit rate, and transmitter chain (4965 has 2 transmitters to
- * support MIMO and transmit diversity), driver must do the following:
- *
- * 1) Compare desired txpower vs. (EEPROM) regulatory limit for this channel.
- * Do not exceed regulatory limit; reduce target txpower if necessary.
- *
- * If setting up txpowers for MIMO rates (rate indexes 8-15, 24-31),
- * 2 transmitters will be used simultaneously; driver must reduce the
- * regulatory limit by 3 dB (half-power) for each transmitter, so the
- * combined total output of the 2 transmitters is within regulatory limits.
- *
- *
- * 2) Compare target txpower vs. (EEPROM) saturation txpower *reduced by
- * backoff for this bit rate*. Do not exceed (saturation - backoff[rate]);
- * reduce target txpower if necessary.
- *
- * Backoff values below are in 1/2 dB units (equivalent to steps in
- * txpower gain tables):
- *
- * OFDM 6 - 36 MBit: 10 steps (5 dB)
- * OFDM 48 MBit: 15 steps (7.5 dB)
- * OFDM 54 MBit: 17 steps (8.5 dB)
- * OFDM 60 MBit: 20 steps (10 dB)
- * CCK all rates: 10 steps (5 dB)
- *
- * Backoff values apply to saturation txpower on a per-transmitter basis;
- * when using MIMO (2 transmitters), each transmitter uses the same
- * saturation level provided in EEPROM, and the same backoff values;
- * no reduction (such as with regulatory txpower limits) is required.
- *
- * Saturation and Backoff values apply equally to 20 MHz (legacy) channel
- * widths and 40 MHz (.11n HT40) channel widths; there is no separate
- * factory measurement for HT40 channels.
- *
- * The result of this step is the final target txpower. The rest of
- * the steps figure out the proper settings for the device to achieve
- * that target txpower.
- *
- *
- * 3) Determine (EEPROM) calibration sub band for the target channel, by
- * comparing against first and last channels in each sub band
- * (see struct iwl4965_eeprom_calib_subband_info).
- *
- *
- * 4) Linearly interpolate (EEPROM) factory calibration measurement sets,
- * referencing the 2 factory-measured (sample) channels within the sub band.
- *
- * Interpolation is based on difference between target channel's frequency
- * and the sample channels' frequencies. Since channel numbers are based
- * on frequency (5 MHz between each channel number), this is equivalent
- * to interpolating based on channel number differences.
- *
- * Note that the sample channels may or may not be the channels at the
- * edges of the sub band. The target channel may be "outside" of the
- * span of the sampled channels.
- *
- * Driver may choose the pair (for 2 Tx chains) of measurements (see
- * struct iwl4965_eeprom_calib_ch_info) for which the actual measured
- * txpower comes closest to the desired txpower. Usually, though,
- * the middle set of measurements is closest to the regulatory limits,
- * and is therefore a good choice for all txpower calculations (this
- * assumes that high accuracy is needed for maximizing legal txpower,
- * while lower txpower configurations do not need as much accuracy).
- *
- * Driver should interpolate both members of the chosen measurement pair,
- * i.e. for both Tx chains (radio transmitters), unless the driver knows
- * that only one of the chains will be used (e.g. only one tx antenna
- * connected, but this should be unusual). The rate scaling algorithm
- * switches antennas to find best performance, so both Tx chains will
- * be used (although only one at a time) even for non-MIMO transmissions.
- *
- * Driver should interpolate factory values for temperature, gain table
- * index, and actual power. The power amplifier detector values are
- * not used by the driver.
- *
- * Sanity check: If the target channel happens to be one of the sample
- * channels, the results should agree with the sample channel's
- * measurements!
- *
- *
- * 5) Find difference between desired txpower and (interpolated)
- * factory-measured txpower. Using (interpolated) factory gain table index
- * (shown elsewhere) as a starting point, adjust this index lower to
- * increase txpower, or higher to decrease txpower, until the target
- * txpower is reached. Each step in the gain table is 1/2 dB.
- *
- * For example, if factory measured txpower is 16 dBm, and target txpower
- * is 13 dBm, add 6 steps to the factory gain index to reduce txpower
- * by 3 dB.
- *
- *
- * 6) Find difference between current device temperature and (interpolated)
- * factory-measured temperature for sub-band. Factory values are in
- * degrees Celsius. To calculate current temperature, see comments for
- * "4965 temperature calculation".
- *
- * If current temperature is higher than factory temperature, driver must
- *     increase gain (lower gain table index), and vice versa.
- *
- * Temperature affects gain differently for different channels:
- *
- * 2.4 GHz all channels: 3.5 degrees per half-dB step
- * 5 GHz channels 34-43: 4.5 degrees per half-dB step
- * 5 GHz channels >= 44: 4.0 degrees per half-dB step
- *
- * NOTE: Temperature can increase rapidly when transmitting, especially
- * with heavy traffic at high txpowers. Driver should update
- * temperature calculations often under these conditions to
- * maintain strong txpower in the face of rising temperature.
- *
- *
- * 7) Find difference between current power supply voltage indicator
- * (from "initialize alive") and factory-measured power supply voltage
- * indicator (EEPROM).
- *
- * If the current voltage is higher (indicator is lower) than factory
- * voltage, gain should be reduced (gain table index increased) by:
- *
- * (eeprom - current) / 7
- *
- * If the current voltage is lower (indicator is higher) than factory
- * voltage, gain should be increased (gain table index decreased) by:
- *
- * 2 * (current - eeprom) / 7
- *
- * If number of index steps in either direction turns out to be > 2,
- * something is wrong ... just use 0.
- *
- * NOTE: Voltage compensation is independent of band/channel.
- *
- * NOTE: "Initialize" uCode measures current voltage, which is assumed
- * to be constant after this initial measurement. Voltage
- * compensation for txpower (number of steps in gain table)
- * may be calculated once and used until the next uCode bootload.
- *
- *
- * 8) If setting up txpowers for MIMO rates (rate indexes 8-15, 24-31),
- * adjust txpower for each transmitter chain, so txpower is balanced
- * between the two chains. There are 5 pairs of tx_atten[group][chain]
- * values in "initialize alive", one pair for each of 5 channel ranges:
- *
- * Group 0: 5 GHz channel 34-43
- * Group 1: 5 GHz channel 44-70
- * Group 2: 5 GHz channel 71-124
- * Group 3: 5 GHz channel 125-200
- * Group 4: 2.4 GHz all channels
- *
- * Add the tx_atten[group][chain] value to the index for the target chain.
- * The values are signed, but are in pairs of 0 and a non-negative number,
- * so as to reduce gain (if necessary) of the "hotter" channel. This
- * avoids any need to double-check for regulatory compliance after
- * this step.
- *
- *
- * 9) If setting up for a CCK rate, lower the gain by adding a CCK compensation
- * value to the index:
- *
- * Hardware rev B: 9 steps (4.5 dB)
- * Hardware rev C: 5 steps (2.5 dB)
- *
- * Hardware rev for 4965 can be determined by reading CSR_HW_REV_WA_REG,
- * bits [3:2], 1 = B, 2 = C.
- *
- * NOTE: This compensation is in addition to any saturation backoff that
- * might have been applied in an earlier step.
- *
- *
- * 10) Select the gain table, based on band (2.4 vs 5 GHz).
- *
- * Limit the adjusted index to stay within the table!
- *
- *
- * 11) Read gain table entries for DSP and radio gain, place into appropriate
- * location(s) in command (struct iwl4965_txpowertable_cmd).
- */
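
As a compact illustration of steps 1 and 2 above, a hypothetical helper (not taken from the driver) that clamps a requested txpower against the regulatory limit and the backed-off saturation level, with all values in half-dB units:

/*
 * Illustration of steps 1-2: 'backoff' is the per-rate saturation
 * backoff in half-dB steps (e.g. 10 for CCK, 17 for OFDM 54 MBit).
 */
static inline int iwl4965_clamp_txpower_sketch(int target, int regulatory,
					       int saturation, int backoff)
{
	if (target > regulatory)
		target = regulatory;
	if (target > saturation - backoff)
		target = saturation - backoff;
	return target;		/* final target txpower for later steps */
}
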
-
-/**
- * When MIMO is used (2 transmitters operating simultaneously), driver should
- * limit each transmitter to at most 3 dB below the regulatory limit
- * for the device. That is, use half power for each transmitter, so total
- * txpower is within regulatory limits.
- *
- * The value "6" represents number of steps in gain table to reduce power 3 dB.
- * Each step is 1/2 dB.
- */
-#define IWL_TX_POWER_MIMO_REGULATORY_COMPENSATION (6)
-
-/**
- * CCK gain compensation.
- *
- * When calculating txpowers for CCK, after making sure that the target power
- * is within regulatory and saturation limits, driver must additionally
- * back off gain by adding these values to the gain table index.
- *
- * Hardware rev for 4965 can be determined by reading CSR_HW_REV_WA_REG,
- * bits [3:2], 1 = B, 2 = C.
- */
-#define IWL_TX_POWER_CCK_COMPENSATION_B_STEP (9)
-#define IWL_TX_POWER_CCK_COMPENSATION_C_STEP (5)
-
-/*
- * 4965 power supply voltage compensation for txpower
- */
-#define TX_POWER_IWL_VOLTAGE_CODES_PER_03V (7)
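
To make the arithmetic of step 7 concrete, a hypothetical sketch using the constant above; remember that the voltage indicator decreases as the supply voltage increases:

/*
 * Illustration of step 7: return the number of gain-table index steps to
 * add for power-supply voltage compensation.  A positive result reduces
 * gain (index increases), a negative result increases gain.
 */
static inline int iwl4965_voltage_comp_sketch(s32 eeprom_v, s32 current_v)
{
	int comp;

	if (current_v < eeprom_v)	/* current voltage is higher */
		comp = (eeprom_v - current_v) /
			TX_POWER_IWL_VOLTAGE_CODES_PER_03V;
	else				/* current voltage is lower */
		comp = -2 * (current_v - eeprom_v) /
			TX_POWER_IWL_VOLTAGE_CODES_PER_03V;

	if (comp < -2 || comp > 2)	/* "something is wrong ... just use 0" */
		comp = 0;

	return comp;
}
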
-
-/**
- * Gain tables.
- *
- * The following tables contain pair of values for setting txpower, i.e.
- * gain settings for the output of the device's digital signal processor (DSP),
- * and for the analog gain structure of the transmitter.
- *
- * Each entry in the gain tables represents a step of 1/2 dB. Note that these
- * are *relative* steps, not indications of absolute output power. Output
- * power varies with temperature, voltage, and channel frequency, and also
- * requires consideration of average power (to satisfy regulatory constraints),
- * and peak power (to avoid distortion of the output signal).
- *
- * Each entry contains two values:
- * 1) DSP gain (or sometimes called DSP attenuation). This is a fine-grained
- * linear value that multiplies the output of the digital signal processor,
- * before being sent to the analog radio.
- * 2) Radio gain. This sets the analog gain of the radio Tx path.
- * It is a coarser setting, and behaves in a logarithmic (dB) fashion.
- *
- * EEPROM contains factory calibration data for txpower. This maps actual
- * measured txpower levels to gain settings in the "well known" tables
- * below ("well-known" means here that both factory calibration *and* the
- * driver work with the same table).
- *
- * There are separate tables for 2.4 GHz and 5 GHz bands. The 5 GHz table
- * has an extension (into negative indexes), in case the driver needs to
- * boost power setting for high device temperatures (higher than would be
- * present during factory calibration).  A 5 GHz EEPROM index of "40"
- * corresponds to the 49th entry in the table used by the driver.
- */
-#define MIN_TX_GAIN_INDEX (0) /* highest gain, lowest idx, 2.4 */
-#define MIN_TX_GAIN_INDEX_52GHZ_EXT (-9) /* highest gain, lowest idx, 5 */
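
The gain tables themselves are kept in the driver's .c code; purely as an illustration (hypothetical names), one table entry could be represented as a DSP/radio pair, with the adjusted index clamped to the table limits as required by step 10:

/* Illustration only: one gain-table entry (DSP gain + radio gain). */
struct iwl4965_gain_entry_sketch {
	u8 dsp;		/* fine-grained DSP gain/attenuation */
	u8 radio;	/* coarse analog radio gain, logarithmic (dB) */
};

/* Clamp an adjusted index into [min_idx, max_idx] before table lookup. */
static inline int iwl4965_clamp_gain_idx_sketch(int idx, int min_idx,
						int max_idx)
{
	if (idx < min_idx)
		return min_idx;
	if (idx > max_idx)
		return max_idx;
	return idx;
}
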
-
-/**
- * 2.4 GHz gain table
- *
- * Index Dsp gain Radio gain
- * 0 110 0x3f (highest gain)
- * 1 104 0x3f
- * 2 98 0x3f
- * 3 110 0x3e
- * 4 104 0x3e
- * 5 98 0x3e
- * 6 110 0x3d
- * 7 104 0x3d
- * 8 98 0x3d
- * 9 110 0x3c
- * 10 104 0x3c
- * 11 98 0x3c
- * 12 110 0x3b
- * 13 104 0x3b
- * 14 98 0x3b
- * 15 110 0x3a
- * 16 104 0x3a
- * 17 98 0x3a
- * 18 110 0x39
- * 19 104 0x39
- * 20 98 0x39
- * 21 110 0x38
- * 22 104 0x38
- * 23 98 0x38
- * 24 110 0x37
- * 25 104 0x37
- * 26 98 0x37
- * 27 110 0x36
- * 28 104 0x36
- * 29 98 0x36
- * 30 110 0x35
- * 31 104 0x35
- * 32 98 0x35
- * 33 110 0x34
- * 34 104 0x34
- * 35 98 0x34
- * 36 110 0x33
- * 37 104 0x33
- * 38 98 0x33
- * 39 110 0x32
- * 40 104 0x32
- * 41 98 0x32
- * 42 110 0x31
- * 43 104 0x31
- * 44 98 0x31
- * 45 110 0x30
- * 46 104 0x30
- * 47 98 0x30
- * 48 110 0x6
- * 49 104 0x6
- * 50 98 0x6
- * 51 110 0x5
- * 52 104 0x5
- * 53 98 0x5
- * 54 110 0x4
- * 55 104 0x4
- * 56 98 0x4
- * 57 110 0x3
- * 58 104 0x3
- * 59 98 0x3
- * 60 110 0x2
- * 61 104 0x2
- * 62 98 0x2
- * 63 110 0x1
- * 64 104 0x1
- * 65 98 0x1
- * 66 110 0x0
- * 67 104 0x0
- * 68 98 0x0
- * 69 97 0
- * 70 96 0
- * 71 95 0
- * 72 94 0
- * 73 93 0
- * 74 92 0
- * 75 91 0
- * 76 90 0
- * 77 89 0
- * 78 88 0
- * 79 87 0
- * 80 86 0
- * 81 85 0
- * 82 84 0
- * 83 83 0
- * 84 82 0
- * 85 81 0
- * 86 80 0
- * 87 79 0
- * 88 78 0
- * 89 77 0
- * 90 76 0
- * 91 75 0
- * 92 74 0
- * 93 73 0
- * 94 72 0
- * 95 71 0
- * 96 70 0
- * 97 69 0
- * 98 68 0
- */
-
-/**
- * 5 GHz gain table
- *
- * Index Dsp gain Radio gain
- * -9 123 0x3F (highest gain)
- * -8 117 0x3F
- * -7 110 0x3F
- * -6 104 0x3F
- * -5 98 0x3F
- * -4 110 0x3E
- * -3 104 0x3E
- * -2 98 0x3E
- * -1 110 0x3D
- * 0 104 0x3D
- * 1 98 0x3D
- * 2 110 0x3C
- * 3 104 0x3C
- * 4 98 0x3C
- * 5 110 0x3B
- * 6 104 0x3B
- * 7 98 0x3B
- * 8 110 0x3A
- * 9 104 0x3A
- * 10 98 0x3A
- * 11 110 0x39
- * 12 104 0x39
- * 13 98 0x39
- * 14 110 0x38
- * 15 104 0x38
- * 16 98 0x38
- * 17 110 0x37
- * 18 104 0x37
- * 19 98 0x37
- * 20 110 0x36
- * 21 104 0x36
- * 22 98 0x36
- * 23 110 0x35
- * 24 104 0x35
- * 25 98 0x35
- * 26 110 0x34
- * 27 104 0x34
- * 28 98 0x34
- * 29 110 0x33
- * 30 104 0x33
- * 31 98 0x33
- * 32 110 0x32
- * 33 104 0x32
- * 34 98 0x32
- * 35 110 0x31
- * 36 104 0x31
- * 37 98 0x31
- * 38 110 0x30
- * 39 104 0x30
- * 40 98 0x30
- * 41 110 0x25
- * 42 104 0x25
- * 43 98 0x25
- * 44 110 0x24
- * 45 104 0x24
- * 46 98 0x24
- * 47 110 0x23
- * 48 104 0x23
- * 49 98 0x23
- * 50 110 0x22
- * 51 104 0x18
- * 52 98 0x18
- * 53 110 0x17
- * 54 104 0x17
- * 55 98 0x17
- * 56 110 0x16
- * 57 104 0x16
- * 58 98 0x16
- * 59 110 0x15
- * 60 104 0x15
- * 61 98 0x15
- * 62 110 0x14
- * 63 104 0x14
- * 64 98 0x14
- * 65 110 0x13
- * 66 104 0x13
- * 67 98 0x13
- * 68 110 0x12
- * 69 104 0x08
- * 70 98 0x08
- * 71 110 0x07
- * 72 104 0x07
- * 73 98 0x07
- * 74 110 0x06
- * 75 104 0x06
- * 76 98 0x06
- * 77 110 0x05
- * 78 104 0x05
- * 79 98 0x05
- * 80 110 0x04
- * 81 104 0x04
- * 82 98 0x04
- * 83 110 0x03
- * 84 104 0x03
- * 85 98 0x03
- * 86 110 0x02
- * 87 104 0x02
- * 88 98 0x02
- * 89 110 0x01
- * 90 104 0x01
- * 91 98 0x01
- * 92 110 0x00
- * 93 104 0x00
- * 94 98 0x00
- * 95 93 0x00
- * 96 88 0x00
- * 97 83 0x00
- * 98 78 0x00
- */
- */
-
-/**
- * Sanity checks and default values for EEPROM regulatory levels.
- * If EEPROM values fall outside MIN/MAX range, use default values.
- *
- * Regulatory limits refer to the maximum average txpower allowed by
- * regulatory agencies in the geographies in which the device is meant
- * to be operated. These limits are SKU-specific (i.e. geography-specific),
- * and channel-specific; each channel has an individual regulatory limit
- * listed in the EEPROM.
- *
- * Units are in half-dBm (i.e. "34" means 17 dBm).
- */
-#define IWL_TX_POWER_DEFAULT_REGULATORY_24 (34)
-#define IWL_TX_POWER_DEFAULT_REGULATORY_52 (34)
-#define IWL_TX_POWER_REGULATORY_MIN (0)
-#define IWL_TX_POWER_REGULATORY_MAX (34)
-
-/**
- * Sanity checks and default values for EEPROM saturation levels.
- * If EEPROM values fall outside MIN/MAX range, use default values.
- *
- * Saturation is the highest level that the output power amplifier can produce
- * without significant clipping distortion. This is a "peak" power level.
- * Different types of modulation (i.e. various "rates", and OFDM vs. CCK)
- * require differing amounts of backoff, relative to their average power output,
- * in order to avoid clipping distortion.
- *
- * Driver must make sure that it is violating neither the saturation limit,
- * nor the regulatory limit, when calculating Tx power settings for various
- * rates.
- *
- * Units are in half-dBm (i.e. "38" means 19 dBm).
- */
-#define IWL_TX_POWER_DEFAULT_SATURATION_24 (38)
-#define IWL_TX_POWER_DEFAULT_SATURATION_52 (38)
-#define IWL_TX_POWER_SATURATION_MIN (20)
-#define IWL_TX_POWER_SATURATION_MAX (50)
-
-/**
- * Channel groups used for Tx Attenuation calibration (MIMO tx channel balance)
- * and thermal Txpower calibration.
- *
- * When calculating txpower, driver must compensate for current device
- * temperature; higher temperature requires higher gain. Driver must calculate
- * current temperature (see "4965 temperature calculation"), then compare vs.
- * factory calibration temperature in EEPROM; if current temperature is higher
- * than factory temperature, driver must *increase* gain by proportions shown
- * in table below. If current temperature is lower than factory, driver must
- * *decrease* gain.
- *
- * Different frequency ranges require different compensation, as shown below.
- */
-/* Group 0, 5.2 GHz ch 34-43: 4.5 degrees per 1/2 dB. */
-#define CALIB_IWL_TX_ATTEN_GR1_FCH 34
-#define CALIB_IWL_TX_ATTEN_GR1_LCH 43
-
-/* Group 1, 5.3 GHz ch 44-70: 4.0 degrees per 1/2 dB. */
-#define CALIB_IWL_TX_ATTEN_GR2_FCH 44
-#define CALIB_IWL_TX_ATTEN_GR2_LCH 70
-
-/* Group 2, 5.5 GHz ch 71-124: 4.0 degrees per 1/2 dB. */
-#define CALIB_IWL_TX_ATTEN_GR3_FCH 71
-#define CALIB_IWL_TX_ATTEN_GR3_LCH 124
-
-/* Group 3, 5.7 GHz ch 125-200: 4.0 degrees per 1/2 dB. */
-#define CALIB_IWL_TX_ATTEN_GR4_FCH 125
-#define CALIB_IWL_TX_ATTEN_GR4_LCH 200
-
-/* Group 4, 2.4 GHz all channels: 3.5 degrees per 1/2 dB. */
-#define CALIB_IWL_TX_ATTEN_GR5_FCH 1
-#define CALIB_IWL_TX_ATTEN_GR5_LCH 20
-
-enum {
- CALIB_CH_GROUP_1 = 0,
- CALIB_CH_GROUP_2 = 1,
- CALIB_CH_GROUP_3 = 2,
- CALIB_CH_GROUP_4 = 3,
- CALIB_CH_GROUP_5 = 4,
- CALIB_CH_GROUP_MAX
-};
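
For illustration, a hypothetical lookup (mirroring the ranges defined above, not copied from the driver) that maps a channel number to one of the CALIB_CH_GROUP_* values:

/* Illustration only: map a channel number to its calibration group. */
static inline int iwl4965_calib_group_sketch(u16 channel)
{
	if (channel >= CALIB_IWL_TX_ATTEN_GR1_FCH &&
	    channel <= CALIB_IWL_TX_ATTEN_GR1_LCH)
		return CALIB_CH_GROUP_1;
	if (channel >= CALIB_IWL_TX_ATTEN_GR2_FCH &&
	    channel <= CALIB_IWL_TX_ATTEN_GR2_LCH)
		return CALIB_CH_GROUP_2;
	if (channel >= CALIB_IWL_TX_ATTEN_GR3_FCH &&
	    channel <= CALIB_IWL_TX_ATTEN_GR3_LCH)
		return CALIB_CH_GROUP_3;
	if (channel >= CALIB_IWL_TX_ATTEN_GR4_FCH &&
	    channel <= CALIB_IWL_TX_ATTEN_GR4_LCH)
		return CALIB_CH_GROUP_4;
	if (channel >= CALIB_IWL_TX_ATTEN_GR5_FCH &&
	    channel <= CALIB_IWL_TX_ATTEN_GR5_LCH)
		return CALIB_CH_GROUP_5;
	return -1;	/* channel not covered by any group */
}
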
-
-/********************* END TXPOWER *****************************************/
-
-
-/**
- * Tx/Rx Queues
- *
- * Most communication between driver and 4965 is via queues of data buffers.
- * For example, all commands that the driver issues to device's embedded
- * controller (uCode) are via the command queue (one of the Tx queues). All
- * uCode command responses/replies/notifications, including Rx frames, are
- * conveyed from uCode to driver via the Rx queue.
- *
- * Most support for these queues, including handshake support, resides in
- * structures in host DRAM, shared between the driver and the device. When
- * allocating this memory, the driver must make sure that data written by
- * the host CPU updates DRAM immediately (and does not get "stuck" in CPU's
- * cache memory), so DRAM and cache are consistent, and the device can
- * immediately see changes made by the driver.
- *
- * 4965 supports up to 16 DRAM-based Tx queues, and services these queues via
- * up to 7 DMA channels (FIFOs). Each Tx queue is supported by a circular array
- * in DRAM containing 256 Transmit Frame Descriptors (TFDs).
- */
-#define IWL49_NUM_FIFOS 7
-#define IWL49_CMD_FIFO_NUM 4
-#define IWL49_NUM_QUEUES 16
-#define IWL49_NUM_AMPDU_QUEUES 8
-
-
-/**
- * struct iwl4965_schedq_bc_tbl
- *
- * Byte Count table
- *
- * Each Tx queue uses a byte-count table containing 320 entries:
- * one 16-bit entry for each of 256 TFDs, plus an additional 64 entries that
- * duplicate the first 64 entries (to avoid wrap-around within a Tx window;
- * max Tx window is 64 TFDs).
- *
- * When driver sets up a new TFD, it must also enter the total byte count
- * of the frame to be transmitted into the corresponding entry in the byte
- * count table for the chosen Tx queue. If the TFD index is 0-63, the driver
- * must duplicate the byte count entry in corresponding index 256-319.
- *
- * padding puts each byte count table on a 1024-byte boundary;
- * 4965 assumes tables are separated by 1024 bytes.
- */
-struct iwl4965_scd_bc_tbl {
- __le16 tfd_offset[TFD_QUEUE_BC_SIZE];
- u8 pad[1024 - (TFD_QUEUE_BC_SIZE) * sizeof(__le16)];
-} __packed;
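
A minimal sketch of the duplication rule described above (hypothetical helper; the shipped driver updates this table from its Tx path):

/*
 * Illustration only: write the byte count for TFD index 'idx' (0-255) and
 * mirror entries 0-63 into slots 256-319 so the scheduler never reads a
 * stale count across the ring wrap-around.
 */
static inline void iwl4965_set_bc_sketch(struct iwl4965_scd_bc_tbl *bc_tbl,
					 int idx, u16 byte_cnt)
{
	bc_tbl->tfd_offset[idx] = cpu_to_le16(byte_cnt);
	if (idx < 64)
		bc_tbl->tfd_offset[256 + idx] = cpu_to_le16(byte_cnt);
}
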
-
-
-#define IWL4965_RTC_INST_LOWER_BOUND (0x000000)
-
-/* RSSI to dBm */
-#define IWL4965_RSSI_OFFSET 44
-
-/* PCI registers */
-#define PCI_CFG_RETRY_TIMEOUT 0x041
-
-/* PCI register values */
-#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01
-#define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02
-
-#define IWL4965_DEFAULT_TX_RETRY 15
-
-/* EEPROM */
-#define IWL4965_FIRST_AMPDU_QUEUE 10
-
-
-#endif /* !__iwl_4965_hw_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-led.c b/drivers/net/wireless/iwlegacy/iwl-4965-led.c
deleted file mode 100644
index 6862fdcaee62..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-led.c
+++ /dev/null
@@ -1,73 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/delay.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <net/mac80211.h>
-#include <linux/etherdevice.h>
-#include <asm/unaligned.h>
-
-#include "iwl-commands.h"
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-io.h"
-#include "iwl-4965-led.h"
-
-/* Send led command */
-static int
-iwl4965_send_led_cmd(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd)
-{
- struct iwl_host_cmd cmd = {
- .id = REPLY_LEDS_CMD,
- .len = sizeof(struct iwl_led_cmd),
- .data = led_cmd,
- .flags = CMD_ASYNC,
- .callback = NULL,
- };
- u32 reg;
-
- reg = iwl_read32(priv, CSR_LED_REG);
- if (reg != (reg & CSR_LED_BSM_CTRL_MSK))
- iwl_write32(priv, CSR_LED_REG, reg & CSR_LED_BSM_CTRL_MSK);
-
- return iwl_legacy_send_cmd(priv, &cmd);
-}
-
-/* Set led register off */
-void iwl4965_led_enable(struct iwl_priv *priv)
-{
- iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_ON);
-}
-
-const struct iwl_led_ops iwl4965_led_ops = {
- .cmd = iwl4965_send_led_cmd,
-};
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-led.h b/drivers/net/wireless/iwlegacy/iwl-4965-led.h
deleted file mode 100644
index 5ed3615fc338..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-led.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#ifndef __iwl_4965_led_h__
-#define __iwl_4965_led_h__
-
-extern const struct iwl_led_ops iwl4965_led_ops;
-void iwl4965_led_enable(struct iwl_priv *priv);
-
-#endif /* __iwl_4965_led_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-lib.c b/drivers/net/wireless/iwlegacy/iwl-4965-lib.c
deleted file mode 100644
index 2be6d9e3b019..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-lib.c
+++ /dev/null
@@ -1,1194 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-#include <linux/etherdevice.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-io.h"
-#include "iwl-helpers.h"
-#include "iwl-4965-hw.h"
-#include "iwl-4965.h"
-#include "iwl-sta.h"
-
-void iwl4965_check_abort_status(struct iwl_priv *priv,
- u8 frame_count, u32 status)
-{
- if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
- IWL_ERR(priv, "Tx flush command to flush out all frames\n");
- if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
- queue_work(priv->workqueue, &priv->tx_flush);
- }
-}
-
-/*
- * EEPROM
- */
-struct iwl_mod_params iwl4965_mod_params = {
- .amsdu_size_8K = 1,
- .restart_fw = 1,
- /* the rest are 0 by default */
-};
-
-void iwl4965_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
-{
- unsigned long flags;
- int i;
- spin_lock_irqsave(&rxq->lock, flags);
- INIT_LIST_HEAD(&rxq->rx_free);
- INIT_LIST_HEAD(&rxq->rx_used);
- /* Fill the rx_used queue with _all_ of the Rx buffers */
- for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
- /* In the reset function, these buffers may have been allocated
- * to an SKB, so we need to unmap and free potential storage */
- if (rxq->pool[i].page != NULL) {
- pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
- PAGE_SIZE << priv->hw_params.rx_page_order,
- PCI_DMA_FROMDEVICE);
- __iwl_legacy_free_pages(priv, rxq->pool[i].page);
- rxq->pool[i].page = NULL;
- }
- list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
- }
-
- for (i = 0; i < RX_QUEUE_SIZE; i++)
- rxq->queue[i] = NULL;
-
- /* Set us so that we have processed and used all buffers, but have
- * not restocked the Rx queue with fresh buffers */
- rxq->read = rxq->write = 0;
- rxq->write_actual = 0;
- rxq->free_count = 0;
- spin_unlock_irqrestore(&rxq->lock, flags);
-}
-
-int iwl4965_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
-{
- u32 rb_size;
- const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
- u32 rb_timeout = 0;
-
- if (priv->cfg->mod_params->amsdu_size_8K)
- rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
- else
- rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
-
- /* Stop Rx DMA */
- iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
-
- /* Reset driver's Rx queue write index */
- iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
-
- /* Tell device where to find RBD circular buffer in DRAM */
- iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
- (u32)(rxq->bd_dma >> 8));
-
- /* Tell device where in DRAM to update its Rx status */
- iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
- rxq->rb_stts_dma >> 4);
-
- /* Enable Rx DMA
- * Direct rx interrupts to hosts
- * Rx buffer size 4 or 8k
- * RB timeout 0x10
- * 256 RBDs
- */
- iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
- FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
- FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
- FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
- rb_size|
- (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
- (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
-
- /* Set interrupt coalescing timer to default (2048 usecs) */
- iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
-
- return 0;
-}
-
-static void iwl4965_set_pwr_vmain(struct iwl_priv *priv)
-{
-/*
- * (for documentation purposes)
- * to set power to V_AUX, do:
-
- if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
- iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
- APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
- ~APMG_PS_CTRL_MSK_PWR_SRC);
- */
-
- iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
- APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
- ~APMG_PS_CTRL_MSK_PWR_SRC);
-}
-
-int iwl4965_hw_nic_init(struct iwl_priv *priv)
-{
- unsigned long flags;
- struct iwl_rx_queue *rxq = &priv->rxq;
- int ret;
-
- /* nic_init */
- spin_lock_irqsave(&priv->lock, flags);
- priv->cfg->ops->lib->apm_ops.init(priv);
-
- /* Set interrupt coalescing calibration timer to default (512 usecs) */
- iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- iwl4965_set_pwr_vmain(priv);
-
- priv->cfg->ops->lib->apm_ops.config(priv);
-
- /* Allocate the RX queue, or reset if it is already allocated */
- if (!rxq->bd) {
- ret = iwl_legacy_rx_queue_alloc(priv);
- if (ret) {
- IWL_ERR(priv, "Unable to initialize Rx queue\n");
- return -ENOMEM;
- }
- } else
- iwl4965_rx_queue_reset(priv, rxq);
-
- iwl4965_rx_replenish(priv);
-
- iwl4965_rx_init(priv, rxq);
-
- spin_lock_irqsave(&priv->lock, flags);
-
- rxq->need_update = 1;
- iwl_legacy_rx_queue_update_write_ptr(priv, rxq);
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- /* Allocate or reset and init all Tx and Command queues */
- if (!priv->txq) {
- ret = iwl4965_txq_ctx_alloc(priv);
- if (ret)
- return ret;
- } else
- iwl4965_txq_ctx_reset(priv);
-
- set_bit(STATUS_INIT, &priv->status);
-
- return 0;
-}
-
-/**
- * iwl4965_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
- */
-static inline __le32 iwl4965_dma_addr2rbd_ptr(struct iwl_priv *priv,
- dma_addr_t dma_addr)
-{
- return cpu_to_le32((u32)(dma_addr >> 8));
-}
-
-/**
- * iwl4965_rx_queue_restock - refill RX queue from pre-allocated pool
- *
- * If there are slots in the RX queue that need to be restocked,
- * and we have free pre-allocated buffers, fill the ranks as much
- * as we can, pulling from rx_free.
- *
- * This moves the 'write' index forward to catch up with 'processed', and
- * also updates the memory address in the firmware to reference the new
- * target buffer.
- */
-void iwl4965_rx_queue_restock(struct iwl_priv *priv)
-{
- struct iwl_rx_queue *rxq = &priv->rxq;
- struct list_head *element;
- struct iwl_rx_mem_buffer *rxb;
- unsigned long flags;
-
- spin_lock_irqsave(&rxq->lock, flags);
- while ((iwl_legacy_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
- /* The overwritten rxb must be a used one */
- rxb = rxq->queue[rxq->write];
- BUG_ON(rxb && rxb->page);
-
- /* Get next free Rx buffer, remove from free list */
- element = rxq->rx_free.next;
- rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
- list_del(element);
-
- /* Point to Rx buffer via next RBD in circular buffer */
- rxq->bd[rxq->write] = iwl4965_dma_addr2rbd_ptr(priv,
- rxb->page_dma);
- rxq->queue[rxq->write] = rxb;
- rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
- rxq->free_count--;
- }
- spin_unlock_irqrestore(&rxq->lock, flags);
- /* If the pre-allocated buffer pool is dropping low, schedule to
- * refill it */
- if (rxq->free_count <= RX_LOW_WATERMARK)
- queue_work(priv->workqueue, &priv->rx_replenish);
-
-
- /* If we've added more space for the firmware to place data, tell it.
- * Increment device's write pointer in multiples of 8. */
- if (rxq->write_actual != (rxq->write & ~0x7)) {
- spin_lock_irqsave(&rxq->lock, flags);
- rxq->need_update = 1;
- spin_unlock_irqrestore(&rxq->lock, flags);
- iwl_legacy_rx_queue_update_write_ptr(priv, rxq);
- }
-}
-
-/**
- * iwl4965_rx_replenish - Move all used packets from rx_used to rx_free
- *
- * When moving to rx_free an SKB is allocated for the slot.
- *
- * Also restock the Rx queue via iwl_rx_queue_restock.
- * This is called as a scheduled work item (except during initialization).
- */
-static void iwl4965_rx_allocate(struct iwl_priv *priv, gfp_t priority)
-{
- struct iwl_rx_queue *rxq = &priv->rxq;
- struct list_head *element;
- struct iwl_rx_mem_buffer *rxb;
- struct page *page;
- unsigned long flags;
- gfp_t gfp_mask = priority;
-
- while (1) {
- spin_lock_irqsave(&rxq->lock, flags);
- if (list_empty(&rxq->rx_used)) {
- spin_unlock_irqrestore(&rxq->lock, flags);
- return;
- }
- spin_unlock_irqrestore(&rxq->lock, flags);
-
- if (rxq->free_count > RX_LOW_WATERMARK)
- gfp_mask |= __GFP_NOWARN;
-
- if (priv->hw_params.rx_page_order > 0)
- gfp_mask |= __GFP_COMP;
-
- /* Alloc a new receive buffer */
- page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
- if (!page) {
- if (net_ratelimit())
- IWL_DEBUG_INFO(priv, "alloc_pages failed, "
- "order: %d\n",
- priv->hw_params.rx_page_order);
-
- if ((rxq->free_count <= RX_LOW_WATERMARK) &&
- net_ratelimit())
- IWL_CRIT(priv,
- "Failed to alloc_pages with %s. "
- "Only %u free buffers remaining.\n",
- priority == GFP_ATOMIC ?
- "GFP_ATOMIC" : "GFP_KERNEL",
- rxq->free_count);
- /* We don't reschedule replenish work here -- we will
- * call the restock method and if it still needs
- * more buffers it will schedule replenish */
- return;
- }
-
- spin_lock_irqsave(&rxq->lock, flags);
-
- if (list_empty(&rxq->rx_used)) {
- spin_unlock_irqrestore(&rxq->lock, flags);
- __free_pages(page, priv->hw_params.rx_page_order);
- return;
- }
- element = rxq->rx_used.next;
- rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
- list_del(element);
-
- spin_unlock_irqrestore(&rxq->lock, flags);
-
- BUG_ON(rxb->page);
- rxb->page = page;
- /* Get physical address of the RB */
- rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
- PAGE_SIZE << priv->hw_params.rx_page_order,
- PCI_DMA_FROMDEVICE);
- /* dma address must be no more than 36 bits */
- BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
- /* and also 256 byte aligned! */
- BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
-
- spin_lock_irqsave(&rxq->lock, flags);
-
- list_add_tail(&rxb->list, &rxq->rx_free);
- rxq->free_count++;
- priv->alloc_rxb_page++;
-
- spin_unlock_irqrestore(&rxq->lock, flags);
- }
-}
-
-void iwl4965_rx_replenish(struct iwl_priv *priv)
-{
- unsigned long flags;
-
- iwl4965_rx_allocate(priv, GFP_KERNEL);
-
- spin_lock_irqsave(&priv->lock, flags);
- iwl4965_rx_queue_restock(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
-}
-
-void iwl4965_rx_replenish_now(struct iwl_priv *priv)
-{
- iwl4965_rx_allocate(priv, GFP_ATOMIC);
-
- iwl4965_rx_queue_restock(priv);
-}
-
-/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
- * If an SKB has been detached, the pool entry must have its SKB set to NULL.
- * This free routine walks the list of pool entries; if an SKB is set to
- * non-NULL, it is unmapped and freed.
- */
-void iwl4965_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
-{
- int i;
- for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
- if (rxq->pool[i].page != NULL) {
- pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
- PAGE_SIZE << priv->hw_params.rx_page_order,
- PCI_DMA_FROMDEVICE);
- __iwl_legacy_free_pages(priv, rxq->pool[i].page);
- rxq->pool[i].page = NULL;
- }
- }
-
- dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
- rxq->bd_dma);
- dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
- rxq->rb_stts, rxq->rb_stts_dma);
- rxq->bd = NULL;
- rxq->rb_stts = NULL;
-}
-
-int iwl4965_rxq_stop(struct iwl_priv *priv)
-{
-
- /* stop Rx DMA */
- iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
- iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
- FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
-
- return 0;
-}
-
-int iwl4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
-{
- int idx = 0;
- int band_offset = 0;
-
- /* HT rate format: mac80211 wants an MCS number, which is just LSB */
- if (rate_n_flags & RATE_MCS_HT_MSK) {
- idx = (rate_n_flags & 0xff);
- return idx;
- /* Legacy rate format, search for match in table */
- } else {
- if (band == IEEE80211_BAND_5GHZ)
- band_offset = IWL_FIRST_OFDM_RATE;
- for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
- if (iwlegacy_rates[idx].plcp == (rate_n_flags & 0xFF))
- return idx - band_offset;
- }
-
- return -1;
-}
-
-static int iwl4965_calc_rssi(struct iwl_priv *priv,
- struct iwl_rx_phy_res *rx_resp)
-{
- /* data from PHY/DSP regarding signal strength, etc.,
- * contents are always there, not configurable by host. */
- struct iwl4965_rx_non_cfg_phy *ncphy =
- (struct iwl4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
- u32 agc = (le16_to_cpu(ncphy->agc_info) & IWL49_AGC_DB_MASK)
- >> IWL49_AGC_DB_POS;
-
- u32 valid_antennae =
- (le16_to_cpu(rx_resp->phy_flags) & IWL49_RX_PHY_FLAGS_ANTENNAE_MASK)
- >> IWL49_RX_PHY_FLAGS_ANTENNAE_OFFSET;
- u8 max_rssi = 0;
- u32 i;
-
- /* Find max rssi among 3 possible receivers.
- * These values are measured by the digital signal processor (DSP).
- * They should stay fairly constant even as the signal strength varies,
- * if the radio's automatic gain control (AGC) is working right.
- * AGC value (see below) will provide the "interesting" info. */
- for (i = 0; i < 3; i++)
- if (valid_antennae & (1 << i))
- max_rssi = max(ncphy->rssi_info[i << 1], max_rssi);
-
- IWL_DEBUG_STATS(priv, "Rssi In A %d B %d C %d Max %d AGC dB %d\n",
- ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4],
- max_rssi, agc);
-
- /* dBm = max_rssi dB - agc dB - constant.
- * Higher AGC (higher radio gain) means lower signal. */
- return max_rssi - agc - IWL4965_RSSI_OFFSET;
-}
-
-
-static u32 iwl4965_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
-{
- u32 decrypt_out = 0;
-
- if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
- RX_RES_STATUS_STATION_FOUND)
- decrypt_out |= (RX_RES_STATUS_STATION_FOUND |
- RX_RES_STATUS_NO_STATION_INFO_MISMATCH);
-
- decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);
-
- /* packet was not encrypted */
- if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
- RX_RES_STATUS_SEC_TYPE_NONE)
- return decrypt_out;
-
- /* packet was encrypted with unknown alg */
- if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
- RX_RES_STATUS_SEC_TYPE_ERR)
- return decrypt_out;
-
- /* decryption was not done in HW */
- if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
- RX_MPDU_RES_STATUS_DEC_DONE_MSK)
- return decrypt_out;
-
- switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {
-
- case RX_RES_STATUS_SEC_TYPE_CCMP:
- /* alg is CCM: check MIC only */
- if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
- /* Bad MIC */
- decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
- else
- decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
-
- break;
-
- case RX_RES_STATUS_SEC_TYPE_TKIP:
- if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
- /* Bad TTAK */
- decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
- break;
- }
- /* fall through if TTAK OK */
- default:
- if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
- decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
- else
- decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
- break;
- }
-
- IWL_DEBUG_RX(priv, "decrypt_in:0x%x decrypt_out = 0x%x\n",
- decrypt_in, decrypt_out);
-
- return decrypt_out;
-}
-
-static void iwl4965_pass_packet_to_mac80211(struct iwl_priv *priv,
- struct ieee80211_hdr *hdr,
- u16 len,
- u32 ampdu_status,
- struct iwl_rx_mem_buffer *rxb,
- struct ieee80211_rx_status *stats)
-{
- struct sk_buff *skb;
- __le16 fc = hdr->frame_control;
-
- /* We only process data packets if the interface is open */
- if (unlikely(!priv->is_open)) {
- IWL_DEBUG_DROP_LIMIT(priv,
- "Dropping packet while interface is not open.\n");
- return;
- }
-
- /* In case of HW accelerated crypto and bad decryption, drop */
- if (!priv->cfg->mod_params->sw_crypto &&
- iwl_legacy_set_decrypted_flag(priv, hdr, ampdu_status, stats))
- return;
-
- skb = dev_alloc_skb(128);
- if (!skb) {
- IWL_ERR(priv, "dev_alloc_skb failed\n");
- return;
- }
-
- skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);
-
- iwl_legacy_update_stats(priv, false, fc, len);
- memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
-
- ieee80211_rx(priv->hw, skb);
- priv->alloc_rxb_page--;
- rxb->page = NULL;
-}
-
-/* Called for REPLY_RX (legacy ABG frames), or
- * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
-void iwl4965_rx_reply_rx(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct ieee80211_hdr *header;
- struct ieee80211_rx_status rx_status;
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_rx_phy_res *phy_res;
- __le32 rx_pkt_status;
- struct iwl_rx_mpdu_res_start *amsdu;
- u32 len;
- u32 ampdu_status;
- u32 rate_n_flags;
-
- /**
- * REPLY_RX and REPLY_RX_MPDU_CMD are handled differently.
- * REPLY_RX: physical layer info is in this buffer
- * REPLY_RX_MPDU_CMD: physical layer info was sent in separate
- * command and cached in priv->last_phy_res
- *
- * Here we set up local variables depending on which command is
- * received.
- */
- if (pkt->hdr.cmd == REPLY_RX) {
- phy_res = (struct iwl_rx_phy_res *)pkt->u.raw;
- header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res)
- + phy_res->cfg_phy_cnt);
-
- len = le16_to_cpu(phy_res->byte_count);
- rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) +
- phy_res->cfg_phy_cnt + len);
- ampdu_status = le32_to_cpu(rx_pkt_status);
- } else {
- if (!priv->_4965.last_phy_res_valid) {
- IWL_ERR(priv, "MPDU frame without cached PHY data\n");
- return;
- }
- phy_res = &priv->_4965.last_phy_res;
- amsdu = (struct iwl_rx_mpdu_res_start *)pkt->u.raw;
- header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
- len = le16_to_cpu(amsdu->byte_count);
- rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len);
- ampdu_status = iwl4965_translate_rx_status(priv,
- le32_to_cpu(rx_pkt_status));
- }
-
- if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
-		IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d\n",
- phy_res->cfg_phy_cnt);
- return;
- }
-
- if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
- !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
- IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n",
- le32_to_cpu(rx_pkt_status));
- return;
- }
-
- /* This will be used in several places later */
- rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
-
- /* rx_status carries information about the packet to mac80211 */
- rx_status.mactime = le64_to_cpu(phy_res->timestamp);
- rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
- IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
- rx_status.freq =
- ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
- rx_status.band);
- rx_status.rate_idx =
- iwl4965_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
- rx_status.flag = 0;
-
-	/* TSF isn't reliable. To allow a smooth user experience,
-	 * this workaround does not propagate it to mac80211 */
- /*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/
-
- priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
-
- /* Find max signal strength (dBm) among 3 antenna/receiver chains */
- rx_status.signal = iwl4965_calc_rssi(priv, phy_res);
-
- iwl_legacy_dbg_log_rx_data_frame(priv, len, header);
- IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n",
- rx_status.signal, (unsigned long long)rx_status.mactime);
-
- /*
- * "antenna number"
- *
- * It seems that the antenna field in the phy flags value
- * is actually a bit field. This is undefined by radiotap,
- * it wants an actual antenna number but I always get "7"
- * for most legacy frames I receive indicating that the
- * same frame was received on all three RX chains.
- *
- * I think this field should be removed in favor of a
- * new 802.11n radiotap field "RX chains" that is defined
- * as a bitmask.
- */
- rx_status.antenna =
- (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK)
- >> RX_RES_PHY_FLAGS_ANTENNA_POS;
-
- /* set the preamble flag if appropriate */
- if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
- rx_status.flag |= RX_FLAG_SHORTPRE;
-
- /* Set up the HT phy flags */
- if (rate_n_flags & RATE_MCS_HT_MSK)
- rx_status.flag |= RX_FLAG_HT;
- if (rate_n_flags & RATE_MCS_HT40_MSK)
- rx_status.flag |= RX_FLAG_40MHZ;
- if (rate_n_flags & RATE_MCS_SGI_MSK)
- rx_status.flag |= RX_FLAG_SHORT_GI;
-
- iwl4965_pass_packet_to_mac80211(priv, header, len, ampdu_status,
- rxb, &rx_status);
-}
-
-/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
- * This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
-void iwl4965_rx_reply_rx_phy(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- priv->_4965.last_phy_res_valid = true;
- memcpy(&priv->_4965.last_phy_res, pkt->u.raw,
- sizeof(struct iwl_rx_phy_res));
-}
-
-static int iwl4965_get_channels_for_scan(struct iwl_priv *priv,
- struct ieee80211_vif *vif,
- enum ieee80211_band band,
- u8 is_active, u8 n_probes,
- struct iwl_scan_channel *scan_ch)
-{
- struct ieee80211_channel *chan;
- const struct ieee80211_supported_band *sband;
- const struct iwl_channel_info *ch_info;
- u16 passive_dwell = 0;
- u16 active_dwell = 0;
- int added, i;
- u16 channel;
-
- sband = iwl_get_hw_mode(priv, band);
- if (!sband)
- return 0;
-
- active_dwell = iwl_legacy_get_active_dwell_time(priv, band, n_probes);
- passive_dwell = iwl_legacy_get_passive_dwell_time(priv, band, vif);
-
- if (passive_dwell <= active_dwell)
- passive_dwell = active_dwell + 1;
-
- for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) {
- chan = priv->scan_request->channels[i];
-
- if (chan->band != band)
- continue;
-
- channel = chan->hw_value;
- scan_ch->channel = cpu_to_le16(channel);
-
- ch_info = iwl_legacy_get_channel_info(priv, band, channel);
- if (!iwl_legacy_is_channel_valid(ch_info)) {
- IWL_DEBUG_SCAN(priv,
- "Channel %d is INVALID for this band.\n",
- channel);
- continue;
- }
-
- if (!is_active || iwl_legacy_is_channel_passive(ch_info) ||
- (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN))
- scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
- else
- scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
-
- if (n_probes)
- scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes);
-
- scan_ch->active_dwell = cpu_to_le16(active_dwell);
- scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
-
- /* Set txpower levels to defaults */
- scan_ch->dsp_atten = 110;
-
- /* NOTE: if we were doing 6Mb OFDM for scans we'd use
- * power level:
- * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
- */
- if (band == IEEE80211_BAND_5GHZ)
- scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
- else
- scan_ch->tx_gain = ((1 << 5) | (5 << 3));
-
- IWL_DEBUG_SCAN(priv, "Scanning ch=%d prob=0x%X [%s %d]\n",
- channel, le32_to_cpu(scan_ch->type),
- (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
- "ACTIVE" : "PASSIVE",
- (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
- active_dwell : passive_dwell);
-
- scan_ch++;
- added++;
- }
-
- IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added);
- return added;
-}
-
-int iwl4965_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
-{
- struct iwl_host_cmd cmd = {
- .id = REPLY_SCAN_CMD,
- .len = sizeof(struct iwl_scan_cmd),
- .flags = CMD_SIZE_HUGE,
- };
- struct iwl_scan_cmd *scan;
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- u32 rate_flags = 0;
- u16 cmd_len;
- u16 rx_chain = 0;
- enum ieee80211_band band;
- u8 n_probes = 0;
- u8 rx_ant = priv->hw_params.valid_rx_ant;
- u8 rate;
- bool is_active = false;
- int chan_mod;
- u8 active_chains;
- u8 scan_tx_antennas = priv->hw_params.valid_tx_ant;
- int ret;
-
- lockdep_assert_held(&priv->mutex);
-
- if (vif)
- ctx = iwl_legacy_rxon_ctx_from_vif(vif);
-
- if (!priv->scan_cmd) {
- priv->scan_cmd = kmalloc(sizeof(struct iwl_scan_cmd) +
- IWL_MAX_SCAN_SIZE, GFP_KERNEL);
- if (!priv->scan_cmd) {
- IWL_DEBUG_SCAN(priv,
- "fail to allocate memory for scan\n");
- return -ENOMEM;
- }
- }
- scan = priv->scan_cmd;
- memset(scan, 0, sizeof(struct iwl_scan_cmd) + IWL_MAX_SCAN_SIZE);
-
- scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
- scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
-
- if (iwl_legacy_is_any_associated(priv)) {
- u16 interval;
- u32 extra;
- u32 suspend_time = 100;
- u32 scan_suspend_time = 100;
-
- IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
- interval = vif->bss_conf.beacon_int;
-
- scan->suspend_time = 0;
- scan->max_out_time = cpu_to_le32(200 * 1024);
- if (!interval)
- interval = suspend_time;
-
- extra = (suspend_time / interval) << 22;
- scan_suspend_time = (extra |
- ((suspend_time % interval) * 1024));
- scan->suspend_time = cpu_to_le32(scan_suspend_time);
- IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n",
- scan_suspend_time, interval);
- }
-
- if (priv->scan_request->n_ssids) {
- int i, p = 0;
- IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
- for (i = 0; i < priv->scan_request->n_ssids; i++) {
- /* always does wildcard anyway */
- if (!priv->scan_request->ssids[i].ssid_len)
- continue;
- scan->direct_scan[p].id = WLAN_EID_SSID;
- scan->direct_scan[p].len =
- priv->scan_request->ssids[i].ssid_len;
- memcpy(scan->direct_scan[p].ssid,
- priv->scan_request->ssids[i].ssid,
- priv->scan_request->ssids[i].ssid_len);
- n_probes++;
- p++;
- }
- is_active = true;
- } else
- IWL_DEBUG_SCAN(priv, "Start passive scan.\n");
-
- scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
- scan->tx_cmd.sta_id = ctx->bcast_sta_id;
- scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
-
- switch (priv->scan_band) {
- case IEEE80211_BAND_2GHZ:
- scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
- chan_mod = le32_to_cpu(
- priv->contexts[IWL_RXON_CTX_BSS].active.flags &
- RXON_FLG_CHANNEL_MODE_MSK)
- >> RXON_FLG_CHANNEL_MODE_POS;
- if (chan_mod == CHANNEL_MODE_PURE_40) {
- rate = IWL_RATE_6M_PLCP;
- } else {
- rate = IWL_RATE_1M_PLCP;
- rate_flags = RATE_MCS_CCK_MSK;
- }
- break;
- case IEEE80211_BAND_5GHZ:
- rate = IWL_RATE_6M_PLCP;
- break;
- default:
- IWL_WARN(priv, "Invalid scan band\n");
- return -EIO;
- }
-
- /*
- * If active scanning is requested but a certain channel is
- * marked passive, we can do active scanning if we detect
- * transmissions.
- *
- * There is an issue with some firmware versions that triggers
- * a sysassert on a "good CRC threshold" of zero (== disabled),
- * on a radar channel even though this means that we should NOT
- * send probes.
- *
- * The "good CRC threshold" is the number of frames that we
- * need to receive during our dwell time on a channel before
- * sending out probes -- setting this to a huge value will
- * mean we never reach it, but at the same time work around
- * the aforementioned issue. Thus use IWL_GOOD_CRC_TH_NEVER
- * here instead of IWL_GOOD_CRC_TH_DISABLED.
- */
- scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
- IWL_GOOD_CRC_TH_NEVER;
-
- band = priv->scan_band;
-
- if (priv->cfg->scan_rx_antennas[band])
- rx_ant = priv->cfg->scan_rx_antennas[band];
-
- priv->scan_tx_ant[band] = iwl4965_toggle_tx_ant(priv,
- priv->scan_tx_ant[band],
- scan_tx_antennas);
- rate_flags |= iwl4965_ant_idx_to_flags(priv->scan_tx_ant[band]);
- scan->tx_cmd.rate_n_flags = iwl4965_hw_set_rate_n_flags(rate, rate_flags);
-
- /* In power save mode use one chain, otherwise use all chains */
- if (test_bit(STATUS_POWER_PMI, &priv->status)) {
- /* rx_ant has been set to all valid chains previously */
- active_chains = rx_ant &
- ((u8)(priv->chain_noise_data.active_chains));
- if (!active_chains)
- active_chains = rx_ant;
-
- IWL_DEBUG_SCAN(priv, "chain_noise_data.active_chains: %u\n",
- priv->chain_noise_data.active_chains);
-
- rx_ant = iwl4965_first_antenna(active_chains);
- }
-
- /* MIMO is not used here, but value is required */
- rx_chain |= priv->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
- rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
- rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
- rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
- scan->rx_chain = cpu_to_le16(rx_chain);
-
- cmd_len = iwl_legacy_fill_probe_req(priv,
- (struct ieee80211_mgmt *)scan->data,
- vif->addr,
- priv->scan_request->ie,
- priv->scan_request->ie_len,
- IWL_MAX_SCAN_SIZE - sizeof(*scan));
- scan->tx_cmd.len = cpu_to_le16(cmd_len);
-
- scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK |
- RXON_FILTER_BCON_AWARE_MSK);
-
- scan->channel_count = iwl4965_get_channels_for_scan(priv, vif, band,
- is_active, n_probes,
- (void *)&scan->data[cmd_len]);
- if (scan->channel_count == 0) {
- IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
- return -EIO;
- }
-
- cmd.len += le16_to_cpu(scan->tx_cmd.len) +
- scan->channel_count * sizeof(struct iwl_scan_channel);
- cmd.data = scan;
- scan->len = cpu_to_le16(cmd.len);
-
- set_bit(STATUS_SCAN_HW, &priv->status);
-
- ret = iwl_legacy_send_cmd_sync(priv, &cmd);
- if (ret)
- clear_bit(STATUS_SCAN_HW, &priv->status);
-
- return ret;
-}
-
-int iwl4965_manage_ibss_station(struct iwl_priv *priv,
- struct ieee80211_vif *vif, bool add)
-{
- struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
-
- if (add)
- return iwl4965_add_bssid_station(priv, vif_priv->ctx,
- vif->bss_conf.bssid,
- &vif_priv->ibss_bssid_sta_id);
- return iwl_legacy_remove_station(priv, vif_priv->ibss_bssid_sta_id,
- vif->bss_conf.bssid);
-}
-
-void iwl4965_free_tfds_in_queue(struct iwl_priv *priv,
- int sta_id, int tid, int freed)
-{
- lockdep_assert_held(&priv->sta_lock);
-
- if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed)
- priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
- else {
- IWL_DEBUG_TX(priv, "free more than tfds_in_queue (%u:%d)\n",
- priv->stations[sta_id].tid[tid].tfds_in_queue,
- freed);
- priv->stations[sta_id].tid[tid].tfds_in_queue = 0;
- }
-}
-
-#define IWL_TX_QUEUE_MSK 0xfffff
-
-static bool iwl4965_is_single_rx_stream(struct iwl_priv *priv)
-{
- return priv->current_ht_config.smps == IEEE80211_SMPS_STATIC ||
- priv->current_ht_config.single_chain_sufficient;
-}
-
-#define IWL_NUM_RX_CHAINS_MULTIPLE 3
-#define IWL_NUM_RX_CHAINS_SINGLE 2
-#define IWL_NUM_IDLE_CHAINS_DUAL 2
-#define IWL_NUM_IDLE_CHAINS_SINGLE 1
-
-/*
- * Determine how many receiver/antenna chains to use.
- *
- * More provides better reception via diversity. Fewer saves power
- * at the expense of throughput, but only when not in powersave to
- * start with.
- *
- * MIMO (dual stream) requires at least 2, but works better with 3.
- * This does not determine *which* chains to use, just how many.
- */
-static int iwl4965_get_active_rx_chain_count(struct iwl_priv *priv)
-{
- /* # of Rx chains to use when expecting MIMO. */
- if (iwl4965_is_single_rx_stream(priv))
- return IWL_NUM_RX_CHAINS_SINGLE;
- else
- return IWL_NUM_RX_CHAINS_MULTIPLE;
-}
-
-/*
- * When we are in power saving mode, unless the device supports spatial
- * multiplexing power save, use the active count for the rx chain count.
- */
-static int
-iwl4965_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt)
-{
- /* # Rx chains when idling, depending on SMPS mode */
- switch (priv->current_ht_config.smps) {
- case IEEE80211_SMPS_STATIC:
- case IEEE80211_SMPS_DYNAMIC:
- return IWL_NUM_IDLE_CHAINS_SINGLE;
- case IEEE80211_SMPS_OFF:
- return active_cnt;
- default:
- WARN(1, "invalid SMPS mode %d",
- priv->current_ht_config.smps);
- return active_cnt;
- }
-}
-
-/* up to 4 chains */
-static u8 iwl4965_count_chain_bitmap(u32 chain_bitmap)
-{
- u8 res;
- res = (chain_bitmap & BIT(0)) >> 0;
- res += (chain_bitmap & BIT(1)) >> 1;
- res += (chain_bitmap & BIT(2)) >> 2;
- res += (chain_bitmap & BIT(3)) >> 3;
- return res;
-}
-
-/**
- * iwl4965_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
- *
- * Selects how many and which Rx receivers/antennas/chains to use.
- * This should not be used for scan command ... it puts data in wrong place.
- */
-void iwl4965_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
-{
- bool is_single = iwl4965_is_single_rx_stream(priv);
- bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
- u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt;
- u32 active_chains;
- u16 rx_chain;
-
- /* Tell uCode which antennas are actually connected.
- * Before first association, we assume all antennas are connected.
- * Just after first association, iwl4965_chain_noise_calibration()
- * checks which antennas actually *are* connected. */
- if (priv->chain_noise_data.active_chains)
- active_chains = priv->chain_noise_data.active_chains;
- else
- active_chains = priv->hw_params.valid_rx_ant;
-
- rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS;
-
- /* How many receivers should we use? */
- active_rx_cnt = iwl4965_get_active_rx_chain_count(priv);
- idle_rx_cnt = iwl4965_get_idle_rx_chain_count(priv, active_rx_cnt);
-
-
-	/* correct rx chain count according to hw settings
-	 * and chain noise calibration
- */
- valid_rx_cnt = iwl4965_count_chain_bitmap(active_chains);
- if (valid_rx_cnt < active_rx_cnt)
- active_rx_cnt = valid_rx_cnt;
-
- if (valid_rx_cnt < idle_rx_cnt)
- idle_rx_cnt = valid_rx_cnt;
-
- rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
- rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS;
-
- ctx->staging.rx_chain = cpu_to_le16(rx_chain);
-
- if (!is_single && (active_rx_cnt >= IWL_NUM_RX_CHAINS_SINGLE) && is_cam)
- ctx->staging.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
- else
- ctx->staging.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;
-
- IWL_DEBUG_ASSOC(priv, "rx_chain=0x%X active=%d idle=%d\n",
- ctx->staging.rx_chain,
- active_rx_cnt, idle_rx_cnt);
-
- WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 ||
- active_rx_cnt < idle_rx_cnt);
-}
-
-u8 iwl4965_toggle_tx_ant(struct iwl_priv *priv, u8 ant, u8 valid)
-{
- int i;
- u8 ind = ant;
-
- for (i = 0; i < RATE_ANT_NUM - 1; i++) {
- ind = (ind + 1) < RATE_ANT_NUM ? ind + 1 : 0;
- if (valid & BIT(ind))
- return ind;
- }
- return ant;
-}
-
-static const char *iwl4965_get_fh_string(int cmd)
-{
- switch (cmd) {
- IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
- IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
- IWL_CMD(FH_RSCSR_CHNL0_WPTR);
- IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
- IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
- IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
- IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
- IWL_CMD(FH_TSSR_TX_STATUS_REG);
- IWL_CMD(FH_TSSR_TX_ERROR_REG);
- default:
- return "UNKNOWN";
- }
-}
-
-int iwl4965_dump_fh(struct iwl_priv *priv, char **buf, bool display)
-{
- int i;
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- int pos = 0;
- size_t bufsz = 0;
-#endif
- static const u32 fh_tbl[] = {
- FH_RSCSR_CHNL0_STTS_WPTR_REG,
- FH_RSCSR_CHNL0_RBDCB_BASE_REG,
- FH_RSCSR_CHNL0_WPTR,
- FH_MEM_RCSR_CHNL0_CONFIG_REG,
- FH_MEM_RSSR_SHARED_CTRL_REG,
- FH_MEM_RSSR_RX_STATUS_REG,
- FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
- FH_TSSR_TX_STATUS_REG,
- FH_TSSR_TX_ERROR_REG
- };
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- if (display) {
- bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
- *buf = kmalloc(bufsz, GFP_KERNEL);
- if (!*buf)
- return -ENOMEM;
- pos += scnprintf(*buf + pos, bufsz - pos,
- "FH register values:\n");
- for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
- pos += scnprintf(*buf + pos, bufsz - pos,
- " %34s: 0X%08x\n",
- iwl4965_get_fh_string(fh_tbl[i]),
- iwl_legacy_read_direct32(priv, fh_tbl[i]));
- }
- return pos;
- }
-#endif
- IWL_ERR(priv, "FH register values:\n");
- for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
- IWL_ERR(priv, " %34s: 0X%08x\n",
- iwl4965_get_fh_string(fh_tbl[i]),
- iwl_legacy_read_direct32(priv, fh_tbl[i]));
- }
- return 0;
-}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-rs.c b/drivers/net/wireless/iwlegacy/iwl-4965-rs.c
deleted file mode 100644
index 57ebe214e68c..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-rs.c
+++ /dev/null
@@ -1,2871 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/skbuff.h>
-#include <linux/slab.h>
-#include <net/mac80211.h>
-
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/delay.h>
-
-#include <linux/workqueue.h>
-
-#include "iwl-dev.h"
-#include "iwl-sta.h"
-#include "iwl-core.h"
-#include "iwl-4965.h"
-
-#define IWL4965_RS_NAME "iwl-4965-rs"
-
-#define NUM_TRY_BEFORE_ANT_TOGGLE 1
-#define IWL_NUMBER_TRY 1
-#define IWL_HT_NUMBER_TRY 3
-
-#define IWL_RATE_MAX_WINDOW 62 /* # tx in history window */
-#define IWL_RATE_MIN_FAILURE_TH 6 /* min failures to calc tpt */
-#define IWL_RATE_MIN_SUCCESS_TH 8 /* min successes to calc tpt */
-
-/* max allowed rate miss before sync LQ cmd */
-#define IWL_MISSED_RATE_MAX 15
-/* max time to accumulate history (3*HZ = 3 seconds) */
-#define IWL_RATE_SCALE_FLUSH_INTVL (3*HZ)
-
-static u8 rs_ht_to_legacy[] = {
- IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX,
- IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX,
- IWL_RATE_6M_INDEX,
- IWL_RATE_6M_INDEX, IWL_RATE_9M_INDEX,
- IWL_RATE_12M_INDEX, IWL_RATE_18M_INDEX,
- IWL_RATE_24M_INDEX, IWL_RATE_36M_INDEX,
- IWL_RATE_48M_INDEX, IWL_RATE_54M_INDEX
-};
-
-static const u8 ant_toggle_lookup[] = {
- /*ANT_NONE -> */ ANT_NONE,
- /*ANT_A -> */ ANT_B,
- /*ANT_B -> */ ANT_C,
- /*ANT_AB -> */ ANT_BC,
- /*ANT_C -> */ ANT_A,
- /*ANT_AC -> */ ANT_AB,
- /*ANT_BC -> */ ANT_AC,
- /*ANT_ABC -> */ ANT_ABC,
-};
-
-#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \
- [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
- IWL_RATE_SISO_##s##M_PLCP, \
- IWL_RATE_MIMO2_##s##M_PLCP,\
- IWL_RATE_##r##M_IEEE, \
- IWL_RATE_##ip##M_INDEX, \
- IWL_RATE_##in##M_INDEX, \
- IWL_RATE_##rp##M_INDEX, \
- IWL_RATE_##rn##M_INDEX, \
- IWL_RATE_##pp##M_INDEX, \
- IWL_RATE_##np##M_INDEX }
-
-/*
- * Parameter order:
- * rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate
- *
- * If there isn't a valid next or previous rate then INV is used which
- * maps to IWL_RATE_INVALID
- *
- */
-const struct iwl_rate_info iwlegacy_rates[IWL_RATE_COUNT] = {
- IWL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2), /* 1mbps */
- IWL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5), /* 2mbps */
- IWL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11), /*5.5mbps */
- IWL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18), /* 11mbps */
- IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11), /* 6mbps */
- IWL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11), /* 9mbps */
- IWL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18), /* 12mbps */
- IWL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24), /* 18mbps */
- IWL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36), /* 24mbps */
- IWL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48), /* 36mbps */
- IWL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54), /* 48mbps */
- IWL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */
- IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */
-};
-
-static int iwl4965_hwrate_to_plcp_idx(u32 rate_n_flags)
-{
- int idx = 0;
-
- /* HT rate format */
- if (rate_n_flags & RATE_MCS_HT_MSK) {
- idx = (rate_n_flags & 0xff);
-
- if (idx >= IWL_RATE_MIMO2_6M_PLCP)
- idx = idx - IWL_RATE_MIMO2_6M_PLCP;
-
- idx += IWL_FIRST_OFDM_RATE;
-		/* skip 9M, not supported in HT */
- if (idx >= IWL_RATE_9M_INDEX)
- idx += 1;
- if ((idx >= IWL_FIRST_OFDM_RATE) && (idx <= IWL_LAST_OFDM_RATE))
- return idx;
-
- /* legacy rate format, search for match in table */
- } else {
- for (idx = 0; idx < ARRAY_SIZE(iwlegacy_rates); idx++)
- if (iwlegacy_rates[idx].plcp == (rate_n_flags & 0xFF))
- return idx;
- }
-
- return -1;
-}
-
-static void iwl4965_rs_rate_scale_perform(struct iwl_priv *priv,
- struct sk_buff *skb,
- struct ieee80211_sta *sta,
- struct iwl_lq_sta *lq_sta);
-static void iwl4965_rs_fill_link_cmd(struct iwl_priv *priv,
- struct iwl_lq_sta *lq_sta, u32 rate_n_flags);
-static void iwl4965_rs_stay_in_table(struct iwl_lq_sta *lq_sta,
- bool force_search);
-
-#ifdef CONFIG_MAC80211_DEBUGFS
-static void iwl4965_rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
- u32 *rate_n_flags, int index);
-#else
-static void iwl4965_rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
- u32 *rate_n_flags, int index)
-{}
-#endif
-
-/**
- * The following tables contain the expected throughput metrics for all rates
- *
- * 1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits
- *
- * where invalid entries are zeros.
- *
- * CCK rates are only valid in legacy table and will only be used in G
- * (2.4 GHz) band.
- */
-
-static s32 expected_tpt_legacy[IWL_RATE_COUNT] = {
- 7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0
-};
-
-static s32 expected_tpt_siso20MHz[4][IWL_RATE_COUNT] = {
- {0, 0, 0, 0, 42, 0, 76, 102, 124, 158, 183, 193, 202}, /* Norm */
- {0, 0, 0, 0, 46, 0, 82, 110, 132, 167, 192, 202, 210}, /* SGI */
- {0, 0, 0, 0, 48, 0, 93, 135, 176, 251, 319, 351, 381}, /* AGG */
- {0, 0, 0, 0, 53, 0, 102, 149, 193, 275, 348, 381, 413}, /* AGG+SGI */
-};
-
-static s32 expected_tpt_siso40MHz[4][IWL_RATE_COUNT] = {
- {0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257}, /* Norm */
- {0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264}, /* SGI */
- {0, 0, 0, 0, 96, 0, 182, 259, 328, 451, 553, 598, 640}, /* AGG */
- {0, 0, 0, 0, 106, 0, 199, 282, 357, 487, 593, 640, 683}, /* AGG+SGI */
-};
-
-static s32 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
- {0, 0, 0, 0, 74, 0, 123, 155, 179, 213, 235, 243, 250}, /* Norm */
- {0, 0, 0, 0, 81, 0, 131, 164, 187, 221, 242, 250, 256}, /* SGI */
- {0, 0, 0, 0, 92, 0, 175, 250, 317, 436, 534, 578, 619}, /* AGG */
- {0, 0, 0, 0, 102, 0, 192, 273, 344, 470, 573, 619, 660}, /* AGG+SGI*/
-};
-
-static s32 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
- {0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289}, /* Norm */
- {0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293}, /* SGI */
- {0, 0, 0, 0, 180, 0, 327, 446, 545, 708, 828, 878, 922}, /* AGG */
- {0, 0, 0, 0, 197, 0, 355, 481, 584, 752, 872, 922, 966}, /* AGG+SGI */
-};
-
-/* mbps, mcs */
-static const struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = {
- { "1", "BPSK DSSS"},
- { "2", "QPSK DSSS"},
- {"5.5", "BPSK CCK"},
- { "11", "QPSK CCK"},
- { "6", "BPSK 1/2"},
- { "9", "BPSK 1/2"},
- { "12", "QPSK 1/2"},
- { "18", "QPSK 3/4"},
- { "24", "16QAM 1/2"},
- { "36", "16QAM 3/4"},
- { "48", "64QAM 2/3"},
- { "54", "64QAM 3/4"},
- { "60", "64QAM 5/6"},
-};
-
-#define MCS_INDEX_PER_STREAM (8)
-
-static inline u8 iwl4965_rs_extract_rate(u32 rate_n_flags)
-{
- return (u8)(rate_n_flags & 0xFF);
-}
-
-static void
-iwl4965_rs_rate_scale_clear_window(struct iwl_rate_scale_data *window)
-{
- window->data = 0;
- window->success_counter = 0;
- window->success_ratio = IWL_INVALID_VALUE;
- window->counter = 0;
- window->average_tpt = IWL_INVALID_VALUE;
- window->stamp = 0;
-}
-
-static inline u8 iwl4965_rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
-{
- return (ant_type & valid_antenna) == ant_type;
-}
-
-/*
- * Remove old data from the statistics. All data older than
- * TID_MAX_TIME_DIFF is deleted.
- */
-static void
-iwl4965_rs_tl_rm_old_stats(struct iwl_traffic_load *tl, u32 curr_time)
-{
- /* The oldest age we want to keep */
- u32 oldest_time = curr_time - TID_MAX_TIME_DIFF;
-
- while (tl->queue_count &&
- (tl->time_stamp < oldest_time)) {
- tl->total -= tl->packet_count[tl->head];
- tl->packet_count[tl->head] = 0;
- tl->time_stamp += TID_QUEUE_CELL_SPACING;
- tl->queue_count--;
- tl->head++;
- if (tl->head >= TID_QUEUE_MAX_SIZE)
- tl->head = 0;
- }
-}
-
-/*
- * Increment the traffic load value for this tid and also remove
- * any values that have aged beyond the allowed time period.
- */
-static u8 iwl4965_rs_tl_add_packet(struct iwl_lq_sta *lq_data,
- struct ieee80211_hdr *hdr)
-{
- u32 curr_time = jiffies_to_msecs(jiffies);
- u32 time_diff;
- s32 index;
- struct iwl_traffic_load *tl = NULL;
- u8 tid;
-
- if (ieee80211_is_data_qos(hdr->frame_control)) {
- u8 *qc = ieee80211_get_qos_ctl(hdr);
- tid = qc[0] & 0xf;
- } else
- return MAX_TID_COUNT;
-
- if (unlikely(tid >= TID_MAX_LOAD_COUNT))
- return MAX_TID_COUNT;
-
- tl = &lq_data->load[tid];
-
- curr_time -= curr_time % TID_ROUND_VALUE;
-
- /* Happens only for the first packet. Initialize the data */
- if (!(tl->queue_count)) {
- tl->total = 1;
- tl->time_stamp = curr_time;
- tl->queue_count = 1;
- tl->head = 0;
- tl->packet_count[0] = 1;
- return MAX_TID_COUNT;
- }
-
- time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
- index = time_diff / TID_QUEUE_CELL_SPACING;
-
- /* The history is too long: remove data that is older than */
- /* TID_MAX_TIME_DIFF */
- if (index >= TID_QUEUE_MAX_SIZE)
- iwl4965_rs_tl_rm_old_stats(tl, curr_time);
-
- index = (tl->head + index) % TID_QUEUE_MAX_SIZE;
- tl->packet_count[index] = tl->packet_count[index] + 1;
- tl->total = tl->total + 1;
-
- if ((index + 1) > tl->queue_count)
- tl->queue_count = index + 1;
-
- return tid;
-}
-
-/*
- * Get the traffic load value for the given tid.
- */
-static u32 iwl4965_rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid)
-{
- u32 curr_time = jiffies_to_msecs(jiffies);
- u32 time_diff;
- s32 index;
- struct iwl_traffic_load *tl = NULL;
-
- if (tid >= TID_MAX_LOAD_COUNT)
- return 0;
-
- tl = &(lq_data->load[tid]);
-
- curr_time -= curr_time % TID_ROUND_VALUE;
-
- if (!(tl->queue_count))
- return 0;
-
- time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
- index = time_diff / TID_QUEUE_CELL_SPACING;
-
- /* The history is too long: remove data that is older than */
- /* TID_MAX_TIME_DIFF */
- if (index >= TID_QUEUE_MAX_SIZE)
- iwl4965_rs_tl_rm_old_stats(tl, curr_time);
-
- return tl->total;
-}
-
-static int iwl4965_rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
- struct iwl_lq_sta *lq_data, u8 tid,
- struct ieee80211_sta *sta)
-{
- int ret = -EAGAIN;
- u32 load;
-
- load = iwl4965_rs_tl_get_load(lq_data, tid);
-
- if (load > IWL_AGG_LOAD_THRESHOLD) {
- IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n",
- sta->addr, tid);
- ret = ieee80211_start_tx_ba_session(sta, tid, 5000);
- if (ret == -EAGAIN) {
- /*
-			 * driver and mac80211 are out of sync;
-			 * this might be caused by reloading firmware.
-			 * Stop the tx BA session here.
- */
- IWL_ERR(priv, "Fail start Tx agg on tid: %d\n",
- tid);
- ieee80211_stop_tx_ba_session(sta, tid);
- }
- } else {
- IWL_ERR(priv, "Aggregation not enabled for tid %d "
- "because load = %u\n", tid, load);
- }
- return ret;
-}
-
-static void iwl4965_rs_tl_turn_on_agg(struct iwl_priv *priv, u8 tid,
- struct iwl_lq_sta *lq_data,
- struct ieee80211_sta *sta)
-{
- if (tid < TID_MAX_LOAD_COUNT)
- iwl4965_rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta);
- else
- IWL_ERR(priv, "tid exceeds max load count: %d/%d\n",
- tid, TID_MAX_LOAD_COUNT);
-}
-
-static inline int iwl4965_get_iwl4965_num_of_ant_from_rate(u32 rate_n_flags)
-{
- return !!(rate_n_flags & RATE_MCS_ANT_A_MSK) +
- !!(rate_n_flags & RATE_MCS_ANT_B_MSK) +
- !!(rate_n_flags & RATE_MCS_ANT_C_MSK);
-}
-
-/*
- * Get the expected throughput for a rate index from an iwl_scale_tbl_info,
- * wrapping a NULL pointer check on the expected_tpt table.
- */
-static s32
-iwl4965_get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index)
-{
- if (tbl->expected_tpt)
- return tbl->expected_tpt[rs_index];
- return 0;
-}
-
-/**
- * iwl4965_rs_collect_tx_data - Update the success/failure sliding window
- *
- * We keep a sliding window of the last 62 packets transmitted
- * at this rate. window->data contains the bitmask of successful
- * packets.
- */
-static int iwl4965_rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
- int scale_index, int attempts, int successes)
-{
- struct iwl_rate_scale_data *window = NULL;
- static const u64 mask = (((u64)1) << (IWL_RATE_MAX_WINDOW - 1));
- s32 fail_count, tpt;
-
- if (scale_index < 0 || scale_index >= IWL_RATE_COUNT)
- return -EINVAL;
-
- /* Select window for current tx bit rate */
- window = &(tbl->win[scale_index]);
-
- /* Get expected throughput */
- tpt = iwl4965_get_expected_tpt(tbl, scale_index);
-
- /*
- * Keep track of only the latest 62 tx frame attempts in this rate's
- * history window; anything older isn't really relevant any more.
- * If we have filled up the sliding window, drop the oldest attempt;
- * if the oldest attempt (highest bit in bitmap) shows "success",
- * subtract "1" from the success counter (this is the main reason
- * we keep these bitmaps!).
- */
- while (attempts > 0) {
- if (window->counter >= IWL_RATE_MAX_WINDOW) {
-
- /* remove earliest */
- window->counter = IWL_RATE_MAX_WINDOW - 1;
-
- if (window->data & mask) {
- window->data &= ~mask;
- window->success_counter--;
- }
- }
-
- /* Increment frames-attempted counter */
- window->counter++;
-
- /* Shift bitmap by one frame to throw away oldest history */
- window->data <<= 1;
-
- /* Mark the most recent #successes attempts as successful */
- if (successes > 0) {
- window->success_counter++;
- window->data |= 0x1;
- successes--;
- }
-
- attempts--;
- }
-
- /* Calculate current success ratio, avoid divide-by-0! */
- if (window->counter > 0)
- window->success_ratio = 128 * (100 * window->success_counter)
- / window->counter;
- else
- window->success_ratio = IWL_INVALID_VALUE;
-
- fail_count = window->counter - window->success_counter;
-
- /* Calculate average throughput, if we have enough history. */
- if ((fail_count >= IWL_RATE_MIN_FAILURE_TH) ||
- (window->success_counter >= IWL_RATE_MIN_SUCCESS_TH))
- window->average_tpt = (window->success_ratio * tpt + 64) / 128;
- else
- window->average_tpt = IWL_INVALID_VALUE;
-
- /* Tag this window as having been updated */
- window->stamp = jiffies;
-
- return 0;
-}
-
-/*
- * Fill uCode API rate_n_flags field, based on "search" or "active" table.
- */
-static u32 iwl4965_rate_n_flags_from_tbl(struct iwl_priv *priv,
- struct iwl_scale_tbl_info *tbl,
- int index, u8 use_green)
-{
- u32 rate_n_flags = 0;
-
- if (is_legacy(tbl->lq_type)) {
- rate_n_flags = iwlegacy_rates[index].plcp;
- if (index >= IWL_FIRST_CCK_RATE && index <= IWL_LAST_CCK_RATE)
- rate_n_flags |= RATE_MCS_CCK_MSK;
-
- } else if (is_Ht(tbl->lq_type)) {
- if (index > IWL_LAST_OFDM_RATE) {
- IWL_ERR(priv, "Invalid HT rate index %d\n", index);
- index = IWL_LAST_OFDM_RATE;
- }
- rate_n_flags = RATE_MCS_HT_MSK;
-
- if (is_siso(tbl->lq_type))
- rate_n_flags |= iwlegacy_rates[index].plcp_siso;
- else
- rate_n_flags |= iwlegacy_rates[index].plcp_mimo2;
- } else {
- IWL_ERR(priv, "Invalid tbl->lq_type %d\n", tbl->lq_type);
- }
-
- rate_n_flags |= ((tbl->ant_type << RATE_MCS_ANT_POS) &
- RATE_MCS_ANT_ABC_MSK);
-
- if (is_Ht(tbl->lq_type)) {
- if (tbl->is_ht40) {
- if (tbl->is_dup)
- rate_n_flags |= RATE_MCS_DUP_MSK;
- else
- rate_n_flags |= RATE_MCS_HT40_MSK;
- }
- if (tbl->is_SGI)
- rate_n_flags |= RATE_MCS_SGI_MSK;
-
- if (use_green) {
- rate_n_flags |= RATE_MCS_GF_MSK;
- if (is_siso(tbl->lq_type) && tbl->is_SGI) {
- rate_n_flags &= ~RATE_MCS_SGI_MSK;
- IWL_ERR(priv, "GF was set with SGI:SISO\n");
- }
- }
- }
- return rate_n_flags;
-}
-
-/*
- * Interpret uCode API's rate_n_flags format,
- * fill "search" or "active" tx mode table.
- */
-static int iwl4965_rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
- enum ieee80211_band band,
- struct iwl_scale_tbl_info *tbl,
- int *rate_idx)
-{
- u32 ant_msk = (rate_n_flags & RATE_MCS_ANT_ABC_MSK);
- u8 iwl4965_num_of_ant = iwl4965_get_iwl4965_num_of_ant_from_rate(rate_n_flags);
- u8 mcs;
-
- memset(tbl, 0, sizeof(struct iwl_scale_tbl_info));
- *rate_idx = iwl4965_hwrate_to_plcp_idx(rate_n_flags);
-
- if (*rate_idx == IWL_RATE_INVALID) {
- *rate_idx = -1;
- return -EINVAL;
- }
- tbl->is_SGI = 0; /* default legacy setup */
- tbl->is_ht40 = 0;
- tbl->is_dup = 0;
- tbl->ant_type = (ant_msk >> RATE_MCS_ANT_POS);
- tbl->lq_type = LQ_NONE;
- tbl->max_search = IWL_MAX_SEARCH;
-
- /* legacy rate format */
- if (!(rate_n_flags & RATE_MCS_HT_MSK)) {
- if (iwl4965_num_of_ant == 1) {
- if (band == IEEE80211_BAND_5GHZ)
- tbl->lq_type = LQ_A;
- else
- tbl->lq_type = LQ_G;
- }
- /* HT rate format */
- } else {
- if (rate_n_flags & RATE_MCS_SGI_MSK)
- tbl->is_SGI = 1;
-
- if ((rate_n_flags & RATE_MCS_HT40_MSK) ||
- (rate_n_flags & RATE_MCS_DUP_MSK))
- tbl->is_ht40 = 1;
-
- if (rate_n_flags & RATE_MCS_DUP_MSK)
- tbl->is_dup = 1;
-
- mcs = iwl4965_rs_extract_rate(rate_n_flags);
-
- /* SISO */
- if (mcs <= IWL_RATE_SISO_60M_PLCP) {
- if (iwl4965_num_of_ant == 1)
- tbl->lq_type = LQ_SISO; /*else NONE*/
- /* MIMO2 */
- } else {
- if (iwl4965_num_of_ant == 2)
- tbl->lq_type = LQ_MIMO2;
- }
- }
- return 0;
-}
-
-/* switch to another antenna/antennas and return 1 */
-/* if no other valid antenna found, return 0 */
-static int iwl4965_rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
- struct iwl_scale_tbl_info *tbl)
-{
- u8 new_ant_type;
-
- if (!tbl->ant_type || tbl->ant_type > ANT_ABC)
- return 0;
-
- if (!iwl4965_rs_is_valid_ant(valid_ant, tbl->ant_type))
- return 0;
-
- new_ant_type = ant_toggle_lookup[tbl->ant_type];
-
- while ((new_ant_type != tbl->ant_type) &&
- !iwl4965_rs_is_valid_ant(valid_ant, new_ant_type))
- new_ant_type = ant_toggle_lookup[new_ant_type];
-
- if (new_ant_type == tbl->ant_type)
- return 0;
-
- tbl->ant_type = new_ant_type;
- *rate_n_flags &= ~RATE_MCS_ANT_ABC_MSK;
- *rate_n_flags |= new_ant_type << RATE_MCS_ANT_POS;
- return 1;
-}
-
-/**
- * Green-field mode is valid if the station supports it and
- * there are no non-GF stations present in the BSS.
- */
-static bool iwl4965_rs_use_green(struct ieee80211_sta *sta)
-{
- struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
- struct iwl_rxon_context *ctx = sta_priv->common.ctx;
-
- return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) &&
- !(ctx->ht.non_gf_sta_present);
-}
-
-/**
- * iwl4965_rs_get_supported_rates - get the available rates
- *
- * For management or broadcast frames, only the basic
- * available rates are returned.
- *
- */
-static u16 iwl4965_rs_get_supported_rates(struct iwl_lq_sta *lq_sta,
- struct ieee80211_hdr *hdr,
- enum iwl_table_type rate_type)
-{
- if (is_legacy(rate_type)) {
- return lq_sta->active_legacy_rate;
- } else {
- if (is_siso(rate_type))
- return lq_sta->active_siso_rate;
- else
- return lq_sta->active_mimo2_rate;
- }
-}
-
-static u16
-iwl4965_rs_get_adjacent_rate(struct iwl_priv *priv, u8 index, u16 rate_mask,
- int rate_type)
-{
- u8 high = IWL_RATE_INVALID;
- u8 low = IWL_RATE_INVALID;
-
- /* 802.11A or ht walks to the next literal adjacent rate in
- * the rate table */
- if (is_a_band(rate_type) || !is_legacy(rate_type)) {
- int i;
- u32 mask;
-
- /* Find the previous rate that is in the rate mask */
- i = index - 1;
- for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
- if (rate_mask & mask) {
- low = i;
- break;
- }
- }
-
- /* Find the next rate that is in the rate mask */
- i = index + 1;
- for (mask = (1 << i); i < IWL_RATE_COUNT; i++, mask <<= 1) {
- if (rate_mask & mask) {
- high = i;
- break;
- }
- }
-
- return (high << 8) | low;
- }
-
- low = index;
- while (low != IWL_RATE_INVALID) {
- low = iwlegacy_rates[low].prev_rs;
- if (low == IWL_RATE_INVALID)
- break;
- if (rate_mask & (1 << low))
- break;
- IWL_DEBUG_RATE(priv, "Skipping masked lower rate: %d\n", low);
- }
-
- high = index;
- while (high != IWL_RATE_INVALID) {
- high = iwlegacy_rates[high].next_rs;
- if (high == IWL_RATE_INVALID)
- break;
- if (rate_mask & (1 << high))
- break;
- IWL_DEBUG_RATE(priv, "Skipping masked higher rate: %d\n", high);
- }
-
- return (high << 8) | low;
-}
-
-static u32 iwl4965_rs_get_lower_rate(struct iwl_lq_sta *lq_sta,
- struct iwl_scale_tbl_info *tbl,
- u8 scale_index, u8 ht_possible)
-{
- s32 low;
- u16 rate_mask;
- u16 high_low;
- u8 switch_to_legacy = 0;
- u8 is_green = lq_sta->is_green;
- struct iwl_priv *priv = lq_sta->drv;
-
- /* check if we need to switch from HT to legacy rates.
- * assumption is that mandatory rates (1Mbps or 6Mbps)
- * are always supported (spec demand) */
- if (!is_legacy(tbl->lq_type) && (!ht_possible || !scale_index)) {
- switch_to_legacy = 1;
- scale_index = rs_ht_to_legacy[scale_index];
- if (lq_sta->band == IEEE80211_BAND_5GHZ)
- tbl->lq_type = LQ_A;
- else
- tbl->lq_type = LQ_G;
-
- if (iwl4965_num_of_ant(tbl->ant_type) > 1)
- tbl->ant_type =
- iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
-
- tbl->is_ht40 = 0;
- tbl->is_SGI = 0;
- tbl->max_search = IWL_MAX_SEARCH;
- }
-
- rate_mask = iwl4965_rs_get_supported_rates(lq_sta, NULL, tbl->lq_type);
-
- /* Mask with station rate restriction */
- if (is_legacy(tbl->lq_type)) {
- /* supp_rates has no CCK bits in A mode */
- if (lq_sta->band == IEEE80211_BAND_5GHZ)
- rate_mask = (u16)(rate_mask &
- (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE));
- else
- rate_mask = (u16)(rate_mask & lq_sta->supp_rates);
- }
-
- /* If we switched from HT to legacy, check current rate */
- if (switch_to_legacy && (rate_mask & (1 << scale_index))) {
- low = scale_index;
- goto out;
- }
-
- high_low = iwl4965_rs_get_adjacent_rate(lq_sta->drv,
- scale_index, rate_mask,
- tbl->lq_type);
- low = high_low & 0xff;
-
- if (low == IWL_RATE_INVALID)
- low = scale_index;
-
-out:
- return iwl4965_rate_n_flags_from_tbl(lq_sta->drv, tbl, low, is_green);
-}
-
-/*
- * Simple function to compare two rate scale table types
- */
-static bool iwl4965_table_type_matches(struct iwl_scale_tbl_info *a,
- struct iwl_scale_tbl_info *b)
-{
- return (a->lq_type == b->lq_type) && (a->ant_type == b->ant_type) &&
- (a->is_SGI == b->is_SGI);
-}
-
-/*
- * mac80211 sends us Tx status
- */
-static void
-iwl4965_rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
- struct ieee80211_sta *sta, void *priv_sta,
- struct sk_buff *skb)
-{
- int legacy_success;
- int retries;
- int rs_index, mac_index, i;
- struct iwl_lq_sta *lq_sta = priv_sta;
- struct iwl_link_quality_cmd *table;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- struct iwl_priv *priv = (struct iwl_priv *)priv_r;
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- enum mac80211_rate_control_flags mac_flags;
- u32 tx_rate;
- struct iwl_scale_tbl_info tbl_type;
- struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
- struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
- struct iwl_rxon_context *ctx = sta_priv->common.ctx;
-
- IWL_DEBUG_RATE_LIMIT(priv,
- "get frame ack response, update rate scale window\n");
-
- /* Treat uninitialized rate scaling data same as non-existing. */
- if (!lq_sta) {
- IWL_DEBUG_RATE(priv, "Station rate scaling not created yet.\n");
- return;
- } else if (!lq_sta->drv) {
- IWL_DEBUG_RATE(priv, "Rate scaling not initialized yet.\n");
- return;
- }
-
- if (!ieee80211_is_data(hdr->frame_control) ||
- info->flags & IEEE80211_TX_CTL_NO_ACK)
- return;
-
- /* This packet was aggregated but doesn't carry status info */
- if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
- !(info->flags & IEEE80211_TX_STAT_AMPDU))
- return;
-
- /*
- * Ignore this Tx frame response if its initial rate doesn't match
- * that of latest Link Quality command. There may be stragglers
- * from a previous Link Quality command, but we're no longer interested
- * in those; they're either from the "active" mode while we're trying
- * to check "search" mode, or a prior "search" mode after we've moved
- * to a new "search" mode (which might become the new "active" mode).
- */
- table = &lq_sta->lq;
- tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
- iwl4965_rs_get_tbl_info_from_mcs(tx_rate,
- priv->band, &tbl_type, &rs_index);
- if (priv->band == IEEE80211_BAND_5GHZ)
- rs_index -= IWL_FIRST_OFDM_RATE;
- mac_flags = info->status.rates[0].flags;
- mac_index = info->status.rates[0].idx;
- /* For HT packets, map MCS to PLCP */
- if (mac_flags & IEEE80211_TX_RC_MCS) {
- mac_index &= RATE_MCS_CODE_MSK; /* Remove # of streams */
- if (mac_index >= (IWL_RATE_9M_INDEX - IWL_FIRST_OFDM_RATE))
- mac_index++;
- /*
- * mac80211 HT index is always zero-indexed; we need to move
- * HT OFDM rates after CCK rates in 2.4 GHz band
- */
- if (priv->band == IEEE80211_BAND_2GHZ)
- mac_index += IWL_FIRST_OFDM_RATE;
- }
- /* Here we actually compare this rate to the latest LQ command */
- if ((mac_index < 0) ||
- (tbl_type.is_SGI !=
- !!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) ||
- (tbl_type.is_ht40 !=
- !!(mac_flags & IEEE80211_TX_RC_40_MHZ_WIDTH)) ||
- (tbl_type.is_dup !=
- !!(mac_flags & IEEE80211_TX_RC_DUP_DATA)) ||
- (tbl_type.ant_type != info->antenna_sel_tx) ||
- (!!(tx_rate & RATE_MCS_HT_MSK) !=
- !!(mac_flags & IEEE80211_TX_RC_MCS)) ||
- (!!(tx_rate & RATE_MCS_GF_MSK) !=
- !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD)) ||
- (rs_index != mac_index)) {
- IWL_DEBUG_RATE(priv,
- "initial rate %d does not match %d (0x%x)\n",
- mac_index, rs_index, tx_rate);
- /*
- * Since rates mis-match, the last LQ command may have failed.
- * After IWL_MISSED_RATE_MAX mis-matches, resync the uCode with
- * ... driver.
- */
- lq_sta->missed_rate_counter++;
- if (lq_sta->missed_rate_counter > IWL_MISSED_RATE_MAX) {
- lq_sta->missed_rate_counter = 0;
- iwl_legacy_send_lq_cmd(priv, ctx, &lq_sta->lq,
- CMD_ASYNC, false);
- }
- /* Regardless, ignore this status info for outdated rate */
- return;
- } else
- /* Rate did match, so reset the missed_rate_counter */
- lq_sta->missed_rate_counter = 0;
-
- /* Figure out if rate scale algorithm is in active or search table */
- if (iwl4965_table_type_matches(&tbl_type,
- &(lq_sta->lq_info[lq_sta->active_tbl]))) {
- curr_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
- other_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
- } else if (iwl4965_table_type_matches(&tbl_type,
- &lq_sta->lq_info[1 - lq_sta->active_tbl])) {
- curr_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
- other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
- } else {
- IWL_DEBUG_RATE(priv,
- "Neither active nor search matches tx rate\n");
- tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
- IWL_DEBUG_RATE(priv, "active- lq:%x, ant:%x, SGI:%d\n",
- tmp_tbl->lq_type, tmp_tbl->ant_type, tmp_tbl->is_SGI);
- tmp_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
- IWL_DEBUG_RATE(priv, "search- lq:%x, ant:%x, SGI:%d\n",
- tmp_tbl->lq_type, tmp_tbl->ant_type, tmp_tbl->is_SGI);
- IWL_DEBUG_RATE(priv, "actual- lq:%x, ant:%x, SGI:%d\n",
- tbl_type.lq_type, tbl_type.ant_type, tbl_type.is_SGI);
- /*
- * no matching table found, let's by-pass the data collection
- * and continue to perform rate scale to find the rate table
- */
- iwl4965_rs_stay_in_table(lq_sta, true);
- goto done;
- }
-
- /*
- * Updating the frame history depends on whether packets were
- * aggregated.
- *
- * For aggregation, all packets were transmitted at the same rate, the
- * first index into rate scale table.
- */
- if (info->flags & IEEE80211_TX_STAT_AMPDU) {
- tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
- iwl4965_rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type,
- &rs_index);
- iwl4965_rs_collect_tx_data(curr_tbl, rs_index,
- info->status.ampdu_len,
- info->status.ampdu_ack_len);
-
- /* Update success/fail counts if not searching for new mode */
- if (lq_sta->stay_in_tbl) {
- lq_sta->total_success += info->status.ampdu_ack_len;
- lq_sta->total_failed += (info->status.ampdu_len -
- info->status.ampdu_ack_len);
- }
- } else {
- /*
-		 * For legacy, update frame history for each Tx retry.
- */
- retries = info->status.rates[0].count - 1;
- /* HW doesn't send more than 15 retries */
- retries = min(retries, 15);
-
- /* The last transmission may have been successful */
- legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK);
- /* Collect data for each rate used during failed TX attempts */
- for (i = 0; i <= retries; ++i) {
- tx_rate = le32_to_cpu(table->rs_table[i].rate_n_flags);
- iwl4965_rs_get_tbl_info_from_mcs(tx_rate, priv->band,
- &tbl_type, &rs_index);
- /*
- * Only collect stats if retried rate is in the same RS
- * table as active/search.
- */
- if (iwl4965_table_type_matches(&tbl_type, curr_tbl))
- tmp_tbl = curr_tbl;
- else if (iwl4965_table_type_matches(&tbl_type,
- other_tbl))
- tmp_tbl = other_tbl;
- else
- continue;
- iwl4965_rs_collect_tx_data(tmp_tbl, rs_index, 1,
- i < retries ? 0 : legacy_success);
- }
-
- /* Update success/fail counts if not searching for new mode */
- if (lq_sta->stay_in_tbl) {
- lq_sta->total_success += legacy_success;
- lq_sta->total_failed += retries + (1 - legacy_success);
- }
- }
- /* The last TX rate is cached in lq_sta; it's set in if/else above */
- lq_sta->last_rate_n_flags = tx_rate;
-done:
- /* See if there's a better rate or modulation mode to try. */
- if (sta && sta->supp_rates[sband->band])
- iwl4965_rs_rate_scale_perform(priv, skb, sta, lq_sta);
-}
-
-/*
- * Begin a period of staying with a selected modulation mode.
- * Set "stay_in_tbl" flag to prevent any mode switches.
- * Set frame tx success limits according to legacy vs. high-throughput,
- * and reset overall (spanning all rates) tx success history statistics.
- * These control how long we stay using same modulation mode before
- * searching for a new mode.
- */
-static void iwl4965_rs_set_stay_in_table(struct iwl_priv *priv, u8 is_legacy,
- struct iwl_lq_sta *lq_sta)
-{
- IWL_DEBUG_RATE(priv, "we are staying in the same table\n");
- lq_sta->stay_in_tbl = 1; /* only place this gets set */
- if (is_legacy) {
- lq_sta->table_count_limit = IWL_LEGACY_TABLE_COUNT;
- lq_sta->max_failure_limit = IWL_LEGACY_FAILURE_LIMIT;
- lq_sta->max_success_limit = IWL_LEGACY_SUCCESS_LIMIT;
- } else {
- lq_sta->table_count_limit = IWL_NONE_LEGACY_TABLE_COUNT;
- lq_sta->max_failure_limit = IWL_NONE_LEGACY_FAILURE_LIMIT;
- lq_sta->max_success_limit = IWL_NONE_LEGACY_SUCCESS_LIMIT;
- }
- lq_sta->table_count = 0;
- lq_sta->total_failed = 0;
- lq_sta->total_success = 0;
- lq_sta->flush_timer = jiffies;
- lq_sta->action_counter = 0;
-}
-
-/*
- * Find correct throughput table for given mode of modulation
- */
-static void iwl4965_rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
- struct iwl_scale_tbl_info *tbl)
-{
- /* Used to choose among HT tables */
- s32 (*ht_tbl_pointer)[IWL_RATE_COUNT];
-
- /* Check for invalid LQ type */
- if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_Ht(tbl->lq_type))) {
- tbl->expected_tpt = expected_tpt_legacy;
- return;
- }
-
- /* Legacy rates have only one table */
- if (is_legacy(tbl->lq_type)) {
- tbl->expected_tpt = expected_tpt_legacy;
- return;
- }
-
- /* Choose among many HT tables depending on number of streams
- * (SISO/MIMO2), channel width (20/40), SGI, and aggregation
- * status */
- if (is_siso(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
- ht_tbl_pointer = expected_tpt_siso20MHz;
- else if (is_siso(tbl->lq_type))
- ht_tbl_pointer = expected_tpt_siso40MHz;
- else if (is_mimo2(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
- ht_tbl_pointer = expected_tpt_mimo2_20MHz;
- else /* if (is_mimo2(tbl->lq_type)) <-- must be true */
- ht_tbl_pointer = expected_tpt_mimo2_40MHz;
-
- if (!tbl->is_SGI && !lq_sta->is_agg) /* Normal */
- tbl->expected_tpt = ht_tbl_pointer[0];
- else if (tbl->is_SGI && !lq_sta->is_agg) /* SGI */
- tbl->expected_tpt = ht_tbl_pointer[1];
- else if (!tbl->is_SGI && lq_sta->is_agg) /* AGG */
- tbl->expected_tpt = ht_tbl_pointer[2];
- else /* AGG+SGI */
- tbl->expected_tpt = ht_tbl_pointer[3];
-}
-
-/*
- * Find starting rate for new "search" high-throughput mode of modulation.
- * Goal is to find lowest expected rate (under perfect conditions) that is
- * above the current measured throughput of "active" mode, to give new mode
- * a fair chance to prove itself without too many challenges.
- *
- * This gets called when transitioning to more aggressive modulation
- * (i.e. legacy to SISO or MIMO, or SISO to MIMO), as well as less aggressive
- * (i.e. MIMO to SISO). When moving to MIMO, bit rate will typically need
- * to decrease to match "active" throughput. When moving from MIMO to SISO,
- * bit rate will typically need to increase, but not if performance was bad.
- */
-static s32 iwl4965_rs_get_best_rate(struct iwl_priv *priv,
- struct iwl_lq_sta *lq_sta,
- struct iwl_scale_tbl_info *tbl, /* "search" */
- u16 rate_mask, s8 index)
-{
- /* "active" values */
- struct iwl_scale_tbl_info *active_tbl =
- &(lq_sta->lq_info[lq_sta->active_tbl]);
- s32 active_sr = active_tbl->win[index].success_ratio;
- s32 active_tpt = active_tbl->expected_tpt[index];
-
- /* expected "search" throughput */
- s32 *tpt_tbl = tbl->expected_tpt;
-
- s32 new_rate, high, low, start_hi;
- u16 high_low;
- s8 rate = index;
-
- new_rate = high = low = start_hi = IWL_RATE_INVALID;
-
- for (; ;) {
- high_low = iwl4965_rs_get_adjacent_rate(priv, rate, rate_mask,
- tbl->lq_type);
-
- low = high_low & 0xff;
- high = (high_low >> 8) & 0xff;
-
- /*
- * Lower the "search" bit rate, to give new "search" mode
- * approximately the same throughput as "active" if:
- *
- * 1) "Active" mode has been working modestly well (but not
- * great), and expected "search" throughput (under perfect
- * conditions) at candidate rate is above the actual
- * measured "active" throughput (but less than expected
- * "active" throughput under perfect conditions).
- * OR
- * 2) "Active" mode has been working perfectly or very well
- * and expected "search" throughput (under perfect
- * conditions) at candidate rate is above expected
- * "active" throughput (under perfect conditions).
- */
- if ((((100 * tpt_tbl[rate]) > lq_sta->last_tpt) &&
- ((active_sr > IWL_RATE_DECREASE_TH) &&
- (active_sr <= IWL_RATE_HIGH_TH) &&
- (tpt_tbl[rate] <= active_tpt))) ||
- ((active_sr >= IWL_RATE_SCALE_SWITCH) &&
- (tpt_tbl[rate] > active_tpt))) {
-
- /* (2nd or later pass)
- * If we've already tried to raise the rate, and are
- * now trying to lower it, use the higher rate. */
- if (start_hi != IWL_RATE_INVALID) {
- new_rate = start_hi;
- break;
- }
-
- new_rate = rate;
-
- /* Loop again with lower rate */
- if (low != IWL_RATE_INVALID)
- rate = low;
-
- /* Lower rate not available, use the original */
- else
- break;
-
- /* Else try to raise the "search" rate to match "active" */
- } else {
- /* (2nd or later pass)
- * If we've already tried to lower the rate, and are
- * now trying to raise it, use the lower rate. */
- if (new_rate != IWL_RATE_INVALID)
- break;
-
- /* Loop again with higher rate */
- else if (high != IWL_RATE_INVALID) {
- start_hi = high;
- rate = high;
-
- /* Higher rate not available, use the original */
- } else {
- new_rate = rate;
- break;
- }
- }
- }
-
- return new_rate;
-}
-
-/*
- * Set up search table for MIMO2
- */
-static int iwl4965_rs_switch_to_mimo2(struct iwl_priv *priv,
- struct iwl_lq_sta *lq_sta,
- struct ieee80211_conf *conf,
- struct ieee80211_sta *sta,
- struct iwl_scale_tbl_info *tbl, int index)
-{
- u16 rate_mask;
- s32 rate;
- s8 is_green = lq_sta->is_green;
- struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
- struct iwl_rxon_context *ctx = sta_priv->common.ctx;
-
- if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
- return -1;
-
- if (((sta->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >> 2)
- == WLAN_HT_CAP_SM_PS_STATIC)
- return -1;
-
- /* Need both Tx chains/antennas to support MIMO */
- if (priv->hw_params.tx_chains_num < 2)
- return -1;
-
- IWL_DEBUG_RATE(priv, "LQ: try to switch to MIMO2\n");
-
- tbl->lq_type = LQ_MIMO2;
- tbl->is_dup = lq_sta->is_dup;
- tbl->action = 0;
- tbl->max_search = IWL_MAX_SEARCH;
- rate_mask = lq_sta->active_mimo2_rate;
-
- if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
- tbl->is_ht40 = 1;
- else
- tbl->is_ht40 = 0;
-
- iwl4965_rs_set_expected_tpt_table(lq_sta, tbl);
-
- rate = iwl4965_rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index);
-
- IWL_DEBUG_RATE(priv, "LQ: MIMO2 best rate %d mask %X\n",
- rate, rate_mask);
- if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
- IWL_DEBUG_RATE(priv,
- "Can't switch with index %d rate mask %x\n",
- rate, rate_mask);
- return -1;
- }
- tbl->current_rate = iwl4965_rate_n_flags_from_tbl(priv,
- tbl, rate, is_green);
-
- IWL_DEBUG_RATE(priv, "LQ: Switch to new mcs %X index is green %X\n",
- tbl->current_rate, is_green);
- return 0;
-}
-
-/*
- * Set up search table for SISO
- */
-static int iwl4965_rs_switch_to_siso(struct iwl_priv *priv,
- struct iwl_lq_sta *lq_sta,
- struct ieee80211_conf *conf,
- struct ieee80211_sta *sta,
- struct iwl_scale_tbl_info *tbl, int index)
-{
- u16 rate_mask;
- u8 is_green = lq_sta->is_green;
- s32 rate;
- struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
- struct iwl_rxon_context *ctx = sta_priv->common.ctx;
-
- if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
- return -1;
-
- IWL_DEBUG_RATE(priv, "LQ: try to switch to SISO\n");
-
- tbl->is_dup = lq_sta->is_dup;
- tbl->lq_type = LQ_SISO;
- tbl->action = 0;
- tbl->max_search = IWL_MAX_SEARCH;
- rate_mask = lq_sta->active_siso_rate;
-
- if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
- tbl->is_ht40 = 1;
- else
- tbl->is_ht40 = 0;
-
- if (is_green)
-		tbl->is_SGI = 0; /* 11n spec: no SGI in SISO+Greenfield */
-
- iwl4965_rs_set_expected_tpt_table(lq_sta, tbl);
- rate = iwl4965_rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index);
-
- IWL_DEBUG_RATE(priv, "LQ: get best rate %d mask %X\n", rate, rate_mask);
- if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
- IWL_DEBUG_RATE(priv,
- "can not switch with index %d rate mask %x\n",
- rate, rate_mask);
- return -1;
- }
- tbl->current_rate = iwl4965_rate_n_flags_from_tbl(priv,
- tbl, rate, is_green);
- IWL_DEBUG_RATE(priv, "LQ: Switch to new mcs %X index is green %X\n",
- tbl->current_rate, is_green);
- return 0;
-}
-
-/*
- * Try to switch to new modulation mode from legacy
- */
-static int iwl4965_rs_move_legacy_other(struct iwl_priv *priv,
- struct iwl_lq_sta *lq_sta,
- struct ieee80211_conf *conf,
- struct ieee80211_sta *sta,
- int index)
-{
- struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
- struct iwl_scale_tbl_info *search_tbl =
- &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
- struct iwl_rate_scale_data *window = &(tbl->win[index]);
- u32 sz = (sizeof(struct iwl_scale_tbl_info) -
- (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
- u8 start_action;
- u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
- u8 tx_chains_num = priv->hw_params.tx_chains_num;
- int ret = 0;
- u8 update_search_tbl_counter = 0;
-
- tbl->action = IWL_LEGACY_SWITCH_SISO;
-
- start_action = tbl->action;
- for (; ;) {
- lq_sta->action_counter++;
- switch (tbl->action) {
- case IWL_LEGACY_SWITCH_ANTENNA1:
- case IWL_LEGACY_SWITCH_ANTENNA2:
- IWL_DEBUG_RATE(priv, "LQ: Legacy toggle Antenna\n");
-
- if ((tbl->action == IWL_LEGACY_SWITCH_ANTENNA1 &&
- tx_chains_num <= 1) ||
- (tbl->action == IWL_LEGACY_SWITCH_ANTENNA2 &&
- tx_chains_num <= 2))
- break;
-
- /* Don't change antenna if success has been great */
- if (window->success_ratio >= IWL_RS_GOOD_RATIO)
- break;
-
- /* Set up search table to try other antenna */
- memcpy(search_tbl, tbl, sz);
-
- if (iwl4965_rs_toggle_antenna(valid_tx_ant,
- &search_tbl->current_rate, search_tbl)) {
- update_search_tbl_counter = 1;
- iwl4965_rs_set_expected_tpt_table(lq_sta,
- search_tbl);
- goto out;
- }
- break;
- case IWL_LEGACY_SWITCH_SISO:
- IWL_DEBUG_RATE(priv, "LQ: Legacy switch to SISO\n");
-
- /* Set up search table to try SISO */
- memcpy(search_tbl, tbl, sz);
- search_tbl->is_SGI = 0;
- ret = iwl4965_rs_switch_to_siso(priv, lq_sta, conf, sta,
- search_tbl, index);
- if (!ret) {
- lq_sta->action_counter = 0;
- goto out;
- }
-
- break;
- case IWL_LEGACY_SWITCH_MIMO2_AB:
- case IWL_LEGACY_SWITCH_MIMO2_AC:
- case IWL_LEGACY_SWITCH_MIMO2_BC:
- IWL_DEBUG_RATE(priv, "LQ: Legacy switch to MIMO2\n");
-
- /* Set up search table to try MIMO */
- memcpy(search_tbl, tbl, sz);
- search_tbl->is_SGI = 0;
-
- if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AB)
- search_tbl->ant_type = ANT_AB;
- else if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AC)
- search_tbl->ant_type = ANT_AC;
- else
- search_tbl->ant_type = ANT_BC;
-
- if (!iwl4965_rs_is_valid_ant(valid_tx_ant,
- search_tbl->ant_type))
- break;
-
- ret = iwl4965_rs_switch_to_mimo2(priv, lq_sta,
- conf, sta,
- search_tbl, index);
- if (!ret) {
- lq_sta->action_counter = 0;
- goto out;
- }
- break;
- }
- tbl->action++;
- if (tbl->action > IWL_LEGACY_SWITCH_MIMO2_BC)
- tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
-
- if (tbl->action == start_action)
- break;
-
- }
- search_tbl->lq_type = LQ_NONE;
- return 0;
-
-out:
- lq_sta->search_better_tbl = 1;
- tbl->action++;
- if (tbl->action > IWL_LEGACY_SWITCH_MIMO2_BC)
- tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
- if (update_search_tbl_counter)
- search_tbl->action = tbl->action;
- return 0;
-
-}
-
-/*
- * Try to switch to new modulation mode from SISO
- */
-static int iwl4965_rs_move_siso_to_other(struct iwl_priv *priv,
- struct iwl_lq_sta *lq_sta,
- struct ieee80211_conf *conf,
- struct ieee80211_sta *sta, int index)
-{
- u8 is_green = lq_sta->is_green;
- struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
- struct iwl_scale_tbl_info *search_tbl =
- &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
- struct iwl_rate_scale_data *window = &(tbl->win[index]);
- struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
- u32 sz = (sizeof(struct iwl_scale_tbl_info) -
- (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
- u8 start_action;
- u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
- u8 tx_chains_num = priv->hw_params.tx_chains_num;
- u8 update_search_tbl_counter = 0;
- int ret;
-
- start_action = tbl->action;
-
- for (;;) {
- lq_sta->action_counter++;
- switch (tbl->action) {
- case IWL_SISO_SWITCH_ANTENNA1:
- case IWL_SISO_SWITCH_ANTENNA2:
- IWL_DEBUG_RATE(priv, "LQ: SISO toggle Antenna\n");
- if ((tbl->action == IWL_SISO_SWITCH_ANTENNA1 &&
- tx_chains_num <= 1) ||
- (tbl->action == IWL_SISO_SWITCH_ANTENNA2 &&
- tx_chains_num <= 2))
- break;
-
- if (window->success_ratio >= IWL_RS_GOOD_RATIO)
- break;
-
- memcpy(search_tbl, tbl, sz);
- if (iwl4965_rs_toggle_antenna(valid_tx_ant,
- &search_tbl->current_rate, search_tbl)) {
- update_search_tbl_counter = 1;
- goto out;
- }
- break;
- case IWL_SISO_SWITCH_MIMO2_AB:
- case IWL_SISO_SWITCH_MIMO2_AC:
- case IWL_SISO_SWITCH_MIMO2_BC:
- IWL_DEBUG_RATE(priv, "LQ: SISO switch to MIMO2\n");
- memcpy(search_tbl, tbl, sz);
- search_tbl->is_SGI = 0;
-
- if (tbl->action == IWL_SISO_SWITCH_MIMO2_AB)
- search_tbl->ant_type = ANT_AB;
- else if (tbl->action == IWL_SISO_SWITCH_MIMO2_AC)
- search_tbl->ant_type = ANT_AC;
- else
- search_tbl->ant_type = ANT_BC;
-
- if (!iwl4965_rs_is_valid_ant(valid_tx_ant,
- search_tbl->ant_type))
- break;
-
- ret = iwl4965_rs_switch_to_mimo2(priv, lq_sta,
- conf, sta,
- search_tbl, index);
- if (!ret)
- goto out;
- break;
- case IWL_SISO_SWITCH_GI:
- if (!tbl->is_ht40 && !(ht_cap->cap &
- IEEE80211_HT_CAP_SGI_20))
- break;
- if (tbl->is_ht40 && !(ht_cap->cap &
- IEEE80211_HT_CAP_SGI_40))
- break;
-
- IWL_DEBUG_RATE(priv, "LQ: SISO toggle SGI/NGI\n");
-
- memcpy(search_tbl, tbl, sz);
- if (is_green) {
- if (!tbl->is_SGI)
- break;
- else
- IWL_ERR(priv,
- "SGI was set in GF+SISO\n");
- }
- search_tbl->is_SGI = !tbl->is_SGI;
- iwl4965_rs_set_expected_tpt_table(lq_sta, search_tbl);
- if (tbl->is_SGI) {
- s32 tpt = lq_sta->last_tpt / 100;
- if (tpt >= search_tbl->expected_tpt[index])
- break;
- }
- search_tbl->current_rate =
- iwl4965_rate_n_flags_from_tbl(priv, search_tbl,
- index, is_green);
- update_search_tbl_counter = 1;
- goto out;
- }
- tbl->action++;
- if (tbl->action > IWL_SISO_SWITCH_GI)
- tbl->action = IWL_SISO_SWITCH_ANTENNA1;
-
- if (tbl->action == start_action)
- break;
- }
- search_tbl->lq_type = LQ_NONE;
- return 0;
-
- out:
- lq_sta->search_better_tbl = 1;
- tbl->action++;
- if (tbl->action > IWL_SISO_SWITCH_GI)
- tbl->action = IWL_SISO_SWITCH_ANTENNA1;
- if (update_search_tbl_counter)
- search_tbl->action = tbl->action;
-
- return 0;
-}
-
-/*
- * Try to switch to new modulation mode from MIMO2
- */
-static int iwl4965_rs_move_mimo2_to_other(struct iwl_priv *priv,
- struct iwl_lq_sta *lq_sta,
- struct ieee80211_conf *conf,
- struct ieee80211_sta *sta, int index)
-{
- s8 is_green = lq_sta->is_green;
- struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
- struct iwl_scale_tbl_info *search_tbl =
- &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
- struct iwl_rate_scale_data *window = &(tbl->win[index]);
- struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
- u32 sz = (sizeof(struct iwl_scale_tbl_info) -
- (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
- u8 start_action;
- u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
- u8 tx_chains_num = priv->hw_params.tx_chains_num;
- u8 update_search_tbl_counter = 0;
- int ret;
-
- start_action = tbl->action;
- for (;;) {
- lq_sta->action_counter++;
- switch (tbl->action) {
- case IWL_MIMO2_SWITCH_ANTENNA1:
- case IWL_MIMO2_SWITCH_ANTENNA2:
- IWL_DEBUG_RATE(priv, "LQ: MIMO2 toggle Antennas\n");
-
- if (tx_chains_num <= 2)
- break;
-
- if (window->success_ratio >= IWL_RS_GOOD_RATIO)
- break;
-
- memcpy(search_tbl, tbl, sz);
- if (iwl4965_rs_toggle_antenna(valid_tx_ant,
- &search_tbl->current_rate, search_tbl)) {
- update_search_tbl_counter = 1;
- goto out;
- }
- break;
- case IWL_MIMO2_SWITCH_SISO_A:
- case IWL_MIMO2_SWITCH_SISO_B:
- case IWL_MIMO2_SWITCH_SISO_C:
- IWL_DEBUG_RATE(priv, "LQ: MIMO2 switch to SISO\n");
-
- /* Set up new search table for SISO */
- memcpy(search_tbl, tbl, sz);
-
- if (tbl->action == IWL_MIMO2_SWITCH_SISO_A)
- search_tbl->ant_type = ANT_A;
- else if (tbl->action == IWL_MIMO2_SWITCH_SISO_B)
- search_tbl->ant_type = ANT_B;
- else
- search_tbl->ant_type = ANT_C;
-
- if (!iwl4965_rs_is_valid_ant(valid_tx_ant,
- search_tbl->ant_type))
- break;
-
- ret = iwl4965_rs_switch_to_siso(priv, lq_sta,
- conf, sta,
- search_tbl, index);
- if (!ret)
- goto out;
-
- break;
-
- case IWL_MIMO2_SWITCH_GI:
- if (!tbl->is_ht40 && !(ht_cap->cap &
- IEEE80211_HT_CAP_SGI_20))
- break;
- if (tbl->is_ht40 && !(ht_cap->cap &
- IEEE80211_HT_CAP_SGI_40))
- break;
-
- IWL_DEBUG_RATE(priv, "LQ: MIMO2 toggle SGI/NGI\n");
-
- /* Set up new search table for MIMO2 */
- memcpy(search_tbl, tbl, sz);
- search_tbl->is_SGI = !tbl->is_SGI;
- iwl4965_rs_set_expected_tpt_table(lq_sta, search_tbl);
- /*
- * If active table already uses the fastest possible
- * modulation (dual stream with short guard interval),
- * and it's working well, there's no need to look
- * for a better type of modulation!
- */
- if (tbl->is_SGI) {
- s32 tpt = lq_sta->last_tpt / 100;
- if (tpt >= search_tbl->expected_tpt[index])
- break;
- }
- search_tbl->current_rate =
- iwl4965_rate_n_flags_from_tbl(priv, search_tbl,
- index, is_green);
- update_search_tbl_counter = 1;
- goto out;
-
- }
- tbl->action++;
- if (tbl->action > IWL_MIMO2_SWITCH_GI)
- tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
-
- if (tbl->action == start_action)
- break;
- }
- search_tbl->lq_type = LQ_NONE;
- return 0;
- out:
- lq_sta->search_better_tbl = 1;
- tbl->action++;
- if (tbl->action > IWL_MIMO2_SWITCH_GI)
- tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
- if (update_search_tbl_counter)
- search_tbl->action = tbl->action;
-
- return 0;
-
-}
-
-/*
- * Check whether we should continue using same modulation mode, or
- * begin search for a new mode, based on:
- * 1) # tx successes or failures while using this mode
- * 2) # times calling this function
- * 3) elapsed time in this mode (not used, for now)
- */
-static void
-iwl4965_rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
-{
- struct iwl_scale_tbl_info *tbl;
- int i;
- int active_tbl;
- int flush_interval_passed = 0;
- struct iwl_priv *priv;
-
- priv = lq_sta->drv;
- active_tbl = lq_sta->active_tbl;
-
- tbl = &(lq_sta->lq_info[active_tbl]);
-
- /* If we've been disallowing search, see if we should now allow it */
- if (lq_sta->stay_in_tbl) {
-
- /* Elapsed time using current modulation mode */
- if (lq_sta->flush_timer)
- flush_interval_passed =
- time_after(jiffies,
- (unsigned long)(lq_sta->flush_timer +
- IWL_RATE_SCALE_FLUSH_INTVL));
-
- /*
- * Check if we should allow search for new modulation mode.
- * If many frames have failed or succeeded, or we've used
- * this same modulation for a long time, allow search, and
- * reset history stats that keep track of whether we should
- * allow a new search. Also (below) reset all bitmaps and
- * stats in active history.
- */
- if (force_search ||
- (lq_sta->total_failed > lq_sta->max_failure_limit) ||
- (lq_sta->total_success > lq_sta->max_success_limit) ||
- ((!lq_sta->search_better_tbl) && (lq_sta->flush_timer)
- && (flush_interval_passed))) {
-			IWL_DEBUG_RATE(priv, "LQ: stay is expired %d %d %d\n",
- lq_sta->total_failed,
- lq_sta->total_success,
- flush_interval_passed);
-
- /* Allow search for new mode */
- lq_sta->stay_in_tbl = 0; /* only place reset */
- lq_sta->total_failed = 0;
- lq_sta->total_success = 0;
- lq_sta->flush_timer = 0;
-
- /*
- * Else if we've used this modulation mode enough repetitions
- * (regardless of elapsed time or success/failure), reset
- * history bitmaps and rate-specific stats for all rates in
- * active table.
- */
- } else {
- lq_sta->table_count++;
- if (lq_sta->table_count >=
- lq_sta->table_count_limit) {
- lq_sta->table_count = 0;
-
- IWL_DEBUG_RATE(priv,
- "LQ: stay in table clear win\n");
- for (i = 0; i < IWL_RATE_COUNT; i++)
- iwl4965_rs_rate_scale_clear_window(
- &(tbl->win[i]));
- }
- }
-
- /* If transitioning to allow "search", reset all history
- * bitmaps and stats in active table (this will become the new
- * "search" table). */
- if (!lq_sta->stay_in_tbl) {
- for (i = 0; i < IWL_RATE_COUNT; i++)
- iwl4965_rs_rate_scale_clear_window(
- &(tbl->win[i]));
- }
- }
-}
-
-/*
- * Set up the rate table in uCode and
- * return rate_n_flags as used in the table.
- */
-static u32 iwl4965_rs_update_rate_tbl(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct iwl_lq_sta *lq_sta,
- struct iwl_scale_tbl_info *tbl,
- int index, u8 is_green)
-{
- u32 rate;
-
- /* Update uCode's rate table. */
- rate = iwl4965_rate_n_flags_from_tbl(priv, tbl, index, is_green);
- iwl4965_rs_fill_link_cmd(priv, lq_sta, rate);
- iwl_legacy_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_ASYNC, false);
-
- return rate;
-}
-
-/*
- * Do rate scaling and search for new modulation mode.
- */
-static void iwl4965_rs_rate_scale_perform(struct iwl_priv *priv,
- struct sk_buff *skb,
- struct ieee80211_sta *sta,
- struct iwl_lq_sta *lq_sta)
-{
- struct ieee80211_hw *hw = priv->hw;
- struct ieee80211_conf *conf = &hw->conf;
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- int low = IWL_RATE_INVALID;
- int high = IWL_RATE_INVALID;
- int index;
- int i;
- struct iwl_rate_scale_data *window = NULL;
- int current_tpt = IWL_INVALID_VALUE;
- int low_tpt = IWL_INVALID_VALUE;
- int high_tpt = IWL_INVALID_VALUE;
- u32 fail_count;
- s8 scale_action = 0;
- u16 rate_mask;
- u8 update_lq = 0;
- struct iwl_scale_tbl_info *tbl, *tbl1;
- u16 rate_scale_index_msk = 0;
- u32 rate;
- u8 is_green = 0;
- u8 active_tbl = 0;
- u8 done_search = 0;
- u16 high_low;
- s32 sr;
- u8 tid = MAX_TID_COUNT;
- struct iwl_tid_data *tid_data;
- struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
- struct iwl_rxon_context *ctx = sta_priv->common.ctx;
-
- IWL_DEBUG_RATE(priv, "rate scale calculate new rate for skb\n");
-
- /* Send management frames and NO_ACK data using lowest rate. */
- /* TODO: this could probably be improved.. */
- if (!ieee80211_is_data(hdr->frame_control) ||
- info->flags & IEEE80211_TX_CTL_NO_ACK)
- return;
-
- if (!sta || !lq_sta)
- return;
-
- lq_sta->supp_rates = sta->supp_rates[lq_sta->band];
-
- tid = iwl4965_rs_tl_add_packet(lq_sta, hdr);
- if ((tid != MAX_TID_COUNT) && (lq_sta->tx_agg_tid_en & (1 << tid))) {
- tid_data = &priv->stations[lq_sta->lq.sta_id].tid[tid];
- if (tid_data->agg.state == IWL_AGG_OFF)
- lq_sta->is_agg = 0;
- else
- lq_sta->is_agg = 1;
- } else
- lq_sta->is_agg = 0;
-
- /*
- * Select rate-scale / modulation-mode table to work with in
- * the rest of this function: "search" if searching for better
- * modulation mode, or "active" if doing rate scaling within a mode.
- */
- if (!lq_sta->search_better_tbl)
- active_tbl = lq_sta->active_tbl;
- else
- active_tbl = 1 - lq_sta->active_tbl;
-
- tbl = &(lq_sta->lq_info[active_tbl]);
- if (is_legacy(tbl->lq_type))
- lq_sta->is_green = 0;
- else
- lq_sta->is_green = iwl4965_rs_use_green(sta);
- is_green = lq_sta->is_green;
-
- /* current tx rate */
- index = lq_sta->last_txrate_idx;
-
- IWL_DEBUG_RATE(priv, "Rate scale index %d for type %d\n", index,
- tbl->lq_type);
-
- /* rates available for this association, and for modulation mode */
- rate_mask = iwl4965_rs_get_supported_rates(lq_sta, hdr, tbl->lq_type);
-
- IWL_DEBUG_RATE(priv, "mask 0x%04X\n", rate_mask);
-
- /* mask with station rate restriction */
- if (is_legacy(tbl->lq_type)) {
- if (lq_sta->band == IEEE80211_BAND_5GHZ)
- /* supp_rates has no CCK bits in A mode */
- rate_scale_index_msk = (u16) (rate_mask &
- (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE));
- else
- rate_scale_index_msk = (u16) (rate_mask &
- lq_sta->supp_rates);
-
- } else
- rate_scale_index_msk = rate_mask;
-
- if (!rate_scale_index_msk)
- rate_scale_index_msk = rate_mask;
-
- if (!((1 << index) & rate_scale_index_msk)) {
- IWL_ERR(priv, "Current Rate is not valid\n");
- if (lq_sta->search_better_tbl) {
- /* revert to active table if search table is not valid*/
- tbl->lq_type = LQ_NONE;
- lq_sta->search_better_tbl = 0;
- tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
- /* get "active" rate info */
- index = iwl4965_hwrate_to_plcp_idx(tbl->current_rate);
- rate = iwl4965_rs_update_rate_tbl(priv, ctx, lq_sta,
- tbl, index, is_green);
- }
- return;
- }
-
- /* Get expected throughput table and history window for current rate */
- if (!tbl->expected_tpt) {
- IWL_ERR(priv, "tbl->expected_tpt is NULL\n");
- return;
- }
-
- /* force user max rate if set by user */
- if ((lq_sta->max_rate_idx != -1) &&
- (lq_sta->max_rate_idx < index)) {
- index = lq_sta->max_rate_idx;
- update_lq = 1;
- window = &(tbl->win[index]);
- goto lq_update;
- }
-
- window = &(tbl->win[index]);
-
- /*
- * If there is not enough history to calculate actual average
- * throughput, keep analyzing results of more tx frames, without
- * changing rate or mode (bypass most of the rest of this function).
- * Set up new rate table in uCode only if old rate is not supported
- * in current association (use new rate found above).
- */
- fail_count = window->counter - window->success_counter;
- if ((fail_count < IWL_RATE_MIN_FAILURE_TH) &&
- (window->success_counter < IWL_RATE_MIN_SUCCESS_TH)) {
- IWL_DEBUG_RATE(priv, "LQ: still below TH. succ=%d total=%d "
- "for index %d\n",
- window->success_counter, window->counter, index);
-
- /* Can't calculate this yet; not enough history */
- window->average_tpt = IWL_INVALID_VALUE;
-
- /* Should we stay with this modulation mode,
- * or search for a new one? */
- iwl4965_rs_stay_in_table(lq_sta, false);
-
- goto out;
- }
- /* Else we have enough samples; calculate estimate of
- * actual average throughput */
- if (window->average_tpt != ((window->success_ratio *
- tbl->expected_tpt[index] + 64) / 128)) {
- IWL_ERR(priv,
- "expected_tpt should have been calculated by now\n");
- window->average_tpt = ((window->success_ratio *
- tbl->expected_tpt[index] + 64) / 128);
- }
-
- /* If we are searching for better modulation mode, check success. */
- if (lq_sta->search_better_tbl) {
- /* If good success, continue using the "search" mode;
- * no need to send new link quality command, since we're
- * continuing to use the setup that we've been trying. */
- if (window->average_tpt > lq_sta->last_tpt) {
-
- IWL_DEBUG_RATE(priv, "LQ: SWITCHING TO NEW TABLE "
- "suc=%d cur-tpt=%d old-tpt=%d\n",
- window->success_ratio,
- window->average_tpt,
- lq_sta->last_tpt);
-
- if (!is_legacy(tbl->lq_type))
- lq_sta->enable_counter = 1;
-
- /* Swap tables; "search" becomes "active" */
- lq_sta->active_tbl = active_tbl;
- current_tpt = window->average_tpt;
-
- /* Else poor success; go back to mode in "active" table */
- } else {
-
- IWL_DEBUG_RATE(priv, "LQ: GOING BACK TO THE OLD TABLE "
- "suc=%d cur-tpt=%d old-tpt=%d\n",
- window->success_ratio,
- window->average_tpt,
- lq_sta->last_tpt);
-
- /* Nullify "search" table */
- tbl->lq_type = LQ_NONE;
-
- /* Revert to "active" table */
- active_tbl = lq_sta->active_tbl;
- tbl = &(lq_sta->lq_info[active_tbl]);
-
- /* Revert to "active" rate and throughput info */
- index = iwl4965_hwrate_to_plcp_idx(tbl->current_rate);
- current_tpt = lq_sta->last_tpt;
-
- /* Need to set up a new rate table in uCode */
- update_lq = 1;
- }
-
- /* Either way, we've made a decision; modulation mode
- * search is done, allow rate adjustment next time. */
- lq_sta->search_better_tbl = 0;
- done_search = 1; /* Don't switch modes below! */
- goto lq_update;
- }
-
- /* (Else) not in search of better modulation mode, try for better
- * starting rate, while staying in this mode. */
- high_low = iwl4965_rs_get_adjacent_rate(priv, index,
- rate_scale_index_msk,
- tbl->lq_type);
- low = high_low & 0xff;
- high = (high_low >> 8) & 0xff;
-
-	/* If user set max rate, don't allow higher than the user constraint */
- if ((lq_sta->max_rate_idx != -1) &&
- (lq_sta->max_rate_idx < high))
- high = IWL_RATE_INVALID;
-
- sr = window->success_ratio;
-
- /* Collect measured throughputs for current and adjacent rates */
- current_tpt = window->average_tpt;
- if (low != IWL_RATE_INVALID)
- low_tpt = tbl->win[low].average_tpt;
- if (high != IWL_RATE_INVALID)
- high_tpt = tbl->win[high].average_tpt;
-
- scale_action = 0;
-
- /* Too many failures, decrease rate */
- if ((sr <= IWL_RATE_DECREASE_TH) || (current_tpt == 0)) {
- IWL_DEBUG_RATE(priv,
- "decrease rate because of low success_ratio\n");
- scale_action = -1;
-
- /* No throughput measured yet for adjacent rates; try increase. */
- } else if ((low_tpt == IWL_INVALID_VALUE) &&
- (high_tpt == IWL_INVALID_VALUE)) {
-
- if (high != IWL_RATE_INVALID && sr >= IWL_RATE_INCREASE_TH)
- scale_action = 1;
- else if (low != IWL_RATE_INVALID)
- scale_action = 0;
- }
-
- /* Both adjacent throughputs are measured, but neither one has better
- * throughput; we're using the best rate, don't change it! */
- else if ((low_tpt != IWL_INVALID_VALUE) &&
- (high_tpt != IWL_INVALID_VALUE) &&
- (low_tpt < current_tpt) &&
- (high_tpt < current_tpt))
- scale_action = 0;
-
- /* At least one adjacent rate's throughput is measured,
- * and may have better performance. */
- else {
- /* Higher adjacent rate's throughput is measured */
- if (high_tpt != IWL_INVALID_VALUE) {
- /* Higher rate has better throughput */
- if (high_tpt > current_tpt &&
- sr >= IWL_RATE_INCREASE_TH) {
- scale_action = 1;
- } else {
- scale_action = 0;
- }
-
- /* Lower adjacent rate's throughput is measured */
- } else if (low_tpt != IWL_INVALID_VALUE) {
- /* Lower rate has better throughput */
- if (low_tpt > current_tpt) {
- IWL_DEBUG_RATE(priv,
- "decrease rate because of low tpt\n");
- scale_action = -1;
- } else if (sr >= IWL_RATE_INCREASE_TH) {
- scale_action = 1;
- }
- }
- }
-
- /* Sanity check; asked for decrease, but success rate or throughput
- * has been good at old rate. Don't change it. */
- if ((scale_action == -1) && (low != IWL_RATE_INVALID) &&
- ((sr > IWL_RATE_HIGH_TH) ||
- (current_tpt > (100 * tbl->expected_tpt[low]))))
- scale_action = 0;
-
- switch (scale_action) {
- case -1:
- /* Decrease starting rate, update uCode's rate table */
- if (low != IWL_RATE_INVALID) {
- update_lq = 1;
- index = low;
- }
-
- break;
- case 1:
- /* Increase starting rate, update uCode's rate table */
- if (high != IWL_RATE_INVALID) {
- update_lq = 1;
- index = high;
- }
-
- break;
- case 0:
- /* No change */
- default:
- break;
- }
-
- IWL_DEBUG_RATE(priv, "choose rate scale index %d action %d low %d "
- "high %d type %d\n",
- index, scale_action, low, high, tbl->lq_type);
-
-lq_update:
- /* Replace uCode's rate table for the destination station. */
- if (update_lq)
- rate = iwl4965_rs_update_rate_tbl(priv, ctx, lq_sta,
- tbl, index, is_green);
-
- /* Should we stay with this modulation mode,
- * or search for a new one? */
- iwl4965_rs_stay_in_table(lq_sta, false);
-
- /*
- * Search for new modulation mode if we're:
- * 1) Not changing rates right now
- * 2) Not just finishing up a search
- * 3) Allowing a new search
- */
- if (!update_lq && !done_search &&
- !lq_sta->stay_in_tbl && window->counter) {
- /* Save current throughput to compare with "search" throughput*/
- lq_sta->last_tpt = current_tpt;
-
- /* Select a new "search" modulation mode to try.
- * If one is found, set up the new "search" table. */
- if (is_legacy(tbl->lq_type))
- iwl4965_rs_move_legacy_other(priv, lq_sta,
- conf, sta, index);
- else if (is_siso(tbl->lq_type))
- iwl4965_rs_move_siso_to_other(priv, lq_sta,
- conf, sta, index);
- else /* (is_mimo2(tbl->lq_type)) */
- iwl4965_rs_move_mimo2_to_other(priv, lq_sta,
- conf, sta, index);
-
- /* If new "search" mode was selected, set up in uCode table */
- if (lq_sta->search_better_tbl) {
- /* Access the "search" table, clear its history. */
- tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
- for (i = 0; i < IWL_RATE_COUNT; i++)
- iwl4965_rs_rate_scale_clear_window(
- &(tbl->win[i]));
-
- /* Use new "search" start rate */
- index = iwl4965_hwrate_to_plcp_idx(tbl->current_rate);
-
- IWL_DEBUG_RATE(priv,
- "Switch current mcs: %X index: %d\n",
- tbl->current_rate, index);
- iwl4965_rs_fill_link_cmd(priv, lq_sta,
- tbl->current_rate);
- iwl_legacy_send_lq_cmd(priv, ctx,
- &lq_sta->lq, CMD_ASYNC, false);
- } else
- done_search = 1;
- }
-
- if (done_search && !lq_sta->stay_in_tbl) {
- /* If the "active" (non-search) mode was legacy,
- * and we've tried switching antennas,
- * but we haven't been able to try HT modes (not available),
- * stay with best antenna legacy modulation for a while
- * before next round of mode comparisons. */
- tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]);
- if (is_legacy(tbl1->lq_type) && !conf_is_ht(conf) &&
- lq_sta->action_counter > tbl1->max_search) {
- IWL_DEBUG_RATE(priv, "LQ: STAY in legacy table\n");
- iwl4965_rs_set_stay_in_table(priv, 1, lq_sta);
- }
-
- /* If we're in an HT mode, and all 3 mode switch actions
- * have been tried and compared, stay in this best modulation
- * mode for a while before next round of mode comparisons. */
- if (lq_sta->enable_counter &&
- (lq_sta->action_counter >= tbl1->max_search)) {
- if ((lq_sta->last_tpt > IWL_AGG_TPT_THREHOLD) &&
- (lq_sta->tx_agg_tid_en & (1 << tid)) &&
- (tid != MAX_TID_COUNT)) {
- tid_data =
- &priv->stations[lq_sta->lq.sta_id].tid[tid];
- if (tid_data->agg.state == IWL_AGG_OFF) {
- IWL_DEBUG_RATE(priv,
- "try to aggregate tid %d\n",
- tid);
- iwl4965_rs_tl_turn_on_agg(priv, tid,
- lq_sta, sta);
- }
- }
- iwl4965_rs_set_stay_in_table(priv, 0, lq_sta);
- }
- }
-
-out:
- tbl->current_rate = iwl4965_rate_n_flags_from_tbl(priv, tbl,
- index, is_green);
- i = index;
- lq_sta->last_txrate_idx = i;
-}
-
-/**
- * iwl4965_rs_initialize_lq - Initialize a station's hardware rate table
- *
- * The uCode's station table contains a table of fallback rates
- * for automatic fallback during transmission.
- *
- * NOTE: This sets up a default set of values. These will be replaced later
- * if the driver's iwl-4965-rs rate scaling algorithm is used, instead of
- * rc80211_simple.
- *
- * NOTE: Run REPLY_ADD_STA command to set up station table entry, before
- * calling this function (which runs REPLY_TX_LINK_QUALITY_CMD,
- * which requires station table entry to exist).
- */
-static void iwl4965_rs_initialize_lq(struct iwl_priv *priv,
- struct ieee80211_conf *conf,
- struct ieee80211_sta *sta,
- struct iwl_lq_sta *lq_sta)
-{
- struct iwl_scale_tbl_info *tbl;
- int rate_idx;
- int i;
- u32 rate;
- u8 use_green = iwl4965_rs_use_green(sta);
- u8 active_tbl = 0;
- u8 valid_tx_ant;
- struct iwl_station_priv *sta_priv;
- struct iwl_rxon_context *ctx;
-
- if (!sta || !lq_sta)
- return;
-
- sta_priv = (void *)sta->drv_priv;
- ctx = sta_priv->common.ctx;
-
- i = lq_sta->last_txrate_idx;
-
- valid_tx_ant = priv->hw_params.valid_tx_ant;
-
- if (!lq_sta->search_better_tbl)
- active_tbl = lq_sta->active_tbl;
- else
- active_tbl = 1 - lq_sta->active_tbl;
-
- tbl = &(lq_sta->lq_info[active_tbl]);
-
- if ((i < 0) || (i >= IWL_RATE_COUNT))
- i = 0;
-
- rate = iwlegacy_rates[i].plcp;
- tbl->ant_type = iwl4965_first_antenna(valid_tx_ant);
- rate |= tbl->ant_type << RATE_MCS_ANT_POS;
-
- if (i >= IWL_FIRST_CCK_RATE && i <= IWL_LAST_CCK_RATE)
- rate |= RATE_MCS_CCK_MSK;
-
- iwl4965_rs_get_tbl_info_from_mcs(rate, priv->band, tbl, &rate_idx);
- if (!iwl4965_rs_is_valid_ant(valid_tx_ant, tbl->ant_type))
- iwl4965_rs_toggle_antenna(valid_tx_ant, &rate, tbl);
-
- rate = iwl4965_rate_n_flags_from_tbl(priv, tbl, rate_idx, use_green);
- tbl->current_rate = rate;
- iwl4965_rs_set_expected_tpt_table(lq_sta, tbl);
- iwl4965_rs_fill_link_cmd(NULL, lq_sta, rate);
- priv->stations[lq_sta->lq.sta_id].lq = &lq_sta->lq;
- iwl_legacy_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_SYNC, true);
-}
-
-static void
-iwl4965_rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
- struct ieee80211_tx_rate_control *txrc)
-{
-
- struct sk_buff *skb = txrc->skb;
- struct ieee80211_supported_band *sband = txrc->sband;
- struct iwl_priv *priv __maybe_unused = (struct iwl_priv *)priv_r;
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct iwl_lq_sta *lq_sta = priv_sta;
- int rate_idx;
-
- IWL_DEBUG_RATE_LIMIT(priv, "rate scale calculate new rate for skb\n");
-
- /* Get max rate if user set max rate */
- if (lq_sta) {
- lq_sta->max_rate_idx = txrc->max_rate_idx;
- if ((sband->band == IEEE80211_BAND_5GHZ) &&
- (lq_sta->max_rate_idx != -1))
- lq_sta->max_rate_idx += IWL_FIRST_OFDM_RATE;
- if ((lq_sta->max_rate_idx < 0) ||
- (lq_sta->max_rate_idx >= IWL_RATE_COUNT))
- lq_sta->max_rate_idx = -1;
- }
-
-	/* Treat uninitialized rate scaling data the same as non-existent. */
- if (lq_sta && !lq_sta->drv) {
- IWL_DEBUG_RATE(priv, "Rate scaling not initialized yet.\n");
- priv_sta = NULL;
- }
-
- /* Send management frames and NO_ACK data using lowest rate. */
- if (rate_control_send_low(sta, priv_sta, txrc))
- return;
-
- if (!lq_sta)
- return;
-
- rate_idx = lq_sta->last_txrate_idx;
-
- if (lq_sta->last_rate_n_flags & RATE_MCS_HT_MSK) {
- rate_idx -= IWL_FIRST_OFDM_RATE;
-		/* 6M and 9M share the same MCS index */
- rate_idx = (rate_idx > 0) ? (rate_idx - 1) : 0;
- if (iwl4965_rs_extract_rate(lq_sta->last_rate_n_flags) >=
- IWL_RATE_MIMO2_6M_PLCP)
- rate_idx = rate_idx + MCS_INDEX_PER_STREAM;
- info->control.rates[0].flags = IEEE80211_TX_RC_MCS;
- if (lq_sta->last_rate_n_flags & RATE_MCS_SGI_MSK)
- info->control.rates[0].flags |=
- IEEE80211_TX_RC_SHORT_GI;
- if (lq_sta->last_rate_n_flags & RATE_MCS_DUP_MSK)
- info->control.rates[0].flags |=
- IEEE80211_TX_RC_DUP_DATA;
- if (lq_sta->last_rate_n_flags & RATE_MCS_HT40_MSK)
- info->control.rates[0].flags |=
- IEEE80211_TX_RC_40_MHZ_WIDTH;
- if (lq_sta->last_rate_n_flags & RATE_MCS_GF_MSK)
- info->control.rates[0].flags |=
- IEEE80211_TX_RC_GREEN_FIELD;
- } else {
- /* Check for invalid rates */
- if ((rate_idx < 0) || (rate_idx >= IWL_RATE_COUNT_LEGACY) ||
- ((sband->band == IEEE80211_BAND_5GHZ) &&
- (rate_idx < IWL_FIRST_OFDM_RATE)))
- rate_idx = rate_lowest_index(sband, sta);
- /* On valid 5 GHz rate, adjust index */
- else if (sband->band == IEEE80211_BAND_5GHZ)
- rate_idx -= IWL_FIRST_OFDM_RATE;
- info->control.rates[0].flags = 0;
- }
- info->control.rates[0].idx = rate_idx;
-
-}
-
-static void *iwl4965_rs_alloc_sta(void *priv_rate, struct ieee80211_sta *sta,
- gfp_t gfp)
-{
- struct iwl_lq_sta *lq_sta;
- struct iwl_station_priv *sta_priv =
- (struct iwl_station_priv *) sta->drv_priv;
- struct iwl_priv *priv;
-
- priv = (struct iwl_priv *)priv_rate;
- IWL_DEBUG_RATE(priv, "create station rate scale window\n");
-
- lq_sta = &sta_priv->lq_sta;
-
- return lq_sta;
-}
-
-/*
- * Called after adding a new station to initialize rate scaling
- */
-void
-iwl4965_rs_rate_init(struct iwl_priv *priv,
- struct ieee80211_sta *sta,
- u8 sta_id)
-{
- int i, j;
- struct ieee80211_hw *hw = priv->hw;
- struct ieee80211_conf *conf = &priv->hw->conf;
- struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
- struct iwl_station_priv *sta_priv;
- struct iwl_lq_sta *lq_sta;
- struct ieee80211_supported_band *sband;
-
- sta_priv = (struct iwl_station_priv *) sta->drv_priv;
- lq_sta = &sta_priv->lq_sta;
- sband = hw->wiphy->bands[conf->channel->band];
-
-
- lq_sta->lq.sta_id = sta_id;
-
- for (j = 0; j < LQ_SIZE; j++)
- for (i = 0; i < IWL_RATE_COUNT; i++)
- iwl4965_rs_rate_scale_clear_window(
- &lq_sta->lq_info[j].win[i]);
-
- lq_sta->flush_timer = 0;
- lq_sta->supp_rates = sta->supp_rates[sband->band];
- for (j = 0; j < LQ_SIZE; j++)
- for (i = 0; i < IWL_RATE_COUNT; i++)
- iwl4965_rs_rate_scale_clear_window(
- &lq_sta->lq_info[j].win[i]);
-
- IWL_DEBUG_RATE(priv, "LQ:"
- "*** rate scale station global init for station %d ***\n",
- sta_id);
-	/* TODO: what is a good starting rate for STA? About the middle? Maybe
-	 * not the lowest or the highest rate. Could consider using RSSI from
-	 * previous packets? Need to have IEEE 802.1X auth succeed immediately
-	 * after assoc. */
-
- lq_sta->is_dup = 0;
- lq_sta->max_rate_idx = -1;
- lq_sta->missed_rate_counter = IWL_MISSED_RATE_MAX;
- lq_sta->is_green = iwl4965_rs_use_green(sta);
- lq_sta->active_legacy_rate = priv->active_rate & ~(0x1000);
- lq_sta->band = priv->band;
- /*
- * active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3),
- * supp_rates[] does not; shift to convert format, force 9 MBits off.
- */
- lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1;
- lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1;
- lq_sta->active_siso_rate &= ~((u16)0x2);
- lq_sta->active_siso_rate <<= IWL_FIRST_OFDM_RATE;
-
- /* Same here */
- lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1;
- lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1;
- lq_sta->active_mimo2_rate &= ~((u16)0x2);
- lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE;
-
- /* These values will be overridden later */
- lq_sta->lq.general_params.single_stream_ant_msk =
- iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
- lq_sta->lq.general_params.dual_stream_ant_msk =
- priv->hw_params.valid_tx_ant &
- ~iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
- if (!lq_sta->lq.general_params.dual_stream_ant_msk) {
- lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB;
- } else if (iwl4965_num_of_ant(priv->hw_params.valid_tx_ant) == 2) {
- lq_sta->lq.general_params.dual_stream_ant_msk =
- priv->hw_params.valid_tx_ant;
- }
-
- /* as default allow aggregation for all tids */
- lq_sta->tx_agg_tid_en = IWL_AGG_ALL_TID;
- lq_sta->drv = priv;
-
- /* Set last_txrate_idx to lowest rate */
- lq_sta->last_txrate_idx = rate_lowest_index(sband, sta);
- if (sband->band == IEEE80211_BAND_5GHZ)
- lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
- lq_sta->is_agg = 0;
-
-#ifdef CONFIG_MAC80211_DEBUGFS
- lq_sta->dbg_fixed_rate = 0;
-#endif
-
- iwl4965_rs_initialize_lq(priv, conf, sta, lq_sta);
-}
-
-static void iwl4965_rs_fill_link_cmd(struct iwl_priv *priv,
- struct iwl_lq_sta *lq_sta, u32 new_rate)
-{
- struct iwl_scale_tbl_info tbl_type;
- int index = 0;
- int rate_idx;
- int repeat_rate = 0;
- u8 ant_toggle_cnt = 0;
- u8 use_ht_possible = 1;
- u8 valid_tx_ant = 0;
- struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
-
- /* Override starting rate (index 0) if needed for debug purposes */
- iwl4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
-
- /* Interpret new_rate (rate_n_flags) */
- iwl4965_rs_get_tbl_info_from_mcs(new_rate, lq_sta->band,
- &tbl_type, &rate_idx);
-
- /* How many times should we repeat the initial rate? */
- if (is_legacy(tbl_type.lq_type)) {
- ant_toggle_cnt = 1;
- repeat_rate = IWL_NUMBER_TRY;
- } else {
- repeat_rate = IWL_HT_NUMBER_TRY;
- }
-
- lq_cmd->general_params.mimo_delimiter =
- is_mimo(tbl_type.lq_type) ? 1 : 0;
-
- /* Fill 1st table entry (index 0) */
- lq_cmd->rs_table[index].rate_n_flags = cpu_to_le32(new_rate);
-
- if (iwl4965_num_of_ant(tbl_type.ant_type) == 1) {
- lq_cmd->general_params.single_stream_ant_msk =
- tbl_type.ant_type;
- } else if (iwl4965_num_of_ant(tbl_type.ant_type) == 2) {
- lq_cmd->general_params.dual_stream_ant_msk =
- tbl_type.ant_type;
- } /* otherwise we don't modify the existing value */
-
- index++;
- repeat_rate--;
- if (priv)
- valid_tx_ant = priv->hw_params.valid_tx_ant;
-
- /* Fill rest of rate table */
- while (index < LINK_QUAL_MAX_RETRY_NUM) {
- /* Repeat initial/next rate.
- * For legacy IWL_NUMBER_TRY == 1, this loop will not execute.
- * For HT IWL_HT_NUMBER_TRY == 3, this executes twice. */
- while (repeat_rate > 0 && (index < LINK_QUAL_MAX_RETRY_NUM)) {
- if (is_legacy(tbl_type.lq_type)) {
- if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
- ant_toggle_cnt++;
- else if (priv &&
- iwl4965_rs_toggle_antenna(valid_tx_ant,
- &new_rate, &tbl_type))
- ant_toggle_cnt = 1;
- }
-
- /* Override next rate if needed for debug purposes */
- iwl4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
-
- /* Fill next table entry */
- lq_cmd->rs_table[index].rate_n_flags =
- cpu_to_le32(new_rate);
- repeat_rate--;
- index++;
- }
-
- iwl4965_rs_get_tbl_info_from_mcs(new_rate,
- lq_sta->band, &tbl_type,
- &rate_idx);
-
- /* Indicate to uCode which entries might be MIMO.
- * If initial rate was MIMO, this will finally end up
- * as (IWL_HT_NUMBER_TRY * 2), after 2nd pass, otherwise 0. */
- if (is_mimo(tbl_type.lq_type))
- lq_cmd->general_params.mimo_delimiter = index;
-
- /* Get next rate */
- new_rate = iwl4965_rs_get_lower_rate(lq_sta,
- &tbl_type, rate_idx,
- use_ht_possible);
-
- /* How many times should we repeat the next rate? */
- if (is_legacy(tbl_type.lq_type)) {
- if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
- ant_toggle_cnt++;
- else if (priv &&
- iwl4965_rs_toggle_antenna(valid_tx_ant,
- &new_rate, &tbl_type))
- ant_toggle_cnt = 1;
-
- repeat_rate = IWL_NUMBER_TRY;
- } else {
- repeat_rate = IWL_HT_NUMBER_TRY;
- }
-
- /* Don't allow HT rates after next pass.
- * iwl4965_rs_get_lower_rate() will change type to LQ_A or LQ_G. */
- use_ht_possible = 0;
-
- /* Override next rate if needed for debug purposes */
- iwl4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
-
- /* Fill next table entry */
- lq_cmd->rs_table[index].rate_n_flags = cpu_to_le32(new_rate);
-
- index++;
- repeat_rate--;
- }
-
- lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
- lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
-
- lq_cmd->agg_params.agg_time_limit =
- cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
-}
-
-static void
-*iwl4965_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
-{
- return hw->priv;
-}
-/* rate scale requires free function to be implemented */
-static void iwl4965_rs_free(void *priv_rate)
-{
- return;
-}
-
-static void iwl4965_rs_free_sta(void *priv_r, struct ieee80211_sta *sta,
- void *priv_sta)
-{
- struct iwl_priv *priv __maybe_unused = priv_r;
-
- IWL_DEBUG_RATE(priv, "enter\n");
- IWL_DEBUG_RATE(priv, "leave\n");
-}
-
-
-#ifdef CONFIG_MAC80211_DEBUGFS
-static int iwl4965_open_file_generic(struct inode *inode, struct file *file)
-{
- file->private_data = inode->i_private;
- return 0;
-}
-static void iwl4965_rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
- u32 *rate_n_flags, int index)
-{
- struct iwl_priv *priv;
- u8 valid_tx_ant;
- u8 ant_sel_tx;
-
- priv = lq_sta->drv;
- valid_tx_ant = priv->hw_params.valid_tx_ant;
- if (lq_sta->dbg_fixed_rate) {
- ant_sel_tx =
- ((lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK)
- >> RATE_MCS_ANT_POS);
- if ((valid_tx_ant & ant_sel_tx) == ant_sel_tx) {
- *rate_n_flags = lq_sta->dbg_fixed_rate;
- IWL_DEBUG_RATE(priv, "Fixed rate ON\n");
- } else {
- lq_sta->dbg_fixed_rate = 0;
- IWL_ERR(priv,
- "Invalid antenna selection 0x%X, Valid is 0x%X\n",
- ant_sel_tx, valid_tx_ant);
- IWL_DEBUG_RATE(priv, "Fixed rate OFF\n");
- }
- } else {
- IWL_DEBUG_RATE(priv, "Fixed rate OFF\n");
- }
-}
-
-static ssize_t iwl4965_rs_sta_dbgfs_scale_table_write(struct file *file,
- const char __user *user_buf, size_t count, loff_t *ppos)
-{
- struct iwl_lq_sta *lq_sta = file->private_data;
- struct iwl_priv *priv;
- char buf[64];
- size_t buf_size;
- u32 parsed_rate;
- struct iwl_station_priv *sta_priv =
- container_of(lq_sta, struct iwl_station_priv, lq_sta);
- struct iwl_rxon_context *ctx = sta_priv->common.ctx;
-
- priv = lq_sta->drv;
- memset(buf, 0, sizeof(buf));
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
-
- if (sscanf(buf, "%x", &parsed_rate) == 1)
- lq_sta->dbg_fixed_rate = parsed_rate;
- else
- lq_sta->dbg_fixed_rate = 0;
-
- lq_sta->active_legacy_rate = 0x0FFF; /* 1 - 54 MBits, includes CCK */
- lq_sta->active_siso_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
- lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
-
- IWL_DEBUG_RATE(priv, "sta_id %d rate 0x%X\n",
- lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate);
-
- if (lq_sta->dbg_fixed_rate) {
- iwl4965_rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate);
- iwl_legacy_send_lq_cmd(lq_sta->drv, ctx, &lq_sta->lq, CMD_ASYNC,
- false);
- }
-
- return count;
-}
-
-static ssize_t iwl4965_rs_sta_dbgfs_scale_table_read(struct file *file,
- char __user *user_buf, size_t count, loff_t *ppos)
-{
- char *buff;
- int desc = 0;
- int i = 0;
- int index = 0;
- ssize_t ret;
-
- struct iwl_lq_sta *lq_sta = file->private_data;
- struct iwl_priv *priv;
- struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
-
- priv = lq_sta->drv;
- buff = kmalloc(1024, GFP_KERNEL);
- if (!buff)
- return -ENOMEM;
-
- desc += sprintf(buff+desc, "sta_id %d\n", lq_sta->lq.sta_id);
- desc += sprintf(buff+desc, "failed=%d success=%d rate=0%X\n",
- lq_sta->total_failed, lq_sta->total_success,
- lq_sta->active_legacy_rate);
- desc += sprintf(buff+desc, "fixed rate 0x%X\n",
- lq_sta->dbg_fixed_rate);
- desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n",
- (priv->hw_params.valid_tx_ant & ANT_A) ? "ANT_A," : "",
- (priv->hw_params.valid_tx_ant & ANT_B) ? "ANT_B," : "",
- (priv->hw_params.valid_tx_ant & ANT_C) ? "ANT_C" : "");
- desc += sprintf(buff+desc, "lq type %s\n",
- (is_legacy(tbl->lq_type)) ? "legacy" : "HT");
- if (is_Ht(tbl->lq_type)) {
- desc += sprintf(buff+desc, " %s",
- (is_siso(tbl->lq_type)) ? "SISO" : "MIMO2");
- desc += sprintf(buff+desc, " %s",
- (tbl->is_ht40) ? "40MHz" : "20MHz");
- desc += sprintf(buff+desc, " %s %s %s\n",
- (tbl->is_SGI) ? "SGI" : "",
- (lq_sta->is_green) ? "GF enabled" : "",
- (lq_sta->is_agg) ? "AGG on" : "");
- }
- desc += sprintf(buff+desc, "last tx rate=0x%X\n",
- lq_sta->last_rate_n_flags);
- desc += sprintf(buff+desc, "general:"
-		"flags=0x%X mimo-d=%d s-ant=0x%x d-ant=0x%x\n",
- lq_sta->lq.general_params.flags,
- lq_sta->lq.general_params.mimo_delimiter,
- lq_sta->lq.general_params.single_stream_ant_msk,
- lq_sta->lq.general_params.dual_stream_ant_msk);
-
- desc += sprintf(buff+desc, "agg:"
- "time_limit=%d dist_start_th=%d frame_cnt_limit=%d\n",
- le16_to_cpu(lq_sta->lq.agg_params.agg_time_limit),
- lq_sta->lq.agg_params.agg_dis_start_th,
- lq_sta->lq.agg_params.agg_frame_cnt_limit);
-
- desc += sprintf(buff+desc,
- "Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n",
- lq_sta->lq.general_params.start_rate_index[0],
- lq_sta->lq.general_params.start_rate_index[1],
- lq_sta->lq.general_params.start_rate_index[2],
- lq_sta->lq.general_params.start_rate_index[3]);
-
- for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
- index = iwl4965_hwrate_to_plcp_idx(
- le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags));
- if (is_legacy(tbl->lq_type)) {
- desc += sprintf(buff+desc, " rate[%d] 0x%X %smbps\n",
- i,
- le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags),
- iwl_rate_mcs[index].mbps);
- } else {
- desc += sprintf(buff+desc,
- " rate[%d] 0x%X %smbps (%s)\n",
- i,
- le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags),
- iwl_rate_mcs[index].mbps, iwl_rate_mcs[index].mcs);
- }
- }
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
- kfree(buff);
- return ret;
-}
-
-static const struct file_operations rs_sta_dbgfs_scale_table_ops = {
- .write = iwl4965_rs_sta_dbgfs_scale_table_write,
- .read = iwl4965_rs_sta_dbgfs_scale_table_read,
- .open = iwl4965_open_file_generic,
- .llseek = default_llseek,
-};
-static ssize_t iwl4965_rs_sta_dbgfs_stats_table_read(struct file *file,
- char __user *user_buf, size_t count, loff_t *ppos)
-{
- char *buff;
- int desc = 0;
- int i, j;
- ssize_t ret;
-
- struct iwl_lq_sta *lq_sta = file->private_data;
-
- buff = kmalloc(1024, GFP_KERNEL);
- if (!buff)
- return -ENOMEM;
-
- for (i = 0; i < LQ_SIZE; i++) {
- desc += sprintf(buff+desc,
- "%s type=%d SGI=%d HT40=%d DUP=%d GF=%d\n"
- "rate=0x%X\n",
- lq_sta->active_tbl == i ? "*" : "x",
- lq_sta->lq_info[i].lq_type,
- lq_sta->lq_info[i].is_SGI,
- lq_sta->lq_info[i].is_ht40,
- lq_sta->lq_info[i].is_dup,
- lq_sta->is_green,
- lq_sta->lq_info[i].current_rate);
- for (j = 0; j < IWL_RATE_COUNT; j++) {
- desc += sprintf(buff+desc,
- "counter=%d success=%d %%=%d\n",
- lq_sta->lq_info[i].win[j].counter,
- lq_sta->lq_info[i].win[j].success_counter,
- lq_sta->lq_info[i].win[j].success_ratio);
- }
- }
- ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
- kfree(buff);
- return ret;
-}
-
-static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
- .read = iwl4965_rs_sta_dbgfs_stats_table_read,
- .open = iwl4965_open_file_generic,
- .llseek = default_llseek,
-};
-
-static ssize_t iwl4965_rs_sta_dbgfs_rate_scale_data_read(struct file *file,
- char __user *user_buf, size_t count, loff_t *ppos)
-{
- char buff[120];
- int desc = 0;
- ssize_t ret;
-
- struct iwl_lq_sta *lq_sta = file->private_data;
- struct iwl_priv *priv;
- struct iwl_scale_tbl_info *tbl = &lq_sta->lq_info[lq_sta->active_tbl];
-
- priv = lq_sta->drv;
-
- if (is_Ht(tbl->lq_type))
- desc += sprintf(buff+desc,
- "Bit Rate= %d Mb/s\n",
- tbl->expected_tpt[lq_sta->last_txrate_idx]);
- else
- desc += sprintf(buff+desc,
- "Bit Rate= %d Mb/s\n",
- iwlegacy_rates[lq_sta->last_txrate_idx].ieee >> 1);
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
- return ret;
-}
-
-static const struct file_operations rs_sta_dbgfs_rate_scale_data_ops = {
- .read = iwl4965_rs_sta_dbgfs_rate_scale_data_read,
- .open = iwl4965_open_file_generic,
- .llseek = default_llseek,
-};
-
-static void iwl4965_rs_add_debugfs(void *priv, void *priv_sta,
- struct dentry *dir)
-{
- struct iwl_lq_sta *lq_sta = priv_sta;
- lq_sta->rs_sta_dbgfs_scale_table_file =
- debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir,
- lq_sta, &rs_sta_dbgfs_scale_table_ops);
- lq_sta->rs_sta_dbgfs_stats_table_file =
- debugfs_create_file("rate_stats_table", S_IRUSR, dir,
- lq_sta, &rs_sta_dbgfs_stats_table_ops);
- lq_sta->rs_sta_dbgfs_rate_scale_data_file =
- debugfs_create_file("rate_scale_data", S_IRUSR, dir,
- lq_sta, &rs_sta_dbgfs_rate_scale_data_ops);
- lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file =
- debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir,
- &lq_sta->tx_agg_tid_en);
-
-}
-
-static void iwl4965_rs_remove_debugfs(void *priv, void *priv_sta)
-{
- struct iwl_lq_sta *lq_sta = priv_sta;
- debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file);
- debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
- debugfs_remove(lq_sta->rs_sta_dbgfs_rate_scale_data_file);
- debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file);
-}
-#endif
-
-/*
- * Initialization of rate scaling information is done by driver after
- * the station is added. Since mac80211 calls this function before a
- * station is added we ignore it.
- */
-static void
-iwl4965_rs_rate_init_stub(void *priv_r, struct ieee80211_supported_band *sband,
- struct ieee80211_sta *sta, void *priv_sta)
-{
-}
-static struct rate_control_ops rs_4965_ops = {
- .module = NULL,
- .name = IWL4965_RS_NAME,
- .tx_status = iwl4965_rs_tx_status,
- .get_rate = iwl4965_rs_get_rate,
- .rate_init = iwl4965_rs_rate_init_stub,
- .alloc = iwl4965_rs_alloc,
- .free = iwl4965_rs_free,
- .alloc_sta = iwl4965_rs_alloc_sta,
- .free_sta = iwl4965_rs_free_sta,
-#ifdef CONFIG_MAC80211_DEBUGFS
- .add_sta_debugfs = iwl4965_rs_add_debugfs,
- .remove_sta_debugfs = iwl4965_rs_remove_debugfs,
-#endif
-};
-
-int iwl4965_rate_control_register(void)
-{
- return ieee80211_rate_control_register(&rs_4965_ops);
-}
-
-void iwl4965_rate_control_unregister(void)
-{
- ieee80211_rate_control_unregister(&rs_4965_ops);
-}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-rx.c b/drivers/net/wireless/iwlegacy/iwl-4965-rx.c
deleted file mode 100644
index 2b144bbfc3c5..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-rx.c
+++ /dev/null
@@ -1,215 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-4965-calib.h"
-#include "iwl-sta.h"
-#include "iwl-io.h"
-#include "iwl-helpers.h"
-#include "iwl-4965-hw.h"
-#include "iwl-4965.h"
-
-void iwl4965_rx_missed_beacon_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_missed_beacon_notif *missed_beacon;
-
- missed_beacon = &pkt->u.missed_beacon;
- if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) >
- priv->missed_beacon_threshold) {
- IWL_DEBUG_CALIB(priv,
- "missed bcn cnsq %d totl %d rcd %d expctd %d\n",
- le32_to_cpu(missed_beacon->consecutive_missed_beacons),
- le32_to_cpu(missed_beacon->total_missed_becons),
- le32_to_cpu(missed_beacon->num_recvd_beacons),
- le32_to_cpu(missed_beacon->num_expected_beacons));
- if (!test_bit(STATUS_SCANNING, &priv->status))
- iwl4965_init_sensitivity(priv);
- }
-}
-
-/* Calculate noise level, based on measurements during network silence just
- * before an arriving beacon. This measurement can be done only if we know
- * exactly when to expect beacons, therefore only when we're associated. */
-static void iwl4965_rx_calc_noise(struct iwl_priv *priv)
-{
- struct statistics_rx_non_phy *rx_info;
- int num_active_rx = 0;
- int total_silence = 0;
- int bcn_silence_a, bcn_silence_b, bcn_silence_c;
- int last_rx_noise;
-
- rx_info = &(priv->_4965.statistics.rx.general);
- bcn_silence_a =
- le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
- bcn_silence_b =
- le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
- bcn_silence_c =
- le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
-
- if (bcn_silence_a) {
- total_silence += bcn_silence_a;
- num_active_rx++;
- }
- if (bcn_silence_b) {
- total_silence += bcn_silence_b;
- num_active_rx++;
- }
- if (bcn_silence_c) {
- total_silence += bcn_silence_c;
- num_active_rx++;
- }
-
- /* Average among active antennas */
- if (num_active_rx)
- last_rx_noise = (total_silence / num_active_rx) - 107;
- else
- last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
-
- IWL_DEBUG_CALIB(priv, "inband silence a %u, b %u, c %u, dBm %d\n",
- bcn_silence_a, bcn_silence_b, bcn_silence_c,
- last_rx_noise);
-}
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
-/*
- * Based on the assumption that all statistics counters are DWORDs.
- * FIXME: This function is for debugging only; it does not handle
- * counter roll-over.
- */
-static void iwl4965_accumulative_statistics(struct iwl_priv *priv,
- __le32 *stats)
-{
- int i, size;
- __le32 *prev_stats;
- u32 *accum_stats;
- u32 *delta, *max_delta;
- struct statistics_general_common *general, *accum_general;
- struct statistics_tx *tx, *accum_tx;
-
- prev_stats = (__le32 *)&priv->_4965.statistics;
- accum_stats = (u32 *)&priv->_4965.accum_statistics;
- size = sizeof(struct iwl_notif_statistics);
- general = &priv->_4965.statistics.general.common;
- accum_general = &priv->_4965.accum_statistics.general.common;
- tx = &priv->_4965.statistics.tx;
- accum_tx = &priv->_4965.accum_statistics.tx;
- delta = (u32 *)&priv->_4965.delta_statistics;
- max_delta = (u32 *)&priv->_4965.max_delta;
-
- for (i = sizeof(__le32); i < size;
- i += sizeof(__le32), stats++, prev_stats++, delta++,
- max_delta++, accum_stats++) {
- if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
- *delta = (le32_to_cpu(*stats) -
- le32_to_cpu(*prev_stats));
- *accum_stats += *delta;
- if (*delta > *max_delta)
- *max_delta = *delta;
- }
- }
-
- /* reset accumulative statistics for "no-counter" type statistics */
- accum_general->temperature = general->temperature;
- accum_general->ttl_timestamp = general->ttl_timestamp;
-}
-#endif
-
-#define REG_RECALIB_PERIOD (60)
-
-void iwl4965_rx_statistics(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- int change;
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
-
- IWL_DEBUG_RX(priv,
- "Statistics notification received (%d vs %d).\n",
- (int)sizeof(struct iwl_notif_statistics),
- le32_to_cpu(pkt->len_n_flags) &
- FH_RSCSR_FRAME_SIZE_MSK);
-
- change = ((priv->_4965.statistics.general.common.temperature !=
- pkt->u.stats.general.common.temperature) ||
- ((priv->_4965.statistics.flag &
- STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
- (pkt->u.stats.flag &
- STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
- iwl4965_accumulative_statistics(priv, (__le32 *)&pkt->u.stats);
-#endif
-
- /* TODO: reading some of statistics is unneeded */
- memcpy(&priv->_4965.statistics, &pkt->u.stats,
- sizeof(priv->_4965.statistics));
-
- set_bit(STATUS_STATISTICS, &priv->status);
-
- /* Reschedule the statistics timer to occur in
- * REG_RECALIB_PERIOD seconds to ensure we get a
- * thermal update even if the uCode doesn't give
- * us one */
- mod_timer(&priv->statistics_periodic, jiffies +
- msecs_to_jiffies(REG_RECALIB_PERIOD * 1000));
-
- if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
- (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
- iwl4965_rx_calc_noise(priv);
- queue_work(priv->workqueue, &priv->run_time_calib_work);
- }
- if (priv->cfg->ops->lib->temp_ops.temperature && change)
- priv->cfg->ops->lib->temp_ops.temperature(priv);
-}
-
-void iwl4965_reply_statistics(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
-
- if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) {
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
- memset(&priv->_4965.accum_statistics, 0,
- sizeof(struct iwl_notif_statistics));
- memset(&priv->_4965.delta_statistics, 0,
- sizeof(struct iwl_notif_statistics));
- memset(&priv->_4965.max_delta, 0,
- sizeof(struct iwl_notif_statistics));
-#endif
- IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
- }
- iwl4965_rx_statistics(priv, rxb);
-}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-sta.c b/drivers/net/wireless/iwlegacy/iwl-4965-sta.c
deleted file mode 100644
index a262c23553d2..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-sta.c
+++ /dev/null
@@ -1,721 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <net/mac80211.h>
-
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-sta.h"
-#include "iwl-4965.h"
-
-static struct iwl_link_quality_cmd *
-iwl4965_sta_alloc_lq(struct iwl_priv *priv, u8 sta_id)
-{
- int i, r;
- struct iwl_link_quality_cmd *link_cmd;
- u32 rate_flags = 0;
- __le32 rate_n_flags;
-
- link_cmd = kzalloc(sizeof(struct iwl_link_quality_cmd), GFP_KERNEL);
- if (!link_cmd) {
- IWL_ERR(priv, "Unable to allocate memory for LQ cmd.\n");
- return NULL;
- }
-	/* Set up the rate scaling to start at the selected rate, fall back
-	 * all the way down to 1M in IEEE order, and then spin on 1M */
- if (priv->band == IEEE80211_BAND_5GHZ)
- r = IWL_RATE_6M_INDEX;
- else
- r = IWL_RATE_1M_INDEX;
-
- if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
- rate_flags |= RATE_MCS_CCK_MSK;
-
- rate_flags |= iwl4965_first_antenna(priv->hw_params.valid_tx_ant) <<
- RATE_MCS_ANT_POS;
- rate_n_flags = iwl4965_hw_set_rate_n_flags(iwlegacy_rates[r].plcp,
- rate_flags);
- for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
- link_cmd->rs_table[i].rate_n_flags = rate_n_flags;
-
- link_cmd->general_params.single_stream_ant_msk =
- iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
-
- link_cmd->general_params.dual_stream_ant_msk =
- priv->hw_params.valid_tx_ant &
- ~iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
- if (!link_cmd->general_params.dual_stream_ant_msk) {
- link_cmd->general_params.dual_stream_ant_msk = ANT_AB;
- } else if (iwl4965_num_of_ant(priv->hw_params.valid_tx_ant) == 2) {
- link_cmd->general_params.dual_stream_ant_msk =
- priv->hw_params.valid_tx_ant;
- }
-
- link_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
- link_cmd->agg_params.agg_time_limit =
- cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
-
- link_cmd->sta_id = sta_id;
-
- return link_cmd;
-}
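-
-/*
- * Illustrative note (not from the original source): on 2.4 GHz the
- * starting rate is IWL_RATE_1M_INDEX, so RATE_MCS_CCK_MSK is set and
- * the first valid antenna is encoded at RATE_MCS_ANT_POS; every entry
- * of rs_table then carries that same rate_n_flags value, i.e. the
- * device retries the frame at that one rate on a single antenna until
- * rate scaling later replaces this default table.
- */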
-
-/*
- * iwl4965_add_bssid_station - Add the special IBSS BSSID station
- *
- * Function sleeps.
- */
-int
-iwl4965_add_bssid_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
- const u8 *addr, u8 *sta_id_r)
-{
- int ret;
- u8 sta_id;
- struct iwl_link_quality_cmd *link_cmd;
- unsigned long flags;
-
- if (sta_id_r)
- *sta_id_r = IWL_INVALID_STATION;
-
- ret = iwl_legacy_add_station_common(priv, ctx, addr, 0, NULL, &sta_id);
- if (ret) {
- IWL_ERR(priv, "Unable to add station %pM\n", addr);
- return ret;
- }
-
- if (sta_id_r)
- *sta_id_r = sta_id;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- priv->stations[sta_id].used |= IWL_STA_LOCAL;
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- /* Set up default rate scaling table in device's station table */
- link_cmd = iwl4965_sta_alloc_lq(priv, sta_id);
- if (!link_cmd) {
- IWL_ERR(priv,
- "Unable to initialize rate scaling for station %pM.\n",
- addr);
- return -ENOMEM;
- }
-
- ret = iwl_legacy_send_lq_cmd(priv, ctx, link_cmd, CMD_SYNC, true);
- if (ret)
- IWL_ERR(priv, "Link quality command failed (%d)\n", ret);
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- priv->stations[sta_id].lq = link_cmd;
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- return 0;
-}
-
-static int iwl4965_static_wepkey_cmd(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- bool send_if_empty)
-{
- int i, not_empty = 0;
- u8 buff[sizeof(struct iwl_wep_cmd) +
- sizeof(struct iwl_wep_key) * WEP_KEYS_MAX];
- struct iwl_wep_cmd *wep_cmd = (struct iwl_wep_cmd *)buff;
- size_t cmd_size = sizeof(struct iwl_wep_cmd);
- struct iwl_host_cmd cmd = {
- .id = ctx->wep_key_cmd,
- .data = wep_cmd,
- .flags = CMD_SYNC,
- };
-
- might_sleep();
-
- memset(wep_cmd, 0, cmd_size +
- (sizeof(struct iwl_wep_key) * WEP_KEYS_MAX));
-
- for (i = 0; i < WEP_KEYS_MAX ; i++) {
- wep_cmd->key[i].key_index = i;
- if (ctx->wep_keys[i].key_size) {
- wep_cmd->key[i].key_offset = i;
- not_empty = 1;
- } else {
- wep_cmd->key[i].key_offset = WEP_INVALID_OFFSET;
- }
-
- wep_cmd->key[i].key_size = ctx->wep_keys[i].key_size;
- memcpy(&wep_cmd->key[i].key[3], ctx->wep_keys[i].key,
- ctx->wep_keys[i].key_size);
- }
-
- wep_cmd->global_key_type = WEP_KEY_WEP_TYPE;
- wep_cmd->num_keys = WEP_KEYS_MAX;
-
- cmd_size += sizeof(struct iwl_wep_key) * WEP_KEYS_MAX;
-
- cmd.len = cmd_size;
-
- if (not_empty || send_if_empty)
- return iwl_legacy_send_cmd(priv, &cmd);
- else
- return 0;
-}
-
-int iwl4965_restore_default_wep_keys(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
-{
- lockdep_assert_held(&priv->mutex);
-
- return iwl4965_static_wepkey_cmd(priv, ctx, false);
-}
-
-int iwl4965_remove_default_wep_key(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct ieee80211_key_conf *keyconf)
-{
- int ret;
-
- lockdep_assert_held(&priv->mutex);
-
- IWL_DEBUG_WEP(priv, "Removing default WEP key: idx=%d\n",
- keyconf->keyidx);
-
- memset(&ctx->wep_keys[keyconf->keyidx], 0, sizeof(ctx->wep_keys[0]));
- if (iwl_legacy_is_rfkill(priv)) {
- IWL_DEBUG_WEP(priv,
- "Not sending REPLY_WEPKEY command due to RFKILL.\n");
- /* but keys in device are clear anyway so return success */
- return 0;
- }
-	ret = iwl4965_static_wepkey_cmd(priv, ctx, true);
- IWL_DEBUG_WEP(priv, "Remove default WEP key: idx=%d ret=%d\n",
- keyconf->keyidx, ret);
-
- return ret;
-}
-
-int iwl4965_set_default_wep_key(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct ieee80211_key_conf *keyconf)
-{
- int ret;
-
- lockdep_assert_held(&priv->mutex);
-
- if (keyconf->keylen != WEP_KEY_LEN_128 &&
- keyconf->keylen != WEP_KEY_LEN_64) {
- IWL_DEBUG_WEP(priv, "Bad WEP key length %d\n", keyconf->keylen);
- return -EINVAL;
- }
-
- keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
- keyconf->hw_key_idx = HW_KEY_DEFAULT;
- priv->stations[ctx->ap_sta_id].keyinfo.cipher = keyconf->cipher;
-
- ctx->wep_keys[keyconf->keyidx].key_size = keyconf->keylen;
- memcpy(&ctx->wep_keys[keyconf->keyidx].key, &keyconf->key,
- keyconf->keylen);
-
- ret = iwl4965_static_wepkey_cmd(priv, ctx, false);
- IWL_DEBUG_WEP(priv, "Set default WEP key: len=%d idx=%d ret=%d\n",
- keyconf->keylen, keyconf->keyidx, ret);
-
- return ret;
-}
-
-static int iwl4965_set_wep_dynamic_key_info(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct ieee80211_key_conf *keyconf,
- u8 sta_id)
-{
- unsigned long flags;
- __le16 key_flags = 0;
- struct iwl_legacy_addsta_cmd sta_cmd;
-
- lockdep_assert_held(&priv->mutex);
-
- keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
-
- key_flags |= (STA_KEY_FLG_WEP | STA_KEY_FLG_MAP_KEY_MSK);
- key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
- key_flags &= ~STA_KEY_FLG_INVALID;
-
- if (keyconf->keylen == WEP_KEY_LEN_128)
- key_flags |= STA_KEY_FLG_KEY_SIZE_MSK;
-
- if (sta_id == ctx->bcast_sta_id)
- key_flags |= STA_KEY_MULTICAST_MSK;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
-
- priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
- priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
- priv->stations[sta_id].keyinfo.keyidx = keyconf->keyidx;
-
- memcpy(priv->stations[sta_id].keyinfo.key,
- keyconf->key, keyconf->keylen);
-
- memcpy(&priv->stations[sta_id].sta.key.key[3],
- keyconf->key, keyconf->keylen);
-
- if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
- == STA_KEY_FLG_NO_ENC)
- priv->stations[sta_id].sta.key.key_offset =
- iwl_legacy_get_free_ucode_key_index(priv);
-	/* else, we are overriding an existing key => no need to allocate room
-	 * in uCode. */
-
- WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
- "no space for a new key");
-
- priv->stations[sta_id].sta.key.key_flags = key_flags;
- priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
- priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
-
- memcpy(&sta_cmd, &priv->stations[sta_id].sta,
- sizeof(struct iwl_legacy_addsta_cmd));
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
-}
-
-static int iwl4965_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct ieee80211_key_conf *keyconf,
- u8 sta_id)
-{
- unsigned long flags;
- __le16 key_flags = 0;
- struct iwl_legacy_addsta_cmd sta_cmd;
-
- lockdep_assert_held(&priv->mutex);
-
- key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
- key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
- key_flags &= ~STA_KEY_FLG_INVALID;
-
- if (sta_id == ctx->bcast_sta_id)
- key_flags |= STA_KEY_MULTICAST_MSK;
-
- keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
- priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
-
- memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key,
- keyconf->keylen);
-
- memcpy(priv->stations[sta_id].sta.key.key, keyconf->key,
- keyconf->keylen);
-
- if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
- == STA_KEY_FLG_NO_ENC)
- priv->stations[sta_id].sta.key.key_offset =
- iwl_legacy_get_free_ucode_key_index(priv);
-	/* else, we are overriding an existing key => no need to allocate room
-	 * in uCode. */
-
- WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
- "no space for a new key");
-
- priv->stations[sta_id].sta.key.key_flags = key_flags;
- priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
- priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
-
- memcpy(&sta_cmd, &priv->stations[sta_id].sta,
- sizeof(struct iwl_legacy_addsta_cmd));
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
-}
-
-static int iwl4965_set_tkip_dynamic_key_info(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct ieee80211_key_conf *keyconf,
- u8 sta_id)
-{
- unsigned long flags;
- int ret = 0;
- __le16 key_flags = 0;
-
- key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK);
- key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
- key_flags &= ~STA_KEY_FLG_INVALID;
-
- if (sta_id == ctx->bcast_sta_id)
- key_flags |= STA_KEY_MULTICAST_MSK;
-
- keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
- keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
-
- priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
- priv->stations[sta_id].keyinfo.keylen = 16;
-
- if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
- == STA_KEY_FLG_NO_ENC)
- priv->stations[sta_id].sta.key.key_offset =
- iwl_legacy_get_free_ucode_key_index(priv);
-	/* else, we are overriding an existing key => no need to allocate room
-	 * in uCode. */
-
- WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
- "no space for a new key");
-
- priv->stations[sta_id].sta.key.key_flags = key_flags;
-
-
-	/* This copy is actually not needed: we get the key with each TX */
- memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, 16);
-
- memcpy(priv->stations[sta_id].sta.key.key, keyconf->key, 16);
-
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- return ret;
-}
-
-void iwl4965_update_tkip_key(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct ieee80211_key_conf *keyconf,
- struct ieee80211_sta *sta, u32 iv32, u16 *phase1key)
-{
- u8 sta_id;
- unsigned long flags;
- int i;
-
- if (iwl_legacy_scan_cancel(priv)) {
- /* cancel scan failed, just live w/ bad key and rely
- briefly on SW decryption */
- return;
- }
-
- sta_id = iwl_legacy_sta_id_or_broadcast(priv, ctx, sta);
- if (sta_id == IWL_INVALID_STATION)
- return;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
-
- priv->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32;
-
- for (i = 0; i < 5; i++)
- priv->stations[sta_id].sta.key.tkip_rx_ttak[i] =
- cpu_to_le16(phase1key[i]);
-
- priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
- priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
-
- iwl_legacy_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
-
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
-}
-
-int iwl4965_remove_dynamic_key(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct ieee80211_key_conf *keyconf,
- u8 sta_id)
-{
- unsigned long flags;
- u16 key_flags;
- u8 keyidx;
- struct iwl_legacy_addsta_cmd sta_cmd;
-
- lockdep_assert_held(&priv->mutex);
-
- ctx->key_mapping_keys--;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- key_flags = le16_to_cpu(priv->stations[sta_id].sta.key.key_flags);
- keyidx = (key_flags >> STA_KEY_FLG_KEYID_POS) & 0x3;
-
- IWL_DEBUG_WEP(priv, "Remove dynamic key: idx=%d sta=%d\n",
- keyconf->keyidx, sta_id);
-
- if (keyconf->keyidx != keyidx) {
-		/* We need to remove a key with an index different from the one
-		 * in the uCode. This means that the key we need to remove has
-		 * been replaced by another one with a different index.
-		 * Don't do anything and return ok.
- */
- spin_unlock_irqrestore(&priv->sta_lock, flags);
- return 0;
- }
-
- if (priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) {
- IWL_WARN(priv, "Removing wrong key %d 0x%x\n",
- keyconf->keyidx, key_flags);
- spin_unlock_irqrestore(&priv->sta_lock, flags);
- return 0;
- }
-
- if (!test_and_clear_bit(priv->stations[sta_id].sta.key.key_offset,
- &priv->ucode_key_table))
- IWL_ERR(priv, "index %d not used in uCode key table.\n",
- priv->stations[sta_id].sta.key.key_offset);
- memset(&priv->stations[sta_id].keyinfo, 0,
- sizeof(struct iwl_hw_key));
- memset(&priv->stations[sta_id].sta.key, 0,
- sizeof(struct iwl4965_keyinfo));
- priv->stations[sta_id].sta.key.key_flags =
- STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
- priv->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET;
- priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
- priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
-
- if (iwl_legacy_is_rfkill(priv)) {
- IWL_DEBUG_WEP(priv,
- "Not sending REPLY_ADD_STA command because RFKILL enabled.\n");
- spin_unlock_irqrestore(&priv->sta_lock, flags);
- return 0;
- }
- memcpy(&sta_cmd, &priv->stations[sta_id].sta,
- sizeof(struct iwl_legacy_addsta_cmd));
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
-}
-
-int iwl4965_set_dynamic_key(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
- struct ieee80211_key_conf *keyconf, u8 sta_id)
-{
- int ret;
-
- lockdep_assert_held(&priv->mutex);
-
- ctx->key_mapping_keys++;
- keyconf->hw_key_idx = HW_KEY_DYNAMIC;
-
- switch (keyconf->cipher) {
- case WLAN_CIPHER_SUITE_CCMP:
- ret = iwl4965_set_ccmp_dynamic_key_info(priv, ctx,
- keyconf, sta_id);
- break;
- case WLAN_CIPHER_SUITE_TKIP:
- ret = iwl4965_set_tkip_dynamic_key_info(priv, ctx,
- keyconf, sta_id);
- break;
- case WLAN_CIPHER_SUITE_WEP40:
- case WLAN_CIPHER_SUITE_WEP104:
- ret = iwl4965_set_wep_dynamic_key_info(priv, ctx,
- keyconf, sta_id);
- break;
- default:
- IWL_ERR(priv,
- "Unknown alg: %s cipher = %x\n", __func__,
- keyconf->cipher);
- ret = -EINVAL;
- }
-
- IWL_DEBUG_WEP(priv,
- "Set dynamic key: cipher=%x len=%d idx=%d sta=%d ret=%d\n",
- keyconf->cipher, keyconf->keylen, keyconf->keyidx,
- sta_id, ret);
-
- return ret;
-}
-
-/**
- * iwl4965_alloc_bcast_station - add broadcast station into driver's station table.
- *
- * This adds the broadcast station into the driver's station table
- * and marks it driver active, so that it will be restored to the
- * device at the next best time.
- */
-int iwl4965_alloc_bcast_station(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
-{
- struct iwl_link_quality_cmd *link_cmd;
- unsigned long flags;
- u8 sta_id;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- sta_id = iwl_legacy_prep_station(priv, ctx, iwlegacy_bcast_addr,
- false, NULL);
- if (sta_id == IWL_INVALID_STATION) {
- IWL_ERR(priv, "Unable to prepare broadcast station\n");
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- return -EINVAL;
- }
-
- priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE;
- priv->stations[sta_id].used |= IWL_STA_BCAST;
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- link_cmd = iwl4965_sta_alloc_lq(priv, sta_id);
- if (!link_cmd) {
- IWL_ERR(priv,
- "Unable to initialize rate scaling for bcast station.\n");
- return -ENOMEM;
- }
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- priv->stations[sta_id].lq = link_cmd;
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- return 0;
-}
-
-/**
- * iwl4965_update_bcast_station - update broadcast station's LQ command
- *
- * Only used by iwl4965. Placed here to have all bcast station management
- * code together.
- */
-static int iwl4965_update_bcast_station(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
-{
- unsigned long flags;
- struct iwl_link_quality_cmd *link_cmd;
- u8 sta_id = ctx->bcast_sta_id;
-
- link_cmd = iwl4965_sta_alloc_lq(priv, sta_id);
- if (!link_cmd) {
- IWL_ERR(priv,
- "Unable to initialize rate scaling for bcast station.\n");
- return -ENOMEM;
- }
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- if (priv->stations[sta_id].lq)
- kfree(priv->stations[sta_id].lq);
- else
- IWL_DEBUG_INFO(priv,
- "Bcast station rate scaling has not been initialized yet.\n");
- priv->stations[sta_id].lq = link_cmd;
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- return 0;
-}
-
-int iwl4965_update_bcast_stations(struct iwl_priv *priv)
-{
- struct iwl_rxon_context *ctx;
- int ret = 0;
-
- for_each_context(priv, ctx) {
- ret = iwl4965_update_bcast_station(priv, ctx);
- if (ret)
- break;
- }
-
- return ret;
-}
-
-/**
- * iwl4965_sta_tx_modify_enable_tid - Enable Tx for this TID in station table
- */
-int iwl4965_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid)
-{
- unsigned long flags;
- struct iwl_legacy_addsta_cmd sta_cmd;
-
- lockdep_assert_held(&priv->mutex);
-
- /* Remove "disable" flag, to enable Tx for this TID */
- spin_lock_irqsave(&priv->sta_lock, flags);
- priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
- priv->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
- priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
- memcpy(&sta_cmd, &priv->stations[sta_id].sta,
- sizeof(struct iwl_legacy_addsta_cmd));
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
-}
-
-int iwl4965_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
- int tid, u16 ssn)
-{
- unsigned long flags;
- int sta_id;
- struct iwl_legacy_addsta_cmd sta_cmd;
-
- lockdep_assert_held(&priv->mutex);
-
- sta_id = iwl_legacy_sta_id(sta);
- if (sta_id == IWL_INVALID_STATION)
- return -ENXIO;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- priv->stations[sta_id].sta.station_flags_msk = 0;
- priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK;
- priv->stations[sta_id].sta.add_immediate_ba_tid = (u8)tid;
- priv->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn);
- priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
- memcpy(&sta_cmd, &priv->stations[sta_id].sta,
- sizeof(struct iwl_legacy_addsta_cmd));
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
-}
-
-int iwl4965_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
- int tid)
-{
- unsigned long flags;
- int sta_id;
- struct iwl_legacy_addsta_cmd sta_cmd;
-
- lockdep_assert_held(&priv->mutex);
-
- sta_id = iwl_legacy_sta_id(sta);
- if (sta_id == IWL_INVALID_STATION) {
- IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
- return -ENXIO;
- }
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- priv->stations[sta_id].sta.station_flags_msk = 0;
- priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
- priv->stations[sta_id].sta.remove_immediate_ba_tid = (u8)tid;
- priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
- memcpy(&sta_cmd, &priv->stations[sta_id].sta,
- sizeof(struct iwl_legacy_addsta_cmd));
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
-}
-
-void
-iwl4965_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- priv->stations[sta_id].sta.station_flags |= STA_FLG_PWR_SAVE_MSK;
- priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
- priv->stations[sta_id].sta.sta.modify_mask =
- STA_MODIFY_SLEEP_TX_COUNT_MSK;
- priv->stations[sta_id].sta.sleep_tx_count = cpu_to_le16(cnt);
- priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
- iwl_legacy_send_add_sta(priv,
- &priv->stations[sta_id].sta, CMD_ASYNC);
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
-}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-tx.c b/drivers/net/wireless/iwlegacy/iwl-4965-tx.c
deleted file mode 100644
index 7f12e3638bae..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-tx.c
+++ /dev/null
@@ -1,1378 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-sta.h"
-#include "iwl-io.h"
-#include "iwl-helpers.h"
-#include "iwl-4965-hw.h"
-#include "iwl-4965.h"
-
-/*
- * mac80211 queues, ACs, hardware queues, FIFOs.
- *
- * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues
- *
- * Mac80211 uses the following numbers, which we get from it
- * by way of skb_get_queue_mapping(skb):
- *
- * VO 0
- * VI 1
- * BE 2
- * BK 3
- *
- *
- * Regular (not A-MPDU) frames are put into hardware queues corresponding
- * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
- * own queue per aggregation session (RA/TID combination), such queues are
- * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
- * order to map frames to the right queue, we also need an AC->hw queue
- * mapping. This is implemented here.
- *
- * Due to the way hw queues are set up (by the hw specific modules like
- * iwl-4965.c), the AC->hw queue mapping is the identity
- * mapping.
- */
-
-static const u8 tid_to_ac[] = {
- IEEE80211_AC_BE,
- IEEE80211_AC_BK,
- IEEE80211_AC_BK,
- IEEE80211_AC_BE,
- IEEE80211_AC_VI,
- IEEE80211_AC_VI,
- IEEE80211_AC_VO,
- IEEE80211_AC_VO
-};
-
-static inline int iwl4965_get_ac_from_tid(u16 tid)
-{
- if (likely(tid < ARRAY_SIZE(tid_to_ac)))
- return tid_to_ac[tid];
-
- /* no support for TIDs 8-15 yet */
- return -EINVAL;
-}
-
-static inline int
-iwl4965_get_fifo_from_tid(struct iwl_rxon_context *ctx, u16 tid)
-{
- if (likely(tid < ARRAY_SIZE(tid_to_ac)))
- return ctx->ac_to_fifo[tid_to_ac[tid]];
-
- /* no support for TIDs 8-15 yet */
- return -EINVAL;
-}
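-
-/*
- * Usage sketch (illustrative, not from the original source): a QoS
- * data frame carrying TID 6 maps to IEEE80211_AC_VO via tid_to_ac[],
- * so iwl4965_get_ac_from_tid(6) returns IEEE80211_AC_VO and
- * iwl4965_get_fifo_from_tid(ctx, 6) returns
- * ctx->ac_to_fifo[IEEE80211_AC_VO].  TIDs 8-15 fall outside
- * tid_to_ac[] and yield -EINVAL.
- */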
-
-/*
- * Handle building the REPLY_TX command.
- */
-static void iwl4965_tx_cmd_build_basic(struct iwl_priv *priv,
- struct sk_buff *skb,
- struct iwl_tx_cmd *tx_cmd,
- struct ieee80211_tx_info *info,
- struct ieee80211_hdr *hdr,
- u8 std_id)
-{
- __le16 fc = hdr->frame_control;
- __le32 tx_flags = tx_cmd->tx_flags;
-
- tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
- if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
- tx_flags |= TX_CMD_FLG_ACK_MSK;
- if (ieee80211_is_mgmt(fc))
- tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
- if (ieee80211_is_probe_resp(fc) &&
- !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
- tx_flags |= TX_CMD_FLG_TSF_MSK;
- } else {
- tx_flags &= (~TX_CMD_FLG_ACK_MSK);
- tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
- }
-
- if (ieee80211_is_back_req(fc))
- tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
-
- tx_cmd->sta_id = std_id;
- if (ieee80211_has_morefrags(fc))
- tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
-
- if (ieee80211_is_data_qos(fc)) {
- u8 *qc = ieee80211_get_qos_ctl(hdr);
- tx_cmd->tid_tspec = qc[0] & 0xf;
- tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
- } else {
- tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
- }
-
- iwl_legacy_tx_cmd_protection(priv, info, fc, &tx_flags);
-
- tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
- if (ieee80211_is_mgmt(fc)) {
- if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
- tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
- else
- tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
- } else {
- tx_cmd->timeout.pm_frame_timeout = 0;
- }
-
- tx_cmd->driver_txop = 0;
- tx_cmd->tx_flags = tx_flags;
- tx_cmd->next_frame_len = 0;
-}
-
-#define RTS_DFAULT_RETRY_LIMIT 60
-
-static void iwl4965_tx_cmd_build_rate(struct iwl_priv *priv,
- struct iwl_tx_cmd *tx_cmd,
- struct ieee80211_tx_info *info,
- __le16 fc)
-{
- u32 rate_flags;
- int rate_idx;
- u8 rts_retry_limit;
- u8 data_retry_limit;
- u8 rate_plcp;
-
-	/* Set retry limit on DATA packets and Probe Responses */
- if (ieee80211_is_probe_resp(fc))
- data_retry_limit = 3;
- else
- data_retry_limit = IWL4965_DEFAULT_TX_RETRY;
- tx_cmd->data_retry_limit = data_retry_limit;
-
- /* Set retry limit on RTS packets */
- rts_retry_limit = RTS_DFAULT_RETRY_LIMIT;
- if (data_retry_limit < rts_retry_limit)
- rts_retry_limit = data_retry_limit;
- tx_cmd->rts_retry_limit = rts_retry_limit;
-
- /* DATA packets will use the uCode station table for rate/antenna
- * selection */
- if (ieee80211_is_data(fc)) {
- tx_cmd->initial_rate_index = 0;
- tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
- return;
- }
-
- /**
- * If the current TX rate stored in mac80211 has the MCS bit set, it's
- * not really a TX rate. Thus, we use the lowest supported rate for
- * this band. Also use the lowest supported rate if the stored rate
- * index is invalid.
- */
- rate_idx = info->control.rates[0].idx;
- if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
- (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
- rate_idx = rate_lowest_index(&priv->bands[info->band],
- info->control.sta);
-	/* For 5 GHz band, remap mac80211 rate indices into driver indices */
- if (info->band == IEEE80211_BAND_5GHZ)
- rate_idx += IWL_FIRST_OFDM_RATE;
- /* Get PLCP rate for tx_cmd->rate_n_flags */
- rate_plcp = iwlegacy_rates[rate_idx].plcp;
- /* Zero out flags for this packet */
- rate_flags = 0;
-
- /* Set CCK flag as needed */
- if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
- rate_flags |= RATE_MCS_CCK_MSK;
-
- /* Set up antennas */
- priv->mgmt_tx_ant = iwl4965_toggle_tx_ant(priv, priv->mgmt_tx_ant,
- priv->hw_params.valid_tx_ant);
-
- rate_flags |= iwl4965_ant_idx_to_flags(priv->mgmt_tx_ant);
-
- /* Set the rate in the TX cmd */
- tx_cmd->rate_n_flags = iwl4965_hw_set_rate_n_flags(rate_plcp, rate_flags);
-}
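-
-/*
- * Illustrative note (not from the original source): on 5 GHz mac80211
- * indexes its legacy rates starting from 6M, so rate index 0 is shifted
- * by IWL_FIRST_OFDM_RATE in iwl4965_tx_cmd_build_rate() above to land
- * on the matching entry in the driver's combined CCK+OFDM rate table
- * before the PLCP value is read from iwlegacy_rates[].
- */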
-
-static void iwl4965_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
- struct ieee80211_tx_info *info,
- struct iwl_tx_cmd *tx_cmd,
- struct sk_buff *skb_frag,
- int sta_id)
-{
- struct ieee80211_key_conf *keyconf = info->control.hw_key;
-
- switch (keyconf->cipher) {
- case WLAN_CIPHER_SUITE_CCMP:
- tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
- memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
- if (info->flags & IEEE80211_TX_CTL_AMPDU)
- tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
- IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
- break;
-
- case WLAN_CIPHER_SUITE_TKIP:
- tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
- ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
- IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n");
- break;
-
- case WLAN_CIPHER_SUITE_WEP104:
- tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
- /* fall through */
- case WLAN_CIPHER_SUITE_WEP40:
- tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
- (keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);
-
- memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
-
- IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
- "with key %d\n", keyconf->keyidx);
- break;
-
- default:
- IWL_ERR(priv, "Unknown encode cipher %x\n", keyconf->cipher);
- break;
- }
-}
-
-/*
- * start REPLY_TX command process
- */
-int iwl4965_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
-{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct ieee80211_sta *sta = info->control.sta;
- struct iwl_station_priv *sta_priv = NULL;
- struct iwl_tx_queue *txq;
- struct iwl_queue *q;
- struct iwl_device_cmd *out_cmd;
- struct iwl_cmd_meta *out_meta;
- struct iwl_tx_cmd *tx_cmd;
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- int txq_id;
- dma_addr_t phys_addr;
- dma_addr_t txcmd_phys;
- dma_addr_t scratch_phys;
- u16 len, firstlen, secondlen;
- u16 seq_number = 0;
- __le16 fc;
- u8 hdr_len;
- u8 sta_id;
- u8 wait_write_ptr = 0;
- u8 tid = 0;
- u8 *qc = NULL;
- unsigned long flags;
- bool is_agg = false;
-
- if (info->control.vif)
- ctx = iwl_legacy_rxon_ctx_from_vif(info->control.vif);
-
- spin_lock_irqsave(&priv->lock, flags);
- if (iwl_legacy_is_rfkill(priv)) {
- IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
- goto drop_unlock;
- }
-
- fc = hdr->frame_control;
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- if (ieee80211_is_auth(fc))
- IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
- else if (ieee80211_is_assoc_req(fc))
- IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
- else if (ieee80211_is_reassoc_req(fc))
- IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
-#endif
-
- hdr_len = ieee80211_hdrlen(fc);
-
-	/* For management frames use broadcast id so as not to break aggregation */
- if (!ieee80211_is_data(fc))
- sta_id = ctx->bcast_sta_id;
- else {
- /* Find index into station table for destination station */
- sta_id = iwl_legacy_sta_id_or_broadcast(priv, ctx, info->control.sta);
-
- if (sta_id == IWL_INVALID_STATION) {
- IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
- hdr->addr1);
- goto drop_unlock;
- }
- }
-
- IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);
-
- if (sta)
- sta_priv = (void *)sta->drv_priv;
-
- if (sta_priv && sta_priv->asleep &&
- (info->flags & IEEE80211_TX_CTL_POLL_RESPONSE)) {
- /*
- * This sends an asynchronous command to the device,
- * but we can rely on it being processed before the
- * next frame is processed -- and the next frame to
- * this station is the one that will consume this
- * counter.
- * For now set the counter to just 1 since we do not
- * support uAPSD yet.
- */
- iwl4965_sta_modify_sleep_tx_count(priv, sta_id, 1);
- }
-
- /*
- * Send this frame after DTIM -- there's a special queue
- * reserved for this for contexts that support AP mode.
- */
- if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
- txq_id = ctx->mcast_queue;
- /*
- * The microcode will clear the more data
- * bit in the last frame it transmits.
- */
- hdr->frame_control |=
- cpu_to_le16(IEEE80211_FCTL_MOREDATA);
- } else
- txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)];
-
- /* irqs already disabled/saved above when locking priv->lock */
- spin_lock(&priv->sta_lock);
-
- if (ieee80211_is_data_qos(fc)) {
- qc = ieee80211_get_qos_ctl(hdr);
- tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
- if (WARN_ON_ONCE(tid >= MAX_TID_COUNT)) {
- spin_unlock(&priv->sta_lock);
- goto drop_unlock;
- }
- seq_number = priv->stations[sta_id].tid[tid].seq_number;
- seq_number &= IEEE80211_SCTL_SEQ;
- hdr->seq_ctrl = hdr->seq_ctrl &
- cpu_to_le16(IEEE80211_SCTL_FRAG);
- hdr->seq_ctrl |= cpu_to_le16(seq_number);
- seq_number += 0x10;
- /* aggregation is on for this <sta,tid> */
- if (info->flags & IEEE80211_TX_CTL_AMPDU &&
- priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) {
- txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
- is_agg = true;
- }
- }
-
- txq = &priv->txq[txq_id];
- q = &txq->q;
-
- if (unlikely(iwl_legacy_queue_space(q) < q->high_mark)) {
- spin_unlock(&priv->sta_lock);
- goto drop_unlock;
- }
-
- if (ieee80211_is_data_qos(fc)) {
- priv->stations[sta_id].tid[tid].tfds_in_queue++;
- if (!ieee80211_has_morefrags(fc))
- priv->stations[sta_id].tid[tid].seq_number = seq_number;
- }
-
- spin_unlock(&priv->sta_lock);
-
- /* Set up driver data for this TFD */
- memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
- txq->txb[q->write_ptr].skb = skb;
- txq->txb[q->write_ptr].ctx = ctx;
-
- /* Set up first empty entry in queue's array of Tx/cmd buffers */
- out_cmd = txq->cmd[q->write_ptr];
- out_meta = &txq->meta[q->write_ptr];
- tx_cmd = &out_cmd->cmd.tx;
- memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
- memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));
-
- /*
- * Set up the Tx-command (not MAC!) header.
- * Store the chosen Tx queue and TFD index within the sequence field;
- * after Tx, uCode's Tx response will return this value so driver can
- * locate the frame within the tx queue and do post-tx processing.
- */
- out_cmd->hdr.cmd = REPLY_TX;
- out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
- INDEX_TO_SEQ(q->write_ptr)));
-
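-	/*
-	 * Sketch of the sequence-field layout (an assumption for
-	 * illustration, not taken from this file): QUEUE_TO_SEQ() is
-	 * expected to place the Tx queue id in the upper bits and
-	 * INDEX_TO_SEQ() the TFD index in the low byte, so e.g. queue 2
-	 * with write_ptr 0x1f would yield a sequence value whose low
-	 * byte is 0x1f and whose queue bits decode back to 2 in the Tx
-	 * response handler.
-	 */
-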
- /* Copy MAC header from skb into command buffer */
- memcpy(tx_cmd->hdr, hdr, hdr_len);
-
-
- /* Total # bytes to be transmitted */
- len = (u16)skb->len;
- tx_cmd->len = cpu_to_le16(len);
-
- if (info->control.hw_key)
- iwl4965_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);
-
- /* TODO need this for burst mode later on */
- iwl4965_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id);
- iwl_legacy_dbg_log_tx_data_frame(priv, len, hdr);
-
- iwl4965_tx_cmd_build_rate(priv, tx_cmd, info, fc);
-
- iwl_legacy_update_stats(priv, true, fc, len);
- /*
- * Use the first empty entry in this queue's command buffer array
- * to contain the Tx command and MAC header concatenated together
- * (payload data will be in another buffer).
- * Size of this varies, due to varying MAC header length.
- * If end is not dword aligned, we'll have 2 extra bytes at the end
- * of the MAC header (device reads on dword boundaries).
- * We'll tell device about this padding later.
- */
- len = sizeof(struct iwl_tx_cmd) +
- sizeof(struct iwl_cmd_header) + hdr_len;
- firstlen = (len + 3) & ~3;
-
- /* Tell NIC about any 2-byte padding after MAC header */
- if (firstlen != len)
- tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
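-
-	/*
-	 * Worked example (illustrative only, assuming the fixed command
-	 * structures are dword-sized): a QoS data header of 26 bytes
-	 * leaves len 2 short of a dword boundary, so firstlen = len + 2
-	 * and the MH_PAD flag above tells the device to skip those 2 pad
-	 * bytes; a plain 24-byte data header is already aligned and no
-	 * flag is set.
-	 */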
-
- /* Physical address of this Tx command's header (not MAC header!),
- * within command buffer array. */
- txcmd_phys = pci_map_single(priv->pci_dev,
- &out_cmd->hdr, firstlen,
- PCI_DMA_BIDIRECTIONAL);
- dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
- dma_unmap_len_set(out_meta, len, firstlen);
- /* Add buffer containing Tx command and MAC(!) header to TFD's
- * first entry */
- priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
- txcmd_phys, firstlen, 1, 0);
-
- if (!ieee80211_has_morefrags(hdr->frame_control)) {
- txq->need_update = 1;
- } else {
- wait_write_ptr = 1;
- txq->need_update = 0;
- }
-
- /* Set up TFD's 2nd entry to point directly to remainder of skb,
- * if any (802.11 null frames have no payload). */
- secondlen = skb->len - hdr_len;
- if (secondlen > 0) {
- phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
- secondlen, PCI_DMA_TODEVICE);
- priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
- phys_addr, secondlen,
- 0, 0);
- }
-
- scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
- offsetof(struct iwl_tx_cmd, scratch);
-
- /* take back ownership of DMA buffer to enable update */
- pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys,
- firstlen, PCI_DMA_BIDIRECTIONAL);
- tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
- tx_cmd->dram_msb_ptr = iwl_legacy_get_dma_hi_addr(scratch_phys);
-
- IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
- le16_to_cpu(out_cmd->hdr.sequence));
- IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
- iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
- iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
-
- /* Set up entry for this TFD in Tx byte-count array */
- if (info->flags & IEEE80211_TX_CTL_AMPDU)
- priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq,
- le16_to_cpu(tx_cmd->len));
-
- pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
- firstlen, PCI_DMA_BIDIRECTIONAL);
-
- trace_iwlwifi_legacy_dev_tx(priv,
- &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
- sizeof(struct iwl_tfd),
- &out_cmd->hdr, firstlen,
- skb->data + hdr_len, secondlen);
-
- /* Tell device the write index *just past* this latest filled TFD */
- q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd);
- iwl_legacy_txq_update_write_ptr(priv, txq);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- /*
- * At this point the frame is "transmitted" successfully
- * and we will get a TX status notification eventually,
- * regardless of the value of ret. "ret" only indicates
- * whether or not we should update the write pointer.
- */
-
- /*
- * Avoid atomic ops if it isn't an associated client.
- * Also, if this is a packet for aggregation, don't
- * increase the counter because the ucode will stop
- * aggregation queues when their respective station
- * goes to sleep.
- */
- if (sta_priv && sta_priv->client && !is_agg)
- atomic_inc(&sta_priv->pending_frames);
-
- if ((iwl_legacy_queue_space(q) < q->high_mark) &&
- priv->mac80211_registered) {
- if (wait_write_ptr) {
- spin_lock_irqsave(&priv->lock, flags);
- txq->need_update = 1;
- iwl_legacy_txq_update_write_ptr(priv, txq);
- spin_unlock_irqrestore(&priv->lock, flags);
- } else {
- iwl_legacy_stop_queue(priv, txq);
- }
- }
-
- return 0;
-
-drop_unlock:
- spin_unlock_irqrestore(&priv->lock, flags);
- return -1;
-}
-
-static inline int iwl4965_alloc_dma_ptr(struct iwl_priv *priv,
- struct iwl_dma_ptr *ptr, size_t size)
-{
- ptr->addr = dma_alloc_coherent(&priv->pci_dev->dev, size, &ptr->dma,
- GFP_KERNEL);
- if (!ptr->addr)
- return -ENOMEM;
- ptr->size = size;
- return 0;
-}
-
-static inline void iwl4965_free_dma_ptr(struct iwl_priv *priv,
- struct iwl_dma_ptr *ptr)
-{
- if (unlikely(!ptr->addr))
- return;
-
- dma_free_coherent(&priv->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
- memset(ptr, 0, sizeof(*ptr));
-}
-
-/**
- * iwl4965_hw_txq_ctx_free - Free TXQ Context
- *
- * Destroy all TX DMA queues and structures
- */
-void iwl4965_hw_txq_ctx_free(struct iwl_priv *priv)
-{
- int txq_id;
-
- /* Tx queues */
- if (priv->txq) {
- for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
- if (txq_id == priv->cmd_queue)
- iwl_legacy_cmd_queue_free(priv);
- else
- iwl_legacy_tx_queue_free(priv, txq_id);
- }
- iwl4965_free_dma_ptr(priv, &priv->kw);
-
- iwl4965_free_dma_ptr(priv, &priv->scd_bc_tbls);
-
- /* free tx queue structure */
- iwl_legacy_txq_mem(priv);
-}
-
-/**
- * iwl4965_txq_ctx_alloc - allocate TX queue context
- * Allocate all Tx DMA structures and initialize them
- *
- * @param priv
- * @return error code
- */
-int iwl4965_txq_ctx_alloc(struct iwl_priv *priv)
-{
- int ret;
- int txq_id, slots_num;
- unsigned long flags;
-
- /* Free all tx/cmd queues and keep-warm buffer */
- iwl4965_hw_txq_ctx_free(priv);
-
- ret = iwl4965_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
- priv->hw_params.scd_bc_tbls_size);
- if (ret) {
- IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
- goto error_bc_tbls;
- }
- /* Alloc keep-warm buffer */
- ret = iwl4965_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
- if (ret) {
- IWL_ERR(priv, "Keep Warm allocation failed\n");
- goto error_kw;
- }
-
- /* allocate tx queue structure */
- ret = iwl_legacy_alloc_txq_mem(priv);
- if (ret)
- goto error;
-
- spin_lock_irqsave(&priv->lock, flags);
-
- /* Turn off all Tx DMA fifos */
- iwl4965_txq_set_sched(priv, 0);
-
- /* Tell NIC where to find the "keep warm" buffer */
- iwl_legacy_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- /* Alloc and init all Tx queues, including the command queue (#4/#9) */
- for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
- slots_num = (txq_id == priv->cmd_queue) ?
- TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
- ret = iwl_legacy_tx_queue_init(priv,
- &priv->txq[txq_id], slots_num,
- txq_id);
- if (ret) {
- IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
- goto error;
- }
- }
-
- return ret;
-
- error:
- iwl4965_hw_txq_ctx_free(priv);
- iwl4965_free_dma_ptr(priv, &priv->kw);
- error_kw:
- iwl4965_free_dma_ptr(priv, &priv->scd_bc_tbls);
- error_bc_tbls:
- return ret;
-}
-
-void iwl4965_txq_ctx_reset(struct iwl_priv *priv)
-{
- int txq_id, slots_num;
- unsigned long flags;
-
- spin_lock_irqsave(&priv->lock, flags);
-
- /* Turn off all Tx DMA fifos */
- iwl4965_txq_set_sched(priv, 0);
-
- /* Tell NIC where to find the "keep warm" buffer */
- iwl_legacy_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- /* Alloc and init all Tx queues, including the command queue (#4) */
- for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
- slots_num = txq_id == priv->cmd_queue ?
- TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
- iwl_legacy_tx_queue_reset(priv, &priv->txq[txq_id],
- slots_num, txq_id);
- }
-}
-
-/**
- * iwl4965_txq_ctx_stop - Stop all Tx DMA channels
- */
-void iwl4965_txq_ctx_stop(struct iwl_priv *priv)
-{
- int ch, txq_id;
- unsigned long flags;
-
- /* Turn off all Tx DMA fifos */
- spin_lock_irqsave(&priv->lock, flags);
-
- iwl4965_txq_set_sched(priv, 0);
-
- /* Stop each Tx DMA channel, and wait for it to be idle */
- for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) {
- iwl_legacy_write_direct32(priv,
- FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
- if (iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
- FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
- 1000))
- IWL_ERR(priv, "Failing on timeout while stopping"
- " DMA channel %d [0x%08x]", ch,
- iwl_legacy_read_direct32(priv,
- FH_TSSR_TX_STATUS_REG));
- }
- spin_unlock_irqrestore(&priv->lock, flags);
-
- if (!priv->txq)
- return;
-
- /* Unmap DMA from host system and free skb's */
- for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
- if (txq_id == priv->cmd_queue)
- iwl_legacy_cmd_queue_unmap(priv);
- else
- iwl_legacy_tx_queue_unmap(priv, txq_id);
-}
-
-/*
- * Find first available (lowest unused) Tx Queue, mark it "active".
- * Called only when finding queue for aggregation.
- * Should never return anything < 7, because queues 0-6 should already
- * be in use as EDCA ACs (0-3), command (4), and reserved (5, 6).
- */
-static int iwl4965_txq_ctx_activate_free(struct iwl_priv *priv)
-{
- int txq_id;
-
- for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
- if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
- return txq_id;
- return -1;
-}
-
-/**
- * iwl4965_tx_queue_stop_scheduler - Stop queue, but keep configuration
- */
-static void iwl4965_tx_queue_stop_scheduler(struct iwl_priv *priv,
- u16 txq_id)
-{
- /* Simply stop the queue, but don't change any configuration;
- * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
- iwl_legacy_write_prph(priv,
- IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
- (0 << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE)|
- (1 << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
-}
-
-/**
- * iwl4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue
- */
-static int iwl4965_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
- u16 txq_id)
-{
- u32 tbl_dw_addr;
- u32 tbl_dw;
- u16 scd_q2ratid;
-
- scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
-
- tbl_dw_addr = priv->scd_base_addr +
- IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
-
- tbl_dw = iwl_legacy_read_targ_mem(priv, tbl_dw_addr);
-
- if (txq_id & 0x1)
- tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
- else
- tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
-
- iwl_legacy_write_targ_mem(priv, tbl_dw_addr, tbl_dw);
-
- return 0;
-}
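-
-/*
- * Illustrative example (not from the original source): each scheduler
- * translate-table dword holds two 16-bit RA/TID entries, so an even
- * txq_id (e.g. 10) replaces the lower half-word while an odd txq_id
- * (e.g. 11) is shifted into the upper half-word, leaving its
- * neighbour's mapping untouched.
- */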
-
-/**
- * iwl4965_tx_queue_agg_enable - Set up & enable aggregation for selected queue
- *
- * NOTE: txq_id must be greater than or equal to IWL49_FIRST_AMPDU_QUEUE,
- * i.e. it must be one of the higher queues used for aggregation
- */
-static int iwl4965_txq_agg_enable(struct iwl_priv *priv, int txq_id,
- int tx_fifo, int sta_id, int tid, u16 ssn_idx)
-{
- unsigned long flags;
- u16 ra_tid;
- int ret;
-
- if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
- (IWL49_FIRST_AMPDU_QUEUE +
- priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
- IWL_WARN(priv,
- "queue number out of range: %d, must be %d to %d\n",
- txq_id, IWL49_FIRST_AMPDU_QUEUE,
- IWL49_FIRST_AMPDU_QUEUE +
- priv->cfg->base_params->num_of_ampdu_queues - 1);
- return -EINVAL;
- }
-
- ra_tid = BUILD_RAxTID(sta_id, tid);
-
- /* Modify device's station table to Tx this TID */
- ret = iwl4965_sta_tx_modify_enable_tid(priv, sta_id, tid);
- if (ret)
- return ret;
-
- spin_lock_irqsave(&priv->lock, flags);
-
- /* Stop this Tx queue before configuring it */
- iwl4965_tx_queue_stop_scheduler(priv, txq_id);
-
- /* Map receiver-address / traffic-ID to this queue */
- iwl4965_tx_queue_set_q2ratid(priv, ra_tid, txq_id);
-
- /* Set this queue as a chain-building queue */
- iwl_legacy_set_bits_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
-
- /* Place first TFD at index corresponding to start sequence number.
- * Assumes that ssn_idx is valid (!= 0xFFF) */
- priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
- priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
- iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
-
- /* Set up Tx window size and frame limit for this queue */
- iwl_legacy_write_targ_mem(priv,
- priv->scd_base_addr + IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id),
- (SCD_WIN_SIZE << IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
- IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
-
- iwl_legacy_write_targ_mem(priv, priv->scd_base_addr +
- IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
- (SCD_FRAME_LIMIT << IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS)
- & IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
-
- iwl_legacy_set_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
-
- /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
- iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- return 0;
-}
-
-
-int iwl4965_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn)
-{
- int sta_id;
- int tx_fifo;
- int txq_id;
- int ret;
- unsigned long flags;
- struct iwl_tid_data *tid_data;
-
- tx_fifo = iwl4965_get_fifo_from_tid(iwl_legacy_rxon_ctx_from_vif(vif), tid);
- if (unlikely(tx_fifo < 0))
- return tx_fifo;
-
- IWL_WARN(priv, "%s on ra = %pM tid = %d\n",
- __func__, sta->addr, tid);
-
- sta_id = iwl_legacy_sta_id(sta);
- if (sta_id == IWL_INVALID_STATION) {
- IWL_ERR(priv, "Start AGG on invalid station\n");
- return -ENXIO;
- }
- if (unlikely(tid >= MAX_TID_COUNT))
- return -EINVAL;
-
- if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
- IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
- return -ENXIO;
- }
-
- txq_id = iwl4965_txq_ctx_activate_free(priv);
- if (txq_id == -1) {
- IWL_ERR(priv, "No free aggregation queue available\n");
- return -ENXIO;
- }
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- tid_data = &priv->stations[sta_id].tid[tid];
- *ssn = SEQ_TO_SN(tid_data->seq_number);
- tid_data->agg.txq_id = txq_id;
- iwl_legacy_set_swq_id(&priv->txq[txq_id],
- iwl4965_get_ac_from_tid(tid), txq_id);
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- ret = iwl4965_txq_agg_enable(priv, txq_id, tx_fifo,
- sta_id, tid, *ssn);
- if (ret)
- return ret;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- tid_data = &priv->stations[sta_id].tid[tid];
- if (tid_data->tfds_in_queue == 0) {
- IWL_DEBUG_HT(priv, "HW queue is empty\n");
- tid_data->agg.state = IWL_AGG_ON;
- ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
- } else {
- IWL_DEBUG_HT(priv,
- "HW queue is NOT empty: %d packets in HW queue\n",
- tid_data->tfds_in_queue);
- tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
- }
- spin_unlock_irqrestore(&priv->sta_lock, flags);
- return ret;
-}
-
-/**
- * txq_id must be greater than or equal to IWL49_FIRST_AMPDU_QUEUE
- * priv->lock must be held by the caller
- */
-static int iwl4965_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
- u16 ssn_idx, u8 tx_fifo)
-{
- if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
- (IWL49_FIRST_AMPDU_QUEUE +
- priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
- IWL_WARN(priv,
- "queue number out of range: %d, must be %d to %d\n",
- txq_id, IWL49_FIRST_AMPDU_QUEUE,
- IWL49_FIRST_AMPDU_QUEUE +
- priv->cfg->base_params->num_of_ampdu_queues - 1);
- return -EINVAL;
- }
-
- iwl4965_tx_queue_stop_scheduler(priv, txq_id);
-
- iwl_legacy_clear_bits_prph(priv,
- IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
-
- priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
- priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
-	/* assumes that ssn_idx is valid (!= 0xFFF) */
- iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
-
- iwl_legacy_clear_bits_prph(priv,
- IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
- iwl_txq_ctx_deactivate(priv, txq_id);
- iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
-
- return 0;
-}
-
-int iwl4965_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, u16 tid)
-{
- int tx_fifo_id, txq_id, sta_id, ssn;
- struct iwl_tid_data *tid_data;
- int write_ptr, read_ptr;
- unsigned long flags;
-
- tx_fifo_id = iwl4965_get_fifo_from_tid(iwl_legacy_rxon_ctx_from_vif(vif), tid);
- if (unlikely(tx_fifo_id < 0))
- return tx_fifo_id;
-
- sta_id = iwl_legacy_sta_id(sta);
-
- if (sta_id == IWL_INVALID_STATION) {
- IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
- return -ENXIO;
- }
-
- spin_lock_irqsave(&priv->sta_lock, flags);
-
- tid_data = &priv->stations[sta_id].tid[tid];
- ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
- txq_id = tid_data->agg.txq_id;
-
- switch (priv->stations[sta_id].tid[tid].agg.state) {
- case IWL_EMPTYING_HW_QUEUE_ADDBA:
- /*
- * This can happen if the peer stops aggregation
- * again before we've had a chance to drain the
- * queue we selected previously, i.e. before the
- * session was really started completely.
- */
- IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
- goto turn_off;
- case IWL_AGG_ON:
- break;
- default:
- IWL_WARN(priv, "Stopping AGG while state not ON or starting\n");
- }
-
- write_ptr = priv->txq[txq_id].q.write_ptr;
- read_ptr = priv->txq[txq_id].q.read_ptr;
-
- /* The queue is not empty */
- if (write_ptr != read_ptr) {
- IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n");
- priv->stations[sta_id].tid[tid].agg.state =
- IWL_EMPTYING_HW_QUEUE_DELBA;
- spin_unlock_irqrestore(&priv->sta_lock, flags);
- return 0;
- }
-
- IWL_DEBUG_HT(priv, "HW queue is empty\n");
- turn_off:
- priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
-
- /* do not restore/save irqs */
- spin_unlock(&priv->sta_lock);
- spin_lock(&priv->lock);
-
- /*
-	 * The only reason this call can fail is a queue number out of range,
-	 * which can happen if the uCode is reloaded and all the station
-	 * information is lost. If it is outside the range, there is no need
-	 * to deactivate the uCode queue; just return "success" to allow
-	 * mac80211 to clean up its own data.
- */
- iwl4965_txq_agg_disable(priv, txq_id, ssn, tx_fifo_id);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
-
- return 0;
-}
-
-int iwl4965_txq_check_empty(struct iwl_priv *priv,
- int sta_id, u8 tid, int txq_id)
-{
- struct iwl_queue *q = &priv->txq[txq_id].q;
- u8 *addr = priv->stations[sta_id].sta.sta.addr;
- struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];
- struct iwl_rxon_context *ctx;
-
- ctx = &priv->contexts[priv->stations[sta_id].ctxid];
-
- lockdep_assert_held(&priv->sta_lock);
-
- switch (priv->stations[sta_id].tid[tid].agg.state) {
- case IWL_EMPTYING_HW_QUEUE_DELBA:
- /* We are reclaiming the last packet of the */
- /* aggregated HW queue */
- if ((txq_id == tid_data->agg.txq_id) &&
- (q->read_ptr == q->write_ptr)) {
- u16 ssn = SEQ_TO_SN(tid_data->seq_number);
- int tx_fifo = iwl4965_get_fifo_from_tid(ctx, tid);
- IWL_DEBUG_HT(priv,
- "HW queue empty: continue DELBA flow\n");
- iwl4965_txq_agg_disable(priv, txq_id, ssn, tx_fifo);
- tid_data->agg.state = IWL_AGG_OFF;
- ieee80211_stop_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
- }
- break;
- case IWL_EMPTYING_HW_QUEUE_ADDBA:
- /* We are reclaiming the last packet of the queue */
- if (tid_data->tfds_in_queue == 0) {
- IWL_DEBUG_HT(priv,
- "HW queue empty: continue ADDBA flow\n");
- tid_data->agg.state = IWL_AGG_ON;
- ieee80211_start_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
- }
- break;
- }
-
- return 0;
-}
-
-static void iwl4965_non_agg_tx_status(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- const u8 *addr1)
-{
- struct ieee80211_sta *sta;
- struct iwl_station_priv *sta_priv;
-
- rcu_read_lock();
- sta = ieee80211_find_sta(ctx->vif, addr1);
- if (sta) {
- sta_priv = (void *)sta->drv_priv;
- /* avoid atomic ops if this isn't a client */
- if (sta_priv->client &&
- atomic_dec_return(&sta_priv->pending_frames) == 0)
- ieee80211_sta_block_awake(priv->hw, sta, false);
- }
- rcu_read_unlock();
-}
-
-static void
-iwl4965_tx_status(struct iwl_priv *priv, struct iwl_tx_info *tx_info,
- bool is_agg)
-{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx_info->skb->data;
-
- if (!is_agg)
- iwl4965_non_agg_tx_status(priv, tx_info->ctx, hdr->addr1);
-
- ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb);
-}
-
-int iwl4965_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
-{
- struct iwl_tx_queue *txq = &priv->txq[txq_id];
- struct iwl_queue *q = &txq->q;
- struct iwl_tx_info *tx_info;
- int nfreed = 0;
- struct ieee80211_hdr *hdr;
-
- if ((index >= q->n_bd) || (iwl_legacy_queue_used(q, index) == 0)) {
- IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
- "is out of range [0-%d] %d %d.\n", txq_id,
- index, q->n_bd, q->write_ptr, q->read_ptr);
- return 0;
- }
-
- for (index = iwl_legacy_queue_inc_wrap(index, q->n_bd);
- q->read_ptr != index;
- q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) {
-
- tx_info = &txq->txb[txq->q.read_ptr];
-
- if (WARN_ON_ONCE(tx_info->skb == NULL))
- continue;
-
- hdr = (struct ieee80211_hdr *)tx_info->skb->data;
- if (ieee80211_is_data_qos(hdr->frame_control))
- nfreed++;
-
- iwl4965_tx_status(priv, tx_info,
- txq_id >= IWL4965_FIRST_AMPDU_QUEUE);
- tx_info->skb = NULL;
-
- priv->cfg->ops->lib->txq_free_tfd(priv, txq);
- }
- return nfreed;
-}
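
For reference, a minimal userspace sketch (plain C; the queue size, helpers and numbers are illustrative, not the driver's) of the wrap-around read-pointer walk that the reclaim loop above performs:

#include <stdio.h>

#define QUEUE_SIZE 256

static int inc_wrap(int idx, int n_bd)
{
	return (idx + 1) % n_bd;
}

/* Walk the read pointer forward (with wrap-around) until it reaches the
 * slot just past 'index', "freeing" each entry on the way -- the same
 * ring-buffer reclaim pattern as above. */
static int reclaim(int *read_ptr, int index, int n_bd)
{
	int freed = 0;

	for (index = inc_wrap(index, n_bd); *read_ptr != index;
	     *read_ptr = inc_wrap(*read_ptr, n_bd))
		freed++;

	return freed;
}

int main(void)
{
	int read_ptr = 250;

	/* reclaim entries 250..3 across the wrap boundary */
	printf("freed %d entries, read_ptr now %d\n",
	       reclaim(&read_ptr, 3, QUEUE_SIZE), read_ptr);
	return 0;
}
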
-
-/**
- * iwl4965_tx_status_reply_compressed_ba - Update tx status from block-ack
- *
- * Go through block-ack's bitmap of ACK'd frames, update driver's record of
- * ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
- */
-static int iwl4965_tx_status_reply_compressed_ba(struct iwl_priv *priv,
- struct iwl_ht_agg *agg,
- struct iwl_compressed_ba_resp *ba_resp)
-
-{
- int i, sh, ack;
- u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
- u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
- int successes = 0;
- struct ieee80211_tx_info *info;
- u64 bitmap, sent_bitmap;
-
- if (unlikely(!agg->wait_for_ba)) {
- if (unlikely(ba_resp->bitmap))
- IWL_ERR(priv, "Received BA when not expected\n");
- return -EINVAL;
- }
-
- /* Mark that the expected block-ack response arrived */
- agg->wait_for_ba = 0;
- IWL_DEBUG_TX_REPLY(priv, "BA %d %d\n", agg->start_idx,
- ba_resp->seq_ctl);
-
- /* Calculate shift to align block-ack bits with our Tx window bits */
- sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4);
-	if (sh < 0) /* something is wrong with the indices */
- sh += 0x100;
-
- if (agg->frame_count > (64 - sh)) {
- IWL_DEBUG_TX_REPLY(priv, "more frames than bitmap size");
- return -1;
- }
-
- /* don't use 64-bit values for now */
- bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
-
- /* check for success or failure according to the
- * transmitted bitmap and block-ack bitmap */
- sent_bitmap = bitmap & agg->bitmap;
-
- /* For each frame attempted in aggregation,
- * update driver's record of tx frame's status. */
- i = 0;
- while (sent_bitmap) {
- ack = sent_bitmap & 1ULL;
- successes += ack;
- IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n",
- ack ? "ACK" : "NACK", i,
- (agg->start_idx + i) & 0xff,
- agg->start_idx + i);
- sent_bitmap >>= 1;
- ++i;
- }
-
- IWL_DEBUG_TX_REPLY(priv, "Bitmap %llx\n",
- (unsigned long long)bitmap);
-
- info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb);
- memset(&info->status, 0, sizeof(info->status));
- info->flags |= IEEE80211_TX_STAT_ACK;
- info->flags |= IEEE80211_TX_STAT_AMPDU;
- info->status.ampdu_ack_len = successes;
- info->status.ampdu_len = agg->frame_count;
- iwl4965_hwrate_to_tx_control(priv, agg->rate_n_flags, info);
-
- return 0;
-}
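
A standalone sketch (plain C, outside the driver; the bitmaps and shift are made-up inputs) of the shift/mask/walk the function above uses to turn the block-ack bitmap into per-frame ACK results:

#include <stdint.h>
#include <stdio.h>

/* Count ACKed frames in a block-ack bitmap after aligning it with the
 * transmitted-frame window, mirroring the shift, the AND with the
 * driver's sent bitmap, and the bit-walk done above. */
static int count_ba_acks(uint64_t ba_bitmap, uint64_t sent_mask, int shift)
{
	uint64_t acked = (ba_bitmap >> shift) & sent_mask;
	int successes = 0;
	int i = 0;

	while (acked) {
		if (acked & 1ULL) {
			successes++;
			printf("frame %d: ACK\n", i);
		}
		acked >>= 1;
		i++;
	}
	return successes;
}

int main(void)
{
	/* 4 frames sent (bits 0-3), device reports ACKs for bits 0, 1 and 3 */
	printf("successes = %d\n", count_ba_acks(0x0BULL, 0x0FULL, 0));
	return 0;
}
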
-
-/**
- * translate ucode response to mac80211 tx status control values
- */
-void iwl4965_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
- struct ieee80211_tx_info *info)
-{
- struct ieee80211_tx_rate *r = &info->control.rates[0];
-
- info->antenna_sel_tx =
- ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
- if (rate_n_flags & RATE_MCS_HT_MSK)
- r->flags |= IEEE80211_TX_RC_MCS;
- if (rate_n_flags & RATE_MCS_GF_MSK)
- r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
- if (rate_n_flags & RATE_MCS_HT40_MSK)
- r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
- if (rate_n_flags & RATE_MCS_DUP_MSK)
- r->flags |= IEEE80211_TX_RC_DUP_DATA;
- if (rate_n_flags & RATE_MCS_SGI_MSK)
- r->flags |= IEEE80211_TX_RC_SHORT_GI;
- r->idx = iwl4965_hwrate_to_mac80211_idx(rate_n_flags, info->band);
-}
-
-/**
- * iwl4965_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
- *
- * Handles block-acknowledge notification from device, which reports success
- * of frames sent via aggregation.
- */
-void iwl4965_rx_reply_compressed_ba(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
- struct iwl_tx_queue *txq = NULL;
- struct iwl_ht_agg *agg;
- int index;
- int sta_id;
- int tid;
- unsigned long flags;
-
- /* "flow" corresponds to Tx queue */
- u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
-
- /* "ssn" is start of block-ack Tx window, corresponds to index
- * (in Tx queue's circular buffer) of first TFD/frame in window */
- u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
-
- if (scd_flow >= priv->hw_params.max_txq_num) {
- IWL_ERR(priv,
- "BUG_ON scd_flow is bigger than number of queues\n");
- return;
- }
-
- txq = &priv->txq[scd_flow];
- sta_id = ba_resp->sta_id;
- tid = ba_resp->tid;
- agg = &priv->stations[sta_id].tid[tid].agg;
- if (unlikely(agg->txq_id != scd_flow)) {
- /*
-	 * FIXME: this is a uCode bug which needs to be addressed;
-	 * log the information and return for now.
-	 * Since it can happen very often, and in order not to flood
-	 * the syslog, the logging is not enabled by default.
- */
- IWL_DEBUG_TX_REPLY(priv,
- "BA scd_flow %d does not match txq_id %d\n",
- scd_flow, agg->txq_id);
- return;
- }
-
- /* Find index just before block-ack window */
- index = iwl_legacy_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
-
- spin_lock_irqsave(&priv->sta_lock, flags);
-
- IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
- "sta_id = %d\n",
- agg->wait_for_ba,
- (u8 *) &ba_resp->sta_addr_lo32,
- ba_resp->sta_id);
- IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx,"
- "scd_flow = "
- "%d, scd_ssn = %d\n",
- ba_resp->tid,
- ba_resp->seq_ctl,
- (unsigned long long)le64_to_cpu(ba_resp->bitmap),
- ba_resp->scd_flow,
- ba_resp->scd_ssn);
- IWL_DEBUG_TX_REPLY(priv, "DAT start_idx = %d, bitmap = 0x%llx\n",
- agg->start_idx,
- (unsigned long long)agg->bitmap);
-
- /* Update driver's record of ACK vs. not for each frame in window */
- iwl4965_tx_status_reply_compressed_ba(priv, agg, ba_resp);
-
- /* Release all TFDs before the SSN, i.e. all TFDs in front of
- * block-ack window (we assume that they've been successfully
- * transmitted ... if not, it's too late anyway). */
- if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
- /* calculate mac80211 ampdu sw queue to wake */
- int freed = iwl4965_tx_queue_reclaim(priv, scd_flow, index);
- iwl4965_free_tfds_in_queue(priv, sta_id, tid, freed);
-
- if ((iwl_legacy_queue_space(&txq->q) > txq->q.low_mark) &&
- priv->mac80211_registered &&
- (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
- iwl_legacy_wake_queue(priv, txq);
-
- iwl4965_txq_check_empty(priv, sta_id, tid, scd_flow);
- }
-
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-}
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-const char *iwl4965_get_tx_fail_reason(u32 status)
-{
-#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
-#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x
-
- switch (status & TX_STATUS_MSK) {
- case TX_STATUS_SUCCESS:
- return "SUCCESS";
- TX_STATUS_POSTPONE(DELAY);
- TX_STATUS_POSTPONE(FEW_BYTES);
- TX_STATUS_POSTPONE(QUIET_PERIOD);
- TX_STATUS_POSTPONE(CALC_TTAK);
- TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
- TX_STATUS_FAIL(SHORT_LIMIT);
- TX_STATUS_FAIL(LONG_LIMIT);
- TX_STATUS_FAIL(FIFO_UNDERRUN);
- TX_STATUS_FAIL(DRAIN_FLOW);
- TX_STATUS_FAIL(RFKILL_FLUSH);
- TX_STATUS_FAIL(LIFE_EXPIRE);
- TX_STATUS_FAIL(DEST_PS);
- TX_STATUS_FAIL(HOST_ABORTED);
- TX_STATUS_FAIL(BT_RETRY);
- TX_STATUS_FAIL(STA_INVALID);
- TX_STATUS_FAIL(FRAG_DROPPED);
- TX_STATUS_FAIL(TID_DISABLE);
- TX_STATUS_FAIL(FIFO_FLUSHED);
- TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
- TX_STATUS_FAIL(PASSIVE_NO_RX);
- TX_STATUS_FAIL(NO_BEACON_ON_RADAR);
- }
-
- return "UNKNOWN";
-
-#undef TX_STATUS_FAIL
-#undef TX_STATUS_POSTPONE
-}
-#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-ucode.c b/drivers/net/wireless/iwlegacy/iwl-4965-ucode.c
deleted file mode 100644
index 001d148feb94..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-ucode.c
+++ /dev/null
@@ -1,166 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-io.h"
-#include "iwl-helpers.h"
-#include "iwl-4965-hw.h"
-#include "iwl-4965.h"
-#include "iwl-4965-calib.h"
-
-#define IWL_AC_UNSET -1
-
-/**
- * iwl4965_verify_inst_sparse - verify runtime uCode image in card vs. host,
- * using sample data 100 bytes apart. If these sample points are good,
- * it's a pretty good bet that everything between them is good, too.
- */
-static int
-iwl4965_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len)
-{
- u32 val;
- int ret = 0;
- u32 errcnt = 0;
- u32 i;
-
- IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
-
- for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
- /* read data comes through single port, auto-incr addr */
- /* NOTE: Use the debugless read so we don't flood kernel log
- * if IWL_DL_IO is set */
- iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
- i + IWL4965_RTC_INST_LOWER_BOUND);
- val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
- if (val != le32_to_cpu(*image)) {
- ret = -EIO;
- errcnt++;
- if (errcnt >= 3)
- break;
- }
- }
-
- return ret;
-}
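
The spot-check idea above (sample one word every 100 bytes, give up after a few mismatches) can be shown with a small standalone sketch; this is plain userspace C with in-memory buffers standing in for the device SRAM reads:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Compare a "device" copy of an image against the host copy, sampling one
 * 32-bit word every 100 bytes, and bail out after a few mismatches. */
static int verify_sparse(const uint8_t *dev, const uint8_t *host, size_t len)
{
	size_t i;
	int errcnt = 0;

	for (i = 0; i + sizeof(uint32_t) <= len; i += 100) {
		uint32_t a, b;

		memcpy(&a, dev + i, sizeof(a));
		memcpy(&b, host + i, sizeof(b));
		if (a != b && ++errcnt >= 3)
			return -1;
	}
	return errcnt ? -1 : 0;
}

int main(void)
{
	uint8_t image[1000], copy[1000];

	memset(image, 0xAB, sizeof(image));
	memcpy(copy, image, sizeof(copy));
	printf("identical: %d\n", verify_sparse(copy, image, sizeof(image)));
	copy[200] ^= 1;	/* corrupt one sampled word */
	printf("corrupted: %d\n", verify_sparse(copy, image, sizeof(copy)));
	return 0;
}
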
-
-/**
- * iwl4965_verify_inst_full - verify runtime uCode image in card vs. host,
- * looking at all data.
- */
-static int iwl4965_verify_inst_full(struct iwl_priv *priv, __le32 *image,
- u32 len)
-{
- u32 val;
- u32 save_len = len;
- int ret = 0;
- u32 errcnt;
-
- IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
-
- iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
- IWL4965_RTC_INST_LOWER_BOUND);
-
- errcnt = 0;
- for (; len > 0; len -= sizeof(u32), image++) {
- /* read data comes through single port, auto-incr addr */
- /* NOTE: Use the debugless read so we don't flood kernel log
- * if IWL_DL_IO is set */
- val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
- if (val != le32_to_cpu(*image)) {
- IWL_ERR(priv, "uCode INST section is invalid at "
- "offset 0x%x, is 0x%x, s/b 0x%x\n",
- save_len - len, val, le32_to_cpu(*image));
- ret = -EIO;
- errcnt++;
- if (errcnt >= 20)
- break;
- }
- }
-
- if (!errcnt)
- IWL_DEBUG_INFO(priv,
- "ucode image in INSTRUCTION memory is good\n");
-
- return ret;
-}
-
-/**
- * iwl4965_verify_ucode - determine which instruction image is in SRAM,
- * and verify its contents
- */
-int iwl4965_verify_ucode(struct iwl_priv *priv)
-{
- __le32 *image;
- u32 len;
- int ret;
-
- /* Try bootstrap */
- image = (__le32 *)priv->ucode_boot.v_addr;
- len = priv->ucode_boot.len;
- ret = iwl4965_verify_inst_sparse(priv, image, len);
- if (!ret) {
- IWL_DEBUG_INFO(priv, "Bootstrap uCode is good in inst SRAM\n");
- return 0;
- }
-
- /* Try initialize */
- image = (__le32 *)priv->ucode_init.v_addr;
- len = priv->ucode_init.len;
- ret = iwl4965_verify_inst_sparse(priv, image, len);
- if (!ret) {
- IWL_DEBUG_INFO(priv, "Initialize uCode is good in inst SRAM\n");
- return 0;
- }
-
- /* Try runtime/protocol */
- image = (__le32 *)priv->ucode_code.v_addr;
- len = priv->ucode_code.len;
- ret = iwl4965_verify_inst_sparse(priv, image, len);
- if (!ret) {
- IWL_DEBUG_INFO(priv, "Runtime uCode is good in inst SRAM\n");
- return 0;
- }
-
- IWL_ERR(priv, "NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
-
- /* Since nothing seems to match, show first several data entries in
- * instruction SRAM, so maybe visual inspection will give a clue.
- * Selection of bootstrap image (vs. other images) is arbitrary. */
- image = (__le32 *)priv->ucode_boot.v_addr;
- len = priv->ucode_boot.len;
- ret = iwl4965_verify_inst_full(priv, image, len);
-
- return ret;
-}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965.c b/drivers/net/wireless/iwlegacy/iwl-4965.c
deleted file mode 100644
index 86f4fce193e4..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965.c
+++ /dev/null
@@ -1,2183 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <net/mac80211.h>
-#include <linux/etherdevice.h>
-#include <asm/unaligned.h>
-
-#include "iwl-eeprom.h"
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-io.h"
-#include "iwl-helpers.h"
-#include "iwl-4965-calib.h"
-#include "iwl-sta.h"
-#include "iwl-4965-led.h"
-#include "iwl-4965.h"
-#include "iwl-4965-debugfs.h"
-
-static int iwl4965_send_tx_power(struct iwl_priv *priv);
-static int iwl4965_hw_get_temperature(struct iwl_priv *priv);
-
-/* Highest firmware API version supported */
-#define IWL4965_UCODE_API_MAX 2
-
-/* Lowest firmware API version supported */
-#define IWL4965_UCODE_API_MIN 2
-
-#define IWL4965_FW_PRE "iwlwifi-4965-"
-#define _IWL4965_MODULE_FIRMWARE(api) IWL4965_FW_PRE #api ".ucode"
-#define IWL4965_MODULE_FIRMWARE(api) _IWL4965_MODULE_FIRMWARE(api)
-
-/* check contents of special bootstrap uCode SRAM */
-static int iwl4965_verify_bsm(struct iwl_priv *priv)
-{
- __le32 *image = priv->ucode_boot.v_addr;
- u32 len = priv->ucode_boot.len;
- u32 reg;
- u32 val;
-
- IWL_DEBUG_INFO(priv, "Begin verify bsm\n");
-
- /* verify BSM SRAM contents */
- val = iwl_legacy_read_prph(priv, BSM_WR_DWCOUNT_REG);
- for (reg = BSM_SRAM_LOWER_BOUND;
- reg < BSM_SRAM_LOWER_BOUND + len;
- reg += sizeof(u32), image++) {
- val = iwl_legacy_read_prph(priv, reg);
- if (val != le32_to_cpu(*image)) {
- IWL_ERR(priv, "BSM uCode verification failed at "
- "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
- BSM_SRAM_LOWER_BOUND,
- reg - BSM_SRAM_LOWER_BOUND, len,
- val, le32_to_cpu(*image));
- return -EIO;
- }
- }
-
- IWL_DEBUG_INFO(priv, "BSM bootstrap uCode image OK\n");
-
- return 0;
-}
-
-/**
- * iwl4965_load_bsm - Load bootstrap instructions
- *
- * BSM operation:
- *
- * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
- * in special SRAM that does not power down during RFKILL. When powering back
- * up after power-saving sleeps (or during initial uCode load), the BSM loads
- * the bootstrap program into the on-board processor, and starts it.
- *
- * The bootstrap program loads (via DMA) instructions and data for a new
- * program from host DRAM locations indicated by the host driver in the
- * BSM_DRAM_* registers. Once the new program is loaded, it starts
- * automatically.
- *
- * When initializing the NIC, the host driver points the BSM to the
- * "initialize" uCode image. This uCode sets up some internal data, then
- * notifies host via "initialize alive" that it is complete.
- *
- * The host then replaces the BSM_DRAM_* pointer values to point to the
- * normal runtime uCode instructions and a backup uCode data cache buffer
- * (filled initially with starting data values for the on-board processor),
- * then triggers the "initialize" uCode to load and launch the runtime uCode,
- * which begins normal operation.
- *
- * When doing a power-save shutdown, runtime uCode saves data SRAM into
- * the backup data cache in DRAM before SRAM is powered down.
- *
- * When powering back up, the BSM loads the bootstrap program. This reloads
- * the runtime uCode instructions and the backup data cache into SRAM,
- * and re-launches the runtime uCode from where it left off.
- */
-static int iwl4965_load_bsm(struct iwl_priv *priv)
-{
- __le32 *image = priv->ucode_boot.v_addr;
- u32 len = priv->ucode_boot.len;
- dma_addr_t pinst;
- dma_addr_t pdata;
- u32 inst_len;
- u32 data_len;
- int i;
- u32 done;
- u32 reg_offset;
- int ret;
-
- IWL_DEBUG_INFO(priv, "Begin load bsm\n");
-
- priv->ucode_type = UCODE_RT;
-
- /* make sure bootstrap program is no larger than BSM's SRAM size */
- if (len > IWL49_MAX_BSM_SIZE)
- return -EINVAL;
-
- /* Tell bootstrap uCode where to find the "Initialize" uCode
- * in host DRAM ... host DRAM physical address bits 35:4 for 4965.
- * NOTE: iwl_init_alive_start() will replace these values,
- * after the "initialize" uCode has run, to point to
- * runtime/protocol instructions and backup data cache.
- */
- pinst = priv->ucode_init.p_addr >> 4;
- pdata = priv->ucode_init_data.p_addr >> 4;
- inst_len = priv->ucode_init.len;
- data_len = priv->ucode_init_data.len;
-
- iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
- iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
- iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
- iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
-
- /* Fill BSM memory with bootstrap instructions */
- for (reg_offset = BSM_SRAM_LOWER_BOUND;
- reg_offset < BSM_SRAM_LOWER_BOUND + len;
- reg_offset += sizeof(u32), image++)
- _iwl_legacy_write_prph(priv, reg_offset, le32_to_cpu(*image));
-
- ret = iwl4965_verify_bsm(priv);
- if (ret)
- return ret;
-
- /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
- iwl_legacy_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
- iwl_legacy_write_prph(priv,
- BSM_WR_MEM_DST_REG, IWL49_RTC_INST_LOWER_BOUND);
- iwl_legacy_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
-
- /* Load bootstrap code into instruction SRAM now,
- * to prepare to load "initialize" uCode */
- iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);
-
- /* Wait for load of bootstrap uCode to finish */
- for (i = 0; i < 100; i++) {
- done = iwl_legacy_read_prph(priv, BSM_WR_CTRL_REG);
- if (!(done & BSM_WR_CTRL_REG_BIT_START))
- break;
- udelay(10);
- }
- if (i < 100)
- IWL_DEBUG_INFO(priv, "BSM write complete, poll %d iterations\n", i);
- else {
- IWL_ERR(priv, "BSM write did not complete!\n");
- return -EIO;
- }
-
- /* Enable future boot loads whenever power management unit triggers it
- * (e.g. when powering back up after power-save shutdown) */
- iwl_legacy_write_prph(priv,
- BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);
-
-
- return 0;
-}
-
-/**
- * iwl4965_set_ucode_ptrs - Set uCode address location
- *
- * Tell initialization uCode where to find runtime uCode.
- *
- * BSM registers initially contain pointers to initialization uCode.
- * We need to replace them to load runtime uCode inst and data,
- * and to save runtime data when powering down.
- */
-static int iwl4965_set_ucode_ptrs(struct iwl_priv *priv)
-{
- dma_addr_t pinst;
- dma_addr_t pdata;
- int ret = 0;
-
- /* bits 35:4 for 4965 */
- pinst = priv->ucode_code.p_addr >> 4;
- pdata = priv->ucode_data_backup.p_addr >> 4;
-
- /* Tell bootstrap uCode where to find image to load */
- iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
- iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
- iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
- priv->ucode_data.len);
-
- /* Inst byte count must be last to set up, bit 31 signals uCode
- * that all new ptr/size info is in place */
- iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
- priv->ucode_code.len | BSM_DRAM_INST_LOAD);
- IWL_DEBUG_INFO(priv, "Runtime uCode pointers are set.\n");
-
- return ret;
-}
-
-/**
- * iwl4965_init_alive_start - Called after REPLY_ALIVE notification received
- *
- * Called after REPLY_ALIVE notification received from "initialize" uCode.
- *
- * The 4965 "initialize" ALIVE reply contains calibration data for:
- * Voltage, temperature, and MIMO tx gain correction, now stored in priv
- * (3945 does not contain this data).
- *
- * Tell "initialize" uCode to go ahead and load the runtime uCode.
-*/
-static void iwl4965_init_alive_start(struct iwl_priv *priv)
-{
- /* Bootstrap uCode has loaded initialize uCode ... verify inst image.
- * This is a paranoid check, because we would not have gotten the
- * "initialize" alive if code weren't properly loaded. */
- if (iwl4965_verify_ucode(priv)) {
- /* Runtime instruction load was bad;
- * take it all the way back down so we can try again */
- IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n");
- goto restart;
- }
-
- /* Calculate temperature */
- priv->temperature = iwl4965_hw_get_temperature(priv);
-
- /* Send pointers to protocol/runtime uCode image ... init code will
- * load and launch runtime uCode, which will send us another "Alive"
- * notification. */
- IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
- if (iwl4965_set_ucode_ptrs(priv)) {
- /* Runtime instruction load won't happen;
- * take it all the way back down so we can try again */
- IWL_DEBUG_INFO(priv, "Couldn't set up uCode pointers.\n");
- goto restart;
- }
- return;
-
-restart:
- queue_work(priv->workqueue, &priv->restart);
-}
-
-static bool iw4965_is_ht40_channel(__le32 rxon_flags)
-{
- int chan_mod = le32_to_cpu(rxon_flags & RXON_FLG_CHANNEL_MODE_MSK)
- >> RXON_FLG_CHANNEL_MODE_POS;
- return ((chan_mod == CHANNEL_MODE_PURE_40) ||
- (chan_mod == CHANNEL_MODE_MIXED));
-}
-
-static void iwl4965_nic_config(struct iwl_priv *priv)
-{
- unsigned long flags;
- u16 radio_cfg;
-
- spin_lock_irqsave(&priv->lock, flags);
-
- radio_cfg = iwl_legacy_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
-
- /* write radio config values to register */
- if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) == EEPROM_4965_RF_CFG_TYPE_MAX)
- iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
- EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
- EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
- EEPROM_RF_CFG_DASH_MSK(radio_cfg));
-
- /* set CSR_HW_CONFIG_REG for uCode use */
- iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
- CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
-
- priv->calib_info = (struct iwl_eeprom_calib_info *)
- iwl_legacy_eeprom_query_addr(priv,
- EEPROM_4965_CALIB_TXPOWER_OFFSET);
-
- spin_unlock_irqrestore(&priv->lock, flags);
-}
-
-/* Reset differential Rx gains in NIC to prepare for chain noise calibration.
- * Called after every association, but this runs only once!
- * ... once chain noise is calibrated the first time, it's good forever. */
-static void iwl4965_chain_noise_reset(struct iwl_priv *priv)
-{
- struct iwl_chain_noise_data *data = &(priv->chain_noise_data);
-
- if ((data->state == IWL_CHAIN_NOISE_ALIVE) &&
- iwl_legacy_is_any_associated(priv)) {
- struct iwl_calib_diff_gain_cmd cmd;
-
- /* clear data for chain noise calibration algorithm */
- data->chain_noise_a = 0;
- data->chain_noise_b = 0;
- data->chain_noise_c = 0;
- data->chain_signal_a = 0;
- data->chain_signal_b = 0;
- data->chain_signal_c = 0;
- data->beacon_count = 0;
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.hdr.op_code = IWL_PHY_CALIBRATE_DIFF_GAIN_CMD;
- cmd.diff_gain_a = 0;
- cmd.diff_gain_b = 0;
- cmd.diff_gain_c = 0;
- if (iwl_legacy_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
- sizeof(cmd), &cmd))
- IWL_ERR(priv,
- "Could not send REPLY_PHY_CALIBRATION_CMD\n");
- data->state = IWL_CHAIN_NOISE_ACCUMULATE;
- IWL_DEBUG_CALIB(priv, "Run chain_noise_calibrate\n");
- }
-}
-
-static struct iwl_sensitivity_ranges iwl4965_sensitivity = {
- .min_nrg_cck = 97,
- .max_nrg_cck = 0, /* not used, set to 0 */
-
- .auto_corr_min_ofdm = 85,
- .auto_corr_min_ofdm_mrc = 170,
- .auto_corr_min_ofdm_x1 = 105,
- .auto_corr_min_ofdm_mrc_x1 = 220,
-
- .auto_corr_max_ofdm = 120,
- .auto_corr_max_ofdm_mrc = 210,
- .auto_corr_max_ofdm_x1 = 140,
- .auto_corr_max_ofdm_mrc_x1 = 270,
-
- .auto_corr_min_cck = 125,
- .auto_corr_max_cck = 200,
- .auto_corr_min_cck_mrc = 200,
- .auto_corr_max_cck_mrc = 400,
-
- .nrg_th_cck = 100,
- .nrg_th_ofdm = 100,
-
- .barker_corr_th_min = 190,
- .barker_corr_th_min_mrc = 390,
- .nrg_th_cca = 62,
-};
-
-static void iwl4965_set_ct_threshold(struct iwl_priv *priv)
-{
- /* want Kelvin */
- priv->hw_params.ct_kill_threshold =
- CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY);
-}
-
-/**
- * iwl4965_hw_set_hw_params
- *
- * Called when initializing driver
- */
-static int iwl4965_hw_set_hw_params(struct iwl_priv *priv)
-{
- if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
- priv->cfg->mod_params->num_of_queues <= IWL49_NUM_QUEUES)
- priv->cfg->base_params->num_of_queues =
- priv->cfg->mod_params->num_of_queues;
-
- priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
- priv->hw_params.dma_chnl_num = FH49_TCSR_CHNL_NUM;
- priv->hw_params.scd_bc_tbls_size =
- priv->cfg->base_params->num_of_queues *
- sizeof(struct iwl4965_scd_bc_tbl);
- priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
- priv->hw_params.max_stations = IWL4965_STATION_COUNT;
- priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWL4965_BROADCAST_ID;
- priv->hw_params.max_data_size = IWL49_RTC_DATA_SIZE;
- priv->hw_params.max_inst_size = IWL49_RTC_INST_SIZE;
- priv->hw_params.max_bsm_size = BSM_SRAM_SIZE;
- priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_5GHZ);
-
- priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;
-
- priv->hw_params.tx_chains_num = iwl4965_num_of_ant(priv->cfg->valid_tx_ant);
- priv->hw_params.rx_chains_num = iwl4965_num_of_ant(priv->cfg->valid_rx_ant);
- priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
- priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
-
- iwl4965_set_ct_threshold(priv);
-
- priv->hw_params.sens = &iwl4965_sensitivity;
- priv->hw_params.beacon_time_tsf_bits = IWL4965_EXT_BEACON_TIME_POS;
-
- return 0;
-}
-
-static s32 iwl4965_math_div_round(s32 num, s32 denom, s32 *res)
-{
- s32 sign = 1;
-
- if (num < 0) {
- sign = -sign;
- num = -num;
- }
- if (denom < 0) {
- sign = -sign;
- denom = -denom;
- }
- *res = 1;
- *res = ((num * 2 + denom) / (denom * 2)) * sign;
-
- return 1;
-}
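
The helper above is signed division rounded to the nearest integer, with halves rounding away from zero. A minimal standalone check of that rounding rule (plain C, same arithmetic, illustrative values):

#include <stdio.h>

/* Divide |num| by |denom|, round to nearest (halves away from zero),
 * then re-apply the sign -- the rule used by iwl4965_math_div_round(). */
static int div_round(int num, int denom)
{
	int sign = 1;

	if (num < 0) { sign = -sign; num = -num; }
	if (denom < 0) { sign = -sign; denom = -denom; }
	return sign * ((num * 2 + denom) / (denom * 2));
}

int main(void)
{
	printf("%d\n", div_round(7, 2));	/* 3.5  -> 4 */
	printf("%d\n", div_round(-7, 2));	/* -3.5 -> -4 */
	printf("%d\n", div_round(10, 4));	/* 2.5  -> 3 */
	printf("%d\n", div_round(9, 4));	/* 2.25 -> 2 */
	return 0;
}
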
-
-/**
- * iwl4965_get_voltage_compensation - Power supply voltage comp for txpower
- *
- * Determines power supply voltage compensation for txpower calculations.
- * Returns number of 1/2-dB steps to subtract from gain table index,
- * to compensate for difference between power supply voltage during
- * factory measurements, vs. current power supply voltage.
- *
- * Voltage indication is higher for lower voltage.
- * Lower voltage requires more gain (lower gain table index).
- */
-static s32 iwl4965_get_voltage_compensation(s32 eeprom_voltage,
- s32 current_voltage)
-{
- s32 comp = 0;
-
- if ((TX_POWER_IWL_ILLEGAL_VOLTAGE == eeprom_voltage) ||
- (TX_POWER_IWL_ILLEGAL_VOLTAGE == current_voltage))
- return 0;
-
- iwl4965_math_div_round(current_voltage - eeprom_voltage,
- TX_POWER_IWL_VOLTAGE_CODES_PER_03V, &comp);
-
- if (current_voltage > eeprom_voltage)
- comp *= 2;
- if ((comp < -2) || (comp > 2))
- comp = 0;
-
- return comp;
-}
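
A worked example of the compensation rule above, as a standalone sketch in plain C. CODES_PER_03V is an illustrative stand-in for TX_POWER_IWL_VOLTAGE_CODES_PER_03V (the real value lives in the 4965 headers), and the voltage readings in main() are made up:

#include <stdio.h>
#include <stdlib.h>

#define CODES_PER_03V 7	/* illustrative stand-in */

static int div_round(int num, int denom)
{
	int sign = (num < 0) == (denom < 0) ? 1 : -1;
	return sign * ((abs(num) * 2 + abs(denom)) / (abs(denom) * 2));
}

/* Half-dB gain-index steps to subtract, following the rules above:
 * one step per CODES_PER_03V of indication difference, doubled when the
 * current indication is above the factory one, and dropped to 0 if the
 * result falls outside [-2, 2]. */
static int voltage_comp(int eeprom_voltage, int current_voltage)
{
	int comp = div_round(current_voltage - eeprom_voltage, CODES_PER_03V);

	if (current_voltage > eeprom_voltage)
		comp *= 2;
	if (comp < -2 || comp > 2)
		comp = 0;
	return comp;
}

int main(void)
{
	printf("%d\n", voltage_comp(300, 307));	/* +1 step, doubled -> 2 */
	printf("%d\n", voltage_comp(300, 293));	/* -1 step          -> -1 */
	printf("%d\n", voltage_comp(300, 330));	/* too far, dropped -> 0 */
	return 0;
}
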
-
-static s32 iwl4965_get_tx_atten_grp(u16 channel)
-{
- if (channel >= CALIB_IWL_TX_ATTEN_GR5_FCH &&
- channel <= CALIB_IWL_TX_ATTEN_GR5_LCH)
- return CALIB_CH_GROUP_5;
-
- if (channel >= CALIB_IWL_TX_ATTEN_GR1_FCH &&
- channel <= CALIB_IWL_TX_ATTEN_GR1_LCH)
- return CALIB_CH_GROUP_1;
-
- if (channel >= CALIB_IWL_TX_ATTEN_GR2_FCH &&
- channel <= CALIB_IWL_TX_ATTEN_GR2_LCH)
- return CALIB_CH_GROUP_2;
-
- if (channel >= CALIB_IWL_TX_ATTEN_GR3_FCH &&
- channel <= CALIB_IWL_TX_ATTEN_GR3_LCH)
- return CALIB_CH_GROUP_3;
-
- if (channel >= CALIB_IWL_TX_ATTEN_GR4_FCH &&
- channel <= CALIB_IWL_TX_ATTEN_GR4_LCH)
- return CALIB_CH_GROUP_4;
-
- return -EINVAL;
-}
-
-static u32 iwl4965_get_sub_band(const struct iwl_priv *priv, u32 channel)
-{
- s32 b = -1;
-
- for (b = 0; b < EEPROM_TX_POWER_BANDS; b++) {
- if (priv->calib_info->band_info[b].ch_from == 0)
- continue;
-
- if ((channel >= priv->calib_info->band_info[b].ch_from)
- && (channel <= priv->calib_info->band_info[b].ch_to))
- break;
- }
-
- return b;
-}
-
-static s32 iwl4965_interpolate_value(s32 x, s32 x1, s32 y1, s32 x2, s32 y2)
-{
- s32 val;
-
- if (x2 == x1)
- return y1;
- else {
- iwl4965_math_div_round((x2 - x) * (y1 - y2), (x2 - x1), &val);
- return val + y2;
- }
-}
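
The interpolation above is plain linear interpolation between the two factory-calibrated channels of a sub-band, with the rounding division. A standalone sketch with made-up calibration points (the channel numbers and gain indexes are illustrative only):

#include <stdio.h>
#include <stdlib.h>

static int div_round(int num, int denom)
{
	int sign = (num < 0) == (denom < 0) ? 1 : -1;
	return sign * ((abs(num) * 2 + abs(denom)) / (abs(denom) * 2));
}

/* Given a measurement ya at channel xa and yb at channel xb, estimate the
 * value at channel x -- the same formula used by the function above. */
static int interpolate(int x, int xa, int ya, int xb, int yb)
{
	if (xb == xa)
		return ya;
	return yb + div_round((xb - x) * (ya - yb), xb - xa);
}

int main(void)
{
	/* factory gain index 20 on ch 36, 30 on ch 48: estimate ch 40 */
	printf("%d\n", interpolate(40, 36, 20, 48, 30));	/* -> 23 */
	return 0;
}
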
-
-/**
- * iwl4965_interpolate_chan - Interpolate factory measurements for one channel
- *
- * Interpolates factory measurements from the two sample channels within a
- * sub-band, to apply to channel of interest. Interpolation is proportional to
- * differences in channel frequencies, which is proportional to differences
- * in channel number.
- */
-static int iwl4965_interpolate_chan(struct iwl_priv *priv, u32 channel,
- struct iwl_eeprom_calib_ch_info *chan_info)
-{
- s32 s = -1;
- u32 c;
- u32 m;
- const struct iwl_eeprom_calib_measure *m1;
- const struct iwl_eeprom_calib_measure *m2;
- struct iwl_eeprom_calib_measure *omeas;
- u32 ch_i1;
- u32 ch_i2;
-
- s = iwl4965_get_sub_band(priv, channel);
- if (s >= EEPROM_TX_POWER_BANDS) {
- IWL_ERR(priv, "Tx Power can not find channel %d\n", channel);
- return -1;
- }
-
- ch_i1 = priv->calib_info->band_info[s].ch1.ch_num;
- ch_i2 = priv->calib_info->band_info[s].ch2.ch_num;
- chan_info->ch_num = (u8) channel;
-
- IWL_DEBUG_TXPOWER(priv, "channel %d subband %d factory cal ch %d & %d\n",
- channel, s, ch_i1, ch_i2);
-
- for (c = 0; c < EEPROM_TX_POWER_TX_CHAINS; c++) {
- for (m = 0; m < EEPROM_TX_POWER_MEASUREMENTS; m++) {
- m1 = &(priv->calib_info->band_info[s].ch1.
- measurements[c][m]);
- m2 = &(priv->calib_info->band_info[s].ch2.
- measurements[c][m]);
- omeas = &(chan_info->measurements[c][m]);
-
- omeas->actual_pow =
- (u8) iwl4965_interpolate_value(channel, ch_i1,
- m1->actual_pow,
- ch_i2,
- m2->actual_pow);
- omeas->gain_idx =
- (u8) iwl4965_interpolate_value(channel, ch_i1,
- m1->gain_idx, ch_i2,
- m2->gain_idx);
- omeas->temperature =
- (u8) iwl4965_interpolate_value(channel, ch_i1,
- m1->temperature,
- ch_i2,
- m2->temperature);
- omeas->pa_det =
- (s8) iwl4965_interpolate_value(channel, ch_i1,
- m1->pa_det, ch_i2,
- m2->pa_det);
-
- IWL_DEBUG_TXPOWER(priv,
- "chain %d meas %d AP1=%d AP2=%d AP=%d\n", c, m,
- m1->actual_pow, m2->actual_pow, omeas->actual_pow);
- IWL_DEBUG_TXPOWER(priv,
- "chain %d meas %d NI1=%d NI2=%d NI=%d\n", c, m,
- m1->gain_idx, m2->gain_idx, omeas->gain_idx);
- IWL_DEBUG_TXPOWER(priv,
- "chain %d meas %d PA1=%d PA2=%d PA=%d\n", c, m,
- m1->pa_det, m2->pa_det, omeas->pa_det);
- IWL_DEBUG_TXPOWER(priv,
- "chain %d meas %d T1=%d T2=%d T=%d\n", c, m,
- m1->temperature, m2->temperature,
- omeas->temperature);
- }
- }
-
- return 0;
-}
-
-/* bit-rate-dependent table to prevent Tx distortion, in half-dB units,
- * for OFDM 6, 12, 18, 24, 36, 48, 54, 60 MBit, and CCK all rates. */
-static s32 back_off_table[] = {
- 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 20 MHz */
- 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 20 MHz */
- 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 40 MHz */
- 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 40 MHz */
- 10 /* CCK */
-};
-
-/* Thermal compensation values for txpower for various frequency ranges ...
- * ratios from 3:1 to 4.5:1 of degrees (Celsius) per half-dB gain adjust */
-static struct iwl4965_txpower_comp_entry {
- s32 degrees_per_05db_a;
- s32 degrees_per_05db_a_denom;
-} tx_power_cmp_tble[CALIB_CH_GROUP_MAX] = {
- {9, 2}, /* group 0 5.2, ch 34-43 */
- {4, 1}, /* group 1 5.2, ch 44-70 */
- {4, 1}, /* group 2 5.2, ch 71-124 */
- {4, 1}, /* group 3 5.2, ch 125-200 */
- {3, 1} /* group 4 2.4, ch all */
-};
-
-static s32 get_min_power_index(s32 rate_power_index, u32 band)
-{
- if (!band) {
- if ((rate_power_index & 7) <= 4)
- return MIN_TX_GAIN_INDEX_52GHZ_EXT;
- }
- return MIN_TX_GAIN_INDEX;
-}
-
-struct gain_entry {
- u8 dsp;
- u8 radio;
-};
-
-static const struct gain_entry gain_table[2][108] = {
- /* 5.2GHz power gain index table */
- {
- {123, 0x3F}, /* highest txpower */
- {117, 0x3F},
- {110, 0x3F},
- {104, 0x3F},
- {98, 0x3F},
- {110, 0x3E},
- {104, 0x3E},
- {98, 0x3E},
- {110, 0x3D},
- {104, 0x3D},
- {98, 0x3D},
- {110, 0x3C},
- {104, 0x3C},
- {98, 0x3C},
- {110, 0x3B},
- {104, 0x3B},
- {98, 0x3B},
- {110, 0x3A},
- {104, 0x3A},
- {98, 0x3A},
- {110, 0x39},
- {104, 0x39},
- {98, 0x39},
- {110, 0x38},
- {104, 0x38},
- {98, 0x38},
- {110, 0x37},
- {104, 0x37},
- {98, 0x37},
- {110, 0x36},
- {104, 0x36},
- {98, 0x36},
- {110, 0x35},
- {104, 0x35},
- {98, 0x35},
- {110, 0x34},
- {104, 0x34},
- {98, 0x34},
- {110, 0x33},
- {104, 0x33},
- {98, 0x33},
- {110, 0x32},
- {104, 0x32},
- {98, 0x32},
- {110, 0x31},
- {104, 0x31},
- {98, 0x31},
- {110, 0x30},
- {104, 0x30},
- {98, 0x30},
- {110, 0x25},
- {104, 0x25},
- {98, 0x25},
- {110, 0x24},
- {104, 0x24},
- {98, 0x24},
- {110, 0x23},
- {104, 0x23},
- {98, 0x23},
- {110, 0x22},
- {104, 0x18},
- {98, 0x18},
- {110, 0x17},
- {104, 0x17},
- {98, 0x17},
- {110, 0x16},
- {104, 0x16},
- {98, 0x16},
- {110, 0x15},
- {104, 0x15},
- {98, 0x15},
- {110, 0x14},
- {104, 0x14},
- {98, 0x14},
- {110, 0x13},
- {104, 0x13},
- {98, 0x13},
- {110, 0x12},
- {104, 0x08},
- {98, 0x08},
- {110, 0x07},
- {104, 0x07},
- {98, 0x07},
- {110, 0x06},
- {104, 0x06},
- {98, 0x06},
- {110, 0x05},
- {104, 0x05},
- {98, 0x05},
- {110, 0x04},
- {104, 0x04},
- {98, 0x04},
- {110, 0x03},
- {104, 0x03},
- {98, 0x03},
- {110, 0x02},
- {104, 0x02},
- {98, 0x02},
- {110, 0x01},
- {104, 0x01},
- {98, 0x01},
- {110, 0x00},
- {104, 0x00},
- {98, 0x00},
- {93, 0x00},
- {88, 0x00},
- {83, 0x00},
- {78, 0x00},
- },
- /* 2.4GHz power gain index table */
- {
- {110, 0x3f}, /* highest txpower */
- {104, 0x3f},
- {98, 0x3f},
- {110, 0x3e},
- {104, 0x3e},
- {98, 0x3e},
- {110, 0x3d},
- {104, 0x3d},
- {98, 0x3d},
- {110, 0x3c},
- {104, 0x3c},
- {98, 0x3c},
- {110, 0x3b},
- {104, 0x3b},
- {98, 0x3b},
- {110, 0x3a},
- {104, 0x3a},
- {98, 0x3a},
- {110, 0x39},
- {104, 0x39},
- {98, 0x39},
- {110, 0x38},
- {104, 0x38},
- {98, 0x38},
- {110, 0x37},
- {104, 0x37},
- {98, 0x37},
- {110, 0x36},
- {104, 0x36},
- {98, 0x36},
- {110, 0x35},
- {104, 0x35},
- {98, 0x35},
- {110, 0x34},
- {104, 0x34},
- {98, 0x34},
- {110, 0x33},
- {104, 0x33},
- {98, 0x33},
- {110, 0x32},
- {104, 0x32},
- {98, 0x32},
- {110, 0x31},
- {104, 0x31},
- {98, 0x31},
- {110, 0x30},
- {104, 0x30},
- {98, 0x30},
- {110, 0x6},
- {104, 0x6},
- {98, 0x6},
- {110, 0x5},
- {104, 0x5},
- {98, 0x5},
- {110, 0x4},
- {104, 0x4},
- {98, 0x4},
- {110, 0x3},
- {104, 0x3},
- {98, 0x3},
- {110, 0x2},
- {104, 0x2},
- {98, 0x2},
- {110, 0x1},
- {104, 0x1},
- {98, 0x1},
- {110, 0x0},
- {104, 0x0},
- {98, 0x0},
- {97, 0},
- {96, 0},
- {95, 0},
- {94, 0},
- {93, 0},
- {92, 0},
- {91, 0},
- {90, 0},
- {89, 0},
- {88, 0},
- {87, 0},
- {86, 0},
- {85, 0},
- {84, 0},
- {83, 0},
- {82, 0},
- {81, 0},
- {80, 0},
- {79, 0},
- {78, 0},
- {77, 0},
- {76, 0},
- {75, 0},
- {74, 0},
- {73, 0},
- {72, 0},
- {71, 0},
- {70, 0},
- {69, 0},
- {68, 0},
- {67, 0},
- {66, 0},
- {65, 0},
- {64, 0},
- {63, 0},
- {62, 0},
- {61, 0},
- {60, 0},
- {59, 0},
- }
-};
-
-static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel,
- u8 is_ht40, u8 ctrl_chan_high,
- struct iwl4965_tx_power_db *tx_power_tbl)
-{
- u8 saturation_power;
- s32 target_power;
- s32 user_target_power;
- s32 power_limit;
- s32 current_temp;
- s32 reg_limit;
- s32 current_regulatory;
- s32 txatten_grp = CALIB_CH_GROUP_MAX;
- int i;
- int c;
- const struct iwl_channel_info *ch_info = NULL;
- struct iwl_eeprom_calib_ch_info ch_eeprom_info;
- const struct iwl_eeprom_calib_measure *measurement;
- s16 voltage;
- s32 init_voltage;
- s32 voltage_compensation;
- s32 degrees_per_05db_num;
- s32 degrees_per_05db_denom;
- s32 factory_temp;
- s32 temperature_comp[2];
- s32 factory_gain_index[2];
- s32 factory_actual_pwr[2];
- s32 power_index;
-
- /* tx_power_user_lmt is in dBm, convert to half-dBm (half-dB units
- * are used for indexing into txpower table) */
- user_target_power = 2 * priv->tx_power_user_lmt;
-
- /* Get current (RXON) channel, band, width */
- IWL_DEBUG_TXPOWER(priv, "chan %d band %d is_ht40 %d\n", channel, band,
- is_ht40);
-
- ch_info = iwl_legacy_get_channel_info(priv, priv->band, channel);
-
- if (!iwl_legacy_is_channel_valid(ch_info))
- return -EINVAL;
-
- /* get txatten group, used to select 1) thermal txpower adjustment
- * and 2) mimo txpower balance between Tx chains. */
- txatten_grp = iwl4965_get_tx_atten_grp(channel);
- if (txatten_grp < 0) {
- IWL_ERR(priv, "Can't find txatten group for channel %d.\n",
- channel);
- return txatten_grp;
- }
-
- IWL_DEBUG_TXPOWER(priv, "channel %d belongs to txatten group %d\n",
- channel, txatten_grp);
-
- if (is_ht40) {
- if (ctrl_chan_high)
- channel -= 2;
- else
- channel += 2;
- }
-
- /* hardware txpower limits ...
- * saturation (clipping distortion) txpowers are in half-dBm */
- if (band)
- saturation_power = priv->calib_info->saturation_power24;
- else
- saturation_power = priv->calib_info->saturation_power52;
-
- if (saturation_power < IWL_TX_POWER_SATURATION_MIN ||
- saturation_power > IWL_TX_POWER_SATURATION_MAX) {
- if (band)
- saturation_power = IWL_TX_POWER_DEFAULT_SATURATION_24;
- else
- saturation_power = IWL_TX_POWER_DEFAULT_SATURATION_52;
- }
-
- /* regulatory txpower limits ... reg_limit values are in half-dBm,
- * max_power_avg values are in dBm, convert * 2 */
- if (is_ht40)
- reg_limit = ch_info->ht40_max_power_avg * 2;
- else
- reg_limit = ch_info->max_power_avg * 2;
-
- if ((reg_limit < IWL_TX_POWER_REGULATORY_MIN) ||
- (reg_limit > IWL_TX_POWER_REGULATORY_MAX)) {
- if (band)
- reg_limit = IWL_TX_POWER_DEFAULT_REGULATORY_24;
- else
- reg_limit = IWL_TX_POWER_DEFAULT_REGULATORY_52;
- }
-
- /* Interpolate txpower calibration values for this channel,
- * based on factory calibration tests on spaced channels. */
- iwl4965_interpolate_chan(priv, channel, &ch_eeprom_info);
-
- /* calculate tx gain adjustment based on power supply voltage */
- voltage = le16_to_cpu(priv->calib_info->voltage);
- init_voltage = (s32)le32_to_cpu(priv->card_alive_init.voltage);
- voltage_compensation =
- iwl4965_get_voltage_compensation(voltage, init_voltage);
-
- IWL_DEBUG_TXPOWER(priv, "curr volt %d eeprom volt %d volt comp %d\n",
- init_voltage,
- voltage, voltage_compensation);
-
- /* get current temperature (Celsius) */
- current_temp = max(priv->temperature, IWL_TX_POWER_TEMPERATURE_MIN);
- current_temp = min(priv->temperature, IWL_TX_POWER_TEMPERATURE_MAX);
- current_temp = KELVIN_TO_CELSIUS(current_temp);
-
- /* select thermal txpower adjustment params, based on channel group
- * (same frequency group used for mimo txatten adjustment) */
- degrees_per_05db_num =
- tx_power_cmp_tble[txatten_grp].degrees_per_05db_a;
- degrees_per_05db_denom =
- tx_power_cmp_tble[txatten_grp].degrees_per_05db_a_denom;
-
- /* get per-chain txpower values from factory measurements */
- for (c = 0; c < 2; c++) {
- measurement = &ch_eeprom_info.measurements[c][1];
-
- /* txgain adjustment (in half-dB steps) based on difference
- * between factory and current temperature */
- factory_temp = measurement->temperature;
- iwl4965_math_div_round((current_temp - factory_temp) *
- degrees_per_05db_denom,
- degrees_per_05db_num,
- &temperature_comp[c]);
-
- factory_gain_index[c] = measurement->gain_idx;
- factory_actual_pwr[c] = measurement->actual_pow;
-
- IWL_DEBUG_TXPOWER(priv, "chain = %d\n", c);
- IWL_DEBUG_TXPOWER(priv, "fctry tmp %d, "
- "curr tmp %d, comp %d steps\n",
- factory_temp, current_temp,
- temperature_comp[c]);
-
- IWL_DEBUG_TXPOWER(priv, "fctry idx %d, fctry pwr %d\n",
- factory_gain_index[c],
- factory_actual_pwr[c]);
- }
-
- /* for each of 33 bit-rates (including 1 for CCK) */
- for (i = 0; i < POWER_TABLE_NUM_ENTRIES; i++) {
- u8 is_mimo_rate;
- union iwl4965_tx_power_dual_stream tx_power;
-
- /* for mimo, reduce each chain's txpower by half
- * (3dB, 6 steps), so total output power is regulatory
- * compliant. */
- if (i & 0x8) {
- current_regulatory = reg_limit -
- IWL_TX_POWER_MIMO_REGULATORY_COMPENSATION;
- is_mimo_rate = 1;
- } else {
- current_regulatory = reg_limit;
- is_mimo_rate = 0;
- }
-
- /* find txpower limit, either hardware or regulatory */
- power_limit = saturation_power - back_off_table[i];
- if (power_limit > current_regulatory)
- power_limit = current_regulatory;
-
- /* reduce user's txpower request if necessary
- * for this rate on this channel */
- target_power = user_target_power;
- if (target_power > power_limit)
- target_power = power_limit;
-
- IWL_DEBUG_TXPOWER(priv, "rate %d sat %d reg %d usr %d tgt %d\n",
- i, saturation_power - back_off_table[i],
- current_regulatory, user_target_power,
- target_power);
-
- /* for each of 2 Tx chains (radio transmitters) */
- for (c = 0; c < 2; c++) {
- s32 atten_value;
-
- if (is_mimo_rate)
- atten_value =
- (s32)le32_to_cpu(priv->card_alive_init.
- tx_atten[txatten_grp][c]);
- else
- atten_value = 0;
-
- /* calculate index; higher index means lower txpower */
- power_index = (u8) (factory_gain_index[c] -
- (target_power -
- factory_actual_pwr[c]) -
- temperature_comp[c] -
- voltage_compensation +
- atten_value);
-
-/* IWL_DEBUG_TXPOWER(priv, "calculated txpower index %d\n",
- power_index); */
-
- if (power_index < get_min_power_index(i, band))
- power_index = get_min_power_index(i, band);
-
- /* adjust 5 GHz index to support negative indexes */
- if (!band)
- power_index += 9;
-
- /* CCK, rate 32, reduce txpower for CCK */
- if (i == POWER_TABLE_CCK_ENTRY)
- power_index +=
- IWL_TX_POWER_CCK_COMPENSATION_C_STEP;
-
- /* stay within the table! */
- if (power_index > 107) {
- IWL_WARN(priv, "txpower index %d > 107\n",
- power_index);
- power_index = 107;
- }
- if (power_index < 0) {
- IWL_WARN(priv, "txpower index %d < 0\n",
- power_index);
- power_index = 0;
- }
-
- /* fill txpower command for this rate/chain */
- tx_power.s.radio_tx_gain[c] =
- gain_table[band][power_index].radio;
- tx_power.s.dsp_predis_atten[c] =
- gain_table[band][power_index].dsp;
-
- IWL_DEBUG_TXPOWER(priv, "chain %d mimo %d index %d "
- "gain 0x%02x dsp %d\n",
- c, atten_value, power_index,
- tx_power.s.radio_tx_gain[c],
- tx_power.s.dsp_predis_atten[c]);
- } /* for each chain */
-
- tx_power_tbl->power_tbl[i].dw = cpu_to_le32(tx_power.dw);
-
- } /* for each rate */
-
- return 0;
-}
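
The core per-chain arithmetic above (factory gain index, adjusted by the gap between target and factory power, temperature, voltage and MIMO attenuation, all in half-dB steps) can be isolated in a small standalone sketch. The driver additionally applies the minimum-index floor, the +9 offset for 5 GHz, the CCK step and the 0..107 clamp shown above; the numbers in main() are made-up illustrations, not factory data:

#include <stdio.h>

/* Clamp helper: the gain table has 108 rows per band (index 0..107). */
static int clamp_index(int idx)
{
	if (idx < 0)
		return 0;
	if (idx > 107)
		return 107;
	return idx;
}

/* Gain-table index per chain, following the arithmetic above.
 * A higher index means lower txpower. */
static int tx_gain_index(int factory_gain_index, int factory_actual_pwr,
			 int target_power, int temperature_comp,
			 int voltage_comp, int mimo_atten)
{
	int idx = factory_gain_index
		  - (target_power - factory_actual_pwr)
		  - temperature_comp
		  - voltage_comp
		  + mimo_atten;

	return clamp_index(idx);
}

int main(void)
{
	/* factory: index 60 gave 30 half-dBm; we want 24 half-dBm, with a
	 * temperature compensation of -2 steps and no voltage/mimo terms */
	printf("index = %d\n", tx_gain_index(60, 30, 24, -2, 0, 0));
	return 0;
}
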
-
-/**
- * iwl4965_send_tx_power - Configure the TXPOWER level user limit
- *
- * Uses the active RXON for channel, band, and characteristics (ht40, high)
- * The power limit is taken from priv->tx_power_user_lmt.
- */
-static int iwl4965_send_tx_power(struct iwl_priv *priv)
-{
- struct iwl4965_txpowertable_cmd cmd = { 0 };
- int ret;
- u8 band = 0;
- bool is_ht40 = false;
- u8 ctrl_chan_high = 0;
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
- if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->status),
- "TX Power requested while scanning!\n"))
- return -EAGAIN;
-
- band = priv->band == IEEE80211_BAND_2GHZ;
-
- is_ht40 = iw4965_is_ht40_channel(ctx->active.flags);
-
- if (is_ht40 && (ctx->active.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
- ctrl_chan_high = 1;
-
- cmd.band = band;
- cmd.channel = ctx->active.channel;
-
- ret = iwl4965_fill_txpower_tbl(priv, band,
- le16_to_cpu(ctx->active.channel),
- is_ht40, ctrl_chan_high, &cmd.tx_power);
- if (ret)
- goto out;
-
- ret = iwl_legacy_send_cmd_pdu(priv,
- REPLY_TX_PWR_TABLE_CMD, sizeof(cmd), &cmd);
-
-out:
- return ret;
-}
-
-static int iwl4965_send_rxon_assoc(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
-{
- int ret = 0;
- struct iwl4965_rxon_assoc_cmd rxon_assoc;
- const struct iwl_legacy_rxon_cmd *rxon1 = &ctx->staging;
- const struct iwl_legacy_rxon_cmd *rxon2 = &ctx->active;
-
- if ((rxon1->flags == rxon2->flags) &&
- (rxon1->filter_flags == rxon2->filter_flags) &&
- (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
- (rxon1->ofdm_ht_single_stream_basic_rates ==
- rxon2->ofdm_ht_single_stream_basic_rates) &&
- (rxon1->ofdm_ht_dual_stream_basic_rates ==
- rxon2->ofdm_ht_dual_stream_basic_rates) &&
- (rxon1->rx_chain == rxon2->rx_chain) &&
- (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
- IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC. Not resending.\n");
- return 0;
- }
-
- rxon_assoc.flags = ctx->staging.flags;
- rxon_assoc.filter_flags = ctx->staging.filter_flags;
- rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates;
- rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
- rxon_assoc.reserved = 0;
- rxon_assoc.ofdm_ht_single_stream_basic_rates =
- ctx->staging.ofdm_ht_single_stream_basic_rates;
- rxon_assoc.ofdm_ht_dual_stream_basic_rates =
- ctx->staging.ofdm_ht_dual_stream_basic_rates;
- rxon_assoc.rx_chain_select_flags = ctx->staging.rx_chain;
-
- ret = iwl_legacy_send_cmd_pdu_async(priv, REPLY_RXON_ASSOC,
- sizeof(rxon_assoc), &rxon_assoc, NULL);
-
- return ret;
-}
-
-static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
-{
- /* cast away the const for active_rxon in this function */
- struct iwl_legacy_rxon_cmd *active_rxon = (void *)&ctx->active;
- int ret;
- bool new_assoc =
- !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK);
-
- if (!iwl_legacy_is_alive(priv))
- return -EBUSY;
-
- if (!ctx->is_active)
- return 0;
-
- /* always get timestamp with Rx frame */
- ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;
-
- ret = iwl_legacy_check_rxon_cmd(priv, ctx);
- if (ret) {
- IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n");
- return -EINVAL;
- }
-
- /*
-	 * On receiving a commit_rxon request, abort any previous
-	 * channel switch that is still in progress.
- */
- if (test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status) &&
- (priv->switch_channel != ctx->staging.channel)) {
- IWL_DEBUG_11H(priv, "abort channel switch on %d\n",
- le16_to_cpu(priv->switch_channel));
- iwl_legacy_chswitch_done(priv, false);
- }
-
- /* If we don't need to send a full RXON, we can use
- * iwl_rxon_assoc_cmd which is used to reconfigure filter
- * and other flags for the current radio configuration. */
- if (!iwl_legacy_full_rxon_required(priv, ctx)) {
- ret = iwl_legacy_send_rxon_assoc(priv, ctx);
- if (ret) {
- IWL_ERR(priv, "Error setting RXON_ASSOC (%d)\n", ret);
- return ret;
- }
-
- memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
- iwl_legacy_print_rx_config_cmd(priv, ctx);
- /*
- * We do not commit tx power settings while channel changing,
- * do it now if tx power changed.
- */
- iwl_legacy_set_tx_power(priv, priv->tx_power_next, false);
- return 0;
- }
-
- /* If we are currently associated and the new config requires
- * an RXON_ASSOC and the new config wants the associated mask enabled,
-	 * we must clear the associated bit from the active configuration
- * before we apply the new config */
- if (iwl_legacy_is_associated_ctx(ctx) && new_assoc) {
- IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n");
- active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
-
- ret = iwl_legacy_send_cmd_pdu(priv, ctx->rxon_cmd,
- sizeof(struct iwl_legacy_rxon_cmd),
- active_rxon);
-
- /* If the mask clearing failed then we set
- * active_rxon back to what it was previously */
- if (ret) {
- active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
- IWL_ERR(priv, "Error clearing ASSOC_MSK (%d)\n", ret);
- return ret;
- }
- iwl_legacy_clear_ucode_stations(priv, ctx);
- iwl_legacy_restore_stations(priv, ctx);
- ret = iwl4965_restore_default_wep_keys(priv, ctx);
- if (ret) {
- IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
- return ret;
- }
- }
-
- IWL_DEBUG_INFO(priv, "Sending RXON\n"
- "* with%s RXON_FILTER_ASSOC_MSK\n"
- "* channel = %d\n"
- "* bssid = %pM\n",
- (new_assoc ? "" : "out"),
- le16_to_cpu(ctx->staging.channel),
- ctx->staging.bssid_addr);
-
- iwl_legacy_set_rxon_hwcrypto(priv, ctx,
- !priv->cfg->mod_params->sw_crypto);
-
- /* Apply the new configuration
- * RXON unassoc clears the station table in uCode so restoration of
- * stations is needed after it (the RXON command) completes
- */
- if (!new_assoc) {
- ret = iwl_legacy_send_cmd_pdu(priv, ctx->rxon_cmd,
- sizeof(struct iwl_legacy_rxon_cmd), &ctx->staging);
- if (ret) {
- IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
- return ret;
- }
- IWL_DEBUG_INFO(priv, "Return from !new_assoc RXON.\n");
- memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
- iwl_legacy_clear_ucode_stations(priv, ctx);
- iwl_legacy_restore_stations(priv, ctx);
- ret = iwl4965_restore_default_wep_keys(priv, ctx);
- if (ret) {
- IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
- return ret;
- }
- }
- if (new_assoc) {
- priv->start_calib = 0;
- /* Apply the new configuration
- * RXON assoc doesn't clear the station table in uCode,
- */
- ret = iwl_legacy_send_cmd_pdu(priv, ctx->rxon_cmd,
- sizeof(struct iwl_legacy_rxon_cmd), &ctx->staging);
- if (ret) {
- IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
- return ret;
- }
- memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
- }
- iwl_legacy_print_rx_config_cmd(priv, ctx);
-
- iwl4965_init_sensitivity(priv);
-
-	/* If we issue a new RXON command which requires a tune, then we must
- * send a new TXPOWER command or we won't be able to Tx any frames */
- ret = iwl_legacy_set_tx_power(priv, priv->tx_power_next, true);
- if (ret) {
- IWL_ERR(priv, "Error sending TX power (%d)\n", ret);
- return ret;
- }
-
- return 0;
-}
-
-static int iwl4965_hw_channel_switch(struct iwl_priv *priv,
- struct ieee80211_channel_switch *ch_switch)
-{
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- int rc;
- u8 band = 0;
- bool is_ht40 = false;
- u8 ctrl_chan_high = 0;
- struct iwl4965_channel_switch_cmd cmd;
- const struct iwl_channel_info *ch_info;
- u32 switch_time_in_usec, ucode_switch_time;
- u16 ch;
- u32 tsf_low;
- u8 switch_count;
- u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval);
- struct ieee80211_vif *vif = ctx->vif;
- band = priv->band == IEEE80211_BAND_2GHZ;
-
- is_ht40 = iw4965_is_ht40_channel(ctx->staging.flags);
-
- if (is_ht40 &&
- (ctx->staging.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
- ctrl_chan_high = 1;
-
- cmd.band = band;
- cmd.expect_beacon = 0;
- ch = ch_switch->channel->hw_value;
- cmd.channel = cpu_to_le16(ch);
- cmd.rxon_flags = ctx->staging.flags;
- cmd.rxon_filter_flags = ctx->staging.filter_flags;
- switch_count = ch_switch->count;
- tsf_low = ch_switch->timestamp & 0x0ffffffff;
- /*
- * calculate the ucode channel switch time
- * adding TSF as one of the factor for when to switch
- */
- if ((priv->ucode_beacon_time > tsf_low) && beacon_interval) {
- if (switch_count > ((priv->ucode_beacon_time - tsf_low) /
- beacon_interval)) {
- switch_count -= (priv->ucode_beacon_time -
- tsf_low) / beacon_interval;
- } else
- switch_count = 0;
- }
- if (switch_count <= 1)
- cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
- else {
- switch_time_in_usec =
- vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
- ucode_switch_time = iwl_legacy_usecs_to_beacons(priv,
- switch_time_in_usec,
- beacon_interval);
- cmd.switch_time = iwl_legacy_add_beacon_time(priv,
- priv->ucode_beacon_time,
- ucode_switch_time,
- beacon_interval);
- }
- IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
- cmd.switch_time);
- ch_info = iwl_legacy_get_channel_info(priv, priv->band, ch);
- if (ch_info)
- cmd.expect_beacon = iwl_legacy_is_channel_radar(ch_info);
- else {
- IWL_ERR(priv, "invalid channel switch from %u to %u\n",
- ctx->active.channel, ch);
- return -EFAULT;
- }
-
- rc = iwl4965_fill_txpower_tbl(priv, band, ch, is_ht40,
- ctrl_chan_high, &cmd.tx_power);
- if (rc) {
- IWL_DEBUG_11H(priv, "error:%d fill txpower_tbl\n", rc);
- return rc;
- }
-
- return iwl_legacy_send_cmd_pdu(priv,
- REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd);
-}
-
-/**
- * iwl4965_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
- */
-static void iwl4965_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
- struct iwl_tx_queue *txq,
- u16 byte_cnt)
-{
- struct iwl4965_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
- int txq_id = txq->q.id;
- int write_ptr = txq->q.write_ptr;
- int len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
- __le16 bc_ent;
-
- WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
-
- bc_ent = cpu_to_le16(len & 0xFFF);
- /* Set up byte count within first 256 entries */
- scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
-
- /* If within first 64 entries, duplicate at end */
- if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
- scd_bc_tbl[txq_id].
- tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
-}
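
The function above duplicates entries for the first 64 write pointers past the end of the byte-count table; presumably this lets the scheduler read a contiguous window even when it wraps the ring (the driver itself only performs the copy). A standalone sketch, with QUEUE_SIZE and DUP_ENTRIES mirroring TFD_QUEUE_SIZE_MAX and TFD_QUEUE_SIZE_BC_DUP:

#include <stdint.h>
#include <stdio.h>

#define QUEUE_SIZE	256	/* TFD_QUEUE_SIZE_MAX in the driver */
#define DUP_ENTRIES	64	/* TFD_QUEUE_SIZE_BC_DUP */

static uint16_t bc_tbl[QUEUE_SIZE + DUP_ENTRIES];

/* Mirror of the update above: store the byte count at the write pointer,
 * and duplicate entries 0..63 past the end of the ring. */
static void update_byte_cnt(int write_ptr, uint16_t len)
{
	uint16_t ent = len & 0xFFF;

	bc_tbl[write_ptr] = ent;
	if (write_ptr < DUP_ENTRIES)
		bc_tbl[QUEUE_SIZE + write_ptr] = ent;
}

int main(void)
{
	update_byte_cnt(3, 1500);
	printf("%u %u\n", bc_tbl[3], bc_tbl[QUEUE_SIZE + 3]);
	return 0;
}
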
-
-/**
- * iwl4965_hw_get_temperature - return the calibrated temperature (in Kelvin)
- * Uses the temperature reading from the most recent uCode statistics
- * notification, or from the "initialize" ALIVE response if none yet.
- *
- * A return of <0 indicates bogus data in the statistics
- */
-static int iwl4965_hw_get_temperature(struct iwl_priv *priv)
-{
- s32 temperature;
- s32 vt;
- s32 R1, R2, R3;
- u32 R4;
-
- if (test_bit(STATUS_TEMPERATURE, &priv->status) &&
- (priv->_4965.statistics.flag &
- STATISTICS_REPLY_FLG_HT40_MODE_MSK)) {
- IWL_DEBUG_TEMP(priv, "Running HT40 temperature calibration\n");
- R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[1]);
- R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[1]);
- R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[1]);
- R4 = le32_to_cpu(priv->card_alive_init.therm_r4[1]);
- } else {
- IWL_DEBUG_TEMP(priv, "Running temperature calibration\n");
- R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[0]);
- R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[0]);
- R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[0]);
- R4 = le32_to_cpu(priv->card_alive_init.therm_r4[0]);
- }
-
- /*
- * Temperature is only 23 bits, so sign extend out to 32.
- *
- * NOTE If we haven't received a statistics notification yet
- * with an updated temperature, use R4 provided to us in the
- * "initialize" ALIVE response.
- */
- if (!test_bit(STATUS_TEMPERATURE, &priv->status))
- vt = sign_extend32(R4, 23);
- else
- vt = sign_extend32(le32_to_cpu(priv->_4965.statistics.
- general.common.temperature), 23);
-
- IWL_DEBUG_TEMP(priv, "Calib values R[1-3]: %d %d %d R4: %d\n", R1, R2, R3, vt);
-
- if (R3 == R1) {
- IWL_ERR(priv, "Calibration conflict R1 == R3\n");
- return -1;
- }
-
- /* Calculate temperature in degrees Kelvin, adjust by 97%.
- * Add offset to center the adjustment around 0 degrees Centigrade. */
- temperature = TEMPERATURE_CALIB_A_VAL * (vt - R2);
- temperature /= (R3 - R1);
- temperature = (temperature * 97) / 100 + TEMPERATURE_CALIB_KELVIN_OFFSET;
-
- IWL_DEBUG_TEMP(priv, "Calibrated temperature: %dK, %dC\n",
- temperature, KELVIN_TO_CELSIUS(temperature));
-
- return temperature;
-}
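
The temperature formula above scales (vt - R2) by the calibration constant over (R3 - R1), takes 97% of the result and adds the Kelvin offset. A standalone sketch with made-up calibration points; the two constants are illustrative stand-ins for the values defined in the 4965 headers:

#include <stdio.h>

#define CALIB_A_VAL		259	/* illustrative stand-in */
#define CALIB_KELVIN_OFFSET	8	/* illustrative stand-in */

/* Same arithmetic as the calibration above: scale (vt - R2) by
 * A / (R3 - R1), take 97% of it, and add the Kelvin offset. */
static int calib_temperature(int R1, int R2, int R3, int vt)
{
	int t;

	if (R3 == R1)
		return -1;	/* calibration conflict, as in the driver */

	t = CALIB_A_VAL * (vt - R2);
	t /= (R3 - R1);
	return (t * 97) / 100 + CALIB_KELVIN_OFFSET;
}

int main(void)
{
	/* made-up calibration points and reading, for illustration only */
	int k = calib_temperature(-100, 200, 270, 630);

	printf("%d K (%d C)\n", k, k - 273);
	return 0;
}
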
-
-/* Adjust Txpower only if temperature variance is greater than threshold. */
-#define IWL_TEMPERATURE_THRESHOLD 3
-
-/**
- * iwl4965_is_temp_calib_needed - determines if new calibration is needed
- *
- * If the temperature has changed sufficiently, then a recalibration
- * is needed.
- *
- * Assumes caller will replace priv->last_temperature once calibration
- * executed.
- */
-static int iwl4965_is_temp_calib_needed(struct iwl_priv *priv)
-{
- int temp_diff;
-
- if (!test_bit(STATUS_STATISTICS, &priv->status)) {
- IWL_DEBUG_TEMP(priv, "Temperature not updated -- no statistics.\n");
- return 0;
- }
-
- temp_diff = priv->temperature - priv->last_temperature;
-
- /* get absolute value */
- if (temp_diff < 0) {
- IWL_DEBUG_POWER(priv, "Getting cooler, delta %d\n", temp_diff);
- temp_diff = -temp_diff;
- } else if (temp_diff == 0)
- IWL_DEBUG_POWER(priv, "Temperature unchanged\n");
- else
- IWL_DEBUG_POWER(priv, "Getting warmer, delta %d\n", temp_diff);
-
- if (temp_diff < IWL_TEMPERATURE_THRESHOLD) {
- IWL_DEBUG_POWER(priv, " => thermal txpower calib not needed\n");
- return 0;
- }
-
- IWL_DEBUG_POWER(priv, " => thermal txpower calib needed\n");
-
- return 1;
-}
-
-static void iwl4965_temperature_calib(struct iwl_priv *priv)
-{
- s32 temp;
-
- temp = iwl4965_hw_get_temperature(priv);
- if (IWL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(temp))
- return;
-
- if (priv->temperature != temp) {
- if (priv->temperature)
- IWL_DEBUG_TEMP(priv, "Temperature changed "
- "from %dC to %dC\n",
- KELVIN_TO_CELSIUS(priv->temperature),
- KELVIN_TO_CELSIUS(temp));
- else
- IWL_DEBUG_TEMP(priv, "Temperature "
- "initialized to %dC\n",
- KELVIN_TO_CELSIUS(temp));
- }
-
- priv->temperature = temp;
- set_bit(STATUS_TEMPERATURE, &priv->status);
-
- if (!priv->disable_tx_power_cal &&
- unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
- iwl4965_is_temp_calib_needed(priv))
- queue_work(priv->workqueue, &priv->txpower_work);
-}
-
-static u16 iwl4965_get_hcmd_size(u8 cmd_id, u16 len)
-{
- switch (cmd_id) {
- case REPLY_RXON:
- return (u16) sizeof(struct iwl4965_rxon_cmd);
- default:
- return len;
- }
-}
-
-static u16 iwl4965_build_addsta_hcmd(const struct iwl_legacy_addsta_cmd *cmd,
- u8 *data)
-{
- struct iwl4965_addsta_cmd *addsta = (struct iwl4965_addsta_cmd *)data;
- addsta->mode = cmd->mode;
- memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify));
- memcpy(&addsta->key, &cmd->key, sizeof(struct iwl4965_keyinfo));
- addsta->station_flags = cmd->station_flags;
- addsta->station_flags_msk = cmd->station_flags_msk;
- addsta->tid_disable_tx = cmd->tid_disable_tx;
- addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid;
- addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid;
- addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn;
- addsta->sleep_tx_count = cmd->sleep_tx_count;
- addsta->reserved1 = cpu_to_le16(0);
- addsta->reserved2 = cpu_to_le16(0);
-
- return (u16)sizeof(struct iwl4965_addsta_cmd);
-}
-
-static inline u32 iwl4965_get_scd_ssn(struct iwl4965_tx_resp *tx_resp)
-{
- return le32_to_cpup(&tx_resp->u.status + tx_resp->frame_count) & MAX_SN;
-}
-
-/**
- * iwl4965_tx_status_reply_tx - Handle Tx response for frames in aggregation queue
- */
-static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
- struct iwl_ht_agg *agg,
- struct iwl4965_tx_resp *tx_resp,
- int txq_id, u16 start_idx)
-{
- u16 status;
- struct agg_tx_status *frame_status = tx_resp->u.agg_status;
- struct ieee80211_tx_info *info = NULL;
- struct ieee80211_hdr *hdr = NULL;
- u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
- int i, sh, idx;
- u16 seq;
- if (agg->wait_for_ba)
- IWL_DEBUG_TX_REPLY(priv, "got tx response w/o block-ack\n");
-
- agg->frame_count = tx_resp->frame_count;
- agg->start_idx = start_idx;
- agg->rate_n_flags = rate_n_flags;
- agg->bitmap = 0;
-
- /* num frames attempted by Tx command */
- if (agg->frame_count == 1) {
- /* Only one frame was attempted; no block-ack will arrive */
- status = le16_to_cpu(frame_status[0].status);
- idx = start_idx;
-
- IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n",
- agg->frame_count, agg->start_idx, idx);
-
- info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb);
- info->status.rates[0].count = tx_resp->failure_frame + 1;
- info->flags &= ~IEEE80211_TX_CTL_AMPDU;
- info->flags |= iwl4965_tx_status_to_mac80211(status);
- iwl4965_hwrate_to_tx_control(priv, rate_n_flags, info);
-
- IWL_DEBUG_TX_REPLY(priv, "1 Frame 0x%x failure :%d\n",
- status & 0xff, tx_resp->failure_frame);
- IWL_DEBUG_TX_REPLY(priv, "Rate Info rate_n_flags=%x\n", rate_n_flags);
-
- agg->wait_for_ba = 0;
- } else {
- /* Two or more frames were attempted; expect block-ack */
- u64 bitmap = 0;
- int start = agg->start_idx;
-
- /* Construct bit-map of pending frames within Tx window */
- for (i = 0; i < agg->frame_count; i++) {
- u16 sc;
- status = le16_to_cpu(frame_status[i].status);
- seq = le16_to_cpu(frame_status[i].sequence);
- idx = SEQ_TO_INDEX(seq);
- txq_id = SEQ_TO_QUEUE(seq);
-
- if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
- AGG_TX_STATE_ABORT_MSK))
- continue;
-
- IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, txq_id=%d idx=%d\n",
- agg->frame_count, txq_id, idx);
-
- hdr = iwl_legacy_tx_queue_get_hdr(priv, txq_id, idx);
- if (!hdr) {
- IWL_ERR(priv,
- "BUG_ON idx doesn't point to valid skb"
- " idx=%d, txq_id=%d\n", idx, txq_id);
- return -1;
- }
-
- sc = le16_to_cpu(hdr->seq_ctrl);
- if (idx != (SEQ_TO_SN(sc) & 0xff)) {
- IWL_ERR(priv,
- "BUG_ON idx doesn't match seq control"
- " idx=%d, seq_idx=%d, seq=%d\n",
- idx, SEQ_TO_SN(sc), hdr->seq_ctrl);
- return -1;
- }
-
- IWL_DEBUG_TX_REPLY(priv, "AGG Frame i=%d idx %d seq=%d\n",
- i, idx, SEQ_TO_SN(sc));
-
- sh = idx - start;
- if (sh > 64) {
- sh = (start - idx) + 0xff;
- bitmap = bitmap << sh;
- sh = 0;
- start = idx;
- } else if (sh < -64)
- sh = 0xff - (start - idx);
- else if (sh < 0) {
- sh = start - idx;
- start = idx;
- bitmap = bitmap << sh;
- sh = 0;
- }
- bitmap |= 1ULL << sh;
- IWL_DEBUG_TX_REPLY(priv, "start=%d bitmap=0x%llx\n",
- start, (unsigned long long)bitmap);
- }
-
- agg->bitmap = bitmap;
- agg->start_idx = start;
- IWL_DEBUG_TX_REPLY(priv, "Frames %d start_idx=%d bitmap=0x%llx\n",
- agg->frame_count, agg->start_idx,
- (unsigned long long)agg->bitmap);
-
- if (bitmap)
- agg->wait_for_ba = 1;
- }
- return 0;
-}
-
-static u8 iwl4965_find_station(struct iwl_priv *priv, const u8 *addr)
-{
- int i;
- int start = 0;
- int ret = IWL_INVALID_STATION;
- unsigned long flags;
-
- if (priv->iw_mode == NL80211_IFTYPE_ADHOC)
- start = IWL_STA_ID;
-
- if (is_broadcast_ether_addr(addr))
- return priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- for (i = start; i < priv->hw_params.max_stations; i++)
- if (priv->stations[i].used &&
- (!compare_ether_addr(priv->stations[i].sta.sta.addr,
- addr))) {
- ret = i;
- goto out;
- }
-
- IWL_DEBUG_ASSOC_LIMIT(priv, "can not find STA %pM total %d\n",
- addr, priv->num_stations);
-
- out:
- /*
- * It may be possible that more commands interacting with stations
- * arrive before we have completed processing the addition of the
- * station.
- */
- if (ret != IWL_INVALID_STATION &&
- (!(priv->stations[ret].used & IWL_STA_UCODE_ACTIVE) ||
- ((priv->stations[ret].used & IWL_STA_UCODE_ACTIVE) &&
- (priv->stations[ret].used & IWL_STA_UCODE_INPROGRESS)))) {
- IWL_ERR(priv, "Requested station info for sta %d before ready.\n",
- ret);
- ret = IWL_INVALID_STATION;
- }
- spin_unlock_irqrestore(&priv->sta_lock, flags);
- return ret;
-}
-
-static int iwl4965_get_ra_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
-{
- if (priv->iw_mode == NL80211_IFTYPE_STATION) {
- return IWL_AP_ID;
- } else {
- u8 *da = ieee80211_get_DA(hdr);
- return iwl4965_find_station(priv, da);
- }
-}
-
-/**
- * iwl4965_rx_reply_tx - Handle standard (non-aggregation) Tx response
- */
-static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- u16 sequence = le16_to_cpu(pkt->hdr.sequence);
- int txq_id = SEQ_TO_QUEUE(sequence);
- int index = SEQ_TO_INDEX(sequence);
- struct iwl_tx_queue *txq = &priv->txq[txq_id];
- struct ieee80211_hdr *hdr;
- struct ieee80211_tx_info *info;
- struct iwl4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
- u32 status = le32_to_cpu(tx_resp->u.status);
- int uninitialized_var(tid);
- int sta_id;
- int freed;
- u8 *qc = NULL;
- unsigned long flags;
-
- if ((index >= txq->q.n_bd) || (iwl_legacy_queue_used(&txq->q, index) == 0)) {
- IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
- "is out of range [0-%d] %d %d\n", txq_id,
- index, txq->q.n_bd, txq->q.write_ptr,
- txq->q.read_ptr);
- return;
- }
-
- txq->time_stamp = jiffies;
- info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
- memset(&info->status, 0, sizeof(info->status));
-
- hdr = iwl_legacy_tx_queue_get_hdr(priv, txq_id, index);
- if (ieee80211_is_data_qos(hdr->frame_control)) {
- qc = ieee80211_get_qos_ctl(hdr);
- tid = qc[0] & 0xf;
- }
-
- sta_id = iwl4965_get_ra_sta_id(priv, hdr);
- if (txq->sched_retry && unlikely(sta_id == IWL_INVALID_STATION)) {
- IWL_ERR(priv, "Station not known\n");
- return;
- }
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- if (txq->sched_retry) {
- const u32 scd_ssn = iwl4965_get_scd_ssn(tx_resp);
- struct iwl_ht_agg *agg = NULL;
- WARN_ON(!qc);
-
- agg = &priv->stations[sta_id].tid[tid].agg;
-
- iwl4965_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index);
-
- /* check if BAR is needed */
- if ((tx_resp->frame_count == 1) && !iwl4965_is_tx_success(status))
- info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
-
- if (txq->q.read_ptr != (scd_ssn & 0xff)) {
- index = iwl_legacy_queue_dec_wrap(scd_ssn & 0xff,
- txq->q.n_bd);
- IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn "
- "%d index %d\n", scd_ssn , index);
- freed = iwl4965_tx_queue_reclaim(priv, txq_id, index);
- if (qc)
- iwl4965_free_tfds_in_queue(priv, sta_id,
- tid, freed);
-
- if (priv->mac80211_registered &&
- (iwl_legacy_queue_space(&txq->q) > txq->q.low_mark)
- && (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
- iwl_legacy_wake_queue(priv, txq);
- }
- } else {
- info->status.rates[0].count = tx_resp->failure_frame + 1;
- info->flags |= iwl4965_tx_status_to_mac80211(status);
- iwl4965_hwrate_to_tx_control(priv,
- le32_to_cpu(tx_resp->rate_n_flags),
- info);
-
- IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) "
- "rate_n_flags 0x%x retries %d\n",
- txq_id,
- iwl4965_get_tx_fail_reason(status), status,
- le32_to_cpu(tx_resp->rate_n_flags),
- tx_resp->failure_frame);
-
- freed = iwl4965_tx_queue_reclaim(priv, txq_id, index);
- if (qc && likely(sta_id != IWL_INVALID_STATION))
- iwl4965_free_tfds_in_queue(priv, sta_id, tid, freed);
- else if (sta_id == IWL_INVALID_STATION)
- IWL_DEBUG_TX_REPLY(priv, "Station not known\n");
-
- if (priv->mac80211_registered &&
- (iwl_legacy_queue_space(&txq->q) > txq->q.low_mark))
- iwl_legacy_wake_queue(priv, txq);
- }
- if (qc && likely(sta_id != IWL_INVALID_STATION))
- iwl4965_txq_check_empty(priv, sta_id, tid, txq_id);
-
- iwl4965_check_abort_status(priv, tx_resp->frame_count, status);
-
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-}
-
-static void iwl4965_rx_beacon_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl4965_beacon_notif *beacon = (void *)pkt->u.raw;
- u8 rate __maybe_unused =
- iwl4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
-
- IWL_DEBUG_RX(priv, "beacon status %#x, retries:%d ibssmgr:%d "
- "tsf:0x%.8x%.8x rate:%d\n",
- le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
- beacon->beacon_notify_hdr.failure_frame,
- le32_to_cpu(beacon->ibss_mgr_status),
- le32_to_cpu(beacon->high_tsf),
- le32_to_cpu(beacon->low_tsf), rate);
-
- priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
-}
-
-/* Set up 4965-specific Rx frame reply handlers */
-static void iwl4965_rx_handler_setup(struct iwl_priv *priv)
-{
- /* Legacy Rx frames */
- priv->rx_handlers[REPLY_RX] = iwl4965_rx_reply_rx;
- /* Tx response */
- priv->rx_handlers[REPLY_TX] = iwl4965_rx_reply_tx;
- priv->rx_handlers[BEACON_NOTIFICATION] = iwl4965_rx_beacon_notif;
-}
-
-static struct iwl_hcmd_ops iwl4965_hcmd = {
- .rxon_assoc = iwl4965_send_rxon_assoc,
- .commit_rxon = iwl4965_commit_rxon,
- .set_rxon_chain = iwl4965_set_rxon_chain,
-};
-
-static void iwl4965_post_scan(struct iwl_priv *priv)
-{
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
- /*
- * Since setting the RXON may have been deferred while
- * performing the scan, fire one off if needed
- */
- if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
- iwl_legacy_commit_rxon(priv, ctx);
-}
-
-static void iwl4965_post_associate(struct iwl_priv *priv)
-{
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- struct ieee80211_vif *vif = ctx->vif;
- struct ieee80211_conf *conf = NULL;
- int ret = 0;
-
- if (!vif || !priv->is_open)
- return;
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return;
-
- iwl_legacy_scan_cancel_timeout(priv, 200);
-
- conf = iwl_legacy_ieee80211_get_hw_conf(priv->hw);
-
- ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- iwl_legacy_commit_rxon(priv, ctx);
-
- ret = iwl_legacy_send_rxon_timing(priv, ctx);
- if (ret)
- IWL_WARN(priv, "RXON timing - "
- "Attempting to continue.\n");
-
- ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
-
- iwl_legacy_set_rxon_ht(priv, &priv->current_ht_config);
-
- if (priv->cfg->ops->hcmd->set_rxon_chain)
- priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
-
- ctx->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid);
-
- IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n",
- vif->bss_conf.aid, vif->bss_conf.beacon_int);
-
- if (vif->bss_conf.use_short_preamble)
- ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
- else
- ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
-
- if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
- if (vif->bss_conf.use_short_slot)
- ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
- else
- ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
- }
-
- iwl_legacy_commit_rxon(priv, ctx);
-
- IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
- vif->bss_conf.aid, ctx->active.bssid_addr);
-
- switch (vif->type) {
- case NL80211_IFTYPE_STATION:
- break;
- case NL80211_IFTYPE_ADHOC:
- iwl4965_send_beacon_cmd(priv);
- break;
- default:
- IWL_ERR(priv, "%s Should not be called in %d mode\n",
- __func__, vif->type);
- break;
- }
-
- /* The chain noise calibration will enable PM upon completion.
- * If chain noise has already been run, then we need to enable
- * power management here */
- if (priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE)
- iwl_legacy_power_update_mode(priv, false);
-
- /* Enable Rx differential gain and sensitivity calibrations */
- iwl4965_chain_noise_reset(priv);
- priv->start_calib = 1;
-}
-
-static void iwl4965_config_ap(struct iwl_priv *priv)
-{
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- struct ieee80211_vif *vif = ctx->vif;
- int ret = 0;
-
- lockdep_assert_held(&priv->mutex);
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return;
-
- /* The following should be done only at AP bring up */
- if (!iwl_legacy_is_associated_ctx(ctx)) {
-
- /* RXON - unassoc (to set timing command) */
- ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- iwl_legacy_commit_rxon(priv, ctx);
-
- /* RXON Timing */
- ret = iwl_legacy_send_rxon_timing(priv, ctx);
- if (ret)
- IWL_WARN(priv, "RXON timing failed - "
- "Attempting to continue.\n");
-
- /* AP has all antennas */
- priv->chain_noise_data.active_chains =
- priv->hw_params.valid_rx_ant;
- iwl_legacy_set_rxon_ht(priv, &priv->current_ht_config);
- if (priv->cfg->ops->hcmd->set_rxon_chain)
- priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
-
- ctx->staging.assoc_id = 0;
-
- if (vif->bss_conf.use_short_preamble)
- ctx->staging.flags |=
- RXON_FLG_SHORT_PREAMBLE_MSK;
- else
- ctx->staging.flags &=
- ~RXON_FLG_SHORT_PREAMBLE_MSK;
-
- if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
- if (vif->bss_conf.use_short_slot)
- ctx->staging.flags |=
- RXON_FLG_SHORT_SLOT_MSK;
- else
- ctx->staging.flags &=
- ~RXON_FLG_SHORT_SLOT_MSK;
- }
- /* need to send beacon cmd before committing assoc RXON! */
- iwl4965_send_beacon_cmd(priv);
- /* restore RXON assoc */
- ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
- iwl_legacy_commit_rxon(priv, ctx);
- }
- iwl4965_send_beacon_cmd(priv);
-}
-
-static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = {
- .get_hcmd_size = iwl4965_get_hcmd_size,
- .build_addsta_hcmd = iwl4965_build_addsta_hcmd,
- .request_scan = iwl4965_request_scan,
- .post_scan = iwl4965_post_scan,
-};
-
-static struct iwl_lib_ops iwl4965_lib = {
- .set_hw_params = iwl4965_hw_set_hw_params,
- .txq_update_byte_cnt_tbl = iwl4965_txq_update_byte_cnt_tbl,
- .txq_attach_buf_to_tfd = iwl4965_hw_txq_attach_buf_to_tfd,
- .txq_free_tfd = iwl4965_hw_txq_free_tfd,
- .txq_init = iwl4965_hw_tx_queue_init,
- .rx_handler_setup = iwl4965_rx_handler_setup,
- .is_valid_rtc_data_addr = iwl4965_hw_valid_rtc_data_addr,
- .init_alive_start = iwl4965_init_alive_start,
- .load_ucode = iwl4965_load_bsm,
- .dump_nic_error_log = iwl4965_dump_nic_error_log,
- .dump_fh = iwl4965_dump_fh,
- .set_channel_switch = iwl4965_hw_channel_switch,
- .apm_ops = {
- .init = iwl_legacy_apm_init,
- .config = iwl4965_nic_config,
- },
- .eeprom_ops = {
- .regulatory_bands = {
- EEPROM_REGULATORY_BAND_1_CHANNELS,
- EEPROM_REGULATORY_BAND_2_CHANNELS,
- EEPROM_REGULATORY_BAND_3_CHANNELS,
- EEPROM_REGULATORY_BAND_4_CHANNELS,
- EEPROM_REGULATORY_BAND_5_CHANNELS,
- EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS,
- EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS
- },
- .acquire_semaphore = iwl4965_eeprom_acquire_semaphore,
- .release_semaphore = iwl4965_eeprom_release_semaphore,
- },
- .send_tx_power = iwl4965_send_tx_power,
- .update_chain_flags = iwl4965_update_chain_flags,
- .temp_ops = {
- .temperature = iwl4965_temperature_calib,
- },
- .debugfs_ops = {
- .rx_stats_read = iwl4965_ucode_rx_stats_read,
- .tx_stats_read = iwl4965_ucode_tx_stats_read,
- .general_stats_read = iwl4965_ucode_general_stats_read,
- },
-};
-
-static const struct iwl_legacy_ops iwl4965_legacy_ops = {
- .post_associate = iwl4965_post_associate,
- .config_ap = iwl4965_config_ap,
- .manage_ibss_station = iwl4965_manage_ibss_station,
- .update_bcast_stations = iwl4965_update_bcast_stations,
-};
-
-struct ieee80211_ops iwl4965_hw_ops = {
- .tx = iwl4965_mac_tx,
- .start = iwl4965_mac_start,
- .stop = iwl4965_mac_stop,
- .add_interface = iwl_legacy_mac_add_interface,
- .remove_interface = iwl_legacy_mac_remove_interface,
- .change_interface = iwl_legacy_mac_change_interface,
- .config = iwl_legacy_mac_config,
- .configure_filter = iwl4965_configure_filter,
- .set_key = iwl4965_mac_set_key,
- .update_tkip_key = iwl4965_mac_update_tkip_key,
- .conf_tx = iwl_legacy_mac_conf_tx,
- .reset_tsf = iwl_legacy_mac_reset_tsf,
- .bss_info_changed = iwl_legacy_mac_bss_info_changed,
- .ampdu_action = iwl4965_mac_ampdu_action,
- .hw_scan = iwl_legacy_mac_hw_scan,
- .sta_add = iwl4965_mac_sta_add,
- .sta_remove = iwl_legacy_mac_sta_remove,
- .channel_switch = iwl4965_mac_channel_switch,
- .tx_last_beacon = iwl_legacy_mac_tx_last_beacon,
-};
-
-static const struct iwl_ops iwl4965_ops = {
- .lib = &iwl4965_lib,
- .hcmd = &iwl4965_hcmd,
- .utils = &iwl4965_hcmd_utils,
- .led = &iwl4965_led_ops,
- .legacy = &iwl4965_legacy_ops,
- .ieee80211_ops = &iwl4965_hw_ops,
-};
-
-static struct iwl_base_params iwl4965_base_params = {
- .eeprom_size = IWL4965_EEPROM_IMG_SIZE,
- .num_of_queues = IWL49_NUM_QUEUES,
- .num_of_ampdu_queues = IWL49_NUM_AMPDU_QUEUES,
- .pll_cfg_val = 0,
- .set_l0s = true,
- .use_bsm = true,
- .led_compensation = 61,
- .chain_noise_num_beacons = IWL4965_CAL_NUM_BEACONS,
- .wd_timeout = IWL_DEF_WD_TIMEOUT,
- .temperature_kelvin = true,
- .ucode_tracing = true,
- .sensitivity_calib_by_driver = true,
- .chain_noise_calib_by_driver = true,
-};
-
-struct iwl_cfg iwl4965_cfg = {
- .name = "Intel(R) Wireless WiFi Link 4965AGN",
- .fw_name_pre = IWL4965_FW_PRE,
- .ucode_api_max = IWL4965_UCODE_API_MAX,
- .ucode_api_min = IWL4965_UCODE_API_MIN,
- .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
- .valid_tx_ant = ANT_AB,
- .valid_rx_ant = ANT_ABC,
- .eeprom_ver = EEPROM_4965_EEPROM_VERSION,
- .eeprom_calib_ver = EEPROM_4965_TX_POWER_VERSION,
- .ops = &iwl4965_ops,
- .mod_params = &iwl4965_mod_params,
- .base_params = &iwl4965_base_params,
- .led_mode = IWL_LED_BLINK,
- /*
- * Force use of chains B and C for scan RX on 5 GHz band
- * because the device has off-channel reception on chain A.
- */
- .scan_rx_antennas[IEEE80211_BAND_5GHZ] = ANT_BC,
-};
-
-/* Module firmware */
-MODULE_FIRMWARE(IWL4965_MODULE_FIRMWARE(IWL4965_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965.h b/drivers/net/wireless/iwlegacy/iwl-4965.h
deleted file mode 100644
index 01f8163daf16..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965.h
+++ /dev/null
@@ -1,282 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *****************************************************************************/
-
-#ifndef __iwl_4965_h__
-#define __iwl_4965_h__
-
-#include "iwl-dev.h"
-
-/* configuration for the _4965 devices */
-extern struct iwl_cfg iwl4965_cfg;
-
-extern struct iwl_mod_params iwl4965_mod_params;
-
-extern struct ieee80211_ops iwl4965_hw_ops;
-
-/* tx queue */
-void iwl4965_free_tfds_in_queue(struct iwl_priv *priv,
- int sta_id, int tid, int freed);
-
-/* RXON */
-void iwl4965_set_rxon_chain(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx);
-
-/* uCode */
-int iwl4965_verify_ucode(struct iwl_priv *priv);
-
-/* lib */
-void iwl4965_check_abort_status(struct iwl_priv *priv,
- u8 frame_count, u32 status);
-
-void iwl4965_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
-int iwl4965_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
-int iwl4965_hw_nic_init(struct iwl_priv *priv);
-int iwl4965_dump_fh(struct iwl_priv *priv, char **buf, bool display);
-
-/* rx */
-void iwl4965_rx_queue_restock(struct iwl_priv *priv);
-void iwl4965_rx_replenish(struct iwl_priv *priv);
-void iwl4965_rx_replenish_now(struct iwl_priv *priv);
-void iwl4965_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
-int iwl4965_rxq_stop(struct iwl_priv *priv);
-int iwl4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
-void iwl4965_rx_reply_rx(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
-void iwl4965_rx_reply_rx_phy(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
-void iwl4965_rx_handle(struct iwl_priv *priv);
-
-/* tx */
-void iwl4965_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq);
-int iwl4965_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
- struct iwl_tx_queue *txq,
- dma_addr_t addr, u16 len, u8 reset, u8 pad);
-int iwl4965_hw_tx_queue_init(struct iwl_priv *priv,
- struct iwl_tx_queue *txq);
-void iwl4965_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
- struct ieee80211_tx_info *info);
-int iwl4965_tx_skb(struct iwl_priv *priv, struct sk_buff *skb);
-int iwl4965_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn);
-int iwl4965_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, u16 tid);
-int iwl4965_txq_check_empty(struct iwl_priv *priv,
- int sta_id, u8 tid, int txq_id);
-void iwl4965_rx_reply_compressed_ba(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
-int iwl4965_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index);
-void iwl4965_hw_txq_ctx_free(struct iwl_priv *priv);
-int iwl4965_txq_ctx_alloc(struct iwl_priv *priv);
-void iwl4965_txq_ctx_reset(struct iwl_priv *priv);
-void iwl4965_txq_ctx_stop(struct iwl_priv *priv);
-void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask);
-
-/*
- * Acquire priv->lock before calling this function!
- */
-void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index);
-/**
- * iwl4965_tx_queue_set_status - (optionally) start Tx/Cmd queue
- * @tx_fifo_id: Tx DMA/FIFO channel (range 0-7) that the queue will feed
- * @scd_retry: (1) Indicates queue will be used in aggregation mode
- *
- * NOTE: Acquire priv->lock before calling this function!
- */
-void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
- struct iwl_tx_queue *txq,
- int tx_fifo_id, int scd_retry);
-
-static inline u32 iwl4965_tx_status_to_mac80211(u32 status)
-{
- status &= TX_STATUS_MSK;
-
- switch (status) {
- case TX_STATUS_SUCCESS:
- case TX_STATUS_DIRECT_DONE:
- return IEEE80211_TX_STAT_ACK;
- case TX_STATUS_FAIL_DEST_PS:
- return IEEE80211_TX_STAT_TX_FILTERED;
- default:
- return 0;
- }
-}
-
-static inline bool iwl4965_is_tx_success(u32 status)
-{
- status &= TX_STATUS_MSK;
- return (status == TX_STATUS_SUCCESS) ||
- (status == TX_STATUS_DIRECT_DONE);
-}
-
-u8 iwl4965_toggle_tx_ant(struct iwl_priv *priv, u8 ant_idx, u8 valid);
-
-/* rx */
-void iwl4965_rx_missed_beacon_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
-bool iwl4965_good_plcp_health(struct iwl_priv *priv,
- struct iwl_rx_packet *pkt);
-void iwl4965_rx_statistics(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
-void iwl4965_reply_statistics(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
-
-/* scan */
-int iwl4965_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif);
-
-/* station mgmt */
-int iwl4965_manage_ibss_station(struct iwl_priv *priv,
- struct ieee80211_vif *vif, bool add);
-
-/* hcmd */
-int iwl4965_send_beacon_cmd(struct iwl_priv *priv);
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-const char *iwl4965_get_tx_fail_reason(u32 status);
-#else
-static inline const char *
-iwl4965_get_tx_fail_reason(u32 status) { return ""; }
-#endif
-
-/* station management */
-int iwl4965_alloc_bcast_station(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx);
-int iwl4965_add_bssid_station(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- const u8 *addr, u8 *sta_id_r);
-int iwl4965_remove_default_wep_key(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct ieee80211_key_conf *key);
-int iwl4965_set_default_wep_key(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct ieee80211_key_conf *key);
-int iwl4965_restore_default_wep_keys(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx);
-int iwl4965_set_dynamic_key(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct ieee80211_key_conf *key, u8 sta_id);
-int iwl4965_remove_dynamic_key(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct ieee80211_key_conf *key, u8 sta_id);
-void iwl4965_update_tkip_key(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct ieee80211_key_conf *keyconf,
- struct ieee80211_sta *sta, u32 iv32, u16 *phase1key);
-int iwl4965_sta_tx_modify_enable_tid(struct iwl_priv *priv,
- int sta_id, int tid);
-int iwl4965_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
- int tid, u16 ssn);
-int iwl4965_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
- int tid);
-void iwl4965_sta_modify_sleep_tx_count(struct iwl_priv *priv,
- int sta_id, int cnt);
-int iwl4965_update_bcast_stations(struct iwl_priv *priv);
-
-/* rate */
-static inline u32 iwl4965_ant_idx_to_flags(u8 ant_idx)
-{
- return BIT(ant_idx) << RATE_MCS_ANT_POS;
-}
-
-static inline u8 iwl4965_hw_get_rate(__le32 rate_n_flags)
-{
- return le32_to_cpu(rate_n_flags) & 0xFF;
-}
-
-static inline __le32 iwl4965_hw_set_rate_n_flags(u8 rate, u32 flags)
-{
- return cpu_to_le32(flags|(u32)rate);
-}
-
-/* eeprom */
-void iwl4965_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac);
-int iwl4965_eeprom_acquire_semaphore(struct iwl_priv *priv);
-void iwl4965_eeprom_release_semaphore(struct iwl_priv *priv);
-int iwl4965_eeprom_check_version(struct iwl_priv *priv);
-
-/* mac80211 handlers (for 4965) */
-void iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
-int iwl4965_mac_start(struct ieee80211_hw *hw);
-void iwl4965_mac_stop(struct ieee80211_hw *hw);
-void iwl4965_configure_filter(struct ieee80211_hw *hw,
- unsigned int changed_flags,
- unsigned int *total_flags,
- u64 multicast);
-int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
- struct ieee80211_vif *vif, struct ieee80211_sta *sta,
- struct ieee80211_key_conf *key);
-void iwl4965_mac_update_tkip_key(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_key_conf *keyconf,
- struct ieee80211_sta *sta,
- u32 iv32, u16 *phase1key);
-int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn,
- u8 buf_size);
-int iwl4965_mac_sta_add(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
-void iwl4965_mac_channel_switch(struct ieee80211_hw *hw,
- struct ieee80211_channel_switch *ch_switch);
-
-#endif /* __iwl_4965_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-core.c b/drivers/net/wireless/iwlegacy/iwl-core.c
deleted file mode 100644
index 2bd5659310d7..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-core.c
+++ /dev/null
@@ -1,2661 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/etherdevice.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <net/mac80211.h>
-
-#include "iwl-eeprom.h"
-#include "iwl-dev.h"
-#include "iwl-debug.h"
-#include "iwl-core.h"
-#include "iwl-io.h"
-#include "iwl-power.h"
-#include "iwl-sta.h"
-#include "iwl-helpers.h"
-
-
-MODULE_DESCRIPTION("iwl-legacy: common functions for 3945 and 4965");
-MODULE_VERSION(IWLWIFI_VERSION);
-MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
-MODULE_LICENSE("GPL");
-
-/*
- * If bt_coex_active is set to true, the uCode will kill/defer every time
- * the priority line is asserted (BT is sending signals on the priority
- * line in the PCIx).
- * If bt_coex_active is set to false, the uCode will ignore BT activity
- * and perform normal operation.
- *
- * Users might experience transmit issues on some platforms due to this
- * WiFi/BT co-existence problem. The possible behaviors are:
- *   Able to scan and find all the available APs
- *   Not able to associate with any AP
- * On those platforms, WiFi communication can be restored by setting the
- * "bt_coex_active" module parameter to "false".
- *
- * default: bt_coex_active = true (BT_COEX_ENABLE)
- */
-static bool bt_coex_active = true;
-module_param(bt_coex_active, bool, S_IRUGO);
-MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist");
-
-u32 iwlegacy_debug_level;
-EXPORT_SYMBOL(iwlegacy_debug_level);
-
-const u8 iwlegacy_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
-EXPORT_SYMBOL(iwlegacy_bcast_addr);
-
-
-/* This function both allocates and initializes hw and priv. */
-struct ieee80211_hw *iwl_legacy_alloc_all(struct iwl_cfg *cfg)
-{
- struct iwl_priv *priv;
- /* mac80211 allocates memory for this device instance, including
- * space for this driver's private structure */
- struct ieee80211_hw *hw;
-
- hw = ieee80211_alloc_hw(sizeof(struct iwl_priv),
- cfg->ops->ieee80211_ops);
- if (hw == NULL) {
- pr_err("%s: Can not allocate network device\n",
- cfg->name);
- goto out;
- }
-
- priv = hw->priv;
- priv->hw = hw;
-
-out:
- return hw;
-}
-EXPORT_SYMBOL(iwl_legacy_alloc_all);
-
-#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
-#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
-static void iwl_legacy_init_ht_hw_capab(const struct iwl_priv *priv,
- struct ieee80211_sta_ht_cap *ht_info,
- enum ieee80211_band band)
-{
- u16 max_bit_rate = 0;
- u8 rx_chains_num = priv->hw_params.rx_chains_num;
- u8 tx_chains_num = priv->hw_params.tx_chains_num;
-
- ht_info->cap = 0;
- memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
-
- ht_info->ht_supported = true;
-
- ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
- max_bit_rate = MAX_BIT_RATE_20_MHZ;
- if (priv->hw_params.ht40_channel & BIT(band)) {
- ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
- ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
- ht_info->mcs.rx_mask[4] = 0x01;
- max_bit_rate = MAX_BIT_RATE_40_MHZ;
- }
-
- if (priv->cfg->mod_params->amsdu_size_8K)
- ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
-
- ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
- ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;
-
- ht_info->mcs.rx_mask[0] = 0xFF;
- if (rx_chains_num >= 2)
- ht_info->mcs.rx_mask[1] = 0xFF;
- if (rx_chains_num >= 3)
- ht_info->mcs.rx_mask[2] = 0xFF;
-
- /* Highest supported Rx data rate */
- max_bit_rate *= rx_chains_num;
- WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
- ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);
-
- /* Tx MCS capabilities */
- ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
- if (tx_chains_num != rx_chains_num) {
- ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
- ht_info->mcs.tx_params |= ((tx_chains_num - 1) <<
- IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
- }
-}
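As a side note on the MCS setup above, a standalone sketch of how the Rx MCS bitmap and highest Rx rate scale with the number of receive chains; the ieee80211_sta_ht_cap MCS layout is reduced to a plain struct here, and the per-stream rates are the MAX_BIT_RATE_* values defined above:

#include <stdint.h>
#include <string.h>

struct mcs_sketch {
	uint8_t  rx_mask[10];	/* one bit per MCS index, 8 MCS per stream */
	uint16_t rx_highest;	/* highest supported Rx data rate, Mbps */
};

static void fill_mcs(struct mcs_sketch *mcs, int rx_chains, int ht40_ok)
{
	uint16_t max_bit_rate = ht40_ok ? 150 : 72;	/* per-stream rate, Mbps */

	memset(mcs, 0, sizeof(*mcs));
	mcs->rx_mask[0] = 0xFF;			/* MCS 0-7: one spatial stream */
	if (rx_chains >= 2)
		mcs->rx_mask[1] = 0xFF;		/* MCS 8-15: two streams */
	if (rx_chains >= 3)
		mcs->rx_mask[2] = 0xFF;		/* MCS 16-23: three streams */
	if (ht40_ok)
		mcs->rx_mask[4] = 0x01;		/* MCS 32 (HT40 duplicate format) */

	mcs->rx_highest = max_bit_rate * rx_chains;
}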
-
-/**
- * iwl_legacy_init_geos - Initialize mac80211's geo/channel info based on eeprom
- */
-int iwl_legacy_init_geos(struct iwl_priv *priv)
-{
- struct iwl_channel_info *ch;
- struct ieee80211_supported_band *sband;
- struct ieee80211_channel *channels;
- struct ieee80211_channel *geo_ch;
- struct ieee80211_rate *rates;
- int i = 0;
- s8 max_tx_power = 0;
-
- if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
- priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
- IWL_DEBUG_INFO(priv, "Geography modes already initialized.\n");
- set_bit(STATUS_GEO_CONFIGURED, &priv->status);
- return 0;
- }
-
- channels = kzalloc(sizeof(struct ieee80211_channel) *
- priv->channel_count, GFP_KERNEL);
- if (!channels)
- return -ENOMEM;
-
- rates = kzalloc((sizeof(struct ieee80211_rate) * IWL_RATE_COUNT_LEGACY),
- GFP_KERNEL);
- if (!rates) {
- kfree(channels);
- return -ENOMEM;
- }
-
- /* 5.2GHz channels start after the 2.4GHz channels */
- sband = &priv->bands[IEEE80211_BAND_5GHZ];
- sband->channels = &channels[ARRAY_SIZE(iwlegacy_eeprom_band_1)];
- /* just OFDM */
- sband->bitrates = &rates[IWL_FIRST_OFDM_RATE];
- sband->n_bitrates = IWL_RATE_COUNT_LEGACY - IWL_FIRST_OFDM_RATE;
-
- if (priv->cfg->sku & IWL_SKU_N)
- iwl_legacy_init_ht_hw_capab(priv, &sband->ht_cap,
- IEEE80211_BAND_5GHZ);
-
- sband = &priv->bands[IEEE80211_BAND_2GHZ];
- sband->channels = channels;
- /* OFDM & CCK */
- sband->bitrates = rates;
- sband->n_bitrates = IWL_RATE_COUNT_LEGACY;
-
- if (priv->cfg->sku & IWL_SKU_N)
- iwl_legacy_init_ht_hw_capab(priv, &sband->ht_cap,
- IEEE80211_BAND_2GHZ);
-
- priv->ieee_channels = channels;
- priv->ieee_rates = rates;
-
- for (i = 0; i < priv->channel_count; i++) {
- ch = &priv->channel_info[i];
-
- if (!iwl_legacy_is_channel_valid(ch))
- continue;
-
- sband = &priv->bands[ch->band];
-
- geo_ch = &sband->channels[sband->n_channels++];
-
- geo_ch->center_freq =
- ieee80211_channel_to_frequency(ch->channel, ch->band);
- geo_ch->max_power = ch->max_power_avg;
- geo_ch->max_antenna_gain = 0xff;
- geo_ch->hw_value = ch->channel;
-
- if (iwl_legacy_is_channel_valid(ch)) {
- if (!(ch->flags & EEPROM_CHANNEL_IBSS))
- geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;
-
- if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
- geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
-
- if (ch->flags & EEPROM_CHANNEL_RADAR)
- geo_ch->flags |= IEEE80211_CHAN_RADAR;
-
- geo_ch->flags |= ch->ht40_extension_channel;
-
- if (ch->max_power_avg > max_tx_power)
- max_tx_power = ch->max_power_avg;
- } else {
- geo_ch->flags |= IEEE80211_CHAN_DISABLED;
- }
-
- IWL_DEBUG_INFO(priv, "Channel %d Freq=%d[%sGHz] %s flag=0x%X\n",
- ch->channel, geo_ch->center_freq,
- iwl_legacy_is_channel_a_band(ch) ? "5.2" : "2.4",
- geo_ch->flags & IEEE80211_CHAN_DISABLED ?
- "restricted" : "valid",
- geo_ch->flags);
- }
-
- priv->tx_power_device_lmt = max_tx_power;
- priv->tx_power_user_lmt = max_tx_power;
- priv->tx_power_next = max_tx_power;
-
- if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
- priv->cfg->sku & IWL_SKU_A) {
- IWL_INFO(priv, "Incorrectly detected BG card as ABG. "
- "Please send your PCI ID 0x%04X:0x%04X to maintainer.\n",
- priv->pci_dev->device,
- priv->pci_dev->subsystem_device);
- priv->cfg->sku &= ~IWL_SKU_A;
- }
-
- IWL_INFO(priv, "Tunable channels: %d 802.11bg, %d 802.11a channels\n",
- priv->bands[IEEE80211_BAND_2GHZ].n_channels,
- priv->bands[IEEE80211_BAND_5GHZ].n_channels);
-
- set_bit(STATUS_GEO_CONFIGURED, &priv->status);
-
- return 0;
-}
-EXPORT_SYMBOL(iwl_legacy_init_geos);
-
-/*
- * iwl_legacy_free_geos - undo allocations in iwl_legacy_init_geos
- */
-void iwl_legacy_free_geos(struct iwl_priv *priv)
-{
- kfree(priv->ieee_channels);
- kfree(priv->ieee_rates);
- clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
-}
-EXPORT_SYMBOL(iwl_legacy_free_geos);
-
-static bool iwl_legacy_is_channel_extension(struct iwl_priv *priv,
- enum ieee80211_band band,
- u16 channel, u8 extension_chan_offset)
-{
- const struct iwl_channel_info *ch_info;
-
- ch_info = iwl_legacy_get_channel_info(priv, band, channel);
- if (!iwl_legacy_is_channel_valid(ch_info))
- return false;
-
- if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE)
- return !(ch_info->ht40_extension_channel &
- IEEE80211_CHAN_NO_HT40PLUS);
- else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW)
- return !(ch_info->ht40_extension_channel &
- IEEE80211_CHAN_NO_HT40MINUS);
-
- return false;
-}
-
-bool iwl_legacy_is_ht40_tx_allowed(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct ieee80211_sta_ht_cap *ht_cap)
-{
- if (!ctx->ht.enabled || !ctx->ht.is_40mhz)
- return false;
-
- /*
- * We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40 because
- * the bit will not be set in the pure 40 MHz case.
- */
- if (ht_cap && !ht_cap->ht_supported)
- return false;
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
- if (priv->disable_ht40)
- return false;
-#endif
-
- return iwl_legacy_is_channel_extension(priv, priv->band,
- le16_to_cpu(ctx->staging.channel),
- ctx->ht.extension_chan_offset);
-}
-EXPORT_SYMBOL(iwl_legacy_is_ht40_tx_allowed);
-
-static u16 iwl_legacy_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
-{
- u16 new_val;
- u16 beacon_factor;
-
- /*
- * If mac80211 hasn't given us a beacon interval, program
- * the default into the device.
- */
- if (!beacon_val)
- return DEFAULT_BEACON_INTERVAL;
-
- /*
- * If the beacon interval we obtained from the peer
- * is too large, we'll have to wake up more often
- * (and in IBSS case, we'll beacon too much)
- *
- * For example, if max_beacon_val is 4096, and the
- * requested beacon interval is 7000, we'll have to
- * use 3500 to be able to wake up on the beacons.
- *
- * This could badly influence beacon detection stats.
- */
-
- beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val;
- new_val = beacon_val / beacon_factor;
-
- if (!new_val)
- new_val = max_beacon_val;
-
- return new_val;
-}
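A standalone sketch of the adjustment above, reproducing the worked example from the comment (with a max_beacon_val of 4096, a requested interval of 7000 is divided by a factor of 2, giving 3500); DEFAULT_BEACON_INTERVAL is a placeholder value here:

#include <stdint.h>
#include <stdio.h>

#define DEFAULT_BEACON_INTERVAL	100	/* placeholder fallback value */

static uint16_t adjust_beacon_interval(uint16_t beacon_val, uint16_t max_beacon_val)
{
	uint16_t beacon_factor, new_val;

	if (!beacon_val)
		return DEFAULT_BEACON_INTERVAL;

	/* pick a divisor large enough that the result fits under max_beacon_val */
	beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val;
	new_val = beacon_val / beacon_factor;

	return new_val ? new_val : max_beacon_val;
}

int main(void)
{
	printf("%u\n", adjust_beacon_interval(7000, 4096));	/* prints 3500 */
	return 0;
}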
-
-int
-iwl_legacy_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
-{
- u64 tsf;
- s32 interval_tm, rem;
- struct ieee80211_conf *conf = NULL;
- u16 beacon_int;
- struct ieee80211_vif *vif = ctx->vif;
-
- conf = iwl_legacy_ieee80211_get_hw_conf(priv->hw);
-
- lockdep_assert_held(&priv->mutex);
-
- memset(&ctx->timing, 0, sizeof(struct iwl_rxon_time_cmd));
-
- ctx->timing.timestamp = cpu_to_le64(priv->timestamp);
- ctx->timing.listen_interval = cpu_to_le16(conf->listen_interval);
-
- beacon_int = vif ? vif->bss_conf.beacon_int : 0;
-
- /*
- * TODO: For IBSS we need to get atim_window from mac80211,
- * for now just always use 0
- */
- ctx->timing.atim_window = 0;
-
- beacon_int = iwl_legacy_adjust_beacon_interval(beacon_int,
- priv->hw_params.max_beacon_itrvl * TIME_UNIT);
- ctx->timing.beacon_interval = cpu_to_le16(beacon_int);
-
- tsf = priv->timestamp; /* tsf is modified by do_div: copy it */
- interval_tm = beacon_int * TIME_UNIT;
- rem = do_div(tsf, interval_tm);
- ctx->timing.beacon_init_val = cpu_to_le32(interval_tm - rem);
-
- ctx->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ?: 1) : 1;
-
- IWL_DEBUG_ASSOC(priv,
- "beacon interval %d beacon timer %d beacon tim %d\n",
- le16_to_cpu(ctx->timing.beacon_interval),
- le32_to_cpu(ctx->timing.beacon_init_val),
- le16_to_cpu(ctx->timing.atim_window));
-
- return iwl_legacy_send_cmd_pdu(priv, ctx->rxon_timing_cmd,
- sizeof(ctx->timing), &ctx->timing);
-}
-EXPORT_SYMBOL(iwl_legacy_send_rxon_timing);
-
-void
-iwl_legacy_set_rxon_hwcrypto(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- int hw_decrypt)
-{
- struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;
-
- if (hw_decrypt)
- rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
- else
- rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
-
-}
-EXPORT_SYMBOL(iwl_legacy_set_rxon_hwcrypto);
-
-/* validate RXON structure is valid */
-int
-iwl_legacy_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
-{
- struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;
- bool error = false;
-
- if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
- if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) {
- IWL_WARN(priv, "check 2.4G: wrong narrow\n");
- error = true;
- }
- if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) {
- IWL_WARN(priv, "check 2.4G: wrong radar\n");
- error = true;
- }
- } else {
- if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) {
- IWL_WARN(priv, "check 5.2G: not short slot!\n");
- error = true;
- }
- if (rxon->flags & RXON_FLG_CCK_MSK) {
- IWL_WARN(priv, "check 5.2G: CCK!\n");
- error = true;
- }
- }
- if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) {
- IWL_WARN(priv, "mac/bssid mcast!\n");
- error = true;
- }
-
- /* make sure basic rates 6Mbps and 1Mbps are supported */
- if ((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0 &&
- (rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0) {
- IWL_WARN(priv, "neither 1 nor 6 are basic\n");
- error = true;
- }
-
- if (le16_to_cpu(rxon->assoc_id) > 2007) {
- IWL_WARN(priv, "aid > 2007\n");
- error = true;
- }
-
- if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK))
- == (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) {
- IWL_WARN(priv, "CCK and short slot\n");
- error = true;
- }
-
- if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
- == (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) {
- IWL_WARN(priv, "CCK and auto detect");
- error = true;
- }
-
- if ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK |
- RXON_FLG_TGG_PROTECT_MSK)) ==
- RXON_FLG_TGG_PROTECT_MSK) {
- IWL_WARN(priv, "TGg but no auto-detect\n");
- error = true;
- }
-
- if (error)
- IWL_WARN(priv, "Tuning to channel %d\n",
- le16_to_cpu(rxon->channel));
-
- if (error) {
- IWL_ERR(priv, "Invalid RXON\n");
- return -EINVAL;
- }
- return 0;
-}
-EXPORT_SYMBOL(iwl_legacy_check_rxon_cmd);
-
-/**
- * iwl_legacy_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
- * @priv: staging_rxon is compared to active_rxon
- *
- * If the RXON structure is changing enough to require a new tune,
- * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
- * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
- */
-int iwl_legacy_full_rxon_required(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
-{
- const struct iwl_legacy_rxon_cmd *staging = &ctx->staging;
- const struct iwl_legacy_rxon_cmd *active = &ctx->active;
-
-#define CHK(cond) \
- if ((cond)) { \
- IWL_DEBUG_INFO(priv, "need full RXON - " #cond "\n"); \
- return 1; \
- }
-
-#define CHK_NEQ(c1, c2) \
- if ((c1) != (c2)) { \
- IWL_DEBUG_INFO(priv, "need full RXON - " \
- #c1 " != " #c2 " - %d != %d\n", \
- (c1), (c2)); \
- return 1; \
- }
-
- /* These items are only settable from the full RXON command */
- CHK(!iwl_legacy_is_associated_ctx(ctx));
- CHK(compare_ether_addr(staging->bssid_addr, active->bssid_addr));
- CHK(compare_ether_addr(staging->node_addr, active->node_addr));
- CHK(compare_ether_addr(staging->wlap_bssid_addr,
- active->wlap_bssid_addr));
- CHK_NEQ(staging->dev_type, active->dev_type);
- CHK_NEQ(staging->channel, active->channel);
- CHK_NEQ(staging->air_propagation, active->air_propagation);
- CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates,
- active->ofdm_ht_single_stream_basic_rates);
- CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates,
- active->ofdm_ht_dual_stream_basic_rates);
- CHK_NEQ(staging->assoc_id, active->assoc_id);
-
- /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
- * be updated with the RXON_ASSOC command -- however only some
- * flag transitions are allowed using RXON_ASSOC */
-
- /* Check if we are not switching bands */
- CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK,
- active->flags & RXON_FLG_BAND_24G_MSK);
-
- /* Check if we are switching association toggle */
- CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK,
- active->filter_flags & RXON_FILTER_ASSOC_MSK);
-
-#undef CHK
-#undef CHK_NEQ
-
- return 0;
-}
-EXPORT_SYMBOL(iwl_legacy_full_rxon_required);
-
-u8 iwl_legacy_get_lowest_plcp(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
-{
- /*
- * Assign the lowest rate -- should really get this from
- * the beacon skb from mac80211.
- */
- if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK)
- return IWL_RATE_1M_PLCP;
- else
- return IWL_RATE_6M_PLCP;
-}
-EXPORT_SYMBOL(iwl_legacy_get_lowest_plcp);
-
-static void _iwl_legacy_set_rxon_ht(struct iwl_priv *priv,
- struct iwl_ht_config *ht_conf,
- struct iwl_rxon_context *ctx)
-{
- struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;
-
- if (!ctx->ht.enabled) {
- rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
- RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK |
- RXON_FLG_HT40_PROT_MSK |
- RXON_FLG_HT_PROT_MSK);
- return;
- }
-
- rxon->flags |= cpu_to_le32(ctx->ht.protection <<
- RXON_FLG_HT_OPERATING_MODE_POS);
-
- /* Set up channel bandwidth:
- * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */
- /* clear the HT channel mode before setting the mode */
- rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
- RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
- if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, NULL)) {
- /* pure ht40 */
- if (ctx->ht.protection ==
- IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
- rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40;
- /* Note: control channel is opposite of extension channel */
- switch (ctx->ht.extension_chan_offset) {
- case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
- rxon->flags &=
- ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
- break;
- case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
- rxon->flags |=
- RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
- break;
- }
- } else {
- /* Note: control channel is opposite of extension channel */
- switch (ctx->ht.extension_chan_offset) {
- case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
- rxon->flags &=
- ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
- rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
- break;
- case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
- rxon->flags |=
- RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
- rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
- break;
- case IEEE80211_HT_PARAM_CHA_SEC_NONE:
- default:
- /* channel location only valid if in Mixed mode */
- IWL_ERR(priv,
- "invalid extension channel offset\n");
- break;
- }
- }
- } else {
- rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY;
- }
-
- if (priv->cfg->ops->hcmd->set_rxon_chain)
- priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
-
- IWL_DEBUG_ASSOC(priv, "rxon flags 0x%X operation mode :0x%X "
- "extension channel offset 0x%x\n",
- le32_to_cpu(rxon->flags), ctx->ht.protection,
- ctx->ht.extension_chan_offset);
-}
-
-void iwl_legacy_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
-{
- struct iwl_rxon_context *ctx;
-
- for_each_context(priv, ctx)
- _iwl_legacy_set_rxon_ht(priv, ht_conf, ctx);
-}
-EXPORT_SYMBOL(iwl_legacy_set_rxon_ht);
-
-/* Return a valid, unused channel for a passive scan to reset the RF */
-u8 iwl_legacy_get_single_channel_number(struct iwl_priv *priv,
- enum ieee80211_band band)
-{
- const struct iwl_channel_info *ch_info;
- int i;
- u8 channel = 0;
- u8 min, max;
- struct iwl_rxon_context *ctx;
-
- if (band == IEEE80211_BAND_5GHZ) {
- min = 14;
- max = priv->channel_count;
- } else {
- min = 0;
- max = 14;
- }
-
- for (i = min; i < max; i++) {
- bool busy = false;
-
- for_each_context(priv, ctx) {
- busy = priv->channel_info[i].channel ==
- le16_to_cpu(ctx->staging.channel);
- if (busy)
- break;
- }
-
- if (busy)
- continue;
-
- channel = priv->channel_info[i].channel;
- ch_info = iwl_legacy_get_channel_info(priv, band, channel);
- if (iwl_legacy_is_channel_valid(ch_info))
- break;
- }
-
- return channel;
-}
-EXPORT_SYMBOL(iwl_legacy_get_single_channel_number);
-
-/**
- * iwl_legacy_set_rxon_channel - Set the band and channel values in staging RXON
- * @ch: requested channel as a pointer to struct ieee80211_channel
- *
- * NOTE: Does not commit to the hardware; it sets appropriate bit fields
- * in the staging RXON flag structure based on the ch->band
- */
-int
-iwl_legacy_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
- struct iwl_rxon_context *ctx)
-{
- enum ieee80211_band band = ch->band;
- u16 channel = ch->hw_value;
-
- if ((le16_to_cpu(ctx->staging.channel) == channel) &&
- (priv->band == band))
- return 0;
-
- ctx->staging.channel = cpu_to_le16(channel);
- if (band == IEEE80211_BAND_5GHZ)
- ctx->staging.flags &= ~RXON_FLG_BAND_24G_MSK;
- else
- ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
-
- priv->band = band;
-
- IWL_DEBUG_INFO(priv, "Staging channel set to %d [%d]\n", channel, band);
-
- return 0;
-}
-EXPORT_SYMBOL(iwl_legacy_set_rxon_channel);
-
-void iwl_legacy_set_flags_for_band(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- enum ieee80211_band band,
- struct ieee80211_vif *vif)
-{
- if (band == IEEE80211_BAND_5GHZ) {
- ctx->staging.flags &=
- ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
- | RXON_FLG_CCK_MSK);
- ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
- } else {
- /* Copied from iwl_post_associate() */
- if (vif && vif->bss_conf.use_short_slot)
- ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
- else
- ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
-
- ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
- ctx->staging.flags |= RXON_FLG_AUTO_DETECT_MSK;
- ctx->staging.flags &= ~RXON_FLG_CCK_MSK;
- }
-}
-EXPORT_SYMBOL(iwl_legacy_set_flags_for_band);
-
-/*
- * initialize rxon structure with default values from eeprom
- */
-void iwl_legacy_connection_init_rx_config(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
-{
- const struct iwl_channel_info *ch_info;
-
- memset(&ctx->staging, 0, sizeof(ctx->staging));
-
- if (!ctx->vif) {
- ctx->staging.dev_type = ctx->unused_devtype;
- } else
- switch (ctx->vif->type) {
-
- case NL80211_IFTYPE_STATION:
- ctx->staging.dev_type = ctx->station_devtype;
- ctx->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
- break;
-
- case NL80211_IFTYPE_ADHOC:
- ctx->staging.dev_type = ctx->ibss_devtype;
- ctx->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
- ctx->staging.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
- RXON_FILTER_ACCEPT_GRP_MSK;
- break;
-
- default:
- IWL_ERR(priv, "Unsupported interface type %d\n",
- ctx->vif->type);
- break;
- }
-
-#if 0
- /* TODO: Figure out when short_preamble would be set and cache from
- * that */
- if (!hw_to_local(priv->hw)->short_preamble)
- ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
- else
- ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
-#endif
-
- ch_info = iwl_legacy_get_channel_info(priv, priv->band,
- le16_to_cpu(ctx->active.channel));
-
- if (!ch_info)
- ch_info = &priv->channel_info[0];
-
- ctx->staging.channel = cpu_to_le16(ch_info->channel);
- priv->band = ch_info->band;
-
- iwl_legacy_set_flags_for_band(priv, ctx, priv->band, ctx->vif);
-
- ctx->staging.ofdm_basic_rates =
- (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
- ctx->staging.cck_basic_rates =
- (IWL_CCK_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
-
- /* clear both MIX and PURE40 mode flag */
- ctx->staging.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED |
- RXON_FLG_CHANNEL_MODE_PURE_40);
- if (ctx->vif)
- memcpy(ctx->staging.node_addr, ctx->vif->addr, ETH_ALEN);
-
- ctx->staging.ofdm_ht_single_stream_basic_rates = 0xff;
- ctx->staging.ofdm_ht_dual_stream_basic_rates = 0xff;
-}
-EXPORT_SYMBOL(iwl_legacy_connection_init_rx_config);
-
-void iwl_legacy_set_rate(struct iwl_priv *priv)
-{
- const struct ieee80211_supported_band *hw = NULL;
- struct ieee80211_rate *rate;
- struct iwl_rxon_context *ctx;
- int i;
-
- hw = iwl_get_hw_mode(priv, priv->band);
- if (!hw) {
- IWL_ERR(priv, "Failed to set rate: unable to get hw mode\n");
- return;
- }
-
- priv->active_rate = 0;
-
- for (i = 0; i < hw->n_bitrates; i++) {
- rate = &(hw->bitrates[i]);
- if (rate->hw_value < IWL_RATE_COUNT_LEGACY)
- priv->active_rate |= (1 << rate->hw_value);
- }
-
- IWL_DEBUG_RATE(priv, "Set active_rate = %0x\n", priv->active_rate);
-
- for_each_context(priv, ctx) {
- ctx->staging.cck_basic_rates =
- (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
-
- ctx->staging.ofdm_basic_rates =
- (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
- }
-}
-EXPORT_SYMBOL(iwl_legacy_set_rate);
-
-void iwl_legacy_chswitch_done(struct iwl_priv *priv, bool is_success)
-{
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return;
-
- if (test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
- ieee80211_chswitch_done(ctx->vif, is_success);
-}
-EXPORT_SYMBOL(iwl_legacy_chswitch_done);
-
-void iwl_legacy_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_csa_notification *csa = &(pkt->u.csa_notif);
-
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- struct iwl_legacy_rxon_cmd *rxon = (void *)&ctx->active;
-
- if (!test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
- return;
-
- if (!le32_to_cpu(csa->status) && csa->channel == priv->switch_channel) {
- rxon->channel = csa->channel;
- ctx->staging.channel = csa->channel;
- IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
- le16_to_cpu(csa->channel));
- iwl_legacy_chswitch_done(priv, true);
- } else {
- IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
- le16_to_cpu(csa->channel));
- iwl_legacy_chswitch_done(priv, false);
- }
-}
-EXPORT_SYMBOL(iwl_legacy_rx_csa);
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-void iwl_legacy_print_rx_config_cmd(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
-{
- struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;
-
- IWL_DEBUG_RADIO(priv, "RX CONFIG:\n");
- iwl_print_hex_dump(priv, IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
- IWL_DEBUG_RADIO(priv, "u16 channel: 0x%x\n",
- le16_to_cpu(rxon->channel));
- IWL_DEBUG_RADIO(priv, "u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
- IWL_DEBUG_RADIO(priv, "u32 filter_flags: 0x%08x\n",
- le32_to_cpu(rxon->filter_flags));
- IWL_DEBUG_RADIO(priv, "u8 dev_type: 0x%x\n", rxon->dev_type);
- IWL_DEBUG_RADIO(priv, "u8 ofdm_basic_rates: 0x%02x\n",
- rxon->ofdm_basic_rates);
- IWL_DEBUG_RADIO(priv, "u8 cck_basic_rates: 0x%02x\n",
- rxon->cck_basic_rates);
- IWL_DEBUG_RADIO(priv, "u8[6] node_addr: %pM\n", rxon->node_addr);
- IWL_DEBUG_RADIO(priv, "u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
- IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n",
- le16_to_cpu(rxon->assoc_id));
-}
-EXPORT_SYMBOL(iwl_legacy_print_rx_config_cmd);
-#endif
-/**
- * iwl_legacy_irq_handle_error - called for HW or SW error interrupt from card
- */
-void iwl_legacy_irq_handle_error(struct iwl_priv *priv)
-{
- /* Set the FW error flag -- cleared on iwl_down */
- set_bit(STATUS_FW_ERROR, &priv->status);
-
- /* Cancel currently queued command. */
- clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
-
- IWL_ERR(priv, "Loaded firmware version: %s\n",
- priv->hw->wiphy->fw_version);
-
- priv->cfg->ops->lib->dump_nic_error_log(priv);
- if (priv->cfg->ops->lib->dump_fh)
- priv->cfg->ops->lib->dump_fh(priv, NULL, false);
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- if (iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS)
- iwl_legacy_print_rx_config_cmd(priv,
- &priv->contexts[IWL_RXON_CTX_BSS]);
-#endif
-
- wake_up(&priv->wait_command_queue);
-
- /* Keep the restart process from trying to send host
- * commands by clearing the INIT status bit */
- clear_bit(STATUS_READY, &priv->status);
-
- if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
- IWL_DEBUG(priv, IWL_DL_FW_ERRORS,
- "Restarting adapter due to uCode error.\n");
-
- if (priv->cfg->mod_params->restart_fw)
- queue_work(priv->workqueue, &priv->restart);
- }
-}
-EXPORT_SYMBOL(iwl_legacy_irq_handle_error);
-
-static int iwl_legacy_apm_stop_master(struct iwl_priv *priv)
-{
- int ret = 0;
-
- /* stop device's busmaster DMA activity */
- iwl_legacy_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
-
- ret = iwl_poll_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED,
- CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
- if (ret)
- IWL_WARN(priv, "Master Disable Timed Out, 100 usec\n");
-
- IWL_DEBUG_INFO(priv, "stop master\n");
-
- return ret;
-}
-
-void iwl_legacy_apm_stop(struct iwl_priv *priv)
-{
- IWL_DEBUG_INFO(priv, "Stop card, put in low power state\n");
-
- /* Stop device's DMA activity */
- iwl_legacy_apm_stop_master(priv);
-
- /* Reset the entire device */
- iwl_legacy_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
-
- udelay(10);
-
- /*
- * Clear "initialization complete" bit to move adapter from
- * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
- */
- iwl_legacy_clear_bit(priv, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
-}
-EXPORT_SYMBOL(iwl_legacy_apm_stop);
-
-
-/*
- * Start up NIC's basic functionality after it has been reset
- * (e.g. after platform boot, or shutdown via iwl_legacy_apm_stop())
- * NOTE: This does not load uCode nor start the embedded processor
- */
-int iwl_legacy_apm_init(struct iwl_priv *priv)
-{
- int ret = 0;
- u16 lctl;
-
- IWL_DEBUG_INFO(priv, "Init card's basic functions\n");
-
- /*
- * Use "set_bit" below rather than "write", to preserve any hardware
- * bits already set by default after reset.
- */
-
- /* Disable L0S exit timer (platform NMI Work/Around) */
- iwl_legacy_set_bit(priv, CSR_GIO_CHICKEN_BITS,
- CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
-
- /*
- * Disable L0s without affecting L1;
- * don't wait for ICH L0s (ICH bug W/A)
- */
- iwl_legacy_set_bit(priv, CSR_GIO_CHICKEN_BITS,
- CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
-
- /* Set FH wait threshold to maximum (HW error during stress W/A) */
- iwl_legacy_set_bit(priv, CSR_DBG_HPET_MEM_REG,
- CSR_DBG_HPET_MEM_REG_VAL);
-
- /*
- * Enable HAP INTA (interrupt from management bus) to
- * wake device's PCI Express link L1a -> L0s
-	 * NOTE: This is a no-op for 3945 (the bit does not exist)
- */
- iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
-
- /*
- * HW bug W/A for instability in PCIe bus L0->L0S->L1 transition.
- * Check if BIOS (or OS) enabled L1-ASPM on this device.
- * If so (likely), disable L0S, so device moves directly L0->L1;
- * costs negligible amount of power savings.
- * If not (unlikely), enable L0S, so there is at least some
- * power savings, even without L1.
- */
- if (priv->cfg->base_params->set_l0s) {
- lctl = iwl_legacy_pcie_link_ctl(priv);
- if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
- PCI_CFG_LINK_CTRL_VAL_L1_EN) {
- /* L1-ASPM enabled; disable(!) L0S */
- iwl_legacy_set_bit(priv, CSR_GIO_REG,
- CSR_GIO_REG_VAL_L0S_ENABLED);
- IWL_DEBUG_POWER(priv, "L1 Enabled; Disabling L0S\n");
- } else {
- /* L1-ASPM disabled; enable(!) L0S */
- iwl_legacy_clear_bit(priv, CSR_GIO_REG,
- CSR_GIO_REG_VAL_L0S_ENABLED);
- IWL_DEBUG_POWER(priv, "L1 Disabled; Enabling L0S\n");
- }
- }
-
- /* Configure analog phase-lock-loop before activating to D0A */
- if (priv->cfg->base_params->pll_cfg_val)
- iwl_legacy_set_bit(priv, CSR_ANA_PLL_CFG,
- priv->cfg->base_params->pll_cfg_val);
-
- /*
- * Set "initialization complete" bit to move adapter from
- * D0U* --> D0A* (powered-up active) state.
- */
- iwl_legacy_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
-
- /*
- * Wait for clock stabilization; once stabilized, access to
- * device-internal resources is supported, e.g. iwl_legacy_write_prph()
- * and accesses to uCode SRAM.
- */
- ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
- CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
- if (ret < 0) {
- IWL_DEBUG_INFO(priv, "Failed to init the card\n");
- goto out;
- }
-
- /*
- * Enable DMA and BSM (if used) clocks, wait for them to stabilize.
-	 * BSM (Bootstrap State Machine) is only in 3945 and 4965.
- *
- * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
- * do not disable clocks. This preserves any hardware bits already
- * set by default in "CLK_CTRL_REG" after reset.
- */
- if (priv->cfg->base_params->use_bsm)
- iwl_legacy_write_prph(priv, APMG_CLK_EN_REG,
- APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT);
- else
- iwl_legacy_write_prph(priv, APMG_CLK_EN_REG,
- APMG_CLK_VAL_DMA_CLK_RQT);
- udelay(20);
-
- /* Disable L1-Active */
- iwl_legacy_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
- APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
-
-out:
- return ret;
-}
-EXPORT_SYMBOL(iwl_legacy_apm_init);
-
-
-int iwl_legacy_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
-{
- int ret;
- s8 prev_tx_power;
- bool defer;
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
- lockdep_assert_held(&priv->mutex);
-
- if (priv->tx_power_user_lmt == tx_power && !force)
- return 0;
-
- if (!priv->cfg->ops->lib->send_tx_power)
- return -EOPNOTSUPP;
-
-	/* 0 dBm means 1 milliwatt */
- if (tx_power < 0) {
- IWL_WARN(priv,
- "Requested user TXPOWER %d below 1 mW.\n",
- tx_power);
- return -EINVAL;
- }
-
- if (tx_power > priv->tx_power_device_lmt) {
- IWL_WARN(priv,
- "Requested user TXPOWER %d above upper limit %d.\n",
- tx_power, priv->tx_power_device_lmt);
- return -EINVAL;
- }
-
- if (!iwl_legacy_is_ready_rf(priv))
- return -EIO;
-
-	/* scan complete and commit_rxon use the tx_power_next value;
-	 * it always needs to be updated with the newest request */
- priv->tx_power_next = tx_power;
-
- /* do not set tx power when scanning or channel changing */
- defer = test_bit(STATUS_SCANNING, &priv->status) ||
- memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging));
- if (defer && !force) {
- IWL_DEBUG_INFO(priv, "Deferring tx power set\n");
- return 0;
- }
-
- prev_tx_power = priv->tx_power_user_lmt;
- priv->tx_power_user_lmt = tx_power;
-
- ret = priv->cfg->ops->lib->send_tx_power(priv);
-
-	/* if we failed to set tx_power, restore the original tx power */
- if (ret) {
- priv->tx_power_user_lmt = prev_tx_power;
- priv->tx_power_next = prev_tx_power;
- }
- return ret;
-}
-EXPORT_SYMBOL(iwl_legacy_set_tx_power);
-
-void iwl_legacy_send_bt_config(struct iwl_priv *priv)
-{
- struct iwl_bt_cmd bt_cmd = {
- .lead_time = BT_LEAD_TIME_DEF,
- .max_kill = BT_MAX_KILL_DEF,
- .kill_ack_mask = 0,
- .kill_cts_mask = 0,
- };
-
- if (!bt_coex_active)
- bt_cmd.flags = BT_COEX_DISABLE;
- else
- bt_cmd.flags = BT_COEX_ENABLE;
-
- IWL_DEBUG_INFO(priv, "BT coex %s\n",
- (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
-
- if (iwl_legacy_send_cmd_pdu(priv, REPLY_BT_CONFIG,
- sizeof(struct iwl_bt_cmd), &bt_cmd))
- IWL_ERR(priv, "failed to send BT Coex Config\n");
-}
-EXPORT_SYMBOL(iwl_legacy_send_bt_config);
-
-int iwl_legacy_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
-{
- struct iwl_statistics_cmd statistics_cmd = {
- .configuration_flags =
- clear ? IWL_STATS_CONF_CLEAR_STATS : 0,
- };
-
- if (flags & CMD_ASYNC)
- return iwl_legacy_send_cmd_pdu_async(priv, REPLY_STATISTICS_CMD,
- sizeof(struct iwl_statistics_cmd),
- &statistics_cmd, NULL);
- else
- return iwl_legacy_send_cmd_pdu(priv, REPLY_STATISTICS_CMD,
- sizeof(struct iwl_statistics_cmd),
- &statistics_cmd);
-}
-EXPORT_SYMBOL(iwl_legacy_send_statistics_request);
-
-void iwl_legacy_rx_pm_sleep_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif);
- IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n",
- sleep->pm_sleep_mode, sleep->pm_wakeup_src);
-#endif
-}
-EXPORT_SYMBOL(iwl_legacy_rx_pm_sleep_notif);
-
-void iwl_legacy_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
- IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
- "notification for %s:\n", len,
- iwl_legacy_get_cmd_string(pkt->hdr.cmd));
- iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, len);
-}
-EXPORT_SYMBOL(iwl_legacy_rx_pm_debug_statistics_notif);
-
-void iwl_legacy_rx_reply_error(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
-
- IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) "
- "seq 0x%04X ser 0x%08X\n",
- le32_to_cpu(pkt->u.err_resp.error_type),
- iwl_legacy_get_cmd_string(pkt->u.err_resp.cmd_id),
- pkt->u.err_resp.cmd_id,
- le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
- le32_to_cpu(pkt->u.err_resp.error_info));
-}
-EXPORT_SYMBOL(iwl_legacy_rx_reply_error);
-
-void iwl_legacy_clear_isr_stats(struct iwl_priv *priv)
-{
- memset(&priv->isr_stats, 0, sizeof(priv->isr_stats));
-}
-
-int iwl_legacy_mac_conf_tx(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, u16 queue,
- const struct ieee80211_tx_queue_params *params)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_rxon_context *ctx;
- unsigned long flags;
- int q;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- if (!iwl_legacy_is_ready_rf(priv)) {
- IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
- return -EIO;
- }
-
- if (queue >= AC_NUM) {
- IWL_DEBUG_MAC80211(priv, "leave - queue >= AC_NUM %d\n", queue);
- return 0;
- }
-
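-	/* map the mac80211 queue index to the device's AC table (reverse order) */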
- q = AC_NUM - 1 - queue;
-
- spin_lock_irqsave(&priv->lock, flags);
-
- for_each_context(priv, ctx) {
- ctx->qos_data.def_qos_parm.ac[q].cw_min =
- cpu_to_le16(params->cw_min);
- ctx->qos_data.def_qos_parm.ac[q].cw_max =
- cpu_to_le16(params->cw_max);
- ctx->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
- ctx->qos_data.def_qos_parm.ac[q].edca_txop =
- cpu_to_le16((params->txop * 32));
-
- ctx->qos_data.def_qos_parm.ac[q].reserved1 = 0;
- }
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
- return 0;
-}
-EXPORT_SYMBOL(iwl_legacy_mac_conf_tx);
-
-int iwl_legacy_mac_tx_last_beacon(struct ieee80211_hw *hw)
-{
- struct iwl_priv *priv = hw->priv;
-
- return priv->ibss_manager == IWL_IBSS_MANAGER;
-}
-EXPORT_SYMBOL_GPL(iwl_legacy_mac_tx_last_beacon);
-
-static int
-iwl_legacy_set_mode(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
-{
- iwl_legacy_connection_init_rx_config(priv, ctx);
-
- if (priv->cfg->ops->hcmd->set_rxon_chain)
- priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
-
- return iwl_legacy_commit_rxon(priv, ctx);
-}
-
-static int iwl_legacy_setup_interface(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
-{
- struct ieee80211_vif *vif = ctx->vif;
- int err;
-
- lockdep_assert_held(&priv->mutex);
-
- /*
- * This variable will be correct only when there's just
- * a single context, but all code using it is for hardware
- * that supports only one context.
- */
- priv->iw_mode = vif->type;
-
- ctx->is_active = true;
-
- err = iwl_legacy_set_mode(priv, ctx);
- if (err) {
- if (!ctx->always_active)
- ctx->is_active = false;
- return err;
- }
-
- return 0;
-}
-
-int
-iwl_legacy_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
- struct iwl_rxon_context *tmp, *ctx = NULL;
- int err;
-
- IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n",
- vif->type, vif->addr);
-
- mutex_lock(&priv->mutex);
-
- if (!iwl_legacy_is_ready_rf(priv)) {
- IWL_WARN(priv, "Try to add interface when device not ready\n");
- err = -EINVAL;
- goto out;
- }
-
- for_each_context(priv, tmp) {
- u32 possible_modes =
- tmp->interface_modes | tmp->exclusive_interface_modes;
-
- if (tmp->vif) {
- /* check if this busy context is exclusive */
- if (tmp->exclusive_interface_modes &
- BIT(tmp->vif->type)) {
- err = -EINVAL;
- goto out;
- }
- continue;
- }
-
- if (!(possible_modes & BIT(vif->type)))
- continue;
-
- /* have maybe usable context w/o interface */
- ctx = tmp;
- break;
- }
-
- if (!ctx) {
- err = -EOPNOTSUPP;
- goto out;
- }
-
- vif_priv->ctx = ctx;
- ctx->vif = vif;
-
- err = iwl_legacy_setup_interface(priv, ctx);
- if (!err)
- goto out;
-
- ctx->vif = NULL;
- priv->iw_mode = NL80211_IFTYPE_STATION;
- out:
- mutex_unlock(&priv->mutex);
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
- return err;
-}
-EXPORT_SYMBOL(iwl_legacy_mac_add_interface);
-
-static void iwl_legacy_teardown_interface(struct iwl_priv *priv,
- struct ieee80211_vif *vif,
- bool mode_change)
-{
- struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
-
- lockdep_assert_held(&priv->mutex);
-
- if (priv->scan_vif == vif) {
- iwl_legacy_scan_cancel_timeout(priv, 200);
- iwl_legacy_force_scan_end(priv);
- }
-
- if (!mode_change) {
- iwl_legacy_set_mode(priv, ctx);
- if (!ctx->always_active)
- ctx->is_active = false;
- }
-}
-
-void iwl_legacy_mac_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- mutex_lock(&priv->mutex);
-
- WARN_ON(ctx->vif != vif);
- ctx->vif = NULL;
-
- iwl_legacy_teardown_interface(priv, vif, false);
-
- memset(priv->bssid, 0, ETH_ALEN);
- mutex_unlock(&priv->mutex);
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
-
-}
-EXPORT_SYMBOL(iwl_legacy_mac_remove_interface);
-
-int iwl_legacy_alloc_txq_mem(struct iwl_priv *priv)
-{
- if (!priv->txq)
- priv->txq = kzalloc(
- sizeof(struct iwl_tx_queue) *
- priv->cfg->base_params->num_of_queues,
- GFP_KERNEL);
- if (!priv->txq) {
- IWL_ERR(priv, "Not enough memory for txq\n");
- return -ENOMEM;
- }
- return 0;
-}
-EXPORT_SYMBOL(iwl_legacy_alloc_txq_mem);
-
-void iwl_legacy_txq_mem(struct iwl_priv *priv)
-{
- kfree(priv->txq);
- priv->txq = NULL;
-}
-EXPORT_SYMBOL(iwl_legacy_txq_mem);
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
-
-#define IWL_TRAFFIC_DUMP_SIZE (IWL_TRAFFIC_ENTRY_SIZE * IWL_TRAFFIC_ENTRIES)
-
-void iwl_legacy_reset_traffic_log(struct iwl_priv *priv)
-{
- priv->tx_traffic_idx = 0;
- priv->rx_traffic_idx = 0;
- if (priv->tx_traffic)
- memset(priv->tx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE);
- if (priv->rx_traffic)
- memset(priv->rx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE);
-}
-
-int iwl_legacy_alloc_traffic_mem(struct iwl_priv *priv)
-{
- u32 traffic_size = IWL_TRAFFIC_DUMP_SIZE;
-
- if (iwlegacy_debug_level & IWL_DL_TX) {
- if (!priv->tx_traffic) {
- priv->tx_traffic =
- kzalloc(traffic_size, GFP_KERNEL);
- if (!priv->tx_traffic)
- return -ENOMEM;
- }
- }
- if (iwlegacy_debug_level & IWL_DL_RX) {
- if (!priv->rx_traffic) {
- priv->rx_traffic =
- kzalloc(traffic_size, GFP_KERNEL);
- if (!priv->rx_traffic)
- return -ENOMEM;
- }
- }
- iwl_legacy_reset_traffic_log(priv);
- return 0;
-}
-EXPORT_SYMBOL(iwl_legacy_alloc_traffic_mem);
-
-void iwl_legacy_free_traffic_mem(struct iwl_priv *priv)
-{
- kfree(priv->tx_traffic);
- priv->tx_traffic = NULL;
-
- kfree(priv->rx_traffic);
- priv->rx_traffic = NULL;
-}
-EXPORT_SYMBOL(iwl_legacy_free_traffic_mem);
-
-void iwl_legacy_dbg_log_tx_data_frame(struct iwl_priv *priv,
- u16 length, struct ieee80211_hdr *header)
-{
- __le16 fc;
- u16 len;
-
- if (likely(!(iwlegacy_debug_level & IWL_DL_TX)))
- return;
-
- if (!priv->tx_traffic)
- return;
-
- fc = header->frame_control;
- if (ieee80211_is_data(fc)) {
- len = (length > IWL_TRAFFIC_ENTRY_SIZE)
- ? IWL_TRAFFIC_ENTRY_SIZE : length;
- memcpy((priv->tx_traffic +
- (priv->tx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)),
- header, len);
- priv->tx_traffic_idx =
- (priv->tx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
- }
-}
-EXPORT_SYMBOL(iwl_legacy_dbg_log_tx_data_frame);
-
-void iwl_legacy_dbg_log_rx_data_frame(struct iwl_priv *priv,
- u16 length, struct ieee80211_hdr *header)
-{
- __le16 fc;
- u16 len;
-
- if (likely(!(iwlegacy_debug_level & IWL_DL_RX)))
- return;
-
- if (!priv->rx_traffic)
- return;
-
- fc = header->frame_control;
- if (ieee80211_is_data(fc)) {
- len = (length > IWL_TRAFFIC_ENTRY_SIZE)
- ? IWL_TRAFFIC_ENTRY_SIZE : length;
- memcpy((priv->rx_traffic +
- (priv->rx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)),
- header, len);
- priv->rx_traffic_idx =
- (priv->rx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
- }
-}
-EXPORT_SYMBOL(iwl_legacy_dbg_log_rx_data_frame);
-
-const char *iwl_legacy_get_mgmt_string(int cmd)
-{
- switch (cmd) {
- IWL_CMD(MANAGEMENT_ASSOC_REQ);
- IWL_CMD(MANAGEMENT_ASSOC_RESP);
- IWL_CMD(MANAGEMENT_REASSOC_REQ);
- IWL_CMD(MANAGEMENT_REASSOC_RESP);
- IWL_CMD(MANAGEMENT_PROBE_REQ);
- IWL_CMD(MANAGEMENT_PROBE_RESP);
- IWL_CMD(MANAGEMENT_BEACON);
- IWL_CMD(MANAGEMENT_ATIM);
- IWL_CMD(MANAGEMENT_DISASSOC);
- IWL_CMD(MANAGEMENT_AUTH);
- IWL_CMD(MANAGEMENT_DEAUTH);
- IWL_CMD(MANAGEMENT_ACTION);
- default:
- return "UNKNOWN";
-
- }
-}
-
-const char *iwl_legacy_get_ctrl_string(int cmd)
-{
- switch (cmd) {
- IWL_CMD(CONTROL_BACK_REQ);
- IWL_CMD(CONTROL_BACK);
- IWL_CMD(CONTROL_PSPOLL);
- IWL_CMD(CONTROL_RTS);
- IWL_CMD(CONTROL_CTS);
- IWL_CMD(CONTROL_ACK);
- IWL_CMD(CONTROL_CFEND);
- IWL_CMD(CONTROL_CFENDACK);
- default:
- return "UNKNOWN";
-
- }
-}
-
-void iwl_legacy_clear_traffic_stats(struct iwl_priv *priv)
-{
- memset(&priv->tx_stats, 0, sizeof(struct traffic_stats));
- memset(&priv->rx_stats, 0, sizeof(struct traffic_stats));
-}
-
-/*
- * If CONFIG_IWLWIFI_LEGACY_DEBUGFS is defined, iwl_legacy_update_stats()
- * records all MGMT, CTRL and DATA packets for both the TX and RX paths;
- * use debugfs to display the rx/tx statistics.
- * If CONFIG_IWLWIFI_LEGACY_DEBUGFS is not defined, no MGMT or CTRL
- * information is recorded, but DATA packets are still counted because
- * iwl_led.c needs to control LED blinking based on the number of
- * TX and RX data frames.
- */
-void
-iwl_legacy_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, u16 len)
-{
- struct traffic_stats *stats;
-
- if (is_tx)
- stats = &priv->tx_stats;
- else
- stats = &priv->rx_stats;
-
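-	/* bucket the frame by FC type/subtype; data frames also count bytes */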
- if (ieee80211_is_mgmt(fc)) {
- switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
- case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
- stats->mgmt[MANAGEMENT_ASSOC_REQ]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
- stats->mgmt[MANAGEMENT_ASSOC_RESP]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
- stats->mgmt[MANAGEMENT_REASSOC_REQ]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
- stats->mgmt[MANAGEMENT_REASSOC_RESP]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
- stats->mgmt[MANAGEMENT_PROBE_REQ]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
- stats->mgmt[MANAGEMENT_PROBE_RESP]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_BEACON):
- stats->mgmt[MANAGEMENT_BEACON]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_ATIM):
- stats->mgmt[MANAGEMENT_ATIM]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
- stats->mgmt[MANAGEMENT_DISASSOC]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_AUTH):
- stats->mgmt[MANAGEMENT_AUTH]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
- stats->mgmt[MANAGEMENT_DEAUTH]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_ACTION):
- stats->mgmt[MANAGEMENT_ACTION]++;
- break;
- }
- } else if (ieee80211_is_ctl(fc)) {
- switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
- case cpu_to_le16(IEEE80211_STYPE_BACK_REQ):
- stats->ctrl[CONTROL_BACK_REQ]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_BACK):
- stats->ctrl[CONTROL_BACK]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_PSPOLL):
- stats->ctrl[CONTROL_PSPOLL]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_RTS):
- stats->ctrl[CONTROL_RTS]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_CTS):
- stats->ctrl[CONTROL_CTS]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_ACK):
- stats->ctrl[CONTROL_ACK]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_CFEND):
- stats->ctrl[CONTROL_CFEND]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_CFENDACK):
- stats->ctrl[CONTROL_CFENDACK]++;
- break;
- }
- } else {
- /* data */
- stats->data_cnt++;
- stats->data_bytes += len;
- }
-}
-EXPORT_SYMBOL(iwl_legacy_update_stats);
-#endif
-
-int iwl_legacy_force_reset(struct iwl_priv *priv, bool external)
-{
- struct iwl_force_reset *force_reset;
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return -EINVAL;
-
- force_reset = &priv->force_reset;
- force_reset->reset_request_count++;
- if (!external) {
- if (force_reset->last_force_reset_jiffies &&
- time_after(force_reset->last_force_reset_jiffies +
- force_reset->reset_duration, jiffies)) {
- IWL_DEBUG_INFO(priv, "force reset rejected\n");
- force_reset->reset_reject_count++;
- return -EAGAIN;
- }
- }
- force_reset->reset_success_count++;
- force_reset->last_force_reset_jiffies = jiffies;
-
-	/*
-	 * If the request is external (e.g. from debugfs), always perform
-	 * it regardless of the module parameter setting.
-	 * If the request is internal (uCode error or driver-detected
-	 * failure), the fw_restart module parameter needs to be checked
-	 * before reloading the firmware.
-	 */
-
- if (!external && !priv->cfg->mod_params->restart_fw) {
- IWL_DEBUG_INFO(priv, "Cancel firmware reload based on "
- "module parameter setting\n");
- return 0;
- }
-
- IWL_ERR(priv, "On demand firmware reload\n");
-
- /* Set the FW error flag -- cleared on iwl_down */
- set_bit(STATUS_FW_ERROR, &priv->status);
- wake_up(&priv->wait_command_queue);
- /*
- * Keep the restart process from trying to send host
- * commands by clearing the INIT status bit
- */
- clear_bit(STATUS_READY, &priv->status);
- queue_work(priv->workqueue, &priv->restart);
-
- return 0;
-}
-
-int
-iwl_legacy_mac_change_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- enum nl80211_iftype newtype, bool newp2p)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
- struct iwl_rxon_context *tmp;
- u32 interface_modes;
- int err;
-
- newtype = ieee80211_iftype_p2p(newtype, newp2p);
-
- mutex_lock(&priv->mutex);
-
- if (!ctx->vif || !iwl_legacy_is_ready_rf(priv)) {
- /*
- * Huh? But wait ... this can maybe happen when
- * we're in the middle of a firmware restart!
- */
- err = -EBUSY;
- goto out;
- }
-
- interface_modes = ctx->interface_modes | ctx->exclusive_interface_modes;
-
- if (!(interface_modes & BIT(newtype))) {
- err = -EBUSY;
- goto out;
- }
-
- if (ctx->exclusive_interface_modes & BIT(newtype)) {
- for_each_context(priv, tmp) {
- if (ctx == tmp)
- continue;
-
- if (!tmp->vif)
- continue;
-
- /*
- * The current mode switch would be exclusive, but
- * another context is active ... refuse the switch.
- */
- err = -EBUSY;
- goto out;
- }
- }
-
- /* success */
- iwl_legacy_teardown_interface(priv, vif, true);
- vif->type = newtype;
- vif->p2p = newp2p;
- err = iwl_legacy_setup_interface(priv, ctx);
- WARN_ON(err);
- /*
- * We've switched internally, but submitting to the
- * device may have failed for some reason. Mask this
- * error, because otherwise mac80211 will not switch
- * (and set the interface type back) and we'll be
- * out of sync with it.
- */
- err = 0;
-
- out:
- mutex_unlock(&priv->mutex);
- return err;
-}
-EXPORT_SYMBOL(iwl_legacy_mac_change_interface);
-
-/*
- * On every watchdog tick we check the (latest) time stamp. If it has not
- * changed during the timeout period and the queue is not empty, we reset
- * the firmware.
- */
-static int iwl_legacy_check_stuck_queue(struct iwl_priv *priv, int cnt)
-{
- struct iwl_tx_queue *txq = &priv->txq[cnt];
- struct iwl_queue *q = &txq->q;
- unsigned long timeout;
- int ret;
-
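-	/* empty queue: refresh its timestamp and consider it healthy */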
- if (q->read_ptr == q->write_ptr) {
- txq->time_stamp = jiffies;
- return 0;
- }
-
- timeout = txq->time_stamp +
- msecs_to_jiffies(priv->cfg->base_params->wd_timeout);
-
- if (time_after(jiffies, timeout)) {
- IWL_ERR(priv, "Queue %d stuck for %u ms.\n",
- q->id, priv->cfg->base_params->wd_timeout);
- ret = iwl_legacy_force_reset(priv, false);
- return (ret == -EAGAIN) ? 0 : 1;
- }
-
- return 0;
-}
-
-/*
- * Making the watchdog tick a quarter of the timeout ensures we will
- * discover a hung queue somewhere between timeout and 1.25*timeout.
- */
-#define IWL_WD_TICK(timeout) ((timeout) / 4)
-
-/*
- * Watchdog timer callback: we check each TX queue for a stuck condition;
- * if one is hung we reset the firmware. If everything is fine, just rearm
- * the timer.
- */
-void iwl_legacy_bg_watchdog(unsigned long data)
-{
- struct iwl_priv *priv = (struct iwl_priv *)data;
- int cnt;
- unsigned long timeout;
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return;
-
- timeout = priv->cfg->base_params->wd_timeout;
- if (timeout == 0)
- return;
-
- /* monitor and check for stuck cmd queue */
- if (iwl_legacy_check_stuck_queue(priv, priv->cmd_queue))
- return;
-
- /* monitor and check for other stuck queues */
- if (iwl_legacy_is_any_associated(priv)) {
- for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
- /* skip as we already checked the command queue */
- if (cnt == priv->cmd_queue)
- continue;
- if (iwl_legacy_check_stuck_queue(priv, cnt))
- return;
- }
- }
-
- mod_timer(&priv->watchdog, jiffies +
- msecs_to_jiffies(IWL_WD_TICK(timeout)));
-}
-EXPORT_SYMBOL(iwl_legacy_bg_watchdog);
-
-void iwl_legacy_setup_watchdog(struct iwl_priv *priv)
-{
- unsigned int timeout = priv->cfg->base_params->wd_timeout;
-
- if (timeout)
- mod_timer(&priv->watchdog,
- jiffies + msecs_to_jiffies(IWL_WD_TICK(timeout)));
- else
- del_timer(&priv->watchdog);
-}
-EXPORT_SYMBOL(iwl_legacy_setup_watchdog);
-
-/*
- * Extended beacon time format:
- * time in usec is converted into a 32-bit value in extended:internal format,
- * where the extended part is the beacon count and the internal part is the
- * time in usec within one beacon interval.
- */
-u32
-iwl_legacy_usecs_to_beacons(struct iwl_priv *priv,
- u32 usec, u32 beacon_interval)
-{
- u32 quot;
- u32 rem;
- u32 interval = beacon_interval * TIME_UNIT;
-
- if (!interval || !usec)
- return 0;
-
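-	/* split usec into a beacon count (extended part) and the remainder
-	 * within one beacon interval (internal part), each masked to the
-	 * hardware's TSF bit width */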
- quot = (usec / interval) &
- (iwl_legacy_beacon_time_mask_high(priv,
- priv->hw_params.beacon_time_tsf_bits) >>
- priv->hw_params.beacon_time_tsf_bits);
- rem = (usec % interval) & iwl_legacy_beacon_time_mask_low(priv,
- priv->hw_params.beacon_time_tsf_bits);
-
- return (quot << priv->hw_params.beacon_time_tsf_bits) + rem;
-}
-EXPORT_SYMBOL(iwl_legacy_usecs_to_beacons);
-
-/* base is usually what we get from the uCode with each received frame,
- * i.e. the HW timer counter counting down
- */
-__le32 iwl_legacy_add_beacon_time(struct iwl_priv *priv, u32 base,
- u32 addon, u32 beacon_interval)
-{
- u32 base_low = base & iwl_legacy_beacon_time_mask_low(priv,
- priv->hw_params.beacon_time_tsf_bits);
- u32 addon_low = addon & iwl_legacy_beacon_time_mask_low(priv,
- priv->hw_params.beacon_time_tsf_bits);
- u32 interval = beacon_interval * TIME_UNIT;
- u32 res = (base & iwl_legacy_beacon_time_mask_high(priv,
- priv->hw_params.beacon_time_tsf_bits)) +
- (addon & iwl_legacy_beacon_time_mask_high(priv,
- priv->hw_params.beacon_time_tsf_bits));
-
- if (base_low > addon_low)
- res += base_low - addon_low;
- else if (base_low < addon_low) {
- res += interval + base_low - addon_low;
- res += (1 << priv->hw_params.beacon_time_tsf_bits);
- } else
- res += (1 << priv->hw_params.beacon_time_tsf_bits);
-
- return cpu_to_le32(res);
-}
-EXPORT_SYMBOL(iwl_legacy_add_beacon_time);
-
-#ifdef CONFIG_PM
-
-int iwl_legacy_pci_suspend(struct device *device)
-{
- struct pci_dev *pdev = to_pci_dev(device);
- struct iwl_priv *priv = pci_get_drvdata(pdev);
-
-	/*
-	 * This function is called when the system goes into the suspend state.
-	 * mac80211 will call iwl_mac_stop() from its suspend path first, but
-	 * since iwl_mac_stop() has no knowledge of who the caller is, it will
-	 * not call apm_ops.stop() to stop the DMA operation.
-	 * Call apm_ops.stop() here to make sure we stop the DMA.
-	 */
- iwl_legacy_apm_stop(priv);
-
- return 0;
-}
-EXPORT_SYMBOL(iwl_legacy_pci_suspend);
-
-int iwl_legacy_pci_resume(struct device *device)
-{
- struct pci_dev *pdev = to_pci_dev(device);
- struct iwl_priv *priv = pci_get_drvdata(pdev);
- bool hw_rfkill = false;
-
- /*
- * We disable the RETRY_TIMEOUT register (0x41) to keep
- * PCI Tx retries from interfering with C3 CPU state.
- */
- pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
-
- iwl_legacy_enable_interrupts(priv);
-
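-	/* re-read the hardware RF-kill switch state and report it to mac80211 */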
- if (!(iwl_read32(priv, CSR_GP_CNTRL) &
- CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
- hw_rfkill = true;
-
- if (hw_rfkill)
- set_bit(STATUS_RF_KILL_HW, &priv->status);
- else
- clear_bit(STATUS_RF_KILL_HW, &priv->status);
-
- wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rfkill);
-
- return 0;
-}
-EXPORT_SYMBOL(iwl_legacy_pci_resume);
-
-const struct dev_pm_ops iwl_legacy_pm_ops = {
- .suspend = iwl_legacy_pci_suspend,
- .resume = iwl_legacy_pci_resume,
- .freeze = iwl_legacy_pci_suspend,
- .thaw = iwl_legacy_pci_resume,
- .poweroff = iwl_legacy_pci_suspend,
- .restore = iwl_legacy_pci_resume,
-};
-EXPORT_SYMBOL(iwl_legacy_pm_ops);
-
-#endif /* CONFIG_PM */
-
-static void
-iwl_legacy_update_qos(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
-{
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return;
-
- if (!ctx->is_active)
- return;
-
- ctx->qos_data.def_qos_parm.qos_flags = 0;
-
- if (ctx->qos_data.qos_active)
- ctx->qos_data.def_qos_parm.qos_flags |=
- QOS_PARAM_FLG_UPDATE_EDCA_MSK;
-
- if (ctx->ht.enabled)
- ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
-
- IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
- ctx->qos_data.qos_active,
- ctx->qos_data.def_qos_parm.qos_flags);
-
- iwl_legacy_send_cmd_pdu_async(priv, ctx->qos_cmd,
- sizeof(struct iwl_qosparam_cmd),
- &ctx->qos_data.def_qos_parm, NULL);
-}
-
-/**
- * iwl_legacy_mac_config - mac80211 config callback
- */
-int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed)
-{
- struct iwl_priv *priv = hw->priv;
- const struct iwl_channel_info *ch_info;
- struct ieee80211_conf *conf = &hw->conf;
- struct ieee80211_channel *channel = conf->channel;
- struct iwl_ht_config *ht_conf = &priv->current_ht_config;
- struct iwl_rxon_context *ctx;
- unsigned long flags = 0;
- int ret = 0;
- u16 ch;
- int scan_active = 0;
- bool ht_changed[NUM_IWL_RXON_CTX] = {};
-
- if (WARN_ON(!priv->cfg->ops->legacy))
- return -EOPNOTSUPP;
-
- mutex_lock(&priv->mutex);
-
- IWL_DEBUG_MAC80211(priv, "enter to channel %d changed 0x%X\n",
- channel->hw_value, changed);
-
- if (unlikely(test_bit(STATUS_SCANNING, &priv->status))) {
- scan_active = 1;
- IWL_DEBUG_MAC80211(priv, "scan active\n");
- }
-
- if (changed & (IEEE80211_CONF_CHANGE_SMPS |
- IEEE80211_CONF_CHANGE_CHANNEL)) {
- /* mac80211 uses static for non-HT which is what we want */
- priv->current_ht_config.smps = conf->smps_mode;
-
- /*
- * Recalculate chain counts.
- *
- * If monitor mode is enabled then mac80211 will
- * set up the SM PS mode to OFF if an HT channel is
- * configured.
- */
- if (priv->cfg->ops->hcmd->set_rxon_chain)
- for_each_context(priv, ctx)
- priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
- }
-
-	/* during scanning mac80211 will delay setting the channel until the
-	 * scan finishes, with changed = 0
-	 */
- if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) {
- if (scan_active)
- goto set_ch_out;
-
- ch = channel->hw_value;
- ch_info = iwl_legacy_get_channel_info(priv, channel->band, ch);
- if (!iwl_legacy_is_channel_valid(ch_info)) {
- IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n");
- ret = -EINVAL;
- goto set_ch_out;
- }
-
- if (priv->iw_mode == NL80211_IFTYPE_ADHOC &&
- !iwl_legacy_is_channel_ibss(ch_info)) {
- IWL_DEBUG_MAC80211(priv, "leave - not IBSS channel\n");
- ret = -EINVAL;
- goto set_ch_out;
- }
-
- spin_lock_irqsave(&priv->lock, flags);
-
- for_each_context(priv, ctx) {
- /* Configure HT40 channels */
- if (ctx->ht.enabled != conf_is_ht(conf)) {
- ctx->ht.enabled = conf_is_ht(conf);
- ht_changed[ctx->ctxid] = true;
- }
- if (ctx->ht.enabled) {
- if (conf_is_ht40_minus(conf)) {
- ctx->ht.extension_chan_offset =
- IEEE80211_HT_PARAM_CHA_SEC_BELOW;
- ctx->ht.is_40mhz = true;
- } else if (conf_is_ht40_plus(conf)) {
- ctx->ht.extension_chan_offset =
- IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
- ctx->ht.is_40mhz = true;
- } else {
- ctx->ht.extension_chan_offset =
- IEEE80211_HT_PARAM_CHA_SEC_NONE;
- ctx->ht.is_40mhz = false;
- }
- } else
- ctx->ht.is_40mhz = false;
-
- /*
- * Default to no protection. Protection mode will
- * later be set from BSS config in iwl_ht_conf
- */
- ctx->ht.protection =
- IEEE80211_HT_OP_MODE_PROTECTION_NONE;
-
- /* if we are switching from ht to 2.4 clear flags
- * from any ht related info since 2.4 does not
- * support ht */
- if ((le16_to_cpu(ctx->staging.channel) != ch))
- ctx->staging.flags = 0;
-
- iwl_legacy_set_rxon_channel(priv, channel, ctx);
- iwl_legacy_set_rxon_ht(priv, ht_conf);
-
- iwl_legacy_set_flags_for_band(priv, ctx, channel->band,
- ctx->vif);
- }
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- if (priv->cfg->ops->legacy->update_bcast_stations)
- ret =
- priv->cfg->ops->legacy->update_bcast_stations(priv);
-
- set_ch_out:
- /* The list of supported rates and rate mask can be different
- * for each band; since the band may have changed, reset
- * the rate mask to what mac80211 lists */
- iwl_legacy_set_rate(priv);
- }
-
- if (changed & (IEEE80211_CONF_CHANGE_PS |
- IEEE80211_CONF_CHANGE_IDLE)) {
- ret = iwl_legacy_power_update_mode(priv, false);
- if (ret)
- IWL_DEBUG_MAC80211(priv, "Error setting sleep level\n");
- }
-
- if (changed & IEEE80211_CONF_CHANGE_POWER) {
- IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n",
- priv->tx_power_user_lmt, conf->power_level);
-
- iwl_legacy_set_tx_power(priv, conf->power_level, false);
- }
-
- if (!iwl_legacy_is_ready(priv)) {
- IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
- goto out;
- }
-
- if (scan_active)
- goto out;
-
- for_each_context(priv, ctx) {
- if (memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging)))
- iwl_legacy_commit_rxon(priv, ctx);
- else
- IWL_DEBUG_INFO(priv,
- "Not re-sending same RXON configuration.\n");
- if (ht_changed[ctx->ctxid])
- iwl_legacy_update_qos(priv, ctx);
- }
-
-out:
- IWL_DEBUG_MAC80211(priv, "leave\n");
- mutex_unlock(&priv->mutex);
- return ret;
-}
-EXPORT_SYMBOL(iwl_legacy_mac_config);
-
-void iwl_legacy_mac_reset_tsf(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
-{
- struct iwl_priv *priv = hw->priv;
- unsigned long flags;
- /* IBSS can only be the IWL_RXON_CTX_BSS context */
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
- if (WARN_ON(!priv->cfg->ops->legacy))
- return;
-
- mutex_lock(&priv->mutex);
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- spin_lock_irqsave(&priv->lock, flags);
- memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_config));
- spin_unlock_irqrestore(&priv->lock, flags);
-
- spin_lock_irqsave(&priv->lock, flags);
-
-	/* on a new association, get rid of the IBSS beacon skb */
- if (priv->beacon_skb)
- dev_kfree_skb(priv->beacon_skb);
-
- priv->beacon_skb = NULL;
-
- priv->timestamp = 0;
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- iwl_legacy_scan_cancel_timeout(priv, 100);
- if (!iwl_legacy_is_ready_rf(priv)) {
- IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
- mutex_unlock(&priv->mutex);
- return;
- }
-
-	/* we are restarting the association process,
-	 * so clear the RXON_FILTER_ASSOC_MSK bit
-	 */
- ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- iwl_legacy_commit_rxon(priv, ctx);
-
- iwl_legacy_set_rate(priv);
-
- mutex_unlock(&priv->mutex);
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-EXPORT_SYMBOL(iwl_legacy_mac_reset_tsf);
-
-static void iwl_legacy_ht_conf(struct iwl_priv *priv,
- struct ieee80211_vif *vif)
-{
- struct iwl_ht_config *ht_conf = &priv->current_ht_config;
- struct ieee80211_sta *sta;
- struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
- struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
-
- IWL_DEBUG_ASSOC(priv, "enter:\n");
-
- if (!ctx->ht.enabled)
- return;
-
- ctx->ht.protection =
- bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
- ctx->ht.non_gf_sta_present =
- !!(bss_conf->ht_operation_mode &
- IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
-
- ht_conf->single_chain_sufficient = false;
-
- switch (vif->type) {
- case NL80211_IFTYPE_STATION:
- rcu_read_lock();
- sta = ieee80211_find_sta(vif, bss_conf->bssid);
- if (sta) {
- struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
- int maxstreams;
-
- maxstreams = (ht_cap->mcs.tx_params &
- IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
- >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
- maxstreams += 1;
-
- if ((ht_cap->mcs.rx_mask[1] == 0) &&
- (ht_cap->mcs.rx_mask[2] == 0))
- ht_conf->single_chain_sufficient = true;
- if (maxstreams <= 1)
- ht_conf->single_chain_sufficient = true;
- } else {
-			/*
-			 * If this happens at all, it can only be through a
-			 * race when the AP disconnects us while we're still
-			 * setting up the connection; in that case mac80211
-			 * will soon tell us about it.
-			 */
- ht_conf->single_chain_sufficient = true;
- }
- rcu_read_unlock();
- break;
- case NL80211_IFTYPE_ADHOC:
- ht_conf->single_chain_sufficient = true;
- break;
- default:
- break;
- }
-
- IWL_DEBUG_ASSOC(priv, "leave\n");
-}
-
-static inline void iwl_legacy_set_no_assoc(struct iwl_priv *priv,
- struct ieee80211_vif *vif)
-{
- struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
-
- /*
- * inform the ucode that there is no longer an
- * association and that no more packets should be
- * sent
- */
- ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- ctx->staging.assoc_id = 0;
- iwl_legacy_commit_rxon(priv, ctx);
-}
-
-static void iwl_legacy_beacon_update(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
-{
- struct iwl_priv *priv = hw->priv;
- unsigned long flags;
- __le64 timestamp;
- struct sk_buff *skb = ieee80211_beacon_get(hw, vif);
-
- if (!skb)
- return;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- lockdep_assert_held(&priv->mutex);
-
- if (!priv->beacon_ctx) {
- IWL_ERR(priv, "update beacon but no beacon context!\n");
- dev_kfree_skb(skb);
- return;
- }
-
- spin_lock_irqsave(&priv->lock, flags);
-
- if (priv->beacon_skb)
- dev_kfree_skb(priv->beacon_skb);
-
- priv->beacon_skb = skb;
-
- timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
- priv->timestamp = le64_to_cpu(timestamp);
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
- spin_unlock_irqrestore(&priv->lock, flags);
-
- if (!iwl_legacy_is_ready_rf(priv)) {
- IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
- return;
- }
-
- priv->cfg->ops->legacy->post_associate(priv);
-}
-
-void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *bss_conf,
- u32 changes)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
- int ret;
-
- if (WARN_ON(!priv->cfg->ops->legacy))
- return;
-
- IWL_DEBUG_MAC80211(priv, "changes = 0x%X\n", changes);
-
- mutex_lock(&priv->mutex);
-
- if (!iwl_legacy_is_alive(priv)) {
- mutex_unlock(&priv->mutex);
- return;
- }
-
- if (changes & BSS_CHANGED_QOS) {
- unsigned long flags;
-
- spin_lock_irqsave(&priv->lock, flags);
- ctx->qos_data.qos_active = bss_conf->qos;
- iwl_legacy_update_qos(priv, ctx);
- spin_unlock_irqrestore(&priv->lock, flags);
- }
-
- if (changes & BSS_CHANGED_BEACON_ENABLED) {
- /*
- * the add_interface code must make sure we only ever
- * have a single interface that could be beaconing at
- * any time.
- */
- if (vif->bss_conf.enable_beacon)
- priv->beacon_ctx = ctx;
- else
- priv->beacon_ctx = NULL;
- }
-
- if (changes & BSS_CHANGED_BSSID) {
- IWL_DEBUG_MAC80211(priv, "BSSID %pM\n", bss_conf->bssid);
-
- /*
- * If there is currently a HW scan going on in the
- * background then we need to cancel it else the RXON
- * below/in post_associate will fail.
- */
- if (iwl_legacy_scan_cancel_timeout(priv, 100)) {
- IWL_WARN(priv,
- "Aborted scan still in progress after 100ms\n");
- IWL_DEBUG_MAC80211(priv,
- "leaving - scan abort failed.\n");
- mutex_unlock(&priv->mutex);
- return;
- }
-
- /* mac80211 only sets assoc when in STATION mode */
- if (vif->type == NL80211_IFTYPE_ADHOC || bss_conf->assoc) {
- memcpy(ctx->staging.bssid_addr,
- bss_conf->bssid, ETH_ALEN);
-
- /* currently needed in a few places */
- memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
- } else {
- ctx->staging.filter_flags &=
- ~RXON_FILTER_ASSOC_MSK;
- }
-
- }
-
- /*
- * This needs to be after setting the BSSID in case
- * mac80211 decides to do both changes at once because
- * it will invoke post_associate.
- */
- if (vif->type == NL80211_IFTYPE_ADHOC && changes & BSS_CHANGED_BEACON)
- iwl_legacy_beacon_update(hw, vif);
-
- if (changes & BSS_CHANGED_ERP_PREAMBLE) {
- IWL_DEBUG_MAC80211(priv, "ERP_PREAMBLE %d\n",
- bss_conf->use_short_preamble);
- if (bss_conf->use_short_preamble)
- ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
- else
- ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
- }
-
- if (changes & BSS_CHANGED_ERP_CTS_PROT) {
- IWL_DEBUG_MAC80211(priv,
- "ERP_CTS %d\n", bss_conf->use_cts_prot);
- if (bss_conf->use_cts_prot &&
- (priv->band != IEEE80211_BAND_5GHZ))
- ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
- else
- ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
- if (bss_conf->use_cts_prot)
- ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
- else
- ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
- }
-
- if (changes & BSS_CHANGED_BASIC_RATES) {
- /* XXX use this information
- *
- * To do that, remove code from iwl_legacy_set_rate() and put something
- * like this here:
- *
- if (A-band)
- ctx->staging.ofdm_basic_rates =
- bss_conf->basic_rates;
- else
- ctx->staging.ofdm_basic_rates =
- bss_conf->basic_rates >> 4;
- ctx->staging.cck_basic_rates =
- bss_conf->basic_rates & 0xF;
- */
- }
-
- if (changes & BSS_CHANGED_HT) {
- iwl_legacy_ht_conf(priv, vif);
-
- if (priv->cfg->ops->hcmd->set_rxon_chain)
- priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
- }
-
- if (changes & BSS_CHANGED_ASSOC) {
- IWL_DEBUG_MAC80211(priv, "ASSOC %d\n", bss_conf->assoc);
- if (bss_conf->assoc) {
- priv->timestamp = bss_conf->timestamp;
-
- if (!iwl_legacy_is_rfkill(priv))
- priv->cfg->ops->legacy->post_associate(priv);
- } else
- iwl_legacy_set_no_assoc(priv, vif);
- }
-
- if (changes && iwl_legacy_is_associated_ctx(ctx) && bss_conf->aid) {
- IWL_DEBUG_MAC80211(priv, "Changes (%#x) while associated\n",
- changes);
- ret = iwl_legacy_send_rxon_assoc(priv, ctx);
- if (!ret) {
- /* Sync active_rxon with latest change. */
- memcpy((void *)&ctx->active,
- &ctx->staging,
- sizeof(struct iwl_legacy_rxon_cmd));
- }
- }
-
- if (changes & BSS_CHANGED_BEACON_ENABLED) {
- if (vif->bss_conf.enable_beacon) {
- memcpy(ctx->staging.bssid_addr,
- bss_conf->bssid, ETH_ALEN);
- memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
- priv->cfg->ops->legacy->config_ap(priv);
- } else
- iwl_legacy_set_no_assoc(priv, vif);
- }
-
- if (changes & BSS_CHANGED_IBSS) {
- ret = priv->cfg->ops->legacy->manage_ibss_station(priv, vif,
- bss_conf->ibss_joined);
- if (ret)
- IWL_ERR(priv, "failed to %s IBSS station %pM\n",
- bss_conf->ibss_joined ? "add" : "remove",
- bss_conf->bssid);
- }
-
- mutex_unlock(&priv->mutex);
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-EXPORT_SYMBOL(iwl_legacy_mac_bss_info_changed);
-
-irqreturn_t iwl_legacy_isr(int irq, void *data)
-{
- struct iwl_priv *priv = data;
- u32 inta, inta_mask;
- u32 inta_fh;
- unsigned long flags;
- if (!priv)
- return IRQ_NONE;
-
- spin_lock_irqsave(&priv->lock, flags);
-
- /* Disable (but don't clear!) interrupts here to avoid
- * back-to-back ISRs and sporadic interrupts from our NIC.
- * If we have something to service, the tasklet will re-enable ints.
- * If we *don't* have something, we'll re-enable before leaving here. */
- inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
- iwl_write32(priv, CSR_INT_MASK, 0x00000000);
-
- /* Discover which interrupts are active/pending */
- inta = iwl_read32(priv, CSR_INT);
- inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
-
- /* Ignore interrupt if there's nothing in NIC to service.
- * This may be due to IRQ shared with another device,
- * or due to sporadic interrupts thrown from our NIC. */
- if (!inta && !inta_fh) {
- IWL_DEBUG_ISR(priv,
- "Ignore interrupt, inta == 0, inta_fh == 0\n");
- goto none;
- }
-
- if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
- /* Hardware disappeared. It might have already raised
- * an interrupt */
- IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
- goto unplugged;
- }
-
- IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
- inta, inta_mask, inta_fh);
-
- inta &= ~CSR_INT_BIT_SCD;
-
- /* iwl_irq_tasklet() will service interrupts and re-enable them */
- if (likely(inta || inta_fh))
- tasklet_schedule(&priv->irq_tasklet);
-
-unplugged:
- spin_unlock_irqrestore(&priv->lock, flags);
- return IRQ_HANDLED;
-
-none:
- /* re-enable interrupts here since we don't have anything to service. */
- /* only Re-enable if disabled by irq */
- if (test_bit(STATUS_INT_ENABLED, &priv->status))
- iwl_legacy_enable_interrupts(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
- return IRQ_NONE;
-}
-EXPORT_SYMBOL(iwl_legacy_isr);
-
-/*
- * iwl_legacy_tx_cmd_protection: Set RTS/CTS. Only 3945 and 4965 share this
- * function.
- */
-void iwl_legacy_tx_cmd_protection(struct iwl_priv *priv,
- struct ieee80211_tx_info *info,
- __le16 fc, __le32 *tx_flags)
-{
- if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
- *tx_flags |= TX_CMD_FLG_RTS_MSK;
- *tx_flags &= ~TX_CMD_FLG_CTS_MSK;
- *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
-
- if (!ieee80211_is_mgmt(fc))
- return;
-
- switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
- case cpu_to_le16(IEEE80211_STYPE_AUTH):
- case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
- case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
- case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
- *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
- *tx_flags |= TX_CMD_FLG_CTS_MSK;
- break;
- }
- } else if (info->control.rates[0].flags &
- IEEE80211_TX_RC_USE_CTS_PROTECT) {
- *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
- *tx_flags |= TX_CMD_FLG_CTS_MSK;
- *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
- }
-}
-EXPORT_SYMBOL(iwl_legacy_tx_cmd_protection);
diff --git a/drivers/net/wireless/iwlegacy/iwl-core.h b/drivers/net/wireless/iwlegacy/iwl-core.h
deleted file mode 100644
index d1271fe07d4b..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-core.h
+++ /dev/null
@@ -1,636 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *****************************************************************************/
-
-#ifndef __iwl_legacy_core_h__
-#define __iwl_legacy_core_h__
-
-/************************
- * forward declarations *
- ************************/
-struct iwl_host_cmd;
-struct iwl_cmd;
-
-
-#define IWLWIFI_VERSION "in-tree:"
-#define DRV_COPYRIGHT "Copyright(c) 2003-2011 Intel Corporation"
-#define DRV_AUTHOR "<ilw@linux.intel.com>"
-
-#define IWL_PCI_DEVICE(dev, subdev, cfg) \
- .vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \
- .subvendor = PCI_ANY_ID, .subdevice = (subdev), \
- .driver_data = (kernel_ulong_t)&(cfg)
-
-#define TIME_UNIT 1024
-
-#define IWL_SKU_G 0x1
-#define IWL_SKU_A 0x2
-#define IWL_SKU_N 0x8
-
-#define IWL_CMD(x) case x: return #x
-
-struct iwl_hcmd_ops {
- int (*rxon_assoc)(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
- int (*commit_rxon)(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
- void (*set_rxon_chain)(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx);
-};
-
-struct iwl_hcmd_utils_ops {
- u16 (*get_hcmd_size)(u8 cmd_id, u16 len);
- u16 (*build_addsta_hcmd)(const struct iwl_legacy_addsta_cmd *cmd,
- u8 *data);
- int (*request_scan)(struct iwl_priv *priv, struct ieee80211_vif *vif);
- void (*post_scan)(struct iwl_priv *priv);
-};
-
-struct iwl_apm_ops {
- int (*init)(struct iwl_priv *priv);
- void (*config)(struct iwl_priv *priv);
-};
-
-struct iwl_debugfs_ops {
- ssize_t (*rx_stats_read)(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos);
- ssize_t (*tx_stats_read)(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos);
- ssize_t (*general_stats_read)(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos);
-};
-
-struct iwl_temp_ops {
- void (*temperature)(struct iwl_priv *priv);
-};
-
-struct iwl_lib_ops {
- /* set hw dependent parameters */
- int (*set_hw_params)(struct iwl_priv *priv);
- /* Handling TX */
- void (*txq_update_byte_cnt_tbl)(struct iwl_priv *priv,
- struct iwl_tx_queue *txq,
- u16 byte_cnt);
- int (*txq_attach_buf_to_tfd)(struct iwl_priv *priv,
- struct iwl_tx_queue *txq,
- dma_addr_t addr,
- u16 len, u8 reset, u8 pad);
- void (*txq_free_tfd)(struct iwl_priv *priv,
- struct iwl_tx_queue *txq);
- int (*txq_init)(struct iwl_priv *priv,
- struct iwl_tx_queue *txq);
- /* setup Rx handler */
- void (*rx_handler_setup)(struct iwl_priv *priv);
- /* alive notification after init uCode load */
- void (*init_alive_start)(struct iwl_priv *priv);
- /* check validity of rtc data address */
- int (*is_valid_rtc_data_addr)(u32 addr);
- /* 1st ucode load */
- int (*load_ucode)(struct iwl_priv *priv);
-
- void (*dump_nic_error_log)(struct iwl_priv *priv);
- int (*dump_fh)(struct iwl_priv *priv, char **buf, bool display);
- int (*set_channel_switch)(struct iwl_priv *priv,
- struct ieee80211_channel_switch *ch_switch);
- /* power management */
- struct iwl_apm_ops apm_ops;
-
- /* power */
- int (*send_tx_power) (struct iwl_priv *priv);
- void (*update_chain_flags)(struct iwl_priv *priv);
-
- /* eeprom operations (as defined in iwl-eeprom.h) */
- struct iwl_eeprom_ops eeprom_ops;
-
- /* temperature */
- struct iwl_temp_ops temp_ops;
-
- struct iwl_debugfs_ops debugfs_ops;
-
-};
-
-struct iwl_led_ops {
- int (*cmd)(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd);
-};
-
-struct iwl_legacy_ops {
- void (*post_associate)(struct iwl_priv *priv);
- void (*config_ap)(struct iwl_priv *priv);
- /* station management */
- int (*update_bcast_stations)(struct iwl_priv *priv);
- int (*manage_ibss_station)(struct iwl_priv *priv,
- struct ieee80211_vif *vif, bool add);
-};
-
-struct iwl_ops {
- const struct iwl_lib_ops *lib;
- const struct iwl_hcmd_ops *hcmd;
- const struct iwl_hcmd_utils_ops *utils;
- const struct iwl_led_ops *led;
- const struct iwl_nic_ops *nic;
- const struct iwl_legacy_ops *legacy;
- const struct ieee80211_ops *ieee80211_ops;
-};
-
-struct iwl_mod_params {
- int sw_crypto; /* def: 0 = using hardware encryption */
- int disable_hw_scan; /* def: 0 = use h/w scan */
- int num_of_queues; /* def: HW dependent */
- int disable_11n; /* def: 0 = 11n capabilities enabled */
- int amsdu_size_8K; /* def: 1 = enable 8K amsdu size */
- int antenna; /* def: 0 = both antennas (use diversity) */
- int restart_fw; /* def: 1 = restart firmware */
-};
-
-/*
- * @led_compensation: compensate the LED on/off time per HW according
- *	to the deviation, to achieve the desired LED frequency.
- *	The detailed algorithm is described in iwl-led.c
- * @chain_noise_num_beacons: number of beacons used to compute chain noise
- * @wd_timeout: TX queues watchdog timeout
- * @temperature_kelvin: temperature reported by uCode in kelvin
- * @ucode_tracing: support ucode continuous tracing
- * @sensitivity_calib_by_driver: driver has the capability to perform
- * sensitivity calibration operation
- * @chain_noise_calib_by_driver: driver has the capability to perform
- * chain noise calibration operation
- */
-struct iwl_base_params {
- int eeprom_size;
- int num_of_queues; /* def: HW dependent */
- int num_of_ampdu_queues;/* def: HW dependent */
- /* for iwl_legacy_apm_init() */
- u32 pll_cfg_val;
- bool set_l0s;
- bool use_bsm;
-
- u16 led_compensation;
- int chain_noise_num_beacons;
- unsigned int wd_timeout;
- bool temperature_kelvin;
- const bool ucode_tracing;
- const bool sensitivity_calib_by_driver;
- const bool chain_noise_calib_by_driver;
-};
-
-/**
- * struct iwl_cfg
- * @fw_name_pre: Firmware filename prefix. The API version and extension
- * (.ucode) will be added to the filename before loading from disk. The
- * filename is constructed as fw_name_pre<api>.ucode.
- * @ucode_api_max: Highest version of uCode API supported by driver.
- * @ucode_api_min: Lowest version of uCode API supported by driver.
- * @scan_antennas: available antenna for scan operation
- * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off)
- *
- * We enable the driver to be backward compatible wrt API version. The
- * driver specifies which APIs it supports (with @ucode_api_max being the
- * highest and @ucode_api_min the lowest). Firmware will only be loaded if
- * it has a supported API version. The firmware's API version will be
- * stored in @iwl_priv, enabling the driver to make runtime changes based
- * on firmware version used.
- *
- * For example,
- * if (IWL_UCODE_API(priv->ucode_ver) >= 2) {
- * Driver interacts with Firmware API version >= 2.
- * } else {
- * Driver interacts with Firmware API version 1.
- * }
- *
- * The ideal usage of this infrastructure is to treat a new ucode API
- * release as a new hardware revision. That is, by using the
- * iwl_hcmd_utils_ops etc. we accommodate different command structures
- * and flows between hardware versions as well as between their API
- * versions.
- *
- */
-struct iwl_cfg {
- /* params specific to an individual device within a device family */
- const char *name;
- const char *fw_name_pre;
- const unsigned int ucode_api_max;
- const unsigned int ucode_api_min;
- u8 valid_tx_ant;
- u8 valid_rx_ant;
- unsigned int sku;
- u16 eeprom_ver;
- u16 eeprom_calib_ver;
- const struct iwl_ops *ops;
- /* module based parameters which can be set from modprobe cmd */
- const struct iwl_mod_params *mod_params;
- /* params not likely to change within a device family */
- struct iwl_base_params *base_params;
- /* params likely to change within a device family */
- u8 scan_rx_antennas[IEEE80211_NUM_BANDS];
- enum iwl_led_mode led_mode;
-};
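As a minimal sketch of the naming scheme documented above — assuming a hypothetical iwl_test_cfg and helper, neither of which exists in this driver — the @fw_name_pre prefix and a candidate API version combine into the on-disk firmware name like this:

/* Minimal sketch only: iwl_test_cfg and iwl_test_build_fw_name() are
 * hypothetical, shown to illustrate the fw_name_pre<api>.ucode rule. */
static const struct iwl_cfg iwl_test_cfg = {
	.name = "Test NIC",
	.fw_name_pre = "iwlwifi-test-",
	.ucode_api_max = 2,
	.ucode_api_min = 1,
};

static void iwl_test_build_fw_name(char *buf, size_t len, unsigned int api)
{
	/* e.g. api == 2 yields "iwlwifi-test-2.ucode" */
	snprintf(buf, len, "%s%u.ucode", iwl_test_cfg.fw_name_pre, api);
}

Per the backward-compatibility note in the kernel-doc, the driver would typically request the @ucode_api_max name first and fall back toward @ucode_api_min until a supported firmware image is found.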
-
-/***************************
- * L i b *
- ***************************/
-
-struct ieee80211_hw *iwl_legacy_alloc_all(struct iwl_cfg *cfg);
-int iwl_legacy_mac_conf_tx(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, u16 queue,
- const struct ieee80211_tx_queue_params *params);
-int iwl_legacy_mac_tx_last_beacon(struct ieee80211_hw *hw);
-void iwl_legacy_set_rxon_hwcrypto(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- int hw_decrypt);
-int iwl_legacy_check_rxon_cmd(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx);
-int iwl_legacy_full_rxon_required(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx);
-int iwl_legacy_set_rxon_channel(struct iwl_priv *priv,
- struct ieee80211_channel *ch,
- struct iwl_rxon_context *ctx);
-void iwl_legacy_set_flags_for_band(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- enum ieee80211_band band,
- struct ieee80211_vif *vif);
-u8 iwl_legacy_get_single_channel_number(struct iwl_priv *priv,
- enum ieee80211_band band);
-void iwl_legacy_set_rxon_ht(struct iwl_priv *priv,
- struct iwl_ht_config *ht_conf);
-bool iwl_legacy_is_ht40_tx_allowed(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct ieee80211_sta_ht_cap *ht_cap);
-void iwl_legacy_connection_init_rx_config(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx);
-void iwl_legacy_set_rate(struct iwl_priv *priv);
-int iwl_legacy_set_decrypted_flag(struct iwl_priv *priv,
- struct ieee80211_hdr *hdr,
- u32 decrypt_res,
- struct ieee80211_rx_status *stats);
-void iwl_legacy_irq_handle_error(struct iwl_priv *priv);
-int iwl_legacy_mac_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif);
-void iwl_legacy_mac_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif);
-int iwl_legacy_mac_change_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- enum nl80211_iftype newtype, bool newp2p);
-int iwl_legacy_alloc_txq_mem(struct iwl_priv *priv);
-void iwl_legacy_txq_mem(struct iwl_priv *priv);
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
-int iwl_legacy_alloc_traffic_mem(struct iwl_priv *priv);
-void iwl_legacy_free_traffic_mem(struct iwl_priv *priv);
-void iwl_legacy_reset_traffic_log(struct iwl_priv *priv);
-void iwl_legacy_dbg_log_tx_data_frame(struct iwl_priv *priv,
- u16 length, struct ieee80211_hdr *header);
-void iwl_legacy_dbg_log_rx_data_frame(struct iwl_priv *priv,
- u16 length, struct ieee80211_hdr *header);
-const char *iwl_legacy_get_mgmt_string(int cmd);
-const char *iwl_legacy_get_ctrl_string(int cmd);
-void iwl_legacy_clear_traffic_stats(struct iwl_priv *priv);
-void iwl_legacy_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc,
- u16 len);
-#else
-static inline int iwl_legacy_alloc_traffic_mem(struct iwl_priv *priv)
-{
- return 0;
-}
-static inline void iwl_legacy_free_traffic_mem(struct iwl_priv *priv)
-{
-}
-static inline void iwl_legacy_reset_traffic_log(struct iwl_priv *priv)
-{
-}
-static inline void iwl_legacy_dbg_log_tx_data_frame(struct iwl_priv *priv,
- u16 length, struct ieee80211_hdr *header)
-{
-}
-static inline void iwl_legacy_dbg_log_rx_data_frame(struct iwl_priv *priv,
- u16 length, struct ieee80211_hdr *header)
-{
-}
-static inline void iwl_legacy_update_stats(struct iwl_priv *priv, bool is_tx,
- __le16 fc, u16 len)
-{
-}
-#endif
-/*****************************************************
- * RX handlers.
- * **************************************************/
-void iwl_legacy_rx_pm_sleep_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
-void iwl_legacy_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
-void iwl_legacy_rx_reply_error(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
-
-/*****************************************************
-* RX
-******************************************************/
-void iwl_legacy_cmd_queue_unmap(struct iwl_priv *priv);
-void iwl_legacy_cmd_queue_free(struct iwl_priv *priv);
-int iwl_legacy_rx_queue_alloc(struct iwl_priv *priv);
-void iwl_legacy_rx_queue_update_write_ptr(struct iwl_priv *priv,
- struct iwl_rx_queue *q);
-int iwl_legacy_rx_queue_space(const struct iwl_rx_queue *q);
-void iwl_legacy_tx_cmd_complete(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
-/* Handlers */
-void iwl_legacy_rx_spectrum_measure_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
-void iwl_legacy_recover_from_statistics(struct iwl_priv *priv,
- struct iwl_rx_packet *pkt);
-void iwl_legacy_chswitch_done(struct iwl_priv *priv, bool is_success);
-void iwl_legacy_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
-
-/* TX helpers */
-
-/*****************************************************
-* TX
-******************************************************/
-void iwl_legacy_txq_update_write_ptr(struct iwl_priv *priv,
- struct iwl_tx_queue *txq);
-int iwl_legacy_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
- int slots_num, u32 txq_id);
-void iwl_legacy_tx_queue_reset(struct iwl_priv *priv,
- struct iwl_tx_queue *txq,
- int slots_num, u32 txq_id);
-void iwl_legacy_tx_queue_unmap(struct iwl_priv *priv, int txq_id);
-void iwl_legacy_tx_queue_free(struct iwl_priv *priv, int txq_id);
-void iwl_legacy_setup_watchdog(struct iwl_priv *priv);
-/*****************************************************
- * TX power
- ****************************************************/
-int iwl_legacy_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force);
-
-/*******************************************************************************
- * Rate
- ******************************************************************************/
-
-u8 iwl_legacy_get_lowest_plcp(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx);
-
-/*******************************************************************************
- * Scanning
- ******************************************************************************/
-void iwl_legacy_init_scan_params(struct iwl_priv *priv);
-int iwl_legacy_scan_cancel(struct iwl_priv *priv);
-int iwl_legacy_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms);
-void iwl_legacy_force_scan_end(struct iwl_priv *priv);
-int iwl_legacy_mac_hw_scan(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct cfg80211_scan_request *req);
-void iwl_legacy_internal_short_hw_scan(struct iwl_priv *priv);
-int iwl_legacy_force_reset(struct iwl_priv *priv, bool external);
-u16 iwl_legacy_fill_probe_req(struct iwl_priv *priv,
- struct ieee80211_mgmt *frame,
- const u8 *ta, const u8 *ie, int ie_len, int left);
-void iwl_legacy_setup_rx_scan_handlers(struct iwl_priv *priv);
-u16 iwl_legacy_get_active_dwell_time(struct iwl_priv *priv,
- enum ieee80211_band band,
- u8 n_probes);
-u16 iwl_legacy_get_passive_dwell_time(struct iwl_priv *priv,
- enum ieee80211_band band,
- struct ieee80211_vif *vif);
-void iwl_legacy_setup_scan_deferred_work(struct iwl_priv *priv);
-void iwl_legacy_cancel_scan_deferred_work(struct iwl_priv *priv);
-
-/* For faster active scanning, the scan will move to the next channel if fewer
- * than PLCP_QUIET_THRESH packets are heard on this channel within
- * ACTIVE_QUIET_TIME after sending a probe request. This shortens the dwell
- * time if it is a quiet channel (nothing responded to our probe, and there is
- * no other traffic).
- * Disable the "quiet" feature by setting PLCP_QUIET_THRESH to 0. */
-#define IWL_ACTIVE_QUIET_TIME cpu_to_le16(10) /* msec */
-#define IWL_PLCP_QUIET_THRESH cpu_to_le16(1) /* packets */
-
-#define IWL_SCAN_CHECK_WATCHDOG (HZ * 7)
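A minimal sketch of how the two "quiet" values above are meant to be consumed when a scan command is filled in; the iwl_scan_cmd fields (quiet_time, quiet_plcp_th) are assumed from the command definitions used elsewhere in this driver, and the helper itself is hypothetical:

/* Sketch only, not part of this header. */
static void example_fill_quiet_params(struct iwl_scan_cmd *scan)
{
	scan->quiet_time = IWL_ACTIVE_QUIET_TIME;	/* dwell before early abort */
	scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;	/* 0 here would disable the abort */
}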
-
-/*****************************************************
- * S e n d i n g H o s t C o m m a n d s *
- *****************************************************/
-
-const char *iwl_legacy_get_cmd_string(u8 cmd);
-int __must_check iwl_legacy_send_cmd_sync(struct iwl_priv *priv,
- struct iwl_host_cmd *cmd);
-int iwl_legacy_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
-int __must_check iwl_legacy_send_cmd_pdu(struct iwl_priv *priv, u8 id,
- u16 len, const void *data);
-int iwl_legacy_send_cmd_pdu_async(struct iwl_priv *priv, u8 id, u16 len,
- const void *data,
- void (*callback)(struct iwl_priv *priv,
- struct iwl_device_cmd *cmd,
- struct iwl_rx_packet *pkt));
-
-int iwl_legacy_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
-
-
-/*****************************************************
- * PCI *
- *****************************************************/
-
-static inline u16 iwl_legacy_pcie_link_ctl(struct iwl_priv *priv)
-{
- int pos;
- u16 pci_lnk_ctl;
- pos = pci_pcie_cap(priv->pci_dev);
- pci_read_config_word(priv->pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl);
- return pci_lnk_ctl;
-}
-
-void iwl_legacy_bg_watchdog(unsigned long data);
-u32 iwl_legacy_usecs_to_beacons(struct iwl_priv *priv,
- u32 usec, u32 beacon_interval);
-__le32 iwl_legacy_add_beacon_time(struct iwl_priv *priv, u32 base,
- u32 addon, u32 beacon_interval);
-
-#ifdef CONFIG_PM
-int iwl_legacy_pci_suspend(struct device *device);
-int iwl_legacy_pci_resume(struct device *device);
-extern const struct dev_pm_ops iwl_legacy_pm_ops;
-
-#define IWL_LEGACY_PM_OPS (&iwl_legacy_pm_ops)
-
-#else /* !CONFIG_PM */
-
-#define IWL_LEGACY_PM_OPS NULL
-
-#endif /* !CONFIG_PM */
-
-/*****************************************************
-* Error Handling Debugging
-******************************************************/
-void iwl4965_dump_nic_error_log(struct iwl_priv *priv);
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-void iwl_legacy_print_rx_config_cmd(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx);
-#else
-static inline void iwl_legacy_print_rx_config_cmd(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
-{
-}
-#endif
-
-void iwl_legacy_clear_isr_stats(struct iwl_priv *priv);
-
-/*****************************************************
-* GEOS
-******************************************************/
-int iwl_legacy_init_geos(struct iwl_priv *priv);
-void iwl_legacy_free_geos(struct iwl_priv *priv);
-
-/*************** DRIVER STATUS FUNCTIONS *****/
-
-#define STATUS_HCMD_ACTIVE 0 /* host command in progress */
-/* 1 is unused (used to be STATUS_HCMD_SYNC_ACTIVE) */
-#define STATUS_INT_ENABLED 2
-#define STATUS_RF_KILL_HW 3
-#define STATUS_CT_KILL 4
-#define STATUS_INIT 5
-#define STATUS_ALIVE 6
-#define STATUS_READY 7
-#define STATUS_TEMPERATURE 8
-#define STATUS_GEO_CONFIGURED 9
-#define STATUS_EXIT_PENDING 10
-#define STATUS_STATISTICS 12
-#define STATUS_SCANNING 13
-#define STATUS_SCAN_ABORTING 14
-#define STATUS_SCAN_HW 15
-#define STATUS_POWER_PMI 16
-#define STATUS_FW_ERROR 17
-#define STATUS_CHANNEL_SWITCH_PENDING 18
-
-static inline int iwl_legacy_is_ready(struct iwl_priv *priv)
-{
- /* The adapter is 'ready' if READY and GEO_CONFIGURED bits are
- * set but EXIT_PENDING is not */
- return test_bit(STATUS_READY, &priv->status) &&
- test_bit(STATUS_GEO_CONFIGURED, &priv->status) &&
- !test_bit(STATUS_EXIT_PENDING, &priv->status);
-}
-
-static inline int iwl_legacy_is_alive(struct iwl_priv *priv)
-{
- return test_bit(STATUS_ALIVE, &priv->status);
-}
-
-static inline int iwl_legacy_is_init(struct iwl_priv *priv)
-{
- return test_bit(STATUS_INIT, &priv->status);
-}
-
-static inline int iwl_legacy_is_rfkill_hw(struct iwl_priv *priv)
-{
- return test_bit(STATUS_RF_KILL_HW, &priv->status);
-}
-
-static inline int iwl_legacy_is_rfkill(struct iwl_priv *priv)
-{
- return iwl_legacy_is_rfkill_hw(priv);
-}
-
-static inline int iwl_legacy_is_ctkill(struct iwl_priv *priv)
-{
- return test_bit(STATUS_CT_KILL, &priv->status);
-}
-
-static inline int iwl_legacy_is_ready_rf(struct iwl_priv *priv)
-{
-
- if (iwl_legacy_is_rfkill(priv))
- return 0;
-
- return iwl_legacy_is_ready(priv);
-}
-
-extern void iwl_legacy_send_bt_config(struct iwl_priv *priv);
-extern int iwl_legacy_send_statistics_request(struct iwl_priv *priv,
- u8 flags, bool clear);
-void iwl_legacy_apm_stop(struct iwl_priv *priv);
-int iwl_legacy_apm_init(struct iwl_priv *priv);
-
-int iwl_legacy_send_rxon_timing(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx);
-static inline int iwl_legacy_send_rxon_assoc(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
-{
- return priv->cfg->ops->hcmd->rxon_assoc(priv, ctx);
-}
-static inline int iwl_legacy_commit_rxon(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
-{
- return priv->cfg->ops->hcmd->commit_rxon(priv, ctx);
-}
-static inline const struct ieee80211_supported_band *iwl_get_hw_mode(
- struct iwl_priv *priv, enum ieee80211_band band)
-{
- return priv->hw->wiphy->bands[band];
-}
-
-/* mac80211 handlers */
-int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed);
-void iwl_legacy_mac_reset_tsf(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif);
-void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *bss_conf,
- u32 changes);
-void iwl_legacy_tx_cmd_protection(struct iwl_priv *priv,
- struct ieee80211_tx_info *info,
- __le16 fc, __le32 *tx_flags);
-
-irqreturn_t iwl_legacy_isr(int irq, void *data);
-
-#endif /* __iwl_legacy_core_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-debug.h b/drivers/net/wireless/iwlegacy/iwl-debug.h
deleted file mode 100644
index ae13112701bf..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-debug.h
+++ /dev/null
@@ -1,198 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#ifndef __iwl_legacy_debug_h__
-#define __iwl_legacy_debug_h__
-
-struct iwl_priv;
-extern u32 iwlegacy_debug_level;
-
-#define IWL_ERR(p, f, a...) dev_err(&((p)->pci_dev->dev), f, ## a)
-#define IWL_WARN(p, f, a...) dev_warn(&((p)->pci_dev->dev), f, ## a)
-#define IWL_INFO(p, f, a...) dev_info(&((p)->pci_dev->dev), f, ## a)
-#define IWL_CRIT(p, f, a...) dev_crit(&((p)->pci_dev->dev), f, ## a)
-
-#define iwl_print_hex_error(priv, p, len) \
-do { \
- print_hex_dump(KERN_ERR, "iwl data: ", \
- DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
-} while (0)
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-#define IWL_DEBUG(__priv, level, fmt, args...) \
-do { \
- if (iwl_legacy_get_debug_level(__priv) & (level)) \
- dev_printk(KERN_ERR, &(__priv->hw->wiphy->dev), \
- "%c %s " fmt, in_interrupt() ? 'I' : 'U', \
- __func__ , ## args); \
-} while (0)
-
-#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) \
-do { \
- if ((iwl_legacy_get_debug_level(__priv) & (level)) && net_ratelimit()) \
- dev_printk(KERN_ERR, &(__priv->hw->wiphy->dev), \
- "%c %s " fmt, in_interrupt() ? 'I' : 'U', \
- __func__ , ## args); \
-} while (0)
-
-#define iwl_print_hex_dump(priv, level, p, len) \
-do { \
- if (iwl_legacy_get_debug_level(priv) & level) \
- print_hex_dump(KERN_DEBUG, "iwl data: ", \
- DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
-} while (0)
-
-#else
-#define IWL_DEBUG(__priv, level, fmt, args...)
-#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
-static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
- const void *p, u32 len)
-{}
-#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
-int iwl_legacy_dbgfs_register(struct iwl_priv *priv, const char *name);
-void iwl_legacy_dbgfs_unregister(struct iwl_priv *priv);
-#else
-static inline int
-iwl_legacy_dbgfs_register(struct iwl_priv *priv, const char *name)
-{
- return 0;
-}
-static inline void iwl_legacy_dbgfs_unregister(struct iwl_priv *priv)
-{
-}
-#endif /* CONFIG_IWLWIFI_LEGACY_DEBUGFS */
-
-/*
- * To use the debug system:
- *
- * If you are defining a new debug classification, simply add it to the #define
- * list here in the form of
- *
- * #define IWL_DL_xxxx VALUE
- *
- * where xxxx should be the name of the classification (for example, WEP).
- *
- * You then need to either add an IWL_xxxx_DEBUG() macro definition for your
- * classification, or use IWL_DEBUG(IWL_DL_xxxx, ...) whenever you want
- * to send output to that classification.
- *
- * The active debug levels can be accessed via files
- *
- * /sys/module/iwl4965/parameters/debug{50}
- * /sys/module/iwl3945/parameters/debug
- * /sys/class/net/wlan0/device/debug_level
- *
- * when CONFIG_IWLWIFI_LEGACY_DEBUG=y.
- */
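A short sketch of the procedure above, using the existing WEP classification (IWL_DL_WEP and IWL_DEBUG_WEP are defined further down in this header; the example function and its status argument are hypothetical):

/* The classification and its macro already exist below:
 *   #define IWL_DL_WEP        (1 << 22)
 *   #define IWL_DEBUG_WEP(p, f, a...)  IWL_DEBUG(p, IWL_DL_WEP, f, ## a)
 * A call site then looks like this (illustrative only): */
static void example_log_wep_failure(struct iwl_priv *priv, u32 status)
{
	IWL_DEBUG_WEP(priv, "WEP decrypt failed, status 0x%x\n", status);
}
/* At runtime, enable the classification by setting bit 22 (0x400000) in
 *   /sys/class/net/wlan0/device/debug_level
 * with CONFIG_IWLWIFI_LEGACY_DEBUG=y; otherwise the macro compiles away. */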
-
-/* 0x0000000F - 0x00000001 */
-#define IWL_DL_INFO (1 << 0)
-#define IWL_DL_MAC80211 (1 << 1)
-#define IWL_DL_HCMD (1 << 2)
-#define IWL_DL_STATE (1 << 3)
-/* 0x000000F0 - 0x00000010 */
-#define IWL_DL_MACDUMP (1 << 4)
-#define IWL_DL_HCMD_DUMP (1 << 5)
-#define IWL_DL_EEPROM (1 << 6)
-#define IWL_DL_RADIO (1 << 7)
-/* 0x00000F00 - 0x00000100 */
-#define IWL_DL_POWER (1 << 8)
-#define IWL_DL_TEMP (1 << 9)
-#define IWL_DL_NOTIF (1 << 10)
-#define IWL_DL_SCAN (1 << 11)
-/* 0x0000F000 - 0x00001000 */
-#define IWL_DL_ASSOC (1 << 12)
-#define IWL_DL_DROP (1 << 13)
-#define IWL_DL_TXPOWER (1 << 14)
-#define IWL_DL_AP (1 << 15)
-/* 0x000F0000 - 0x00010000 */
-#define IWL_DL_FW (1 << 16)
-#define IWL_DL_RF_KILL (1 << 17)
-#define IWL_DL_FW_ERRORS (1 << 18)
-#define IWL_DL_LED (1 << 19)
-/* 0x00F00000 - 0x00100000 */
-#define IWL_DL_RATE (1 << 20)
-#define IWL_DL_CALIB (1 << 21)
-#define IWL_DL_WEP (1 << 22)
-#define IWL_DL_TX (1 << 23)
-/* 0x0F000000 - 0x01000000 */
-#define IWL_DL_RX (1 << 24)
-#define IWL_DL_ISR (1 << 25)
-#define IWL_DL_HT (1 << 26)
-#define IWL_DL_IO (1 << 27)
-/* 0xF0000000 - 0x10000000 */
-#define IWL_DL_11H (1 << 28)
-#define IWL_DL_STATS (1 << 29)
-#define IWL_DL_TX_REPLY (1 << 30)
-#define IWL_DL_QOS (1 << 31)
-
-#define IWL_DEBUG_INFO(p, f, a...) IWL_DEBUG(p, IWL_DL_INFO, f, ## a)
-#define IWL_DEBUG_MAC80211(p, f, a...) IWL_DEBUG(p, IWL_DL_MAC80211, f, ## a)
-#define IWL_DEBUG_MACDUMP(p, f, a...) IWL_DEBUG(p, IWL_DL_MACDUMP, f, ## a)
-#define IWL_DEBUG_TEMP(p, f, a...) IWL_DEBUG(p, IWL_DL_TEMP, f, ## a)
-#define IWL_DEBUG_SCAN(p, f, a...) IWL_DEBUG(p, IWL_DL_SCAN, f, ## a)
-#define IWL_DEBUG_RX(p, f, a...) IWL_DEBUG(p, IWL_DL_RX, f, ## a)
-#define IWL_DEBUG_TX(p, f, a...) IWL_DEBUG(p, IWL_DL_TX, f, ## a)
-#define IWL_DEBUG_ISR(p, f, a...) IWL_DEBUG(p, IWL_DL_ISR, f, ## a)
-#define IWL_DEBUG_LED(p, f, a...) IWL_DEBUG(p, IWL_DL_LED, f, ## a)
-#define IWL_DEBUG_WEP(p, f, a...) IWL_DEBUG(p, IWL_DL_WEP, f, ## a)
-#define IWL_DEBUG_HC(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD, f, ## a)
-#define IWL_DEBUG_HC_DUMP(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD_DUMP, f, ## a)
-#define IWL_DEBUG_EEPROM(p, f, a...) IWL_DEBUG(p, IWL_DL_EEPROM, f, ## a)
-#define IWL_DEBUG_CALIB(p, f, a...) IWL_DEBUG(p, IWL_DL_CALIB, f, ## a)
-#define IWL_DEBUG_FW(p, f, a...) IWL_DEBUG(p, IWL_DL_FW, f, ## a)
-#define IWL_DEBUG_RF_KILL(p, f, a...) IWL_DEBUG(p, IWL_DL_RF_KILL, f, ## a)
-#define IWL_DEBUG_DROP(p, f, a...) IWL_DEBUG(p, IWL_DL_DROP, f, ## a)
-#define IWL_DEBUG_DROP_LIMIT(p, f, a...) \
- IWL_DEBUG_LIMIT(p, IWL_DL_DROP, f, ## a)
-#define IWL_DEBUG_AP(p, f, a...) IWL_DEBUG(p, IWL_DL_AP, f, ## a)
-#define IWL_DEBUG_TXPOWER(p, f, a...) IWL_DEBUG(p, IWL_DL_TXPOWER, f, ## a)
-#define IWL_DEBUG_IO(p, f, a...) IWL_DEBUG(p, IWL_DL_IO, f, ## a)
-#define IWL_DEBUG_RATE(p, f, a...) IWL_DEBUG(p, IWL_DL_RATE, f, ## a)
-#define IWL_DEBUG_RATE_LIMIT(p, f, a...) \
- IWL_DEBUG_LIMIT(p, IWL_DL_RATE, f, ## a)
-#define IWL_DEBUG_NOTIF(p, f, a...) IWL_DEBUG(p, IWL_DL_NOTIF, f, ## a)
-#define IWL_DEBUG_ASSOC(p, f, a...) \
- IWL_DEBUG(p, IWL_DL_ASSOC | IWL_DL_INFO, f, ## a)
-#define IWL_DEBUG_ASSOC_LIMIT(p, f, a...) \
- IWL_DEBUG_LIMIT(p, IWL_DL_ASSOC | IWL_DL_INFO, f, ## a)
-#define IWL_DEBUG_HT(p, f, a...) IWL_DEBUG(p, IWL_DL_HT, f, ## a)
-#define IWL_DEBUG_STATS(p, f, a...) IWL_DEBUG(p, IWL_DL_STATS, f, ## a)
-#define IWL_DEBUG_STATS_LIMIT(p, f, a...) \
- IWL_DEBUG_LIMIT(p, IWL_DL_STATS, f, ## a)
-#define IWL_DEBUG_TX_REPLY(p, f, a...) IWL_DEBUG(p, IWL_DL_TX_REPLY, f, ## a)
-#define IWL_DEBUG_TX_REPLY_LIMIT(p, f, a...) \
- IWL_DEBUG_LIMIT(p, IWL_DL_TX_REPLY, f, ## a)
-#define IWL_DEBUG_QOS(p, f, a...) IWL_DEBUG(p, IWL_DL_QOS, f, ## a)
-#define IWL_DEBUG_RADIO(p, f, a...) IWL_DEBUG(p, IWL_DL_RADIO, f, ## a)
-#define IWL_DEBUG_POWER(p, f, a...) IWL_DEBUG(p, IWL_DL_POWER, f, ## a)
-#define IWL_DEBUG_11H(p, f, a...) IWL_DEBUG(p, IWL_DL_11H, f, ## a)
-
-#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-debugfs.c b/drivers/net/wireless/iwlegacy/iwl-debugfs.c
deleted file mode 100644
index 1407dca70def..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-debugfs.c
+++ /dev/null
@@ -1,1314 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *****************************************************************************/
-#include <linux/ieee80211.h>
-#include <linux/export.h>
-#include <net/mac80211.h>
-
-
-#include "iwl-dev.h"
-#include "iwl-debug.h"
-#include "iwl-core.h"
-#include "iwl-io.h"
-
-/* creation and removal of files */
-#define DEBUGFS_ADD_FILE(name, parent, mode) do { \
- if (!debugfs_create_file(#name, mode, parent, priv, \
- &iwl_legacy_dbgfs_##name##_ops)) \
- goto err; \
-} while (0)
-
-#define DEBUGFS_ADD_BOOL(name, parent, ptr) do { \
- struct dentry *__tmp; \
- __tmp = debugfs_create_bool(#name, S_IWUSR | S_IRUSR, \
- parent, ptr); \
- if (IS_ERR(__tmp) || !__tmp) \
- goto err; \
-} while (0)
-
-#define DEBUGFS_ADD_X32(name, parent, ptr) do { \
- struct dentry *__tmp; \
- __tmp = debugfs_create_x32(#name, S_IWUSR | S_IRUSR, \
- parent, ptr); \
- if (IS_ERR(__tmp) || !__tmp) \
- goto err; \
-} while (0)
-
-/* file operations */
-#define DEBUGFS_READ_FUNC(name) \
-static ssize_t iwl_legacy_dbgfs_##name##_read(struct file *file, \
- char __user *user_buf, \
- size_t count, loff_t *ppos);
-
-#define DEBUGFS_WRITE_FUNC(name) \
-static ssize_t iwl_legacy_dbgfs_##name##_write(struct file *file, \
- const char __user *user_buf, \
- size_t count, loff_t *ppos);
-
-
-static int
-iwl_legacy_dbgfs_open_file_generic(struct inode *inode, struct file *file)
-{
- file->private_data = inode->i_private;
- return 0;
-}
-
-#define DEBUGFS_READ_FILE_OPS(name) \
- DEBUGFS_READ_FUNC(name); \
-static const struct file_operations iwl_legacy_dbgfs_##name##_ops = { \
- .read = iwl_legacy_dbgfs_##name##_read, \
- .open = iwl_legacy_dbgfs_open_file_generic, \
- .llseek = generic_file_llseek, \
-};
-
-#define DEBUGFS_WRITE_FILE_OPS(name) \
- DEBUGFS_WRITE_FUNC(name); \
-static const struct file_operations iwl_legacy_dbgfs_##name##_ops = { \
- .write = iwl_legacy_dbgfs_##name##_write, \
- .open = iwl_legacy_dbgfs_open_file_generic, \
- .llseek = generic_file_llseek, \
-};
-
-#define DEBUGFS_READ_WRITE_FILE_OPS(name) \
- DEBUGFS_READ_FUNC(name); \
- DEBUGFS_WRITE_FUNC(name); \
-static const struct file_operations iwl_legacy_dbgfs_##name##_ops = { \
- .write = iwl_legacy_dbgfs_##name##_write, \
- .read = iwl_legacy_dbgfs_##name##_read, \
- .open = iwl_legacy_dbgfs_open_file_generic, \
- .llseek = generic_file_llseek, \
-};
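To make the macro machinery above concrete, DEBUGFS_READ_FILE_OPS(nvm) — invoked further down in this file — expands to roughly the following (hand-expanded here purely for illustration):

/* Hand expansion of DEBUGFS_READ_FILE_OPS(nvm). */
static ssize_t iwl_legacy_dbgfs_nvm_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos);
static const struct file_operations iwl_legacy_dbgfs_nvm_ops = {
	.read = iwl_legacy_dbgfs_nvm_read,
	.open = iwl_legacy_dbgfs_open_file_generic,
	.llseek = generic_file_llseek,
};

The corresponding DEBUGFS_ADD_FILE(nvm, ...) call in the debugfs register routine (outside this excerpt) is what then creates the file and ties it to these ops.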
-
-static ssize_t iwl_legacy_dbgfs_tx_statistics_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos) {
-
- struct iwl_priv *priv = file->private_data;
- char *buf;
- int pos = 0;
-
- int cnt;
- ssize_t ret;
- const size_t bufsz = 100 +
- sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX);
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
- pos += scnprintf(buf + pos, bufsz - pos, "Management:\n");
- for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) {
- pos += scnprintf(buf + pos, bufsz - pos,
- "\t%25s\t\t: %u\n",
- iwl_legacy_get_mgmt_string(cnt),
- priv->tx_stats.mgmt[cnt]);
- }
- pos += scnprintf(buf + pos, bufsz - pos, "Control\n");
- for (cnt = 0; cnt < CONTROL_MAX; cnt++) {
- pos += scnprintf(buf + pos, bufsz - pos,
- "\t%25s\t\t: %u\n",
- iwl_legacy_get_ctrl_string(cnt),
- priv->tx_stats.ctrl[cnt]);
- }
- pos += scnprintf(buf + pos, bufsz - pos, "Data:\n");
- pos += scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n",
- priv->tx_stats.data_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n",
- priv->tx_stats.data_bytes);
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-static ssize_t
-iwl_legacy_dbgfs_clear_traffic_statistics_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- u32 clear_flag;
- char buf[8];
- int buf_size;
-
- memset(buf, 0, sizeof(buf));
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
- if (sscanf(buf, "%x", &clear_flag) != 1)
- return -EFAULT;
- iwl_legacy_clear_traffic_stats(priv);
-
- return count;
-}
-
-static ssize_t iwl_legacy_dbgfs_rx_statistics_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos) {
-
- struct iwl_priv *priv = file->private_data;
- char *buf;
- int pos = 0;
- int cnt;
- ssize_t ret;
- const size_t bufsz = 100 +
- sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX);
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- pos += scnprintf(buf + pos, bufsz - pos, "Management:\n");
- for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) {
- pos += scnprintf(buf + pos, bufsz - pos,
- "\t%25s\t\t: %u\n",
- iwl_legacy_get_mgmt_string(cnt),
- priv->rx_stats.mgmt[cnt]);
- }
- pos += scnprintf(buf + pos, bufsz - pos, "Control:\n");
- for (cnt = 0; cnt < CONTROL_MAX; cnt++) {
- pos += scnprintf(buf + pos, bufsz - pos,
- "\t%25s\t\t: %u\n",
- iwl_legacy_get_ctrl_string(cnt),
- priv->rx_stats.ctrl[cnt]);
- }
- pos += scnprintf(buf + pos, bufsz - pos, "Data:\n");
- pos += scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n",
- priv->rx_stats.data_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n",
- priv->rx_stats.data_bytes);
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-#define BYTE1_MASK 0x000000ff;
-#define BYTE2_MASK 0x0000ffff;
-#define BYTE3_MASK 0x00ffffff;
-static ssize_t iwl_legacy_dbgfs_sram_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- u32 val;
- char *buf;
- ssize_t ret;
- int i;
- int pos = 0;
- struct iwl_priv *priv = file->private_data;
- size_t bufsz;
-
- /* default is to dump the entire data segment */
- if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) {
- priv->dbgfs_sram_offset = 0x800000;
- if (priv->ucode_type == UCODE_INIT)
- priv->dbgfs_sram_len = priv->ucode_init_data.len;
- else
- priv->dbgfs_sram_len = priv->ucode_data.len;
- }
- bufsz = 30 + priv->dbgfs_sram_len * sizeof(char) * 10;
- buf = kmalloc(bufsz, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
- pos += scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n",
- priv->dbgfs_sram_len);
- pos += scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n",
- priv->dbgfs_sram_offset);
- for (i = priv->dbgfs_sram_len; i > 0; i -= 4) {
- val = iwl_legacy_read_targ_mem(priv, priv->dbgfs_sram_offset + \
- priv->dbgfs_sram_len - i);
- if (i < 4) {
- switch (i) {
- case 1:
- val &= BYTE1_MASK;
- break;
- case 2:
- val &= BYTE2_MASK;
- break;
- case 3:
- val &= BYTE3_MASK;
- break;
- }
- }
- if (!(i % 16))
- pos += scnprintf(buf + pos, bufsz - pos, "\n");
- pos += scnprintf(buf + pos, bufsz - pos, "0x%08x ", val);
- }
- pos += scnprintf(buf + pos, bufsz - pos, "\n");
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-static ssize_t iwl_legacy_dbgfs_sram_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- char buf[64];
- int buf_size;
- u32 offset, len;
-
- memset(buf, 0, sizeof(buf));
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
-
- if (sscanf(buf, "%x,%x", &offset, &len) == 2) {
- priv->dbgfs_sram_offset = offset;
- priv->dbgfs_sram_len = len;
- } else {
- priv->dbgfs_sram_offset = 0;
- priv->dbgfs_sram_len = 0;
- }
-
- return count;
-}
-
-static ssize_t
-iwl_legacy_dbgfs_stations_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- struct iwl_station_entry *station;
- int max_sta = priv->hw_params.max_stations;
- char *buf;
- int i, j, pos = 0;
- ssize_t ret;
- /* Add 30 for initial string */
- const size_t bufsz = 30 + sizeof(char) * 500 * (priv->num_stations);
-
- buf = kmalloc(bufsz, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- pos += scnprintf(buf + pos, bufsz - pos, "num of stations: %d\n\n",
- priv->num_stations);
-
- for (i = 0; i < max_sta; i++) {
- station = &priv->stations[i];
- if (!station->used)
- continue;
- pos += scnprintf(buf + pos, bufsz - pos,
- "station %d - addr: %pM, flags: %#x\n",
- i, station->sta.sta.addr,
- station->sta.station_flags_msk);
- pos += scnprintf(buf + pos, bufsz - pos,
- "TID\tseq_num\ttxq_id\tframes\ttfds\t");
- pos += scnprintf(buf + pos, bufsz - pos,
- "start_idx\tbitmap\t\t\trate_n_flags\n");
-
- for (j = 0; j < MAX_TID_COUNT; j++) {
- pos += scnprintf(buf + pos, bufsz - pos,
- "%d:\t%#x\t%#x\t%u\t%u\t%u\t\t%#.16llx\t%#x",
- j, station->tid[j].seq_number,
- station->tid[j].agg.txq_id,
- station->tid[j].agg.frame_count,
- station->tid[j].tfds_in_queue,
- station->tid[j].agg.start_idx,
- station->tid[j].agg.bitmap,
- station->tid[j].agg.rate_n_flags);
-
- if (station->tid[j].agg.wait_for_ba)
- pos += scnprintf(buf + pos, bufsz - pos,
- " - waitforba");
- pos += scnprintf(buf + pos, bufsz - pos, "\n");
- }
-
- pos += scnprintf(buf + pos, bufsz - pos, "\n");
- }
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-static ssize_t iwl_legacy_dbgfs_nvm_read(struct file *file,
- char __user *user_buf,
- size_t count,
- loff_t *ppos)
-{
- ssize_t ret;
- struct iwl_priv *priv = file->private_data;
- int pos = 0, ofs = 0, buf_size = 0;
- const u8 *ptr;
- char *buf;
- u16 eeprom_ver;
- size_t eeprom_len = priv->cfg->base_params->eeprom_size;
- buf_size = 4 * eeprom_len + 256;
-
- if (eeprom_len % 16) {
- IWL_ERR(priv, "NVM size is not multiple of 16.\n");
- return -ENODATA;
- }
-
- ptr = priv->eeprom;
- if (!ptr) {
- IWL_ERR(priv, "Invalid EEPROM memory\n");
- return -ENOMEM;
- }
-
- /* 4 characters for byte 0xYY */
- buf = kzalloc(buf_size, GFP_KERNEL);
- if (!buf) {
- IWL_ERR(priv, "Can not allocate Buffer\n");
- return -ENOMEM;
- }
- eeprom_ver = iwl_legacy_eeprom_query16(priv, EEPROM_VERSION);
- pos += scnprintf(buf + pos, buf_size - pos, "EEPROM "
- "version: 0x%x\n", eeprom_ver);
- for (ofs = 0 ; ofs < eeprom_len ; ofs += 16) {
- pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs);
- hex_dump_to_buffer(ptr + ofs, 16 , 16, 2, buf + pos,
- buf_size - pos, 0);
- pos += strlen(buf + pos);
- if (buf_size - pos > 0)
- buf[pos++] = '\n';
- }
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-static ssize_t
-iwl_legacy_dbgfs_channels_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- struct ieee80211_channel *channels = NULL;
- const struct ieee80211_supported_band *supp_band = NULL;
- int pos = 0, i, bufsz = PAGE_SIZE;
- char *buf;
- ssize_t ret;
-
- if (!test_bit(STATUS_GEO_CONFIGURED, &priv->status))
- return -EAGAIN;
-
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf) {
- IWL_ERR(priv, "Can not allocate Buffer\n");
- return -ENOMEM;
- }
-
- supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_2GHZ);
- if (supp_band) {
- channels = supp_band->channels;
-
- pos += scnprintf(buf + pos, bufsz - pos,
- "Displaying %d channels in 2.4GHz band 802.11bg):\n",
- supp_band->n_channels);
-
- for (i = 0; i < supp_band->n_channels; i++)
- pos += scnprintf(buf + pos, bufsz - pos,
- "%d: %ddBm: BSS%s%s, %s.\n",
- channels[i].hw_value,
- channels[i].max_power,
- channels[i].flags & IEEE80211_CHAN_RADAR ?
- " (IEEE 802.11h required)" : "",
- ((channels[i].flags & IEEE80211_CHAN_NO_IBSS)
- || (channels[i].flags &
- IEEE80211_CHAN_RADAR)) ? "" :
- ", IBSS",
- channels[i].flags &
- IEEE80211_CHAN_PASSIVE_SCAN ?
- "passive only" : "active/passive");
- }
- supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_5GHZ);
- if (supp_band) {
- channels = supp_band->channels;
-
- pos += scnprintf(buf + pos, bufsz - pos,
- "Displaying %d channels in 5.2GHz band (802.11a)\n",
- supp_band->n_channels);
-
- for (i = 0; i < supp_band->n_channels; i++)
- pos += scnprintf(buf + pos, bufsz - pos,
- "%d: %ddBm: BSS%s%s, %s.\n",
- channels[i].hw_value,
- channels[i].max_power,
- channels[i].flags & IEEE80211_CHAN_RADAR ?
- " (IEEE 802.11h required)" : "",
- ((channels[i].flags & IEEE80211_CHAN_NO_IBSS)
- || (channels[i].flags &
- IEEE80211_CHAN_RADAR)) ? "" :
- ", IBSS",
- channels[i].flags &
- IEEE80211_CHAN_PASSIVE_SCAN ?
- "passive only" : "active/passive");
- }
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-static ssize_t iwl_legacy_dbgfs_status_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos) {
-
- struct iwl_priv *priv = file->private_data;
- char buf[512];
- int pos = 0;
- const size_t bufsz = sizeof(buf);
-
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
- test_bit(STATUS_HCMD_ACTIVE, &priv->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n",
- test_bit(STATUS_INT_ENABLED, &priv->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_RF_KILL_HW:\t %d\n",
- test_bit(STATUS_RF_KILL_HW, &priv->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_CT_KILL:\t\t %d\n",
- test_bit(STATUS_CT_KILL, &priv->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INIT:\t\t %d\n",
- test_bit(STATUS_INIT, &priv->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_ALIVE:\t\t %d\n",
- test_bit(STATUS_ALIVE, &priv->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_READY:\t\t %d\n",
- test_bit(STATUS_READY, &priv->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_TEMPERATURE:\t %d\n",
- test_bit(STATUS_TEMPERATURE, &priv->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_GEO_CONFIGURED:\t %d\n",
- test_bit(STATUS_GEO_CONFIGURED, &priv->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_EXIT_PENDING:\t %d\n",
- test_bit(STATUS_EXIT_PENDING, &priv->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_STATISTICS:\t %d\n",
- test_bit(STATUS_STATISTICS, &priv->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCANNING:\t %d\n",
- test_bit(STATUS_SCANNING, &priv->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_ABORTING:\t %d\n",
- test_bit(STATUS_SCAN_ABORTING, &priv->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_HW:\t\t %d\n",
- test_bit(STATUS_SCAN_HW, &priv->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_POWER_PMI:\t %d\n",
- test_bit(STATUS_POWER_PMI, &priv->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_FW_ERROR:\t %d\n",
- test_bit(STATUS_FW_ERROR, &priv->status));
- return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
-static ssize_t iwl_legacy_dbgfs_interrupt_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos) {
-
- struct iwl_priv *priv = file->private_data;
- int pos = 0;
- int cnt = 0;
- char *buf;
- int bufsz = 24 * 64; /* 24 items * 64 char per item */
- ssize_t ret;
-
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf) {
- IWL_ERR(priv, "Can not allocate Buffer\n");
- return -ENOMEM;
- }
-
- pos += scnprintf(buf + pos, bufsz - pos,
- "Interrupt Statistics Report:\n");
-
- pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
- priv->isr_stats.hw);
- pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
- priv->isr_stats.sw);
- if (priv->isr_stats.sw || priv->isr_stats.hw) {
- pos += scnprintf(buf + pos, bufsz - pos,
- "\tLast Restarting Code: 0x%X\n",
- priv->isr_stats.err_code);
- }
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
- priv->isr_stats.sch);
- pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
- priv->isr_stats.alive);
-#endif
- pos += scnprintf(buf + pos, bufsz - pos,
- "HW RF KILL switch toggled:\t %u\n",
- priv->isr_stats.rfkill);
-
- pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
- priv->isr_stats.ctkill);
-
- pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
- priv->isr_stats.wakeup);
-
- pos += scnprintf(buf + pos, bufsz - pos,
- "Rx command responses:\t\t %u\n",
- priv->isr_stats.rx);
- for (cnt = 0; cnt < REPLY_MAX; cnt++) {
- if (priv->isr_stats.rx_handlers[cnt] > 0)
- pos += scnprintf(buf + pos, bufsz - pos,
- "\tRx handler[%36s]:\t\t %u\n",
- iwl_legacy_get_cmd_string(cnt),
- priv->isr_stats.rx_handlers[cnt]);
- }
-
- pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
- priv->isr_stats.tx);
-
- pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
- priv->isr_stats.unhandled);
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-static ssize_t iwl_legacy_dbgfs_interrupt_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- char buf[8];
- int buf_size;
- u32 reset_flag;
-
- memset(buf, 0, sizeof(buf));
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
- if (sscanf(buf, "%x", &reset_flag) != 1)
- return -EFAULT;
- if (reset_flag == 0)
- iwl_legacy_clear_isr_stats(priv);
-
- return count;
-}
-
-static ssize_t
-iwl_legacy_dbgfs_qos_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- struct iwl_rxon_context *ctx;
- int pos = 0, i;
- char buf[256 * NUM_IWL_RXON_CTX];
- const size_t bufsz = sizeof(buf);
-
- for_each_context(priv, ctx) {
- pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n",
- ctx->ctxid);
- for (i = 0; i < AC_NUM; i++) {
- pos += scnprintf(buf + pos, bufsz - pos,
- "\tcw_min\tcw_max\taifsn\ttxop\n");
- pos += scnprintf(buf + pos, bufsz - pos,
- "AC[%d]\t%u\t%u\t%u\t%u\n", i,
- ctx->qos_data.def_qos_parm.ac[i].cw_min,
- ctx->qos_data.def_qos_parm.ac[i].cw_max,
- ctx->qos_data.def_qos_parm.ac[i].aifsn,
- ctx->qos_data.def_qos_parm.ac[i].edca_txop);
- }
- pos += scnprintf(buf + pos, bufsz - pos, "\n");
- }
- return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
-static ssize_t iwl_legacy_dbgfs_disable_ht40_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- char buf[8];
- int buf_size;
- int ht40;
-
- memset(buf, 0, sizeof(buf));
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
- if (sscanf(buf, "%d", &ht40) != 1)
- return -EFAULT;
- if (!iwl_legacy_is_any_associated(priv))
- priv->disable_ht40 = ht40 ? true : false;
- else {
- IWL_ERR(priv, "Sta associated with AP - "
- "Change to 40MHz channel support is not allowed\n");
- return -EINVAL;
- }
-
- return count;
-}
-
-static ssize_t iwl_legacy_dbgfs_disable_ht40_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- char buf[100];
- int pos = 0;
- const size_t bufsz = sizeof(buf);
-
- pos += scnprintf(buf + pos, bufsz - pos,
- "11n 40MHz Mode: %s\n",
- priv->disable_ht40 ? "Disabled" : "Enabled");
- return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
-DEBUGFS_READ_WRITE_FILE_OPS(sram);
-DEBUGFS_READ_FILE_OPS(nvm);
-DEBUGFS_READ_FILE_OPS(stations);
-DEBUGFS_READ_FILE_OPS(channels);
-DEBUGFS_READ_FILE_OPS(status);
-DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
-DEBUGFS_READ_FILE_OPS(qos);
-DEBUGFS_READ_WRITE_FILE_OPS(disable_ht40);
-
-static ssize_t iwl_legacy_dbgfs_traffic_log_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- int pos = 0, ofs = 0;
- int cnt = 0, entry;
- struct iwl_tx_queue *txq;
- struct iwl_queue *q;
- struct iwl_rx_queue *rxq = &priv->rxq;
- char *buf;
- int bufsz = ((IWL_TRAFFIC_ENTRIES * IWL_TRAFFIC_ENTRY_SIZE * 64) * 2) +
- (priv->cfg->base_params->num_of_queues * 32 * 8) + 400;
- const u8 *ptr;
- ssize_t ret;
-
- if (!priv->txq) {
- IWL_ERR(priv, "txq not ready\n");
- return -EAGAIN;
- }
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf) {
- IWL_ERR(priv, "Can not allocate buffer\n");
- return -ENOMEM;
- }
- pos += scnprintf(buf + pos, bufsz - pos, "Tx Queue\n");
- for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
- txq = &priv->txq[cnt];
- q = &txq->q;
- pos += scnprintf(buf + pos, bufsz - pos,
- "q[%d]: read_ptr: %u, write_ptr: %u\n",
- cnt, q->read_ptr, q->write_ptr);
- }
- if (priv->tx_traffic && (iwlegacy_debug_level & IWL_DL_TX)) {
- ptr = priv->tx_traffic;
- pos += scnprintf(buf + pos, bufsz - pos,
- "Tx Traffic idx: %u\n", priv->tx_traffic_idx);
- for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
- for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
- entry++, ofs += 16) {
- pos += scnprintf(buf + pos, bufsz - pos,
- "0x%.4x ", ofs);
- hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
- buf + pos, bufsz - pos, 0);
- pos += strlen(buf + pos);
- if (bufsz - pos > 0)
- buf[pos++] = '\n';
- }
- }
- }
-
- pos += scnprintf(buf + pos, bufsz - pos, "Rx Queue\n");
- pos += scnprintf(buf + pos, bufsz - pos,
- "read: %u, write: %u\n",
- rxq->read, rxq->write);
-
- if (priv->rx_traffic && (iwlegacy_debug_level & IWL_DL_RX)) {
- ptr = priv->rx_traffic;
- pos += scnprintf(buf + pos, bufsz - pos,
- "Rx Traffic idx: %u\n", priv->rx_traffic_idx);
- for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
- for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
- entry++, ofs += 16) {
- pos += scnprintf(buf + pos, bufsz - pos,
- "0x%.4x ", ofs);
- hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
- buf + pos, bufsz - pos, 0);
- pos += strlen(buf + pos);
- if (bufsz - pos > 0)
- buf[pos++] = '\n';
- }
- }
- }
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-static ssize_t iwl_legacy_dbgfs_traffic_log_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- char buf[8];
- int buf_size;
- int traffic_log;
-
- memset(buf, 0, sizeof(buf));
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
- if (sscanf(buf, "%d", &traffic_log) != 1)
- return -EFAULT;
- if (traffic_log == 0)
- iwl_legacy_reset_traffic_log(priv);
-
- return count;
-}
-
-static ssize_t iwl_legacy_dbgfs_tx_queue_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos) {
-
- struct iwl_priv *priv = file->private_data;
- struct iwl_tx_queue *txq;
- struct iwl_queue *q;
- char *buf;
- int pos = 0;
- int cnt;
- int ret;
- const size_t bufsz = sizeof(char) * 64 *
- priv->cfg->base_params->num_of_queues;
-
- if (!priv->txq) {
- IWL_ERR(priv, "txq not ready\n");
- return -EAGAIN;
- }
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
- txq = &priv->txq[cnt];
- q = &txq->q;
- pos += scnprintf(buf + pos, bufsz - pos,
- "hwq %.2d: read=%u write=%u stop=%d"
- " swq_id=%#.2x (ac %d/hwq %d)\n",
- cnt, q->read_ptr, q->write_ptr,
- !!test_bit(cnt, priv->queue_stopped),
- txq->swq_id, txq->swq_id & 3,
- (txq->swq_id >> 2) & 0x1f);
- if (cnt >= 4)
- continue;
- /* for the ACs, display the stop count too */
- pos += scnprintf(buf + pos, bufsz - pos,
- " stop-count: %d\n",
- atomic_read(&priv->queue_stop_count[cnt]));
- }
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-static ssize_t iwl_legacy_dbgfs_rx_queue_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos) {
-
- struct iwl_priv *priv = file->private_data;
- struct iwl_rx_queue *rxq = &priv->rxq;
- char buf[256];
- int pos = 0;
- const size_t bufsz = sizeof(buf);
-
- pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
- rxq->read);
- pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
- rxq->write);
- pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
- rxq->free_count);
- if (rxq->rb_stts) {
- pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
- le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF);
- } else {
- pos += scnprintf(buf + pos, bufsz - pos,
- "closed_rb_num: Not Allocated\n");
- }
- return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
-static ssize_t iwl_legacy_dbgfs_ucode_rx_stats_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- return priv->cfg->ops->lib->debugfs_ops.rx_stats_read(file,
- user_buf, count, ppos);
-}
-
-static ssize_t iwl_legacy_dbgfs_ucode_tx_stats_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- return priv->cfg->ops->lib->debugfs_ops.tx_stats_read(file,
- user_buf, count, ppos);
-}
-
-static ssize_t iwl_legacy_dbgfs_ucode_general_stats_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- return priv->cfg->ops->lib->debugfs_ops.general_stats_read(file,
- user_buf, count, ppos);
-}
-
-static ssize_t iwl_legacy_dbgfs_sensitivity_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos) {
-
- struct iwl_priv *priv = file->private_data;
- int pos = 0;
- int cnt = 0;
- char *buf;
- int bufsz = sizeof(struct iwl_sensitivity_data) * 4 + 100;
- ssize_t ret;
- struct iwl_sensitivity_data *data;
-
- data = &priv->sensitivity_data;
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf) {
- IWL_ERR(priv, "Can not allocate Buffer\n");
- return -ENOMEM;
- }
-
- pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm:\t\t\t %u\n",
- data->auto_corr_ofdm);
- pos += scnprintf(buf + pos, bufsz - pos,
- "auto_corr_ofdm_mrc:\t\t %u\n",
- data->auto_corr_ofdm_mrc);
- pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm_x1:\t\t %u\n",
- data->auto_corr_ofdm_x1);
- pos += scnprintf(buf + pos, bufsz - pos,
- "auto_corr_ofdm_mrc_x1:\t\t %u\n",
- data->auto_corr_ofdm_mrc_x1);
- pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_cck:\t\t\t %u\n",
- data->auto_corr_cck);
- pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_cck_mrc:\t\t %u\n",
- data->auto_corr_cck_mrc);
- pos += scnprintf(buf + pos, bufsz - pos,
- "last_bad_plcp_cnt_ofdm:\t\t %u\n",
- data->last_bad_plcp_cnt_ofdm);
- pos += scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_ofdm:\t\t %u\n",
- data->last_fa_cnt_ofdm);
- pos += scnprintf(buf + pos, bufsz - pos,
- "last_bad_plcp_cnt_cck:\t\t %u\n",
- data->last_bad_plcp_cnt_cck);
- pos += scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_cck:\t\t %u\n",
- data->last_fa_cnt_cck);
- pos += scnprintf(buf + pos, bufsz - pos, "nrg_curr_state:\t\t\t %u\n",
- data->nrg_curr_state);
- pos += scnprintf(buf + pos, bufsz - pos, "nrg_prev_state:\t\t\t %u\n",
- data->nrg_prev_state);
- pos += scnprintf(buf + pos, bufsz - pos, "nrg_value:\t\t\t");
- for (cnt = 0; cnt < 10; cnt++) {
- pos += scnprintf(buf + pos, bufsz - pos, " %u",
- data->nrg_value[cnt]);
- }
- pos += scnprintf(buf + pos, bufsz - pos, "\n");
- pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_rssi:\t\t");
- for (cnt = 0; cnt < NRG_NUM_PREV_STAT_L; cnt++) {
- pos += scnprintf(buf + pos, bufsz - pos, " %u",
- data->nrg_silence_rssi[cnt]);
- }
- pos += scnprintf(buf + pos, bufsz - pos, "\n");
- pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_ref:\t\t %u\n",
- data->nrg_silence_ref);
- pos += scnprintf(buf + pos, bufsz - pos, "nrg_energy_idx:\t\t\t %u\n",
- data->nrg_energy_idx);
- pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_idx:\t\t %u\n",
- data->nrg_silence_idx);
- pos += scnprintf(buf + pos, bufsz - pos, "nrg_th_cck:\t\t\t %u\n",
- data->nrg_th_cck);
- pos += scnprintf(buf + pos, bufsz - pos,
- "nrg_auto_corr_silence_diff:\t %u\n",
- data->nrg_auto_corr_silence_diff);
- pos += scnprintf(buf + pos, bufsz - pos, "num_in_cck_no_fa:\t\t %u\n",
- data->num_in_cck_no_fa);
- pos += scnprintf(buf + pos, bufsz - pos, "nrg_th_ofdm:\t\t\t %u\n",
- data->nrg_th_ofdm);
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-
-static ssize_t iwl_legacy_dbgfs_chain_noise_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos) {
-
- struct iwl_priv *priv = file->private_data;
- int pos = 0;
- int cnt = 0;
- char *buf;
- int bufsz = sizeof(struct iwl_chain_noise_data) * 4 + 100;
- ssize_t ret;
- struct iwl_chain_noise_data *data;
-
- data = &priv->chain_noise_data;
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf) {
- IWL_ERR(priv, "Can not allocate Buffer\n");
- return -ENOMEM;
- }
-
- pos += scnprintf(buf + pos, bufsz - pos, "active_chains:\t\t\t %u\n",
- data->active_chains);
- pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_a:\t\t\t %u\n",
- data->chain_noise_a);
- pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_b:\t\t\t %u\n",
- data->chain_noise_b);
- pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_c:\t\t\t %u\n",
- data->chain_noise_c);
- pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_a:\t\t\t %u\n",
- data->chain_signal_a);
- pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_b:\t\t\t %u\n",
- data->chain_signal_b);
- pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_c:\t\t\t %u\n",
- data->chain_signal_c);
- pos += scnprintf(buf + pos, bufsz - pos, "beacon_count:\t\t\t %u\n",
- data->beacon_count);
-
- pos += scnprintf(buf + pos, bufsz - pos, "disconn_array:\t\t\t");
- for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) {
- pos += scnprintf(buf + pos, bufsz - pos, " %u",
- data->disconn_array[cnt]);
- }
- pos += scnprintf(buf + pos, bufsz - pos, "\n");
- pos += scnprintf(buf + pos, bufsz - pos, "delta_gain_code:\t\t");
- for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) {
- pos += scnprintf(buf + pos, bufsz - pos, " %u",
- data->delta_gain_code[cnt]);
- }
- pos += scnprintf(buf + pos, bufsz - pos, "\n");
- pos += scnprintf(buf + pos, bufsz - pos, "radio_write:\t\t\t %u\n",
- data->radio_write);
- pos += scnprintf(buf + pos, bufsz - pos, "state:\t\t\t\t %u\n",
- data->state);
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-static ssize_t iwl_legacy_dbgfs_power_save_status_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- char buf[60];
- int pos = 0;
- const size_t bufsz = sizeof(buf);
- u32 pwrsave_status;
-
- pwrsave_status = iwl_read32(priv, CSR_GP_CNTRL) &
- CSR_GP_REG_POWER_SAVE_STATUS_MSK;
-
- pos += scnprintf(buf + pos, bufsz - pos, "Power Save Status: ");
- pos += scnprintf(buf + pos, bufsz - pos, "%s\n",
- (pwrsave_status == CSR_GP_REG_NO_POWER_SAVE) ? "none" :
- (pwrsave_status == CSR_GP_REG_MAC_POWER_SAVE) ? "MAC" :
- (pwrsave_status == CSR_GP_REG_PHY_POWER_SAVE) ? "PHY" :
- "error");
-
- return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
-static ssize_t iwl_legacy_dbgfs_clear_ucode_statistics_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- char buf[8];
- int buf_size;
- int clear;
-
- memset(buf, 0, sizeof(buf));
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
- if (sscanf(buf, "%d", &clear) != 1)
- return -EFAULT;
-
- /* make request to uCode to retrieve statistics information */
- mutex_lock(&priv->mutex);
- iwl_legacy_send_statistics_request(priv, CMD_SYNC, true);
- mutex_unlock(&priv->mutex);
-
- return count;
-}
-
-static ssize_t iwl_legacy_dbgfs_rxon_flags_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos) {
-
- struct iwl_priv *priv = file->private_data;
- int len = 0;
- char buf[20];
-
- len = sprintf(buf, "0x%04X\n",
- le32_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.flags));
- return simple_read_from_buffer(user_buf, count, ppos, buf, len);
-}
-
-static ssize_t iwl_legacy_dbgfs_rxon_filter_flags_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos) {
-
- struct iwl_priv *priv = file->private_data;
- int len = 0;
- char buf[20];
-
- len = sprintf(buf, "0x%04X\n",
- le32_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags));
- return simple_read_from_buffer(user_buf, count, ppos, buf, len);
-}
-
-static ssize_t iwl_legacy_dbgfs_fh_reg_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- char *buf;
- int pos = 0;
- ssize_t ret = -EFAULT;
-
- if (priv->cfg->ops->lib->dump_fh) {
- ret = pos = priv->cfg->ops->lib->dump_fh(priv, &buf, true);
- if (buf) {
- ret = simple_read_from_buffer(user_buf,
- count, ppos, buf, pos);
- kfree(buf);
- }
- }
-
- return ret;
-}
-
-static ssize_t iwl_legacy_dbgfs_missed_beacon_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos) {
-
- struct iwl_priv *priv = file->private_data;
- int pos = 0;
- char buf[12];
- const size_t bufsz = sizeof(buf);
-
- pos += scnprintf(buf + pos, bufsz - pos, "%d\n",
- priv->missed_beacon_threshold);
-
- return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
-static ssize_t iwl_legacy_dbgfs_missed_beacon_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- char buf[8];
- int buf_size;
- int missed;
-
- memset(buf, 0, sizeof(buf));
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
- if (sscanf(buf, "%d", &missed) != 1)
- return -EINVAL;
-
- if (missed < IWL_MISSED_BEACON_THRESHOLD_MIN ||
- missed > IWL_MISSED_BEACON_THRESHOLD_MAX)
- priv->missed_beacon_threshold =
- IWL_MISSED_BEACON_THRESHOLD_DEF;
- else
- priv->missed_beacon_threshold = missed;
-
- return count;
-}
-
-static ssize_t iwl_legacy_dbgfs_force_reset_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos) {
-
- struct iwl_priv *priv = file->private_data;
- int pos = 0;
- char buf[300];
- const size_t bufsz = sizeof(buf);
- struct iwl_force_reset *force_reset;
-
- force_reset = &priv->force_reset;
-
- pos += scnprintf(buf + pos, bufsz - pos,
- "\tnumber of reset request: %d\n",
- force_reset->reset_request_count);
- pos += scnprintf(buf + pos, bufsz - pos,
- "\tnumber of reset request success: %d\n",
- force_reset->reset_success_count);
- pos += scnprintf(buf + pos, bufsz - pos,
- "\tnumber of reset request reject: %d\n",
- force_reset->reset_reject_count);
- pos += scnprintf(buf + pos, bufsz - pos,
- "\treset duration: %lu\n",
- force_reset->reset_duration);
-
- return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
-static ssize_t iwl_legacy_dbgfs_force_reset_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos) {
-
- int ret;
- struct iwl_priv *priv = file->private_data;
-
- ret = iwl_legacy_force_reset(priv, true);
-
- return ret ? ret : count;
-}
-
-static ssize_t iwl_legacy_dbgfs_wd_timeout_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos) {
-
- struct iwl_priv *priv = file->private_data;
- char buf[8];
- int buf_size;
- int timeout;
-
- memset(buf, 0, sizeof(buf));
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
- if (sscanf(buf, "%d", &timeout) != 1)
- return -EINVAL;
- if (timeout < 0 || timeout > IWL_MAX_WD_TIMEOUT)
- timeout = IWL_DEF_WD_TIMEOUT;
-
- priv->cfg->base_params->wd_timeout = timeout;
- iwl_legacy_setup_watchdog(priv);
- return count;
-}
-
-DEBUGFS_READ_FILE_OPS(rx_statistics);
-DEBUGFS_READ_FILE_OPS(tx_statistics);
-DEBUGFS_READ_WRITE_FILE_OPS(traffic_log);
-DEBUGFS_READ_FILE_OPS(rx_queue);
-DEBUGFS_READ_FILE_OPS(tx_queue);
-DEBUGFS_READ_FILE_OPS(ucode_rx_stats);
-DEBUGFS_READ_FILE_OPS(ucode_tx_stats);
-DEBUGFS_READ_FILE_OPS(ucode_general_stats);
-DEBUGFS_READ_FILE_OPS(sensitivity);
-DEBUGFS_READ_FILE_OPS(chain_noise);
-DEBUGFS_READ_FILE_OPS(power_save_status);
-DEBUGFS_WRITE_FILE_OPS(clear_ucode_statistics);
-DEBUGFS_WRITE_FILE_OPS(clear_traffic_statistics);
-DEBUGFS_READ_FILE_OPS(fh_reg);
-DEBUGFS_READ_WRITE_FILE_OPS(missed_beacon);
-DEBUGFS_READ_WRITE_FILE_OPS(force_reset);
-DEBUGFS_READ_FILE_OPS(rxon_flags);
-DEBUGFS_READ_FILE_OPS(rxon_filter_flags);
-DEBUGFS_WRITE_FILE_OPS(wd_timeout);
-
-/*
- * Create the debugfs files and directories
- *
- */
-int iwl_legacy_dbgfs_register(struct iwl_priv *priv, const char *name)
-{
- struct dentry *phyd = priv->hw->wiphy->debugfsdir;
- struct dentry *dir_drv, *dir_data, *dir_rf, *dir_debug;
-
- dir_drv = debugfs_create_dir(name, phyd);
- if (!dir_drv)
- return -ENOMEM;
-
- priv->debugfs_dir = dir_drv;
-
- dir_data = debugfs_create_dir("data", dir_drv);
- if (!dir_data)
- goto err;
- dir_rf = debugfs_create_dir("rf", dir_drv);
- if (!dir_rf)
- goto err;
- dir_debug = debugfs_create_dir("debug", dir_drv);
- if (!dir_debug)
- goto err;
-
- DEBUGFS_ADD_FILE(nvm, dir_data, S_IRUSR);
- DEBUGFS_ADD_FILE(sram, dir_data, S_IWUSR | S_IRUSR);
- DEBUGFS_ADD_FILE(stations, dir_data, S_IRUSR);
- DEBUGFS_ADD_FILE(channels, dir_data, S_IRUSR);
- DEBUGFS_ADD_FILE(status, dir_data, S_IRUSR);
- DEBUGFS_ADD_FILE(interrupt, dir_data, S_IWUSR | S_IRUSR);
- DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR);
- DEBUGFS_ADD_FILE(disable_ht40, dir_data, S_IWUSR | S_IRUSR);
- DEBUGFS_ADD_FILE(rx_statistics, dir_debug, S_IRUSR);
- DEBUGFS_ADD_FILE(tx_statistics, dir_debug, S_IRUSR);
- DEBUGFS_ADD_FILE(traffic_log, dir_debug, S_IWUSR | S_IRUSR);
- DEBUGFS_ADD_FILE(rx_queue, dir_debug, S_IRUSR);
- DEBUGFS_ADD_FILE(tx_queue, dir_debug, S_IRUSR);
- DEBUGFS_ADD_FILE(power_save_status, dir_debug, S_IRUSR);
- DEBUGFS_ADD_FILE(clear_ucode_statistics, dir_debug, S_IWUSR);
- DEBUGFS_ADD_FILE(clear_traffic_statistics, dir_debug, S_IWUSR);
- DEBUGFS_ADD_FILE(fh_reg, dir_debug, S_IRUSR);
- DEBUGFS_ADD_FILE(missed_beacon, dir_debug, S_IWUSR);
- DEBUGFS_ADD_FILE(force_reset, dir_debug, S_IWUSR | S_IRUSR);
- DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, S_IRUSR);
- DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR);
- DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR);
-
- if (priv->cfg->base_params->sensitivity_calib_by_driver)
- DEBUGFS_ADD_FILE(sensitivity, dir_debug, S_IRUSR);
- if (priv->cfg->base_params->chain_noise_calib_by_driver)
- DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR);
- DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR);
- DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR);
- DEBUGFS_ADD_FILE(wd_timeout, dir_debug, S_IWUSR);
- if (priv->cfg->base_params->sensitivity_calib_by_driver)
- DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf,
- &priv->disable_sens_cal);
- if (priv->cfg->base_params->chain_noise_calib_by_driver)
- DEBUGFS_ADD_BOOL(disable_chain_noise, dir_rf,
- &priv->disable_chain_noise_cal);
- DEBUGFS_ADD_BOOL(disable_tx_power, dir_rf,
- &priv->disable_tx_power_cal);
- return 0;
-
-err:
- IWL_ERR(priv, "Can't create the debugfs directory\n");
- iwl_legacy_dbgfs_unregister(priv);
- return -ENOMEM;
-}
-EXPORT_SYMBOL(iwl_legacy_dbgfs_register);
-
-/**
- * Remove the debugfs files and directories
- *
- */
-void iwl_legacy_dbgfs_unregister(struct iwl_priv *priv)
-{
- if (!priv->debugfs_dir)
- return;
-
- debugfs_remove_recursive(priv->debugfs_dir);
- priv->debugfs_dir = NULL;
-}
-EXPORT_SYMBOL(iwl_legacy_dbgfs_unregister);
diff --git a/drivers/net/wireless/iwlegacy/iwl-dev.h b/drivers/net/wireless/iwlegacy/iwl-dev.h
deleted file mode 100644
index 9c786edf56fd..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-dev.h
+++ /dev/null
@@ -1,1364 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-/*
- * Please use this file (iwl-dev.h) for driver implementation definitions.
- * Please use iwl-commands.h for uCode API definitions.
- * Please use iwl-4965-hw.h for hardware-related definitions.
- */
-
-#ifndef __iwl_legacy_dev_h__
-#define __iwl_legacy_dev_h__
-
-#include <linux/interrupt.h>
-#include <linux/pci.h> /* for struct pci_device_id */
-#include <linux/kernel.h>
-#include <linux/leds.h>
-#include <linux/wait.h>
-#include <net/ieee80211_radiotap.h>
-
-#include "iwl-eeprom.h"
-#include "iwl-csr.h"
-#include "iwl-prph.h"
-#include "iwl-fh.h"
-#include "iwl-debug.h"
-#include "iwl-4965-hw.h"
-#include "iwl-3945-hw.h"
-#include "iwl-led.h"
-#include "iwl-power.h"
-#include "iwl-legacy-rs.h"
-
-struct iwl_tx_queue;
-
-/* CT-KILL constants */
-#define CT_KILL_THRESHOLD_LEGACY 110 /* in Celsius */
-
-/* Default noise level to report when noise measurement is not available.
- * This may be because we're:
- * 1) Not associated (4965, no beacon statistics being sent to driver)
- * 2) Scanning (noise measurement does not apply to associated channel)
- * 3) Receiving CCK (3945 delivers noise info only for OFDM frames)
- * Use default noise value of -127 ... this is below the range of measurable
- * Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user.
- * Also, -127 works better than 0 when averaging frames with/without
- * noise info (e.g. averaging might be done in app); measured dBm values are
- * always negative ... using a negative value as the default keeps all
- * averages within an s8's (used in some apps) range of negative values. */
-#define IWL_NOISE_MEAS_NOT_AVAILABLE (-127)
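/*
 * Editor's note: an illustrative sketch, not part of the original driver,
 * of the averaging argument above. The helper name avg_noise_dbm() is
 * hypothetical; it only shows that folding the -127 sentinel into an s8
 * average keeps the result negative, which a default of 0 would not.
 */
static inline s8 avg_noise_dbm(const s8 *samples, int n)
{
	int i, sum = 0;

	for (i = 0; i < n; i++)		/* unmeasured entries carry -127 */
		sum += samples[i];

	return n ? sum / n : IWL_NOISE_MEAS_NOT_AVAILABLE;
}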
-
-/*
- * RTS threshold here is total size [2347] minus 4 FCS bytes
- * Per spec:
- * a value of 0 means RTS on all data/management packets
- * a value > max MSDU size means no RTS
- * else RTS for data/management frames where MPDU is larger
- * than RTS value.
- */
-#define DEFAULT_RTS_THRESHOLD 2347U
-#define MIN_RTS_THRESHOLD 0U
-#define MAX_RTS_THRESHOLD 2347U
-#define MAX_MSDU_SIZE 2304U
-#define MAX_MPDU_SIZE 2346U
-#define DEFAULT_BEACON_INTERVAL 100U
-#define DEFAULT_SHORT_RETRY_LIMIT 7U
-#define DEFAULT_LONG_RETRY_LIMIT 4U
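/*
 * Editor's note: an illustrative sketch, not taken from the driver, of the
 * RTS rule described in the comment above. The helper name rts_needed() is
 * hypothetical; it only restates the three cases using the thresholds
 * defined here.
 */
static inline int rts_needed(u32 rts_threshold, u32 mpdu_len)
{
	if (rts_threshold == MIN_RTS_THRESHOLD)	/* 0: RTS for all data/mgmt */
		return 1;
	if (rts_threshold > MAX_MSDU_SIZE)	/* above max MSDU size: no RTS */
		return 0;
	return mpdu_len > rts_threshold;	/* RTS only for larger MPDUs */
}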
-
-struct iwl_rx_mem_buffer {
- dma_addr_t page_dma;
- struct page *page;
- struct list_head list;
-};
-
-#define rxb_addr(r) page_address(r->page)
-
-/* defined below */
-struct iwl_device_cmd;
-
-struct iwl_cmd_meta {
- /* only for SYNC commands, iff the reply skb is wanted */
- struct iwl_host_cmd *source;
- /*
- * only for ASYNC commands
- * (which is somewhat stupid -- look at iwl-sta.c for instance
- * which duplicates a bunch of code because the callback isn't
- * invoked for SYNC commands, if it were and its result passed
- * through it would be simpler...)
- */
- void (*callback)(struct iwl_priv *priv,
- struct iwl_device_cmd *cmd,
- struct iwl_rx_packet *pkt);
-
- /* The CMD_SIZE_HUGE flag bit indicates that the command
- * structure is stored at the end of the shared queue memory. */
- u32 flags;
-
- DEFINE_DMA_UNMAP_ADDR(mapping);
- DEFINE_DMA_UNMAP_LEN(len);
-};
-
-/*
- * Generic queue structure
- *
- * Contains common data for Rx and Tx queues
- */
-struct iwl_queue {
- int n_bd; /* number of BDs in this queue */
- int write_ptr; /* 1-st empty entry (index) host_w*/
- int read_ptr; /* last used entry (index) host_r*/
- /* use for monitoring and recovering the stuck queue */
- dma_addr_t dma_addr; /* physical addr for BD's */
- int n_window; /* safe queue window */
- u32 id;
- int low_mark; /* low watermark, resume queue if free
- * space more than this */
- int high_mark; /* high watermark, stop queue if free
- * space less than this */
-};
-
-/* One for each TFD */
-struct iwl_tx_info {
- struct sk_buff *skb;
- struct iwl_rxon_context *ctx;
-};
-
-/**
- * struct iwl_tx_queue - Tx Queue for DMA
- * @q: generic Rx/Tx queue descriptor
- * @bd: base of circular buffer of TFDs
- * @cmd: array of command/TX buffer pointers
- * @meta: array of meta data for each command/tx buffer
- * @dma_addr_cmd: physical address of cmd/tx buffer array
- * @txb: array of per-TFD driver data
- * @time_stamp: time (in jiffies) of last read_ptr change
- * @need_update: indicates need to update read/write index
- * @sched_retry: indicates queue is high-throughput aggregation (HT AGG) enabled
- *
- * A Tx queue consists of a circular buffer of BDs (a.k.a. TFDs, transmit frame
- * descriptors) and required locking structures.
- */
-#define TFD_TX_CMD_SLOTS 256
-#define TFD_CMD_SLOTS 32
-
-struct iwl_tx_queue {
- struct iwl_queue q;
- void *tfds;
- struct iwl_device_cmd **cmd;
- struct iwl_cmd_meta *meta;
- struct iwl_tx_info *txb;
- unsigned long time_stamp;
- u8 need_update;
- u8 sched_retry;
- u8 active;
- u8 swq_id;
-};
-
-#define IWL_NUM_SCAN_RATES (2)
-
-struct iwl4965_channel_tgd_info {
- u8 type;
- s8 max_power;
-};
-
-struct iwl4965_channel_tgh_info {
- s64 last_radar_time;
-};
-
-#define IWL4965_MAX_RATE (33)
-
-struct iwl3945_clip_group {
- /* maximum power level to prevent clipping for each rate, derived by
- * us from this band's saturation power in EEPROM */
- const s8 clip_powers[IWL_MAX_RATES];
-};
-
-/* current Tx power values to use, one for each rate for each channel.
- * requested power is limited by:
- * -- regulatory EEPROM limits for this channel
- * -- hardware capabilities (clip-powers)
- * -- spectrum management
- * -- user preference (e.g. iwconfig)
- * when requested power is set, base power index must also be set. */
-struct iwl3945_channel_power_info {
- struct iwl3945_tx_power tpc; /* actual radio and DSP gain settings */
-	s8 power_table_index;	/* actual (compensated) index into gain table */
- s8 base_power_index; /* gain index for power at factory temp. */
- s8 requested_power; /* power (dBm) requested for this chnl/rate */
-};
-
-/* current scan Tx power values to use, one for each scan rate for each
- * channel. */
-struct iwl3945_scan_power_info {
- struct iwl3945_tx_power tpc; /* actual radio and DSP gain settings */
-	s8 power_table_index;	/* actual (compensated) index into gain table */
- s8 requested_power; /* scan pwr (dBm) requested for chnl/rate */
-};
-
-/*
- * One for each channel, holds all channel setup data
- * Some of the fields (e.g. eeprom and flags/max_power_avg) are redundant
- * with one another!
- */
-struct iwl_channel_info {
- struct iwl4965_channel_tgd_info tgd;
- struct iwl4965_channel_tgh_info tgh;
- struct iwl_eeprom_channel eeprom; /* EEPROM regulatory limit */
- struct iwl_eeprom_channel ht40_eeprom; /* EEPROM regulatory limit for
- * HT40 channel */
-
- u8 channel; /* channel number */
- u8 flags; /* flags copied from EEPROM */
- s8 max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */
- s8 curr_txpow; /* (dBm) regulatory/spectrum/user (not h/w) limit */
- s8 min_power; /* always 0 */
- s8 scan_power; /* (dBm) regul. eeprom, direct scans, any rate */
-
- u8 group_index; /* 0-4, maps channel to group1/2/3/4/5 */
- u8 band_index; /* 0-4, maps channel to band1/2/3/4/5 */
- enum ieee80211_band band;
-
- /* HT40 channel info */
- s8 ht40_max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */
- u8 ht40_flags; /* flags copied from EEPROM */
- u8 ht40_extension_channel; /* HT_IE_EXT_CHANNEL_* */
-
- /* Radio/DSP gain settings for each "normal" data Tx rate.
- * These include, in addition to RF and DSP gain, a few fields for
- * remembering/modifying gain settings (indexes). */
- struct iwl3945_channel_power_info power_info[IWL4965_MAX_RATE];
-
- /* Radio/DSP gain settings for each scan rate, for directed scans. */
- struct iwl3945_scan_power_info scan_pwr_info[IWL_NUM_SCAN_RATES];
-};
-
-#define IWL_TX_FIFO_BK 0 /* shared */
-#define IWL_TX_FIFO_BE 1
-#define IWL_TX_FIFO_VI 2 /* shared */
-#define IWL_TX_FIFO_VO 3
-#define IWL_TX_FIFO_UNUSED -1
-
-/* Minimum number of queues. MAX_NUM is defined in hw specific files.
- * Set the minimum to accommodate the 4 standard TX queues, 1 command
- * queue, 2 (unused) HCCA queues, and 4 HT queues (one for each AC) */
-#define IWL_MIN_NUM_QUEUES 10
-
-#define IWL_DEFAULT_CMD_QUEUE_NUM 4
-
-#define IEEE80211_DATA_LEN 2304
-#define IEEE80211_4ADDR_LEN 30
-#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN)
-#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN)
-
-struct iwl_frame {
- union {
- struct ieee80211_hdr frame;
- struct iwl_tx_beacon_cmd beacon;
- u8 raw[IEEE80211_FRAME_LEN];
- u8 cmd[360];
- } u;
- struct list_head list;
-};
-
-#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
-#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
-#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
-
-enum {
- CMD_SYNC = 0,
- CMD_SIZE_NORMAL = 0,
- CMD_NO_SKB = 0,
- CMD_SIZE_HUGE = (1 << 0),
- CMD_ASYNC = (1 << 1),
- CMD_WANT_SKB = (1 << 2),
- CMD_MAPPED = (1 << 3),
-};
-
-#define DEF_CMD_PAYLOAD_SIZE 320
-
-/**
- * struct iwl_device_cmd
- *
- * For allocation of the command and tx queues, this establishes the overall
- * size of the largest command we send to uCode, except for a scan command
- * (which is relatively huge; space is allocated separately).
- */
-struct iwl_device_cmd {
- struct iwl_cmd_header hdr; /* uCode API */
- union {
- u32 flags;
- u8 val8;
- u16 val16;
- u32 val32;
- struct iwl_tx_cmd tx;
- u8 payload[DEF_CMD_PAYLOAD_SIZE];
- } __packed cmd;
-} __packed;
-
-#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))
-
-
-struct iwl_host_cmd {
- const void *data;
- unsigned long reply_page;
- void (*callback)(struct iwl_priv *priv,
- struct iwl_device_cmd *cmd,
- struct iwl_rx_packet *pkt);
- u32 flags;
- u16 len;
- u8 id;
-};
-
-#define SUP_RATE_11A_MAX_NUM_CHANNELS 8
-#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
-#define SUP_RATE_11G_MAX_NUM_CHANNELS 12
-
-/**
- * struct iwl_rx_queue - Rx queue
- * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
- * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
- * @read: Shared index to newest available Rx buffer
- * @write: Shared index to oldest written Rx packet
- * @free_count: Number of pre-allocated buffers in rx_free
- * @rx_free: list of free SKBs for use
- * @rx_used: List of Rx buffers with no SKB
- * @need_update: flag to indicate we need to update read/write index
- * @rb_stts: driver's pointer to receive buffer status
- * @rb_stts_dma: bus address of receive buffer status
- *
- * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
- */
-struct iwl_rx_queue {
- __le32 *bd;
- dma_addr_t bd_dma;
- struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
- struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
- u32 read;
- u32 write;
- u32 free_count;
- u32 write_actual;
- struct list_head rx_free;
- struct list_head rx_used;
- int need_update;
- struct iwl_rb_status *rb_stts;
- dma_addr_t rb_stts_dma;
- spinlock_t lock;
-};
-
-#define IWL_SUPPORTED_RATES_IE_LEN 8
-
-#define MAX_TID_COUNT 9
-
-#define IWL_INVALID_RATE 0xFF
-#define IWL_INVALID_VALUE -1
-
-/**
- * struct iwl_ht_agg -- aggregation status while waiting for block-ack
- * @txq_id: Tx queue used for Tx attempt
- * @frame_count: # frames attempted by Tx command
- * @wait_for_ba: Expect block-ack before next Tx reply
- * @start_idx: Index of 1st Transmit Frame Descriptor (TFD) in Tx window
- * @bitmap0: Low order bitmap, one bit for each frame pending ACK in Tx window
- * @bitmap1: High order, one bit for each frame pending ACK in Tx window
- * @rate_n_flags: Rate at which Tx was attempted
- *
- * If REPLY_TX indicates that aggregation was attempted, driver must wait
- * for block ack (REPLY_COMPRESSED_BA). This struct stores tx reply info
- * until block ack arrives.
- */
-struct iwl_ht_agg {
- u16 txq_id;
- u16 frame_count;
- u16 wait_for_ba;
- u16 start_idx;
- u64 bitmap;
- u32 rate_n_flags;
-#define IWL_AGG_OFF 0
-#define IWL_AGG_ON 1
-#define IWL_EMPTYING_HW_QUEUE_ADDBA 2
-#define IWL_EMPTYING_HW_QUEUE_DELBA 3
- u8 state;
-};
-
-
-struct iwl_tid_data {
- u16 seq_number; /* 4965 only */
- u16 tfds_in_queue;
- struct iwl_ht_agg agg;
-};
-
-struct iwl_hw_key {
- u32 cipher;
- int keylen;
- u8 keyidx;
- u8 key[32];
-};
-
-union iwl_ht_rate_supp {
- u16 rates;
- struct {
- u8 siso_rate;
- u8 mimo_rate;
- };
-};
-
-#define CFG_HT_RX_AMPDU_FACTOR_8K (0x0)
-#define CFG_HT_RX_AMPDU_FACTOR_16K (0x1)
-#define CFG_HT_RX_AMPDU_FACTOR_32K (0x2)
-#define CFG_HT_RX_AMPDU_FACTOR_64K (0x3)
-#define CFG_HT_RX_AMPDU_FACTOR_DEF CFG_HT_RX_AMPDU_FACTOR_64K
-#define CFG_HT_RX_AMPDU_FACTOR_MAX CFG_HT_RX_AMPDU_FACTOR_64K
-#define CFG_HT_RX_AMPDU_FACTOR_MIN CFG_HT_RX_AMPDU_FACTOR_8K
-
-/*
- * Maximal MPDU density for TX aggregation
- * 4 - 2us density
- * 5 - 4us density
- * 6 - 8us density
- * 7 - 16us density
- */
-#define CFG_HT_MPDU_DENSITY_2USEC (0x4)
-#define CFG_HT_MPDU_DENSITY_4USEC (0x5)
-#define CFG_HT_MPDU_DENSITY_8USEC (0x6)
-#define CFG_HT_MPDU_DENSITY_16USEC (0x7)
-#define CFG_HT_MPDU_DENSITY_DEF CFG_HT_MPDU_DENSITY_4USEC
-#define CFG_HT_MPDU_DENSITY_MAX CFG_HT_MPDU_DENSITY_16USEC
-#define CFG_HT_MPDU_DENSITY_MIN (0x1)
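/*
 * Editor's note: illustrative only, not from the driver. The density codes
 * documented above follow a power-of-two spacing starting at 2 usec for 0x4,
 * so a hypothetical helper converting a code (0x4..0x7) to microseconds is:
 */
static inline u32 cfg_ht_mpdu_density_usecs(u32 density_code)
{
	/* 0x4 -> 2us, 0x5 -> 4us, 0x6 -> 8us, 0x7 -> 16us */
	return 1U << (density_code - 3);
}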
-
-struct iwl_ht_config {
- bool single_chain_sufficient;
- enum ieee80211_smps_mode smps; /* current smps mode */
-};
-
-/* QoS structures */
-struct iwl_qos_info {
- int qos_active;
- struct iwl_qosparam_cmd def_qos_parm;
-};
-
-/*
- * Structure should be accessed with sta_lock held. When station addition
- * is in progress (IWL_STA_UCODE_INPROGRESS) it is possible to access only
- * the commands (iwl_legacy_addsta_cmd and iwl_link_quality_cmd) without
- * sta_lock held.
- */
-struct iwl_station_entry {
- struct iwl_legacy_addsta_cmd sta;
- struct iwl_tid_data tid[MAX_TID_COUNT];
- u8 used, ctxid;
- struct iwl_hw_key keyinfo;
- struct iwl_link_quality_cmd *lq;
-};
-
-struct iwl_station_priv_common {
- struct iwl_rxon_context *ctx;
- u8 sta_id;
-};
-
-/*
- * iwl_station_priv: Driver's private station information
- *
- * When mac80211 creates a station it reserves some space (hw->sta_data_size)
- * in the structure for use by the driver. This structure is placed in that
- * space.
- *
- * The common struct MUST be first because it is shared between
- * 3945 and 4965!
- */
-struct iwl_station_priv {
- struct iwl_station_priv_common common;
- struct iwl_lq_sta lq_sta;
- atomic_t pending_frames;
- bool client;
- bool asleep;
-};
-
-/**
- * struct iwl_vif_priv - driver's private per-interface information
- *
- * When mac80211 allocates a virtual interface, it can allocate
- * space for us to put data into.
- */
-struct iwl_vif_priv {
- struct iwl_rxon_context *ctx;
- u8 ibss_bssid_sta_id;
-};
-
-/* one for each uCode image (inst/data, boot/init/runtime) */
-struct fw_desc {
- void *v_addr; /* access by driver */
- dma_addr_t p_addr; /* access by card's busmaster DMA */
- u32 len; /* bytes */
-};
-
-/* uCode file layout */
-struct iwl_ucode_header {
- __le32 ver; /* major/minor/API/serial */
- struct {
- __le32 inst_size; /* bytes of runtime code */
- __le32 data_size; /* bytes of runtime data */
- __le32 init_size; /* bytes of init code */
- __le32 init_data_size; /* bytes of init data */
- __le32 boot_size; /* bytes of bootstrap code */
- u8 data[0]; /* in same order as sizes */
- } v1;
-};
-
-struct iwl4965_ibss_seq {
- u8 mac[ETH_ALEN];
- u16 seq_num;
- u16 frag_num;
- unsigned long packet_time;
- struct list_head list;
-};
-
-struct iwl_sensitivity_ranges {
- u16 min_nrg_cck;
- u16 max_nrg_cck;
-
- u16 nrg_th_cck;
- u16 nrg_th_ofdm;
-
- u16 auto_corr_min_ofdm;
- u16 auto_corr_min_ofdm_mrc;
- u16 auto_corr_min_ofdm_x1;
- u16 auto_corr_min_ofdm_mrc_x1;
-
- u16 auto_corr_max_ofdm;
- u16 auto_corr_max_ofdm_mrc;
- u16 auto_corr_max_ofdm_x1;
- u16 auto_corr_max_ofdm_mrc_x1;
-
- u16 auto_corr_max_cck;
- u16 auto_corr_max_cck_mrc;
- u16 auto_corr_min_cck;
- u16 auto_corr_min_cck_mrc;
-
- u16 barker_corr_th_min;
- u16 barker_corr_th_min_mrc;
- u16 nrg_th_cca;
-};
-
-
-#define KELVIN_TO_CELSIUS(x) ((x)-273)
-#define CELSIUS_TO_KELVIN(x) ((x)+273)
-
-
-/**
- * struct iwl_hw_params
- * @max_txq_num: Max # Tx queues supported
- * @dma_chnl_num: Number of Tx DMA/FIFO channels
- * @scd_bc_tbls_size: size of scheduler byte count tables
- * @tfd_size: TFD size
- * @tx/rx_chains_num: Number of TX/RX chains
- * @valid_tx/rx_ant: usable antennas
- * @max_rxq_size: Max # Rx frames in Rx queue (must be power-of-2)
- * @max_rxq_log: Log-base-2 of max_rxq_size
- * @rx_page_order: Rx buffer page order
- * @rx_wrt_ptr_reg: FH{39}_RSCSR_CHNL0_WPTR
- * @max_stations:
- * @ht40_channel: is 40MHz width possible in band 2.4
- *	BIT(IEEE80211_BAND_2GHZ) BIT(IEEE80211_BAND_5GHZ)
- * @sw_crypto: 0 for hw, 1 for sw
- * @max_xxx_size: for ucode uses
- * @ct_kill_threshold: temperature threshold
- * @beacon_time_tsf_bits: number of valid tsf bits for beacon time
- * @struct iwl_sensitivity_ranges: range of sensitivity values
- */
-struct iwl_hw_params {
- u8 max_txq_num;
- u8 dma_chnl_num;
- u16 scd_bc_tbls_size;
- u32 tfd_size;
- u8 tx_chains_num;
- u8 rx_chains_num;
- u8 valid_tx_ant;
- u8 valid_rx_ant;
- u16 max_rxq_size;
- u16 max_rxq_log;
- u32 rx_page_order;
- u32 rx_wrt_ptr_reg;
- u8 max_stations;
- u8 ht40_channel;
- u8 max_beacon_itrvl; /* in 1024 ms */
- u32 max_inst_size;
- u32 max_data_size;
- u32 max_bsm_size;
- u32 ct_kill_threshold; /* value in hw-dependent units */
- u16 beacon_time_tsf_bits;
- const struct iwl_sensitivity_ranges *sens;
-};
-
-
-/******************************************************************************
- *
- * Functions implemented in core module which are forward declared here
- * for use by iwl-[4-5].c
- *
- * NOTE: The implementation of these functions is not hardware specific,
- * which is why they are in the core module files.
- *
- * Naming convention --
- * iwl_ <-- Is part of iwlwifi
- * iwlXXXX_ <-- Hardware specific (implemented in iwl-XXXX.c for XXXX)
- * iwl4965_bg_ <-- Called from work queue context
- * iwl4965_mac_ <-- mac80211 callback
- *
- ****************************************************************************/
-extern void iwl4965_update_chain_flags(struct iwl_priv *priv);
-extern const u8 iwlegacy_bcast_addr[ETH_ALEN];
-extern int iwl_legacy_queue_space(const struct iwl_queue *q);
-static inline int iwl_legacy_queue_used(const struct iwl_queue *q, int i)
-{
- return q->write_ptr >= q->read_ptr ?
- (i >= q->read_ptr && i < q->write_ptr) :
- !(i < q->read_ptr && i >= q->write_ptr);
-}
-
-
-static inline u8 iwl_legacy_get_cmd_index(struct iwl_queue *q, u32 index,
- int is_huge)
-{
- /*
-	 * This is for the init calibration result and scan commands, which
-	 * require a buffer larger than TFD_MAX_PAYLOAD_SIZE: they use
-	 * the big buffer at the end of the command array.
- */
- if (is_huge)
- return q->n_window; /* must be power of 2 */
-
- /* Otherwise, use normal size buffers */
- return index & (q->n_window - 1);
-}
-
-
-struct iwl_dma_ptr {
- dma_addr_t dma;
- void *addr;
- size_t size;
-};
-
-#define IWL_OPERATION_MODE_AUTO 0
-#define IWL_OPERATION_MODE_HT_ONLY 1
-#define IWL_OPERATION_MODE_MIXED 2
-#define IWL_OPERATION_MODE_20MHZ 3
-
-#define IWL_TX_CRC_SIZE 4
-#define IWL_TX_DELIMITER_SIZE 4
-
-#define TX_POWER_IWL_ILLEGAL_VOLTAGE -10000
-
-/* Sensitivity and chain noise calibration */
-#define INITIALIZATION_VALUE 0xFFFF
-#define IWL4965_CAL_NUM_BEACONS 20
-#define IWL_CAL_NUM_BEACONS 16
-#define MAXIMUM_ALLOWED_PATHLOSS 15
-
-#define CHAIN_NOISE_MAX_DELTA_GAIN_CODE 3
-
-#define MAX_FA_OFDM 50
-#define MIN_FA_OFDM 5
-#define MAX_FA_CCK 50
-#define MIN_FA_CCK 5
-
-#define AUTO_CORR_STEP_OFDM 1
-
-#define AUTO_CORR_STEP_CCK 3
-#define AUTO_CORR_MAX_TH_CCK 160
-
-#define NRG_DIFF 2
-#define NRG_STEP_CCK 2
-#define NRG_MARGIN 8
-#define MAX_NUMBER_CCK_NO_FA 100
-
-#define AUTO_CORR_CCK_MIN_VAL_DEF (125)
-
-#define CHAIN_A 0
-#define CHAIN_B 1
-#define CHAIN_C 2
-#define CHAIN_NOISE_DELTA_GAIN_INIT_VAL 4
-#define ALL_BAND_FILTER 0xFF00
-#define IN_BAND_FILTER 0xFF
-#define MIN_AVERAGE_NOISE_MAX_VALUE 0xFFFFFFFF
-
-#define NRG_NUM_PREV_STAT_L 20
-#define NUM_RX_CHAINS 3
-
-enum iwl4965_false_alarm_state {
- IWL_FA_TOO_MANY = 0,
- IWL_FA_TOO_FEW = 1,
- IWL_FA_GOOD_RANGE = 2,
-};
-
-enum iwl4965_chain_noise_state {
- IWL_CHAIN_NOISE_ALIVE = 0, /* must be 0 */
- IWL_CHAIN_NOISE_ACCUMULATE,
- IWL_CHAIN_NOISE_CALIBRATED,
- IWL_CHAIN_NOISE_DONE,
-};
-
-enum iwl4965_calib_enabled_state {
- IWL_CALIB_DISABLED = 0, /* must be 0 */
- IWL_CALIB_ENABLED = 1,
-};
-
-/*
- * enum iwl_calib
- * defines the order in which results of initial calibrations
- * should be sent to the runtime uCode
- */
-enum iwl_calib {
- IWL_CALIB_MAX,
-};
-
-/* Opaque calibration results */
-struct iwl_calib_result {
- void *buf;
- size_t buf_len;
-};
-
-enum ucode_type {
- UCODE_NONE = 0,
- UCODE_INIT,
- UCODE_RT
-};
-
-/* Sensitivity calib data */
-struct iwl_sensitivity_data {
- u32 auto_corr_ofdm;
- u32 auto_corr_ofdm_mrc;
- u32 auto_corr_ofdm_x1;
- u32 auto_corr_ofdm_mrc_x1;
- u32 auto_corr_cck;
- u32 auto_corr_cck_mrc;
-
- u32 last_bad_plcp_cnt_ofdm;
- u32 last_fa_cnt_ofdm;
- u32 last_bad_plcp_cnt_cck;
- u32 last_fa_cnt_cck;
-
- u32 nrg_curr_state;
- u32 nrg_prev_state;
- u32 nrg_value[10];
- u8 nrg_silence_rssi[NRG_NUM_PREV_STAT_L];
- u32 nrg_silence_ref;
- u32 nrg_energy_idx;
- u32 nrg_silence_idx;
- u32 nrg_th_cck;
- s32 nrg_auto_corr_silence_diff;
- u32 num_in_cck_no_fa;
- u32 nrg_th_ofdm;
-
- u16 barker_corr_th_min;
- u16 barker_corr_th_min_mrc;
- u16 nrg_th_cca;
-};
-
-/* Chain noise (differential Rx gain) calib data */
-struct iwl_chain_noise_data {
- u32 active_chains;
- u32 chain_noise_a;
- u32 chain_noise_b;
- u32 chain_noise_c;
- u32 chain_signal_a;
- u32 chain_signal_b;
- u32 chain_signal_c;
- u16 beacon_count;
- u8 disconn_array[NUM_RX_CHAINS];
- u8 delta_gain_code[NUM_RX_CHAINS];
- u8 radio_write;
- u8 state;
-};
-
-#define EEPROM_SEM_TIMEOUT 10 /* milliseconds */
-#define EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
-
-#define IWL_TRAFFIC_ENTRIES (256)
-#define IWL_TRAFFIC_ENTRY_SIZE (64)
-
-enum {
- MEASUREMENT_READY = (1 << 0),
- MEASUREMENT_ACTIVE = (1 << 1),
-};
-
-/* interrupt statistics */
-struct isr_statistics {
- u32 hw;
- u32 sw;
- u32 err_code;
- u32 sch;
- u32 alive;
- u32 rfkill;
- u32 ctkill;
- u32 wakeup;
- u32 rx;
- u32 rx_handlers[REPLY_MAX];
- u32 tx;
- u32 unhandled;
-};
-
-/* management statistics */
-enum iwl_mgmt_stats {
- MANAGEMENT_ASSOC_REQ = 0,
- MANAGEMENT_ASSOC_RESP,
- MANAGEMENT_REASSOC_REQ,
- MANAGEMENT_REASSOC_RESP,
- MANAGEMENT_PROBE_REQ,
- MANAGEMENT_PROBE_RESP,
- MANAGEMENT_BEACON,
- MANAGEMENT_ATIM,
- MANAGEMENT_DISASSOC,
- MANAGEMENT_AUTH,
- MANAGEMENT_DEAUTH,
- MANAGEMENT_ACTION,
- MANAGEMENT_MAX,
-};
-/* control statistics */
-enum iwl_ctrl_stats {
- CONTROL_BACK_REQ = 0,
- CONTROL_BACK,
- CONTROL_PSPOLL,
- CONTROL_RTS,
- CONTROL_CTS,
- CONTROL_ACK,
- CONTROL_CFEND,
- CONTROL_CFENDACK,
- CONTROL_MAX,
-};
-
-struct traffic_stats {
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
- u32 mgmt[MANAGEMENT_MAX];
- u32 ctrl[CONTROL_MAX];
- u32 data_cnt;
- u64 data_bytes;
-#endif
-};
-
-/*
- * host interrupt timeout value
- * used with setting interrupt coalescing timer
- * CSR_INT_COALESCING is an 8-bit register in 32-usec units
- *
- * default interrupt coalescing timer is 64 x 32 = 2048 usecs
- * default interrupt coalescing calibration timer is 16 x 32 = 512 usecs
- */
-#define IWL_HOST_INT_TIMEOUT_MAX (0xFF)
-#define IWL_HOST_INT_TIMEOUT_DEF (0x40)
-#define IWL_HOST_INT_TIMEOUT_MIN (0x0)
-#define IWL_HOST_INT_CALIB_TIMEOUT_MAX (0xFF)
-#define IWL_HOST_INT_CALIB_TIMEOUT_DEF (0x10)
-#define IWL_HOST_INT_CALIB_TIMEOUT_MIN (0x0)
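/*
 * Editor's note: illustrative conversion, not part of the driver. Since the
 * coalescing register counts 32-usec ticks, the defaults above correspond to
 * 0x40 * 32 = 2048 usecs and 0x10 * 32 = 512 usecs; the helper name is
 * hypothetical.
 */
static inline u32 iwl_coalescing_usecs(u8 timeout_reg_val)
{
	return (u32)timeout_reg_val * 32;	/* 8-bit register, 32-usec units */
}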
-
-#define IWL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5)
-
-/* TX queue watchdog timeouts in mSecs */
-#define IWL_DEF_WD_TIMEOUT (2000)
-#define IWL_LONG_WD_TIMEOUT (10000)
-#define IWL_MAX_WD_TIMEOUT (120000)
-
-struct iwl_force_reset {
- int reset_request_count;
- int reset_success_count;
- int reset_reject_count;
- unsigned long reset_duration;
- unsigned long last_force_reset_jiffies;
-};
-
-/* extend beacon time format bit shifting */
-/*
- * for _3945 devices
- * bits 31:24 - extended
- * bits 23:0 - interval
- */
-#define IWL3945_EXT_BEACON_TIME_POS 24
-/*
- * for _4965 devices
- * bits 31:22 - extended
- * bits 21:0 - interval
- */
-#define IWL4965_EXT_BEACON_TIME_POS 22
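/*
 * Editor's note: a hypothetical sketch, not from the driver, of packing an
 * extended beacon time value using the 4965 layout documented above: the
 * extension occupies the bits at and above IWL4965_EXT_BEACON_TIME_POS and
 * the interval occupies the bits below it.
 */
static inline u32 iwl4965_pack_beacon_time(u32 extended, u32 interval)
{
	u32 interval_mask = (1U << IWL4965_EXT_BEACON_TIME_POS) - 1;

	return (extended << IWL4965_EXT_BEACON_TIME_POS) |
	       (interval & interval_mask);
}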
-
-enum iwl_rxon_context_id {
- IWL_RXON_CTX_BSS,
-
- NUM_IWL_RXON_CTX
-};
-
-struct iwl_rxon_context {
- struct ieee80211_vif *vif;
-
- const u8 *ac_to_fifo;
- const u8 *ac_to_queue;
- u8 mcast_queue;
-
- /*
- * We could use the vif to indicate active, but we
- * also need it to be active during disabling when
- * we already removed the vif for type setting.
- */
- bool always_active, is_active;
-
- bool ht_need_multiple_chains;
-
- enum iwl_rxon_context_id ctxid;
-
- u32 interface_modes, exclusive_interface_modes;
- u8 unused_devtype, ap_devtype, ibss_devtype, station_devtype;
-
- /*
- * We declare this const so it can only be
- * changed via explicit cast within the
- * routines that actually update the physical
- * hardware.
- */
- const struct iwl_legacy_rxon_cmd active;
- struct iwl_legacy_rxon_cmd staging;
-
- struct iwl_rxon_time_cmd timing;
-
- struct iwl_qos_info qos_data;
-
- u8 bcast_sta_id, ap_sta_id;
-
- u8 rxon_cmd, rxon_assoc_cmd, rxon_timing_cmd;
- u8 qos_cmd;
- u8 wep_key_cmd;
-
- struct iwl_wep_key wep_keys[WEP_KEYS_MAX];
- u8 key_mapping_keys;
-
- __le32 station_flags;
-
- struct {
- bool non_gf_sta_present;
- u8 protection;
- bool enabled, is_40mhz;
- u8 extension_chan_offset;
- } ht;
-};
-
-struct iwl_priv {
-
- /* ieee device used by generic ieee processing code */
- struct ieee80211_hw *hw;
- struct ieee80211_channel *ieee_channels;
- struct ieee80211_rate *ieee_rates;
- struct iwl_cfg *cfg;
-
- /* temporary frame storage list */
- struct list_head free_frames;
- int frames_count;
-
- enum ieee80211_band band;
- int alloc_rxb_page;
-
- void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
-
- struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
-
- /* spectrum measurement report caching */
- struct iwl_spectrum_notification measure_report;
- u8 measurement_status;
-
- /* ucode beacon time */
- u32 ucode_beacon_time;
- int missed_beacon_threshold;
-
- /* track IBSS manager (last beacon) status */
- u32 ibss_manager;
-
- /* force reset */
- struct iwl_force_reset force_reset;
-
- /* we allocate array of iwl_channel_info for NIC's valid channels.
- * Access via channel # using indirect index array */
- struct iwl_channel_info *channel_info; /* channel info array */
- u8 channel_count; /* # of channels */
-
- /* thermal calibration */
- s32 temperature; /* degrees Kelvin */
- s32 last_temperature;
-
- /* init calibration results */
- struct iwl_calib_result calib_results[IWL_CALIB_MAX];
-
- /* Scan related variables */
- unsigned long scan_start;
- unsigned long scan_start_tsf;
- void *scan_cmd;
- enum ieee80211_band scan_band;
- struct cfg80211_scan_request *scan_request;
- struct ieee80211_vif *scan_vif;
- u8 scan_tx_ant[IEEE80211_NUM_BANDS];
- u8 mgmt_tx_ant;
-
- /* spinlock */
- spinlock_t lock; /* protect general shared data */
- spinlock_t hcmd_lock; /* protect hcmd */
- spinlock_t reg_lock; /* protect hw register access */
- struct mutex mutex;
-
- /* basic pci-network driver stuff */
- struct pci_dev *pci_dev;
-
- /* pci hardware address support */
- void __iomem *hw_base;
- u32 hw_rev;
- u32 hw_wa_rev;
- u8 rev_id;
-
- /* microcode/device supports multiple contexts */
- u8 valid_contexts;
-
- /* command queue number */
- u8 cmd_queue;
-
- /* max number of station keys */
- u8 sta_key_max_num;
-
- /* EEPROM MAC addresses */
- struct mac_address addresses[1];
-
- /* uCode images, save to reload in case of failure */
- int fw_index; /* firmware we're trying to load */
- u32 ucode_ver; /* version of ucode, copy of
- iwl_ucode.ver */
- struct fw_desc ucode_code; /* runtime inst */
- struct fw_desc ucode_data; /* runtime data original */
- struct fw_desc ucode_data_backup; /* runtime data save/restore */
- struct fw_desc ucode_init; /* initialization inst */
- struct fw_desc ucode_init_data; /* initialization data */
- struct fw_desc ucode_boot; /* bootstrap inst */
- enum ucode_type ucode_type;
- u8 ucode_write_complete; /* the image write is complete */
- char firmware_name[25];
-
- struct iwl_rxon_context contexts[NUM_IWL_RXON_CTX];
-
- __le16 switch_channel;
-
- /* 1st responses from initialize and runtime uCode images.
- * _4965's initialize alive response contains some calibration data. */
- struct iwl_init_alive_resp card_alive_init;
- struct iwl_alive_resp card_alive;
-
- u16 active_rate;
-
- u8 start_calib;
- struct iwl_sensitivity_data sensitivity_data;
- struct iwl_chain_noise_data chain_noise_data;
- __le16 sensitivity_tbl[HD_TABLE_SIZE];
-
- struct iwl_ht_config current_ht_config;
-
- /* Rate scaling data */
- u8 retry_rate;
-
- wait_queue_head_t wait_command_queue;
-
- int activity_timer_active;
-
- /* Rx and Tx DMA processing queues */
- struct iwl_rx_queue rxq;
- struct iwl_tx_queue *txq;
- unsigned long txq_ctx_active_msk;
- struct iwl_dma_ptr kw; /* keep warm address */
- struct iwl_dma_ptr scd_bc_tbls;
-
- u32 scd_base_addr; /* scheduler sram base address */
-
- unsigned long status;
-
- /* counts mgmt, ctl, and data packets */
- struct traffic_stats tx_stats;
- struct traffic_stats rx_stats;
-
- /* counts interrupts */
- struct isr_statistics isr_stats;
-
- struct iwl_power_mgr power_data;
-
- /* context information */
- u8 bssid[ETH_ALEN]; /* used only on 3945 but filled by core */
-
- /* station table variables */
-
- /* Note: if lock and sta_lock are needed, lock must be acquired first */
- spinlock_t sta_lock;
- int num_stations;
- struct iwl_station_entry stations[IWL_STATION_COUNT];
- unsigned long ucode_key_table;
-
- /* queue refcounts */
-#define IWL_MAX_HW_QUEUES 32
- unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
- /* for each AC */
- atomic_t queue_stop_count[4];
-
- /* Indication if ieee80211_ops->open has been called */
- u8 is_open;
-
- u8 mac80211_registered;
-
- /* eeprom -- this is in the card's little endian byte order */
- u8 *eeprom;
- struct iwl_eeprom_calib_info *calib_info;
-
- enum nl80211_iftype iw_mode;
-
- /* Last Rx'd beacon timestamp */
- u64 timestamp;
-
- union {
-#if defined(CONFIG_IWL3945) || defined(CONFIG_IWL3945_MODULE)
- struct {
- void *shared_virt;
- dma_addr_t shared_phys;
-
- struct delayed_work thermal_periodic;
- struct delayed_work rfkill_poll;
-
- struct iwl3945_notif_statistics statistics;
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
- struct iwl3945_notif_statistics accum_statistics;
- struct iwl3945_notif_statistics delta_statistics;
- struct iwl3945_notif_statistics max_delta;
-#endif
-
- u32 sta_supp_rates;
- int last_rx_rssi; /* From Rx packet statistics */
-
- /* Rx'd packet timing information */
- u32 last_beacon_time;
- u64 last_tsf;
-
- /*
- * each calibration channel group in the
- * EEPROM has a derived clip setting for
- * each rate.
- */
- const struct iwl3945_clip_group clip_groups[5];
-
- } _3945;
-#endif
-#if defined(CONFIG_IWL4965) || defined(CONFIG_IWL4965_MODULE)
- struct {
- struct iwl_rx_phy_res last_phy_res;
- bool last_phy_res_valid;
-
- struct completion firmware_loading_complete;
-
- /*
-			 * chain noise reset and gain commands are the
-			 * two extra calibration commands that follow the
-			 * standard phy calibration commands
- */
- u8 phy_calib_chain_noise_reset_cmd;
- u8 phy_calib_chain_noise_gain_cmd;
-
- struct iwl_notif_statistics statistics;
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
- struct iwl_notif_statistics accum_statistics;
- struct iwl_notif_statistics delta_statistics;
- struct iwl_notif_statistics max_delta;
-#endif
-
- } _4965;
-#endif
- };
-
- struct iwl_hw_params hw_params;
-
- u32 inta_mask;
-
- struct workqueue_struct *workqueue;
-
- struct work_struct restart;
- struct work_struct scan_completed;
- struct work_struct rx_replenish;
- struct work_struct abort_scan;
-
- struct iwl_rxon_context *beacon_ctx;
- struct sk_buff *beacon_skb;
-
- struct work_struct tx_flush;
-
- struct tasklet_struct irq_tasklet;
-
- struct delayed_work init_alive_start;
- struct delayed_work alive_start;
- struct delayed_work scan_check;
-
- /* TX Power */
- s8 tx_power_user_lmt;
- s8 tx_power_device_lmt;
- s8 tx_power_next;
-
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- /* debugging info */
- u32 debug_level; /* per device debugging will override global
- iwlegacy_debug_level if set */
-#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
- /* debugfs */
- u16 tx_traffic_idx;
- u16 rx_traffic_idx;
- u8 *tx_traffic;
- u8 *rx_traffic;
- struct dentry *debugfs_dir;
- u32 dbgfs_sram_offset, dbgfs_sram_len;
- bool disable_ht40;
-#endif /* CONFIG_IWLWIFI_LEGACY_DEBUGFS */
-
- struct work_struct txpower_work;
- u32 disable_sens_cal;
- u32 disable_chain_noise_cal;
- u32 disable_tx_power_cal;
- struct work_struct run_time_calib_work;
- struct timer_list statistics_periodic;
- struct timer_list watchdog;
- bool hw_ready;
-
- struct led_classdev led;
- unsigned long blink_on, blink_off;
- bool led_registered;
-}; /*iwl_priv */
-
-static inline void iwl_txq_ctx_activate(struct iwl_priv *priv, int txq_id)
-{
- set_bit(txq_id, &priv->txq_ctx_active_msk);
-}
-
-static inline void iwl_txq_ctx_deactivate(struct iwl_priv *priv, int txq_id)
-{
- clear_bit(txq_id, &priv->txq_ctx_active_msk);
-}
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-/*
- * iwl_legacy_get_debug_level: Return active debug level for device
- *
- * Using sysfs it is possible to set per device debug level. This debug
- * level will be used if set, otherwise the global debug level which can be
- * set via module parameter is used.
- */
-static inline u32 iwl_legacy_get_debug_level(struct iwl_priv *priv)
-{
- if (priv->debug_level)
- return priv->debug_level;
- else
- return iwlegacy_debug_level;
-}
-#else
-static inline u32 iwl_legacy_get_debug_level(struct iwl_priv *priv)
-{
- return iwlegacy_debug_level;
-}
-#endif
-
-
-static inline struct ieee80211_hdr *
-iwl_legacy_tx_queue_get_hdr(struct iwl_priv *priv,
- int txq_id, int idx)
-{
- if (priv->txq[txq_id].txb[idx].skb)
- return (struct ieee80211_hdr *)priv->txq[txq_id].
- txb[idx].skb->data;
- return NULL;
-}
-
-static inline struct iwl_rxon_context *
-iwl_legacy_rxon_ctx_from_vif(struct ieee80211_vif *vif)
-{
- struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
-
- return vif_priv->ctx;
-}
-
-#define for_each_context(priv, ctx) \
- for (ctx = &priv->contexts[IWL_RXON_CTX_BSS]; \
- ctx < &priv->contexts[NUM_IWL_RXON_CTX]; ctx++) \
- if (priv->valid_contexts & BIT(ctx->ctxid))
-
-static inline int iwl_legacy_is_associated(struct iwl_priv *priv,
- enum iwl_rxon_context_id ctxid)
-{
- return (priv->contexts[ctxid].active.filter_flags &
- RXON_FILTER_ASSOC_MSK) ? 1 : 0;
-}
-
-static inline int iwl_legacy_is_any_associated(struct iwl_priv *priv)
-{
- return iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS);
-}
-
-static inline int iwl_legacy_is_associated_ctx(struct iwl_rxon_context *ctx)
-{
- return (ctx->active.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0;
-}
-
-static inline int iwl_legacy_is_channel_valid(const struct iwl_channel_info *ch_info)
-{
- if (ch_info == NULL)
- return 0;
- return (ch_info->flags & EEPROM_CHANNEL_VALID) ? 1 : 0;
-}
-
-static inline int iwl_legacy_is_channel_radar(const struct iwl_channel_info *ch_info)
-{
- return (ch_info->flags & EEPROM_CHANNEL_RADAR) ? 1 : 0;
-}
-
-static inline u8 iwl_legacy_is_channel_a_band(const struct iwl_channel_info *ch_info)
-{
- return ch_info->band == IEEE80211_BAND_5GHZ;
-}
-
-static inline int
-iwl_legacy_is_channel_passive(const struct iwl_channel_info *ch)
-{
- return (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) ? 1 : 0;
-}
-
-static inline int
-iwl_legacy_is_channel_ibss(const struct iwl_channel_info *ch)
-{
- return (ch->flags & EEPROM_CHANNEL_IBSS) ? 1 : 0;
-}
-
-static inline void
-__iwl_legacy_free_pages(struct iwl_priv *priv, struct page *page)
-{
- __free_pages(page, priv->hw_params.rx_page_order);
- priv->alloc_rxb_page--;
-}
-
-static inline void iwl_legacy_free_pages(struct iwl_priv *priv, unsigned long page)
-{
- free_pages(page, priv->hw_params.rx_page_order);
- priv->alloc_rxb_page--;
-}
-#endif /* __iwl_legacy_dev_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-devtrace.c b/drivers/net/wireless/iwlegacy/iwl-devtrace.c
deleted file mode 100644
index acec99197ce0..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-devtrace.c
+++ /dev/null
@@ -1,42 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2009 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/module.h>
-
-/* sparse doesn't like tracepoint macros */
-#ifndef __CHECKER__
-#include "iwl-dev.h"
-
-#define CREATE_TRACE_POINTS
-#include "iwl-devtrace.h"
-
-EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_iowrite8);
-EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ioread32);
-EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_iowrite32);
-EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_rx);
-EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_tx);
-EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ucode_error);
-#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-devtrace.h b/drivers/net/wireless/iwlegacy/iwl-devtrace.h
deleted file mode 100644
index a443725ba6be..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-devtrace.h
+++ /dev/null
@@ -1,210 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2009 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#if !defined(__IWLWIFI_LEGACY_DEVICE_TRACE) || defined(TRACE_HEADER_MULTI_READ)
-#define __IWLWIFI_LEGACY_DEVICE_TRACE
-
-#include <linux/tracepoint.h>
-
-#if !defined(CONFIG_IWLWIFI_LEGACY_DEVICE_TRACING) || defined(__CHECKER__)
-#undef TRACE_EVENT
-#define TRACE_EVENT(name, proto, ...) \
-static inline void trace_ ## name(proto) {}
-#endif
-
-
-#define PRIV_ENTRY __field(struct iwl_priv *, priv)
-#define PRIV_ASSIGN (__entry->priv = priv)
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM iwlwifi_legacy_io
-
-TRACE_EVENT(iwlwifi_legacy_dev_ioread32,
- TP_PROTO(struct iwl_priv *priv, u32 offs, u32 val),
- TP_ARGS(priv, offs, val),
- TP_STRUCT__entry(
- PRIV_ENTRY
- __field(u32, offs)
- __field(u32, val)
- ),
- TP_fast_assign(
- PRIV_ASSIGN;
- __entry->offs = offs;
- __entry->val = val;
- ),
- TP_printk("[%p] read io[%#x] = %#x", __entry->priv,
- __entry->offs, __entry->val)
-);
-
-TRACE_EVENT(iwlwifi_legacy_dev_iowrite8,
- TP_PROTO(struct iwl_priv *priv, u32 offs, u8 val),
- TP_ARGS(priv, offs, val),
- TP_STRUCT__entry(
- PRIV_ENTRY
- __field(u32, offs)
- __field(u8, val)
- ),
- TP_fast_assign(
- PRIV_ASSIGN;
- __entry->offs = offs;
- __entry->val = val;
- ),
- TP_printk("[%p] write io[%#x] = %#x)", __entry->priv,
- __entry->offs, __entry->val)
-);
-
-TRACE_EVENT(iwlwifi_legacy_dev_iowrite32,
- TP_PROTO(struct iwl_priv *priv, u32 offs, u32 val),
- TP_ARGS(priv, offs, val),
- TP_STRUCT__entry(
- PRIV_ENTRY
- __field(u32, offs)
- __field(u32, val)
- ),
- TP_fast_assign(
- PRIV_ASSIGN;
- __entry->offs = offs;
- __entry->val = val;
- ),
- TP_printk("[%p] write io[%#x] = %#x)", __entry->priv,
- __entry->offs, __entry->val)
-);
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM iwlwifi_legacy_ucode
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM iwlwifi
-
-TRACE_EVENT(iwlwifi_legacy_dev_hcmd,
- TP_PROTO(struct iwl_priv *priv, void *hcmd, size_t len, u32 flags),
- TP_ARGS(priv, hcmd, len, flags),
- TP_STRUCT__entry(
- PRIV_ENTRY
- __dynamic_array(u8, hcmd, len)
- __field(u32, flags)
- ),
- TP_fast_assign(
- PRIV_ASSIGN;
- memcpy(__get_dynamic_array(hcmd), hcmd, len);
- __entry->flags = flags;
- ),
- TP_printk("[%p] hcmd %#.2x (%ssync)",
- __entry->priv, ((u8 *)__get_dynamic_array(hcmd))[0],
- __entry->flags & CMD_ASYNC ? "a" : "")
-);
-
-TRACE_EVENT(iwlwifi_legacy_dev_rx,
- TP_PROTO(struct iwl_priv *priv, void *rxbuf, size_t len),
- TP_ARGS(priv, rxbuf, len),
- TP_STRUCT__entry(
- PRIV_ENTRY
- __dynamic_array(u8, rxbuf, len)
- ),
- TP_fast_assign(
- PRIV_ASSIGN;
- memcpy(__get_dynamic_array(rxbuf), rxbuf, len);
- ),
- TP_printk("[%p] RX cmd %#.2x",
- __entry->priv, ((u8 *)__get_dynamic_array(rxbuf))[4])
-);
-
-TRACE_EVENT(iwlwifi_legacy_dev_tx,
- TP_PROTO(struct iwl_priv *priv, void *tfd, size_t tfdlen,
- void *buf0, size_t buf0_len,
- void *buf1, size_t buf1_len),
- TP_ARGS(priv, tfd, tfdlen, buf0, buf0_len, buf1, buf1_len),
- TP_STRUCT__entry(
- PRIV_ENTRY
-
- __field(size_t, framelen)
- __dynamic_array(u8, tfd, tfdlen)
-
- /*
- * Do not insert between or below these items,
- * we want to keep the frame together (except
- * for the possible padding).
- */
- __dynamic_array(u8, buf0, buf0_len)
- __dynamic_array(u8, buf1, buf1_len)
- ),
- TP_fast_assign(
- PRIV_ASSIGN;
- __entry->framelen = buf0_len + buf1_len;
- memcpy(__get_dynamic_array(tfd), tfd, tfdlen);
- memcpy(__get_dynamic_array(buf0), buf0, buf0_len);
- memcpy(__get_dynamic_array(buf1), buf1, buf1_len);
- ),
- TP_printk("[%p] TX %.2x (%zu bytes)",
- __entry->priv,
- ((u8 *)__get_dynamic_array(buf0))[0],
- __entry->framelen)
-);
-
-TRACE_EVENT(iwlwifi_legacy_dev_ucode_error,
- TP_PROTO(struct iwl_priv *priv, u32 desc, u32 time,
- u32 data1, u32 data2, u32 line, u32 blink1,
- u32 blink2, u32 ilink1, u32 ilink2),
- TP_ARGS(priv, desc, time, data1, data2, line,
- blink1, blink2, ilink1, ilink2),
- TP_STRUCT__entry(
- PRIV_ENTRY
- __field(u32, desc)
- __field(u32, time)
- __field(u32, data1)
- __field(u32, data2)
- __field(u32, line)
- __field(u32, blink1)
- __field(u32, blink2)
- __field(u32, ilink1)
- __field(u32, ilink2)
- ),
- TP_fast_assign(
- PRIV_ASSIGN;
- __entry->desc = desc;
- __entry->time = time;
- __entry->data1 = data1;
- __entry->data2 = data2;
- __entry->line = line;
- __entry->blink1 = blink1;
- __entry->blink2 = blink2;
- __entry->ilink1 = ilink1;
- __entry->ilink2 = ilink2;
- ),
- TP_printk("[%p] #%02d %010u data 0x%08X 0x%08X line %u, "
- "blink 0x%05X 0x%05X ilink 0x%05X 0x%05X",
- __entry->priv, __entry->desc, __entry->time, __entry->data1,
- __entry->data2, __entry->line, __entry->blink1,
- __entry->blink2, __entry->ilink1, __entry->ilink2)
-);
-
-#endif /* __IWLWIFI_LEGACY_DEVICE_TRACE */
-
-#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH .
-#undef TRACE_INCLUDE_FILE
-#define TRACE_INCLUDE_FILE iwl-devtrace
-#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/iwlegacy/iwl-eeprom.c b/drivers/net/wireless/iwlegacy/iwl-eeprom.c
deleted file mode 100644
index 5bf3f49b74ab..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-eeprom.c
+++ /dev/null
@@ -1,553 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *****************************************************************************/
-
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-
-#include <net/mac80211.h>
-
-#include "iwl-commands.h"
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-debug.h"
-#include "iwl-eeprom.h"
-#include "iwl-io.h"
-
-/************************** EEPROM BANDS ****************************
- *
- * The iwlegacy_eeprom_band definitions below provide the mapping from the
- * EEPROM contents to the specific channel number supported for each
- * band.
- *
- * For example, iwl_priv->eeprom.band_3_channels[4] from the band_3
- * definition below maps to physical channel 42 in the 5.2GHz spectrum.
- * The specific geography and calibration information for that channel
- * is contained in the eeprom map itself.
- *
- * During init, we copy the eeprom information and channel map
- * information into priv->channel_info_24/52 and priv->channel_map_24/52
- *
- * channel_map_24/52 provides the index in the channel_info array for a
- * given channel. We have to have two separate maps as there is channel
- * overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and
- * band_2
- *
- * A value of 0xff stored in the channel_map indicates that the channel
- * is not supported by the hardware at all.
- *
- * A value of 0xfe in the channel_map indicates that the channel is not
- * valid for Tx with the current hardware. This means that
- * while the system can tune and receive on a given channel, it may not
- * be able to associate or transmit any frames on that
- * channel. There is no corresponding channel information for that
- * entry.
- *
- *********************************************************************/
-
-/* 2.4 GHz */
-const u8 iwlegacy_eeprom_band_1[14] = {
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
-};
-
-/* 5.2 GHz bands */
-static const u8 iwlegacy_eeprom_band_2[] = { /* 4915-5080MHz */
- 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
-};
-
-static const u8 iwlegacy_eeprom_band_3[] = { /* 5170-5320MHz */
- 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
-};
-
-static const u8 iwlegacy_eeprom_band_4[] = { /* 5500-5700MHz */
- 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
-};
-
-static const u8 iwlegacy_eeprom_band_5[] = { /* 5725-5825MHz */
- 145, 149, 153, 157, 161, 165
-};
-
-static const u8 iwlegacy_eeprom_band_6[] = { /* 2.4 ht40 channel */
- 1, 2, 3, 4, 5, 6, 7
-};
-
-static const u8 iwlegacy_eeprom_band_7[] = { /* 5.2 ht40 channel */
- 36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
-};
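
The block comment above describes the channel_map_24/52 tables (0xff = unsupported, 0xfe = Rx-only) without showing them; they live elsewhere in the driver. Below is a minimal user-space sketch of that lookup convention, with an entirely made-up map, just to illustrate how the two sentinel values would be interpreted:

#include <stdint.h>
#include <stdio.h>

#define CH_UNSUPPORTED 0xff   /* channel not supported by the hardware      */
#define CH_RX_ONLY     0xfe   /* can tune/receive, but no Tx or association */

/* hypothetical map: index = channel number, value = channel_info index */
static const uint8_t channel_map_24[15] = {
        CH_UNSUPPORTED,                  /* channel 0 does not exist */
        0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, CH_RX_ONLY
};

static int lookup_channel(uint8_t channel)
{
        uint8_t idx = channel_map_24[channel];

        if (idx == CH_UNSUPPORTED)
                return -1;      /* no channel_info entry at all        */
        if (idx == CH_RX_ONLY)
                return -2;      /* tune/Rx only, no corresponding info */
        return idx;             /* index into the channel_info array   */
}

int main(void)
{
        printf("channel 6  -> %d\n", lookup_channel(6));
        printf("channel 14 -> %d\n", lookup_channel(14));
        return 0;
}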
-
-/******************************************************************************
- *
- * EEPROM related functions
- *
-******************************************************************************/
-
-static int iwl_legacy_eeprom_verify_signature(struct iwl_priv *priv)
-{
- u32 gp = iwl_read32(priv, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
- int ret = 0;
-
- IWL_DEBUG_EEPROM(priv, "EEPROM signature=0x%08x\n", gp);
- switch (gp) {
- case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K:
- case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K:
- break;
- default:
- IWL_ERR(priv, "bad EEPROM signature, "
- "EEPROM_GP=0x%08x\n", gp);
- ret = -ENOENT;
- break;
- }
- return ret;
-}
-
-const u8
-*iwl_legacy_eeprom_query_addr(const struct iwl_priv *priv, size_t offset)
-{
- BUG_ON(offset >= priv->cfg->base_params->eeprom_size);
- return &priv->eeprom[offset];
-}
-EXPORT_SYMBOL(iwl_legacy_eeprom_query_addr);
-
-u16 iwl_legacy_eeprom_query16(const struct iwl_priv *priv, size_t offset)
-{
- if (!priv->eeprom)
- return 0;
- return (u16)priv->eeprom[offset] | ((u16)priv->eeprom[offset + 1] << 8);
-}
-EXPORT_SYMBOL(iwl_legacy_eeprom_query16);
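
iwl_legacy_eeprom_query16() above treats priv->eeprom as a little-endian image and assembles each 16-bit word from two consecutive bytes, low byte first. A stand-alone sketch of the same byte assembly, using a made-up four-byte image:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Same byte order as iwl_legacy_eeprom_query16(): low byte first. */
static uint16_t eeprom_query16(const uint8_t *eeprom, size_t offset)
{
        return (uint16_t)eeprom[offset] | ((uint16_t)eeprom[offset + 1] << 8);
}

int main(void)
{
        const uint8_t image[4] = { 0x2f, 0x00, 0x34, 0x12 };

        printf("0x%04x 0x%04x\n", eeprom_query16(image, 0),
               eeprom_query16(image, 2));   /* prints 0x002f 0x1234 */
        return 0;
}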
-
-/**
- * iwl_legacy_eeprom_init - read EEPROM contents
- *
- * Load the EEPROM contents from adapter into priv->eeprom
- *
- * NOTE: This routine uses the non-debug IO access functions.
- */
-int iwl_legacy_eeprom_init(struct iwl_priv *priv)
-{
- __le16 *e;
- u32 gp = iwl_read32(priv, CSR_EEPROM_GP);
- int sz;
- int ret;
- u16 addr;
-
- /* allocate eeprom */
- sz = priv->cfg->base_params->eeprom_size;
- IWL_DEBUG_EEPROM(priv, "NVM size = %d\n", sz);
- priv->eeprom = kzalloc(sz, GFP_KERNEL);
- if (!priv->eeprom) {
- ret = -ENOMEM;
- goto alloc_err;
- }
- e = (__le16 *)priv->eeprom;
-
- priv->cfg->ops->lib->apm_ops.init(priv);
-
- ret = iwl_legacy_eeprom_verify_signature(priv);
- if (ret < 0) {
- IWL_ERR(priv, "EEPROM not found, EEPROM_GP=0x%08x\n", gp);
- ret = -ENOENT;
- goto err;
- }
-
- /* Make sure driver (instead of uCode) is allowed to read EEPROM */
- ret = priv->cfg->ops->lib->eeprom_ops.acquire_semaphore(priv);
- if (ret < 0) {
- IWL_ERR(priv, "Failed to acquire EEPROM semaphore.\n");
- ret = -ENOENT;
- goto err;
- }
-
- /* eeprom is an array of 16bit values */
- for (addr = 0; addr < sz; addr += sizeof(u16)) {
- u32 r;
-
- _iwl_legacy_write32(priv, CSR_EEPROM_REG,
- CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
-
- ret = iwl_poll_bit(priv, CSR_EEPROM_REG,
- CSR_EEPROM_REG_READ_VALID_MSK,
- CSR_EEPROM_REG_READ_VALID_MSK,
- IWL_EEPROM_ACCESS_TIMEOUT);
- if (ret < 0) {
- IWL_ERR(priv, "Time out reading EEPROM[%d]\n",
- addr);
- goto done;
- }
- r = _iwl_legacy_read_direct32(priv, CSR_EEPROM_REG);
- e[addr / 2] = cpu_to_le16(r >> 16);
- }
-
- IWL_DEBUG_EEPROM(priv, "NVM Type: %s, version: 0x%x\n",
- "EEPROM",
- iwl_legacy_eeprom_query16(priv, EEPROM_VERSION));
-
- ret = 0;
-done:
- priv->cfg->ops->lib->eeprom_ops.release_semaphore(priv);
-
-err:
- if (ret)
- iwl_legacy_eeprom_free(priv);
- /* Reset chip to save power until we load uCode during "up". */
- iwl_legacy_apm_stop(priv);
-alloc_err:
- return ret;
-}
-EXPORT_SYMBOL(iwl_legacy_eeprom_init);
-
-void iwl_legacy_eeprom_free(struct iwl_priv *priv)
-{
- kfree(priv->eeprom);
- priv->eeprom = NULL;
-}
-EXPORT_SYMBOL(iwl_legacy_eeprom_free);
-
-static void iwl_legacy_init_band_reference(const struct iwl_priv *priv,
- int eep_band, int *eeprom_ch_count,
- const struct iwl_eeprom_channel **eeprom_ch_info,
- const u8 **eeprom_ch_index)
-{
- u32 offset = priv->cfg->ops->lib->
- eeprom_ops.regulatory_bands[eep_band - 1];
- switch (eep_band) {
- case 1: /* 2.4GHz band */
- *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_1);
- *eeprom_ch_info = (struct iwl_eeprom_channel *)
- iwl_legacy_eeprom_query_addr(priv, offset);
- *eeprom_ch_index = iwlegacy_eeprom_band_1;
- break;
- case 2: /* 4.9GHz band */
- *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_2);
- *eeprom_ch_info = (struct iwl_eeprom_channel *)
- iwl_legacy_eeprom_query_addr(priv, offset);
- *eeprom_ch_index = iwlegacy_eeprom_band_2;
- break;
- case 3: /* 5.2GHz band */
- *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_3);
- *eeprom_ch_info = (struct iwl_eeprom_channel *)
- iwl_legacy_eeprom_query_addr(priv, offset);
- *eeprom_ch_index = iwlegacy_eeprom_band_3;
- break;
- case 4: /* 5.5GHz band */
- *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_4);
- *eeprom_ch_info = (struct iwl_eeprom_channel *)
- iwl_legacy_eeprom_query_addr(priv, offset);
- *eeprom_ch_index = iwlegacy_eeprom_band_4;
- break;
- case 5: /* 5.7GHz band */
- *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_5);
- *eeprom_ch_info = (struct iwl_eeprom_channel *)
- iwl_legacy_eeprom_query_addr(priv, offset);
- *eeprom_ch_index = iwlegacy_eeprom_band_5;
- break;
- case 6: /* 2.4GHz ht40 channels */
- *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_6);
- *eeprom_ch_info = (struct iwl_eeprom_channel *)
- iwl_legacy_eeprom_query_addr(priv, offset);
- *eeprom_ch_index = iwlegacy_eeprom_band_6;
- break;
- case 7: /* 5 GHz ht40 channels */
- *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_7);
- *eeprom_ch_info = (struct iwl_eeprom_channel *)
- iwl_legacy_eeprom_query_addr(priv, offset);
- *eeprom_ch_index = iwlegacy_eeprom_band_7;
- break;
- default:
- BUG();
- }
-}
-
-#define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) \
- ? # x " " : "")
-/**
- * iwl_legacy_mod_ht40_chan_info - Copy ht40 channel info into driver's priv.
- *
- * Does not set up a command, or touch hardware.
- */
-static int iwl_legacy_mod_ht40_chan_info(struct iwl_priv *priv,
- enum ieee80211_band band, u16 channel,
- const struct iwl_eeprom_channel *eeprom_ch,
- u8 clear_ht40_extension_channel)
-{
- struct iwl_channel_info *ch_info;
-
- ch_info = (struct iwl_channel_info *)
- iwl_legacy_get_channel_info(priv, band, channel);
-
- if (!iwl_legacy_is_channel_valid(ch_info))
- return -1;
-
- IWL_DEBUG_EEPROM(priv, "HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm):"
- " Ad-Hoc %ssupported\n",
- ch_info->channel,
- iwl_legacy_is_channel_a_band(ch_info) ?
- "5.2" : "2.4",
- CHECK_AND_PRINT(IBSS),
- CHECK_AND_PRINT(ACTIVE),
- CHECK_AND_PRINT(RADAR),
- CHECK_AND_PRINT(WIDE),
- CHECK_AND_PRINT(DFS),
- eeprom_ch->flags,
- eeprom_ch->max_power_avg,
- ((eeprom_ch->flags & EEPROM_CHANNEL_IBSS)
- && !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ?
- "" : "not ");
-
- ch_info->ht40_eeprom = *eeprom_ch;
- ch_info->ht40_max_power_avg = eeprom_ch->max_power_avg;
- ch_info->ht40_flags = eeprom_ch->flags;
- if (eeprom_ch->flags & EEPROM_CHANNEL_VALID)
- ch_info->ht40_extension_channel &=
- ~clear_ht40_extension_channel;
-
- return 0;
-}
-
-#define CHECK_AND_PRINT_I(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \
- ? # x " " : "")
-
-/**
- * iwl_legacy_init_channel_map - Set up driver's info for all possible channels
- */
-int iwl_legacy_init_channel_map(struct iwl_priv *priv)
-{
- int eeprom_ch_count = 0;
- const u8 *eeprom_ch_index = NULL;
- const struct iwl_eeprom_channel *eeprom_ch_info = NULL;
- int band, ch;
- struct iwl_channel_info *ch_info;
-
- if (priv->channel_count) {
- IWL_DEBUG_EEPROM(priv, "Channel map already initialized.\n");
- return 0;
- }
-
- IWL_DEBUG_EEPROM(priv, "Initializing regulatory info from EEPROM\n");
-
- priv->channel_count =
- ARRAY_SIZE(iwlegacy_eeprom_band_1) +
- ARRAY_SIZE(iwlegacy_eeprom_band_2) +
- ARRAY_SIZE(iwlegacy_eeprom_band_3) +
- ARRAY_SIZE(iwlegacy_eeprom_band_4) +
- ARRAY_SIZE(iwlegacy_eeprom_band_5);
-
- IWL_DEBUG_EEPROM(priv, "Parsing data for %d channels.\n",
- priv->channel_count);
-
- priv->channel_info = kzalloc(sizeof(struct iwl_channel_info) *
- priv->channel_count, GFP_KERNEL);
- if (!priv->channel_info) {
- IWL_ERR(priv, "Could not allocate channel_info\n");
- priv->channel_count = 0;
- return -ENOMEM;
- }
-
- ch_info = priv->channel_info;
-
- /* Loop through the 5 EEPROM bands adding them in order to the
- * channel map we maintain (which contains more information than
- * what is in the EEPROM alone) */
- for (band = 1; band <= 5; band++) {
-
- iwl_legacy_init_band_reference(priv, band, &eeprom_ch_count,
- &eeprom_ch_info, &eeprom_ch_index);
-
- /* Loop through each band adding each of the channels */
- for (ch = 0; ch < eeprom_ch_count; ch++) {
- ch_info->channel = eeprom_ch_index[ch];
- ch_info->band = (band == 1) ? IEEE80211_BAND_2GHZ :
- IEEE80211_BAND_5GHZ;
-
- /* permanently store EEPROM's channel regulatory flags
- * and max power in channel info database. */
- ch_info->eeprom = eeprom_ch_info[ch];
-
- /* Copy the run-time flags so they are there even on
- * invalid channels */
- ch_info->flags = eeprom_ch_info[ch].flags;
- /* First write that ht40 is not enabled, and then enable
- * one by one */
- ch_info->ht40_extension_channel =
- IEEE80211_CHAN_NO_HT40;
-
- if (!(iwl_legacy_is_channel_valid(ch_info))) {
- IWL_DEBUG_EEPROM(priv,
- "Ch. %d Flags %x [%sGHz] - "
- "No traffic\n",
- ch_info->channel,
- ch_info->flags,
- iwl_legacy_is_channel_a_band(ch_info) ?
- "5.2" : "2.4");
- ch_info++;
- continue;
- }
-
- /* Initialize regulatory-based run-time data */
- ch_info->max_power_avg = ch_info->curr_txpow =
- eeprom_ch_info[ch].max_power_avg;
- ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
- ch_info->min_power = 0;
-
- IWL_DEBUG_EEPROM(priv, "Ch. %d [%sGHz] "
- "%s%s%s%s%s%s(0x%02x %ddBm):"
- " Ad-Hoc %ssupported\n",
- ch_info->channel,
- iwl_legacy_is_channel_a_band(ch_info) ?
- "5.2" : "2.4",
- CHECK_AND_PRINT_I(VALID),
- CHECK_AND_PRINT_I(IBSS),
- CHECK_AND_PRINT_I(ACTIVE),
- CHECK_AND_PRINT_I(RADAR),
- CHECK_AND_PRINT_I(WIDE),
- CHECK_AND_PRINT_I(DFS),
- eeprom_ch_info[ch].flags,
- eeprom_ch_info[ch].max_power_avg,
- ((eeprom_ch_info[ch].
- flags & EEPROM_CHANNEL_IBSS)
- && !(eeprom_ch_info[ch].
- flags & EEPROM_CHANNEL_RADAR))
- ? "" : "not ");
-
- ch_info++;
- }
- }
-
- /* Check if we do have HT40 channels */
- if (priv->cfg->ops->lib->eeprom_ops.regulatory_bands[5] ==
- EEPROM_REGULATORY_BAND_NO_HT40 &&
- priv->cfg->ops->lib->eeprom_ops.regulatory_bands[6] ==
- EEPROM_REGULATORY_BAND_NO_HT40)
- return 0;
-
- /* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */
- for (band = 6; band <= 7; band++) {
- enum ieee80211_band ieeeband;
-
- iwl_legacy_init_band_reference(priv, band, &eeprom_ch_count,
- &eeprom_ch_info, &eeprom_ch_index);
-
- /* EEPROM band 6 is 2.4, band 7 is 5 GHz */
- ieeeband =
- (band == 6) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
-
- /* Loop through each band adding each of the channels */
- for (ch = 0; ch < eeprom_ch_count; ch++) {
- /* Set up driver's info for lower half */
- iwl_legacy_mod_ht40_chan_info(priv, ieeeband,
- eeprom_ch_index[ch],
- &eeprom_ch_info[ch],
- IEEE80211_CHAN_NO_HT40PLUS);
-
- /* Set up driver's info for upper half */
- iwl_legacy_mod_ht40_chan_info(priv, ieeeband,
- eeprom_ch_index[ch] + 4,
- &eeprom_ch_info[ch],
- IEEE80211_CHAN_NO_HT40MINUS);
- }
- }
-
- return 0;
-}
-EXPORT_SYMBOL(iwl_legacy_init_channel_map);
-
-/*
- * iwl_legacy_free_channel_map - undo allocations in iwl_legacy_init_channel_map
- */
-void iwl_legacy_free_channel_map(struct iwl_priv *priv)
-{
- kfree(priv->channel_info);
- priv->channel_count = 0;
-}
-EXPORT_SYMBOL(iwl_legacy_free_channel_map);
-
-/**
- * iwl_legacy_get_channel_info - Find driver's private channel info
- *
- * Based on band and channel number.
- */
-const struct
-iwl_channel_info *iwl_legacy_get_channel_info(const struct iwl_priv *priv,
- enum ieee80211_band band, u16 channel)
-{
- int i;
-
- switch (band) {
- case IEEE80211_BAND_5GHZ:
- for (i = 14; i < priv->channel_count; i++) {
- if (priv->channel_info[i].channel == channel)
- return &priv->channel_info[i];
- }
- break;
- case IEEE80211_BAND_2GHZ:
- if (channel >= 1 && channel <= 14)
- return &priv->channel_info[channel - 1];
- break;
- default:
- BUG();
- }
-
- return NULL;
-}
-EXPORT_SYMBOL(iwl_legacy_get_channel_info);
diff --git a/drivers/net/wireless/iwlegacy/iwl-eeprom.h b/drivers/net/wireless/iwlegacy/iwl-eeprom.h
deleted file mode 100644
index c59c81002022..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-eeprom.h
+++ /dev/null
@@ -1,344 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *****************************************************************************/
-
-#ifndef __iwl_legacy_eeprom_h__
-#define __iwl_legacy_eeprom_h__
-
-#include <net/mac80211.h>
-
-struct iwl_priv;
-
-/*
- * EEPROM access time values:
- *
- * Driver initiates EEPROM read by writing byte address << 1 to CSR_EEPROM_REG.
- * Driver then polls CSR_EEPROM_REG for CSR_EEPROM_REG_READ_VALID_MSK (0x1).
- * When polling, wait 10 uSec between polling loops, up to a maximum 5000 uSec.
- * Driver reads 16-bit value from bits 31-16 of CSR_EEPROM_REG.
- */
-#define IWL_EEPROM_ACCESS_TIMEOUT 5000 /* uSec */
-
-#define IWL_EEPROM_SEM_TIMEOUT 10 /* microseconds */
-#define IWL_EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
-
-
-/*
- * Regulatory channel usage flags in EEPROM struct iwl4965_eeprom_channel.flags.
- *
- * IBSS and/or AP operation is allowed *only* on those channels with
- * (VALID && IBSS && ACTIVE && !RADAR). This restriction is in place because
- * RADAR detection is not supported by the 4965 driver, but is a
- * requirement for establishing a new network for legal operation on channels
- * requiring RADAR detection or restricting ACTIVE scanning.
- *
- * NOTE: "WIDE" flag does not indicate anything about "HT40" 40 MHz channels.
- * It only indicates that 20 MHz channel use is supported; HT40 channel
- * usage is indicated by a separate set of regulatory flags for each
- * HT40 channel pair.
- *
- * NOTE: Using a channel inappropriately will result in a uCode error!
- */
-#define IWL_NUM_TX_CALIB_GROUPS 5
-enum {
- EEPROM_CHANNEL_VALID = (1 << 0), /* usable for this SKU/geo */
- EEPROM_CHANNEL_IBSS = (1 << 1), /* usable as an IBSS channel */
- /* Bit 2 Reserved */
- EEPROM_CHANNEL_ACTIVE = (1 << 3), /* active scanning allowed */
- EEPROM_CHANNEL_RADAR = (1 << 4), /* radar detection required */
- EEPROM_CHANNEL_WIDE = (1 << 5), /* 20 MHz channel okay */
- /* Bit 6 Reserved (was Narrow Channel) */
- EEPROM_CHANNEL_DFS = (1 << 7), /* dynamic freq selection candidate */
-};
-
-/* SKU Capabilities */
-/* 3945 only */
-#define EEPROM_SKU_CAP_SW_RF_KILL_ENABLE (1 << 0)
-#define EEPROM_SKU_CAP_HW_RF_KILL_ENABLE (1 << 1)
-
-/* *regulatory* channel data format in eeprom, one for each channel.
- * There are separate entries for HT40 (40 MHz) vs. normal (20 MHz) channels. */
-struct iwl_eeprom_channel {
- u8 flags; /* EEPROM_CHANNEL_* flags copied from EEPROM */
- s8 max_power_avg; /* max power (dBm) on this chnl, limit 31 */
-} __packed;
-
-/* 3945 Specific */
-#define EEPROM_3945_EEPROM_VERSION (0x2f)
-
-/* 4965 has two radio transmitters (and 3 radio receivers) */
-#define EEPROM_TX_POWER_TX_CHAINS (2)
-
-/* 4965 has room for up to 8 sets of txpower calibration data */
-#define EEPROM_TX_POWER_BANDS (8)
-
-/* 4965 factory calibration measures txpower gain settings for
- * each of 3 target output levels */
-#define EEPROM_TX_POWER_MEASUREMENTS (3)
-
-/* 4965 Specific */
-/* 4965 driver does not work with txpower calibration version < 5 */
-#define EEPROM_4965_TX_POWER_VERSION (5)
-#define EEPROM_4965_EEPROM_VERSION (0x2f)
-#define EEPROM_4965_CALIB_VERSION_OFFSET (2*0xB6) /* 2 bytes */
-#define EEPROM_4965_CALIB_TXPOWER_OFFSET (2*0xE8) /* 48 bytes */
-#define EEPROM_4965_BOARD_REVISION (2*0x4F) /* 2 bytes */
-#define EEPROM_4965_BOARD_PBA (2*0x56+1) /* 9 bytes */
-
-/* 2.4 GHz */
-extern const u8 iwlegacy_eeprom_band_1[14];
-
-/*
- * factory calibration data for one txpower level, on one channel,
- * measured on one of the 2 tx chains (radio transmitter and associated
- * antenna). EEPROM contains:
- *
- * 1) Temperature (degrees Celsius) of device when measurement was made.
- *
- * 2) Gain table index used to achieve the target measurement power.
- * This refers to the "well-known" gain tables (see iwl-4965-hw.h).
- *
- * 3) Actual measured output power, in half-dBm ("34" = 17 dBm).
- *
- * 4) RF power amplifier detector level measurement (not used).
- */
-struct iwl_eeprom_calib_measure {
- u8 temperature; /* Device temperature (Celsius) */
- u8 gain_idx; /* Index into gain table */
- u8 actual_pow; /* Measured RF output power, half-dBm */
- s8 pa_det; /* Power amp detector level (not used) */
-} __packed;
-
-
-/*
- * measurement set for one channel. EEPROM contains:
- *
- * 1) Channel number measured
- *
- * 2) Measurements for each of 3 power levels for each of 2 radio transmitters
- * (a.k.a. "tx chains") (6 measurements altogether)
- */
-struct iwl_eeprom_calib_ch_info {
- u8 ch_num;
- struct iwl_eeprom_calib_measure
- measurements[EEPROM_TX_POWER_TX_CHAINS]
- [EEPROM_TX_POWER_MEASUREMENTS];
-} __packed;
-
-/*
- * txpower subband info.
- *
- * For each frequency subband, EEPROM contains the following:
- *
- * 1) First and last channels within range of the subband. "0" values
- * indicate that this sample set is not being used.
- *
- * 2) Sample measurement sets for 2 channels close to the range endpoints.
- */
-struct iwl_eeprom_calib_subband_info {
- u8 ch_from; /* channel number of lowest channel in subband */
- u8 ch_to; /* channel number of highest channel in subband */
- struct iwl_eeprom_calib_ch_info ch1;
- struct iwl_eeprom_calib_ch_info ch2;
-} __packed;
-
-
-/*
- * txpower calibration info. EEPROM contains:
- *
- * 1) Factory-measured saturation power levels (maximum levels at which
- * tx power amplifier can output a signal without too much distortion).
- * There is one level for 2.4 GHz band and one for 5 GHz band. These
- * values apply to all channels within each of the bands.
- *
- * 2) Factory-measured power supply voltage level. This is assumed to be
- * constant (i.e. same value applies to all channels/bands) while the
- * factory measurements are being made.
- *
- * 3) Up to 8 sets of factory-measured txpower calibration values.
- * These are for different frequency ranges, since txpower gain
- * characteristics of the analog radio circuitry vary with frequency.
- *
- * Not all sets need to be filled with data;
- * struct iwl_eeprom_calib_subband_info contains range of channels
- * (0 if unused) for each set of data.
- */
-struct iwl_eeprom_calib_info {
- u8 saturation_power24; /* half-dBm (e.g. "34" = 17 dBm) */
- u8 saturation_power52; /* half-dBm */
- __le16 voltage; /* signed */
- struct iwl_eeprom_calib_subband_info
- band_info[EEPROM_TX_POWER_BANDS];
-} __packed;
-
-
-/* General */
-#define EEPROM_DEVICE_ID (2*0x08) /* 2 bytes */
-#define EEPROM_MAC_ADDRESS (2*0x15) /* 6 bytes */
-#define EEPROM_BOARD_REVISION (2*0x35) /* 2 bytes */
-#define EEPROM_BOARD_PBA_NUMBER (2*0x3B+1) /* 9 bytes */
-#define EEPROM_VERSION (2*0x44) /* 2 bytes */
-#define EEPROM_SKU_CAP (2*0x45) /* 2 bytes */
-#define EEPROM_OEM_MODE (2*0x46) /* 2 bytes */
-#define EEPROM_WOWLAN_MODE (2*0x47) /* 2 bytes */
-#define EEPROM_RADIO_CONFIG (2*0x48) /* 2 bytes */
-#define EEPROM_NUM_MAC_ADDRESS (2*0x4C) /* 2 bytes */
-
-/* The following masks are to be applied on EEPROM_RADIO_CONFIG */
-#define EEPROM_RF_CFG_TYPE_MSK(x) (x & 0x3) /* bits 0-1 */
-#define EEPROM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */
-#define EEPROM_RF_CFG_DASH_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */
-#define EEPROM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */
-#define EEPROM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
-#define EEPROM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
-
-#define EEPROM_3945_RF_CFG_TYPE_MAX 0x0
-#define EEPROM_4965_RF_CFG_TYPE_MAX 0x1
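
The EEPROM_RF_CFG_*_MSK() macros above slice the 16-bit EEPROM_RADIO_CONFIG word into its fields. A small user-space sketch of the same decoding (the macro names below are local copies with added parentheses, and the example value is invented):

#include <stdint.h>
#include <stdio.h>

#define RF_CFG_TYPE(x)    ((x) & 0x3)           /* bits 0-1   */
#define RF_CFG_STEP(x)    (((x) >> 2) & 0x3)    /* bits 2-3   */
#define RF_CFG_DASH(x)    (((x) >> 4) & 0x3)    /* bits 4-5   */
#define RF_CFG_PNUM(x)    (((x) >> 6) & 0x3)    /* bits 6-7   */
#define RF_CFG_TX_ANT(x)  (((x) >> 8) & 0xF)    /* bits 8-11  */
#define RF_CFG_RX_ANT(x)  (((x) >> 12) & 0xF)   /* bits 12-15 */

int main(void)
{
        uint16_t radio_cfg = 0x3301;    /* made-up example value */

        printf("type=%u step=%u dash=%u pnum=%u tx_ant=0x%x rx_ant=0x%x\n",
               RF_CFG_TYPE(radio_cfg), RF_CFG_STEP(radio_cfg),
               RF_CFG_DASH(radio_cfg), RF_CFG_PNUM(radio_cfg),
               RF_CFG_TX_ANT(radio_cfg), RF_CFG_RX_ANT(radio_cfg));
        return 0;
}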
-
-/*
- * Per-channel regulatory data.
- *
- * Each channel that *might* be supported by iwl has a fixed location
- * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory
- * txpower (MSB).
- *
- * Entries immediately below are for 20 MHz channel width. HT40 (40 MHz)
- * channels (only for 4965, not supported by 3945) appear later in the EEPROM.
- *
- * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
- */
-#define EEPROM_REGULATORY_SKU_ID (2*0x60) /* 4 bytes */
-#define EEPROM_REGULATORY_BAND_1 (2*0x62) /* 2 bytes */
-#define EEPROM_REGULATORY_BAND_1_CHANNELS (2*0x63) /* 28 bytes */
-
-/*
- * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196,
- * 5.0 GHz channels 7, 8, 11, 12, 16
- * (4915-5080MHz) (none of these is ever supported)
- */
-#define EEPROM_REGULATORY_BAND_2 (2*0x71) /* 2 bytes */
-#define EEPROM_REGULATORY_BAND_2_CHANNELS (2*0x72) /* 26 bytes */
-
-/*
- * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
- * (5170-5320MHz)
- */
-#define EEPROM_REGULATORY_BAND_3 (2*0x7F) /* 2 bytes */
-#define EEPROM_REGULATORY_BAND_3_CHANNELS (2*0x80) /* 24 bytes */
-
-/*
- * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
- * (5500-5700MHz)
- */
-#define EEPROM_REGULATORY_BAND_4 (2*0x8C) /* 2 bytes */
-#define EEPROM_REGULATORY_BAND_4_CHANNELS (2*0x8D) /* 22 bytes */
-
-/*
- * 5.7 GHz channels 145, 149, 153, 157, 161, 165
- * (5725-5825MHz)
- */
-#define EEPROM_REGULATORY_BAND_5 (2*0x98) /* 2 bytes */
-#define EEPROM_REGULATORY_BAND_5_CHANNELS (2*0x99) /* 12 bytes */
-
-/*
- * 2.4 GHz HT40 channels 1 (5), 2 (6), 3 (7), 4 (8), 5 (9), 6 (10), 7 (11)
- *
- * The channel listed is the center of the lower 20 MHz half of the channel.
- * The overall center frequency is actually 2 channels (10 MHz) above that,
- * and the upper half of each HT40 channel is centered 4 channels (20 MHz) away
- * from the lower half; e.g. the upper half of HT40 channel 1 is channel 5,
- * and the overall HT40 channel width centers on channel 3.
- *
- * NOTE: The RXON command uses 20 MHz channel numbers to specify the
- * control channel to which to tune. RXON also specifies whether the
- * control channel is the upper or lower half of a HT40 channel.
- *
- * NOTE: 4965 does not support HT40 channels on 2.4 GHz.
- */
-#define EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS (2*0xA0) /* 14 bytes */
-
-/*
- * 5.2 GHz HT40 channels 36 (40), 44 (48), 52 (56), 60 (64),
- * 100 (104), 108 (112), 116 (120), 124 (128), 132 (136), 149 (153), 157 (161)
- */
-#define EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS (2*0xA8) /* 22 bytes */
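
Per the comment above, each HT40 entry names the lower 20 MHz half; the matching upper half sits 4 channel numbers (20 MHz) higher and the 40 MHz center 2 channel numbers above the listed one, which is also why iwl_legacy_init_channel_map() earlier in this diff programs both ch and ch + 4. A quick arithmetic check:

#include <stdio.h>

int main(void)
{
        int lower  = 36;            /* listed HT40 channel (lower half) */
        int upper  = lower + 4;     /* upper 20 MHz half: channel 40    */
        int center = lower + 2;     /* 40 MHz center: channel 38        */

        printf("HT40 pair %d/%d, centered on channel %d\n",
               lower, upper, center);
        return 0;
}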
-
-#define EEPROM_REGULATORY_BAND_NO_HT40 (0)
-
-struct iwl_eeprom_ops {
- const u32 regulatory_bands[7];
- int (*acquire_semaphore) (struct iwl_priv *priv);
- void (*release_semaphore) (struct iwl_priv *priv);
-};
-
-
-int iwl_legacy_eeprom_init(struct iwl_priv *priv);
-void iwl_legacy_eeprom_free(struct iwl_priv *priv);
-const u8 *iwl_legacy_eeprom_query_addr(const struct iwl_priv *priv,
- size_t offset);
-u16 iwl_legacy_eeprom_query16(const struct iwl_priv *priv, size_t offset);
-int iwl_legacy_init_channel_map(struct iwl_priv *priv);
-void iwl_legacy_free_channel_map(struct iwl_priv *priv);
-const struct iwl_channel_info *iwl_legacy_get_channel_info(
- const struct iwl_priv *priv,
- enum ieee80211_band band, u16 channel);
-
-#endif /* __iwl_legacy_eeprom_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-fh.h b/drivers/net/wireless/iwlegacy/iwl-fh.h
deleted file mode 100644
index 6e6091816e36..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-fh.h
+++ /dev/null
@@ -1,513 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-#ifndef __iwl_legacy_fh_h__
-#define __iwl_legacy_fh_h__
-
-/****************************/
-/* Flow Handler Definitions */
-/****************************/
-
-/**
- * This I/O area is directly read/writable by driver (e.g. Linux uses writel())
- * Addresses are offsets from device's PCI hardware base address.
- */
-#define FH_MEM_LOWER_BOUND (0x1000)
-#define FH_MEM_UPPER_BOUND (0x2000)
-
-/**
- * Keep-Warm (KW) buffer base address.
- *
- * Driver must allocate a 4KByte buffer that is used by 4965 for keeping the
- * host DRAM powered on (via dummy accesses to DRAM) to maintain low-latency
- * DRAM access when 4965 is Txing or Rxing. The dummy accesses prevent host
- * from going into a power-savings mode that would cause higher DRAM latency,
- * and possible data over/under-runs, before all Tx/Rx is complete.
- *
- * Driver loads FH_KW_MEM_ADDR_REG with the physical address (bits 35:4)
- * of the buffer, which must be 4K aligned. Once this is set up, the 4965
- * automatically invokes keep-warm accesses when normal accesses might not
- * be sufficient to maintain fast DRAM response.
- *
- * Bit fields:
- * 31-0: Keep-warm buffer physical base address [35:4], must be 4K aligned
- */
-#define FH_KW_MEM_ADDR_REG (FH_MEM_LOWER_BOUND + 0x97C)
-
-
-/**
- * TFD Circular Buffers Base (CBBC) addresses
- *
- * 4965 has 16 base pointer registers, one for each of 16 host-DRAM-resident
- * circular buffers (CBs/queues) containing Transmit Frame Descriptors (TFDs)
- * (see struct iwl_tfd_frame). These 16 pointer registers are offset by 0x04
- * bytes from one another. Each TFD circular buffer in DRAM must be 256-byte
- * aligned (address bits 0-7 must be 0).
- *
- * Bit fields in each pointer register:
- * 27-0: TFD CB physical base address [35:8], must be 256-byte aligned
- */
-#define FH_MEM_CBBC_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0)
-#define FH_MEM_CBBC_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xA10)
-
-/* Find TFD CB base pointer for given queue (range 0-15). */
-#define FH_MEM_CBBC_QUEUE(x) (FH_MEM_CBBC_LOWER_BOUND + (x) * 0x4)
-
-
-/**
- * Rx SRAM Control and Status Registers (RSCSR)
- *
- * These registers provide handshake between driver and 4965 for the Rx queue
- * (this queue handles *all* command responses, notifications, Rx data, etc.
- * sent from 4965 uCode to host driver). Unlike Tx, there is only one Rx
- * queue, and only one Rx DMA/FIFO channel. Also unlike Tx, which can
- * concatenate up to 20 DRAM buffers to form a Tx frame, each Receive Buffer
- * Descriptor (RBD) points to only one Rx Buffer (RB); there is a 1:1
- * mapping between RBDs and RBs.
- *
- * Driver must allocate host DRAM memory for the following, and set the
- * physical address of each into 4965 registers:
- *
- * 1) Receive Buffer Descriptor (RBD) circular buffer (CB), typically with 256
- * entries (although any power of 2, up to 4096, is selectable by driver).
- * Each entry (1 dword) points to a receive buffer (RB) of consistent size
- * (typically 4K, although 8K or 16K are also selectable by driver).
- * Driver sets up RB size and number of RBDs in the CB via Rx config
- * register FH_MEM_RCSR_CHNL0_CONFIG_REG.
- *
- * Bit fields within one RBD:
- * 27-0: Receive Buffer physical address bits [35:8], 256-byte aligned
- *
- * Driver sets physical address [35:8] of base of RBD circular buffer
- * into FH_RSCSR_CHNL0_RBDCB_BASE_REG [27:0].
- *
- * 2) Rx status buffer, 8 bytes, in which 4965 indicates which Rx Buffers
- * (RBs) have been filled, via a "write pointer", actually the index of
- * the RB's corresponding RBD within the circular buffer. Driver sets
- * physical address [35:4] into FH_RSCSR_CHNL0_STTS_WPTR_REG [31:0].
- *
- * Bit fields in lower dword of Rx status buffer (upper dword not used
- * by driver; see struct iwl4965_shared, val0):
- * 31-12: Not used by driver
- * 11- 0: Index of last filled Rx buffer descriptor
- * (4965 writes, driver reads this value)
- *
- * As the driver prepares Receive Buffers (RBs) for 4965 to fill, driver must
- * enter pointers to these RBs into contiguous RBD circular buffer entries,
- * and update the 4965's "write" index register,
- * FH_RSCSR_CHNL0_RBDCB_WPTR_REG.
- *
- * This "write" index corresponds to the *next* RBD that the driver will make
- * available, i.e. one RBD past the tail of the ready-to-fill RBDs within
- * the circular buffer. This value should initially be 0 (before preparing any
- * RBs), should be 8 after preparing the first 8 RBs (for example), and must
- * wrap back to 0 at the end of the circular buffer (but don't wrap before
- * "read" index has advanced past 1! See below).
- * NOTE: 4965 EXPECTS THE WRITE INDEX TO BE INCREMENTED IN MULTIPLES OF 8.
- *
- * As the 4965 fills RBs (referenced from contiguous RBDs within the circular
- * buffer), it updates the Rx status buffer in host DRAM (item 2 above)
- * to tell the driver the index of the latest filled RBD. The driver must
- * read this "read" index from DRAM after receiving an Rx interrupt from 4965.
- *
- * The driver must also internally keep track of a third index, which is the
- * next RBD to process. When receiving an Rx interrupt, driver should process
- * all filled but unprocessed RBs up to, but not including, the RB
- * corresponding to the "read" index. For example, if "read" index becomes "1",
- * driver may process the RB pointed to by RBD 0. Depending on volume of
- * traffic, there may be many RBs to process.
- *
- * If read index == write index, 4965 thinks there is no room to put new data.
- * Due to this, the maximum number of filled RBs is 255, instead of 256. To
- * be safe, make sure that there is a gap of at least 2 RBDs between "write"
- * and "read" indexes; that is, make sure that there are no more than 254
- * buffers waiting to be filled.
- */
-#define FH_MEM_RSCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xBC0)
-#define FH_MEM_RSCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xC00)
-#define FH_MEM_RSCSR_CHNL0 (FH_MEM_RSCSR_LOWER_BOUND)
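
A sketch of just the index arithmetic the comment above requires: advance the write pointer in multiples of 8, wrap at the end of the 256-entry buffer, and track how many RBs are outstanding so the 254-buffer limit (a gap of at least 2 RBDs) can be enforced. The driver's real Rx restock path is not part of this file:

#include <stdio.h>

#define RX_QUEUE_SIZE 256   /* RBD circular-buffer entries (power of 2) */

int main(void)
{
        unsigned write = 248, read = 250;

        /* Advance in multiples of 8, wrapping at the end of the buffer. */
        write = (write + 8) & (RX_QUEUE_SIZE - 1);              /* 248 -> 0 */

        /*
         * RBs the driver has made available but the device has not yet
         * reported as filled; the comment above requires that this never
         * exceed 254 (i.e. keep a gap of at least 2 RBDs).
         */
        unsigned outstanding = (write - read) & (RX_QUEUE_SIZE - 1);

        printf("write=%u outstanding=%u\n", write, outstanding);
        return 0;
}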
-
-/**
- * Physical base address of 8-byte Rx Status buffer.
- * Bit fields:
- * 31-0: Rx status buffer physical base address [35:4], must be 16-byte aligned.
- */
-#define FH_RSCSR_CHNL0_STTS_WPTR_REG (FH_MEM_RSCSR_CHNL0)
-
-/**
- * Physical base address of Rx Buffer Descriptor Circular Buffer.
- * Bit fields:
- * 27-0: RBD CB physical base address [35:8], must be 256-byte aligned.
- */
-#define FH_RSCSR_CHNL0_RBDCB_BASE_REG (FH_MEM_RSCSR_CHNL0 + 0x004)
-
-/**
- * Rx write pointer (index, really!).
- * Bit fields:
- * 11-0: Index of driver's most recent prepared-to-be-filled RBD, + 1.
- * NOTE: For 256-entry circular buffer, use only bits [7:0].
- */
-#define FH_RSCSR_CHNL0_RBDCB_WPTR_REG (FH_MEM_RSCSR_CHNL0 + 0x008)
-#define FH_RSCSR_CHNL0_WPTR (FH_RSCSR_CHNL0_RBDCB_WPTR_REG)
-
-
-/**
- * Rx Config/Status Registers (RCSR)
- * Rx Config Reg for channel 0 (only channel used)
- *
- * Driver must initialize FH_MEM_RCSR_CHNL0_CONFIG_REG as follows for
- * normal operation (see bit fields).
- *
- * Clearing FH_MEM_RCSR_CHNL0_CONFIG_REG to 0 turns off Rx DMA.
- * Driver should poll FH_MEM_RSSR_RX_STATUS_REG for
- * FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (bit 24) before continuing.
- *
- * Bit fields:
- * 31-30: Rx DMA channel enable: '00' off/pause, '01' pause at end of frame,
- * '10' operate normally
- * 29-24: reserved
- * 23-20: # RBDs in circular buffer = 2^value; use "8" for 256 RBDs (normal),
- * min "5" for 32 RBDs, max "12" for 4096 RBDs.
- * 19-18: reserved
- * 17-16: size of each receive buffer; '00' 4K (normal), '01' 8K,
- * '10' 12K, '11' 16K.
- * 15-14: reserved
- * 13-12: IRQ destination; '00' none, '01' host driver (normal operation)
- * 11- 4: timeout for closing Rx buffer and interrupting host (units 32 usec)
- * typical value 0x10 (about 1/2 msec)
- * 3- 0: reserved
- */
-#define FH_MEM_RCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xC00)
-#define FH_MEM_RCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xCC0)
-#define FH_MEM_RCSR_CHNL0 (FH_MEM_RCSR_LOWER_BOUND)
-
-#define FH_MEM_RCSR_CHNL0_CONFIG_REG (FH_MEM_RCSR_CHNL0)
-
-#define FH_RCSR_CHNL0_RX_CONFIG_RB_TIMEOUT_MSK (0x00000FF0) /* bits 4-11 */
-#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_MSK (0x00001000) /* bits 12 */
-#define FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK (0x00008000) /* bit 15 */
-#define FH_RCSR_CHNL0_RX_CONFIG_RB_SIZE_MSK (0x00030000) /* bits 16-17 */
-#define FH_RCSR_CHNL0_RX_CONFIG_RBDBC_SIZE_MSK (0x00F00000) /* bits 20-23 */
-#define FH_RCSR_CHNL0_RX_CONFIG_DMA_CHNL_EN_MSK (0xC0000000) /* bits 30-31*/
-
-#define FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS (20)
-#define FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS (4)
-#define RX_RB_TIMEOUT (0x10)
-
-#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_VAL (0x00000000)
-#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_EOF_VAL (0x40000000)
-#define FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL (0x80000000)
-
-#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K (0x00000000)
-#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K (0x00010000)
-#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K (0x00020000)
-#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_16K (0x00030000)
-
-#define FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY (0x00000004)
-#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL (0x00000000)
-#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL (0x00001000)
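
One plausible way to compose FH_MEM_RCSR_CHNL0_CONFIG_REG from the bit-field description and the value defines above: 256 RBDs, 4K buffers, interrupt the host, roughly 0.5 ms Rx timeout. This illustrates the register layout only, not a claim about the exact bits the driver sets; the constants are copied locally so the sketch builds stand-alone:

#include <stdint.h>
#include <stdio.h>

/* Values copied from the definitions above. */
#define CHNL_EN_ENABLE   0x80000000u   /* '10' in bits 31-30: operate normally */
#define RB_SIZE_4K       0x00000000u   /* '00' in bits 17-16                   */
#define IRQ_DEST_HOST    0x00001000u   /* bit 12: interrupt host driver        */
#define RBDCB_SIZE_POS   20            /* log2(number of RBDs) field           */
#define IRQ_RBTH_POS     4             /* Rx-buffer timeout field              */
#define RB_TIMEOUT       0x10          /* ~0.5 ms, in 32 usec units            */

int main(void)
{
        uint32_t rb_count_log2 = 8;     /* 2^8 = 256 RBDs */
        uint32_t cfg = CHNL_EN_ENABLE | RB_SIZE_4K | IRQ_DEST_HOST |
                       (rb_count_log2 << RBDCB_SIZE_POS) |
                       (RB_TIMEOUT << IRQ_RBTH_POS);

        printf("FH_MEM_RCSR_CHNL0_CONFIG_REG candidate: 0x%08x\n", cfg);
        return 0;
}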
-
-#define FH_RSCSR_FRAME_SIZE_MSK (0x00003FFF) /* bits 0-13 */
-
-/**
- * Rx Shared Status Registers (RSSR)
- *
- * After stopping Rx DMA channel (writing 0 to
- * FH_MEM_RCSR_CHNL0_CONFIG_REG), driver must poll
- * FH_MEM_RSSR_RX_STATUS_REG until Rx channel is idle.
- *
- * Bit fields:
- * 24: 1 = Channel 0 is idle
- *
- * FH_MEM_RSSR_SHARED_CTRL_REG and FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV
- * contain default values that should not be altered by the driver.
- */
-#define FH_MEM_RSSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xC40)
-#define FH_MEM_RSSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xD00)
-
-#define FH_MEM_RSSR_SHARED_CTRL_REG (FH_MEM_RSSR_LOWER_BOUND)
-#define FH_MEM_RSSR_RX_STATUS_REG (FH_MEM_RSSR_LOWER_BOUND + 0x004)
-#define FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV\
- (FH_MEM_RSSR_LOWER_BOUND + 0x008)
-
-#define FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000)
-
-#define FH_MEM_TFDIB_REG1_ADDR_BITSHIFT 28
-
-/* TFDB Area - TFDs buffer table */
-#define FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK (0xFFFFFFFF)
-#define FH_TFDIB_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x900)
-#define FH_TFDIB_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0x958)
-#define FH_TFDIB_CTRL0_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl))
-#define FH_TFDIB_CTRL1_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl) + 0x4)
-
-/**
- * Transmit DMA Channel Control/Status Registers (TCSR)
- *
- * 4965 has one configuration register for each of 8 Tx DMA/FIFO channels
- * supported in hardware (don't confuse these with the 16 Tx queues in DRAM,
- * which feed the DMA/FIFO channels); config regs are separated by 0x20 bytes.
- *
- * To use a Tx DMA channel, driver must initialize its
- * FH_TCSR_CHNL_TX_CONFIG_REG(chnl) with:
- *
- * FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
- * FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL
- *
- * All other bits should be 0.
- *
- * Bit fields:
- * 31-30: Tx DMA channel enable: '00' off/pause, '01' pause at end of frame,
- * '10' operate normally
- * 29- 4: Reserved, set to "0"
- * 3: Enable internal DMA requests (1, normal operation), disable (0)
- * 2- 0: Reserved, set to "0"
- */
-#define FH_TCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xD00)
-#define FH_TCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xE60)
-
-/* Find Control/Status reg for given Tx DMA/FIFO channel */
-#define FH49_TCSR_CHNL_NUM (7)
-#define FH50_TCSR_CHNL_NUM (8)
-
-/* TCSR: tx_config register values */
-#define FH_TCSR_CHNL_TX_CONFIG_REG(_chnl) \
- (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl))
-#define FH_TCSR_CHNL_TX_CREDIT_REG(_chnl) \
- (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x4)
-#define FH_TCSR_CHNL_TX_BUF_STS_REG(_chnl) \
- (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x8)
-
-#define FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF (0x00000000)
-#define FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRV (0x00000001)
-
-#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE (0x00000000)
-#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE (0x00000008)
-
-#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_NOINT (0x00000000)
-#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD (0x00100000)
-#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000)
-
-#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT (0x00000000)
-#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_ENDTFD (0x00400000)
-#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_IFTFD (0x00800000)
-
-#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000)
-#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE_EOF (0x40000000)
-#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000)
-
-#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_EMPTY (0x00000000)
-#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_WAIT (0x00002000)
-#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00000003)
-
-#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM (20)
-#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX (12)
-
-/**
- * Tx Shared Status Registers (TSSR)
- *
- * After stopping Tx DMA channel (writing 0 to
- * FH_TCSR_CHNL_TX_CONFIG_REG(chnl)), driver must poll
- * FH_TSSR_TX_STATUS_REG until selected Tx channel is idle
- * (channel's buffers empty | no pending requests).
- *
- * Bit fields:
- * 31-24: 1 = Channel buffers empty (channel 7:0)
- * 23-16: 1 = No pending requests (channel 7:0)
- */
-#define FH_TSSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xEA0)
-#define FH_TSSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xEC0)
-
-#define FH_TSSR_TX_STATUS_REG (FH_TSSR_LOWER_BOUND + 0x010)
-
-/**
- * Bit fields for TSSR(Tx Shared Status & Control) error status register:
- * 31: Indicates an address error when accessing internal memory
- * uCode/driver must write "1" in order to clear this flag
- * 30: Indicates that Host did not send the expected number of dwords to FH
- * uCode/driver must write "1" in order to clear this flag
- * 16-9: Each status bit is for one channel. Indicates that an (Error) ActDMA
- * command was received from the scheduler while the TRB was already full
- * with previous command
- * uCode/driver must write "1" in order to clear this flag
- * 7-0: Each status bit indicates a channel's TxCredit error. When an error
- * bit is set, it indicates that the FH has received a full indication
- * from the RTC TxFIFO and the current value of the TxCredit counter was
- * not equal to zero. This means that the credit mechanism was not
- * synchronized to the TxFIFO status
- * uCode/driver must write "1" in order to clear this flag
- */
-#define FH_TSSR_TX_ERROR_REG (FH_TSSR_LOWER_BOUND + 0x018)
-
-#define FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_chnl) ((1 << (_chnl)) << 16)
-
-/* Tx service channels */
-#define FH_SRVC_CHNL (9)
-#define FH_SRVC_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9C8)
-#define FH_SRVC_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0)
-#define FH_SRVC_CHNL_SRAM_ADDR_REG(_chnl) \
- (FH_SRVC_LOWER_BOUND + ((_chnl) - 9) * 0x4)
-
-#define FH_TX_CHICKEN_BITS_REG (FH_MEM_LOWER_BOUND + 0xE98)
-/* Instruct FH to increment the retry count of a packet when
- * it is brought from the memory to TX-FIFO
- */
-#define FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN (0x00000002)
-
-#define RX_QUEUE_SIZE 256
-#define RX_QUEUE_MASK 255
-#define RX_QUEUE_SIZE_LOG 8
-
-/*
- * RX related structures and functions
- */
-#define RX_FREE_BUFFERS 64
-#define RX_LOW_WATERMARK 8
-
-/* Size of one Rx buffer in host DRAM */
-#define IWL_RX_BUF_SIZE_3K (3 * 1000) /* 3945 only */
-#define IWL_RX_BUF_SIZE_4K (4 * 1024)
-#define IWL_RX_BUF_SIZE_8K (8 * 1024)
-
-/**
- * struct iwl_rb_status - reserve buffer status
- * host memory mapped FH registers
- * @closed_rb_num [0:11] - Indicates the index of the RB which was closed
- * @closed_fr_num [0:11] - Indicates the index of the RX Frame which was closed
- * @finished_rb_num [0:11] - Indicates the index of the current RB
- * to which the last frame was written
- * @finished_fr_num [0:11] - Indicates the index of the RX Frame
- * which was transferred
- */
-struct iwl_rb_status {
- __le16 closed_rb_num;
- __le16 closed_fr_num;
- __le16 finished_rb_num;
- __le16 finished_fr_num;
- __le32 __unused; /* 3945 only */
-} __packed;
-
-
-#define TFD_QUEUE_SIZE_MAX (256)
-#define TFD_QUEUE_SIZE_BC_DUP (64)
-#define TFD_QUEUE_BC_SIZE (TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP)
-#define IWL_TX_DMA_MASK DMA_BIT_MASK(36)
-#define IWL_NUM_OF_TBS 20
-
-static inline u8 iwl_legacy_get_dma_hi_addr(dma_addr_t addr)
-{
- return (sizeof(addr) > sizeof(u32) ? (addr >> 16) >> 16 : 0) & 0xF;
-}
-/**
- * struct iwl_tfd_tb transmit buffer descriptor within transmit frame descriptor
- *
- * This structure contains dma address and length of transmission address
- *
- * @lo: low [31:0] portion of the dma address of the TX buffer;
- * every even-numbered entry is unaligned on a 16-bit boundary
- * @hi_n_len: bits 0-3 hold the [35:32] portion of the dma address,
- * bits 4-15 hold the length of the tx buffer
- */
-struct iwl_tfd_tb {
- __le32 lo;
- __le16 hi_n_len;
-} __packed;
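
A stand-alone sketch of how a 36-bit DMA address and a length map onto lo/hi_n_len as the field comments describe (address bits [31:0] in lo, bits [35:32] in the low nibble of hi_n_len, length in bits 4-15). The helper name is hypothetical, endianness conversion (__le32/__le16) is ignored, and the kernel helpers that actually fill a TB are not in this hunk:

#include <stdint.h>
#include <stdio.h>

struct tfd_tb {
        uint32_t lo;        /* DMA address bits [31:0]                   */
        uint16_t hi_n_len;  /* bits 0-3: addr [35:32], bits 4-15: length */
};

/* len must be < 4096 to fit in the 12-bit length field. */
static void tb_set(struct tfd_tb *tb, uint64_t addr, uint16_t len)
{
        tb->lo = (uint32_t)addr;
        tb->hi_n_len = (uint16_t)(((addr >> 32) & 0xF) | (len << 4));
}

int main(void)
{
        struct tfd_tb tb;

        tb_set(&tb, 0x8ABCD1234ULL, 2048);
        printf("lo=0x%08x hi_n_len=0x%04x\n", tb.lo, tb.hi_n_len);
        return 0;
}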
-
-/**
- * struct iwl_tfd
- *
- * Transmit Frame Descriptor (TFD)
- *
- * @ __reserved1[3] reserved
- * @ num_tbs 0-4 number of active tbs
- * 5 reserved
- * 6-7 padding (not used)
- * @ tbs[20] transmit frame buffer descriptors
- * @ __pad padding
- *
- * Each Tx queue uses a circular buffer of 256 TFDs stored in host DRAM.
- * Both driver and device share these circular buffers, each of which must be
- * contiguous 256 TFDs x 128 bytes-per-TFD = 32 KBytes
- *
- * Driver must indicate the physical address of the base of each
- * circular buffer via the FH_MEM_CBBC_QUEUE registers.
- *
- * Each TFD contains pointer/size information for up to 20 data buffers
- * in host DRAM. These buffers collectively contain the (one) frame described
- * by the TFD. Each buffer must be a single contiguous block of memory within
- * itself, but buffers may be scattered in host DRAM. Each buffer has max size
- * of (4K - 4). The 4965 concatenates all of a TFD's buffers into a single
- * Tx frame, up to 8 KBytes in size.
- *
- * A maximum of 255 (not 256!) TFDs may be on a queue waiting for Tx.
- */
-struct iwl_tfd {
- u8 __reserved1[3];
- u8 num_tbs;
- struct iwl_tfd_tb tbs[IWL_NUM_OF_TBS];
- __le32 __pad;
-} __packed;
-
-/* Keep Warm Size */
-#define IWL_KW_SIZE 0x1000 /* 4k */
-
-#endif /* !__iwl_legacy_fh_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-hcmd.c b/drivers/net/wireless/iwlegacy/iwl-hcmd.c
deleted file mode 100644
index ce1fc9feb61f..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-hcmd.c
+++ /dev/null
@@ -1,271 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <net/mac80211.h>
-
-#include "iwl-dev.h"
-#include "iwl-debug.h"
-#include "iwl-eeprom.h"
-#include "iwl-core.h"
-
-
-const char *iwl_legacy_get_cmd_string(u8 cmd)
-{
- switch (cmd) {
- IWL_CMD(REPLY_ALIVE);
- IWL_CMD(REPLY_ERROR);
- IWL_CMD(REPLY_RXON);
- IWL_CMD(REPLY_RXON_ASSOC);
- IWL_CMD(REPLY_QOS_PARAM);
- IWL_CMD(REPLY_RXON_TIMING);
- IWL_CMD(REPLY_ADD_STA);
- IWL_CMD(REPLY_REMOVE_STA);
- IWL_CMD(REPLY_WEPKEY);
- IWL_CMD(REPLY_3945_RX);
- IWL_CMD(REPLY_TX);
- IWL_CMD(REPLY_RATE_SCALE);
- IWL_CMD(REPLY_LEDS_CMD);
- IWL_CMD(REPLY_TX_LINK_QUALITY_CMD);
- IWL_CMD(REPLY_CHANNEL_SWITCH);
- IWL_CMD(CHANNEL_SWITCH_NOTIFICATION);
- IWL_CMD(REPLY_SPECTRUM_MEASUREMENT_CMD);
- IWL_CMD(SPECTRUM_MEASURE_NOTIFICATION);
- IWL_CMD(POWER_TABLE_CMD);
- IWL_CMD(PM_SLEEP_NOTIFICATION);
- IWL_CMD(PM_DEBUG_STATISTIC_NOTIFIC);
- IWL_CMD(REPLY_SCAN_CMD);
- IWL_CMD(REPLY_SCAN_ABORT_CMD);
- IWL_CMD(SCAN_START_NOTIFICATION);
- IWL_CMD(SCAN_RESULTS_NOTIFICATION);
- IWL_CMD(SCAN_COMPLETE_NOTIFICATION);
- IWL_CMD(BEACON_NOTIFICATION);
- IWL_CMD(REPLY_TX_BEACON);
- IWL_CMD(REPLY_TX_PWR_TABLE_CMD);
- IWL_CMD(REPLY_BT_CONFIG);
- IWL_CMD(REPLY_STATISTICS_CMD);
- IWL_CMD(STATISTICS_NOTIFICATION);
- IWL_CMD(CARD_STATE_NOTIFICATION);
- IWL_CMD(MISSED_BEACONS_NOTIFICATION);
- IWL_CMD(REPLY_CT_KILL_CONFIG_CMD);
- IWL_CMD(SENSITIVITY_CMD);
- IWL_CMD(REPLY_PHY_CALIBRATION_CMD);
- IWL_CMD(REPLY_RX_PHY_CMD);
- IWL_CMD(REPLY_RX_MPDU_CMD);
- IWL_CMD(REPLY_RX);
- IWL_CMD(REPLY_COMPRESSED_BA);
- default:
- return "UNKNOWN";
-
- }
-}
-EXPORT_SYMBOL(iwl_legacy_get_cmd_string);
-
-#define HOST_COMPLETE_TIMEOUT (HZ / 2)
-
-static void iwl_legacy_generic_cmd_callback(struct iwl_priv *priv,
- struct iwl_device_cmd *cmd,
- struct iwl_rx_packet *pkt)
-{
- if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
- IWL_ERR(priv, "Bad return from %s (0x%08X)\n",
- iwl_legacy_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
- return;
- }
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- switch (cmd->hdr.cmd) {
- case REPLY_TX_LINK_QUALITY_CMD:
- case SENSITIVITY_CMD:
- IWL_DEBUG_HC_DUMP(priv, "back from %s (0x%08X)\n",
- iwl_legacy_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
- break;
- default:
- IWL_DEBUG_HC(priv, "back from %s (0x%08X)\n",
- iwl_legacy_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
- }
-#endif
-}
-
-static int
-iwl_legacy_send_cmd_async(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
-{
- int ret;
-
- BUG_ON(!(cmd->flags & CMD_ASYNC));
-
- /* An asynchronous command can not expect an SKB to be set. */
- BUG_ON(cmd->flags & CMD_WANT_SKB);
-
- /* Assign a generic callback if one is not provided */
- if (!cmd->callback)
- cmd->callback = iwl_legacy_generic_cmd_callback;
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return -EBUSY;
-
- ret = iwl_legacy_enqueue_hcmd(priv, cmd);
- if (ret < 0) {
- IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n",
- iwl_legacy_get_cmd_string(cmd->id), ret);
- return ret;
- }
- return 0;
-}
-
-int iwl_legacy_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
-{
- int cmd_idx;
- int ret;
-
- lockdep_assert_held(&priv->mutex);
-
- BUG_ON(cmd->flags & CMD_ASYNC);
-
- /* A synchronous command can not have a callback set. */
- BUG_ON(cmd->callback);
-
- IWL_DEBUG_INFO(priv, "Attempting to send sync command %s\n",
- iwl_legacy_get_cmd_string(cmd->id));
-
- set_bit(STATUS_HCMD_ACTIVE, &priv->status);
- IWL_DEBUG_INFO(priv, "Setting HCMD_ACTIVE for command %s\n",
- iwl_legacy_get_cmd_string(cmd->id));
-
- cmd_idx = iwl_legacy_enqueue_hcmd(priv, cmd);
- if (cmd_idx < 0) {
- ret = cmd_idx;
- IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n",
- iwl_legacy_get_cmd_string(cmd->id), ret);
- goto out;
- }
-
- ret = wait_event_timeout(priv->wait_command_queue,
- !test_bit(STATUS_HCMD_ACTIVE, &priv->status),
- HOST_COMPLETE_TIMEOUT);
- if (!ret) {
- if (test_bit(STATUS_HCMD_ACTIVE, &priv->status)) {
- IWL_ERR(priv,
- "Error sending %s: time out after %dms.\n",
- iwl_legacy_get_cmd_string(cmd->id),
- jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
-
- clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
- IWL_DEBUG_INFO(priv,
- "Clearing HCMD_ACTIVE for command %s\n",
- iwl_legacy_get_cmd_string(cmd->id));
- ret = -ETIMEDOUT;
- goto cancel;
- }
- }
-
- if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
- IWL_ERR(priv, "Command %s aborted: RF KILL Switch\n",
- iwl_legacy_get_cmd_string(cmd->id));
- ret = -ECANCELED;
- goto fail;
- }
- if (test_bit(STATUS_FW_ERROR, &priv->status)) {
- IWL_ERR(priv, "Command %s failed: FW Error\n",
- iwl_legacy_get_cmd_string(cmd->id));
- ret = -EIO;
- goto fail;
- }
- if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
- IWL_ERR(priv, "Error: Response NULL in '%s'\n",
- iwl_legacy_get_cmd_string(cmd->id));
- ret = -EIO;
- goto cancel;
- }
-
- ret = 0;
- goto out;
-
-cancel:
- if (cmd->flags & CMD_WANT_SKB) {
- /*
- * Cancel the CMD_WANT_SKB flag for the cmd in the
- * TX cmd queue. Otherwise in case the cmd comes
- * in later, it will possibly set an invalid
- * address (cmd->meta.source).
- */
- priv->txq[priv->cmd_queue].meta[cmd_idx].flags &=
- ~CMD_WANT_SKB;
- }
-fail:
- if (cmd->reply_page) {
- iwl_legacy_free_pages(priv, cmd->reply_page);
- cmd->reply_page = 0;
- }
-out:
- return ret;
-}
-EXPORT_SYMBOL(iwl_legacy_send_cmd_sync);
-
-int iwl_legacy_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
-{
- if (cmd->flags & CMD_ASYNC)
- return iwl_legacy_send_cmd_async(priv, cmd);
-
- return iwl_legacy_send_cmd_sync(priv, cmd);
-}
-EXPORT_SYMBOL(iwl_legacy_send_cmd);
-
-int
-iwl_legacy_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len, const void *data)
-{
- struct iwl_host_cmd cmd = {
- .id = id,
- .len = len,
- .data = data,
- };
-
- return iwl_legacy_send_cmd_sync(priv, &cmd);
-}
-EXPORT_SYMBOL(iwl_legacy_send_cmd_pdu);
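
An illustrative caller fragment for the helper above, not compilable outside the driver: the payload layout is hypothetical, REPLY_BT_CONFIG is simply taken from the command-string list earlier in this file, and priv->mutex must already be held because iwl_legacy_send_cmd_sync() asserts it:

/* Illustrative fragment only -- assumes driver context with priv->mutex
 * held.  The payload is a stand-in, not the real BT config layout. */
static int example_send_bt_config(struct iwl_priv *priv)
{
        u8 payload[4] = { 0 };

        return iwl_legacy_send_cmd_pdu(priv, REPLY_BT_CONFIG,
                                       sizeof(payload), payload);
}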
-
-int iwl_legacy_send_cmd_pdu_async(struct iwl_priv *priv,
- u8 id, u16 len, const void *data,
- void (*callback)(struct iwl_priv *priv,
- struct iwl_device_cmd *cmd,
- struct iwl_rx_packet *pkt))
-{
- struct iwl_host_cmd cmd = {
- .id = id,
- .len = len,
- .data = data,
- };
-
- cmd.flags |= CMD_ASYNC;
- cmd.callback = callback;
-
- return iwl_legacy_send_cmd_async(priv, &cmd);
-}
-EXPORT_SYMBOL(iwl_legacy_send_cmd_pdu_async);
diff --git a/drivers/net/wireless/iwlegacy/iwl-helpers.h b/drivers/net/wireless/iwlegacy/iwl-helpers.h
deleted file mode 100644
index 5cf23eaecbbb..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-helpers.h
+++ /dev/null
@@ -1,196 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#ifndef __iwl_legacy_helpers_h__
-#define __iwl_legacy_helpers_h__
-
-#include <linux/ctype.h>
-#include <net/mac80211.h>
-
-#include "iwl-io.h"
-
-#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
-
-
-static inline struct ieee80211_conf *iwl_legacy_ieee80211_get_hw_conf(
- struct ieee80211_hw *hw)
-{
- return &hw->conf;
-}
-
-/**
- * iwl_legacy_queue_inc_wrap - increment queue index, wrap back to beginning
- * @index -- current index
- * @n_bd -- total number of entries in queue (must be power of 2)
- */
-static inline int iwl_legacy_queue_inc_wrap(int index, int n_bd)
-{
- return ++index & (n_bd - 1);
-}
-
-/**
- * iwl_legacy_queue_dec_wrap - decrement queue index, wrap back to end
- * @index -- current index
- * @n_bd -- total number of entries in queue (must be power of 2)
- */
-static inline int iwl_legacy_queue_dec_wrap(int index, int n_bd)
-{
- return --index & (n_bd - 1);
-}
-
-/* TODO: Move fw_desc functions to iwl-pci.ko */
-static inline void iwl_legacy_free_fw_desc(struct pci_dev *pci_dev,
- struct fw_desc *desc)
-{
- if (desc->v_addr)
- dma_free_coherent(&pci_dev->dev, desc->len,
- desc->v_addr, desc->p_addr);
- desc->v_addr = NULL;
- desc->len = 0;
-}
-
-static inline int iwl_legacy_alloc_fw_desc(struct pci_dev *pci_dev,
- struct fw_desc *desc)
-{
- if (!desc->len) {
- desc->v_addr = NULL;
- return -EINVAL;
- }
-
- desc->v_addr = dma_alloc_coherent(&pci_dev->dev, desc->len,
- &desc->p_addr, GFP_KERNEL);
- return (desc->v_addr != NULL) ? 0 : -ENOMEM;
-}
-
-/*
- * we have 8 bits used like this:
- *
- * 7 6 5 4 3 2 1 0
- * | | | | | | | |
- * | | | | | | +-+-------- AC queue (0-3)
- * | | | | | |
- * | +-+-+-+-+------------ HW queue ID
- * |
- * +---------------------- unused
- */
-static inline void
-iwl_legacy_set_swq_id(struct iwl_tx_queue *txq, u8 ac, u8 hwq)
-{
- BUG_ON(ac > 3); /* only have 2 bits */
- BUG_ON(hwq > 31); /* only use 5 bits */
-
- txq->swq_id = (hwq << 2) | ac;
-}
-
-static inline void iwl_legacy_wake_queue(struct iwl_priv *priv,
- struct iwl_tx_queue *txq)
-{
- u8 queue = txq->swq_id;
- u8 ac = queue & 3;
- u8 hwq = (queue >> 2) & 0x1f;
-
- if (test_and_clear_bit(hwq, priv->queue_stopped))
- if (atomic_dec_return(&priv->queue_stop_count[ac]) <= 0)
- ieee80211_wake_queue(priv->hw, ac);
-}
-
-static inline void iwl_legacy_stop_queue(struct iwl_priv *priv,
- struct iwl_tx_queue *txq)
-{
- u8 queue = txq->swq_id;
- u8 ac = queue & 3;
- u8 hwq = (queue >> 2) & 0x1f;
-
- if (!test_and_set_bit(hwq, priv->queue_stopped))
- if (atomic_inc_return(&priv->queue_stop_count[ac]) > 0)
- ieee80211_stop_queue(priv->hw, ac);
-}
-
-#ifdef ieee80211_stop_queue
-#undef ieee80211_stop_queue
-#endif
-
-#define ieee80211_stop_queue DO_NOT_USE_ieee80211_stop_queue
-
-#ifdef ieee80211_wake_queue
-#undef ieee80211_wake_queue
-#endif
-
-#define ieee80211_wake_queue DO_NOT_USE_ieee80211_wake_queue
-
-static inline void iwl_legacy_disable_interrupts(struct iwl_priv *priv)
-{
- clear_bit(STATUS_INT_ENABLED, &priv->status);
-
- /* disable interrupts from uCode/NIC to host */
- iwl_write32(priv, CSR_INT_MASK, 0x00000000);
-
- /* acknowledge/clear/reset any interrupts still pending
- * from uCode or flow handler (Rx/Tx DMA) */
- iwl_write32(priv, CSR_INT, 0xffffffff);
- iwl_write32(priv, CSR_FH_INT_STATUS, 0xffffffff);
- IWL_DEBUG_ISR(priv, "Disabled interrupts\n");
-}
-
-static inline void iwl_legacy_enable_rfkill_int(struct iwl_priv *priv)
-{
- IWL_DEBUG_ISR(priv, "Enabling rfkill interrupt\n");
- iwl_write32(priv, CSR_INT_MASK, CSR_INT_BIT_RF_KILL);
-}
-
-static inline void iwl_legacy_enable_interrupts(struct iwl_priv *priv)
-{
- IWL_DEBUG_ISR(priv, "Enabling interrupts\n");
- set_bit(STATUS_INT_ENABLED, &priv->status);
- iwl_write32(priv, CSR_INT_MASK, priv->inta_mask);
-}
-
-/**
- * iwl_legacy_beacon_time_mask_low - mask of lower 32 bits of beacon time
- * @priv -- pointer to iwl_priv data structure
- * @tsf_bits -- number of bits needed to shift for masking
- */
-static inline u32 iwl_legacy_beacon_time_mask_low(struct iwl_priv *priv,
- u16 tsf_bits)
-{
- return (1 << tsf_bits) - 1;
-}
-
-/**
- * iwl_legacy_beacon_time_mask_high - mask of higher 32 bits of beacon time
- * @priv -- pointer to iwl_priv data structure
- * @tsf_bits -- number of bits needed to shift for masking
- */
-static inline u32 iwl_legacy_beacon_time_mask_high(struct iwl_priv *priv,
- u16 tsf_bits)
-{
- return ((1 << (32 - tsf_bits)) - 1) << tsf_bits;
-}
-
-#endif /* __iwl_legacy_helpers_h__ */
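
Two small tricks in the removed iwl-helpers.h are worth spelling out: ring indexes wrap cheaply with "& (n_bd - 1)" only because the queue depth is a power of two, and swq_id squeezes a 5-bit HW queue and a 2-bit AC into one byte as (hwq << 2) | ac. A standalone sketch of both; pack_swq_id and the example values are mine, not the driver's.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Power-of-two ring: ++/-- wrap with a simple mask instead of a modulo. */
static int queue_inc_wrap(int index, int n_bd) { return ++index & (n_bd - 1); }
static int queue_dec_wrap(int index, int n_bd) { return --index & (n_bd - 1); }

/* One byte holds a 5-bit HW queue and a 2-bit AC: (hwq << 2) | ac. */
static uint8_t pack_swq_id(uint8_t hwq, uint8_t ac)
{
	assert(ac <= 3 && hwq <= 31);
	return (uint8_t)((hwq << 2) | ac);
}

int main(void)
{
	int n_bd = 256;		/* queue depth, must be a power of two */
	uint8_t id;

	printf("inc(255) -> %d\n", queue_inc_wrap(255, n_bd));	/* wraps to 0 */
	printf("dec(0)   -> %d\n", queue_dec_wrap(0, n_bd));	/* wraps to 255 */

	id = pack_swq_id(17, 2);
	printf("swq_id=0x%02x  hwq=%u ac=%u\n", id, id >> 2, id & 3);
	return 0;
}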
diff --git a/drivers/net/wireless/iwlegacy/iwl-io.h b/drivers/net/wireless/iwlegacy/iwl-io.h
deleted file mode 100644
index 5cc5d342914f..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-io.h
+++ /dev/null
@@ -1,545 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#ifndef __iwl_legacy_io_h__
-#define __iwl_legacy_io_h__
-
-#include <linux/io.h>
-
-#include "iwl-dev.h"
-#include "iwl-debug.h"
-#include "iwl-devtrace.h"
-
-/*
- * IO, register, and NIC memory access functions
- *
- * NOTE on naming convention and macro usage for these
- *
- * A single _ prefix before an access function means that no state
- * check or debug information is printed when that function is called.
- *
- * A double __ prefix before an access function means that state is checked
- * and the current line number and caller function name are printed in addition
- * to any other debug output.
- *
- * The non-prefixed name is the #define that maps the caller into a
- * #define that provides the caller's name and __LINE__ to the double
- * prefix version.
- *
- * If you wish to call the function without any debug or state checking,
- * you should use the single _ prefix version (as is used by dependent IO
- * routines, for example _iwl_legacy_read_direct32 calls the non-check version of
- * _iwl_legacy_read32.)
- *
- * These declarations are *extremely* useful in quickly isolating code deltas
- * which result in misconfiguration of the hardware I/O. In combination with
- * git-bisect and the IO debug level you can quickly determine the specific
- * commit which breaks the IO sequence to the hardware.
- *
- */
-
-static inline void _iwl_legacy_write8(struct iwl_priv *priv, u32 ofs, u8 val)
-{
- trace_iwlwifi_legacy_dev_iowrite8(priv, ofs, val);
- iowrite8(val, priv->hw_base + ofs);
-}
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-static inline void
-__iwl_legacy_write8(const char *f, u32 l, struct iwl_priv *priv,
- u32 ofs, u8 val)
-{
- IWL_DEBUG_IO(priv, "write8(0x%08X, 0x%02X) - %s %d\n", ofs, val, f, l);
- _iwl_legacy_write8(priv, ofs, val);
-}
-#define iwl_write8(priv, ofs, val) \
- __iwl_legacy_write8(__FILE__, __LINE__, priv, ofs, val)
-#else
-#define iwl_write8(priv, ofs, val) _iwl_legacy_write8(priv, ofs, val)
-#endif
-
-
-static inline void _iwl_legacy_write32(struct iwl_priv *priv, u32 ofs, u32 val)
-{
- trace_iwlwifi_legacy_dev_iowrite32(priv, ofs, val);
- iowrite32(val, priv->hw_base + ofs);
-}
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-static inline void
-__iwl_legacy_write32(const char *f, u32 l, struct iwl_priv *priv,
- u32 ofs, u32 val)
-{
- IWL_DEBUG_IO(priv, "write32(0x%08X, 0x%08X) - %s %d\n", ofs, val, f, l);
- _iwl_legacy_write32(priv, ofs, val);
-}
-#define iwl_write32(priv, ofs, val) \
- __iwl_legacy_write32(__FILE__, __LINE__, priv, ofs, val)
-#else
-#define iwl_write32(priv, ofs, val) _iwl_legacy_write32(priv, ofs, val)
-#endif
-
-static inline u32 _iwl_legacy_read32(struct iwl_priv *priv, u32 ofs)
-{
- u32 val = ioread32(priv->hw_base + ofs);
- trace_iwlwifi_legacy_dev_ioread32(priv, ofs, val);
- return val;
-}
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-static inline u32
-__iwl_legacy_read32(char *f, u32 l, struct iwl_priv *priv, u32 ofs)
-{
- IWL_DEBUG_IO(priv, "read_direct32(0x%08X) - %s %d\n", ofs, f, l);
- return _iwl_legacy_read32(priv, ofs);
-}
-#define iwl_read32(priv, ofs) __iwl_legacy_read32(__FILE__, __LINE__, priv, ofs)
-#else
-#define iwl_read32(p, o) _iwl_legacy_read32(p, o)
-#endif
-
-#define IWL_POLL_INTERVAL 10 /* microseconds */
-static inline int
-_iwl_legacy_poll_bit(struct iwl_priv *priv, u32 addr,
- u32 bits, u32 mask, int timeout)
-{
- int t = 0;
-
- do {
- if ((_iwl_legacy_read32(priv, addr) & mask) == (bits & mask))
- return t;
- udelay(IWL_POLL_INTERVAL);
- t += IWL_POLL_INTERVAL;
- } while (t < timeout);
-
- return -ETIMEDOUT;
-}
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-static inline int __iwl_legacy_poll_bit(const char *f, u32 l,
- struct iwl_priv *priv, u32 addr,
- u32 bits, u32 mask, int timeout)
-{
- int ret = _iwl_legacy_poll_bit(priv, addr, bits, mask, timeout);
- IWL_DEBUG_IO(priv, "poll_bit(0x%08X, 0x%08X, 0x%08X) - %s- %s %d\n",
- addr, bits, mask,
- unlikely(ret == -ETIMEDOUT) ? "timeout" : "", f, l);
- return ret;
-}
-#define iwl_poll_bit(priv, addr, bits, mask, timeout) \
- __iwl_legacy_poll_bit(__FILE__, __LINE__, priv, addr, \
- bits, mask, timeout)
-#else
-#define iwl_poll_bit(p, a, b, m, t) _iwl_legacy_poll_bit(p, a, b, m, t)
-#endif
-
-static inline void _iwl_legacy_set_bit(struct iwl_priv *priv, u32 reg, u32 mask)
-{
- _iwl_legacy_write32(priv, reg, _iwl_legacy_read32(priv, reg) | mask);
-}
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-static inline void __iwl_legacy_set_bit(const char *f, u32 l,
- struct iwl_priv *priv, u32 reg, u32 mask)
-{
- u32 val = _iwl_legacy_read32(priv, reg) | mask;
- IWL_DEBUG_IO(priv, "set_bit(0x%08X, 0x%08X) = 0x%08X\n", reg,
- mask, val);
- _iwl_legacy_write32(priv, reg, val);
-}
-static inline void iwl_legacy_set_bit(struct iwl_priv *p, u32 r, u32 m)
-{
- unsigned long reg_flags;
-
- spin_lock_irqsave(&p->reg_lock, reg_flags);
- __iwl_legacy_set_bit(__FILE__, __LINE__, p, r, m);
- spin_unlock_irqrestore(&p->reg_lock, reg_flags);
-}
-#else
-static inline void iwl_legacy_set_bit(struct iwl_priv *p, u32 r, u32 m)
-{
- unsigned long reg_flags;
-
- spin_lock_irqsave(&p->reg_lock, reg_flags);
- _iwl_legacy_set_bit(p, r, m);
- spin_unlock_irqrestore(&p->reg_lock, reg_flags);
-}
-#endif
-
-static inline void
-_iwl_legacy_clear_bit(struct iwl_priv *priv, u32 reg, u32 mask)
-{
- _iwl_legacy_write32(priv, reg, _iwl_legacy_read32(priv, reg) & ~mask);
-}
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-static inline void
-__iwl_legacy_clear_bit(const char *f, u32 l,
- struct iwl_priv *priv, u32 reg, u32 mask)
-{
- u32 val = _iwl_legacy_read32(priv, reg) & ~mask;
- IWL_DEBUG_IO(priv, "clear_bit(0x%08X, 0x%08X) = 0x%08X\n", reg, mask, val);
- _iwl_legacy_write32(priv, reg, val);
-}
-static inline void iwl_legacy_clear_bit(struct iwl_priv *p, u32 r, u32 m)
-{
- unsigned long reg_flags;
-
- spin_lock_irqsave(&p->reg_lock, reg_flags);
- __iwl_legacy_clear_bit(__FILE__, __LINE__, p, r, m);
- spin_unlock_irqrestore(&p->reg_lock, reg_flags);
-}
-#else
-static inline void iwl_legacy_clear_bit(struct iwl_priv *p, u32 r, u32 m)
-{
- unsigned long reg_flags;
-
- spin_lock_irqsave(&p->reg_lock, reg_flags);
- _iwl_legacy_clear_bit(p, r, m);
- spin_unlock_irqrestore(&p->reg_lock, reg_flags);
-}
-#endif
-
-static inline int _iwl_legacy_grab_nic_access(struct iwl_priv *priv)
-{
- int ret;
- u32 val;
-
- /* this bit wakes up the NIC */
- _iwl_legacy_set_bit(priv, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-
- /*
- * These bits say the device is running, and should keep running for
- * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
- * but they do not indicate that embedded SRAM is restored yet;
- * 3945 and 4965 have volatile SRAM, and must save/restore contents
- * to/from host DRAM when sleeping/waking for power-saving.
- * Each direction takes approximately 1/4 millisecond; with this
- * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
- * series of register accesses are expected (e.g. reading Event Log),
- * to keep device from sleeping.
- *
- * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
- * SRAM is okay/restored. We don't check that here because this call
- * is just for hardware register access; but GP1 MAC_SLEEP check is a
- * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
- *
- */
- ret = _iwl_legacy_poll_bit(priv, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
- (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
- CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
- if (ret < 0) {
- val = _iwl_legacy_read32(priv, CSR_GP_CNTRL);
- IWL_ERR(priv,
- "MAC is in deep sleep!. CSR_GP_CNTRL = 0x%08X\n", val);
- _iwl_legacy_write32(priv, CSR_RESET,
- CSR_RESET_REG_FLAG_FORCE_NMI);
- return -EIO;
- }
-
- return 0;
-}
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-static inline int __iwl_legacy_grab_nic_access(const char *f, u32 l,
- struct iwl_priv *priv)
-{
- IWL_DEBUG_IO(priv, "grabbing nic access - %s %d\n", f, l);
- return _iwl_legacy_grab_nic_access(priv);
-}
-#define iwl_grab_nic_access(priv) \
- __iwl_legacy_grab_nic_access(__FILE__, __LINE__, priv)
-#else
-#define iwl_grab_nic_access(priv) \
- _iwl_legacy_grab_nic_access(priv)
-#endif
-
-static inline void _iwl_legacy_release_nic_access(struct iwl_priv *priv)
-{
- _iwl_legacy_clear_bit(priv, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-}
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-static inline void __iwl_legacy_release_nic_access(const char *f, u32 l,
- struct iwl_priv *priv)
-{
-
- IWL_DEBUG_IO(priv, "releasing nic access - %s %d\n", f, l);
- _iwl_legacy_release_nic_access(priv);
-}
-#define iwl_release_nic_access(priv) \
- __iwl_legacy_release_nic_access(__FILE__, __LINE__, priv)
-#else
-#define iwl_release_nic_access(priv) \
- _iwl_legacy_release_nic_access(priv)
-#endif
-
-static inline u32 _iwl_legacy_read_direct32(struct iwl_priv *priv, u32 reg)
-{
- return _iwl_legacy_read32(priv, reg);
-}
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-static inline u32 __iwl_legacy_read_direct32(const char *f, u32 l,
- struct iwl_priv *priv, u32 reg)
-{
- u32 value = _iwl_legacy_read_direct32(priv, reg);
- IWL_DEBUG_IO(priv,
- "read_direct32(0x%4X) = 0x%08x - %s %d\n", reg, value,
- f, l);
- return value;
-}
-static inline u32 iwl_legacy_read_direct32(struct iwl_priv *priv, u32 reg)
-{
- u32 value;
- unsigned long reg_flags;
-
- spin_lock_irqsave(&priv->reg_lock, reg_flags);
- iwl_grab_nic_access(priv);
- value = __iwl_legacy_read_direct32(__FILE__, __LINE__, priv, reg);
- iwl_release_nic_access(priv);
- spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
- return value;
-}
-
-#else
-static inline u32 iwl_legacy_read_direct32(struct iwl_priv *priv, u32 reg)
-{
- u32 value;
- unsigned long reg_flags;
-
- spin_lock_irqsave(&priv->reg_lock, reg_flags);
- iwl_grab_nic_access(priv);
- value = _iwl_legacy_read_direct32(priv, reg);
- iwl_release_nic_access(priv);
- spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
- return value;
-
-}
-#endif
-
-static inline void _iwl_legacy_write_direct32(struct iwl_priv *priv,
- u32 reg, u32 value)
-{
- _iwl_legacy_write32(priv, reg, value);
-}
-static inline void
-iwl_legacy_write_direct32(struct iwl_priv *priv, u32 reg, u32 value)
-{
- unsigned long reg_flags;
-
- spin_lock_irqsave(&priv->reg_lock, reg_flags);
- if (!iwl_grab_nic_access(priv)) {
- _iwl_legacy_write_direct32(priv, reg, value);
- iwl_release_nic_access(priv);
- }
- spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
-}
-
-static inline void iwl_legacy_write_reg_buf(struct iwl_priv *priv,
- u32 reg, u32 len, u32 *values)
-{
- u32 count = sizeof(u32);
-
- if ((priv != NULL) && (values != NULL)) {
- for (; 0 < len; len -= count, reg += count, values++)
- iwl_legacy_write_direct32(priv, reg, *values);
- }
-}
-
-static inline int _iwl_legacy_poll_direct_bit(struct iwl_priv *priv, u32 addr,
- u32 mask, int timeout)
-{
- int t = 0;
-
- do {
- if ((iwl_legacy_read_direct32(priv, addr) & mask) == mask)
- return t;
- udelay(IWL_POLL_INTERVAL);
- t += IWL_POLL_INTERVAL;
- } while (t < timeout);
-
- return -ETIMEDOUT;
-}
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-static inline int __iwl_legacy_poll_direct_bit(const char *f, u32 l,
- struct iwl_priv *priv,
- u32 addr, u32 mask, int timeout)
-{
- int ret = _iwl_legacy_poll_direct_bit(priv, addr, mask, timeout);
-
- if (unlikely(ret == -ETIMEDOUT))
- IWL_DEBUG_IO(priv, "poll_direct_bit(0x%08X, 0x%08X) - "
- "timedout - %s %d\n", addr, mask, f, l);
- else
- IWL_DEBUG_IO(priv, "poll_direct_bit(0x%08X, 0x%08X) = 0x%08X "
- "- %s %d\n", addr, mask, ret, f, l);
- return ret;
-}
-#define iwl_poll_direct_bit(priv, addr, mask, timeout) \
-__iwl_legacy_poll_direct_bit(__FILE__, __LINE__, priv, addr, mask, timeout)
-#else
-#define iwl_poll_direct_bit _iwl_legacy_poll_direct_bit
-#endif
-
-static inline u32 _iwl_legacy_read_prph(struct iwl_priv *priv, u32 reg)
-{
- _iwl_legacy_write_direct32(priv, HBUS_TARG_PRPH_RADDR, reg | (3 << 24));
- rmb();
- return _iwl_legacy_read_direct32(priv, HBUS_TARG_PRPH_RDAT);
-}
-static inline u32 iwl_legacy_read_prph(struct iwl_priv *priv, u32 reg)
-{
- unsigned long reg_flags;
- u32 val;
-
- spin_lock_irqsave(&priv->reg_lock, reg_flags);
- iwl_grab_nic_access(priv);
- val = _iwl_legacy_read_prph(priv, reg);
- iwl_release_nic_access(priv);
- spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
- return val;
-}
-
-static inline void _iwl_legacy_write_prph(struct iwl_priv *priv,
- u32 addr, u32 val)
-{
- _iwl_legacy_write_direct32(priv, HBUS_TARG_PRPH_WADDR,
- ((addr & 0x0000FFFF) | (3 << 24)));
- wmb();
- _iwl_legacy_write_direct32(priv, HBUS_TARG_PRPH_WDAT, val);
-}
-
-static inline void
-iwl_legacy_write_prph(struct iwl_priv *priv, u32 addr, u32 val)
-{
- unsigned long reg_flags;
-
- spin_lock_irqsave(&priv->reg_lock, reg_flags);
- if (!iwl_grab_nic_access(priv)) {
- _iwl_legacy_write_prph(priv, addr, val);
- iwl_release_nic_access(priv);
- }
- spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
-}
-
-#define _iwl_legacy_set_bits_prph(priv, reg, mask) \
-_iwl_legacy_write_prph(priv, reg, (_iwl_legacy_read_prph(priv, reg) | mask))
-
-static inline void
-iwl_legacy_set_bits_prph(struct iwl_priv *priv, u32 reg, u32 mask)
-{
- unsigned long reg_flags;
-
- spin_lock_irqsave(&priv->reg_lock, reg_flags);
- iwl_grab_nic_access(priv);
- _iwl_legacy_set_bits_prph(priv, reg, mask);
- iwl_release_nic_access(priv);
- spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
-}
-
-#define _iwl_legacy_set_bits_mask_prph(priv, reg, bits, mask) \
-_iwl_legacy_write_prph(priv, reg, \
- ((_iwl_legacy_read_prph(priv, reg) & mask) | bits))
-
-static inline void iwl_legacy_set_bits_mask_prph(struct iwl_priv *priv, u32 reg,
- u32 bits, u32 mask)
-{
- unsigned long reg_flags;
-
- spin_lock_irqsave(&priv->reg_lock, reg_flags);
- iwl_grab_nic_access(priv);
- _iwl_legacy_set_bits_mask_prph(priv, reg, bits, mask);
- iwl_release_nic_access(priv);
- spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
-}
-
-static inline void iwl_legacy_clear_bits_prph(struct iwl_priv
- *priv, u32 reg, u32 mask)
-{
- unsigned long reg_flags;
- u32 val;
-
- spin_lock_irqsave(&priv->reg_lock, reg_flags);
- iwl_grab_nic_access(priv);
- val = _iwl_legacy_read_prph(priv, reg);
- _iwl_legacy_write_prph(priv, reg, (val & ~mask));
- iwl_release_nic_access(priv);
- spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
-}
-
-static inline u32 iwl_legacy_read_targ_mem(struct iwl_priv *priv, u32 addr)
-{
- unsigned long reg_flags;
- u32 value;
-
- spin_lock_irqsave(&priv->reg_lock, reg_flags);
- iwl_grab_nic_access(priv);
-
- _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, addr);
- rmb();
- value = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
-
- iwl_release_nic_access(priv);
- spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
- return value;
-}
-
-static inline void
-iwl_legacy_write_targ_mem(struct iwl_priv *priv, u32 addr, u32 val)
-{
- unsigned long reg_flags;
-
- spin_lock_irqsave(&priv->reg_lock, reg_flags);
- if (!iwl_grab_nic_access(priv)) {
- _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_WADDR, addr);
- wmb();
- _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_WDAT, val);
- iwl_release_nic_access(priv);
- }
- spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
-}
-
-static inline void
-iwl_legacy_write_targ_mem_buf(struct iwl_priv *priv, u32 addr,
- u32 len, u32 *values)
-{
- unsigned long reg_flags;
-
- spin_lock_irqsave(&priv->reg_lock, reg_flags);
- if (!iwl_grab_nic_access(priv)) {
- _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_WADDR, addr);
- wmb();
- for (; 0 < len; len -= sizeof(u32), values++)
- _iwl_legacy_write_direct32(priv,
- HBUS_TARG_MEM_WDAT, *values);
-
- iwl_release_nic_access(priv);
- }
- spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
-}
-#endif
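
The polling helpers above all share one contract: spin in fixed IWL_POLL_INTERVAL steps, return the elapsed time on success and -ETIMEDOUT otherwise, and let the CONFIG_IWLWIFI_LEGACY_DEBUG wrappers add __FILE__/__LINE__ to the log. A userspace approximation of that contract, polling an ordinary variable instead of a device register (poll_bit and the sample masks are illustrative):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define POLL_INTERVAL_US 10

/* Poll until (value & mask) == (bits & mask); return the elapsed time in
 * microseconds on success or -ETIMEDOUT, the same contract as the removed
 * _iwl_legacy_poll_bit(). */
static int poll_bit(const volatile uint32_t *word, uint32_t bits, uint32_t mask,
		    int timeout_us)
{
	int t = 0;

	do {
		if ((*word & mask) == (bits & mask))
			return t;
		usleep(POLL_INTERVAL_US);
		t += POLL_INTERVAL_US;
	} while (t < timeout_us);

	return -ETIMEDOUT;
}

int main(void)
{
	volatile uint32_t reg = 0x5;	/* pretend hardware status word */

	printf("bit already set: %d\n", poll_bit(&reg, 0x4, 0x4, 1000));
	printf("bit never set:   %d\n", poll_bit(&reg, 0x8, 0x8, 100));
	return 0;
}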
diff --git a/drivers/net/wireless/iwlegacy/iwl-led.c b/drivers/net/wireless/iwlegacy/iwl-led.c
deleted file mode 100644
index dc568a474c5d..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-led.c
+++ /dev/null
@@ -1,205 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/delay.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <net/mac80211.h>
-#include <linux/etherdevice.h>
-#include <asm/unaligned.h>
-
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-io.h"
-
-/* default: IWL_LED_BLINK(0) using blinking index table */
-static int led_mode;
-module_param(led_mode, int, S_IRUGO);
-MODULE_PARM_DESC(led_mode, "0=system default, "
- "1=On(RF On)/Off(RF Off), 2=blinking");
-
-/* Throughput OFF time(ms) ON time (ms)
- * >300 25 25
- * >200 to 300 40 40
- * >100 to 200 55 55
- * >70 to 100 65 65
- * >50 to 70 75 75
- * >20 to 50 85 85
- * >10 to 20 95 95
- * >5 to 10 110 110
- * >1 to 5 130 130
- * >0 to 1 167 167
- * <=0 SOLID ON
- */
-static const struct ieee80211_tpt_blink iwl_blink[] = {
- { .throughput = 0, .blink_time = 334 },
- { .throughput = 1 * 1024 - 1, .blink_time = 260 },
- { .throughput = 5 * 1024 - 1, .blink_time = 220 },
- { .throughput = 10 * 1024 - 1, .blink_time = 190 },
- { .throughput = 20 * 1024 - 1, .blink_time = 170 },
- { .throughput = 50 * 1024 - 1, .blink_time = 150 },
- { .throughput = 70 * 1024 - 1, .blink_time = 130 },
- { .throughput = 100 * 1024 - 1, .blink_time = 110 },
- { .throughput = 200 * 1024 - 1, .blink_time = 80 },
- { .throughput = 300 * 1024 - 1, .blink_time = 50 },
-};
-
-/*
- * Adjust led blink rate to compensate for the MAC Clock difference on each HW
- * Led blink rate analysis showed an average deviation of 0% on 3945,
- * 5% on 4965 HW.
- * Need to compensate the led on/off time per HW according to the deviation
- * to achieve the desired led frequency
- * The calculation is: (100-averageDeviation)/100 * blinkTime
- * For code efficiency the calculation will be:
- * compensation = (100 - averageDeviation) * 64 / 100
- * NewBlinkTime = (compensation * BlinkTime) / 64
- */
-static inline u8 iwl_legacy_blink_compensation(struct iwl_priv *priv,
- u8 time, u16 compensation)
-{
- if (!compensation) {
- IWL_ERR(priv, "undefined blink compensation: "
- "use pre-defined blinking time\n");
- return time;
- }
-
- return (u8)((time * compensation) >> 6);
-}
-
-/* Set led pattern command */
-static int iwl_legacy_led_cmd(struct iwl_priv *priv,
- unsigned long on,
- unsigned long off)
-{
- struct iwl_led_cmd led_cmd = {
- .id = IWL_LED_LINK,
- .interval = IWL_DEF_LED_INTRVL
- };
- int ret;
-
- if (!test_bit(STATUS_READY, &priv->status))
- return -EBUSY;
-
- if (priv->blink_on == on && priv->blink_off == off)
- return 0;
-
- if (off == 0) {
- /* led is SOLID_ON */
- on = IWL_LED_SOLID;
- }
-
- IWL_DEBUG_LED(priv, "Led blink time compensation=%u\n",
- priv->cfg->base_params->led_compensation);
- led_cmd.on = iwl_legacy_blink_compensation(priv, on,
- priv->cfg->base_params->led_compensation);
- led_cmd.off = iwl_legacy_blink_compensation(priv, off,
- priv->cfg->base_params->led_compensation);
-
- ret = priv->cfg->ops->led->cmd(priv, &led_cmd);
- if (!ret) {
- priv->blink_on = on;
- priv->blink_off = off;
- }
- return ret;
-}
-
-static void iwl_legacy_led_brightness_set(struct led_classdev *led_cdev,
- enum led_brightness brightness)
-{
- struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led);
- unsigned long on = 0;
-
- if (brightness > 0)
- on = IWL_LED_SOLID;
-
- iwl_legacy_led_cmd(priv, on, 0);
-}
-
-static int iwl_legacy_led_blink_set(struct led_classdev *led_cdev,
- unsigned long *delay_on,
- unsigned long *delay_off)
-{
- struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led);
-
- return iwl_legacy_led_cmd(priv, *delay_on, *delay_off);
-}
-
-void iwl_legacy_leds_init(struct iwl_priv *priv)
-{
- int mode = led_mode;
- int ret;
-
- if (mode == IWL_LED_DEFAULT)
- mode = priv->cfg->led_mode;
-
- priv->led.name = kasprintf(GFP_KERNEL, "%s-led",
- wiphy_name(priv->hw->wiphy));
- priv->led.brightness_set = iwl_legacy_led_brightness_set;
- priv->led.blink_set = iwl_legacy_led_blink_set;
- priv->led.max_brightness = 1;
-
- switch (mode) {
- case IWL_LED_DEFAULT:
- WARN_ON(1);
- break;
- case IWL_LED_BLINK:
- priv->led.default_trigger =
- ieee80211_create_tpt_led_trigger(priv->hw,
- IEEE80211_TPT_LEDTRIG_FL_CONNECTED,
- iwl_blink, ARRAY_SIZE(iwl_blink));
- break;
- case IWL_LED_RF_STATE:
- priv->led.default_trigger =
- ieee80211_get_radio_led_name(priv->hw);
- break;
- }
-
- ret = led_classdev_register(&priv->pci_dev->dev, &priv->led);
- if (ret) {
- kfree(priv->led.name);
- return;
- }
-
- priv->led_registered = true;
-}
-EXPORT_SYMBOL(iwl_legacy_leds_init);
-
-void iwl_legacy_leds_exit(struct iwl_priv *priv)
-{
- if (!priv->led_registered)
- return;
-
- led_classdev_unregister(&priv->led);
- kfree(priv->led.name);
-}
-EXPORT_SYMBOL(iwl_legacy_leds_exit);
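
The compensation arithmetic above is plain fixed point: compensation = (100 - averageDeviation) * 64 / 100 is computed once per HW, and each table time is then scaled as (time * compensation) >> 6. A quick standalone check of those numbers; the 5% (4965) and 0% (3945) deviations come from the comment above, while the 110 ms input is just an example value.

#include <stdint.h>
#include <stdio.h>

/* compensation = (100 - average_deviation) * 64 / 100, applied as >> 6 */
static uint8_t blink_compensate(uint8_t time, uint16_t compensation)
{
	if (!compensation)
		return time;	/* no data: keep the table value */
	return (uint8_t)((time * compensation) >> 6);
}

int main(void)
{
	uint16_t comp_4965 = (100 - 5) * 64 / 100;	/* ~5% MAC clock deviation */
	uint16_t comp_3945 = (100 - 0) * 64 / 100;	/* no measured deviation */

	printf("4965: 110 ms -> %u ms\n", blink_compensate(110, comp_4965));
	printf("3945: 110 ms -> %u ms\n", blink_compensate(110, comp_3945));
	return 0;
}

Scaling by 64 instead of dividing by 100 at runtime keeps the per-blink cost to one multiply and one shift.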
diff --git a/drivers/net/wireless/iwlegacy/iwl-led.h b/drivers/net/wireless/iwlegacy/iwl-led.h
deleted file mode 100644
index f0791f70f79d..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-led.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#ifndef __iwl_legacy_leds_h__
-#define __iwl_legacy_leds_h__
-
-
-struct iwl_priv;
-
-#define IWL_LED_SOLID 11
-#define IWL_DEF_LED_INTRVL cpu_to_le32(1000)
-
-#define IWL_LED_ACTIVITY (0<<1)
-#define IWL_LED_LINK (1<<1)
-
-/*
- * LED mode
- * IWL_LED_DEFAULT: use device default
- * IWL_LED_RF_STATE: turn LED on/off based on RF state
- * LED ON = RF ON
- * LED OFF = RF OFF
- * IWL_LED_BLINK: adjust led blink rate based on blink table
- */
-enum iwl_led_mode {
- IWL_LED_DEFAULT,
- IWL_LED_RF_STATE,
- IWL_LED_BLINK,
-};
-
-void iwl_legacy_leds_init(struct iwl_priv *priv);
-void iwl_legacy_leds_exit(struct iwl_priv *priv);
-
-#endif /* __iwl_legacy_leds_h__ */
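
For IWL_LED_BLINK mode the driver hands the iwl_blink[] throughput/blink-time pairs (removed in iwl-led.c above) to ieee80211_create_tpt_led_trigger() and lets mac80211 pick the entry that matches the measured throughput. The sketch below reuses those pairs in a plain lookup; the struct name tpt_blink and the "last threshold reached wins" rule are a simplification of what mac80211 actually does, not a copy of it.

#include <stdio.h>

struct tpt_blink {
	int throughput;		/* kbit/s threshold, sorted ascending */
	int blink_time;		/* blink period in ms */
};

/* Threshold/time pairs taken from the removed iwl_blink[] table. */
static const struct tpt_blink blink[] = {
	{ 0, 334 },             { 1 * 1024 - 1, 260 },   { 5 * 1024 - 1, 220 },
	{ 10 * 1024 - 1, 190 }, { 20 * 1024 - 1, 170 },  { 50 * 1024 - 1, 150 },
	{ 70 * 1024 - 1, 130 }, { 100 * 1024 - 1, 110 }, { 200 * 1024 - 1, 80 },
	{ 300 * 1024 - 1, 50 },
};

/* The last entry whose threshold the measured throughput reaches wins. */
static int blink_time_for(int tpt_kbps)
{
	unsigned int i;
	int t = blink[0].blink_time;

	for (i = 0; i < sizeof(blink) / sizeof(blink[0]); i++)
		if (tpt_kbps >= blink[i].throughput)
			t = blink[i].blink_time;
	return t;
}

int main(void)
{
	printf("idle        -> %d ms\n", blink_time_for(0));
	printf("~8 Mbit/s   -> %d ms\n", blink_time_for(8 * 1024));
	printf("~400 Mbit/s -> %d ms\n", blink_time_for(400 * 1024));
	return 0;
}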
diff --git a/drivers/net/wireless/iwlegacy/iwl-legacy-rs.h b/drivers/net/wireless/iwlegacy/iwl-legacy-rs.h
deleted file mode 100644
index 38647e481eb0..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-legacy-rs.h
+++ /dev/null
@@ -1,456 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#ifndef __iwl_legacy_rs_h__
-#define __iwl_legacy_rs_h__
-
-struct iwl_rate_info {
- u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */
- u8 plcp_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */
- u8 plcp_mimo2; /* uCode API: IWL_RATE_MIMO2_6M_PLCP, etc. */
- u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. */
- u8 prev_ieee; /* previous rate in IEEE speeds */
- u8 next_ieee; /* next rate in IEEE speeds */
- u8 prev_rs; /* previous rate used in rs algo */
- u8 next_rs; /* next rate used in rs algo */
- u8 prev_rs_tgg; /* previous rate used in TGG rs algo */
- u8 next_rs_tgg; /* next rate used in TGG rs algo */
-};
-
-struct iwl3945_rate_info {
- u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */
- u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. */
- u8 prev_ieee; /* previous rate in IEEE speeds */
- u8 next_ieee; /* next rate in IEEE speeds */
- u8 prev_rs; /* previous rate used in rs algo */
- u8 next_rs; /* next rate used in rs algo */
- u8 prev_rs_tgg; /* previous rate used in TGG rs algo */
- u8 next_rs_tgg; /* next rate used in TGG rs algo */
- u8 table_rs_index; /* index in rate scale table cmd */
- u8 prev_table_rs; /* prev in rate table cmd */
-};
-
-
-/*
- * These serve as indexes into
- * struct iwl_rate_info iwlegacy_rates[IWL_RATE_COUNT];
- */
-enum {
- IWL_RATE_1M_INDEX = 0,
- IWL_RATE_2M_INDEX,
- IWL_RATE_5M_INDEX,
- IWL_RATE_11M_INDEX,
- IWL_RATE_6M_INDEX,
- IWL_RATE_9M_INDEX,
- IWL_RATE_12M_INDEX,
- IWL_RATE_18M_INDEX,
- IWL_RATE_24M_INDEX,
- IWL_RATE_36M_INDEX,
- IWL_RATE_48M_INDEX,
- IWL_RATE_54M_INDEX,
- IWL_RATE_60M_INDEX,
- IWL_RATE_COUNT,
- IWL_RATE_COUNT_LEGACY = IWL_RATE_COUNT - 1, /* Excluding 60M */
- IWL_RATE_COUNT_3945 = IWL_RATE_COUNT - 1,
- IWL_RATE_INVM_INDEX = IWL_RATE_COUNT,
- IWL_RATE_INVALID = IWL_RATE_COUNT,
-};
-
-enum {
- IWL_RATE_6M_INDEX_TABLE = 0,
- IWL_RATE_9M_INDEX_TABLE,
- IWL_RATE_12M_INDEX_TABLE,
- IWL_RATE_18M_INDEX_TABLE,
- IWL_RATE_24M_INDEX_TABLE,
- IWL_RATE_36M_INDEX_TABLE,
- IWL_RATE_48M_INDEX_TABLE,
- IWL_RATE_54M_INDEX_TABLE,
- IWL_RATE_1M_INDEX_TABLE,
- IWL_RATE_2M_INDEX_TABLE,
- IWL_RATE_5M_INDEX_TABLE,
- IWL_RATE_11M_INDEX_TABLE,
- IWL_RATE_INVM_INDEX_TABLE = IWL_RATE_INVM_INDEX - 1,
-};
-
-enum {
- IWL_FIRST_OFDM_RATE = IWL_RATE_6M_INDEX,
- IWL39_LAST_OFDM_RATE = IWL_RATE_54M_INDEX,
- IWL_LAST_OFDM_RATE = IWL_RATE_60M_INDEX,
- IWL_FIRST_CCK_RATE = IWL_RATE_1M_INDEX,
- IWL_LAST_CCK_RATE = IWL_RATE_11M_INDEX,
-};
-
-/* #define vs. enum to keep from defaulting to 'large integer' */
-#define IWL_RATE_6M_MASK (1 << IWL_RATE_6M_INDEX)
-#define IWL_RATE_9M_MASK (1 << IWL_RATE_9M_INDEX)
-#define IWL_RATE_12M_MASK (1 << IWL_RATE_12M_INDEX)
-#define IWL_RATE_18M_MASK (1 << IWL_RATE_18M_INDEX)
-#define IWL_RATE_24M_MASK (1 << IWL_RATE_24M_INDEX)
-#define IWL_RATE_36M_MASK (1 << IWL_RATE_36M_INDEX)
-#define IWL_RATE_48M_MASK (1 << IWL_RATE_48M_INDEX)
-#define IWL_RATE_54M_MASK (1 << IWL_RATE_54M_INDEX)
-#define IWL_RATE_60M_MASK (1 << IWL_RATE_60M_INDEX)
-#define IWL_RATE_1M_MASK (1 << IWL_RATE_1M_INDEX)
-#define IWL_RATE_2M_MASK (1 << IWL_RATE_2M_INDEX)
-#define IWL_RATE_5M_MASK (1 << IWL_RATE_5M_INDEX)
-#define IWL_RATE_11M_MASK (1 << IWL_RATE_11M_INDEX)
-
-/* uCode API values for legacy bit rates, both OFDM and CCK */
-enum {
- IWL_RATE_6M_PLCP = 13,
- IWL_RATE_9M_PLCP = 15,
- IWL_RATE_12M_PLCP = 5,
- IWL_RATE_18M_PLCP = 7,
- IWL_RATE_24M_PLCP = 9,
- IWL_RATE_36M_PLCP = 11,
- IWL_RATE_48M_PLCP = 1,
- IWL_RATE_54M_PLCP = 3,
- IWL_RATE_60M_PLCP = 3,/*FIXME:RS:should be removed*/
- IWL_RATE_1M_PLCP = 10,
- IWL_RATE_2M_PLCP = 20,
- IWL_RATE_5M_PLCP = 55,
- IWL_RATE_11M_PLCP = 110,
- /*FIXME:RS:add IWL_RATE_LEGACY_INVM_PLCP = 0,*/
-};
-
-/* uCode API values for OFDM high-throughput (HT) bit rates */
-enum {
- IWL_RATE_SISO_6M_PLCP = 0,
- IWL_RATE_SISO_12M_PLCP = 1,
- IWL_RATE_SISO_18M_PLCP = 2,
- IWL_RATE_SISO_24M_PLCP = 3,
- IWL_RATE_SISO_36M_PLCP = 4,
- IWL_RATE_SISO_48M_PLCP = 5,
- IWL_RATE_SISO_54M_PLCP = 6,
- IWL_RATE_SISO_60M_PLCP = 7,
- IWL_RATE_MIMO2_6M_PLCP = 0x8,
- IWL_RATE_MIMO2_12M_PLCP = 0x9,
- IWL_RATE_MIMO2_18M_PLCP = 0xa,
- IWL_RATE_MIMO2_24M_PLCP = 0xb,
- IWL_RATE_MIMO2_36M_PLCP = 0xc,
- IWL_RATE_MIMO2_48M_PLCP = 0xd,
- IWL_RATE_MIMO2_54M_PLCP = 0xe,
- IWL_RATE_MIMO2_60M_PLCP = 0xf,
- IWL_RATE_SISO_INVM_PLCP,
- IWL_RATE_MIMO2_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP,
-};
-
-/* MAC header values for bit rates */
-enum {
- IWL_RATE_6M_IEEE = 12,
- IWL_RATE_9M_IEEE = 18,
- IWL_RATE_12M_IEEE = 24,
- IWL_RATE_18M_IEEE = 36,
- IWL_RATE_24M_IEEE = 48,
- IWL_RATE_36M_IEEE = 72,
- IWL_RATE_48M_IEEE = 96,
- IWL_RATE_54M_IEEE = 108,
- IWL_RATE_60M_IEEE = 120,
- IWL_RATE_1M_IEEE = 2,
- IWL_RATE_2M_IEEE = 4,
- IWL_RATE_5M_IEEE = 11,
- IWL_RATE_11M_IEEE = 22,
-};
-
-#define IWL_CCK_BASIC_RATES_MASK \
- (IWL_RATE_1M_MASK | \
- IWL_RATE_2M_MASK)
-
-#define IWL_CCK_RATES_MASK \
- (IWL_CCK_BASIC_RATES_MASK | \
- IWL_RATE_5M_MASK | \
- IWL_RATE_11M_MASK)
-
-#define IWL_OFDM_BASIC_RATES_MASK \
- (IWL_RATE_6M_MASK | \
- IWL_RATE_12M_MASK | \
- IWL_RATE_24M_MASK)
-
-#define IWL_OFDM_RATES_MASK \
- (IWL_OFDM_BASIC_RATES_MASK | \
- IWL_RATE_9M_MASK | \
- IWL_RATE_18M_MASK | \
- IWL_RATE_36M_MASK | \
- IWL_RATE_48M_MASK | \
- IWL_RATE_54M_MASK)
-
-#define IWL_BASIC_RATES_MASK \
- (IWL_OFDM_BASIC_RATES_MASK | \
- IWL_CCK_BASIC_RATES_MASK)
-
-#define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1)
-#define IWL_RATES_MASK_3945 ((1 << IWL_RATE_COUNT_3945) - 1)
-
-#define IWL_INVALID_VALUE -1
-
-#define IWL_MIN_RSSI_VAL -100
-#define IWL_MAX_RSSI_VAL 0
-
-/* These values specify how many Tx frame attempts before
- * searching for a new modulation mode */
-#define IWL_LEGACY_FAILURE_LIMIT 160
-#define IWL_LEGACY_SUCCESS_LIMIT 480
-#define IWL_LEGACY_TABLE_COUNT 160
-
-#define IWL_NONE_LEGACY_FAILURE_LIMIT 400
-#define IWL_NONE_LEGACY_SUCCESS_LIMIT 4500
-#define IWL_NONE_LEGACY_TABLE_COUNT 1500
-
-/* Success ratio (ACKed / attempted tx frames) values (perfect is 128 * 100) */
-#define IWL_RS_GOOD_RATIO 12800 /* 100% */
-#define IWL_RATE_SCALE_SWITCH 10880 /* 85% */
-#define IWL_RATE_HIGH_TH 10880 /* 85% */
-#define IWL_RATE_INCREASE_TH 6400 /* 50% */
-#define IWL_RATE_DECREASE_TH 1920 /* 15% */
-
-/* possible actions when in legacy mode */
-#define IWL_LEGACY_SWITCH_ANTENNA1 0
-#define IWL_LEGACY_SWITCH_ANTENNA2 1
-#define IWL_LEGACY_SWITCH_SISO 2
-#define IWL_LEGACY_SWITCH_MIMO2_AB 3
-#define IWL_LEGACY_SWITCH_MIMO2_AC 4
-#define IWL_LEGACY_SWITCH_MIMO2_BC 5
-
-/* possible actions when in siso mode */
-#define IWL_SISO_SWITCH_ANTENNA1 0
-#define IWL_SISO_SWITCH_ANTENNA2 1
-#define IWL_SISO_SWITCH_MIMO2_AB 2
-#define IWL_SISO_SWITCH_MIMO2_AC 3
-#define IWL_SISO_SWITCH_MIMO2_BC 4
-#define IWL_SISO_SWITCH_GI 5
-
-/* possible actions when in mimo mode */
-#define IWL_MIMO2_SWITCH_ANTENNA1 0
-#define IWL_MIMO2_SWITCH_ANTENNA2 1
-#define IWL_MIMO2_SWITCH_SISO_A 2
-#define IWL_MIMO2_SWITCH_SISO_B 3
-#define IWL_MIMO2_SWITCH_SISO_C 4
-#define IWL_MIMO2_SWITCH_GI 5
-
-#define IWL_MAX_SEARCH IWL_MIMO2_SWITCH_GI
-
-#define IWL_ACTION_LIMIT 3 /* # possible actions */
-
-#define LQ_SIZE 2 /* 2 mode tables: "Active" and "Search" */
-
-/* load per tid defines for A-MPDU activation */
-#define IWL_AGG_TPT_THREHOLD 0
-#define IWL_AGG_LOAD_THRESHOLD 10
-#define IWL_AGG_ALL_TID 0xff
-#define TID_QUEUE_CELL_SPACING 50 /*mS */
-#define TID_QUEUE_MAX_SIZE 20
-#define TID_ROUND_VALUE 5 /* mS */
-#define TID_MAX_LOAD_COUNT 8
-
-#define TID_MAX_TIME_DIFF ((TID_QUEUE_MAX_SIZE - 1) * TID_QUEUE_CELL_SPACING)
-#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? (y) - (x) : (0-(x)) + (y))
-
-extern const struct iwl_rate_info iwlegacy_rates[IWL_RATE_COUNT];
-
-enum iwl_table_type {
- LQ_NONE,
- LQ_G, /* legacy types */
- LQ_A,
- LQ_SISO, /* high-throughput types */
- LQ_MIMO2,
- LQ_MAX,
-};
-
-#define is_legacy(tbl) (((tbl) == LQ_G) || ((tbl) == LQ_A))
-#define is_siso(tbl) ((tbl) == LQ_SISO)
-#define is_mimo2(tbl) ((tbl) == LQ_MIMO2)
-#define is_mimo(tbl) (is_mimo2(tbl))
-#define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl))
-#define is_a_band(tbl) ((tbl) == LQ_A)
-#define is_g_and(tbl) ((tbl) == LQ_G)
-
-#define ANT_NONE 0x0
-#define ANT_A BIT(0)
-#define ANT_B BIT(1)
-#define ANT_AB (ANT_A | ANT_B)
-#define ANT_C BIT(2)
-#define ANT_AC (ANT_A | ANT_C)
-#define ANT_BC (ANT_B | ANT_C)
-#define ANT_ABC (ANT_AB | ANT_C)
-
-#define IWL_MAX_MCS_DISPLAY_SIZE 12
-
-struct iwl_rate_mcs_info {
- char mbps[IWL_MAX_MCS_DISPLAY_SIZE];
- char mcs[IWL_MAX_MCS_DISPLAY_SIZE];
-};
-
-/**
- * struct iwl_rate_scale_data -- tx success history for one rate
- */
-struct iwl_rate_scale_data {
- u64 data; /* bitmap of successful frames */
- s32 success_counter; /* number of frames successful */
- s32 success_ratio; /* per-cent * 128 */
- s32 counter; /* number of frames attempted */
- s32 average_tpt; /* success ratio * expected throughput */
- unsigned long stamp;
-};
-
-/**
- * struct iwl_scale_tbl_info -- tx params and success history for all rates
- *
- * There are two of these in struct iwl_lq_sta,
- * one for "active", and one for "search".
- */
-struct iwl_scale_tbl_info {
- enum iwl_table_type lq_type;
- u8 ant_type;
- u8 is_SGI; /* 1 = short guard interval */
- u8 is_ht40; /* 1 = 40 MHz channel width */
- u8 is_dup; /* 1 = duplicated data streams */
- u8 action; /* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */
-	u8 max_search;	/* maximum number of tables we can search */
- s32 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */
- u32 current_rate; /* rate_n_flags, uCode API format */
- struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
-};
-
-struct iwl_traffic_load {
- unsigned long time_stamp; /* age of the oldest statistics */
- u32 packet_count[TID_QUEUE_MAX_SIZE]; /* packet count in this time
- * slice */
- u32 total; /* total num of packets during the
- * last TID_MAX_TIME_DIFF */
-	u8 queue_count;	        /* number of queues that have
- * been used since the last cleanup */
- u8 head; /* start of the circular buffer */
-};
-
-/**
- * struct iwl_lq_sta -- driver's rate scaling private structure
- *
- * Pointer to this gets passed back and forth between driver and mac80211.
- */
-struct iwl_lq_sta {
- u8 active_tbl; /* index of active table, range 0-1 */
- u8 enable_counter; /* indicates HT mode */
- u8 stay_in_tbl; /* 1: disallow, 0: allow search for new mode */
- u8 search_better_tbl; /* 1: currently trying alternate mode */
- s32 last_tpt;
-
- /* The following determine when to search for a new mode */
- u32 table_count_limit;
- u32 max_failure_limit; /* # failed frames before new search */
- u32 max_success_limit; /* # successful frames before new search */
- u32 table_count;
- u32 total_failed; /* total failed frames, any/all rates */
- u32 total_success; /* total successful frames, any/all rates */
- u64 flush_timer; /* time staying in mode before new search */
-
- u8 action_counter; /* # mode-switch actions tried */
- u8 is_green;
- u8 is_dup;
- enum ieee80211_band band;
-
- /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
- u32 supp_rates;
- u16 active_legacy_rate;
- u16 active_siso_rate;
- u16 active_mimo2_rate;
- s8 max_rate_idx; /* Max rate set by user */
- u8 missed_rate_counter;
-
- struct iwl_link_quality_cmd lq;
- struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */
- struct iwl_traffic_load load[TID_MAX_LOAD_COUNT];
- u8 tx_agg_tid_en;
-#ifdef CONFIG_MAC80211_DEBUGFS
- struct dentry *rs_sta_dbgfs_scale_table_file;
- struct dentry *rs_sta_dbgfs_stats_table_file;
- struct dentry *rs_sta_dbgfs_rate_scale_data_file;
- struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file;
- u32 dbg_fixed_rate;
-#endif
- struct iwl_priv *drv;
-
- /* used to be in sta_info */
- int last_txrate_idx;
- /* last tx rate_n_flags */
- u32 last_rate_n_flags;
- /* packets destined for this STA are aggregated */
- u8 is_agg;
-};
-
-static inline u8 iwl4965_num_of_ant(u8 mask)
-{
- return !!((mask) & ANT_A) +
- !!((mask) & ANT_B) +
- !!((mask) & ANT_C);
-}
-
-static inline u8 iwl4965_first_antenna(u8 mask)
-{
- if (mask & ANT_A)
- return ANT_A;
- if (mask & ANT_B)
- return ANT_B;
- return ANT_C;
-}
-
-
-/**
- * iwl3945_rate_scale_init - Initialize the rate scale table based on assoc info
- *
- * The specific throughput table used is based on the type of network
- * we are associated with, including A, B, G, and G w/ TGG protection
- */
-extern void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id);
-
-/* Initialize station's rate scaling information after adding station */
-extern void iwl4965_rs_rate_init(struct iwl_priv *priv,
- struct ieee80211_sta *sta, u8 sta_id);
-extern void iwl3945_rs_rate_init(struct iwl_priv *priv,
- struct ieee80211_sta *sta, u8 sta_id);
-
-/**
- * iwl_rate_control_register - Register the rate control algorithm callbacks
- *
- * Since the rate control algorithm is hardware specific, there is no need
- * or reason to place it as a stand alone module. The driver can call
- * iwl_rate_control_register in order to register the rate control callbacks
- * with the mac80211 subsystem. This should be performed prior to calling
- * ieee80211_register_hw
- *
- */
-extern int iwl4965_rate_control_register(void);
-extern int iwl3945_rate_control_register(void);
-
-/**
- * iwl_rate_control_unregister - Unregister the rate control callbacks
- *
- * This should be called after calling ieee80211_unregister_hw, but before
- * the driver is unloaded.
- */
-extern void iwl4965_rate_control_unregister(void);
-extern void iwl3945_rate_control_unregister(void);
-
-#endif /* __iwl_legacy_rs__ */
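
Two conventions in the removed rate-scaling header deserve a concrete example: antenna masks are counted bit by bit with the double-negation trick from iwl4965_num_of_ant(), and success ratios are kept as percent scaled by 128, so 100% is 12800 and IWL_RATE_HIGH_TH (10880) is the 85% line. Deriving the ratio from acked/attempted counters as done below is my own illustration of that scaling, not the exact code from the rate-scaling module.

#include <stdio.h>

#define ANT_A (1 << 0)
#define ANT_B (1 << 1)
#define ANT_C (1 << 2)

/* Same bit counting as iwl4965_num_of_ant(): one antenna per set bit. */
static int num_of_ant(int mask)
{
	return !!(mask & ANT_A) + !!(mask & ANT_B) + !!(mask & ANT_C);
}

/* Success ratio as percent * 128: perfect is 12800, the 85% line is 10880. */
static int success_ratio(int acked, int attempted)
{
	return attempted ? (128 * 100 * acked) / attempted : 0;
}

int main(void)
{
	int sr = success_ratio(90, 100);

	printf("ANT_A|ANT_C  -> %d antennas\n", num_of_ant(ANT_A | ANT_C));
	printf("90/100 acked -> %d (%s IWL_RATE_HIGH_TH=10880)\n",
	       sr, sr >= 10880 ? "above" : "below");
	return 0;
}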
diff --git a/drivers/net/wireless/iwlegacy/iwl-power.c b/drivers/net/wireless/iwlegacy/iwl-power.c
deleted file mode 100644
index 903ef0d6d6cb..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-power.c
+++ /dev/null
@@ -1,165 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *****************************************************************************/
-
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-
-#include <net/mac80211.h>
-
-#include "iwl-eeprom.h"
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-io.h"
-#include "iwl-commands.h"
-#include "iwl-debug.h"
-#include "iwl-power.h"
-
-/*
- * Setting power level allows the card to go to sleep when not busy.
- *
- * We calculate a sleep command based on the required latency, which
- * we get from mac80211. In order to handle thermal throttling, we can
- * also use pre-defined power levels.
- */
-
-/*
- * This defines the old power levels. They are still used by default
- * (level 1) and for thermal throttle (levels 3 through 5)
- */
-
-struct iwl_power_vec_entry {
- struct iwl_powertable_cmd cmd;
-	u8 no_dtim;	/* number of DTIM periods to skip */
-};
-
-static void iwl_legacy_power_sleep_cam_cmd(struct iwl_priv *priv,
- struct iwl_powertable_cmd *cmd)
-{
- memset(cmd, 0, sizeof(*cmd));
-
- if (priv->power_data.pci_pm)
- cmd->flags |= IWL_POWER_PCI_PM_MSK;
-
- IWL_DEBUG_POWER(priv, "Sleep command for CAM\n");
-}
-
-static int
-iwl_legacy_set_power(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd)
-{
- IWL_DEBUG_POWER(priv, "Sending power/sleep command\n");
- IWL_DEBUG_POWER(priv, "Flags value = 0x%08X\n", cmd->flags);
- IWL_DEBUG_POWER(priv, "Tx timeout = %u\n",
- le32_to_cpu(cmd->tx_data_timeout));
- IWL_DEBUG_POWER(priv, "Rx timeout = %u\n",
- le32_to_cpu(cmd->rx_data_timeout));
- IWL_DEBUG_POWER(priv,
- "Sleep interval vector = { %d , %d , %d , %d , %d }\n",
- le32_to_cpu(cmd->sleep_interval[0]),
- le32_to_cpu(cmd->sleep_interval[1]),
- le32_to_cpu(cmd->sleep_interval[2]),
- le32_to_cpu(cmd->sleep_interval[3]),
- le32_to_cpu(cmd->sleep_interval[4]));
-
- return iwl_legacy_send_cmd_pdu(priv, POWER_TABLE_CMD,
- sizeof(struct iwl_powertable_cmd), cmd);
-}
-
-int
-iwl_legacy_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
- bool force)
-{
- int ret;
- bool update_chains;
-
- lockdep_assert_held(&priv->mutex);
-
- /* Don't update the RX chain when chain noise calibration is running */
- update_chains = priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE ||
- priv->chain_noise_data.state == IWL_CHAIN_NOISE_ALIVE;
-
- if (!memcmp(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd)) && !force)
- return 0;
-
- if (!iwl_legacy_is_ready_rf(priv))
- return -EIO;
-
-	/* on scan complete, sleep_cmd_next is used, so it must be kept updated */
- memcpy(&priv->power_data.sleep_cmd_next, cmd, sizeof(*cmd));
- if (test_bit(STATUS_SCANNING, &priv->status) && !force) {
- IWL_DEBUG_INFO(priv, "Defer power set mode while scanning\n");
- return 0;
- }
-
- if (cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK)
- set_bit(STATUS_POWER_PMI, &priv->status);
-
- ret = iwl_legacy_set_power(priv, cmd);
- if (!ret) {
- if (!(cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK))
- clear_bit(STATUS_POWER_PMI, &priv->status);
-
- if (priv->cfg->ops->lib->update_chain_flags && update_chains)
- priv->cfg->ops->lib->update_chain_flags(priv);
- else if (priv->cfg->ops->lib->update_chain_flags)
- IWL_DEBUG_POWER(priv,
- "Cannot update the power, chain noise "
- "calibration running: %d\n",
- priv->chain_noise_data.state);
-
- memcpy(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd));
- } else
- IWL_ERR(priv, "set power fail, ret = %d", ret);
-
- return ret;
-}
-
-int iwl_legacy_power_update_mode(struct iwl_priv *priv, bool force)
-{
- struct iwl_powertable_cmd cmd;
-
- iwl_legacy_power_sleep_cam_cmd(priv, &cmd);
- return iwl_legacy_power_set_mode(priv, &cmd, force);
-}
-EXPORT_SYMBOL(iwl_legacy_power_update_mode);
-
-/* initialize to default */
-void iwl_legacy_power_initialize(struct iwl_priv *priv)
-{
- u16 lctl = iwl_legacy_pcie_link_ctl(priv);
-
- priv->power_data.pci_pm = !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN);
-
- priv->power_data.debug_sleep_level_override = -1;
-
- memset(&priv->power_data.sleep_cmd, 0,
- sizeof(priv->power_data.sleep_cmd));
-}
-EXPORT_SYMBOL(iwl_legacy_power_initialize);
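
iwl_legacy_power_set_mode() above follows a "skip if unchanged, defer while scanning, commit only on success" pattern: the requested command is memcmp'd against the cached one, remembered in sleep_cmd_next while a scan is in flight, and copied into sleep_cmd only after the firmware accepted it. A self-contained sketch of that flow; struct power_cmd, set_power_mode and the flag values here are invented for the demo.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct power_cmd {
	unsigned int flags;
	unsigned int rx_timeout;
	unsigned int tx_timeout;
};

static struct power_cmd current_cmd;	/* last command the "firmware" accepted */
static struct power_cmd next_cmd;	/* command to apply once scanning ends */
static bool scanning;

/* Stand-in for sending POWER_TABLE_CMD to the firmware. */
static int send_power_cmd(const struct power_cmd *cmd)
{
	printf("sending power cmd, flags=0x%x\n", cmd->flags);
	return 0;
}

/* Skip if unchanged, defer while scanning, cache only after a successful send. */
static int set_power_mode(const struct power_cmd *cmd, bool force)
{
	int ret;

	if (!force && !memcmp(&current_cmd, cmd, sizeof(*cmd))) {
		printf("unchanged, skipping\n");
		return 0;
	}

	next_cmd = *cmd;
	if (scanning && !force) {
		printf("scanning: deferring flags=0x%x\n", next_cmd.flags);
		return 0;
	}

	ret = send_power_cmd(cmd);
	if (!ret)
		current_cmd = *cmd;	/* commit only on success */
	return ret;
}

int main(void)
{
	struct power_cmd ps = { .flags = 0x8, .rx_timeout = 500, .tx_timeout = 500 };

	set_power_mode(&ps, false);	/* differs from the cache: sent */
	set_power_mode(&ps, false);	/* identical: skipped */

	scanning = true;
	ps.flags |= 0x1;
	set_power_mode(&ps, false);	/* deferred until the scan finishes */
	return 0;
}

Caching the last accepted command rather than the last requested one means a failed send leaves the driver's idea of the firmware state untouched, so a later call retries instead of silently believing the update happened.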
diff --git a/drivers/net/wireless/iwlegacy/iwl-power.h b/drivers/net/wireless/iwlegacy/iwl-power.h
deleted file mode 100644
index d30b36acdc4a..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-power.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *****************************************************************************/
-#ifndef __iwl_legacy_power_setting_h__
-#define __iwl_legacy_power_setting_h__
-
-#include "iwl-commands.h"
-
-enum iwl_power_level {
- IWL_POWER_INDEX_1,
- IWL_POWER_INDEX_2,
- IWL_POWER_INDEX_3,
- IWL_POWER_INDEX_4,
- IWL_POWER_INDEX_5,
- IWL_POWER_NUM
-};
-
-struct iwl_power_mgr {
- struct iwl_powertable_cmd sleep_cmd;
- struct iwl_powertable_cmd sleep_cmd_next;
- int debug_sleep_level_override;
- bool pci_pm;
-};
-
-int
-iwl_legacy_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
- bool force);
-int iwl_legacy_power_update_mode(struct iwl_priv *priv, bool force);
-void iwl_legacy_power_initialize(struct iwl_priv *priv);
-
-#endif /* __iwl_legacy_power_setting_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-rx.c b/drivers/net/wireless/iwlegacy/iwl-rx.c
deleted file mode 100644
index f4d21ec22497..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-rx.c
+++ /dev/null
@@ -1,282 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/etherdevice.h>
-#include <linux/slab.h>
-#include <linux/export.h>
-#include <net/mac80211.h>
-#include <asm/unaligned.h>
-#include "iwl-eeprom.h"
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-sta.h"
-#include "iwl-io.h"
-#include "iwl-helpers.h"
-/************************** RX-FUNCTIONS ****************************/
-/*
- * Rx theory of operation
- *
- * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
- * each of which point to Receive Buffers to be filled by the NIC. These get
- * used not only for Rx frames, but for any command response or notification
- * from the NIC. The driver and NIC manage the Rx buffers by means
- * of indexes into the circular buffer.
- *
- * Rx Queue Indexes
- * The host/firmware share two index registers for managing the Rx buffers.
- *
- * The READ index maps to the first position that the firmware may be writing
- * to -- the driver can read up to (but not including) this position and get
- * good data.
- * The READ index is managed by the firmware once the card is enabled.
- *
- * The WRITE index maps to the last position the driver has read from -- the
- * position preceding WRITE is the last slot the firmware can place a packet.
- *
- * The queue is empty (no good data) if WRITE = READ - 1, and is full if
- * WRITE = READ.
- *
- * During initialization, the host sets up the READ queue position to the first
- * INDEX position, and WRITE to the last (READ - 1 wrapped)
- *
- * When the firmware places a packet in a buffer, it will advance the READ index
- * and fire the RX interrupt. The driver can then query the READ index and
- * process as many packets as possible, moving the WRITE index forward as it
- * resets the Rx queue buffers with new memory.
- *
- * The management in the driver is as follows:
- * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
- * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
- * to replenish the iwl->rxq->rx_free.
- * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the
- * iwl->rxq is replenished and the READ INDEX is updated (updating the
- * 'processed' and 'read' driver indexes as well)
- * + A received packet is processed and handed to the kernel network stack,
- * detached from the iwl->rxq. The driver 'processed' index is updated.
- * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
- * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
- * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
- * were enough free buffers and RX_STALLED is set it is cleared.
- *
- *
- * Driver sequence:
- *
- * iwl_legacy_rx_queue_alloc() Allocates rx_free
- * iwl_rx_replenish() Replenishes rx_free list from rx_used, and calls
- * iwl_rx_queue_restock
- * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx
- * queue, updates firmware pointers, and updates
- * the WRITE index. If insufficient rx_free buffers
- * are available, schedules iwl_rx_replenish
- *
- * -- enable interrupts --
- * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the
- * READ INDEX, detaching the SKB from the pool.
- * Moves the packet buffer from queue to rx_used.
- * Calls iwl_rx_queue_restock to refill any empty
- * slots.
- * ...
- *
- */
-
-/**
- * iwl_legacy_rx_queue_space - Return number of free slots available in queue.
- */
-int iwl_legacy_rx_queue_space(const struct iwl_rx_queue *q)
-{
- int s = q->read - q->write;
- if (s <= 0)
- s += RX_QUEUE_SIZE;
- /* keep some buffer to not confuse full and empty queue */
- s -= 2;
- if (s < 0)
- s = 0;
- return s;
-}
-EXPORT_SYMBOL(iwl_legacy_rx_queue_space);
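With concrete numbers, the arithmetic above works out as follows. This is a
standalone sketch, not driver code: it mirrors iwl_legacy_rx_queue_space()
with a fixed queue size of 256 standing in for RX_QUEUE_SIZE, whose actual
value is defined elsewhere in the driver and is only assumed here.

#include <stdio.h>

#define QUEUE_SIZE 256	/* assumed stand-in for RX_QUEUE_SIZE */

/* Same arithmetic as iwl_legacy_rx_queue_space(): wrap the difference of
 * the two indexes around the circular buffer, then subtract the two-slot
 * reserve that keeps a full queue distinguishable from an empty one. */
static int queue_space(int read, int write)
{
	int s = read - write;

	if (s <= 0)
		s += QUEUE_SIZE;
	s -= 2;			/* reserve */
	if (s < 0)
		s = 0;
	return s;
}

int main(void)
{
	printf("%d\n", queue_space(10, 250));	/* -240 + 256 - 2 = 14 */
	printf("%d\n", queue_space(250, 10));	/* 240 - 2 = 238 */
	printf("%d\n", queue_space(10, 9));	/* adjacent indexes -> 0 free */
	return 0;
}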
-
-/**
- * iwl_legacy_rx_queue_update_write_ptr - Update the write pointer for the RX queue
- */
-void
-iwl_legacy_rx_queue_update_write_ptr(struct iwl_priv *priv,
- struct iwl_rx_queue *q)
-{
- unsigned long flags;
- u32 rx_wrt_ptr_reg = priv->hw_params.rx_wrt_ptr_reg;
- u32 reg;
-
- spin_lock_irqsave(&q->lock, flags);
-
- if (q->need_update == 0)
- goto exit_unlock;
-
- /* If power-saving is in use, make sure device is awake */
- if (test_bit(STATUS_POWER_PMI, &priv->status)) {
- reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
-
- if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
- IWL_DEBUG_INFO(priv,
- "Rx queue requesting wakeup,"
- " GP1 = 0x%x\n", reg);
- iwl_legacy_set_bit(priv, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
- goto exit_unlock;
- }
-
- q->write_actual = (q->write & ~0x7);
- iwl_legacy_write_direct32(priv, rx_wrt_ptr_reg,
- q->write_actual);
-
- /* Else device is assumed to be awake */
- } else {
- /* Device expects a multiple of 8 */
- q->write_actual = (q->write & ~0x7);
- iwl_legacy_write_direct32(priv, rx_wrt_ptr_reg,
- q->write_actual);
- }
-
- q->need_update = 0;
-
- exit_unlock:
- spin_unlock_irqrestore(&q->lock, flags);
-}
-EXPORT_SYMBOL(iwl_legacy_rx_queue_update_write_ptr);
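The (write & ~0x7) masking above simply rounds the write index down to the
nearest multiple of 8, which the device expects. A minimal standalone
illustration of that masking (not driver code):

#include <stdio.h>

int main(void)
{
	unsigned int write;

	/* Rounding the write index down to a multiple of 8, as done for
	 * q->write_actual above: clearing the low three bits. */
	for (write = 8; write <= 13; write++)
		printf("write=%u -> write_actual=%u\n", write, write & ~0x7u);
	/* every value from 8 to 13 maps to 8; 16 is reached only at write=16 */
	return 0;
}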
-
-int iwl_legacy_rx_queue_alloc(struct iwl_priv *priv)
-{
- struct iwl_rx_queue *rxq = &priv->rxq;
- struct device *dev = &priv->pci_dev->dev;
- int i;
-
- spin_lock_init(&rxq->lock);
- INIT_LIST_HEAD(&rxq->rx_free);
- INIT_LIST_HEAD(&rxq->rx_used);
-
- /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
- rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma,
- GFP_KERNEL);
- if (!rxq->bd)
- goto err_bd;
-
- rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct iwl_rb_status),
- &rxq->rb_stts_dma, GFP_KERNEL);
- if (!rxq->rb_stts)
- goto err_rb;
-
- /* Fill the rx_used queue with _all_ of the Rx buffers */
- for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
- list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
-
-	/* Mark the queue as if we have processed and used all buffers, but
-	 * have not restocked the Rx queue with fresh buffers */
- rxq->read = rxq->write = 0;
- rxq->write_actual = 0;
- rxq->free_count = 0;
- rxq->need_update = 0;
- return 0;
-
-err_rb:
- dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
- rxq->bd_dma);
-err_bd:
- return -ENOMEM;
-}
-EXPORT_SYMBOL(iwl_legacy_rx_queue_alloc);
-
-
-void iwl_legacy_rx_spectrum_measure_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif);
-
- if (!report->state) {
- IWL_DEBUG_11H(priv,
- "Spectrum Measure Notification: Start\n");
- return;
- }
-
- memcpy(&priv->measure_report, report, sizeof(*report));
- priv->measurement_status |= MEASUREMENT_READY;
-}
-EXPORT_SYMBOL(iwl_legacy_rx_spectrum_measure_notif);
-
-/*
- * returns non-zero if packet should be dropped
- */
-int iwl_legacy_set_decrypted_flag(struct iwl_priv *priv,
- struct ieee80211_hdr *hdr,
- u32 decrypt_res,
- struct ieee80211_rx_status *stats)
-{
- u16 fc = le16_to_cpu(hdr->frame_control);
-
- /*
- * All contexts have the same setting here due to it being
- * a module parameter, so OK to check any context.
- */
- if (priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags &
- RXON_FILTER_DIS_DECRYPT_MSK)
- return 0;
-
- if (!(fc & IEEE80211_FCTL_PROTECTED))
- return 0;
-
- IWL_DEBUG_RX(priv, "decrypt_res:0x%x\n", decrypt_res);
- switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
- case RX_RES_STATUS_SEC_TYPE_TKIP:
-		/* The uCode got a bad phase-1 key and pushes the packet up.
-		 * Decryption will be done in SW. */
- if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
- RX_RES_STATUS_BAD_KEY_TTAK)
- break;
-
- case RX_RES_STATUS_SEC_TYPE_WEP:
- if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
- RX_RES_STATUS_BAD_ICV_MIC) {
- /* bad ICV, the packet is destroyed since the
- * decryption is inplace, drop it */
- IWL_DEBUG_RX(priv, "Packet destroyed\n");
- return -1;
- }
- case RX_RES_STATUS_SEC_TYPE_CCMP:
- if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
- RX_RES_STATUS_DECRYPT_OK) {
-			IWL_DEBUG_RX(priv, "hw decrypted successfully\n");
- stats->flag |= RX_FLAG_DECRYPTED;
- }
- break;
-
- default:
- break;
- }
- return 0;
-}
-EXPORT_SYMBOL(iwl_legacy_set_decrypted_flag);
diff --git a/drivers/net/wireless/iwlegacy/iwl-scan.c b/drivers/net/wireless/iwlegacy/iwl-scan.c
deleted file mode 100644
index 521b73b527d3..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-scan.c
+++ /dev/null
@@ -1,550 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *****************************************************************************/
-#include <linux/slab.h>
-#include <linux/types.h>
-#include <linux/etherdevice.h>
-#include <linux/export.h>
-#include <net/mac80211.h>
-
-#include "iwl-eeprom.h"
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-sta.h"
-#include "iwl-io.h"
-#include "iwl-helpers.h"
-
-/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after
- * sending probe req. This should be set long enough to hear probe responses
- * from more than one AP. */
-#define IWL_ACTIVE_DWELL_TIME_24 (30) /* all times in msec */
-#define IWL_ACTIVE_DWELL_TIME_52 (20)
-
-#define IWL_ACTIVE_DWELL_FACTOR_24GHZ (3)
-#define IWL_ACTIVE_DWELL_FACTOR_52GHZ (2)
-
-/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel.
- * Must be set longer than active dwell time.
- * For the most reliable scan, set > AP beacon interval (typically 100msec). */
-#define IWL_PASSIVE_DWELL_TIME_24 (20) /* all times in msec */
-#define IWL_PASSIVE_DWELL_TIME_52 (10)
-#define IWL_PASSIVE_DWELL_BASE (100)
-#define IWL_CHANNEL_TUNE_TIME 5
-
-static int iwl_legacy_send_scan_abort(struct iwl_priv *priv)
-{
- int ret;
- struct iwl_rx_packet *pkt;
- struct iwl_host_cmd cmd = {
- .id = REPLY_SCAN_ABORT_CMD,
- .flags = CMD_WANT_SKB,
- };
-
- /* Exit instantly with error when device is not ready
- * to receive scan abort command or it does not perform
- * hardware scan currently */
- if (!test_bit(STATUS_READY, &priv->status) ||
- !test_bit(STATUS_GEO_CONFIGURED, &priv->status) ||
- !test_bit(STATUS_SCAN_HW, &priv->status) ||
- test_bit(STATUS_FW_ERROR, &priv->status) ||
- test_bit(STATUS_EXIT_PENDING, &priv->status))
- return -EIO;
-
- ret = iwl_legacy_send_cmd_sync(priv, &cmd);
- if (ret)
- return ret;
-
- pkt = (struct iwl_rx_packet *)cmd.reply_page;
- if (pkt->u.status != CAN_ABORT_STATUS) {
- /* The scan abort will return 1 for success or
- * 2 for "failure". A failure condition can be
-		 * due to simply not being in an active scan, which
-		 * can occur if we send the scan abort before the
-		 * microcode has notified us that a scan is
- * completed. */
- IWL_DEBUG_SCAN(priv, "SCAN_ABORT ret %d.\n", pkt->u.status);
- ret = -EIO;
- }
-
- iwl_legacy_free_pages(priv, cmd.reply_page);
- return ret;
-}
-
-static void iwl_legacy_complete_scan(struct iwl_priv *priv, bool aborted)
-{
- /* check if scan was requested from mac80211 */
- if (priv->scan_request) {
- IWL_DEBUG_SCAN(priv, "Complete scan in mac80211\n");
- ieee80211_scan_completed(priv->hw, aborted);
- }
-
- priv->scan_vif = NULL;
- priv->scan_request = NULL;
-}
-
-void iwl_legacy_force_scan_end(struct iwl_priv *priv)
-{
- lockdep_assert_held(&priv->mutex);
-
- if (!test_bit(STATUS_SCANNING, &priv->status)) {
- IWL_DEBUG_SCAN(priv, "Forcing scan end while not scanning\n");
- return;
- }
-
- IWL_DEBUG_SCAN(priv, "Forcing scan end\n");
- clear_bit(STATUS_SCANNING, &priv->status);
- clear_bit(STATUS_SCAN_HW, &priv->status);
- clear_bit(STATUS_SCAN_ABORTING, &priv->status);
- iwl_legacy_complete_scan(priv, true);
-}
-
-static void iwl_legacy_do_scan_abort(struct iwl_priv *priv)
-{
- int ret;
-
- lockdep_assert_held(&priv->mutex);
-
- if (!test_bit(STATUS_SCANNING, &priv->status)) {
- IWL_DEBUG_SCAN(priv, "Not performing scan to abort\n");
- return;
- }
-
- if (test_and_set_bit(STATUS_SCAN_ABORTING, &priv->status)) {
- IWL_DEBUG_SCAN(priv, "Scan abort in progress\n");
- return;
- }
-
- ret = iwl_legacy_send_scan_abort(priv);
- if (ret) {
- IWL_DEBUG_SCAN(priv, "Send scan abort failed %d\n", ret);
- iwl_legacy_force_scan_end(priv);
- } else
-		IWL_DEBUG_SCAN(priv, "Successfully sent scan abort\n");
-}
-
-/**
- * iwl_legacy_scan_cancel - Cancel any currently executing HW scan
- */
-int iwl_legacy_scan_cancel(struct iwl_priv *priv)
-{
- IWL_DEBUG_SCAN(priv, "Queuing abort scan\n");
- queue_work(priv->workqueue, &priv->abort_scan);
- return 0;
-}
-EXPORT_SYMBOL(iwl_legacy_scan_cancel);
-
-/**
- * iwl_legacy_scan_cancel_timeout - Cancel any currently executing HW scan
- * @ms: amount of time to wait (in milliseconds) for scan to abort
- *
- */
-int iwl_legacy_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms)
-{
- unsigned long timeout = jiffies + msecs_to_jiffies(ms);
-
- lockdep_assert_held(&priv->mutex);
-
- IWL_DEBUG_SCAN(priv, "Scan cancel timeout\n");
-
- iwl_legacy_do_scan_abort(priv);
-
- while (time_before_eq(jiffies, timeout)) {
- if (!test_bit(STATUS_SCAN_HW, &priv->status))
- break;
- msleep(20);
- }
-
- return test_bit(STATUS_SCAN_HW, &priv->status);
-}
-EXPORT_SYMBOL(iwl_legacy_scan_cancel_timeout);
-
-/* Service response to REPLY_SCAN_CMD (0x80) */
-static void iwl_legacy_rx_reply_scan(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_scanreq_notification *notif =
- (struct iwl_scanreq_notification *)pkt->u.raw;
-
- IWL_DEBUG_SCAN(priv, "Scan request status = 0x%x\n", notif->status);
-#endif
-}
-
-/* Service SCAN_START_NOTIFICATION (0x82) */
-static void iwl_legacy_rx_scan_start_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_scanstart_notification *notif =
- (struct iwl_scanstart_notification *)pkt->u.raw;
- priv->scan_start_tsf = le32_to_cpu(notif->tsf_low);
- IWL_DEBUG_SCAN(priv, "Scan start: "
- "%d [802.11%s] "
- "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n",
- notif->channel,
- notif->band ? "bg" : "a",
- le32_to_cpu(notif->tsf_high),
- le32_to_cpu(notif->tsf_low),
- notif->status, notif->beacon_timer);
-}
-
-/* Service SCAN_RESULTS_NOTIFICATION (0x83) */
-static void iwl_legacy_rx_scan_results_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_scanresults_notification *notif =
- (struct iwl_scanresults_notification *)pkt->u.raw;
-
- IWL_DEBUG_SCAN(priv, "Scan ch.res: "
- "%d [802.11%s] "
- "(TSF: 0x%08X:%08X) - %d "
- "elapsed=%lu usec\n",
- notif->channel,
- notif->band ? "bg" : "a",
- le32_to_cpu(notif->tsf_high),
- le32_to_cpu(notif->tsf_low),
- le32_to_cpu(notif->statistics[0]),
- le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf);
-#endif
-}
-
-/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */
-static void iwl_legacy_rx_scan_complete_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
-#endif
-
- IWL_DEBUG_SCAN(priv,
- "Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
- scan_notif->scanned_channels,
- scan_notif->tsf_low,
- scan_notif->tsf_high, scan_notif->status);
-
- /* The HW is no longer scanning */
- clear_bit(STATUS_SCAN_HW, &priv->status);
-
- IWL_DEBUG_SCAN(priv, "Scan on %sGHz took %dms\n",
- (priv->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2",
- jiffies_to_msecs(jiffies - priv->scan_start));
-
- queue_work(priv->workqueue, &priv->scan_completed);
-}
-
-void iwl_legacy_setup_rx_scan_handlers(struct iwl_priv *priv)
-{
- /* scan handlers */
- priv->rx_handlers[REPLY_SCAN_CMD] = iwl_legacy_rx_reply_scan;
- priv->rx_handlers[SCAN_START_NOTIFICATION] =
- iwl_legacy_rx_scan_start_notif;
- priv->rx_handlers[SCAN_RESULTS_NOTIFICATION] =
- iwl_legacy_rx_scan_results_notif;
- priv->rx_handlers[SCAN_COMPLETE_NOTIFICATION] =
- iwl_legacy_rx_scan_complete_notif;
-}
-EXPORT_SYMBOL(iwl_legacy_setup_rx_scan_handlers);
-
-inline u16 iwl_legacy_get_active_dwell_time(struct iwl_priv *priv,
- enum ieee80211_band band,
- u8 n_probes)
-{
- if (band == IEEE80211_BAND_5GHZ)
- return IWL_ACTIVE_DWELL_TIME_52 +
- IWL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1);
- else
- return IWL_ACTIVE_DWELL_TIME_24 +
- IWL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1);
-}
-EXPORT_SYMBOL(iwl_legacy_get_active_dwell_time);
-
-u16 iwl_legacy_get_passive_dwell_time(struct iwl_priv *priv,
- enum ieee80211_band band,
- struct ieee80211_vif *vif)
-{
- struct iwl_rxon_context *ctx;
- u16 passive = (band == IEEE80211_BAND_2GHZ) ?
- IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 :
- IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52;
-
- if (iwl_legacy_is_any_associated(priv)) {
- /*
- * If we're associated, we clamp the maximum passive
- * dwell time to be 98% of the smallest beacon interval
- * (minus 2 * channel tune time)
- */
- for_each_context(priv, ctx) {
- u16 value;
-
- if (!iwl_legacy_is_associated_ctx(ctx))
- continue;
- value = ctx->vif ? ctx->vif->bss_conf.beacon_int : 0;
- if ((value > IWL_PASSIVE_DWELL_BASE) || !value)
- value = IWL_PASSIVE_DWELL_BASE;
- value = (value * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
- passive = min(value, passive);
- }
- }
-
- return passive;
-}
-EXPORT_SYMBOL(iwl_legacy_get_passive_dwell_time);
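A worked example of the clamping above, using the dwell-time constants
defined at the top of this file; the beacon interval is an illustrative
value, and this is only a sketch of the arithmetic, not driver code:

#include <stdio.h>

#define PASSIVE_DWELL_TIME_24	20	/* from the defines above */
#define PASSIVE_DWELL_BASE	100
#define CHANNEL_TUNE_TIME	5

int main(void)
{
	unsigned int passive = PASSIVE_DWELL_BASE + PASSIVE_DWELL_TIME_24; /* 120 */
	unsigned int beacon_int = 100;	/* illustrative beacon interval */
	unsigned int value = beacon_int;

	/* Same clamping as iwl_legacy_get_passive_dwell_time(): cap at the
	 * base value, take 98% of it, subtract twice the tune time. */
	if (value > PASSIVE_DWELL_BASE || !value)
		value = PASSIVE_DWELL_BASE;
	value = (value * 98) / 100 - CHANNEL_TUNE_TIME * 2;	/* 98 - 10 = 88 */

	if (value < passive)
		passive = value;

	printf("passive dwell = %u\n", passive);	/* 88 */
	return 0;
}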
-
-void iwl_legacy_init_scan_params(struct iwl_priv *priv)
-{
- u8 ant_idx = fls(priv->hw_params.valid_tx_ant) - 1;
- if (!priv->scan_tx_ant[IEEE80211_BAND_5GHZ])
- priv->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx;
- if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ])
- priv->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx;
-}
-EXPORT_SYMBOL(iwl_legacy_init_scan_params);
-
-static int iwl_legacy_scan_initiate(struct iwl_priv *priv,
- struct ieee80211_vif *vif)
-{
- int ret;
-
- lockdep_assert_held(&priv->mutex);
-
- if (WARN_ON(!priv->cfg->ops->utils->request_scan))
- return -EOPNOTSUPP;
-
- cancel_delayed_work(&priv->scan_check);
-
- if (!iwl_legacy_is_ready_rf(priv)) {
- IWL_WARN(priv, "Request scan called when driver not ready.\n");
- return -EIO;
- }
-
- if (test_bit(STATUS_SCAN_HW, &priv->status)) {
- IWL_DEBUG_SCAN(priv,
-			"Multiple concurrent scan requests not supported.\n");
- return -EBUSY;
- }
-
- if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
- IWL_DEBUG_SCAN(priv, "Scan request while abort pending.\n");
- return -EBUSY;
- }
-
- IWL_DEBUG_SCAN(priv, "Starting scan...\n");
-
- set_bit(STATUS_SCANNING, &priv->status);
- priv->scan_start = jiffies;
-
- ret = priv->cfg->ops->utils->request_scan(priv, vif);
- if (ret) {
- clear_bit(STATUS_SCANNING, &priv->status);
- return ret;
- }
-
- queue_delayed_work(priv->workqueue, &priv->scan_check,
- IWL_SCAN_CHECK_WATCHDOG);
-
- return 0;
-}
-
-int iwl_legacy_mac_hw_scan(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct cfg80211_scan_request *req)
-{
- struct iwl_priv *priv = hw->priv;
- int ret;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- if (req->n_channels == 0)
- return -EINVAL;
-
- mutex_lock(&priv->mutex);
-
- if (test_bit(STATUS_SCANNING, &priv->status)) {
- IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
- ret = -EAGAIN;
- goto out_unlock;
- }
-
- /* mac80211 will only ask for one band at a time */
- priv->scan_request = req;
- priv->scan_vif = vif;
- priv->scan_band = req->channels[0]->band;
-
- ret = iwl_legacy_scan_initiate(priv, vif);
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
-
-out_unlock:
- mutex_unlock(&priv->mutex);
-
- return ret;
-}
-EXPORT_SYMBOL(iwl_legacy_mac_hw_scan);
-
-static void iwl_legacy_bg_scan_check(struct work_struct *data)
-{
- struct iwl_priv *priv =
- container_of(data, struct iwl_priv, scan_check.work);
-
- IWL_DEBUG_SCAN(priv, "Scan check work\n");
-
-	/* Since we got here, the firmware has not finished the scan and
-	 * is most likely in bad shape, so we don't bother sending an
-	 * abort command; just force scan completion to mac80211 */
- mutex_lock(&priv->mutex);
- iwl_legacy_force_scan_end(priv);
- mutex_unlock(&priv->mutex);
-}
-
-/**
- * iwl_legacy_fill_probe_req - fill in all required fields and IE for probe request
- */
-
-u16
-iwl_legacy_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
- const u8 *ta, const u8 *ies, int ie_len, int left)
-{
- int len = 0;
- u8 *pos = NULL;
-
- /* Make sure there is enough space for the probe request,
- * two mandatory IEs and the data */
- left -= 24;
- if (left < 0)
- return 0;
-
- frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
- memcpy(frame->da, iwlegacy_bcast_addr, ETH_ALEN);
- memcpy(frame->sa, ta, ETH_ALEN);
- memcpy(frame->bssid, iwlegacy_bcast_addr, ETH_ALEN);
- frame->seq_ctrl = 0;
-
- len += 24;
-
- /* ...next IE... */
- pos = &frame->u.probe_req.variable[0];
-
- /* fill in our indirect SSID IE */
- left -= 2;
- if (left < 0)
- return 0;
- *pos++ = WLAN_EID_SSID;
- *pos++ = 0;
-
- len += 2;
-
- if (WARN_ON(left < ie_len))
- return len;
-
- if (ies && ie_len) {
- memcpy(pos, ies, ie_len);
- len += ie_len;
- }
-
- return (u16)len;
-}
-EXPORT_SYMBOL(iwl_legacy_fill_probe_req);
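The space accounting in iwl_legacy_fill_probe_req() above reduces to a
simple check and sum: 24 bytes of management header, a 2-byte zero-length
SSID IE, then the caller-supplied IEs. A sketch of just that arithmetic,
with the frame-building details omitted (not driver code):

#include <stdio.h>

/* Returns the length that would be written, or 0 if 'left' is too small,
 * mirroring the checks in iwl_legacy_fill_probe_req(). */
static int probe_req_len(int ie_len, int left)
{
	int len = 0;

	left -= 24;			/* 802.11 management header */
	if (left < 0)
		return 0;
	len += 24;

	left -= 2;			/* empty (wildcard) SSID IE */
	if (left < 0)
		return 0;
	len += 2;

	if (left < ie_len)		/* not enough room for caller IEs */
		return len;
	return len + ie_len;
}

int main(void)
{
	printf("%d\n", probe_req_len(50, 200));	/* 24 + 2 + 50 = 76 */
	printf("%d\n", probe_req_len(50, 30));	/* header + SSID fit only: 26 */
	printf("%d\n", probe_req_len(50, 20));	/* too small even for header: 0 */
	return 0;
}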
-
-static void iwl_legacy_bg_abort_scan(struct work_struct *work)
-{
- struct iwl_priv *priv = container_of(work, struct iwl_priv, abort_scan);
-
- IWL_DEBUG_SCAN(priv, "Abort scan work\n");
-
-	/* We keep the scan_check work queued in case the firmware does not
-	 * report back a scan-completed notification */
- mutex_lock(&priv->mutex);
- iwl_legacy_scan_cancel_timeout(priv, 200);
- mutex_unlock(&priv->mutex);
-}
-
-static void iwl_legacy_bg_scan_completed(struct work_struct *work)
-{
- struct iwl_priv *priv =
- container_of(work, struct iwl_priv, scan_completed);
- bool aborted;
-
- IWL_DEBUG_SCAN(priv, "Completed scan.\n");
-
- cancel_delayed_work(&priv->scan_check);
-
- mutex_lock(&priv->mutex);
-
- aborted = test_and_clear_bit(STATUS_SCAN_ABORTING, &priv->status);
- if (aborted)
- IWL_DEBUG_SCAN(priv, "Aborted scan completed.\n");
-
- if (!test_and_clear_bit(STATUS_SCANNING, &priv->status)) {
- IWL_DEBUG_SCAN(priv, "Scan already completed.\n");
- goto out_settings;
- }
-
- iwl_legacy_complete_scan(priv, aborted);
-
-out_settings:
- /* Can we still talk to firmware ? */
- if (!iwl_legacy_is_ready_rf(priv))
- goto out;
-
- /*
- * We do not commit power settings while scan is pending,
- * do it now if the settings changed.
- */
- iwl_legacy_power_set_mode(priv, &priv->power_data.sleep_cmd_next, false);
- iwl_legacy_set_tx_power(priv, priv->tx_power_next, false);
-
- priv->cfg->ops->utils->post_scan(priv);
-
-out:
- mutex_unlock(&priv->mutex);
-}
-
-void iwl_legacy_setup_scan_deferred_work(struct iwl_priv *priv)
-{
- INIT_WORK(&priv->scan_completed, iwl_legacy_bg_scan_completed);
- INIT_WORK(&priv->abort_scan, iwl_legacy_bg_abort_scan);
- INIT_DELAYED_WORK(&priv->scan_check, iwl_legacy_bg_scan_check);
-}
-EXPORT_SYMBOL(iwl_legacy_setup_scan_deferred_work);
-
-void iwl_legacy_cancel_scan_deferred_work(struct iwl_priv *priv)
-{
- cancel_work_sync(&priv->abort_scan);
- cancel_work_sync(&priv->scan_completed);
-
- if (cancel_delayed_work_sync(&priv->scan_check)) {
- mutex_lock(&priv->mutex);
- iwl_legacy_force_scan_end(priv);
- mutex_unlock(&priv->mutex);
- }
-}
-EXPORT_SYMBOL(iwl_legacy_cancel_scan_deferred_work);
diff --git a/drivers/net/wireless/iwlegacy/iwl-spectrum.h b/drivers/net/wireless/iwlegacy/iwl-spectrum.h
index 9f70a4723103..85fe48e520f9 100644
--- a/drivers/net/wireless/iwlegacy/iwl-spectrum.h
+++ b/drivers/net/wireless/iwlegacy/iwl-spectrum.h
@@ -26,8 +26,8 @@
*
*****************************************************************************/
-#ifndef __iwl_legacy_spectrum_h__
-#define __iwl_legacy_spectrum_h__
+#ifndef __il_spectrum_h__
+#define __il_spectrum_h__
enum { /* ieee80211_basic_report.map */
IEEE80211_BASIC_MAP_BSS = (1 << 0),
IEEE80211_BASIC_MAP_OFDM = (1 << 1),
diff --git a/drivers/net/wireless/iwlegacy/iwl-sta.c b/drivers/net/wireless/iwlegacy/iwl-sta.c
deleted file mode 100644
index f10df3e2813a..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-sta.c
+++ /dev/null
@@ -1,817 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <net/mac80211.h>
-#include <linux/etherdevice.h>
-#include <linux/sched.h>
-#include <linux/lockdep.h>
-#include <linux/export.h>
-
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-sta.h"
-
-/* priv->sta_lock must be held */
-static void iwl_legacy_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id)
-{
-
- if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE))
- IWL_ERR(priv,
- "ACTIVATE a non DRIVER active station id %u addr %pM\n",
- sta_id, priv->stations[sta_id].sta.sta.addr);
-
- if (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) {
- IWL_DEBUG_ASSOC(priv,
- "STA id %u addr %pM already present"
- " in uCode (according to driver)\n",
- sta_id, priv->stations[sta_id].sta.sta.addr);
- } else {
- priv->stations[sta_id].used |= IWL_STA_UCODE_ACTIVE;
- IWL_DEBUG_ASSOC(priv, "Added STA id %u addr %pM to uCode\n",
- sta_id, priv->stations[sta_id].sta.sta.addr);
- }
-}
-
-static int iwl_legacy_process_add_sta_resp(struct iwl_priv *priv,
- struct iwl_legacy_addsta_cmd *addsta,
- struct iwl_rx_packet *pkt,
- bool sync)
-{
- u8 sta_id = addsta->sta.sta_id;
- unsigned long flags;
- int ret = -EIO;
-
- if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
- IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n",
- pkt->hdr.flags);
- return ret;
- }
-
- IWL_DEBUG_INFO(priv, "Processing response for adding station %u\n",
- sta_id);
-
- spin_lock_irqsave(&priv->sta_lock, flags);
-
- switch (pkt->u.add_sta.status) {
- case ADD_STA_SUCCESS_MSK:
- IWL_DEBUG_INFO(priv, "REPLY_ADD_STA PASSED\n");
- iwl_legacy_sta_ucode_activate(priv, sta_id);
- ret = 0;
- break;
- case ADD_STA_NO_ROOM_IN_TABLE:
- IWL_ERR(priv, "Adding station %d failed, no room in table.\n",
- sta_id);
- break;
- case ADD_STA_NO_BLOCK_ACK_RESOURCE:
- IWL_ERR(priv,
- "Adding station %d failed, no block ack resource.\n",
- sta_id);
- break;
- case ADD_STA_MODIFY_NON_EXIST_STA:
- IWL_ERR(priv, "Attempting to modify non-existing station %d\n",
- sta_id);
- break;
- default:
- IWL_DEBUG_ASSOC(priv, "Received REPLY_ADD_STA:(0x%08X)\n",
- pkt->u.add_sta.status);
- break;
- }
-
- IWL_DEBUG_INFO(priv, "%s station id %u addr %pM\n",
- priv->stations[sta_id].sta.mode ==
- STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
- sta_id, priv->stations[sta_id].sta.sta.addr);
-
- /*
- * XXX: The MAC address in the command buffer is often changed from
- * the original sent to the device. That is, the MAC address
- * written to the command buffer often is not the same MAC address
- * read from the command buffer when the command returns. This
- * issue has not yet been resolved and this debugging is left to
- * observe the problem.
- */
- IWL_DEBUG_INFO(priv, "%s station according to cmd buffer %pM\n",
- priv->stations[sta_id].sta.mode ==
- STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
- addsta->sta.addr);
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- return ret;
-}
-
-static void iwl_legacy_add_sta_callback(struct iwl_priv *priv,
- struct iwl_device_cmd *cmd,
- struct iwl_rx_packet *pkt)
-{
- struct iwl_legacy_addsta_cmd *addsta =
- (struct iwl_legacy_addsta_cmd *)cmd->cmd.payload;
-
- iwl_legacy_process_add_sta_resp(priv, addsta, pkt, false);
-
-}
-
-int iwl_legacy_send_add_sta(struct iwl_priv *priv,
- struct iwl_legacy_addsta_cmd *sta, u8 flags)
-{
- struct iwl_rx_packet *pkt = NULL;
- int ret = 0;
- u8 data[sizeof(*sta)];
- struct iwl_host_cmd cmd = {
- .id = REPLY_ADD_STA,
- .flags = flags,
- .data = data,
- };
- u8 sta_id __maybe_unused = sta->sta.sta_id;
-
- IWL_DEBUG_INFO(priv, "Adding sta %u (%pM) %ssynchronously\n",
- sta_id, sta->sta.addr, flags & CMD_ASYNC ? "a" : "");
-
- if (flags & CMD_ASYNC)
- cmd.callback = iwl_legacy_add_sta_callback;
- else {
- cmd.flags |= CMD_WANT_SKB;
- might_sleep();
- }
-
- cmd.len = priv->cfg->ops->utils->build_addsta_hcmd(sta, data);
- ret = iwl_legacy_send_cmd(priv, &cmd);
-
- if (ret || (flags & CMD_ASYNC))
- return ret;
-
- if (ret == 0) {
- pkt = (struct iwl_rx_packet *)cmd.reply_page;
- ret = iwl_legacy_process_add_sta_resp(priv, sta, pkt, true);
- }
- iwl_legacy_free_pages(priv, cmd.reply_page);
-
- return ret;
-}
-EXPORT_SYMBOL(iwl_legacy_send_add_sta);
-
-static void iwl_legacy_set_ht_add_station(struct iwl_priv *priv, u8 index,
- struct ieee80211_sta *sta,
- struct iwl_rxon_context *ctx)
-{
- struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap;
- __le32 sta_flags;
- u8 mimo_ps_mode;
-
- if (!sta || !sta_ht_inf->ht_supported)
- goto done;
-
- mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2;
- IWL_DEBUG_ASSOC(priv, "spatial multiplexing power save mode: %s\n",
- (mimo_ps_mode == WLAN_HT_CAP_SM_PS_STATIC) ?
- "static" :
- (mimo_ps_mode == WLAN_HT_CAP_SM_PS_DYNAMIC) ?
- "dynamic" : "disabled");
-
- sta_flags = priv->stations[index].sta.station_flags;
-
- sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);
-
- switch (mimo_ps_mode) {
- case WLAN_HT_CAP_SM_PS_STATIC:
- sta_flags |= STA_FLG_MIMO_DIS_MSK;
- break;
- case WLAN_HT_CAP_SM_PS_DYNAMIC:
- sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK;
- break;
- case WLAN_HT_CAP_SM_PS_DISABLED:
- break;
- default:
- IWL_WARN(priv, "Invalid MIMO PS mode %d\n", mimo_ps_mode);
- break;
- }
-
- sta_flags |= cpu_to_le32(
- (u32)sta_ht_inf->ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS);
-
- sta_flags |= cpu_to_le32(
- (u32)sta_ht_inf->ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);
-
- if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
- sta_flags |= STA_FLG_HT40_EN_MSK;
- else
- sta_flags &= ~STA_FLG_HT40_EN_MSK;
-
- priv->stations[index].sta.station_flags = sta_flags;
- done:
- return;
-}
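The mimo_ps_mode extraction above reads the two-bit SM power save field out
of the HT capability word; the shift by 2 corresponds to the field's
position in bits 2-3. A standalone sketch, with the mask value assumed from
the 802.11n capability layout rather than taken from this file:

#include <stdio.h>

#define HT_CAP_SM_PS		0x000C	/* assumed: SM PS field, bits 2-3 */
#define HT_CAP_SM_PS_SHIFT	2

/* Decode the SM PS field the same way the switch above does:
 * 0 = static, 1 = dynamic, 3 = disabled, anything else is reserved. */
static const char *sm_ps_name(unsigned int cap)
{
	switch ((cap & HT_CAP_SM_PS) >> HT_CAP_SM_PS_SHIFT) {
	case 0:  return "static";
	case 1:  return "dynamic";
	case 3:  return "disabled";
	default: return "reserved";
	}
}

int main(void)
{
	/* Only bits 2-3 matter; other capability bits are ignored here. */
	printf("%s\n", sm_ps_name(0x0000));	/* static */
	printf("%s\n", sm_ps_name(0x0004));	/* dynamic */
	printf("%s\n", sm_ps_name(0x000C));	/* disabled */
	return 0;
}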
-
-/**
- * iwl_legacy_prep_station - Prepare station information for addition
- *
- * should be called with sta_lock held
- */
-u8 iwl_legacy_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
- const u8 *addr, bool is_ap, struct ieee80211_sta *sta)
-{
- struct iwl_station_entry *station;
- int i;
- u8 sta_id = IWL_INVALID_STATION;
- u16 rate;
-
- if (is_ap)
- sta_id = ctx->ap_sta_id;
- else if (is_broadcast_ether_addr(addr))
- sta_id = ctx->bcast_sta_id;
- else
- for (i = IWL_STA_ID; i < priv->hw_params.max_stations; i++) {
- if (!compare_ether_addr(priv->stations[i].sta.sta.addr,
- addr)) {
- sta_id = i;
- break;
- }
-
- if (!priv->stations[i].used &&
- sta_id == IWL_INVALID_STATION)
- sta_id = i;
- }
-
- /*
- * These two conditions have the same outcome, but keep them
- * separate
- */
- if (unlikely(sta_id == IWL_INVALID_STATION))
- return sta_id;
-
- /*
- * uCode is not able to deal with multiple requests to add a
- * station. Keep track if one is in progress so that we do not send
- * another.
- */
- if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) {
- IWL_DEBUG_INFO(priv,
- "STA %d already in process of being added.\n",
- sta_id);
- return sta_id;
- }
-
- if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) &&
- (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) &&
- !compare_ether_addr(priv->stations[sta_id].sta.sta.addr, addr)) {
- IWL_DEBUG_ASSOC(priv,
- "STA %d (%pM) already added, not adding again.\n",
- sta_id, addr);
- return sta_id;
- }
-
- station = &priv->stations[sta_id];
- station->used = IWL_STA_DRIVER_ACTIVE;
- IWL_DEBUG_ASSOC(priv, "Add STA to driver ID %d: %pM\n",
- sta_id, addr);
- priv->num_stations++;
-
- /* Set up the REPLY_ADD_STA command to send to device */
- memset(&station->sta, 0, sizeof(struct iwl_legacy_addsta_cmd));
- memcpy(station->sta.sta.addr, addr, ETH_ALEN);
- station->sta.mode = 0;
- station->sta.sta.sta_id = sta_id;
- station->sta.station_flags = ctx->station_flags;
- station->ctxid = ctx->ctxid;
-
- if (sta) {
- struct iwl_station_priv_common *sta_priv;
-
- sta_priv = (void *)sta->drv_priv;
- sta_priv->ctx = ctx;
- }
-
- /*
- * OK to call unconditionally, since local stations (IBSS BSSID
- * STA and broadcast STA) pass in a NULL sta, and mac80211
- * doesn't allow HT IBSS.
- */
- iwl_legacy_set_ht_add_station(priv, sta_id, sta, ctx);
-
- /* 3945 only */
- rate = (priv->band == IEEE80211_BAND_5GHZ) ?
- IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP;
- /* Turn on both antennas for the station... */
- station->sta.rate_n_flags = cpu_to_le16(rate | RATE_MCS_ANT_AB_MSK);
-
- return sta_id;
-
-}
-EXPORT_SYMBOL_GPL(iwl_legacy_prep_station);
-
-#define STA_WAIT_TIMEOUT (HZ/2)
-
-/**
- * iwl_legacy_add_station_common -
- */
-int
-iwl_legacy_add_station_common(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- const u8 *addr, bool is_ap,
- struct ieee80211_sta *sta, u8 *sta_id_r)
-{
- unsigned long flags_spin;
- int ret = 0;
- u8 sta_id;
- struct iwl_legacy_addsta_cmd sta_cmd;
-
- *sta_id_r = 0;
- spin_lock_irqsave(&priv->sta_lock, flags_spin);
- sta_id = iwl_legacy_prep_station(priv, ctx, addr, is_ap, sta);
- if (sta_id == IWL_INVALID_STATION) {
- IWL_ERR(priv, "Unable to prepare station %pM for addition\n",
- addr);
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
- return -EINVAL;
- }
-
- /*
- * uCode is not able to deal with multiple requests to add a
- * station. Keep track if one is in progress so that we do not send
- * another.
- */
- if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) {
- IWL_DEBUG_INFO(priv,
- "STA %d already in process of being added.\n",
- sta_id);
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
- return -EEXIST;
- }
-
- if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) &&
- (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) {
- IWL_DEBUG_ASSOC(priv,
- "STA %d (%pM) already added, not adding again.\n",
- sta_id, addr);
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
- return -EEXIST;
- }
-
- priv->stations[sta_id].used |= IWL_STA_UCODE_INPROGRESS;
- memcpy(&sta_cmd, &priv->stations[sta_id].sta,
- sizeof(struct iwl_legacy_addsta_cmd));
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
-
- /* Add station to device's station table */
- ret = iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
- if (ret) {
- spin_lock_irqsave(&priv->sta_lock, flags_spin);
- IWL_ERR(priv, "Adding station %pM failed.\n",
- priv->stations[sta_id].sta.sta.addr);
- priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
- priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
- }
- *sta_id_r = sta_id;
- return ret;
-}
-EXPORT_SYMBOL(iwl_legacy_add_station_common);
-
-/**
- * iwl_legacy_sta_ucode_deactivate - deactivate ucode status for a station
- *
- * priv->sta_lock must be held
- */
-static void iwl_legacy_sta_ucode_deactivate(struct iwl_priv *priv, u8 sta_id)
-{
-	/* uCode must be active and the driver must not be active */
- if ((priv->stations[sta_id].used &
- (IWL_STA_UCODE_ACTIVE | IWL_STA_DRIVER_ACTIVE)) !=
- IWL_STA_UCODE_ACTIVE)
- IWL_ERR(priv, "removed non active STA %u\n", sta_id);
-
- priv->stations[sta_id].used &= ~IWL_STA_UCODE_ACTIVE;
-
- memset(&priv->stations[sta_id], 0, sizeof(struct iwl_station_entry));
- IWL_DEBUG_ASSOC(priv, "Removed STA %u\n", sta_id);
-}
-
-static int iwl_legacy_send_remove_station(struct iwl_priv *priv,
- const u8 *addr, int sta_id,
- bool temporary)
-{
- struct iwl_rx_packet *pkt;
- int ret;
-
- unsigned long flags_spin;
- struct iwl_rem_sta_cmd rm_sta_cmd;
-
- struct iwl_host_cmd cmd = {
- .id = REPLY_REMOVE_STA,
- .len = sizeof(struct iwl_rem_sta_cmd),
- .flags = CMD_SYNC,
- .data = &rm_sta_cmd,
- };
-
- memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
- rm_sta_cmd.num_sta = 1;
- memcpy(&rm_sta_cmd.addr, addr, ETH_ALEN);
-
- cmd.flags |= CMD_WANT_SKB;
-
- ret = iwl_legacy_send_cmd(priv, &cmd);
-
- if (ret)
- return ret;
-
- pkt = (struct iwl_rx_packet *)cmd.reply_page;
- if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
- IWL_ERR(priv, "Bad return from REPLY_REMOVE_STA (0x%08X)\n",
- pkt->hdr.flags);
- ret = -EIO;
- }
-
- if (!ret) {
- switch (pkt->u.rem_sta.status) {
- case REM_STA_SUCCESS_MSK:
- if (!temporary) {
- spin_lock_irqsave(&priv->sta_lock, flags_spin);
- iwl_legacy_sta_ucode_deactivate(priv, sta_id);
- spin_unlock_irqrestore(&priv->sta_lock,
- flags_spin);
- }
- IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n");
- break;
- default:
- ret = -EIO;
- IWL_ERR(priv, "REPLY_REMOVE_STA failed\n");
- break;
- }
- }
- iwl_legacy_free_pages(priv, cmd.reply_page);
-
- return ret;
-}
-
-/**
- * iwl_legacy_remove_station - Remove driver's knowledge of station.
- */
-int iwl_legacy_remove_station(struct iwl_priv *priv, const u8 sta_id,
- const u8 *addr)
-{
- unsigned long flags;
-
- if (!iwl_legacy_is_ready(priv)) {
- IWL_DEBUG_INFO(priv,
- "Unable to remove station %pM, device not ready.\n",
- addr);
- /*
- * It is typical for stations to be removed when we are
- * going down. Return success since device will be down
- * soon anyway
- */
- return 0;
- }
-
- IWL_DEBUG_ASSOC(priv, "Removing STA from driver:%d %pM\n",
- sta_id, addr);
-
- if (WARN_ON(sta_id == IWL_INVALID_STATION))
- return -EINVAL;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
-
- if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
- IWL_DEBUG_INFO(priv, "Removing %pM but non DRIVER active\n",
- addr);
- goto out_err;
- }
-
- if (!(priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) {
- IWL_DEBUG_INFO(priv, "Removing %pM but non UCODE active\n",
- addr);
- goto out_err;
- }
-
- if (priv->stations[sta_id].used & IWL_STA_LOCAL) {
- kfree(priv->stations[sta_id].lq);
- priv->stations[sta_id].lq = NULL;
- }
-
- priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
-
- priv->num_stations--;
-
- BUG_ON(priv->num_stations < 0);
-
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- return iwl_legacy_send_remove_station(priv, addr, sta_id, false);
-out_err:
- spin_unlock_irqrestore(&priv->sta_lock, flags);
- return -EINVAL;
-}
-EXPORT_SYMBOL_GPL(iwl_legacy_remove_station);
-
-/**
- * iwl_legacy_clear_ucode_stations - clear ucode station table bits
- *
- * This function clears all the bits in the driver indicating
- * which stations are active in the ucode. Call when something
- * other than explicit station management would cause this in
- * the ucode, e.g. unassociated RXON.
- */
-void iwl_legacy_clear_ucode_stations(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
-{
- int i;
- unsigned long flags_spin;
- bool cleared = false;
-
- IWL_DEBUG_INFO(priv, "Clearing ucode stations in driver\n");
-
- spin_lock_irqsave(&priv->sta_lock, flags_spin);
- for (i = 0; i < priv->hw_params.max_stations; i++) {
- if (ctx && ctx->ctxid != priv->stations[i].ctxid)
- continue;
-
- if (priv->stations[i].used & IWL_STA_UCODE_ACTIVE) {
- IWL_DEBUG_INFO(priv,
- "Clearing ucode active for station %d\n", i);
- priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE;
- cleared = true;
- }
- }
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
-
- if (!cleared)
- IWL_DEBUG_INFO(priv,
- "No active stations found to be cleared\n");
-}
-EXPORT_SYMBOL(iwl_legacy_clear_ucode_stations);
-
-/**
- * iwl_legacy_restore_stations() - Restore driver known stations to device
- *
- * All stations considered active by the driver, but not present in the
- * ucode, are restored.
- *
- * Function sleeps.
- */
-void
-iwl_legacy_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
-{
- struct iwl_legacy_addsta_cmd sta_cmd;
- struct iwl_link_quality_cmd lq;
- unsigned long flags_spin;
- int i;
- bool found = false;
- int ret;
- bool send_lq;
-
- if (!iwl_legacy_is_ready(priv)) {
- IWL_DEBUG_INFO(priv,
- "Not ready yet, not restoring any stations.\n");
- return;
- }
-
- IWL_DEBUG_ASSOC(priv, "Restoring all known stations ... start.\n");
- spin_lock_irqsave(&priv->sta_lock, flags_spin);
- for (i = 0; i < priv->hw_params.max_stations; i++) {
- if (ctx->ctxid != priv->stations[i].ctxid)
- continue;
- if ((priv->stations[i].used & IWL_STA_DRIVER_ACTIVE) &&
- !(priv->stations[i].used & IWL_STA_UCODE_ACTIVE)) {
- IWL_DEBUG_ASSOC(priv, "Restoring sta %pM\n",
- priv->stations[i].sta.sta.addr);
- priv->stations[i].sta.mode = 0;
- priv->stations[i].used |= IWL_STA_UCODE_INPROGRESS;
- found = true;
- }
- }
-
- for (i = 0; i < priv->hw_params.max_stations; i++) {
- if ((priv->stations[i].used & IWL_STA_UCODE_INPROGRESS)) {
- memcpy(&sta_cmd, &priv->stations[i].sta,
- sizeof(struct iwl_legacy_addsta_cmd));
- send_lq = false;
- if (priv->stations[i].lq) {
- memcpy(&lq, priv->stations[i].lq,
- sizeof(struct iwl_link_quality_cmd));
- send_lq = true;
- }
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
- ret = iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
- if (ret) {
- spin_lock_irqsave(&priv->sta_lock, flags_spin);
- IWL_ERR(priv, "Adding station %pM failed.\n",
- priv->stations[i].sta.sta.addr);
- priv->stations[i].used &=
- ~IWL_STA_DRIVER_ACTIVE;
- priv->stations[i].used &=
- ~IWL_STA_UCODE_INPROGRESS;
- spin_unlock_irqrestore(&priv->sta_lock,
- flags_spin);
- }
- /*
- * Rate scaling has already been initialized, send
- * current LQ command
- */
- if (send_lq)
- iwl_legacy_send_lq_cmd(priv, ctx, &lq,
- CMD_SYNC, true);
- spin_lock_irqsave(&priv->sta_lock, flags_spin);
- priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS;
- }
- }
-
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
- if (!found)
- IWL_DEBUG_INFO(priv, "Restoring all known stations"
- " .... no stations to be restored.\n");
- else
- IWL_DEBUG_INFO(priv, "Restoring all known stations"
- " .... complete.\n");
-}
-EXPORT_SYMBOL(iwl_legacy_restore_stations);
-
-int iwl_legacy_get_free_ucode_key_index(struct iwl_priv *priv)
-{
- int i;
-
- for (i = 0; i < priv->sta_key_max_num; i++)
- if (!test_and_set_bit(i, &priv->ucode_key_table))
- return i;
-
- return WEP_INVALID_OFFSET;
-}
-EXPORT_SYMBOL(iwl_legacy_get_free_ucode_key_index);
-
-void iwl_legacy_dealloc_bcast_stations(struct iwl_priv *priv)
-{
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- for (i = 0; i < priv->hw_params.max_stations; i++) {
- if (!(priv->stations[i].used & IWL_STA_BCAST))
- continue;
-
- priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE;
- priv->num_stations--;
- BUG_ON(priv->num_stations < 0);
- kfree(priv->stations[i].lq);
- priv->stations[i].lq = NULL;
- }
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-}
-EXPORT_SYMBOL_GPL(iwl_legacy_dealloc_bcast_stations);
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-static void iwl_legacy_dump_lq_cmd(struct iwl_priv *priv,
- struct iwl_link_quality_cmd *lq)
-{
- int i;
- IWL_DEBUG_RATE(priv, "lq station id 0x%x\n", lq->sta_id);
- IWL_DEBUG_RATE(priv, "lq ant 0x%X 0x%X\n",
- lq->general_params.single_stream_ant_msk,
- lq->general_params.dual_stream_ant_msk);
-
- for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
- IWL_DEBUG_RATE(priv, "lq index %d 0x%X\n",
- i, lq->rs_table[i].rate_n_flags);
-}
-#else
-static inline void iwl_legacy_dump_lq_cmd(struct iwl_priv *priv,
- struct iwl_link_quality_cmd *lq)
-{
-}
-#endif
-
-/**
- * iwl_legacy_is_lq_table_valid() - Test one aspect of LQ cmd for validity
- *
- * It sometimes happens that an HT rate has been in use and we
- * lose connectivity with the AP; mac80211 will then first tell us that the
- * current channel is not HT anymore before removing the station. In such a
- * scenario the RXON flags will be updated to indicate we are not
- * communicating HT anymore, but the LQ command may still contain HT rates.
- * Test for this to prevent the driver from sending an LQ command between the
- * time the RXON flags are updated and the time the LQ command is updated.
- */
-static bool iwl_legacy_is_lq_table_valid(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct iwl_link_quality_cmd *lq)
-{
- int i;
-
- if (ctx->ht.enabled)
- return true;
-
- IWL_DEBUG_INFO(priv, "Channel %u is not an HT channel\n",
- ctx->active.channel);
- for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
- if (le32_to_cpu(lq->rs_table[i].rate_n_flags) &
- RATE_MCS_HT_MSK) {
- IWL_DEBUG_INFO(priv,
- "index %d of LQ expects HT channel\n",
- i);
- return false;
- }
- }
- return true;
-}
-
-/**
- * iwl_legacy_send_lq_cmd() - Send link quality command
- * @init: This command is sent as part of station initialization right
- * after station has been added.
- *
- * The link quality command is sent as the last step of station creation.
- * This is the special case in which @init is set, and we clear the state
- * indicating that station creation is in progress.
- */
-int iwl_legacy_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
- struct iwl_link_quality_cmd *lq, u8 flags, bool init)
-{
- int ret = 0;
- unsigned long flags_spin;
-
- struct iwl_host_cmd cmd = {
- .id = REPLY_TX_LINK_QUALITY_CMD,
- .len = sizeof(struct iwl_link_quality_cmd),
- .flags = flags,
- .data = lq,
- };
-
- if (WARN_ON(lq->sta_id == IWL_INVALID_STATION))
- return -EINVAL;
-
-
- spin_lock_irqsave(&priv->sta_lock, flags_spin);
- if (!(priv->stations[lq->sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
- return -EINVAL;
- }
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
-
- iwl_legacy_dump_lq_cmd(priv, lq);
- BUG_ON(init && (cmd.flags & CMD_ASYNC));
-
- if (iwl_legacy_is_lq_table_valid(priv, ctx, lq))
- ret = iwl_legacy_send_cmd(priv, &cmd);
- else
- ret = -EINVAL;
-
- if (cmd.flags & CMD_ASYNC)
- return ret;
-
- if (init) {
- IWL_DEBUG_INFO(priv, "init LQ command complete,"
- " clearing sta addition status for sta %d\n",
- lq->sta_id);
- spin_lock_irqsave(&priv->sta_lock, flags_spin);
- priv->stations[lq->sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
- }
- return ret;
-}
-EXPORT_SYMBOL(iwl_legacy_send_lq_cmd);
-
-int iwl_legacy_mac_sta_remove(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_station_priv_common *sta_common = (void *)sta->drv_priv;
- int ret;
-
- IWL_DEBUG_INFO(priv, "received request to remove station %pM\n",
- sta->addr);
- mutex_lock(&priv->mutex);
- IWL_DEBUG_INFO(priv, "proceeding to remove station %pM\n",
- sta->addr);
- ret = iwl_legacy_remove_station(priv, sta_common->sta_id, sta->addr);
- if (ret)
- IWL_ERR(priv, "Error removing station %pM\n",
- sta->addr);
- mutex_unlock(&priv->mutex);
- return ret;
-}
-EXPORT_SYMBOL(iwl_legacy_mac_sta_remove);
diff --git a/drivers/net/wireless/iwlegacy/iwl-sta.h b/drivers/net/wireless/iwlegacy/iwl-sta.h
deleted file mode 100644
index 67bd75fe01a1..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-sta.h
+++ /dev/null
@@ -1,148 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-#ifndef __iwl_legacy_sta_h__
-#define __iwl_legacy_sta_h__
-
-#include "iwl-dev.h"
-
-#define HW_KEY_DYNAMIC 0
-#define HW_KEY_DEFAULT 1
-
-#define IWL_STA_DRIVER_ACTIVE BIT(0) /* driver entry is active */
-#define IWL_STA_UCODE_ACTIVE BIT(1) /* ucode entry is active */
-#define IWL_STA_UCODE_INPROGRESS BIT(2) /* ucode entry is in process of
- being activated */
-#define IWL_STA_LOCAL BIT(3) /* station state not directed by mac80211;
- (this is for the IBSS BSSID stations) */
-#define IWL_STA_BCAST BIT(4) /* this station is the special bcast station */
-
-
-void iwl_legacy_restore_stations(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx);
-void iwl_legacy_clear_ucode_stations(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx);
-void iwl_legacy_dealloc_bcast_stations(struct iwl_priv *priv);
-int iwl_legacy_get_free_ucode_key_index(struct iwl_priv *priv);
-int iwl_legacy_send_add_sta(struct iwl_priv *priv,
- struct iwl_legacy_addsta_cmd *sta, u8 flags);
-int iwl_legacy_add_station_common(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- const u8 *addr, bool is_ap,
- struct ieee80211_sta *sta, u8 *sta_id_r);
-int iwl_legacy_remove_station(struct iwl_priv *priv,
- const u8 sta_id,
- const u8 *addr);
-int iwl_legacy_mac_sta_remove(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
-
-u8 iwl_legacy_prep_station(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- const u8 *addr, bool is_ap,
- struct ieee80211_sta *sta);
-
-int iwl_legacy_send_lq_cmd(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct iwl_link_quality_cmd *lq,
- u8 flags, bool init);
-
-/**
- * iwl_legacy_clear_driver_stations - clear knowledge of all stations from driver
- * @priv: iwl priv struct
- *
- * This is called during iwl_down() so that, in case we are coming
- * back from a hardware restart, mac80211 will be able to reconfigure
- * stations -- if we are getting there in the normal down flow, the
- * stations will already be cleared.
- */
-static inline void iwl_legacy_clear_driver_stations(struct iwl_priv *priv)
-{
- unsigned long flags;
- struct iwl_rxon_context *ctx;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- memset(priv->stations, 0, sizeof(priv->stations));
- priv->num_stations = 0;
-
- priv->ucode_key_table = 0;
-
- for_each_context(priv, ctx) {
- /*
- * Remove all key information that is not stored as part
- * of station information since mac80211 may not have had
- * a chance to remove all the keys. When device is
- * reconfigured by mac80211 after an error all keys will
- * be reconfigured.
- */
- memset(ctx->wep_keys, 0, sizeof(ctx->wep_keys));
- ctx->key_mapping_keys = 0;
- }
-
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-}
-
-static inline int iwl_legacy_sta_id(struct ieee80211_sta *sta)
-{
- if (WARN_ON(!sta))
- return IWL_INVALID_STATION;
-
- return ((struct iwl_station_priv_common *)sta->drv_priv)->sta_id;
-}
-
-/**
- * iwl_legacy_sta_id_or_broadcast - return sta_id or broadcast sta
- * @priv: iwl priv
- * @context: the current context
- * @sta: mac80211 station
- *
- * In certain circumstances mac80211 passes a station pointer
- * that may be %NULL, for example during TX or key setup. In
- * that case, we need to use the broadcast station, so this
- * inline wraps that pattern.
- */
-static inline int iwl_legacy_sta_id_or_broadcast(struct iwl_priv *priv,
- struct iwl_rxon_context *context,
- struct ieee80211_sta *sta)
-{
- int sta_id;
-
- if (!sta)
- return context->bcast_sta_id;
-
- sta_id = iwl_legacy_sta_id(sta);
-
- /*
- * mac80211 should not be passing a partially
- * initialised station!
- */
- WARN_ON(sta_id == IWL_INVALID_STATION);
-
- return sta_id;
-}
-#endif /* __iwl_legacy_sta_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-tx.c b/drivers/net/wireless/iwlegacy/iwl-tx.c
deleted file mode 100644
index c0dfb1a4e968..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-tx.c
+++ /dev/null
@@ -1,659 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/etherdevice.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/export.h>
-#include <net/mac80211.h>
-#include "iwl-eeprom.h"
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-sta.h"
-#include "iwl-io.h"
-#include "iwl-helpers.h"
-
-/**
- * iwl_legacy_txq_update_write_ptr - Send new write index to hardware
- */
-void
-iwl_legacy_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
-{
- u32 reg = 0;
- int txq_id = txq->q.id;
-
- if (txq->need_update == 0)
- return;
-
- /* if we're trying to save power */
- if (test_bit(STATUS_POWER_PMI, &priv->status)) {
- /* wake up nic if it's powered down ...
- * uCode will wake up, and interrupt us again, so next
- * time we'll skip this part. */
- reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
-
- if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
- IWL_DEBUG_INFO(priv,
- "Tx queue %d requesting wakeup,"
- " GP1 = 0x%x\n", txq_id, reg);
- iwl_legacy_set_bit(priv, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
- return;
- }
-
- iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR,
- txq->q.write_ptr | (txq_id << 8));
-
- /*
- * else not in power-save mode,
- * uCode will never sleep when we're
- * trying to tx (during RFKILL, we're not trying to tx).
- */
- } else
- iwl_write32(priv, HBUS_TARG_WRPTR,
- txq->q.write_ptr | (txq_id << 8));
- txq->need_update = 0;
-}
-EXPORT_SYMBOL(iwl_legacy_txq_update_write_ptr);
-
-/**
- * iwl_legacy_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
- */
-void iwl_legacy_tx_queue_unmap(struct iwl_priv *priv, int txq_id)
-{
- struct iwl_tx_queue *txq = &priv->txq[txq_id];
- struct iwl_queue *q = &txq->q;
-
- if (q->n_bd == 0)
- return;
-
- while (q->write_ptr != q->read_ptr) {
- priv->cfg->ops->lib->txq_free_tfd(priv, txq);
- q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd);
- }
-}
-EXPORT_SYMBOL(iwl_legacy_tx_queue_unmap);
-
-/**
- * iwl_legacy_tx_queue_free - Deallocate DMA queue.
- * @txq: Transmit queue to deallocate.
- *
- * Empty queue by removing and destroying all BD's.
- * Free all buffers.
- * 0-fill, but do not free "txq" descriptor structure.
- */
-void iwl_legacy_tx_queue_free(struct iwl_priv *priv, int txq_id)
-{
- struct iwl_tx_queue *txq = &priv->txq[txq_id];
- struct device *dev = &priv->pci_dev->dev;
- int i;
-
- iwl_legacy_tx_queue_unmap(priv, txq_id);
-
- /* De-alloc array of command/tx buffers */
- for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
- kfree(txq->cmd[i]);
-
- /* De-alloc circular buffer of TFDs */
- if (txq->q.n_bd)
- dma_free_coherent(dev, priv->hw_params.tfd_size *
- txq->q.n_bd, txq->tfds, txq->q.dma_addr);
-
- /* De-alloc array of per-TFD driver data */
- kfree(txq->txb);
- txq->txb = NULL;
-
- /* deallocate arrays */
- kfree(txq->cmd);
- kfree(txq->meta);
- txq->cmd = NULL;
- txq->meta = NULL;
-
- /* 0-fill queue descriptor structure */
- memset(txq, 0, sizeof(*txq));
-}
-EXPORT_SYMBOL(iwl_legacy_tx_queue_free);
-
-/**
- * iwl_legacy_cmd_queue_unmap - Unmap any remaining DMA mappings from command queue
- */
-void iwl_legacy_cmd_queue_unmap(struct iwl_priv *priv)
-{
- struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
- struct iwl_queue *q = &txq->q;
- int i;
-
- if (q->n_bd == 0)
- return;
-
- while (q->read_ptr != q->write_ptr) {
- i = iwl_legacy_get_cmd_index(q, q->read_ptr, 0);
-
- if (txq->meta[i].flags & CMD_MAPPED) {
- pci_unmap_single(priv->pci_dev,
- dma_unmap_addr(&txq->meta[i], mapping),
- dma_unmap_len(&txq->meta[i], len),
- PCI_DMA_BIDIRECTIONAL);
- txq->meta[i].flags = 0;
- }
-
- q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd);
- }
-
- i = q->n_window;
- if (txq->meta[i].flags & CMD_MAPPED) {
- pci_unmap_single(priv->pci_dev,
- dma_unmap_addr(&txq->meta[i], mapping),
- dma_unmap_len(&txq->meta[i], len),
- PCI_DMA_BIDIRECTIONAL);
- txq->meta[i].flags = 0;
- }
-}
-EXPORT_SYMBOL(iwl_legacy_cmd_queue_unmap);
-
-/**
- * iwl_legacy_cmd_queue_free - Deallocate DMA queue.
- * @txq: Transmit queue to deallocate.
- *
- * Empty queue by removing and destroying all BD's.
- * Free all buffers.
- * 0-fill, but do not free "txq" descriptor structure.
- */
-void iwl_legacy_cmd_queue_free(struct iwl_priv *priv)
-{
- struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
- struct device *dev = &priv->pci_dev->dev;
- int i;
-
- iwl_legacy_cmd_queue_unmap(priv);
-
- /* De-alloc array of command/tx buffers */
- for (i = 0; i <= TFD_CMD_SLOTS; i++)
- kfree(txq->cmd[i]);
-
- /* De-alloc circular buffer of TFDs */
- if (txq->q.n_bd)
- dma_free_coherent(dev, priv->hw_params.tfd_size * txq->q.n_bd,
- txq->tfds, txq->q.dma_addr);
-
- /* deallocate arrays */
- kfree(txq->cmd);
- kfree(txq->meta);
- txq->cmd = NULL;
- txq->meta = NULL;
-
- /* 0-fill queue descriptor structure */
- memset(txq, 0, sizeof(*txq));
-}
-EXPORT_SYMBOL(iwl_legacy_cmd_queue_free);
-
-/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
- * DMA services
- *
- * Theory of operation
- *
- * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
- * of buffer descriptors, each of which points to one or more data buffers for
- * the device to read from or fill. Driver and device exchange status of each
- * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
- * entries in each circular buffer, to protect against confusing empty and full
- * queue states.
- *
- * The device reads or writes the data in the queues via the device's several
- * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
- *
- * For Tx queues, there are low mark and high mark limits. If, after queuing
- * a packet for Tx, free space becomes < low mark, the Tx queue is stopped.
- * When reclaiming packets (on the 'tx done' IRQ), if free space becomes
- * > high mark, the Tx queue is resumed.
- *
- * See more detailed info in iwl-4965-hw.h.
- ***************************************************/
-
-int iwl_legacy_queue_space(const struct iwl_queue *q)
-{
- int s = q->read_ptr - q->write_ptr;
-
- if (q->read_ptr > q->write_ptr)
- s -= q->n_bd;
-
- if (s <= 0)
- s += q->n_window;
- /* keep some reserve to not confuse empty and full situations */
- s -= 2;
- if (s < 0)
- s = 0;
- return s;
-}
-EXPORT_SYMBOL(iwl_legacy_queue_space);
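
A quick worked trace of the helper above (a reading of the code, not new driver behaviour): for a freshly initialized queue with read_ptr == write_ptr == 0, s starts at 0, the n_bd adjustment is skipped, the s <= 0 branch adds n_window, and the 2-entry reserve is subtracted, so the helper reports n_window - 2 free slots -- e.g. 62 for a hypothetical n_window of 64. The final clamp only keeps the result non-negative.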
-
-
-/**
- * iwl_legacy_queue_init - Initialize queue's high/low-water and read/write indexes
- */
-static int iwl_legacy_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
- int count, int slots_num, u32 id)
-{
- q->n_bd = count;
- q->n_window = slots_num;
- q->id = id;
-
- /* count must be power-of-two size, otherwise iwl_legacy_queue_inc_wrap
- * and iwl_legacy_queue_dec_wrap are broken. */
- BUG_ON(!is_power_of_2(count));
-
- /* slots_num must be power-of-two size, otherwise
- * iwl_legacy_get_cmd_index is broken. */
- BUG_ON(!is_power_of_2(slots_num));
-
- q->low_mark = q->n_window / 4;
- if (q->low_mark < 4)
- q->low_mark = 4;
-
- q->high_mark = q->n_window / 8;
- if (q->high_mark < 2)
- q->high_mark = 2;
-
- q->write_ptr = q->read_ptr = 0;
-
- return 0;
-}
-
-/**
- * iwl_legacy_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
- */
-static int iwl_legacy_tx_queue_alloc(struct iwl_priv *priv,
- struct iwl_tx_queue *txq, u32 id)
-{
- struct device *dev = &priv->pci_dev->dev;
- size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;
-
- /* Driver private data, only for Tx (not command) queues,
- * not shared with device. */
- if (id != priv->cmd_queue) {
- txq->txb = kzalloc(sizeof(txq->txb[0]) *
- TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
- if (!txq->txb) {
- IWL_ERR(priv, "kmalloc for auxiliary BD "
- "structures failed\n");
- goto error;
- }
- } else {
- txq->txb = NULL;
- }
-
- /* Circular buffer of transmit frame descriptors (TFDs),
- * shared with device */
- txq->tfds = dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr,
- GFP_KERNEL);
- if (!txq->tfds) {
- IWL_ERR(priv, "pci_alloc_consistent(%zd) failed\n", tfd_sz);
- goto error;
- }
- txq->q.id = id;
-
- return 0;
-
- error:
- kfree(txq->txb);
- txq->txb = NULL;
-
- return -ENOMEM;
-}
-
-/**
- * iwl_legacy_tx_queue_init - Allocate and initialize one tx/cmd queue
- */
-int iwl_legacy_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
- int slots_num, u32 txq_id)
-{
- int i, len;
- int ret;
- int actual_slots = slots_num;
-
- /*
- * Alloc buffer array for commands (Tx or other types of commands).
- * For the command queue (#4/#9), allocate command space + one big
- * command for scan, since the scan command is very large; the system will
- * not have two scans at the same time, so only one is needed.
- * For normal Tx queues (all other queues), no super-size command
- * space is needed.
- */
- if (txq_id == priv->cmd_queue)
- actual_slots++;
-
- txq->meta = kzalloc(sizeof(struct iwl_cmd_meta) * actual_slots,
- GFP_KERNEL);
- txq->cmd = kzalloc(sizeof(struct iwl_device_cmd *) * actual_slots,
- GFP_KERNEL);
-
- if (!txq->meta || !txq->cmd)
- goto out_free_arrays;
-
- len = sizeof(struct iwl_device_cmd);
- for (i = 0; i < actual_slots; i++) {
- /* only happens for cmd queue */
- if (i == slots_num)
- len = IWL_MAX_CMD_SIZE;
-
- txq->cmd[i] = kmalloc(len, GFP_KERNEL);
- if (!txq->cmd[i])
- goto err;
- }
-
- /* Alloc driver data array and TFD circular buffer */
- ret = iwl_legacy_tx_queue_alloc(priv, txq, txq_id);
- if (ret)
- goto err;
-
- txq->need_update = 0;
-
- /*
- * For the default queues 0-3, set up the swq_id
- * already -- all others need to get one later
- * (if they need one at all).
- */
- if (txq_id < 4)
- iwl_legacy_set_swq_id(txq, txq_id, txq_id);
-
- /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
- * iwl_legacy_queue_inc_wrap and iwl_legacy_queue_dec_wrap are broken. */
- BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
-
- /* Initialize queue's high/low-water marks, and head/tail indexes */
- iwl_legacy_queue_init(priv, &txq->q,
- TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
-
- /* Tell device where to find queue */
- priv->cfg->ops->lib->txq_init(priv, txq);
-
- return 0;
-err:
- for (i = 0; i < actual_slots; i++)
- kfree(txq->cmd[i]);
-out_free_arrays:
- kfree(txq->meta);
- kfree(txq->cmd);
-
- return -ENOMEM;
-}
-EXPORT_SYMBOL(iwl_legacy_tx_queue_init);
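
For orientation, a minimal bring-up/tear-down sketch using the helpers above; this is an illustration only (the queue id of 2 and the reuse of TFD_TX_CMD_SLOTS are assumptions), not code from the driver:

static int example_bring_up_one_txq(struct iwl_priv *priv)
{
	struct iwl_tx_queue *txq = &priv->txq[2];	/* hypothetical queue id */
	int ret;

	ret = iwl_legacy_tx_queue_init(priv, txq, TFD_TX_CMD_SLOTS, 2);
	if (ret)
		return ret;		/* -ENOMEM on allocation failure */

	/* ... queue in use ...; on teardown: */
	iwl_legacy_tx_queue_free(priv, 2);
	return 0;
}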
-
-void iwl_legacy_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
- int slots_num, u32 txq_id)
-{
- int actual_slots = slots_num;
-
- if (txq_id == priv->cmd_queue)
- actual_slots++;
-
- memset(txq->meta, 0, sizeof(struct iwl_cmd_meta) * actual_slots);
-
- txq->need_update = 0;
-
- /* Initialize queue's high/low-water marks, and head/tail indexes */
- iwl_legacy_queue_init(priv, &txq->q,
- TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
-
- /* Tell device where to find queue */
- priv->cfg->ops->lib->txq_init(priv, txq);
-}
-EXPORT_SYMBOL(iwl_legacy_tx_queue_reset);
-
-/*************** HOST COMMAND QUEUE FUNCTIONS *****/
-
-/**
- * iwl_legacy_enqueue_hcmd - enqueue a uCode command
- * @priv: device private data pointer
- * @cmd: a pointer to the uCode command structure
- *
- * The function returns a value < 0 to indicate that the operation
- * failed. On success, it returns the (non-negative) index of the command
- * in the command queue.
- */
-int iwl_legacy_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
-{
- struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
- struct iwl_queue *q = &txq->q;
- struct iwl_device_cmd *out_cmd;
- struct iwl_cmd_meta *out_meta;
- dma_addr_t phys_addr;
- unsigned long flags;
- int len;
- u32 idx;
- u16 fix_size;
-
- cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
- fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));
-
- /* If any of the command structures end up being larger than
- * TFD_MAX_PAYLOAD_SIZE and it is sent as a 'small' command, then
- * we will need to increase the size of the TFD entries.
- * Also, check that the command buffer does not exceed the size
- * of device_cmd and max_cmd_size. */
- BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
- !(cmd->flags & CMD_SIZE_HUGE));
- BUG_ON(fix_size > IWL_MAX_CMD_SIZE);
-
- if (iwl_legacy_is_rfkill(priv) || iwl_legacy_is_ctkill(priv)) {
- IWL_WARN(priv, "Not sending command - %s KILL\n",
- iwl_legacy_is_rfkill(priv) ? "RF" : "CT");
- return -EIO;
- }
-
- spin_lock_irqsave(&priv->hcmd_lock, flags);
-
- if (iwl_legacy_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
- spin_unlock_irqrestore(&priv->hcmd_lock, flags);
-
- IWL_ERR(priv, "Restarting adapter due to command queue full\n");
- queue_work(priv->workqueue, &priv->restart);
- return -ENOSPC;
- }
-
- idx = iwl_legacy_get_cmd_index(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
- out_cmd = txq->cmd[idx];
- out_meta = &txq->meta[idx];
-
- if (WARN_ON(out_meta->flags & CMD_MAPPED)) {
- spin_unlock_irqrestore(&priv->hcmd_lock, flags);
- return -ENOSPC;
- }
-
- memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */
- out_meta->flags = cmd->flags | CMD_MAPPED;
- if (cmd->flags & CMD_WANT_SKB)
- out_meta->source = cmd;
- if (cmd->flags & CMD_ASYNC)
- out_meta->callback = cmd->callback;
-
- out_cmd->hdr.cmd = cmd->id;
- memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);
-
- /* At this point, the out_cmd now has all of the incoming cmd
- * information */
-
- out_cmd->hdr.flags = 0;
- out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(priv->cmd_queue) |
- INDEX_TO_SEQ(q->write_ptr));
- if (cmd->flags & CMD_SIZE_HUGE)
- out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
- len = sizeof(struct iwl_device_cmd);
- if (idx == TFD_CMD_SLOTS)
- len = IWL_MAX_CMD_SIZE;
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- switch (out_cmd->hdr.cmd) {
- case REPLY_TX_LINK_QUALITY_CMD:
- case SENSITIVITY_CMD:
- IWL_DEBUG_HC_DUMP(priv,
- "Sending command %s (#%x), seq: 0x%04X, "
- "%d bytes at %d[%d]:%d\n",
- iwl_legacy_get_cmd_string(out_cmd->hdr.cmd),
- out_cmd->hdr.cmd,
- le16_to_cpu(out_cmd->hdr.sequence), fix_size,
- q->write_ptr, idx, priv->cmd_queue);
- break;
- default:
- IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, "
- "%d bytes at %d[%d]:%d\n",
- iwl_legacy_get_cmd_string(out_cmd->hdr.cmd),
- out_cmd->hdr.cmd,
- le16_to_cpu(out_cmd->hdr.sequence), fix_size,
- q->write_ptr, idx, priv->cmd_queue);
- }
-#endif
- txq->need_update = 1;
-
- if (priv->cfg->ops->lib->txq_update_byte_cnt_tbl)
- /* Set up entry in queue's byte count circular buffer */
- priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0);
-
- phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr,
- fix_size, PCI_DMA_BIDIRECTIONAL);
- dma_unmap_addr_set(out_meta, mapping, phys_addr);
- dma_unmap_len_set(out_meta, len, fix_size);
-
- trace_iwlwifi_legacy_dev_hcmd(priv, &out_cmd->hdr,
- fix_size, cmd->flags);
-
- priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
- phys_addr, fix_size, 1,
- U32_PAD(cmd->len));
-
- /* Increment and update queue's write index */
- q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd);
- iwl_legacy_txq_update_write_ptr(priv, txq);
-
- spin_unlock_irqrestore(&priv->hcmd_lock, flags);
- return idx;
-}
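
A hedged usage sketch of the enqueue path (illustration only; the command id, payload and CMD_ASYNC flag are arbitrary choices for the example, and callers would normally use the higher-level iwl_legacy_send_cmd*() helpers):

static int example_enqueue_async_cmd(struct iwl_priv *priv,
				     void *payload, u16 payload_len)
{
	struct iwl_host_cmd cmd = {
		.id = REPLY_TX_LINK_QUALITY_CMD,	/* arbitrary example id */
		.data = payload,
		.len = payload_len,
		.flags = CMD_ASYNC,
	};
	int idx = iwl_legacy_enqueue_hcmd(priv, &cmd);

	/* a negative return value means the enqueue failed */
	return idx < 0 ? idx : 0;
}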
-
-/**
- * iwl_legacy_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
- *
- * When FW advances 'R' index, all entries between old and new 'R' index
- * need to be reclaimed. As a result, some free space forms. If there is
- * enough free space (> low mark), wake the stack that feeds us.
- */
-static void iwl_legacy_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
- int idx, int cmd_idx)
-{
- struct iwl_tx_queue *txq = &priv->txq[txq_id];
- struct iwl_queue *q = &txq->q;
- int nfreed = 0;
-
- if ((idx >= q->n_bd) || (iwl_legacy_queue_used(q, idx) == 0)) {
- IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
- "is out of range [0-%d] %d %d.\n", txq_id,
- idx, q->n_bd, q->write_ptr, q->read_ptr);
- return;
- }
-
- for (idx = iwl_legacy_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
- q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) {
-
- if (nfreed++ > 0) {
- IWL_ERR(priv, "HCMD skipped: index (%d) %d %d\n", idx,
- q->write_ptr, q->read_ptr);
- queue_work(priv->workqueue, &priv->restart);
- }
-
- }
-}
-
-/**
- * iwl_legacy_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
- * @rxb: Rx buffer to reclaim
- *
- * If an Rx buffer has an async callback associated with it, the callback
- * will be executed. The attached skb (if present) will only be freed
- * if the callback returns 1.
- */
-void
-iwl_legacy_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- u16 sequence = le16_to_cpu(pkt->hdr.sequence);
- int txq_id = SEQ_TO_QUEUE(sequence);
- int index = SEQ_TO_INDEX(sequence);
- int cmd_index;
- bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
- struct iwl_device_cmd *cmd;
- struct iwl_cmd_meta *meta;
- struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
- unsigned long flags;
-
- /* If a Tx command is being handled and it isn't in the actual
- * command queue, then a command routing bug has been introduced
- * in the queue management code. */
- if (WARN(txq_id != priv->cmd_queue,
- "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
- txq_id, priv->cmd_queue, sequence,
- priv->txq[priv->cmd_queue].q.read_ptr,
- priv->txq[priv->cmd_queue].q.write_ptr)) {
- iwl_print_hex_error(priv, pkt, 32);
- return;
- }
-
- cmd_index = iwl_legacy_get_cmd_index(&txq->q, index, huge);
- cmd = txq->cmd[cmd_index];
- meta = &txq->meta[cmd_index];
-
- txq->time_stamp = jiffies;
-
- pci_unmap_single(priv->pci_dev,
- dma_unmap_addr(meta, mapping),
- dma_unmap_len(meta, len),
- PCI_DMA_BIDIRECTIONAL);
-
- /* Input error checking is done when commands are added to queue. */
- if (meta->flags & CMD_WANT_SKB) {
- meta->source->reply_page = (unsigned long)rxb_addr(rxb);
- rxb->page = NULL;
- } else if (meta->callback)
- meta->callback(priv, cmd, pkt);
-
- spin_lock_irqsave(&priv->hcmd_lock, flags);
-
- iwl_legacy_hcmd_queue_reclaim(priv, txq_id, index, cmd_index);
-
- if (!(meta->flags & CMD_ASYNC)) {
- clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
- IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n",
- iwl_legacy_get_cmd_string(cmd->hdr.cmd));
- wake_up(&priv->wait_command_queue);
- }
-
- /* Mark as unmapped */
- meta->flags = 0;
-
- spin_unlock_irqrestore(&priv->hcmd_lock, flags);
-}
-EXPORT_SYMBOL(iwl_legacy_tx_cmd_complete);
diff --git a/drivers/net/wireless/iwlegacy/iwl3945-base.c b/drivers/net/wireless/iwlegacy/iwl3945-base.c
deleted file mode 100644
index b282d869a546..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl3945-base.c
+++ /dev/null
@@ -1,4016 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/pci-aspm.h>
-#include <linux/slab.h>
-#include <linux/dma-mapping.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/firmware.h>
-#include <linux/etherdevice.h>
-#include <linux/if_arp.h>
-
-#include <net/ieee80211_radiotap.h>
-#include <net/mac80211.h>
-
-#include <asm/div64.h>
-
-#define DRV_NAME "iwl3945"
-
-#include "iwl-fh.h"
-#include "iwl-3945-fh.h"
-#include "iwl-commands.h"
-#include "iwl-sta.h"
-#include "iwl-3945.h"
-#include "iwl-core.h"
-#include "iwl-helpers.h"
-#include "iwl-dev.h"
-#include "iwl-spectrum.h"
-
-/*
- * module name, copyright, version, etc.
- */
-
-#define DRV_DESCRIPTION \
-"Intel(R) PRO/Wireless 3945ABG/BG Network Connection driver for Linux"
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-#define VD "d"
-#else
-#define VD
-#endif
-
-/*
- * add "s" to indicate spectrum measurement included.
- * we add it here to be consistent with previous releases in which
- * this was configurable.
- */
-#define DRV_VERSION IWLWIFI_VERSION VD "s"
-#define DRV_COPYRIGHT "Copyright(c) 2003-2011 Intel Corporation"
-#define DRV_AUTHOR "<ilw@linux.intel.com>"
-
-MODULE_DESCRIPTION(DRV_DESCRIPTION);
-MODULE_VERSION(DRV_VERSION);
-MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
-MODULE_LICENSE("GPL");
-
- /* module parameters */
-struct iwl_mod_params iwl3945_mod_params = {
- .sw_crypto = 1,
- .restart_fw = 1,
- .disable_hw_scan = 1,
- /* the rest are 0 by default */
-};
-
-/**
- * iwl3945_get_antenna_flags - Get antenna flags for RXON command
- * @priv: eeprom and antenna fields are used to determine antenna flags
- *
- * priv->eeprom39 is used to determine if antenna AUX/MAIN are reversed
- * iwl3945_mod_params.antenna specifies the antenna diversity mode:
- *
- * IWL_ANTENNA_DIVERSITY - NIC selects best antenna by itself
- * IWL_ANTENNA_MAIN - Force MAIN antenna
- * IWL_ANTENNA_AUX - Force AUX antenna
- */
-__le32 iwl3945_get_antenna_flags(const struct iwl_priv *priv)
-{
- struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
-
- switch (iwl3945_mod_params.antenna) {
- case IWL_ANTENNA_DIVERSITY:
- return 0;
-
- case IWL_ANTENNA_MAIN:
- if (eeprom->antenna_switch_type)
- return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK;
- return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK;
-
- case IWL_ANTENNA_AUX:
- if (eeprom->antenna_switch_type)
- return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK;
- return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK;
- }
-
- /* bad antenna selector value */
- IWL_ERR(priv, "Bad antenna selector value (0x%x)\n",
- iwl3945_mod_params.antenna);
-
- return 0; /* "diversity" is default if error */
-}
-
-static int iwl3945_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
- struct ieee80211_key_conf *keyconf,
- u8 sta_id)
-{
- unsigned long flags;
- __le16 key_flags = 0;
- int ret;
-
- key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
- key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
-
- if (sta_id == priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id)
- key_flags |= STA_KEY_MULTICAST_MSK;
-
- keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
- keyconf->hw_key_idx = keyconf->keyidx;
- key_flags &= ~STA_KEY_FLG_INVALID;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
- priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
- memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key,
- keyconf->keylen);
-
- memcpy(priv->stations[sta_id].sta.key.key, keyconf->key,
- keyconf->keylen);
-
- if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
- == STA_KEY_FLG_NO_ENC)
- priv->stations[sta_id].sta.key.key_offset =
- iwl_legacy_get_free_ucode_key_index(priv);
- /* else, we are overriding an existing key => no need to allocate room
- * in uCode. */
-
- WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
- "no space for a new key");
-
- priv->stations[sta_id].sta.key.key_flags = key_flags;
- priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
- priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
-
- IWL_DEBUG_INFO(priv, "hwcrypto: modify ucode station key info\n");
-
- ret = iwl_legacy_send_add_sta(priv,
- &priv->stations[sta_id].sta, CMD_ASYNC);
-
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- return ret;
-}
-
-static int iwl3945_set_tkip_dynamic_key_info(struct iwl_priv *priv,
- struct ieee80211_key_conf *keyconf,
- u8 sta_id)
-{
- return -EOPNOTSUPP;
-}
-
-static int iwl3945_set_wep_dynamic_key_info(struct iwl_priv *priv,
- struct ieee80211_key_conf *keyconf,
- u8 sta_id)
-{
- return -EOPNOTSUPP;
-}
-
-static int iwl3945_clear_sta_key_info(struct iwl_priv *priv, u8 sta_id)
-{
- unsigned long flags;
- struct iwl_legacy_addsta_cmd sta_cmd;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- memset(&priv->stations[sta_id].keyinfo, 0, sizeof(struct iwl_hw_key));
- memset(&priv->stations[sta_id].sta.key, 0,
- sizeof(struct iwl4965_keyinfo));
- priv->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC;
- priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
- priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
- memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_legacy_addsta_cmd));
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- IWL_DEBUG_INFO(priv, "hwcrypto: clear ucode station key info\n");
- return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
-}
-
-static int iwl3945_set_dynamic_key(struct iwl_priv *priv,
- struct ieee80211_key_conf *keyconf, u8 sta_id)
-{
- int ret = 0;
-
- keyconf->hw_key_idx = HW_KEY_DYNAMIC;
-
- switch (keyconf->cipher) {
- case WLAN_CIPHER_SUITE_CCMP:
- ret = iwl3945_set_ccmp_dynamic_key_info(priv, keyconf, sta_id);
- break;
- case WLAN_CIPHER_SUITE_TKIP:
- ret = iwl3945_set_tkip_dynamic_key_info(priv, keyconf, sta_id);
- break;
- case WLAN_CIPHER_SUITE_WEP40:
- case WLAN_CIPHER_SUITE_WEP104:
- ret = iwl3945_set_wep_dynamic_key_info(priv, keyconf, sta_id);
- break;
- default:
- IWL_ERR(priv, "Unknown alg: %s alg=%x\n", __func__,
- keyconf->cipher);
- ret = -EINVAL;
- }
-
- IWL_DEBUG_WEP(priv, "Set dynamic key: alg=%x len=%d idx=%d sta=%d ret=%d\n",
- keyconf->cipher, keyconf->keylen, keyconf->keyidx,
- sta_id, ret);
-
- return ret;
-}
-
-static int iwl3945_remove_static_key(struct iwl_priv *priv)
-{
- int ret = -EOPNOTSUPP;
-
- return ret;
-}
-
-static int iwl3945_set_static_key(struct iwl_priv *priv,
- struct ieee80211_key_conf *key)
-{
- if (key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
- key->cipher == WLAN_CIPHER_SUITE_WEP104)
- return -EOPNOTSUPP;
-
- IWL_ERR(priv, "Static key invalid: cipher %x\n", key->cipher);
- return -EINVAL;
-}
-
-static void iwl3945_clear_free_frames(struct iwl_priv *priv)
-{
- struct list_head *element;
-
- IWL_DEBUG_INFO(priv, "%d frames on pre-allocated heap on clear.\n",
- priv->frames_count);
-
- while (!list_empty(&priv->free_frames)) {
- element = priv->free_frames.next;
- list_del(element);
- kfree(list_entry(element, struct iwl3945_frame, list));
- priv->frames_count--;
- }
-
- if (priv->frames_count) {
- IWL_WARN(priv, "%d frames still in use. Did we lose one?\n",
- priv->frames_count);
- priv->frames_count = 0;
- }
-}
-
-static struct iwl3945_frame *iwl3945_get_free_frame(struct iwl_priv *priv)
-{
- struct iwl3945_frame *frame;
- struct list_head *element;
- if (list_empty(&priv->free_frames)) {
- frame = kzalloc(sizeof(*frame), GFP_KERNEL);
- if (!frame) {
- IWL_ERR(priv, "Could not allocate frame!\n");
- return NULL;
- }
-
- priv->frames_count++;
- return frame;
- }
-
- element = priv->free_frames.next;
- list_del(element);
- return list_entry(element, struct iwl3945_frame, list);
-}
-
-static void iwl3945_free_frame(struct iwl_priv *priv, struct iwl3945_frame *frame)
-{
- memset(frame, 0, sizeof(*frame));
- list_add(&frame->list, &priv->free_frames);
-}
-
-unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv,
- struct ieee80211_hdr *hdr,
- int left)
-{
-
- if (!iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS) || !priv->beacon_skb)
- return 0;
-
- if (priv->beacon_skb->len > left)
- return 0;
-
- memcpy(hdr, priv->beacon_skb->data, priv->beacon_skb->len);
-
- return priv->beacon_skb->len;
-}
-
-static int iwl3945_send_beacon_cmd(struct iwl_priv *priv)
-{
- struct iwl3945_frame *frame;
- unsigned int frame_size;
- int rc;
- u8 rate;
-
- frame = iwl3945_get_free_frame(priv);
-
- if (!frame) {
- IWL_ERR(priv, "Could not obtain free frame buffer for beacon "
- "command.\n");
- return -ENOMEM;
- }
-
- rate = iwl_legacy_get_lowest_plcp(priv,
- &priv->contexts[IWL_RXON_CTX_BSS]);
-
- frame_size = iwl3945_hw_get_beacon_cmd(priv, frame, rate);
-
- rc = iwl_legacy_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
- &frame->u.cmd[0]);
-
- iwl3945_free_frame(priv, frame);
-
- return rc;
-}
-
-static void iwl3945_unset_hw_params(struct iwl_priv *priv)
-{
- if (priv->_3945.shared_virt)
- dma_free_coherent(&priv->pci_dev->dev,
- sizeof(struct iwl3945_shared),
- priv->_3945.shared_virt,
- priv->_3945.shared_phys);
-}
-
-static void iwl3945_build_tx_cmd_hwcrypto(struct iwl_priv *priv,
- struct ieee80211_tx_info *info,
- struct iwl_device_cmd *cmd,
- struct sk_buff *skb_frag,
- int sta_id)
-{
- struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
- struct iwl_hw_key *keyinfo = &priv->stations[sta_id].keyinfo;
-
- tx_cmd->sec_ctl = 0;
-
- switch (keyinfo->cipher) {
- case WLAN_CIPHER_SUITE_CCMP:
- tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
- memcpy(tx_cmd->key, keyinfo->key, keyinfo->keylen);
- IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
- break;
-
- case WLAN_CIPHER_SUITE_TKIP:
- break;
-
- case WLAN_CIPHER_SUITE_WEP104:
- tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
- /* fall through */
- case WLAN_CIPHER_SUITE_WEP40:
- tx_cmd->sec_ctl |= TX_CMD_SEC_WEP |
- (info->control.hw_key->hw_key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT;
-
- memcpy(&tx_cmd->key[3], keyinfo->key, keyinfo->keylen);
-
- IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
- "with key %d\n", info->control.hw_key->hw_key_idx);
- break;
-
- default:
- IWL_ERR(priv, "Unknown encode cipher %x\n", keyinfo->cipher);
- break;
- }
-}
-
-/*
- * Build the basic portion of the REPLY_TX command.
- */
-static void iwl3945_build_tx_cmd_basic(struct iwl_priv *priv,
- struct iwl_device_cmd *cmd,
- struct ieee80211_tx_info *info,
- struct ieee80211_hdr *hdr, u8 std_id)
-{
- struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
- __le32 tx_flags = tx_cmd->tx_flags;
- __le16 fc = hdr->frame_control;
-
- tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
- if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
- tx_flags |= TX_CMD_FLG_ACK_MSK;
- if (ieee80211_is_mgmt(fc))
- tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
- if (ieee80211_is_probe_resp(fc) &&
- !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
- tx_flags |= TX_CMD_FLG_TSF_MSK;
- } else {
- tx_flags &= (~TX_CMD_FLG_ACK_MSK);
- tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
- }
-
- tx_cmd->sta_id = std_id;
- if (ieee80211_has_morefrags(fc))
- tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
-
- if (ieee80211_is_data_qos(fc)) {
- u8 *qc = ieee80211_get_qos_ctl(hdr);
- tx_cmd->tid_tspec = qc[0] & 0xf;
- tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
- } else {
- tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
- }
-
- iwl_legacy_tx_cmd_protection(priv, info, fc, &tx_flags);
-
- tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
- if (ieee80211_is_mgmt(fc)) {
- if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
- tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
- else
- tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
- } else {
- tx_cmd->timeout.pm_frame_timeout = 0;
- }
-
- tx_cmd->driver_txop = 0;
- tx_cmd->tx_flags = tx_flags;
- tx_cmd->next_frame_len = 0;
-}
-
-/*
- * start REPLY_TX command process
- */
-static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
-{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct iwl3945_tx_cmd *tx_cmd;
- struct iwl_tx_queue *txq = NULL;
- struct iwl_queue *q = NULL;
- struct iwl_device_cmd *out_cmd;
- struct iwl_cmd_meta *out_meta;
- dma_addr_t phys_addr;
- dma_addr_t txcmd_phys;
- int txq_id = skb_get_queue_mapping(skb);
- u16 len, idx, hdr_len;
- u8 id;
- u8 unicast;
- u8 sta_id;
- u8 tid = 0;
- __le16 fc;
- u8 wait_write_ptr = 0;
- unsigned long flags;
-
- spin_lock_irqsave(&priv->lock, flags);
- if (iwl_legacy_is_rfkill(priv)) {
- IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
- goto drop_unlock;
- }
-
- if ((ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xFF) == IWL_INVALID_RATE) {
- IWL_ERR(priv, "ERROR: No TX rate available.\n");
- goto drop_unlock;
- }
-
- unicast = !is_multicast_ether_addr(hdr->addr1);
- id = 0;
-
- fc = hdr->frame_control;
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- if (ieee80211_is_auth(fc))
- IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
- else if (ieee80211_is_assoc_req(fc))
- IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
- else if (ieee80211_is_reassoc_req(fc))
- IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
-#endif
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- hdr_len = ieee80211_hdrlen(fc);
-
- /* Find index into station table for destination station */
- sta_id = iwl_legacy_sta_id_or_broadcast(
- priv, &priv->contexts[IWL_RXON_CTX_BSS],
- info->control.sta);
- if (sta_id == IWL_INVALID_STATION) {
- IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
- hdr->addr1);
- goto drop;
- }
-
- IWL_DEBUG_RATE(priv, "station Id %d\n", sta_id);
-
- if (ieee80211_is_data_qos(fc)) {
- u8 *qc = ieee80211_get_qos_ctl(hdr);
- tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
- if (unlikely(tid >= MAX_TID_COUNT))
- goto drop;
- }
-
- /* Descriptor for chosen Tx queue */
- txq = &priv->txq[txq_id];
- q = &txq->q;
-
- if ((iwl_legacy_queue_space(q) < q->high_mark))
- goto drop;
-
- spin_lock_irqsave(&priv->lock, flags);
-
- idx = iwl_legacy_get_cmd_index(q, q->write_ptr, 0);
-
- /* Set up driver data for this TFD */
- memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
- txq->txb[q->write_ptr].skb = skb;
- txq->txb[q->write_ptr].ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
- /* Init first empty entry in queue's array of Tx/cmd buffers */
- out_cmd = txq->cmd[idx];
- out_meta = &txq->meta[idx];
- tx_cmd = (struct iwl3945_tx_cmd *)out_cmd->cmd.payload;
- memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
- memset(tx_cmd, 0, sizeof(*tx_cmd));
-
- /*
- * Set up the Tx-command (not MAC!) header.
- * Store the chosen Tx queue and TFD index within the sequence field;
- * after Tx, uCode's Tx response will return this value so driver can
- * locate the frame within the tx queue and do post-tx processing.
- */
- out_cmd->hdr.cmd = REPLY_TX;
- out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
- INDEX_TO_SEQ(q->write_ptr)));
-
- /* Copy MAC header from skb into command buffer */
- memcpy(tx_cmd->hdr, hdr, hdr_len);
-
-
- if (info->control.hw_key)
- iwl3945_build_tx_cmd_hwcrypto(priv, info, out_cmd, skb, sta_id);
-
- /* TODO need this for burst mode later on */
- iwl3945_build_tx_cmd_basic(priv, out_cmd, info, hdr, sta_id);
-
- /* set is_hcca to 0; it probably will never be implemented */
- iwl3945_hw_build_tx_cmd_rate(priv, out_cmd, info, hdr, sta_id, 0);
-
- /* Total # bytes to be transmitted */
- len = (u16)skb->len;
- tx_cmd->len = cpu_to_le16(len);
-
- iwl_legacy_dbg_log_tx_data_frame(priv, len, hdr);
- iwl_legacy_update_stats(priv, true, fc, len);
- tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK;
- tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK;
-
- if (!ieee80211_has_morefrags(hdr->frame_control)) {
- txq->need_update = 1;
- } else {
- wait_write_ptr = 1;
- txq->need_update = 0;
- }
-
- IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
- le16_to_cpu(out_cmd->hdr.sequence));
- IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
- iwl_print_hex_dump(priv, IWL_DL_TX, tx_cmd, sizeof(*tx_cmd));
- iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr,
- ieee80211_hdrlen(fc));
-
- /*
- * Use the first empty entry in this queue's command buffer array
- * to contain the Tx command and MAC header concatenated together
- * (payload data will be in another buffer).
- * Size of this varies, due to varying MAC header length.
- * If end is not dword aligned, we'll have 2 extra bytes at the end
- * of the MAC header (device reads on dword boundaries).
- * We'll tell device about this padding later.
- */
- len = sizeof(struct iwl3945_tx_cmd) +
- sizeof(struct iwl_cmd_header) + hdr_len;
- len = (len + 3) & ~3;
-
- /* Physical address of this Tx command's header (not MAC header!),
- * within command buffer array. */
- txcmd_phys = pci_map_single(priv->pci_dev, &out_cmd->hdr,
- len, PCI_DMA_TODEVICE);
- /* we do not map meta data ... so we can safely access the address to
- * provide to the unmap call */
- dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
- dma_unmap_len_set(out_meta, len, len);
-
- /* Add buffer containing Tx command and MAC(!) header to TFD's
- * first entry */
- priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
- txcmd_phys, len, 1, 0);
-
-
- /* Set up TFD's 2nd entry to point directly to remainder of skb,
- * if any (802.11 null frames have no payload). */
- len = skb->len - hdr_len;
- if (len) {
- phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
- len, PCI_DMA_TODEVICE);
- priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
- phys_addr, len,
- 0, U32_PAD(len));
- }
-
-
- /* Tell device the write index *just past* this latest filled TFD */
- q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd);
- iwl_legacy_txq_update_write_ptr(priv, txq);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- if ((iwl_legacy_queue_space(q) < q->high_mark)
- && priv->mac80211_registered) {
- if (wait_write_ptr) {
- spin_lock_irqsave(&priv->lock, flags);
- txq->need_update = 1;
- iwl_legacy_txq_update_write_ptr(priv, txq);
- spin_unlock_irqrestore(&priv->lock, flags);
- }
-
- iwl_legacy_stop_queue(priv, txq);
- }
-
- return 0;
-
-drop_unlock:
- spin_unlock_irqrestore(&priv->lock, flags);
-drop:
- return -1;
-}
-
-static int iwl3945_get_measurement(struct iwl_priv *priv,
- struct ieee80211_measurement_params *params,
- u8 type)
-{
- struct iwl_spectrum_cmd spectrum;
- struct iwl_rx_packet *pkt;
- struct iwl_host_cmd cmd = {
- .id = REPLY_SPECTRUM_MEASUREMENT_CMD,
- .data = (void *)&spectrum,
- .flags = CMD_WANT_SKB,
- };
- u32 add_time = le64_to_cpu(params->start_time);
- int rc;
- int spectrum_resp_status;
- int duration = le16_to_cpu(params->duration);
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
- if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS))
- add_time = iwl_legacy_usecs_to_beacons(priv,
- le64_to_cpu(params->start_time) - priv->_3945.last_tsf,
- le16_to_cpu(ctx->timing.beacon_interval));
-
- memset(&spectrum, 0, sizeof(spectrum));
-
- spectrum.channel_count = cpu_to_le16(1);
- spectrum.flags =
- RXON_FLG_TSF2HOST_MSK | RXON_FLG_ANT_A_MSK | RXON_FLG_DIS_DIV_MSK;
- spectrum.filter_flags = MEASUREMENT_FILTER_FLAG;
- cmd.len = sizeof(spectrum);
- spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len));
-
- if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS))
- spectrum.start_time =
- iwl_legacy_add_beacon_time(priv,
- priv->_3945.last_beacon_time, add_time,
- le16_to_cpu(ctx->timing.beacon_interval));
- else
- spectrum.start_time = 0;
-
- spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT);
- spectrum.channels[0].channel = params->channel;
- spectrum.channels[0].type = type;
- if (ctx->active.flags & RXON_FLG_BAND_24G_MSK)
- spectrum.flags |= RXON_FLG_BAND_24G_MSK |
- RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK;
-
- rc = iwl_legacy_send_cmd_sync(priv, &cmd);
- if (rc)
- return rc;
-
- pkt = (struct iwl_rx_packet *)cmd.reply_page;
- if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
- IWL_ERR(priv, "Bad return from REPLY_RX_ON_ASSOC command\n");
- rc = -EIO;
- }
-
- spectrum_resp_status = le16_to_cpu(pkt->u.spectrum.status);
- switch (spectrum_resp_status) {
- case 0: /* Command will be handled */
- if (pkt->u.spectrum.id != 0xff) {
- IWL_DEBUG_INFO(priv, "Replaced existing measurement: %d\n",
- pkt->u.spectrum.id);
- priv->measurement_status &= ~MEASUREMENT_READY;
- }
- priv->measurement_status |= MEASUREMENT_ACTIVE;
- rc = 0;
- break;
-
- case 1: /* Command will not be handled */
- rc = -EAGAIN;
- break;
- }
-
- iwl_legacy_free_pages(priv, cmd.reply_page);
-
- return rc;
-}
-
-static void iwl3945_rx_reply_alive(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_alive_resp *palive;
- struct delayed_work *pwork;
-
- palive = &pkt->u.alive_frame;
-
- IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision "
- "0x%01X 0x%01X\n",
- palive->is_valid, palive->ver_type,
- palive->ver_subtype);
-
- if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
- IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
- memcpy(&priv->card_alive_init, &pkt->u.alive_frame,
- sizeof(struct iwl_alive_resp));
- pwork = &priv->init_alive_start;
- } else {
- IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
- memcpy(&priv->card_alive, &pkt->u.alive_frame,
- sizeof(struct iwl_alive_resp));
- pwork = &priv->alive_start;
- iwl3945_disable_events(priv);
- }
-
- /* We delay the ALIVE response by 5ms to
- * give the HW RF Kill time to activate... */
- if (palive->is_valid == UCODE_VALID_OK)
- queue_delayed_work(priv->workqueue, pwork,
- msecs_to_jiffies(5));
- else
- IWL_WARN(priv, "uCode did not respond OK.\n");
-}
-
-static void iwl3945_rx_reply_add_sta(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
-#endif
-
- IWL_DEBUG_RX(priv, "Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status);
-}
-
-static void iwl3945_rx_beacon_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl3945_beacon_notif *beacon = &(pkt->u.beacon_status);
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- u8 rate = beacon->beacon_notify_hdr.rate;
-
- IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d "
- "tsf %d %d rate %d\n",
- le32_to_cpu(beacon->beacon_notify_hdr.status) & TX_STATUS_MSK,
- beacon->beacon_notify_hdr.failure_frame,
- le32_to_cpu(beacon->ibss_mgr_status),
- le32_to_cpu(beacon->high_tsf),
- le32_to_cpu(beacon->low_tsf), rate);
-#endif
-
- priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
-
-}
-
-/* Handle notification from uCode that card's power state is changing
- * due to software, hardware, or critical temperature RFKILL */
-static void iwl3945_rx_card_state_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
- unsigned long status = priv->status;
-
- IWL_WARN(priv, "Card state received: HW:%s SW:%s\n",
- (flags & HW_CARD_DISABLED) ? "Kill" : "On",
- (flags & SW_CARD_DISABLED) ? "Kill" : "On");
-
- iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
- CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
-
- if (flags & HW_CARD_DISABLED)
- set_bit(STATUS_RF_KILL_HW, &priv->status);
- else
- clear_bit(STATUS_RF_KILL_HW, &priv->status);
-
-
- iwl_legacy_scan_cancel(priv);
-
- if ((test_bit(STATUS_RF_KILL_HW, &status) !=
- test_bit(STATUS_RF_KILL_HW, &priv->status)))
- wiphy_rfkill_set_hw_state(priv->hw->wiphy,
- test_bit(STATUS_RF_KILL_HW, &priv->status));
- else
- wake_up(&priv->wait_command_queue);
-}
-
-/**
- * iwl3945_setup_rx_handlers - Initialize Rx handler callbacks
- *
- * Setup the RX handlers for each of the reply types sent from the uCode
- * to the host.
- *
- * This function chains into the hardware specific files for them to setup
- * any hardware specific handlers as well.
- */
-static void iwl3945_setup_rx_handlers(struct iwl_priv *priv)
-{
- priv->rx_handlers[REPLY_ALIVE] = iwl3945_rx_reply_alive;
- priv->rx_handlers[REPLY_ADD_STA] = iwl3945_rx_reply_add_sta;
- priv->rx_handlers[REPLY_ERROR] = iwl_legacy_rx_reply_error;
- priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_legacy_rx_csa;
- priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
- iwl_legacy_rx_spectrum_measure_notif;
- priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_legacy_rx_pm_sleep_notif;
- priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
- iwl_legacy_rx_pm_debug_statistics_notif;
- priv->rx_handlers[BEACON_NOTIFICATION] = iwl3945_rx_beacon_notif;
-
- /*
- * The same handler is used for both the REPLY to a discrete
- * statistics request from the host as well as for the periodic
- * statistics notifications (after received beacons) from the uCode.
- */
- priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl3945_reply_statistics;
- priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl3945_hw_rx_statistics;
-
- iwl_legacy_setup_rx_scan_handlers(priv);
- priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl3945_rx_card_state_notif;
-
- /* Set up hardware specific Rx handlers */
- iwl3945_hw_rx_handler_setup(priv);
-}
-
-/************************** RX-FUNCTIONS ****************************/
-/*
- * Rx theory of operation
- *
- * The host allocates 32 DMA target addresses and passes the host address
- * to the firmware at register IWL_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
- * 0 to 31
- *
- * Rx Queue Indexes
- * The host/firmware share two index registers for managing the Rx buffers.
- *
- * The READ index maps to the first position that the firmware may be writing
- * to -- the driver can read up to (but not including) this position and get
- * good data.
- * The READ index is managed by the firmware once the card is enabled.
- *
- * The WRITE index maps to the last position the driver has read from -- the
- * position preceding WRITE is the last slot the firmware can place a packet.
- *
- * The queue is empty (no good data) if WRITE = READ - 1, and is full if
- * WRITE = READ.
- *
- * During initialization, the host sets up the READ queue position to the first
- * INDEX position, and WRITE to the last (READ - 1 wrapped)
- *
- * When the firmware places a packet in a buffer, it will advance the READ index
- * and fire the RX interrupt. The driver can then query the READ index and
- * process as many packets as possible, moving the WRITE index forward as it
- * resets the Rx queue buffers with new memory.
- *
- * The management in the driver is as follows:
- * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
- * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
- * to replenish the iwl->rxq->rx_free.
- * + In iwl3945_rx_replenish (scheduled) if 'processed' != 'read' then the
- * iwl->rxq is replenished and the READ INDEX is updated (updating the
- * 'processed' and 'read' driver indexes as well)
- * + A received packet is processed and handed to the kernel network stack,
- * detached from the iwl->rxq. The driver 'processed' index is updated.
- * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
- * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
- * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
- * were enough free buffers and RX_STALLED is set it is cleared.
- *
- *
- * Driver sequence:
- *
- * iwl3945_rx_replenish() Replenishes rx_free list from rx_used, and calls
- * iwl3945_rx_queue_restock
- * iwl3945_rx_queue_restock() Moves available buffers from rx_free into Rx
- * queue, updates firmware pointers, and updates
- * the WRITE index. If insufficient rx_free buffers
- * are available, schedules iwl3945_rx_replenish
- *
- * -- enable interrupts --
- * ISR - iwl3945_rx() Detach iwl_rx_mem_buffers from pool up to the
- * READ INDEX, detaching the SKB from the pool.
- * Moves the packet buffer from queue to rx_used.
- * Calls iwl3945_rx_queue_restock to refill any empty
- * slots.
- * ...
- *
- */
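
The empty/full convention described above can be restated as a tiny standalone check. This is only an illustration of the comment; it assumes the queue size is a power of two so the wrap mask is size - 1 (the driver wraps indexes with RX_QUEUE_MASK in the same way):

static inline int rxq_is_empty(u32 read, u32 write, u32 mask)
{
	return ((write + 1) & mask) == read;	/* WRITE == READ - 1 */
}

static inline int rxq_is_full(u32 read, u32 write)
{
	return write == read;			/* WRITE == READ */
}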
-
-/**
- * iwl3945_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
- */
-static inline __le32 iwl3945_dma_addr2rbd_ptr(struct iwl_priv *priv,
- dma_addr_t dma_addr)
-{
- return cpu_to_le32((u32)dma_addr);
-}
-
-/**
- * iwl3945_rx_queue_restock - refill RX queue from pre-allocated pool
- *
- * If there are slots in the RX queue that need to be restocked,
- * and we have free pre-allocated buffers, fill the ranks as much
- * as we can, pulling from rx_free.
- *
- * This moves the 'write' index forward to catch up with 'processed', and
- * also updates the memory address in the firmware to reference the new
- * target buffer.
- */
-static void iwl3945_rx_queue_restock(struct iwl_priv *priv)
-{
- struct iwl_rx_queue *rxq = &priv->rxq;
- struct list_head *element;
- struct iwl_rx_mem_buffer *rxb;
- unsigned long flags;
- int write;
-
- spin_lock_irqsave(&rxq->lock, flags);
- write = rxq->write & ~0x7;
- while ((iwl_legacy_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
- /* Get next free Rx buffer, remove from free list */
- element = rxq->rx_free.next;
- rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
- list_del(element);
-
- /* Point to Rx buffer via next RBD in circular buffer */
- rxq->bd[rxq->write] = iwl3945_dma_addr2rbd_ptr(priv, rxb->page_dma);
- rxq->queue[rxq->write] = rxb;
- rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
- rxq->free_count--;
- }
- spin_unlock_irqrestore(&rxq->lock, flags);
- /* If the pre-allocated buffer pool is dropping low, schedule to
- * refill it */
- if (rxq->free_count <= RX_LOW_WATERMARK)
- queue_work(priv->workqueue, &priv->rx_replenish);
-
-
- /* If we've added more space for the firmware to place data, tell it.
- * Increment device's write pointer in multiples of 8. */
- if ((rxq->write_actual != (rxq->write & ~0x7))
- || (abs(rxq->write - rxq->read) > 7)) {
- spin_lock_irqsave(&rxq->lock, flags);
- rxq->need_update = 1;
- spin_unlock_irqrestore(&rxq->lock, flags);
- iwl_legacy_rx_queue_update_write_ptr(priv, rxq);
- }
-}
-
-/**
- * iwl3945_rx_replenish - Move all used packets from rx_used to rx_free
- *
- * When moving to rx_free an SKB is allocated for the slot.
- *
- * Also restock the Rx queue via iwl3945_rx_queue_restock.
- * This is called as a scheduled work item (except during initialization).
- */
-static void iwl3945_rx_allocate(struct iwl_priv *priv, gfp_t priority)
-{
- struct iwl_rx_queue *rxq = &priv->rxq;
- struct list_head *element;
- struct iwl_rx_mem_buffer *rxb;
- struct page *page;
- unsigned long flags;
- gfp_t gfp_mask = priority;
-
- while (1) {
- spin_lock_irqsave(&rxq->lock, flags);
-
- if (list_empty(&rxq->rx_used)) {
- spin_unlock_irqrestore(&rxq->lock, flags);
- return;
- }
- spin_unlock_irqrestore(&rxq->lock, flags);
-
- if (rxq->free_count > RX_LOW_WATERMARK)
- gfp_mask |= __GFP_NOWARN;
-
- if (priv->hw_params.rx_page_order > 0)
- gfp_mask |= __GFP_COMP;
-
- /* Alloc a new receive buffer */
- page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
- if (!page) {
- if (net_ratelimit())
- IWL_DEBUG_INFO(priv, "Failed to allocate SKB buffer.\n");
- if ((rxq->free_count <= RX_LOW_WATERMARK) &&
- net_ratelimit())
- IWL_CRIT(priv, "Failed to allocate SKB buffer with %s. Only %u free buffers remaining.\n",
- priority == GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL",
- rxq->free_count);
- /* We don't reschedule replenish work here -- we will
- * call the restock method and if it still needs
- * more buffers it will schedule replenish */
- break;
- }
-
- spin_lock_irqsave(&rxq->lock, flags);
- if (list_empty(&rxq->rx_used)) {
- spin_unlock_irqrestore(&rxq->lock, flags);
- __free_pages(page, priv->hw_params.rx_page_order);
- return;
- }
- element = rxq->rx_used.next;
- rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
- list_del(element);
- spin_unlock_irqrestore(&rxq->lock, flags);
-
- rxb->page = page;
- /* Get physical address of RB/SKB */
- rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
- PAGE_SIZE << priv->hw_params.rx_page_order,
- PCI_DMA_FROMDEVICE);
-
- spin_lock_irqsave(&rxq->lock, flags);
-
- list_add_tail(&rxb->list, &rxq->rx_free);
- rxq->free_count++;
- priv->alloc_rxb_page++;
-
- spin_unlock_irqrestore(&rxq->lock, flags);
- }
-}
-
-void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
-{
- unsigned long flags;
- int i;
- spin_lock_irqsave(&rxq->lock, flags);
- INIT_LIST_HEAD(&rxq->rx_free);
- INIT_LIST_HEAD(&rxq->rx_used);
- /* Fill the rx_used queue with _all_ of the Rx buffers */
- for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
- /* In the reset function, these buffers may have been allocated
- * to an SKB, so we need to unmap and free potential storage */
- if (rxq->pool[i].page != NULL) {
- pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
- PAGE_SIZE << priv->hw_params.rx_page_order,
- PCI_DMA_FROMDEVICE);
- __iwl_legacy_free_pages(priv, rxq->pool[i].page);
- rxq->pool[i].page = NULL;
- }
- list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
- }
-
- /* Set us so that we have processed and used all buffers, but have
- * not restocked the Rx queue with fresh buffers */
- rxq->read = rxq->write = 0;
- rxq->write_actual = 0;
- rxq->free_count = 0;
- spin_unlock_irqrestore(&rxq->lock, flags);
-}
-
-void iwl3945_rx_replenish(void *data)
-{
- struct iwl_priv *priv = data;
- unsigned long flags;
-
- iwl3945_rx_allocate(priv, GFP_KERNEL);
-
- spin_lock_irqsave(&priv->lock, flags);
- iwl3945_rx_queue_restock(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
-}
-
-static void iwl3945_rx_replenish_now(struct iwl_priv *priv)
-{
- iwl3945_rx_allocate(priv, GFP_ATOMIC);
-
- iwl3945_rx_queue_restock(priv);
-}
-
-
-/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
- * If an SKB has been detached, the POOL needs to have its SKB set to NULL.
- * This free routine walks the list of POOL entries and, if the SKB is
- * non-NULL, it is unmapped and freed.
- */
-static void iwl3945_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
-{
- int i;
- for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
- if (rxq->pool[i].page != NULL) {
- pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
- PAGE_SIZE << priv->hw_params.rx_page_order,
- PCI_DMA_FROMDEVICE);
- __iwl_legacy_free_pages(priv, rxq->pool[i].page);
- rxq->pool[i].page = NULL;
- }
- }
-
- dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
- rxq->bd_dma);
- dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
- rxq->rb_stts, rxq->rb_stts_dma);
- rxq->bd = NULL;
- rxq->rb_stts = NULL;
-}
-
-
-/* Convert linear signal-to-noise ratio into dB */
-static u8 ratio2dB[100] = {
-/* 0 1 2 3 4 5 6 7 8 9 */
- 0, 0, 6, 10, 12, 14, 16, 17, 18, 19, /* 00 - 09 */
- 20, 21, 22, 22, 23, 23, 24, 25, 26, 26, /* 10 - 19 */
- 26, 26, 26, 27, 27, 28, 28, 28, 29, 29, /* 20 - 29 */
- 29, 30, 30, 30, 31, 31, 31, 31, 32, 32, /* 30 - 39 */
- 32, 32, 32, 33, 33, 33, 33, 33, 34, 34, /* 40 - 49 */
- 34, 34, 34, 34, 35, 35, 35, 35, 35, 35, /* 50 - 59 */
- 36, 36, 36, 36, 36, 36, 36, 37, 37, 37, /* 60 - 69 */
- 37, 37, 37, 37, 37, 38, 38, 38, 38, 38, /* 70 - 79 */
- 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, /* 80 - 89 */
- 39, 39, 39, 39, 39, 40, 40, 40, 40, 40 /* 90 - 99 */
-};
-
-/* Calculates a relative dB value from a ratio of linear
- * (i.e. not dB) signal levels.
- * Conversion assumes that levels are voltages (20*log), not powers (10*log). */
-int iwl3945_calc_db_from_ratio(int sig_ratio)
-{
- /* 1000:1 or higher just report as 60 dB */
- if (sig_ratio >= 1000)
- return 60;
-
- /* 100:1 or higher, divide by 10 and use table,
- * add 20 dB to make up for divide by 10 */
- if (sig_ratio >= 100)
- return 20 + (int)ratio2dB[sig_ratio/10];
-
- /* We shouldn't see this */
- if (sig_ratio < 1)
- return 0;
-
- /* Use table for ratios 1:1 - 99:1 */
- return (int)ratio2dB[sig_ratio];
-}
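
A couple of worked values from the table and code above (derived from them, not new behaviour): iwl3945_calc_db_from_ratio(50) returns ratio2dB[50] = 34, close to 20*log10(50) ~= 33.98 dB; iwl3945_calc_db_from_ratio(100) takes the >= 100 branch and returns 20 + ratio2dB[10] = 20 + 20 = 40 dB, matching 20*log10(100); anything at or above 1000:1 is clamped to 60 dB.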
-
-/**
- * iwl3945_rx_handle - Main entry function for receiving responses from uCode
- *
- * Uses the priv->rx_handlers callback function array to invoke
- * the appropriate handlers, including command responses,
- * frame-received notifications, and other notifications.
- */
-static void iwl3945_rx_handle(struct iwl_priv *priv)
-{
- struct iwl_rx_mem_buffer *rxb;
- struct iwl_rx_packet *pkt;
- struct iwl_rx_queue *rxq = &priv->rxq;
- u32 r, i;
- int reclaim;
- unsigned long flags;
- u8 fill_rx = 0;
- u32 count = 8;
- int total_empty = 0;
-
- /* uCode's read index (stored in shared DRAM) indicates the last Rx
- * buffer that the driver may process (last buffer filled by ucode). */
- r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
- i = rxq->read;
-
- /* calculate how many frames need to be restocked after handling RX */
- total_empty = r - rxq->write_actual;
- if (total_empty < 0)
- total_empty += RX_QUEUE_SIZE;
-
- if (total_empty > (RX_QUEUE_SIZE / 2))
- fill_rx = 1;
- /* Rx interrupt, but nothing sent from uCode */
- if (i == r)
- IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i);
-
- while (i != r) {
- int len;
-
- rxb = rxq->queue[i];
-
- /* If an RXB doesn't have a Rx queue slot associated with it,
- * then a bug has been introduced in the queue refilling
- * routines -- catch it here */
- BUG_ON(rxb == NULL);
-
- rxq->queue[i] = NULL;
-
- pci_unmap_page(priv->pci_dev, rxb->page_dma,
- PAGE_SIZE << priv->hw_params.rx_page_order,
- PCI_DMA_FROMDEVICE);
- pkt = rxb_addr(rxb);
-
- len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
- len += sizeof(u32); /* account for status word */
- trace_iwlwifi_legacy_dev_rx(priv, pkt, len);
-
- /* Reclaim a command buffer only if this packet is a response
- * to a (driver-originated) command.
- * If the packet (e.g. Rx frame) originated from uCode,
- * there is no command buffer to reclaim.
- * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
- * but apparently a few don't get set; catch them here. */
- reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
- (pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
- (pkt->hdr.cmd != REPLY_TX);
-
- /* Based on type of command response or notification,
- * handle those that need handling via function in
- * rx_handlers table. See iwl3945_setup_rx_handlers() */
- if (priv->rx_handlers[pkt->hdr.cmd]) {
- IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r, i,
- iwl_legacy_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
- priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
- priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
- } else {
- /* No handling needed */
- IWL_DEBUG_RX(priv,
- "r %d i %d No handler needed for %s, 0x%02x\n",
- r, i, iwl_legacy_get_cmd_string(pkt->hdr.cmd),
- pkt->hdr.cmd);
- }
-
- /*
- * XXX: After here, we should always check rxb->page
- * against NULL before touching it or its virtual
- * memory (pkt), because some rx_handler might have
- * already taken or freed the pages.
- */
-
- if (reclaim) {
- /* Invoke any callbacks, transfer the buffer to caller,
- * and fire off the (possibly) blocking iwl_legacy_send_cmd()
- * as we reclaim the driver command queue */
- if (rxb->page)
- iwl_legacy_tx_cmd_complete(priv, rxb);
- else
- IWL_WARN(priv, "Claim null rxb?\n");
- }
-
- /* Reuse the page if possible. For notification packets and
- * SKBs that fail to Rx correctly, add them back into the
- * rx_free list for reuse later. */
- spin_lock_irqsave(&rxq->lock, flags);
- if (rxb->page != NULL) {
- rxb->page_dma = pci_map_page(priv->pci_dev, rxb->page,
- 0, PAGE_SIZE << priv->hw_params.rx_page_order,
- PCI_DMA_FROMDEVICE);
- list_add_tail(&rxb->list, &rxq->rx_free);
- rxq->free_count++;
- } else
- list_add_tail(&rxb->list, &rxq->rx_used);
-
- spin_unlock_irqrestore(&rxq->lock, flags);
-
- i = (i + 1) & RX_QUEUE_MASK;
- /* If there are a lot of unused frames,
- * restock the Rx queue so ucode won't assert. */
- if (fill_rx) {
- count++;
- if (count >= 8) {
- rxq->read = i;
- iwl3945_rx_replenish_now(priv);
- count = 0;
- }
- }
- }
-
- /* Backtrack one entry */
- rxq->read = i;
- if (fill_rx)
- iwl3945_rx_replenish_now(priv);
- else
- iwl3945_rx_queue_restock(priv);
-}
-
-/* call this function to flush any scheduled tasklet */
-static inline void iwl3945_synchronize_irq(struct iwl_priv *priv)
-{
- /* wait to make sure we flush any pending tasklet */
- synchronize_irq(priv->pci_dev->irq);
- tasklet_kill(&priv->irq_tasklet);
-}
-
-static const char *iwl3945_desc_lookup(int i)
-{
- switch (i) {
- case 1:
- return "FAIL";
- case 2:
- return "BAD_PARAM";
- case 3:
- return "BAD_CHECKSUM";
- case 4:
- return "NMI_INTERRUPT";
- case 5:
- return "SYSASSERT";
- case 6:
- return "FATAL_ERROR";
- }
-
- return "UNKNOWN";
-}
-
-#define ERROR_START_OFFSET (1 * sizeof(u32))
-#define ERROR_ELEM_SIZE (7 * sizeof(u32))
-
-void iwl3945_dump_nic_error_log(struct iwl_priv *priv)
-{
- u32 i;
- u32 desc, time, count, base, data1;
- u32 blink1, blink2, ilink1, ilink2;
-
- base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
-
- if (!iwl3945_hw_valid_rtc_data_addr(base)) {
- IWL_ERR(priv, "Not valid error log pointer 0x%08X\n", base);
- return;
- }
-
-
- count = iwl_legacy_read_targ_mem(priv, base);
-
- if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
- IWL_ERR(priv, "Start IWL Error Log Dump:\n");
- IWL_ERR(priv, "Status: 0x%08lX, count: %d\n",
- priv->status, count);
- }
-
- IWL_ERR(priv, "Desc Time asrtPC blink2 "
- "ilink1 nmiPC Line\n");
- for (i = ERROR_START_OFFSET;
- i < (count * ERROR_ELEM_SIZE) + ERROR_START_OFFSET;
- i += ERROR_ELEM_SIZE) {
- desc = iwl_legacy_read_targ_mem(priv, base + i);
- time =
- iwl_legacy_read_targ_mem(priv, base + i + 1 * sizeof(u32));
- blink1 =
- iwl_legacy_read_targ_mem(priv, base + i + 2 * sizeof(u32));
- blink2 =
- iwl_legacy_read_targ_mem(priv, base + i + 3 * sizeof(u32));
- ilink1 =
- iwl_legacy_read_targ_mem(priv, base + i + 4 * sizeof(u32));
- ilink2 =
- iwl_legacy_read_targ_mem(priv, base + i + 5 * sizeof(u32));
- data1 =
- iwl_legacy_read_targ_mem(priv, base + i + 6 * sizeof(u32));
-
- IWL_ERR(priv,
- "%-13s (0x%X) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n",
- iwl3945_desc_lookup(desc), desc, time, blink1, blink2,
- ilink1, ilink2, data1);
- trace_iwlwifi_legacy_dev_ucode_error(priv, desc, time, data1, 0,
- 0, blink1, blink2, ilink1, ilink2);
- }
-}
-
-static void iwl3945_irq_tasklet(struct iwl_priv *priv)
-{
- u32 inta, handled = 0;
- u32 inta_fh;
- unsigned long flags;
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- u32 inta_mask;
-#endif
-
- spin_lock_irqsave(&priv->lock, flags);
-
- /* Ack/clear/reset pending uCode interrupts.
- * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
- * and will clear only when CSR_FH_INT_STATUS gets cleared. */
- inta = iwl_read32(priv, CSR_INT);
- iwl_write32(priv, CSR_INT, inta);
-
- /* Ack/clear/reset pending flow-handler (DMA) interrupts.
- * Any new interrupts that happen after this, either while we're
- * in this tasklet, or later, will show up in next ISR/tasklet. */
- inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
- iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh);
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- if (iwl_legacy_get_debug_level(priv) & IWL_DL_ISR) {
- /* just for debug */
- inta_mask = iwl_read32(priv, CSR_INT_MASK);
- IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
- inta, inta_mask, inta_fh);
- }
-#endif
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
- * atomic, make sure that inta covers all the interrupts that
- * we've discovered, even if FH interrupt came in just after
- * reading CSR_INT. */
- if (inta_fh & CSR39_FH_INT_RX_MASK)
- inta |= CSR_INT_BIT_FH_RX;
- if (inta_fh & CSR39_FH_INT_TX_MASK)
- inta |= CSR_INT_BIT_FH_TX;
-
- /* Now service all interrupt bits discovered above. */
- if (inta & CSR_INT_BIT_HW_ERR) {
- IWL_ERR(priv, "Hardware error detected. Restarting.\n");
-
- /* Tell the device to stop sending interrupts */
- iwl_legacy_disable_interrupts(priv);
-
- priv->isr_stats.hw++;
- iwl_legacy_irq_handle_error(priv);
-
- handled |= CSR_INT_BIT_HW_ERR;
-
- return;
- }
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
- /* NIC fires this, but we don't use it, redundant with WAKEUP */
- if (inta & CSR_INT_BIT_SCD) {
-			IWL_DEBUG_ISR(priv, "Scheduler finished transmitting "
-				      "the frame/frames.\n");
- priv->isr_stats.sch++;
- }
-
- /* Alive notification via Rx interrupt will do the real work */
- if (inta & CSR_INT_BIT_ALIVE) {
- IWL_DEBUG_ISR(priv, "Alive interrupt\n");
- priv->isr_stats.alive++;
- }
- }
-#endif
- /* Safely ignore these bits for debug checks below */
- inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
-
- /* Error detected by uCode */
- if (inta & CSR_INT_BIT_SW_ERR) {
- IWL_ERR(priv, "Microcode SW error detected. "
- "Restarting 0x%X.\n", inta);
- priv->isr_stats.sw++;
- iwl_legacy_irq_handle_error(priv);
- handled |= CSR_INT_BIT_SW_ERR;
- }
-
- /* uCode wakes up after power-down sleep */
- if (inta & CSR_INT_BIT_WAKEUP) {
- IWL_DEBUG_ISR(priv, "Wakeup interrupt\n");
- iwl_legacy_rx_queue_update_write_ptr(priv, &priv->rxq);
- iwl_legacy_txq_update_write_ptr(priv, &priv->txq[0]);
- iwl_legacy_txq_update_write_ptr(priv, &priv->txq[1]);
- iwl_legacy_txq_update_write_ptr(priv, &priv->txq[2]);
- iwl_legacy_txq_update_write_ptr(priv, &priv->txq[3]);
- iwl_legacy_txq_update_write_ptr(priv, &priv->txq[4]);
- iwl_legacy_txq_update_write_ptr(priv, &priv->txq[5]);
-
- priv->isr_stats.wakeup++;
- handled |= CSR_INT_BIT_WAKEUP;
- }
-
- /* All uCode command responses, including Tx command responses,
- * Rx "responses" (frame-received notification), and other
-	 * notifications from uCode come through here */
- if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
- iwl3945_rx_handle(priv);
- priv->isr_stats.rx++;
- handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
- }
-
- if (inta & CSR_INT_BIT_FH_TX) {
- IWL_DEBUG_ISR(priv, "Tx interrupt\n");
- priv->isr_stats.tx++;
-
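-		/* ack the FH Tx interrupt; bit 6 presumably corresponds to
-		 * FH39_SRVC_CHNL (the service channel) used just below */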
- iwl_write32(priv, CSR_FH_INT_STATUS, (1 << 6));
- iwl_legacy_write_direct32(priv, FH39_TCSR_CREDIT
- (FH39_SRVC_CHNL), 0x0);
- handled |= CSR_INT_BIT_FH_TX;
- }
-
- if (inta & ~handled) {
- IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
- priv->isr_stats.unhandled++;
- }
-
- if (inta & ~priv->inta_mask) {
- IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n",
- inta & ~priv->inta_mask);
- IWL_WARN(priv, " with FH_INT = 0x%08x\n", inta_fh);
- }
-
- /* Re-enable all interrupts */
- /* only Re-enable if disabled by irq */
- if (test_bit(STATUS_INT_ENABLED, &priv->status))
- iwl_legacy_enable_interrupts(priv);
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
- inta = iwl_read32(priv, CSR_INT);
- inta_mask = iwl_read32(priv, CSR_INT_MASK);
- inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
- IWL_DEBUG_ISR(priv, "End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
- "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
- }
-#endif
-}
-
-static int iwl3945_get_channels_for_scan(struct iwl_priv *priv,
- enum ieee80211_band band,
- u8 is_active, u8 n_probes,
- struct iwl3945_scan_channel *scan_ch,
- struct ieee80211_vif *vif)
-{
- struct ieee80211_channel *chan;
- const struct ieee80211_supported_band *sband;
- const struct iwl_channel_info *ch_info;
- u16 passive_dwell = 0;
- u16 active_dwell = 0;
- int added, i;
-
- sband = iwl_get_hw_mode(priv, band);
- if (!sband)
- return 0;
-
- active_dwell = iwl_legacy_get_active_dwell_time(priv, band, n_probes);
- passive_dwell = iwl_legacy_get_passive_dwell_time(priv, band, vif);
-
- if (passive_dwell <= active_dwell)
- passive_dwell = active_dwell + 1;
-
- for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) {
- chan = priv->scan_request->channels[i];
-
- if (chan->band != band)
- continue;
-
- scan_ch->channel = chan->hw_value;
-
- ch_info = iwl_legacy_get_channel_info(priv, band,
- scan_ch->channel);
- if (!iwl_legacy_is_channel_valid(ch_info)) {
- IWL_DEBUG_SCAN(priv,
- "Channel %d is INVALID for this band.\n",
- scan_ch->channel);
- continue;
- }
-
- scan_ch->active_dwell = cpu_to_le16(active_dwell);
- scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
-		/* If passive, set up for auto-switch
- * and use long active_dwell time.
- */
- if (!is_active || iwl_legacy_is_channel_passive(ch_info) ||
- (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)) {
- scan_ch->type = 0; /* passive */
- if (IWL_UCODE_API(priv->ucode_ver) == 1)
- scan_ch->active_dwell = cpu_to_le16(passive_dwell - 1);
- } else {
- scan_ch->type = 1; /* active */
- }
-
-		/* Set direct probe bits. These may be used either for active
-		 * scan channels (probes get sent right away),
-		 * or for passive channels (probes get sent only after
-		 * hearing a clear Rx packet). */
- if (IWL_UCODE_API(priv->ucode_ver) >= 2) {
- if (n_probes)
- scan_ch->type |= IWL39_SCAN_PROBE_MASK(n_probes);
- } else {
- /* uCode v1 does not allow setting direct probe bits on
- * passive channel. */
- if ((scan_ch->type & 1) && n_probes)
- scan_ch->type |= IWL39_SCAN_PROBE_MASK(n_probes);
- }
-
- /* Set txpower levels to defaults */
- scan_ch->tpc.dsp_atten = 110;
- /* scan_pwr_info->tpc.dsp_atten; */
-
- /*scan_pwr_info->tpc.tx_gain; */
- if (band == IEEE80211_BAND_5GHZ)
- scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3;
- else {
- scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3));
- /* NOTE: if we were doing 6Mb OFDM for scans we'd use
- * power level:
- * scan_ch->tpc.tx_gain = ((1 << 5) | (2 << 3)) | 3;
- */
- }
-
- IWL_DEBUG_SCAN(priv, "Scanning %d [%s %d]\n",
- scan_ch->channel,
- (scan_ch->type & 1) ? "ACTIVE" : "PASSIVE",
- (scan_ch->type & 1) ?
- active_dwell : passive_dwell);
-
- scan_ch++;
- added++;
- }
-
- IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added);
- return added;
-}
-
-static void iwl3945_init_hw_rates(struct iwl_priv *priv,
- struct ieee80211_rate *rates)
-{
- int i;
-
- for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) {
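-		/* iwl3945_rates[].ieee is in 500 kbps units; mac80211 wants
-		 * bitrate in 100 kbps units, hence the multiply by 5 */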
- rates[i].bitrate = iwl3945_rates[i].ieee * 5;
- rates[i].hw_value = i; /* Rate scaling will work on indexes */
- rates[i].hw_value_short = i;
- rates[i].flags = 0;
- if ((i > IWL39_LAST_OFDM_RATE) || (i < IWL_FIRST_OFDM_RATE)) {
- /*
- * If CCK != 1M then set short preamble rate flag.
- */
- rates[i].flags |= (iwl3945_rates[i].plcp == 10) ?
- 0 : IEEE80211_RATE_SHORT_PREAMBLE;
- }
- }
-}
-
-/******************************************************************************
- *
- * uCode download functions
- *
- ******************************************************************************/
-
-static void iwl3945_dealloc_ucode_pci(struct iwl_priv *priv)
-{
- iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_code);
- iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data);
- iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
- iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init);
- iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init_data);
- iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_boot);
-}
-
-/**
- * iwl3945_verify_inst_full - verify runtime uCode image in card vs. host,
- * looking at all data.
- */
-static int iwl3945_verify_inst_full(struct iwl_priv *priv, __le32 *image, u32 len)
-{
- u32 val;
- u32 save_len = len;
- int rc = 0;
- u32 errcnt;
-
- IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
-
- iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
- IWL39_RTC_INST_LOWER_BOUND);
-
- errcnt = 0;
- for (; len > 0; len -= sizeof(u32), image++) {
- /* read data comes through single port, auto-incr addr */
- /* NOTE: Use the debugless read so we don't flood kernel log
- * if IWL_DL_IO is set */
- val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
- if (val != le32_to_cpu(*image)) {
- IWL_ERR(priv, "uCode INST section is invalid at "
- "offset 0x%x, is 0x%x, s/b 0x%x\n",
- save_len - len, val, le32_to_cpu(*image));
- rc = -EIO;
- errcnt++;
- if (errcnt >= 20)
- break;
- }
- }
-
-
- if (!errcnt)
- IWL_DEBUG_INFO(priv,
- "ucode image in INSTRUCTION memory is good\n");
-
- return rc;
-}
-
-
-/**
- * iwl3945_verify_inst_sparse - verify runtime uCode image in card vs. host,
- * using sample data 100 bytes apart. If these sample points are good,
- * it's a pretty good bet that everything between them is good, too.
- */
-static int iwl3945_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len)
-{
- u32 val;
- int rc = 0;
- u32 errcnt = 0;
- u32 i;
-
- IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
-
- for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
- /* read data comes through single port, auto-incr addr */
- /* NOTE: Use the debugless read so we don't flood kernel log
- * if IWL_DL_IO is set */
- iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
- i + IWL39_RTC_INST_LOWER_BOUND);
- val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
- if (val != le32_to_cpu(*image)) {
-#if 0 /* Enable this if you want to see details */
- IWL_ERR(priv, "uCode INST section is invalid at "
- "offset 0x%x, is 0x%x, s/b 0x%x\n",
- i, val, *image);
-#endif
- rc = -EIO;
- errcnt++;
- if (errcnt >= 3)
- break;
- }
- }
-
- return rc;
-}
-
-
-/**
- * iwl3945_verify_ucode - determine which instruction image is in SRAM,
- * and verify its contents
- */
-static int iwl3945_verify_ucode(struct iwl_priv *priv)
-{
- __le32 *image;
- u32 len;
- int rc = 0;
-
- /* Try bootstrap */
- image = (__le32 *)priv->ucode_boot.v_addr;
- len = priv->ucode_boot.len;
- rc = iwl3945_verify_inst_sparse(priv, image, len);
- if (rc == 0) {
- IWL_DEBUG_INFO(priv, "Bootstrap uCode is good in inst SRAM\n");
- return 0;
- }
-
- /* Try initialize */
- image = (__le32 *)priv->ucode_init.v_addr;
- len = priv->ucode_init.len;
- rc = iwl3945_verify_inst_sparse(priv, image, len);
- if (rc == 0) {
- IWL_DEBUG_INFO(priv, "Initialize uCode is good in inst SRAM\n");
- return 0;
- }
-
- /* Try runtime/protocol */
- image = (__le32 *)priv->ucode_code.v_addr;
- len = priv->ucode_code.len;
- rc = iwl3945_verify_inst_sparse(priv, image, len);
- if (rc == 0) {
- IWL_DEBUG_INFO(priv, "Runtime uCode is good in inst SRAM\n");
- return 0;
- }
-
- IWL_ERR(priv, "NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
-
- /* Since nothing seems to match, show first several data entries in
- * instruction SRAM, so maybe visual inspection will give a clue.
- * Selection of bootstrap image (vs. other images) is arbitrary. */
- image = (__le32 *)priv->ucode_boot.v_addr;
- len = priv->ucode_boot.len;
- rc = iwl3945_verify_inst_full(priv, image, len);
-
- return rc;
-}
-
-static void iwl3945_nic_start(struct iwl_priv *priv)
-{
- /* Remove all resets to allow NIC to operate */
- iwl_write32(priv, CSR_RESET, 0);
-}
-
-#define IWL3945_UCODE_GET(item) \
-static u32 iwl3945_ucode_get_##item(const struct iwl_ucode_header *ucode)\
-{ \
- return le32_to_cpu(ucode->v1.item); \
-}
-
-static u32 iwl3945_ucode_get_header_size(u32 api_ver)
-{
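-	/* v1 uCode file header: the version word plus the five image-size
-	 * words read by the IWL3945_UCODE_GET accessors, i.e. 6 * sizeof(u32) */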
- return 24;
-}
-
-static u8 *iwl3945_ucode_get_data(const struct iwl_ucode_header *ucode)
-{
- return (u8 *) ucode->v1.data;
-}
-
-IWL3945_UCODE_GET(inst_size);
-IWL3945_UCODE_GET(data_size);
-IWL3945_UCODE_GET(init_size);
-IWL3945_UCODE_GET(init_data_size);
-IWL3945_UCODE_GET(boot_size);
-
-/**
- * iwl3945_read_ucode - Read uCode images from disk file.
- *
- * Copy into buffers for card to fetch via bus-mastering
- */
-static int iwl3945_read_ucode(struct iwl_priv *priv)
-{
- const struct iwl_ucode_header *ucode;
- int ret = -EINVAL, index;
- const struct firmware *ucode_raw;
- /* firmware file name contains uCode/driver compatibility version */
- const char *name_pre = priv->cfg->fw_name_pre;
- const unsigned int api_max = priv->cfg->ucode_api_max;
- const unsigned int api_min = priv->cfg->ucode_api_min;
- char buf[25];
- u8 *src;
- size_t len;
- u32 api_ver, inst_size, data_size, init_size, init_data_size, boot_size;
-
- /* Ask kernel firmware_class module to get the boot firmware off disk.
- * request_firmware() is synchronous, file is in memory on return. */
- for (index = api_max; index >= api_min; index--) {
- sprintf(buf, "%s%u%s", name_pre, index, ".ucode");
- ret = request_firmware(&ucode_raw, buf, &priv->pci_dev->dev);
- if (ret < 0) {
- IWL_ERR(priv, "%s firmware file req failed: %d\n",
- buf, ret);
- if (ret == -ENOENT)
- continue;
- else
- goto error;
- } else {
- if (index < api_max)
- IWL_ERR(priv, "Loaded firmware %s, "
- "which is deprecated. "
-					"Please use API v%u instead.\n",
- buf, api_max);
- IWL_DEBUG_INFO(priv, "Got firmware '%s' file "
- "(%zd bytes) from disk\n",
- buf, ucode_raw->size);
- break;
- }
- }
-
- if (ret < 0)
- goto error;
-
- /* Make sure that we got at least our header! */
- if (ucode_raw->size < iwl3945_ucode_get_header_size(1)) {
- IWL_ERR(priv, "File size way too small!\n");
- ret = -EINVAL;
- goto err_release;
- }
-
- /* Data from ucode file: header followed by uCode images */
- ucode = (struct iwl_ucode_header *)ucode_raw->data;
-
- priv->ucode_ver = le32_to_cpu(ucode->ver);
- api_ver = IWL_UCODE_API(priv->ucode_ver);
- inst_size = iwl3945_ucode_get_inst_size(ucode);
- data_size = iwl3945_ucode_get_data_size(ucode);
- init_size = iwl3945_ucode_get_init_size(ucode);
- init_data_size = iwl3945_ucode_get_init_data_size(ucode);
- boot_size = iwl3945_ucode_get_boot_size(ucode);
- src = iwl3945_ucode_get_data(ucode);
-
- /* api_ver should match the api version forming part of the
- * firmware filename ... but we don't check for that and only rely
- * on the API version read from firmware header from here on forward */
-
- if (api_ver < api_min || api_ver > api_max) {
- IWL_ERR(priv, "Driver unable to support your firmware API. "
- "Driver supports v%u, firmware is v%u.\n",
- api_max, api_ver);
- priv->ucode_ver = 0;
- ret = -EINVAL;
- goto err_release;
- }
- if (api_ver != api_max)
- IWL_ERR(priv, "Firmware has old API version. Expected %u, "
- "got %u. New firmware can be obtained "
- "from http://www.intellinuxwireless.org.\n",
- api_max, api_ver);
-
- IWL_INFO(priv, "loaded firmware version %u.%u.%u.%u\n",
- IWL_UCODE_MAJOR(priv->ucode_ver),
- IWL_UCODE_MINOR(priv->ucode_ver),
- IWL_UCODE_API(priv->ucode_ver),
- IWL_UCODE_SERIAL(priv->ucode_ver));
-
- snprintf(priv->hw->wiphy->fw_version,
- sizeof(priv->hw->wiphy->fw_version),
- "%u.%u.%u.%u",
- IWL_UCODE_MAJOR(priv->ucode_ver),
- IWL_UCODE_MINOR(priv->ucode_ver),
- IWL_UCODE_API(priv->ucode_ver),
- IWL_UCODE_SERIAL(priv->ucode_ver));
-
- IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n",
- priv->ucode_ver);
- IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %u\n",
- inst_size);
- IWL_DEBUG_INFO(priv, "f/w package hdr runtime data size = %u\n",
- data_size);
- IWL_DEBUG_INFO(priv, "f/w package hdr init inst size = %u\n",
- init_size);
- IWL_DEBUG_INFO(priv, "f/w package hdr init data size = %u\n",
- init_data_size);
- IWL_DEBUG_INFO(priv, "f/w package hdr boot inst size = %u\n",
- boot_size);
-
-
- /* Verify size of file vs. image size info in file's header */
- if (ucode_raw->size != iwl3945_ucode_get_header_size(api_ver) +
- inst_size + data_size + init_size +
- init_data_size + boot_size) {
-
- IWL_DEBUG_INFO(priv,
- "uCode file size %zd does not match expected size\n",
- ucode_raw->size);
- ret = -EINVAL;
- goto err_release;
- }
-
- /* Verify that uCode images will fit in card's SRAM */
- if (inst_size > IWL39_MAX_INST_SIZE) {
-		IWL_DEBUG_INFO(priv, "uCode instr len %d too large to fit\n",
- inst_size);
- ret = -EINVAL;
- goto err_release;
- }
-
- if (data_size > IWL39_MAX_DATA_SIZE) {
-		IWL_DEBUG_INFO(priv, "uCode data len %d too large to fit\n",
- data_size);
- ret = -EINVAL;
- goto err_release;
- }
- if (init_size > IWL39_MAX_INST_SIZE) {
- IWL_DEBUG_INFO(priv,
-			"uCode init instr len %d too large to fit\n",
- init_size);
- ret = -EINVAL;
- goto err_release;
- }
- if (init_data_size > IWL39_MAX_DATA_SIZE) {
- IWL_DEBUG_INFO(priv,
-			"uCode init data len %d too large to fit\n",
- init_data_size);
- ret = -EINVAL;
- goto err_release;
- }
- if (boot_size > IWL39_MAX_BSM_SIZE) {
- IWL_DEBUG_INFO(priv,
-			"uCode boot instr len %d too large to fit\n",
- boot_size);
- ret = -EINVAL;
- goto err_release;
- }
-
- /* Allocate ucode buffers for card's bus-master loading ... */
-
- /* Runtime instructions and 2 copies of data:
- * 1) unmodified from disk
- * 2) backup cache for save/restore during power-downs */
- priv->ucode_code.len = inst_size;
- iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_code);
-
- priv->ucode_data.len = data_size;
- iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data);
-
- priv->ucode_data_backup.len = data_size;
- iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
-
- if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr ||
- !priv->ucode_data_backup.v_addr)
- goto err_pci_alloc;
-
- /* Initialization instructions and data */
- if (init_size && init_data_size) {
- priv->ucode_init.len = init_size;
- iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init);
-
- priv->ucode_init_data.len = init_data_size;
- iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data);
-
- if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr)
- goto err_pci_alloc;
- }
-
- /* Bootstrap (instructions only, no data) */
- if (boot_size) {
- priv->ucode_boot.len = boot_size;
- iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot);
-
- if (!priv->ucode_boot.v_addr)
- goto err_pci_alloc;
- }
-
- /* Copy images into buffers for card's bus-master reads ... */
-
- /* Runtime instructions (first block of data in file) */
- len = inst_size;
- IWL_DEBUG_INFO(priv,
- "Copying (but not loading) uCode instr len %zd\n", len);
- memcpy(priv->ucode_code.v_addr, src, len);
- src += len;
-
- IWL_DEBUG_INFO(priv, "uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
- priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr);
-
- /* Runtime data (2nd block)
- * NOTE: Copy into backup buffer will be done in iwl3945_up() */
- len = data_size;
- IWL_DEBUG_INFO(priv,
- "Copying (but not loading) uCode data len %zd\n", len);
- memcpy(priv->ucode_data.v_addr, src, len);
- memcpy(priv->ucode_data_backup.v_addr, src, len);
- src += len;
-
- /* Initialization instructions (3rd block) */
- if (init_size) {
- len = init_size;
- IWL_DEBUG_INFO(priv,
- "Copying (but not loading) init instr len %zd\n", len);
- memcpy(priv->ucode_init.v_addr, src, len);
- src += len;
- }
-
- /* Initialization data (4th block) */
- if (init_data_size) {
- len = init_data_size;
- IWL_DEBUG_INFO(priv,
- "Copying (but not loading) init data len %zd\n", len);
- memcpy(priv->ucode_init_data.v_addr, src, len);
- src += len;
- }
-
- /* Bootstrap instructions (5th block) */
- len = boot_size;
- IWL_DEBUG_INFO(priv,
- "Copying (but not loading) boot instr len %zd\n", len);
- memcpy(priv->ucode_boot.v_addr, src, len);
-
-	/* We have our copies now, allow OS to release its copies */
- release_firmware(ucode_raw);
- return 0;
-
- err_pci_alloc:
- IWL_ERR(priv, "failed to allocate pci memory\n");
- ret = -ENOMEM;
- iwl3945_dealloc_ucode_pci(priv);
-
- err_release:
- release_firmware(ucode_raw);
-
- error:
- return ret;
-}
-
-
-/**
- * iwl3945_set_ucode_ptrs - Set uCode address location
- *
- * Tell initialization uCode where to find runtime uCode.
- *
- * BSM registers initially contain pointers to initialization uCode.
- * We need to replace them to load runtime uCode inst and data,
- * and to save runtime data when powering down.
- */
-static int iwl3945_set_ucode_ptrs(struct iwl_priv *priv)
-{
- dma_addr_t pinst;
- dma_addr_t pdata;
-
- /* bits 31:0 for 3945 */
- pinst = priv->ucode_code.p_addr;
- pdata = priv->ucode_data_backup.p_addr;
-
- /* Tell bootstrap uCode where to find image to load */
- iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
- iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
- iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
- priv->ucode_data.len);
-
- /* Inst byte count must be last to set up, bit 31 signals uCode
- * that all new ptr/size info is in place */
- iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
- priv->ucode_code.len | BSM_DRAM_INST_LOAD);
-
- IWL_DEBUG_INFO(priv, "Runtime uCode pointers are set.\n");
-
- return 0;
-}
-
-/**
- * iwl3945_init_alive_start - Called after REPLY_ALIVE notification received
- *
- * Called after REPLY_ALIVE notification received from "initialize" uCode.
- *
- * Tell "initialize" uCode to go ahead and load the runtime uCode.
- */
-static void iwl3945_init_alive_start(struct iwl_priv *priv)
-{
- /* Check alive response for "valid" sign from uCode */
- if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
- /* We had an error bringing up the hardware, so take it
- * all the way back down so we can try again */
- IWL_DEBUG_INFO(priv, "Initialize Alive failed.\n");
- goto restart;
- }
-
- /* Bootstrap uCode has loaded initialize uCode ... verify inst image.
- * This is a paranoid check, because we would not have gotten the
- * "initialize" alive if code weren't properly loaded. */
- if (iwl3945_verify_ucode(priv)) {
- /* Runtime instruction load was bad;
- * take it all the way back down so we can try again */
- IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n");
- goto restart;
- }
-
- /* Send pointers to protocol/runtime uCode image ... init code will
- * load and launch runtime uCode, which will send us another "Alive"
- * notification. */
- IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
- if (iwl3945_set_ucode_ptrs(priv)) {
- /* Runtime instruction load won't happen;
- * take it all the way back down so we can try again */
- IWL_DEBUG_INFO(priv, "Couldn't set up uCode pointers.\n");
- goto restart;
- }
- return;
-
- restart:
- queue_work(priv->workqueue, &priv->restart);
-}
-
-/**
- * iwl3945_alive_start - called after REPLY_ALIVE notification received
- * from protocol/runtime uCode (initialization uCode's
- * Alive gets handled by iwl3945_init_alive_start()).
- */
-static void iwl3945_alive_start(struct iwl_priv *priv)
-{
- int thermal_spin = 0;
- u32 rfkill;
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
- IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
-
- if (priv->card_alive.is_valid != UCODE_VALID_OK) {
- /* We had an error bringing up the hardware, so take it
- * all the way back down so we can try again */
- IWL_DEBUG_INFO(priv, "Alive failed.\n");
- goto restart;
- }
-
- /* Initialize uCode has loaded Runtime uCode ... verify inst image.
- * This is a paranoid check, because we would not have gotten the
- * "runtime" alive if code weren't properly loaded. */
- if (iwl3945_verify_ucode(priv)) {
- /* Runtime instruction load was bad;
- * take it all the way back down so we can try again */
- IWL_DEBUG_INFO(priv, "Bad runtime uCode load.\n");
- goto restart;
- }
-
- rfkill = iwl_legacy_read_prph(priv, APMG_RFKILL_REG);
- IWL_DEBUG_INFO(priv, "RFKILL status: 0x%x\n", rfkill);
-
- if (rfkill & 0x1) {
- clear_bit(STATUS_RF_KILL_HW, &priv->status);
- /* if RFKILL is not on, then wait for thermal
- * sensor in adapter to kick in */
- while (iwl3945_hw_get_temperature(priv) == 0) {
- thermal_spin++;
- udelay(10);
- }
-
- if (thermal_spin)
- IWL_DEBUG_INFO(priv, "Thermal calibration took %dus\n",
- thermal_spin * 10);
- } else
- set_bit(STATUS_RF_KILL_HW, &priv->status);
-
- /* After the ALIVE response, we can send commands to 3945 uCode */
- set_bit(STATUS_ALIVE, &priv->status);
-
- /* Enable watchdog to monitor the driver tx queues */
- iwl_legacy_setup_watchdog(priv);
-
- if (iwl_legacy_is_rfkill(priv))
- return;
-
- ieee80211_wake_queues(priv->hw);
-
- priv->active_rate = IWL_RATES_MASK_3945;
-
- iwl_legacy_power_update_mode(priv, true);
-
- if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
- struct iwl3945_rxon_cmd *active_rxon =
- (struct iwl3945_rxon_cmd *)(&ctx->active);
-
- ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
- active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- } else {
- /* Initialize our rx_config data */
- iwl_legacy_connection_init_rx_config(priv, ctx);
- }
-
- /* Configure Bluetooth device coexistence support */
- iwl_legacy_send_bt_config(priv);
-
- set_bit(STATUS_READY, &priv->status);
-
- /* Configure the adapter for unassociated operation */
- iwl3945_commit_rxon(priv, ctx);
-
- iwl3945_reg_txpower_periodic(priv);
-
- IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
- wake_up(&priv->wait_command_queue);
-
- return;
-
- restart:
- queue_work(priv->workqueue, &priv->restart);
-}
-
-static void iwl3945_cancel_deferred_work(struct iwl_priv *priv);
-
-static void __iwl3945_down(struct iwl_priv *priv)
-{
- unsigned long flags;
- int exit_pending;
-
- IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n");
-
- iwl_legacy_scan_cancel_timeout(priv, 200);
-
- exit_pending = test_and_set_bit(STATUS_EXIT_PENDING, &priv->status);
-
- /* Stop TX queues watchdog. We need to have STATUS_EXIT_PENDING bit set
-	 * to prevent the timer from being rearmed */
- del_timer_sync(&priv->watchdog);
-
- /* Station information will now be cleared in device */
- iwl_legacy_clear_ucode_stations(priv, NULL);
- iwl_legacy_dealloc_bcast_stations(priv);
- iwl_legacy_clear_driver_stations(priv);
-
- /* Unblock any waiting calls */
- wake_up_all(&priv->wait_command_queue);
-
- /* Wipe out the EXIT_PENDING status bit if we are not actually
- * exiting the module */
- if (!exit_pending)
- clear_bit(STATUS_EXIT_PENDING, &priv->status);
-
- /* stop and reset the on-board processor */
- iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
-
- /* tell the device to stop sending interrupts */
- spin_lock_irqsave(&priv->lock, flags);
- iwl_legacy_disable_interrupts(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
- iwl3945_synchronize_irq(priv);
-
- if (priv->mac80211_registered)
- ieee80211_stop_queues(priv->hw);
-
- /* If we have not previously called iwl3945_init() then
- * clear all bits but the RF Kill bits and return */
- if (!iwl_legacy_is_init(priv)) {
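-		/* test_bit() returns 0 or 1, so shifting that result back by
-		 * the bit number rebuilds a status word holding only the bits
-		 * we want to keep */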
- priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) <<
- STATUS_RF_KILL_HW |
- test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
- STATUS_GEO_CONFIGURED |
- test_bit(STATUS_EXIT_PENDING, &priv->status) <<
- STATUS_EXIT_PENDING;
- goto exit;
- }
-
- /* ...otherwise clear out all the status bits but the RF Kill
- * bit and continue taking the NIC down. */
- priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
- STATUS_RF_KILL_HW |
- test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
- STATUS_GEO_CONFIGURED |
- test_bit(STATUS_FW_ERROR, &priv->status) <<
- STATUS_FW_ERROR |
- test_bit(STATUS_EXIT_PENDING, &priv->status) <<
- STATUS_EXIT_PENDING;
-
- iwl3945_hw_txq_ctx_stop(priv);
- iwl3945_hw_rxq_stop(priv);
-
- /* Power-down device's busmaster DMA clocks */
- iwl_legacy_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
- udelay(5);
-
- /* Stop the device, and put it in low power state */
- iwl_legacy_apm_stop(priv);
-
- exit:
- memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp));
-
- if (priv->beacon_skb)
- dev_kfree_skb(priv->beacon_skb);
- priv->beacon_skb = NULL;
-
- /* clear out any free frames */
- iwl3945_clear_free_frames(priv);
-}
-
-static void iwl3945_down(struct iwl_priv *priv)
-{
- mutex_lock(&priv->mutex);
- __iwl3945_down(priv);
- mutex_unlock(&priv->mutex);
-
- iwl3945_cancel_deferred_work(priv);
-}
-
-#define MAX_HW_RESTARTS 5
-
-static int iwl3945_alloc_bcast_station(struct iwl_priv *priv)
-{
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- unsigned long flags;
- u8 sta_id;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- sta_id = iwl_legacy_prep_station(priv, ctx,
- iwlegacy_bcast_addr, false, NULL);
- if (sta_id == IWL_INVALID_STATION) {
- IWL_ERR(priv, "Unable to prepare broadcast station\n");
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- return -EINVAL;
- }
-
- priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE;
- priv->stations[sta_id].used |= IWL_STA_BCAST;
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- return 0;
-}
-
-static int __iwl3945_up(struct iwl_priv *priv)
-{
- int rc, i;
-
- rc = iwl3945_alloc_bcast_station(priv);
- if (rc)
- return rc;
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
- IWL_WARN(priv, "Exit pending; will not bring the NIC up\n");
- return -EIO;
- }
-
- if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) {
- IWL_ERR(priv, "ucode not available for device bring up\n");
- return -EIO;
- }
-
- /* If platform's RF_KILL switch is NOT set to KILL */
- if (iwl_read32(priv, CSR_GP_CNTRL) &
- CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
- clear_bit(STATUS_RF_KILL_HW, &priv->status);
- else {
- set_bit(STATUS_RF_KILL_HW, &priv->status);
- IWL_WARN(priv, "Radio disabled by HW RF Kill switch\n");
- return -ENODEV;
- }
-
- iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
-
- rc = iwl3945_hw_nic_init(priv);
- if (rc) {
-		IWL_ERR(priv, "Unable to init nic\n");
- return rc;
- }
-
- /* make sure rfkill handshake bits are cleared */
- iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
- iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
- CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
-
- /* clear (again), then enable host interrupts */
- iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
- iwl_legacy_enable_interrupts(priv);
-
- /* really make sure rfkill handshake bits are cleared */
- iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
- iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
-
- /* Copy original ucode data image from disk into backup cache.
- * This will be used to initialize the on-board processor's
- * data SRAM for a clean start when the runtime program first loads. */
- memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr,
- priv->ucode_data.len);
-
- /* We return success when we resume from suspend and rf_kill is on. */
- if (test_bit(STATUS_RF_KILL_HW, &priv->status))
- return 0;
-
- for (i = 0; i < MAX_HW_RESTARTS; i++) {
-
- /* load bootstrap state machine,
- * load bootstrap program into processor's memory,
- * prepare to load the "initialize" uCode */
- rc = priv->cfg->ops->lib->load_ucode(priv);
-
- if (rc) {
- IWL_ERR(priv,
- "Unable to set up bootstrap uCode: %d\n", rc);
- continue;
- }
-
- /* start card; "initialize" will load runtime ucode */
- iwl3945_nic_start(priv);
-
- IWL_DEBUG_INFO(priv, DRV_NAME " is coming up\n");
-
- return 0;
- }
-
- set_bit(STATUS_EXIT_PENDING, &priv->status);
- __iwl3945_down(priv);
- clear_bit(STATUS_EXIT_PENDING, &priv->status);
-
- /* tried to restart and config the device for as long as our
- * patience could withstand */
- IWL_ERR(priv, "Unable to initialize device after %d attempts.\n", i);
- return -EIO;
-}
-
-
-/*****************************************************************************
- *
- * Workqueue callbacks
- *
- *****************************************************************************/
-
-static void iwl3945_bg_init_alive_start(struct work_struct *data)
-{
- struct iwl_priv *priv =
- container_of(data, struct iwl_priv, init_alive_start.work);
-
- mutex_lock(&priv->mutex);
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- goto out;
-
- iwl3945_init_alive_start(priv);
-out:
- mutex_unlock(&priv->mutex);
-}
-
-static void iwl3945_bg_alive_start(struct work_struct *data)
-{
- struct iwl_priv *priv =
- container_of(data, struct iwl_priv, alive_start.work);
-
- mutex_lock(&priv->mutex);
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- goto out;
-
- iwl3945_alive_start(priv);
-out:
- mutex_unlock(&priv->mutex);
-}
-
-/*
- * 3945 cannot interrupt driver when hardware rf kill switch toggles;
- * driver must poll CSR_GP_CNTRL_REG register for change. This register
- * *is* readable even when device has been SW_RESET into low power mode
- * (e.g. during RF KILL).
- */
-static void iwl3945_rfkill_poll(struct work_struct *data)
-{
- struct iwl_priv *priv =
- container_of(data, struct iwl_priv, _3945.rfkill_poll.work);
- bool old_rfkill = test_bit(STATUS_RF_KILL_HW, &priv->status);
- bool new_rfkill = !(iwl_read32(priv, CSR_GP_CNTRL)
- & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
-
- if (new_rfkill != old_rfkill) {
- if (new_rfkill)
- set_bit(STATUS_RF_KILL_HW, &priv->status);
- else
- clear_bit(STATUS_RF_KILL_HW, &priv->status);
-
- wiphy_rfkill_set_hw_state(priv->hw->wiphy, new_rfkill);
-
- IWL_DEBUG_RF_KILL(priv, "RF_KILL bit toggled to %s.\n",
- new_rfkill ? "disable radio" : "enable radio");
- }
-
- /* Keep this running, even if radio now enabled. This will be
- * cancelled in mac_start() if system decides to start again */
- queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll,
- round_jiffies_relative(2 * HZ));
-
-}
-
-int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
-{
- struct iwl_host_cmd cmd = {
- .id = REPLY_SCAN_CMD,
- .len = sizeof(struct iwl3945_scan_cmd),
- .flags = CMD_SIZE_HUGE,
- };
- struct iwl3945_scan_cmd *scan;
- u8 n_probes = 0;
- enum ieee80211_band band;
- bool is_active = false;
- int ret;
- u16 len;
-
- lockdep_assert_held(&priv->mutex);
-
- if (!priv->scan_cmd) {
- priv->scan_cmd = kmalloc(sizeof(struct iwl3945_scan_cmd) +
- IWL_MAX_SCAN_SIZE, GFP_KERNEL);
- if (!priv->scan_cmd) {
-			IWL_DEBUG_SCAN(priv, "Failed to allocate scan memory\n");
- return -ENOMEM;
- }
- }
- scan = priv->scan_cmd;
- memset(scan, 0, sizeof(struct iwl3945_scan_cmd) + IWL_MAX_SCAN_SIZE);
-
- scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
- scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
-
- if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
- u16 interval;
- u32 extra;
- u32 suspend_time = 100;
- u32 scan_suspend_time = 100;
-
- IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
-
- interval = vif->bss_conf.beacon_int;
-
- scan->suspend_time = 0;
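-		/* 200 * 1024 usec, i.e. 200 TU (1 TU = 1024 usec); assumed to
-		 * bound the time spent away from the serving channel */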
- scan->max_out_time = cpu_to_le32(200 * 1024);
- if (!interval)
- interval = suspend_time;
- /*
- * suspend time format:
- * 0-19: beacon interval in usec (time before exec.)
- * 20-23: 0
- * 24-31: number of beacons (suspend between channels)
- */
-
- extra = (suspend_time / interval) << 24;
- scan_suspend_time = 0xFF0FFFFF &
- (extra | ((suspend_time % interval) * 1024));
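-		/* illustrative example (hypothetical numbers): with
-		 * suspend_time = 100 and interval = 100, extra = 1 << 24 and
-		 * the low bits are (100 % 100) * 1024 = 0, i.e. suspend for
-		 * exactly one beacon between channels */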
-
- scan->suspend_time = cpu_to_le32(scan_suspend_time);
- IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n",
- scan_suspend_time, interval);
- }
-
- if (priv->scan_request->n_ssids) {
- int i, p = 0;
- IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
- for (i = 0; i < priv->scan_request->n_ssids; i++) {
- /* always does wildcard anyway */
- if (!priv->scan_request->ssids[i].ssid_len)
- continue;
- scan->direct_scan[p].id = WLAN_EID_SSID;
- scan->direct_scan[p].len =
- priv->scan_request->ssids[i].ssid_len;
- memcpy(scan->direct_scan[p].ssid,
- priv->scan_request->ssids[i].ssid,
- priv->scan_request->ssids[i].ssid_len);
- n_probes++;
- p++;
- }
- is_active = true;
- } else
- IWL_DEBUG_SCAN(priv, "Kicking off passive scan.\n");
-
- /* We don't build a direct scan probe request; the uCode will do
- * that based on the direct_mask added to each channel entry */
- scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
- scan->tx_cmd.sta_id = priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id;
- scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
-
- /* flags + rate selection */
-
- switch (priv->scan_band) {
- case IEEE80211_BAND_2GHZ:
- scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
- scan->tx_cmd.rate = IWL_RATE_1M_PLCP;
- band = IEEE80211_BAND_2GHZ;
- break;
- case IEEE80211_BAND_5GHZ:
- scan->tx_cmd.rate = IWL_RATE_6M_PLCP;
- band = IEEE80211_BAND_5GHZ;
- break;
- default:
- IWL_WARN(priv, "Invalid scan band\n");
- return -EIO;
- }
-
- /*
-	 * If active scanning is requested but a certain channel
- * is marked passive, we can do active scanning if we
- * detect transmissions.
- */
- scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
- IWL_GOOD_CRC_TH_DISABLED;
-
- len = iwl_legacy_fill_probe_req(priv, (struct ieee80211_mgmt *)scan->data,
- vif->addr, priv->scan_request->ie,
- priv->scan_request->ie_len,
- IWL_MAX_SCAN_SIZE - sizeof(*scan));
- scan->tx_cmd.len = cpu_to_le16(len);
-
- /* select Rx antennas */
- scan->flags |= iwl3945_get_antenna_flags(priv);
-
- scan->channel_count = iwl3945_get_channels_for_scan(priv, band, is_active, n_probes,
- (void *)&scan->data[len], vif);
- if (scan->channel_count == 0) {
- IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
- return -EIO;
- }
-
- cmd.len += le16_to_cpu(scan->tx_cmd.len) +
- scan->channel_count * sizeof(struct iwl3945_scan_channel);
- cmd.data = scan;
- scan->len = cpu_to_le16(cmd.len);
-
- set_bit(STATUS_SCAN_HW, &priv->status);
- ret = iwl_legacy_send_cmd_sync(priv, &cmd);
- if (ret)
- clear_bit(STATUS_SCAN_HW, &priv->status);
- return ret;
-}
-
-void iwl3945_post_scan(struct iwl_priv *priv)
-{
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
- /*
- * Since setting the RXON may have been deferred while
- * performing the scan, fire one off if needed
- */
- if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
- iwl3945_commit_rxon(priv, ctx);
-}
-
-static void iwl3945_bg_restart(struct work_struct *data)
-{
- struct iwl_priv *priv = container_of(data, struct iwl_priv, restart);
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return;
-
- if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) {
- struct iwl_rxon_context *ctx;
- mutex_lock(&priv->mutex);
- for_each_context(priv, ctx)
- ctx->vif = NULL;
- priv->is_open = 0;
- mutex_unlock(&priv->mutex);
- iwl3945_down(priv);
- ieee80211_restart_hw(priv->hw);
- } else {
- iwl3945_down(priv);
-
- mutex_lock(&priv->mutex);
- if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
- mutex_unlock(&priv->mutex);
- return;
- }
-
- __iwl3945_up(priv);
- mutex_unlock(&priv->mutex);
- }
-}
-
-static void iwl3945_bg_rx_replenish(struct work_struct *data)
-{
- struct iwl_priv *priv =
- container_of(data, struct iwl_priv, rx_replenish);
-
- mutex_lock(&priv->mutex);
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- goto out;
-
- iwl3945_rx_replenish(priv);
-out:
- mutex_unlock(&priv->mutex);
-}
-
-void iwl3945_post_associate(struct iwl_priv *priv)
-{
- int rc = 0;
- struct ieee80211_conf *conf = NULL;
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
- if (!ctx->vif || !priv->is_open)
- return;
-
- IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
- ctx->vif->bss_conf.aid, ctx->active.bssid_addr);
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return;
-
- iwl_legacy_scan_cancel_timeout(priv, 200);
-
- conf = iwl_legacy_ieee80211_get_hw_conf(priv->hw);
-
- ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- iwl3945_commit_rxon(priv, ctx);
-
- rc = iwl_legacy_send_rxon_timing(priv, ctx);
- if (rc)
- IWL_WARN(priv, "REPLY_RXON_TIMING failed - "
- "Attempting to continue.\n");
-
- ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
-
- ctx->staging.assoc_id = cpu_to_le16(ctx->vif->bss_conf.aid);
-
- IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n",
- ctx->vif->bss_conf.aid, ctx->vif->bss_conf.beacon_int);
-
- if (ctx->vif->bss_conf.use_short_preamble)
- ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
- else
- ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
-
- if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
- if (ctx->vif->bss_conf.use_short_slot)
- ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
- else
- ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
- }
-
- iwl3945_commit_rxon(priv, ctx);
-
- switch (ctx->vif->type) {
- case NL80211_IFTYPE_STATION:
- iwl3945_rate_scale_init(priv->hw, IWL_AP_ID);
- break;
- case NL80211_IFTYPE_ADHOC:
- iwl3945_send_beacon_cmd(priv);
- break;
- default:
- IWL_ERR(priv, "%s Should not be called in %d mode\n",
- __func__, ctx->vif->type);
- break;
- }
-}
-
-/*****************************************************************************
- *
- * mac80211 entry point functions
- *
- *****************************************************************************/
-
-#define UCODE_READY_TIMEOUT (2 * HZ)
-
-static int iwl3945_mac_start(struct ieee80211_hw *hw)
-{
- struct iwl_priv *priv = hw->priv;
- int ret;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- /* we should be verifying the device is ready to be opened */
- mutex_lock(&priv->mutex);
-
- /* fetch ucode file from disk, alloc and copy to bus-master buffers ...
- * ucode filename and max sizes are card-specific. */
-
- if (!priv->ucode_code.len) {
- ret = iwl3945_read_ucode(priv);
- if (ret) {
- IWL_ERR(priv, "Could not read microcode: %d\n", ret);
- mutex_unlock(&priv->mutex);
- goto out_release_irq;
- }
- }
-
- ret = __iwl3945_up(priv);
-
- mutex_unlock(&priv->mutex);
-
- if (ret)
- goto out_release_irq;
-
- IWL_DEBUG_INFO(priv, "Start UP work.\n");
-
- /* Wait for START_ALIVE from ucode. Otherwise callbacks from
- * mac80211 will not be run successfully. */
- ret = wait_event_timeout(priv->wait_command_queue,
- test_bit(STATUS_READY, &priv->status),
- UCODE_READY_TIMEOUT);
- if (!ret) {
- if (!test_bit(STATUS_READY, &priv->status)) {
- IWL_ERR(priv,
- "Wait for START_ALIVE timeout after %dms.\n",
- jiffies_to_msecs(UCODE_READY_TIMEOUT));
- ret = -ETIMEDOUT;
- goto out_release_irq;
- }
- }
-
- /* ucode is running and will send rfkill notifications,
- * no need to poll the killswitch state anymore */
- cancel_delayed_work(&priv->_3945.rfkill_poll);
-
- priv->is_open = 1;
- IWL_DEBUG_MAC80211(priv, "leave\n");
- return 0;
-
-out_release_irq:
- priv->is_open = 0;
- IWL_DEBUG_MAC80211(priv, "leave - failed\n");
- return ret;
-}
-
-static void iwl3945_mac_stop(struct ieee80211_hw *hw)
-{
- struct iwl_priv *priv = hw->priv;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- if (!priv->is_open) {
- IWL_DEBUG_MAC80211(priv, "leave - skip\n");
- return;
- }
-
- priv->is_open = 0;
-
- iwl3945_down(priv);
-
- flush_workqueue(priv->workqueue);
-
- /* start polling the killswitch state again */
- queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll,
- round_jiffies_relative(2 * HZ));
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-
-static void iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
-{
- struct iwl_priv *priv = hw->priv;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
- ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
-
- if (iwl3945_tx_skb(priv, skb))
- dev_kfree_skb_any(skb);
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-
-void iwl3945_config_ap(struct iwl_priv *priv)
-{
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- struct ieee80211_vif *vif = ctx->vif;
- int rc = 0;
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return;
-
- /* The following should be done only at AP bring up */
- if (!(iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS))) {
-
- /* RXON - unassoc (to set timing command) */
- ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- iwl3945_commit_rxon(priv, ctx);
-
- /* RXON Timing */
- rc = iwl_legacy_send_rxon_timing(priv, ctx);
- if (rc)
- IWL_WARN(priv, "REPLY_RXON_TIMING failed - "
- "Attempting to continue.\n");
-
- ctx->staging.assoc_id = 0;
-
- if (vif->bss_conf.use_short_preamble)
- ctx->staging.flags |=
- RXON_FLG_SHORT_PREAMBLE_MSK;
- else
- ctx->staging.flags &=
- ~RXON_FLG_SHORT_PREAMBLE_MSK;
-
- if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
- if (vif->bss_conf.use_short_slot)
- ctx->staging.flags |=
- RXON_FLG_SHORT_SLOT_MSK;
- else
- ctx->staging.flags &=
- ~RXON_FLG_SHORT_SLOT_MSK;
- }
- /* restore RXON assoc */
- ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
- iwl3945_commit_rxon(priv, ctx);
- }
- iwl3945_send_beacon_cmd(priv);
-}
-
-static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
- struct ieee80211_key_conf *key)
-{
- struct iwl_priv *priv = hw->priv;
- int ret = 0;
- u8 sta_id = IWL_INVALID_STATION;
- u8 static_key;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- if (iwl3945_mod_params.sw_crypto) {
- IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n");
- return -EOPNOTSUPP;
- }
-
- /*
-	 * To support IBSS RSN, don't program group keys in IBSS; the
- * hardware will then not attempt to decrypt the frames.
- */
- if (vif->type == NL80211_IFTYPE_ADHOC &&
- !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
- return -EOPNOTSUPP;
-
- static_key = !iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS);
-
- if (!static_key) {
- sta_id = iwl_legacy_sta_id_or_broadcast(
- priv, &priv->contexts[IWL_RXON_CTX_BSS], sta);
- if (sta_id == IWL_INVALID_STATION)
- return -EINVAL;
- }
-
- mutex_lock(&priv->mutex);
- iwl_legacy_scan_cancel_timeout(priv, 100);
-
- switch (cmd) {
- case SET_KEY:
- if (static_key)
- ret = iwl3945_set_static_key(priv, key);
- else
- ret = iwl3945_set_dynamic_key(priv, key, sta_id);
- IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n");
- break;
- case DISABLE_KEY:
- if (static_key)
- ret = iwl3945_remove_static_key(priv);
- else
- ret = iwl3945_clear_sta_key_info(priv, sta_id);
- IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n");
- break;
- default:
- ret = -EINVAL;
- }
-
- mutex_unlock(&priv->mutex);
- IWL_DEBUG_MAC80211(priv, "leave\n");
-
- return ret;
-}
-
-static int iwl3945_mac_sta_add(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl3945_sta_priv *sta_priv = (void *)sta->drv_priv;
- int ret;
- bool is_ap = vif->type == NL80211_IFTYPE_STATION;
- u8 sta_id;
-
- IWL_DEBUG_INFO(priv, "received request to add station %pM\n",
- sta->addr);
- mutex_lock(&priv->mutex);
- IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n",
- sta->addr);
- sta_priv->common.sta_id = IWL_INVALID_STATION;
-
-
- ret = iwl_legacy_add_station_common(priv,
- &priv->contexts[IWL_RXON_CTX_BSS],
- sta->addr, is_ap, sta, &sta_id);
- if (ret) {
- IWL_ERR(priv, "Unable to add station %pM (%d)\n",
- sta->addr, ret);
- /* Should we return success if return code is EEXIST ? */
- mutex_unlock(&priv->mutex);
- return ret;
- }
-
- sta_priv->common.sta_id = sta_id;
-
- /* Initialize rate scaling */
- IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n",
- sta->addr);
- iwl3945_rs_rate_init(priv, sta, sta_id);
- mutex_unlock(&priv->mutex);
-
- return 0;
-}
-
-static void iwl3945_configure_filter(struct ieee80211_hw *hw,
- unsigned int changed_flags,
- unsigned int *total_flags,
- u64 multicast)
-{
- struct iwl_priv *priv = hw->priv;
- __le32 filter_or = 0, filter_nand = 0;
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
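-/* For each mac80211 filter flag tested below, collect the RXON filter bits
- * to set (filter_or) or to clear (filter_nand). */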
-#define CHK(test, flag) do { \
- if (*total_flags & (test)) \
- filter_or |= (flag); \
- else \
- filter_nand |= (flag); \
- } while (0)
-
- IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
- changed_flags, *total_flags);
-
- CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
- CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK);
- CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
-
-#undef CHK
-
- mutex_lock(&priv->mutex);
-
- ctx->staging.filter_flags &= ~filter_nand;
- ctx->staging.filter_flags |= filter_or;
-
- /*
- * Not committing directly because hardware can perform a scan,
-	 * but even if hw is ready, committing here breaks for some reason;
- * we'll eventually commit the filter flags change anyway.
- */
-
- mutex_unlock(&priv->mutex);
-
- /*
- * Receiving all multicast frames is always enabled by the
- * default flags setup in iwl_legacy_connection_init_rx_config()
- * since we currently do not support programming multicast
- * filters into the device.
- */
- *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
- FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
-}
-
-
-/*****************************************************************************
- *
- * sysfs attributes
- *
- *****************************************************************************/
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-
-/*
- * The following adds a new attribute to the sysfs representation
- * of this device driver (i.e. a new file in /sys/bus/pci/drivers/iwl/)
- * used for controlling the debug level.
- *
- * See the level definitions in iwl for details.
- *
- * The debug_level being managed using sysfs below is a per device debug
- * level that is used instead of the global debug level if it (the per
- * device debug level) is set.
- */
-static ssize_t iwl3945_show_debug_level(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- return sprintf(buf, "0x%08X\n", iwl_legacy_get_debug_level(priv));
-}
-static ssize_t iwl3945_store_debug_level(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- unsigned long val;
- int ret;
-
- ret = strict_strtoul(buf, 0, &val);
- if (ret)
- IWL_INFO(priv, "%s is not in hex or decimal form.\n", buf);
- else {
- priv->debug_level = val;
- if (iwl_legacy_alloc_traffic_mem(priv))
- IWL_ERR(priv,
- "Not enough memory to generate traffic log\n");
- }
- return strnlen(buf, count);
-}
-
-static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO,
- iwl3945_show_debug_level, iwl3945_store_debug_level);
-
-#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
-
-static ssize_t iwl3945_show_temperature(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
-
- if (!iwl_legacy_is_alive(priv))
- return -EAGAIN;
-
- return sprintf(buf, "%d\n", iwl3945_hw_get_temperature(priv));
-}
-
-static DEVICE_ATTR(temperature, S_IRUGO, iwl3945_show_temperature, NULL);
-
-static ssize_t iwl3945_show_tx_power(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- return sprintf(buf, "%d\n", priv->tx_power_user_lmt);
-}
-
-static ssize_t iwl3945_store_tx_power(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- char *p = (char *)buf;
- u32 val;
-
- val = simple_strtoul(p, &p, 10);
- if (p == buf)
-		IWL_INFO(priv, "%s is not in decimal form.\n", buf);
- else
- iwl3945_hw_reg_set_txpower(priv, val);
-
- return count;
-}
-
-static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, iwl3945_show_tx_power, iwl3945_store_tx_power);
-
-static ssize_t iwl3945_show_flags(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
- return sprintf(buf, "0x%04X\n", ctx->active.flags);
-}
-
-static ssize_t iwl3945_store_flags(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- u32 flags = simple_strtoul(buf, NULL, 0);
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
- mutex_lock(&priv->mutex);
- if (le32_to_cpu(ctx->staging.flags) != flags) {
- /* Cancel any currently running scans... */
- if (iwl_legacy_scan_cancel_timeout(priv, 100))
- IWL_WARN(priv, "Could not cancel scan.\n");
- else {
- IWL_DEBUG_INFO(priv, "Committing rxon.flags = 0x%04X\n",
- flags);
- ctx->staging.flags = cpu_to_le32(flags);
- iwl3945_commit_rxon(priv, ctx);
- }
- }
- mutex_unlock(&priv->mutex);
-
- return count;
-}
-
-static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, iwl3945_show_flags, iwl3945_store_flags);
-
-static ssize_t iwl3945_show_filter_flags(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
- return sprintf(buf, "0x%04X\n",
- le32_to_cpu(ctx->active.filter_flags));
-}
-
-static ssize_t iwl3945_store_filter_flags(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- u32 filter_flags = simple_strtoul(buf, NULL, 0);
-
- mutex_lock(&priv->mutex);
- if (le32_to_cpu(ctx->staging.filter_flags) != filter_flags) {
- /* Cancel any currently running scans... */
- if (iwl_legacy_scan_cancel_timeout(priv, 100))
- IWL_WARN(priv, "Could not cancel scan.\n");
- else {
- IWL_DEBUG_INFO(priv, "Committing rxon.filter_flags = "
- "0x%04X\n", filter_flags);
- ctx->staging.filter_flags =
- cpu_to_le32(filter_flags);
- iwl3945_commit_rxon(priv, ctx);
- }
- }
- mutex_unlock(&priv->mutex);
-
- return count;
-}
-
-static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, iwl3945_show_filter_flags,
- iwl3945_store_filter_flags);
-
-static ssize_t iwl3945_show_measurement(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- struct iwl_spectrum_notification measure_report;
- u32 size = sizeof(measure_report), len = 0, ofs = 0;
- u8 *data = (u8 *)&measure_report;
- unsigned long flags;
-
- spin_lock_irqsave(&priv->lock, flags);
- if (!(priv->measurement_status & MEASUREMENT_READY)) {
- spin_unlock_irqrestore(&priv->lock, flags);
- return 0;
- }
- memcpy(&measure_report, &priv->measure_report, size);
- priv->measurement_status = 0;
- spin_unlock_irqrestore(&priv->lock, flags);
-
- while (size && (PAGE_SIZE - len)) {
- hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
- PAGE_SIZE - len, 1);
- len = strlen(buf);
- if (PAGE_SIZE - len)
- buf[len++] = '\n';
-
- ofs += 16;
- size -= min(size, 16U);
- }
-
- return len;
-}
-
-static ssize_t iwl3945_store_measurement(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- struct ieee80211_measurement_params params = {
- .channel = le16_to_cpu(ctx->active.channel),
- .start_time = cpu_to_le64(priv->_3945.last_tsf),
- .duration = cpu_to_le16(1),
- };
- u8 type = IWL_MEASURE_BASIC;
- u8 buffer[32];
- u8 channel;
-
- if (count) {
- char *p = buffer;
- strncpy(buffer, buf, min(sizeof(buffer), count));
- channel = simple_strtoul(p, NULL, 0);
- if (channel)
- params.channel = channel;
-
- p = buffer;
- while (*p && *p != ' ')
- p++;
- if (*p)
- type = simple_strtoul(p + 1, NULL, 0);
- }
-
- IWL_DEBUG_INFO(priv, "Invoking measurement of type %d on "
- "channel %d (for '%s')\n", type, params.channel, buf);
- iwl3945_get_measurement(priv, &params, type);
-
- return count;
-}
-
-static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR,
- iwl3945_show_measurement, iwl3945_store_measurement);
-
-static ssize_t iwl3945_store_retry_rate(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
-
- priv->retry_rate = simple_strtoul(buf, NULL, 0);
- if (priv->retry_rate <= 0)
- priv->retry_rate = 1;
-
- return count;
-}
-
-static ssize_t iwl3945_show_retry_rate(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- return sprintf(buf, "%d", priv->retry_rate);
-}
-
-static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, iwl3945_show_retry_rate,
- iwl3945_store_retry_rate);
-
-
-static ssize_t iwl3945_show_channels(struct device *d,
- struct device_attribute *attr, char *buf)
-{
-	/* none of this belongs in sysfs anyway */
- return 0;
-}
-
-static DEVICE_ATTR(channels, S_IRUSR, iwl3945_show_channels, NULL);
-
-static ssize_t iwl3945_show_antenna(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
-
- if (!iwl_legacy_is_alive(priv))
- return -EAGAIN;
-
- return sprintf(buf, "%d\n", iwl3945_mod_params.antenna);
-}
-
-static ssize_t iwl3945_store_antenna(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct iwl_priv *priv __maybe_unused = dev_get_drvdata(d);
- int ant;
-
- if (count == 0)
- return 0;
-
- if (sscanf(buf, "%1i", &ant) != 1) {
- IWL_DEBUG_INFO(priv, "not in hex or decimal form.\n");
- return count;
- }
-
- if ((ant >= 0) && (ant <= 2)) {
- IWL_DEBUG_INFO(priv, "Setting antenna select to %d.\n", ant);
- iwl3945_mod_params.antenna = (enum iwl3945_antenna)ant;
- } else
- IWL_DEBUG_INFO(priv, "Bad antenna select value %d.\n", ant);
-
-
- return count;
-}
-
-static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, iwl3945_show_antenna, iwl3945_store_antenna);
-
-static ssize_t iwl3945_show_status(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- if (!iwl_legacy_is_alive(priv))
- return -EAGAIN;
- return sprintf(buf, "0x%08x\n", (int)priv->status);
-}
-
-static DEVICE_ATTR(status, S_IRUGO, iwl3945_show_status, NULL);
-
-static ssize_t iwl3945_dump_error_log(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- char *p = (char *)buf;
-
- if (p[0] == '1')
- iwl3945_dump_nic_error_log(priv);
-
- return strnlen(buf, count);
-}
-
-static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, iwl3945_dump_error_log);
-
-/*****************************************************************************
- *
- * driver setup and tear down
- *
- *****************************************************************************/
-
-static void iwl3945_setup_deferred_work(struct iwl_priv *priv)
-{
- priv->workqueue = create_singlethread_workqueue(DRV_NAME);
-
- init_waitqueue_head(&priv->wait_command_queue);
-
- INIT_WORK(&priv->restart, iwl3945_bg_restart);
- INIT_WORK(&priv->rx_replenish, iwl3945_bg_rx_replenish);
- INIT_DELAYED_WORK(&priv->init_alive_start, iwl3945_bg_init_alive_start);
- INIT_DELAYED_WORK(&priv->alive_start, iwl3945_bg_alive_start);
- INIT_DELAYED_WORK(&priv->_3945.rfkill_poll, iwl3945_rfkill_poll);
-
- iwl_legacy_setup_scan_deferred_work(priv);
-
- iwl3945_hw_setup_deferred_work(priv);
-
- init_timer(&priv->watchdog);
- priv->watchdog.data = (unsigned long)priv;
- priv->watchdog.function = iwl_legacy_bg_watchdog;
-
- tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
- iwl3945_irq_tasklet, (unsigned long)priv);
-}
-
-static void iwl3945_cancel_deferred_work(struct iwl_priv *priv)
-{
- iwl3945_hw_cancel_deferred_work(priv);
-
- cancel_delayed_work_sync(&priv->init_alive_start);
- cancel_delayed_work(&priv->alive_start);
-
- iwl_legacy_cancel_scan_deferred_work(priv);
-}
-
-static struct attribute *iwl3945_sysfs_entries[] = {
- &dev_attr_antenna.attr,
- &dev_attr_channels.attr,
- &dev_attr_dump_errors.attr,
- &dev_attr_flags.attr,
- &dev_attr_filter_flags.attr,
- &dev_attr_measurement.attr,
- &dev_attr_retry_rate.attr,
- &dev_attr_status.attr,
- &dev_attr_temperature.attr,
- &dev_attr_tx_power.attr,
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- &dev_attr_debug_level.attr,
-#endif
- NULL
-};
-
-static struct attribute_group iwl3945_attribute_group = {
- .name = NULL, /* put in device directory */
- .attrs = iwl3945_sysfs_entries,
-};
-
-struct ieee80211_ops iwl3945_hw_ops = {
- .tx = iwl3945_mac_tx,
- .start = iwl3945_mac_start,
- .stop = iwl3945_mac_stop,
- .add_interface = iwl_legacy_mac_add_interface,
- .remove_interface = iwl_legacy_mac_remove_interface,
- .change_interface = iwl_legacy_mac_change_interface,
- .config = iwl_legacy_mac_config,
- .configure_filter = iwl3945_configure_filter,
- .set_key = iwl3945_mac_set_key,
- .conf_tx = iwl_legacy_mac_conf_tx,
- .reset_tsf = iwl_legacy_mac_reset_tsf,
- .bss_info_changed = iwl_legacy_mac_bss_info_changed,
- .hw_scan = iwl_legacy_mac_hw_scan,
- .sta_add = iwl3945_mac_sta_add,
- .sta_remove = iwl_legacy_mac_sta_remove,
- .tx_last_beacon = iwl_legacy_mac_tx_last_beacon,
-};
-
-static int iwl3945_init_drv(struct iwl_priv *priv)
-{
- int ret;
- struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
-
- priv->retry_rate = 1;
- priv->beacon_skb = NULL;
-
- spin_lock_init(&priv->sta_lock);
- spin_lock_init(&priv->hcmd_lock);
-
- INIT_LIST_HEAD(&priv->free_frames);
-
- mutex_init(&priv->mutex);
-
- priv->ieee_channels = NULL;
- priv->ieee_rates = NULL;
- priv->band = IEEE80211_BAND_2GHZ;
-
- priv->iw_mode = NL80211_IFTYPE_STATION;
- priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
-
- /* initialize force reset */
- priv->force_reset.reset_duration = IWL_DELAY_NEXT_FORCE_FW_RELOAD;
-
- if (eeprom->version < EEPROM_3945_EEPROM_VERSION) {
- IWL_WARN(priv, "Unsupported EEPROM version: 0x%04X\n",
- eeprom->version);
- ret = -EINVAL;
- goto err;
- }
- ret = iwl_legacy_init_channel_map(priv);
- if (ret) {
- IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
- goto err;
- }
-
- /* Set up txpower settings in driver for all channels */
- if (iwl3945_txpower_set_from_eeprom(priv)) {
- ret = -EIO;
- goto err_free_channel_map;
- }
-
- ret = iwl_legacy_init_geos(priv);
- if (ret) {
- IWL_ERR(priv, "initializing geos failed: %d\n", ret);
- goto err_free_channel_map;
- }
- iwl3945_init_hw_rates(priv, priv->ieee_rates);
-
- return 0;
-
-err_free_channel_map:
- iwl_legacy_free_channel_map(priv);
-err:
- return ret;
-}
-
-#define IWL3945_MAX_PROBE_REQUEST 200
-
-static int iwl3945_setup_mac(struct iwl_priv *priv)
-{
- int ret;
- struct ieee80211_hw *hw = priv->hw;
-
- hw->rate_control_algorithm = "iwl-3945-rs";
- hw->sta_data_size = sizeof(struct iwl3945_sta_priv);
- hw->vif_data_size = sizeof(struct iwl_vif_priv);
-
- /* Tell mac80211 our characteristics */
- hw->flags = IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_SPECTRUM_MGMT;
-
- hw->wiphy->interface_modes =
- priv->contexts[IWL_RXON_CTX_BSS].interface_modes;
-
- hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
- WIPHY_FLAG_DISABLE_BEACON_HINTS |
- WIPHY_FLAG_IBSS_RSN;
-
- hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945;
- /* we create the 802.11 header and a zero-length SSID element */
- hw->wiphy->max_scan_ie_len = IWL3945_MAX_PROBE_REQUEST - 24 - 2;
-
- /* Default value; 4 EDCA QOS priorities */
- hw->queues = 4;
-
- if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
- priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
- &priv->bands[IEEE80211_BAND_2GHZ];
-
- if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
- priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
- &priv->bands[IEEE80211_BAND_5GHZ];
-
- iwl_legacy_leds_init(priv);
-
- ret = ieee80211_register_hw(priv->hw);
- if (ret) {
- IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
- return ret;
- }
- priv->mac80211_registered = 1;
-
- return 0;
-}
-
-static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- int err = 0, i;
- struct iwl_priv *priv;
- struct ieee80211_hw *hw;
- struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
- struct iwl3945_eeprom *eeprom;
- unsigned long flags;
-
- /***********************
- * 1. Allocating HW data
- * ********************/
-
- /* mac80211 allocates memory for this device instance, including
- * space for this driver's private structure */
- hw = iwl_legacy_alloc_all(cfg);
- if (hw == NULL) {
- pr_err("Can not allocate network device\n");
- err = -ENOMEM;
- goto out;
- }
- priv = hw->priv;
- SET_IEEE80211_DEV(hw, &pdev->dev);
-
- priv->cmd_queue = IWL39_CMD_QUEUE_NUM;
-
- /* 3945 has only one valid context */
- priv->valid_contexts = BIT(IWL_RXON_CTX_BSS);
-
- for (i = 0; i < NUM_IWL_RXON_CTX; i++)
- priv->contexts[i].ctxid = i;
-
- priv->contexts[IWL_RXON_CTX_BSS].rxon_cmd = REPLY_RXON;
- priv->contexts[IWL_RXON_CTX_BSS].rxon_timing_cmd = REPLY_RXON_TIMING;
- priv->contexts[IWL_RXON_CTX_BSS].rxon_assoc_cmd = REPLY_RXON_ASSOC;
- priv->contexts[IWL_RXON_CTX_BSS].qos_cmd = REPLY_QOS_PARAM;
- priv->contexts[IWL_RXON_CTX_BSS].ap_sta_id = IWL_AP_ID;
- priv->contexts[IWL_RXON_CTX_BSS].wep_key_cmd = REPLY_WEPKEY;
- priv->contexts[IWL_RXON_CTX_BSS].interface_modes =
- BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_ADHOC);
- priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS;
- priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS;
- priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS;
-
- /*
- * Disabling hardware scan means that mac80211 will perform scans
-	 * "the hard way", rather than using the device's scan.
- */
- if (iwl3945_mod_params.disable_hw_scan) {
- IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
- iwl3945_hw_ops.hw_scan = NULL;
- }
-
- IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
- priv->cfg = cfg;
- priv->pci_dev = pdev;
- priv->inta_mask = CSR_INI_SET_MASK;
-
- if (iwl_legacy_alloc_traffic_mem(priv))
- IWL_ERR(priv, "Not enough memory to generate traffic log\n");
-
- /***************************
- * 2. Initializing PCI bus
- * *************************/
- pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
- PCIE_LINK_STATE_CLKPM);
-
- if (pci_enable_device(pdev)) {
- err = -ENODEV;
- goto out_ieee80211_free_hw;
- }
-
- pci_set_master(pdev);
-
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (!err)
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
- if (err) {
- IWL_WARN(priv, "No suitable DMA available.\n");
- goto out_pci_disable_device;
- }
-
- pci_set_drvdata(pdev, priv);
- err = pci_request_regions(pdev, DRV_NAME);
- if (err)
- goto out_pci_disable_device;
-
- /***********************
- * 3. Read REV Register
- * ********************/
- priv->hw_base = pci_iomap(pdev, 0, 0);
- if (!priv->hw_base) {
- err = -ENODEV;
- goto out_pci_release_regions;
- }
-
- IWL_DEBUG_INFO(priv, "pci_resource_len = 0x%08llx\n",
- (unsigned long long) pci_resource_len(pdev, 0));
- IWL_DEBUG_INFO(priv, "pci_resource_base = %p\n", priv->hw_base);
-
- /* We disable the RETRY_TIMEOUT register (0x41) to keep
- * PCI Tx retries from interfering with C3 CPU state */
- pci_write_config_byte(pdev, 0x41, 0x00);
-
-	/* These spin locks are used in apm_ops.init and EEPROM access,
-	 * so initialize them now.
-	 */
- spin_lock_init(&priv->reg_lock);
- spin_lock_init(&priv->lock);
-
- /*
- * stop and reset the on-board processor just in case it is in a
- * strange state ... like being left stranded by a primary kernel
- * and this is now the kdump kernel trying to start up
- */
- iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
-
- /***********************
- * 4. Read EEPROM
- * ********************/
-
- /* Read the EEPROM */
- err = iwl_legacy_eeprom_init(priv);
- if (err) {
- IWL_ERR(priv, "Unable to init EEPROM\n");
- goto out_iounmap;
- }
- /* MAC Address location in EEPROM same for 3945/4965 */
- eeprom = (struct iwl3945_eeprom *)priv->eeprom;
- IWL_DEBUG_INFO(priv, "MAC address: %pM\n", eeprom->mac_address);
- SET_IEEE80211_PERM_ADDR(priv->hw, eeprom->mac_address);
-
- /***********************
- * 5. Setup HW Constants
- * ********************/
- /* Device-specific setup */
- if (iwl3945_hw_set_hw_params(priv)) {
- IWL_ERR(priv, "failed to set hw settings\n");
- goto out_eeprom_free;
- }
-
- /***********************
- * 6. Setup priv
- * ********************/
-
- err = iwl3945_init_drv(priv);
- if (err) {
- IWL_ERR(priv, "initializing driver failed\n");
- goto out_unset_hw_params;
- }
-
- IWL_INFO(priv, "Detected Intel Wireless WiFi Link %s\n",
- priv->cfg->name);
-
- /***********************
- * 7. Setup Services
- * ********************/
-
- spin_lock_irqsave(&priv->lock, flags);
- iwl_legacy_disable_interrupts(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- pci_enable_msi(priv->pci_dev);
-
- err = request_irq(priv->pci_dev->irq, iwl_legacy_isr,
- IRQF_SHARED, DRV_NAME, priv);
- if (err) {
- IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq);
- goto out_disable_msi;
- }
-
- err = sysfs_create_group(&pdev->dev.kobj, &iwl3945_attribute_group);
- if (err) {
- IWL_ERR(priv, "failed to create sysfs device attributes\n");
- goto out_release_irq;
- }
-
- iwl_legacy_set_rxon_channel(priv,
- &priv->bands[IEEE80211_BAND_2GHZ].channels[5],
- &priv->contexts[IWL_RXON_CTX_BSS]);
- iwl3945_setup_deferred_work(priv);
- iwl3945_setup_rx_handlers(priv);
- iwl_legacy_power_initialize(priv);
-
- /*********************************
- * 8. Setup and Register mac80211
- * *******************************/
-
- iwl_legacy_enable_interrupts(priv);
-
- err = iwl3945_setup_mac(priv);
- if (err)
- goto out_remove_sysfs;
-
- err = iwl_legacy_dbgfs_register(priv, DRV_NAME);
- if (err)
- IWL_ERR(priv, "failed to create debugfs files. Ignoring error: %d\n", err);
-
- /* Start monitoring the killswitch */
- queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll,
- 2 * HZ);
-
- return 0;
-
- out_remove_sysfs:
- destroy_workqueue(priv->workqueue);
- priv->workqueue = NULL;
- sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group);
- out_release_irq:
- free_irq(priv->pci_dev->irq, priv);
- out_disable_msi:
- pci_disable_msi(priv->pci_dev);
- iwl_legacy_free_geos(priv);
- iwl_legacy_free_channel_map(priv);
- out_unset_hw_params:
- iwl3945_unset_hw_params(priv);
- out_eeprom_free:
- iwl_legacy_eeprom_free(priv);
- out_iounmap:
- pci_iounmap(pdev, priv->hw_base);
- out_pci_release_regions:
- pci_release_regions(pdev);
- out_pci_disable_device:
- pci_set_drvdata(pdev, NULL);
- pci_disable_device(pdev);
- out_ieee80211_free_hw:
- iwl_legacy_free_traffic_mem(priv);
- ieee80211_free_hw(priv->hw);
- out:
- return err;
-}
-
-static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
-{
- struct iwl_priv *priv = pci_get_drvdata(pdev);
- unsigned long flags;
-
- if (!priv)
- return;
-
- IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");
-
- iwl_legacy_dbgfs_unregister(priv);
-
- set_bit(STATUS_EXIT_PENDING, &priv->status);
-
- iwl_legacy_leds_exit(priv);
-
- if (priv->mac80211_registered) {
- ieee80211_unregister_hw(priv->hw);
- priv->mac80211_registered = 0;
- } else {
- iwl3945_down(priv);
- }
-
- /*
- * Make sure device is reset to low power before unloading driver.
- * This may be redundant with iwl_down(), but there are paths to
- * run iwl_down() without calling apm_ops.stop(), and there are
- * paths to avoid running iwl_down() at all before leaving driver.
- * This (inexpensive) call *makes sure* device is reset.
- */
- iwl_legacy_apm_stop(priv);
-
- /* make sure we flush any pending irq or
- * tasklet for the driver
- */
- spin_lock_irqsave(&priv->lock, flags);
- iwl_legacy_disable_interrupts(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- iwl3945_synchronize_irq(priv);
-
- sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group);
-
- cancel_delayed_work_sync(&priv->_3945.rfkill_poll);
-
- iwl3945_dealloc_ucode_pci(priv);
-
- if (priv->rxq.bd)
- iwl3945_rx_queue_free(priv, &priv->rxq);
- iwl3945_hw_txq_ctx_free(priv);
-
- iwl3945_unset_hw_params(priv);
-
- /*netif_stop_queue(dev); */
- flush_workqueue(priv->workqueue);
-
- /* ieee80211_unregister_hw calls iwl3945_mac_stop, which flushes
- * priv->workqueue... so we can't take down the workqueue
- * until now... */
- destroy_workqueue(priv->workqueue);
- priv->workqueue = NULL;
- iwl_legacy_free_traffic_mem(priv);
-
- free_irq(pdev->irq, priv);
- pci_disable_msi(pdev);
-
- pci_iounmap(pdev, priv->hw_base);
- pci_release_regions(pdev);
- pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
-
- iwl_legacy_free_channel_map(priv);
- iwl_legacy_free_geos(priv);
- kfree(priv->scan_cmd);
- if (priv->beacon_skb)
- dev_kfree_skb(priv->beacon_skb);
-
- ieee80211_free_hw(priv->hw);
-}
-
-
-/*****************************************************************************
- *
- * driver and module entry point
- *
- *****************************************************************************/
-
-static struct pci_driver iwl3945_driver = {
- .name = DRV_NAME,
- .id_table = iwl3945_hw_card_ids,
- .probe = iwl3945_pci_probe,
- .remove = __devexit_p(iwl3945_pci_remove),
- .driver.pm = IWL_LEGACY_PM_OPS,
-};
-
-static int __init iwl3945_init(void)
-{
-
- int ret;
- pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
- pr_info(DRV_COPYRIGHT "\n");
-
- ret = iwl3945_rate_control_register();
- if (ret) {
- pr_err("Unable to register rate control algorithm: %d\n", ret);
- return ret;
- }
-
- ret = pci_register_driver(&iwl3945_driver);
- if (ret) {
- pr_err("Unable to initialize PCI module\n");
- goto error_register;
- }
-
- return ret;
-
-error_register:
- iwl3945_rate_control_unregister();
- return ret;
-}
-
-static void __exit iwl3945_exit(void)
-{
- pci_unregister_driver(&iwl3945_driver);
- iwl3945_rate_control_unregister();
-}
-
-MODULE_FIRMWARE(IWL3945_MODULE_FIRMWARE(IWL3945_UCODE_API_MAX));
-
-module_param_named(antenna, iwl3945_mod_params.antenna, int, S_IRUGO);
-MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
-module_param_named(swcrypto, iwl3945_mod_params.sw_crypto, int, S_IRUGO);
-MODULE_PARM_DESC(swcrypto,
-		"use software crypto (default 1 [software])");
-module_param_named(disable_hw_scan, iwl3945_mod_params.disable_hw_scan,
- int, S_IRUGO);
-MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 1)");
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-module_param_named(debug, iwlegacy_debug_level, uint, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(debug, "debug output mask");
-#endif
-module_param_named(fw_restart, iwl3945_mod_params.restart_fw, int, S_IRUGO);
-MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
-
-module_exit(iwl3945_exit);
-module_init(iwl3945_init);
diff --git a/drivers/net/wireless/iwlegacy/iwl4965-base.c b/drivers/net/wireless/iwlegacy/iwl4965-base.c
deleted file mode 100644
index d2fba9eae153..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl4965-base.c
+++ /dev/null
@@ -1,3281 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/pci-aspm.h>
-#include <linux/slab.h>
-#include <linux/dma-mapping.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/firmware.h>
-#include <linux/etherdevice.h>
-#include <linux/if_arp.h>
-
-#include <net/mac80211.h>
-
-#include <asm/div64.h>
-
-#define DRV_NAME "iwl4965"
-
-#include "iwl-eeprom.h"
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-io.h"
-#include "iwl-helpers.h"
-#include "iwl-sta.h"
-#include "iwl-4965-calib.h"
-#include "iwl-4965.h"
-#include "iwl-4965-led.h"
-
-
-/******************************************************************************
- *
- * module boiler plate
- *
- ******************************************************************************/
-
-/*
- * module name, copyright, version, etc.
- */
-#define DRV_DESCRIPTION "Intel(R) Wireless WiFi 4965 driver for Linux"
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-#define VD "d"
-#else
-#define VD
-#endif
-
-#define DRV_VERSION IWLWIFI_VERSION VD
-
-
-MODULE_DESCRIPTION(DRV_DESCRIPTION);
-MODULE_VERSION(DRV_VERSION);
-MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("iwl4965");
-
-void iwl4965_update_chain_flags(struct iwl_priv *priv)
-{
- struct iwl_rxon_context *ctx;
-
- if (priv->cfg->ops->hcmd->set_rxon_chain) {
- for_each_context(priv, ctx) {
- priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
- if (ctx->active.rx_chain != ctx->staging.rx_chain)
- iwl_legacy_commit_rxon(priv, ctx);
- }
- }
-}
-
-static void iwl4965_clear_free_frames(struct iwl_priv *priv)
-{
- struct list_head *element;
-
- IWL_DEBUG_INFO(priv, "%d frames on pre-allocated heap on clear.\n",
- priv->frames_count);
-
- while (!list_empty(&priv->free_frames)) {
- element = priv->free_frames.next;
- list_del(element);
- kfree(list_entry(element, struct iwl_frame, list));
- priv->frames_count--;
- }
-
- if (priv->frames_count) {
- IWL_WARN(priv, "%d frames still in use. Did we lose one?\n",
- priv->frames_count);
- priv->frames_count = 0;
- }
-}
-
-static struct iwl_frame *iwl4965_get_free_frame(struct iwl_priv *priv)
-{
- struct iwl_frame *frame;
- struct list_head *element;
- if (list_empty(&priv->free_frames)) {
- frame = kzalloc(sizeof(*frame), GFP_KERNEL);
- if (!frame) {
- IWL_ERR(priv, "Could not allocate frame!\n");
- return NULL;
- }
-
- priv->frames_count++;
- return frame;
- }
-
- element = priv->free_frames.next;
- list_del(element);
- return list_entry(element, struct iwl_frame, list);
-}
-
-static void iwl4965_free_frame(struct iwl_priv *priv, struct iwl_frame *frame)
-{
- memset(frame, 0, sizeof(*frame));
- list_add(&frame->list, &priv->free_frames);
-}
-
-static u32 iwl4965_fill_beacon_frame(struct iwl_priv *priv,
- struct ieee80211_hdr *hdr,
- int left)
-{
- lockdep_assert_held(&priv->mutex);
-
- if (!priv->beacon_skb)
- return 0;
-
- if (priv->beacon_skb->len > left)
- return 0;
-
- memcpy(hdr, priv->beacon_skb->data, priv->beacon_skb->len);
-
- return priv->beacon_skb->len;
-}
-
-/* Parse the beacon frame to find the TIM element and set tim_idx & tim_size */
-static void iwl4965_set_beacon_tim(struct iwl_priv *priv,
- struct iwl_tx_beacon_cmd *tx_beacon_cmd,
- u8 *beacon, u32 frame_size)
-{
- u16 tim_idx;
- struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon;
-
- /*
- * The index is relative to frame start but we start looking at the
- * variable-length part of the beacon.
- */
- tim_idx = mgmt->u.beacon.variable - beacon;
-
- /* Parse variable-length elements of beacon to find WLAN_EID_TIM */
- while ((tim_idx < (frame_size - 2)) &&
- (beacon[tim_idx] != WLAN_EID_TIM))
- tim_idx += beacon[tim_idx+1] + 2;
-
- /* If TIM field was found, set variables */
- if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
- tx_beacon_cmd->tim_idx = cpu_to_le16(tim_idx);
- tx_beacon_cmd->tim_size = beacon[tim_idx+1];
- } else
- IWL_WARN(priv, "Unable to find TIM Element in beacon\n");
-}
-
-static unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv,
- struct iwl_frame *frame)
-{
- struct iwl_tx_beacon_cmd *tx_beacon_cmd;
- u32 frame_size;
- u32 rate_flags;
- u32 rate;
- /*
- * We have to set up the TX command, the TX Beacon command, and the
- * beacon contents.
- */
-
- lockdep_assert_held(&priv->mutex);
-
- if (!priv->beacon_ctx) {
- IWL_ERR(priv, "trying to build beacon w/o beacon context!\n");
- return 0;
- }
-
- /* Initialize memory */
- tx_beacon_cmd = &frame->u.beacon;
- memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
-
- /* Set up TX beacon contents */
- frame_size = iwl4965_fill_beacon_frame(priv, tx_beacon_cmd->frame,
- sizeof(frame->u) - sizeof(*tx_beacon_cmd));
- if (WARN_ON_ONCE(frame_size > MAX_MPDU_SIZE))
- return 0;
- if (!frame_size)
- return 0;
-
- /* Set up TX command fields */
- tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);
- tx_beacon_cmd->tx.sta_id = priv->beacon_ctx->bcast_sta_id;
- tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
- tx_beacon_cmd->tx.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK |
- TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK;
-
- /* Set up TX beacon command fields */
- iwl4965_set_beacon_tim(priv, tx_beacon_cmd, (u8 *)tx_beacon_cmd->frame,
- frame_size);
-
- /* Set up packet rate and flags */
- rate = iwl_legacy_get_lowest_plcp(priv, priv->beacon_ctx);
- priv->mgmt_tx_ant = iwl4965_toggle_tx_ant(priv, priv->mgmt_tx_ant,
- priv->hw_params.valid_tx_ant);
- rate_flags = iwl4965_ant_idx_to_flags(priv->mgmt_tx_ant);
- if ((rate >= IWL_FIRST_CCK_RATE) && (rate <= IWL_LAST_CCK_RATE))
- rate_flags |= RATE_MCS_CCK_MSK;
- tx_beacon_cmd->tx.rate_n_flags = iwl4965_hw_set_rate_n_flags(rate,
- rate_flags);
-
- return sizeof(*tx_beacon_cmd) + frame_size;
-}
-
-int iwl4965_send_beacon_cmd(struct iwl_priv *priv)
-{
- struct iwl_frame *frame;
- unsigned int frame_size;
- int rc;
-
- frame = iwl4965_get_free_frame(priv);
- if (!frame) {
- IWL_ERR(priv, "Could not obtain free frame buffer for beacon "
- "command.\n");
- return -ENOMEM;
- }
-
- frame_size = iwl4965_hw_get_beacon_cmd(priv, frame);
- if (!frame_size) {
- IWL_ERR(priv, "Error configuring the beacon command\n");
- iwl4965_free_frame(priv, frame);
- return -EINVAL;
- }
-
- rc = iwl_legacy_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
- &frame->u.cmd[0]);
-
- iwl4965_free_frame(priv, frame);
-
- return rc;
-}
-
-static inline dma_addr_t iwl4965_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
-{
- struct iwl_tfd_tb *tb = &tfd->tbs[idx];
-
- dma_addr_t addr = get_unaligned_le32(&tb->lo);
- if (sizeof(dma_addr_t) > sizeof(u32))
- addr |=
- ((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;
-
- return addr;
-}
-
-static inline u16 iwl4965_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
-{
- struct iwl_tfd_tb *tb = &tfd->tbs[idx];
-
- return le16_to_cpu(tb->hi_n_len) >> 4;
-}
-
-static inline void iwl4965_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
- dma_addr_t addr, u16 len)
-{
- struct iwl_tfd_tb *tb = &tfd->tbs[idx];
- u16 hi_n_len = len << 4;
-
- put_unaligned_le32(addr, &tb->lo);
- if (sizeof(dma_addr_t) > sizeof(u32))
- hi_n_len |= ((addr >> 16) >> 16) & 0xF;
-
- tb->hi_n_len = cpu_to_le16(hi_n_len);
-
- tfd->num_tbs = idx + 1;
-}
-
-static inline u8 iwl4965_tfd_get_num_tbs(struct iwl_tfd *tfd)
-{
- return tfd->num_tbs & 0x1f;
-}
-
-/**
- * iwl4965_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
- * @priv: driver private data
- * @txq: tx queue
- *
- * Does NOT advance any TFD circular buffer read/write indexes
- * Does NOT free the TFD itself (which is within circular buffer)
- */
-void iwl4965_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
-{
- struct iwl_tfd *tfd_tmp = (struct iwl_tfd *)txq->tfds;
- struct iwl_tfd *tfd;
- struct pci_dev *dev = priv->pci_dev;
- int index = txq->q.read_ptr;
- int i;
- int num_tbs;
-
- tfd = &tfd_tmp[index];
-
- /* Sanity check on number of chunks */
- num_tbs = iwl4965_tfd_get_num_tbs(tfd);
-
- if (num_tbs >= IWL_NUM_OF_TBS) {
- IWL_ERR(priv, "Too many chunks: %i\n", num_tbs);
-		/* @todo issue fatal error, this is quite a serious situation */
- return;
- }
-
- /* Unmap tx_cmd */
- if (num_tbs)
- pci_unmap_single(dev,
- dma_unmap_addr(&txq->meta[index], mapping),
- dma_unmap_len(&txq->meta[index], len),
- PCI_DMA_BIDIRECTIONAL);
-
- /* Unmap chunks, if any. */
- for (i = 1; i < num_tbs; i++)
- pci_unmap_single(dev, iwl4965_tfd_tb_get_addr(tfd, i),
- iwl4965_tfd_tb_get_len(tfd, i),
- PCI_DMA_TODEVICE);
-
- /* free SKB */
- if (txq->txb) {
- struct sk_buff *skb;
-
- skb = txq->txb[txq->q.read_ptr].skb;
-
- /* can be called from irqs-disabled context */
- if (skb) {
- dev_kfree_skb_any(skb);
- txq->txb[txq->q.read_ptr].skb = NULL;
- }
- }
-}
-
-int iwl4965_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
- struct iwl_tx_queue *txq,
- dma_addr_t addr, u16 len,
- u8 reset, u8 pad)
-{
- struct iwl_queue *q;
- struct iwl_tfd *tfd, *tfd_tmp;
- u32 num_tbs;
-
- q = &txq->q;
- tfd_tmp = (struct iwl_tfd *)txq->tfds;
- tfd = &tfd_tmp[q->write_ptr];
-
- if (reset)
- memset(tfd, 0, sizeof(*tfd));
-
- num_tbs = iwl4965_tfd_get_num_tbs(tfd);
-
-	/* Each TFD can point to a maximum of 20 Tx buffers */
- if (num_tbs >= IWL_NUM_OF_TBS) {
- IWL_ERR(priv, "Error can not send more than %d chunks\n",
- IWL_NUM_OF_TBS);
- return -EINVAL;
- }
-
- BUG_ON(addr & ~DMA_BIT_MASK(36));
- if (unlikely(addr & ~IWL_TX_DMA_MASK))
- IWL_ERR(priv, "Unaligned address = %llx\n",
- (unsigned long long)addr);
-
- iwl4965_tfd_set_tb(tfd, num_tbs, addr, len);
-
- return 0;
-}
-
-/*
- * Tell nic where to find circular buffer of Tx Frame Descriptors for
- * given Tx queue, and enable the DMA channel used for that queue.
- *
- * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
- * channels supported in hardware.
- */
-int iwl4965_hw_tx_queue_init(struct iwl_priv *priv,
- struct iwl_tx_queue *txq)
-{
- int txq_id = txq->q.id;
-
- /* Circular buffer (TFD queue in DRAM) physical base address */
- iwl_legacy_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
- txq->q.dma_addr >> 8);
-
- return 0;
-}
-
-/******************************************************************************
- *
- * Generic RX handler implementations
- *
- ******************************************************************************/
-static void iwl4965_rx_reply_alive(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_alive_resp *palive;
- struct delayed_work *pwork;
-
- palive = &pkt->u.alive_frame;
-
- IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision "
- "0x%01X 0x%01X\n",
- palive->is_valid, palive->ver_type,
- palive->ver_subtype);
-
- if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
- IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
- memcpy(&priv->card_alive_init,
- &pkt->u.alive_frame,
- sizeof(struct iwl_init_alive_resp));
- pwork = &priv->init_alive_start;
- } else {
- IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
- memcpy(&priv->card_alive, &pkt->u.alive_frame,
- sizeof(struct iwl_alive_resp));
- pwork = &priv->alive_start;
- }
-
- /* We delay the ALIVE response by 5ms to
- * give the HW RF Kill time to activate... */
- if (palive->is_valid == UCODE_VALID_OK)
- queue_delayed_work(priv->workqueue, pwork,
- msecs_to_jiffies(5));
- else
- IWL_WARN(priv, "uCode did not respond OK.\n");
-}
-
-/**
- * iwl4965_bg_statistics_periodic - Timer callback to queue statistics
- *
- * This callback is provided in order to send a statistics request.
- *
- * This timer function is continually reset to execute within
- * REG_RECALIB_PERIOD seconds since the last STATISTICS_NOTIFICATION
- * was received. We need to ensure we receive the statistics in order
- * to update the temperature used for calibrating the TXPOWER.
- */
-static void iwl4965_bg_statistics_periodic(unsigned long data)
-{
- struct iwl_priv *priv = (struct iwl_priv *)data;
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return;
-
-	/* don't send host command if rf-kill is on */
- if (!iwl_legacy_is_ready_rf(priv))
- return;
-
- iwl_legacy_send_statistics_request(priv, CMD_ASYNC, false);
-}
-
-static void iwl4965_rx_beacon_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl4965_beacon_notif *beacon =
- (struct iwl4965_beacon_notif *)pkt->u.raw;
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- u8 rate = iwl4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
-
- IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d "
- "tsf %d %d rate %d\n",
- le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
- beacon->beacon_notify_hdr.failure_frame,
- le32_to_cpu(beacon->ibss_mgr_status),
- le32_to_cpu(beacon->high_tsf),
- le32_to_cpu(beacon->low_tsf), rate);
-#endif
-
- priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
-}
-
-static void iwl4965_perform_ct_kill_task(struct iwl_priv *priv)
-{
- unsigned long flags;
-
- IWL_DEBUG_POWER(priv, "Stop all queues\n");
-
- if (priv->mac80211_registered)
- ieee80211_stop_queues(priv->hw);
-
- iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
- CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
- iwl_read32(priv, CSR_UCODE_DRV_GP1);
-
- spin_lock_irqsave(&priv->reg_lock, flags);
- if (!iwl_grab_nic_access(priv))
- iwl_release_nic_access(priv);
- spin_unlock_irqrestore(&priv->reg_lock, flags);
-}
-
-/* Handle notification from uCode that card's power state is changing
- * due to software, hardware, or critical temperature RFKILL */
-static void iwl4965_rx_card_state_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
- unsigned long status = priv->status;
-
- IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s CT:%s\n",
- (flags & HW_CARD_DISABLED) ? "Kill" : "On",
- (flags & SW_CARD_DISABLED) ? "Kill" : "On",
- (flags & CT_CARD_DISABLED) ?
- "Reached" : "Not reached");
-
- if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
- CT_CARD_DISABLED)) {
-
- iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
- CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
-
- iwl_legacy_write_direct32(priv, HBUS_TARG_MBX_C,
- HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
-
- if (!(flags & RXON_CARD_DISABLED)) {
- iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
- CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
- iwl_legacy_write_direct32(priv, HBUS_TARG_MBX_C,
- HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
- }
- }
-
- if (flags & CT_CARD_DISABLED)
- iwl4965_perform_ct_kill_task(priv);
-
- if (flags & HW_CARD_DISABLED)
- set_bit(STATUS_RF_KILL_HW, &priv->status);
- else
- clear_bit(STATUS_RF_KILL_HW, &priv->status);
-
- if (!(flags & RXON_CARD_DISABLED))
- iwl_legacy_scan_cancel(priv);
-
- if ((test_bit(STATUS_RF_KILL_HW, &status) !=
- test_bit(STATUS_RF_KILL_HW, &priv->status)))
- wiphy_rfkill_set_hw_state(priv->hw->wiphy,
- test_bit(STATUS_RF_KILL_HW, &priv->status));
- else
- wake_up(&priv->wait_command_queue);
-}
-
-/**
- * iwl4965_setup_rx_handlers - Initialize Rx handler callbacks
- *
- * Setup the RX handlers for each of the reply types sent from the uCode
- * to the host.
- *
- * This function chains into the hardware specific files for them to setup
- * any hardware specific handlers as well.
- */
-static void iwl4965_setup_rx_handlers(struct iwl_priv *priv)
-{
- priv->rx_handlers[REPLY_ALIVE] = iwl4965_rx_reply_alive;
- priv->rx_handlers[REPLY_ERROR] = iwl_legacy_rx_reply_error;
- priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_legacy_rx_csa;
- priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
- iwl_legacy_rx_spectrum_measure_notif;
- priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_legacy_rx_pm_sleep_notif;
- priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
- iwl_legacy_rx_pm_debug_statistics_notif;
- priv->rx_handlers[BEACON_NOTIFICATION] = iwl4965_rx_beacon_notif;
-
- /*
- * The same handler is used for both the REPLY to a discrete
- * statistics request from the host as well as for the periodic
- * statistics notifications (after received beacons) from the uCode.
- */
- priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl4965_reply_statistics;
- priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl4965_rx_statistics;
-
- iwl_legacy_setup_rx_scan_handlers(priv);
-
- /* status change handler */
- priv->rx_handlers[CARD_STATE_NOTIFICATION] =
- iwl4965_rx_card_state_notif;
-
- priv->rx_handlers[MISSED_BEACONS_NOTIFICATION] =
- iwl4965_rx_missed_beacon_notif;
- /* Rx handlers */
- priv->rx_handlers[REPLY_RX_PHY_CMD] = iwl4965_rx_reply_rx_phy;
- priv->rx_handlers[REPLY_RX_MPDU_CMD] = iwl4965_rx_reply_rx;
- /* block ack */
- priv->rx_handlers[REPLY_COMPRESSED_BA] = iwl4965_rx_reply_compressed_ba;
- /* Set up hardware specific Rx handlers */
- priv->cfg->ops->lib->rx_handler_setup(priv);
-}
-
-/**
- * iwl4965_rx_handle - Main entry function for receiving responses from uCode
- *
- * Uses the priv->rx_handlers callback function array to invoke
- * the appropriate handlers, including command responses,
- * frame-received notifications, and other notifications.
- */
-void iwl4965_rx_handle(struct iwl_priv *priv)
-{
- struct iwl_rx_mem_buffer *rxb;
- struct iwl_rx_packet *pkt;
- struct iwl_rx_queue *rxq = &priv->rxq;
- u32 r, i;
- int reclaim;
- unsigned long flags;
- u8 fill_rx = 0;
- u32 count = 8;
- int total_empty;
-
- /* uCode's read index (stored in shared DRAM) indicates the last Rx
- * buffer that the driver may process (last buffer filled by ucode). */
- r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
- i = rxq->read;
-
- /* Rx interrupt, but nothing sent from uCode */
- if (i == r)
- IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i);
-
-	/* calculate how many frames need to be restocked after handling RX */
- total_empty = r - rxq->write_actual;
- if (total_empty < 0)
- total_empty += RX_QUEUE_SIZE;
-
- if (total_empty > (RX_QUEUE_SIZE / 2))
- fill_rx = 1;
-
- while (i != r) {
- int len;
-
- rxb = rxq->queue[i];
-
- /* If an RXB doesn't have a Rx queue slot associated with it,
- * then a bug has been introduced in the queue refilling
- * routines -- catch it here */
- BUG_ON(rxb == NULL);
-
- rxq->queue[i] = NULL;
-
- pci_unmap_page(priv->pci_dev, rxb->page_dma,
- PAGE_SIZE << priv->hw_params.rx_page_order,
- PCI_DMA_FROMDEVICE);
- pkt = rxb_addr(rxb);
-
- len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
- len += sizeof(u32); /* account for status word */
- trace_iwlwifi_legacy_dev_rx(priv, pkt, len);
-
- /* Reclaim a command buffer only if this packet is a response
- * to a (driver-originated) command.
- * If the packet (e.g. Rx frame) originated from uCode,
- * there is no command buffer to reclaim.
- * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
- * but apparently a few don't get set; catch them here. */
- reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
- (pkt->hdr.cmd != REPLY_RX_PHY_CMD) &&
- (pkt->hdr.cmd != REPLY_RX) &&
- (pkt->hdr.cmd != REPLY_RX_MPDU_CMD) &&
- (pkt->hdr.cmd != REPLY_COMPRESSED_BA) &&
- (pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
- (pkt->hdr.cmd != REPLY_TX);
-
- /* Based on type of command response or notification,
- * handle those that need handling via function in
- * rx_handlers table. See iwl4965_setup_rx_handlers() */
- if (priv->rx_handlers[pkt->hdr.cmd]) {
- IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r,
- i, iwl_legacy_get_cmd_string(pkt->hdr.cmd),
- pkt->hdr.cmd);
- priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
- priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
- } else {
- /* No handling needed */
- IWL_DEBUG_RX(priv,
- "r %d i %d No handler needed for %s, 0x%02x\n",
- r, i, iwl_legacy_get_cmd_string(pkt->hdr.cmd),
- pkt->hdr.cmd);
- }
-
- /*
- * XXX: After here, we should always check rxb->page
- * against NULL before touching it or its virtual
- * memory (pkt). Because some rx_handler might have
- * already taken or freed the pages.
- */
-
- if (reclaim) {
- /* Invoke any callbacks, transfer the buffer to caller,
- * and fire off the (possibly) blocking iwl_legacy_send_cmd()
- * as we reclaim the driver command queue */
- if (rxb->page)
- iwl_legacy_tx_cmd_complete(priv, rxb);
- else
- IWL_WARN(priv, "Claim null rxb?\n");
- }
-
- /* Reuse the page if possible. For notification packets and
- * SKBs that fail to Rx correctly, add them back into the
- * rx_free list for reuse later. */
- spin_lock_irqsave(&rxq->lock, flags);
- if (rxb->page != NULL) {
- rxb->page_dma = pci_map_page(priv->pci_dev, rxb->page,
- 0, PAGE_SIZE << priv->hw_params.rx_page_order,
- PCI_DMA_FROMDEVICE);
- list_add_tail(&rxb->list, &rxq->rx_free);
- rxq->free_count++;
- } else
- list_add_tail(&rxb->list, &rxq->rx_used);
-
- spin_unlock_irqrestore(&rxq->lock, flags);
-
- i = (i + 1) & RX_QUEUE_MASK;
- /* If there are a lot of unused frames,
-		 * restock the Rx queue so ucode won't assert. */
- if (fill_rx) {
- count++;
- if (count >= 8) {
- rxq->read = i;
- iwl4965_rx_replenish_now(priv);
- count = 0;
- }
- }
- }
-
- /* Backtrack one entry */
- rxq->read = i;
- if (fill_rx)
- iwl4965_rx_replenish_now(priv);
- else
- iwl4965_rx_queue_restock(priv);
-}
-
-/* call this function to flush any scheduled tasklet */
-static inline void iwl4965_synchronize_irq(struct iwl_priv *priv)
-{
-	/* wait to make sure we flush the pending tasklet */
- synchronize_irq(priv->pci_dev->irq);
- tasklet_kill(&priv->irq_tasklet);
-}
-
-static void iwl4965_irq_tasklet(struct iwl_priv *priv)
-{
- u32 inta, handled = 0;
- u32 inta_fh;
- unsigned long flags;
- u32 i;
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- u32 inta_mask;
-#endif
-
- spin_lock_irqsave(&priv->lock, flags);
-
- /* Ack/clear/reset pending uCode interrupts.
- * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
- * and will clear only when CSR_FH_INT_STATUS gets cleared. */
- inta = iwl_read32(priv, CSR_INT);
- iwl_write32(priv, CSR_INT, inta);
-
- /* Ack/clear/reset pending flow-handler (DMA) interrupts.
- * Any new interrupts that happen after this, either while we're
- * in this tasklet, or later, will show up in next ISR/tasklet. */
- inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
- iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh);
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- if (iwl_legacy_get_debug_level(priv) & IWL_DL_ISR) {
- /* just for debug */
- inta_mask = iwl_read32(priv, CSR_INT_MASK);
- IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
- inta, inta_mask, inta_fh);
- }
-#endif
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
- * atomic, make sure that inta covers all the interrupts that
- * we've discovered, even if FH interrupt came in just after
- * reading CSR_INT. */
- if (inta_fh & CSR49_FH_INT_RX_MASK)
- inta |= CSR_INT_BIT_FH_RX;
- if (inta_fh & CSR49_FH_INT_TX_MASK)
- inta |= CSR_INT_BIT_FH_TX;
-
- /* Now service all interrupt bits discovered above. */
- if (inta & CSR_INT_BIT_HW_ERR) {
- IWL_ERR(priv, "Hardware error detected. Restarting.\n");
-
- /* Tell the device to stop sending interrupts */
- iwl_legacy_disable_interrupts(priv);
-
- priv->isr_stats.hw++;
- iwl_legacy_irq_handle_error(priv);
-
- handled |= CSR_INT_BIT_HW_ERR;
-
- return;
- }
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
- /* NIC fires this, but we don't use it, redundant with WAKEUP */
- if (inta & CSR_INT_BIT_SCD) {
-			IWL_DEBUG_ISR(priv,
-				"Scheduler finished transmitting the frame(s).\n");
- priv->isr_stats.sch++;
- }
-
- /* Alive notification via Rx interrupt will do the real work */
- if (inta & CSR_INT_BIT_ALIVE) {
- IWL_DEBUG_ISR(priv, "Alive interrupt\n");
- priv->isr_stats.alive++;
- }
- }
-#endif
- /* Safely ignore these bits for debug checks below */
- inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
-
- /* HW RF KILL switch toggled */
- if (inta & CSR_INT_BIT_RF_KILL) {
- int hw_rf_kill = 0;
- if (!(iwl_read32(priv, CSR_GP_CNTRL) &
- CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
- hw_rf_kill = 1;
-
- IWL_WARN(priv, "RF_KILL bit toggled to %s.\n",
- hw_rf_kill ? "disable radio" : "enable radio");
-
- priv->isr_stats.rfkill++;
-
-		/* The driver only loads ucode once, when the interface is
-		 * brought up. The driver allows loading the ucode even if
-		 * the radio is killed. Hence update the killswitch state
-		 * here. The rfkill handler will take care of restarting
-		 * if needed.
-		 */
- if (!test_bit(STATUS_ALIVE, &priv->status)) {
- if (hw_rf_kill)
- set_bit(STATUS_RF_KILL_HW, &priv->status);
- else
- clear_bit(STATUS_RF_KILL_HW, &priv->status);
- wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rf_kill);
- }
-
- handled |= CSR_INT_BIT_RF_KILL;
- }
-
- /* Chip got too hot and stopped itself */
- if (inta & CSR_INT_BIT_CT_KILL) {
- IWL_ERR(priv, "Microcode CT kill error detected.\n");
- priv->isr_stats.ctkill++;
- handled |= CSR_INT_BIT_CT_KILL;
- }
-
- /* Error detected by uCode */
- if (inta & CSR_INT_BIT_SW_ERR) {
- IWL_ERR(priv, "Microcode SW error detected. "
-			"Restarting 0x%X.\n", inta);
- priv->isr_stats.sw++;
- iwl_legacy_irq_handle_error(priv);
- handled |= CSR_INT_BIT_SW_ERR;
- }
-
- /*
- * uCode wakes up after power-down sleep.
- * Tell device about any new tx or host commands enqueued,
- * and about any Rx buffers made available while asleep.
- */
- if (inta & CSR_INT_BIT_WAKEUP) {
- IWL_DEBUG_ISR(priv, "Wakeup interrupt\n");
- iwl_legacy_rx_queue_update_write_ptr(priv, &priv->rxq);
- for (i = 0; i < priv->hw_params.max_txq_num; i++)
- iwl_legacy_txq_update_write_ptr(priv, &priv->txq[i]);
- priv->isr_stats.wakeup++;
- handled |= CSR_INT_BIT_WAKEUP;
- }
-
- /* All uCode command responses, including Tx command responses,
- * Rx "responses" (frame-received notification), and other
-	 * notifications from uCode come through here */
- if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
- iwl4965_rx_handle(priv);
- priv->isr_stats.rx++;
- handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
- }
-
- /* This "Tx" DMA channel is used only for loading uCode */
- if (inta & CSR_INT_BIT_FH_TX) {
- IWL_DEBUG_ISR(priv, "uCode load interrupt\n");
- priv->isr_stats.tx++;
- handled |= CSR_INT_BIT_FH_TX;
- /* Wake up uCode load routine, now that load is complete */
- priv->ucode_write_complete = 1;
- wake_up(&priv->wait_command_queue);
- }
-
- if (inta & ~handled) {
- IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
- priv->isr_stats.unhandled++;
- }
-
- if (inta & ~(priv->inta_mask)) {
- IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n",
- inta & ~priv->inta_mask);
- IWL_WARN(priv, " with FH_INT = 0x%08x\n", inta_fh);
- }
-
- /* Re-enable all interrupts */
- /* only Re-enable if disabled by irq */
- if (test_bit(STATUS_INT_ENABLED, &priv->status))
- iwl_legacy_enable_interrupts(priv);
- /* Re-enable RF_KILL if it occurred */
- else if (handled & CSR_INT_BIT_RF_KILL)
- iwl_legacy_enable_rfkill_int(priv);
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
- inta = iwl_read32(priv, CSR_INT);
- inta_mask = iwl_read32(priv, CSR_INT_MASK);
- inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
- IWL_DEBUG_ISR(priv,
- "End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
- "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
- }
-#endif
-}
-
-/*****************************************************************************
- *
- * sysfs attributes
- *
- *****************************************************************************/
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-
-/*
- * The following adds a new attribute to the sysfs representation
- * of this device driver (i.e. a new file in /sys/class/net/wlan0/device/)
- * used for controlling the debug level.
- *
- * See the level definitions in iwl for details.
- *
- * The debug_level being managed using sysfs below is a per device debug
- * level that is used instead of the global debug level if it (the per
- * device debug level) is set.
- */
-static ssize_t iwl4965_show_debug_level(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- return sprintf(buf, "0x%08X\n", iwl_legacy_get_debug_level(priv));
-}
-static ssize_t iwl4965_store_debug_level(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- unsigned long val;
- int ret;
-
- ret = strict_strtoul(buf, 0, &val);
- if (ret)
- IWL_ERR(priv, "%s is not in hex or decimal form.\n", buf);
- else {
- priv->debug_level = val;
- if (iwl_legacy_alloc_traffic_mem(priv))
- IWL_ERR(priv,
- "Not enough memory to generate traffic log\n");
- }
- return strnlen(buf, count);
-}
-
-static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO,
- iwl4965_show_debug_level, iwl4965_store_debug_level);
-
-
-#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
-
-
-static ssize_t iwl4965_show_temperature(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
-
- if (!iwl_legacy_is_alive(priv))
- return -EAGAIN;
-
- return sprintf(buf, "%d\n", priv->temperature);
-}
-
-static DEVICE_ATTR(temperature, S_IRUGO, iwl4965_show_temperature, NULL);
-
-static ssize_t iwl4965_show_tx_power(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
-
- if (!iwl_legacy_is_ready_rf(priv))
- return sprintf(buf, "off\n");
- else
- return sprintf(buf, "%d\n", priv->tx_power_user_lmt);
-}
-
-static ssize_t iwl4965_store_tx_power(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- unsigned long val;
- int ret;
-
- ret = strict_strtoul(buf, 10, &val);
- if (ret)
- IWL_INFO(priv, "%s is not in decimal form.\n", buf);
- else {
- ret = iwl_legacy_set_tx_power(priv, val, false);
- if (ret)
- IWL_ERR(priv, "failed setting tx power (0x%d).\n",
- ret);
- else
- ret = count;
- }
- return ret;
-}
-
-static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO,
- iwl4965_show_tx_power, iwl4965_store_tx_power);
-
-static struct attribute *iwl_sysfs_entries[] = {
- &dev_attr_temperature.attr,
- &dev_attr_tx_power.attr,
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- &dev_attr_debug_level.attr,
-#endif
- NULL
-};
-
-static struct attribute_group iwl_attribute_group = {
- .name = NULL, /* put in device directory */
- .attrs = iwl_sysfs_entries,
-};
-
-/******************************************************************************
- *
- * uCode download functions
- *
- ******************************************************************************/
-
-static void iwl4965_dealloc_ucode_pci(struct iwl_priv *priv)
-{
- iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_code);
- iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data);
- iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
- iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init);
- iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init_data);
- iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_boot);
-}
-
-static void iwl4965_nic_start(struct iwl_priv *priv)
-{
- /* Remove all resets to allow NIC to operate */
- iwl_write32(priv, CSR_RESET, 0);
-}
-
-static void iwl4965_ucode_callback(const struct firmware *ucode_raw,
- void *context);
-static int iwl4965_mac_setup_register(struct iwl_priv *priv,
- u32 max_probe_length);
-
-static int __must_check iwl4965_request_firmware(struct iwl_priv *priv, bool first)
-{
- const char *name_pre = priv->cfg->fw_name_pre;
- char tag[8];
-
- if (first) {
- priv->fw_index = priv->cfg->ucode_api_max;
- sprintf(tag, "%d", priv->fw_index);
- } else {
- priv->fw_index--;
- sprintf(tag, "%d", priv->fw_index);
- }
-
- if (priv->fw_index < priv->cfg->ucode_api_min) {
- IWL_ERR(priv, "no suitable firmware found!\n");
- return -ENOENT;
- }
-
- sprintf(priv->firmware_name, "%s%s%s", name_pre, tag, ".ucode");
-
- IWL_DEBUG_INFO(priv, "attempting to load firmware '%s'\n",
- priv->firmware_name);
-
- return request_firmware_nowait(THIS_MODULE, 1, priv->firmware_name,
- &priv->pci_dev->dev, GFP_KERNEL, priv,
- iwl4965_ucode_callback);
-}
-
-struct iwl4965_firmware_pieces {
- const void *inst, *data, *init, *init_data, *boot;
- size_t inst_size, data_size, init_size, init_data_size, boot_size;
-};
-
-static int iwl4965_load_firmware(struct iwl_priv *priv,
- const struct firmware *ucode_raw,
- struct iwl4965_firmware_pieces *pieces)
-{
- struct iwl_ucode_header *ucode = (void *)ucode_raw->data;
- u32 api_ver, hdr_size;
- const u8 *src;
-
- priv->ucode_ver = le32_to_cpu(ucode->ver);
- api_ver = IWL_UCODE_API(priv->ucode_ver);
-
- switch (api_ver) {
- default:
- case 0:
- case 1:
- case 2:
- hdr_size = 24;
- if (ucode_raw->size < hdr_size) {
- IWL_ERR(priv, "File size too small!\n");
- return -EINVAL;
- }
- pieces->inst_size = le32_to_cpu(ucode->v1.inst_size);
- pieces->data_size = le32_to_cpu(ucode->v1.data_size);
- pieces->init_size = le32_to_cpu(ucode->v1.init_size);
- pieces->init_data_size =
- le32_to_cpu(ucode->v1.init_data_size);
- pieces->boot_size = le32_to_cpu(ucode->v1.boot_size);
- src = ucode->v1.data;
- break;
- }
-
- /* Verify size of file vs. image size info in file's header */
- if (ucode_raw->size != hdr_size + pieces->inst_size +
- pieces->data_size + pieces->init_size +
- pieces->init_data_size + pieces->boot_size) {
-
- IWL_ERR(priv,
- "uCode file size %d does not match expected size\n",
- (int)ucode_raw->size);
- return -EINVAL;
- }
-
- pieces->inst = src;
- src += pieces->inst_size;
- pieces->data = src;
- src += pieces->data_size;
- pieces->init = src;
- src += pieces->init_size;
- pieces->init_data = src;
- src += pieces->init_data_size;
- pieces->boot = src;
- src += pieces->boot_size;
-
- return 0;
-}
-
-/**
- * iwl4965_ucode_callback - callback when firmware was loaded
- *
- * If loaded successfully, copies the firmware into buffers
- * for the card to fetch (via DMA).
- */
-static void
-iwl4965_ucode_callback(const struct firmware *ucode_raw, void *context)
-{
- struct iwl_priv *priv = context;
- struct iwl_ucode_header *ucode;
- int err;
- struct iwl4965_firmware_pieces pieces;
- const unsigned int api_max = priv->cfg->ucode_api_max;
- const unsigned int api_min = priv->cfg->ucode_api_min;
- u32 api_ver;
-
- u32 max_probe_length = 200;
- u32 standard_phy_calibration_size =
- IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;
-
- memset(&pieces, 0, sizeof(pieces));
-
- if (!ucode_raw) {
- if (priv->fw_index <= priv->cfg->ucode_api_max)
- IWL_ERR(priv,
- "request for firmware file '%s' failed.\n",
- priv->firmware_name);
- goto try_again;
- }
-
- IWL_DEBUG_INFO(priv, "Loaded firmware file '%s' (%zd bytes).\n",
- priv->firmware_name, ucode_raw->size);
-
- /* Make sure that we got at least the API version number */
- if (ucode_raw->size < 4) {
- IWL_ERR(priv, "File size way too small!\n");
- goto try_again;
- }
-
- /* Data from ucode file: header followed by uCode images */
- ucode = (struct iwl_ucode_header *)ucode_raw->data;
-
- err = iwl4965_load_firmware(priv, ucode_raw, &pieces);
-
- if (err)
- goto try_again;
-
- api_ver = IWL_UCODE_API(priv->ucode_ver);
-
- /*
- * api_ver should match the api version forming part of the
- * firmware filename ... but we don't check for that and only rely
- * on the API version read from firmware header from here on forward
- */
- if (api_ver < api_min || api_ver > api_max) {
- IWL_ERR(priv,
- "Driver unable to support your firmware API. "
- "Driver supports v%u, firmware is v%u.\n",
- api_max, api_ver);
- goto try_again;
- }
-
- if (api_ver != api_max)
- IWL_ERR(priv,
- "Firmware has old API version. Expected v%u, "
- "got v%u. New firmware can be obtained "
- "from http://www.intellinuxwireless.org.\n",
- api_max, api_ver);
-
- IWL_INFO(priv, "loaded firmware version %u.%u.%u.%u\n",
- IWL_UCODE_MAJOR(priv->ucode_ver),
- IWL_UCODE_MINOR(priv->ucode_ver),
- IWL_UCODE_API(priv->ucode_ver),
- IWL_UCODE_SERIAL(priv->ucode_ver));
-
- snprintf(priv->hw->wiphy->fw_version,
- sizeof(priv->hw->wiphy->fw_version),
- "%u.%u.%u.%u",
- IWL_UCODE_MAJOR(priv->ucode_ver),
- IWL_UCODE_MINOR(priv->ucode_ver),
- IWL_UCODE_API(priv->ucode_ver),
- IWL_UCODE_SERIAL(priv->ucode_ver));
-
- /*
- * For any of the failures below (before allocating pci memory)
- * we will try to load a version with a smaller API -- maybe the
- * user just got a corrupted version of the latest API.
- */
-
- IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n",
- priv->ucode_ver);
- IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %Zd\n",
- pieces.inst_size);
- IWL_DEBUG_INFO(priv, "f/w package hdr runtime data size = %Zd\n",
- pieces.data_size);
- IWL_DEBUG_INFO(priv, "f/w package hdr init inst size = %Zd\n",
- pieces.init_size);
- IWL_DEBUG_INFO(priv, "f/w package hdr init data size = %Zd\n",
- pieces.init_data_size);
- IWL_DEBUG_INFO(priv, "f/w package hdr boot inst size = %Zd\n",
- pieces.boot_size);
-
- /* Verify that uCode images will fit in card's SRAM */
- if (pieces.inst_size > priv->hw_params.max_inst_size) {
- IWL_ERR(priv, "uCode instr len %Zd too large to fit in\n",
- pieces.inst_size);
- goto try_again;
- }
-
- if (pieces.data_size > priv->hw_params.max_data_size) {
- IWL_ERR(priv, "uCode data len %Zd too large to fit in\n",
- pieces.data_size);
- goto try_again;
- }
-
- if (pieces.init_size > priv->hw_params.max_inst_size) {
- IWL_ERR(priv, "uCode init instr len %Zd too large to fit in\n",
- pieces.init_size);
- goto try_again;
- }
-
- if (pieces.init_data_size > priv->hw_params.max_data_size) {
- IWL_ERR(priv, "uCode init data len %Zd too large to fit in\n",
- pieces.init_data_size);
- goto try_again;
- }
-
- if (pieces.boot_size > priv->hw_params.max_bsm_size) {
- IWL_ERR(priv, "uCode boot instr len %Zd too large to fit in\n",
- pieces.boot_size);
- goto try_again;
- }
-
- /* Allocate ucode buffers for card's bus-master loading ... */
-
- /* Runtime instructions and 2 copies of data:
- * 1) unmodified from disk
- * 2) backup cache for save/restore during power-downs */
- priv->ucode_code.len = pieces.inst_size;
- iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_code);
-
- priv->ucode_data.len = pieces.data_size;
- iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data);
-
- priv->ucode_data_backup.len = pieces.data_size;
- iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
-
- if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr ||
- !priv->ucode_data_backup.v_addr)
- goto err_pci_alloc;
-
- /* Initialization instructions and data */
- if (pieces.init_size && pieces.init_data_size) {
- priv->ucode_init.len = pieces.init_size;
- iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init);
-
- priv->ucode_init_data.len = pieces.init_data_size;
- iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data);
-
- if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr)
- goto err_pci_alloc;
- }
-
- /* Bootstrap (instructions only, no data) */
- if (pieces.boot_size) {
- priv->ucode_boot.len = pieces.boot_size;
- iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot);
-
- if (!priv->ucode_boot.v_addr)
- goto err_pci_alloc;
- }
-
- /* Now that we can no longer fail, copy information */
-
- priv->sta_key_max_num = STA_KEY_MAX_NUM;
-
- /* Copy images into buffers for card's bus-master reads ... */
-
- /* Runtime instructions (first block of data in file) */
- IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode instr len %Zd\n",
- pieces.inst_size);
- memcpy(priv->ucode_code.v_addr, pieces.inst, pieces.inst_size);
-
- IWL_DEBUG_INFO(priv, "uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
- priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr);
-
- /*
- * Runtime data
- * NOTE: Copy into backup buffer will be done in iwl_up()
- */
- IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode data len %Zd\n",
- pieces.data_size);
- memcpy(priv->ucode_data.v_addr, pieces.data, pieces.data_size);
- memcpy(priv->ucode_data_backup.v_addr, pieces.data, pieces.data_size);
-
- /* Initialization instructions */
- if (pieces.init_size) {
- IWL_DEBUG_INFO(priv,
- "Copying (but not loading) init instr len %Zd\n",
- pieces.init_size);
- memcpy(priv->ucode_init.v_addr, pieces.init, pieces.init_size);
- }
-
- /* Initialization data */
- if (pieces.init_data_size) {
- IWL_DEBUG_INFO(priv,
- "Copying (but not loading) init data len %Zd\n",
- pieces.init_data_size);
- memcpy(priv->ucode_init_data.v_addr, pieces.init_data,
- pieces.init_data_size);
- }
-
- /* Bootstrap instructions */
- IWL_DEBUG_INFO(priv, "Copying (but not loading) boot instr len %Zd\n",
- pieces.boot_size);
- memcpy(priv->ucode_boot.v_addr, pieces.boot, pieces.boot_size);
-
- /*
-	 * figure out the offsets of the chain noise reset and gain commands
-	 * based on the size of the standard phy calibration command table
- */
- priv->_4965.phy_calib_chain_noise_reset_cmd =
- standard_phy_calibration_size;
- priv->_4965.phy_calib_chain_noise_gain_cmd =
- standard_phy_calibration_size + 1;
-
- /**************************************************
- * This is still part of probe() in a sense...
- *
- * 9. Setup and register with mac80211 and debugfs
- **************************************************/
- err = iwl4965_mac_setup_register(priv, max_probe_length);
- if (err)
- goto out_unbind;
-
- err = iwl_legacy_dbgfs_register(priv, DRV_NAME);
- if (err)
- IWL_ERR(priv,
- "failed to create debugfs files. Ignoring error: %d\n", err);
-
- err = sysfs_create_group(&priv->pci_dev->dev.kobj,
- &iwl_attribute_group);
- if (err) {
- IWL_ERR(priv, "failed to create sysfs device attributes\n");
- goto out_unbind;
- }
-
-	/* We have our copies now, allow the OS to release its copies */
- release_firmware(ucode_raw);
- complete(&priv->_4965.firmware_loading_complete);
- return;
-
- try_again:
- /* try next, if any */
- if (iwl4965_request_firmware(priv, false))
- goto out_unbind;
- release_firmware(ucode_raw);
- return;
-
- err_pci_alloc:
- IWL_ERR(priv, "failed to allocate pci memory\n");
- iwl4965_dealloc_ucode_pci(priv);
- out_unbind:
- complete(&priv->_4965.firmware_loading_complete);
- device_release_driver(&priv->pci_dev->dev);
- release_firmware(ucode_raw);
-}
-
-static const char * const desc_lookup_text[] = {
- "OK",
- "FAIL",
- "BAD_PARAM",
- "BAD_CHECKSUM",
- "NMI_INTERRUPT_WDG",
- "SYSASSERT",
- "FATAL_ERROR",
- "BAD_COMMAND",
- "HW_ERROR_TUNE_LOCK",
- "HW_ERROR_TEMPERATURE",
- "ILLEGAL_CHAN_FREQ",
- "VCC_NOT_STABLE",
- "FH_ERROR",
- "NMI_INTERRUPT_HOST",
- "NMI_INTERRUPT_ACTION_PT",
- "NMI_INTERRUPT_UNKNOWN",
- "UCODE_VERSION_MISMATCH",
- "HW_ERROR_ABS_LOCK",
- "HW_ERROR_CAL_LOCK_FAIL",
- "NMI_INTERRUPT_INST_ACTION_PT",
- "NMI_INTERRUPT_DATA_ACTION_PT",
- "NMI_TRM_HW_ER",
- "NMI_INTERRUPT_TRM",
- "NMI_INTERRUPT_BREAK_POINT",
- "DEBUG_0",
- "DEBUG_1",
- "DEBUG_2",
- "DEBUG_3",
-};
-
-static struct { char *name; u8 num; } advanced_lookup[] = {
- { "NMI_INTERRUPT_WDG", 0x34 },
- { "SYSASSERT", 0x35 },
- { "UCODE_VERSION_MISMATCH", 0x37 },
- { "BAD_COMMAND", 0x38 },
- { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
- { "FATAL_ERROR", 0x3D },
- { "NMI_TRM_HW_ERR", 0x46 },
- { "NMI_INTERRUPT_TRM", 0x4C },
- { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
- { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
- { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
- { "NMI_INTERRUPT_HOST", 0x66 },
- { "NMI_INTERRUPT_ACTION_PT", 0x7C },
- { "NMI_INTERRUPT_UNKNOWN", 0x84 },
- { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
- { "ADVANCED_SYSASSERT", 0 },
-};
-
-static const char *iwl4965_desc_lookup(u32 num)
-{
- int i;
- int max = ARRAY_SIZE(desc_lookup_text);
-
- if (num < max)
- return desc_lookup_text[num];
-
- max = ARRAY_SIZE(advanced_lookup) - 1;
- for (i = 0; i < max; i++) {
- if (advanced_lookup[i].num == num)
- break;
- }
- return advanced_lookup[i].name;
-}
-
-#define ERROR_START_OFFSET (1 * sizeof(u32))
-#define ERROR_ELEM_SIZE (7 * sizeof(u32))
-
-void iwl4965_dump_nic_error_log(struct iwl_priv *priv)
-{
- u32 data2, line;
- u32 desc, time, count, base, data1;
- u32 blink1, blink2, ilink1, ilink2;
- u32 pc, hcmd;
-
- if (priv->ucode_type == UCODE_INIT) {
- base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr);
- } else {
- base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
- }
-
- if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
- IWL_ERR(priv,
- "Not valid error log pointer 0x%08X for %s uCode\n",
- base, (priv->ucode_type == UCODE_INIT) ? "Init" : "RT");
- return;
- }
-
- count = iwl_legacy_read_targ_mem(priv, base);
-
- if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
- IWL_ERR(priv, "Start IWL Error Log Dump:\n");
- IWL_ERR(priv, "Status: 0x%08lX, count: %d\n",
- priv->status, count);
- }
-
- desc = iwl_legacy_read_targ_mem(priv, base + 1 * sizeof(u32));
- priv->isr_stats.err_code = desc;
- pc = iwl_legacy_read_targ_mem(priv, base + 2 * sizeof(u32));
- blink1 = iwl_legacy_read_targ_mem(priv, base + 3 * sizeof(u32));
- blink2 = iwl_legacy_read_targ_mem(priv, base + 4 * sizeof(u32));
- ilink1 = iwl_legacy_read_targ_mem(priv, base + 5 * sizeof(u32));
- ilink2 = iwl_legacy_read_targ_mem(priv, base + 6 * sizeof(u32));
- data1 = iwl_legacy_read_targ_mem(priv, base + 7 * sizeof(u32));
- data2 = iwl_legacy_read_targ_mem(priv, base + 8 * sizeof(u32));
- line = iwl_legacy_read_targ_mem(priv, base + 9 * sizeof(u32));
- time = iwl_legacy_read_targ_mem(priv, base + 11 * sizeof(u32));
- hcmd = iwl_legacy_read_targ_mem(priv, base + 22 * sizeof(u32));
-
- trace_iwlwifi_legacy_dev_ucode_error(priv, desc,
- time, data1, data2, line,
- blink1, blink2, ilink1, ilink2);
-
- IWL_ERR(priv, "Desc Time "
- "data1 data2 line\n");
- IWL_ERR(priv, "%-28s (0x%04X) %010u 0x%08X 0x%08X %u\n",
- iwl4965_desc_lookup(desc), desc, time, data1, data2, line);
- IWL_ERR(priv, "pc blink1 blink2 ilink1 ilink2 hcmd\n");
- IWL_ERR(priv, "0x%05X 0x%05X 0x%05X 0x%05X 0x%05X 0x%05X\n",
- pc, blink1, blink2, ilink1, ilink2, hcmd);
-}
-
-static void iwl4965_rf_kill_ct_config(struct iwl_priv *priv)
-{
- struct iwl_ct_kill_config cmd;
- unsigned long flags;
- int ret = 0;
-
- spin_lock_irqsave(&priv->lock, flags);
- iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
- CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- cmd.critical_temperature_R =
- cpu_to_le32(priv->hw_params.ct_kill_threshold);
-
- ret = iwl_legacy_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD,
- sizeof(cmd), &cmd);
- if (ret)
- IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
- else
- IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD "
- "succeeded, "
- "critical temperature is %d\n",
- priv->hw_params.ct_kill_threshold);
-}
-
-static const s8 default_queue_to_tx_fifo[] = {
- IWL_TX_FIFO_VO,
- IWL_TX_FIFO_VI,
- IWL_TX_FIFO_BE,
- IWL_TX_FIFO_BK,
- IWL49_CMD_FIFO_NUM,
- IWL_TX_FIFO_UNUSED,
- IWL_TX_FIFO_UNUSED,
-};
-
-static int iwl4965_alive_notify(struct iwl_priv *priv)
-{
- u32 a;
- unsigned long flags;
- int i, chan;
- u32 reg_val;
-
- spin_lock_irqsave(&priv->lock, flags);
-
- /* Clear 4965's internal Tx Scheduler data base */
- priv->scd_base_addr = iwl_legacy_read_prph(priv,
- IWL49_SCD_SRAM_BASE_ADDR);
- a = priv->scd_base_addr + IWL49_SCD_CONTEXT_DATA_OFFSET;
- for (; a < priv->scd_base_addr + IWL49_SCD_TX_STTS_BITMAP_OFFSET; a += 4)
- iwl_legacy_write_targ_mem(priv, a, 0);
- for (; a < priv->scd_base_addr + IWL49_SCD_TRANSLATE_TBL_OFFSET; a += 4)
- iwl_legacy_write_targ_mem(priv, a, 0);
- for (; a < priv->scd_base_addr +
- IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4)
- iwl_legacy_write_targ_mem(priv, a, 0);
-
-	/* Tell 4965 where to find Tx byte count tables */
- iwl_legacy_write_prph(priv, IWL49_SCD_DRAM_BASE_ADDR,
- priv->scd_bc_tbls.dma >> 10);
-
- /* Enable DMA channel */
- for (chan = 0; chan < FH49_TCSR_CHNL_NUM ; chan++)
- iwl_legacy_write_direct32(priv,
- FH_TCSR_CHNL_TX_CONFIG_REG(chan),
- FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
- FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
-
- /* Update FH chicken bits */
- reg_val = iwl_legacy_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
- iwl_legacy_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
- reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
-
- /* Disable chain mode for all queues */
- iwl_legacy_write_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, 0);
-
- /* Initialize each Tx queue (including the command queue) */
- for (i = 0; i < priv->hw_params.max_txq_num; i++) {
-
- /* TFD circular buffer read/write indexes */
- iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(i), 0);
- iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
-
- /* Max Tx Window size for Scheduler-ACK mode */
- iwl_legacy_write_targ_mem(priv, priv->scd_base_addr +
- IWL49_SCD_CONTEXT_QUEUE_OFFSET(i),
- (SCD_WIN_SIZE <<
- IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
- IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
-
- /* Frame limit */
- iwl_legacy_write_targ_mem(priv, priv->scd_base_addr +
- IWL49_SCD_CONTEXT_QUEUE_OFFSET(i) +
- sizeof(u32),
- (SCD_FRAME_LIMIT <<
- IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
- IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
-
- }
- iwl_legacy_write_prph(priv, IWL49_SCD_INTERRUPT_MASK,
- (1 << priv->hw_params.max_txq_num) - 1);
-
- /* Activate all Tx DMA/FIFO channels */
- iwl4965_txq_set_sched(priv, IWL_MASK(0, 6));
-
- iwl4965_set_wr_ptrs(priv, IWL_DEFAULT_CMD_QUEUE_NUM, 0);
-
-	/* make sure all queues are not stopped */
- memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
- for (i = 0; i < 4; i++)
- atomic_set(&priv->queue_stop_count[i], 0);
-
-	/* reset to 0 to enable all the queues first */
- priv->txq_ctx_active_msk = 0;
- /* Map each Tx/cmd queue to its corresponding fifo */
- BUILD_BUG_ON(ARRAY_SIZE(default_queue_to_tx_fifo) != 7);
-
- for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
- int ac = default_queue_to_tx_fifo[i];
-
- iwl_txq_ctx_activate(priv, i);
-
- if (ac == IWL_TX_FIFO_UNUSED)
- continue;
-
- iwl4965_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
- }
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- return 0;
-}
-
-/**
- * iwl4965_alive_start - called after REPLY_ALIVE notification is received
- * from protocol/runtime uCode (initialization uCode's
- * Alive gets handled by iwl_init_alive_start()).
- */
-static void iwl4965_alive_start(struct iwl_priv *priv)
-{
- int ret = 0;
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
- IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
-
- if (priv->card_alive.is_valid != UCODE_VALID_OK) {
- /* We had an error bringing up the hardware, so take it
- * all the way back down so we can try again */
- IWL_DEBUG_INFO(priv, "Alive failed.\n");
- goto restart;
- }
-
-	/* Initialization uCode has loaded the runtime uCode ... verify inst image.
-	 * This is a paranoid check, because we would not have gotten the
-	 * "runtime" alive notification if the code weren't properly loaded. */
- if (iwl4965_verify_ucode(priv)) {
- /* Runtime instruction load was bad;
- * take it all the way back down so we can try again */
- IWL_DEBUG_INFO(priv, "Bad runtime uCode load.\n");
- goto restart;
- }
-
- ret = iwl4965_alive_notify(priv);
- if (ret) {
- IWL_WARN(priv,
- "Could not complete ALIVE transition [ntf]: %d\n", ret);
- goto restart;
- }
-
-
- /* After the ALIVE response, we can send host commands to the uCode */
- set_bit(STATUS_ALIVE, &priv->status);
-
- /* Enable watchdog to monitor the driver tx queues */
- iwl_legacy_setup_watchdog(priv);
-
- if (iwl_legacy_is_rfkill(priv))
- return;
-
- ieee80211_wake_queues(priv->hw);
-
- priv->active_rate = IWL_RATES_MASK;
-
- if (iwl_legacy_is_associated_ctx(ctx)) {
- struct iwl_legacy_rxon_cmd *active_rxon =
- (struct iwl_legacy_rxon_cmd *)&ctx->active;
- /* apply any changes in staging */
- ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
- active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- } else {
- struct iwl_rxon_context *tmp;
- /* Initialize our rx_config data */
- for_each_context(priv, tmp)
- iwl_legacy_connection_init_rx_config(priv, tmp);
-
- if (priv->cfg->ops->hcmd->set_rxon_chain)
- priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
- }
-
- /* Configure bluetooth coexistence if enabled */
- iwl_legacy_send_bt_config(priv);
-
- iwl4965_reset_run_time_calib(priv);
-
- set_bit(STATUS_READY, &priv->status);
-
- /* Configure the adapter for unassociated operation */
- iwl_legacy_commit_rxon(priv, ctx);
-
- /* At this point, the NIC is initialized and operational */
- iwl4965_rf_kill_ct_config(priv);
-
- IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
- wake_up(&priv->wait_command_queue);
-
- iwl_legacy_power_update_mode(priv, true);
- IWL_DEBUG_INFO(priv, "Updated power mode\n");
-
- return;
-
- restart:
- queue_work(priv->workqueue, &priv->restart);
-}
-
-static void iwl4965_cancel_deferred_work(struct iwl_priv *priv);
-
-static void __iwl4965_down(struct iwl_priv *priv)
-{
- unsigned long flags;
- int exit_pending;
-
- IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n");
-
- iwl_legacy_scan_cancel_timeout(priv, 200);
-
- exit_pending = test_and_set_bit(STATUS_EXIT_PENDING, &priv->status);
-
-	/* Stop the TX queue watchdog. We need the STATUS_EXIT_PENDING bit set
-	 * to prevent the timer from being rearmed */
- del_timer_sync(&priv->watchdog);
-
- iwl_legacy_clear_ucode_stations(priv, NULL);
- iwl_legacy_dealloc_bcast_stations(priv);
- iwl_legacy_clear_driver_stations(priv);
-
- /* Unblock any waiting calls */
- wake_up_all(&priv->wait_command_queue);
-
- /* Wipe out the EXIT_PENDING status bit if we are not actually
- * exiting the module */
- if (!exit_pending)
- clear_bit(STATUS_EXIT_PENDING, &priv->status);
-
- /* stop and reset the on-board processor */
- iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
-
- /* tell the device to stop sending interrupts */
- spin_lock_irqsave(&priv->lock, flags);
- iwl_legacy_disable_interrupts(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
- iwl4965_synchronize_irq(priv);
-
- if (priv->mac80211_registered)
- ieee80211_stop_queues(priv->hw);
-
- /* If we have not previously called iwl_init() then
- * clear all bits but the RF Kill bit and return */
- if (!iwl_legacy_is_init(priv)) {
- priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) <<
- STATUS_RF_KILL_HW |
- test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
- STATUS_GEO_CONFIGURED |
- test_bit(STATUS_EXIT_PENDING, &priv->status) <<
- STATUS_EXIT_PENDING;
- goto exit;
- }
-
- /* ...otherwise clear out all the status bits but the RF Kill
- * bit and continue taking the NIC down. */
- priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
- STATUS_RF_KILL_HW |
- test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
- STATUS_GEO_CONFIGURED |
- test_bit(STATUS_FW_ERROR, &priv->status) <<
- STATUS_FW_ERROR |
- test_bit(STATUS_EXIT_PENDING, &priv->status) <<
- STATUS_EXIT_PENDING;
-
- iwl4965_txq_ctx_stop(priv);
- iwl4965_rxq_stop(priv);
-
- /* Power-down device's busmaster DMA clocks */
- iwl_legacy_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
- udelay(5);
-
- /* Make sure (redundant) we've released our request to stay awake */
- iwl_legacy_clear_bit(priv, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-
- /* Stop the device, and put it in low power state */
- iwl_legacy_apm_stop(priv);
-
- exit:
- memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp));
-
- dev_kfree_skb(priv->beacon_skb);
- priv->beacon_skb = NULL;
-
- /* clear out any free frames */
- iwl4965_clear_free_frames(priv);
-}
-
-static void iwl4965_down(struct iwl_priv *priv)
-{
- mutex_lock(&priv->mutex);
- __iwl4965_down(priv);
- mutex_unlock(&priv->mutex);
-
- iwl4965_cancel_deferred_work(priv);
-}
-
-#define HW_READY_TIMEOUT (50)
-
-static int iwl4965_set_hw_ready(struct iwl_priv *priv)
-{
- int ret = 0;
-
- iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
-
- /* See if we got it */
- ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
- CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
- HW_READY_TIMEOUT);
- if (ret != -ETIMEDOUT)
- priv->hw_ready = true;
- else
- priv->hw_ready = false;
-
- IWL_DEBUG_INFO(priv, "hardware %s\n",
- (priv->hw_ready == 1) ? "ready" : "not ready");
- return ret;
-}
-
-static int iwl4965_prepare_card_hw(struct iwl_priv *priv)
-{
- int ret = 0;
-
- IWL_DEBUG_INFO(priv, "iwl4965_prepare_card_hw enter\n");
-
- ret = iwl4965_set_hw_ready(priv);
- if (priv->hw_ready)
- return ret;
-
- /* If HW is not ready, prepare the conditions to check again */
- iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_PREPARE);
-
- ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
- ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
- CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);
-
- /* HW should be ready by now, check again. */
- if (ret != -ETIMEDOUT)
- iwl4965_set_hw_ready(priv);
-
- return ret;
-}
-
-#define MAX_HW_RESTARTS 5
-
-static int __iwl4965_up(struct iwl_priv *priv)
-{
- struct iwl_rxon_context *ctx;
- int i;
- int ret;
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
- IWL_WARN(priv, "Exit pending; will not bring the NIC up\n");
- return -EIO;
- }
-
- if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) {
- IWL_ERR(priv, "ucode not available for device bringup\n");
- return -EIO;
- }
-
- for_each_context(priv, ctx) {
- ret = iwl4965_alloc_bcast_station(priv, ctx);
- if (ret) {
- iwl_legacy_dealloc_bcast_stations(priv);
- return ret;
- }
- }
-
- iwl4965_prepare_card_hw(priv);
-
- if (!priv->hw_ready) {
- IWL_WARN(priv, "Exit HW not ready\n");
- return -EIO;
- }
-
- /* If platform's RF_KILL switch is NOT set to KILL */
- if (iwl_read32(priv,
- CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
- clear_bit(STATUS_RF_KILL_HW, &priv->status);
- else
- set_bit(STATUS_RF_KILL_HW, &priv->status);
-
- if (iwl_legacy_is_rfkill(priv)) {
- wiphy_rfkill_set_hw_state(priv->hw->wiphy, true);
-
- iwl_legacy_enable_interrupts(priv);
- IWL_WARN(priv, "Radio disabled by HW RF Kill switch\n");
- return 0;
- }
-
- iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
-
- /* must be initialised before iwl_hw_nic_init */
- priv->cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
-
- ret = iwl4965_hw_nic_init(priv);
- if (ret) {
- IWL_ERR(priv, "Unable to init nic\n");
- return ret;
- }
-
- /* make sure rfkill handshake bits are cleared */
- iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
- iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
- CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
-
- /* clear (again), then enable host interrupts */
- iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
- iwl_legacy_enable_interrupts(priv);
-
- /* really make sure rfkill handshake bits are cleared */
- iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
- iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
-
- /* Copy original ucode data image from disk into backup cache.
- * This will be used to initialize the on-board processor's
- * data SRAM for a clean start when the runtime program first loads. */
- memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr,
- priv->ucode_data.len);
-
- for (i = 0; i < MAX_HW_RESTARTS; i++) {
-
- /* load bootstrap state machine,
- * load bootstrap program into processor's memory,
- * prepare to load the "initialize" uCode */
- ret = priv->cfg->ops->lib->load_ucode(priv);
-
- if (ret) {
- IWL_ERR(priv, "Unable to set up bootstrap uCode: %d\n",
- ret);
- continue;
- }
-
- /* start card; "initialize" will load runtime ucode */
- iwl4965_nic_start(priv);
-
- IWL_DEBUG_INFO(priv, DRV_NAME " is coming up\n");
-
- return 0;
- }
-
- set_bit(STATUS_EXIT_PENDING, &priv->status);
- __iwl4965_down(priv);
- clear_bit(STATUS_EXIT_PENDING, &priv->status);
-
-	/* We tried to restart and configure the device for as long as our
-	 * patience could withstand */
- IWL_ERR(priv, "Unable to initialize device after %d attempts.\n", i);
- return -EIO;
-}
-
-
-/*****************************************************************************
- *
- * Workqueue callbacks
- *
- *****************************************************************************/
-
-static void iwl4965_bg_init_alive_start(struct work_struct *data)
-{
- struct iwl_priv *priv =
- container_of(data, struct iwl_priv, init_alive_start.work);
-
- mutex_lock(&priv->mutex);
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- goto out;
-
- priv->cfg->ops->lib->init_alive_start(priv);
-out:
- mutex_unlock(&priv->mutex);
-}
-
-static void iwl4965_bg_alive_start(struct work_struct *data)
-{
- struct iwl_priv *priv =
- container_of(data, struct iwl_priv, alive_start.work);
-
- mutex_lock(&priv->mutex);
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- goto out;
-
- iwl4965_alive_start(priv);
-out:
- mutex_unlock(&priv->mutex);
-}
-
-static void iwl4965_bg_run_time_calib_work(struct work_struct *work)
-{
- struct iwl_priv *priv = container_of(work, struct iwl_priv,
- run_time_calib_work);
-
- mutex_lock(&priv->mutex);
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
- test_bit(STATUS_SCANNING, &priv->status)) {
- mutex_unlock(&priv->mutex);
- return;
- }
-
- if (priv->start_calib) {
- iwl4965_chain_noise_calibration(priv,
- (void *)&priv->_4965.statistics);
- iwl4965_sensitivity_calibration(priv,
- (void *)&priv->_4965.statistics);
- }
-
- mutex_unlock(&priv->mutex);
-}
-
-static void iwl4965_bg_restart(struct work_struct *data)
-{
- struct iwl_priv *priv = container_of(data, struct iwl_priv, restart);
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return;
-
- if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) {
- struct iwl_rxon_context *ctx;
-
- mutex_lock(&priv->mutex);
- for_each_context(priv, ctx)
- ctx->vif = NULL;
- priv->is_open = 0;
-
- __iwl4965_down(priv);
-
- mutex_unlock(&priv->mutex);
- iwl4965_cancel_deferred_work(priv);
- ieee80211_restart_hw(priv->hw);
- } else {
- iwl4965_down(priv);
-
- mutex_lock(&priv->mutex);
- if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
- mutex_unlock(&priv->mutex);
- return;
- }
-
- __iwl4965_up(priv);
- mutex_unlock(&priv->mutex);
- }
-}
-
-static void iwl4965_bg_rx_replenish(struct work_struct *data)
-{
- struct iwl_priv *priv =
- container_of(data, struct iwl_priv, rx_replenish);
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return;
-
- mutex_lock(&priv->mutex);
- iwl4965_rx_replenish(priv);
- mutex_unlock(&priv->mutex);
-}
-
-/*****************************************************************************
- *
- * mac80211 entry point functions
- *
- *****************************************************************************/
-
-#define UCODE_READY_TIMEOUT (4 * HZ)
-
-/*
- * Not a mac80211 entry point function, but it fits in with all the
- * other mac80211 functions grouped here.
- */
-static int iwl4965_mac_setup_register(struct iwl_priv *priv,
- u32 max_probe_length)
-{
- int ret;
- struct ieee80211_hw *hw = priv->hw;
- struct iwl_rxon_context *ctx;
-
- hw->rate_control_algorithm = "iwl-4965-rs";
-
- /* Tell mac80211 our characteristics */
- hw->flags = IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_AMPDU_AGGREGATION |
- IEEE80211_HW_NEED_DTIM_PERIOD |
- IEEE80211_HW_SPECTRUM_MGMT |
- IEEE80211_HW_REPORTS_TX_ACK_STATUS;
-
- if (priv->cfg->sku & IWL_SKU_N)
- hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
- IEEE80211_HW_SUPPORTS_STATIC_SMPS;
-
- hw->sta_data_size = sizeof(struct iwl_station_priv);
- hw->vif_data_size = sizeof(struct iwl_vif_priv);
-
- for_each_context(priv, ctx) {
- hw->wiphy->interface_modes |= ctx->interface_modes;
- hw->wiphy->interface_modes |= ctx->exclusive_interface_modes;
- }
-
- hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
- WIPHY_FLAG_DISABLE_BEACON_HINTS;
-
- /*
- * For now, disable PS by default because it affects
- * RX performance significantly.
- */
- hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
-
- hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
- /* we create the 802.11 header and a zero-length SSID element */
- hw->wiphy->max_scan_ie_len = max_probe_length - 24 - 2;
-
- /* Default value; 4 EDCA QOS priorities */
- hw->queues = 4;
-
- hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
-
- if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
- priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
- &priv->bands[IEEE80211_BAND_2GHZ];
- if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
- priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
- &priv->bands[IEEE80211_BAND_5GHZ];
-
- iwl_legacy_leds_init(priv);
-
- ret = ieee80211_register_hw(priv->hw);
- if (ret) {
- IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
- return ret;
- }
- priv->mac80211_registered = 1;
-
- return 0;
-}
-
-
-int iwl4965_mac_start(struct ieee80211_hw *hw)
-{
- struct iwl_priv *priv = hw->priv;
- int ret;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- /* we should be verifying the device is ready to be opened */
- mutex_lock(&priv->mutex);
- ret = __iwl4965_up(priv);
- mutex_unlock(&priv->mutex);
-
- if (ret)
- return ret;
-
- if (iwl_legacy_is_rfkill(priv))
- goto out;
-
- IWL_DEBUG_INFO(priv, "Start UP work done.\n");
-
- /* Wait for START_ALIVE from Run Time ucode. Otherwise callbacks from
- * mac80211 will not be run successfully. */
- ret = wait_event_timeout(priv->wait_command_queue,
- test_bit(STATUS_READY, &priv->status),
- UCODE_READY_TIMEOUT);
- if (!ret) {
- if (!test_bit(STATUS_READY, &priv->status)) {
- IWL_ERR(priv, "START_ALIVE timeout after %dms.\n",
- jiffies_to_msecs(UCODE_READY_TIMEOUT));
- return -ETIMEDOUT;
- }
- }
-
- iwl4965_led_enable(priv);
-
-out:
- priv->is_open = 1;
- IWL_DEBUG_MAC80211(priv, "leave\n");
- return 0;
-}
-
-void iwl4965_mac_stop(struct ieee80211_hw *hw)
-{
- struct iwl_priv *priv = hw->priv;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- if (!priv->is_open)
- return;
-
- priv->is_open = 0;
-
- iwl4965_down(priv);
-
- flush_workqueue(priv->workqueue);
-
-	/* User space software may expect to get rfkill changes
-	 * even if the interface is down */
- iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
- iwl_legacy_enable_rfkill_int(priv);
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-
-void iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
-{
- struct iwl_priv *priv = hw->priv;
-
- IWL_DEBUG_MACDUMP(priv, "enter\n");
-
- IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
- ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
-
- if (iwl4965_tx_skb(priv, skb))
- dev_kfree_skb_any(skb);
-
- IWL_DEBUG_MACDUMP(priv, "leave\n");
-}
-
-void iwl4965_mac_update_tkip_key(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_key_conf *keyconf,
- struct ieee80211_sta *sta,
- u32 iv32, u16 *phase1key)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- iwl4965_update_tkip_key(priv, vif_priv->ctx, keyconf, sta,
- iv32, phase1key);
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-
-int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
- struct ieee80211_vif *vif, struct ieee80211_sta *sta,
- struct ieee80211_key_conf *key)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
- struct iwl_rxon_context *ctx = vif_priv->ctx;
- int ret;
- u8 sta_id;
- bool is_default_wep_key = false;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- if (priv->cfg->mod_params->sw_crypto) {
- IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n");
- return -EOPNOTSUPP;
- }
-
- sta_id = iwl_legacy_sta_id_or_broadcast(priv, vif_priv->ctx, sta);
- if (sta_id == IWL_INVALID_STATION)
- return -EINVAL;
-
- mutex_lock(&priv->mutex);
- iwl_legacy_scan_cancel_timeout(priv, 100);
-
- /*
-	 * If we are getting a WEP group key and we have not received any key
-	 * mapping so far, we are in legacy WEP mode (group key only); otherwise
-	 * we are in 1X mode.
-	 * In legacy WEP mode, we use a different host command to the uCode.
- */
- if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
- key->cipher == WLAN_CIPHER_SUITE_WEP104) &&
- !sta) {
- if (cmd == SET_KEY)
- is_default_wep_key = !ctx->key_mapping_keys;
- else
- is_default_wep_key =
- (key->hw_key_idx == HW_KEY_DEFAULT);
- }
-
- switch (cmd) {
- case SET_KEY:
- if (is_default_wep_key)
- ret = iwl4965_set_default_wep_key(priv,
- vif_priv->ctx, key);
- else
- ret = iwl4965_set_dynamic_key(priv, vif_priv->ctx,
- key, sta_id);
-
- IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n");
- break;
- case DISABLE_KEY:
- if (is_default_wep_key)
- ret = iwl4965_remove_default_wep_key(priv, ctx, key);
- else
- ret = iwl4965_remove_dynamic_key(priv, ctx,
- key, sta_id);
-
- IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n");
- break;
- default:
- ret = -EINVAL;
- }
-
- mutex_unlock(&priv->mutex);
- IWL_DEBUG_MAC80211(priv, "leave\n");
-
- return ret;
-}
-
-int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn,
- u8 buf_size)
-{
- struct iwl_priv *priv = hw->priv;
- int ret = -EINVAL;
-
- IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
- sta->addr, tid);
-
- if (!(priv->cfg->sku & IWL_SKU_N))
- return -EACCES;
-
- mutex_lock(&priv->mutex);
-
- switch (action) {
- case IEEE80211_AMPDU_RX_START:
- IWL_DEBUG_HT(priv, "start Rx\n");
- ret = iwl4965_sta_rx_agg_start(priv, sta, tid, *ssn);
- break;
- case IEEE80211_AMPDU_RX_STOP:
- IWL_DEBUG_HT(priv, "stop Rx\n");
- ret = iwl4965_sta_rx_agg_stop(priv, sta, tid);
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- ret = 0;
- break;
- case IEEE80211_AMPDU_TX_START:
- IWL_DEBUG_HT(priv, "start Tx\n");
- ret = iwl4965_tx_agg_start(priv, vif, sta, tid, ssn);
- break;
- case IEEE80211_AMPDU_TX_STOP:
- IWL_DEBUG_HT(priv, "stop Tx\n");
- ret = iwl4965_tx_agg_stop(priv, vif, sta, tid);
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- ret = 0;
- break;
- case IEEE80211_AMPDU_TX_OPERATIONAL:
- ret = 0;
- break;
- }
- mutex_unlock(&priv->mutex);
-
- return ret;
-}
-
-int iwl4965_mac_sta_add(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
- struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
- bool is_ap = vif->type == NL80211_IFTYPE_STATION;
- int ret;
- u8 sta_id;
-
- IWL_DEBUG_INFO(priv, "received request to add station %pM\n",
- sta->addr);
- mutex_lock(&priv->mutex);
- IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n",
- sta->addr);
- sta_priv->common.sta_id = IWL_INVALID_STATION;
-
- atomic_set(&sta_priv->pending_frames, 0);
-
- ret = iwl_legacy_add_station_common(priv, vif_priv->ctx, sta->addr,
- is_ap, sta, &sta_id);
- if (ret) {
- IWL_ERR(priv, "Unable to add station %pM (%d)\n",
- sta->addr, ret);
- /* Should we return success if return code is EEXIST ? */
- mutex_unlock(&priv->mutex);
- return ret;
- }
-
- sta_priv->common.sta_id = sta_id;
-
- /* Initialize rate scaling */
- IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n",
- sta->addr);
- iwl4965_rs_rate_init(priv, sta, sta_id);
- mutex_unlock(&priv->mutex);
-
- return 0;
-}
-
-void iwl4965_mac_channel_switch(struct ieee80211_hw *hw,
- struct ieee80211_channel_switch *ch_switch)
-{
- struct iwl_priv *priv = hw->priv;
- const struct iwl_channel_info *ch_info;
- struct ieee80211_conf *conf = &hw->conf;
- struct ieee80211_channel *channel = ch_switch->channel;
- struct iwl_ht_config *ht_conf = &priv->current_ht_config;
-
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- u16 ch;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- mutex_lock(&priv->mutex);
-
- if (iwl_legacy_is_rfkill(priv))
- goto out;
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
- test_bit(STATUS_SCANNING, &priv->status) ||
- test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
- goto out;
-
- if (!iwl_legacy_is_associated_ctx(ctx))
- goto out;
-
- if (!priv->cfg->ops->lib->set_channel_switch)
- goto out;
-
- ch = channel->hw_value;
- if (le16_to_cpu(ctx->active.channel) == ch)
- goto out;
-
- ch_info = iwl_legacy_get_channel_info(priv, channel->band, ch);
- if (!iwl_legacy_is_channel_valid(ch_info)) {
- IWL_DEBUG_MAC80211(priv, "invalid channel\n");
- goto out;
- }
-
- spin_lock_irq(&priv->lock);
-
- priv->current_ht_config.smps = conf->smps_mode;
-
- /* Configure HT40 channels */
- ctx->ht.enabled = conf_is_ht(conf);
- if (ctx->ht.enabled) {
- if (conf_is_ht40_minus(conf)) {
- ctx->ht.extension_chan_offset =
- IEEE80211_HT_PARAM_CHA_SEC_BELOW;
- ctx->ht.is_40mhz = true;
- } else if (conf_is_ht40_plus(conf)) {
- ctx->ht.extension_chan_offset =
- IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
- ctx->ht.is_40mhz = true;
- } else {
- ctx->ht.extension_chan_offset =
- IEEE80211_HT_PARAM_CHA_SEC_NONE;
- ctx->ht.is_40mhz = false;
- }
- } else
- ctx->ht.is_40mhz = false;
-
- if ((le16_to_cpu(ctx->staging.channel) != ch))
- ctx->staging.flags = 0;
-
- iwl_legacy_set_rxon_channel(priv, channel, ctx);
- iwl_legacy_set_rxon_ht(priv, ht_conf);
- iwl_legacy_set_flags_for_band(priv, ctx, channel->band, ctx->vif);
-
- spin_unlock_irq(&priv->lock);
-
- iwl_legacy_set_rate(priv);
- /*
- * at this point, staging_rxon has the
- * configuration for channel switch
- */
- set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
- priv->switch_channel = cpu_to_le16(ch);
- if (priv->cfg->ops->lib->set_channel_switch(priv, ch_switch)) {
- clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
- priv->switch_channel = 0;
- ieee80211_chswitch_done(ctx->vif, false);
- }
-
-out:
- mutex_unlock(&priv->mutex);
- IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-
-void iwl4965_configure_filter(struct ieee80211_hw *hw,
- unsigned int changed_flags,
- unsigned int *total_flags,
- u64 multicast)
-{
- struct iwl_priv *priv = hw->priv;
- __le32 filter_or = 0, filter_nand = 0;
- struct iwl_rxon_context *ctx;
-
-#define CHK(test, flag) do { \
- if (*total_flags & (test)) \
- filter_or |= (flag); \
- else \
- filter_nand |= (flag); \
- } while (0)
-
- IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
- changed_flags, *total_flags);
-
- CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
- /* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */
- CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
- CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
-
-#undef CHK
-
- mutex_lock(&priv->mutex);
-
- for_each_context(priv, ctx) {
- ctx->staging.filter_flags &= ~filter_nand;
- ctx->staging.filter_flags |= filter_or;
-
- /*
-		 * We do not commit directly here because the hardware may be
-		 * performing a scan, but the filter flag change will be
-		 * committed eventually anyway.
- */
- }
-
- mutex_unlock(&priv->mutex);
-
- /*
- * Receiving all multicast frames is always enabled by the
- * default flags setup in iwl_legacy_connection_init_rx_config()
- * since we currently do not support programming multicast
- * filters into the device.
- */
- *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
- FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
-}
-
-/*****************************************************************************
- *
- * driver setup and teardown
- *
- *****************************************************************************/
-
-static void iwl4965_bg_txpower_work(struct work_struct *work)
-{
- struct iwl_priv *priv = container_of(work, struct iwl_priv,
- txpower_work);
-
- mutex_lock(&priv->mutex);
-
- /* If a scan happened to start before we got here
- * then just return; the statistics notification will
- * kick off another scheduled work to compensate for
- * any temperature delta we missed here. */
- if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
- test_bit(STATUS_SCANNING, &priv->status))
- goto out;
-
-	/* Regardless of whether we are associated, we must reconfigure the
-	 * TX power since frames can be sent on non-radar channels while
-	 * not associated */
- priv->cfg->ops->lib->send_tx_power(priv);
-
- /* Update last_temperature to keep is_calib_needed from running
- * when it isn't needed... */
- priv->last_temperature = priv->temperature;
-out:
- mutex_unlock(&priv->mutex);
-}
-
-static void iwl4965_setup_deferred_work(struct iwl_priv *priv)
-{
- priv->workqueue = create_singlethread_workqueue(DRV_NAME);
-
- init_waitqueue_head(&priv->wait_command_queue);
-
- INIT_WORK(&priv->restart, iwl4965_bg_restart);
- INIT_WORK(&priv->rx_replenish, iwl4965_bg_rx_replenish);
- INIT_WORK(&priv->run_time_calib_work, iwl4965_bg_run_time_calib_work);
- INIT_DELAYED_WORK(&priv->init_alive_start, iwl4965_bg_init_alive_start);
- INIT_DELAYED_WORK(&priv->alive_start, iwl4965_bg_alive_start);
-
- iwl_legacy_setup_scan_deferred_work(priv);
-
- INIT_WORK(&priv->txpower_work, iwl4965_bg_txpower_work);
-
- init_timer(&priv->statistics_periodic);
- priv->statistics_periodic.data = (unsigned long)priv;
- priv->statistics_periodic.function = iwl4965_bg_statistics_periodic;
-
- init_timer(&priv->watchdog);
- priv->watchdog.data = (unsigned long)priv;
- priv->watchdog.function = iwl_legacy_bg_watchdog;
-
- tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
- iwl4965_irq_tasklet, (unsigned long)priv);
-}
-
-static void iwl4965_cancel_deferred_work(struct iwl_priv *priv)
-{
- cancel_work_sync(&priv->txpower_work);
- cancel_delayed_work_sync(&priv->init_alive_start);
- cancel_delayed_work(&priv->alive_start);
- cancel_work_sync(&priv->run_time_calib_work);
-
- iwl_legacy_cancel_scan_deferred_work(priv);
-
- del_timer_sync(&priv->statistics_periodic);
-}
-
-static void iwl4965_init_hw_rates(struct iwl_priv *priv,
- struct ieee80211_rate *rates)
-{
- int i;
-
- for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) {
- rates[i].bitrate = iwlegacy_rates[i].ieee * 5;
- rates[i].hw_value = i; /* Rate scaling will work on indexes */
- rates[i].hw_value_short = i;
- rates[i].flags = 0;
- if ((i >= IWL_FIRST_CCK_RATE) && (i <= IWL_LAST_CCK_RATE)) {
- /*
- * If CCK != 1M then set short preamble rate flag.
- */
- rates[i].flags |=
- (iwlegacy_rates[i].plcp == IWL_RATE_1M_PLCP) ?
- 0 : IEEE80211_RATE_SHORT_PREAMBLE;
- }
- }
-}
-/*
- * Acquire priv->lock before calling this function!
- */
-void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index)
-{
- iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR,
- (index & 0xff) | (txq_id << 8));
- iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(txq_id), index);
-}
-
-void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
- struct iwl_tx_queue *txq,
- int tx_fifo_id, int scd_retry)
-{
- int txq_id = txq->q.id;
-
- /* Find out whether to activate Tx queue */
- int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0;
-
- /* Set up and activate */
- iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
- (active << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
- (tx_fifo_id << IWL49_SCD_QUEUE_STTS_REG_POS_TXF) |
- (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_WSL) |
- (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
- IWL49_SCD_QUEUE_STTS_REG_MSK);
-
- txq->sched_retry = scd_retry;
-
- IWL_DEBUG_INFO(priv, "%s %s Queue %d on AC %d\n",
- active ? "Activate" : "Deactivate",
- scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
-}
-
-
-static int iwl4965_init_drv(struct iwl_priv *priv)
-{
- int ret;
-
- spin_lock_init(&priv->sta_lock);
- spin_lock_init(&priv->hcmd_lock);
-
- INIT_LIST_HEAD(&priv->free_frames);
-
- mutex_init(&priv->mutex);
-
- priv->ieee_channels = NULL;
- priv->ieee_rates = NULL;
- priv->band = IEEE80211_BAND_2GHZ;
-
- priv->iw_mode = NL80211_IFTYPE_STATION;
- priv->current_ht_config.smps = IEEE80211_SMPS_STATIC;
- priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
-
- /* initialize force reset */
- priv->force_reset.reset_duration = IWL_DELAY_NEXT_FORCE_FW_RELOAD;
-
- /* Choose which receivers/antennas to use */
- if (priv->cfg->ops->hcmd->set_rxon_chain)
- priv->cfg->ops->hcmd->set_rxon_chain(priv,
- &priv->contexts[IWL_RXON_CTX_BSS]);
-
- iwl_legacy_init_scan_params(priv);
-
- ret = iwl_legacy_init_channel_map(priv);
- if (ret) {
- IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
- goto err;
- }
-
- ret = iwl_legacy_init_geos(priv);
- if (ret) {
- IWL_ERR(priv, "initializing geos failed: %d\n", ret);
- goto err_free_channel_map;
- }
- iwl4965_init_hw_rates(priv, priv->ieee_rates);
-
- return 0;
-
-err_free_channel_map:
- iwl_legacy_free_channel_map(priv);
-err:
- return ret;
-}
-
-static void iwl4965_uninit_drv(struct iwl_priv *priv)
-{
- iwl4965_calib_free_results(priv);
- iwl_legacy_free_geos(priv);
- iwl_legacy_free_channel_map(priv);
- kfree(priv->scan_cmd);
-}
-
-static void iwl4965_hw_detect(struct iwl_priv *priv)
-{
- priv->hw_rev = _iwl_legacy_read32(priv, CSR_HW_REV);
- priv->hw_wa_rev = _iwl_legacy_read32(priv, CSR_HW_REV_WA_REG);
- priv->rev_id = priv->pci_dev->revision;
- IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", priv->rev_id);
-}
-
-static int iwl4965_set_hw_params(struct iwl_priv *priv)
-{
- priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
- priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
- if (priv->cfg->mod_params->amsdu_size_8K)
- priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_8K);
- else
- priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_4K);
-
- priv->hw_params.max_beacon_itrvl = IWL_MAX_UCODE_BEACON_INTERVAL;
-
- if (priv->cfg->mod_params->disable_11n)
- priv->cfg->sku &= ~IWL_SKU_N;
-
- /* Device-specific setup */
- return priv->cfg->ops->lib->set_hw_params(priv);
-}
-
-static const u8 iwl4965_bss_ac_to_fifo[] = {
- IWL_TX_FIFO_VO,
- IWL_TX_FIFO_VI,
- IWL_TX_FIFO_BE,
- IWL_TX_FIFO_BK,
-};
-
-static const u8 iwl4965_bss_ac_to_queue[] = {
- 0, 1, 2, 3,
-};
-
-static int
-iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- int err = 0, i;
- struct iwl_priv *priv;
- struct ieee80211_hw *hw;
- struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
- unsigned long flags;
- u16 pci_cmd;
-
- /************************
- * 1. Allocating HW data
- ************************/
-
- hw = iwl_legacy_alloc_all(cfg);
- if (!hw) {
- err = -ENOMEM;
- goto out;
- }
- priv = hw->priv;
- /* At this point both hw and priv are allocated. */
-
- /*
- * The default context is always valid,
- * more may be discovered when firmware
- * is loaded.
- */
- priv->valid_contexts = BIT(IWL_RXON_CTX_BSS);
-
- for (i = 0; i < NUM_IWL_RXON_CTX; i++)
- priv->contexts[i].ctxid = i;
-
- priv->contexts[IWL_RXON_CTX_BSS].always_active = true;
- priv->contexts[IWL_RXON_CTX_BSS].is_active = true;
- priv->contexts[IWL_RXON_CTX_BSS].rxon_cmd = REPLY_RXON;
- priv->contexts[IWL_RXON_CTX_BSS].rxon_timing_cmd = REPLY_RXON_TIMING;
- priv->contexts[IWL_RXON_CTX_BSS].rxon_assoc_cmd = REPLY_RXON_ASSOC;
- priv->contexts[IWL_RXON_CTX_BSS].qos_cmd = REPLY_QOS_PARAM;
- priv->contexts[IWL_RXON_CTX_BSS].ap_sta_id = IWL_AP_ID;
- priv->contexts[IWL_RXON_CTX_BSS].wep_key_cmd = REPLY_WEPKEY;
- priv->contexts[IWL_RXON_CTX_BSS].ac_to_fifo = iwl4965_bss_ac_to_fifo;
- priv->contexts[IWL_RXON_CTX_BSS].ac_to_queue = iwl4965_bss_ac_to_queue;
- priv->contexts[IWL_RXON_CTX_BSS].exclusive_interface_modes =
- BIT(NL80211_IFTYPE_ADHOC);
- priv->contexts[IWL_RXON_CTX_BSS].interface_modes =
- BIT(NL80211_IFTYPE_STATION);
- priv->contexts[IWL_RXON_CTX_BSS].ap_devtype = RXON_DEV_TYPE_AP;
- priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS;
- priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS;
- priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS;
-
- BUILD_BUG_ON(NUM_IWL_RXON_CTX != 1);
-
- SET_IEEE80211_DEV(hw, &pdev->dev);
-
- IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
- priv->cfg = cfg;
- priv->pci_dev = pdev;
- priv->inta_mask = CSR_INI_SET_MASK;
-
- if (iwl_legacy_alloc_traffic_mem(priv))
- IWL_ERR(priv, "Not enough memory to generate traffic log\n");
-
- /**************************
- * 2. Initializing PCI bus
- **************************/
- pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
- PCIE_LINK_STATE_CLKPM);
-
- if (pci_enable_device(pdev)) {
- err = -ENODEV;
- goto out_ieee80211_free_hw;
- }
-
- pci_set_master(pdev);
-
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
- if (!err)
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
- if (err) {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (!err)
- err = pci_set_consistent_dma_mask(pdev,
- DMA_BIT_MASK(32));
- /* both attempts failed: */
- if (err) {
- IWL_WARN(priv, "No suitable DMA available.\n");
- goto out_pci_disable_device;
- }
- }
-
- err = pci_request_regions(pdev, DRV_NAME);
- if (err)
- goto out_pci_disable_device;
-
- pci_set_drvdata(pdev, priv);
-
-
- /***********************
- * 3. Read REV register
- ***********************/
- priv->hw_base = pci_iomap(pdev, 0, 0);
- if (!priv->hw_base) {
- err = -ENODEV;
- goto out_pci_release_regions;
- }
-
- IWL_DEBUG_INFO(priv, "pci_resource_len = 0x%08llx\n",
- (unsigned long long) pci_resource_len(pdev, 0));
- IWL_DEBUG_INFO(priv, "pci_resource_base = %p\n", priv->hw_base);
-
-	/* these spin locks will be used in apm_ops.init and EEPROM access,
-	 * so we should initialize them now
-	 */
- spin_lock_init(&priv->reg_lock);
- spin_lock_init(&priv->lock);
-
- /*
- * stop and reset the on-board processor just in case it is in a
- * strange state ... like being left stranded by a primary kernel
- * and this is now the kdump kernel trying to start up
- */
- iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
-
- iwl4965_hw_detect(priv);
- IWL_INFO(priv, "Detected %s, REV=0x%X\n",
- priv->cfg->name, priv->hw_rev);
-
- /* We disable the RETRY_TIMEOUT register (0x41) to keep
- * PCI Tx retries from interfering with C3 CPU state */
- pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
-
- iwl4965_prepare_card_hw(priv);
- if (!priv->hw_ready) {
- IWL_WARN(priv, "Failed, HW not ready\n");
- goto out_iounmap;
- }
-
- /*****************
- * 4. Read EEPROM
- *****************/
- /* Read the EEPROM */
- err = iwl_legacy_eeprom_init(priv);
- if (err) {
- IWL_ERR(priv, "Unable to init EEPROM\n");
- goto out_iounmap;
- }
- err = iwl4965_eeprom_check_version(priv);
- if (err)
- goto out_free_eeprom;
-
- /* extract MAC Address */
- iwl4965_eeprom_get_mac(priv, priv->addresses[0].addr);
- IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->addresses[0].addr);
- priv->hw->wiphy->addresses = priv->addresses;
- priv->hw->wiphy->n_addresses = 1;
-
- /************************
- * 5. Setup HW constants
- ************************/
- if (iwl4965_set_hw_params(priv)) {
- IWL_ERR(priv, "failed to set hw parameters\n");
- goto out_free_eeprom;
- }
-
- /*******************
- * 6. Setup priv
- *******************/
-
- err = iwl4965_init_drv(priv);
- if (err)
- goto out_free_eeprom;
- /* At this point both hw and priv are initialized. */
-
- /********************
- * 7. Setup services
- ********************/
- spin_lock_irqsave(&priv->lock, flags);
- iwl_legacy_disable_interrupts(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- pci_enable_msi(priv->pci_dev);
-
- err = request_irq(priv->pci_dev->irq, iwl_legacy_isr,
- IRQF_SHARED, DRV_NAME, priv);
- if (err) {
- IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq);
- goto out_disable_msi;
- }
-
- iwl4965_setup_deferred_work(priv);
- iwl4965_setup_rx_handlers(priv);
-
- /*********************************************
- * 8. Enable interrupts and read RFKILL state
- *********************************************/
-
- /* enable rfkill interrupt: hw bug w/a */
- pci_read_config_word(priv->pci_dev, PCI_COMMAND, &pci_cmd);
- if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
- pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
- pci_write_config_word(priv->pci_dev, PCI_COMMAND, pci_cmd);
- }
-
- iwl_legacy_enable_rfkill_int(priv);
-
- /* If platform's RF_KILL switch is NOT set to KILL */
- if (iwl_read32(priv, CSR_GP_CNTRL) &
- CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
- clear_bit(STATUS_RF_KILL_HW, &priv->status);
- else
- set_bit(STATUS_RF_KILL_HW, &priv->status);
-
- wiphy_rfkill_set_hw_state(priv->hw->wiphy,
- test_bit(STATUS_RF_KILL_HW, &priv->status));
-
- iwl_legacy_power_initialize(priv);
-
- init_completion(&priv->_4965.firmware_loading_complete);
-
- err = iwl4965_request_firmware(priv, true);
- if (err)
- goto out_destroy_workqueue;
-
- return 0;
-
- out_destroy_workqueue:
- destroy_workqueue(priv->workqueue);
- priv->workqueue = NULL;
- free_irq(priv->pci_dev->irq, priv);
- out_disable_msi:
- pci_disable_msi(priv->pci_dev);
- iwl4965_uninit_drv(priv);
- out_free_eeprom:
- iwl_legacy_eeprom_free(priv);
- out_iounmap:
- pci_iounmap(pdev, priv->hw_base);
- out_pci_release_regions:
- pci_set_drvdata(pdev, NULL);
- pci_release_regions(pdev);
- out_pci_disable_device:
- pci_disable_device(pdev);
- out_ieee80211_free_hw:
- iwl_legacy_free_traffic_mem(priv);
- ieee80211_free_hw(priv->hw);
- out:
- return err;
-}
-
-static void __devexit iwl4965_pci_remove(struct pci_dev *pdev)
-{
- struct iwl_priv *priv = pci_get_drvdata(pdev);
- unsigned long flags;
-
- if (!priv)
- return;
-
- wait_for_completion(&priv->_4965.firmware_loading_complete);
-
- IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");
-
- iwl_legacy_dbgfs_unregister(priv);
- sysfs_remove_group(&pdev->dev.kobj, &iwl_attribute_group);
-
-	/* The ieee80211_unregister_hw() call will cause iwl_mac_stop() and
-	 * iwl4965_down() to be called; since we are removing the device
-	 * we need to set the STATUS_EXIT_PENDING bit.
-	 */
- set_bit(STATUS_EXIT_PENDING, &priv->status);
-
- iwl_legacy_leds_exit(priv);
-
- if (priv->mac80211_registered) {
- ieee80211_unregister_hw(priv->hw);
- priv->mac80211_registered = 0;
- } else {
- iwl4965_down(priv);
- }
-
- /*
- * Make sure device is reset to low power before unloading driver.
- * This may be redundant with iwl4965_down(), but there are paths to
- * run iwl4965_down() without calling apm_ops.stop(), and there are
- * paths to avoid running iwl4965_down() at all before leaving driver.
- * This (inexpensive) call *makes sure* device is reset.
- */
- iwl_legacy_apm_stop(priv);
-
- /* make sure we flush any pending irq or
- * tasklet for the driver
- */
- spin_lock_irqsave(&priv->lock, flags);
- iwl_legacy_disable_interrupts(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- iwl4965_synchronize_irq(priv);
-
- iwl4965_dealloc_ucode_pci(priv);
-
- if (priv->rxq.bd)
- iwl4965_rx_queue_free(priv, &priv->rxq);
- iwl4965_hw_txq_ctx_free(priv);
-
- iwl_legacy_eeprom_free(priv);
-
-
- /*netif_stop_queue(dev); */
- flush_workqueue(priv->workqueue);
-
- /* ieee80211_unregister_hw calls iwl_mac_stop, which flushes
- * priv->workqueue... so we can't take down the workqueue
- * until now... */
- destroy_workqueue(priv->workqueue);
- priv->workqueue = NULL;
- iwl_legacy_free_traffic_mem(priv);
-
- free_irq(priv->pci_dev->irq, priv);
- pci_disable_msi(priv->pci_dev);
- pci_iounmap(pdev, priv->hw_base);
- pci_release_regions(pdev);
- pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
-
- iwl4965_uninit_drv(priv);
-
- dev_kfree_skb(priv->beacon_skb);
-
- ieee80211_free_hw(priv->hw);
-}
-
-/*
- * Activate/deactivate Tx DMA/FIFO channels according to the Tx FIFO mask.
- * Must be called under priv->lock and with mac access held.
- */
-void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask)
-{
- iwl_legacy_write_prph(priv, IWL49_SCD_TXFACT, mask);
-}
-
-/*****************************************************************************
- *
- * driver and module entry point
- *
- *****************************************************************************/
-
-/* Hardware specific file defines the PCI IDs table for that hardware module */
-static DEFINE_PCI_DEVICE_TABLE(iwl4965_hw_card_ids) = {
-#if defined(CONFIG_IWL4965_MODULE) || defined(CONFIG_IWL4965)
- {IWL_PCI_DEVICE(0x4229, PCI_ANY_ID, iwl4965_cfg)},
- {IWL_PCI_DEVICE(0x4230, PCI_ANY_ID, iwl4965_cfg)},
-#endif /* CONFIG_IWL4965 */
-
- {0}
-};
-MODULE_DEVICE_TABLE(pci, iwl4965_hw_card_ids);
-
-static struct pci_driver iwl4965_driver = {
- .name = DRV_NAME,
- .id_table = iwl4965_hw_card_ids,
- .probe = iwl4965_pci_probe,
- .remove = __devexit_p(iwl4965_pci_remove),
- .driver.pm = IWL_LEGACY_PM_OPS,
-};
-
-static int __init iwl4965_init(void)
-{
-
- int ret;
- pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
- pr_info(DRV_COPYRIGHT "\n");
-
- ret = iwl4965_rate_control_register();
- if (ret) {
- pr_err("Unable to register rate control algorithm: %d\n", ret);
- return ret;
- }
-
- ret = pci_register_driver(&iwl4965_driver);
- if (ret) {
- pr_err("Unable to initialize PCI module\n");
- goto error_register;
- }
-
- return ret;
-
-error_register:
- iwl4965_rate_control_unregister();
- return ret;
-}
-
-static void __exit iwl4965_exit(void)
-{
- pci_unregister_driver(&iwl4965_driver);
- iwl4965_rate_control_unregister();
-}
-
-module_exit(iwl4965_exit);
-module_init(iwl4965_init);
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-module_param_named(debug, iwlegacy_debug_level, uint, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(debug, "debug output mask");
-#endif
-
-module_param_named(swcrypto, iwl4965_mod_params.sw_crypto, int, S_IRUGO);
-MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
-module_param_named(queues_num, iwl4965_mod_params.num_of_queues, int, S_IRUGO);
-MODULE_PARM_DESC(queues_num, "number of hw queues.");
-module_param_named(11n_disable, iwl4965_mod_params.disable_11n, int, S_IRUGO);
-MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
-module_param_named(amsdu_size_8K, iwl4965_mod_params.amsdu_size_8K,
- int, S_IRUGO);
-MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
-module_param_named(fw_restart, iwl4965_mod_params.restart_fw, int, S_IRUGO);
-MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
diff --git a/drivers/net/wireless/iwlegacy/iwl-prph.h b/drivers/net/wireless/iwlegacy/prph.h
index 30a493003ab0..ffec4b4a248a 100644
--- a/drivers/net/wireless/iwlegacy/iwl-prph.h
+++ b/drivers/net/wireless/iwlegacy/prph.h
@@ -60,8 +60,8 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
-#ifndef __iwl_legacy_prph_h__
-#define __iwl_legacy_prph_h__
+#ifndef __il_prph_h__
+#define __il_prph_h__
/*
* Registers in this file are internal, not PCI bus memory mapped.
@@ -91,9 +91,9 @@
#define APMG_PS_CTRL_VAL_RESET_REQ (0x04000000)
#define APMG_PS_CTRL_MSK_PWR_SRC (0x03000000)
#define APMG_PS_CTRL_VAL_PWR_SRC_VMAIN (0x00000000)
-#define APMG_PS_CTRL_VAL_PWR_SRC_MAX (0x01000000) /* 3945 only */
+#define APMG_PS_CTRL_VAL_PWR_SRC_MAX (0x01000000) /* 3945 only */
#define APMG_PS_CTRL_VAL_PWR_SRC_VAUX (0x02000000)
-#define APMG_SVR_VOLTAGE_CONFIG_BIT_MSK (0x000001E0) /* bit 8:5 */
+#define APMG_SVR_VOLTAGE_CONFIG_BIT_MSK (0x000001E0) /* bit 8:5 */
#define APMG_SVR_DIGITAL_VOLTAGE_1_32 (0x00000060)
#define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800)
@@ -120,13 +120,13 @@
*
* 1) Initialization -- performs hardware calibration and sets up some
* internal data, then notifies host via "initialize alive" notification
- * (struct iwl_init_alive_resp) that it has completed all of its work.
+ * (struct il_init_alive_resp) that it has completed all of its work.
* After signal from host, it then loads and starts the runtime program.
* The initialization program must be used when initially setting up the
* NIC after loading the driver.
*
* 2) Runtime/Protocol -- performs all normal runtime operations. This
- * notifies host via "alive" notification (struct iwl_alive_resp) that it
+ * notifies host via "alive" notification (struct il_alive_resp) that it
* is ready to be used.
*
* When initializing the NIC, the host driver does the following procedure:
@@ -189,7 +189,7 @@
* procedure.
*
* This save/restore method is mostly for autonomous power management during
- * normal operation (result of POWER_TABLE_CMD). Platform suspend/resume and
+ * normal operation (result of C_POWER_TBL). Platform suspend/resume and
* RFKILL should use complete restarts (with total re-initialization) of uCode,
* allowing total shutdown (including BSM memory).
*
@@ -202,19 +202,19 @@
*/
/* BSM bit fields */
-#define BSM_WR_CTRL_REG_BIT_START (0x80000000) /* start boot load now */
-#define BSM_WR_CTRL_REG_BIT_START_EN (0x40000000) /* enable boot after pwrup*/
-#define BSM_DRAM_INST_LOAD (0x80000000) /* start program load now */
+#define BSM_WR_CTRL_REG_BIT_START (0x80000000) /* start boot load now */
+#define BSM_WR_CTRL_REG_BIT_START_EN (0x40000000) /* enable boot after pwrup */
+#define BSM_DRAM_INST_LOAD (0x80000000) /* start program load now */
/* BSM addresses */
#define BSM_BASE (PRPH_BASE + 0x3400)
#define BSM_END (PRPH_BASE + 0x3800)
-#define BSM_WR_CTRL_REG (BSM_BASE + 0x000) /* ctl and status */
-#define BSM_WR_MEM_SRC_REG (BSM_BASE + 0x004) /* source in BSM mem */
-#define BSM_WR_MEM_DST_REG (BSM_BASE + 0x008) /* dest in SRAM mem */
-#define BSM_WR_DWCOUNT_REG (BSM_BASE + 0x00C) /* bytes */
-#define BSM_WR_STATUS_REG (BSM_BASE + 0x010) /* bit 0: 1 == done */
+#define BSM_WR_CTRL_REG (BSM_BASE + 0x000) /* ctl and status */
+#define BSM_WR_MEM_SRC_REG (BSM_BASE + 0x004) /* source in BSM mem */
+#define BSM_WR_MEM_DST_REG (BSM_BASE + 0x008) /* dest in SRAM mem */
+#define BSM_WR_DWCOUNT_REG (BSM_BASE + 0x00C) /* bytes */
+#define BSM_WR_STATUS_REG (BSM_BASE + 0x010) /* bit 0: 1 == done */
/*
* Pointers and size regs for bootstrap load and data SRAM save/restore.
@@ -231,8 +231,7 @@
* Read/write, address range from LOWER_BOUND to (LOWER_BOUND + SIZE -1)
*/
#define BSM_SRAM_LOWER_BOUND (PRPH_BASE + 0x3800)
-#define BSM_SRAM_SIZE (1024) /* bytes */
-
+#define BSM_SRAM_SIZE (1024) /* bytes */
/* 3945 Tx scheduler registers */
#define ALM_SCD_BASE (PRPH_BASE + 0x2E00)
@@ -255,7 +254,7 @@
* but one DMA channel may take input from several queues.
*
* Tx DMA FIFOs have dedicated purposes. For 4965, they are used as follows
- * (cf. default_queue_to_tx_fifo in iwl-4965.c):
+ * (cf. default_queue_to_tx_fifo in 4965.c):
*
* 0 -- EDCA BK (background) frames, lowest priority
* 1 -- EDCA BE (best effort) frames, normal priority
@@ -274,20 +273,20 @@
* The driver sets up each queue to work in one of two modes:
*
* 1) Scheduler-Ack, in which the scheduler automatically supports a
- * block-ack (BA) window of up to 64 TFDs. In this mode, each queue
+ * block-ack (BA) win of up to 64 TFDs. In this mode, each queue
* contains TFDs for a unique combination of Recipient Address (RA)
* and Traffic Identifier (TID), that is, traffic of a given
* Quality-Of-Service (QOS) priority, destined for a single station.
*
* In scheduler-ack mode, the scheduler keeps track of the Tx status of
- * each frame within the BA window, including whether it's been transmitted,
+ * each frame within the BA win, including whether it's been transmitted,
* and whether it's been acknowledged by the receiving station. The device
* automatically processes block-acks received from the receiving STA,
* and reschedules un-acked frames to be retransmitted (successful
* Tx completion may end up being out-of-order).
*
* The driver must maintain the queue's Byte Count table in host DRAM
- * (struct iwl4965_sched_queue_byte_cnt_tbl) for this mode.
+ * (struct il4965_sched_queue_byte_cnt_tbl) for this mode.
* This mode does not support fragmentation.
*
* 2) FIFO (a.k.a. non-Scheduler-ACK), in which each TFD is processed in order.
@@ -316,34 +315,34 @@
*/
/**
- * Max Tx window size is the max number of contiguous TFDs that the scheduler
+ * Max Tx win size is the max number of contiguous TFDs that the scheduler
* can keep track of at one time when creating block-ack chains of frames.
* Note that "64" matches the number of ack bits in a block-ack packet.
* Driver should use SCD_WIN_SIZE and SCD_FRAME_LIMIT values to initialize
- * IWL49_SCD_CONTEXT_QUEUE_OFFSET(x) values.
+ * IL49_SCD_CONTEXT_QUEUE_OFFSET(x) values.
*/
#define SCD_WIN_SIZE 64
#define SCD_FRAME_LIMIT 64
/* SCD registers are internal, must be accessed via HBUS_TARG_PRPH regs */
-#define IWL49_SCD_START_OFFSET 0xa02c00
+#define IL49_SCD_START_OFFSET 0xa02c00
/*
* 4965 tells driver SRAM address for internal scheduler structs via this reg.
* Value is valid only after "Alive" response from uCode.
*/
-#define IWL49_SCD_SRAM_BASE_ADDR (IWL49_SCD_START_OFFSET + 0x0)
+#define IL49_SCD_SRAM_BASE_ADDR (IL49_SCD_START_OFFSET + 0x0)
/*
* Driver may need to update queue-empty bits after changing queue's
- * write and read pointers (indexes) during (re-)initialization (i.e. when
+ * write and read pointers (idxes) during (re-)initialization (i.e. when
* scheduler is not tracking what's happening).
* Bit fields:
* 31-16: Write mask -- 1: update empty bit, 0: don't change empty bit
* 15-00: Empty state, one for each queue -- 1: empty, 0: non-empty
* NOTE: This register is not used by Linux driver.
*/
-#define IWL49_SCD_EMPTY_BITS (IWL49_SCD_START_OFFSET + 0x4)
+#define IL49_SCD_EMPTY_BITS (IL49_SCD_START_OFFSET + 0x4)
/*
* Physical base address of array of byte count (BC) circular buffers (CBs).
@@ -351,11 +350,11 @@
* This register points to BC CB for queue 0, must be on 1024-byte boundary.
* Others are spaced by 1024 bytes.
 * Each BC CB is 2 bytes * (256 + 64) = 640 bytes, followed by 384 bytes pad.
- * (Index into a queue's BC CB) = (index into queue's TFD CB) = (SSN & 0xff).
+ * (Index into a queue's BC CB) = (idx into queue's TFD CB) = (SSN & 0xff).
* Bit fields:
* 25-00: Byte Count CB physical address [35:10], must be 1024-byte aligned.
*/
-#define IWL49_SCD_DRAM_BASE_ADDR (IWL49_SCD_START_OFFSET + 0x10)
+#define IL49_SCD_DRAM_BASE_ADDR (IL49_SCD_START_OFFSET + 0x10)
/*
* Enables any/all Tx DMA/FIFO channels.
@@ -364,23 +363,23 @@
* Bit fields:
* 7- 0: Enable (1), disable (0), one bit for each channel 0-7
*/
-#define IWL49_SCD_TXFACT (IWL49_SCD_START_OFFSET + 0x1c)
+#define IL49_SCD_TXFACT (IL49_SCD_START_OFFSET + 0x1c)
/*
- * Queue (x) Write Pointers (indexes, really!), one for each Tx queue.
+ * Queue (x) Write Pointers (idxes, really!), one for each Tx queue.
* Initialized and updated by driver as new TFDs are added to queue.
- * NOTE: If using Block Ack, index must correspond to frame's
- * Start Sequence Number; index = (SSN & 0xff)
+ * NOTE: If using Block Ack, idx must correspond to frame's
+ * Start Sequence Number; idx = (SSN & 0xff)
* NOTE: Alternative to HBUS_TARG_WRPTR, which is what Linux driver uses?
*/
-#define IWL49_SCD_QUEUE_WRPTR(x) (IWL49_SCD_START_OFFSET + 0x24 + (x) * 4)
+#define IL49_SCD_QUEUE_WRPTR(x) (IL49_SCD_START_OFFSET + 0x24 + (x) * 4)
/*
- * Queue (x) Read Pointers (indexes, really!), one for each Tx queue.
- * For FIFO mode, index indicates next frame to transmit.
- * For Scheduler-ACK mode, index indicates first frame in Tx window.
+ * Queue (x) Read Pointers (idxes, really!), one for each Tx queue.
+ * For FIFO mode, idx indicates next frame to transmit.
+ * For Scheduler-ACK mode, idx indicates first frame in Tx win.
* Initialized by driver, updated by scheduler.
*/
-#define IWL49_SCD_QUEUE_RDPTR(x) (IWL49_SCD_START_OFFSET + 0x64 + (x) * 4)
+#define IL49_SCD_QUEUE_RDPTR(x) (IL49_SCD_START_OFFSET + 0x64 + (x) * 4)
/*
* Select which queues work in chain mode (1) vs. not (0).
@@ -391,18 +390,18 @@
 * NOTE: If the driver sets up a queue for chain mode, it should also set up
 * Scheduler-ACK mode for that queue, via SCD_QUEUE_STATUS_BITS(x).
*/
-#define IWL49_SCD_QUEUECHAIN_SEL (IWL49_SCD_START_OFFSET + 0xd0)
+#define IL49_SCD_QUEUECHAIN_SEL (IL49_SCD_START_OFFSET + 0xd0)
/*
* Select which queues interrupt driver when scheduler increments
- * a queue's read pointer (index).
+ * a queue's read pointer (idx).
* Bit fields:
* 31-16: Reserved
* 15-00: Interrupt enable, one bit for each queue -- 1: enabled, 0: disabled
* NOTE: This functionality is apparently a no-op; driver relies on interrupts
* from Rx queue to read Tx command responses and update Tx queues.
*/
-#define IWL49_SCD_INTERRUPT_MASK (IWL49_SCD_START_OFFSET + 0xe4)
+#define IL49_SCD_INTERRUPT_MASK (IL49_SCD_START_OFFSET + 0xe4)
/*
* Queue search status registers. One for each queue.
@@ -414,7 +413,7 @@
* Driver should init to "1" for aggregation mode, or "0" otherwise.
* 7-6: Driver should init to "0"
* 5: Window Size Left; indicates whether scheduler can request
- * another TFD, based on window size, etc. Driver should init
+ * another TFD, based on win size, etc. Driver should init
* this bit to "1" for aggregation mode, or "0" for non-agg.
* 4-1: Tx FIFO to use (range 0-7).
* 0: Queue is active (1), not active (0).
@@ -423,18 +422,18 @@
* NOTE: If enabling Scheduler-ACK mode, chain mode should also be enabled
* via SCD_QUEUECHAIN_SEL.
*/
-#define IWL49_SCD_QUEUE_STATUS_BITS(x)\
- (IWL49_SCD_START_OFFSET + 0x104 + (x) * 4)
+#define IL49_SCD_QUEUE_STATUS_BITS(x)\
+ (IL49_SCD_START_OFFSET + 0x104 + (x) * 4)
/* Bit field positions */
-#define IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE (0)
-#define IWL49_SCD_QUEUE_STTS_REG_POS_TXF (1)
-#define IWL49_SCD_QUEUE_STTS_REG_POS_WSL (5)
-#define IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK (8)
+#define IL49_SCD_QUEUE_STTS_REG_POS_ACTIVE (0)
+#define IL49_SCD_QUEUE_STTS_REG_POS_TXF (1)
+#define IL49_SCD_QUEUE_STTS_REG_POS_WSL (5)
+#define IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK (8)
/* Write masks */
-#define IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (10)
-#define IWL49_SCD_QUEUE_STTS_REG_MSK (0x0007FC00)
+#define IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (10)
+#define IL49_SCD_QUEUE_STTS_REG_MSK (0x0007FC00)
/**
* 4965 internal SRAM structures for scheduler, shared with driver ...
@@ -460,7 +459,7 @@
* each queue's entry as follows:
*
* LS Dword bit fields:
- * 0-06: Max Tx window size for Scheduler-ACK. Driver should init to 64.
+ * 0-06: Max Tx win size for Scheduler-ACK. Driver should init to 64.
*
* MS Dword bit fields:
* 16-22: Frame limit. Driver should init to 10 (0xa).
@@ -470,14 +469,14 @@
* Init must be done after driver receives "Alive" response from 4965 uCode,
* and when setting up queue for aggregation.
*/
-#define IWL49_SCD_CONTEXT_DATA_OFFSET 0x380
-#define IWL49_SCD_CONTEXT_QUEUE_OFFSET(x) \
- (IWL49_SCD_CONTEXT_DATA_OFFSET + ((x) * 8))
+#define IL49_SCD_CONTEXT_DATA_OFFSET 0x380
+#define IL49_SCD_CONTEXT_QUEUE_OFFSET(x) \
+ (IL49_SCD_CONTEXT_DATA_OFFSET + ((x) * 8))
-#define IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS (0)
-#define IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK (0x0000007F)
-#define IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16)
-#define IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000)
+#define IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS (0)
+#define IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK (0x0000007F)
+#define IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16)
+#define IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000)
/*
* Tx Status Bitmap
@@ -486,7 +485,7 @@
* "Alive" notification from uCode. Area is used only by device itself;
* no other support (besides clearing) is required from driver.
*/
-#define IWL49_SCD_TX_STTS_BITMAP_OFFSET 0x400
+#define IL49_SCD_TX_STTS_BITMAP_OFFSET 0x400
/*
* RAxTID to queue translation mapping.
@@ -494,7 +493,7 @@
 * When a queue is in Scheduler-ACK mode, frames placed in that queue must be
* for only one combination of receiver address (RA) and traffic ID (TID), i.e.
* one QOS priority level destined for one station (for this wireless link,
- * not final destination). The SCD_TRANSLATE_TABLE area provides 16 16-bit
+ * not final destination). The SCD_TRANSLATE_TBL area provides 16 16-bit
* mappings, one for each of the 16 queues. If queue is not in Scheduler-ACK
* mode, the device ignores the mapping value.
*
@@ -508,16 +507,16 @@
* must read a dword-aligned value from device SRAM, replace the 16-bit map
* value of interest, and write the dword value back into device SRAM.
*/
-#define IWL49_SCD_TRANSLATE_TBL_OFFSET 0x500
+#define IL49_SCD_TRANSLATE_TBL_OFFSET 0x500
/* Find translation table dword to read/write for given queue */
-#define IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(x) \
- ((IWL49_SCD_TRANSLATE_TBL_OFFSET + ((x) * 2)) & 0xfffffffc)
+#define IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(x) \
+ ((IL49_SCD_TRANSLATE_TBL_OFFSET + ((x) * 2)) & 0xfffffffc)
-#define IWL_SCD_TXFIFO_POS_TID (0)
-#define IWL_SCD_TXFIFO_POS_RA (4)
-#define IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK (0x01FF)
+#define IL_SCD_TXFIFO_POS_TID (0)
+#define IL_SCD_TXFIFO_POS_RA (4)
+#define IL_SCD_QUEUE_RA_TID_MAP_RATID_MSK (0x01FF)
/*********************** END TX SCHEDULER *************************************/
-#endif /* __iwl_legacy_prph_h__ */
+#endif /* __il_prph_h__ */
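
A note on the address arithmetic documented above (a sketch, not part of the patch): the per-queue scheduler registers are fixed strides off IL49_SCD_START_OFFSET, and the RAxTID translate table requires the dword-aligned read-modify-write described in the comment because each queue's entry is only 16 bits wide. A minimal standalone C illustration using only the offsets from the defines above; the helper names and printed layout are ours, not the driver's:

#include <stdio.h>

/* Offsets taken from the IL49_* defines above; everything else here is
 * illustrative and not driver code. */
#define SCD_START_OFFSET          0x00a02c00u
#define SCD_QUEUE_WRPTR(x)        (SCD_START_OFFSET + 0x24 + (x) * 4)
#define SCD_QUEUE_RDPTR(x)        (SCD_START_OFFSET + 0x64 + (x) * 4)
#define SCD_TRANSLATE_TBL_OFFSET  0x500u
#define SCD_TRANSLATE_TBL_OFFSET_QUEUE(x) \
	((SCD_TRANSLATE_TBL_OFFSET + (x) * 2) & 0xfffffffcu)

int main(void)
{
	unsigned int q;

	for (q = 0; q < 4; q++)
		printf("q%u: WRPTR 0x%08x  RDPTR 0x%08x  map dword at SRAM+0x%03x (%s 16 bits)\n",
		       q, SCD_QUEUE_WRPTR(q), SCD_QUEUE_RDPTR(q),
		       SCD_TRANSLATE_TBL_OFFSET_QUEUE(q),
		       (q & 1) ? "upper" : "lower");
	return 0;
}

Even-numbered queues land in the lower half of the aligned dword and odd-numbered queues in the upper half, which is why the driver must merge the new 16-bit value into the word rather than overwrite it.
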
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index 57703d5209d7..ae08498dfcad 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -102,12 +102,28 @@ config IWLWIFI_DEVICE_TRACING
occur.
endmenu
-config IWLWIFI_DEVICE_SVTOOL
- bool "iwlwifi device svtool support"
+config IWLWIFI_DEVICE_TESTMODE
+ def_bool y
depends on IWLWIFI
- select NL80211_TESTMODE
+ depends on NL80211_TESTMODE
help
- This option enables the svtool support for iwlwifi device through
- NL80211_TESTMODE. svtool is a software validation tool that runs in
- the user space and interacts with the device in the kernel space
- through the generic netlink message via NL80211_TESTMODE channel.
+	  This option enables testmode support for iwlwifi devices through
+	  NL80211_TESTMODE. It allows user-space validation applications to
+	  interact with the device via generic netlink messages over the
+	  NL80211_TESTMODE channel.
+
+config IWLWIFI_P2P
+ bool "iwlwifi experimental P2P support"
+ depends on IWLWIFI
+ help
+ This option enables experimental P2P support for some devices
+ based on microcode support. Since P2P support is still under
+	  development, this option may enable it now for devices that
+	  later turn out not to support it, due to microcode
+	  restrictions.
+
+ To determine if your microcode supports the experimental P2P
+ offered by this option, check if the driver advertises AP
+ support when it is loaded.
+
+ Say Y only if you want to experiment with P2P.
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index c73e5ed8db5e..9dc84a7354db 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -1,7 +1,7 @@
# WIFI
obj-$(CONFIG_IWLWIFI) += iwlwifi.o
-iwlwifi-objs := iwl-agn.o iwl-agn-rs.o
-iwlwifi-objs += iwl-agn-ucode.o iwl-agn-tx.o
+iwlwifi-objs := iwl-agn.o iwl-agn-rs.o iwl-mac80211.o
+iwlwifi-objs += iwl-ucode.o iwl-agn-tx.o
iwlwifi-objs += iwl-agn-lib.o iwl-agn-calib.o iwl-io.o
iwlwifi-objs += iwl-agn-tt.o iwl-agn-sta.o iwl-agn-rx.o
@@ -18,7 +18,7 @@ iwlwifi-objs += iwl-trans-pcie.o iwl-trans-pcie-rx.o iwl-trans-pcie-tx.o
iwlwifi-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
-iwlwifi-$(CONFIG_IWLWIFI_DEVICE_SVTOOL) += iwl-sv-open.o
+iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TESTMODE) += iwl-testmode.o
CFLAGS_iwl-devtrace.o := -I$(src)
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index e12b48c2cff6..1ef7bfc2ab25 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -124,10 +124,10 @@ static int iwl1000_hw_set_hw_params(struct iwl_priv *priv)
{
if (iwlagn_mod_params.num_of_queues >= IWL_MIN_NUM_QUEUES &&
iwlagn_mod_params.num_of_queues <= IWLAGN_NUM_QUEUES)
- priv->cfg->base_params->num_of_queues =
+ cfg(priv)->base_params->num_of_queues =
iwlagn_mod_params.num_of_queues;
- hw_params(priv).max_txq_num = priv->cfg->base_params->num_of_queues;
+ hw_params(priv).max_txq_num = cfg(priv)->base_params->num_of_queues;
priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
hw_params(priv).max_data_size = IWLAGN_RTC_DATA_SIZE;
@@ -135,28 +135,19 @@ static int iwl1000_hw_set_hw_params(struct iwl_priv *priv)
hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ);
- hw_params(priv).tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
- if (priv->cfg->rx_with_siso_diversity)
+ hw_params(priv).tx_chains_num = num_of_ant(cfg(priv)->valid_tx_ant);
+ if (cfg(priv)->rx_with_siso_diversity)
hw_params(priv).rx_chains_num = 1;
else
hw_params(priv).rx_chains_num =
- num_of_ant(priv->cfg->valid_rx_ant);
- hw_params(priv).valid_tx_ant = priv->cfg->valid_tx_ant;
- hw_params(priv).valid_rx_ant = priv->cfg->valid_rx_ant;
+ num_of_ant(cfg(priv)->valid_rx_ant);
+ hw_params(priv).valid_tx_ant = cfg(priv)->valid_tx_ant;
+ hw_params(priv).valid_rx_ant = cfg(priv)->valid_rx_ant;
iwl1000_set_ct_threshold(priv);
/* Set initial sensitivity parameters */
- /* Set initial calibration set */
hw_params(priv).sens = &iwl1000_sensitivity;
- hw_params(priv).calib_init_cfg =
- BIT(IWL_CALIB_XTAL) |
- BIT(IWL_CALIB_LO) |
- BIT(IWL_CALIB_TX_IQ) |
- BIT(IWL_CALIB_TX_IQ_PERD) |
- BIT(IWL_CALIB_BASE_BAND);
- if (priv->cfg->need_dc_calib)
- hw_params(priv).calib_init_cfg |= BIT(IWL_CALIB_DC);
return 0;
}
@@ -191,6 +182,7 @@ static struct iwl_base_params iwl1000_base_params = {
.chain_noise_scale = 1000,
.wd_timeout = IWL_DEF_WD_TIMEOUT,
.max_event_log_size = 128,
+ .wd_disable = true,
};
static struct iwl_ht_params iwl1000_ht_params = {
.ht_greenfield_support = true,
diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c
index 79431977a968..094693328dbb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-2000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-2000.c
@@ -86,7 +86,7 @@ static void iwl2000_nic_config(struct iwl_priv *priv)
{
iwl_rf_config(priv);
- if (priv->cfg->iq_invert)
+ if (cfg(priv)->iq_invert)
iwl_set_bit(bus(priv), CSR_GP_DRIVER_REG,
CSR_GP_DRIVER_REG_BIT_RADIO_IQ_INVER);
}
@@ -120,10 +120,10 @@ static int iwl2000_hw_set_hw_params(struct iwl_priv *priv)
{
if (iwlagn_mod_params.num_of_queues >= IWL_MIN_NUM_QUEUES &&
iwlagn_mod_params.num_of_queues <= IWLAGN_NUM_QUEUES)
- priv->cfg->base_params->num_of_queues =
+ cfg(priv)->base_params->num_of_queues =
iwlagn_mod_params.num_of_queues;
- hw_params(priv).max_txq_num = priv->cfg->base_params->num_of_queues;
+ hw_params(priv).max_txq_num = cfg(priv)->base_params->num_of_queues;
priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
hw_params(priv).max_data_size = IWL60_RTC_DATA_SIZE;
@@ -131,29 +131,19 @@ static int iwl2000_hw_set_hw_params(struct iwl_priv *priv)
hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ);
- hw_params(priv).tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
- if (priv->cfg->rx_with_siso_diversity)
+ hw_params(priv).tx_chains_num = num_of_ant(cfg(priv)->valid_tx_ant);
+ if (cfg(priv)->rx_with_siso_diversity)
hw_params(priv).rx_chains_num = 1;
else
hw_params(priv).rx_chains_num =
- num_of_ant(priv->cfg->valid_rx_ant);
- hw_params(priv).valid_tx_ant = priv->cfg->valid_tx_ant;
- hw_params(priv).valid_rx_ant = priv->cfg->valid_rx_ant;
+ num_of_ant(cfg(priv)->valid_rx_ant);
+ hw_params(priv).valid_tx_ant = cfg(priv)->valid_tx_ant;
+ hw_params(priv).valid_rx_ant = cfg(priv)->valid_rx_ant;
iwl2000_set_ct_threshold(priv);
/* Set initial sensitivity parameters */
- /* Set initial calibration set */
hw_params(priv).sens = &iwl2000_sensitivity;
- hw_params(priv).calib_init_cfg =
- BIT(IWL_CALIB_XTAL) |
- BIT(IWL_CALIB_LO) |
- BIT(IWL_CALIB_TX_IQ) |
- BIT(IWL_CALIB_BASE_BAND);
- if (priv->cfg->need_dc_calib)
- hw_params(priv).calib_rt_cfg |= IWL_CALIB_CFG_DC_IDX;
- if (priv->cfg->need_temp_offset_calib)
- hw_params(priv).calib_init_cfg |= BIT(IWL_CALIB_TEMP_OFFSET);
return 0;
}
@@ -258,25 +248,19 @@ static struct iwl_bt_params iwl2030_bt_params = {
.eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
.lib = &iwl2000_lib, \
.base_params = &iwl2000_base_params, \
- .need_dc_calib = true, \
.need_temp_offset_calib = true, \
.temp_offset_v2 = true, \
.led_mode = IWL_LED_RF_STATE, \
.iq_invert = true \
struct iwl_cfg iwl2000_2bgn_cfg = {
- .name = "2000 Series 2x2 BGN",
+ .name = "Intel(R) Centrino(R) Wireless-N 2200 BGN",
IWL_DEVICE_2000,
.ht_params = &iwl2000_ht_params,
};
-struct iwl_cfg iwl2000_2bg_cfg = {
- .name = "2000 Series 2x2 BG",
- IWL_DEVICE_2000,
-};
-
struct iwl_cfg iwl2000_2bgn_d_cfg = {
- .name = "2000D Series 2x2 BGN",
+ .name = "Intel(R) Centrino(R) Wireless-N 2200D BGN",
IWL_DEVICE_2000,
.ht_params = &iwl2000_ht_params,
};
@@ -291,7 +275,6 @@ struct iwl_cfg iwl2000_2bgn_d_cfg = {
.lib = &iwl2030_lib, \
.base_params = &iwl2030_base_params, \
.bt_params = &iwl2030_bt_params, \
- .need_dc_calib = true, \
.need_temp_offset_calib = true, \
.temp_offset_v2 = true, \
.led_mode = IWL_LED_RF_STATE, \
@@ -299,16 +282,11 @@ struct iwl_cfg iwl2000_2bgn_d_cfg = {
.iq_invert = true \
struct iwl_cfg iwl2030_2bgn_cfg = {
- .name = "2000 Series 2x2 BGN/BT",
+ .name = "Intel(R) Centrino(R) Wireless-N 2230 BGN",
IWL_DEVICE_2030,
.ht_params = &iwl2000_ht_params,
};
-struct iwl_cfg iwl2030_2bg_cfg = {
- .name = "2000 Series 2x2 BG/BT",
- IWL_DEVICE_2030,
-};
-
#define IWL_DEVICE_105 \
.fw_name_pre = IWL105_FW_PRE, \
.ucode_api_max = IWL105_UCODE_API_MAX, \
@@ -318,7 +296,6 @@ struct iwl_cfg iwl2030_2bg_cfg = {
.eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
.lib = &iwl2000_lib, \
.base_params = &iwl2000_base_params, \
- .need_dc_calib = true, \
.need_temp_offset_calib = true, \
.temp_offset_v2 = true, \
.led_mode = IWL_LED_RF_STATE, \
@@ -326,19 +303,14 @@ struct iwl_cfg iwl2030_2bg_cfg = {
.rx_with_siso_diversity = true, \
.iq_invert = true \
-struct iwl_cfg iwl105_bg_cfg = {
- .name = "105 Series 1x1 BG",
- IWL_DEVICE_105,
-};
-
struct iwl_cfg iwl105_bgn_cfg = {
- .name = "105 Series 1x1 BGN",
+ .name = "Intel(R) Centrino(R) Wireless-N 105 BGN",
IWL_DEVICE_105,
.ht_params = &iwl2000_ht_params,
};
struct iwl_cfg iwl105_bgn_d_cfg = {
- .name = "105D Series 1x1 BGN",
+ .name = "Intel(R) Centrino(R) Wireless-N 105D BGN",
IWL_DEVICE_105,
.ht_params = &iwl2000_ht_params,
};
@@ -353,7 +325,6 @@ struct iwl_cfg iwl105_bgn_d_cfg = {
.lib = &iwl2030_lib, \
.base_params = &iwl2030_base_params, \
.bt_params = &iwl2030_bt_params, \
- .need_dc_calib = true, \
.need_temp_offset_calib = true, \
.temp_offset_v2 = true, \
.led_mode = IWL_LED_RF_STATE, \
@@ -361,13 +332,8 @@ struct iwl_cfg iwl105_bgn_d_cfg = {
.rx_with_siso_diversity = true, \
.iq_invert = true \
-struct iwl_cfg iwl135_bg_cfg = {
- .name = "135 Series 1x1 BG/BT",
- IWL_DEVICE_135,
-};
-
struct iwl_cfg iwl135_bgn_cfg = {
- .name = "135 Series 1x1 BGN/BT",
+ .name = "Intel(R) Centrino(R) Wireless-N 135 BGN",
IWL_DEVICE_135,
.ht_params = &iwl2000_ht_params,
};
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index c511c98a89a8..b3a365fea7bb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -134,10 +134,10 @@ static struct iwl_sensitivity_ranges iwl5150_sensitivity = {
#define IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF (-5)
-static s32 iwl_temp_calib_to_offset(struct iwl_priv *priv)
+static s32 iwl_temp_calib_to_offset(struct iwl_shared *shrd)
{
u16 temperature, voltage;
- __le16 *temp_calib = (__le16 *)iwl_eeprom_query_addr(priv,
+ __le16 *temp_calib = (__le16 *)iwl_eeprom_query_addr(shrd,
EEPROM_KELVIN_TEMPERATURE);
temperature = le16_to_cpu(temp_calib[0]);
@@ -151,7 +151,7 @@ static void iwl5150_set_ct_threshold(struct iwl_priv *priv)
{
const s32 volt2temp_coef = IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF;
s32 threshold = (s32)CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY) -
- iwl_temp_calib_to_offset(priv);
+ iwl_temp_calib_to_offset(priv->shrd);
hw_params(priv).ct_kill_threshold = threshold * volt2temp_coef;
}
@@ -166,10 +166,10 @@ static int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
{
if (iwlagn_mod_params.num_of_queues >= IWL_MIN_NUM_QUEUES &&
iwlagn_mod_params.num_of_queues <= IWLAGN_NUM_QUEUES)
- priv->cfg->base_params->num_of_queues =
+ cfg(priv)->base_params->num_of_queues =
iwlagn_mod_params.num_of_queues;
- hw_params(priv).max_txq_num = priv->cfg->base_params->num_of_queues;
+ hw_params(priv).max_txq_num = cfg(priv)->base_params->num_of_queues;
priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
hw_params(priv).max_data_size = IWLAGN_RTC_DATA_SIZE;
@@ -178,22 +178,15 @@ static int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
BIT(IEEE80211_BAND_5GHZ);
- hw_params(priv).tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
- hw_params(priv).rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant);
- hw_params(priv).valid_tx_ant = priv->cfg->valid_tx_ant;
- hw_params(priv).valid_rx_ant = priv->cfg->valid_rx_ant;
+ hw_params(priv).tx_chains_num = num_of_ant(cfg(priv)->valid_tx_ant);
+ hw_params(priv).rx_chains_num = num_of_ant(cfg(priv)->valid_rx_ant);
+ hw_params(priv).valid_tx_ant = cfg(priv)->valid_tx_ant;
+ hw_params(priv).valid_rx_ant = cfg(priv)->valid_rx_ant;
iwl5000_set_ct_threshold(priv);
/* Set initial sensitivity parameters */
- /* Set initial calibration set */
hw_params(priv).sens = &iwl5000_sensitivity;
- hw_params(priv).calib_init_cfg =
- BIT(IWL_CALIB_XTAL) |
- BIT(IWL_CALIB_LO) |
- BIT(IWL_CALIB_TX_IQ) |
- BIT(IWL_CALIB_TX_IQ_PERD) |
- BIT(IWL_CALIB_BASE_BAND);
return 0;
}
@@ -202,10 +195,10 @@ static int iwl5150_hw_set_hw_params(struct iwl_priv *priv)
{
if (iwlagn_mod_params.num_of_queues >= IWL_MIN_NUM_QUEUES &&
iwlagn_mod_params.num_of_queues <= IWLAGN_NUM_QUEUES)
- priv->cfg->base_params->num_of_queues =
+ cfg(priv)->base_params->num_of_queues =
iwlagn_mod_params.num_of_queues;
- hw_params(priv).max_txq_num = priv->cfg->base_params->num_of_queues;
+ hw_params(priv).max_txq_num = cfg(priv)->base_params->num_of_queues;
priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
hw_params(priv).max_data_size = IWLAGN_RTC_DATA_SIZE;
@@ -214,22 +207,15 @@ static int iwl5150_hw_set_hw_params(struct iwl_priv *priv)
hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
BIT(IEEE80211_BAND_5GHZ);
- hw_params(priv).tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
- hw_params(priv).rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant);
- hw_params(priv).valid_tx_ant = priv->cfg->valid_tx_ant;
- hw_params(priv).valid_rx_ant = priv->cfg->valid_rx_ant;
+ hw_params(priv).tx_chains_num = num_of_ant(cfg(priv)->valid_tx_ant);
+ hw_params(priv).rx_chains_num = num_of_ant(cfg(priv)->valid_rx_ant);
+ hw_params(priv).valid_tx_ant = cfg(priv)->valid_tx_ant;
+ hw_params(priv).valid_rx_ant = cfg(priv)->valid_rx_ant;
iwl5150_set_ct_threshold(priv);
/* Set initial sensitivity parameters */
- /* Set initial calibration set */
hw_params(priv).sens = &iwl5150_sensitivity;
- hw_params(priv).calib_init_cfg =
- BIT(IWL_CALIB_LO) |
- BIT(IWL_CALIB_TX_IQ) |
- BIT(IWL_CALIB_BASE_BAND);
- if (priv->cfg->need_dc_calib)
- hw_params(priv).calib_init_cfg |= BIT(IWL_CALIB_DC);
return 0;
}
@@ -237,7 +223,7 @@ static int iwl5150_hw_set_hw_params(struct iwl_priv *priv)
static void iwl5150_temperature(struct iwl_priv *priv)
{
u32 vt = 0;
- s32 offset = iwl_temp_calib_to_offset(priv);
+ s32 offset = iwl_temp_calib_to_offset(priv->shrd);
vt = le32_to_cpu(priv->statistics.common.temperature);
vt = vt / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF + offset;
@@ -364,6 +350,7 @@ static struct iwl_base_params iwl5000_base_params = {
.wd_timeout = IWL_LONG_WD_TIMEOUT,
.max_event_log_size = 512,
.no_idle_support = true,
+ .wd_disable = true,
};
static struct iwl_ht_params iwl5000_ht_params = {
.ht_greenfield_support = true,
@@ -433,7 +420,7 @@ struct iwl_cfg iwl5350_agn_cfg = {
.eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION, \
.lib = &iwl5150_lib, \
.base_params = &iwl5000_base_params, \
- .need_dc_calib = true, \
+ .no_xtal_calib = true, \
.led_mode = IWL_LED_BLINK, \
.internal_wimax_coex = true
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index c840c78278db..54b753399e6e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -46,11 +46,12 @@
#include "iwl-cfg.h"
/* Highest firmware API version supported */
-#define IWL6000_UCODE_API_MAX 4
+#define IWL6000_UCODE_API_MAX 6
#define IWL6050_UCODE_API_MAX 5
#define IWL6000G2_UCODE_API_MAX 6
/* Oldest version we won't warn about */
+#define IWL6000_UCODE_API_OK 4
#define IWL6000G2_UCODE_API_OK 5
/* Lowest firmware API version supported */
@@ -80,7 +81,7 @@ static void iwl6000_set_ct_threshold(struct iwl_priv *priv)
static void iwl6050_additional_nic_config(struct iwl_priv *priv)
{
/* Indicate calibration version to uCode. */
- if (iwlagn_eeprom_calib_version(priv) >= 6)
+ if (iwl_eeprom_calib_version(priv->shrd) >= 6)
iwl_set_bit(bus(priv), CSR_GP_DRIVER_REG,
CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
}
@@ -88,7 +89,7 @@ static void iwl6050_additional_nic_config(struct iwl_priv *priv)
static void iwl6150_additional_nic_config(struct iwl_priv *priv)
{
/* Indicate calibration version to uCode. */
- if (iwlagn_eeprom_calib_version(priv) >= 6)
+ if (iwl_eeprom_calib_version(priv->shrd) >= 6)
iwl_set_bit(bus(priv), CSR_GP_DRIVER_REG,
CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
iwl_set_bit(bus(priv), CSR_GP_DRIVER_REG,
@@ -101,14 +102,14 @@ static void iwl6000_nic_config(struct iwl_priv *priv)
iwl_rf_config(priv);
/* no locking required for register write */
- if (priv->cfg->pa_type == IWL_PA_INTERNAL) {
+ if (cfg(priv)->pa_type == IWL_PA_INTERNAL) {
/* 2x2 IPA phy type */
iwl_write32(bus(priv), CSR_GP_DRIVER_REG,
CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_IPA);
}
/* do additional nic configuration if needed */
- if (priv->cfg->additional_nic_config)
- priv->cfg->additional_nic_config(priv);
+ if (cfg(priv)->additional_nic_config)
+ cfg(priv)->additional_nic_config(priv);
}
static struct iwl_sensitivity_ranges iwl6000_sensitivity = {
@@ -140,10 +141,10 @@ static int iwl6000_hw_set_hw_params(struct iwl_priv *priv)
{
if (iwlagn_mod_params.num_of_queues >= IWL_MIN_NUM_QUEUES &&
iwlagn_mod_params.num_of_queues <= IWLAGN_NUM_QUEUES)
- priv->cfg->base_params->num_of_queues =
+ cfg(priv)->base_params->num_of_queues =
iwlagn_mod_params.num_of_queues;
- hw_params(priv).max_txq_num = priv->cfg->base_params->num_of_queues;
+ hw_params(priv).max_txq_num = cfg(priv)->base_params->num_of_queues;
priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
hw_params(priv).max_data_size = IWL60_RTC_DATA_SIZE;
@@ -152,29 +153,19 @@ static int iwl6000_hw_set_hw_params(struct iwl_priv *priv)
hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
BIT(IEEE80211_BAND_5GHZ);
- hw_params(priv).tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
- if (priv->cfg->rx_with_siso_diversity)
+ hw_params(priv).tx_chains_num = num_of_ant(cfg(priv)->valid_tx_ant);
+ if (cfg(priv)->rx_with_siso_diversity)
hw_params(priv).rx_chains_num = 1;
else
hw_params(priv).rx_chains_num =
- num_of_ant(priv->cfg->valid_rx_ant);
- hw_params(priv).valid_tx_ant = priv->cfg->valid_tx_ant;
- hw_params(priv).valid_rx_ant = priv->cfg->valid_rx_ant;
+ num_of_ant(cfg(priv)->valid_rx_ant);
+ hw_params(priv).valid_tx_ant = cfg(priv)->valid_tx_ant;
+ hw_params(priv).valid_rx_ant = cfg(priv)->valid_rx_ant;
iwl6000_set_ct_threshold(priv);
/* Set initial sensitivity parameters */
- /* Set initial calibration set */
hw_params(priv).sens = &iwl6000_sensitivity;
- hw_params(priv).calib_init_cfg =
- BIT(IWL_CALIB_XTAL) |
- BIT(IWL_CALIB_LO) |
- BIT(IWL_CALIB_TX_IQ) |
- BIT(IWL_CALIB_BASE_BAND);
- if (priv->cfg->need_dc_calib)
- hw_params(priv).calib_rt_cfg |= IWL_CALIB_CFG_DC_IDX;
- if (priv->cfg->need_temp_offset_calib)
- hw_params(priv).calib_init_cfg |= BIT(IWL_CALIB_TEMP_OFFSET);
return 0;
}
@@ -364,7 +355,6 @@ static struct iwl_bt_params iwl6000_bt_params = {
.eeprom_calib_ver = EEPROM_6005_TX_POWER_VERSION, \
.lib = &iwl6000_lib, \
.base_params = &iwl6000_g2_base_params, \
- .need_dc_calib = true, \
.need_temp_offset_calib = true, \
.led_mode = IWL_LED_RF_STATE
@@ -406,7 +396,6 @@ struct iwl_cfg iwl6005_2agn_d_cfg = {
.lib = &iwl6030_lib, \
.base_params = &iwl6000_g2_base_params, \
.bt_params = &iwl6000_bt_params, \
- .need_dc_calib = true, \
.need_temp_offset_calib = true, \
.led_mode = IWL_LED_RF_STATE, \
.adv_pm = true \
@@ -434,21 +423,11 @@ struct iwl_cfg iwl6030_2bg_cfg = {
};
struct iwl_cfg iwl6035_2agn_cfg = {
- .name = "6035 Series 2x2 AGN/BT",
+ .name = "Intel(R) Centrino(R) Advanced-N 6235 AGN",
IWL_DEVICE_6030,
.ht_params = &iwl6000_ht_params,
};
-struct iwl_cfg iwl6035_2abg_cfg = {
- .name = "6035 Series 2x2 ABG/BT",
- IWL_DEVICE_6030,
-};
-
-struct iwl_cfg iwl6035_2bg_cfg = {
- .name = "6035 Series 2x2 BG/BT",
- IWL_DEVICE_6030,
-};
-
struct iwl_cfg iwl1030_bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 1030 BGN",
IWL_DEVICE_6030,
@@ -479,6 +458,7 @@ struct iwl_cfg iwl130_bg_cfg = {
#define IWL_DEVICE_6000i \
.fw_name_pre = IWL6000_FW_PRE, \
.ucode_api_max = IWL6000_UCODE_API_MAX, \
+ .ucode_api_ok = IWL6000_UCODE_API_OK, \
.ucode_api_min = IWL6000_UCODE_API_MIN, \
.valid_tx_ant = ANT_BC, /* .cfg overwrite */ \
.valid_rx_ant = ANT_BC, /* .cfg overwrite */ \
@@ -516,7 +496,6 @@ struct iwl_cfg iwl6000i_2bg_cfg = {
.eeprom_ver = EEPROM_6050_EEPROM_VERSION, \
.eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION, \
.base_params = &iwl6050_base_params, \
- .need_dc_calib = true, \
.led_mode = IWL_LED_BLINK, \
.internal_wimax_coex = true
@@ -540,7 +519,6 @@ struct iwl_cfg iwl6050_2abg_cfg = {
.eeprom_ver = EEPROM_6150_EEPROM_VERSION, \
.eeprom_calib_ver = EEPROM_6150_TX_POWER_VERSION, \
.base_params = &iwl6050_base_params, \
- .need_dc_calib = true, \
.led_mode = IWL_LED_BLINK, \
.internal_wimax_coex = true
@@ -559,17 +537,17 @@ struct iwl_cfg iwl6000_3agn_cfg = {
.name = "Intel(R) Centrino(R) Ultimate-N 6300 AGN",
.fw_name_pre = IWL6000_FW_PRE,
.ucode_api_max = IWL6000_UCODE_API_MAX,
+ .ucode_api_ok = IWL6000_UCODE_API_OK,
.ucode_api_min = IWL6000_UCODE_API_MIN,
.eeprom_ver = EEPROM_6000_EEPROM_VERSION,
.eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION,
.lib = &iwl6000_lib,
.base_params = &iwl6000_base_params,
.ht_params = &iwl6000_ht_params,
- .need_dc_calib = true,
.led_mode = IWL_LED_BLINK,
};
-MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_OK));
MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_MAX));
MODULE_FIRMWARE(IWL6005_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
MODULE_FIRMWARE(IWL6030_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
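
For context on the new IWL6000_UCODE_API_OK define and the MODULE_FIRMWARE change above (a sketch, not part of the patch): per the comments, api_max is the highest firmware API the driver supports, api_ok the oldest version that does not trigger a warning, and api_min the lowest accepted. A standalone illustration of that acceptance policy; the MIN value is not visible in this hunk and is a placeholder:

#include <stdio.h>

#define UCODE_API_MAX 6   /* IWL6000_UCODE_API_MAX, from the hunk above */
#define UCODE_API_OK  4   /* IWL6000_UCODE_API_OK, from the hunk above */
#define UCODE_API_MIN 4   /* placeholder, not taken from this diff */

/* Accept silently at or above OK, accept with a warning down to MIN,
 * reject anything outside [MIN, MAX]. */
static int check_ucode_api(int api)
{
	if (api < UCODE_API_MIN || api > UCODE_API_MAX) {
		printf("api %d unsupported (min %d, max %d)\n",
		       api, UCODE_API_MIN, UCODE_API_MAX);
		return -1;
	}
	if (api < UCODE_API_OK)
		printf("api %d works but is older than recommended (%d)\n",
		       api, UCODE_API_OK);
	return 0;
}

int main(void)
{
	check_ucode_api(6);
	check_ucode_api(4);
	check_ucode_api(3);
	return 0;
}

This also matches the MODULE_FIRMWARE switch from API_MAX to API_OK above: the advertised firmware file is the known-good version rather than the newest one.
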
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-calib.c b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
index 03bac48558b2..50ff849c9f67 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
@@ -82,56 +82,64 @@ struct statistics_general_data {
u32 beacon_energy_c;
};
-int iwl_send_calib_results(struct iwl_priv *priv)
+int iwl_send_calib_results(struct iwl_trans *trans)
{
- int ret = 0;
- int i = 0;
-
struct iwl_host_cmd hcmd = {
.id = REPLY_PHY_CALIBRATION_CMD,
.flags = CMD_SYNC,
};
-
- for (i = 0; i < IWL_CALIB_MAX; i++) {
- if ((BIT(i) & hw_params(priv).calib_init_cfg) &&
- priv->calib_results[i].buf) {
- hcmd.len[0] = priv->calib_results[i].buf_len;
- hcmd.data[0] = priv->calib_results[i].buf;
- hcmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
- ret = iwl_trans_send_cmd(trans(priv), &hcmd);
- if (ret) {
- IWL_ERR(priv, "Error %d iteration %d\n",
- ret, i);
- break;
- }
+ struct iwl_calib_result *res;
+
+ list_for_each_entry(res, &trans->calib_results, list) {
+ int ret;
+
+ hcmd.len[0] = res->cmd_len;
+ hcmd.data[0] = &res->hdr;
+ hcmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
+ ret = iwl_trans_send_cmd(trans, &hcmd);
+ if (ret) {
+ IWL_ERR(trans, "Error %d on calib cmd %d\n",
+ ret, res->hdr.op_code);
+ return ret;
}
}
- return ret;
+ return 0;
}
-int iwl_calib_set(struct iwl_calib_result *res, const u8 *buf, int len)
+int iwl_calib_set(struct iwl_trans *trans,
+ const struct iwl_calib_hdr *cmd, int len)
{
- if (res->buf_len != len) {
- kfree(res->buf);
- res->buf = kzalloc(len, GFP_ATOMIC);
- }
- if (unlikely(res->buf == NULL))
+ struct iwl_calib_result *res, *tmp;
+
+ res = kmalloc(sizeof(*res) + len - sizeof(struct iwl_calib_hdr),
+ GFP_ATOMIC);
+ if (!res)
return -ENOMEM;
+ memcpy(&res->hdr, cmd, len);
+ res->cmd_len = len;
+
+ list_for_each_entry(tmp, &trans->calib_results, list) {
+ if (tmp->hdr.op_code == res->hdr.op_code) {
+ list_replace(&tmp->list, &res->list);
+ kfree(tmp);
+ return 0;
+ }
+ }
+
+ /* wasn't in list already */
+ list_add_tail(&res->list, &trans->calib_results);
- res->buf_len = len;
- memcpy(res->buf, buf, len);
return 0;
}
-void iwl_calib_free_results(struct iwl_priv *priv)
+void iwl_calib_free_results(struct iwl_trans *trans)
{
- int i;
+ struct iwl_calib_result *res, *tmp;
- for (i = 0; i < IWL_CALIB_MAX; i++) {
- kfree(priv->calib_results[i].buf);
- priv->calib_results[i].buf = NULL;
- priv->calib_results[i].buf_len = 0;
+ list_for_each_entry_safe(res, tmp, &trans->calib_results, list) {
+ list_del(&res->list);
+ kfree(res);
}
}
@@ -505,7 +513,7 @@ static int iwl_enhance_sensitivity_write(struct iwl_priv *priv)
iwl_prepare_legacy_sensitivity_tbl(priv, data, &cmd.enhance_table[0]);
- if (priv->cfg->base_params->hd_v2) {
+ if (cfg(priv)->base_params->hd_v2) {
cmd.enhance_table[HD_INA_NON_SQUARE_DET_OFDM_INDEX] =
HD_INA_NON_SQUARE_DET_OFDM_DATA_V2;
cmd.enhance_table[HD_INA_NON_SQUARE_DET_CCK_INDEX] =
@@ -839,7 +847,7 @@ static void iwl_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
* connect the first valid tx chain
*/
first_chain =
- find_first_chain(priv->cfg->valid_tx_ant);
+ find_first_chain(cfg(priv)->valid_tx_ant);
data->disconn_array[first_chain] = 0;
active_chains |= BIT(first_chain);
IWL_DEBUG_CALIB(priv,
@@ -882,7 +890,7 @@ static void iwlagn_gain_computation(struct iwl_priv *priv,
continue;
}
- delta_g = (priv->cfg->base_params->chain_noise_scale *
+ delta_g = (cfg(priv)->base_params->chain_noise_scale *
((s32)average_noise[default_chain] -
(s32)average_noise[i])) / 1500;
@@ -1039,8 +1047,8 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv)
return;
/* Analyze signal for disconnected antenna */
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist) {
+ if (cfg(priv)->bt_params &&
+ cfg(priv)->bt_params->advanced_bt_coexist) {
/* Disable disconnected antenna algorithm for advanced
bt coex, assuming valid antennas are connected */
data->active_chains = hw_params(priv).valid_rx_ant;
@@ -1074,7 +1082,7 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv)
iwlagn_gain_computation(priv, average_noise,
min_average_noise_antenna_i, min_average_noise,
- find_first_chain(priv->cfg->valid_rx_ant));
+ find_first_chain(cfg(priv)->valid_rx_ant));
/* Some power changes may have been made during the calibration.
* Update and commit the RXON
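
The calibration rework above replaces the fixed per-index result buffers with a list keyed by calibration op_code: iwl_calib_set() swaps out an existing entry with the same op_code or appends a new one, and iwl_send_calib_results() later walks the list. A self-contained sketch of that replace-or-append policy (plain C with an ad-hoc singly linked list; the struct and helper names are ours, the driver itself uses the kernel list API and struct iwl_calib_result):

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

struct calib_res {
	struct calib_res *next;
	unsigned op_code;
	size_t len;
	unsigned char data[64];	/* payload copy, fixed size for the sketch */
};

/* Replace an existing entry with the same op_code, else append. */
static int calib_set(struct calib_res **head, unsigned op,
		     const void *buf, size_t len)
{
	struct calib_res **pp, *res = calloc(1, sizeof(*res));

	if (!res || len > sizeof(res->data)) {
		free(res);
		return -1;
	}
	res->op_code = op;
	res->len = len;
	memcpy(res->data, buf, len);

	for (pp = head; *pp; pp = &(*pp)->next) {
		if ((*pp)->op_code == op) {	/* same op_code: replace in place */
			res->next = (*pp)->next;
			free(*pp);
			*pp = res;
			return 0;
		}
	}
	*pp = res;				/* wasn't in list: append */
	return 0;
}

int main(void)
{
	struct calib_res *head = NULL, *r;

	calib_set(&head, 8, "lo", 3);
	calib_set(&head, 18, "tx_iq", 6);
	calib_set(&head, 8, "lo-v2", 6);	/* replaces the op_code 8 entry */
	for (r = head; r; r = r->next)
		printf("op %u: %s\n", r->op_code, (const char *)r->data);
	return 0;
}
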
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-calib.h b/drivers/net/wireless/iwlwifi/iwl-agn-calib.h
index a869fc9205d2..10275ce92bde 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-calib.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-calib.h
@@ -72,8 +72,4 @@ void iwl_sensitivity_calibration(struct iwl_priv *priv);
void iwl_init_sensitivity(struct iwl_priv *priv);
void iwl_reset_run_time_calib(struct iwl_priv *priv);
-int iwl_send_calib_results(struct iwl_priv *priv);
-int iwl_calib_set(struct iwl_calib_result *res, const u8 *buf, int len);
-void iwl_calib_free_results(struct iwl_priv *priv);
-
#endif /* __iwl_calib_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
index 1a52ed29f2d6..64cf439035c3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
@@ -32,6 +32,7 @@
#include <linux/init.h>
#include <linux/sched.h>
+#include "iwl-wifi.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
@@ -92,11 +93,11 @@ void iwlagn_temperature(struct iwl_priv *priv)
iwl_tt_handler(priv);
}
-u16 iwlagn_eeprom_calib_version(struct iwl_priv *priv)
+u16 iwl_eeprom_calib_version(struct iwl_shared *shrd)
{
struct iwl_eeprom_calib_hdr *hdr;
- hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv,
+ hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(shrd,
EEPROM_CALIB_ALL);
return hdr->version;
@@ -105,7 +106,7 @@ u16 iwlagn_eeprom_calib_version(struct iwl_priv *priv)
/*
* EEPROM
*/
-static u32 eeprom_indirect_address(const struct iwl_priv *priv, u32 address)
+static u32 eeprom_indirect_address(const struct iwl_shared *shrd, u32 address)
{
u16 offset = 0;
@@ -114,31 +115,31 @@ static u32 eeprom_indirect_address(const struct iwl_priv *priv, u32 address)
switch (address & INDIRECT_TYPE_MSK) {
case INDIRECT_HOST:
- offset = iwl_eeprom_query16(priv, EEPROM_LINK_HOST);
+ offset = iwl_eeprom_query16(shrd, EEPROM_LINK_HOST);
break;
case INDIRECT_GENERAL:
- offset = iwl_eeprom_query16(priv, EEPROM_LINK_GENERAL);
+ offset = iwl_eeprom_query16(shrd, EEPROM_LINK_GENERAL);
break;
case INDIRECT_REGULATORY:
- offset = iwl_eeprom_query16(priv, EEPROM_LINK_REGULATORY);
+ offset = iwl_eeprom_query16(shrd, EEPROM_LINK_REGULATORY);
break;
case INDIRECT_TXP_LIMIT:
- offset = iwl_eeprom_query16(priv, EEPROM_LINK_TXP_LIMIT);
+ offset = iwl_eeprom_query16(shrd, EEPROM_LINK_TXP_LIMIT);
break;
case INDIRECT_TXP_LIMIT_SIZE:
- offset = iwl_eeprom_query16(priv, EEPROM_LINK_TXP_LIMIT_SIZE);
+ offset = iwl_eeprom_query16(shrd, EEPROM_LINK_TXP_LIMIT_SIZE);
break;
case INDIRECT_CALIBRATION:
- offset = iwl_eeprom_query16(priv, EEPROM_LINK_CALIBRATION);
+ offset = iwl_eeprom_query16(shrd, EEPROM_LINK_CALIBRATION);
break;
case INDIRECT_PROCESS_ADJST:
- offset = iwl_eeprom_query16(priv, EEPROM_LINK_PROCESS_ADJST);
+ offset = iwl_eeprom_query16(shrd, EEPROM_LINK_PROCESS_ADJST);
break;
case INDIRECT_OTHERS:
- offset = iwl_eeprom_query16(priv, EEPROM_LINK_OTHERS);
+ offset = iwl_eeprom_query16(shrd, EEPROM_LINK_OTHERS);
break;
default:
- IWL_ERR(priv, "illegal indirect type: 0x%X\n",
+ IWL_ERR(shrd->trans, "illegal indirect type: 0x%X\n",
address & INDIRECT_TYPE_MSK);
break;
}
@@ -147,11 +148,11 @@ static u32 eeprom_indirect_address(const struct iwl_priv *priv, u32 address)
return (address & ADDRESS_MSK) + (offset << 1);
}
-const u8 *iwl_eeprom_query_addr(const struct iwl_priv *priv, size_t offset)
+const u8 *iwl_eeprom_query_addr(const struct iwl_shared *shrd, size_t offset)
{
- u32 address = eeprom_indirect_address(priv, offset);
- BUG_ON(address >= priv->cfg->base_params->eeprom_size);
- return &priv->eeprom[address];
+ u32 address = eeprom_indirect_address(shrd, offset);
+ BUG_ON(address >= shrd->cfg->base_params->eeprom_size);
+ return &shrd->eeprom[address];
}
struct iwl_mod_params iwlagn_mod_params = {
@@ -232,7 +233,7 @@ int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control)
IWL_PAN_SCD_BK_MSK | IWL_PAN_SCD_MGMT_MSK |
IWL_PAN_SCD_MULTICAST_MSK;
- if (priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE)
+ if (cfg(priv)->sku & EEPROM_SKU_CAP_11N_ENABLE)
flush_cmd.fifo_control |= IWL_AGG_TX_QUEUE_MSK;
IWL_DEBUG_INFO(priv, "fifo queue control: 0X%x\n",
@@ -374,15 +375,15 @@ void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
BUILD_BUG_ON(sizeof(iwlagn_def_3w_lookup) !=
sizeof(basic.bt3_lookup_table));
- if (priv->cfg->bt_params) {
- if (priv->cfg->bt_params->bt_session_2) {
+ if (cfg(priv)->bt_params) {
+ if (cfg(priv)->bt_params->bt_session_2) {
bt_cmd_2000.prio_boost = cpu_to_le32(
- priv->cfg->bt_params->bt_prio_boost);
+ cfg(priv)->bt_params->bt_prio_boost);
bt_cmd_2000.tx_prio_boost = 0;
bt_cmd_2000.rx_prio_boost = 0;
} else {
bt_cmd_6000.prio_boost =
- priv->cfg->bt_params->bt_prio_boost;
+ cfg(priv)->bt_params->bt_prio_boost;
bt_cmd_6000.tx_prio_boost = 0;
bt_cmd_6000.rx_prio_boost = 0;
}
@@ -430,7 +431,7 @@ void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
priv->bt_full_concurrent ?
"full concurrency" : "3-wire");
- if (priv->cfg->bt_params->bt_session_2) {
+ if (cfg(priv)->bt_params->bt_session_2) {
memcpy(&bt_cmd_2000.basic, &basic,
sizeof(basic));
ret = iwl_trans_send_cmd_pdu(trans(priv), REPLY_BT_CONFIG,
@@ -799,8 +800,8 @@ static bool is_single_rx_stream(struct iwl_priv *priv)
*/
static int iwl_get_active_rx_chain_count(struct iwl_priv *priv)
{
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist &&
+ if (cfg(priv)->bt_params &&
+ cfg(priv)->bt_params->advanced_bt_coexist &&
(priv->bt_full_concurrent ||
priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) {
/*
@@ -827,6 +828,7 @@ static int iwl_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt)
case IEEE80211_SMPS_STATIC:
case IEEE80211_SMPS_DYNAMIC:
return IWL_NUM_IDLE_CHAINS_SINGLE;
+ case IEEE80211_SMPS_AUTOMATIC:
case IEEE80211_SMPS_OFF:
return active_cnt;
default:
@@ -870,8 +872,8 @@ void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
else
active_chains = hw_params(priv).valid_rx_ant;
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist &&
+ if (cfg(priv)->bt_params &&
+ cfg(priv)->bt_params->advanced_bt_coexist &&
(priv->bt_full_concurrent ||
priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) {
/*
@@ -933,53 +935,359 @@ u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant, u8 valid)
return ant;
}
-/* notification wait support */
-void iwlagn_init_notification_wait(struct iwl_priv *priv,
- struct iwl_notification_wait *wait_entry,
- u8 cmd,
- void (*fn)(struct iwl_priv *priv,
- struct iwl_rx_packet *pkt,
- void *data),
- void *fn_data)
+#ifdef CONFIG_PM_SLEEP
+static void iwlagn_convert_p1k(u16 *p1k, __le16 *out)
{
- wait_entry->fn = fn;
- wait_entry->fn_data = fn_data;
- wait_entry->cmd = cmd;
- wait_entry->triggered = false;
- wait_entry->aborted = false;
-
- spin_lock_bh(&priv->notif_wait_lock);
- list_add(&wait_entry->list, &priv->notif_waits);
- spin_unlock_bh(&priv->notif_wait_lock);
+ int i;
+
+ for (i = 0; i < IWLAGN_P1K_SIZE; i++)
+ out[i] = cpu_to_le16(p1k[i]);
}
-int iwlagn_wait_notification(struct iwl_priv *priv,
- struct iwl_notification_wait *wait_entry,
- unsigned long timeout)
+struct wowlan_key_data {
+ struct iwl_rxon_context *ctx;
+ struct iwlagn_wowlan_rsc_tsc_params_cmd *rsc_tsc;
+ struct iwlagn_wowlan_tkip_params_cmd *tkip;
+ const u8 *bssid;
+ bool error, use_rsc_tsc, use_tkip;
+};
+
+
+static void iwlagn_wowlan_program_keys(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key,
+ void *_data)
{
- int ret;
+ struct iwl_priv *priv = hw->priv;
+ struct wowlan_key_data *data = _data;
+ struct iwl_rxon_context *ctx = data->ctx;
+ struct aes_sc *aes_sc, *aes_tx_sc = NULL;
+ struct tkip_sc *tkip_sc, *tkip_tx_sc = NULL;
+ struct iwlagn_p1k_cache *rx_p1ks;
+ u8 *rx_mic_key;
+ struct ieee80211_key_seq seq;
+ u32 cur_rx_iv32 = 0;
+ u16 p1k[IWLAGN_P1K_SIZE];
+ int ret, i;
+
+ mutex_lock(&priv->shrd->mutex);
- ret = wait_event_timeout(priv->notif_waitq,
- wait_entry->triggered || wait_entry->aborted,
- timeout);
+ if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+ key->cipher == WLAN_CIPHER_SUITE_WEP104) &&
+ !sta && !ctx->key_mapping_keys)
+ ret = iwl_set_default_wep_key(priv, ctx, key);
+ else
+ ret = iwl_set_dynamic_key(priv, ctx, key, sta);
- spin_lock_bh(&priv->notif_wait_lock);
- list_del(&wait_entry->list);
- spin_unlock_bh(&priv->notif_wait_lock);
+ if (ret) {
+ IWL_ERR(priv, "Error setting key during suspend!\n");
+ data->error = true;
+ }
- if (wait_entry->aborted)
- return -EIO;
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
+ if (sta) {
+ tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.unicast_rsc;
+ tkip_tx_sc = &data->rsc_tsc->all_tsc_rsc.tkip.tsc;
- /* return value is always >= 0 */
- if (ret <= 0)
- return -ETIMEDOUT;
- return 0;
+ rx_p1ks = data->tkip->rx_uni;
+
+ ieee80211_get_key_tx_seq(key, &seq);
+ tkip_tx_sc->iv16 = cpu_to_le16(seq.tkip.iv16);
+ tkip_tx_sc->iv32 = cpu_to_le32(seq.tkip.iv32);
+
+ ieee80211_get_tkip_p1k_iv(key, seq.tkip.iv32, p1k);
+ iwlagn_convert_p1k(p1k, data->tkip->tx.p1k);
+
+ memcpy(data->tkip->mic_keys.tx,
+ &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
+ IWLAGN_MIC_KEY_SIZE);
+
+ rx_mic_key = data->tkip->mic_keys.rx_unicast;
+ } else {
+ tkip_sc =
+ data->rsc_tsc->all_tsc_rsc.tkip.multicast_rsc;
+ rx_p1ks = data->tkip->rx_multi;
+ rx_mic_key = data->tkip->mic_keys.rx_mcast;
+ }
+
+ /*
+ * For non-QoS this relies on the fact that both the uCode and
+	 * mac80211 use TID 0 (as they need to, to avoid replay attacks)
+ * for checking the IV in the frames.
+ */
+ for (i = 0; i < IWLAGN_NUM_RSC; i++) {
+ ieee80211_get_key_rx_seq(key, i, &seq);
+ tkip_sc[i].iv16 = cpu_to_le16(seq.tkip.iv16);
+ tkip_sc[i].iv32 = cpu_to_le32(seq.tkip.iv32);
+ /* wrapping isn't allowed, AP must rekey */
+ if (seq.tkip.iv32 > cur_rx_iv32)
+ cur_rx_iv32 = seq.tkip.iv32;
+ }
+
+ ieee80211_get_tkip_rx_p1k(key, data->bssid, cur_rx_iv32, p1k);
+ iwlagn_convert_p1k(p1k, rx_p1ks[0].p1k);
+ ieee80211_get_tkip_rx_p1k(key, data->bssid,
+ cur_rx_iv32 + 1, p1k);
+ iwlagn_convert_p1k(p1k, rx_p1ks[1].p1k);
+
+ memcpy(rx_mic_key,
+ &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
+ IWLAGN_MIC_KEY_SIZE);
+
+ data->use_tkip = true;
+ data->use_rsc_tsc = true;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ if (sta) {
+ u8 *pn = seq.ccmp.pn;
+
+ aes_sc = data->rsc_tsc->all_tsc_rsc.aes.unicast_rsc;
+ aes_tx_sc = &data->rsc_tsc->all_tsc_rsc.aes.tsc;
+
+ ieee80211_get_key_tx_seq(key, &seq);
+ aes_tx_sc->pn = cpu_to_le64(
+ (u64)pn[5] |
+ ((u64)pn[4] << 8) |
+ ((u64)pn[3] << 16) |
+ ((u64)pn[2] << 24) |
+ ((u64)pn[1] << 32) |
+ ((u64)pn[0] << 40));
+ } else
+ aes_sc = data->rsc_tsc->all_tsc_rsc.aes.multicast_rsc;
+
+ /*
+ * For non-QoS this relies on the fact that both the uCode and
+ * mac80211 use TID 0 for checking the IV in the frames.
+ */
+ for (i = 0; i < IWLAGN_NUM_RSC; i++) {
+ u8 *pn = seq.ccmp.pn;
+
+ ieee80211_get_key_rx_seq(key, i, &seq);
+ aes_sc->pn = cpu_to_le64(
+ (u64)pn[5] |
+ ((u64)pn[4] << 8) |
+ ((u64)pn[3] << 16) |
+ ((u64)pn[2] << 24) |
+ ((u64)pn[1] << 32) |
+ ((u64)pn[0] << 40));
+ }
+ data->use_rsc_tsc = true;
+ break;
+ }
+
+ mutex_unlock(&priv->shrd->mutex);
+}
+
+int iwlagn_send_patterns(struct iwl_priv *priv,
+ struct cfg80211_wowlan *wowlan)
+{
+ struct iwlagn_wowlan_patterns_cmd *pattern_cmd;
+ struct iwl_host_cmd cmd = {
+ .id = REPLY_WOWLAN_PATTERNS,
+ .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+ .flags = CMD_SYNC,
+ };
+ int i, err;
+
+ if (!wowlan->n_patterns)
+ return 0;
+
+ cmd.len[0] = sizeof(*pattern_cmd) +
+ wowlan->n_patterns * sizeof(struct iwlagn_wowlan_pattern);
+
+ pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL);
+ if (!pattern_cmd)
+ return -ENOMEM;
+
+ pattern_cmd->n_patterns = cpu_to_le32(wowlan->n_patterns);
+
+ for (i = 0; i < wowlan->n_patterns; i++) {
+ int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8);
+
+ memcpy(&pattern_cmd->patterns[i].mask,
+ wowlan->patterns[i].mask, mask_len);
+ memcpy(&pattern_cmd->patterns[i].pattern,
+ wowlan->patterns[i].pattern,
+ wowlan->patterns[i].pattern_len);
+ pattern_cmd->patterns[i].mask_size = mask_len;
+ pattern_cmd->patterns[i].pattern_size =
+ wowlan->patterns[i].pattern_len;
+ }
+
+ cmd.data[0] = pattern_cmd;
+ err = iwl_trans_send_cmd(trans(priv), &cmd);
+ kfree(pattern_cmd);
+ return err;
}
-void iwlagn_remove_notification(struct iwl_priv *priv,
- struct iwl_notification_wait *wait_entry)
+int iwlagn_suspend(struct iwl_priv *priv,
+ struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
{
- spin_lock_bh(&priv->notif_wait_lock);
- list_del(&wait_entry->list);
- spin_unlock_bh(&priv->notif_wait_lock);
+ struct iwlagn_wowlan_wakeup_filter_cmd wakeup_filter_cmd;
+ struct iwl_rxon_cmd rxon;
+ struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+ struct iwlagn_wowlan_kek_kck_material_cmd kek_kck_cmd;
+ struct iwlagn_wowlan_tkip_params_cmd tkip_cmd = {};
+ struct iwlagn_d3_config_cmd d3_cfg_cmd = {};
+ struct wowlan_key_data key_data = {
+ .ctx = ctx,
+ .bssid = ctx->active.bssid_addr,
+ .use_rsc_tsc = false,
+ .tkip = &tkip_cmd,
+ .use_tkip = false,
+ };
+ int ret, i;
+ u16 seq;
+
+ key_data.rsc_tsc = kzalloc(sizeof(*key_data.rsc_tsc), GFP_KERNEL);
+ if (!key_data.rsc_tsc)
+ return -ENOMEM;
+
+ memset(&wakeup_filter_cmd, 0, sizeof(wakeup_filter_cmd));
+
+ /*
+ * We know the last used seqno, and the uCode expects to know that
+ * one, it will increment before TX.
+ */
+ seq = le16_to_cpu(priv->last_seq_ctl) & IEEE80211_SCTL_SEQ;
+ wakeup_filter_cmd.non_qos_seq = cpu_to_le16(seq);
+
+ /*
+ * For QoS counters, we store the one to use next, so subtract 0x10
+ * since the uCode will add 0x10 before using the value.
+ */
+ for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
+ seq = priv->tid_data[IWL_AP_ID][i].seq_number;
+ seq -= 0x10;
+ wakeup_filter_cmd.qos_seq[i] = cpu_to_le16(seq);
+ }
+
+ if (wowlan->disconnect)
+ wakeup_filter_cmd.enabled |=
+ cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_BEACON_MISS |
+ IWLAGN_WOWLAN_WAKEUP_LINK_CHANGE);
+ if (wowlan->magic_pkt)
+ wakeup_filter_cmd.enabled |=
+ cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_MAGIC_PACKET);
+ if (wowlan->gtk_rekey_failure)
+ wakeup_filter_cmd.enabled |=
+ cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_GTK_REKEY_FAIL);
+ if (wowlan->eap_identity_req)
+ wakeup_filter_cmd.enabled |=
+ cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_EAP_IDENT_REQ);
+ if (wowlan->four_way_handshake)
+ wakeup_filter_cmd.enabled |=
+ cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_4WAY_HANDSHAKE);
+ if (wowlan->n_patterns)
+ wakeup_filter_cmd.enabled |=
+ cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_PATTERN_MATCH);
+
+ if (wowlan->rfkill_release)
+ d3_cfg_cmd.wakeup_flags |=
+ cpu_to_le32(IWLAGN_D3_WAKEUP_RFKILL);
+
+ iwl_scan_cancel_timeout(priv, 200);
+
+ memcpy(&rxon, &ctx->active, sizeof(rxon));
+
+ iwl_trans_stop_device(trans(priv));
+
+ priv->shrd->wowlan = true;
+
+ ret = iwl_load_ucode_wait_alive(trans(priv), IWL_UCODE_WOWLAN);
+ if (ret)
+ goto out;
+
+ /* now configure WoWLAN ucode */
+ ret = iwl_alive_start(priv);
+ if (ret)
+ goto out;
+
+ memcpy(&ctx->staging, &rxon, sizeof(rxon));
+ ret = iwlagn_commit_rxon(priv, ctx);
+ if (ret)
+ goto out;
+
+ ret = iwl_power_update_mode(priv, true);
+ if (ret)
+ goto out;
+
+ if (!iwlagn_mod_params.sw_crypto) {
+ /* mark all keys clear */
+ priv->ucode_key_table = 0;
+ ctx->key_mapping_keys = 0;
+
+ /*
+ * This needs to be unlocked due to lock ordering
+ * constraints. Since we're in the suspend path
+ * that isn't really a problem though.
+ */
+ mutex_unlock(&priv->shrd->mutex);
+ ieee80211_iter_keys(priv->hw, ctx->vif,
+ iwlagn_wowlan_program_keys,
+ &key_data);
+ mutex_lock(&priv->shrd->mutex);
+ if (key_data.error) {
+ ret = -EIO;
+ goto out;
+ }
+
+ if (key_data.use_rsc_tsc) {
+ struct iwl_host_cmd rsc_tsc_cmd = {
+ .id = REPLY_WOWLAN_TSC_RSC_PARAMS,
+ .flags = CMD_SYNC,
+ .data[0] = key_data.rsc_tsc,
+ .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+ .len[0] = sizeof(key_data.rsc_tsc),
+ };
+
+ ret = iwl_trans_send_cmd(trans(priv), &rsc_tsc_cmd);
+ if (ret)
+ goto out;
+ }
+
+ if (key_data.use_tkip) {
+ ret = iwl_trans_send_cmd_pdu(trans(priv),
+ REPLY_WOWLAN_TKIP_PARAMS,
+ CMD_SYNC, sizeof(tkip_cmd),
+ &tkip_cmd);
+ if (ret)
+ goto out;
+ }
+
+ if (priv->have_rekey_data) {
+ memset(&kek_kck_cmd, 0, sizeof(kek_kck_cmd));
+ memcpy(kek_kck_cmd.kck, priv->kck, NL80211_KCK_LEN);
+ kek_kck_cmd.kck_len = cpu_to_le16(NL80211_KCK_LEN);
+ memcpy(kek_kck_cmd.kek, priv->kek, NL80211_KEK_LEN);
+ kek_kck_cmd.kek_len = cpu_to_le16(NL80211_KEK_LEN);
+ kek_kck_cmd.replay_ctr = priv->replay_ctr;
+
+ ret = iwl_trans_send_cmd_pdu(trans(priv),
+ REPLY_WOWLAN_KEK_KCK_MATERIAL,
+ CMD_SYNC, sizeof(kek_kck_cmd),
+ &kek_kck_cmd);
+ if (ret)
+ goto out;
+ }
+ }
+
+ ret = iwl_trans_send_cmd_pdu(trans(priv), REPLY_D3_CONFIG, CMD_SYNC,
+ sizeof(d3_cfg_cmd), &d3_cfg_cmd);
+ if (ret)
+ goto out;
+
+ ret = iwl_trans_send_cmd_pdu(trans(priv), REPLY_WOWLAN_WAKEUP_FILTER,
+ CMD_SYNC, sizeof(wakeup_filter_cmd),
+ &wakeup_filter_cmd);
+ if (ret)
+ goto out;
+
+ ret = iwlagn_send_patterns(priv, wowlan);
+ out:
+ kfree(key_data.rsc_tsc);
+ return ret;
}
+#endif
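
The wakeup-filter setup above leans on the 802.11 sequence-control layout: the fragment number sits in the low 4 bits and the sequence number in the upper 12, so one sequence step is 0x10 in seq_ctrl units. That is why the driver masks with IEEE80211_SCTL_SEQ for the non-QoS counter and subtracts 0x10 from the stored "next to use" QoS counters. A minimal standalone sketch of that arithmetic, with made-up values rather than driver state:

#include <stdint.h>
#include <stdio.h>

#define SCTL_FRAG 0x000fu   /* low 4 bits: fragment number */
#define SCTL_SEQ  0xfff0u   /* upper 12 bits: sequence number */

int main(void)
{
	uint16_t last_seq_ctl = 0x02a3;             /* seq 0x2a, frag 3 */
	uint16_t frag = last_seq_ctl & SCTL_FRAG;
	uint16_t non_qos = last_seq_ctl & SCTL_SEQ; /* value handed to the uCode */

	uint16_t next_to_use = 0x0310;              /* stored "next" QoS counter */
	uint16_t reported = next_to_use - 0x10;     /* uCode re-adds 0x10 before TX */

	printf("fragment of last frame:        %u\n", (unsigned)frag);
	printf("non-QoS seq given to uCode:    0x%04x\n", (unsigned)non_qos);
	printf("QoS seq given to uCode:        0x%04x\n", (unsigned)reported);
	return 0;
}
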
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index 66118cea2af3..334b5ae8fdd4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -298,7 +298,7 @@ static u8 rs_tl_add_packet(struct iwl_lq_sta *lq_data,
} else
return IWL_MAX_TID_COUNT;
- if (unlikely(tid >= TID_MAX_LOAD_COUNT))
+ if (unlikely(tid >= IWL_MAX_TID_COUNT))
return IWL_MAX_TID_COUNT;
tl = &lq_data->load[tid];
@@ -352,7 +352,7 @@ static void rs_program_fix_rate(struct iwl_priv *priv,
lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
lq_sta->active_mimo3_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
-#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL
+#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
	/* testmode has higher priority to overwrite the fixed rate */
if (priv->tm_fixed_rate)
lq_sta->dbg_fixed_rate = priv->tm_fixed_rate;
@@ -379,7 +379,7 @@ static u32 rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid)
s32 index;
struct iwl_traffic_load *tl = NULL;
- if (tid >= TID_MAX_LOAD_COUNT)
+ if (tid >= IWL_MAX_TID_COUNT)
return 0;
tl = &(lq_data->load[tid]);
@@ -444,11 +444,11 @@ static void rs_tl_turn_on_agg(struct iwl_priv *priv, u8 tid,
struct iwl_lq_sta *lq_data,
struct ieee80211_sta *sta)
{
- if (tid < TID_MAX_LOAD_COUNT)
+ if (tid < IWL_MAX_TID_COUNT)
rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta);
else
- IWL_ERR(priv, "tid exceeds max load count: %d/%d\n",
- tid, TID_MAX_LOAD_COUNT);
+ IWL_ERR(priv, "tid exceeds max TID count: %d/%d\n",
+ tid, IWL_MAX_TID_COUNT);
}
static inline int get_num_of_ant_from_rate(u32 rate_n_flags)
@@ -1081,12 +1081,12 @@ done:
if (sta && sta->supp_rates[sband->band])
rs_rate_scale_perform(priv, skb, sta, lq_sta);
-#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_IWLWIFI_DEVICE_SVTOOL)
+#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_IWLWIFI_DEVICE_TESTMODE)
if ((priv->tm_fixed_rate) &&
(priv->tm_fixed_rate != lq_sta->dbg_fixed_rate))
rs_program_fix_rate(priv, lq_sta);
#endif
- if (priv->cfg->bt_params && priv->cfg->bt_params->advanced_bt_coexist)
+ if (cfg(priv)->bt_params && cfg(priv)->bt_params->advanced_bt_coexist)
rs_bt_update_lq(priv, ctx, lq_sta);
}
@@ -1458,10 +1458,8 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
break;
case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
/* avoid antenna B unless MIMO */
- valid_tx_ant =
- first_antenna(hw_params(priv).valid_tx_ant);
if (tbl->action == IWL_LEGACY_SWITCH_ANTENNA2)
- tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
+ tbl->action = IWL_LEGACY_SWITCH_SISO;
break;
case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
@@ -1636,10 +1634,8 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
break;
case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
/* avoid antenna B unless MIMO */
- valid_tx_ant =
- first_antenna(hw_params(priv).valid_tx_ant);
if (tbl->action == IWL_SISO_SWITCH_ANTENNA2)
- tbl->action = IWL_SISO_SWITCH_ANTENNA1;
+ tbl->action = IWL_SISO_SWITCH_MIMO2_AB;
break;
case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
@@ -2277,7 +2273,7 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
tid = rs_tl_add_packet(lq_sta, hdr);
if ((tid != IWL_MAX_TID_COUNT) &&
(lq_sta->tx_agg_tid_en & (1 << tid))) {
- tid_data = &priv->shrd->tid_data[lq_sta->lq.sta_id][tid];
+ tid_data = &priv->tid_data[lq_sta->lq.sta_id][tid];
if (tid_data->agg.state == IWL_AGG_OFF)
lq_sta->is_agg = 0;
else
@@ -2649,8 +2645,7 @@ lq_update:
(lq_sta->tx_agg_tid_en & (1 << tid)) &&
(tid != IWL_MAX_TID_COUNT)) {
u8 sta_id = lq_sta->lq.sta_id;
- tid_data =
- &priv->shrd->tid_data[sta_id][tid];
+ tid_data = &priv->tid_data[sta_id][tid];
if (tid_data->agg.state == IWL_AGG_OFF) {
IWL_DEBUG_RATE(priv,
"try to aggregate tid %d\n",
@@ -2908,7 +2903,7 @@ void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_i
if (sband->band == IEEE80211_BAND_5GHZ)
lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
lq_sta->is_agg = 0;
-#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL
+#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
priv->tm_fixed_rate = 0;
#endif
#ifdef CONFIG_MAC80211_DEBUGFS
@@ -3059,11 +3054,11 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
* overwrite if needed, pass aggregation time limit
* to uCode in uSec
*/
- if (priv && priv->cfg->bt_params &&
- priv->cfg->bt_params->agg_time_limit &&
+ if (priv && cfg(priv)->bt_params &&
+ cfg(priv)->bt_params->agg_time_limit &&
priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)
lq_cmd->agg_params.agg_time_limit =
- cpu_to_le16(priv->cfg->bt_params->agg_time_limit);
+ cpu_to_le16(cfg(priv)->bt_params->agg_time_limit);
}
static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
index f4f6deb829ae..6675b3c816d9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
@@ -281,7 +281,6 @@ enum {
#define TID_QUEUE_CELL_SPACING 50 /*mS */
#define TID_QUEUE_MAX_SIZE 20
#define TID_ROUND_VALUE 5 /* mS */
-#define TID_MAX_LOAD_COUNT 8
#define TID_MAX_TIME_DIFF ((TID_QUEUE_MAX_SIZE - 1) * TID_QUEUE_CELL_SPACING)
#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? (y) - (x) : (0-(x)) + (y))
@@ -402,7 +401,7 @@ struct iwl_lq_sta {
struct iwl_link_quality_cmd lq;
struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */
- struct iwl_traffic_load load[TID_MAX_LOAD_COUNT];
+ struct iwl_traffic_load load[IWL_MAX_TID_COUNT];
u8 tx_agg_tid_en;
#ifdef CONFIG_MAC80211_DEBUGFS
struct dentry *rs_sta_dbgfs_scale_table_file;
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rx.c b/drivers/net/wireless/iwlwifi/iwl-agn-rx.c
index 5af9e6258a16..b22b2976f899 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rx.c
@@ -117,6 +117,7 @@ const char *get_cmd_string(u8 cmd)
IWL_CMD(REPLY_WOWLAN_TKIP_PARAMS);
IWL_CMD(REPLY_WOWLAN_KEK_KCK_MATERIAL);
IWL_CMD(REPLY_WOWLAN_GET_STATUS);
+ IWL_CMD(REPLY_D3_CONFIG);
default:
return "UNKNOWN";
@@ -317,7 +318,7 @@ static bool iwlagn_good_plcp_health(struct iwl_priv *priv,
unsigned int msecs)
{
int delta;
- int threshold = priv->cfg->base_params->plcp_delta_threshold;
+ int threshold = cfg(priv)->base_params->plcp_delta_threshold;
if (threshold == IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) {
IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n");
@@ -582,8 +583,8 @@ static int iwlagn_rx_statistics(struct iwl_priv *priv,
iwlagn_rx_calc_noise(priv);
queue_work(priv->shrd->workqueue, &priv->run_time_calib_work);
}
- if (priv->cfg->lib->temperature && change)
- priv->cfg->lib->temperature(priv);
+ if (cfg(priv)->lib->temperature && change)
+ cfg(priv)->lib->temperature(priv);
return 0;
}
@@ -800,7 +801,8 @@ static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv,
ctx->active.bssid_addr))
continue;
ctx->last_tx_rejected = false;
- iwl_trans_wake_any_queue(trans(priv), ctx->ctxid);
+ iwl_trans_wake_any_queue(trans(priv), ctx->ctxid,
+ "channel got active");
}
}
@@ -1032,6 +1034,50 @@ static int iwlagn_rx_reply_rx(struct iwl_priv *priv,
return 0;
}
+static int iwlagn_rx_noa_notification(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb,
+ struct iwl_device_cmd *cmd)
+{
+ struct iwl_wipan_noa_data *new_data, *old_data;
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_wipan_noa_notification *noa_notif = (void *)pkt->u.raw;
+
+ /* no condition -- we're in softirq */
+ old_data = rcu_dereference_protected(priv->noa_data, true);
+
+ if (noa_notif->noa_active) {
+ u32 len = le16_to_cpu(noa_notif->noa_attribute.length);
+ u32 copylen = len;
+
+ /* EID, len, OUI, subtype */
+ len += 1 + 1 + 3 + 1;
+ /* P2P id, P2P length */
+ len += 1 + 2;
+ copylen += 1 + 2;
+
+ new_data = kmalloc(sizeof(*new_data) + len, GFP_ATOMIC);
+ if (new_data) {
+ new_data->length = len;
+ new_data->data[0] = WLAN_EID_VENDOR_SPECIFIC;
+ new_data->data[1] = len - 2; /* not counting EID, len */
+ new_data->data[2] = (WLAN_OUI_WFA >> 16) & 0xff;
+ new_data->data[3] = (WLAN_OUI_WFA >> 8) & 0xff;
+ new_data->data[4] = (WLAN_OUI_WFA >> 0) & 0xff;
+ new_data->data[5] = WLAN_OUI_TYPE_WFA_P2P;
+ memcpy(&new_data->data[6], &noa_notif->noa_attribute,
+ copylen);
+ }
+ } else
+ new_data = NULL;
+
+ rcu_assign_pointer(priv->noa_data, new_data);
+
+ if (old_data)
+ kfree_rcu(old_data, rcu_head);
+
+ return 0;
+}
+
/**
* iwl_setup_rx_handlers - Initialize Rx handler callbacks
*
@@ -1055,6 +1101,8 @@ void iwl_setup_rx_handlers(struct iwl_priv *priv)
handlers[BEACON_NOTIFICATION] = iwlagn_rx_beacon_notif;
handlers[REPLY_ADD_STA] = iwl_add_sta_callback;
+ handlers[REPLY_WIPAN_NOA_NOTIFICATION] = iwlagn_rx_noa_notification;
+
/*
* The same handler is used for both the REPLY to a discrete
* statistics request from the host as well as for the periodic
@@ -1083,13 +1131,13 @@ void iwl_setup_rx_handlers(struct iwl_priv *priv)
priv->rx_handlers[REPLY_TX] = iwlagn_rx_reply_tx;
/* set up notification wait support */
- spin_lock_init(&priv->notif_wait_lock);
- INIT_LIST_HEAD(&priv->notif_waits);
- init_waitqueue_head(&priv->notif_waitq);
+ spin_lock_init(&priv->shrd->notif_wait_lock);
+ INIT_LIST_HEAD(&priv->shrd->notif_waits);
+ init_waitqueue_head(&priv->shrd->notif_waitq);
/* Set up BT Rx handlers */
- if (priv->cfg->lib->bt_rx_handler_setup)
- priv->cfg->lib->bt_rx_handler_setup(priv);
+ if (cfg(priv)->lib->bt_rx_handler_setup)
+ cfg(priv)->lib->bt_rx_handler_setup(priv);
}
@@ -1104,11 +1152,11 @@ int iwl_rx_dispatch(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
* even if the RX handler consumes the RXB we have
* access to it in the notification wait entry.
*/
- if (!list_empty(&priv->notif_waits)) {
+ if (!list_empty(&priv->shrd->notif_waits)) {
struct iwl_notification_wait *w;
- spin_lock(&priv->notif_wait_lock);
- list_for_each_entry(w, &priv->notif_waits, list) {
+ spin_lock(&priv->shrd->notif_wait_lock);
+ list_for_each_entry(w, &priv->shrd->notif_waits, list) {
if (w->cmd != pkt->hdr.cmd)
continue;
IWL_DEBUG_RX(priv,
@@ -1117,11 +1165,11 @@ int iwl_rx_dispatch(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
pkt->hdr.cmd);
w->triggered = true;
if (w->fn)
- w->fn(priv, pkt, w->fn_data);
+ w->fn(trans(priv), pkt, w->fn_data);
}
- spin_unlock(&priv->notif_wait_lock);
+ spin_unlock(&priv->shrd->notif_wait_lock);
- wake_up_all(&priv->notif_waitq);
+ wake_up_all(&priv->shrd->notif_waitq);
}
if (priv->pre_rx_handler)
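
The iwlagn_rx_noa_notification() hunk above wraps the firmware's NoA attribute in a vendor-specific IE before it is appended to probe responses; the extra bytes it budgets are the IE header (element ID and length), the three-byte WFA OUI plus OUI subtype, and the P2P attribute id/length header that is copied together with the attribute body. A rough sketch of that length bookkeeping, using an illustrative attribute size rather than the driver's structures:

#include <stdio.h>

int main(void)
{
	unsigned attr_len = 13;          /* length field of the NoA attribute */
	unsigned copylen  = attr_len;
	unsigned len      = attr_len;

	len += 1 + 1 + 3 + 1;            /* element ID, IE length, OUI, OUI subtype */
	len += 1 + 2;                    /* P2P attribute id, attribute length */
	copylen += 1 + 2;                /* attribute header travels with its body */

	printf("allocated IE bytes: %u, bytes copied from notification: %u\n",
	       len, copylen);
	return 0;
}
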
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
index 58a381c01c89..1c6659416621 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
@@ -45,7 +45,8 @@ static int iwlagn_disable_bss(struct iwl_priv *priv,
send->filter_flags = old_filter;
if (ret)
- IWL_ERR(priv, "Error clearing ASSOC_MSK on BSS (%d)\n", ret);
+ IWL_DEBUG_QUIET_RFKILL(priv,
+ "Error clearing ASSOC_MSK on BSS (%d)\n", ret);
return ret;
}
@@ -59,7 +60,7 @@ static int iwlagn_disable_pan(struct iwl_priv *priv,
u8 old_dev_type = send->dev_type;
int ret;
- iwlagn_init_notification_wait(priv, &disable_wait,
+ iwl_init_notification_wait(priv->shrd, &disable_wait,
REPLY_WIPAN_DEACTIVATION_COMPLETE,
NULL, NULL);
@@ -73,9 +74,9 @@ static int iwlagn_disable_pan(struct iwl_priv *priv,
if (ret) {
IWL_ERR(priv, "Error disabling PAN (%d)\n", ret);
- iwlagn_remove_notification(priv, &disable_wait);
+ iwl_remove_notification(priv->shrd, &disable_wait);
} else {
- ret = iwlagn_wait_notification(priv, &disable_wait, HZ);
+ ret = iwl_wait_notification(priv->shrd, &disable_wait, HZ);
if (ret)
IWL_ERR(priv, "Timed out waiting for PAN disable\n");
}
@@ -116,7 +117,7 @@ static void iwlagn_update_qos(struct iwl_priv *priv,
if (ctx->ht.enabled)
ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
- IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
+ IWL_DEBUG_INFO(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
ctx->qos_data.qos_active,
ctx->qos_data.def_qos_parm.qos_flags);
@@ -124,7 +125,7 @@ static void iwlagn_update_qos(struct iwl_priv *priv,
sizeof(struct iwl_qosparam_cmd),
&ctx->qos_data.def_qos_parm);
if (ret)
- IWL_ERR(priv, "Failed to update QoS\n");
+ IWL_DEBUG_QUIET_RFKILL(priv, "Failed to update QoS\n");
}
static int iwlagn_update_beacon(struct iwl_priv *priv,
@@ -295,9 +296,9 @@ static int iwlagn_rxon_connect(struct iwl_priv *priv,
}
if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION &&
- priv->cfg->ht_params && priv->cfg->ht_params->smps_mode)
+ cfg(priv)->ht_params && cfg(priv)->ht_params->smps_mode)
ieee80211_request_smps(ctx->vif,
- priv->cfg->ht_params->smps_mode);
+ cfg(priv)->ht_params->smps_mode);
return 0;
}
@@ -444,8 +445,8 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
* force CTS-to-self frames protection if RTS-CTS is not preferred
* one aggregation protection method
*/
- if (!(priv->cfg->ht_params &&
- priv->cfg->ht_params->use_rts_for_aggregation))
+ if (!(cfg(priv)->ht_params &&
+ cfg(priv)->ht_params->use_rts_for_aggregation))
ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) ||
@@ -528,6 +529,24 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
return 0;
}
+void iwlagn_config_ht40(struct ieee80211_conf *conf,
+ struct iwl_rxon_context *ctx)
+{
+ if (conf_is_ht40_minus(conf)) {
+ ctx->ht.extension_chan_offset =
+ IEEE80211_HT_PARAM_CHA_SEC_BELOW;
+ ctx->ht.is_40mhz = true;
+ } else if (conf_is_ht40_plus(conf)) {
+ ctx->ht.extension_chan_offset =
+ IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+ ctx->ht.is_40mhz = true;
+ } else {
+ ctx->ht.extension_chan_offset =
+ IEEE80211_HT_PARAM_CHA_SEC_NONE;
+ ctx->ht.is_40mhz = false;
+ }
+}
+
int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
{
struct iwl_priv *priv = hw->priv;
@@ -541,6 +560,9 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
mutex_lock(&priv->shrd->mutex);
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
+ goto out;
+
if (unlikely(test_bit(STATUS_SCANNING, &priv->shrd->status))) {
IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
goto out;
@@ -586,19 +608,11 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
ctx->ht.enabled = conf_is_ht(conf);
if (ctx->ht.enabled) {
- if (conf_is_ht40_minus(conf)) {
- ctx->ht.extension_chan_offset =
- IEEE80211_HT_PARAM_CHA_SEC_BELOW;
- ctx->ht.is_40mhz = true;
- } else if (conf_is_ht40_plus(conf)) {
- ctx->ht.extension_chan_offset =
- IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
- ctx->ht.is_40mhz = true;
- } else {
- ctx->ht.extension_chan_offset =
- IEEE80211_HT_PARAM_CHA_SEC_NONE;
- ctx->ht.is_40mhz = false;
- }
+	/* if HT40 is used, it should not change
+	 * after association except on channel switch */
+ if (!ctx->ht.is_40mhz ||
+ !iwl_is_associated_ctx(ctx))
+ iwlagn_config_ht40(conf, ctx);
} else
ctx->ht.is_40mhz = false;
@@ -840,7 +854,8 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
if (ctx->last_tx_rejected) {
ctx->last_tx_rejected = false;
iwl_trans_wake_any_queue(trans(priv),
- ctx->ctxid);
+ ctx->ctxid,
+ "Disassoc: flush queue");
}
ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
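
The PAN-disable hunk above shows the usual notification-wait shape after the helpers moved to the shared layer: register interest in REPLY_WIPAN_DEACTIVATION_COMPLETE, send the command, then either drop the waiter on a send failure or block on it with a one-second timeout. A hypothetical, self-contained rendering of that control flow; the wait_*/send_* names below are stand-ins, not the driver's API:

#include <stdio.h>

struct waiter { int armed; };

/* Stand-ins for the driver's notification-wait helpers. */
static void wait_init(struct waiter *w)   { w->armed = 1; }
static void wait_remove(struct waiter *w) { w->armed = 0; }
static int  wait_block(struct waiter *w, int timeout_s)
{
	(void)timeout_s;
	return w->armed ? 0 : -1;        /* pretend the notification arrived */
}
static int send_disable_cmd(void)         { return 0; }

int main(void)
{
	struct waiter disable_wait;
	int ret;

	wait_init(&disable_wait);                 /* arm before sending */
	ret = send_disable_cmd();
	if (ret) {
		wait_remove(&disable_wait);       /* never sent: drop the waiter */
	} else {
		ret = wait_block(&disable_wait, 1 /* one second */);
		if (ret)
			printf("Timed out waiting for PAN disable\n");
	}
	return ret;
}
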
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
index ed6283623932..7353826095f1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
@@ -130,25 +130,15 @@ int iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
return iwl_process_add_sta_resp(priv, addsta, pkt);
}
-static u16 iwlagn_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data)
-{
- u16 size = (u16)sizeof(struct iwl_addsta_cmd);
- struct iwl_addsta_cmd *addsta = (struct iwl_addsta_cmd *)data;
- memcpy(addsta, cmd, size);
- /* resrved in 5000 */
- addsta->rate_n_flags = cpu_to_le16(0);
- return size;
-}
-
int iwl_send_add_sta(struct iwl_priv *priv,
struct iwl_addsta_cmd *sta, u8 flags)
{
int ret = 0;
- u8 data[sizeof(*sta)];
struct iwl_host_cmd cmd = {
.id = REPLY_ADD_STA,
.flags = flags,
- .data = { data, },
+ .data = { sta, },
+ .len = { sizeof(*sta), },
};
u8 sta_id __maybe_unused = sta->sta.sta_id;
@@ -160,7 +150,6 @@ int iwl_send_add_sta(struct iwl_priv *priv,
might_sleep();
}
- cmd.len[0] = iwlagn_build_addsta_hcmd(sta, data);
ret = iwl_trans_send_cmd(trans(priv), &cmd);
if (ret || (flags & CMD_ASYNC))
@@ -463,6 +452,7 @@ int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id,
const u8 *addr)
{
unsigned long flags;
+ u8 tid;
if (!iwl_is_ready(priv->shrd)) {
IWL_DEBUG_INFO(priv,
@@ -501,6 +491,10 @@ int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id,
priv->stations[sta_id].lq = NULL;
}
+ for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++)
+ memset(&priv->tid_data[sta_id][tid], 0,
+ sizeof(priv->tid_data[sta_id][tid]));
+
priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
priv->num_stations--;
@@ -647,7 +641,7 @@ void iwl_reprogram_ap_sta(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
int ret;
struct iwl_addsta_cmd sta_cmd;
struct iwl_link_quality_cmd lq;
- bool active;
+ bool active, have_lq = false;
spin_lock_irqsave(&priv->shrd->sta_lock, flags);
if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
@@ -657,7 +651,10 @@ void iwl_reprogram_ap_sta(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(sta_cmd));
sta_cmd.mode = 0;
- memcpy(&lq, priv->stations[sta_id].lq, sizeof(lq));
+ if (priv->stations[sta_id].lq) {
+ memcpy(&lq, priv->stations[sta_id].lq, sizeof(lq));
+ have_lq = true;
+ }
active = priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE;
priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
@@ -679,7 +676,8 @@ void iwl_reprogram_ap_sta(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
if (ret)
IWL_ERR(priv, "failed to re-add STA %pM (%d)\n",
priv->stations[sta_id].sta.sta.addr, ret);
- iwl_send_lq_cmd(priv, ctx, &lq, CMD_SYNC, true);
+ if (have_lq)
+ iwl_send_lq_cmd(priv, ctx, &lq, CMD_SYNC, true);
}
int iwl_get_free_ucode_key_offset(struct iwl_priv *priv)
@@ -825,28 +823,6 @@ int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
return ret;
}
-int iwlagn_mac_sta_remove(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
- int ret;
-
- IWL_DEBUG_MAC80211(priv, "enter: received request to remove "
- "station %pM\n", sta->addr);
- mutex_lock(&priv->shrd->mutex);
- IWL_DEBUG_INFO(priv, "proceeding to remove station %pM\n",
- sta->addr);
- ret = iwl_remove_station(priv, sta_priv->sta_id, sta->addr);
- if (ret)
- IWL_ERR(priv, "Error removing station %pM\n",
- sta->addr);
- mutex_unlock(&priv->shrd->mutex);
- IWL_DEBUG_MAC80211(priv, "leave\n");
-
- return ret;
-}
void iwl_sta_fill_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
u8 sta_id, struct iwl_link_quality_cmd *link_cmd)
@@ -1268,9 +1244,6 @@ int iwl_set_dynamic_key(struct iwl_priv *priv,
switch (keyconf->cipher) {
case WLAN_CIPHER_SUITE_TKIP:
- keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
- keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
-
if (sta)
addr = sta->addr;
else /* station mode case only */
@@ -1283,8 +1256,6 @@ int iwl_set_dynamic_key(struct iwl_priv *priv,
seq.tkip.iv32, p1k, CMD_SYNC);
break;
case WLAN_CIPHER_SUITE_CCMP:
- keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
- /* fall through */
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
ret = iwlagn_send_sta_key(priv, keyconf, sta_id,
@@ -1464,20 +1435,7 @@ int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
}
-static void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id)
-{
- unsigned long flags;
- spin_lock_irqsave(&priv->shrd->sta_lock, flags);
- priv->stations[sta_id].sta.station_flags &= ~STA_FLG_PWR_SAVE_MSK;
- priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
- priv->stations[sta_id].sta.sta.modify_mask = 0;
- priv->stations[sta_id].sta.sleep_tx_count = 0;
- priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
- iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
-
-}
void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt)
{
@@ -1494,36 +1452,3 @@ void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt)
spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
}
-
-void iwlagn_mac_sta_notify(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- enum sta_notify_cmd cmd,
- struct ieee80211_sta *sta)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
- int sta_id;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- switch (cmd) {
- case STA_NOTIFY_SLEEP:
- WARN_ON(!sta_priv->client);
- sta_priv->asleep = true;
- if (atomic_read(&sta_priv->pending_frames) > 0)
- ieee80211_sta_block_awake(hw, sta, true);
- break;
- case STA_NOTIFY_AWAKE:
- WARN_ON(!sta_priv->client);
- if (!sta_priv->asleep)
- break;
- sta_priv->asleep = false;
- sta_id = iwl_sta_id(sta);
- if (sta_id != IWL_INVALID_STATION)
- iwl_sta_modify_ps_wake(priv, sta_id);
- break;
- default:
- break;
- }
- IWL_DEBUG_MAC80211(priv, "leave\n");
-}
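
The REPLY_ADD_STA change above drops the intermediate stack copy and instead points the host command directly at the caller's structure, stating its length explicitly in the command descriptor. A toy illustration of that pattern; struct host_cmd here is a hypothetical stand-in, the real struct iwl_host_cmd carries more fields (flags, dataflags and so on):

#include <stdio.h>

struct host_cmd {
	int id;
	const void *data[2];
	unsigned short len[2];
};

struct addsta_cmd {
	unsigned char sta_id;
	unsigned char mode;
	/* ... */
};

static int send_cmd(const struct host_cmd *cmd)
{
	printf("cmd %d, fragment 0: %u bytes\n", cmd->id, (unsigned)cmd->len[0]);
	return 0;
}

int main(void)
{
	struct addsta_cmd sta = { .sta_id = 2, .mode = 0 };

	/* Point the command at the caller's structure and give its length
	 * explicitly, instead of memcpy()ing it into a scratch buffer first. */
	struct host_cmd cmd = {
		.id = 0x18,              /* illustrative command id */
		.data = { &sta, },
		.len = { sizeof(sta), },
	};

	return send_cmd(&cmd);
}
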
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tt.c b/drivers/net/wireless/iwlwifi/iwl-agn-tt.c
index c27180a73351..b0dff7a753a5 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tt.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tt.c
@@ -633,7 +633,7 @@ void iwl_tt_initialize(struct iwl_priv *priv)
INIT_WORK(&priv->ct_enter, iwl_bg_ct_enter);
INIT_WORK(&priv->ct_exit, iwl_bg_ct_exit);
- if (priv->cfg->base_params->adv_thermal_throttle) {
+ if (cfg(priv)->base_params->adv_thermal_throttle) {
IWL_DEBUG_TEMP(priv, "Advanced Thermal Throttling\n");
tt->restriction = kcalloc(IWL_TI_STATE_MAX,
sizeof(struct iwl_tt_restriction),
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
index 35a6b71f358c..c664c2726553 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
@@ -74,8 +74,8 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
else if (ieee80211_is_back_req(fc))
tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
else if (info->band == IEEE80211_BAND_2GHZ &&
- priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist &&
+ cfg(priv)->bt_params &&
+ cfg(priv)->bt_params->advanced_bt_coexist &&
(ieee80211_is_auth(fc) || ieee80211_is_assoc_req(fc) ||
ieee80211_is_reassoc_req(fc) ||
skb->protocol == cpu_to_be16(ETH_P_PAE)))
@@ -91,7 +91,10 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
tx_cmd->tid_tspec = qc[0] & 0xf;
tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
} else {
- tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+ if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
+ tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+ else
+ tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
}
iwlagn_tx_cmd_protection(priv, info, fc, &tx_flags);
@@ -148,7 +151,7 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
if (ieee80211_is_data(fc)) {
tx_cmd->initial_rate_index = 0;
tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
-#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL
+#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
if (priv->tm_fixed_rate) {
/*
* rate overwrite by testmode
@@ -161,7 +164,8 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
}
#endif
return;
- }
+ } else if (ieee80211_is_back_req(fc))
+ tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
/**
* If the current TX rate stored in mac80211 has the MCS bit set, it's
@@ -187,8 +191,8 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
rate_flags |= RATE_MCS_CCK_MSK;
/* Set up antennas */
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist &&
+ if (cfg(priv)->bt_params &&
+ cfg(priv)->bt_params->advanced_bt_coexist &&
priv->bt_full_concurrent) {
/* operated as 1x1 in full concurrency mode */
priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
@@ -258,8 +262,8 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
__le16 fc;
u8 hdr_len;
- u16 len;
- u8 sta_id;
+ u16 len, seq_number = 0;
+ u8 sta_id, tid = IWL_MAX_TID_COUNT;
unsigned long flags;
bool is_agg = false;
@@ -283,6 +287,19 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
#endif
+ if (unlikely(ieee80211_is_probe_resp(fc))) {
+ struct iwl_wipan_noa_data *noa_data =
+ rcu_dereference(priv->noa_data);
+
+ if (noa_data &&
+ pskb_expand_head(skb, 0, noa_data->length,
+ GFP_ATOMIC) == 0) {
+ memcpy(skb_put(skb, noa_data->length),
+ noa_data->data, noa_data->length);
+ hdr = (struct ieee80211_hdr *)skb->data;
+ }
+ }
+
hdr_len = ieee80211_hdrlen(fc);
	/* For management frames use broadcast id so as not to break aggregation */
@@ -351,9 +368,51 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
info->driver_data[0] = ctx;
info->driver_data[1] = dev_cmd;
- if (iwl_trans_tx(trans(priv), skb, dev_cmd, ctx->ctxid, sta_id))
+ if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
+ u8 *qc = NULL;
+ struct iwl_tid_data *tid_data;
+ qc = ieee80211_get_qos_ctl(hdr);
+ tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
+ if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
+ goto drop_unlock_sta;
+ tid_data = &priv->tid_data[sta_id][tid];
+
+ /* aggregation is on for this <sta,tid> */
+ if (info->flags & IEEE80211_TX_CTL_AMPDU &&
+ tid_data->agg.state != IWL_AGG_ON) {
+ IWL_ERR(priv, "TX_CTL_AMPDU while not in AGG:"
+ " Tx flags = 0x%08x, agg.state = %d",
+ info->flags, tid_data->agg.state);
+ IWL_ERR(priv, "sta_id = %d, tid = %d seq_num = %d",
+ sta_id, tid, SEQ_TO_SN(tid_data->seq_number));
+ goto drop_unlock_sta;
+ }
+
+ /* We can receive packets from the stack in IWL_AGG_{ON,OFF}
+ * only. Check this here.
+ */
+ if (WARN_ONCE(tid_data->agg.state != IWL_AGG_ON &&
+ tid_data->agg.state != IWL_AGG_OFF,
+ "Tx while agg.state = %d", tid_data->agg.state))
+ goto drop_unlock_sta;
+
+ seq_number = tid_data->seq_number;
+ seq_number &= IEEE80211_SCTL_SEQ;
+ hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
+ hdr->seq_ctrl |= cpu_to_le16(seq_number);
+ seq_number += 0x10;
+ }
+
+ /* Copy MAC header from skb into command buffer */
+ memcpy(tx_cmd->hdr, hdr, hdr_len);
+
+ if (iwl_trans_tx(trans(priv), skb, dev_cmd, ctx->ctxid, sta_id, tid))
goto drop_unlock_sta;
+ if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc) &&
+ !ieee80211_has_morefrags(fc))
+ priv->tid_data[sta_id][tid].seq_number = seq_number;
+
spin_unlock(&priv->shrd->sta_lock);
spin_unlock_irqrestore(&priv->shrd->lock, flags);
@@ -378,10 +437,81 @@ drop_unlock_priv:
return -1;
}
+int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid)
+{
+ struct iwl_tid_data *tid_data;
+ unsigned long flags;
+ int sta_id;
+
+ sta_id = iwl_sta_id(sta);
+
+ if (sta_id == IWL_INVALID_STATION) {
+ IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
+ return -ENXIO;
+ }
+
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags);
+
+ tid_data = &priv->tid_data[sta_id][tid];
+
+ switch (priv->tid_data[sta_id][tid].agg.state) {
+ case IWL_EMPTYING_HW_QUEUE_ADDBA:
+ /*
+ * This can happen if the peer stops aggregation
+ * again before we've had a chance to drain the
+ * queue we selected previously, i.e. before the
+	 * queue we selected previously, i.e. before the
+	 * session was completely set up.
+ IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
+ goto turn_off;
+ case IWL_AGG_ON:
+ break;
+ default:
+ IWL_WARN(priv, "Stopping AGG while state not ON "
+ "or starting for %d on %d (%d)\n", sta_id, tid,
+ priv->tid_data[sta_id][tid].agg.state);
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+ return 0;
+ }
+
+ tid_data->agg.ssn = SEQ_TO_SN(tid_data->seq_number);
+
+ /* There are still packets for this RA / TID in the HW */
+ if (tid_data->agg.ssn != tid_data->next_reclaimed) {
+ IWL_DEBUG_TX_QUEUES(priv, "Can't proceed: ssn %d, "
+ "next_recl = %d",
+ tid_data->agg.ssn,
+ tid_data->next_reclaimed);
+ priv->tid_data[sta_id][tid].agg.state =
+ IWL_EMPTYING_HW_QUEUE_DELBA;
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+ return 0;
+ }
+
+ IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d",
+ tid_data->agg.ssn);
+turn_off:
+ priv->tid_data[sta_id][tid].agg.state = IWL_AGG_OFF;
+
+ /* do not restore/save irqs */
+ spin_unlock(&priv->shrd->sta_lock);
+ spin_lock(&priv->shrd->lock);
+
+ iwl_trans_tx_agg_disable(trans(priv), sta_id, tid);
+
+ spin_unlock_irqrestore(&priv->shrd->lock, flags);
+
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+
+ return 0;
+}
+
int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
- struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+ struct iwl_tid_data *tid_data;
+ unsigned long flags;
int sta_id;
int ret;
@@ -396,7 +526,7 @@ int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
if (unlikely(tid >= IWL_MAX_TID_COUNT))
return -EINVAL;
- if (priv->shrd->tid_data[sta_id][tid].agg.state != IWL_AGG_OFF) {
+ if (priv->tid_data[sta_id][tid].agg.state != IWL_AGG_OFF) {
IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
return -ENXIO;
}
@@ -405,27 +535,136 @@ int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
if (ret)
return ret;
- ret = iwl_trans_tx_agg_alloc(trans(priv), vif_priv->ctx->ctxid, sta_id,
- tid, ssn);
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags);
+
+ tid_data = &priv->tid_data[sta_id][tid];
+ tid_data->agg.ssn = SEQ_TO_SN(tid_data->seq_number);
+
+ *ssn = tid_data->agg.ssn;
+
+ ret = iwl_trans_tx_agg_alloc(trans(priv), sta_id, tid);
+ if (ret) {
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+ return ret;
+ }
+
+ if (*ssn == tid_data->next_reclaimed) {
+ IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d",
+ tid_data->agg.ssn);
+ tid_data->agg.state = IWL_AGG_ON;
+ ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ } else {
+ IWL_DEBUG_TX_QUEUES(priv, "Can't proceed: ssn %d, "
+ "next_reclaimed = %d",
+ tid_data->agg.ssn,
+ tid_data->next_reclaimed);
+ tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
+ }
+
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
return ret;
}
-int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, u16 tid)
+int iwlagn_tx_agg_oper(struct iwl_priv *priv, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid, u8 buf_size)
{
- int sta_id;
- struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+ struct iwl_station_priv *sta_priv = (void *) sta->drv_priv;
+ struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
+ unsigned long flags;
+ u16 ssn;
- sta_id = iwl_sta_id(sta);
+ buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);
- if (sta_id == IWL_INVALID_STATION) {
- IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
- return -ENXIO;
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags);
+ ssn = priv->tid_data[sta_priv->sta_id][tid].agg.ssn;
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+
+ iwl_trans_tx_agg_setup(trans(priv), ctx->ctxid, sta_priv->sta_id, tid,
+ buf_size, ssn);
+
+ /*
+ * If the limit is 0, then it wasn't initialised yet,
+ * use the default. We can do that since we take the
+ * minimum below, and we don't want to go above our
+ * default due to hardware restrictions.
+ */
+ if (sta_priv->max_agg_bufsize == 0)
+ sta_priv->max_agg_bufsize =
+ LINK_QUAL_AGG_FRAME_LIMIT_DEF;
+
+ /*
+ * Even though in theory the peer could have different
+ * aggregation reorder buffer sizes for different sessions,
+ * our ucode doesn't allow for that and has a global limit
+ * for each station. Therefore, use the minimum of all the
+ * aggregation sessions and our default value.
+ */
+ sta_priv->max_agg_bufsize =
+ min(sta_priv->max_agg_bufsize, buf_size);
+
+ if (cfg(priv)->ht_params &&
+ cfg(priv)->ht_params->use_rts_for_aggregation) {
+ /*
+		 * switch to RTS/CTS if it is the preferred protection
+ * method for HT traffic
+ */
+
+ sta_priv->lq_sta.lq.general_params.flags |=
+ LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
}
+ priv->agg_tids_count++;
+ IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n",
+ priv->agg_tids_count);
+
+ sta_priv->lq_sta.lq.agg_params.agg_frame_cnt_limit =
+ sta_priv->max_agg_bufsize;
+
+ IWL_INFO(priv, "Tx aggregation enabled on ra = %pM tid = %d\n",
+ sta->addr, tid);
- return iwl_trans_tx_agg_disable(trans(priv), vif_priv->ctx->ctxid,
- sta_id, tid);
+ return iwl_send_lq_cmd(priv, ctx,
+ &sta_priv->lq_sta.lq, CMD_ASYNC, false);
+}
+
+static void iwlagn_check_ratid_empty(struct iwl_priv *priv, int sta_id, u8 tid)
+{
+ struct iwl_tid_data *tid_data = &priv->tid_data[sta_id][tid];
+ enum iwl_rxon_context_id ctx;
+ struct ieee80211_vif *vif;
+ u8 *addr;
+
+ lockdep_assert_held(&priv->shrd->sta_lock);
+
+ addr = priv->stations[sta_id].sta.sta.addr;
+ ctx = priv->stations[sta_id].ctxid;
+ vif = priv->contexts[ctx].vif;
+
+ switch (priv->tid_data[sta_id][tid].agg.state) {
+ case IWL_EMPTYING_HW_QUEUE_DELBA:
+ /* There are no packets for this RA / TID in the HW any more */
+ if (tid_data->agg.ssn == tid_data->next_reclaimed) {
+ IWL_DEBUG_TX_QUEUES(priv,
+ "Can continue DELBA flow ssn = next_recl ="
+ " %d", tid_data->next_reclaimed);
+ iwl_trans_tx_agg_disable(trans(priv), sta_id, tid);
+ tid_data->agg.state = IWL_AGG_OFF;
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, addr, tid);
+ }
+ break;
+ case IWL_EMPTYING_HW_QUEUE_ADDBA:
+ /* There are no packets for this RA / TID in the HW any more */
+ if (tid_data->agg.ssn == tid_data->next_reclaimed) {
+ IWL_DEBUG_TX_QUEUES(priv,
+ "Can continue ADDBA flow ssn = next_recl ="
+ " %d", tid_data->next_reclaimed);
+ tid_data->agg.state = IWL_AGG_ON;
+ ieee80211_start_tx_ba_cb_irqsafe(vif, addr, tid);
+ }
+ break;
+ default:
+ break;
+ }
}
static void iwlagn_non_agg_tx_status(struct iwl_priv *priv,
@@ -565,7 +804,7 @@ static void iwl_rx_reply_tx_agg(struct iwl_priv *priv,
IWLAGN_TX_RES_TID_POS;
int sta_id = (tx_resp->ra_tid & IWLAGN_TX_RES_RA_MSK) >>
IWLAGN_TX_RES_RA_POS;
- struct iwl_ht_agg *agg = &priv->shrd->tid_data[sta_id][tid].agg;
+ struct iwl_ht_agg *agg = &priv->tid_data[sta_id][tid].agg;
u32 status = le16_to_cpu(tx_resp->status.status);
int i;
@@ -581,8 +820,8 @@ static void iwl_rx_reply_tx_agg(struct iwl_priv *priv,
* notification again.
*/
if (tx_resp->bt_kill_count && tx_resp->frame_count == 1 &&
- priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist) {
+ cfg(priv)->bt_params &&
+ cfg(priv)->bt_params->advanced_bt_coexist) {
IWL_DEBUG_COEX(priv, "receive reply tx w/ bt_kill\n");
}
@@ -755,7 +994,7 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
struct iwlagn_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
struct ieee80211_hdr *hdr;
u32 status = le16_to_cpu(tx_resp->status.status);
- u32 ssn = iwlagn_get_scd_ssn(tx_resp);
+ u16 ssn = iwlagn_get_scd_ssn(tx_resp);
int tid;
int sta_id;
int freed;
@@ -777,10 +1016,34 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
iwl_rx_reply_tx_agg(priv, tx_resp);
if (tx_resp->frame_count == 1) {
+ u16 next_reclaimed = le16_to_cpu(tx_resp->seq_ctl);
+ next_reclaimed = SEQ_TO_SN(next_reclaimed + 0x10);
+
+ if (is_agg) {
+ /* If this is an aggregation queue, we can rely on the
+ * ssn since the wifi sequence number corresponds to
+ * the index in the TFD ring (%256).
+ * The seq_ctl is the sequence control of the packet
+ * to which this Tx response relates. But if there is a
+ * hole in the bitmap of the BA we received, this Tx
+			 * response may allow reclaiming the hole and all the
+ * subsequent packets that were already acked.
+ * In that case, seq_ctl != ssn, and the next packet
+ * to be reclaimed will be ssn and not seq_ctl.
+ */
+ next_reclaimed = ssn;
+ }
+
__skb_queue_head_init(&skbs);
+ priv->tid_data[sta_id][tid].next_reclaimed = next_reclaimed;
+
+ IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d",
+ next_reclaimed);
+
	/* we can free until ssn % q.n_bd, not inclusive */
- iwl_trans_reclaim(trans(priv), sta_id, tid, txq_id,
- ssn, status, &skbs);
+ WARN_ON(iwl_trans_reclaim(trans(priv), sta_id, tid, txq_id,
+ ssn, status, &skbs));
+ iwlagn_check_ratid_empty(priv, sta_id, tid);
freed = 0;
while (!skb_queue_empty(&skbs)) {
skb = __skb_dequeue(&skbs);
@@ -800,7 +1063,8 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
iwl_is_associated_ctx(ctx) && ctx->vif &&
ctx->vif->type == NL80211_IFTYPE_STATION) {
ctx->last_tx_rejected = true;
- iwl_trans_stop_queue(trans(priv), txq_id);
+ iwl_trans_stop_queue(trans(priv), txq_id,
+ "Tx on passive channel");
IWL_DEBUG_TX_REPLY(priv,
"TXQ %d status %s (0x%08x) "
@@ -874,27 +1138,24 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
sta_id = ba_resp->sta_id;
tid = ba_resp->tid;
- agg = &priv->shrd->tid_data[sta_id][tid].agg;
+ agg = &priv->tid_data[sta_id][tid].agg;
spin_lock_irqsave(&priv->shrd->sta_lock, flags);
- if (unlikely(agg->txq_id != scd_flow)) {
- /*
- * FIXME: this is a uCode bug which need to be addressed,
- * log the information and return for now!
- * since it is possible happen very often and in order
- * not to fill the syslog, don't enable the logging by default
- */
- IWL_DEBUG_TX_REPLY(priv,
- "BA scd_flow %d does not match txq_id %d\n",
- scd_flow, agg->txq_id);
+ if (unlikely(!agg->wait_for_ba)) {
+ if (unlikely(ba_resp->bitmap))
+ IWL_ERR(priv, "Received BA when not expected\n");
spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
return 0;
}
- if (unlikely(!agg->wait_for_ba)) {
- if (unlikely(ba_resp->bitmap))
- IWL_ERR(priv, "Received BA when not expected\n");
+ __skb_queue_head_init(&reclaimed_skbs);
+
+ /* Release all TFDs before the SSN, i.e. all TFDs in front of
+ * block-ack window (we assume that they've been successfully
+ * transmitted ... if not, it's too late anyway). */
+ if (iwl_trans_reclaim(trans(priv), sta_id, tid, scd_flow,
+ ba_resp_scd_ssn, 0, &reclaimed_skbs)) {
spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
return 0;
}
@@ -906,11 +1167,9 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
ba_resp->sta_id);
IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, "
"scd_flow = %d, scd_ssn = %d\n",
- ba_resp->tid,
- ba_resp->seq_ctl,
+ ba_resp->tid, le16_to_cpu(ba_resp->seq_ctl),
(unsigned long long)le64_to_cpu(ba_resp->bitmap),
- ba_resp->scd_flow,
- ba_resp->scd_ssn);
+ scd_flow, ba_resp_scd_ssn);
/* Mark that the expected block-ack response arrived */
agg->wait_for_ba = false;
@@ -929,13 +1188,9 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
IWL_DEBUG_HT(priv, "agg frames sent:%d, acked:%d\n",
ba_resp->txed, ba_resp->txed_2_done);
- __skb_queue_head_init(&reclaimed_skbs);
+ priv->tid_data[sta_id][tid].next_reclaimed = ba_resp_scd_ssn;
- /* Release all TFDs before the SSN, i.e. all TFDs in front of
- * block-ack window (we assume that they've been successfully
- * transmitted ... if not, it's too late anyway). */
- iwl_trans_reclaim(trans(priv), sta_id, tid, scd_flow, ba_resp_scd_ssn,
- 0, &reclaimed_skbs);
+ iwlagn_check_ratid_empty(priv, sta_id, tid);
freed = 0;
while (!skb_queue_empty(&reclaimed_skbs)) {
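
The reply-TX changes above pick the next packet to reclaim differently for the two queue types: on a normal queue it is the sequence number right after the frame this response covers (seq_ctl advanced by one step of 0x10), while on an aggregation queue the scheduler SSN is used directly, since a response that fills a hole in the BA bitmap may free everything up to that SSN. A toy version of that decision with fabricated values, not driver state:

#include <stdint.h>
#include <stdio.h>

#define SEQ_TO_SN(seq) (((seq) & 0xfff0u) >> 4)

int main(void)
{
	uint16_t seq_ctl = 0x0230;   /* seq_ctrl of the acked frame (seq 0x23) */
	uint16_t scd_ssn = 0x0028;   /* SSN reported by the scheduler */
	int is_agg = 1;

	uint16_t next_reclaimed = SEQ_TO_SN(seq_ctl + 0x10);
	if (is_agg)
		next_reclaimed = scd_ssn;  /* SSN already accounts for BA holes */

	printf("next_reclaimed = %u\n", (unsigned)next_reclaimed);
	return 0;
}
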
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index ccba69b7f8a7..b5c7c5f0a753 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -30,7 +30,6 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
-#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
@@ -44,6 +43,7 @@
#include <asm/div64.h>
#include "iwl-eeprom.h"
+#include "iwl-wifi.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
@@ -367,7 +367,7 @@ static void iwl_continuous_event_trace(struct iwl_priv *priv)
u32 num_wraps; /* # times uCode wrapped to top of log */
u32 next_entry; /* index of next entry to be written by uCode */
- base = priv->device_pointers.error_event_table;
+ base = priv->shrd->device_pointers.error_event_table;
if (iwlagn_hw_valid_rtc_data_addr(base)) {
capacity = iwl_read_targ_mem(bus(priv), base);
num_wraps = iwl_read_targ_mem(bus(priv),
@@ -452,52 +452,6 @@ static void iwl_bg_tx_flush(struct work_struct *work)
iwlagn_dev_txfifo_flush(priv, IWL_DROP_ALL);
}
-/******************************************************************************
- *
- * uCode download functions
- *
- ******************************************************************************/
-
-static void iwl_free_fw_desc(struct iwl_priv *priv, struct fw_desc *desc)
-{
- if (desc->v_addr)
- dma_free_coherent(bus(priv)->dev, desc->len,
- desc->v_addr, desc->p_addr);
- desc->v_addr = NULL;
- desc->len = 0;
-}
-
-static void iwl_free_fw_img(struct iwl_priv *priv, struct fw_img *img)
-{
- iwl_free_fw_desc(priv, &img->code);
- iwl_free_fw_desc(priv, &img->data);
-}
-
-static void iwl_dealloc_ucode(struct iwl_priv *priv)
-{
- iwl_free_fw_img(priv, &priv->ucode_rt);
- iwl_free_fw_img(priv, &priv->ucode_init);
- iwl_free_fw_img(priv, &priv->ucode_wowlan);
-}
-
-static int iwl_alloc_fw_desc(struct iwl_priv *priv, struct fw_desc *desc,
- const void *data, size_t len)
-{
- if (!len) {
- desc->v_addr = NULL;
- return -EINVAL;
- }
-
- desc->v_addr = dma_alloc_coherent(bus(priv)->dev, len,
- &desc->p_addr, GFP_KERNEL);
- if (!desc->v_addr)
- return -ENOMEM;
-
- desc->len = len;
- memcpy(desc->v_addr, data, len);
- return 0;
-}
-
static void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags)
{
int i;
@@ -555,23 +509,14 @@ static void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags)
BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
}
-
-struct iwlagn_ucode_capabilities {
- u32 max_probe_length;
- u32 standard_phy_calibration_size;
- u32 flags;
-};
-
static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context);
-static int iwlagn_mac_setup_register(struct iwl_priv *priv,
- struct iwlagn_ucode_capabilities *capa);
#define UCODE_EXPERIMENTAL_INDEX 100
#define UCODE_EXPERIMENTAL_TAG "exp"
static int __must_check iwl_request_firmware(struct iwl_priv *priv, bool first)
{
- const char *name_pre = priv->cfg->fw_name_pre;
+ const char *name_pre = cfg(priv)->fw_name_pre;
char tag[8];
if (first) {
@@ -580,14 +525,14 @@ static int __must_check iwl_request_firmware(struct iwl_priv *priv, bool first)
strcpy(tag, UCODE_EXPERIMENTAL_TAG);
} else if (priv->fw_index == UCODE_EXPERIMENTAL_INDEX) {
#endif
- priv->fw_index = priv->cfg->ucode_api_max;
+ priv->fw_index = cfg(priv)->ucode_api_max;
sprintf(tag, "%d", priv->fw_index);
} else {
priv->fw_index--;
sprintf(tag, "%d", priv->fw_index);
}
- if (priv->fw_index < priv->cfg->ucode_api_min) {
+ if (priv->fw_index < cfg(priv)->ucode_api_min) {
IWL_ERR(priv, "no suitable firmware found!\n");
return -ENOENT;
}
@@ -892,9 +837,9 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
struct iwl_ucode_header *ucode;
int err;
struct iwlagn_firmware_pieces pieces;
- const unsigned int api_max = priv->cfg->ucode_api_max;
- unsigned int api_ok = priv->cfg->ucode_api_ok;
- const unsigned int api_min = priv->cfg->ucode_api_min;
+ const unsigned int api_max = cfg(priv)->ucode_api_max;
+ unsigned int api_ok = cfg(priv)->ucode_api_ok;
+ const unsigned int api_min = cfg(priv)->ucode_api_min;
u32 api_ver;
char buildstr[25];
u32 build;
@@ -1040,30 +985,32 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
/* Runtime instructions and 2 copies of data:
* 1) unmodified from disk
* 2) backup cache for save/restore during power-downs */
- if (iwl_alloc_fw_desc(priv, &priv->ucode_rt.code,
+ if (iwl_alloc_fw_desc(bus(priv), &trans(priv)->ucode_rt.code,
pieces.inst, pieces.inst_size))
goto err_pci_alloc;
- if (iwl_alloc_fw_desc(priv, &priv->ucode_rt.data,
+ if (iwl_alloc_fw_desc(bus(priv), &trans(priv)->ucode_rt.data,
pieces.data, pieces.data_size))
goto err_pci_alloc;
/* Initialization instructions and data */
if (pieces.init_size && pieces.init_data_size) {
- if (iwl_alloc_fw_desc(priv, &priv->ucode_init.code,
+ if (iwl_alloc_fw_desc(bus(priv), &trans(priv)->ucode_init.code,
pieces.init, pieces.init_size))
goto err_pci_alloc;
- if (iwl_alloc_fw_desc(priv, &priv->ucode_init.data,
+ if (iwl_alloc_fw_desc(bus(priv), &trans(priv)->ucode_init.data,
pieces.init_data, pieces.init_data_size))
goto err_pci_alloc;
}
/* WoWLAN instructions and data */
if (pieces.wowlan_inst_size && pieces.wowlan_data_size) {
- if (iwl_alloc_fw_desc(priv, &priv->ucode_wowlan.code,
+ if (iwl_alloc_fw_desc(bus(priv),
+ &trans(priv)->ucode_wowlan.code,
pieces.wowlan_inst,
pieces.wowlan_inst_size))
goto err_pci_alloc;
- if (iwl_alloc_fw_desc(priv, &priv->ucode_wowlan.data,
+ if (iwl_alloc_fw_desc(bus(priv),
+ &trans(priv)->ucode_wowlan.data,
pieces.wowlan_data,
pieces.wowlan_data_size))
goto err_pci_alloc;
@@ -1081,20 +1028,23 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
priv->init_evtlog_size = (pieces.init_evtlog_size - 16)/12;
else
priv->init_evtlog_size =
- priv->cfg->base_params->max_event_log_size;
+ cfg(priv)->base_params->max_event_log_size;
priv->init_errlog_ptr = pieces.init_errlog_ptr;
priv->inst_evtlog_ptr = pieces.inst_evtlog_ptr;
if (pieces.inst_evtlog_size)
priv->inst_evtlog_size = (pieces.inst_evtlog_size - 16)/12;
else
priv->inst_evtlog_size =
- priv->cfg->base_params->max_event_log_size;
+ cfg(priv)->base_params->max_event_log_size;
priv->inst_errlog_ptr = pieces.inst_errlog_ptr;
+#ifndef CONFIG_IWLWIFI_P2P
+ ucode_capa.flags &= ~IWL_UCODE_TLV_FLAGS_PAN;
+#endif
priv->new_scan_threshold_behaviour =
!!(ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWSCAN);
- if (!(priv->cfg->sku & EEPROM_SKU_CAP_IPAN_ENABLE))
+ if (!(cfg(priv)->sku & EEPROM_SKU_CAP_IPAN_ENABLE))
ucode_capa.flags &= ~IWL_UCODE_TLV_FLAGS_PAN;
/*
@@ -1111,7 +1061,6 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
priv->sta_key_max_num = STA_KEY_MAX_NUM;
priv->shrd->cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
}
-
/*
* figure out the offset of chain noise reset and gain commands
* base on the size of standard phy calibration commands table size
@@ -1156,7 +1105,7 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
err_pci_alloc:
IWL_ERR(priv, "failed to allocate pci memory\n");
- iwl_dealloc_ucode(priv);
+ iwl_dealloc_ucode(trans(priv));
out_unbind:
complete(&priv->firmware_loading_complete);
device_release_driver(bus(priv)->dev);
@@ -1176,7 +1125,7 @@ static void iwl_rf_kill_ct_config(struct iwl_priv *priv)
spin_unlock_irqrestore(&priv->shrd->lock, flags);
priv->thermal_throttle.ct_kill_toggle = false;
- if (priv->cfg->base_params->support_ct_kill_exit) {
+ if (cfg(priv)->base_params->support_ct_kill_exit) {
adv_cmd.critical_temperature_enter =
cpu_to_le32(hw_params(priv).ct_kill_threshold);
adv_cmd.critical_temperature_exit =
@@ -1271,10 +1220,10 @@ int iwl_alive_start(struct iwl_priv *priv)
return -ERFKILL;
/* download priority table before any calibration request */
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist) {
+ if (cfg(priv)->bt_params &&
+ cfg(priv)->bt_params->advanced_bt_coexist) {
/* Configure Bluetooth device coexistence support */
- if (priv->cfg->bt_params->bt_sco_disable)
+ if (cfg(priv)->bt_params->bt_sco_disable)
priv->bt_enable_pspoll = false;
else
priv->bt_enable_pspoll = true;
@@ -1286,14 +1235,14 @@ int iwl_alive_start(struct iwl_priv *priv)
priv->bt_valid = IWLAGN_BT_VALID_ENABLE_FLAGS;
priv->cur_rssi_ctx = NULL;
- iwlagn_send_prio_tbl(priv);
+ iwl_send_prio_tbl(trans(priv));
/* FIXME: w/a to force change uCode BT state machine */
- ret = iwlagn_send_bt_env(priv, IWL_BT_COEX_ENV_OPEN,
+ ret = iwl_send_bt_env(trans(priv), IWL_BT_COEX_ENV_OPEN,
BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
if (ret)
return ret;
- ret = iwlagn_send_bt_env(priv, IWL_BT_COEX_ENV_CLOSE,
+ ret = iwl_send_bt_env(trans(priv), IWL_BT_COEX_ENV_CLOSE,
BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
if (ret)
return ret;
@@ -1304,16 +1253,17 @@ int iwl_alive_start(struct iwl_priv *priv)
iwl_send_bt_config(priv);
}
- if (hw_params(priv).calib_rt_cfg)
- iwlagn_send_calib_cfg_rt(priv,
- hw_params(priv).calib_rt_cfg);
+ /*
+ * Perform runtime calibrations, including DC calibration.
+ */
+ iwlagn_send_calib_cfg_rt(priv, IWL_CALIB_CFG_DC_IDX);
ieee80211_wake_queues(priv->hw);
priv->active_rate = IWL_RATES_MASK;
/* Configure Tx antenna selection based on H/W config */
- iwlagn_send_tx_ant_config(priv, priv->cfg->valid_tx_ant);
+ iwlagn_send_tx_ant_config(priv, cfg(priv)->valid_tx_ant);
if (iwl_is_associated_ctx(ctx) && !priv->shrd->wowlan) {
struct iwl_rxon_cmd *active_rxon =
@@ -1352,7 +1302,7 @@ int iwl_alive_start(struct iwl_priv *priv)
static void iwl_cancel_deferred_work(struct iwl_priv *priv);
-static void __iwl_down(struct iwl_priv *priv)
+void __iwl_down(struct iwl_priv *priv)
{
int exit_pending;
@@ -1382,9 +1332,9 @@ static void __iwl_down(struct iwl_priv *priv)
priv->bt_status = 0;
priv->cur_rssi_ctx = NULL;
priv->bt_is_sco = 0;
- if (priv->cfg->bt_params)
+ if (cfg(priv)->bt_params)
priv->bt_traffic_load =
- priv->cfg->bt_params->bt_init_traffic_load;
+ cfg(priv)->bt_params->bt_init_traffic_load;
else
priv->bt_traffic_load = 0;
priv->bt_full_concurrent = false;
@@ -1415,7 +1365,7 @@ static void __iwl_down(struct iwl_priv *priv)
priv->beacon_skb = NULL;
}
-static void iwl_down(struct iwl_priv *priv)
+void iwl_down(struct iwl_priv *priv)
{
mutex_lock(&priv->shrd->mutex);
__iwl_down(priv);
@@ -1424,57 +1374,6 @@ static void iwl_down(struct iwl_priv *priv)
iwl_cancel_deferred_work(priv);
}
-#define MAX_HW_RESTARTS 5
-
-static int __iwl_up(struct iwl_priv *priv)
-{
- struct iwl_rxon_context *ctx;
- int ret;
-
- lockdep_assert_held(&priv->shrd->mutex);
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status)) {
- IWL_WARN(priv, "Exit pending; will not bring the NIC up\n");
- return -EIO;
- }
-
- for_each_context(priv, ctx) {
- ret = iwlagn_alloc_bcast_station(priv, ctx);
- if (ret) {
- iwl_dealloc_bcast_stations(priv);
- return ret;
- }
- }
-
- ret = iwlagn_run_init_ucode(priv);
- if (ret) {
- IWL_ERR(priv, "Failed to run INIT ucode: %d\n", ret);
- goto error;
- }
-
- ret = iwlagn_load_ucode_wait_alive(priv,
- &priv->ucode_rt,
- IWL_UCODE_REGULAR);
- if (ret) {
- IWL_ERR(priv, "Failed to start RT ucode: %d\n", ret);
- goto error;
- }
-
- ret = iwl_alive_start(priv);
- if (ret)
- goto error;
- return 0;
-
- error:
- set_bit(STATUS_EXIT_PENDING, &priv->shrd->status);
- __iwl_down(priv);
- clear_bit(STATUS_EXIT_PENDING, &priv->shrd->status);
-
- IWL_ERR(priv, "Unable to initialize device.\n");
- return ret;
-}
-
-
/*****************************************************************************
*
* Workqueue callbacks
@@ -1502,7 +1401,7 @@ static void iwl_bg_run_time_calib_work(struct work_struct *work)
mutex_unlock(&priv->shrd->mutex);
}
-static void iwlagn_prepare_restart(struct iwl_priv *priv)
+void iwlagn_prepare_restart(struct iwl_priv *priv)
{
struct iwl_rxon_context *ctx;
bool bt_full_concurrent;
@@ -1559,1173 +1458,8 @@ static void iwl_bg_restart(struct work_struct *data)
}
}
-/*****************************************************************************
- *
- * mac80211 entry point functions
- *
- *****************************************************************************/
-
-static const struct ieee80211_iface_limit iwlagn_sta_ap_limits[] = {
- {
- .max = 1,
- .types = BIT(NL80211_IFTYPE_STATION),
- },
- {
- .max = 1,
- .types = BIT(NL80211_IFTYPE_AP),
- },
-};
-
-static const struct ieee80211_iface_limit iwlagn_2sta_limits[] = {
- {
- .max = 2,
- .types = BIT(NL80211_IFTYPE_STATION),
- },
-};
-
-static const struct ieee80211_iface_limit iwlagn_p2p_sta_go_limits[] = {
- {
- .max = 1,
- .types = BIT(NL80211_IFTYPE_STATION),
- },
- {
- .max = 1,
- .types = BIT(NL80211_IFTYPE_P2P_GO) |
- BIT(NL80211_IFTYPE_AP),
- },
-};
-
-static const struct ieee80211_iface_limit iwlagn_p2p_2sta_limits[] = {
- {
- .max = 2,
- .types = BIT(NL80211_IFTYPE_STATION),
- },
- {
- .max = 1,
- .types = BIT(NL80211_IFTYPE_P2P_CLIENT),
- },
-};
-
-static const struct ieee80211_iface_combination
-iwlagn_iface_combinations_dualmode[] = {
- { .num_different_channels = 1,
- .max_interfaces = 2,
- .beacon_int_infra_match = true,
- .limits = iwlagn_sta_ap_limits,
- .n_limits = ARRAY_SIZE(iwlagn_sta_ap_limits),
- },
- { .num_different_channels = 1,
- .max_interfaces = 2,
- .limits = iwlagn_2sta_limits,
- .n_limits = ARRAY_SIZE(iwlagn_2sta_limits),
- },
-};
-
-static const struct ieee80211_iface_combination
-iwlagn_iface_combinations_p2p[] = {
- { .num_different_channels = 1,
- .max_interfaces = 2,
- .beacon_int_infra_match = true,
- .limits = iwlagn_p2p_sta_go_limits,
- .n_limits = ARRAY_SIZE(iwlagn_p2p_sta_go_limits),
- },
- { .num_different_channels = 1,
- .max_interfaces = 2,
- .limits = iwlagn_p2p_2sta_limits,
- .n_limits = ARRAY_SIZE(iwlagn_p2p_2sta_limits),
- },
-};
-
-/*
- * Not a mac80211 entry point function, but it fits in with all the
- * other mac80211 functions grouped here.
- */
-static int iwlagn_mac_setup_register(struct iwl_priv *priv,
- struct iwlagn_ucode_capabilities *capa)
-{
- int ret;
- struct ieee80211_hw *hw = priv->hw;
- struct iwl_rxon_context *ctx;
-
- hw->rate_control_algorithm = "iwl-agn-rs";
-
- /* Tell mac80211 our characteristics */
- hw->flags = IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_AMPDU_AGGREGATION |
- IEEE80211_HW_NEED_DTIM_PERIOD |
- IEEE80211_HW_SPECTRUM_MGMT |
- IEEE80211_HW_REPORTS_TX_ACK_STATUS;
-
- /*
- * Including the following line will crash some AP's. This
- * workaround removes the stimulus which causes the crash until
- * the AP software can be fixed.
- hw->max_tx_aggregation_subframes = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
- */
-
- hw->flags |= IEEE80211_HW_SUPPORTS_PS |
- IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
-
- if (priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE)
- hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
- IEEE80211_HW_SUPPORTS_STATIC_SMPS;
-
- if (capa->flags & IWL_UCODE_TLV_FLAGS_MFP)
- hw->flags |= IEEE80211_HW_MFP_CAPABLE;
-
- hw->sta_data_size = sizeof(struct iwl_station_priv);
- hw->vif_data_size = sizeof(struct iwl_vif_priv);
-
- for_each_context(priv, ctx) {
- hw->wiphy->interface_modes |= ctx->interface_modes;
- hw->wiphy->interface_modes |= ctx->exclusive_interface_modes;
- }
-
- BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
-
- if (hw->wiphy->interface_modes & BIT(NL80211_IFTYPE_P2P_CLIENT)) {
- hw->wiphy->iface_combinations = iwlagn_iface_combinations_p2p;
- hw->wiphy->n_iface_combinations =
- ARRAY_SIZE(iwlagn_iface_combinations_p2p);
- } else if (hw->wiphy->interface_modes & BIT(NL80211_IFTYPE_AP)) {
- hw->wiphy->iface_combinations = iwlagn_iface_combinations_dualmode;
- hw->wiphy->n_iface_combinations =
- ARRAY_SIZE(iwlagn_iface_combinations_dualmode);
- }
-
- hw->wiphy->max_remain_on_channel_duration = 1000;
-
- hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
- WIPHY_FLAG_DISABLE_BEACON_HINTS |
- WIPHY_FLAG_IBSS_RSN;
-
- if (priv->ucode_wowlan.code.len && device_can_wakeup(bus(priv)->dev)) {
- hw->wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT |
- WIPHY_WOWLAN_DISCONNECT |
- WIPHY_WOWLAN_EAP_IDENTITY_REQ |
- WIPHY_WOWLAN_RFKILL_RELEASE;
- if (!iwlagn_mod_params.sw_crypto)
- hw->wiphy->wowlan.flags |=
- WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
- WIPHY_WOWLAN_GTK_REKEY_FAILURE;
-
- hw->wiphy->wowlan.n_patterns = IWLAGN_WOWLAN_MAX_PATTERNS;
- hw->wiphy->wowlan.pattern_min_len =
- IWLAGN_WOWLAN_MIN_PATTERN_LEN;
- hw->wiphy->wowlan.pattern_max_len =
- IWLAGN_WOWLAN_MAX_PATTERN_LEN;
- }
-
- if (iwlagn_mod_params.power_save)
- hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
- else
- hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
-
- hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
- /* we create the 802.11 header and a zero-length SSID element */
- hw->wiphy->max_scan_ie_len = capa->max_probe_length - 24 - 2;
-
- /* Default value; 4 EDCA QOS priorities */
- hw->queues = 4;
-
- hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
-
- if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
- priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
- &priv->bands[IEEE80211_BAND_2GHZ];
- if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
- priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
- &priv->bands[IEEE80211_BAND_5GHZ];
-
- iwl_leds_init(priv);
-
- ret = ieee80211_register_hw(priv->hw);
- if (ret) {
- IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
- return ret;
- }
- priv->mac80211_registered = 1;
-
- return 0;
-}
-
-
-static int iwlagn_mac_start(struct ieee80211_hw *hw)
-{
- struct iwl_priv *priv = hw->priv;
- int ret;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- /* we should be verifying the device is ready to be opened */
- mutex_lock(&priv->shrd->mutex);
- ret = __iwl_up(priv);
- mutex_unlock(&priv->shrd->mutex);
- if (ret)
- return ret;
-
- IWL_DEBUG_INFO(priv, "Start UP work done.\n");
-
- /* Now we should be done, and the READY bit should be set. */
- if (WARN_ON(!test_bit(STATUS_READY, &priv->shrd->status)))
- ret = -EIO;
-
- iwlagn_led_enable(priv);
-
- priv->is_open = 1;
- IWL_DEBUG_MAC80211(priv, "leave\n");
- return 0;
-}
-
-static void iwlagn_mac_stop(struct ieee80211_hw *hw)
-{
- struct iwl_priv *priv = hw->priv;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- if (!priv->is_open)
- return;
-
- priv->is_open = 0;
-
- iwl_down(priv);
-
- flush_workqueue(priv->shrd->workqueue);
-
-	/* User space software may expect to get rfkill change notifications
-	 * even if the interface is down */
- iwl_write32(bus(priv), CSR_INT, 0xFFFFFFFF);
- iwl_enable_rfkill_int(priv);
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int iwlagn_send_patterns(struct iwl_priv *priv,
- struct cfg80211_wowlan *wowlan)
-{
- struct iwlagn_wowlan_patterns_cmd *pattern_cmd;
- struct iwl_host_cmd cmd = {
- .id = REPLY_WOWLAN_PATTERNS,
- .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
- .flags = CMD_SYNC,
- };
- int i, err;
-
- if (!wowlan->n_patterns)
- return 0;
-
- cmd.len[0] = sizeof(*pattern_cmd) +
- wowlan->n_patterns * sizeof(struct iwlagn_wowlan_pattern);
-
- pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL);
- if (!pattern_cmd)
- return -ENOMEM;
-
- pattern_cmd->n_patterns = cpu_to_le32(wowlan->n_patterns);
-
- for (i = 0; i < wowlan->n_patterns; i++) {
- int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8);
-
- memcpy(&pattern_cmd->patterns[i].mask,
- wowlan->patterns[i].mask, mask_len);
- memcpy(&pattern_cmd->patterns[i].pattern,
- wowlan->patterns[i].pattern,
- wowlan->patterns[i].pattern_len);
- pattern_cmd->patterns[i].mask_size = mask_len;
- pattern_cmd->patterns[i].pattern_size =
- wowlan->patterns[i].pattern_len;
- }
-
- cmd.data[0] = pattern_cmd;
- err = iwl_trans_send_cmd(trans(priv), &cmd);
- kfree(pattern_cmd);
- return err;
-}
-#endif
-
-static void iwlagn_mac_set_rekey_data(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct cfg80211_gtk_rekey_data *data)
-{
- struct iwl_priv *priv = hw->priv;
-
- if (iwlagn_mod_params.sw_crypto)
- return;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
- mutex_lock(&priv->shrd->mutex);
-
- if (priv->contexts[IWL_RXON_CTX_BSS].vif != vif)
- goto out;
-
- memcpy(priv->kek, data->kek, NL80211_KEK_LEN);
- memcpy(priv->kck, data->kck, NL80211_KCK_LEN);
- priv->replay_ctr = cpu_to_le64(be64_to_cpup((__be64 *)&data->replay_ctr));
- priv->have_rekey_data = true;
-
- out:
- mutex_unlock(&priv->shrd->mutex);
- IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-
-struct wowlan_key_data {
- struct iwl_rxon_context *ctx;
- struct iwlagn_wowlan_rsc_tsc_params_cmd *rsc_tsc;
- struct iwlagn_wowlan_tkip_params_cmd *tkip;
- const u8 *bssid;
- bool error, use_rsc_tsc, use_tkip;
-};
-
-#ifdef CONFIG_PM_SLEEP
-static void iwlagn_convert_p1k(u16 *p1k, __le16 *out)
-{
- int i;
-
- for (i = 0; i < IWLAGN_P1K_SIZE; i++)
- out[i] = cpu_to_le16(p1k[i]);
-}
-
-static void iwlagn_wowlan_program_keys(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
- struct ieee80211_key_conf *key,
- void *_data)
-{
- struct iwl_priv *priv = hw->priv;
- struct wowlan_key_data *data = _data;
- struct iwl_rxon_context *ctx = data->ctx;
- struct aes_sc *aes_sc, *aes_tx_sc = NULL;
- struct tkip_sc *tkip_sc, *tkip_tx_sc = NULL;
- struct iwlagn_p1k_cache *rx_p1ks;
- u8 *rx_mic_key;
- struct ieee80211_key_seq seq;
- u32 cur_rx_iv32 = 0;
- u16 p1k[IWLAGN_P1K_SIZE];
- int ret, i;
-
- mutex_lock(&priv->shrd->mutex);
-
- if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
- key->cipher == WLAN_CIPHER_SUITE_WEP104) &&
- !sta && !ctx->key_mapping_keys)
- ret = iwl_set_default_wep_key(priv, ctx, key);
- else
- ret = iwl_set_dynamic_key(priv, ctx, key, sta);
-
- if (ret) {
- IWL_ERR(priv, "Error setting key during suspend!\n");
- data->error = true;
- }
-
- switch (key->cipher) {
- case WLAN_CIPHER_SUITE_TKIP:
- if (sta) {
- tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.unicast_rsc;
- tkip_tx_sc = &data->rsc_tsc->all_tsc_rsc.tkip.tsc;
-
- rx_p1ks = data->tkip->rx_uni;
-
- ieee80211_get_key_tx_seq(key, &seq);
- tkip_tx_sc->iv16 = cpu_to_le16(seq.tkip.iv16);
- tkip_tx_sc->iv32 = cpu_to_le32(seq.tkip.iv32);
-
- ieee80211_get_tkip_p1k_iv(key, seq.tkip.iv32, p1k);
- iwlagn_convert_p1k(p1k, data->tkip->tx.p1k);
-
- memcpy(data->tkip->mic_keys.tx,
- &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
- IWLAGN_MIC_KEY_SIZE);
-
- rx_mic_key = data->tkip->mic_keys.rx_unicast;
- } else {
- tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.multicast_rsc;
- rx_p1ks = data->tkip->rx_multi;
- rx_mic_key = data->tkip->mic_keys.rx_mcast;
- }
-
- /*
- * For non-QoS this relies on the fact that both the uCode and
-	 * mac80211 use TID 0 (as they must, to avoid replay attacks)
- * for checking the IV in the frames.
- */
- for (i = 0; i < IWLAGN_NUM_RSC; i++) {
- ieee80211_get_key_rx_seq(key, i, &seq);
- tkip_sc[i].iv16 = cpu_to_le16(seq.tkip.iv16);
- tkip_sc[i].iv32 = cpu_to_le32(seq.tkip.iv32);
- /* wrapping isn't allowed, AP must rekey */
- if (seq.tkip.iv32 > cur_rx_iv32)
- cur_rx_iv32 = seq.tkip.iv32;
- }
-
- ieee80211_get_tkip_rx_p1k(key, data->bssid, cur_rx_iv32, p1k);
- iwlagn_convert_p1k(p1k, rx_p1ks[0].p1k);
- ieee80211_get_tkip_rx_p1k(key, data->bssid,
- cur_rx_iv32 + 1, p1k);
- iwlagn_convert_p1k(p1k, rx_p1ks[1].p1k);
-
- memcpy(rx_mic_key,
- &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
- IWLAGN_MIC_KEY_SIZE);
-
- data->use_tkip = true;
- data->use_rsc_tsc = true;
- break;
- case WLAN_CIPHER_SUITE_CCMP:
- if (sta) {
- u8 *pn = seq.ccmp.pn;
-
- aes_sc = data->rsc_tsc->all_tsc_rsc.aes.unicast_rsc;
- aes_tx_sc = &data->rsc_tsc->all_tsc_rsc.aes.tsc;
-
- ieee80211_get_key_tx_seq(key, &seq);
- aes_tx_sc->pn = cpu_to_le64(
- (u64)pn[5] |
- ((u64)pn[4] << 8) |
- ((u64)pn[3] << 16) |
- ((u64)pn[2] << 24) |
- ((u64)pn[1] << 32) |
- ((u64)pn[0] << 40));
- } else
- aes_sc = data->rsc_tsc->all_tsc_rsc.aes.multicast_rsc;
-
- /*
- * For non-QoS this relies on the fact that both the uCode and
- * mac80211 use TID 0 for checking the IV in the frames.
- */
- for (i = 0; i < IWLAGN_NUM_RSC; i++) {
- u8 *pn = seq.ccmp.pn;
-
- ieee80211_get_key_rx_seq(key, i, &seq);
- aes_sc->pn = cpu_to_le64(
- (u64)pn[5] |
- ((u64)pn[4] << 8) |
- ((u64)pn[3] << 16) |
- ((u64)pn[2] << 24) |
- ((u64)pn[1] << 32) |
- ((u64)pn[0] << 40));
- }
- data->use_rsc_tsc = true;
- break;
- }
-
- mutex_unlock(&priv->shrd->mutex);
-}
-
-static int iwlagn_mac_suspend(struct ieee80211_hw *hw,
- struct cfg80211_wowlan *wowlan)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwlagn_wowlan_wakeup_filter_cmd wakeup_filter_cmd;
- struct iwl_rxon_cmd rxon;
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- struct iwlagn_wowlan_kek_kck_material_cmd kek_kck_cmd;
- struct iwlagn_wowlan_tkip_params_cmd tkip_cmd = {};
- struct wowlan_key_data key_data = {
- .ctx = ctx,
- .bssid = ctx->active.bssid_addr,
- .use_rsc_tsc = false,
- .tkip = &tkip_cmd,
- .use_tkip = false,
- };
- int ret, i;
- u16 seq;
-
- if (WARN_ON(!wowlan))
- return -EINVAL;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
- mutex_lock(&priv->shrd->mutex);
-
- /* Don't attempt WoWLAN when not associated, tear down instead. */
- if (!ctx->vif || ctx->vif->type != NL80211_IFTYPE_STATION ||
- !iwl_is_associated_ctx(ctx)) {
- ret = 1;
- goto out;
- }
-
- key_data.rsc_tsc = kzalloc(sizeof(*key_data.rsc_tsc), GFP_KERNEL);
- if (!key_data.rsc_tsc) {
- ret = -ENOMEM;
- goto out;
- }
-
- memset(&wakeup_filter_cmd, 0, sizeof(wakeup_filter_cmd));
-
- /*
-	 * We know the last used seqno, and the uCode expects to know that
-	 * one too; it will increment it before TX.
- */
- seq = le16_to_cpu(priv->last_seq_ctl) & IEEE80211_SCTL_SEQ;
- wakeup_filter_cmd.non_qos_seq = cpu_to_le16(seq);
-
- /*
- * For QoS counters, we store the one to use next, so subtract 0x10
- * since the uCode will add 0x10 before using the value.
- */
- for (i = 0; i < 8; i++) {
- seq = priv->shrd->tid_data[IWL_AP_ID][i].seq_number;
- seq -= 0x10;
- wakeup_filter_cmd.qos_seq[i] = cpu_to_le16(seq);
- }
-
- if (wowlan->disconnect)
- wakeup_filter_cmd.enabled |=
- cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_BEACON_MISS |
- IWLAGN_WOWLAN_WAKEUP_LINK_CHANGE);
- if (wowlan->magic_pkt)
- wakeup_filter_cmd.enabled |=
- cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_MAGIC_PACKET);
- if (wowlan->gtk_rekey_failure)
- wakeup_filter_cmd.enabled |=
- cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_GTK_REKEY_FAIL);
- if (wowlan->eap_identity_req)
- wakeup_filter_cmd.enabled |=
- cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_EAP_IDENT_REQ);
- if (wowlan->four_way_handshake)
- wakeup_filter_cmd.enabled |=
- cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_4WAY_HANDSHAKE);
- if (wowlan->rfkill_release)
- wakeup_filter_cmd.enabled |=
- cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_RFKILL);
- if (wowlan->n_patterns)
- wakeup_filter_cmd.enabled |=
- cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_PATTERN_MATCH);
-
- iwl_scan_cancel_timeout(priv, 200);
-
- memcpy(&rxon, &ctx->active, sizeof(rxon));
-
- iwl_trans_stop_device(trans(priv));
-
- priv->shrd->wowlan = true;
-
- ret = iwlagn_load_ucode_wait_alive(priv, &priv->ucode_wowlan,
- IWL_UCODE_WOWLAN);
- if (ret)
- goto error;
-
- /* now configure WoWLAN ucode */
- ret = iwl_alive_start(priv);
- if (ret)
- goto error;
-
- memcpy(&ctx->staging, &rxon, sizeof(rxon));
- ret = iwlagn_commit_rxon(priv, ctx);
- if (ret)
- goto error;
-
- ret = iwl_power_update_mode(priv, true);
- if (ret)
- goto error;
-
- if (!iwlagn_mod_params.sw_crypto) {
- /* mark all keys clear */
- priv->ucode_key_table = 0;
- ctx->key_mapping_keys = 0;
-
- /*
- * This needs to be unlocked due to lock ordering
- * constraints. Since we're in the suspend path
- * that isn't really a problem though.
- */
- mutex_unlock(&priv->shrd->mutex);
- ieee80211_iter_keys(priv->hw, ctx->vif,
- iwlagn_wowlan_program_keys,
- &key_data);
- mutex_lock(&priv->shrd->mutex);
- if (key_data.error) {
- ret = -EIO;
- goto error;
- }
-
- if (key_data.use_rsc_tsc) {
- struct iwl_host_cmd rsc_tsc_cmd = {
- .id = REPLY_WOWLAN_TSC_RSC_PARAMS,
- .flags = CMD_SYNC,
- .data[0] = key_data.rsc_tsc,
- .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
- .len[0] = sizeof(*key_data.rsc_tsc),
- };
-
- ret = iwl_trans_send_cmd(trans(priv), &rsc_tsc_cmd);
- if (ret)
- goto error;
- }
-
- if (key_data.use_tkip) {
- ret = iwl_trans_send_cmd_pdu(trans(priv),
- REPLY_WOWLAN_TKIP_PARAMS,
- CMD_SYNC, sizeof(tkip_cmd),
- &tkip_cmd);
- if (ret)
- goto error;
- }
-
- if (priv->have_rekey_data) {
- memset(&kek_kck_cmd, 0, sizeof(kek_kck_cmd));
- memcpy(kek_kck_cmd.kck, priv->kck, NL80211_KCK_LEN);
- kek_kck_cmd.kck_len = cpu_to_le16(NL80211_KCK_LEN);
- memcpy(kek_kck_cmd.kek, priv->kek, NL80211_KEK_LEN);
- kek_kck_cmd.kek_len = cpu_to_le16(NL80211_KEK_LEN);
- kek_kck_cmd.replay_ctr = priv->replay_ctr;
-
- ret = iwl_trans_send_cmd_pdu(trans(priv),
- REPLY_WOWLAN_KEK_KCK_MATERIAL,
- CMD_SYNC, sizeof(kek_kck_cmd),
- &kek_kck_cmd);
- if (ret)
- goto error;
- }
- }
-
- ret = iwl_trans_send_cmd_pdu(trans(priv), REPLY_WOWLAN_WAKEUP_FILTER,
- CMD_SYNC, sizeof(wakeup_filter_cmd),
- &wakeup_filter_cmd);
- if (ret)
- goto error;
-
- ret = iwlagn_send_patterns(priv, wowlan);
- if (ret)
- goto error;
-
- device_set_wakeup_enable(bus(priv)->dev, true);
-
- /* Now let the ucode operate on its own */
- iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_SET,
- CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
-
- goto out;
-
- error:
- priv->shrd->wowlan = false;
- iwlagn_prepare_restart(priv);
- ieee80211_restart_hw(priv->hw);
- out:
- mutex_unlock(&priv->shrd->mutex);
- kfree(key_data.rsc_tsc);
- IWL_DEBUG_MAC80211(priv, "leave\n");
-
- return ret;
-}
-
-static int iwlagn_mac_resume(struct ieee80211_hw *hw)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- struct ieee80211_vif *vif;
- unsigned long flags;
- u32 base, status = 0xffffffff;
- int ret = -EIO;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
- mutex_lock(&priv->shrd->mutex);
-
- iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_CLR,
- CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
-
- base = priv->device_pointers.error_event_table;
- if (iwlagn_hw_valid_rtc_data_addr(base)) {
- spin_lock_irqsave(&bus(priv)->reg_lock, flags);
- ret = iwl_grab_nic_access_silent(bus(priv));
- if (ret == 0) {
- iwl_write32(bus(priv), HBUS_TARG_MEM_RADDR, base);
- status = iwl_read32(bus(priv), HBUS_TARG_MEM_RDAT);
- iwl_release_nic_access(bus(priv));
- }
- spin_unlock_irqrestore(&bus(priv)->reg_lock, flags);
-
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- if (ret == 0) {
- if (!priv->wowlan_sram)
- priv->wowlan_sram =
- kzalloc(priv->ucode_wowlan.data.len,
- GFP_KERNEL);
-
- if (priv->wowlan_sram)
- _iwl_read_targ_mem_words(
- bus(priv), 0x800000, priv->wowlan_sram,
- priv->ucode_wowlan.data.len / 4);
- }
-#endif
- }
-
- /* we'll clear ctx->vif during iwlagn_prepare_restart() */
- vif = ctx->vif;
-
- priv->shrd->wowlan = false;
-
- device_set_wakeup_enable(bus(priv)->dev, false);
-
- iwlagn_prepare_restart(priv);
-
- memset((void *)&ctx->active, 0, sizeof(ctx->active));
- iwl_connection_init_rx_config(priv, ctx);
- iwlagn_set_rxon_chain(priv, ctx);
-
- mutex_unlock(&priv->shrd->mutex);
- IWL_DEBUG_MAC80211(priv, "leave\n");
-
- ieee80211_resume_disconnect(vif);
-
- return 1;
-}
-#endif
-
-static void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
-{
- struct iwl_priv *priv = hw->priv;
-
- IWL_DEBUG_MACDUMP(priv, "enter\n");
-
- IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
- ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
-
- if (iwlagn_tx_skb(priv, skb))
- dev_kfree_skb_any(skb);
-
- IWL_DEBUG_MACDUMP(priv, "leave\n");
-}
-
-static void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_key_conf *keyconf,
- struct ieee80211_sta *sta,
- u32 iv32, u16 *phase1key)
-{
- struct iwl_priv *priv = hw->priv;
-
- iwl_update_tkip_key(priv, vif, keyconf, sta, iv32, phase1key);
-}
-
-static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
- struct ieee80211_key_conf *key)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
- struct iwl_rxon_context *ctx = vif_priv->ctx;
- int ret;
- bool is_default_wep_key = false;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- if (iwlagn_mod_params.sw_crypto) {
- IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n");
- return -EOPNOTSUPP;
- }
-
- /*
- * We could program these keys into the hardware as well, but we
- * don't expect much multicast traffic in IBSS and having keys
- * for more stations is probably more useful.
- *
- * Mark key TX-only and return 0.
- */
- if (vif->type == NL80211_IFTYPE_ADHOC &&
- !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
- key->hw_key_idx = WEP_INVALID_OFFSET;
- return 0;
- }
-
-	/* If the key was TX-only, accept deletion */
- if (cmd == DISABLE_KEY && key->hw_key_idx == WEP_INVALID_OFFSET)
- return 0;
-
- mutex_lock(&priv->shrd->mutex);
- iwl_scan_cancel_timeout(priv, 100);
-
- BUILD_BUG_ON(WEP_INVALID_OFFSET == IWLAGN_HW_KEY_DEFAULT);
-
- /*
-	 * If we are getting a WEP group key and we haven't received any
-	 * key-mapping keys so far, we are in legacy WEP mode (group key
-	 * only); otherwise we are in 1X mode.
-	 * In legacy WEP mode, we use a different host command to the uCode.
- */
- if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
- key->cipher == WLAN_CIPHER_SUITE_WEP104) && !sta) {
- if (cmd == SET_KEY)
- is_default_wep_key = !ctx->key_mapping_keys;
- else
- is_default_wep_key =
- key->hw_key_idx == IWLAGN_HW_KEY_DEFAULT;
- }
-
-
- switch (cmd) {
- case SET_KEY:
- if (is_default_wep_key) {
- ret = iwl_set_default_wep_key(priv, vif_priv->ctx, key);
- break;
- }
- ret = iwl_set_dynamic_key(priv, vif_priv->ctx, key, sta);
- if (ret) {
- /*
- * can't add key for RX, but we don't need it
- * in the device for TX so still return 0
- */
- ret = 0;
- key->hw_key_idx = WEP_INVALID_OFFSET;
- }
-
- IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n");
- break;
- case DISABLE_KEY:
- if (is_default_wep_key)
- ret = iwl_remove_default_wep_key(priv, ctx, key);
- else
- ret = iwl_remove_dynamic_key(priv, ctx, key, sta);
-
- IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n");
- break;
- default:
- ret = -EINVAL;
- }
-
- mutex_unlock(&priv->shrd->mutex);
- IWL_DEBUG_MAC80211(priv, "leave\n");
-
- return ret;
-}
-
-static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn,
- u8 buf_size)
-{
- struct iwl_priv *priv = hw->priv;
- int ret = -EINVAL;
- struct iwl_station_priv *sta_priv = (void *) sta->drv_priv;
- struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
-
- IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
- sta->addr, tid);
-
- if (!(priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE))
- return -EACCES;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
- mutex_lock(&priv->shrd->mutex);
-
- switch (action) {
- case IEEE80211_AMPDU_RX_START:
- IWL_DEBUG_HT(priv, "start Rx\n");
- ret = iwl_sta_rx_agg_start(priv, sta, tid, *ssn);
- break;
- case IEEE80211_AMPDU_RX_STOP:
- IWL_DEBUG_HT(priv, "stop Rx\n");
- ret = iwl_sta_rx_agg_stop(priv, sta, tid);
- if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
- ret = 0;
- break;
- case IEEE80211_AMPDU_TX_START:
- IWL_DEBUG_HT(priv, "start Tx\n");
- ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn);
- break;
- case IEEE80211_AMPDU_TX_STOP:
- IWL_DEBUG_HT(priv, "stop Tx\n");
- ret = iwlagn_tx_agg_stop(priv, vif, sta, tid);
- if ((ret == 0) && (priv->agg_tids_count > 0)) {
- priv->agg_tids_count--;
- IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n",
- priv->agg_tids_count);
- }
- if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
- ret = 0;
- if (!priv->agg_tids_count && priv->cfg->ht_params &&
- priv->cfg->ht_params->use_rts_for_aggregation) {
- /*
- * switch off RTS/CTS if it was previously enabled
- */
- sta_priv->lq_sta.lq.general_params.flags &=
- ~LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
- iwl_send_lq_cmd(priv, iwl_rxon_ctx_from_vif(vif),
- &sta_priv->lq_sta.lq, CMD_ASYNC, false);
- }
- break;
- case IEEE80211_AMPDU_TX_OPERATIONAL:
- buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);
-
- iwl_trans_tx_agg_setup(trans(priv), ctx->ctxid, iwl_sta_id(sta),
- tid, buf_size);
-
- /*
-		 * If the limit is 0, it wasn't initialised yet;
- * use the default. We can do that since we take the
- * minimum below, and we don't want to go above our
- * default due to hardware restrictions.
- */
- if (sta_priv->max_agg_bufsize == 0)
- sta_priv->max_agg_bufsize =
- LINK_QUAL_AGG_FRAME_LIMIT_DEF;
-
- /*
- * Even though in theory the peer could have different
- * aggregation reorder buffer sizes for different sessions,
- * our ucode doesn't allow for that and has a global limit
- * for each station. Therefore, use the minimum of all the
- * aggregation sessions and our default value.
- */
- sta_priv->max_agg_bufsize =
- min(sta_priv->max_agg_bufsize, buf_size);
-
- if (priv->cfg->ht_params &&
- priv->cfg->ht_params->use_rts_for_aggregation) {
- /*
-			 * switch to RTS/CTS if it is the preferred protection
- * method for HT traffic
- */
-
- sta_priv->lq_sta.lq.general_params.flags |=
- LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
- }
- priv->agg_tids_count++;
- IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n",
- priv->agg_tids_count);
-
- sta_priv->lq_sta.lq.agg_params.agg_frame_cnt_limit =
- sta_priv->max_agg_bufsize;
-
- iwl_send_lq_cmd(priv, iwl_rxon_ctx_from_vif(vif),
- &sta_priv->lq_sta.lq, CMD_ASYNC, false);
-
- IWL_INFO(priv, "Tx aggregation enabled on ra = %pM tid = %d\n",
- sta->addr, tid);
- ret = 0;
- break;
- }
- mutex_unlock(&priv->shrd->mutex);
- IWL_DEBUG_MAC80211(priv, "leave\n");
- return ret;
-}
-
-static int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
- struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
- bool is_ap = vif->type == NL80211_IFTYPE_STATION;
- int ret = 0;
- u8 sta_id;
-
- IWL_DEBUG_MAC80211(priv, "received request to add station %pM\n",
- sta->addr);
- mutex_lock(&priv->shrd->mutex);
- IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n",
- sta->addr);
- sta_priv->sta_id = IWL_INVALID_STATION;
-
- atomic_set(&sta_priv->pending_frames, 0);
- if (vif->type == NL80211_IFTYPE_AP)
- sta_priv->client = true;
-
- ret = iwl_add_station_common(priv, vif_priv->ctx, sta->addr,
- is_ap, sta, &sta_id);
- if (ret) {
- IWL_ERR(priv, "Unable to add station %pM (%d)\n",
- sta->addr, ret);
- /* Should we return success if return code is EEXIST ? */
- goto out;
- }
-
- sta_priv->sta_id = sta_id;
-
- /* Initialize rate scaling */
- IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n",
- sta->addr);
- iwl_rs_rate_init(priv, sta, sta_id);
- out:
- mutex_unlock(&priv->shrd->mutex);
- IWL_DEBUG_MAC80211(priv, "leave\n");
-
- return ret;
-}
-
-static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
- struct ieee80211_channel_switch *ch_switch)
-{
- struct iwl_priv *priv = hw->priv;
- const struct iwl_channel_info *ch_info;
- struct ieee80211_conf *conf = &hw->conf;
- struct ieee80211_channel *channel = ch_switch->channel;
- struct iwl_ht_config *ht_conf = &priv->current_ht_config;
- /*
- * MULTI-FIXME
- * When we add support for multiple interfaces, we need to
- * revisit this. The channel switch command in the device
- * only affects the BSS context, but what does that really
- * mean? And what if we get a CSA on the second interface?
- * This needs a lot of work.
- */
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- u16 ch;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- mutex_lock(&priv->shrd->mutex);
-
- if (iwl_is_rfkill(priv->shrd))
- goto out;
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status) ||
- test_bit(STATUS_SCANNING, &priv->shrd->status) ||
- test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->shrd->status))
- goto out;
-
- if (!iwl_is_associated_ctx(ctx))
- goto out;
-
- if (!priv->cfg->lib->set_channel_switch)
- goto out;
-
- ch = channel->hw_value;
- if (le16_to_cpu(ctx->active.channel) == ch)
- goto out;
-
- ch_info = iwl_get_channel_info(priv, channel->band, ch);
- if (!is_channel_valid(ch_info)) {
- IWL_DEBUG_MAC80211(priv, "invalid channel\n");
- goto out;
- }
-
- spin_lock_irq(&priv->shrd->lock);
-
- priv->current_ht_config.smps = conf->smps_mode;
-
- /* Configure HT40 channels */
- ctx->ht.enabled = conf_is_ht(conf);
- if (ctx->ht.enabled) {
- if (conf_is_ht40_minus(conf)) {
- ctx->ht.extension_chan_offset =
- IEEE80211_HT_PARAM_CHA_SEC_BELOW;
- ctx->ht.is_40mhz = true;
- } else if (conf_is_ht40_plus(conf)) {
- ctx->ht.extension_chan_offset =
- IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
- ctx->ht.is_40mhz = true;
- } else {
- ctx->ht.extension_chan_offset =
- IEEE80211_HT_PARAM_CHA_SEC_NONE;
- ctx->ht.is_40mhz = false;
- }
- } else
- ctx->ht.is_40mhz = false;
-
- if ((le16_to_cpu(ctx->staging.channel) != ch))
- ctx->staging.flags = 0;
-
- iwl_set_rxon_channel(priv, channel, ctx);
- iwl_set_rxon_ht(priv, ht_conf);
- iwl_set_flags_for_band(priv, ctx, channel->band, ctx->vif);
-
- spin_unlock_irq(&priv->shrd->lock);
-
- iwl_set_rate(priv);
- /*
- * at this point, staging_rxon has the
- * configuration for channel switch
- */
- set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->shrd->status);
- priv->switch_channel = cpu_to_le16(ch);
- if (priv->cfg->lib->set_channel_switch(priv, ch_switch)) {
- clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->shrd->status);
- priv->switch_channel = 0;
- ieee80211_chswitch_done(ctx->vif, false);
- }
-
-out:
- mutex_unlock(&priv->shrd->mutex);
- IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-
-static void iwlagn_configure_filter(struct ieee80211_hw *hw,
- unsigned int changed_flags,
- unsigned int *total_flags,
- u64 multicast)
-{
- struct iwl_priv *priv = hw->priv;
- __le32 filter_or = 0, filter_nand = 0;
- struct iwl_rxon_context *ctx;
-
-#define CHK(test, flag) do { \
- if (*total_flags & (test)) \
- filter_or |= (flag); \
- else \
- filter_nand |= (flag); \
- } while (0)
-
- IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
- changed_flags, *total_flags);
-
- CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
- /* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */
- CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
- CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
-
-#undef CHK
-
- mutex_lock(&priv->shrd->mutex);
-
- for_each_context(priv, ctx) {
- ctx->staging.filter_flags &= ~filter_nand;
- ctx->staging.filter_flags |= filter_or;
-
- /*
-		 * Not committing directly because the hardware may be performing a scan,
- * but we'll eventually commit the filter flags change anyway.
- */
- }
-
- mutex_unlock(&priv->shrd->mutex);
-
- /*
- * Receiving all multicast frames is always enabled by the
- * default flags setup in iwl_connection_init_rx_config()
- * since we currently do not support programming multicast
- * filters into the device.
- */
- *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
- FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
-}
-
-static void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop)
-{
- struct iwl_priv *priv = hw->priv;
- mutex_lock(&priv->shrd->mutex);
- IWL_DEBUG_MAC80211(priv, "enter\n");
- if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status)) {
- IWL_DEBUG_TX(priv, "Aborting flush due to device shutdown\n");
- goto done;
- }
- if (iwl_is_rfkill(priv->shrd)) {
- IWL_DEBUG_TX(priv, "Aborting flush due to RF Kill\n");
- goto done;
- }
-
- /*
- * mac80211 will not push any more frames for transmit
- * until the flush is completed
- */
- if (drop) {
- IWL_DEBUG_MAC80211(priv, "send flush command\n");
- if (iwlagn_txfifo_flush(priv, IWL_DROP_ALL)) {
-			IWL_ERR(priv, "flush request failed\n");
- goto done;
- }
- }
- IWL_DEBUG_MAC80211(priv, "wait transmit/flush all frames\n");
- iwl_trans_wait_tx_queue_empty(trans(priv));
-done:
- mutex_unlock(&priv->shrd->mutex);
- IWL_DEBUG_MAC80211(priv, "leave\n");
-}
void iwlagn_disable_roc(struct iwl_priv *priv)
{
@@ -2759,160 +1493,6 @@ static void iwlagn_disable_roc_work(struct work_struct *work)
mutex_unlock(&priv->shrd->mutex);
}
-static int iwlagn_mac_remain_on_channel(struct ieee80211_hw *hw,
- struct ieee80211_channel *channel,
- enum nl80211_channel_type channel_type,
- int duration)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_PAN];
- int err = 0;
-
- if (!(priv->shrd->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
- return -EOPNOTSUPP;
-
- if (!(ctx->interface_modes & BIT(NL80211_IFTYPE_P2P_CLIENT)))
- return -EOPNOTSUPP;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
- mutex_lock(&priv->shrd->mutex);
-
- if (test_bit(STATUS_SCAN_HW, &priv->shrd->status)) {
- err = -EBUSY;
- goto out;
- }
-
- priv->hw_roc_channel = channel;
- priv->hw_roc_chantype = channel_type;
- priv->hw_roc_duration = duration;
- priv->hw_roc_start_notified = false;
- cancel_delayed_work(&priv->hw_roc_disable_work);
-
- if (!ctx->is_active) {
- ctx->is_active = true;
- ctx->staging.dev_type = RXON_DEV_TYPE_P2P;
- memcpy(ctx->staging.node_addr,
- priv->contexts[IWL_RXON_CTX_BSS].staging.node_addr,
- ETH_ALEN);
- memcpy(ctx->staging.bssid_addr,
- priv->contexts[IWL_RXON_CTX_BSS].staging.node_addr,
- ETH_ALEN);
- err = iwlagn_commit_rxon(priv, ctx);
- if (err)
- goto out;
- ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK |
- RXON_FILTER_PROMISC_MSK |
- RXON_FILTER_CTL2HOST_MSK;
-
- err = iwlagn_commit_rxon(priv, ctx);
- if (err) {
- iwlagn_disable_roc(priv);
- goto out;
- }
- priv->hw_roc_setup = true;
- }
-
- err = iwl_scan_initiate(priv, ctx->vif, IWL_SCAN_ROC, channel->band);
- if (err)
- iwlagn_disable_roc(priv);
-
- out:
- mutex_unlock(&priv->shrd->mutex);
- IWL_DEBUG_MAC80211(priv, "leave\n");
-
- return err;
-}
-
-static int iwlagn_mac_cancel_remain_on_channel(struct ieee80211_hw *hw)
-{
- struct iwl_priv *priv = hw->priv;
-
- if (!(priv->shrd->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
- return -EOPNOTSUPP;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
- mutex_lock(&priv->shrd->mutex);
- iwl_scan_cancel_timeout(priv, priv->hw_roc_duration);
- iwlagn_disable_roc(priv);
- mutex_unlock(&priv->shrd->mutex);
- IWL_DEBUG_MAC80211(priv, "leave\n");
-
- return 0;
-}
-
-static int iwlagn_mac_tx_sync(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- const u8 *bssid,
- enum ieee80211_tx_sync_type type)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
- struct iwl_rxon_context *ctx = vif_priv->ctx;
- int ret;
- u8 sta_id;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
- mutex_lock(&priv->shrd->mutex);
-
- if (iwl_is_associated_ctx(ctx)) {
- ret = 0;
- goto out;
- }
-
- if (ctx->preauth_bssid || test_bit(STATUS_SCAN_HW, &priv->shrd->status)) {
- ret = -EBUSY;
- goto out;
- }
-
- ret = iwl_add_station_common(priv, ctx, bssid, true, NULL, &sta_id);
- if (ret)
- goto out;
-
- if (WARN_ON(sta_id != ctx->ap_sta_id)) {
- ret = -EIO;
- goto out_remove_sta;
- }
-
- memcpy(ctx->bssid, bssid, ETH_ALEN);
- ctx->preauth_bssid = true;
-
- ret = iwlagn_commit_rxon(priv, ctx);
-
- if (ret == 0)
- goto out;
-
- out_remove_sta:
- iwl_remove_station(priv, sta_id, bssid);
- out:
- mutex_unlock(&priv->shrd->mutex);
- IWL_DEBUG_MAC80211(priv, "leave\n");
-
- return ret;
-}
-
-static void iwlagn_mac_finish_tx_sync(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- const u8 *bssid,
- enum ieee80211_tx_sync_type type)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
- struct iwl_rxon_context *ctx = vif_priv->ctx;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
- mutex_lock(&priv->shrd->mutex);
-
- if (iwl_is_associated_ctx(ctx))
- goto out;
-
- iwl_remove_station(priv, ctx->ap_sta_id, bssid);
- ctx->preauth_bssid = false;
- /* no need to commit */
- out:
- mutex_unlock(&priv->shrd->mutex);
- IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-
/*****************************************************************************
*
* driver setup and teardown
@@ -2936,8 +1516,8 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
iwl_setup_scan_deferred_work(priv);
- if (priv->cfg->lib->bt_setup_deferred_work)
- priv->cfg->lib->bt_setup_deferred_work(priv);
+ if (cfg(priv)->lib->bt_setup_deferred_work)
+ cfg(priv)->lib->bt_setup_deferred_work(priv);
init_timer(&priv->statistics_periodic);
priv->statistics_periodic.data = (unsigned long)priv;
@@ -2954,8 +1534,8 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
static void iwl_cancel_deferred_work(struct iwl_priv *priv)
{
- if (priv->cfg->lib->cancel_deferred_work)
- priv->cfg->lib->cancel_deferred_work(priv);
+ if (cfg(priv)->lib->cancel_deferred_work)
+ cfg(priv)->lib->cancel_deferred_work(priv);
cancel_work_sync(&priv->run_time_calib_work);
cancel_work_sync(&priv->beacon_update);
@@ -2999,6 +1579,8 @@ static int iwl_init_drv(struct iwl_priv *priv)
mutex_init(&priv->shrd->mutex);
+ INIT_LIST_HEAD(&trans(priv)->calib_results);
+
priv->ieee_channels = NULL;
priv->ieee_rates = NULL;
priv->band = IEEE80211_BAND_2GHZ;
@@ -3022,8 +1604,8 @@ static int iwl_init_drv(struct iwl_priv *priv)
iwl_init_scan_params(priv);
/* init bt coex */
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist) {
+ if (cfg(priv)->bt_params &&
+ cfg(priv)->bt_params->advanced_bt_coexist) {
priv->kill_ack_mask = IWLAGN_BT_KILL_ACK_MASK_DEFAULT;
priv->kill_cts_mask = IWLAGN_BT_KILL_CTS_MASK_DEFAULT;
priv->bt_valid = IWLAGN_BT_ALL_VALID_MSK;
@@ -3055,88 +1637,19 @@ err:
static void iwl_uninit_drv(struct iwl_priv *priv)
{
- iwl_calib_free_results(priv);
iwl_free_geos(priv);
iwl_free_channel_map(priv);
if (priv->tx_cmd_pool)
kmem_cache_destroy(priv->tx_cmd_pool);
kfree(priv->scan_cmd);
kfree(priv->beacon_cmd);
+ kfree(rcu_dereference_raw(priv->noa_data));
#ifdef CONFIG_IWLWIFI_DEBUGFS
kfree(priv->wowlan_sram);
#endif
}
-static void iwlagn_mac_rssi_callback(struct ieee80211_hw *hw,
- enum ieee80211_rssi_event rssi_event)
-{
- struct iwl_priv *priv = hw->priv;
- IWL_DEBUG_MAC80211(priv, "enter\n");
- mutex_lock(&priv->shrd->mutex);
-
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist) {
- if (rssi_event == RSSI_EVENT_LOW)
- priv->bt_enable_pspoll = true;
- else if (rssi_event == RSSI_EVENT_HIGH)
- priv->bt_enable_pspoll = false;
-
- iwlagn_send_advance_bt_config(priv);
- } else {
- IWL_DEBUG_MAC80211(priv, "Advanced BT coex disabled,"
- "ignoring RSSI callback\n");
- }
-
- mutex_unlock(&priv->shrd->mutex);
- IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-
-static int iwlagn_mac_set_tim(struct ieee80211_hw *hw,
- struct ieee80211_sta *sta, bool set)
-{
- struct iwl_priv *priv = hw->priv;
-
- queue_work(priv->shrd->workqueue, &priv->beacon_update);
-
- return 0;
-}
-
-struct ieee80211_ops iwlagn_hw_ops = {
- .tx = iwlagn_mac_tx,
- .start = iwlagn_mac_start,
- .stop = iwlagn_mac_stop,
-#ifdef CONFIG_PM_SLEEP
- .suspend = iwlagn_mac_suspend,
- .resume = iwlagn_mac_resume,
-#endif
- .add_interface = iwlagn_mac_add_interface,
- .remove_interface = iwlagn_mac_remove_interface,
- .change_interface = iwlagn_mac_change_interface,
- .config = iwlagn_mac_config,
- .configure_filter = iwlagn_configure_filter,
- .set_key = iwlagn_mac_set_key,
- .update_tkip_key = iwlagn_mac_update_tkip_key,
- .set_rekey_data = iwlagn_mac_set_rekey_data,
- .conf_tx = iwlagn_mac_conf_tx,
- .bss_info_changed = iwlagn_bss_info_changed,
- .ampdu_action = iwlagn_mac_ampdu_action,
- .hw_scan = iwlagn_mac_hw_scan,
- .sta_notify = iwlagn_mac_sta_notify,
- .sta_add = iwlagn_mac_sta_add,
- .sta_remove = iwlagn_mac_sta_remove,
- .channel_switch = iwlagn_mac_channel_switch,
- .flush = iwlagn_mac_flush,
- .tx_last_beacon = iwlagn_mac_tx_last_beacon,
- .remain_on_channel = iwlagn_mac_remain_on_channel,
- .cancel_remain_on_channel = iwlagn_mac_cancel_remain_on_channel,
- .rssi_callback = iwlagn_mac_rssi_callback,
- CFG80211_TESTMODE_CMD(iwlagn_mac_testmode_cmd)
- CFG80211_TESTMODE_DUMP(iwlagn_mac_testmode_dump)
- .tx_sync = iwlagn_mac_tx_sync,
- .finish_tx_sync = iwlagn_mac_finish_tx_sync,
- .set_tim = iwlagn_mac_set_tim,
-};
static u32 iwl_hw_detect(struct iwl_priv *priv)
{
@@ -3156,40 +1669,55 @@ static int iwl_set_hw_params(struct iwl_priv *priv)
hw_params(priv).rx_page_order =
get_order(IWL_RX_BUF_SIZE_4K);
- if (iwlagn_mod_params.disable_11n)
- priv->cfg->sku &= ~EEPROM_SKU_CAP_11N_ENABLE;
+ if (iwlagn_mod_params.disable_11n & IWL_DISABLE_HT_ALL)
+ cfg(priv)->sku &= ~EEPROM_SKU_CAP_11N_ENABLE;
hw_params(priv).num_ampdu_queues =
- priv->cfg->base_params->num_of_ampdu_queues;
+ cfg(priv)->base_params->num_of_ampdu_queues;
hw_params(priv).shadow_reg_enable =
- priv->cfg->base_params->shadow_reg_enable;
- hw_params(priv).sku = priv->cfg->sku;
- hw_params(priv).wd_timeout = priv->cfg->base_params->wd_timeout;
+ cfg(priv)->base_params->shadow_reg_enable;
+ hw_params(priv).sku = cfg(priv)->sku;
+ hw_params(priv).wd_timeout = cfg(priv)->base_params->wd_timeout;
/* Device-specific setup */
- return priv->cfg->lib->set_hw_params(priv);
+ return cfg(priv)->lib->set_hw_params(priv);
}
-/* This function both allocates and initializes hw and priv. */
-static struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg)
-{
- struct iwl_priv *priv;
- /* mac80211 allocates memory for this device instance, including
- * space for this driver's private structure */
- struct ieee80211_hw *hw;
- hw = ieee80211_alloc_hw(sizeof(struct iwl_priv), &iwlagn_hw_ops);
- if (hw == NULL) {
- pr_err("%s: Can not allocate network device\n",
- cfg->name);
- goto out;
- }
- priv = hw->priv;
- priv->hw = hw;
+static void iwl_debug_config(struct iwl_priv *priv)
+{
+ dev_printk(KERN_INFO, bus(priv)->dev, "CONFIG_IWLWIFI_DEBUG "
+#ifdef CONFIG_IWLWIFI_DEBUG
+ "enabled\n");
+#else
+ "disabled\n");
+#endif
+ dev_printk(KERN_INFO, bus(priv)->dev, "CONFIG_IWLWIFI_DEBUGFS "
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ "enabled\n");
+#else
+ "disabled\n");
+#endif
+ dev_printk(KERN_INFO, bus(priv)->dev, "CONFIG_IWLWIFI_DEVICE_TRACING "
+#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
+ "enabled\n");
+#else
+ "disabled\n");
+#endif
-out:
- return hw;
+ dev_printk(KERN_INFO, bus(priv)->dev, "CONFIG_IWLWIFI_DEVICE_TESTMODE "
+#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
+ "enabled\n");
+#else
+ "disabled\n");
+#endif
+ dev_printk(KERN_INFO, bus(priv)->dev, "CONFIG_IWLWIFI_P2P "
+#ifdef CONFIG_IWLWIFI_P2P
+ "enabled\n");
+#else
+ "disabled\n");
+#endif
}
int iwl_probe(struct iwl_bus *bus, const struct iwl_trans_ops *trans_ops,
@@ -3204,8 +1732,9 @@ int iwl_probe(struct iwl_bus *bus, const struct iwl_trans_ops *trans_ops,
/************************
* 1. Allocating HW data
************************/
- hw = iwl_alloc_all(cfg);
+ hw = iwl_alloc_all();
if (!hw) {
+ pr_err("%s: Cannot allocate network device\n", cfg->name);
err = -ENOMEM;
goto out;
}
@@ -3226,8 +1755,11 @@ int iwl_probe(struct iwl_bus *bus, const struct iwl_trans_ops *trans_ops,
SET_IEEE80211_DEV(hw, bus(priv)->dev);
+ /* what debugging capabilities we have */
+ iwl_debug_config(priv);
+
IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
- priv->cfg = cfg;
+ cfg(priv) = cfg;
/* is antenna coupling more than 35dB ? */
priv->bt_ant_couple_ok =
@@ -3261,7 +1793,7 @@ int iwl_probe(struct iwl_bus *bus, const struct iwl_trans_ops *trans_ops,
***********************/
hw_rev = iwl_hw_detect(priv);
IWL_INFO(priv, "Detected %s, REV=0x%X\n",
- priv->cfg->name, hw_rev);
+ cfg(priv)->name, hw_rev);
err = iwl_trans_request_irq(trans(priv));
if (err)
@@ -3291,11 +1823,11 @@ int iwl_probe(struct iwl_bus *bus, const struct iwl_trans_ops *trans_ops,
goto out_free_eeprom;
/* extract MAC Address */
- iwl_eeprom_get_mac(priv, priv->addresses[0].addr);
+ iwl_eeprom_get_mac(priv->shrd, priv->addresses[0].addr);
IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->addresses[0].addr);
priv->hw->wiphy->addresses = priv->addresses;
priv->hw->wiphy->n_addresses = 1;
- num_mac = iwl_eeprom_query16(priv, EEPROM_NUM_MAC_ADDRESS);
+ num_mac = iwl_eeprom_query16(priv->shrd, EEPROM_NUM_MAC_ADDRESS);
if (num_mac > 1) {
memcpy(priv->addresses[1].addr, priv->addresses[0].addr,
ETH_ALEN);
@@ -3360,7 +1892,7 @@ out_destroy_workqueue:
priv->shrd->workqueue = NULL;
iwl_uninit_drv(priv);
out_free_eeprom:
- iwl_eeprom_free(priv);
+ iwl_eeprom_free(priv->shrd);
out_free_trans:
iwl_trans_free(trans(priv));
out_free_traffic_mem:
@@ -3385,21 +1917,16 @@ void __devexit iwl_remove(struct iwl_priv * priv)
set_bit(STATUS_EXIT_PENDING, &priv->shrd->status);
iwl_testmode_cleanup(priv);
- iwl_leds_exit(priv);
-
- if (priv->mac80211_registered) {
- ieee80211_unregister_hw(priv->hw);
- priv->mac80211_registered = 0;
- }
+ iwlagn_mac_unregister(priv);
iwl_tt_exit(priv);
	/* This will stop the queues and move the device to a low power state */
iwl_trans_stop_device(trans(priv));
- iwl_dealloc_ucode(priv);
+ iwl_dealloc_ucode(trans(priv));
- iwl_eeprom_free(priv);
+ iwl_eeprom_free(priv->shrd);
/*netif_stop_queue(dev); */
flush_workqueue(priv->shrd->workqueue);
@@ -3469,8 +1996,9 @@ module_param_named(swcrypto, iwlagn_mod_params.sw_crypto, int, S_IRUGO);
MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
module_param_named(queues_num, iwlagn_mod_params.num_of_queues, int, S_IRUGO);
MODULE_PARM_DESC(queues_num, "number of hw queues.");
-module_param_named(11n_disable, iwlagn_mod_params.disable_11n, int, S_IRUGO);
-MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
+module_param_named(11n_disable, iwlagn_mod_params.disable_11n, uint, S_IRUGO);
+MODULE_PARM_DESC(11n_disable,
+ "disable 11n functionality, bitmap: 1: full, 2: agg TX, 4: agg RX");
module_param_named(amsdu_size_8K, iwlagn_mod_params.amsdu_size_8K,
int, S_IRUGO);
MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
@@ -3499,9 +2027,10 @@ MODULE_PARM_DESC(plcp_check, "Check plcp health (default: 1 [enabled])");
module_param_named(ack_check, iwlagn_mod_params.ack_check, bool, S_IRUGO);
MODULE_PARM_DESC(ack_check, "Check ack health (default: 0 [disabled])");
-module_param_named(wd_disable, iwlagn_mod_params.wd_disable, bool, S_IRUGO);
+module_param_named(wd_disable, iwlagn_mod_params.wd_disable, int, S_IRUGO);
MODULE_PARM_DESC(wd_disable,
- "Disable stuck queue watchdog timer (default: 0 [enabled])");
+ "Disable stuck queue watchdog timer 0=system default, "
+ "1=disable, 2=enable (default: 0)");
/*
* set bt_coex_active to true, uCode will do kill/defer
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.h b/drivers/net/wireless/iwlwifi/iwl-agn.h
index 5b936ec1a541..f84fb3c53563 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.h
@@ -65,6 +65,12 @@
#include "iwl-dev.h"
+struct iwlagn_ucode_capabilities {
+ u32 max_probe_length;
+ u32 standard_phy_calibration_size;
+ u32 flags;
+};
+
extern struct ieee80211_ops iwlagn_hw_ops;
int iwl_reset_ict(struct iwl_trans *trans);
@@ -77,6 +83,16 @@ static inline void iwl_set_calib_hdr(struct iwl_calib_hdr *hdr, u8 cmd)
hdr->data_valid = 1;
}
+void __iwl_down(struct iwl_priv *priv);
+void iwl_down(struct iwl_priv *priv);
+void iwlagn_prepare_restart(struct iwl_priv *priv);
+
+/* MAC80211 */
+struct ieee80211_hw *iwl_alloc_all(void);
+int iwlagn_mac_setup_register(struct iwl_priv *priv,
+ struct iwlagn_ucode_capabilities *capa);
+void iwlagn_mac_unregister(struct iwl_priv *priv);
+
/* RXON */
int iwlagn_set_pan_params(struct iwl_priv *priv);
int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
@@ -86,25 +102,27 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
u32 changes);
+void iwlagn_config_ht40(struct ieee80211_conf *conf,
+ struct iwl_rxon_context *ctx);
/* uCode */
int iwlagn_rx_calib_result(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb,
struct iwl_device_cmd *cmd);
-int iwlagn_send_bt_env(struct iwl_priv *priv, u8 action, u8 type);
-void iwlagn_send_prio_tbl(struct iwl_priv *priv);
-int iwlagn_run_init_ucode(struct iwl_priv *priv);
-int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv,
- struct fw_img *image,
- enum iwlagn_ucode_type ucode_type);
/* lib */
int iwlagn_send_tx_power(struct iwl_priv *priv);
void iwlagn_temperature(struct iwl_priv *priv);
-u16 iwlagn_eeprom_calib_version(struct iwl_priv *priv);
+u16 iwl_eeprom_calib_version(struct iwl_shared *shrd);
int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control);
void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control);
int iwlagn_send_beacon_cmd(struct iwl_priv *priv);
+#ifdef CONFIG_PM_SLEEP
+int iwlagn_send_patterns(struct iwl_priv *priv,
+ struct cfg80211_wowlan *wowlan);
+int iwlagn_suspend(struct iwl_priv *priv,
+ struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan);
+#endif
/* rx */
int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
@@ -115,6 +133,8 @@ void iwl_setup_rx_handlers(struct iwl_priv *priv);
int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb);
int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid, u16 *ssn);
+int iwlagn_tx_agg_oper(struct iwl_priv *priv, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid, u8 buf_size);
int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid);
int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
@@ -196,9 +216,6 @@ int iwl_add_station_common(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
struct ieee80211_sta *sta, u8 *sta_id_r);
int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id,
const u8 *addr);
-int iwlagn_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
-
u8 iwl_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
const u8 *addr, bool is_ap, struct ieee80211_sta *sta);
@@ -316,10 +333,6 @@ void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt);
int iwl_update_bcast_station(struct iwl_priv *priv,
struct iwl_rxon_context *ctx);
int iwl_update_bcast_stations(struct iwl_priv *priv);
-void iwlagn_mac_sta_notify(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- enum sta_notify_cmd cmd,
- struct ieee80211_sta *sta);
/* rate */
static inline u32 iwl_ant_idx_to_flags(u8 ant_idx)
@@ -339,28 +352,11 @@ static inline __le32 iwl_hw_set_rate_n_flags(u8 rate, u32 flags)
/* eeprom */
void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv);
-void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac);
-
-/* notification wait support */
-void __acquires(wait_entry)
-iwlagn_init_notification_wait(struct iwl_priv *priv,
- struct iwl_notification_wait *wait_entry,
- u8 cmd,
- void (*fn)(struct iwl_priv *priv,
- struct iwl_rx_packet *pkt,
- void *data),
- void *fn_data);
-int __must_check __releases(wait_entry)
-iwlagn_wait_notification(struct iwl_priv *priv,
- struct iwl_notification_wait *wait_entry,
- unsigned long timeout);
-void __releases(wait_entry)
-iwlagn_remove_notification(struct iwl_priv *priv,
- struct iwl_notification_wait *wait_entry);
-extern int iwlagn_init_alive_start(struct iwl_priv *priv);
+void iwl_eeprom_get_mac(const struct iwl_shared *shrd, u8 *mac);
+
extern int iwl_alive_start(struct iwl_priv *priv);
/* svtool */
-#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL
+#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
extern int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data,
int len);
extern int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/iwlwifi/iwl-bus.h b/drivers/net/wireless/iwlwifi/iwl-bus.h
index 08b97594e305..940d5038b39c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-bus.h
+++ b/drivers/net/wireless/iwlwifi/iwl-bus.h
@@ -122,7 +122,8 @@ struct iwl_bus;
* struct iwl_bus_ops - bus specific operations
 * @get_pm_support: must return true if the bus can go to sleep
* @apm_config: will be called during the config of the APM
- * @get_hw_id: prints the hw_id in the provided buffer
+ * @get_hw_id_string: prints the hw_id in the provided buffer
+ * @get_hw_id: get hw_id in u32
* @write8: write a byte to register at offset ofs
* @write32: write a dword to register at offset ofs
 * @read32: read a dword from register at offset ofs
@@ -130,7 +131,8 @@ struct iwl_bus;
struct iwl_bus_ops {
bool (*get_pm_support)(struct iwl_bus *bus);
void (*apm_config)(struct iwl_bus *bus);
- void (*get_hw_id)(struct iwl_bus *bus, char buf[], int buf_len);
+ void (*get_hw_id_string)(struct iwl_bus *bus, char buf[], int buf_len);
+ u32 (*get_hw_id)(struct iwl_bus *bus);
void (*write8)(struct iwl_bus *bus, u32 ofs, u8 val);
void (*write32)(struct iwl_bus *bus, u32 ofs, u32 val);
u32 (*read32)(struct iwl_bus *bus, u32 ofs);
@@ -172,9 +174,15 @@ static inline void bus_apm_config(struct iwl_bus *bus)
bus->ops->apm_config(bus);
}
-static inline void bus_get_hw_id(struct iwl_bus *bus, char buf[], int buf_len)
+static inline void bus_get_hw_id_string(struct iwl_bus *bus, char buf[],
+ int buf_len)
{
- bus->ops->get_hw_id(bus, buf, buf_len);
+ bus->ops->get_hw_id_string(bus, buf, buf_len);
+}
+
+static inline u32 bus_get_hw_id(struct iwl_bus *bus)
+{
+ return bus->ops->get_hw_id(bus);
}
static inline void bus_write8(struct iwl_bus *bus, u32 ofs, u8 val)
diff --git a/drivers/net/wireless/iwlwifi/iwl-cfg.h b/drivers/net/wireless/iwlwifi/iwl-cfg.h
index 2a2dc4597ba1..e1d78257e4a9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-cfg.h
+++ b/drivers/net/wireless/iwlwifi/iwl-cfg.h
@@ -101,17 +101,11 @@ extern struct iwl_cfg iwl100_bg_cfg;
extern struct iwl_cfg iwl130_bgn_cfg;
extern struct iwl_cfg iwl130_bg_cfg;
extern struct iwl_cfg iwl2000_2bgn_cfg;
-extern struct iwl_cfg iwl2000_2bg_cfg;
extern struct iwl_cfg iwl2000_2bgn_d_cfg;
extern struct iwl_cfg iwl2030_2bgn_cfg;
-extern struct iwl_cfg iwl2030_2bg_cfg;
extern struct iwl_cfg iwl6035_2agn_cfg;
-extern struct iwl_cfg iwl6035_2abg_cfg;
-extern struct iwl_cfg iwl6035_2bg_cfg;
-extern struct iwl_cfg iwl105_bg_cfg;
extern struct iwl_cfg iwl105_bgn_cfg;
extern struct iwl_cfg iwl105_bgn_d_cfg;
-extern struct iwl_cfg iwl135_bg_cfg;
extern struct iwl_cfg iwl135_bgn_cfg;
#endif /* __iwl_pci_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index 69d5f85d11e2..265de39d394c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -109,10 +109,10 @@ enum {
/* RX, TX, LEDs */
REPLY_TX = 0x1c,
REPLY_LEDS_CMD = 0x48,
- REPLY_TX_LINK_QUALITY_CMD = 0x4e, /* for 4965 and up */
+ REPLY_TX_LINK_QUALITY_CMD = 0x4e,
/* WiMAX coexistence */
- COEX_PRIORITY_TABLE_CMD = 0x5a, /* for 5000 series and up */
+ COEX_PRIORITY_TABLE_CMD = 0x5a,
COEX_MEDIUM_NOTIFICATION = 0x5b,
COEX_EVENT_CMD = 0x5c,
@@ -198,6 +198,7 @@ enum {
REPLY_WOWLAN_TKIP_PARAMS = 0xe3,
REPLY_WOWLAN_KEK_KCK_MATERIAL = 0xe4,
REPLY_WOWLAN_GET_STATUS = 0xe5,
+ REPLY_D3_CONFIG = 0xd3,
REPLY_MAX = 0xff
};
@@ -465,23 +466,27 @@ struct iwl_error_event_table {
u32 frame_ptr; /* frame pointer */
u32 stack_ptr; /* stack pointer */
u32 hcmd; /* last host command header */
-#if 0
- /* no need to read the remainder, we don't use the values */
- u32 isr0; /* isr status register LMPM_NIC_ISR0: rxtx_flag */
- u32 isr1; /* isr status register LMPM_NIC_ISR1: host_flag */
- u32 isr2; /* isr status register LMPM_NIC_ISR2: enc_flag */
- u32 isr3; /* isr status register LMPM_NIC_ISR3: time_flag */
- u32 isr4; /* isr status register LMPM_NIC_ISR4: wico interrupt */
+ u32 isr0; /* isr status register LMPM_NIC_ISR0:
+ * rxtx_flag */
+ u32 isr1; /* isr status register LMPM_NIC_ISR1:
+ * host_flag */
+ u32 isr2; /* isr status register LMPM_NIC_ISR2:
+ * enc_flag */
+ u32 isr3; /* isr status register LMPM_NIC_ISR3:
+ * time_flag */
+ u32 isr4; /* isr status register LMPM_NIC_ISR4:
+ * wico interrupt */
u32 isr_pref; /* isr status register LMPM_NIC_PREF_STAT */
u32 wait_event; /* wait event() caller address */
u32 l2p_control; /* L2pControlField */
u32 l2p_duration; /* L2pDurationField */
u32 l2p_mhvalid; /* L2pMhValidBits */
u32 l2p_addr_match; /* L2pAddrMatchStat */
- u32 lmpm_pmg_sel; /* indicate which clocks are turned on (LMPM_PMG_SEL) */
- u32 u_timestamp; /* indicate when the date and time of the compilation */
+ u32 lmpm_pmg_sel; /* indicate which clocks are turned on
+ * (LMPM_PMG_SEL) */
+	u32 u_timestamp;	/* indicate the date and time of the
+				 * compilation */
u32 flow_handler; /* FH read/write pointers, RX credit */
-#endif
} __packed;
struct iwl_alive_resp {
@@ -809,7 +814,7 @@ struct iwl_qosparam_cmd {
#define IWLAGN_STATION_COUNT 16
#define IWL_INVALID_STATION 255
-#define IWL_MAX_TID_COUNT 9
+#define IWL_MAX_TID_COUNT 8
#define STA_FLG_TX_RATE_MSK cpu_to_le32(1 << 2)
#define STA_FLG_PWR_SAVE_MSK cpu_to_le32(1 << 8)
@@ -930,8 +935,7 @@ struct iwl_addsta_cmd {
* corresponding to bit (e.g. bit 5 controls TID 5).
* Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */
__le16 tid_disable_tx;
-
- __le16 rate_n_flags; /* 3945 only */
+ __le16 legacy_reserved;
/* TID for which to add block-ack support.
* Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
@@ -1161,8 +1165,7 @@ struct iwl_rx_mpdu_res_start {
*
* uCode handles retrying Tx when an ACK is expected but not received.
* This includes trying lower data rates than the one requested in the Tx
- * command, as set up by the REPLY_RATE_SCALE (for 3945) or
- * REPLY_TX_LINK_QUALITY_CMD (agn).
+ * command, as set up by the REPLY_TX_LINK_QUALITY_CMD (agn).
*
* Driver sets up transmit power for various rates via REPLY_TX_PWR_TABLE_CMD.
* This command must be executed after every RXON command, before Tx can occur.
@@ -1174,25 +1177,9 @@ struct iwl_rx_mpdu_res_start {
* 1: Use RTS/CTS protocol or CTS-to-self if spec allows it
 *	before this frame. If CTS-to-self is required, check
* RXON_FLG_SELF_CTS_EN status.
- * unused in 3945/4965, used in 5000 series and after
*/
#define TX_CMD_FLG_PROT_REQUIRE_MSK cpu_to_le32(1 << 0)
-/*
- * 1: Use Request-To-Send protocol before this frame.
- * Mutually exclusive vs. TX_CMD_FLG_CTS_MSK.
- * used in 3945/4965, unused in 5000 series and after
- */
-#define TX_CMD_FLG_RTS_MSK cpu_to_le32(1 << 1)
-
-/*
- * 1: Transmit Clear-To-Send to self before this frame.
- * Driver should set this for AUTH/DEAUTH/ASSOC-REQ/REASSOC mgmnt frames.
- * Mutually exclusive vs. TX_CMD_FLG_RTS_MSK.
- * used in 3945/4965, unused in 5000 series and after
- */
-#define TX_CMD_FLG_CTS_MSK cpu_to_le32(1 << 2)
-
/* 1: Expect ACK from receiving station
* 0: Don't expect ACK (MAC header's duration field s/b 0)
* Set this for unicast frames, but not broadcast/multicast. */
@@ -1210,18 +1197,8 @@ struct iwl_rx_mpdu_res_start {
* Set when Txing a block-ack request frame. Also set TX_CMD_FLG_ACK_MSK. */
#define TX_CMD_FLG_IMM_BA_RSP_MASK cpu_to_le32(1 << 6)
-/*
- * 1: Frame requires full Tx-Op protection.
- * Set this if either RTS or CTS Tx Flag gets set.
- * used in 3945/4965, unused in 5000 series and after
- */
-#define TX_CMD_FLG_FULL_TXOP_PROT_MSK cpu_to_le32(1 << 7)
-
-/* Tx antenna selection field; used only for 3945, reserved (0) for agn devices.
- * Set field to "0" to allow 3945 uCode to select antenna (normal usage). */
+/* Tx antenna selection field; reserved (0) for agn devices. */
#define TX_CMD_FLG_ANT_SEL_MSK cpu_to_le32(0xf00)
-#define TX_CMD_FLG_ANT_A_MSK cpu_to_le32(1 << 8)
-#define TX_CMD_FLG_ANT_B_MSK cpu_to_le32(1 << 9)
/* 1: Ignore Bluetooth priority for this frame.
* 0: Delay Tx until Bluetooth device is done (normal usage). */
@@ -1567,7 +1544,6 @@ struct iwl_compressed_ba_resp {
__le64 bitmap;
__le16 scd_flow;
__le16 scd_ssn;
- /* following only for 5000 series and up */
u8 txed; /* number of frames sent */
u8 txed_2_done; /* number of frames acked */
} __packed;
@@ -1669,7 +1645,7 @@ struct iwl_link_qual_agg_params {
/*
* REPLY_TX_LINK_QUALITY_CMD = 0x4e (command, has simple generic response)
*
- * For agn devices only; 3945 uses REPLY_RATE_SCALE.
+ * For agn devices
*
* Each station in the agn device's internal station table has its own table
* of 16
@@ -1918,7 +1894,7 @@ struct iwl_link_quality_cmd {
/*
* REPLY_BT_CONFIG = 0x9b (command, has simple generic response)
*
- * 3945 and agn devices support hardware handshake with Bluetooth device on
+ * agn devices support hardware handshake with Bluetooth device on
* same platform. Bluetooth device alerts wireless device when it will Tx;
* wireless device can delay or kill its own Tx to accommodate.
*/
@@ -2202,8 +2178,8 @@ struct iwl_spectrum_notification {
struct iwl_powertable_cmd {
__le16 flags;
- u8 keep_alive_seconds; /* 3945 reserved */
- u8 debug_flags; /* 3945 reserved */
+ u8 keep_alive_seconds;
+ u8 debug_flags;
__le32 rx_data_timeout;
__le32 tx_data_timeout;
__le32 sleep_interval[IWL_POWER_VEC_SIZE];
@@ -2324,9 +2300,9 @@ struct iwl_scan_channel {
/**
* struct iwl_ssid_ie - directed scan network information element
*
- * Up to 20 of these may appear in REPLY_SCAN_CMD (Note: Only 4 are in
- * 3945 SCAN api), selected by "type" bit field in struct iwl_scan_channel;
- * each channel may select different ssids from among the 20 (4) entries.
+ * Up to 20 of these may appear in REPLY_SCAN_CMD,
+ * selected by "type" bit field in struct iwl_scan_channel;
+ * each channel may select different ssids from among the 20 entries.
* SSID IEs get transmitted in reverse order of entry.
*/
struct iwl_ssid_ie {
@@ -2335,7 +2311,6 @@ struct iwl_ssid_ie {
u8 ssid[32];
} __packed;
-#define PROBE_OPTION_MAX_3945 4
#define PROBE_OPTION_MAX 20
#define TX_CMD_LIFE_TIME_INFINITE cpu_to_le32(0xFFFFFFFF)
#define IWL_GOOD_CRC_TH_DISABLED 0
@@ -2416,8 +2391,6 @@ struct iwl_scan_cmd {
* channel */
__le32 suspend_time; /* pause scan this long (in "extended beacon
* format") when returning to service chnl:
- * 3945; 31:24 # beacons, 19:0 additional usec,
- * 4965; 31:22 # beacons, 21:0 additional usec.
*/
__le32 flags; /* RXON_FLG_* */
__le32 filter_flags; /* RXON_FILTER_* */
@@ -2733,7 +2706,7 @@ struct statistics_div {
struct statistics_general_common {
__le32 temperature; /* radio temperature */
- __le32 temperature_m; /* for 5000 and up, this is radio voltage */
+ __le32 temperature_m; /* radio voltage */
struct statistics_dbg dbg;
__le32 sleep_time;
__le32 slots_out;
@@ -3801,6 +3774,19 @@ struct iwl_bt_coex_prot_env_cmd {
} __attribute__((packed));
/*
+ * REPLY_D3_CONFIG
+ */
+enum iwlagn_d3_wakeup_filters {
+ IWLAGN_D3_WAKEUP_RFKILL = BIT(0),
+ IWLAGN_D3_WAKEUP_SYSASSERT = BIT(1),
+};
+
+struct iwlagn_d3_config_cmd {
+ __le32 min_sleep_time;
+ __le32 wakeup_flags;
+} __packed;
+
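An editorial aside, not part of the patch: the new REPLY_D3_CONFIG command pairs a minimum sleep time with a bitmask of wakeup causes. The sketch below shows, in plain standalone C with host-endian fields (the driver itself uses __le32 and cpu_to_le32), how such a command body might be filled; d3_config_sketch and build_d3_cmd are illustrative names only.

#include <stdint.h>

/* Illustrative mirror of iwlagn_d3_config_cmd, host-endian for simplicity. */
struct d3_config_sketch {
	uint32_t min_sleep_time;	/* minimum firmware sleep time (units per fw API) */
	uint32_t wakeup_flags;		/* OR of IWLAGN_D3_WAKEUP_* style bits */
};

static struct d3_config_sketch build_d3_cmd(uint32_t min_sleep, int wake_on_rfkill)
{
	struct d3_config_sketch cmd = { .min_sleep_time = min_sleep, .wakeup_flags = 0 };

	if (wake_on_rfkill)
		cmd.wakeup_flags |= 1u << 0;	/* cf. IWLAGN_D3_WAKEUP_RFKILL = BIT(0) */
	cmd.wakeup_flags |= 1u << 1;		/* cf. IWLAGN_D3_WAKEUP_SYSASSERT = BIT(1) */
	return cmd;
}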
+/*
* REPLY_WOWLAN_PATTERNS
*/
#define IWLAGN_WOWLAN_MIN_PATTERN_LEN 16
@@ -3830,19 +3816,16 @@ enum iwlagn_wowlan_wakeup_filters {
IWLAGN_WOWLAN_WAKEUP_BEACON_MISS = BIT(2),
IWLAGN_WOWLAN_WAKEUP_LINK_CHANGE = BIT(3),
IWLAGN_WOWLAN_WAKEUP_GTK_REKEY_FAIL = BIT(4),
- IWLAGN_WOWLAN_WAKEUP_RFKILL = BIT(5),
- IWLAGN_WOWLAN_WAKEUP_UCODE_ERROR = BIT(6),
- IWLAGN_WOWLAN_WAKEUP_EAP_IDENT_REQ = BIT(7),
- IWLAGN_WOWLAN_WAKEUP_4WAY_HANDSHAKE = BIT(8),
- IWLAGN_WOWLAN_WAKEUP_ALWAYS = BIT(9),
- IWLAGN_WOWLAN_WAKEUP_ENABLE_NET_DETECT = BIT(10),
+ IWLAGN_WOWLAN_WAKEUP_EAP_IDENT_REQ = BIT(5),
+ IWLAGN_WOWLAN_WAKEUP_4WAY_HANDSHAKE = BIT(6),
+ IWLAGN_WOWLAN_WAKEUP_ALWAYS = BIT(7),
+ IWLAGN_WOWLAN_WAKEUP_ENABLE_NET_DETECT = BIT(8),
};
struct iwlagn_wowlan_wakeup_filter_cmd {
__le32 enabled;
__le16 non_qos_seq;
- u8 min_sleep_seconds;
- u8 reserved;
+ __le16 reserved;
__le16 qos_seq[8];
};
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 001fdf140abb..7bcfa781e0b9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -60,8 +60,8 @@ static void iwl_init_ht_hw_capab(const struct iwl_priv *priv,
ht_info->ht_supported = true;
- if (priv->cfg->ht_params &&
- priv->cfg->ht_params->ht_greenfield_support)
+ if (cfg(priv)->ht_params &&
+ cfg(priv)->ht_params->ht_greenfield_support)
ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD;
ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
max_bit_rate = MAX_BIT_RATE_20_MHZ;
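A hedged editorial note, not from the patch: the hunks in this file replace direct priv->cfg dereferences with a cfg(priv) accessor whose real definition lives elsewhere in the driver. As a general pattern only, such an accessor is typically a trivial inline or macro that hides where the configuration pointer is stored; the example_* names below are invented for illustration.

struct example_cfg { unsigned int sku; };
struct example_shared { struct example_cfg *cfg; };
struct example_priv { struct example_shared *shrd; };

/* One level of indirection replaces a direct priv->cfg field. */
static inline struct example_cfg *example_cfg_of(struct example_priv *priv)
{
	return priv->shrd->cfg;
}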
@@ -76,11 +76,7 @@ static void iwl_init_ht_hw_capab(const struct iwl_priv *priv,
ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
- if (priv->cfg->bt_params && priv->cfg->bt_params->ampdu_factor)
- ht_info->ampdu_factor = priv->cfg->bt_params->ampdu_factor;
ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;
- if (priv->cfg->bt_params && priv->cfg->bt_params->ampdu_density)
- ht_info->ampdu_density = priv->cfg->bt_params->ampdu_density;
ht_info->mcs.rx_mask[0] = 0xFF;
if (rx_chains_num >= 2)
@@ -141,7 +137,7 @@ int iwl_init_geos(struct iwl_priv *priv)
sband->bitrates = &rates[IWL_FIRST_OFDM_RATE];
sband->n_bitrates = IWL_RATE_COUNT_LEGACY - IWL_FIRST_OFDM_RATE;
- if (priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE)
+ if (cfg(priv)->sku & EEPROM_SKU_CAP_11N_ENABLE)
iwl_init_ht_hw_capab(priv, &sband->ht_cap,
IEEE80211_BAND_5GHZ);
@@ -151,7 +147,7 @@ int iwl_init_geos(struct iwl_priv *priv)
sband->bitrates = rates;
sband->n_bitrates = IWL_RATE_COUNT_LEGACY;
- if (priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE)
+ if (cfg(priv)->sku & EEPROM_SKU_CAP_11N_ENABLE)
iwl_init_ht_hw_capab(priv, &sband->ht_cap,
IEEE80211_BAND_2GHZ);
@@ -206,12 +202,12 @@ int iwl_init_geos(struct iwl_priv *priv)
priv->tx_power_next = max_tx_power;
if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
- priv->cfg->sku & EEPROM_SKU_CAP_BAND_52GHZ) {
+ cfg(priv)->sku & EEPROM_SKU_CAP_BAND_52GHZ) {
char buf[32];
- bus_get_hw_id(bus(priv), buf, sizeof(buf));
+ bus_get_hw_id_string(bus(priv), buf, sizeof(buf));
IWL_INFO(priv, "Incorrectly detected BG card as ABG. "
"Please send your %s to maintainer.\n", buf);
- priv->cfg->sku &= ~EEPROM_SKU_CAP_BAND_52GHZ;
+ cfg(priv)->sku &= ~EEPROM_SKU_CAP_BAND_52GHZ;
}
IWL_INFO(priv, "Tunable channels: %d 802.11bg, %d 802.11a channels\n",
@@ -836,19 +832,6 @@ void iwl_print_rx_config_cmd(struct iwl_priv *priv,
}
#endif
-static void iwlagn_abort_notification_waits(struct iwl_priv *priv)
-{
- unsigned long flags;
- struct iwl_notification_wait *wait_entry;
-
- spin_lock_irqsave(&priv->notif_wait_lock, flags);
- list_for_each_entry(wait_entry, &priv->notif_waits, list)
- wait_entry->aborted = true;
- spin_unlock_irqrestore(&priv->notif_wait_lock, flags);
-
- wake_up_all(&priv->notif_waitq);
-}
-
void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand)
{
unsigned int reload_msec;
@@ -860,7 +843,7 @@ void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand)
/* Cancel currently queued command. */
clear_bit(STATUS_HCMD_ACTIVE, &priv->shrd->status);
- iwlagn_abort_notification_waits(priv);
+ iwl_abort_notification_waits(priv->shrd);
/* Keep the restart process from trying to send host
* commands by clearing the ready bit */
@@ -979,9 +962,9 @@ int iwl_apm_init(struct iwl_priv *priv)
bus_apm_config(bus(priv));
/* Configure analog phase-lock-loop before activating to D0A */
- if (priv->cfg->base_params->pll_cfg_val)
+ if (cfg(priv)->base_params->pll_cfg_val)
iwl_set_bit(bus(priv), CSR_ANA_PLL_CFG,
- priv->cfg->base_params->pll_cfg_val);
+ cfg(priv)->base_params->pll_cfg_val);
/*
* Set "initialization complete" bit to move adapter from
@@ -1120,229 +1103,8 @@ int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
&statistics_cmd);
}
-int iwlagn_mac_conf_tx(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, u16 queue,
- const struct ieee80211_tx_queue_params *params)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_rxon_context *ctx;
- unsigned long flags;
- int q;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- if (!iwl_is_ready_rf(priv->shrd)) {
- IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
- return -EIO;
- }
-
- if (queue >= AC_NUM) {
- IWL_DEBUG_MAC80211(priv, "leave - queue >= AC_NUM %d\n", queue);
- return 0;
- }
-
- q = AC_NUM - 1 - queue;
-
- spin_lock_irqsave(&priv->shrd->lock, flags);
-
- /*
- * MULTI-FIXME
- * This may need to be done per interface in nl80211/cfg80211/mac80211.
- */
- for_each_context(priv, ctx) {
- ctx->qos_data.def_qos_parm.ac[q].cw_min =
- cpu_to_le16(params->cw_min);
- ctx->qos_data.def_qos_parm.ac[q].cw_max =
- cpu_to_le16(params->cw_max);
- ctx->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
- ctx->qos_data.def_qos_parm.ac[q].edca_txop =
- cpu_to_le16((params->txop * 32));
-
- ctx->qos_data.def_qos_parm.ac[q].reserved1 = 0;
- }
-
- spin_unlock_irqrestore(&priv->shrd->lock, flags);
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
- return 0;
-}
-
-int iwlagn_mac_tx_last_beacon(struct ieee80211_hw *hw)
-{
- struct iwl_priv *priv = hw->priv;
-
- return priv->ibss_manager == IWL_IBSS_MANAGER;
-}
-
-static int iwl_set_mode(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
-{
- iwl_connection_init_rx_config(priv, ctx);
-
- iwlagn_set_rxon_chain(priv, ctx);
-
- return iwlagn_commit_rxon(priv, ctx);
-}
-
-static int iwl_setup_interface(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
-{
- struct ieee80211_vif *vif = ctx->vif;
- int err;
-
- lockdep_assert_held(&priv->shrd->mutex);
-
- /*
- * This variable will be correct only when there's just
- * a single context, but all code using it is for hardware
- * that supports only one context.
- */
- priv->iw_mode = vif->type;
-
- ctx->is_active = true;
-
- err = iwl_set_mode(priv, ctx);
- if (err) {
- if (!ctx->always_active)
- ctx->is_active = false;
- return err;
- }
-
- if (priv->cfg->bt_params && priv->cfg->bt_params->advanced_bt_coexist &&
- vif->type == NL80211_IFTYPE_ADHOC) {
- /*
- * pretend to have high BT traffic as long as we
- * are operating in IBSS mode, as this will cause
- * the rate scaling etc. to behave as intended.
- */
- priv->bt_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_HIGH;
- }
-
- return 0;
-}
-
-int iwlagn_mac_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
- struct iwl_rxon_context *tmp, *ctx = NULL;
- int err;
- enum nl80211_iftype viftype = ieee80211_vif_type_p2p(vif);
-
- IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n",
- viftype, vif->addr);
- cancel_delayed_work_sync(&priv->hw_roc_disable_work);
- mutex_lock(&priv->shrd->mutex);
-
- iwlagn_disable_roc(priv);
-
- if (!iwl_is_ready_rf(priv->shrd)) {
- IWL_WARN(priv, "Try to add interface when device not ready\n");
- err = -EINVAL;
- goto out;
- }
-
- for_each_context(priv, tmp) {
- u32 possible_modes =
- tmp->interface_modes | tmp->exclusive_interface_modes;
-
- if (tmp->vif) {
- /* check if this busy context is exclusive */
- if (tmp->exclusive_interface_modes &
- BIT(tmp->vif->type)) {
- err = -EINVAL;
- goto out;
- }
- continue;
- }
-
- if (!(possible_modes & BIT(viftype)))
- continue;
-
- /* have maybe usable context w/o interface */
- ctx = tmp;
- break;
- }
-
- if (!ctx) {
- err = -EOPNOTSUPP;
- goto out;
- }
-
- vif_priv->ctx = ctx;
- ctx->vif = vif;
-
- err = iwl_setup_interface(priv, ctx);
- if (!err)
- goto out;
-
- ctx->vif = NULL;
- priv->iw_mode = NL80211_IFTYPE_STATION;
- out:
- mutex_unlock(&priv->shrd->mutex);
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
- return err;
-}
-
-static void iwl_teardown_interface(struct iwl_priv *priv,
- struct ieee80211_vif *vif,
- bool mode_change)
-{
- struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
-
- lockdep_assert_held(&priv->shrd->mutex);
-
- if (priv->scan_vif == vif) {
- iwl_scan_cancel_timeout(priv, 200);
- iwl_force_scan_end(priv);
- }
-
- if (!mode_change) {
- iwl_set_mode(priv, ctx);
- if (!ctx->always_active)
- ctx->is_active = false;
- }
-
- /*
- * When removing the IBSS interface, overwrite the
- * BT traffic load with the stored one from the last
- * notification, if any. If this is a device that
- * doesn't implement this, this has no effect since
- * both values are the same and zero.
- */
- if (vif->type == NL80211_IFTYPE_ADHOC)
- priv->bt_traffic_load = priv->last_bt_traffic_load;
-}
-
-void iwlagn_mac_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- mutex_lock(&priv->shrd->mutex);
-
- if (WARN_ON(ctx->vif != vif)) {
- struct iwl_rxon_context *tmp;
- IWL_ERR(priv, "ctx->vif = %p, vif = %p\n", ctx->vif, vif);
- for_each_context(priv, tmp)
- IWL_ERR(priv, "\tID = %d:\tctx = %p\tctx->vif = %p\n",
- tmp->ctxid, tmp, tmp->vif);
- }
- ctx->vif = NULL;
-
- iwl_teardown_interface(priv, vif, false);
-
- mutex_unlock(&priv->shrd->mutex);
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
-
-}
#ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -1649,97 +1411,13 @@ int iwl_force_reset(struct iwl_priv *priv, int mode, bool external)
return 0;
}
-int iwlagn_mac_change_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- enum nl80211_iftype newtype, bool newp2p)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
- struct iwl_rxon_context *bss_ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- struct iwl_rxon_context *tmp;
- enum nl80211_iftype newviftype = newtype;
- u32 interface_modes;
- int err;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- newtype = ieee80211_iftype_p2p(newtype, newp2p);
-
- mutex_lock(&priv->shrd->mutex);
-
- if (!ctx->vif || !iwl_is_ready_rf(priv->shrd)) {
- /*
- * Huh? But wait ... this can maybe happen when
- * we're in the middle of a firmware restart!
- */
- err = -EBUSY;
- goto out;
- }
-
- interface_modes = ctx->interface_modes | ctx->exclusive_interface_modes;
-
- if (!(interface_modes & BIT(newtype))) {
- err = -EBUSY;
- goto out;
- }
-
- /*
- * Refuse a change that should be done by moving from the PAN
- * context to the BSS context instead, if the BSS context is
- * available and can support the new interface type.
- */
- if (ctx->ctxid == IWL_RXON_CTX_PAN && !bss_ctx->vif &&
- (bss_ctx->interface_modes & BIT(newtype) ||
- bss_ctx->exclusive_interface_modes & BIT(newtype))) {
- BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
- err = -EBUSY;
- goto out;
- }
-
- if (ctx->exclusive_interface_modes & BIT(newtype)) {
- for_each_context(priv, tmp) {
- if (ctx == tmp)
- continue;
-
- if (!tmp->vif)
- continue;
-
- /*
- * The current mode switch would be exclusive, but
- * another context is active ... refuse the switch.
- */
- err = -EBUSY;
- goto out;
- }
- }
-
- /* success */
- iwl_teardown_interface(priv, vif, true);
- vif->type = newviftype;
- vif->p2p = newp2p;
- err = iwl_setup_interface(priv, ctx);
- WARN_ON(err);
- /*
- * We've switched internally, but submitting to the
- * device may have failed for some reason. Mask this
- * error, because otherwise mac80211 will not switch
- * (and set the interface type back) and we'll be
- * out of sync with it.
- */
- err = 0;
-
- out:
- mutex_unlock(&priv->shrd->mutex);
- IWL_DEBUG_MAC80211(priv, "leave\n");
-
- return err;
-}
int iwl_cmd_echo_test(struct iwl_priv *priv)
{
int ret;
struct iwl_host_cmd cmd = {
.id = REPLY_ECHO,
+ .len = { 0 },
.flags = CMD_SYNC,
};
@@ -1783,7 +1461,7 @@ void iwl_bg_watchdog(unsigned long data)
if (iwl_is_rfkill(priv->shrd))
return;
- timeout = priv->cfg->base_params->wd_timeout;
+ timeout = cfg(priv)->base_params->wd_timeout;
if (timeout == 0)
return;
@@ -1808,13 +1486,25 @@ void iwl_bg_watchdog(unsigned long data)
void iwl_setup_watchdog(struct iwl_priv *priv)
{
- unsigned int timeout = priv->cfg->base_params->wd_timeout;
-
- if (timeout && !iwlagn_mod_params.wd_disable)
- mod_timer(&priv->watchdog,
- jiffies + msecs_to_jiffies(IWL_WD_TICK(timeout)));
- else
- del_timer(&priv->watchdog);
+ unsigned int timeout = cfg(priv)->base_params->wd_timeout;
+
+ if (!iwlagn_mod_params.wd_disable) {
+ /* use system default */
+ if (timeout && !cfg(priv)->base_params->wd_disable)
+ mod_timer(&priv->watchdog,
+ jiffies +
+ msecs_to_jiffies(IWL_WD_TICK(timeout)));
+ else
+ del_timer(&priv->watchdog);
+ } else {
+			/* the module parameter overrides the default configuration */
+ if (timeout && iwlagn_mod_params.wd_disable == 2)
+ mod_timer(&priv->watchdog,
+ jiffies +
+ msecs_to_jiffies(IWL_WD_TICK(timeout)));
+ else
+ del_timer(&priv->watchdog);
+ }
}
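An editorial restatement, not part of the patch: the rewritten iwl_setup_watchdog() above lets the wd_disable module parameter override the per-device default. Assuming the convention visible in the hunk (0 = follow the device default, 2 = force the watchdog on, anything else = keep it off), the decision reduces to the small predicate below; watchdog_should_run is an illustrative helper, not a driver symbol.

#include <stdbool.h>

static bool watchdog_should_run(unsigned int timeout_ms, int module_wd_disable,
				bool device_default_disabled)
{
	if (!timeout_ms)
		return false;				/* no timeout configured at all */
	if (module_wd_disable == 0)
		return !device_default_disabled;	/* "use system default" branch */
	return module_wd_disable == 2;			/* explicit override branch */
}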
/**
@@ -1890,34 +1580,6 @@ __le32 iwl_add_beacon_time(struct iwl_priv *priv, u32 base,
return cpu_to_le32(res);
}
-void iwl_start_tx_ba_trans_ready(struct iwl_priv *priv,
- enum iwl_rxon_context_id ctx,
- u8 sta_id, u8 tid)
-{
- struct ieee80211_vif *vif;
- u8 *addr = priv->stations[sta_id].sta.sta.addr;
-
- if (ctx == NUM_IWL_RXON_CTX)
- ctx = priv->stations[sta_id].ctxid;
- vif = priv->contexts[ctx].vif;
-
- ieee80211_start_tx_ba_cb_irqsafe(vif, addr, tid);
-}
-
-void iwl_stop_tx_ba_trans_ready(struct iwl_priv *priv,
- enum iwl_rxon_context_id ctx,
- u8 sta_id, u8 tid)
-{
- struct ieee80211_vif *vif;
- u8 *addr = priv->stations[sta_id].sta.sta.addr;
-
- if (ctx == NUM_IWL_RXON_CTX)
- ctx = priv->stations[sta_id].ctxid;
- vif = priv->contexts[ctx].vif;
-
- ieee80211_stop_tx_ba_cb_irqsafe(vif, addr, tid);
-}
-
void iwl_set_hw_rfkill_state(struct iwl_priv *priv, bool state)
{
wiphy_rfkill_set_hw_state(priv->hw->wiphy, state);
@@ -1925,8 +1587,7 @@ void iwl_set_hw_rfkill_state(struct iwl_priv *priv, bool state)
void iwl_nic_config(struct iwl_priv *priv)
{
- priv->cfg->lib->nic_config(priv);
-
+ cfg(priv)->lib->nic_config(priv);
}
void iwl_free_skb(struct iwl_priv *priv, struct sk_buff *skb)
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 137da3380704..7bf76ab94dd2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -113,6 +113,7 @@ struct iwl_lib_ops {
 * @shadow_reg_enable: HW shadow register bit
* @no_idle_support: do not support idle mode
* @hd_v2: v2 of enhanced sensitivity value, used for 2000 series and up
+ * @wd_disable: disable watchdog timer
*/
struct iwl_base_params {
int eeprom_size;
@@ -134,14 +135,13 @@ struct iwl_base_params {
const bool shadow_reg_enable;
const bool no_idle_support;
const bool hd_v2;
+ const bool wd_disable;
};
/*
* @advanced_bt_coexist: support advanced bt coexist
* @bt_init_traffic_load: specify initial bt traffic load
* @bt_prio_boost: default bt priority boost value
* @agg_time_limit: maximum number of uSec in aggregation
- * @ampdu_factor: Maximum A-MPDU length factor
- * @ampdu_density: Minimum A-MPDU spacing
* @bt_sco_disable: uCode should not response to BT in SCO/ESCO mode
*/
struct iwl_bt_params {
@@ -149,8 +149,6 @@ struct iwl_bt_params {
u8 bt_init_traffic_load;
u8 bt_prio_boost;
u16 agg_time_limit;
- u8 ampdu_factor;
- u8 ampdu_density;
bool bt_sco_disable;
bool bt_session_2;
};
@@ -163,84 +161,10 @@ struct iwl_ht_params {
enum ieee80211_smps_mode smps_mode;
};
-/**
- * struct iwl_cfg
- * @name: Offical name of the device
- * @fw_name_pre: Firmware filename prefix. The api version and extension
- * (.ucode) will be added to filename before loading from disk. The
- * filename is constructed as fw_name_pre<api>.ucode.
- * @ucode_api_max: Highest version of uCode API supported by driver.
- * @ucode_api_ok: oldest version of the uCode API that is OK to load
- * without a warning, for use in transitions
- * @ucode_api_min: Lowest version of uCode API supported by driver.
- * @valid_tx_ant: valid transmit antenna
- * @valid_rx_ant: valid receive antenna
- * @sku: sku information from EEPROM
- * @eeprom_ver: EEPROM version
- * @eeprom_calib_ver: EEPROM calibration version
- * @lib: pointer to the lib ops
- * @additional_nic_config: additional nic configuration
- * @base_params: pointer to basic parameters
- * @ht_params: point to ht patameters
- * @bt_params: pointer to bt parameters
- * @pa_type: used by 6000 series only to identify the type of Power Amplifier
- * @need_dc_calib: need to perform init dc calibration
- * @need_temp_offset_calib: need to perform temperature offset calibration
- * @scan_antennas: available antenna for scan operation
- * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off)
- * @adv_pm: advance power management
- * @rx_with_siso_diversity: 1x1 device with rx antenna diversity
- * @internal_wimax_coex: internal wifi/wimax combo device
- * @iq_invert: I/Q inversion
- * @temp_offset_v2: support v2 of temperature offset calibration
- *
- * We enable the driver to be backward compatible wrt API version. The
- * driver specifies which APIs it supports (with @ucode_api_max being the
- * highest and @ucode_api_min the lowest). Firmware will only be loaded if
- * it has a supported API version.
- *
- * The ideal usage of this infrastructure is to treat a new ucode API
- * release as a new hardware revision.
- */
-struct iwl_cfg {
- /* params specific to an individual device within a device family */
- const char *name;
- const char *fw_name_pre;
- const unsigned int ucode_api_max;
- const unsigned int ucode_api_ok;
- const unsigned int ucode_api_min;
- u8 valid_tx_ant;
- u8 valid_rx_ant;
- u16 sku;
- u16 eeprom_ver;
- u16 eeprom_calib_ver;
- const struct iwl_lib_ops *lib;
- void (*additional_nic_config)(struct iwl_priv *priv);
- /* params not likely to change within a device family */
- struct iwl_base_params *base_params;
- /* params likely to change within a device family */
- struct iwl_ht_params *ht_params;
- struct iwl_bt_params *bt_params;
- enum iwl_pa_type pa_type; /* if used set to IWL_PA_SYSTEM */
- const bool need_dc_calib; /* if used set to true */
- const bool need_temp_offset_calib; /* if used set to true */
- u8 scan_rx_antennas[IEEE80211_NUM_BANDS];
- enum iwl_led_mode led_mode;
- const bool adv_pm;
- const bool rx_with_siso_diversity;
- const bool internal_wimax_coex;
- const bool iq_invert;
- const bool temp_offset_v2;
-};
-
/***************************
* L i b *
***************************/
-int iwlagn_mac_conf_tx(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, u16 queue,
- const struct ieee80211_tx_queue_params *params);
-int iwlagn_mac_tx_last_beacon(struct ieee80211_hw *hw);
void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
int hw_decrypt);
int iwl_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
@@ -260,13 +184,6 @@ bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
void iwl_connection_init_rx_config(struct iwl_priv *priv,
struct iwl_rxon_context *ctx);
void iwl_set_rate(struct iwl_priv *priv);
-int iwlagn_mac_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif);
-void iwlagn_mac_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif);
-int iwlagn_mac_change_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- enum nl80211_iftype newtype, bool newp2p);
int iwl_cmd_echo_test(struct iwl_priv *priv);
#ifdef CONFIG_IWLWIFI_DEBUGFS
int iwl_alloc_traffic_mem(struct iwl_priv *priv);
@@ -323,9 +240,6 @@ void iwl_init_scan_params(struct iwl_priv *priv);
int iwl_scan_cancel(struct iwl_priv *priv);
void iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms);
void iwl_force_scan_end(struct iwl_priv *priv);
-int iwlagn_mac_hw_scan(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct cfg80211_scan_request *req);
void iwl_internal_short_hw_scan(struct iwl_priv *priv);
int iwl_force_reset(struct iwl_priv *priv, int mode, bool external);
u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
@@ -379,8 +293,8 @@ static inline const struct ieee80211_supported_band *iwl_get_hw_mode(
static inline bool iwl_advanced_bt_coexist(struct iwl_priv *priv)
{
- return priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist;
+ return cfg(priv)->bt_params &&
+ cfg(priv)->bt_params->advanced_bt_coexist;
}
static inline void iwl_enable_rfkill_int(struct iwl_priv *priv)
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index b9f3267e720c..fbc3095c7b44 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -284,8 +284,8 @@
#define CSR_HW_REV_TYPE_6x35 CSR_HW_REV_TYPE_6x05
#define CSR_HW_REV_TYPE_2x30 (0x00000C0)
#define CSR_HW_REV_TYPE_2x00 (0x0000100)
-#define CSR_HW_REV_TYPE_200 (0x0000110)
-#define CSR_HW_REV_TYPE_230 (0x0000120)
+#define CSR_HW_REV_TYPE_105 (0x0000110)
+#define CSR_HW_REV_TYPE_135 (0x0000120)
#define CSR_HW_REV_TYPE_NONE (0x00001F0)
/* EEPROM REG */
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index 69a77e24d229..f8fc2393dd4c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -47,20 +47,21 @@ do { \
} while (0)
#ifdef CONFIG_IWLWIFI_DEBUG
-#define IWL_DEBUG(m, level, fmt, args...) \
+#define IWL_DEBUG(m, level, fmt, ...) \
do { \
if (iwl_get_debug_level((m)->shrd) & (level)) \
- dev_printk(KERN_ERR, bus(m)->dev, \
- "%c %s " fmt, in_interrupt() ? 'I' : 'U', \
- __func__ , ## args); \
+ dev_err(bus(m)->dev, "%c %s " fmt, \
+ in_interrupt() ? 'I' : 'U', __func__, \
+ ##__VA_ARGS__); \
} while (0)
-#define IWL_DEBUG_LIMIT(m, level, fmt, args...) \
+#define IWL_DEBUG_LIMIT(m, level, fmt, ...) \
do { \
- if (iwl_get_debug_level((m)->shrd) & (level) && net_ratelimit())\
- dev_printk(KERN_ERR, bus(m)->dev, \
- "%c %s " fmt, in_interrupt() ? 'I' : 'U', \
- __func__ , ## args); \
+ if (iwl_get_debug_level((m)->shrd) & (level) && \
+ net_ratelimit()) \
+ dev_err(bus(m)->dev, "%c %s " fmt, \
+ in_interrupt() ? 'I' : 'U', __func__, \
+ ##__VA_ARGS__); \
} while (0)
#define iwl_print_hex_dump(m, level, p, len) \
@@ -70,10 +71,29 @@ do { \
DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
} while (0)
+#define IWL_DEBUG_QUIET_RFKILL(p, fmt, ...) \
+do { \
+ if (!iwl_is_rfkill(p->shrd)) \
+ dev_err(bus(p)->dev, "%s%c %s " fmt, \
+ "", \
+ in_interrupt() ? 'I' : 'U', __func__, \
+ ##__VA_ARGS__); \
+ else if (iwl_get_debug_level(p->shrd) & IWL_DL_RADIO) \
+ dev_err(bus(p)->dev, "%s%c %s " fmt, \
+ "(RFKILL) ", \
+ in_interrupt() ? 'I' : 'U', __func__, \
+ ##__VA_ARGS__); \
+} while (0)
+
#else
#define IWL_DEBUG(m, level, fmt, args...)
#define IWL_DEBUG_LIMIT(m, level, fmt, args...)
#define iwl_print_hex_dump(m, level, p, len)
+#define IWL_DEBUG_QUIET_RFKILL(p, fmt, args...) \
+do { \
+ if (!iwl_is_rfkill(p->shrd)) \
+ IWL_ERR(p, fmt, ##args); \
+} while (0)
#endif /* CONFIG_IWLWIFI_DEBUG */
#ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -114,48 +134,43 @@ static inline void iwl_dbgfs_unregister(struct iwl_priv *priv)
*/
/* 0x0000000F - 0x00000001 */
-#define IWL_DL_INFO (1 << 0)
-#define IWL_DL_MAC80211 (1 << 1)
-#define IWL_DL_HCMD (1 << 2)
-#define IWL_DL_STATE (1 << 3)
+#define IWL_DL_INFO 0x00000001
+#define IWL_DL_MAC80211 0x00000002
+#define IWL_DL_HCMD 0x00000004
+#define IWL_DL_STATE 0x00000008
/* 0x000000F0 - 0x00000010 */
-#define IWL_DL_MACDUMP (1 << 4)
-#define IWL_DL_HCMD_DUMP (1 << 5)
-#define IWL_DL_EEPROM (1 << 6)
-#define IWL_DL_RADIO (1 << 7)
+#define IWL_DL_EEPROM 0x00000040
+#define IWL_DL_RADIO 0x00000080
/* 0x00000F00 - 0x00000100 */
-#define IWL_DL_POWER (1 << 8)
-#define IWL_DL_TEMP (1 << 9)
-/* reserved (1 << 10) */
-#define IWL_DL_SCAN (1 << 11)
+#define IWL_DL_POWER 0x00000100
+#define IWL_DL_TEMP 0x00000200
+#define IWL_DL_SCAN 0x00000800
/* 0x0000F000 - 0x00001000 */
-#define IWL_DL_ASSOC (1 << 12)
-#define IWL_DL_DROP (1 << 13)
-/* reserved (1 << 14) */
-#define IWL_DL_COEX (1 << 15)
+#define IWL_DL_ASSOC 0x00001000
+#define IWL_DL_DROP 0x00002000
+#define IWL_DL_COEX 0x00008000
/* 0x000F0000 - 0x00010000 */
-#define IWL_DL_FW (1 << 16)
-#define IWL_DL_RF_KILL (1 << 17)
-#define IWL_DL_FW_ERRORS (1 << 18)
-#define IWL_DL_LED (1 << 19)
+#define IWL_DL_FW 0x00010000
+#define IWL_DL_RF_KILL 0x00020000
+#define IWL_DL_FW_ERRORS 0x00040000
+#define IWL_DL_LED 0x00080000
/* 0x00F00000 - 0x00100000 */
-#define IWL_DL_RATE (1 << 20)
-#define IWL_DL_CALIB (1 << 21)
-#define IWL_DL_WEP (1 << 22)
-#define IWL_DL_TX (1 << 23)
+#define IWL_DL_RATE 0x00100000
+#define IWL_DL_CALIB 0x00200000
+#define IWL_DL_WEP 0x00400000
+#define IWL_DL_TX 0x00800000
/* 0x0F000000 - 0x01000000 */
-#define IWL_DL_RX (1 << 24)
-#define IWL_DL_ISR (1 << 25)
-#define IWL_DL_HT (1 << 26)
+#define IWL_DL_RX 0x01000000
+#define IWL_DL_ISR 0x02000000
+#define IWL_DL_HT 0x04000000
/* 0xF0000000 - 0x10000000 */
-#define IWL_DL_11H (1 << 28)
-#define IWL_DL_STATS (1 << 29)
-#define IWL_DL_TX_REPLY (1 << 30)
-#define IWL_DL_QOS (1 << 31)
+#define IWL_DL_11H 0x10000000
+#define IWL_DL_STATS 0x20000000
+#define IWL_DL_TX_REPLY 0x40000000
+#define IWL_DL_TX_QUEUES 0x80000000
#define IWL_DEBUG_INFO(p, f, a...) IWL_DEBUG(p, IWL_DL_INFO, f, ## a)
#define IWL_DEBUG_MAC80211(p, f, a...) IWL_DEBUG(p, IWL_DL_MAC80211, f, ## a)
-#define IWL_DEBUG_MACDUMP(p, f, a...) IWL_DEBUG(p, IWL_DL_MACDUMP, f, ## a)
#define IWL_DEBUG_TEMP(p, f, a...) IWL_DEBUG(p, IWL_DL_TEMP, f, ## a)
#define IWL_DEBUG_SCAN(p, f, a...) IWL_DEBUG(p, IWL_DL_SCAN, f, ## a)
#define IWL_DEBUG_RX(p, f, a...) IWL_DEBUG(p, IWL_DL_RX, f, ## a)
@@ -164,7 +179,6 @@ static inline void iwl_dbgfs_unregister(struct iwl_priv *priv)
#define IWL_DEBUG_LED(p, f, a...) IWL_DEBUG(p, IWL_DL_LED, f, ## a)
#define IWL_DEBUG_WEP(p, f, a...) IWL_DEBUG(p, IWL_DL_WEP, f, ## a)
#define IWL_DEBUG_HC(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD, f, ## a)
-#define IWL_DEBUG_HC_DUMP(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD_DUMP, f, ## a)
#define IWL_DEBUG_EEPROM(p, f, a...) IWL_DEBUG(p, IWL_DL_EEPROM, f, ## a)
#define IWL_DEBUG_CALIB(p, f, a...) IWL_DEBUG(p, IWL_DL_CALIB, f, ## a)
#define IWL_DEBUG_FW(p, f, a...) IWL_DEBUG(p, IWL_DL_FW, f, ## a)
@@ -186,9 +200,7 @@ static inline void iwl_dbgfs_unregister(struct iwl_priv *priv)
#define IWL_DEBUG_STATS_LIMIT(p, f, a...) \
IWL_DEBUG_LIMIT(p, IWL_DL_STATS, f, ## a)
#define IWL_DEBUG_TX_REPLY(p, f, a...) IWL_DEBUG(p, IWL_DL_TX_REPLY, f, ## a)
-#define IWL_DEBUG_TX_REPLY_LIMIT(p, f, a...) \
- IWL_DEBUG_LIMIT(p, IWL_DL_TX_REPLY, f, ## a)
-#define IWL_DEBUG_QOS(p, f, a...) IWL_DEBUG(p, IWL_DL_QOS, f, ## a)
+#define IWL_DEBUG_TX_QUEUES(p, f, a...) IWL_DEBUG(p, IWL_DL_TX_QUEUES, f, ## a)
#define IWL_DEBUG_RADIO(p, f, a...) IWL_DEBUG(p, IWL_DL_RADIO, f, ## a)
#define IWL_DEBUG_POWER(p, f, a...) IWL_DEBUG(p, IWL_DL_POWER, f, ## a)
#define IWL_DEBUG_11H(p, f, a...) IWL_DEBUG(p, IWL_DL_11H, f, ## a)
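Editorial aside, not from the patch: with the IWL_DL_* categories now written as explicit hex masks, message filtering remains a plain bitwise test of the runtime debug level. The standalone sketch below reproduces that pattern with invented DL_* values and an fprintf sink; it is not the driver's macro, only the same shape.

#include <stdio.h>
#include <stdint.h>

#define DL_INFO	0x00000001u
#define DL_SCAN	0x00000800u
#define DL_TX	0x00800000u

static uint32_t debug_level = DL_INFO | DL_SCAN;	/* e.g. set via a debugfs knob */

#define DBG(mask, fmt, ...) \
	do { \
		if (debug_level & (mask)) \
			fprintf(stderr, fmt, ##__VA_ARGS__); \
	} while (0)

int main(void)
{
	DBG(DL_SCAN, "scan started on channel %d\n", 6);	/* printed */
	DBG(DL_TX, "tx queued\n");				/* filtered out */
	return 0;
}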
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index a1670e3f8bfa..04a3343f4610 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -234,11 +234,12 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
/* default is to dump the entire data segment */
if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) {
+ struct iwl_trans *trans = trans(priv);
priv->dbgfs_sram_offset = 0x800000;
- if (priv->ucode_type == IWL_UCODE_INIT)
- priv->dbgfs_sram_len = priv->ucode_init.data.len;
+ if (trans->shrd->ucode_type == IWL_UCODE_INIT)
+ priv->dbgfs_sram_len = trans->ucode_init.data.len;
else
- priv->dbgfs_sram_len = priv->ucode_rt.data.len;
+ priv->dbgfs_sram_len = trans->ucode_rt.data.len;
}
len = priv->dbgfs_sram_len;
@@ -341,7 +342,7 @@ static ssize_t iwl_dbgfs_wowlan_sram_read(struct file *file,
return simple_read_from_buffer(user_buf, count, ppos,
priv->wowlan_sram,
- priv->ucode_wowlan.data.len);
+ trans(priv)->ucode_wowlan.data.len);
}
static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
@@ -371,15 +372,13 @@ static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
i, station->sta.sta.addr,
station->sta.station_flags_msk);
pos += scnprintf(buf + pos, bufsz - pos,
- "TID\tseq_num\ttxq_id\ttfds\trate_n_flags\n");
+ "TID\tseq_num\trate_n_flags\n");
for (j = 0; j < IWL_MAX_TID_COUNT; j++) {
- tid_data = &priv->shrd->tid_data[i][j];
+ tid_data = &priv->tid_data[i][j];
pos += scnprintf(buf + pos, bufsz - pos,
- "%d:\t%#x\t%#x\t%u\t%#x",
+ "%d:\t%#x\t%#x",
j, tid_data->seq_number,
- tid_data->agg.txq_id,
- tid_data->tfds_in_queue,
tid_data->agg.rate_n_flags);
if (tid_data->agg.wait_for_ba)
@@ -407,7 +406,7 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file,
const u8 *ptr;
char *buf;
u16 eeprom_ver;
- size_t eeprom_len = priv->cfg->base_params->eeprom_size;
+ size_t eeprom_len = cfg(priv)->base_params->eeprom_size;
buf_size = 4 * eeprom_len + 256;
if (eeprom_len % 16) {
@@ -415,7 +414,7 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file,
return -ENODATA;
}
- ptr = priv->eeprom;
+ ptr = priv->shrd->eeprom;
if (!ptr) {
IWL_ERR(priv, "Invalid EEPROM/OTP memory\n");
return -ENOMEM;
@@ -427,10 +426,10 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file,
IWL_ERR(priv, "Can not allocate Buffer\n");
return -ENOMEM;
}
- eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION);
+ eeprom_ver = iwl_eeprom_query16(priv->shrd, EEPROM_VERSION);
pos += scnprintf(buf + pos, buf_size - pos, "NVM Type: %s, "
"version: 0x%x\n",
- (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP)
+ (trans(priv)->nvm_device_type == NVM_DEVICE_TYPE_OTP)
? "OTP" : "EEPROM", eeprom_ver);
for (ofs = 0 ; ofs < eeprom_len ; ofs += 16) {
pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs);
@@ -1541,15 +1540,15 @@ static ssize_t iwl_dbgfs_ucode_tx_stats_read(struct file *file,
if (tx->tx_power.ant_a || tx->tx_power.ant_b || tx->tx_power.ant_c) {
pos += scnprintf(buf + pos, bufsz - pos,
"tx power: (1/2 dB step)\n");
- if ((priv->cfg->valid_tx_ant & ANT_A) && tx->tx_power.ant_a)
+ if ((cfg(priv)->valid_tx_ant & ANT_A) && tx->tx_power.ant_a)
pos += scnprintf(buf + pos, bufsz - pos,
fmt_hex, "antenna A:",
tx->tx_power.ant_a);
- if ((priv->cfg->valid_tx_ant & ANT_B) && tx->tx_power.ant_b)
+ if ((cfg(priv)->valid_tx_ant & ANT_B) && tx->tx_power.ant_b)
pos += scnprintf(buf + pos, bufsz - pos,
fmt_hex, "antenna B:",
tx->tx_power.ant_b);
- if ((priv->cfg->valid_tx_ant & ANT_C) && tx->tx_power.ant_c)
+ if ((cfg(priv)->valid_tx_ant & ANT_C) && tx->tx_power.ant_c)
pos += scnprintf(buf + pos, bufsz - pos,
fmt_hex, "antenna C:",
tx->tx_power.ant_c);
@@ -2220,7 +2219,7 @@ static ssize_t iwl_dbgfs_plcp_delta_read(struct file *file,
const size_t bufsz = sizeof(buf);
pos += scnprintf(buf + pos, bufsz - pos, "%u\n",
- priv->cfg->base_params->plcp_delta_threshold);
+ cfg(priv)->base_params->plcp_delta_threshold);
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
@@ -2242,10 +2241,10 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
return -EINVAL;
if ((plcp < IWL_MAX_PLCP_ERR_THRESHOLD_MIN) ||
(plcp > IWL_MAX_PLCP_ERR_THRESHOLD_MAX))
- priv->cfg->base_params->plcp_delta_threshold =
+ cfg(priv)->base_params->plcp_delta_threshold =
IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE;
else
- priv->cfg->base_params->plcp_delta_threshold = plcp;
+ cfg(priv)->base_params->plcp_delta_threshold = plcp;
return count;
}
@@ -2347,7 +2346,7 @@ static ssize_t iwl_dbgfs_wd_timeout_write(struct file *file,
if (timeout < 0 || timeout > IWL_MAX_WD_TIMEOUT)
timeout = IWL_DEF_WD_TIMEOUT;
- priv->cfg->base_params->wd_timeout = timeout;
+ cfg(priv)->base_params->wd_timeout = timeout;
iwl_setup_watchdog(priv);
return count;
}
@@ -2407,10 +2406,10 @@ static ssize_t iwl_dbgfs_protection_mode_read(struct file *file,
char buf[40];
const size_t bufsz = sizeof(buf);
- if (priv->cfg->ht_params)
+ if (cfg(priv)->ht_params)
pos += scnprintf(buf + pos, bufsz - pos,
"use %s for aggregation\n",
- (priv->cfg->ht_params->use_rts_for_aggregation) ?
+ (cfg(priv)->ht_params->use_rts_for_aggregation) ?
"rts/cts" : "cts-to-self");
else
pos += scnprintf(buf + pos, bufsz - pos, "N/A");
@@ -2427,7 +2426,7 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
int buf_size;
int rts;
- if (!priv->cfg->ht_params)
+ if (!cfg(priv)->ht_params)
return -EINVAL;
memset(buf, 0, sizeof(buf));
@@ -2437,9 +2436,9 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
if (sscanf(buf, "%d", &rts) != 1)
return -EINVAL;
if (rts)
- priv->cfg->ht_params->use_rts_for_aggregation = true;
+ cfg(priv)->ht_params->use_rts_for_aggregation = true;
else
- priv->cfg->ht_params->use_rts_for_aggregation = false;
+ cfg(priv)->ht_params->use_rts_for_aggregation = false;
return count;
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index 6c00a447963d..e54a4d11e584 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -60,11 +60,10 @@ struct iwl_tx_queue;
/* Default noise level to report when noise measurement is not available.
* This may be because we're:
- * 1) Not associated (4965, no beacon statistics being sent to driver)
+ *   1) Not associated (no beacon statistics being sent to driver)
* 2) Scanning (noise measurement does not apply to associated channel)
- * 3) Receiving CCK (3945 delivers noise info only for OFDM frames)
* Use default noise value of -127 ... this is below the range of measurable
- * Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user.
+ * Rx dBm for all agn devices, so it can indicate "unmeasurable" to user.
* Also, -127 works better than 0 when averaging frames with/without
* noise info (e.g. averaging might be done in app); measured dBm values are
* always negative ... using a negative value as the default keeps all
@@ -190,6 +189,69 @@ struct iwl_qos_info {
struct iwl_qosparam_cmd def_qos_parm;
};
+/**
+ * enum iwl_agg_state
+ *
+ * The state machine of the BA agreement establishment / tear down.
+ * These states relate to a specific RA / TID.
+ *
+ * @IWL_AGG_OFF: aggregation is not used
+ * @IWL_AGG_ON: aggregation session is up
+ * @IWL_EMPTYING_HW_QUEUE_ADDBA: establishing a BA session - waiting for the
+ *	HW queue to be emptied of packets for this RA / TID.
+ * @IWL_EMPTYING_HW_QUEUE_DELBA: tearing down a BA session - waiting for the
+ *	HW queue to be emptied of packets for this RA / TID.
+ */
+enum iwl_agg_state {
+ IWL_AGG_OFF = 0,
+ IWL_AGG_ON,
+ IWL_EMPTYING_HW_QUEUE_ADDBA,
+ IWL_EMPTYING_HW_QUEUE_DELBA,
+};
+
+/**
+ * struct iwl_ht_agg - aggregation state machine
+ *
+ * This struct holds the states for the BA agreement establishment and tear
+ * down. It also holds the state during the BA session itself. This struct is
+ * duplicated for each RA / TID.
+ *
+ * @rate_n_flags: Rate at which Tx was attempted. Holds the data between the
+ * Tx response (REPLY_TX), and the block ack notification
+ * (REPLY_COMPRESSED_BA).
+ * @state: state of the BA agreement establishment / tear down.
+ * @txq_id: Tx queue used by the BA session - used by the transport layer.
+ * Needed by the upper layer for debugfs only.
+ * @ssn: the first packet to be sent in AGG HW queue in Tx AGG start flow, or
+ * the first packet to be sent in legacy HW queue in Tx AGG stop flow.
+ * Basically when next_reclaimed reaches ssn, we can tell mac80211 that
+ * we are ready to finish the Tx AGG stop / start flow.
+ * @wait_for_ba: Expect block-ack before next Tx reply
+ */
+struct iwl_ht_agg {
+ u32 rate_n_flags;
+ enum iwl_agg_state state;
+ u16 txq_id;
+ u16 ssn;
+ bool wait_for_ba;
+};
+
+/**
+ * struct iwl_tid_data - one for each RA / TID
+ *
+ * This struct holds the state for each RA / TID.
+ *
+ * @seq_number: the next WiFi sequence number to use
+ * @next_reclaimed: the WiFi sequence number of the next packet to be acked.
+ * This is basically (last acked packet++).
+ * @agg: aggregation state machine
+ */
+struct iwl_tid_data {
+ u16 seq_number;
+ u16 next_reclaimed;
+ struct iwl_ht_agg agg;
+};
+
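Editorial aside, not from the patch: iwl_tid_data is now held per (station, TID) pair — the debugfs hunk earlier reads it as priv->tid_data[i][j]. The standalone sketch below illustrates that layout and a sequence-number update for one TID; the array sizes and the 0x10 step (the 802.11 sequence number living in bits 4..15 of the seq_ctrl field) are shown for illustration, not taken verbatim from the driver.

#include <stdint.h>

#define STA_COUNT	16	/* illustrative, not IWLAGN_STATION_COUNT */
#define TID_COUNT	8	/* illustrative, not IWL_MAX_TID_COUNT */

struct tid_data_sketch {
	uint16_t seq_number;		/* next WiFi sequence number to use */
	uint16_t next_reclaimed;	/* next sequence number expected to be acked */
};

static struct tid_data_sketch tid_data[STA_COUNT][TID_COUNT];

static uint16_t next_seq_ctrl(int sta_id, int tid)
{
	struct tid_data_sketch *td = &tid_data[sta_id][tid];
	uint16_t seq = td->seq_number;

	td->seq_number = (uint16_t)(seq + 0x10);	/* advance by one MSDU */
	return seq;
}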
/*
* Structure should be accessed with sta_lock held. When station addition
* is in progress (IWL_STA_UCODE_INPROGRESS) it is possible to access only
@@ -230,17 +292,6 @@ struct iwl_vif_priv {
u8 ibss_bssid_sta_id;
};
-/* one for each uCode image (inst/data, boot/init/runtime) */
-struct fw_desc {
- void *v_addr; /* access by driver */
- dma_addr_t p_addr; /* access by card's busmaster DMA */
- u32 len; /* bytes */
-};
-
-struct fw_img {
- struct fw_desc code, data;
-};
-
/* v1/v2 uCode file layout */
struct iwl_ucode_header {
__le32 ver; /* major/minor/API/serial */
@@ -452,29 +503,6 @@ enum iwlagn_chain_noise_state {
IWL_CHAIN_NOISE_DONE,
};
-
-/*
- * enum iwl_calib
- * defines the order in which results of initial calibrations
- * should be sent to the runtime uCode
- */
-enum iwl_calib {
- IWL_CALIB_XTAL,
- IWL_CALIB_DC,
- IWL_CALIB_LO,
- IWL_CALIB_TX_IQ,
- IWL_CALIB_TX_IQ_PERD,
- IWL_CALIB_BASE_BAND,
- IWL_CALIB_TEMP_OFFSET,
- IWL_CALIB_MAX
-};
-
-/* Opaque calibration results */
-struct iwl_calib_result {
- void *buf;
- size_t buf_len;
-};
-
/* Sensitivity calib data */
struct iwl_sensitivity_data {
u32 auto_corr_ofdm;
@@ -547,16 +575,6 @@ enum iwl_access_mode {
IWL_OTP_ACCESS_RELATIVE,
};
-/**
- * enum iwl_pa_type - Power Amplifier type
- * @IWL_PA_SYSTEM: based on uCode configuration
- * @IWL_PA_INTERNAL: use Internal only
- */
-enum iwl_pa_type {
- IWL_PA_SYSTEM = 0,
- IWL_PA_INTERNAL = 1,
-};
-
/* reply_tx_statistics (for _agn devices) */
struct reply_tx_error_statistics {
u32 pp_delay;
@@ -714,35 +732,6 @@ struct iwl_force_reset {
*/
#define IWLAGN_EXT_BEACON_TIME_POS 22
-/**
- * struct iwl_notification_wait - notification wait entry
- * @list: list head for global list
- * @fn: function called with the notification
- * @cmd: command ID
- *
- * This structure is not used directly, to wait for a
- * notification declare it on the stack, and call
- * iwlagn_init_notification_wait() with appropriate
- * parameters. Then do whatever will cause the ucode
- * to notify the driver, and to wait for that then
- * call iwlagn_wait_notification().
- *
- * Each notification is one-shot. If at some point we
- * need to support multi-shot notifications (which
- * can't be allocated on the stack) we need to modify
- * the code for them.
- */
-struct iwl_notification_wait {
- struct list_head list;
-
- void (*fn)(struct iwl_priv *priv, struct iwl_rx_packet *pkt,
- void *data);
- void *fn_data;
-
- u8 cmd;
- bool triggered, aborted;
-};
-
struct iwl_rxon_context {
struct ieee80211_vif *vif;
@@ -805,14 +794,7 @@ enum iwl_scan_type {
IWL_SCAN_ROC,
};
-enum iwlagn_ucode_type {
- IWL_UCODE_NONE,
- IWL_UCODE_REGULAR,
- IWL_UCODE_INIT,
- IWL_UCODE_WOWLAN,
-};
-
-#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL
+#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
struct iwl_testmode_trace {
u32 buff_size;
u32 total_size;
@@ -822,8 +804,20 @@ struct iwl_testmode_trace {
dma_addr_t dma_addr;
bool trace_enabled;
};
+struct iwl_testmode_sram {
+ u32 buff_size;
+ u32 num_chunks;
+ u8 *buff_addr;
+ bool sram_readed;
+};
#endif
+struct iwl_wipan_noa_data {
+ struct rcu_head rcu_head;
+ u32 length;
+ u8 data[];
+};
+
struct iwl_priv {
/*data shared among all the driver's layers */
@@ -835,7 +829,6 @@ struct iwl_priv {
struct ieee80211_channel *ieee_channels;
struct ieee80211_rate *ieee_rates;
struct kmem_cache *tx_cmd_pool;
- struct iwl_cfg *cfg;
enum ieee80211_band band;
@@ -880,8 +873,7 @@ struct iwl_priv {
s32 temperature; /* Celsius */
s32 last_temperature;
- /* init calibration results */
- struct iwl_calib_result calib_results[IWL_CALIB_MAX];
+ struct iwl_wipan_noa_data __rcu *noa_data;
/* Scan related variables */
unsigned long scan_start;
@@ -907,23 +899,12 @@ struct iwl_priv {
u32 ucode_ver; /* version of ucode, copy of
iwl_ucode.ver */
- struct fw_img ucode_rt;
- struct fw_img ucode_init;
- struct fw_img ucode_wowlan;
-
- enum iwlagn_ucode_type ucode_type;
- u8 ucode_write_complete; /* the image write is complete */
char firmware_name[25];
struct iwl_rxon_context contexts[NUM_IWL_RXON_CTX];
__le16 switch_channel;
- struct {
- u32 error_event_table;
- u32 log_event_table;
- } device_pointers;
-
u16 active_rate;
u8 start_calib;
@@ -951,17 +932,13 @@ struct iwl_priv {
int num_stations;
struct iwl_station_entry stations[IWLAGN_STATION_COUNT];
unsigned long ucode_key_table;
+ struct iwl_tid_data tid_data[IWLAGN_STATION_COUNT][IWL_MAX_TID_COUNT];
u8 mac80211_registered;
/* Indication if ieee80211_ops->open has been called */
u8 is_open;
- /* eeprom -- this is in the card's little endian byte order */
- u8 *eeprom;
- int nvm_device_type;
- struct iwl_eeprom_calib_info *calib_info;
-
enum nl80211_iftype iw_mode;
/* Last Rx'd beacon timestamp */
@@ -1017,10 +994,6 @@ struct iwl_priv {
/* counts reply_tx error */
struct reply_tx_error_statistics reply_tx_stats;
struct reply_agg_tx_error_statistics reply_agg_tx_stats;
- /* notification wait support */
- struct list_head notif_waits;
- spinlock_t notif_wait_lock;
- wait_queue_head_t notif_waitq;
/* remain-on-channel offload support */
struct ieee80211_channel *hw_roc_channel;
@@ -1098,8 +1071,9 @@ struct iwl_priv {
struct led_classdev led;
unsigned long blink_on, blink_off;
bool led_registered;
-#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL
+#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
struct iwl_testmode_trace testmode_trace;
+ struct iwl_testmode_sram testmode_sram;
u32 tm_fixed_rate;
#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.c b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
index a635a7e75447..2a2c8de64a04 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.c
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
@@ -28,7 +28,7 @@
/* sparse doesn't like tracepoint macros */
#ifndef __CHECKER__
-#include "iwl-dev.h"
+#include "iwl-trans.h"
#define CREATE_TRACE_POINTS
#include "iwl-devtrace.h"
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
index 8a51c5ccda1e..9b212a8f30bb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
@@ -29,7 +29,6 @@
#include <linux/tracepoint.h>
-struct iwl_priv;
#if !defined(CONFIG_IWLWIFI_DEVICE_TRACING) || defined(__CHECKER__)
#undef TRACE_EVENT
@@ -37,14 +36,14 @@ struct iwl_priv;
static inline void trace_ ## name(proto) {}
#endif
-#define PRIV_ENTRY __field(struct iwl_priv *, priv)
+#define PRIV_ENTRY __field(void *, priv)
#define PRIV_ASSIGN __entry->priv = priv
#undef TRACE_SYSTEM
#define TRACE_SYSTEM iwlwifi_io
TRACE_EVENT(iwlwifi_dev_ioread32,
- TP_PROTO(struct iwl_priv *priv, u32 offs, u32 val),
+ TP_PROTO(void *priv, u32 offs, u32 val),
TP_ARGS(priv, offs, val),
TP_STRUCT__entry(
PRIV_ENTRY
@@ -60,7 +59,7 @@ TRACE_EVENT(iwlwifi_dev_ioread32,
);
TRACE_EVENT(iwlwifi_dev_iowrite8,
- TP_PROTO(struct iwl_priv *priv, u32 offs, u8 val),
+ TP_PROTO(void *priv, u32 offs, u8 val),
TP_ARGS(priv, offs, val),
TP_STRUCT__entry(
PRIV_ENTRY
@@ -76,7 +75,7 @@ TRACE_EVENT(iwlwifi_dev_iowrite8,
);
TRACE_EVENT(iwlwifi_dev_iowrite32,
- TP_PROTO(struct iwl_priv *priv, u32 offs, u32 val),
+ TP_PROTO(void *priv, u32 offs, u32 val),
TP_ARGS(priv, offs, val),
TP_STRUCT__entry(
PRIV_ENTRY
@@ -91,11 +90,40 @@ TRACE_EVENT(iwlwifi_dev_iowrite32,
TP_printk("[%p] write io[%#x] = %#x)", __entry->priv, __entry->offs, __entry->val)
);
+TRACE_EVENT(iwlwifi_dev_irq,
+ TP_PROTO(void *priv),
+ TP_ARGS(priv),
+ TP_STRUCT__entry(
+ PRIV_ENTRY
+ ),
+ TP_fast_assign(
+ PRIV_ASSIGN;
+ ),
+ /* TP_printk("") doesn't compile */
+ TP_printk("%d", 0)
+);
+
+TRACE_EVENT(iwlwifi_dev_ict_read,
+ TP_PROTO(void *priv, u32 index, u32 value),
+ TP_ARGS(priv, index, value),
+ TP_STRUCT__entry(
+ PRIV_ENTRY
+ __field(u32, index)
+ __field(u32, value)
+ ),
+ TP_fast_assign(
+ PRIV_ASSIGN;
+ __entry->index = index;
+ __entry->value = value;
+ ),
+ TP_printk("read ict[%d] = %#.8x", __entry->index, __entry->value)
+);
+
#undef TRACE_SYSTEM
#define TRACE_SYSTEM iwlwifi_ucode
TRACE_EVENT(iwlwifi_dev_ucode_cont_event,
- TP_PROTO(struct iwl_priv *priv, u32 time, u32 data, u32 ev),
+ TP_PROTO(void *priv, u32 time, u32 data, u32 ev),
TP_ARGS(priv, time, data, ev),
TP_STRUCT__entry(
PRIV_ENTRY
@@ -115,7 +143,7 @@ TRACE_EVENT(iwlwifi_dev_ucode_cont_event,
);
TRACE_EVENT(iwlwifi_dev_ucode_wrap_event,
- TP_PROTO(struct iwl_priv *priv, u32 wraps, u32 n_entry, u32 p_entry),
+ TP_PROTO(void *priv, u32 wraps, u32 n_entry, u32 p_entry),
TP_ARGS(priv, wraps, n_entry, p_entry),
TP_STRUCT__entry(
PRIV_ENTRY
@@ -139,7 +167,7 @@ TRACE_EVENT(iwlwifi_dev_ucode_wrap_event,
#define TRACE_SYSTEM iwlwifi
TRACE_EVENT(iwlwifi_dev_hcmd,
- TP_PROTO(struct iwl_priv *priv, u32 flags,
+ TP_PROTO(void *priv, u32 flags,
const void *hcmd0, size_t len0,
const void *hcmd1, size_t len1,
const void *hcmd2, size_t len2),
@@ -164,7 +192,7 @@ TRACE_EVENT(iwlwifi_dev_hcmd,
);
TRACE_EVENT(iwlwifi_dev_rx,
- TP_PROTO(struct iwl_priv *priv, void *rxbuf, size_t len),
+ TP_PROTO(void *priv, void *rxbuf, size_t len),
TP_ARGS(priv, rxbuf, len),
TP_STRUCT__entry(
PRIV_ENTRY
@@ -179,7 +207,7 @@ TRACE_EVENT(iwlwifi_dev_rx,
);
TRACE_EVENT(iwlwifi_dev_tx,
- TP_PROTO(struct iwl_priv *priv, void *tfd, size_t tfdlen,
+ TP_PROTO(void *priv, void *tfd, size_t tfdlen,
void *buf0, size_t buf0_len,
void *buf1, size_t buf1_len),
TP_ARGS(priv, tfd, tfdlen, buf0, buf0_len, buf1, buf1_len),
@@ -211,7 +239,7 @@ TRACE_EVENT(iwlwifi_dev_tx,
);
TRACE_EVENT(iwlwifi_dev_ucode_error,
- TP_PROTO(struct iwl_priv *priv, u32 desc, u32 tsf_low,
+ TP_PROTO(void *priv, u32 desc, u32 tsf_low,
u32 data1, u32 data2, u32 line, u32 blink1,
u32 blink2, u32 ilink1, u32 ilink2, u32 bcon_time,
u32 gp1, u32 gp2, u32 gp3, u32 ucode_ver, u32 hw_ver,
@@ -271,7 +299,7 @@ TRACE_EVENT(iwlwifi_dev_ucode_error,
);
TRACE_EVENT(iwlwifi_dev_ucode_event,
- TP_PROTO(struct iwl_priv *priv, u32 time, u32 data, u32 ev),
+ TP_PROTO(void *priv, u32 time, u32 data, u32 ev),
TP_ARGS(priv, time, data, ev),
TP_STRUCT__entry(
PRIV_ENTRY
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
index a4e43bd4a547..c1eda9724f42 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
@@ -149,23 +149,23 @@ static const u8 iwl_eeprom_band_7[] = { /* 5.2 ht40 channel */
* EEPROM chip, not a single event, so even reads could conflict if they
* weren't arbitrated by the semaphore.
*/
-static int iwl_eeprom_acquire_semaphore(struct iwl_priv *priv)
+static int iwl_eeprom_acquire_semaphore(struct iwl_bus *bus)
{
u16 count;
int ret;
for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) {
/* Request semaphore */
- iwl_set_bit(bus(priv), CSR_HW_IF_CONFIG_REG,
+ iwl_set_bit(bus, CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
/* See if we got it */
- ret = iwl_poll_bit(bus(priv), CSR_HW_IF_CONFIG_REG,
+ ret = iwl_poll_bit(bus, CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
EEPROM_SEM_TIMEOUT);
if (ret >= 0) {
- IWL_DEBUG_EEPROM(priv,
+ IWL_DEBUG_EEPROM(bus,
"Acquired semaphore after %d tries.\n",
count+1);
return ret;
@@ -175,39 +175,39 @@ static int iwl_eeprom_acquire_semaphore(struct iwl_priv *priv)
return ret;
}
-static void iwl_eeprom_release_semaphore(struct iwl_priv *priv)
+static void iwl_eeprom_release_semaphore(struct iwl_bus *bus)
{
- iwl_clear_bit(bus(priv), CSR_HW_IF_CONFIG_REG,
+ iwl_clear_bit(bus, CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
}
-static int iwl_eeprom_verify_signature(struct iwl_priv *priv)
+static int iwl_eeprom_verify_signature(struct iwl_trans *trans)
{
- u32 gp = iwl_read32(bus(priv), CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
+ u32 gp = iwl_read32(bus(trans), CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
int ret = 0;
- IWL_DEBUG_EEPROM(priv, "EEPROM signature=0x%08x\n", gp);
+ IWL_DEBUG_EEPROM(trans, "EEPROM signature=0x%08x\n", gp);
switch (gp) {
case CSR_EEPROM_GP_BAD_SIG_EEP_GOOD_SIG_OTP:
- if (priv->nvm_device_type != NVM_DEVICE_TYPE_OTP) {
- IWL_ERR(priv, "EEPROM with bad signature: 0x%08x\n",
+ if (trans->nvm_device_type != NVM_DEVICE_TYPE_OTP) {
+ IWL_ERR(trans, "EEPROM with bad signature: 0x%08x\n",
gp);
ret = -ENOENT;
}
break;
case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K:
case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K:
- if (priv->nvm_device_type != NVM_DEVICE_TYPE_EEPROM) {
- IWL_ERR(priv, "OTP with bad signature: 0x%08x\n", gp);
+ if (trans->nvm_device_type != NVM_DEVICE_TYPE_EEPROM) {
+ IWL_ERR(trans, "OTP with bad signature: 0x%08x\n", gp);
ret = -ENOENT;
}
break;
case CSR_EEPROM_GP_BAD_SIGNATURE_BOTH_EEP_AND_OTP:
default:
- IWL_ERR(priv, "bad EEPROM/OTP signature, type=%s, "
+ IWL_ERR(trans, "bad EEPROM/OTP signature, type=%s, "
"EEPROM_GP=0x%08x\n",
- (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP)
+ (trans->nvm_device_type == NVM_DEVICE_TYPE_OTP)
? "OTP" : "EEPROM", gp);
ret = -ENOENT;
break;
@@ -215,11 +215,11 @@ static int iwl_eeprom_verify_signature(struct iwl_priv *priv)
return ret;
}
-u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset)
+u16 iwl_eeprom_query16(const struct iwl_shared *shrd, size_t offset)
{
- if (!priv->eeprom)
+ if (!shrd->eeprom)
return 0;
- return (u16)priv->eeprom[offset] | ((u16)priv->eeprom[offset + 1] << 8);
+ return (u16)shrd->eeprom[offset] | ((u16)shrd->eeprom[offset + 1] << 8);
}
int iwl_eeprom_check_version(struct iwl_priv *priv)
@@ -227,11 +227,11 @@ int iwl_eeprom_check_version(struct iwl_priv *priv)
u16 eeprom_ver;
u16 calib_ver;
- eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION);
- calib_ver = iwlagn_eeprom_calib_version(priv);
+ eeprom_ver = iwl_eeprom_query16(priv->shrd, EEPROM_VERSION);
+ calib_ver = iwl_eeprom_calib_version(priv->shrd);
- if (eeprom_ver < priv->cfg->eeprom_ver ||
- calib_ver < priv->cfg->eeprom_calib_ver)
+ if (eeprom_ver < cfg(priv)->eeprom_ver ||
+ calib_ver < cfg(priv)->eeprom_calib_ver)
goto err;
IWL_INFO(priv, "device EEPROM VER=0x%x, CALIB=0x%x\n",
@@ -241,45 +241,46 @@ int iwl_eeprom_check_version(struct iwl_priv *priv)
err:
IWL_ERR(priv, "Unsupported (too old) EEPROM VER=0x%x < 0x%x "
"CALIB=0x%x < 0x%x\n",
- eeprom_ver, priv->cfg->eeprom_ver,
- calib_ver, priv->cfg->eeprom_calib_ver);
+ eeprom_ver, cfg(priv)->eeprom_ver,
+ calib_ver, cfg(priv)->eeprom_calib_ver);
return -EINVAL;
}
int iwl_eeprom_check_sku(struct iwl_priv *priv)
{
+ struct iwl_shared *shrd = priv->shrd;
u16 radio_cfg;
- if (!priv->cfg->sku) {
+ if (!cfg(priv)->sku) {
/* not using sku overwrite */
- priv->cfg->sku = iwl_eeprom_query16(priv, EEPROM_SKU_CAP);
- if (priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE &&
- !priv->cfg->ht_params) {
+ cfg(priv)->sku = iwl_eeprom_query16(shrd, EEPROM_SKU_CAP);
+ if (cfg(priv)->sku & EEPROM_SKU_CAP_11N_ENABLE &&
+ !cfg(priv)->ht_params) {
IWL_ERR(priv, "Invalid 11n configuration\n");
return -EINVAL;
}
}
- if (!priv->cfg->sku) {
+ if (!cfg(priv)->sku) {
IWL_ERR(priv, "Invalid device sku\n");
return -EINVAL;
}
- IWL_INFO(priv, "Device SKU: 0X%x\n", priv->cfg->sku);
+ IWL_INFO(priv, "Device SKU: 0x%X\n", cfg(priv)->sku);
- if (!priv->cfg->valid_tx_ant && !priv->cfg->valid_rx_ant) {
+ if (!cfg(priv)->valid_tx_ant && !cfg(priv)->valid_rx_ant) {
/* not using .cfg overwrite */
- radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
- priv->cfg->valid_tx_ant = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg);
- priv->cfg->valid_rx_ant = EEPROM_RF_CFG_RX_ANT_MSK(radio_cfg);
- if (!priv->cfg->valid_tx_ant || !priv->cfg->valid_rx_ant) {
- IWL_ERR(priv, "Invalid chain (0X%x, 0X%x)\n",
- priv->cfg->valid_tx_ant,
- priv->cfg->valid_rx_ant);
+ radio_cfg = iwl_eeprom_query16(shrd, EEPROM_RADIO_CONFIG);
+ cfg(priv)->valid_tx_ant = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg);
+ cfg(priv)->valid_rx_ant = EEPROM_RF_CFG_RX_ANT_MSK(radio_cfg);
+ if (!cfg(priv)->valid_tx_ant || !cfg(priv)->valid_rx_ant) {
+ IWL_ERR(priv, "Invalid chain (0x%X, 0x%X)\n",
+ cfg(priv)->valid_tx_ant,
+ cfg(priv)->valid_rx_ant);
return -EINVAL;
}
- IWL_INFO(priv, "Valid Tx ant: 0X%x, Valid Rx ant: 0X%x\n",
- priv->cfg->valid_tx_ant, priv->cfg->valid_rx_ant);
+ IWL_INFO(priv, "Valid Tx ant: 0x%X, Valid Rx ant: 0x%X\n",
+ cfg(priv)->valid_tx_ant, cfg(priv)->valid_rx_ant);
}
/*
* for some special cases,
@@ -289,9 +290,9 @@ int iwl_eeprom_check_sku(struct iwl_priv *priv)
return 0;
}
-void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac)
+void iwl_eeprom_get_mac(const struct iwl_shared *shrd, u8 *mac)
{
- const u8 *addr = iwl_eeprom_query_addr(priv,
+ const u8 *addr = iwl_eeprom_query_addr(shrd,
EEPROM_MAC_ADDRESS);
memcpy(mac, addr, ETH_ALEN);
}
@@ -302,19 +303,19 @@ void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac)
*
******************************************************************************/
-static void iwl_set_otp_access(struct iwl_priv *priv, enum iwl_access_mode mode)
+static void iwl_set_otp_access(struct iwl_bus *bus, enum iwl_access_mode mode)
{
- iwl_read32(bus(priv), CSR_OTP_GP_REG);
+ iwl_read32(bus, CSR_OTP_GP_REG);
if (mode == IWL_OTP_ACCESS_ABSOLUTE)
- iwl_clear_bit(bus(priv), CSR_OTP_GP_REG,
+ iwl_clear_bit(bus, CSR_OTP_GP_REG,
CSR_OTP_GP_REG_OTP_ACCESS_MODE);
else
- iwl_set_bit(bus(priv), CSR_OTP_GP_REG,
+ iwl_set_bit(bus, CSR_OTP_GP_REG,
CSR_OTP_GP_REG_OTP_ACCESS_MODE);
}
-static int iwl_get_nvm_type(struct iwl_priv *priv, u32 hw_rev)
+static int iwl_get_nvm_type(struct iwl_bus *bus, u32 hw_rev)
{
u32 otpgp;
int nvm_type;
@@ -322,7 +323,7 @@ static int iwl_get_nvm_type(struct iwl_priv *priv, u32 hw_rev)
/* OTP only valid for CP/PP and after */
switch (hw_rev & CSR_HW_REV_TYPE_MSK) {
case CSR_HW_REV_TYPE_NONE:
- IWL_ERR(priv, "Unknown hardware type\n");
+ IWL_ERR(bus, "Unknown hardware type\n");
return -ENOENT;
case CSR_HW_REV_TYPE_5300:
case CSR_HW_REV_TYPE_5350:
@@ -331,7 +332,7 @@ static int iwl_get_nvm_type(struct iwl_priv *priv, u32 hw_rev)
nvm_type = NVM_DEVICE_TYPE_EEPROM;
break;
default:
- otpgp = iwl_read32(bus(priv), CSR_OTP_GP_REG);
+ otpgp = iwl_read32(bus, CSR_OTP_GP_REG);
if (otpgp & CSR_OTP_GP_REG_DEVICE_SELECT)
nvm_type = NVM_DEVICE_TYPE_OTP;
else
@@ -341,73 +342,73 @@ static int iwl_get_nvm_type(struct iwl_priv *priv, u32 hw_rev)
return nvm_type;
}
-static int iwl_init_otp_access(struct iwl_priv *priv)
+static int iwl_init_otp_access(struct iwl_bus *bus)
{
int ret;
/* Enable 40MHz radio clock */
- iwl_write32(bus(priv), CSR_GP_CNTRL,
- iwl_read32(bus(priv), CSR_GP_CNTRL) |
+ iwl_write32(bus, CSR_GP_CNTRL,
+ iwl_read32(bus, CSR_GP_CNTRL) |
CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
/* wait for clock to be ready */
- ret = iwl_poll_bit(bus(priv), CSR_GP_CNTRL,
+ ret = iwl_poll_bit(bus, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
25000);
if (ret < 0)
- IWL_ERR(priv, "Time out access OTP\n");
+ IWL_ERR(bus, "Time out access OTP\n");
else {
- iwl_set_bits_prph(bus(priv), APMG_PS_CTRL_REG,
+ iwl_set_bits_prph(bus, APMG_PS_CTRL_REG,
APMG_PS_CTRL_VAL_RESET_REQ);
udelay(5);
- iwl_clear_bits_prph(bus(priv), APMG_PS_CTRL_REG,
+ iwl_clear_bits_prph(bus, APMG_PS_CTRL_REG,
APMG_PS_CTRL_VAL_RESET_REQ);
/*
* CSR auto clock gate disable bit -
* this is only applicable for HW with OTP shadow RAM
*/
- if (priv->cfg->base_params->shadow_ram_support)
- iwl_set_bit(bus(priv), CSR_DBG_LINK_PWR_MGMT_REG,
+ if (cfg(bus)->base_params->shadow_ram_support)
+ iwl_set_bit(bus, CSR_DBG_LINK_PWR_MGMT_REG,
CSR_RESET_LINK_PWR_MGMT_DISABLED);
}
return ret;
}
-static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, __le16 *eeprom_data)
+static int iwl_read_otp_word(struct iwl_bus *bus, u16 addr, __le16 *eeprom_data)
{
int ret = 0;
u32 r;
u32 otpgp;
- iwl_write32(bus(priv), CSR_EEPROM_REG,
+ iwl_write32(bus, CSR_EEPROM_REG,
CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
- ret = iwl_poll_bit(bus(priv), CSR_EEPROM_REG,
+ ret = iwl_poll_bit(bus, CSR_EEPROM_REG,
CSR_EEPROM_REG_READ_VALID_MSK,
CSR_EEPROM_REG_READ_VALID_MSK,
IWL_EEPROM_ACCESS_TIMEOUT);
if (ret < 0) {
- IWL_ERR(priv, "Time out reading OTP[%d]\n", addr);
+ IWL_ERR(bus, "Time out reading OTP[%d]\n", addr);
return ret;
}
- r = iwl_read32(bus(priv), CSR_EEPROM_REG);
+ r = iwl_read32(bus, CSR_EEPROM_REG);
/* check for ECC errors: */
- otpgp = iwl_read32(bus(priv), CSR_OTP_GP_REG);
+ otpgp = iwl_read32(bus, CSR_OTP_GP_REG);
if (otpgp & CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK) {
/* stop in this case */
/* set the uncorrectable OTP ECC bit for acknowledgement */
- iwl_set_bit(bus(priv), CSR_OTP_GP_REG,
+ iwl_set_bit(bus, CSR_OTP_GP_REG,
CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
- IWL_ERR(priv, "Uncorrectable OTP ECC error, abort OTP read\n");
+ IWL_ERR(bus, "Uncorrectable OTP ECC error, abort OTP read\n");
return -EINVAL;
}
if (otpgp & CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK) {
/* continue in this case */
/* set the correctable OTP ECC bit for acknowledgement */
- iwl_set_bit(bus(priv), CSR_OTP_GP_REG,
+ iwl_set_bit(bus, CSR_OTP_GP_REG,
CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK);
- IWL_ERR(priv, "Correctable OTP ECC error, continue read\n");
+ IWL_ERR(bus, "Correctable OTP ECC error, continue read\n");
}
*eeprom_data = cpu_to_le16(r >> 16);
return 0;
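iwl_read_otp_word() above reads one 16-bit OTP word and then acknowledges any ECC status bits it finds: an uncorrectable error aborts the read, a correctable one is only reported. A rough standalone model of that flow, with invented bit values and no real register semantics:

#include <stdio.h>
#include <stdint.h>

#define ECC_CORR_STATUS   0x1u    /* invented bit values */
#define ECC_UNCORR_STATUS 0x2u

/* read a 16-bit word from a 32-bit data register, acknowledging ECC status
 * bits by setting them back in the status register */
static int read_otp_word(uint32_t data_reg, uint32_t *status_reg, uint16_t *out)
{
	if (*status_reg & ECC_UNCORR_STATUS) {
		*status_reg |= ECC_UNCORR_STATUS;   /* acknowledge, then abort */
		return -1;
	}
	if (*status_reg & ECC_CORR_STATUS)
		*status_reg |= ECC_CORR_STATUS;     /* acknowledge and continue */

	*out = (uint16_t)(data_reg >> 16);
	return 0;
}

int main(void)
{
	uint32_t status = ECC_CORR_STATUS;
	uint16_t word;

	if (!read_otp_word(0xbeef0000u, &status, &word))
		printf("word = 0x%04x\n", word);
	return 0;
}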
@@ -416,20 +417,20 @@ static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, __le16 *eeprom_dat
/*
* iwl_is_otp_empty: check for empty OTP
*/
-static bool iwl_is_otp_empty(struct iwl_priv *priv)
+static bool iwl_is_otp_empty(struct iwl_bus *bus)
{
u16 next_link_addr = 0;
__le16 link_value;
bool is_empty = false;
/* locate the beginning of OTP link list */
- if (!iwl_read_otp_word(priv, next_link_addr, &link_value)) {
+ if (!iwl_read_otp_word(bus, next_link_addr, &link_value)) {
if (!link_value) {
- IWL_ERR(priv, "OTP is empty\n");
+ IWL_ERR(bus, "OTP is empty\n");
is_empty = true;
}
} else {
- IWL_ERR(priv, "Unable to read first block of OTP list.\n");
+ IWL_ERR(bus, "Unable to read first block of OTP list.\n");
is_empty = true;
}
@@ -446,7 +447,7 @@ static bool iwl_is_otp_empty(struct iwl_priv *priv)
* we should read and use to configure the device.
* only perform this operation if shadow RAM is disabled
*/
-static int iwl_find_otp_image(struct iwl_priv *priv,
+static int iwl_find_otp_image(struct iwl_bus *bus,
u16 *validblockaddr)
{
u16 next_link_addr = 0, valid_addr;
@@ -454,10 +455,10 @@ static int iwl_find_otp_image(struct iwl_priv *priv,
int usedblocks = 0;
/* set addressing mode to absolute to traverse the link list */
- iwl_set_otp_access(priv, IWL_OTP_ACCESS_ABSOLUTE);
+ iwl_set_otp_access(bus, IWL_OTP_ACCESS_ABSOLUTE);
/* checking for empty OTP or error */
- if (iwl_is_otp_empty(priv))
+ if (iwl_is_otp_empty(bus))
return -EINVAL;
/*
@@ -471,9 +472,9 @@ static int iwl_find_otp_image(struct iwl_priv *priv,
*/
valid_addr = next_link_addr;
next_link_addr = le16_to_cpu(link_value) * sizeof(u16);
- IWL_DEBUG_EEPROM(priv, "OTP blocks %d addr 0x%x\n",
+ IWL_DEBUG_EEPROM(bus, "OTP blocks %d addr 0x%x\n",
usedblocks, next_link_addr);
- if (iwl_read_otp_word(priv, next_link_addr, &link_value))
+ if (iwl_read_otp_word(bus, next_link_addr, &link_value))
return -EINVAL;
if (!link_value) {
/*
@@ -488,10 +489,10 @@ static int iwl_find_otp_image(struct iwl_priv *priv,
}
/* more in the link list, continue */
usedblocks++;
- } while (usedblocks <= priv->cfg->base_params->max_ll_items);
+ } while (usedblocks <= cfg(bus)->base_params->max_ll_items);
/* OTP has no valid blocks */
- IWL_DEBUG_EEPROM(priv, "OTP has no valid blocks\n");
+ IWL_DEBUG_EEPROM(bus, "OTP has no valid blocks\n");
return -EINVAL;
}
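iwl_find_otp_image() walks the OTP block list: each block starts with a link word pointing at the next block, a zero link terminates the list, and the walk gives up after max_ll_items blocks. A self-contained toy model of that traversal (layout and values invented for illustration):

#include <stdio.h>
#include <stdint.h>

/* toy OTP image, addressed in 16-bit words: each block's first word links to
 * the next block (in words); a zero link ends the list */
static const uint16_t otp[] = { 2, 0, 4, 0, 0 };

static int find_last_block(uint16_t *valid_addr, int max_blocks)
{
	uint16_t next = 0, valid = 0;
	uint16_t link = otp[0];              /* first link, as the empty-check read it */
	int used = 0;

	do {
		valid = next;                /* remember the block we just left */
		next = link * 2;             /* links count words, addresses count bytes */
		link = otp[next / 2];        /* read the link word of the next block */
		if (link == 0) {             /* next block has no successor */
			*valid_addr = valid;
			return 0;
		}
		used++;
	} while (used <= max_blocks);

	return -1;                           /* too many blocks: no valid image */
}

int main(void)
{
	uint16_t addr;

	if (!find_last_block(&addr, 3))
		printf("valid image at byte offset %u\n", (unsigned)addr);
	return 0;
}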
@@ -504,28 +505,28 @@ static int iwl_find_otp_image(struct iwl_priv *priv,
* iwl_get_max_txpower_avg - get the highest tx power from all chains.
* find the highest tx power from all chains for the channel
*/
-static s8 iwl_get_max_txpower_avg(struct iwl_priv *priv,
+static s8 iwl_get_max_txpower_avg(struct iwl_cfg *cfg,
struct iwl_eeprom_enhanced_txpwr *enhanced_txpower,
int element, s8 *max_txpower_in_half_dbm)
{
s8 max_txpower_avg = 0; /* (dBm) */
/* Take the highest tx power from any valid chains */
- if ((priv->cfg->valid_tx_ant & ANT_A) &&
+ if ((cfg->valid_tx_ant & ANT_A) &&
(enhanced_txpower[element].chain_a_max > max_txpower_avg))
max_txpower_avg = enhanced_txpower[element].chain_a_max;
- if ((priv->cfg->valid_tx_ant & ANT_B) &&
+ if ((cfg->valid_tx_ant & ANT_B) &&
(enhanced_txpower[element].chain_b_max > max_txpower_avg))
max_txpower_avg = enhanced_txpower[element].chain_b_max;
- if ((priv->cfg->valid_tx_ant & ANT_C) &&
+ if ((cfg->valid_tx_ant & ANT_C) &&
(enhanced_txpower[element].chain_c_max > max_txpower_avg))
max_txpower_avg = enhanced_txpower[element].chain_c_max;
- if (((priv->cfg->valid_tx_ant == ANT_AB) |
- (priv->cfg->valid_tx_ant == ANT_BC) |
- (priv->cfg->valid_tx_ant == ANT_AC)) &&
+ if (((cfg->valid_tx_ant == ANT_AB) |
+ (cfg->valid_tx_ant == ANT_BC) |
+ (cfg->valid_tx_ant == ANT_AC)) &&
(enhanced_txpower[element].mimo2_max > max_txpower_avg))
max_txpower_avg = enhanced_txpower[element].mimo2_max;
- if ((priv->cfg->valid_tx_ant == ANT_ABC) &&
+ if ((cfg->valid_tx_ant == ANT_ABC) &&
(enhanced_txpower[element].mimo3_max > max_txpower_avg))
max_txpower_avg = enhanced_txpower[element].mimo3_max;
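iwl_get_max_txpower_avg() simply takes the largest limit among the chains that cfg->valid_tx_ant declares usable, plus the MIMO2/MIMO3 entries when two or three antennas are enabled. A compact sketch of the same selection, using a popcount test instead of the explicit ANT_AB/ANT_BC/ANT_AC comparisons in the patch:

#include <stdio.h>

#define ANT_A 0x1
#define ANT_B 0x2
#define ANT_C 0x4

struct txp_entry { signed char a, b, c, mimo2, mimo3; };

/* pick the largest limit among the chains that the configuration allows;
 * __builtin_popcount is a GCC/Clang builtin */
static signed char max_txpower(unsigned valid_ant, const struct txp_entry *e)
{
	signed char max = 0;

	if ((valid_ant & ANT_A) && e->a > max)
		max = e->a;
	if ((valid_ant & ANT_B) && e->b > max)
		max = e->b;
	if ((valid_ant & ANT_C) && e->c > max)
		max = e->c;
	if (__builtin_popcount(valid_ant) == 2 && e->mimo2 > max)
		max = e->mimo2;
	if (__builtin_popcount(valid_ant) == 3 && e->mimo3 > max)
		max = e->mimo3;
	return max;
}

int main(void)
{
	struct txp_entry e = { .a = 30, .b = 28, .c = 0, .mimo2 = 32, .mimo3 = 0 };

	printf("max = %d\n", max_txpower(ANT_A | ANT_B, &e));
	return 0;
}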
@@ -582,6 +583,7 @@ iwl_eeprom_enh_txp_read_element(struct iwl_priv *priv,
void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv)
{
+ struct iwl_shared *shrd = priv->shrd;
struct iwl_eeprom_enhanced_txpwr *txp_array, *txp;
int idx, entries;
__le16 *txp_len;
@@ -590,10 +592,10 @@ void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv)
BUILD_BUG_ON(sizeof(struct iwl_eeprom_enhanced_txpwr) != 8);
/* the length is in 16-bit words, but we want entries */
- txp_len = (__le16 *) iwl_eeprom_query_addr(priv, EEPROM_TXP_SZ_OFFS);
+ txp_len = (__le16 *) iwl_eeprom_query_addr(shrd, EEPROM_TXP_SZ_OFFS);
entries = le16_to_cpup(txp_len) * 2 / EEPROM_TXP_ENTRY_LEN;
- txp_array = (void *) iwl_eeprom_query_addr(priv, EEPROM_TXP_OFFS);
+ txp_array = (void *) iwl_eeprom_query_addr(shrd, EEPROM_TXP_OFFS);
for (idx = 0; idx < entries; idx++) {
txp = &txp_array[idx];
@@ -627,7 +629,7 @@ void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv)
((txp->delta_20_in_40 & 0xf0) >> 4),
(txp->delta_20_in_40 & 0x0f));
- max_txp_avg = iwl_get_max_txpower_avg(priv, txp_array, idx,
+ max_txp_avg = iwl_get_max_txpower_avg(cfg(priv), txp_array, idx,
&max_txp_avg_halfdbm);
/*
@@ -646,12 +648,13 @@ void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv)
/**
* iwl_eeprom_init - read EEPROM contents
*
- * Load the EEPROM contents from adapter into priv->eeprom
+ * Load the EEPROM contents from adapter into shrd->eeprom
*
* NOTE: This routine uses the non-debug IO access functions.
*/
int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
{
+ struct iwl_shared *shrd = priv->shrd;
__le16 *e;
u32 gp = iwl_read32(bus(priv), CSR_EEPROM_GP);
int sz;
@@ -660,22 +663,22 @@ int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
u16 validblockaddr = 0;
u16 cache_addr = 0;
- priv->nvm_device_type = iwl_get_nvm_type(priv, hw_rev);
- if (priv->nvm_device_type == -ENOENT)
+ trans(priv)->nvm_device_type = iwl_get_nvm_type(bus(priv), hw_rev);
+ if (trans(priv)->nvm_device_type == -ENOENT)
return -ENOENT;
/* allocate eeprom */
- sz = priv->cfg->base_params->eeprom_size;
+ sz = cfg(priv)->base_params->eeprom_size;
IWL_DEBUG_EEPROM(priv, "NVM size = %d\n", sz);
- priv->eeprom = kzalloc(sz, GFP_KERNEL);
- if (!priv->eeprom) {
+ shrd->eeprom = kzalloc(sz, GFP_KERNEL);
+ if (!shrd->eeprom) {
ret = -ENOMEM;
goto alloc_err;
}
- e = (__le16 *)priv->eeprom;
+ e = (__le16 *)shrd->eeprom;
iwl_apm_init(priv);
- ret = iwl_eeprom_verify_signature(priv);
+ ret = iwl_eeprom_verify_signature(trans(priv));
if (ret < 0) {
IWL_ERR(priv, "EEPROM not found, EEPROM_GP=0x%08x\n", gp);
ret = -ENOENT;
@@ -683,16 +686,16 @@ int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
}
/* Make sure driver (instead of uCode) is allowed to read EEPROM */
- ret = iwl_eeprom_acquire_semaphore(priv);
+ ret = iwl_eeprom_acquire_semaphore(bus(priv));
if (ret < 0) {
IWL_ERR(priv, "Failed to acquire EEPROM semaphore.\n");
ret = -ENOENT;
goto err;
}
- if (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP) {
+ if (trans(priv)->nvm_device_type == NVM_DEVICE_TYPE_OTP) {
- ret = iwl_init_otp_access(priv);
+ ret = iwl_init_otp_access(bus(priv));
if (ret) {
IWL_ERR(priv, "Failed to initialize OTP access.\n");
ret = -ENOENT;
@@ -706,8 +709,8 @@ int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK |
CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
/* traversing the linked list if no shadow ram supported */
- if (!priv->cfg->base_params->shadow_ram_support) {
- if (iwl_find_otp_image(priv, &validblockaddr)) {
+ if (!cfg(priv)->base_params->shadow_ram_support) {
+ if (iwl_find_otp_image(bus(priv), &validblockaddr)) {
ret = -ENOENT;
goto done;
}
@@ -716,7 +719,7 @@ int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
addr += sizeof(u16)) {
__le16 eeprom_data;
- ret = iwl_read_otp_word(priv, addr, &eeprom_data);
+ ret = iwl_read_otp_word(bus(priv), addr, &eeprom_data);
if (ret)
goto done;
e[cache_addr / 2] = eeprom_data;
@@ -744,27 +747,27 @@ int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
}
IWL_DEBUG_EEPROM(priv, "NVM Type: %s, version: 0x%x\n",
- (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP)
+ (trans(priv)->nvm_device_type == NVM_DEVICE_TYPE_OTP)
? "OTP" : "EEPROM",
- iwl_eeprom_query16(priv, EEPROM_VERSION));
+ iwl_eeprom_query16(shrd, EEPROM_VERSION));
ret = 0;
done:
- iwl_eeprom_release_semaphore(priv);
+ iwl_eeprom_release_semaphore(bus(priv));
err:
if (ret)
- iwl_eeprom_free(priv);
+ iwl_eeprom_free(priv->shrd);
/* Reset chip to save power until we load uCode during "up". */
iwl_apm_stop(priv);
alloc_err:
return ret;
}
-void iwl_eeprom_free(struct iwl_priv *priv)
+void iwl_eeprom_free(struct iwl_shared *shrd)
{
- kfree(priv->eeprom);
- priv->eeprom = NULL;
+ kfree(shrd->eeprom);
+ shrd->eeprom = NULL;
}
static void iwl_init_band_reference(const struct iwl_priv *priv,
@@ -772,49 +775,50 @@ static void iwl_init_band_reference(const struct iwl_priv *priv,
const struct iwl_eeprom_channel **eeprom_ch_info,
const u8 **eeprom_ch_index)
{
- u32 offset = priv->cfg->lib->
+ struct iwl_shared *shrd = priv->shrd;
+ u32 offset = cfg(priv)->lib->
eeprom_ops.regulatory_bands[eep_band - 1];
switch (eep_band) {
case 1: /* 2.4GHz band */
*eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_1);
*eeprom_ch_info = (struct iwl_eeprom_channel *)
- iwl_eeprom_query_addr(priv, offset);
+ iwl_eeprom_query_addr(shrd, offset);
*eeprom_ch_index = iwl_eeprom_band_1;
break;
case 2: /* 4.9GHz band */
*eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_2);
*eeprom_ch_info = (struct iwl_eeprom_channel *)
- iwl_eeprom_query_addr(priv, offset);
+ iwl_eeprom_query_addr(shrd, offset);
*eeprom_ch_index = iwl_eeprom_band_2;
break;
case 3: /* 5.2GHz band */
*eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_3);
*eeprom_ch_info = (struct iwl_eeprom_channel *)
- iwl_eeprom_query_addr(priv, offset);
+ iwl_eeprom_query_addr(shrd, offset);
*eeprom_ch_index = iwl_eeprom_band_3;
break;
case 4: /* 5.5GHz band */
*eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_4);
*eeprom_ch_info = (struct iwl_eeprom_channel *)
- iwl_eeprom_query_addr(priv, offset);
+ iwl_eeprom_query_addr(shrd, offset);
*eeprom_ch_index = iwl_eeprom_band_4;
break;
case 5: /* 5.7GHz band */
*eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_5);
*eeprom_ch_info = (struct iwl_eeprom_channel *)
- iwl_eeprom_query_addr(priv, offset);
+ iwl_eeprom_query_addr(shrd, offset);
*eeprom_ch_index = iwl_eeprom_band_5;
break;
case 6: /* 2.4GHz ht40 channels */
*eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_6);
*eeprom_ch_info = (struct iwl_eeprom_channel *)
- iwl_eeprom_query_addr(priv, offset);
+ iwl_eeprom_query_addr(shrd, offset);
*eeprom_ch_index = iwl_eeprom_band_6;
break;
case 7: /* 5 GHz ht40 channels */
*eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_7);
*eeprom_ch_info = (struct iwl_eeprom_channel *)
- iwl_eeprom_query_addr(priv, offset);
+ iwl_eeprom_query_addr(shrd, offset);
*eeprom_ch_index = iwl_eeprom_band_7;
break;
default:
@@ -979,9 +983,9 @@ int iwl_init_channel_map(struct iwl_priv *priv)
}
/* Check if we do have HT40 channels */
- if (priv->cfg->lib->eeprom_ops.regulatory_bands[5] ==
+ if (cfg(priv)->lib->eeprom_ops.regulatory_bands[5] ==
EEPROM_REGULATORY_BAND_NO_HT40 &&
- priv->cfg->lib->eeprom_ops.regulatory_bands[6] ==
+ cfg(priv)->lib->eeprom_ops.regulatory_bands[6] ==
EEPROM_REGULATORY_BAND_NO_HT40)
return 0;
@@ -1017,8 +1021,8 @@ int iwl_init_channel_map(struct iwl_priv *priv)
* driver needs to process additional information
* to determine the max channel tx power limits
*/
- if (priv->cfg->lib->eeprom_ops.update_enhanced_txpower)
- priv->cfg->lib->eeprom_ops.update_enhanced_txpower(priv);
+ if (cfg(priv)->lib->eeprom_ops.update_enhanced_txpower)
+ cfg(priv)->lib->eeprom_ops.update_enhanced_txpower(priv);
return 0;
}
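update_enhanced_txpower is invoked only when the per-device eeprom_ops supplies it, the usual optional-callback pattern. A minimal illustration with hypothetical names:

#include <stdio.h>

struct eeprom_ops {
	void (*update_enhanced_txpower)(void *priv);  /* may be NULL */
};

static void enh_txp(void *priv)
{
	(void)priv;
	puts("enhanced txpower applied");
}

int main(void)
{
	struct eeprom_ops with    = { .update_enhanced_txpower = enh_txp };
	struct eeprom_ops without = { NULL };

	/* only call the hook when the configuration provides one */
	if (with.update_enhanced_txpower)
		with.update_enhanced_txpower(NULL);
	if (without.update_enhanced_txpower)
		without.update_enhanced_txpower(NULL);
	return 0;
}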
@@ -1064,7 +1068,7 @@ void iwl_rf_config(struct iwl_priv *priv)
{
u16 radio_cfg;
- radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
+ radio_cfg = iwl_eeprom_query16(priv->shrd, EEPROM_RADIO_CONFIG);
/* write radio config values to register */
if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) <= EEPROM_RF_CONFIG_TYPE_MAX) {
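iwl_rf_config() reads the 16-bit radio configuration word and only programs the registers when the radio type field is within the supported range. A toy sketch of unpacking such a word; the masks and shifts here are invented and do not match the real EEPROM_RF_CFG_* macros:

#include <stdio.h>
#include <stdint.h>

/* invented field layout; the real EEPROM_RF_CFG_* macros are different */
#define RF_TYPE(x)  ((x) & 0x3)
#define RF_STEP(x)  (((x) >> 2) & 0x3)
#define RF_DASH(x)  (((x) >> 4) & 0x3)
#define RF_TYPE_MAX 0x2

int main(void)
{
	uint16_t radio_cfg = 0x0019;

	if (RF_TYPE(radio_cfg) <= RF_TYPE_MAX)      /* only program known radio types */
		printf("type=%u step=%u dash=%u\n",
		       (unsigned)RF_TYPE(radio_cfg),
		       (unsigned)RF_STEP(radio_cfg),
		       (unsigned)RF_DASH(radio_cfg));
	return 0;
}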
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
index c94747e7299e..9fa937ec35e3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
@@ -66,6 +66,7 @@
#include <net/mac80211.h>
struct iwl_priv;
+struct iwl_shared;
/*
* EEPROM access time values:
@@ -305,11 +306,11 @@ struct iwl_eeprom_ops {
int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev);
-void iwl_eeprom_free(struct iwl_priv *priv);
+void iwl_eeprom_free(struct iwl_shared *shrd);
int iwl_eeprom_check_version(struct iwl_priv *priv);
int iwl_eeprom_check_sku(struct iwl_priv *priv);
-const u8 *iwl_eeprom_query_addr(const struct iwl_priv *priv, size_t offset);
-u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset);
+const u8 *iwl_eeprom_query_addr(const struct iwl_shared *shrd, size_t offset);
+u16 iwl_eeprom_query16(const struct iwl_shared *shrd, size_t offset);
int iwl_init_channel_map(struct iwl_priv *priv);
void iwl_free_channel_map(struct iwl_priv *priv);
const struct iwl_channel_info *iwl_get_channel_info(
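The header change adds a forward declaration of struct iwl_shared so the new prototypes can take a pointer to it without pulling in its definition. A minimal single-file illustration of the forward-declaration pattern (hypothetical names):

/* a header only needs a forward declaration to pass pointers around */
struct widget_shared;                              /* opaque to header users */
const unsigned char *widget_query(const struct widget_shared *s, unsigned off);

/* the definition can live in a single .c file elsewhere */
struct widget_shared { unsigned char blob[8]; };

const unsigned char *widget_query(const struct widget_shared *s, unsigned off)
{
	return &s->blob[off];
}

int main(void)
{
	struct widget_shared w = { { 1, 2 } };
	return widget_query(&w, 1)[0] == 2 ? 0 : 1;
}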
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.c b/drivers/net/wireless/iwlwifi/iwl-io.c
index 3ffa8e62b856..d57ea6484bbe 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/iwlwifi/iwl-io.c
@@ -143,7 +143,7 @@ u32 iwl_read_direct32(struct iwl_bus *bus, u32 reg)
spin_lock_irqsave(&bus->reg_lock, flags);
iwl_grab_nic_access(bus);
- value = iwl_read32(bus(bus), reg);
+ value = iwl_read32(bus, reg);
iwl_release_nic_access(bus);
spin_unlock_irqrestore(&bus->reg_lock, flags);
@@ -283,16 +283,29 @@ u32 iwl_read_targ_mem(struct iwl_bus *bus, u32 addr)
return value;
}
-void iwl_write_targ_mem(struct iwl_bus *bus, u32 addr, u32 val)
+int _iwl_write_targ_mem_words(struct iwl_bus *bus, u32 addr,
+ void *buf, int words)
{
unsigned long flags;
+ int offs, result = 0;
+ u32 *vals = buf;
spin_lock_irqsave(&bus->reg_lock, flags);
if (!iwl_grab_nic_access(bus)) {
iwl_write32(bus, HBUS_TARG_MEM_WADDR, addr);
wmb();
- iwl_write32(bus, HBUS_TARG_MEM_WDAT, val);
+
+ for (offs = 0; offs < words; offs++)
+ iwl_write32(bus, HBUS_TARG_MEM_WDAT, vals[offs]);
iwl_release_nic_access(bus);
- }
+ } else
+ result = -EBUSY;
spin_unlock_irqrestore(&bus->reg_lock, flags);
+
+ return result;
+}
+
+int iwl_write_targ_mem(struct iwl_bus *bus, u32 addr, u32 val)
+{
+ return _iwl_write_targ_mem_words(bus, addr, &val, 1);
}
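The iwl-io.c change turns the single-word iwl_write_targ_mem() into a wrapper around a multi-word helper and lets callers see -EBUSY when NIC access cannot be grabbed. A standalone model of the burst-write-plus-error-propagation shape, with device access reduced to an array and a flag:

#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <stdbool.h>

static uint32_t target_mem[64];          /* stand-in for device memory */
static bool nic_access_ok = true;        /* stand-in for grabbing NIC access */

/* write 'words' 32-bit values starting at word address 'addr';
 * return -EBUSY if the device cannot be grabbed, 0 otherwise */
static int write_targ_mem_words(uint32_t addr, const void *buf, int words)
{
	const uint32_t *vals = buf;
	int offs;

	if (!nic_access_ok)
		return -EBUSY;
	for (offs = 0; offs < words; offs++)
		target_mem[addr + offs] = vals[offs];
	return 0;
}

static int write_targ_mem(uint32_t addr, uint32_t val)
{
	return write_targ_mem_words(addr, &val, 1);   /* single-word wrapper */
}

int main(void)
{
	uint32_t burst[3] = { 1, 2, 3 };
	int err = write_targ_mem_words(4, burst, 3);

	if (!err)
		err = write_targ_mem(0, 0xabcd);
	printf("err=%d mem[5]=%u\n", err, target_mem[5]);
	return 0;
}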
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h
index ced2cbeb6eae..aae2eeb331a8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/iwlwifi/iwl-io.h
@@ -85,6 +85,9 @@ void _iwl_read_targ_mem_words(struct iwl_bus *bus, u32 addr,
(bufsize) / sizeof(u32));\
} while (0)
+int _iwl_write_targ_mem_words(struct iwl_bus *bus, u32 addr,
+ void *buf, int words);
+
u32 iwl_read_targ_mem(struct iwl_bus *bus, u32 addr);
-void iwl_write_targ_mem(struct iwl_bus *bus, u32 addr, u32 val);
+int iwl_write_targ_mem(struct iwl_bus *bus, u32 addr, u32 val);
#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.c b/drivers/net/wireless/iwlwifi/iwl-led.c
index eb541735296c..14dcbfcdc0fd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-led.c
@@ -137,11 +137,11 @@ static int iwl_led_cmd(struct iwl_priv *priv,
}
IWL_DEBUG_LED(priv, "Led blink time compensation=%u\n",
- priv->cfg->base_params->led_compensation);
+ cfg(priv)->base_params->led_compensation);
led_cmd.on = iwl_blink_compensation(priv, on,
- priv->cfg->base_params->led_compensation);
+ cfg(priv)->base_params->led_compensation);
led_cmd.off = iwl_blink_compensation(priv, off,
- priv->cfg->base_params->led_compensation);
+ cfg(priv)->base_params->led_compensation);
ret = iwl_send_led_cmd(priv, &led_cmd);
if (!ret) {
@@ -178,7 +178,7 @@ void iwl_leds_init(struct iwl_priv *priv)
int ret;
if (mode == IWL_LED_DEFAULT)
- mode = priv->cfg->led_mode;
+ mode = cfg(priv)->led_mode;
priv->led.name = kasprintf(GFP_KERNEL, "%s-led",
wiphy_name(priv->hw->wiphy));
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.h b/drivers/net/wireless/iwlwifi/iwl-led.h
index 1c93dfef6933..2550b3c7dcbf 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.h
+++ b/drivers/net/wireless/iwlwifi/iwl-led.h
@@ -36,20 +36,6 @@ struct iwl_priv;
#define IWL_LED_ACTIVITY (0<<1)
#define IWL_LED_LINK (1<<1)
-/*
- * LED mode
- * IWL_LED_DEFAULT: use device default
- * IWL_LED_RF_STATE: turn LED on/off based on RF state
- * LED ON = RF ON
- * LED OFF = RF OFF
- * IWL_LED_BLINK: adjust led blink rate based on blink table
- */
-enum iwl_led_mode {
- IWL_LED_DEFAULT,
- IWL_LED_RF_STATE,
- IWL_LED_BLINK,
-};
-
void iwlagn_led_enable(struct iwl_priv *priv);
void iwl_leds_init(struct iwl_priv *priv);
void iwl_leds_exit(struct iwl_priv *priv);
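enum iwl_led_mode disappears from iwl-led.h here, presumably relocated to a shared header as part of the driver split; iwl_leds_init() still resolves IWL_LED_DEFAULT to the per-device config value. A tiny sketch of that default-fallback, with invented names:

#include <stdio.h>

enum led_mode { LED_DEFAULT, LED_RF_STATE, LED_BLINK };

int main(void)
{
	enum led_mode mode = LED_DEFAULT;     /* e.g. from a module parameter */
	enum led_mode cfg_led_mode = LED_BLINK;

	if (mode == LED_DEFAULT)              /* defer to the per-device default */
		mode = cfg_led_mode;
	printf("using mode %d\n", mode);
	return 0;
}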
diff --git a/drivers/net/wireless/iwlwifi/iwl-mac80211.c b/drivers/net/wireless/iwlwifi/iwl-mac80211.c
new file mode 100644
index 000000000000..f980e574e1f9
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-mac80211.c
@@ -0,0 +1,1601 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/firmware.h>
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+
+#include <net/mac80211.h>
+
+#include <asm/div64.h>
+
+#include "iwl-eeprom.h"
+#include "iwl-wifi.h"
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-io.h"
+#include "iwl-agn-calib.h"
+#include "iwl-agn.h"
+#include "iwl-shared.h"
+#include "iwl-bus.h"
+#include "iwl-trans.h"
+
+/*****************************************************************************
+ *
+ * mac80211 entry point functions
+ *
+ *****************************************************************************/
+
+static const struct ieee80211_iface_limit iwlagn_sta_ap_limits[] = {
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_STATION),
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_AP),
+ },
+};
+
+static const struct ieee80211_iface_limit iwlagn_2sta_limits[] = {
+ {
+ .max = 2,
+ .types = BIT(NL80211_IFTYPE_STATION),
+ },
+};
+
+static const struct ieee80211_iface_limit iwlagn_p2p_sta_go_limits[] = {
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_STATION),
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_P2P_GO) |
+ BIT(NL80211_IFTYPE_AP),
+ },
+};
+
+static const struct ieee80211_iface_limit iwlagn_p2p_2sta_limits[] = {
+ {
+ .max = 2,
+ .types = BIT(NL80211_IFTYPE_STATION),
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_P2P_CLIENT),
+ },
+};
+
+static const struct ieee80211_iface_combination
+iwlagn_iface_combinations_dualmode[] = {
+ { .num_different_channels = 1,
+ .max_interfaces = 2,
+ .beacon_int_infra_match = true,
+ .limits = iwlagn_sta_ap_limits,
+ .n_limits = ARRAY_SIZE(iwlagn_sta_ap_limits),
+ },
+ { .num_different_channels = 1,
+ .max_interfaces = 2,
+ .limits = iwlagn_2sta_limits,
+ .n_limits = ARRAY_SIZE(iwlagn_2sta_limits),
+ },
+};
+
+static const struct ieee80211_iface_combination
+iwlagn_iface_combinations_p2p[] = {
+ { .num_different_channels = 1,
+ .max_interfaces = 2,
+ .beacon_int_infra_match = true,
+ .limits = iwlagn_p2p_sta_go_limits,
+ .n_limits = ARRAY_SIZE(iwlagn_p2p_sta_go_limits),
+ },
+ { .num_different_channels = 1,
+ .max_interfaces = 2,
+ .limits = iwlagn_p2p_2sta_limits,
+ .n_limits = ARRAY_SIZE(iwlagn_p2p_2sta_limits),
+ },
+};
+
+/*
+ * Not a mac80211 entry point function, but it fits in with all the
+ * other mac80211 functions grouped here.
+ */
+int iwlagn_mac_setup_register(struct iwl_priv *priv,
+ struct iwlagn_ucode_capabilities *capa)
+{
+ int ret;
+ struct ieee80211_hw *hw = priv->hw;
+ struct iwl_rxon_context *ctx;
+
+ hw->rate_control_algorithm = "iwl-agn-rs";
+
+ /* Tell mac80211 our characteristics */
+ hw->flags = IEEE80211_HW_SIGNAL_DBM |
+ IEEE80211_HW_AMPDU_AGGREGATION |
+ IEEE80211_HW_NEED_DTIM_PERIOD |
+ IEEE80211_HW_SPECTRUM_MGMT |
+ IEEE80211_HW_REPORTS_TX_ACK_STATUS;
+
+ /*
+ * Including the following line will crash some APs. This
+ * workaround removes the stimulus which causes the crash until
+ * the AP software can be fixed.
+ hw->max_tx_aggregation_subframes = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
+ */
+
+ hw->flags |= IEEE80211_HW_SUPPORTS_PS |
+ IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
+
+ if (cfg(priv)->sku & EEPROM_SKU_CAP_11N_ENABLE)
+ hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
+ IEEE80211_HW_SUPPORTS_STATIC_SMPS;
+
+ if (capa->flags & IWL_UCODE_TLV_FLAGS_MFP)
+ hw->flags |= IEEE80211_HW_MFP_CAPABLE;
+
+ hw->sta_data_size = sizeof(struct iwl_station_priv);
+ hw->vif_data_size = sizeof(struct iwl_vif_priv);
+
+ for_each_context(priv, ctx) {
+ hw->wiphy->interface_modes |= ctx->interface_modes;
+ hw->wiphy->interface_modes |= ctx->exclusive_interface_modes;
+ }
+
+ BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
+
+ if (hw->wiphy->interface_modes & BIT(NL80211_IFTYPE_P2P_CLIENT)) {
+ hw->wiphy->iface_combinations = iwlagn_iface_combinations_p2p;
+ hw->wiphy->n_iface_combinations =
+ ARRAY_SIZE(iwlagn_iface_combinations_p2p);
+ } else if (hw->wiphy->interface_modes & BIT(NL80211_IFTYPE_AP)) {
+ hw->wiphy->iface_combinations =
+ iwlagn_iface_combinations_dualmode;
+ hw->wiphy->n_iface_combinations =
+ ARRAY_SIZE(iwlagn_iface_combinations_dualmode);
+ }
+
+ hw->wiphy->max_remain_on_channel_duration = 1000;
+
+ hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
+ WIPHY_FLAG_DISABLE_BEACON_HINTS |
+ WIPHY_FLAG_IBSS_RSN;
+
+ if (trans(priv)->ucode_wowlan.code.len &&
+ device_can_wakeup(bus(priv)->dev)) {
+ hw->wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT |
+ WIPHY_WOWLAN_DISCONNECT |
+ WIPHY_WOWLAN_EAP_IDENTITY_REQ |
+ WIPHY_WOWLAN_RFKILL_RELEASE;
+ if (!iwlagn_mod_params.sw_crypto)
+ hw->wiphy->wowlan.flags |=
+ WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
+ WIPHY_WOWLAN_GTK_REKEY_FAILURE;
+
+ hw->wiphy->wowlan.n_patterns = IWLAGN_WOWLAN_MAX_PATTERNS;
+ hw->wiphy->wowlan.pattern_min_len =
+ IWLAGN_WOWLAN_MIN_PATTERN_LEN;
+ hw->wiphy->wowlan.pattern_max_len =
+ IWLAGN_WOWLAN_MAX_PATTERN_LEN;
+ }
+
+ if (iwlagn_mod_params.power_save)
+ hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
+ else
+ hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+
+ hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
+ /* we create the 802.11 header and a zero-length SSID element */
+ hw->wiphy->max_scan_ie_len = capa->max_probe_length - 24 - 2;
+
+ /* Default value; 4 EDCA QOS priorities */
+ hw->queues = 4;
+
+ hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
+
+ if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
+ priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
+ &priv->bands[IEEE80211_BAND_2GHZ];
+ if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
+ priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
+ &priv->bands[IEEE80211_BAND_5GHZ];
+
+ hw->wiphy->hw_version = bus_get_hw_id(bus(priv));
+
+ iwl_leds_init(priv);
+
+ ret = ieee80211_register_hw(priv->hw);
+ if (ret) {
+ IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
+ return ret;
+ }
+ priv->mac80211_registered = 1;
+
+ return 0;
+}
+
+void iwlagn_mac_unregister(struct iwl_priv *priv)
+{
+ if (!priv->mac80211_registered)
+ return;
+ iwl_leds_exit(priv);
+ ieee80211_unregister_hw(priv->hw);
+ priv->mac80211_registered = 0;
+}
+
+static int __iwl_up(struct iwl_priv *priv)
+{
+ struct iwl_rxon_context *ctx;
+ int ret;
+
+ lockdep_assert_held(&priv->shrd->mutex);
+
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status)) {
+ IWL_WARN(priv, "Exit pending; will not bring the NIC up\n");
+ return -EIO;
+ }
+
+ for_each_context(priv, ctx) {
+ ret = iwlagn_alloc_bcast_station(priv, ctx);
+ if (ret) {
+ iwl_dealloc_bcast_stations(priv);
+ return ret;
+ }
+ }
+
+ ret = iwl_run_init_ucode(trans(priv));
+ if (ret) {
+ IWL_ERR(priv, "Failed to run INIT ucode: %d\n", ret);
+ goto error;
+ }
+
+ ret = iwl_load_ucode_wait_alive(trans(priv), IWL_UCODE_REGULAR);
+ if (ret) {
+ IWL_ERR(priv, "Failed to start RT ucode: %d\n", ret);
+ goto error;
+ }
+
+ ret = iwl_alive_start(priv);
+ if (ret)
+ goto error;
+ return 0;
+
+ error:
+ set_bit(STATUS_EXIT_PENDING, &priv->shrd->status);
+ __iwl_down(priv);
+ clear_bit(STATUS_EXIT_PENDING, &priv->shrd->status);
+
+ IWL_ERR(priv, "Unable to initialize device.\n");
+ return ret;
+}
+
+static int iwlagn_mac_start(struct ieee80211_hw *hw)
+{
+ struct iwl_priv *priv = hw->priv;
+ int ret;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+
+ /* we should be verifying the device is ready to be opened */
+ mutex_lock(&priv->shrd->mutex);
+ ret = __iwl_up(priv);
+ mutex_unlock(&priv->shrd->mutex);
+ if (ret)
+ return ret;
+
+ IWL_DEBUG_INFO(priv, "Start UP work done.\n");
+
+ /* Now we should be done, and the READY bit should be set. */
+ if (WARN_ON(!test_bit(STATUS_READY, &priv->shrd->status)))
+ ret = -EIO;
+
+ iwlagn_led_enable(priv);
+
+ priv->is_open = 1;
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+ return 0;
+}
+
+static void iwlagn_mac_stop(struct ieee80211_hw *hw)
+{
+ struct iwl_priv *priv = hw->priv;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+
+ if (!priv->is_open)
+ return;
+
+ priv->is_open = 0;
+
+ iwl_down(priv);
+
+ flush_workqueue(priv->shrd->workqueue);
+
+ /* User space software may expect getting rfkill changes
+ * even if interface is down */
+ iwl_write32(bus(priv), CSR_INT, 0xFFFFFFFF);
+ iwl_enable_rfkill_int(priv);
+
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+
+static void iwlagn_mac_set_rekey_data(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_gtk_rekey_data *data)
+{
+ struct iwl_priv *priv = hw->priv;
+
+ if (iwlagn_mod_params.sw_crypto)
+ return;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+ mutex_lock(&priv->shrd->mutex);
+
+ if (priv->contexts[IWL_RXON_CTX_BSS].vif != vif)
+ goto out;
+
+ memcpy(priv->kek, data->kek, NL80211_KEK_LEN);
+ memcpy(priv->kck, data->kck, NL80211_KCK_LEN);
+ priv->replay_ctr =
+ cpu_to_le64(be64_to_cpup((__be64 *)&data->replay_ctr));
+ priv->have_rekey_data = true;
+
+ out:
+ mutex_unlock(&priv->shrd->mutex);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+
+#ifdef CONFIG_PM_SLEEP
+
+static int iwlagn_mac_suspend(struct ieee80211_hw *hw,
+ struct cfg80211_wowlan *wowlan)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+ int ret;
+
+ if (WARN_ON(!wowlan))
+ return -EINVAL;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+ mutex_lock(&priv->shrd->mutex);
+
+ /* Don't attempt WoWLAN when not associated, tear down instead. */
+ if (!ctx->vif || ctx->vif->type != NL80211_IFTYPE_STATION ||
+ !iwl_is_associated_ctx(ctx)) {
+ ret = 1;
+ goto out;
+ }
+
+ ret = iwlagn_suspend(priv, hw, wowlan);
+ if (ret)
+ goto error;
+
+ device_set_wakeup_enable(bus(priv)->dev, true);
+
+ /* Now let the ucode operate on its own */
+ iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_SET,
+ CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
+
+ goto out;
+
+ error:
+ priv->shrd->wowlan = false;
+ iwlagn_prepare_restart(priv);
+ ieee80211_restart_hw(priv->hw);
+ out:
+ mutex_unlock(&priv->shrd->mutex);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+
+ return ret;
+}
+
+static int iwlagn_mac_resume(struct ieee80211_hw *hw)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+ struct ieee80211_vif *vif;
+ unsigned long flags;
+ u32 base, status = 0xffffffff;
+ int ret = -EIO;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+ mutex_lock(&priv->shrd->mutex);
+
+ iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_CLR,
+ CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
+
+ base = priv->shrd->device_pointers.error_event_table;
+ if (iwlagn_hw_valid_rtc_data_addr(base)) {
+ spin_lock_irqsave(&bus(priv)->reg_lock, flags);
+ ret = iwl_grab_nic_access_silent(bus(priv));
+ if (ret == 0) {
+ iwl_write32(bus(priv), HBUS_TARG_MEM_RADDR, base);
+ status = iwl_read32(bus(priv), HBUS_TARG_MEM_RDAT);
+ iwl_release_nic_access(bus(priv));
+ }
+ spin_unlock_irqrestore(&bus(priv)->reg_lock, flags);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if (ret == 0) {
+ struct iwl_trans *trans = trans(priv);
+ if (!priv->wowlan_sram)
+ priv->wowlan_sram =
+ kzalloc(trans->ucode_wowlan.data.len,
+ GFP_KERNEL);
+
+ if (priv->wowlan_sram)
+ _iwl_read_targ_mem_words(
+ bus(priv), 0x800000, priv->wowlan_sram,
+ trans->ucode_wowlan.data.len / 4);
+ }
+#endif
+ }
+
+ /* we'll clear ctx->vif during iwlagn_prepare_restart() */
+ vif = ctx->vif;
+
+ priv->shrd->wowlan = false;
+
+ device_set_wakeup_enable(bus(priv)->dev, false);
+
+ iwlagn_prepare_restart(priv);
+
+ memset((void *)&ctx->active, 0, sizeof(ctx->active));
+ iwl_connection_init_rx_config(priv, ctx);
+ iwlagn_set_rxon_chain(priv, ctx);
+
+ mutex_unlock(&priv->shrd->mutex);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+
+ ieee80211_resume_disconnect(vif);
+
+ return 1;
+}
+
+#endif
+
+static void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+ struct iwl_priv *priv = hw->priv;
+
+ IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
+ ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
+
+ if (iwlagn_tx_skb(priv, skb))
+ dev_kfree_skb_any(skb);
+}
+
+static void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_key_conf *keyconf,
+ struct ieee80211_sta *sta,
+ u32 iv32, u16 *phase1key)
+{
+ struct iwl_priv *priv = hw->priv;
+
+ iwl_update_tkip_key(priv, vif, keyconf, sta, iv32, phase1key);
+}
+
+static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+ struct iwl_rxon_context *ctx = vif_priv->ctx;
+ int ret;
+ bool is_default_wep_key = false;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+
+ if (iwlagn_mod_params.sw_crypto) {
+ IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n");
+ return -EOPNOTSUPP;
+ }
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
+ /* fall through */
+ case WLAN_CIPHER_SUITE_CCMP:
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * We could program these keys into the hardware as well, but we
+ * don't expect much multicast traffic in IBSS and having keys
+ * for more stations is probably more useful.
+ *
+ * Mark key TX-only and return 0.
+ */
+ if (vif->type == NL80211_IFTYPE_ADHOC &&
+ !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
+ key->hw_key_idx = WEP_INVALID_OFFSET;
+ return 0;
+ }
+
+ /* If the key was TX-only, accept deletion */
+ if (cmd == DISABLE_KEY && key->hw_key_idx == WEP_INVALID_OFFSET)
+ return 0;
+
+ mutex_lock(&priv->shrd->mutex);
+ iwl_scan_cancel_timeout(priv, 100);
+
+ BUILD_BUG_ON(WEP_INVALID_OFFSET == IWLAGN_HW_KEY_DEFAULT);
+
+ /*
+ * If we are getting WEP group key and we didn't receive any key mapping
+ * so far, we are in legacy wep mode (group key only), otherwise we are
+ * in 1X mode.
+ * In legacy wep mode, we use another host command to the uCode.
+ */
+ if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+ key->cipher == WLAN_CIPHER_SUITE_WEP104) && !sta) {
+ if (cmd == SET_KEY)
+ is_default_wep_key = !ctx->key_mapping_keys;
+ else
+ is_default_wep_key =
+ key->hw_key_idx == IWLAGN_HW_KEY_DEFAULT;
+ }
+
+
+ switch (cmd) {
+ case SET_KEY:
+ if (is_default_wep_key) {
+ ret = iwl_set_default_wep_key(priv, vif_priv->ctx, key);
+ break;
+ }
+ ret = iwl_set_dynamic_key(priv, vif_priv->ctx, key, sta);
+ if (ret) {
+ /*
+ * can't add key for RX, but we don't need it
+ * in the device for TX so still return 0
+ */
+ ret = 0;
+ key->hw_key_idx = WEP_INVALID_OFFSET;
+ }
+
+ IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n");
+ break;
+ case DISABLE_KEY:
+ if (is_default_wep_key)
+ ret = iwl_remove_default_wep_key(priv, ctx, key);
+ else
+ ret = iwl_remove_dynamic_key(priv, ctx, key, sta);
+
+ IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n");
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&priv->shrd->mutex);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+
+ return ret;
+}
+
+static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum ieee80211_ampdu_mlme_action action,
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+ u8 buf_size)
+{
+ struct iwl_priv *priv = hw->priv;
+ int ret = -EINVAL;
+ struct iwl_station_priv *sta_priv = (void *) sta->drv_priv;
+
+ IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
+ sta->addr, tid);
+
+ if (!(cfg(priv)->sku & EEPROM_SKU_CAP_11N_ENABLE))
+ return -EACCES;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+ mutex_lock(&priv->shrd->mutex);
+
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+ if (iwlagn_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
+ break;
+ IWL_DEBUG_HT(priv, "start Rx\n");
+ ret = iwl_sta_rx_agg_start(priv, sta, tid, *ssn);
+ break;
+ case IEEE80211_AMPDU_RX_STOP:
+ IWL_DEBUG_HT(priv, "stop Rx\n");
+ ret = iwl_sta_rx_agg_stop(priv, sta, tid);
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
+ ret = 0;
+ break;
+ case IEEE80211_AMPDU_TX_START:
+ if (iwlagn_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
+ break;
+ IWL_DEBUG_HT(priv, "start Tx\n");
+ ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn);
+ break;
+ case IEEE80211_AMPDU_TX_STOP:
+ IWL_DEBUG_HT(priv, "stop Tx\n");
+ ret = iwlagn_tx_agg_stop(priv, vif, sta, tid);
+ if ((ret == 0) && (priv->agg_tids_count > 0)) {
+ priv->agg_tids_count--;
+ IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n",
+ priv->agg_tids_count);
+ }
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
+ ret = 0;
+ if (!priv->agg_tids_count && cfg(priv)->ht_params &&
+ cfg(priv)->ht_params->use_rts_for_aggregation) {
+ /*
+ * switch off RTS/CTS if it was previously enabled
+ */
+ sta_priv->lq_sta.lq.general_params.flags &=
+ ~LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
+ iwl_send_lq_cmd(priv, iwl_rxon_ctx_from_vif(vif),
+ &sta_priv->lq_sta.lq, CMD_ASYNC, false);
+ }
+ break;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ ret = iwlagn_tx_agg_oper(priv, vif, sta, tid, buf_size);
+ break;
+ }
+ mutex_unlock(&priv->shrd->mutex);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+ return ret;
+}
+
+static int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
+ struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+ bool is_ap = vif->type == NL80211_IFTYPE_STATION;
+ int ret = 0;
+ u8 sta_id;
+
+ IWL_DEBUG_MAC80211(priv, "received request to add station %pM\n",
+ sta->addr);
+ mutex_lock(&priv->shrd->mutex);
+ IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n",
+ sta->addr);
+ sta_priv->sta_id = IWL_INVALID_STATION;
+
+ atomic_set(&sta_priv->pending_frames, 0);
+ if (vif->type == NL80211_IFTYPE_AP)
+ sta_priv->client = true;
+
+ ret = iwl_add_station_common(priv, vif_priv->ctx, sta->addr,
+ is_ap, sta, &sta_id);
+ if (ret) {
+ IWL_ERR(priv, "Unable to add station %pM (%d)\n",
+ sta->addr, ret);
+ /* Should we return success if return code is EEXIST ? */
+ goto out;
+ }
+
+ sta_priv->sta_id = sta_id;
+
+ /* Initialize rate scaling */
+ IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n",
+ sta->addr);
+ iwl_rs_rate_init(priv, sta, sta_id);
+ out:
+ mutex_unlock(&priv->shrd->mutex);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+
+ return ret;
+}
+
+static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_channel_switch *ch_switch)
+{
+ struct iwl_priv *priv = hw->priv;
+ const struct iwl_channel_info *ch_info;
+ struct ieee80211_conf *conf = &hw->conf;
+ struct ieee80211_channel *channel = ch_switch->channel;
+ struct iwl_ht_config *ht_conf = &priv->current_ht_config;
+ /*
+ * MULTI-FIXME
+ * When we add support for multiple interfaces, we need to
+ * revisit this. The channel switch command in the device
+ * only affects the BSS context, but what does that really
+ * mean? And what if we get a CSA on the second interface?
+ * This needs a lot of work.
+ */
+ struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+ u16 ch;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+
+ mutex_lock(&priv->shrd->mutex);
+
+ if (iwl_is_rfkill(priv->shrd))
+ goto out;
+
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status) ||
+ test_bit(STATUS_SCANNING, &priv->shrd->status) ||
+ test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->shrd->status))
+ goto out;
+
+ if (!iwl_is_associated_ctx(ctx))
+ goto out;
+
+ if (!cfg(priv)->lib->set_channel_switch)
+ goto out;
+
+ ch = channel->hw_value;
+ if (le16_to_cpu(ctx->active.channel) == ch)
+ goto out;
+
+ ch_info = iwl_get_channel_info(priv, channel->band, ch);
+ if (!is_channel_valid(ch_info)) {
+ IWL_DEBUG_MAC80211(priv, "invalid channel\n");
+ goto out;
+ }
+
+ spin_lock_irq(&priv->shrd->lock);
+
+ priv->current_ht_config.smps = conf->smps_mode;
+
+ /* Configure HT40 channels */
+ ctx->ht.enabled = conf_is_ht(conf);
+ if (ctx->ht.enabled)
+ iwlagn_config_ht40(conf, ctx);
+ else
+ ctx->ht.is_40mhz = false;
+
+ if ((le16_to_cpu(ctx->staging.channel) != ch))
+ ctx->staging.flags = 0;
+
+ iwl_set_rxon_channel(priv, channel, ctx);
+ iwl_set_rxon_ht(priv, ht_conf);
+ iwl_set_flags_for_band(priv, ctx, channel->band, ctx->vif);
+
+ spin_unlock_irq(&priv->shrd->lock);
+
+ iwl_set_rate(priv);
+ /*
+ * at this point, staging_rxon has the
+ * configuration for channel switch
+ */
+ set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->shrd->status);
+ priv->switch_channel = cpu_to_le16(ch);
+ if (cfg(priv)->lib->set_channel_switch(priv, ch_switch)) {
+ clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->shrd->status);
+ priv->switch_channel = 0;
+ ieee80211_chswitch_done(ctx->vif, false);
+ }
+
+out:
+ mutex_unlock(&priv->shrd->mutex);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+
+static void iwlagn_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags,
+ u64 multicast)
+{
+ struct iwl_priv *priv = hw->priv;
+ __le32 filter_or = 0, filter_nand = 0;
+ struct iwl_rxon_context *ctx;
+
+#define CHK(test, flag) do { \
+ if (*total_flags & (test)) \
+ filter_or |= (flag); \
+ else \
+ filter_nand |= (flag); \
+ } while (0)
+
+ IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
+ changed_flags, *total_flags);
+
+ CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
+ /* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */
+ CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
+ CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
+
+#undef CHK
+
+ mutex_lock(&priv->shrd->mutex);
+
+ for_each_context(priv, ctx) {
+ ctx->staging.filter_flags &= ~filter_nand;
+ ctx->staging.filter_flags |= filter_or;
+
+ /*
+ * Not committing directly because hardware can perform a scan,
+ * but we'll eventually commit the filter flags change anyway.
+ */
+ }
+
+ mutex_unlock(&priv->shrd->mutex);
+
+ /*
+ * Receiving all multicast frames is always enabled by the
+ * default flags setup in iwl_connection_init_rx_config()
+ * since we currently do not support programming multicast
+ * filters into the device.
+ */
+ *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
+ FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
+}
+
+static void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop)
+{
+ struct iwl_priv *priv = hw->priv;
+
+ mutex_lock(&priv->shrd->mutex);
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status)) {
+ IWL_DEBUG_TX(priv, "Aborting flush due to device shutdown\n");
+ goto done;
+ }
+ if (iwl_is_rfkill(priv->shrd)) {
+ IWL_DEBUG_TX(priv, "Aborting flush due to RF Kill\n");
+ goto done;
+ }
+
+ /*
+ * mac80211 will not push any more frames for transmit
+ * until the flush is completed
+ */
+ if (drop) {
+ IWL_DEBUG_MAC80211(priv, "send flush command\n");
+ if (iwlagn_txfifo_flush(priv, IWL_DROP_ALL)) {
+ IWL_ERR(priv, "flush request fail\n");
+ goto done;
+ }
+ }
+ IWL_DEBUG_MAC80211(priv, "wait transmit/flush all frames\n");
+ iwl_trans_wait_tx_queue_empty(trans(priv));
+done:
+ mutex_unlock(&priv->shrd->mutex);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+
+static int iwlagn_mac_remain_on_channel(struct ieee80211_hw *hw,
+ struct ieee80211_channel *channel,
+ enum nl80211_channel_type channel_type,
+ int duration)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_PAN];
+ int err = 0;
+
+ if (!(priv->shrd->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
+ return -EOPNOTSUPP;
+
+ if (!(ctx->interface_modes & BIT(NL80211_IFTYPE_P2P_CLIENT)))
+ return -EOPNOTSUPP;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+ mutex_lock(&priv->shrd->mutex);
+
+ if (test_bit(STATUS_SCAN_HW, &priv->shrd->status)) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ priv->hw_roc_channel = channel;
+ priv->hw_roc_chantype = channel_type;
+ /* convert from ms to TU */
+ priv->hw_roc_duration = DIV_ROUND_UP(1000 * duration, 1024);
+ priv->hw_roc_start_notified = false;
+ cancel_delayed_work(&priv->hw_roc_disable_work);
+
+ if (!ctx->is_active) {
+ static const struct iwl_qos_info default_qos_data = {
+ .def_qos_parm = {
+ .ac[0] = {
+ .cw_min = cpu_to_le16(3),
+ .cw_max = cpu_to_le16(7),
+ .aifsn = 2,
+ .edca_txop = cpu_to_le16(1504),
+ },
+ .ac[1] = {
+ .cw_min = cpu_to_le16(7),
+ .cw_max = cpu_to_le16(15),
+ .aifsn = 2,
+ .edca_txop = cpu_to_le16(3008),
+ },
+ .ac[2] = {
+ .cw_min = cpu_to_le16(15),
+ .cw_max = cpu_to_le16(1023),
+ .aifsn = 3,
+ },
+ .ac[3] = {
+ .cw_min = cpu_to_le16(15),
+ .cw_max = cpu_to_le16(1023),
+ .aifsn = 7,
+ },
+ },
+ };
+
+ ctx->is_active = true;
+ ctx->qos_data = default_qos_data;
+ ctx->staging.dev_type = RXON_DEV_TYPE_P2P;
+ memcpy(ctx->staging.node_addr,
+ priv->contexts[IWL_RXON_CTX_BSS].staging.node_addr,
+ ETH_ALEN);
+ memcpy(ctx->staging.bssid_addr,
+ priv->contexts[IWL_RXON_CTX_BSS].staging.node_addr,
+ ETH_ALEN);
+ err = iwlagn_commit_rxon(priv, ctx);
+ if (err)
+ goto out;
+ ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK |
+ RXON_FILTER_PROMISC_MSK |
+ RXON_FILTER_CTL2HOST_MSK;
+
+ err = iwlagn_commit_rxon(priv, ctx);
+ if (err) {
+ iwlagn_disable_roc(priv);
+ goto out;
+ }
+ priv->hw_roc_setup = true;
+ }
+
+ err = iwl_scan_initiate(priv, ctx->vif, IWL_SCAN_ROC, channel->band);
+ if (err)
+ iwlagn_disable_roc(priv);
+
+ out:
+ mutex_unlock(&priv->shrd->mutex);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+
+ return err;
+}
+
+static int iwlagn_mac_cancel_remain_on_channel(struct ieee80211_hw *hw)
+{
+ struct iwl_priv *priv = hw->priv;
+
+ if (!(priv->shrd->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
+ return -EOPNOTSUPP;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+ mutex_lock(&priv->shrd->mutex);
+ iwl_scan_cancel_timeout(priv, priv->hw_roc_duration);
+ iwlagn_disable_roc(priv);
+ mutex_unlock(&priv->shrd->mutex);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+
+ return 0;
+}
+
+static int iwlagn_mac_tx_sync(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const u8 *bssid,
+ enum ieee80211_tx_sync_type type)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+ struct iwl_rxon_context *ctx = vif_priv->ctx;
+ int ret;
+ u8 sta_id;
+
+ if (ctx->ctxid != IWL_RXON_CTX_PAN)
+ return 0;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+ mutex_lock(&priv->shrd->mutex);
+
+ if (iwl_is_associated_ctx(ctx)) {
+ ret = 0;
+ goto out;
+ }
+
+ if (ctx->preauth_bssid || test_bit(STATUS_SCAN_HW,
+ &priv->shrd->status)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ ret = iwl_add_station_common(priv, ctx, bssid, true, NULL, &sta_id);
+ if (ret)
+ goto out;
+
+ if (WARN_ON(sta_id != ctx->ap_sta_id)) {
+ ret = -EIO;
+ goto out_remove_sta;
+ }
+
+ memcpy(ctx->bssid, bssid, ETH_ALEN);
+ ctx->preauth_bssid = true;
+
+ ret = iwlagn_commit_rxon(priv, ctx);
+
+ if (ret == 0)
+ goto out;
+
+ out_remove_sta:
+ iwl_remove_station(priv, sta_id, bssid);
+ out:
+ mutex_unlock(&priv->shrd->mutex);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+
+ return ret;
+}
+
+static void iwlagn_mac_finish_tx_sync(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const u8 *bssid,
+ enum ieee80211_tx_sync_type type)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+ struct iwl_rxon_context *ctx = vif_priv->ctx;
+
+ if (ctx->ctxid != IWL_RXON_CTX_PAN)
+ return;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+ mutex_lock(&priv->shrd->mutex);
+
+ if (iwl_is_associated_ctx(ctx))
+ goto out;
+
+ iwl_remove_station(priv, ctx->ap_sta_id, bssid);
+ ctx->preauth_bssid = false;
+ /* no need to commit */
+ out:
+ mutex_unlock(&priv->shrd->mutex);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+
+static void iwlagn_mac_rssi_callback(struct ieee80211_hw *hw,
+ enum ieee80211_rssi_event rssi_event)
+{
+ struct iwl_priv *priv = hw->priv;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+ mutex_lock(&priv->shrd->mutex);
+
+ if (cfg(priv)->bt_params &&
+ cfg(priv)->bt_params->advanced_bt_coexist) {
+ if (rssi_event == RSSI_EVENT_LOW)
+ priv->bt_enable_pspoll = true;
+ else if (rssi_event == RSSI_EVENT_HIGH)
+ priv->bt_enable_pspoll = false;
+
+ iwlagn_send_advance_bt_config(priv);
+ } else {
+ IWL_DEBUG_MAC80211(priv, "Advanced BT coex disabled,"
+ "ignoring RSSI callback\n");
+ }
+
+ mutex_unlock(&priv->shrd->mutex);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+
+static int iwlagn_mac_set_tim(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta, bool set)
+{
+ struct iwl_priv *priv = hw->priv;
+
+ queue_work(priv->shrd->workqueue, &priv->beacon_update);
+
+ return 0;
+}
+
+static int iwlagn_mac_conf_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u16 queue,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+ struct iwl_rxon_context *ctx = vif_priv->ctx;
+ unsigned long flags;
+ int q;
+
+ if (WARN_ON(!ctx))
+ return -EINVAL;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+
+ if (!iwl_is_ready_rf(priv->shrd)) {
+ IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
+ return -EIO;
+ }
+
+ if (queue >= AC_NUM) {
+ IWL_DEBUG_MAC80211(priv, "leave - queue >= AC_NUM %d\n", queue);
+ return 0;
+ }
+
+ q = AC_NUM - 1 - queue;
+
+ spin_lock_irqsave(&priv->shrd->lock, flags);
+
+ ctx->qos_data.def_qos_parm.ac[q].cw_min =
+ cpu_to_le16(params->cw_min);
+ ctx->qos_data.def_qos_parm.ac[q].cw_max =
+ cpu_to_le16(params->cw_max);
+ ctx->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
+ ctx->qos_data.def_qos_parm.ac[q].edca_txop =
+ cpu_to_le16((params->txop * 32));
+
+ ctx->qos_data.def_qos_parm.ac[q].reserved1 = 0;
+
+ spin_unlock_irqrestore(&priv->shrd->lock, flags);
+
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+ return 0;
+}
+
+static int iwlagn_mac_tx_last_beacon(struct ieee80211_hw *hw)
+{
+ struct iwl_priv *priv = hw->priv;
+
+ return priv->ibss_manager == IWL_IBSS_MANAGER;
+}
+
+static int iwl_set_mode(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
+{
+ iwl_connection_init_rx_config(priv, ctx);
+
+ iwlagn_set_rxon_chain(priv, ctx);
+
+ return iwlagn_commit_rxon(priv, ctx);
+}
+
+static int iwl_setup_interface(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx)
+{
+ struct ieee80211_vif *vif = ctx->vif;
+ int err;
+
+ lockdep_assert_held(&priv->shrd->mutex);
+
+ /*
+ * This variable will be correct only when there's just
+ * a single context, but all code using it is for hardware
+ * that supports only one context.
+ */
+ priv->iw_mode = vif->type;
+
+ ctx->is_active = true;
+
+ err = iwl_set_mode(priv, ctx);
+ if (err) {
+ if (!ctx->always_active)
+ ctx->is_active = false;
+ return err;
+ }
+
+ if (cfg(priv)->bt_params && cfg(priv)->bt_params->advanced_bt_coexist &&
+ vif->type == NL80211_IFTYPE_ADHOC) {
+ /*
+ * pretend to have high BT traffic as long as we
+ * are operating in IBSS mode, as this will cause
+ * the rate scaling etc. to behave as intended.
+ */
+ priv->bt_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_HIGH;
+ }
+
+ return 0;
+}
+
+static int iwlagn_mac_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+ struct iwl_rxon_context *tmp, *ctx = NULL;
+ int err;
+ enum nl80211_iftype viftype = ieee80211_vif_type_p2p(vif);
+
+ IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n",
+ viftype, vif->addr);
+
+ cancel_delayed_work_sync(&priv->hw_roc_disable_work);
+
+ mutex_lock(&priv->shrd->mutex);
+
+ iwlagn_disable_roc(priv);
+
+ if (!iwl_is_ready_rf(priv->shrd)) {
+ IWL_WARN(priv, "Try to add interface when device not ready\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ for_each_context(priv, tmp) {
+ u32 possible_modes =
+ tmp->interface_modes | tmp->exclusive_interface_modes;
+
+ if (tmp->vif) {
+ /* check if this busy context is exclusive */
+ if (tmp->exclusive_interface_modes &
+ BIT(tmp->vif->type)) {
+ err = -EINVAL;
+ goto out;
+ }
+ continue;
+ }
+
+ if (!(possible_modes & BIT(viftype)))
+ continue;
+
+ /* have a potentially usable context w/o an interface */
+ ctx = tmp;
+ break;
+ }
+
+ if (!ctx) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ vif_priv->ctx = ctx;
+ ctx->vif = vif;
+
+ err = iwl_setup_interface(priv, ctx);
+ if (!err)
+ goto out;
+
+ ctx->vif = NULL;
+ priv->iw_mode = NL80211_IFTYPE_STATION;
+ out:
+ mutex_unlock(&priv->shrd->mutex);
+
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+ return err;
+}
+
+static void iwl_teardown_interface(struct iwl_priv *priv,
+ struct ieee80211_vif *vif,
+ bool mode_change)
+{
+ struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
+
+ lockdep_assert_held(&priv->shrd->mutex);
+
+ if (priv->scan_vif == vif) {
+ iwl_scan_cancel_timeout(priv, 200);
+ iwl_force_scan_end(priv);
+ }
+
+ if (!mode_change) {
+ iwl_set_mode(priv, ctx);
+ if (!ctx->always_active)
+ ctx->is_active = false;
+ }
+
+ /*
+ * When removing the IBSS interface, overwrite the
+ * BT traffic load with the stored one from the last
+ * notification, if any. If this is a device that
+ * doesn't implement this, this has no effect since
+ * both values are the same and zero.
+ */
+ if (vif->type == NL80211_IFTYPE_ADHOC)
+ priv->bt_traffic_load = priv->last_bt_traffic_load;
+}
+
+static void iwlagn_mac_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+
+ mutex_lock(&priv->shrd->mutex);
+
+ if (WARN_ON(ctx->vif != vif)) {
+ struct iwl_rxon_context *tmp;
+ IWL_ERR(priv, "ctx->vif = %p, vif = %p\n", ctx->vif, vif);
+ for_each_context(priv, tmp)
+ IWL_ERR(priv, "\tID = %d:\tctx = %p\tctx->vif = %p\n",
+ tmp->ctxid, tmp, tmp->vif);
+ }
+ ctx->vif = NULL;
+
+ iwl_teardown_interface(priv, vif, false);
+
+ mutex_unlock(&priv->shrd->mutex);
+
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+
+}
+
+static int iwlagn_mac_change_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum nl80211_iftype newtype, bool newp2p)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
+ struct iwl_rxon_context *bss_ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+ struct iwl_rxon_context *tmp;
+ enum nl80211_iftype newviftype = newtype;
+ u32 interface_modes;
+ int err;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+
+ newtype = ieee80211_iftype_p2p(newtype, newp2p);
+
+ mutex_lock(&priv->shrd->mutex);
+
+ if (!ctx->vif || !iwl_is_ready_rf(priv->shrd)) {
+ /*
+ * Huh? But wait ... this can maybe happen when
+ * we're in the middle of a firmware restart!
+ */
+ err = -EBUSY;
+ goto out;
+ }
+
+ interface_modes = ctx->interface_modes | ctx->exclusive_interface_modes;
+
+ if (!(interface_modes & BIT(newtype))) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ /*
+ * Refuse a change that should be done by moving from the PAN
+ * context to the BSS context instead, if the BSS context is
+ * available and can support the new interface type.
+ */
+ if (ctx->ctxid == IWL_RXON_CTX_PAN && !bss_ctx->vif &&
+ (bss_ctx->interface_modes & BIT(newtype) ||
+ bss_ctx->exclusive_interface_modes & BIT(newtype))) {
+ BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
+ err = -EBUSY;
+ goto out;
+ }
+
+ if (ctx->exclusive_interface_modes & BIT(newtype)) {
+ for_each_context(priv, tmp) {
+ if (ctx == tmp)
+ continue;
+
+ if (!tmp->vif)
+ continue;
+
+ /*
+ * The current mode switch would be exclusive, but
+ * another context is active ... refuse the switch.
+ */
+ err = -EBUSY;
+ goto out;
+ }
+ }
+
+ /* success */
+ iwl_teardown_interface(priv, vif, true);
+ vif->type = newviftype;
+ vif->p2p = newp2p;
+ err = iwl_setup_interface(priv, ctx);
+ WARN_ON(err);
+ /*
+ * We've switched internally, but submitting to the
+ * device may have failed for some reason. Mask this
+ * error, because otherwise mac80211 will not switch
+ * (and set the interface type back) and we'll be
+ * out of sync with it.
+ */
+ err = 0;
+
+ out:
+ mutex_unlock(&priv->shrd->mutex);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+
+ return err;
+}
+
+static int iwlagn_mac_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_scan_request *req)
+{
+ struct iwl_priv *priv = hw->priv;
+ int ret;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+
+ if (req->n_channels == 0)
+ return -EINVAL;
+
+ mutex_lock(&priv->shrd->mutex);
+
+ /*
+ * If an internal scan is in progress, just set
+ * up the scan_request as per above.
+ */
+ if (priv->scan_type != IWL_SCAN_NORMAL) {
+ IWL_DEBUG_SCAN(priv,
+ "SCAN request during internal scan - defer\n");
+ priv->scan_request = req;
+ priv->scan_vif = vif;
+ ret = 0;
+ } else {
+ priv->scan_request = req;
+ priv->scan_vif = vif;
+ /*
+ * mac80211 will only ask for one band at a time
+ * so using channels[0] here is ok
+ */
+ ret = iwl_scan_initiate(priv, vif, IWL_SCAN_NORMAL,
+ req->channels[0]->band);
+ if (ret) {
+ priv->scan_request = NULL;
+ priv->scan_vif = NULL;
+ }
+ }
+
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+
+ mutex_unlock(&priv->shrd->mutex);
+
+ return ret;
+}
+
+static int iwlagn_mac_sta_remove(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
+ int ret;
+
+ IWL_DEBUG_MAC80211(priv, "enter: received request to remove "
+ "station %pM\n", sta->addr);
+ mutex_lock(&priv->shrd->mutex);
+ IWL_DEBUG_INFO(priv, "proceeding to remove station %pM\n",
+ sta->addr);
+ ret = iwl_remove_station(priv, sta_priv->sta_id, sta->addr);
+ if (ret)
+ IWL_DEBUG_QUIET_RFKILL(priv, "Error removing station %pM\n",
+ sta->addr);
+ mutex_unlock(&priv->shrd->mutex);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+
+ return ret;
+}
+
+static void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags);
+ priv->stations[sta_id].sta.station_flags &= ~STA_FLG_PWR_SAVE_MSK;
+ priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
+ priv->stations[sta_id].sta.sta.modify_mask = 0;
+ priv->stations[sta_id].sta.sleep_tx_count = 0;
+ priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+ iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+
+}
+
+static void iwlagn_mac_sta_notify(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum sta_notify_cmd cmd,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
+ int sta_id;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+
+ switch (cmd) {
+ case STA_NOTIFY_SLEEP:
+ WARN_ON(!sta_priv->client);
+ sta_priv->asleep = true;
+ if (atomic_read(&sta_priv->pending_frames) > 0)
+ ieee80211_sta_block_awake(hw, sta, true);
+ break;
+ case STA_NOTIFY_AWAKE:
+ WARN_ON(!sta_priv->client);
+ if (!sta_priv->asleep)
+ break;
+ sta_priv->asleep = false;
+ sta_id = iwl_sta_id(sta);
+ if (sta_id != IWL_INVALID_STATION)
+ iwl_sta_modify_ps_wake(priv, sta_id);
+ break;
+ default:
+ break;
+ }
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+
+struct ieee80211_ops iwlagn_hw_ops = {
+ .tx = iwlagn_mac_tx,
+ .start = iwlagn_mac_start,
+ .stop = iwlagn_mac_stop,
+#ifdef CONFIG_PM_SLEEP
+ .suspend = iwlagn_mac_suspend,
+ .resume = iwlagn_mac_resume,
+#endif
+ .add_interface = iwlagn_mac_add_interface,
+ .remove_interface = iwlagn_mac_remove_interface,
+ .change_interface = iwlagn_mac_change_interface,
+ .config = iwlagn_mac_config,
+ .configure_filter = iwlagn_configure_filter,
+ .set_key = iwlagn_mac_set_key,
+ .update_tkip_key = iwlagn_mac_update_tkip_key,
+ .set_rekey_data = iwlagn_mac_set_rekey_data,
+ .conf_tx = iwlagn_mac_conf_tx,
+ .bss_info_changed = iwlagn_bss_info_changed,
+ .ampdu_action = iwlagn_mac_ampdu_action,
+ .hw_scan = iwlagn_mac_hw_scan,
+ .sta_notify = iwlagn_mac_sta_notify,
+ .sta_add = iwlagn_mac_sta_add,
+ .sta_remove = iwlagn_mac_sta_remove,
+ .channel_switch = iwlagn_mac_channel_switch,
+ .flush = iwlagn_mac_flush,
+ .tx_last_beacon = iwlagn_mac_tx_last_beacon,
+ .remain_on_channel = iwlagn_mac_remain_on_channel,
+ .cancel_remain_on_channel = iwlagn_mac_cancel_remain_on_channel,
+ .rssi_callback = iwlagn_mac_rssi_callback,
+ CFG80211_TESTMODE_CMD(iwlagn_mac_testmode_cmd)
+ CFG80211_TESTMODE_DUMP(iwlagn_mac_testmode_dump)
+ .tx_sync = iwlagn_mac_tx_sync,
+ .finish_tx_sync = iwlagn_mac_finish_tx_sync,
+ .set_tim = iwlagn_mac_set_tim,
+};
+
+/* This function both allocates and initializes hw and priv. */
+struct ieee80211_hw *iwl_alloc_all(void)
+{
+ struct iwl_priv *priv;
+ /* mac80211 allocates memory for this device instance, including
+ * space for this driver's private structure */
+ struct ieee80211_hw *hw;
+
+ hw = ieee80211_alloc_hw(sizeof(struct iwl_priv), &iwlagn_hw_ops);
+ if (!hw)
+ goto out;
+
+ priv = hw->priv;
+ priv->hw = hw;
+
+out:
+ return hw;
+}
diff --git a/drivers/net/wireless/iwlwifi/iwl-pci.c b/drivers/net/wireless/iwlwifi/iwl-pci.c
index 1800029911ad..fb30ea7ca96b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-pci.c
+++ b/drivers/net/wireless/iwlwifi/iwl-pci.c
@@ -135,7 +135,7 @@ static void iwl_pci_apm_config(struct iwl_bus *bus)
}
}
-static void iwl_pci_get_hw_id(struct iwl_bus *bus, char buf[],
+static void iwl_pci_get_hw_id_string(struct iwl_bus *bus, char buf[],
int buf_len)
{
struct pci_dev *pci_dev = IWL_BUS_GET_PCI_DEV(bus);
@@ -144,6 +144,13 @@ static void iwl_pci_get_hw_id(struct iwl_bus *bus, char buf[],
pci_dev->subsystem_device);
}
+static u32 iwl_pci_get_hw_id(struct iwl_bus *bus)
+{
+ struct pci_dev *pci_dev = IWL_BUS_GET_PCI_DEV(bus);
+
+ return (pci_dev->device << 16) + pci_dev->subsystem_device;
+}
+
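As a quick illustration of the composition above (a hedged aside, not part of the patch): for a 6005-series card from the ID table further down, with device 0x0082 and subsystem 0x1304, the helper would return

	/* (0x0082 << 16) + 0x1304 == 0x00821304 */
	u32 hw_id = (0x0082 << 16) + 0x1304;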
static void iwl_pci_write8(struct iwl_bus *bus, u32 ofs, u8 val)
{
iowrite8(val, IWL_BUS_GET_PCI_BUS(bus)->hw_base + ofs);
@@ -163,6 +170,7 @@ static u32 iwl_pci_read32(struct iwl_bus *bus, u32 ofs)
static const struct iwl_bus_ops bus_ops_pci = {
.get_pm_support = iwl_pci_is_pm_supported,
.apm_config = iwl_pci_apm_config,
+ .get_hw_id_string = iwl_pci_get_hw_id_string,
.get_hw_id = iwl_pci_get_hw_id,
.write8 = iwl_pci_write8,
.write32 = iwl_pci_write32,
@@ -256,6 +264,8 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
{IWL_PCI_DEVICE(0x0082, 0xC020, iwl6005_2agn_sff_cfg)},
{IWL_PCI_DEVICE(0x0085, 0xC220, iwl6005_2agn_sff_cfg)},
{IWL_PCI_DEVICE(0x0082, 0x1341, iwl6005_2agn_d_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0x1304, iwl6005_2agn_cfg)},/* low 5GHz active */
+ {IWL_PCI_DEVICE(0x0082, 0x1305, iwl6005_2agn_cfg)},/* high 5GHz active */
/* 6x30 Series */
{IWL_PCI_DEVICE(0x008A, 0x5305, iwl1030_bgn_cfg)},
@@ -325,46 +335,28 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
{IWL_PCI_DEVICE(0x0890, 0x4022, iwl2000_2bgn_cfg)},
{IWL_PCI_DEVICE(0x0891, 0x4222, iwl2000_2bgn_cfg)},
{IWL_PCI_DEVICE(0x0890, 0x4422, iwl2000_2bgn_cfg)},
- {IWL_PCI_DEVICE(0x0890, 0x4026, iwl2000_2bg_cfg)},
- {IWL_PCI_DEVICE(0x0891, 0x4226, iwl2000_2bg_cfg)},
- {IWL_PCI_DEVICE(0x0890, 0x4426, iwl2000_2bg_cfg)},
{IWL_PCI_DEVICE(0x0890, 0x4822, iwl2000_2bgn_d_cfg)},
/* 2x30 Series */
{IWL_PCI_DEVICE(0x0887, 0x4062, iwl2030_2bgn_cfg)},
{IWL_PCI_DEVICE(0x0888, 0x4262, iwl2030_2bgn_cfg)},
{IWL_PCI_DEVICE(0x0887, 0x4462, iwl2030_2bgn_cfg)},
- {IWL_PCI_DEVICE(0x0887, 0x4066, iwl2030_2bg_cfg)},
- {IWL_PCI_DEVICE(0x0888, 0x4266, iwl2030_2bg_cfg)},
- {IWL_PCI_DEVICE(0x0887, 0x4466, iwl2030_2bg_cfg)},
/* 6x35 Series */
{IWL_PCI_DEVICE(0x088E, 0x4060, iwl6035_2agn_cfg)},
{IWL_PCI_DEVICE(0x088F, 0x4260, iwl6035_2agn_cfg)},
{IWL_PCI_DEVICE(0x088E, 0x4460, iwl6035_2agn_cfg)},
- {IWL_PCI_DEVICE(0x088E, 0x4064, iwl6035_2abg_cfg)},
- {IWL_PCI_DEVICE(0x088F, 0x4264, iwl6035_2abg_cfg)},
- {IWL_PCI_DEVICE(0x088E, 0x4464, iwl6035_2abg_cfg)},
- {IWL_PCI_DEVICE(0x088E, 0x4066, iwl6035_2bg_cfg)},
- {IWL_PCI_DEVICE(0x088F, 0x4266, iwl6035_2bg_cfg)},
- {IWL_PCI_DEVICE(0x088E, 0x4466, iwl6035_2bg_cfg)},
/* 105 Series */
{IWL_PCI_DEVICE(0x0894, 0x0022, iwl105_bgn_cfg)},
{IWL_PCI_DEVICE(0x0895, 0x0222, iwl105_bgn_cfg)},
{IWL_PCI_DEVICE(0x0894, 0x0422, iwl105_bgn_cfg)},
- {IWL_PCI_DEVICE(0x0894, 0x0026, iwl105_bg_cfg)},
- {IWL_PCI_DEVICE(0x0895, 0x0226, iwl105_bg_cfg)},
- {IWL_PCI_DEVICE(0x0894, 0x0426, iwl105_bg_cfg)},
{IWL_PCI_DEVICE(0x0894, 0x0822, iwl105_bgn_d_cfg)},
/* 135 Series */
{IWL_PCI_DEVICE(0x0892, 0x0062, iwl135_bgn_cfg)},
{IWL_PCI_DEVICE(0x0893, 0x0262, iwl135_bgn_cfg)},
{IWL_PCI_DEVICE(0x0892, 0x0462, iwl135_bgn_cfg)},
- {IWL_PCI_DEVICE(0x0892, 0x0066, iwl135_bg_cfg)},
- {IWL_PCI_DEVICE(0x0893, 0x0266, iwl135_bg_cfg)},
- {IWL_PCI_DEVICE(0x0892, 0x0466, iwl135_bg_cfg)},
{0}
};
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c
index 4eaab204322d..2b188a6025b3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.c
+++ b/drivers/net/wireless/iwlwifi/iwl-power.c
@@ -167,7 +167,7 @@ static void iwl_static_sleep_cmd(struct iwl_priv *priv,
u8 skip;
u32 slp_itrvl;
- if (priv->cfg->adv_pm) {
+ if (cfg(priv)->adv_pm) {
table = apm_range_2;
if (period <= IWL_DTIM_RANGE_1_MAX)
table = apm_range_1;
@@ -221,7 +221,7 @@ static void iwl_static_sleep_cmd(struct iwl_priv *priv,
cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA;
if (iwl_advanced_bt_coexist(priv)) {
- if (!priv->cfg->bt_params->bt_sco_disable)
+ if (!cfg(priv)->bt_params->bt_sco_disable)
cmd->flags |= IWL_POWER_BT_SCO_ENA;
else
cmd->flags &= ~IWL_POWER_BT_SCO_ENA;
@@ -307,7 +307,7 @@ static void iwl_power_fill_sleep_cmd(struct iwl_priv *priv,
cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA;
if (iwl_advanced_bt_coexist(priv)) {
- if (!priv->cfg->bt_params->bt_sco_disable)
+ if (!cfg(priv)->bt_params->bt_sco_disable)
cmd->flags |= IWL_POWER_BT_SCO_ENA;
else
cmd->flags &= ~IWL_POWER_BT_SCO_ENA;
@@ -350,7 +350,7 @@ static void iwl_power_build_cmd(struct iwl_priv *priv,
if (priv->shrd->wowlan)
iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, dtimper);
- else if (!priv->cfg->base_params->no_idle_support &&
+ else if (!cfg(priv)->base_params->no_idle_support &&
priv->hw->conf.flags & IEEE80211_CONF_IDLE)
iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, 20);
else if (iwl_tt_is_low_power_state(priv)) {
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index e5d727f537d0..a6454726737e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -416,6 +416,8 @@ static u16 iwl_limit_dwell(struct iwl_priv *priv, u16 dwell_time)
if (!iwl_is_associated_ctx(ctx))
continue;
+ if (ctx->staging.dev_type == RXON_DEV_TYPE_P2P)
+ continue;
value = ctx->beacon_int;
if (!value)
value = IWL_PASSIVE_DWELL_BASE;
@@ -567,7 +569,7 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
struct iwl_scan_cmd *scan;
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
u32 rate_flags = 0;
- u16 cmd_len;
+ u16 cmd_len = 0;
u16 rx_chain = 0;
enum ieee80211_band band;
u8 n_probes = 0;
@@ -678,7 +680,8 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
priv->contexts[IWL_RXON_CTX_BSS].active.flags &
RXON_FLG_CHANNEL_MODE_MSK)
>> RXON_FLG_CHANNEL_MODE_POS;
- if (chan_mod == CHANNEL_MODE_PURE_40) {
+ if ((priv->scan_request && priv->scan_request->no_cck) ||
+ chan_mod == CHANNEL_MODE_PURE_40) {
rate = IWL_RATE_6M_PLCP;
} else {
rate = IWL_RATE_1M_PLCP;
@@ -688,8 +691,8 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
* Internal scans are passive, so we can indiscriminately set
* the BT ignore flag on 2.4 GHz since it applies to TX only.
*/
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist)
+ if (cfg(priv)->bt_params &&
+ cfg(priv)->bt_params->advanced_bt_coexist)
scan->tx_cmd.tx_flags |= TX_CMD_FLG_IGNORE_BT;
break;
case IEEE80211_BAND_5GHZ:
@@ -730,12 +733,12 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
band = priv->scan_band;
- if (priv->cfg->scan_rx_antennas[band])
- rx_ant = priv->cfg->scan_rx_antennas[band];
+ if (cfg(priv)->scan_rx_antennas[band])
+ rx_ant = cfg(priv)->scan_rx_antennas[band];
if (band == IEEE80211_BAND_2GHZ &&
- priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist) {
+ cfg(priv)->bt_params &&
+ cfg(priv)->bt_params->advanced_bt_coexist) {
/* transmit 2.4 GHz probes only on first antenna */
scan_tx_antennas = first_antenna(scan_tx_antennas);
}
@@ -759,8 +762,8 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
rx_ant = first_antenna(active_chains);
}
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist &&
+ if (cfg(priv)->bt_params &&
+ cfg(priv)->bt_params->advanced_bt_coexist &&
priv->bt_full_concurrent) {
/* operated as 1x1 in full concurrency mode */
rx_ant = first_antenna(rx_ant);
@@ -938,51 +941,6 @@ int __must_check iwl_scan_initiate(struct iwl_priv *priv,
return 0;
}
-int iwlagn_mac_hw_scan(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct cfg80211_scan_request *req)
-{
- struct iwl_priv *priv = hw->priv;
- int ret;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- if (req->n_channels == 0)
- return -EINVAL;
-
- mutex_lock(&priv->shrd->mutex);
-
- /*
- * If an internal scan is in progress, just set
- * up the scan_request as per above.
- */
- if (priv->scan_type != IWL_SCAN_NORMAL) {
- IWL_DEBUG_SCAN(priv,
- "SCAN request during internal scan - defer\n");
- priv->scan_request = req;
- priv->scan_vif = vif;
- ret = 0;
- } else {
- priv->scan_request = req;
- priv->scan_vif = vif;
- /*
- * mac80211 will only ask for one band at a time
- * so using channels[0] here is ok
- */
- ret = iwl_scan_initiate(priv, vif, IWL_SCAN_NORMAL,
- req->channels[0]->band);
- if (ret) {
- priv->scan_request = NULL;
- priv->scan_vif = NULL;
- }
- }
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
-
- mutex_unlock(&priv->shrd->mutex);
-
- return ret;
-}
/*
* internal short scan, this function should only been called while associated.
diff --git a/drivers/net/wireless/iwlwifi/iwl-shared.h b/drivers/net/wireless/iwlwifi/iwl-shared.h
index 1f7a93c67c45..dc55cc4a8108 100644
--- a/drivers/net/wireless/iwlwifi/iwl-shared.h
+++ b/drivers/net/wireless/iwlwifi/iwl-shared.h
@@ -94,9 +94,9 @@
* This implementation is iwl-pci.c
*/
-struct iwl_cfg;
struct iwl_bus;
struct iwl_priv;
+struct iwl_trans;
struct iwl_sensitivity_ranges;
struct iwl_trans_ops;
@@ -107,6 +107,10 @@ struct iwl_trans_ops;
extern struct iwl_mod_params iwlagn_mod_params;
+#define IWL_DISABLE_HT_ALL BIT(0)
+#define IWL_DISABLE_HT_TXAGG BIT(1)
+#define IWL_DISABLE_HT_RXAGG BIT(2)
+
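A minimal sketch of how these bits are meant to be consulted (illustrative only; the helper below is hypothetical and not part of the patch):

	static bool ht_agg_allowed(const struct iwl_mod_params *mod, bool tx)
	{
		if (mod->disable_11n & IWL_DISABLE_HT_ALL)
			return false;
		return !(mod->disable_11n &
			 (tx ? IWL_DISABLE_HT_TXAGG : IWL_DISABLE_HT_RXAGG));
	}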
/**
* struct iwl_mod_params
*
@@ -114,13 +118,14 @@ extern struct iwl_mod_params iwlagn_mod_params;
*
* @sw_crypto: using hardware encryption, default = 0
* @num_of_queues: number of tx queue, HW dependent
- * @disable_11n: 11n capabilities enabled, default = 0
+ * @disable_11n: disable 11n capabilities, default = 0,
+ * use IWL_DISABLE_HT_* constants
* @amsdu_size_8K: enable 8K amsdu size, default = 1
* @antenna: both antennas (use diversity), default = 0
* @restart_fw: restart firmware, default = 1
* @plcp_check: enable plcp health check, default = true
* @ack_check: disable ack health check, default = false
- * @wd_disable: enable stuck queue check, default = false
+ * @wd_disable: enable stuck queue check, default = 0
* @bt_coex_active: enable bt coex, default = true
* @led_mode: system default, default = 0
* @no_sleep_autoadjust: disable autoadjust, default = true
@@ -135,13 +140,13 @@ extern struct iwl_mod_params iwlagn_mod_params;
struct iwl_mod_params {
int sw_crypto;
int num_of_queues;
- int disable_11n;
+ unsigned int disable_11n;
int amsdu_size_8K;
int antenna;
int restart_fw;
bool plcp_check;
bool ack_check;
- bool wd_disable;
+ int wd_disable;
bool bt_coex_active;
int led_mode;
bool no_sleep_autoadjust;
@@ -174,8 +179,6 @@ struct iwl_mod_params {
 * @ct_kill_exit_threshold: when to re-enable the device - in hw dependent unit
* relevant for 1000, 6000 and up
* @wd_timeout: TX queues watchdog timeout
- * @calib_init_cfg: setup initial calibrations for the hw
- * @calib_rt_cfg: setup runtime calibrations for the hw
* @struct iwl_sensitivity_ranges: range of sensitivity values
*/
struct iwl_hw_params {
@@ -195,67 +198,148 @@ struct iwl_hw_params {
u32 ct_kill_exit_threshold;
unsigned int wd_timeout;
- u32 calib_init_cfg;
- u32 calib_rt_cfg;
const struct iwl_sensitivity_ranges *sens;
};
/**
- * enum iwl_agg_state
+ * enum iwl_ucode_type
*
- * The state machine of the BA agreement establishment / tear down.
- * These states relate to a specific RA / TID.
+ * The type of ucode currently loaded on the hardware.
*
- * @IWL_AGG_OFF: aggregation is not used
- * @IWL_AGG_ON: aggregation session is up
- * @IWL_EMPTYING_HW_QUEUE_ADDBA: establishing a BA session - waiting for the
- * HW queue to be empty from packets for this RA /TID.
- * @IWL_EMPTYING_HW_QUEUE_DELBA: tearing down a BA session - waiting for the
- * HW queue to be empty from packets for this RA /TID.
+ * @IWL_UCODE_NONE: No ucode loaded
+ * @IWL_UCODE_REGULAR: Normal runtime ucode
+ * @IWL_UCODE_INIT: Initial ucode
+ * @IWL_UCODE_WOWLAN: Wake on Wireless enabled ucode
*/
-enum iwl_agg_state {
- IWL_AGG_OFF = 0,
- IWL_AGG_ON,
- IWL_EMPTYING_HW_QUEUE_ADDBA,
- IWL_EMPTYING_HW_QUEUE_DELBA,
+enum iwl_ucode_type {
+ IWL_UCODE_NONE,
+ IWL_UCODE_REGULAR,
+ IWL_UCODE_INIT,
+ IWL_UCODE_WOWLAN,
};
/**
- * struct iwl_ht_agg - aggregation state machine
-
- * This structs holds the states for the BA agreement establishment and tear
- * down. It also holds the state during the BA session itself. This struct is
- * duplicated for each RA / TID.
-
- * @rate_n_flags: Rate at which Tx was attempted. Holds the data between the
- * Tx response (REPLY_TX), and the block ack notification
- * (REPLY_COMPRESSED_BA).
- * @state: state of the BA agreement establishment / tear down.
- * @txq_id: Tx queue used by the BA session - used by the transport layer.
- * Needed by the upper layer for debugfs only.
- * @wait_for_ba: Expect block-ack before next Tx reply
+ * struct iwl_notification_wait - notification wait entry
+ * @list: list head for global list
+ * @fn: function called with the notification
+ * @cmd: command ID
+ *
+ * This structure is not used directly, to wait for a
+ * notification declare it on the stack, and call
+ * iwl_init_notification_wait() with appropriate
+ * parameters. Then do whatever will cause the uCode
+ * to notify the driver, and wait for the notification
+ * by calling iwl_wait_notification().
+ *
+ * Each notification is one-shot. If at some point we
+ * need to support multi-shot notifications (which
+ * can't be allocated on the stack) we need to modify
+ * the code for them.
*/
-struct iwl_ht_agg {
- u32 rate_n_flags;
- enum iwl_agg_state state;
- u16 txq_id;
- bool wait_for_ba;
+struct iwl_notification_wait {
+ struct list_head list;
+
+ void (*fn)(struct iwl_trans *trans, struct iwl_rx_packet *pkt,
+ void *data);
+ void *fn_data;
+
+ u8 cmd;
+ bool triggered, aborted;
};
/**
- * struct iwl_tid_data - one for each RA / TID
+ * enum iwl_pa_type - Power Amplifier type
+ * @IWL_PA_SYSTEM: based on uCode configuration
+ * @IWL_PA_INTERNAL: use Internal only
+ */
+enum iwl_pa_type {
+ IWL_PA_SYSTEM = 0,
+ IWL_PA_INTERNAL = 1,
+};
- * This structs holds the states for each RA / TID.
+/*
+ * LED mode
+ * IWL_LED_DEFAULT: use device default
+ * IWL_LED_RF_STATE: turn LED on/off based on RF state
+ * LED ON = RF ON
+ * LED OFF = RF OFF
+ * IWL_LED_BLINK: adjust led blink rate based on blink table
+ */
+enum iwl_led_mode {
+ IWL_LED_DEFAULT,
+ IWL_LED_RF_STATE,
+ IWL_LED_BLINK,
+};
- * @seq_number: the next WiFi sequence number to use
- * @tfds_in_queue: number of packets sent to the HW queues.
- * Exported for debugfs only
- * @agg: aggregation state machine
+/**
+ * struct iwl_cfg
+ * @name: Official name of the device
+ * @fw_name_pre: Firmware filename prefix. The api version and extension
+ * (.ucode) will be added to filename before loading from disk. The
+ * filename is constructed as fw_name_pre<api>.ucode.
+ * @ucode_api_max: Highest version of uCode API supported by driver.
+ * @ucode_api_ok: oldest version of the uCode API that is OK to load
+ * without a warning, for use in transitions
+ * @ucode_api_min: Lowest version of uCode API supported by driver.
+ * @valid_tx_ant: valid transmit antenna
+ * @valid_rx_ant: valid receive antenna
+ * @sku: sku information from EEPROM
+ * @eeprom_ver: EEPROM version
+ * @eeprom_calib_ver: EEPROM calibration version
+ * @lib: pointer to the lib ops
+ * @additional_nic_config: additional nic configuration
+ * @base_params: pointer to basic parameters
+ * @ht_params: pointer to HT parameters
+ * @bt_params: pointer to bt parameters
+ * @pa_type: used by 6000 series only to identify the type of Power Amplifier
+ * @need_temp_offset_calib: need to perform temperature offset calibration
+ * @no_xtal_calib: some devices do not need crystal calibration data,
+ * don't send it to those
+ * @scan_rx_antennas: available antenna for scan operation
+ * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off)
+ * @adv_pm: advanced power management
+ * @rx_with_siso_diversity: 1x1 device with rx antenna diversity
+ * @internal_wimax_coex: internal wifi/wimax combo device
+ * @iq_invert: I/Q inversion
+ * @temp_offset_v2: support v2 of temperature offset calibration
+ *
+ * We enable the driver to be backward compatible wrt API version. The
+ * driver specifies which APIs it supports (with @ucode_api_max being the
+ * highest and @ucode_api_min the lowest). Firmware will only be loaded if
+ * it has a supported API version.
+ *
+ * The ideal usage of this infrastructure is to treat a new ucode API
+ * release as a new hardware revision.
*/
-struct iwl_tid_data {
- u16 seq_number;
- u16 tfds_in_queue;
- struct iwl_ht_agg agg;
+struct iwl_cfg {
+ /* params specific to an individual device within a device family */
+ const char *name;
+ const char *fw_name_pre;
+ const unsigned int ucode_api_max;
+ const unsigned int ucode_api_ok;
+ const unsigned int ucode_api_min;
+ u8 valid_tx_ant;
+ u8 valid_rx_ant;
+ u16 sku;
+ u16 eeprom_ver;
+ u16 eeprom_calib_ver;
+ const struct iwl_lib_ops *lib;
+ void (*additional_nic_config)(struct iwl_priv *priv);
+ /* params not likely to change within a device family */
+ struct iwl_base_params *base_params;
+ /* params likely to change within a device family */
+ struct iwl_ht_params *ht_params;
+ struct iwl_bt_params *bt_params;
+ enum iwl_pa_type pa_type; /* if used set to IWL_PA_SYSTEM */
+ const bool need_temp_offset_calib; /* if used set to true */
+ const bool no_xtal_calib;
+ u8 scan_rx_antennas[IEEE80211_NUM_BANDS];
+ enum iwl_led_mode led_mode;
+ const bool adv_pm;
+ const bool rx_with_siso_diversity;
+ const bool internal_wimax_coex;
+ const bool iq_invert;
+ const bool temp_offset_v2;
};
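A hedged sketch of the firmware naming scheme described in the comment above (fw_name_pre<api>.ucode, bounded by ucode_api_min/ucode_api_max); the real loader lives elsewhere in the driver and the prefix used here is only an example:

	static void example_fw_name(const struct iwl_cfg *cfg, unsigned int api,
				    char *buf, size_t len)
	{
		/* e.g. fw_name_pre "iwlwifi-6000g2a-", api 5
		 * -> "iwlwifi-6000g2a-5.ucode" */
		if (api >= cfg->ucode_api_min && api <= cfg->ucode_api_max)
			snprintf(buf, len, "%s%u.ucode", cfg->fw_name_pre, api);
	}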
/**
@@ -266,15 +350,25 @@ struct iwl_tid_data {
* @ucode_owner: IWL_OWNERSHIP_*
* @cmd_queue: command queue number
* @status: STATUS_*
+ * @wowlan: are we running wowlan uCode
* @valid_contexts: microcode/device supports multiple contexts
* @bus: pointer to the bus layer data
+ * @cfg: see struct iwl_cfg
* @priv: pointer to the upper layer data
+ * @trans: pointer to the transport layer data
* @hw_params: see struct iwl_hw_params
* @workqueue: the workqueue used by all the layers of the driver
* @lock: protect general shared data
* @sta_lock: protects the station table.
* If lock and sta_lock are needed, lock must be acquired first.
* @mutex:
+ * @wait_command_queue: the wait_queue for SYNC host commands and uCode load
+ * @eeprom: pointer to the eeprom/OTP image
+ * @ucode_type: indicator of loaded ucode image
+ * @notif_waits: things waiting for notification
+ * @notif_wait_lock: lock protecting the notification wait list
+ * @notif_waitq: head of notification wait queue
+ * @device_pointers: pointers to ucode event tables
*/
struct iwl_shared {
#ifdef CONFIG_IWLWIFI_DEBUG
@@ -290,6 +384,7 @@ struct iwl_shared {
u8 valid_contexts;
struct iwl_bus *bus;
+ struct iwl_cfg *cfg;
struct iwl_priv *priv;
struct iwl_trans *trans;
struct iwl_hw_params hw_params;
@@ -299,13 +394,29 @@ struct iwl_shared {
spinlock_t sta_lock;
struct mutex mutex;
- struct iwl_tid_data tid_data[IWLAGN_STATION_COUNT][IWL_MAX_TID_COUNT];
-
wait_queue_head_t wait_command_queue;
+
+ /* eeprom -- this is in the card's little endian byte order */
+ u8 *eeprom;
+
+ /* ucode related variables */
+ enum iwl_ucode_type ucode_type;
+
+ /* notification wait support */
+ struct list_head notif_waits;
+ spinlock_t notif_wait_lock;
+ wait_queue_head_t notif_waitq;
+
+ struct {
+ u32 error_event_table;
+ u32 log_event_table;
+ } device_pointers;
+
};
/* Whatever _m is (iwl_trans, iwl_priv, iwl_bus), these macros will work */
#define priv(_m) ((_m)->shrd->priv)
+#define cfg(_m) ((_m)->shrd->cfg)
#define bus(_m) ((_m)->shrd->bus)
#define trans(_m) ((_m)->shrd->trans)
#define hw_params(_m) ((_m)->shrd->hw_params)
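Illustration of how these accessors expand (any object carrying a ->shrd pointer works, per the comment above):

	/* cfg(priv)->bt_params   expands to   priv->shrd->cfg->bt_params  */
	/* bus(trans)->dev        expands to   trans->shrd->bus->dev       */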
@@ -427,12 +538,6 @@ int __must_check iwl_rx_dispatch(struct iwl_priv *priv,
struct iwl_device_cmd *cmd);
int iwlagn_hw_valid_rtc_data_addr(u32 addr);
-void iwl_start_tx_ba_trans_ready(struct iwl_priv *priv,
- enum iwl_rxon_context_id ctx,
- u8 sta_id, u8 tid);
-void iwl_stop_tx_ba_trans_ready(struct iwl_priv *priv,
- enum iwl_rxon_context_id ctx,
- u8 sta_id, u8 tid);
void iwl_set_hw_rfkill_state(struct iwl_priv *priv, bool state);
void iwl_nic_config(struct iwl_priv *priv);
void iwl_free_skb(struct iwl_priv *priv, struct sk_buff *skb);
@@ -445,6 +550,24 @@ bool iwl_check_for_ct_kill(struct iwl_priv *priv);
void iwl_stop_sw_queue(struct iwl_priv *priv, u8 ac);
void iwl_wake_sw_queue(struct iwl_priv *priv, u8 ac);
+/* notification wait support */
+void iwl_abort_notification_waits(struct iwl_shared *shrd);
+void __acquires(wait_entry)
+iwl_init_notification_wait(struct iwl_shared *shrd,
+ struct iwl_notification_wait *wait_entry,
+ u8 cmd,
+ void (*fn)(struct iwl_trans *trans,
+ struct iwl_rx_packet *pkt,
+ void *data),
+ void *fn_data);
+int __must_check __releases(wait_entry)
+iwl_wait_notification(struct iwl_shared *shrd,
+ struct iwl_notification_wait *wait_entry,
+ unsigned long timeout);
+void __releases(wait_entry)
+iwl_remove_notification(struct iwl_shared *shrd,
+ struct iwl_notification_wait *wait_entry);
+
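A minimal usage sketch of the notification-wait API, modeled on the iwl_testmode_cfg_init_calib() call sequence further down in this patch (error handling trimmed; illustrative only):

	static int example_wait_for_calib(struct iwl_priv *priv)
	{
		struct iwl_notification_wait calib_wait;
		int ret;

		iwl_init_notification_wait(priv->shrd, &calib_wait,
					   CALIBRATION_COMPLETE_NOTIFICATION,
					   NULL, NULL);

		/* kick off whatever makes the uCode send the notification */
		ret = iwl_init_alive_start(trans(priv));
		if (ret) {
			/* never started waiting -- drop the entry again */
			iwl_remove_notification(priv->shrd, &calib_wait);
			return ret;
		}

		/* iwl_wait_notification() releases the wait entry itself */
		return iwl_wait_notification(priv->shrd, &calib_wait, 2 * HZ);
	}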
#ifdef CONFIG_IWLWIFI_DEBUGFS
void iwl_reset_traffic_log(struct iwl_priv *priv);
#endif /* CONFIG_IWLWIFI_DEBUGFS */
diff --git a/drivers/net/wireless/iwlwifi/iwl-sv-open.c b/drivers/net/wireless/iwlwifi/iwl-testmode.c
index 5e50d88f302b..4a5cddd2d56b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sv-open.c
+++ b/drivers/net/wireless/iwlwifi/iwl-testmode.c
@@ -70,6 +70,7 @@
#include <net/mac80211.h>
#include <net/netlink.h>
+#include "iwl-wifi.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-debug.h"
@@ -77,6 +78,7 @@
#include "iwl-agn.h"
#include "iwl-testmode.h"
#include "iwl-trans.h"
+#include "iwl-bus.h"
/* The TLVs used in the gnl message policy between the kernel module and
* user space application. iwl_testmode_gnl_msg_policy is to be carried
@@ -106,6 +108,13 @@ struct nla_policy iwl_testmode_gnl_msg_policy[IWL_TM_ATTR_MAX] = {
[IWL_TM_ATTR_FIXRATE] = { .type = NLA_U32, },
[IWL_TM_ATTR_UCODE_OWNER] = { .type = NLA_U8, },
+
+ [IWL_TM_ATTR_SRAM_ADDR] = { .type = NLA_U32, },
+ [IWL_TM_ATTR_SRAM_SIZE] = { .type = NLA_U32, },
+ [IWL_TM_ATTR_SRAM_DUMP] = { .type = NLA_UNSPEC, },
+
+ [IWL_TM_ATTR_FW_VERSION] = { .type = NLA_U32, },
+ [IWL_TM_ATTR_DEVICE_ID] = { .type = NLA_U32, },
};
/*
@@ -177,6 +186,18 @@ void iwl_testmode_init(struct iwl_priv *priv)
{
priv->pre_rx_handler = iwl_testmode_ucode_rx_pkt;
priv->testmode_trace.trace_enabled = false;
+ priv->testmode_sram.sram_readed = false;
+}
+
+static void iwl_sram_cleanup(struct iwl_priv *priv)
+{
+ if (priv->testmode_sram.sram_readed) {
+ kfree(priv->testmode_sram.buff_addr);
+ priv->testmode_sram.buff_addr = NULL;
+ priv->testmode_sram.buff_size = 0;
+ priv->testmode_sram.num_chunks = 0;
+ priv->testmode_sram.sram_readed = false;
+ }
}
static void iwl_trace_cleanup(struct iwl_priv *priv)
@@ -201,6 +222,7 @@ static void iwl_trace_cleanup(struct iwl_priv *priv)
void iwl_testmode_cleanup(struct iwl_priv *priv)
{
iwl_trace_cleanup(priv);
+ iwl_sram_cleanup(priv);
}
/*
@@ -276,7 +298,7 @@ static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb)
IWL_INFO(priv, "testmode register access command offset 0x%x\n", ofs);
switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
- case IWL_TM_CMD_APP2DEV_REG_READ32:
+ case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
val32 = iwl_read32(bus(priv), ofs);
IWL_INFO(priv, "32bit value to read 0x%x\n", val32);
@@ -291,7 +313,7 @@ static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb)
IWL_DEBUG_INFO(priv,
"Error sending msg : %d\n", status);
break;
- case IWL_TM_CMD_APP2DEV_REG_WRITE32:
+ case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
if (!tb[IWL_TM_ATTR_REG_VALUE32]) {
IWL_DEBUG_INFO(priv,
"Error finding value to write\n");
@@ -302,7 +324,7 @@ static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb)
iwl_write32(bus(priv), ofs, val32);
}
break;
- case IWL_TM_CMD_APP2DEV_REG_WRITE8:
+ case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
if (!tb[IWL_TM_ATTR_REG_VALUE8]) {
IWL_DEBUG_INFO(priv, "Error finding value to write\n");
return -ENOMSG;
@@ -312,6 +334,32 @@ static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb)
iwl_write8(bus(priv), ofs, val8);
}
break;
+ case IWL_TM_CMD_APP2DEV_INDIRECT_REG_READ32:
+ val32 = iwl_read_prph(bus(priv), ofs);
+ IWL_INFO(priv, "32bit value to read 0x%x\n", val32);
+
+ skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
+ if (!skb) {
+ IWL_DEBUG_INFO(priv, "Error allocating memory\n");
+ return -ENOMEM;
+ }
+ NLA_PUT_U32(skb, IWL_TM_ATTR_REG_VALUE32, val32);
+ status = cfg80211_testmode_reply(skb);
+ if (status < 0)
+ IWL_DEBUG_INFO(priv,
+ "Error sending msg : %d\n", status);
+ break;
+ case IWL_TM_CMD_APP2DEV_INDIRECT_REG_WRITE32:
+ if (!tb[IWL_TM_ATTR_REG_VALUE32]) {
+ IWL_DEBUG_INFO(priv,
+ "Error finding value to write\n");
+ return -ENOMSG;
+ } else {
+ val32 = nla_get_u32(tb[IWL_TM_ATTR_REG_VALUE32]);
+ IWL_INFO(priv, "32bit value to write 0x%x\n", val32);
+ iwl_write_prph(bus(priv), ofs, val32);
+ }
+ break;
default:
IWL_DEBUG_INFO(priv, "Unknown testmode register command ID\n");
return -ENOSYS;
@@ -330,24 +378,24 @@ static int iwl_testmode_cfg_init_calib(struct iwl_priv *priv)
struct iwl_notification_wait calib_wait;
int ret;
- iwlagn_init_notification_wait(priv, &calib_wait,
+ iwl_init_notification_wait(priv->shrd, &calib_wait,
CALIBRATION_COMPLETE_NOTIFICATION,
NULL, NULL);
- ret = iwlagn_init_alive_start(priv);
+ ret = iwl_init_alive_start(trans(priv));
if (ret) {
IWL_DEBUG_INFO(priv,
"Error configuring init calibration: %d\n", ret);
goto cfg_init_calib_error;
}
- ret = iwlagn_wait_notification(priv, &calib_wait, 2 * HZ);
+ ret = iwl_wait_notification(priv->shrd, &calib_wait, 2 * HZ);
if (ret)
IWL_DEBUG_INFO(priv, "Error detecting"
" CALIBRATION_COMPLETE_NOTIFICATION: %d\n", ret);
return ret;
cfg_init_calib_error:
- iwlagn_remove_notification(priv, &calib_wait);
+ iwl_remove_notification(priv->shrd, &calib_wait);
return ret;
}
@@ -370,14 +418,16 @@ cfg_init_calib_error:
static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
{
struct iwl_priv *priv = hw->priv;
+ struct iwl_trans *trans = trans(priv);
struct sk_buff *skb;
unsigned char *rsp_data_ptr = NULL;
int status = 0, rsp_data_len = 0;
+ u32 devid;
switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
case IWL_TM_CMD_APP2DEV_GET_DEVICENAME:
- rsp_data_ptr = (unsigned char *)priv->cfg->name;
- rsp_data_len = strlen(priv->cfg->name);
+ rsp_data_ptr = (unsigned char *)cfg(priv)->name;
+ rsp_data_len = strlen(cfg(priv)->name);
skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
rsp_data_len + 20);
if (!skb) {
@@ -396,8 +446,7 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
break;
case IWL_TM_CMD_APP2DEV_LOAD_INIT_FW:
- status = iwlagn_load_ucode_wait_alive(priv, &priv->ucode_init,
- IWL_UCODE_INIT);
+ status = iwl_load_ucode_wait_alive(trans, IWL_UCODE_INIT);
if (status)
IWL_DEBUG_INFO(priv,
"Error loading init ucode: %d\n", status);
@@ -405,13 +454,11 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
case IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB:
iwl_testmode_cfg_init_calib(priv);
- iwl_trans_stop_device(trans(priv));
+ iwl_trans_stop_device(trans);
break;
case IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW:
- status = iwlagn_load_ucode_wait_alive(priv,
- &priv->ucode_rt,
- IWL_UCODE_REGULAR);
+ status = iwl_load_ucode_wait_alive(trans, IWL_UCODE_REGULAR);
if (status) {
IWL_DEBUG_INFO(priv,
"Error loading runtime ucode: %d\n", status);
@@ -423,10 +470,25 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
"Error starting the device: %d\n", status);
break;
+ case IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW:
+ iwl_scan_cancel_timeout(priv, 200);
+ iwl_trans_stop_device(trans);
+ status = iwl_load_ucode_wait_alive(trans, IWL_UCODE_WOWLAN);
+ if (status) {
+ IWL_DEBUG_INFO(priv,
+ "Error loading WOWLAN ucode: %d\n", status);
+ break;
+ }
+ status = iwl_alive_start(priv);
+ if (status)
+ IWL_DEBUG_INFO(priv,
+ "Error starting the device: %d\n", status);
+ break;
+
case IWL_TM_CMD_APP2DEV_GET_EEPROM:
- if (priv->eeprom) {
+ if (priv->shrd->eeprom) {
skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
- priv->cfg->base_params->eeprom_size + 20);
+ cfg(priv)->base_params->eeprom_size + 20);
if (!skb) {
IWL_DEBUG_INFO(priv,
"Error allocating memory\n");
@@ -435,8 +497,8 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
NLA_PUT_U32(skb, IWL_TM_ATTR_COMMAND,
IWL_TM_CMD_DEV2APP_EEPROM_RSP);
NLA_PUT(skb, IWL_TM_ATTR_EEPROM,
- priv->cfg->base_params->eeprom_size,
- priv->eeprom);
+ cfg(priv)->base_params->eeprom_size,
+ priv->shrd->eeprom);
status = cfg80211_testmode_reply(skb);
if (status < 0)
IWL_DEBUG_INFO(priv,
@@ -455,6 +517,37 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
priv->tm_fixed_rate = nla_get_u32(tb[IWL_TM_ATTR_FIXRATE]);
break;
+ case IWL_TM_CMD_APP2DEV_GET_FW_VERSION:
+ IWL_INFO(priv, "uCode version raw: 0x%x\n", priv->ucode_ver);
+
+ skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
+ if (!skb) {
+ IWL_DEBUG_INFO(priv, "Error allocating memory\n");
+ return -ENOMEM;
+ }
+ NLA_PUT_U32(skb, IWL_TM_ATTR_FW_VERSION, priv->ucode_ver);
+ status = cfg80211_testmode_reply(skb);
+ if (status < 0)
+ IWL_DEBUG_INFO(priv,
+ "Error sending msg : %d\n", status);
+ break;
+
+ case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID:
+ devid = bus_get_hw_id(bus(priv));
+ IWL_INFO(priv, "hw version: 0x%x\n", devid);
+
+ skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
+ if (!skb) {
+ IWL_DEBUG_INFO(priv, "Error allocating memory\n");
+ return -ENOMEM;
+ }
+ NLA_PUT_U32(skb, IWL_TM_ATTR_DEVICE_ID, devid);
+ status = cfg80211_testmode_reply(skb);
+ if (status < 0)
+ IWL_DEBUG_INFO(priv,
+ "Error sending msg : %d\n", status);
+ break;
+
default:
IWL_DEBUG_INFO(priv, "Unknown testmode driver command ID\n");
return -ENOSYS;
@@ -535,7 +628,7 @@ static int iwl_testmode_trace(struct ieee80211_hw *hw, struct nlattr **tb)
}
priv->testmode_trace.num_chunks =
DIV_ROUND_UP(priv->testmode_trace.buff_size,
- TRACE_CHUNK_SIZE);
+ DUMP_CHUNK_SIZE);
break;
case IWL_TM_CMD_APP2DEV_END_TRACE:
@@ -567,15 +660,15 @@ static int iwl_testmode_trace_dump(struct ieee80211_hw *hw, struct nlattr **tb,
idx = cb->args[4];
if (idx >= priv->testmode_trace.num_chunks)
return -ENOENT;
- length = TRACE_CHUNK_SIZE;
+ length = DUMP_CHUNK_SIZE;
if (((idx + 1) == priv->testmode_trace.num_chunks) &&
- (priv->testmode_trace.buff_size % TRACE_CHUNK_SIZE))
+ (priv->testmode_trace.buff_size % DUMP_CHUNK_SIZE))
length = priv->testmode_trace.buff_size %
- TRACE_CHUNK_SIZE;
+ DUMP_CHUNK_SIZE;
NLA_PUT(skb, IWL_TM_ATTR_TRACE_DUMP, length,
priv->testmode_trace.trace_addr +
- (TRACE_CHUNK_SIZE * idx));
+ (DUMP_CHUNK_SIZE * idx));
idx++;
cb->args[4] = idx;
return 0;
@@ -621,6 +714,110 @@ static int iwl_testmode_ownership(struct ieee80211_hw *hw, struct nlattr **tb)
return 0;
}
+/*
+ * This function handles the user application commands for SRAM data dump
+ *
+ * It retrieves the mandatory fields IWL_TM_ATTR_SRAM_ADDR and
+ * IWL_TM_ATTR_SRAM_SIZE to decide the memory area for SRAM data reading
+ *
+ * Several errors may be returned: -EBUSY if the SRAM data retrieved by a
+ * previous command has not yet been delivered to userspace, -ENOMSG if
+ * the mandatory fields (IWL_TM_ATTR_SRAM_ADDR, IWL_TM_ATTR_SRAM_SIZE)
+ * are missing, or -ENOMEM if the buffer allocation fails.
+ *
+ * Otherwise 0 is returned, indicating that the SRAM read succeeded.
+ *
+ * @hw: ieee80211_hw object that represents the device
+ * @tb: gnl message fields from the user space
+ */
+static int iwl_testmode_sram(struct ieee80211_hw *hw, struct nlattr **tb)
+{
+ struct iwl_priv *priv = hw->priv;
+ u32 base, ofs, size, maxsize;
+
+ if (priv->testmode_sram.sram_readed)
+ return -EBUSY;
+
+ if (!tb[IWL_TM_ATTR_SRAM_ADDR]) {
+ IWL_DEBUG_INFO(priv, "Error finding SRAM offset address\n");
+ return -ENOMSG;
+ }
+ ofs = nla_get_u32(tb[IWL_TM_ATTR_SRAM_ADDR]);
+ if (!tb[IWL_TM_ATTR_SRAM_SIZE]) {
+ IWL_DEBUG_INFO(priv, "Error finding size for SRAM reading\n");
+ return -ENOMSG;
+ }
+ size = nla_get_u32(tb[IWL_TM_ATTR_SRAM_SIZE]);
+ switch (priv->shrd->ucode_type) {
+ case IWL_UCODE_REGULAR:
+ maxsize = trans(priv)->ucode_rt.data.len;
+ break;
+ case IWL_UCODE_INIT:
+ maxsize = trans(priv)->ucode_init.data.len;
+ break;
+ case IWL_UCODE_WOWLAN:
+ maxsize = trans(priv)->ucode_wowlan.data.len;
+ break;
+ case IWL_UCODE_NONE:
+ IWL_DEBUG_INFO(priv, "Error, uCode does not been loaded\n");
+ return -ENOSYS;
+ default:
+ IWL_DEBUG_INFO(priv, "Error, unsupported uCode type\n");
+ return -ENOSYS;
+ }
+ if ((ofs + size) > maxsize) {
+ IWL_DEBUG_INFO(priv, "Invalid offset/size: out of range\n");
+ return -EINVAL;
+ }
+ priv->testmode_sram.buff_size = (size / 4) * 4;
+ priv->testmode_sram.buff_addr =
+ kmalloc(priv->testmode_sram.buff_size, GFP_KERNEL);
+ if (priv->testmode_sram.buff_addr == NULL) {
+ IWL_DEBUG_INFO(priv, "Error allocating memory\n");
+ return -ENOMEM;
+ }
+ base = 0x800000;
+ _iwl_read_targ_mem_words(bus(priv), base + ofs,
+ priv->testmode_sram.buff_addr,
+ priv->testmode_sram.buff_size / 4);
+ priv->testmode_sram.num_chunks =
+ DIV_ROUND_UP(priv->testmode_sram.buff_size, DUMP_CHUNK_SIZE);
+ priv->testmode_sram.sram_readed = true;
+ return 0;
+}
+
+static int iwl_testmode_sram_dump(struct ieee80211_hw *hw, struct nlattr **tb,
+ struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ struct iwl_priv *priv = hw->priv;
+ int idx, length;
+
+ if (priv->testmode_sram.sram_readed) {
+ idx = cb->args[4];
+ if (idx >= priv->testmode_sram.num_chunks) {
+ iwl_sram_cleanup(priv);
+ return -ENOENT;
+ }
+ length = DUMP_CHUNK_SIZE;
+ if (((idx + 1) == priv->testmode_sram.num_chunks) &&
+ (priv->testmode_sram.buff_size % DUMP_CHUNK_SIZE))
+ length = priv->testmode_sram.buff_size %
+ DUMP_CHUNK_SIZE;
+
+ NLA_PUT(skb, IWL_TM_ATTR_SRAM_DUMP, length,
+ priv->testmode_sram.buff_addr +
+ (DUMP_CHUNK_SIZE * idx));
+ idx++;
+ cb->args[4] = idx;
+ return 0;
+ } else
+ return -EFAULT;
+
+ nla_put_failure:
+ return -ENOBUFS;
+}
+
/* The testmode gnl message handler that takes the gnl message from the
* user space and parses it per the policy iwl_testmode_gnl_msg_policy, then
@@ -668,9 +865,11 @@ int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
IWL_DEBUG_INFO(priv, "testmode cmd to uCode\n");
result = iwl_testmode_ucode(hw, tb);
break;
- case IWL_TM_CMD_APP2DEV_REG_READ32:
- case IWL_TM_CMD_APP2DEV_REG_WRITE32:
- case IWL_TM_CMD_APP2DEV_REG_WRITE8:
+ case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
+ case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
+ case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
+ case IWL_TM_CMD_APP2DEV_INDIRECT_REG_READ32:
+ case IWL_TM_CMD_APP2DEV_INDIRECT_REG_WRITE32:
IWL_DEBUG_INFO(priv, "testmode cmd to register\n");
result = iwl_testmode_reg(hw, tb);
break;
@@ -680,6 +879,9 @@ int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
case IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW:
case IWL_TM_CMD_APP2DEV_GET_EEPROM:
case IWL_TM_CMD_APP2DEV_FIXRATE_REQ:
+ case IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW:
+ case IWL_TM_CMD_APP2DEV_GET_FW_VERSION:
+ case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID:
IWL_DEBUG_INFO(priv, "testmode cmd to driver\n");
result = iwl_testmode_driver(hw, tb);
break;
@@ -696,6 +898,11 @@ int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
result = iwl_testmode_ownership(hw, tb);
break;
+ case IWL_TM_CMD_APP2DEV_READ_SRAM:
+ IWL_DEBUG_INFO(priv, "testmode sram read cmd to driver\n");
+ result = iwl_testmode_sram(hw, tb);
+ break;
+
default:
IWL_DEBUG_INFO(priv, "Unknown testmode command\n");
result = -ENOSYS;
@@ -744,6 +951,10 @@ int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
IWL_DEBUG_INFO(priv, "uCode trace cmd to driver\n");
result = iwl_testmode_trace_dump(hw, tb, skb, cb);
break;
+ case IWL_TM_CMD_APP2DEV_DUMP_SRAM:
+ IWL_DEBUG_INFO(priv, "testmode sram dump cmd to driver\n");
+ result = iwl_testmode_sram_dump(hw, tb, skb, cb);
+ break;
default:
result = -EINVAL;
break;
diff --git a/drivers/net/wireless/iwlwifi/iwl-testmode.h b/drivers/net/wireless/iwlwifi/iwl-testmode.h
index b980bda4b0f8..26138f110340 100644
--- a/drivers/net/wireless/iwlwifi/iwl-testmode.h
+++ b/drivers/net/wireless/iwlwifi/iwl-testmode.h
@@ -76,9 +76,9 @@
* the actual uCode host command ID is carried with
* IWL_TM_ATTR_UCODE_CMD_ID
*
- * @IWL_TM_CMD_APP2DEV_REG_READ32:
- * @IWL_TM_CMD_APP2DEV_REG_WRITE32:
- * @IWL_TM_CMD_APP2DEV_REG_WRITE8:
+ * @IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
+ * @IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
+ * @IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
 * commands from user application to access registers
*
* @IWL_TM_CMD_APP2DEV_GET_DEVICENAME: retrieve device name
@@ -103,16 +103,30 @@
* @IWL_TM_CMD_DEV2APP_EEPROM_RSP:
* commands from kernel space to carry the eeprom response
* to user application
+ *
* @IWL_TM_CMD_APP2DEV_OWNERSHIP:
 * commands from user application to change the ownership of the uCode;
 * if the application has the ownership, only host commands from
 * testmode will be delivered to the uCode. Default owner is the driver
+ *
+ * @IWL_TM_CMD_APP2DEV_INDIRECT_REG_READ32:
+ * @IWL_TM_CMD_APP2DEV_INDIRECT_REG_WRITE32:
+ * commands from user application to indirectly access peripheral registers
+ *
+ * @IWL_TM_CMD_APP2DEV_READ_SRAM:
+ * @IWL_TM_CMD_APP2DEV_DUMP_SRAM:
+ * commands from user application to read data from SRAM
+ *
+ * @IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW: load Wake on Wireless LAN uCode image
+ * @IWL_TM_CMD_APP2DEV_GET_FW_VERSION: retrieve uCode version
+ * @IWL_TM_CMD_APP2DEV_GET_DEVICE_ID: retrieve device ID information
+ *
*/
enum iwl_tm_cmd_t {
IWL_TM_CMD_APP2DEV_UCODE = 1,
- IWL_TM_CMD_APP2DEV_REG_READ32 = 2,
- IWL_TM_CMD_APP2DEV_REG_WRITE32 = 3,
- IWL_TM_CMD_APP2DEV_REG_WRITE8 = 4,
+ IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32 = 2,
+ IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32 = 3,
+ IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8 = 4,
IWL_TM_CMD_APP2DEV_GET_DEVICENAME = 5,
IWL_TM_CMD_APP2DEV_LOAD_INIT_FW = 6,
IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB = 7,
@@ -126,7 +140,14 @@ enum iwl_tm_cmd_t {
IWL_TM_CMD_DEV2APP_UCODE_RX_PKT = 15,
IWL_TM_CMD_DEV2APP_EEPROM_RSP = 16,
IWL_TM_CMD_APP2DEV_OWNERSHIP = 17,
- IWL_TM_CMD_MAX = 18,
+ IWL_TM_CMD_APP2DEV_INDIRECT_REG_READ32 = 18,
+ IWL_TM_CMD_APP2DEV_INDIRECT_REG_WRITE32 = 19,
+ IWL_TM_CMD_APP2DEV_READ_SRAM = 20,
+ IWL_TM_CMD_APP2DEV_DUMP_SRAM = 21,
+ IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW = 22,
+ IWL_TM_CMD_APP2DEV_GET_FW_VERSION = 23,
+ IWL_TM_CMD_APP2DEV_GET_DEVICE_ID = 24,
+ IWL_TM_CMD_MAX = 25,
};
/*
@@ -196,6 +217,26 @@ enum iwl_tm_cmd_t {
* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_OWNERSHIP,
* The mandatory fields are:
* IWL_TM_ATTR_UCODE_OWNER for the new owner
+ *
+ * @IWL_TM_ATTR_SRAM_ADDR:
+ * @IWL_TM_ATTR_SRAM_SIZE:
+ * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_READ_SRAM,
+ * The mandatory fields are:
+ * IWL_TM_ATTR_SRAM_ADDR for the address in SRAM
+ * IWL_TM_ATTR_SRAM_SIZE for the size of the data to read
+ *
+ * @IWL_TM_ATTR_SRAM_DUMP:
+ * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_DUMP_SRAM,
+ * IWL_TM_ATTR_SRAM_DUMP for the data in sram
+ *
+ * @IWL_TM_ATTR_FW_VERSION:
+ * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_GET_FW_VERSION,
+ * IWL_TM_ATTR_FW_VERSION for the uCode version
+ *
+ * @IWL_TM_ATTR_DEVICE_ID:
+ * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_GET_DEVICE_ID,
+ * IWL_TM_ATTR_DEVICE_ID for the device ID information
+ *
*/
enum iwl_tm_attr_t {
IWL_TM_ATTR_NOT_APPLICABLE = 0,
@@ -213,7 +254,12 @@ enum iwl_tm_attr_t {
IWL_TM_ATTR_TRACE_DUMP = 12,
IWL_TM_ATTR_FIXRATE = 13,
IWL_TM_ATTR_UCODE_OWNER = 14,
- IWL_TM_ATTR_MAX = 15,
+ IWL_TM_ATTR_SRAM_ADDR = 15,
+ IWL_TM_ATTR_SRAM_SIZE = 16,
+ IWL_TM_ATTR_SRAM_DUMP = 17,
+ IWL_TM_ATTR_FW_VERSION = 18,
+ IWL_TM_ATTR_DEVICE_ID = 19,
+ IWL_TM_ATTR_MAX = 20,
};
/* uCode trace buffer */
@@ -221,6 +267,8 @@ enum iwl_tm_attr_t {
#define TRACE_BUFF_SIZE_MIN 0x20000
#define TRACE_BUFF_SIZE_DEF TRACE_BUFF_SIZE_MIN
#define TRACE_BUFF_PADD 0x2000
-#define TRACE_CHUNK_SIZE (PAGE_SIZE - 1024)
+
+/* Maximum data size of each dump packet */
+#define DUMP_CHUNK_SIZE (PAGE_SIZE - 1024)
#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
index 2b6756e8b8f9..f6debf91d7b5 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
@@ -219,9 +219,7 @@ struct iwl_trans_pcie {
/* INT ICT Table */
__le32 *ict_tbl;
- void *ict_tbl_vir;
dma_addr_t ict_tbl_dma;
- dma_addr_t aligned_ict_tbl_dma;
int ict_index;
u32 inta;
bool use_ict;
@@ -236,6 +234,7 @@ struct iwl_trans_pcie {
const u8 *ac_to_fifo[NUM_IWL_RXON_CTX];
const u8 *ac_to_queue[NUM_IWL_RXON_CTX];
u8 mcast_queue[NUM_IWL_RXON_CTX];
+ u8 agg_txq[IWLAGN_STATION_COUNT][IWL_MAX_TID_COUNT];
struct iwl_tx_queue *txq;
unsigned long txq_ctx_active_msk;
@@ -280,20 +279,16 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans,
void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
struct iwl_tx_queue *txq,
u16 byte_cnt);
-void iwl_trans_pcie_txq_agg_disable(struct iwl_trans *trans, int txq_id);
int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans,
- enum iwl_rxon_context_id ctx, int sta_id,
- int tid);
+ int sta_id, int tid);
void iwl_trans_set_wr_ptrs(struct iwl_trans *trans, int txq_id, u32 index);
void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
struct iwl_tx_queue *txq,
int tx_fifo_id, int scd_retry);
-int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans,
- enum iwl_rxon_context_id ctx, int sta_id,
- int tid, u16 *ssn);
+int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans, int sta_id, int tid);
void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
enum iwl_rxon_context_id ctx,
- int sta_id, int tid, int frame_limit);
+ int sta_id, int tid, int frame_limit, u16 ssn);
void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
int index, enum dma_data_direction dma_dir);
int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
@@ -354,8 +349,13 @@ static inline void iwl_set_swq_id(struct iwl_tx_queue *txq, u8 ac, u8 hwq)
txq->swq_id = (hwq << 2) | ac;
}
+static inline u8 iwl_get_queue_ac(struct iwl_tx_queue *txq)
+{
+ return txq->swq_id & 0x3;
+}
+
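Worked example of the swq_id encoding these helpers rely on (illustrative):

	/* iwl_set_swq_id(txq, ac = 2, hwq = 7) stores (7 << 2) | 2 = 0x1e;
	 * iwl_get_queue_ac(txq) then yields 0x1e & 0x3 = 2, and the hw
	 * queue is recovered as 0x1e >> 2 = 7. */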
static inline void iwl_wake_queue(struct iwl_trans *trans,
- struct iwl_tx_queue *txq)
+ struct iwl_tx_queue *txq, const char *msg)
{
u8 queue = txq->swq_id;
u8 ac = queue & 3;
@@ -363,13 +363,22 @@ static inline void iwl_wake_queue(struct iwl_trans *trans,
struct iwl_trans_pcie *trans_pcie =
IWL_TRANS_GET_PCIE_TRANS(trans);
- if (test_and_clear_bit(hwq, trans_pcie->queue_stopped))
- if (atomic_dec_return(&trans_pcie->queue_stop_count[ac]) <= 0)
+ if (test_and_clear_bit(hwq, trans_pcie->queue_stopped)) {
+ if (atomic_dec_return(&trans_pcie->queue_stop_count[ac]) <= 0) {
iwl_wake_sw_queue(priv(trans), ac);
+ IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d ac %d. %s",
+ hwq, ac, msg);
+ } else {
+ IWL_DEBUG_TX_QUEUES(trans, "Don't wake hwq %d ac %d"
+ " stop count %d. %s",
+ hwq, ac, atomic_read(&trans_pcie->
+ queue_stop_count[ac]), msg);
+ }
+ }
}
static inline void iwl_stop_queue(struct iwl_trans *trans,
- struct iwl_tx_queue *txq)
+ struct iwl_tx_queue *txq, const char *msg)
{
u8 queue = txq->swq_id;
u8 ac = queue & 3;
@@ -377,9 +386,23 @@ static inline void iwl_stop_queue(struct iwl_trans *trans,
struct iwl_trans_pcie *trans_pcie =
IWL_TRANS_GET_PCIE_TRANS(trans);
- if (!test_and_set_bit(hwq, trans_pcie->queue_stopped))
- if (atomic_inc_return(&trans_pcie->queue_stop_count[ac]) > 0)
+ if (!test_and_set_bit(hwq, trans_pcie->queue_stopped)) {
+ if (atomic_inc_return(&trans_pcie->queue_stop_count[ac]) > 0) {
iwl_stop_sw_queue(priv(trans), ac);
+ IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d ac %d"
+ " stop count %d. %s",
+ hwq, ac, atomic_read(&trans_pcie->
+ queue_stop_count[ac]), msg);
+ } else {
+ IWL_DEBUG_TX_QUEUES(trans, "Don't stop hwq %d ac %d"
+ " stop count %d. %s",
+ hwq, ac, atomic_read(&trans_pcie->
+ queue_stop_count[ac]), msg);
+ }
+ } else {
+ IWL_DEBUG_TX_QUEUES(trans, "stop hwq %d, but it is stopped/ %s",
+ hwq, msg);
+ }
}
#ifdef ieee80211_stop_queue
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
index 374c68cc1d70..752493f00406 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
@@ -594,8 +594,8 @@ static void iwl_dump_nic_error_log(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie =
IWL_TRANS_GET_PCIE_TRANS(trans);
- base = priv->device_pointers.error_event_table;
- if (priv->ucode_type == IWL_UCODE_INIT) {
+ base = trans->shrd->device_pointers.error_event_table;
+ if (trans->shrd->ucode_type == IWL_UCODE_INIT) {
if (!base)
base = priv->init_errlog_ptr;
} else {
@@ -607,7 +607,7 @@ static void iwl_dump_nic_error_log(struct iwl_trans *trans)
IWL_ERR(trans,
"Not valid error log pointer 0x%08X for %s uCode\n",
base,
- (priv->ucode_type == IWL_UCODE_INIT)
+ (trans->shrd->ucode_type == IWL_UCODE_INIT)
? "Init" : "RT");
return;
}
@@ -648,6 +648,21 @@ static void iwl_dump_nic_error_log(struct iwl_trans *trans)
IWL_ERR(trans, "0x%08X | hw version\n", table.hw_ver);
IWL_ERR(trans, "0x%08X | board version\n", table.brd_ver);
IWL_ERR(trans, "0x%08X | hcmd\n", table.hcmd);
+
+ IWL_ERR(trans, "0x%08X | isr0\n", table.isr0);
+ IWL_ERR(trans, "0x%08X | isr1\n", table.isr1);
+ IWL_ERR(trans, "0x%08X | isr2\n", table.isr2);
+ IWL_ERR(trans, "0x%08X | isr3\n", table.isr3);
+ IWL_ERR(trans, "0x%08X | isr4\n", table.isr4);
+ IWL_ERR(trans, "0x%08X | isr_pref\n", table.isr_pref);
+ IWL_ERR(trans, "0x%08X | wait_event\n", table.wait_event);
+ IWL_ERR(trans, "0x%08X | l2p_control\n", table.l2p_control);
+ IWL_ERR(trans, "0x%08X | l2p_duration\n", table.l2p_duration);
+ IWL_ERR(trans, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid);
+ IWL_ERR(trans, "0x%08X | l2p_addr_match\n", table.l2p_addr_match);
+ IWL_ERR(trans, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
+ IWL_ERR(trans, "0x%08X | timestamp\n", table.u_timestamp);
+ IWL_ERR(trans, "0x%08X | flow_handler\n", table.flow_handler);
}
/**
@@ -657,7 +672,7 @@ static void iwl_irq_handle_error(struct iwl_trans *trans)
{
struct iwl_priv *priv = priv(trans);
/* W/A for WiFi/WiMAX coex and WiMAX own the RF */
- if (priv->cfg->internal_wimax_coex &&
+ if (cfg(priv)->internal_wimax_coex &&
(!(iwl_read_prph(bus(trans), APMG_CLK_CTRL_REG) &
APMS_CLK_VAL_MRB_FUNC_MODE) ||
(iwl_read_prph(bus(trans), APMG_PS_CTRL_REG) &
@@ -709,8 +724,8 @@ static int iwl_print_event_log(struct iwl_trans *trans, u32 start_idx,
if (num_events == 0)
return pos;
- base = priv->device_pointers.log_event_table;
- if (priv->ucode_type == IWL_UCODE_INIT) {
+ base = trans->shrd->device_pointers.log_event_table;
+ if (trans->shrd->ucode_type == IWL_UCODE_INIT) {
if (!base)
base = priv->init_evtlog_ptr;
} else {
@@ -823,8 +838,8 @@ int iwl_dump_nic_event_log(struct iwl_trans *trans, bool full_log,
size_t bufsz = 0;
struct iwl_priv *priv = priv(trans);
- base = priv->device_pointers.log_event_table;
- if (priv->ucode_type == IWL_UCODE_INIT) {
+ base = trans->shrd->device_pointers.log_event_table;
+ if (trans->shrd->ucode_type == IWL_UCODE_INIT) {
logsize = priv->init_evtlog_size;
if (!base)
base = priv->init_evtlog_ptr;
@@ -838,7 +853,7 @@ int iwl_dump_nic_event_log(struct iwl_trans *trans, bool full_log,
IWL_ERR(trans,
"Invalid event log pointer 0x%08X for %s uCode\n",
base,
- (priv->ucode_type == IWL_UCODE_INIT)
+ (trans->shrd->ucode_type == IWL_UCODE_INIT)
? "Init" : "RT");
return -EINVAL;
}
@@ -1108,7 +1123,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
isr_stats->tx++;
handled |= CSR_INT_BIT_FH_TX;
/* Wake up uCode load routine, now that load is complete */
- priv(trans)->ucode_write_complete = 1;
+ trans->ucode_write_complete = 1;
wake_up(&trans->shrd->wait_command_queue);
}
@@ -1136,7 +1151,11 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
* ICT functions
*
******************************************************************************/
-#define ICT_COUNT (PAGE_SIZE/sizeof(u32))
+
+/* a device (PCI-E) page is 4096 bytes long */
+#define ICT_SHIFT 12
+#define ICT_SIZE (1 << ICT_SHIFT)
+#define ICT_COUNT (ICT_SIZE / sizeof(u32))
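Worked numbers for the definitions above (sketch): ICT_SIZE = 1 << 12 = 4096 bytes, so ICT_COUNT = 4096 / sizeof(u32) = 1024 interrupt-cause entries. The base address programmed into CSR_DRAM_INT_TBL_REG further down is ict_tbl_dma >> ICT_SHIFT, which is why the allocation below must be ICT_SIZE-aligned.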
/* Free dram table */
void iwl_free_isr_ict(struct iwl_trans *trans)
@@ -1144,21 +1163,19 @@ void iwl_free_isr_ict(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie =
IWL_TRANS_GET_PCIE_TRANS(trans);
- if (trans_pcie->ict_tbl_vir) {
- dma_free_coherent(bus(trans)->dev,
- (sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
- trans_pcie->ict_tbl_vir,
+ if (trans_pcie->ict_tbl) {
+ dma_free_coherent(bus(trans)->dev, ICT_SIZE,
+ trans_pcie->ict_tbl,
trans_pcie->ict_tbl_dma);
- trans_pcie->ict_tbl_vir = NULL;
- memset(&trans_pcie->ict_tbl_dma, 0,
- sizeof(trans_pcie->ict_tbl_dma));
- memset(&trans_pcie->aligned_ict_tbl_dma, 0,
- sizeof(trans_pcie->aligned_ict_tbl_dma));
+ trans_pcie->ict_tbl = NULL;
+ trans_pcie->ict_tbl_dma = 0;
}
}
-/* allocate dram shared table it is a PAGE_SIZE aligned
+/*
+ * allocate dram shared table, it is an aligned memory
+ * block of ICT_SIZE.
* also reset all data related to ICT table interrupt.
*/
int iwl_alloc_isr_ict(struct iwl_trans *trans)
@@ -1166,36 +1183,26 @@ int iwl_alloc_isr_ict(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie =
IWL_TRANS_GET_PCIE_TRANS(trans);
- /* allocate shrared data table */
- trans_pcie->ict_tbl_vir =
- dma_alloc_coherent(bus(trans)->dev,
- (sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
- &trans_pcie->ict_tbl_dma, GFP_KERNEL);
- if (!trans_pcie->ict_tbl_vir)
+ trans_pcie->ict_tbl =
+ dma_alloc_coherent(bus(trans)->dev, ICT_SIZE,
+ &trans_pcie->ict_tbl_dma,
+ GFP_KERNEL);
+ if (!trans_pcie->ict_tbl)
return -ENOMEM;
- /* align table to PAGE_SIZE boundary */
- trans_pcie->aligned_ict_tbl_dma =
- ALIGN(trans_pcie->ict_tbl_dma, PAGE_SIZE);
-
- IWL_DEBUG_ISR(trans, "ict dma addr %Lx dma aligned %Lx diff %d\n",
- (unsigned long long)trans_pcie->ict_tbl_dma,
- (unsigned long long)trans_pcie->aligned_ict_tbl_dma,
- (int)(trans_pcie->aligned_ict_tbl_dma -
- trans_pcie->ict_tbl_dma));
+ /* just an API sanity check ... it is guaranteed to be aligned */
+ if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
+ iwl_free_isr_ict(trans);
+ return -EINVAL;
+ }
- trans_pcie->ict_tbl = trans_pcie->ict_tbl_vir +
- (trans_pcie->aligned_ict_tbl_dma -
- trans_pcie->ict_tbl_dma);
+ IWL_DEBUG_ISR(trans, "ict dma addr %Lx\n",
+ (unsigned long long)trans_pcie->ict_tbl_dma);
- IWL_DEBUG_ISR(trans, "ict vir addr %p vir aligned %p diff %d\n",
- trans_pcie->ict_tbl, trans_pcie->ict_tbl_vir,
- (int)(trans_pcie->aligned_ict_tbl_dma -
- trans_pcie->ict_tbl_dma));
+ IWL_DEBUG_ISR(trans, "ict vir addr %p\n", trans_pcie->ict_tbl);
/* reset table and index to all 0 */
- memset(trans_pcie->ict_tbl_vir, 0,
- (sizeof(u32) * ICT_COUNT) + PAGE_SIZE);
+ memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
trans_pcie->ict_index = 0;
/* add periodic RX interrupt */
@@ -1213,23 +1220,20 @@ int iwl_reset_ict(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie =
IWL_TRANS_GET_PCIE_TRANS(trans);
- if (!trans_pcie->ict_tbl_vir)
+ if (!trans_pcie->ict_tbl)
return 0;
spin_lock_irqsave(&trans->shrd->lock, flags);
iwl_disable_interrupts(trans);
- memset(&trans_pcie->ict_tbl[0], 0, sizeof(u32) * ICT_COUNT);
+ memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
- val = trans_pcie->aligned_ict_tbl_dma >> PAGE_SHIFT;
+ val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;
val |= CSR_DRAM_INT_TBL_ENABLE;
val |= CSR_DRAM_INIT_TBL_WRAP_CHECK;
- IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%X "
- "aligned dma address %Lx\n",
- val,
- (unsigned long long)trans_pcie->aligned_ict_tbl_dma);
+ IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val);
iwl_write32(bus(trans), CSR_DRAM_INT_TBL_REG, val);
trans_pcie->use_ict = true;
@@ -1266,6 +1270,8 @@ static irqreturn_t iwl_isr(int irq, void *data)
if (!trans)
return IRQ_NONE;
+ trace_iwlwifi_dev_irq(priv(trans));
+
trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
spin_lock_irqsave(&trans->shrd->lock, flags);
@@ -1340,6 +1346,7 @@ irqreturn_t iwl_isr_ict(int irq, void *data)
struct iwl_trans_pcie *trans_pcie;
u32 inta, inta_mask;
u32 val = 0;
+ u32 read;
unsigned long flags;
if (!trans)
@@ -1353,6 +1360,8 @@ irqreturn_t iwl_isr_ict(int irq, void *data)
if (!trans_pcie->use_ict)
return iwl_isr(irq, data);
+ trace_iwlwifi_dev_irq(priv(trans));
+
spin_lock_irqsave(&trans->shrd->lock, flags);
/* Disable (but don't clear!) interrupts here to avoid
@@ -1367,24 +1376,29 @@ irqreturn_t iwl_isr_ict(int irq, void *data)
/* Ignore interrupt if there's nothing in NIC to service.
* This may be due to IRQ shared with another device,
* or due to sporadic interrupts thrown from our NIC. */
- if (!trans_pcie->ict_tbl[trans_pcie->ict_index]) {
+ read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
+ trace_iwlwifi_dev_ict_read(priv(trans), trans_pcie->ict_index, read);
+ if (!read) {
IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
goto none;
}
- /* read all entries that not 0 start with ict_index */
- while (trans_pcie->ict_tbl[trans_pcie->ict_index]) {
-
- val |= le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
+ /*
+ * Collect all entries up to the first 0, starting from ict_index;
+ * note we already read at ict_index.
+ */
+ do {
+ val |= read;
IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
- trans_pcie->ict_index,
- le32_to_cpu(
- trans_pcie->ict_tbl[trans_pcie->ict_index]));
+ trans_pcie->ict_index, read);
trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
trans_pcie->ict_index =
iwl_queue_inc_wrap(trans_pcie->ict_index, ICT_COUNT);
- }
+ read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
+ trace_iwlwifi_dev_ict_read(priv(trans), trans_pcie->ict_index,
+ read);
+ } while (read);
/* We should not get this value, just ignore it. */
if (val == 0xffffffff)
@@ -1411,7 +1425,7 @@ irqreturn_t iwl_isr_ict(int irq, void *data)
if (likely(inta))
tasklet_schedule(&trans_pcie->irq_tasklet);
else if (test_bit(STATUS_INT_ENABLED, &trans->shrd->status) &&
- !trans_pcie->inta) {
+ !trans_pcie->inta) {
	/* Allow interrupt if it was disabled by this handler and
	 * no tasklet was scheduled. We should not enable the interrupt,
	 * the tasklet will enable it.
@@ -1427,7 +1441,7 @@ irqreturn_t iwl_isr_ict(int irq, void *data)
* only Re-enable if disabled by irq.
*/
if (test_bit(STATUS_INT_ENABLED, &trans->shrd->status) &&
- !trans_pcie->inta)
+ !trans_pcie->inta)
iwl_enable_interrupts(trans);
spin_unlock_irqrestore(&trans->shrd->lock, flags);
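
For readers following the ICT hunk above: the handler now reads the table slot once before the loop, then keeps OR-ing non-zero entries while clearing each slot and advancing the wrapped index until it hits a zero. The stand-alone sketch below models that accumulation over a plain host-endian array; ICT_COUNT, the table contents and the demo main() are illustrative assumptions, not driver code.

/*
 * Illustrative model of the ICT accumulation loop -- a user-space sketch.
 */
#include <stdint.h>
#include <stdio.h>

#define ICT_COUNT 64	/* assumed table size for the demo */

static uint32_t ict_tbl[ICT_COUNT];	/* driver: __le32 *, DMA-written by the NIC */
static unsigned int ict_index;

/* OR together all non-zero entries starting at ict_index, zeroing each slot. */
static uint32_t ict_collect(void)
{
	uint32_t val = 0;
	uint32_t read = ict_tbl[ict_index];	/* driver: le32_to_cpu(...) */

	while (read) {
		val |= read;
		ict_tbl[ict_index] = 0;			/* slot consumed */
		ict_index = (ict_index + 1) % ICT_COUNT;/* driver: iwl_queue_inc_wrap() */
		read = ict_tbl[ict_index];
	}
	return val;	/* 0 means "nothing to service", like the goto none path */
}

int main(void)
{
	ict_tbl[0] = 0x08;	/* pretend the NIC latched two interrupt causes */
	ict_tbl[1] = 0x80;
	printf("accumulated causes: 0x%x\n", (unsigned)ict_collect());	/* 0x88 */
	return 0;
}
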
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
index 4a0c95302a7e..bd29568177e6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
@@ -408,6 +408,7 @@ static void iwlagn_tx_queue_stop_scheduler(struct iwl_trans *trans, u16 txq_id)
void iwl_trans_set_wr_ptrs(struct iwl_trans *trans,
int txq_id, u32 index)
{
+ IWL_DEBUG_TX_QUEUES(trans, "Q %d WrPtr: %d", txq_id, index & 0xff);
iwl_write_direct32(bus(trans), HBUS_TARG_WRPTR,
(index & 0xff) | (txq_id << 8));
iwl_write_prph(bus(trans), SCD_QUEUE_RDPTR(txq_id), index);
@@ -430,7 +431,7 @@ void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
txq->sched_retry = scd_retry;
- IWL_DEBUG_INFO(trans, "%s %s Queue %d on FIFO %d\n",
+ IWL_DEBUG_TX_QUEUES(trans, "%s %s Queue %d on FIFO %d\n",
active ? "Activate" : "Deactivate",
scd_retry ? "BA" : "AC/CMD", txq_id, tx_fifo_id);
}
@@ -446,14 +447,21 @@ static inline int get_fifo_from_tid(struct iwl_trans_pcie *trans_pcie,
return -EINVAL;
}
+static inline bool is_agg_txqid_valid(struct iwl_trans *trans, int txq_id)
+{
+ if (txq_id < IWLAGN_FIRST_AMPDU_QUEUE)
+ return false;
+ return txq_id < (IWLAGN_FIRST_AMPDU_QUEUE +
+ hw_params(trans).num_ampdu_queues);
+}
+
void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
enum iwl_rxon_context_id ctx, int sta_id,
- int tid, int frame_limit)
+ int tid, int frame_limit, u16 ssn)
{
- int tx_fifo, txq_id, ssn_idx;
+ int tx_fifo, txq_id;
u16 ra_tid;
unsigned long flags;
- struct iwl_tid_data *tid_data;
struct iwl_trans_pcie *trans_pcie =
IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -469,11 +477,15 @@ void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
return;
}
- spin_lock_irqsave(&trans->shrd->sta_lock, flags);
- tid_data = &trans->shrd->tid_data[sta_id][tid];
- ssn_idx = SEQ_TO_SN(tid_data->seq_number);
- txq_id = tid_data->agg.txq_id;
- spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
+ txq_id = trans_pcie->agg_txq[sta_id][tid];
+ if (WARN_ON_ONCE(is_agg_txqid_valid(trans, txq_id) == false)) {
+ IWL_ERR(trans,
+ "queue number out of range: %d, must be %d to %d\n",
+ txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
+ IWLAGN_FIRST_AMPDU_QUEUE +
+ hw_params(trans).num_ampdu_queues - 1);
+ return;
+ }
ra_tid = BUILD_RAxTID(sta_id, tid);
@@ -493,9 +505,9 @@ void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
/* Place first TFD at index corresponding to start sequence number.
* Assumes that ssn_idx is valid (!= 0xFFF) */
- trans_pcie->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
- trans_pcie->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
- iwl_trans_set_wr_ptrs(trans, txq_id, ssn_idx);
+ trans_pcie->txq[txq_id].q.read_ptr = (ssn & 0xff);
+ trans_pcie->txq[txq_id].q.write_ptr = (ssn & 0xff);
+ iwl_trans_set_wr_ptrs(trans, txq_id, ssn);
/* Set up Tx window size and frame limit for this queue */
iwl_write_targ_mem(bus(trans), trans_pcie->scd_base_addr +
@@ -539,12 +551,9 @@ static int iwlagn_txq_ctx_activate_free(struct iwl_trans *trans)
}
int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans,
- enum iwl_rxon_context_id ctx, int sta_id,
- int tid, u16 *ssn)
+ int sta_id, int tid)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_tid_data *tid_data;
- unsigned long flags;
int txq_id;
txq_id = iwlagn_txq_ctx_activate_free(trans);
@@ -553,117 +562,39 @@ int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans,
return -ENXIO;
}
- spin_lock_irqsave(&trans->shrd->sta_lock, flags);
- tid_data = &trans->shrd->tid_data[sta_id][tid];
- *ssn = SEQ_TO_SN(tid_data->seq_number);
- tid_data->agg.txq_id = txq_id;
+ trans_pcie->agg_txq[sta_id][tid] = txq_id;
iwl_set_swq_id(&trans_pcie->txq[txq_id], get_ac_from_tid(tid), txq_id);
- tid_data = &trans->shrd->tid_data[sta_id][tid];
- if (tid_data->tfds_in_queue == 0) {
- IWL_DEBUG_HT(trans, "HW queue is empty\n");
- tid_data->agg.state = IWL_AGG_ON;
- iwl_start_tx_ba_trans_ready(priv(trans), ctx, sta_id, tid);
- } else {
- IWL_DEBUG_HT(trans, "HW queue is NOT empty: %d packets in HW"
- "queue\n", tid_data->tfds_in_queue);
- tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
- }
- spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
-
return 0;
}
-void iwl_trans_pcie_txq_agg_disable(struct iwl_trans *trans, int txq_id)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- iwlagn_tx_queue_stop_scheduler(trans, txq_id);
-
- iwl_clear_bits_prph(bus(trans), SCD_AGGR_SEL, (1 << txq_id));
-
- trans_pcie->txq[txq_id].q.read_ptr = 0;
- trans_pcie->txq[txq_id].q.write_ptr = 0;
- /* supposes that ssn_idx is valid (!= 0xFFF) */
- iwl_trans_set_wr_ptrs(trans, txq_id, 0);
-
- iwl_clear_bits_prph(bus(trans), SCD_INTERRUPT_MASK, (1 << txq_id));
- iwl_txq_ctx_deactivate(trans_pcie, txq_id);
- iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id], 0, 0);
-}
-
-int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans,
- enum iwl_rxon_context_id ctx, int sta_id,
- int tid)
+int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans, int sta_id, int tid)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- unsigned long flags;
- int read_ptr, write_ptr;
- struct iwl_tid_data *tid_data;
- int txq_id;
-
- spin_lock_irqsave(&trans->shrd->sta_lock, flags);
+ u8 txq_id = trans_pcie->agg_txq[sta_id][tid];
- tid_data = &trans->shrd->tid_data[sta_id][tid];
- txq_id = tid_data->agg.txq_id;
-
- if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
- (IWLAGN_FIRST_AMPDU_QUEUE +
- hw_params(trans).num_ampdu_queues <= txq_id)) {
+ if (WARN_ON_ONCE(is_agg_txqid_valid(trans, txq_id) == false)) {
IWL_ERR(trans,
"queue number out of range: %d, must be %d to %d\n",
txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
IWLAGN_FIRST_AMPDU_QUEUE +
hw_params(trans).num_ampdu_queues - 1);
- spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
return -EINVAL;
}
- switch (trans->shrd->tid_data[sta_id][tid].agg.state) {
- case IWL_EMPTYING_HW_QUEUE_ADDBA:
- /*
- * This can happen if the peer stops aggregation
- * again before we've had a chance to drain the
- * queue we selected previously, i.e. before the
- * session was really started completely.
- */
- IWL_DEBUG_HT(trans, "AGG stop before setup done\n");
- goto turn_off;
- case IWL_AGG_ON:
- break;
- default:
- IWL_WARN(trans, "Stopping AGG while state not ON "
- "or starting for %d on %d (%d)\n", sta_id, tid,
- trans->shrd->tid_data[sta_id][tid].agg.state);
- spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
- return 0;
- }
-
- write_ptr = trans_pcie->txq[txq_id].q.write_ptr;
- read_ptr = trans_pcie->txq[txq_id].q.read_ptr;
-
- /* The queue is not empty */
- if (write_ptr != read_ptr) {
- IWL_DEBUG_HT(trans, "Stopping a non empty AGG HW QUEUE\n");
- trans->shrd->tid_data[sta_id][tid].agg.state =
- IWL_EMPTYING_HW_QUEUE_DELBA;
- spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
- return 0;
- }
-
- IWL_DEBUG_HT(trans, "HW queue is empty\n");
-turn_off:
- trans->shrd->tid_data[sta_id][tid].agg.state = IWL_AGG_OFF;
-
- /* do not restore/save irqs */
- spin_unlock(&trans->shrd->sta_lock);
- spin_lock(&trans->shrd->lock);
-
- iwl_trans_pcie_txq_agg_disable(trans, txq_id);
+ iwlagn_tx_queue_stop_scheduler(trans, txq_id);
- spin_unlock_irqrestore(&trans->shrd->lock, flags);
+ iwl_clear_bits_prph(bus(trans), SCD_AGGR_SEL, (1 << txq_id));
- iwl_stop_tx_ba_trans_ready(priv(trans), ctx, sta_id, tid);
+ trans_pcie->agg_txq[sta_id][tid] = 0;
+ trans_pcie->txq[txq_id].q.read_ptr = 0;
+ trans_pcie->txq[txq_id].q.write_ptr = 0;
+ /* supposes that ssn_idx is valid (!= 0xFFF) */
+ iwl_trans_set_wr_ptrs(trans, txq_id, 0);
+ iwl_clear_bits_prph(bus(trans), SCD_INTERRUPT_MASK, (1 << txq_id));
+ iwl_txq_ctx_deactivate(trans_pcie, txq_id);
+ iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id], 0, 0);
return 0;
}
@@ -982,7 +913,8 @@ static int iwl_send_cmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
ret = iwl_enqueue_hcmd(trans, cmd);
if (ret < 0) {
- IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
+ IWL_DEBUG_QUIET_RFKILL(trans,
+ "Error sending %s: enqueue_hcmd failed: %d\n",
get_cmd_string(cmd->id), ret);
return ret;
}
@@ -1000,6 +932,20 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
get_cmd_string(cmd->id));
+ if (test_bit(STATUS_EXIT_PENDING, &trans->shrd->status))
+ return -EBUSY;
+
+
+ if (test_bit(STATUS_RF_KILL_HW, &trans->shrd->status)) {
+ IWL_ERR(trans, "Command %s aborted: RF KILL Switch\n",
+ get_cmd_string(cmd->id));
+ return -ECANCELED;
+ }
+ if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
+ IWL_ERR(trans, "Command %s failed: FW Error\n",
+ get_cmd_string(cmd->id));
+ return -EIO;
+ }
set_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
get_cmd_string(cmd->id));
@@ -1008,7 +954,8 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
if (cmd_idx < 0) {
ret = cmd_idx;
clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
- IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
+ IWL_DEBUG_QUIET_RFKILL(trans,
+ "Error sending %s: enqueue_hcmd failed: %d\n",
get_cmd_string(cmd->id), ret);
return ret;
}
@@ -1022,12 +969,12 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
&trans_pcie->txq[trans->shrd->cmd_queue];
struct iwl_queue *q = &txq->q;
- IWL_ERR(trans,
+ IWL_DEBUG_QUIET_RFKILL(trans,
"Error sending %s: time out after %dms.\n",
get_cmd_string(cmd->id),
jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
- IWL_ERR(trans,
+ IWL_DEBUG_QUIET_RFKILL(trans,
"Current CMD queue read_ptr %d write_ptr %d\n",
q->read_ptr, q->write_ptr);
@@ -1039,18 +986,6 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
}
}
- if (test_bit(STATUS_RF_KILL_HW, &trans->shrd->status)) {
- IWL_ERR(trans, "Command %s aborted: RF KILL Switch\n",
- get_cmd_string(cmd->id));
- ret = -ECANCELED;
- goto fail;
- }
- if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
- IWL_ERR(trans, "Command %s failed: FW Error\n",
- get_cmd_string(cmd->id));
- ret = -EIO;
- goto fail;
- }
if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
IWL_ERR(trans, "Error: Response NULL in '%s'\n",
get_cmd_string(cmd->id));
@@ -1071,7 +1006,7 @@ cancel:
trans_pcie->txq[trans->shrd->cmd_queue].meta[cmd_idx].flags &=
~CMD_WANT_SKB;
}
-fail:
+
if (cmd->reply_page) {
iwl_free_pages(trans->shrd, cmd->reply_page);
cmd->reply_page = 0;
@@ -1115,9 +1050,6 @@ int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
return 0;
}
- IWL_DEBUG_TX_REPLY(trans, "reclaim: [%d, %d, %d]\n", txq_id,
- q->read_ptr, index);
-
if (WARN_ON(!skb_queue_empty(skbs)))
return 0;
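
The aggregation rework in this file replaces the per-TID agg state lookups with a flat sta/tid -> queue map (trans_pcie->agg_txq) guarded by is_agg_txqid_valid(). A minimal user-space sketch of that map and range check follows; the queue numbers and array sizes are assumptions standing in for IWLAGN_FIRST_AMPDU_QUEUE and hw_params().

/*
 * Minimal sketch of the sta/tid -> aggregation queue map and range check;
 * the numeric constants are demo assumptions, not the hardware values.
 */
#include <stdbool.h>
#include <stdio.h>

#define FIRST_AMPDU_QUEUE	10	/* assumed stand-in for IWLAGN_FIRST_AMPDU_QUEUE */
#define NUM_AMPDU_QUEUES	8	/* assumed stand-in for num_ampdu_queues */
#define MAX_STA			16
#define MAX_TID			8

static unsigned char agg_txq[MAX_STA][MAX_TID];	/* mirrors trans_pcie->agg_txq */

static bool is_agg_txqid_valid(int txq_id)
{
	if (txq_id < FIRST_AMPDU_QUEUE)
		return false;
	return txq_id < FIRST_AMPDU_QUEUE + NUM_AMPDU_QUEUES;
}

int main(void)
{
	int txq_id;

	agg_txq[3][2] = 12;		/* tx_agg_alloc() stores the queue it picked */
	txq_id = agg_txq[3][2];		/* tx_agg_setup()/tx_agg_disable() look it up */

	printf("txq %d valid: %d\n", txq_id, is_agg_txqid_valid(txq_id));	/* 1 */
	printf("txq %d valid: %d\n", 3, is_agg_txqid_valid(3));			/* 0 */
	return 0;
}
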
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
index ce918980e977..67d6e324e26f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
@@ -88,18 +88,16 @@ static int iwl_trans_rx_alloc(struct iwl_trans *trans)
return -EINVAL;
/* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
- rxq->bd = dma_alloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
- &rxq->bd_dma, GFP_KERNEL);
+ rxq->bd = dma_zalloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
+ &rxq->bd_dma, GFP_KERNEL);
if (!rxq->bd)
goto err_bd;
- memset(rxq->bd, 0, sizeof(__le32) * RX_QUEUE_SIZE);
/*Allocate the driver's pointer to receive buffer status */
- rxq->rb_stts = dma_alloc_coherent(dev, sizeof(*rxq->rb_stts),
- &rxq->rb_stts_dma, GFP_KERNEL);
+ rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
+ &rxq->rb_stts_dma, GFP_KERNEL);
if (!rxq->rb_stts)
goto err_rb_stts;
- memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
return 0;
@@ -1044,7 +1042,7 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_cmd *dev_cmd, enum iwl_rxon_context_id ctx,
- u8 sta_id)
+ u8 sta_id, u8 tid)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
@@ -1058,13 +1056,12 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
dma_addr_t txcmd_phys;
dma_addr_t scratch_phys;
u16 len, firstlen, secondlen;
- u16 seq_number = 0;
u8 wait_write_ptr = 0;
u8 txq_id;
- u8 tid = 0;
bool is_agg = false;
__le16 fc = hdr->frame_control;
u8 hdr_len = ieee80211_hdrlen(fc);
+ u16 __maybe_unused wifi_seq;
/*
* Send this frame after DTIM -- there's a special queue
@@ -1085,36 +1082,28 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
txq_id =
trans_pcie->ac_to_queue[ctx][skb_get_queue_mapping(skb)];
- if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
- u8 *qc = NULL;
- struct iwl_tid_data *tid_data;
- qc = ieee80211_get_qos_ctl(hdr);
- tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
- tid_data = &trans->shrd->tid_data[sta_id][tid];
-
- if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
- return -1;
-
- seq_number = tid_data->seq_number;
- seq_number &= IEEE80211_SCTL_SEQ;
- hdr->seq_ctrl = hdr->seq_ctrl &
- cpu_to_le16(IEEE80211_SCTL_FRAG);
- hdr->seq_ctrl |= cpu_to_le16(seq_number);
- seq_number += 0x10;
- /* aggregation is on for this <sta,tid> */
- if (info->flags & IEEE80211_TX_CTL_AMPDU) {
- WARN_ON_ONCE(tid_data->agg.state != IWL_AGG_ON);
- txq_id = tid_data->agg.txq_id;
- is_agg = true;
- }
+ /* aggregation is on for this <sta,tid> */
+ if (info->flags & IEEE80211_TX_CTL_AMPDU) {
+ WARN_ON(tid >= IWL_MAX_TID_COUNT);
+ txq_id = trans_pcie->agg_txq[sta_id][tid];
+ is_agg = true;
}
- /* Copy MAC header from skb into command buffer */
- memcpy(tx_cmd->hdr, hdr, hdr_len);
-
txq = &trans_pcie->txq[txq_id];
q = &txq->q;
+ /* In AGG mode, the index in the ring must correspond to the WiFi
+	 * sequence number. This is a HW requirement to help the SCD parse
+ * the BA.
+ * Check here that the packets are in the right place on the ring.
+ */
+#ifdef CONFIG_IWLWIFI_DEBUG
+ wifi_seq = SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
+ WARN_ONCE(is_agg && ((wifi_seq & 0xff) != q->write_ptr),
+ "Q: %d WiFi Seq %d tfdNum %d",
+ txq_id, wifi_seq, q->write_ptr);
+#endif
+
/* Set up driver data for this TFD */
txq->skbs[q->write_ptr] = skb;
txq->cmd[q->write_ptr] = dev_cmd;
@@ -1197,9 +1186,7 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
iwl_print_hex_dump(trans, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
/* Set up entry for this TFD in Tx byte-count array */
- if (is_agg)
- iwl_trans_txq_update_byte_cnt_tbl(trans, txq,
- le16_to_cpu(tx_cmd->len));
+ iwl_trans_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
dma_sync_single_for_device(bus(trans)->dev, txcmd_phys, firstlen,
DMA_BIDIRECTIONAL);
@@ -1214,13 +1201,6 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
iwl_txq_update_write_ptr(trans, txq);
- if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
- trans->shrd->tid_data[sta_id][tid].tfds_in_queue++;
- if (!ieee80211_has_morefrags(fc))
- trans->shrd->tid_data[sta_id][tid].seq_number =
- seq_number;
- }
-
/*
* At this point the frame is "transmitted" successfully
* and we will get a TX status notification eventually,
@@ -1232,7 +1212,7 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
txq->need_update = 1;
iwl_txq_update_write_ptr(trans, txq);
} else {
- iwl_stop_queue(trans, txq);
+ iwl_stop_queue(trans, txq, "Queue is full");
}
}
return 0;
@@ -1269,101 +1249,48 @@ static int iwl_trans_pcie_request_irq(struct iwl_trans *trans)
return 0;
}
-static int iwlagn_txq_check_empty(struct iwl_trans *trans,
- int sta_id, u8 tid, int txq_id)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_queue *q = &trans_pcie->txq[txq_id].q;
- struct iwl_tid_data *tid_data = &trans->shrd->tid_data[sta_id][tid];
-
- lockdep_assert_held(&trans->shrd->sta_lock);
-
- switch (trans->shrd->tid_data[sta_id][tid].agg.state) {
- case IWL_EMPTYING_HW_QUEUE_DELBA:
- /* We are reclaiming the last packet of the */
- /* aggregated HW queue */
- if ((txq_id == tid_data->agg.txq_id) &&
- (q->read_ptr == q->write_ptr)) {
- IWL_DEBUG_HT(trans,
- "HW queue empty: continue DELBA flow\n");
- iwl_trans_pcie_txq_agg_disable(trans, txq_id);
- tid_data->agg.state = IWL_AGG_OFF;
- iwl_stop_tx_ba_trans_ready(priv(trans),
- NUM_IWL_RXON_CTX,
- sta_id, tid);
- iwl_wake_queue(trans, &trans_pcie->txq[txq_id]);
- }
- break;
- case IWL_EMPTYING_HW_QUEUE_ADDBA:
- /* We are reclaiming the last packet of the queue */
- if (tid_data->tfds_in_queue == 0) {
- IWL_DEBUG_HT(trans,
- "HW queue empty: continue ADDBA flow\n");
- tid_data->agg.state = IWL_AGG_ON;
- iwl_start_tx_ba_trans_ready(priv(trans),
- NUM_IWL_RXON_CTX,
- sta_id, tid);
- }
- break;
- default:
- break;
- }
-
- return 0;
-}
-
-static void iwl_free_tfds_in_queue(struct iwl_trans *trans,
- int sta_id, int tid, int freed)
-{
- lockdep_assert_held(&trans->shrd->sta_lock);
-
- if (trans->shrd->tid_data[sta_id][tid].tfds_in_queue >= freed)
- trans->shrd->tid_data[sta_id][tid].tfds_in_queue -= freed;
- else {
- IWL_DEBUG_TX(trans, "free more than tfds_in_queue (%u:%d)\n",
- trans->shrd->tid_data[sta_id][tid].tfds_in_queue,
- freed);
- trans->shrd->tid_data[sta_id][tid].tfds_in_queue = 0;
- }
-}
-
-static void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid,
+static int iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid,
int txq_id, int ssn, u32 status,
struct sk_buff_head *skbs)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
- enum iwl_agg_state agg_state;
/* n_bd is usually 256 => n_bd - 1 = 0xff */
int tfd_num = ssn & (txq->q.n_bd - 1);
int freed = 0;
- bool cond;
txq->time_stamp = jiffies;
- if (txq->sched_retry) {
- agg_state =
- trans->shrd->tid_data[txq->sta_id][txq->tid].agg.state;
- cond = (agg_state != IWL_EMPTYING_HW_QUEUE_DELBA);
- } else {
- cond = (status != TX_STATUS_FAIL_PASSIVE_NO_RX);
+ if (unlikely(txq_id >= IWLAGN_FIRST_AMPDU_QUEUE &&
+ txq_id != trans_pcie->agg_txq[sta_id][tid])) {
+ /*
+		 * FIXME: this is a uCode bug which needs to be addressed;
+		 * log the information and return for now.
+		 * Since it can possibly happen very often and in order
+		 * not to fill the syslog, don't use IWL_ERR or IWL_WARN
+ */
+ IWL_DEBUG_TX_QUEUES(trans, "Bad queue mapping txq_id %d, "
+			"agg_txq[sta_id][tid] %d", txq_id,
+ trans_pcie->agg_txq[sta_id][tid]);
+ return 1;
}
if (txq->q.read_ptr != tfd_num) {
- IWL_DEBUG_TX_REPLY(trans, "Retry scheduler reclaim "
- "scd_ssn=%d idx=%d txq=%d swq=%d\n",
- ssn , tfd_num, txq_id, txq->swq_id);
+ IWL_DEBUG_TX_REPLY(trans, "[Q %d | AC %d] %d -> %d (%d)\n",
+ txq_id, iwl_get_queue_ac(txq), txq->q.read_ptr,
+ tfd_num, ssn);
freed = iwl_tx_queue_reclaim(trans, txq_id, tfd_num, skbs);
- if (iwl_queue_space(&txq->q) > txq->q.low_mark && cond)
- iwl_wake_queue(trans, txq);
+ if (iwl_queue_space(&txq->q) > txq->q.low_mark &&
+ (!txq->sched_retry ||
+ status != TX_STATUS_FAIL_PASSIVE_NO_RX))
+ iwl_wake_queue(trans, txq, "Packets reclaimed");
}
-
- iwl_free_tfds_in_queue(trans, sta_id, tid, freed);
- iwlagn_txq_check_empty(trans, sta_id, tid, txq_id);
+ return 0;
}
static void iwl_trans_pcie_free(struct iwl_trans *trans)
{
+ iwl_calib_free_results(trans);
iwl_trans_pcie_tx_free(trans);
iwl_trans_pcie_rx_free(trans);
free_irq(bus(trans)->irq, trans);
@@ -1419,7 +1346,8 @@ static int iwl_trans_pcie_resume(struct iwl_trans *trans)
#endif /* CONFIG_PM_SLEEP */
static void iwl_trans_pcie_wake_any_queue(struct iwl_trans *trans,
- enum iwl_rxon_context_id ctx)
+ enum iwl_rxon_context_id ctx,
+ const char *msg)
{
u8 ac, txq_id;
struct iwl_trans_pcie *trans_pcie =
@@ -1427,11 +1355,11 @@ static void iwl_trans_pcie_wake_any_queue(struct iwl_trans *trans,
for (ac = 0; ac < AC_NUM; ac++) {
txq_id = trans_pcie->ac_to_queue[ctx][ac];
- IWL_DEBUG_INFO(trans, "Queue Status: Q[%d] %s\n",
+ IWL_DEBUG_TX_QUEUES(trans, "Queue Status: Q[%d] %s\n",
ac,
(atomic_read(&trans_pcie->queue_stop_count[ac]) > 0)
? "stopped" : "awake");
- iwl_wake_queue(trans, &trans_pcie->txq[txq_id]);
+ iwl_wake_queue(trans, &trans_pcie->txq[txq_id], msg);
}
}
@@ -1454,11 +1382,12 @@ static struct iwl_trans *iwl_trans_pcie_alloc(struct iwl_shared *shrd)
return iwl_trans;
}
-static void iwl_trans_pcie_stop_queue(struct iwl_trans *trans, int txq_id)
+static void iwl_trans_pcie_stop_queue(struct iwl_trans *trans, int txq_id,
+ const char *msg)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- iwl_stop_queue(trans, &trans_pcie->txq[txq_id]);
+ iwl_stop_queue(trans, &trans_pcie->txq[txq_id], msg);
}
#define IWL_FLUSH_WAIT_MS 2000
@@ -1513,8 +1442,12 @@ static int iwl_trans_pcie_check_stuck_queue(struct iwl_trans *trans, int cnt)
if (time_after(jiffies, timeout)) {
IWL_ERR(trans, "Queue %d stuck for %u ms.\n", q->id,
hw_params(trans).wd_timeout);
- IWL_ERR(trans, "Current read_ptr %d write_ptr %d\n",
+ IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
q->read_ptr, q->write_ptr);
+ IWL_ERR(trans, "Current HW read_ptr %d write_ptr %d\n",
+ iwl_read_prph(bus(trans), SCD_QUEUE_RDPTR(cnt))
+ & (TFD_QUEUE_SIZE_MAX - 1),
+ iwl_read_prph(bus(trans), SCD_QUEUE_WRPTR(cnt)));
return 1;
}
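
The CONFIG_IWLWIFI_DEBUG check added to iwl_trans_pcie_tx() above leans on the scheduler requirement that, on a 256-entry aggregation queue, a frame's ring slot equals the low byte of its 802.11 sequence number. A small model of that invariant, assuming the usual 256-TFD ring and the standard seq_ctrl layout:

/*
 * Model of the AGG invariant: the TFD slot must equal the low byte of the
 * frame's 802.11 sequence number on a 256-entry ring.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TFD_QUEUE_SIZE	256			/* assumed ring size (q->n_bd) */
#define SEQ_TO_SN(seq)	(((seq) & 0xfff0) >> 4)	/* 802.11 seq_ctrl: SN in bits 4..15 */

static bool agg_slot_ok(uint16_t seq_ctrl, unsigned int write_ptr)
{
	uint16_t wifi_seq = SEQ_TO_SN(seq_ctrl);

	/* driver: WARN_ONCE(is_agg && ((wifi_seq & 0xff) != q->write_ptr), ...) */
	return (wifi_seq & (TFD_QUEUE_SIZE - 1)) == write_ptr;
}

int main(void)
{
	uint16_t seq_ctrl = 0x1230;	/* sequence number 0x123, fragment 0 */

	printf("slot 0x23 ok: %d\n", agg_slot_ok(seq_ctrl, 0x23));	/* 1 */
	printf("slot 0x24 ok: %d\n", agg_slot_ok(seq_ctrl, 0x24));	/* 0 */
	return 0;
}
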
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index c5923125c3f9..e6bf3f554772 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -171,32 +171,31 @@ struct iwl_trans_ops {
void (*tx_start)(struct iwl_trans *trans);
void (*wake_any_queue)(struct iwl_trans *trans,
- enum iwl_rxon_context_id ctx);
+ enum iwl_rxon_context_id ctx,
+ const char *msg);
int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
int (*tx)(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_cmd *dev_cmd, enum iwl_rxon_context_id ctx,
- u8 sta_id);
- void (*reclaim)(struct iwl_trans *trans, int sta_id, int tid,
+ u8 sta_id, u8 tid);
+ int (*reclaim)(struct iwl_trans *trans, int sta_id, int tid,
int txq_id, int ssn, u32 status,
struct sk_buff_head *skbs);
int (*tx_agg_disable)(struct iwl_trans *trans,
- enum iwl_rxon_context_id ctx, int sta_id,
- int tid);
+ int sta_id, int tid);
int (*tx_agg_alloc)(struct iwl_trans *trans,
- enum iwl_rxon_context_id ctx, int sta_id, int tid,
- u16 *ssn);
+ int sta_id, int tid);
void (*tx_agg_setup)(struct iwl_trans *trans,
enum iwl_rxon_context_id ctx, int sta_id, int tid,
- int frame_limit);
+ int frame_limit, u16 ssn);
void (*kick_nic)(struct iwl_trans *trans);
void (*free)(struct iwl_trans *trans);
- void (*stop_queue)(struct iwl_trans *trans, int q);
+ void (*stop_queue)(struct iwl_trans *trans, int q, const char *msg);
int (*dbgfs_register)(struct iwl_trans *trans, struct dentry* dir);
int (*check_stuck_queue)(struct iwl_trans *trans, int q);
@@ -207,17 +206,54 @@ struct iwl_trans_ops {
#endif
};
+/* one for each uCode image (inst/data, boot/init/runtime) */
+struct fw_desc {
+ dma_addr_t p_addr; /* hardware address */
+ void *v_addr; /* software address */
+ u32 len; /* size in bytes */
+};
+
+struct fw_img {
+ struct fw_desc code; /* firmware code image */
+ struct fw_desc data; /* firmware data image */
+};
+
+/* Opaque calibration results */
+struct iwl_calib_result {
+ struct list_head list;
+ size_t cmd_len;
+ struct iwl_calib_hdr hdr;
+ /* data follows */
+};
+
/**
* struct iwl_trans - transport common data
* @ops - pointer to iwl_trans_ops
* @shrd - pointer to iwl_shared which holds shared data from the upper layer
* @hcmd_lock: protects HCMD
+ * @ucode_write_complete: indicates that the ucode has been copied.
+ * @ucode_rt: run time ucode image
+ * @ucode_init: init ucode image
+ * @ucode_wowlan: wake on wireless ucode image (optional)
+ * @nvm_device_type: indicates OTP or eeprom
+ * @calib_results: list head for init calibration results
*/
struct iwl_trans {
const struct iwl_trans_ops *ops;
struct iwl_shared *shrd;
spinlock_t hcmd_lock;
+ u8 ucode_write_complete; /* the image write is complete */
+ struct fw_img ucode_rt;
+ struct fw_img ucode_init;
+ struct fw_img ucode_wowlan;
+
+ /* eeprom related variables */
+ int nvm_device_type;
+
+ /* init calibration results */
+ struct list_head calib_results;
+
/* pointer to trans specific struct */
	/* Ensure that this pointer will always be aligned to the size of a pointer */
char trans_specific[0] __attribute__((__aligned__(sizeof(void *))));
@@ -249,9 +285,10 @@ static inline void iwl_trans_tx_start(struct iwl_trans *trans)
}
static inline void iwl_trans_wake_any_queue(struct iwl_trans *trans,
- enum iwl_rxon_context_id ctx)
+ enum iwl_rxon_context_id ctx,
+ const char *msg)
{
- trans->ops->wake_any_queue(trans, ctx);
+ trans->ops->wake_any_queue(trans, ctx, msg);
}
@@ -266,39 +303,38 @@ int iwl_trans_send_cmd_pdu(struct iwl_trans *trans, u8 id,
static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_cmd *dev_cmd, enum iwl_rxon_context_id ctx,
- u8 sta_id)
+ u8 sta_id, u8 tid)
{
- return trans->ops->tx(trans, skb, dev_cmd, ctx, sta_id);
+ return trans->ops->tx(trans, skb, dev_cmd, ctx, sta_id, tid);
}
-static inline void iwl_trans_reclaim(struct iwl_trans *trans, int sta_id,
+static inline int iwl_trans_reclaim(struct iwl_trans *trans, int sta_id,
int tid, int txq_id, int ssn, u32 status,
struct sk_buff_head *skbs)
{
- trans->ops->reclaim(trans, sta_id, tid, txq_id, ssn, status, skbs);
+ return trans->ops->reclaim(trans, sta_id, tid, txq_id, ssn,
+ status, skbs);
}
static inline int iwl_trans_tx_agg_disable(struct iwl_trans *trans,
- enum iwl_rxon_context_id ctx,
int sta_id, int tid)
{
- return trans->ops->tx_agg_disable(trans, ctx, sta_id, tid);
+ return trans->ops->tx_agg_disable(trans, sta_id, tid);
}
static inline int iwl_trans_tx_agg_alloc(struct iwl_trans *trans,
- enum iwl_rxon_context_id ctx,
- int sta_id, int tid, u16 *ssn)
+ int sta_id, int tid)
{
- return trans->ops->tx_agg_alloc(trans, ctx, sta_id, tid, ssn);
+ return trans->ops->tx_agg_alloc(trans, sta_id, tid);
}
static inline void iwl_trans_tx_agg_setup(struct iwl_trans *trans,
enum iwl_rxon_context_id ctx,
int sta_id, int tid,
- int frame_limit)
+ int frame_limit, u16 ssn)
{
- trans->ops->tx_agg_setup(trans, ctx, sta_id, tid, frame_limit);
+ trans->ops->tx_agg_setup(trans, ctx, sta_id, tid, frame_limit, ssn);
}
static inline void iwl_trans_kick_nic(struct iwl_trans *trans)
@@ -311,9 +347,10 @@ static inline void iwl_trans_free(struct iwl_trans *trans)
trans->ops->free(trans);
}
-static inline void iwl_trans_stop_queue(struct iwl_trans *trans, int q)
+static inline void iwl_trans_stop_queue(struct iwl_trans *trans, int q,
+ const char *msg)
{
- trans->ops->stop_queue(trans, q);
+ trans->ops->stop_queue(trans, q, msg);
}
static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans)
@@ -348,4 +385,13 @@ static inline int iwl_trans_resume(struct iwl_trans *trans)
******************************************************/
extern const struct iwl_trans_ops trans_ops_pcie;
+int iwl_alloc_fw_desc(struct iwl_bus *bus, struct fw_desc *desc,
+ const void *data, size_t len);
+void iwl_dealloc_ucode(struct iwl_trans *trans);
+
+int iwl_send_calib_results(struct iwl_trans *trans);
+int iwl_calib_set(struct iwl_trans *trans,
+ const struct iwl_calib_hdr *cmd, int len);
+void iwl_calib_free_results(struct iwl_trans *trans);
+
#endif /* __iwl_trans_h__ */
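
The new struct fw_desc/fw_img pair a DMA buffer with its length, and iwl_alloc_fw_desc() copies one uCode section into such a buffer. Below is a host-only sketch of the same allocate/copy/free pattern, with malloc standing in for dma_alloc_coherent (so the p_addr half of the real struct is omitted); only the field names and error codes mirror the header, the rest is illustrative.

/*
 * Host-only sketch of the fw_desc allocate/copy/free pattern.
 */
#include <stdlib.h>
#include <string.h>

struct fw_desc {
	void *v_addr;	/* CPU-visible copy of one uCode section */
	size_t len;	/* size in bytes */
};

static int fw_desc_alloc(struct fw_desc *desc, const void *data, size_t len)
{
	if (!len) {
		desc->v_addr = NULL;
		return -1;	/* the driver returns -EINVAL here */
	}

	desc->v_addr = malloc(len);
	if (!desc->v_addr)
		return -1;	/* the driver returns -ENOMEM here */

	desc->len = len;
	memcpy(desc->v_addr, data, len);
	return 0;
}

static void fw_desc_free(struct fw_desc *desc)
{
	free(desc->v_addr);	/* driver: dma_free_coherent() */
	desc->v_addr = NULL;
	desc->len = 0;
}

int main(void)
{
	static const unsigned char section[] = { 0xde, 0xad, 0xbe, 0xef };
	struct fw_desc code = { 0, 0 };

	if (fw_desc_alloc(&code, section, sizeof(section)) == 0)
		fw_desc_free(&code);
	return 0;
}
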
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c b/drivers/net/wireless/iwlwifi/iwl-ucode.c
index 8ba0dd54e37d..36a1b5b25858 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
+++ b/drivers/net/wireless/iwlwifi/iwl-ucode.c
@@ -31,7 +31,9 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
+#include <linux/dma-mapping.h>
+#include "iwl-wifi.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
@@ -72,51 +74,98 @@ static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = {
{COEX_CU_RSRVD2_RP, COEX_CU_RSRVD2_WP, 0, COEX_RSRVD2_FLAGS}
};
+/******************************************************************************
+ *
+ * uCode download functions
+ *
+ ******************************************************************************/
+
+static void iwl_free_fw_desc(struct iwl_bus *bus, struct fw_desc *desc)
+{
+ if (desc->v_addr)
+ dma_free_coherent(bus->dev, desc->len,
+ desc->v_addr, desc->p_addr);
+ desc->v_addr = NULL;
+ desc->len = 0;
+}
+
+static void iwl_free_fw_img(struct iwl_bus *bus, struct fw_img *img)
+{
+ iwl_free_fw_desc(bus, &img->code);
+ iwl_free_fw_desc(bus, &img->data);
+}
+
+void iwl_dealloc_ucode(struct iwl_trans *trans)
+{
+ iwl_free_fw_img(bus(trans), &trans->ucode_rt);
+ iwl_free_fw_img(bus(trans), &trans->ucode_init);
+ iwl_free_fw_img(bus(trans), &trans->ucode_wowlan);
+}
+
+int iwl_alloc_fw_desc(struct iwl_bus *bus, struct fw_desc *desc,
+ const void *data, size_t len)
+{
+ if (!len) {
+ desc->v_addr = NULL;
+ return -EINVAL;
+ }
+
+ desc->v_addr = dma_alloc_coherent(bus->dev, len,
+ &desc->p_addr, GFP_KERNEL);
+ if (!desc->v_addr)
+ return -ENOMEM;
+
+ desc->len = len;
+ memcpy(desc->v_addr, data, len);
+ return 0;
+}
+
/*
* ucode
*/
-static int iwlagn_load_section(struct iwl_priv *priv, const char *name,
+static int iwl_load_section(struct iwl_trans *trans, const char *name,
struct fw_desc *image, u32 dst_addr)
{
+ struct iwl_bus *bus = bus(trans);
dma_addr_t phy_addr = image->p_addr;
u32 byte_cnt = image->len;
int ret;
- priv->ucode_write_complete = 0;
+ trans->ucode_write_complete = 0;
- iwl_write_direct32(bus(priv),
+ iwl_write_direct32(bus,
FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
- iwl_write_direct32(bus(priv),
+ iwl_write_direct32(bus,
FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), dst_addr);
- iwl_write_direct32(bus(priv),
+ iwl_write_direct32(bus,
FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
- iwl_write_direct32(bus(priv),
+ iwl_write_direct32(bus,
FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
(iwl_get_dma_hi_addr(phy_addr)
<< FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
- iwl_write_direct32(bus(priv),
+ iwl_write_direct32(bus,
FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
- iwl_write_direct32(bus(priv),
+ iwl_write_direct32(bus,
FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
- IWL_DEBUG_FW(priv, "%s uCode section being loaded...\n", name);
- ret = wait_event_timeout(priv->shrd->wait_command_queue,
- priv->ucode_write_complete, 5 * HZ);
+ IWL_DEBUG_FW(bus, "%s uCode section being loaded...\n", name);
+ ret = wait_event_timeout(trans->shrd->wait_command_queue,
+ trans->ucode_write_complete, 5 * HZ);
if (!ret) {
- IWL_ERR(priv, "Could not load the %s uCode section\n",
+ IWL_ERR(trans, "Could not load the %s uCode section\n",
name);
return -ETIMEDOUT;
}
@@ -124,41 +173,65 @@ static int iwlagn_load_section(struct iwl_priv *priv, const char *name,
return 0;
}
-static int iwlagn_load_given_ucode(struct iwl_priv *priv,
- struct fw_img *image)
+static inline struct fw_img *iwl_get_ucode_image(struct iwl_trans *trans,
+ enum iwl_ucode_type ucode_type)
+{
+ switch (ucode_type) {
+ case IWL_UCODE_INIT:
+ return &trans->ucode_init;
+ case IWL_UCODE_WOWLAN:
+ return &trans->ucode_wowlan;
+ case IWL_UCODE_REGULAR:
+ return &trans->ucode_rt;
+ case IWL_UCODE_NONE:
+ break;
+ }
+ return NULL;
+}
+
+static int iwl_load_given_ucode(struct iwl_trans *trans,
+ enum iwl_ucode_type ucode_type)
{
int ret = 0;
+ struct fw_img *image = iwl_get_ucode_image(trans, ucode_type);
- ret = iwlagn_load_section(priv, "INST", &image->code,
+
+ if (!image) {
+ IWL_ERR(trans, "Invalid ucode requested (%d)\n",
+ ucode_type);
+ return -EINVAL;
+ }
+
+ ret = iwl_load_section(trans, "INST", &image->code,
IWLAGN_RTC_INST_LOWER_BOUND);
if (ret)
return ret;
- return iwlagn_load_section(priv, "DATA", &image->data,
+ return iwl_load_section(trans, "DATA", &image->data,
IWLAGN_RTC_DATA_LOWER_BOUND);
}
/*
* Calibration
*/
-static int iwlagn_set_Xtal_calib(struct iwl_priv *priv)
+static int iwl_set_Xtal_calib(struct iwl_trans *trans)
{
struct iwl_calib_xtal_freq_cmd cmd;
__le16 *xtal_calib =
- (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_XTAL);
+ (__le16 *)iwl_eeprom_query_addr(trans->shrd, EEPROM_XTAL);
iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD);
cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]);
cmd.cap_pin2 = le16_to_cpu(xtal_calib[1]);
- return iwl_calib_set(&priv->calib_results[IWL_CALIB_XTAL],
- (u8 *)&cmd, sizeof(cmd));
+ return iwl_calib_set(trans, (void *)&cmd, sizeof(cmd));
}
-static int iwlagn_set_temperature_offset_calib(struct iwl_priv *priv)
+static int iwl_set_temperature_offset_calib(struct iwl_trans *trans)
{
struct iwl_calib_temperature_offset_cmd cmd;
__le16 *offset_calib =
- (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_RAW_TEMPERATURE);
+ (__le16 *)iwl_eeprom_query_addr(trans->shrd,
+ EEPROM_RAW_TEMPERATURE);
memset(&cmd, 0, sizeof(cmd));
iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD);
@@ -166,49 +239,48 @@ static int iwlagn_set_temperature_offset_calib(struct iwl_priv *priv)
if (!(cmd.radio_sensor_offset))
cmd.radio_sensor_offset = DEFAULT_RADIO_SENSOR_OFFSET;
- IWL_DEBUG_CALIB(priv, "Radio sensor offset: %d\n",
+ IWL_DEBUG_CALIB(trans, "Radio sensor offset: %d\n",
le16_to_cpu(cmd.radio_sensor_offset));
- return iwl_calib_set(&priv->calib_results[IWL_CALIB_TEMP_OFFSET],
- (u8 *)&cmd, sizeof(cmd));
+ return iwl_calib_set(trans, (void *)&cmd, sizeof(cmd));
}
-static int iwlagn_set_temperature_offset_calib_v2(struct iwl_priv *priv)
+static int iwl_set_temperature_offset_calib_v2(struct iwl_trans *trans)
{
struct iwl_calib_temperature_offset_v2_cmd cmd;
- __le16 *offset_calib_high = (__le16 *)iwl_eeprom_query_addr(priv,
+ __le16 *offset_calib_high = (__le16 *)iwl_eeprom_query_addr(trans->shrd,
EEPROM_KELVIN_TEMPERATURE);
__le16 *offset_calib_low =
- (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_RAW_TEMPERATURE);
+ (__le16 *)iwl_eeprom_query_addr(trans->shrd,
+ EEPROM_RAW_TEMPERATURE);
struct iwl_eeprom_calib_hdr *hdr;
memset(&cmd, 0, sizeof(cmd));
iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD);
- hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv,
+ hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(trans->shrd,
EEPROM_CALIB_ALL);
memcpy(&cmd.radio_sensor_offset_high, offset_calib_high,
sizeof(*offset_calib_high));
memcpy(&cmd.radio_sensor_offset_low, offset_calib_low,
sizeof(*offset_calib_low));
if (!(cmd.radio_sensor_offset_low)) {
- IWL_DEBUG_CALIB(priv, "no info in EEPROM, use default\n");
+ IWL_DEBUG_CALIB(trans, "no info in EEPROM, use default\n");
cmd.radio_sensor_offset_low = DEFAULT_RADIO_SENSOR_OFFSET;
cmd.radio_sensor_offset_high = DEFAULT_RADIO_SENSOR_OFFSET;
}
memcpy(&cmd.burntVoltageRef, &hdr->voltage,
sizeof(hdr->voltage));
- IWL_DEBUG_CALIB(priv, "Radio sensor offset high: %d\n",
+ IWL_DEBUG_CALIB(trans, "Radio sensor offset high: %d\n",
le16_to_cpu(cmd.radio_sensor_offset_high));
- IWL_DEBUG_CALIB(priv, "Radio sensor offset low: %d\n",
+ IWL_DEBUG_CALIB(trans, "Radio sensor offset low: %d\n",
le16_to_cpu(cmd.radio_sensor_offset_low));
- IWL_DEBUG_CALIB(priv, "Voltage Ref: %d\n",
+ IWL_DEBUG_CALIB(trans, "Voltage Ref: %d\n",
le16_to_cpu(cmd.burntVoltageRef));
- return iwl_calib_set(&priv->calib_results[IWL_CALIB_TEMP_OFFSET],
- (u8 *)&cmd, sizeof(cmd));
+ return iwl_calib_set(trans, (void *)&cmd, sizeof(cmd));
}
-static int iwlagn_send_calib_cfg(struct iwl_priv *priv)
+static int iwl_send_calib_cfg(struct iwl_trans *trans)
{
struct iwl_calib_cfg_cmd calib_cfg_cmd;
struct iwl_host_cmd cmd = {
@@ -224,7 +296,7 @@ static int iwlagn_send_calib_cfg(struct iwl_priv *priv)
calib_cfg_cmd.ucd_calib_cfg.flags =
IWL_CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_MSK;
- return iwl_trans_send_cmd(trans(priv), &cmd);
+ return iwl_trans_send_cmd(trans, &cmd);
}
int iwlagn_rx_calib_result(struct iwl_priv *priv,
@@ -234,60 +306,37 @@ int iwlagn_rx_calib_result(struct iwl_priv *priv,
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_calib_hdr *hdr = (struct iwl_calib_hdr *)pkt->u.raw;
int len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
- int index;
/* reduce the size of the length field itself */
len -= 4;
- /* Define the order in which the results will be sent to the runtime
- * uCode. iwl_send_calib_results sends them in a row according to
- * their index. We sort them here
- */
- switch (hdr->op_code) {
- case IWL_PHY_CALIBRATE_DC_CMD:
- index = IWL_CALIB_DC;
- break;
- case IWL_PHY_CALIBRATE_LO_CMD:
- index = IWL_CALIB_LO;
- break;
- case IWL_PHY_CALIBRATE_TX_IQ_CMD:
- index = IWL_CALIB_TX_IQ;
- break;
- case IWL_PHY_CALIBRATE_TX_IQ_PERD_CMD:
- index = IWL_CALIB_TX_IQ_PERD;
- break;
- case IWL_PHY_CALIBRATE_BASE_BAND_CMD:
- index = IWL_CALIB_BASE_BAND;
- break;
- default:
- IWL_ERR(priv, "Unknown calibration notification %d\n",
- hdr->op_code);
- return -1;
- }
- iwl_calib_set(&priv->calib_results[index], pkt->u.raw, len);
+ if (iwl_calib_set(trans(priv), hdr, len))
+ IWL_ERR(priv, "Failed to record calibration data %d\n",
+ hdr->op_code);
+
return 0;
}
-int iwlagn_init_alive_start(struct iwl_priv *priv)
+int iwl_init_alive_start(struct iwl_trans *trans)
{
int ret;
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist) {
+ if (cfg(trans)->bt_params &&
+ cfg(trans)->bt_params->advanced_bt_coexist) {
/*
* Tell uCode we are ready to perform calibration
* need to perform this before any calibration
		 * no need to close the envelope since we are going
* to load the runtime uCode later.
*/
- ret = iwlagn_send_bt_env(priv, IWL_BT_COEX_ENV_OPEN,
+ ret = iwl_send_bt_env(trans, IWL_BT_COEX_ENV_OPEN,
BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
if (ret)
return ret;
}
- ret = iwlagn_send_calib_cfg(priv);
+ ret = iwl_send_calib_cfg(trans);
if (ret)
return ret;
@@ -295,21 +344,21 @@ int iwlagn_init_alive_start(struct iwl_priv *priv)
* temperature offset calibration is only needed for runtime ucode,
* so prepare the value now.
*/
- if (priv->cfg->need_temp_offset_calib) {
- if (priv->cfg->temp_offset_v2)
- return iwlagn_set_temperature_offset_calib_v2(priv);
+ if (cfg(trans)->need_temp_offset_calib) {
+ if (cfg(trans)->temp_offset_v2)
+ return iwl_set_temperature_offset_calib_v2(trans);
else
- return iwlagn_set_temperature_offset_calib(priv);
+ return iwl_set_temperature_offset_calib(trans);
}
return 0;
}
-static int iwlagn_send_wimax_coex(struct iwl_priv *priv)
+static int iwl_send_wimax_coex(struct iwl_trans *trans)
{
struct iwl_wimax_coex_cmd coex_cmd;
- if (priv->cfg->base_params->support_wimax_coexist) {
+ if (cfg(trans)->base_params->support_wimax_coexist) {
/* UnMask wake up src at associated sleep */
coex_cmd.flags = COEX_FLAGS_ASSOC_WA_UNMASK_MSK;
@@ -328,12 +377,12 @@ static int iwlagn_send_wimax_coex(struct iwl_priv *priv)
/* coexistence is disabled */
memset(&coex_cmd, 0, sizeof(coex_cmd));
}
- return iwl_trans_send_cmd_pdu(trans(priv),
+ return iwl_trans_send_cmd_pdu(trans,
COEX_PRIORITY_TABLE_CMD, CMD_SYNC,
sizeof(coex_cmd), &coex_cmd);
}
-static const u8 iwlagn_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = {
+static const u8 iwl_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = {
((BT_COEX_PRIO_TBL_PRIO_BYPASS << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
(0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
((BT_COEX_PRIO_TBL_PRIO_BYPASS << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
@@ -355,61 +404,64 @@ static const u8 iwlagn_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = {
0, 0, 0, 0, 0, 0, 0
};
-void iwlagn_send_prio_tbl(struct iwl_priv *priv)
+void iwl_send_prio_tbl(struct iwl_trans *trans)
{
struct iwl_bt_coex_prio_table_cmd prio_tbl_cmd;
- memcpy(prio_tbl_cmd.prio_tbl, iwlagn_bt_prio_tbl,
- sizeof(iwlagn_bt_prio_tbl));
- if (iwl_trans_send_cmd_pdu(trans(priv),
+ memcpy(prio_tbl_cmd.prio_tbl, iwl_bt_prio_tbl,
+ sizeof(iwl_bt_prio_tbl));
+ if (iwl_trans_send_cmd_pdu(trans,
REPLY_BT_COEX_PRIO_TABLE, CMD_SYNC,
sizeof(prio_tbl_cmd), &prio_tbl_cmd))
- IWL_ERR(priv, "failed to send BT prio tbl command\n");
+ IWL_ERR(trans, "failed to send BT prio tbl command\n");
}
-int iwlagn_send_bt_env(struct iwl_priv *priv, u8 action, u8 type)
+int iwl_send_bt_env(struct iwl_trans *trans, u8 action, u8 type)
{
struct iwl_bt_coex_prot_env_cmd env_cmd;
int ret;
env_cmd.action = action;
env_cmd.type = type;
- ret = iwl_trans_send_cmd_pdu(trans(priv),
+ ret = iwl_trans_send_cmd_pdu(trans,
REPLY_BT_COEX_PROT_ENV, CMD_SYNC,
sizeof(env_cmd), &env_cmd);
if (ret)
- IWL_ERR(priv, "failed to send BT env command\n");
+ IWL_ERR(trans, "failed to send BT env command\n");
return ret;
}
-static int iwlagn_alive_notify(struct iwl_priv *priv)
+static int iwl_alive_notify(struct iwl_trans *trans)
{
+ struct iwl_priv *priv = priv(trans);
struct iwl_rxon_context *ctx;
int ret;
if (!priv->tx_cmd_pool)
priv->tx_cmd_pool =
- kmem_cache_create("iwlagn_dev_cmd",
+ kmem_cache_create("iwl_dev_cmd",
sizeof(struct iwl_device_cmd),
sizeof(void *), 0, NULL);
if (!priv->tx_cmd_pool)
return -ENOMEM;
- iwl_trans_tx_start(trans(priv));
+ iwl_trans_tx_start(trans);
for_each_context(priv, ctx)
ctx->last_tx_rejected = false;
- ret = iwlagn_send_wimax_coex(priv);
+ ret = iwl_send_wimax_coex(trans);
if (ret)
return ret;
- ret = iwlagn_set_Xtal_calib(priv);
- if (ret)
- return ret;
+ if (!cfg(priv)->no_xtal_calib) {
+ ret = iwl_set_Xtal_calib(trans);
+ if (ret)
+ return ret;
+ }
- return iwl_send_calib_results(priv);
+ return iwl_send_calib_results(trans);
}
@@ -418,7 +470,7 @@ static int iwlagn_alive_notify(struct iwl_priv *priv)
* using sample data 100 bytes apart. If these sample points are good,
* it's a pretty good bet that everything between them is good, too.
*/
-static int iwl_verify_inst_sparse(struct iwl_priv *priv,
+static int iwl_verify_inst_sparse(struct iwl_bus *bus,
struct fw_desc *fw_desc)
{
__le32 *image = (__le32 *)fw_desc->v_addr;
@@ -426,15 +478,15 @@ static int iwl_verify_inst_sparse(struct iwl_priv *priv,
u32 val;
u32 i;
- IWL_DEBUG_FW(priv, "ucode inst image size is %u\n", len);
+ IWL_DEBUG_FW(bus, "ucode inst image size is %u\n", len);
for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
/* read data comes through single port, auto-incr addr */
/* NOTE: Use the debugless read so we don't flood kernel log
* if IWL_DL_IO is set */
- iwl_write_direct32(bus(priv), HBUS_TARG_MEM_RADDR,
+ iwl_write_direct32(bus, HBUS_TARG_MEM_RADDR,
i + IWLAGN_RTC_INST_LOWER_BOUND);
- val = iwl_read32(bus(priv), HBUS_TARG_MEM_RDAT);
+ val = iwl_read32(bus, HBUS_TARG_MEM_RDAT);
if (val != le32_to_cpu(*image))
return -EIO;
}
@@ -442,7 +494,7 @@ static int iwl_verify_inst_sparse(struct iwl_priv *priv,
return 0;
}
-static void iwl_print_mismatch_inst(struct iwl_priv *priv,
+static void iwl_print_mismatch_inst(struct iwl_bus *bus,
struct fw_desc *fw_desc)
{
__le32 *image = (__le32 *)fw_desc->v_addr;
@@ -451,18 +503,18 @@ static void iwl_print_mismatch_inst(struct iwl_priv *priv,
u32 offs;
int errors = 0;
- IWL_DEBUG_FW(priv, "ucode inst image size is %u\n", len);
+ IWL_DEBUG_FW(bus, "ucode inst image size is %u\n", len);
- iwl_write_direct32(bus(priv), HBUS_TARG_MEM_RADDR,
+ iwl_write_direct32(bus, HBUS_TARG_MEM_RADDR,
IWLAGN_RTC_INST_LOWER_BOUND);
for (offs = 0;
offs < len && errors < 20;
offs += sizeof(u32), image++) {
/* read data comes through single port, auto-incr addr */
- val = iwl_read32(bus(priv), HBUS_TARG_MEM_RDAT);
+ val = iwl_read32(bus, HBUS_TARG_MEM_RDAT);
if (val != le32_to_cpu(*image)) {
- IWL_ERR(priv, "uCode INST section at "
+ IWL_ERR(bus, "uCode INST section at "
"offset 0x%x, is 0x%x, s/b 0x%x\n",
offs, val, le32_to_cpu(*image));
errors++;
@@ -474,91 +526,163 @@ static void iwl_print_mismatch_inst(struct iwl_priv *priv,
* iwl_verify_ucode - determine which instruction image is in SRAM,
* and verify its contents
*/
-static int iwl_verify_ucode(struct iwl_priv *priv, struct fw_img *img)
+static int iwl_verify_ucode(struct iwl_trans *trans,
+ enum iwl_ucode_type ucode_type)
{
- if (!iwl_verify_inst_sparse(priv, &img->code)) {
- IWL_DEBUG_FW(priv, "uCode is good in inst SRAM\n");
+ struct fw_img *img = iwl_get_ucode_image(trans, ucode_type);
+
+ if (!img) {
+ IWL_ERR(trans, "Invalid ucode requested (%d)\n", ucode_type);
+ return -EINVAL;
+ }
+
+ if (!iwl_verify_inst_sparse(bus(trans), &img->code)) {
+ IWL_DEBUG_FW(trans, "uCode is good in inst SRAM\n");
return 0;
}
- IWL_ERR(priv, "UCODE IMAGE IN INSTRUCTION SRAM NOT VALID!!\n");
+ IWL_ERR(trans, "UCODE IMAGE IN INSTRUCTION SRAM NOT VALID!!\n");
- iwl_print_mismatch_inst(priv, &img->code);
+ iwl_print_mismatch_inst(bus(trans), &img->code);
return -EIO;
}
-struct iwlagn_alive_data {
+struct iwl_alive_data {
bool valid;
u8 subtype;
};
-static void iwlagn_alive_fn(struct iwl_priv *priv,
+static void iwl_alive_fn(struct iwl_trans *trans,
struct iwl_rx_packet *pkt,
void *data)
{
- struct iwlagn_alive_data *alive_data = data;
+ struct iwl_alive_data *alive_data = data;
struct iwl_alive_resp *palive;
palive = &pkt->u.alive_frame;
- IWL_DEBUG_FW(priv, "Alive ucode status 0x%08X revision "
+ IWL_DEBUG_FW(trans, "Alive ucode status 0x%08X revision "
"0x%01X 0x%01X\n",
palive->is_valid, palive->ver_type,
palive->ver_subtype);
- priv->device_pointers.error_event_table =
+ trans->shrd->device_pointers.error_event_table =
le32_to_cpu(palive->error_event_table_ptr);
- priv->device_pointers.log_event_table =
+ trans->shrd->device_pointers.log_event_table =
le32_to_cpu(palive->log_event_table_ptr);
alive_data->subtype = palive->ver_subtype;
alive_data->valid = palive->is_valid == UCODE_VALID_OK;
}
+/* notification wait support */
+void iwl_init_notification_wait(struct iwl_shared *shrd,
+ struct iwl_notification_wait *wait_entry,
+ u8 cmd,
+ void (*fn)(struct iwl_trans *trans,
+ struct iwl_rx_packet *pkt,
+ void *data),
+ void *fn_data)
+{
+ wait_entry->fn = fn;
+ wait_entry->fn_data = fn_data;
+ wait_entry->cmd = cmd;
+ wait_entry->triggered = false;
+ wait_entry->aborted = false;
+
+ spin_lock_bh(&shrd->notif_wait_lock);
+ list_add(&wait_entry->list, &shrd->notif_waits);
+ spin_unlock_bh(&shrd->notif_wait_lock);
+}
+
+int iwl_wait_notification(struct iwl_shared *shrd,
+ struct iwl_notification_wait *wait_entry,
+ unsigned long timeout)
+{
+ int ret;
+
+ ret = wait_event_timeout(shrd->notif_waitq,
+ wait_entry->triggered || wait_entry->aborted,
+ timeout);
+
+ spin_lock_bh(&shrd->notif_wait_lock);
+ list_del(&wait_entry->list);
+ spin_unlock_bh(&shrd->notif_wait_lock);
+
+ if (wait_entry->aborted)
+ return -EIO;
+
+ /* return value is always >= 0 */
+ if (ret <= 0)
+ return -ETIMEDOUT;
+ return 0;
+}
+
+void iwl_remove_notification(struct iwl_shared *shrd,
+ struct iwl_notification_wait *wait_entry)
+{
+ spin_lock_bh(&shrd->notif_wait_lock);
+ list_del(&wait_entry->list);
+ spin_unlock_bh(&shrd->notif_wait_lock);
+}
+
+void iwl_abort_notification_waits(struct iwl_shared *shrd)
+{
+ unsigned long flags;
+ struct iwl_notification_wait *wait_entry;
+
+ spin_lock_irqsave(&shrd->notif_wait_lock, flags);
+ list_for_each_entry(wait_entry, &shrd->notif_waits, list)
+ wait_entry->aborted = true;
+ spin_unlock_irqrestore(&shrd->notif_wait_lock, flags);
+
+ wake_up_all(&shrd->notif_waitq);
+}
+
#define UCODE_ALIVE_TIMEOUT HZ
#define UCODE_CALIB_TIMEOUT (2*HZ)
-int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv,
- struct fw_img *image,
- enum iwlagn_ucode_type ucode_type)
+int iwl_load_ucode_wait_alive(struct iwl_trans *trans,
+ enum iwl_ucode_type ucode_type)
{
struct iwl_notification_wait alive_wait;
- struct iwlagn_alive_data alive_data;
+ struct iwl_alive_data alive_data;
int ret;
- enum iwlagn_ucode_type old_type;
+ enum iwl_ucode_type old_type;
- ret = iwl_trans_start_device(trans(priv));
+ ret = iwl_trans_start_device(trans);
if (ret)
return ret;
- iwlagn_init_notification_wait(priv, &alive_wait, REPLY_ALIVE,
- iwlagn_alive_fn, &alive_data);
+ iwl_init_notification_wait(trans->shrd, &alive_wait, REPLY_ALIVE,
+ iwl_alive_fn, &alive_data);
- old_type = priv->ucode_type;
- priv->ucode_type = ucode_type;
+ old_type = trans->shrd->ucode_type;
+ trans->shrd->ucode_type = ucode_type;
- ret = iwlagn_load_given_ucode(priv, image);
+ ret = iwl_load_given_ucode(trans, ucode_type);
if (ret) {
- priv->ucode_type = old_type;
- iwlagn_remove_notification(priv, &alive_wait);
+ trans->shrd->ucode_type = old_type;
+ iwl_remove_notification(trans->shrd, &alive_wait);
return ret;
}
- iwl_trans_kick_nic(trans(priv));
+ iwl_trans_kick_nic(trans);
/*
* Some things may run in the background now, but we
* just wait for the ALIVE notification here.
*/
- ret = iwlagn_wait_notification(priv, &alive_wait, UCODE_ALIVE_TIMEOUT);
+ ret = iwl_wait_notification(trans->shrd, &alive_wait,
+ UCODE_ALIVE_TIMEOUT);
if (ret) {
- priv->ucode_type = old_type;
+ trans->shrd->ucode_type = old_type;
return ret;
}
if (!alive_data.valid) {
- IWL_ERR(priv, "Loaded ucode is not valid!\n");
- priv->ucode_type = old_type;
+ IWL_ERR(trans, "Loaded ucode is not valid!\n");
+ trans->shrd->ucode_type = old_type;
return -EIO;
}
@@ -568,9 +692,9 @@ int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv,
* skip it for WoWLAN.
*/
if (ucode_type != IWL_UCODE_WOWLAN) {
- ret = iwl_verify_ucode(priv, image);
+ ret = iwl_verify_ucode(trans, ucode_type);
if (ret) {
- priv->ucode_type = old_type;
+ trans->shrd->ucode_type = old_type;
return ret;
}
@@ -578,42 +702,41 @@ int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv,
msleep(5);
}
- ret = iwlagn_alive_notify(priv);
+ ret = iwl_alive_notify(trans);
if (ret) {
- IWL_WARN(priv,
+ IWL_WARN(trans,
"Could not complete ALIVE transition: %d\n", ret);
- priv->ucode_type = old_type;
+ trans->shrd->ucode_type = old_type;
return ret;
}
return 0;
}
-int iwlagn_run_init_ucode(struct iwl_priv *priv)
+int iwl_run_init_ucode(struct iwl_trans *trans)
{
struct iwl_notification_wait calib_wait;
int ret;
- lockdep_assert_held(&priv->shrd->mutex);
+ lockdep_assert_held(&trans->shrd->mutex);
/* No init ucode required? Curious, but maybe ok */
- if (!priv->ucode_init.code.len)
+ if (!trans->ucode_init.code.len)
return 0;
- if (priv->ucode_type != IWL_UCODE_NONE)
+ if (trans->shrd->ucode_type != IWL_UCODE_NONE)
return 0;
- iwlagn_init_notification_wait(priv, &calib_wait,
+ iwl_init_notification_wait(trans->shrd, &calib_wait,
CALIBRATION_COMPLETE_NOTIFICATION,
NULL, NULL);
/* Will also start the device */
- ret = iwlagn_load_ucode_wait_alive(priv, &priv->ucode_init,
- IWL_UCODE_INIT);
+ ret = iwl_load_ucode_wait_alive(trans, IWL_UCODE_INIT);
if (ret)
goto error;
- ret = iwlagn_init_alive_start(priv);
+ ret = iwl_init_alive_start(trans);
if (ret)
goto error;
@@ -621,14 +744,15 @@ int iwlagn_run_init_ucode(struct iwl_priv *priv)
* Some things may run in the background now, but we
* just wait for the calibration complete notification.
*/
- ret = iwlagn_wait_notification(priv, &calib_wait, UCODE_CALIB_TIMEOUT);
+ ret = iwl_wait_notification(trans->shrd, &calib_wait,
+ UCODE_CALIB_TIMEOUT);
goto out;
error:
- iwlagn_remove_notification(priv, &calib_wait);
+ iwl_remove_notification(trans->shrd, &calib_wait);
out:
/* Whatever happened, stop the device */
- iwl_trans_stop_device(trans(priv));
+ iwl_trans_stop_device(trans);
return ret;
}
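
The notification-wait helpers moved into this file follow a simple pattern: arm a waiter before kicking off the operation, let the RX path mark it triggered, and sleep (with a timeout) until then. A minimal pthread model of that flow, with condition variables standing in for wait_event_timeout()/notif_waitq and the timeout/abort handling omitted for brevity:

/*
 * User-space model of the notification-wait flow (illustrative only).
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t waitq = PTHREAD_COND_INITIALIZER;
static bool triggered;

/* Plays the role of the RX handler that receives REPLY_ALIVE. */
static void *rx_alive_notification(void *arg)
{
	(void)arg;
	usleep(100 * 1000);		/* pretend the firmware took 100 ms to boot */

	pthread_mutex_lock(&lock);
	triggered = true;		/* wait_entry->triggered = true */
	pthread_cond_broadcast(&waitq);	/* wake_up_all(&shrd->notif_waitq) */
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t rx;

	/* iwl_init_notification_wait(): arm the waiter before starting the load */
	triggered = false;
	pthread_create(&rx, NULL, rx_alive_notification, NULL);

	/* iwl_wait_notification(): sleep until the handler marks us triggered */
	pthread_mutex_lock(&lock);
	while (!triggered)
		pthread_cond_wait(&waitq, &lock);
	pthread_mutex_unlock(&lock);

	pthread_join(rx, NULL);
	printf("ALIVE notification received\n");
	return 0;
}
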
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-calib.h b/drivers/net/wireless/iwlwifi/iwl-wifi.h
index f46c80e6e005..18501101a530 100644
--- a/drivers/net/wireless/iwlegacy/iwl-4965-calib.h
+++ b/drivers/net/wireless/iwlwifi/iwl-wifi.h
@@ -59,17 +59,16 @@
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
-#ifndef __iwl_4965_calib_h__
-#define __iwl_4965_calib_h__
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-commands.h"
+#ifndef __iwl_wifi_h__
+#define __iwl_wifi_h__
-void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp);
-void iwl4965_sensitivity_calibration(struct iwl_priv *priv, void *resp);
-void iwl4965_init_sensitivity(struct iwl_priv *priv);
-void iwl4965_reset_run_time_calib(struct iwl_priv *priv);
-void iwl4965_calib_free_results(struct iwl_priv *priv);
+#include "iwl-shared.h"
-#endif /* __iwl_4965_calib_h__ */
+int iwl_send_bt_env(struct iwl_trans *trans, u8 action, u8 type);
+void iwl_send_prio_tbl(struct iwl_trans *trans);
+int iwl_init_alive_start(struct iwl_trans *trans);
+int iwl_run_init_ucode(struct iwl_trans *trans);
+int iwl_load_ucode_wait_alive(struct iwl_trans *trans,
+ enum iwl_ucode_type ucode_type);
+#endif /* __iwl_wifi_h__ */
diff --git a/drivers/net/wireless/iwmc3200wifi/cfg80211.c b/drivers/net/wireless/iwmc3200wifi/cfg80211.c
index c42be81e979e..48e8218fd23b 100644
--- a/drivers/net/wireless/iwmc3200wifi/cfg80211.c
+++ b/drivers/net/wireless/iwmc3200wifi/cfg80211.c
@@ -165,11 +165,15 @@ static int iwm_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
struct key_params *params)
{
struct iwm_priv *iwm = ndev_to_iwm(ndev);
- struct iwm_key *key = &iwm->keys[key_index];
+ struct iwm_key *key;
int ret;
IWM_DBG_WEXT(iwm, DBG, "Adding key for %pM\n", mac_addr);
+ if (key_index >= IWM_NUM_KEYS)
+ return -ENOENT;
+
+ key = &iwm->keys[key_index];
memset(key, 0, sizeof(struct iwm_key));
ret = iwm_key_init(key, key_index, mac_addr, params);
if (ret < 0) {
@@ -214,8 +218,12 @@ static int iwm_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
u8 key_index, bool pairwise, const u8 *mac_addr)
{
struct iwm_priv *iwm = ndev_to_iwm(ndev);
- struct iwm_key *key = &iwm->keys[key_index];
+ struct iwm_key *key;
+ if (key_index >= IWM_NUM_KEYS)
+ return -ENOENT;
+
+ key = &iwm->keys[key_index];
if (!iwm->keys[key_index].key_len) {
IWM_DBG_WEXT(iwm, DBG, "Key %d not used\n", key_index);
return 0;
@@ -236,6 +244,9 @@ static int iwm_cfg80211_set_default_key(struct wiphy *wiphy,
IWM_DBG_WEXT(iwm, DBG, "Default key index is: %d\n", key_index);
+ if (key_index >= IWM_NUM_KEYS)
+ return -ENOENT;
+
if (!iwm->keys[key_index].key_len) {
IWM_ERR(iwm, "Key %d not used\n", key_index);
return -EINVAL;
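
The three cfg80211 hunks above apply the same fix: reject key_index values of IWM_NUM_KEYS or more before using them to index iwm->keys[]. A compact model of the guarded lookup (the table size below is an assumed demo value):

/*
 * Compact model of the bounds check: validate key_index before indexing
 * the fixed-size key table.
 */
#include <errno.h>
#include <stdio.h>

#define IWM_NUM_KEYS 4	/* assumed demo value */

struct iwm_key { int key_len; };
static struct iwm_key keys[IWM_NUM_KEYS];

static int del_key(unsigned int key_index)
{
	if (key_index >= IWM_NUM_KEYS)	/* reject out-of-range indexes up front */
		return -ENOENT;

	keys[key_index].key_len = 0;	/* safe: the index is now known valid */
	return 0;
}

int main(void)
{
	printf("del key 1 -> %d\n", del_key(1));	/* 0 */
	printf("del key 9 -> %d\n", del_key(9));	/* -ENOENT */
	return 0;
}
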
diff --git a/drivers/net/wireless/iwmc3200wifi/main.c b/drivers/net/wireless/iwmc3200wifi/main.c
index 98a179f98ea1..1f868b166d10 100644
--- a/drivers/net/wireless/iwmc3200wifi/main.c
+++ b/drivers/net/wireless/iwmc3200wifi/main.c
@@ -91,11 +91,11 @@ static struct iwm_conf def_iwm_conf = {
.mac_addr = {0x00, 0x02, 0xb3, 0x01, 0x02, 0x03},
};
-static int modparam_reset;
+static bool modparam_reset;
module_param_named(reset, modparam_reset, bool, 0644);
MODULE_PARM_DESC(reset, "reset on firmware errors (default 0 [not reset])");
-static int modparam_wimax_enable = 1;
+static bool modparam_wimax_enable = true;
module_param_named(wimax_enable, modparam_wimax_enable, bool, 0644);
MODULE_PARM_DESC(wimax_enable, "Enable wimax core (default 1 [wimax enabled])");
@@ -130,7 +130,7 @@ static void iwm_disconnect_work(struct work_struct *work)
iwm_invalidate_mlme_profile(iwm);
clear_bit(IWM_STATUS_ASSOCIATED, &iwm->status);
- iwm->umac_profile_active = 0;
+ iwm->umac_profile_active = false;
memset(iwm->bssid, 0, ETH_ALEN);
iwm->channel = 0;
diff --git a/drivers/net/wireless/iwmc3200wifi/rx.c b/drivers/net/wireless/iwmc3200wifi/rx.c
index a414768f40f1..7d708f4395f3 100644
--- a/drivers/net/wireless/iwmc3200wifi/rx.c
+++ b/drivers/net/wireless/iwmc3200wifi/rx.c
@@ -660,7 +660,7 @@ static int iwm_mlme_profile_invalidate(struct iwm_priv *iwm, u8 *buf,
clear_bit(IWM_STATUS_SME_CONNECTING, &iwm->status);
clear_bit(IWM_STATUS_ASSOCIATED, &iwm->status);
- iwm->umac_profile_active = 0;
+ iwm->umac_profile_active = false;
memset(iwm->bssid, 0, ETH_ALEN);
iwm->channel = 0;
@@ -735,7 +735,7 @@ static int iwm_mlme_update_sta_table(struct iwm_priv *iwm, u8 *buf,
umac_sta->mac_addr,
umac_sta->flags & UMAC_STA_FLAG_QOS);
- sta->valid = 1;
+ sta->valid = true;
sta->qos = umac_sta->flags & UMAC_STA_FLAG_QOS;
sta->color = GET_VAL8(umac_sta->sta_id, LMAC_STA_COLOR);
memcpy(sta->addr, umac_sta->mac_addr, ETH_ALEN);
@@ -750,12 +750,12 @@ static int iwm_mlme_update_sta_table(struct iwm_priv *iwm, u8 *buf,
sta = &iwm->sta_table[GET_VAL8(umac_sta->sta_id, LMAC_STA_ID)];
if (!memcmp(sta->addr, umac_sta->mac_addr, ETH_ALEN))
- sta->valid = 0;
+ sta->valid = false;
break;
case UMAC_OPCODE_CLEAR_ALL:
for (i = 0; i < IWM_STA_TABLE_NUM; i++)
- iwm->sta_table[i].valid = 0;
+ iwm->sta_table[i].valid = false;
break;
default:
@@ -1203,7 +1203,7 @@ static int iwm_ntf_wifi_if_wrapper(struct iwm_priv *iwm, u8 *buf,
switch (hdr->oid) {
case UMAC_WIFI_IF_CMD_SET_PROFILE:
- iwm->umac_profile_active = 1;
+ iwm->umac_profile_active = true;
break;
default:
break;
@@ -1363,7 +1363,7 @@ static int iwm_rx_handle_nonwifi(struct iwm_priv *iwm, u8 *buf,
*/
list_for_each_entry(cmd, &iwm->nonwifi_pending_cmd, pending)
if (cmd->seq_num == seq_num) {
- cmd->resp_received = 1;
+ cmd->resp_received = true;
cmd->buf.len = buf_size;
memcpy(cmd->buf.hdr, buf, buf_size);
wake_up_interruptible(&iwm->nonwifi_queue);
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index a7f1ab28940d..a7cd311cb1b7 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -485,6 +485,7 @@ static int lbs_cfg_set_channel(struct wiphy *wiphy,
static int lbs_ret_scan(struct lbs_private *priv, unsigned long dummy,
struct cmd_header *resp)
{
+ struct cfg80211_bss *bss;
struct cmd_ds_802_11_scan_rsp *scanresp = (void *)resp;
int bsssize;
const u8 *pos;
@@ -632,12 +633,14 @@ static int lbs_ret_scan(struct lbs_private *priv, unsigned long dummy,
LBS_SCAN_RSSI_TO_MBM(rssi)/100);
if (channel &&
- !(channel->flags & IEEE80211_CHAN_DISABLED))
- cfg80211_inform_bss(wiphy, channel,
+ !(channel->flags & IEEE80211_CHAN_DISABLED)) {
+ bss = cfg80211_inform_bss(wiphy, channel,
bssid, get_unaligned_le64(tsfdesc),
capa, intvl, ie, ielen,
LBS_SCAN_RSSI_TO_MBM(rssi),
GFP_KERNEL);
+ cfg80211_put_bss(bss);
+ }
} else
lbs_deb_scan("scan response: missing BSS channel IE\n");
@@ -728,9 +731,11 @@ static void lbs_scan_worker(struct work_struct *work)
le16_to_cpu(scan_cmd->hdr.size),
lbs_ret_scan, 0);
- if (priv->scan_channel >= priv->scan_req->n_channels)
+ if (priv->scan_channel >= priv->scan_req->n_channels) {
/* Mark scan done */
+ cancel_delayed_work(&priv->scan_work);
lbs_scan_done(priv);
+ }
/* Restart network */
if (carrier)
@@ -759,12 +764,12 @@ static void _internal_start_scan(struct lbs_private *priv, bool internal,
request->n_ssids, request->n_channels, request->ie_len);
priv->scan_channel = 0;
- queue_delayed_work(priv->work_thread, &priv->scan_work,
- msecs_to_jiffies(50));
-
priv->scan_req = request;
priv->internal_scan = internal;
+ queue_delayed_work(priv->work_thread, &priv->scan_work,
+ msecs_to_jiffies(50));
+
lbs_deb_leave(LBS_DEB_CFG80211);
}
@@ -1720,6 +1725,7 @@ static void lbs_join_post(struct lbs_private *priv,
2 + 2 + /* atim */
2 + 8]; /* extended rates */
u8 *fake = fake_ie;
+ struct cfg80211_bss *bss;
lbs_deb_enter(LBS_DEB_CFG80211);
@@ -1763,14 +1769,15 @@ static void lbs_join_post(struct lbs_private *priv,
*fake++ = 0x6c;
lbs_deb_hex(LBS_DEB_CFG80211, "IE", fake_ie, fake - fake_ie);
- cfg80211_inform_bss(priv->wdev->wiphy,
- params->channel,
- bssid,
- 0,
- capability,
- params->beacon_interval,
- fake_ie, fake - fake_ie,
- 0, GFP_KERNEL);
+ bss = cfg80211_inform_bss(priv->wdev->wiphy,
+ params->channel,
+ bssid,
+ 0,
+ capability,
+ params->beacon_interval,
+ fake_ie, fake - fake_ie,
+ 0, GFP_KERNEL);
+ cfg80211_put_bss(bss);
memcpy(priv->wdev->ssid, params->ssid, params->ssid_len);
priv->wdev->ssid_len = params->ssid_len;
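Both libertas call sites are reworked because cfg80211_inform_bss() hands back a referenced struct cfg80211_bss * that the caller must release with cfg80211_put_bss() once it no longer needs it. The standalone sketch below shows only that acquire/release shape, with hypothetical demo_* names; it is not the cfg80211 API.

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for a refcounted BSS entry. */
struct demo_bss { int refcount; };

/* Returns an entry with one reference owned by the caller. */
static struct demo_bss *demo_inform_bss(void)
{
	struct demo_bss *bss = calloc(1, sizeof(*bss));

	if (bss)
		bss->refcount = 1;
	return bss;
}

static void demo_put_bss(struct demo_bss *bss)
{
	if (bss && --bss->refcount == 0)
		free(bss);
}

int main(void)
{
	struct demo_bss *bss = demo_inform_bss();

	/* The caller keeps no pointer to the entry, so it must drop
	 * the reference it was handed, as the hunks above now do. */
	demo_put_bss(bss);
	return 0;
}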
diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c
index d8d8f0d0899f..c192671610fc 100644
--- a/drivers/net/wireless/libertas/debugfs.c
+++ b/drivers/net/wireless/libertas/debugfs.c
@@ -704,7 +704,7 @@ out_unlock:
struct lbs_debugfs_files {
const char *name;
- int perm;
+ umode_t perm;
struct file_operations fops;
};
diff --git a/drivers/net/wireless/libertas/ethtool.c b/drivers/net/wireless/libertas/ethtool.c
index 885ddc1c4fed..f955b2d66ed6 100644
--- a/drivers/net/wireless/libertas/ethtool.c
+++ b/drivers/net/wireless/libertas/ethtool.c
@@ -13,13 +13,14 @@ static void lbs_ethtool_get_drvinfo(struct net_device *dev,
{
struct lbs_private *priv = dev->ml_priv;
- snprintf(info->fw_version, 32, "%u.%u.%u.p%u",
+ snprintf(info->fw_version, sizeof(info->fw_version),
+ "%u.%u.%u.p%u",
priv->fwrelease >> 24 & 0xff,
priv->fwrelease >> 16 & 0xff,
priv->fwrelease >> 8 & 0xff,
priv->fwrelease & 0xff);
- strcpy(info->driver, "libertas");
- strcpy(info->version, lbs_driver_version);
+ strlcpy(info->driver, "libertas", sizeof(info->driver));
+ strlcpy(info->version, lbs_driver_version, sizeof(info->version));
}
/*
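The ethtool hunk replaces unbounded strcpy() calls with copies bounded by the destination buffer size. A standalone sketch of the same precaution is below, using only standard C (strncpy plus explicit termination stands in for the kernel's strlcpy); the struct and values are hypothetical.

#include <stdio.h>
#include <string.h>

struct demo_drvinfo {
	char driver[16];
	char fw_version[32];
};

int main(void)
{
	struct demo_drvinfo info;
	unsigned int fwrelease = 0x09460301;	/* example firmware word */

	/* Bound every copy by the size of the destination buffer. */
	snprintf(info.fw_version, sizeof(info.fw_version), "%u.%u.%u.p%u",
		 fwrelease >> 24 & 0xff, fwrelease >> 16 & 0xff,
		 fwrelease >> 8 & 0xff, fwrelease & 0xff);
	strncpy(info.driver, "libertas", sizeof(info.driver) - 1);
	info.driver[sizeof(info.driver) - 1] = '\0';

	printf("%s %s\n", info.driver, info.fw_version);
	return 0;
}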
diff --git a/drivers/net/wireless/libertas/if_cs.c b/drivers/net/wireless/libertas/if_cs.c
index e26935179861..3f7bf4d912b6 100644
--- a/drivers/net/wireless/libertas/if_cs.c
+++ b/drivers/net/wireless/libertas/if_cs.c
@@ -859,7 +859,7 @@ static int if_cs_probe(struct pcmcia_device *p_dev)
* Most of the libertas cards can do unaligned register access, but some
* weird ones cannot. That's especially true for the CF8305 card.
*/
- card->align_regs = 0;
+ card->align_regs = false;
card->model = get_model(p_dev->manf_id, p_dev->card_id);
if (card->model == MODEL_UNKNOWN) {
@@ -871,7 +871,7 @@ static int if_cs_probe(struct pcmcia_device *p_dev)
/* Check if we have a current silicon */
prod_id = if_cs_read8(card, IF_CS_PRODUCT_ID);
if (card->model == MODEL_8305) {
- card->align_regs = 1;
+ card->align_regs = true;
if (prod_id < IF_CS_CF8305_B1_REV) {
pr_err("8305 rev B0 and older are not supported\n");
ret = -ENODEV;
diff --git a/drivers/net/wireless/libertas/if_spi.c b/drivers/net/wireless/libertas/if_spi.c
index 728baa445259..50b1ee7721e9 100644
--- a/drivers/net/wireless/libertas/if_spi.c
+++ b/drivers/net/wireless/libertas/if_spi.c
@@ -1291,7 +1291,6 @@ static struct spi_driver libertas_spi_driver = {
.remove = __devexit_p(libertas_spi_remove),
.driver = {
.name = "libertas_spi",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
.pm = &if_spi_pm_ops,
},
diff --git a/drivers/net/wireless/libertas_tf/main.c b/drivers/net/wireless/libertas_tf/main.c
index ceb51b6e6702..a03457292c88 100644
--- a/drivers/net/wireless/libertas_tf/main.c
+++ b/drivers/net/wireless/libertas_tf/main.c
@@ -719,11 +719,11 @@ void lbtf_bcn_sent(struct lbtf_private *priv)
return;
if (skb_queue_empty(&priv->bc_ps_buf)) {
- bool tx_buff_bc = 0;
+ bool tx_buff_bc = false;
while ((skb = ieee80211_get_buffered_bc(priv->hw, priv->vif))) {
skb_queue_tail(&priv->bc_ps_buf, skb);
- tx_buff_bc = 1;
+ tx_buff_bc = true;
}
if (tx_buff_bc) {
ieee80211_stop_queues(priv->hw);
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 523ad55a2885..4b9e730d2c8a 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -37,7 +37,8 @@ MODULE_AUTHOR("Jouni Malinen");
MODULE_DESCRIPTION("Software simulator of 802.11 radio(s) for mac80211");
MODULE_LICENSE("GPL");
-int wmediumd_pid;
+static u32 wmediumd_pid;
+
static int radios = 2;
module_param(radios, int, 0444);
MODULE_PARM_DESC(radios, "Number of simulated radios");
@@ -665,7 +666,7 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
bool ack;
struct ieee80211_tx_info *txi;
- int _pid;
+ u32 _pid;
mac80211_hwsim_monitor_rx(hw, skb);
@@ -676,7 +677,7 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
}
/* wmediumd mode check */
- _pid = wmediumd_pid;
+ _pid = ACCESS_ONCE(wmediumd_pid);
if (_pid)
return mac80211_hwsim_tx_frame_nl(hw, skb, _pid);
@@ -707,7 +708,7 @@ static int mac80211_hwsim_start(struct ieee80211_hw *hw)
{
struct mac80211_hwsim_data *data = hw->priv;
wiphy_debug(hw->wiphy, "%s\n", __func__);
- data->started = 1;
+ data->started = true;
return 0;
}
@@ -715,7 +716,7 @@ static int mac80211_hwsim_start(struct ieee80211_hw *hw)
static void mac80211_hwsim_stop(struct ieee80211_hw *hw)
{
struct mac80211_hwsim_data *data = hw->priv;
- data->started = 0;
+ data->started = false;
del_timer(&data->beacon_timer);
wiphy_debug(hw->wiphy, "%s\n", __func__);
}
@@ -764,7 +765,7 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
struct ieee80211_hw *hw = arg;
struct sk_buff *skb;
struct ieee80211_tx_info *info;
- int _pid;
+ u32 _pid;
hwsim_check_magic(vif);
@@ -781,7 +782,7 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
mac80211_hwsim_monitor_rx(hw, skb);
/* wmediumd mode check */
- _pid = wmediumd_pid;
+ _pid = ACCESS_ONCE(wmediumd_pid);
if (_pid)
return mac80211_hwsim_tx_frame_nl(hw, skb, _pid);
@@ -1254,7 +1255,7 @@ static void hwsim_send_ps_poll(void *dat, u8 *mac, struct ieee80211_vif *vif)
struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
struct sk_buff *skb;
struct ieee80211_pspoll *pspoll;
- int _pid;
+ u32 _pid;
if (!vp->assoc)
return;
@@ -1275,7 +1276,7 @@ static void hwsim_send_ps_poll(void *dat, u8 *mac, struct ieee80211_vif *vif)
memcpy(pspoll->ta, mac, ETH_ALEN);
/* wmediumd mode check */
- _pid = wmediumd_pid;
+ _pid = ACCESS_ONCE(wmediumd_pid);
if (_pid)
return mac80211_hwsim_tx_frame_nl(data->hw, skb, _pid);
@@ -1292,7 +1293,7 @@ static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac,
struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
struct sk_buff *skb;
struct ieee80211_hdr *hdr;
- int _pid;
+ u32 _pid;
if (!vp->assoc)
return;
@@ -1314,7 +1315,7 @@ static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac,
memcpy(hdr->addr3, vp->bssid, ETH_ALEN);
/* wmediumd mode check */
- _pid = wmediumd_pid;
+ _pid = ACCESS_ONCE(wmediumd_pid);
if (_pid)
return mac80211_hwsim_tx_frame_nl(data->hw, skb, _pid);
@@ -1634,8 +1635,6 @@ static int hwsim_init_netlink(void)
int rc;
printk(KERN_INFO "mac80211_hwsim: initializing netlink\n");
- wmediumd_pid = 0;
-
rc = genl_register_family_with_ops(&hwsim_genl_family,
hwsim_ops, ARRAY_SIZE(hwsim_ops));
if (rc)
@@ -1748,6 +1747,8 @@ static int __init init_mac80211_hwsim(void)
IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
IEEE80211_HW_AMPDU_AGGREGATION;
+ hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
+
/* ask mac80211 to reserve space for magic */
hw->vif_data_size = sizeof(struct hwsim_vif_priv);
hw->sta_data_size = sizeof(struct hwsim_sta_priv);
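The hwsim hunks read the shared wmediumd_pid exactly once into a local variable (via the kernel's ACCESS_ONCE()) and then test and use that snapshot, so the check and the use cannot observe different values. A standalone sketch of the read-once pattern follows, with hypothetical names and a plain volatile read standing in for ACCESS_ONCE().

#include <stdio.h>

/* Updated elsewhere (e.g. from a netlink handler in the real driver). */
static volatile unsigned int shared_pid;

static void tx_path(void)
{
	/* Take one snapshot of the shared value ... */
	unsigned int pid = shared_pid;

	/* ... and use only the snapshot, so the test and the use
	 * below are guaranteed to see the same value. */
	if (pid)
		printf("forward frame to pid %u\n", pid);
	else
		printf("no userspace medium registered\n");
}

int main(void)
{
	tx_path();
	shared_pid = 1234;
	tx_path();
	return 0;
}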
diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.c b/drivers/net/wireless/mwifiex/11n_rxreorder.c
index 7aa9aa0ac958..681d3f2a4c28 100644
--- a/drivers/net/wireless/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/mwifiex/11n_rxreorder.c
@@ -33,7 +33,7 @@
* Since the buffer is linear, the function uses rotation to simulate
* circular buffer.
*/
-static int
+static void
mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
struct mwifiex_rx_reorder_tbl
*rx_reor_tbl_ptr, int start_win)
@@ -71,8 +71,6 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
rx_reor_tbl_ptr->start_win = start_win;
spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
-
- return 0;
}
/*
@@ -83,7 +81,7 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
* Since the buffer is linear, the function uses rotation to simulate
* circular buffer.
*/
-static int
+static void
mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
struct mwifiex_rx_reorder_tbl *rx_reor_tbl_ptr)
{
@@ -119,7 +117,6 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
rx_reor_tbl_ptr->start_win = (rx_reor_tbl_ptr->start_win + i)
&(MAX_TID_VALUE - 1);
spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
- return 0;
}
/*
@@ -405,7 +402,7 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
u8 *ta, u8 pkt_type, void *payload)
{
struct mwifiex_rx_reorder_tbl *rx_reor_tbl_ptr;
- int start_win, end_win, win_size, ret;
+ int start_win, end_win, win_size;
u16 pkt_index;
rx_reor_tbl_ptr =
@@ -452,11 +449,8 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
start_win = (end_win - win_size) + 1;
else
start_win = (MAX_TID_VALUE - (win_size - seq_num)) + 1;
- ret = mwifiex_11n_dispatch_pkt_until_start_win(priv,
+ mwifiex_11n_dispatch_pkt_until_start_win(priv,
rx_reor_tbl_ptr, start_win);
-
- if (ret)
- return ret;
}
if (pkt_type != PKT_TYPE_BAR) {
@@ -475,9 +469,9 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
* Dispatch all packets sequentially from start_win until a
* hole is found and adjust the start_win appropriately
*/
- ret = mwifiex_11n_scan_and_dispatch(priv, rx_reor_tbl_ptr);
+ mwifiex_11n_scan_and_dispatch(priv, rx_reor_tbl_ptr);
- return ret;
+ return 0;
}
/*
diff --git a/drivers/net/wireless/mwifiex/Kconfig b/drivers/net/wireless/mwifiex/Kconfig
index 8f2797aa0c60..2a078cea830a 100644
--- a/drivers/net/wireless/mwifiex/Kconfig
+++ b/drivers/net/wireless/mwifiex/Kconfig
@@ -10,12 +10,12 @@ config MWIFIEX
mwifiex.
config MWIFIEX_SDIO
- tristate "Marvell WiFi-Ex Driver for SD8787"
+ tristate "Marvell WiFi-Ex Driver for SD8787/SD8797"
depends on MWIFIEX && MMC
select FW_LOADER
---help---
This adds support for wireless adapters based on Marvell
- 8787 chipset with SDIO interface.
+ 8787/8797 chipsets with SDIO interface.
If you choose to build it as a module, it will be called
mwifiex_sdio.
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index 462c71067bfb..c3b6c4652cd6 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -24,50 +24,26 @@
* This function maps the nl802.11 channel type into driver channel type.
*
* The mapping is as follows -
- * NL80211_CHAN_NO_HT -> NO_SEC_CHANNEL
- * NL80211_CHAN_HT20 -> NO_SEC_CHANNEL
- * NL80211_CHAN_HT40PLUS -> SEC_CHANNEL_ABOVE
- * NL80211_CHAN_HT40MINUS -> SEC_CHANNEL_BELOW
- * Others -> NO_SEC_CHANNEL
+ * NL80211_CHAN_NO_HT -> IEEE80211_HT_PARAM_CHA_SEC_NONE
+ * NL80211_CHAN_HT20 -> IEEE80211_HT_PARAM_CHA_SEC_NONE
+ * NL80211_CHAN_HT40PLUS -> IEEE80211_HT_PARAM_CHA_SEC_ABOVE
+ * NL80211_CHAN_HT40MINUS -> IEEE80211_HT_PARAM_CHA_SEC_BELOW
+ * Others -> IEEE80211_HT_PARAM_CHA_SEC_NONE
*/
-static int
-mwifiex_cfg80211_channel_type_to_mwifiex_channels(enum nl80211_channel_type
- channel_type)
+static u8
+mwifiex_cfg80211_channel_type_to_sec_chan_offset(enum nl80211_channel_type
+ channel_type)
{
switch (channel_type) {
case NL80211_CHAN_NO_HT:
case NL80211_CHAN_HT20:
- return NO_SEC_CHANNEL;
+ return IEEE80211_HT_PARAM_CHA_SEC_NONE;
case NL80211_CHAN_HT40PLUS:
- return SEC_CHANNEL_ABOVE;
+ return IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
case NL80211_CHAN_HT40MINUS:
- return SEC_CHANNEL_BELOW;
- default:
- return NO_SEC_CHANNEL;
- }
-}
-
-/*
- * This function maps the driver channel type into nl802.11 channel type.
- *
- * The mapping is as follows -
- * NO_SEC_CHANNEL -> NL80211_CHAN_HT20
- * SEC_CHANNEL_ABOVE -> NL80211_CHAN_HT40PLUS
- * SEC_CHANNEL_BELOW -> NL80211_CHAN_HT40MINUS
- * Others -> NL80211_CHAN_HT20
- */
-static enum nl80211_channel_type
-mwifiex_channels_to_cfg80211_channel_type(int channel_type)
-{
- switch (channel_type) {
- case NO_SEC_CHANNEL:
- return NL80211_CHAN_HT20;
- case SEC_CHANNEL_ABOVE:
- return NL80211_CHAN_HT40PLUS;
- case SEC_CHANNEL_BELOW:
- return NL80211_CHAN_HT40MINUS;
+ return IEEE80211_HT_PARAM_CHA_SEC_BELOW;
default:
- return NL80211_CHAN_HT20;
+ return IEEE80211_HT_PARAM_CHA_SEC_NONE;
}
}
@@ -120,10 +96,11 @@ mwifiex_cfg80211_del_key(struct wiphy *wiphy, struct net_device *netdev,
static int
mwifiex_cfg80211_set_tx_power(struct wiphy *wiphy,
enum nl80211_tx_power_setting type,
- int dbm)
+ int mbm)
{
struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy);
struct mwifiex_power_cfg power_cfg;
+ int dbm = MBM_TO_DBM(mbm);
if (type == NL80211_TX_POWER_FIXED) {
power_cfg.is_power_auto = 0;
@@ -330,37 +307,51 @@ mwifiex_set_rf_channel(struct mwifiex_private *priv,
enum nl80211_channel_type channel_type)
{
struct mwifiex_chan_freq_power cfp;
- struct mwifiex_ds_band_cfg band_cfg;
u32 config_bands = 0;
struct wiphy *wiphy = priv->wdev->wiphy;
+ struct mwifiex_adapter *adapter = priv->adapter;
if (chan) {
- memset(&band_cfg, 0, sizeof(band_cfg));
/* Set appropriate bands */
- if (chan->band == IEEE80211_BAND_2GHZ)
- config_bands = BAND_B | BAND_G | BAND_GN;
- else
- config_bands = BAND_AN | BAND_A;
- if (priv->bss_mode == NL80211_IFTYPE_STATION
- || priv->bss_mode == NL80211_IFTYPE_UNSPECIFIED) {
- band_cfg.config_bands = config_bands;
- } else if (priv->bss_mode == NL80211_IFTYPE_ADHOC) {
- band_cfg.config_bands = config_bands;
- band_cfg.adhoc_start_band = config_bands;
+ if (chan->band == IEEE80211_BAND_2GHZ) {
+ if (channel_type == NL80211_CHAN_NO_HT)
+ if (priv->adapter->config_bands == BAND_B ||
+ priv->adapter->config_bands == BAND_G)
+ config_bands =
+ priv->adapter->config_bands;
+ else
+ config_bands = BAND_B | BAND_G;
+ else
+ config_bands = BAND_B | BAND_G | BAND_GN;
+ } else {
+ if (channel_type == NL80211_CHAN_NO_HT)
+ config_bands = BAND_A;
+ else
+ config_bands = BAND_AN | BAND_A;
}
- band_cfg.sec_chan_offset =
- mwifiex_cfg80211_channel_type_to_mwifiex_channels
+ if (!((config_bands | adapter->fw_bands) &
+ ~adapter->fw_bands)) {
+ adapter->config_bands = config_bands;
+ if (priv->bss_mode == NL80211_IFTYPE_ADHOC) {
+ adapter->adhoc_start_band = config_bands;
+ if ((config_bands & BAND_GN) ||
+ (config_bands & BAND_AN))
+ adapter->adhoc_11n_enabled = true;
+ else
+ adapter->adhoc_11n_enabled = false;
+ }
+ }
+ adapter->sec_chan_offset =
+ mwifiex_cfg80211_channel_type_to_sec_chan_offset
(channel_type);
-
- if (mwifiex_set_radio_band_cfg(priv, &band_cfg))
- return -EFAULT;
+ adapter->channel_type = channel_type;
mwifiex_send_domain_info_cmd_fw(wiphy);
}
wiphy_dbg(wiphy, "info: setting band %d, channel offset %d and "
- "mode %d\n", config_bands, band_cfg.sec_chan_offset,
+ "mode %d\n", config_bands, adapter->sec_chan_offset,
priv->bss_mode);
if (!chan)
return 0;
@@ -696,9 +687,9 @@ static int mwifiex_cfg80211_set_bitrate_mask(struct wiphy *wiphy,
const u8 *peer,
const struct cfg80211_bitrate_mask *mask)
{
- struct mwifiex_ds_band_cfg band_cfg;
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
int index = 0, mode = 0, i;
+ struct mwifiex_adapter *adapter = priv->adapter;
/* Currently only 2.4GHz is supported */
for (i = 0; i < mwifiex_band_2ghz.n_bitrates; i++) {
@@ -720,16 +711,15 @@ static int mwifiex_cfg80211_set_bitrate_mask(struct wiphy *wiphy,
mode |= BAND_B;
}
- memset(&band_cfg, 0, sizeof(band_cfg));
- band_cfg.config_bands = mode;
-
- if (priv->bss_mode == NL80211_IFTYPE_ADHOC)
- band_cfg.adhoc_start_band = mode;
-
- band_cfg.sec_chan_offset = NO_SEC_CHANNEL;
-
- if (mwifiex_set_radio_band_cfg(priv, &band_cfg))
- return -EFAULT;
+ if (!((mode | adapter->fw_bands) & ~adapter->fw_bands)) {
+ adapter->config_bands = mode;
+ if (priv->bss_mode == NL80211_IFTYPE_ADHOC) {
+ adapter->adhoc_start_band = mode;
+ adapter->adhoc_11n_enabled = false;
+ }
+ }
+ adapter->sec_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_NONE;
+ adapter->channel_type = NL80211_CHAN_NO_HT;
wiphy_debug(wiphy, "info: device configured in 802.11%s%s mode\n",
(mode & BAND_B) ? "b" : "",
@@ -750,17 +740,13 @@ mwifiex_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev,
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
- if (priv->disconnect)
- return -EBUSY;
-
- priv->disconnect = 1;
if (mwifiex_deauthenticate(priv, NULL))
return -EFAULT;
wiphy_dbg(wiphy, "info: successfully disconnected from %pM:"
" reason code %d\n", priv->cfg_bssid, reason_code);
- queue_work(priv->workqueue, &priv->cfg_workqueue);
+ memset(priv->cfg_bssid, 0, ETH_ALEN);
return 0;
}
@@ -780,6 +766,7 @@ static int mwifiex_cfg80211_inform_ibss_bss(struct mwifiex_private *priv)
{
struct ieee80211_channel *chan;
struct mwifiex_bss_info bss_info;
+ struct cfg80211_bss *bss;
int ie_len;
u8 ie_buf[IEEE80211_MAX_SSID_LEN + sizeof(struct ieee_types_header)];
enum ieee80211_band band;
@@ -800,9 +787,10 @@ static int mwifiex_cfg80211_inform_ibss_bss(struct mwifiex_private *priv)
ieee80211_channel_to_frequency(bss_info.bss_chan,
band));
- cfg80211_inform_bss(priv->wdev->wiphy, chan,
+ bss = cfg80211_inform_bss(priv->wdev->wiphy, chan,
bss_info.bssid, 0, WLAN_CAPABILITY_IBSS,
0, ie_buf, ie_len, 0, GFP_KERNEL);
+ cfg80211_put_bss(bss);
memcpy(priv->cfg_bssid, bss_info.bssid, ETH_ALEN);
return 0;
@@ -851,8 +839,7 @@ mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len, u8 *ssid,
if (channel)
ret = mwifiex_set_rf_channel(priv, channel,
- mwifiex_channels_to_cfg80211_channel_type
- (priv->adapter->chan_offset));
+ priv->adapter->channel_type);
ret = mwifiex_set_encode(priv, NULL, 0, 0, 1); /* Disable keys */
@@ -978,27 +965,32 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
int ret = 0;
- if (priv->assoc_request)
- return -EBUSY;
-
if (priv->bss_mode == NL80211_IFTYPE_ADHOC) {
wiphy_err(wiphy, "received infra assoc request "
"when station is in ibss mode\n");
goto done;
}
- priv->assoc_request = -EINPROGRESS;
-
wiphy_dbg(wiphy, "info: Trying to associate to %s and bssid %pM\n",
(char *) sme->ssid, sme->bssid);
ret = mwifiex_cfg80211_assoc(priv, sme->ssid_len, sme->ssid, sme->bssid,
priv->bss_mode, sme->channel, sme, 0);
-
- priv->assoc_request = 1;
done:
- priv->assoc_result = ret;
- queue_work(priv->workqueue, &priv->cfg_workqueue);
+ if (!ret) {
+ cfg80211_connect_result(priv->netdev, priv->cfg_bssid, NULL, 0,
+ NULL, 0, WLAN_STATUS_SUCCESS,
+ GFP_KERNEL);
+ dev_dbg(priv->adapter->dev,
+ "info: associated to bssid %pM successfully\n",
+ priv->cfg_bssid);
+ } else {
+ dev_dbg(priv->adapter->dev,
+ "info: association to bssid %pM failed\n",
+ priv->cfg_bssid);
+ memset(priv->cfg_bssid, 0, ETH_ALEN);
+ }
+
return ret;
}
@@ -1015,28 +1007,29 @@ mwifiex_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy);
int ret = 0;
- if (priv->ibss_join_request)
- return -EBUSY;
-
if (priv->bss_mode != NL80211_IFTYPE_ADHOC) {
wiphy_err(wiphy, "request to join ibss received "
"when station is not in ibss mode\n");
goto done;
}
- priv->ibss_join_request = -EINPROGRESS;
-
wiphy_dbg(wiphy, "info: trying to join to %s and bssid %pM\n",
(char *) params->ssid, params->bssid);
ret = mwifiex_cfg80211_assoc(priv, params->ssid_len, params->ssid,
params->bssid, priv->bss_mode,
params->channel, NULL, params->privacy);
-
- priv->ibss_join_request = 1;
done:
- priv->ibss_join_result = ret;
- queue_work(priv->workqueue, &priv->cfg_workqueue);
+ if (!ret) {
+ cfg80211_ibss_joined(priv->netdev, priv->cfg_bssid, GFP_KERNEL);
+ dev_dbg(priv->adapter->dev,
+ "info: joined/created adhoc network with bssid"
+ " %pM successfully\n", priv->cfg_bssid);
+ } else {
+ dev_dbg(priv->adapter->dev,
+ "info: failed creating/joining adhoc network\n");
+ }
+
return ret;
}
@@ -1051,17 +1044,12 @@ mwifiex_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
{
struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy);
- if (priv->disconnect)
- return -EBUSY;
-
- priv->disconnect = 1;
-
wiphy_dbg(wiphy, "info: disconnecting from essid %pM\n",
priv->cfg_bssid);
if (mwifiex_deauthenticate(priv, NULL))
return -EFAULT;
- queue_work(priv->workqueue, &priv->cfg_workqueue);
+ memset(priv->cfg_bssid, 0, ETH_ALEN);
return 0;
}
@@ -1078,15 +1066,42 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_scan_request *request)
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+ int i;
+ struct ieee80211_channel *chan;
wiphy_dbg(wiphy, "info: received scan request on %s\n", dev->name);
- if (priv->scan_request && priv->scan_request != request)
- return -EBUSY;
-
priv->scan_request = request;
- queue_work(priv->workqueue, &priv->cfg_workqueue);
+ priv->user_scan_cfg = kzalloc(sizeof(struct mwifiex_user_scan_cfg),
+ GFP_KERNEL);
+ if (!priv->user_scan_cfg) {
+ dev_err(priv->adapter->dev, "failed to alloc scan_req\n");
+ return -ENOMEM;
+ }
+ for (i = 0; i < request->n_ssids; i++) {
+ memcpy(priv->user_scan_cfg->ssid_list[i].ssid,
+ request->ssids[i].ssid, request->ssids[i].ssid_len);
+ priv->user_scan_cfg->ssid_list[i].max_len =
+ request->ssids[i].ssid_len;
+ }
+ for (i = 0; i < request->n_channels; i++) {
+ chan = request->channels[i];
+ priv->user_scan_cfg->chan_list[i].chan_number = chan->hw_value;
+ priv->user_scan_cfg->chan_list[i].radio_type = chan->band;
+
+ if (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)
+ priv->user_scan_cfg->chan_list[i].scan_type =
+ MWIFIEX_SCAN_TYPE_PASSIVE;
+ else
+ priv->user_scan_cfg->chan_list[i].scan_type =
+ MWIFIEX_SCAN_TYPE_ACTIVE;
+
+ priv->user_scan_cfg->chan_list[i].scan_time = 0;
+ }
+ if (mwifiex_set_user_scan_ioctl(priv, priv->user_scan_cfg))
+ return -EFAULT;
+
return 0;
}
@@ -1292,10 +1307,6 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct net_device *dev)
priv->media_connected = false;
- cancel_work_sync(&priv->cfg_workqueue);
- flush_workqueue(priv->workqueue);
- destroy_workqueue(priv->workqueue);
-
priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
return 0;
@@ -1373,9 +1384,6 @@ int mwifiex_register_cfg80211(struct mwifiex_private *priv)
memcpy(wdev->wiphy->perm_addr, priv->curr_addr, ETH_ALEN);
wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
- /* We are using custom domains */
- wdev->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
-
/* Reserve space for bss band information */
wdev->wiphy->bss_priv_size = sizeof(u8);
@@ -1404,100 +1412,3 @@ int mwifiex_register_cfg80211(struct mwifiex_private *priv)
return ret;
}
-
-/*
- * This function handles the result of different pending network operations.
- *
- * The following operations are handled and CFG802.11 subsystem is
- * notified accordingly -
- * - Scan request completion
- * - Association request completion
- * - IBSS join request completion
- * - Disconnect request completion
- */
-void
-mwifiex_cfg80211_results(struct work_struct *work)
-{
- struct mwifiex_private *priv =
- container_of(work, struct mwifiex_private, cfg_workqueue);
- struct mwifiex_user_scan_cfg *scan_req;
- int ret = 0, i;
- struct ieee80211_channel *chan;
-
- if (priv->scan_request) {
- scan_req = kzalloc(sizeof(struct mwifiex_user_scan_cfg),
- GFP_KERNEL);
- if (!scan_req) {
- dev_err(priv->adapter->dev, "failed to alloc "
- "scan_req\n");
- return;
- }
- for (i = 0; i < priv->scan_request->n_ssids; i++) {
- memcpy(scan_req->ssid_list[i].ssid,
- priv->scan_request->ssids[i].ssid,
- priv->scan_request->ssids[i].ssid_len);
- scan_req->ssid_list[i].max_len =
- priv->scan_request->ssids[i].ssid_len;
- }
- for (i = 0; i < priv->scan_request->n_channels; i++) {
- chan = priv->scan_request->channels[i];
- scan_req->chan_list[i].chan_number = chan->hw_value;
- scan_req->chan_list[i].radio_type = chan->band;
- if (chan->flags & IEEE80211_CHAN_DISABLED)
- scan_req->chan_list[i].scan_type =
- MWIFIEX_SCAN_TYPE_PASSIVE;
- else
- scan_req->chan_list[i].scan_type =
- MWIFIEX_SCAN_TYPE_ACTIVE;
- scan_req->chan_list[i].scan_time = 0;
- }
- if (mwifiex_set_user_scan_ioctl(priv, scan_req))
- ret = -EFAULT;
- priv->scan_result_status = ret;
- dev_dbg(priv->adapter->dev, "info: %s: sending scan results\n",
- __func__);
- cfg80211_scan_done(priv->scan_request,
- (priv->scan_result_status < 0));
- priv->scan_request = NULL;
- kfree(scan_req);
- }
-
- if (priv->assoc_request == 1) {
- if (!priv->assoc_result) {
- cfg80211_connect_result(priv->netdev, priv->cfg_bssid,
- NULL, 0, NULL, 0,
- WLAN_STATUS_SUCCESS,
- GFP_KERNEL);
- dev_dbg(priv->adapter->dev,
- "info: associated to bssid %pM successfully\n",
- priv->cfg_bssid);
- } else {
- dev_dbg(priv->adapter->dev,
- "info: association to bssid %pM failed\n",
- priv->cfg_bssid);
- memset(priv->cfg_bssid, 0, ETH_ALEN);
- }
- priv->assoc_request = 0;
- priv->assoc_result = 0;
- }
-
- if (priv->ibss_join_request == 1) {
- if (!priv->ibss_join_result) {
- cfg80211_ibss_joined(priv->netdev, priv->cfg_bssid,
- GFP_KERNEL);
- dev_dbg(priv->adapter->dev,
- "info: joined/created adhoc network with bssid"
- " %pM successfully\n", priv->cfg_bssid);
- } else {
- dev_dbg(priv->adapter->dev,
- "info: failed creating/joining adhoc network\n");
- }
- priv->ibss_join_request = 0;
- priv->ibss_join_result = 0;
- }
-
- if (priv->disconnect) {
- memset(priv->cfg_bssid, 0, ETH_ALEN);
- priv->disconnect = 0;
- }
-}
diff --git a/drivers/net/wireless/mwifiex/cfg80211.h b/drivers/net/wireless/mwifiex/cfg80211.h
index 8d010f2500c5..76c76c60438b 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.h
+++ b/drivers/net/wireless/mwifiex/cfg80211.h
@@ -26,5 +26,4 @@
int mwifiex_register_cfg80211(struct mwifiex_private *);
-void mwifiex_cfg80211_results(struct work_struct *work);
#endif
diff --git a/drivers/net/wireless/mwifiex/cfp.c b/drivers/net/wireless/mwifiex/cfp.c
index f2e6de03805c..1782a77f15dc 100644
--- a/drivers/net/wireless/mwifiex/cfp.c
+++ b/drivers/net/wireless/mwifiex/cfp.c
@@ -75,18 +75,32 @@ static u8 supported_rates_n[N_SUPPORTED_RATES] = { 0x02, 0x04, 0 };
* This function maps an index in supported rates table into
* the corresponding data rate.
*/
-u32 mwifiex_index_to_data_rate(u8 index, u8 ht_info)
+u32 mwifiex_index_to_data_rate(struct mwifiex_private *priv, u8 index,
+ u8 ht_info)
{
- u16 mcs_rate[4][8] = {
- {0x1b, 0x36, 0x51, 0x6c, 0xa2, 0xd8, 0xf3, 0x10e}
- , /* LG 40M */
- {0x1e, 0x3c, 0x5a, 0x78, 0xb4, 0xf0, 0x10e, 0x12c}
- , /* SG 40M */
- {0x0d, 0x1a, 0x27, 0x34, 0x4e, 0x68, 0x75, 0x82}
- , /* LG 20M */
- {0x0e, 0x1c, 0x2b, 0x39, 0x56, 0x73, 0x82, 0x90}
- }; /* SG 20M */
-
+ /*
+ * For every mcs_rate line, the first 8 bytes are for stream 1x1,
+ * and all 16 bytes are for stream 2x2.
+ */
+ u16 mcs_rate[4][16] = {
+ /* LGI 40M */
+ { 0x1b, 0x36, 0x51, 0x6c, 0xa2, 0xd8, 0xf3, 0x10e,
+ 0x36, 0x6c, 0xa2, 0xd8, 0x144, 0x1b0, 0x1e6, 0x21c },
+
+ /* SGI 40M */
+ { 0x1e, 0x3c, 0x5a, 0x78, 0xb4, 0xf0, 0x10e, 0x12c,
+ 0x3c, 0x78, 0xb4, 0xf0, 0x168, 0x1e0, 0x21c, 0x258 },
+
+ /* LGI 20M */
+ { 0x0d, 0x1a, 0x27, 0x34, 0x4e, 0x68, 0x75, 0x82,
+ 0x1a, 0x34, 0x4e, 0x68, 0x9c, 0xd0, 0xea, 0x104 },
+
+ /* SGI 20M */
+ { 0x0e, 0x1c, 0x2b, 0x39, 0x56, 0x73, 0x82, 0x90,
+ 0x1c, 0x39, 0x56, 0x73, 0xad, 0xe7, 0x104, 0x120 }
+ };
+ u32 mcs_num_supp =
+ (priv->adapter->hw_dev_mcs_support == HT_STREAM_2X2) ? 16 : 8;
u32 rate;
if (ht_info & BIT(0)) {
@@ -95,7 +109,7 @@ u32 mwifiex_index_to_data_rate(u8 index, u8 ht_info)
rate = 0x0D; /* MCS 32 SGI rate */
else
rate = 0x0C; /* MCS 32 LGI rate */
- } else if (index < 8) {
+ } else if (index < mcs_num_supp) {
if (ht_info & BIT(1)) {
if (ht_info & BIT(2))
/* SGI, 40M */
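The cfp.c hunk grows each MCS row from 8 to 16 entries and bounds the lookup by mcs_num_supp, which is 16 only when the hardware reports 2x2 stream support (HT_STREAM_2X2). Below is a standalone sketch of the bounded lookup, reusing the LGI 20 MHz row from the hunk above; the surrounding names are hypothetical.

#include <stdio.h>

/* LGI 20 MHz row from the table above: entries 0-7 are the 1x1 MCS
 * rates, entries 8-15 extend the row for 2x2 stream support. */
static const unsigned short lgi_20m[16] = {
	0x0d, 0x1a, 0x27, 0x34, 0x4e, 0x68, 0x75, 0x82,
	0x1a, 0x34, 0x4e, 0x68, 0x9c, 0xd0, 0xea, 0x104
};

static unsigned int index_to_rate(unsigned int index, int is_2x2)
{
	unsigned int mcs_num_supp = is_2x2 ? 16 : 8;

	if (index < mcs_num_supp)
		return lgi_20m[index];
	return 0;	/* MCS index not supported by this hardware */
}

int main(void)
{
	printf("MCS 9, 1x1: %u\n", index_to_rate(9, 0));	/* 0: rejected */
	printf("MCS 9, 2x2: %u\n", index_to_rate(9, 1));	/* 52 (0x34)   */
	return 0;
}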
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index ac278156d390..6e0a3eaecf70 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -939,7 +939,6 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
{
struct cmd_ctrl_node *cmd_node = NULL, *tmp_node = NULL;
unsigned long cmd_flags;
- unsigned long cmd_pending_q_flags;
unsigned long scan_pending_q_flags;
uint16_t cancel_scan_cmd = false;
@@ -949,12 +948,9 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
cmd_node = adapter->curr_cmd;
cmd_node->wait_q_enabled = false;
cmd_node->cmd_flag |= CMD_F_CANCELED;
- spin_lock_irqsave(&adapter->cmd_pending_q_lock,
- cmd_pending_q_flags);
- list_del(&cmd_node->list);
- spin_unlock_irqrestore(&adapter->cmd_pending_q_lock,
- cmd_pending_q_flags);
mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
+ mwifiex_complete_cmd(adapter, adapter->curr_cmd);
+ adapter->curr_cmd = NULL;
spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
}
@@ -981,7 +977,6 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
}
adapter->cmd_wait_q.status = -1;
- mwifiex_complete_cmd(adapter, adapter->curr_cmd);
}
/*
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h
index 0cc5d73cb0c1..51c5417c569c 100644
--- a/drivers/net/wireless/mwifiex/fw.h
+++ b/drivers/net/wireless/mwifiex/fw.h
@@ -165,6 +165,7 @@ enum MWIFIEX_802_11_WEP_STATUS {
#define GET_RXMCSSUPP(DevMCSSupported) (DevMCSSupported & 0x0f)
#define SETHT_MCS32(x) (x[4] |= 1)
+#define HT_STREAM_2X2 0x22
#define SET_SECONDARYCHAN(RadioType, SECCHAN) (RadioType |= (SECCHAN << 4))
@@ -375,8 +376,6 @@ enum mwifiex_chan_scan_mode_bitmasks {
MWIFIEX_DISABLE_CHAN_FILT = BIT(1),
};
-#define SECOND_CHANNEL_BELOW 0x30
-#define SECOND_CHANNEL_ABOVE 0x10
struct mwifiex_chan_scan_param_set {
u8 radio_type;
u8 chan_number;
@@ -673,7 +672,7 @@ struct host_cmd_ds_802_11_ad_hoc_start {
union ieee_types_phy_param_set phy_param_set;
u16 reserved1;
__le16 cap_info_bitmap;
- u8 DataRate[HOSTCMD_SUPPORTED_RATES];
+ u8 data_rate[HOSTCMD_SUPPORTED_RATES];
} __packed;
struct host_cmd_ds_802_11_ad_hoc_result {
diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c
index d792b3fb7c16..e05b417a3fae 100644
--- a/drivers/net/wireless/mwifiex/init.c
+++ b/drivers/net/wireless/mwifiex/init.c
@@ -187,8 +187,6 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
struct mwifiex_opt_sleep_confirm *sleep_cfm_buf = NULL;
skb_put(adapter->sleep_cfm, sizeof(struct mwifiex_opt_sleep_confirm));
- sleep_cfm_buf = (struct mwifiex_opt_sleep_confirm *)
- (adapter->sleep_cfm->data);
adapter->cmd_sent = false;
@@ -248,12 +246,14 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
memset(adapter->event_body, 0, sizeof(adapter->event_body));
adapter->hw_dot_11n_dev_cap = 0;
adapter->hw_dev_mcs_support = 0;
- adapter->chan_offset = 0;
+ adapter->sec_chan_offset = 0;
adapter->adhoc_11n_enabled = false;
mwifiex_wmm_init(adapter);
if (adapter->sleep_cfm) {
+ sleep_cfm_buf = (struct mwifiex_opt_sleep_confirm *)
+ adapter->sleep_cfm->data;
memset(sleep_cfm_buf, 0, adapter->sleep_cfm->len);
sleep_cfm_buf->command =
cpu_to_le16(HostCmd_CMD_802_11_PS_MODE_ENH);
@@ -283,6 +283,45 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
}
/*
+ * This function sets trans_start per tx_queue
+ */
+void mwifiex_set_trans_start(struct net_device *dev)
+{
+ int i;
+
+ for (i = 0; i < dev->num_tx_queues; i++)
+ netdev_get_tx_queue(dev, i)->trans_start = jiffies;
+
+ dev->trans_start = jiffies;
+}
+
+/*
+ * This function wakes up all queues in net_device
+ */
+void mwifiex_wake_up_net_dev_queue(struct net_device *netdev,
+ struct mwifiex_adapter *adapter)
+{
+ unsigned long dev_queue_flags;
+
+ spin_lock_irqsave(&adapter->queue_lock, dev_queue_flags);
+ netif_tx_wake_all_queues(netdev);
+ spin_unlock_irqrestore(&adapter->queue_lock, dev_queue_flags);
+}
+
+/*
+ * This function stops all queues in net_device
+ */
+void mwifiex_stop_net_dev_queue(struct net_device *netdev,
+ struct mwifiex_adapter *adapter)
+{
+ unsigned long dev_queue_flags;
+
+ spin_lock_irqsave(&adapter->queue_lock, dev_queue_flags);
+ netif_tx_stop_all_queues(netdev);
+ spin_unlock_irqrestore(&adapter->queue_lock, dev_queue_flags);
+}
+
+/*
* This function releases the lock variables and frees the locks and
* associated locks.
*/
@@ -359,6 +398,7 @@ int mwifiex_init_lock_list(struct mwifiex_adapter *adapter)
spin_lock_init(&adapter->int_lock);
spin_lock_init(&adapter->main_proc_lock);
spin_lock_init(&adapter->mwifiex_cmd_lock);
+ spin_lock_init(&adapter->queue_lock);
for (i = 0; i < adapter->priv_num; i++) {
if (adapter->priv[i]) {
priv = adapter->priv[i];
diff --git a/drivers/net/wireless/mwifiex/ioctl.h b/drivers/net/wireless/mwifiex/ioctl.h
index e0b68e7c8ca2..d5d81f1fe41c 100644
--- a/drivers/net/wireless/mwifiex/ioctl.h
+++ b/drivers/net/wireless/mwifiex/ioctl.h
@@ -62,17 +62,6 @@ enum {
BAND_AN = 16,
};
-#define NO_SEC_CHANNEL 0
-#define SEC_CHANNEL_ABOVE 1
-#define SEC_CHANNEL_BELOW 3
-
-struct mwifiex_ds_band_cfg {
- u32 config_bands;
- u32 adhoc_start_band;
- u32 adhoc_channel;
- u32 sec_chan_offset;
-};
-
enum {
ADHOC_IDLE,
ADHOC_STARTED,
diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c
index 62b4c2938608..0b0eb5efba9d 100644
--- a/drivers/net/wireless/mwifiex/join.c
+++ b/drivers/net/wireless/mwifiex/join.c
@@ -724,8 +724,8 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
u32 cmd_append_size = 0;
u32 i;
u16 tmp_cap;
- uint16_t ht_cap_info;
struct mwifiex_ie_types_chan_list_param_set *chan_tlv;
+ u8 radio_type;
struct mwifiex_ie_types_htcap *ht_cap;
struct mwifiex_ie_types_htinfo *ht_info;
@@ -837,8 +837,8 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
bss_desc->privacy = MWIFIEX_802_11_PRIV_FILTER_ACCEPT_ALL;
}
- memset(adhoc_start->DataRate, 0, sizeof(adhoc_start->DataRate));
- mwifiex_get_active_data_rates(priv, adhoc_start->DataRate);
+ memset(adhoc_start->data_rate, 0, sizeof(adhoc_start->data_rate));
+ mwifiex_get_active_data_rates(priv, adhoc_start->data_rate);
if ((adapter->adhoc_start_band & BAND_G) &&
(priv->curr_pkt_filter & HostCmd_ACT_MAC_ADHOC_G_PROTECTION_ON)) {
if (mwifiex_send_cmd_async(priv, HostCmd_CMD_MAC_CONTROL,
@@ -850,20 +850,19 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
}
}
/* Find the last non zero */
- for (i = 0; i < sizeof(adhoc_start->DataRate) &&
- adhoc_start->DataRate[i];
- i++)
- ;
+ for (i = 0; i < sizeof(adhoc_start->data_rate); i++)
+ if (!adhoc_start->data_rate[i])
+ break;
priv->curr_bss_params.num_of_rates = i;
/* Copy the ad-hoc creating rates into Current BSS rate structure */
memcpy(&priv->curr_bss_params.data_rates,
- &adhoc_start->DataRate, priv->curr_bss_params.num_of_rates);
+ &adhoc_start->data_rate, priv->curr_bss_params.num_of_rates);
dev_dbg(adapter->dev, "info: ADHOC_S_CMD: rates=%02x %02x %02x %02x\n",
- adhoc_start->DataRate[0], adhoc_start->DataRate[1],
- adhoc_start->DataRate[2], adhoc_start->DataRate[3]);
+ adhoc_start->data_rate[0], adhoc_start->data_rate[1],
+ adhoc_start->data_rate[2], adhoc_start->data_rate[3]);
dev_dbg(adapter->dev, "info: ADHOC_S_CMD: AD-HOC Start command is ready\n");
@@ -886,12 +885,14 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
= mwifiex_band_to_radio_type(priv->curr_bss_params.band);
if (adapter->adhoc_start_band & BAND_GN
|| adapter->adhoc_start_band & BAND_AN) {
- if (adapter->chan_offset == SEC_CHANNEL_ABOVE)
+ if (adapter->sec_chan_offset ==
+ IEEE80211_HT_PARAM_CHA_SEC_ABOVE)
chan_tlv->chan_scan_param[0].radio_type |=
- SECOND_CHANNEL_ABOVE;
- else if (adapter->chan_offset == SEC_CHANNEL_BELOW)
+ (IEEE80211_HT_PARAM_CHA_SEC_ABOVE << 4);
+ else if (adapter->sec_chan_offset ==
+ IEEE80211_HT_PARAM_CHA_SEC_BELOW)
chan_tlv->chan_scan_param[0].radio_type |=
- SECOND_CHANNEL_BELOW;
+ (IEEE80211_HT_PARAM_CHA_SEC_BELOW << 4);
}
dev_dbg(adapter->dev, "info: ADHOC_S_CMD: TLV Band = %d\n",
chan_tlv->chan_scan_param[0].radio_type);
@@ -914,55 +915,40 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
}
if (adapter->adhoc_11n_enabled) {
- {
- ht_cap = (struct mwifiex_ie_types_htcap *) pos;
- memset(ht_cap, 0,
- sizeof(struct mwifiex_ie_types_htcap));
- ht_cap->header.type =
- cpu_to_le16(WLAN_EID_HT_CAPABILITY);
- ht_cap->header.len =
- cpu_to_le16(sizeof(struct ieee80211_ht_cap));
- ht_cap_info = le16_to_cpu(ht_cap->ht_cap.cap_info);
-
- ht_cap_info |= IEEE80211_HT_CAP_SGI_20;
- if (adapter->chan_offset) {
- ht_cap_info |= IEEE80211_HT_CAP_SGI_40;
- ht_cap_info |= IEEE80211_HT_CAP_DSSSCCK40;
- ht_cap_info |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
- SETHT_MCS32(ht_cap->ht_cap.mcs.rx_mask);
- }
+ /* Fill HT CAPABILITY */
+ ht_cap = (struct mwifiex_ie_types_htcap *) pos;
+ memset(ht_cap, 0, sizeof(struct mwifiex_ie_types_htcap));
+ ht_cap->header.type = cpu_to_le16(WLAN_EID_HT_CAPABILITY);
+ ht_cap->header.len =
+ cpu_to_le16(sizeof(struct ieee80211_ht_cap));
+ radio_type = mwifiex_band_to_radio_type(
+ priv->adapter->config_bands);
+ mwifiex_fill_cap_info(priv, radio_type, ht_cap);
+
+ pos += sizeof(struct mwifiex_ie_types_htcap);
+ cmd_append_size +=
+ sizeof(struct mwifiex_ie_types_htcap);
- ht_cap->ht_cap.ampdu_params_info
- = IEEE80211_HT_MAX_AMPDU_64K;
- ht_cap->ht_cap.mcs.rx_mask[0] = 0xff;
- pos += sizeof(struct mwifiex_ie_types_htcap);
- cmd_append_size +=
- sizeof(struct mwifiex_ie_types_htcap);
- }
- {
- ht_info = (struct mwifiex_ie_types_htinfo *) pos;
- memset(ht_info, 0,
- sizeof(struct mwifiex_ie_types_htinfo));
- ht_info->header.type =
- cpu_to_le16(WLAN_EID_HT_INFORMATION);
- ht_info->header.len =
- cpu_to_le16(sizeof(struct ieee80211_ht_info));
- ht_info->ht_info.control_chan =
- (u8) priv->curr_bss_params.bss_descriptor.
- channel;
- if (adapter->chan_offset) {
- ht_info->ht_info.ht_param =
- adapter->chan_offset;
- ht_info->ht_info.ht_param |=
+ /* Fill HT INFORMATION */
+ ht_info = (struct mwifiex_ie_types_htinfo *) pos;
+ memset(ht_info, 0, sizeof(struct mwifiex_ie_types_htinfo));
+ ht_info->header.type = cpu_to_le16(WLAN_EID_HT_INFORMATION);
+ ht_info->header.len =
+ cpu_to_le16(sizeof(struct ieee80211_ht_info));
+
+ ht_info->ht_info.control_chan =
+ (u8) priv->curr_bss_params.bss_descriptor.channel;
+ if (adapter->sec_chan_offset) {
+ ht_info->ht_info.ht_param = adapter->sec_chan_offset;
+ ht_info->ht_info.ht_param |=
IEEE80211_HT_PARAM_CHAN_WIDTH_ANY;
- }
- ht_info->ht_info.operation_mode =
- cpu_to_le16(IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
- ht_info->ht_info.basic_set[0] = 0xff;
- pos += sizeof(struct mwifiex_ie_types_htinfo);
- cmd_append_size +=
- sizeof(struct mwifiex_ie_types_htinfo);
}
+ ht_info->ht_info.operation_mode =
+ cpu_to_le16(IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
+ ht_info->ht_info.basic_set[0] = 0xff;
+ pos += sizeof(struct mwifiex_ie_types_htinfo);
+ cmd_append_size +=
+ sizeof(struct mwifiex_ie_types_htinfo);
}
cmd->size = cpu_to_le16((u16)
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 67e6db7d672d..84be196188cc 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -401,7 +401,7 @@ mwifiex_fill_buffer(struct sk_buff *skb)
static int
mwifiex_open(struct net_device *dev)
{
- netif_start_queue(dev);
+ netif_tx_start_all_queues(dev);
return 0;
}
@@ -465,8 +465,8 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
atomic_inc(&priv->adapter->tx_pending);
if (atomic_read(&priv->adapter->tx_pending) >= MAX_TX_PENDING) {
- netif_stop_queue(priv->netdev);
- dev->trans_start = jiffies;
+ mwifiex_set_trans_start(dev);
+ mwifiex_stop_net_dev_queue(priv->netdev, priv->adapter);
}
queue_work(priv->adapter->workqueue, &priv->adapter->main_work);
@@ -533,7 +533,7 @@ mwifiex_tx_timeout(struct net_device *dev)
dev_err(priv->adapter->dev, "%lu : Tx timeout, bss_index=%d\n",
jiffies, priv->bss_index);
- dev->trans_start = jiffies;
+ mwifiex_set_trans_start(dev);
priv->num_tx_timeout++;
}
@@ -586,8 +586,6 @@ void mwifiex_init_priv_params(struct mwifiex_private *priv,
priv->media_connected = false;
memset(&priv->nick_name, 0, sizeof(priv->nick_name));
priv->num_tx_timeout = 0;
- priv->workqueue = create_singlethread_workqueue("cfg80211_wq");
- INIT_WORK(&priv->cfg_workqueue, mwifiex_cfg80211_results);
memcpy(dev->dev_addr, priv->curr_addr, ETH_ALEN);
}
@@ -793,7 +791,8 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter, struct semaphore *sem)
priv = adapter->priv[i];
if (priv && priv->netdev) {
if (!netif_queue_stopped(priv->netdev))
- netif_stop_queue(priv->netdev);
+ mwifiex_stop_net_dev_queue(priv->netdev,
+ adapter);
if (netif_carrier_ok(priv->netdev))
netif_carrier_off(priv->netdev);
}
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index 30f138b6fa4c..3186aa437f42 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -453,15 +453,8 @@ struct mwifiex_private {
u8 scan_pending_on_block;
u8 report_scan_result;
struct cfg80211_scan_request *scan_request;
- int scan_result_status;
- int assoc_request;
- u16 assoc_result;
- int ibss_join_request;
- u16 ibss_join_result;
- bool disconnect;
+ struct mwifiex_user_scan_cfg *user_scan_cfg;
u8 cfg_bssid[6];
- struct workqueue_struct *workqueue;
- struct work_struct cfg_workqueue;
u8 country_code[IEEE80211_COUNTRY_STRING_LEN];
struct wps wps;
u8 scan_block;
@@ -647,7 +640,8 @@ struct mwifiex_adapter {
u32 hw_dot_11n_dev_cap;
u8 hw_dev_mcs_support;
u8 adhoc_11n_enabled;
- u8 chan_offset;
+ u8 sec_chan_offset;
+ enum nl80211_channel_type channel_type;
struct mwifiex_dbg dbg;
u8 arp_filter[ARP_FILTER_MAX_BUF_SIZE];
u32 arp_filter_size;
@@ -655,10 +649,19 @@ struct mwifiex_adapter {
struct mwifiex_wait_queue cmd_wait_q;
u8 scan_wait_q_woken;
struct cmd_ctrl_node *cmd_queued;
+ spinlock_t queue_lock; /* lock for tx queues */
};
int mwifiex_init_lock_list(struct mwifiex_adapter *adapter);
+void mwifiex_set_trans_start(struct net_device *dev);
+
+void mwifiex_stop_net_dev_queue(struct net_device *netdev,
+ struct mwifiex_adapter *adapter);
+
+void mwifiex_wake_up_net_dev_queue(struct net_device *netdev,
+ struct mwifiex_adapter *adapter);
+
int mwifiex_init_fw(struct mwifiex_adapter *adapter);
int mwifiex_init_fw_complete(struct mwifiex_adapter *adapter);
@@ -775,7 +778,8 @@ struct mwifiex_chan_freq_power *
struct mwifiex_chan_freq_power *mwifiex_get_cfp_by_band_and_freq_from_cfg80211(
struct mwifiex_private *priv,
u8 band, u32 freq);
-u32 mwifiex_index_to_data_rate(u8 index, u8 ht_info);
+u32 mwifiex_index_to_data_rate(struct mwifiex_private *priv, u8 index,
+ u8 ht_info);
u32 mwifiex_find_freq_from_band_chan(u8, u8);
int mwifiex_cmd_append_vsie_tlv(struct mwifiex_private *priv, u16 vsie_mask,
u8 **buffer);
@@ -951,8 +955,6 @@ int mwifiex_main_process(struct mwifiex_adapter *);
int mwifiex_bss_set_channel(struct mwifiex_private *,
struct mwifiex_chan_freq_power *cfp);
-int mwifiex_set_radio_band_cfg(struct mwifiex_private *,
- struct mwifiex_ds_band_cfg *);
int mwifiex_get_bss_info(struct mwifiex_private *,
struct mwifiex_bss_info *);
int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv,
diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
index d34acf082d3a..405350940a45 100644
--- a/drivers/net/wireless/mwifiex/pcie.c
+++ b/drivers/net/wireless/mwifiex/pcie.c
@@ -386,8 +386,7 @@ static int mwifiex_pcie_create_txbd_ring(struct mwifiex_adapter *adapter)
card->txbd_ring_vbase = kzalloc(card->txbd_ring_size, GFP_KERNEL);
if (!card->txbd_ring_vbase) {
dev_err(adapter->dev, "Unable to allocate buffer for txbd ring.\n");
- kfree(card->txbd_ring_vbase);
- return -1;
+ return -ENOMEM;
}
card->txbd_ring_pbase = virt_to_phys(card->txbd_ring_vbase);
@@ -477,7 +476,7 @@ static int mwifiex_pcie_create_rxbd_ring(struct mwifiex_adapter *adapter)
if (!card->rxbd_ring_vbase) {
dev_err(adapter->dev, "Unable to allocate buffer for "
"rxbd_ring.\n");
- return -1;
+ return -ENOMEM;
}
card->rxbd_ring_pbase = virt_to_phys(card->rxbd_ring_vbase);
@@ -570,7 +569,7 @@ static int mwifiex_pcie_create_evtbd_ring(struct mwifiex_adapter *adapter)
if (!card->evtbd_ring_vbase) {
dev_err(adapter->dev, "Unable to allocate buffer. "
"Terminating download\n");
- return -1;
+ return -ENOMEM;
}
card->evtbd_ring_pbase = virt_to_phys(card->evtbd_ring_vbase);
@@ -1229,15 +1228,16 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter,
if (!skb)
return 0;
- if (rdptr >= MWIFIEX_MAX_EVT_BD)
+ if (rdptr >= MWIFIEX_MAX_EVT_BD) {
dev_err(adapter->dev, "event_complete: Invalid rdptr 0x%x\n",
rdptr);
+ return -EINVAL;
+ }
/* Read the event ring write pointer set by firmware */
if (mwifiex_read_reg(adapter, REG_EVTBD_WRPTR, &wrptr)) {
dev_err(adapter->dev, "event_complete: failed to read REG_EVTBD_WRPTR\n");
- ret = -1;
- goto done;
+ return -1;
}
if (!card->evt_buf_list[rdptr]) {
@@ -1266,15 +1266,9 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter,
/* Write the event ring read pointer in to REG_EVTBD_RDPTR */
if (mwifiex_write_reg(adapter, REG_EVTBD_RDPTR, card->evtbd_rdptr)) {
dev_err(adapter->dev, "event_complete: failed to read REG_EVTBD_RDPTR\n");
- ret = -1;
- goto done;
+ return -1;
}
-done:
- /* Free the buffer for failure case */
- if (ret && skb)
- dev_kfree_skb_any(skb);
-
dev_dbg(adapter->dev, "info: Check Events Again\n");
ret = mwifiex_pcie_process_event_ready(adapter);
@@ -1672,9 +1666,8 @@ static int mwifiex_pcie_host_to_card(struct mwifiex_adapter *adapter, u8 type,
struct sk_buff *skb,
struct mwifiex_tx_param *tx_param)
{
- if (!adapter || !skb) {
- dev_err(adapter->dev, "Invalid parameter in %s <%p, %p>\n",
- __func__, adapter, skb);
+ if (!skb) {
+ dev_err(adapter->dev, "Passed NULL skb to %s\n", __func__);
return -1;
}
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index 8d3ab378662b..6396d3318ead 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -500,7 +500,6 @@ mwifiex_scan_create_channel_list(struct mwifiex_private *priv,
struct ieee80211_channel *ch;
struct mwifiex_adapter *adapter = priv->adapter;
int chan_idx = 0, i;
- u8 scan_type;
for (band = 0; (band < IEEE80211_NUM_BANDS) ; band++) {
@@ -514,19 +513,20 @@ mwifiex_scan_create_channel_list(struct mwifiex_private *priv,
if (ch->flags & IEEE80211_CHAN_DISABLED)
continue;
scan_chan_list[chan_idx].radio_type = band;
- scan_type = ch->flags & IEEE80211_CHAN_PASSIVE_SCAN;
+
if (user_scan_in &&
user_scan_in->chan_list[0].scan_time)
scan_chan_list[chan_idx].max_scan_time =
cpu_to_le16((u16) user_scan_in->
chan_list[0].scan_time);
- else if (scan_type == MWIFIEX_SCAN_TYPE_PASSIVE)
+ else if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
scan_chan_list[chan_idx].max_scan_time =
cpu_to_le16(adapter->passive_scan_time);
else
scan_chan_list[chan_idx].max_scan_time =
cpu_to_le16(adapter->active_scan_time);
- if (scan_type == MWIFIEX_SCAN_TYPE_PASSIVE)
+
+ if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
scan_chan_list[chan_idx].chan_scan_mode_bitmap
|= MWIFIEX_PASSIVE_SCAN;
else
@@ -1391,11 +1391,8 @@ int mwifiex_set_user_scan_ioctl(struct mwifiex_private *priv,
{
int status;
- priv->adapter->scan_wait_q_woken = false;
-
status = mwifiex_scan_networks(priv, scan_req);
- if (!status)
- status = mwifiex_wait_queue_complete(priv->adapter);
+ queue_work(priv->adapter->workqueue, &priv->adapter->main_work);
return status;
}
@@ -1537,11 +1534,6 @@ done:
return 0;
}
-static void mwifiex_free_bss_priv(struct cfg80211_bss *bss)
-{
- kfree(bss->priv);
-}
-
/*
* This function handles the command response of scan.
*
@@ -1767,7 +1759,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
cap_info_bitmap, beacon_period,
ie_buf, ie_len, rssi, GFP_KERNEL);
*(u8 *)bss->priv = band;
- bss->free_priv = mwifiex_free_bss_priv;
+ cfg80211_put_bss(bss);
if (priv->media_connected && !memcmp(bssid,
priv->curr_bss_params.bss_descriptor
@@ -1801,6 +1793,14 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
up(&priv->async_sem);
}
+ if (priv->user_scan_cfg) {
+ dev_dbg(priv->adapter->dev, "info: %s: sending scan "
+ "results\n", __func__);
+ cfg80211_scan_done(priv->scan_request, 0);
+ priv->scan_request = NULL;
+ kfree(priv->user_scan_cfg);
+ priv->user_scan_cfg = NULL;
+ }
} else {
/* Get scan command from scan_pending_q and put to
cmd_pending_q */
diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
index 283171bbcedf..d39d8457f252 100644
--- a/drivers/net/wireless/mwifiex/sdio.c
+++ b/drivers/net/wireless/mwifiex/sdio.c
@@ -256,10 +256,13 @@ static int mwifiex_sdio_resume(struct device *dev)
/* Device ID for SD8787 */
#define SDIO_DEVICE_ID_MARVELL_8787 (0x9119)
+/* Device ID for SD8797 */
+#define SDIO_DEVICE_ID_MARVELL_8797 (0x9129)
/* WLAN IDs */
static const struct sdio_device_id mwifiex_ids[] = {
{SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8787)},
+ {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8797)},
{},
};
@@ -1084,7 +1087,7 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
(adapter->ioport | 0x1000 |
(card->mpa_rx.ports << 4)) +
card->mpa_rx.start_port, 1))
- return -1;
+ goto error;
curr_ptr = card->mpa_rx.buf;
@@ -1127,12 +1130,29 @@ rx_curr_single:
if (mwifiex_sdio_card_to_host(adapter, &pkt_type,
skb->data, skb->len,
adapter->ioport + port))
- return -1;
+ goto error;
mwifiex_decode_rx_packet(adapter, skb, pkt_type);
}
return 0;
+
+error:
+ if (MP_RX_AGGR_IN_PROGRESS(card)) {
+ /* Multiport-aggregation transfer failed - cleanup */
+ for (pind = 0; pind < card->mpa_rx.pkt_cnt; pind++) {
+ /* copy pkt to deaggr buf */
+ skb_deaggr = card->mpa_rx.skb_arr[pind];
+ dev_kfree_skb_any(skb_deaggr);
+ }
+ MP_RX_AGGR_BUF_RESET(card);
+ }
+
+ if (f_do_rx_cur)
+ /* Single transfer pending. Free curr buff also */
+ dev_kfree_skb_any(skb);
+
+ return -1;
}
/*
@@ -1268,7 +1288,6 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
dev_dbg(adapter->dev,
"info: CFG reg val =%x\n", cr);
- dev_kfree_skb_any(skb);
return -1;
}
}
@@ -1573,7 +1592,16 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
sdio_set_drvdata(func, card);
adapter->dev = &func->dev;
- strcpy(adapter->fw_name, SD8787_DEFAULT_FW_NAME);
+
+ switch (func->device) {
+ case SDIO_DEVICE_ID_MARVELL_8797:
+ strcpy(adapter->fw_name, SD8797_DEFAULT_FW_NAME);
+ break;
+ case SDIO_DEVICE_ID_MARVELL_8787:
+ default:
+ strcpy(adapter->fw_name, SD8787_DEFAULT_FW_NAME);
+ break;
+ }
return 0;
@@ -1630,14 +1658,14 @@ static int mwifiex_init_sdio(struct mwifiex_adapter *adapter)
card->mpa_tx.pkt_cnt = 0;
card->mpa_tx.start_port = 0;
- card->mpa_tx.enabled = 0;
+ card->mpa_tx.enabled = 1;
card->mpa_tx.pkt_aggr_limit = SDIO_MP_AGGR_DEF_PKT_LIMIT;
card->mpa_rx.buf_len = 0;
card->mpa_rx.pkt_cnt = 0;
card->mpa_rx.start_port = 0;
- card->mpa_rx.enabled = 0;
+ card->mpa_rx.enabled = 1;
card->mpa_rx.pkt_aggr_limit = SDIO_MP_AGGR_DEF_PKT_LIMIT;
/* Allocate buffers for SDIO MP-A */
@@ -1774,4 +1802,5 @@ MODULE_AUTHOR("Marvell International Ltd.");
MODULE_DESCRIPTION("Marvell WiFi-Ex SDIO Driver version " SDIO_VERSION);
MODULE_VERSION(SDIO_VERSION);
MODULE_LICENSE("GPL v2");
-MODULE_FIRMWARE("mrvl/sd8787_uapsta.bin");
+MODULE_FIRMWARE(SD8787_DEFAULT_FW_NAME);
+MODULE_FIRMWARE(SD8797_DEFAULT_FW_NAME);
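The sdio.c hunks register a second SDIO device ID and select the firmware image per ID in mwifiex_register_dev(). A standalone sketch of that selection is below, with hypothetical demo_* names; the device IDs and firmware file names are copied from the hunks above.

#include <stdio.h>
#include <string.h>

#define DEMO_ID_8787 0x9119
#define DEMO_ID_8797 0x9129

static void demo_pick_fw(unsigned int device, char *fw_name, size_t len)
{
	/* Pick the firmware image matching the detected device ID,
	 * falling back to the 8787 image for anything unknown. */
	switch (device) {
	case DEMO_ID_8797:
		strncpy(fw_name, "mrvl/sd8797_uapsta.bin", len - 1);
		break;
	case DEMO_ID_8787:
	default:
		strncpy(fw_name, "mrvl/sd8787_uapsta.bin", len - 1);
		break;
	}
	fw_name[len - 1] = '\0';
}

int main(void)
{
	char name[32];

	demo_pick_fw(DEMO_ID_8797, name, sizeof(name));
	printf("%s\n", name);
	return 0;
}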
diff --git a/drivers/net/wireless/mwifiex/sdio.h b/drivers/net/wireless/mwifiex/sdio.h
index 3f711801e58a..a3fb322205b0 100644
--- a/drivers/net/wireless/mwifiex/sdio.h
+++ b/drivers/net/wireless/mwifiex/sdio.h
@@ -29,6 +29,7 @@
#include "main.h"
#define SD8787_DEFAULT_FW_NAME "mrvl/sd8787_uapsta.bin"
+#define SD8797_DEFAULT_FW_NAME "mrvl/sd8797_uapsta.bin"
#define BLOCK_MODE 1
#define BYTE_MODE 0
diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c
index ea6518d1c9e3..6e443ffa0465 100644
--- a/drivers/net/wireless/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/mwifiex/sta_cmd.c
@@ -748,7 +748,7 @@ static int mwifiex_cmd_802_11_rf_channel(struct mwifiex_private *priv,
cpu_to_le16(HostCmd_SCAN_RADIO_TYPE_A);
rf_type = le16_to_cpu(rf_chan->rf_type);
- SET_SECONDARYCHAN(rf_type, priv->adapter->chan_offset);
+ SET_SECONDARYCHAN(rf_type, priv->adapter->sec_chan_offset);
rf_chan->current_channel = cpu_to_le16(*channel);
}
rf_chan->action = cpu_to_le16(cmd_action);
diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c
index 7a16b0c417af..e812db8b695c 100644
--- a/drivers/net/wireless/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c
@@ -508,7 +508,7 @@ static int mwifiex_ret_802_11_tx_rate_query(struct mwifiex_private *priv,
priv->tx_htinfo = resp->params.tx_rate.ht_info;
if (!priv->is_data_rate_auto)
priv->data_rate =
- mwifiex_index_to_data_rate(priv->tx_rate,
+ mwifiex_index_to_data_rate(priv, priv->tx_rate,
priv->tx_htinfo);
return 0;
diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c
index f204810e8338..d7aa21da84d0 100644
--- a/drivers/net/wireless/mwifiex/sta_event.c
+++ b/drivers/net/wireless/mwifiex/sta_event.c
@@ -115,18 +115,17 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv)
if (adapter->num_cmd_timeout && adapter->curr_cmd)
return;
priv->media_connected = false;
- if (!priv->disconnect) {
- priv->disconnect = 1;
- dev_dbg(adapter->dev, "info: successfully disconnected from"
- " %pM: reason code %d\n", priv->cfg_bssid,
- WLAN_REASON_DEAUTH_LEAVING);
- cfg80211_disconnected(priv->netdev,
- WLAN_REASON_DEAUTH_LEAVING, NULL, 0,
- GFP_KERNEL);
- queue_work(priv->workqueue, &priv->cfg_workqueue);
+ dev_dbg(adapter->dev, "info: successfully disconnected from"
+ " %pM: reason code %d\n", priv->cfg_bssid,
+ WLAN_REASON_DEAUTH_LEAVING);
+ if (priv->bss_mode == NL80211_IFTYPE_STATION) {
+ cfg80211_disconnected(priv->netdev, WLAN_REASON_DEAUTH_LEAVING,
+ NULL, 0, GFP_KERNEL);
}
+ memset(priv->cfg_bssid, 0, ETH_ALEN);
+
if (!netif_queue_stopped(priv->netdev))
- netif_stop_queue(priv->netdev);
+ mwifiex_stop_net_dev_queue(priv->netdev, adapter);
if (netif_carrier_ok(priv->netdev))
netif_carrier_off(priv->netdev);
/* Reset wireless stats signal info */
@@ -201,7 +200,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
if (!netif_carrier_ok(priv->netdev))
netif_carrier_on(priv->netdev);
if (netif_queue_stopped(priv->netdev))
- netif_wake_queue(priv->netdev);
+ mwifiex_wake_up_net_dev_queue(priv->netdev, adapter);
break;
case EVENT_DEAUTHENTICATED:
@@ -292,7 +291,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
priv->adhoc_is_link_sensed = false;
mwifiex_clean_txrx(priv);
if (!netif_queue_stopped(priv->netdev))
- netif_stop_queue(priv->netdev);
+ mwifiex_stop_net_dev_queue(priv->netdev, adapter);
if (netif_carrier_ok(priv->netdev))
netif_carrier_off(priv->netdev);
break;
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index ea4a29b7e331..470ca75ec250 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -55,9 +55,14 @@ int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter)
{
bool cancel_flag = false;
int status = adapter->cmd_wait_q.status;
- struct cmd_ctrl_node *cmd_queued = adapter->cmd_queued;
+ struct cmd_ctrl_node *cmd_queued;
+ if (!adapter->cmd_queued)
+ return 0;
+
+ cmd_queued = adapter->cmd_queued;
adapter->cmd_queued = NULL;
+
dev_dbg(adapter->dev, "cmd pending\n");
atomic_inc(&adapter->cmd_pending);
@@ -234,7 +239,7 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
"associating...\n");
if (!netif_queue_stopped(priv->netdev))
- netif_stop_queue(priv->netdev);
+ mwifiex_stop_net_dev_queue(priv->netdev, adapter);
/* Clear any past association response stored for
* application retrieval */
@@ -265,7 +270,7 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
ret = mwifiex_check_network_compatibility(priv, bss_desc);
if (!netif_queue_stopped(priv->netdev))
- netif_stop_queue(priv->netdev);
+ mwifiex_stop_net_dev_queue(priv->netdev, adapter);
if (!ret) {
dev_dbg(adapter->dev, "info: network found in scan"
@@ -472,67 +477,6 @@ int mwifiex_get_bss_info(struct mwifiex_private *priv,
}
/*
- * The function sets band configurations.
- *
- * it performs extra checks to make sure the Ad-Hoc
- * band and channel are compatible. Otherwise it returns an error.
- *
- */
-int mwifiex_set_radio_band_cfg(struct mwifiex_private *priv,
- struct mwifiex_ds_band_cfg *radio_cfg)
-{
- struct mwifiex_adapter *adapter = priv->adapter;
- u8 infra_band, adhoc_band;
- u32 adhoc_channel;
-
- infra_band = (u8) radio_cfg->config_bands;
- adhoc_band = (u8) radio_cfg->adhoc_start_band;
- adhoc_channel = radio_cfg->adhoc_channel;
-
- /* SET Infra band */
- if ((infra_band | adapter->fw_bands) & ~adapter->fw_bands)
- return -1;
-
- adapter->config_bands = infra_band;
-
- /* SET Ad-hoc Band */
- if ((adhoc_band | adapter->fw_bands) & ~adapter->fw_bands)
- return -1;
-
- if (adhoc_band)
- adapter->adhoc_start_band = adhoc_band;
- adapter->chan_offset = (u8) radio_cfg->sec_chan_offset;
- /*
- * If no adhoc_channel is supplied verify if the existing adhoc
- * channel compiles with new adhoc_band
- */
- if (!adhoc_channel) {
- if (!mwifiex_get_cfp_by_band_and_channel_from_cfg80211
- (priv, adapter->adhoc_start_band,
- priv->adhoc_channel)) {
- /* Pass back the default channel */
- radio_cfg->adhoc_channel = DEFAULT_AD_HOC_CHANNEL;
- if ((adapter->adhoc_start_band & BAND_A)
- || (adapter->adhoc_start_band & BAND_AN))
- radio_cfg->adhoc_channel =
- DEFAULT_AD_HOC_CHANNEL_A;
- }
- } else { /* Retrurn error if adhoc_band and
- adhoc_channel combination is invalid */
- if (!mwifiex_get_cfp_by_band_and_channel_from_cfg80211
- (priv, adapter->adhoc_start_band, (u16) adhoc_channel))
- return -1;
- priv->adhoc_channel = (u8) adhoc_channel;
- }
- if ((adhoc_band & BAND_GN) || (adhoc_band & BAND_AN))
- adapter->adhoc_11n_enabled = true;
- else
- adapter->adhoc_11n_enabled = false;
-
- return 0;
-}
-
-/*
* The function disables auto deep sleep mode.
*/
int mwifiex_disable_auto_ds(struct mwifiex_private *priv)
@@ -832,8 +776,8 @@ int mwifiex_drv_get_data_rate(struct mwifiex_private *priv,
if (!ret) {
if (rate->is_rate_auto)
- rate->rate = mwifiex_index_to_data_rate(priv->tx_rate,
- priv->tx_htinfo);
+ rate->rate = mwifiex_index_to_data_rate(priv,
+ priv->tx_rate, priv->tx_htinfo);
else
rate->rate = priv->data_rate;
} else {
diff --git a/drivers/net/wireless/mwifiex/sta_rx.c b/drivers/net/wireless/mwifiex/sta_rx.c
index 27430512f7cd..5e1ef7e5da4f 100644
--- a/drivers/net/wireless/mwifiex/sta_rx.c
+++ b/drivers/net/wireless/mwifiex/sta_rx.c
@@ -126,6 +126,9 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *adapter,
u16 rx_pkt_type;
struct mwifiex_private *priv = adapter->priv[rx_info->bss_index];
+ if (!priv)
+ return -1;
+
local_rx_pd = (struct rxpd *) (skb->data);
rx_pkt_type = local_rx_pd->rx_pkt_type;
@@ -189,12 +192,11 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *adapter,
(u8) local_rx_pd->rx_pkt_type,
skb);
- if (ret || (rx_pkt_type == PKT_TYPE_BAR)) {
- if (priv && (ret == -1))
- priv->stats.rx_dropped++;
-
+ if (ret || (rx_pkt_type == PKT_TYPE_BAR))
dev_kfree_skb_any(skb);
- }
+
+ if (ret)
+ priv->stats.rx_dropped++;
return ret;
}
diff --git a/drivers/net/wireless/mwifiex/txrx.c b/drivers/net/wireless/mwifiex/txrx.c
index a206f412875f..d9274a1b77ac 100644
--- a/drivers/net/wireless/mwifiex/txrx.c
+++ b/drivers/net/wireless/mwifiex/txrx.c
@@ -134,7 +134,7 @@ int mwifiex_write_data_complete(struct mwifiex_adapter *adapter,
if (!priv)
goto done;
- priv->netdev->trans_start = jiffies;
+ mwifiex_set_trans_start(priv->netdev);
if (!status) {
priv->stats.tx_packets++;
priv->stats.tx_bytes += skb->len;
@@ -152,7 +152,8 @@ int mwifiex_write_data_complete(struct mwifiex_adapter *adapter,
if ((GET_BSS_ROLE(tpriv) == MWIFIEX_BSS_ROLE_STA)
&& (tpriv->media_connected)) {
if (netif_queue_stopped(tpriv->netdev))
- netif_wake_queue(tpriv->netdev);
+ mwifiex_wake_up_net_dev_queue(tpriv->netdev,
+ adapter);
}
}
done:
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index 995695c28d5c..dd5aeaff44ba 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -28,10 +28,10 @@
#define MWL8K_DESC "Marvell TOPDOG(R) 802.11 Wireless Network Driver"
#define MWL8K_NAME KBUILD_MODNAME
-#define MWL8K_VERSION "0.12"
+#define MWL8K_VERSION "0.13"
/* Module parameters */
-static unsigned ap_mode_default;
+static bool ap_mode_default;
module_param(ap_mode_default, bool, 0);
MODULE_PARM_DESC(ap_mode_default,
"Set to 1 to make ap mode the default instead of sta mode");
@@ -198,6 +198,7 @@ struct mwl8k_priv {
/* firmware access */
struct mutex fw_mutex;
struct task_struct *fw_mutex_owner;
+ struct task_struct *hw_restart_owner;
int fw_mutex_depth;
struct completion *hostcmd_wait;
@@ -262,6 +263,10 @@ struct mwl8k_priv {
*/
struct ieee80211_tx_queue_params wmm_params[MWL8K_TX_WMM_QUEUES];
+ /* To perform the task of reloading the firmware */
+ struct work_struct fw_reload;
+ bool hw_restart_in_progress;
+
/* async firmware loading state */
unsigned fw_state;
char *fw_pref;
@@ -738,10 +743,10 @@ static int mwl8k_load_firmware(struct ieee80211_hw *hw)
ready_code = ioread32(priv->regs + MWL8K_HIU_INT_CODE);
if (ready_code == MWL8K_FWAP_READY) {
- priv->ap_fw = 1;
+ priv->ap_fw = true;
break;
} else if (ready_code == MWL8K_FWSTA_READY) {
- priv->ap_fw = 0;
+ priv->ap_fw = false;
break;
}
@@ -1498,6 +1503,18 @@ static int mwl8k_tx_wait_empty(struct ieee80211_hw *hw)
might_sleep();
+	/* A firmware restart is in progress, so allow only the commands
+	 * issued by the restart code itself and block everything else,
+	 * since any other command is bound to fail while the firmware
+	 * is down.
+	 */
+ if (priv->hw_restart_in_progress) {
+ if (priv->hw_restart_owner == current)
+ return 0;
+ else
+ return -EBUSY;
+ }
+
/*
* The TX queues are stopped at this point, so this test
* doesn't need to take ->tx_lock.
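
The blocking rule in that new comment block — only the thread that owns the restart may issue commands while the restart is in flight, everyone else gets -EBUSY — is easy to model in isolation. A hedged sketch, plain userspace C rather than kernel code, with invented struct and function names; the owner pointer stands in for the kernel's 'current':

#include <stdio.h>
#include <stdbool.h>
#include <errno.h>

struct fake_priv {
	bool hw_restart_in_progress;
	const void *hw_restart_owner;	/* stand-in for 'current' */
};

/* Return 0 if the caller may proceed, -EBUSY if it must back off. */
static int restart_gate(const struct fake_priv *priv, const void *caller)
{
	if (!priv->hw_restart_in_progress)
		return 0;
	return (priv->hw_restart_owner == caller) ? 0 : -EBUSY;
}

int main(void)
{
	int restart_thread, other_thread;
	struct fake_priv priv = {
		.hw_restart_in_progress = true,
		.hw_restart_owner = &restart_thread,
	};

	printf("restart owner: %d\n", restart_gate(&priv, &restart_thread));	/* 0 */
	printf("other caller:  %d\n", restart_gate(&priv, &other_thread));	/* -EBUSY */
	return 0;
}
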
@@ -1541,6 +1558,8 @@ static int mwl8k_tx_wait_empty(struct ieee80211_hw *hw)
wiphy_err(hw->wiphy, "tx rings stuck for %d ms\n",
MWL8K_TX_WAIT_TIMEOUT_MS);
mwl8k_dump_tx_rings(hw);
+ priv->hw_restart_in_progress = true;
+ ieee80211_queue_work(hw, &priv->fw_reload);
rc = -ETIMEDOUT;
}
@@ -2058,7 +2077,9 @@ static int mwl8k_fw_lock(struct ieee80211_hw *hw)
rc = mwl8k_tx_wait_empty(hw);
if (rc) {
- ieee80211_wake_queues(hw);
+ if (!priv->hw_restart_in_progress)
+ ieee80211_wake_queues(hw);
+
mutex_unlock(&priv->fw_mutex);
return rc;
@@ -2077,7 +2098,9 @@ static void mwl8k_fw_unlock(struct ieee80211_hw *hw)
struct mwl8k_priv *priv = hw->priv;
if (!--priv->fw_mutex_depth) {
- ieee80211_wake_queues(hw);
+ if (!priv->hw_restart_in_progress)
+ ieee80211_wake_queues(hw);
+
priv->fw_mutex_owner = NULL;
mutex_unlock(&priv->fw_mutex);
}
@@ -2754,7 +2777,7 @@ static int mwl8k_cmd_tx_power(struct ieee80211_hw *hw,
else if (channel->band == IEEE80211_BAND_5GHZ)
cmd->band = cpu_to_le16(0x4);
- cmd->channel = channel->hw_value;
+ cmd->channel = cpu_to_le16(channel->hw_value);
if (conf->channel_type == NL80211_CHAN_NO_HT ||
conf->channel_type == NL80211_CHAN_HT20) {
@@ -4043,7 +4066,7 @@ static int mwl8k_cmd_encryption_remove_key(struct ieee80211_hw *hw,
goto done;
if (key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
- WLAN_CIPHER_SUITE_WEP104)
+ key->cipher == WLAN_CIPHER_SUITE_WEP104)
mwl8k_vif->wep_key_conf[key->keyidx].enabled = 0;
cmd->action = cpu_to_le32(MWL8K_ENCR_REMOVE_KEY);
@@ -4398,7 +4421,8 @@ static void mwl8k_stop(struct ieee80211_hw *hw)
struct mwl8k_priv *priv = hw->priv;
int i;
- mwl8k_cmd_radio_disable(hw);
+ if (!priv->hw_restart_in_progress)
+ mwl8k_cmd_radio_disable(hw);
ieee80211_stop_queues(hw);
@@ -4499,6 +4523,16 @@ static int mwl8k_add_interface(struct ieee80211_hw *hw,
return 0;
}
+static void mwl8k_remove_vif(struct mwl8k_priv *priv, struct mwl8k_vif *vif)
+{
+ /* Has ieee80211_restart_hw re-added the removed interfaces? */
+ if (!priv->macids_used)
+ return;
+
+ priv->macids_used &= ~(1 << vif->macid);
+ list_del(&vif->list);
+}
+
static void mwl8k_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
@@ -4510,8 +4544,54 @@ static void mwl8k_remove_interface(struct ieee80211_hw *hw,
mwl8k_cmd_set_mac_addr(hw, vif, "\x00\x00\x00\x00\x00\x00");
- priv->macids_used &= ~(1 << mwl8k_vif->macid);
- list_del(&mwl8k_vif->list);
+ mwl8k_remove_vif(priv, mwl8k_vif);
+}
+
+static void mwl8k_hw_restart_work(struct work_struct *work)
+{
+ struct mwl8k_priv *priv =
+ container_of(work, struct mwl8k_priv, fw_reload);
+ struct ieee80211_hw *hw = priv->hw;
+ struct mwl8k_device_info *di;
+ int rc;
+
+ /* If some command is waiting for a response, clear it */
+ if (priv->hostcmd_wait != NULL) {
+ complete(priv->hostcmd_wait);
+ priv->hostcmd_wait = NULL;
+ }
+
+ priv->hw_restart_owner = current;
+ di = priv->device_info;
+ mwl8k_fw_lock(hw);
+
+ if (priv->ap_fw)
+ rc = mwl8k_reload_firmware(hw, di->fw_image_ap);
+ else
+ rc = mwl8k_reload_firmware(hw, di->fw_image_sta);
+
+ if (rc)
+ goto fail;
+
+ priv->hw_restart_owner = NULL;
+ priv->hw_restart_in_progress = false;
+
+	/*
+	 * This unlock wakes up the queues and also
+	 * opens the command path for other commands.
+	 */
+ mwl8k_fw_unlock(hw);
+
+ ieee80211_restart_hw(hw);
+
+ wiphy_err(hw->wiphy, "Firmware restarted successfully\n");
+
+ return;
+fail:
+ mwl8k_fw_unlock(hw);
+
+ wiphy_err(hw->wiphy, "Firmware restart failed\n");
}
static int mwl8k_config(struct ieee80211_hw *hw, u32 changed)
@@ -5024,7 +5104,11 @@ mwl8k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
for (i = 0; i < MAX_AMPDU_ATTEMPTS; i++) {
rc = mwl8k_check_ba(hw, stream);
- if (!rc)
+		/* If a HW restart is in progress, mwl8k_post_cmd will
+		 * return -EBUSY. Avoid retrying mwl8k_check_ba in
+		 * such cases.
+		 */
+ if (!rc || rc == -EBUSY)
break;
/*
* HW queues take time to be flushed, give them
@@ -5044,14 +5128,14 @@ mwl8k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
ieee80211_start_tx_ba_cb_irqsafe(vif, addr, tid);
break;
case IEEE80211_AMPDU_TX_STOP:
- if (stream == NULL)
- break;
- if (stream->state == AMPDU_STREAM_ACTIVE) {
- spin_unlock(&priv->stream_lock);
- mwl8k_destroy_ba(hw, stream);
- spin_lock(&priv->stream_lock);
+ if (stream) {
+ if (stream->state == AMPDU_STREAM_ACTIVE) {
+ spin_unlock(&priv->stream_lock);
+ mwl8k_destroy_ba(hw, stream);
+ spin_lock(&priv->stream_lock);
+ }
+ mwl8k_remove_stream(hw, stream);
}
- mwl8k_remove_stream(hw, stream);
ieee80211_stop_tx_ba_cb_irqsafe(vif, addr, tid);
break;
case IEEE80211_AMPDU_TX_OPERATIONAL:
@@ -5263,12 +5347,15 @@ fail:
mwl8k_release_firmware(priv);
}
+#define MAX_RESTART_ATTEMPTS 1
static int mwl8k_init_firmware(struct ieee80211_hw *hw, char *fw_image,
bool nowait)
{
struct mwl8k_priv *priv = hw->priv;
int rc;
+ int count = MAX_RESTART_ATTEMPTS;
+retry:
/* Reset firmware and hardware */
mwl8k_hw_reset(priv);
@@ -5290,6 +5377,16 @@ static int mwl8k_init_firmware(struct ieee80211_hw *hw, char *fw_image,
/* Reclaim memory once firmware is successfully loaded */
mwl8k_release_firmware(priv);
+ if (rc && count) {
+		/* FW did not start successfully;
+		 * let's try one more time
+		 */
+ count--;
+ wiphy_err(hw->wiphy, "Trying to reload the firmware again\n");
+ msleep(20);
+ goto retry;
+ }
+
return rc;
}
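
The retry added above is a classic bounded-retry loop: one extra attempt (MAX_RESTART_ATTEMPTS is 1), a short 20 ms pause, then give up with the original error code. A standalone sketch of the same shape, with a stub standing in for the real firmware load:

#include <stdio.h>
#include <unistd.h>

#define MAX_RESTART_ATTEMPTS 1

/* Stub: fails on the first call, succeeds on the second. */
static int try_load(void)
{
	static int calls;
	return (calls++ == 0) ? -1 : 0;
}

static int load_with_retry(void)
{
	int count = MAX_RESTART_ATTEMPTS;
	int rc;

retry:
	rc = try_load();
	if (rc && count) {
		count--;
		fprintf(stderr, "load failed, trying again\n");
		usleep(20 * 1000);	/* mirrors the 20 ms pause in the patch */
		goto retry;
	}
	return rc;
}

int main(void)
{
	printf("final rc = %d\n", load_with_retry());
	return 0;
}
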
@@ -5365,7 +5462,14 @@ static int mwl8k_probe_hw(struct ieee80211_hw *hw)
goto err_free_queues;
}
- memset(priv->ampdu, 0, sizeof(priv->ampdu));
+ /*
+ * When hw restart is requested,
+ * mac80211 will take care of clearing
+ * the ampdu streams, so do not clear
+ * the ampdu state here
+ */
+ if (!priv->hw_restart_in_progress)
+ memset(priv->ampdu, 0, sizeof(priv->ampdu));
/*
* Temporarily enable interrupts. Initial firmware host
@@ -5439,10 +5543,20 @@ static int mwl8k_reload_firmware(struct ieee80211_hw *hw, char *fw_image)
{
int i, rc = 0;
struct mwl8k_priv *priv = hw->priv;
+ struct mwl8k_vif *vif, *tmp_vif;
mwl8k_stop(hw);
mwl8k_rxq_deinit(hw, 0);
+	/*
+	 * All the existing interfaces are re-added by ieee80211_reconfig(),
+	 * which means the driver should remove the existing interfaces
+	 * before calling ieee80211_restart_hw().
+	 */
+ if (priv->hw_restart_in_progress)
+ list_for_each_entry_safe(vif, tmp_vif, &priv->vif_list, list)
+ mwl8k_remove_vif(priv, vif);
+
for (i = 0; i < mwl8k_tx_queues(priv); i++)
mwl8k_txq_deinit(hw, i);
@@ -5454,6 +5568,9 @@ static int mwl8k_reload_firmware(struct ieee80211_hw *hw, char *fw_image)
if (rc)
goto fail;
+ if (priv->hw_restart_in_progress)
+ return rc;
+
rc = mwl8k_start(hw);
if (rc)
goto fail;
@@ -5517,13 +5634,15 @@ static int mwl8k_firmware_load_success(struct mwl8k_priv *priv)
INIT_LIST_HEAD(&priv->vif_list);
/* Set default radio state and preamble */
- priv->radio_on = 0;
- priv->radio_short_preamble = 0;
+ priv->radio_on = false;
+ priv->radio_short_preamble = false;
/* Finalize join worker */
INIT_WORK(&priv->finalize_join_worker, mwl8k_finalize_join_worker);
/* Handle watchdog ba events */
INIT_WORK(&priv->watchdog_ba_handle, mwl8k_watchdog_ba_events);
+ /* To reload the firmware if it crashes */
+ INIT_WORK(&priv->fw_reload, mwl8k_hw_restart_work);
/* TX reclaim and RX tasklets. */
tasklet_init(&priv->poll_tx_task, mwl8k_tx_poll, (unsigned long)hw);
@@ -5667,6 +5786,9 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
rc = mwl8k_init_firmware(hw, priv->fw_pref, true);
if (rc)
goto err_stop_firmware;
+
+ priv->hw_restart_in_progress = false;
+
return rc;
err_stop_firmware:
diff --git a/drivers/net/wireless/orinoco/main.c b/drivers/net/wireless/orinoco/main.c
index b52acc4b4086..9fb77d0319f5 100644
--- a/drivers/net/wireless/orinoco/main.c
+++ b/drivers/net/wireless/orinoco/main.c
@@ -121,7 +121,7 @@ module_param(orinoco_debug, int, 0644);
MODULE_PARM_DESC(orinoco_debug, "Debug level");
#endif
-static int suppress_linkstatus; /* = 0 */
+static bool suppress_linkstatus; /* = 0 */
module_param(suppress_linkstatus, bool, 0644);
MODULE_PARM_DESC(suppress_linkstatus, "Don't log link status changes");
diff --git a/drivers/net/wireless/orinoco/scan.c b/drivers/net/wireless/orinoco/scan.c
index e99ca1c1e0d8..96e39edfec77 100644
--- a/drivers/net/wireless/orinoco/scan.c
+++ b/drivers/net/wireless/orinoco/scan.c
@@ -76,6 +76,7 @@ static void orinoco_add_hostscan_result(struct orinoco_private *priv,
{
struct wiphy *wiphy = priv_to_wiphy(priv);
struct ieee80211_channel *channel;
+ struct cfg80211_bss *cbss;
u8 *ie;
u8 ie_buf[46];
u64 timestamp;
@@ -121,9 +122,10 @@ static void orinoco_add_hostscan_result(struct orinoco_private *priv,
beacon_interval = le16_to_cpu(bss->a.beacon_interv);
signal = SIGNAL_TO_MBM(le16_to_cpu(bss->a.level));
- cfg80211_inform_bss(wiphy, channel, bss->a.bssid, timestamp,
- capability, beacon_interval, ie_buf, ie_len,
- signal, GFP_KERNEL);
+ cbss = cfg80211_inform_bss(wiphy, channel, bss->a.bssid, timestamp,
+ capability, beacon_interval, ie_buf, ie_len,
+ signal, GFP_KERNEL);
+ cfg80211_put_bss(cbss);
}
void orinoco_add_extscan_result(struct orinoco_private *priv,
@@ -132,6 +134,7 @@ void orinoco_add_extscan_result(struct orinoco_private *priv,
{
struct wiphy *wiphy = priv_to_wiphy(priv);
struct ieee80211_channel *channel;
+ struct cfg80211_bss *cbss;
const u8 *ie;
u64 timestamp;
s32 signal;
@@ -152,9 +155,10 @@ void orinoco_add_extscan_result(struct orinoco_private *priv,
ie = bss->data;
signal = SIGNAL_TO_MBM(bss->level);
- cfg80211_inform_bss(wiphy, channel, bss->bssid, timestamp,
- capability, beacon_interval, ie, ie_len,
- signal, GFP_KERNEL);
+ cbss = cfg80211_inform_bss(wiphy, channel, bss->bssid, timestamp,
+ capability, beacon_interval, ie, ie_len,
+ signal, GFP_KERNEL);
+ cfg80211_put_bss(cbss);
}
void orinoco_add_hostscan_results(struct orinoco_private *priv,
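
Both orinoco hunks above follow the same rule: cfg80211_inform_bss() hands back a referenced struct cfg80211_bss, and a caller that keeps nothing must drop that reference with cfg80211_put_bss(). A hedged userspace analogue of the acquire/release pattern (all names invented for illustration, not the cfg80211 API):

#include <stdio.h>
#include <stdlib.h>

struct bss {
	int refcount;
	char ssid[32];
};

/* Analogue of cfg80211_inform_bss(): returns an object holding one reference. */
static struct bss *inform_bss(const char *ssid)
{
	struct bss *b = calloc(1, sizeof(*b));
	if (!b)
		return NULL;
	b->refcount = 1;
	snprintf(b->ssid, sizeof(b->ssid), "%s", ssid);
	return b;
}

/* Analogue of cfg80211_put_bss(): drop the reference, free on the last put. */
static void put_bss(struct bss *b)
{
	if (b && --b->refcount == 0)
		free(b);
}

int main(void)
{
	struct bss *b = inform_bss("example");

	if (b)
		printf("reported %s\n", b->ssid);
	put_bss(b);	/* the caller keeps nothing, so it releases its reference */
	return 0;
}
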
diff --git a/drivers/net/wireless/p54/main.c b/drivers/net/wireless/p54/main.c
index db4d9a02f264..af2ca1a9c7d3 100644
--- a/drivers/net/wireless/p54/main.c
+++ b/drivers/net/wireless/p54/main.c
@@ -27,7 +27,7 @@
#include "p54.h"
#include "lmac.h"
-static int modparam_nohwcrypt;
+static bool modparam_nohwcrypt;
module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
MODULE_AUTHOR("Michael Wu <flamingice@sourmilk.net>");
diff --git a/drivers/net/wireless/p54/p54spi.c b/drivers/net/wireless/p54/p54spi.c
index f18df82eeb92..7faed62c6378 100644
--- a/drivers/net/wireless/p54/p54spi.c
+++ b/drivers/net/wireless/p54/p54spi.c
@@ -581,15 +581,9 @@ static void p54spi_op_stop(struct ieee80211_hw *dev)
struct p54s_priv *priv = dev->priv;
unsigned long flags;
- if (mutex_lock_interruptible(&priv->mutex)) {
- /* FIXME: how to handle this error? */
- return;
- }
-
+ mutex_lock(&priv->mutex);
WARN_ON(priv->fw_state != FW_STATE_READY);
- cancel_work_sync(&priv->work);
-
p54spi_power_off(priv);
spin_lock_irqsave(&priv->tx_lock, flags);
INIT_LIST_HEAD(&priv->tx_pending);
@@ -597,6 +591,8 @@ static void p54spi_op_stop(struct ieee80211_hw *dev)
priv->fw_state = FW_STATE_OFF;
mutex_unlock(&priv->mutex);
+
+ cancel_work_sync(&priv->work);
}
static int __devinit p54spi_probe(struct spi_device *spi)
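
The p54spi change above is about lock ordering: cancel_work_sync() sleeps until the work item is finished, and doing that while still holding priv->mutex can deadlock if the work handler needs the same mutex, so the cancel is moved after mutex_unlock(). A hedged pthread sketch of the same ordering (userspace stand-ins for the kernel primitives; build with -pthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int state;

/* Worker analogue: it takes the same lock, as the work handler may. */
static void *worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	state++;
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);

	pthread_mutex_lock(&lock);
	state = 100;	/* the stop-path bookkeeping done under the lock */
	pthread_mutex_unlock(&lock);

	/* Wait for the worker only after the lock has been released;
	 * waiting (or cancel_work_sync() in the kernel) while still
	 * holding the lock can deadlock if the worker needs that lock. */
	pthread_join(t, NULL);

	printf("state = %d\n", state);	/* 100 or 101, depending on scheduling */
	return 0;
}
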
@@ -656,6 +652,7 @@ static int __devinit p54spi_probe(struct spi_device *spi)
init_completion(&priv->fw_comp);
INIT_LIST_HEAD(&priv->tx_pending);
mutex_init(&priv->mutex);
+ spin_lock_init(&priv->tx_lock);
SET_IEEE80211_DEV(hw, &spi->dev);
priv->common.open = p54spi_op_start;
priv->common.stop = p54spi_op_stop;
@@ -703,7 +700,6 @@ static int __devexit p54spi_remove(struct spi_device *spi)
static struct spi_driver p54spi_driver = {
.driver = {
.name = "p54spi",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
index 6ed9c323e3cb..42b97bc38477 100644
--- a/drivers/net/wireless/p54/txrx.c
+++ b/drivers/net/wireless/p54/txrx.c
@@ -242,7 +242,7 @@ void p54_free_skb(struct ieee80211_hw *dev, struct sk_buff *skb)
skb_unlink(skb, &priv->tx_queue);
p54_tx_qos_accounting_free(priv, skb);
- dev_kfree_skb_any(skb);
+ ieee80211_free_txskb(dev, skb);
}
EXPORT_SYMBOL_GPL(p54_free_skb);
@@ -788,7 +788,7 @@ void p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb)
&hdr_flags, &aid, &burst_allowed);
if (p54_tx_qos_accounting_alloc(priv, skb, queue)) {
- dev_kfree_skb_any(skb);
+ ieee80211_free_txskb(dev, skb);
return;
}
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c
index d97a2caf582b..4e44b1af119a 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/prism54/isl_ioctl.c
@@ -778,7 +778,7 @@ prism54_get_essid(struct net_device *ndev, struct iw_request_info *info,
dwrq->flags = 0;
dwrq->length = 0;
}
- essid->octets[essid->length] = '\0';
+ essid->octets[dwrq->length] = '\0';
memcpy(extra, essid->octets, dwrq->length);
kfree(essid);
@@ -2493,323 +2493,7 @@ prism54_set_mac_address(struct net_device *ndev, void *addr)
return ret;
}
-/* Note: currently, use hostapd ioctl from the Host AP driver for WPA
- * support. This is to be replaced with Linux wireless extensions once they
- * get WPA support. */
-
-/* Note II: please leave all this together as it will be easier to remove later,
- * once wireless extensions add WPA support -mcgrof */
-
-/* PRISM54_HOSTAPD ioctl() cmd: */
-enum {
- PRISM2_SET_ENCRYPTION = 6,
- PRISM2_HOSTAPD_SET_GENERIC_ELEMENT = 12,
- PRISM2_HOSTAPD_MLME = 13,
- PRISM2_HOSTAPD_SCAN_REQ = 14,
-};
-
#define PRISM54_SET_WPA SIOCIWFIRSTPRIV+12
-#define PRISM54_HOSTAPD SIOCIWFIRSTPRIV+25
-#define PRISM54_DROP_UNENCRYPTED SIOCIWFIRSTPRIV+26
-
-#define PRISM2_HOSTAPD_MAX_BUF_SIZE 1024
-#define PRISM2_HOSTAPD_GENERIC_ELEMENT_HDR_LEN \
- offsetof(struct prism2_hostapd_param, u.generic_elem.data)
-
-/* Maximum length for algorithm names (-1 for nul termination)
- * used in ioctl() */
-#define HOSTAP_CRYPT_ALG_NAME_LEN 16
-
-struct prism2_hostapd_param {
- u32 cmd;
- u8 sta_addr[ETH_ALEN];
- union {
- struct {
- u8 alg[HOSTAP_CRYPT_ALG_NAME_LEN];
- u32 flags;
- u32 err;
- u8 idx;
- u8 seq[8]; /* sequence counter (set: RX, get: TX) */
- u16 key_len;
- u8 key[0];
- } crypt;
- struct {
- u8 len;
- u8 data[0];
- } generic_elem;
- struct {
-#define MLME_STA_DEAUTH 0
-#define MLME_STA_DISASSOC 1
- u16 cmd;
- u16 reason_code;
- } mlme;
- struct {
- u8 ssid_len;
- u8 ssid[32];
- } scan_req;
- } u;
-};
-
-
-static int
-prism2_ioctl_set_encryption(struct net_device *dev,
- struct prism2_hostapd_param *param,
- int param_len)
-{
- islpci_private *priv = netdev_priv(dev);
- int rvalue = 0, force = 0;
- int authen = DOT11_AUTH_OS, invoke = 0, exunencrypt = 0;
- union oid_res_t r;
-
- /* with the new API, it's impossible to get a NULL pointer.
- * New version of iwconfig set the IW_ENCODE_NOKEY flag
- * when no key is given, but older versions don't. */
-
- if (param->u.crypt.key_len > 0) {
- /* we have a key to set */
- int index = param->u.crypt.idx;
- int current_index;
- struct obj_key key = { DOT11_PRIV_TKIP, 0, "" };
-
- /* get the current key index */
- rvalue = mgt_get_request(priv, DOT11_OID_DEFKEYID, 0, NULL, &r);
- current_index = r.u;
- /* Verify that the key is not marked as invalid */
- if (!(param->u.crypt.flags & IW_ENCODE_NOKEY)) {
- key.length = param->u.crypt.key_len > sizeof (param->u.crypt.key) ?
- sizeof (param->u.crypt.key) : param->u.crypt.key_len;
- memcpy(key.key, param->u.crypt.key, key.length);
- if (key.length == 32)
- /* we want WPA-PSK */
- key.type = DOT11_PRIV_TKIP;
- if ((index < 0) || (index > 3))
- /* no index provided use the current one */
- index = current_index;
-
- /* now send the key to the card */
- rvalue |=
- mgt_set_request(priv, DOT11_OID_DEFKEYX, index,
- &key);
- }
- /*
- * If a valid key is set, encryption should be enabled
- * (user may turn it off later).
- * This is also how "iwconfig ethX key on" works
- */
- if ((index == current_index) && (key.length > 0))
- force = 1;
- } else {
- int index = (param->u.crypt.flags & IW_ENCODE_INDEX) - 1;
- if ((index >= 0) && (index <= 3)) {
- /* we want to set the key index */
- rvalue |=
- mgt_set_request(priv, DOT11_OID_DEFKEYID, 0,
- &index);
- } else {
- if (!(param->u.crypt.flags & IW_ENCODE_MODE)) {
- /* we cannot do anything. Complain. */
- return -EINVAL;
- }
- }
- }
- /* now read the flags */
- if (param->u.crypt.flags & IW_ENCODE_DISABLED) {
- /* Encoding disabled,
- * authen = DOT11_AUTH_OS;
- * invoke = 0;
- * exunencrypt = 0; */
- }
- if (param->u.crypt.flags & IW_ENCODE_OPEN)
- /* Encode but accept non-encoded packets. No auth */
- invoke = 1;
- if ((param->u.crypt.flags & IW_ENCODE_RESTRICTED) || force) {
- /* Refuse non-encoded packets. Auth */
- authen = DOT11_AUTH_BOTH;
- invoke = 1;
- exunencrypt = 1;
- }
- /* do the change if requested */
- if ((param->u.crypt.flags & IW_ENCODE_MODE) || force) {
- rvalue |=
- mgt_set_request(priv, DOT11_OID_AUTHENABLE, 0, &authen);
- rvalue |=
- mgt_set_request(priv, DOT11_OID_PRIVACYINVOKED, 0, &invoke);
- rvalue |=
- mgt_set_request(priv, DOT11_OID_EXUNENCRYPTED, 0,
- &exunencrypt);
- }
- return rvalue;
-}
-
-static int
-prism2_ioctl_set_generic_element(struct net_device *ndev,
- struct prism2_hostapd_param *param,
- int param_len)
-{
- islpci_private *priv = netdev_priv(ndev);
- int max_len, len, alen, ret=0;
- struct obj_attachment *attach;
-
- len = param->u.generic_elem.len;
- max_len = param_len - PRISM2_HOSTAPD_GENERIC_ELEMENT_HDR_LEN;
- if (max_len < 0 || max_len < len)
- return -EINVAL;
-
- alen = sizeof(*attach) + len;
- attach = kzalloc(alen, GFP_KERNEL);
- if (attach == NULL)
- return -ENOMEM;
-
-#define WLAN_FC_TYPE_MGMT 0
-#define WLAN_FC_STYPE_ASSOC_REQ 0
-#define WLAN_FC_STYPE_REASSOC_REQ 2
-
- /* Note: endianness is covered by mgt_set_varlen */
-
- attach->type = (WLAN_FC_TYPE_MGMT << 2) |
- (WLAN_FC_STYPE_ASSOC_REQ << 4);
- attach->id = -1;
- attach->size = len;
- memcpy(attach->data, param->u.generic_elem.data, len);
-
- ret = mgt_set_varlen(priv, DOT11_OID_ATTACHMENT, attach, len);
-
- if (ret == 0) {
- attach->type = (WLAN_FC_TYPE_MGMT << 2) |
- (WLAN_FC_STYPE_REASSOC_REQ << 4);
-
- ret = mgt_set_varlen(priv, DOT11_OID_ATTACHMENT, attach, len);
-
- if (ret == 0)
- printk(KERN_DEBUG "%s: WPA IE Attachment was set\n",
- ndev->name);
- }
-
- kfree(attach);
- return ret;
-
-}
-
-static int
-prism2_ioctl_mlme(struct net_device *dev, struct prism2_hostapd_param *param)
-{
- return -EOPNOTSUPP;
-}
-
-static int
-prism2_ioctl_scan_req(struct net_device *ndev,
- struct prism2_hostapd_param *param)
-{
- islpci_private *priv = netdev_priv(ndev);
- struct iw_request_info info;
- int i, rvalue;
- struct obj_bsslist *bsslist;
- u32 noise = 0;
- char *extra = "";
- char *current_ev = "foo";
- union oid_res_t r;
-
- if (islpci_get_state(priv) < PRV_STATE_INIT) {
- /* device is not ready, fail gently */
- return 0;
- }
-
- /* first get the noise value. We will use it to report the link quality */
- rvalue = mgt_get_request(priv, DOT11_OID_NOISEFLOOR, 0, NULL, &r);
- noise = r.u;
-
- /* Ask the device for a list of known bss. We can report at most
- * IW_MAX_AP=64 to the range struct. But the device won't repport anything
- * if you change the value of IWMAX_BSS=24.
- */
- rvalue |= mgt_get_request(priv, DOT11_OID_BSSLIST, 0, NULL, &r);
- bsslist = r.ptr;
-
- info.cmd = PRISM54_HOSTAPD;
- info.flags = 0;
-
- /* ok now, scan the list and translate its info */
- for (i = 0; i < min(IW_MAX_AP, (int) bsslist->nr); i++)
- current_ev = prism54_translate_bss(ndev, &info, current_ev,
- extra + IW_SCAN_MAX_DATA,
- &(bsslist->bsslist[i]),
- noise);
- kfree(bsslist);
-
- return rvalue;
-}
-
-static int
-prism54_hostapd(struct net_device *ndev, struct iw_point *p)
-{
- struct prism2_hostapd_param *param;
- int ret = 0;
- u32 uwrq;
-
- printk(KERN_DEBUG "prism54_hostapd - len=%d\n", p->length);
- if (p->length < sizeof(struct prism2_hostapd_param) ||
- p->length > PRISM2_HOSTAPD_MAX_BUF_SIZE || !p->pointer)
- return -EINVAL;
-
- param = memdup_user(p->pointer, p->length);
- if (IS_ERR(param))
- return PTR_ERR(param);
-
- switch (param->cmd) {
- case PRISM2_SET_ENCRYPTION:
- printk(KERN_DEBUG "%s: Caught WPA supplicant set encryption request\n",
- ndev->name);
- ret = prism2_ioctl_set_encryption(ndev, param, p->length);
- break;
- case PRISM2_HOSTAPD_SET_GENERIC_ELEMENT:
- printk(KERN_DEBUG "%s: Caught WPA supplicant set WPA IE request\n",
- ndev->name);
- ret = prism2_ioctl_set_generic_element(ndev, param,
- p->length);
- break;
- case PRISM2_HOSTAPD_MLME:
- printk(KERN_DEBUG "%s: Caught WPA supplicant MLME request\n",
- ndev->name);
- ret = prism2_ioctl_mlme(ndev, param);
- break;
- case PRISM2_HOSTAPD_SCAN_REQ:
- printk(KERN_DEBUG "%s: Caught WPA supplicant scan request\n",
- ndev->name);
- ret = prism2_ioctl_scan_req(ndev, param);
- break;
- case PRISM54_SET_WPA:
- printk(KERN_DEBUG "%s: Caught WPA supplicant wpa init request\n",
- ndev->name);
- uwrq = 1;
- ret = prism54_set_wpa(ndev, NULL, &uwrq, NULL);
- break;
- case PRISM54_DROP_UNENCRYPTED:
- printk(KERN_DEBUG "%s: Caught WPA drop unencrypted request\n",
- ndev->name);
-#if 0
- uwrq = 0x01;
- mgt_set(priv, DOT11_OID_EXUNENCRYPTED, &uwrq);
- down_write(&priv->mib_sem);
- mgt_commit(priv);
- up_write(&priv->mib_sem);
-#endif
- /* Not necessary, as set_wpa does it, should we just do it here though? */
- ret = 0;
- break;
- default:
- printk(KERN_DEBUG "%s: Caught a WPA supplicant request that is not supported\n",
- ndev->name);
- ret = -EOPNOTSUPP;
- break;
- }
-
- if (ret == 0 && copy_to_user(p->pointer, param, p->length))
- ret = -EFAULT;
-
- kfree(param);
-
- return ret;
-}
static int
prism54_set_wpa(struct net_device *ndev, struct iw_request_info *info,
@@ -3223,20 +2907,3 @@ const struct iw_handler_def prism54_handler_def = {
.private_args = (struct iw_priv_args *) prism54_private_args,
.get_wireless_stats = prism54_get_wireless_stats,
};
-
-/* For wpa_supplicant */
-
-int
-prism54_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
-{
- struct iwreq *wrq = (struct iwreq *) rq;
- int ret = -1;
- switch (cmd) {
- case PRISM54_HOSTAPD:
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
- ret = prism54_hostapd(ndev, &wrq->u.data);
- return ret;
- }
- return -EOPNOTSUPP;
-}
diff --git a/drivers/net/wireless/prism54/isl_ioctl.h b/drivers/net/wireless/prism54/isl_ioctl.h
index bcfbfb9281d2..a34bceb6e3cd 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.h
+++ b/drivers/net/wireless/prism54/isl_ioctl.h
@@ -43,8 +43,6 @@ void prism54_wpa_bss_ie_clean(islpci_private *priv);
int prism54_set_mac_address(struct net_device *, void *);
-int prism54_ioctl(struct net_device *, struct ifreq *, int);
-
extern const struct iw_handler_def prism54_handler_def;
#endif /* _ISL_IOCTL_H */
diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c
index 5d0f61508a2e..5970ff6f40cc 100644
--- a/drivers/net/wireless/prism54/islpci_dev.c
+++ b/drivers/net/wireless/prism54/islpci_dev.c
@@ -793,8 +793,8 @@ islpci_set_multicast_list(struct net_device *dev)
static void islpci_ethtool_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}
static const struct ethtool_ops islpci_ethtool_ops = {
@@ -804,7 +804,6 @@ static const struct ethtool_ops islpci_ethtool_ops = {
static const struct net_device_ops islpci_netdev_ops = {
.ndo_open = islpci_open,
.ndo_stop = islpci_close,
- .ndo_do_ioctl = prism54_ioctl,
.ndo_start_xmit = islpci_eth_transmit,
.ndo_tx_timeout = islpci_eth_tx_timeout,
.ndo_set_mac_address = prism54_set_mac_address,
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 0021e4948512..04fec1fa6e0b 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -2426,7 +2426,7 @@ static void rx_authenticate(ray_dev_t *local, struct rcs __iomem *prcs,
unsigned int pkt_addr, int rx_len)
{
UCHAR buff[256];
- struct rx_msg *msg = (struct rx_msg *)buff;
+ struct ray_rx_msg *msg = (struct ray_rx_msg *) buff;
del_timer(&local->timer);
@@ -2513,7 +2513,7 @@ static void rx_deauthenticate(ray_dev_t *local, struct rcs __iomem *prcs,
unsigned int pkt_addr, int rx_len)
{
/* UCHAR buff[256];
- struct rx_msg *msg = (struct rx_msg *)buff;
+ struct ray_rx_msg *msg = (struct ray_rx_msg *) buff;
*/
pr_debug("Deauthentication frame received\n");
local->authentication_state = UNAUTHENTICATED;
diff --git a/drivers/net/wireless/rayctl.h b/drivers/net/wireless/rayctl.h
index d7646f299bd3..3c3b98b152c3 100644
--- a/drivers/net/wireless/rayctl.h
+++ b/drivers/net/wireless/rayctl.h
@@ -566,9 +566,9 @@ struct phy_header {
UCHAR hdr_3;
UCHAR hdr_4;
};
-struct rx_msg {
+struct ray_rx_msg {
struct mac_header mac;
- UCHAR var[1];
+ UCHAR var[0];
};
struct tx_msg {
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 85370a7dfa8b..a330c69583d6 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -244,6 +244,10 @@ enum ndis_80211_power_mode {
NDIS_80211_POWER_MODE_FAST_PSP,
};
+enum ndis_80211_pmkid_cand_list_flag_bits {
+ NDIS_80211_PMKID_CAND_PREAUTH = cpu_to_le32(1 << 0)
+};
+
struct ndis_80211_auth_request {
__le32 length;
u8 bssid[6];
@@ -387,19 +391,17 @@ struct ndis_80211_capability {
struct ndis_80211_bssid_info {
u8 bssid[6];
u8 pmkid[16];
-};
+} __packed;
struct ndis_80211_pmkid {
__le32 length;
__le32 bssid_info_count;
struct ndis_80211_bssid_info bssid_info[0];
-};
+} __packed;
/*
* private data
*/
-#define NET_TYPE_11FB 0
-
#define CAP_MODE_80211A 1
#define CAP_MODE_80211B 2
#define CAP_MODE_80211G 4
@@ -414,6 +416,7 @@ struct ndis_80211_pmkid {
#define RNDIS_WLAN_ALG_TKIP (1<<1)
#define RNDIS_WLAN_ALG_CCMP (1<<2)
+#define RNDIS_WLAN_NUM_KEYS 4
#define RNDIS_WLAN_KEY_MGMT_NONE 0
#define RNDIS_WLAN_KEY_MGMT_802_1X (1<<0)
#define RNDIS_WLAN_KEY_MGMT_PSK (1<<1)
@@ -516,7 +519,7 @@ struct rndis_wlan_private {
/* encryption stuff */
int encr_tx_key_index;
- struct rndis_wlan_encr_key encr_keys[4];
+ struct rndis_wlan_encr_key encr_keys[RNDIS_WLAN_NUM_KEYS];
int wpa_version;
u8 command_buffer[COMMAND_BUFFER_SIZE];
@@ -1346,6 +1349,32 @@ static int set_channel(struct usbnet *usbdev, int channel)
return ret;
}
+static struct ieee80211_channel *get_current_channel(struct usbnet *usbdev,
+ u16 *beacon_interval)
+{
+ struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
+ struct ieee80211_channel *channel;
+ struct ndis_80211_conf config;
+ int len, ret;
+
+ /* Get channel and beacon interval */
+ len = sizeof(config);
+ ret = rndis_query_oid(usbdev, OID_802_11_CONFIGURATION, &config, &len);
+ netdev_dbg(usbdev->net, "%s(): OID_802_11_CONFIGURATION -> %d\n",
+ __func__, ret);
+ if (ret < 0)
+ return NULL;
+
+ channel = ieee80211_get_channel(priv->wdev.wiphy,
+ KHZ_TO_MHZ(le32_to_cpu(config.ds_config)));
+ if (!channel)
+ return NULL;
+
+ if (beacon_interval)
+ *beacon_interval = le16_to_cpu(config.beacon_period);
+ return channel;
+}
+
/* index must be 0 - N, as per NDIS */
static int add_wep_key(struct usbnet *usbdev, const u8 *key, int key_len,
int index)
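
get_current_channel() consolidates the duplicated OID_802_11_CONFIGURATION query into one helper that returns the channel and fills the beacon interval only if the caller asked for it (NULL is allowed, as the roaming path later in this patch relies on). A small illustration of that optional out-parameter convention (stand-in types and values, not the driver's API):

#include <stdio.h>

/* Returns the "channel" and, if requested, the beacon interval. */
static int get_channel(unsigned int *beacon_interval)
{
	int channel = 11;		/* pretend this came from the device */

	if (beacon_interval)
		*beacon_interval = 100;	/* illustrative value */
	return channel;
}

int main(void)
{
	unsigned int bi = 0;

	printf("channel only:  %d\n", get_channel(NULL));
	printf("with interval: %d (bi=%u)\n", get_channel(&bi), bi);
	return 0;
}
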
@@ -1535,6 +1564,9 @@ static int remove_key(struct usbnet *usbdev, int index, const u8 *bssid)
bool is_wpa;
int ret;
+ if (index >= RNDIS_WLAN_NUM_KEYS)
+ return -ENOENT;
+
if (priv->encr_keys[index].len == 0)
return 0;
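
The new check rejects key indices at or beyond RNDIS_WLAN_NUM_KEYS before touching encr_keys[], returning -ENOENT instead of reading past the array. A minimal sketch of that guard (the array size matches the define added earlier in this patch; everything else is illustrative):

#include <stdio.h>
#include <errno.h>

#define RNDIS_WLAN_NUM_KEYS 4

struct key { int len; };

static struct key keys[RNDIS_WLAN_NUM_KEYS];

static int remove_key(int index)
{
	if (index < 0 || index >= RNDIS_WLAN_NUM_KEYS)
		return -ENOENT;
	if (keys[index].len == 0)
		return 0;	/* nothing to remove */
	keys[index].len = 0;
	return 0;
}

int main(void)
{
	printf("index 2: %d\n", remove_key(2));	/* 0 */
	printf("index 7: %d\n", remove_key(7));	/* -ENOENT */
	return 0;
}
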
@@ -1972,11 +2004,12 @@ static int rndis_scan(struct wiphy *wiphy, struct net_device *dev,
return ret;
}
-static struct cfg80211_bss *rndis_bss_info_update(struct usbnet *usbdev,
- struct ndis_80211_bssid_ex *bssid)
+static bool rndis_bss_info_update(struct usbnet *usbdev,
+ struct ndis_80211_bssid_ex *bssid)
{
struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
struct ieee80211_channel *channel;
+ struct cfg80211_bss *bss;
s32 signal;
u64 timestamp;
u16 capability;
@@ -2015,9 +2048,12 @@ static struct cfg80211_bss *rndis_bss_info_update(struct usbnet *usbdev,
capability = le16_to_cpu(fixed->capabilities);
beacon_interval = le16_to_cpu(fixed->beacon_interval);
- return cfg80211_inform_bss(priv->wdev.wiphy, channel, bssid->mac,
+ bss = cfg80211_inform_bss(priv->wdev.wiphy, channel, bssid->mac,
timestamp, capability, beacon_interval, ie, ie_len, signal,
GFP_KERNEL);
+ cfg80211_put_bss(bss);
+
+ return (bss != NULL);
}
static struct ndis_80211_bssid_ex *next_bssid_list_item(
@@ -2451,6 +2487,9 @@ static int rndis_set_default_key(struct wiphy *wiphy, struct net_device *netdev,
netdev_dbg(usbdev->net, "%s(%i)\n", __func__, key_index);
+ if (key_index >= RNDIS_WLAN_NUM_KEYS)
+ return -ENOENT;
+
priv->encr_tx_key_index = key_index;
if (is_wpa_key(priv, key_index))
@@ -2639,12 +2678,12 @@ static void rndis_wlan_craft_connected_bss(struct usbnet *usbdev, u8 *bssid,
{
struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
struct ieee80211_channel *channel;
- struct ndis_80211_conf config;
struct ndis_80211_ssid ssid;
+ struct cfg80211_bss *bss;
s32 signal;
u64 timestamp;
u16 capability;
- u16 beacon_interval;
+ u16 beacon_interval = 0;
__le32 rssi;
u8 ie_buf[34];
int len, ret, ie_len;
@@ -2669,22 +2708,10 @@ static void rndis_wlan_craft_connected_bss(struct usbnet *usbdev, u8 *bssid,
}
/* Get channel and beacon interval */
- len = sizeof(config);
- ret = rndis_query_oid(usbdev, OID_802_11_CONFIGURATION, &config, &len);
- netdev_dbg(usbdev->net, "%s(): OID_802_11_CONFIGURATION -> %d\n",
- __func__, ret);
- if (ret >= 0) {
- beacon_interval = le16_to_cpu(config.beacon_period);
- channel = ieee80211_get_channel(priv->wdev.wiphy,
- KHZ_TO_MHZ(le32_to_cpu(config.ds_config)));
- if (!channel) {
- netdev_warn(usbdev->net, "%s(): could not get channel."
- "\n", __func__);
- return;
- }
- } else {
- netdev_warn(usbdev->net, "%s(): could not get configuration.\n",
- __func__);
+ channel = get_current_channel(usbdev, &beacon_interval);
+ if (!channel) {
+ netdev_warn(usbdev->net, "%s(): could not get channel.\n",
+ __func__);
return;
}
@@ -2714,9 +2741,10 @@ static void rndis_wlan_craft_connected_bss(struct usbnet *usbdev, u8 *bssid,
bssid, (u32)timestamp, capability, beacon_interval, ie_len,
ssid.essid, signal);
- cfg80211_inform_bss(priv->wdev.wiphy, channel, bssid,
+ bss = cfg80211_inform_bss(priv->wdev.wiphy, channel, bssid,
timestamp, capability, beacon_interval, ie_buf, ie_len,
signal, GFP_KERNEL);
+ cfg80211_put_bss(bss);
}
/*
@@ -2828,8 +2856,9 @@ static void rndis_wlan_do_link_up_work(struct usbnet *usbdev)
req_ie_len, resp_ie,
resp_ie_len, 0, GFP_KERNEL);
else
- cfg80211_roamed(usbdev->net, NULL, bssid,
- req_ie, req_ie_len,
+ cfg80211_roamed(usbdev->net,
+ get_current_channel(usbdev, NULL),
+ bssid, req_ie, req_ie_len,
resp_ie, resp_ie_len, GFP_KERNEL);
} else if (priv->infra_mode == NDIS_80211_INFRA_ADHOC)
cfg80211_ibss_joined(usbdev->net, bssid, GFP_KERNEL);
@@ -2995,25 +3024,13 @@ static void rndis_wlan_pmkid_cand_list_indication(struct usbnet *usbdev,
for (i = 0; i < le32_to_cpu(cand_list->num_candidates); i++) {
struct ndis_80211_pmkid_candidate *cand =
&cand_list->candidate_list[i];
+ bool preauth = !!(cand->flags & NDIS_80211_PMKID_CAND_PREAUTH);
- netdev_dbg(usbdev->net, "cand[%i]: flags: 0x%08x, bssid: %pM\n",
- i, le32_to_cpu(cand->flags), cand->bssid);
-
-#if 0
- struct iw_pmkid_cand pcand;
- union iwreq_data wrqu;
+ netdev_dbg(usbdev->net, "cand[%i]: flags: 0x%08x, preauth: %d, bssid: %pM\n",
+ i, le32_to_cpu(cand->flags), preauth, cand->bssid);
- memset(&pcand, 0, sizeof(pcand));
- if (le32_to_cpu(cand->flags) & 0x01)
- pcand.flags |= IW_PMKID_CAND_PREAUTH;
- pcand.index = i;
- memcpy(pcand.bssid.sa_data, cand->bssid, ETH_ALEN);
-
- memset(&wrqu, 0, sizeof(wrqu));
- wrqu.data.length = sizeof(pcand);
- wireless_send_event(usbdev->net, IWEVPMKIDCAND, &wrqu,
- (u8 *)&pcand);
-#endif
+ cfg80211_pmksa_candidate_notify(usbdev->net, i, cand->bssid,
+ preauth, GFP_ATOMIC);
}
}
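
NDIS_80211_PMKID_CAND_PREAUTH is defined earlier in this patch as cpu_to_le32(1 << 0), so the on-wire little-endian flags field can be tested without converting it first; the !! collapses the masked result to a plain bool. A hedged userspace version of the same trick using <endian.h> (the flag position matches the enum above, the sample flags value is invented):

#include <endian.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PMKID_CAND_PREAUTH htole32(1u << 0)	/* constant kept in wire (LE) order */

int main(void)
{
	uint32_t wire_flags = htole32(0x3);	/* as read from the device, LE */
	bool preauth = !!(wire_flags & PMKID_CAND_PREAUTH);

	/* Convert to host order only where the value is printed or compared numerically. */
	printf("flags: 0x%08x, preauth: %d\n", le32toh(wire_flags), preauth);
	return 0;
}
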
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index b31f38d41a47..1de9c752c88b 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -39,7 +39,7 @@
/*
* Allow hardware encryption to be disabled.
*/
-static int modparam_nohwcrypt;
+static bool modparam_nohwcrypt;
module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
index 4778620347c4..2571a2fa3d09 100644
--- a/drivers/net/wireless/rt2x00/rt2800.h
+++ b/drivers/net/wireless/rt2x00/rt2800.h
@@ -50,7 +50,7 @@
* RF2853 2.4G/5G 3T3R
* RF3320 2.4G 1T1R(RT3350/RT3370/RT3390)
* RF3322 2.4G 2T2R(RT3352/RT3371/RT3372/RT3391/RT3392)
- * RF3853 2.4G/5G 3T3R(RT3883/RT3563/RT3573/RT3593/RT3662)
+ * RF3053 2.4G/5G 3T3R(RT3883/RT3563/RT3573/RT3593/RT3662)
* RF5370 2.4G 1T1R
* RF5390 2.4G 1T1R
*/
@@ -66,7 +66,7 @@
#define RF2853 0x000a
#define RF3320 0x000b
#define RF3322 0x000c
-#define RF3853 0x000d
+#define RF3053 0x000d
#define RF5370 0x5370
#define RF5390 0x5390
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 3f183a15186e..22a1a8fc6e02 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -1203,8 +1203,10 @@ void rt2800_config_filter(struct rt2x00_dev *rt2x00dev,
!(filter_flags & FIF_CONTROL));
rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_PSPOLL,
!(filter_flags & FIF_PSPOLL));
- rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_BA, 1);
- rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_BAR, 0);
+ rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_BA,
+ !(filter_flags & FIF_CONTROL));
+ rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_BAR,
+ !(filter_flags & FIF_CONTROL));
rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_CNTL,
!(filter_flags & FIF_CONTROL));
rt2800_register_write(rt2x00dev, RX_FILTER_CFG, reg);
@@ -1942,19 +1944,24 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
info->default_power2 = TXPOWER_A_TO_DEV(info->default_power2);
}
- if (rt2x00_rf(rt2x00dev, RF2020) ||
- rt2x00_rf(rt2x00dev, RF3020) ||
- rt2x00_rf(rt2x00dev, RF3021) ||
- rt2x00_rf(rt2x00dev, RF3022) ||
- rt2x00_rf(rt2x00dev, RF3320))
+ switch (rt2x00dev->chip.rf) {
+ case RF2020:
+ case RF3020:
+ case RF3021:
+ case RF3022:
+ case RF3320:
rt2800_config_channel_rf3xxx(rt2x00dev, conf, rf, info);
- else if (rt2x00_rf(rt2x00dev, RF3052))
+ break;
+ case RF3052:
rt2800_config_channel_rf3052(rt2x00dev, conf, rf, info);
- else if (rt2x00_rf(rt2x00dev, RF5370) ||
- rt2x00_rf(rt2x00dev, RF5390))
+ break;
+ case RF5370:
+ case RF5390:
rt2800_config_channel_rf53xx(rt2x00dev, conf, rf, info);
- else
+ break;
+ default:
rt2800_config_channel_rf2xxx(rt2x00dev, conf, rf, info);
+ }
/*
* Change BBP settings
@@ -3771,7 +3778,7 @@ static void rt2800_efuse_read(struct rt2x00_dev *rt2x00dev, unsigned int i)
/* Apparently the data is read from end to start */
rt2800_register_read_lock(rt2x00dev, EFUSE_DATA3, &reg);
/* The returned value is in CPU order, but eeprom is le */
- rt2x00dev->eeprom[i] = cpu_to_le32(reg);
+ *(u32 *)&rt2x00dev->eeprom[i] = cpu_to_le32(reg);
rt2800_register_read_lock(rt2x00dev, EFUSE_DATA2, &reg);
*(u32 *)&rt2x00dev->eeprom[i + 2] = cpu_to_le32(reg);
rt2800_register_read_lock(rt2x00dev, EFUSE_DATA1, &reg);
@@ -3930,15 +3937,18 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
rt2x00_set_chip(rt2x00dev, rt2x00_get_field32(reg, MAC_CSR0_CHIPSET),
value, rt2x00_get_field32(reg, MAC_CSR0_REVISION));
- if (!rt2x00_rt(rt2x00dev, RT2860) &&
- !rt2x00_rt(rt2x00dev, RT2872) &&
- !rt2x00_rt(rt2x00dev, RT2883) &&
- !rt2x00_rt(rt2x00dev, RT3070) &&
- !rt2x00_rt(rt2x00dev, RT3071) &&
- !rt2x00_rt(rt2x00dev, RT3090) &&
- !rt2x00_rt(rt2x00dev, RT3390) &&
- !rt2x00_rt(rt2x00dev, RT3572) &&
- !rt2x00_rt(rt2x00dev, RT5390)) {
+ switch (rt2x00dev->chip.rt) {
+ case RT2860:
+ case RT2872:
+ case RT2883:
+ case RT3070:
+ case RT3071:
+ case RT3090:
+ case RT3390:
+ case RT3572:
+ case RT5390:
+ break;
+ default:
ERROR(rt2x00dev, "Invalid RT chipset detected.\n");
return -ENODEV;
}
@@ -4552,6 +4562,9 @@ int rt2800_get_survey(struct ieee80211_hw *hw, int idx,
survey->channel_time_ext_busy = busy_ext / 1000;
}
+ if (!(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL))
+ survey->filled |= SURVEY_INFO_IN_USE;
+
return 0;
}
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index da48c8ac27bd..dc88baefa72e 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -50,7 +50,7 @@
/*
* Allow hardware encryption to be disabled.
*/
-static int modparam_nohwcrypt = 0;
+static bool modparam_nohwcrypt = false;
module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
@@ -422,7 +422,6 @@ static int rt2800pci_init_queues(struct rt2x00_dev *rt2x00dev)
static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
enum dev_state state)
{
- int mask = (state == STATE_RADIO_IRQ_ON);
u32 reg;
unsigned long flags;
@@ -436,25 +435,14 @@ static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
}
spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
- rt2x00pci_register_read(rt2x00dev, INT_MASK_CSR, &reg);
- rt2x00_set_field32(&reg, INT_MASK_CSR_RXDELAYINT, 0);
- rt2x00_set_field32(&reg, INT_MASK_CSR_TXDELAYINT, 0);
- rt2x00_set_field32(&reg, INT_MASK_CSR_RX_DONE, mask);
- rt2x00_set_field32(&reg, INT_MASK_CSR_AC0_DMA_DONE, 0);
- rt2x00_set_field32(&reg, INT_MASK_CSR_AC1_DMA_DONE, 0);
- rt2x00_set_field32(&reg, INT_MASK_CSR_AC2_DMA_DONE, 0);
- rt2x00_set_field32(&reg, INT_MASK_CSR_AC3_DMA_DONE, 0);
- rt2x00_set_field32(&reg, INT_MASK_CSR_HCCA_DMA_DONE, 0);
- rt2x00_set_field32(&reg, INT_MASK_CSR_MGMT_DMA_DONE, 0);
- rt2x00_set_field32(&reg, INT_MASK_CSR_MCU_COMMAND, 0);
- rt2x00_set_field32(&reg, INT_MASK_CSR_RXTX_COHERENT, 0);
- rt2x00_set_field32(&reg, INT_MASK_CSR_TBTT, mask);
- rt2x00_set_field32(&reg, INT_MASK_CSR_PRE_TBTT, mask);
- rt2x00_set_field32(&reg, INT_MASK_CSR_TX_FIFO_STATUS, mask);
- rt2x00_set_field32(&reg, INT_MASK_CSR_AUTO_WAKEUP, mask);
- rt2x00_set_field32(&reg, INT_MASK_CSR_GPTIMER, 0);
- rt2x00_set_field32(&reg, INT_MASK_CSR_RX_COHERENT, 0);
- rt2x00_set_field32(&reg, INT_MASK_CSR_TX_COHERENT, 0);
+ reg = 0;
+ if (state == STATE_RADIO_IRQ_ON) {
+ rt2x00_set_field32(&reg, INT_MASK_CSR_RX_DONE, 1);
+ rt2x00_set_field32(&reg, INT_MASK_CSR_TBTT, 1);
+ rt2x00_set_field32(&reg, INT_MASK_CSR_PRE_TBTT, 1);
+ rt2x00_set_field32(&reg, INT_MASK_CSR_TX_FIFO_STATUS, 1);
+ rt2x00_set_field32(&reg, INT_MASK_CSR_AUTO_WAKEUP, 1);
+ }
rt2x00pci_register_write(rt2x00dev, INT_MASK_CSR, reg);
spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
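
Instead of reading INT_MASK_CSR back and toggling every field, the rewrite above starts from 0 and sets only the five interrupt sources it cares about when the radio IRQs are enabled; disabling simply writes an all-zero mask. A small sketch of building such a mask (the bit positions here are invented for illustration and are not the real register layout):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Illustrative bit positions only. */
#define INT_RX_DONE        (1u << 0)
#define INT_TBTT           (1u << 1)
#define INT_PRE_TBTT       (1u << 2)
#define INT_TX_FIFO_STATUS (1u << 3)
#define INT_AUTO_WAKEUP    (1u << 4)

static uint32_t irq_mask(bool enable)
{
	uint32_t reg = 0;

	if (enable)
		reg = INT_RX_DONE | INT_TBTT | INT_PRE_TBTT |
		      INT_TX_FIFO_STATUS | INT_AUTO_WAKEUP;
	return reg;
}

int main(void)
{
	printf("enabled:  0x%08x\n", irq_mask(true));
	printf("disabled: 0x%08x\n", irq_mask(false));
	return 0;
}
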
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index 9ea4ecb3d9c8..262ee9eefb6f 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -45,7 +45,7 @@
/*
* Allow hardware encryption to be disabled.
*/
-static int modparam_nohwcrypt;
+static bool modparam_nohwcrypt;
module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
@@ -400,10 +400,10 @@ static void rt2800usb_write_tx_desc(struct queue_entry *entry,
/*
* The size of TXINFO_W0_USB_DMA_TX_PKT_LEN is
* TXWI + 802.11 header + L2 pad + payload + pad,
- * so need to decrease size of TXINFO and USB end pad.
+ * so need to decrease size of TXINFO.
*/
rt2x00_set_field32(&word, TXINFO_W0_USB_DMA_TX_PKT_LEN,
- entry->skb->len - TXINFO_DESC_SIZE - 4);
+ roundup(entry->skb->len, 4) - TXINFO_DESC_SIZE);
rt2x00_set_field32(&word, TXINFO_W0_WIV,
!test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags));
rt2x00_set_field32(&word, TXINFO_W0_QSEL, 2);
@@ -421,37 +421,20 @@ static void rt2800usb_write_tx_desc(struct queue_entry *entry,
skbdesc->desc_len = TXINFO_DESC_SIZE + TXWI_DESC_SIZE;
}
-static void rt2800usb_write_tx_data(struct queue_entry *entry,
- struct txentry_desc *txdesc)
+/*
+ * TX data initialization
+ */
+static int rt2800usb_get_tx_data_len(struct queue_entry *entry)
{
- unsigned int len;
- int err;
-
- rt2800_write_tx_data(entry, txdesc);
-
/*
- * pad(1~3 bytes) is added after each 802.11 payload.
- * USB end pad(4 bytes) is added at each USB bulk out packet end.
+ * pad(1~3 bytes) is needed after each 802.11 payload.
+ * USB end pad(4 bytes) is needed at each USB bulk out packet end.
* TX frame format is :
* | TXINFO | TXWI | 802.11 header | L2 pad | payload | pad | USB end pad |
* |<------------- tx_pkt_len ------------->|
*/
- len = roundup(entry->skb->len, 4) + 4;
- err = skb_padto(entry->skb, len);
- if (unlikely(err)) {
- WARNING(entry->queue->rt2x00dev, "TX SKB padding error, out of memory\n");
- return;
- }
- entry->skb->len = len;
-}
-
-/*
- * TX data initialization
- */
-static int rt2800usb_get_tx_data_len(struct queue_entry *entry)
-{
- return entry->skb->len;
+ return roundup(entry->skb->len, 4) + 4;
}
/*
@@ -807,7 +790,7 @@ static const struct rt2x00lib_ops rt2800usb_rt2x00_ops = {
.flush_queue = rt2x00usb_flush_queue,
.tx_dma_done = rt2800usb_tx_dma_done,
.write_tx_desc = rt2800usb_write_tx_desc,
- .write_tx_data = rt2800usb_write_tx_data,
+ .write_tx_data = rt2800_write_tx_data,
.write_beacon = rt2800_write_beacon,
.clear_beacon = rt2800_clear_beacon,
.get_tx_data_len = rt2800usb_get_tx_data_len,
@@ -914,12 +897,14 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x050d, 0x8053) },
{ USB_DEVICE(0x050d, 0x805c) },
{ USB_DEVICE(0x050d, 0x815c) },
+ { USB_DEVICE(0x050d, 0x825a) },
{ USB_DEVICE(0x050d, 0x825b) },
{ USB_DEVICE(0x050d, 0x935a) },
{ USB_DEVICE(0x050d, 0x935b) },
/* Buffalo */
{ USB_DEVICE(0x0411, 0x00e8) },
{ USB_DEVICE(0x0411, 0x0158) },
+ { USB_DEVICE(0x0411, 0x015d) },
{ USB_DEVICE(0x0411, 0x016f) },
{ USB_DEVICE(0x0411, 0x01a2) },
/* Corega */
@@ -934,6 +919,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x07d1, 0x3c0e) },
{ USB_DEVICE(0x07d1, 0x3c0f) },
{ USB_DEVICE(0x07d1, 0x3c11) },
+ { USB_DEVICE(0x07d1, 0x3c13) },
+ { USB_DEVICE(0x07d1, 0x3c15) },
{ USB_DEVICE(0x07d1, 0x3c16) },
/* Draytek */
{ USB_DEVICE(0x07fa, 0x7712) },
@@ -943,6 +930,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x7392, 0x7711) },
{ USB_DEVICE(0x7392, 0x7717) },
{ USB_DEVICE(0x7392, 0x7718) },
+ { USB_DEVICE(0x7392, 0x7722) },
/* Encore */
{ USB_DEVICE(0x203d, 0x1480) },
{ USB_DEVICE(0x203d, 0x14a9) },
@@ -976,6 +964,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x13b1, 0x0031) },
{ USB_DEVICE(0x1737, 0x0070) },
{ USB_DEVICE(0x1737, 0x0071) },
+ { USB_DEVICE(0x1737, 0x0077) },
+ { USB_DEVICE(0x1737, 0x0078) },
/* Logitec */
{ USB_DEVICE(0x0789, 0x0162) },
{ USB_DEVICE(0x0789, 0x0163) },
@@ -999,9 +989,13 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x0db0, 0x871b) },
{ USB_DEVICE(0x0db0, 0x871c) },
{ USB_DEVICE(0x0db0, 0x899a) },
+ /* Ovislink */
+ { USB_DEVICE(0x1b75, 0x3071) },
+ { USB_DEVICE(0x1b75, 0x3072) },
/* Para */
{ USB_DEVICE(0x20b8, 0x8888) },
/* Pegatron */
+ { USB_DEVICE(0x1d4d, 0x0002) },
{ USB_DEVICE(0x1d4d, 0x000c) },
{ USB_DEVICE(0x1d4d, 0x000e) },
{ USB_DEVICE(0x1d4d, 0x0011) },
@@ -1054,7 +1048,9 @@ static struct usb_device_id rt2800usb_device_table[] = {
/* Sparklan */
{ USB_DEVICE(0x15a9, 0x0006) },
/* Sweex */
+ { USB_DEVICE(0x177f, 0x0153) },
{ USB_DEVICE(0x177f, 0x0302) },
+ { USB_DEVICE(0x177f, 0x0313) },
/* U-Media */
{ USB_DEVICE(0x157e, 0x300e) },
{ USB_DEVICE(0x157e, 0x3013) },
@@ -1138,27 +1134,24 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x13d3, 0x3322) },
/* Belkin */
{ USB_DEVICE(0x050d, 0x1003) },
- { USB_DEVICE(0x050d, 0x825a) },
/* Buffalo */
{ USB_DEVICE(0x0411, 0x012e) },
{ USB_DEVICE(0x0411, 0x0148) },
{ USB_DEVICE(0x0411, 0x0150) },
- { USB_DEVICE(0x0411, 0x015d) },
/* Corega */
{ USB_DEVICE(0x07aa, 0x0041) },
{ USB_DEVICE(0x07aa, 0x0042) },
{ USB_DEVICE(0x18c5, 0x0008) },
/* D-Link */
{ USB_DEVICE(0x07d1, 0x3c0b) },
- { USB_DEVICE(0x07d1, 0x3c13) },
- { USB_DEVICE(0x07d1, 0x3c15) },
{ USB_DEVICE(0x07d1, 0x3c17) },
{ USB_DEVICE(0x2001, 0x3c17) },
/* Edimax */
{ USB_DEVICE(0x7392, 0x4085) },
- { USB_DEVICE(0x7392, 0x7722) },
/* Encore */
{ USB_DEVICE(0x203d, 0x14a1) },
+ /* Fujitsu Stylistic 550 */
+ { USB_DEVICE(0x1690, 0x0761) },
/* Gemtek */
{ USB_DEVICE(0x15a9, 0x0010) },
/* Gigabyte */
@@ -1170,20 +1163,13 @@ static struct usb_device_id rt2800usb_device_table[] = {
/* LevelOne */
{ USB_DEVICE(0x1740, 0x0605) },
{ USB_DEVICE(0x1740, 0x0615) },
- /* Linksys */
- { USB_DEVICE(0x1737, 0x0077) },
- { USB_DEVICE(0x1737, 0x0078) },
/* Logitec */
{ USB_DEVICE(0x0789, 0x0168) },
{ USB_DEVICE(0x0789, 0x0169) },
/* Motorola */
{ USB_DEVICE(0x100d, 0x9032) },
- /* Ovislink */
- { USB_DEVICE(0x1b75, 0x3071) },
- { USB_DEVICE(0x1b75, 0x3072) },
/* Pegatron */
{ USB_DEVICE(0x05a6, 0x0101) },
- { USB_DEVICE(0x1d4d, 0x0002) },
{ USB_DEVICE(0x1d4d, 0x0010) },
/* Planex */
{ USB_DEVICE(0x2019, 0x5201) },
@@ -1202,9 +1188,6 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x083a, 0xc522) },
{ USB_DEVICE(0x083a, 0xd522) },
{ USB_DEVICE(0x083a, 0xf511) },
- /* Sweex */
- { USB_DEVICE(0x177f, 0x0153) },
- { USB_DEVICE(0x177f, 0x0313) },
/* Zyxel */
{ USB_DEVICE(0x0586, 0x341a) },
#endif
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index 99ff12d0c29d..b03b22c47b18 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -189,9 +189,9 @@ struct rt2x00_chip {
#define RT3090 0x3090 /* 2.4GHz PCIe */
#define RT3390 0x3390
#define RT3572 0x3572
-#define RT3593 0x3593 /* PCIe */
+#define RT3593 0x3593
#define RT3883 0x3883 /* WSOC */
-#define RT5390 0x5390 /* 2.4GHz */
+#define RT5390 0x5390 /* 2.4GHz */
u16 rf;
u16 rev;
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index edd317fa7c0a..c3e1aa7c1a80 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -831,11 +831,11 @@ static int rt2x00lib_probe_hw_modes(struct rt2x00_dev *rt2x00dev,
if (spec->supported_rates & SUPPORT_RATE_OFDM)
num_rates += 8;
- channels = kzalloc(sizeof(*channels) * spec->num_channels, GFP_KERNEL);
+ channels = kcalloc(spec->num_channels, sizeof(*channels), GFP_KERNEL);
if (!channels)
return -ENOMEM;
- rates = kzalloc(sizeof(*rates) * num_rates, GFP_KERNEL);
+ rates = kcalloc(num_rates, sizeof(*rates), GFP_KERNEL);
if (!rates)
goto exit_free_channels;
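
kcalloc(n, size, flags) is preferred over kzalloc(n * size, flags) because the multiplication is overflow-checked before the allocation. The userspace counterparts are calloc() versus a hand-rolled malloc(n * size); a hedged sketch of why the check matters:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

int main(void)
{
	size_t n = SIZE_MAX / 4 + 2;	/* n * 4 wraps around to a tiny value */
	size_t size = 4;

	/* calloc() checks n * size for overflow and fails cleanly... */
	void *a = calloc(n, size);
	printf("calloc: %s\n", a ? "allocated (unexpected)" : "refused (overflow caught)");
	free(a);

	/* ...whereas the naive multiplication silently asks for just a few bytes. */
	printf("naive n * size = %zu\n", n * size);
	return 0;
}
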
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index bf0acff07807..ede3c58e6783 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -160,7 +160,7 @@ void rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
exit_fail:
rt2x00queue_pause_queue(queue);
exit_free_skb:
- dev_kfree_skb_any(skb);
+ ieee80211_free_txskb(hw, skb);
}
EXPORT_SYMBOL_GPL(rt2x00mac_tx);
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index 1e31050dafc9..2eea3866504d 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -298,12 +298,22 @@ static bool rt2x00usb_kick_tx_entry(struct queue_entry *entry, void* data)
return false;
/*
- * USB devices cannot blindly pass the skb->len as the
- * length of the data to usb_fill_bulk_urb. Pass the skb
- * to the driver to determine what the length should be.
+ * USB devices require certain padding at the end of each frame
+ * and urb. Those paddings are not included in skbs. Pass entry
+ * to the driver to determine what the overall length should be.
*/
length = rt2x00dev->ops->lib->get_tx_data_len(entry);
+ status = skb_padto(entry->skb, length);
+ if (unlikely(status)) {
+ /* TODO: report something more appropriate than IO_FAILED. */
+ WARNING(rt2x00dev, "TX SKB padding error, out of memory\n");
+ set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
+ rt2x00lib_dmadone(entry);
+
+ return false;
+ }
+
usb_fill_bulk_urb(entry_priv->urb, usb_dev,
usb_sndbulkpipe(usb_dev, entry->queue->usb_endpoint),
entry->skb->data, length,
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index bf55b4a311e3..e0c6d117429d 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -41,7 +41,7 @@
/*
* Allow hardware encryption to be disabled.
*/
-static int modparam_nohwcrypt = 0;
+static bool modparam_nohwcrypt = false;
module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
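
[editor's note] This hunk and the rt73usb one below back a bool module parameter with an actual bool variable, in line with the kernel-wide cleanup of module_param(..., bool, ...) users; the parameter declaration itself is unchanged. A minimal sketch with an illustrative parameter name:

#include <linux/module.h>
#include <linux/moduleparam.h>

static bool example_nohwcrypt;	/* bool variable for a bool parameter */
module_param_named(nohwcrypt, example_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
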
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index 0e5a10179182..e477a964081d 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -40,7 +40,7 @@
/*
* Allow hardware encryption to be disabled.
*/
-static int modparam_nohwcrypt;
+static bool modparam_nohwcrypt;
module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index b4ce93436d2e..8d6eb0f56c03 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -345,9 +345,9 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw)
if (is_valid_ether_addr(rtlefuse->dev_addr)) {
SET_IEEE80211_PERM_ADDR(hw, rtlefuse->dev_addr);
} else {
- u8 rtlmac[] = { 0x00, 0xe0, 0x4c, 0x81, 0x92, 0x00 };
- get_random_bytes((rtlmac + (ETH_ALEN - 1)), 1);
- SET_IEEE80211_PERM_ADDR(hw, rtlmac);
+ u8 rtlmac1[] = { 0x00, 0xe0, 0x4c, 0x81, 0x92, 0x00 };
+ get_random_bytes((rtlmac1 + (ETH_ALEN - 1)), 1);
+ SET_IEEE80211_PERM_ADDR(hw, rtlmac1);
}
}
@@ -396,7 +396,7 @@ void rtl_init_rfkill(struct ieee80211_hw *hw)
u8 valid = 0;
/*set init state to on */
- rtlpriv->rfkill.rfkill_state = 1;
+ rtlpriv->rfkill.rfkill_state = true;
wiphy_rfkill_set_hw_state(hw->wiphy, 0);
radio_state = rtlpriv->cfg->ops->radio_onoff_checking(hw, &valid);
@@ -448,12 +448,12 @@ int rtl_init_core(struct ieee80211_hw *hw)
/* <4> locks */
mutex_init(&rtlpriv->locks.conf_mutex);
+ mutex_init(&rtlpriv->locks.ps_mutex);
spin_lock_init(&rtlpriv->locks.ips_lock);
spin_lock_init(&rtlpriv->locks.irq_th_lock);
spin_lock_init(&rtlpriv->locks.h2c_lock);
spin_lock_init(&rtlpriv->locks.rf_ps_lock);
spin_lock_init(&rtlpriv->locks.rf_lock);
- spin_lock_init(&rtlpriv->locks.lps_lock);
spin_lock_init(&rtlpriv->locks.waitq_lock);
spin_lock_init(&rtlpriv->locks.cck_and_rw_pagea_lock);
diff --git a/drivers/net/wireless/rtlwifi/base.h b/drivers/net/wireless/rtlwifi/base.h
index 4ae905983d0d..f66b5757f6b9 100644
--- a/drivers/net/wireless/rtlwifi/base.h
+++ b/drivers/net/wireless/rtlwifi/base.h
@@ -76,7 +76,7 @@ enum ap_peer {
SET_BITS_TO_LE_2BYTE(_hdr, 8, 1, _val)
#define SET_80211_PS_POLL_AID(_hdr, _val) \
- (*(u16 *)((u8 *)(_hdr) + 2) = le16_to_cpu(_val))
+ (*(u16 *)((u8 *)(_hdr) + 2) = _val)
#define SET_80211_PS_POLL_BSSID(_hdr, _val) \
memcpy(((u8 *)(_hdr)) + 4, (u8 *)(_val), ETH_ALEN)
#define SET_80211_PS_POLL_TA(_hdr, _val) \
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index eb61061821e4..39e0907a3c4e 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -78,7 +78,7 @@ static void _rtl_pci_update_default_setting(struct ieee80211_hw *hw)
u8 init_aspm;
ppsc->reg_rfps_level = 0;
- ppsc->support_aspm = 0;
+ ppsc->support_aspm = false;
/*Update PCI ASPM setting */
ppsc->const_amdpci_aspm = rtlpci->const_amdpci_aspm;
@@ -570,9 +570,9 @@ static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
if (ieee80211_is_nullfunc(fc)) {
if (ieee80211_has_pm(fc)) {
rtlpriv->mac80211.offchan_delay = true;
- rtlpriv->psc.state_inap = 1;
+ rtlpriv->psc.state_inap = true;
} else {
- rtlpriv->psc.state_inap = 0;
+ rtlpriv->psc.state_inap = false;
}
}
@@ -610,7 +610,7 @@ tx_status_ok:
if (((rtlpriv->link_info.num_rx_inperiod +
rtlpriv->link_info.num_tx_inperiod) > 8) ||
(rtlpriv->link_info.num_rx_inperiod > 2)) {
- tasklet_schedule(&rtlpriv->works.ips_leave_tasklet);
+ schedule_work(&rtlpriv->works.lps_leave_work);
}
}
@@ -736,7 +736,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
if (((rtlpriv->link_info.num_rx_inperiod +
rtlpriv->link_info.num_tx_inperiod) > 8) ||
(rtlpriv->link_info.num_rx_inperiod > 2)) {
- tasklet_schedule(&rtlpriv->works.ips_leave_tasklet);
+ schedule_work(&rtlpriv->works.lps_leave_work);
}
dev_kfree_skb_any(skb);
@@ -780,6 +780,7 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
unsigned long flags;
u32 inta = 0;
u32 intb = 0;
+ irqreturn_t ret = IRQ_HANDLED;
spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
@@ -787,8 +788,10 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
rtlpriv->cfg->ops->interrupt_recognized(hw, &inta, &intb);
/*Shared IRQ or HW disappared */
- if (!inta || inta == 0xffff)
+ if (!inta || inta == 0xffff) {
+ ret = IRQ_NONE;
goto done;
+ }
/*<1> beacon related */
if (inta & rtlpriv->cfg->maps[RTL_IMR_TBDOK]) {
@@ -890,12 +893,9 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
if (rtlpriv->rtlhal.earlymode_enable)
tasklet_schedule(&rtlpriv->works.irq_tasklet);
- spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
- return IRQ_HANDLED;
-
done:
spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
- return IRQ_HANDLED;
+ return ret;
}
static void _rtl_pci_irq_tasklet(struct ieee80211_hw *hw)
@@ -903,11 +903,6 @@ static void _rtl_pci_irq_tasklet(struct ieee80211_hw *hw)
_rtl_pci_tx_chk_waitq(hw);
}
-static void _rtl_pci_ips_leave_tasklet(struct ieee80211_hw *hw)
-{
- rtl_lps_leave(hw);
-}
-
static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -945,6 +940,15 @@ static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
return;
}
+static void rtl_lps_leave_work_callback(struct work_struct *work)
+{
+ struct rtl_works *rtlworks =
+ container_of(work, struct rtl_works, lps_leave_work);
+ struct ieee80211_hw *hw = rtlworks->hw;
+
+ rtl_lps_leave(hw);
+}
+
static void _rtl_pci_init_trx_var(struct ieee80211_hw *hw)
{
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
@@ -1006,9 +1010,7 @@ static void _rtl_pci_init_struct(struct ieee80211_hw *hw,
tasklet_init(&rtlpriv->works.irq_prepare_bcn_tasklet,
(void (*)(unsigned long))_rtl_pci_prepare_bcn_tasklet,
(unsigned long)hw);
- tasklet_init(&rtlpriv->works.ips_leave_tasklet,
- (void (*)(unsigned long))_rtl_pci_ips_leave_tasklet,
- (unsigned long)hw);
+ INIT_WORK(&rtlpriv->works.lps_leave_work, rtl_lps_leave_work_callback);
}
static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw,
@@ -1478,7 +1480,7 @@ static void rtl_pci_deinit(struct ieee80211_hw *hw)
synchronize_irq(rtlpci->pdev->irq);
tasklet_kill(&rtlpriv->works.irq_tasklet);
- tasklet_kill(&rtlpriv->works.ips_leave_tasklet);
+ cancel_work_sync(&rtlpriv->works.lps_leave_work);
flush_workqueue(rtlpriv->works.rtl_wq);
destroy_workqueue(rtlpriv->works.rtl_wq);
@@ -1553,7 +1555,7 @@ static void rtl_pci_stop(struct ieee80211_hw *hw)
set_hal_stop(rtlhal);
rtlpriv->cfg->ops->disable_interrupt(hw);
- tasklet_kill(&rtlpriv->works.ips_leave_tasklet);
+ cancel_work_sync(&rtlpriv->works.lps_leave_work);
spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flags);
while (ppsc->rfchange_inprogress) {
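
[editor's note] The ips_leave tasklet becomes lps_leave_work because rtl_lps_leave() now takes a mutex (see the ps.c hunks below) and may therefore sleep, which is not allowed in tasklet context. A hedged sketch of the tasklet-to-workqueue pattern used here, with illustrative names:

#include <linux/workqueue.h>

struct example_ctx {
	struct work_struct leave_work;
};

/* Runs in process context, so it may take mutexes and sleep. */
static void example_leave_work(struct work_struct *work)
{
	struct example_ctx *ctx =
		container_of(work, struct example_ctx, leave_work);

	/* ... sleeping work, e.g. mutex_lock() plus device I/O ... */
	(void)ctx;
}

/* Setup: INIT_WORK() once, schedule_work() from atomic context,
 * cancel_work_sync() before teardown. */
static void example_init(struct example_ctx *ctx)
{
	INIT_WORK(&ctx->leave_work, example_leave_work);
}
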
diff --git a/drivers/net/wireless/rtlwifi/ps.c b/drivers/net/wireless/rtlwifi/ps.c
index db5262844543..130fdd99d573 100644
--- a/drivers/net/wireless/rtlwifi/ps.c
+++ b/drivers/net/wireless/rtlwifi/ps.c
@@ -237,11 +237,12 @@ void rtl_ips_nic_on(struct ieee80211_hw *hw)
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
enum rf_pwrstate rtstate;
+ unsigned long flags;
if (mac->opmode != NL80211_IFTYPE_STATION)
return;
- spin_lock(&rtlpriv->locks.ips_lock);
+ spin_lock_irqsave(&rtlpriv->locks.ips_lock, flags);
if (ppsc->inactiveps) {
rtstate = ppsc->rfpwr_state;
@@ -257,7 +258,7 @@ void rtl_ips_nic_on(struct ieee80211_hw *hw)
}
}
- spin_unlock(&rtlpriv->locks.ips_lock);
+ spin_unlock_irqrestore(&rtlpriv->locks.ips_lock, flags);
}
/*for FW LPS*/
@@ -395,7 +396,7 @@ void rtl_lps_enter(struct ieee80211_hw *hw)
if (mac->link_state != MAC80211_LINKED)
return;
- spin_lock(&rtlpriv->locks.lps_lock);
+ mutex_lock(&rtlpriv->locks.ps_mutex);
/* Idle for a while if we connect to AP a while ago. */
if (mac->cnt_after_linked >= 2) {
@@ -407,7 +408,7 @@ void rtl_lps_enter(struct ieee80211_hw *hw)
}
}
- spin_unlock(&rtlpriv->locks.lps_lock);
+ mutex_unlock(&rtlpriv->locks.ps_mutex);
}
/*Leave the leisure power save mode.*/
@@ -417,7 +418,7 @@ void rtl_lps_leave(struct ieee80211_hw *hw)
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- spin_lock(&rtlpriv->locks.lps_lock);
+ mutex_lock(&rtlpriv->locks.ps_mutex);
if (ppsc->fwctrl_lps) {
if (ppsc->dot11_psmode != EACTIVE) {
@@ -438,7 +439,7 @@ void rtl_lps_leave(struct ieee80211_hw *hw)
rtl_lps_set_psmode(hw, EACTIVE);
}
}
- spin_unlock(&rtlpriv->locks.lps_lock);
+ mutex_unlock(&rtlpriv->locks.ps_mutex);
}
/* For sw LPS*/
@@ -539,9 +540,9 @@ void rtl_swlps_rf_awake(struct ieee80211_hw *hw)
RT_CLEAR_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM);
}
- spin_lock(&rtlpriv->locks.lps_lock);
+ mutex_lock(&rtlpriv->locks.ps_mutex);
rtl_ps_set_rf_state(hw, ERFON, RF_CHANGE_BY_PS);
- spin_unlock(&rtlpriv->locks.lps_lock);
+ mutex_unlock(&rtlpriv->locks.ps_mutex);
}
void rtl_swlps_rfon_wq_callback(void *data)
@@ -574,9 +575,9 @@ void rtl_swlps_rf_sleep(struct ieee80211_hw *hw)
if (rtlpriv->link_info.busytraffic)
return;
- spin_lock(&rtlpriv->locks.lps_lock);
+ mutex_lock(&rtlpriv->locks.ps_mutex);
rtl_ps_set_rf_state(hw, ERFSLEEP, RF_CHANGE_BY_PS);
- spin_unlock(&rtlpriv->locks.lps_lock);
+ mutex_unlock(&rtlpriv->locks.ps_mutex);
if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM &&
!RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM)) {
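
[editor's note] Two locking changes in ps.c: rtl_ips_nic_on() now disables local interrupts around ips_lock with spin_lock_irqsave(), and the LPS paths move from the removed lps_lock spinlock to ps_mutex so they can sleep. A minimal sketch of the irqsave form, with placeholder names:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

static void example_critical_section(void)
{
	unsigned long flags;

	/* Saves and disables local interrupts, so the same lock can
	 * also be taken from an interrupt handler without deadlock. */
	spin_lock_irqsave(&example_lock, flags);
	/* ... short, non-sleeping critical section ... */
	spin_unlock_irqrestore(&example_lock, flags);
}
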
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
index 950c65a15b8a..931d97979b04 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
@@ -73,6 +73,34 @@ static void _rtl92c_enable_fw_download(struct ieee80211_hw *hw, bool enable)
}
}
+static void rtl_block_fw_writeN(struct ieee80211_hw *hw, const u8 *buffer,
+ u32 size)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 blockSize = REALTEK_USB_VENQT_MAX_BUF_SIZE - 20;
+ u8 *bufferPtr = (u8 *) buffer;
+ u32 i, offset, blockCount, remainSize;
+
+ blockCount = size / blockSize;
+ remainSize = size % blockSize;
+
+ for (i = 0; i < blockCount; i++) {
+ offset = i * blockSize;
+ rtlpriv->io.writeN_sync(rtlpriv,
+ (FW_8192C_START_ADDRESS + offset),
+ (void *)(bufferPtr + offset),
+ blockSize);
+ }
+
+ if (remainSize) {
+ offset = blockCount * blockSize;
+ rtlpriv->io.writeN_sync(rtlpriv,
+ (FW_8192C_START_ADDRESS + offset),
+ (void *)(bufferPtr + offset),
+ remainSize);
+ }
+}
+
static void _rtl92c_fw_block_write(struct ieee80211_hw *hw,
const u8 *buffer, u32 size)
{
@@ -81,23 +109,30 @@ static void _rtl92c_fw_block_write(struct ieee80211_hw *hw,
u8 *bufferPtr = (u8 *) buffer;
u32 *pu4BytePtr = (u32 *) buffer;
u32 i, offset, blockCount, remainSize;
+ u32 data;
+ if (rtlpriv->io.writeN_sync) {
+ rtl_block_fw_writeN(hw, buffer, size);
+ return;
+ }
blockCount = size / blockSize;
remainSize = size % blockSize;
+ if (remainSize) {
+ /* the last word is < 4 bytes - pad it with zeros */
+ for (i = 0; i < 4 - remainSize; i++)
+ *(bufferPtr + size + i) = 0;
+ blockCount++;
+ }
for (i = 0; i < blockCount; i++) {
offset = i * blockSize;
+ /* for big-endian platforms, the firmware data need to be byte
+ * swapped as it was read as a byte string and will be written
+ * as 32-bit dwords and byte swapped when written
+ */
+ data = le32_to_cpu(*(__le32 *)(pu4BytePtr + i));
rtl_write_dword(rtlpriv, (FW_8192C_START_ADDRESS + offset),
- *(pu4BytePtr + i));
- }
-
- if (remainSize) {
- offset = blockCount * blockSize;
- bufferPtr += offset;
- for (i = 0; i < remainSize; i++) {
- rtl_write_byte(rtlpriv, (FW_8192C_START_ADDRESS +
- offset + i), *(bufferPtr + i));
- }
+ data);
}
}
@@ -227,10 +262,10 @@ int rtl92c_download_fw(struct ieee80211_hw *hw)
u32 fwsize;
enum version_8192c version = rtlhal->version;
- pr_info("Loading firmware file %s\n", rtlpriv->cfg->fw_name);
if (!rtlhal->pfirmware)
return 1;
+ pr_info("Loading firmware file %s\n", rtlpriv->cfg->fw_name);
pfwheader = (struct rtl92c_firmware_header *)rtlhal->pfirmware;
pfwdata = (u8 *) rtlhal->pfirmware;
fwsize = rtlhal->fwsize;
@@ -238,8 +273,9 @@ int rtl92c_download_fw(struct ieee80211_hw *hw)
if (IS_FW_HEADER_EXIST(pfwheader)) {
RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
("Firmware Version(%d), Signature(%#x),Size(%d)\n",
- pfwheader->version, pfwheader->signature,
- (uint)sizeof(struct rtl92c_firmware_header)));
+ le16_to_cpu(pfwheader->version),
+ le16_to_cpu(pfwheader->signature),
+ (uint)sizeof(struct rtl92c_firmware_header)));
pfwdata = pfwdata + sizeof(struct rtl92c_firmware_header);
fwsize = fwsize - sizeof(struct rtl92c_firmware_header);
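
[editor's note] rtl_block_fw_writeN() above splits the firmware image into chunks the USB vendor request can carry and then flushes the remainder separately. A hedged sketch of the same split loop in isolation; write_chunk is a hypothetical callback standing in for the writeN_sync I/O op:

#include <linux/types.h>

/* Sketch: walk 'size' bytes of 'buf' in 'chunk'-sized pieces, then
 * write whatever is left over; mirrors the blockCount/remainSize
 * arithmetic used above. */
static void write_in_chunks(const u8 *buf, u32 size, u32 chunk,
			    void (*write_chunk)(u32 offset, const u8 *p, u32 len))
{
	u32 i, offset;
	u32 nblocks = size / chunk;
	u32 remain = size % chunk;

	for (i = 0; i < nblocks; i++) {
		offset = i * chunk;
		write_chunk(offset, buf + offset, chunk);
	}

	if (remain) {
		offset = nblocks * chunk;
		write_chunk(offset, buf + offset, remain);
	}
}
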
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h
index 3d5823c12621..cec5a3a1cc53 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h
@@ -32,32 +32,32 @@
#define FW_8192C_SIZE 0x3000
#define FW_8192C_START_ADDRESS 0x1000
-#define FW_8192C_END_ADDRESS 0x3FFF
+#define FW_8192C_END_ADDRESS 0x1FFF
#define FW_8192C_PAGE_SIZE 4096
#define FW_8192C_POLLING_DELAY 5
#define FW_8192C_POLLING_TIMEOUT_COUNT 100
#define IS_FW_HEADER_EXIST(_pfwhdr) \
- ((_pfwhdr->signature&0xFFF0) == 0x92C0 ||\
- (_pfwhdr->signature&0xFFF0) == 0x88C0)
+ ((le16_to_cpu(_pfwhdr->signature)&0xFFF0) == 0x92C0 ||\
+ (le16_to_cpu(_pfwhdr->signature)&0xFFF0) == 0x88C0)
struct rtl92c_firmware_header {
- u16 signature;
+ __le16 signature;
u8 category;
u8 function;
- u16 version;
+ __le16 version;
u8 subversion;
u8 rsvd1;
u8 month;
u8 date;
u8 hour;
u8 minute;
- u16 ramcodeSize;
- u16 rsvd2;
- u32 svnindex;
- u32 rsvd3;
- u32 rsvd4;
- u32 rsvd5;
+ __le16 ramcodeSize;
+ __le16 rsvd2;
+ __le32 svnindex;
+ __le32 rsvd3;
+ __le32 rsvd4;
+ __le32 rsvd5;
};
enum rtl8192c_h2c_cmd {
@@ -94,5 +94,6 @@ void rtl92c_firmware_selfreset(struct ieee80211_hw *hw);
void rtl92c_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode);
void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished);
void rtl92c_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus);
+void usb_writeN_async(struct rtl_priv *rtlpriv, u32 addr, void *data, u16 len);
#endif
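
[editor's note] Marking the firmware header fields as __le16/__le32 and reading them through le16_to_cpu() keeps sparse happy and makes the parsing correct on big-endian hosts. A minimal sketch of reading such a field; the struct here is a cut-down placeholder, not the real rtl92c header:

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/byteorder.h>

struct example_fw_header {
	__le16 signature;	/* stored little-endian in the image */
	__le16 version;
} __packed;

static u16 example_fw_version(const struct example_fw_header *hdr)
{
	/* le16_to_cpu() is a no-op on little-endian CPUs and a byte
	 * swap on big-endian ones. */
	return le16_to_cpu(hdr->version);
}
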
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
index 592a10ac5929..3b585aadabfc 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
@@ -569,7 +569,7 @@ static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
}
case ERFSLEEP:{
if (ppsc->rfpwr_state == ERFOFF)
- break;
+ return false;
for (queue_id = 0, i = 0;
queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
ring = &pcipriv->dev.tx_ring[queue_id];
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
index f2aa33dc4d78..89ef6982ce50 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
@@ -98,9 +98,9 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
rtl8192ce_bt_reg_init(hw);
- rtlpriv->dm.dm_initialgain_enable = 1;
+ rtlpriv->dm.dm_initialgain_enable = true;
rtlpriv->dm.dm_flag = 0;
- rtlpriv->dm.disable_framebursting = 0;
+ rtlpriv->dm.disable_framebursting = false;
rtlpriv->dm.thermalvalue = 0;
rtlpci->transmit_config = CFENDFORM | BIT(12) | BIT(13);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
index 814c05df51e8..124cf633861c 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
@@ -498,7 +498,7 @@ static void _rtl92cu_read_adapter_info(struct ieee80211_hw *hw)
}
RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_LOUD, ("MAP\n"),
hwinfo, HWSET_MAX_SIZE);
- eeprom_id = *((u16 *)&hwinfo[0]);
+ eeprom_id = le16_to_cpu(*((__le16 *)&hwinfo[0]));
if (eeprom_id != RTL8190_EEPROM_ID) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
("EEPROM ID(%#x) is invalid!!\n", eeprom_id));
@@ -516,13 +516,14 @@ static void _rtl92cu_read_adapter_info(struct ieee80211_hw *hw)
pr_info("MAC address: %pM\n", rtlefuse->dev_addr);
_rtl92cu_read_txpower_info_from_hwpg(hw,
rtlefuse->autoload_failflag, hwinfo);
- rtlefuse->eeprom_vid = *(u16 *)&hwinfo[EEPROM_VID];
- rtlefuse->eeprom_did = *(u16 *)&hwinfo[EEPROM_DID];
+ rtlefuse->eeprom_vid = le16_to_cpu(*(__le16 *)&hwinfo[EEPROM_VID]);
+ rtlefuse->eeprom_did = le16_to_cpu(*(__le16 *)&hwinfo[EEPROM_DID]);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
(" VID = 0x%02x PID = 0x%02x\n",
rtlefuse->eeprom_vid, rtlefuse->eeprom_did));
rtlefuse->eeprom_channelplan = *(u8 *)&hwinfo[EEPROM_CHANNELPLAN];
- rtlefuse->eeprom_version = *(u16 *)&hwinfo[EEPROM_VERSION];
+ rtlefuse->eeprom_version =
+ le16_to_cpu(*(__le16 *)&hwinfo[EEPROM_VERSION]);
rtlefuse->txpwr_fromeprom = true;
rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOMER_ID];
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
@@ -2435,7 +2436,7 @@ bool rtl92cu_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid)
"%x\n", ppsc->hwradiooff, e_rfpowerstate_toset));
}
if (actuallyset) {
- ppsc->hwradiooff = 1;
+ ppsc->hwradiooff = true;
if (e_rfpowerstate_toset == ERFON) {
if ((ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM) &&
RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_ASPM))
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
index 060a06f4a885..9e0c8fcdf90f 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
@@ -84,6 +84,7 @@ void rtl92c_read_chip_version(struct ieee80211_hw *hw)
}
}
rtlhal->version = (enum version_8192c)chip_version;
+ pr_info("rtl8192cu: Chip version 0x%x\n", chip_version);
switch (rtlhal->version) {
case VERSION_NORMAL_TSMC_CHIP_92C_1T2R:
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
index 72852900df84..e49cf2244c75 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
@@ -548,7 +548,7 @@ static bool _rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw,
break;
case ERFSLEEP:
if (ppsc->rfpwr_state == ERFOFF)
- break;
+ return false;
for (queue_id = 0, i = 0;
queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
ring = &pcipriv->dev.tx_ring[queue_id];
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
index 674cd1a486c5..6d2ca773bbc7 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -57,9 +57,9 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
const struct firmware *firmware;
int err;
- rtlpriv->dm.dm_initialgain_enable = 1;
+ rtlpriv->dm.dm_initialgain_enable = true;
rtlpriv->dm.dm_flag = 0;
- rtlpriv->dm.disable_framebursting = 0;
+ rtlpriv->dm.disable_framebursting = false;
rtlpriv->dm.thermalvalue = 0;
rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug;
rtlpriv->rtlhal.pfirmware = vmalloc(0x4000);
@@ -275,6 +275,8 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8191, rtl92cu_hal_cfg)},
/****** 8188CU ********/
+ /* RTL8188CTV */
+ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x018a, rtl92cu_hal_cfg)},
/* 8188CE-VAU USB minCard */
{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8170, rtl92cu_hal_cfg)},
/* 8188cu 1*1 dongle */
@@ -291,14 +293,14 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817e, rtl92cu_hal_cfg)},
/* 8188RU in Alfa AWUS036NHR */
{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817f, rtl92cu_hal_cfg)},
+ /* RTL8188CUS-VL */
+ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x818a, rtl92cu_hal_cfg)},
/* 8188 Combo for BC4 */
{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8754, rtl92cu_hal_cfg)},
/****** 8192CU ********/
- /* 8191cu 1*2 */
- {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8177, rtl92cu_hal_cfg)},
/* 8192cu 2*2 */
- {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817b, rtl92cu_hal_cfg)},
+ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8178, rtl92cu_hal_cfg)},
/* 8192CE-VAU USB minCard */
{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817c, rtl92cu_hal_cfg)},
@@ -309,13 +311,17 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
{RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
{RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
{RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/
- {RTL_USB_DEVICE(0x0Df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
+ {RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
+ {RTL_USB_DEVICE(0x0df6, 0x005c, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
{RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/
/* HP - Lite-On ,8188CUS Slim Combo */
{RTL_USB_DEVICE(0x103c, 0x1629, rtl92cu_hal_cfg)},
{RTL_USB_DEVICE(0x13d3, 0x3357, rtl92cu_hal_cfg)}, /* AzureWave */
{RTL_USB_DEVICE(0x2001, 0x3308, rtl92cu_hal_cfg)}, /*D-Link - Alpha*/
+ {RTL_USB_DEVICE(0x2019, 0x4902, rtl92cu_hal_cfg)}, /*Planex - Etop*/
{RTL_USB_DEVICE(0x2019, 0xab2a, rtl92cu_hal_cfg)}, /*Planex - Abocom*/
+ /*SW-WF02-AD15 -Abocom*/
+ {RTL_USB_DEVICE(0x2019, 0xab2e, rtl92cu_hal_cfg)},
{RTL_USB_DEVICE(0x2019, 0xed17, rtl92cu_hal_cfg)}, /*PCI - Edimax*/
{RTL_USB_DEVICE(0x20f4, 0x648b, rtl92cu_hal_cfg)}, /*TRENDnet - Cameo*/
{RTL_USB_DEVICE(0x7392, 0x7811, rtl92cu_hal_cfg)}, /*Edimax - Edimax*/
@@ -326,14 +332,36 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
{RTL_USB_DEVICE(0x4855, 0x0091, rtl92cu_hal_cfg)}, /* NetweeN-Feixun */
{RTL_USB_DEVICE(0x9846, 0x9041, rtl92cu_hal_cfg)}, /* Netgear Cameo */
+ /****** 8188 RU ********/
+ /* Netcore */
+ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x317f, rtl92cu_hal_cfg)},
+
+ /****** 8188CUS Slim Solo********/
+ {RTL_USB_DEVICE(0x04f2, 0xaff7, rtl92cu_hal_cfg)}, /*Xavi*/
+ {RTL_USB_DEVICE(0x04f2, 0xaff9, rtl92cu_hal_cfg)}, /*Xavi*/
+ {RTL_USB_DEVICE(0x04f2, 0xaffa, rtl92cu_hal_cfg)}, /*Xavi*/
+
+ /****** 8188CUS Slim Combo ********/
+ {RTL_USB_DEVICE(0x04f2, 0xaff8, rtl92cu_hal_cfg)}, /*Xavi*/
+ {RTL_USB_DEVICE(0x04f2, 0xaffb, rtl92cu_hal_cfg)}, /*Xavi*/
+ {RTL_USB_DEVICE(0x04f2, 0xaffc, rtl92cu_hal_cfg)}, /*Xavi*/
+ {RTL_USB_DEVICE(0x2019, 0x1201, rtl92cu_hal_cfg)}, /*Planex-Vencer*/
+
/****** 8192CU ********/
+ {RTL_USB_DEVICE(0x050d, 0x2102, rtl92cu_hal_cfg)}, /*Belcom-Sercomm*/
+ {RTL_USB_DEVICE(0x050d, 0x2103, rtl92cu_hal_cfg)}, /*Belcom-Edimax*/
{RTL_USB_DEVICE(0x0586, 0x341f, rtl92cu_hal_cfg)}, /*Zyxel -Abocom*/
{RTL_USB_DEVICE(0x07aa, 0x0056, rtl92cu_hal_cfg)}, /*ATKK-Gemtek*/
{RTL_USB_DEVICE(0x07b8, 0x8178, rtl92cu_hal_cfg)}, /*Funai -Abocom*/
+ {RTL_USB_DEVICE(0x0846, 0x9021, rtl92cu_hal_cfg)}, /*Netgear-Sercomm*/
+ {RTL_USB_DEVICE(0x0b05, 0x17ab, rtl92cu_hal_cfg)}, /*ASUS-Edimax*/
+ {RTL_USB_DEVICE(0x0df6, 0x0061, rtl92cu_hal_cfg)}, /*Sitecom-Edimax*/
+ {RTL_USB_DEVICE(0x0e66, 0x0019, rtl92cu_hal_cfg)}, /*Hawking-Edimax*/
{RTL_USB_DEVICE(0x2001, 0x3307, rtl92cu_hal_cfg)}, /*D-Link-Cameo*/
{RTL_USB_DEVICE(0x2001, 0x3309, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
{RTL_USB_DEVICE(0x2001, 0x330a, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
{RTL_USB_DEVICE(0x2019, 0xab2b, rtl92cu_hal_cfg)}, /*Planex -Abocom*/
+ {RTL_USB_DEVICE(0x20f4, 0x624d, rtl92cu_hal_cfg)}, /*TRENDNet*/
{RTL_USB_DEVICE(0x7392, 0x7822, rtl92cu_hal_cfg)}, /*Edimax -Edimax*/
{}
};
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
index bc33b147f44f..b3cc7b949992 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
@@ -491,7 +491,7 @@ static void _rtl_tx_desc_checksum(u8 *txdesc)
SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, 0);
for (index = 0; index < 16; index++)
checksum = checksum ^ (*(ptr + index));
- SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, checksum);
+ SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, cpu_to_le16(checksum));
}
void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
index f5bd3a3cd34a..9d89d7ccdafb 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
@@ -466,8 +466,8 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
bool int_migration = *(bool *) (val);
if (int_migration) {
- /* Set interrrupt migration timer and
- * corresponging Tx/Rx counter.
+ /* Set interrupt migration timer and
+ * corresponding Tx/Rx counter.
* timer 25ns*0xfa0=100us for 0xf packets.
* 0x306:Rx, 0x307:Tx */
rtl_write_dword(rtlpriv, REG_INT_MIG, 0xfe000fa0);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
index 3ac7af1c5509..0883349e1c83 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
@@ -3374,7 +3374,7 @@ bool rtl92d_phy_set_rf_power_state(struct ieee80211_hw *hw,
break;
case ERFSLEEP:
if (ppsc->rfpwr_state == ERFOFF)
- break;
+ return false;
for (queue_id = 0, i = 0;
queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
index 149493f4c25c..7911c9c87085 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
@@ -99,9 +99,9 @@ static int rtl92d_init_sw_vars(struct ieee80211_hw *hw)
rtlpriv->dm.dm_initialgain_enable = true;
rtlpriv->dm.dm_flag = 0;
- rtlpriv->dm.disable_framebursting = 0;
+ rtlpriv->dm.disable_framebursting = false;
rtlpriv->dm.thermalvalue = 0;
- rtlpriv->dm.useramask = 1;
+ rtlpriv->dm.useramask = true;
/* dual mac */
if (rtlpriv->rtlhal.current_bandtype == BAND_ON_5G)
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/fw.c b/drivers/net/wireless/rtlwifi/rtl8192se/fw.c
index 6f91a148c222..3fda6b1dcf46 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/fw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/fw.c
@@ -196,6 +196,8 @@ static bool _rtl92s_firmware_downloadcode(struct ieee80211_hw *hw,
/* Allocate skb buffer to contain firmware */
/* info and tx descriptor info. */
skb = dev_alloc_skb(frag_length);
+ if (!skb)
+ return false;
skb_reserve(skb, extra_descoffset);
seg_ptr = (u8 *)skb_put(skb, (u32)(frag_length -
extra_descoffset));
@@ -573,6 +575,8 @@ static bool _rtl92s_firmware_set_h2c_cmd(struct ieee80211_hw *hw, u8 h2c_cmd,
len = _rtl92s_get_h2c_cmdlen(MAX_TRANSMIT_BUFFER_SIZE, 1, &cmd_len);
skb = dev_alloc_skb(len);
+ if (!skb)
+ return false;
cb_desc = (struct rtl_tcb_desc *)(skb->cb);
cb_desc->queue_index = TXCMD_QUEUE;
cb_desc->cmd_or_init = DESC_PACKET_TYPE_NORMAL;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
index f27171af979c..f10ac1ad9087 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
@@ -602,7 +602,7 @@ bool rtl92s_phy_set_rf_power_state(struct ieee80211_hw *hw,
}
case ERFSLEEP:
if (ppsc->rfpwr_state == ERFOFF)
- break;
+ return false;
for (queue_id = 0, i = 0;
queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
index 92f49d522c56..78723cf59491 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
@@ -98,9 +98,9 @@ static int rtl92s_init_sw_vars(struct ieee80211_hw *hw)
int err = 0;
u16 earlyrxthreshold = 7;
- rtlpriv->dm.dm_initialgain_enable = 1;
+ rtlpriv->dm.dm_initialgain_enable = true;
rtlpriv->dm.dm_flag = 0;
- rtlpriv->dm.disable_framebursting = 0;
+ rtlpriv->dm.disable_framebursting = false;
rtlpriv->dm.thermalvalue = 0;
rtlpriv->dm.useramask = true;
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index 54cb8a60514d..e956fa71d040 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -34,13 +34,14 @@
#include "usb.h"
#include "base.h"
#include "ps.h"
+#include "rtl8192c/fw_common.h"
#define REALTEK_USB_VENQT_READ 0xC0
#define REALTEK_USB_VENQT_WRITE 0x40
#define REALTEK_USB_VENQT_CMD_REQ 0x05
#define REALTEK_USB_VENQT_CMD_IDX 0x00
-#define REALTEK_USB_VENQT_MAX_BUF_SIZE 254
+#define MAX_USBCTRL_VENDORREQ_TIMES 10
static void usbctrl_async_callback(struct urb *urb)
{
@@ -82,6 +83,7 @@ static int _usbctrl_vendorreq_async_write(struct usb_device *udev, u8 request,
dr->wValue = cpu_to_le16(value);
dr->wIndex = cpu_to_le16(index);
dr->wLength = cpu_to_le16(len);
+ /* data are already in little-endian order */
memcpy(buf, pdata, len);
usb_fill_control_urb(urb, udev, pipe,
(unsigned char *)dr, buf, len,
@@ -100,16 +102,28 @@ static int _usbctrl_vendorreq_sync_read(struct usb_device *udev, u8 request,
unsigned int pipe;
int status;
u8 reqtype;
+ int vendorreq_times = 0;
+ static int count;
pipe = usb_rcvctrlpipe(udev, 0); /* read_in */
reqtype = REALTEK_USB_VENQT_READ;
- status = usb_control_msg(udev, pipe, request, reqtype, value, index,
- pdata, len, 0); /* max. timeout */
+ do {
+ status = usb_control_msg(udev, pipe, request, reqtype, value,
+ index, pdata, len, 0); /*max. timeout*/
+ if (status < 0) {
+ /* firmware download is checksumed, don't retry */
+ if ((value >= FW_8192C_START_ADDRESS &&
+ value <= FW_8192C_END_ADDRESS))
+ break;
+ } else {
+ break;
+ }
+ } while (++vendorreq_times < MAX_USBCTRL_VENDORREQ_TIMES);
- if (status < 0)
+ if (status < 0 && count++ < 4)
pr_err("reg 0x%x, usbctrl_vendorreq TimeOut! status:0x%x value=0x%x\n",
- value, status, *(u32 *)pdata);
+ value, status, le32_to_cpu(*(u32 *)pdata));
return status;
}
@@ -129,7 +143,7 @@ static u32 _usb_read_sync(struct usb_device *udev, u32 addr, u16 len)
wvalue = (u16)addr;
_usbctrl_vendorreq_sync_read(udev, request, wvalue, index, data, len);
- ret = *data;
+ ret = le32_to_cpu(*data);
kfree(data);
return ret;
}
@@ -161,12 +175,12 @@ static void _usb_write_async(struct usb_device *udev, u32 addr, u32 val,
u8 request;
u16 wvalue;
u16 index;
- u32 data;
+ __le32 data;
request = REALTEK_USB_VENQT_CMD_REQ;
index = REALTEK_USB_VENQT_CMD_IDX; /* n/a */
wvalue = (u16)(addr&0x0000ffff);
- data = val;
+ data = cpu_to_le32(val);
_usbctrl_vendorreq_async_write(udev, request, wvalue, index, &data,
len);
}
@@ -192,6 +206,30 @@ static void _usb_write32_async(struct rtl_priv *rtlpriv, u32 addr, u32 val)
_usb_write_async(to_usb_device(dev), addr, val, 4);
}
+static void _usb_writeN_sync(struct rtl_priv *rtlpriv, u32 addr, void *data,
+ u16 len)
+{
+ struct device *dev = rtlpriv->io.dev;
+ struct usb_device *udev = to_usb_device(dev);
+ u8 request = REALTEK_USB_VENQT_CMD_REQ;
+ u8 reqtype = REALTEK_USB_VENQT_WRITE;
+ u16 wvalue;
+ u16 index = REALTEK_USB_VENQT_CMD_IDX;
+ int pipe = usb_sndctrlpipe(udev, 0); /* write_out */
+ u8 *buffer;
+ dma_addr_t dma_addr;
+
+ wvalue = (u16)(addr&0x0000ffff);
+ buffer = usb_alloc_coherent(udev, (size_t)len, GFP_ATOMIC, &dma_addr);
+ if (!buffer)
+ return;
+ memcpy(buffer, data, len);
+ usb_control_msg(udev, pipe, request, reqtype, wvalue,
+ index, buffer, len, 50);
+
+ usb_free_coherent(udev, (size_t)len, buffer, dma_addr);
+}
+
static void _rtl_usb_io_handler_init(struct device *dev,
struct ieee80211_hw *hw)
{
@@ -205,6 +243,7 @@ static void _rtl_usb_io_handler_init(struct device *dev,
rtlpriv->io.read8_sync = _usb_read8_sync;
rtlpriv->io.read16_sync = _usb_read16_sync;
rtlpriv->io.read32_sync = _usb_read32_sync;
+ rtlpriv->io.writeN_sync = _usb_writeN_sync;
}
static void _rtl_usb_io_handler_release(struct ieee80211_hw *hw)
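
[editor's note] The synchronous read above now retries the vendor request a bounded number of times, but deliberately skips retries for addresses inside the firmware download window, mirroring the "firmware download is checksumed, don't retry" comment. A hedged sketch of the bounded-retry shape; the request/type values mirror the REALTEK_USB_VENQT constants and the retry count and timeout are placeholders:

#include <linux/usb.h>

#define EXAMPLE_MAX_RETRIES	10

static int example_vendor_read(struct usb_device *udev, u16 value,
			       void *buf, u16 len)
{
	unsigned int pipe = usb_rcvctrlpipe(udev, 0);
	int tries = 0;
	int status;

	do {
		status = usb_control_msg(udev, pipe, 0x05, 0xC0, value, 0,
					 buf, len, 500);
		if (status >= 0)
			break;		/* success: stop retrying */
	} while (++tries < EXAMPLE_MAX_RETRIES);

	return status;
}
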
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index 713c7ddba8eb..cdaf1429fa0b 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -63,6 +63,7 @@
#define AC_MAX 4
#define QOS_QUEUE_NUM 4
#define RTL_MAC80211_NUM_QUEUE 5
+#define REALTEK_USB_VENQT_MAX_BUF_SIZE 254
#define QBSS_LOAD_SIZE 5
#define MAX_WMMELE_LENGTH 64
@@ -943,8 +944,10 @@ struct rtl_io {
unsigned long pci_base_addr; /*device I/O address */
void (*write8_async) (struct rtl_priv *rtlpriv, u32 addr, u8 val);
- void (*write16_async) (struct rtl_priv *rtlpriv, u32 addr, __le16 val);
- void (*write32_async) (struct rtl_priv *rtlpriv, u32 addr, __le32 val);
+ void (*write16_async) (struct rtl_priv *rtlpriv, u32 addr, u16 val);
+ void (*write32_async) (struct rtl_priv *rtlpriv, u32 addr, u32 val);
+ void (*writeN_sync) (struct rtl_priv *rtlpriv, u32 addr, void *buf,
+ u16 len);
u8(*read8_sync) (struct rtl_priv *rtlpriv, u32 addr);
u16(*read16_sync) (struct rtl_priv *rtlpriv, u32 addr);
@@ -1485,7 +1488,7 @@ struct rtl_intf_ops {
struct rtl_mod_params {
/* default: 0 = using hardware encryption */
- int sw_crypto;
+ bool sw_crypto;
/* default: 0 = DBG_EMERG (0)*/
int debug;
@@ -1541,6 +1544,7 @@ struct rtl_hal_cfg {
struct rtl_locks {
/* mutex */
struct mutex conf_mutex;
+ struct mutex ps_mutex;
/*spin lock */
spinlock_t ips_lock;
@@ -1548,7 +1552,6 @@ struct rtl_locks {
spinlock_t h2c_lock;
spinlock_t rf_ps_lock;
spinlock_t rf_lock;
- spinlock_t lps_lock;
spinlock_t waitq_lock;
/*Dual mac*/
@@ -1573,7 +1576,8 @@ struct rtl_works {
/* For SW LPS */
struct delayed_work ps_work;
struct delayed_work ps_rfon_wq;
- struct tasklet_struct ips_leave_tasklet;
+
+ struct work_struct lps_leave_work;
};
struct rtl_debug {
diff --git a/drivers/net/wireless/wl1251/spi.c b/drivers/net/wireless/wl1251/spi.c
index eaa5f9556200..6248c354fc5c 100644
--- a/drivers/net/wireless/wl1251/spi.c
+++ b/drivers/net/wireless/wl1251/spi.c
@@ -319,7 +319,6 @@ static int __devexit wl1251_spi_remove(struct spi_device *spi)
static struct spi_driver wl1251_spi_driver = {
.driver = {
.name = DRIVER_NAME,
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
diff --git a/drivers/net/wireless/wl12xx/Kconfig b/drivers/net/wireless/wl12xx/Kconfig
index 3fe388b87c2e..af08c8609c63 100644
--- a/drivers/net/wireless/wl12xx/Kconfig
+++ b/drivers/net/wireless/wl12xx/Kconfig
@@ -42,16 +42,6 @@ config WL12XX_SDIO
If you choose to build a module, it'll be called wl12xx_sdio.
Say N if unsure.
-config WL12XX_SDIO_TEST
- tristate "TI wl12xx SDIO testing support"
- depends on WL12XX && MMC && WL12XX_SDIO
- default n
- ---help---
- This module adds support for the SDIO bus testing with the
- TI wl12xx chipsets. You probably don't want this unless you are
- testing a new hardware platform. Select this if you want to test the
- SDIO bus which is connected to the wl12xx chip.
-
config WL12XX_PLATFORM_DATA
bool
depends on WL12XX_SDIO != n || WL1251_SDIO != n
diff --git a/drivers/net/wireless/wl12xx/Makefile b/drivers/net/wireless/wl12xx/Makefile
index 621b3483ca2c..fe67262ba19f 100644
--- a/drivers/net/wireless/wl12xx/Makefile
+++ b/drivers/net/wireless/wl12xx/Makefile
@@ -3,14 +3,11 @@ wl12xx-objs = main.o cmd.o io.o event.o tx.o rx.o ps.o acx.o \
wl12xx_spi-objs = spi.o
wl12xx_sdio-objs = sdio.o
-wl12xx_sdio_test-objs = sdio_test.o
wl12xx-$(CONFIG_NL80211_TESTMODE) += testmode.o
obj-$(CONFIG_WL12XX) += wl12xx.o
obj-$(CONFIG_WL12XX_SPI) += wl12xx_spi.o
obj-$(CONFIG_WL12XX_SDIO) += wl12xx_sdio.o
-obj-$(CONFIG_WL12XX_SDIO_TEST) += wl12xx_sdio_test.o
-
# small builtin driver bit
obj-$(CONFIG_WL12XX_PLATFORM_DATA) += wl12xx_platform_data.o
diff --git a/drivers/net/wireless/wl12xx/acx.c b/drivers/net/wireless/wl12xx/acx.c
index ca044a743191..7537c401a448 100644
--- a/drivers/net/wireless/wl12xx/acx.c
+++ b/drivers/net/wireless/wl12xx/acx.c
@@ -29,11 +29,12 @@
#include <linux/slab.h>
#include "wl12xx.h"
+#include "debug.h"
#include "wl12xx_80211.h"
#include "reg.h"
#include "ps.h"
-int wl1271_acx_wake_up_conditions(struct wl1271 *wl)
+int wl1271_acx_wake_up_conditions(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct acx_wake_up_condition *wake_up;
int ret;
@@ -46,7 +47,7 @@ int wl1271_acx_wake_up_conditions(struct wl1271 *wl)
goto out;
}
- wake_up->role_id = wl->role_id;
+ wake_up->role_id = wlvif->role_id;
wake_up->wake_up_event = wl->conf.conn.wake_up_event;
wake_up->listen_interval = wl->conf.conn.listen_interval;
@@ -84,7 +85,8 @@ out:
return ret;
}
-int wl1271_acx_tx_power(struct wl1271 *wl, int power)
+int wl1271_acx_tx_power(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ int power)
{
struct acx_current_tx_power *acx;
int ret;
@@ -100,7 +102,7 @@ int wl1271_acx_tx_power(struct wl1271 *wl, int power)
goto out;
}
- acx->role_id = wl->role_id;
+ acx->role_id = wlvif->role_id;
acx->current_tx_power = power * 10;
ret = wl1271_cmd_configure(wl, DOT11_CUR_TX_PWR, acx, sizeof(*acx));
@@ -114,7 +116,7 @@ out:
return ret;
}
-int wl1271_acx_feature_cfg(struct wl1271 *wl)
+int wl1271_acx_feature_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct acx_feature_config *feature;
int ret;
@@ -128,7 +130,7 @@ int wl1271_acx_feature_cfg(struct wl1271 *wl)
}
/* DF_ENCRYPTION_DISABLE and DF_SNIFF_MODE_ENABLE are disabled */
- feature->role_id = wl->role_id;
+ feature->role_id = wlvif->role_id;
feature->data_flow_options = 0;
feature->options = 0;
@@ -184,33 +186,8 @@ out:
return ret;
}
-int wl1271_acx_pd_threshold(struct wl1271 *wl)
-{
- struct acx_packet_detection *pd;
- int ret;
-
- wl1271_debug(DEBUG_ACX, "acx data pd threshold");
-
- pd = kzalloc(sizeof(*pd), GFP_KERNEL);
- if (!pd) {
- ret = -ENOMEM;
- goto out;
- }
-
- pd->threshold = cpu_to_le32(wl->conf.rx.packet_detection_threshold);
-
- ret = wl1271_cmd_configure(wl, ACX_PD_THRESHOLD, pd, sizeof(*pd));
- if (ret < 0) {
- wl1271_warning("failed to set pd threshold: %d", ret);
- goto out;
- }
-
-out:
- kfree(pd);
- return ret;
-}
-
-int wl1271_acx_slot(struct wl1271 *wl, enum acx_slot_type slot_time)
+int wl1271_acx_slot(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ enum acx_slot_type slot_time)
{
struct acx_slot *slot;
int ret;
@@ -223,7 +200,7 @@ int wl1271_acx_slot(struct wl1271 *wl, enum acx_slot_type slot_time)
goto out;
}
- slot->role_id = wl->role_id;
+ slot->role_id = wlvif->role_id;
slot->wone_index = STATION_WONE_INDEX;
slot->slot_time = slot_time;
@@ -238,8 +215,8 @@ out:
return ret;
}
-int wl1271_acx_group_address_tbl(struct wl1271 *wl, bool enable,
- void *mc_list, u32 mc_list_len)
+int wl1271_acx_group_address_tbl(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ bool enable, void *mc_list, u32 mc_list_len)
{
struct acx_dot11_grp_addr_tbl *acx;
int ret;
@@ -253,7 +230,7 @@ int wl1271_acx_group_address_tbl(struct wl1271 *wl, bool enable,
}
/* MAC filtering */
- acx->role_id = wl->role_id;
+ acx->role_id = wlvif->role_id;
acx->enabled = enable;
acx->num_groups = mc_list_len;
memcpy(acx->mac_table, mc_list, mc_list_len * ETH_ALEN);
@@ -270,7 +247,8 @@ out:
return ret;
}
-int wl1271_acx_service_period_timeout(struct wl1271 *wl)
+int wl1271_acx_service_period_timeout(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
struct acx_rx_timeout *rx_timeout;
int ret;
@@ -283,7 +261,7 @@ int wl1271_acx_service_period_timeout(struct wl1271 *wl)
wl1271_debug(DEBUG_ACX, "acx service period timeout");
- rx_timeout->role_id = wl->role_id;
+ rx_timeout->role_id = wlvif->role_id;
rx_timeout->ps_poll_timeout = cpu_to_le16(wl->conf.rx.ps_poll_timeout);
rx_timeout->upsd_timeout = cpu_to_le16(wl->conf.rx.upsd_timeout);
@@ -300,7 +278,8 @@ out:
return ret;
}
-int wl1271_acx_rts_threshold(struct wl1271 *wl, u32 rts_threshold)
+int wl1271_acx_rts_threshold(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u32 rts_threshold)
{
struct acx_rts_threshold *rts;
int ret;
@@ -320,7 +299,7 @@ int wl1271_acx_rts_threshold(struct wl1271 *wl, u32 rts_threshold)
goto out;
}
- rts->role_id = wl->role_id;
+ rts->role_id = wlvif->role_id;
rts->threshold = cpu_to_le16((u16)rts_threshold);
ret = wl1271_cmd_configure(wl, DOT11_RTS_THRESHOLD, rts, sizeof(*rts));
@@ -363,7 +342,8 @@ out:
return ret;
}
-int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter)
+int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ bool enable_filter)
{
struct acx_beacon_filter_option *beacon_filter = NULL;
int ret = 0;
@@ -380,7 +360,7 @@ int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter)
goto out;
}
- beacon_filter->role_id = wl->role_id;
+ beacon_filter->role_id = wlvif->role_id;
beacon_filter->enable = enable_filter;
/*
@@ -401,7 +381,8 @@ out:
return ret;
}
-int wl1271_acx_beacon_filter_table(struct wl1271 *wl)
+int wl1271_acx_beacon_filter_table(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
struct acx_beacon_filter_ie_table *ie_table;
int i, idx = 0;
@@ -417,7 +398,7 @@ int wl1271_acx_beacon_filter_table(struct wl1271 *wl)
}
/* configure default beacon pass-through rules */
- ie_table->role_id = wl->role_id;
+ ie_table->role_id = wlvif->role_id;
ie_table->num_ie = 0;
for (i = 0; i < wl->conf.conn.bcn_filt_ie_count; i++) {
struct conf_bcn_filt_rule *r = &(wl->conf.conn.bcn_filt_ie[i]);
@@ -458,7 +439,8 @@ out:
#define ACX_CONN_MONIT_DISABLE_VALUE 0xffffffff
-int wl1271_acx_conn_monit_params(struct wl1271 *wl, bool enable)
+int wl1271_acx_conn_monit_params(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ bool enable)
{
struct acx_conn_monit_params *acx;
u32 threshold = ACX_CONN_MONIT_DISABLE_VALUE;
@@ -479,7 +461,7 @@ int wl1271_acx_conn_monit_params(struct wl1271 *wl, bool enable)
timeout = wl->conf.conn.bss_lose_timeout;
}
- acx->role_id = wl->role_id;
+ acx->role_id = wlvif->role_id;
acx->synch_fail_thold = cpu_to_le32(threshold);
acx->bss_lose_timeout = cpu_to_le32(timeout);
@@ -582,7 +564,7 @@ out:
return ret;
}
-int wl1271_acx_bcn_dtim_options(struct wl1271 *wl)
+int wl1271_acx_bcn_dtim_options(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct acx_beacon_broadcast *bb;
int ret;
@@ -595,7 +577,7 @@ int wl1271_acx_bcn_dtim_options(struct wl1271 *wl)
goto out;
}
- bb->role_id = wl->role_id;
+ bb->role_id = wlvif->role_id;
bb->beacon_rx_timeout = cpu_to_le16(wl->conf.conn.beacon_rx_timeout);
bb->broadcast_timeout = cpu_to_le16(wl->conf.conn.broadcast_timeout);
bb->rx_broadcast_in_ps = wl->conf.conn.rx_broadcast_in_ps;
@@ -612,7 +594,7 @@ out:
return ret;
}
-int wl1271_acx_aid(struct wl1271 *wl, u16 aid)
+int wl1271_acx_aid(struct wl1271 *wl, struct wl12xx_vif *wlvif, u16 aid)
{
struct acx_aid *acx_aid;
int ret;
@@ -625,7 +607,7 @@ int wl1271_acx_aid(struct wl1271 *wl, u16 aid)
goto out;
}
- acx_aid->role_id = wl->role_id;
+ acx_aid->role_id = wlvif->role_id;
acx_aid->aid = cpu_to_le16(aid);
ret = wl1271_cmd_configure(wl, ACX_AID, acx_aid, sizeof(*acx_aid));
@@ -668,7 +650,8 @@ out:
return ret;
}
-int wl1271_acx_set_preamble(struct wl1271 *wl, enum acx_preamble_type preamble)
+int wl1271_acx_set_preamble(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ enum acx_preamble_type preamble)
{
struct acx_preamble *acx;
int ret;
@@ -681,7 +664,7 @@ int wl1271_acx_set_preamble(struct wl1271 *wl, enum acx_preamble_type preamble)
goto out;
}
- acx->role_id = wl->role_id;
+ acx->role_id = wlvif->role_id;
acx->preamble = preamble;
ret = wl1271_cmd_configure(wl, ACX_PREAMBLE_TYPE, acx, sizeof(*acx));
@@ -695,7 +678,7 @@ out:
return ret;
}
-int wl1271_acx_cts_protect(struct wl1271 *wl,
+int wl1271_acx_cts_protect(struct wl1271 *wl, struct wl12xx_vif *wlvif,
enum acx_ctsprotect_type ctsprotect)
{
struct acx_ctsprotect *acx;
@@ -709,7 +692,7 @@ int wl1271_acx_cts_protect(struct wl1271 *wl,
goto out;
}
- acx->role_id = wl->role_id;
+ acx->role_id = wlvif->role_id;
acx->ctsprotect = ctsprotect;
ret = wl1271_cmd_configure(wl, ACX_CTS_PROTECTION, acx, sizeof(*acx));
@@ -739,7 +722,7 @@ int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats)
return 0;
}
-int wl1271_acx_sta_rate_policies(struct wl1271 *wl)
+int wl1271_acx_sta_rate_policies(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct acx_rate_policy *acx;
struct conf_tx_rate_class *c = &wl->conf.tx.sta_rc_conf;
@@ -755,11 +738,11 @@ int wl1271_acx_sta_rate_policies(struct wl1271 *wl)
}
wl1271_debug(DEBUG_ACX, "basic_rate: 0x%x, full_rate: 0x%x",
- wl->basic_rate, wl->rate_set);
+ wlvif->basic_rate, wlvif->rate_set);
/* configure one basic rate class */
- acx->rate_policy_idx = cpu_to_le32(ACX_TX_BASIC_RATE);
- acx->rate_policy.enabled_rates = cpu_to_le32(wl->basic_rate);
+ acx->rate_policy_idx = cpu_to_le32(wlvif->sta.basic_rate_idx);
+ acx->rate_policy.enabled_rates = cpu_to_le32(wlvif->basic_rate);
acx->rate_policy.short_retry_limit = c->short_retry_limit;
acx->rate_policy.long_retry_limit = c->long_retry_limit;
acx->rate_policy.aflags = c->aflags;
@@ -771,8 +754,8 @@ int wl1271_acx_sta_rate_policies(struct wl1271 *wl)
}
/* configure one AP supported rate class */
- acx->rate_policy_idx = cpu_to_le32(ACX_TX_AP_FULL_RATE);
- acx->rate_policy.enabled_rates = cpu_to_le32(wl->rate_set);
+ acx->rate_policy_idx = cpu_to_le32(wlvif->sta.ap_rate_idx);
+ acx->rate_policy.enabled_rates = cpu_to_le32(wlvif->rate_set);
acx->rate_policy.short_retry_limit = c->short_retry_limit;
acx->rate_policy.long_retry_limit = c->long_retry_limit;
acx->rate_policy.aflags = c->aflags;
@@ -788,7 +771,7 @@ int wl1271_acx_sta_rate_policies(struct wl1271 *wl)
* (p2p packets should always go out with OFDM rates, even
* if we are currently connected to 11b AP)
*/
- acx->rate_policy_idx = cpu_to_le32(ACX_TX_BASIC_RATE_P2P);
+ acx->rate_policy_idx = cpu_to_le32(wlvif->sta.p2p_rate_idx);
acx->rate_policy.enabled_rates =
cpu_to_le32(CONF_TX_RATE_MASK_BASIC_P2P);
acx->rate_policy.short_retry_limit = c->short_retry_limit;
@@ -839,8 +822,8 @@ out:
return ret;
}
-int wl1271_acx_ac_cfg(struct wl1271 *wl, u8 ac, u8 cw_min, u16 cw_max,
- u8 aifsn, u16 txop)
+int wl1271_acx_ac_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 ac, u8 cw_min, u16 cw_max, u8 aifsn, u16 txop)
{
struct acx_ac_cfg *acx;
int ret = 0;
@@ -855,7 +838,7 @@ int wl1271_acx_ac_cfg(struct wl1271 *wl, u8 ac, u8 cw_min, u16 cw_max,
goto out;
}
- acx->role_id = wl->role_id;
+ acx->role_id = wlvif->role_id;
acx->ac = ac;
acx->cw_min = cw_min;
acx->cw_max = cpu_to_le16(cw_max);
@@ -873,7 +856,8 @@ out:
return ret;
}
-int wl1271_acx_tid_cfg(struct wl1271 *wl, u8 queue_id, u8 channel_type,
+int wl1271_acx_tid_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 queue_id, u8 channel_type,
u8 tsid, u8 ps_scheme, u8 ack_policy,
u32 apsd_conf0, u32 apsd_conf1)
{
@@ -889,7 +873,7 @@ int wl1271_acx_tid_cfg(struct wl1271 *wl, u8 queue_id, u8 channel_type,
goto out;
}
- acx->role_id = wl->role_id;
+ acx->role_id = wlvif->role_id;
acx->queue_id = queue_id;
acx->channel_type = channel_type;
acx->tsid = tsid;
@@ -1098,7 +1082,8 @@ out:
return ret;
}
-int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable)
+int wl1271_acx_bet_enable(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ bool enable)
{
struct wl1271_acx_bet_enable *acx = NULL;
int ret = 0;
@@ -1114,7 +1099,7 @@ int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable)
goto out;
}
- acx->role_id = wl->role_id;
+ acx->role_id = wlvif->role_id;
acx->enable = enable ? CONF_BET_MODE_ENABLE : CONF_BET_MODE_DISABLE;
acx->max_consecutive = wl->conf.conn.bet_max_consecutive;
@@ -1129,7 +1114,8 @@ out:
return ret;
}
-int wl1271_acx_arp_ip_filter(struct wl1271 *wl, u8 enable, __be32 address)
+int wl1271_acx_arp_ip_filter(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 enable, __be32 address)
{
struct wl1271_acx_arp_filter *acx;
int ret;
@@ -1142,7 +1128,7 @@ int wl1271_acx_arp_ip_filter(struct wl1271 *wl, u8 enable, __be32 address)
goto out;
}
- acx->role_id = wl->role_id;
+ acx->role_id = wlvif->role_id;
acx->version = ACX_IPV4_VERSION;
acx->enable = enable;
@@ -1189,7 +1175,8 @@ out:
return ret;
}
-int wl1271_acx_keep_alive_mode(struct wl1271 *wl, bool enable)
+int wl1271_acx_keep_alive_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ bool enable)
{
struct wl1271_acx_keep_alive_mode *acx = NULL;
int ret = 0;
@@ -1202,7 +1189,7 @@ int wl1271_acx_keep_alive_mode(struct wl1271 *wl, bool enable)
goto out;
}
- acx->role_id = wl->role_id;
+ acx->role_id = wlvif->role_id;
acx->enabled = enable;
ret = wl1271_cmd_configure(wl, ACX_KEEP_ALIVE_MODE, acx, sizeof(*acx));
@@ -1216,7 +1203,8 @@ out:
return ret;
}
-int wl1271_acx_keep_alive_config(struct wl1271 *wl, u8 index, u8 tpl_valid)
+int wl1271_acx_keep_alive_config(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 index, u8 tpl_valid)
{
struct wl1271_acx_keep_alive_config *acx = NULL;
int ret = 0;
@@ -1229,7 +1217,7 @@ int wl1271_acx_keep_alive_config(struct wl1271 *wl, u8 index, u8 tpl_valid)
goto out;
}
- acx->role_id = wl->role_id;
+ acx->role_id = wlvif->role_id;
acx->period = cpu_to_le32(wl->conf.conn.keep_alive_interval);
acx->index = index;
acx->tpl_validation = tpl_valid;
@@ -1247,8 +1235,8 @@ out:
return ret;
}
-int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, bool enable,
- s16 thold, u8 hyst)
+int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ bool enable, s16 thold, u8 hyst)
{
struct wl1271_acx_rssi_snr_trigger *acx = NULL;
int ret = 0;
@@ -1261,9 +1249,9 @@ int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, bool enable,
goto out;
}
- wl->last_rssi_event = -1;
+ wlvif->last_rssi_event = -1;
- acx->role_id = wl->role_id;
+ acx->role_id = wlvif->role_id;
acx->pacing = cpu_to_le16(wl->conf.roam_trigger.trigger_pacing);
acx->metric = WL1271_ACX_TRIG_METRIC_RSSI_BEACON;
acx->type = WL1271_ACX_TRIG_TYPE_EDGE;
@@ -1288,7 +1276,8 @@ out:
return ret;
}
-int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl)
+int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
struct wl1271_acx_rssi_snr_avg_weights *acx = NULL;
struct conf_roam_trigger_settings *c = &wl->conf.roam_trigger;
@@ -1302,7 +1291,7 @@ int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl)
goto out;
}
- acx->role_id = wl->role_id;
+ acx->role_id = wlvif->role_id;
acx->rssi_beacon = c->avg_weight_rssi_beacon;
acx->rssi_data = c->avg_weight_rssi_data;
acx->snr_beacon = c->avg_weight_snr_beacon;
@@ -1367,6 +1356,7 @@ out:
}
int wl1271_acx_set_ht_information(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
u16 ht_operation_mode)
{
struct wl1271_acx_ht_information *acx;
@@ -1380,7 +1370,7 @@ int wl1271_acx_set_ht_information(struct wl1271 *wl,
goto out;
}
- acx->role_id = wl->role_id;
+ acx->role_id = wlvif->role_id;
acx->ht_protection =
(u8)(ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION);
acx->rifs_mode = 0;
@@ -1402,7 +1392,8 @@ out:
}
/* Configure BA session initiator/receiver parameters setting in the FW. */
-int wl12xx_acx_set_ba_initiator_policy(struct wl1271 *wl)
+int wl12xx_acx_set_ba_initiator_policy(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
struct wl1271_acx_ba_initiator_policy *acx;
int ret;
@@ -1416,7 +1407,7 @@ int wl12xx_acx_set_ba_initiator_policy(struct wl1271 *wl)
}
/* set for the current role */
- acx->role_id = wl->role_id;
+ acx->role_id = wlvif->role_id;
acx->tid_bitmap = wl->conf.ht.tx_ba_tid_bitmap;
acx->win_size = wl->conf.ht.tx_ba_win_size;
acx->inactivity_timeout = wl->conf.ht.inactivity_timeout;
@@ -1494,7 +1485,8 @@ out:
return ret;
}
-int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, bool enable)
+int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ bool enable)
{
struct wl1271_acx_ps_rx_streaming *rx_streaming;
u32 conf_queues, enable_queues;
@@ -1523,7 +1515,7 @@ int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, bool enable)
if (!(conf_queues & BIT(i)))
continue;
- rx_streaming->role_id = wl->role_id;
+ rx_streaming->role_id = wlvif->role_id;
rx_streaming->tid = i;
rx_streaming->enable = enable_queues & BIT(i);
rx_streaming->period = wl->conf.rx_streaming.interval;
@@ -1542,7 +1534,7 @@ out:
return ret;
}
-int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl)
+int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct wl1271_acx_ap_max_tx_retry *acx = NULL;
int ret;
@@ -1553,7 +1545,7 @@ int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl)
if (!acx)
return -ENOMEM;
- acx->role_id = wl->role_id;
+ acx->role_id = wlvif->role_id;
acx->max_tx_retry = cpu_to_le16(wl->conf.tx.max_tx_retries);
ret = wl1271_cmd_configure(wl, ACX_MAX_TX_FAILURE, acx, sizeof(*acx));
@@ -1567,7 +1559,7 @@ out:
return ret;
}
-int wl1271_acx_config_ps(struct wl1271 *wl)
+int wl12xx_acx_config_ps(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct wl1271_acx_config_ps *config_ps;
int ret;
@@ -1582,7 +1574,7 @@ int wl1271_acx_config_ps(struct wl1271 *wl)
config_ps->exit_retries = wl->conf.conn.psm_exit_retries;
config_ps->enter_retries = wl->conf.conn.psm_entry_retries;
- config_ps->null_data_rate = cpu_to_le32(wl->basic_rate);
+ config_ps->null_data_rate = cpu_to_le32(wlvif->basic_rate);
ret = wl1271_cmd_configure(wl, ACX_CONFIG_PS, config_ps,
sizeof(*config_ps));
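
[editor's note] Every helper in acx.c now receives the wl12xx_vif it configures and uses wlvif->role_id instead of the single wl->role_id, groundwork for running more than one interface per chip. A hedged sketch of that shape; acx_example and ACX_EXAMPLE are hypothetical stand-ins for the real command structure and ID:

#include <linux/slab.h>

struct acx_example {
	struct acx_header header;
	u8 role_id;
	u8 value;
	u8 pad[2];
} __packed;

/* Sketch only: per-vif configuration helper in the style of this file. */
static int example_acx_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			   u8 value)
{
	struct acx_example *acx;
	int ret;

	acx = kzalloc(sizeof(*acx), GFP_KERNEL);
	if (!acx)
		return -ENOMEM;

	acx->role_id = wlvif->role_id;	/* per-interface, not wl->role_id */
	acx->value = value;

	ret = wl1271_cmd_configure(wl, ACX_EXAMPLE, acx, sizeof(*acx));
	kfree(acx);
	return ret;
}
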
diff --git a/drivers/net/wireless/wl12xx/acx.h b/drivers/net/wireless/wl12xx/acx.h
index e3f93b4b3429..69892b40c2df 100644
--- a/drivers/net/wireless/wl12xx/acx.h
+++ b/drivers/net/wireless/wl12xx/acx.h
@@ -171,13 +171,6 @@ struct acx_rx_msdu_lifetime {
__le32 lifetime;
} __packed;
-struct acx_packet_detection {
- struct acx_header header;
-
- __le32 threshold;
-} __packed;
-
-
enum acx_slot_type {
SLOT_TIME_LONG = 0,
SLOT_TIME_SHORT = 1,
@@ -654,11 +647,6 @@ struct acx_rate_class {
u8 reserved;
};
-#define ACX_TX_BASIC_RATE 0
-#define ACX_TX_AP_FULL_RATE 1
-#define ACX_TX_BASIC_RATE_P2P 2
-#define ACX_TX_AP_MODE_MGMT_RATE 4
-#define ACX_TX_AP_MODE_BCST_RATE 5
struct acx_rate_policy {
struct acx_header header;
@@ -1234,39 +1222,48 @@ enum {
};
-int wl1271_acx_wake_up_conditions(struct wl1271 *wl);
+int wl1271_acx_wake_up_conditions(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif);
int wl1271_acx_sleep_auth(struct wl1271 *wl, u8 sleep_auth);
-int wl1271_acx_tx_power(struct wl1271 *wl, int power);
-int wl1271_acx_feature_cfg(struct wl1271 *wl);
+int wl1271_acx_tx_power(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ int power);
+int wl1271_acx_feature_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif);
int wl1271_acx_mem_map(struct wl1271 *wl,
struct acx_header *mem_map, size_t len);
int wl1271_acx_rx_msdu_life_time(struct wl1271 *wl);
-int wl1271_acx_pd_threshold(struct wl1271 *wl);
-int wl1271_acx_slot(struct wl1271 *wl, enum acx_slot_type slot_time);
-int wl1271_acx_group_address_tbl(struct wl1271 *wl, bool enable,
- void *mc_list, u32 mc_list_len);
-int wl1271_acx_service_period_timeout(struct wl1271 *wl);
-int wl1271_acx_rts_threshold(struct wl1271 *wl, u32 rts_threshold);
+int wl1271_acx_slot(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ enum acx_slot_type slot_time);
+int wl1271_acx_group_address_tbl(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ bool enable, void *mc_list, u32 mc_list_len);
+int wl1271_acx_service_period_timeout(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif);
+int wl1271_acx_rts_threshold(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u32 rts_threshold);
int wl1271_acx_dco_itrim_params(struct wl1271 *wl);
-int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter);
-int wl1271_acx_beacon_filter_table(struct wl1271 *wl);
-int wl1271_acx_conn_monit_params(struct wl1271 *wl, bool enable);
+int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ bool enable_filter);
+int wl1271_acx_beacon_filter_table(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif);
+int wl1271_acx_conn_monit_params(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ bool enable);
int wl1271_acx_sg_enable(struct wl1271 *wl, bool enable);
int wl12xx_acx_sg_cfg(struct wl1271 *wl);
int wl1271_acx_cca_threshold(struct wl1271 *wl);
-int wl1271_acx_bcn_dtim_options(struct wl1271 *wl);
-int wl1271_acx_aid(struct wl1271 *wl, u16 aid);
+int wl1271_acx_bcn_dtim_options(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl1271_acx_aid(struct wl1271 *wl, struct wl12xx_vif *wlvif, u16 aid);
int wl1271_acx_event_mbox_mask(struct wl1271 *wl, u32 event_mask);
-int wl1271_acx_set_preamble(struct wl1271 *wl, enum acx_preamble_type preamble);
-int wl1271_acx_cts_protect(struct wl1271 *wl,
+int wl1271_acx_set_preamble(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ enum acx_preamble_type preamble);
+int wl1271_acx_cts_protect(struct wl1271 *wl, struct wl12xx_vif *wlvif,
enum acx_ctsprotect_type ctsprotect);
int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats);
-int wl1271_acx_sta_rate_policies(struct wl1271 *wl);
+int wl1271_acx_sta_rate_policies(struct wl1271 *wl, struct wl12xx_vif *wlvif);
int wl1271_acx_ap_rate_policy(struct wl1271 *wl, struct conf_tx_rate_class *c,
u8 idx);
-int wl1271_acx_ac_cfg(struct wl1271 *wl, u8 ac, u8 cw_min, u16 cw_max,
- u8 aifsn, u16 txop);
-int wl1271_acx_tid_cfg(struct wl1271 *wl, u8 queue_id, u8 channel_type,
+int wl1271_acx_ac_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 ac, u8 cw_min, u16 cw_max, u8 aifsn, u16 txop);
+int wl1271_acx_tid_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 queue_id, u8 channel_type,
u8 tsid, u8 ps_scheme, u8 ack_policy,
u32 apsd_conf0, u32 apsd_conf1);
int wl1271_acx_frag_threshold(struct wl1271 *wl, u32 frag_threshold);
@@ -1276,26 +1273,34 @@ int wl1271_acx_init_mem_config(struct wl1271 *wl);
int wl1271_acx_host_if_cfg_bitmap(struct wl1271 *wl, u32 host_cfg_bitmap);
int wl1271_acx_init_rx_interrupt(struct wl1271 *wl);
int wl1271_acx_smart_reflex(struct wl1271 *wl);
-int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable);
-int wl1271_acx_arp_ip_filter(struct wl1271 *wl, u8 enable, __be32 address);
+int wl1271_acx_bet_enable(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ bool enable);
+int wl1271_acx_arp_ip_filter(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 enable, __be32 address);
int wl1271_acx_pm_config(struct wl1271 *wl);
-int wl1271_acx_keep_alive_mode(struct wl1271 *wl, bool enable);
-int wl1271_acx_keep_alive_config(struct wl1271 *wl, u8 index, u8 tpl_valid);
-int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, bool enable,
- s16 thold, u8 hyst);
-int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl);
+int wl1271_acx_keep_alive_mode(struct wl1271 *wl, struct wl12xx_vif *vif,
+ bool enable);
+int wl1271_acx_keep_alive_config(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 index, u8 tpl_valid);
+int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ bool enable, s16 thold, u8 hyst);
+int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif);
int wl1271_acx_set_ht_capabilities(struct wl1271 *wl,
struct ieee80211_sta_ht_cap *ht_cap,
bool allow_ht_operation, u8 hlid);
int wl1271_acx_set_ht_information(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
u16 ht_operation_mode);
-int wl12xx_acx_set_ba_initiator_policy(struct wl1271 *wl);
+int wl12xx_acx_set_ba_initiator_policy(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif);
int wl12xx_acx_set_ba_receiver_session(struct wl1271 *wl, u8 tid_index,
u16 ssn, bool enable, u8 peer_hlid);
int wl1271_acx_tsf_info(struct wl1271 *wl, u64 *mactime);
-int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, bool enable);
-int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl);
-int wl1271_acx_config_ps(struct wl1271 *wl);
+int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ bool enable);
+int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl12xx_acx_config_ps(struct wl1271 *wl, struct wl12xx_vif *wlvif);
int wl1271_acx_set_inconnection_sta(struct wl1271 *wl, u8 *addr);
int wl1271_acx_fm_coex(struct wl1271 *wl);
int wl12xx_acx_set_rate_mgmt_params(struct wl1271 *wl);
diff --git a/drivers/net/wireless/wl12xx/boot.c b/drivers/net/wireless/wl12xx/boot.c
index 681337914976..8f9cf5a816ea 100644
--- a/drivers/net/wireless/wl12xx/boot.c
+++ b/drivers/net/wireless/wl12xx/boot.c
@@ -25,6 +25,7 @@
#include <linux/wl12xx.h>
#include <linux/export.h>
+#include "debug.h"
#include "acx.h"
#include "reg.h"
#include "boot.h"
@@ -347,6 +348,9 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
nvs_ptr += 3;
for (i = 0; i < burst_len; i++) {
+ if (nvs_ptr + 3 >= (u8 *) wl->nvs + nvs_len)
+ goto out_badnvs;
+
val = (nvs_ptr[0] | (nvs_ptr[1] << 8)
| (nvs_ptr[2] << 16) | (nvs_ptr[3] << 24));
@@ -358,6 +362,9 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
nvs_ptr += 4;
dest_addr += 4;
}
+
+ if (nvs_ptr >= (u8 *) wl->nvs + nvs_len)
+ goto out_badnvs;
}
/*
@@ -369,6 +376,10 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
*/
nvs_ptr = (u8 *)wl->nvs +
ALIGN(nvs_ptr - (u8 *)wl->nvs + 7, 4);
+
+ if (nvs_ptr >= (u8 *) wl->nvs + nvs_len)
+ goto out_badnvs;
+
nvs_len -= nvs_ptr - (u8 *)wl->nvs;
/* Now we must set the partition correctly */
@@ -384,6 +395,10 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
kfree(nvs_aligned);
return 0;
+
+out_badnvs:
+ wl1271_error("nvs data is malformed");
+ return -EILSEQ;
}
static void wl1271_boot_enable_interrupts(struct wl1271 *wl)
diff --git a/drivers/net/wireless/wl12xx/cmd.c b/drivers/net/wireless/wl12xx/cmd.c
index a52299e548fa..25990bd38be6 100644
--- a/drivers/net/wireless/wl12xx/cmd.c
+++ b/drivers/net/wireless/wl12xx/cmd.c
@@ -29,6 +29,7 @@
#include <linux/slab.h>
#include "wl12xx.h"
+#include "debug.h"
#include "reg.h"
#include "io.h"
#include "acx.h"
@@ -120,6 +121,11 @@ int wl1271_cmd_general_parms(struct wl1271 *wl)
if (!wl->nvs)
return -ENODEV;
+ if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) {
+ wl1271_warning("FEM index from INI out of bounds");
+ return -EINVAL;
+ }
+
gen_parms = kzalloc(sizeof(*gen_parms), GFP_KERNEL);
if (!gen_parms)
return -ENOMEM;
@@ -143,6 +149,12 @@ int wl1271_cmd_general_parms(struct wl1271 *wl)
gp->tx_bip_fem_manufacturer =
gen_parms->general_params.tx_bip_fem_manufacturer;
+ if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) {
+ wl1271_warning("FEM index from FW out of bounds");
+ ret = -EINVAL;
+ goto out;
+ }
+
wl1271_debug(DEBUG_CMD, "FEM autodetect: %s, manufacturer: %d\n",
answer ? "auto" : "manual", gp->tx_bip_fem_manufacturer);
@@ -162,6 +174,11 @@ int wl128x_cmd_general_parms(struct wl1271 *wl)
if (!wl->nvs)
return -ENODEV;
+ if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) {
+ wl1271_warning("FEM index from ini out of bounds");
+ return -EINVAL;
+ }
+
gen_parms = kzalloc(sizeof(*gen_parms), GFP_KERNEL);
if (!gen_parms)
return -ENOMEM;
@@ -186,6 +203,12 @@ int wl128x_cmd_general_parms(struct wl1271 *wl)
gp->tx_bip_fem_manufacturer =
gen_parms->general_params.tx_bip_fem_manufacturer;
+ if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) {
+ wl1271_warning("FEM index from FW out of bounds");
+ ret = -EINVAL;
+ goto out;
+ }
+
wl1271_debug(DEBUG_CMD, "FEM autodetect: %s, manufacturer: %d\n",
answer ? "auto" : "manual", gp->tx_bip_fem_manufacturer);
@@ -358,7 +381,8 @@ static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask)
return 0;
}
-int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 role_type, u8 *role_id)
+int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 *addr, u8 role_type,
+ u8 *role_id)
{
struct wl12xx_cmd_role_enable *cmd;
int ret;
@@ -381,7 +405,7 @@ int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 role_type, u8 *role_id)
goto out_free;
}
- memcpy(cmd->mac_address, wl->mac_addr, ETH_ALEN);
+ memcpy(cmd->mac_address, addr, ETH_ALEN);
cmd->role_type = role_type;
ret = wl1271_cmd_send(wl, CMD_ROLE_ENABLE, cmd, sizeof(*cmd), 0);
@@ -433,37 +457,41 @@ out:
return ret;
}
-static int wl12xx_allocate_link(struct wl1271 *wl, u8 *hlid)
+int wl12xx_allocate_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid)
{
u8 link = find_first_zero_bit(wl->links_map, WL12XX_MAX_LINKS);
if (link >= WL12XX_MAX_LINKS)
return -EBUSY;
__set_bit(link, wl->links_map);
+ __set_bit(link, wlvif->links_map);
*hlid = link;
return 0;
}
-static void wl12xx_free_link(struct wl1271 *wl, u8 *hlid)
+void wl12xx_free_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid)
{
if (*hlid == WL12XX_INVALID_LINK_ID)
return;
__clear_bit(*hlid, wl->links_map);
+ __clear_bit(*hlid, wlvif->links_map);
*hlid = WL12XX_INVALID_LINK_ID;
}
-static int wl12xx_get_new_session_id(struct wl1271 *wl)
+static int wl12xx_get_new_session_id(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
- if (wl->session_counter >= SESSION_COUNTER_MAX)
- wl->session_counter = 0;
+ if (wlvif->session_counter >= SESSION_COUNTER_MAX)
+ wlvif->session_counter = 0;
- wl->session_counter++;
+ wlvif->session_counter++;
- return wl->session_counter;
+ return wlvif->session_counter;
}
-int wl12xx_cmd_role_start_dev(struct wl1271 *wl)
+static int wl12xx_cmd_role_start_dev(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
struct wl12xx_cmd_role_start *cmd;
int ret;
@@ -474,20 +502,20 @@ int wl12xx_cmd_role_start_dev(struct wl1271 *wl)
goto out;
}
- wl1271_debug(DEBUG_CMD, "cmd role start dev %d", wl->dev_role_id);
+ wl1271_debug(DEBUG_CMD, "cmd role start dev %d", wlvif->dev_role_id);
- cmd->role_id = wl->dev_role_id;
- if (wl->band == IEEE80211_BAND_5GHZ)
+ cmd->role_id = wlvif->dev_role_id;
+ if (wlvif->band == IEEE80211_BAND_5GHZ)
cmd->band = WL12XX_BAND_5GHZ;
- cmd->channel = wl->channel;
+ cmd->channel = wlvif->channel;
- if (wl->dev_hlid == WL12XX_INVALID_LINK_ID) {
- ret = wl12xx_allocate_link(wl, &wl->dev_hlid);
+ if (wlvif->dev_hlid == WL12XX_INVALID_LINK_ID) {
+ ret = wl12xx_allocate_link(wl, wlvif, &wlvif->dev_hlid);
if (ret)
goto out_free;
}
- cmd->device.hlid = wl->dev_hlid;
- cmd->device.session = wl->session_counter;
+ cmd->device.hlid = wlvif->dev_hlid;
+ cmd->device.session = wlvif->session_counter;
wl1271_debug(DEBUG_CMD, "role start: roleid=%d, hlid=%d, session=%d",
cmd->role_id, cmd->device.hlid, cmd->device.session);
@@ -502,9 +530,7 @@ int wl12xx_cmd_role_start_dev(struct wl1271 *wl)
err_hlid:
/* clear links on error */
- __clear_bit(wl->dev_hlid, wl->links_map);
- wl->dev_hlid = WL12XX_INVALID_LINK_ID;
-
+ wl12xx_free_link(wl, wlvif, &wlvif->dev_hlid);
out_free:
kfree(cmd);
@@ -513,12 +539,13 @@ out:
return ret;
}
-int wl12xx_cmd_role_stop_dev(struct wl1271 *wl)
+static int wl12xx_cmd_role_stop_dev(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
struct wl12xx_cmd_role_stop *cmd;
int ret;
- if (WARN_ON(wl->dev_hlid == WL12XX_INVALID_LINK_ID))
+ if (WARN_ON(wlvif->dev_hlid == WL12XX_INVALID_LINK_ID))
return -EINVAL;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
@@ -529,7 +556,7 @@ int wl12xx_cmd_role_stop_dev(struct wl1271 *wl)
wl1271_debug(DEBUG_CMD, "cmd role stop dev");
- cmd->role_id = wl->dev_role_id;
+ cmd->role_id = wlvif->dev_role_id;
cmd->disc_type = DISCONNECT_IMMEDIATE;
cmd->reason = cpu_to_le16(WLAN_REASON_UNSPECIFIED);
@@ -545,7 +572,7 @@ int wl12xx_cmd_role_stop_dev(struct wl1271 *wl)
goto out_free;
}
- wl12xx_free_link(wl, &wl->dev_hlid);
+ wl12xx_free_link(wl, wlvif, &wlvif->dev_hlid);
out_free:
kfree(cmd);
@@ -554,8 +581,9 @@ out:
return ret;
}
-int wl12xx_cmd_role_start_sta(struct wl1271 *wl)
+int wl12xx_cmd_role_start_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
+ struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
struct wl12xx_cmd_role_start *cmd;
int ret;
@@ -565,33 +593,33 @@ int wl12xx_cmd_role_start_sta(struct wl1271 *wl)
goto out;
}
- wl1271_debug(DEBUG_CMD, "cmd role start sta %d", wl->role_id);
+ wl1271_debug(DEBUG_CMD, "cmd role start sta %d", wlvif->role_id);
- cmd->role_id = wl->role_id;
- if (wl->band == IEEE80211_BAND_5GHZ)
+ cmd->role_id = wlvif->role_id;
+ if (wlvif->band == IEEE80211_BAND_5GHZ)
cmd->band = WL12XX_BAND_5GHZ;
- cmd->channel = wl->channel;
- cmd->sta.basic_rate_set = cpu_to_le32(wl->basic_rate_set);
- cmd->sta.beacon_interval = cpu_to_le16(wl->beacon_int);
+ cmd->channel = wlvif->channel;
+ cmd->sta.basic_rate_set = cpu_to_le32(wlvif->basic_rate_set);
+ cmd->sta.beacon_interval = cpu_to_le16(wlvif->beacon_int);
cmd->sta.ssid_type = WL12XX_SSID_TYPE_ANY;
- cmd->sta.ssid_len = wl->ssid_len;
- memcpy(cmd->sta.ssid, wl->ssid, wl->ssid_len);
- memcpy(cmd->sta.bssid, wl->bssid, ETH_ALEN);
- cmd->sta.local_rates = cpu_to_le32(wl->rate_set);
+ cmd->sta.ssid_len = wlvif->ssid_len;
+ memcpy(cmd->sta.ssid, wlvif->ssid, wlvif->ssid_len);
+ memcpy(cmd->sta.bssid, vif->bss_conf.bssid, ETH_ALEN);
+ cmd->sta.local_rates = cpu_to_le32(wlvif->rate_set);
- if (wl->sta_hlid == WL12XX_INVALID_LINK_ID) {
- ret = wl12xx_allocate_link(wl, &wl->sta_hlid);
+ if (wlvif->sta.hlid == WL12XX_INVALID_LINK_ID) {
+ ret = wl12xx_allocate_link(wl, wlvif, &wlvif->sta.hlid);
if (ret)
goto out_free;
}
- cmd->sta.hlid = wl->sta_hlid;
- cmd->sta.session = wl12xx_get_new_session_id(wl);
- cmd->sta.remote_rates = cpu_to_le32(wl->rate_set);
+ cmd->sta.hlid = wlvif->sta.hlid;
+ cmd->sta.session = wl12xx_get_new_session_id(wl, wlvif);
+ cmd->sta.remote_rates = cpu_to_le32(wlvif->rate_set);
wl1271_debug(DEBUG_CMD, "role start: roleid=%d, hlid=%d, session=%d "
"basic_rate_set: 0x%x, remote_rates: 0x%x",
- wl->role_id, cmd->sta.hlid, cmd->sta.session,
- wl->basic_rate_set, wl->rate_set);
+ wlvif->role_id, cmd->sta.hlid, cmd->sta.session,
+ wlvif->basic_rate_set, wlvif->rate_set);
ret = wl1271_cmd_send(wl, CMD_ROLE_START, cmd, sizeof(*cmd), 0);
if (ret < 0) {
@@ -603,7 +631,7 @@ int wl12xx_cmd_role_start_sta(struct wl1271 *wl)
err_hlid:
/* clear links on error. */
- wl12xx_free_link(wl, &wl->sta_hlid);
+ wl12xx_free_link(wl, wlvif, &wlvif->sta.hlid);
out_free:
kfree(cmd);
@@ -613,12 +641,12 @@ out:
}
/* use this function to stop ibss as well */
-int wl12xx_cmd_role_stop_sta(struct wl1271 *wl)
+int wl12xx_cmd_role_stop_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct wl12xx_cmd_role_stop *cmd;
int ret;
- if (WARN_ON(wl->sta_hlid == WL12XX_INVALID_LINK_ID))
+ if (WARN_ON(wlvif->sta.hlid == WL12XX_INVALID_LINK_ID))
return -EINVAL;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
@@ -627,9 +655,9 @@ int wl12xx_cmd_role_stop_sta(struct wl1271 *wl)
goto out;
}
- wl1271_debug(DEBUG_CMD, "cmd role stop sta %d", wl->role_id);
+ wl1271_debug(DEBUG_CMD, "cmd role stop sta %d", wlvif->role_id);
- cmd->role_id = wl->role_id;
+ cmd->role_id = wlvif->role_id;
cmd->disc_type = DISCONNECT_IMMEDIATE;
cmd->reason = cpu_to_le16(WLAN_REASON_UNSPECIFIED);
@@ -639,7 +667,7 @@ int wl12xx_cmd_role_stop_sta(struct wl1271 *wl)
goto out_free;
}
- wl12xx_free_link(wl, &wl->sta_hlid);
+ wl12xx_free_link(wl, wlvif, &wlvif->sta.hlid);
out_free:
kfree(cmd);
@@ -648,16 +676,17 @@ out:
return ret;
}
-int wl12xx_cmd_role_start_ap(struct wl1271 *wl)
+int wl12xx_cmd_role_start_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct wl12xx_cmd_role_start *cmd;
- struct ieee80211_bss_conf *bss_conf = &wl->vif->bss_conf;
+ struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
+ struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
int ret;
- wl1271_debug(DEBUG_CMD, "cmd role start ap %d", wl->role_id);
+ wl1271_debug(DEBUG_CMD, "cmd role start ap %d", wlvif->role_id);
/* trying to use hidden SSID with an old hostapd version */
- if (wl->ssid_len == 0 && !bss_conf->hidden_ssid) {
+ if (wlvif->ssid_len == 0 && !bss_conf->hidden_ssid) {
wl1271_error("got a null SSID from beacon/bss");
ret = -EINVAL;
goto out;
@@ -669,30 +698,30 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl)
goto out;
}
- ret = wl12xx_allocate_link(wl, &wl->ap_global_hlid);
+ ret = wl12xx_allocate_link(wl, wlvif, &wlvif->ap.global_hlid);
if (ret < 0)
goto out_free;
- ret = wl12xx_allocate_link(wl, &wl->ap_bcast_hlid);
+ ret = wl12xx_allocate_link(wl, wlvif, &wlvif->ap.bcast_hlid);
if (ret < 0)
goto out_free_global;
- cmd->role_id = wl->role_id;
+ cmd->role_id = wlvif->role_id;
cmd->ap.aging_period = cpu_to_le16(wl->conf.tx.ap_aging_period);
cmd->ap.bss_index = WL1271_AP_BSS_INDEX;
- cmd->ap.global_hlid = wl->ap_global_hlid;
- cmd->ap.broadcast_hlid = wl->ap_bcast_hlid;
- cmd->ap.basic_rate_set = cpu_to_le32(wl->basic_rate_set);
- cmd->ap.beacon_interval = cpu_to_le16(wl->beacon_int);
+ cmd->ap.global_hlid = wlvif->ap.global_hlid;
+ cmd->ap.broadcast_hlid = wlvif->ap.bcast_hlid;
+ cmd->ap.basic_rate_set = cpu_to_le32(wlvif->basic_rate_set);
+ cmd->ap.beacon_interval = cpu_to_le16(wlvif->beacon_int);
cmd->ap.dtim_interval = bss_conf->dtim_period;
cmd->ap.beacon_expiry = WL1271_AP_DEF_BEACON_EXP;
- cmd->channel = wl->channel;
+ cmd->channel = wlvif->channel;
if (!bss_conf->hidden_ssid) {
/* take the SSID from the beacon for backward compatibility */
cmd->ap.ssid_type = WL12XX_SSID_TYPE_PUBLIC;
- cmd->ap.ssid_len = wl->ssid_len;
- memcpy(cmd->ap.ssid, wl->ssid, wl->ssid_len);
+ cmd->ap.ssid_len = wlvif->ssid_len;
+ memcpy(cmd->ap.ssid, wlvif->ssid, wlvif->ssid_len);
} else {
cmd->ap.ssid_type = WL12XX_SSID_TYPE_HIDDEN;
cmd->ap.ssid_len = bss_conf->ssid_len;
@@ -701,7 +730,7 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl)
cmd->ap.local_rates = cpu_to_le32(0xffffffff);
- switch (wl->band) {
+ switch (wlvif->band) {
case IEEE80211_BAND_2GHZ:
cmd->band = RADIO_BAND_2_4GHZ;
break;
@@ -709,7 +738,7 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl)
cmd->band = RADIO_BAND_5GHZ;
break;
default:
- wl1271_warning("ap start - unknown band: %d", (int)wl->band);
+ wl1271_warning("ap start - unknown band: %d", (int)wlvif->band);
cmd->band = RADIO_BAND_2_4GHZ;
break;
}
@@ -723,10 +752,10 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl)
goto out_free;
out_free_bcast:
- wl12xx_free_link(wl, &wl->ap_bcast_hlid);
+ wl12xx_free_link(wl, wlvif, &wlvif->ap.bcast_hlid);
out_free_global:
- wl12xx_free_link(wl, &wl->ap_global_hlid);
+ wl12xx_free_link(wl, wlvif, &wlvif->ap.global_hlid);
out_free:
kfree(cmd);
@@ -735,7 +764,7 @@ out:
return ret;
}
-int wl12xx_cmd_role_stop_ap(struct wl1271 *wl)
+int wl12xx_cmd_role_stop_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct wl12xx_cmd_role_stop *cmd;
int ret;
@@ -746,9 +775,9 @@ int wl12xx_cmd_role_stop_ap(struct wl1271 *wl)
goto out;
}
- wl1271_debug(DEBUG_CMD, "cmd role stop ap %d", wl->role_id);
+ wl1271_debug(DEBUG_CMD, "cmd role stop ap %d", wlvif->role_id);
- cmd->role_id = wl->role_id;
+ cmd->role_id = wlvif->role_id;
ret = wl1271_cmd_send(wl, CMD_ROLE_STOP, cmd, sizeof(*cmd), 0);
if (ret < 0) {
@@ -756,8 +785,8 @@ int wl12xx_cmd_role_stop_ap(struct wl1271 *wl)
goto out_free;
}
- wl12xx_free_link(wl, &wl->ap_bcast_hlid);
- wl12xx_free_link(wl, &wl->ap_global_hlid);
+ wl12xx_free_link(wl, wlvif, &wlvif->ap.bcast_hlid);
+ wl12xx_free_link(wl, wlvif, &wlvif->ap.global_hlid);
out_free:
kfree(cmd);
@@ -766,10 +795,11 @@ out:
return ret;
}
-int wl12xx_cmd_role_start_ibss(struct wl1271 *wl)
+int wl12xx_cmd_role_start_ibss(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
+ struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
struct wl12xx_cmd_role_start *cmd;
- struct ieee80211_bss_conf *bss_conf = &wl->vif->bss_conf;
+ struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
int ret;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
@@ -778,35 +808,36 @@ int wl12xx_cmd_role_start_ibss(struct wl1271 *wl)
goto out;
}
- wl1271_debug(DEBUG_CMD, "cmd role start ibss %d", wl->role_id);
+ wl1271_debug(DEBUG_CMD, "cmd role start ibss %d", wlvif->role_id);
- cmd->role_id = wl->role_id;
- if (wl->band == IEEE80211_BAND_5GHZ)
+ cmd->role_id = wlvif->role_id;
+ if (wlvif->band == IEEE80211_BAND_5GHZ)
cmd->band = WL12XX_BAND_5GHZ;
- cmd->channel = wl->channel;
- cmd->ibss.basic_rate_set = cpu_to_le32(wl->basic_rate_set);
- cmd->ibss.beacon_interval = cpu_to_le16(wl->beacon_int);
+ cmd->channel = wlvif->channel;
+ cmd->ibss.basic_rate_set = cpu_to_le32(wlvif->basic_rate_set);
+ cmd->ibss.beacon_interval = cpu_to_le16(wlvif->beacon_int);
cmd->ibss.dtim_interval = bss_conf->dtim_period;
cmd->ibss.ssid_type = WL12XX_SSID_TYPE_ANY;
- cmd->ibss.ssid_len = wl->ssid_len;
- memcpy(cmd->ibss.ssid, wl->ssid, wl->ssid_len);
- memcpy(cmd->ibss.bssid, wl->bssid, ETH_ALEN);
- cmd->sta.local_rates = cpu_to_le32(wl->rate_set);
+ cmd->ibss.ssid_len = wlvif->ssid_len;
+ memcpy(cmd->ibss.ssid, wlvif->ssid, wlvif->ssid_len);
+ memcpy(cmd->ibss.bssid, vif->bss_conf.bssid, ETH_ALEN);
+ cmd->sta.local_rates = cpu_to_le32(wlvif->rate_set);
- if (wl->sta_hlid == WL12XX_INVALID_LINK_ID) {
- ret = wl12xx_allocate_link(wl, &wl->sta_hlid);
+ if (wlvif->sta.hlid == WL12XX_INVALID_LINK_ID) {
+ ret = wl12xx_allocate_link(wl, wlvif, &wlvif->sta.hlid);
if (ret)
goto out_free;
}
- cmd->ibss.hlid = wl->sta_hlid;
- cmd->ibss.remote_rates = cpu_to_le32(wl->rate_set);
+ cmd->ibss.hlid = wlvif->sta.hlid;
+ cmd->ibss.remote_rates = cpu_to_le32(wlvif->rate_set);
wl1271_debug(DEBUG_CMD, "role start: roleid=%d, hlid=%d, session=%d "
"basic_rate_set: 0x%x, remote_rates: 0x%x",
- wl->role_id, cmd->sta.hlid, cmd->sta.session,
- wl->basic_rate_set, wl->rate_set);
+ wlvif->role_id, cmd->sta.hlid, cmd->sta.session,
+ wlvif->basic_rate_set, wlvif->rate_set);
- wl1271_debug(DEBUG_CMD, "wl->bssid = %pM", wl->bssid);
+ wl1271_debug(DEBUG_CMD, "vif->bss_conf.bssid = %pM",
+ vif->bss_conf.bssid);
ret = wl1271_cmd_send(wl, CMD_ROLE_START, cmd, sizeof(*cmd), 0);
if (ret < 0) {
@@ -818,7 +849,7 @@ int wl12xx_cmd_role_start_ibss(struct wl1271 *wl)
err_hlid:
/* clear links on error. */
- wl12xx_free_link(wl, &wl->sta_hlid);
+ wl12xx_free_link(wl, wlvif, &wlvif->sta.hlid);
out_free:
kfree(cmd);
@@ -962,7 +993,8 @@ out:
return ret;
}
-int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode)
+int wl1271_cmd_ps_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 ps_mode)
{
struct wl1271_cmd_ps_params *ps_params = NULL;
int ret = 0;
@@ -975,7 +1007,7 @@ int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode)
goto out;
}
- ps_params->role_id = wl->role_id;
+ ps_params->role_id = wlvif->role_id;
ps_params->ps_mode = ps_mode;
ret = wl1271_cmd_send(wl, CMD_SET_PS_MODE, ps_params,
@@ -1030,7 +1062,7 @@ out:
return ret;
}
-int wl1271_cmd_build_null_data(struct wl1271 *wl)
+int wl12xx_cmd_build_null_data(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct sk_buff *skb = NULL;
int size;
@@ -1038,11 +1070,12 @@ int wl1271_cmd_build_null_data(struct wl1271 *wl)
int ret = -ENOMEM;
- if (wl->bss_type == BSS_TYPE_IBSS) {
+ if (wlvif->bss_type == BSS_TYPE_IBSS) {
size = sizeof(struct wl12xx_null_data_template);
ptr = NULL;
} else {
- skb = ieee80211_nullfunc_get(wl->hw, wl->vif);
+ skb = ieee80211_nullfunc_get(wl->hw,
+ wl12xx_wlvif_to_vif(wlvif));
if (!skb)
goto out;
size = skb->len;
@@ -1050,7 +1083,7 @@ int wl1271_cmd_build_null_data(struct wl1271 *wl)
}
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, ptr, size, 0,
- wl->basic_rate);
+ wlvif->basic_rate);
out:
dev_kfree_skb(skb);
@@ -1061,19 +1094,21 @@ out:
}
-int wl1271_cmd_build_klv_null_data(struct wl1271 *wl)
+int wl12xx_cmd_build_klv_null_data(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
+ struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
struct sk_buff *skb = NULL;
int ret = -ENOMEM;
- skb = ieee80211_nullfunc_get(wl->hw, wl->vif);
+ skb = ieee80211_nullfunc_get(wl->hw, vif);
if (!skb)
goto out;
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_KLV,
skb->data, skb->len,
CMD_TEMPL_KLV_IDX_NULL_DATA,
- wl->basic_rate);
+ wlvif->basic_rate);
out:
dev_kfree_skb(skb);
@@ -1084,32 +1119,35 @@ out:
}
-int wl1271_cmd_build_ps_poll(struct wl1271 *wl, u16 aid)
+int wl1271_cmd_build_ps_poll(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u16 aid)
{
+ struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
struct sk_buff *skb;
int ret = 0;
- skb = ieee80211_pspoll_get(wl->hw, wl->vif);
+ skb = ieee80211_pspoll_get(wl->hw, vif);
if (!skb)
goto out;
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_PS_POLL, skb->data,
- skb->len, 0, wl->basic_rate_set);
+ skb->len, 0, wlvif->basic_rate_set);
out:
dev_kfree_skb(skb);
return ret;
}
-int wl1271_cmd_build_probe_req(struct wl1271 *wl,
+int wl1271_cmd_build_probe_req(struct wl1271 *wl, struct wl12xx_vif *wlvif,
const u8 *ssid, size_t ssid_len,
const u8 *ie, size_t ie_len, u8 band)
{
+ struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
struct sk_buff *skb;
int ret;
u32 rate;
- skb = ieee80211_probereq_get(wl->hw, wl->vif, ssid, ssid_len,
+ skb = ieee80211_probereq_get(wl->hw, vif, ssid, ssid_len,
ie, ie_len);
if (!skb) {
ret = -ENOMEM;
@@ -1118,7 +1156,7 @@ int wl1271_cmd_build_probe_req(struct wl1271 *wl,
wl1271_dump(DEBUG_SCAN, "PROBE REQ: ", skb->data, skb->len);
- rate = wl1271_tx_min_rate_get(wl, wl->bitrate_masks[band]);
+ rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]);
if (band == IEEE80211_BAND_2GHZ)
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4,
skb->data, skb->len, 0, rate);
@@ -1132,20 +1170,22 @@ out:
}
struct sk_buff *wl1271_cmd_build_ap_probe_req(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
struct sk_buff *skb)
{
+ struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
int ret;
u32 rate;
if (!skb)
- skb = ieee80211_ap_probereq_get(wl->hw, wl->vif);
+ skb = ieee80211_ap_probereq_get(wl->hw, vif);
if (!skb)
goto out;
wl1271_dump(DEBUG_SCAN, "AP PROBE REQ: ", skb->data, skb->len);
- rate = wl1271_tx_min_rate_get(wl, wl->bitrate_masks[wl->band]);
- if (wl->band == IEEE80211_BAND_2GHZ)
+ rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[wlvif->band]);
+ if (wlvif->band == IEEE80211_BAND_2GHZ)
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4,
skb->data, skb->len, 0, rate);
else
@@ -1159,9 +1199,11 @@ out:
return skb;
}
-int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, __be32 ip_addr)
+int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ __be32 ip_addr)
{
int ret;
+ struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
struct wl12xx_arp_rsp_template tmpl;
struct ieee80211_hdr_3addr *hdr;
struct arphdr *arp_hdr;
@@ -1173,8 +1215,8 @@ int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, __be32 ip_addr)
hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
IEEE80211_STYPE_DATA |
IEEE80211_FCTL_TODS);
- memcpy(hdr->addr1, wl->vif->bss_conf.bssid, ETH_ALEN);
- memcpy(hdr->addr2, wl->vif->addr, ETH_ALEN);
+ memcpy(hdr->addr1, vif->bss_conf.bssid, ETH_ALEN);
+ memcpy(hdr->addr2, vif->addr, ETH_ALEN);
memset(hdr->addr3, 0xff, ETH_ALEN);
/* llc layer */
@@ -1190,25 +1232,26 @@ int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, __be32 ip_addr)
arp_hdr->ar_op = cpu_to_be16(ARPOP_REPLY);
/* arp payload */
- memcpy(tmpl.sender_hw, wl->vif->addr, ETH_ALEN);
+ memcpy(tmpl.sender_hw, vif->addr, ETH_ALEN);
tmpl.sender_ip = ip_addr;
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_ARP_RSP,
&tmpl, sizeof(tmpl), 0,
- wl->basic_rate);
+ wlvif->basic_rate);
return ret;
}
-int wl1271_build_qos_null_data(struct wl1271 *wl)
+int wl1271_build_qos_null_data(struct wl1271 *wl, struct ieee80211_vif *vif)
{
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
struct ieee80211_qos_hdr template;
memset(&template, 0, sizeof(template));
- memcpy(template.addr1, wl->bssid, ETH_ALEN);
- memcpy(template.addr2, wl->mac_addr, ETH_ALEN);
- memcpy(template.addr3, wl->bssid, ETH_ALEN);
+ memcpy(template.addr1, vif->bss_conf.bssid, ETH_ALEN);
+ memcpy(template.addr2, vif->addr, ETH_ALEN);
+ memcpy(template.addr3, vif->bss_conf.bssid, ETH_ALEN);
template.frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
IEEE80211_STYPE_QOS_NULLFUNC |
@@ -1219,7 +1262,7 @@ int wl1271_build_qos_null_data(struct wl1271 *wl)
return wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, &template,
sizeof(template), 0,
- wl->basic_rate);
+ wlvif->basic_rate);
}
int wl12xx_cmd_set_default_wep_key(struct wl1271 *wl, u8 id, u8 hlid)
@@ -1253,7 +1296,8 @@ out:
return ret;
}
-int wl1271_cmd_set_sta_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
+int wl1271_cmd_set_sta_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u16 action, u8 id, u8 key_type,
u8 key_size, const u8 *key, const u8 *addr,
u32 tx_seq_32, u16 tx_seq_16)
{
@@ -1261,7 +1305,7 @@ int wl1271_cmd_set_sta_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
int ret = 0;
/* hlid might have already been deleted */
- if (wl->sta_hlid == WL12XX_INVALID_LINK_ID)
+ if (wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
return 0;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
@@ -1270,7 +1314,7 @@ int wl1271_cmd_set_sta_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
goto out;
}
- cmd->hlid = wl->sta_hlid;
+ cmd->hlid = wlvif->sta.hlid;
if (key_type == KEY_WEP)
cmd->lid_key_type = WEP_DEFAULT_LID_TYPE;
@@ -1321,9 +1365,10 @@ out:
* TODO: merge with sta/ibss into 1 set_key function.
* note there are slight diffs
*/
-int wl1271_cmd_set_ap_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
- u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32,
- u16 tx_seq_16)
+int wl1271_cmd_set_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u16 action, u8 id, u8 key_type,
+ u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32,
+ u16 tx_seq_16)
{
struct wl1271_cmd_set_keys *cmd;
int ret = 0;
@@ -1333,7 +1378,7 @@ int wl1271_cmd_set_ap_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
if (!cmd)
return -ENOMEM;
- if (hlid == wl->ap_bcast_hlid) {
+ if (hlid == wlvif->ap.bcast_hlid) {
if (key_type == KEY_WEP)
lid_type = WEP_DEFAULT_LID_TYPE;
else
@@ -1411,7 +1456,8 @@ out:
return ret;
}
-int wl12xx_cmd_add_peer(struct wl1271 *wl, struct ieee80211_sta *sta, u8 hlid)
+int wl12xx_cmd_add_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct ieee80211_sta *sta, u8 hlid)
{
struct wl12xx_cmd_add_peer *cmd;
int i, ret;
@@ -1438,13 +1484,13 @@ int wl12xx_cmd_add_peer(struct wl1271 *wl, struct ieee80211_sta *sta, u8 hlid)
else
cmd->psd_type[i] = WL1271_PSD_LEGACY;
- sta_rates = sta->supp_rates[wl->band];
+ sta_rates = sta->supp_rates[wlvif->band];
if (sta->ht_cap.ht_supported)
sta_rates |= sta->ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET;
cmd->supported_rates =
cpu_to_le32(wl1271_tx_enabled_rates_get(wl, sta_rates,
- wl->band));
+ wlvif->band));
wl1271_debug(DEBUG_CMD, "new peer rates=0x%x queues=0x%x",
cmd->supported_rates, sta->uapsd_queues);
@@ -1584,12 +1630,13 @@ out:
return ret;
}
-static int wl12xx_cmd_roc(struct wl1271 *wl, u8 role_id)
+static int wl12xx_cmd_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 role_id)
{
struct wl12xx_cmd_roc *cmd;
int ret = 0;
- wl1271_debug(DEBUG_CMD, "cmd roc %d (%d)", wl->channel, role_id);
+ wl1271_debug(DEBUG_CMD, "cmd roc %d (%d)", wlvif->channel, role_id);
if (WARN_ON(role_id == WL12XX_INVALID_ROLE_ID))
return -EINVAL;
@@ -1601,8 +1648,8 @@ static int wl12xx_cmd_roc(struct wl1271 *wl, u8 role_id)
}
cmd->role_id = role_id;
- cmd->channel = wl->channel;
- switch (wl->band) {
+ cmd->channel = wlvif->channel;
+ switch (wlvif->band) {
case IEEE80211_BAND_2GHZ:
cmd->band = RADIO_BAND_2_4GHZ;
break;
@@ -1610,7 +1657,7 @@ static int wl12xx_cmd_roc(struct wl1271 *wl, u8 role_id)
cmd->band = RADIO_BAND_5GHZ;
break;
default:
- wl1271_error("roc - unknown band: %d", (int)wl->band);
+ wl1271_error("roc - unknown band: %d", (int)wlvif->band);
ret = -EINVAL;
goto out_free;
}
@@ -1657,14 +1704,14 @@ out:
return ret;
}
-int wl12xx_roc(struct wl1271 *wl, u8 role_id)
+int wl12xx_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 role_id)
{
int ret = 0;
if (WARN_ON(test_bit(role_id, wl->roc_map)))
return 0;
- ret = wl12xx_cmd_roc(wl, role_id);
+ ret = wl12xx_cmd_roc(wl, wlvif, role_id);
if (ret < 0)
goto out;
@@ -1753,3 +1800,53 @@ out_free:
out:
return ret;
}
+
+/* start dev role and roc on its channel */
+int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+{
+ int ret;
+
+ if (WARN_ON(!(wlvif->bss_type == BSS_TYPE_STA_BSS ||
+ wlvif->bss_type == BSS_TYPE_IBSS)))
+ return -EINVAL;
+
+ ret = wl12xx_cmd_role_start_dev(wl, wlvif);
+ if (ret < 0)
+ goto out;
+
+ ret = wl12xx_roc(wl, wlvif, wlvif->dev_role_id);
+ if (ret < 0)
+ goto out_stop;
+
+ return 0;
+
+out_stop:
+ wl12xx_cmd_role_stop_dev(wl, wlvif);
+out:
+ return ret;
+}
+
+/* croc dev hlid, and stop the role */
+int wl12xx_stop_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+{
+ int ret;
+
+ if (WARN_ON(!(wlvif->bss_type == BSS_TYPE_STA_BSS ||
+ wlvif->bss_type == BSS_TYPE_IBSS)))
+ return -EINVAL;
+
+ /* flush all pending packets */
+ wl1271_tx_work_locked(wl);
+
+ if (test_bit(wlvif->dev_role_id, wl->roc_map)) {
+ ret = wl12xx_croc(wl, wlvif->dev_role_id);
+ if (ret < 0)
+ goto out;
+ }
+
+ ret = wl12xx_cmd_role_stop_dev(wl, wlvif);
+ if (ret < 0)
+ goto out;
+out:
+ return ret;
+}
diff --git a/drivers/net/wireless/wl12xx/cmd.h b/drivers/net/wireless/wl12xx/cmd.h
index b7bd42769aa7..3f7d0b93c24d 100644
--- a/drivers/net/wireless/wl12xx/cmd.h
+++ b/drivers/net/wireless/wl12xx/cmd.h
@@ -36,45 +36,54 @@ int wl128x_cmd_general_parms(struct wl1271 *wl);
int wl1271_cmd_radio_parms(struct wl1271 *wl);
int wl128x_cmd_radio_parms(struct wl1271 *wl);
int wl1271_cmd_ext_radio_parms(struct wl1271 *wl);
-int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 role_type, u8 *role_id);
+int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 *addr, u8 role_type,
+ u8 *role_id);
int wl12xx_cmd_role_disable(struct wl1271 *wl, u8 *role_id);
-int wl12xx_cmd_role_start_dev(struct wl1271 *wl);
-int wl12xx_cmd_role_stop_dev(struct wl1271 *wl);
-int wl12xx_cmd_role_start_sta(struct wl1271 *wl);
-int wl12xx_cmd_role_stop_sta(struct wl1271 *wl);
-int wl12xx_cmd_role_start_ap(struct wl1271 *wl);
-int wl12xx_cmd_role_stop_ap(struct wl1271 *wl);
-int wl12xx_cmd_role_start_ibss(struct wl1271 *wl);
+int wl12xx_cmd_role_start_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl12xx_cmd_role_stop_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl12xx_cmd_role_start_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl12xx_cmd_role_stop_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl12xx_cmd_role_start_ibss(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl12xx_stop_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif);
int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer);
int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len);
int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len);
int wl1271_cmd_data_path(struct wl1271 *wl, bool enable);
-int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode);
+int wl1271_cmd_ps_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 ps_mode);
int wl1271_cmd_read_memory(struct wl1271 *wl, u32 addr, void *answer,
size_t len);
int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id,
void *buf, size_t buf_len, int index, u32 rates);
-int wl1271_cmd_build_null_data(struct wl1271 *wl);
-int wl1271_cmd_build_ps_poll(struct wl1271 *wl, u16 aid);
-int wl1271_cmd_build_probe_req(struct wl1271 *wl,
+int wl12xx_cmd_build_null_data(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl1271_cmd_build_ps_poll(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u16 aid);
+int wl1271_cmd_build_probe_req(struct wl1271 *wl, struct wl12xx_vif *wlvif,
const u8 *ssid, size_t ssid_len,
const u8 *ie, size_t ie_len, u8 band);
struct sk_buff *wl1271_cmd_build_ap_probe_req(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
struct sk_buff *skb);
-int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, __be32 ip_addr);
-int wl1271_build_qos_null_data(struct wl1271 *wl);
-int wl1271_cmd_build_klv_null_data(struct wl1271 *wl);
+int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ __be32 ip_addr);
+int wl1271_build_qos_null_data(struct wl1271 *wl, struct ieee80211_vif *vif);
+int wl12xx_cmd_build_klv_null_data(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif);
int wl12xx_cmd_set_default_wep_key(struct wl1271 *wl, u8 id, u8 hlid);
-int wl1271_cmd_set_sta_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
+int wl1271_cmd_set_sta_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u16 action, u8 id, u8 key_type,
u8 key_size, const u8 *key, const u8 *addr,
u32 tx_seq_32, u16 tx_seq_16);
-int wl1271_cmd_set_ap_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
+int wl1271_cmd_set_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u16 action, u8 id, u8 key_type,
u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32,
u16 tx_seq_16);
int wl12xx_cmd_set_peer_state(struct wl1271 *wl, u8 hlid);
-int wl12xx_roc(struct wl1271 *wl, u8 role_id);
+int wl12xx_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 role_id);
int wl12xx_croc(struct wl1271 *wl, u8 role_id);
-int wl12xx_cmd_add_peer(struct wl1271 *wl, struct ieee80211_sta *sta, u8 hlid);
+int wl12xx_cmd_add_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct ieee80211_sta *sta, u8 hlid);
int wl12xx_cmd_remove_peer(struct wl1271 *wl, u8 hlid);
int wl12xx_cmd_config_fwlog(struct wl1271 *wl);
int wl12xx_cmd_start_fwlog(struct wl1271 *wl);
@@ -82,6 +91,9 @@ int wl12xx_cmd_stop_fwlog(struct wl1271 *wl);
int wl12xx_cmd_channel_switch(struct wl1271 *wl,
struct ieee80211_channel_switch *ch_switch);
int wl12xx_cmd_stop_channel_switch(struct wl1271 *wl);
+int wl12xx_allocate_link(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 *hlid);
+void wl12xx_free_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid);
enum wl1271_commands {
CMD_INTERROGATE = 1, /*use this to read information elements*/
diff --git a/drivers/net/wireless/wl12xx/conf.h b/drivers/net/wireless/wl12xx/conf.h
index 04bb8fbf93f9..1bcfb017058d 100644
--- a/drivers/net/wireless/wl12xx/conf.h
+++ b/drivers/net/wireless/wl12xx/conf.h
@@ -440,6 +440,10 @@ struct conf_rx_settings {
CONF_HW_BIT_RATE_36MBPS | CONF_HW_BIT_RATE_48MBPS | \
CONF_HW_BIT_RATE_54MBPS)
+#define CONF_TX_CCK_RATES (CONF_HW_BIT_RATE_1MBPS | \
+ CONF_HW_BIT_RATE_2MBPS | CONF_HW_BIT_RATE_5_5MBPS | \
+ CONF_HW_BIT_RATE_11MBPS)
+
#define CONF_TX_OFDM_RATES (CONF_HW_BIT_RATE_6MBPS | \
CONF_HW_BIT_RATE_12MBPS | CONF_HW_BIT_RATE_24MBPS | \
CONF_HW_BIT_RATE_36MBPS | CONF_HW_BIT_RATE_48MBPS | \
diff --git a/drivers/net/wireless/wl12xx/debug.h b/drivers/net/wireless/wl12xx/debug.h
new file mode 100644
index 000000000000..b85fd8c41e8f
--- /dev/null
+++ b/drivers/net/wireless/wl12xx/debug.h
@@ -0,0 +1,101 @@
+/*
+ * This file is part of wl12xx
+ *
+ * Copyright (C) 2011 Texas Instruments. All rights reserved.
+ * Copyright (C) 2008-2009 Nokia Corporation
+ *
+ * Contact: Luciano Coelho <coelho@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __DEBUG_H__
+#define __DEBUG_H__
+
+#include <linux/bitops.h>
+#include <linux/printk.h>
+
+#define DRIVER_NAME "wl12xx"
+#define DRIVER_PREFIX DRIVER_NAME ": "
+
+enum {
+ DEBUG_NONE = 0,
+ DEBUG_IRQ = BIT(0),
+ DEBUG_SPI = BIT(1),
+ DEBUG_BOOT = BIT(2),
+ DEBUG_MAILBOX = BIT(3),
+ DEBUG_TESTMODE = BIT(4),
+ DEBUG_EVENT = BIT(5),
+ DEBUG_TX = BIT(6),
+ DEBUG_RX = BIT(7),
+ DEBUG_SCAN = BIT(8),
+ DEBUG_CRYPT = BIT(9),
+ DEBUG_PSM = BIT(10),
+ DEBUG_MAC80211 = BIT(11),
+ DEBUG_CMD = BIT(12),
+ DEBUG_ACX = BIT(13),
+ DEBUG_SDIO = BIT(14),
+ DEBUG_FILTERS = BIT(15),
+ DEBUG_ADHOC = BIT(16),
+ DEBUG_AP = BIT(17),
+ DEBUG_MASTER = (DEBUG_ADHOC | DEBUG_AP),
+ DEBUG_ALL = ~0,
+};
+
+extern u32 wl12xx_debug_level;
+
+#define DEBUG_DUMP_LIMIT 1024
+
+#define wl1271_error(fmt, arg...) \
+ pr_err(DRIVER_PREFIX "ERROR " fmt "\n", ##arg)
+
+#define wl1271_warning(fmt, arg...) \
+ pr_warning(DRIVER_PREFIX "WARNING " fmt "\n", ##arg)
+
+#define wl1271_notice(fmt, arg...) \
+ pr_info(DRIVER_PREFIX fmt "\n", ##arg)
+
+#define wl1271_info(fmt, arg...) \
+ pr_info(DRIVER_PREFIX fmt "\n", ##arg)
+
+#define wl1271_debug(level, fmt, arg...) \
+ do { \
+ if (level & wl12xx_debug_level) \
+ pr_debug(DRIVER_PREFIX fmt "\n", ##arg); \
+ } while (0)
+
+/* TODO: use pr_debug_hex_dump when it becomes available */
+#define wl1271_dump(level, prefix, buf, len) \
+ do { \
+ if (level & wl12xx_debug_level) \
+ print_hex_dump(KERN_DEBUG, DRIVER_PREFIX prefix, \
+ DUMP_PREFIX_OFFSET, 16, 1, \
+ buf, \
+ min_t(size_t, len, DEBUG_DUMP_LIMIT), \
+ 0); \
+ } while (0)
+
+#define wl1271_dump_ascii(level, prefix, buf, len) \
+ do { \
+ if (level & wl12xx_debug_level) \
+ print_hex_dump(KERN_DEBUG, DRIVER_PREFIX prefix, \
+ DUMP_PREFIX_OFFSET, 16, 1, \
+ buf, \
+ min_t(size_t, len, DEBUG_DUMP_LIMIT), \
+ true); \
+ } while (0)
+
+#endif /* __DEBUG_H__ */
diff --git a/drivers/net/wireless/wl12xx/debugfs.c b/drivers/net/wireless/wl12xx/debugfs.c
index 3999fd528302..15eb3a9c30ca 100644
--- a/drivers/net/wireless/wl12xx/debugfs.c
+++ b/drivers/net/wireless/wl12xx/debugfs.c
@@ -27,6 +27,7 @@
#include <linux/slab.h>
#include "wl12xx.h"
+#include "debug.h"
#include "acx.h"
#include "ps.h"
#include "io.h"
@@ -316,12 +317,19 @@ static ssize_t driver_state_read(struct file *file, char __user *user_buf,
{
struct wl1271 *wl = file->private_data;
int res = 0;
- char buf[1024];
+ ssize_t ret;
+ char *buf;
+
+#define DRIVER_STATE_BUF_LEN 1024
+
+ buf = kmalloc(DRIVER_STATE_BUF_LEN, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
mutex_lock(&wl->mutex);
#define DRIVER_STATE_PRINT(x, fmt) \
- (res += scnprintf(buf + res, sizeof(buf) - res,\
+ (res += scnprintf(buf + res, DRIVER_STATE_BUF_LEN - res,\
#x " = " fmt "\n", wl->x))
#define DRIVER_STATE_PRINT_LONG(x) DRIVER_STATE_PRINT(x, "%ld")
@@ -346,29 +354,14 @@ static ssize_t driver_state_read(struct file *file, char __user *user_buf,
DRIVER_STATE_PRINT_INT(tx_results_count);
DRIVER_STATE_PRINT_LHEX(flags);
DRIVER_STATE_PRINT_INT(tx_blocks_freed);
- DRIVER_STATE_PRINT_INT(tx_security_last_seq_lsb);
DRIVER_STATE_PRINT_INT(rx_counter);
- DRIVER_STATE_PRINT_INT(session_counter);
DRIVER_STATE_PRINT_INT(state);
- DRIVER_STATE_PRINT_INT(bss_type);
DRIVER_STATE_PRINT_INT(channel);
- DRIVER_STATE_PRINT_HEX(rate_set);
- DRIVER_STATE_PRINT_HEX(basic_rate_set);
- DRIVER_STATE_PRINT_HEX(basic_rate);
DRIVER_STATE_PRINT_INT(band);
- DRIVER_STATE_PRINT_INT(beacon_int);
- DRIVER_STATE_PRINT_INT(psm_entry_retry);
- DRIVER_STATE_PRINT_INT(ps_poll_failures);
DRIVER_STATE_PRINT_INT(power_level);
- DRIVER_STATE_PRINT_INT(rssi_thold);
- DRIVER_STATE_PRINT_INT(last_rssi_event);
DRIVER_STATE_PRINT_INT(sg_enabled);
DRIVER_STATE_PRINT_INT(enable_11a);
DRIVER_STATE_PRINT_INT(noise);
- DRIVER_STATE_PRINT_LHEX(ap_hlid_map[0]);
- DRIVER_STATE_PRINT_INT(last_tx_hlid);
- DRIVER_STATE_PRINT_INT(ba_support);
- DRIVER_STATE_PRINT_HEX(ba_rx_bitmap);
DRIVER_STATE_PRINT_HEX(ap_fw_ps_map);
DRIVER_STATE_PRINT_LHEX(ap_ps_map);
DRIVER_STATE_PRINT_HEX(quirks);
@@ -387,10 +380,13 @@ static ssize_t driver_state_read(struct file *file, char __user *user_buf,
#undef DRIVER_STATE_PRINT_LHEX
#undef DRIVER_STATE_PRINT_STR
#undef DRIVER_STATE_PRINT
+#undef DRIVER_STATE_BUF_LEN
mutex_unlock(&wl->mutex);
- return simple_read_from_buffer(user_buf, count, ppos, buf, res);
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, res);
+ kfree(buf);
+ return ret;
}
static const struct file_operations driver_state_ops = {
@@ -399,6 +395,115 @@ static const struct file_operations driver_state_ops = {
.llseek = default_llseek,
};
+static ssize_t vifs_state_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wl1271 *wl = file->private_data;
+ struct wl12xx_vif *wlvif;
+ int ret, res = 0;
+ const int buf_size = 4096;
+ char *buf;
+ char tmp_buf[64];
+
+ buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ mutex_lock(&wl->mutex);
+
+#define VIF_STATE_PRINT(x, fmt) \
+ (res += scnprintf(buf + res, buf_size - res, \
+ #x " = " fmt "\n", wlvif->x))
+
+#define VIF_STATE_PRINT_LONG(x) VIF_STATE_PRINT(x, "%ld")
+#define VIF_STATE_PRINT_INT(x) VIF_STATE_PRINT(x, "%d")
+#define VIF_STATE_PRINT_STR(x) VIF_STATE_PRINT(x, "%s")
+#define VIF_STATE_PRINT_LHEX(x) VIF_STATE_PRINT(x, "0x%lx")
+#define VIF_STATE_PRINT_LLHEX(x) VIF_STATE_PRINT(x, "0x%llx")
+#define VIF_STATE_PRINT_HEX(x) VIF_STATE_PRINT(x, "0x%x")
+
+#define VIF_STATE_PRINT_NSTR(x, len) \
+ do { \
+ memset(tmp_buf, 0, sizeof(tmp_buf)); \
+ memcpy(tmp_buf, wlvif->x, \
+ min_t(u8, len, sizeof(tmp_buf) - 1)); \
+ res += scnprintf(buf + res, buf_size - res, \
+ #x " = %s\n", tmp_buf); \
+ } while (0)
+
+ wl12xx_for_each_wlvif(wl, wlvif) {
+ VIF_STATE_PRINT_INT(role_id);
+ VIF_STATE_PRINT_INT(bss_type);
+ VIF_STATE_PRINT_LHEX(flags);
+ VIF_STATE_PRINT_INT(p2p);
+ VIF_STATE_PRINT_INT(dev_role_id);
+ VIF_STATE_PRINT_INT(dev_hlid);
+
+ if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
+ wlvif->bss_type == BSS_TYPE_IBSS) {
+ VIF_STATE_PRINT_INT(sta.hlid);
+ VIF_STATE_PRINT_INT(sta.ba_rx_bitmap);
+ VIF_STATE_PRINT_INT(sta.basic_rate_idx);
+ VIF_STATE_PRINT_INT(sta.ap_rate_idx);
+ VIF_STATE_PRINT_INT(sta.p2p_rate_idx);
+ } else {
+ VIF_STATE_PRINT_INT(ap.global_hlid);
+ VIF_STATE_PRINT_INT(ap.bcast_hlid);
+ VIF_STATE_PRINT_LHEX(ap.sta_hlid_map[0]);
+ VIF_STATE_PRINT_INT(ap.mgmt_rate_idx);
+ VIF_STATE_PRINT_INT(ap.bcast_rate_idx);
+ VIF_STATE_PRINT_INT(ap.ucast_rate_idx[0]);
+ VIF_STATE_PRINT_INT(ap.ucast_rate_idx[1]);
+ VIF_STATE_PRINT_INT(ap.ucast_rate_idx[2]);
+ VIF_STATE_PRINT_INT(ap.ucast_rate_idx[3]);
+ }
+ VIF_STATE_PRINT_INT(last_tx_hlid);
+ VIF_STATE_PRINT_LHEX(links_map[0]);
+ VIF_STATE_PRINT_NSTR(ssid, wlvif->ssid_len);
+ VIF_STATE_PRINT_INT(band);
+ VIF_STATE_PRINT_INT(channel);
+ VIF_STATE_PRINT_HEX(bitrate_masks[0]);
+ VIF_STATE_PRINT_HEX(bitrate_masks[1]);
+ VIF_STATE_PRINT_HEX(basic_rate_set);
+ VIF_STATE_PRINT_HEX(basic_rate);
+ VIF_STATE_PRINT_HEX(rate_set);
+ VIF_STATE_PRINT_INT(beacon_int);
+ VIF_STATE_PRINT_INT(default_key);
+ VIF_STATE_PRINT_INT(aid);
+ VIF_STATE_PRINT_INT(session_counter);
+ VIF_STATE_PRINT_INT(ps_poll_failures);
+ VIF_STATE_PRINT_INT(psm_entry_retry);
+ VIF_STATE_PRINT_INT(power_level);
+ VIF_STATE_PRINT_INT(rssi_thold);
+ VIF_STATE_PRINT_INT(last_rssi_event);
+ VIF_STATE_PRINT_INT(ba_support);
+ VIF_STATE_PRINT_INT(ba_allowed);
+ VIF_STATE_PRINT_LLHEX(tx_security_seq);
+ VIF_STATE_PRINT_INT(tx_security_last_seq_lsb);
+ }
+
+#undef VIF_STATE_PRINT_INT
+#undef VIF_STATE_PRINT_LONG
+#undef VIF_STATE_PRINT_HEX
+#undef VIF_STATE_PRINT_LHEX
+#undef VIF_STATE_PRINT_LLHEX
+#undef VIF_STATE_PRINT_STR
+#undef VIF_STATE_PRINT_NSTR
+#undef VIF_STATE_PRINT
+
+ mutex_unlock(&wl->mutex);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, res);
+ kfree(buf);
+ return ret;
+}
+
+static const struct file_operations vifs_state_ops = {
+ .read = vifs_state_read,
+ .open = wl1271_open_file_generic,
+ .llseek = default_llseek,
+};
+
static ssize_t dtim_interval_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@@ -520,6 +625,7 @@ static ssize_t rx_streaming_interval_write(struct file *file,
size_t count, loff_t *ppos)
{
struct wl1271 *wl = file->private_data;
+ struct wl12xx_vif *wlvif;
unsigned long value;
int ret;
@@ -543,7 +649,9 @@ static ssize_t rx_streaming_interval_write(struct file *file,
if (ret < 0)
goto out;
- wl1271_recalc_rx_streaming(wl);
+ wl12xx_for_each_wlvif_sta(wl, wlvif) {
+ wl1271_recalc_rx_streaming(wl, wlvif);
+ }
wl1271_ps_elp_sleep(wl);
out:
@@ -572,6 +680,7 @@ static ssize_t rx_streaming_always_write(struct file *file,
size_t count, loff_t *ppos)
{
struct wl1271 *wl = file->private_data;
+ struct wl12xx_vif *wlvif;
unsigned long value;
int ret;
@@ -595,7 +704,9 @@ static ssize_t rx_streaming_always_write(struct file *file,
if (ret < 0)
goto out;
- wl1271_recalc_rx_streaming(wl);
+ wl12xx_for_each_wlvif_sta(wl, wlvif) {
+ wl1271_recalc_rx_streaming(wl, wlvif);
+ }
wl1271_ps_elp_sleep(wl);
out:
@@ -624,6 +735,7 @@ static ssize_t beacon_filtering_write(struct file *file,
size_t count, loff_t *ppos)
{
struct wl1271 *wl = file->private_data;
+ struct wl12xx_vif *wlvif;
char buf[10];
size_t len;
unsigned long value;
@@ -646,7 +758,9 @@ static ssize_t beacon_filtering_write(struct file *file,
if (ret < 0)
goto out;
- ret = wl1271_acx_beacon_filter_opt(wl, !!value);
+ wl12xx_for_each_wlvif(wl, wlvif) {
+ ret = wl1271_acx_beacon_filter_opt(wl, wlvif, !!value);
+ }
wl1271_ps_elp_sleep(wl);
out:
@@ -770,6 +884,7 @@ static int wl1271_debugfs_add_files(struct wl1271 *wl,
DEBUGFS_ADD(gpio_power, rootdir);
DEBUGFS_ADD(start_recovery, rootdir);
DEBUGFS_ADD(driver_state, rootdir);
+ DEBUGFS_ADD(vifs_state, rootdir);
DEBUGFS_ADD(dtim_interval, rootdir);
DEBUGFS_ADD(beacon_interval, rootdir);
DEBUGFS_ADD(beacon_filtering, rootdir);
diff --git a/drivers/net/wireless/wl12xx/event.c b/drivers/net/wireless/wl12xx/event.c
index 674ad2a9e409..d3280df68f5d 100644
--- a/drivers/net/wireless/wl12xx/event.c
+++ b/drivers/net/wireless/wl12xx/event.c
@@ -22,6 +22,7 @@
*/
#include "wl12xx.h"
+#include "debug.h"
#include "reg.h"
#include "io.h"
#include "event.h"
@@ -31,12 +32,16 @@
void wl1271_pspoll_work(struct work_struct *work)
{
+ struct ieee80211_vif *vif;
+ struct wl12xx_vif *wlvif;
struct delayed_work *dwork;
struct wl1271 *wl;
int ret;
dwork = container_of(work, struct delayed_work, work);
- wl = container_of(dwork, struct wl1271, pspoll_work);
+ wlvif = container_of(dwork, struct wl12xx_vif, pspoll_work);
+ vif = container_of((void *)wlvif, struct ieee80211_vif, drv_priv);
+ wl = wlvif->wl;
wl1271_debug(DEBUG_EVENT, "pspoll work");
@@ -45,10 +50,10 @@ void wl1271_pspoll_work(struct work_struct *work)
if (unlikely(wl->state == WL1271_STATE_OFF))
goto out;
- if (!test_and_clear_bit(WL1271_FLAG_PSPOLL_FAILURE, &wl->flags))
+ if (!test_and_clear_bit(WLVIF_FLAG_PSPOLL_FAILURE, &wlvif->flags))
goto out;
- if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
+ if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
goto out;
/*
@@ -60,31 +65,33 @@ void wl1271_pspoll_work(struct work_struct *work)
if (ret < 0)
goto out;
- wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE, wl->basic_rate, true);
+ wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE,
+ wlvif->basic_rate, true);
wl1271_ps_elp_sleep(wl);
out:
mutex_unlock(&wl->mutex);
};
-static void wl1271_event_pspoll_delivery_fail(struct wl1271 *wl)
+static void wl1271_event_pspoll_delivery_fail(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
int delay = wl->conf.conn.ps_poll_recovery_period;
int ret;
- wl->ps_poll_failures++;
- if (wl->ps_poll_failures == 1)
+ wlvif->ps_poll_failures++;
+ if (wlvif->ps_poll_failures == 1)
wl1271_info("AP with dysfunctional ps-poll, "
"trying to work around it.");
/* force active mode receive data from the AP */
- if (test_bit(WL1271_FLAG_PSM, &wl->flags)) {
- ret = wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE,
- wl->basic_rate, true);
+ if (test_bit(WLVIF_FLAG_PSM, &wlvif->flags)) {
+ ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE,
+ wlvif->basic_rate, true);
if (ret < 0)
return;
- set_bit(WL1271_FLAG_PSPOLL_FAILURE, &wl->flags);
- ieee80211_queue_delayed_work(wl->hw, &wl->pspoll_work,
+ set_bit(WLVIF_FLAG_PSPOLL_FAILURE, &wlvif->flags);
+ ieee80211_queue_delayed_work(wl->hw, &wlvif->pspoll_work,
msecs_to_jiffies(delay));
}
@@ -97,6 +104,7 @@ static void wl1271_event_pspoll_delivery_fail(struct wl1271 *wl)
}
static int wl1271_event_ps_report(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
struct event_mailbox *mbox,
bool *beacon_loss)
{
@@ -109,41 +117,37 @@ static int wl1271_event_ps_report(struct wl1271 *wl,
case EVENT_ENTER_POWER_SAVE_FAIL:
wl1271_debug(DEBUG_PSM, "PSM entry failed");
- if (!test_bit(WL1271_FLAG_PSM, &wl->flags)) {
+ if (!test_bit(WLVIF_FLAG_PSM, &wlvif->flags)) {
/* remain in active mode */
- wl->psm_entry_retry = 0;
+ wlvif->psm_entry_retry = 0;
break;
}
- if (wl->psm_entry_retry < total_retries) {
- wl->psm_entry_retry++;
- ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE,
- wl->basic_rate, true);
+ if (wlvif->psm_entry_retry < total_retries) {
+ wlvif->psm_entry_retry++;
+ ret = wl1271_ps_set_mode(wl, wlvif,
+ STATION_POWER_SAVE_MODE,
+ wlvif->basic_rate, true);
} else {
wl1271_info("No ack to nullfunc from AP.");
- wl->psm_entry_retry = 0;
+ wlvif->psm_entry_retry = 0;
*beacon_loss = true;
}
break;
case EVENT_ENTER_POWER_SAVE_SUCCESS:
- wl->psm_entry_retry = 0;
-
- /* enable beacon filtering */
- ret = wl1271_acx_beacon_filter_opt(wl, true);
- if (ret < 0)
- break;
+ wlvif->psm_entry_retry = 0;
/*
* BET has only a minor effect in 5GHz and masks
* channel switch IEs, so we only enable BET on 2.4GHz
*/
- if (wl->band == IEEE80211_BAND_2GHZ)
+ if (wlvif->band == IEEE80211_BAND_2GHZ)
/* enable beacon early termination */
- ret = wl1271_acx_bet_enable(wl, true);
+ ret = wl1271_acx_bet_enable(wl, wlvif, true);
- if (wl->ps_compl) {
- complete(wl->ps_compl);
- wl->ps_compl = NULL;
+ if (wlvif->ps_compl) {
+ complete(wlvif->ps_compl);
+ wlvif->ps_compl = NULL;
}
break;
default:
@@ -154,39 +158,44 @@ static int wl1271_event_ps_report(struct wl1271 *wl,
}
static void wl1271_event_rssi_trigger(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
struct event_mailbox *mbox)
{
+ struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
enum nl80211_cqm_rssi_threshold_event event;
s8 metric = mbox->rssi_snr_trigger_metric[0];
wl1271_debug(DEBUG_EVENT, "RSSI trigger metric: %d", metric);
- if (metric <= wl->rssi_thold)
+ if (metric <= wlvif->rssi_thold)
event = NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW;
else
event = NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH;
- if (event != wl->last_rssi_event)
- ieee80211_cqm_rssi_notify(wl->vif, event, GFP_KERNEL);
- wl->last_rssi_event = event;
+ if (event != wlvif->last_rssi_event)
+ ieee80211_cqm_rssi_notify(vif, event, GFP_KERNEL);
+ wlvif->last_rssi_event = event;
}
-static void wl1271_stop_ba_event(struct wl1271 *wl)
+static void wl1271_stop_ba_event(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
- if (wl->bss_type != BSS_TYPE_AP_BSS) {
- if (!wl->ba_rx_bitmap)
+ struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
+
+ if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
+ if (!wlvif->sta.ba_rx_bitmap)
return;
- ieee80211_stop_rx_ba_session(wl->vif, wl->ba_rx_bitmap,
- wl->bssid);
+ ieee80211_stop_rx_ba_session(vif, wlvif->sta.ba_rx_bitmap,
+ vif->bss_conf.bssid);
} else {
- int i;
+ u8 hlid;
struct wl1271_link *lnk;
- for (i = WL1271_AP_STA_HLID_START; i < AP_MAX_LINKS; i++) {
- lnk = &wl->links[i];
- if (!wl1271_is_active_sta(wl, i) || !lnk->ba_bitmap)
+ for_each_set_bit(hlid, wlvif->ap.sta_hlid_map,
+ WL12XX_MAX_LINKS) {
+ lnk = &wl->links[hlid];
+ if (!lnk->ba_bitmap)
continue;
- ieee80211_stop_rx_ba_session(wl->vif,
+ ieee80211_stop_rx_ba_session(vif,
lnk->ba_bitmap,
lnk->addr);
}
@@ -196,14 +205,23 @@ static void wl1271_stop_ba_event(struct wl1271 *wl)
static void wl12xx_event_soft_gemini_sense(struct wl1271 *wl,
u8 enable)
{
+ struct ieee80211_vif *vif;
+ struct wl12xx_vif *wlvif;
+
if (enable) {
/* disable dynamic PS when requested by the firmware */
- ieee80211_disable_dyn_ps(wl->vif);
+ wl12xx_for_each_wlvif_sta(wl, wlvif) {
+ vif = wl12xx_wlvif_to_vif(wlvif);
+ ieee80211_disable_dyn_ps(vif);
+ }
set_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags);
} else {
- ieee80211_enable_dyn_ps(wl->vif);
clear_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags);
- wl1271_recalc_rx_streaming(wl);
+ wl12xx_for_each_wlvif_sta(wl, wlvif) {
+ vif = wl12xx_wlvif_to_vif(wlvif);
+ ieee80211_enable_dyn_ps(vif);
+ wl1271_recalc_rx_streaming(wl, wlvif);
+ }
}
}
@@ -217,10 +235,11 @@ static void wl1271_event_mbox_dump(struct event_mailbox *mbox)
static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
{
+ struct ieee80211_vif *vif;
+ struct wl12xx_vif *wlvif;
int ret;
u32 vector;
bool beacon_loss = false;
- bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
bool disconnect_sta = false;
unsigned long sta_bitmap = 0;
@@ -234,7 +253,7 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
wl1271_debug(DEBUG_EVENT, "status: 0x%x",
mbox->scheduled_scan_status);
- wl1271_scan_stm(wl);
+ wl1271_scan_stm(wl, wl->scan_vif);
}
if (vector & PERIODIC_SCAN_REPORT_EVENT_ID) {
@@ -248,13 +267,12 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
wl1271_debug(DEBUG_EVENT, "PERIODIC_SCAN_COMPLETE_EVENT "
"(status 0x%0x)", mbox->scheduled_scan_status);
if (wl->sched_scanning) {
- wl1271_scan_sched_scan_stop(wl);
ieee80211_sched_scan_stopped(wl->hw);
+ wl->sched_scanning = false;
}
}
- if (vector & SOFT_GEMINI_SENSE_EVENT_ID &&
- wl->bss_type == BSS_TYPE_STA_BSS)
+ if (vector & SOFT_GEMINI_SENSE_EVENT_ID)
wl12xx_event_soft_gemini_sense(wl,
mbox->soft_gemini_sense_info);
@@ -267,40 +285,54 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
* BSS_LOSE_EVENT, beacon loss has to be reported to the stack.
*
*/
- if ((vector & BSS_LOSE_EVENT_ID) && !is_ap) {
+ if (vector & BSS_LOSE_EVENT_ID) {
+ /* TODO: check for multi-role */
wl1271_info("Beacon loss detected.");
/* indicate to the stack, that beacons have been lost */
beacon_loss = true;
}
- if ((vector & PS_REPORT_EVENT_ID) && !is_ap) {
+ if (vector & PS_REPORT_EVENT_ID) {
wl1271_debug(DEBUG_EVENT, "PS_REPORT_EVENT");
- ret = wl1271_event_ps_report(wl, mbox, &beacon_loss);
- if (ret < 0)
- return ret;
+ wl12xx_for_each_wlvif_sta(wl, wlvif) {
+ ret = wl1271_event_ps_report(wl, wlvif,
+ mbox, &beacon_loss);
+ if (ret < 0)
+ return ret;
+ }
}
- if ((vector & PSPOLL_DELIVERY_FAILURE_EVENT_ID) && !is_ap)
- wl1271_event_pspoll_delivery_fail(wl);
+ if (vector & PSPOLL_DELIVERY_FAILURE_EVENT_ID)
+ wl12xx_for_each_wlvif_sta(wl, wlvif) {
+ wl1271_event_pspoll_delivery_fail(wl, wlvif);
+ }
if (vector & RSSI_SNR_TRIGGER_0_EVENT_ID) {
+ /* TODO: check actual multi-role support */
wl1271_debug(DEBUG_EVENT, "RSSI_SNR_TRIGGER_0_EVENT");
- if (wl->vif)
- wl1271_event_rssi_trigger(wl, mbox);
+ wl12xx_for_each_wlvif_sta(wl, wlvif) {
+ wl1271_event_rssi_trigger(wl, wlvif, mbox);
+ }
}
- if ((vector & BA_SESSION_RX_CONSTRAINT_EVENT_ID)) {
+ if (vector & BA_SESSION_RX_CONSTRAINT_EVENT_ID) {
+ u8 role_id = mbox->role_id;
wl1271_debug(DEBUG_EVENT, "BA_SESSION_RX_CONSTRAINT_EVENT_ID. "
- "ba_allowed = 0x%x", mbox->rx_ba_allowed);
+ "ba_allowed = 0x%x, role_id=%d",
+ mbox->rx_ba_allowed, role_id);
- wl->ba_allowed = !!mbox->rx_ba_allowed;
+ wl12xx_for_each_wlvif(wl, wlvif) {
+ if (role_id != 0xff && role_id != wlvif->role_id)
+ continue;
- if (wl->vif && !wl->ba_allowed)
- wl1271_stop_ba_event(wl);
+ wlvif->ba_allowed = !!mbox->rx_ba_allowed;
+ if (!wlvif->ba_allowed)
+ wl1271_stop_ba_event(wl, wlvif);
+ }
}
- if ((vector & CHANNEL_SWITCH_COMPLETE_EVENT_ID) && !is_ap) {
+ if (vector & CHANNEL_SWITCH_COMPLETE_EVENT_ID) {
wl1271_debug(DEBUG_EVENT, "CHANNEL_SWITCH_COMPLETE_EVENT_ID. "
"status = 0x%x",
mbox->channel_switch_status);
@@ -309,50 +341,65 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
* 1) channel switch complete with status=0
* 2) channel switch failed status=1
*/
- if (test_and_clear_bit(WL1271_FLAG_CS_PROGRESS, &wl->flags) &&
- (wl->vif))
- ieee80211_chswitch_done(wl->vif,
- mbox->channel_switch_status ? false : true);
+
+ /* TODO: configure only the relevant vif */
+ wl12xx_for_each_wlvif_sta(wl, wlvif) {
+ struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
+ bool success;
+
+ if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS,
+ &wl->flags))
+ continue;
+
+ success = mbox->channel_switch_status ? false : true;
+ ieee80211_chswitch_done(vif, success);
+ }
}
if ((vector & DUMMY_PACKET_EVENT_ID)) {
wl1271_debug(DEBUG_EVENT, "DUMMY_PACKET_ID_EVENT_ID");
- if (wl->vif)
- wl1271_tx_dummy_packet(wl);
+ wl1271_tx_dummy_packet(wl);
}
/*
* "TX retries exceeded" has a different meaning according to mode.
* In AP mode the offending station is disconnected.
*/
- if ((vector & MAX_TX_RETRY_EVENT_ID) && is_ap) {
+ if (vector & MAX_TX_RETRY_EVENT_ID) {
wl1271_debug(DEBUG_EVENT, "MAX_TX_RETRY_EVENT_ID");
sta_bitmap |= le16_to_cpu(mbox->sta_tx_retry_exceeded);
disconnect_sta = true;
}
- if ((vector & INACTIVE_STA_EVENT_ID) && is_ap) {
+ if (vector & INACTIVE_STA_EVENT_ID) {
wl1271_debug(DEBUG_EVENT, "INACTIVE_STA_EVENT_ID");
sta_bitmap |= le16_to_cpu(mbox->sta_aging_status);
disconnect_sta = true;
}
- if (is_ap && disconnect_sta) {
+ if (disconnect_sta) {
u32 num_packets = wl->conf.tx.max_tx_retries;
struct ieee80211_sta *sta;
const u8 *addr;
int h;
- for (h = find_first_bit(&sta_bitmap, AP_MAX_LINKS);
- h < AP_MAX_LINKS;
- h = find_next_bit(&sta_bitmap, AP_MAX_LINKS, h+1)) {
- if (!wl1271_is_active_sta(wl, h))
+ for_each_set_bit(h, &sta_bitmap, WL12XX_MAX_LINKS) {
+ bool found = false;
+ /* find the ap vif connected to this sta */
+ wl12xx_for_each_wlvif_ap(wl, wlvif) {
+ if (!test_bit(h, wlvif->ap.sta_hlid_map))
+ continue;
+ found = true;
+ break;
+ }
+ if (!found)
continue;
+ vif = wl12xx_wlvif_to_vif(wlvif);
addr = wl->links[h].addr;
rcu_read_lock();
- sta = ieee80211_find_sta(wl->vif, addr);
+ sta = ieee80211_find_sta(vif, addr);
if (sta) {
wl1271_debug(DEBUG_EVENT, "remove sta %d", h);
ieee80211_report_low_ack(sta, num_packets);
@@ -361,8 +408,11 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
}
}
- if (wl->vif && beacon_loss)
- ieee80211_connection_loss(wl->vif);
+ if (beacon_loss)
+ wl12xx_for_each_wlvif_sta(wl, wlvif) {
+ vif = wl12xx_wlvif_to_vif(wlvif);
+ ieee80211_connection_loss(vif);
+ }
return 0;
}
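For reference, the wl12xx_for_each_wlvif*() helpers relied on throughout the event-handling changes above are not part of this hunk; they are assumed to be list_for_each_entry()-style macros over wl->wlvif_list filtered by bss_type, roughly:

/* sketch of the iteration helpers (assumed to live in wl12xx.h, not shown in this diff) */
#define wl12xx_for_each_wlvif(wl, wlvif) \
	list_for_each_entry(wlvif, &wl->wlvif_list, list)

#define wl12xx_for_each_wlvif_bss_type(wl, wlvif, _bss_type) \
	wl12xx_for_each_wlvif(wl, wlvif) \
		if (wlvif->bss_type == _bss_type)

#define wl12xx_for_each_wlvif_sta(wl, wlvif) \
	wl12xx_for_each_wlvif_bss_type(wl, wlvif, BSS_TYPE_STA_BSS)

#define wl12xx_for_each_wlvif_ap(wl, wlvif) \
	wl12xx_for_each_wlvif_bss_type(wl, wlvif, BSS_TYPE_AP_BSS)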
diff --git a/drivers/net/wireless/wl12xx/event.h b/drivers/net/wireless/wl12xx/event.h
index 49c1a0ede5b1..1d878ba47bf4 100644
--- a/drivers/net/wireless/wl12xx/event.h
+++ b/drivers/net/wireless/wl12xx/event.h
@@ -132,7 +132,4 @@ void wl1271_event_mbox_config(struct wl1271 *wl);
int wl1271_event_handle(struct wl1271 *wl, u8 mbox);
void wl1271_pspoll_work(struct work_struct *work);
-/* Functions from main.c */
-bool wl1271_is_active_sta(struct wl1271 *wl, u8 hlid);
-
#endif
diff --git a/drivers/net/wireless/wl12xx/init.c b/drivers/net/wireless/wl12xx/init.c
index 04db64c94e9a..ca7ee59e4505 100644
--- a/drivers/net/wireless/wl12xx/init.c
+++ b/drivers/net/wireless/wl12xx/init.c
@@ -25,6 +25,7 @@
#include <linux/module.h>
#include <linux/slab.h>
+#include "debug.h"
#include "init.h"
#include "wl12xx_80211.h"
#include "acx.h"
@@ -33,7 +34,7 @@
#include "tx.h"
#include "io.h"
-int wl1271_sta_init_templates_config(struct wl1271 *wl)
+int wl1271_init_templates_config(struct wl1271 *wl)
{
int ret, i;
@@ -64,7 +65,7 @@ int wl1271_sta_init_templates_config(struct wl1271 *wl)
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, NULL,
sizeof
- (struct wl12xx_qos_null_data_template),
+ (struct ieee80211_qos_hdr),
0, WL1271_RATE_AUTOMATIC);
if (ret < 0)
return ret;
@@ -88,10 +89,33 @@ int wl1271_sta_init_templates_config(struct wl1271 *wl)
if (ret < 0)
return ret;
+ /*
+ * Put very large empty placeholders for all templates. These
+ * reserve memory for later.
+ */
+ ret = wl1271_cmd_template_set(wl, CMD_TEMPL_AP_PROBE_RESPONSE, NULL,
+ WL1271_CMD_TEMPL_MAX_SIZE,
+ 0, WL1271_RATE_AUTOMATIC);
+ if (ret < 0)
+ return ret;
+
+ ret = wl1271_cmd_template_set(wl, CMD_TEMPL_AP_BEACON, NULL,
+ WL1271_CMD_TEMPL_MAX_SIZE,
+ 0, WL1271_RATE_AUTOMATIC);
+ if (ret < 0)
+ return ret;
+
+ ret = wl1271_cmd_template_set(wl, CMD_TEMPL_DEAUTH_AP, NULL,
+ sizeof
+ (struct wl12xx_disconn_template),
+ 0, WL1271_RATE_AUTOMATIC);
+ if (ret < 0)
+ return ret;
+
for (i = 0; i < CMD_TEMPL_KLV_IDX_MAX; i++) {
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_KLV, NULL,
- WL1271_CMD_TEMPL_DFLT_SIZE, i,
- WL1271_RATE_AUTOMATIC);
+ sizeof(struct ieee80211_qos_hdr),
+ i, WL1271_RATE_AUTOMATIC);
if (ret < 0)
return ret;
}
@@ -99,7 +123,8 @@ int wl1271_sta_init_templates_config(struct wl1271 *wl)
return 0;
}
-static int wl1271_ap_init_deauth_template(struct wl1271 *wl)
+static int wl1271_ap_init_deauth_template(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
struct wl12xx_disconn_template *tmpl;
int ret;
@@ -114,7 +139,7 @@ static int wl1271_ap_init_deauth_template(struct wl1271 *wl)
tmpl->header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_DEAUTH);
- rate = wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
+ rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_DEAUTH_AP,
tmpl, sizeof(*tmpl), 0, rate);
@@ -123,8 +148,10 @@ out:
return ret;
}
-static int wl1271_ap_init_null_template(struct wl1271 *wl)
+static int wl1271_ap_init_null_template(struct wl1271 *wl,
+ struct ieee80211_vif *vif)
{
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
struct ieee80211_hdr_3addr *nullfunc;
int ret;
u32 rate;
@@ -141,10 +168,10 @@ static int wl1271_ap_init_null_template(struct wl1271 *wl)
/* nullfunc->addr1 is filled by FW */
- memcpy(nullfunc->addr2, wl->mac_addr, ETH_ALEN);
- memcpy(nullfunc->addr3, wl->mac_addr, ETH_ALEN);
+ memcpy(nullfunc->addr2, vif->addr, ETH_ALEN);
+ memcpy(nullfunc->addr3, vif->addr, ETH_ALEN);
- rate = wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
+ rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, nullfunc,
sizeof(*nullfunc), 0, rate);
@@ -153,8 +180,10 @@ out:
return ret;
}
-static int wl1271_ap_init_qos_null_template(struct wl1271 *wl)
+static int wl1271_ap_init_qos_null_template(struct wl1271 *wl,
+ struct ieee80211_vif *vif)
{
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
struct ieee80211_qos_hdr *qosnull;
int ret;
u32 rate;
@@ -171,10 +200,10 @@ static int wl1271_ap_init_qos_null_template(struct wl1271 *wl)
/* qosnull->addr1 is filled by FW */
- memcpy(qosnull->addr2, wl->mac_addr, ETH_ALEN);
- memcpy(qosnull->addr3, wl->mac_addr, ETH_ALEN);
+ memcpy(qosnull->addr2, vif->addr, ETH_ALEN);
+ memcpy(qosnull->addr3, vif->addr, ETH_ALEN);
- rate = wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
+ rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, qosnull,
sizeof(*qosnull), 0, rate);
@@ -183,49 +212,6 @@ out:
return ret;
}
-static int wl1271_ap_init_templates_config(struct wl1271 *wl)
-{
- int ret;
-
- /*
- * Put very large empty placeholders for all templates. These
- * reserve memory for later.
- */
- ret = wl1271_cmd_template_set(wl, CMD_TEMPL_AP_PROBE_RESPONSE, NULL,
- WL1271_CMD_TEMPL_MAX_SIZE,
- 0, WL1271_RATE_AUTOMATIC);
- if (ret < 0)
- return ret;
-
- ret = wl1271_cmd_template_set(wl, CMD_TEMPL_AP_BEACON, NULL,
- WL1271_CMD_TEMPL_MAX_SIZE,
- 0, WL1271_RATE_AUTOMATIC);
- if (ret < 0)
- return ret;
-
- ret = wl1271_cmd_template_set(wl, CMD_TEMPL_DEAUTH_AP, NULL,
- sizeof
- (struct wl12xx_disconn_template),
- 0, WL1271_RATE_AUTOMATIC);
- if (ret < 0)
- return ret;
-
- ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, NULL,
- sizeof(struct wl12xx_null_data_template),
- 0, WL1271_RATE_AUTOMATIC);
- if (ret < 0)
- return ret;
-
- ret = wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, NULL,
- sizeof
- (struct wl12xx_qos_null_data_template),
- 0, WL1271_RATE_AUTOMATIC);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
static int wl12xx_init_rx_config(struct wl1271 *wl)
{
int ret;
@@ -237,39 +223,37 @@ static int wl12xx_init_rx_config(struct wl1271 *wl)
return 0;
}
-int wl1271_init_phy_config(struct wl1271 *wl)
+static int wl12xx_init_phy_vif_config(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
int ret;
- ret = wl1271_acx_pd_threshold(wl);
- if (ret < 0)
- return ret;
-
- ret = wl1271_acx_slot(wl, DEFAULT_SLOT_TIME);
+ ret = wl1271_acx_slot(wl, wlvif, DEFAULT_SLOT_TIME);
if (ret < 0)
return ret;
- ret = wl1271_acx_service_period_timeout(wl);
+ ret = wl1271_acx_service_period_timeout(wl, wlvif);
if (ret < 0)
return ret;
- ret = wl1271_acx_rts_threshold(wl, wl->hw->wiphy->rts_threshold);
+ ret = wl1271_acx_rts_threshold(wl, wlvif, wl->hw->wiphy->rts_threshold);
if (ret < 0)
return ret;
return 0;
}
-static int wl1271_init_beacon_filter(struct wl1271 *wl)
+static int wl1271_init_sta_beacon_filter(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
int ret;
- /* disable beacon filtering at this stage */
- ret = wl1271_acx_beacon_filter_opt(wl, false);
+ ret = wl1271_acx_beacon_filter_table(wl, wlvif);
if (ret < 0)
return ret;
- ret = wl1271_acx_beacon_filter_table(wl);
+ /* enable beacon filtering */
+ ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
if (ret < 0)
return ret;
@@ -302,11 +286,12 @@ int wl1271_init_energy_detection(struct wl1271 *wl)
return 0;
}
-static int wl1271_init_beacon_broadcast(struct wl1271 *wl)
+static int wl1271_init_beacon_broadcast(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
int ret;
- ret = wl1271_acx_bcn_dtim_options(wl);
+ ret = wl1271_acx_bcn_dtim_options(wl, wlvif);
if (ret < 0)
return ret;
@@ -327,36 +312,13 @@ static int wl12xx_init_fwlog(struct wl1271 *wl)
return 0;
}
-static int wl1271_sta_hw_init(struct wl1271 *wl)
+/* generic sta initialization (non vif-specific) */
+static int wl1271_sta_hw_init(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
int ret;
- if (wl->chip.id != CHIP_ID_1283_PG20) {
- ret = wl1271_cmd_ext_radio_parms(wl);
- if (ret < 0)
- return ret;
- }
-
/* PS config */
- ret = wl1271_acx_config_ps(wl);
- if (ret < 0)
- return ret;
-
- ret = wl1271_sta_init_templates_config(wl);
- if (ret < 0)
- return ret;
-
- ret = wl1271_acx_group_address_tbl(wl, true, NULL, 0);
- if (ret < 0)
- return ret;
-
- /* Initialize connection monitoring thresholds */
- ret = wl1271_acx_conn_monit_params(wl, false);
- if (ret < 0)
- return ret;
-
- /* Beacon filtering */
- ret = wl1271_init_beacon_filter(wl);
+ ret = wl12xx_acx_config_ps(wl, wlvif);
if (ret < 0)
return ret;
@@ -365,103 +327,61 @@ static int wl1271_sta_hw_init(struct wl1271 *wl)
if (ret < 0)
return ret;
- /* Beacons and broadcast settings */
- ret = wl1271_init_beacon_broadcast(wl);
- if (ret < 0)
- return ret;
-
- /* Configure for ELP power saving */
- ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
- if (ret < 0)
- return ret;
-
- /* Configure rssi/snr averaging weights */
- ret = wl1271_acx_rssi_snr_avg_weights(wl);
- if (ret < 0)
- return ret;
-
- ret = wl1271_acx_sta_rate_policies(wl);
- if (ret < 0)
- return ret;
-
- ret = wl12xx_acx_mem_cfg(wl);
- if (ret < 0)
- return ret;
-
- /* Configure the FW logger */
- ret = wl12xx_init_fwlog(wl);
+ ret = wl1271_acx_sta_rate_policies(wl, wlvif);
if (ret < 0)
return ret;
return 0;
}
-static int wl1271_sta_hw_init_post_mem(struct wl1271 *wl)
+static int wl1271_sta_hw_init_post_mem(struct wl1271 *wl,
+ struct ieee80211_vif *vif)
{
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
int ret, i;
/* disable all keep-alive templates */
for (i = 0; i < CMD_TEMPL_KLV_IDX_MAX; i++) {
- ret = wl1271_acx_keep_alive_config(wl, i,
+ ret = wl1271_acx_keep_alive_config(wl, wlvif, i,
ACX_KEEP_ALIVE_TPL_INVALID);
if (ret < 0)
return ret;
}
/* disable the keep-alive feature */
- ret = wl1271_acx_keep_alive_mode(wl, false);
+ ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
if (ret < 0)
return ret;
return 0;
}
-static int wl1271_ap_hw_init(struct wl1271 *wl)
+/* generic ap initialization (non vif-specific) */
+static int wl1271_ap_hw_init(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
int ret;
- ret = wl1271_ap_init_templates_config(wl);
- if (ret < 0)
- return ret;
-
- /* Configure for power always on */
- ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
- if (ret < 0)
- return ret;
-
- ret = wl1271_init_ap_rates(wl);
- if (ret < 0)
- return ret;
-
- ret = wl1271_acx_ap_max_tx_retry(wl);
- if (ret < 0)
- return ret;
-
- ret = wl12xx_acx_mem_cfg(wl);
- if (ret < 0)
- return ret;
-
- /* initialize Tx power */
- ret = wl1271_acx_tx_power(wl, wl->power_level);
+ ret = wl1271_init_ap_rates(wl, wlvif);
if (ret < 0)
return ret;
return 0;
}
-int wl1271_ap_init_templates(struct wl1271 *wl)
+int wl1271_ap_init_templates(struct wl1271 *wl, struct ieee80211_vif *vif)
{
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
int ret;
- ret = wl1271_ap_init_deauth_template(wl);
+ ret = wl1271_ap_init_deauth_template(wl, wlvif);
if (ret < 0)
return ret;
- ret = wl1271_ap_init_null_template(wl);
+ ret = wl1271_ap_init_null_template(wl, vif);
if (ret < 0)
return ret;
- ret = wl1271_ap_init_qos_null_template(wl);
+ ret = wl1271_ap_init_qos_null_template(wl, vif);
if (ret < 0)
return ret;
@@ -469,43 +389,45 @@ int wl1271_ap_init_templates(struct wl1271 *wl)
* when operating as AP we want to receive external beacons for
* configuring ERP protection.
*/
- ret = wl1271_acx_beacon_filter_opt(wl, false);
+ ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
if (ret < 0)
return ret;
return 0;
}
-static int wl1271_ap_hw_init_post_mem(struct wl1271 *wl)
+static int wl1271_ap_hw_init_post_mem(struct wl1271 *wl,
+ struct ieee80211_vif *vif)
{
- return wl1271_ap_init_templates(wl);
+ return wl1271_ap_init_templates(wl, vif);
}
-int wl1271_init_ap_rates(struct wl1271 *wl)
+int wl1271_init_ap_rates(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
int i, ret;
struct conf_tx_rate_class rc;
u32 supported_rates;
- wl1271_debug(DEBUG_AP, "AP basic rate set: 0x%x", wl->basic_rate_set);
+ wl1271_debug(DEBUG_AP, "AP basic rate set: 0x%x",
+ wlvif->basic_rate_set);
- if (wl->basic_rate_set == 0)
+ if (wlvif->basic_rate_set == 0)
return -EINVAL;
- rc.enabled_rates = wl->basic_rate_set;
+ rc.enabled_rates = wlvif->basic_rate_set;
rc.long_retry_limit = 10;
rc.short_retry_limit = 10;
rc.aflags = 0;
- ret = wl1271_acx_ap_rate_policy(wl, &rc, ACX_TX_AP_MODE_MGMT_RATE);
+ ret = wl1271_acx_ap_rate_policy(wl, &rc, wlvif->ap.mgmt_rate_idx);
if (ret < 0)
return ret;
/* use the min basic rate for AP broadcast/multicast */
- rc.enabled_rates = wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
+ rc.enabled_rates = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
rc.short_retry_limit = 10;
rc.long_retry_limit = 10;
rc.aflags = 0;
- ret = wl1271_acx_ap_rate_policy(wl, &rc, ACX_TX_AP_MODE_BCST_RATE);
+ ret = wl1271_acx_ap_rate_policy(wl, &rc, wlvif->ap.bcast_rate_idx);
if (ret < 0)
return ret;
@@ -513,7 +435,7 @@ int wl1271_init_ap_rates(struct wl1271 *wl)
* If the basic rates contain OFDM rates, use OFDM only
* rates for unicast TX as well. Else use all supported rates.
*/
- if ((wl->basic_rate_set & CONF_TX_OFDM_RATES))
+ if ((wlvif->basic_rate_set & CONF_TX_OFDM_RATES))
supported_rates = CONF_TX_OFDM_RATES;
else
supported_rates = CONF_TX_AP_ENABLED_RATES;
@@ -527,7 +449,8 @@ int wl1271_init_ap_rates(struct wl1271 *wl)
rc.short_retry_limit = 10;
rc.long_retry_limit = 10;
rc.aflags = 0;
- ret = wl1271_acx_ap_rate_policy(wl, &rc, i);
+ ret = wl1271_acx_ap_rate_policy(wl, &rc,
+ wlvif->ap.ucast_rate_idx[i]);
if (ret < 0)
return ret;
}
@@ -535,24 +458,23 @@ int wl1271_init_ap_rates(struct wl1271 *wl)
return 0;
}
-static int wl1271_set_ba_policies(struct wl1271 *wl)
+static int wl1271_set_ba_policies(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
/* Reset the BA RX indicators */
- wl->ba_rx_bitmap = 0;
- wl->ba_allowed = true;
+ wlvif->ba_allowed = true;
wl->ba_rx_session_count = 0;
/* BA is supported in STA/AP modes */
- if (wl->bss_type != BSS_TYPE_AP_BSS &&
- wl->bss_type != BSS_TYPE_STA_BSS) {
- wl->ba_support = false;
+ if (wlvif->bss_type != BSS_TYPE_AP_BSS &&
+ wlvif->bss_type != BSS_TYPE_STA_BSS) {
+ wlvif->ba_support = false;
return 0;
}
- wl->ba_support = true;
+ wlvif->ba_support = true;
/* 802.11n initiator BA session setting */
- return wl12xx_acx_set_ba_initiator_policy(wl);
+ return wl12xx_acx_set_ba_initiator_policy(wl, wlvif);
}
int wl1271_chip_specific_init(struct wl1271 *wl)
@@ -562,7 +484,7 @@ int wl1271_chip_specific_init(struct wl1271 *wl)
if (wl->chip.id == CHIP_ID_1283_PG20) {
u32 host_cfg_bitmap = HOST_IF_CFG_RX_FIFO_ENABLE;
- if (wl->quirks & WL12XX_QUIRK_BLOCKSIZE_ALIGNMENT)
+ if (!(wl->quirks & WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT))
/* Enable SDIO padding */
host_cfg_bitmap |= HOST_IF_CFG_TX_PAD_TO_SDIO_BLK;
@@ -575,39 +497,186 @@ out:
return ret;
}
+/* vif-specific initialization */
+static int wl12xx_init_sta_role(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+{
+ int ret;
-int wl1271_hw_init(struct wl1271 *wl)
+ ret = wl1271_acx_group_address_tbl(wl, wlvif, true, NULL, 0);
+ if (ret < 0)
+ return ret;
+
+ /* Initialize connection monitoring thresholds */
+ ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
+ if (ret < 0)
+ return ret;
+
+ /* Beacon filtering */
+ ret = wl1271_init_sta_beacon_filter(wl, wlvif);
+ if (ret < 0)
+ return ret;
+
+ /* Beacons and broadcast settings */
+ ret = wl1271_init_beacon_broadcast(wl, wlvif);
+ if (ret < 0)
+ return ret;
+
+ /* Configure rssi/snr averaging weights */
+ ret = wl1271_acx_rssi_snr_avg_weights(wl, wlvif);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+/* vif-specific initialization */
+static int wl12xx_init_ap_role(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
+ int ret;
+
+ ret = wl1271_acx_ap_max_tx_retry(wl, wlvif);
+ if (ret < 0)
+ return ret;
+
+ /* initialize Tx power */
+ ret = wl1271_acx_tx_power(wl, wlvif, wlvif->power_level);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+int wl1271_init_vif_specific(struct wl1271 *wl, struct ieee80211_vif *vif)
+{
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
struct conf_tx_ac_category *conf_ac;
struct conf_tx_tid *conf_tid;
+ bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
int ret, i;
- bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
- if (wl->chip.id == CHIP_ID_1283_PG20)
- ret = wl128x_cmd_general_parms(wl);
- else
- ret = wl1271_cmd_general_parms(wl);
+ /*
+ * consider all existing roles before configuring psm.
+ * TODO: reconfigure on interface removal.
+ */
+ if (!wl->ap_count) {
+ if (is_ap) {
+ /* Configure for power always on */
+ ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
+ if (ret < 0)
+ return ret;
+ } else if (!wl->sta_count) {
+ /* Configure for ELP power saving */
+ ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ /* Mode specific init */
+ if (is_ap) {
+ ret = wl1271_ap_hw_init(wl, wlvif);
+ if (ret < 0)
+ return ret;
+
+ ret = wl12xx_init_ap_role(wl, wlvif);
+ if (ret < 0)
+ return ret;
+ } else {
+ ret = wl1271_sta_hw_init(wl, wlvif);
+ if (ret < 0)
+ return ret;
+
+ ret = wl12xx_init_sta_role(wl, wlvif);
+ if (ret < 0)
+ return ret;
+ }
+
+ wl12xx_init_phy_vif_config(wl, wlvif);
+
+ /* Default TID/AC configuration */
+ BUG_ON(wl->conf.tx.tid_conf_count != wl->conf.tx.ac_conf_count);
+ for (i = 0; i < wl->conf.tx.tid_conf_count; i++) {
+ conf_ac = &wl->conf.tx.ac_conf[i];
+ ret = wl1271_acx_ac_cfg(wl, wlvif, conf_ac->ac,
+ conf_ac->cw_min, conf_ac->cw_max,
+ conf_ac->aifsn, conf_ac->tx_op_limit);
+ if (ret < 0)
+ return ret;
+
+ conf_tid = &wl->conf.tx.tid_conf[i];
+ ret = wl1271_acx_tid_cfg(wl, wlvif,
+ conf_tid->queue_id,
+ conf_tid->channel_type,
+ conf_tid->tsid,
+ conf_tid->ps_scheme,
+ conf_tid->ack_policy,
+ conf_tid->apsd_conf[0],
+ conf_tid->apsd_conf[1]);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Configure HW encryption */
+ ret = wl1271_acx_feature_cfg(wl, wlvif);
if (ret < 0)
return ret;
- if (wl->chip.id == CHIP_ID_1283_PG20)
- ret = wl128x_cmd_radio_parms(wl);
+ /* Mode specific init - post mem init */
+ if (is_ap)
+ ret = wl1271_ap_hw_init_post_mem(wl, vif);
else
- ret = wl1271_cmd_radio_parms(wl);
+ ret = wl1271_sta_hw_init_post_mem(wl, vif);
+
+ if (ret < 0)
+ return ret;
+
+ /* Configure initiator BA sessions policies */
+ ret = wl1271_set_ba_policies(wl, wlvif);
if (ret < 0)
return ret;
+ return 0;
+}
+
+int wl1271_hw_init(struct wl1271 *wl)
+{
+ int ret;
+
+ if (wl->chip.id == CHIP_ID_1283_PG20) {
+ ret = wl128x_cmd_general_parms(wl);
+ if (ret < 0)
+ return ret;
+ ret = wl128x_cmd_radio_parms(wl);
+ if (ret < 0)
+ return ret;
+ } else {
+ ret = wl1271_cmd_general_parms(wl);
+ if (ret < 0)
+ return ret;
+ ret = wl1271_cmd_radio_parms(wl);
+ if (ret < 0)
+ return ret;
+ ret = wl1271_cmd_ext_radio_parms(wl);
+ if (ret < 0)
+ return ret;
+ }
+
/* Chip-specific init */
ret = wl1271_chip_specific_init(wl);
if (ret < 0)
return ret;
- /* Mode specific init */
- if (is_ap)
- ret = wl1271_ap_hw_init(wl);
- else
- ret = wl1271_sta_hw_init(wl);
+ /* Init templates */
+ ret = wl1271_init_templates_config(wl);
+ if (ret < 0)
+ return ret;
+
+ ret = wl12xx_acx_mem_cfg(wl);
+ if (ret < 0)
+ return ret;
+ /* Configure the FW logger */
+ ret = wl12xx_init_fwlog(wl);
if (ret < 0)
return ret;
@@ -626,11 +695,6 @@ int wl1271_hw_init(struct wl1271 *wl)
if (ret < 0)
goto out_free_memmap;
- /* PHY layer config */
- ret = wl1271_init_phy_config(wl);
- if (ret < 0)
- goto out_free_memmap;
-
ret = wl1271_acx_dco_itrim_params(wl);
if (ret < 0)
goto out_free_memmap;
@@ -655,61 +719,20 @@ int wl1271_hw_init(struct wl1271 *wl)
if (ret < 0)
goto out_free_memmap;
- /* Default TID/AC configuration */
- BUG_ON(wl->conf.tx.tid_conf_count != wl->conf.tx.ac_conf_count);
- for (i = 0; i < wl->conf.tx.tid_conf_count; i++) {
- conf_ac = &wl->conf.tx.ac_conf[i];
- ret = wl1271_acx_ac_cfg(wl, conf_ac->ac, conf_ac->cw_min,
- conf_ac->cw_max, conf_ac->aifsn,
- conf_ac->tx_op_limit);
- if (ret < 0)
- goto out_free_memmap;
-
- conf_tid = &wl->conf.tx.tid_conf[i];
- ret = wl1271_acx_tid_cfg(wl, conf_tid->queue_id,
- conf_tid->channel_type,
- conf_tid->tsid,
- conf_tid->ps_scheme,
- conf_tid->ack_policy,
- conf_tid->apsd_conf[0],
- conf_tid->apsd_conf[1]);
- if (ret < 0)
- goto out_free_memmap;
- }
-
/* Enable data path */
ret = wl1271_cmd_data_path(wl, 1);
if (ret < 0)
goto out_free_memmap;
- /* Configure HW encryption */
- ret = wl1271_acx_feature_cfg(wl);
- if (ret < 0)
- goto out_free_memmap;
-
/* configure PM */
ret = wl1271_acx_pm_config(wl);
if (ret < 0)
goto out_free_memmap;
- /* Mode specific init - post mem init */
- if (is_ap)
- ret = wl1271_ap_hw_init_post_mem(wl);
- else
- ret = wl1271_sta_hw_init_post_mem(wl);
-
- if (ret < 0)
- goto out_free_memmap;
-
ret = wl12xx_acx_set_rate_mgmt_params(wl);
if (ret < 0)
goto out_free_memmap;
- /* Configure initiator BA sessions policies */
- ret = wl1271_set_ba_policies(wl);
- if (ret < 0)
- goto out_free_memmap;
-
/* configure hangover */
ret = wl12xx_acx_config_hangover(wl);
if (ret < 0)
diff --git a/drivers/net/wireless/wl12xx/init.h b/drivers/net/wireless/wl12xx/init.h
index 3a3c230fd292..2da0f404ef6e 100644
--- a/drivers/net/wireless/wl12xx/init.h
+++ b/drivers/net/wireless/wl12xx/init.h
@@ -27,13 +27,13 @@
#include "wl12xx.h"
int wl1271_hw_init_power_auth(struct wl1271 *wl);
-int wl1271_sta_init_templates_config(struct wl1271 *wl);
-int wl1271_init_phy_config(struct wl1271 *wl);
+int wl1271_init_templates_config(struct wl1271 *wl);
int wl1271_init_pta(struct wl1271 *wl);
int wl1271_init_energy_detection(struct wl1271 *wl);
int wl1271_chip_specific_init(struct wl1271 *wl);
int wl1271_hw_init(struct wl1271 *wl);
-int wl1271_init_ap_rates(struct wl1271 *wl);
-int wl1271_ap_init_templates(struct wl1271 *wl);
+int wl1271_init_vif_specific(struct wl1271 *wl, struct ieee80211_vif *vif);
+int wl1271_init_ap_rates(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl1271_ap_init_templates(struct wl1271 *wl, struct ieee80211_vif *vif);
#endif
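A sketch of the split implied by the declarations above: wl1271_hw_init() is chip-wide and runs once per firmware boot, while wl1271_init_vif_specific() runs for every interface that is added. The wrapper below is illustrative only (example_bring_up() and its call order are assumptions, not taken from this patch):

/* hypothetical wrapper showing the assumed call order */
static int example_bring_up(struct wl1271 *wl, struct ieee80211_vif *vif)
{
	int ret;

	/* chip-wide: templates, memory config, FW logger */
	ret = wl1271_hw_init(wl);
	if (ret < 0)
		return ret;

	/* per-role: sleep auth, rates, AC/TID config, BA policies */
	return wl1271_init_vif_specific(wl, vif);
}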
diff --git a/drivers/net/wireless/wl12xx/io.c b/drivers/net/wireless/wl12xx/io.c
index c2da66f45046..079ad380e8ff 100644
--- a/drivers/net/wireless/wl12xx/io.c
+++ b/drivers/net/wireless/wl12xx/io.c
@@ -24,8 +24,10 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
+#include <linux/interrupt.h>
#include "wl12xx.h"
+#include "debug.h"
#include "wl12xx_80211.h"
#include "io.h"
#include "tx.h"
@@ -46,7 +48,7 @@
bool wl1271_set_block_size(struct wl1271 *wl)
{
if (wl->if_ops->set_block_size) {
- wl->if_ops->set_block_size(wl, WL12XX_BUS_BLOCK_SIZE);
+ wl->if_ops->set_block_size(wl->dev, WL12XX_BUS_BLOCK_SIZE);
return true;
}
@@ -55,12 +57,12 @@ bool wl1271_set_block_size(struct wl1271 *wl)
void wl1271_disable_interrupts(struct wl1271 *wl)
{
- wl->if_ops->disable_irq(wl);
+ disable_irq(wl->irq);
}
void wl1271_enable_interrupts(struct wl1271 *wl)
{
- wl->if_ops->enable_irq(wl);
+ enable_irq(wl->irq);
}
/* Set the SPI partitions to access the chip addresses
@@ -128,13 +130,13 @@ EXPORT_SYMBOL_GPL(wl1271_set_partition);
void wl1271_io_reset(struct wl1271 *wl)
{
if (wl->if_ops->reset)
- wl->if_ops->reset(wl);
+ wl->if_ops->reset(wl->dev);
}
void wl1271_io_init(struct wl1271 *wl)
{
if (wl->if_ops->init)
- wl->if_ops->init(wl);
+ wl->if_ops->init(wl->dev);
}
void wl1271_top_reg_write(struct wl1271 *wl, int addr, u16 val)
diff --git a/drivers/net/wireless/wl12xx/io.h b/drivers/net/wireless/wl12xx/io.h
index e839341dfafe..d398cbcea986 100644
--- a/drivers/net/wireless/wl12xx/io.h
+++ b/drivers/net/wireless/wl12xx/io.h
@@ -51,23 +51,17 @@ void wl1271_enable_interrupts(struct wl1271 *wl);
void wl1271_io_reset(struct wl1271 *wl);
void wl1271_io_init(struct wl1271 *wl);
-static inline struct device *wl1271_wl_to_dev(struct wl1271 *wl)
-{
- return wl->if_ops->dev(wl);
-}
-
-
/* Raw target IO, address is not translated */
static inline void wl1271_raw_write(struct wl1271 *wl, int addr, void *buf,
size_t len, bool fixed)
{
- wl->if_ops->write(wl, addr, buf, len, fixed);
+ wl->if_ops->write(wl->dev, addr, buf, len, fixed);
}
static inline void wl1271_raw_read(struct wl1271 *wl, int addr, void *buf,
size_t len, bool fixed)
{
- wl->if_ops->read(wl, addr, buf, len, fixed);
+ wl->if_ops->read(wl->dev, addr, buf, len, fixed);
}
static inline u32 wl1271_raw_read32(struct wl1271 *wl, int addr)
@@ -155,13 +149,13 @@ static inline void wl1271_write32(struct wl1271 *wl, int addr, u32 val)
static inline void wl1271_power_off(struct wl1271 *wl)
{
- wl->if_ops->power(wl, false);
+ wl->if_ops->power(wl->dev, false);
clear_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
}
static inline int wl1271_power_on(struct wl1271 *wl)
{
- int ret = wl->if_ops->power(wl, true);
+ int ret = wl->if_ops->power(wl->dev, true);
if (ret == 0)
set_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
@@ -176,15 +170,10 @@ u16 wl1271_top_reg_read(struct wl1271 *wl, int addr);
int wl1271_set_partition(struct wl1271 *wl,
struct wl1271_partition_set *p);
+bool wl1271_set_block_size(struct wl1271 *wl);
+
/* Functions from wl1271_main.c */
-int wl1271_register_hw(struct wl1271 *wl);
-void wl1271_unregister_hw(struct wl1271 *wl);
-int wl1271_init_ieee80211(struct wl1271 *wl);
-struct ieee80211_hw *wl1271_alloc_hw(void);
-int wl1271_free_hw(struct wl1271 *wl);
-irqreturn_t wl1271_irq(int irq, void *data);
-bool wl1271_set_block_size(struct wl1271 *wl);
int wl1271_tx_dummy_packet(struct wl1271 *wl);
#endif
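The io.h change switches every if_ops callback from taking a struct wl1271 * to taking the child's struct device *; under that assumption a bus glue would now be shaped roughly as below (all example_* names, and the drvdata lookup, are hypothetical and not from this patch):

/* hypothetical bus glue; if_ops callbacks now receive the child device */
struct example_glue {
	void *bus_handle;	/* stand-in for the real SDIO/SPI handle */
};

static void example_bus_read(struct device *child, int addr, void *buf,
			     size_t len, bool fixed)
{
	/* one possible layout: glue data hangs off the parent device */
	struct example_glue *glue = dev_get_drvdata(child->parent);

	/* issue the bus read through glue->bus_handle */
	(void)glue;
}

static void example_bus_write(struct device *child, int addr, void *buf,
			      size_t len, bool fixed)
{
	struct example_glue *glue = dev_get_drvdata(child->parent);

	/* issue the bus write through glue->bus_handle */
	(void)glue;
}

static struct wl1271_if_operations example_bus_ops = {
	.read	= example_bus_read,
	.write	= example_bus_write,
};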
diff --git a/drivers/net/wireless/wl12xx/main.c b/drivers/net/wireless/wl12xx/main.c
index 884f82b63219..d5f55a149de5 100644
--- a/drivers/net/wireless/wl12xx/main.c
+++ b/drivers/net/wireless/wl12xx/main.c
@@ -32,8 +32,10 @@
#include <linux/slab.h>
#include <linux/wl12xx.h>
#include <linux/sched.h>
+#include <linux/interrupt.h>
#include "wl12xx.h"
+#include "debug.h"
#include "wl12xx_80211.h"
#include "reg.h"
#include "io.h"
@@ -377,42 +379,30 @@ static char *fwlog_param;
static bool bug_on_recovery;
static void __wl1271_op_remove_interface(struct wl1271 *wl,
+ struct ieee80211_vif *vif,
bool reset_tx_queues);
-static void wl1271_free_ap_keys(struct wl1271 *wl);
-
-
-static void wl1271_device_release(struct device *dev)
-{
-
-}
-
-static struct platform_device wl1271_device = {
- .name = "wl1271",
- .id = -1,
-
- /* device model insists to have a release function */
- .dev = {
- .release = wl1271_device_release,
- },
-};
+static void wl1271_op_stop(struct ieee80211_hw *hw);
+static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
static DEFINE_MUTEX(wl_list_mutex);
static LIST_HEAD(wl_list);
-static int wl1271_check_operstate(struct wl1271 *wl, unsigned char operstate)
+static int wl1271_check_operstate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ unsigned char operstate)
{
int ret;
+
if (operstate != IF_OPER_UP)
return 0;
- if (test_and_set_bit(WL1271_FLAG_STA_STATE_SENT, &wl->flags))
+ if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
return 0;
- ret = wl12xx_cmd_set_peer_state(wl, wl->sta_hlid);
+ ret = wl12xx_cmd_set_peer_state(wl, wlvif->sta.hlid);
if (ret < 0)
return ret;
- wl12xx_croc(wl, wl->role_id);
+ wl12xx_croc(wl, wlvif->role_id);
wl1271_info("Association completed.");
return 0;
@@ -426,6 +416,7 @@ static int wl1271_dev_notify(struct notifier_block *me, unsigned long what,
struct ieee80211_hw *hw;
struct wl1271 *wl;
struct wl1271 *wl_temp;
+ struct wl12xx_vif *wlvif;
int ret = 0;
/* Check that this notification is for us. */
@@ -459,17 +450,28 @@ static int wl1271_dev_notify(struct notifier_block *me, unsigned long what,
if (wl->state == WL1271_STATE_OFF)
goto out;
- if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
+ if (dev->operstate != IF_OPER_UP)
goto out;
+ /*
+ * The correct behavior should be just getting the appropriate wlvif
+ * from the given dev, but currently we don't have a mac80211
+ * interface for it.
+ */
+ wl12xx_for_each_wlvif_sta(wl, wlvif) {
+ struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
- goto out;
+ if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
+ continue;
- wl1271_check_operstate(wl, dev->operstate);
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out;
- wl1271_ps_elp_sleep(wl);
+ wl1271_check_operstate(wl, wlvif,
+ ieee80211_get_operstate(vif));
+ wl1271_ps_elp_sleep(wl);
+ }
out:
mutex_unlock(&wl->mutex);
@@ -498,19 +500,20 @@ static int wl1271_reg_notify(struct wiphy *wiphy,
return 0;
}
-static int wl1271_set_rx_streaming(struct wl1271 *wl, bool enable)
+static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ bool enable)
{
int ret = 0;
/* we should hold wl->mutex */
- ret = wl1271_acx_ps_rx_streaming(wl, enable);
+ ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
if (ret < 0)
goto out;
if (enable)
- set_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags);
+ set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
else
- clear_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags);
+ clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
out:
return ret;
}
@@ -519,25 +522,25 @@ out:
* this function is being called when the rx_streaming interval
* has been changed or rx_streaming should be disabled
*/
-int wl1271_recalc_rx_streaming(struct wl1271 *wl)
+int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
int ret = 0;
int period = wl->conf.rx_streaming.interval;
/* don't reconfigure if rx_streaming is disabled */
- if (!test_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags))
+ if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
goto out;
/* reconfigure/disable according to new streaming_period */
if (period &&
- test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags) &&
+ test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
(wl->conf.rx_streaming.always ||
test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
- ret = wl1271_set_rx_streaming(wl, true);
+ ret = wl1271_set_rx_streaming(wl, wlvif, true);
else {
- ret = wl1271_set_rx_streaming(wl, false);
+ ret = wl1271_set_rx_streaming(wl, wlvif, false);
/* don't cancel_work_sync since we might deadlock */
- del_timer_sync(&wl->rx_streaming_timer);
+ del_timer_sync(&wlvif->rx_streaming_timer);
}
out:
return ret;
@@ -546,13 +549,14 @@ out:
static void wl1271_rx_streaming_enable_work(struct work_struct *work)
{
int ret;
- struct wl1271 *wl =
- container_of(work, struct wl1271, rx_streaming_enable_work);
+ struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
+ rx_streaming_enable_work);
+ struct wl1271 *wl = wlvif->wl;
mutex_lock(&wl->mutex);
- if (test_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags) ||
- !test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags) ||
+ if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
+ !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
(!wl->conf.rx_streaming.always &&
!test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
goto out;
@@ -564,12 +568,12 @@ static void wl1271_rx_streaming_enable_work(struct work_struct *work)
if (ret < 0)
goto out;
- ret = wl1271_set_rx_streaming(wl, true);
+ ret = wl1271_set_rx_streaming(wl, wlvif, true);
if (ret < 0)
goto out_sleep;
/* stop it after some time of inactivity */
- mod_timer(&wl->rx_streaming_timer,
+ mod_timer(&wlvif->rx_streaming_timer,
jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
out_sleep:
@@ -581,19 +585,20 @@ out:
static void wl1271_rx_streaming_disable_work(struct work_struct *work)
{
int ret;
- struct wl1271 *wl =
- container_of(work, struct wl1271, rx_streaming_disable_work);
+ struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
+ rx_streaming_disable_work);
+ struct wl1271 *wl = wlvif->wl;
mutex_lock(&wl->mutex);
- if (!test_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags))
+ if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
goto out;
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
- ret = wl1271_set_rx_streaming(wl, false);
+ ret = wl1271_set_rx_streaming(wl, wlvif, false);
if (ret)
goto out_sleep;
@@ -605,8 +610,9 @@ out:
static void wl1271_rx_streaming_timer(unsigned long data)
{
- struct wl1271 *wl = (struct wl1271 *)data;
- ieee80211_queue_work(wl->hw, &wl->rx_streaming_disable_work);
+ struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data;
+ struct wl1271 *wl = wlvif->wl;
+ ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
}
static void wl1271_conf_init(struct wl1271 *wl)
@@ -645,9 +651,7 @@ static void wl1271_conf_init(struct wl1271 *wl)
static int wl1271_plt_init(struct wl1271 *wl)
{
- struct conf_tx_ac_category *conf_ac;
- struct conf_tx_tid *conf_tid;
- int ret, i;
+ int ret;
if (wl->chip.id == CHIP_ID_1283_PG20)
ret = wl128x_cmd_general_parms(wl);
@@ -676,74 +680,14 @@ static int wl1271_plt_init(struct wl1271 *wl)
if (ret < 0)
return ret;
- ret = wl1271_sta_init_templates_config(wl);
- if (ret < 0)
- return ret;
-
ret = wl1271_acx_init_mem_config(wl);
if (ret < 0)
return ret;
- /* PHY layer config */
- ret = wl1271_init_phy_config(wl);
- if (ret < 0)
- goto out_free_memmap;
-
- ret = wl1271_acx_dco_itrim_params(wl);
- if (ret < 0)
- goto out_free_memmap;
-
- /* Initialize connection monitoring thresholds */
- ret = wl1271_acx_conn_monit_params(wl, false);
- if (ret < 0)
- goto out_free_memmap;
-
- /* Bluetooth WLAN coexistence */
- ret = wl1271_init_pta(wl);
- if (ret < 0)
- goto out_free_memmap;
-
- /* FM WLAN coexistence */
- ret = wl1271_acx_fm_coex(wl);
- if (ret < 0)
- goto out_free_memmap;
-
- /* Energy detection */
- ret = wl1271_init_energy_detection(wl);
- if (ret < 0)
- goto out_free_memmap;
-
ret = wl12xx_acx_mem_cfg(wl);
if (ret < 0)
goto out_free_memmap;
- /* Default fragmentation threshold */
- ret = wl1271_acx_frag_threshold(wl, wl->conf.tx.frag_threshold);
- if (ret < 0)
- goto out_free_memmap;
-
- /* Default TID/AC configuration */
- BUG_ON(wl->conf.tx.tid_conf_count != wl->conf.tx.ac_conf_count);
- for (i = 0; i < wl->conf.tx.tid_conf_count; i++) {
- conf_ac = &wl->conf.tx.ac_conf[i];
- ret = wl1271_acx_ac_cfg(wl, conf_ac->ac, conf_ac->cw_min,
- conf_ac->cw_max, conf_ac->aifsn,
- conf_ac->tx_op_limit);
- if (ret < 0)
- goto out_free_memmap;
-
- conf_tid = &wl->conf.tx.tid_conf[i];
- ret = wl1271_acx_tid_cfg(wl, conf_tid->queue_id,
- conf_tid->channel_type,
- conf_tid->tsid,
- conf_tid->ps_scheme,
- conf_tid->ack_policy,
- conf_tid->apsd_conf[0],
- conf_tid->apsd_conf[1]);
- if (ret < 0)
- goto out_free_memmap;
- }
-
/* Enable data path */
ret = wl1271_cmd_data_path(wl, 1);
if (ret < 0)
@@ -768,14 +712,12 @@ static int wl1271_plt_init(struct wl1271 *wl)
return ret;
}
-static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl, u8 hlid, u8 tx_pkts)
+static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
+ u8 hlid, u8 tx_pkts)
{
bool fw_ps, single_sta;
- /* only regulate station links */
- if (hlid < WL1271_AP_STA_HLID_START)
- return;
-
fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
single_sta = (wl->active_sta_count == 1);
@@ -784,7 +726,7 @@ static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl, u8 hlid, u8 tx_pkts)
* packets in FW or if the STA is awake.
*/
if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
- wl1271_ps_link_end(wl, hlid);
+ wl12xx_ps_link_end(wl, wlvif, hlid);
/*
* Start high-level PS if the STA is asleep with enough blocks in FW.
@@ -792,24 +734,14 @@ static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl, u8 hlid, u8 tx_pkts)
* case FW-memory congestion is not a problem.
*/
else if (!single_sta && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
- wl1271_ps_link_start(wl, hlid, true);
-}
-
-bool wl1271_is_active_sta(struct wl1271 *wl, u8 hlid)
-{
- int id;
-
- /* global/broadcast "stations" are always active */
- if (hlid < WL1271_AP_STA_HLID_START)
- return true;
-
- id = hlid - WL1271_AP_STA_HLID_START;
- return test_bit(id, wl->ap_hlid_map);
+ wl12xx_ps_link_start(wl, wlvif, hlid, true);
}
static void wl12xx_irq_update_links_status(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
struct wl12xx_fw_status *status)
{
+ struct wl1271_link *lnk;
u32 cur_fw_ps_map;
u8 hlid, cnt;
@@ -825,25 +757,22 @@ static void wl12xx_irq_update_links_status(struct wl1271 *wl,
wl->ap_fw_ps_map = cur_fw_ps_map;
}
- for (hlid = WL1271_AP_STA_HLID_START; hlid < AP_MAX_LINKS; hlid++) {
- if (!wl1271_is_active_sta(wl, hlid))
- continue;
-
- cnt = status->tx_lnk_free_pkts[hlid] -
- wl->links[hlid].prev_freed_pkts;
+ for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, WL12XX_MAX_LINKS) {
+ lnk = &wl->links[hlid];
+ cnt = status->tx_lnk_free_pkts[hlid] - lnk->prev_freed_pkts;
- wl->links[hlid].prev_freed_pkts =
- status->tx_lnk_free_pkts[hlid];
- wl->links[hlid].allocated_pkts -= cnt;
+ lnk->prev_freed_pkts = status->tx_lnk_free_pkts[hlid];
+ lnk->allocated_pkts -= cnt;
- wl12xx_irq_ps_regulate_link(wl, hlid,
- wl->links[hlid].allocated_pkts);
+ wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
+ lnk->allocated_pkts);
}
}
static void wl12xx_fw_status(struct wl1271 *wl,
struct wl12xx_fw_status *status)
{
+ struct wl12xx_vif *wlvif;
struct timespec ts;
u32 old_tx_blk_count = wl->tx_blocks_available;
int avail, freed_blocks;
@@ -898,8 +827,9 @@ static void wl12xx_fw_status(struct wl1271 *wl,
clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
/* for AP update num of allocated TX blocks per link and ps status */
- if (wl->bss_type == BSS_TYPE_AP_BSS)
- wl12xx_irq_update_links_status(wl, status);
+ wl12xx_for_each_wlvif_ap(wl, wlvif) {
+ wl12xx_irq_update_links_status(wl, wlvif, status);
+ }
/* update the host-chipset time offset */
getnstimeofday(&ts);
@@ -932,7 +862,7 @@ static void wl1271_netstack_work(struct work_struct *work)
#define WL1271_IRQ_MAX_LOOPS 256
-irqreturn_t wl1271_irq(int irq, void *cookie)
+static irqreturn_t wl1271_irq(int irq, void *cookie)
{
int ret;
u32 intr;
@@ -1054,7 +984,6 @@ out:
return IRQ_HANDLED;
}
-EXPORT_SYMBOL_GPL(wl1271_irq);
static int wl1271_fetch_firmware(struct wl1271 *wl)
{
@@ -1069,10 +998,10 @@ static int wl1271_fetch_firmware(struct wl1271 *wl)
wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
- ret = request_firmware(&fw, fw_name, wl1271_wl_to_dev(wl));
+ ret = request_firmware(&fw, fw_name, wl->dev);
if (ret < 0) {
- wl1271_error("could not get firmware: %d", ret);
+ wl1271_error("could not get firmware %s: %d", fw_name, ret);
return ret;
}
@@ -1107,10 +1036,11 @@ static int wl1271_fetch_nvs(struct wl1271 *wl)
const struct firmware *fw;
int ret;
- ret = request_firmware(&fw, WL12XX_NVS_NAME, wl1271_wl_to_dev(wl));
+ ret = request_firmware(&fw, WL12XX_NVS_NAME, wl->dev);
if (ret < 0) {
- wl1271_error("could not get nvs file: %d", ret);
+ wl1271_error("could not get nvs file %s: %d", WL12XX_NVS_NAME,
+ ret);
return ret;
}
@@ -1217,11 +1147,13 @@ static void wl1271_recovery_work(struct work_struct *work)
{
struct wl1271 *wl =
container_of(work, struct wl1271, recovery_work);
+ struct wl12xx_vif *wlvif;
+ struct ieee80211_vif *vif;
mutex_lock(&wl->mutex);
if (wl->state != WL1271_STATE_ON)
- goto out;
+ goto out_unlock;
/* Avoid a recursive recovery */
set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
@@ -1238,9 +1170,12 @@ static void wl1271_recovery_work(struct work_struct *work)
* in the firmware during recovery. This doesn't hurt if the network is
* not encrypted.
*/
- if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags) ||
- test_bit(WL1271_FLAG_AP_STARTED, &wl->flags))
- wl->tx_security_seq += WL1271_TX_SQN_POST_RECOVERY_PADDING;
+ wl12xx_for_each_wlvif(wl, wlvif) {
+ if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
+ test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
+ wlvif->tx_security_seq +=
+ WL1271_TX_SQN_POST_RECOVERY_PADDING;
+ }
/* Prevent spurious TX during FW restart */
ieee80211_stop_queues(wl->hw);
@@ -1251,7 +1186,14 @@ static void wl1271_recovery_work(struct work_struct *work)
}
/* reboot the chipset */
- __wl1271_op_remove_interface(wl, false);
+ while (!list_empty(&wl->wlvif_list)) {
+ wlvif = list_first_entry(&wl->wlvif_list,
+ struct wl12xx_vif, list);
+ vif = wl12xx_wlvif_to_vif(wlvif);
+ __wl1271_op_remove_interface(wl, vif, false);
+ }
+ mutex_unlock(&wl->mutex);
+ wl1271_op_stop(wl->hw);
clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
@@ -1262,8 +1204,8 @@ static void wl1271_recovery_work(struct work_struct *work)
* to restart the HW.
*/
ieee80211_wake_queues(wl->hw);
-
-out:
+ return;
+out_unlock:
mutex_unlock(&wl->mutex);
}
@@ -1318,7 +1260,16 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
/* 0. read chip id from CHIP_ID */
wl->chip.id = wl1271_read32(wl, CHIP_ID_B);
- /* 1. check if chip id is valid */
+ /*
+ * For wl127x based devices we could use the default block
+ * size (512 bytes), but due to a bug in the sdio driver, we
+ * need to set it explicitly after the chip is powered on. To
+ * simplify the code and since the performance impact is
+ * negligible, we use the same block size for all different
+ * chip types.
+ */
+ if (!wl1271_set_block_size(wl))
+ wl->quirks |= WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT;
switch (wl->chip.id) {
case CHIP_ID_1271_PG10:
@@ -1328,7 +1279,9 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
ret = wl1271_setup(wl);
if (ret < 0)
goto out;
+ wl->quirks |= WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT;
break;
+
case CHIP_ID_1271_PG20:
wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1271 PG20)",
wl->chip.id);
@@ -1336,7 +1289,9 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
ret = wl1271_setup(wl);
if (ret < 0)
goto out;
+ wl->quirks |= WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT;
break;
+
case CHIP_ID_1283_PG20:
wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1283 PG20)",
wl->chip.id);
@@ -1344,9 +1299,6 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
ret = wl1271_setup(wl);
if (ret < 0)
goto out;
-
- if (wl1271_set_block_size(wl))
- wl->quirks |= WL12XX_QUIRK_BLOCKSIZE_ALIGNMENT;
break;
case CHIP_ID_1283_PG10:
default:
@@ -1389,8 +1341,6 @@ int wl1271_plt_start(struct wl1271 *wl)
goto out;
}
- wl->bss_type = BSS_TYPE_STA_BSS;
-
while (retries) {
retries--;
ret = wl1271_chip_wakeup(wl);
@@ -1482,33 +1432,34 @@ int wl1271_plt_stop(struct wl1271 *wl)
static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
struct wl1271 *wl = hw->priv;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_vif *vif = info->control.vif;
+ struct wl12xx_vif *wlvif = NULL;
unsigned long flags;
int q, mapping;
- u8 hlid = 0;
+ u8 hlid;
+
+ if (vif)
+ wlvif = wl12xx_vif_to_data(vif);
mapping = skb_get_queue_mapping(skb);
q = wl1271_tx_get_queue(mapping);
- if (wl->bss_type == BSS_TYPE_AP_BSS)
- hlid = wl12xx_tx_get_hlid_ap(wl, skb);
+ hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
spin_lock_irqsave(&wl->wl_lock, flags);
/* queue the packet */
- if (wl->bss_type == BSS_TYPE_AP_BSS) {
- if (!wl1271_is_active_sta(wl, hlid)) {
- wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d",
- hlid, q);
- dev_kfree_skb(skb);
- goto out;
- }
-
- wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d", hlid, q);
- skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
- } else {
- skb_queue_tail(&wl->tx_queue[q], skb);
+ if (hlid == WL12XX_INVALID_LINK_ID ||
+ (wlvif && !test_bit(hlid, wlvif->links_map))) {
+ wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
+ ieee80211_free_txskb(hw, skb);
+ goto out;
}
+ wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d", hlid, q);
+ skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
+
wl->tx_queue_count[q]++;
/*
@@ -1609,13 +1560,14 @@ static struct notifier_block wl1271_dev_notifier = {
};
#ifdef CONFIG_PM
-static int wl1271_configure_suspend_sta(struct wl1271 *wl)
+static int wl1271_configure_suspend_sta(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
int ret = 0;
mutex_lock(&wl->mutex);
- if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
+ if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
goto out_unlock;
ret = wl1271_ps_elp_wakeup(wl);
@@ -1623,12 +1575,12 @@ static int wl1271_configure_suspend_sta(struct wl1271 *wl)
goto out_unlock;
/* enter psm if needed */
- if (!test_bit(WL1271_FLAG_PSM, &wl->flags)) {
+ if (!test_bit(WLVIF_FLAG_PSM, &wlvif->flags)) {
DECLARE_COMPLETION_ONSTACK(compl);
- wl->ps_compl = &compl;
- ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE,
- wl->basic_rate, true);
+ wlvif->ps_compl = &compl;
+ ret = wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE,
+ wlvif->basic_rate, true);
if (ret < 0)
goto out_sleep;
@@ -1638,42 +1590,43 @@ static int wl1271_configure_suspend_sta(struct wl1271 *wl)
ret = wait_for_completion_timeout(
&compl, msecs_to_jiffies(WL1271_PS_COMPLETE_TIMEOUT));
+
+ mutex_lock(&wl->mutex);
if (ret <= 0) {
wl1271_warning("couldn't enter ps mode!");
ret = -EBUSY;
- goto out;
+ goto out_cleanup;
}
- /* take mutex again, and wakeup */
- mutex_lock(&wl->mutex);
-
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
- goto out_unlock;
+ goto out_cleanup;
}
out_sleep:
wl1271_ps_elp_sleep(wl);
+out_cleanup:
+ wlvif->ps_compl = NULL;
out_unlock:
mutex_unlock(&wl->mutex);
-out:
return ret;
}
-static int wl1271_configure_suspend_ap(struct wl1271 *wl)
+static int wl1271_configure_suspend_ap(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
int ret = 0;
mutex_lock(&wl->mutex);
- if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags))
+ if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
goto out_unlock;
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out_unlock;
- ret = wl1271_acx_beacon_filter_opt(wl, true);
+ ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
wl1271_ps_elp_sleep(wl);
out_unlock:
@@ -1682,20 +1635,22 @@ out_unlock:
}
-static int wl1271_configure_suspend(struct wl1271 *wl)
+static int wl1271_configure_suspend(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
- if (wl->bss_type == BSS_TYPE_STA_BSS)
- return wl1271_configure_suspend_sta(wl);
- if (wl->bss_type == BSS_TYPE_AP_BSS)
- return wl1271_configure_suspend_ap(wl);
+ if (wlvif->bss_type == BSS_TYPE_STA_BSS)
+ return wl1271_configure_suspend_sta(wl, wlvif);
+ if (wlvif->bss_type == BSS_TYPE_AP_BSS)
+ return wl1271_configure_suspend_ap(wl, wlvif);
return 0;
}
-static void wl1271_configure_resume(struct wl1271 *wl)
+static void wl1271_configure_resume(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
int ret;
- bool is_sta = wl->bss_type == BSS_TYPE_STA_BSS;
- bool is_ap = wl->bss_type == BSS_TYPE_AP_BSS;
+ bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
+ bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
if (!is_sta && !is_ap)
return;
@@ -1707,11 +1662,11 @@ static void wl1271_configure_resume(struct wl1271 *wl)
if (is_sta) {
/* exit psm if it wasn't configured */
- if (!test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags))
- wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE,
- wl->basic_rate, true);
+ if (!test_bit(WLVIF_FLAG_PSM_REQUESTED, &wlvif->flags))
+ wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE,
+ wlvif->basic_rate, true);
} else if (is_ap) {
- wl1271_acx_beacon_filter_opt(wl, false);
+ wl1271_acx_beacon_filter_opt(wl, wlvif, false);
}
wl1271_ps_elp_sleep(wl);
@@ -1723,16 +1678,19 @@ static int wl1271_op_suspend(struct ieee80211_hw *hw,
struct cfg80211_wowlan *wow)
{
struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif;
int ret;
wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
WARN_ON(!wow || !wow->any);
wl->wow_enabled = true;
- ret = wl1271_configure_suspend(wl);
- if (ret < 0) {
- wl1271_warning("couldn't prepare device to suspend");
- return ret;
+ wl12xx_for_each_wlvif(wl, wlvif) {
+ ret = wl1271_configure_suspend(wl, wlvif);
+ if (ret < 0) {
+ wl1271_warning("couldn't prepare device to suspend");
+ return ret;
+ }
}
/* flush any remaining work */
wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
@@ -1751,7 +1709,9 @@ static int wl1271_op_suspend(struct ieee80211_hw *hw,
wl1271_enable_interrupts(wl);
flush_work(&wl->tx_work);
- flush_delayed_work(&wl->pspoll_work);
+ wl12xx_for_each_wlvif(wl, wlvif) {
+ flush_delayed_work(&wlvif->pspoll_work);
+ }
flush_delayed_work(&wl->elp_work);
return 0;
@@ -1760,6 +1720,7 @@ static int wl1271_op_suspend(struct ieee80211_hw *hw,
static int wl1271_op_resume(struct ieee80211_hw *hw)
{
struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif;
unsigned long flags;
bool run_irq_work = false;
@@ -1783,7 +1744,9 @@ static int wl1271_op_resume(struct ieee80211_hw *hw)
wl1271_irq(0, wl);
wl1271_enable_interrupts(wl);
}
- wl1271_configure_resume(wl);
+ wl12xx_for_each_wlvif(wl, wlvif) {
+ wl1271_configure_resume(wl, wlvif);
+ }
wl->wow_enabled = false;
return 0;
@@ -1810,20 +1773,119 @@ static int wl1271_op_start(struct ieee80211_hw *hw)
static void wl1271_op_stop(struct ieee80211_hw *hw)
{
+ struct wl1271 *wl = hw->priv;
+ int i;
+
wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
+
+ mutex_lock(&wl->mutex);
+ if (wl->state == WL1271_STATE_OFF) {
+ mutex_unlock(&wl->mutex);
+ return;
+ }
+ /*
+ * this must be before the cancel_work calls below, so that the work
+ * functions don't perform further work.
+ */
+ wl->state = WL1271_STATE_OFF;
+ mutex_unlock(&wl->mutex);
+
+ mutex_lock(&wl_list_mutex);
+ list_del(&wl->list);
+ mutex_unlock(&wl_list_mutex);
+
+ wl1271_disable_interrupts(wl);
+ wl1271_flush_deferred_work(wl);
+ cancel_delayed_work_sync(&wl->scan_complete_work);
+ cancel_work_sync(&wl->netstack_work);
+ cancel_work_sync(&wl->tx_work);
+ cancel_delayed_work_sync(&wl->elp_work);
+
+ /* let's notify MAC80211 about the remaining pending TX frames */
+ wl12xx_tx_reset(wl, true);
+ mutex_lock(&wl->mutex);
+
+ wl1271_power_off(wl);
+
+ wl->band = IEEE80211_BAND_2GHZ;
+
+ wl->rx_counter = 0;
+ wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
+ wl->tx_blocks_available = 0;
+ wl->tx_allocated_blocks = 0;
+ wl->tx_results_count = 0;
+ wl->tx_packets_count = 0;
+ wl->time_offset = 0;
+ wl->tx_spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT;
+ wl->ap_fw_ps_map = 0;
+ wl->ap_ps_map = 0;
+ wl->sched_scanning = false;
+ memset(wl->roles_map, 0, sizeof(wl->roles_map));
+ memset(wl->links_map, 0, sizeof(wl->links_map));
+ memset(wl->roc_map, 0, sizeof(wl->roc_map));
+ wl->active_sta_count = 0;
+
+ /* The system link is always allocated */
+ __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
+
+ /*
+ * this is performed after the cancel_work calls and the associated
+ * mutex_lock, so that wl1271_op_add_interface does not accidentally
+ * get executed before all these vars have been reset.
+ */
+ wl->flags = 0;
+
+ wl->tx_blocks_freed = 0;
+
+ for (i = 0; i < NUM_TX_QUEUES; i++) {
+ wl->tx_pkts_freed[i] = 0;
+ wl->tx_allocated_pkts[i] = 0;
+ }
+
+ wl1271_debugfs_reset(wl);
+
+ kfree(wl->fw_status);
+ wl->fw_status = NULL;
+ kfree(wl->tx_res_if);
+ wl->tx_res_if = NULL;
+ kfree(wl->target_mem_map);
+ wl->target_mem_map = NULL;
+
+ mutex_unlock(&wl->mutex);
}
-static u8 wl12xx_get_role_type(struct wl1271 *wl)
+static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
{
- switch (wl->bss_type) {
+ u8 policy = find_first_zero_bit(wl->rate_policies_map,
+ WL12XX_MAX_RATE_POLICIES);
+ if (policy >= WL12XX_MAX_RATE_POLICIES)
+ return -EBUSY;
+
+ __set_bit(policy, wl->rate_policies_map);
+ *idx = policy;
+ return 0;
+}
+
+static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
+{
+ if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
+ return;
+
+ __clear_bit(*idx, wl->rate_policies_map);
+ *idx = WL12XX_MAX_RATE_POLICIES;
+}
+
+static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+{
+ switch (wlvif->bss_type) {
case BSS_TYPE_AP_BSS:
- if (wl->p2p)
+ if (wlvif->p2p)
return WL1271_ROLE_P2P_GO;
else
return WL1271_ROLE_AP;
case BSS_TYPE_STA_BSS:
- if (wl->p2p)
+ if (wlvif->p2p)
return WL1271_ROLE_P2P_CL;
else
return WL1271_ROLE_STA;
@@ -1832,78 +1894,95 @@ static u8 wl12xx_get_role_type(struct wl1271 *wl)
return WL1271_ROLE_IBSS;
default:
- wl1271_error("invalid bss_type: %d", wl->bss_type);
+ wl1271_error("invalid bss_type: %d", wlvif->bss_type);
}
return WL12XX_INVALID_ROLE_TYPE;
}
-static int wl1271_op_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
{
- struct wl1271 *wl = hw->priv;
- struct wiphy *wiphy = hw->wiphy;
- int retries = WL1271_BOOT_RETRIES;
- int ret = 0;
- u8 role_type;
- bool booted = false;
-
- wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
- ieee80211_vif_type_p2p(vif), vif->addr);
-
- mutex_lock(&wl->mutex);
- if (wl->vif) {
- wl1271_debug(DEBUG_MAC80211,
- "multiple vifs are not supported yet");
- ret = -EBUSY;
- goto out;
- }
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+ int i;
- /*
- * in some very corner case HW recovery scenarios its possible to
- * get here before __wl1271_op_remove_interface is complete, so
- * opt out if that is the case.
- */
- if (test_bit(WL1271_FLAG_IF_INITIALIZED, &wl->flags)) {
- ret = -EBUSY;
- goto out;
- }
+ /* clear everything but the persistent data */
+ memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
switch (ieee80211_vif_type_p2p(vif)) {
case NL80211_IFTYPE_P2P_CLIENT:
- wl->p2p = 1;
+ wlvif->p2p = 1;
/* fall-through */
case NL80211_IFTYPE_STATION:
- wl->bss_type = BSS_TYPE_STA_BSS;
- wl->set_bss_type = BSS_TYPE_STA_BSS;
+ wlvif->bss_type = BSS_TYPE_STA_BSS;
break;
case NL80211_IFTYPE_ADHOC:
- wl->bss_type = BSS_TYPE_IBSS;
- wl->set_bss_type = BSS_TYPE_STA_BSS;
+ wlvif->bss_type = BSS_TYPE_IBSS;
break;
case NL80211_IFTYPE_P2P_GO:
- wl->p2p = 1;
+ wlvif->p2p = 1;
/* fall-through */
case NL80211_IFTYPE_AP:
- wl->bss_type = BSS_TYPE_AP_BSS;
+ wlvif->bss_type = BSS_TYPE_AP_BSS;
break;
default:
- ret = -EOPNOTSUPP;
- goto out;
+ wlvif->bss_type = MAX_BSS_TYPE;
+ return -EOPNOTSUPP;
}
- role_type = wl12xx_get_role_type(wl);
- if (role_type == WL12XX_INVALID_ROLE_TYPE) {
- ret = -EINVAL;
- goto out;
+ wlvif->role_id = WL12XX_INVALID_ROLE_ID;
+ wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
+ wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
+
+ if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
+ wlvif->bss_type == BSS_TYPE_IBSS) {
+ /* init sta/ibss data */
+ wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
+ wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
+ wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
+ wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
+ } else {
+ /* init ap data */
+ wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
+ wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
+ wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
+ wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
+ for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
+ wl12xx_allocate_rate_policy(wl,
+ &wlvif->ap.ucast_rate_idx[i]);
}
- memcpy(wl->mac_addr, vif->addr, ETH_ALEN);
- if (wl->state != WL1271_STATE_OFF) {
- wl1271_error("cannot start because not in off state: %d",
- wl->state);
- ret = -EBUSY;
- goto out;
- }
+ wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
+ wlvif->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
+ wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
+ wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
+ wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
+ wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
+
+ /*
+ * mac80211 configures some values globally, while we treat them
+ * per-interface. thus, on init, we have to copy them from wl
+ */
+ wlvif->band = wl->band;
+ wlvif->channel = wl->channel;
+ wlvif->power_level = wl->power_level;
+
+ INIT_WORK(&wlvif->rx_streaming_enable_work,
+ wl1271_rx_streaming_enable_work);
+ INIT_WORK(&wlvif->rx_streaming_disable_work,
+ wl1271_rx_streaming_disable_work);
+ INIT_DELAYED_WORK(&wlvif->pspoll_work, wl1271_pspoll_work);
+ INIT_LIST_HEAD(&wlvif->list);
+
+ setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
+ (unsigned long) wlvif);
+ return 0;
+}
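/*
 * Illustrative sketch, not part of this patch: the
 * memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent)) call at the top
 * of wl12xx_init_vif_data() relies on a struct layout where state that must
 * survive re-initialization is grouped after a marker member. The layout
 * below is a hypothetical minimal example of that idiom.
 */
struct per_vif_state_sketch {
	u8 role_id;			/* wiped on every re-init */
	u8 bss_type;
	/* ... all other per-vif runtime state ... */

	u8 persistent[0];		/* marker: everything below survives */
	unsigned long preserved_flags;
};
/* memset(v, 0, offsetof(struct per_vif_state_sketch, persistent)); */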
+
+static bool wl12xx_init_fw(struct wl1271 *wl)
+{
+ int retries = WL1271_BOOT_RETRIES;
+ bool booted = false;
+ struct wiphy *wiphy = wl->hw->wiphy;
+ int ret;
while (retries) {
retries--;
@@ -1915,25 +1994,6 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
if (ret < 0)
goto power_off;
- if (wl->bss_type == BSS_TYPE_STA_BSS ||
- wl->bss_type == BSS_TYPE_IBSS) {
- /*
- * The device role is a special role used for
- * rx and tx frames prior to association (as
- * the STA role can get packets only from
- * its associated bssid)
- */
- ret = wl12xx_cmd_role_enable(wl,
- WL1271_ROLE_DEVICE,
- &wl->dev_role_id);
- if (ret < 0)
- goto irq_disable;
- }
-
- ret = wl12xx_cmd_role_enable(wl, role_type, &wl->role_id);
- if (ret < 0)
- goto irq_disable;
-
ret = wl1271_hw_init(wl);
if (ret < 0)
goto irq_disable;
@@ -1964,9 +2024,6 @@ power_off:
goto out;
}
- wl->vif = vif;
- wl->state = WL1271_STATE_ON;
- set_bit(WL1271_FLAG_IF_INITIALIZED, &wl->flags);
wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
/* update hw/fw version info in wiphy struct */
@@ -1984,7 +2041,115 @@ power_off:
wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
wl->enable_11a ? "" : "not ");
+ wl->state = WL1271_STATE_ON;
+out:
+ return booted;
+}
+
+static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
+{
+ return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
+}
+
+static int wl1271_op_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+ int ret = 0;
+ u8 role_type;
+ bool booted = false;
+
+ wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
+ ieee80211_vif_type_p2p(vif), vif->addr);
+
+ mutex_lock(&wl->mutex);
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out_unlock;
+
+ if (wl->vif) {
+ wl1271_debug(DEBUG_MAC80211,
+ "multiple vifs are not supported yet");
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /*
+	 * in some rare corner-case HW recovery scenarios it's possible to
+ * get here before __wl1271_op_remove_interface is complete, so
+ * opt out if that is the case.
+ */
+ if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
+ test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ ret = wl12xx_init_vif_data(wl, vif);
+ if (ret < 0)
+ goto out;
+
+ wlvif->wl = wl;
+ role_type = wl12xx_get_role_type(wl, wlvif);
+ if (role_type == WL12XX_INVALID_ROLE_TYPE) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /*
+	 * TODO: once the nvs issue is solved, move this block
+	 * to start(), and make sure the driver is ON here.
+ */
+ if (wl->state == WL1271_STATE_OFF) {
+ /*
+ * we still need this in order to configure the fw
+ * while uploading the nvs
+ */
+ memcpy(wl->mac_addr, vif->addr, ETH_ALEN);
+
+ booted = wl12xx_init_fw(wl);
+ if (!booted) {
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
+ wlvif->bss_type == BSS_TYPE_IBSS) {
+ /*
+ * The device role is a special role used for
+ * rx and tx frames prior to association (as
+ * the STA role can get packets only from
+ * its associated bssid)
+ */
+ ret = wl12xx_cmd_role_enable(wl, vif->addr,
+ WL1271_ROLE_DEVICE,
+ &wlvif->dev_role_id);
+ if (ret < 0)
+ goto out;
+ }
+
+ ret = wl12xx_cmd_role_enable(wl, vif->addr,
+ role_type, &wlvif->role_id);
+ if (ret < 0)
+ goto out;
+
+ ret = wl1271_init_vif_specific(wl, vif);
+ if (ret < 0)
+ goto out;
+
+ wl->vif = vif;
+ list_add(&wlvif->list, &wl->wlvif_list);
+ set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
+
+ if (wlvif->bss_type == BSS_TYPE_AP_BSS)
+ wl->ap_count++;
+ else
+ wl->sta_count++;
out:
+ wl1271_ps_elp_sleep(wl);
+out_unlock:
mutex_unlock(&wl->mutex);
mutex_lock(&wl_list_mutex);
@@ -1996,29 +2161,34 @@ out:
}
static void __wl1271_op_remove_interface(struct wl1271 *wl,
+ struct ieee80211_vif *vif,
bool reset_tx_queues)
{
- int ret, i;
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+ int i, ret;
wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
+ if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
+ return;
+
+ wl->vif = NULL;
+
/* because of hardware recovery, we may get here twice */
if (wl->state != WL1271_STATE_ON)
return;
wl1271_info("down");
- mutex_lock(&wl_list_mutex);
- list_del(&wl->list);
- mutex_unlock(&wl_list_mutex);
-
/* enable dyn ps just in case (if left on due to fw crash etc) */
- if (wl->bss_type == BSS_TYPE_STA_BSS)
- ieee80211_enable_dyn_ps(wl->vif);
+ if (wlvif->bss_type == BSS_TYPE_STA_BSS)
+ ieee80211_enable_dyn_ps(vif);
- if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
+ if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
+ wl->scan_vif == vif) {
wl->scan.state = WL1271_SCAN_STATE_IDLE;
memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
+ wl->scan_vif = NULL;
wl->scan.req = NULL;
ieee80211_scan_completed(wl->hw, true);
}
@@ -2029,13 +2199,17 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
if (ret < 0)
goto deinit;
- if (wl->bss_type == BSS_TYPE_STA_BSS) {
- ret = wl12xx_cmd_role_disable(wl, &wl->dev_role_id);
+ if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
+ wlvif->bss_type == BSS_TYPE_IBSS) {
+ if (wl12xx_dev_role_started(wlvif))
+ wl12xx_stop_dev(wl, wlvif);
+
+ ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
if (ret < 0)
goto deinit;
}
- ret = wl12xx_cmd_role_disable(wl, &wl->role_id);
+ ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
if (ret < 0)
goto deinit;
@@ -2043,120 +2217,93 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
}
deinit:
/* clear all hlids (except system_hlid) */
- wl->sta_hlid = WL12XX_INVALID_LINK_ID;
- wl->dev_hlid = WL12XX_INVALID_LINK_ID;
- wl->ap_bcast_hlid = WL12XX_INVALID_LINK_ID;
- wl->ap_global_hlid = WL12XX_INVALID_LINK_ID;
+ wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
+
+ if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
+ wlvif->bss_type == BSS_TYPE_IBSS) {
+ wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
+ wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
+ wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
+ wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
+ } else {
+ wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
+ wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
+ wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
+ wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
+ for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
+ wl12xx_free_rate_policy(wl,
+ &wlvif->ap.ucast_rate_idx[i]);
+ }
- /*
- * this must be before the cancel_work calls below, so that the work
- * functions don't perform further work.
- */
- wl->state = WL1271_STATE_OFF;
+ wl12xx_tx_reset_wlvif(wl, wlvif);
+ wl1271_free_ap_keys(wl, wlvif);
+ if (wl->last_wlvif == wlvif)
+ wl->last_wlvif = NULL;
+ list_del(&wlvif->list);
+ memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
+ wlvif->role_id = WL12XX_INVALID_ROLE_ID;
+ wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
+
+ if (wlvif->bss_type == BSS_TYPE_AP_BSS)
+ wl->ap_count--;
+ else
+ wl->sta_count--;
mutex_unlock(&wl->mutex);
-
- wl1271_disable_interrupts(wl);
- wl1271_flush_deferred_work(wl);
- cancel_delayed_work_sync(&wl->scan_complete_work);
- cancel_work_sync(&wl->netstack_work);
- cancel_work_sync(&wl->tx_work);
- del_timer_sync(&wl->rx_streaming_timer);
- cancel_work_sync(&wl->rx_streaming_enable_work);
- cancel_work_sync(&wl->rx_streaming_disable_work);
- cancel_delayed_work_sync(&wl->pspoll_work);
- cancel_delayed_work_sync(&wl->elp_work);
+ del_timer_sync(&wlvif->rx_streaming_timer);
+ cancel_work_sync(&wlvif->rx_streaming_enable_work);
+ cancel_work_sync(&wlvif->rx_streaming_disable_work);
+ cancel_delayed_work_sync(&wlvif->pspoll_work);
mutex_lock(&wl->mutex);
-
- /* let's notify MAC80211 about the remaining pending TX frames */
- wl1271_tx_reset(wl, reset_tx_queues);
- wl1271_power_off(wl);
-
- memset(wl->bssid, 0, ETH_ALEN);
- memset(wl->ssid, 0, IEEE80211_MAX_SSID_LEN + 1);
- wl->ssid_len = 0;
- wl->bss_type = MAX_BSS_TYPE;
- wl->set_bss_type = MAX_BSS_TYPE;
- wl->p2p = 0;
- wl->band = IEEE80211_BAND_2GHZ;
-
- wl->rx_counter = 0;
- wl->psm_entry_retry = 0;
- wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
- wl->tx_blocks_available = 0;
- wl->tx_allocated_blocks = 0;
- wl->tx_results_count = 0;
- wl->tx_packets_count = 0;
- wl->time_offset = 0;
- wl->session_counter = 0;
- wl->rate_set = CONF_TX_RATE_MASK_BASIC;
- wl->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
- wl->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
- wl->vif = NULL;
- wl->tx_spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT;
- wl1271_free_ap_keys(wl);
- memset(wl->ap_hlid_map, 0, sizeof(wl->ap_hlid_map));
- wl->ap_fw_ps_map = 0;
- wl->ap_ps_map = 0;
- wl->sched_scanning = false;
- wl->role_id = WL12XX_INVALID_ROLE_ID;
- wl->dev_role_id = WL12XX_INVALID_ROLE_ID;
- memset(wl->roles_map, 0, sizeof(wl->roles_map));
- memset(wl->links_map, 0, sizeof(wl->links_map));
- memset(wl->roc_map, 0, sizeof(wl->roc_map));
- wl->active_sta_count = 0;
-
- /* The system link is always allocated */
- __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
-
- /*
- * this is performed after the cancel_work calls and the associated
- * mutex_lock, so that wl1271_op_add_interface does not accidentally
- * get executed before all these vars have been reset.
- */
- wl->flags = 0;
-
- wl->tx_blocks_freed = 0;
-
- for (i = 0; i < NUM_TX_QUEUES; i++) {
- wl->tx_pkts_freed[i] = 0;
- wl->tx_allocated_pkts[i] = 0;
- }
-
- wl1271_debugfs_reset(wl);
-
- kfree(wl->fw_status);
- wl->fw_status = NULL;
- kfree(wl->tx_res_if);
- wl->tx_res_if = NULL;
- kfree(wl->target_mem_map);
- wl->target_mem_map = NULL;
}
static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+ struct wl12xx_vif *iter;
mutex_lock(&wl->mutex);
+
+ if (wl->state == WL1271_STATE_OFF ||
+ !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
+ goto out;
+
/*
* wl->vif can be null here if someone shuts down the interface
* just when hardware recovery has been started.
*/
- if (wl->vif) {
- WARN_ON(wl->vif != vif);
- __wl1271_op_remove_interface(wl, true);
- }
+ wl12xx_for_each_wlvif(wl, iter) {
+ if (iter != wlvif)
+ continue;
+ __wl1271_op_remove_interface(wl, vif, true);
+ break;
+ }
+ WARN_ON(iter != wlvif);
+out:
mutex_unlock(&wl->mutex);
cancel_work_sync(&wl->recovery_work);
}
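/*
 * Illustrative sketch, not part of this patch: wl12xx_for_each_wlvif(), used
 * above to find the vif being removed, is assumed to be a thin wrapper around
 * the kernel's list_for_each_entry(), walking wl->wlvif_list through the
 * wlvif->list member that wl12xx_init_vif_data() initializes and
 * wl1271_op_add_interface() links in.
 */
#define wl12xx_for_each_wlvif_sketch(wl, wlvif) \
	list_for_each_entry(wlvif, &(wl)->wlvif_list, list)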
-static int wl1271_join(struct wl1271 *wl, bool set_assoc)
+static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum nl80211_iftype new_type, bool p2p)
+{
+ wl1271_op_remove_interface(hw, vif);
+
+ vif->type = ieee80211_iftype_p2p(new_type, p2p);
+ vif->p2p = p2p;
+ return wl1271_op_add_interface(hw, vif);
+}
+
+static int wl1271_join(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ bool set_assoc)
{
int ret;
- bool is_ibss = (wl->bss_type == BSS_TYPE_IBSS);
+ bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
/*
	 * One of the side effects of the JOIN command is that it clears
@@ -2167,20 +2314,20 @@ static int wl1271_join(struct wl1271 *wl, bool set_assoc)
* Keep the below message for now, unless it starts bothering
* users who really like to roam a lot :)
*/
- if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
+ if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
wl1271_info("JOIN while associated.");
if (set_assoc)
- set_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags);
+ set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
if (is_ibss)
- ret = wl12xx_cmd_role_start_ibss(wl);
+ ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
else
- ret = wl12xx_cmd_role_start_sta(wl);
+ ret = wl12xx_cmd_role_start_sta(wl, wlvif);
if (ret < 0)
goto out;
- if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
+ if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
goto out;
/*
@@ -2189,19 +2336,20 @@ static int wl1271_join(struct wl1271 *wl, bool set_assoc)
* the join. The acx_aid starts the keep-alive process, and the order
* of the commands below is relevant.
*/
- ret = wl1271_acx_keep_alive_mode(wl, true);
+ ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
if (ret < 0)
goto out;
- ret = wl1271_acx_aid(wl, wl->aid);
+ ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
if (ret < 0)
goto out;
- ret = wl1271_cmd_build_klv_null_data(wl);
+ ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
if (ret < 0)
goto out;
- ret = wl1271_acx_keep_alive_config(wl, CMD_TEMPL_KLV_IDX_NULL_DATA,
+ ret = wl1271_acx_keep_alive_config(wl, wlvif,
+ CMD_TEMPL_KLV_IDX_NULL_DATA,
ACX_KEEP_ALIVE_TPL_VALID);
if (ret < 0)
goto out;
@@ -2210,72 +2358,63 @@ out:
return ret;
}
-static int wl1271_unjoin(struct wl1271 *wl)
+static int wl1271_unjoin(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
int ret;
- if (test_and_clear_bit(WL1271_FLAG_CS_PROGRESS, &wl->flags)) {
+ if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
+ struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
+
wl12xx_cmd_stop_channel_switch(wl);
- ieee80211_chswitch_done(wl->vif, false);
+ ieee80211_chswitch_done(vif, false);
}
/* to stop listening to a channel, we disconnect */
- ret = wl12xx_cmd_role_stop_sta(wl);
+ ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
if (ret < 0)
goto out;
- memset(wl->bssid, 0, ETH_ALEN);
-
/* reset TX security counters on a clean disconnect */
- wl->tx_security_last_seq_lsb = 0;
- wl->tx_security_seq = 0;
+ wlvif->tx_security_last_seq_lsb = 0;
+ wlvif->tx_security_seq = 0;
out:
return ret;
}
-static void wl1271_set_band_rate(struct wl1271 *wl)
+static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
- wl->basic_rate_set = wl->bitrate_masks[wl->band];
- wl->rate_set = wl->basic_rate_set;
+ wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
+ wlvif->rate_set = wlvif->basic_rate_set;
}
-static bool wl12xx_is_roc(struct wl1271 *wl)
-{
- u8 role_id;
-
- role_id = find_first_bit(wl->roc_map, WL12XX_MAX_ROLES);
- if (role_id >= WL12XX_MAX_ROLES)
- return false;
-
- return true;
-}
-
-static int wl1271_sta_handle_idle(struct wl1271 *wl, bool idle)
+static int wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ bool idle)
{
int ret;
+ bool cur_idle = !test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
+
+ if (idle == cur_idle)
+ return 0;
if (idle) {
/* no need to croc if we weren't busy (e.g. during boot) */
- if (wl12xx_is_roc(wl)) {
- ret = wl12xx_croc(wl, wl->dev_role_id);
- if (ret < 0)
- goto out;
-
- ret = wl12xx_cmd_role_stop_dev(wl);
+ if (wl12xx_dev_role_started(wlvif)) {
+ ret = wl12xx_stop_dev(wl, wlvif);
if (ret < 0)
goto out;
}
- wl->rate_set = wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
- ret = wl1271_acx_sta_rate_policies(wl);
+ wlvif->rate_set =
+ wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
+ ret = wl1271_acx_sta_rate_policies(wl, wlvif);
if (ret < 0)
goto out;
ret = wl1271_acx_keep_alive_config(
- wl, CMD_TEMPL_KLV_IDX_NULL_DATA,
+ wl, wlvif, CMD_TEMPL_KLV_IDX_NULL_DATA,
ACX_KEEP_ALIVE_TPL_INVALID);
if (ret < 0)
goto out;
- set_bit(WL1271_FLAG_IDLE, &wl->flags);
+ clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
} else {
/* The current firmware only supports sched_scan in idle */
if (wl->sched_scanning) {
@@ -2283,75 +2422,32 @@ static int wl1271_sta_handle_idle(struct wl1271 *wl, bool idle)
ieee80211_sched_scan_stopped(wl->hw);
}
- ret = wl12xx_cmd_role_start_dev(wl);
- if (ret < 0)
- goto out;
-
- ret = wl12xx_roc(wl, wl->dev_role_id);
+ ret = wl12xx_start_dev(wl, wlvif);
if (ret < 0)
goto out;
- clear_bit(WL1271_FLAG_IDLE, &wl->flags);
+ set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
}
out:
return ret;
}
-static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
+static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct ieee80211_conf *conf, u32 changed)
{
- struct wl1271 *wl = hw->priv;
- struct ieee80211_conf *conf = &hw->conf;
- int channel, ret = 0;
- bool is_ap;
+ bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
+ int channel, ret;
channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
- wl1271_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d %s"
- " changed 0x%x",
- channel,
- conf->flags & IEEE80211_CONF_PS ? "on" : "off",
- conf->power_level,
- conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
- changed);
-
- /*
- * mac80211 will go to idle nearly immediately after transmitting some
- * frames, such as the deauth. To make sure those frames reach the air,
- * wait here until the TX queue is fully flushed.
- */
- if ((changed & IEEE80211_CONF_CHANGE_IDLE) &&
- (conf->flags & IEEE80211_CONF_IDLE))
- wl1271_tx_flush(wl);
-
- mutex_lock(&wl->mutex);
-
- if (unlikely(wl->state == WL1271_STATE_OFF)) {
- /* we support configuring the channel and band while off */
- if ((changed & IEEE80211_CONF_CHANGE_CHANNEL)) {
- wl->band = conf->channel->band;
- wl->channel = channel;
- }
-
- if ((changed & IEEE80211_CONF_CHANGE_POWER))
- wl->power_level = conf->power_level;
-
- goto out;
- }
-
- is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
-
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
- goto out;
-
/* if the channel changes while joined, join again */
if (changed & IEEE80211_CONF_CHANGE_CHANNEL &&
- ((wl->band != conf->channel->band) ||
- (wl->channel != channel))) {
+ ((wlvif->band != conf->channel->band) ||
+ (wlvif->channel != channel))) {
/* send all pending packets */
wl1271_tx_work_locked(wl);
- wl->band = conf->channel->band;
- wl->channel = channel;
+ wlvif->band = conf->channel->band;
+ wlvif->channel = channel;
if (!is_ap) {
/*
@@ -2360,24 +2456,27 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
* possible rate for the band as a fixed rate for
* association frames and other control messages.
*/
- if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
- wl1271_set_band_rate(wl);
+ if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
+ wl1271_set_band_rate(wl, wlvif);
- wl->basic_rate =
- wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
- ret = wl1271_acx_sta_rate_policies(wl);
+ wlvif->basic_rate =
+ wl1271_tx_min_rate_get(wl,
+ wlvif->basic_rate_set);
+ ret = wl1271_acx_sta_rate_policies(wl, wlvif);
if (ret < 0)
wl1271_warning("rate policy for channel "
"failed %d", ret);
- if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) {
- if (wl12xx_is_roc(wl)) {
+ if (test_bit(WLVIF_FLAG_STA_ASSOCIATED,
+ &wlvif->flags)) {
+ if (wl12xx_dev_role_started(wlvif)) {
/* roaming */
- ret = wl12xx_croc(wl, wl->dev_role_id);
+ ret = wl12xx_croc(wl,
+ wlvif->dev_role_id);
if (ret < 0)
- goto out_sleep;
+ return ret;
}
- ret = wl1271_join(wl, false);
+ ret = wl1271_join(wl, wlvif, false);
if (ret < 0)
wl1271_warning("cmd join on channel "
"failed %d", ret);
@@ -2387,66 +2486,114 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
* not idle. otherwise, CROC will be called
* anyway.
*/
- if (wl12xx_is_roc(wl) &&
+ if (wl12xx_dev_role_started(wlvif) &&
!(conf->flags & IEEE80211_CONF_IDLE)) {
- ret = wl12xx_croc(wl, wl->dev_role_id);
+ ret = wl12xx_stop_dev(wl, wlvif);
if (ret < 0)
- goto out_sleep;
+ return ret;
- ret = wl12xx_roc(wl, wl->dev_role_id);
+ ret = wl12xx_start_dev(wl, wlvif);
if (ret < 0)
- wl1271_warning("roc failed %d",
- ret);
+ return ret;
}
}
}
}
- if (changed & IEEE80211_CONF_CHANGE_IDLE && !is_ap) {
- ret = wl1271_sta_handle_idle(wl,
- conf->flags & IEEE80211_CONF_IDLE);
- if (ret < 0)
- wl1271_warning("idle mode change failed %d", ret);
- }
-
/*
* if mac80211 changes the PSM mode, make sure the mode is not
* incorrectly changed after the pspoll failure active window.
*/
if (changed & IEEE80211_CONF_CHANGE_PS)
- clear_bit(WL1271_FLAG_PSPOLL_FAILURE, &wl->flags);
+ clear_bit(WLVIF_FLAG_PSPOLL_FAILURE, &wlvif->flags);
if (conf->flags & IEEE80211_CONF_PS &&
- !test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags)) {
- set_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags);
+ !test_bit(WLVIF_FLAG_PSM_REQUESTED, &wlvif->flags)) {
+ set_bit(WLVIF_FLAG_PSM_REQUESTED, &wlvif->flags);
/*
* We enter PSM only if we're already associated.
* If we're not, we'll enter it when joining an SSID,
* through the bss_info_changed() hook.
*/
- if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) {
+ if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
wl1271_debug(DEBUG_PSM, "psm enabled");
- ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE,
- wl->basic_rate, true);
+ ret = wl1271_ps_set_mode(wl, wlvif,
+ STATION_POWER_SAVE_MODE,
+ wlvif->basic_rate, true);
}
} else if (!(conf->flags & IEEE80211_CONF_PS) &&
- test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags)) {
+ test_bit(WLVIF_FLAG_PSM_REQUESTED, &wlvif->flags)) {
wl1271_debug(DEBUG_PSM, "psm disabled");
- clear_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags);
+ clear_bit(WLVIF_FLAG_PSM_REQUESTED, &wlvif->flags);
- if (test_bit(WL1271_FLAG_PSM, &wl->flags))
- ret = wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE,
- wl->basic_rate, true);
+ if (test_bit(WLVIF_FLAG_PSM, &wlvif->flags))
+ ret = wl1271_ps_set_mode(wl, wlvif,
+ STATION_ACTIVE_MODE,
+ wlvif->basic_rate, true);
}
- if (conf->power_level != wl->power_level) {
- ret = wl1271_acx_tx_power(wl, conf->power_level);
+ if (conf->power_level != wlvif->power_level) {
+ ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
if (ret < 0)
- goto out_sleep;
+ return ret;
+
+ wlvif->power_level = conf->power_level;
+ }
+ return 0;
+}
+
+static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif;
+ struct ieee80211_conf *conf = &hw->conf;
+ int channel, ret = 0;
+
+ channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
+
+ wl1271_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d %s"
+ " changed 0x%x",
+ channel,
+ conf->flags & IEEE80211_CONF_PS ? "on" : "off",
+ conf->power_level,
+ conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
+ changed);
+
+ /*
+ * mac80211 will go to idle nearly immediately after transmitting some
+ * frames, such as the deauth. To make sure those frames reach the air,
+ * wait here until the TX queue is fully flushed.
+ */
+ if ((changed & IEEE80211_CONF_CHANGE_IDLE) &&
+ (conf->flags & IEEE80211_CONF_IDLE))
+ wl1271_tx_flush(wl);
+
+ mutex_lock(&wl->mutex);
+
+ /* we support configuring the channel and band even while off */
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+ wl->band = conf->channel->band;
+ wl->channel = channel;
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_POWER)
wl->power_level = conf->power_level;
+
+ if (unlikely(wl->state == WL1271_STATE_OFF))
+ goto out;
+
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out;
+
+ /* configure each interface */
+ wl12xx_for_each_wlvif(wl, wlvif) {
+ ret = wl12xx_config_vif(wl, wlvif, conf, changed);
+ if (ret < 0)
+ goto out_sleep;
}
out_sleep:
@@ -2509,6 +2656,8 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
{
struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif;
+
int ret;
wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
@@ -2526,15 +2675,20 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
if (ret < 0)
goto out;
- if (wl->bss_type != BSS_TYPE_AP_BSS) {
- if (*total & FIF_ALLMULTI)
- ret = wl1271_acx_group_address_tbl(wl, false, NULL, 0);
- else if (fp)
- ret = wl1271_acx_group_address_tbl(wl, fp->enabled,
- fp->mc_list,
- fp->mc_list_length);
- if (ret < 0)
- goto out_sleep;
+ wl12xx_for_each_wlvif(wl, wlvif) {
+ if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
+ if (*total & FIF_ALLMULTI)
+ ret = wl1271_acx_group_address_tbl(wl, wlvif,
+ false,
+ NULL, 0);
+ else if (fp)
+ ret = wl1271_acx_group_address_tbl(wl, wlvif,
+ fp->enabled,
+ fp->mc_list,
+ fp->mc_list_length);
+ if (ret < 0)
+ goto out_sleep;
+ }
}
/*
@@ -2551,9 +2705,10 @@ out:
kfree(fp);
}
-static int wl1271_record_ap_key(struct wl1271 *wl, u8 id, u8 key_type,
- u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32,
- u16 tx_seq_16)
+static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 id, u8 key_type, u8 key_size,
+ const u8 *key, u8 hlid, u32 tx_seq_32,
+ u16 tx_seq_16)
{
struct wl1271_ap_key *ap_key;
int i;
@@ -2568,10 +2723,10 @@ static int wl1271_record_ap_key(struct wl1271 *wl, u8 id, u8 key_type,
* an existing key.
*/
for (i = 0; i < MAX_NUM_KEYS; i++) {
- if (wl->recorded_ap_keys[i] == NULL)
+ if (wlvif->ap.recorded_keys[i] == NULL)
break;
- if (wl->recorded_ap_keys[i]->id == id) {
+ if (wlvif->ap.recorded_keys[i]->id == id) {
wl1271_warning("trying to record key replacement");
return -EINVAL;
}
@@ -2592,21 +2747,21 @@ static int wl1271_record_ap_key(struct wl1271 *wl, u8 id, u8 key_type,
ap_key->tx_seq_32 = tx_seq_32;
ap_key->tx_seq_16 = tx_seq_16;
- wl->recorded_ap_keys[i] = ap_key;
+ wlvif->ap.recorded_keys[i] = ap_key;
return 0;
}
-static void wl1271_free_ap_keys(struct wl1271 *wl)
+static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
int i;
for (i = 0; i < MAX_NUM_KEYS; i++) {
- kfree(wl->recorded_ap_keys[i]);
- wl->recorded_ap_keys[i] = NULL;
+ kfree(wlvif->ap.recorded_keys[i]);
+ wlvif->ap.recorded_keys[i] = NULL;
}
}
-static int wl1271_ap_init_hwenc(struct wl1271 *wl)
+static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
int i, ret = 0;
struct wl1271_ap_key *key;
@@ -2614,15 +2769,15 @@ static int wl1271_ap_init_hwenc(struct wl1271 *wl)
for (i = 0; i < MAX_NUM_KEYS; i++) {
u8 hlid;
- if (wl->recorded_ap_keys[i] == NULL)
+ if (wlvif->ap.recorded_keys[i] == NULL)
break;
- key = wl->recorded_ap_keys[i];
+ key = wlvif->ap.recorded_keys[i];
hlid = key->hlid;
if (hlid == WL12XX_INVALID_LINK_ID)
- hlid = wl->ap_bcast_hlid;
+ hlid = wlvif->ap.bcast_hlid;
- ret = wl1271_cmd_set_ap_key(wl, KEY_ADD_OR_REPLACE,
+ ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
key->id, key->key_type,
key->key_size, key->key,
hlid, key->tx_seq_32,
@@ -2635,23 +2790,24 @@ static int wl1271_ap_init_hwenc(struct wl1271 *wl)
}
if (wep_key_added) {
- ret = wl12xx_cmd_set_default_wep_key(wl, wl->default_key,
- wl->ap_bcast_hlid);
+ ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
+ wlvif->ap.bcast_hlid);
if (ret < 0)
goto out;
}
out:
- wl1271_free_ap_keys(wl);
+ wl1271_free_ap_keys(wl, wlvif);
return ret;
}
-static int wl1271_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
+static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u16 action, u8 id, u8 key_type,
u8 key_size, const u8 *key, u32 tx_seq_32,
u16 tx_seq_16, struct ieee80211_sta *sta)
{
int ret;
- bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
+ bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
if (is_ap) {
struct wl1271_station *wl_sta;
@@ -2661,10 +2817,10 @@ static int wl1271_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
wl_sta = (struct wl1271_station *)sta->drv_priv;
hlid = wl_sta->hlid;
} else {
- hlid = wl->ap_bcast_hlid;
+ hlid = wlvif->ap.bcast_hlid;
}
- if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) {
+ if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
/*
* We do not support removing keys after AP shutdown.
* Pretend we do to make mac80211 happy.
@@ -2672,12 +2828,12 @@ static int wl1271_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
if (action != KEY_ADD_OR_REPLACE)
return 0;
- ret = wl1271_record_ap_key(wl, id,
+ ret = wl1271_record_ap_key(wl, wlvif, id,
key_type, key_size,
key, hlid, tx_seq_32,
tx_seq_16);
} else {
- ret = wl1271_cmd_set_ap_key(wl, action,
+ ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
id, key_type, key_size,
key, hlid, tx_seq_32,
tx_seq_16);
@@ -2718,10 +2874,10 @@ static int wl1271_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
/* don't remove key if hlid was already deleted */
if (action == KEY_REMOVE &&
- wl->sta_hlid == WL12XX_INVALID_LINK_ID)
+ wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
return 0;
- ret = wl1271_cmd_set_sta_key(wl, action,
+ ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
id, key_type, key_size,
key, addr, tx_seq_32,
tx_seq_16);
@@ -2731,8 +2887,8 @@ static int wl1271_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
/* the default WEP key needs to be configured at least once */
if (key_type == KEY_WEP) {
ret = wl12xx_cmd_set_default_wep_key(wl,
- wl->default_key,
- wl->sta_hlid);
+ wlvif->default_key,
+ wlvif->sta.hlid);
if (ret < 0)
return ret;
}
@@ -2747,6 +2903,7 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_key_conf *key_conf)
{
struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
int ret;
u32 tx_seq_32 = 0;
u16 tx_seq_16 = 0;
@@ -2782,20 +2939,20 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
key_type = KEY_TKIP;
key_conf->hw_key_idx = key_conf->keyidx;
- tx_seq_32 = WL1271_TX_SECURITY_HI32(wl->tx_security_seq);
- tx_seq_16 = WL1271_TX_SECURITY_LO16(wl->tx_security_seq);
+ tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
+ tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
break;
case WLAN_CIPHER_SUITE_CCMP:
key_type = KEY_AES;
- key_conf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
- tx_seq_32 = WL1271_TX_SECURITY_HI32(wl->tx_security_seq);
- tx_seq_16 = WL1271_TX_SECURITY_LO16(wl->tx_security_seq);
+ key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
+ tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
+ tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
break;
case WL1271_CIPHER_SUITE_GEM:
key_type = KEY_GEM;
- tx_seq_32 = WL1271_TX_SECURITY_HI32(wl->tx_security_seq);
- tx_seq_16 = WL1271_TX_SECURITY_LO16(wl->tx_security_seq);
+ tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
+ tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
break;
default:
wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
@@ -2806,7 +2963,7 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
switch (cmd) {
case SET_KEY:
- ret = wl1271_set_key(wl, KEY_ADD_OR_REPLACE,
+ ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
key_conf->keyidx, key_type,
key_conf->keylen, key_conf->key,
tx_seq_32, tx_seq_16, sta);
@@ -2817,7 +2974,7 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
break;
case DISABLE_KEY:
- ret = wl1271_set_key(wl, KEY_REMOVE,
+ ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
key_conf->keyidx, key_type,
key_conf->keylen, key_conf->key,
0, 0, sta);
@@ -2847,6 +3004,8 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
struct cfg80211_scan_request *req)
{
struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+
int ret;
u8 *ssid = NULL;
size_t len = 0;
@@ -2874,18 +3033,18 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
if (ret < 0)
goto out;
- /* cancel ROC before scanning */
- if (wl12xx_is_roc(wl)) {
- if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) {
- /* don't allow scanning right now */
- ret = -EBUSY;
- goto out_sleep;
- }
- wl12xx_croc(wl, wl->dev_role_id);
- wl12xx_cmd_role_stop_dev(wl);
+ if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
+ test_bit(wlvif->role_id, wl->roc_map)) {
+ /* don't allow scanning right now */
+ ret = -EBUSY;
+ goto out_sleep;
}
- ret = wl1271_scan(hw->priv, ssid, len, req);
+ /* cancel ROC before scanning */
+ if (wl12xx_dev_role_started(wlvif))
+ wl12xx_stop_dev(wl, wlvif);
+
+ ret = wl1271_scan(hw->priv, vif, ssid, len, req);
out_sleep:
wl1271_ps_elp_sleep(wl);
out:
@@ -2921,6 +3080,7 @@ static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
}
wl->scan.state = WL1271_SCAN_STATE_IDLE;
memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
+ wl->scan_vif = NULL;
wl->scan.req = NULL;
ieee80211_scan_completed(wl->hw, true);
@@ -2938,6 +3098,7 @@ static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
struct ieee80211_sched_scan_ies *ies)
{
struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
int ret;
wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
@@ -2948,11 +3109,11 @@ static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
if (ret < 0)
goto out;
- ret = wl1271_scan_sched_scan_config(wl, req, ies);
+ ret = wl1271_scan_sched_scan_config(wl, wlvif, req, ies);
if (ret < 0)
goto out_sleep;
- ret = wl1271_scan_sched_scan_start(wl);
+ ret = wl1271_scan_sched_scan_start(wl, wlvif);
if (ret < 0)
goto out_sleep;
@@ -3017,6 +3178,7 @@ out:
static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
{
struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif;
int ret = 0;
mutex_lock(&wl->mutex);
@@ -3030,10 +3192,11 @@ static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
if (ret < 0)
goto out;
- ret = wl1271_acx_rts_threshold(wl, value);
- if (ret < 0)
- wl1271_warning("wl1271_op_set_rts_threshold failed: %d", ret);
-
+ wl12xx_for_each_wlvif(wl, wlvif) {
+ ret = wl1271_acx_rts_threshold(wl, wlvif, value);
+ if (ret < 0)
+ wl1271_warning("set rts threshold failed: %d", ret);
+ }
wl1271_ps_elp_sleep(wl);
out:
@@ -3042,9 +3205,10 @@ out:
return ret;
}
-static int wl1271_ssid_set(struct wl1271 *wl, struct sk_buff *skb,
+static int wl1271_ssid_set(struct ieee80211_vif *vif, struct sk_buff *skb,
int offset)
{
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
u8 ssid_len;
const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
skb->len - offset);
@@ -3060,8 +3224,8 @@ static int wl1271_ssid_set(struct wl1271 *wl, struct sk_buff *skb,
return -EINVAL;
}
- wl->ssid_len = ssid_len;
- memcpy(wl->ssid, ptr+2, ssid_len);
+ wlvif->ssid_len = ssid_len;
+ memcpy(wlvif->ssid, ptr+2, ssid_len);
return 0;
}
@@ -3096,18 +3260,40 @@ static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
skb_trim(skb, skb->len - len);
}
-static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl,
- u8 *probe_rsp_data,
- size_t probe_rsp_len,
- u32 rates)
+static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
+ struct ieee80211_vif *vif)
{
- struct ieee80211_bss_conf *bss_conf = &wl->vif->bss_conf;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ieee80211_proberesp_get(wl->hw, vif);
+ if (!skb)
+ return -EOPNOTSUPP;
+
+ ret = wl1271_cmd_template_set(wl,
+ CMD_TEMPL_AP_PROBE_RESPONSE,
+ skb->data,
+ skb->len, 0,
+ rates);
+
+ dev_kfree_skb(skb);
+ return ret;
+}
+
+static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
+ struct ieee80211_vif *vif,
+ u8 *probe_rsp_data,
+ size_t probe_rsp_len,
+ u32 rates)
+{
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+ struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
int ssid_ie_offset, ie_offset, templ_len;
const u8 *ptr;
/* no need to change probe response if the SSID is set correctly */
- if (wl->ssid_len > 0)
+ if (wlvif->ssid_len > 0)
return wl1271_cmd_template_set(wl,
CMD_TEMPL_AP_PROBE_RESPONSE,
probe_rsp_data,
@@ -3153,16 +3339,18 @@ static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl,
}
static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
+ struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
u32 changed)
{
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
int ret = 0;
if (changed & BSS_CHANGED_ERP_SLOT) {
if (bss_conf->use_short_slot)
- ret = wl1271_acx_slot(wl, SLOT_TIME_SHORT);
+ ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
else
- ret = wl1271_acx_slot(wl, SLOT_TIME_LONG);
+ ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
if (ret < 0) {
wl1271_warning("Set slot time failed %d", ret);
goto out;
@@ -3171,16 +3359,18 @@ static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
if (changed & BSS_CHANGED_ERP_PREAMBLE) {
if (bss_conf->use_short_preamble)
- wl1271_acx_set_preamble(wl, ACX_PREAMBLE_SHORT);
+ wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
else
- wl1271_acx_set_preamble(wl, ACX_PREAMBLE_LONG);
+ wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
}
if (changed & BSS_CHANGED_ERP_CTS_PROT) {
if (bss_conf->use_cts_prot)
- ret = wl1271_acx_cts_protect(wl, CTSPROTECT_ENABLE);
+ ret = wl1271_acx_cts_protect(wl, wlvif,
+ CTSPROTECT_ENABLE);
else
- ret = wl1271_acx_cts_protect(wl, CTSPROTECT_DISABLE);
+ ret = wl1271_acx_cts_protect(wl, wlvif,
+ CTSPROTECT_DISABLE);
if (ret < 0) {
wl1271_warning("Set ctsprotect failed %d", ret);
goto out;
@@ -3196,14 +3386,23 @@ static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
struct ieee80211_bss_conf *bss_conf,
u32 changed)
{
- bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+ bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
int ret = 0;
if ((changed & BSS_CHANGED_BEACON_INT)) {
wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
bss_conf->beacon_int);
- wl->beacon_int = bss_conf->beacon_int;
+ wlvif->beacon_int = bss_conf->beacon_int;
+ }
+
+ if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
+ u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
+ if (!wl1271_ap_set_probe_resp_tmpl(wl, rate, vif)) {
+ wl1271_debug(DEBUG_AP, "probe response updated");
+ set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
+ }
}
if ((changed & BSS_CHANGED_BEACON)) {
@@ -3214,17 +3413,19 @@ static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
u16 tmpl_id;
- if (!beacon)
+ if (!beacon) {
+ ret = -EINVAL;
goto out;
+ }
wl1271_debug(DEBUG_MASTER, "beacon updated");
- ret = wl1271_ssid_set(wl, beacon, ieoffset);
+ ret = wl1271_ssid_set(vif, beacon, ieoffset);
if (ret < 0) {
dev_kfree_skb(beacon);
goto out;
}
- min_rate = wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
+ min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
CMD_TEMPL_BEACON;
ret = wl1271_cmd_template_set(wl, tmpl_id,
@@ -3236,6 +3437,13 @@ static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
goto out;
}
+ /*
+ * In case we already have a probe-resp beacon set explicitly
+ * by usermode, don't use the beacon data.
+ */
+ if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
+ goto end_bcn;
+
/* remove TIM ie from probe response */
wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
@@ -3254,7 +3462,7 @@ static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_PROBE_RESP);
if (is_ap)
- ret = wl1271_ap_set_probe_resp_tmpl(wl,
+ ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
beacon->data,
beacon->len,
min_rate);
@@ -3264,12 +3472,15 @@ static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
beacon->data,
beacon->len, 0,
min_rate);
+end_bcn:
dev_kfree_skb(beacon);
if (ret < 0)
goto out;
}
out:
+ if (ret != 0)
+ wl1271_error("beacon info change failed: %d", ret);
return ret;
}
@@ -3279,23 +3490,24 @@ static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
struct ieee80211_bss_conf *bss_conf,
u32 changed)
{
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
int ret = 0;
if ((changed & BSS_CHANGED_BASIC_RATES)) {
u32 rates = bss_conf->basic_rates;
- wl->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
- wl->band);
- wl->basic_rate = wl1271_tx_min_rate_get(wl,
- wl->basic_rate_set);
+ wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
+ wlvif->band);
+ wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
+ wlvif->basic_rate_set);
- ret = wl1271_init_ap_rates(wl);
+ ret = wl1271_init_ap_rates(wl, wlvif);
if (ret < 0) {
wl1271_error("AP rate policy change failed %d", ret);
goto out;
}
- ret = wl1271_ap_init_templates(wl);
+ ret = wl1271_ap_init_templates(wl, vif);
if (ret < 0)
goto out;
}
@@ -3306,38 +3518,40 @@ static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
if ((changed & BSS_CHANGED_BEACON_ENABLED)) {
if (bss_conf->enable_beacon) {
- if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) {
- ret = wl12xx_cmd_role_start_ap(wl);
+ if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
+ ret = wl12xx_cmd_role_start_ap(wl, wlvif);
if (ret < 0)
goto out;
- ret = wl1271_ap_init_hwenc(wl);
+ ret = wl1271_ap_init_hwenc(wl, wlvif);
if (ret < 0)
goto out;
- set_bit(WL1271_FLAG_AP_STARTED, &wl->flags);
+ set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
wl1271_debug(DEBUG_AP, "started AP");
}
} else {
- if (test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) {
- ret = wl12xx_cmd_role_stop_ap(wl);
+ if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
+ ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
if (ret < 0)
goto out;
- clear_bit(WL1271_FLAG_AP_STARTED, &wl->flags);
+ clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
+ clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
+ &wlvif->flags);
wl1271_debug(DEBUG_AP, "stopped AP");
}
}
}
- ret = wl1271_bss_erp_info_changed(wl, bss_conf, changed);
+ ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
if (ret < 0)
goto out;
/* Handle HT information change */
if ((changed & BSS_CHANGED_HT) &&
(bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
- ret = wl1271_acx_set_ht_information(wl,
+ ret = wl1271_acx_set_ht_information(wl, wlvif,
bss_conf->ht_operation_mode);
if (ret < 0) {
wl1271_warning("Set ht information failed %d", ret);
@@ -3355,8 +3569,9 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
struct ieee80211_bss_conf *bss_conf,
u32 changed)
{
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
bool do_join = false, set_assoc = false;
- bool is_ibss = (wl->bss_type == BSS_TYPE_IBSS);
+ bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
bool ibss_joined = false;
u32 sta_rate_set = 0;
int ret;
@@ -3373,14 +3588,13 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
if (changed & BSS_CHANGED_IBSS) {
if (bss_conf->ibss_joined) {
- set_bit(WL1271_FLAG_IBSS_JOINED, &wl->flags);
+ set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
ibss_joined = true;
} else {
- if (test_and_clear_bit(WL1271_FLAG_IBSS_JOINED,
- &wl->flags)) {
- wl1271_unjoin(wl);
- wl12xx_cmd_role_start_dev(wl);
- wl12xx_roc(wl, wl->dev_role_id);
+ if (test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED,
+ &wlvif->flags)) {
+ wl1271_unjoin(wl, wlvif);
+ wl12xx_start_dev(wl, wlvif);
}
}
}
@@ -3396,46 +3610,40 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
bss_conf->enable_beacon ? "enabled" : "disabled");
- if (bss_conf->enable_beacon)
- wl->set_bss_type = BSS_TYPE_IBSS;
- else
- wl->set_bss_type = BSS_TYPE_STA_BSS;
do_join = true;
}
+ if (changed & BSS_CHANGED_IDLE) {
+ ret = wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
+ if (ret < 0)
+ wl1271_warning("idle mode change failed %d", ret);
+ }
+
if ((changed & BSS_CHANGED_CQM)) {
bool enable = false;
if (bss_conf->cqm_rssi_thold)
enable = true;
- ret = wl1271_acx_rssi_snr_trigger(wl, enable,
+ ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
bss_conf->cqm_rssi_thold,
bss_conf->cqm_rssi_hyst);
if (ret < 0)
goto out;
- wl->rssi_thold = bss_conf->cqm_rssi_thold;
+ wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
}
- if ((changed & BSS_CHANGED_BSSID) &&
- /*
- * Now we know the correct bssid, so we send a new join command
- * and enable the BSSID filter
- */
- memcmp(wl->bssid, bss_conf->bssid, ETH_ALEN)) {
- memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN);
-
- if (!is_zero_ether_addr(wl->bssid)) {
- ret = wl1271_cmd_build_null_data(wl);
+ if (changed & BSS_CHANGED_BSSID)
+ if (!is_zero_ether_addr(bss_conf->bssid)) {
+ ret = wl12xx_cmd_build_null_data(wl, wlvif);
if (ret < 0)
goto out;
- ret = wl1271_build_qos_null_data(wl);
+ ret = wl1271_build_qos_null_data(wl, vif);
if (ret < 0)
goto out;
/* Need to update the BSSID (for filtering etc) */
do_join = true;
}
- }
if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_HT)) {
rcu_read_lock();
@@ -3459,26 +3667,28 @@ sta_not_found:
if (bss_conf->assoc) {
u32 rates;
int ieoffset;
- wl->aid = bss_conf->aid;
+ wlvif->aid = bss_conf->aid;
set_assoc = true;
- wl->ps_poll_failures = 0;
+ wlvif->ps_poll_failures = 0;
/*
* use basic rates from AP, and determine lowest rate
* to use with control frames.
*/
rates = bss_conf->basic_rates;
- wl->basic_rate_set =
+ wlvif->basic_rate_set =
wl1271_tx_enabled_rates_get(wl, rates,
- wl->band);
- wl->basic_rate =
- wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
+ wlvif->band);
+ wlvif->basic_rate =
+ wl1271_tx_min_rate_get(wl,
+ wlvif->basic_rate_set);
if (sta_rate_set)
- wl->rate_set = wl1271_tx_enabled_rates_get(wl,
+ wlvif->rate_set =
+ wl1271_tx_enabled_rates_get(wl,
sta_rate_set,
- wl->band);
- ret = wl1271_acx_sta_rate_policies(wl);
+ wlvif->band);
+ ret = wl1271_acx_sta_rate_policies(wl, wlvif);
if (ret < 0)
goto out;
@@ -3488,53 +3698,56 @@ sta_not_found:
* updates it by itself when the first beacon is
* received after a join.
*/
- ret = wl1271_cmd_build_ps_poll(wl, wl->aid);
+ ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
if (ret < 0)
goto out;
/*
* Get a template for hardware connection maintenance
*/
- dev_kfree_skb(wl->probereq);
- wl->probereq = wl1271_cmd_build_ap_probe_req(wl, NULL);
+ dev_kfree_skb(wlvif->probereq);
+ wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
+ wlvif,
+ NULL);
ieoffset = offsetof(struct ieee80211_mgmt,
u.probe_req.variable);
- wl1271_ssid_set(wl, wl->probereq, ieoffset);
+ wl1271_ssid_set(vif, wlvif->probereq, ieoffset);
/* enable the connection monitoring feature */
- ret = wl1271_acx_conn_monit_params(wl, true);
+ ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
if (ret < 0)
goto out;
} else {
/* use defaults when not associated */
bool was_assoc =
- !!test_and_clear_bit(WL1271_FLAG_STA_ASSOCIATED,
- &wl->flags);
+ !!test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED,
+ &wlvif->flags);
bool was_ifup =
- !!test_and_clear_bit(WL1271_FLAG_STA_STATE_SENT,
- &wl->flags);
- wl->aid = 0;
+ !!test_and_clear_bit(WLVIF_FLAG_STA_STATE_SENT,
+ &wlvif->flags);
+ wlvif->aid = 0;
/* free probe-request template */
- dev_kfree_skb(wl->probereq);
- wl->probereq = NULL;
+ dev_kfree_skb(wlvif->probereq);
+ wlvif->probereq = NULL;
/* re-enable dynamic ps - just in case */
- ieee80211_enable_dyn_ps(wl->vif);
+ ieee80211_enable_dyn_ps(vif);
/* revert back to minimum rates for the current band */
- wl1271_set_band_rate(wl);
- wl->basic_rate =
- wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
- ret = wl1271_acx_sta_rate_policies(wl);
+ wl1271_set_band_rate(wl, wlvif);
+ wlvif->basic_rate =
+ wl1271_tx_min_rate_get(wl,
+ wlvif->basic_rate_set);
+ ret = wl1271_acx_sta_rate_policies(wl, wlvif);
if (ret < 0)
goto out;
/* disable connection monitor features */
- ret = wl1271_acx_conn_monit_params(wl, false);
+ ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
/* Disable the keep-alive feature */
- ret = wl1271_acx_keep_alive_mode(wl, false);
+ ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
if (ret < 0)
goto out;
@@ -3546,7 +3759,7 @@ sta_not_found:
* no IF_OPER_UP notification.
*/
if (!was_ifup) {
- ret = wl12xx_croc(wl, wl->role_id);
+ ret = wl12xx_croc(wl, wlvif->role_id);
if (ret < 0)
goto out;
}
@@ -3555,17 +3768,16 @@ sta_not_found:
* roaming on the same channel. until we will
* have a better flow...)
*/
- if (test_bit(wl->dev_role_id, wl->roc_map)) {
- ret = wl12xx_croc(wl, wl->dev_role_id);
+ if (test_bit(wlvif->dev_role_id, wl->roc_map)) {
+ ret = wl12xx_croc(wl,
+ wlvif->dev_role_id);
if (ret < 0)
goto out;
}
- wl1271_unjoin(wl);
- if (!(conf_flags & IEEE80211_CONF_IDLE)) {
- wl12xx_cmd_role_start_dev(wl);
- wl12xx_roc(wl, wl->dev_role_id);
- }
+ wl1271_unjoin(wl, wlvif);
+ if (!(conf_flags & IEEE80211_CONF_IDLE))
+ wl12xx_start_dev(wl, wlvif);
}
}
}
@@ -3576,27 +3788,28 @@ sta_not_found:
if (bss_conf->ibss_joined) {
u32 rates = bss_conf->basic_rates;
- wl->basic_rate_set =
+ wlvif->basic_rate_set =
wl1271_tx_enabled_rates_get(wl, rates,
- wl->band);
- wl->basic_rate =
- wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
+ wlvif->band);
+ wlvif->basic_rate =
+ wl1271_tx_min_rate_get(wl,
+ wlvif->basic_rate_set);
/* by default, use 11b + OFDM rates */
- wl->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
- ret = wl1271_acx_sta_rate_policies(wl);
+ wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
+ ret = wl1271_acx_sta_rate_policies(wl, wlvif);
if (ret < 0)
goto out;
}
}
- ret = wl1271_bss_erp_info_changed(wl, bss_conf, changed);
+ ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
if (ret < 0)
goto out;
if (changed & BSS_CHANGED_ARP_FILTER) {
__be32 addr = bss_conf->arp_addr_list[0];
- WARN_ON(wl->bss_type != BSS_TYPE_STA_BSS);
+ WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
if (bss_conf->arp_addr_cnt == 1 &&
bss_conf->arp_filter_enabled) {
@@ -3606,24 +3819,24 @@ sta_not_found:
* isn't being set (when sending), so we have to
* reconfigure the template upon every ip change.
*/
- ret = wl1271_cmd_build_arp_rsp(wl, addr);
+ ret = wl1271_cmd_build_arp_rsp(wl, wlvif, addr);
if (ret < 0) {
wl1271_warning("build arp rsp failed: %d", ret);
goto out;
}
- ret = wl1271_acx_arp_ip_filter(wl,
+ ret = wl1271_acx_arp_ip_filter(wl, wlvif,
ACX_ARP_FILTER_ARP_FILTERING,
addr);
} else
- ret = wl1271_acx_arp_ip_filter(wl, 0, addr);
+ ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
if (ret < 0)
goto out;
}
if (do_join) {
- ret = wl1271_join(wl, set_assoc);
+ ret = wl1271_join(wl, wlvif, set_assoc);
if (ret < 0) {
wl1271_warning("cmd join failed %d", ret);
goto out;
@@ -3631,35 +3844,31 @@ sta_not_found:
/* ROC until connected (after EAPOL exchange) */
if (!is_ibss) {
- ret = wl12xx_roc(wl, wl->role_id);
+ ret = wl12xx_roc(wl, wlvif, wlvif->role_id);
if (ret < 0)
goto out;
- wl1271_check_operstate(wl,
+ wl1271_check_operstate(wl, wlvif,
ieee80211_get_operstate(vif));
}
/*
* stop device role if started (we might already be in
- * STA role). TODO: make it better.
+ * STA/IBSS role).
*/
- if (wl->dev_role_id != WL12XX_INVALID_ROLE_ID) {
- ret = wl12xx_croc(wl, wl->dev_role_id);
- if (ret < 0)
- goto out;
-
- ret = wl12xx_cmd_role_stop_dev(wl);
+ if (wl12xx_dev_role_started(wlvif)) {
+ ret = wl12xx_stop_dev(wl, wlvif);
if (ret < 0)
goto out;
}
/* If we want to go in PSM but we're not there yet */
- if (test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags) &&
- !test_bit(WL1271_FLAG_PSM, &wl->flags)) {
+ if (test_bit(WLVIF_FLAG_PSM_REQUESTED, &wlvif->flags) &&
+ !test_bit(WLVIF_FLAG_PSM, &wlvif->flags)) {
enum wl1271_cmd_ps_mode mode;
mode = STATION_POWER_SAVE_MODE;
- ret = wl1271_ps_set_mode(wl, mode,
- wl->basic_rate,
+ ret = wl1271_ps_set_mode(wl, wlvif, mode,
+ wlvif->basic_rate,
true);
if (ret < 0)
goto out;
@@ -3673,7 +3882,7 @@ sta_not_found:
ret = wl1271_acx_set_ht_capabilities(wl,
&sta_ht_cap,
true,
- wl->sta_hlid);
+ wlvif->sta.hlid);
if (ret < 0) {
wl1271_warning("Set ht cap true failed %d",
ret);
@@ -3685,7 +3894,7 @@ sta_not_found:
ret = wl1271_acx_set_ht_capabilities(wl,
&sta_ht_cap,
false,
- wl->sta_hlid);
+ wlvif->sta.hlid);
if (ret < 0) {
wl1271_warning("Set ht cap false failed %d",
ret);
@@ -3697,7 +3906,7 @@ sta_not_found:
/* Handle HT information change. Done after join. */
if ((changed & BSS_CHANGED_HT) &&
(bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
- ret = wl1271_acx_set_ht_information(wl,
+ ret = wl1271_acx_set_ht_information(wl, wlvif,
bss_conf->ht_operation_mode);
if (ret < 0) {
wl1271_warning("Set ht information failed %d", ret);
@@ -3715,7 +3924,8 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
u32 changed)
{
struct wl1271 *wl = hw->priv;
- bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+ bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
int ret;
wl1271_debug(DEBUG_MAC80211, "mac80211 bss info changed 0x%x",
@@ -3726,6 +3936,9 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
if (unlikely(wl->state == WL1271_STATE_OFF))
goto out;
+ if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
+ goto out;
+
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
@@ -3746,6 +3959,7 @@ static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
const struct ieee80211_tx_queue_params *params)
{
struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
u8 ps_scheme;
int ret = 0;
@@ -3758,31 +3972,8 @@ static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
else
ps_scheme = CONF_PS_SCHEME_LEGACY;
- if (wl->state == WL1271_STATE_OFF) {
- /*
- * If the state is off, the parameters will be recorded and
- * configured on init. This happens in AP-mode.
- */
- struct conf_tx_ac_category *conf_ac =
- &wl->conf.tx.ac_conf[wl1271_tx_get_queue(queue)];
- struct conf_tx_tid *conf_tid =
- &wl->conf.tx.tid_conf[wl1271_tx_get_queue(queue)];
-
- conf_ac->ac = wl1271_tx_get_queue(queue);
- conf_ac->cw_min = (u8)params->cw_min;
- conf_ac->cw_max = params->cw_max;
- conf_ac->aifsn = params->aifs;
- conf_ac->tx_op_limit = params->txop << 5;
-
- conf_tid->queue_id = wl1271_tx_get_queue(queue);
- conf_tid->channel_type = CONF_CHANNEL_TYPE_EDCF;
- conf_tid->tsid = wl1271_tx_get_queue(queue);
- conf_tid->ps_scheme = ps_scheme;
- conf_tid->ack_policy = CONF_ACK_POLICY_LEGACY;
- conf_tid->apsd_conf[0] = 0;
- conf_tid->apsd_conf[1] = 0;
+ if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
goto out;
- }
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
@@ -3792,13 +3983,13 @@ static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
* the txop is confed in units of 32us by the mac80211,
* we need us
*/
- ret = wl1271_acx_ac_cfg(wl, wl1271_tx_get_queue(queue),
+ ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
params->cw_min, params->cw_max,
params->aifs, params->txop << 5);
if (ret < 0)
goto out_sleep;
- ret = wl1271_acx_tid_cfg(wl, wl1271_tx_get_queue(queue),
+ ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
CONF_CHANNEL_TYPE_EDCF,
wl1271_tx_get_queue(queue),
ps_scheme, CONF_ACK_POLICY_LEGACY,
@@ -3861,43 +4052,43 @@ static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
}
static int wl1271_allocate_sta(struct wl1271 *wl,
- struct ieee80211_sta *sta,
- u8 *hlid)
+ struct wl12xx_vif *wlvif,
+ struct ieee80211_sta *sta)
{
struct wl1271_station *wl_sta;
- int id;
+ int ret;
- id = find_first_zero_bit(wl->ap_hlid_map, AP_MAX_STATIONS);
- if (id >= AP_MAX_STATIONS) {
+
+ if (wl->active_sta_count >= AP_MAX_STATIONS) {
wl1271_warning("could not allocate HLID - too much stations");
return -EBUSY;
}
wl_sta = (struct wl1271_station *)sta->drv_priv;
- set_bit(id, wl->ap_hlid_map);
- wl_sta->hlid = WL1271_AP_STA_HLID_START + id;
- *hlid = wl_sta->hlid;
+ ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
+ if (ret < 0) {
+ wl1271_warning("could not allocate HLID - too many links");
+ return -EBUSY;
+ }
+
+ set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
wl->active_sta_count++;
return 0;
}
-void wl1271_free_sta(struct wl1271 *wl, u8 hlid)
+void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
{
- int id = hlid - WL1271_AP_STA_HLID_START;
-
- if (hlid < WL1271_AP_STA_HLID_START)
+ if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
return;
- if (!test_bit(id, wl->ap_hlid_map))
- return;
-
- clear_bit(id, wl->ap_hlid_map);
+ clear_bit(hlid, wlvif->ap.sta_hlid_map);
memset(wl->links[hlid].addr, 0, ETH_ALEN);
wl->links[hlid].ba_bitmap = 0;
wl1271_tx_reset_link_queues(wl, hlid);
__clear_bit(hlid, &wl->ap_ps_map);
__clear_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
+ wl12xx_free_link(wl, wlvif, &hlid);
wl->active_sta_count--;
}
@@ -3906,6 +4097,8 @@ static int wl1271_op_sta_add(struct ieee80211_hw *hw,
struct ieee80211_sta *sta)
{
struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+ struct wl1271_station *wl_sta;
int ret = 0;
u8 hlid;
@@ -3914,20 +4107,23 @@ static int wl1271_op_sta_add(struct ieee80211_hw *hw,
if (unlikely(wl->state == WL1271_STATE_OFF))
goto out;
- if (wl->bss_type != BSS_TYPE_AP_BSS)
+ if (wlvif->bss_type != BSS_TYPE_AP_BSS)
goto out;
wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
- ret = wl1271_allocate_sta(wl, sta, &hlid);
+ ret = wl1271_allocate_sta(wl, wlvif, sta);
if (ret < 0)
goto out;
+ wl_sta = (struct wl1271_station *)sta->drv_priv;
+ hlid = wl_sta->hlid;
+
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out_free_sta;
- ret = wl12xx_cmd_add_peer(wl, sta, hlid);
+ ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
if (ret < 0)
goto out_sleep;
@@ -3944,7 +4140,7 @@ out_sleep:
out_free_sta:
if (ret < 0)
- wl1271_free_sta(wl, hlid);
+ wl1271_free_sta(wl, wlvif, hlid);
out:
mutex_unlock(&wl->mutex);
@@ -3956,6 +4152,7 @@ static int wl1271_op_sta_remove(struct ieee80211_hw *hw,
struct ieee80211_sta *sta)
{
struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
struct wl1271_station *wl_sta;
int ret = 0, id;
@@ -3964,14 +4161,14 @@ static int wl1271_op_sta_remove(struct ieee80211_hw *hw,
if (unlikely(wl->state == WL1271_STATE_OFF))
goto out;
- if (wl->bss_type != BSS_TYPE_AP_BSS)
+ if (wlvif->bss_type != BSS_TYPE_AP_BSS)
goto out;
wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
wl_sta = (struct wl1271_station *)sta->drv_priv;
- id = wl_sta->hlid - WL1271_AP_STA_HLID_START;
- if (WARN_ON(!test_bit(id, wl->ap_hlid_map)))
+ id = wl_sta->hlid;
+ if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
goto out;
ret = wl1271_ps_elp_wakeup(wl);
@@ -3982,7 +4179,7 @@ static int wl1271_op_sta_remove(struct ieee80211_hw *hw,
if (ret < 0)
goto out_sleep;
- wl1271_free_sta(wl, wl_sta->hlid);
+ wl1271_free_sta(wl, wlvif, wl_sta->hlid);
out_sleep:
wl1271_ps_elp_sleep(wl);
@@ -3999,6 +4196,7 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
u8 buf_size)
{
struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
int ret;
u8 hlid, *ba_bitmap;
@@ -4016,10 +4214,10 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
goto out;
}
- if (wl->bss_type == BSS_TYPE_STA_BSS) {
- hlid = wl->sta_hlid;
- ba_bitmap = &wl->ba_rx_bitmap;
- } else if (wl->bss_type == BSS_TYPE_AP_BSS) {
+ if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
+ hlid = wlvif->sta.hlid;
+ ba_bitmap = &wlvif->sta.ba_rx_bitmap;
+ } else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
struct wl1271_station *wl_sta;
wl_sta = (struct wl1271_station *)sta->drv_priv;
@@ -4039,7 +4237,7 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
switch (action) {
case IEEE80211_AMPDU_RX_START:
- if (!wl->ba_support || !wl->ba_allowed) {
+ if (!wlvif->ba_support || !wlvif->ba_allowed) {
ret = -ENOTSUPP;
break;
}
@@ -4108,8 +4306,9 @@ static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
const struct cfg80211_bitrate_mask *mask)
{
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
struct wl1271 *wl = hw->priv;
- int i;
+ int i, ret = 0;
wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
mask->control[NL80211_BAND_2GHZ].legacy,
@@ -4118,19 +4317,39 @@ static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
mutex_lock(&wl->mutex);
for (i = 0; i < IEEE80211_NUM_BANDS; i++)
- wl->bitrate_masks[i] =
+ wlvif->bitrate_masks[i] =
wl1271_tx_enabled_rates_get(wl,
mask->control[i].legacy,
i);
+
+ if (unlikely(wl->state == WL1271_STATE_OFF))
+ goto out;
+
+ if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
+ !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
+
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out;
+
+ wl1271_set_band_rate(wl, wlvif);
+ wlvif->basic_rate =
+ wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
+ ret = wl1271_acx_sta_rate_policies(wl, wlvif);
+
+ wl1271_ps_elp_sleep(wl);
+ }
+out:
mutex_unlock(&wl->mutex);
- return 0;
+ return ret;
}
static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
struct ieee80211_channel_switch *ch_switch)
{
struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif;
int ret;
wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
@@ -4138,19 +4357,24 @@ static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
mutex_lock(&wl->mutex);
if (unlikely(wl->state == WL1271_STATE_OFF)) {
- mutex_unlock(&wl->mutex);
- ieee80211_chswitch_done(wl->vif, false);
- return;
+ wl12xx_for_each_wlvif_sta(wl, wlvif) {
+ struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
+ ieee80211_chswitch_done(vif, false);
+ }
+ goto out;
}
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
- ret = wl12xx_cmd_channel_switch(wl, ch_switch);
+ /* TODO: change mac80211 to pass vif as param */
+ wl12xx_for_each_wlvif_sta(wl, wlvif) {
+ ret = wl12xx_cmd_channel_switch(wl, ch_switch);
- if (!ret)
- set_bit(WL1271_FLAG_CS_PROGRESS, &wl->flags);
+ if (!ret)
+ set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
+ }
wl1271_ps_elp_sleep(wl);
@@ -4170,10 +4394,6 @@ static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
/* packets are considered pending if in the TX queue or the FW */
ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
-
- /* the above is appropriate for STA mode for PS purposes */
- WARN_ON(wl->bss_type != BSS_TYPE_STA_BSS);
-
out:
mutex_unlock(&wl->mutex);
@@ -4410,6 +4630,7 @@ static const struct ieee80211_ops wl1271_ops = {
.stop = wl1271_op_stop,
.add_interface = wl1271_op_add_interface,
.remove_interface = wl1271_op_remove_interface,
+ .change_interface = wl12xx_op_change_interface,
#ifdef CONFIG_PM
.suspend = wl1271_op_suspend,
.resume = wl1271_op_resume,
@@ -4604,7 +4825,7 @@ static struct bin_attribute fwlog_attr = {
.read = wl1271_sysfs_read_fwlog,
};
-int wl1271_register_hw(struct wl1271 *wl)
+static int wl1271_register_hw(struct wl1271 *wl)
{
int ret;
@@ -4645,9 +4866,8 @@ int wl1271_register_hw(struct wl1271 *wl)
return 0;
}
-EXPORT_SYMBOL_GPL(wl1271_register_hw);
-void wl1271_unregister_hw(struct wl1271 *wl)
+static void wl1271_unregister_hw(struct wl1271 *wl)
{
if (wl->state == WL1271_STATE_PLT)
__wl1271_plt_stop(wl);
@@ -4657,9 +4877,8 @@ void wl1271_unregister_hw(struct wl1271 *wl)
wl->mac80211_registered = false;
}
-EXPORT_SYMBOL_GPL(wl1271_unregister_hw);
-int wl1271_init_ieee80211(struct wl1271 *wl)
+static int wl1271_init_ieee80211(struct wl1271 *wl)
{
static const u32 cipher_suites[] = {
WLAN_CIPHER_SUITE_WEP40,
@@ -4736,27 +4955,33 @@ int wl1271_init_ieee80211(struct wl1271 *wl)
wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
- SET_IEEE80211_DEV(wl->hw, wl1271_wl_to_dev(wl));
+ /* the FW answers probe-requests in AP-mode */
+ wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
+ wl->hw->wiphy->probe_resp_offload =
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
+
+ SET_IEEE80211_DEV(wl->hw, wl->dev);
wl->hw->sta_data_size = sizeof(struct wl1271_station);
+ wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
wl->hw->max_rx_aggregation_subframes = 8;
return 0;
}
-EXPORT_SYMBOL_GPL(wl1271_init_ieee80211);
#define WL1271_DEFAULT_CHANNEL 0
-struct ieee80211_hw *wl1271_alloc_hw(void)
+static struct ieee80211_hw *wl1271_alloc_hw(void)
{
struct ieee80211_hw *hw;
- struct platform_device *plat_dev = NULL;
struct wl1271 *wl;
int i, j, ret;
unsigned int order;
- BUILD_BUG_ON(AP_MAX_LINKS > WL12XX_MAX_LINKS);
+ BUILD_BUG_ON(AP_MAX_STATIONS > WL12XX_MAX_LINKS);
hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
if (!hw) {
@@ -4765,41 +4990,26 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
goto err_hw_alloc;
}
- plat_dev = kmemdup(&wl1271_device, sizeof(wl1271_device), GFP_KERNEL);
- if (!plat_dev) {
- wl1271_error("could not allocate platform_device");
- ret = -ENOMEM;
- goto err_plat_alloc;
- }
-
wl = hw->priv;
memset(wl, 0, sizeof(*wl));
INIT_LIST_HEAD(&wl->list);
+ INIT_LIST_HEAD(&wl->wlvif_list);
wl->hw = hw;
- wl->plat_dev = plat_dev;
-
- for (i = 0; i < NUM_TX_QUEUES; i++)
- skb_queue_head_init(&wl->tx_queue[i]);
for (i = 0; i < NUM_TX_QUEUES; i++)
- for (j = 0; j < AP_MAX_LINKS; j++)
+ for (j = 0; j < WL12XX_MAX_LINKS; j++)
skb_queue_head_init(&wl->links[j].tx_queue[i]);
skb_queue_head_init(&wl->deferred_rx_queue);
skb_queue_head_init(&wl->deferred_tx_queue);
INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
- INIT_DELAYED_WORK(&wl->pspoll_work, wl1271_pspoll_work);
INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
INIT_WORK(&wl->tx_work, wl1271_tx_work);
INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
- INIT_WORK(&wl->rx_streaming_enable_work,
- wl1271_rx_streaming_enable_work);
- INIT_WORK(&wl->rx_streaming_disable_work,
- wl1271_rx_streaming_disable_work);
wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
if (!wl->freezable_wq) {
@@ -4808,41 +5018,21 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
}
wl->channel = WL1271_DEFAULT_CHANNEL;
- wl->beacon_int = WL1271_DEFAULT_BEACON_INT;
- wl->default_key = 0;
wl->rx_counter = 0;
- wl->psm_entry_retry = 0;
wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
- wl->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
- wl->basic_rate = CONF_TX_RATE_MASK_BASIC;
- wl->rate_set = CONF_TX_RATE_MASK_BASIC;
wl->band = IEEE80211_BAND_2GHZ;
wl->vif = NULL;
wl->flags = 0;
wl->sg_enabled = true;
wl->hw_pg_ver = -1;
- wl->bss_type = MAX_BSS_TYPE;
- wl->set_bss_type = MAX_BSS_TYPE;
- wl->last_tx_hlid = 0;
wl->ap_ps_map = 0;
wl->ap_fw_ps_map = 0;
wl->quirks = 0;
wl->platform_quirks = 0;
wl->sched_scanning = false;
- wl->tx_security_seq = 0;
- wl->tx_security_last_seq_lsb = 0;
wl->tx_spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT;
- wl->role_id = WL12XX_INVALID_ROLE_ID;
wl->system_hlid = WL12XX_SYSTEM_HLID;
- wl->sta_hlid = WL12XX_INVALID_LINK_ID;
- wl->dev_role_id = WL12XX_INVALID_ROLE_ID;
- wl->dev_hlid = WL12XX_INVALID_LINK_ID;
- wl->session_counter = 0;
- wl->ap_bcast_hlid = WL12XX_INVALID_LINK_ID;
- wl->ap_global_hlid = WL12XX_INVALID_LINK_ID;
wl->active_sta_count = 0;
- setup_timer(&wl->rx_streaming_timer, wl1271_rx_streaming_timer,
- (unsigned long) wl);
wl->fwlog_size = 0;
init_waitqueue_head(&wl->fwlog_waitq);
@@ -4860,8 +5050,6 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
/* Apply default driver configuration. */
wl1271_conf_init(wl);
- wl->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
- wl->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
order = get_order(WL1271_AGGR_BUFFER_SIZE);
wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
@@ -4883,49 +5071,8 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
goto err_dummy_packet;
}
- /* Register platform device */
- ret = platform_device_register(wl->plat_dev);
- if (ret) {
- wl1271_error("couldn't register platform device");
- goto err_fwlog;
- }
- dev_set_drvdata(&wl->plat_dev->dev, wl);
-
- /* Create sysfs file to control bt coex state */
- ret = device_create_file(&wl->plat_dev->dev, &dev_attr_bt_coex_state);
- if (ret < 0) {
- wl1271_error("failed to create sysfs file bt_coex_state");
- goto err_platform;
- }
-
- /* Create sysfs file to get HW PG version */
- ret = device_create_file(&wl->plat_dev->dev, &dev_attr_hw_pg_ver);
- if (ret < 0) {
- wl1271_error("failed to create sysfs file hw_pg_ver");
- goto err_bt_coex_state;
- }
-
- /* Create sysfs file for the FW log */
- ret = device_create_bin_file(&wl->plat_dev->dev, &fwlog_attr);
- if (ret < 0) {
- wl1271_error("failed to create sysfs file fwlog");
- goto err_hw_pg_ver;
- }
-
return hw;
-err_hw_pg_ver:
- device_remove_file(&wl->plat_dev->dev, &dev_attr_hw_pg_ver);
-
-err_bt_coex_state:
- device_remove_file(&wl->plat_dev->dev, &dev_attr_bt_coex_state);
-
-err_platform:
- platform_device_unregister(wl->plat_dev);
-
-err_fwlog:
- free_page((unsigned long)wl->fwlog);
-
err_dummy_packet:
dev_kfree_skb(wl->dummy_packet);
@@ -4937,18 +5084,14 @@ err_wq:
err_hw:
wl1271_debugfs_exit(wl);
- kfree(plat_dev);
-
-err_plat_alloc:
ieee80211_free_hw(hw);
err_hw_alloc:
return ERR_PTR(ret);
}
-EXPORT_SYMBOL_GPL(wl1271_alloc_hw);
-int wl1271_free_hw(struct wl1271 *wl)
+static int wl1271_free_hw(struct wl1271 *wl)
{
/* Unblock any fwlog readers */
mutex_lock(&wl->mutex);
@@ -4956,17 +5099,15 @@ int wl1271_free_hw(struct wl1271 *wl)
wake_up_interruptible_all(&wl->fwlog_waitq);
mutex_unlock(&wl->mutex);
- device_remove_bin_file(&wl->plat_dev->dev, &fwlog_attr);
+ device_remove_bin_file(wl->dev, &fwlog_attr);
- device_remove_file(&wl->plat_dev->dev, &dev_attr_hw_pg_ver);
+ device_remove_file(wl->dev, &dev_attr_hw_pg_ver);
- device_remove_file(&wl->plat_dev->dev, &dev_attr_bt_coex_state);
- platform_device_unregister(wl->plat_dev);
+ device_remove_file(wl->dev, &dev_attr_bt_coex_state);
free_page((unsigned long)wl->fwlog);
dev_kfree_skb(wl->dummy_packet);
free_pages((unsigned long)wl->aggr_buf,
get_order(WL1271_AGGR_BUFFER_SIZE));
- kfree(wl->plat_dev);
wl1271_debugfs_exit(wl);
@@ -4983,7 +5124,174 @@ int wl1271_free_hw(struct wl1271 *wl)
return 0;
}
-EXPORT_SYMBOL_GPL(wl1271_free_hw);
+
+static irqreturn_t wl12xx_hardirq(int irq, void *cookie)
+{
+ struct wl1271 *wl = cookie;
+ unsigned long flags;
+
+ wl1271_debug(DEBUG_IRQ, "IRQ");
+
+ /* complete the ELP completion */
+ spin_lock_irqsave(&wl->wl_lock, flags);
+ set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
+ if (wl->elp_compl) {
+ complete(wl->elp_compl);
+ wl->elp_compl = NULL;
+ }
+
+ if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
+ /* don't enqueue a work right now. mark it as pending */
+ set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
+ wl1271_debug(DEBUG_IRQ, "should not enqueue work");
+ disable_irq_nosync(wl->irq);
+ pm_wakeup_event(wl->dev, 0);
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+ return IRQ_HANDLED;
+ }
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static int __devinit wl12xx_probe(struct platform_device *pdev)
+{
+ struct wl12xx_platform_data *pdata = pdev->dev.platform_data;
+ struct ieee80211_hw *hw;
+ struct wl1271 *wl;
+ unsigned long irqflags;
+ int ret = -ENODEV;
+
+ hw = wl1271_alloc_hw();
+ if (IS_ERR(hw)) {
+ wl1271_error("can't allocate hw");
+ ret = PTR_ERR(hw);
+ goto out;
+ }
+
+ wl = hw->priv;
+ wl->irq = platform_get_irq(pdev, 0);
+ wl->ref_clock = pdata->board_ref_clock;
+ wl->tcxo_clock = pdata->board_tcxo_clock;
+ wl->platform_quirks = pdata->platform_quirks;
+ wl->set_power = pdata->set_power;
+ wl->dev = &pdev->dev;
+ wl->if_ops = pdata->ops;
+
+ platform_set_drvdata(pdev, wl);
+
+ if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
+ irqflags = IRQF_TRIGGER_RISING;
+ else
+ irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
+
+ ret = request_threaded_irq(wl->irq, wl12xx_hardirq, wl1271_irq,
+ irqflags,
+ pdev->name, wl);
+ if (ret < 0) {
+ wl1271_error("request_irq() failed: %d", ret);
+ goto out_free_hw;
+ }
+
+ ret = enable_irq_wake(wl->irq);
+ if (!ret) {
+ wl->irq_wake_enabled = true;
+ device_init_wakeup(wl->dev, 1);
+ if (pdata->pwr_in_suspend)
+ hw->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY;
+
+ }
+ disable_irq(wl->irq);
+
+ ret = wl1271_init_ieee80211(wl);
+ if (ret)
+ goto out_irq;
+
+ ret = wl1271_register_hw(wl);
+ if (ret)
+ goto out_irq;
+
+ /* Create sysfs file to control bt coex state */
+ ret = device_create_file(wl->dev, &dev_attr_bt_coex_state);
+ if (ret < 0) {
+ wl1271_error("failed to create sysfs file bt_coex_state");
+ goto out_irq;
+ }
+
+ /* Create sysfs file to get HW PG version */
+ ret = device_create_file(wl->dev, &dev_attr_hw_pg_ver);
+ if (ret < 0) {
+ wl1271_error("failed to create sysfs file hw_pg_ver");
+ goto out_bt_coex_state;
+ }
+
+ /* Create sysfs file for the FW log */
+ ret = device_create_bin_file(wl->dev, &fwlog_attr);
+ if (ret < 0) {
+ wl1271_error("failed to create sysfs file fwlog");
+ goto out_hw_pg_ver;
+ }
+
+ return 0;
+
+out_hw_pg_ver:
+ device_remove_file(wl->dev, &dev_attr_hw_pg_ver);
+
+out_bt_coex_state:
+ device_remove_file(wl->dev, &dev_attr_bt_coex_state);
+
+out_irq:
+ free_irq(wl->irq, wl);
+
+out_free_hw:
+ wl1271_free_hw(wl);
+
+out:
+ return ret;
+}
+
+static int __devexit wl12xx_remove(struct platform_device *pdev)
+{
+ struct wl1271 *wl = platform_get_drvdata(pdev);
+
+ if (wl->irq_wake_enabled) {
+ device_init_wakeup(wl->dev, 0);
+ disable_irq_wake(wl->irq);
+ }
+ wl1271_unregister_hw(wl);
+ free_irq(wl->irq, wl);
+ wl1271_free_hw(wl);
+
+ return 0;
+}
+
+static const struct platform_device_id wl12xx_id_table[] __devinitconst = {
+ { "wl12xx", 0 },
+ { } /* Terminating Entry */
+};
+MODULE_DEVICE_TABLE(platform, wl12xx_id_table);
+
+static struct platform_driver wl12xx_driver = {
+ .probe = wl12xx_probe,
+ .remove = __devexit_p(wl12xx_remove),
+ .id_table = wl12xx_id_table,
+ .driver = {
+ .name = "wl12xx_driver",
+ .owner = THIS_MODULE,
+ }
+};
+
+static int __init wl12xx_init(void)
+{
+ return platform_driver_register(&wl12xx_driver);
+}
+module_init(wl12xx_init);
+
+static void __exit wl12xx_exit(void)
+{
+ platform_driver_unregister(&wl12xx_driver);
+}
+module_exit(wl12xx_exit);
u32 wl12xx_debug_level = DEBUG_NONE;
EXPORT_SYMBOL_GPL(wl12xx_debug_level);
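The hunks above turn the wl12xx core into a platform driver: wl1271_alloc_hw() and friends become static, and wl12xx_probe() picks up the IRQ and platform data supplied by the bus glue, splitting interrupt handling between wl12xx_hardirq() and the threaded wl1271_irq() handler. A minimal sketch of that probe pattern, with purely illustrative demo_* names and nothing assumed beyond the standard platform-device and IRQ APIs:

    /*
     * Illustrative sketch only -- not the driver's code.  A platform
     * driver that takes its IRQ from a resource provided by the bus
     * glue and splits interrupt handling into a hard-IRQ half and a
     * threaded half, mirroring wl12xx_probe() above.
     */
    #include <linux/module.h>
    #include <linux/platform_device.h>
    #include <linux/interrupt.h>

    static irqreturn_t demo_hardirq(int irq, void *cookie)
    {
            /* do only the cheap, non-sleeping work here */
            return IRQ_WAKE_THREAD;
    }

    static irqreturn_t demo_thread(int irq, void *cookie)
    {
            /* the bulk of the handling runs in process context */
            return IRQ_HANDLED;
    }

    static int demo_probe(struct platform_device *pdev)
    {
            int irq = platform_get_irq(pdev, 0);

            if (irq < 0)
                    return irq;

            return request_threaded_irq(irq, demo_hardirq, demo_thread,
                                        IRQF_ONESHOT, pdev->name, pdev);
    }

    static int demo_remove(struct platform_device *pdev)
    {
            free_irq(platform_get_irq(pdev, 0), pdev);
            return 0;
    }

    static struct platform_driver demo_driver = {
            .probe  = demo_probe,
            .remove = demo_remove,
            .driver = { .name = "demo_wl", .owner = THIS_MODULE },
    };
    module_platform_driver(demo_driver);
    MODULE_LICENSE("GPL");

The module_platform_driver() helper is used here only to keep the sketch short; the patch itself spells out the module_init()/module_exit() pair.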
diff --git a/drivers/net/wireless/wl12xx/ps.c b/drivers/net/wireless/wl12xx/ps.c
index c15ebf2efd40..a2bdacdd7e1d 100644
--- a/drivers/net/wireless/wl12xx/ps.c
+++ b/drivers/net/wireless/wl12xx/ps.c
@@ -25,6 +25,7 @@
#include "ps.h"
#include "io.h"
#include "tx.h"
+#include "debug.h"
#define WL1271_WAKEUP_TIMEOUT 500
@@ -32,6 +33,7 @@ void wl1271_elp_work(struct work_struct *work)
{
struct delayed_work *dwork;
struct wl1271 *wl;
+ struct wl12xx_vif *wlvif;
dwork = container_of(work, struct delayed_work, work);
wl = container_of(dwork, struct wl1271, elp_work);
@@ -47,11 +49,18 @@ void wl1271_elp_work(struct work_struct *work)
if (unlikely(!test_bit(WL1271_FLAG_ELP_REQUESTED, &wl->flags)))
goto out;
- if (test_bit(WL1271_FLAG_IN_ELP, &wl->flags) ||
- (!test_bit(WL1271_FLAG_PSM, &wl->flags) &&
- !test_bit(WL1271_FLAG_IDLE, &wl->flags)))
+ if (test_bit(WL1271_FLAG_IN_ELP, &wl->flags))
goto out;
+ wl12xx_for_each_wlvif(wl, wlvif) {
+ if (wlvif->bss_type == BSS_TYPE_AP_BSS)
+ goto out;
+
+ if (!test_bit(WLVIF_FLAG_PSM, &wlvif->flags) &&
+ test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags))
+ goto out;
+ }
+
wl1271_debug(DEBUG_PSM, "chip to elp");
wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_SLEEP);
set_bit(WL1271_FLAG_IN_ELP, &wl->flags);
@@ -65,13 +74,20 @@ out:
/* Routines to toggle sleep mode while in ELP */
void wl1271_ps_elp_sleep(struct wl1271 *wl)
{
+ struct wl12xx_vif *wlvif;
+
/* we shouldn't get consecutive sleep requests */
if (WARN_ON(test_and_set_bit(WL1271_FLAG_ELP_REQUESTED, &wl->flags)))
return;
- if (!test_bit(WL1271_FLAG_PSM, &wl->flags) &&
- !test_bit(WL1271_FLAG_IDLE, &wl->flags))
- return;
+ wl12xx_for_each_wlvif(wl, wlvif) {
+ if (wlvif->bss_type == BSS_TYPE_AP_BSS)
+ return;
+
+ if (!test_bit(WLVIF_FLAG_PSM, &wlvif->flags) &&
+ test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags))
+ return;
+ }
ieee80211_queue_delayed_work(wl->hw, &wl->elp_work,
msecs_to_jiffies(ELP_ENTRY_DELAY));
@@ -143,8 +159,8 @@ out:
return 0;
}
-int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
- u32 rates, bool send)
+int wl1271_ps_set_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ enum wl1271_cmd_ps_mode mode, u32 rates, bool send)
{
int ret;
@@ -152,39 +168,34 @@ int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
case STATION_POWER_SAVE_MODE:
wl1271_debug(DEBUG_PSM, "entering psm");
- ret = wl1271_acx_wake_up_conditions(wl);
+ ret = wl1271_acx_wake_up_conditions(wl, wlvif);
if (ret < 0) {
wl1271_error("couldn't set wake up conditions");
return ret;
}
- ret = wl1271_cmd_ps_mode(wl, STATION_POWER_SAVE_MODE);
+ ret = wl1271_cmd_ps_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
if (ret < 0)
return ret;
- set_bit(WL1271_FLAG_PSM, &wl->flags);
+ set_bit(WLVIF_FLAG_PSM, &wlvif->flags);
break;
case STATION_ACTIVE_MODE:
default:
wl1271_debug(DEBUG_PSM, "leaving psm");
/* disable beacon early termination */
- if (wl->band == IEEE80211_BAND_2GHZ) {
- ret = wl1271_acx_bet_enable(wl, false);
+ if (wlvif->band == IEEE80211_BAND_2GHZ) {
+ ret = wl1271_acx_bet_enable(wl, wlvif, false);
if (ret < 0)
return ret;
}
- /* disable beacon filtering */
- ret = wl1271_acx_beacon_filter_opt(wl, false);
- if (ret < 0)
- return ret;
-
- ret = wl1271_cmd_ps_mode(wl, STATION_ACTIVE_MODE);
+ ret = wl1271_cmd_ps_mode(wl, wlvif, STATION_ACTIVE_MODE);
if (ret < 0)
return ret;
- clear_bit(WL1271_FLAG_PSM, &wl->flags);
+ clear_bit(WLVIF_FLAG_PSM, &wlvif->flags);
break;
}
@@ -223,9 +234,11 @@ static void wl1271_ps_filter_frames(struct wl1271 *wl, u8 hlid)
wl1271_handle_tx_low_watermark(wl);
}
-void wl1271_ps_link_start(struct wl1271 *wl, u8 hlid, bool clean_queues)
+void wl12xx_ps_link_start(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 hlid, bool clean_queues)
{
struct ieee80211_sta *sta;
+ struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
if (test_bit(hlid, &wl->ap_ps_map))
return;
@@ -235,7 +248,7 @@ void wl1271_ps_link_start(struct wl1271 *wl, u8 hlid, bool clean_queues)
clean_queues);
rcu_read_lock();
- sta = ieee80211_find_sta(wl->vif, wl->links[hlid].addr);
+ sta = ieee80211_find_sta(vif, wl->links[hlid].addr);
if (!sta) {
wl1271_error("could not find sta %pM for starting ps",
wl->links[hlid].addr);
@@ -253,9 +266,10 @@ void wl1271_ps_link_start(struct wl1271 *wl, u8 hlid, bool clean_queues)
__set_bit(hlid, &wl->ap_ps_map);
}
-void wl1271_ps_link_end(struct wl1271 *wl, u8 hlid)
+void wl12xx_ps_link_end(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
{
struct ieee80211_sta *sta;
+ struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
if (!test_bit(hlid, &wl->ap_ps_map))
return;
@@ -265,7 +279,7 @@ void wl1271_ps_link_end(struct wl1271 *wl, u8 hlid)
__clear_bit(hlid, &wl->ap_ps_map);
rcu_read_lock();
- sta = ieee80211_find_sta(wl->vif, wl->links[hlid].addr);
+ sta = ieee80211_find_sta(vif, wl->links[hlid].addr);
if (!sta) {
wl1271_error("could not find sta %pM for ending ps",
wl->links[hlid].addr);
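With per-vif state, wl1271_elp_work() and wl1271_ps_elp_sleep() can no longer consult a single WL1271_FLAG_PSM/IDLE pair: the chip may enter ELP only if no vif runs as an AP and every in-use station vif is in power-save mode. A sketch of that predicate, with demo_vif as an illustrative stand-in for struct wl12xx_vif modelling only the flags the loops above test:

    /*
     * Sketch of the per-vif sleep predicate used above; demo_vif is an
     * illustrative stand-in for struct wl12xx_vif.
     */
    #include <linux/types.h>

    struct demo_vif {
            bool is_ap;     /* BSS_TYPE_AP_BSS */
            bool in_use;    /* WLVIF_FLAG_IN_USE */
            bool in_psm;    /* WLVIF_FLAG_PSM */
    };

    /* chip may enter ELP only if no AP vif exists and every
     * in-use station vif is in power-save mode */
    static bool demo_can_enter_elp(const struct demo_vif *vifs, int n)
    {
            int i;

            for (i = 0; i < n; i++) {
                    if (vifs[i].is_ap)
                            return false;
                    if (vifs[i].in_use && !vifs[i].in_psm)
                            return false;
            }
            return true;
    }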
diff --git a/drivers/net/wireless/wl12xx/ps.h b/drivers/net/wireless/wl12xx/ps.h
index 25eb9bc9b628..a12052f02026 100644
--- a/drivers/net/wireless/wl12xx/ps.h
+++ b/drivers/net/wireless/wl12xx/ps.h
@@ -27,13 +27,14 @@
#include "wl12xx.h"
#include "acx.h"
-int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
- u32 rates, bool send);
+int wl1271_ps_set_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ enum wl1271_cmd_ps_mode mode, u32 rates, bool send);
void wl1271_ps_elp_sleep(struct wl1271 *wl);
int wl1271_ps_elp_wakeup(struct wl1271 *wl);
void wl1271_elp_work(struct work_struct *work);
-void wl1271_ps_link_start(struct wl1271 *wl, u8 hlid, bool clean_queues);
-void wl1271_ps_link_end(struct wl1271 *wl, u8 hlid);
+void wl12xx_ps_link_start(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 hlid, bool clean_queues);
+void wl12xx_ps_link_end(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid);
#define WL1271_PS_COMPLETE_TIMEOUT 500
diff --git a/drivers/net/wireless/wl12xx/reg.h b/drivers/net/wireless/wl12xx/reg.h
index 3f570f397586..df34d5977b98 100644
--- a/drivers/net/wireless/wl12xx/reg.h
+++ b/drivers/net/wireless/wl12xx/reg.h
@@ -408,7 +408,7 @@
/* Firmware image load chunk size */
-#define CHUNK_SIZE 512
+#define CHUNK_SIZE 16384
/* Firmware image header size */
#define FW_HDR_SIZE 8
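CHUNK_SIZE bounds how much of the firmware image is pushed to the chip per bus transaction, so raising it from 512 to 16384 bytes cuts the number of SDIO/SPI writes during firmware load by a factor of 32. An illustrative loop (write_block() is a placeholder, not a wl12xx symbol):

    /*
     * Illustrative only: a larger chunk size means fewer bus writes for
     * the same image.  write_block() stands in for the driver's real
     * I/O routine.
     */
    #include <linux/kernel.h>
    #include <linux/types.h>

    #define DEMO_CHUNK_SIZE 16384

    static void demo_load_fw(const u8 *fw, size_t len,
                             void (*write_block)(const u8 *buf, size_t len))
    {
            size_t off;

            for (off = 0; off < len; off += DEMO_CHUNK_SIZE) {
                    size_t chunk = min_t(size_t, DEMO_CHUNK_SIZE, len - off);

                    write_block(fw + off, chunk);  /* one bus transaction */
            }
    }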
diff --git a/drivers/net/wireless/wl12xx/rx.c b/drivers/net/wireless/wl12xx/rx.c
index dee4cfe9ccc1..4fbd2a722ffa 100644
--- a/drivers/net/wireless/wl12xx/rx.c
+++ b/drivers/net/wireless/wl12xx/rx.c
@@ -25,9 +25,11 @@
#include <linux/sched.h>
#include "wl12xx.h"
+#include "debug.h"
#include "acx.h"
#include "reg.h"
#include "rx.h"
+#include "tx.h"
#include "io.h"
static u8 wl12xx_rx_get_mem_block(struct wl12xx_fw_status *status,
@@ -96,7 +98,7 @@ static void wl1271_rx_status(struct wl1271 *wl,
}
static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
- bool unaligned)
+ bool unaligned, u8 *hlid)
{
struct wl1271_rx_descriptor *desc;
struct sk_buff *skb;
@@ -159,6 +161,7 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
* payload aligned to 4 bytes.
*/
memcpy(buf, data + sizeof(*desc), length - sizeof(*desc));
+ *hlid = desc->hlid;
hdr = (struct ieee80211_hdr *)skb->data;
if (ieee80211_is_beacon(hdr->frame_control))
@@ -169,10 +172,10 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon);
seq_num = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
- wl1271_debug(DEBUG_RX, "rx skb 0x%p: %d B %s seq %d", skb,
+ wl1271_debug(DEBUG_RX, "rx skb 0x%p: %d B %s seq %d hlid %d", skb,
skb->len - desc->pad_len,
beacon ? "beacon" : "",
- seq_num);
+ seq_num, *hlid);
skb_trim(skb, skb->len - desc->pad_len);
@@ -185,6 +188,7 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
void wl12xx_rx(struct wl1271 *wl, struct wl12xx_fw_status *status)
{
struct wl1271_acx_mem_map *wl_mem_map = wl->target_mem_map;
+ unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0};
u32 buf_size;
u32 fw_rx_counter = status->fw_rx_counter & NUM_RX_PKT_DESC_MOD_MASK;
u32 drv_rx_counter = wl->rx_counter & NUM_RX_PKT_DESC_MOD_MASK;
@@ -192,8 +196,7 @@ void wl12xx_rx(struct wl1271 *wl, struct wl12xx_fw_status *status)
u32 mem_block;
u32 pkt_length;
u32 pkt_offset;
- bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
- bool had_data = false;
+ u8 hlid;
bool unaligned = false;
while (drv_rx_counter != fw_rx_counter) {
@@ -253,8 +256,15 @@ void wl12xx_rx(struct wl1271 *wl, struct wl12xx_fw_status *status)
*/
if (wl1271_rx_handle_data(wl,
wl->aggr_buf + pkt_offset,
- pkt_length, unaligned) == 1)
- had_data = true;
+ pkt_length, unaligned,
+ &hlid) == 1) {
+ if (hlid < WL12XX_MAX_LINKS)
+ __set_bit(hlid, active_hlids);
+ else
+ WARN(1,
+ "hlid exceeded WL12XX_MAX_LINKS "
+ "(%d)\n", hlid);
+ }
wl->rx_counter++;
drv_rx_counter++;
@@ -270,17 +280,5 @@ void wl12xx_rx(struct wl1271 *wl, struct wl12xx_fw_status *status)
if (wl->quirks & WL12XX_QUIRK_END_OF_TRANSACTION)
wl1271_write32(wl, RX_DRIVER_COUNTER_ADDRESS, wl->rx_counter);
- if (!is_ap && wl->conf.rx_streaming.interval && had_data &&
- (wl->conf.rx_streaming.always ||
- test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))) {
- u32 timeout = wl->conf.rx_streaming.duration;
-
- /* restart rx streaming */
- if (!test_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags))
- ieee80211_queue_work(wl->hw,
- &wl->rx_streaming_enable_work);
-
- mod_timer(&wl->rx_streaming_timer,
- jiffies + msecs_to_jiffies(timeout));
- }
+ wl12xx_rearm_rx_streaming(wl, active_hlids);
}
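Instead of a single had_data flag, the RX path above now records which link (HLID) each frame came from in a bitmap sized for WL12XX_MAX_LINKS and hands it to wl12xx_rearm_rx_streaming(). A sketch of that bookkeeping pattern, with DEMO_MAX_LINKS as an assumed stand-in for the driver's constant:

    /*
     * Sketch of the active-link bitmap bookkeeping (illustrative only).
     */
    #include <linux/bitops.h>
    #include <linux/printk.h>
    #include <linux/types.h>

    #define DEMO_MAX_LINKS 12

    static void demo_collect_active_links(const u8 *hlids, int n)
    {
            unsigned long active[BITS_TO_LONGS(DEMO_MAX_LINKS)] = { 0 };
            int i;

            for (i = 0; i < n; i++)
                    if (hlids[i] < DEMO_MAX_LINKS)
                            __set_bit(hlids[i], active);

            /* consumers then walk the bitmap, e.g. to re-arm rx streaming */
            for_each_set_bit(i, active, DEMO_MAX_LINKS)
                    pr_debug("link %d saw rx traffic\n", i);
    }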
diff --git a/drivers/net/wireless/wl12xx/scan.c b/drivers/net/wireless/wl12xx/scan.c
index fc29c671cf3b..e24111ececc5 100644
--- a/drivers/net/wireless/wl12xx/scan.c
+++ b/drivers/net/wireless/wl12xx/scan.c
@@ -24,6 +24,7 @@
#include <linux/ieee80211.h>
#include "wl12xx.h"
+#include "debug.h"
#include "cmd.h"
#include "scan.h"
#include "acx.h"
@@ -34,6 +35,8 @@ void wl1271_scan_complete_work(struct work_struct *work)
{
struct delayed_work *dwork;
struct wl1271 *wl;
+ struct ieee80211_vif *vif;
+ struct wl12xx_vif *wlvif;
int ret;
bool is_sta, is_ibss;
@@ -50,28 +53,31 @@ void wl1271_scan_complete_work(struct work_struct *work)
if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
goto out;
+ vif = wl->scan_vif;
+ wlvif = wl12xx_vif_to_data(vif);
+
wl->scan.state = WL1271_SCAN_STATE_IDLE;
memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
wl->scan.req = NULL;
+ wl->scan_vif = NULL;
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
- if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) {
+ if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
/* restore hardware connection monitoring template */
- wl1271_cmd_build_ap_probe_req(wl, wl->probereq);
+ wl1271_cmd_build_ap_probe_req(wl, wlvif, wlvif->probereq);
}
/* return to ROC if needed */
- is_sta = (wl->bss_type == BSS_TYPE_STA_BSS);
- is_ibss = (wl->bss_type == BSS_TYPE_IBSS);
- if (((is_sta && !test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) ||
- (is_ibss && !test_bit(WL1271_FLAG_IBSS_JOINED, &wl->flags))) &&
- !test_bit(wl->dev_role_id, wl->roc_map)) {
+ is_sta = (wlvif->bss_type == BSS_TYPE_STA_BSS);
+ is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
+ if (((is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) ||
+ (is_ibss && !test_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))) &&
+ !test_bit(wlvif->dev_role_id, wl->roc_map)) {
/* restore remain on channel */
- wl12xx_cmd_role_start_dev(wl);
- wl12xx_roc(wl, wl->dev_role_id);
+ wl12xx_start_dev(wl, wlvif);
}
wl1271_ps_elp_sleep(wl);
@@ -155,9 +161,11 @@ static int wl1271_get_scan_channels(struct wl1271 *wl,
#define WL1271_NOTHING_TO_SCAN 1
-static int wl1271_scan_send(struct wl1271 *wl, enum ieee80211_band band,
- bool passive, u32 basic_rate)
+static int wl1271_scan_send(struct wl1271 *wl, struct ieee80211_vif *vif,
+ enum ieee80211_band band,
+ bool passive, u32 basic_rate)
{
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
struct wl1271_cmd_scan *cmd;
struct wl1271_cmd_trigger_scan_to *trigger;
int ret;
@@ -177,11 +185,11 @@ static int wl1271_scan_send(struct wl1271 *wl, enum ieee80211_band band,
if (passive)
scan_options |= WL1271_SCAN_OPT_PASSIVE;
- if (WARN_ON(wl->role_id == WL12XX_INVALID_ROLE_ID)) {
+ if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID)) {
ret = -EINVAL;
goto out;
}
- cmd->params.role_id = wl->role_id;
+ cmd->params.role_id = wlvif->role_id;
cmd->params.scan_options = cpu_to_le16(scan_options);
cmd->params.n_ch = wl1271_get_scan_channels(wl, wl->scan.req,
@@ -194,7 +202,6 @@ static int wl1271_scan_send(struct wl1271 *wl, enum ieee80211_band band,
cmd->params.tx_rate = cpu_to_le32(basic_rate);
cmd->params.n_probe_reqs = wl->conf.scan.num_probe_reqs;
- cmd->params.tx_rate = cpu_to_le32(basic_rate);
cmd->params.tid_trigger = 0;
cmd->params.scan_tag = WL1271_SCAN_DEFAULT_TAG;
@@ -208,11 +215,11 @@ static int wl1271_scan_send(struct wl1271 *wl, enum ieee80211_band band,
memcpy(cmd->params.ssid, wl->scan.ssid, wl->scan.ssid_len);
}
- memcpy(cmd->addr, wl->mac_addr, ETH_ALEN);
+ memcpy(cmd->addr, vif->addr, ETH_ALEN);
- ret = wl1271_cmd_build_probe_req(wl, wl->scan.ssid, wl->scan.ssid_len,
- wl->scan.req->ie, wl->scan.req->ie_len,
- band);
+ ret = wl1271_cmd_build_probe_req(wl, wlvif, wl->scan.ssid,
+ wl->scan.ssid_len, wl->scan.req->ie,
+ wl->scan.req->ie_len, band);
if (ret < 0) {
wl1271_error("PROBE request template failed");
goto out;
@@ -241,11 +248,12 @@ out:
return ret;
}
-void wl1271_scan_stm(struct wl1271 *wl)
+void wl1271_scan_stm(struct wl1271 *wl, struct ieee80211_vif *vif)
{
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
int ret = 0;
enum ieee80211_band band;
- u32 rate;
+ u32 rate, mask;
switch (wl->scan.state) {
case WL1271_SCAN_STATE_IDLE:
@@ -253,47 +261,59 @@ void wl1271_scan_stm(struct wl1271 *wl)
case WL1271_SCAN_STATE_2GHZ_ACTIVE:
band = IEEE80211_BAND_2GHZ;
- rate = wl1271_tx_min_rate_get(wl, wl->bitrate_masks[band]);
- ret = wl1271_scan_send(wl, band, false, rate);
+ mask = wlvif->bitrate_masks[band];
+ if (wl->scan.req->no_cck) {
+ mask &= ~CONF_TX_CCK_RATES;
+ if (!mask)
+ mask = CONF_TX_RATE_MASK_BASIC_P2P;
+ }
+ rate = wl1271_tx_min_rate_get(wl, mask);
+ ret = wl1271_scan_send(wl, vif, band, false, rate);
if (ret == WL1271_NOTHING_TO_SCAN) {
wl->scan.state = WL1271_SCAN_STATE_2GHZ_PASSIVE;
- wl1271_scan_stm(wl);
+ wl1271_scan_stm(wl, vif);
}
break;
case WL1271_SCAN_STATE_2GHZ_PASSIVE:
band = IEEE80211_BAND_2GHZ;
- rate = wl1271_tx_min_rate_get(wl, wl->bitrate_masks[band]);
- ret = wl1271_scan_send(wl, band, true, rate);
+ mask = wlvif->bitrate_masks[band];
+ if (wl->scan.req->no_cck) {
+ mask &= ~CONF_TX_CCK_RATES;
+ if (!mask)
+ mask = CONF_TX_RATE_MASK_BASIC_P2P;
+ }
+ rate = wl1271_tx_min_rate_get(wl, mask);
+ ret = wl1271_scan_send(wl, vif, band, true, rate);
if (ret == WL1271_NOTHING_TO_SCAN) {
if (wl->enable_11a)
wl->scan.state = WL1271_SCAN_STATE_5GHZ_ACTIVE;
else
wl->scan.state = WL1271_SCAN_STATE_DONE;
- wl1271_scan_stm(wl);
+ wl1271_scan_stm(wl, vif);
}
break;
case WL1271_SCAN_STATE_5GHZ_ACTIVE:
band = IEEE80211_BAND_5GHZ;
- rate = wl1271_tx_min_rate_get(wl, wl->bitrate_masks[band]);
- ret = wl1271_scan_send(wl, band, false, rate);
+ rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]);
+ ret = wl1271_scan_send(wl, vif, band, false, rate);
if (ret == WL1271_NOTHING_TO_SCAN) {
wl->scan.state = WL1271_SCAN_STATE_5GHZ_PASSIVE;
- wl1271_scan_stm(wl);
+ wl1271_scan_stm(wl, vif);
}
break;
case WL1271_SCAN_STATE_5GHZ_PASSIVE:
band = IEEE80211_BAND_5GHZ;
- rate = wl1271_tx_min_rate_get(wl, wl->bitrate_masks[band]);
- ret = wl1271_scan_send(wl, band, true, rate);
+ rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]);
+ ret = wl1271_scan_send(wl, vif, band, true, rate);
if (ret == WL1271_NOTHING_TO_SCAN) {
wl->scan.state = WL1271_SCAN_STATE_DONE;
- wl1271_scan_stm(wl);
+ wl1271_scan_stm(wl, vif);
}
break;
@@ -317,7 +337,8 @@ void wl1271_scan_stm(struct wl1271 *wl)
}
}
-int wl1271_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len,
+int wl1271_scan(struct wl1271 *wl, struct ieee80211_vif *vif,
+ const u8 *ssid, size_t ssid_len,
struct cfg80211_scan_request *req)
{
/*
@@ -338,6 +359,7 @@ int wl1271_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len,
wl->scan.ssid_len = 0;
}
+ wl->scan_vif = vif;
wl->scan.req = req;
memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
@@ -346,7 +368,7 @@ int wl1271_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len,
ieee80211_queue_delayed_work(wl->hw, &wl->scan_complete_work,
msecs_to_jiffies(WL1271_SCAN_TIMEOUT));
- wl1271_scan_stm(wl);
+ wl1271_scan_stm(wl, vif);
return 0;
}
@@ -415,18 +437,19 @@ wl1271_scan_get_sched_scan_channels(struct wl1271 *wl,
if (flags & IEEE80211_CHAN_RADAR) {
channels[j].flags |= SCAN_CHANNEL_FLAGS_DFS;
+
channels[j].passive_duration =
cpu_to_le16(c->dwell_time_dfs);
- }
- else if (flags & IEEE80211_CHAN_PASSIVE_SCAN) {
+ } else {
channels[j].passive_duration =
cpu_to_le16(c->dwell_time_passive);
- } else {
- channels[j].min_duration =
- cpu_to_le16(c->min_dwell_time_active);
- channels[j].max_duration =
- cpu_to_le16(c->max_dwell_time_active);
}
+
+ channels[j].min_duration =
+ cpu_to_le16(c->min_dwell_time_active);
+ channels[j].max_duration =
+ cpu_to_le16(c->max_dwell_time_active);
+
channels[j].tx_power_att = req->channels[i]->max_power;
channels[j].channel = req->channels[i]->hw_value;
@@ -550,6 +573,9 @@ wl12xx_scan_sched_scan_ssid_list(struct wl1271 *wl,
* so they're used in probe requests.
*/
for (i = 0; i < req->n_ssids; i++) {
+ if (!req->ssids[i].ssid_len)
+ continue;
+
for (j = 0; j < cmd->n_ssids; j++)
if (!memcmp(req->ssids[i].ssid,
cmd->ssids[j].ssid,
@@ -585,6 +611,7 @@ out:
}
int wl1271_scan_sched_scan_config(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
struct cfg80211_sched_scan_request *req,
struct ieee80211_sched_scan_ies *ies)
{
@@ -631,7 +658,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
}
if (!force_passive && cfg->active[0]) {
- ret = wl1271_cmd_build_probe_req(wl, req->ssids[0].ssid,
+ ret = wl1271_cmd_build_probe_req(wl, wlvif, req->ssids[0].ssid,
req->ssids[0].ssid_len,
ies->ie[IEEE80211_BAND_2GHZ],
ies->len[IEEE80211_BAND_2GHZ],
@@ -643,7 +670,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
}
if (!force_passive && cfg->active[1]) {
- ret = wl1271_cmd_build_probe_req(wl, req->ssids[0].ssid,
+ ret = wl1271_cmd_build_probe_req(wl, wlvif, req->ssids[0].ssid,
req->ssids[0].ssid_len,
ies->ie[IEEE80211_BAND_5GHZ],
ies->len[IEEE80211_BAND_5GHZ],
@@ -667,17 +694,17 @@ out:
return ret;
}
-int wl1271_scan_sched_scan_start(struct wl1271 *wl)
+int wl1271_scan_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct wl1271_cmd_sched_scan_start *start;
int ret = 0;
wl1271_debug(DEBUG_CMD, "cmd periodic scan start");
- if (wl->bss_type != BSS_TYPE_STA_BSS)
+ if (wlvif->bss_type != BSS_TYPE_STA_BSS)
return -EOPNOTSUPP;
- if (!test_bit(WL1271_FLAG_IDLE, &wl->flags))
+ if (test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags))
return -EBUSY;
start = kzalloc(sizeof(*start), GFP_KERNEL);
@@ -727,7 +754,6 @@ void wl1271_scan_sched_scan_stop(struct wl1271 *wl)
wl1271_error("failed to send sched scan stop command");
goto out_free;
}
- wl->sched_scanning = false;
out_free:
kfree(stop);
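The scan state machine above now derives the probe-request rate from the per-vif bitrate mask, and for no-CCK (P2P) scans it strips the CCK rates before picking the minimum, falling back to the P2P basic rate if nothing is left. A sketch of that selection; the bit values are assumptions for illustration, not the driver's CONF_TX_CCK_RATES or CONF_TX_RATE_MASK_BASIC_P2P definitions:

    /*
     * Sketch of the no_cck rate selection (assumed bit layout).
     */
    #include <linux/types.h>

    #define DEMO_CCK_RATES  0x0000000f      /* assumed 1/2/5.5/11 Mbps bits */
    #define DEMO_P2P_BASIC  0x00000010      /* assumed lowest OFDM rate bit */

    static u32 demo_scan_rate(u32 band_mask, bool no_cck)
    {
            u32 mask = band_mask;

            if (no_cck) {
                    mask &= ~DEMO_CCK_RATES;        /* P2P forbids CCK rates */
                    if (!mask)
                            mask = DEMO_P2P_BASIC;  /* fall back to OFDM basic */
            }

            /* lowest enabled rate, as wl1271_tx_min_rate_get() would pick */
            return mask & -mask;
    }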
diff --git a/drivers/net/wireless/wl12xx/scan.h b/drivers/net/wireless/wl12xx/scan.h
index 92115156522f..a7ed43dc08c9 100644
--- a/drivers/net/wireless/wl12xx/scan.h
+++ b/drivers/net/wireless/wl12xx/scan.h
@@ -26,18 +26,20 @@
#include "wl12xx.h"
-int wl1271_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len,
+int wl1271_scan(struct wl1271 *wl, struct ieee80211_vif *vif,
+ const u8 *ssid, size_t ssid_len,
struct cfg80211_scan_request *req);
int wl1271_scan_stop(struct wl1271 *wl);
int wl1271_scan_build_probe_req(struct wl1271 *wl,
const u8 *ssid, size_t ssid_len,
const u8 *ie, size_t ie_len, u8 band);
-void wl1271_scan_stm(struct wl1271 *wl);
+void wl1271_scan_stm(struct wl1271 *wl, struct ieee80211_vif *vif);
void wl1271_scan_complete_work(struct work_struct *work);
int wl1271_scan_sched_scan_config(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
struct cfg80211_sched_scan_request *req,
struct ieee80211_sched_scan_ies *ies);
-int wl1271_scan_sched_scan_start(struct wl1271 *wl);
+int wl1271_scan_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif);
void wl1271_scan_sched_scan_stop(struct wl1271 *wl);
void wl1271_scan_sched_scan_results(struct wl1271 *wl);
diff --git a/drivers/net/wireless/wl12xx/sdio.c b/drivers/net/wireless/wl12xx/sdio.c
index 516a8980723c..468a50553fac 100644
--- a/drivers/net/wireless/wl12xx/sdio.c
+++ b/drivers/net/wireless/wl12xx/sdio.c
@@ -24,6 +24,7 @@
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
+#include <linux/platform_device.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/card.h>
@@ -44,107 +45,67 @@
#define SDIO_DEVICE_ID_TI_WL1271 0x4076
#endif
+struct wl12xx_sdio_glue {
+ struct device *dev;
+ struct platform_device *core;
+};
+
static const struct sdio_device_id wl1271_devices[] __devinitconst = {
{ SDIO_DEVICE(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271) },
{}
};
MODULE_DEVICE_TABLE(sdio, wl1271_devices);
-static void wl1271_sdio_set_block_size(struct wl1271 *wl, unsigned int blksz)
-{
- sdio_claim_host(wl->if_priv);
- sdio_set_block_size(wl->if_priv, blksz);
- sdio_release_host(wl->if_priv);
-}
-
-static inline struct sdio_func *wl_to_func(struct wl1271 *wl)
-{
- return wl->if_priv;
-}
-
-static struct device *wl1271_sdio_wl_to_dev(struct wl1271 *wl)
-{
- return &(wl_to_func(wl)->dev);
-}
-
-static irqreturn_t wl1271_hardirq(int irq, void *cookie)
+static void wl1271_sdio_set_block_size(struct device *child,
+ unsigned int blksz)
{
- struct wl1271 *wl = cookie;
- unsigned long flags;
+ struct wl12xx_sdio_glue *glue = dev_get_drvdata(child->parent);
+ struct sdio_func *func = dev_to_sdio_func(glue->dev);
- wl1271_debug(DEBUG_IRQ, "IRQ");
-
- /* complete the ELP completion */
- spin_lock_irqsave(&wl->wl_lock, flags);
- set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
- if (wl->elp_compl) {
- complete(wl->elp_compl);
- wl->elp_compl = NULL;
- }
-
- if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
- /* don't enqueue a work right now. mark it as pending */
- set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
- wl1271_debug(DEBUG_IRQ, "should not enqueue work");
- disable_irq_nosync(wl->irq);
- pm_wakeup_event(wl1271_sdio_wl_to_dev(wl), 0);
- spin_unlock_irqrestore(&wl->wl_lock, flags);
- return IRQ_HANDLED;
- }
- spin_unlock_irqrestore(&wl->wl_lock, flags);
-
- return IRQ_WAKE_THREAD;
-}
-
-static void wl1271_sdio_disable_interrupts(struct wl1271 *wl)
-{
- disable_irq(wl->irq);
-}
-
-static void wl1271_sdio_enable_interrupts(struct wl1271 *wl)
-{
- enable_irq(wl->irq);
+ sdio_claim_host(func);
+ sdio_set_block_size(func, blksz);
+ sdio_release_host(func);
}
-static void wl1271_sdio_raw_read(struct wl1271 *wl, int addr, void *buf,
+static void wl12xx_sdio_raw_read(struct device *child, int addr, void *buf,
size_t len, bool fixed)
{
int ret;
- struct sdio_func *func = wl_to_func(wl);
+ struct wl12xx_sdio_glue *glue = dev_get_drvdata(child->parent);
+ struct sdio_func *func = dev_to_sdio_func(glue->dev);
if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) {
((u8 *)buf)[0] = sdio_f0_readb(func, addr, &ret);
- wl1271_debug(DEBUG_SDIO, "sdio read 52 addr 0x%x, byte 0x%02x",
- addr, ((u8 *)buf)[0]);
+ dev_dbg(child->parent, "sdio read 52 addr 0x%x, byte 0x%02x\n",
+ addr, ((u8 *)buf)[0]);
} else {
if (fixed)
ret = sdio_readsb(func, buf, addr, len);
else
ret = sdio_memcpy_fromio(func, buf, addr, len);
- wl1271_debug(DEBUG_SDIO, "sdio read 53 addr 0x%x, %zu bytes",
- addr, len);
- wl1271_dump_ascii(DEBUG_SDIO, "data: ", buf, len);
+ dev_dbg(child->parent, "sdio read 53 addr 0x%x, %zu bytes\n",
+ addr, len);
}
if (ret)
- wl1271_error("sdio read failed (%d)", ret);
+ dev_err(child->parent, "sdio read failed (%d)\n", ret);
}
-static void wl1271_sdio_raw_write(struct wl1271 *wl, int addr, void *buf,
+static void wl12xx_sdio_raw_write(struct device *child, int addr, void *buf,
size_t len, bool fixed)
{
int ret;
- struct sdio_func *func = wl_to_func(wl);
+ struct wl12xx_sdio_glue *glue = dev_get_drvdata(child->parent);
+ struct sdio_func *func = dev_to_sdio_func(glue->dev);
if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) {
sdio_f0_writeb(func, ((u8 *)buf)[0], addr, &ret);
- wl1271_debug(DEBUG_SDIO, "sdio write 52 addr 0x%x, byte 0x%02x",
- addr, ((u8 *)buf)[0]);
+ dev_dbg(child->parent, "sdio write 52 addr 0x%x, byte 0x%02x\n",
+ addr, ((u8 *)buf)[0]);
} else {
- wl1271_debug(DEBUG_SDIO, "sdio write 53 addr 0x%x, %zu bytes",
- addr, len);
- wl1271_dump_ascii(DEBUG_SDIO, "data: ", buf, len);
+ dev_dbg(child->parent, "sdio write 53 addr 0x%x, %zu bytes\n",
+ addr, len);
if (fixed)
ret = sdio_writesb(func, addr, buf, len);
@@ -153,13 +114,13 @@ static void wl1271_sdio_raw_write(struct wl1271 *wl, int addr, void *buf,
}
if (ret)
- wl1271_error("sdio write failed (%d)", ret);
+ dev_err(child->parent, "sdio write failed (%d)\n", ret);
}
-static int wl1271_sdio_power_on(struct wl1271 *wl)
+static int wl12xx_sdio_power_on(struct wl12xx_sdio_glue *glue)
{
- struct sdio_func *func = wl_to_func(wl);
int ret;
+ struct sdio_func *func = dev_to_sdio_func(glue->dev);
/* If enabled, tell runtime PM not to power off the card */
if (pm_runtime_enabled(&func->dev)) {
@@ -180,10 +141,10 @@ out:
return ret;
}
-static int wl1271_sdio_power_off(struct wl1271 *wl)
+static int wl12xx_sdio_power_off(struct wl12xx_sdio_glue *glue)
{
- struct sdio_func *func = wl_to_func(wl);
int ret;
+ struct sdio_func *func = dev_to_sdio_func(glue->dev);
sdio_disable_func(func);
sdio_release_host(func);
@@ -200,46 +161,43 @@ static int wl1271_sdio_power_off(struct wl1271 *wl)
return ret;
}
-static int wl1271_sdio_set_power(struct wl1271 *wl, bool enable)
+static int wl12xx_sdio_set_power(struct device *child, bool enable)
{
+ struct wl12xx_sdio_glue *glue = dev_get_drvdata(child->parent);
+
if (enable)
- return wl1271_sdio_power_on(wl);
+ return wl12xx_sdio_power_on(glue);
else
- return wl1271_sdio_power_off(wl);
+ return wl12xx_sdio_power_off(glue);
}
static struct wl1271_if_operations sdio_ops = {
- .read = wl1271_sdio_raw_read,
- .write = wl1271_sdio_raw_write,
- .power = wl1271_sdio_set_power,
- .dev = wl1271_sdio_wl_to_dev,
- .enable_irq = wl1271_sdio_enable_interrupts,
- .disable_irq = wl1271_sdio_disable_interrupts,
+ .read = wl12xx_sdio_raw_read,
+ .write = wl12xx_sdio_raw_write,
+ .power = wl12xx_sdio_set_power,
.set_block_size = wl1271_sdio_set_block_size,
};
static int __devinit wl1271_probe(struct sdio_func *func,
const struct sdio_device_id *id)
{
- struct ieee80211_hw *hw;
- const struct wl12xx_platform_data *wlan_data;
- struct wl1271 *wl;
- unsigned long irqflags;
+ struct wl12xx_platform_data *wlan_data;
+ struct wl12xx_sdio_glue *glue;
+ struct resource res[1];
mmc_pm_flag_t mmcflags;
- int ret;
+ int ret = -ENOMEM;
/* We are only able to handle the wlan function */
if (func->num != 0x02)
return -ENODEV;
- hw = wl1271_alloc_hw();
- if (IS_ERR(hw))
- return PTR_ERR(hw);
-
- wl = hw->priv;
+ glue = kzalloc(sizeof(*glue), GFP_KERNEL);
+ if (!glue) {
+ dev_err(&func->dev, "can't allocate glue\n");
+ goto out;
+ }
- wl->if_priv = func;
- wl->if_ops = &sdio_ops;
+ glue->dev = &func->dev;
/* Grab access to FN0 for ELP reg. */
func->card->quirks |= MMC_QUIRK_LENIENT_FN0;
@@ -250,80 +208,79 @@ static int __devinit wl1271_probe(struct sdio_func *func,
wlan_data = wl12xx_get_platform_data();
if (IS_ERR(wlan_data)) {
ret = PTR_ERR(wlan_data);
- wl1271_error("missing wlan platform data: %d", ret);
- goto out_free;
+ dev_err(glue->dev, "missing wlan platform data: %d\n", ret);
+ goto out_free_glue;
}
- wl->irq = wlan_data->irq;
- wl->ref_clock = wlan_data->board_ref_clock;
- wl->tcxo_clock = wlan_data->board_tcxo_clock;
- wl->platform_quirks = wlan_data->platform_quirks;
+ /* if sdio can keep power while host is suspended, enable wow */
+ mmcflags = sdio_get_host_pm_caps(func);
+ dev_dbg(glue->dev, "sdio PM caps = 0x%x\n", mmcflags);
- if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
- irqflags = IRQF_TRIGGER_RISING;
- else
- irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
-
- ret = request_threaded_irq(wl->irq, wl1271_hardirq, wl1271_irq,
- irqflags,
- DRIVER_NAME, wl);
- if (ret < 0) {
- wl1271_error("request_irq() failed: %d", ret);
- goto out_free;
- }
+ if (mmcflags & MMC_PM_KEEP_POWER)
+ wlan_data->pwr_in_suspend = true;
+
+ wlan_data->ops = &sdio_ops;
- ret = enable_irq_wake(wl->irq);
- if (!ret) {
- wl->irq_wake_enabled = true;
- device_init_wakeup(wl1271_sdio_wl_to_dev(wl), 1);
+ sdio_set_drvdata(func, glue);
- /* if sdio can keep power while host is suspended, enable wow */
- mmcflags = sdio_get_host_pm_caps(func);
- wl1271_debug(DEBUG_SDIO, "sdio PM caps = 0x%x", mmcflags);
+ /* Tell PM core that we don't need the card to be powered now */
+ pm_runtime_put_noidle(&func->dev);
- if (mmcflags & MMC_PM_KEEP_POWER)
- hw->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY;
+ glue->core = platform_device_alloc("wl12xx", -1);
+ if (!glue->core) {
+ dev_err(glue->dev, "can't allocate platform_device");
+ ret = -ENOMEM;
+ goto out_free_glue;
}
- disable_irq(wl->irq);
- ret = wl1271_init_ieee80211(wl);
- if (ret)
- goto out_irq;
+ glue->core->dev.parent = &func->dev;
- ret = wl1271_register_hw(wl);
- if (ret)
- goto out_irq;
+ memset(res, 0x00, sizeof(res));
- sdio_set_drvdata(func, wl);
+ res[0].start = wlan_data->irq;
+ res[0].flags = IORESOURCE_IRQ;
+ res[0].name = "irq";
- /* Tell PM core that we don't need the card to be powered now */
- pm_runtime_put_noidle(&func->dev);
+ ret = platform_device_add_resources(glue->core, res, ARRAY_SIZE(res));
+ if (ret) {
+ dev_err(glue->dev, "can't add resources\n");
+ goto out_dev_put;
+ }
+ ret = platform_device_add_data(glue->core, wlan_data,
+ sizeof(*wlan_data));
+ if (ret) {
+ dev_err(glue->dev, "can't add platform data\n");
+ goto out_dev_put;
+ }
+
+ ret = platform_device_add(glue->core);
+ if (ret) {
+ dev_err(glue->dev, "can't add platform device\n");
+ goto out_dev_put;
+ }
return 0;
- out_irq:
- free_irq(wl->irq, wl);
+out_dev_put:
+ platform_device_put(glue->core);
- out_free:
- wl1271_free_hw(wl);
+out_free_glue:
+ kfree(glue);
+out:
return ret;
}
static void __devexit wl1271_remove(struct sdio_func *func)
{
- struct wl1271 *wl = sdio_get_drvdata(func);
+ struct wl12xx_sdio_glue *glue = sdio_get_drvdata(func);
/* Undo decrement done above in wl1271_probe */
pm_runtime_get_noresume(&func->dev);
- wl1271_unregister_hw(wl);
- if (wl->irq_wake_enabled) {
- device_init_wakeup(wl1271_sdio_wl_to_dev(wl), 0);
- disable_irq_wake(wl->irq);
- }
- free_irq(wl->irq, wl);
- wl1271_free_hw(wl);
+ platform_device_del(glue->core);
+ platform_device_put(glue->core);
+ kfree(glue);
}
#ifdef CONFIG_PM
@@ -332,20 +289,21 @@ static int wl1271_suspend(struct device *dev)
/* Tell MMC/SDIO core it's OK to power down the card
* (if it isn't already), but not to remove it completely */
struct sdio_func *func = dev_to_sdio_func(dev);
- struct wl1271 *wl = sdio_get_drvdata(func);
+ struct wl12xx_sdio_glue *glue = sdio_get_drvdata(func);
+ struct wl1271 *wl = platform_get_drvdata(glue->core);
mmc_pm_flag_t sdio_flags;
int ret = 0;
- wl1271_debug(DEBUG_MAC80211, "wl1271 suspend. wow_enabled: %d",
- wl->wow_enabled);
+ dev_dbg(dev, "wl1271 suspend. wow_enabled: %d\n",
+ wl->wow_enabled);
/* check whether sdio should keep power */
if (wl->wow_enabled) {
sdio_flags = sdio_get_host_pm_caps(func);
if (!(sdio_flags & MMC_PM_KEEP_POWER)) {
- wl1271_error("can't keep power while host "
- "is suspended");
+ dev_err(dev, "can't keep power while host "
+ "is suspended\n");
ret = -EINVAL;
goto out;
}
@@ -353,7 +311,7 @@ static int wl1271_suspend(struct device *dev)
/* keep power while host suspended */
ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
if (ret) {
- wl1271_error("error while trying to keep power");
+ dev_err(dev, "error while trying to keep power\n");
goto out;
}
@@ -367,9 +325,10 @@ out:
static int wl1271_resume(struct device *dev)
{
struct sdio_func *func = dev_to_sdio_func(dev);
- struct wl1271 *wl = sdio_get_drvdata(func);
+ struct wl12xx_sdio_glue *glue = sdio_get_drvdata(func);
+ struct wl1271 *wl = platform_get_drvdata(glue->core);
- wl1271_debug(DEBUG_MAC80211, "wl1271 resume");
+ dev_dbg(dev, "wl1271 resume\n");
if (wl->wow_enabled) {
/* claim back host */
sdio_claim_host(func);
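The SDIO probe above no longer allocates the ieee80211_hw itself; it keeps only a small wl12xx_sdio_glue and spawns a "wl12xx" child platform device, passing the IRQ as a resource and the platform data through platform_device_add_data(), so the core platform driver registered earlier in this patch binds to it. A minimal sketch of that bus-glue pattern (demo_spawn_core() and its arguments are illustrative):

    /*
     * Minimal sketch of the bus-glue pattern: a parent bus driver
     * publishes a child "wl12xx" platform device for the core driver
     * to bind to.
     */
    #include <linux/platform_device.h>
    #include <linux/ioport.h>

    static struct platform_device *demo_spawn_core(struct device *parent,
                                                   int irq, void *pdata,
                                                   size_t pdata_len)
    {
            struct resource res = {
                    .start = irq,
                    .flags = IORESOURCE_IRQ,
                    .name  = "irq",
            };
            struct platform_device *core;
            int ret;

            core = platform_device_alloc("wl12xx", -1);
            if (!core)
                    return NULL;

            core->dev.parent = parent;

            ret = platform_device_add_resources(core, &res, 1);
            if (!ret)
                    ret = platform_device_add_data(core, pdata, pdata_len);
            if (!ret)
                    ret = platform_device_add(core);
            if (ret) {
                    platform_device_put(core);
                    return NULL;
            }

            return core;
    }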
diff --git a/drivers/net/wireless/wl12xx/sdio_test.c b/drivers/net/wireless/wl12xx/sdio_test.c
deleted file mode 100644
index f25d5d9212e7..000000000000
--- a/drivers/net/wireless/wl12xx/sdio_test.c
+++ /dev/null
@@ -1,543 +0,0 @@
-/*
- * SDIO testing driver for wl12xx
- *
- * Copyright (C) 2010 Nokia Corporation
- *
- * Contact: Roger Quadros <roger.quadros@nokia.com>
- *
- * wl12xx read/write routines taken from the main module
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- */
-
-#include <linux/irq.h>
-#include <linux/module.h>
-#include <linux/crc7.h>
-#include <linux/vmalloc.h>
-#include <linux/mmc/sdio_func.h>
-#include <linux/mmc/sdio_ids.h>
-#include <linux/mmc/card.h>
-#include <linux/mmc/host.h>
-#include <linux/gpio.h>
-#include <linux/wl12xx.h>
-#include <linux/kthread.h>
-#include <linux/firmware.h>
-#include <linux/pm_runtime.h>
-
-#include "wl12xx.h"
-#include "io.h"
-#include "boot.h"
-
-#ifndef SDIO_VENDOR_ID_TI
-#define SDIO_VENDOR_ID_TI 0x0097
-#endif
-
-#ifndef SDIO_DEVICE_ID_TI_WL1271
-#define SDIO_DEVICE_ID_TI_WL1271 0x4076
-#endif
-
-static bool rx, tx;
-
-module_param(rx, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(rx, "Perform rx test. Default (0). "
- "This test continuously reads data from the SDIO device.\n");
-
-module_param(tx, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(tx, "Perform tx test. Default (0). "
- "This test continuously writes data to the SDIO device.\n");
-
-struct wl1271_test {
- struct wl1271 wl;
- struct task_struct *test_task;
-};
-
-static const struct sdio_device_id wl1271_devices[] = {
- { SDIO_DEVICE(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271) },
- {}
-};
-
-static inline struct sdio_func *wl_to_func(struct wl1271 *wl)
-{
- return wl->if_priv;
-}
-
-static struct device *wl1271_sdio_wl_to_dev(struct wl1271 *wl)
-{
- return &(wl_to_func(wl)->dev);
-}
-
-static void wl1271_sdio_raw_read(struct wl1271 *wl, int addr, void *buf,
- size_t len, bool fixed)
-{
- int ret = 0;
- struct sdio_func *func = wl_to_func(wl);
-
- if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) {
- ((u8 *)buf)[0] = sdio_f0_readb(func, addr, &ret);
- wl1271_debug(DEBUG_SDIO, "sdio read 52 addr 0x%x, byte 0x%02x",
- addr, ((u8 *)buf)[0]);
- } else {
- if (fixed)
- ret = sdio_readsb(func, buf, addr, len);
- else
- ret = sdio_memcpy_fromio(func, buf, addr, len);
-
- wl1271_debug(DEBUG_SDIO, "sdio read 53 addr 0x%x, %zu bytes",
- addr, len);
- wl1271_dump_ascii(DEBUG_SDIO, "data: ", buf, len);
- }
-
- if (ret)
- wl1271_error("sdio read failed (%d)", ret);
-}
-
-static void wl1271_sdio_raw_write(struct wl1271 *wl, int addr, void *buf,
- size_t len, bool fixed)
-{
- int ret = 0;
- struct sdio_func *func = wl_to_func(wl);
-
- if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) {
- sdio_f0_writeb(func, ((u8 *)buf)[0], addr, &ret);
- wl1271_debug(DEBUG_SDIO, "sdio write 52 addr 0x%x, byte 0x%02x",
- addr, ((u8 *)buf)[0]);
- } else {
- wl1271_debug(DEBUG_SDIO, "sdio write 53 addr 0x%x, %zu bytes",
- addr, len);
- wl1271_dump_ascii(DEBUG_SDIO, "data: ", buf, len);
-
- if (fixed)
- ret = sdio_writesb(func, addr, buf, len);
- else
- ret = sdio_memcpy_toio(func, addr, buf, len);
- }
- if (ret)
- wl1271_error("sdio write failed (%d)", ret);
-
-}
-
-static int wl1271_sdio_set_power(struct wl1271 *wl, bool enable)
-{
- struct sdio_func *func = wl_to_func(wl);
- int ret;
-
- /* Let the SDIO stack handle wlan_enable control, so we
- * keep host claimed while wlan is in use to keep wl1271
- * alive.
- */
- if (enable) {
- /* Power up the card */
- ret = pm_runtime_get_sync(&func->dev);
- if (ret < 0)
- goto out;
-
- /* Runtime PM might be disabled, power up the card manually */
- ret = mmc_power_restore_host(func->card->host);
- if (ret < 0)
- goto out;
-
- sdio_claim_host(func);
- sdio_enable_func(func);
- } else {
- sdio_disable_func(func);
- sdio_release_host(func);
-
- /* Runtime PM might be disabled, power off the card manually */
- ret = mmc_power_save_host(func->card->host);
- if (ret < 0)
- goto out;
-
- /* Power down the card */
- ret = pm_runtime_put_sync(&func->dev);
- }
-
-out:
- return ret;
-}
-
-static void wl1271_sdio_disable_interrupts(struct wl1271 *wl)
-{
-}
-
-static void wl1271_sdio_enable_interrupts(struct wl1271 *wl)
-{
-}
-
-
-static struct wl1271_if_operations sdio_ops = {
- .read = wl1271_sdio_raw_read,
- .write = wl1271_sdio_raw_write,
- .power = wl1271_sdio_set_power,
- .dev = wl1271_sdio_wl_to_dev,
- .enable_irq = wl1271_sdio_enable_interrupts,
- .disable_irq = wl1271_sdio_disable_interrupts,
-};
-
-static void wl1271_fw_wakeup(struct wl1271 *wl)
-{
- u32 elp_reg;
-
- elp_reg = ELPCTRL_WAKE_UP;
- wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, elp_reg);
-}
-
-static int wl1271_fetch_firmware(struct wl1271 *wl)
-{
- const struct firmware *fw;
- int ret;
-
- if (wl->chip.id == CHIP_ID_1283_PG20)
- ret = request_firmware(&fw, WL128X_FW_NAME,
- wl1271_wl_to_dev(wl));
- else
- ret = request_firmware(&fw, WL127X_FW_NAME,
- wl1271_wl_to_dev(wl));
-
- if (ret < 0) {
- wl1271_error("could not get firmware: %d", ret);
- return ret;
- }
-
- if (fw->size % 4) {
- wl1271_error("firmware size is not multiple of 32 bits: %zu",
- fw->size);
- ret = -EILSEQ;
- goto out;
- }
-
- wl->fw_len = fw->size;
- wl->fw = vmalloc(wl->fw_len);
-
- if (!wl->fw) {
- wl1271_error("could not allocate memory for the firmware");
- ret = -ENOMEM;
- goto out;
- }
-
- memcpy(wl->fw, fw->data, wl->fw_len);
-
- ret = 0;
-
-out:
- release_firmware(fw);
-
- return ret;
-}
-
-static int wl1271_fetch_nvs(struct wl1271 *wl)
-{
- const struct firmware *fw;
- int ret;
-
- ret = request_firmware(&fw, WL12XX_NVS_NAME, wl1271_wl_to_dev(wl));
-
- if (ret < 0) {
- wl1271_error("could not get nvs file: %d", ret);
- return ret;
- }
-
- wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
-
- if (!wl->nvs) {
- wl1271_error("could not allocate memory for the nvs file");
- ret = -ENOMEM;
- goto out;
- }
-
- wl->nvs_len = fw->size;
-
-out:
- release_firmware(fw);
-
- return ret;
-}
-
-static int wl1271_chip_wakeup(struct wl1271 *wl)
-{
- struct wl1271_partition_set partition;
- int ret;
-
- msleep(WL1271_PRE_POWER_ON_SLEEP);
- ret = wl1271_power_on(wl);
- if (ret)
- return ret;
-
- msleep(WL1271_POWER_ON_SLEEP);
-
- /* We don't need a real memory partition here, because we only want
- * to use the registers at this point. */
- memset(&partition, 0, sizeof(partition));
- partition.reg.start = REGISTERS_BASE;
- partition.reg.size = REGISTERS_DOWN_SIZE;
- wl1271_set_partition(wl, &partition);
-
- /* ELP module wake up */
- wl1271_fw_wakeup(wl);
-
- /* whal_FwCtrl_BootSm() */
-
- /* 0. read chip id from CHIP_ID */
- wl->chip.id = wl1271_read32(wl, CHIP_ID_B);
-
- /* 1. check if chip id is valid */
-
- switch (wl->chip.id) {
- case CHIP_ID_1271_PG10:
- wl1271_warning("chip id 0x%x (1271 PG10) support is obsolete",
- wl->chip.id);
- break;
- case CHIP_ID_1271_PG20:
- wl1271_notice("chip id 0x%x (1271 PG20)",
- wl->chip.id);
- break;
- case CHIP_ID_1283_PG20:
- wl1271_notice("chip id 0x%x (1283 PG20)",
- wl->chip.id);
- break;
- case CHIP_ID_1283_PG10:
- default:
- wl1271_warning("unsupported chip id: 0x%x", wl->chip.id);
- return -ENODEV;
- }
-
- return ret;
-}
-
-static struct wl1271_partition_set part_down = {
- .mem = {
- .start = 0x00000000,
- .size = 0x000177c0
- },
- .reg = {
- .start = REGISTERS_BASE,
- .size = 0x00008800
- },
- .mem2 = {
- .start = 0x00000000,
- .size = 0x00000000
- },
- .mem3 = {
- .start = 0x00000000,
- .size = 0x00000000
- },
-};
-
-static int tester(void *data)
-{
- struct wl1271 *wl = data;
- struct sdio_func *func = wl_to_func(wl);
- struct device *pdev = &func->dev;
- int ret = 0;
- bool rx_started = 0;
- bool tx_started = 0;
- uint8_t *tx_buf, *rx_buf;
- int test_size = PAGE_SIZE;
- u32 addr = 0;
- struct wl1271_partition_set partition;
-
- /* We assume chip is powered up and firmware fetched */
-
- memcpy(&partition, &part_down, sizeof(partition));
- partition.mem.start = addr;
- wl1271_set_partition(wl, &partition);
-
- tx_buf = kmalloc(test_size, GFP_KERNEL);
- rx_buf = kmalloc(test_size, GFP_KERNEL);
- if (!tx_buf || !rx_buf) {
- dev_err(pdev,
- "Could not allocate memory. Test will not run.\n");
- ret = -ENOMEM;
- goto free;
- }
-
- memset(tx_buf, 0x5a, test_size);
-
- /* write something in data area so we can read it back */
- wl1271_write(wl, addr, tx_buf, test_size, false);
-
- while (!kthread_should_stop()) {
- if (rx && !rx_started) {
- dev_info(pdev, "starting rx test\n");
- rx_started = 1;
- } else if (!rx && rx_started) {
- dev_info(pdev, "stopping rx test\n");
- rx_started = 0;
- }
-
- if (tx && !tx_started) {
- dev_info(pdev, "starting tx test\n");
- tx_started = 1;
- } else if (!tx && tx_started) {
- dev_info(pdev, "stopping tx test\n");
- tx_started = 0;
- }
-
- if (rx_started)
- wl1271_read(wl, addr, rx_buf, test_size, false);
-
- if (tx_started)
- wl1271_write(wl, addr, tx_buf, test_size, false);
-
- if (!rx_started && !tx_started)
- msleep(100);
- }
-
-free:
- kfree(tx_buf);
- kfree(rx_buf);
- return ret;
-}
-
-static int __devinit wl1271_probe(struct sdio_func *func,
- const struct sdio_device_id *id)
-{
- const struct wl12xx_platform_data *wlan_data;
- struct wl1271 *wl;
- struct wl1271_test *wl_test;
- int ret = 0;
-
- /* wl1271 has 2 sdio functions we handle just the wlan part */
- if (func->num != 0x02)
- return -ENODEV;
-
- wl_test = kzalloc(sizeof(struct wl1271_test), GFP_KERNEL);
- if (!wl_test) {
- dev_err(&func->dev, "Could not allocate memory\n");
- return -ENOMEM;
- }
-
- wl = &wl_test->wl;
-
- wl->if_priv = func;
- wl->if_ops = &sdio_ops;
-
- /* Grab access to FN0 for ELP reg. */
- func->card->quirks |= MMC_QUIRK_LENIENT_FN0;
-
- /* Use block mode for transferring over one block size of data */
- func->card->quirks |= MMC_QUIRK_BLKSZ_FOR_BYTE_MODE;
-
- wlan_data = wl12xx_get_platform_data();
- if (IS_ERR(wlan_data)) {
- ret = PTR_ERR(wlan_data);
- dev_err(&func->dev, "missing wlan platform data: %d\n", ret);
- goto out_free;
- }
-
- wl->irq = wlan_data->irq;
- wl->ref_clock = wlan_data->board_ref_clock;
- wl->tcxo_clock = wlan_data->board_tcxo_clock;
-
- sdio_set_drvdata(func, wl_test);
-
- /* power up the device */
- ret = wl1271_chip_wakeup(wl);
- if (ret) {
- dev_err(&func->dev, "could not wake up chip\n");
- goto out_free;
- }
-
- if (wl->fw == NULL) {
- ret = wl1271_fetch_firmware(wl);
- if (ret < 0) {
- dev_err(&func->dev, "firmware fetch error\n");
- goto out_off;
- }
- }
-
- /* fetch NVS */
- if (wl->nvs == NULL) {
- ret = wl1271_fetch_nvs(wl);
- if (ret < 0) {
- dev_err(&func->dev, "NVS fetch error\n");
- goto out_off;
- }
- }
-
- ret = wl1271_load_firmware(wl);
- if (ret < 0) {
- dev_err(&func->dev, "firmware load error: %d\n", ret);
- goto out_free;
- }
-
- dev_info(&func->dev, "initialized\n");
-
- /* I/O testing will be done in the tester thread */
-
- wl_test->test_task = kthread_run(tester, wl, "sdio_tester");
- if (IS_ERR(wl_test->test_task)) {
- dev_err(&func->dev, "unable to create kernel thread\n");
- ret = PTR_ERR(wl_test->test_task);
- goto out_free;
- }
-
- return 0;
-
-out_off:
- /* power off the chip */
- wl1271_power_off(wl);
-
-out_free:
- kfree(wl_test);
- return ret;
-}
-
-static void __devexit wl1271_remove(struct sdio_func *func)
-{
- struct wl1271_test *wl_test = sdio_get_drvdata(func);
-
- /* stop the I/O test thread */
- kthread_stop(wl_test->test_task);
-
- /* power off the chip */
- wl1271_power_off(&wl_test->wl);
-
- vfree(wl_test->wl.fw);
- wl_test->wl.fw = NULL;
- kfree(wl_test->wl.nvs);
- wl_test->wl.nvs = NULL;
-
- kfree(wl_test);
-}
-
-static struct sdio_driver wl1271_sdio_driver = {
- .name = "wl12xx_sdio_test",
- .id_table = wl1271_devices,
- .probe = wl1271_probe,
- .remove = __devexit_p(wl1271_remove),
-};
-
-static int __init wl1271_init(void)
-{
- int ret;
-
- ret = sdio_register_driver(&wl1271_sdio_driver);
- if (ret < 0)
- pr_err("failed to register sdio driver: %d\n", ret);
-
- return ret;
-}
-module_init(wl1271_init);
-
-static void __exit wl1271_exit(void)
-{
- sdio_unregister_driver(&wl1271_sdio_driver);
-}
-module_exit(wl1271_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Roger Quadros <roger.quadros@nokia.com>");
-
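The removed sdio_test module above drives its I/O loop from a kernel thread: probe starts it with kthread_run(), the loop polls kthread_should_stop(), and remove tears it down with kthread_stop(). A minimal, self-contained sketch of that pattern (all names here are illustrative, not part of the driver):

#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/err.h>

struct demo_ctx {
	struct task_struct *task;
};

static int demo_thread_fn(void *data)
{
	/* Run until kthread_stop() is called on this task. */
	while (!kthread_should_stop())
		msleep(100);		/* one unit of test I/O per pass */

	return 0;
}

static int demo_start(struct demo_ctx *ctx)
{
	ctx->task = kthread_run(demo_thread_fn, ctx, "demo_tester");
	if (IS_ERR(ctx->task))
		return PTR_ERR(ctx->task);

	return 0;
}

static void demo_stop(struct demo_ctx *ctx)
{
	/* Blocks until demo_thread_fn() has returned. */
	kthread_stop(ctx->task);
}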
diff --git a/drivers/net/wireless/wl12xx/spi.c b/drivers/net/wireless/wl12xx/spi.c
index 0f9718677860..92caa7ce6053 100644
--- a/drivers/net/wireless/wl12xx/spi.c
+++ b/drivers/net/wireless/wl12xx/spi.c
@@ -27,6 +27,7 @@
#include <linux/crc7.h>
#include <linux/spi/spi.h>
#include <linux/wl12xx.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include "wl12xx.h"
@@ -69,35 +70,22 @@
#define WSPI_MAX_NUM_OF_CHUNKS (WL1271_AGGR_BUFFER_SIZE / WSPI_MAX_CHUNK_SIZE)
-static inline struct spi_device *wl_to_spi(struct wl1271 *wl)
-{
- return wl->if_priv;
-}
-
-static struct device *wl1271_spi_wl_to_dev(struct wl1271 *wl)
-{
- return &(wl_to_spi(wl)->dev);
-}
-
-static void wl1271_spi_disable_interrupts(struct wl1271 *wl)
-{
- disable_irq(wl->irq);
-}
-
-static void wl1271_spi_enable_interrupts(struct wl1271 *wl)
-{
- enable_irq(wl->irq);
-}
+struct wl12xx_spi_glue {
+ struct device *dev;
+ struct platform_device *core;
+};
-static void wl1271_spi_reset(struct wl1271 *wl)
+static void wl12xx_spi_reset(struct device *child)
{
+ struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
u8 *cmd;
struct spi_transfer t;
struct spi_message m;
cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL);
if (!cmd) {
- wl1271_error("could not allocate cmd for spi reset");
+ dev_err(child->parent,
+ "could not allocate cmd for spi reset\n");
return;
}
@@ -110,21 +98,22 @@ static void wl1271_spi_reset(struct wl1271 *wl)
t.len = WSPI_INIT_CMD_LEN;
spi_message_add_tail(&t, &m);
- spi_sync(wl_to_spi(wl), &m);
+ spi_sync(to_spi_device(glue->dev), &m);
- wl1271_dump(DEBUG_SPI, "spi reset -> ", cmd, WSPI_INIT_CMD_LEN);
kfree(cmd);
}
-static void wl1271_spi_init(struct wl1271 *wl)
+static void wl12xx_spi_init(struct device *child)
{
+ struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
u8 crc[WSPI_INIT_CMD_CRC_LEN], *cmd;
struct spi_transfer t;
struct spi_message m;
cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL);
if (!cmd) {
- wl1271_error("could not allocate cmd for spi init");
+ dev_err(child->parent,
+ "could not allocate cmd for spi init\n");
return;
}
@@ -165,15 +154,16 @@ static void wl1271_spi_init(struct wl1271 *wl)
t.len = WSPI_INIT_CMD_LEN;
spi_message_add_tail(&t, &m);
- spi_sync(wl_to_spi(wl), &m);
- wl1271_dump(DEBUG_SPI, "spi init -> ", cmd, WSPI_INIT_CMD_LEN);
+ spi_sync(to_spi_device(glue->dev), &m);
kfree(cmd);
}
#define WL1271_BUSY_WORD_TIMEOUT 1000
-static int wl1271_spi_read_busy(struct wl1271 *wl)
+static int wl12xx_spi_read_busy(struct device *child)
{
+ struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
+ struct wl1271 *wl = dev_get_drvdata(child);
struct spi_transfer t[1];
struct spi_message m;
u32 *busy_buf;
@@ -194,20 +184,22 @@ static int wl1271_spi_read_busy(struct wl1271 *wl)
t[0].len = sizeof(u32);
t[0].cs_change = true;
spi_message_add_tail(&t[0], &m);
- spi_sync(wl_to_spi(wl), &m);
+ spi_sync(to_spi_device(glue->dev), &m);
if (*busy_buf & 0x1)
return 0;
}
/* The SPI bus is unresponsive, the read failed. */
- wl1271_error("SPI read busy-word timeout!\n");
+ dev_err(child->parent, "SPI read busy-word timeout!\n");
return -ETIMEDOUT;
}
-static void wl1271_spi_raw_read(struct wl1271 *wl, int addr, void *buf,
+static void wl12xx_spi_raw_read(struct device *child, int addr, void *buf,
size_t len, bool fixed)
{
+ struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
+ struct wl1271 *wl = dev_get_drvdata(child);
struct spi_transfer t[2];
struct spi_message m;
u32 *busy_buf;
@@ -243,10 +235,10 @@ static void wl1271_spi_raw_read(struct wl1271 *wl, int addr, void *buf,
t[1].cs_change = true;
spi_message_add_tail(&t[1], &m);
- spi_sync(wl_to_spi(wl), &m);
+ spi_sync(to_spi_device(glue->dev), &m);
if (!(busy_buf[WL1271_BUSY_WORD_CNT - 1] & 0x1) &&
- wl1271_spi_read_busy(wl)) {
+ wl12xx_spi_read_busy(child)) {
memset(buf, 0, chunk_len);
return;
}
@@ -259,10 +251,7 @@ static void wl1271_spi_raw_read(struct wl1271 *wl, int addr, void *buf,
t[0].cs_change = true;
spi_message_add_tail(&t[0], &m);
- spi_sync(wl_to_spi(wl), &m);
-
- wl1271_dump(DEBUG_SPI, "spi_read cmd -> ", cmd, sizeof(*cmd));
- wl1271_dump(DEBUG_SPI, "spi_read buf <- ", buf, chunk_len);
+ spi_sync(to_spi_device(glue->dev), &m);
if (!fixed)
addr += chunk_len;
@@ -271,9 +260,10 @@ static void wl1271_spi_raw_read(struct wl1271 *wl, int addr, void *buf,
}
}
-static void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf,
- size_t len, bool fixed)
+static void wl12xx_spi_raw_write(struct device *child, int addr, void *buf,
+ size_t len, bool fixed)
{
+ struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
struct spi_transfer t[2 * WSPI_MAX_NUM_OF_CHUNKS];
struct spi_message m;
u32 commands[WSPI_MAX_NUM_OF_CHUNKS];
@@ -308,9 +298,6 @@ static void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf,
t[i].len = chunk_len;
spi_message_add_tail(&t[i++], &m);
- wl1271_dump(DEBUG_SPI, "spi_write cmd -> ", cmd, sizeof(*cmd));
- wl1271_dump(DEBUG_SPI, "spi_write buf -> ", buf, chunk_len);
-
if (!fixed)
addr += chunk_len;
buf += chunk_len;
@@ -318,72 +305,41 @@ static void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf,
cmd++;
}
- spi_sync(wl_to_spi(wl), &m);
-}
-
-static irqreturn_t wl1271_hardirq(int irq, void *cookie)
-{
- struct wl1271 *wl = cookie;
- unsigned long flags;
-
- wl1271_debug(DEBUG_IRQ, "IRQ");
-
- /* complete the ELP completion */
- spin_lock_irqsave(&wl->wl_lock, flags);
- set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
- if (wl->elp_compl) {
- complete(wl->elp_compl);
- wl->elp_compl = NULL;
- }
- spin_unlock_irqrestore(&wl->wl_lock, flags);
-
- return IRQ_WAKE_THREAD;
-}
-
-static int wl1271_spi_set_power(struct wl1271 *wl, bool enable)
-{
- if (wl->set_power)
- wl->set_power(enable);
-
- return 0;
+ spi_sync(to_spi_device(glue->dev), &m);
}
static struct wl1271_if_operations spi_ops = {
- .read = wl1271_spi_raw_read,
- .write = wl1271_spi_raw_write,
- .reset = wl1271_spi_reset,
- .init = wl1271_spi_init,
- .power = wl1271_spi_set_power,
- .dev = wl1271_spi_wl_to_dev,
- .enable_irq = wl1271_spi_enable_interrupts,
- .disable_irq = wl1271_spi_disable_interrupts,
+ .read = wl12xx_spi_raw_read,
+ .write = wl12xx_spi_raw_write,
+ .reset = wl12xx_spi_reset,
+ .init = wl12xx_spi_init,
.set_block_size = NULL,
};
static int __devinit wl1271_probe(struct spi_device *spi)
{
+ struct wl12xx_spi_glue *glue;
struct wl12xx_platform_data *pdata;
- struct ieee80211_hw *hw;
- struct wl1271 *wl;
- unsigned long irqflags;
- int ret;
+ struct resource res[1];
+ int ret = -ENOMEM;
pdata = spi->dev.platform_data;
if (!pdata) {
- wl1271_error("no platform data");
+ dev_err(&spi->dev, "no platform data\n");
return -ENODEV;
}
- hw = wl1271_alloc_hw();
- if (IS_ERR(hw))
- return PTR_ERR(hw);
+ pdata->ops = &spi_ops;
- wl = hw->priv;
+ glue = kzalloc(sizeof(*glue), GFP_KERNEL);
+ if (!glue) {
+ dev_err(&spi->dev, "can't allocate glue\n");
+ goto out;
+ }
- dev_set_drvdata(&spi->dev, wl);
- wl->if_priv = spi;
+ glue->dev = &spi->dev;
- wl->if_ops = &spi_ops;
+ spi_set_drvdata(spi, glue);
/* This is the only SPI value that we need to set here, the rest
* comes from the board-peripherals file */
@@ -391,69 +347,61 @@ static int __devinit wl1271_probe(struct spi_device *spi)
ret = spi_setup(spi);
if (ret < 0) {
- wl1271_error("spi_setup failed");
- goto out_free;
+ dev_err(glue->dev, "spi_setup failed\n");
+ goto out_free_glue;
}
- wl->set_power = pdata->set_power;
- if (!wl->set_power) {
- wl1271_error("set power function missing in platform data");
- ret = -ENODEV;
- goto out_free;
+ glue->core = platform_device_alloc("wl12xx", -1);
+ if (!glue->core) {
+ dev_err(glue->dev, "can't allocate platform_device\n");
+ ret = -ENOMEM;
+ goto out_free_glue;
}
- wl->ref_clock = pdata->board_ref_clock;
- wl->tcxo_clock = pdata->board_tcxo_clock;
- wl->platform_quirks = pdata->platform_quirks;
+ glue->core->dev.parent = &spi->dev;
- if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
- irqflags = IRQF_TRIGGER_RISING;
- else
- irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
+ memset(res, 0x00, sizeof(res));
- wl->irq = spi->irq;
- if (wl->irq < 0) {
- wl1271_error("irq missing in platform data");
- ret = -ENODEV;
- goto out_free;
- }
+ res[0].start = spi->irq;
+ res[0].flags = IORESOURCE_IRQ;
+ res[0].name = "irq";
- ret = request_threaded_irq(wl->irq, wl1271_hardirq, wl1271_irq,
- irqflags,
- DRIVER_NAME, wl);
- if (ret < 0) {
- wl1271_error("request_irq() failed: %d", ret);
- goto out_free;
+ ret = platform_device_add_resources(glue->core, res, ARRAY_SIZE(res));
+ if (ret) {
+ dev_err(glue->dev, "can't add resources\n");
+ goto out_dev_put;
}
- disable_irq(wl->irq);
-
- ret = wl1271_init_ieee80211(wl);
- if (ret)
- goto out_irq;
+ ret = platform_device_add_data(glue->core, pdata, sizeof(*pdata));
+ if (ret) {
+ dev_err(glue->dev, "can't add platform data\n");
+ goto out_dev_put;
+ }
- ret = wl1271_register_hw(wl);
- if (ret)
- goto out_irq;
+ ret = platform_device_add(glue->core);
+ if (ret) {
+ dev_err(glue->dev, "can't register platform device\n");
+ goto out_dev_put;
+ }
return 0;
- out_irq:
- free_irq(wl->irq, wl);
-
- out_free:
- wl1271_free_hw(wl);
+out_dev_put:
+ platform_device_put(glue->core);
+out_free_glue:
+ kfree(glue);
+out:
return ret;
}
static int __devexit wl1271_remove(struct spi_device *spi)
{
- struct wl1271 *wl = dev_get_drvdata(&spi->dev);
+ struct wl12xx_spi_glue *glue = spi_get_drvdata(spi);
- wl1271_unregister_hw(wl);
- free_irq(wl->irq, wl);
- wl1271_free_hw(wl);
+ platform_device_del(glue->core);
+ platform_device_put(glue->core);
+ kfree(glue);
return 0;
}
@@ -462,7 +410,6 @@ static int __devexit wl1271_remove(struct spi_device *spi)
static struct spi_driver wl1271_spi_driver = {
.driver = {
.name = "wl1271_spi",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
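The spi.c rework above turns the SPI driver into thin bus glue: it allocates a child "wl12xx" platform device, parents it to the SPI device, attaches the IRQ resource and the platform data, and registers it; remove() undoes this with platform_device_del() plus platform_device_put(). A condensed sketch of that registration/teardown shape, using hypothetical names:

#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>

/* Hypothetical bus-glue object, mirroring the wl12xx_spi_glue idea above. */
struct demo_glue {
	struct device *dev;
	struct platform_device *core;
};

static int demo_register_core(struct demo_glue *glue, int irq,
			      const void *pdata, size_t pdata_len)
{
	struct resource res[1];
	int ret;

	glue->core = platform_device_alloc("demo-core", -1);
	if (!glue->core)
		return -ENOMEM;

	/* Make the child show up under the bus device in sysfs. */
	glue->core->dev.parent = glue->dev;

	memset(res, 0, sizeof(res));
	res[0].start = irq;
	res[0].flags = IORESOURCE_IRQ;
	res[0].name = "irq";

	ret = platform_device_add_resources(glue->core, res, ARRAY_SIZE(res));
	if (ret)
		goto err_put;

	/* platform_device_add_data() copies pdata, so the caller keeps ownership. */
	ret = platform_device_add_data(glue->core, pdata, pdata_len);
	if (ret)
		goto err_put;

	ret = platform_device_add(glue->core);
	if (ret)
		goto err_put;

	return 0;

err_put:
	platform_device_put(glue->core);
	return ret;
}

static void demo_unregister_core(struct demo_glue *glue)
{
	/* del + put undo platform_device_add() and platform_device_alloc(). */
	platform_device_del(glue->core);
	platform_device_put(glue->core);
}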
diff --git a/drivers/net/wireless/wl12xx/testmode.c b/drivers/net/wireless/wl12xx/testmode.c
index 4ae8effaee22..25093c0cb0ed 100644
--- a/drivers/net/wireless/wl12xx/testmode.c
+++ b/drivers/net/wireless/wl12xx/testmode.c
@@ -26,8 +26,10 @@
#include <net/genetlink.h>
#include "wl12xx.h"
+#include "debug.h"
#include "acx.h"
#include "reg.h"
+#include "ps.h"
#define WL1271_TM_MAX_DATA_LENGTH 1024
@@ -36,6 +38,7 @@ enum wl1271_tm_commands {
WL1271_TM_CMD_TEST,
WL1271_TM_CMD_INTERROGATE,
WL1271_TM_CMD_CONFIGURE,
+ WL1271_TM_CMD_NVS_PUSH, /* Not in use. Keep to not break ABI */
WL1271_TM_CMD_SET_PLT_MODE,
WL1271_TM_CMD_RECOVER,
@@ -87,31 +90,47 @@ static int wl1271_tm_cmd_test(struct wl1271 *wl, struct nlattr *tb[])
return -EMSGSIZE;
mutex_lock(&wl->mutex);
- ret = wl1271_cmd_test(wl, buf, buf_len, answer);
- mutex_unlock(&wl->mutex);
+ if (wl->state == WL1271_STATE_OFF) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out;
+
+ ret = wl1271_cmd_test(wl, buf, buf_len, answer);
if (ret < 0) {
wl1271_warning("testmode cmd test failed: %d", ret);
- return ret;
+ goto out_sleep;
}
if (answer) {
len = nla_total_size(buf_len);
skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, len);
- if (!skb)
- return -ENOMEM;
+ if (!skb) {
+ ret = -ENOMEM;
+ goto out_sleep;
+ }
NLA_PUT(skb, WL1271_TM_ATTR_DATA, buf_len, buf);
ret = cfg80211_testmode_reply(skb);
if (ret < 0)
- return ret;
+ goto out_sleep;
}
- return 0;
+out_sleep:
+ wl1271_ps_elp_sleep(wl);
+out:
+ mutex_unlock(&wl->mutex);
+
+ return ret;
nla_put_failure:
kfree_skb(skb);
- return -EMSGSIZE;
+ ret = -EMSGSIZE;
+ goto out_sleep;
}
static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[])
@@ -128,33 +147,53 @@ static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[])
ie_id = nla_get_u8(tb[WL1271_TM_ATTR_IE_ID]);
+ mutex_lock(&wl->mutex);
+
+ if (wl->state == WL1271_STATE_OFF) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out;
+
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
- if (!cmd)
- return -ENOMEM;
+ if (!cmd) {
+ ret = -ENOMEM;
+ goto out_sleep;
+ }
- mutex_lock(&wl->mutex);
ret = wl1271_cmd_interrogate(wl, ie_id, cmd, sizeof(*cmd));
- mutex_unlock(&wl->mutex);
-
if (ret < 0) {
wl1271_warning("testmode cmd interrogate failed: %d", ret);
- kfree(cmd);
- return ret;
+ goto out_free;
}
skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, sizeof(*cmd));
if (!skb) {
- kfree(cmd);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out_free;
}
NLA_PUT(skb, WL1271_TM_ATTR_DATA, sizeof(*cmd), cmd);
+ ret = cfg80211_testmode_reply(skb);
+ if (ret < 0)
+ goto out_free;
+
+out_free:
+ kfree(cmd);
+out_sleep:
+ wl1271_ps_elp_sleep(wl);
+out:
+ mutex_unlock(&wl->mutex);
- return 0;
+ return ret;
nla_put_failure:
kfree_skb(skb);
- return -EMSGSIZE;
+ ret = -EMSGSIZE;
+ goto out_free;
}
static int wl1271_tm_cmd_configure(struct wl1271 *wl, struct nlattr *tb[])
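The testmode.c changes above wrap each command in the same bracket: take the mutex, bail out if the interface is off, wake the chip from ELP, run the command, then let the chip sleep again and unlock through a single exit path. A reduced sketch of that control flow, with stand-in names for the wl12xx-specific calls:

#include <linux/mutex.h>
#include <linux/errno.h>

enum demo_state { DEMO_STATE_OFF, DEMO_STATE_ON };

struct demo_dev {
	struct mutex lock;
	enum demo_state state;
};

/* Stand-ins for wl1271_ps_elp_wakeup()/wl1271_ps_elp_sleep() and the command. */
static int demo_wakeup(struct demo_dev *dev) { return 0; }
static void demo_sleep(struct demo_dev *dev) { }
static int demo_send_cmd(struct demo_dev *dev) { return 0; }

static int demo_run_cmd(struct demo_dev *dev)
{
	int ret;

	mutex_lock(&dev->lock);

	/* Refuse to touch the chip while the interface is down. */
	if (dev->state == DEMO_STATE_OFF) {
		ret = -EINVAL;
		goto out;
	}

	/* Pull the chip out of ELP before talking to it... */
	ret = demo_wakeup(dev);
	if (ret < 0)
		goto out;

	ret = demo_send_cmd(dev);

	/* ...and always let it go back to sleep afterwards. */
	demo_sleep(dev);
out:
	mutex_unlock(&dev->lock);
	return ret;
}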
diff --git a/drivers/net/wireless/wl12xx/tx.c b/drivers/net/wireless/wl12xx/tx.c
index bad9e29d49b0..4508ccd78328 100644
--- a/drivers/net/wireless/wl12xx/tx.c
+++ b/drivers/net/wireless/wl12xx/tx.c
@@ -26,22 +26,24 @@
#include <linux/etherdevice.h>
#include "wl12xx.h"
+#include "debug.h"
#include "io.h"
#include "reg.h"
#include "ps.h"
#include "tx.h"
#include "event.h"
-static int wl1271_set_default_wep_key(struct wl1271 *wl, u8 id)
+static int wl1271_set_default_wep_key(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif, u8 id)
{
int ret;
- bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
+ bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
if (is_ap)
ret = wl12xx_cmd_set_default_wep_key(wl, id,
- wl->ap_bcast_hlid);
+ wlvif->ap.bcast_hlid);
else
- ret = wl12xx_cmd_set_default_wep_key(wl, id, wl->sta_hlid);
+ ret = wl12xx_cmd_set_default_wep_key(wl, id, wlvif->sta.hlid);
if (ret < 0)
return ret;
@@ -76,7 +78,8 @@ static void wl1271_free_tx_id(struct wl1271 *wl, int id)
}
static int wl1271_tx_update_filters(struct wl1271 *wl,
- struct sk_buff *skb)
+ struct wl12xx_vif *wlvif,
+ struct sk_buff *skb)
{
struct ieee80211_hdr *hdr;
int ret;
@@ -92,15 +95,11 @@ static int wl1271_tx_update_filters(struct wl1271 *wl,
if (!ieee80211_is_auth(hdr->frame_control))
return 0;
- if (wl->dev_hlid != WL12XX_INVALID_LINK_ID)
+ if (wlvif->dev_hlid != WL12XX_INVALID_LINK_ID)
goto out;
wl1271_debug(DEBUG_CMD, "starting device role for roaming");
- ret = wl12xx_cmd_role_start_dev(wl);
- if (ret < 0)
- goto out;
-
- ret = wl12xx_roc(wl, wl->dev_role_id);
+ ret = wl12xx_start_dev(wl, wlvif);
if (ret < 0)
goto out;
out:
@@ -123,18 +122,16 @@ static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl,
wl1271_acx_set_inconnection_sta(wl, hdr->addr1);
}
-static void wl1271_tx_regulate_link(struct wl1271 *wl, u8 hlid)
+static void wl1271_tx_regulate_link(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
+ u8 hlid)
{
bool fw_ps, single_sta;
u8 tx_pkts;
- /* only regulate station links */
- if (hlid < WL1271_AP_STA_HLID_START)
+ if (WARN_ON(!test_bit(hlid, wlvif->links_map)))
return;
- if (WARN_ON(!wl1271_is_active_sta(wl, hlid)))
- return;
-
fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
tx_pkts = wl->links[hlid].allocated_pkts;
single_sta = (wl->active_sta_count == 1);
@@ -146,7 +143,7 @@ static void wl1271_tx_regulate_link(struct wl1271 *wl, u8 hlid)
* case FW-memory congestion is not a problem.
*/
if (!single_sta && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
- wl1271_ps_link_start(wl, hlid, true);
+ wl12xx_ps_link_start(wl, wlvif, hlid, true);
}
bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb)
@@ -154,7 +151,8 @@ bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb)
return wl->dummy_packet == skb;
}
-u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct sk_buff *skb)
+u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct sk_buff *skb)
{
struct ieee80211_tx_info *control = IEEE80211_SKB_CB(skb);
@@ -167,49 +165,51 @@ u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct sk_buff *skb)
} else {
struct ieee80211_hdr *hdr;
- if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags))
+ if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
return wl->system_hlid;
hdr = (struct ieee80211_hdr *)skb->data;
if (ieee80211_is_mgmt(hdr->frame_control))
- return wl->ap_global_hlid;
+ return wlvif->ap.global_hlid;
else
- return wl->ap_bcast_hlid;
+ return wlvif->ap.bcast_hlid;
}
}
-static u8 wl1271_tx_get_hlid(struct wl1271 *wl, struct sk_buff *skb)
+u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- if (wl12xx_is_dummy_packet(wl, skb))
+ if (!wlvif || wl12xx_is_dummy_packet(wl, skb))
return wl->system_hlid;
- if (wl->bss_type == BSS_TYPE_AP_BSS)
- return wl12xx_tx_get_hlid_ap(wl, skb);
+ if (wlvif->bss_type == BSS_TYPE_AP_BSS)
+ return wl12xx_tx_get_hlid_ap(wl, wlvif, skb);
- wl1271_tx_update_filters(wl, skb);
+ wl1271_tx_update_filters(wl, wlvif, skb);
- if ((test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags) ||
- test_bit(WL1271_FLAG_IBSS_JOINED, &wl->flags)) &&
+ if ((test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
+ test_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags)) &&
!ieee80211_is_auth(hdr->frame_control) &&
!ieee80211_is_assoc_req(hdr->frame_control))
- return wl->sta_hlid;
+ return wlvif->sta.hlid;
else
- return wl->dev_hlid;
+ return wlvif->dev_hlid;
}
static unsigned int wl12xx_calc_packet_alignment(struct wl1271 *wl,
unsigned int packet_length)
{
- if (wl->quirks & WL12XX_QUIRK_BLOCKSIZE_ALIGNMENT)
- return ALIGN(packet_length, WL12XX_BUS_BLOCK_SIZE);
- else
+ if (wl->quirks & WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT)
return ALIGN(packet_length, WL1271_TX_ALIGN_TO);
+ else
+ return ALIGN(packet_length, WL12XX_BUS_BLOCK_SIZE);
}
-static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
- u32 buf_offset, u8 hlid)
+static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct sk_buff *skb, u32 extra, u32 buf_offset,
+ u8 hlid)
{
struct wl1271_tx_hw_descr *desc;
u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra;
@@ -217,6 +217,7 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
u32 total_blocks;
int id, ret = -EBUSY, ac;
u32 spare_blocks = wl->tx_spare_blocks;
+ bool is_dummy = false;
if (buf_offset + total_len > WL1271_AGGR_BUFFER_SIZE)
return -EAGAIN;
@@ -231,8 +232,10 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
len = wl12xx_calc_packet_alignment(wl, total_len);
/* in case of a dummy packet, use default amount of spare mem blocks */
- if (unlikely(wl12xx_is_dummy_packet(wl, skb)))
+ if (unlikely(wl12xx_is_dummy_packet(wl, skb))) {
+ is_dummy = true;
spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT;
+ }
total_blocks = (len + TX_HW_BLOCK_SIZE - 1) / TX_HW_BLOCK_SIZE +
spare_blocks;
@@ -257,8 +260,9 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
wl->tx_allocated_pkts[ac]++;
- if (wl->bss_type == BSS_TYPE_AP_BSS &&
- hlid >= WL1271_AP_STA_HLID_START)
+ if (!is_dummy && wlvif &&
+ wlvif->bss_type == BSS_TYPE_AP_BSS &&
+ test_bit(hlid, wlvif->ap.sta_hlid_map))
wl->links[hlid].allocated_pkts++;
ret = 0;
@@ -273,15 +277,16 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
return ret;
}
-static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
- u32 extra, struct ieee80211_tx_info *control,
- u8 hlid)
+static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct sk_buff *skb, u32 extra,
+ struct ieee80211_tx_info *control, u8 hlid)
{
struct timespec ts;
struct wl1271_tx_hw_descr *desc;
int aligned_len, ac, rate_idx;
s64 hosttime;
- u16 tx_attr;
+ u16 tx_attr = 0;
+ bool is_dummy;
desc = (struct wl1271_tx_hw_descr *) skb->data;
@@ -298,7 +303,8 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
hosttime = (timespec_to_ns(&ts) >> 10);
desc->start_time = cpu_to_le32(hosttime - wl->time_offset);
- if (wl->bss_type != BSS_TYPE_AP_BSS)
+ is_dummy = wl12xx_is_dummy_packet(wl, skb);
+ if (is_dummy || !wlvif || wlvif->bss_type != BSS_TYPE_AP_BSS)
desc->life_time = cpu_to_le16(TX_HW_MGMT_PKT_LIFETIME_TU);
else
desc->life_time = cpu_to_le16(TX_HW_AP_MODE_PKT_LIFETIME_TU);
@@ -307,39 +313,42 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
desc->tid = skb->priority;
- if (wl12xx_is_dummy_packet(wl, skb)) {
+ if (is_dummy) {
/*
* FW expects the dummy packet to have an invalid session id -
* any session id that is different than the one set in the join
*/
- tx_attr = ((~wl->session_counter) <<
+ tx_attr = (SESSION_COUNTER_INVALID <<
TX_HW_ATTR_OFST_SESSION_COUNTER) &
TX_HW_ATTR_SESSION_COUNTER;
tx_attr |= TX_HW_ATTR_TX_DUMMY_REQ;
- } else {
+ } else if (wlvif) {
/* configure the tx attributes */
- tx_attr =
- wl->session_counter << TX_HW_ATTR_OFST_SESSION_COUNTER;
+ tx_attr = wlvif->session_counter <<
+ TX_HW_ATTR_OFST_SESSION_COUNTER;
}
desc->hlid = hlid;
-
- if (wl->bss_type != BSS_TYPE_AP_BSS) {
+ if (is_dummy || !wlvif)
+ rate_idx = 0;
+ else if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
/* if the packets are destined for AP (have a STA entry)
send them with AP rate policies, otherwise use default
basic rates */
- if (control->control.sta)
- rate_idx = ACX_TX_AP_FULL_RATE;
+ if (control->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
+ rate_idx = wlvif->sta.p2p_rate_idx;
+ else if (control->control.sta)
+ rate_idx = wlvif->sta.ap_rate_idx;
else
- rate_idx = ACX_TX_BASIC_RATE;
+ rate_idx = wlvif->sta.basic_rate_idx;
} else {
- if (hlid == wl->ap_global_hlid)
- rate_idx = ACX_TX_AP_MODE_MGMT_RATE;
- else if (hlid == wl->ap_bcast_hlid)
- rate_idx = ACX_TX_AP_MODE_BCST_RATE;
+ if (hlid == wlvif->ap.global_hlid)
+ rate_idx = wlvif->ap.mgmt_rate_idx;
+ else if (hlid == wlvif->ap.bcast_hlid)
+ rate_idx = wlvif->ap.bcast_rate_idx;
else
- rate_idx = ac;
+ rate_idx = wlvif->ap.ucast_rate_idx[ac];
}
tx_attr |= rate_idx << TX_HW_ATTR_OFST_RATE_POLICY;
@@ -379,20 +388,24 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
}
/* caller must hold wl->mutex */
-static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct sk_buff *skb,
- u32 buf_offset)
+static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct sk_buff *skb, u32 buf_offset)
{
struct ieee80211_tx_info *info;
u32 extra = 0;
int ret = 0;
u32 total_len;
u8 hlid;
+ bool is_dummy;
if (!skb)
return -EINVAL;
info = IEEE80211_SKB_CB(skb);
+ /* TODO: handle dummy packets on multi-vifs */
+ is_dummy = wl12xx_is_dummy_packet(wl, skb);
+
if (info->control.hw_key &&
info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP)
extra = WL1271_TKIP_IV_SPACE;
@@ -405,29 +418,28 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct sk_buff *skb,
is_wep = (cipher == WLAN_CIPHER_SUITE_WEP40) ||
(cipher == WLAN_CIPHER_SUITE_WEP104);
- if (unlikely(is_wep && wl->default_key != idx)) {
- ret = wl1271_set_default_wep_key(wl, idx);
+ if (unlikely(is_wep && wlvif->default_key != idx)) {
+ ret = wl1271_set_default_wep_key(wl, wlvif, idx);
if (ret < 0)
return ret;
- wl->default_key = idx;
+ wlvif->default_key = idx;
}
}
-
- hlid = wl1271_tx_get_hlid(wl, skb);
+ hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
if (hlid == WL12XX_INVALID_LINK_ID) {
wl1271_error("invalid hlid. dropping skb 0x%p", skb);
return -EINVAL;
}
- ret = wl1271_tx_allocate(wl, skb, extra, buf_offset, hlid);
+ ret = wl1271_tx_allocate(wl, wlvif, skb, extra, buf_offset, hlid);
if (ret < 0)
return ret;
- wl1271_tx_fill_hdr(wl, skb, extra, info, hlid);
+ wl1271_tx_fill_hdr(wl, wlvif, skb, extra, info, hlid);
- if (wl->bss_type == BSS_TYPE_AP_BSS) {
+ if (!is_dummy && wlvif && wlvif->bss_type == BSS_TYPE_AP_BSS) {
wl1271_tx_ap_update_inconnection_sta(wl, skb);
- wl1271_tx_regulate_link(wl, hlid);
+ wl1271_tx_regulate_link(wl, wlvif, hlid);
}
/*
@@ -444,7 +456,7 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct sk_buff *skb,
memset(wl->aggr_buf + buf_offset + skb->len, 0, total_len - skb->len);
/* Revert side effects in the dummy packet skb, so it can be reused */
- if (wl12xx_is_dummy_packet(wl, skb))
+ if (is_dummy)
skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
return total_len;
@@ -522,19 +534,18 @@ static struct sk_buff_head *wl1271_select_queue(struct wl1271 *wl,
return &queues[q];
}
-static struct sk_buff *wl1271_sta_skb_dequeue(struct wl1271 *wl)
+static struct sk_buff *wl12xx_lnk_skb_dequeue(struct wl1271 *wl,
+ struct wl1271_link *lnk)
{
- struct sk_buff *skb = NULL;
+ struct sk_buff *skb;
unsigned long flags;
struct sk_buff_head *queue;
- queue = wl1271_select_queue(wl, wl->tx_queue);
+ queue = wl1271_select_queue(wl, lnk->tx_queue);
if (!queue)
- goto out;
+ return NULL;
skb = skb_dequeue(queue);
-
-out:
if (skb) {
int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
spin_lock_irqsave(&wl->wl_lock, flags);
@@ -545,43 +556,33 @@ out:
return skb;
}
-static struct sk_buff *wl1271_ap_skb_dequeue(struct wl1271 *wl)
+static struct sk_buff *wl12xx_vif_skb_dequeue(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
struct sk_buff *skb = NULL;
- unsigned long flags;
int i, h, start_hlid;
- struct sk_buff_head *queue;
/* start from the link after the last one */
- start_hlid = (wl->last_tx_hlid + 1) % AP_MAX_LINKS;
+ start_hlid = (wlvif->last_tx_hlid + 1) % WL12XX_MAX_LINKS;
/* dequeue according to AC, round robin on each link */
- for (i = 0; i < AP_MAX_LINKS; i++) {
- h = (start_hlid + i) % AP_MAX_LINKS;
+ for (i = 0; i < WL12XX_MAX_LINKS; i++) {
+ h = (start_hlid + i) % WL12XX_MAX_LINKS;
/* only consider connected stations */
- if (h >= WL1271_AP_STA_HLID_START &&
- !test_bit(h - WL1271_AP_STA_HLID_START, wl->ap_hlid_map))
+ if (!test_bit(h, wlvif->links_map))
continue;
- queue = wl1271_select_queue(wl, wl->links[h].tx_queue);
- if (!queue)
+ skb = wl12xx_lnk_skb_dequeue(wl, &wl->links[h]);
+ if (!skb)
continue;
- skb = skb_dequeue(queue);
- if (skb)
- break;
+ wlvif->last_tx_hlid = h;
+ break;
}
- if (skb) {
- int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
- wl->last_tx_hlid = h;
- spin_lock_irqsave(&wl->wl_lock, flags);
- wl->tx_queue_count[q]--;
- spin_unlock_irqrestore(&wl->wl_lock, flags);
- } else {
- wl->last_tx_hlid = 0;
- }
+ if (!skb)
+ wlvif->last_tx_hlid = 0;
return skb;
}
@@ -589,12 +590,32 @@ static struct sk_buff *wl1271_ap_skb_dequeue(struct wl1271 *wl)
static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
{
unsigned long flags;
+ struct wl12xx_vif *wlvif = wl->last_wlvif;
struct sk_buff *skb = NULL;
- if (wl->bss_type == BSS_TYPE_AP_BSS)
- skb = wl1271_ap_skb_dequeue(wl);
- else
- skb = wl1271_sta_skb_dequeue(wl);
+ if (wlvif) {
+ wl12xx_for_each_wlvif_continue(wl, wlvif) {
+ skb = wl12xx_vif_skb_dequeue(wl, wlvif);
+ if (skb) {
+ wl->last_wlvif = wlvif;
+ break;
+ }
+ }
+ }
+
+ /* do another pass */
+ if (!skb) {
+ wl12xx_for_each_wlvif(wl, wlvif) {
+ skb = wl12xx_vif_skb_dequeue(wl, wlvif);
+ if (skb) {
+ wl->last_wlvif = wlvif;
+ break;
+ }
+ }
+ }
+
+ if (!skb)
+ skb = wl12xx_lnk_skb_dequeue(wl, &wl->links[wl->system_hlid]);
if (!skb &&
test_and_clear_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags)) {
@@ -610,21 +631,21 @@ static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
return skb;
}
-static void wl1271_skb_queue_head(struct wl1271 *wl, struct sk_buff *skb)
+static void wl1271_skb_queue_head(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct sk_buff *skb)
{
unsigned long flags;
int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
if (wl12xx_is_dummy_packet(wl, skb)) {
set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
- } else if (wl->bss_type == BSS_TYPE_AP_BSS) {
- u8 hlid = wl1271_tx_get_hlid(wl, skb);
+ } else {
+ u8 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
skb_queue_head(&wl->links[hlid].tx_queue[q], skb);
/* make sure we dequeue the same packet next time */
- wl->last_tx_hlid = (hlid + AP_MAX_LINKS - 1) % AP_MAX_LINKS;
- } else {
- skb_queue_head(&wl->tx_queue[q], skb);
+ wlvif->last_tx_hlid = (hlid + WL12XX_MAX_LINKS - 1) %
+ WL12XX_MAX_LINKS;
}
spin_lock_irqsave(&wl->wl_lock, flags);
@@ -639,29 +660,71 @@ static bool wl1271_tx_is_data_present(struct sk_buff *skb)
return ieee80211_is_data_present(hdr->frame_control);
}
+void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids)
+{
+ struct wl12xx_vif *wlvif;
+ u32 timeout;
+ u8 hlid;
+
+ if (!wl->conf.rx_streaming.interval)
+ return;
+
+ if (!wl->conf.rx_streaming.always &&
+ !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))
+ return;
+
+ timeout = wl->conf.rx_streaming.duration;
+ wl12xx_for_each_wlvif_sta(wl, wlvif) {
+ bool found = false;
+ for_each_set_bit(hlid, active_hlids, WL12XX_MAX_LINKS) {
+ if (test_bit(hlid, wlvif->links_map)) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ continue;
+
+ /* enable rx streaming */
+ if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
+ ieee80211_queue_work(wl->hw,
+ &wlvif->rx_streaming_enable_work);
+
+ mod_timer(&wlvif->rx_streaming_timer,
+ jiffies + msecs_to_jiffies(timeout));
+ }
+}
+
void wl1271_tx_work_locked(struct wl1271 *wl)
{
+ struct wl12xx_vif *wlvif;
struct sk_buff *skb;
+ struct wl1271_tx_hw_descr *desc;
u32 buf_offset = 0;
bool sent_packets = false;
- bool had_data = false;
- bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
+ unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0};
int ret;
if (unlikely(wl->state == WL1271_STATE_OFF))
return;
while ((skb = wl1271_skb_dequeue(wl))) {
- if (wl1271_tx_is_data_present(skb))
- had_data = true;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ bool has_data = false;
- ret = wl1271_prepare_tx_frame(wl, skb, buf_offset);
+ wlvif = NULL;
+ if (!wl12xx_is_dummy_packet(wl, skb) && info->control.vif)
+ wlvif = wl12xx_vif_to_data(info->control.vif);
+
+ has_data = wlvif && wl1271_tx_is_data_present(skb);
+ ret = wl1271_prepare_tx_frame(wl, wlvif, skb, buf_offset);
if (ret == -EAGAIN) {
/*
* Aggregation buffer is full.
* Flush buffer and try again.
*/
- wl1271_skb_queue_head(wl, skb);
+ wl1271_skb_queue_head(wl, wlvif, skb);
wl1271_write(wl, WL1271_SLV_MEM_DATA, wl->aggr_buf,
buf_offset, true);
sent_packets = true;
@@ -672,16 +735,27 @@ void wl1271_tx_work_locked(struct wl1271 *wl)
* Firmware buffer is full.
* Queue back last skb, and stop aggregating.
*/
- wl1271_skb_queue_head(wl, skb);
+ wl1271_skb_queue_head(wl, wlvif, skb);
/* No work left, avoid scheduling redundant tx work */
set_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
goto out_ack;
} else if (ret < 0) {
- dev_kfree_skb(skb);
+ if (wl12xx_is_dummy_packet(wl, skb))
+ /*
+ * fw still expects dummy packet,
+ * so re-enqueue it
+ */
+ wl1271_skb_queue_head(wl, wlvif, skb);
+ else
+ ieee80211_free_txskb(wl->hw, skb);
goto out_ack;
}
buf_offset += ret;
wl->tx_packets_count++;
+ if (has_data) {
+ desc = (struct wl1271_tx_hw_descr *) skb->data;
+ __set_bit(desc->hlid, active_hlids);
+ }
}
out_ack:
@@ -701,19 +775,7 @@ out_ack:
wl1271_handle_tx_low_watermark(wl);
}
- if (!is_ap && wl->conf.rx_streaming.interval && had_data &&
- (wl->conf.rx_streaming.always ||
- test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))) {
- u32 timeout = wl->conf.rx_streaming.duration;
-
- /* enable rx streaming */
- if (!test_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags))
- ieee80211_queue_work(wl->hw,
- &wl->rx_streaming_enable_work);
-
- mod_timer(&wl->rx_streaming_timer,
- jiffies + msecs_to_jiffies(timeout));
- }
+ wl12xx_rearm_rx_streaming(wl, active_hlids);
}
void wl1271_tx_work(struct work_struct *work)
@@ -737,6 +799,8 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
struct wl1271_tx_hw_res_descr *result)
{
struct ieee80211_tx_info *info;
+ struct ieee80211_vif *vif;
+ struct wl12xx_vif *wlvif;
struct sk_buff *skb;
int id = result->id;
int rate = -1;
@@ -756,11 +820,16 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
return;
}
+ /* info->control is valid as long as we don't update info->status */
+ vif = info->control.vif;
+ wlvif = wl12xx_vif_to_data(vif);
+
/* update the TX status info */
if (result->status == TX_SUCCESS) {
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
info->flags |= IEEE80211_TX_STAT_ACK;
- rate = wl1271_rate_to_idx(result->rate_class_index, wl->band);
+ rate = wl1271_rate_to_idx(result->rate_class_index,
+ wlvif->band);
retries = result->ack_failures;
} else if (result->status == TX_RETRY_EXCEEDED) {
wl->stats.excessive_retries++;
@@ -783,14 +852,14 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
info->control.hw_key->cipher == WLAN_CIPHER_SUITE_CCMP ||
info->control.hw_key->cipher == WL1271_CIPHER_SUITE_GEM)) {
u8 fw_lsb = result->tx_security_sequence_number_lsb;
- u8 cur_lsb = wl->tx_security_last_seq_lsb;
+ u8 cur_lsb = wlvif->tx_security_last_seq_lsb;
/*
* update security sequence number, taking care of potential
* wrap-around
*/
- wl->tx_security_seq += (fw_lsb - cur_lsb + 256) % 256;
- wl->tx_security_last_seq_lsb = fw_lsb;
+ wlvif->tx_security_seq += (fw_lsb - cur_lsb) & 0xff;
+ wlvif->tx_security_last_seq_lsb = fw_lsb;
}
/* remove private header from packet */
@@ -886,39 +955,30 @@ void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid)
}
/* caller must hold wl->mutex and TX must be stopped */
-void wl1271_tx_reset(struct wl1271 *wl, bool reset_tx_queues)
+void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
int i;
- struct sk_buff *skb;
- struct ieee80211_tx_info *info;
/* TX failure */
- if (wl->bss_type == BSS_TYPE_AP_BSS) {
- for (i = 0; i < AP_MAX_LINKS; i++) {
- wl1271_free_sta(wl, i);
- wl1271_tx_reset_link_queues(wl, i);
- wl->links[i].allocated_pkts = 0;
- wl->links[i].prev_freed_pkts = 0;
- }
-
- wl->last_tx_hlid = 0;
- } else {
- for (i = 0; i < NUM_TX_QUEUES; i++) {
- while ((skb = skb_dequeue(&wl->tx_queue[i]))) {
- wl1271_debug(DEBUG_TX, "freeing skb 0x%p",
- skb);
-
- if (!wl12xx_is_dummy_packet(wl, skb)) {
- info = IEEE80211_SKB_CB(skb);
- info->status.rates[0].idx = -1;
- info->status.rates[0].count = 0;
- ieee80211_tx_status_ni(wl->hw, skb);
- }
- }
- }
+ for_each_set_bit(i, wlvif->links_map, WL12XX_MAX_LINKS) {
+ if (wlvif->bss_type == BSS_TYPE_AP_BSS)
+ wl1271_free_sta(wl, wlvif, i);
+ else
+ wlvif->sta.ba_rx_bitmap = 0;
- wl->ba_rx_bitmap = 0;
+ wl1271_tx_reset_link_queues(wl, i);
+ wl->links[i].allocated_pkts = 0;
+ wl->links[i].prev_freed_pkts = 0;
}
+ wlvif->last_tx_hlid = 0;
+
+}
+/* caller must hold wl->mutex and TX must be stopped */
+void wl12xx_tx_reset(struct wl1271 *wl, bool reset_tx_queues)
+{
+ int i;
+ struct sk_buff *skb;
+ struct ieee80211_tx_info *info;
for (i = 0; i < NUM_TX_QUEUES; i++)
wl->tx_queue_count[i] = 0;
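One detail in the tx.c hunks above is the security sequence update: the old (fw_lsb - cur_lsb + 256) % 256 and the new (fw_lsb - cur_lsb) & 0xff compute the same 8-bit distance, the mask form simply drops the explicit bias. A small illustrative helper (hypothetical name) showing the wrap-around behaviour:

#include <linux/types.h>

/*
 * The firmware only reports the low 8 bits of the security sequence
 * number, while the driver keeps a full 64-bit counter. The masked
 * subtraction yields the distance even across a wrap: for example
 * last_lsb = 0xfe and fw_lsb = 0x03 give (0x03 - 0xfe) & 0xff = 0x05.
 */
static void demo_update_security_seq(u64 *seq, u8 *last_lsb, u8 fw_lsb)
{
	*seq += (fw_lsb - *last_lsb) & 0xff;
	*last_lsb = fw_lsb;
}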
diff --git a/drivers/net/wireless/wl12xx/tx.h b/drivers/net/wireless/wl12xx/tx.h
index dc4f09adf088..2dbb24e6d541 100644
--- a/drivers/net/wireless/wl12xx/tx.h
+++ b/drivers/net/wireless/wl12xx/tx.h
@@ -206,18 +206,23 @@ static inline int wl1271_tx_total_queue_count(struct wl1271 *wl)
void wl1271_tx_work(struct work_struct *work);
void wl1271_tx_work_locked(struct wl1271 *wl);
void wl1271_tx_complete(struct wl1271 *wl);
-void wl1271_tx_reset(struct wl1271 *wl, bool reset_tx_queues);
+void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+void wl12xx_tx_reset(struct wl1271 *wl, bool reset_tx_queues);
void wl1271_tx_flush(struct wl1271 *wl);
u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band);
u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,
enum ieee80211_band rate_band);
u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set);
-u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct sk_buff *skb);
+u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct sk_buff *skb);
+u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct sk_buff *skb);
void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid);
void wl1271_handle_tx_low_watermark(struct wl1271 *wl);
bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb);
+void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids);
/* from main.c */
-void wl1271_free_sta(struct wl1271 *wl, u8 hlid);
+void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid);
#endif
diff --git a/drivers/net/wireless/wl12xx/wl12xx.h b/drivers/net/wireless/wl12xx/wl12xx.h
index 1ec90fc7505e..b2b09cd02022 100644
--- a/drivers/net/wireless/wl12xx/wl12xx.h
+++ b/drivers/net/wireless/wl12xx/wl12xx.h
@@ -35,83 +35,6 @@
#include "conf.h"
#include "ini.h"
-#define DRIVER_NAME "wl1271"
-#define DRIVER_PREFIX DRIVER_NAME ": "
-
-/*
- * FW versions support BA 11n
- * versions marks x.x.x.50-60.x
- */
-#define WL12XX_BA_SUPPORT_FW_COST_VER2_START 50
-#define WL12XX_BA_SUPPORT_FW_COST_VER2_END 60
-
-enum {
- DEBUG_NONE = 0,
- DEBUG_IRQ = BIT(0),
- DEBUG_SPI = BIT(1),
- DEBUG_BOOT = BIT(2),
- DEBUG_MAILBOX = BIT(3),
- DEBUG_TESTMODE = BIT(4),
- DEBUG_EVENT = BIT(5),
- DEBUG_TX = BIT(6),
- DEBUG_RX = BIT(7),
- DEBUG_SCAN = BIT(8),
- DEBUG_CRYPT = BIT(9),
- DEBUG_PSM = BIT(10),
- DEBUG_MAC80211 = BIT(11),
- DEBUG_CMD = BIT(12),
- DEBUG_ACX = BIT(13),
- DEBUG_SDIO = BIT(14),
- DEBUG_FILTERS = BIT(15),
- DEBUG_ADHOC = BIT(16),
- DEBUG_AP = BIT(17),
- DEBUG_MASTER = (DEBUG_ADHOC | DEBUG_AP),
- DEBUG_ALL = ~0,
-};
-
-extern u32 wl12xx_debug_level;
-
-#define DEBUG_DUMP_LIMIT 1024
-
-#define wl1271_error(fmt, arg...) \
- pr_err(DRIVER_PREFIX "ERROR " fmt "\n", ##arg)
-
-#define wl1271_warning(fmt, arg...) \
- pr_warning(DRIVER_PREFIX "WARNING " fmt "\n", ##arg)
-
-#define wl1271_notice(fmt, arg...) \
- pr_info(DRIVER_PREFIX fmt "\n", ##arg)
-
-#define wl1271_info(fmt, arg...) \
- pr_info(DRIVER_PREFIX fmt "\n", ##arg)
-
-#define wl1271_debug(level, fmt, arg...) \
- do { \
- if (level & wl12xx_debug_level) \
- pr_debug(DRIVER_PREFIX fmt "\n", ##arg); \
- } while (0)
-
-/* TODO: use pr_debug_hex_dump when it will be available */
-#define wl1271_dump(level, prefix, buf, len) \
- do { \
- if (level & wl12xx_debug_level) \
- print_hex_dump(KERN_DEBUG, DRIVER_PREFIX prefix, \
- DUMP_PREFIX_OFFSET, 16, 1, \
- buf, \
- min_t(size_t, len, DEBUG_DUMP_LIMIT), \
- 0); \
- } while (0)
-
-#define wl1271_dump_ascii(level, prefix, buf, len) \
- do { \
- if (level & wl12xx_debug_level) \
- print_hex_dump(KERN_DEBUG, DRIVER_PREFIX prefix, \
- DUMP_PREFIX_OFFSET, 16, 1, \
- buf, \
- min_t(size_t, len, DEBUG_DUMP_LIMIT), \
- true); \
- } while (0)
-
#define WL127X_FW_NAME "ti-connectivity/wl127x-fw-3.bin"
#define WL128X_FW_NAME "ti-connectivity/wl128x-fw-3.bin"
@@ -142,16 +65,12 @@ extern u32 wl12xx_debug_level;
#define WL12XX_INVALID_ROLE_ID 0xff
#define WL12XX_INVALID_LINK_ID 0xff
+#define WL12XX_MAX_RATE_POLICIES 16
+
/* Defined by FW as 0. Will not be freed or allocated. */
#define WL12XX_SYSTEM_HLID 0
/*
- * TODO: we currently don't support multirole. remove
- * this constant from the code when we do.
- */
-#define WL1271_AP_STA_HLID_START 3
-
-/*
* When in AP-mode, we allow (at least) this number of packets
* to be transmitted to FW for a STA in PS-mode. Only when packets are
* present in the FW buffers it will wake the sleeping STA. We want to put
@@ -236,13 +155,6 @@ struct wl1271_stats {
#define AP_MAX_STATIONS 8
-/* Broadcast and Global links + system link + links to stations */
-/*
- * TODO: when WL1271_AP_STA_HLID_START is no longer constant, change all
- * the places that use this.
- */
-#define AP_MAX_LINKS (AP_MAX_STATIONS + WL1271_AP_STA_HLID_START)
-
/* FW status registers */
struct wl12xx_fw_status {
__le32 intr;
@@ -299,17 +211,14 @@ struct wl1271_scan {
};
struct wl1271_if_operations {
- void (*read)(struct wl1271 *wl, int addr, void *buf, size_t len,
+ void (*read)(struct device *child, int addr, void *buf, size_t len,
bool fixed);
- void (*write)(struct wl1271 *wl, int addr, void *buf, size_t len,
+ void (*write)(struct device *child, int addr, void *buf, size_t len,
bool fixed);
- void (*reset)(struct wl1271 *wl);
- void (*init)(struct wl1271 *wl);
- int (*power)(struct wl1271 *wl, bool enable);
- struct device* (*dev)(struct wl1271 *wl);
- void (*enable_irq)(struct wl1271 *wl);
- void (*disable_irq)(struct wl1271 *wl);
- void (*set_block_size) (struct wl1271 *wl, unsigned int blksz);
+ void (*reset)(struct device *child);
+ void (*init)(struct device *child);
+ int (*power)(struct device *child, bool enable);
+ void (*set_block_size) (struct device *child, unsigned int blksz);
};
#define MAX_NUM_KEYS 14
@@ -326,29 +235,33 @@ struct wl1271_ap_key {
};
enum wl12xx_flags {
- WL1271_FLAG_STA_ASSOCIATED,
- WL1271_FLAG_IBSS_JOINED,
WL1271_FLAG_GPIO_POWER,
WL1271_FLAG_TX_QUEUE_STOPPED,
WL1271_FLAG_TX_PENDING,
WL1271_FLAG_IN_ELP,
WL1271_FLAG_ELP_REQUESTED,
- WL1271_FLAG_PSM,
- WL1271_FLAG_PSM_REQUESTED,
WL1271_FLAG_IRQ_RUNNING,
- WL1271_FLAG_IDLE,
- WL1271_FLAG_PSPOLL_FAILURE,
- WL1271_FLAG_STA_STATE_SENT,
WL1271_FLAG_FW_TX_BUSY,
- WL1271_FLAG_AP_STARTED,
- WL1271_FLAG_IF_INITIALIZED,
WL1271_FLAG_DUMMY_PACKET_PENDING,
WL1271_FLAG_SUSPENDED,
WL1271_FLAG_PENDING_WORK,
WL1271_FLAG_SOFT_GEMINI,
- WL1271_FLAG_RX_STREAMING_STARTED,
WL1271_FLAG_RECOVERY_IN_PROGRESS,
- WL1271_FLAG_CS_PROGRESS,
+};
+
+enum wl12xx_vif_flags {
+ WLVIF_FLAG_INITIALIZED,
+ WLVIF_FLAG_STA_ASSOCIATED,
+ WLVIF_FLAG_IBSS_JOINED,
+ WLVIF_FLAG_AP_STARTED,
+ WLVIF_FLAG_PSM,
+ WLVIF_FLAG_PSM_REQUESTED,
+ WLVIF_FLAG_STA_STATE_SENT,
+ WLVIF_FLAG_RX_STREAMING_STARTED,
+ WLVIF_FLAG_PSPOLL_FAILURE,
+ WLVIF_FLAG_CS_PROGRESS,
+ WLVIF_FLAG_AP_PROBE_RESP_SET,
+ WLVIF_FLAG_IN_USE,
};
struct wl1271_link {
@@ -366,10 +279,11 @@ struct wl1271_link {
};
struct wl1271 {
- struct platform_device *plat_dev;
struct ieee80211_hw *hw;
bool mac80211_registered;
+ struct device *dev;
+
void *if_priv;
struct wl1271_if_operations *if_ops;
@@ -399,25 +313,20 @@ struct wl1271 {
s8 hw_pg_ver;
- u8 bssid[ETH_ALEN];
u8 mac_addr[ETH_ALEN];
- u8 bss_type;
- u8 set_bss_type;
- u8 p2p; /* we are using p2p role */
- u8 ssid[IEEE80211_MAX_SSID_LEN + 1];
- u8 ssid_len;
int channel;
- u8 role_id;
- u8 dev_role_id;
u8 system_hlid;
- u8 sta_hlid;
- u8 dev_hlid;
- u8 ap_global_hlid;
- u8 ap_bcast_hlid;
unsigned long links_map[BITS_TO_LONGS(WL12XX_MAX_LINKS)];
unsigned long roles_map[BITS_TO_LONGS(WL12XX_MAX_ROLES)];
unsigned long roc_map[BITS_TO_LONGS(WL12XX_MAX_ROLES)];
+ unsigned long rate_policies_map[
+ BITS_TO_LONGS(WL12XX_MAX_RATE_POLICIES)];
+
+ struct list_head wlvif_list;
+
+ u8 sta_count;
+ u8 ap_count;
struct wl1271_acx_mem_map *target_mem_map;
@@ -440,11 +349,7 @@ struct wl1271 {
/* Time-offset between host and chipset clocks */
s64 time_offset;
- /* Session counter for the chipset */
- int session_counter;
-
/* Frames scheduled for transmission, not handled yet */
- struct sk_buff_head tx_queue[NUM_TX_QUEUES];
int tx_queue_count[NUM_TX_QUEUES];
long stopped_queues_map;
@@ -462,17 +367,6 @@ struct wl1271 {
struct sk_buff *tx_frames[ACX_TX_DESCRIPTORS];
int tx_frames_cnt;
- /*
- * Security sequence number
- * bits 0-15: lower 16 bits part of sequence number
- * bits 16-47: higher 32 bits part of sequence number
- * bits 48-63: not in use
- */
- u64 tx_security_seq;
-
- /* 8 bits of the last sequence number in use */
- u8 tx_security_last_seq_lsb;
-
/* FW Rx counter */
u32 rx_counter;
@@ -507,59 +401,21 @@ struct wl1271 {
u32 mbox_ptr[2];
/* Are we currently scanning */
+ struct ieee80211_vif *scan_vif;
struct wl1271_scan scan;
struct delayed_work scan_complete_work;
bool sched_scanning;
- /* probe-req template for the current AP */
- struct sk_buff *probereq;
-
- /* Our association ID */
- u16 aid;
-
- /*
- * currently configured rate set:
- * bits 0-15 - 802.11abg rates
- * bits 16-23 - 802.11n MCS index mask
- * support only 1 stream, thus only 8 bits for the MCS rates (0-7).
- */
- u32 basic_rate_set;
- u32 basic_rate;
- u32 rate_set;
- u32 bitrate_masks[IEEE80211_NUM_BANDS];
-
/* The current band */
enum ieee80211_band band;
- /* Beaconing interval (needed for ad-hoc) */
- u32 beacon_int;
-
- /* Default key (for WEP) */
- u32 default_key;
-
- /* Rx Streaming */
- struct work_struct rx_streaming_enable_work;
- struct work_struct rx_streaming_disable_work;
- struct timer_list rx_streaming_timer;
-
struct completion *elp_compl;
- struct completion *ps_compl;
struct delayed_work elp_work;
- struct delayed_work pspoll_work;
-
- /* counter for ps-poll delivery failures */
- int ps_poll_failures;
-
- /* retry counter for PSM entries */
- u8 psm_entry_retry;
/* in dBm */
int power_level;
- int rssi_thold;
- int last_rssi_event;
-
struct wl1271_stats stats;
__le32 buffer_32;
@@ -583,20 +439,9 @@ struct wl1271 {
/* Most recently reported noise in dBm */
s8 noise;
- /* map for HLIDs of associated stations - when operating in AP mode */
- unsigned long ap_hlid_map[BITS_TO_LONGS(AP_MAX_STATIONS)];
-
- /* recoreded keys for AP-mode - set here before AP startup */
- struct wl1271_ap_key *recorded_ap_keys[MAX_NUM_KEYS];
-
/* bands supported by this instance of wl12xx */
struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
- /* RX BA constraint value */
- bool ba_support;
- u8 ba_rx_bitmap;
- bool ba_allowed;
-
int tcxo_clock;
/*
@@ -610,10 +455,7 @@ struct wl1271 {
* AP-mode - links indexed by HLID. The global and broadcast links
* are always active.
*/
- struct wl1271_link links[AP_MAX_LINKS];
-
- /* the hlid of the link where the last transmitted skb came from */
- int last_tx_hlid;
+ struct wl1271_link links[WL12XX_MAX_LINKS];
/* AP-mode - a bitmap of links currently in PS mode according to FW */
u32 ap_fw_ps_map;
@@ -632,21 +474,173 @@ struct wl1271 {
/* AP-mode - number of currently connected stations */
int active_sta_count;
+
+ /* last wlvif we transmitted from */
+ struct wl12xx_vif *last_wlvif;
};
struct wl1271_station {
u8 hlid;
};
+struct wl12xx_vif {
+ struct wl1271 *wl;
+ struct list_head list;
+ unsigned long flags;
+ u8 bss_type;
+ u8 p2p; /* we are using p2p role */
+ u8 role_id;
+
+ /* sta/ibss specific */
+ u8 dev_role_id;
+ u8 dev_hlid;
+
+ union {
+ struct {
+ u8 hlid;
+ u8 ba_rx_bitmap;
+
+ u8 basic_rate_idx;
+ u8 ap_rate_idx;
+ u8 p2p_rate_idx;
+ } sta;
+ struct {
+ u8 global_hlid;
+ u8 bcast_hlid;
+
+ /* HLIDs bitmap of associated stations */
+ unsigned long sta_hlid_map[BITS_TO_LONGS(
+ WL12XX_MAX_LINKS)];
+
+			/* recorded keys - set here before AP startup */
+ struct wl1271_ap_key *recorded_keys[MAX_NUM_KEYS];
+
+ u8 mgmt_rate_idx;
+ u8 bcast_rate_idx;
+ u8 ucast_rate_idx[CONF_TX_MAX_AC_COUNT];
+ } ap;
+ };
+
+ /* the hlid of the last transmitted skb */
+ int last_tx_hlid;
+
+ unsigned long links_map[BITS_TO_LONGS(WL12XX_MAX_LINKS)];
+
+ u8 ssid[IEEE80211_MAX_SSID_LEN + 1];
+ u8 ssid_len;
+
+ /* The current band */
+ enum ieee80211_band band;
+ int channel;
+
+ u32 bitrate_masks[IEEE80211_NUM_BANDS];
+ u32 basic_rate_set;
+
+ /*
+ * currently configured rate set:
+ * bits 0-15 - 802.11abg rates
+ * bits 16-23 - 802.11n MCS index mask
+ * support only 1 stream, thus only 8 bits for the MCS rates (0-7).
+ */
+ u32 basic_rate;
+ u32 rate_set;
+
+ /* probe-req template for the current AP */
+ struct sk_buff *probereq;
+
+ /* Beaconing interval (needed for ad-hoc) */
+ u32 beacon_int;
+
+ /* Default key (for WEP) */
+ u32 default_key;
+
+ /* Our association ID */
+ u16 aid;
+
+ /* Session counter for the chipset */
+ int session_counter;
+
+ struct completion *ps_compl;
+ struct delayed_work pspoll_work;
+
+ /* counter for ps-poll delivery failures */
+ int ps_poll_failures;
+
+ /* retry counter for PSM entries */
+ u8 psm_entry_retry;
+
+ /* in dBm */
+ int power_level;
+
+ int rssi_thold;
+ int last_rssi_event;
+
+ /* RX BA constraint value */
+ bool ba_support;
+ bool ba_allowed;
+
+ /* Rx Streaming */
+ struct work_struct rx_streaming_enable_work;
+ struct work_struct rx_streaming_disable_work;
+ struct timer_list rx_streaming_timer;
+
+ /*
+ * This struct must be last!
+	 * data that has to be saved across reconfigs (e.g. recovery)
+ * should be declared in this struct.
+ */
+ struct {
+ u8 persistent[0];
+ /*
+ * Security sequence number
+ * bits 0-15: lower 16 bits part of sequence number
+ * bits 16-47: higher 32 bits part of sequence number
+ * bits 48-63: not in use
+ */
+ u64 tx_security_seq;
+
+ /* 8 bits of the last sequence number in use */
+ u8 tx_security_last_seq_lsb;
+ };
+};
+
+static inline struct wl12xx_vif *wl12xx_vif_to_data(struct ieee80211_vif *vif)
+{
+ return (struct wl12xx_vif *)vif->drv_priv;
+}
+
+static inline
+struct ieee80211_vif *wl12xx_wlvif_to_vif(struct wl12xx_vif *wlvif)
+{
+ return container_of((void *)wlvif, struct ieee80211_vif, drv_priv);
+}
+
+#define wl12xx_for_each_wlvif(wl, wlvif) \
+ list_for_each_entry(wlvif, &wl->wlvif_list, list)
+
+#define wl12xx_for_each_wlvif_continue(wl, wlvif) \
+ list_for_each_entry_continue(wlvif, &wl->wlvif_list, list)
+
+#define wl12xx_for_each_wlvif_bss_type(wl, wlvif, _bss_type) \
+ wl12xx_for_each_wlvif(wl, wlvif) \
+ if (wlvif->bss_type == _bss_type)
+
+#define wl12xx_for_each_wlvif_sta(wl, wlvif) \
+ wl12xx_for_each_wlvif_bss_type(wl, wlvif, BSS_TYPE_STA_BSS)
+
+#define wl12xx_for_each_wlvif_ap(wl, wlvif) \
+ wl12xx_for_each_wlvif_bss_type(wl, wlvif, BSS_TYPE_AP_BSS)
+
int wl1271_plt_start(struct wl1271 *wl);
int wl1271_plt_stop(struct wl1271 *wl);
-int wl1271_recalc_rx_streaming(struct wl1271 *wl);
+int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif);
void wl12xx_queue_recovery_work(struct wl1271 *wl);
size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen);
#define JOIN_TIMEOUT 5000 /* 5000 milliseconds to join */
-#define SESSION_COUNTER_MAX 7 /* maximum value for the session counter */
+#define SESSION_COUNTER_MAX 6 /* maximum value for the session counter */
+#define SESSION_COUNTER_INVALID 7 /* used with dummy_packet */
#define WL1271_DEFAULT_POWER_LEVEL 0
@@ -669,8 +663,8 @@ size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen);
/* Each RX/TX transaction requires an end-of-transaction transfer */
#define WL12XX_QUIRK_END_OF_TRANSACTION BIT(0)
-/* WL128X requires aggregated packets to be aligned to the SDIO block size */
-#define WL12XX_QUIRK_BLOCKSIZE_ALIGNMENT BIT(2)
+/* wl127x and SPI don't support SDIO block size alignment */
+#define WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT BIT(2)
/* Older firmwares did not implement the FW logger over bus feature */
#define WL12XX_QUIRK_FWLOG_NOT_IMPLEMENTED BIT(4)
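The wl12xx.h changes above move per-connection state from struct wl1271 into the new struct wl12xx_vif, which lives in mac80211's per-interface drv_priv area; wl12xx_vif_to_data() and wl12xx_wlvif_to_vif() convert between the two. A generic sketch of that embedding (illustrative names, assuming mac80211's drv_priv mechanism):

#include <linux/kernel.h>
#include <linux/list.h>
#include <net/mac80211.h>

/* Illustrative per-interface private data, like struct wl12xx_vif above. */
struct demo_vif {
	struct list_head list;
	int channel;
};

/* mac80211 reserves hw->vif_data_size bytes of storage in each vif... */
static inline struct demo_vif *demo_vif_to_data(struct ieee80211_vif *vif)
{
	return (struct demo_vif *)vif->drv_priv;
}

/* ...so mapping back is container_of() on the drv_priv member. */
static inline struct ieee80211_vif *demo_data_to_vif(struct demo_vif *dvif)
{
	return container_of((void *)dvif, struct ieee80211_vif, drv_priv);
}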
diff --git a/drivers/net/wireless/wl12xx/wl12xx_80211.h b/drivers/net/wireless/wl12xx/wl12xx_80211.h
index f7971d3b0898..8f0ffaf62309 100644
--- a/drivers/net/wireless/wl12xx/wl12xx_80211.h
+++ b/drivers/net/wireless/wl12xx/wl12xx_80211.h
@@ -116,11 +116,6 @@ struct wl12xx_ps_poll_template {
u8 ta[ETH_ALEN];
} __packed;
-struct wl12xx_qos_null_data_template {
- struct ieee80211_header header;
- __le16 qos_ctl;
-} __packed;
-
struct wl12xx_arp_rsp_template {
struct ieee80211_hdr_3addr hdr;
diff --git a/drivers/net/wireless/wl12xx/wl12xx_platform_data.c b/drivers/net/wireless/wl12xx/wl12xx_platform_data.c
index 973b11060a8f..998e95895f9d 100644
--- a/drivers/net/wireless/wl12xx/wl12xx_platform_data.c
+++ b/drivers/net/wireless/wl12xx/wl12xx_platform_data.c
@@ -1,8 +1,29 @@
+/*
+ * This file is part of wl12xx
+ *
+ * Copyright (C) 2010-2011 Texas Instruments, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
#include <linux/module.h>
#include <linux/err.h>
#include <linux/wl12xx.h>
-static const struct wl12xx_platform_data *platform_data;
+static struct wl12xx_platform_data *platform_data;
int __init wl12xx_set_platform_data(const struct wl12xx_platform_data *data)
{
@@ -18,7 +39,7 @@ int __init wl12xx_set_platform_data(const struct wl12xx_platform_data *data)
return 0;
}
-const struct wl12xx_platform_data *wl12xx_get_platform_data(void)
+struct wl12xx_platform_data *wl12xx_get_platform_data(void)
{
if (!platform_data)
return ERR_PTR(-ENODEV);
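Since wl12xx_get_platform_data() now hands back a mutable pointer and reports a missing registration as ERR_PTR(-ENODEV), consumers are expected to check the result with IS_ERR(); a minimal sketch (variable names illustrative):

    struct wl12xx_platform_data *pdata;

    pdata = wl12xx_get_platform_data();
    if (IS_ERR(pdata))
            return PTR_ERR(pdata);      /* no board data was registered */
    /* pdata may now be adjusted by the bus glue before use */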
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 182562952c79..b7d41f8c338a 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -165,7 +165,8 @@ static int xenvif_change_mtu(struct net_device *dev, int mtu)
return 0;
}
-static u32 xenvif_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t xenvif_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
struct xenvif *vif = netdev_priv(dev);
@@ -222,7 +223,7 @@ static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 * data)
}
}
-static struct ethtool_ops xenvif_ethtool_ops = {
+static const struct ethtool_ops xenvif_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_sset_count = xenvif_get_sset_count,
@@ -230,7 +231,7 @@ static struct ethtool_ops xenvif_ethtool_ops = {
.get_strings = xenvif_get_strings,
};
-static struct net_device_ops xenvif_netdev_ops = {
+static const struct net_device_ops xenvif_netdev_ops = {
.ndo_start_xmit = xenvif_start_xmit,
.ndo_get_stats = xenvif_get_stats,
.ndo_open = xenvif_open,
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 0cb594c86090..59effac15f36 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -395,7 +395,7 @@ static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
struct gnttab_copy *copy_gop;
struct netbk_rx_meta *meta;
/*
- * These variables a used iff get_page_ext returns true,
+ * These variables are used iff get_page_ext returns true,
* in which case they are guaranteed to be initialized.
*/
unsigned int uninitialized_var(group), uninitialized_var(idx);
@@ -940,8 +940,6 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
if (!page)
return NULL;
- netbk->mmap_pages[pending_idx] = page;
-
gop->source.u.ref = txp->gref;
gop->source.domid = vif->domid;
gop->source.offset = txp->offset;
@@ -1021,7 +1019,7 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
pending_idx = *((u16 *)skb->data);
xen_netbk_idx_release(netbk, pending_idx);
for (j = start; j < i; j++) {
- pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
+ pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
xen_netbk_idx_release(netbk, pending_idx);
}
@@ -1336,8 +1334,6 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
continue;
}
- netbk->mmap_pages[pending_idx] = page;
-
gop->source.u.ref = txreq.gref;
gop->source.domid = vif->domid;
gop->source.offset = txreq.offset;
@@ -1638,7 +1634,7 @@ static int __init netback_init(void)
int rc = 0;
int group;
- if (!xen_pv_domain())
+ if (!xen_domain())
return -ENODEV;
xen_netbk_group_nr = num_online_cpus();
@@ -1668,7 +1664,7 @@ static int __init netback_init(void)
"netback/%u", group);
if (IS_ERR(netbk->task)) {
- printk(KERN_ALERT "kthread_run() fails at netback\n");
+ printk(KERN_ALERT "kthread_create() fails at netback\n");
del_timer(&netbk->net_timer);
rc = PTR_ERR(netbk->task);
goto failed_init;
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 1ce729d6af75..410018c4c528 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -474,17 +474,14 @@ static const struct xenbus_device_id netback_ids[] = {
};
-static struct xenbus_driver netback = {
- .name = "vif",
- .owner = THIS_MODULE,
- .ids = netback_ids,
+static DEFINE_XENBUS_DRIVER(netback, ,
.probe = netback_probe,
.remove = netback_remove,
.uevent = netback_uevent,
.otherend_changed = frontend_changed,
-};
+);
int xenvif_xenbus_init(void)
{
- return xenbus_register_backend(&netback);
+ return xenbus_register_backend(&netback_driver);
}
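For orientation only: DEFINE_XENBUS_DRIVER(netback, , ...) declares a struct xenbus_driver named netback_driver (hence &netback_driver in xenvif_xenbus_init()), derives the driver name from the netback_ids table, sets the module owner, and wires in the listed callbacks. A rough sketch of the generated structure, with the caveat that the exact field placement belongs to the macro, not this patch:

    static struct xenbus_driver netback_driver = {
            /* name is taken from netback_ids, owner from THIS_MODULE */
            .ids              = netback_ids,
            .probe            = netback_probe,
            .remove           = netback_remove,
            .uevent           = netback_uevent,
            .otherend_changed = frontend_changed,
    };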
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 226faab23603..fa679057630f 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -201,7 +201,7 @@ static void xennet_sysfs_delif(struct net_device *netdev);
#define xennet_sysfs_delif(dev) do { } while (0)
#endif
-static int xennet_can_sg(struct net_device *dev)
+static bool xennet_can_sg(struct net_device *dev)
{
return dev->features & NETIF_F_SG;
}
@@ -1190,7 +1190,8 @@ static void xennet_uninit(struct net_device *dev)
gnttab_free_grant_references(np->gref_rx_head);
}
-static u32 xennet_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t xennet_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
struct netfront_info *np = netdev_priv(dev);
int val;
@@ -1216,7 +1217,8 @@ static u32 xennet_fix_features(struct net_device *dev, u32 features)
return features;
}
-static int xennet_set_features(struct net_device *dev, u32 features)
+static int xennet_set_features(struct net_device *dev,
+ netdev_features_t features)
{
if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) {
netdev_info(dev, "Reducing MTU because no SG offload");
@@ -1707,7 +1709,6 @@ static void netback_changed(struct xenbus_device *dev,
case XenbusStateInitialised:
case XenbusStateReconfiguring:
case XenbusStateReconfigured:
- case XenbusStateConnected:
case XenbusStateUnknown:
case XenbusStateClosed:
break;
@@ -1718,6 +1719,9 @@ static void netback_changed(struct xenbus_device *dev,
if (xennet_connect(netdev) != 0)
break;
xenbus_switch_state(dev, XenbusStateConnected);
+ break;
+
+ case XenbusStateConnected:
netif_notify_peers(netdev);
break;
@@ -1910,7 +1914,7 @@ static void xennet_sysfs_delif(struct net_device *netdev)
#endif /* CONFIG_SYSFS */
-static struct xenbus_device_id netfront_ids[] = {
+static const struct xenbus_device_id netfront_ids[] = {
{ "vif" },
{ "" }
};
@@ -1937,15 +1941,12 @@ static int __devexit xennet_remove(struct xenbus_device *dev)
return 0;
}
-static struct xenbus_driver netfront_driver = {
- .name = "vif",
- .owner = THIS_MODULE,
- .ids = netfront_ids,
+static DEFINE_XENBUS_DRIVER(netfront, ,
.probe = netfront_probe,
.remove = __devexit_p(xennet_remove),
.resume = netfront_resume,
.otherend_changed = netback_changed,
-};
+);
static int __init netif_init(void)
{
diff --git a/drivers/nfc/pn533.c b/drivers/nfc/pn533.c
index bd023847efb2..1a1500bc8452 100644
--- a/drivers/nfc/pn533.c
+++ b/drivers/nfc/pn533.c
@@ -72,6 +72,7 @@ MODULE_DEVICE_TABLE(usb, pn533_table);
#define PN533_CMD_IN_LIST_PASSIVE_TARGET 0x4A
#define PN533_CMD_IN_ATR 0x50
#define PN533_CMD_IN_RELEASE 0x52
+#define PN533_CMD_IN_JUMP_FOR_DEP 0x56
#define PN533_CMD_RESPONSE(cmd) (cmd + 1)
@@ -231,6 +232,26 @@ struct pn533_cmd_activate_response {
u8 gt[];
} __packed;
+/* PN533_CMD_IN_JUMP_FOR_DEP */
+struct pn533_cmd_jump_dep {
+ u8 active;
+ u8 baud;
+ u8 next;
+ u8 gt[];
+} __packed;
+
+struct pn533_cmd_jump_dep_response {
+ u8 status;
+ u8 tg;
+ u8 nfcid3t[10];
+ u8 didt;
+ u8 bst;
+ u8 brt;
+ u8 to;
+ u8 ppt;
+ /* optional */
+ u8 gt[];
+} __packed;
struct pn533 {
struct usb_device *udev;
@@ -1121,6 +1142,7 @@ static int pn533_activate_target_nfcdep(struct pn533 *dev)
{
struct pn533_cmd_activate_param param;
struct pn533_cmd_activate_response *resp;
+ u16 gt_len;
int rc;
nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
@@ -1146,7 +1168,11 @@ static int pn533_activate_target_nfcdep(struct pn533 *dev)
if (rc != PN533_CMD_RET_SUCCESS)
return -EIO;
- return 0;
+ /* ATR_RES general bytes are located at offset 16 */
+ gt_len = PN533_FRAME_CMD_PARAMS_LEN(dev->in_frame) - 16;
+ rc = nfc_set_remote_general_bytes(dev->nfc_dev, resp->gt, gt_len);
+
+ return rc;
}
static int pn533_activate_target(struct nfc_dev *nfc_dev, u32 target_idx,
@@ -1239,6 +1265,142 @@ static void pn533_deactivate_target(struct nfc_dev *nfc_dev, u32 target_idx)
return;
}
+
+static int pn533_in_dep_link_up_complete(struct pn533 *dev, void *arg,
+ u8 *params, int params_len)
+{
+ struct pn533_cmd_jump_dep *cmd;
+ struct pn533_cmd_jump_dep_response *resp;
+ struct nfc_target nfc_target;
+ u8 target_gt_len;
+ int rc;
+
+ if (params_len == -ENOENT) {
+ nfc_dev_dbg(&dev->interface->dev, "");
+ return 0;
+ }
+
+ if (params_len < 0) {
+ nfc_dev_err(&dev->interface->dev,
+ "Error %d when bringing DEP link up",
+ params_len);
+ return 0;
+ }
+
+ if (dev->tgt_available_prots &&
+ !(dev->tgt_available_prots & (1 << NFC_PROTO_NFC_DEP))) {
+ nfc_dev_err(&dev->interface->dev,
+ "The target does not support DEP");
+ return -EINVAL;
+ }
+
+ resp = (struct pn533_cmd_jump_dep_response *) params;
+ cmd = (struct pn533_cmd_jump_dep *) arg;
+ rc = resp->status & PN533_CMD_RET_MASK;
+ if (rc != PN533_CMD_RET_SUCCESS) {
+ nfc_dev_err(&dev->interface->dev,
+ "Bringing DEP link up failed %d", rc);
+ return 0;
+ }
+
+ if (!dev->tgt_available_prots) {
+ nfc_dev_dbg(&dev->interface->dev, "Creating new target");
+
+ nfc_target.supported_protocols = NFC_PROTO_NFC_DEP_MASK;
+ rc = nfc_targets_found(dev->nfc_dev, &nfc_target, 1);
+ if (rc)
+ return 0;
+
+ dev->tgt_available_prots = 0;
+ }
+
+ dev->tgt_active_prot = NFC_PROTO_NFC_DEP;
+
+ /* ATR_RES general bytes are located at offset 17 */
+ target_gt_len = PN533_FRAME_CMD_PARAMS_LEN(dev->in_frame) - 17;
+ rc = nfc_set_remote_general_bytes(dev->nfc_dev,
+ resp->gt, target_gt_len);
+ if (rc == 0)
+ rc = nfc_dep_link_is_up(dev->nfc_dev,
+ dev->nfc_dev->targets[0].idx,
+ !cmd->active, NFC_RF_INITIATOR);
+
+ return 0;
+}
+
+static int pn533_dep_link_up(struct nfc_dev *nfc_dev, int target_idx,
+ u8 comm_mode, u8 rf_mode)
+{
+ struct pn533 *dev = nfc_get_drvdata(nfc_dev);
+ struct pn533_cmd_jump_dep *cmd;
+ u8 cmd_len, local_gt_len, *local_gt;
+ int rc;
+
+ nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+
+ if (rf_mode == NFC_RF_TARGET) {
+ nfc_dev_err(&dev->interface->dev, "Target mode not supported");
+ return -EOPNOTSUPP;
+ }
+
+
+ if (dev->poll_mod_count) {
+ nfc_dev_err(&dev->interface->dev,
+ "Cannot bring the DEP link up while polling");
+ return -EBUSY;
+ }
+
+ if (dev->tgt_active_prot) {
+ nfc_dev_err(&dev->interface->dev,
+ "There is already an active target");
+ return -EBUSY;
+ }
+
+ local_gt = nfc_get_local_general_bytes(dev->nfc_dev, &local_gt_len);
+ if (local_gt_len > NFC_MAX_GT_LEN)
+ return -EINVAL;
+
+ cmd_len = sizeof(struct pn533_cmd_jump_dep) + local_gt_len;
+ cmd = kzalloc(cmd_len, GFP_KERNEL);
+ if (cmd == NULL)
+ return -ENOMEM;
+
+ pn533_tx_frame_init(dev->out_frame, PN533_CMD_IN_JUMP_FOR_DEP);
+
+ cmd->active = !comm_mode;
+ cmd->baud = 0;
+ if (local_gt != NULL) {
+ cmd->next = 4; /* We have some Gi */
+ memcpy(cmd->gt, local_gt, local_gt_len);
+ } else {
+ cmd->next = 0;
+ }
+
+ memcpy(PN533_FRAME_CMD_PARAMS_PTR(dev->out_frame), cmd, cmd_len);
+ dev->out_frame->datalen += cmd_len;
+
+ pn533_tx_frame_finish(dev->out_frame);
+
+ rc = pn533_send_cmd_frame_async(dev, dev->out_frame, dev->in_frame,
+ dev->in_maxlen, pn533_in_dep_link_up_complete,
+ cmd, GFP_KERNEL);
+ if (rc)
+ goto out;
+
+
+out:
+ kfree(cmd);
+
+ return rc;
+}
+
+static int pn533_dep_link_down(struct nfc_dev *nfc_dev)
+{
+ pn533_deactivate_target(nfc_dev, 0);
+
+ return 0;
+}
+
#define PN533_CMD_DATAEXCH_HEAD_LEN (sizeof(struct pn533_frame) + 3)
#define PN533_CMD_DATAEXCH_DATA_MAXLEN 262
@@ -1339,7 +1501,7 @@ error:
return 0;
}
-int pn533_data_exchange(struct nfc_dev *nfc_dev, u32 target_idx,
+static int pn533_data_exchange(struct nfc_dev *nfc_dev, u32 target_idx,
struct sk_buff *skb,
data_exchange_cb_t cb,
void *cb_context)
@@ -1368,7 +1530,7 @@ int pn533_data_exchange(struct nfc_dev *nfc_dev, u32 target_idx,
PN533_CMD_DATAEXCH_DATA_MAXLEN +
PN533_FRAME_TAIL_SIZE;
- skb_resp = nfc_alloc_skb(skb_resp_len, GFP_KERNEL);
+ skb_resp = nfc_alloc_recv_skb(skb_resp_len, GFP_KERNEL);
if (!skb_resp) {
rc = -ENOMEM;
goto error;
@@ -1434,6 +1596,8 @@ static int pn533_set_configuration(struct pn533 *dev, u8 cfgitem, u8 *cfgdata,
struct nfc_ops pn533_nfc_ops = {
.dev_up = NULL,
.dev_down = NULL,
+ .dep_link_up = pn533_dep_link_up,
+ .dep_link_down = pn533_dep_link_down,
.start_poll = pn533_start_poll,
.stop_poll = pn533_stop_poll,
.activate_target = pn533_activate_target,
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index cac63c9f49ae..268163dd71c7 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -15,6 +15,15 @@ config PROC_DEVICETREE
an image of the device tree that the kernel copies from Open
Firmware or other boot firmware. If unsure, say Y here.
+config OF_SELFTEST
+ bool "Device Tree Runtime self tests"
+ help
+ This option builds in test cases for the device tree infrastructure
+ that are executed once at boot time, and the results dumped to the
+ console.
+
+ If unsure, say N here, but this option is safe to enable.
+
config OF_FLATTREE
bool
select DTC
diff --git a/drivers/of/Makefile b/drivers/of/Makefile
index dccb1176be57..a73f5a51ff4c 100644
--- a/drivers/of/Makefile
+++ b/drivers/of/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_OF_GPIO) += gpio.o
obj-$(CONFIG_OF_I2C) += of_i2c.o
obj-$(CONFIG_OF_NET) += of_net.o
obj-$(CONFIG_OF_SPI) += of_spi.o
+obj-$(CONFIG_OF_SELFTEST) += selftest.o
obj-$(CONFIG_OF_MDIO) += of_mdio.o
obj-$(CONFIG_OF_PCI) += of_pci.o
obj-$(CONFIG_OF_PCI_IRQ) += of_pci_irq.o
diff --git a/drivers/of/address.c b/drivers/of/address.c
index 72c33fbe451d..66d96f14c274 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -14,7 +14,7 @@
static struct of_bus *of_match_bus(struct device_node *np);
static int __of_address_to_resource(struct device_node *dev,
const __be32 *addrp, u64 size, unsigned int flags,
- struct resource *r);
+ const char *name, struct resource *r);
/* Debug utility */
#ifdef DEBUG
@@ -215,7 +215,7 @@ int of_pci_address_to_resource(struct device_node *dev, int bar,
addrp = of_get_pci_address(dev, bar, &size, &flags);
if (addrp == NULL)
return -EINVAL;
- return __of_address_to_resource(dev, addrp, size, flags, r);
+ return __of_address_to_resource(dev, addrp, size, flags, NULL, r);
}
EXPORT_SYMBOL_GPL(of_pci_address_to_resource);
#endif /* CONFIG_PCI */
@@ -529,7 +529,7 @@ EXPORT_SYMBOL(of_get_address);
static int __of_address_to_resource(struct device_node *dev,
const __be32 *addrp, u64 size, unsigned int flags,
- struct resource *r)
+ const char *name, struct resource *r)
{
u64 taddr;
@@ -551,7 +551,8 @@ static int __of_address_to_resource(struct device_node *dev,
r->end = taddr + size - 1;
}
r->flags = flags;
- r->name = dev->full_name;
+ r->name = name ? name : dev->full_name;
+
return 0;
}
@@ -569,11 +570,16 @@ int of_address_to_resource(struct device_node *dev, int index,
const __be32 *addrp;
u64 size;
unsigned int flags;
+ const char *name = NULL;
addrp = of_get_address(dev, index, &size, &flags);
if (addrp == NULL)
return -EINVAL;
- return __of_address_to_resource(dev, addrp, size, flags, r);
+
+ /* Get optional "reg-names" property to add a name to a resource */
+ of_property_read_string_index(dev, "reg-names", index, &name);
+
+ return __of_address_to_resource(dev, addrp, size, flags, name, r);
}
EXPORT_SYMBOL_GPL(of_address_to_resource);
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 9b6588ef0673..133908a6fd8d 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -752,7 +752,7 @@ int of_property_read_string_index(struct device_node *np, const char *propname,
for (i = 0; total < prop->length; total += l, p += l) {
l = strlen(p) + 1;
- if ((*p != 0) && (i++ == index)) {
+ if (i++ == index) {
*output = p;
return 0;
}
@@ -790,11 +790,9 @@ int of_property_count_strings(struct device_node *np, const char *propname)
p = prop->value;
- for (i = 0; total < prop->length; total += l, p += l) {
+ for (i = 0; total < prop->length; total += l, p += l, i++)
l = strlen(p) + 1;
- if (*p != 0)
- i++;
- }
+
return i;
}
EXPORT_SYMBOL_GPL(of_property_count_strings);
@@ -824,17 +822,19 @@ of_parse_phandle(struct device_node *np, const char *phandle_name, int index)
EXPORT_SYMBOL(of_parse_phandle);
/**
- * of_parse_phandles_with_args - Find a node pointed by phandle in a list
+ * of_parse_phandle_with_args() - Find a node pointed by phandle in a list
* @np: pointer to a device tree node containing a list
* @list_name: property name that contains a list
* @cells_name: property name that specifies phandles' arguments count
* @index: index of a phandle to parse out
- * @out_node: optional pointer to device_node struct pointer (will be filled)
- * @out_args: optional pointer to arguments pointer (will be filled)
+ * @out_args: optional pointer to output arguments structure (will be filled)
*
* This function is useful to parse lists of phandles and their arguments.
- * Returns 0 on success and fills out_node and out_args, on error returns
- * appropriate errno value.
+ * Returns 0 on success and fills out_args, on error returns appropriate
+ * errno value.
+ *
+ * Caller is responsible for calling of_node_put() on the returned out_args->np
+ * pointer.
*
* Example:
*
@@ -851,94 +851,96 @@ EXPORT_SYMBOL(of_parse_phandle);
* }
*
* To get a device_node of the `node2' node you may call this:
- * of_parse_phandles_with_args(node3, "list", "#list-cells", 2, &node2, &args);
+ * of_parse_phandle_with_args(node3, "list", "#list-cells", 1, &args);
*/
-int of_parse_phandles_with_args(struct device_node *np, const char *list_name,
+int of_parse_phandle_with_args(struct device_node *np, const char *list_name,
const char *cells_name, int index,
- struct device_node **out_node,
- const void **out_args)
+ struct of_phandle_args *out_args)
{
- int ret = -EINVAL;
- const __be32 *list;
- const __be32 *list_end;
- int size;
- int cur_index = 0;
+ const __be32 *list, *list_end;
+ int size, cur_index = 0;
+ uint32_t count = 0;
struct device_node *node = NULL;
- const void *args = NULL;
+ phandle phandle;
+ /* Retrieve the phandle list property */
list = of_get_property(np, list_name, &size);
- if (!list) {
- ret = -ENOENT;
- goto err0;
- }
+ if (!list)
+ return -EINVAL;
list_end = list + size / sizeof(*list);
+ /* Loop over the phandles until the requested entry is found */
while (list < list_end) {
- const __be32 *cells;
- phandle phandle;
+ count = 0;
+ /*
+ * If phandle is 0, then it is an empty entry with no
+ * arguments. Skip forward to the next entry.
+ */
phandle = be32_to_cpup(list++);
- args = list;
-
- /* one cell hole in the list = <>; */
- if (!phandle)
- goto next;
-
- node = of_find_node_by_phandle(phandle);
- if (!node) {
- pr_debug("%s: could not find phandle\n",
- np->full_name);
- goto err0;
- }
+ if (phandle) {
+ /*
+ * Find the provider node and parse the #*-cells
+ * property to determine the argument length
+ */
+ node = of_find_node_by_phandle(phandle);
+ if (!node) {
+ pr_err("%s: could not find phandle\n",
+ np->full_name);
+ break;
+ }
+ if (of_property_read_u32(node, cells_name, &count)) {
+ pr_err("%s: could not get %s for %s\n",
+ np->full_name, cells_name,
+ node->full_name);
+ break;
+ }
- cells = of_get_property(node, cells_name, &size);
- if (!cells || size != sizeof(*cells)) {
- pr_debug("%s: could not get %s for %s\n",
- np->full_name, cells_name, node->full_name);
- goto err1;
+ /*
+ * Make sure that the arguments actually fit in the
+ * remaining property data length
+ */
+ if (list + count > list_end) {
+ pr_err("%s: arguments longer than property\n",
+ np->full_name);
+ break;
+ }
}
- list += be32_to_cpup(cells);
- if (list > list_end) {
- pr_debug("%s: insufficient arguments length\n",
- np->full_name);
- goto err1;
+ /*
+ * All of the error cases above bail out of the loop, so at
+ * this point, the parsing is successful. If the requested
+ * index matches, then fill the out_args structure and return,
+ * or return -ENOENT for an empty entry.
+ */
+ if (cur_index == index) {
+ if (!phandle)
+ return -ENOENT;
+
+ if (out_args) {
+ int i;
+ if (WARN_ON(count > MAX_PHANDLE_ARGS))
+ count = MAX_PHANDLE_ARGS;
+ out_args->np = node;
+ out_args->args_count = count;
+ for (i = 0; i < count; i++)
+ out_args->args[i] = be32_to_cpup(list++);
+ }
+ return 0;
}
-next:
- if (cur_index == index)
- break;
of_node_put(node);
node = NULL;
- args = NULL;
+ list += count;
cur_index++;
}
- if (!node) {
- /*
- * args w/o node indicates that the loop above has stopped at
- * the 'hole' cell. Report this differently.
- */
- if (args)
- ret = -EEXIST;
- else
- ret = -ENOENT;
- goto err0;
- }
-
- if (out_node)
- *out_node = node;
- if (out_args)
- *out_args = args;
-
- return 0;
-err1:
- of_node_put(node);
-err0:
- pr_debug("%s failed with status %d\n", __func__, ret);
- return ret;
+ /* Loop exited without finding a valid entry; return an error */
+ if (node)
+ of_node_put(node);
+ return -EINVAL;
}
-EXPORT_SYMBOL(of_parse_phandles_with_args);
+EXPORT_SYMBOL(of_parse_phandle_with_args);
/**
* prom_add_property - Add a property to a node
@@ -1159,7 +1161,7 @@ void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align))
if (!of_aliases)
return;
- for_each_property(pp, of_aliases->properties) {
+ for_each_property_of_node(of_aliases, pp) {
const char *start = pp->name;
const char *end = start + strlen(start);
struct device_node *np;
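A consumer-side sketch of the reworked API (node and property names are illustrative, matching the kerneldoc example above); note that the caller now owns the reference held in args.np:

    struct of_phandle_args args;
    int i, rc;

    rc = of_parse_phandle_with_args(np, "list", "#list-cells", 1, &args);
    if (rc)
            return rc;      /* -ENOENT for an empty entry, -EINVAL otherwise */

    pr_debug("provider %s with %d argument cell(s)\n",
             args.np->full_name, args.args_count);
    for (i = 0; i < args.args_count; i++)
            pr_debug("  arg[%d] = %u\n", i, args.args[i]);

    of_node_put(args.np);   /* drop the provider reference when done */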
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index fd85fa298e0f..ea2bd1be2640 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -18,10 +18,12 @@
#include <linux/errno.h>
#include <linux/slab.h>
+#include <asm/setup.h> /* for COMMAND_LINE_SIZE */
#ifdef CONFIG_PPC
#include <asm/machdep.h>
#endif /* CONFIG_PPC */
+#include <asm/setup.h>
#include <asm/page.h>
char *of_fdt_get_string(struct boot_param_header *blob, u32 offset)
@@ -107,7 +109,7 @@ int of_fdt_is_compatible(struct boot_param_header *blob,
* of_fdt_match - Return true if node matches a list of compatible values
*/
int of_fdt_match(struct boot_param_header *blob, unsigned long node,
- const char **compat)
+ const char *const *compat)
{
unsigned int tmp, score = 0;
@@ -541,7 +543,7 @@ int __init of_flat_dt_is_compatible(unsigned long node, const char *compat)
/**
* of_flat_dt_match - Return true if node matches a list of compatible values
*/
-int __init of_flat_dt_match(unsigned long node, const char **compat)
+int __init of_flat_dt_match(unsigned long node, const char *const *compat)
{
return of_fdt_match(initial_boot_params, node, compat);
}
diff --git a/drivers/of/gpio.c b/drivers/of/gpio.c
index ef0105fa52b1..7e62d15d60f6 100644
--- a/drivers/of/gpio.c
+++ b/drivers/of/gpio.c
@@ -35,32 +35,27 @@ int of_get_named_gpio_flags(struct device_node *np, const char *propname,
int index, enum of_gpio_flags *flags)
{
int ret;
- struct device_node *gpio_np;
struct gpio_chip *gc;
- int size;
- const void *gpio_spec;
- const __be32 *gpio_cells;
+ struct of_phandle_args gpiospec;
- ret = of_parse_phandles_with_args(np, propname, "#gpio-cells", index,
- &gpio_np, &gpio_spec);
+ ret = of_parse_phandle_with_args(np, propname, "#gpio-cells", index,
+ &gpiospec);
if (ret) {
pr_debug("%s: can't parse gpios property\n", __func__);
goto err0;
}
- gc = of_node_to_gpiochip(gpio_np);
+ gc = of_node_to_gpiochip(gpiospec.np);
if (!gc) {
pr_debug("%s: gpio controller %s isn't registered\n",
- np->full_name, gpio_np->full_name);
+ np->full_name, gpiospec.np->full_name);
ret = -ENODEV;
goto err1;
}
- gpio_cells = of_get_property(gpio_np, "#gpio-cells", &size);
- if (!gpio_cells || size != sizeof(*gpio_cells) ||
- be32_to_cpup(gpio_cells) != gc->of_gpio_n_cells) {
+ if (gpiospec.args_count != gc->of_gpio_n_cells) {
pr_debug("%s: wrong #gpio-cells for %s\n",
- np->full_name, gpio_np->full_name);
+ np->full_name, gpiospec.np->full_name);
ret = -EINVAL;
goto err1;
}
@@ -69,13 +64,13 @@ int of_get_named_gpio_flags(struct device_node *np, const char *propname,
if (flags)
*flags = 0;
- ret = gc->of_xlate(gc, np, gpio_spec, flags);
+ ret = gc->of_xlate(gc, &gpiospec, flags);
if (ret < 0)
goto err1;
ret += gc->base;
err1:
- of_node_put(gpio_np);
+ of_node_put(gpiospec.np);
err0:
pr_debug("%s exited with status %d\n", __func__, ret);
return ret;
@@ -105,8 +100,8 @@ unsigned int of_gpio_count(struct device_node *np)
do {
int ret;
- ret = of_parse_phandles_with_args(np, "gpios", "#gpio-cells",
- cnt, NULL, NULL);
+ ret = of_parse_phandle_with_args(np, "gpios", "#gpio-cells",
+ cnt, NULL);
/* A hole in the gpios = <> counts anyway. */
if (ret < 0 && ret != -EEXIST)
break;
@@ -127,12 +122,9 @@ EXPORT_SYMBOL(of_gpio_count);
* gpio chips. This function performs only one sanity check: whether gpio
* is less than ngpios (that is specified in the gpio_chip).
*/
-int of_gpio_simple_xlate(struct gpio_chip *gc, struct device_node *np,
- const void *gpio_spec, u32 *flags)
+int of_gpio_simple_xlate(struct gpio_chip *gc,
+ const struct of_phandle_args *gpiospec, u32 *flags)
{
- const __be32 *gpio = gpio_spec;
- const u32 n = be32_to_cpup(gpio);
-
/*
* We're discouraging gpio_cells < 2, since that way you'll have to
 * write your own xlate function (that will have to retrieve the GPIO
@@ -144,13 +136,16 @@ int of_gpio_simple_xlate(struct gpio_chip *gc, struct device_node *np,
return -EINVAL;
}
- if (n > gc->ngpio)
+ if (WARN_ON(gpiospec->args_count < gc->of_gpio_n_cells))
+ return -EINVAL;
+
+ if (gpiospec->args[0] > gc->ngpio)
return -EINVAL;
if (flags)
- *flags = be32_to_cpu(gpio[1]);
+ *flags = gpiospec->args[1];
- return n;
+ return gpiospec->args[0];
}
EXPORT_SYMBOL(of_gpio_simple_xlate);
@@ -198,8 +193,6 @@ int of_mm_gpiochip_add(struct device_node *np,
if (ret)
goto err2;
- pr_debug("%s: registered as generic GPIO chip, base is %d\n",
- np->full_name, gc->base);
return 0;
err2:
iounmap(mm_gc->regs);
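The consumer entry point reworked above keeps its external signature, so existing callers are unaffected; a short usage sketch (the "reset-gpios" property name is illustrative), where of_gpio_simple_xlate() reads the GPIO number from cell 0 and the flags from cell 1:

    enum of_gpio_flags flags;
    bool active_low;
    int gpio;

    gpio = of_get_named_gpio_flags(np, "reset-gpios", 0, &flags);
    if (gpio < 0)
            return gpio;                            /* lookup or translation failed */
    active_low = !!(flags & OF_GPIO_ACTIVE_LOW);    /* decoded from cell 1 */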
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 791270b8bd1c..9cf00602f566 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -26,11 +26,6 @@
#include <linux/string.h>
#include <linux/slab.h>
-/* For archs that don't support NO_IRQ (such as x86), provide a dummy value */
-#ifndef NO_IRQ
-#define NO_IRQ 0
-#endif
-
/**
* irq_of_parse_and_map - Parse and map an interrupt into linux virq space
* @device: Device node of the device whose interrupt is to be mapped
@@ -44,7 +39,7 @@ unsigned int irq_of_parse_and_map(struct device_node *dev, int index)
struct of_irq oirq;
if (of_irq_map_one(dev, index, &oirq))
- return NO_IRQ;
+ return 0;
return irq_create_of_mapping(oirq.controller, oirq.specifier,
oirq.size);
@@ -345,10 +340,19 @@ int of_irq_to_resource(struct device_node *dev, int index, struct resource *r)
/* Only dereference the resource if both the
* resource and the irq are valid. */
- if (r && irq != NO_IRQ) {
+ if (r && irq) {
+ const char *name = NULL;
+
+ /*
+ * Get optional "interrupts-names" property to add a name
+ * to the resource.
+ */
+ of_property_read_string_index(dev, "interrupt-names", index,
+ &name);
+
r->start = r->end = irq;
r->flags = IORESOURCE_IRQ;
- r->name = dev->full_name;
+ r->name = name ? name : dev->full_name;
}
return irq;
@@ -363,7 +367,7 @@ int of_irq_count(struct device_node *dev)
{
int nr = 0;
- while (of_irq_to_resource(dev, nr, NULL) != NO_IRQ)
+ while (of_irq_to_resource(dev, nr, NULL))
nr++;
return nr;
@@ -383,7 +387,7 @@ int of_irq_to_resource_table(struct device_node *dev, struct resource *res,
int i;
for (i = 0; i < nr_irqs; i++, res++)
- if (of_irq_to_resource(dev, i, res) == NO_IRQ)
+ if (!of_irq_to_resource(dev, i, res))
break;
return i;
@@ -424,6 +428,8 @@ void __init of_irq_init(const struct of_device_id *matches)
desc->dev = np;
desc->interrupt_parent = of_irq_find_parent(np);
+ if (desc->interrupt_parent == np)
+ desc->interrupt_parent = NULL;
list_add_tail(&desc->list, &intc_desc_list);
}
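Together, the address.c and irq.c hunks above let a binding name its resources; a hedged sketch of the effect (the device-tree fragment in the comment is illustrative):

    /*
     * illustrative binding:
     *      reg = <0x1000 0x100>, <0x2000 0x10>;
     *      reg-names = "ctrl", "fifo";
     */
    struct resource res;

    if (!of_address_to_resource(np, 1, &res))
            pr_debug("second region is named \"%s\"\n", res.name);  /* "fifo" */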
diff --git a/drivers/of/pdt.c b/drivers/of/pdt.c
index bc5b3990f6ed..07cc1d678e4d 100644
--- a/drivers/of/pdt.c
+++ b/drivers/of/pdt.c
@@ -229,7 +229,7 @@ static struct device_node * __init of_pdt_build_tree(struct device_node *parent,
return ret;
}
-static void *kernel_tree_alloc(u64 size, u64 align)
+static void * __init kernel_tree_alloc(u64 size, u64 align)
{
return prom_early_alloc(size);
}
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index cbd5d701c7e0..63b3ec48c203 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -314,7 +314,7 @@ static const struct of_dev_auxdata *of_dev_lookup(const struct of_dev_auxdata *l
if (!lookup)
return NULL;
- for(; lookup->name != NULL; lookup++) {
+ for(; lookup->compatible != NULL; lookup++) {
if (!of_device_is_compatible(np, lookup->compatible))
continue;
if (of_address_to_resource(np, 0, &res))
diff --git a/drivers/of/selftest.c b/drivers/of/selftest.c
new file mode 100644
index 000000000000..9d2b4803a9d6
--- /dev/null
+++ b/drivers/of/selftest.c
@@ -0,0 +1,139 @@
+/*
+ * Self tests for device tree subsystem
+ */
+
+#define pr_fmt(fmt) "### %s(): " fmt, __func__
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+
+static bool selftest_passed = true;
+#define selftest(result, fmt, ...) { \
+ selftest_passed &= (result); \
+ if (!(result)) \
+ pr_err("FAIL %s:%i " fmt, __FILE__, __LINE__, ##__VA_ARGS__); \
+}
+
+static void __init of_selftest_parse_phandle_with_args(void)
+{
+ struct device_node *np;
+ struct of_phandle_args args;
+ int rc, i;
+ bool passed_all = true;
+
+ pr_info("start\n");
+ np = of_find_node_by_path("/testcase-data/phandle-tests/consumer-a");
+ if (!np) {
+ pr_err("missing testcase data\n");
+ return;
+ }
+
+ for (i = 0; i < 8; i++) {
+ bool passed = true;
+ rc = of_parse_phandle_with_args(np, "phandle-list",
+ "#phandle-cells", i, &args);
+
+ /* Test the values from tests-phandle.dtsi */
+ switch (i) {
+ case 0:
+ passed &= !rc;
+ passed &= (args.args_count == 1);
+ passed &= (args.args[0] == (i + 1));
+ break;
+ case 1:
+ passed &= !rc;
+ passed &= (args.args_count == 2);
+ passed &= (args.args[0] == (i + 1));
+ passed &= (args.args[1] == 0);
+ break;
+ case 2:
+ passed &= (rc == -ENOENT);
+ break;
+ case 3:
+ passed &= !rc;
+ passed &= (args.args_count == 3);
+ passed &= (args.args[0] == (i + 1));
+ passed &= (args.args[1] == 4);
+ passed &= (args.args[2] == 3);
+ break;
+ case 4:
+ passed &= !rc;
+ passed &= (args.args_count == 2);
+ passed &= (args.args[0] == (i + 1));
+ passed &= (args.args[1] == 100);
+ break;
+ case 5:
+ passed &= !rc;
+ passed &= (args.args_count == 0);
+ break;
+ case 6:
+ passed &= !rc;
+ passed &= (args.args_count == 1);
+ passed &= (args.args[0] == (i + 1));
+ break;
+ case 7:
+ passed &= (rc == -EINVAL);
+ break;
+ default:
+ passed = false;
+ }
+
+ if (!passed) {
+ int j;
+ pr_err("index %i - data error on node %s rc=%i regs=[",
+ i, args.np->full_name, rc);
+ for (j = 0; j < args.args_count; j++)
+ printk(" %i", args.args[j]);
+ printk(" ]\n");
+
+ passed_all = false;
+ }
+ }
+
+ /* Check for missing list property */
+ rc = of_parse_phandle_with_args(np, "phandle-list-missing",
+ "#phandle-cells", 0, &args);
+ passed_all &= (rc == -EINVAL);
+
+ /* Check for missing cells property */
+ rc = of_parse_phandle_with_args(np, "phandle-list",
+ "#phandle-cells-missing", 0, &args);
+ passed_all &= (rc == -EINVAL);
+
+ /* Check for bad phandle in list */
+ rc = of_parse_phandle_with_args(np, "phandle-list-bad-phandle",
+ "#phandle-cells", 0, &args);
+ passed_all &= (rc == -EINVAL);
+
+ /* Check for incorrectly formed argument list */
+ rc = of_parse_phandle_with_args(np, "phandle-list-bad-args",
+ "#phandle-cells", 1, &args);
+ passed_all &= (rc == -EINVAL);
+
+ pr_info("end - %s\n", passed_all ? "PASS" : "FAIL");
+}
+
+static int __init of_selftest(void)
+{
+ struct device_node *np;
+
+ np = of_find_node_by_path("/testcase-data/phandle-tests/consumer-a");
+ if (!np) {
+ pr_info("No testcase data in device tree; not running tests\n");
+ return 0;
+ }
+ of_node_put(np);
+
+ pr_info("start of selftest - you will see error messages\n");
+ of_selftest_parse_phandle_with_args();
+ pr_info("end of selftest - %s\n", selftest_passed ? "PASS" : "FAIL");
+ return 0;
+}
+late_initcall(of_selftest);
diff --git a/drivers/oprofile/nmi_timer_int.c b/drivers/oprofile/nmi_timer_int.c
new file mode 100644
index 000000000000..76f1c9357f39
--- /dev/null
+++ b/drivers/oprofile/nmi_timer_int.c
@@ -0,0 +1,173 @@
+/**
+ * @file nmi_timer_int.c
+ *
+ * @remark Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * @author Robert Richter <robert.richter@amd.com>
+ */
+
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/errno.h>
+#include <linux/oprofile.h>
+#include <linux/perf_event.h>
+
+#ifdef CONFIG_OPROFILE_NMI_TIMER
+
+static DEFINE_PER_CPU(struct perf_event *, nmi_timer_events);
+static int ctr_running;
+
+static struct perf_event_attr nmi_timer_attr = {
+ .type = PERF_TYPE_HARDWARE,
+ .config = PERF_COUNT_HW_CPU_CYCLES,
+ .size = sizeof(struct perf_event_attr),
+ .pinned = 1,
+ .disabled = 1,
+};
+
+static void nmi_timer_callback(struct perf_event *event,
+ struct perf_sample_data *data,
+ struct pt_regs *regs)
+{
+ event->hw.interrupts = 0; /* don't throttle interrupts */
+ oprofile_add_sample(regs, 0);
+}
+
+static int nmi_timer_start_cpu(int cpu)
+{
+ struct perf_event *event = per_cpu(nmi_timer_events, cpu);
+
+ if (!event) {
+ event = perf_event_create_kernel_counter(&nmi_timer_attr, cpu, NULL,
+ nmi_timer_callback, NULL);
+ if (IS_ERR(event))
+ return PTR_ERR(event);
+ per_cpu(nmi_timer_events, cpu) = event;
+ }
+
+ if (event && ctr_running)
+ perf_event_enable(event);
+
+ return 0;
+}
+
+static void nmi_timer_stop_cpu(int cpu)
+{
+ struct perf_event *event = per_cpu(nmi_timer_events, cpu);
+
+ if (event && ctr_running)
+ perf_event_disable(event);
+}
+
+static int nmi_timer_cpu_notifier(struct notifier_block *b, unsigned long action,
+ void *data)
+{
+ int cpu = (unsigned long)data;
+ switch (action) {
+ case CPU_DOWN_FAILED:
+ case CPU_ONLINE:
+ nmi_timer_start_cpu(cpu);
+ break;
+ case CPU_DOWN_PREPARE:
+ nmi_timer_stop_cpu(cpu);
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block nmi_timer_cpu_nb = {
+ .notifier_call = nmi_timer_cpu_notifier
+};
+
+static int nmi_timer_start(void)
+{
+ int cpu;
+
+ get_online_cpus();
+ ctr_running = 1;
+ for_each_online_cpu(cpu)
+ nmi_timer_start_cpu(cpu);
+ put_online_cpus();
+
+ return 0;
+}
+
+static void nmi_timer_stop(void)
+{
+ int cpu;
+
+ get_online_cpus();
+ for_each_online_cpu(cpu)
+ nmi_timer_stop_cpu(cpu);
+ ctr_running = 0;
+ put_online_cpus();
+}
+
+static void nmi_timer_shutdown(void)
+{
+ struct perf_event *event;
+ int cpu;
+
+ get_online_cpus();
+ unregister_cpu_notifier(&nmi_timer_cpu_nb);
+ for_each_possible_cpu(cpu) {
+ event = per_cpu(nmi_timer_events, cpu);
+ if (!event)
+ continue;
+ perf_event_disable(event);
+ per_cpu(nmi_timer_events, cpu) = NULL;
+ perf_event_release_kernel(event);
+ }
+
+ put_online_cpus();
+}
+
+static int nmi_timer_setup(void)
+{
+ int cpu, err;
+ u64 period;
+
+ /* clock cycles per tick: */
+ period = (u64)cpu_khz * 1000;
+ do_div(period, HZ);
+ nmi_timer_attr.sample_period = period;
+
+ get_online_cpus();
+ err = register_cpu_notifier(&nmi_timer_cpu_nb);
+ if (err)
+ goto out;
+ /* can't attach events to offline cpus: */
+ for_each_online_cpu(cpu) {
+ err = nmi_timer_start_cpu(cpu);
+ if (err)
+ break;
+ }
+ if (err)
+ nmi_timer_shutdown();
+out:
+ put_online_cpus();
+ return err;
+}
+
+int __init op_nmi_timer_init(struct oprofile_operations *ops)
+{
+ int err = 0;
+
+ err = nmi_timer_setup();
+ if (err)
+ return err;
+ nmi_timer_shutdown(); /* only check, don't alloc */
+
+ ops->create_files = NULL;
+ ops->setup = nmi_timer_setup;
+ ops->shutdown = nmi_timer_shutdown;
+ ops->start = nmi_timer_start;
+ ops->stop = nmi_timer_stop;
+ ops->cpu_type = "timer";
+
+ printk(KERN_INFO "oprofile: using NMI timer interrupt.\n");
+
+ return 0;
+}
+
+#endif
diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
index dccd8636095c..ed2c3ec07024 100644
--- a/drivers/oprofile/oprof.c
+++ b/drivers/oprofile/oprof.c
@@ -239,26 +239,39 @@ int oprofile_set_ulong(unsigned long *addr, unsigned long val)
return err;
}
+static int timer_mode;
+
static int __init oprofile_init(void)
{
int err;
+ /* always init architecture to setup backtrace support */
+ timer_mode = 0;
err = oprofile_arch_init(&oprofile_ops);
- if (err < 0 || timer) {
- printk(KERN_INFO "oprofile: using timer interrupt.\n");
+ if (!err) {
+ if (!timer && !oprofilefs_register())
+ return 0;
+ oprofile_arch_exit();
+ }
+
+ /* setup timer mode: */
+ timer_mode = 1;
+ /* no nmi timer mode if oprofile.timer is set */
+ if (timer || op_nmi_timer_init(&oprofile_ops)) {
err = oprofile_timer_init(&oprofile_ops);
if (err)
return err;
}
+
return oprofilefs_register();
}
static void __exit oprofile_exit(void)
{
- oprofile_timer_exit();
oprofilefs_unregister();
- oprofile_arch_exit();
+ if (!timer_mode)
+ oprofile_arch_exit();
}
diff --git a/drivers/oprofile/oprof.h b/drivers/oprofile/oprof.h
index 177b73de5e5f..d32ef816337c 100644
--- a/drivers/oprofile/oprof.h
+++ b/drivers/oprofile/oprof.h
@@ -35,7 +35,15 @@ struct dentry;
void oprofile_create_files(struct super_block *sb, struct dentry *root);
int oprofile_timer_init(struct oprofile_operations *ops);
-void oprofile_timer_exit(void);
+#ifdef CONFIG_OPROFILE_NMI_TIMER
+int op_nmi_timer_init(struct oprofile_operations *ops);
+#else
+static inline int op_nmi_timer_init(struct oprofile_operations *ops)
+{
+ return -ENODEV;
+}
+#endif
+
int oprofile_set_ulong(unsigned long *addr, unsigned long val);
int oprofile_set_timeout(unsigned long time);
diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
index 89f63456646f..84a208dbed93 100644
--- a/drivers/oprofile/oprofile_files.c
+++ b/drivers/oprofile/oprofile_files.c
@@ -45,7 +45,7 @@ static ssize_t timeout_write(struct file *file, char const __user *buf,
return -EINVAL;
retval = oprofilefs_ulong_from_user(&val, buf, count);
- if (retval)
+ if (retval <= 0)
return retval;
retval = oprofile_set_timeout(val);
@@ -84,7 +84,7 @@ static ssize_t depth_write(struct file *file, char const __user *buf, size_t cou
return -EINVAL;
retval = oprofilefs_ulong_from_user(&val, buf, count);
- if (retval)
+ if (retval <= 0)
return retval;
retval = oprofile_set_ulong(&oprofile_backtrace_depth, val);
@@ -141,9 +141,10 @@ static ssize_t enable_write(struct file *file, char const __user *buf, size_t co
return -EINVAL;
retval = oprofilefs_ulong_from_user(&val, buf, count);
- if (retval)
+ if (retval <= 0)
return retval;
+ retval = 0;
if (val)
retval = oprofile_start();
else
diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
index d0de6cc2d7a5..2f0aa0f700e6 100644
--- a/drivers/oprofile/oprofilefs.c
+++ b/drivers/oprofile/oprofilefs.c
@@ -60,6 +60,13 @@ ssize_t oprofilefs_ulong_to_user(unsigned long val, char __user *buf, size_t cou
}
+/*
+ * Note: If oprofilefs_ulong_from_user() returns 0, then *val remains
+ * unchanged and might be uninitialized. This follows write syscall
+ * implementation when count is zero: "If count is zero ... [and if]
+ * no errors are detected, 0 will be returned without causing any
+ * other effect." (man 2 write)
+ */
int oprofilefs_ulong_from_user(unsigned long *val, char const __user *buf, size_t count)
{
char tmpbuf[TMPBUFSIZE];
@@ -79,7 +86,7 @@ int oprofilefs_ulong_from_user(unsigned long *val, char const __user *buf, size_
raw_spin_lock_irqsave(&oprofilefs_lock, flags);
*val = simple_strtoul(tmpbuf, NULL, 0);
raw_spin_unlock_irqrestore(&oprofilefs_lock, flags);
- return 0;
+ return count;
}
@@ -99,7 +106,7 @@ static ssize_t ulong_write_file(struct file *file, char const __user *buf, size_
return -EINVAL;
retval = oprofilefs_ulong_from_user(&value, buf, count);
- if (retval)
+ if (retval <= 0)
return retval;
retval = oprofile_set_ulong(file->private_data, value);
diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
index 3ef44624f510..93404f72dfa8 100644
--- a/drivers/oprofile/timer_int.c
+++ b/drivers/oprofile/timer_int.c
@@ -97,23 +97,24 @@ static struct notifier_block __refdata oprofile_cpu_notifier = {
.notifier_call = oprofile_cpu_notify,
};
-int oprofile_timer_init(struct oprofile_operations *ops)
+static int oprofile_hrtimer_setup(void)
{
- int rc;
-
- rc = register_hotcpu_notifier(&oprofile_cpu_notifier);
- if (rc)
- return rc;
- ops->create_files = NULL;
- ops->setup = NULL;
- ops->shutdown = NULL;
- ops->start = oprofile_hrtimer_start;
- ops->stop = oprofile_hrtimer_stop;
- ops->cpu_type = "timer";
- return 0;
+ return register_hotcpu_notifier(&oprofile_cpu_notifier);
}
-void oprofile_timer_exit(void)
+static void oprofile_hrtimer_shutdown(void)
{
unregister_hotcpu_notifier(&oprofile_cpu_notifier);
}
+
+int oprofile_timer_init(struct oprofile_operations *ops)
+{
+ ops->create_files = NULL;
+ ops->setup = oprofile_hrtimer_setup;
+ ops->shutdown = oprofile_hrtimer_shutdown;
+ ops->start = oprofile_hrtimer_start;
+ ops->stop = oprofile_hrtimer_stop;
+ ops->cpu_type = "timer";
+ printk(KERN_INFO "oprofile: using timer interrupt.\n");
+ return 0;
+}
diff --git a/drivers/parisc/Kconfig b/drivers/parisc/Kconfig
index 553a9905299a..620264936341 100644
--- a/drivers/parisc/Kconfig
+++ b/drivers/parisc/Kconfig
@@ -108,13 +108,6 @@ config IOMMU_HELPER
depends on IOMMU_SBA || IOMMU_CCIO
default y
-#config PCI_EPIC
-# bool "EPIC/SAGA PCI support"
-# depends on PCI
-# default y
-# help
-# Say Y here for V-class PCI, DMA/IOMMU, IRQ subsystem support.
-
source "drivers/pcmcia/Kconfig"
source "drivers/pci/hotplug/Kconfig"
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
index bcd5d54b7d4d..7ff10c1e8664 100644
--- a/drivers/parisc/dino.c
+++ b/drivers/parisc/dino.c
@@ -562,19 +562,6 @@ dino_fixup_bus(struct pci_bus *bus)
/* Firmware doesn't set up card-mode dino, so we have to */
if (is_card_dino(&dino_dev->hba.dev->id)) {
dino_card_setup(bus, dino_dev->hba.base_addr);
- } else if(bus->parent == NULL) {
- /* must have a dino above it, reparent the resources
- * into the dino window */
- int i;
- struct resource *res = &dino_dev->hba.lmmio_space;
-
- bus->resource[0] = &(dino_dev->hba.io_space);
- for(i = 0; i < DINO_MAX_LMMIO_RESOURCES; i++) {
- if(res[i].flags == 0)
- break;
- bus->resource[i+1] = &res[i];
- }
-
} else if (bus->parent) {
int i;
@@ -927,6 +914,7 @@ static int __init dino_probe(struct parisc_device *dev)
const char *version = "unknown";
char *name;
int is_cujo = 0;
+ LIST_HEAD(resources);
struct pci_bus *bus;
unsigned long hpa = dev->hpa.start;
@@ -1003,26 +991,37 @@ static int __init dino_probe(struct parisc_device *dev)
dev->dev.platform_data = dino_dev;
+ pci_add_resource(&resources, &dino_dev->hba.io_space);
+ if (dino_dev->hba.lmmio_space.flags)
+ pci_add_resource(&resources, &dino_dev->hba.lmmio_space);
+ if (dino_dev->hba.elmmio_space.flags)
+ pci_add_resource(&resources, &dino_dev->hba.elmmio_space);
+ if (dino_dev->hba.gmmio_space.flags)
+ pci_add_resource(&resources, &dino_dev->hba.gmmio_space);
+
/*
** It's not used to avoid chicken/egg problems
** with configuration accessor functions.
*/
- dino_dev->hba.hba_bus = bus = pci_scan_bus_parented(&dev->dev,
- dino_current_bus, &dino_cfg_ops, NULL);
-
- if(bus) {
- /* This code *depends* on scanning being single threaded
- * if it isn't, this global bus number count will fail
- */
- dino_current_bus = bus->subordinate + 1;
- pci_bus_assign_resources(bus);
- pci_bus_add_devices(bus);
- } else {
+ dino_dev->hba.hba_bus = bus = pci_create_root_bus(&dev->dev,
+ dino_current_bus, &dino_cfg_ops, NULL, &resources);
+ if (!bus) {
printk(KERN_ERR "ERROR: failed to scan PCI bus on %s (duplicate bus number %d?)\n",
dev_name(&dev->dev), dino_current_bus);
+ pci_free_resource_list(&resources);
/* increment the bus number in case of duplicates */
dino_current_bus++;
+ return 0;
}
+
+ bus->subordinate = pci_scan_child_bus(bus);
+
+ /* This code *depends* on scanning being single threaded
+ * if it isn't, this global bus number count will fail
+ */
+ dino_current_bus = bus->subordinate + 1;
+ pci_bus_assign_resources(bus);
+ pci_bus_add_devices(bus);
return 0;
}
diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
index 3aeb3279c92a..d5f3d753a108 100644
--- a/drivers/parisc/lba_pci.c
+++ b/drivers/parisc/lba_pci.c
@@ -653,7 +653,7 @@ lba_fixup_bus(struct pci_bus *bus)
}
} else {
/* Host-PCI Bridge */
- int err, i;
+ int err;
DBG("lba_fixup_bus() %s [%lx/%lx]/%lx\n",
ldev->hba.io_space.name,
@@ -669,9 +669,6 @@ lba_fixup_bus(struct pci_bus *bus)
lba_dump_res(&ioport_resource, 2);
BUG();
}
- /* advertize Host bridge resources to PCI bus */
- bus->resource[0] = &(ldev->hba.io_space);
- i = 1;
if (ldev->hba.elmmio_space.start) {
err = request_resource(&iomem_resource,
@@ -685,35 +682,17 @@ lba_fixup_bus(struct pci_bus *bus)
/* lba_dump_res(&iomem_resource, 2); */
/* BUG(); */
- } else
- bus->resource[i++] = &(ldev->hba.elmmio_space);
+ }
}
-
- /* Overlaps with elmmio can (and should) fail here.
- * We will prune (or ignore) the distributed range.
- *
- * FIXME: SBA code should register all elmmio ranges first.
- * that would take care of elmmio ranges routed
- * to a different rope (already discovered) from
- * getting registered *after* LBA code has already
- * registered it's distributed lmmio range.
- */
- if (truncate_pat_collision(&iomem_resource,
- &(ldev->hba.lmmio_space))) {
-
- printk(KERN_WARNING "LBA: lmmio_space [%lx/%lx] duplicate!\n",
- (long)ldev->hba.lmmio_space.start,
- (long)ldev->hba.lmmio_space.end);
- } else {
+ if (ldev->hba.lmmio_space.flags) {
err = request_resource(&iomem_resource, &(ldev->hba.lmmio_space));
if (err < 0) {
printk(KERN_ERR "FAILED: lba_fixup_bus() request for "
"lmmio_space [%lx/%lx]\n",
(long)ldev->hba.lmmio_space.start,
(long)ldev->hba.lmmio_space.end);
- } else
- bus->resource[i++] = &(ldev->hba.lmmio_space);
+ }
}
#ifdef CONFIG_64BIT
@@ -728,7 +707,6 @@ lba_fixup_bus(struct pci_bus *bus)
lba_dump_res(&iomem_resource, 2);
BUG();
}
- bus->resource[i++] = &(ldev->hba.gmmio_space);
}
#endif
@@ -1404,6 +1382,7 @@ static int __init
lba_driver_probe(struct parisc_device *dev)
{
struct lba_device *lba_dev;
+ LIST_HEAD(resources);
struct pci_bus *lba_bus;
struct pci_ops *cfg_ops;
u32 func_class;
@@ -1518,10 +1497,41 @@ lba_driver_probe(struct parisc_device *dev)
if (lba_dev->hba.bus_num.start < lba_next_bus)
lba_dev->hba.bus_num.start = lba_next_bus;
+ /* Overlaps with elmmio can (and should) fail here.
+ * We will prune (or ignore) the distributed range.
+ *
+ * FIXME: SBA code should register all elmmio ranges first.
+ * that would take care of elmmio ranges routed
+ * to a different rope (already discovered) from
+ * getting registered *after* LBA code has already
+ * registered its distributed lmmio range.
+ */
+ if (truncate_pat_collision(&iomem_resource,
+ &(lba_dev->hba.lmmio_space))) {
+ printk(KERN_WARNING "LBA: lmmio_space [%lx/%lx] duplicate!\n",
+ (long)lba_dev->hba.lmmio_space.start,
+ (long)lba_dev->hba.lmmio_space.end);
+ lba_dev->hba.lmmio_space.flags = 0;
+ }
+
+ pci_add_resource(&resources, &lba_dev->hba.io_space);
+ if (lba_dev->hba.elmmio_space.start)
+ pci_add_resource(&resources, &lba_dev->hba.elmmio_space);
+ if (lba_dev->hba.lmmio_space.flags)
+ pci_add_resource(&resources, &lba_dev->hba.lmmio_space);
+ if (lba_dev->hba.gmmio_space.flags)
+ pci_add_resource(&resources, &lba_dev->hba.gmmio_space);
+
dev->dev.platform_data = lba_dev;
lba_bus = lba_dev->hba.hba_bus =
- pci_scan_bus_parented(&dev->dev, lba_dev->hba.bus_num.start,
- cfg_ops, NULL);
+ pci_create_root_bus(&dev->dev, lba_dev->hba.bus_num.start,
+ cfg_ops, NULL, &resources);
+ if (!lba_bus) {
+ pci_free_resource_list(&resources);
+ return 0;
+ }
+
+ lba_bus->subordinate = pci_scan_child_bus(lba_bus);
/* This is in lieu of calling pci_assign_unassigned_resources() */
if (is_pdc_pat()) {
@@ -1551,10 +1561,8 @@ lba_driver_probe(struct parisc_device *dev)
lba_dev->flags |= LBA_FLAG_SKIP_PROBE;
}
- if (lba_bus) {
- lba_next_bus = lba_bus->subordinate + 1;
- pci_bus_add_devices(lba_bus);
- }
+ lba_next_bus = lba_bus->subordinate + 1;
+ pci_bus_add_devices(lba_bus);
/* Whew! Finally done! Tell services we got this one covered. */
return 0;
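Both the Dino and LBA probes above now share the same root-bus bring-up sequence; condensed into one sketch (error paths trimmed; hba, bus_num and cfg_ops stand in for the per-driver state):

    LIST_HEAD(resources);
    struct pci_bus *bus;

    pci_add_resource(&resources, &hba->io_space);
    if (hba->lmmio_space.flags)
            pci_add_resource(&resources, &hba->lmmio_space);

    bus = pci_create_root_bus(&dev->dev, bus_num, cfg_ops, NULL, &resources);
    if (!bus) {
            pci_free_resource_list(&resources);     /* nothing was registered */
            return 0;
    }

    bus->subordinate = pci_scan_child_bus(bus);     /* enumerate children ourselves */
    pci_bus_assign_resources(bus);
    pci_bus_add_devices(bus);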
diff --git a/drivers/parport/parport_ax88796.c b/drivers/parport/parport_ax88796.c
index 844f6137970a..7c5d86696eed 100644
--- a/drivers/parport/parport_ax88796.c
+++ b/drivers/parport/parport_ax88796.c
@@ -420,18 +420,7 @@ static struct platform_driver axdrv = {
.resume = parport_ax88796_resume,
};
-static int __init parport_ax88796_init(void)
-{
- return platform_driver_register(&axdrv);
-}
-
-static void __exit parport_ax88796_exit(void)
-{
- platform_driver_unregister(&axdrv);
-}
-
-module_init(parport_ax88796_init)
-module_exit(parport_ax88796_exit)
+module_platform_driver(axdrv);
MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
MODULE_DESCRIPTION("AX88796 Parport parallel port driver");
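module_platform_driver(axdrv) is purely boilerplate reduction; it stands in for roughly the init/exit pair that this hunk deletes (the generated function names differ, the behaviour does not). The parport_sunbpp.c hunk further down makes the identical conversion.

    static int __init axdrv_init(void)
    {
            return platform_driver_register(&axdrv);
    }
    module_init(axdrv_init);

    static void __exit axdrv_exit(void)
    {
            platform_driver_unregister(&axdrv);
    }
    module_exit(axdrv_exit);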
diff --git a/drivers/parport/parport_ip32.c b/drivers/parport/parport_ip32.c
index 0dc34f12f92e..d4716273651e 100644
--- a/drivers/parport/parport_ip32.c
+++ b/drivers/parport/parport_ip32.c
@@ -135,7 +135,7 @@
#define PARPORT_IP32_ENABLE_EPP (1U << 3)
#define PARPORT_IP32_ENABLE_ECP (1U << 4)
static unsigned int features = ~0U;
-static int verbose_probing = DEFAULT_VERBOSE_PROBING;
+static bool verbose_probing = DEFAULT_VERBOSE_PROBING;
/* We do not support more than one port. */
static struct parport *this_port = NULL;
diff --git a/drivers/parport/parport_mfc3.c b/drivers/parport/parport_mfc3.c
index 362db31d8ca6..1c0c642b3e23 100644
--- a/drivers/parport/parport_mfc3.c
+++ b/drivers/parport/parport_mfc3.c
@@ -397,7 +397,7 @@ static void __exit parport_mfc3_exit(void)
MODULE_AUTHOR("Joerg Dorchain <joerg@dorchain.net>");
-MODULE_DESCRIPTION("Parport Driver for Multiface 3 expansion cards Paralllel Port");
+MODULE_DESCRIPTION("Parport Driver for Multiface 3 expansion cards Parallel Port");
MODULE_SUPPORTED_DEVICE("Multiface 3 Parallel Port");
MODULE_LICENSE("GPL");
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index d0b597b50398..0cb64f50cecd 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -3404,8 +3404,8 @@ static int __init parport_init_mode_setup(char *str)
#endif
#ifdef MODULE
-static const char *irq[PARPORT_PC_MAX_PORTS];
-static const char *dma[PARPORT_PC_MAX_PORTS];
+static char *irq[PARPORT_PC_MAX_PORTS];
+static char *dma[PARPORT_PC_MAX_PORTS];
MODULE_PARM_DESC(io, "Base I/O address (SPP regs)");
module_param_array(io, int, NULL, 0);
diff --git a/drivers/parport/parport_sunbpp.c b/drivers/parport/parport_sunbpp.c
index 910c5a26e347..9390a534a2b2 100644
--- a/drivers/parport/parport_sunbpp.c
+++ b/drivers/parport/parport_sunbpp.c
@@ -391,21 +391,10 @@ static struct platform_driver bpp_sbus_driver = {
.remove = __devexit_p(bpp_remove),
};
-static int __init parport_sunbpp_init(void)
-{
- return platform_driver_register(&bpp_sbus_driver);
-}
-
-static void __exit parport_sunbpp_exit(void)
-{
- platform_driver_unregister(&bpp_sbus_driver);
-}
+module_platform_driver(bpp_sbus_driver);
MODULE_AUTHOR("Derrick J Brashear");
MODULE_DESCRIPTION("Parport Driver for Sparc bidirectional Port");
MODULE_SUPPORTED_DEVICE("Sparc Bidirectional Parallel Port");
MODULE_VERSION("2.0");
MODULE_LICENSE("GPL");
-
-module_init(parport_sunbpp_init)
-module_exit(parport_sunbpp_exit)
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index f02b5235056d..37856f7c7781 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -98,11 +98,11 @@ config PCI_PASID
If unsure, say N.
config PCI_IOAPIC
- bool
+ tristate "PCI IO-APIC hotplug support" if X86
depends on PCI
depends on ACPI
depends on HOTPLUG
- default y
+ default !X86
config PCI_LABEL
def_bool y if (DMI || ACPI)
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index fdaa42aac7c6..2a581642c237 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -13,7 +13,7 @@
* configuration space.
*/
-static DEFINE_RAW_SPINLOCK(pci_lock);
+DEFINE_RAW_SPINLOCK(pci_lock);
/*
* Wrappers for all PCI configuration access functions. They just check
@@ -127,20 +127,20 @@ EXPORT_SYMBOL(pci_write_vpd);
* We have a bit per device to indicate it's blocked and a global wait queue
* for callers to sleep on until devices are unblocked.
*/
-static DECLARE_WAIT_QUEUE_HEAD(pci_ucfg_wait);
+static DECLARE_WAIT_QUEUE_HEAD(pci_cfg_wait);
-static noinline void pci_wait_ucfg(struct pci_dev *dev)
+static noinline void pci_wait_cfg(struct pci_dev *dev)
{
DECLARE_WAITQUEUE(wait, current);
- __add_wait_queue(&pci_ucfg_wait, &wait);
+ __add_wait_queue(&pci_cfg_wait, &wait);
do {
set_current_state(TASK_UNINTERRUPTIBLE);
raw_spin_unlock_irq(&pci_lock);
schedule();
raw_spin_lock_irq(&pci_lock);
- } while (dev->block_ucfg_access);
- __remove_wait_queue(&pci_ucfg_wait, &wait);
+ } while (dev->block_cfg_access);
+ __remove_wait_queue(&pci_cfg_wait, &wait);
}
/* Returns 0 on success, negative values indicate error. */
@@ -153,7 +153,8 @@ int pci_user_read_config_##size \
if (PCI_##size##_BAD) \
return -EINVAL; \
raw_spin_lock_irq(&pci_lock); \
- if (unlikely(dev->block_ucfg_access)) pci_wait_ucfg(dev); \
+ if (unlikely(dev->block_cfg_access)) \
+ pci_wait_cfg(dev); \
ret = dev->bus->ops->read(dev->bus, dev->devfn, \
pos, sizeof(type), &data); \
raw_spin_unlock_irq(&pci_lock); \
@@ -172,7 +173,8 @@ int pci_user_write_config_##size \
if (PCI_##size##_BAD) \
return -EINVAL; \
raw_spin_lock_irq(&pci_lock); \
- if (unlikely(dev->block_ucfg_access)) pci_wait_ucfg(dev); \
+ if (unlikely(dev->block_cfg_access)) \
+ pci_wait_cfg(dev); \
ret = dev->bus->ops->write(dev->bus, dev->devfn, \
pos, sizeof(type), val); \
raw_spin_unlock_irq(&pci_lock); \
@@ -401,36 +403,56 @@ int pci_vpd_truncate(struct pci_dev *dev, size_t size)
EXPORT_SYMBOL(pci_vpd_truncate);
/**
- * pci_block_user_cfg_access - Block userspace PCI config reads/writes
+ * pci_cfg_access_lock - Lock PCI config reads/writes
* @dev: pci device struct
*
- * When user access is blocked, any reads or writes to config space will
- * sleep until access is unblocked again. We don't allow nesting of
- * block/unblock calls.
+ * When access is locked, any userspace reads or writes to config
+ * space and concurrent lock requests will sleep until access is
+ * allowed via pci_cfg_access_unlock() again.
*/
-void pci_block_user_cfg_access(struct pci_dev *dev)
+void pci_cfg_access_lock(struct pci_dev *dev)
+{
+ might_sleep();
+
+ raw_spin_lock_irq(&pci_lock);
+ if (dev->block_cfg_access)
+ pci_wait_cfg(dev);
+ dev->block_cfg_access = 1;
+ raw_spin_unlock_irq(&pci_lock);
+}
+EXPORT_SYMBOL_GPL(pci_cfg_access_lock);
+
+/**
+ * pci_cfg_access_trylock - try to lock PCI config reads/writes
+ * @dev: pci device struct
+ *
+ * Same as pci_cfg_access_lock, but will return 0 if access is
+ * already locked, 1 otherwise. This function can be used from
+ * atomic contexts.
+ */
+bool pci_cfg_access_trylock(struct pci_dev *dev)
{
unsigned long flags;
- int was_blocked;
+ bool locked = true;
raw_spin_lock_irqsave(&pci_lock, flags);
- was_blocked = dev->block_ucfg_access;
- dev->block_ucfg_access = 1;
+ if (dev->block_cfg_access)
+ locked = false;
+ else
+ dev->block_cfg_access = 1;
raw_spin_unlock_irqrestore(&pci_lock, flags);
- /* If we BUG() inside the pci_lock, we're guaranteed to hose
- * the machine */
- BUG_ON(was_blocked);
+ return locked;
}
-EXPORT_SYMBOL_GPL(pci_block_user_cfg_access);
+EXPORT_SYMBOL_GPL(pci_cfg_access_trylock);
/**
- * pci_unblock_user_cfg_access - Unblock userspace PCI config reads/writes
+ * pci_cfg_access_unlock - Unlock PCI config reads/writes
* @dev: pci device struct
*
- * This function allows userspace PCI config accesses to resume.
+ * This function allows PCI config accesses to resume.
*/
-void pci_unblock_user_cfg_access(struct pci_dev *dev)
+void pci_cfg_access_unlock(struct pci_dev *dev)
{
unsigned long flags;
@@ -438,10 +460,10 @@ void pci_unblock_user_cfg_access(struct pci_dev *dev)
/* This indicates a problem in the caller, but we don't need
* to kill them, unlike a double-block above. */
- WARN_ON(!dev->block_ucfg_access);
+ WARN_ON(!dev->block_cfg_access);
- dev->block_ucfg_access = 0;
- wake_up_all(&pci_ucfg_wait);
+ dev->block_cfg_access = 0;
+ wake_up_all(&pci_cfg_wait);
raw_spin_unlock_irqrestore(&pci_lock, flags);
}
-EXPORT_SYMBOL_GPL(pci_unblock_user_cfg_access);
+EXPORT_SYMBOL_GPL(pci_cfg_access_unlock);
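A caller-side sketch of the renamed interface: pci_cfg_access_lock() excludes user-space config accesses (and other lockers) for the duration of a sensitive operation such as a reset, and the trylock variant exists for atomic context (the critical-section contents are illustrative):

    pci_cfg_access_lock(pdev);              /* may sleep until access is free */
    /* ... e.g. trigger a function reset and poll for completion ... */
    pci_cfg_access_unlock(pdev);

    /* from atomic context: */
    if (pci_cfg_access_trylock(pdev)) {
            /* ... short critical section ... */
            pci_cfg_access_unlock(pdev);
    }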
diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c
index 7ec56fb0bd78..95655d7c0d0b 100644
--- a/drivers/pci/ats.c
+++ b/drivers/pci/ats.c
@@ -13,6 +13,7 @@
#include <linux/export.h>
#include <linux/pci-ats.h>
#include <linux/pci.h>
+#include <linux/slab.h>
#include "pci.h"
@@ -127,6 +128,23 @@ void pci_disable_ats(struct pci_dev *dev)
}
EXPORT_SYMBOL_GPL(pci_disable_ats);
+void pci_restore_ats_state(struct pci_dev *dev)
+{
+ u16 ctrl;
+
+ if (!pci_ats_enabled(dev))
+ return;
+ if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS))
+ BUG();
+
+ ctrl = PCI_ATS_CTRL_ENABLE;
+ if (!dev->is_virtfn)
+ ctrl |= PCI_ATS_CTRL_STU(dev->ats->stu - PCI_ATS_MIN_STU);
+
+ pci_write_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, ctrl);
+}
+EXPORT_SYMBOL_GPL(pci_restore_ats_state);
+
/**
* pci_ats_queue_depth - query the ATS Invalidate Queue Depth
* @dev: the PCI device
@@ -174,21 +192,22 @@ int pci_enable_pri(struct pci_dev *pdev, u32 reqs)
u32 max_requests;
int pos;
- pos = pci_find_ext_capability(pdev, PCI_PRI_CAP);
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
if (!pos)
return -EINVAL;
- pci_read_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, &control);
- pci_read_config_word(pdev, pos + PCI_PRI_STATUS_OFF, &status);
- if ((control & PCI_PRI_ENABLE) || !(status & PCI_PRI_STATUS_STOPPED))
+ pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
+ pci_read_config_word(pdev, pos + PCI_PRI_STATUS, &status);
+ if ((control & PCI_PRI_CTRL_ENABLE) ||
+ !(status & PCI_PRI_STATUS_STOPPED))
return -EBUSY;
- pci_read_config_dword(pdev, pos + PCI_PRI_MAX_REQ_OFF, &max_requests);
+ pci_read_config_dword(pdev, pos + PCI_PRI_MAX_REQ, &max_requests);
reqs = min(max_requests, reqs);
- pci_write_config_dword(pdev, pos + PCI_PRI_ALLOC_REQ_OFF, reqs);
+ pci_write_config_dword(pdev, pos + PCI_PRI_ALLOC_REQ, reqs);
- control |= PCI_PRI_ENABLE;
- pci_write_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, control);
+ control |= PCI_PRI_CTRL_ENABLE;
+ pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control);
return 0;
}
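As a usage sketch (hypothetical caller, inferred from the functions in this file rather than taken from the patch), an IOMMU driver would typically reset the Page Request Interface before enabling it with a request budget:

static int foo_setup_pri(struct pci_dev *pdev, u32 reqs)
{
	int ret;

	ret = pci_reset_pri(pdev);	/* -EBUSY if PRI is already enabled */
	if (ret)
		return ret;

	/* reqs is clamped to the device's advertised maximum */
	return pci_enable_pri(pdev, reqs);
}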
@@ -205,13 +224,13 @@ void pci_disable_pri(struct pci_dev *pdev)
u16 control;
int pos;
- pos = pci_find_ext_capability(pdev, PCI_PRI_CAP);
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
if (!pos)
return;
- pci_read_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, &control);
- control &= ~PCI_PRI_ENABLE;
- pci_write_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, control);
+ pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
+ control &= ~PCI_PRI_CTRL_ENABLE;
+ pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control);
}
EXPORT_SYMBOL_GPL(pci_disable_pri);
@@ -226,13 +245,13 @@ bool pci_pri_enabled(struct pci_dev *pdev)
u16 control;
int pos;
- pos = pci_find_ext_capability(pdev, PCI_PRI_CAP);
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
if (!pos)
return false;
- pci_read_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, &control);
+ pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
- return (control & PCI_PRI_ENABLE) ? true : false;
+ return (control & PCI_PRI_CTRL_ENABLE) ? true : false;
}
EXPORT_SYMBOL_GPL(pci_pri_enabled);
@@ -248,17 +267,17 @@ int pci_reset_pri(struct pci_dev *pdev)
u16 control;
int pos;
- pos = pci_find_ext_capability(pdev, PCI_PRI_CAP);
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
if (!pos)
return -EINVAL;
- pci_read_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, &control);
- if (control & PCI_PRI_ENABLE)
+ pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
+ if (control & PCI_PRI_CTRL_ENABLE)
return -EBUSY;
- control |= PCI_PRI_RESET;
+ control |= PCI_PRI_CTRL_RESET;
- pci_write_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, control);
+ pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control);
return 0;
}
@@ -281,14 +300,14 @@ bool pci_pri_stopped(struct pci_dev *pdev)
u16 control, status;
int pos;
- pos = pci_find_ext_capability(pdev, PCI_PRI_CAP);
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
if (!pos)
return true;
- pci_read_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, &control);
- pci_read_config_word(pdev, pos + PCI_PRI_STATUS_OFF, &status);
+ pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
+ pci_read_config_word(pdev, pos + PCI_PRI_STATUS, &status);
- if (control & PCI_PRI_ENABLE)
+ if (control & PCI_PRI_CTRL_ENABLE)
return false;
return (status & PCI_PRI_STATUS_STOPPED) ? true : false;
@@ -310,15 +329,15 @@ int pci_pri_status(struct pci_dev *pdev)
u16 status, control;
int pos;
- pos = pci_find_ext_capability(pdev, PCI_PRI_CAP);
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
if (!pos)
return -EINVAL;
- pci_read_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, &control);
- pci_read_config_word(pdev, pos + PCI_PRI_STATUS_OFF, &status);
+ pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
+ pci_read_config_word(pdev, pos + PCI_PRI_STATUS, &status);
/* Stopped bit is undefined when enable == 1, so clear it */
- if (control & PCI_PRI_ENABLE)
+ if (control & PCI_PRI_CTRL_ENABLE)
status &= ~PCI_PRI_STATUS_STOPPED;
return status;
@@ -341,25 +360,25 @@ int pci_enable_pasid(struct pci_dev *pdev, int features)
u16 control, supported;
int pos;
- pos = pci_find_ext_capability(pdev, PCI_PASID_CAP);
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
if (!pos)
return -EINVAL;
- pci_read_config_word(pdev, pos + PCI_PASID_CONTROL_OFF, &control);
- pci_read_config_word(pdev, pos + PCI_PASID_CAP_OFF, &supported);
+ pci_read_config_word(pdev, pos + PCI_PASID_CTRL, &control);
+ pci_read_config_word(pdev, pos + PCI_PASID_CAP, &supported);
- if (!(supported & PCI_PASID_ENABLE))
+ if (control & PCI_PASID_CTRL_ENABLE)
return -EINVAL;
- supported &= PCI_PASID_EXEC | PCI_PASID_PRIV;
+ supported &= PCI_PASID_CAP_EXEC | PCI_PASID_CAP_PRIV;
/* User wants to enable anything unsupported? */
if ((supported & features) != features)
return -EINVAL;
- control = PCI_PASID_ENABLE | features;
+ control = PCI_PASID_CTRL_ENABLE | features;
- pci_write_config_word(pdev, pos + PCI_PASID_CONTROL_OFF, control);
+ pci_write_config_word(pdev, pos + PCI_PASID_CTRL, control);
return 0;
}
@@ -375,11 +394,11 @@ void pci_disable_pasid(struct pci_dev *pdev)
u16 control = 0;
int pos;
- pos = pci_find_ext_capability(pdev, PCI_PASID_CAP);
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
if (!pos)
return;
- pci_write_config_word(pdev, pos + PCI_PASID_CONTROL_OFF, control);
+ pci_write_config_word(pdev, pos + PCI_PASID_CTRL, control);
}
EXPORT_SYMBOL_GPL(pci_disable_pasid);
@@ -390,22 +409,21 @@ EXPORT_SYMBOL_GPL(pci_disable_pasid);
 * Returns a negative value when no PASID capability is present.
 * Otherwise it returns a bitmask with supported features. Current
* features reported are:
- * PCI_PASID_ENABLE - PASID capability can be enabled
- * PCI_PASID_EXEC - Execute permission supported
- * PCI_PASID_PRIV - Priviledged mode supported
+ * PCI_PASID_CAP_EXEC - Execute permission supported
+ * PCI_PASID_CAP_PRIV - Privileged mode supported
*/
int pci_pasid_features(struct pci_dev *pdev)
{
u16 supported;
int pos;
- pos = pci_find_ext_capability(pdev, PCI_PASID_CAP);
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
if (!pos)
return -EINVAL;
- pci_read_config_word(pdev, pos + PCI_PASID_CAP_OFF, &supported);
+ pci_read_config_word(pdev, pos + PCI_PASID_CAP, &supported);
- supported &= PCI_PASID_ENABLE | PCI_PASID_EXEC | PCI_PASID_PRIV;
+ supported &= PCI_PASID_CAP_EXEC | PCI_PASID_CAP_PRIV;
return supported;
}
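A short usage sketch (hypothetical caller): the returned feature mask can be fed straight back into pci_enable_pasid(), which rejects any request containing unsupported bits.

static int foo_setup_pasid(struct pci_dev *pdev)
{
	int features = pci_pasid_features(pdev);

	if (features < 0)
		return features;	/* no PASID capability */

	/* enable PASID together with whatever optional features exist */
	return pci_enable_pasid(pdev, features);
}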
@@ -425,11 +443,11 @@ int pci_max_pasids(struct pci_dev *pdev)
u16 supported;
int pos;
- pos = pci_find_ext_capability(pdev, PCI_PASID_CAP);
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
if (!pos)
return -EINVAL;
- pci_read_config_word(pdev, pos + PCI_PASID_CAP_OFF, &supported);
+ pci_read_config_word(pdev, pos + PCI_PASID_CAP, &supported);
supported = (supported & PASID_NUMBER_MASK) >> PASID_NUMBER_SHIFT;
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 1e2ad92a4752..398f5d859791 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -18,6 +18,32 @@
#include "pci.h"
+void pci_add_resource(struct list_head *resources, struct resource *res)
+{
+ struct pci_bus_resource *bus_res;
+
+ bus_res = kzalloc(sizeof(struct pci_bus_resource), GFP_KERNEL);
+ if (!bus_res) {
+ printk(KERN_ERR "PCI: can't add bus resource %pR\n", res);
+ return;
+ }
+
+ bus_res->res = res;
+ list_add_tail(&bus_res->list, resources);
+}
+EXPORT_SYMBOL(pci_add_resource);
+
+void pci_free_resource_list(struct list_head *resources)
+{
+ struct pci_bus_resource *bus_res, *tmp;
+
+ list_for_each_entry_safe(bus_res, tmp, resources, list) {
+ list_del(&bus_res->list);
+ kfree(bus_res);
+ }
+}
+EXPORT_SYMBOL(pci_free_resource_list);
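The two helpers are meant to be used as a pair; a minimal sketch of a host bridge driver with made-up windows (the ops/sysdata plumbing is elided), matching the way the reworked pci_scan_bus() later in this diff uses them with ioport_resource and iomem_resource:

static struct resource foo_io = {
	.name	= "foo I/O",
	.start	= 0x1000,
	.end	= 0xffff,
	.flags	= IORESOURCE_IO,
};

static struct resource foo_mem = {
	.name	= "foo MMIO",
	.start	= 0x80000000,
	.end	= 0x8fffffff,
	.flags	= IORESOURCE_MEM,
};

static struct pci_bus *foo_scan(struct device *parent, struct pci_ops *ops,
				void *sysdata)
{
	LIST_HEAD(resources);
	struct pci_bus *b;

	pci_add_resource(&resources, &foo_io);
	pci_add_resource(&resources, &foo_mem);

	b = pci_scan_root_bus(parent, 0, ops, sysdata, &resources);
	if (!b)
		pci_free_resource_list(&resources);	/* nothing consumed the list */
	return b;
}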
+
void pci_bus_add_resource(struct pci_bus *bus, struct resource *res,
unsigned int flags)
{
@@ -52,16 +78,12 @@ EXPORT_SYMBOL_GPL(pci_bus_resource_n);
void pci_bus_remove_resources(struct pci_bus *bus)
{
- struct pci_bus_resource *bus_res, *tmp;
int i;
for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
bus->resource[i] = NULL;
- list_for_each_entry_safe(bus_res, tmp, &bus->resources, list) {
- list_del(&bus_res->list);
- kfree(bus_res);
- }
+ pci_free_resource_list(&bus->resources);
}
/**
diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c
index 095f29e13734..2a47e82821da 100644
--- a/drivers/pci/hotplug/acpi_pcihp.c
+++ b/drivers/pci/hotplug/acpi_pcihp.c
@@ -44,7 +44,7 @@
#define METHOD_NAME__SUN "_SUN"
#define METHOD_NAME_OSHP "OSHP"
-static int debug_acpi;
+static bool debug_acpi;
static acpi_status
decode_type0_hpx_record(union acpi_object *record, struct hotplug_params *hpx)
diff --git a/drivers/pci/hotplug/acpiphp_core.c b/drivers/pci/hotplug/acpiphp_core.c
index efa9f2de51c1..aa41631e9e02 100644
--- a/drivers/pci/hotplug/acpiphp_core.c
+++ b/drivers/pci/hotplug/acpiphp_core.c
@@ -47,7 +47,7 @@
/* name size which is used for entries in pcihpfs */
#define SLOT_NAME_SIZE 21 /* {_SUN} */
-static int debug;
+static bool debug;
int acpiphp_debug;
/* local variables */
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index fce1c54a0c8d..9ddf69e3bbef 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -132,6 +132,18 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
if (!acpi_pci_check_ejectable(pbus, handle) && !is_dock_device(handle))
return AE_OK;
+ pdev = pbus->self;
+ if (pdev && pci_is_pcie(pdev)) {
+ tmp = acpi_find_root_bridge_handle(pdev);
+ if (tmp) {
+ struct acpi_pci_root *root = acpi_pci_find_root(tmp);
+
+ if (root && (root->osc_control_set &
+ OSC_PCI_EXPRESS_NATIVE_HP_CONTROL))
+ return AE_OK;
+ }
+ }
+
acpi_evaluate_integer(handle, "_ADR", NULL, &adr);
device = (adr >> 16) & 0xffff;
function = adr & 0xffff;
@@ -213,7 +225,6 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
pdev = pci_get_slot(pbus, PCI_DEVFN(device, function));
if (pdev) {
- pdev->current_state = PCI_D0;
slot->flags |= (SLOT_ENABLED | SLOT_POWEREDON);
pci_dev_put(pdev);
}
@@ -459,17 +470,8 @@ static int add_bridge(acpi_handle handle)
{
acpi_status status;
unsigned long long tmp;
- struct acpi_pci_root *root;
acpi_handle dummy_handle;
- /*
- * We shouldn't use this bridge if PCIe native hotplug control has been
- * granted by the BIOS for it.
- */
- root = acpi_pci_find_root(handle);
- if (root && (root->osc_control_set & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL))
- return -ENODEV;
-
/* if the bridge doesn't have _STA, we assume it is always there */
status = acpi_get_handle(handle, "_STA", &dummy_handle);
if (ACPI_SUCCESS(status)) {
@@ -1385,19 +1387,11 @@ static void handle_hotplug_event_func(acpi_handle handle, u32 type,
static acpi_status
find_root_bridges(acpi_handle handle, u32 lvl, void *context, void **rv)
{
- struct acpi_pci_root *root;
int *count = (int *)context;
if (!acpi_is_root_bridge(handle))
return AE_OK;
- root = acpi_pci_find_root(handle);
- if (!root)
- return AE_OK;
-
- if (root->osc_control_set & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL)
- return AE_OK;
-
(*count)++;
acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
handle_hotplug_event_bridge, NULL);
diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
index e525263210ee..c35e8ad6db01 100644
--- a/drivers/pci/hotplug/acpiphp_ibm.c
+++ b/drivers/pci/hotplug/acpiphp_ibm.c
@@ -43,7 +43,7 @@
#define DRIVER_AUTHOR "Irene Zubarev <zubarev@us.ibm.com>, Vernon Mauery <vernux@us.ibm.com>"
#define DRIVER_DESC "ACPI Hot Plug PCI Controller Driver IBM extension"
-static int debug;
+static bool debug;
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c
index 41f6a8d79c81..6bf8d2ab164f 100644
--- a/drivers/pci/hotplug/cpcihp_zt5550.c
+++ b/drivers/pci/hotplug/cpcihp_zt5550.c
@@ -57,8 +57,8 @@
#define warn(format, arg...) printk(KERN_WARNING "%s: " format "\n", MY_NAME , ## arg)
/* local variables */
-static int debug;
-static int poll;
+static bool debug;
+static bool poll;
static struct cpci_hp_controller_ops zt5550_hpc_ops;
static struct cpci_hp_controller zt5550_hpc;
diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c
index f1ce99cceac6..187a199da93c 100644
--- a/drivers/pci/hotplug/cpqphp_core.c
+++ b/drivers/pci/hotplug/cpqphp_core.c
@@ -57,8 +57,8 @@ struct irq_routing_table *cpqhp_routing_table;
static void __iomem *smbios_table;
static void __iomem *smbios_start;
static void __iomem *cpqhp_rom_start;
-static int power_mode;
-static int debug;
+static bool power_mode;
+static bool debug;
static int initialized;
#define DRIVER_VERSION "0.9.8"
diff --git a/drivers/pci/hotplug/ibmphp_core.c b/drivers/pci/hotplug/ibmphp_core.c
index d934dd4fa873..5506e0e8fbc0 100644
--- a/drivers/pci/hotplug/ibmphp_core.c
+++ b/drivers/pci/hotplug/ibmphp_core.c
@@ -49,7 +49,7 @@
int ibmphp_debug;
-static int debug;
+static bool debug;
module_param(debug, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC (debug, "Debugging mode enabled or not");
MODULE_LICENSE ("GPL");
diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
index 6d2eea93298f..202f4a969eb5 100644
--- a/drivers/pci/hotplug/pci_hotplug_core.c
+++ b/drivers/pci/hotplug/pci_hotplug_core.c
@@ -51,7 +51,7 @@
/* local variables */
-static int debug;
+static bool debug;
#define DRIVER_VERSION "0.5"
#define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>, Scott Murray <scottm@somanetworks.com>"
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 838f571027b7..4b7cce1de6ec 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -40,12 +40,11 @@
#define MY_NAME "pciehp"
-extern int pciehp_poll_mode;
+extern bool pciehp_poll_mode;
extern int pciehp_poll_time;
-extern int pciehp_debug;
-extern int pciehp_force;
+extern bool pciehp_debug;
+extern bool pciehp_force;
extern struct workqueue_struct *pciehp_wq;
-extern struct workqueue_struct *pciehp_ordered_wq;
#define dbg(format, arg...) \
do { \
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 7ac8358df8fd..365c6b96c642 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -38,12 +38,11 @@
#include <linux/time.h>
/* Global variables */
-int pciehp_debug;
-int pciehp_poll_mode;
+bool pciehp_debug;
+bool pciehp_poll_mode;
int pciehp_poll_time;
-int pciehp_force;
+bool pciehp_force;
struct workqueue_struct *pciehp_wq;
-struct workqueue_struct *pciehp_ordered_wq;
#define DRIVER_VERSION "0.4"
#define DRIVER_AUTHOR "Dan Zink <dan.zink@compaq.com>, Greg Kroah-Hartman <greg@kroah.com>, Dely Sy <dely.l.sy@intel.com>"
@@ -345,18 +344,11 @@ static int __init pcied_init(void)
if (!pciehp_wq)
return -ENOMEM;
- pciehp_ordered_wq = alloc_ordered_workqueue("pciehp_ordered", 0);
- if (!pciehp_ordered_wq) {
- destroy_workqueue(pciehp_wq);
- return -ENOMEM;
- }
-
pciehp_firmware_init();
retval = pcie_port_service_register(&hpdriver_portdrv);
dbg("pcie_port_service_register = %d\n", retval);
info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
if (retval) {
- destroy_workqueue(pciehp_ordered_wq);
destroy_workqueue(pciehp_wq);
dbg("Failure to register service\n");
}
@@ -366,9 +358,8 @@ static int __init pcied_init(void)
static void __exit pcied_cleanup(void)
{
dbg("unload_pciehpd()\n");
- destroy_workqueue(pciehp_ordered_wq);
- destroy_workqueue(pciehp_wq);
pcie_port_service_unregister(&hpdriver_portdrv);
+ destroy_workqueue(pciehp_wq);
info(DRIVER_DESC " version: " DRIVER_VERSION " unloaded\n");
}
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index 085dbb5fc168..27f44295a657 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -344,7 +344,7 @@ void pciehp_queue_pushbutton_work(struct work_struct *work)
kfree(info);
goto out;
}
- queue_work(pciehp_ordered_wq, &info->work);
+ queue_work(pciehp_wq, &info->work);
out:
mutex_unlock(&p_slot->lock);
}
@@ -439,7 +439,7 @@ static void handle_surprise_event(struct slot *p_slot)
else
p_slot->state = POWERON_STATE;
- queue_work(pciehp_ordered_wq, &info->work);
+ queue_work(pciehp_wq, &info->work);
}
static void interrupt_event_handler(struct work_struct *work)
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 7b1414810ae3..bcdbb1643621 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -806,7 +806,6 @@ static void pcie_cleanup_slot(struct controller *ctrl)
struct slot *slot = ctrl->slot;
cancel_delayed_work(&slot->work);
flush_workqueue(pciehp_wq);
- flush_workqueue(pciehp_ordered_wq);
kfree(slot);
}
diff --git a/drivers/pci/hotplug/pcihp_skeleton.c b/drivers/pci/hotplug/pcihp_skeleton.c
index 5175d9b26f0b..b20ceaaa31f4 100644
--- a/drivers/pci/hotplug/pcihp_skeleton.c
+++ b/drivers/pci/hotplug/pcihp_skeleton.c
@@ -59,7 +59,7 @@ static LIST_HEAD(slot_list);
#define warn(format, arg...) printk(KERN_WARNING "%s: " format "\n", MY_NAME , ## arg)
/* local variables */
-static int debug;
+static bool debug;
static int num_slots;
#define DRIVER_VERSION "0.3"
diff --git a/drivers/pci/hotplug/rpaphp.h b/drivers/pci/hotplug/rpaphp.h
index 419919a87b0f..df5677440a08 100644
--- a/drivers/pci/hotplug/rpaphp.h
+++ b/drivers/pci/hotplug/rpaphp.h
@@ -46,7 +46,7 @@
#define PRESENT 1 /* Card in slot */
#define MY_NAME "rpaphp"
-extern int rpaphp_debug;
+extern bool rpaphp_debug;
#define dbg(format, arg...) \
do { \
if (rpaphp_debug) \
diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c
index 758adb5f47fd..127d6e600185 100644
--- a/drivers/pci/hotplug/rpaphp_core.c
+++ b/drivers/pci/hotplug/rpaphp_core.c
@@ -37,7 +37,7 @@
/* and pci_do_scan_bus */
#include "rpaphp.h"
-int rpaphp_debug;
+bool rpaphp_debug;
LIST_HEAD(rpaphp_slot_head);
#define DRIVER_VERSION "0.1"
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h
index e0c90e643b5f..ca64932e658b 100644
--- a/drivers/pci/hotplug/shpchp.h
+++ b/drivers/pci/hotplug/shpchp.h
@@ -43,9 +43,9 @@
#define MY_NAME THIS_MODULE->name
#endif
-extern int shpchp_poll_mode;
+extern bool shpchp_poll_mode;
extern int shpchp_poll_time;
-extern int shpchp_debug;
+extern bool shpchp_debug;
extern struct workqueue_struct *shpchp_wq;
extern struct workqueue_struct *shpchp_ordered_wq;
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c
index dd7e0c51a33e..7414fd9ad1d2 100644
--- a/drivers/pci/hotplug/shpchp_core.c
+++ b/drivers/pci/hotplug/shpchp_core.c
@@ -36,8 +36,8 @@
#include "shpchp.h"
/* Global variables */
-int shpchp_debug;
-int shpchp_poll_mode;
+bool shpchp_debug;
+bool shpchp_poll_mode;
int shpchp_poll_time;
struct workqueue_struct *shpchp_wq;
struct workqueue_struct *shpchp_ordered_wq;
diff --git a/drivers/pci/ioapic.c b/drivers/pci/ioapic.c
index 5775638ac017..205af8dc83c2 100644
--- a/drivers/pci/ioapic.c
+++ b/drivers/pci/ioapic.c
@@ -17,7 +17,7 @@
*/
#include <linux/pci.h>
-#include <linux/export.h>
+#include <linux/module.h>
#include <linux/acpi.h>
#include <linux/slab.h>
#include <acpi/acpi_bus.h>
@@ -27,7 +27,7 @@ struct ioapic {
u32 gsi_base;
};
-static int ioapic_probe(struct pci_dev *dev, const struct pci_device_id *ent)
+static int __devinit ioapic_probe(struct pci_dev *dev, const struct pci_device_id *ent)
{
acpi_handle handle;
acpi_status status;
@@ -88,7 +88,7 @@ exit_free:
return -ENODEV;
}
-static void ioapic_remove(struct pci_dev *dev)
+static void __devexit ioapic_remove(struct pci_dev *dev)
{
struct ioapic *ioapic = pci_get_drvdata(dev);
@@ -99,13 +99,12 @@ static void ioapic_remove(struct pci_dev *dev)
}
-static struct pci_device_id ioapic_devices[] = {
- { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
- PCI_CLASS_SYSTEM_PIC_IOAPIC << 8, 0xffff00, },
- { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
- PCI_CLASS_SYSTEM_PIC_IOXAPIC << 8, 0xffff00, },
+static DEFINE_PCI_DEVICE_TABLE(ioapic_devices) = {
+ { PCI_DEVICE_CLASS(PCI_CLASS_SYSTEM_PIC_IOAPIC, ~0) },
+ { PCI_DEVICE_CLASS(PCI_CLASS_SYSTEM_PIC_IOXAPIC, ~0) },
{ }
};
+MODULE_DEVICE_TABLE(pci, ioapic_devices);
static struct pci_driver ioapic_driver = {
.name = "ioapic",
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index b82c155d7b37..0321fa3b4226 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -283,6 +283,7 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
struct resource *res;
struct pci_dev *pdev;
struct pci_sriov *iov = dev->sriov;
+ int bars = 0;
if (!nr_virtfn)
return 0;
@@ -307,6 +308,7 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
nres = 0;
for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
+ bars |= (1 << (i + PCI_IOV_RESOURCES));
res = dev->resource + PCI_IOV_RESOURCES + i;
if (res->parent)
nres++;
@@ -324,6 +326,11 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
return -ENOMEM;
}
+ if (pci_enable_resources(dev, bars)) {
+ dev_err(&dev->dev, "SR-IOV: IOV BARS not allocated\n");
+ return -ENOMEM;
+ }
+
if (iov->link != dev->devfn) {
pdev = pci_get_slot(dev->bus, iov->link);
if (!pdev)
@@ -340,11 +347,13 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
return rc;
}
+ pci_write_config_dword(dev, iov->pos + PCI_SRIOV_SYS_PGSIZE, iov->pgsz);
+
iov->ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE;
- pci_block_user_cfg_access(dev);
+ pci_cfg_access_lock(dev);
pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
msleep(100);
- pci_unblock_user_cfg_access(dev);
+ pci_cfg_access_unlock(dev);
iov->initial = initial;
if (nr_virtfn < initial)
@@ -372,10 +381,10 @@ failed:
virtfn_remove(dev, j, 0);
iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
- pci_block_user_cfg_access(dev);
+ pci_cfg_access_lock(dev);
pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
ssleep(1);
- pci_unblock_user_cfg_access(dev);
+ pci_cfg_access_unlock(dev);
if (iov->link != dev->devfn)
sysfs_remove_link(&dev->dev.kobj, "dep_link");
@@ -398,10 +407,10 @@ static void sriov_disable(struct pci_dev *dev)
virtfn_remove(dev, i, 0);
iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
- pci_block_user_cfg_access(dev);
+ pci_cfg_access_lock(dev);
pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
ssleep(1);
- pci_unblock_user_cfg_access(dev);
+ pci_cfg_access_unlock(dev);
if (iov->link != dev->devfn)
sysfs_remove_link(&dev->dev.kobj, "dep_link");
@@ -445,7 +454,6 @@ static int sriov_init(struct pci_dev *dev, int pos)
found:
pci_write_config_word(dev, pos + PCI_SRIOV_CTRL, ctrl);
- pci_write_config_word(dev, pos + PCI_SRIOV_NUM_VF, total);
pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &offset);
pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &stride);
if (!offset || (total > 1 && !stride))
@@ -458,7 +466,6 @@ found:
return -EIO;
pgsz &= ~(pgsz - 1);
- pci_write_config_dword(dev, pos + PCI_SRIOV_SYS_PGSIZE, pgsz);
nres = 0;
for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 0e6d04d7ba4f..a825d78fd0aa 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -86,6 +86,31 @@ void default_teardown_msi_irqs(struct pci_dev *dev)
}
#endif
+#ifndef arch_restore_msi_irqs
+# define arch_restore_msi_irqs default_restore_msi_irqs
+# define HAVE_DEFAULT_MSI_RESTORE_IRQS
+#endif
+
+#ifdef HAVE_DEFAULT_MSI_RESTORE_IRQS
+void default_restore_msi_irqs(struct pci_dev *dev, int irq)
+{
+ struct msi_desc *entry;
+
+ entry = NULL;
+ if (dev->msix_enabled) {
+ list_for_each_entry(entry, &dev->msi_list, list) {
+ if (irq == entry->irq)
+ break;
+ }
+ } else if (dev->msi_enabled) {
+ entry = irq_get_msi_desc(irq);
+ }
+
+ if (entry)
+ write_msi_msg(irq, &entry->msg);
+}
+#endif
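The #ifndef above is the override hook: when an architecture provides its own arch_restore_msi_irqs() and defines the macro before this point, the HAVE_DEFAULT_MSI_RESTORE_IRQS block is compiled out. A hypothetical sketch of such an override in an arch header (illustrative only, no in-tree user is implied):

#define arch_restore_msi_irqs arch_restore_msi_irqs
void arch_restore_msi_irqs(struct pci_dev *dev, int irq);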
+
static void msi_set_enable(struct pci_dev *dev, int pos, int enable)
{
u16 control;
@@ -323,6 +348,18 @@ static void free_msi_irqs(struct pci_dev *dev)
if (list_is_last(&entry->list, &dev->msi_list))
iounmap(entry->mask_base);
}
+
+ /*
+	 * It's possible that we get into this path when
+	 * populate_msi_sysfs() fails, which means the entries
+	 * were not registered with sysfs.  In that case don't
+	 * unregister them.
+ */
+ if (entry->kobj.parent) {
+ kobject_del(&entry->kobj);
+ kobject_put(&entry->kobj);
+ }
+
list_del(&entry->list);
kfree(entry);
}
@@ -360,7 +397,7 @@ static void __pci_restore_msi_state(struct pci_dev *dev)
pci_intx_for_msi(dev, 0);
msi_set_enable(dev, pos, 0);
- write_msi_msg(dev->irq, &entry->msg);
+ arch_restore_msi_irqs(dev, dev->irq);
pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
msi_mask_irq(entry, msi_capable_mask(control), entry->masked);
@@ -388,7 +425,7 @@ static void __pci_restore_msix_state(struct pci_dev *dev)
pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
list_for_each_entry(entry, &dev->msi_list, list) {
- write_msi_msg(entry->irq, &entry->msg);
+ arch_restore_msi_irqs(dev, entry->irq);
msix_mask_irq(entry, entry->masked);
}
@@ -403,6 +440,98 @@ void pci_restore_msi_state(struct pci_dev *dev)
}
EXPORT_SYMBOL_GPL(pci_restore_msi_state);
+
+#define to_msi_attr(obj) container_of(obj, struct msi_attribute, attr)
+#define to_msi_desc(obj) container_of(obj, struct msi_desc, kobj)
+
+struct msi_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct msi_desc *entry, struct msi_attribute *attr,
+ char *buf);
+ ssize_t (*store)(struct msi_desc *entry, struct msi_attribute *attr,
+ const char *buf, size_t count);
+};
+
+static ssize_t show_msi_mode(struct msi_desc *entry, struct msi_attribute *atr,
+ char *buf)
+{
+ return sprintf(buf, "%s\n", entry->msi_attrib.is_msix ? "msix" : "msi");
+}
+
+static ssize_t msi_irq_attr_show(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ struct msi_attribute *attribute = to_msi_attr(attr);
+ struct msi_desc *entry = to_msi_desc(kobj);
+
+ if (!attribute->show)
+ return -EIO;
+
+ return attribute->show(entry, attribute, buf);
+}
+
+static const struct sysfs_ops msi_irq_sysfs_ops = {
+ .show = msi_irq_attr_show,
+};
+
+static struct msi_attribute mode_attribute =
+ __ATTR(mode, S_IRUGO, show_msi_mode, NULL);
+
+
+struct attribute *msi_irq_default_attrs[] = {
+ &mode_attribute.attr,
+ NULL
+};
+
+void msi_kobj_release(struct kobject *kobj)
+{
+ struct msi_desc *entry = to_msi_desc(kobj);
+
+ pci_dev_put(entry->dev);
+}
+
+static struct kobj_type msi_irq_ktype = {
+ .release = msi_kobj_release,
+ .sysfs_ops = &msi_irq_sysfs_ops,
+ .default_attrs = msi_irq_default_attrs,
+};
+
+static int populate_msi_sysfs(struct pci_dev *pdev)
+{
+ struct msi_desc *entry;
+ struct kobject *kobj;
+ int ret;
+ int count = 0;
+
+ pdev->msi_kset = kset_create_and_add("msi_irqs", NULL, &pdev->dev.kobj);
+ if (!pdev->msi_kset)
+ return -ENOMEM;
+
+ list_for_each_entry(entry, &pdev->msi_list, list) {
+ kobj = &entry->kobj;
+ kobj->kset = pdev->msi_kset;
+ pci_dev_get(pdev);
+ ret = kobject_init_and_add(kobj, &msi_irq_ktype, NULL,
+ "%u", entry->irq);
+ if (ret)
+ goto out_unroll;
+
+ count++;
+ }
+
+ return 0;
+
+out_unroll:
+ list_for_each_entry(entry, &pdev->msi_list, list) {
+ if (!count)
+ break;
+ kobject_del(&entry->kobj);
+ kobject_put(&entry->kobj);
+ count--;
+ }
+ return ret;
+}
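For reference, the kset created here appears in sysfs as an msi_irqs/ directory under the device, with one kobject per allocated vector; reading its single "mode" attribute yields "msi" or "msix" depending on msi_attrib.is_msix, and the pci_dev reference taken with pci_dev_get() above is dropped in msi_kobj_release() when the kobject goes away.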
+
/**
* msi_capability_init - configure device's MSI capability structure
* @dev: pointer to the pci_dev data structure of MSI device function
@@ -454,6 +583,13 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
return ret;
}
+ ret = populate_msi_sysfs(dev);
+ if (ret) {
+ msi_mask_irq(entry, mask, ~mask);
+ free_msi_irqs(dev);
+ return ret;
+ }
+
/* Set MSI enabled bits */
pci_intx_for_msi(dev, 0);
msi_set_enable(dev, pos, 1);
@@ -574,6 +710,12 @@ static int msix_capability_init(struct pci_dev *dev,
msix_program_entries(dev, entries);
+ ret = populate_msi_sysfs(dev);
+ if (ret) {
+ ret = 0;
+ goto error;
+ }
+
/* Set MSI-X enabled bits and unmask the function */
pci_intx_for_msi(dev, 0);
dev->msix_enabled = 1;
@@ -732,6 +874,8 @@ void pci_disable_msi(struct pci_dev *dev)
pci_msi_shutdown(dev);
free_msi_irqs(dev);
+ kset_unregister(dev->msi_kset);
+ dev->msi_kset = NULL;
}
EXPORT_SYMBOL(pci_disable_msi);
@@ -830,6 +974,8 @@ void pci_disable_msix(struct pci_dev *dev)
pci_msix_shutdown(dev);
free_msi_irqs(dev);
+ kset_unregister(dev->msi_kset);
+ dev->msi_kset = NULL;
}
EXPORT_SYMBOL(pci_disable_msix);
@@ -870,5 +1016,15 @@ EXPORT_SYMBOL(pci_msi_enabled);
void pci_msi_init_pci_dev(struct pci_dev *dev)
{
+ int pos;
INIT_LIST_HEAD(&dev->msi_list);
+
+	/* Disable the MSI hardware to avoid screaming interrupts
+	 * during boot.  This is the power-on reset default, so
+	 * usually this should be a no-op.
+ */
+ pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
+ if (pos)
+ msi_set_enable(dev, pos, 0);
+ msix_set_enable(dev, 0);
}
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 4ecb6408b0d6..060fd22a1103 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -45,16 +45,20 @@ static void pci_acpi_wake_dev(acpi_handle handle, u32 event, void *context)
{
struct pci_dev *pci_dev = context;
- if (event == ACPI_NOTIFY_DEVICE_WAKE && pci_dev) {
+ if (event != ACPI_NOTIFY_DEVICE_WAKE || !pci_dev)
+ return;
+
+ if (!pci_dev->pm_cap || !pci_dev->pme_support
+ || pci_check_pme_status(pci_dev)) {
if (pci_dev->pme_poll)
pci_dev->pme_poll = false;
pci_wakeup_event(pci_dev);
- pci_check_pme_status(pci_dev);
pm_runtime_resume(&pci_dev->dev);
- if (pci_dev->subordinate)
- pci_pme_wakeup_bus(pci_dev->subordinate);
}
+
+ if (pci_dev->subordinate)
+ pci_pme_wakeup_bus(pci_dev->subordinate);
}
/**
@@ -395,7 +399,6 @@ static int __init acpi_pci_init(void)
if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) {
printk(KERN_INFO"ACPI FADT declares the system doesn't support PCIe ASPM, so disable it\n");
- pcie_clear_aspm();
pcie_no_aspm();
}
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 12d1e81a8abe..3623d65f8b86 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -604,7 +604,8 @@ static bool pci_has_legacy_pm_support(struct pci_dev *pci_dev)
* supported as well. Drivers are supposed to support either the
* former, or the latter, but not both at the same time.
*/
- WARN_ON(ret && drv->driver.pm);
+ WARN(ret && drv->driver.pm, "driver %s device %04x:%04x\n",
+ drv->name, pci_dev->vendor, pci_dev->device);
return ret;
}
diff --git a/drivers/pci/pci-label.c b/drivers/pci/pci-label.c
index 81525ae5d869..edaed6f4da6c 100644
--- a/drivers/pci/pci-label.c
+++ b/drivers/pci/pci-label.c
@@ -89,7 +89,7 @@ find_smbios_instance_string(struct pci_dev *pdev, char *buf,
return 0;
}
-static mode_t
+static umode_t
smbios_instance_string_exist(struct kobject *kobj, struct attribute *attr,
int n)
{
@@ -275,7 +275,7 @@ device_has_dsm(struct device *dev)
return FALSE;
}
-static mode_t
+static umode_t
acpi_index_string_exist(struct kobject *kobj, struct attribute *attr, int n)
{
struct device *dev;
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 106be0d08f81..a3cd8cad532a 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -432,7 +432,7 @@ pci_read_config(struct file *filp, struct kobject *kobj,
u8 *data = (u8*) buf;
/* Several chips lock up trying to read undefined config space */
- if (security_capable(&init_user_ns, filp->f_cred, CAP_SYS_ADMIN) == 0) {
+ if (security_capable(filp->f_cred, &init_user_ns, CAP_SYS_ADMIN) == 0) {
size = dev->cfg_size;
} else if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) {
size = 128;
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 6f45a73c6e9f..97fff785e97e 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -88,6 +88,12 @@ enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;
u8 pci_dfl_cache_line_size __devinitdata = L1_CACHE_BYTES >> 2;
u8 pci_cache_line_size;
+/*
+ * If we set up a device for bus mastering, we need to check the latency
+ * timer as certain BIOSes forget to set it properly.
+ */
+unsigned int pcibios_max_latency = 255;
+
/**
* pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
* @bus: pointer to PCI bus structure to search
@@ -664,6 +670,9 @@ static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
error = platform_pci_set_power_state(dev, state);
if (!error)
pci_update_current_state(dev, state);
+ /* Fall back to PCI_D0 if native PM is not supported */
+ if (!dev->pm_cap)
+ dev->current_state = PCI_D0;
} else {
error = -ENODEV;
/* Fall back to PCI_D0 if native PM is not supported */
@@ -956,6 +965,7 @@ void pci_restore_state(struct pci_dev *dev)
/* PCI Express register must be restored first */
pci_restore_pcie_state(dev);
+ pci_restore_ats_state(dev);
/*
* The Base Address register should be programmed before the command
@@ -964,7 +974,7 @@ void pci_restore_state(struct pci_dev *dev)
for (i = 15; i >= 0; i--) {
pci_read_config_dword(dev, i * 4, &val);
if (val != dev->saved_config_space[i]) {
- dev_printk(KERN_DEBUG, &dev->dev, "restoring config "
+ dev_dbg(&dev->dev, "restoring config "
"space at offset %#x (was %#x, writing %#x)\n",
i, val, (int)dev->saved_config_space[i]);
pci_write_config_dword(dev,i * 4,
@@ -1126,7 +1136,11 @@ static int __pci_enable_device_flags(struct pci_dev *dev,
if (atomic_add_return(1, &dev->enable_cnt) > 1)
return 0; /* already enabled */
- for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
+ /* only skip sriov related */
+ for (i = 0; i <= PCI_ROM_RESOURCE; i++)
+ if (dev->resource[i].flags & flags)
+ bars |= (1 << i);
+ for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
if (dev->resource[i].flags & flags)
bars |= (1 << i);
@@ -1529,8 +1543,7 @@ void pci_pme_active(struct pci_dev *dev, bool enable)
}
out:
- dev_printk(KERN_DEBUG, &dev->dev, "PME# %s\n",
- enable ? "enabled" : "disabled");
+ dev_dbg(&dev->dev, "PME# %s\n", enable ? "enabled" : "disabled");
}
/**
@@ -2589,6 +2602,33 @@ static void __pci_set_master(struct pci_dev *dev, bool enable)
}
/**
+ * pcibios_set_master - enable PCI bus-mastering for device dev
+ * @dev: the PCI device to enable
+ *
+ * Enables PCI bus-mastering for the device. This is the default
+ * implementation. Architecture specific implementations can override
+ * this if necessary.
+ */
+void __weak pcibios_set_master(struct pci_dev *dev)
+{
+ u8 lat;
+
+ /* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
+ if (pci_is_pcie(dev))
+ return;
+
+ pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
+ if (lat < 16)
+ lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
+ else if (lat > pcibios_max_latency)
+ lat = pcibios_max_latency;
+ else
+ return;
+ dev_printk(KERN_DEBUG, &dev->dev, "setting latency timer to %d\n", lat);
+ pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
+}
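Worked example of the clamping above, assuming the default pcibios_max_latency of 255: a latency timer read back as 0 (firmware never programmed it) is raised to 64; a value already in the 16..255 range is left untouched and the function returns without writing; only when an architecture lowers pcibios_max_latency can a too-large value be clamped down to that limit.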
+
+/**
* pci_set_master - enables bus-mastering for device dev
* @dev: the PCI device to enable
*
@@ -2761,6 +2801,116 @@ pci_intx(struct pci_dev *pdev, int enable)
}
/**
+ * pci_intx_mask_supported - probe for INTx masking support
+ * @dev: the PCI device to operate on
+ *
+ * Check whether the device supports INTx masking via the config space
+ * command word.
+ */
+bool pci_intx_mask_supported(struct pci_dev *dev)
+{
+ bool mask_supported = false;
+ u16 orig, new;
+
+ pci_cfg_access_lock(dev);
+
+ pci_read_config_word(dev, PCI_COMMAND, &orig);
+ pci_write_config_word(dev, PCI_COMMAND,
+ orig ^ PCI_COMMAND_INTX_DISABLE);
+ pci_read_config_word(dev, PCI_COMMAND, &new);
+
+ /*
+ * There's no way to protect against hardware bugs or detect them
+ * reliably, but as long as we know what the value should be, let's
+ * go ahead and check it.
+ */
+ if ((new ^ orig) & ~PCI_COMMAND_INTX_DISABLE) {
+ dev_err(&dev->dev, "Command register changed from "
+ "0x%x to 0x%x: driver or hardware bug?\n", orig, new);
+ } else if ((new ^ orig) & PCI_COMMAND_INTX_DISABLE) {
+ mask_supported = true;
+ pci_write_config_word(dev, PCI_COMMAND, orig);
+ }
+
+ pci_cfg_access_unlock(dev);
+ return mask_supported;
+}
+EXPORT_SYMBOL_GPL(pci_intx_mask_supported);
+
+static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
+{
+ struct pci_bus *bus = dev->bus;
+ bool mask_updated = true;
+ u32 cmd_status_dword;
+ u16 origcmd, newcmd;
+ unsigned long flags;
+ bool irq_pending;
+
+ /*
+ * We do a single dword read to retrieve both command and status.
+ * Document assumptions that make this possible.
+ */
+ BUILD_BUG_ON(PCI_COMMAND % 4);
+ BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
+
+ raw_spin_lock_irqsave(&pci_lock, flags);
+
+ bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);
+
+ irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;
+
+ /*
+ * Check interrupt status register to see whether our device
+ * triggered the interrupt (when masking) or the next IRQ is
+ * already pending (when unmasking).
+ */
+ if (mask != irq_pending) {
+ mask_updated = false;
+ goto done;
+ }
+
+ origcmd = cmd_status_dword;
+ newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
+ if (mask)
+ newcmd |= PCI_COMMAND_INTX_DISABLE;
+ if (newcmd != origcmd)
+ bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);
+
+done:
+ raw_spin_unlock_irqrestore(&pci_lock, flags);
+
+ return mask_updated;
+}
+
+/**
+ * pci_check_and_mask_intx - mask INTx on pending interrupt
+ * @dev: the PCI device to operate on
+ *
+ * Check whether the device has its INTx line asserted, mask it and
+ * return true in that case.  False is returned if no interrupt was
+ * pending.
+ */
+bool pci_check_and_mask_intx(struct pci_dev *dev)
+{
+ return pci_check_and_set_intx_mask(dev, true);
+}
+EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
+
+/**
+ * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
+ * @dev: the PCI device to operate on
+ *
+ * Check whether the device has its INTx line asserted; if not, unmask
+ * the line and return true.  False is returned and the mask remains
+ * active if an interrupt was still pending.
+ */
+bool pci_check_and_unmask_intx(struct pci_dev *dev)
+{
+ return pci_check_and_set_intx_mask(dev, false);
+}
+EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
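A hedged driver-side sketch of the intended use (the device structure and helpers are hypothetical): pci_intx_mask_supported() is checked once at probe time, the hard IRQ handler uses pci_check_and_mask_intx() to identify and silence the device on a shared line, and the line is re-enabled once the device has been serviced.

struct foo_dev {
	struct pci_dev *pdev;
	struct work_struct service_work;
};

static irqreturn_t foo_irq(int irq, void *data)
{
	struct foo_dev *foo = data;

	/* false: not our device (shared line); true: ours, and now masked */
	if (!pci_check_and_mask_intx(foo->pdev))
		return IRQ_NONE;

	schedule_work(&foo->service_work);
	return IRQ_HANDLED;
}

static void foo_service(struct work_struct *work)
{
	struct foo_dev *foo = container_of(work, struct foo_dev, service_work);

	foo_handle_device(foo);			/* hypothetical device servicing */

	/* stays masked and returns false if another interrupt is pending */
	while (!pci_check_and_unmask_intx(foo->pdev))
		foo_handle_device(foo);
}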
+
+/**
* pci_msi_off - disables any msi or msix capabilities
* @dev: the PCI device to operate on
*
@@ -2958,7 +3108,7 @@ static int pci_dev_reset(struct pci_dev *dev, int probe)
might_sleep();
if (!probe) {
- pci_block_user_cfg_access(dev);
+ pci_cfg_access_lock(dev);
/* block PM suspend, driver probe, etc. */
device_lock(&dev->dev);
}
@@ -2983,7 +3133,7 @@ static int pci_dev_reset(struct pci_dev *dev, int probe)
done:
if (!probe) {
device_unlock(&dev->dev);
- pci_unblock_user_cfg_access(dev);
+ pci_cfg_access_unlock(dev);
}
return rc;
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index b74084e9ca12..1009a5e88e53 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -136,6 +136,8 @@ static inline void pci_remove_legacy_files(struct pci_bus *bus) { return; }
/* Lock for read/write access to pci device and bus lists */
extern struct rw_semaphore pci_bus_sem;
+extern raw_spinlock_t pci_lock;
+
extern unsigned int pci_pm_d3_delay;
#ifdef CONFIG_PCI_MSI
@@ -249,6 +251,14 @@ struct pci_sriov {
u8 __iomem *mstate; /* VF Migration State Array */
};
+#ifdef CONFIG_PCI_ATS
+extern void pci_restore_ats_state(struct pci_dev *dev);
+#else
+static inline void pci_restore_ats_state(struct pci_dev *dev)
+{
+}
+#endif /* CONFIG_PCI_ATS */
+
#ifdef CONFIG_PCI_IOV
extern int pci_iov_init(struct pci_dev *dev);
extern void pci_iov_release(struct pci_dev *dev);
diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig
index dc29348264c6..72962cc92e0a 100644
--- a/drivers/pci/pcie/Kconfig
+++ b/drivers/pci/pcie/Kconfig
@@ -39,7 +39,7 @@ config PCIEASPM
Power Management) and Clock Power Management. ASPM supports
state L0/L0s/L1.
- ASPM is initially set up the the firmware. With this option enabled,
+ ASPM is initially set up by the firmware. With this option enabled,
Linux can modify this state in order to disable ASPM on known-bad
hardware or configurations and enable it when known-safe.
diff --git a/drivers/pci/pcie/aer/aer_inject.c b/drivers/pci/pcie/aer/aer_inject.c
index 95489cd9a555..52229863e9fe 100644
--- a/drivers/pci/pcie/aer/aer_inject.c
+++ b/drivers/pci/pcie/aer/aer_inject.c
@@ -28,7 +28,7 @@
#include "aerdrv.h"
/* Override the existing corrected and uncorrected error masks */
-static int aer_mask_override;
+static bool aer_mask_override;
module_param(aer_mask_override, bool, 0);
struct aer_error_inj {
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index 9674e9f30d49..0ca053538146 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -27,8 +27,8 @@
#include <linux/kfifo.h>
#include "aerdrv.h"
-static int forceload;
-static int nosourceid;
+static bool forceload;
+static bool nosourceid;
module_param(forceload, bool, 0);
module_param(nosourceid, bool, 0);
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index cbfbab18be91..1cfbf228fbb1 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -68,7 +68,7 @@ struct pcie_link_state {
struct aspm_latency acceptable[8];
};
-static int aspm_disabled, aspm_force, aspm_clear_state;
+static int aspm_disabled, aspm_force;
static bool aspm_support_enabled = true;
static DEFINE_MUTEX(aspm_lock);
static LIST_HEAD(link_list);
@@ -500,9 +500,6 @@ static int pcie_aspm_sanity_check(struct pci_dev *pdev)
int pos;
u32 reg32;
- if (aspm_clear_state)
- return -EINVAL;
-
/*
* Some functions in a slot might not all be PCIe functions,
* very strange. Disable ASPM for the whole slot
@@ -574,9 +571,6 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev)
pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)
return;
- if (aspm_disabled && !aspm_clear_state)
- return;
-
/* VIA has a strange chipset, root port is under a bridge */
if (pdev->pcie_type == PCI_EXP_TYPE_ROOT_PORT &&
pdev->bus->self)
@@ -608,7 +602,7 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev)
* the BIOS's expectation, we'll do so once pci_enable_device() is
* called.
*/
- if (aspm_policy != POLICY_POWERSAVE || aspm_clear_state) {
+ if (aspm_policy != POLICY_POWERSAVE) {
pcie_config_aspm_path(link);
pcie_set_clkpm(link, policy_to_clkpm_state(link));
}
@@ -649,8 +643,7 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev)
struct pci_dev *parent = pdev->bus->self;
struct pcie_link_state *link, *root, *parent_link;
- if ((aspm_disabled && !aspm_clear_state) || !pci_is_pcie(pdev) ||
- !parent || !parent->link_state)
+ if (!pci_is_pcie(pdev) || !parent || !parent->link_state)
return;
if ((parent->pcie_type != PCI_EXP_TYPE_ROOT_PORT) &&
(parent->pcie_type != PCI_EXP_TYPE_DOWNSTREAM))
@@ -734,13 +727,18 @@ void pcie_aspm_powersave_config_link(struct pci_dev *pdev)
* pci_disable_link_state - disable pci device's link state, so the link will
* never enter specific states
*/
-static void __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
+static void __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem,
+ bool force)
{
struct pci_dev *parent = pdev->bus->self;
struct pcie_link_state *link;
- if (aspm_disabled || !pci_is_pcie(pdev))
+ if (aspm_disabled && !force)
+ return;
+
+ if (!pci_is_pcie(pdev))
return;
+
if (pdev->pcie_type == PCI_EXP_TYPE_ROOT_PORT ||
pdev->pcie_type == PCI_EXP_TYPE_DOWNSTREAM)
parent = pdev;
@@ -768,16 +766,31 @@ static void __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
void pci_disable_link_state_locked(struct pci_dev *pdev, int state)
{
- __pci_disable_link_state(pdev, state, false);
+ __pci_disable_link_state(pdev, state, false, false);
}
EXPORT_SYMBOL(pci_disable_link_state_locked);
void pci_disable_link_state(struct pci_dev *pdev, int state)
{
- __pci_disable_link_state(pdev, state, true);
+ __pci_disable_link_state(pdev, state, true, false);
}
EXPORT_SYMBOL(pci_disable_link_state);
+void pcie_clear_aspm(struct pci_bus *bus)
+{
+ struct pci_dev *child;
+
+ /*
+ * Clear any ASPM setup that the firmware has carried out on this bus
+ */
+ list_for_each_entry(child, &bus->devices, bus_list) {
+ __pci_disable_link_state(child, PCIE_LINK_STATE_L0S |
+ PCIE_LINK_STATE_L1 |
+ PCIE_LINK_STATE_CLKPM,
+ false, true);
+ }
+}
+
static int pcie_aspm_set_policy(const char *val, struct kernel_param *kp)
{
int i;
@@ -935,6 +948,7 @@ void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev)
static int __init pcie_aspm_disable(char *str)
{
if (!strcmp(str, "off")) {
+ aspm_policy = POLICY_DEFAULT;
aspm_disabled = 1;
aspm_support_enabled = false;
printk(KERN_INFO "PCIe ASPM is disabled\n");
@@ -947,16 +961,18 @@ static int __init pcie_aspm_disable(char *str)
__setup("pcie_aspm=", pcie_aspm_disable);
-void pcie_clear_aspm(void)
-{
- if (!aspm_force)
- aspm_clear_state = 1;
-}
-
void pcie_no_aspm(void)
{
- if (!aspm_force)
+ /*
+ * Disabling ASPM is intended to prevent the kernel from modifying
+ * existing hardware state, not to clear existing state. To that end:
+ * (a) set policy to POLICY_DEFAULT in order to avoid changing state
+ * (b) prevent userspace from changing policy
+ */
+ if (!aspm_force) {
+ aspm_policy = POLICY_DEFAULT;
aspm_disabled = 1;
+ }
}
/**
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 04e74f485714..7cc9e2f0f47c 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1522,19 +1522,21 @@ unsigned int __devinit pci_scan_child_bus(struct pci_bus *bus)
return max;
}
-struct pci_bus * pci_create_bus(struct device *parent,
- int bus, struct pci_ops *ops, void *sysdata)
+struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
+ struct pci_ops *ops, void *sysdata, struct list_head *resources)
{
- int error;
+ int error, i;
struct pci_bus *b, *b2;
struct device *dev;
+ struct pci_bus_resource *bus_res, *n;
+ struct resource *res;
b = pci_alloc_bus();
if (!b)
return NULL;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev){
+ if (!dev) {
kfree(b);
return NULL;
}
@@ -1577,8 +1579,20 @@ struct pci_bus * pci_create_bus(struct device *parent,
pci_create_legacy_files(b);
b->number = b->secondary = bus;
- b->resource[0] = &ioport_resource;
- b->resource[1] = &iomem_resource;
+
+ /* Add initial resources to the bus */
+ list_for_each_entry_safe(bus_res, n, resources, list)
+ list_move_tail(&bus_res->list, &b->resources);
+
+ if (parent)
+ dev_info(parent, "PCI host bridge to bus %s\n", dev_name(&b->dev));
+ else
+ printk(KERN_INFO "PCI host bridge to bus %s\n", dev_name(&b->dev));
+
+ pci_bus_for_each_resource(b, res, i) {
+ if (res)
+ dev_info(&b->dev, "root bus resource %pR\n", res);
+ }
return b;
@@ -1594,18 +1608,58 @@ err_out:
return NULL;
}
+struct pci_bus * __devinit pci_scan_root_bus(struct device *parent, int bus,
+ struct pci_ops *ops, void *sysdata, struct list_head *resources)
+{
+ struct pci_bus *b;
+
+ b = pci_create_root_bus(parent, bus, ops, sysdata, resources);
+ if (!b)
+ return NULL;
+
+ b->subordinate = pci_scan_child_bus(b);
+ pci_bus_add_devices(b);
+ return b;
+}
+EXPORT_SYMBOL(pci_scan_root_bus);
+
+/* Deprecated; use pci_scan_root_bus() instead */
struct pci_bus * __devinit pci_scan_bus_parented(struct device *parent,
int bus, struct pci_ops *ops, void *sysdata)
{
+ LIST_HEAD(resources);
struct pci_bus *b;
- b = pci_create_bus(parent, bus, ops, sysdata);
+ pci_add_resource(&resources, &ioport_resource);
+ pci_add_resource(&resources, &iomem_resource);
+ b = pci_create_root_bus(parent, bus, ops, sysdata, &resources);
if (b)
b->subordinate = pci_scan_child_bus(b);
+ else
+ pci_free_resource_list(&resources);
return b;
}
EXPORT_SYMBOL(pci_scan_bus_parented);
+struct pci_bus * __devinit pci_scan_bus(int bus, struct pci_ops *ops,
+ void *sysdata)
+{
+ LIST_HEAD(resources);
+ struct pci_bus *b;
+
+ pci_add_resource(&resources, &ioport_resource);
+ pci_add_resource(&resources, &iomem_resource);
+ b = pci_create_root_bus(NULL, bus, ops, sysdata, &resources);
+ if (b) {
+ b->subordinate = pci_scan_child_bus(b);
+ pci_bus_add_devices(b);
+ } else {
+ pci_free_resource_list(&resources);
+ }
+ return b;
+}
+EXPORT_SYMBOL(pci_scan_bus);
+
#ifdef CONFIG_HOTPLUG
/**
* pci_rescan_bus - scan a PCI bus for devices.
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index 7f87beed35ac..6def3624c688 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -89,9 +89,8 @@ EXPORT_SYMBOL(pci_remove_bus);
* device lists, remove the /proc entry, and notify userspace
* (/sbin/hotplug).
*/
-void pci_remove_bus_device(struct pci_dev *dev)
+static void __pci_remove_bus_device(struct pci_dev *dev)
{
- pci_stop_bus_device(dev);
if (dev->subordinate) {
struct pci_bus *b = dev->subordinate;
@@ -102,6 +101,11 @@ void pci_remove_bus_device(struct pci_dev *dev)
pci_destroy_dev(dev);
}
+void pci_remove_bus_device(struct pci_dev *dev)
+{
+ pci_stop_bus_device(dev);
+ __pci_remove_bus_device(dev);
+}
/**
* pci_remove_behind_bridge - remove all devices behind a PCI bridge
@@ -117,7 +121,7 @@ void pci_remove_behind_bridge(struct pci_dev *dev)
if (dev->subordinate)
list_for_each_safe(l, n, &dev->subordinate->devices)
- pci_remove_bus_device(pci_dev_b(l));
+ __pci_remove_bus_device(pci_dev_b(l));
}
static void pci_stop_bus_devices(struct pci_bus *bus)
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 5717509becbe..b66bfdbd21f7 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -85,9 +85,9 @@ void pci_update_resource(struct pci_dev *dev, int resno)
}
}
res->flags &= ~IORESOURCE_UNSET;
- dev_info(&dev->dev, "BAR %d: set to %pR (PCI address [%#llx-%#llx])\n",
- resno, res, (unsigned long long)region.start,
- (unsigned long long)region.end);
+ dev_dbg(&dev->dev, "BAR %d: set to %pR (PCI address [%#llx-%#llx])\n",
+ resno, res, (unsigned long long)region.start,
+ (unsigned long long)region.end);
}
int pci_claim_resource(struct pci_dev *dev, int resource)
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index 90832a955991..7cf3d2fcf56a 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -1126,14 +1126,11 @@ static const struct xenbus_device_id xenpci_ids[] = {
{""},
};
-static struct xenbus_driver xenbus_pcifront_driver = {
- .name = "pcifront",
- .owner = THIS_MODULE,
- .ids = xenpci_ids,
+static DEFINE_XENBUS_DRIVER(xenpci, "pcifront",
.probe = pcifront_xenbus_probe,
.remove = pcifront_xenbus_remove,
.otherend_changed = pcifront_backend_changed,
-};
+);
static int __init pcifront_init(void)
{
@@ -1142,12 +1139,12 @@ static int __init pcifront_init(void)
pci_frontend_registrar(1 /* enable */);
- return xenbus_register_frontend(&xenbus_pcifront_driver);
+ return xenbus_register_frontend(&xenpci_driver);
}
static void __exit pcifront_cleanup(void)
{
- xenbus_unregister_driver(&xenbus_pcifront_driver);
+ xenbus_unregister_driver(&xenpci_driver);
pci_frontend_registrar(0 /* disable */);
}
module_init(pcifront_init);
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index 6e318ce41136..f9e3fb3a285b 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -155,18 +155,14 @@ config PCMCIA_M8XX
This driver is also available as a module called m8xx_pcmcia.
-config PCMCIA_AU1X00
- tristate "Au1x00 pcmcia support"
- depends on MIPS_ALCHEMY && PCMCIA
-
config PCMCIA_ALCHEMY_DEVBOARD
tristate "Alchemy Db/Pb1xxx PCMCIA socket services"
depends on MIPS_ALCHEMY && PCMCIA
select 64BIT_PHYS_ADDR
help
 	  Enable this driver if you want PCMCIA support on your Alchemy
- Db1000, Db/Pb1100, Db/Pb1500, Db/Pb1550, Db/Pb1200 board.
- NOT suitable for the PB1000!
+ Db1000, Db/Pb1100, Db/Pb1500, Db/Pb1550, Db/Pb1200, DB1300
+ board. NOT suitable for the PB1000!
This driver is also available as a module called db1xxx_ss.ko
diff --git a/drivers/pcmcia/Makefile b/drivers/pcmcia/Makefile
index 29935ea921df..ec543a4ff2e4 100644
--- a/drivers/pcmcia/Makefile
+++ b/drivers/pcmcia/Makefile
@@ -29,7 +29,6 @@ obj-$(CONFIG_PCMCIA_SA1100) += sa11xx_base.o sa1100_cs.o
obj-$(CONFIG_PCMCIA_SA1111) += sa11xx_base.o sa1111_cs.o
obj-$(CONFIG_M32R_PCC) += m32r_pcc.o
obj-$(CONFIG_M32R_CFC) += m32r_cfc.o
-obj-$(CONFIG_PCMCIA_AU1X00) += au1x00_ss.o
obj-$(CONFIG_PCMCIA_BCM63XX) += bcm63xx_pcmcia.o
obj-$(CONFIG_PCMCIA_VRC4171) += vrc4171_card.o
obj-$(CONFIG_PCMCIA_VRC4173) += vrc4173_cardu.o
@@ -39,9 +38,6 @@ obj-$(CONFIG_AT91_CF) += at91_cf.o
obj-$(CONFIG_ELECTRA_CF) += electra_cf.o
obj-$(CONFIG_PCMCIA_ALCHEMY_DEVBOARD) += db1xxx_ss.o
-au1x00_ss-y += au1000_generic.o
-au1x00_ss-$(CONFIG_MIPS_PB1000) += au1000_pb1x00.o
-
sa1111_cs-y += sa1111_generic.o
sa1111_cs-$(CONFIG_ASSABET_NEPONSET) += sa1100_neponset.o
sa1111_cs-$(CONFIG_SA1100_BADGE4) += sa1100_badge4.o
diff --git a/drivers/pcmcia/au1000_generic.c b/drivers/pcmcia/au1000_generic.c
deleted file mode 100644
index 95dd7c62741f..000000000000
--- a/drivers/pcmcia/au1000_generic.c
+++ /dev/null
@@ -1,545 +0,0 @@
-/*
- *
- * Alchemy Semi Au1000 pcmcia driver
- *
- * Copyright 2001-2003 MontaVista Software Inc.
- * Author: MontaVista Software, Inc.
- * ppopov@embeddedalley.com or source@mvista.com
- *
- * Copyright 2004 Pete Popov, Embedded Alley Solutions, Inc.
- * Updated the driver to 2.6. Followed the sa11xx API and largely
- * copied many of the hardware independent functions.
- *
- * ########################################################################
- *
- * This program is free software; you can distribute it and/or modify it
- * under the terms of the GNU General Public License (Version 2) as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
- * ########################################################################
- *
- *
- */
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/init.h>
-#include <linux/cpufreq.h>
-#include <linux/ioport.h>
-#include <linux/kernel.h>
-#include <linux/timer.h>
-#include <linux/mm.h>
-#include <linux/notifier.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/mutex.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/system.h>
-
-#include <asm/mach-au1x00/au1000.h>
-#include "au1000_generic.h"
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Pete Popov <ppopov@embeddedalley.com>");
-MODULE_DESCRIPTION("Linux PCMCIA Card Services: Au1x00 Socket Controller");
-
-#if 0
-#define debug(x,args...) printk(KERN_DEBUG "%s: " x, __func__ , ##args)
-#else
-#define debug(x,args...)
-#endif
-
-#define MAP_SIZE 0x100000
-extern struct au1000_pcmcia_socket au1000_pcmcia_socket[];
-#define PCMCIA_SOCKET(x) (au1000_pcmcia_socket + (x))
-#define to_au1000_socket(x) container_of(x, struct au1000_pcmcia_socket, socket)
-
-/* Some boards like to support CF cards as IDE root devices, so they
- * grab pcmcia sockets directly.
- */
-u32 *pcmcia_base_vaddrs[2];
-extern const unsigned long mips_io_port_base;
-
-static DEFINE_MUTEX(pcmcia_sockets_lock);
-
-static int (*au1x00_pcmcia_hw_init[])(struct device *dev) = {
- au1x_board_init,
-};
-
-static int
-au1x00_pcmcia_skt_state(struct au1000_pcmcia_socket *skt)
-{
- struct pcmcia_state state;
- unsigned int stat;
-
- memset(&state, 0, sizeof(struct pcmcia_state));
-
- skt->ops->socket_state(skt, &state);
-
- stat = state.detect ? SS_DETECT : 0;
- stat |= state.ready ? SS_READY : 0;
- stat |= state.wrprot ? SS_WRPROT : 0;
- stat |= state.vs_3v ? SS_3VCARD : 0;
- stat |= state.vs_Xv ? SS_XVCARD : 0;
- stat |= skt->cs_state.Vcc ? SS_POWERON : 0;
-
- if (skt->cs_state.flags & SS_IOCARD)
- stat |= state.bvd1 ? SS_STSCHG : 0;
- else {
- if (state.bvd1 == 0)
- stat |= SS_BATDEAD;
- else if (state.bvd2 == 0)
- stat |= SS_BATWARN;
- }
- return stat;
-}
-
-/*
- * au100_pcmcia_config_skt
- *
- * Convert PCMCIA socket state to our socket configure structure.
- */
-static int
-au1x00_pcmcia_config_skt(struct au1000_pcmcia_socket *skt, socket_state_t *state)
-{
- int ret;
-
- ret = skt->ops->configure_socket(skt, state);
- if (ret == 0) {
- skt->cs_state = *state;
- }
-
- if (ret < 0)
- debug("unable to configure socket %d\n", skt->nr);
-
- return ret;
-}
-
-/* au1x00_pcmcia_sock_init()
- *
- * (Re-)Initialise the socket, turning on status interrupts
- * and PCMCIA bus. This must wait for power to stabilise
- * so that the card status signals report correctly.
- *
- * Returns: 0
- */
-static int au1x00_pcmcia_sock_init(struct pcmcia_socket *sock)
-{
- struct au1000_pcmcia_socket *skt = to_au1000_socket(sock);
-
- debug("initializing socket %u\n", skt->nr);
-
- skt->ops->socket_init(skt);
- return 0;
-}
-
-/*
- * au1x00_pcmcia_suspend()
- *
- * Remove power on the socket, disable IRQs from the card.
- * Turn off status interrupts, and disable the PCMCIA bus.
- *
- * Returns: 0
- */
-static int au1x00_pcmcia_suspend(struct pcmcia_socket *sock)
-{
- struct au1000_pcmcia_socket *skt = to_au1000_socket(sock);
-
- debug("suspending socket %u\n", skt->nr);
-
- skt->ops->socket_suspend(skt);
-
- return 0;
-}
-
-static DEFINE_SPINLOCK(status_lock);
-
-/*
- * au1x00_check_status()
- */
-static void au1x00_check_status(struct au1000_pcmcia_socket *skt)
-{
- unsigned int events;
-
- debug("entering PCMCIA monitoring thread\n");
-
- do {
- unsigned int status;
- unsigned long flags;
-
- status = au1x00_pcmcia_skt_state(skt);
-
- spin_lock_irqsave(&status_lock, flags);
- events = (status ^ skt->status) & skt->cs_state.csc_mask;
- skt->status = status;
- spin_unlock_irqrestore(&status_lock, flags);
-
- debug("events: %s%s%s%s%s%s\n",
- events == 0 ? "<NONE>" : "",
- events & SS_DETECT ? "DETECT " : "",
- events & SS_READY ? "READY " : "",
- events & SS_BATDEAD ? "BATDEAD " : "",
- events & SS_BATWARN ? "BATWARN " : "",
- events & SS_STSCHG ? "STSCHG " : "");
-
- if (events)
- pcmcia_parse_events(&skt->socket, events);
- } while (events);
-}
-
-/*
- * au1x00_pcmcia_poll_event()
- * Let's poll for events in addition to IRQs since IRQ only is unreliable...
- */
-static void au1x00_pcmcia_poll_event(unsigned long dummy)
-{
- struct au1000_pcmcia_socket *skt = (struct au1000_pcmcia_socket *)dummy;
- debug("polling for events\n");
-
- mod_timer(&skt->poll_timer, jiffies + AU1000_PCMCIA_POLL_PERIOD);
-
- au1x00_check_status(skt);
-}
-
-/* au1x00_pcmcia_get_status()
- *
- * From the sa11xx_core.c:
- * Implements the get_status() operation for the in-kernel PCMCIA
- * service (formerly SS_GetStatus in Card Services). Essentially just
- * fills in bits in `status' according to internal driver state or
- * the value of the voltage detect chipselect register.
- *
- * As a debugging note, during card startup, the PCMCIA core issues
- * three set_socket() commands in a row: the first with RESET deasserted,
- * the second with RESET asserted, and the last with RESET deasserted
- * again. Following the third set_socket(), a get_status() command will
- * be issued. The kernel is looking for the SS_READY flag (see
- * setup_socket(), reset_socket(), and unreset_socket() in cs.c).
- *
- * Returns: 0
- */
-static int
-au1x00_pcmcia_get_status(struct pcmcia_socket *sock, unsigned int *status)
-{
- struct au1000_pcmcia_socket *skt = to_au1000_socket(sock);
-
- skt->status = au1x00_pcmcia_skt_state(skt);
- *status = skt->status;
-
- return 0;
-}
-
-/* au1x00_pcmcia_set_socket()
- * Implements the set_socket() operation for the in-kernel PCMCIA
- * service (formerly SS_SetSocket in Card Services). We more or
- * less punt all of this work and let the kernel handle the details
- * of power configuration, reset, &c. We also record the value of
- * `state' in order to regurgitate it to the PCMCIA core later.
- *
- * Returns: 0
- */
-static int
-au1x00_pcmcia_set_socket(struct pcmcia_socket *sock, socket_state_t *state)
-{
- struct au1000_pcmcia_socket *skt = to_au1000_socket(sock);
-
- debug("for sock %u\n", skt->nr);
-
- debug("\tmask: %s%s%s%s%s%s\n\tflags: %s%s%s%s%s%s\n",
- (state->csc_mask==0)?"<NONE>":"",
- (state->csc_mask&SS_DETECT)?"DETECT ":"",
- (state->csc_mask&SS_READY)?"READY ":"",
- (state->csc_mask&SS_BATDEAD)?"BATDEAD ":"",
- (state->csc_mask&SS_BATWARN)?"BATWARN ":"",
- (state->csc_mask&SS_STSCHG)?"STSCHG ":"",
- (state->flags==0)?"<NONE>":"",
- (state->flags&SS_PWR_AUTO)?"PWR_AUTO ":"",
- (state->flags&SS_IOCARD)?"IOCARD ":"",
- (state->flags&SS_RESET)?"RESET ":"",
- (state->flags&SS_SPKR_ENA)?"SPKR_ENA ":"",
- (state->flags&SS_OUTPUT_ENA)?"OUTPUT_ENA ":"");
- debug("\tVcc %d Vpp %d irq %d\n",
- state->Vcc, state->Vpp, state->io_irq);
-
- return au1x00_pcmcia_config_skt(skt, state);
-}
-
-int
-au1x00_pcmcia_set_io_map(struct pcmcia_socket *sock, struct pccard_io_map *map)
-{
- struct au1000_pcmcia_socket *skt = to_au1000_socket(sock);
- unsigned int speed;
-
- if(map->map>=MAX_IO_WIN){
- debug("map (%d) out of range\n", map->map);
- return -1;
- }
-
- if(map->flags&MAP_ACTIVE){
- speed=(map->speed>0)?map->speed:AU1000_PCMCIA_IO_SPEED;
- skt->spd_io[map->map] = speed;
- }
-
- map->start=(unsigned int)(u32)skt->virt_io;
- map->stop=map->start+MAP_SIZE;
- return 0;
-
-} /* au1x00_pcmcia_set_io_map() */
-
-
-static int
-au1x00_pcmcia_set_mem_map(struct pcmcia_socket *sock, struct pccard_mem_map *map)
-{
- struct au1000_pcmcia_socket *skt = to_au1000_socket(sock);
- unsigned short speed = map->speed;
-
- if(map->map>=MAX_WIN){
- debug("map (%d) out of range\n", map->map);
- return -1;
- }
-
- if (map->flags & MAP_ATTRIB) {
- skt->spd_attr[map->map] = speed;
- skt->spd_mem[map->map] = 0;
- } else {
- skt->spd_attr[map->map] = 0;
- skt->spd_mem[map->map] = speed;
- }
-
- if (map->flags & MAP_ATTRIB) {
- map->static_start = skt->phys_attr + map->card_start;
- }
- else {
- map->static_start = skt->phys_mem + map->card_start;
- }
-
- debug("set_mem_map %d start %08lx card_start %08x\n",
- map->map, map->static_start, map->card_start);
- return 0;
-
-} /* au1x00_pcmcia_set_mem_map() */
-
-static struct pccard_operations au1x00_pcmcia_operations = {
- .init = au1x00_pcmcia_sock_init,
- .suspend = au1x00_pcmcia_suspend,
- .get_status = au1x00_pcmcia_get_status,
- .set_socket = au1x00_pcmcia_set_socket,
- .set_io_map = au1x00_pcmcia_set_io_map,
- .set_mem_map = au1x00_pcmcia_set_mem_map,
-};
-
-static const char *skt_names[] = {
- "PCMCIA socket 0",
- "PCMCIA socket 1",
-};
-
-struct skt_dev_info {
- int nskt;
-};
-
-int au1x00_pcmcia_socket_probe(struct device *dev, struct pcmcia_low_level *ops, int first, int nr)
-{
- struct skt_dev_info *sinfo;
- struct au1000_pcmcia_socket *skt;
- int ret, i;
-
- sinfo = kzalloc(sizeof(struct skt_dev_info), GFP_KERNEL);
- if (!sinfo) {
- ret = -ENOMEM;
- goto out;
- }
-
- sinfo->nskt = nr;
-
- /*
- * Initialise the per-socket structure.
- */
- for (i = 0; i < nr; i++) {
- skt = PCMCIA_SOCKET(i);
- memset(skt, 0, sizeof(*skt));
-
- skt->socket.resource_ops = &pccard_static_ops;
- skt->socket.ops = &au1x00_pcmcia_operations;
- skt->socket.owner = ops->owner;
- skt->socket.dev.parent = dev;
-
- init_timer(&skt->poll_timer);
- skt->poll_timer.function = au1x00_pcmcia_poll_event;
- skt->poll_timer.data = (unsigned long)skt;
- skt->poll_timer.expires = jiffies + AU1000_PCMCIA_POLL_PERIOD;
-
- skt->nr = first + i;
- skt->irq = 255;
- skt->dev = dev;
- skt->ops = ops;
-
- skt->res_skt.name = skt_names[skt->nr];
- skt->res_io.name = "io";
- skt->res_io.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
- skt->res_mem.name = "memory";
- skt->res_mem.flags = IORESOURCE_MEM;
- skt->res_attr.name = "attribute";
- skt->res_attr.flags = IORESOURCE_MEM;
-
- /*
- * PCMCIA client drivers use the inb/outb macros to access the
- * IO registers. Since mips_io_port_base is added to the
- * access address of the mips implementation of inb/outb,
- * we need to subtract it here because we want to access the
- * I/O or MEM address directly, without going through this
- * "mips_io_port_base" mechanism.
- */
- if (i == 0) {
- skt->virt_io = (void *)
- (ioremap((phys_t)AU1X_SOCK0_IO, 0x1000) -
- (u32)mips_io_port_base);
- skt->phys_attr = AU1X_SOCK0_PHYS_ATTR;
- skt->phys_mem = AU1X_SOCK0_PHYS_MEM;
- }
- else {
- skt->virt_io = (void *)
- (ioremap((phys_t)AU1X_SOCK1_IO, 0x1000) -
- (u32)mips_io_port_base);
- skt->phys_attr = AU1X_SOCK1_PHYS_ATTR;
- skt->phys_mem = AU1X_SOCK1_PHYS_MEM;
- }
- pcmcia_base_vaddrs[i] = (u32 *)skt->virt_io;
- ret = ops->hw_init(skt);
-
- skt->socket.features = SS_CAP_STATIC_MAP|SS_CAP_PCCARD;
- skt->socket.irq_mask = 0;
- skt->socket.map_size = MAP_SIZE;
- skt->socket.pci_irq = skt->irq;
- skt->socket.io_offset = (unsigned long)skt->virt_io;
-
- skt->status = au1x00_pcmcia_skt_state(skt);
-
- ret = pcmcia_register_socket(&skt->socket);
- if (ret)
- goto out_err;
-
- WARN_ON(skt->socket.sock != i);
-
- add_timer(&skt->poll_timer);
- }
-
- dev_set_drvdata(dev, sinfo);
- return 0;
-
-
-out_err:
- ops->hw_shutdown(skt);
- while (i-- > 0) {
- skt = PCMCIA_SOCKET(i);
-
- del_timer_sync(&skt->poll_timer);
- pcmcia_unregister_socket(&skt->socket);
- if (i == 0) {
- iounmap(skt->virt_io + (u32)mips_io_port_base);
- skt->virt_io = NULL;
- }
-#ifndef CONFIG_MIPS_XXS1500
- else {
- iounmap(skt->virt_io + (u32)mips_io_port_base);
- skt->virt_io = NULL;
- }
-#endif
- ops->hw_shutdown(skt);
-
- }
- kfree(sinfo);
-out:
- return ret;
-}
-
-int au1x00_drv_pcmcia_remove(struct platform_device *dev)
-{
- struct skt_dev_info *sinfo = platform_get_drvdata(dev);
- int i;
-
- mutex_lock(&pcmcia_sockets_lock);
- platform_set_drvdata(dev, NULL);
-
- for (i = 0; i < sinfo->nskt; i++) {
- struct au1000_pcmcia_socket *skt = PCMCIA_SOCKET(i);
-
- del_timer_sync(&skt->poll_timer);
- pcmcia_unregister_socket(&skt->socket);
- skt->ops->hw_shutdown(skt);
- au1x00_pcmcia_config_skt(skt, &dead_socket);
- iounmap(skt->virt_io + (u32)mips_io_port_base);
- skt->virt_io = NULL;
- }
-
- kfree(sinfo);
- mutex_unlock(&pcmcia_sockets_lock);
- return 0;
-}
-
-
-/*
- * PCMCIA "Driver" API
- */
-
-static int au1x00_drv_pcmcia_probe(struct platform_device *dev)
-{
- int i, ret = -ENODEV;
-
- mutex_lock(&pcmcia_sockets_lock);
- for (i=0; i < ARRAY_SIZE(au1x00_pcmcia_hw_init); i++) {
- ret = au1x00_pcmcia_hw_init[i](&dev->dev);
- if (ret == 0)
- break;
- }
- mutex_unlock(&pcmcia_sockets_lock);
- return ret;
-}
-
-static struct platform_driver au1x00_pcmcia_driver = {
- .driver = {
- .name = "au1x00-pcmcia",
- .owner = THIS_MODULE,
- },
- .probe = au1x00_drv_pcmcia_probe,
- .remove = au1x00_drv_pcmcia_remove,
-};
-
-
-/* au1x00_pcmcia_init()
- *
- * This routine performs low-level PCMCIA initialization and then
- * registers this socket driver with Card Services.
- *
- * Returns: 0 on success, -ve error code on failure
- */
-static int __init au1x00_pcmcia_init(void)
-{
- int error = 0;
- error = platform_driver_register(&au1x00_pcmcia_driver);
- return error;
-}
-
-/* au1x00_pcmcia_exit()
- * Invokes the low-level kernel service to free IRQs associated with this
- * socket controller and reset GPIO edge detection.
- */
-static void __exit au1x00_pcmcia_exit(void)
-{
- platform_driver_unregister(&au1x00_pcmcia_driver);
-}
-
-module_init(au1x00_pcmcia_init);
-module_exit(au1x00_pcmcia_exit);
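Note on the removed au1000_generic.c above: both the IRQ path and the 2*HZ poll
timer funnel into au1x00_check_status(), which reports only the status bits that
both changed and are being watched by the socket. A minimal sketch of that delta
logic ("old" and "mask" stand in for skt->status and skt->cs_state.csc_mask):

	unsigned int new_status = au1x00_pcmcia_skt_state(skt);
	unsigned int events = (new_status ^ old) & mask;	/* changed AND watched bits */
	if (events)
		pcmcia_parse_events(&skt->socket, events);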
diff --git a/drivers/pcmcia/au1000_generic.h b/drivers/pcmcia/au1000_generic.h
deleted file mode 100644
index 5c36bda2963b..000000000000
--- a/drivers/pcmcia/au1000_generic.h
+++ /dev/null
@@ -1,135 +0,0 @@
-/*
- * Alchemy Semi Au1000 pcmcia driver include file
- *
- * Copyright 2001 MontaVista Software Inc.
- * Author: MontaVista Software, Inc.
- * ppopov@mvista.com or source@mvista.com
- *
- * This program is free software; you can distribute it and/or modify it
- * under the terms of the GNU General Public License (Version 2) as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- */
-#ifndef __ASM_AU1000_PCMCIA_H
-#define __ASM_AU1000_PCMCIA_H
-
-/* include the world */
-
-#include <pcmcia/ss.h>
-#include <pcmcia/cistpl.h>
-#include "cs_internal.h"
-
-#define AU1000_PCMCIA_POLL_PERIOD (2*HZ)
-#define AU1000_PCMCIA_IO_SPEED (255)
-#define AU1000_PCMCIA_MEM_SPEED (300)
-
-#define AU1X_SOCK0_IO 0xF00000000ULL
-#define AU1X_SOCK0_PHYS_ATTR 0xF40000000ULL
-#define AU1X_SOCK0_PHYS_MEM 0xF80000000ULL
-
-/* pcmcia socket 1 needs external glue logic so the memory map
- * differs from board to board.
- */
-#if defined(CONFIG_MIPS_PB1000)
-#define AU1X_SOCK1_IO 0xF08000000ULL
-#define AU1X_SOCK1_PHYS_ATTR 0xF48000000ULL
-#define AU1X_SOCK1_PHYS_MEM 0xF88000000ULL
-#endif
-
-struct pcmcia_state {
- unsigned detect: 1,
- ready: 1,
- wrprot: 1,
- bvd1: 1,
- bvd2: 1,
- vs_3v: 1,
- vs_Xv: 1;
-};
-
-struct pcmcia_configure {
- unsigned sock: 8,
- vcc: 8,
- vpp: 8,
- output: 1,
- speaker: 1,
- reset: 1;
-};
-
-struct pcmcia_irqs {
- int sock;
- int irq;
- const char *str;
-};
-
-
-struct au1000_pcmcia_socket {
- struct pcmcia_socket socket;
-
- /*
- * Info from low level handler
- */
- struct device *dev;
- unsigned int nr;
- unsigned int irq;
-
- /*
- * Core PCMCIA state
- */
- struct pcmcia_low_level *ops;
-
- unsigned int status;
- socket_state_t cs_state;
-
- unsigned short spd_io[MAX_IO_WIN];
- unsigned short spd_mem[MAX_WIN];
- unsigned short spd_attr[MAX_WIN];
-
- struct resource res_skt;
- struct resource res_io;
- struct resource res_mem;
- struct resource res_attr;
-
- void * virt_io;
- unsigned int phys_io;
- unsigned int phys_attr;
- unsigned int phys_mem;
- unsigned short speed_io, speed_attr, speed_mem;
-
- unsigned int irq_state;
-
- struct timer_list poll_timer;
-};
-
-struct pcmcia_low_level {
- struct module *owner;
-
- int (*hw_init)(struct au1000_pcmcia_socket *);
- void (*hw_shutdown)(struct au1000_pcmcia_socket *);
-
- void (*socket_state)(struct au1000_pcmcia_socket *, struct pcmcia_state *);
- int (*configure_socket)(struct au1000_pcmcia_socket *, struct socket_state_t *);
-
- /*
- * Enable card status IRQs on (re-)initialisation. This can
- * be called at initialisation, power management event, or
- * pcmcia event.
- */
- void (*socket_init)(struct au1000_pcmcia_socket *);
-
- /*
- * Disable card status IRQs and PCMCIA bus on suspend.
- */
- void (*socket_suspend)(struct au1000_pcmcia_socket *);
-};
-
-extern int au1x_board_init(struct device *dev);
-
-#endif /* __ASM_AU1000_PCMCIA_H */
diff --git a/drivers/pcmcia/au1000_pb1x00.c b/drivers/pcmcia/au1000_pb1x00.c
deleted file mode 100644
index b2396647a165..000000000000
--- a/drivers/pcmcia/au1000_pb1x00.c
+++ /dev/null
@@ -1,294 +0,0 @@
-/*
- *
- * Alchemy Semi Pb1000 boards specific pcmcia routines.
- *
- * Copyright 2002 MontaVista Software Inc.
- * Author: MontaVista Software, Inc.
- * ppopov@mvista.com or source@mvista.com
- *
- * ########################################################################
- *
- * This program is free software; you can distribute it and/or modify it
- * under the terms of the GNU General Public License (Version 2) as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- */
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/ioport.h>
-#include <linux/kernel.h>
-#include <linux/timer.h>
-#include <linux/mm.h>
-#include <linux/proc_fs.h>
-#include <linux/types.h>
-
-#include <pcmcia/ss.h>
-#include <pcmcia/cistpl.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/system.h>
-
-#include <asm/au1000.h>
-#include <asm/au1000_pcmcia.h>
-
-#define debug(fmt, arg...) do { } while (0)
-
-#include <asm/pb1000.h>
-#define PCMCIA_IRQ AU1000_GPIO_15
-
-static int pb1x00_pcmcia_init(struct pcmcia_init *init)
-{
- u16 pcr;
- pcr = PCR_SLOT_0_RST | PCR_SLOT_1_RST;
-
- au_writel(0x8000, PB1000_MDR); /* clear pcmcia interrupt */
- au_sync_delay(100);
- au_writel(0x4000, PB1000_MDR); /* enable pcmcia interrupt */
- au_sync();
-
- pcr |= SET_VCC_VPP(VCC_HIZ,VPP_HIZ,0);
- pcr |= SET_VCC_VPP(VCC_HIZ,VPP_HIZ,1);
- au_writel(pcr, PB1000_PCR);
- au_sync_delay(20);
-
- return PCMCIA_NUM_SOCKS;
-}
-
-static int pb1x00_pcmcia_shutdown(void)
-{
- u16 pcr;
- pcr = PCR_SLOT_0_RST | PCR_SLOT_1_RST;
- pcr |= SET_VCC_VPP(VCC_HIZ,VPP_HIZ,0);
- pcr |= SET_VCC_VPP(VCC_HIZ,VPP_HIZ,1);
- au_writel(pcr, PB1000_PCR);
- au_sync_delay(20);
- return 0;
-}
-
-static int
-pb1x00_pcmcia_socket_state(unsigned sock, struct pcmcia_state *state)
-{
- u32 inserted0, inserted1;
- u16 vs0, vs1;
-
- vs0 = vs1 = (u16)au_readl(PB1000_ACR1);
- inserted0 = !(vs0 & (ACR1_SLOT_0_CD1 | ACR1_SLOT_0_CD2));
- inserted1 = !(vs1 & (ACR1_SLOT_1_CD1 | ACR1_SLOT_1_CD2));
- vs0 = (vs0 >> 4) & 0x3;
- vs1 = (vs1 >> 12) & 0x3;
-
- state->ready = 0;
- state->vs_Xv = 0;
- state->vs_3v = 0;
- state->detect = 0;
-
- if (sock == 0) {
- if (inserted0) {
- switch (vs0) {
- case 0:
- case 2:
- state->vs_3v=1;
- break;
- case 3: /* 5V */
- break;
- default:
- /* return without setting 'detect' */
- printk(KERN_ERR "pb1x00 bad VS (%d)\n",
- vs0);
- return 0;
- }
- state->detect = 1;
- }
- }
- else {
- if (inserted1) {
- switch (vs1) {
- case 0:
- case 2:
- state->vs_3v=1;
- break;
- case 3: /* 5V */
- break;
- default:
- /* return without setting 'detect' */
- printk(KERN_ERR "pb1x00 bad VS (%d)\n",
- vs1);
- return 0;
- }
- state->detect = 1;
- }
- }
-
- if (state->detect) {
- state->ready = 1;
- }
-
- state->bvd1=1;
- state->bvd2=1;
- state->wrprot=0;
- return 1;
-}
-
-
-static int pb1x00_pcmcia_get_irq_info(struct pcmcia_irq_info *info)
-{
-
- if(info->sock > PCMCIA_MAX_SOCK) return -1;
-
- /*
- * Even in the case of the Pb1000, both sockets are connected
- * to the same irq line.
- */
- info->irq = PCMCIA_IRQ;
-
- return 0;
-}
-
-
-static int
-pb1x00_pcmcia_configure_socket(const struct pcmcia_configure *configure)
-{
- u16 pcr;
-
- if(configure->sock > PCMCIA_MAX_SOCK) return -1;
-
- pcr = au_readl(PB1000_PCR);
-
- if (configure->sock == 0) {
- pcr &= ~(PCR_SLOT_0_VCC0 | PCR_SLOT_0_VCC1 |
- PCR_SLOT_0_VPP0 | PCR_SLOT_0_VPP1);
- }
- else {
- pcr &= ~(PCR_SLOT_1_VCC0 | PCR_SLOT_1_VCC1 |
- PCR_SLOT_1_VPP0 | PCR_SLOT_1_VPP1);
- }
-
- pcr &= ~PCR_SLOT_0_RST;
- debug("Vcc %dV Vpp %dV, pcr %x\n",
- configure->vcc, configure->vpp, pcr);
- switch(configure->vcc){
- case 0: /* Vcc 0 */
- switch(configure->vpp) {
- case 0:
- pcr |= SET_VCC_VPP(VCC_HIZ,VPP_GND,
- configure->sock);
- break;
- case 12:
- pcr |= SET_VCC_VPP(VCC_HIZ,VPP_12V,
- configure->sock);
- break;
- case 50:
- pcr |= SET_VCC_VPP(VCC_HIZ,VPP_5V,
- configure->sock);
- break;
- case 33:
- pcr |= SET_VCC_VPP(VCC_HIZ,VPP_3V,
- configure->sock);
- break;
- default:
- pcr |= SET_VCC_VPP(VCC_HIZ,VPP_HIZ,
- configure->sock);
- printk("%s: bad Vcc/Vpp (%d:%d)\n",
- __func__,
- configure->vcc,
- configure->vpp);
- break;
- }
- break;
- case 50: /* Vcc 5V */
- switch(configure->vpp) {
- case 0:
- pcr |= SET_VCC_VPP(VCC_5V,VPP_GND,
- configure->sock);
- break;
- case 50:
- pcr |= SET_VCC_VPP(VCC_5V,VPP_5V,
- configure->sock);
- break;
- case 12:
- pcr |= SET_VCC_VPP(VCC_5V,VPP_12V,
- configure->sock);
- break;
- case 33:
- pcr |= SET_VCC_VPP(VCC_5V,VPP_3V,
- configure->sock);
- break;
- default:
- pcr |= SET_VCC_VPP(VCC_HIZ,VPP_HIZ,
- configure->sock);
- printk("%s: bad Vcc/Vpp (%d:%d)\n",
- __func__,
- configure->vcc,
- configure->vpp);
- break;
- }
- break;
- case 33: /* Vcc 3.3V */
- switch(configure->vpp) {
- case 0:
- pcr |= SET_VCC_VPP(VCC_3V,VPP_GND,
- configure->sock);
- break;
- case 50:
- pcr |= SET_VCC_VPP(VCC_3V,VPP_5V,
- configure->sock);
- break;
- case 12:
- pcr |= SET_VCC_VPP(VCC_3V,VPP_12V,
- configure->sock);
- break;
- case 33:
- pcr |= SET_VCC_VPP(VCC_3V,VPP_3V,
- configure->sock);
- break;
- default:
- pcr |= SET_VCC_VPP(VCC_HIZ,VPP_HIZ,
- configure->sock);
- printk("%s: bad Vcc/Vpp (%d:%d)\n",
- __func__,
- configure->vcc,
- configure->vpp);
- break;
- }
- break;
- default: /* what's this ? */
- pcr |= SET_VCC_VPP(VCC_HIZ,VPP_HIZ,configure->sock);
- printk(KERN_ERR "%s: bad Vcc %d\n",
- __func__, configure->vcc);
- break;
- }
-
- if (configure->sock == 0) {
- pcr &= ~(PCR_SLOT_0_RST);
- if (configure->reset)
- pcr |= PCR_SLOT_0_RST;
- }
- else {
- pcr &= ~(PCR_SLOT_1_RST);
- if (configure->reset)
- pcr |= PCR_SLOT_1_RST;
- }
- au_writel(pcr, PB1000_PCR);
- au_sync_delay(300);
-
- return 0;
-}
-
-
-struct pcmcia_low_level pb1x00_pcmcia_ops = {
- pb1x00_pcmcia_init,
- pb1x00_pcmcia_shutdown,
- pb1x00_pcmcia_socket_state,
- pb1x00_pcmcia_get_irq_info,
- pb1x00_pcmcia_configure_socket
-};
diff --git a/drivers/pcmcia/db1xxx_ss.c b/drivers/pcmcia/db1xxx_ss.c
index 3e49df6d5e3b..5b7c22784aff 100644
--- a/drivers/pcmcia/db1xxx_ss.c
+++ b/drivers/pcmcia/db1xxx_ss.c
@@ -7,7 +7,7 @@
/* This is a fairly generic PCMCIA socket driver suitable for the
* following Alchemy Development boards:
- * Db1000, Db/Pb1500, Db/Pb1100, Db/Pb1550, Db/Pb1200.
+ * Db1000, Db/Pb1500, Db/Pb1100, Db/Pb1550, Db/Pb1200, Db1300
*
* The Db1000 is used as a reference: Per-socket card-, carddetect- and
* statuschange IRQs connected to SoC GPIOs, control and status register
@@ -18,6 +18,7 @@
* - Pb1100/Pb1500: single socket only; voltage key bits VS are
* at STATUS[5:4] (instead of STATUS[1:0]).
* - Au1200-based: additional card-eject irqs, irqs not gpios!
+ * - Db1300: Db1200-like, no pwr ctrl, single socket (#1).
*/
#include <linux/delay.h>
@@ -59,11 +60,17 @@ struct db1x_pcmcia_sock {
#define BOARD_TYPE_DEFAULT 0 /* most boards */
#define BOARD_TYPE_DB1200 1 /* IRQs aren't gpios */
#define BOARD_TYPE_PB1100 2 /* VS bits slightly different */
+#define BOARD_TYPE_DB1300 3 /* no power control */
int board_type;
};
#define to_db1x_socket(x) container_of(x, struct db1x_pcmcia_sock, socket)
+static int db1300_card_inserted(struct db1x_pcmcia_sock *sock)
+{
+ return bcsr_read(BCSR_SIGSTAT) & (1 << 8);
+}
+
/* DB/PB1200: check CPLD SIGSTATUS register bit 10/12 */
static int db1200_card_inserted(struct db1x_pcmcia_sock *sock)
{
@@ -84,6 +91,8 @@ static int db1x_card_inserted(struct db1x_pcmcia_sock *sock)
switch (sock->board_type) {
case BOARD_TYPE_DB1200:
return db1200_card_inserted(sock);
+ case BOARD_TYPE_DB1300:
+ return db1300_card_inserted(sock);
default:
return db1000_card_inserted(sock);
}
@@ -160,7 +169,8 @@ static int db1x_pcmcia_setup_irqs(struct db1x_pcmcia_sock *sock)
* ejection handler have been registered and the currently
* active one disabled.
*/
- if (sock->board_type == BOARD_TYPE_DB1200) {
+ if ((sock->board_type == BOARD_TYPE_DB1200) ||
+ (sock->board_type == BOARD_TYPE_DB1300)) {
ret = request_irq(sock->insert_irq, db1200_pcmcia_cdirq,
IRQF_DISABLED, "pcmcia_insert", sock);
if (ret)
@@ -174,7 +184,7 @@ static int db1x_pcmcia_setup_irqs(struct db1x_pcmcia_sock *sock)
}
/* enable the currently silent one */
- if (db1200_card_inserted(sock))
+ if (db1x_card_inserted(sock))
enable_irq(sock->eject_irq);
else
enable_irq(sock->insert_irq);
@@ -270,7 +280,8 @@ static int db1x_pcmcia_configure(struct pcmcia_socket *skt,
}
/* create new voltage code */
- cr_set |= ((v << 2) | p) << (sock->nr * 8);
+ if (sock->board_type != BOARD_TYPE_DB1300)
+ cr_set |= ((v << 2) | p) << (sock->nr * 8);
changed = state->flags ^ sock->old_flags;
@@ -343,6 +354,10 @@ static int db1x_pcmcia_get_status(struct pcmcia_socket *skt,
/* if Vcc is not zero, we have applied power to a card */
status |= GET_VCC(cr, sock->nr) ? SS_POWERON : 0;
+ /* DB1300: power always on, but don't tell when no card present */
+ if ((sock->board_type == BOARD_TYPE_DB1300) && (status & SS_DETECT))
+ status = SS_POWERON | SS_3VCARD | SS_DETECT;
+
/* reset de-asserted? then we're ready */
status |= (GET_RESET(cr, sock->nr)) ? SS_READY : SS_RESET;
@@ -419,6 +434,9 @@ static int __devinit db1x_pcmcia_socket_probe(struct platform_device *pdev)
case BCSR_WHOAMI_PB1200 ... BCSR_WHOAMI_DB1200:
sock->board_type = BOARD_TYPE_DB1200;
break;
+ case BCSR_WHOAMI_DB1300:
+ sock->board_type = BOARD_TYPE_DB1300;
+ break;
default:
printk(KERN_INFO "db1xxx-ss: unknown board %d!\n", bid);
ret = -ENODEV;
diff --git a/drivers/pcmcia/pxa2xx_cm_x255.c b/drivers/pcmcia/pxa2xx_cm_x255.c
index 0b4f946cf13a..31ab6ddf52c9 100644
--- a/drivers/pcmcia/pxa2xx_cm_x255.c
+++ b/drivers/pcmcia/pxa2xx_cm_x255.c
@@ -16,8 +16,6 @@
#include <linux/gpio.h>
#include <linux/export.h>
-#include <asm/mach-types.h>
-
#include "soc_common.h"
#define GPIO_PCMCIA_SKTSEL (54)
@@ -27,15 +25,15 @@
#define GPIO_PCMCIA_S1_RDYINT (8)
#define GPIO_PCMCIA_RESET (9)
-#define PCMCIA_S0_CD_VALID IRQ_GPIO(GPIO_PCMCIA_S0_CD_VALID)
-#define PCMCIA_S1_CD_VALID IRQ_GPIO(GPIO_PCMCIA_S1_CD_VALID)
-#define PCMCIA_S0_RDYINT IRQ_GPIO(GPIO_PCMCIA_S0_RDYINT)
-#define PCMCIA_S1_RDYINT IRQ_GPIO(GPIO_PCMCIA_S1_RDYINT)
+#define PCMCIA_S0_CD_VALID gpio_to_irq(GPIO_PCMCIA_S0_CD_VALID)
+#define PCMCIA_S1_CD_VALID gpio_to_irq(GPIO_PCMCIA_S1_CD_VALID)
+#define PCMCIA_S0_RDYINT gpio_to_irq(GPIO_PCMCIA_S0_RDYINT)
+#define PCMCIA_S1_RDYINT gpio_to_irq(GPIO_PCMCIA_S1_RDYINT)
static struct pcmcia_irqs irqs[] = {
- { 0, PCMCIA_S0_CD_VALID, "PCMCIA0 CD" },
- { 1, PCMCIA_S1_CD_VALID, "PCMCIA1 CD" },
+ { .sock = 0, .str = "PCMCIA0 CD" },
+ { .sock = 1, .str = "PCMCIA1 CD" },
};
static int cmx255_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
@@ -46,6 +44,8 @@ static int cmx255_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
gpio_direction_output(GPIO_PCMCIA_RESET, 0);
skt->socket.pci_irq = skt->nr == 0 ? PCMCIA_S0_RDYINT : PCMCIA_S1_RDYINT;
+ irqs[0].irq = PCMCIA_S0_CD_VALID;
+ irqs[1].irq = PCMCIA_S1_CD_VALID;
ret = soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs));
if (!ret)
gpio_free(GPIO_PCMCIA_RESET);
diff --git a/drivers/pcmcia/pxa2xx_cm_x270.c b/drivers/pcmcia/pxa2xx_cm_x270.c
index 923f315926ef..3dc7621a0767 100644
--- a/drivers/pcmcia/pxa2xx_cm_x270.c
+++ b/drivers/pcmcia/pxa2xx_cm_x270.c
@@ -16,20 +16,18 @@
#include <linux/gpio.h>
#include <linux/export.h>
-#include <asm/mach-types.h>
-
#include "soc_common.h"
#define GPIO_PCMCIA_S0_CD_VALID (84)
#define GPIO_PCMCIA_S0_RDYINT (82)
#define GPIO_PCMCIA_RESET (53)
-#define PCMCIA_S0_CD_VALID IRQ_GPIO(GPIO_PCMCIA_S0_CD_VALID)
-#define PCMCIA_S0_RDYINT IRQ_GPIO(GPIO_PCMCIA_S0_RDYINT)
+#define PCMCIA_S0_CD_VALID gpio_to_irq(GPIO_PCMCIA_S0_CD_VALID)
+#define PCMCIA_S0_RDYINT gpio_to_irq(GPIO_PCMCIA_S0_RDYINT)
static struct pcmcia_irqs irqs[] = {
- { 0, PCMCIA_S0_CD_VALID, "PCMCIA0 CD" },
+ { .sock = 0, .str = "PCMCIA0 CD" },
};
static int cmx270_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
@@ -40,6 +38,7 @@ static int cmx270_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
gpio_direction_output(GPIO_PCMCIA_RESET, 0);
skt->socket.pci_irq = PCMCIA_S0_RDYINT;
+ irqs[0].irq = PCMCIA_S0_CD_VALID;
ret = soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs));
if (!ret)
gpio_free(GPIO_PCMCIA_RESET);
diff --git a/drivers/pcmcia/pxa2xx_e740.c b/drivers/pcmcia/pxa2xx_e740.c
index 8bfbd4dca131..17cd2ce7428f 100644
--- a/drivers/pcmcia/pxa2xx_e740.c
+++ b/drivers/pcmcia/pxa2xx_e740.c
@@ -26,20 +26,23 @@
static struct pcmcia_irqs cd_irqs[] = {
{
.sock = 0,
- .irq = IRQ_GPIO(GPIO_E740_PCMCIA_CD0),
.str = "CF card detect"
},
{
.sock = 1,
- .irq = IRQ_GPIO(GPIO_E740_PCMCIA_CD1),
.str = "Wifi switch"
},
};
static int e740_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
{
- skt->socket.pci_irq = skt->nr == 0 ? IRQ_GPIO(GPIO_E740_PCMCIA_RDY0) :
- IRQ_GPIO(GPIO_E740_PCMCIA_RDY1);
+ if (skt->nr == 0)
+ skt->socket.pci_irq = gpio_to_irq(GPIO_E740_PCMCIA_RDY0);
+ else
+ skt->socket.pci_irq = gpio_to_irq(GPIO_E740_PCMCIA_RDY1);
+
+ cd_irqs[0].irq = gpio_to_irq(GPIO_E740_PCMCIA_CD0);
+ cd_irqs[1].irq = gpio_to_irq(GPIO_E740_PCMCIA_CD1);
return soc_pcmcia_request_irqs(skt, &cd_irqs[skt->nr], 1);
}
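Note: the same conversion repeats across the pxa2xx socket drivers in this series.
The IRQ numbers are no longer baked in at build time via IRQ_GPIO(), but resolved
in hw_init() with gpio_to_irq(). A minimal sketch of the pattern, where MY_GPIO_RDY
and MY_GPIO_CD are hypothetical GPIO numbers:

	static struct pcmcia_irqs irqs[] = {
		{ .sock = 0, .str = "PCMCIA0 CD" },	/* .irq filled in at hw_init time */
	};

	static int my_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
	{
		skt->socket.pci_irq = gpio_to_irq(MY_GPIO_RDY);	/* ready/interrupt line */
		irqs[0].irq = gpio_to_irq(MY_GPIO_CD);		/* card-detect line */
		return soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs));
	}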
diff --git a/drivers/pcmcia/pxa2xx_palmld.c b/drivers/pcmcia/pxa2xx_palmld.c
index d589ad1dcd4c..6a8e011a8c13 100644
--- a/drivers/pcmcia/pxa2xx_palmld.c
+++ b/drivers/pcmcia/pxa2xx_palmld.c
@@ -33,7 +33,7 @@ static int palmld_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
ret = gpio_request_array(palmld_pcmcia_gpios,
ARRAY_SIZE(palmld_pcmcia_gpios));
- skt->socket.pci_irq = IRQ_GPIO(GPIO_NR_PALMLD_PCMCIA_READY);
+ skt->socket.pci_irq = gpio_to_irq(GPIO_NR_PALMLD_PCMCIA_READY);
return ret;
}
diff --git a/drivers/pcmcia/pxa2xx_palmtc.c b/drivers/pcmcia/pxa2xx_palmtc.c
index 9c6a04b2f71b..9e38de769ba3 100644
--- a/drivers/pcmcia/pxa2xx_palmtc.c
+++ b/drivers/pcmcia/pxa2xx_palmtc.c
@@ -37,7 +37,7 @@ static int palmtc_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
ret = gpio_request_array(palmtc_pcmcia_gpios,
ARRAY_SIZE(palmtc_pcmcia_gpios));
- skt->socket.pci_irq = IRQ_GPIO(GPIO_NR_PALMTC_PCMCIA_READY);
+ skt->socket.pci_irq = gpio_to_irq(GPIO_NR_PALMTC_PCMCIA_READY);
return ret;
}
diff --git a/drivers/pcmcia/pxa2xx_stargate2.c b/drivers/pcmcia/pxa2xx_stargate2.c
index 939622251dfb..6c2366b74a35 100644
--- a/drivers/pcmcia/pxa2xx_stargate2.c
+++ b/drivers/pcmcia/pxa2xx_stargate2.c
@@ -34,7 +34,7 @@
#define SG2_S0_GPIO_READY 81
static struct pcmcia_irqs irqs[] = {
- { 0, IRQ_GPIO(SG2_S0_GPIO_DETECT), "PCMCIA0 CD" },
+ { .sock = 0, .str = "PCMCIA0 CD" },
};
static struct gpio sg2_pcmcia_gpios[] = {
@@ -44,7 +44,9 @@ static struct gpio sg2_pcmcia_gpios[] = {
static int sg2_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
{
- skt->socket.pci_irq = IRQ_GPIO(SG2_S0_GPIO_READY);
+ skt->socket.pci_irq = gpio_to_irq(SG2_S0_GPIO_READY);
+ irqs[0].irq = gpio_to_irq(SG2_S0_GPIO_DETECT);
+
return soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs));
}
diff --git a/drivers/pcmcia/pxa2xx_trizeps4.c b/drivers/pcmcia/pxa2xx_trizeps4.c
index 57ddb969d888..7c33f898135a 100644
--- a/drivers/pcmcia/pxa2xx_trizeps4.c
+++ b/drivers/pcmcia/pxa2xx_trizeps4.c
@@ -30,7 +30,7 @@
extern void board_pcmcia_power(int power);
static struct pcmcia_irqs irqs[] = {
- { 0, IRQ_GPIO(GPIO_PCD), "cs0_cd" }
+ { .sock = 0, .str = "cs0_cd" }
/* on other baseboards we can have more inputs */
};
@@ -53,7 +53,8 @@ static int trizeps_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
gpio_free(GPIO_PRDY);
return -EINVAL;
}
- skt->socket.pci_irq = IRQ_GPIO(GPIO_PRDY);
+ skt->socket.pci_irq = gpio_to_irq(GPIO_PRDY);
+ irqs[0].irq = gpio_to_irq(GPIO_PCD);
break;
default:
break;
diff --git a/drivers/pcmcia/pxa2xx_vpac270.c b/drivers/pcmcia/pxa2xx_vpac270.c
index 66ab92cf3105..61b17d235dbe 100644
--- a/drivers/pcmcia/pxa2xx_vpac270.c
+++ b/drivers/pcmcia/pxa2xx_vpac270.c
@@ -38,12 +38,10 @@ static struct gpio vpac270_cf_gpios[] = {
static struct pcmcia_irqs cd_irqs[] = {
{
.sock = 0,
- .irq = IRQ_GPIO(GPIO84_VPAC270_PCMCIA_CD),
.str = "PCMCIA CD"
},
{
.sock = 1,
- .irq = IRQ_GPIO(GPIO17_VPAC270_CF_CD),
.str = "CF CD"
},
};
@@ -57,6 +55,7 @@ static int vpac270_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
ARRAY_SIZE(vpac270_pcmcia_gpios));
skt->socket.pci_irq = gpio_to_irq(GPIO35_VPAC270_PCMCIA_RDY);
+ cd_irqs[0].irq = gpio_to_irq(GPIO84_VPAC270_PCMCIA_CD);
if (!ret)
ret = soc_pcmcia_request_irqs(skt, &cd_irqs[0], 1);
@@ -65,6 +64,7 @@ static int vpac270_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
ARRAY_SIZE(vpac270_cf_gpios));
skt->socket.pci_irq = gpio_to_irq(GPIO12_VPAC270_CF_RDY);
+ cd_irqs[1].irq = gpio_to_irq(GPIO17_VPAC270_CF_CD);
if (!ret)
ret = soc_pcmcia_request_irqs(skt, &cd_irqs[1], 1);
diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c
index 9dc565c615bd..849c0c11d2af 100644
--- a/drivers/pcmcia/yenta_socket.c
+++ b/drivers/pcmcia/yenta_socket.c
@@ -24,15 +24,15 @@
#include "yenta_socket.h"
#include "i82365.h"
-static int disable_clkrun;
+static bool disable_clkrun;
module_param(disable_clkrun, bool, 0444);
MODULE_PARM_DESC(disable_clkrun, "If PC card doesn't function properly, please try this option");
-static int isa_probe = 1;
+static bool isa_probe = 1;
module_param(isa_probe, bool, 0444);
MODULE_PARM_DESC(isa_probe, "If set ISA interrupts are probed (default). Set to N to disable probing");
-static int pwr_irqs_off;
+static bool pwr_irqs_off;
module_param(pwr_irqs_off, bool, 0644);
MODULE_PARM_DESC(pwr_irqs_off, "Force IRQs off during power-on of slot. Use only when seeing IRQ storms!");
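Note: the int-to-bool changes above follow the module_param() convention in this
release, where a parameter declared with the "bool" type is expected to be backed
by a bool variable. A minimal sketch, reusing the parameter name from the hunk above:

	static bool isa_probe = true;
	module_param(isa_probe, bool, 0444);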
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index e17e2f8001d2..afaf88558125 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -12,7 +12,10 @@ menu "Pin controllers"
depends on PINCTRL
config PINMUX
- bool "Support pinmux controllers"
+ bool "Support pin multiplexing controllers"
+
+config PINCONF
+ bool "Support pin configuration controllers"
config DEBUG_PINCTRL
bool "Debug PINCTRL calls"
@@ -20,16 +23,25 @@ config DEBUG_PINCTRL
help
Say Y here to add some extra checks and diagnostics to PINCTRL calls.
-config PINMUX_SIRF
- bool "CSR SiRFprimaII pinmux driver"
+config PINCTRL_SIRF
+ bool "CSR SiRFprimaII pin controller driver"
depends on ARCH_PRIMA2
select PINMUX
-config PINMUX_U300
- bool "U300 pinmux driver"
+config PINCTRL_U300
+ bool "U300 pin controller driver"
depends on ARCH_U300
select PINMUX
+config PINCTRL_COH901
+ bool "ST-Ericsson U300 COH 901 335/571 GPIO"
+ depends on GPIOLIB && ARCH_U300 && PINCTRL_U300
+ help
+ Say yes here to support GPIO interface on ST-Ericsson U300.
+ The names of the two IP block variants supported are
+ COH 901 335 and COH 901 571/3. They contain 3, 5 or 7
+ ports of 8 GPIO pins each.
+
endmenu
endif
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index bdc548a6b7e9..827601cc68f6 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -1,8 +1,10 @@
# generic pinmux support
-ccflags-$(CONFIG_DEBUG_PINMUX) += -DDEBUG
+ccflags-$(CONFIG_DEBUG_PINCTRL) += -DDEBUG
obj-$(CONFIG_PINCTRL) += core.o
obj-$(CONFIG_PINMUX) += pinmux.o
-obj-$(CONFIG_PINMUX_SIRF) += pinmux-sirf.o
-obj-$(CONFIG_PINMUX_U300) += pinmux-u300.o
+obj-$(CONFIG_PINCONF) += pinconf.o
+obj-$(CONFIG_PINCTRL_SIRF) += pinctrl-sirf.o
+obj-$(CONFIG_PINCTRL_U300) += pinctrl-u300.o
+obj-$(CONFIG_PINCTRL_COH901) += pinctrl-coh901.o
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index eadef9e191ea..569bdb3ef104 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -28,17 +28,12 @@
#include <linux/pinctrl/machine.h>
#include "core.h"
#include "pinmux.h"
+#include "pinconf.h"
/* Global list of pin control devices */
static DEFINE_MUTEX(pinctrldev_list_mutex);
static LIST_HEAD(pinctrldev_list);
-static void pinctrl_dev_release(struct device *dev)
-{
- struct pinctrl_dev *pctldev = dev_get_drvdata(dev);
- kfree(pctldev);
-}
-
const char *pinctrl_dev_get_name(struct pinctrl_dev *pctldev)
{
/* We're not allowed to register devices without name */
@@ -70,14 +65,14 @@ struct pinctrl_dev *get_pinctrl_dev_from_dev(struct device *dev,
mutex_lock(&pinctrldev_list_mutex);
list_for_each_entry(pctldev, &pinctrldev_list, node) {
- if (dev && &pctldev->dev == dev) {
+ if (dev && pctldev->dev == dev) {
/* Matched on device pointer */
found = true;
break;
}
if (devname &&
- !strcmp(dev_name(&pctldev->dev), devname)) {
+ !strcmp(dev_name(pctldev->dev), devname)) {
/* Matched on device name */
found = true;
break;
@@ -88,7 +83,7 @@ struct pinctrl_dev *get_pinctrl_dev_from_dev(struct device *dev,
return found ? pctldev : NULL;
}
-struct pin_desc *pin_desc_get(struct pinctrl_dev *pctldev, int pin)
+struct pin_desc *pin_desc_get(struct pinctrl_dev *pctldev, unsigned int pin)
{
struct pin_desc *pindesc;
unsigned long flags;
@@ -101,6 +96,31 @@ struct pin_desc *pin_desc_get(struct pinctrl_dev *pctldev, int pin)
}
/**
+ * pin_get_from_name() - look up a pin number from a name
+ * @pctldev: the pin control device to lookup the pin on
+ * @name: the name of the pin to look up
+ */
+int pin_get_from_name(struct pinctrl_dev *pctldev, const char *name)
+{
+ unsigned i, pin;
+
+ /* The pin number can be retrieved from the pin controller descriptor */
+ for (i = 0; i < pctldev->desc->npins; i++) {
+ struct pin_desc *desc;
+
+ pin = pctldev->desc->pins[i].number;
+ desc = pin_desc_get(pctldev, pin);
+ /* Pin space may be sparse */
+ if (desc == NULL)
+ continue;
+ if (desc->name && !strcmp(name, desc->name))
+ return pin;
+ }
+
+ return -EINVAL;
+}
+
+/**
* pin_is_valid() - check if pin exists on controller
* @pctldev: the pin control device to check the pin on
* @pin: pin to check, use the local pin controller index number
@@ -139,6 +159,8 @@ static void pinctrl_free_pindescs(struct pinctrl_dev *pctldev,
if (pindesc != NULL) {
radix_tree_delete(&pctldev->pin_desc_tree,
pins[i].number);
+ if (pindesc->dynamic_name)
+ kfree(pindesc->name);
}
kfree(pindesc);
}
@@ -160,19 +182,27 @@ static int pinctrl_register_one_pin(struct pinctrl_dev *pctldev,
pindesc = kzalloc(sizeof(*pindesc), GFP_KERNEL);
if (pindesc == NULL)
return -ENOMEM;
+
spin_lock_init(&pindesc->lock);
/* Set owner */
pindesc->pctldev = pctldev;
/* Copy basic pin info */
- pindesc->name = name;
+ if (pindesc->name) {
+ pindesc->name = name;
+ } else {
+ pindesc->name = kasprintf(GFP_KERNEL, "PIN%u", number);
+ if (pindesc->name == NULL)
+ return -ENOMEM;
+ pindesc->dynamic_name = true;
+ }
spin_lock(&pctldev->pin_desc_tree_lock);
radix_tree_insert(&pctldev->pin_desc_tree, number, pindesc);
spin_unlock(&pctldev->pin_desc_tree_lock);
pr_debug("registered pin %d (%s) on %s\n",
- number, name ? name : "(unnamed)", pctldev->desc->name);
+ number, pindesc->name, pctldev->desc->name);
return 0;
}
@@ -284,21 +314,52 @@ void pinctrl_remove_gpio_range(struct pinctrl_dev *pctldev,
mutex_unlock(&pctldev->gpio_ranges_lock);
}
+/**
+ * pinctrl_get_group_selector() - returns the group selector for a group
+ * @pctldev: the pin controller handling the group
+ * @pin_group: the pin group to look up
+ */
+int pinctrl_get_group_selector(struct pinctrl_dev *pctldev,
+ const char *pin_group)
+{
+ const struct pinctrl_ops *pctlops = pctldev->desc->pctlops;
+ unsigned group_selector = 0;
+
+ while (pctlops->list_groups(pctldev, group_selector) >= 0) {
+ const char *gname = pctlops->get_group_name(pctldev,
+ group_selector);
+ if (!strcmp(gname, pin_group)) {
+ dev_dbg(pctldev->dev,
+ "found group selector %u for %s\n",
+ group_selector,
+ pin_group);
+ return group_selector;
+ }
+
+ group_selector++;
+ }
+
+ dev_err(pctldev->dev, "does not have pin group %s\n",
+ pin_group);
+
+ return -EINVAL;
+}
+
#ifdef CONFIG_DEBUG_FS
static int pinctrl_pins_show(struct seq_file *s, void *what)
{
struct pinctrl_dev *pctldev = s->private;
const struct pinctrl_ops *ops = pctldev->desc->pctlops;
- unsigned pin;
+ unsigned i, pin;
seq_printf(s, "registered pins: %d\n", pctldev->desc->npins);
- seq_printf(s, "max pin number: %d\n", pctldev->desc->maxpin);
- /* The highest pin number need to be included in the loop, thus <= */
- for (pin = 0; pin <= pctldev->desc->maxpin; pin++) {
+ /* The pin number can be retrieved from the pin controller descriptor */
+ for (i = 0; i < pctldev->desc->npins; i++) {
struct pin_desc *desc;
+ pin = pctldev->desc->pins[i].number;
desc = pin_desc_get(pctldev, pin);
/* Pin space may be sparse */
if (desc == NULL)
@@ -363,8 +424,11 @@ static int pinctrl_gpioranges_show(struct seq_file *s, void *what)
/* Loop over the ranges */
mutex_lock(&pctldev->gpio_ranges_lock);
list_for_each_entry(range, &pctldev->gpio_ranges, node) {
- seq_printf(s, "%u: %s [%u - %u]\n", range->id, range->name,
- range->base, (range->base + range->npins - 1));
+ seq_printf(s, "%u: %s GPIOS [%u - %u] PINS [%u - %u]\n",
+ range->id, range->name,
+ range->base, (range->base + range->npins - 1),
+ range->pin_base,
+ (range->pin_base + range->npins - 1));
}
mutex_unlock(&pctldev->gpio_ranges_lock);
@@ -375,11 +439,15 @@ static int pinctrl_devices_show(struct seq_file *s, void *what)
{
struct pinctrl_dev *pctldev;
- seq_puts(s, "name [pinmux]\n");
+ seq_puts(s, "name [pinmux] [pinconf]\n");
mutex_lock(&pinctrldev_list_mutex);
list_for_each_entry(pctldev, &pinctrldev_list, node) {
seq_printf(s, "%s ", pctldev->desc->name);
if (pctldev->desc->pmxops)
+ seq_puts(s, "yes ");
+ else
+ seq_puts(s, "no ");
+ if (pctldev->desc->confops)
seq_puts(s, "yes");
else
seq_puts(s, "no");
@@ -444,11 +512,11 @@ static void pinctrl_init_device_debugfs(struct pinctrl_dev *pctldev)
{
static struct dentry *device_root;
- device_root = debugfs_create_dir(dev_name(&pctldev->dev),
+ device_root = debugfs_create_dir(dev_name(pctldev->dev),
debugfs_root);
if (IS_ERR(device_root) || !device_root) {
pr_warn("failed to create debugfs directory for %s\n",
- dev_name(&pctldev->dev));
+ dev_name(pctldev->dev));
return;
}
debugfs_create_file("pins", S_IFREG | S_IRUGO,
@@ -458,6 +526,7 @@ static void pinctrl_init_device_debugfs(struct pinctrl_dev *pctldev)
debugfs_create_file("gpio-ranges", S_IFREG | S_IRUGO,
device_root, pctldev, &pinctrl_gpioranges_ops);
pinmux_init_device_debugfs(device_root, pctldev);
+ pinconf_init_device_debugfs(device_root, pctldev);
}
static void pinctrl_init_debugfs(void)
@@ -495,7 +564,6 @@ static void pinctrl_init_debugfs(void)
struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
struct device *dev, void *driver_data)
{
- static atomic_t pinmux_no = ATOMIC_INIT(0);
struct pinctrl_dev *pctldev;
int ret;
@@ -514,6 +582,16 @@ struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
}
}
+ /* If we're implementing pinconfig, check the ops for sanity */
+ if (pctldesc->confops) {
+ ret = pinconf_check_ops(pctldesc->confops);
+ if (ret) {
+ pr_err("%s pin config ops lacks necessary functions\n",
+ pctldesc->name);
+ return NULL;
+ }
+ }
+
pctldev = kzalloc(sizeof(struct pinctrl_dev), GFP_KERNEL);
if (pctldev == NULL)
return NULL;
@@ -526,18 +604,7 @@ struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
spin_lock_init(&pctldev->pin_desc_tree_lock);
INIT_LIST_HEAD(&pctldev->gpio_ranges);
mutex_init(&pctldev->gpio_ranges_lock);
-
- /* Register device */
- pctldev->dev.parent = dev;
- dev_set_name(&pctldev->dev, "pinctrl.%d",
- atomic_inc_return(&pinmux_no) - 1);
- pctldev->dev.release = pinctrl_dev_release;
- ret = device_register(&pctldev->dev);
- if (ret != 0) {
- pr_err("error in device registration\n");
- goto out_reg_dev_err;
- }
- dev_set_drvdata(&pctldev->dev, pctldev);
+ pctldev->dev = dev;
/* Register all the pins */
pr_debug("try to register %d pins on %s...\n",
@@ -547,7 +614,7 @@ struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
pr_err("error during pin registration\n");
pinctrl_free_pindescs(pctldev, pctldesc->pins,
pctldesc->npins);
- goto out_reg_pins_err;
+ goto out_err;
}
pinctrl_init_device_debugfs(pctldev);
@@ -557,10 +624,8 @@ struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
pinmux_hog_maps(pctldev);
return pctldev;
-out_reg_pins_err:
- device_del(&pctldev->dev);
-out_reg_dev_err:
- put_device(&pctldev->dev);
+out_err:
+ kfree(pctldev);
return NULL;
}
EXPORT_SYMBOL_GPL(pinctrl_register);
@@ -584,7 +649,7 @@ void pinctrl_unregister(struct pinctrl_dev *pctldev)
/* Destroy descriptor tree */
pinctrl_free_pindescs(pctldev, pctldev->desc->pins,
pctldev->desc->npins);
- device_unregister(&pctldev->dev);
+ kfree(pctldev);
}
EXPORT_SYMBOL_GPL(pinctrl_unregister);
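Note: with the device_register() path gone, pinctrl_register() only records the
caller's struct device. A rough sketch of a probe against the reworked core; all
foo_* names are hypothetical, and the descriptor fields mirror the SiRF and U300
descriptors further down:

	static struct pinctrl_desc foo_desc = {
		.name    = "pinctrl-foo",
		.pins    = foo_pads,			/* array of struct pinctrl_pin_desc */
		.npins   = ARRAY_SIZE(foo_pads),
		.pctlops = &foo_pctrl_ops,
		.pmxops  = &foo_pmx_ops,
		.confops = &foo_conf_ops,		/* optional; sanity-checked by pinconf_check_ops() */
		.owner   = THIS_MODULE,
	};

	static int foo_probe(struct platform_device *pdev)
	{
		struct pinctrl_dev *pctl = pinctrl_register(&foo_desc, &pdev->dev, NULL);
		if (!pctl)
			return -EINVAL;
		platform_set_drvdata(pdev, pctl);
		return 0;
	}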
diff --git a/drivers/pinctrl/core.h b/drivers/pinctrl/core.h
index 472fa1341cc0..177a3310547f 100644
--- a/drivers/pinctrl/core.h
+++ b/drivers/pinctrl/core.h
@@ -9,6 +9,10 @@
* License terms: GNU General Public License (GPL) version 2
*/
+#include <linux/pinctrl/pinconf.h>
+
+struct pinctrl_gpio_range;
+
/**
* struct pinctrl_dev - pin control class device
* @node: node to include this pin controller in the global pin controller list
@@ -34,7 +38,7 @@ struct pinctrl_dev {
spinlock_t pin_desc_tree_lock;
struct list_head gpio_ranges;
struct mutex gpio_ranges_lock;
- struct device dev;
+ struct device *dev;
struct module *owner;
void *driver_data;
#ifdef CONFIG_PINMUX
@@ -48,6 +52,7 @@ struct pinctrl_dev {
* @pctldev: corresponding pin control device
* @name: a name for the pin, e.g. the name of the pin/pad/finger on a
* datasheet or such
+ * @dynamic_name: if the name of this pin was dynamically allocated
* @lock: a lock to protect the descriptor structure
* @mux_requested: whether the pin is already requested by pinmux or not
* @mux_function: a named muxing function for the pin that will be passed to
@@ -56,6 +61,7 @@ struct pinctrl_dev {
struct pin_desc {
struct pinctrl_dev *pctldev;
const char *name;
+ bool dynamic_name;
spinlock_t lock;
/* These fields only added when supporting pinmux drivers */
#ifdef CONFIG_PINMUX
@@ -65,7 +71,10 @@ struct pin_desc {
struct pinctrl_dev *get_pinctrl_dev_from_dev(struct device *dev,
const char *dev_name);
-struct pin_desc *pin_desc_get(struct pinctrl_dev *pctldev, int pin);
+struct pin_desc *pin_desc_get(struct pinctrl_dev *pctldev, unsigned int pin);
+int pin_get_from_name(struct pinctrl_dev *pctldev, const char *name);
int pinctrl_get_device_gpio_range(unsigned gpio,
struct pinctrl_dev **outdev,
struct pinctrl_gpio_range **outrange);
+int pinctrl_get_group_selector(struct pinctrl_dev *pctldev,
+ const char *pin_group);
diff --git a/drivers/pinctrl/pinconf.c b/drivers/pinctrl/pinconf.c
new file mode 100644
index 000000000000..1259872b0a1d
--- /dev/null
+++ b/drivers/pinctrl/pinconf.c
@@ -0,0 +1,326 @@
+/*
+ * Core driver for the pin config portions of the pin control subsystem
+ *
+ * Copyright (C) 2011 ST-Ericsson SA
+ * Written on behalf of Linaro for ST-Ericsson
+ *
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+#define pr_fmt(fmt) "pinconfig core: " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/pinctrl/machine.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinconf.h>
+#include "core.h"
+#include "pinconf.h"
+
+int pin_config_get_for_pin(struct pinctrl_dev *pctldev, unsigned pin,
+ unsigned long *config)
+{
+ const struct pinconf_ops *ops = pctldev->desc->confops;
+
+ if (!ops || !ops->pin_config_get) {
+ dev_err(pctldev->dev, "cannot get pin configuration, missing "
+ "pin_config_get() function in driver\n");
+ return -EINVAL;
+ }
+
+ return ops->pin_config_get(pctldev, pin, config);
+}
+
+/**
+ * pin_config_get() - get the configuration of a single pin parameter
+ * @dev_name: name of the pin controller device for this pin
+ * @name: name of the pin to get the config for
+ * @config: the config pointed to by this argument will be filled in with the
+ * current pin state, it can be used directly by drivers as a numeral, or
+ * it can be dereferenced to any struct.
+ */
+int pin_config_get(const char *dev_name, const char *name,
+ unsigned long *config)
+{
+ struct pinctrl_dev *pctldev;
+ int pin;
+
+ pctldev = get_pinctrl_dev_from_dev(NULL, dev_name);
+ if (!pctldev)
+ return -EINVAL;
+
+ pin = pin_get_from_name(pctldev, name);
+ if (pin < 0)
+ return pin;
+
+ return pin_config_get_for_pin(pctldev, pin, config);
+}
+EXPORT_SYMBOL(pin_config_get);
+
+int pin_config_set_for_pin(struct pinctrl_dev *pctldev, unsigned pin,
+ unsigned long config)
+{
+ const struct pinconf_ops *ops = pctldev->desc->confops;
+ int ret;
+
+ if (!ops || !ops->pin_config_set) {
+ dev_err(pctldev->dev, "cannot configure pin, missing "
+ "config function in driver\n");
+ return -EINVAL;
+ }
+
+ ret = ops->pin_config_set(pctldev, pin, config);
+ if (ret) {
+ dev_err(pctldev->dev,
+ "unable to set pin configuration on pin %d\n", pin);
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * pin_config_set() - set the configuration of a single pin parameter
+ * @dev_name: name of pin controller device for this pin
+ * @name: name of the pin to set the config for
+ * @config: the config in this argument will contain the desired pin state, it
+ * can be used directly by drivers as a numeral, or it can be dereferenced
+ * to any struct.
+ */
+int pin_config_set(const char *dev_name, const char *name,
+ unsigned long config)
+{
+ struct pinctrl_dev *pctldev;
+ int pin;
+
+ pctldev = get_pinctrl_dev_from_dev(NULL, dev_name);
+ if (!pctldev)
+ return -EINVAL;
+
+ pin = pin_get_from_name(pctldev, name);
+ if (pin < 0)
+ return pin;
+
+ return pin_config_set_for_pin(pctldev, pin, config);
+}
+EXPORT_SYMBOL(pin_config_set);
+
+int pin_config_group_get(const char *dev_name, const char *pin_group,
+ unsigned long *config)
+{
+ struct pinctrl_dev *pctldev;
+ const struct pinconf_ops *ops;
+ int selector;
+
+ pctldev = get_pinctrl_dev_from_dev(NULL, dev_name);
+ if (!pctldev)
+ return -EINVAL;
+ ops = pctldev->desc->confops;
+
+ if (!ops || !ops->pin_config_group_get) {
+ dev_err(pctldev->dev, "cannot get configuration for pin "
+ "group, missing group config get function in "
+ "driver\n");
+ return -EINVAL;
+ }
+
+ selector = pinctrl_get_group_selector(pctldev, pin_group);
+ if (selector < 0)
+ return selector;
+
+ return ops->pin_config_group_get(pctldev, selector, config);
+}
+EXPORT_SYMBOL(pin_config_group_get);
+
+
+int pin_config_group_set(const char *dev_name, const char *pin_group,
+ unsigned long config)
+{
+ struct pinctrl_dev *pctldev;
+ const struct pinconf_ops *ops;
+ const struct pinctrl_ops *pctlops;
+ int selector;
+ const unsigned *pins;
+ unsigned num_pins;
+ int ret;
+ int i;
+
+ pctldev = get_pinctrl_dev_from_dev(NULL, dev_name);
+ if (!pctldev)
+ return -EINVAL;
+ ops = pctldev->desc->confops;
+ pctlops = pctldev->desc->pctlops;
+
+ if (!ops || (!ops->pin_config_group_set && !ops->pin_config_set)) {
+ dev_err(pctldev->dev, "cannot configure pin group, missing "
+ "config function in driver\n");
+ return -EINVAL;
+ }
+
+ selector = pinctrl_get_group_selector(pctldev, pin_group);
+ if (selector < 0)
+ return selector;
+
+ ret = pctlops->get_group_pins(pctldev, selector, &pins, &num_pins);
+ if (ret) {
+ dev_err(pctldev->dev, "cannot configure pin group, error "
+ "getting pins\n");
+ return ret;
+ }
+
+ /*
+ * If the pin controller supports handling entire groups we use that
+ * capability.
+ */
+ if (ops->pin_config_group_set) {
+ ret = ops->pin_config_group_set(pctldev, selector, config);
+ /*
+ * If the pin controller prefers that a certain group be handled
+ * pin-by-pin as well, it returns -EAGAIN.
+ */
+ if (ret != -EAGAIN)
+ return ret;
+ }
+
+ /*
+ * If the controller cannot handle entire groups, we configure each pin
+ * individually.
+ */
+ if (!ops->pin_config_set)
+ return 0;
+
+ for (i = 0; i < num_pins; i++) {
+ ret = ops->pin_config_set(pctldev, pins[i], config);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(pin_config_group_set);
+
+int pinconf_check_ops(const struct pinconf_ops *ops)
+{
+ /* We must be able to read out pin status */
+ if (!ops->pin_config_get && !ops->pin_config_group_get)
+ return -EINVAL;
+ /* We have to be able to config the pins in SOME way */
+ if (!ops->pin_config_set && !ops->pin_config_group_set)
+ return -EINVAL;
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+static void pinconf_dump_pin(struct pinctrl_dev *pctldev,
+ struct seq_file *s, int pin)
+{
+ const struct pinconf_ops *ops = pctldev->desc->confops;
+
+ if (ops && ops->pin_config_dbg_show)
+ ops->pin_config_dbg_show(pctldev, s, pin);
+}
+
+static int pinconf_pins_show(struct seq_file *s, void *what)
+{
+ struct pinctrl_dev *pctldev = s->private;
+ unsigned i, pin;
+
+ seq_puts(s, "Pin config settings per pin\n");
+ seq_puts(s, "Format: pin (name): pinmux setting array\n");
+
+ /* The pin number can be retrieved from the pin controller descriptor */
+ for (i = 0; i < pctldev->desc->npins; i++) {
+ struct pin_desc *desc;
+
+ pin = pctldev->desc->pins[i].number;
+ desc = pin_desc_get(pctldev, pin);
+ /* Skip if we cannot search the pin */
+ if (desc == NULL)
+ continue;
+
+ seq_printf(s, "pin %d (%s):", pin,
+ desc->name ? desc->name : "unnamed");
+
+ pinconf_dump_pin(pctldev, s, pin);
+
+ seq_printf(s, "\n");
+ }
+
+ return 0;
+}
+
+static void pinconf_dump_group(struct pinctrl_dev *pctldev,
+ struct seq_file *s, unsigned selector,
+ const char *gname)
+{
+ const struct pinconf_ops *ops = pctldev->desc->confops;
+
+ if (ops && ops->pin_config_group_dbg_show)
+ ops->pin_config_group_dbg_show(pctldev, s, selector);
+}
+
+static int pinconf_groups_show(struct seq_file *s, void *what)
+{
+ struct pinctrl_dev *pctldev = s->private;
+ const struct pinctrl_ops *pctlops = pctldev->desc->pctlops;
+ const struct pinconf_ops *ops = pctldev->desc->confops;
+ unsigned selector = 0;
+
+ if (!ops || !ops->pin_config_group_get)
+ return 0;
+
+ seq_puts(s, "Pin config settings per pin group\n");
+ seq_puts(s, "Format: group (name): pinmux setting array\n");
+
+ while (pctlops->list_groups(pctldev, selector) >= 0) {
+ const char *gname = pctlops->get_group_name(pctldev, selector);
+
+ seq_printf(s, "%u (%s):", selector, gname);
+ pinconf_dump_group(pctldev, s, selector, gname);
+ selector++;
+ }
+
+ return 0;
+}
+
+static int pinconf_pins_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pinconf_pins_show, inode->i_private);
+}
+
+static int pinconf_groups_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pinconf_groups_show, inode->i_private);
+}
+
+static const struct file_operations pinconf_pins_ops = {
+ .open = pinconf_pins_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations pinconf_groups_ops = {
+ .open = pinconf_groups_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void pinconf_init_device_debugfs(struct dentry *devroot,
+ struct pinctrl_dev *pctldev)
+{
+ debugfs_create_file("pinconf-pins", S_IFREG | S_IRUGO,
+ devroot, pctldev, &pinconf_pins_ops);
+ debugfs_create_file("pinconf-groups", S_IFREG | S_IRUGO,
+ devroot, pctldev, &pinconf_groups_ops);
+}
+
+#endif
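Note: a rough usage sketch of the pinconf entry points exported above; the device,
pin and group names here are hypothetical:

	unsigned long cfg;
	int ret;

	ret = pin_config_get("pinctrl-foo", "PIN42", &cfg);	/* driver-defined config word */
	if (!ret)
		ret = pin_config_set("pinctrl-foo", "PIN42", cfg);
	if (!ret)
		ret = pin_config_group_set("pinctrl-foo", "i2c0_grp", cfg);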
diff --git a/drivers/pinctrl/pinconf.h b/drivers/pinctrl/pinconf.h
new file mode 100644
index 000000000000..e7dc6165032a
--- /dev/null
+++ b/drivers/pinctrl/pinconf.h
@@ -0,0 +1,36 @@
+/*
+ * Internal interface between the core pin control system and the
+ * pin config portions
+ *
+ * Copyright (C) 2011 ST-Ericsson SA
+ * Written on behalf of Linaro for ST-Ericsson
+ * Based on bits of regulator core, gpio core and clk core
+ *
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifdef CONFIG_PINCONF
+
+int pinconf_check_ops(const struct pinconf_ops *ops);
+void pinconf_init_device_debugfs(struct dentry *devroot,
+ struct pinctrl_dev *pctldev);
+int pin_config_get_for_pin(struct pinctrl_dev *pctldev, unsigned pin,
+ unsigned long *config);
+int pin_config_set_for_pin(struct pinctrl_dev *pctldev, unsigned pin,
+ unsigned long config);
+
+#else
+
+static inline int pinconf_check_ops(const struct pinconf_ops *ops)
+{
+ return 0;
+}
+
+static inline void pinconf_init_device_debugfs(struct dentry *devroot,
+ struct pinctrl_dev *pctldev)
+{
+}
+
+#endif
diff --git a/drivers/gpio/gpio-u300.c b/drivers/pinctrl/pinctrl-coh901.c
index 4035778852b0..69fb7072a23e 100644
--- a/drivers/gpio/gpio-u300.c
+++ b/drivers/pinctrl/pinctrl-coh901.c
@@ -22,6 +22,7 @@
#include <linux/gpio.h>
#include <linux/list.h>
#include <linux/slab.h>
+#include <linux/pinctrl/pinmux.h>
#include <mach/gpio-u300.h>
/*
@@ -351,6 +352,24 @@ static inline struct u300_gpio *to_u300_gpio(struct gpio_chip *chip)
return container_of(chip, struct u300_gpio, chip);
}
+static int u300_gpio_request(struct gpio_chip *chip, unsigned offset)
+{
+ /*
+ * Map back to global GPIO space and request muxing, the direction
+ * parameter does not matter for this controller.
+ */
+ int gpio = chip->base + offset;
+
+ return pinmux_request_gpio(gpio);
+}
+
+static void u300_gpio_free(struct gpio_chip *chip, unsigned offset)
+{
+ int gpio = chip->base + offset;
+
+ pinmux_free_gpio(gpio);
+}
+
static int u300_gpio_get(struct gpio_chip *chip, unsigned offset)
{
struct u300_gpio *gpio = to_u300_gpio(chip);
@@ -483,6 +502,8 @@ static int u300_gpio_config(struct gpio_chip *chip, unsigned offset,
static struct gpio_chip u300_gpio_chip = {
.label = "u300-gpio-chip",
.owner = THIS_MODULE,
+ .request = u300_gpio_request,
+ .free = u300_gpio_free,
.get = u300_gpio_get,
.set = u300_gpio_set,
.direction_input = u300_gpio_direction_input,
diff --git a/drivers/pinctrl/pinmux-sirf.c b/drivers/pinctrl/pinctrl-sirf.c
index d76cae620956..6b3534cc051a 100644
--- a/drivers/pinctrl/pinmux-sirf.c
+++ b/drivers/pinctrl/pinctrl-sirf.c
@@ -463,7 +463,7 @@ static const struct sirfsoc_padmux spi1_padmux = {
.funcval = BIT(8),
};
-static const unsigned spi1_pins[] = { 33, 34, 35, 36 };
+static const unsigned spi1_pins[] = { 43, 44, 45, 46 };
static const struct sirfsoc_muxmask sdmmc1_muxmask[] = {
{
@@ -1067,7 +1067,7 @@ static int sirfsoc_pinmux_request_gpio(struct pinctrl_dev *pmxdev,
spmx = pinctrl_dev_get_drvdata(pmxdev);
muxval = readl(spmx->gpio_virtbase + SIRFSOC_GPIO_PAD_EN(group));
- muxval = muxval | (1 << offset);
+ muxval = muxval | (1 << (offset - range->pin_base));
writel(muxval, spmx->gpio_virtbase + SIRFSOC_GPIO_PAD_EN(group));
return 0;
@@ -1086,7 +1086,6 @@ static struct pinctrl_desc sirfsoc_pinmux_desc = {
.name = DRIVER_NAME,
.pins = sirfsoc_pads,
.npins = ARRAY_SIZE(sirfsoc_pads),
- .maxpin = SIRFSOC_NUM_PADS - 1,
.pctlops = &sirfsoc_pctrl_ops,
.pmxops = &sirfsoc_pinmux_ops,
.owner = THIS_MODULE,
@@ -1100,21 +1099,25 @@ static struct pinctrl_gpio_range sirfsoc_gpio_ranges[] = {
.name = "sirfsoc-gpio*",
.id = 0,
.base = 0,
+ .pin_base = 0,
.npins = 32,
}, {
.name = "sirfsoc-gpio*",
.id = 1,
.base = 32,
+ .pin_base = 32,
.npins = 32,
}, {
.name = "sirfsoc-gpio*",
.id = 2,
.base = 64,
+ .pin_base = 64,
.npins = 32,
}, {
.name = "sirfsoc-gpio*",
.id = 3,
.base = 96,
+ .pin_base = 96,
.npins = 19,
},
};
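A note on the second hunk above: with the separate GPIO and pin number spaces introduced in this series, the pin handed to gpio_request_enable() is the global pad number, so the bit inside a bank's PAD_EN register has to be taken relative to the range's pin_base. A hedged worked example using the third bank declared above:

/*
 * Illustrative only: for the bank { .base = 64, .pin_base = 64, .npins = 32 },
 * a request for pad 70 must set
 *     bit = offset - range->pin_base = 70 - 64 = 6
 * in that bank's 32-bit PAD_EN register; shifting by the raw pad number
 * ("1 << 70") would land outside the register and is an undefined shift.
 */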
diff --git a/drivers/pinctrl/pinmux-u300.c b/drivers/pinctrl/pinctrl-u300.c
index 4858a64131f8..c8d02f1c2b5e 100644
--- a/drivers/pinctrl/pinmux-u300.c
+++ b/drivers/pinctrl/pinctrl-u300.c
@@ -940,20 +940,23 @@ static void u300_pmx_endisable(struct u300_pmx *upmx, unsigned selector,
{
u16 regval, val, mask;
int i;
+ const struct u300_pmx_mask *upmx_mask;
+ upmx_mask = u300_pmx_functions[selector].mask;
for (i = 0; i < ARRAY_SIZE(u300_pmx_registers); i++) {
if (enable)
- val = u300_pmx_functions[selector].mask->bits;
+ val = upmx_mask->bits;
else
val = 0;
- mask = u300_pmx_functions[selector].mask->mask;
+ mask = upmx_mask->mask;
if (mask != 0) {
regval = readw(upmx->virtbase + u300_pmx_registers[i]);
regval &= ~mask;
regval |= val;
writew(regval, upmx->virtbase + u300_pmx_registers[i]);
}
+ upmx_mask++;
}
}
@@ -1016,21 +1019,35 @@ static struct pinmux_ops u300_pmx_ops = {
};
/*
- * FIXME: this will be set to sane values as this driver engulfs
- * drivers/gpio/gpio-u300.c and we really know this stuff.
+ * GPIO ranges handled by the application-side COH901XXX GPIO controller
+ * Many pins can be converted into GPIO pins, but we only list those
+ * that are useful in practice, to cut down on the tables.
*/
-static struct pinctrl_gpio_range u300_gpio_range = {
- .name = "COH901*",
- .id = 0,
- .base = 0,
- .npins = 64,
+#define U300_GPIO_RANGE(a, b, c) { .name = "COH901XXX", .id = a, .base = a, \
+ .pin_base = b, .npins = c }
+
+static struct pinctrl_gpio_range u300_gpio_ranges[] = {
+ U300_GPIO_RANGE(10, 426, 1),
+ U300_GPIO_RANGE(11, 180, 1),
+ U300_GPIO_RANGE(12, 165, 1), /* MS/MMC card insertion */
+ U300_GPIO_RANGE(13, 179, 1),
+ U300_GPIO_RANGE(14, 178, 1),
+ U300_GPIO_RANGE(16, 194, 1),
+ U300_GPIO_RANGE(17, 193, 1),
+ U300_GPIO_RANGE(18, 192, 1),
+ U300_GPIO_RANGE(19, 191, 1),
+ U300_GPIO_RANGE(20, 186, 1),
+ U300_GPIO_RANGE(21, 185, 1),
+ U300_GPIO_RANGE(22, 184, 1),
+ U300_GPIO_RANGE(23, 183, 1),
+ U300_GPIO_RANGE(24, 182, 1),
+ U300_GPIO_RANGE(25, 181, 1),
};
static struct pinctrl_desc u300_pmx_desc = {
.name = DRIVER_NAME,
.pins = u300_pads,
.npins = ARRAY_SIZE(u300_pads),
- .maxpin = U300_NUM_PADS-1,
.pctlops = &u300_pctrl_ops,
.pmxops = &u300_pmx_ops,
.owner = THIS_MODULE,
@@ -1038,9 +1055,10 @@ static struct pinctrl_desc u300_pmx_desc = {
static int __init u300_pmx_probe(struct platform_device *pdev)
{
- int ret;
struct u300_pmx *upmx;
struct resource *res;
+ int ret;
+ int i;
/* Create state holders etc for this driver */
upmx = devm_kzalloc(&pdev->dev, sizeof(*upmx), GFP_KERNEL);
@@ -1077,7 +1095,8 @@ static int __init u300_pmx_probe(struct platform_device *pdev)
}
/* We will handle a range of GPIO pins */
- pinctrl_add_gpio_range(upmx->pctl, &u300_gpio_range);
+ for (i = 0; i < ARRAY_SIZE(u300_gpio_ranges); i++)
+ pinctrl_add_gpio_range(upmx->pctl, &u300_gpio_ranges[i]);
platform_set_drvdata(pdev, upmx);
@@ -1099,8 +1118,10 @@ out_no_resource:
static int __exit u300_pmx_remove(struct platform_device *pdev)
{
struct u300_pmx *upmx = platform_get_drvdata(pdev);
+ int i;
- pinctrl_remove_gpio_range(upmx->pctl, &u300_gpio_range);
+ for (i = 0; i < ARRAY_SIZE(u300_gpio_ranges); i++)
+ pinctrl_remove_gpio_range(upmx->pctl, &u300_gpio_ranges[i]);
pinctrl_unregister(upmx->pctl);
iounmap(upmx->virtbase);
release_mem_region(upmx->phybase, upmx->physize);
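The u300 ranges above are the first user of distinct GPIO (base) and pin (pin_base) number spaces. A minimal sketch, not part of the patch, of the conversion the pinmux core performs for such a range; the helper name is invented:

/* Mirrors "pin = gpio - range->base + range->pin_base" from pinmux.c below. */
static unsigned example_gpio_to_pin(unsigned gpio, unsigned base,
				    unsigned pin_base)
{
	return gpio - base + pin_base;
}

/*
 * For U300_GPIO_RANGE(12, 165, 1), i.e. { .base = 12, .pin_base = 165,
 * .npins = 1 }, example_gpio_to_pin(12, 12, 165) == 165, so requesting
 * GPIO 12 (MS/MMC card insertion) ends up muxing pad 165.
 */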
diff --git a/drivers/pinctrl/pinmux.c b/drivers/pinctrl/pinmux.c
index a5467f8709e9..a76a348321bb 100644
--- a/drivers/pinctrl/pinmux.c
+++ b/drivers/pinctrl/pinmux.c
@@ -21,6 +21,7 @@
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
+#include <linux/string.h>
#include <linux/sysfs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
@@ -32,12 +33,8 @@
static DEFINE_MUTEX(pinmux_list_mutex);
static LIST_HEAD(pinmux_list);
-/* List of pinmux hogs */
-static DEFINE_MUTEX(pinmux_hoglist_mutex);
-static LIST_HEAD(pinmux_hoglist);
-
-/* Global pinmux maps, we allow one set only */
-static struct pinmux_map const *pinmux_maps;
+/* Global pinmux maps */
+static struct pinmux_map *pinmux_maps;
static unsigned pinmux_maps_num;
/**
@@ -98,41 +95,35 @@ struct pinmux_hog {
* @function: a functional name to give to this pin, passed to the driver
* so it knows what function to mux in, e.g. the string "gpioNN"
* means that you want to mux in the pin for use as GPIO number NN
- * @gpio: if this request concerns a single GPIO pin
* @gpio_range: the range matching the GPIO pin if this is a request for a
* single GPIO pin
*/
static int pin_request(struct pinctrl_dev *pctldev,
- int pin, const char *function, bool gpio,
+ int pin, const char *function,
struct pinctrl_gpio_range *gpio_range)
{
struct pin_desc *desc;
const struct pinmux_ops *ops = pctldev->desc->pmxops;
int status = -EINVAL;
- dev_dbg(&pctldev->dev, "request pin %d for %s\n", pin, function);
-
- if (!pin_is_valid(pctldev, pin)) {
- dev_err(&pctldev->dev, "pin is invalid\n");
- return -EINVAL;
- }
-
- if (!function) {
- dev_err(&pctldev->dev, "no function name given\n");
- return -EINVAL;
- }
+ dev_dbg(pctldev->dev, "request pin %d for %s\n", pin, function);
desc = pin_desc_get(pctldev, pin);
if (desc == NULL) {
- dev_err(&pctldev->dev,
+ dev_err(pctldev->dev,
"pin is not registered so it cannot be requested\n");
goto out;
}
+ if (!function) {
+ dev_err(pctldev->dev, "no function name given\n");
+ return -EINVAL;
+ }
+
spin_lock(&desc->lock);
if (desc->mux_function) {
spin_unlock(&desc->lock);
- dev_err(&pctldev->dev,
+ dev_err(pctldev->dev,
"pin already requested\n");
goto out;
}
@@ -141,7 +132,7 @@ static int pin_request(struct pinctrl_dev *pctldev,
/* Let each pin increase references to this module */
if (!try_module_get(pctldev->owner)) {
- dev_err(&pctldev->dev,
+ dev_err(pctldev->dev,
"could not increase module refcount for pin %d\n",
pin);
status = -EINVAL;
@@ -152,7 +143,7 @@ static int pin_request(struct pinctrl_dev *pctldev,
* If there is no kind of request function for the pin we just assume
* we got it by default and proceed.
*/
- if (gpio && ops->gpio_request_enable)
+ if (gpio_range && ops->gpio_request_enable)
/* This requests and enables a single GPIO pin */
status = ops->gpio_request_enable(pctldev, gpio_range, pin);
else if (ops->request)
@@ -161,7 +152,7 @@ static int pin_request(struct pinctrl_dev *pctldev,
status = 0;
if (status)
- dev_err(&pctldev->dev, "->request on device %s failed "
+ dev_err(pctldev->dev, "->request on device %s failed "
"for pin %d\n",
pctldev->desc->name, pin);
out_free_pin:
@@ -172,7 +163,7 @@ out_free_pin:
}
out:
if (status)
- dev_err(&pctldev->dev, "pin-%d (%s) status %d\n",
+ dev_err(pctldev->dev, "pin-%d (%s) status %d\n",
pin, function ? : "?", status);
return status;
@@ -182,34 +173,52 @@ out:
* pin_free() - release a single muxed in pin so something else can be muxed
* @pctldev: pin controller device handling this pin
* @pin: the pin to free
- * @free_func: whether to free the pin's assigned function name string
+ * @gpio_range: the range matching the GPIO pin if this is a request for a
+ * single GPIO pin
+ *
+ * This function returns a pointer to the function name in use. This is used
+ * for callers that dynamically allocate a function name so it can be freed
+ * once the pin is free. This is done for GPIO request functions.
*/
-static void pin_free(struct pinctrl_dev *pctldev, int pin, int free_func)
+static const char *pin_free(struct pinctrl_dev *pctldev, int pin,
+ struct pinctrl_gpio_range *gpio_range)
{
const struct pinmux_ops *ops = pctldev->desc->pmxops;
struct pin_desc *desc;
+ const char *func;
desc = pin_desc_get(pctldev, pin);
if (desc == NULL) {
- dev_err(&pctldev->dev,
+ dev_err(pctldev->dev,
"pin is not registered so it cannot be freed\n");
- return;
+ return NULL;
}
- if (ops->free)
+ /*
+ * If there is no kind of request function for the pin we just assume
+ * we got it by default and proceed.
+ */
+ if (gpio_range && ops->gpio_disable_free)
+ ops->gpio_disable_free(pctldev, gpio_range, pin);
+ else if (ops->free)
ops->free(pctldev, pin);
spin_lock(&desc->lock);
- if (free_func)
- kfree(desc->mux_function);
+ func = desc->mux_function;
desc->mux_function = NULL;
spin_unlock(&desc->lock);
module_put(pctldev->owner);
+
+ return func;
}
/**
* pinmux_request_gpio() - request a single pin to be muxed in as GPIO
* @gpio: the GPIO pin number from the GPIO subsystem number space
+ *
+ * This function should *ONLY* be used from gpiolib-based GPIO drivers,
+ * as part of their gpio_request() semantics; platforms and individual drivers
+ * shall *NOT* request GPIO pins to be muxed in.
*/
int pinmux_request_gpio(unsigned gpio)
{
@@ -225,7 +234,7 @@ int pinmux_request_gpio(unsigned gpio)
return -EINVAL;
/* Convert to the pin controllers number space */
- pin = gpio - range->base;
+ pin = gpio - range->base + range->pin_base;
/* Conjure some name stating what chip and pin this is taken by */
snprintf(gpiostr, 15, "%s:%d", range->name, gpio);
@@ -234,7 +243,7 @@ int pinmux_request_gpio(unsigned gpio)
if (!function)
return -EINVAL;
- ret = pin_request(pctldev, pin, function, true, range);
+ ret = pin_request(pctldev, pin, function, range);
if (ret < 0)
kfree(function);
@@ -245,6 +254,10 @@ EXPORT_SYMBOL_GPL(pinmux_request_gpio);
/**
* pinmux_free_gpio() - free a single pin, currently used as GPIO
* @gpio: the GPIO pin number from the GPIO subsystem number space
+ *
+ * This function should *ONLY* be used from gpiolib-based GPIO drivers,
+ * as part of their gpio_free() semantics; platforms and individual drivers
+ * shall *NOT* request GPIO pins to be muxed out.
*/
void pinmux_free_gpio(unsigned gpio)
{
@@ -252,53 +265,108 @@ void pinmux_free_gpio(unsigned gpio)
struct pinctrl_gpio_range *range;
int ret;
int pin;
+ const char *func;
ret = pinctrl_get_device_gpio_range(gpio, &pctldev, &range);
if (ret)
return;
/* Convert to the pin controllers number space */
- pin = gpio - range->base;
+ pin = gpio - range->base + range->pin_base;
- pin_free(pctldev, pin, true);
+ func = pin_free(pctldev, pin, range);
+ kfree(func);
}
EXPORT_SYMBOL_GPL(pinmux_free_gpio);
+static int pinmux_gpio_direction(unsigned gpio, bool input)
+{
+ struct pinctrl_dev *pctldev;
+ struct pinctrl_gpio_range *range;
+ const struct pinmux_ops *ops;
+ int ret;
+ int pin;
+
+ ret = pinctrl_get_device_gpio_range(gpio, &pctldev, &range);
+ if (ret)
+ return ret;
+
+ ops = pctldev->desc->pmxops;
+
+ /* Convert to the pin controllers number space */
+ pin = gpio - range->base + range->pin_base;
+
+ if (ops->gpio_set_direction)
+ ret = ops->gpio_set_direction(pctldev, range, pin, input);
+ else
+ ret = 0;
+
+ return ret;
+}
+
+/**
+ * pinmux_gpio_direction_input() - request a GPIO pin to go into input mode
+ * @gpio: the GPIO pin number from the GPIO subsystem number space
+ *
+ * This function should *ONLY* be used from gpiolib-based GPIO drivers,
+ * as part of their gpio_direction_input() semantics; platforms and individual
+ * drivers shall *NOT* touch pinmux GPIO calls.
+ */
+int pinmux_gpio_direction_input(unsigned gpio)
+{
+ return pinmux_gpio_direction(gpio, true);
+}
+EXPORT_SYMBOL_GPL(pinmux_gpio_direction_input);
+
+/**
+ * pinmux_gpio_direction_output() - request a GPIO pin to go into output mode
+ * @gpio: the GPIO pin number from the GPIO subsystem number space
+ *
+ * This function should *ONLY* be used from gpiolib-based GPIO drivers,
+ * as part of their gpio_direction_output() semantics; platforms and individual
+ * drivers shall *NOT* touch pinmux GPIO calls.
+ */
+int pinmux_gpio_direction_output(unsigned gpio)
+{
+ return pinmux_gpio_direction(gpio, false);
+}
+EXPORT_SYMBOL_GPL(pinmux_gpio_direction_output);
+
/**
* pinmux_register_mappings() - register a set of pinmux mappings
- * @maps: the pinmux mappings table to register
+ * @maps: the pinmux mappings table to register; this should be marked with
+ * __initdata so it can be discarded after boot. This function will
+ * perform a shallow copy of the mapping entries.
* @num_maps: the number of maps in the mapping table
*
* Only call this once during initialization of your machine, the function is
* tagged as __init and won't be callable after init has completed. The map
* passed into this function will be owned by the pinmux core and cannot be
- * free:d.
+ * freed.
*/
int __init pinmux_register_mappings(struct pinmux_map const *maps,
unsigned num_maps)
{
+ void *tmp_maps;
int i;
- if (pinmux_maps != NULL) {
- pr_err("pinmux mappings already registered, you can only "
- "register one set of maps\n");
- return -EINVAL;
- }
-
pr_debug("add %d pinmux maps\n", num_maps);
+
+ /* First sanity check the new mapping */
for (i = 0; i < num_maps; i++) {
- /* Sanity check the mapping */
if (!maps[i].name) {
pr_err("failed to register map %d: "
"no map name given\n", i);
return -EINVAL;
}
+
if (!maps[i].ctrl_dev && !maps[i].ctrl_dev_name) {
pr_err("failed to register map %s (%d): "
"no pin control device given\n",
maps[i].name, i);
return -EINVAL;
}
+
if (!maps[i].function) {
pr_err("failed to register map %s (%d): "
"no function ID given\n", maps[i].name, i);
@@ -315,9 +383,30 @@ int __init pinmux_register_mappings(struct pinmux_map const *maps,
maps[i].function);
}
- pinmux_maps = maps;
- pinmux_maps_num = num_maps;
+ /*
+ * Make a copy of the map array - string pointers will end up in the
+ * kernel const section anyway so these do not need to be deep copied.
+ */
+ if (!pinmux_maps_num) {
+ /* On first call, just copy them */
+ tmp_maps = kmemdup(maps,
+ sizeof(struct pinmux_map) * num_maps,
+ GFP_KERNEL);
+ if (!tmp_maps)
+ return -ENOMEM;
+ } else {
+ /* Subsequent calls, reallocate array to new size */
+ size_t oldsize = sizeof(struct pinmux_map) * pinmux_maps_num;
+ size_t newsize = sizeof(struct pinmux_map) * num_maps;
+
+ tmp_maps = krealloc(pinmux_maps, oldsize + newsize, GFP_KERNEL);
+ if (!tmp_maps)
+ return -ENOMEM;
+ memcpy((tmp_maps + oldsize), maps, newsize);
+ }
+ pinmux_maps = tmp_maps;
+ pinmux_maps_num += num_maps;
return 0;
}
@@ -345,14 +434,14 @@ static int acquire_pins(struct pinctrl_dev *pctldev,
if (ret)
return ret;
- dev_dbg(&pctldev->dev, "requesting the %u pins from group %u\n",
+ dev_dbg(pctldev->dev, "requesting the %u pins from group %u\n",
num_pins, group_selector);
/* Try to allocate all pins in this group, one by one */
for (i = 0; i < num_pins; i++) {
- ret = pin_request(pctldev, pins[i], func, false, NULL);
+ ret = pin_request(pctldev, pins[i], func, NULL);
if (ret) {
- dev_err(&pctldev->dev,
+ dev_err(pctldev->dev,
"could not get pin %d for function %s "
"on device %s - conflicting mux mappings?\n",
pins[i], func ? : "(undefined)",
@@ -360,7 +449,7 @@ static int acquire_pins(struct pinctrl_dev *pctldev,
/* On error release all taken pins */
i--; /* this pin just failed */
for (; i >= 0; i--)
- pin_free(pctldev, pins[i], false);
+ pin_free(pctldev, pins[i], NULL);
return -ENODEV;
}
}
@@ -384,44 +473,13 @@ static void release_pins(struct pinctrl_dev *pctldev,
ret = pctlops->get_group_pins(pctldev, group_selector,
&pins, &num_pins);
if (ret) {
- dev_err(&pctldev->dev, "could not get pins to release for "
+ dev_err(pctldev->dev, "could not get pins to release for "
"group selector %d\n",
group_selector);
return;
}
for (i = 0; i < num_pins; i++)
- pin_free(pctldev, pins[i], false);
-}
-
-/**
- * pinmux_get_group_selector() - returns the group selector for a group
- * @pctldev: the pin controller handling the group
- * @pin_group: the pin group to look up
- */
-static int pinmux_get_group_selector(struct pinctrl_dev *pctldev,
- const char *pin_group)
-{
- const struct pinctrl_ops *pctlops = pctldev->desc->pctlops;
- unsigned group_selector = 0;
-
- while (pctlops->list_groups(pctldev, group_selector) >= 0) {
- const char *gname = pctlops->get_group_name(pctldev,
- group_selector);
- if (!strcmp(gname, pin_group)) {
- dev_dbg(&pctldev->dev,
- "found group selector %u for %s\n",
- group_selector,
- pin_group);
- return group_selector;
- }
-
- group_selector++;
- }
-
- dev_err(&pctldev->dev, "does not have pin group %s\n",
- pin_group);
-
- return -EINVAL;
+ pin_free(pctldev, pins[i], NULL);
}
/**
@@ -465,9 +523,9 @@ static int pinmux_check_pin_group(struct pinctrl_dev *pctldev,
return ret;
if (num_groups < 1)
return -EINVAL;
- ret = pinmux_get_group_selector(pctldev, groups[0]);
+ ret = pinctrl_get_group_selector(pctldev, groups[0]);
if (ret < 0) {
- dev_err(&pctldev->dev,
+ dev_err(pctldev->dev,
"function %s wants group %s but the pin "
"controller does not seem to have that group\n",
pmxops->get_function_name(pctldev, func_selector),
@@ -476,7 +534,7 @@ static int pinmux_check_pin_group(struct pinctrl_dev *pctldev,
}
if (num_groups > 1)
- dev_dbg(&pctldev->dev,
+ dev_dbg(pctldev->dev,
"function %s support more than one group, "
"default-selecting first group %s (%d)\n",
pmxops->get_function_name(pctldev, func_selector),
@@ -486,13 +544,13 @@ static int pinmux_check_pin_group(struct pinctrl_dev *pctldev,
return ret;
}
- dev_dbg(&pctldev->dev,
+ dev_dbg(pctldev->dev,
"check if we have pin group %s on controller %s\n",
pin_group, pinctrl_dev_get_name(pctldev));
- ret = pinmux_get_group_selector(pctldev, pin_group);
+ ret = pinctrl_get_group_selector(pctldev, pin_group);
if (ret < 0) {
- dev_dbg(&pctldev->dev,
+ dev_dbg(pctldev->dev,
"%s does not support pin group %s with function %s\n",
pinctrl_dev_get_name(pctldev),
pin_group,
@@ -569,7 +627,7 @@ static int pinmux_enable_muxmap(struct pinctrl_dev *pctldev,
*/
if (pmx->pctldev && pmx->pctldev != pctldev) {
- dev_err(&pctldev->dev,
+ dev_err(pctldev->dev,
"different pin control devices given for device %s, "
"function %s\n",
devname,
@@ -592,7 +650,7 @@ static int pinmux_enable_muxmap(struct pinctrl_dev *pctldev,
*/
if (pmx->func_selector != UINT_MAX &&
pmx->func_selector != func_selector) {
- dev_err(&pctldev->dev,
+ dev_err(pctldev->dev,
"dual function defines in the map for device %s\n",
devname);
return -EINVAL;
@@ -698,7 +756,7 @@ struct pinmux *pinmux_get(struct device *dev, const char *name)
}
pr_debug("in map, found pctldev %s to handle function %s",
- dev_name(&pctldev->dev), map->function);
+ dev_name(pctldev->dev), map->function);
/*
@@ -874,7 +932,7 @@ static int pinmux_hog_map(struct pinctrl_dev *pctldev,
* without any problems, so then we can hog pinmuxes for
* all devices that just want a static pin mux at this point.
*/
- dev_err(&pctldev->dev, "map %s wants to hog a non-system "
+ dev_err(pctldev->dev, "map %s wants to hog a non-system "
"pinmux, this is not going to work\n", map->name);
return -EINVAL;
}
@@ -886,7 +944,7 @@ static int pinmux_hog_map(struct pinctrl_dev *pctldev,
pmx = pinmux_get(NULL, map->name);
if (IS_ERR(pmx)) {
kfree(hog);
- dev_err(&pctldev->dev,
+ dev_err(pctldev->dev,
"could not get the %s pinmux mapping for hogging\n",
map->name);
return PTR_ERR(pmx);
@@ -896,7 +954,7 @@ static int pinmux_hog_map(struct pinctrl_dev *pctldev,
if (ret) {
pinmux_put(pmx);
kfree(hog);
- dev_err(&pctldev->dev,
+ dev_err(pctldev->dev,
"could not enable the %s pinmux mapping for hogging\n",
map->name);
return ret;
@@ -905,7 +963,7 @@ static int pinmux_hog_map(struct pinctrl_dev *pctldev,
hog->map = map;
hog->pmx = pmx;
- dev_info(&pctldev->dev, "hogged map %s, function %s\n", map->name,
+ dev_info(pctldev->dev, "hogged map %s, function %s\n", map->name,
map->function);
mutex_lock(&pctldev->pinmux_hogs_lock);
list_add(&hog->node, &pctldev->pinmux_hogs);
@@ -924,7 +982,7 @@ static int pinmux_hog_map(struct pinctrl_dev *pctldev,
*/
int pinmux_hog_maps(struct pinctrl_dev *pctldev)
{
- struct device *dev = &pctldev->dev;
+ struct device *dev = pctldev->dev;
const char *devname = dev_name(dev);
int ret;
int i;
@@ -948,7 +1006,7 @@ int pinmux_hog_maps(struct pinctrl_dev *pctldev)
}
/**
- * pinmux_hog_maps() - unhog specific map entries on controller device
+ * pinmux_unhog_maps() - unhog specific map entries on controller device
* @pctldev: the pin control device to unhog entries on
*/
void pinmux_unhog_maps(struct pinctrl_dev *pctldev)
@@ -1005,18 +1063,19 @@ static int pinmux_functions_show(struct seq_file *s, void *what)
static int pinmux_pins_show(struct seq_file *s, void *what)
{
struct pinctrl_dev *pctldev = s->private;
- unsigned pin;
+ unsigned i, pin;
seq_puts(s, "Pinmux settings per pin\n");
seq_puts(s, "Format: pin (name): pinmuxfunction\n");
- /* The highest pin number need to be included in the loop, thus <= */
- for (pin = 0; pin <= pctldev->desc->maxpin; pin++) {
+ /* The pin number can be retrieved from the pin controller descriptor */
+ for (i = 0; i < pctldev->desc->npins; i++) {
struct pin_desc *desc;
+ pin = pctldev->desc->pins[i].number;
desc = pin_desc_get(pctldev, pin);
- /* Pin space may be sparse */
+ /* Skip if we cannot look up the pin */
if (desc == NULL)
continue;
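With the pinmux.c changes above, pinmux_register_mappings() can now be called more than once and accumulates the tables. A hedged sketch of board-file usage; the device and function names are invented, the .dev_name field and the <linux/pinctrl/machine.h> location are assumptions, and only .name, .ctrl_dev_name and .function are taken from the code in this patch:

#include <linux/init.h>
#include <linux/pinctrl/machine.h>	/* assumed location of struct pinmux_map */

static struct pinmux_map __initdata example_pinmux_map[] = {
	{
		.name = "spi0",
		.ctrl_dev_name = "pinmux-example",
		.function = "spi0",
		.dev_name = "example-spi.0",	/* assumed field */
	},
};

static void __init example_board_init(void)
{
	/*
	 * The core kmemdup()s the table on the first call and krealloc()s
	 * the global array on later calls, so several board files can each
	 * register their own __initdata table.
	 */
	pinmux_register_mappings(example_pinmux_map,
				 ARRAY_SIZE(example_pinmux_map));
}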
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 7f43cf86d776..f995e6e2f78c 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -639,7 +639,7 @@ config ACPI_CMPC
config INTEL_SCU_IPC
bool "Intel SCU IPC Support"
- depends on X86_MRST
+ depends on X86_INTEL_MID
default y
---help---
IPC is used to bridge the communications between kernel and SCU on
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index edaccad9b5bf..b7944f903886 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -1477,7 +1477,7 @@ static struct attribute *asus_attributes[] = {
NULL
};
-static mode_t asus_sysfs_is_visible(struct kobject *kobj,
+static umode_t asus_sysfs_is_visible(struct kobject *kobj,
struct attribute *attr,
int idx)
{
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index d1049ee3c9e8..72d731c21d45 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -992,7 +992,7 @@ static struct attribute *hwmon_attributes[] = {
NULL
};
-static mode_t asus_hwmon_sysfs_is_visible(struct kobject *kobj,
+static umode_t asus_hwmon_sysfs_is_visible(struct kobject *kobj,
struct attribute *attr, int idx)
{
struct device *dev = container_of(kobj, struct device, kobj);
@@ -1357,7 +1357,7 @@ static struct attribute *platform_attributes[] = {
NULL
};
-static mode_t asus_sysfs_is_visible(struct kobject *kobj,
+static umode_t asus_sysfs_is_visible(struct kobject *kobj,
struct attribute *attr, int idx)
{
struct device *dev = container_of(kobj, struct device, kobj);
diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c
index d9312b3073e5..6f966d6c062b 100644
--- a/drivers/platform/x86/asus_acpi.c
+++ b/drivers/platform/x86/asus_acpi.c
@@ -1053,7 +1053,7 @@ static const struct file_operations disp_proc_fops = {
};
static int
-asus_proc_add(char *name, const struct file_operations *proc_fops, mode_t mode,
+asus_proc_add(char *name, const struct file_operations *proc_fops, umode_t mode,
struct acpi_device *device)
{
struct proc_dir_entry *proc;
@@ -1072,7 +1072,7 @@ asus_proc_add(char *name, const struct file_operations *proc_fops, mode_t mode,
static int asus_hotk_add_fs(struct acpi_device *device)
{
struct proc_dir_entry *proc;
- mode_t mode;
+ umode_t mode;
if ((asus_uid == 0) && (asus_gid == 0)) {
mode = S_IFREG | S_IRUGO | S_IWUSR | S_IWGRP;
diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
index 8877b836d27c..d96734478324 100644
--- a/drivers/platform/x86/compal-laptop.c
+++ b/drivers/platform/x86/compal-laptop.c
@@ -189,7 +189,7 @@ struct compal_data{
/* =============== */
/* General globals */
/* =============== */
-static int force;
+static bool force;
module_param(force, bool, 0);
MODULE_PARM_DESC(force, "Force driver load, ignore DMI data");
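The int to bool conversions in this and the following platform drivers match the stricter bool module parameter handling that went into this merge window: a parameter declared with the "bool" type must be backed by a real bool variable. A hedged sketch of the resulting pattern; the DMI table, init function and vendor string are invented:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/dmi.h>

static bool force;			/* previously "static int force;" */
module_param(force, bool, 0);
MODULE_PARM_DESC(force, "Force driver load, ignore DMI data");

static const struct dmi_system_id example_dmi_table[] __initconst = {
	{
		.ident = "Example laptop",
		.matches = { DMI_MATCH(DMI_SYS_VENDOR, "EXAMPLE") },
	},
	{ }
};

static int __init example_init(void)
{
	/* Typical use of such a "force" parameter: skip the DMI whitelist */
	if (!force && !dmi_check_system(example_dmi_table))
		return -ENODEV;
	return 0;
}
module_init(example_init);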
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index a36addf106a0..ac902f7a9baa 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -368,7 +368,7 @@ static struct attribute *ideapad_attributes[] = {
NULL
};
-static mode_t ideapad_is_visible(struct kobject *kobj,
+static umode_t ideapad_is_visible(struct kobject *kobj,
struct attribute *attr,
int idx)
{
diff --git a/drivers/platform/x86/intel_menlow.c b/drivers/platform/x86/intel_menlow.c
index abddc83e9fd7..3271ac85115e 100644
--- a/drivers/platform/x86/intel_menlow.c
+++ b/drivers/platform/x86/intel_menlow.c
@@ -389,7 +389,7 @@ static ssize_t bios_enabled_show(struct device *dev,
return sprintf(buf, "%s\n", bios_enabled ? "enabled" : "disabled");
}
-static int intel_menlow_add_one_attribute(char *name, int mode, void *show,
+static int intel_menlow_add_one_attribute(char *name, umode_t mode, void *show,
void *store, struct device *dev,
acpi_handle handle)
{
diff --git a/drivers/platform/x86/intel_oaktrail.c b/drivers/platform/x86/intel_oaktrail.c
index 7f88c7923fc6..6ee0b5c90933 100644
--- a/drivers/platform/x86/intel_oaktrail.c
+++ b/drivers/platform/x86/intel_oaktrail.c
@@ -95,7 +95,7 @@
#define OT_EC_BL_CONTROL_ON_DATA 0x1A
-static int force;
+static bool force;
module_param(force, bool, 0);
MODULE_PARM_DESC(force, "Force driver load, ignore DMI data");
diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c
index 48870e504231..f00d0d1e0653 100644
--- a/drivers/platform/x86/intel_scu_ipc.c
+++ b/drivers/platform/x86/intel_scu_ipc.c
@@ -19,7 +19,7 @@
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/init.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/pm.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
index f204643c5052..bb5132128b33 100644
--- a/drivers/platform/x86/msi-laptop.c
+++ b/drivers/platform/x86/msi-laptop.c
@@ -89,7 +89,7 @@ static int msi_laptop_resume(struct platform_device *device);
#define MSI_STANDARD_EC_DEVICES_EXISTS_ADDRESS 0x2f
-static int force;
+static bool force;
module_param(force, bool, 0);
MODULE_PARM_DESC(force, "Force driver load, ignore DMI data");
diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
index 09e26bfd4643..fd73ea89b857 100644
--- a/drivers/platform/x86/samsung-laptop.c
+++ b/drivers/platform/x86/samsung-laptop.c
@@ -228,12 +228,12 @@ static struct platform_device *sdev;
static struct rfkill *rfk;
static bool has_stepping_quirk;
-static int force;
+static bool force;
module_param(force, bool, 0);
MODULE_PARM_DESC(force,
"Disable the DMI check and forces the driver to be loaded");
-static int debug;
+static bool debug;
module_param(debug, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug enabled or not");
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 7b828680b21d..ea0c6075b720 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -297,7 +297,7 @@ struct ibm_init_struct {
char param[32];
int (*init) (struct ibm_init_struct *);
- mode_t base_procfs_mode;
+ umode_t base_procfs_mode;
struct ibm_struct *data;
};
@@ -378,13 +378,13 @@ static unsigned int bright_maxlvl; /* 0 = unknown */
#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
static int dbg_wlswemul;
-static int tpacpi_wlsw_emulstate;
+static bool tpacpi_wlsw_emulstate;
static int dbg_bluetoothemul;
-static int tpacpi_bluetooth_emulstate;
+static bool tpacpi_bluetooth_emulstate;
static int dbg_wwanemul;
-static int tpacpi_wwan_emulstate;
+static bool tpacpi_wwan_emulstate;
static int dbg_uwbemul;
-static int tpacpi_uwb_emulstate;
+static bool tpacpi_uwb_emulstate;
#endif
@@ -2456,8 +2456,9 @@ static int hotkey_kthread(void *data)
u32 poll_mask, event_mask;
unsigned int si, so;
unsigned long t;
- unsigned int change_detector, must_reset;
+ unsigned int change_detector;
unsigned int poll_freq;
+ bool was_frozen;
mutex_lock(&hotkey_thread_mutex);
@@ -2488,14 +2489,14 @@ static int hotkey_kthread(void *data)
t = 100; /* should never happen... */
}
t = msleep_interruptible(t);
- if (unlikely(kthread_should_stop()))
+ if (unlikely(kthread_freezable_should_stop(&was_frozen)))
break;
- must_reset = try_to_freeze();
- if (t > 0 && !must_reset)
+
+ if (t > 0 && !was_frozen)
continue;
mutex_lock(&hotkey_thread_data_mutex);
- if (must_reset || hotkey_config_change != change_detector) {
+ if (was_frozen || hotkey_config_change != change_detector) {
/* forget old state on thaw or config change */
si = so;
t = 0;
@@ -2528,10 +2529,6 @@ exit:
static void hotkey_poll_stop_sync(void)
{
if (tpacpi_hotkey_task) {
- if (frozen(tpacpi_hotkey_task) ||
- freezing(tpacpi_hotkey_task))
- thaw_process(tpacpi_hotkey_task);
-
kthread_stop(tpacpi_hotkey_task);
tpacpi_hotkey_task = NULL;
mutex_lock(&hotkey_thread_mutex);
@@ -6447,7 +6444,7 @@ static struct ibm_struct brightness_driver_data = {
static int alsa_index = ~((1 << (SNDRV_CARDS - 3)) - 1); /* last three slots */
static char *alsa_id = "ThinkPadEC";
-static int alsa_enable = SNDRV_DEFAULT_ENABLE1;
+static bool alsa_enable = SNDRV_DEFAULT_ENABLE1;
struct tpacpi_alsa_data {
struct snd_card *card;
@@ -6490,7 +6487,7 @@ static enum tpacpi_volume_access_mode volume_mode =
TPACPI_VOL_MODE_MAX;
static enum tpacpi_volume_capabilities volume_capabilities;
-static int volume_control_allowed;
+static bool volume_control_allowed;
/*
 * Used to synchronize writers to TP_EC_AUDIO and
@@ -7268,7 +7265,7 @@ enum fan_control_commands {
* and also watchdog cmd */
};
-static int fan_control_allowed;
+static bool fan_control_allowed;
static enum fan_status_access_mode fan_status_access_mode;
static enum fan_control_access_mode fan_control_access_mode;
@@ -8440,7 +8437,7 @@ static struct proc_dir_entry *proc_dir;
 * Module and infrastructure probe, init and exit handling
*/
-static int force_load;
+static bool force_load;
#ifdef CONFIG_THINKPAD_ACPI_DEBUG
static const char * __init str_supported(int is_supported)
@@ -8542,7 +8539,7 @@ static int __init ibm_init(struct ibm_init_struct *iibm)
"%s installed\n", ibm->name);
if (ibm->read) {
- mode_t mode = iibm->base_procfs_mode;
+ umode_t mode = iibm->base_procfs_mode;
if (!mode)
mode = S_IRUGO;
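The hotkey poller change above replaces open-coded try_to_freeze()/kthread_should_stop() handling with kthread_freezable_should_stop(), which freezes the thread when asked and reports whether it was frozen. A simplified sketch of the pattern, not the driver's actual loop; the function name and polling body are invented:

#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/delay.h>

static int example_poll_thread(void *data)
{
	bool was_frozen;

	set_freezable();
	while (true) {
		msleep_interruptible(1000);
		/*
		 * Freezes here during suspend; returns true when the thread
		 * should exit because kthread_stop() was called.
		 */
		if (kthread_freezable_should_stop(&was_frozen))
			break;
		if (was_frozen)
			continue;	/* forget stale pre-suspend state */
		/* ... poll the hardware ... */
	}
	return 0;
}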
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 13ef8c37471d..dcdc1f4a4624 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -121,6 +121,7 @@ struct toshiba_acpi_dev {
int illumination_supported:1;
int video_supported:1;
int fan_supported:1;
+ int system_event_supported:1;
struct mutex mutex;
};
@@ -724,7 +725,7 @@ static int keys_proc_show(struct seq_file *m, void *v)
u32 hci_result;
u32 value;
- if (!dev->key_event_valid) {
+ if (!dev->key_event_valid && dev->system_event_supported) {
hci_read1(dev, HCI_SYSTEM_EVENT, &value, &hci_result);
if (hci_result == HCI_SUCCESS) {
dev->key_event_valid = 1;
@@ -964,6 +965,8 @@ static int __devinit toshiba_acpi_add(struct acpi_device *acpi_dev)
/* enable event fifo */
hci_write1(dev, HCI_SYSTEM_EVENT, 1, &hci_result);
+ if (hci_result == HCI_SUCCESS)
+ dev->system_event_supported = 1;
props.type = BACKLIGHT_PLATFORM;
props.max_brightness = HCI_LCD_BRIGHTNESS_LEVELS - 1;
@@ -1032,12 +1035,15 @@ static void toshiba_acpi_notify(struct acpi_device *acpi_dev, u32 event)
{
struct toshiba_acpi_dev *dev = acpi_driver_data(acpi_dev);
u32 hci_result, value;
+ int retries = 3;
- if (event != 0x80)
+ if (!dev->system_event_supported || event != 0x80)
return;
+
do {
hci_read1(dev, HCI_SYSTEM_EVENT, &value, &hci_result);
- if (hci_result == HCI_SUCCESS) {
+ switch (hci_result) {
+ case HCI_SUCCESS:
if (value == 0x100)
continue;
/* act on key press; ignore key release */
@@ -1049,14 +1055,19 @@ static void toshiba_acpi_notify(struct acpi_device *acpi_dev, u32 event)
pr_info("Unknown key %x\n",
value);
}
- } else if (hci_result == HCI_NOT_SUPPORTED) {
+ break;
+ case HCI_NOT_SUPPORTED:
/* This is a workaround for an unresolved issue on
* some machines where system events sporadically
* become disabled. */
hci_write1(dev, HCI_SYSTEM_EVENT, 1, &hci_result);
pr_notice("Re-enabled hotkeys\n");
+ /* fall through */
+ default:
+ retries--;
+ break;
}
- } while (hci_result != HCI_EMPTY);
+ } while (retries && hci_result != HCI_EMPTY);
}
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index a134c26870b0..42a4dcc25f92 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -82,12 +82,12 @@ struct wmi_block {
#define ACPI_WMI_STRING 0x4 /* GUID takes & returns a string */
#define ACPI_WMI_EVENT 0x8 /* GUID is an event */
-static int debug_event;
+static bool debug_event;
module_param(debug_event, bool, 0444);
MODULE_PARM_DESC(debug_event,
"Log WMI Events [0/1]");
-static int debug_dump_wdg;
+static bool debug_dump_wdg;
module_param(debug_dump_wdg, bool, 0444);
MODULE_PARM_DESC(debug_dump_wdg,
"Dump available WMI interfaces [0/1]");
diff --git a/drivers/pnp/quirks.c b/drivers/pnp/quirks.c
index dfbd5a6cc58b..258fef272ea7 100644
--- a/drivers/pnp/quirks.c
+++ b/drivers/pnp/quirks.c
@@ -295,6 +295,45 @@ static void quirk_system_pci_resources(struct pnp_dev *dev)
}
}
+#ifdef CONFIG_AMD_NB
+
+#include <asm/amd_nb.h>
+
+static void quirk_amd_mmconfig_area(struct pnp_dev *dev)
+{
+ resource_size_t start, end;
+ struct pnp_resource *pnp_res;
+ struct resource *res;
+ struct resource mmconfig_res, *mmconfig;
+
+ mmconfig = amd_get_mmconfig_range(&mmconfig_res);
+ if (!mmconfig)
+ return;
+
+ list_for_each_entry(pnp_res, &dev->resources, list) {
+ res = &pnp_res->res;
+ if (res->end < mmconfig->start || res->start > mmconfig->end ||
+ (res->start == mmconfig->start && res->end == mmconfig->end))
+ continue;
+
+ dev_info(&dev->dev, FW_BUG
+ "%pR covers only part of AMD MMCONFIG area %pR; adding more reservations\n",
+ res, mmconfig);
+ if (mmconfig->start < res->start) {
+ start = mmconfig->start;
+ end = res->start - 1;
+ pnp_add_mem_resource(dev, start, end, 0);
+ }
+ if (mmconfig->end > res->end) {
+ start = res->end + 1;
+ end = mmconfig->end;
+ pnp_add_mem_resource(dev, start, end, 0);
+ }
+ break;
+ }
+}
+#endif
+
/*
* PnP Quirks
* Cards or devices that need some tweaking due to incomplete resource info
@@ -322,6 +361,9 @@ static struct pnp_fixup pnp_fixups[] = {
/* PnP resources that might overlap PCI BARs */
{"PNP0c01", quirk_system_pci_resources},
{"PNP0c02", quirk_system_pci_resources},
+#ifdef CONFIG_AMD_NB
+ {"PNP0c01", quirk_amd_mmconfig_area},
+#endif
{""}
};
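quirk_amd_mmconfig_area() above reserves whatever part of the AMD MMCONFIG window the firmware's PNP0c01 motherboard resource failed to cover, so that nothing else gets placed inside it later. A hedged worked example with made-up addresses:

/*
 * Illustrative numbers only. Suppose the firmware reserved
 *     res      = [0xe0000000 - 0xe00fffff]   (1 MB)
 * while amd_get_mmconfig_range() reports
 *     mmconfig = [0xe0000000 - 0xefffffff]   (256 MB).
 * The quirk adds one extra memory resource for the uncovered tail,
 *     [0xe0100000 - 0xefffffff],
 * and would add one for the head as well if res started above
 * mmconfig->start. An exact match is skipped, since the firmware
 * already covers the whole window.
 */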
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index 9f88641e67f9..3a8daf858742 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -116,12 +116,12 @@ config BATTERY_WM97XX
help
Say Y to enable support for battery measured by WM97xx aux port.
-config BATTERY_BQ20Z75
- tristate "TI BQ20z75 gas gauge"
+config BATTERY_SBS
+ tristate "SBS Compliant gas gauge"
depends on I2C
help
- Say Y to include support for TI BQ20z75 SBS-compliant
- gas gauge and protection IC.
+ Say Y to include support for the SBS battery driver for SBS-compliant
+ gas gauges.
config BATTERY_BQ27x00
tristate "BQ27x00 battery driver"
@@ -150,6 +150,14 @@ config BATTERY_DA9030
Say Y here to enable support for batteries charger integrated into
DA9030 PMIC.
+config BATTERY_DA9052
+ tristate "Dialog DA9052 Battery"
+ depends on PMIC_DA9052
+ depends on BROKEN
+ help
+ Say Y here to enable support for the battery charger integrated into
+ the DA9052 PMIC.
+
config BATTERY_MAX17040
tristate "Maxim MAX17040 Fuel Gauge"
depends on I2C
@@ -226,6 +234,12 @@ config CHARGER_TWL4030
help
Say Y here to enable support for TWL4030 Battery Charge Interface.
+config CHARGER_LP8727
+ tristate "National Semiconductor LP8727 charger driver"
+ depends on I2C
+ help
+ Say Y here to enable support for the LP8727 charger driver.
+
config CHARGER_GPIO
tristate "GPIO charger"
depends on GPIOLIB
@@ -236,6 +250,16 @@ config CHARGER_GPIO
This driver can be built as a module. If so, the module will be
called gpio-charger.
+config CHARGER_MANAGER
+ bool "Battery charger manager for multiple chargers"
+ depends on REGULATOR && RTC_CLASS
+ help
+ Say Y to enable charger-manager support, which allows multiple
+ chargers attached to a battery and multiple batteries attached to a
+ system. The charger-manager can also monitor charging status at
+ runtime and in suspend-to-RAM, by waking up the system periodically
+ with the help of suspend_again support.
+
config CHARGER_MAX8997
tristate "Maxim MAX8997/MAX8966 PMIC battery charger driver"
depends on MFD_MAX8997 && REGULATOR_MAX8997
diff --git a/drivers/power/Makefile b/drivers/power/Makefile
index b4af13dd8b66..e429008eaf10 100644
--- a/drivers/power/Makefile
+++ b/drivers/power/Makefile
@@ -22,9 +22,10 @@ obj-$(CONFIG_BATTERY_OLPC) += olpc_battery.o
obj-$(CONFIG_BATTERY_TOSA) += tosa_battery.o
obj-$(CONFIG_BATTERY_COLLIE) += collie_battery.o
obj-$(CONFIG_BATTERY_WM97XX) += wm97xx_battery.o
-obj-$(CONFIG_BATTERY_BQ20Z75) += bq20z75.o
+obj-$(CONFIG_BATTERY_SBS) += sbs-battery.o
obj-$(CONFIG_BATTERY_BQ27x00) += bq27x00_battery.o
obj-$(CONFIG_BATTERY_DA9030) += da9030_battery.o
+obj-$(CONFIG_BATTERY_DA9052) += da9052-battery.o
obj-$(CONFIG_BATTERY_MAX17040) += max17040_battery.o
obj-$(CONFIG_BATTERY_MAX17042) += max17042_battery.o
obj-$(CONFIG_BATTERY_Z2) += z2_battery.o
@@ -35,6 +36,8 @@ obj-$(CONFIG_BATTERY_INTEL_MID) += intel_mid_battery.o
obj-$(CONFIG_CHARGER_ISP1704) += isp1704_charger.o
obj-$(CONFIG_CHARGER_MAX8903) += max8903_charger.o
obj-$(CONFIG_CHARGER_TWL4030) += twl4030_charger.o
+obj-$(CONFIG_CHARGER_LP8727) += lp8727_charger.o
obj-$(CONFIG_CHARGER_GPIO) += gpio-charger.o
+obj-$(CONFIG_CHARGER_MANAGER) += charger-manager.o
obj-$(CONFIG_CHARGER_MAX8997) += max8997_charger.o
obj-$(CONFIG_CHARGER_MAX8998) += max8998_charger.o
diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
index bb16f5b7e167..98bf5676318d 100644
--- a/drivers/power/bq27x00_battery.c
+++ b/drivers/power/bq27x00_battery.c
@@ -54,13 +54,19 @@
#define BQ27000_REG_RSOC 0x0B /* Relative State-of-Charge */
#define BQ27000_REG_ILMD 0x76 /* Initial last measured discharge */
-#define BQ27000_FLAG_CHGS BIT(7)
+#define BQ27000_FLAG_EDVF BIT(0) /* Final End-of-Discharge-Voltage flag */
+#define BQ27000_FLAG_EDV1 BIT(1) /* First End-of-Discharge-Voltage flag */
+#define BQ27000_FLAG_CI BIT(4) /* Capacity Inaccurate flag */
#define BQ27000_FLAG_FC BIT(5)
+#define BQ27000_FLAG_CHGS BIT(7) /* Charge state flag */
#define BQ27500_REG_SOC 0x2C
#define BQ27500_REG_DCAP 0x3C /* Design capacity */
-#define BQ27500_FLAG_DSC BIT(0)
-#define BQ27500_FLAG_FC BIT(9)
+#define BQ27500_FLAG_DSG BIT(0) /* Discharging */
+#define BQ27500_FLAG_SOCF BIT(1) /* State-of-Charge threshold final */
+#define BQ27500_FLAG_SOC1 BIT(2) /* State-of-Charge threshold 1 */
+#define BQ27500_FLAG_CHG BIT(8) /* Charging */
+#define BQ27500_FLAG_FC BIT(9) /* Fully charged */
#define BQ27000_RS 20 /* Resistor sense */
@@ -79,9 +85,8 @@ struct bq27x00_reg_cache {
int charge_full;
int cycle_count;
int capacity;
+ int energy;
int flags;
-
- int current_now;
};
struct bq27x00_device_info {
@@ -108,6 +113,7 @@ static enum power_supply_property bq27x00_battery_props[] = {
POWER_SUPPLY_PROP_VOLTAGE_NOW,
POWER_SUPPLY_PROP_CURRENT_NOW,
POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_CAPACITY_LEVEL,
POWER_SUPPLY_PROP_TEMP,
POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW,
POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG,
@@ -149,7 +155,7 @@ static int bq27x00_battery_read_rsoc(struct bq27x00_device_info *di)
rsoc = bq27x00_read(di, BQ27000_REG_RSOC, true);
if (rsoc < 0)
- dev_err(di->dev, "error reading relative State-of-Charge\n");
+ dev_dbg(di->dev, "error reading relative State-of-Charge\n");
return rsoc;
}
@@ -164,7 +170,8 @@ static int bq27x00_battery_read_charge(struct bq27x00_device_info *di, u8 reg)
charge = bq27x00_read(di, reg, false);
if (charge < 0) {
- dev_err(di->dev, "error reading nominal available capacity\n");
+ dev_dbg(di->dev, "error reading charge register %02x: %d\n",
+ reg, charge);
return charge;
}
@@ -208,7 +215,7 @@ static int bq27x00_battery_read_ilmd(struct bq27x00_device_info *di)
ilmd = bq27x00_read(di, BQ27000_REG_ILMD, true);
if (ilmd < 0) {
- dev_err(di->dev, "error reading initial last measured discharge\n");
+ dev_dbg(di->dev, "error reading initial last measured discharge\n");
return ilmd;
}
@@ -221,6 +228,50 @@ static int bq27x00_battery_read_ilmd(struct bq27x00_device_info *di)
}
/*
+ * Return the battery Available energy in µWh
+ * Or < 0 if something fails.
+ */
+static int bq27x00_battery_read_energy(struct bq27x00_device_info *di)
+{
+ int ae;
+
+ ae = bq27x00_read(di, BQ27x00_REG_AE, false);
+ if (ae < 0) {
+ dev_dbg(di->dev, "error reading available energy\n");
+ return ae;
+ }
+
+ if (di->chip == BQ27500)
+ ae *= 1000;
+ else
+ ae = ae * 29200 / BQ27000_RS;
+
+ return ae;
+}
+
+/*
+ * Return the battery temperature in tenths of degree Celsius
+ * Or < 0 if something fails.
+ */
+static int bq27x00_battery_read_temperature(struct bq27x00_device_info *di)
+{
+ int temp;
+
+ temp = bq27x00_read(di, BQ27x00_REG_TEMP, false);
+ if (temp < 0) {
+ dev_err(di->dev, "error reading temperature\n");
+ return temp;
+ }
+
+ if (di->chip == BQ27500)
+ temp -= 2731;
+ else
+ temp = ((temp * 5) - 5463) / 2;
+
+ return temp;
+}
+
+/*
* Return the battery Cycle count total
* Or < 0 if something fails.
*/
@@ -245,7 +296,8 @@ static int bq27x00_battery_read_time(struct bq27x00_device_info *di, u8 reg)
tval = bq27x00_read(di, reg, false);
if (tval < 0) {
- dev_err(di->dev, "error reading register %02x: %d\n", reg, tval);
+ dev_dbg(di->dev, "error reading time register %02x: %d\n",
+ reg, tval);
return tval;
}
@@ -262,25 +314,31 @@ static void bq27x00_update(struct bq27x00_device_info *di)
cache.flags = bq27x00_read(di, BQ27x00_REG_FLAGS, is_bq27500);
if (cache.flags >= 0) {
- cache.capacity = bq27x00_battery_read_rsoc(di);
- cache.temperature = bq27x00_read(di, BQ27x00_REG_TEMP, false);
- cache.time_to_empty = bq27x00_battery_read_time(di, BQ27x00_REG_TTE);
- cache.time_to_empty_avg = bq27x00_battery_read_time(di, BQ27x00_REG_TTECP);
- cache.time_to_full = bq27x00_battery_read_time(di, BQ27x00_REG_TTF);
- cache.charge_full = bq27x00_battery_read_lmd(di);
+ if (!is_bq27500 && (cache.flags & BQ27000_FLAG_CI)) {
+ dev_info(di->dev, "battery is not calibrated! ignoring capacity values\n");
+ cache.capacity = -ENODATA;
+ cache.energy = -ENODATA;
+ cache.time_to_empty = -ENODATA;
+ cache.time_to_empty_avg = -ENODATA;
+ cache.time_to_full = -ENODATA;
+ cache.charge_full = -ENODATA;
+ } else {
+ cache.capacity = bq27x00_battery_read_rsoc(di);
+ cache.energy = bq27x00_battery_read_energy(di);
+ cache.time_to_empty = bq27x00_battery_read_time(di, BQ27x00_REG_TTE);
+ cache.time_to_empty_avg = bq27x00_battery_read_time(di, BQ27x00_REG_TTECP);
+ cache.time_to_full = bq27x00_battery_read_time(di, BQ27x00_REG_TTF);
+ cache.charge_full = bq27x00_battery_read_lmd(di);
+ }
+ cache.temperature = bq27x00_battery_read_temperature(di);
cache.cycle_count = bq27x00_battery_read_cyct(di);
- if (!is_bq27500)
- cache.current_now = bq27x00_read(di, BQ27x00_REG_AI, false);
-
/* We only have to read charge design full once */
if (di->charge_design_full <= 0)
di->charge_design_full = bq27x00_battery_read_ilmd(di);
}
- /* Ignore current_now which is a snapshot of the current battery state
- * and is likely to be different even between two consecutive reads */
- if (memcmp(&di->cache, &cache, sizeof(cache) - sizeof(int)) != 0) {
+ if (memcmp(&di->cache, &cache, sizeof(cache)) != 0) {
di->cache = cache;
power_supply_changed(&di->bat);
}
@@ -302,25 +360,6 @@ static void bq27x00_battery_poll(struct work_struct *work)
}
}
-
-/*
- * Return the battery temperature in tenths of degree Celsius
- * Or < 0 if something fails.
- */
-static int bq27x00_battery_temperature(struct bq27x00_device_info *di,
- union power_supply_propval *val)
-{
- if (di->cache.temperature < 0)
- return di->cache.temperature;
-
- if (di->chip == BQ27500)
- val->intval = di->cache.temperature - 2731;
- else
- val->intval = ((di->cache.temperature * 5) - 5463) / 2;
-
- return 0;
-}
-
/*
* Return the battery average current in µA
* Note that current can be negative signed as well
@@ -330,20 +369,20 @@ static int bq27x00_battery_current(struct bq27x00_device_info *di,
union power_supply_propval *val)
{
int curr;
+ int flags;
- if (di->chip == BQ27500)
- curr = bq27x00_read(di, BQ27x00_REG_AI, false);
- else
- curr = di->cache.current_now;
-
- if (curr < 0)
+ curr = bq27x00_read(di, BQ27x00_REG_AI, false);
+ if (curr < 0) {
+ dev_err(di->dev, "error reading current\n");
return curr;
+ }
if (di->chip == BQ27500) {
/* bq27500 returns signed value */
val->intval = (int)((s16)curr) * 1000;
} else {
- if (di->cache.flags & BQ27000_FLAG_CHGS) {
+ flags = bq27x00_read(di, BQ27x00_REG_FLAGS, false);
+ if (flags & BQ27000_FLAG_CHGS) {
dev_dbg(di->dev, "negative current!\n");
curr = -curr;
}
@@ -362,10 +401,14 @@ static int bq27x00_battery_status(struct bq27x00_device_info *di,
if (di->chip == BQ27500) {
if (di->cache.flags & BQ27500_FLAG_FC)
status = POWER_SUPPLY_STATUS_FULL;
- else if (di->cache.flags & BQ27500_FLAG_DSC)
+ else if (di->cache.flags & BQ27500_FLAG_DSG)
status = POWER_SUPPLY_STATUS_DISCHARGING;
- else
+ else if (di->cache.flags & BQ27500_FLAG_CHG)
status = POWER_SUPPLY_STATUS_CHARGING;
+ else if (power_supply_am_i_supplied(&di->bat))
+ status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ else
+ status = POWER_SUPPLY_STATUS_UNKNOWN;
} else {
if (di->cache.flags & BQ27000_FLAG_FC)
status = POWER_SUPPLY_STATUS_FULL;
@@ -382,50 +425,56 @@ static int bq27x00_battery_status(struct bq27x00_device_info *di,
return 0;
}
-/*
- * Return the battery Voltage in milivolts
- * Or < 0 if something fails.
- */
-static int bq27x00_battery_voltage(struct bq27x00_device_info *di,
+static int bq27x00_battery_capacity_level(struct bq27x00_device_info *di,
union power_supply_propval *val)
{
- int volt;
+ int level;
- volt = bq27x00_read(di, BQ27x00_REG_VOLT, false);
- if (volt < 0)
- return volt;
+ if (di->chip == BQ27500) {
+ if (di->cache.flags & BQ27500_FLAG_FC)
+ level = POWER_SUPPLY_CAPACITY_LEVEL_FULL;
+ else if (di->cache.flags & BQ27500_FLAG_SOC1)
+ level = POWER_SUPPLY_CAPACITY_LEVEL_LOW;
+ else if (di->cache.flags & BQ27500_FLAG_SOCF)
+ level = POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL;
+ else
+ level = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL;
+ } else {
+ if (di->cache.flags & BQ27000_FLAG_FC)
+ level = POWER_SUPPLY_CAPACITY_LEVEL_FULL;
+ else if (di->cache.flags & BQ27000_FLAG_EDV1)
+ level = POWER_SUPPLY_CAPACITY_LEVEL_LOW;
+ else if (di->cache.flags & BQ27000_FLAG_EDVF)
+ level = POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL;
+ else
+ level = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL;
+ }
- val->intval = volt * 1000;
+ val->intval = level;
return 0;
}
/*
- * Return the battery Available energy in µWh
+ * Return the battery Voltage in milivolts
* Or < 0 if something fails.
*/
-static int bq27x00_battery_energy(struct bq27x00_device_info *di,
+static int bq27x00_battery_voltage(struct bq27x00_device_info *di,
union power_supply_propval *val)
{
- int ae;
+ int volt;
- ae = bq27x00_read(di, BQ27x00_REG_AE, false);
- if (ae < 0) {
- dev_err(di->dev, "error reading available energy\n");
- return ae;
+ volt = bq27x00_read(di, BQ27x00_REG_VOLT, false);
+ if (volt < 0) {
+ dev_err(di->dev, "error reading voltage\n");
+ return volt;
}
- if (di->chip == BQ27500)
- ae *= 1000;
- else
- ae = ae * 29200 / BQ27000_RS;
-
- val->intval = ae;
+ val->intval = volt * 1000;
return 0;
}
-
static int bq27x00_simple_value(int value,
union power_supply_propval *val)
{
@@ -473,8 +522,11 @@ static int bq27x00_battery_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_CAPACITY:
ret = bq27x00_simple_value(di->cache.capacity, val);
break;
+ case POWER_SUPPLY_PROP_CAPACITY_LEVEL:
+ ret = bq27x00_battery_capacity_level(di, val);
+ break;
case POWER_SUPPLY_PROP_TEMP:
- ret = bq27x00_battery_temperature(di, val);
+ ret = bq27x00_simple_value(di->cache.temperature, val);
break;
case POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW:
ret = bq27x00_simple_value(di->cache.time_to_empty, val);
@@ -501,7 +553,7 @@ static int bq27x00_battery_get_property(struct power_supply *psy,
ret = bq27x00_simple_value(di->cache.cycle_count, val);
break;
case POWER_SUPPLY_PROP_ENERGY_NOW:
- ret = bq27x00_battery_energy(di, val);
+ ret = bq27x00_simple_value(di->cache.energy, val);
break;
default:
return -EINVAL;
@@ -546,6 +598,14 @@ static int bq27x00_powersupply_init(struct bq27x00_device_info *di)
static void bq27x00_powersupply_unregister(struct bq27x00_device_info *di)
{
+ /*
+ * power_supply_unregister calls bq27x00_battery_get_property, which
+ * calls bq27x00_battery_poll.
+ * Make sure that bq27x00_battery_poll will not call
+ * schedule_delayed_work again after unregister (which would cause an oops).
+ */
+ poll_interval = 0;
+
cancel_delayed_work_sync(&di->work);
power_supply_unregister(&di->bat);
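The temperature conversion that moved into bq27x00_battery_read_temperature() above is unchanged arithmetically; as a hedged worked example with illustrative raw values (the helper name is invented):

/*
 * BQ27000/BQ27200 report temperature in 0.25 K units, BQ27500 in 0.1 K.
 * Example: 22.35 degrees C is 295.5 K.
 *   bq27000: raw = 1182, ((1182 * 5) - 5463) / 2 = 447 / 2 = 223 -> 22.3 C
 *   bq27500: raw = 2955, 2955 - 2731 = 224                       -> 22.4 C
 * (the 0.1 degree difference is integer truncation)
 */
static int example_raw_to_tenths_celsius(int raw, bool is_bq27500)
{
	if (is_bq27500)
		return raw - 2731;
	return ((raw * 5) - 5463) / 2;
}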
diff --git a/drivers/power/charger-manager.c b/drivers/power/charger-manager.c
new file mode 100644
index 000000000000..0378d019efae
--- /dev/null
+++ b/drivers/power/charger-manager.c
@@ -0,0 +1,1072 @@
+/*
+ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ * MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This driver enables monitoring of battery health and control of the
+ * charger during suspend-to-mem.
+ * Charger manager depends on other devices; register it later than
+ * the devices it depends on.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+**/
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/rtc.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/platform_device.h>
+#include <linux/power/charger-manager.h>
+#include <linux/regulator/consumer.h>
+
+/*
+ * CM_JIFFIES_SMALL jiffies is regarded as small enough to ignore for
+ * delayed works, so delayed works scheduled CM_JIFFIES_SMALL jiffies
+ * ahead can run without any noticeable delay.
+ */
+#define CM_JIFFIES_SMALL (2)
+
+/* If y is valid (> 0) and smaller than x, do x = y */
+#define CM_MIN_VALID(x, y) x = (((y > 0) && ((x) > (y))) ? (y) : (x))
+
+/*
+ * CM_RTC_SMALL (in seconds) is regarded as small enough to ignore the error
+ * in invoking an RTC alarm. It should be 2 or larger.
+ */
+#define CM_RTC_SMALL (2)
+
+#define UEVENT_BUF_SIZE 32
+
+static LIST_HEAD(cm_list);
+static DEFINE_MUTEX(cm_list_mtx);
+
+/* About in-suspend (suspend-again) monitoring */
+static struct rtc_device *rtc_dev;
+/*
+ * Backup RTC alarm
+ * Save the wakeup alarm before entering suspend-to-RAM
+ */
+static struct rtc_wkalrm rtc_wkalarm_save;
+/* Backup RTC alarm time in terms of seconds since 01-01-1970 00:00:00 */
+static unsigned long rtc_wkalarm_save_time;
+static bool cm_suspended;
+static bool cm_rtc_set;
+static unsigned long cm_suspend_duration_ms;
+
+/* Global charger-manager description */
+static struct charger_global_desc *g_desc; /* init with setup_charger_manager */
+
+/**
+ * is_batt_present - See if the battery is present.
+ * @cm: the Charger Manager representing the battery.
+ */
+static bool is_batt_present(struct charger_manager *cm)
+{
+ union power_supply_propval val;
+ bool present = false;
+ int i, ret;
+
+ switch (cm->desc->battery_present) {
+ case CM_FUEL_GAUGE:
+ ret = cm->fuel_gauge->get_property(cm->fuel_gauge,
+ POWER_SUPPLY_PROP_PRESENT, &val);
+ if (ret == 0 && val.intval)
+ present = true;
+ break;
+ case CM_CHARGER_STAT:
+ for (i = 0; cm->charger_stat[i]; i++) {
+ ret = cm->charger_stat[i]->get_property(
+ cm->charger_stat[i],
+ POWER_SUPPLY_PROP_PRESENT, &val);
+ if (ret == 0 && val.intval) {
+ present = true;
+ break;
+ }
+ }
+ break;
+ }
+
+ return present;
+}
+
+/**
+ * is_ext_pwr_online - See if an external power source is attached to charge
+ * @cm: the Charger Manager representing the battery.
+ *
+ * Returns true if at least one of the chargers of the battery has an external
+ * power source attached to charge the battery regardless of whether it is
+ * actually charging or not.
+ */
+static bool is_ext_pwr_online(struct charger_manager *cm)
+{
+ union power_supply_propval val;
+ bool online = false;
+ int i, ret;
+
+ /* If at least one of them has one, it's yes. */
+ for (i = 0; cm->charger_stat[i]; i++) {
+ ret = cm->charger_stat[i]->get_property(
+ cm->charger_stat[i],
+ POWER_SUPPLY_PROP_ONLINE, &val);
+ if (ret == 0 && val.intval) {
+ online = true;
+ break;
+ }
+ }
+
+ return online;
+}
+
+/**
+ * get_batt_uV - Get the voltage level of the battery
+ * @cm: the Charger Manager representing the battery.
+ * @uV: the voltage level returned.
+ *
+ * Returns 0 if there is no error.
+ * Returns a negative value on error.
+ */
+static int get_batt_uV(struct charger_manager *cm, int *uV)
+{
+ union power_supply_propval val;
+ int ret;
+
+ if (cm->fuel_gauge)
+ ret = cm->fuel_gauge->get_property(cm->fuel_gauge,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW, &val);
+ else
+ return -ENODEV;
+
+ if (ret)
+ return ret;
+
+ *uV = val.intval;
+ return 0;
+}
+
+/**
+ * is_charging - Returns true if the battery is being charged.
+ * @cm: the Charger Manager representing the battery.
+ */
+static bool is_charging(struct charger_manager *cm)
+{
+ int i, ret;
+ bool charging = false;
+ union power_supply_propval val;
+
+ /* If there is no battery, it cannot be charged */
+ if (!is_batt_present(cm))
+ return false;
+
+ /* If at least one of the charger is charging, return yes */
+ for (i = 0; cm->charger_stat[i]; i++) {
+ /* 1. The charger should not be DISABLED */
+ if (cm->emergency_stop)
+ continue;
+ if (!cm->charger_enabled)
+ continue;
+
+ /* 2. The charger should be online (ext-power) */
+ ret = cm->charger_stat[i]->get_property(
+ cm->charger_stat[i],
+ POWER_SUPPLY_PROP_ONLINE, &val);
+ if (ret) {
+ dev_warn(cm->dev, "Cannot read ONLINE value from %s.\n",
+ cm->desc->psy_charger_stat[i]);
+ continue;
+ }
+ if (val.intval == 0)
+ continue;
+
+ /*
+ * 3. The charger should not be FULL, DISCHARGING,
+ * or NOT_CHARGING.
+ */
+ ret = cm->charger_stat[i]->get_property(
+ cm->charger_stat[i],
+ POWER_SUPPLY_PROP_STATUS, &val);
+ if (ret) {
+ dev_warn(cm->dev, "Cannot read STATUS value from %s.\n",
+ cm->desc->psy_charger_stat[i]);
+ continue;
+ }
+ if (val.intval == POWER_SUPPLY_STATUS_FULL ||
+ val.intval == POWER_SUPPLY_STATUS_DISCHARGING ||
+ val.intval == POWER_SUPPLY_STATUS_NOT_CHARGING)
+ continue;
+
+ /* Then, this is charging. */
+ charging = true;
+ break;
+ }
+
+ return charging;
+}
+
+/**
+ * is_polling_required - Return true if we need to continue polling for this CM.
+ * @cm: the Charger Manager representing the battery.
+ */
+static bool is_polling_required(struct charger_manager *cm)
+{
+ switch (cm->desc->polling_mode) {
+ case CM_POLL_DISABLE:
+ return false;
+ case CM_POLL_ALWAYS:
+ return true;
+ case CM_POLL_EXTERNAL_POWER_ONLY:
+ return is_ext_pwr_online(cm);
+ case CM_POLL_CHARGING_ONLY:
+ return is_charging(cm);
+ default:
+ dev_warn(cm->dev, "Incorrect polling_mode (%d)\n",
+ cm->desc->polling_mode);
+ }
+
+ return false;
+}
+
+/**
+ * try_charger_enable - Enable/Disable chargers altogether
+ * @cm: the Charger Manager representing the battery.
+ * @enable: true: enable / false: disable
+ *
+ * Note that Charger Manager keeps the charger enabled regardless of whether
+ * the charger is charging or not (because the battery is full or no external
+ * power source exists), except when CM needs to disable chargers forcibly
+ * because of emergency conditions, e.g. when the battery is overheated or too cold.
+ */
+static int try_charger_enable(struct charger_manager *cm, bool enable)
+{
+ int err = 0, i;
+ struct charger_desc *desc = cm->desc;
+
+ /* Ignore if it's a redundant command */
+ if (enable && cm->charger_enabled)
+ return 0;
+ if (!enable && !cm->charger_enabled)
+ return 0;
+
+ if (enable) {
+ if (cm->emergency_stop)
+ return -EAGAIN;
+ err = regulator_bulk_enable(desc->num_charger_regulators,
+ desc->charger_regulators);
+ } else {
+ /*
+ * Abnormal battery state - Stop charging forcibly,
+ * even if charger was enabled at the other places
+ */
+ err = regulator_bulk_disable(desc->num_charger_regulators,
+ desc->charger_regulators);
+
+ for (i = 0; i < desc->num_charger_regulators; i++) {
+ if (regulator_is_enabled(
+ desc->charger_regulators[i].consumer)) {
+ regulator_force_disable(
+ desc->charger_regulators[i].consumer);
+ dev_warn(cm->dev,
+ "Disable regulator(%s) forcibly.\n",
+ desc->charger_regulators[i].supply);
+ }
+ }
+ }
+
+ if (!err)
+ cm->charger_enabled = enable;
+
+ return err;
+}
+
+/**
+ * uevent_notify - Let users know something has changed.
+ * @cm: the Charger Manager representing the battery.
+ * @event: the event string.
+ *
+ * If @event is null, uevent_notify is being called from the resume path.
+ * In that case, cm_suspended should already have been reset to false so
+ * that uevent_notify can report to users the most recent event that
+ * occurred during suspend. While suspended, uevent_notify does not
+ * notify users; it only tracks events so that they can be reported
+ * after resume.
+ */
+static void uevent_notify(struct charger_manager *cm, const char *event)
+{
+ static char env_str[UEVENT_BUF_SIZE + 1] = "";
+ static char env_str_save[UEVENT_BUF_SIZE + 1] = "";
+
+ if (cm_suspended) {
+ /* Nothing in suspended-event buffer */
+ if (env_str_save[0] == 0) {
+ if (!strncmp(env_str, event, UEVENT_BUF_SIZE))
+ return; /* status not changed */
+ strncpy(env_str_save, event, UEVENT_BUF_SIZE);
+ return;
+ }
+
+ if (!strncmp(env_str_save, event, UEVENT_BUF_SIZE))
+ return; /* Duplicated. */
+ else
+ strncpy(env_str_save, event, UEVENT_BUF_SIZE);
+
+ return;
+ }
+
+ if (event == NULL) {
+ /* No messages pending */
+ if (!env_str_save[0])
+ return;
+
+ strncpy(env_str, env_str_save, UEVENT_BUF_SIZE);
+ kobject_uevent(&cm->dev->kobj, KOBJ_CHANGE);
+ env_str_save[0] = 0;
+
+ return;
+ }
+
+ /* status not changed */
+ if (!strncmp(env_str, event, UEVENT_BUF_SIZE))
+ return;
+
+ /* save the status and notify the update */
+ strncpy(env_str, event, UEVENT_BUF_SIZE);
+ kobject_uevent(&cm->dev->kobj, KOBJ_CHANGE);
+
+ dev_info(cm->dev, "%s\n", event);
+}
+
+/**
+ * _cm_monitor - Monitor the temperature and return true for exceptions.
+ * @cm: the Charger Manager representing the battery.
+ *
+ * Returns true if there is an event to notify for the battery.
+ * (True if the status of "emergency_stop" changes)
+ */
+static bool _cm_monitor(struct charger_manager *cm)
+{
+ struct charger_desc *desc = cm->desc;
+ int temp = desc->temperature_out_of_range(&cm->last_temp_mC);
+
+ dev_dbg(cm->dev, "monitoring (%2.2d.%3.3dC)\n",
+ cm->last_temp_mC / 1000, cm->last_temp_mC % 1000);
+
+ /* It has been stopped or charging already */
+ if (!!temp == !!cm->emergency_stop)
+ return false;
+
+ if (temp) {
+ cm->emergency_stop = temp;
+ if (!try_charger_enable(cm, false)) {
+ if (temp > 0)
+ uevent_notify(cm, "OVERHEAT");
+ else
+ uevent_notify(cm, "COLD");
+ }
+ } else {
+ cm->emergency_stop = 0;
+ if (!try_charger_enable(cm, true))
+ uevent_notify(cm, "CHARGING");
+ }
+
+ return true;
+}
+
+/**
+ * cm_monitor - Monitor every battery.
+ *
+ * Returns true if there is an event to notify from any of the batteries.
+ * (True if the status of "emergency_stop" changes)
+ */
+static bool cm_monitor(void)
+{
+ bool stop = false;
+ struct charger_manager *cm;
+
+ mutex_lock(&cm_list_mtx);
+
+ list_for_each_entry(cm, &cm_list, entry)
+ stop = stop || _cm_monitor(cm);
+
+ mutex_unlock(&cm_list_mtx);
+
+ return stop;
+}
+
+static int charger_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct charger_manager *cm = container_of(psy,
+ struct charger_manager, charger_psy);
+ struct charger_desc *desc = cm->desc;
+ int i, ret = 0, uV;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ if (is_charging(cm))
+ val->intval = POWER_SUPPLY_STATUS_CHARGING;
+ else if (is_ext_pwr_online(cm))
+ val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ else
+ val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+ break;
+ case POWER_SUPPLY_PROP_HEALTH:
+ if (cm->emergency_stop > 0)
+ val->intval = POWER_SUPPLY_HEALTH_OVERHEAT;
+ else if (cm->emergency_stop < 0)
+ val->intval = POWER_SUPPLY_HEALTH_COLD;
+ else
+ val->intval = POWER_SUPPLY_HEALTH_GOOD;
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ if (is_batt_present(cm))
+ val->intval = 1;
+ else
+ val->intval = 0;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ ret = get_batt_uV(cm, &i);
+ val->intval = i;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ ret = cm->fuel_gauge->get_property(cm->fuel_gauge,
+ POWER_SUPPLY_PROP_CURRENT_NOW, val);
+ break;
+ case POWER_SUPPLY_PROP_TEMP:
+ /* in tenths of a degree centigrade */
+ if (cm->last_temp_mC == INT_MIN)
+ desc->temperature_out_of_range(&cm->last_temp_mC);
+ val->intval = cm->last_temp_mC / 100;
+ if (!desc->measure_battery_temp)
+ ret = -ENODEV;
+ break;
+ case POWER_SUPPLY_PROP_TEMP_AMBIENT:
+ /* in tenths of a degree centigrade */
+ if (cm->last_temp_mC == INT_MIN)
+ desc->temperature_out_of_range(&cm->last_temp_mC);
+ val->intval = cm->last_temp_mC / 100;
+ if (desc->measure_battery_temp)
+ ret = -ENODEV;
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ if (!cm->fuel_gauge) {
+ ret = -ENODEV;
+ break;
+ }
+
+ if (!is_batt_present(cm)) {
+ /* There is no battery. Assume 100% */
+ val->intval = 100;
+ break;
+ }
+
+ ret = cm->fuel_gauge->get_property(cm->fuel_gauge,
+ POWER_SUPPLY_PROP_CAPACITY, val);
+ if (ret)
+ break;
+
+ if (val->intval > 100) {
+ val->intval = 100;
+ break;
+ }
+ if (val->intval < 0)
+ val->intval = 0;
+
+ /* Do not adjust SOC when charging: voltage is overrated */
+ if (is_charging(cm))
+ break;
+
+ /*
+ * If the capacity value is inconsistent, calibrate it based on
+ * the battery voltage values and the thresholds given in desc
+ */
+ ret = get_batt_uV(cm, &uV);
+ if (ret) {
+ /* Voltage information not available. No calibration */
+ ret = 0;
+ break;
+ }
+
+ if (desc->fullbatt_uV > 0 && uV >= desc->fullbatt_uV &&
+ !is_charging(cm)) {
+ val->intval = 100;
+ break;
+ }
+
+ break;
+ case POWER_SUPPLY_PROP_ONLINE:
+ if (is_ext_pwr_online(cm))
+ val->intval = 1;
+ else
+ val->intval = 0;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL:
+ if (cm->fuel_gauge) {
+ if (cm->fuel_gauge->get_property(cm->fuel_gauge,
+ POWER_SUPPLY_PROP_CHARGE_FULL, val) == 0)
+ break;
+ }
+
+ if (is_ext_pwr_online(cm)) {
+ /* Not full if it's charging. */
+ if (is_charging(cm)) {
+ val->intval = 0;
+ break;
+ }
+ /*
+ * Full if it's powered but not charging and
+ * not forcibly stopped by an emergency
+ */
+ if (!cm->emergency_stop) {
+ val->intval = 1;
+ break;
+ }
+ }
+
+ /* Full if it's over the fullbatt voltage */
+ ret = get_batt_uV(cm, &uV);
+ if (!ret && desc->fullbatt_uV > 0 && uV >= desc->fullbatt_uV &&
+ !is_charging(cm)) {
+ val->intval = 1;
+ break;
+ }
+
+ /* Full if the cap is 100 */
+ if (cm->fuel_gauge) {
+ ret = cm->fuel_gauge->get_property(cm->fuel_gauge,
+ POWER_SUPPLY_PROP_CAPACITY, val);
+ if (!ret && val->intval >= 100 && !is_charging(cm)) {
+ val->intval = 1;
+ break;
+ }
+ }
+
+ val->intval = 0;
+ ret = 0;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_NOW:
+ if (is_charging(cm)) {
+ ret = cm->fuel_gauge->get_property(cm->fuel_gauge,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ val);
+ if (ret) {
+ val->intval = 1;
+ ret = 0;
+ } else {
+ /* If CHARGE_NOW is supplied, use it */
+ val->intval = (val->intval > 0) ?
+ val->intval : 1;
+ }
+ } else {
+ val->intval = 0;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ return ret;
+}
+
+#define NUM_CHARGER_PSY_OPTIONAL (4)
+static enum power_supply_property default_charger_props[] = {
+ /* Guaranteed to provide */
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ /*
+ * Optional properties are:
+ * POWER_SUPPLY_PROP_CHARGE_NOW,
+ * POWER_SUPPLY_PROP_CURRENT_NOW,
+ * POWER_SUPPLY_PROP_TEMP, and
+ * POWER_SUPPLY_PROP_TEMP_AMBIENT,
+ */
+};
+
+static struct power_supply psy_default = {
+ .name = "battery",
+ .type = POWER_SUPPLY_TYPE_BATTERY,
+ .properties = default_charger_props,
+ .num_properties = ARRAY_SIZE(default_charger_props),
+ .get_property = charger_get_property,
+};
+
+/**
+ * cm_setup_timer - Set up a wakeup alarm for in-suspend monitoring
+ * (suspend_again).
+ *
+ * Returns true if the alarm is set for Charger Manager to use.
+ * Returns false if
+ * cm_setup_timer fails to set an alarm,
+ * cm_setup_timer does not need to set an alarm for Charger Manager,
+ * or an alarm previously configured is to be used.
+ */
+static bool cm_setup_timer(void)
+{
+ struct charger_manager *cm;
+ unsigned int wakeup_ms = UINT_MAX;
+ bool ret = false;
+
+ mutex_lock(&cm_list_mtx);
+
+ list_for_each_entry(cm, &cm_list, entry) {
+ /* Skip if polling is not required for this CM */
+ if (!is_polling_required(cm) && !cm->emergency_stop)
+ continue;
+ if (cm->desc->polling_interval_ms == 0)
+ continue;
+ CM_MIN_VALID(wakeup_ms, cm->desc->polling_interval_ms);
+ }
+
+ mutex_unlock(&cm_list_mtx);
+
+ if (wakeup_ms < UINT_MAX && wakeup_ms > 0) {
+ pr_info("Charger Manager wakeup timer: %u ms.\n", wakeup_ms);
+ if (rtc_dev) {
+ struct rtc_wkalrm tmp;
+ unsigned long time, now;
+ unsigned long add = DIV_ROUND_UP(wakeup_ms, 1000);
+
+ /*
+ * Set alarm with the polling interval (wakeup_ms)
+ * except when rtc_wkalarm_save comes first.
+ * However, the alarm time should be NOW +
+ * CM_RTC_SMALL or later.
+ */
+ tmp.enabled = 1;
+ rtc_read_time(rtc_dev, &tmp.time);
+ rtc_tm_to_time(&tmp.time, &now);
+ if (add < CM_RTC_SMALL)
+ add = CM_RTC_SMALL;
+ time = now + add;
+
+ ret = true;
+
+ if (rtc_wkalarm_save.enabled &&
+ rtc_wkalarm_save_time &&
+ rtc_wkalarm_save_time < time) {
+ if (rtc_wkalarm_save_time < now + CM_RTC_SMALL)
+ time = now + CM_RTC_SMALL;
+ else
+ time = rtc_wkalarm_save_time;
+
+ /* The timer is not appointed by CM */
+ ret = false;
+ }
+
+ pr_info("Waking up after %lu secs.\n",
+ time - now);
+
+ rtc_time_to_tm(time, &tmp.time);
+ rtc_set_alarm(rtc_dev, &tmp);
+ cm_suspend_duration_ms += wakeup_ms;
+ return ret;
+ }
+ }
+
+ if (rtc_dev)
+ rtc_set_alarm(rtc_dev, &rtc_wkalarm_save);
+ return false;
+}
+
+/**
+ * cm_suspend_again - Determine whether to suspend again or not
+ *
+ * Returns true if the system should be suspended again
+ * Returns false if the system should be woken up
+ */
+bool cm_suspend_again(void)
+{
+ struct charger_manager *cm;
+ bool ret = false;
+
+ if (!g_desc || !g_desc->rtc_only_wakeup || !g_desc->rtc_only_wakeup() ||
+ !cm_rtc_set)
+ return false;
+
+ if (cm_monitor())
+ goto out;
+
+ ret = true;
+ mutex_lock(&cm_list_mtx);
+ list_for_each_entry(cm, &cm_list, entry) {
+ if (cm->status_save_ext_pwr_inserted != is_ext_pwr_online(cm) ||
+ cm->status_save_batt != is_batt_present(cm))
+ ret = false;
+ }
+ mutex_unlock(&cm_list_mtx);
+
+ cm_rtc_set = cm_setup_timer();
+out:
+ /* It's about the time when the non-CM appointed timer goes off */
+ if (rtc_wkalarm_save.enabled) {
+ unsigned long now;
+ struct rtc_time tmp;
+
+ rtc_read_time(rtc_dev, &tmp);
+ rtc_tm_to_time(&tmp, &now);
+
+ if (rtc_wkalarm_save_time &&
+ now + CM_RTC_SMALL >= rtc_wkalarm_save_time)
+ return false;
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cm_suspend_again);
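For illustration only (this wiring is not part of the patch), a platform that wants in-suspend monitoring would hand cm_suspend_again() to the suspend core as the suspend_again callback; example_pm_enter is a hypothetical platform hook, the other names are standard suspend API symbols:

    /* Illustrative board code, not part of this patch. */
    static const struct platform_suspend_ops example_pm_ops = {
    	.valid		= suspend_valid_only_mem,
    	.enter		= example_pm_enter,	/* hypothetical platform hook */
    	.suspend_again	= cm_suspend_again,	/* let CM decide whether to re-suspend */
    };

    /* Then, in machine init: suspend_set_ops(&example_pm_ops); */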
+
+/**
+ * setup_charger_manager - initialize charger_global_desc data
+ * @gd: pointer to instance of charger_global_desc
+ */
+int setup_charger_manager(struct charger_global_desc *gd)
+{
+ if (!gd)
+ return -EINVAL;
+
+ if (rtc_dev)
+ rtc_class_close(rtc_dev);
+ rtc_dev = NULL;
+ g_desc = NULL;
+
+ if (!gd->rtc_only_wakeup) {
+ pr_err("The callback rtc_only_wakeup is not given.\n");
+ return -EINVAL;
+ }
+
+ if (gd->rtc_name) {
+ rtc_dev = rtc_class_open(gd->rtc_name);
+ if (IS_ERR_OR_NULL(rtc_dev)) {
+ rtc_dev = NULL;
+ /* Retry at probe. RTC may not be registered yet */
+ }
+ } else {
+ pr_warn("No wakeup timer is given for charger manager."
+ "In-suspend monitoring won't work.\n");
+ }
+
+ g_desc = gd;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(setup_charger_manager);
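A hedged sketch of the board-side call (names and the wakeup flag are hypothetical; rtc_name and rtc_only_wakeup are the only fields this function checks):

    /* Illustrative machine-init code, not part of this patch. */
    static bool example_rtc_only_wakeup(void)
    {
    	/* Report whether the last wakeup came from the RTC alone. */
    	return example_wakeup_was_rtc;	/* assumed platform-specific flag */
    }

    static struct charger_global_desc example_cm_global_desc = {
    	.rtc_name	 = "rtc0",
    	.rtc_only_wakeup = example_rtc_only_wakeup,
    };

    /* Early in machine init: setup_charger_manager(&example_cm_global_desc); */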
+
+static int charger_manager_probe(struct platform_device *pdev)
+{
+ struct charger_desc *desc = dev_get_platdata(&pdev->dev);
+ struct charger_manager *cm;
+ int ret = 0, i = 0;
+ union power_supply_propval val;
+
+ if (g_desc && !rtc_dev && g_desc->rtc_name) {
+ rtc_dev = rtc_class_open(g_desc->rtc_name);
+ if (IS_ERR_OR_NULL(rtc_dev)) {
+ rtc_dev = NULL;
+ dev_err(&pdev->dev, "Cannot get RTC %s.\n",
+ g_desc->rtc_name);
+ ret = -ENODEV;
+ goto err_alloc;
+ }
+ }
+
+ if (!desc) {
+ dev_err(&pdev->dev, "No platform data (desc) found.\n");
+ ret = -ENODEV;
+ goto err_alloc;
+ }
+
+ cm = kzalloc(sizeof(struct charger_manager), GFP_KERNEL);
+ if (!cm) {
+ dev_err(&pdev->dev, "Cannot allocate memory.\n");
+ ret = -ENOMEM;
+ goto err_alloc;
+ }
+
+ /* Basic Values. Unspecified are Null or 0 */
+ cm->dev = &pdev->dev;
+ cm->desc = kzalloc(sizeof(struct charger_desc), GFP_KERNEL);
+ if (!cm->desc) {
+ dev_err(&pdev->dev, "Cannot allocate memory.\n");
+ ret = -ENOMEM;
+ goto err_alloc_desc;
+ }
+ memcpy(cm->desc, desc, sizeof(struct charger_desc));
+ cm->last_temp_mC = INT_MIN; /* denotes "unmeasured, yet" */
+
+ if (!desc->charger_regulators || desc->num_charger_regulators < 1) {
+ ret = -EINVAL;
+ dev_err(&pdev->dev, "charger_regulators undefined.\n");
+ goto err_no_charger;
+ }
+
+ if (!desc->psy_charger_stat || !desc->psy_charger_stat[0]) {
+ dev_err(&pdev->dev, "No power supply defined.\n");
+ ret = -EINVAL;
+ goto err_no_charger_stat;
+ }
+
+ /* Counting index only */
+ while (desc->psy_charger_stat[i])
+ i++;
+
+ cm->charger_stat = kzalloc(sizeof(struct power_supply *) * (i + 1),
+ GFP_KERNEL);
+ if (!cm->charger_stat) {
+ ret = -ENOMEM;
+ goto err_no_charger_stat;
+ }
+
+ for (i = 0; desc->psy_charger_stat[i]; i++) {
+ cm->charger_stat[i] = power_supply_get_by_name(
+ desc->psy_charger_stat[i]);
+ if (!cm->charger_stat[i]) {
+ dev_err(&pdev->dev, "Cannot find power supply "
+ "\"%s\"\n",
+ desc->psy_charger_stat[i]);
+ ret = -ENODEV;
+ goto err_chg_stat;
+ }
+ }
+
+ cm->fuel_gauge = power_supply_get_by_name(desc->psy_fuel_gauge);
+ if (!cm->fuel_gauge) {
+ dev_err(&pdev->dev, "Cannot find power supply \"%s\"\n",
+ desc->psy_fuel_gauge);
+ ret = -ENODEV;
+ goto err_chg_stat;
+ }
+
+ if (desc->polling_interval_ms == 0 ||
+ msecs_to_jiffies(desc->polling_interval_ms) <= CM_JIFFIES_SMALL) {
+ dev_err(&pdev->dev, "polling_interval_ms is too small\n");
+ ret = -EINVAL;
+ goto err_chg_stat;
+ }
+
+ if (!desc->temperature_out_of_range) {
+ dev_err(&pdev->dev, "there is no temperature_out_of_range\n");
+ ret = -EINVAL;
+ goto err_chg_stat;
+ }
+
+ platform_set_drvdata(pdev, cm);
+
+ memcpy(&cm->charger_psy, &psy_default,
+ sizeof(psy_default));
+ if (!desc->psy_name) {
+ strncpy(cm->psy_name_buf, psy_default.name,
+ PSY_NAME_MAX);
+ } else {
+ strncpy(cm->psy_name_buf, desc->psy_name, PSY_NAME_MAX);
+ }
+ cm->charger_psy.name = cm->psy_name_buf;
+
+ /* Allocate for psy properties because they may vary */
+ cm->charger_psy.properties = kzalloc(sizeof(enum power_supply_property)
+ * (ARRAY_SIZE(default_charger_props) +
+ NUM_CHARGER_PSY_OPTIONAL),
+ GFP_KERNEL);
+ if (!cm->charger_psy.properties) {
+ dev_err(&pdev->dev, "Cannot allocate for psy properties.\n");
+ ret = -ENOMEM;
+ goto err_chg_stat;
+ }
+ memcpy(cm->charger_psy.properties, default_charger_props,
+ sizeof(enum power_supply_property) *
+ ARRAY_SIZE(default_charger_props));
+ cm->charger_psy.num_properties = psy_default.num_properties;
+
+ /* Find which optional psy-properties are available */
+ if (!cm->fuel_gauge->get_property(cm->fuel_gauge,
+ POWER_SUPPLY_PROP_CHARGE_NOW, &val)) {
+ cm->charger_psy.properties[cm->charger_psy.num_properties] =
+ POWER_SUPPLY_PROP_CHARGE_NOW;
+ cm->charger_psy.num_properties++;
+ }
+ if (!cm->fuel_gauge->get_property(cm->fuel_gauge,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ &val)) {
+ cm->charger_psy.properties[cm->charger_psy.num_properties] =
+ POWER_SUPPLY_PROP_CURRENT_NOW;
+ cm->charger_psy.num_properties++;
+ }
+ if (!desc->measure_battery_temp) {
+ cm->charger_psy.properties[cm->charger_psy.num_properties] =
+ POWER_SUPPLY_PROP_TEMP_AMBIENT;
+ cm->charger_psy.num_properties++;
+ }
+ if (desc->measure_battery_temp) {
+ cm->charger_psy.properties[cm->charger_psy.num_properties] =
+ POWER_SUPPLY_PROP_TEMP;
+ cm->charger_psy.num_properties++;
+ }
+
+ ret = power_supply_register(NULL, &cm->charger_psy);
+ if (ret) {
+ dev_err(&pdev->dev, "Cannot register charger-manager with"
+ " name \"%s\".\n", cm->charger_psy.name);
+ goto err_register;
+ }
+
+ ret = regulator_bulk_get(&pdev->dev, desc->num_charger_regulators,
+ desc->charger_regulators);
+ if (ret) {
+ dev_err(&pdev->dev, "Cannot get charger regulators.\n");
+ goto err_bulk_get;
+ }
+
+ ret = try_charger_enable(cm, true);
+ if (ret) {
+ dev_err(&pdev->dev, "Cannot enable charger regulators\n");
+ goto err_chg_enable;
+ }
+
+ /* Add to the list */
+ mutex_lock(&cm_list_mtx);
+ list_add(&cm->entry, &cm_list);
+ mutex_unlock(&cm_list_mtx);
+
+ return 0;
+
+err_chg_enable:
+ if (desc->charger_regulators)
+ regulator_bulk_free(desc->num_charger_regulators,
+ desc->charger_regulators);
+err_bulk_get:
+ power_supply_unregister(&cm->charger_psy);
+err_register:
+ kfree(cm->charger_psy.properties);
+err_chg_stat:
+ kfree(cm->charger_stat);
+err_no_charger_stat:
+err_no_charger:
+ kfree(cm->desc);
+err_alloc_desc:
+ kfree(cm);
+err_alloc:
+ return ret;
+}
+
+static int __devexit charger_manager_remove(struct platform_device *pdev)
+{
+ struct charger_manager *cm = platform_get_drvdata(pdev);
+ struct charger_desc *desc = cm->desc;
+
+ /* Remove from the list */
+ mutex_lock(&cm_list_mtx);
+ list_del(&cm->entry);
+ mutex_unlock(&cm_list_mtx);
+
+ if (desc->charger_regulators)
+ regulator_bulk_free(desc->num_charger_regulators,
+ desc->charger_regulators);
+
+ power_supply_unregister(&cm->charger_psy);
+ kfree(cm->charger_psy.properties);
+ kfree(cm->charger_stat);
+ kfree(cm->desc);
+ kfree(cm);
+
+ return 0;
+}
+
+const struct platform_device_id charger_manager_id[] = {
+ { "charger-manager", 0 },
+ { },
+};
+
+static int cm_suspend_prepare(struct device *dev)
+{
+ struct platform_device *pdev = container_of(dev, struct platform_device,
+ dev);
+ struct charger_manager *cm = platform_get_drvdata(pdev);
+
+ if (!cm_suspended) {
+ if (rtc_dev) {
+ struct rtc_time tmp;
+ unsigned long now;
+
+ rtc_read_alarm(rtc_dev, &rtc_wkalarm_save);
+ rtc_read_time(rtc_dev, &tmp);
+
+ if (rtc_wkalarm_save.enabled) {
+ rtc_tm_to_time(&rtc_wkalarm_save.time,
+ &rtc_wkalarm_save_time);
+ rtc_tm_to_time(&tmp, &now);
+ if (now > rtc_wkalarm_save_time)
+ rtc_wkalarm_save_time = 0;
+ } else {
+ rtc_wkalarm_save_time = 0;
+ }
+ }
+ cm_suspended = true;
+ }
+
+ cm->status_save_ext_pwr_inserted = is_ext_pwr_online(cm);
+ cm->status_save_batt = is_batt_present(cm);
+
+ if (!cm_rtc_set) {
+ cm_suspend_duration_ms = 0;
+ cm_rtc_set = cm_setup_timer();
+ }
+
+ return 0;
+}
+
+static void cm_suspend_complete(struct device *dev)
+{
+ struct platform_device *pdev = container_of(dev, struct platform_device,
+ dev);
+ struct charger_manager *cm = platform_get_drvdata(pdev);
+
+ if (cm_suspended) {
+ if (rtc_dev) {
+ struct rtc_wkalrm tmp;
+
+ rtc_read_alarm(rtc_dev, &tmp);
+ rtc_wkalarm_save.pending = tmp.pending;
+ rtc_set_alarm(rtc_dev, &rtc_wkalarm_save);
+ }
+ cm_suspended = false;
+ cm_rtc_set = false;
+ }
+
+ uevent_notify(cm, NULL);
+}
+
+static const struct dev_pm_ops charger_manager_pm = {
+ .prepare = cm_suspend_prepare,
+ .complete = cm_suspend_complete,
+};
+
+static struct platform_driver charger_manager_driver = {
+ .driver = {
+ .name = "charger-manager",
+ .owner = THIS_MODULE,
+ .pm = &charger_manager_pm,
+ },
+ .probe = charger_manager_probe,
+ .remove = __devexit_p(charger_manager_remove),
+ .id_table = charger_manager_id,
+};
+
+static int __init charger_manager_init(void)
+{
+ return platform_driver_register(&charger_manager_driver);
+}
+late_initcall(charger_manager_init);
+
+static void __exit charger_manager_cleanup(void)
+{
+ platform_driver_unregister(&charger_manager_driver);
+}
+module_exit(charger_manager_cleanup);
+
+MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
+MODULE_DESCRIPTION("Charger Manager");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("charger-manager");
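To show how the probe code above is fed, here is a sketch of the platform data a board might attach to a "charger-manager" platform device. Every field name comes from the code in this file; the concrete values, supply name, and the temperature callback are assumptions:

    /* Illustrative platform data, not part of this patch. */
    static int example_temp_out_of_range(int *mC)
    {
    	*mC = 25000;	/* pretend the battery sits at 25.000 C */
    	return 0;	/* 0: in range, > 0: overheated, < 0: too cold */
    }

    static char *example_charger_stats[] = { "example-charger", NULL };

    static struct regulator_bulk_data example_charger_regs[] = {
    	{ .supply = "vinchg1" },	/* assumed supply name */
    };

    static struct charger_desc example_cm_desc = {
    	.psy_name		  = "battery",
    	.polling_mode		  = CM_POLL_EXTERNAL_POWER_ONLY,
    	.polling_interval_ms	  = 30000,
    	.fullbatt_uV		  = 4200000,
    	.psy_charger_stat	  = example_charger_stats,
    	.psy_fuel_gauge		  = "example-fuel-gauge",
    	.temperature_out_of_range = example_temp_out_of_range,
    	.measure_battery_temp	  = true,
    	.charger_regulators	  = example_charger_regs,
    	.num_charger_regulators	  = ARRAY_SIZE(example_charger_regs),
    };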
diff --git a/drivers/power/collie_battery.c b/drivers/power/collie_battery.c
index 548d263b1ad0..74c6b23aeabf 100644
--- a/drivers/power/collie_battery.c
+++ b/drivers/power/collie_battery.c
@@ -146,7 +146,7 @@ static void collie_bat_external_power_changed(struct power_supply *psy)
static irqreturn_t collie_bat_gpio_isr(int irq, void *data)
{
- pr_info("collie_bat_gpio irq: %d\n", gpio_get_value(irq_to_gpio(irq)));
+ pr_info("collie_bat_gpio irq\n");
schedule_work(&bat_work);
return IRQ_HANDLED;
}
@@ -277,18 +277,13 @@ static struct collie_bat collie_bat_bu = {
.adc_temp_divider = -1,
};
-static struct {
- int gpio;
- char *name;
- bool output;
- int value;
-} gpios[] = {
- { COLLIE_GPIO_CO, "main battery full", 0, 0 },
- { COLLIE_GPIO_MAIN_BAT_LOW, "main battery low", 0, 0 },
- { COLLIE_GPIO_CHARGE_ON, "main charge on", 1, 0 },
- { COLLIE_GPIO_MBAT_ON, "main battery", 1, 0 },
- { COLLIE_GPIO_TMP_ON, "main battery temp", 1, 0 },
- { COLLIE_GPIO_BBAT_ON, "backup battery", 1, 0 },
+static struct gpio collie_batt_gpios[] = {
+ { COLLIE_GPIO_CO, GPIOF_IN, "main battery full" },
+ { COLLIE_GPIO_MAIN_BAT_LOW, GPIOF_IN, "main battery low" },
+ { COLLIE_GPIO_CHARGE_ON, GPIOF_OUT_INIT_LOW, "main charge on" },
+ { COLLIE_GPIO_MBAT_ON, GPIOF_OUT_INIT_LOW, "main battery" },
+ { COLLIE_GPIO_TMP_ON, GPIOF_OUT_INIT_LOW, "main battery temp" },
+ { COLLIE_GPIO_BBAT_ON, GPIOF_OUT_INIT_LOW, "backup battery" },
};
#ifdef CONFIG_PM
@@ -313,29 +308,16 @@ static int collie_bat_resume(struct ucb1x00_dev *dev)
static int __devinit collie_bat_probe(struct ucb1x00_dev *dev)
{
int ret;
- int i;
if (!machine_is_collie())
return -ENODEV;
ucb = dev->ucb;
- for (i = 0; i < ARRAY_SIZE(gpios); i++) {
- ret = gpio_request(gpios[i].gpio, gpios[i].name);
- if (ret) {
- i--;
- goto err_gpio;
- }
-
- if (gpios[i].output)
- ret = gpio_direction_output(gpios[i].gpio,
- gpios[i].value);
- else
- ret = gpio_direction_input(gpios[i].gpio);
-
- if (ret)
- goto err_gpio;
- }
+ ret = gpio_request_array(collie_batt_gpios,
+ ARRAY_SIZE(collie_batt_gpios));
+ if (ret)
+ return ret;
mutex_init(&collie_bat_main.work_lock);
@@ -363,19 +345,12 @@ err_psy_reg_main:
/* see comment in collie_bat_remove */
cancel_work_sync(&bat_work);
-
- i--;
-err_gpio:
- for (; i >= 0; i--)
- gpio_free(gpios[i].gpio);
-
+ gpio_free_array(collie_batt_gpios, ARRAY_SIZE(collie_batt_gpios));
return ret;
}
static void __devexit collie_bat_remove(struct ucb1x00_dev *dev)
{
- int i;
-
free_irq(gpio_to_irq(COLLIE_GPIO_CO), &collie_bat_main);
power_supply_unregister(&collie_bat_bu.psy);
@@ -387,9 +362,7 @@ static void __devexit collie_bat_remove(struct ucb1x00_dev *dev)
* unregistered now.
*/
cancel_work_sync(&bat_work);
-
- for (i = ARRAY_SIZE(gpios) - 1; i >= 0; i--)
- gpio_free(gpios[i].gpio);
+ gpio_free_array(collie_batt_gpios, ARRAY_SIZE(collie_batt_gpios));
}
static struct ucb1x00_driver collie_bat_driver = {
diff --git a/drivers/power/da9030_battery.c b/drivers/power/da9030_battery.c
index d2c793cf6765..3fd3e95d2b85 100644
--- a/drivers/power/da9030_battery.c
+++ b/drivers/power/da9030_battery.c
@@ -588,18 +588,7 @@ static struct platform_driver da903x_battery_driver = {
.remove = da9030_battery_remove,
};
-static int da903x_battery_init(void)
-{
- return platform_driver_register(&da903x_battery_driver);
-}
-
-static void da903x_battery_exit(void)
-{
- platform_driver_unregister(&da903x_battery_driver);
-}
-
-module_init(da903x_battery_init);
-module_exit(da903x_battery_exit);
+module_platform_driver(da903x_battery_driver);
MODULE_DESCRIPTION("DA9030 battery charger driver");
MODULE_AUTHOR("Mike Rapoport, CompuLab");
diff --git a/drivers/power/da9052-battery.c b/drivers/power/da9052-battery.c
new file mode 100644
index 000000000000..e8ea47a53dee
--- /dev/null
+++ b/drivers/power/da9052-battery.c
@@ -0,0 +1,664 @@
+/*
+ * Battery Driver for Dialog DA9052 PMICs
+ *
+ * Copyright(c) 2011 Dialog Semiconductor Ltd.
+ *
+ * Author: David Dajun Chen <dchen@diasemi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/delay.h>
+#include <linux/freezer.h>
+#include <linux/fs.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/timer.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+
+#include <linux/mfd/da9052/da9052.h>
+#include <linux/mfd/da9052/pdata.h>
+#include <linux/mfd/da9052/reg.h>
+
+/* STATIC CONFIGURATION */
+#define DA9052_BAT_CUTOFF_VOLT 2800
+#define DA9052_BAT_TSH 62000
+#define DA9052_BAT_LOW_CAP 4
+#define DA9052_AVG_SZ 4
+#define DA9052_VC_TBL_SZ 68
+#define DA9052_VC_TBL_REF_SZ 3
+
+#define DA9052_ISET_USB_MASK 0x0F
+#define DA9052_CHG_USB_ILIM_MASK 0x40
+#define DA9052_CHG_LIM_COLS 16
+
+#define DA9052_MEAN(x, y) ((x + y) / 2)
+
+enum charger_type_enum {
+ DA9052_NOCHARGER = 1,
+ DA9052_CHARGER,
+};
+
+static const u16 da9052_chg_current_lim[2][DA9052_CHG_LIM_COLS] = {
+ {70, 80, 90, 100, 110, 120, 400, 450,
+ 500, 550, 600, 650, 700, 900, 1100, 1300},
+ {80, 90, 100, 110, 120, 400, 450, 500,
+ 550, 600, 800, 1000, 1200, 1400, 1600, 1800},
+};
+
+static const u16 vc_tbl_ref[3] = {10, 25, 40};
+/* Lookup table for voltage vs capacity */
+static u32 const vc_tbl[3][68][2] = {
+ /* For temperature 10 degree Celsius */
+ {
+ {4082, 100}, {4036, 98},
+ {4020, 96}, {4008, 95},
+ {3997, 93}, {3983, 91},
+ {3964, 90}, {3943, 88},
+ {3926, 87}, {3912, 85},
+ {3900, 84}, {3890, 82},
+ {3881, 80}, {3873, 79},
+ {3865, 77}, {3857, 76},
+ {3848, 74}, {3839, 73},
+ {3829, 71}, {3820, 70},
+ {3811, 68}, {3802, 67},
+ {3794, 65}, {3785, 64},
+ {3778, 62}, {3770, 61},
+ {3763, 59}, {3756, 58},
+ {3750, 56}, {3744, 55},
+ {3738, 53}, {3732, 52},
+ {3727, 50}, {3722, 49},
+ {3717, 47}, {3712, 46},
+ {3708, 44}, {3703, 43},
+ {3700, 41}, {3696, 40},
+ {3693, 38}, {3691, 37},
+ {3688, 35}, {3686, 34},
+ {3683, 32}, {3681, 31},
+ {3678, 29}, {3675, 28},
+ {3672, 26}, {3669, 25},
+ {3665, 23}, {3661, 22},
+ {3656, 21}, {3651, 19},
+ {3645, 18}, {3639, 16},
+ {3631, 15}, {3622, 13},
+ {3611, 12}, {3600, 10},
+ {3587, 9}, {3572, 7},
+ {3548, 6}, {3503, 5},
+ {3420, 3}, {3268, 2},
+ {2992, 1}, {2746, 0}
+ },
+ /* For temperature 25 degree Celsius */
+ {
+ {4102, 100}, {4065, 98},
+ {4048, 96}, {4034, 95},
+ {4021, 93}, {4011, 92},
+ {4001, 90}, {3986, 88},
+ {3968, 87}, {3952, 85},
+ {3938, 84}, {3926, 82},
+ {3916, 81}, {3908, 79},
+ {3900, 77}, {3892, 76},
+ {3883, 74}, {3874, 73},
+ {3864, 71}, {3855, 70},
+ {3846, 68}, {3836, 67},
+ {3827, 65}, {3819, 64},
+ {3810, 62}, {3801, 61},
+ {3793, 59}, {3786, 58},
+ {3778, 56}, {3772, 55},
+ {3765, 53}, {3759, 52},
+ {3754, 50}, {3748, 49},
+ {3743, 47}, {3738, 46},
+ {3733, 44}, {3728, 43},
+ {3724, 41}, {3720, 40},
+ {3716, 38}, {3712, 37},
+ {3709, 35}, {3706, 34},
+ {3703, 33}, {3701, 31},
+ {3698, 30}, {3696, 28},
+ {3693, 27}, {3690, 25},
+ {3687, 24}, {3683, 22},
+ {3680, 21}, {3675, 19},
+ {3671, 18}, {3666, 17},
+ {3660, 15}, {3654, 14},
+ {3647, 12}, {3639, 11},
+ {3630, 9}, {3621, 8},
+ {3613, 6}, {3606, 5},
+ {3597, 4}, {3582, 2},
+ {3546, 1}, {2747, 0}
+ },
+ /* For temperature 40 degree Celsius */
+ {
+ {4114, 100}, {4081, 98},
+ {4065, 96}, {4050, 95},
+ {4036, 93}, {4024, 92},
+ {4013, 90}, {4002, 88},
+ {3990, 87}, {3976, 85},
+ {3962, 84}, {3950, 82},
+ {3939, 81}, {3930, 79},
+ {3921, 77}, {3912, 76},
+ {3902, 74}, {3893, 73},
+ {3883, 71}, {3874, 70},
+ {3865, 68}, {3856, 67},
+ {3847, 65}, {3838, 64},
+ {3829, 62}, {3820, 61},
+ {3812, 59}, {3803, 58},
+ {3795, 56}, {3787, 55},
+ {3780, 53}, {3773, 52},
+ {3767, 50}, {3761, 49},
+ {3756, 47}, {3751, 46},
+ {3746, 44}, {3741, 43},
+ {3736, 41}, {3732, 40},
+ {3728, 38}, {3724, 37},
+ {3720, 35}, {3716, 34},
+ {3713, 33}, {3710, 31},
+ {3707, 30}, {3704, 28},
+ {3701, 27}, {3698, 25},
+ {3695, 24}, {3691, 22},
+ {3686, 21}, {3681, 19},
+ {3676, 18}, {3671, 17},
+ {3666, 15}, {3661, 14},
+ {3655, 12}, {3648, 11},
+ {3640, 9}, {3632, 8},
+ {3622, 6}, {3616, 5},
+ {3611, 4}, {3604, 2},
+ {3594, 1}, {2747, 0}
+ }
+};
+
+struct da9052_battery {
+ struct da9052 *da9052;
+ struct power_supply psy;
+ struct notifier_block nb;
+ int charger_type;
+ int status;
+ int health;
+};
+
+static inline int volt_reg_to_mV(int value)
+{
+ return ((value * 1000) / 512) + 2500;
+}
+
+static inline int ichg_reg_to_mA(int value)
+{
+ return (value * 3900) / 1000;
+}
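A quick sanity check on the two conversion helpers above (my arithmetic, assuming a 10-bit VBAT code and an 8-bit ICHG code): a full-scale VBAT reading of 1023 maps to 1023 * 1000 / 512 + 2500 = 4498 mV, and an ICHG code of 255 maps to 255 * 3900 / 1000 = 994 mA.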
+
+static int da9052_read_chgend_current(struct da9052_battery *bat,
+ int *current_mA)
+{
+ int ret;
+
+ if (bat->status == POWER_SUPPLY_STATUS_DISCHARGING)
+ return -EINVAL;
+
+ ret = da9052_reg_read(bat->da9052, DA9052_ICHG_END_REG);
+ if (ret < 0)
+ return ret;
+
+ *current_mA = ichg_reg_to_mA(ret & DA9052_ICHGEND_ICHGEND);
+
+ return 0;
+}
+
+static int da9052_read_chg_current(struct da9052_battery *bat, int *current_mA)
+{
+ int ret;
+
+ if (bat->status == POWER_SUPPLY_STATUS_DISCHARGING)
+ return -EINVAL;
+
+ ret = da9052_reg_read(bat->da9052, DA9052_ICHG_AV_REG);
+ if (ret < 0)
+ return ret;
+
+ *current_mA = ichg_reg_to_mA(ret & DA9052_ICHGAV_ICHGAV);
+
+ return 0;
+}
+
+static int da9052_bat_check_status(struct da9052_battery *bat, int *status)
+{
+ u8 v[2] = {0, 0};
+ u8 bat_status;
+ u8 chg_end;
+ int ret;
+ int chg_current;
+ int chg_end_current;
+ bool dcinsel;
+ bool dcindet;
+ bool vbussel;
+ bool vbusdet;
+ bool dc;
+ bool vbus;
+
+ ret = da9052_group_read(bat->da9052, DA9052_STATUS_A_REG, 2, v);
+ if (ret < 0)
+ return ret;
+
+ bat_status = v[0];
+ chg_end = v[1];
+
+ dcinsel = bat_status & DA9052_STATUSA_DCINSEL;
+ dcindet = bat_status & DA9052_STATUSA_DCINDET;
+ vbussel = bat_status & DA9052_STATUSA_VBUSSEL;
+ vbusdet = bat_status & DA9052_STATUSA_VBUSDET;
+ dc = dcinsel && dcindet;
+ vbus = vbussel && vbusdet;
+
+ /* Preference to WALL(DCIN) charger unit */
+ if (dc || vbus) {
+ bat->charger_type = DA9052_CHARGER;
+
+ /* If the charging end flag is set and the charging current is
+ * greater than the charging end limit, the battery is charging
+ */
+ if ((chg_end & DA9052_STATUSB_CHGEND) != 0) {
+ ret = da9052_read_chg_current(bat, &chg_current);
+ if (ret < 0)
+ return ret;
+ ret = da9052_read_chgend_current(bat, &chg_end_current);
+ if (ret < 0)
+ return ret;
+
+ if (chg_current >= chg_end_current)
+ bat->status = POWER_SUPPLY_STATUS_CHARGING;
+ else
+ bat->status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ } else {
+ /* If the charging end flag is cleared, the battery is
+ * charging
+ */
+ bat->status = POWER_SUPPLY_STATUS_CHARGING;
+ }
+ } else if (dcindet || vbusdet) {
+ bat->charger_type = DA9052_CHARGER;
+ bat->status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ } else {
+ bat->charger_type = DA9052_NOCHARGER;
+ bat->status = POWER_SUPPLY_STATUS_DISCHARGING;
+ }
+
+ if (status != NULL)
+ *status = bat->status;
+ return 0;
+}
+
+static int da9052_bat_read_volt(struct da9052_battery *bat, int *volt_mV)
+{
+ int volt;
+
+ volt = da9052_adc_manual_read(bat->da9052, DA9052_ADC_MAN_MUXSEL_VBAT);
+ if (volt < 0)
+ return volt;
+
+ *volt_mV = volt_reg_to_mV(volt);
+
+ return 0;
+}
+
+static int da9052_bat_check_presence(struct da9052_battery *bat, int *illegal)
+{
+ int bat_temp;
+
+ bat_temp = da9052_adc_read_temp(bat->da9052);
+ if (bat_temp < 0)
+ return bat_temp;
+
+ if (bat_temp > DA9052_BAT_TSH)
+ *illegal = 1;
+ else
+ *illegal = 0;
+
+ return 0;
+}
+
+static int da9052_bat_interpolate(int vbat_lower, int vbat_upper,
+ int level_lower, int level_upper,
+ int bat_voltage)
+{
+ int tmp;
+
+ tmp = ((level_upper - level_lower) * 1000) / (vbat_upper - vbat_lower);
+ tmp = level_lower + (((bat_voltage - vbat_lower) * tmp) / 1000);
+
+ return tmp;
+}
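Worked example (mine, using the 25 degree table above): between the entries {3754, 50} and {3748, 49}, a reading of 3751 mV gives tmp = (50 - 49) * 1000 / (3754 - 3748) = 166, then 49 + (3751 - 3748) * 166 / 1000 = 49; the integer arithmetic rounds the capacity down to 49%.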
+
+unsigned char da9052_determine_vc_tbl_index(unsigned char adc_temp)
+{
+ int i;
+
+ if (adc_temp <= vc_tbl_ref[0])
+ return 0;
+
+ if (adc_temp > vc_tbl_ref[DA9052_VC_TBL_REF_SZ - 1])
+ return DA9052_VC_TBL_REF_SZ - 1;
+
+ for (i = 0; i < DA9052_VC_TBL_REF_SZ; i++) {
+ if ((adc_temp > vc_tbl_ref[i]) &&
+ (adc_temp <= DA9052_MEAN(vc_tbl_ref[i], vc_tbl_ref[i + 1])))
+ return i;
+ if ((adc_temp > DA9052_MEAN(vc_tbl_ref[i], vc_tbl_ref[i + 1]))
+ && (adc_temp <= vc_tbl_ref[i]))
+ return i + 1;
+ }
+
+ /* Fallback (assumed); control should not normally reach here */
+ return 0;
+}
+
+static int da9052_bat_read_capacity(struct da9052_battery *bat, int *capacity)
+{
+ int adc_temp;
+ int bat_voltage;
+ int vbat_lower;
+ int vbat_upper;
+ int level_upper;
+ int level_lower;
+ int ret;
+ int flag;
+ int i = 0;
+ int j;
+
+ ret = da9052_bat_read_volt(bat, &bat_voltage);
+ if (ret < 0)
+ return ret;
+
+ adc_temp = da9052_adc_read_temp(bat->da9052);
+ if (adc_temp < 0)
+ return adc_temp;
+
+ i = da9052_determine_vc_tbl_index(adc_temp);
+
+ if (bat_voltage >= vc_tbl[i][0][0]) {
+ *capacity = 100;
+ return 0;
+ }
+ if (bat_voltage <= vc_tbl[i][DA9052_VC_TBL_SZ - 1][0]) {
+ *capacity = 0;
+ return 0;
+ }
+ flag = 0;
+
+ for (j = 0; j < (DA9052_VC_TBL_SZ-1); j++) {
+ if ((bat_voltage <= vc_tbl[i][j][0]) &&
+ (bat_voltage >= vc_tbl[i][j + 1][0])) {
+ vbat_upper = vc_tbl[i][j][0];
+ vbat_lower = vc_tbl[i][j + 1][0];
+ level_upper = vc_tbl[i][j][1];
+ level_lower = vc_tbl[i][j + 1][1];
+ flag = 1;
+ break;
+ }
+ }
+ if (!flag)
+ return -EIO;
+
+ *capacity = da9052_bat_interpolate(vbat_lower, vbat_upper, level_lower,
+ level_upper, bat_voltage);
+
+ return 0;
+}
+
+static int da9052_bat_check_health(struct da9052_battery *bat, int *health)
+{
+ int ret;
+ int bat_illegal;
+ int capacity;
+
+ ret = da9052_bat_check_presence(bat, &bat_illegal);
+ if (ret < 0)
+ return ret;
+
+ if (bat_illegal) {
+ bat->health = POWER_SUPPLY_HEALTH_UNKNOWN;
+ return 0;
+ }
+
+ if (bat->health != POWER_SUPPLY_HEALTH_OVERHEAT) {
+ ret = da9052_bat_read_capacity(bat, &capacity);
+ if (ret < 0)
+ return ret;
+ if (capacity < DA9052_BAT_LOW_CAP)
+ bat->health = POWER_SUPPLY_HEALTH_DEAD;
+ else
+ bat->health = POWER_SUPPLY_HEALTH_GOOD;
+ }
+
+ *health = bat->health;
+
+ return 0;
+}
+
+static irqreturn_t da9052_bat_irq(int irq, void *data)
+{
+ struct da9052_battery *bat = data;
+
+ irq -= bat->da9052->irq_base;
+
+ if (irq == DA9052_IRQ_CHGEND)
+ bat->status = POWER_SUPPLY_STATUS_FULL;
+ else
+ da9052_bat_check_status(bat, NULL);
+
+ if (irq == DA9052_IRQ_CHGEND || irq == DA9052_IRQ_DCIN ||
+ irq == DA9052_IRQ_VBUS || irq == DA9052_IRQ_TBAT) {
+ power_supply_changed(&bat->psy);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int da9052_USB_current_notifier(struct notifier_block *nb,
+ unsigned long events, void *data)
+{
+ u8 row;
+ u8 col;
+ int *current_mA = data;
+ int ret;
+ struct da9052_battery *bat = container_of(nb, struct da9052_battery,
+ nb);
+
+ if (bat->status == POWER_SUPPLY_STATUS_DISCHARGING)
+ return -EPERM;
+
+ ret = da9052_reg_read(bat->da9052, DA9052_CHGBUCK_REG);
+ if (ret & DA9052_CHG_USB_ILIM_MASK)
+ return -EPERM;
+
+ if (bat->da9052->chip_id == DA9052)
+ row = 0;
+ else
+ row = 1;
+
+ if (*current_mA < da9052_chg_current_lim[row][0] ||
+ *current_mA > da9052_chg_current_lim[row][DA9052_CHG_LIM_COLS - 1])
+ return -EINVAL;
+
+ for (col = 0; col <= DA9052_CHG_LIM_COLS - 1 ; col++) {
+ if (*current_mA <= da9052_chg_current_lim[row][col])
+ break;
+ }
+
+ return da9052_reg_update(bat->da9052, DA9052_ISET_REG,
+ DA9052_ISET_USB_MASK, col);
+}
+
+static int da9052_bat_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ int ret;
+ int illegal;
+ struct da9052_battery *bat = container_of(psy, struct da9052_battery,
+ psy);
+
+ ret = da9052_bat_check_presence(bat, &illegal);
+ if (ret < 0)
+ return ret;
+
+ if (illegal && psp != POWER_SUPPLY_PROP_PRESENT)
+ return -ENODEV;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ ret = da9052_bat_check_status(bat, &val->intval);
+ break;
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval =
+ (bat->charger_type == DA9052_NOCHARGER) ? 0 : 1;
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ ret = da9052_bat_check_presence(bat, &val->intval);
+ break;
+ case POWER_SUPPLY_PROP_HEALTH:
+ ret = da9052_bat_check_health(bat, &val->intval);
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
+ val->intval = DA9052_BAT_CUTOFF_VOLT * 1000;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_AVG:
+ ret = da9052_bat_read_volt(bat, &val->intval);
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_AVG:
+ ret = da9052_read_chg_current(bat, &val->intval);
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ ret = da9052_bat_read_capacity(bat, &val->intval);
+ break;
+ case POWER_SUPPLY_PROP_TEMP:
+ val->intval = da9052_adc_read_temp(bat->da9052);
+ ret = val->intval;
+ break;
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return ret;
+}
+
+static enum power_supply_property da9052_bat_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
+ POWER_SUPPLY_PROP_VOLTAGE_AVG,
+ POWER_SUPPLY_PROP_CURRENT_AVG,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+};
+
+static struct power_supply template_battery = {
+ .name = "da9052-bat",
+ .type = POWER_SUPPLY_TYPE_BATTERY,
+ .properties = da9052_bat_props,
+ .num_properties = ARRAY_SIZE(da9052_bat_props),
+ .get_property = da9052_bat_get_property,
+};
+
+static const char *const da9052_bat_irqs[] = {
+ "BATT TEMP",
+ "DCIN DET",
+ "DCIN REM",
+ "VBUS DET",
+ "VBUS REM",
+ "CHG END",
+};
+
+static s32 __devinit da9052_bat_probe(struct platform_device *pdev)
+{
+ struct da9052_pdata *pdata;
+ struct da9052_battery *bat;
+ int ret;
+ int irq;
+ int i;
+
+ bat = kzalloc(sizeof(struct da9052_battery), GFP_KERNEL);
+ if (!bat)
+ return -ENOMEM;
+
+ bat->da9052 = dev_get_drvdata(pdev->dev.parent);
+ bat->psy = template_battery;
+ bat->charger_type = DA9052_NOCHARGER;
+ bat->status = POWER_SUPPLY_STATUS_UNKNOWN;
+ bat->health = POWER_SUPPLY_HEALTH_UNKNOWN;
+ bat->nb.notifier_call = da9052_USB_current_notifier;
+
+ pdata = bat->da9052->dev->platform_data;
+ if (pdata != NULL && pdata->use_for_apm)
+ bat->psy.use_for_apm = pdata->use_for_apm;
+ else
+ bat->psy.use_for_apm = 1;
+
+ for (i = 0; i < ARRAY_SIZE(da9052_bat_irqs); i++) {
+ irq = platform_get_irq_byname(pdev, da9052_bat_irqs[i]);
+ ret = request_threaded_irq(bat->da9052->irq_base + irq,
+ NULL, da9052_bat_irq,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ da9052_bat_irqs[i], bat);
+ if (ret != 0) {
+ dev_err(bat->da9052->dev,
+ "DA9052 failed to request %s IRQ %d: %d\n",
+ da9052_bat_irqs[i], irq, ret);
+ goto err;
+ }
+ }
+
+ ret = power_supply_register(&pdev->dev, &bat->psy);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ for (; i >= 0; i--) {
+ irq = platform_get_irq_byname(pdev, da9052_bat_irqs[i]);
+ free_irq(bat->da9052->irq_base + irq, bat);
+ }
+ kfree(bat);
+ return ret;
+}
+static int __devexit da9052_bat_remove(struct platform_device *pdev)
+{
+ int i;
+ int irq;
+ struct da9052_battery *bat = platform_get_drvdata(pdev);
+
+ for (i = 0; i < ARRAY_SIZE(da9052_bat_irqs); i++) {
+ irq = platform_get_irq_byname(pdev, da9052_bat_irqs[i]);
+ free_irq(bat->da9052->irq_base + irq, bat);
+ }
+ power_supply_unregister(&bat->psy);
+
+ return 0;
+}
+
+static struct platform_driver da9052_bat_driver = {
+ .probe = da9052_bat_probe,
+ .remove = __devexit_p(da9052_bat_remove),
+ .driver = {
+ .name = "da9052-bat",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init da9052_bat_init(void)
+{
+ return platform_driver_register(&da9052_bat_driver);
+}
+module_init(da9052_bat_init);
+
+static void __exit da9052_bat_exit(void)
+{
+ platform_driver_unregister(&da9052_bat_driver);
+}
+module_exit(da9052_bat_exit);
+
+MODULE_DESCRIPTION("DA9052 BAT Device Driver");
+MODULE_AUTHOR("David Dajun Chen <dchen@diasemi.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:da9052-bat");
diff --git a/drivers/power/ds2760_battery.c b/drivers/power/ds2760_battery.c
index f2c9cc33c0f9..076e211a40b7 100644
--- a/drivers/power/ds2760_battery.c
+++ b/drivers/power/ds2760_battery.c
@@ -64,7 +64,7 @@ static unsigned int cache_time = 1000;
module_param(cache_time, uint, 0644);
MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
-static unsigned int pmod_enabled;
+static bool pmod_enabled;
module_param(pmod_enabled, bool, 0644);
MODULE_PARM_DESC(pmod_enabled, "PMOD enable bit");
@@ -95,7 +95,11 @@ static int rated_capacities[] = {
2880, /* Samsung */
2880, /* BYD */
2880, /* Lishen */
- 2880 /* NEC */
+ 2880, /* NEC */
+#ifdef CONFIG_MACH_H4700
+ 0,
+ 3600, /* HP iPAQ hx4700 3.7V 3600mAh (359114-001) */
+#endif
};
/* array is level at temps 0°C, 10°C, 20°C, 30°C, 40°C
@@ -637,18 +641,7 @@ static struct platform_driver ds2760_battery_driver = {
.resume = ds2760_battery_resume,
};
-static int __init ds2760_battery_init(void)
-{
- return platform_driver_register(&ds2760_battery_driver);
-}
-
-static void __exit ds2760_battery_exit(void)
-{
- platform_driver_unregister(&ds2760_battery_driver);
-}
-
-module_init(ds2760_battery_init);
-module_exit(ds2760_battery_exit);
+module_platform_driver(ds2760_battery_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Szabolcs Gyurko <szabolcs.gyurko@tlt.hu>, "
diff --git a/drivers/power/ds2780_battery.c b/drivers/power/ds2780_battery.c
index 91a783d72360..de31cae1ba53 100644
--- a/drivers/power/ds2780_battery.c
+++ b/drivers/power/ds2780_battery.c
@@ -841,29 +841,17 @@ static int __devexit ds2780_battery_remove(struct platform_device *pdev)
return 0;
}
-MODULE_ALIAS("platform:ds2780-battery");
-
static struct platform_driver ds2780_battery_driver = {
.driver = {
.name = "ds2780-battery",
},
.probe = ds2780_battery_probe,
- .remove = ds2780_battery_remove,
+ .remove = __devexit_p(ds2780_battery_remove),
};
-static int __init ds2780_battery_init(void)
-{
- return platform_driver_register(&ds2780_battery_driver);
-}
-
-static void __exit ds2780_battery_exit(void)
-{
- platform_driver_unregister(&ds2780_battery_driver);
-}
-
-module_init(ds2780_battery_init);
-module_exit(ds2780_battery_exit);
+module_platform_driver(ds2780_battery_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Clifton Barnes <cabarnes@indesign-llc.com>");
MODULE_DESCRIPTION("Maxim/Dallas DS2780 Stand-Alone Fuel Gauage IC driver");
+MODULE_ALIAS("platform:ds2780-battery");
diff --git a/drivers/power/gpio-charger.c b/drivers/power/gpio-charger.c
index a64b8854cfd5..8672c9177dd7 100644
--- a/drivers/power/gpio-charger.c
+++ b/drivers/power/gpio-charger.c
@@ -185,17 +185,7 @@ static struct platform_driver gpio_charger_driver = {
},
};
-static int __init gpio_charger_init(void)
-{
- return platform_driver_register(&gpio_charger_driver);
-}
-module_init(gpio_charger_init);
-
-static void __exit gpio_charger_exit(void)
-{
- platform_driver_unregister(&gpio_charger_driver);
-}
-module_exit(gpio_charger_exit);
+module_platform_driver(gpio_charger_driver);
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("Driver for chargers which report their online status through a GPIO");
diff --git a/drivers/power/intel_mid_battery.c b/drivers/power/intel_mid_battery.c
index cffcb7c00b00..d09649706bd3 100644
--- a/drivers/power/intel_mid_battery.c
+++ b/drivers/power/intel_mid_battery.c
@@ -61,7 +61,8 @@ MODULE_PARM_DESC(debug, "Flag to enable PMIC Battery debug messages.");
#define PMIC_BATT_CHR_SBATDET_MASK (1 << 5)
#define PMIC_BATT_CHR_SDCLMT_MASK (1 << 6)
#define PMIC_BATT_CHR_SUSBOVP_MASK (1 << 7)
-#define PMIC_BATT_CHR_EXCPT_MASK 0xC6
+#define PMIC_BATT_CHR_EXCPT_MASK 0x86
+
#define PMIC_BATT_ADC_ACCCHRG_MASK (1 << 31)
#define PMIC_BATT_ADC_ACCCHRGVAL_MASK 0x7FFFFFFF
@@ -304,11 +305,6 @@ static void pmic_battery_read_status(struct pmic_power_module_info *pbi)
pbi->batt_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
pmic_battery_log_event(BATT_EVENT_BATOVP_EXCPT);
batt_exception = 1;
- } else if (r8 & PMIC_BATT_CHR_SDCLMT_MASK) {
- pbi->batt_health = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
- pbi->batt_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
- pmic_battery_log_event(BATT_EVENT_DCLMT_EXCPT);
- batt_exception = 1;
} else if (r8 & PMIC_BATT_CHR_STEMP_MASK) {
pbi->batt_health = POWER_SUPPLY_HEALTH_OVERHEAT;
pbi->batt_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
@@ -316,6 +312,10 @@ static void pmic_battery_read_status(struct pmic_power_module_info *pbi)
batt_exception = 1;
} else {
pbi->batt_health = POWER_SUPPLY_HEALTH_GOOD;
+ if (r8 & PMIC_BATT_CHR_SDCLMT_MASK) {
+ /* PMIC will change charging current automatically */
+ pmic_battery_log_event(BATT_EVENT_DCLMT_EXCPT);
+ }
}
}
@@ -779,18 +779,7 @@ static struct platform_driver platform_pmic_battery_driver = {
.remove = __devexit_p(platform_pmic_battery_remove),
};
-static int __init platform_pmic_battery_module_init(void)
-{
- return platform_driver_register(&platform_pmic_battery_driver);
-}
-
-static void __exit platform_pmic_battery_module_exit(void)
-{
- platform_driver_unregister(&platform_pmic_battery_driver);
-}
-
-module_init(platform_pmic_battery_module_init);
-module_exit(platform_pmic_battery_module_exit);
+module_platform_driver(platform_pmic_battery_driver);
MODULE_AUTHOR("Nithish Mahalingam <nithish.mahalingam@intel.com>");
MODULE_DESCRIPTION("Intel Moorestown PMIC Battery Driver");
diff --git a/drivers/power/isp1704_charger.c b/drivers/power/isp1704_charger.c
index f6d72b402a8e..b806667b59ae 100644
--- a/drivers/power/isp1704_charger.c
+++ b/drivers/power/isp1704_charger.c
@@ -79,7 +79,7 @@ static void isp1704_charger_set_power(struct isp1704_charger *isp, bool on)
{
struct isp1704_charger_data *board = isp->dev->platform_data;
- if (board->set_power)
+ if (board && board->set_power)
board->set_power(on);
}
@@ -494,17 +494,7 @@ static struct platform_driver isp1704_charger_driver = {
.remove = __devexit_p(isp1704_charger_remove),
};
-static int __init isp1704_charger_init(void)
-{
- return platform_driver_register(&isp1704_charger_driver);
-}
-module_init(isp1704_charger_init);
-
-static void __exit isp1704_charger_exit(void)
-{
- platform_driver_unregister(&isp1704_charger_driver);
-}
-module_exit(isp1704_charger_exit);
+module_platform_driver(isp1704_charger_driver);
MODULE_ALIAS("platform:isp1704_charger");
MODULE_AUTHOR("Nokia Corporation");
diff --git a/drivers/power/jz4740-battery.c b/drivers/power/jz4740-battery.c
index 763f894ed188..8dbc7bfaab14 100644
--- a/drivers/power/jz4740-battery.c
+++ b/drivers/power/jz4740-battery.c
@@ -67,7 +67,7 @@ static irqreturn_t jz_battery_irq_handler(int irq, void *devid)
static long jz_battery_read_voltage(struct jz_battery *battery)
{
- unsigned long t;
+ long t;
unsigned long val;
long voltage;
@@ -441,17 +441,7 @@ static struct platform_driver jz_battery_driver = {
},
};
-static int __init jz_battery_init(void)
-{
- return platform_driver_register(&jz_battery_driver);
-}
-module_init(jz_battery_init);
-
-static void __exit jz_battery_exit(void)
-{
- platform_driver_unregister(&jz_battery_driver);
-}
-module_exit(jz_battery_exit);
+module_platform_driver(jz_battery_driver);
MODULE_ALIAS("platform:jz4740-battery");
MODULE_LICENSE("GPL");
diff --git a/drivers/power/lp8727_charger.c b/drivers/power/lp8727_charger.c
new file mode 100644
index 000000000000..b15b575c070c
--- /dev/null
+++ b/drivers/power/lp8727_charger.c
@@ -0,0 +1,494 @@
+/*
+ * Driver for LP8727 Micro/Mini USB IC with integrated charger
+ *
+ * Copyright (C) 2011 National Semiconductor
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/power_supply.h>
+#include <linux/lp8727.h>
+
+#define DEBOUNCE_MSEC 270
+
+/* Registers */
+#define CTRL1 0x1
+#define CTRL2 0x2
+#define SWCTRL 0x3
+#define INT1 0x4
+#define INT2 0x5
+#define STATUS1 0x6
+#define STATUS2 0x7
+#define CHGCTRL2 0x9
+
+/* CTRL1 register */
+#define CP_EN (1 << 0)
+#define ADC_EN (1 << 1)
+#define ID200_EN (1 << 4)
+
+/* CTRL2 register */
+#define CHGDET_EN (1 << 1)
+#define INT_EN (1 << 6)
+
+/* SWCTRL register */
+#define SW_DM1_DM (0x0 << 0)
+#define SW_DM1_U1 (0x1 << 0)
+#define SW_DM1_HiZ (0x7 << 0)
+#define SW_DP2_DP (0x0 << 3)
+#define SW_DP2_U2 (0x1 << 3)
+#define SW_DP2_HiZ (0x7 << 3)
+
+/* INT1 register */
+#define IDNO (0xF << 0)
+#define VBUS (1 << 4)
+
+/* STATUS1 register */
+#define CHGSTAT (3 << 4)
+#define CHPORT (1 << 6)
+#define DCPORT (1 << 7)
+
+/* STATUS2 register */
+#define TEMP_STAT (3 << 5)
+
+enum lp8727_dev_id {
+ ID_NONE,
+ ID_TA,
+ ID_DEDICATED_CHG,
+ ID_USB_CHG,
+ ID_USB_DS,
+ ID_MAX,
+};
+
+enum lp8727_chg_stat {
+ PRECHG,
+ CC,
+ CV,
+ EOC,
+};
+
+struct lp8727_psy {
+ struct power_supply ac;
+ struct power_supply usb;
+ struct power_supply batt;
+};
+
+struct lp8727_chg {
+ struct device *dev;
+ struct i2c_client *client;
+ struct mutex xfer_lock;
+ struct delayed_work work;
+ struct workqueue_struct *irqthread;
+ struct lp8727_platform_data *pdata;
+ struct lp8727_psy *psy;
+ struct lp8727_chg_param *chg_parm;
+ enum lp8727_dev_id devid;
+};
+
+static int lp8727_i2c_read(struct lp8727_chg *pchg, u8 reg, u8 *data, u8 len)
+{
+ s32 ret;
+
+ mutex_lock(&pchg->xfer_lock);
+ ret = i2c_smbus_read_i2c_block_data(pchg->client, reg, len, data);
+ mutex_unlock(&pchg->xfer_lock);
+
+ return (ret != len) ? -EIO : 0;
+}
+
+static int lp8727_i2c_write(struct lp8727_chg *pchg, u8 reg, u8 *data, u8 len)
+{
+ s32 ret;
+
+ mutex_lock(&pchg->xfer_lock);
+ ret = i2c_smbus_write_i2c_block_data(pchg->client, reg, len, data);
+ mutex_unlock(&pchg->xfer_lock);
+
+ return ret;
+}
+
+static inline int lp8727_i2c_read_byte(struct lp8727_chg *pchg, u8 reg,
+ u8 *data)
+{
+ return lp8727_i2c_read(pchg, reg, data, 1);
+}
+
+static inline int lp8727_i2c_write_byte(struct lp8727_chg *pchg, u8 reg,
+ u8 *data)
+{
+ return lp8727_i2c_write(pchg, reg, data, 1);
+}
+
+static int lp8727_is_charger_attached(const char *name, int id)
+{
+ if (name) {
+ if (!strcmp(name, "ac"))
+ return (id == ID_TA || id == ID_DEDICATED_CHG) ? 1 : 0;
+ else if (!strcmp(name, "usb"))
+ return (id == ID_USB_CHG) ? 1 : 0;
+ }
+
+ return (id >= ID_TA && id <= ID_USB_CHG) ? 1 : 0;
+}
+
+static void lp8727_init_device(struct lp8727_chg *pchg)
+{
+ u8 val;
+
+ val = ID200_EN | ADC_EN | CP_EN;
+ if (lp8727_i2c_write_byte(pchg, CTRL1, &val))
+ dev_err(pchg->dev, "i2c write err : addr=0x%.2x\n", CTRL1);
+
+ val = INT_EN | CHGDET_EN;
+ if (lp8727_i2c_write_byte(pchg, CTRL2, &val))
+ dev_err(pchg->dev, "i2c write err : addr=0x%.2x\n", CTRL2);
+}
+
+static int lp8727_is_dedicated_charger(struct lp8727_chg *pchg)
+{
+ u8 val;
+ lp8727_i2c_read_byte(pchg, STATUS1, &val);
+ return (val & DCPORT);
+}
+
+static int lp8727_is_usb_charger(struct lp8727_chg *pchg)
+{
+ u8 val;
+ lp8727_i2c_read_byte(pchg, STATUS1, &val);
+ return (val & CHPORT);
+}
+
+static void lp8727_ctrl_switch(struct lp8727_chg *pchg, u8 sw)
+{
+ u8 val = sw;
+ lp8727_i2c_write_byte(pchg, SWCTRL, &val);
+}
+
+static void lp8727_id_detection(struct lp8727_chg *pchg, u8 id, int vbusin)
+{
+ u8 devid = ID_NONE;
+ u8 swctrl = SW_DM1_HiZ | SW_DP2_HiZ;
+
+ switch (id) {
+ case 0x5:
+ devid = ID_TA;
+ pchg->chg_parm = &pchg->pdata->ac;
+ break;
+ case 0xB:
+ if (lp8727_is_dedicated_charger(pchg)) {
+ pchg->chg_parm = &pchg->pdata->ac;
+ devid = ID_DEDICATED_CHG;
+ } else if (lp8727_is_usb_charger(pchg)) {
+ pchg->chg_parm = &pchg->pdata->usb;
+ devid = ID_USB_CHG;
+ swctrl = SW_DM1_DM | SW_DP2_DP;
+ } else if (vbusin) {
+ devid = ID_USB_DS;
+ swctrl = SW_DM1_DM | SW_DP2_DP;
+ }
+ break;
+ default:
+ devid = ID_NONE;
+ pchg->chg_parm = NULL;
+ break;
+ }
+
+ pchg->devid = devid;
+ lp8727_ctrl_switch(pchg, swctrl);
+}
+
+static void lp8727_enable_chgdet(struct lp8727_chg *pchg)
+{
+ u8 val;
+
+ lp8727_i2c_read_byte(pchg, CTRL2, &val);
+ val |= CHGDET_EN;
+ lp8727_i2c_write_byte(pchg, CTRL2, &val);
+}
+
+static void lp8727_delayed_func(struct work_struct *_work)
+{
+ u8 intstat[2], idno, vbus;
+ struct lp8727_chg *pchg =
+ container_of(_work, struct lp8727_chg, work.work);
+
+ if (lp8727_i2c_read(pchg, INT1, intstat, 2)) {
+ dev_err(pchg->dev, "can not read INT registers\n");
+ return;
+ }
+
+ idno = intstat[0] & IDNO;
+ vbus = intstat[0] & VBUS;
+
+ lp8727_id_detection(pchg, idno, vbus);
+ lp8727_enable_chgdet(pchg);
+
+ power_supply_changed(&pchg->psy->ac);
+ power_supply_changed(&pchg->psy->usb);
+ power_supply_changed(&pchg->psy->batt);
+}
+
+static irqreturn_t lp8727_isr_func(int irq, void *ptr)
+{
+ struct lp8727_chg *pchg = ptr;
+ unsigned long delay = msecs_to_jiffies(DEBOUNCE_MSEC);
+
+ queue_delayed_work(pchg->irqthread, &pchg->work, delay);
+
+ return IRQ_HANDLED;
+}
+
+static void lp8727_intr_config(struct lp8727_chg *pchg)
+{
+ INIT_DELAYED_WORK(&pchg->work, lp8727_delayed_func);
+
+ pchg->irqthread = create_singlethread_workqueue("lp8727-irqthd");
+ if (!pchg->irqthread)
+ dev_err(pchg->dev, "can not create thread for lp8727\n");
+
+ if (request_threaded_irq(pchg->client->irq,
+ NULL,
+ lp8727_isr_func,
+ IRQF_TRIGGER_FALLING, "lp8727_irq", pchg)) {
+ dev_err(pchg->dev, "lp8727 irq can not be registered\n");
+ }
+}
+
+static enum power_supply_property lp8727_charger_prop[] = {
+ POWER_SUPPLY_PROP_ONLINE,
+};
+
+static enum power_supply_property lp8727_battery_prop[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_TEMP,
+};
+
+static char *battery_supplied_to[] = {
+ "main_batt",
+};
+
+static int lp8727_charger_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct lp8727_chg *pchg = dev_get_drvdata(psy->dev->parent);
+
+ if (psp == POWER_SUPPLY_PROP_ONLINE)
+ val->intval = lp8727_is_charger_attached(psy->name,
+ pchg->devid);
+
+ return 0;
+}
+
+static int lp8727_battery_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct lp8727_chg *pchg = dev_get_drvdata(psy->dev->parent);
+ u8 read;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ if (lp8727_is_charger_attached(psy->name, pchg->devid)) {
+ lp8727_i2c_read_byte(pchg, STATUS1, &read);
+ if (((read & CHGSTAT) >> 4) == EOC)
+ val->intval = POWER_SUPPLY_STATUS_FULL;
+ else
+ val->intval = POWER_SUPPLY_STATUS_CHARGING;
+ } else {
+ val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+ }
+ break;
+ case POWER_SUPPLY_PROP_HEALTH:
+ lp8727_i2c_read_byte(pchg, STATUS2, &read);
+ read = (read & TEMP_STAT) >> 5;
+ if (read >= 0x1 && read <= 0x3)
+ val->intval = POWER_SUPPLY_HEALTH_OVERHEAT;
+ else
+ val->intval = POWER_SUPPLY_HEALTH_GOOD;
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ if (pchg->pdata->get_batt_present)
+ val->intval = pchg->pdata->get_batt_present();
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ if (pchg->pdata->get_batt_level)
+ val->intval = pchg->pdata->get_batt_level();
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ if (pchg->pdata->get_batt_capacity)
+ val->intval = pchg->pdata->get_batt_capacity();
+ break;
+ case POWER_SUPPLY_PROP_TEMP:
+ if (pchg->pdata->get_batt_temp)
+ val->intval = pchg->pdata->get_batt_temp();
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static void lp8727_charger_changed(struct power_supply *psy)
+{
+ struct lp8727_chg *pchg = dev_get_drvdata(psy->dev->parent);
+ u8 val;
+ u8 eoc_level, ichg;
+
+ if (lp8727_is_charger_attached(psy->name, pchg->devid)) {
+ if (pchg->chg_parm) {
+ eoc_level = pchg->chg_parm->eoc_level;
+ ichg = pchg->chg_parm->ichg;
+ val = (ichg << 4) | eoc_level;
+ lp8727_i2c_write_byte(pchg, CHGCTRL2, &val);
+ }
+ }
+}
+
+static int lp8727_register_psy(struct lp8727_chg *pchg)
+{
+ struct lp8727_psy *psy;
+
+ psy = kzalloc(sizeof(*psy), GFP_KERNEL);
+ if (!psy)
+ goto err_mem;
+
+ pchg->psy = psy;
+
+ psy->ac.name = "ac";
+ psy->ac.type = POWER_SUPPLY_TYPE_MAINS;
+ psy->ac.properties = lp8727_charger_prop;
+ psy->ac.num_properties = ARRAY_SIZE(lp8727_charger_prop);
+ psy->ac.get_property = lp8727_charger_get_property;
+ psy->ac.supplied_to = battery_supplied_to;
+ psy->ac.num_supplicants = ARRAY_SIZE(battery_supplied_to);
+
+ if (power_supply_register(pchg->dev, &psy->ac))
+ goto err_psy;
+
+ psy->usb.name = "usb";
+ psy->usb.type = POWER_SUPPLY_TYPE_USB;
+ psy->usb.properties = lp8727_charger_prop;
+ psy->usb.num_properties = ARRAY_SIZE(lp8727_charger_prop);
+ psy->usb.get_property = lp8727_charger_get_property;
+ psy->usb.supplied_to = battery_supplied_to;
+ psy->usb.num_supplicants = ARRAY_SIZE(battery_supplied_to);
+
+ if (power_supply_register(pchg->dev, &psy->usb))
+ goto err_psy;
+
+ psy->batt.name = "main_batt";
+ psy->batt.type = POWER_SUPPLY_TYPE_BATTERY;
+ psy->batt.properties = lp8727_battery_prop;
+ psy->batt.num_properties = ARRAY_SIZE(lp8727_battery_prop);
+ psy->batt.get_property = lp8727_battery_get_property;
+ psy->batt.external_power_changed = lp8727_charger_changed;
+
+ if (power_supply_register(pchg->dev, &psy->batt))
+ goto err_psy;
+
+ return 0;
+
+err_mem:
+ return -ENOMEM;
+err_psy:
+ kfree(psy);
+ return -EPERM;
+}
+
+static void lp8727_unregister_psy(struct lp8727_chg *pchg)
+{
+ struct lp8727_psy *psy = pchg->psy;
+
+ if (!psy)
+ return;
+
+ power_supply_unregister(&psy->ac);
+ power_supply_unregister(&psy->usb);
+ power_supply_unregister(&psy->batt);
+ kfree(psy);
+}
+
+static int lp8727_probe(struct i2c_client *cl, const struct i2c_device_id *id)
+{
+ struct lp8727_chg *pchg;
+ int ret;
+
+ if (!i2c_check_functionality(cl->adapter, I2C_FUNC_SMBUS_I2C_BLOCK))
+ return -EIO;
+
+ pchg = kzalloc(sizeof(*pchg), GFP_KERNEL);
+ if (!pchg)
+ return -ENOMEM;
+
+ pchg->client = cl;
+ pchg->dev = &cl->dev;
+ pchg->pdata = cl->dev.platform_data;
+ i2c_set_clientdata(cl, pchg);
+
+ mutex_init(&pchg->xfer_lock);
+
+ lp8727_init_device(pchg);
+ lp8727_intr_config(pchg);
+
+ ret = lp8727_register_psy(pchg);
+ if (ret)
+ dev_err(pchg->dev,
+ "can not register power supplies. err=%d", ret);
+
+ return 0;
+}
+
+static int __devexit lp8727_remove(struct i2c_client *cl)
+{
+ struct lp8727_chg *pchg = i2c_get_clientdata(cl);
+
+ lp8727_unregister_psy(pchg);
+ free_irq(pchg->client->irq, pchg);
+ flush_workqueue(pchg->irqthread);
+ destroy_workqueue(pchg->irqthread);
+ kfree(pchg);
+ return 0;
+}
+
+static const struct i2c_device_id lp8727_ids[] = {
+ {"lp8727", 0},
+};
+
+static struct i2c_driver lp8727_driver = {
+ .driver = {
+ .name = "lp8727",
+ },
+ .probe = lp8727_probe,
+ .remove = __devexit_p(lp8727_remove),
+ .id_table = lp8727_ids,
+};
+
+static int __init lp8727_init(void)
+{
+ return i2c_add_driver(&lp8727_driver);
+}
+
+static void __exit lp8727_exit(void)
+{
+ i2c_del_driver(&lp8727_driver);
+}
+
+module_init(lp8727_init);
+module_exit(lp8727_exit);
+
+MODULE_DESCRIPTION("National Semiconductor LP8727 charger driver");
+MODULE_AUTHOR("Woogyom Kim <milo.kim@ti.com>");
+MODULE_AUTHOR("Daniel Jeong <daniel.jeong@ti.com>");
+MODULE_LICENSE("GPL");
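
The battery-side properties above (present, voltage, capacity, temperature) are delegated to board-supplied callbacks, and the charging parameters (eoc_level, ichg) come from the same platform data. A minimal board-file sketch, assuming the driver's header names the struct lp8727_platform_data and that the callbacks return power-supply-class units (the callback bodies and values here are placeholders, not part of the patch):

static int board_get_batt_present(void)  { return 1; }        /* placeholder */
static int board_get_batt_level(void)    { return 3900000; }  /* uV, placeholder */
static int board_get_batt_capacity(void) { return 80; }       /* percent, placeholder */
static int board_get_batt_temp(void)     { return 250; }      /* 0.1 deg C, placeholder */

static struct lp8727_platform_data lp8727_pdata = {
	.get_batt_present  = board_get_batt_present,
	.get_batt_level    = board_get_batt_level,
	.get_batt_capacity = board_get_batt_capacity,
	.get_batt_temp     = board_get_batt_temp,
};
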
diff --git a/drivers/power/max17042_battery.c b/drivers/power/max17042_battery.c
index 9f0183c73076..86acee2f9889 100644
--- a/drivers/power/max17042_battery.c
+++ b/drivers/power/max17042_battery.c
@@ -85,55 +85,79 @@ static int max17042_get_property(struct power_supply *psy,
{
struct max17042_chip *chip = container_of(psy,
struct max17042_chip, battery);
+ int ret;
switch (psp) {
case POWER_SUPPLY_PROP_PRESENT:
- val->intval = max17042_read_reg(chip->client,
- MAX17042_STATUS);
- if (val->intval & MAX17042_STATUS_BattAbsent)
+ ret = max17042_read_reg(chip->client, MAX17042_STATUS);
+ if (ret < 0)
+ return ret;
+
+ if (ret & MAX17042_STATUS_BattAbsent)
val->intval = 0;
else
val->intval = 1;
break;
case POWER_SUPPLY_PROP_CYCLE_COUNT:
- val->intval = max17042_read_reg(chip->client,
- MAX17042_Cycles);
+ ret = max17042_read_reg(chip->client, MAX17042_Cycles);
+ if (ret < 0)
+ return ret;
+
+ val->intval = ret;
break;
case POWER_SUPPLY_PROP_VOLTAGE_MAX:
- val->intval = max17042_read_reg(chip->client,
- MAX17042_MinMaxVolt);
- val->intval >>= 8;
+ ret = max17042_read_reg(chip->client, MAX17042_MinMaxVolt);
+ if (ret < 0)
+ return ret;
+
+ val->intval = ret >> 8;
val->intval *= 20000; /* Units of LSB = 20mV */
break;
case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
- val->intval = max17042_read_reg(chip->client,
- MAX17042_V_empty);
- val->intval >>= 7;
+ ret = max17042_read_reg(chip->client, MAX17042_V_empty);
+ if (ret < 0)
+ return ret;
+
+ val->intval = ret >> 7;
val->intval *= 10000; /* Units of LSB = 10mV */
break;
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
- val->intval = max17042_read_reg(chip->client,
- MAX17042_VCELL) * 83; /* 1000 / 12 = 83 */
+ ret = max17042_read_reg(chip->client, MAX17042_VCELL);
+ if (ret < 0)
+ return ret;
+
+ val->intval = ret * 625 / 8;
break;
case POWER_SUPPLY_PROP_VOLTAGE_AVG:
- val->intval = max17042_read_reg(chip->client,
- MAX17042_AvgVCELL) * 83;
+ ret = max17042_read_reg(chip->client, MAX17042_AvgVCELL);
+ if (ret < 0)
+ return ret;
+
+ val->intval = ret * 625 / 8;
break;
case POWER_SUPPLY_PROP_CAPACITY:
- val->intval = max17042_read_reg(chip->client,
- MAX17042_SOC) / 256;
+ ret = max17042_read_reg(chip->client, MAX17042_SOC);
+ if (ret < 0)
+ return ret;
+
+ val->intval = ret >> 8;
break;
case POWER_SUPPLY_PROP_CHARGE_FULL:
- val->intval = max17042_read_reg(chip->client,
- MAX17042_RepSOC);
- if ((val->intval / 256) >= MAX17042_BATTERY_FULL)
+ ret = max17042_read_reg(chip->client, MAX17042_RepSOC);
+ if (ret < 0)
+ return ret;
+
+ if ((ret >> 8) >= MAX17042_BATTERY_FULL)
val->intval = 1;
- else if (val->intval >= 0)
+ else if (ret >= 0)
val->intval = 0;
break;
case POWER_SUPPLY_PROP_TEMP:
- val->intval = max17042_read_reg(chip->client,
- MAX17042_TEMP);
+ ret = max17042_read_reg(chip->client, MAX17042_TEMP);
+ if (ret < 0)
+ return ret;
+
+ val->intval = ret;
/* The value is signed. */
if (val->intval & 0x8000) {
val->intval = (0x7fff & ~val->intval) + 1;
@@ -145,24 +169,30 @@ static int max17042_get_property(struct power_supply *psy,
break;
case POWER_SUPPLY_PROP_CURRENT_NOW:
if (chip->pdata->enable_current_sense) {
- val->intval = max17042_read_reg(chip->client,
- MAX17042_Current);
+ ret = max17042_read_reg(chip->client, MAX17042_Current);
+ if (ret < 0)
+ return ret;
+
+ val->intval = ret;
if (val->intval & 0x8000) {
/* Negative */
val->intval = ~val->intval & 0x7fff;
val->intval++;
val->intval *= -1;
}
- val->intval >>= 4;
- val->intval *= 1000000 * 25 / chip->pdata->r_sns;
+ val->intval *= 1562500 / chip->pdata->r_sns;
} else {
return -EINVAL;
}
break;
case POWER_SUPPLY_PROP_CURRENT_AVG:
if (chip->pdata->enable_current_sense) {
- val->intval = max17042_read_reg(chip->client,
- MAX17042_AvgCurrent);
+ ret = max17042_read_reg(chip->client,
+ MAX17042_AvgCurrent);
+ if (ret < 0)
+ return ret;
+
+ val->intval = ret;
if (val->intval & 0x8000) {
/* Negative */
val->intval = ~val->intval & 0x7fff;
@@ -210,6 +240,9 @@ static int __devinit max17042_probe(struct i2c_client *client,
if (!chip->pdata->enable_current_sense)
chip->battery.num_properties -= 2;
+ if (chip->pdata->r_sns == 0)
+ chip->pdata->r_sns = MAX17042_DEFAULT_SNS_RESISTOR;
+
ret = power_supply_register(&client->dev, &chip->battery);
if (ret) {
dev_err(&client->dev, "failed: power supply register\n");
@@ -226,9 +259,6 @@ static int __devinit max17042_probe(struct i2c_client *client,
max17042_write_reg(client, MAX17042_CGAIN, 0x0000);
max17042_write_reg(client, MAX17042_MiscCFG, 0x0003);
max17042_write_reg(client, MAX17042_LearnCFG, 0x0007);
- } else {
- if (chip->pdata->r_sns == 0)
- chip->pdata->r_sns = MAX17042_DEFAULT_SNS_RESISTOR;
}
return 0;
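
The VOLTAGE_NOW/VOLTAGE_AVG hunks above replace the old approximation (raw * 83, i.e. roughly 1000/12) with the exact scaling the new code uses: 625/8 = 78.125 uV per LSB of the 16-bit VCELL/AvgVCELL registers. A standalone sketch of the same arithmetic, for reference only:

#include <stdint.h>

/* Raw VCELL register value to microvolts: 625/8 = 78.125 uV per LSB,
 * matching the "ret * 625 / 8" conversion introduced above. */
static inline int32_t max17042_vcell_to_uv(uint16_t raw)
{
	return (int32_t)raw * 625 / 8;
}
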
diff --git a/drivers/power/max8903_charger.c b/drivers/power/max8903_charger.c
index 2595145f3bff..3e23f43e98af 100644
--- a/drivers/power/max8903_charger.c
+++ b/drivers/power/max8903_charger.c
@@ -374,19 +374,9 @@ static struct platform_driver max8903_driver = {
},
};
-static int __init max8903_init(void)
-{
- return platform_driver_register(&max8903_driver);
-}
-module_init(max8903_init);
-
-static void __exit max8903_exit(void)
-{
- platform_driver_unregister(&max8903_driver);
-}
-module_exit(max8903_exit);
+module_platform_driver(max8903_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MAX8903 Charger Driver");
MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
-MODULE_ALIAS("max8903-charger");
+MODULE_ALIAS("platform:max8903-charger");
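
Several drivers in this series (max8903, max8925, max8998, pcf50633, pda-power, s3c-adc-battery, olpc-battery) drop their hand-rolled module init/exit pairs in favour of module_platform_driver(). The macro generates essentially the same registration boilerplate that is being removed; a simplified sketch of what it expands to (not the exact kernel definition):

#define module_platform_driver(__driver)			\
static int __init __driver##_init(void)			\
{								\
	return platform_driver_register(&(__driver));		\
}								\
module_init(__driver##_init);					\
static void __exit __driver##_exit(void)			\
{								\
	platform_driver_unregister(&(__driver));		\
}								\
module_exit(__driver##_exit);
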
diff --git a/drivers/power/max8925_power.c b/drivers/power/max8925_power.c
index a70e16d3a3dc..daa333bd7ebb 100644
--- a/drivers/power/max8925_power.c
+++ b/drivers/power/max8925_power.c
@@ -78,6 +78,8 @@ struct max8925_power_info {
	unsigned	batt_detect:1;	/* detecting MB by ID pin */
unsigned topoff_threshold:2;
unsigned fast_charge:3;
+ unsigned no_temp_support:1;
+ unsigned no_insert_detect:1;
int (*set_charger) (int);
};
@@ -116,17 +118,7 @@ static irqreturn_t max8925_charger_handler(int irq, void *data)
case MAX8925_IRQ_VCHG_DC_F:
info->ac_online = 0;
__set_charger(info, 0);
- dev_dbg(chip->dev, "Adapter is removal\n");
- break;
- case MAX8925_IRQ_VCHG_USB_R:
- info->usb_online = 1;
- __set_charger(info, 1);
- dev_dbg(chip->dev, "USB inserted\n");
- break;
- case MAX8925_IRQ_VCHG_USB_F:
- info->usb_online = 0;
- __set_charger(info, 0);
- dev_dbg(chip->dev, "USB is removal\n");
+ dev_dbg(chip->dev, "Adapter removed\n");
break;
case MAX8925_IRQ_VCHG_THM_OK_F:
/* Battery is not ready yet */
@@ -168,27 +160,33 @@ static irqreturn_t max8925_charger_handler(int irq, void *data)
static int start_measure(struct max8925_power_info *info, int type)
{
unsigned char buf[2] = {0, 0};
+ int meas_cmd;
int meas_reg = 0, ret;
switch (type) {
case MEASURE_VCHG:
+ meas_cmd = MAX8925_CMD_VCHG;
meas_reg = MAX8925_ADC_VCHG;
break;
case MEASURE_VBBATT:
+ meas_cmd = MAX8925_CMD_VBBATT;
meas_reg = MAX8925_ADC_VBBATT;
break;
case MEASURE_VMBATT:
+ meas_cmd = MAX8925_CMD_VMBATT;
meas_reg = MAX8925_ADC_VMBATT;
break;
case MEASURE_ISNS:
+ meas_cmd = MAX8925_CMD_ISNS;
meas_reg = MAX8925_ADC_ISNS;
break;
default:
return -EINVAL;
}
+ max8925_reg_write(info->adc, meas_cmd, 0);
max8925_bulk_read(info->adc, meas_reg, 2, buf);
- ret = (buf[0] << 4) | (buf[1] >> 4);
+ ret = ((buf[0]<<8) | buf[1]) >> 4;
return ret;
}
@@ -208,7 +206,7 @@ static int max8925_ac_get_prop(struct power_supply *psy,
if (info->ac_online) {
ret = start_measure(info, MEASURE_VCHG);
if (ret >= 0) {
- val->intval = ret << 1; /* unit is mV */
+ val->intval = ret * 2000; /* unit is uV */
goto out;
}
}
@@ -242,7 +240,7 @@ static int max8925_usb_get_prop(struct power_supply *psy,
if (info->usb_online) {
ret = start_measure(info, MEASURE_VCHG);
if (ret >= 0) {
- val->intval = ret << 1; /* unit is mV */
+ val->intval = ret * 2000; /* unit is uV */
goto out;
}
}
@@ -266,7 +264,6 @@ static int max8925_bat_get_prop(struct power_supply *psy,
union power_supply_propval *val)
{
struct max8925_power_info *info = dev_get_drvdata(psy->dev->parent);
- long long int tmp = 0;
int ret = 0;
switch (psp) {
@@ -277,7 +274,7 @@ static int max8925_bat_get_prop(struct power_supply *psy,
if (info->bat_online) {
ret = start_measure(info, MEASURE_VMBATT);
if (ret >= 0) {
- val->intval = ret << 1; /* unit is mV */
+ val->intval = ret * 2000; /* unit is uV */
ret = 0;
break;
}
@@ -288,8 +285,8 @@ static int max8925_bat_get_prop(struct power_supply *psy,
if (info->bat_online) {
ret = start_measure(info, MEASURE_ISNS);
if (ret >= 0) {
- tmp = (long long int)ret * 6250 / 4096 - 3125;
- ret = (int)tmp;
+			/* assume r_sns is 0.02 ohm */
+			ret = (ret * 6250) - 3125;	/* uA */
val->intval = 0;
if (ret > 0)
val->intval = ret; /* unit is mA */
@@ -365,13 +362,14 @@ static __devinit int max8925_init_charger(struct max8925_chip *chip,
int ret;
REQUEST_IRQ(MAX8925_IRQ_VCHG_DC_OVP, "ac-ovp");
- REQUEST_IRQ(MAX8925_IRQ_VCHG_DC_F, "ac-remove");
- REQUEST_IRQ(MAX8925_IRQ_VCHG_DC_R, "ac-insert");
- REQUEST_IRQ(MAX8925_IRQ_VCHG_USB_OVP, "usb-ovp");
- REQUEST_IRQ(MAX8925_IRQ_VCHG_USB_F, "usb-remove");
- REQUEST_IRQ(MAX8925_IRQ_VCHG_USB_R, "usb-insert");
- REQUEST_IRQ(MAX8925_IRQ_VCHG_THM_OK_R, "batt-temp-in-range");
- REQUEST_IRQ(MAX8925_IRQ_VCHG_THM_OK_F, "batt-temp-out-range");
+ if (!info->no_insert_detect) {
+ REQUEST_IRQ(MAX8925_IRQ_VCHG_DC_F, "ac-remove");
+ REQUEST_IRQ(MAX8925_IRQ_VCHG_DC_R, "ac-insert");
+ }
+ if (!info->no_temp_support) {
+ REQUEST_IRQ(MAX8925_IRQ_VCHG_THM_OK_R, "batt-temp-in-range");
+ REQUEST_IRQ(MAX8925_IRQ_VCHG_THM_OK_F, "batt-temp-out-range");
+ }
REQUEST_IRQ(MAX8925_IRQ_VCHG_SYSLOW_F, "vsys-high");
REQUEST_IRQ(MAX8925_IRQ_VCHG_SYSLOW_R, "vsys-low");
REQUEST_IRQ(MAX8925_IRQ_VCHG_RST, "charger-reset");
@@ -379,9 +377,15 @@ static __devinit int max8925_init_charger(struct max8925_chip *chip,
REQUEST_IRQ(MAX8925_IRQ_VCHG_TOPOFF, "charger-topoff");
REQUEST_IRQ(MAX8925_IRQ_VCHG_TMR_FAULT, "charger-timer-expire");
- info->ac_online = 0;
info->usb_online = 0;
info->bat_online = 0;
+
+ /* check for power - can miss interrupt at boot time */
+ if (start_measure(info, MEASURE_VCHG) * 2000 > 500000)
+ info->ac_online = 1;
+ else
+ info->ac_online = 0;
+
ret = max8925_reg_read(info->gpm, MAX8925_CHG_STATUS);
if (ret >= 0) {
/*
@@ -449,6 +453,8 @@ static __devinit int max8925_power_probe(struct platform_device *pdev)
info->ac.properties = max8925_ac_props;
info->ac.num_properties = ARRAY_SIZE(max8925_ac_props);
info->ac.get_property = max8925_ac_get_prop;
+ info->ac.supplied_to = pdata->supplied_to;
+ info->ac.num_supplicants = pdata->num_supplicants;
ret = power_supply_register(&pdev->dev, &info->ac);
if (ret)
goto out;
@@ -459,6 +465,9 @@ static __devinit int max8925_power_probe(struct platform_device *pdev)
info->usb.properties = max8925_usb_props;
info->usb.num_properties = ARRAY_SIZE(max8925_usb_props);
info->usb.get_property = max8925_usb_get_prop;
+ info->usb.supplied_to = pdata->supplied_to;
+ info->usb.num_supplicants = pdata->num_supplicants;
+
ret = power_supply_register(&pdev->dev, &info->usb);
if (ret)
goto out_usb;
@@ -478,6 +487,8 @@ static __devinit int max8925_power_probe(struct platform_device *pdev)
info->topoff_threshold = pdata->topoff_threshold;
info->fast_charge = pdata->fast_charge;
info->set_charger = pdata->set_charger;
+ info->no_temp_support = pdata->no_temp_support;
+ info->no_insert_detect = pdata->no_insert_detect;
max8925_init_charger(chip, info);
return 0;
@@ -512,17 +523,7 @@ static struct platform_driver max8925_power_driver = {
},
};
-static int __init max8925_power_init(void)
-{
- return platform_driver_register(&max8925_power_driver);
-}
-module_init(max8925_power_init);
-
-static void __exit max8925_power_exit(void)
-{
- platform_driver_unregister(&max8925_power_driver);
-}
-module_exit(max8925_power_exit);
+module_platform_driver(max8925_power_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Power supply driver for MAX8925");
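
The max8925 hunks above now write the measurement command before reading the ADC result registers and report voltages in microvolts, as the power supply class expects, instead of millivolts. With the 2 mV-per-count scaling implied by the old "ret << 1 /* mV */" code, the new conversion is just a factor of 2000; a small sketch of that conversion:

/* ADC count (2 mV per LSB, per the old mV conversion above) to microvolts. */
static inline int max8925_adc_to_uv(int count)
{
	return count * 2000;
}
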
diff --git a/drivers/power/max8997_charger.c b/drivers/power/max8997_charger.c
index a23317d75c5a..6e88c5d026b9 100644
--- a/drivers/power/max8997_charger.c
+++ b/drivers/power/max8997_charger.c
@@ -19,7 +19,6 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#include <linux/module.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -98,7 +97,7 @@ static __devinit int max8997_battery_probe(struct platform_device *pdev)
return -EINVAL;
if (pdata->eoc_mA) {
- u8 val = (pdata->eoc_mA - 50) / 10;
+ int val = (pdata->eoc_mA - 50) / 10;
if (val < 0)
val = 0;
if (val > 0xf)
@@ -179,6 +178,7 @@ static int __devexit max8997_battery_remove(struct platform_device *pdev)
static const struct platform_device_id max8997_battery_id[] = {
{ "max8997-battery", 0 },
+ { }
};
static struct platform_driver max8997_battery_driver = {
diff --git a/drivers/power/max8998_charger.c b/drivers/power/max8998_charger.c
index 93e3bb47a3a8..9b3f2bf56e70 100644
--- a/drivers/power/max8998_charger.c
+++ b/drivers/power/max8998_charger.c
@@ -154,6 +154,7 @@ static __devinit int max8998_battery_probe(struct platform_device *pdev)
case 0:
dev_dbg(max8998->dev,
"Full Timeout not set: leave it unchanged.\n");
+ break;
default:
dev_err(max8998->dev, "Invalid Full Timeout value\n");
ret = -EINVAL;
@@ -190,6 +191,7 @@ static int __devexit max8998_battery_remove(struct platform_device *pdev)
static const struct platform_device_id max8998_battery_id[] = {
{ "max8998-battery", TYPE_MAX8998 },
+ { }
};
static struct platform_driver max8998_battery_driver = {
@@ -202,17 +204,7 @@ static struct platform_driver max8998_battery_driver = {
.id_table = max8998_battery_id,
};
-static int __init max8998_battery_init(void)
-{
- return platform_driver_register(&max8998_battery_driver);
-}
-module_init(max8998_battery_init);
-
-static void __exit max8998_battery_cleanup(void)
-{
- platform_driver_unregister(&max8998_battery_driver);
-}
-module_exit(max8998_battery_cleanup);
+module_platform_driver(max8998_battery_driver);
MODULE_DESCRIPTION("MAXIM 8998 battery control driver");
MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
diff --git a/drivers/power/olpc_battery.c b/drivers/power/olpc_battery.c
index 0b0ff3a936a6..7385092f9bc8 100644
--- a/drivers/power/olpc_battery.c
+++ b/drivers/power/olpc_battery.c
@@ -519,29 +519,35 @@ static struct device_attribute olpc_bat_error = {
* Initialisation
*********************************************************************/
-static struct platform_device *bat_pdev;
-
static struct power_supply olpc_bat = {
+ .name = "olpc-battery",
.get_property = olpc_bat_get_property,
.use_for_apm = 1,
};
-void olpc_battery_trigger_uevent(unsigned long cause)
+static int olpc_battery_suspend(struct platform_device *pdev,
+ pm_message_t state)
{
- if (cause & EC_SCI_SRC_ACPWR)
- kobject_uevent(&olpc_ac.dev->kobj, KOBJ_CHANGE);
- if (cause & (EC_SCI_SRC_BATERR|EC_SCI_SRC_BATSOC|EC_SCI_SRC_BATTERY))
- kobject_uevent(&olpc_bat.dev->kobj, KOBJ_CHANGE);
+ if (device_may_wakeup(olpc_ac.dev))
+ olpc_ec_wakeup_set(EC_SCI_SRC_ACPWR);
+ else
+ olpc_ec_wakeup_clear(EC_SCI_SRC_ACPWR);
+
+ if (device_may_wakeup(olpc_bat.dev))
+ olpc_ec_wakeup_set(EC_SCI_SRC_BATTERY | EC_SCI_SRC_BATSOC
+ | EC_SCI_SRC_BATERR);
+ else
+ olpc_ec_wakeup_clear(EC_SCI_SRC_BATTERY | EC_SCI_SRC_BATSOC
+ | EC_SCI_SRC_BATERR);
+
+ return 0;
}
-static int __init olpc_bat_init(void)
+static int __devinit olpc_battery_probe(struct platform_device *pdev)
{
- int ret = 0;
+ int ret;
uint8_t status;
- if (!olpc_platform_info.ecver)
- return -ENXIO;
-
/*
* We've seen a number of EC protocol changes; this driver requires
* the latest EC protocol, supported by 0x44 and above.
@@ -558,15 +564,10 @@ static int __init olpc_bat_init(void)
/* Ignore the status. It doesn't actually matter */
- bat_pdev = platform_device_register_simple("olpc-battery", 0, NULL, 0);
- if (IS_ERR(bat_pdev))
- return PTR_ERR(bat_pdev);
-
- ret = power_supply_register(&bat_pdev->dev, &olpc_ac);
+ ret = power_supply_register(&pdev->dev, &olpc_ac);
if (ret)
- goto ac_failed;
+ return ret;
- olpc_bat.name = bat_pdev->name;
if (olpc_board_at_least(olpc_board_pre(0xd0))) { /* XO-1.5 */
olpc_bat.properties = olpc_xo15_bat_props;
olpc_bat.num_properties = ARRAY_SIZE(olpc_xo15_bat_props);
@@ -575,7 +576,7 @@ static int __init olpc_bat_init(void)
olpc_bat.num_properties = ARRAY_SIZE(olpc_xo1_bat_props);
}
- ret = power_supply_register(&bat_pdev->dev, &olpc_bat);
+ ret = power_supply_register(&pdev->dev, &olpc_bat);
if (ret)
goto battery_failed;
@@ -587,7 +588,12 @@ static int __init olpc_bat_init(void)
if (ret)
goto error_failed;
- goto success;
+ if (olpc_ec_wakeup_available()) {
+ device_set_wakeup_capable(olpc_ac.dev, true);
+ device_set_wakeup_capable(olpc_bat.dev, true);
+ }
+
+ return 0;
error_failed:
device_remove_bin_file(olpc_bat.dev, &olpc_bat_eeprom);
@@ -595,23 +601,36 @@ eeprom_failed:
power_supply_unregister(&olpc_bat);
battery_failed:
power_supply_unregister(&olpc_ac);
-ac_failed:
- platform_device_unregister(bat_pdev);
-success:
return ret;
}
-static void __exit olpc_bat_exit(void)
+static int __devexit olpc_battery_remove(struct platform_device *pdev)
{
device_remove_file(olpc_bat.dev, &olpc_bat_error);
device_remove_bin_file(olpc_bat.dev, &olpc_bat_eeprom);
power_supply_unregister(&olpc_bat);
power_supply_unregister(&olpc_ac);
- platform_device_unregister(bat_pdev);
+ return 0;
}
-module_init(olpc_bat_init);
-module_exit(olpc_bat_exit);
+static const struct of_device_id olpc_battery_ids[] __devinitconst = {
+ { .compatible = "olpc,xo1-battery" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, olpc_battery_ids);
+
+static struct platform_driver olpc_battery_driver = {
+ .driver = {
+ .name = "olpc-battery",
+ .owner = THIS_MODULE,
+ .of_match_table = olpc_battery_ids,
+ },
+ .probe = olpc_battery_probe,
+ .remove = __devexit_p(olpc_battery_remove),
+ .suspend = olpc_battery_suspend,
+};
+
+module_platform_driver(olpc_battery_driver);
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_LICENSE("GPL");
diff --git a/drivers/power/pcf50633-charger.c b/drivers/power/pcf50633-charger.c
index 4fa52e1781a2..3d1e9efb6f53 100644
--- a/drivers/power/pcf50633-charger.c
+++ b/drivers/power/pcf50633-charger.c
@@ -474,17 +474,7 @@ static struct platform_driver pcf50633_mbc_driver = {
.remove = __devexit_p(pcf50633_mbc_remove),
};
-static int __init pcf50633_mbc_init(void)
-{
- return platform_driver_register(&pcf50633_mbc_driver);
-}
-module_init(pcf50633_mbc_init);
-
-static void __exit pcf50633_mbc_exit(void)
-{
- platform_driver_unregister(&pcf50633_mbc_driver);
-}
-module_exit(pcf50633_mbc_exit);
+module_platform_driver(pcf50633_mbc_driver);
MODULE_AUTHOR("Balaji Rao <balajirrao@openmoko.org>");
MODULE_DESCRIPTION("PCF50633 mbc driver");
diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c
index 69f8aa3a6a4b..fd49689738af 100644
--- a/drivers/power/pda_power.c
+++ b/drivers/power/pda_power.c
@@ -14,6 +14,7 @@
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
+#include <linux/notifier.h>
#include <linux/power_supply.h>
#include <linux/pda_power.h>
#include <linux/regulator/consumer.h>
@@ -40,7 +41,9 @@ static int polling;
#ifdef CONFIG_USB_OTG_UTILS
static struct otg_transceiver *transceiver;
+static struct notifier_block otg_nb;
#endif
+
static struct regulator *ac_draw;
enum {
@@ -222,7 +225,42 @@ static void polling_timer_func(unsigned long unused)
#ifdef CONFIG_USB_OTG_UTILS
static int otg_is_usb_online(void)
{
- return (transceiver->state == OTG_STATE_B_PERIPHERAL);
+ return (transceiver->last_event == USB_EVENT_VBUS ||
+ transceiver->last_event == USB_EVENT_ENUMERATED);
+}
+
+static int otg_is_ac_online(void)
+{
+ return (transceiver->last_event == USB_EVENT_CHARGER);
+}
+
+static int otg_handle_notification(struct notifier_block *nb,
+ unsigned long event, void *unused)
+{
+ switch (event) {
+ case USB_EVENT_CHARGER:
+ ac_status = PDA_PSY_TO_CHANGE;
+ break;
+ case USB_EVENT_VBUS:
+ case USB_EVENT_ENUMERATED:
+ usb_status = PDA_PSY_TO_CHANGE;
+ break;
+ case USB_EVENT_NONE:
+ ac_status = PDA_PSY_TO_CHANGE;
+ usb_status = PDA_PSY_TO_CHANGE;
+ break;
+ default:
+ return NOTIFY_OK;
+ }
+
+ /*
+ * Wait a bit before reading ac/usb line status and setting charger,
+	 * because ac/usb status readings may lag behind the irq.
+ */
+ mod_timer(&charger_timer,
+ jiffies + msecs_to_jiffies(pdata->wait_for_status));
+
+ return NOTIFY_OK;
}
#endif
@@ -282,6 +320,16 @@ static int pda_power_probe(struct platform_device *pdev)
ret = PTR_ERR(ac_draw);
}
+#ifdef CONFIG_USB_OTG_UTILS
+ transceiver = otg_get_transceiver();
+ if (transceiver && !pdata->is_usb_online) {
+ pdata->is_usb_online = otg_is_usb_online;
+ }
+ if (transceiver && !pdata->is_ac_online) {
+ pdata->is_ac_online = otg_is_ac_online;
+ }
+#endif
+
if (pdata->is_ac_online) {
ret = power_supply_register(&pdev->dev, &pda_psy_ac);
if (ret) {
@@ -303,13 +351,6 @@ static int pda_power_probe(struct platform_device *pdev)
}
}
-#ifdef CONFIG_USB_OTG_UTILS
- transceiver = otg_get_transceiver();
- if (transceiver && !pdata->is_usb_online) {
- pdata->is_usb_online = otg_is_usb_online;
- }
-#endif
-
if (pdata->is_usb_online) {
ret = power_supply_register(&pdev->dev, &pda_psy_usb);
if (ret) {
@@ -331,6 +372,18 @@ static int pda_power_probe(struct platform_device *pdev)
}
}
+#ifdef CONFIG_USB_OTG_UTILS
+ if (transceiver && pdata->use_otg_notifier) {
+ otg_nb.notifier_call = otg_handle_notification;
+ ret = otg_register_notifier(transceiver, &otg_nb);
+ if (ret) {
+ dev_err(dev, "failure to register otg notifier\n");
+ goto otg_reg_notifier_failed;
+ }
+ polling = 0;
+ }
+#endif
+
if (polling) {
dev_dbg(dev, "will poll for status\n");
setup_timer(&polling_timer, polling_timer_func, 0);
@@ -343,6 +396,11 @@ static int pda_power_probe(struct platform_device *pdev)
return 0;
+#ifdef CONFIG_USB_OTG_UTILS
+otg_reg_notifier_failed:
+ if (pdata->is_usb_online && usb_irq)
+ free_irq(usb_irq->start, &pda_psy_usb);
+#endif
usb_irq_failed:
if (pdata->is_usb_online)
power_supply_unregister(&pda_psy_usb);
@@ -440,8 +498,6 @@ static int pda_power_resume(struct platform_device *pdev)
#define pda_power_resume NULL
#endif /* CONFIG_PM */
-MODULE_ALIAS("platform:pda-power");
-
static struct platform_driver pda_power_pdrv = {
.driver = {
.name = "pda-power",
@@ -452,17 +508,8 @@ static struct platform_driver pda_power_pdrv = {
.resume = pda_power_resume,
};
-static int __init pda_power_init(void)
-{
- return platform_driver_register(&pda_power_pdrv);
-}
+module_platform_driver(pda_power_pdrv);
-static void __exit pda_power_exit(void)
-{
- platform_driver_unregister(&pda_power_pdrv);
-}
-
-module_init(pda_power_init);
-module_exit(pda_power_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Anton Vorontsov <cbou@mail.ru>");
+MODULE_ALIAS("platform:pda-power");
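
With the notifier path added above, a board can opt out of polling entirely and let the OTG transceiver events drive AC/USB status. A hypothetical board sketch, assuming the platform data struct is named struct pda_power_pdata and carries the fields dereferenced in probe() above:

/* Illustrative only: field values are placeholders. */
static struct pda_power_pdata board_power_pdata = {
	.use_otg_notifier = 1,
	.wait_for_status  = 500,	/* ms to wait before re-reading status */
	/* .is_ac_online / .is_usb_online left NULL so the transceiver
	 * helpers installed in probe() above are used instead. */
};
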
diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
index 329b46b2327d..6ad612726785 100644
--- a/drivers/power/power_supply_core.c
+++ b/drivers/power/power_supply_core.c
@@ -98,7 +98,9 @@ static int __power_supply_is_system_supplied(struct device *dev, void *data)
{
union power_supply_propval ret = {0,};
struct power_supply *psy = dev_get_drvdata(dev);
+ unsigned int *count = data;
+ (*count)++;
if (psy->type != POWER_SUPPLY_TYPE_BATTERY) {
if (psy->get_property(psy, POWER_SUPPLY_PROP_ONLINE, &ret))
return 0;
@@ -111,10 +113,18 @@ static int __power_supply_is_system_supplied(struct device *dev, void *data)
int power_supply_is_system_supplied(void)
{
int error;
+ unsigned int count = 0;
- error = class_for_each_device(power_supply_class, NULL, NULL,
+ error = class_for_each_device(power_supply_class, NULL, &count,
__power_supply_is_system_supplied);
+ /*
+ * If no power class device was found at all, most probably we are
+ * running on a desktop system, so assume we are on mains power.
+ */
+ if (count == 0)
+ return 1;
+
return error;
}
EXPORT_SYMBOL_GPL(power_supply_is_system_supplied);
@@ -147,6 +157,12 @@ struct power_supply *power_supply_get_by_name(char *name)
}
EXPORT_SYMBOL_GPL(power_supply_get_by_name);
+int power_supply_powers(struct power_supply *psy, struct device *dev)
+{
+ return sysfs_create_link(&psy->dev->kobj, &dev->kobj, "powers");
+}
+EXPORT_SYMBOL_GPL(power_supply_powers);
+
static void power_supply_dev_release(struct device *dev)
{
pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
@@ -202,6 +218,7 @@ EXPORT_SYMBOL_GPL(power_supply_register);
void power_supply_unregister(struct power_supply *psy)
{
cancel_work_sync(&psy->changed_work);
+ sysfs_remove_link(&psy->dev->kobj, "powers");
power_supply_remove_triggers(psy);
device_unregister(psy->dev);
}
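
power_supply_powers(), added above, creates a "powers" sysfs link from a supply to the device it powers, and power_supply_unregister() now removes it. A hedged usage sketch from a hypothetical driver (names illustrative, error handling minimal):

	ret = power_supply_register(&pdev->dev, &chip->charger);
	if (ret)
		return ret;

	/* Expose which device this supply powers via the new "powers" link. */
	ret = power_supply_powers(&chip->charger, &pdev->dev);
	if (ret)
		dev_warn(&pdev->dev, "failed to create powers link: %d\n", ret);
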
diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
index e15d4c9d3988..b52b57ca3084 100644
--- a/drivers/power/power_supply_sysfs.c
+++ b/drivers/power/power_supply_sysfs.c
@@ -43,7 +43,7 @@ static ssize_t power_supply_show_property(struct device *dev,
struct device_attribute *attr,
char *buf) {
static char *type_text[] = {
- "Battery", "UPS", "Mains", "USB",
+ "Unknown", "Battery", "UPS", "Mains", "USB",
"USB_DCP", "USB_CDP", "USB_ACA"
};
static char *status_text[] = {
@@ -63,6 +63,9 @@ static ssize_t power_supply_show_property(struct device *dev,
static char *capacity_level_text[] = {
"Unknown", "Critical", "Low", "Normal", "High", "Full"
};
+ static char *scope_text[] = {
+ "Unknown", "System", "Device"
+ };
ssize_t ret = 0;
struct power_supply *psy = dev_get_drvdata(dev);
const ptrdiff_t off = attr - power_supply_attrs;
@@ -78,8 +81,8 @@ static ssize_t power_supply_show_property(struct device *dev,
dev_dbg(dev, "driver has no data for `%s' property\n",
attr->attr.name);
else if (ret != -ENODEV)
- dev_err(dev, "driver failed to report `%s' property\n",
- attr->attr.name);
+ dev_err(dev, "driver failed to report `%s' property: %zd\n",
+ attr->attr.name, ret);
return ret;
}
@@ -95,6 +98,8 @@ static ssize_t power_supply_show_property(struct device *dev,
return sprintf(buf, "%s\n", capacity_level_text[value.intval]);
else if (off == POWER_SUPPLY_PROP_TYPE)
return sprintf(buf, "%s\n", type_text[value.intval]);
+ else if (off == POWER_SUPPLY_PROP_SCOPE)
+ return sprintf(buf, "%s\n", scope_text[value.intval]);
else if (off >= POWER_SUPPLY_PROP_MODEL_NAME)
return sprintf(buf, "%s\n", value.strval);
@@ -167,6 +172,7 @@ static struct device_attribute power_supply_attrs[] = {
POWER_SUPPLY_ATTR(time_to_full_now),
POWER_SUPPLY_ATTR(time_to_full_avg),
POWER_SUPPLY_ATTR(type),
+ POWER_SUPPLY_ATTR(scope),
/* Properties of type `const char *' */
POWER_SUPPLY_ATTR(model_name),
POWER_SUPPLY_ATTR(manufacturer),
@@ -176,13 +182,13 @@ static struct device_attribute power_supply_attrs[] = {
static struct attribute *
__power_supply_attrs[ARRAY_SIZE(power_supply_attrs) + 1];
-static mode_t power_supply_attr_is_visible(struct kobject *kobj,
+static umode_t power_supply_attr_is_visible(struct kobject *kobj,
struct attribute *attr,
int attrno)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct power_supply *psy = dev_get_drvdata(dev);
- mode_t mode = S_IRUSR | S_IRGRP | S_IROTH;
+ umode_t mode = S_IRUSR | S_IRGRP | S_IROTH;
int i;
if (attrno == POWER_SUPPLY_PROP_TYPE)
diff --git a/drivers/power/s3c_adc_battery.c b/drivers/power/s3c_adc_battery.c
index d32d0d70f9ba..8b804a566756 100644
--- a/drivers/power/s3c_adc_battery.c
+++ b/drivers/power/s3c_adc_battery.c
@@ -47,6 +47,22 @@ static void s3c_adc_bat_ext_power_changed(struct power_supply *psy)
msecs_to_jiffies(JITTER_DELAY));
}
+static int gather_samples(struct s3c_adc_client *client, int num, int channel)
+{
+ int value, i;
+
+ /* default to 1 if nothing is set */
+ if (num < 1)
+ num = 1;
+
+ value = 0;
+ for (i = 0; i < num; i++)
+ value += s3c_adc_read(client, channel);
+ value /= num;
+
+ return value;
+}
+
static enum power_supply_property s3c_adc_backup_bat_props[] = {
POWER_SUPPLY_PROP_VOLTAGE_NOW,
POWER_SUPPLY_PROP_VOLTAGE_MIN,
@@ -67,7 +83,8 @@ static int s3c_adc_backup_bat_get_property(struct power_supply *psy,
if (bat->volt_value < 0 ||
jiffies_to_msecs(jiffies - bat->timestamp) >
BAT_POLL_INTERVAL) {
- bat->volt_value = s3c_adc_read(bat->client,
+ bat->volt_value = gather_samples(bat->client,
+ bat->pdata->backup_volt_samples,
bat->pdata->backup_volt_channel);
bat->volt_value *= bat->pdata->backup_volt_mult;
bat->timestamp = jiffies;
@@ -139,9 +156,11 @@ static int s3c_adc_bat_get_property(struct power_supply *psy,
if (bat->volt_value < 0 || bat->cur_value < 0 ||
jiffies_to_msecs(jiffies - bat->timestamp) >
BAT_POLL_INTERVAL) {
- bat->volt_value = s3c_adc_read(bat->client,
+ bat->volt_value = gather_samples(bat->client,
+ bat->pdata->volt_samples,
bat->pdata->volt_channel) * bat->pdata->volt_mult;
- bat->cur_value = s3c_adc_read(bat->client,
+ bat->cur_value = gather_samples(bat->client,
+ bat->pdata->current_samples,
bat->pdata->current_channel) * bat->pdata->current_mult;
bat->timestamp = jiffies;
}
@@ -421,17 +440,7 @@ static struct platform_driver s3c_adc_bat_driver = {
.resume = s3c_adc_bat_resume,
};
-static int __init s3c_adc_bat_init(void)
-{
- return platform_driver_register(&s3c_adc_bat_driver);
-}
-module_init(s3c_adc_bat_init);
-
-static void __exit s3c_adc_bat_exit(void)
-{
- platform_driver_unregister(&s3c_adc_bat_driver);
-}
-module_exit(s3c_adc_bat_exit);
+module_platform_driver(s3c_adc_bat_driver);
MODULE_AUTHOR("Vasily Khoruzhick <anarsoul@gmail.com>");
MODULE_DESCRIPTION("iPAQ H1930/H1940/RX1950 battery controller driver");
diff --git a/drivers/power/bq20z75.c b/drivers/power/sbs-battery.c
index 9c5e5beda3a8..9ff8af069da6 100644
--- a/drivers/power/bq20z75.c
+++ b/drivers/power/sbs-battery.c
@@ -1,5 +1,5 @@
/*
- * Gas Gauge driver for TI's BQ20Z75
+ * Gas Gauge driver for SBS Compliant Batteries
*
* Copyright (c) 2010, NVIDIA Corporation.
*
@@ -28,7 +28,7 @@
#include <linux/interrupt.h>
#include <linux/gpio.h>
-#include <linux/power/bq20z75.h>
+#include <linux/power/sbs-battery.h>
enum {
REG_MANUFACTURER_DATA,
@@ -53,7 +53,7 @@ enum {
/* Battery Mode defines */
#define BATTERY_MODE_OFFSET 0x03
#define BATTERY_MODE_MASK 0x8000
-enum bq20z75_battery_mode {
+enum sbs_battery_mode {
BATTERY_MODE_AMPS,
BATTERY_MODE_WATTS
};
@@ -67,62 +67,56 @@ enum bq20z75_battery_mode {
#define BATTERY_FULL_CHARGED 0x20
#define BATTERY_FULL_DISCHARGED 0x10
-#define BQ20Z75_DATA(_psp, _addr, _min_value, _max_value) { \
+#define SBS_DATA(_psp, _addr, _min_value, _max_value) { \
.psp = _psp, \
.addr = _addr, \
.min_value = _min_value, \
.max_value = _max_value, \
}
-static const struct bq20z75_device_data {
+static const struct chip_data {
enum power_supply_property psp;
u8 addr;
int min_value;
int max_value;
-} bq20z75_data[] = {
+} sbs_data[] = {
[REG_MANUFACTURER_DATA] =
- BQ20Z75_DATA(POWER_SUPPLY_PROP_PRESENT, 0x00, 0, 65535),
+ SBS_DATA(POWER_SUPPLY_PROP_PRESENT, 0x00, 0, 65535),
[REG_TEMPERATURE] =
- BQ20Z75_DATA(POWER_SUPPLY_PROP_TEMP, 0x08, 0, 65535),
+ SBS_DATA(POWER_SUPPLY_PROP_TEMP, 0x08, 0, 65535),
[REG_VOLTAGE] =
- BQ20Z75_DATA(POWER_SUPPLY_PROP_VOLTAGE_NOW, 0x09, 0, 20000),
+ SBS_DATA(POWER_SUPPLY_PROP_VOLTAGE_NOW, 0x09, 0, 20000),
[REG_CURRENT] =
- BQ20Z75_DATA(POWER_SUPPLY_PROP_CURRENT_NOW, 0x0A, -32768,
- 32767),
+ SBS_DATA(POWER_SUPPLY_PROP_CURRENT_NOW, 0x0A, -32768, 32767),
[REG_CAPACITY] =
- BQ20Z75_DATA(POWER_SUPPLY_PROP_CAPACITY, 0x0E, 0, 100),
+ SBS_DATA(POWER_SUPPLY_PROP_CAPACITY, 0x0E, 0, 100),
[REG_REMAINING_CAPACITY] =
- BQ20Z75_DATA(POWER_SUPPLY_PROP_ENERGY_NOW, 0x0F, 0, 65535),
+ SBS_DATA(POWER_SUPPLY_PROP_ENERGY_NOW, 0x0F, 0, 65535),
[REG_REMAINING_CAPACITY_CHARGE] =
- BQ20Z75_DATA(POWER_SUPPLY_PROP_CHARGE_NOW, 0x0F, 0, 65535),
+ SBS_DATA(POWER_SUPPLY_PROP_CHARGE_NOW, 0x0F, 0, 65535),
[REG_FULL_CHARGE_CAPACITY] =
- BQ20Z75_DATA(POWER_SUPPLY_PROP_ENERGY_FULL, 0x10, 0, 65535),
+ SBS_DATA(POWER_SUPPLY_PROP_ENERGY_FULL, 0x10, 0, 65535),
[REG_FULL_CHARGE_CAPACITY_CHARGE] =
- BQ20Z75_DATA(POWER_SUPPLY_PROP_CHARGE_FULL, 0x10, 0, 65535),
+ SBS_DATA(POWER_SUPPLY_PROP_CHARGE_FULL, 0x10, 0, 65535),
[REG_TIME_TO_EMPTY] =
- BQ20Z75_DATA(POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG, 0x12, 0,
- 65535),
+ SBS_DATA(POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG, 0x12, 0, 65535),
[REG_TIME_TO_FULL] =
- BQ20Z75_DATA(POWER_SUPPLY_PROP_TIME_TO_FULL_AVG, 0x13, 0,
- 65535),
+ SBS_DATA(POWER_SUPPLY_PROP_TIME_TO_FULL_AVG, 0x13, 0, 65535),
[REG_STATUS] =
- BQ20Z75_DATA(POWER_SUPPLY_PROP_STATUS, 0x16, 0, 65535),
+ SBS_DATA(POWER_SUPPLY_PROP_STATUS, 0x16, 0, 65535),
[REG_CYCLE_COUNT] =
- BQ20Z75_DATA(POWER_SUPPLY_PROP_CYCLE_COUNT, 0x17, 0, 65535),
+ SBS_DATA(POWER_SUPPLY_PROP_CYCLE_COUNT, 0x17, 0, 65535),
[REG_DESIGN_CAPACITY] =
- BQ20Z75_DATA(POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN, 0x18, 0,
- 65535),
+ SBS_DATA(POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN, 0x18, 0, 65535),
[REG_DESIGN_CAPACITY_CHARGE] =
- BQ20Z75_DATA(POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN, 0x18, 0,
- 65535),
+ SBS_DATA(POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN, 0x18, 0, 65535),
[REG_DESIGN_VOLTAGE] =
- BQ20Z75_DATA(POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN, 0x19, 0,
- 65535),
+ SBS_DATA(POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN, 0x19, 0, 65535),
[REG_SERIAL_NUMBER] =
- BQ20Z75_DATA(POWER_SUPPLY_PROP_SERIAL_NUMBER, 0x1C, 0, 65535),
+ SBS_DATA(POWER_SUPPLY_PROP_SERIAL_NUMBER, 0x1C, 0, 65535),
};
-static enum power_supply_property bq20z75_properties[] = {
+static enum power_supply_property sbs_properties[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_HEALTH,
POWER_SUPPLY_PROP_PRESENT,
@@ -144,10 +138,10 @@ static enum power_supply_property bq20z75_properties[] = {
POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
};
-struct bq20z75_info {
+struct sbs_info {
struct i2c_client *client;
struct power_supply power_supply;
- struct bq20z75_platform_data *pdata;
+ struct sbs_platform_data *pdata;
bool is_present;
bool gpio_detect;
bool enable_detection;
@@ -158,14 +152,14 @@ struct bq20z75_info {
int ignore_changes;
};
-static int bq20z75_read_word_data(struct i2c_client *client, u8 address)
+static int sbs_read_word_data(struct i2c_client *client, u8 address)
{
- struct bq20z75_info *bq20z75_device = i2c_get_clientdata(client);
+ struct sbs_info *chip = i2c_get_clientdata(client);
s32 ret = 0;
int retries = 1;
- if (bq20z75_device->pdata)
- retries = max(bq20z75_device->pdata->i2c_retry_count + 1, 1);
+ if (chip->pdata)
+ retries = max(chip->pdata->i2c_retry_count + 1, 1);
while (retries > 0) {
ret = i2c_smbus_read_word_data(client, address);
@@ -184,15 +178,15 @@ static int bq20z75_read_word_data(struct i2c_client *client, u8 address)
return le16_to_cpu(ret);
}
-static int bq20z75_write_word_data(struct i2c_client *client, u8 address,
+static int sbs_write_word_data(struct i2c_client *client, u8 address,
u16 value)
{
- struct bq20z75_info *bq20z75_device = i2c_get_clientdata(client);
+ struct sbs_info *chip = i2c_get_clientdata(client);
s32 ret = 0;
int retries = 1;
- if (bq20z75_device->pdata)
- retries = max(bq20z75_device->pdata->i2c_retry_count + 1, 1);
+ if (chip->pdata)
+ retries = max(chip->pdata->i2c_retry_count + 1, 1);
while (retries > 0) {
ret = i2c_smbus_write_word_data(client, address,
@@ -212,44 +206,41 @@ static int bq20z75_write_word_data(struct i2c_client *client, u8 address,
return 0;
}
-static int bq20z75_get_battery_presence_and_health(
+static int sbs_get_battery_presence_and_health(
struct i2c_client *client, enum power_supply_property psp,
union power_supply_propval *val)
{
s32 ret;
- struct bq20z75_info *bq20z75_device = i2c_get_clientdata(client);
+ struct sbs_info *chip = i2c_get_clientdata(client);
if (psp == POWER_SUPPLY_PROP_PRESENT &&
- bq20z75_device->gpio_detect) {
- ret = gpio_get_value(
- bq20z75_device->pdata->battery_detect);
- if (ret == bq20z75_device->pdata->battery_detect_present)
+ chip->gpio_detect) {
+ ret = gpio_get_value(chip->pdata->battery_detect);
+ if (ret == chip->pdata->battery_detect_present)
val->intval = 1;
else
val->intval = 0;
- bq20z75_device->is_present = val->intval;
+ chip->is_present = val->intval;
return ret;
}
/* Write to ManufacturerAccess with
* ManufacturerAccess command and then
* read the status */
- ret = bq20z75_write_word_data(client,
- bq20z75_data[REG_MANUFACTURER_DATA].addr,
- MANUFACTURER_ACCESS_STATUS);
+ ret = sbs_write_word_data(client, sbs_data[REG_MANUFACTURER_DATA].addr,
+ MANUFACTURER_ACCESS_STATUS);
if (ret < 0) {
if (psp == POWER_SUPPLY_PROP_PRESENT)
val->intval = 0; /* battery removed */
return ret;
}
- ret = bq20z75_read_word_data(client,
- bq20z75_data[REG_MANUFACTURER_DATA].addr);
+ ret = sbs_read_word_data(client, sbs_data[REG_MANUFACTURER_DATA].addr);
if (ret < 0)
return ret;
- if (ret < bq20z75_data[REG_MANUFACTURER_DATA].min_value ||
- ret > bq20z75_data[REG_MANUFACTURER_DATA].max_value) {
+ if (ret < sbs_data[REG_MANUFACTURER_DATA].min_value ||
+ ret > sbs_data[REG_MANUFACTURER_DATA].max_value) {
val->intval = 0;
return 0;
}
@@ -279,24 +270,23 @@ static int bq20z75_get_battery_presence_and_health(
return 0;
}
-static int bq20z75_get_battery_property(struct i2c_client *client,
+static int sbs_get_battery_property(struct i2c_client *client,
int reg_offset, enum power_supply_property psp,
union power_supply_propval *val)
{
- struct bq20z75_info *bq20z75_device = i2c_get_clientdata(client);
+ struct sbs_info *chip = i2c_get_clientdata(client);
s32 ret;
- ret = bq20z75_read_word_data(client,
- bq20z75_data[reg_offset].addr);
+ ret = sbs_read_word_data(client, sbs_data[reg_offset].addr);
if (ret < 0)
return ret;
/* returned values are 16 bit */
- if (bq20z75_data[reg_offset].min_value < 0)
+ if (sbs_data[reg_offset].min_value < 0)
ret = (s16)ret;
- if (ret >= bq20z75_data[reg_offset].min_value &&
- ret <= bq20z75_data[reg_offset].max_value) {
+ if (ret >= sbs_data[reg_offset].min_value &&
+ ret <= sbs_data[reg_offset].max_value) {
val->intval = ret;
if (psp != POWER_SUPPLY_PROP_STATUS)
return 0;
@@ -310,12 +300,12 @@ static int bq20z75_get_battery_property(struct i2c_client *client,
else
val->intval = POWER_SUPPLY_STATUS_CHARGING;
- if (bq20z75_device->poll_time == 0)
- bq20z75_device->last_state = val->intval;
- else if (bq20z75_device->last_state != val->intval) {
- cancel_delayed_work_sync(&bq20z75_device->work);
- power_supply_changed(&bq20z75_device->power_supply);
- bq20z75_device->poll_time = 0;
+ if (chip->poll_time == 0)
+ chip->last_state = val->intval;
+ else if (chip->last_state != val->intval) {
+ cancel_delayed_work_sync(&chip->work);
+ power_supply_changed(&chip->power_supply);
+ chip->poll_time = 0;
}
} else {
if (psp == POWER_SUPPLY_PROP_STATUS)
@@ -327,7 +317,7 @@ static int bq20z75_get_battery_property(struct i2c_client *client,
return 0;
}
-static void bq20z75_unit_adjustment(struct i2c_client *client,
+static void sbs_unit_adjustment(struct i2c_client *client,
enum power_supply_property psp, union power_supply_propval *val)
{
#define BASE_UNIT_CONVERSION 1000
@@ -338,7 +328,7 @@ static void bq20z75_unit_adjustment(struct i2c_client *client,
case POWER_SUPPLY_PROP_ENERGY_NOW:
case POWER_SUPPLY_PROP_ENERGY_FULL:
case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
- /* bq20z75 provides energy in units of 10mWh.
+ /* sbs provides energy in units of 10mWh.
* Convert to µWh
*/
val->intval *= BATTERY_MODE_CAP_MULT_WATT;
@@ -354,7 +344,7 @@ static void bq20z75_unit_adjustment(struct i2c_client *client,
break;
case POWER_SUPPLY_PROP_TEMP:
- /* bq20z75 provides battery temperature in 0.1K
+ /* sbs provides battery temperature in 0.1K
* so convert it to 0.1°C
*/
val->intval -= TEMP_KELVIN_TO_CELSIUS;
@@ -362,7 +352,7 @@ static void bq20z75_unit_adjustment(struct i2c_client *client,
case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG:
case POWER_SUPPLY_PROP_TIME_TO_FULL_AVG:
- /* bq20z75 provides time to empty and time to full in minutes.
+ /* sbs provides time to empty and time to full in minutes.
* Convert to seconds
*/
val->intval *= TIME_UNIT_CONVERSION;
@@ -374,13 +364,12 @@ static void bq20z75_unit_adjustment(struct i2c_client *client,
}
}
-static enum bq20z75_battery_mode
-bq20z75_set_battery_mode(struct i2c_client *client,
- enum bq20z75_battery_mode mode)
+static enum sbs_battery_mode sbs_set_battery_mode(struct i2c_client *client,
+ enum sbs_battery_mode mode)
{
int ret, original_val;
- original_val = bq20z75_read_word_data(client, BATTERY_MODE_OFFSET);
+ original_val = sbs_read_word_data(client, BATTERY_MODE_OFFSET);
if (original_val < 0)
return original_val;
@@ -392,68 +381,67 @@ bq20z75_set_battery_mode(struct i2c_client *client,
else
ret = original_val | BATTERY_MODE_MASK;
- ret = bq20z75_write_word_data(client, BATTERY_MODE_OFFSET, ret);
+ ret = sbs_write_word_data(client, BATTERY_MODE_OFFSET, ret);
if (ret < 0)
return ret;
return original_val & BATTERY_MODE_MASK;
}
-static int bq20z75_get_battery_capacity(struct i2c_client *client,
+static int sbs_get_battery_capacity(struct i2c_client *client,
int reg_offset, enum power_supply_property psp,
union power_supply_propval *val)
{
s32 ret;
- enum bq20z75_battery_mode mode = BATTERY_MODE_WATTS;
+ enum sbs_battery_mode mode = BATTERY_MODE_WATTS;
if (power_supply_is_amp_property(psp))
mode = BATTERY_MODE_AMPS;
- mode = bq20z75_set_battery_mode(client, mode);
+ mode = sbs_set_battery_mode(client, mode);
if (mode < 0)
return mode;
- ret = bq20z75_read_word_data(client, bq20z75_data[reg_offset].addr);
+ ret = sbs_read_word_data(client, sbs_data[reg_offset].addr);
if (ret < 0)
return ret;
if (psp == POWER_SUPPLY_PROP_CAPACITY) {
- /* bq20z75 spec says that this can be >100 %
+ /* sbs spec says that this can be >100 %
* even if max value is 100 % */
val->intval = min(ret, 100);
} else
val->intval = ret;
- ret = bq20z75_set_battery_mode(client, mode);
+ ret = sbs_set_battery_mode(client, mode);
if (ret < 0)
return ret;
return 0;
}
-static char bq20z75_serial[5];
-static int bq20z75_get_battery_serial_number(struct i2c_client *client,
+static char sbs_serial[5];
+static int sbs_get_battery_serial_number(struct i2c_client *client,
union power_supply_propval *val)
{
int ret;
- ret = bq20z75_read_word_data(client,
- bq20z75_data[REG_SERIAL_NUMBER].addr);
+ ret = sbs_read_word_data(client, sbs_data[REG_SERIAL_NUMBER].addr);
if (ret < 0)
return ret;
- ret = sprintf(bq20z75_serial, "%04x", ret);
- val->strval = bq20z75_serial;
+ ret = sprintf(sbs_serial, "%04x", ret);
+ val->strval = sbs_serial;
return 0;
}
-static int bq20z75_get_property_index(struct i2c_client *client,
+static int sbs_get_property_index(struct i2c_client *client,
enum power_supply_property psp)
{
int count;
- for (count = 0; count < ARRAY_SIZE(bq20z75_data); count++)
- if (psp == bq20z75_data[count].psp)
+ for (count = 0; count < ARRAY_SIZE(sbs_data); count++)
+ if (psp == sbs_data[count].psp)
return count;
dev_warn(&client->dev,
@@ -462,19 +450,19 @@ static int bq20z75_get_property_index(struct i2c_client *client,
return -EINVAL;
}
-static int bq20z75_get_property(struct power_supply *psy,
+static int sbs_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
{
int ret = 0;
- struct bq20z75_info *bq20z75_device = container_of(psy,
- struct bq20z75_info, power_supply);
- struct i2c_client *client = bq20z75_device->client;
+ struct sbs_info *chip = container_of(psy,
+ struct sbs_info, power_supply);
+ struct i2c_client *client = chip->client;
switch (psp) {
case POWER_SUPPLY_PROP_PRESENT:
case POWER_SUPPLY_PROP_HEALTH:
- ret = bq20z75_get_battery_presence_and_health(client, psp, val);
+ ret = sbs_get_battery_presence_and_health(client, psp, val);
if (psp == POWER_SUPPLY_PROP_PRESENT)
return 0;
break;
@@ -490,15 +478,15 @@ static int bq20z75_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_CHARGE_FULL:
case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
case POWER_SUPPLY_PROP_CAPACITY:
- ret = bq20z75_get_property_index(client, psp);
+ ret = sbs_get_property_index(client, psp);
if (ret < 0)
break;
- ret = bq20z75_get_battery_capacity(client, ret, psp, val);
+ ret = sbs_get_battery_capacity(client, ret, psp, val);
break;
case POWER_SUPPLY_PROP_SERIAL_NUMBER:
- ret = bq20z75_get_battery_serial_number(client, val);
+ ret = sbs_get_battery_serial_number(client, val);
break;
case POWER_SUPPLY_PROP_STATUS:
@@ -509,11 +497,11 @@ static int bq20z75_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG:
case POWER_SUPPLY_PROP_TIME_TO_FULL_AVG:
case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
- ret = bq20z75_get_property_index(client, psp);
+ ret = sbs_get_property_index(client, psp);
if (ret < 0)
break;
- ret = bq20z75_get_battery_property(client, ret, psp, val);
+ ret = sbs_get_battery_property(client, ret, psp, val);
break;
default:
@@ -522,25 +510,25 @@ static int bq20z75_get_property(struct power_supply *psy,
return -EINVAL;
}
- if (!bq20z75_device->enable_detection)
+ if (!chip->enable_detection)
goto done;
- if (!bq20z75_device->gpio_detect &&
- bq20z75_device->is_present != (ret >= 0)) {
- bq20z75_device->is_present = (ret >= 0);
- power_supply_changed(&bq20z75_device->power_supply);
+ if (!chip->gpio_detect &&
+ chip->is_present != (ret >= 0)) {
+ chip->is_present = (ret >= 0);
+ power_supply_changed(&chip->power_supply);
}
done:
if (!ret) {
/* Convert units to match requirements for power supply class */
- bq20z75_unit_adjustment(client, psp, val);
+ sbs_unit_adjustment(client, psp, val);
}
dev_dbg(&client->dev,
"%s: property = %d, value = %x\n", __func__, psp, val->intval);
- if (ret && bq20z75_device->is_present)
+ if (ret && chip->is_present)
return ret;
/* battery not present, so return NODATA for properties */
@@ -550,7 +538,7 @@ done:
return 0;
}
-static irqreturn_t bq20z75_irq(int irq, void *devid)
+static irqreturn_t sbs_irq(int irq, void *devid)
{
struct power_supply *battery = devid;
@@ -559,36 +547,35 @@ static irqreturn_t bq20z75_irq(int irq, void *devid)
return IRQ_HANDLED;
}
-static void bq20z75_external_power_changed(struct power_supply *psy)
+static void sbs_external_power_changed(struct power_supply *psy)
{
- struct bq20z75_info *bq20z75_device;
+ struct sbs_info *chip;
- bq20z75_device = container_of(psy, struct bq20z75_info, power_supply);
+ chip = container_of(psy, struct sbs_info, power_supply);
- if (bq20z75_device->ignore_changes > 0) {
- bq20z75_device->ignore_changes--;
+ if (chip->ignore_changes > 0) {
+ chip->ignore_changes--;
return;
}
/* cancel outstanding work */
- cancel_delayed_work_sync(&bq20z75_device->work);
+ cancel_delayed_work_sync(&chip->work);
- schedule_delayed_work(&bq20z75_device->work, HZ);
- bq20z75_device->poll_time = bq20z75_device->pdata->poll_retry_count;
+ schedule_delayed_work(&chip->work, HZ);
+ chip->poll_time = chip->pdata->poll_retry_count;
}
-static void bq20z75_delayed_work(struct work_struct *work)
+static void sbs_delayed_work(struct work_struct *work)
{
- struct bq20z75_info *bq20z75_device;
+ struct sbs_info *chip;
s32 ret;
- bq20z75_device = container_of(work, struct bq20z75_info, work.work);
+ chip = container_of(work, struct sbs_info, work.work);
- ret = bq20z75_read_word_data(bq20z75_device->client,
- bq20z75_data[REG_STATUS].addr);
+ ret = sbs_read_word_data(chip->client, sbs_data[REG_STATUS].addr);
/* if the read failed, give up on this work */
if (ret < 0) {
- bq20z75_device->poll_time = 0;
+ chip->poll_time = 0;
return;
}
@@ -601,62 +588,145 @@ static void bq20z75_delayed_work(struct work_struct *work)
else
ret = POWER_SUPPLY_STATUS_CHARGING;
- if (bq20z75_device->last_state != ret) {
- bq20z75_device->poll_time = 0;
- power_supply_changed(&bq20z75_device->power_supply);
+ if (chip->last_state != ret) {
+ chip->poll_time = 0;
+ power_supply_changed(&chip->power_supply);
return;
}
- if (bq20z75_device->poll_time > 0) {
- schedule_delayed_work(&bq20z75_device->work, HZ);
- bq20z75_device->poll_time--;
+ if (chip->poll_time > 0) {
+ schedule_delayed_work(&chip->work, HZ);
+ chip->poll_time--;
return;
}
}
-static int __devinit bq20z75_probe(struct i2c_client *client,
+#if defined(CONFIG_OF)
+
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+
+static const struct of_device_id sbs_dt_ids[] = {
+ { .compatible = "sbs,sbs-battery" },
+ { .compatible = "ti,bq20z75" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sbs_dt_ids);
+
+static struct sbs_platform_data *sbs_of_populate_pdata(
+ struct i2c_client *client)
+{
+ struct device_node *of_node = client->dev.of_node;
+ struct sbs_platform_data *pdata = client->dev.platform_data;
+ enum of_gpio_flags gpio_flags;
+ int rc;
+ u32 prop;
+
+ /* verify this driver matches this device */
+ if (!of_node)
+ return NULL;
+
+ /* if platform data is set, honor it */
+ if (pdata)
+ return pdata;
+
+ /* first make sure at least one property is set, otherwise
+ * it won't change behavior from running without pdata.
+ */
+ if (!of_get_property(of_node, "sbs,i2c-retry-count", NULL) &&
+ !of_get_property(of_node, "sbs,poll-retry-count", NULL) &&
+ !of_get_property(of_node, "sbs,battery-detect-gpios", NULL))
+ goto of_out;
+
+ pdata = devm_kzalloc(&client->dev, sizeof(struct sbs_platform_data),
+ GFP_KERNEL);
+ if (!pdata)
+ goto of_out;
+
+ rc = of_property_read_u32(of_node, "sbs,i2c-retry-count", &prop);
+ if (!rc)
+ pdata->i2c_retry_count = prop;
+
+ rc = of_property_read_u32(of_node, "sbs,poll-retry-count", &prop);
+ if (!rc)
+ pdata->poll_retry_count = prop;
+
+ if (!of_get_property(of_node, "sbs,battery-detect-gpios", NULL)) {
+ pdata->battery_detect = -1;
+ goto of_out;
+ }
+
+ pdata->battery_detect = of_get_named_gpio_flags(of_node,
+ "sbs,battery-detect-gpios", 0, &gpio_flags);
+
+ if (gpio_flags & OF_GPIO_ACTIVE_LOW)
+ pdata->battery_detect_present = 0;
+ else
+ pdata->battery_detect_present = 1;
+
+of_out:
+ return pdata;
+}
+#else
+#define sbs_dt_ids NULL
+static struct sbs_platform_data *sbs_of_populate_pdata(
+ struct i2c_client *client)
+{
+ return client->dev.platform_data;
+}
+#endif
+
+static int __devinit sbs_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- struct bq20z75_info *bq20z75_device;
- struct bq20z75_platform_data *pdata = client->dev.platform_data;
+ struct sbs_info *chip;
+ struct sbs_platform_data *pdata = client->dev.platform_data;
int rc;
int irq;
+ char *name;
- bq20z75_device = kzalloc(sizeof(struct bq20z75_info), GFP_KERNEL);
- if (!bq20z75_device)
+ name = kasprintf(GFP_KERNEL, "sbs-%s", dev_name(&client->dev));
+ if (!name) {
+ dev_err(&client->dev, "Failed to allocate device name\n");
return -ENOMEM;
+ }
+
+ chip = kzalloc(sizeof(struct sbs_info), GFP_KERNEL);
+ if (!chip) {
+ rc = -ENOMEM;
+ goto exit_free_name;
+ }
- bq20z75_device->client = client;
- bq20z75_device->enable_detection = false;
- bq20z75_device->gpio_detect = false;
- bq20z75_device->power_supply.name = "battery";
- bq20z75_device->power_supply.type = POWER_SUPPLY_TYPE_BATTERY;
- bq20z75_device->power_supply.properties = bq20z75_properties;
- bq20z75_device->power_supply.num_properties =
- ARRAY_SIZE(bq20z75_properties);
- bq20z75_device->power_supply.get_property = bq20z75_get_property;
+ chip->client = client;
+ chip->enable_detection = false;
+ chip->gpio_detect = false;
+ chip->power_supply.name = name;
+ chip->power_supply.type = POWER_SUPPLY_TYPE_BATTERY;
+ chip->power_supply.properties = sbs_properties;
+ chip->power_supply.num_properties = ARRAY_SIZE(sbs_properties);
+ chip->power_supply.get_property = sbs_get_property;
/* ignore first notification of external change, it is generated
* from the power_supply_register call back
*/
- bq20z75_device->ignore_changes = 1;
- bq20z75_device->last_state = POWER_SUPPLY_STATUS_UNKNOWN;
- bq20z75_device->power_supply.external_power_changed =
- bq20z75_external_power_changed;
+ chip->ignore_changes = 1;
+ chip->last_state = POWER_SUPPLY_STATUS_UNKNOWN;
+ chip->power_supply.external_power_changed = sbs_external_power_changed;
+
+ pdata = sbs_of_populate_pdata(client);
if (pdata) {
- bq20z75_device->gpio_detect =
- gpio_is_valid(pdata->battery_detect);
- bq20z75_device->pdata = pdata;
+ chip->gpio_detect = gpio_is_valid(pdata->battery_detect);
+ chip->pdata = pdata;
}
- i2c_set_clientdata(client, bq20z75_device);
+ i2c_set_clientdata(client, chip);
- if (!bq20z75_device->gpio_detect)
+ if (!chip->gpio_detect)
goto skip_gpio;
rc = gpio_request(pdata->battery_detect, dev_name(&client->dev));
if (rc) {
dev_warn(&client->dev, "Failed to request gpio: %d\n", rc);
- bq20z75_device->gpio_detect = false;
+ chip->gpio_detect = false;
goto skip_gpio;
}
@@ -664,7 +734,7 @@ static int __devinit bq20z75_probe(struct i2c_client *client,
if (rc) {
dev_warn(&client->dev, "Failed to get gpio as input: %d\n", rc);
gpio_free(pdata->battery_detect);
- bq20z75_device->gpio_detect = false;
+ chip->gpio_detect = false;
goto skip_gpio;
}
@@ -672,25 +742,25 @@ static int __devinit bq20z75_probe(struct i2c_client *client,
if (irq <= 0) {
dev_warn(&client->dev, "Failed to get gpio as irq: %d\n", irq);
gpio_free(pdata->battery_detect);
- bq20z75_device->gpio_detect = false;
+ chip->gpio_detect = false;
goto skip_gpio;
}
- rc = request_irq(irq, bq20z75_irq,
+ rc = request_irq(irq, sbs_irq,
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- dev_name(&client->dev), &bq20z75_device->power_supply);
+ dev_name(&client->dev), &chip->power_supply);
if (rc) {
dev_warn(&client->dev, "Failed to request irq: %d\n", rc);
gpio_free(pdata->battery_detect);
- bq20z75_device->gpio_detect = false;
+ chip->gpio_detect = false;
goto skip_gpio;
}
- bq20z75_device->irq = irq;
+ chip->irq = irq;
skip_gpio:
- rc = power_supply_register(&client->dev, &bq20z75_device->power_supply);
+ rc = power_supply_register(&client->dev, &chip->power_supply);
if (rc) {
dev_err(&client->dev,
"%s: Failed to register power supply\n", __func__);
@@ -700,95 +770,100 @@ skip_gpio:
dev_info(&client->dev,
"%s: battery gas gauge device registered\n", client->name);
- INIT_DELAYED_WORK(&bq20z75_device->work, bq20z75_delayed_work);
+ INIT_DELAYED_WORK(&chip->work, sbs_delayed_work);
- bq20z75_device->enable_detection = true;
+ chip->enable_detection = true;
return 0;
exit_psupply:
- if (bq20z75_device->irq)
- free_irq(bq20z75_device->irq, &bq20z75_device->power_supply);
- if (bq20z75_device->gpio_detect)
+ if (chip->irq)
+ free_irq(chip->irq, &chip->power_supply);
+ if (chip->gpio_detect)
gpio_free(pdata->battery_detect);
- kfree(bq20z75_device);
+ kfree(chip);
+
+exit_free_name:
+ kfree(name);
return rc;
}
-static int __devexit bq20z75_remove(struct i2c_client *client)
+static int __devexit sbs_remove(struct i2c_client *client)
{
- struct bq20z75_info *bq20z75_device = i2c_get_clientdata(client);
+ struct sbs_info *chip = i2c_get_clientdata(client);
- if (bq20z75_device->irq)
- free_irq(bq20z75_device->irq, &bq20z75_device->power_supply);
- if (bq20z75_device->gpio_detect)
- gpio_free(bq20z75_device->pdata->battery_detect);
+ if (chip->irq)
+ free_irq(chip->irq, &chip->power_supply);
+ if (chip->gpio_detect)
+ gpio_free(chip->pdata->battery_detect);
- power_supply_unregister(&bq20z75_device->power_supply);
+ power_supply_unregister(&chip->power_supply);
- cancel_delayed_work_sync(&bq20z75_device->work);
+ cancel_delayed_work_sync(&chip->work);
- kfree(bq20z75_device);
- bq20z75_device = NULL;
+ kfree(chip->power_supply.name);
+ kfree(chip);
+ chip = NULL;
return 0;
}
#if defined CONFIG_PM
-static int bq20z75_suspend(struct i2c_client *client,
+static int sbs_suspend(struct i2c_client *client,
pm_message_t state)
{
- struct bq20z75_info *bq20z75_device = i2c_get_clientdata(client);
+ struct sbs_info *chip = i2c_get_clientdata(client);
s32 ret;
- if (bq20z75_device->poll_time > 0)
- cancel_delayed_work_sync(&bq20z75_device->work);
+ if (chip->poll_time > 0)
+ cancel_delayed_work_sync(&chip->work);
/* write to manufacturer access with sleep command */
- ret = bq20z75_write_word_data(client,
- bq20z75_data[REG_MANUFACTURER_DATA].addr,
+ ret = sbs_write_word_data(client, sbs_data[REG_MANUFACTURER_DATA].addr,
MANUFACTURER_ACCESS_SLEEP);
- if (bq20z75_device->is_present && ret < 0)
+ if (chip->is_present && ret < 0)
return ret;
return 0;
}
#else
-#define bq20z75_suspend NULL
+#define sbs_suspend NULL
#endif
-/* any smbus transaction will wake up bq20z75 */
-#define bq20z75_resume NULL
+/* any smbus transaction will wake up sbs */
+#define sbs_resume NULL
-static const struct i2c_device_id bq20z75_id[] = {
+static const struct i2c_device_id sbs_id[] = {
{ "bq20z75", 0 },
+ { "sbs-battery", 1 },
{}
};
-MODULE_DEVICE_TABLE(i2c, bq20z75_id);
-
-static struct i2c_driver bq20z75_battery_driver = {
- .probe = bq20z75_probe,
- .remove = __devexit_p(bq20z75_remove),
- .suspend = bq20z75_suspend,
- .resume = bq20z75_resume,
- .id_table = bq20z75_id,
+MODULE_DEVICE_TABLE(i2c, sbs_id);
+
+static struct i2c_driver sbs_battery_driver = {
+ .probe = sbs_probe,
+ .remove = __devexit_p(sbs_remove),
+ .suspend = sbs_suspend,
+ .resume = sbs_resume,
+ .id_table = sbs_id,
.driver = {
- .name = "bq20z75-battery",
+ .name = "sbs-battery",
+ .of_match_table = sbs_dt_ids,
},
};
-static int __init bq20z75_battery_init(void)
+static int __init sbs_battery_init(void)
{
- return i2c_add_driver(&bq20z75_battery_driver);
+ return i2c_add_driver(&sbs_battery_driver);
}
-module_init(bq20z75_battery_init);
+module_init(sbs_battery_init);
-static void __exit bq20z75_battery_exit(void)
+static void __exit sbs_battery_exit(void)
{
- i2c_del_driver(&bq20z75_battery_driver);
+ i2c_del_driver(&sbs_battery_driver);
}
-module_exit(bq20z75_battery_exit);
+module_exit(sbs_battery_exit);
-MODULE_DESCRIPTION("BQ20z75 battery monitor driver");
+MODULE_DESCRIPTION("SBS battery monitor driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/power/tosa_battery.c b/drivers/power/tosa_battery.c
index 53f0d3524fcd..28bbe7e094e3 100644
--- a/drivers/power/tosa_battery.c
+++ b/drivers/power/tosa_battery.c
@@ -307,25 +307,20 @@ static struct tosa_bat tosa_bat_bu = {
.adc_temp_divider = -1,
};
-static struct {
- int gpio;
- char *name;
- bool output;
- int value;
-} gpios[] = {
- { TOSA_GPIO_CHARGE_OFF, "main charge off", 1, 1 },
- { TOSA_GPIO_CHARGE_OFF_JC, "jacket charge off", 1, 1 },
- { TOSA_GPIO_BAT_SW_ON, "battery switch", 1, 0 },
- { TOSA_GPIO_BAT0_V_ON, "main battery", 1, 0 },
- { TOSA_GPIO_BAT1_V_ON, "jacket battery", 1, 0 },
- { TOSA_GPIO_BAT1_TH_ON, "main battery temp", 1, 0 },
- { TOSA_GPIO_BAT0_TH_ON, "jacket battery temp", 1, 0 },
- { TOSA_GPIO_BU_CHRG_ON, "backup battery", 1, 0 },
- { TOSA_GPIO_BAT0_CRG, "main battery full", 0, 0 },
- { TOSA_GPIO_BAT1_CRG, "jacket battery full", 0, 0 },
- { TOSA_GPIO_BAT0_LOW, "main battery low", 0, 0 },
- { TOSA_GPIO_BAT1_LOW, "jacket battery low", 0, 0 },
- { TOSA_GPIO_JACKET_DETECT, "jacket detect", 0, 0 },
+static struct gpio tosa_bat_gpios[] = {
+ { TOSA_GPIO_CHARGE_OFF, GPIOF_OUT_INIT_HIGH, "main charge off" },
+ { TOSA_GPIO_CHARGE_OFF_JC, GPIOF_OUT_INIT_HIGH, "jacket charge off" },
+ { TOSA_GPIO_BAT_SW_ON, GPIOF_OUT_INIT_LOW, "battery switch" },
+ { TOSA_GPIO_BAT0_V_ON, GPIOF_OUT_INIT_LOW, "main battery" },
+ { TOSA_GPIO_BAT1_V_ON, GPIOF_OUT_INIT_LOW, "jacket battery" },
+ { TOSA_GPIO_BAT1_TH_ON, GPIOF_OUT_INIT_LOW, "main battery temp" },
+ { TOSA_GPIO_BAT0_TH_ON, GPIOF_OUT_INIT_LOW, "jacket battery temp" },
+ { TOSA_GPIO_BU_CHRG_ON, GPIOF_OUT_INIT_LOW, "backup battery" },
+ { TOSA_GPIO_BAT0_CRG, GPIOF_IN, "main battery full" },
+ { TOSA_GPIO_BAT1_CRG, GPIOF_IN, "jacket battery full" },
+ { TOSA_GPIO_BAT0_LOW, GPIOF_IN, "main battery low" },
+ { TOSA_GPIO_BAT1_LOW, GPIOF_IN, "jacket battery low" },
+ { TOSA_GPIO_JACKET_DETECT, GPIOF_IN, "jacket detect" },
};
#ifdef CONFIG_PM
@@ -350,27 +345,13 @@ static int tosa_bat_resume(struct platform_device *dev)
static int __devinit tosa_bat_probe(struct platform_device *dev)
{
int ret;
- int i;
if (!machine_is_tosa())
return -ENODEV;
- for (i = 0; i < ARRAY_SIZE(gpios); i++) {
- ret = gpio_request(gpios[i].gpio, gpios[i].name);
- if (ret) {
- i--;
- goto err_gpio;
- }
-
- if (gpios[i].output)
- ret = gpio_direction_output(gpios[i].gpio,
- gpios[i].value);
- else
- ret = gpio_direction_input(gpios[i].gpio);
-
- if (ret)
- goto err_gpio;
- }
+ ret = gpio_request_array(tosa_bat_gpios, ARRAY_SIZE(tosa_bat_gpios));
+ if (ret)
+ return ret;
mutex_init(&tosa_bat_main.work_lock);
mutex_init(&tosa_bat_jacket.work_lock);
@@ -424,18 +405,12 @@ err_psy_reg_main:
/* see comment in tosa_bat_remove */
cancel_work_sync(&bat_work);
- i--;
-err_gpio:
- for (; i >= 0; i--)
- gpio_free(gpios[i].gpio);
-
+ gpio_free_array(tosa_bat_gpios, ARRAY_SIZE(tosa_bat_gpios));
return ret;
}
static int __devexit tosa_bat_remove(struct platform_device *dev)
{
- int i;
-
free_irq(gpio_to_irq(TOSA_GPIO_JACKET_DETECT), &tosa_bat_jacket);
free_irq(gpio_to_irq(TOSA_GPIO_BAT1_CRG), &tosa_bat_jacket);
free_irq(gpio_to_irq(TOSA_GPIO_BAT0_CRG), &tosa_bat_main);
@@ -450,10 +425,7 @@ static int __devexit tosa_bat_remove(struct platform_device *dev)
* unregistered now.
*/
cancel_work_sync(&bat_work);
-
- for (i = ARRAY_SIZE(gpios) - 1; i >= 0; i--)
- gpio_free(gpios[i].gpio);
-
+ gpio_free_array(tosa_bat_gpios, ARRAY_SIZE(tosa_bat_gpios));
return 0;
}
@@ -466,18 +438,7 @@ static struct platform_driver tosa_bat_driver = {
.resume = tosa_bat_resume,
};
-static int __init tosa_bat_init(void)
-{
- return platform_driver_register(&tosa_bat_driver);
-}
-
-static void __exit tosa_bat_exit(void)
-{
- platform_driver_unregister(&tosa_bat_driver);
-}
-
-module_init(tosa_bat_init);
-module_exit(tosa_bat_exit);
+module_platform_driver(tosa_bat_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Dmitry Baryshkov");
diff --git a/drivers/power/wm831x_backup.c b/drivers/power/wm831x_backup.c
index e648cbea1e6a..6243e6975126 100644
--- a/drivers/power/wm831x_backup.c
+++ b/drivers/power/wm831x_backup.c
@@ -226,17 +226,7 @@ static struct platform_driver wm831x_backup_driver = {
},
};
-static int __init wm831x_backup_init(void)
-{
- return platform_driver_register(&wm831x_backup_driver);
-}
-module_init(wm831x_backup_init);
-
-static void __exit wm831x_backup_exit(void)
-{
- platform_driver_unregister(&wm831x_backup_driver);
-}
-module_exit(wm831x_backup_exit);
+module_platform_driver(wm831x_backup_driver);
MODULE_DESCRIPTION("Backup battery charger driver for WM831x PMICs");
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
diff --git a/drivers/power/wm831x_power.c b/drivers/power/wm831x_power.c
index 6cc2ca6427f3..987332b71d8d 100644
--- a/drivers/power/wm831x_power.c
+++ b/drivers/power/wm831x_power.c
@@ -27,6 +27,7 @@ struct wm831x_power {
char wall_name[20];
char usb_name[20];
char battery_name[20];
+ bool have_battery;
};
static int wm831x_power_check_online(struct wm831x *wm831x, int supply,
@@ -449,7 +450,8 @@ static irqreturn_t wm831x_bat_irq(int irq, void *data)
/* The battery charger is autonomous so we don't need to do
* anything except kick user space */
- power_supply_changed(&wm831x_power->battery);
+ if (wm831x_power->have_battery)
+ power_supply_changed(&wm831x_power->battery);
return IRQ_HANDLED;
}
@@ -479,7 +481,8 @@ static irqreturn_t wm831x_pwr_src_irq(int irq, void *data)
dev_dbg(wm831x->dev, "Power source changed\n");
/* Just notify for everything - little harm in overnotifying. */
- power_supply_changed(&wm831x_power->battery);
+ if (wm831x_power->have_battery)
+ power_supply_changed(&wm831x_power->battery);
power_supply_changed(&wm831x_power->usb);
power_supply_changed(&wm831x_power->wall);
@@ -537,15 +540,6 @@ static __devinit int wm831x_power_probe(struct platform_device *pdev)
if (ret)
goto err_kmalloc;
- battery->name = power->battery_name;
- battery->properties = wm831x_bat_props;
- battery->num_properties = ARRAY_SIZE(wm831x_bat_props);
- battery->get_property = wm831x_bat_get_prop;
- battery->use_for_apm = 1;
- ret = power_supply_register(&pdev->dev, battery);
- if (ret)
- goto err_wall;
-
usb->name = power->usb_name,
usb->type = POWER_SUPPLY_TYPE_USB;
usb->properties = wm831x_usb_props;
@@ -553,7 +547,23 @@ static __devinit int wm831x_power_probe(struct platform_device *pdev)
usb->get_property = wm831x_usb_get_prop;
ret = power_supply_register(&pdev->dev, usb);
if (ret)
- goto err_battery;
+ goto err_wall;
+
+ ret = wm831x_reg_read(wm831x, WM831X_CHARGER_CONTROL_1);
+ if (ret < 0)
+ goto err_wall;
+ power->have_battery = ret & WM831X_CHG_ENA;
+
+ if (power->have_battery) {
+ battery->name = power->battery_name;
+ battery->properties = wm831x_bat_props;
+ battery->num_properties = ARRAY_SIZE(wm831x_bat_props);
+ battery->get_property = wm831x_bat_get_prop;
+ battery->use_for_apm = 1;
+ ret = power_supply_register(&pdev->dev, battery);
+ if (ret)
+ goto err_usb;
+ }
irq = platform_get_irq_byname(pdev, "SYSLO");
ret = request_threaded_irq(irq, NULL, wm831x_syslo_irq,
@@ -562,7 +572,7 @@ static __devinit int wm831x_power_probe(struct platform_device *pdev)
if (ret != 0) {
dev_err(&pdev->dev, "Failed to request SYSLO IRQ %d: %d\n",
irq, ret);
- goto err_usb;
+ goto err_battery;
}
irq = platform_get_irq_byname(pdev, "PWR SRC");
@@ -601,10 +611,11 @@ err_bat_irq:
err_syslo:
irq = platform_get_irq_byname(pdev, "SYSLO");
free_irq(irq, power);
+err_battery:
+ if (power->have_battery)
+ power_supply_unregister(battery);
err_usb:
power_supply_unregister(usb);
-err_battery:
- power_supply_unregister(battery);
err_wall:
power_supply_unregister(wall);
err_kmalloc:
@@ -628,7 +639,8 @@ static __devexit int wm831x_power_remove(struct platform_device *pdev)
irq = platform_get_irq_byname(pdev, "SYSLO");
free_irq(irq, wm831x_power);
- power_supply_unregister(&wm831x_power->battery);
+ if (wm831x_power->have_battery)
+ power_supply_unregister(&wm831x_power->battery);
power_supply_unregister(&wm831x_power->wall);
power_supply_unregister(&wm831x_power->usb);
kfree(wm831x_power);
@@ -643,17 +655,7 @@ static struct platform_driver wm831x_power_driver = {
},
};
-static int __init wm831x_power_init(void)
-{
- return platform_driver_register(&wm831x_power_driver);
-}
-module_init(wm831x_power_init);
-
-static void __exit wm831x_power_exit(void)
-{
- platform_driver_unregister(&wm831x_power_driver);
-}
-module_exit(wm831x_power_exit);
+module_platform_driver(wm831x_power_driver);
MODULE_DESCRIPTION("Power supply driver for WM831x PMICs");
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
diff --git a/drivers/power/wm8350_power.c b/drivers/power/wm8350_power.c
index 0693902d6151..fae04d384657 100644
--- a/drivers/power/wm8350_power.c
+++ b/drivers/power/wm8350_power.c
@@ -522,17 +522,7 @@ static struct platform_driver wm8350_power_driver = {
},
};
-static int __init wm8350_power_init(void)
-{
- return platform_driver_register(&wm8350_power_driver);
-}
-module_init(wm8350_power_init);
-
-static void __exit wm8350_power_exit(void)
-{
- platform_driver_unregister(&wm8350_power_driver);
-}
-module_exit(wm8350_power_exit);
+module_platform_driver(wm8350_power_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Power supply driver for WM8350");
diff --git a/drivers/power/wm97xx_battery.c b/drivers/power/wm97xx_battery.c
index 156559e56fa5..d2d4c08c681c 100644
--- a/drivers/power/wm97xx_battery.c
+++ b/drivers/power/wm97xx_battery.c
@@ -25,9 +25,8 @@
#include <linux/irq.h>
#include <linux/slab.h>
-static DEFINE_MUTEX(bat_lock);
static struct work_struct bat_work;
-static struct mutex work_lock;
+static DEFINE_MUTEX(work_lock);
static int bat_status = POWER_SUPPLY_STATUS_UNKNOWN;
static enum power_supply_property *prop;
@@ -181,8 +180,6 @@ static int __devinit wm97xx_bat_probe(struct platform_device *dev)
if (dev->id != -1)
return -EINVAL;
- mutex_init(&work_lock);
-
if (!pdata) {
dev_err(&dev->dev, "No platform_data supplied\n");
return -EINVAL;
@@ -196,7 +193,7 @@ static int __devinit wm97xx_bat_probe(struct platform_device *dev)
if (ret)
goto err2;
ret = request_irq(gpio_to_irq(pdata->charge_gpio),
- wm97xx_chrg_irq, IRQF_DISABLED,
+ wm97xx_chrg_irq, 0,
"AC Detect", dev);
if (ret)
goto err2;
@@ -291,18 +288,7 @@ static struct platform_driver wm97xx_bat_driver = {
.remove = __devexit_p(wm97xx_bat_remove),
};
-static int __init wm97xx_bat_init(void)
-{
- return platform_driver_register(&wm97xx_bat_driver);
-}
-
-static void __exit wm97xx_bat_exit(void)
-{
- platform_driver_unregister(&wm97xx_bat_driver);
-}
-
-module_init(wm97xx_bat_init);
-module_exit(wm97xx_bat_exit);
+module_platform_driver(wm97xx_bat_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>");
diff --git a/drivers/power/z2_battery.c b/drivers/power/z2_battery.c
index d119c38b3ff6..636ebb2a0e80 100644
--- a/drivers/power/z2_battery.c
+++ b/drivers/power/z2_battery.c
@@ -218,7 +218,7 @@ static int __devinit z2_batt_probe(struct i2c_client *client,
irq_set_irq_type(gpio_to_irq(info->charge_gpio),
IRQ_TYPE_EDGE_BOTH);
ret = request_irq(gpio_to_irq(info->charge_gpio),
- z2_charge_switch_irq, IRQF_DISABLED,
+ z2_charge_switch_irq, 0,
"AC Detect", charger);
if (ret)
goto err3;
@@ -313,7 +313,7 @@ static struct i2c_driver z2_batt_driver = {
.pm = Z2_BATTERY_PM_OPS
},
.probe = z2_batt_probe,
- .remove = z2_batt_remove,
+ .remove = __devexit_p(z2_batt_remove),
.id_table = z2_batt_id,
};
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index cf3f9997546d..10451a15e828 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -101,7 +101,9 @@ static s32 scaled_ppm_to_ppb(long ppm)
static int ptp_clock_getres(struct posix_clock *pc, struct timespec *tp)
{
- return 1; /* always round timer functions to one nanosecond */
+ tp->tv_sec = 0;
+ tp->tv_nsec = 1;
+ return 0;
}
static int ptp_clock_settime(struct posix_clock *pc, const struct timespec *tp)
diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c
index 5225930a10cd..691b1ab1a3d0 100644
--- a/drivers/rapidio/devices/tsi721.c
+++ b/drivers/rapidio/devices/tsi721.c
@@ -851,14 +851,12 @@ static int tsi721_doorbell_init(struct tsi721_device *priv)
INIT_WORK(&priv->idb_work, tsi721_db_dpc);
/* Allocate buffer for inbound doorbells queue */
- priv->idb_base = dma_alloc_coherent(&priv->pdev->dev,
+ priv->idb_base = dma_zalloc_coherent(&priv->pdev->dev,
IDB_QSIZE * TSI721_IDB_ENTRY_SIZE,
&priv->idb_dma, GFP_KERNEL);
if (!priv->idb_base)
return -ENOMEM;
- memset(priv->idb_base, 0, IDB_QSIZE * TSI721_IDB_ENTRY_SIZE);
-
dev_dbg(&priv->pdev->dev, "Allocated IDB buffer @ %p (phys = %llx)\n",
priv->idb_base, (unsigned long long)priv->idb_dma);
@@ -904,7 +902,7 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
*/
/* Allocate space for DMA descriptors */
- bd_ptr = dma_alloc_coherent(&priv->pdev->dev,
+ bd_ptr = dma_zalloc_coherent(&priv->pdev->dev,
bd_num * sizeof(struct tsi721_dma_desc),
&bd_phys, GFP_KERNEL);
if (!bd_ptr)
@@ -913,8 +911,6 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
priv->bdma[chnum].bd_phys = bd_phys;
priv->bdma[chnum].bd_base = bd_ptr;
- memset(bd_ptr, 0, bd_num * sizeof(struct tsi721_dma_desc));
-
dev_dbg(&priv->pdev->dev, "DMA descriptors @ %p (phys = %llx)\n",
bd_ptr, (unsigned long long)bd_phys);
@@ -922,7 +918,7 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
sts_size = (bd_num >= TSI721_DMA_MINSTSSZ) ?
bd_num : TSI721_DMA_MINSTSSZ;
sts_size = roundup_pow_of_two(sts_size);
- sts_ptr = dma_alloc_coherent(&priv->pdev->dev,
+ sts_ptr = dma_zalloc_coherent(&priv->pdev->dev,
sts_size * sizeof(struct tsi721_dma_sts),
&sts_phys, GFP_KERNEL);
if (!sts_ptr) {
@@ -938,8 +934,6 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
priv->bdma[chnum].sts_base = sts_ptr;
priv->bdma[chnum].sts_size = sts_size;
- memset(sts_ptr, 0, sts_size);
-
dev_dbg(&priv->pdev->dev,
"desc status FIFO @ %p (phys = %llx) size=0x%x\n",
sts_ptr, (unsigned long long)sts_phys, sts_size);
@@ -1400,7 +1394,7 @@ static int tsi721_open_outb_mbox(struct rio_mport *mport, void *dev_id,
/* Outbound message descriptor status FIFO allocation */
priv->omsg_ring[mbox].sts_size = roundup_pow_of_two(entries + 1);
- priv->omsg_ring[mbox].sts_base = dma_alloc_coherent(&priv->pdev->dev,
+ priv->omsg_ring[mbox].sts_base = dma_zalloc_coherent(&priv->pdev->dev,
priv->omsg_ring[mbox].sts_size *
sizeof(struct tsi721_dma_sts),
&priv->omsg_ring[mbox].sts_phys, GFP_KERNEL);
@@ -1412,9 +1406,6 @@ static int tsi721_open_outb_mbox(struct rio_mport *mport, void *dev_id,
goto out_desc;
}
- memset(priv->omsg_ring[mbox].sts_base, 0,
- entries * sizeof(struct tsi721_dma_sts));
-
/*
* Configure Outbound Messaging Engine
*/
@@ -2116,8 +2107,8 @@ static int __devinit tsi721_setup_mport(struct tsi721_device *priv)
INIT_LIST_HEAD(&mport->dbells);
rio_init_dbell_res(&mport->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff);
- rio_init_mbox_res(&mport->riores[RIO_INB_MBOX_RESOURCE], 0, 0);
- rio_init_mbox_res(&mport->riores[RIO_OUTB_MBOX_RESOURCE], 0, 0);
+ rio_init_mbox_res(&mport->riores[RIO_INB_MBOX_RESOURCE], 0, 3);
+ rio_init_mbox_res(&mport->riores[RIO_OUTB_MBOX_RESOURCE], 0, 3);
strcpy(mport->name, "Tsi721 mport");
/* Hook up interrupt handler */
@@ -2163,7 +2154,7 @@ static int __devinit tsi721_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct tsi721_device *priv;
- int i;
+ int i, cap;
int err;
u32 regval;
@@ -2271,10 +2262,20 @@ static int __devinit tsi721_probe(struct pci_dev *pdev,
dev_info(&pdev->dev, "Unable to set consistent DMA mask\n");
}
- /* Clear "no snoop" and "relaxed ordering" bits. */
- pci_read_config_dword(pdev, 0x40 + PCI_EXP_DEVCTL, &regval);
- regval &= ~(PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN);
- pci_write_config_dword(pdev, 0x40 + PCI_EXP_DEVCTL, regval);
+ cap = pci_pcie_cap(pdev);
+ BUG_ON(cap == 0);
+
+ /* Clear "no snoop" and "relaxed ordering" bits, use default MRRS. */
+ pci_read_config_dword(pdev, cap + PCI_EXP_DEVCTL, &regval);
+ regval &= ~(PCI_EXP_DEVCTL_READRQ | PCI_EXP_DEVCTL_RELAX_EN |
+ PCI_EXP_DEVCTL_NOSNOOP_EN);
+ regval |= 0x2 << MAX_READ_REQUEST_SZ_SHIFT;
+ pci_write_config_dword(pdev, cap + PCI_EXP_DEVCTL, regval);
+
+ /* Adjust PCIe completion timeout. */
+ pci_read_config_dword(pdev, cap + PCI_EXP_DEVCTL2, &regval);
+ regval &= ~(0x0f);
+ pci_write_config_dword(pdev, cap + PCI_EXP_DEVCTL2, regval | 0x2);
/*
* FIXUP: correct offsets of MSI-X tables in the MSI-X Capability Block
diff --git a/drivers/rapidio/devices/tsi721.h b/drivers/rapidio/devices/tsi721.h
index 58be4deb1402..822e54c394d5 100644
--- a/drivers/rapidio/devices/tsi721.h
+++ b/drivers/rapidio/devices/tsi721.h
@@ -72,6 +72,8 @@
#define TSI721_MSIXPBA_OFFSET 0x2a000
#define TSI721_PCIECFG_EPCTL 0x400
+#define MAX_READ_REQUEST_SZ_SHIFT 12
+
/*
* Event Management Registers
*/
diff --git a/drivers/regulator/88pm8607.c b/drivers/regulator/88pm8607.c
index ca0d608f8248..df33530cec4a 100644
--- a/drivers/regulator/88pm8607.c
+++ b/drivers/regulator/88pm8607.c
@@ -427,7 +427,7 @@ static int __devinit pm8607_regulator_probe(struct platform_device *pdev)
/* replace driver_data with info */
info->regulator = regulator_register(&info->desc, &pdev->dev,
- pdata, info);
+ pdata, info, NULL);
if (IS_ERR(info->regulator)) {
dev_err(&pdev->dev, "failed to register regulator %s\n",
info->desc.name);
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 9713b1b860cb..7a61b17ddd04 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -93,6 +93,7 @@ config REGULATOR_MAX1586
config REGULATOR_MAX8649
tristate "Maxim 8649 voltage regulator"
depends on I2C
+ select REGMAP_I2C
help
This driver controls a Maxim 8649 voltage output regulator via
I2C bus.
@@ -177,6 +178,13 @@ config REGULATOR_DA903X
Say y here to support the BUCKs and LDOs regulators found on
Dialog Semiconductor DA9030/DA9034 PMIC.
+config REGULATOR_DA9052
+ tristate "Dialog DA9052/DA9053 regulators"
+ depends on PMIC_DA9052
+ help
+ This driver supports the voltage regulators of DA9052-BC and
+ DA9053-AA/Bx PMIC.
+
config REGULATOR_PCF50633
tristate "PCF50633 regulator driver"
depends on MFD_PCF50633
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 93a6318f5328..503bac87715e 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -4,6 +4,7 @@
obj-$(CONFIG_REGULATOR) += core.o dummy.o
+obj-$(CONFIG_OF) += of_regulator.o
obj-$(CONFIG_REGULATOR_FIXED_VOLTAGE) += fixed.o
obj-$(CONFIG_REGULATOR_VIRTUAL_CONSUMER) += virtual.o
obj-$(CONFIG_REGULATOR_USERSPACE_CONSUMER) += userspace-consumer.o
@@ -29,6 +30,7 @@ obj-$(CONFIG_REGULATOR_WM8400) += wm8400-regulator.o
obj-$(CONFIG_REGULATOR_WM8994) += wm8994-regulator.o
obj-$(CONFIG_REGULATOR_TPS6586X) += tps6586x-regulator.o
obj-$(CONFIG_REGULATOR_DA903X) += da903x.o
+obj-$(CONFIG_REGULATOR_DA9052) += da9052-regulator.o
obj-$(CONFIG_REGULATOR_PCF50633) += pcf50633-regulator.o
obj-$(CONFIG_REGULATOR_PCAP) += pcap-regulator.o
obj-$(CONFIG_REGULATOR_MC13783) += mc13783-regulator.o
diff --git a/drivers/regulator/aat2870-regulator.c b/drivers/regulator/aat2870-regulator.c
index 5abeb3ac3e8d..685ad43b0749 100644
--- a/drivers/regulator/aat2870-regulator.c
+++ b/drivers/regulator/aat2870-regulator.c
@@ -63,7 +63,7 @@ static int aat2870_ldo_set_voltage_sel(struct regulator_dev *rdev,
struct aat2870_data *aat2870 = dev_get_drvdata(ri->pdev->dev.parent);
return aat2870->update(aat2870, ri->voltage_addr, ri->voltage_mask,
- (selector << ri->voltage_shift) & ri->voltage_mask);
+ selector << ri->voltage_shift);
}
static int aat2870_ldo_get_voltage_sel(struct regulator_dev *rdev)
@@ -160,7 +160,7 @@ static struct aat2870_regulator *aat2870_get_regulator(int id)
break;
}
- if (!ri)
+ if (i == ARRAY_SIZE(aat2870_regulators))
return NULL;
ri->enable_addr = AAT2870_LDO_EN;
@@ -188,7 +188,7 @@ static int aat2870_regulator_probe(struct platform_device *pdev)
ri->pdev = pdev;
rdev = regulator_register(&ri->desc, &pdev->dev,
- pdev->dev.platform_data, ri);
+ pdev->dev.platform_data, ri, NULL);
if (IS_ERR(rdev)) {
dev_err(&pdev->dev, "Failed to register regulator %s\n",
ri->desc.name);
diff --git a/drivers/regulator/ab3100.c b/drivers/regulator/ab3100.c
index 585e4946fe0a..042271aace6a 100644
--- a/drivers/regulator/ab3100.c
+++ b/drivers/regulator/ab3100.c
@@ -634,7 +634,7 @@ static int __devinit ab3100_regulators_probe(struct platform_device *pdev)
rdev = regulator_register(&ab3100_regulator_desc[i],
&pdev->dev,
&plfdata->reg_constraints[i],
- reg);
+ reg, NULL);
if (IS_ERR(rdev)) {
err = PTR_ERR(rdev);
diff --git a/drivers/regulator/ab8500.c b/drivers/regulator/ab8500.c
index 6e1ae69646b3..c9b92531ae60 100644
--- a/drivers/regulator/ab8500.c
+++ b/drivers/regulator/ab8500.c
@@ -16,8 +16,8 @@
#include <linux/module.h>
#include <linux/err.h>
#include <linux/platform_device.h>
-#include <linux/mfd/ab8500.h>
#include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ab8500.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/ab8500.h>
@@ -822,7 +822,7 @@ static __devinit int ab8500_regulator_probe(struct platform_device *pdev)
/* register regulator with framework */
info->regulator = regulator_register(&info->desc, &pdev->dev,
- &pdata->regulator[i], info);
+ &pdata->regulator[i], info, NULL);
if (IS_ERR(info->regulator)) {
err = PTR_ERR(info->regulator);
dev_err(&pdev->dev, "failed to register regulator %s\n",
diff --git a/drivers/regulator/ad5398.c b/drivers/regulator/ad5398.c
index a4be41614eeb..483c80930852 100644
--- a/drivers/regulator/ad5398.c
+++ b/drivers/regulator/ad5398.c
@@ -233,7 +233,7 @@ static int __devinit ad5398_probe(struct i2c_client *client,
chip->current_mask = (chip->current_level - 1) << chip->current_offset;
chip->rdev = regulator_register(&ad5398_reg, &client->dev,
- init_data, chip);
+ init_data, chip, NULL);
if (IS_ERR(chip->rdev)) {
ret = PTR_ERR(chip->rdev);
dev_err(&client->dev, "failed to register %s %s\n",
diff --git a/drivers/regulator/bq24022.c b/drivers/regulator/bq24022.c
index e24d1b7d97a8..9fab6d1bbe80 100644
--- a/drivers/regulator/bq24022.c
+++ b/drivers/regulator/bq24022.c
@@ -107,7 +107,7 @@ static int __init bq24022_probe(struct platform_device *pdev)
ret = gpio_direction_output(pdata->gpio_nce, 1);
bq24022 = regulator_register(&bq24022_desc, &pdev->dev,
- pdata->init_data, pdata);
+ pdata->init_data, pdata, NULL);
if (IS_ERR(bq24022)) {
dev_dbg(&pdev->dev, "couldn't register regulator\n");
ret = PTR_ERR(bq24022);
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 669d02160221..ca86f39a0fdc 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -25,6 +25,8 @@
#include <linux/mutex.h>
#include <linux/suspend.h>
#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/regulator/of_regulator.h>
#include <linux/regulator/consumer.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
@@ -132,6 +134,33 @@ static struct regulator *get_device_regulator(struct device *dev)
return NULL;
}
+/**
+ * of_get_regulator - get a regulator device node based on supply name
+ * @dev: Device pointer for the consumer (of regulator) device
+ * @supply: regulator supply name
+ *
+ * Extract the regulator device node corresponding to the supply name.
+ * Returns the device node corresponding to the regulator if found, else
+ * returns NULL.
+ */
+static struct device_node *of_get_regulator(struct device *dev, const char *supply)
+{
+ struct device_node *regnode = NULL;
+ char prop_name[32]; /* 32 is max size of property name */
+
+ dev_dbg(dev, "Looking up %s-supply from device tree\n", supply);
+
+ snprintf(prop_name, 32, "%s-supply", supply);
+ regnode = of_parse_phandle(dev->of_node, prop_name, 0);
+
+ if (!regnode) {
+		dev_warn(dev, "%s property in node %s references invalid phandle\n",
+ prop_name, dev->of_node->full_name);
+ return NULL;
+ }
+ return regnode;
+}
+
/* Platform voltage constraint check */
static int regulator_check_voltage(struct regulator_dev *rdev,
int *min_uV, int *max_uV)
@@ -883,8 +912,12 @@ static int set_machine_constraints(struct regulator_dev *rdev,
int ret = 0;
struct regulator_ops *ops = rdev->desc->ops;
- rdev->constraints = kmemdup(constraints, sizeof(*constraints),
- GFP_KERNEL);
+ if (constraints)
+ rdev->constraints = kmemdup(constraints, sizeof(*constraints),
+ GFP_KERNEL);
+ else
+ rdev->constraints = kzalloc(sizeof(*constraints),
+ GFP_KERNEL);
if (!rdev->constraints)
return -ENOMEM;
@@ -893,7 +926,7 @@ static int set_machine_constraints(struct regulator_dev *rdev,
goto out;
/* do we need to setup our suspend state */
- if (constraints->initial_state) {
+ if (rdev->constraints->initial_state) {
ret = suspend_prepare(rdev, rdev->constraints->initial_state);
if (ret < 0) {
rdev_err(rdev, "failed to set suspend state\n");
@@ -901,7 +934,7 @@ static int set_machine_constraints(struct regulator_dev *rdev,
}
}
- if (constraints->initial_mode) {
+ if (rdev->constraints->initial_mode) {
if (!ops->set_mode) {
rdev_err(rdev, "no set_mode operation\n");
ret = -EINVAL;
@@ -952,9 +985,8 @@ static int set_supply(struct regulator_dev *rdev,
rdev_info(rdev, "supplied by %s\n", rdev_get_name(supply_rdev));
rdev->supply = create_regulator(supply_rdev, &rdev->dev, "SUPPLY");
- if (IS_ERR(rdev->supply)) {
- err = PTR_ERR(rdev->supply);
- rdev->supply = NULL;
+ if (rdev->supply == NULL) {
+ err = -ENOMEM;
return err;
}
@@ -1148,6 +1180,30 @@ static int _regulator_get_enable_time(struct regulator_dev *rdev)
return rdev->desc->ops->enable_time(rdev);
}
+static struct regulator_dev *regulator_dev_lookup(struct device *dev,
+ const char *supply)
+{
+ struct regulator_dev *r;
+ struct device_node *node;
+
+ /* first do a dt based lookup */
+ if (dev && dev->of_node) {
+ node = of_get_regulator(dev, supply);
+ if (node)
+ list_for_each_entry(r, &regulator_list, list)
+ if (r->dev.parent &&
+ node == r->dev.of_node)
+ return r;
+ }
+
+ /* if not found, try doing it non-dt way */
+ list_for_each_entry(r, &regulator_list, list)
+ if (strcmp(rdev_get_name(r), supply) == 0)
+ return r;
+
+ return NULL;
+}
+
/* Internal regulator request function */
static struct regulator *_regulator_get(struct device *dev, const char *id,
int exclusive)
@@ -1168,6 +1224,10 @@ static struct regulator *_regulator_get(struct device *dev, const char *id,
mutex_lock(&regulator_list_mutex);
+ rdev = regulator_dev_lookup(dev, id);
+ if (rdev)
+ goto found;
+
list_for_each_entry(map, &regulator_map_list, list) {
/* If the mapping has a device set up it must match */
if (map->dev_name &&
@@ -1221,6 +1281,7 @@ found:
if (regulator == NULL) {
regulator = ERR_PTR(-ENOMEM);
module_put(rdev->owner);
+ goto out;
}
rdev->open_count++;
@@ -1726,6 +1787,7 @@ int regulator_is_supported_voltage(struct regulator *regulator,
return 0;
}
+EXPORT_SYMBOL_GPL(regulator_is_supported_voltage);
static int _regulator_do_set_voltage(struct regulator_dev *rdev,
int min_uV, int max_uV)
@@ -2429,6 +2491,43 @@ err:
EXPORT_SYMBOL_GPL(regulator_bulk_disable);
/**
+ * regulator_bulk_force_disable - force disable multiple regulator consumers
+ *
+ * @num_consumers: Number of consumers
+ * @consumers: Consumer data; clients are stored here.
+ * @return 0 on success, an errno on failure
+ *
+ * This convenience API allows consumers to forcibly disable multiple regulator
+ * clients in a single API call.
+ * NOTE: This should be used for situations when device damage will
+ * likely occur if the regulators are not disabled (e.g. over temp).
+ * Although regulator_force_disable() may return an error for some
+ * consumers, it is still called for all of them.
+ */
+int regulator_bulk_force_disable(int num_consumers,
+ struct regulator_bulk_data *consumers)
+{
+ int i;
+ int ret;
+
+ for (i = 0; i < num_consumers; i++)
+ consumers[i].ret =
+ regulator_force_disable(consumers[i].consumer);
+
+ for (i = 0; i < num_consumers; i++) {
+ if (consumers[i].ret != 0) {
+ ret = consumers[i].ret;
+ goto out;
+ }
+ }
+
+ return 0;
+out:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regulator_bulk_force_disable);
+
+/**
* regulator_bulk_free - free multiple regulator consumers
*
* @num_consumers: Number of consumers
@@ -2503,7 +2602,8 @@ static int add_regulator_attributes(struct regulator_dev *rdev)
int status = 0;
/* some attributes need specific methods to be displayed */
- if (ops->get_voltage || ops->get_voltage_sel) {
+ if ((ops->get_voltage && ops->get_voltage(rdev) >= 0) ||
+ (ops->get_voltage_sel && ops->get_voltage_sel(rdev) >= 0)) {
status = device_create_file(dev, &dev_attr_microvolts);
if (status < 0)
return status;
@@ -2637,11 +2737,13 @@ static void rdev_init_debugfs(struct regulator_dev *rdev)
*/
struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
struct device *dev, const struct regulator_init_data *init_data,
- void *driver_data)
+ void *driver_data, struct device_node *of_node)
{
+ const struct regulation_constraints *constraints = NULL;
static atomic_t regulator_no = ATOMIC_INIT(0);
struct regulator_dev *rdev;
int ret, i;
+ const char *supply = NULL;
if (regulator_desc == NULL)
return ERR_PTR(-EINVAL);
@@ -2653,9 +2755,6 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
regulator_desc->type != REGULATOR_CURRENT)
return ERR_PTR(-EINVAL);
- if (!init_data)
- return ERR_PTR(-EINVAL);
-
/* Only one of each should be implemented */
WARN_ON(regulator_desc->ops->get_voltage &&
regulator_desc->ops->get_voltage_sel);
@@ -2688,7 +2787,7 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
INIT_DELAYED_WORK(&rdev->disable_work, regulator_disable_work);
	/* perform any regulator specific init */
- if (init_data->regulator_init) {
+ if (init_data && init_data->regulator_init) {
ret = init_data->regulator_init(rdev->reg_data);
if (ret < 0)
goto clean;
@@ -2696,6 +2795,7 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
/* register with sysfs */
rdev->dev.class = &regulator_class;
+ rdev->dev.of_node = of_node;
rdev->dev.parent = dev;
dev_set_name(&rdev->dev, "regulator.%d",
atomic_inc_return(&regulator_no) - 1);
@@ -2708,7 +2808,10 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
dev_set_drvdata(&rdev->dev, rdev);
/* set regulator constraints */
- ret = set_machine_constraints(rdev, &init_data->constraints);
+ if (init_data)
+ constraints = &init_data->constraints;
+
+ ret = set_machine_constraints(rdev, constraints);
if (ret < 0)
goto scrub;
@@ -2717,21 +2820,18 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
if (ret < 0)
goto scrub;
- if (init_data->supply_regulator) {
+ if (init_data && init_data->supply_regulator)
+ supply = init_data->supply_regulator;
+ else if (regulator_desc->supply_name)
+ supply = regulator_desc->supply_name;
+
+ if (supply) {
struct regulator_dev *r;
- int found = 0;
- list_for_each_entry(r, &regulator_list, list) {
- if (strcmp(rdev_get_name(r),
- init_data->supply_regulator) == 0) {
- found = 1;
- break;
- }
- }
+ r = regulator_dev_lookup(dev, supply);
- if (!found) {
- dev_err(dev, "Failed to find supply %s\n",
- init_data->supply_regulator);
+ if (!r) {
+ dev_err(dev, "Failed to find supply %s\n", supply);
ret = -ENODEV;
goto scrub;
}
@@ -2739,18 +2839,28 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
ret = set_supply(rdev, r);
if (ret < 0)
goto scrub;
+
+ /* Enable supply if rail is enabled */
+ if (rdev->desc->ops->is_enabled &&
+ rdev->desc->ops->is_enabled(rdev)) {
+ ret = regulator_enable(rdev->supply);
+ if (ret < 0)
+ goto scrub;
+ }
}
	/* add consumer devices */
- for (i = 0; i < init_data->num_consumer_supplies; i++) {
- ret = set_consumer_device_supply(rdev,
- init_data->consumer_supplies[i].dev,
- init_data->consumer_supplies[i].dev_name,
- init_data->consumer_supplies[i].supply);
- if (ret < 0) {
- dev_err(dev, "Failed to set supply %s\n",
+ if (init_data) {
+ for (i = 0; i < init_data->num_consumer_supplies; i++) {
+ ret = set_consumer_device_supply(rdev,
+ init_data->consumer_supplies[i].dev,
+ init_data->consumer_supplies[i].dev_name,
init_data->consumer_supplies[i].supply);
- goto unset_supplies;
+ if (ret < 0) {
+ dev_err(dev, "Failed to set supply %s\n",
+ init_data->consumer_supplies[i].supply);
+ goto unset_supplies;
+ }
}
}
@@ -2799,8 +2909,8 @@ void regulator_unregister(struct regulator_dev *rdev)
list_del(&rdev->list);
if (rdev->supply)
regulator_put(rdev->supply);
- device_unregister(&rdev->dev);
kfree(rdev->constraints);
+ device_unregister(&rdev->dev);
mutex_unlock(&regulator_list_mutex);
}
EXPORT_SYMBOL_GPL(regulator_unregister);
diff --git a/drivers/regulator/da903x.c b/drivers/regulator/da903x.c
index e23ddfa8b2c6..8dbc54da7d70 100644
--- a/drivers/regulator/da903x.c
+++ b/drivers/regulator/da903x.c
@@ -537,7 +537,7 @@ static int __devinit da903x_regulator_probe(struct platform_device *pdev)
ri->desc.ops = &da9030_regulator_ldo1_15_ops;
rdev = regulator_register(&ri->desc, &pdev->dev,
- pdev->dev.platform_data, ri);
+ pdev->dev.platform_data, ri, NULL);
if (IS_ERR(rdev)) {
dev_err(&pdev->dev, "failed to register regulator %s\n",
ri->desc.name);
diff --git a/drivers/regulator/da9052-regulator.c b/drivers/regulator/da9052-regulator.c
new file mode 100644
index 000000000000..3767364452fd
--- /dev/null
+++ b/drivers/regulator/da9052-regulator.c
@@ -0,0 +1,606 @@
+/*
+* da9052-regulator.c: Regulator driver for DA9052
+*
+* Copyright(c) 2011 Dialog Semiconductor Ltd.
+*
+* Author: David Dajun Chen <dchen@diasemi.com>
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+*/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+
+#include <linux/mfd/da9052/da9052.h>
+#include <linux/mfd/da9052/reg.h>
+#include <linux/mfd/da9052/pdata.h>
+
+/* Buck step size */
+#define DA9052_BUCK_PERI_3uV_STEP 100000
+#define DA9052_BUCK_PERI_REG_MAP_UPTO_3uV 24
+#define DA9052_CONST_3uV 3000000
+
+#define DA9052_MIN_UA 0
+#define DA9052_MAX_UA 3
+#define DA9052_CURRENT_RANGE 4
+
+/* Bit masks */
+#define DA9052_BUCK_ILIM_MASK_EVEN 0x0c
+#define DA9052_BUCK_ILIM_MASK_ODD 0xc0
+
+static const u32 da9052_current_limits[3][4] = {
+ {700000, 800000, 1000000, 1200000}, /* DA9052-BC BUCKs */
+ {1600000, 2000000, 2400000, 3000000}, /* DA9053-AA/Bx BUCK-CORE */
+ {800000, 1000000, 1200000, 1500000}, /* DA9053-AA/Bx BUCK-PRO,
+ * BUCK-MEM and BUCK-PERI
+ */
+};
+
+struct da9052_regulator_info {
+ struct regulator_desc reg_desc;
+ int step_uV;
+ int min_uV;
+ int max_uV;
+ unsigned char volt_shift;
+ unsigned char en_bit;
+ unsigned char activate_bit;
+};
+
+struct da9052_regulator {
+ struct da9052 *da9052;
+ struct da9052_regulator_info *info;
+ struct regulator_dev *rdev;
+};
+
+static int verify_range(struct da9052_regulator_info *info,
+ int min_uV, int max_uV)
+{
+ if (min_uV > info->max_uV || max_uV < info->min_uV)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int da9052_regulator_enable(struct regulator_dev *rdev)
+{
+ struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
+ struct da9052_regulator_info *info = regulator->info;
+ int offset = rdev_get_id(rdev);
+
+ return da9052_reg_update(regulator->da9052,
+ DA9052_BUCKCORE_REG + offset,
+ 1 << info->en_bit, 1 << info->en_bit);
+}
+
+static int da9052_regulator_disable(struct regulator_dev *rdev)
+{
+ struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
+ struct da9052_regulator_info *info = regulator->info;
+ int offset = rdev_get_id(rdev);
+
+ return da9052_reg_update(regulator->da9052,
+ DA9052_BUCKCORE_REG + offset,
+ 1 << info->en_bit, 0);
+}
+
+static int da9052_regulator_is_enabled(struct regulator_dev *rdev)
+{
+ struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
+ struct da9052_regulator_info *info = regulator->info;
+ int offset = rdev_get_id(rdev);
+ int ret;
+
+ ret = da9052_reg_read(regulator->da9052, DA9052_BUCKCORE_REG + offset);
+ if (ret < 0)
+ return ret;
+
+ return ret & (1 << info->en_bit);
+}
+
+static int da9052_dcdc_get_current_limit(struct regulator_dev *rdev)
+{
+ struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
+ int offset = rdev_get_id(rdev);
+ int ret, row = 2;
+
+ ret = da9052_reg_read(regulator->da9052, DA9052_BUCKA_REG + offset/2);
+ if (ret < 0)
+ return ret;
+
+ /* Determine the even or odd position of the buck current limit
+ * register field
+ */
+ if (offset % 2 == 0)
+ ret = (ret & DA9052_BUCK_ILIM_MASK_EVEN) >> 2;
+ else
+ ret = (ret & DA9052_BUCK_ILIM_MASK_ODD) >> 6;
+
+ /* Select the appropriate current limit range */
+ if (regulator->da9052->chip_id == DA9052)
+ row = 0;
+ else if (offset == 0)
+ row = 1;
+
+ return da9052_current_limits[row][ret];
+}
+
+static int da9052_dcdc_set_current_limit(struct regulator_dev *rdev, int min_uA,
+ int max_uA)
+{
+ struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
+ int offset = rdev_get_id(rdev);
+ int reg_val = 0;
+ int i, row = 2;
+
+ /* Select the appropriate current limit range */
+ if (regulator->da9052->chip_id == DA9052)
+ row = 0;
+ else if (offset == 0)
+ row = 1;
+
+ if (min_uA > da9052_current_limits[row][DA9052_MAX_UA] ||
+ max_uA < da9052_current_limits[row][DA9052_MIN_UA])
+ return -EINVAL;
+
+ for (i = 0; i < DA9052_CURRENT_RANGE; i++) {
+ if (min_uA <= da9052_current_limits[row][i]) {
+ reg_val = i;
+ break;
+ }
+ }
+
+ /* Determine the even or odd position of the buck current limit
+ * register field
+ */
+ if (offset % 2 == 0)
+ return da9052_reg_update(regulator->da9052,
+ DA9052_BUCKA_REG + offset/2,
+ DA9052_BUCK_ILIM_MASK_EVEN,
+ reg_val << 2);
+ else
+ return da9052_reg_update(regulator->da9052,
+ DA9052_BUCKA_REG + offset/2,
+ DA9052_BUCK_ILIM_MASK_ODD,
+ reg_val << 6);
+}
+
+static int da9052_list_buckperi_voltage(struct regulator_dev *rdev,
+ unsigned int selector)
+{
+ struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
+ struct da9052_regulator_info *info = regulator->info;
+ int volt_uV;
+
+ if ((regulator->da9052->chip_id == DA9052) &&
+ (selector >= DA9052_BUCK_PERI_REG_MAP_UPTO_3uV)) {
+ volt_uV = ((DA9052_BUCK_PERI_REG_MAP_UPTO_3uV * info->step_uV)
+ + info->min_uV);
+ volt_uV += (selector - DA9052_BUCK_PERI_REG_MAP_UPTO_3uV)
+ * (DA9052_BUCK_PERI_3uV_STEP);
+ } else
+ volt_uV = (selector * info->step_uV) + info->min_uV;
+
+ if (volt_uV > info->max_uV)
+ return -EINVAL;
+
+ return volt_uV;
+}
+
+static int da9052_list_voltage(struct regulator_dev *rdev,
+ unsigned int selector)
+{
+ struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
+ struct da9052_regulator_info *info = regulator->info;
+ int volt_uV;
+
+ volt_uV = info->min_uV + info->step_uV * selector;
+
+ if (volt_uV > info->max_uV)
+ return -EINVAL;
+
+ return volt_uV;
+}
+
+static int da9052_regulator_set_voltage_int(struct regulator_dev *rdev,
+ int min_uV, int max_uV,
+ unsigned int *selector)
+{
+ struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
+ struct da9052_regulator_info *info = regulator->info;
+ int offset = rdev_get_id(rdev);
+ int ret;
+
+ ret = verify_range(info, min_uV, max_uV);
+ if (ret < 0)
+ return ret;
+
+ if (min_uV < info->min_uV)
+ min_uV = info->min_uV;
+
+ *selector = (min_uV - info->min_uV) / info->step_uV;
+
+ ret = da9052_list_voltage(rdev, *selector);
+ if (ret < 0)
+ return ret;
+
+ return da9052_reg_update(regulator->da9052,
+ DA9052_BUCKCORE_REG + offset,
+ (1 << info->volt_shift) - 1, *selector);
+}
+
+static int da9052_set_ldo_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV,
+ unsigned int *selector)
+{
+ return da9052_regulator_set_voltage_int(rdev, min_uV, max_uV, selector);
+}
+
+static int da9052_set_ldo5_6_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV,
+ unsigned int *selector)
+{
+ struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
+ struct da9052_regulator_info *info = regulator->info;
+ int ret;
+
+ ret = da9052_regulator_set_voltage_int(rdev, min_uV, max_uV, selector);
+ if (ret < 0)
+ return ret;
+
+ /* Some LDOs are DVC controlled which requires enabling of
+	 * the LDO activate bit to implement the changes on the
+ * LDO output.
+ */
+ return da9052_reg_update(regulator->da9052, DA9052_SUPPLY_REG, 0,
+ info->activate_bit);
+}
+
+static int da9052_set_dcdc_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV,
+ unsigned int *selector)
+{
+ struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
+ struct da9052_regulator_info *info = regulator->info;
+ int ret;
+
+ ret = da9052_regulator_set_voltage_int(rdev, min_uV, max_uV, selector);
+ if (ret < 0)
+ return ret;
+
+ /* Some DCDCs are DVC controlled which requires enabling of
+	 * the DCDC activate bit to implement the changes on the
+ * DCDC output.
+ */
+ return da9052_reg_update(regulator->da9052, DA9052_SUPPLY_REG, 0,
+ info->activate_bit);
+}
+
+static int da9052_get_regulator_voltage_sel(struct regulator_dev *rdev)
+{
+ struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
+ struct da9052_regulator_info *info = regulator->info;
+ int offset = rdev_get_id(rdev);
+ int ret;
+
+ ret = da9052_reg_read(regulator->da9052, DA9052_BUCKCORE_REG + offset);
+ if (ret < 0)
+ return ret;
+
+ ret &= ((1 << info->volt_shift) - 1);
+
+ return ret;
+}
+
+static int da9052_set_buckperi_voltage(struct regulator_dev *rdev, int min_uV,
+ int max_uV, unsigned int *selector)
+{
+ struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
+ struct da9052_regulator_info *info = regulator->info;
+ int offset = rdev_get_id(rdev);
+ int ret;
+
+ ret = verify_range(info, min_uV, max_uV);
+ if (ret < 0)
+ return ret;
+
+ if (min_uV < info->min_uV)
+ min_uV = info->min_uV;
+
+ if ((regulator->da9052->chip_id == DA9052) &&
+ (min_uV >= DA9052_CONST_3uV))
+ *selector = DA9052_BUCK_PERI_REG_MAP_UPTO_3uV +
+ ((min_uV - DA9052_CONST_3uV) /
+ (DA9052_BUCK_PERI_3uV_STEP));
+ else
+ *selector = (min_uV - info->min_uV) / info->step_uV;
+
+ ret = da9052_list_buckperi_voltage(rdev, *selector);
+ if (ret < 0)
+ return ret;
+
+ return da9052_reg_update(regulator->da9052,
+ DA9052_BUCKCORE_REG + offset,
+ (1 << info->volt_shift) - 1, *selector);
+}
+
+static int da9052_get_buckperi_voltage_sel(struct regulator_dev *rdev)
+{
+ struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
+ struct da9052_regulator_info *info = regulator->info;
+ int offset = rdev_get_id(rdev);
+ int ret;
+
+ ret = da9052_reg_read(regulator->da9052, DA9052_BUCKCORE_REG + offset);
+ if (ret < 0)
+ return ret;
+
+ ret &= ((1 << info->volt_shift) - 1);
+
+ return ret;
+}
+
+static struct regulator_ops da9052_buckperi_ops = {
+ .list_voltage = da9052_list_buckperi_voltage,
+ .get_voltage_sel = da9052_get_buckperi_voltage_sel,
+ .set_voltage = da9052_set_buckperi_voltage,
+
+ .get_current_limit = da9052_dcdc_get_current_limit,
+ .set_current_limit = da9052_dcdc_set_current_limit,
+
+ .is_enabled = da9052_regulator_is_enabled,
+ .enable = da9052_regulator_enable,
+ .disable = da9052_regulator_disable,
+};
+
+static struct regulator_ops da9052_dcdc_ops = {
+ .set_voltage = da9052_set_dcdc_voltage,
+ .get_current_limit = da9052_dcdc_get_current_limit,
+ .set_current_limit = da9052_dcdc_set_current_limit,
+
+ .list_voltage = da9052_list_voltage,
+ .get_voltage_sel = da9052_get_regulator_voltage_sel,
+ .is_enabled = da9052_regulator_is_enabled,
+ .enable = da9052_regulator_enable,
+ .disable = da9052_regulator_disable,
+};
+
+static struct regulator_ops da9052_ldo5_6_ops = {
+ .set_voltage = da9052_set_ldo5_6_voltage,
+
+ .list_voltage = da9052_list_voltage,
+ .get_voltage_sel = da9052_get_regulator_voltage_sel,
+ .is_enabled = da9052_regulator_is_enabled,
+ .enable = da9052_regulator_enable,
+ .disable = da9052_regulator_disable,
+};
+
+static struct regulator_ops da9052_ldo_ops = {
+ .set_voltage = da9052_set_ldo_voltage,
+
+ .list_voltage = da9052_list_voltage,
+ .get_voltage_sel = da9052_get_regulator_voltage_sel,
+ .is_enabled = da9052_regulator_is_enabled,
+ .enable = da9052_regulator_enable,
+ .disable = da9052_regulator_disable,
+};
+
+#define DA9052_LDO5_6(_id, step, min, max, sbits, ebits, abits) \
+{\
+ .reg_desc = {\
+ .name = "LDO" #_id,\
+ .ops = &da9052_ldo5_6_ops,\
+ .type = REGULATOR_VOLTAGE,\
+ .id = _id,\
+ .owner = THIS_MODULE,\
+ },\
+ .min_uV = (min) * 1000,\
+ .max_uV = (max) * 1000,\
+ .step_uV = (step) * 1000,\
+ .volt_shift = (sbits),\
+ .en_bit = (ebits),\
+ .activate_bit = (abits),\
+}
+
+#define DA9052_LDO(_id, step, min, max, sbits, ebits, abits) \
+{\
+ .reg_desc = {\
+ .name = "LDO" #_id,\
+ .ops = &da9052_ldo_ops,\
+ .type = REGULATOR_VOLTAGE,\
+ .id = _id,\
+ .owner = THIS_MODULE,\
+ },\
+ .min_uV = (min) * 1000,\
+ .max_uV = (max) * 1000,\
+ .step_uV = (step) * 1000,\
+ .volt_shift = (sbits),\
+ .en_bit = (ebits),\
+ .activate_bit = (abits),\
+}
+
+#define DA9052_DCDC(_id, step, min, max, sbits, ebits, abits) \
+{\
+ .reg_desc = {\
+ .name = "BUCK" #_id,\
+ .ops = &da9052_dcdc_ops,\
+ .type = REGULATOR_VOLTAGE,\
+ .id = _id,\
+ .owner = THIS_MODULE,\
+ },\
+ .min_uV = (min) * 1000,\
+ .max_uV = (max) * 1000,\
+ .step_uV = (step) * 1000,\
+ .volt_shift = (sbits),\
+ .en_bit = (ebits),\
+ .activate_bit = (abits),\
+}
+
+#define DA9052_BUCKPERI(_id, step, min, max, sbits, ebits, abits) \
+{\
+ .reg_desc = {\
+ .name = "BUCK" #_id,\
+ .ops = &da9052_buckperi_ops,\
+ .type = REGULATOR_VOLTAGE,\
+ .id = _id,\
+ .owner = THIS_MODULE,\
+ },\
+ .min_uV = (min) * 1000,\
+ .max_uV = (max) * 1000,\
+ .step_uV = (step) * 1000,\
+ .volt_shift = (sbits),\
+ .en_bit = (ebits),\
+ .activate_bit = (abits),\
+}
+
+static struct da9052_regulator_info da9052_regulator_info[] = {
+ /* Buck1 - 4 */
+ DA9052_DCDC(0, 25, 500, 2075, 6, 6, DA9052_SUPPLY_VBCOREGO),
+ DA9052_DCDC(1, 25, 500, 2075, 6, 6, DA9052_SUPPLY_VBPROGO),
+ DA9052_DCDC(2, 25, 925, 2500, 6, 6, DA9052_SUPPLY_VBMEMGO),
+ DA9052_BUCKPERI(3, 50, 1800, 3600, 5, 6, 0),
+	/* LDO1 - LDO10 */
+ DA9052_LDO(4, 50, 600, 1800, 5, 6, 0),
+ DA9052_LDO5_6(5, 25, 600, 1800, 6, 6, DA9052_SUPPLY_VLDO2GO),
+ DA9052_LDO5_6(6, 25, 1725, 3300, 6, 6, DA9052_SUPPLY_VLDO3GO),
+ DA9052_LDO(7, 25, 1725, 3300, 6, 6, 0),
+ DA9052_LDO(8, 50, 1200, 3600, 6, 6, 0),
+ DA9052_LDO(9, 50, 1200, 3600, 6, 6, 0),
+ DA9052_LDO(10, 50, 1200, 3600, 6, 6, 0),
+ DA9052_LDO(11, 50, 1200, 3600, 6, 6, 0),
+ DA9052_LDO(12, 50, 1250, 3650, 6, 6, 0),
+ DA9052_LDO(13, 50, 1200, 3600, 6, 6, 0),
+};
+
+static struct da9052_regulator_info da9053_regulator_info[] = {
+ /* Buck1 - 4 */
+ DA9052_DCDC(0, 25, 500, 2075, 6, 6, DA9052_SUPPLY_VBCOREGO),
+ DA9052_DCDC(1, 25, 500, 2075, 6, 6, DA9052_SUPPLY_VBPROGO),
+ DA9052_DCDC(2, 25, 925, 2500, 6, 6, DA9052_SUPPLY_VBMEMGO),
+ DA9052_BUCKPERI(3, 25, 925, 2500, 6, 6, 0),
+	/* LDO1 - LDO10 */
+ DA9052_LDO(4, 50, 600, 1800, 5, 6, 0),
+ DA9052_LDO5_6(5, 25, 600, 1800, 6, 6, DA9052_SUPPLY_VLDO2GO),
+ DA9052_LDO5_6(6, 25, 1725, 3300, 6, 6, DA9052_SUPPLY_VLDO3GO),
+ DA9052_LDO(7, 25, 1725, 3300, 6, 6, 0),
+ DA9052_LDO(8, 50, 1200, 3600, 6, 6, 0),
+ DA9052_LDO(9, 50, 1200, 3600, 6, 6, 0),
+ DA9052_LDO(10, 50, 1200, 3600, 6, 6, 0),
+ DA9052_LDO(11, 50, 1200, 3600, 6, 6, 0),
+ DA9052_LDO(12, 50, 1250, 3650, 6, 6, 0),
+ DA9052_LDO(13, 50, 1200, 3600, 6, 6, 0),
+};
+
+static inline struct da9052_regulator_info *find_regulator_info(u8 chip_id,
+ int id)
+{
+ struct da9052_regulator_info *info;
+ int i;
+
+ switch (chip_id) {
+ case DA9052:
+ for (i = 0; i < ARRAY_SIZE(da9052_regulator_info); i++) {
+ info = &da9052_regulator_info[i];
+ if (info->reg_desc.id == id)
+ return info;
+ }
+ break;
+ case DA9053_AA:
+ case DA9053_BA:
+ case DA9053_BB:
+ for (i = 0; i < ARRAY_SIZE(da9053_regulator_info); i++) {
+ info = &da9053_regulator_info[i];
+ if (info->reg_desc.id == id)
+ return info;
+ }
+ break;
+ }
+
+ return NULL;
+}
+
+static int __devinit da9052_regulator_probe(struct platform_device *pdev)
+{
+ struct da9052_regulator *regulator;
+ struct da9052 *da9052;
+ struct da9052_pdata *pdata;
+ int ret;
+
+ regulator = devm_kzalloc(&pdev->dev, sizeof(struct da9052_regulator),
+ GFP_KERNEL);
+ if (!regulator)
+ return -ENOMEM;
+
+ da9052 = dev_get_drvdata(pdev->dev.parent);
+ pdata = da9052->dev->platform_data;
+ regulator->da9052 = da9052;
+
+ regulator->info = find_regulator_info(regulator->da9052->chip_id,
+ pdev->id);
+ if (regulator->info == NULL) {
+ dev_err(&pdev->dev, "invalid regulator ID specified\n");
+ ret = -EINVAL;
+ goto err;
+ }
+ regulator->rdev = regulator_register(&regulator->info->reg_desc,
+ &pdev->dev,
+ pdata->regulators[pdev->id],
+ regulator, NULL);
+ if (IS_ERR(regulator->rdev)) {
+ dev_err(&pdev->dev, "failed to register regulator %s\n",
+ regulator->info->reg_desc.name);
+ ret = PTR_ERR(regulator->rdev);
+ goto err;
+ }
+
+ platform_set_drvdata(pdev, regulator);
+
+ return 0;
+err:
+ devm_kfree(&pdev->dev, regulator);
+ return ret;
+}
+
+static int __devexit da9052_regulator_remove(struct platform_device *pdev)
+{
+ struct da9052_regulator *regulator = platform_get_drvdata(pdev);
+
+ regulator_unregister(regulator->rdev);
+ devm_kfree(&pdev->dev, regulator);
+
+ return 0;
+}
+
+static struct platform_driver da9052_regulator_driver = {
+ .probe = da9052_regulator_probe,
+ .remove = __devexit_p(da9052_regulator_remove),
+ .driver = {
+ .name = "da9052-regulator",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init da9052_regulator_init(void)
+{
+ return platform_driver_register(&da9052_regulator_driver);
+}
+subsys_initcall(da9052_regulator_init);
+
+static void __exit da9052_regulator_exit(void)
+{
+ platform_driver_unregister(&da9052_regulator_driver);
+}
+module_exit(da9052_regulator_exit);
+
+MODULE_AUTHOR("David Dajun Chen <dchen@diasemi.com>");
+MODULE_DESCRIPTION("Power Regulator driver for Dialog DA9052 PMIC");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:da9052-regulator");
diff --git a/drivers/regulator/db8500-prcmu.c b/drivers/regulator/db8500-prcmu.c
index 78329751af54..515443fcd26b 100644
--- a/drivers/regulator/db8500-prcmu.c
+++ b/drivers/regulator/db8500-prcmu.c
@@ -486,7 +486,7 @@ static int __devinit db8500_regulator_probe(struct platform_device *pdev)
/* register with the regulator framework */
info->rdev = regulator_register(&info->desc, &pdev->dev,
- init_data, info);
+ init_data, info, NULL);
if (IS_ERR(info->rdev)) {
err = PTR_ERR(info->rdev);
dev_err(&pdev->dev, "failed to register %s: err %i\n",
diff --git a/drivers/regulator/dummy.c b/drivers/regulator/dummy.c
index b8f520513ce7..0ee00de4be72 100644
--- a/drivers/regulator/dummy.c
+++ b/drivers/regulator/dummy.c
@@ -42,7 +42,7 @@ static int __devinit dummy_regulator_probe(struct platform_device *pdev)
int ret;
dummy_regulator_rdev = regulator_register(&dummy_desc, NULL,
- &dummy_initdata, NULL);
+ &dummy_initdata, NULL, NULL);
if (IS_ERR(dummy_regulator_rdev)) {
ret = PTR_ERR(dummy_regulator_rdev);
pr_err("Failed to register regulator: %d\n", ret);
diff --git a/drivers/regulator/fixed.c b/drivers/regulator/fixed.c
index 21ecf212a522..e24e3a174c4b 100644
--- a/drivers/regulator/fixed.c
+++ b/drivers/regulator/fixed.c
@@ -27,6 +27,10 @@
#include <linux/gpio.h>
#include <linux/delay.h>
#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/regulator/machine.h>
struct fixed_voltage_data {
struct regulator_desc desc;
@@ -38,6 +42,58 @@ struct fixed_voltage_data {
bool is_enabled;
};
+
+/**
+ * of_get_fixed_voltage_config - extract fixed_voltage_config structure info
+ * @dev: device requesting for fixed_voltage_config
+ *
+ * Populates fixed_voltage_config structure by extracting data from device
+ * tree node, returns a pointer to the populated structure or NULL if memory
+ * alloc fails.
+ */
+static struct fixed_voltage_config *
+of_get_fixed_voltage_config(struct device *dev)
+{
+ struct fixed_voltage_config *config;
+ struct device_node *np = dev->of_node;
+ const __be32 *delay;
+ struct regulator_init_data *init_data;
+
+ config = devm_kzalloc(dev, sizeof(struct fixed_voltage_config),
+ GFP_KERNEL);
+ if (!config)
+ return NULL;
+
+ config->init_data = of_get_regulator_init_data(dev, dev->of_node);
+ if (!config->init_data)
+ return NULL;
+
+ init_data = config->init_data;
+ init_data->constraints.apply_uV = 0;
+
+ config->supply_name = init_data->constraints.name;
+ if (init_data->constraints.min_uV == init_data->constraints.max_uV) {
+ config->microvolts = init_data->constraints.min_uV;
+ } else {
+ dev_err(dev,
+ "Fixed regulator specified with variable voltages\n");
+ return NULL;
+ }
+
+ if (init_data->constraints.boot_on)
+ config->enabled_at_boot = true;
+
+ config->gpio = of_get_named_gpio(np, "gpio", 0);
+ delay = of_get_property(np, "startup-delay-us", NULL);
+ if (delay)
+ config->startup_delay = be32_to_cpu(*delay);
+
+ if (of_find_property(np, "enable-active-high", NULL))
+ config->enable_high = true;
+
+ return config;
+}
+
static int fixed_voltage_is_enabled(struct regulator_dev *dev)
{
struct fixed_voltage_data *data = rdev_get_drvdata(dev);
@@ -80,7 +136,10 @@ static int fixed_voltage_get_voltage(struct regulator_dev *dev)
{
struct fixed_voltage_data *data = rdev_get_drvdata(dev);
- return data->microvolts;
+ if (data->microvolts)
+ return data->microvolts;
+ else
+ return -EINVAL;
}
static int fixed_voltage_list_voltage(struct regulator_dev *dev,
@@ -105,10 +164,18 @@ static struct regulator_ops fixed_voltage_ops = {
static int __devinit reg_fixed_voltage_probe(struct platform_device *pdev)
{
- struct fixed_voltage_config *config = pdev->dev.platform_data;
+ struct fixed_voltage_config *config;
struct fixed_voltage_data *drvdata;
int ret;
+ if (pdev->dev.of_node)
+ config = of_get_fixed_voltage_config(&pdev->dev);
+ else
+ config = pdev->dev.platform_data;
+
+ if (!config)
+ return -ENOMEM;
+
drvdata = kzalloc(sizeof(struct fixed_voltage_data), GFP_KERNEL);
if (drvdata == NULL) {
dev_err(&pdev->dev, "Failed to allocate device data\n");
@@ -180,7 +247,8 @@ static int __devinit reg_fixed_voltage_probe(struct platform_device *pdev)
}
drvdata->dev = regulator_register(&drvdata->desc, &pdev->dev,
- config->init_data, drvdata);
+ config->init_data, drvdata,
+ pdev->dev.of_node);
if (IS_ERR(drvdata->dev)) {
ret = PTR_ERR(drvdata->dev);
dev_err(&pdev->dev, "Failed to register regulator: %d\n", ret);
@@ -217,12 +285,23 @@ static int __devexit reg_fixed_voltage_remove(struct platform_device *pdev)
return 0;
}
+#if defined(CONFIG_OF)
+static const struct of_device_id fixed_of_match[] __devinitconst = {
+ { .compatible = "regulator-fixed", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, fixed_of_match);
+#else
+#define fixed_of_match NULL
+#endif
+
static struct platform_driver regulator_fixed_voltage_driver = {
.probe = reg_fixed_voltage_probe,
.remove = __devexit_p(reg_fixed_voltage_remove),
.driver = {
.name = "reg-fixed-voltage",
.owner = THIS_MODULE,
+ .of_match_table = fixed_of_match,
},
};
diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c
index f0acf52498bd..42e1cb1835e5 100644
--- a/drivers/regulator/gpio-regulator.c
+++ b/drivers/regulator/gpio-regulator.c
@@ -284,7 +284,7 @@ static int __devinit gpio_regulator_probe(struct platform_device *pdev)
drvdata->state = state;
drvdata->dev = regulator_register(&drvdata->desc, &pdev->dev,
- config->init_data, drvdata);
+ config->init_data, drvdata, NULL);
if (IS_ERR(drvdata->dev)) {
ret = PTR_ERR(drvdata->dev);
dev_err(&pdev->dev, "Failed to register regulator: %d\n", ret);
diff --git a/drivers/regulator/isl6271a-regulator.c b/drivers/regulator/isl6271a-regulator.c
index e4b3592e8176..c1a456c4257c 100644
--- a/drivers/regulator/isl6271a-regulator.c
+++ b/drivers/regulator/isl6271a-regulator.c
@@ -170,7 +170,7 @@ static int __devinit isl6271a_probe(struct i2c_client *i2c,
for (i = 0; i < 3; i++) {
pmic->rdev[i] = regulator_register(&isl_rd[i], &i2c->dev,
- init_data, pmic);
+ init_data, pmic, NULL);
if (IS_ERR(pmic->rdev[i])) {
dev_err(&i2c->dev, "failed to register %s\n", id->name);
err = PTR_ERR(pmic->rdev[i]);
diff --git a/drivers/regulator/lp3971.c b/drivers/regulator/lp3971.c
index 72b16b5f3db6..0cfabd318a59 100644
--- a/drivers/regulator/lp3971.c
+++ b/drivers/regulator/lp3971.c
@@ -451,7 +451,7 @@ static int __devinit setup_regulators(struct lp3971 *lp3971,
for (i = 0; i < pdata->num_regulators; i++) {
struct lp3971_regulator_subdev *reg = &pdata->regulators[i];
lp3971->rdev[i] = regulator_register(&regulators[reg->id],
- lp3971->dev, reg->initdata, lp3971);
+ lp3971->dev, reg->initdata, lp3971, NULL);
if (IS_ERR(lp3971->rdev[i])) {
err = PTR_ERR(lp3971->rdev[i]);
diff --git a/drivers/regulator/lp3972.c b/drivers/regulator/lp3972.c
index fbc5e3741bef..49a15eefe5fe 100644
--- a/drivers/regulator/lp3972.c
+++ b/drivers/regulator/lp3972.c
@@ -555,7 +555,7 @@ static int __devinit setup_regulators(struct lp3972 *lp3972,
for (i = 0; i < pdata->num_regulators; i++) {
struct lp3972_regulator_subdev *reg = &pdata->regulators[i];
lp3972->rdev[i] = regulator_register(&regulators[reg->id],
- lp3972->dev, reg->initdata, lp3972);
+ lp3972->dev, reg->initdata, lp3972, NULL);
if (IS_ERR(lp3972->rdev[i])) {
err = PTR_ERR(lp3972->rdev[i]);
diff --git a/drivers/regulator/max1586.c b/drivers/regulator/max1586.c
index 3f49512c5134..40e7a4db2853 100644
--- a/drivers/regulator/max1586.c
+++ b/drivers/regulator/max1586.c
@@ -214,7 +214,7 @@ static int __devinit max1586_pmic_probe(struct i2c_client *client,
}
rdev[i] = regulator_register(&max1586_reg[id], &client->dev,
pdata->subdevs[i].platform_data,
- max1586);
+ max1586, NULL);
if (IS_ERR(rdev[i])) {
ret = PTR_ERR(rdev[i]);
dev_err(&client->dev, "failed to register %s\n",
diff --git a/drivers/regulator/max8649.c b/drivers/regulator/max8649.c
index 1062cf9f02dc..b06a2399587c 100644
--- a/drivers/regulator/max8649.c
+++ b/drivers/regulator/max8649.c
@@ -16,6 +16,7 @@
#include <linux/regulator/driver.h>
#include <linux/slab.h>
#include <linux/regulator/max8649.h>
+#include <linux/regmap.h>
#define MAX8649_DCDC_VMIN 750000 /* uV */
#define MAX8649_DCDC_VMAX 1380000 /* uV */
@@ -49,9 +50,8 @@
struct max8649_regulator_info {
struct regulator_dev *regulator;
- struct i2c_client *i2c;
struct device *dev;
- struct mutex io_lock;
+ struct regmap *regmap;
int vol_reg;
unsigned mode:2; /* bit[1:0] = VID1, VID0 */
@@ -63,71 +63,6 @@ struct max8649_regulator_info {
/* I2C operations */
-static inline int max8649_read_device(struct i2c_client *i2c,
- int reg, int bytes, void *dest)
-{
- unsigned char data;
- int ret;
-
- data = (unsigned char)reg;
- ret = i2c_master_send(i2c, &data, 1);
- if (ret < 0)
- return ret;
- ret = i2c_master_recv(i2c, dest, bytes);
- if (ret < 0)
- return ret;
- return 0;
-}
-
-static inline int max8649_write_device(struct i2c_client *i2c,
- int reg, int bytes, void *src)
-{
- unsigned char buf[bytes + 1];
- int ret;
-
- buf[0] = (unsigned char)reg;
- memcpy(&buf[1], src, bytes);
-
- ret = i2c_master_send(i2c, buf, bytes + 1);
- if (ret < 0)
- return ret;
- return 0;
-}
-
-static int max8649_reg_read(struct i2c_client *i2c, int reg)
-{
- struct max8649_regulator_info *info = i2c_get_clientdata(i2c);
- unsigned char data;
- int ret;
-
- mutex_lock(&info->io_lock);
- ret = max8649_read_device(i2c, reg, 1, &data);
- mutex_unlock(&info->io_lock);
-
- if (ret < 0)
- return ret;
- return (int)data;
-}
-
-static int max8649_set_bits(struct i2c_client *i2c, int reg,
- unsigned char mask, unsigned char data)
-{
- struct max8649_regulator_info *info = i2c_get_clientdata(i2c);
- unsigned char value;
- int ret;
-
- mutex_lock(&info->io_lock);
- ret = max8649_read_device(i2c, reg, 1, &value);
- if (ret < 0)
- goto out;
- value &= ~mask;
- value |= data;
- ret = max8649_write_device(i2c, reg, 1, &value);
-out:
- mutex_unlock(&info->io_lock);
- return ret;
-}
-
static inline int check_range(int min_uV, int max_uV)
{
if ((min_uV < MAX8649_DCDC_VMIN) || (max_uV > MAX8649_DCDC_VMAX)
@@ -144,13 +79,14 @@ static int max8649_list_voltage(struct regulator_dev *rdev, unsigned index)
static int max8649_get_voltage(struct regulator_dev *rdev)
{
struct max8649_regulator_info *info = rdev_get_drvdata(rdev);
+ unsigned int val;
unsigned char data;
int ret;
- ret = max8649_reg_read(info->i2c, info->vol_reg);
- if (ret < 0)
+ ret = regmap_read(info->regmap, info->vol_reg, &val);
+ if (ret != 0)
return ret;
- data = (unsigned char)ret & MAX8649_VOL_MASK;
+ data = (unsigned char)val & MAX8649_VOL_MASK;
return max8649_list_voltage(rdev, data);
}
@@ -170,14 +106,14 @@ static int max8649_set_voltage(struct regulator_dev *rdev,
mask = MAX8649_VOL_MASK;
*selector = data & mask;
- return max8649_set_bits(info->i2c, info->vol_reg, mask, data);
+ return regmap_update_bits(info->regmap, info->vol_reg, mask, data);
}
/* EN_PD means pulldown on EN input */
static int max8649_enable(struct regulator_dev *rdev)
{
struct max8649_regulator_info *info = rdev_get_drvdata(rdev);
- return max8649_set_bits(info->i2c, MAX8649_CONTROL, MAX8649_EN_PD, 0);
+ return regmap_update_bits(info->regmap, MAX8649_CONTROL, MAX8649_EN_PD, 0);
}
/*
@@ -187,38 +123,40 @@ static int max8649_enable(struct regulator_dev *rdev)
static int max8649_disable(struct regulator_dev *rdev)
{
struct max8649_regulator_info *info = rdev_get_drvdata(rdev);
- return max8649_set_bits(info->i2c, MAX8649_CONTROL, MAX8649_EN_PD,
+ return regmap_update_bits(info->regmap, MAX8649_CONTROL, MAX8649_EN_PD,
MAX8649_EN_PD);
}
static int max8649_is_enabled(struct regulator_dev *rdev)
{
struct max8649_regulator_info *info = rdev_get_drvdata(rdev);
+ unsigned int val;
int ret;
- ret = max8649_reg_read(info->i2c, MAX8649_CONTROL);
- if (ret < 0)
+ ret = regmap_read(info->regmap, MAX8649_CONTROL, &val);
+ if (ret != 0)
return ret;
- return !((unsigned char)ret & MAX8649_EN_PD);
+ return !((unsigned char)val & MAX8649_EN_PD);
}
static int max8649_enable_time(struct regulator_dev *rdev)
{
struct max8649_regulator_info *info = rdev_get_drvdata(rdev);
int voltage, rate, ret;
+ unsigned int val;
/* get voltage */
- ret = max8649_reg_read(info->i2c, info->vol_reg);
- if (ret < 0)
+ ret = regmap_read(info->regmap, info->vol_reg, &val);
+ if (ret != 0)
return ret;
- ret &= MAX8649_VOL_MASK;
+ val &= MAX8649_VOL_MASK;
voltage = max8649_list_voltage(rdev, (unsigned char)val); /* uV */
/* get rate */
- ret = max8649_reg_read(info->i2c, MAX8649_RAMP);
- if (ret < 0)
+ ret = regmap_read(info->regmap, MAX8649_RAMP, &val);
+ if (ret != 0)
return ret;
- ret = (ret & MAX8649_RAMP_MASK) >> 5;
+ ret = (val & MAX8649_RAMP_MASK) >> 5;
rate = (32 * 1000) >> ret; /* uV/uS */
return DIV_ROUND_UP(voltage, rate);
@@ -230,12 +168,12 @@ static int max8649_set_mode(struct regulator_dev *rdev, unsigned int mode)
switch (mode) {
case REGULATOR_MODE_FAST:
- max8649_set_bits(info->i2c, info->vol_reg, MAX8649_FORCE_PWM,
- MAX8649_FORCE_PWM);
+ regmap_update_bits(info->regmap, info->vol_reg, MAX8649_FORCE_PWM,
+ MAX8649_FORCE_PWM);
break;
case REGULATOR_MODE_NORMAL:
- max8649_set_bits(info->i2c, info->vol_reg,
- MAX8649_FORCE_PWM, 0);
+ regmap_update_bits(info->regmap, info->vol_reg,
+ MAX8649_FORCE_PWM, 0);
break;
default:
return -EINVAL;
@@ -246,10 +184,13 @@ static int max8649_set_mode(struct regulator_dev *rdev, unsigned int mode)
static unsigned int max8649_get_mode(struct regulator_dev *rdev)
{
struct max8649_regulator_info *info = rdev_get_drvdata(rdev);
+ unsigned int val;
int ret;
- ret = max8649_reg_read(info->i2c, info->vol_reg);
- if (ret & MAX8649_FORCE_PWM)
+ ret = regmap_read(info->regmap, info->vol_reg, &val);
+ if (ret != 0)
+ return ret;
+ if (val & MAX8649_FORCE_PWM)
return REGULATOR_MODE_FAST;
return REGULATOR_MODE_NORMAL;
}
@@ -275,11 +216,17 @@ static struct regulator_desc dcdc_desc = {
.owner = THIS_MODULE,
};
+static struct regmap_config max8649_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
static int __devinit max8649_regulator_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct max8649_platform_data *pdata = client->dev.platform_data;
struct max8649_regulator_info *info = NULL;
+ unsigned int val;
unsigned char data;
int ret;
@@ -289,9 +236,14 @@ static int __devinit max8649_regulator_probe(struct i2c_client *client,
return -ENOMEM;
}
- info->i2c = client;
+ info->regmap = regmap_init_i2c(client, &max8649_regmap_config);
+ if (IS_ERR(info->regmap)) {
+ ret = PTR_ERR(info->regmap);
+ dev_err(&client->dev, "Failed to allocate register map: %d\n", ret);
+ goto fail;
+ }
+
info->dev = &client->dev;
- mutex_init(&info->io_lock);
i2c_set_clientdata(client, info);
info->mode = pdata->mode;
@@ -312,8 +264,8 @@ static int __devinit max8649_regulator_probe(struct i2c_client *client,
break;
}
- ret = max8649_reg_read(info->i2c, MAX8649_CHIP_ID1);
- if (ret < 0) {
+ ret = regmap_read(info->regmap, MAX8649_CHIP_ID1, &val);
+ if (ret != 0) {
dev_err(info->dev, "Failed to detect ID of MAX8649:%d\n",
ret);
goto out;
@@ -321,33 +273,33 @@ static int __devinit max8649_regulator_probe(struct i2c_client *client,
dev_info(info->dev, "Detected MAX8649 (ID:%x)\n", ret);
/* enable VID0 & VID1 */
- max8649_set_bits(info->i2c, MAX8649_CONTROL, MAX8649_VID_MASK, 0);
+ regmap_update_bits(info->regmap, MAX8649_CONTROL, MAX8649_VID_MASK, 0);
/* enable/disable external clock synchronization */
info->extclk = pdata->extclk;
data = (info->extclk) ? MAX8649_SYNC_EXTCLK : 0;
- max8649_set_bits(info->i2c, info->vol_reg, MAX8649_SYNC_EXTCLK, data);
+ regmap_update_bits(info->regmap, info->vol_reg, MAX8649_SYNC_EXTCLK, data);
if (info->extclk) {
/* set external clock frequency */
info->extclk_freq = pdata->extclk_freq;
- max8649_set_bits(info->i2c, MAX8649_SYNC, MAX8649_EXT_MASK,
- info->extclk_freq << 6);
+ regmap_update_bits(info->regmap, MAX8649_SYNC, MAX8649_EXT_MASK,
+ info->extclk_freq << 6);
}
if (pdata->ramp_timing) {
info->ramp_timing = pdata->ramp_timing;
- max8649_set_bits(info->i2c, MAX8649_RAMP, MAX8649_RAMP_MASK,
- info->ramp_timing << 5);
+ regmap_update_bits(info->regmap, MAX8649_RAMP, MAX8649_RAMP_MASK,
+ info->ramp_timing << 5);
}
info->ramp_down = pdata->ramp_down;
if (info->ramp_down) {
- max8649_set_bits(info->i2c, MAX8649_RAMP, MAX8649_RAMP_DOWN,
- MAX8649_RAMP_DOWN);
+ regmap_update_bits(info->regmap, MAX8649_RAMP, MAX8649_RAMP_DOWN,
+ MAX8649_RAMP_DOWN);
}
info->regulator = regulator_register(&dcdc_desc, &client->dev,
- pdata->regulator, info);
+ pdata->regulator, info, NULL);
if (IS_ERR(info->regulator)) {
dev_err(info->dev, "failed to register regulator %s\n",
dcdc_desc.name);
@@ -358,6 +310,8 @@ static int __devinit max8649_regulator_probe(struct i2c_client *client,
dev_info(info->dev, "Max8649 regulator device is detected.\n");
return 0;
out:
+ regmap_exit(info->regmap);
+fail:
kfree(info);
return ret;
}
@@ -369,6 +323,7 @@ static int __devexit max8649_regulator_remove(struct i2c_client *client)
if (info) {
if (info->regulator)
regulator_unregister(info->regulator);
+ regmap_exit(info->regmap);
kfree(info);
}
diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
index 33f5d9a492ef..a838e664569f 100644
--- a/drivers/regulator/max8660.c
+++ b/drivers/regulator/max8660.c
@@ -449,7 +449,7 @@ static int __devinit max8660_probe(struct i2c_client *client,
rdev[i] = regulator_register(&max8660_reg[id], &client->dev,
pdata->subdevs[i].platform_data,
- max8660);
+ max8660, NULL);
if (IS_ERR(rdev[i])) {
ret = PTR_ERR(rdev[i]);
dev_err(&client->dev, "failed to register %s\n",
diff --git a/drivers/regulator/max8925-regulator.c b/drivers/regulator/max8925-regulator.c
index cc9ec0e03271..cc290d37c463 100644
--- a/drivers/regulator/max8925-regulator.c
+++ b/drivers/regulator/max8925-regulator.c
@@ -24,9 +24,13 @@
#define SD1_DVM_SHIFT 5 /* SDCTL1 bit5 */
#define SD1_DVM_EN 6 /* SDV1 bit 6 */
-/* bit definitions in SD & LDO control registers */
-#define OUT_ENABLE 0x1f /* Power U/D sequence as I2C */
-#define OUT_DISABLE 0x1e /* Power U/D sequence as I2C */
+/* bit definitions in LDO control registers */
+#define LDO_SEQ_I2C 0x7 /* Power U/D by i2c */
+#define LDO_SEQ_MASK 0x7 /* Power U/D sequence mask */
+#define LDO_SEQ_SHIFT 2 /* Power U/D sequence offset */
+#define LDO_I2C_EN 0x1 /* Enable by i2c */
+#define LDO_I2C_EN_MASK 0x1 /* Enable mask by i2c */
+#define LDO_I2C_EN_SHIFT 0 /* Enable offset by i2c */
struct max8925_regulator_info {
struct regulator_desc desc;
@@ -40,7 +44,6 @@ struct max8925_regulator_info {
int vol_reg;
int vol_shift;
int vol_nbits;
- int enable_bit;
int enable_reg;
};
@@ -98,8 +101,10 @@ static int max8925_enable(struct regulator_dev *rdev)
struct max8925_regulator_info *info = rdev_get_drvdata(rdev);
return max8925_set_bits(info->i2c, info->enable_reg,
- OUT_ENABLE << info->enable_bit,
- OUT_ENABLE << info->enable_bit);
+ LDO_SEQ_MASK << LDO_SEQ_SHIFT |
+ LDO_I2C_EN_MASK << LDO_I2C_EN_SHIFT,
+ LDO_SEQ_I2C << LDO_SEQ_SHIFT |
+ LDO_I2C_EN << LDO_I2C_EN_SHIFT);
}
static int max8925_disable(struct regulator_dev *rdev)
@@ -107,20 +112,24 @@ static int max8925_disable(struct regulator_dev *rdev)
struct max8925_regulator_info *info = rdev_get_drvdata(rdev);
return max8925_set_bits(info->i2c, info->enable_reg,
- OUT_ENABLE << info->enable_bit,
- OUT_DISABLE << info->enable_bit);
+ LDO_SEQ_MASK << LDO_SEQ_SHIFT |
+ LDO_I2C_EN_MASK << LDO_I2C_EN_SHIFT,
+ LDO_SEQ_I2C << LDO_SEQ_SHIFT);
}
static int max8925_is_enabled(struct regulator_dev *rdev)
{
struct max8925_regulator_info *info = rdev_get_drvdata(rdev);
- int ret;
+ int ldo_seq, ret;
ret = max8925_reg_read(info->i2c, info->enable_reg);
if (ret < 0)
return ret;
-
- return ret & (1 << info->enable_bit);
+ ldo_seq = (ret >> LDO_SEQ_SHIFT) & LDO_SEQ_MASK;
+ if (ldo_seq != LDO_SEQ_I2C)
+ return 1;
+ else
+ return ret & (LDO_I2C_EN_MASK << LDO_I2C_EN_SHIFT);
}
static int max8925_set_dvm_voltage(struct regulator_dev *rdev, int uV)
@@ -188,7 +197,6 @@ static struct regulator_ops max8925_regulator_ldo_ops = {
.vol_shift = 0, \
.vol_nbits = 6, \
.enable_reg = MAX8925_SDCTL##_id, \
- .enable_bit = 0, \
}
#define MAX8925_LDO(_id, min, max, step) \
@@ -207,7 +215,6 @@ static struct regulator_ops max8925_regulator_ldo_ops = {
.vol_shift = 0, \
.vol_nbits = 6, \
.enable_reg = MAX8925_LDOCTL##_id, \
- .enable_bit = 0, \
}
static struct max8925_regulator_info max8925_regulator_info[] = {
@@ -266,7 +273,7 @@ static int __devinit max8925_regulator_probe(struct platform_device *pdev)
ri->chip = chip;
rdev = regulator_register(&ri->desc, &pdev->dev,
- pdata->regulator[pdev->id], ri);
+ pdata->regulator[pdev->id], ri, NULL);
if (IS_ERR(rdev)) {
dev_err(&pdev->dev, "failed to register regulator %s\n",
ri->desc.name);
diff --git a/drivers/regulator/max8952.c b/drivers/regulator/max8952.c
index 3883d85c5b88..75d89400c123 100644
--- a/drivers/regulator/max8952.c
+++ b/drivers/regulator/max8952.c
@@ -208,7 +208,7 @@ static int __devinit max8952_pmic_probe(struct i2c_client *client,
max8952->pdata = pdata;
max8952->rdev = regulator_register(&regulator, max8952->dev,
- &pdata->reg_data, max8952);
+ &pdata->reg_data, max8952, NULL);
if (IS_ERR(max8952->rdev)) {
ret = PTR_ERR(max8952->rdev);
diff --git a/drivers/regulator/max8997.c b/drivers/regulator/max8997.c
index 6176129a27e5..d26e8646277b 100644
--- a/drivers/regulator/max8997.c
+++ b/drivers/regulator/max8997.c
@@ -1146,7 +1146,7 @@ static __devinit int max8997_pmic_probe(struct platform_device *pdev)
regulators[id].n_voltages = 16;
rdev[i] = regulator_register(&regulators[id], max8997->dev,
- pdata->regulators[i].initdata, max8997);
+ pdata->regulators[i].initdata, max8997, NULL);
if (IS_ERR(rdev[i])) {
ret = PTR_ERR(rdev[i]);
dev_err(max8997->dev, "regulator init failed for %d\n",
diff --git a/drivers/regulator/max8998.c b/drivers/regulator/max8998.c
index 41a1495eec2b..2d38c2493a07 100644
--- a/drivers/regulator/max8998.c
+++ b/drivers/regulator/max8998.c
@@ -847,7 +847,7 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
regulators[index].n_voltages = count;
}
rdev[i] = regulator_register(&regulators[index], max8998->dev,
- pdata->regulators[i].initdata, max8998);
+ pdata->regulators[i].initdata, max8998, NULL);
if (IS_ERR(rdev[i])) {
ret = PTR_ERR(rdev[i]);
dev_err(max8998->dev, "regulator init failed\n");
diff --git a/drivers/regulator/mc13783-regulator.c b/drivers/regulator/mc13783-regulator.c
index 8479082e1aea..8e9b90ad88ae 100644
--- a/drivers/regulator/mc13783-regulator.c
+++ b/drivers/regulator/mc13783-regulator.c
@@ -344,7 +344,7 @@ static int __devinit mc13783_regulator_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "%s id %d\n", __func__, pdev->id);
- priv = kzalloc(sizeof(*priv) +
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv) +
pdata->num_regulators * sizeof(priv->regulators[0]),
GFP_KERNEL);
if (!priv)
@@ -357,7 +357,7 @@ static int __devinit mc13783_regulator_probe(struct platform_device *pdev)
init_data = &pdata->regulators[i];
priv->regulators[i] = regulator_register(
&mc13783_regulators[init_data->id].desc,
- &pdev->dev, init_data->init_data, priv);
+ &pdev->dev, init_data->init_data, priv, NULL);
if (IS_ERR(priv->regulators[i])) {
dev_err(&pdev->dev, "failed to register regulator %s\n",
@@ -374,8 +374,6 @@ err:
while (--i >= 0)
regulator_unregister(priv->regulators[i]);
- kfree(priv);
-
return ret;
}
@@ -391,7 +389,6 @@ static int __devexit mc13783_regulator_remove(struct platform_device *pdev)
for (i = 0; i < pdata->num_regulators; i++)
regulator_unregister(priv->regulators[i]);
- kfree(priv);
return 0;
}
diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
index 023d17d022cf..e8cfc99dd8f0 100644
--- a/drivers/regulator/mc13892-regulator.c
+++ b/drivers/regulator/mc13892-regulator.c
@@ -527,18 +527,27 @@ static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
struct mc13xxx *mc13892 = dev_get_drvdata(pdev->dev.parent);
struct mc13xxx_regulator_platform_data *pdata =
dev_get_platdata(&pdev->dev);
- struct mc13xxx_regulator_init_data *init_data;
+ struct mc13xxx_regulator_init_data *mc13xxx_data;
int i, ret;
+ int num_regulators = 0;
u32 val;
- priv = kzalloc(sizeof(*priv) +
- pdata->num_regulators * sizeof(priv->regulators[0]),
+ num_regulators = mc13xxx_get_num_regulators_dt(pdev);
+ if (num_regulators <= 0 && pdata)
+ num_regulators = pdata->num_regulators;
+ if (num_regulators <= 0)
+ return -EINVAL;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv) +
+ num_regulators * sizeof(priv->regulators[0]),
GFP_KERNEL);
if (!priv)
return -ENOMEM;
+ priv->num_regulators = num_regulators;
priv->mc13xxx_regulators = mc13892_regulators;
priv->mc13xxx = mc13892;
+ platform_set_drvdata(pdev, priv);
mc13xxx_lock(mc13892);
ret = mc13xxx_reg_read(mc13892, MC13892_REVISION, &val);
@@ -569,11 +578,27 @@ static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
= mc13892_vcam_set_mode;
mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
= mc13892_vcam_get_mode;
- for (i = 0; i < pdata->num_regulators; i++) {
- init_data = &pdata->regulators[i];
+
+ mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
+ ARRAY_SIZE(mc13892_regulators));
+ for (i = 0; i < num_regulators; i++) {
+ struct regulator_init_data *init_data;
+ struct regulator_desc *desc;
+ struct device_node *node = NULL;
+ int id;
+
+ if (mc13xxx_data) {
+ id = mc13xxx_data[i].id;
+ init_data = mc13xxx_data[i].init_data;
+ node = mc13xxx_data[i].node;
+ } else {
+ id = pdata->regulators[i].id;
+ init_data = pdata->regulators[i].init_data;
+ }
+ desc = &mc13892_regulators[id].desc;
+
priv->regulators[i] = regulator_register(
- &mc13892_regulators[init_data->id].desc,
- &pdev->dev, init_data->init_data, priv);
+ desc, &pdev->dev, init_data, priv, node);
if (IS_ERR(priv->regulators[i])) {
dev_err(&pdev->dev, "failed to register regulator %s\n",
@@ -583,8 +608,6 @@ static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
}
}
- platform_set_drvdata(pdev, priv);
-
return 0;
err:
while (--i >= 0)
@@ -592,7 +615,6 @@ err:
err_free:
mc13xxx_unlock(mc13892);
- kfree(priv);
return ret;
}
@@ -600,16 +622,13 @@ err_free:
static int __devexit mc13892_regulator_remove(struct platform_device *pdev)
{
struct mc13xxx_regulator_priv *priv = platform_get_drvdata(pdev);
- struct mc13xxx_regulator_platform_data *pdata =
- dev_get_platdata(&pdev->dev);
int i;
platform_set_drvdata(pdev, NULL);
- for (i = 0; i < pdata->num_regulators; i++)
+ for (i = 0; i < priv->num_regulators; i++)
regulator_unregister(priv->regulators[i]);
- kfree(priv);
return 0;
}
diff --git a/drivers/regulator/mc13xxx-regulator-core.c b/drivers/regulator/mc13xxx-regulator-core.c
index 6532853a6ef5..80ecafef1bc3 100644
--- a/drivers/regulator/mc13xxx-regulator-core.c
+++ b/drivers/regulator/mc13xxx-regulator-core.c
@@ -18,12 +18,14 @@
#include <linux/mfd/mc13xxx.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/module.h>
+#include <linux/of.h>
#include "mc13xxx.h"
static int mc13xxx_regulator_enable(struct regulator_dev *rdev)
@@ -236,6 +238,61 @@ int mc13xxx_sw_regulator_is_enabled(struct regulator_dev *rdev)
}
EXPORT_SYMBOL_GPL(mc13xxx_sw_regulator_is_enabled);
+#ifdef CONFIG_OF
+int __devinit mc13xxx_get_num_regulators_dt(struct platform_device *pdev)
+{
+ struct device_node *parent, *child;
+ int num = 0;
+
+ of_node_get(pdev->dev.parent->of_node);
+ parent = of_find_node_by_name(pdev->dev.parent->of_node, "regulators");
+ if (!parent)
+ return -ENODEV;
+
+ for_each_child_of_node(parent, child)
+ num++;
+
+ return num;
+}
+
+struct mc13xxx_regulator_init_data * __devinit mc13xxx_parse_regulators_dt(
+ struct platform_device *pdev, struct mc13xxx_regulator *regulators,
+ int num_regulators)
+{
+ struct mc13xxx_regulator_priv *priv = platform_get_drvdata(pdev);
+ struct mc13xxx_regulator_init_data *data, *p;
+ struct device_node *parent, *child;
+ int i;
+
+ of_node_get(pdev->dev.parent->of_node);
+ parent = of_find_node_by_name(pdev->dev.parent->of_node, "regulators");
+ if (!parent)
+ return NULL;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data) * priv->num_regulators,
+ GFP_KERNEL);
+ if (!data)
+ return NULL;
+ p = data;
+
+ for_each_child_of_node(parent, child) {
+ for (i = 0; i < num_regulators; i++) {
+ if (!of_node_cmp(child->name,
+ regulators[i].desc.name)) {
+ p->id = i;
+ p->init_data = of_get_regulator_init_data(
+ &pdev->dev, child);
+ p->node = child;
+ p++;
+ break;
+ }
+ }
+ }
+
+ return data;
+}
+#endif
+
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Yong Shen <yong.shen@linaro.org>");
MODULE_DESCRIPTION("Regulator Driver for Freescale MC13xxx PMIC");
diff --git a/drivers/regulator/mc13xxx.h b/drivers/regulator/mc13xxx.h
index 27758267e122..b3961c658b05 100644
--- a/drivers/regulator/mc13xxx.h
+++ b/drivers/regulator/mc13xxx.h
@@ -29,6 +29,7 @@ struct mc13xxx_regulator_priv {
struct mc13xxx *mc13xxx;
u32 powermisc_pwgt_state;
struct mc13xxx_regulator *mc13xxx_regulators;
+ int num_regulators;
struct regulator_dev *regulators[];
};
@@ -42,13 +43,32 @@ extern int mc13xxx_fixed_regulator_set_voltage(struct regulator_dev *rdev,
int min_uV, int max_uV, unsigned *selector);
extern int mc13xxx_fixed_regulator_get_voltage(struct regulator_dev *rdev);
+#ifdef CONFIG_OF
+extern int mc13xxx_get_num_regulators_dt(struct platform_device *pdev);
+extern struct mc13xxx_regulator_init_data *mc13xxx_parse_regulators_dt(
+ struct platform_device *pdev, struct mc13xxx_regulator *regulators,
+ int num_regulators);
+#else
+static inline int mc13xxx_get_num_regulators_dt(struct platform_device *pdev)
+{
+ return -ENODEV;
+}
+
+static inline struct mc13xxx_regulator_init_data *mc13xxx_parse_regulators_dt(
+ struct platform_device *pdev, struct mc13xxx_regulator *regulators,
+ int num_regulators)
+{
+ return NULL;
+}
+#endif
+
extern struct regulator_ops mc13xxx_regulator_ops;
extern struct regulator_ops mc13xxx_fixed_regulator_ops;
#define MC13xxx_DEFINE(prefix, _name, _reg, _vsel_reg, _voltages, _ops) \
[prefix ## _name] = { \
.desc = { \
- .name = #prefix "_" #_name, \
+ .name = #_name, \
.n_voltages = ARRAY_SIZE(_voltages), \
.ops = &_ops, \
.type = REGULATOR_VOLTAGE, \
@@ -66,7 +86,7 @@ extern struct regulator_ops mc13xxx_fixed_regulator_ops;
#define MC13xxx_FIXED_DEFINE(prefix, _name, _reg, _voltages, _ops) \
[prefix ## _name] = { \
.desc = { \
- .name = #prefix "_" #_name, \
+ .name = #_name, \
.n_voltages = ARRAY_SIZE(_voltages), \
.ops = &_ops, \
.type = REGULATOR_VOLTAGE, \
@@ -81,7 +101,7 @@ extern struct regulator_ops mc13xxx_fixed_regulator_ops;
#define MC13xxx_GPO_DEFINE(prefix, _name, _reg, _voltages, _ops) \
[prefix ## _name] = { \
.desc = { \
- .name = #prefix "_" #_name, \
+ .name = #_name, \
.n_voltages = ARRAY_SIZE(_voltages), \
.ops = &_ops, \
.type = REGULATOR_VOLTAGE, \
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
new file mode 100644
index 000000000000..f1651eb69648
--- /dev/null
+++ b/drivers/regulator/of_regulator.c
@@ -0,0 +1,87 @@
+/*
+ * OF helpers for regulator framework
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * Rajendra Nayak <rnayak@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/regulator/machine.h>
+
+static void of_get_regulation_constraints(struct device_node *np,
+ struct regulator_init_data **init_data)
+{
+ const __be32 *min_uV, *max_uV, *uV_offset;
+ const __be32 *min_uA, *max_uA;
+ struct regulation_constraints *constraints = &(*init_data)->constraints;
+
+ constraints->name = of_get_property(np, "regulator-name", NULL);
+
+ min_uV = of_get_property(np, "regulator-min-microvolt", NULL);
+ if (min_uV)
+ constraints->min_uV = be32_to_cpu(*min_uV);
+ max_uV = of_get_property(np, "regulator-max-microvolt", NULL);
+ if (max_uV)
+ constraints->max_uV = be32_to_cpu(*max_uV);
+
+ /* Voltage change possible? */
+ if (constraints->min_uV != constraints->max_uV)
+ constraints->valid_ops_mask |= REGULATOR_CHANGE_VOLTAGE;
+ /* Only one voltage? Then make sure it's set. */
+ if (constraints->min_uV == constraints->max_uV)
+ constraints->apply_uV = true;
+
+ uV_offset = of_get_property(np, "regulator-microvolt-offset", NULL);
+ if (uV_offset)
+ constraints->uV_offset = be32_to_cpu(*uV_offset);
+ min_uA = of_get_property(np, "regulator-min-microamp", NULL);
+ if (min_uA)
+ constraints->min_uA = be32_to_cpu(*min_uA);
+ max_uA = of_get_property(np, "regulator-max-microamp", NULL);
+ if (max_uA)
+ constraints->max_uA = be32_to_cpu(*max_uA);
+
+ /* Current change possible? */
+ if (constraints->min_uA != constraints->max_uA)
+ constraints->valid_ops_mask |= REGULATOR_CHANGE_CURRENT;
+
+ if (of_find_property(np, "regulator-boot-on", NULL))
+ constraints->boot_on = true;
+
+ if (of_find_property(np, "regulator-always-on", NULL))
+ constraints->always_on = true;
+ else /* status change should be possible if not always on. */
+ constraints->valid_ops_mask |= REGULATOR_CHANGE_STATUS;
+}
+
+/**
+ * of_get_regulator_init_data - extract regulator_init_data structure info
+ * @dev: device requesting for regulator_init_data
+ *
+ * Populates regulator_init_data structure by extracting data from device
+ * tree node, returns a pointer to the populated structure or NULL if memory
+ * alloc fails.
+ */
+struct regulator_init_data *of_get_regulator_init_data(struct device *dev,
+ struct device_node *node)
+{
+ struct regulator_init_data *init_data;
+
+ if (!node)
+ return NULL;
+
+ init_data = devm_kzalloc(dev, sizeof(*init_data), GFP_KERNEL);
+ if (!init_data)
+ return NULL; /* Out of memory? */
+
+ of_get_regulation_constraints(node, &init_data);
+ return init_data;
+}
+EXPORT_SYMBOL_GPL(of_get_regulator_init_data);
diff --git a/drivers/regulator/pcap-regulator.c b/drivers/regulator/pcap-regulator.c
index 31f6e11a7f16..a5aab1b08bcf 100644
--- a/drivers/regulator/pcap-regulator.c
+++ b/drivers/regulator/pcap-regulator.c
@@ -277,7 +277,7 @@ static int __devinit pcap_regulator_probe(struct platform_device *pdev)
void *pcap = dev_get_drvdata(pdev->dev.parent);
rdev = regulator_register(&pcap_regulators[pdev->id], &pdev->dev,
- pdev->dev.platform_data, pcap);
+ pdev->dev.platform_data, pcap, NULL);
if (IS_ERR(rdev))
return PTR_ERR(rdev);
diff --git a/drivers/regulator/pcf50633-regulator.c b/drivers/regulator/pcf50633-regulator.c
index 69a11d9dd87f..1d1c31056297 100644
--- a/drivers/regulator/pcf50633-regulator.c
+++ b/drivers/regulator/pcf50633-regulator.c
@@ -320,7 +320,7 @@ static int __devinit pcf50633_regulator_probe(struct platform_device *pdev)
pcf = dev_to_pcf50633(pdev->dev.parent);
rdev = regulator_register(&regulators[pdev->id], &pdev->dev,
- pdev->dev.platform_data, pcf);
+ pdev->dev.platform_data, pcf, NULL);
if (IS_ERR(rdev))
return PTR_ERR(rdev);
diff --git a/drivers/regulator/tps6105x-regulator.c b/drivers/regulator/tps6105x-regulator.c
index 1011873896dc..d9278da18a9e 100644
--- a/drivers/regulator/tps6105x-regulator.c
+++ b/drivers/regulator/tps6105x-regulator.c
@@ -151,7 +151,8 @@ static int __devinit tps6105x_regulator_probe(struct platform_device *pdev)
/* Register regulator with framework */
tps6105x->regulator = regulator_register(&tps6105x_regulator_desc,
&tps6105x->client->dev,
- pdata->regulator_data, tps6105x);
+ pdata->regulator_data, tps6105x,
+ NULL);
if (IS_ERR(tps6105x->regulator)) {
ret = PTR_ERR(tps6105x->regulator);
dev_err(&tps6105x->client->dev,
diff --git a/drivers/regulator/tps65023-regulator.c b/drivers/regulator/tps65023-regulator.c
index 9fb4c7b81753..18d61a0529a9 100644
--- a/drivers/regulator/tps65023-regulator.c
+++ b/drivers/regulator/tps65023-regulator.c
@@ -152,48 +152,21 @@ struct tps_driver_data {
u8 core_regulator;
};
-static int tps_65023_set_bits(struct tps_pmic *tps, u8 reg, u8 mask)
-{
- return regmap_update_bits(tps->regmap, reg, mask, mask);
-}
-
-static int tps_65023_clear_bits(struct tps_pmic *tps, u8 reg, u8 mask)
-{
- return regmap_update_bits(tps->regmap, reg, mask, 0);
-}
-
-static int tps_65023_reg_read(struct tps_pmic *tps, u8 reg)
-{
- unsigned int val;
- int ret;
-
- ret = regmap_read(tps->regmap, reg, &val);
-
- if (ret != 0)
- return ret;
- else
- return val;
-}
-
-static int tps_65023_reg_write(struct tps_pmic *tps, u8 reg, u8 val)
-{
- return regmap_write(tps->regmap, reg, val);
-}
-
static int tps65023_dcdc_is_enabled(struct regulator_dev *dev)
{
struct tps_pmic *tps = rdev_get_drvdata(dev);
int data, dcdc = rdev_get_id(dev);
+ int ret;
u8 shift;
if (dcdc < TPS65023_DCDC_1 || dcdc > TPS65023_DCDC_3)
return -EINVAL;
shift = TPS65023_NUM_REGULATOR - dcdc;
- data = tps_65023_reg_read(tps, TPS65023_REG_REG_CTRL);
+ ret = regmap_read(tps->regmap, TPS65023_REG_REG_CTRL, &data);
- if (data < 0)
- return data;
+ if (ret != 0)
+ return ret;
else
return (data & 1<<shift) ? 1 : 0;
}
@@ -202,16 +175,17 @@ static int tps65023_ldo_is_enabled(struct regulator_dev *dev)
{
struct tps_pmic *tps = rdev_get_drvdata(dev);
int data, ldo = rdev_get_id(dev);
+ int ret;
u8 shift;
if (ldo < TPS65023_LDO_1 || ldo > TPS65023_LDO_2)
return -EINVAL;
shift = (ldo == TPS65023_LDO_1 ? 1 : 2);
- data = tps_65023_reg_read(tps, TPS65023_REG_REG_CTRL);
+ ret = regmap_read(tps->regmap, TPS65023_REG_REG_CTRL, &data);
- if (data < 0)
- return data;
+ if (ret != 0)
+ return ret;
else
return (data & 1<<shift) ? 1 : 0;
}
@@ -226,7 +200,7 @@ static int tps65023_dcdc_enable(struct regulator_dev *dev)
return -EINVAL;
shift = TPS65023_NUM_REGULATOR - dcdc;
- return tps_65023_set_bits(tps, TPS65023_REG_REG_CTRL, 1 << shift);
+ return regmap_update_bits(tps->regmap, TPS65023_REG_REG_CTRL, 1 << shift, 1 << shift);
}
static int tps65023_dcdc_disable(struct regulator_dev *dev)
@@ -239,7 +213,7 @@ static int tps65023_dcdc_disable(struct regulator_dev *dev)
return -EINVAL;
shift = TPS65023_NUM_REGULATOR - dcdc;
- return tps_65023_clear_bits(tps, TPS65023_REG_REG_CTRL, 1 << shift);
+ return regmap_update_bits(tps->regmap, TPS65023_REG_REG_CTRL, 1 << shift, 0);
}
static int tps65023_ldo_enable(struct regulator_dev *dev)
@@ -252,7 +226,7 @@ static int tps65023_ldo_enable(struct regulator_dev *dev)
return -EINVAL;
shift = (ldo == TPS65023_LDO_1 ? 1 : 2);
- return tps_65023_set_bits(tps, TPS65023_REG_REG_CTRL, 1 << shift);
+ return regmap_update_bits(tps->regmap, TPS65023_REG_REG_CTRL, 1 << shift, 1 << shift);
}
static int tps65023_ldo_disable(struct regulator_dev *dev)
@@ -265,21 +239,22 @@ static int tps65023_ldo_disable(struct regulator_dev *dev)
return -EINVAL;
shift = (ldo == TPS65023_LDO_1 ? 1 : 2);
- return tps_65023_clear_bits(tps, TPS65023_REG_REG_CTRL, 1 << shift);
+ return regmap_update_bits(tps->regmap, TPS65023_REG_REG_CTRL, 1 << shift, 0);
}
static int tps65023_dcdc_get_voltage(struct regulator_dev *dev)
{
struct tps_pmic *tps = rdev_get_drvdata(dev);
+ int ret;
int data, dcdc = rdev_get_id(dev);
if (dcdc < TPS65023_DCDC_1 || dcdc > TPS65023_DCDC_3)
return -EINVAL;
if (dcdc == tps->core_regulator) {
- data = tps_65023_reg_read(tps, TPS65023_REG_DEF_CORE);
- if (data < 0)
- return data;
+ ret = regmap_read(tps->regmap, TPS65023_REG_DEF_CORE, &data);
+ if (ret != 0)
+ return ret;
data &= (tps->info[dcdc]->table_len - 1);
return tps->info[dcdc]->table[data] * 1000;
} else
@@ -318,13 +293,13 @@ static int tps65023_dcdc_set_voltage(struct regulator_dev *dev,
if (vsel == tps->info[dcdc]->table_len)
goto failed;
- ret = tps_65023_reg_write(tps, TPS65023_REG_DEF_CORE, vsel);
+ ret = regmap_write(tps->regmap, TPS65023_REG_DEF_CORE, vsel);
/* Tell the chip that we have changed the value in DEFCORE
* and its time to update the core voltage
*/
- tps_65023_set_bits(tps, TPS65023_REG_CON_CTRL2,
- TPS65023_REG_CTRL2_GO);
+ regmap_update_bits(tps->regmap, TPS65023_REG_CON_CTRL2,
+ TPS65023_REG_CTRL2_GO, TPS65023_REG_CTRL2_GO);
return ret;
@@ -336,13 +311,14 @@ static int tps65023_ldo_get_voltage(struct regulator_dev *dev)
{
struct tps_pmic *tps = rdev_get_drvdata(dev);
int data, ldo = rdev_get_id(dev);
+ int ret;
if (ldo < TPS65023_LDO_1 || ldo > TPS65023_LDO_2)
return -EINVAL;
- data = tps_65023_reg_read(tps, TPS65023_REG_LDO_CTRL);
- if (data < 0)
- return data;
+ ret = regmap_read(tps->regmap, TPS65023_REG_LDO_CTRL, &data);
+ if (ret != 0)
+ return ret;
data >>= (TPS65023_LDO_CTRL_LDOx_SHIFT(ldo - TPS65023_LDO_1));
data &= (tps->info[ldo]->table_len - 1);
@@ -354,6 +330,7 @@ static int tps65023_ldo_set_voltage(struct regulator_dev *dev,
{
struct tps_pmic *tps = rdev_get_drvdata(dev);
int data, vsel, ldo = rdev_get_id(dev);
+ int ret;
if (ldo < TPS65023_LDO_1 || ldo > TPS65023_LDO_2)
return -EINVAL;
@@ -377,13 +354,13 @@ static int tps65023_ldo_set_voltage(struct regulator_dev *dev,
*selector = vsel;
- data = tps_65023_reg_read(tps, TPS65023_REG_LDO_CTRL);
- if (data < 0)
- return data;
+ ret = regmap_read(tps->regmap, TPS65023_REG_LDO_CTRL, &data);
+ if (ret != 0)
+ return ret;
data &= TPS65023_LDO_CTRL_LDOx_MASK(ldo - TPS65023_LDO_1);
data |= (vsel << (TPS65023_LDO_CTRL_LDOx_SHIFT(ldo - TPS65023_LDO_1)));
- return tps_65023_reg_write(tps, TPS65023_REG_LDO_CTRL, data);
+ return regmap_write(tps->regmap, TPS65023_REG_LDO_CTRL, data);
}
static int tps65023_dcdc_list_voltage(struct regulator_dev *dev,
@@ -496,7 +473,7 @@ static int __devinit tps_65023_probe(struct i2c_client *client,
/* Register the regulators */
rdev = regulator_register(&tps->desc[i], &client->dev,
- init_data, tps);
+ init_data, tps, NULL);
if (IS_ERR(rdev)) {
dev_err(&client->dev, "failed to register %s\n",
id->name);
@@ -511,12 +488,12 @@ static int __devinit tps_65023_probe(struct i2c_client *client,
i2c_set_clientdata(client, tps);
/* Enable setting output voltage by I2C */
- tps_65023_clear_bits(tps, TPS65023_REG_CON_CTRL2,
- TPS65023_REG_CTRL2_CORE_ADJ);
+ regmap_update_bits(tps->regmap, TPS65023_REG_CON_CTRL2,
+ TPS65023_REG_CTRL2_CORE_ADJ, TPS65023_REG_CTRL2_CORE_ADJ);
/* Enable setting output voltage by I2C */
- tps_65023_clear_bits(tps, TPS65023_REG_CON_CTRL2,
- TPS65023_REG_CTRL2_CORE_ADJ);
+ regmap_update_bits(tps->regmap, TPS65023_REG_CON_CTRL2,
+ TPS65023_REG_CTRL2_CORE_ADJ, TPS65023_REG_CTRL2_CORE_ADJ);
return 0;
diff --git a/drivers/regulator/tps6507x-regulator.c b/drivers/regulator/tps6507x-regulator.c
index bdef70365f52..0b63ef71a5fe 100644
--- a/drivers/regulator/tps6507x-regulator.c
+++ b/drivers/regulator/tps6507x-regulator.c
@@ -599,7 +599,7 @@ int tps6507x_pmic_probe(struct platform_device *pdev)
tps->desc[i].owner = THIS_MODULE;
rdev = regulator_register(&tps->desc[i],
- tps6507x_dev->dev, init_data, tps);
+ tps6507x_dev->dev, init_data, tps, NULL);
if (IS_ERR(rdev)) {
dev_err(tps6507x_dev->dev,
"failed to register %s regulator\n",
diff --git a/drivers/regulator/tps6524x-regulator.c b/drivers/regulator/tps6524x-regulator.c
index 9166aa0a9df7..70b7b1f4f000 100644
--- a/drivers/regulator/tps6524x-regulator.c
+++ b/drivers/regulator/tps6524x-regulator.c
@@ -651,7 +651,7 @@ static int __devinit pmic_probe(struct spi_device *spi)
hw->desc[i].n_voltages = 1;
hw->rdev[i] = regulator_register(&hw->desc[i], dev,
- init_data, hw);
+ init_data, hw, NULL);
if (IS_ERR(hw->rdev[i])) {
ret = PTR_ERR(hw->rdev[i]);
hw->rdev[i] = NULL;
diff --git a/drivers/regulator/tps6586x-regulator.c b/drivers/regulator/tps6586x-regulator.c
index 14b9389dd52a..c75fb20faa57 100644
--- a/drivers/regulator/tps6586x-regulator.c
+++ b/drivers/regulator/tps6586x-regulator.c
@@ -396,7 +396,7 @@ static int __devinit tps6586x_regulator_probe(struct platform_device *pdev)
return err;
rdev = regulator_register(&ri->desc, &pdev->dev,
- pdev->dev.platform_data, ri);
+ pdev->dev.platform_data, ri, NULL);
if (IS_ERR(rdev)) {
dev_err(&pdev->dev, "failed to register regulator %s\n",
ri->desc.name);
diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c
index b552aae55b41..5c15ba01e9c7 100644
--- a/drivers/regulator/tps65910-regulator.c
+++ b/drivers/regulator/tps65910-regulator.c
@@ -25,30 +25,6 @@
#include <linux/gpio.h>
#include <linux/mfd/tps65910.h>
-#define TPS65910_REG_VRTC 0
-#define TPS65910_REG_VIO 1
-#define TPS65910_REG_VDD1 2
-#define TPS65910_REG_VDD2 3
-#define TPS65910_REG_VDD3 4
-#define TPS65910_REG_VDIG1 5
-#define TPS65910_REG_VDIG2 6
-#define TPS65910_REG_VPLL 7
-#define TPS65910_REG_VDAC 8
-#define TPS65910_REG_VAUX1 9
-#define TPS65910_REG_VAUX2 10
-#define TPS65910_REG_VAUX33 11
-#define TPS65910_REG_VMMC 12
-
-#define TPS65911_REG_VDDCTRL 4
-#define TPS65911_REG_LDO1 5
-#define TPS65911_REG_LDO2 6
-#define TPS65911_REG_LDO3 7
-#define TPS65911_REG_LDO4 8
-#define TPS65911_REG_LDO5 9
-#define TPS65911_REG_LDO6 10
-#define TPS65911_REG_LDO7 11
-#define TPS65911_REG_LDO8 12
-
#define TPS65910_SUPPLY_STATE_ENABLED 0x1
/* supported VIO voltages in millivolts */
@@ -885,8 +861,6 @@ static __devinit int tps65910_probe(struct platform_device *pdev)
if (!pmic_plat_data)
return -EINVAL;
- reg_data = pmic_plat_data->tps65910_pmic_init_data;
-
pmic = kzalloc(sizeof(*pmic), GFP_KERNEL);
if (!pmic)
return -ENOMEM;
@@ -937,7 +911,16 @@ static __devinit int tps65910_probe(struct platform_device *pdev)
goto err_free_info;
}
- for (i = 0; i < pmic->num_regulators; i++, info++, reg_data++) {
+ for (i = 0; i < pmic->num_regulators && i < TPS65910_NUM_REGS;
+ i++, info++) {
+
+ reg_data = pmic_plat_data->tps65910_pmic_init_data[i];
+
+ /* Regulator API handles empty constraints but not NULL
+ * constraints */
+ if (!reg_data)
+ continue;
+
/* Register the regulators */
pmic->info[i] = info;
@@ -965,7 +948,7 @@ static __devinit int tps65910_probe(struct platform_device *pdev)
pmic->desc[i].owner = THIS_MODULE;
rdev = regulator_register(&pmic->desc[i],
- tps65910->dev, reg_data, pmic);
+ tps65910->dev, reg_data, pmic, NULL);
if (IS_ERR(rdev)) {
dev_err(tps65910->dev,
"failed to register %s regulator\n",
diff --git a/drivers/regulator/tps65912-regulator.c b/drivers/regulator/tps65912-regulator.c
index 39d4a1749e71..da00d88f94b7 100644
--- a/drivers/regulator/tps65912-regulator.c
+++ b/drivers/regulator/tps65912-regulator.c
@@ -727,7 +727,7 @@ static __devinit int tps65912_probe(struct platform_device *pdev)
pmic->desc[i].owner = THIS_MODULE;
range = tps65912_get_range(pmic, i);
rdev = regulator_register(&pmic->desc[i],
- tps65912->dev, reg_data, pmic);
+ tps65912->dev, reg_data, pmic, NULL);
if (IS_ERR(rdev)) {
dev_err(tps65912->dev,
"failed to register %s regulator\n",
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
index ee8747f4fa08..181a2cfe180c 100644
--- a/drivers/regulator/twl-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -71,6 +71,7 @@ struct twlreg_info {
#define VREG_TYPE 1
#define VREG_REMAP 2
#define VREG_DEDICATED 3 /* LDO control */
+#define VREG_VOLTAGE_SMPS_4030 9
/* TWL6030 register offsets */
#define VREG_TRANS 1
#define VREG_STATE 2
@@ -514,6 +515,32 @@ static struct regulator_ops twl4030ldo_ops = {
.get_status = twl4030reg_get_status,
};
+static int
+twl4030smps_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV,
+ unsigned *selector)
+{
+ struct twlreg_info *info = rdev_get_drvdata(rdev);
+ int vsel = DIV_ROUND_UP(min_uV - 600000, 12500);
+
+ twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE_SMPS_4030,
+ vsel);
+ return 0;
+}
+
+static int twl4030smps_get_voltage(struct regulator_dev *rdev)
+{
+ struct twlreg_info *info = rdev_get_drvdata(rdev);
+ int vsel = twlreg_read(info, TWL_MODULE_PM_RECEIVER,
+ VREG_VOLTAGE_SMPS_4030);
+
+ return vsel * 12500 + 600000;
+}
+
+static struct regulator_ops twl4030smps_ops = {
+ .set_voltage = twl4030smps_set_voltage,
+ .get_voltage = twl4030smps_get_voltage,
+};
+
static int twl6030ldo_list_voltage(struct regulator_dev *rdev, unsigned index)
{
struct twlreg_info *info = rdev_get_drvdata(rdev);
@@ -856,6 +883,21 @@ static struct regulator_ops twlsmps_ops = {
}, \
}
+#define TWL4030_ADJUSTABLE_SMPS(label, offset, num, turnon_delay, remap_conf) \
+ { \
+ .base = offset, \
+ .id = num, \
+ .delay = turnon_delay, \
+ .remap = remap_conf, \
+ .desc = { \
+ .name = #label, \
+ .id = TWL4030_REG_##label, \
+ .ops = &twl4030smps_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ }, \
+ }
+
#define TWL6030_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts) { \
.base = offset, \
.min_mV = min_mVolts, \
@@ -947,8 +989,8 @@ static struct twlreg_info twl_regs[] = {
TWL4030_ADJUSTABLE_LDO(VINTANA2, 0x43, 12, 100, 0x08),
TWL4030_FIXED_LDO(VINTDIG, 0x47, 1500, 13, 100, 0x08),
TWL4030_ADJUSTABLE_LDO(VIO, 0x4b, 14, 1000, 0x08),
- TWL4030_ADJUSTABLE_LDO(VDD1, 0x55, 15, 1000, 0x08),
- TWL4030_ADJUSTABLE_LDO(VDD2, 0x63, 16, 1000, 0x08),
+ TWL4030_ADJUSTABLE_SMPS(VDD1, 0x55, 15, 1000, 0x08),
+ TWL4030_ADJUSTABLE_SMPS(VDD2, 0x63, 16, 1000, 0x08),
TWL4030_FIXED_LDO(VUSB1V5, 0x71, 1500, 17, 100, 0x08),
TWL4030_FIXED_LDO(VUSB1V8, 0x74, 1800, 18, 100, 0x08),
TWL4030_FIXED_LDO(VUSB3V1, 0x77, 3100, 19, 150, 0x08),
@@ -1070,7 +1112,7 @@ static int __devinit twlreg_probe(struct platform_device *pdev)
break;
}
- rdev = regulator_register(&info->desc, &pdev->dev, initdata, info);
+ rdev = regulator_register(&info->desc, &pdev->dev, initdata, info, NULL);
if (IS_ERR(rdev)) {
dev_err(&pdev->dev, "can't register %s, %ld\n",
info->desc.name, PTR_ERR(rdev));
diff --git a/drivers/regulator/userspace-consumer.c b/drivers/regulator/userspace-consumer.c
index fc6655146999..518667ef9a0d 100644
--- a/drivers/regulator/userspace-consumer.c
+++ b/drivers/regulator/userspace-consumer.c
@@ -185,18 +185,7 @@ static struct platform_driver regulator_userspace_consumer_driver = {
},
};
-
-static int __init regulator_userspace_consumer_init(void)
-{
- return platform_driver_register(&regulator_userspace_consumer_driver);
-}
-module_init(regulator_userspace_consumer_init);
-
-static void __exit regulator_userspace_consumer_exit(void)
-{
- platform_driver_unregister(&regulator_userspace_consumer_driver);
-}
-module_exit(regulator_userspace_consumer_exit);
+module_platform_driver(regulator_userspace_consumer_driver);
MODULE_AUTHOR("Mike Rapoport <mike@compulab.co.il>");
MODULE_DESCRIPTION("Userspace consumer for voltage and current regulators");
diff --git a/drivers/regulator/virtual.c b/drivers/regulator/virtual.c
index 858c1f861ba5..ee0b161c998f 100644
--- a/drivers/regulator/virtual.c
+++ b/drivers/regulator/virtual.c
@@ -352,17 +352,7 @@ static struct platform_driver regulator_virtual_consumer_driver = {
},
};
-static int __init regulator_virtual_consumer_init(void)
-{
- return platform_driver_register(&regulator_virtual_consumer_driver);
-}
-module_init(regulator_virtual_consumer_init);
-
-static void __exit regulator_virtual_consumer_exit(void)
-{
- platform_driver_unregister(&regulator_virtual_consumer_driver);
-}
-module_exit(regulator_virtual_consumer_exit);
+module_platform_driver(regulator_virtual_consumer_driver);
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_DESCRIPTION("Virtual regulator consumer");
diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c
index bd3531d8b2ac..4904a40b0d46 100644
--- a/drivers/regulator/wm831x-dcdc.c
+++ b/drivers/regulator/wm831x-dcdc.c
@@ -511,7 +511,8 @@ static __devinit int wm831x_buckv_probe(struct platform_device *pdev)
if (pdata == NULL || pdata->dcdc[id] == NULL)
return -ENODEV;
- dcdc = kzalloc(sizeof(struct wm831x_dcdc), GFP_KERNEL);
+ dcdc = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_dcdc),
+ GFP_KERNEL);
if (dcdc == NULL) {
dev_err(&pdev->dev, "Unable to allocate private data\n");
return -ENOMEM;
@@ -553,7 +554,7 @@ static __devinit int wm831x_buckv_probe(struct platform_device *pdev)
wm831x_buckv_dvs_init(dcdc, pdata->dcdc[id]->driver_data);
dcdc->regulator = regulator_register(&dcdc->desc, &pdev->dev,
- pdata->dcdc[id], dcdc);
+ pdata->dcdc[id], dcdc, NULL);
if (IS_ERR(dcdc->regulator)) {
ret = PTR_ERR(dcdc->regulator);
dev_err(wm831x->dev, "Failed to register DCDC%d: %d\n",
@@ -590,7 +591,6 @@ err_regulator:
err:
if (dcdc->dvs_gpio)
gpio_free(dcdc->dvs_gpio);
- kfree(dcdc);
return ret;
}
@@ -605,7 +605,6 @@ static __devexit int wm831x_buckv_remove(struct platform_device *pdev)
regulator_unregister(dcdc->regulator);
if (dcdc->dvs_gpio)
gpio_free(dcdc->dvs_gpio);
- kfree(dcdc);
return 0;
}
@@ -722,7 +721,8 @@ static __devinit int wm831x_buckp_probe(struct platform_device *pdev)
if (pdata == NULL || pdata->dcdc[id] == NULL)
return -ENODEV;
- dcdc = kzalloc(sizeof(struct wm831x_dcdc), GFP_KERNEL);
+ dcdc = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_dcdc),
+ GFP_KERNEL);
if (dcdc == NULL) {
dev_err(&pdev->dev, "Unable to allocate private data\n");
return -ENOMEM;
@@ -747,7 +747,7 @@ static __devinit int wm831x_buckp_probe(struct platform_device *pdev)
dcdc->desc.owner = THIS_MODULE;
dcdc->regulator = regulator_register(&dcdc->desc, &pdev->dev,
- pdata->dcdc[id], dcdc);
+ pdata->dcdc[id], dcdc, NULL);
if (IS_ERR(dcdc->regulator)) {
ret = PTR_ERR(dcdc->regulator);
dev_err(wm831x->dev, "Failed to register DCDC%d: %d\n",
@@ -771,7 +771,6 @@ static __devinit int wm831x_buckp_probe(struct platform_device *pdev)
err_regulator:
regulator_unregister(dcdc->regulator);
err:
- kfree(dcdc);
return ret;
}
@@ -783,7 +782,6 @@ static __devexit int wm831x_buckp_remove(struct platform_device *pdev)
free_irq(platform_get_irq_byname(pdev, "UV"), dcdc);
regulator_unregister(dcdc->regulator);
- kfree(dcdc);
return 0;
}
@@ -874,7 +872,7 @@ static __devinit int wm831x_boostp_probe(struct platform_device *pdev)
dcdc->desc.owner = THIS_MODULE;
dcdc->regulator = regulator_register(&dcdc->desc, &pdev->dev,
- pdata->dcdc[id], dcdc);
+ pdata->dcdc[id], dcdc, NULL);
if (IS_ERR(dcdc->regulator)) {
ret = PTR_ERR(dcdc->regulator);
dev_err(wm831x->dev, "Failed to register DCDC%d: %d\n",
@@ -973,7 +971,7 @@ static __devinit int wm831x_epe_probe(struct platform_device *pdev)
dcdc->desc.owner = THIS_MODULE;
dcdc->regulator = regulator_register(&dcdc->desc, &pdev->dev,
- pdata->epe[id], dcdc);
+ pdata->epe[id], dcdc, NULL);
if (IS_ERR(dcdc->regulator)) {
ret = PTR_ERR(dcdc->regulator);
dev_err(wm831x->dev, "Failed to register EPE%d: %d\n",
diff --git a/drivers/regulator/wm831x-isink.c b/drivers/regulator/wm831x-isink.c
index 01f27c7f4236..634aac3f2d5f 100644
--- a/drivers/regulator/wm831x-isink.c
+++ b/drivers/regulator/wm831x-isink.c
@@ -162,7 +162,8 @@ static __devinit int wm831x_isink_probe(struct platform_device *pdev)
if (pdata == NULL || pdata->isink[id] == NULL)
return -ENODEV;
- isink = kzalloc(sizeof(struct wm831x_isink), GFP_KERNEL);
+ isink = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_isink),
+ GFP_KERNEL);
if (isink == NULL) {
dev_err(&pdev->dev, "Unable to allocate private data\n");
return -ENOMEM;
@@ -189,7 +190,7 @@ static __devinit int wm831x_isink_probe(struct platform_device *pdev)
isink->desc.owner = THIS_MODULE;
isink->regulator = regulator_register(&isink->desc, &pdev->dev,
- pdata->isink[id], isink);
+ pdata->isink[id], isink, NULL);
if (IS_ERR(isink->regulator)) {
ret = PTR_ERR(isink->regulator);
dev_err(wm831x->dev, "Failed to register ISINK%d: %d\n",
@@ -213,7 +214,6 @@ static __devinit int wm831x_isink_probe(struct platform_device *pdev)
err_regulator:
regulator_unregister(isink->regulator);
err:
- kfree(isink);
return ret;
}
@@ -226,7 +226,6 @@ static __devexit int wm831x_isink_remove(struct platform_device *pdev)
free_irq(platform_get_irq(pdev, 0), isink);
regulator_unregister(isink->regulator);
- kfree(isink);
return 0;
}
diff --git a/drivers/regulator/wm831x-ldo.c b/drivers/regulator/wm831x-ldo.c
index 6709710a059e..f1e4ab0f9fda 100644
--- a/drivers/regulator/wm831x-ldo.c
+++ b/drivers/regulator/wm831x-ldo.c
@@ -326,7 +326,7 @@ static __devinit int wm831x_gp_ldo_probe(struct platform_device *pdev)
if (pdata == NULL || pdata->ldo[id] == NULL)
return -ENODEV;
- ldo = kzalloc(sizeof(struct wm831x_ldo), GFP_KERNEL);
+ ldo = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_ldo), GFP_KERNEL);
if (ldo == NULL) {
dev_err(&pdev->dev, "Unable to allocate private data\n");
return -ENOMEM;
@@ -351,7 +351,7 @@ static __devinit int wm831x_gp_ldo_probe(struct platform_device *pdev)
ldo->desc.owner = THIS_MODULE;
ldo->regulator = regulator_register(&ldo->desc, &pdev->dev,
- pdata->ldo[id], ldo);
+ pdata->ldo[id], ldo, NULL);
if (IS_ERR(ldo->regulator)) {
ret = PTR_ERR(ldo->regulator);
dev_err(wm831x->dev, "Failed to register LDO%d: %d\n",
@@ -376,7 +376,6 @@ static __devinit int wm831x_gp_ldo_probe(struct platform_device *pdev)
err_regulator:
regulator_unregister(ldo->regulator);
err:
- kfree(ldo);
return ret;
}
@@ -388,7 +387,6 @@ static __devexit int wm831x_gp_ldo_remove(struct platform_device *pdev)
free_irq(platform_get_irq_byname(pdev, "UV"), ldo);
regulator_unregister(ldo->regulator);
- kfree(ldo);
return 0;
}
@@ -596,7 +594,7 @@ static __devinit int wm831x_aldo_probe(struct platform_device *pdev)
if (pdata == NULL || pdata->ldo[id] == NULL)
return -ENODEV;
- ldo = kzalloc(sizeof(struct wm831x_ldo), GFP_KERNEL);
+ ldo = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_ldo), GFP_KERNEL);
if (ldo == NULL) {
dev_err(&pdev->dev, "Unable to allocate private data\n");
return -ENOMEM;
@@ -621,7 +619,7 @@ static __devinit int wm831x_aldo_probe(struct platform_device *pdev)
ldo->desc.owner = THIS_MODULE;
ldo->regulator = regulator_register(&ldo->desc, &pdev->dev,
- pdata->ldo[id], ldo);
+ pdata->ldo[id], ldo, NULL);
if (IS_ERR(ldo->regulator)) {
ret = PTR_ERR(ldo->regulator);
dev_err(wm831x->dev, "Failed to register LDO%d: %d\n",
@@ -645,7 +643,6 @@ static __devinit int wm831x_aldo_probe(struct platform_device *pdev)
err_regulator:
regulator_unregister(ldo->regulator);
err:
- kfree(ldo);
return ret;
}
@@ -655,7 +652,6 @@ static __devexit int wm831x_aldo_remove(struct platform_device *pdev)
free_irq(platform_get_irq_byname(pdev, "UV"), ldo);
regulator_unregister(ldo->regulator);
- kfree(ldo);
return 0;
}
@@ -793,7 +789,7 @@ static __devinit int wm831x_alive_ldo_probe(struct platform_device *pdev)
if (pdata == NULL || pdata->ldo[id] == NULL)
return -ENODEV;
- ldo = kzalloc(sizeof(struct wm831x_ldo), GFP_KERNEL);
+ ldo = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_ldo), GFP_KERNEL);
if (ldo == NULL) {
dev_err(&pdev->dev, "Unable to allocate private data\n");
return -ENOMEM;
@@ -818,7 +814,7 @@ static __devinit int wm831x_alive_ldo_probe(struct platform_device *pdev)
ldo->desc.owner = THIS_MODULE;
ldo->regulator = regulator_register(&ldo->desc, &pdev->dev,
- pdata->ldo[id], ldo);
+ pdata->ldo[id], ldo, NULL);
if (IS_ERR(ldo->regulator)) {
ret = PTR_ERR(ldo->regulator);
dev_err(wm831x->dev, "Failed to register LDO%d: %d\n",
@@ -831,7 +827,6 @@ static __devinit int wm831x_alive_ldo_probe(struct platform_device *pdev)
return 0;
err:
- kfree(ldo);
return ret;
}
@@ -840,7 +835,6 @@ static __devexit int wm831x_alive_ldo_remove(struct platform_device *pdev)
struct wm831x_ldo *ldo = platform_get_drvdata(pdev);
regulator_unregister(ldo->regulator);
- kfree(ldo);
return 0;
}
diff --git a/drivers/regulator/wm8350-regulator.c b/drivers/regulator/wm8350-regulator.c
index 1bcb22c44095..6894009d815a 100644
--- a/drivers/regulator/wm8350-regulator.c
+++ b/drivers/regulator/wm8350-regulator.c
@@ -1428,7 +1428,7 @@ static int wm8350_regulator_probe(struct platform_device *pdev)
/* register regulator */
rdev = regulator_register(&wm8350_reg[pdev->id], &pdev->dev,
pdev->dev.platform_data,
- dev_get_drvdata(&pdev->dev));
+ dev_get_drvdata(&pdev->dev), NULL);
if (IS_ERR(rdev)) {
dev_err(&pdev->dev, "failed to register %s\n",
wm8350_reg[pdev->id].name);
diff --git a/drivers/regulator/wm8400-regulator.c b/drivers/regulator/wm8400-regulator.c
index 71632ddc3781..706f39563a7b 100644
--- a/drivers/regulator/wm8400-regulator.c
+++ b/drivers/regulator/wm8400-regulator.c
@@ -326,7 +326,7 @@ static int __devinit wm8400_regulator_probe(struct platform_device *pdev)
struct regulator_dev *rdev;
rdev = regulator_register(&regulators[pdev->id], &pdev->dev,
- pdev->dev.platform_data, wm8400);
+ pdev->dev.platform_data, wm8400, NULL);
if (IS_ERR(rdev))
return PTR_ERR(rdev);
diff --git a/drivers/regulator/wm8994-regulator.c b/drivers/regulator/wm8994-regulator.c
index b87bf5c841f8..435e335d6e67 100644
--- a/drivers/regulator/wm8994-regulator.c
+++ b/drivers/regulator/wm8994-regulator.c
@@ -269,7 +269,7 @@ static __devinit int wm8994_ldo_probe(struct platform_device *pdev)
ldo->is_enabled = true;
ldo->regulator = regulator_register(&wm8994_ldo_desc[id], &pdev->dev,
- pdata->ldo[id].init_data, ldo);
+ pdata->ldo[id].init_data, ldo, NULL);
if (IS_ERR(ldo->regulator)) {
ret = PTR_ERR(ldo->regulator);
dev_err(wm8994->dev, "Failed to register LDO%d: %d\n",
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 53eb4e55b289..e19a4031f45e 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -498,9 +498,9 @@ config RTC_DRV_CMOS
will be called rtc-cmos.
config RTC_DRV_VRTC
- tristate "Virtual RTC for Moorestown platforms"
- depends on X86_MRST
- default y if X86_MRST
+ tristate "Virtual RTC for Intel MID platforms"
+ depends on X86_INTEL_MID
+ default y if X86_INTEL_MID
help
Say "yes" here to get direct support for the real time clock
@@ -774,7 +774,7 @@ config RTC_DRV_EP93XX
config RTC_DRV_SA1100
tristate "SA11x0/PXA2xx"
- depends on ARCH_SA1100 || ARCH_PXA
+ depends on ARCH_SA1100 || ARCH_PXA || ARCH_MMP
help
If you say Y here you will get access to the real time clock
built into your SA11x0 or PXA2xx CPU.
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index e8326f26fa2f..dc4c2748bbc3 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -63,7 +63,7 @@ static int rtc_suspend(struct device *dev, pm_message_t mesg)
*/
delta = timespec_sub(old_system, old_rtc);
delta_delta = timespec_sub(delta, old_delta);
- if (abs(delta_delta.tv_sec) >= 2) {
+ if (delta_delta.tv_sec < -2 || delta_delta.tv_sec >= 2) {
/*
* if delta_delta is too large, assume time correction
* has occurred and set old_delta to the current delta.
@@ -97,9 +97,8 @@ static int rtc_resume(struct device *dev)
rtc_tm_to_time(&tm, &new_rtc.tv_sec);
new_rtc.tv_nsec = 0;
- if (new_rtc.tv_sec <= old_rtc.tv_sec) {
- if (new_rtc.tv_sec < old_rtc.tv_sec)
- pr_debug("%s: time travel!\n", dev_name(&rtc->dev));
+ if (new_rtc.tv_sec < old_rtc.tv_sec) {
+ pr_debug("%s: time travel!\n", dev_name(&rtc->dev));
return 0;
}
@@ -116,7 +115,8 @@ static int rtc_resume(struct device *dev)
sleep_time = timespec_sub(sleep_time,
timespec_sub(new_system, old_system));
- timekeeping_inject_sleeptime(&sleep_time);
+ if (sleep_time.tv_sec >= 0)
+ timekeeping_inject_sleeptime(&sleep_time);
return 0;
}
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 8e286259a007..8a1c031391d6 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -228,11 +228,11 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
alarm->time.tm_hour = now.tm_hour;
/* For simplicity, only support date rollover for now */
- if (alarm->time.tm_mday == -1) {
+ if (alarm->time.tm_mday < 1 || alarm->time.tm_mday > 31) {
alarm->time.tm_mday = now.tm_mday;
missing = day;
}
- if (alarm->time.tm_mon == -1) {
+ if ((unsigned)alarm->time.tm_mon >= 12) {
alarm->time.tm_mon = now.tm_mon;
if (missing == none)
missing = month;
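
__rtc_read_alarm() previously treated -1 as the "field not reported" marker; the new checks accept any out-of-range value as missing, and the (unsigned) cast folds the negative and too-large cases into a single comparison. The idiom in isolation (tm_mon_is_valid() is a made-up helper):

	#include <linux/types.h>

	/* Months in struct rtc_time run from 0 to 11. */
	static bool tm_mon_is_valid(int tm_mon)
	{
		/* Negative values wrap to large unsigned numbers, so one test covers both ends. */
		return (unsigned int)tm_mon < 12;
	}
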
diff --git a/drivers/rtc/rtc-88pm860x.c b/drivers/rtc/rtc-88pm860x.c
index 64b847b7f970..f04761e6622d 100644
--- a/drivers/rtc/rtc-88pm860x.c
+++ b/drivers/rtc/rtc-88pm860x.c
@@ -410,17 +410,7 @@ static struct platform_driver pm860x_rtc_driver = {
.remove = __devexit_p(pm860x_rtc_remove),
};
-static int __init pm860x_rtc_init(void)
-{
- return platform_driver_register(&pm860x_rtc_driver);
-}
-module_init(pm860x_rtc_init);
-
-static void __exit pm860x_rtc_exit(void)
-{
- platform_driver_unregister(&pm860x_rtc_driver);
-}
-module_exit(pm860x_rtc_exit);
+module_platform_driver(pm860x_rtc_driver);
MODULE_DESCRIPTION("Marvell 88PM860x RTC driver");
MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>");
diff --git a/drivers/rtc/rtc-ab8500.c b/drivers/rtc/rtc-ab8500.c
index e346705aae92..4bcf9ca2818a 100644
--- a/drivers/rtc/rtc-ab8500.c
+++ b/drivers/rtc/rtc-ab8500.c
@@ -15,7 +15,7 @@
#include <linux/platform_device.h>
#include <linux/rtc.h>
#include <linux/mfd/abx500.h>
-#include <linux/mfd/ab8500.h>
+#include <linux/mfd/abx500/ab8500.h>
#include <linux/delay.h>
#define AB8500_RTC_SOFF_STAT_REG 0x00
@@ -90,7 +90,7 @@ static int ab8500_rtc_read_time(struct device *dev, struct rtc_time *tm)
/* Early AB8500 chips will not clear the rtc read request bit */
if (abx500_get_chip_id(dev) == 0) {
- msleep(1);
+ usleep_range(1000, 1000);
} else {
/* Wait for some cycles after enabling the rtc read in ab8500 */
while (time_before(jiffies, timeout)) {
@@ -102,7 +102,7 @@ static int ab8500_rtc_read_time(struct device *dev, struct rtc_time *tm)
if (!(value & RTC_READ_REQUEST))
break;
- msleep(1);
+ usleep_range(1000, 5000);
}
}
@@ -258,6 +258,109 @@ static int ab8500_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
return ab8500_rtc_irq_enable(dev, alarm->enabled);
}
+
+static int ab8500_rtc_set_calibration(struct device *dev, int calibration)
+{
+ int retval;
+ u8 rtccal = 0;
+
+ /*
+ * Check that the calibration value (which is in units of 0.5
+ * parts-per-million) is in the AB8500's range for RtcCalibration
+ * register. -128 (0x80) is not permitted because the AB8500 uses
+ * a sign-bit rather than two's complement, so 0x80 is just another
+ * representation of zero.
+ */
+ if ((calibration < -127) || (calibration > 127)) {
+ dev_err(dev, "RtcCalibration value outside permitted range\n");
+ return -EINVAL;
+ }
+
+ /*
+ * The AB8500 uses sign (in bit7) and magnitude (in bits 0-6)
+ * so need to convert to this sort of representation before writing
+ * into RtcCalibration register...
+ */
+ if (calibration >= 0)
+ rtccal = 0x7F & calibration;
+ else
+ rtccal = ~(calibration - 1) | 0x80;
+
+ retval = abx500_set_register_interruptible(dev, AB8500_RTC,
+ AB8500_RTC_CALIB_REG, rtccal);
+
+ return retval;
+}
+
+static int ab8500_rtc_get_calibration(struct device *dev, int *calibration)
+{
+ int retval;
+ u8 rtccal = 0;
+
+ retval = abx500_get_register_interruptible(dev, AB8500_RTC,
+ AB8500_RTC_CALIB_REG, &rtccal);
+ if (retval >= 0) {
+ /*
+ * The AB8500 uses sign (in bit7) and magnitude (in bits 0-6)
+ * so need to convert value from RtcCalibration register into
+ * a two's complement signed value...
+ */
+ if (rtccal & 0x80)
+ *calibration = 0 - (rtccal & 0x7F);
+ else
+ *calibration = 0x7F & rtccal;
+ }
+
+ return retval;
+}
+
+static ssize_t ab8500_sysfs_store_rtc_calibration(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int retval;
+ int calibration = 0;
+
+ if (sscanf(buf, " %i ", &calibration) != 1) {
+ dev_err(dev, "Failed to store RTC calibration attribute\n");
+ return -EINVAL;
+ }
+
+ retval = ab8500_rtc_set_calibration(dev, calibration);
+
+ return retval ? retval : count;
+}
+
+static ssize_t ab8500_sysfs_show_rtc_calibration(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int retval = 0;
+ int calibration = 0;
+
+ retval = ab8500_rtc_get_calibration(dev, &calibration);
+ if (retval < 0) {
+ dev_err(dev, "Failed to read RTC calibration attribute\n");
+ sprintf(buf, "0\n");
+ return retval;
+ }
+
+ return sprintf(buf, "%d\n", calibration);
+}
+
+static DEVICE_ATTR(rtc_calibration, S_IRUGO | S_IWUSR,
+ ab8500_sysfs_show_rtc_calibration,
+ ab8500_sysfs_store_rtc_calibration);
+
+static int ab8500_sysfs_rtc_register(struct device *dev)
+{
+ return device_create_file(dev, &dev_attr_rtc_calibration);
+}
+
+static void ab8500_sysfs_rtc_unregister(struct device *dev)
+{
+ device_remove_file(dev, &dev_attr_rtc_calibration);
+}
+
static irqreturn_t rtc_alarm_handler(int irq, void *data)
{
struct rtc_device *rtc = data;
@@ -295,7 +398,7 @@ static int __devinit ab8500_rtc_probe(struct platform_device *pdev)
return err;
/* Wait for reset by the PorRtc */
- msleep(1);
+ usleep_range(1000, 5000);
err = abx500_get_register_interruptible(&pdev->dev, AB8500_RTC,
AB8500_RTC_STAT_REG, &rtc_ctrl);
@@ -308,6 +411,8 @@ static int __devinit ab8500_rtc_probe(struct platform_device *pdev)
return -ENODEV;
}
+ device_init_wakeup(&pdev->dev, true);
+
rtc = rtc_device_register("ab8500-rtc", &pdev->dev, &ab8500_rtc_ops,
THIS_MODULE);
if (IS_ERR(rtc)) {
@@ -316,8 +421,8 @@ static int __devinit ab8500_rtc_probe(struct platform_device *pdev)
return err;
}
- err = request_threaded_irq(irq, NULL, rtc_alarm_handler, 0,
- "ab8500-rtc", rtc);
+ err = request_threaded_irq(irq, NULL, rtc_alarm_handler,
+ IRQF_NO_SUSPEND, "ab8500-rtc", rtc);
if (err < 0) {
rtc_device_unregister(rtc);
return err;
@@ -325,6 +430,13 @@ static int __devinit ab8500_rtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, rtc);
+
+ err = ab8500_sysfs_rtc_register(&pdev->dev);
+ if (err) {
+ dev_err(&pdev->dev, "sysfs RTC failed to register\n");
+ return err;
+ }
+
return 0;
}
@@ -333,6 +445,8 @@ static int __devexit ab8500_rtc_remove(struct platform_device *pdev)
struct rtc_device *rtc = platform_get_drvdata(pdev);
int irq = platform_get_irq_byname(pdev, "ALARM");
+ ab8500_sysfs_rtc_unregister(&pdev->dev);
+
free_irq(irq, rtc);
rtc_device_unregister(rtc);
platform_set_drvdata(pdev, NULL);
@@ -349,18 +463,8 @@ static struct platform_driver ab8500_rtc_driver = {
.remove = __devexit_p(ab8500_rtc_remove),
};
-static int __init ab8500_rtc_init(void)
-{
- return platform_driver_register(&ab8500_rtc_driver);
-}
-
-static void __exit ab8500_rtc_exit(void)
-{
- platform_driver_unregister(&ab8500_rtc_driver);
-}
+module_platform_driver(ab8500_rtc_driver);
-module_init(ab8500_rtc_init);
-module_exit(ab8500_rtc_exit);
MODULE_AUTHOR("Virupax Sadashivpetimath <virupax.sadashivpetimath@stericsson.com>");
MODULE_DESCRIPTION("AB8500 RTC Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
index e39b77a4609a..dc474bc6522d 100644
--- a/drivers/rtc/rtc-at91rm9200.c
+++ b/drivers/rtc/rtc-at91rm9200.c
@@ -32,11 +32,17 @@
#include <mach/at91_rtc.h>
+#define at91_rtc_read(field) \
+ __raw_readl(at91_rtc_regs + field)
+#define at91_rtc_write(field, val) \
+ __raw_writel((val), at91_rtc_regs + field)
#define AT91_RTC_EPOCH 1900UL /* just like arch/arm/common/rtctime.c */
static DECLARE_COMPLETION(at91_rtc_updated);
static unsigned int at91_alarm_year = AT91_RTC_EPOCH;
+static void __iomem *at91_rtc_regs;
+static int irq;
/*
* Decode time/date into rtc_time structure
@@ -48,10 +54,10 @@ static void at91_rtc_decodetime(unsigned int timereg, unsigned int calreg,
/* must read twice in case it changes */
do {
- time = at91_sys_read(timereg);
- date = at91_sys_read(calreg);
- } while ((time != at91_sys_read(timereg)) ||
- (date != at91_sys_read(calreg)));
+ time = at91_rtc_read(timereg);
+ date = at91_rtc_read(calreg);
+ } while ((time != at91_rtc_read(timereg)) ||
+ (date != at91_rtc_read(calreg)));
tm->tm_sec = bcd2bin((time & AT91_RTC_SEC) >> 0);
tm->tm_min = bcd2bin((time & AT91_RTC_MIN) >> 8);
@@ -98,19 +104,19 @@ static int at91_rtc_settime(struct device *dev, struct rtc_time *tm)
tm->tm_hour, tm->tm_min, tm->tm_sec);
/* Stop Time/Calendar from counting */
- cr = at91_sys_read(AT91_RTC_CR);
- at91_sys_write(AT91_RTC_CR, cr | AT91_RTC_UPDCAL | AT91_RTC_UPDTIM);
+ cr = at91_rtc_read(AT91_RTC_CR);
+ at91_rtc_write(AT91_RTC_CR, cr | AT91_RTC_UPDCAL | AT91_RTC_UPDTIM);
- at91_sys_write(AT91_RTC_IER, AT91_RTC_ACKUPD);
+ at91_rtc_write(AT91_RTC_IER, AT91_RTC_ACKUPD);
wait_for_completion(&at91_rtc_updated); /* wait for ACKUPD interrupt */
- at91_sys_write(AT91_RTC_IDR, AT91_RTC_ACKUPD);
+ at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD);
- at91_sys_write(AT91_RTC_TIMR,
+ at91_rtc_write(AT91_RTC_TIMR,
bin2bcd(tm->tm_sec) << 0
| bin2bcd(tm->tm_min) << 8
| bin2bcd(tm->tm_hour) << 16);
- at91_sys_write(AT91_RTC_CALR,
+ at91_rtc_write(AT91_RTC_CALR,
bin2bcd((tm->tm_year + 1900) / 100) /* century */
| bin2bcd(tm->tm_year % 100) << 8 /* year */
| bin2bcd(tm->tm_mon + 1) << 16 /* tm_mon starts at zero */
@@ -118,8 +124,8 @@ static int at91_rtc_settime(struct device *dev, struct rtc_time *tm)
| bin2bcd(tm->tm_mday) << 24);
/* Restart Time/Calendar */
- cr = at91_sys_read(AT91_RTC_CR);
- at91_sys_write(AT91_RTC_CR, cr & ~(AT91_RTC_UPDCAL | AT91_RTC_UPDTIM));
+ cr = at91_rtc_read(AT91_RTC_CR);
+ at91_rtc_write(AT91_RTC_CR, cr & ~(AT91_RTC_UPDCAL | AT91_RTC_UPDTIM));
return 0;
}
@@ -135,7 +141,7 @@ static int at91_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm)
tm->tm_yday = rtc_year_days(tm->tm_mday, tm->tm_mon, tm->tm_year);
tm->tm_year = at91_alarm_year - 1900;
- alrm->enabled = (at91_sys_read(AT91_RTC_IMR) & AT91_RTC_ALARM)
+ alrm->enabled = (at91_rtc_read(AT91_RTC_IMR) & AT91_RTC_ALARM)
? 1 : 0;
pr_debug("%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__,
@@ -160,20 +166,20 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
tm.tm_min = alrm->time.tm_min;
tm.tm_sec = alrm->time.tm_sec;
- at91_sys_write(AT91_RTC_IDR, AT91_RTC_ALARM);
- at91_sys_write(AT91_RTC_TIMALR,
+ at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ALARM);
+ at91_rtc_write(AT91_RTC_TIMALR,
bin2bcd(tm.tm_sec) << 0
| bin2bcd(tm.tm_min) << 8
| bin2bcd(tm.tm_hour) << 16
| AT91_RTC_HOUREN | AT91_RTC_MINEN | AT91_RTC_SECEN);
- at91_sys_write(AT91_RTC_CALALR,
+ at91_rtc_write(AT91_RTC_CALALR,
bin2bcd(tm.tm_mon + 1) << 16 /* tm_mon starts at zero */
| bin2bcd(tm.tm_mday) << 24
| AT91_RTC_DATEEN | AT91_RTC_MTHEN);
if (alrm->enabled) {
- at91_sys_write(AT91_RTC_SCCR, AT91_RTC_ALARM);
- at91_sys_write(AT91_RTC_IER, AT91_RTC_ALARM);
+ at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_ALARM);
+ at91_rtc_write(AT91_RTC_IER, AT91_RTC_ALARM);
}
pr_debug("%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__,
@@ -188,10 +194,10 @@ static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
pr_debug("%s(): cmd=%08x\n", __func__, enabled);
if (enabled) {
- at91_sys_write(AT91_RTC_SCCR, AT91_RTC_ALARM);
- at91_sys_write(AT91_RTC_IER, AT91_RTC_ALARM);
+ at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_ALARM);
+ at91_rtc_write(AT91_RTC_IER, AT91_RTC_ALARM);
} else
- at91_sys_write(AT91_RTC_IDR, AT91_RTC_ALARM);
+ at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ALARM);
return 0;
}
@@ -200,7 +206,7 @@ static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
*/
static int at91_rtc_proc(struct device *dev, struct seq_file *seq)
{
- unsigned long imr = at91_sys_read(AT91_RTC_IMR);
+ unsigned long imr = at91_rtc_read(AT91_RTC_IMR);
seq_printf(seq, "update_IRQ\t: %s\n",
(imr & AT91_RTC_ACKUPD) ? "yes" : "no");
@@ -220,7 +226,7 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id)
unsigned int rtsr;
unsigned long events = 0;
- rtsr = at91_sys_read(AT91_RTC_SR) & at91_sys_read(AT91_RTC_IMR);
+ rtsr = at91_rtc_read(AT91_RTC_SR) & at91_rtc_read(AT91_RTC_IMR);
if (rtsr) { /* this interrupt is shared! Is it ours? */
if (rtsr & AT91_RTC_ALARM)
events |= (RTC_AF | RTC_IRQF);
@@ -229,7 +235,7 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id)
if (rtsr & AT91_RTC_ACKUPD)
complete(&at91_rtc_updated);
- at91_sys_write(AT91_RTC_SCCR, rtsr); /* clear status reg */
+ at91_rtc_write(AT91_RTC_SCCR, rtsr); /* clear status reg */
rtc_update_irq(rtc, 1, events);
@@ -256,22 +262,41 @@ static const struct rtc_class_ops at91_rtc_ops = {
static int __init at91_rtc_probe(struct platform_device *pdev)
{
struct rtc_device *rtc;
- int ret;
+ struct resource *regs;
+ int ret = 0;
- at91_sys_write(AT91_RTC_CR, 0);
- at91_sys_write(AT91_RTC_MR, 0); /* 24 hour mode */
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!regs) {
+ dev_err(&pdev->dev, "no mmio resource defined\n");
+ return -ENXIO;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "no irq resource defined\n");
+ return -ENXIO;
+ }
+
+ at91_rtc_regs = ioremap(regs->start, resource_size(regs));
+ if (!at91_rtc_regs) {
+ dev_err(&pdev->dev, "failed to map registers, aborting.\n");
+ return -ENOMEM;
+ }
+
+ at91_rtc_write(AT91_RTC_CR, 0);
+ at91_rtc_write(AT91_RTC_MR, 0); /* 24 hour mode */
/* Disable all interrupts */
- at91_sys_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM |
+ at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM |
AT91_RTC_SECEV | AT91_RTC_TIMEV |
AT91_RTC_CALEV);
- ret = request_irq(AT91_ID_SYS, at91_rtc_interrupt,
+ ret = request_irq(irq, at91_rtc_interrupt,
IRQF_SHARED,
"at91_rtc", pdev);
if (ret) {
printk(KERN_ERR "at91_rtc: IRQ %d already in use.\n",
- AT91_ID_SYS);
+ irq);
return ret;
}
@@ -284,7 +309,7 @@ static int __init at91_rtc_probe(struct platform_device *pdev)
rtc = rtc_device_register(pdev->name, &pdev->dev,
&at91_rtc_ops, THIS_MODULE);
if (IS_ERR(rtc)) {
- free_irq(AT91_ID_SYS, pdev);
+ free_irq(irq, pdev);
return PTR_ERR(rtc);
}
platform_set_drvdata(pdev, rtc);
@@ -301,10 +326,10 @@ static int __exit at91_rtc_remove(struct platform_device *pdev)
struct rtc_device *rtc = platform_get_drvdata(pdev);
/* Disable all interrupts */
- at91_sys_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM |
+ at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM |
AT91_RTC_SECEV | AT91_RTC_TIMEV |
AT91_RTC_CALEV);
- free_irq(AT91_ID_SYS, pdev);
+ free_irq(irq, pdev);
rtc_device_unregister(rtc);
platform_set_drvdata(pdev, NULL);
@@ -323,13 +348,13 @@ static int at91_rtc_suspend(struct device *dev)
/* this IRQ is shared with DBGU and other hardware which isn't
* necessarily doing PM like we are...
*/
- at91_rtc_imr = at91_sys_read(AT91_RTC_IMR)
+ at91_rtc_imr = at91_rtc_read(AT91_RTC_IMR)
& (AT91_RTC_ALARM|AT91_RTC_SECEV);
if (at91_rtc_imr) {
if (device_may_wakeup(dev))
- enable_irq_wake(AT91_ID_SYS);
+ enable_irq_wake(irq);
else
- at91_sys_write(AT91_RTC_IDR, at91_rtc_imr);
+ at91_rtc_write(AT91_RTC_IDR, at91_rtc_imr);
}
return 0;
}
@@ -338,9 +363,9 @@ static int at91_rtc_resume(struct device *dev)
{
if (at91_rtc_imr) {
if (device_may_wakeup(dev))
- disable_irq_wake(AT91_ID_SYS);
+ disable_irq_wake(irq);
else
- at91_sys_write(AT91_RTC_IER, at91_rtc_imr);
+ at91_rtc_write(AT91_RTC_IER, at91_rtc_imr);
}
return 0;
}
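
The at91rm9200 conversion drops the global at91_sys_read()/at91_sys_write() system-controller helpers: probe now takes the MMIO and IRQ resources from the platform device, ioremap()s the register block, and the new at91_rtc_read()/at91_rtc_write() macros issue __raw_readl()/__raw_writel() against that mapping. The resource handling reduced to its skeleton (my_* names are placeholders, cleanup paths omitted):

	static void __iomem *my_regs;
	static int my_irq;

	static int my_probe(struct platform_device *pdev)
	{
		struct resource *regs;

		regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!regs)
			return -ENXIO;

		my_irq = platform_get_irq(pdev, 0);
		if (my_irq < 0)
			return -ENXIO;

		my_regs = ioremap(regs->start, resource_size(regs));
		if (!my_regs)
			return -ENOMEM;

		/* From here on, registers are reached as my_regs + offset. */
		return 0;
	}
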
diff --git a/drivers/rtc/rtc-bfin.c b/drivers/rtc/rtc-bfin.c
index 90d866272c8e..abfc1a0c07d9 100644
--- a/drivers/rtc/rtc-bfin.c
+++ b/drivers/rtc/rtc-bfin.c
@@ -456,18 +456,7 @@ static struct platform_driver bfin_rtc_driver = {
.resume = bfin_rtc_resume,
};
-static int __init bfin_rtc_init(void)
-{
- return platform_driver_register(&bfin_rtc_driver);
-}
-
-static void __exit bfin_rtc_exit(void)
-{
- platform_driver_unregister(&bfin_rtc_driver);
-}
-
-module_init(bfin_rtc_init);
-module_exit(bfin_rtc_exit);
+module_platform_driver(bfin_rtc_driver);
MODULE_DESCRIPTION("Blackfin On-Chip Real Time Clock Driver");
MODULE_AUTHOR("Mike Frysinger <vapier@gentoo.org>");
diff --git a/drivers/rtc/rtc-bq4802.c b/drivers/rtc/rtc-bq4802.c
index 128270ce355d..bf612ef22941 100644
--- a/drivers/rtc/rtc-bq4802.c
+++ b/drivers/rtc/rtc-bq4802.c
@@ -218,15 +218,4 @@ static struct platform_driver bq4802_driver = {
.remove = __devexit_p(bq4802_remove),
};
-static int __init bq4802_init(void)
-{
- return platform_driver_register(&bq4802_driver);
-}
-
-static void __exit bq4802_exit(void)
-{
- platform_driver_unregister(&bq4802_driver);
-}
-
-module_init(bq4802_init);
-module_exit(bq4802_exit);
+module_platform_driver(bq4802_driver);
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index 05beb6c1ca79..d7782aa09943 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -164,7 +164,7 @@ static inline unsigned char cmos_read_bank2(unsigned char addr)
static inline void cmos_write_bank2(unsigned char val, unsigned char addr)
{
outb(addr, RTC_PORT(2));
- outb(val, RTC_PORT(2));
+ outb(val, RTC_PORT(3));
}
#else
diff --git a/drivers/rtc/rtc-dm355evm.c b/drivers/rtc/rtc-dm355evm.c
index 2322c43af201..d4457afcba89 100644
--- a/drivers/rtc/rtc-dm355evm.c
+++ b/drivers/rtc/rtc-dm355evm.c
@@ -161,16 +161,6 @@ static struct platform_driver rtc_dm355evm_driver = {
},
};
-static int __init dm355evm_rtc_init(void)
-{
- return platform_driver_register(&rtc_dm355evm_driver);
-}
-module_init(dm355evm_rtc_init);
-
-static void __exit dm355evm_rtc_exit(void)
-{
- platform_driver_unregister(&rtc_dm355evm_driver);
-}
-module_exit(dm355evm_rtc_exit);
+module_platform_driver(rtc_dm355evm_driver);
MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-ds1286.c b/drivers/rtc/rtc-ds1286.c
index 68e6caf25496..990c3ff489bf 100644
--- a/drivers/rtc/rtc-ds1286.c
+++ b/drivers/rtc/rtc-ds1286.c
@@ -396,21 +396,10 @@ static struct platform_driver ds1286_platform_driver = {
.remove = __devexit_p(ds1286_remove),
};
-static int __init ds1286_init(void)
-{
- return platform_driver_register(&ds1286_platform_driver);
-}
-
-static void __exit ds1286_exit(void)
-{
- platform_driver_unregister(&ds1286_platform_driver);
-}
+module_platform_driver(ds1286_platform_driver);
MODULE_AUTHOR("Thomas Bogendoerfer <tsbogend@alpha.franken.de>");
MODULE_DESCRIPTION("DS1286 RTC driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:rtc-ds1286");
-
-module_init(ds1286_init);
-module_exit(ds1286_exit);
diff --git a/drivers/rtc/rtc-ds1511.c b/drivers/rtc/rtc-ds1511.c
index 586c244a05d8..761f36bc83a9 100644
--- a/drivers/rtc/rtc-ds1511.c
+++ b/drivers/rtc/rtc-ds1511.c
@@ -580,20 +580,7 @@ static struct platform_driver ds1511_rtc_driver = {
},
};
- static int __init
-ds1511_rtc_init(void)
-{
- return platform_driver_register(&ds1511_rtc_driver);
-}
-
- static void __exit
-ds1511_rtc_exit(void)
-{
- platform_driver_unregister(&ds1511_rtc_driver);
-}
-
-module_init(ds1511_rtc_init);
-module_exit(ds1511_rtc_exit);
+module_platform_driver(ds1511_rtc_driver);
MODULE_AUTHOR("Andrew Sharp <andy.sharp@lsi.com>");
MODULE_DESCRIPTION("Dallas DS1511 RTC driver");
diff --git a/drivers/rtc/rtc-ds1553.c b/drivers/rtc/rtc-ds1553.c
index 1350029044e6..6f0a1b530f2e 100644
--- a/drivers/rtc/rtc-ds1553.c
+++ b/drivers/rtc/rtc-ds1553.c
@@ -361,18 +361,7 @@ static struct platform_driver ds1553_rtc_driver = {
},
};
-static __init int ds1553_init(void)
-{
- return platform_driver_register(&ds1553_rtc_driver);
-}
-
-static __exit void ds1553_exit(void)
-{
- platform_driver_unregister(&ds1553_rtc_driver);
-}
-
-module_init(ds1553_init);
-module_exit(ds1553_exit);
+module_platform_driver(ds1553_rtc_driver);
MODULE_AUTHOR("Atsushi Nemoto <anemo@mba.ocn.ne.jp>");
MODULE_DESCRIPTION("Dallas DS1553 RTC driver");
diff --git a/drivers/rtc/rtc-ds1742.c b/drivers/rtc/rtc-ds1742.c
index e3e0f92b60f0..76112667c507 100644
--- a/drivers/rtc/rtc-ds1742.c
+++ b/drivers/rtc/rtc-ds1742.c
@@ -240,18 +240,7 @@ static struct platform_driver ds1742_rtc_driver = {
},
};
-static __init int ds1742_init(void)
-{
- return platform_driver_register(&ds1742_rtc_driver);
-}
-
-static __exit void ds1742_exit(void)
-{
- platform_driver_unregister(&ds1742_rtc_driver);
-}
-
-module_init(ds1742_init);
-module_exit(ds1742_exit);
+module_platform_driver(ds1742_rtc_driver);
MODULE_AUTHOR("Atsushi Nemoto <anemo@mba.ocn.ne.jp>");
MODULE_DESCRIPTION("Dallas DS1742 RTC driver");
diff --git a/drivers/rtc/rtc-jz4740.c b/drivers/rtc/rtc-jz4740.c
index b6473631d182..05ab227eeff7 100644
--- a/drivers/rtc/rtc-jz4740.c
+++ b/drivers/rtc/rtc-jz4740.c
@@ -345,7 +345,7 @@ static const struct dev_pm_ops jz4740_pm_ops = {
#define JZ4740_RTC_PM_OPS NULL
#endif /* CONFIG_PM */
-struct platform_driver jz4740_rtc_driver = {
+static struct platform_driver jz4740_rtc_driver = {
.probe = jz4740_rtc_probe,
.remove = __devexit_p(jz4740_rtc_remove),
.driver = {
@@ -355,17 +355,7 @@ struct platform_driver jz4740_rtc_driver = {
},
};
-static int __init jz4740_rtc_init(void)
-{
- return platform_driver_register(&jz4740_rtc_driver);
-}
-module_init(jz4740_rtc_init);
-
-static void __exit jz4740_rtc_exit(void)
-{
- platform_driver_unregister(&jz4740_rtc_driver);
-}
-module_exit(jz4740_rtc_exit);
+module_platform_driver(jz4740_rtc_driver);
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-lpc32xx.c b/drivers/rtc/rtc-lpc32xx.c
index ae16250c762f..ecc1713b2b4f 100644
--- a/drivers/rtc/rtc-lpc32xx.c
+++ b/drivers/rtc/rtc-lpc32xx.c
@@ -396,17 +396,7 @@ static struct platform_driver lpc32xx_rtc_driver = {
},
};
-static int __init lpc32xx_rtc_init(void)
-{
- return platform_driver_register(&lpc32xx_rtc_driver);
-}
-module_init(lpc32xx_rtc_init);
-
-static void __exit lpc32xx_rtc_exit(void)
-{
- platform_driver_unregister(&lpc32xx_rtc_driver);
-}
-module_exit(lpc32xx_rtc_exit);
+module_platform_driver(lpc32xx_rtc_driver);
MODULE_AUTHOR("Kevin Wells <wellsk40@gmail.com");
MODULE_DESCRIPTION("RTC driver for the LPC32xx SoC");
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index eda128fc1d38..64aedd8cc095 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -357,10 +357,19 @@ static int m41t80_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *t)
static struct rtc_class_ops m41t80_rtc_ops = {
.read_time = m41t80_rtc_read_time,
.set_time = m41t80_rtc_set_time,
+ /*
+ * XXX - m41t80 alarm functionality is reported broken.
+ * until it is fixed, don't register alarm functions.
+ *
.read_alarm = m41t80_rtc_read_alarm,
.set_alarm = m41t80_rtc_set_alarm,
+ */
.proc = m41t80_rtc_proc,
+ /*
+ * See above comment on broken alarm
+ *
.alarm_irq_enable = m41t80_rtc_alarm_irq_enable,
+ */
};
#if defined(CONFIG_RTC_INTF_SYSFS) || defined(CONFIG_RTC_INTF_SYSFS_MODULE)
diff --git a/drivers/rtc/rtc-m41t93.c b/drivers/rtc/rtc-m41t93.c
index 7317d3b9a3d5..ef71132ff205 100644
--- a/drivers/rtc/rtc-m41t93.c
+++ b/drivers/rtc/rtc-m41t93.c
@@ -200,7 +200,6 @@ static int __devexit m41t93_remove(struct spi_device *spi)
static struct spi_driver m41t93_driver = {
.driver = {
.name = "rtc-m41t93",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = m41t93_probe,
diff --git a/drivers/rtc/rtc-m41t94.c b/drivers/rtc/rtc-m41t94.c
index e259ed76ae85..2a4721f61797 100644
--- a/drivers/rtc/rtc-m41t94.c
+++ b/drivers/rtc/rtc-m41t94.c
@@ -147,7 +147,6 @@ static int __devexit m41t94_remove(struct spi_device *spi)
static struct spi_driver m41t94_driver = {
.driver = {
.name = "rtc-m41t94",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = m41t94_probe,
diff --git a/drivers/rtc/rtc-m48t35.c b/drivers/rtc/rtc-m48t35.c
index 8e2a24e33ed6..f9e3b3583733 100644
--- a/drivers/rtc/rtc-m48t35.c
+++ b/drivers/rtc/rtc-m48t35.c
@@ -216,21 +216,10 @@ static struct platform_driver m48t35_platform_driver = {
.remove = __devexit_p(m48t35_remove),
};
-static int __init m48t35_init(void)
-{
- return platform_driver_register(&m48t35_platform_driver);
-}
-
-static void __exit m48t35_exit(void)
-{
- platform_driver_unregister(&m48t35_platform_driver);
-}
+module_platform_driver(m48t35_platform_driver);
MODULE_AUTHOR("Thomas Bogendoerfer <tsbogend@alpha.franken.de>");
MODULE_DESCRIPTION("M48T35 RTC driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:rtc-m48t35");
-
-module_init(m48t35_init);
-module_exit(m48t35_exit);
diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
index 28365388fb6c..30ebfec9fd2b 100644
--- a/drivers/rtc/rtc-m48t59.c
+++ b/drivers/rtc/rtc-m48t59.c
@@ -530,18 +530,7 @@ static struct platform_driver m48t59_rtc_driver = {
.remove = __devexit_p(m48t59_rtc_remove),
};
-static int __init m48t59_rtc_init(void)
-{
- return platform_driver_register(&m48t59_rtc_driver);
-}
-
-static void __exit m48t59_rtc_exit(void)
-{
- platform_driver_unregister(&m48t59_rtc_driver);
-}
-
-module_init(m48t59_rtc_init);
-module_exit(m48t59_rtc_exit);
+module_platform_driver(m48t59_rtc_driver);
MODULE_AUTHOR("Mark Zhan <rongkai.zhan@windriver.com>");
MODULE_DESCRIPTION("M48T59/M48T02/M48T08 RTC driver");
diff --git a/drivers/rtc/rtc-m48t86.c b/drivers/rtc/rtc-m48t86.c
index f981287d582b..863fb3363aa6 100644
--- a/drivers/rtc/rtc-m48t86.c
+++ b/drivers/rtc/rtc-m48t86.c
@@ -185,21 +185,10 @@ static struct platform_driver m48t86_rtc_platform_driver = {
.remove = __devexit_p(m48t86_rtc_remove),
};
-static int __init m48t86_rtc_init(void)
-{
- return platform_driver_register(&m48t86_rtc_platform_driver);
-}
-
-static void __exit m48t86_rtc_exit(void)
-{
- platform_driver_unregister(&m48t86_rtc_platform_driver);
-}
+module_platform_driver(m48t86_rtc_platform_driver);
MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>");
MODULE_DESCRIPTION("M48T86 RTC driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:rtc-m48t86");
-
-module_init(m48t86_rtc_init);
-module_exit(m48t86_rtc_exit);
diff --git a/drivers/rtc/rtc-max6902.c b/drivers/rtc/rtc-max6902.c
index 0ec3f588a255..1f6b3cc58e8a 100644
--- a/drivers/rtc/rtc-max6902.c
+++ b/drivers/rtc/rtc-max6902.c
@@ -154,7 +154,6 @@ static int __devexit max6902_remove(struct spi_device *spi)
static struct spi_driver max6902_driver = {
.driver = {
.name = "rtc-max6902",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = max6902_probe,
diff --git a/drivers/rtc/rtc-max8925.c b/drivers/rtc/rtc-max8925.c
index 3bc046f427e0..2d71943bc436 100644
--- a/drivers/rtc/rtc-max8925.c
+++ b/drivers/rtc/rtc-max8925.c
@@ -261,6 +261,8 @@ static int __devinit max8925_rtc_probe(struct platform_device *pdev)
/* XXX - isn't this redundant? */
platform_set_drvdata(pdev, info);
+ device_init_wakeup(&pdev->dev, 1);
+
info->rtc_dev = rtc_device_register("max8925-rtc", &pdev->dev,
&max8925_rtc_ops, THIS_MODULE);
ret = PTR_ERR(info->rtc_dev);
@@ -290,26 +292,40 @@ static int __devexit max8925_rtc_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int max8925_rtc_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct max8925_chip *chip = dev_get_drvdata(pdev->dev.parent);
+
+ if (device_may_wakeup(dev))
+ chip->wakeup_flag |= 1 << MAX8925_IRQ_RTC_ALARM0;
+ return 0;
+}
+static int max8925_rtc_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct max8925_chip *chip = dev_get_drvdata(pdev->dev.parent);
+
+ if (device_may_wakeup(dev))
+ chip->wakeup_flag &= ~(1 << MAX8925_IRQ_RTC_ALARM0);
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(max8925_rtc_pm_ops, max8925_rtc_suspend, max8925_rtc_resume);
+
static struct platform_driver max8925_rtc_driver = {
.driver = {
.name = "max8925-rtc",
.owner = THIS_MODULE,
+ .pm = &max8925_rtc_pm_ops,
},
.probe = max8925_rtc_probe,
.remove = __devexit_p(max8925_rtc_remove),
};
-static int __init max8925_rtc_init(void)
-{
- return platform_driver_register(&max8925_rtc_driver);
-}
-module_init(max8925_rtc_init);
-
-static void __exit max8925_rtc_exit(void)
-{
- platform_driver_unregister(&max8925_rtc_driver);
-}
-module_exit(max8925_rtc_exit);
+module_platform_driver(max8925_rtc_driver);
MODULE_DESCRIPTION("Maxim MAX8925 RTC driver");
MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>");
diff --git a/drivers/rtc/rtc-max8998.c b/drivers/rtc/rtc-max8998.c
index 2e48aa604273..7196f438c089 100644
--- a/drivers/rtc/rtc-max8998.c
+++ b/drivers/rtc/rtc-max8998.c
@@ -327,17 +327,7 @@ static struct platform_driver max8998_rtc_driver = {
.id_table = max8998_rtc_id,
};
-static int __init max8998_rtc_init(void)
-{
- return platform_driver_register(&max8998_rtc_driver);
-}
-module_init(max8998_rtc_init);
-
-static void __exit max8998_rtc_exit(void)
-{
- platform_driver_unregister(&max8998_rtc_driver);
-}
-module_exit(max8998_rtc_exit);
+module_platform_driver(max8998_rtc_driver);
MODULE_AUTHOR("Minkyu Kang <mk7.kang@samsung.com>");
MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
diff --git a/drivers/rtc/rtc-mc13xxx.c b/drivers/rtc/rtc-mc13xxx.c
index 9d0c3b478d55..546f6850bffb 100644
--- a/drivers/rtc/rtc-mc13xxx.c
+++ b/drivers/rtc/rtc-mc13xxx.c
@@ -399,7 +399,7 @@ static int __exit mc13xxx_rtc_remove(struct platform_device *pdev)
return 0;
}
-const struct platform_device_id mc13xxx_rtc_idtable[] = {
+static const struct platform_device_id mc13xxx_rtc_idtable[] = {
{
.name = "mc13783-rtc",
}, {
diff --git a/drivers/rtc/rtc-mpc5121.c b/drivers/rtc/rtc-mpc5121.c
index da60915818b6..9d3caccfc250 100644
--- a/drivers/rtc/rtc-mpc5121.c
+++ b/drivers/rtc/rtc-mpc5121.c
@@ -418,17 +418,7 @@ static struct platform_driver mpc5121_rtc_driver = {
.remove = __devexit_p(mpc5121_rtc_remove),
};
-static int __init mpc5121_rtc_init(void)
-{
- return platform_driver_register(&mpc5121_rtc_driver);
-}
-module_init(mpc5121_rtc_init);
-
-static void __exit mpc5121_rtc_exit(void)
-{
- platform_driver_unregister(&mpc5121_rtc_driver);
-}
-module_exit(mpc5121_rtc_exit);
+module_platform_driver(mpc5121_rtc_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Rigby <jcrigby@gmail.com>");
diff --git a/drivers/rtc/rtc-mrst.c b/drivers/rtc/rtc-mrst.c
index bb21f443fb70..6cd6c7235344 100644
--- a/drivers/rtc/rtc-mrst.c
+++ b/drivers/rtc/rtc-mrst.c
@@ -537,18 +537,7 @@ static struct platform_driver vrtc_mrst_platform_driver = {
}
};
-static int __init vrtc_mrst_init(void)
-{
- return platform_driver_register(&vrtc_mrst_platform_driver);
-}
-
-static void __exit vrtc_mrst_exit(void)
-{
- platform_driver_unregister(&vrtc_mrst_platform_driver);
-}
-
-module_init(vrtc_mrst_init);
-module_exit(vrtc_mrst_exit);
+module_platform_driver(vrtc_mrst_platform_driver);
MODULE_AUTHOR("Jacob Pan; Feng Tang");
MODULE_DESCRIPTION("Driver for Moorestown virtual RTC");
diff --git a/drivers/rtc/rtc-mxc.c b/drivers/rtc/rtc-mxc.c
index 39e41fbdf08b..5e1d64ee5228 100644
--- a/drivers/rtc/rtc-mxc.c
+++ b/drivers/rtc/rtc-mxc.c
@@ -155,7 +155,6 @@ static int rtc_update_alarm(struct device *dev, struct rtc_time *alrm)
{
struct rtc_time alarm_tm, now_tm;
unsigned long now, time;
- int ret;
struct platform_device *pdev = to_platform_device(dev);
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
void __iomem *ioaddr = pdata->ioaddr;
@@ -168,21 +167,33 @@ static int rtc_update_alarm(struct device *dev, struct rtc_time *alrm)
alarm_tm.tm_hour = alrm->tm_hour;
alarm_tm.tm_min = alrm->tm_min;
alarm_tm.tm_sec = alrm->tm_sec;
- rtc_tm_to_time(&now_tm, &now);
rtc_tm_to_time(&alarm_tm, &time);
- if (time < now) {
- time += 60 * 60 * 24;
- rtc_time_to_tm(time, &alarm_tm);
- }
-
- ret = rtc_tm_to_time(&alarm_tm, &time);
-
/* clear all the interrupt status bits */
writew(readw(ioaddr + RTC_RTCISR), ioaddr + RTC_RTCISR);
set_alarm_or_time(dev, MXC_RTC_ALARM, time);
- return ret;
+ return 0;
+}
+
+static void mxc_rtc_irq_enable(struct device *dev, unsigned int bit,
+ unsigned int enabled)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+ void __iomem *ioaddr = pdata->ioaddr;
+ u32 reg;
+
+ spin_lock_irq(&pdata->rtc->irq_lock);
+ reg = readw(ioaddr + RTC_RTCIENR);
+
+ if (enabled)
+ reg |= bit;
+ else
+ reg &= ~bit;
+
+ writew(reg, ioaddr + RTC_RTCIENR);
+ spin_unlock_irq(&pdata->rtc->irq_lock);
}
/* This function is the RTC interrupt service routine. */
@@ -199,13 +210,12 @@ static irqreturn_t mxc_rtc_interrupt(int irq, void *dev_id)
/* clear interrupt sources */
writew(status, ioaddr + RTC_RTCISR);
- /* clear alarm interrupt if it has occurred */
- if (status & RTC_ALM_BIT)
- status &= ~RTC_ALM_BIT;
-
/* update irq data & counter */
- if (status & RTC_ALM_BIT)
+ if (status & RTC_ALM_BIT) {
events |= (RTC_AF | RTC_IRQF);
+ /* RTC alarm should be one-shot */
+ mxc_rtc_irq_enable(&pdev->dev, RTC_ALM_BIT, 0);
+ }
if (status & RTC_1HZ_BIT)
events |= (RTC_UF | RTC_IRQF);
@@ -213,9 +223,6 @@ static irqreturn_t mxc_rtc_interrupt(int irq, void *dev_id)
if (status & PIT_ALL_ON)
events |= (RTC_PF | RTC_IRQF);
- if ((status & RTC_ALM_BIT) && rtc_valid_tm(&pdata->g_rtc_alarm))
- rtc_update_alarm(&pdev->dev, &pdata->g_rtc_alarm);
-
rtc_update_irq(pdata->rtc, 1, events);
spin_unlock_irq(&pdata->rtc->irq_lock);
@@ -242,26 +249,6 @@ static void mxc_rtc_release(struct device *dev)
spin_unlock_irq(&pdata->rtc->irq_lock);
}
-static void mxc_rtc_irq_enable(struct device *dev, unsigned int bit,
- unsigned int enabled)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
- void __iomem *ioaddr = pdata->ioaddr;
- u32 reg;
-
- spin_lock_irq(&pdata->rtc->irq_lock);
- reg = readw(ioaddr + RTC_RTCIENR);
-
- if (enabled)
- reg |= bit;
- else
- reg &= ~bit;
-
- writew(reg, ioaddr + RTC_RTCIENR);
- spin_unlock_irq(&pdata->rtc->irq_lock);
-}
-
static int mxc_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
mxc_rtc_irq_enable(dev, RTC_ALM_BIT, enabled);
@@ -290,6 +277,17 @@ static int mxc_rtc_read_time(struct device *dev, struct rtc_time *tm)
*/
static int mxc_rtc_set_mmss(struct device *dev, unsigned long time)
{
+ /*
+ * The TTC_DAYR register is only 9 bits wide on MX1 SoCs, so save only the time and day of year
+ */
+ if (cpu_is_mx1()) {
+ struct rtc_time tm;
+
+ rtc_time_to_tm(time, &tm);
+ tm.tm_year = 70;
+ rtc_tm_to_time(&tm, &time);
+ }
+
/* Avoid roll-over from reading the different registers */
do {
set_alarm_or_time(dev, MXC_RTC_TIME, time);
@@ -324,21 +322,7 @@ static int mxc_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
int ret;
- if (rtc_valid_tm(&alrm->time)) {
- if (alrm->time.tm_sec > 59 ||
- alrm->time.tm_hour > 23 ||
- alrm->time.tm_min > 59)
- return -EINVAL;
-
- ret = rtc_update_alarm(dev, &alrm->time);
- } else {
- ret = rtc_valid_tm(&alrm->time);
- if (ret)
- return ret;
-
- ret = rtc_update_alarm(dev, &alrm->time);
- }
-
+ ret = rtc_update_alarm(dev, &alrm->time);
if (ret)
return ret;
@@ -424,6 +408,9 @@ static int __init mxc_rtc_probe(struct platform_device *pdev)
pdata->irq = -1;
}
+ if (pdata->irq >= 0)
+ device_init_wakeup(&pdev->dev, 1);
+
rtc = rtc_device_register(pdev->name, &pdev->dev, &mxc_rtc_ops,
THIS_MODULE);
if (IS_ERR(rtc)) {
@@ -459,9 +446,39 @@ static int __exit mxc_rtc_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM
+static int mxc_rtc_suspend(struct device *dev)
+{
+ struct rtc_plat_data *pdata = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ enable_irq_wake(pdata->irq);
+
+ return 0;
+}
+
+static int mxc_rtc_resume(struct device *dev)
+{
+ struct rtc_plat_data *pdata = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ disable_irq_wake(pdata->irq);
+
+ return 0;
+}
+
+static struct dev_pm_ops mxc_rtc_pm_ops = {
+ .suspend = mxc_rtc_suspend,
+ .resume = mxc_rtc_resume,
+};
+#endif
+
static struct platform_driver mxc_rtc_driver = {
.driver = {
.name = "mxc_rtc",
+#ifdef CONFIG_PM
+ .pm = &mxc_rtc_pm_ops,
+#endif
.owner = THIS_MODULE,
},
.remove = __exit_p(mxc_rtc_remove),
diff --git a/drivers/rtc/rtc-pcf2123.c b/drivers/rtc/rtc-pcf2123.c
index 2ee3bbf7e5ea..b46c4004d8fe 100644
--- a/drivers/rtc/rtc-pcf2123.c
+++ b/drivers/rtc/rtc-pcf2123.c
@@ -340,7 +340,6 @@ static int __devexit pcf2123_remove(struct spi_device *spi)
static struct spi_driver pcf2123_driver = {
.driver = {
.name = "rtc-pcf2123",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = pcf2123_probe,
diff --git a/drivers/rtc/rtc-pcf50633.c b/drivers/rtc/rtc-pcf50633.c
index 0c423892923c..a20202f9ee57 100644
--- a/drivers/rtc/rtc-pcf50633.c
+++ b/drivers/rtc/rtc-pcf50633.c
@@ -294,17 +294,7 @@ static struct platform_driver pcf50633_rtc_driver = {
.remove = __devexit_p(pcf50633_rtc_remove),
};
-static int __init pcf50633_rtc_init(void)
-{
- return platform_driver_register(&pcf50633_rtc_driver);
-}
-module_init(pcf50633_rtc_init);
-
-static void __exit pcf50633_rtc_exit(void)
-{
- platform_driver_unregister(&pcf50633_rtc_driver);
-}
-module_exit(pcf50633_rtc_exit);
+module_platform_driver(pcf50633_rtc_driver);
MODULE_DESCRIPTION("PCF50633 RTC driver");
MODULE_AUTHOR("Balaji Rao <balajirrao@openmoko.org>");
diff --git a/drivers/rtc/rtc-pl030.c b/drivers/rtc/rtc-pl030.c
index 1d28d4451dae..02111fee077e 100644
--- a/drivers/rtc/rtc-pl030.c
+++ b/drivers/rtc/rtc-pl030.c
@@ -174,6 +174,8 @@ static struct amba_id pl030_ids[] = {
{ 0, 0 },
};
+MODULE_DEVICE_TABLE(amba, pl030_ids);
+
static struct amba_driver pl030_driver = {
.drv = {
.name = "rtc-pl030",
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
index ff1b84bd9bb5..a952c8de1dd7 100644
--- a/drivers/rtc/rtc-pl031.c
+++ b/drivers/rtc/rtc-pl031.c
@@ -420,6 +420,8 @@ static struct amba_id pl031_ids[] = {
{0, 0},
};
+MODULE_DEVICE_TABLE(amba, pl031_ids);
+
static struct amba_driver pl031_driver = {
.drv = {
.name = "rtc-pl031",
diff --git a/drivers/rtc/rtc-pm8xxx.c b/drivers/rtc/rtc-pm8xxx.c
index d420e9d877e8..9f1d6bcbdf6c 100644
--- a/drivers/rtc/rtc-pm8xxx.c
+++ b/drivers/rtc/rtc-pm8xxx.c
@@ -532,17 +532,7 @@ static struct platform_driver pm8xxx_rtc_driver = {
},
};
-static int __init pm8xxx_rtc_init(void)
-{
- return platform_driver_register(&pm8xxx_rtc_driver);
-}
-module_init(pm8xxx_rtc_init);
-
-static void __exit pm8xxx_rtc_exit(void)
-{
- platform_driver_unregister(&pm8xxx_rtc_driver);
-}
-module_exit(pm8xxx_rtc_exit);
+module_platform_driver(pm8xxx_rtc_driver);
MODULE_ALIAS("platform:rtc-pm8xxx");
MODULE_DESCRIPTION("PMIC8xxx RTC driver");
diff --git a/drivers/rtc/rtc-puv3.c b/drivers/rtc/rtc-puv3.c
index e4b6880aabd0..ab0acaeb2371 100644
--- a/drivers/rtc/rtc-puv3.c
+++ b/drivers/rtc/rtc-puv3.c
@@ -164,7 +164,7 @@ static int puv3_rtc_open(struct device *dev)
int ret;
ret = request_irq(puv3_rtc_alarmno, puv3_rtc_alarmirq,
- IRQF_DISABLED, "pkunity-rtc alarm", rtc_dev);
+ 0, "pkunity-rtc alarm", rtc_dev);
if (ret) {
dev_err(dev, "IRQ%d error %d\n", puv3_rtc_alarmno, ret);
@@ -172,7 +172,7 @@ static int puv3_rtc_open(struct device *dev)
}
ret = request_irq(puv3_rtc_tickno, puv3_rtc_tickirq,
- IRQF_DISABLED, "pkunity-rtc tick", rtc_dev);
+ 0, "pkunity-rtc tick", rtc_dev);
if (ret) {
dev_err(dev, "IRQ%d error %d\n", puv3_rtc_tickno, ret);
@@ -326,7 +326,7 @@ static int puv3_rtc_resume(struct platform_device *pdev)
#define puv3_rtc_resume NULL
#endif
-static struct platform_driver puv3_rtcdrv = {
+static struct platform_driver puv3_rtc_driver = {
.probe = puv3_rtc_probe,
.remove = __devexit_p(puv3_rtc_remove),
.suspend = puv3_rtc_suspend,
@@ -337,21 +337,7 @@ static struct platform_driver puv3_rtcdrv = {
}
};
-static char __initdata banner[] = "PKUnity-v3 RTC, (c) 2009 PKUnity Co.\n";
-
-static int __init puv3_rtc_init(void)
-{
- printk(banner);
- return platform_driver_register(&puv3_rtcdrv);
-}
-
-static void __exit puv3_rtc_exit(void)
-{
- platform_driver_unregister(&puv3_rtcdrv);
-}
-
-module_init(puv3_rtc_init);
-module_exit(puv3_rtc_exit);
+module_platform_driver(puv3_rtc_driver);
MODULE_DESCRIPTION("RTC Driver for the PKUnity v3 chip");
MODULE_AUTHOR("Hu Dongliang");
diff --git a/drivers/rtc/rtc-rs5c348.c b/drivers/rtc/rtc-rs5c348.c
index 971bc8e08da6..ce2ca8523ddd 100644
--- a/drivers/rtc/rtc-rs5c348.c
+++ b/drivers/rtc/rtc-rs5c348.c
@@ -229,7 +229,6 @@ static int __devexit rs5c348_remove(struct spi_device *spi)
static struct spi_driver rs5c348_driver = {
.driver = {
.name = "rtc-rs5c348",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = rs5c348_probe,
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index 7639ab906f02..aef40bd2957b 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -25,6 +25,7 @@
#include <linux/clk.h>
#include <linux/log2.h>
#include <linux/slab.h>
+#include <linux/of.h>
#include <mach/hardware.h>
#include <asm/uaccess.h>
@@ -202,7 +203,6 @@ static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm)
void __iomem *base = s3c_rtc_base;
int year = tm->tm_year - 100;
- clk_enable(rtc_clk);
pr_debug("set time %04d.%02d.%02d %02d:%02d:%02d\n",
1900 + tm->tm_year, tm->tm_mon, tm->tm_mday,
tm->tm_hour, tm->tm_min, tm->tm_sec);
@@ -214,6 +214,7 @@ static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm)
return -EINVAL;
}
+ clk_enable(rtc_clk);
writeb(bin2bcd(tm->tm_sec), base + S3C2410_RTCSEC);
writeb(bin2bcd(tm->tm_min), base + S3C2410_RTCMIN);
writeb(bin2bcd(tm->tm_hour), base + S3C2410_RTCHOUR);
@@ -507,7 +508,13 @@ static int __devinit s3c_rtc_probe(struct platform_device *pdev)
goto err_nortc;
}
- s3c_rtc_cpu_type = platform_get_device_id(pdev)->driver_data;
+#ifdef CONFIG_OF
+ if (pdev->dev.of_node)
+ s3c_rtc_cpu_type = of_device_is_compatible(pdev->dev.of_node,
+ "samsung,s3c6410-rtc") ? TYPE_S3C64XX : TYPE_S3C2410;
+ else
+#endif
+ s3c_rtc_cpu_type = platform_get_device_id(pdev)->driver_data;
/* Check RTC Time */
@@ -629,6 +636,17 @@ static int s3c_rtc_resume(struct platform_device *pdev)
#define s3c_rtc_resume NULL
#endif
+#ifdef CONFIG_OF
+static const struct of_device_id s3c_rtc_dt_match[] = {
+ { .compatible = "samsung,s3c2410-rtc" },
+ { .compatible = "samsung,s3c6410-rtc" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, s3c_rtc_dt_match);
+#else
+#define s3c_rtc_dt_match NULL
+#endif
+
static struct platform_device_id s3c_rtc_driver_ids[] = {
{
.name = "s3c2410-rtc",
@@ -651,24 +669,11 @@ static struct platform_driver s3c_rtc_driver = {
.driver = {
.name = "s3c-rtc",
.owner = THIS_MODULE,
+ .of_match_table = s3c_rtc_dt_match,
},
};
-static char __initdata banner[] = "S3C24XX RTC, (c) 2004,2006 Simtec Electronics\n";
-
-static int __init s3c_rtc_init(void)
-{
- printk(banner);
- return platform_driver_register(&s3c_rtc_driver);
-}
-
-static void __exit s3c_rtc_exit(void)
-{
- platform_driver_unregister(&s3c_rtc_driver);
-}
-
-module_init(s3c_rtc_init);
-module_exit(s3c_rtc_exit);
+module_platform_driver(s3c_rtc_driver);
MODULE_DESCRIPTION("Samsung S3C RTC Driver");
MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c
index 0b40bb88a884..4595d3e645a7 100644
--- a/drivers/rtc/rtc-sa1100.c
+++ b/drivers/rtc/rtc-sa1100.c
@@ -27,35 +27,42 @@
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/interrupt.h>
-#include <linux/string.h>
#include <linux/pm.h>
-#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/io.h>
#include <mach/hardware.h>
#include <asm/irq.h>
-#ifdef CONFIG_ARCH_PXA
-#include <mach/regs-rtc.h>
-#include <mach/regs-ost.h>
-#endif
-
#define RTC_DEF_DIVIDER (32768 - 1)
#define RTC_DEF_TRIM 0
-
-static const unsigned long RTC_FREQ = 1024;
-static struct rtc_time rtc_alarm;
-static DEFINE_SPINLOCK(sa1100_rtc_lock);
-
-static inline int rtc_periodic_alarm(struct rtc_time *tm)
-{
- return (tm->tm_year == -1) ||
- ((unsigned)tm->tm_mon >= 12) ||
- ((unsigned)(tm->tm_mday - 1) >= 31) ||
- ((unsigned)tm->tm_hour > 23) ||
- ((unsigned)tm->tm_min > 59) ||
- ((unsigned)tm->tm_sec > 59);
-}
-
+#define RTC_FREQ 1024
+
+#define RCNR 0x00 /* RTC Count Register */
+#define RTAR 0x04 /* RTC Alarm Register */
+#define RTSR 0x08 /* RTC Status Register */
+#define RTTR 0x0c /* RTC Timer Trim Register */
+
+#define RTSR_HZE (1 << 3) /* HZ interrupt enable */
+#define RTSR_ALE (1 << 2) /* RTC alarm interrupt enable */
+#define RTSR_HZ (1 << 1) /* HZ rising-edge detected */
+#define RTSR_AL (1 << 0) /* RTC alarm detected */
+
+#define rtc_readl(sa1100_rtc, reg) \
+ readl_relaxed((sa1100_rtc)->base + (reg))
+#define rtc_writel(sa1100_rtc, reg, value) \
+ writel_relaxed((value), (sa1100_rtc)->base + (reg))
+
+struct sa1100_rtc {
+ struct resource *ress;
+ void __iomem *base;
+ struct clk *clk;
+ int irq_1Hz;
+ int irq_Alrm;
+ struct rtc_device *rtc;
+ spinlock_t lock; /* Protects this structure */
+};
/*
* Calculate the next alarm time given the requested alarm time mask
* and the current time.
@@ -83,46 +90,26 @@ static void rtc_next_alarm_time(struct rtc_time *next, struct rtc_time *now,
}
}
-static int rtc_update_alarm(struct rtc_time *alrm)
-{
- struct rtc_time alarm_tm, now_tm;
- unsigned long now, time;
- int ret;
-
- do {
- now = RCNR;
- rtc_time_to_tm(now, &now_tm);
- rtc_next_alarm_time(&alarm_tm, &now_tm, alrm);
- ret = rtc_tm_to_time(&alarm_tm, &time);
- if (ret != 0)
- break;
-
- RTSR = RTSR & (RTSR_HZE|RTSR_ALE|RTSR_AL);
- RTAR = time;
- } while (now != RCNR);
-
- return ret;
-}
-
static irqreturn_t sa1100_rtc_interrupt(int irq, void *dev_id)
{
struct platform_device *pdev = to_platform_device(dev_id);
- struct rtc_device *rtc = platform_get_drvdata(pdev);
+ struct sa1100_rtc *sa1100_rtc = platform_get_drvdata(pdev);
unsigned int rtsr;
unsigned long events = 0;
- spin_lock(&sa1100_rtc_lock);
+ spin_lock(&sa1100_rtc->lock);
- rtsr = RTSR;
/* clear interrupt sources */
- RTSR = 0;
+ rtsr = rtc_readl(sa1100_rtc, RTSR);
+ rtc_writel(sa1100_rtc, RTSR, 0);
+
/* Fix for a nasty initialization problem in the SA11xx RTSR register.
* See also the comments in sa1100_rtc_probe(). */
if (rtsr & (RTSR_ALE | RTSR_HZE)) {
/* This is the original code, before there was the if test
* above. This code does not clear interrupts that were not
* enabled. */
- RTSR = (RTSR_AL | RTSR_HZ) & (rtsr >> 2);
+ rtc_writel(sa1100_rtc, RTSR, (RTSR_AL | RTSR_HZ) & (rtsr >> 2));
} else {
/* For some reason, it is possible to enter this routine
* without interruptions enabled, it has been tested with
@@ -131,13 +118,13 @@ static irqreturn_t sa1100_rtc_interrupt(int irq, void *dev_id)
* This situation leads to an infinite "loop" of interrupt
* routine calling and as a result the processor seems to
* lock on its first call to open(). */
- RTSR = RTSR_AL | RTSR_HZ;
+ rtc_writel(sa1100_rtc, RTSR, (RTSR_AL | RTSR_HZ));
}
/* clear alarm interrupt if it has occurred */
if (rtsr & RTSR_AL)
rtsr &= ~RTSR_ALE;
- RTSR = rtsr & (RTSR_ALE | RTSR_HZE);
+ rtc_writel(sa1100_rtc, RTSR, rtsr & (RTSR_ALE | RTSR_HZE));
/* update irq data & counter */
if (rtsr & RTSR_AL)
@@ -145,91 +132,100 @@ static irqreturn_t sa1100_rtc_interrupt(int irq, void *dev_id)
if (rtsr & RTSR_HZ)
events |= RTC_UF | RTC_IRQF;
- rtc_update_irq(rtc, 1, events);
+ rtc_update_irq(sa1100_rtc->rtc, 1, events);
- if (rtsr & RTSR_AL && rtc_periodic_alarm(&rtc_alarm))
- rtc_update_alarm(&rtc_alarm);
-
- spin_unlock(&sa1100_rtc_lock);
+ spin_unlock(&sa1100_rtc->lock);
return IRQ_HANDLED;
}
static int sa1100_rtc_open(struct device *dev)
{
+ struct sa1100_rtc *sa1100_rtc = dev_get_drvdata(dev);
int ret;
- struct platform_device *plat_dev = to_platform_device(dev);
- struct rtc_device *rtc = platform_get_drvdata(plat_dev);
- ret = request_irq(IRQ_RTC1Hz, sa1100_rtc_interrupt, IRQF_DISABLED,
- "rtc 1Hz", dev);
+ ret = request_irq(sa1100_rtc->irq_1Hz, sa1100_rtc_interrupt,
+ IRQF_DISABLED, "rtc 1Hz", dev);
if (ret) {
- dev_err(dev, "IRQ %d already in use.\n", IRQ_RTC1Hz);
+ dev_err(dev, "IRQ %d already in use.\n", sa1100_rtc->irq_1Hz);
goto fail_ui;
}
- ret = request_irq(IRQ_RTCAlrm, sa1100_rtc_interrupt, IRQF_DISABLED,
- "rtc Alrm", dev);
+ ret = request_irq(sa1100_rtc->irq_Alrm, sa1100_rtc_interrupt,
+ IRQF_DISABLED, "rtc Alrm", dev);
if (ret) {
- dev_err(dev, "IRQ %d already in use.\n", IRQ_RTCAlrm);
+ dev_err(dev, "IRQ %d already in use.\n", sa1100_rtc->irq_Alrm);
goto fail_ai;
}
- rtc->max_user_freq = RTC_FREQ;
- rtc_irq_set_freq(rtc, NULL, RTC_FREQ);
+ sa1100_rtc->rtc->max_user_freq = RTC_FREQ;
+ rtc_irq_set_freq(sa1100_rtc->rtc, NULL, RTC_FREQ);
return 0;
fail_ai:
- free_irq(IRQ_RTC1Hz, dev);
+ free_irq(sa1100_rtc->irq_1Hz, dev);
fail_ui:
return ret;
}
static void sa1100_rtc_release(struct device *dev)
{
- spin_lock_irq(&sa1100_rtc_lock);
- RTSR = 0;
- OIER &= ~OIER_E1;
- OSSR = OSSR_M1;
- spin_unlock_irq(&sa1100_rtc_lock);
-
- free_irq(IRQ_RTCAlrm, dev);
- free_irq(IRQ_RTC1Hz, dev);
+ struct sa1100_rtc *sa1100_rtc = dev_get_drvdata(dev);
+
+ spin_lock_irq(&sa1100_rtc->lock);
+ rtc_writel(sa1100_rtc, RTSR, 0);
+ spin_unlock_irq(&sa1100_rtc->lock);
+
+ free_irq(sa1100_rtc->irq_Alrm, dev);
+ free_irq(sa1100_rtc->irq_1Hz, dev);
}
static int sa1100_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
- spin_lock_irq(&sa1100_rtc_lock);
+ struct sa1100_rtc *sa1100_rtc = dev_get_drvdata(dev);
+ unsigned int rtsr;
+
+ spin_lock_irq(&sa1100_rtc->lock);
+
+ rtsr = rtc_readl(sa1100_rtc, RTSR);
if (enabled)
- RTSR |= RTSR_ALE;
+ rtsr |= RTSR_ALE;
else
- RTSR &= ~RTSR_ALE;
- spin_unlock_irq(&sa1100_rtc_lock);
+ rtsr &= ~RTSR_ALE;
+ rtc_writel(sa1100_rtc, RTSR, rtsr);
+
+ spin_unlock_irq(&sa1100_rtc->lock);
return 0;
}
static int sa1100_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
- rtc_time_to_tm(RCNR, tm);
+ struct sa1100_rtc *sa1100_rtc = dev_get_drvdata(dev);
+
+ rtc_time_to_tm(rtc_readl(sa1100_rtc, RCNR), tm);
return 0;
}
static int sa1100_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
+ struct sa1100_rtc *sa1100_rtc = dev_get_drvdata(dev);
unsigned long time;
int ret;
ret = rtc_tm_to_time(tm, &time);
if (ret == 0)
- RCNR = time;
+ rtc_writel(sa1100_rtc, RCNR, time);
return ret;
}
static int sa1100_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
- u32 rtsr;
+ struct sa1100_rtc *sa1100_rtc = dev_get_drvdata(dev);
+ unsigned long time;
+ unsigned int rtsr;
- memcpy(&alrm->time, &rtc_alarm, sizeof(struct rtc_time));
- rtsr = RTSR;
+ time = rtc_readl(sa1100_rtc, RCNR);
+ rtc_time_to_tm(time, &alrm->time);
+ rtsr = rtc_readl(sa1100_rtc, RTSR);
alrm->enabled = (rtsr & RTSR_ALE) ? 1 : 0;
alrm->pending = (rtsr & RTSR_AL) ? 1 : 0;
return 0;
@@ -237,26 +233,39 @@ static int sa1100_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
static int sa1100_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
- int ret;
+ struct sa1100_rtc *sa1100_rtc = dev_get_drvdata(dev);
+ struct rtc_time now_tm, alarm_tm;
+ unsigned long time, alarm;
+ unsigned int rtsr;
- spin_lock_irq(&sa1100_rtc_lock);
- ret = rtc_update_alarm(&alrm->time);
- if (ret == 0) {
- if (alrm->enabled)
- RTSR |= RTSR_ALE;
- else
- RTSR &= ~RTSR_ALE;
- }
- spin_unlock_irq(&sa1100_rtc_lock);
+ spin_lock_irq(&sa1100_rtc->lock);
- return ret;
+ time = rtc_readl(sa1100_rtc, RCNR);
+ rtc_time_to_tm(time, &now_tm);
+ rtc_next_alarm_time(&alarm_tm, &now_tm, &alrm->time);
+ rtc_tm_to_time(&alarm_tm, &alarm);
+ rtc_writel(sa1100_rtc, RTAR, alarm);
+
+ rtsr = rtc_readl(sa1100_rtc, RTSR);
+ if (alrm->enabled)
+ rtsr |= RTSR_ALE;
+ else
+ rtsr &= ~RTSR_ALE;
+ rtc_writel(sa1100_rtc, RTSR, rtsr);
+
+ spin_unlock_irq(&sa1100_rtc->lock);
+
+ return 0;
}
static int sa1100_rtc_proc(struct device *dev, struct seq_file *seq)
{
- seq_printf(seq, "trim/divider\t\t: 0x%08x\n", (u32) RTTR);
- seq_printf(seq, "RTSR\t\t\t: 0x%08x\n", (u32)RTSR);
+ struct sa1100_rtc *sa1100_rtc = dev_get_drvdata(dev);
+ seq_printf(seq, "trim/divider\t\t: 0x%08x\n",
+ rtc_readl(sa1100_rtc, RTTR));
+ seq_printf(seq, "RTSR\t\t\t: 0x%08x\n",
+ rtc_readl(sa1100_rtc, RTSR));
return 0;
}
@@ -273,7 +282,51 @@ static const struct rtc_class_ops sa1100_rtc_ops = {
static int sa1100_rtc_probe(struct platform_device *pdev)
{
- struct rtc_device *rtc;
+ struct sa1100_rtc *sa1100_rtc;
+ unsigned int rttr;
+ int ret;
+
+ sa1100_rtc = kzalloc(sizeof(struct sa1100_rtc), GFP_KERNEL);
+ if (!sa1100_rtc)
+ return -ENOMEM;
+
+ spin_lock_init(&sa1100_rtc->lock);
+ platform_set_drvdata(pdev, sa1100_rtc);
+
+ ret = -ENXIO;
+ sa1100_rtc->ress = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!sa1100_rtc->ress) {
+ dev_err(&pdev->dev, "No I/O memory resource defined\n");
+ goto err_ress;
+ }
+
+ sa1100_rtc->irq_1Hz = platform_get_irq(pdev, 0);
+ if (sa1100_rtc->irq_1Hz < 0) {
+ dev_err(&pdev->dev, "No 1Hz IRQ resource defined\n");
+ goto err_ress;
+ }
+ sa1100_rtc->irq_Alrm = platform_get_irq(pdev, 1);
+ if (sa1100_rtc->irq_Alrm < 0) {
+ dev_err(&pdev->dev, "No alarm IRQ resource defined\n");
+ goto err_ress;
+ }
+
+ ret = -ENOMEM;
+ sa1100_rtc->base = ioremap(sa1100_rtc->ress->start,
+ resource_size(sa1100_rtc->ress));
+ if (!sa1100_rtc->base) {
+ dev_err(&pdev->dev, "Unable to map pxa RTC I/O memory\n");
+ goto err_map;
+ }
+
+ sa1100_rtc->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(sa1100_rtc->clk)) {
+ dev_err(&pdev->dev, "failed to find rtc clock source\n");
+ ret = PTR_ERR(sa1100_rtc->clk);
+ goto err_clk;
+ }
+ clk_prepare(sa1100_rtc->clk);
+ clk_enable(sa1100_rtc->clk);
/*
* According to the manual we should be able to let RTTR be zero
@@ -282,24 +335,24 @@ static int sa1100_rtc_probe(struct platform_device *pdev)
* If the clock divider is uninitialized then reset it to the
* default value to get the 1Hz clock.
*/
- if (RTTR == 0) {
- RTTR = RTC_DEF_DIVIDER + (RTC_DEF_TRIM << 16);
- dev_warn(&pdev->dev, "warning: "
- "initializing default clock divider/trim value\n");
+ if (rtc_readl(sa1100_rtc, RTTR) == 0) {
+ rttr = RTC_DEF_DIVIDER + (RTC_DEF_TRIM << 16);
+ rtc_writel(sa1100_rtc, RTTR, rttr);
+ dev_warn(&pdev->dev, "warning: initializing default clock"
+ " divider/trim value\n");
/* The current RTC value probably doesn't make sense either */
- RCNR = 0;
+ rtc_writel(sa1100_rtc, RCNR, 0);
}
device_init_wakeup(&pdev->dev, 1);
- rtc = rtc_device_register(pdev->name, &pdev->dev, &sa1100_rtc_ops,
- THIS_MODULE);
-
- if (IS_ERR(rtc))
- return PTR_ERR(rtc);
-
- platform_set_drvdata(pdev, rtc);
-
+ sa1100_rtc->rtc = rtc_device_register(pdev->name, &pdev->dev,
+ &sa1100_rtc_ops, THIS_MODULE);
+ if (IS_ERR(sa1100_rtc->rtc)) {
+ dev_err(&pdev->dev, "Failed to register RTC device -> %d\n",
+ ret);
+ goto err_rtc_reg;
+ }
/* Fix for a nasty initialization problem in the SA11xx RTSR register.
* See also the comments in sa1100_rtc_interrupt().
*
@@ -322,33 +375,46 @@ static int sa1100_rtc_probe(struct platform_device *pdev)
*
* Notice that clearing bits 1 and 0 is accomplished by writing ONES to
* the corresponding bits in RTSR. */
- RTSR = RTSR_AL | RTSR_HZ;
+ rtc_writel(sa1100_rtc, RTSR, (RTSR_AL | RTSR_HZ));
return 0;
+
+err_rtc_reg:
+err_clk:
+ iounmap(sa1100_rtc->base);
+err_ress:
+err_map:
+ kfree(sa1100_rtc);
+ return ret;
}
static int sa1100_rtc_remove(struct platform_device *pdev)
{
- struct rtc_device *rtc = platform_get_drvdata(pdev);
-
- if (rtc)
- rtc_device_unregister(rtc);
+ struct sa1100_rtc *sa1100_rtc = platform_get_drvdata(pdev);
+ rtc_device_unregister(sa1100_rtc->rtc);
+ clk_disable(sa1100_rtc->clk);
+ clk_unprepare(sa1100_rtc->clk);
+ iounmap(sa1100_rtc->base);
return 0;
}
#ifdef CONFIG_PM
static int sa1100_rtc_suspend(struct device *dev)
{
+ struct sa1100_rtc *sa1100_rtc = dev_get_drvdata(dev);
+
if (device_may_wakeup(dev))
- enable_irq_wake(IRQ_RTCAlrm);
+ enable_irq_wake(sa1100_rtc->irq_Alrm);
return 0;
}
static int sa1100_rtc_resume(struct device *dev)
{
+ struct sa1100_rtc *sa1100_rtc = dev_get_drvdata(dev);
+
if (device_may_wakeup(dev))
- disable_irq_wake(IRQ_RTCAlrm);
+ disable_irq_wake(sa1100_rtc->irq_Alrm);
return 0;
}
@@ -369,18 +435,7 @@ static struct platform_driver sa1100_rtc_driver = {
},
};
-static int __init sa1100_rtc_init(void)
-{
- return platform_driver_register(&sa1100_rtc_driver);
-}
-
-static void __exit sa1100_rtc_exit(void)
-{
- platform_driver_unregister(&sa1100_rtc_driver);
-}
-
-module_init(sa1100_rtc_init);
-module_exit(sa1100_rtc_exit);
+module_platform_driver(sa1100_rtc_driver);
MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>");
MODULE_DESCRIPTION("SA11x0/PXA2xx Realtime Clock Driver (RTC)");
diff --git a/drivers/rtc/rtc-spear.c b/drivers/rtc/rtc-spear.c
index 893bac2bb21b..19a28a671a8e 100644
--- a/drivers/rtc/rtc-spear.c
+++ b/drivers/rtc/rtc-spear.c
@@ -516,17 +516,7 @@ static struct platform_driver spear_rtc_driver = {
},
};
-static int __init rtc_init(void)
-{
- return platform_driver_register(&spear_rtc_driver);
-}
-module_init(rtc_init);
-
-static void __exit rtc_exit(void)
-{
- platform_driver_unregister(&spear_rtc_driver);
-}
-module_exit(rtc_exit);
+module_platform_driver(spear_rtc_driver);
MODULE_ALIAS("platform:rtc-spear");
MODULE_AUTHOR("Rajeev Kumar <rajeev-dlh.kumar@st.com>");
diff --git a/drivers/rtc/rtc-stk17ta8.c b/drivers/rtc/rtc-stk17ta8.c
index ed3e9b599031..7621116bd20d 100644
--- a/drivers/rtc/rtc-stk17ta8.c
+++ b/drivers/rtc/rtc-stk17ta8.c
@@ -370,18 +370,7 @@ static struct platform_driver stk17ta8_rtc_driver = {
},
};
-static __init int stk17ta8_init(void)
-{
- return platform_driver_register(&stk17ta8_rtc_driver);
-}
-
-static __exit void stk17ta8_exit(void)
-{
- platform_driver_unregister(&stk17ta8_rtc_driver);
-}
-
-module_init(stk17ta8_init);
-module_exit(stk17ta8_exit);
+module_platform_driver(stk17ta8_rtc_driver);
MODULE_AUTHOR("Thomas Hommel <thomas.hommel@ge.com>");
MODULE_DESCRIPTION("Simtek STK17TA8 RTC driver");
diff --git a/drivers/rtc/rtc-stmp3xxx.c b/drivers/rtc/rtc-stmp3xxx.c
index 7315068daa59..10287865e330 100644
--- a/drivers/rtc/rtc-stmp3xxx.c
+++ b/drivers/rtc/rtc-stmp3xxx.c
@@ -276,18 +276,7 @@ static struct platform_driver stmp3xxx_rtcdrv = {
},
};
-static int __init stmp3xxx_rtc_init(void)
-{
- return platform_driver_register(&stmp3xxx_rtcdrv);
-}
-
-static void __exit stmp3xxx_rtc_exit(void)
-{
- platform_driver_unregister(&stmp3xxx_rtcdrv);
-}
-
-module_init(stmp3xxx_rtc_init);
-module_exit(stmp3xxx_rtc_exit);
+module_platform_driver(stmp3xxx_rtcdrv);
MODULE_DESCRIPTION("STMP3xxx RTC Driver");
MODULE_AUTHOR("dmitry pervushin <dpervushin@embeddedalley.com> and "
diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c
index 20687d55e7a7..d43b4f6eb4e4 100644
--- a/drivers/rtc/rtc-twl.c
+++ b/drivers/rtc/rtc-twl.c
@@ -550,6 +550,11 @@ static int twl_rtc_resume(struct platform_device *pdev)
#define twl_rtc_resume NULL
#endif
+static const struct of_device_id twl_rtc_of_match[] = {
+ {.compatible = "ti,twl4030-rtc", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, twl_rtc_of_match);
MODULE_ALIAS("platform:twl_rtc");
static struct platform_driver twl4030rtc_driver = {
@@ -559,8 +564,9 @@ static struct platform_driver twl4030rtc_driver = {
.suspend = twl_rtc_suspend,
.resume = twl_rtc_resume,
.driver = {
- .owner = THIS_MODULE,
- .name = "twl_rtc",
+ .owner = THIS_MODULE,
+ .name = "twl_rtc",
+ .of_match_table = twl_rtc_of_match,
},
};
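The rtc-twl hunk adds device-tree probing by pointing .of_match_table at an of_device_id table. Sketched below with a made-up compatible string (the real table above matches "ti,twl4030-rtc"); the table is also exported with MODULE_DEVICE_TABLE() so module autoloading works:

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

/* hypothetical driver; only the matching plumbing is shown */
static const struct of_device_id foo_rtc_of_match[] = {
	{ .compatible = "vendor,foo-rtc", },
	{ },					/* sentinel */
};
MODULE_DEVICE_TABLE(of, foo_rtc_of_match);

static struct platform_driver foo_rtc_driver = {
	.driver = {
		.name		= "foo_rtc",
		.owner		= THIS_MODULE,
		.of_match_table	= foo_rtc_of_match,
	},
};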
diff --git a/drivers/rtc/rtc-v3020.c b/drivers/rtc/rtc-v3020.c
index f71c3ce18036..bca5d677bc85 100644
--- a/drivers/rtc/rtc-v3020.c
+++ b/drivers/rtc/rtc-v3020.c
@@ -393,18 +393,7 @@ static struct platform_driver rtc_device_driver = {
},
};
-static __init int v3020_init(void)
-{
- return platform_driver_register(&rtc_device_driver);
-}
-
-static __exit void v3020_exit(void)
-{
- platform_driver_unregister(&rtc_device_driver);
-}
-
-module_init(v3020_init);
-module_exit(v3020_exit);
+module_platform_driver(rtc_device_driver);
MODULE_DESCRIPTION("V3020 RTC");
MODULE_AUTHOR("Raphael Assenat");
diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c
index c5698cda366a..fcbfdda2993b 100644
--- a/drivers/rtc/rtc-vr41xx.c
+++ b/drivers/rtc/rtc-vr41xx.c
@@ -405,15 +405,4 @@ static struct platform_driver rtc_platform_driver = {
},
};
-static int __init vr41xx_rtc_init(void)
-{
- return platform_driver_register(&rtc_platform_driver);
-}
-
-static void __exit vr41xx_rtc_exit(void)
-{
- platform_driver_unregister(&rtc_platform_driver);
-}
-
-module_init(vr41xx_rtc_init);
-module_exit(vr41xx_rtc_exit);
+module_platform_driver(rtc_platform_driver);
diff --git a/drivers/rtc/rtc-vt8500.c b/drivers/rtc/rtc-vt8500.c
index f93f412423c6..9e94fb147c26 100644
--- a/drivers/rtc/rtc-vt8500.c
+++ b/drivers/rtc/rtc-vt8500.c
@@ -311,17 +311,7 @@ static struct platform_driver vt8500_rtc_driver = {
},
};
-static int __init vt8500_rtc_init(void)
-{
- return platform_driver_register(&vt8500_rtc_driver);
-}
-module_init(vt8500_rtc_init);
-
-static void __exit vt8500_rtc_exit(void)
-{
- platform_driver_unregister(&vt8500_rtc_driver);
-}
-module_exit(vt8500_rtc_exit);
+module_platform_driver(vt8500_rtc_driver);
MODULE_AUTHOR("Alexey Charkov <alchark@gmail.com>");
MODULE_DESCRIPTION("VIA VT8500 SoC Realtime Clock Driver (RTC)");
diff --git a/drivers/rtc/rtc-wm831x.c b/drivers/rtc/rtc-wm831x.c
index bdc909bd56da..3b6e6a67e765 100644
--- a/drivers/rtc/rtc-wm831x.c
+++ b/drivers/rtc/rtc-wm831x.c
@@ -324,15 +324,6 @@ static irqreturn_t wm831x_alm_irq(int irq, void *data)
return IRQ_HANDLED;
}
-static irqreturn_t wm831x_per_irq(int irq, void *data)
-{
- struct wm831x_rtc *wm831x_rtc = data;
-
- rtc_update_irq(wm831x_rtc->rtc, 1, RTC_IRQF | RTC_UF);
-
- return IRQ_HANDLED;
-}
-
static const struct rtc_class_ops wm831x_rtc_ops = {
.read_time = wm831x_rtc_readtime,
.set_mmss = wm831x_rtc_set_mmss,
@@ -405,11 +396,10 @@ static int wm831x_rtc_probe(struct platform_device *pdev)
{
struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
struct wm831x_rtc *wm831x_rtc;
- int per_irq = platform_get_irq_byname(pdev, "PER");
int alm_irq = platform_get_irq_byname(pdev, "ALM");
int ret = 0;
- wm831x_rtc = kzalloc(sizeof(*wm831x_rtc), GFP_KERNEL);
+ wm831x_rtc = devm_kzalloc(&pdev->dev, sizeof(*wm831x_rtc), GFP_KERNEL);
if (wm831x_rtc == NULL)
return -ENOMEM;
@@ -433,14 +423,6 @@ static int wm831x_rtc_probe(struct platform_device *pdev)
goto err;
}
- ret = request_threaded_irq(per_irq, NULL, wm831x_per_irq,
- IRQF_TRIGGER_RISING, "RTC period",
- wm831x_rtc);
- if (ret != 0) {
- dev_err(&pdev->dev, "Failed to request periodic IRQ %d: %d\n",
- per_irq, ret);
- }
-
ret = request_threaded_irq(alm_irq, NULL, wm831x_alm_irq,
IRQF_TRIGGER_RISING, "RTC alarm",
wm831x_rtc);
@@ -452,20 +434,16 @@ static int wm831x_rtc_probe(struct platform_device *pdev)
return 0;
err:
- kfree(wm831x_rtc);
return ret;
}
static int __devexit wm831x_rtc_remove(struct platform_device *pdev)
{
struct wm831x_rtc *wm831x_rtc = platform_get_drvdata(pdev);
- int per_irq = platform_get_irq_byname(pdev, "PER");
int alm_irq = platform_get_irq_byname(pdev, "ALM");
free_irq(alm_irq, wm831x_rtc);
- free_irq(per_irq, wm831x_rtc);
rtc_device_unregister(wm831x_rtc->rtc);
- kfree(wm831x_rtc);
return 0;
}
@@ -490,17 +468,7 @@ static struct platform_driver wm831x_rtc_driver = {
},
};
-static int __init wm831x_rtc_init(void)
-{
- return platform_driver_register(&wm831x_rtc_driver);
-}
-module_init(wm831x_rtc_init);
-
-static void __exit wm831x_rtc_exit(void)
-{
- platform_driver_unregister(&wm831x_rtc_driver);
-}
-module_exit(wm831x_rtc_exit);
+module_platform_driver(wm831x_rtc_driver);
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_DESCRIPTION("RTC driver for the WM831x series PMICs");
diff --git a/drivers/rtc/rtc-wm8350.c b/drivers/rtc/rtc-wm8350.c
index 66421426e404..c2e52d15abb2 100644
--- a/drivers/rtc/rtc-wm8350.c
+++ b/drivers/rtc/rtc-wm8350.c
@@ -486,17 +486,7 @@ static struct platform_driver wm8350_rtc_driver = {
},
};
-static int __init wm8350_rtc_init(void)
-{
- return platform_driver_register(&wm8350_rtc_driver);
-}
-module_init(wm8350_rtc_init);
-
-static void __exit wm8350_rtc_exit(void)
-{
- platform_driver_unregister(&wm8350_rtc_driver);
-}
-module_exit(wm8350_rtc_exit);
+module_platform_driver(wm8350_rtc_driver);
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_DESCRIPTION("RTC driver for the WM8350");
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 65894f05a801..eef27a197c00 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -17,7 +17,6 @@
#include <linux/ctype.h>
#include <linux/major.h>
#include <linux/slab.h>
-#include <linux/buffer_head.h>
#include <linux/hdreg.h>
#include <linux/async.h>
#include <linux/mutex.h>
@@ -1073,7 +1072,7 @@ static const struct file_operations dasd_stats_global_fops = {
static void dasd_profile_init(struct dasd_profile *profile,
struct dentry *base_dentry)
{
- mode_t mode;
+ umode_t mode;
struct dentry *pde;
if (!base_dentry)
@@ -1112,7 +1111,7 @@ static void dasd_statistics_removeroot(void)
static void dasd_statistics_createroot(void)
{
- mode_t mode;
+ umode_t mode;
struct dentry *pde;
dasd_debugfs_root_entry = NULL;
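The dasd.c hunks follow the tree-wide switch from mode_t to umode_t for file mode bits; the computed mode is ultimately handed to debugfs_create_file(), whose mode parameter is a umode_t. A small sketch of that kind of call, assuming a placeholder file name and fops:

#include <linux/debugfs.h>
#include <linux/stat.h>

static struct dentry *create_stats_file(struct dentry *parent, void *data,
					const struct file_operations *fops)
{
	umode_t mode = S_IRUGO | S_IWUSR;	/* mode bits carried as umode_t */

	return debugfs_create_file("statistics", mode, parent, data, fops);
}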
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index 87a0cf160fe5..0326571e7ffa 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -1718,7 +1718,7 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense)
erp->startdev = device;
erp->memdev = device;
erp->magic = default_erp->magic;
- erp->expires = 0;
+ erp->expires = default_erp->expires;
erp->retries = 256;
erp->buildclk = get_clock();
erp->status = DASD_CQR_FILLED;
@@ -2363,7 +2363,7 @@ static struct dasd_ccw_req *dasd_3990_erp_add_erp(struct dasd_ccw_req *cqr)
erp->memdev = device;
erp->block = cqr->block;
erp->magic = cqr->magic;
- erp->expires = 0;
+ erp->expires = cqr->expires;
erp->retries = 256;
erp->buildclk = get_clock();
erp->status = DASD_CQR_FILLED;
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index c388eda1e2b1..553b3c5abb0a 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -705,6 +705,16 @@ struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device)
if (lcu->pav == NO_PAV ||
lcu->flags & (NEED_UAC_UPDATE | UPDATE_PENDING))
return NULL;
+ if (unlikely(!(private->features.feature[8] & 0x01))) {
+ /*
+ * PAV enabled but prefix not, very unlikely
+ * seems to be a lost pathgroup
+ * use base device to do IO
+ */
+ DBF_DEV_EVENT(DBF_ERR, base_device, "%s",
+ "Prefix not enabled with PAV enabled\n");
+ return NULL;
+ }
spin_lock_irqsave(&lcu->lock, flags);
alias_device = group->next;
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 6ab29680586a..bbcd5e9206ee 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -752,24 +752,13 @@ dasd_eckd_cdl_reclen(int recid)
return sizes_trk0[recid];
return LABEL_SIZE;
}
-
-/*
- * Generate device unique id that specifies the physical device.
- */
-static int dasd_eckd_generate_uid(struct dasd_device *device)
+/* create unique id from private structure. */
+static void create_uid(struct dasd_eckd_private *private)
{
- struct dasd_eckd_private *private;
- struct dasd_uid *uid;
int count;
- unsigned long flags;
+ struct dasd_uid *uid;
- private = (struct dasd_eckd_private *) device->private;
- if (!private)
- return -ENODEV;
- if (!private->ned || !private->gneq)
- return -ENODEV;
uid = &private->uid;
- spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
memset(uid, 0, sizeof(struct dasd_uid));
memcpy(uid->vendor, private->ned->HDA_manufacturer,
sizeof(uid->vendor) - 1);
@@ -792,6 +781,23 @@ static int dasd_eckd_generate_uid(struct dasd_device *device)
private->vdsneq->uit[count]);
}
}
+}
+
+/*
+ * Generate device unique id that specifies the physical device.
+ */
+static int dasd_eckd_generate_uid(struct dasd_device *device)
+{
+ struct dasd_eckd_private *private;
+ unsigned long flags;
+
+ private = (struct dasd_eckd_private *) device->private;
+ if (!private)
+ return -ENODEV;
+ if (!private->ned || !private->gneq)
+ return -ENODEV;
+ spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+ create_uid(private);
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
return 0;
}
@@ -811,6 +817,21 @@ static int dasd_eckd_get_uid(struct dasd_device *device, struct dasd_uid *uid)
return -EINVAL;
}
+/*
+ * compare device UID with data of a given dasd_eckd_private structure
+ * return 0 for match
+ */
+static int dasd_eckd_compare_path_uid(struct dasd_device *device,
+ struct dasd_eckd_private *private)
+{
+ struct dasd_uid device_uid;
+
+ create_uid(private);
+ dasd_eckd_get_uid(device, &device_uid);
+
+ return memcmp(&device_uid, &private->uid, sizeof(struct dasd_uid));
+}
+
static void dasd_eckd_fill_rcd_cqr(struct dasd_device *device,
struct dasd_ccw_req *cqr,
__u8 *rcd_buffer,
@@ -1005,59 +1026,120 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
int conf_len, conf_data_saved;
int rc;
__u8 lpm, opm;
- struct dasd_eckd_private *private;
+ struct dasd_eckd_private *private, path_private;
struct dasd_path *path_data;
+ struct dasd_uid *uid;
+ char print_path_uid[60], print_device_uid[60];
private = (struct dasd_eckd_private *) device->private;
path_data = &device->path_data;
opm = ccw_device_get_path_mask(device->cdev);
- lpm = 0x80;
conf_data_saved = 0;
/* get configuration data per operational path */
for (lpm = 0x80; lpm; lpm>>= 1) {
- if (lpm & opm) {
- rc = dasd_eckd_read_conf_lpm(device, &conf_data,
- &conf_len, lpm);
- if (rc && rc != -EOPNOTSUPP) { /* -EOPNOTSUPP is ok */
- DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
- "Read configuration data returned "
- "error %d", rc);
- return rc;
- }
- if (conf_data == NULL) {
- DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
- "No configuration data "
- "retrieved");
- /* no further analysis possible */
- path_data->opm |= lpm;
- continue; /* no error */
+ if (!(lpm & opm))
+ continue;
+ rc = dasd_eckd_read_conf_lpm(device, &conf_data,
+ &conf_len, lpm);
+ if (rc && rc != -EOPNOTSUPP) { /* -EOPNOTSUPP is ok */
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
+ "Read configuration data returned "
+ "error %d", rc);
+ return rc;
+ }
+ if (conf_data == NULL) {
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
+ "No configuration data "
+ "retrieved");
+ /* no further analysis possible */
+ path_data->opm |= lpm;
+ continue; /* no error */
+ }
+ /* save first valid configuration data */
+ if (!conf_data_saved) {
+ kfree(private->conf_data);
+ private->conf_data = conf_data;
+ private->conf_len = conf_len;
+ if (dasd_eckd_identify_conf_parts(private)) {
+ private->conf_data = NULL;
+ private->conf_len = 0;
+ kfree(conf_data);
+ continue;
}
- /* save first valid configuration data */
- if (!conf_data_saved) {
- kfree(private->conf_data);
- private->conf_data = conf_data;
- private->conf_len = conf_len;
- if (dasd_eckd_identify_conf_parts(private)) {
- private->conf_data = NULL;
- private->conf_len = 0;
- kfree(conf_data);
- continue;
- }
- conf_data_saved++;
+ /*
+			 * build the device UID so that other path data
+			 * can be compared against it
+ */
+ dasd_eckd_generate_uid(device);
+ conf_data_saved++;
+ } else {
+ path_private.conf_data = conf_data;
+ path_private.conf_len = DASD_ECKD_RCD_DATA_SIZE;
+ if (dasd_eckd_identify_conf_parts(
+ &path_private)) {
+ path_private.conf_data = NULL;
+ path_private.conf_len = 0;
+ kfree(conf_data);
+ continue;
}
- switch (dasd_eckd_path_access(conf_data, conf_len)) {
- case 0x02:
- path_data->npm |= lpm;
- break;
- case 0x03:
- path_data->ppm |= lpm;
- break;
+
+ if (dasd_eckd_compare_path_uid(
+ device, &path_private)) {
+ uid = &path_private.uid;
+ if (strlen(uid->vduit) > 0)
+ snprintf(print_path_uid,
+ sizeof(print_path_uid),
+ "%s.%s.%04x.%02x.%s",
+ uid->vendor, uid->serial,
+ uid->ssid, uid->real_unit_addr,
+ uid->vduit);
+ else
+ snprintf(print_path_uid,
+ sizeof(print_path_uid),
+ "%s.%s.%04x.%02x",
+ uid->vendor, uid->serial,
+ uid->ssid,
+ uid->real_unit_addr);
+ uid = &private->uid;
+ if (strlen(uid->vduit) > 0)
+ snprintf(print_device_uid,
+ sizeof(print_device_uid),
+ "%s.%s.%04x.%02x.%s",
+ uid->vendor, uid->serial,
+ uid->ssid, uid->real_unit_addr,
+ uid->vduit);
+ else
+ snprintf(print_device_uid,
+ sizeof(print_device_uid),
+ "%s.%s.%04x.%02x",
+ uid->vendor, uid->serial,
+ uid->ssid,
+ uid->real_unit_addr);
+ dev_err(&device->cdev->dev,
+ "Not all channel paths lead to "
+ "the same device, path %02X leads to "
+ "device %s instead of %s\n", lpm,
+ print_path_uid, print_device_uid);
+ return -EINVAL;
}
- path_data->opm |= lpm;
- if (conf_data != private->conf_data)
- kfree(conf_data);
+
+ path_private.conf_data = NULL;
+ path_private.conf_len = 0;
}
+ switch (dasd_eckd_path_access(conf_data, conf_len)) {
+ case 0x02:
+ path_data->npm |= lpm;
+ break;
+ case 0x03:
+ path_data->ppm |= lpm;
+ break;
+ }
+ path_data->opm |= lpm;
+
+ if (conf_data != private->conf_data)
+ kfree(conf_data);
}
+
return 0;
}
@@ -1090,12 +1172,61 @@ static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm)
return 0;
}
+static int rebuild_device_uid(struct dasd_device *device,
+ struct path_verification_work_data *data)
+{
+ struct dasd_eckd_private *private;
+ struct dasd_path *path_data;
+ __u8 lpm, opm;
+ int rc;
+
+ rc = -ENODEV;
+ private = (struct dasd_eckd_private *) device->private;
+ path_data = &device->path_data;
+ opm = device->path_data.opm;
+
+ for (lpm = 0x80; lpm; lpm >>= 1) {
+ if (!(lpm & opm))
+ continue;
+ memset(&data->rcd_buffer, 0, sizeof(data->rcd_buffer));
+ memset(&data->cqr, 0, sizeof(data->cqr));
+ data->cqr.cpaddr = &data->ccw;
+ rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
+ data->rcd_buffer,
+ lpm);
+
+ if (rc) {
+ if (rc == -EOPNOTSUPP) /* -EOPNOTSUPP is ok */
+ continue;
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
+ "Read configuration data "
+ "returned error %d", rc);
+ break;
+ }
+ memcpy(private->conf_data, data->rcd_buffer,
+ DASD_ECKD_RCD_DATA_SIZE);
+ if (dasd_eckd_identify_conf_parts(private)) {
+ rc = -ENODEV;
+ } else /* first valid path is enough */
+ break;
+ }
+
+ if (!rc)
+ rc = dasd_eckd_generate_uid(device);
+
+ return rc;
+}
+
static void do_path_verification_work(struct work_struct *work)
{
struct path_verification_work_data *data;
struct dasd_device *device;
+ struct dasd_eckd_private path_private;
+ struct dasd_uid *uid;
+ __u8 path_rcd_buf[DASD_ECKD_RCD_DATA_SIZE];
__u8 lpm, opm, npm, ppm, epm;
unsigned long flags;
+ char print_uid[60];
int rc;
data = container_of(work, struct path_verification_work_data, worker);
@@ -1112,64 +1243,129 @@ static void do_path_verification_work(struct work_struct *work)
ppm = 0;
epm = 0;
for (lpm = 0x80; lpm; lpm >>= 1) {
- if (lpm & data->tbvpm) {
- memset(data->rcd_buffer, 0, sizeof(data->rcd_buffer));
- memset(&data->cqr, 0, sizeof(data->cqr));
- data->cqr.cpaddr = &data->ccw;
- rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
- data->rcd_buffer,
- lpm);
- if (!rc) {
- switch (dasd_eckd_path_access(data->rcd_buffer,
- DASD_ECKD_RCD_DATA_SIZE)) {
- case 0x02:
- npm |= lpm;
- break;
- case 0x03:
- ppm |= lpm;
- break;
- }
- opm |= lpm;
- } else if (rc == -EOPNOTSUPP) {
- DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
- "path verification: No configuration "
- "data retrieved");
- opm |= lpm;
- } else if (rc == -EAGAIN) {
- DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
+ if (!(lpm & data->tbvpm))
+ continue;
+ memset(&data->rcd_buffer, 0, sizeof(data->rcd_buffer));
+ memset(&data->cqr, 0, sizeof(data->cqr));
+ data->cqr.cpaddr = &data->ccw;
+ rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
+ data->rcd_buffer,
+ lpm);
+ if (!rc) {
+ switch (dasd_eckd_path_access(data->rcd_buffer,
+ DASD_ECKD_RCD_DATA_SIZE)
+ ) {
+ case 0x02:
+ npm |= lpm;
+ break;
+ case 0x03:
+ ppm |= lpm;
+ break;
+ }
+ opm |= lpm;
+ } else if (rc == -EOPNOTSUPP) {
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
+ "path verification: No configuration "
+ "data retrieved");
+ opm |= lpm;
+ } else if (rc == -EAGAIN) {
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
"path verification: device is stopped,"
" try again later");
- epm |= lpm;
- } else {
- dev_warn(&device->cdev->dev,
- "Reading device feature codes failed "
- "(rc=%d) for new path %x\n", rc, lpm);
- continue;
- }
- if (verify_fcx_max_data(device, lpm)) {
+ epm |= lpm;
+ } else {
+ dev_warn(&device->cdev->dev,
+ "Reading device feature codes failed "
+ "(rc=%d) for new path %x\n", rc, lpm);
+ continue;
+ }
+ if (verify_fcx_max_data(device, lpm)) {
+ opm &= ~lpm;
+ npm &= ~lpm;
+ ppm &= ~lpm;
+ continue;
+ }
+
+ /*
+ * save conf_data for comparison after
+ * rebuild_device_uid may have changed
+ * the original data
+ */
+ memcpy(&path_rcd_buf, data->rcd_buffer,
+ DASD_ECKD_RCD_DATA_SIZE);
+ path_private.conf_data = (void *) &path_rcd_buf;
+ path_private.conf_len = DASD_ECKD_RCD_DATA_SIZE;
+ if (dasd_eckd_identify_conf_parts(&path_private)) {
+ path_private.conf_data = NULL;
+ path_private.conf_len = 0;
+ continue;
+ }
+
+ /*
+		 * compare the path UID with the device UID only if at least
+		 * one valid path is left;
+		 * otherwise the device UID may have changed and
+		 * the first working path UID will be used as the device UID
+ */
+ if (device->path_data.opm &&
+ dasd_eckd_compare_path_uid(device, &path_private)) {
+ /*
+ * the comparison was not successful
+ * rebuild the device UID with at least one
+ * known path in case a z/VM hyperswap command
+ * has changed the device
+ *
+ * after this compare again
+ *
+ * if either the rebuild or the recompare fails
+ * the path can not be used
+ */
+ if (rebuild_device_uid(device, data) ||
+ dasd_eckd_compare_path_uid(
+ device, &path_private)) {
+ uid = &path_private.uid;
+ if (strlen(uid->vduit) > 0)
+ snprintf(print_uid, sizeof(print_uid),
+ "%s.%s.%04x.%02x.%s",
+ uid->vendor, uid->serial,
+ uid->ssid, uid->real_unit_addr,
+ uid->vduit);
+ else
+ snprintf(print_uid, sizeof(print_uid),
+ "%s.%s.%04x.%02x",
+ uid->vendor, uid->serial,
+ uid->ssid,
+ uid->real_unit_addr);
+ dev_err(&device->cdev->dev,
+ "The newly added channel path %02X "
+ "will not be used because it leads "
+ "to a different device %s\n",
+ lpm, print_uid);
opm &= ~lpm;
npm &= ~lpm;
ppm &= ~lpm;
+ continue;
}
}
+
+ /*
+ * There is a small chance that a path is lost again between
+ * above path verification and the following modification of
+ * the device opm mask. We could avoid that race here by using
+ * yet another path mask, but we rather deal with this unlikely
+ * situation in dasd_start_IO.
+ */
+ spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+ if (!device->path_data.opm && opm) {
+ device->path_data.opm = opm;
+ dasd_generic_path_operational(device);
+ } else
+ device->path_data.opm |= opm;
+ device->path_data.npm |= npm;
+ device->path_data.ppm |= ppm;
+ device->path_data.tbvpm |= epm;
+ spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}
- /*
- * There is a small chance that a path is lost again between
- * above path verification and the following modification of
- * the device opm mask. We could avoid that race here by using
- * yet another path mask, but we rather deal with this unlikely
- * situation in dasd_start_IO.
- */
- spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
- if (!device->path_data.opm && opm) {
- device->path_data.opm = opm;
- dasd_generic_path_operational(device);
- } else
- device->path_data.opm |= opm;
- device->path_data.npm |= npm;
- device->path_data.ppm |= ppm;
- device->path_data.tbvpm |= epm;
- spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
dasd_put_device(device);
if (data->isglobal)
@@ -1441,11 +1637,6 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
device->default_expires = value;
}
- /* Generate device unique id */
- rc = dasd_eckd_generate_uid(device);
- if (rc)
- goto out_err1;
-
dasd_eckd_get_uid(device, &temp_uid);
if (temp_uid.type == UA_BASE_DEVICE) {
block = dasd_alloc_block();
@@ -2206,7 +2397,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
sizeof(struct PFX_eckd_data));
} else {
if (define_extent(ccw++, cqr->data, first_trk,
- last_trk, cmd, startdev) == -EAGAIN) {
+ last_trk, cmd, basedev) == -EAGAIN) {
/* Clock not in sync and XRC is enabled.
* Try again later.
*/
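The dasd_eckd rework factors UID construction into create_uid() so the same code can build a UID from the configuration data of a single path and compare it, with memcmp(), against the UID of the device; a mismatch means the channel path leads to different hardware and must not be used. A reduced sketch of that comparison idea (field names are illustrative; the real code zeroes the structure before filling it so padding bytes compare equal):

#include <linux/string.h>

struct uid_sketch {
	char vendor[4];
	char serial[15];
	unsigned short ssid;
	unsigned char real_unit_addr;
};

/* returns 0 when path and device describe the same physical device */
static int compare_uid(const struct uid_sketch *path_uid,
		       const struct uid_sketch *device_uid)
{
	return memcmp(path_uid, device_uid, sizeof(*path_uid));
}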
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index 98f3e4ade924..690c3338a8ae 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -36,7 +36,7 @@
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/hdreg.h> /* HDIO_GETGEO */
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/bio.h>
#include <linux/suspend.h>
#include <linux/platform_device.h>
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index e5cb9248a442..f3b8bb84faf2 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -75,7 +75,7 @@ static LIST_HEAD(raw3270_devices);
static int raw3270_registered;
/* Module parameters */
-static int tubxcorrect = 0;
+static bool tubxcorrect = 0;
module_param(tubxcorrect, bool, 0);
/*
diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c
index 95b909ac2b73..3c03c1060be6 100644
--- a/drivers/s390/char/sclp_config.c
+++ b/drivers/s390/char/sclp_config.c
@@ -11,7 +11,7 @@
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/cpu.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/workqueue.h>
#include <asm/smp.h>
@@ -31,14 +31,14 @@ static struct work_struct sclp_cpu_change_work;
static void sclp_cpu_capability_notify(struct work_struct *work)
{
int cpu;
- struct sys_device *sysdev;
+ struct device *dev;
s390_adjust_jiffies();
pr_warning("cpu capability changed.\n");
get_online_cpus();
for_each_online_cpu(cpu) {
- sysdev = get_cpu_sysdev(cpu);
- kobject_uevent(&sysdev->kobj, KOBJ_CHANGE);
+ dev = get_cpu_device(cpu);
+ kobject_uevent(&dev->kobj, KOBJ_CHANGE);
}
put_online_cpus();
}
diff --git a/drivers/s390/char/tape_class.h b/drivers/s390/char/tape_class.h
index 9e32780c317f..ba2092f741d5 100644
--- a/drivers/s390/char/tape_class.h
+++ b/drivers/s390/char/tape_class.h
@@ -14,7 +14,6 @@
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/major.h>
-#include <linux/kobj_map.h>
#include <linux/cdev.h>
#include <linux/device.h>
diff --git a/drivers/s390/char/vmwatchdog.c b/drivers/s390/char/vmwatchdog.c
index 11312f401c70..2211277a1079 100644
--- a/drivers/s390/char/vmwatchdog.c
+++ b/drivers/s390/char/vmwatchdog.c
@@ -28,9 +28,9 @@
#define MAX_CMDLEN 240
#define MIN_INTERVAL 15
static char vmwdt_cmd[MAX_CMDLEN] = "IPL";
-static int vmwdt_conceal;
+static bool vmwdt_conceal;
-static int vmwdt_nowayout = WATCHDOG_NOWAYOUT;
+static bool vmwdt_nowayout = WATCHDOG_NOWAYOUT;
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 75c3f1f8fd43..a84631a7391d 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -529,10 +529,7 @@ __s390_vary_chpid_on(struct subchannel_id schid, void *data)
int chsc_chp_vary(struct chp_id chpid, int on)
{
struct channel_path *chp = chpid_to_chp(chpid);
- struct chp_link link;
- memset(&link, 0, sizeof(struct chp_link));
- link.chpid = chpid;
/* Wait until previous actions have settled. */
css_wait_for_slow_path();
/*
@@ -542,10 +539,10 @@ int chsc_chp_vary(struct chp_id chpid, int on)
	/* Try to update the channel path descriptor. */
chsc_determine_base_channel_path_desc(chpid, &chp->desc);
for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
- __s390_vary_chpid_on, &link);
+ __s390_vary_chpid_on, &chpid);
} else
for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
- NULL, &link);
+ NULL, &chpid);
return 0;
}
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index 155a82bcb9e5..4a1ff5c2eb88 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -68,8 +68,13 @@ struct schib {
__u8 mda[4]; /* model dependent area */
} __attribute__ ((packed,aligned(4)));
+/*
+ * When rescheduled, todo's with higher values will overwrite those
+ * with lower values.
+ */
enum sch_todo {
SCH_TODO_NOTHING,
+ SCH_TODO_EVAL,
SCH_TODO_UNREG,
};
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index 2985eb439485..204ca728e7fd 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -98,7 +98,7 @@ enum cmb_format {
* enum cmb_format.
*/
static int format = CMF_AUTODETECT;
-module_param(format, bool, 0444);
+module_param(format, bint, 0444);
/**
* struct cmb_operations - functions to use depending on cmb_format
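The cmf.c hunk uses the new "bint" parameter type: module_param(format, bool, ...) now requires a genuine bool variable (compare the raw3270 and vmwatchdog hunks above), while bint keeps the existing int storage and still accepts boolean-style input. A one-line sketch:

#include <linux/moduleparam.h>

static int format = 1;			/* stays an int, as the existing code expects */
module_param(format, bint, 0444);	/* parsed like a bool (y/n/0/1) but stored in an int */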
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 92d7324acb1c..21908e67bf67 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -195,51 +195,6 @@ void css_sch_device_unregister(struct subchannel *sch)
}
EXPORT_SYMBOL_GPL(css_sch_device_unregister);
-static void css_sch_todo(struct work_struct *work)
-{
- struct subchannel *sch;
- enum sch_todo todo;
-
- sch = container_of(work, struct subchannel, todo_work);
- /* Find out todo. */
- spin_lock_irq(sch->lock);
- todo = sch->todo;
- CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
- sch->schid.sch_no, todo);
- sch->todo = SCH_TODO_NOTHING;
- spin_unlock_irq(sch->lock);
- /* Perform todo. */
- if (todo == SCH_TODO_UNREG)
- css_sch_device_unregister(sch);
- /* Release workqueue ref. */
- put_device(&sch->dev);
-}
-
-/**
- * css_sched_sch_todo - schedule a subchannel operation
- * @sch: subchannel
- * @todo: todo
- *
- * Schedule the operation identified by @todo to be performed on the slow path
- * workqueue. Do nothing if another operation with higher priority is already
- * scheduled. Needs to be called with subchannel lock held.
- */
-void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
-{
- CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
- sch->schid.ssid, sch->schid.sch_no, todo);
- if (sch->todo >= todo)
- return;
- /* Get workqueue ref. */
- if (!get_device(&sch->dev))
- return;
- sch->todo = todo;
- if (!queue_work(cio_work_q, &sch->todo_work)) {
- /* Already queued, release workqueue ref. */
- put_device(&sch->dev);
- }
-}
-
static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
{
int i;
@@ -466,6 +421,65 @@ static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
css_schedule_eval(schid);
}
+/**
+ * css_sched_sch_todo - schedule a subchannel operation
+ * @sch: subchannel
+ * @todo: todo
+ *
+ * Schedule the operation identified by @todo to be performed on the slow path
+ * workqueue. Do nothing if another operation with higher priority is already
+ * scheduled. Needs to be called with subchannel lock held.
+ */
+void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
+{
+ CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
+ sch->schid.ssid, sch->schid.sch_no, todo);
+ if (sch->todo >= todo)
+ return;
+ /* Get workqueue ref. */
+ if (!get_device(&sch->dev))
+ return;
+ sch->todo = todo;
+ if (!queue_work(cio_work_q, &sch->todo_work)) {
+ /* Already queued, release workqueue ref. */
+ put_device(&sch->dev);
+ }
+}
+
+static void css_sch_todo(struct work_struct *work)
+{
+ struct subchannel *sch;
+ enum sch_todo todo;
+ int ret;
+
+ sch = container_of(work, struct subchannel, todo_work);
+ /* Find out todo. */
+ spin_lock_irq(sch->lock);
+ todo = sch->todo;
+ CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
+ sch->schid.sch_no, todo);
+ sch->todo = SCH_TODO_NOTHING;
+ spin_unlock_irq(sch->lock);
+ /* Perform todo. */
+ switch (todo) {
+ case SCH_TODO_NOTHING:
+ break;
+ case SCH_TODO_EVAL:
+ ret = css_evaluate_known_subchannel(sch, 1);
+ if (ret == -EAGAIN) {
+ spin_lock_irq(sch->lock);
+ css_sched_sch_todo(sch, todo);
+ spin_unlock_irq(sch->lock);
+ }
+ break;
+ case SCH_TODO_UNREG:
+ css_sch_device_unregister(sch);
+ break;
+ }
+ /* Release workqueue ref. */
+ put_device(&sch->dev);
+}
+
static struct idset *slow_subchannel_set;
static spinlock_t slow_subchannel_lock;
static wait_queue_head_t css_eval_wq;
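css_sched_sch_todo() only ever raises the pending work item: a request is dropped when an equal or higher-priority todo is already queued, and a device reference is taken before queueing so the subchannel cannot disappear under the worker. A stripped-down sketch of just the priority rule (the reference counting and queue_work() call are omitted):

enum todo_sketch { TODO_NOTHING, TODO_EVAL, TODO_UNREG };	/* ordered by priority */

static enum todo_sketch pending_todo;

static void sched_todo(enum todo_sketch todo)
{
	if (pending_todo >= todo)
		return;		/* an equal or stronger request is already queued */
	pending_todo = todo;	/* the real code then grabs a device ref and queues work */
}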
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index d734f4a0ecac..47269858ecb6 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -1868,9 +1868,9 @@ static void __ccw_device_pm_restore(struct ccw_device *cdev)
*/
cdev->private->flags.resuming = 1;
cdev->private->path_new_mask = LPM_ANYPATH;
- css_schedule_eval(sch->schid);
+ css_sched_sch_todo(sch, SCH_TODO_EVAL);
spin_unlock_irq(sch->lock);
- css_complete_work();
+ css_wait_for_slow_path();
/* cdev may have been moved to a different subchannel. */
sch = to_subchannel(cdev->dev.parent);
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 52c233fa2b12..1b853513c891 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -496,8 +496,26 @@ static void ccw_device_reset_path_events(struct ccw_device *cdev)
cdev->private->pgid_reset_mask = 0;
}
-void
-ccw_device_verify_done(struct ccw_device *cdev, int err)
+static void create_fake_irb(struct irb *irb, int type)
+{
+ memset(irb, 0, sizeof(*irb));
+ if (type == FAKE_CMD_IRB) {
+ struct cmd_scsw *scsw = &irb->scsw.cmd;
+ scsw->cc = 1;
+ scsw->fctl = SCSW_FCTL_START_FUNC;
+ scsw->actl = SCSW_ACTL_START_PEND;
+ scsw->stctl = SCSW_STCTL_STATUS_PEND;
+ } else if (type == FAKE_TM_IRB) {
+ struct tm_scsw *scsw = &irb->scsw.tm;
+ scsw->x = 1;
+ scsw->cc = 1;
+ scsw->fctl = SCSW_FCTL_START_FUNC;
+ scsw->actl = SCSW_ACTL_START_PEND;
+ scsw->stctl = SCSW_STCTL_STATUS_PEND;
+ }
+}
+
+void ccw_device_verify_done(struct ccw_device *cdev, int err)
{
struct subchannel *sch;
@@ -520,12 +538,8 @@ callback:
ccw_device_done(cdev, DEV_STATE_ONLINE);
/* Deliver fake irb to device driver, if needed. */
if (cdev->private->flags.fake_irb) {
- memset(&cdev->private->irb, 0, sizeof(struct irb));
- cdev->private->irb.scsw.cmd.cc = 1;
- cdev->private->irb.scsw.cmd.fctl = SCSW_FCTL_START_FUNC;
- cdev->private->irb.scsw.cmd.actl = SCSW_ACTL_START_PEND;
- cdev->private->irb.scsw.cmd.stctl =
- SCSW_STCTL_STATUS_PEND;
+ create_fake_irb(&cdev->private->irb,
+ cdev->private->flags.fake_irb);
cdev->private->flags.fake_irb = 0;
if (cdev->handler)
cdev->handler(cdev, cdev->private->intparm,
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index f98698d5735e..ec7fb6d3b479 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -198,7 +198,7 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
if (cdev->private->state == DEV_STATE_VERIFY) {
/* Remember to fake irb when finished. */
if (!cdev->private->flags.fake_irb) {
- cdev->private->flags.fake_irb = 1;
+ cdev->private->flags.fake_irb = FAKE_CMD_IRB;
cdev->private->intparm = intparm;
return 0;
} else
@@ -213,9 +213,9 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
ret = cio_set_options (sch, flags);
if (ret)
return ret;
- /* Adjust requested path mask to excluded varied off paths. */
+ /* Adjust requested path mask to exclude unusable paths. */
if (lpm) {
- lpm &= sch->opm;
+ lpm &= sch->lpm;
if (lpm == 0)
return -EACCES;
}
@@ -605,11 +605,21 @@ int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw,
sch = to_subchannel(cdev->dev.parent);
if (!sch->schib.pmcw.ena)
return -EINVAL;
+ if (cdev->private->state == DEV_STATE_VERIFY) {
+ /* Remember to fake irb when finished. */
+ if (!cdev->private->flags.fake_irb) {
+ cdev->private->flags.fake_irb = FAKE_TM_IRB;
+ cdev->private->intparm = intparm;
+ return 0;
+ } else
+ /* There's already a fake I/O around. */
+ return -EBUSY;
+ }
if (cdev->private->state != DEV_STATE_ONLINE)
return -EIO;
- /* Adjust requested path mask to excluded varied off paths. */
+ /* Adjust requested path mask to exclude unusable paths. */
if (lpm) {
- lpm &= sch->opm;
+ lpm &= sch->lpm;
if (lpm == 0)
return -EACCES;
}
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h
index 2ebb492a5c17..76253dfcc1be 100644
--- a/drivers/s390/cio/io_sch.h
+++ b/drivers/s390/cio/io_sch.h
@@ -111,6 +111,9 @@ enum cdev_todo {
CDEV_TODO_UNREG_EVAL,
};
+#define FAKE_CMD_IRB 1
+#define FAKE_TM_IRB 2
+
struct ccw_device_private {
struct ccw_device *cdev;
struct subchannel *sch;
@@ -138,7 +141,7 @@ struct ccw_device_private {
unsigned int doverify:1; /* delayed path verification */
unsigned int donotify:1; /* call notify function */
unsigned int recog_done:1; /* dev. recog. complete */
- unsigned int fake_irb:1; /* deliver faked irb */
+ unsigned int fake_irb:2; /* deliver faked irb */
unsigned int resuming:1; /* recognition while resume */
unsigned int pgroup:1; /* pathgroup is set up */
unsigned int mpath:1; /* multipathing is set up */
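Widening fake_irb from one bit to two lets the flag say which kind of interruption response block to fake, FAKE_CMD_IRB for command mode or FAKE_TM_IRB for transport mode, rather than just "fake one". A compressed sketch of the flag layout (structure simplified to the one field of interest):

#define FAKE_CMD_IRB	1
#define FAKE_TM_IRB	2

struct flags_sketch {
	/* two bits: 0 = no fake IRB, FAKE_CMD_IRB = command mode, FAKE_TM_IRB = transport mode */
	unsigned int fake_irb:2;
};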
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 2acc01f90a6a..452989a7ec13 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -22,12 +22,9 @@
static struct kmem_cache *qdio_q_cache;
static struct kmem_cache *qdio_aob_cache;
-struct qaob *qdio_allocate_aob()
+struct qaob *qdio_allocate_aob(void)
{
- struct qaob *aob;
-
- aob = kmem_cache_zalloc(qdio_aob_cache, GFP_ATOMIC);
- return aob;
+ return kmem_cache_zalloc(qdio_aob_cache, GFP_ATOMIC);
}
EXPORT_SYMBOL_GPL(qdio_allocate_aob);
@@ -180,7 +177,8 @@ static void setup_queues(struct qdio_irq *irq_ptr,
setup_queues_misc(q, irq_ptr, qdio_init->input_handler, i);
q->is_input_q = 1;
- q->u.in.queue_start_poll = qdio_init->queue_start_poll[i];
+ q->u.in.queue_start_poll = qdio_init->queue_start_poll_array ?
+ qdio_init->queue_start_poll_array[i] : NULL;
setup_storage_lists(q, irq_ptr, input_sbal_array, i);
input_sbal_array += QDIO_MAX_BUFFERS_PER_Q;
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index ec94f049e995..96bbe9d12a79 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -1552,6 +1552,8 @@ static void ap_reset(struct ap_device *ap_dev)
rc = ap_init_queue(ap_dev->qid);
if (rc == -ENODEV)
ap_dev->unregistered = 1;
+ else
+ __ap_schedule_poll_timer();
}
static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags)
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c
index dd4737808e06..077b7d109fde 100644
--- a/drivers/s390/crypto/zcrypt_pcixcc.c
+++ b/drivers/s390/crypto/zcrypt_pcixcc.c
@@ -56,11 +56,6 @@
#define PCIXCC_MAX_ICA_RESPONSE_SIZE 0x77c /* max size type86 v2 reply */
#define PCIXCC_MAX_XCRB_MESSAGE_SIZE (12*1024)
-#define PCIXCC_MAX_XCRB_RESPONSE_SIZE PCIXCC_MAX_XCRB_MESSAGE_SIZE
-#define PCIXCC_MAX_XCRB_DATA_SIZE (11*1024)
-#define PCIXCC_MAX_XCRB_REPLY_SIZE (5*1024)
-
-#define PCIXCC_MAX_RESPONSE_SIZE PCIXCC_MAX_XCRB_RESPONSE_SIZE
#define PCIXCC_CLEANUP_TIME (15*HZ)
@@ -265,7 +260,7 @@ static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_device *zdev,
* @ap_msg: pointer to AP message
* @xcRB: pointer to user input data
*
- * Returns 0 on success or -EFAULT.
+ * Returns 0 on success or -EFAULT, -EINVAL.
*/
struct type86_fmt2_msg {
struct type86_hdr hdr;
@@ -295,19 +290,12 @@ static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev,
CEIL4(xcRB->request_control_blk_length) +
xcRB->request_data_length;
if (ap_msg->length > PCIXCC_MAX_XCRB_MESSAGE_SIZE)
- return -EFAULT;
- if (CEIL4(xcRB->reply_control_blk_length) > PCIXCC_MAX_XCRB_REPLY_SIZE)
- return -EFAULT;
- if (CEIL4(xcRB->reply_data_length) > PCIXCC_MAX_XCRB_DATA_SIZE)
- return -EFAULT;
- replylen = CEIL4(xcRB->reply_control_blk_length) +
- CEIL4(xcRB->reply_data_length) +
- sizeof(struct type86_fmt2_msg);
- if (replylen > PCIXCC_MAX_XCRB_RESPONSE_SIZE) {
- xcRB->reply_control_blk_length = PCIXCC_MAX_XCRB_RESPONSE_SIZE -
- (sizeof(struct type86_fmt2_msg) +
- CEIL4(xcRB->reply_data_length));
- }
+ return -EINVAL;
+ replylen = sizeof(struct type86_fmt2_msg) +
+ CEIL4(xcRB->reply_control_blk_length) +
+ xcRB->reply_data_length;
+ if (replylen > PCIXCC_MAX_XCRB_MESSAGE_SIZE)
+ return -EINVAL;
/* prepare type6 header */
msg->hdr = static_type6_hdrX;
@@ -326,7 +314,7 @@ static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev,
return -EFAULT;
if (msg->cprbx.cprb_len + sizeof(msg->hdr.function_code) >
xcRB->request_control_blk_length)
- return -EFAULT;
+ return -EINVAL;
function_code = ((unsigned char *)&msg->cprbx) + msg->cprbx.cprb_len;
memcpy(msg->hdr.function_code, function_code, sizeof(msg->hdr.function_code));
@@ -678,7 +666,7 @@ static void zcrypt_pcixcc_receive(struct ap_device *ap_dev,
break;
case PCIXCC_RESPONSE_TYPE_XCRB:
length = t86r->fmt2.offset2 + t86r->fmt2.count2;
- length = min(PCIXCC_MAX_XCRB_RESPONSE_SIZE, length);
+ length = min(PCIXCC_MAX_XCRB_MESSAGE_SIZE, length);
memcpy(msg->message, reply->message, length);
break;
default:
@@ -1043,7 +1031,7 @@ static int zcrypt_pcixcc_probe(struct ap_device *ap_dev)
struct zcrypt_device *zdev;
int rc = 0;
- zdev = zcrypt_device_alloc(PCIXCC_MAX_RESPONSE_SIZE);
+ zdev = zcrypt_device_alloc(PCIXCC_MAX_XCRB_MESSAGE_SIZE);
if (!zdev)
return -ENOMEM;
zdev->ap_dev = ap_dev;
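The zcrypt_pcixcc rework drops the separate reply-size limits in favour of the single PCIXCC_MAX_XCRB_MESSAGE_SIZE bound and returns -EINVAL for bad user-supplied lengths instead of -EFAULT, which is reserved for copy errors. CEIL4() in that driver rounds a length up to the next multiple of four; a hedged sketch of this style of check (macro definition and function below are assumptions, not the driver's code):

#include <linux/errno.h>

#define CEIL4(x)		(((x) + 3) & ~3u)	/* round up to a 4-byte multiple */
#define MAX_XCRB_MSG_SIZE	(12 * 1024)		/* same value as the driver's limit */

static int check_xcrb_lengths(unsigned int hdr_len, unsigned int ctrl_len,
			      unsigned int data_len)
{
	if (hdr_len + CEIL4(ctrl_len) + data_len > MAX_XCRB_MSG_SIZE)
		return -EINVAL;		/* malformed request, not a failed copy */
	return 0;
}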
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
index 94f49ffa70ba..7bc1955337ea 100644
--- a/drivers/s390/kvm/kvm_virtio.c
+++ b/drivers/s390/kvm/kvm_virtio.c
@@ -198,7 +198,7 @@ static struct virtqueue *kvm_find_vq(struct virtio_device *vdev,
goto out;
vq = vring_new_virtqueue(config->num, KVM_S390_VIRTIO_RING_ALIGN,
- vdev, (void *) config->address,
+ vdev, true, (void *) config->address,
kvm_notify, callback, name);
if (!vq) {
err = -ENOMEM;
@@ -263,6 +263,11 @@ error:
return PTR_ERR(vqs[i]);
}
+static const char *kvm_bus_name(struct virtio_device *vdev)
+{
+ return "";
+}
+
/*
* The config ops structure as defined by virtio config
*/
@@ -276,6 +281,7 @@ static struct virtio_config_ops kvm_vq_configspace_ops = {
.reset = kvm_reset,
.find_vqs = kvm_find_vqs,
.del_vqs = kvm_del_vqs,
+ .bus_name = kvm_bus_name,
};
/*
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index b6a6356d09b3..8160591913f9 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -63,6 +63,7 @@
#include <asm/io.h>
#include <asm/uaccess.h>
+#include <asm/ebcdic.h>
#include <net/iucv/iucv.h>
#include "fsm.h"
@@ -75,7 +76,7 @@ MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver");
* Debug Facility stuff
*/
#define IUCV_DBF_SETUP_NAME "iucv_setup"
-#define IUCV_DBF_SETUP_LEN 32
+#define IUCV_DBF_SETUP_LEN 64
#define IUCV_DBF_SETUP_PAGES 2
#define IUCV_DBF_SETUP_NR_AREAS 1
#define IUCV_DBF_SETUP_LEVEL 3
@@ -226,6 +227,7 @@ struct iucv_connection {
struct net_device *netdev;
struct connection_profile prof;
char userid[9];
+ char userdata[17];
};
/**
@@ -263,7 +265,7 @@ struct ll_header {
};
#define NETIUCV_HDRLEN (sizeof(struct ll_header))
-#define NETIUCV_BUFSIZE_MAX 32768
+#define NETIUCV_BUFSIZE_MAX 65537
#define NETIUCV_BUFSIZE_DEFAULT NETIUCV_BUFSIZE_MAX
#define NETIUCV_MTU_MAX (NETIUCV_BUFSIZE_MAX - NETIUCV_HDRLEN)
#define NETIUCV_MTU_DEFAULT 9216
@@ -288,7 +290,12 @@ static inline int netiucv_test_and_set_busy(struct net_device *dev)
return test_and_set_bit(0, &priv->tbusy);
}
-static u8 iucvMagic[16] = {
+static u8 iucvMagic_ascii[16] = {
+ 0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20
+};
+
+static u8 iucvMagic_ebcdic[16] = {
0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40
};
@@ -301,18 +308,38 @@ static u8 iucvMagic[16] = {
*
* @returns The printable string (static data!!)
*/
-static char *netiucv_printname(char *name)
+static char *netiucv_printname(char *name, int len)
{
- static char tmp[9];
+ static char tmp[17];
char *p = tmp;
- memcpy(tmp, name, 8);
- tmp[8] = '\0';
- while (*p && (!isspace(*p)))
+ memcpy(tmp, name, len);
+ tmp[len] = '\0';
+ while (*p && ((p - tmp) < len) && (!isspace(*p)))
p++;
*p = '\0';
return tmp;
}
+static char *netiucv_printuser(struct iucv_connection *conn)
+{
+ static char tmp_uid[9];
+ static char tmp_udat[17];
+ static char buf[100];
+
+ if (memcmp(conn->userdata, iucvMagic_ebcdic, 16)) {
+ tmp_uid[8] = '\0';
+ tmp_udat[16] = '\0';
+ memcpy(tmp_uid, conn->userid, 8);
+ memcpy(tmp_uid, netiucv_printname(tmp_uid, 8), 8);
+ memcpy(tmp_udat, conn->userdata, 16);
+ EBCASC(tmp_udat, 16);
+ memcpy(tmp_udat, netiucv_printname(tmp_udat, 16), 16);
+ sprintf(buf, "%s.%s", tmp_uid, tmp_udat);
+ return buf;
+ } else
+ return netiucv_printname(conn->userid, 8);
+}
+
/**
* States of the interface statemachine.
*/
@@ -563,15 +590,18 @@ static int netiucv_callback_connreq(struct iucv_path *path,
{
struct iucv_connection *conn = path->private;
struct iucv_event ev;
+ static char tmp_user[9];
+ static char tmp_udat[17];
int rc;
- if (memcmp(iucvMagic, ipuser, 16))
- /* ipuser must match iucvMagic. */
- return -EINVAL;
rc = -EINVAL;
+ memcpy(tmp_user, netiucv_printname(ipvmid, 8), 8);
+ memcpy(tmp_udat, ipuser, 16);
+ EBCASC(tmp_udat, 16);
read_lock_bh(&iucv_connection_rwlock);
list_for_each_entry(conn, &iucv_connection_list, list) {
- if (strncmp(ipvmid, conn->userid, 8))
+ if (strncmp(ipvmid, conn->userid, 8) ||
+ strncmp(ipuser, conn->userdata, 16))
continue;
/* Found a matching connection for this path. */
conn->path = path;
@@ -580,6 +610,8 @@ static int netiucv_callback_connreq(struct iucv_path *path,
fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev);
rc = 0;
}
+ IUCV_DBF_TEXT_(setup, 2, "Connection requested for %s.%s\n",
+ tmp_user, netiucv_printname(tmp_udat, 16));
read_unlock_bh(&iucv_connection_rwlock);
return rc;
}
@@ -816,7 +848,7 @@ static void conn_action_connaccept(fsm_instance *fi, int event, void *arg)
conn->path = path;
path->msglim = NETIUCV_QUEUELEN_DEFAULT;
path->flags = 0;
- rc = iucv_path_accept(path, &netiucv_handler, NULL, conn);
+ rc = iucv_path_accept(path, &netiucv_handler, conn->userdata , conn);
if (rc) {
IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc);
return;
@@ -854,7 +886,7 @@ static void conn_action_conntimsev(fsm_instance *fi, int event, void *arg)
IUCV_DBF_TEXT(trace, 3, __func__);
fsm_deltimer(&conn->timer);
- iucv_path_sever(conn->path, NULL);
+ iucv_path_sever(conn->path, conn->userdata);
fsm_newstate(fi, CONN_STATE_STARTWAIT);
}
@@ -867,9 +899,9 @@ static void conn_action_connsever(fsm_instance *fi, int event, void *arg)
IUCV_DBF_TEXT(trace, 3, __func__);
fsm_deltimer(&conn->timer);
- iucv_path_sever(conn->path, NULL);
- dev_info(privptr->dev, "The peer interface of the IUCV device"
- " has closed the connection\n");
+ iucv_path_sever(conn->path, conn->userdata);
+ dev_info(privptr->dev, "The peer z/VM guest %s has closed the "
+ "connection\n", netiucv_printuser(conn));
IUCV_DBF_TEXT(data, 2,
"conn_action_connsever: Remote dropped connection\n");
fsm_newstate(fi, CONN_STATE_STARTWAIT);
@@ -886,8 +918,6 @@ static void conn_action_start(fsm_instance *fi, int event, void *arg)
IUCV_DBF_TEXT(trace, 3, __func__);
fsm_newstate(fi, CONN_STATE_STARTWAIT);
- IUCV_DBF_TEXT_(setup, 2, "%s('%s'): connecting ...\n",
- netdev->name, conn->userid);
/*
* We must set the state before calling iucv_connect because the
@@ -897,8 +927,11 @@ static void conn_action_start(fsm_instance *fi, int event, void *arg)
fsm_newstate(fi, CONN_STATE_SETUPWAIT);
conn->path = iucv_path_alloc(NETIUCV_QUEUELEN_DEFAULT, 0, GFP_KERNEL);
+ IUCV_DBF_TEXT_(setup, 2, "%s: connecting to %s ...\n",
+ netdev->name, netiucv_printuser(conn));
+
rc = iucv_path_connect(conn->path, &netiucv_handler, conn->userid,
- NULL, iucvMagic, conn);
+ NULL, conn->userdata, conn);
switch (rc) {
case 0:
netdev->tx_queue_len = conn->path->msglim;
@@ -908,13 +941,13 @@ static void conn_action_start(fsm_instance *fi, int event, void *arg)
case 11:
dev_warn(privptr->dev,
"The IUCV device failed to connect to z/VM guest %s\n",
- netiucv_printname(conn->userid));
+ netiucv_printname(conn->userid, 8));
fsm_newstate(fi, CONN_STATE_STARTWAIT);
break;
case 12:
dev_warn(privptr->dev,
"The IUCV device failed to connect to the peer on z/VM"
- " guest %s\n", netiucv_printname(conn->userid));
+ " guest %s\n", netiucv_printname(conn->userid, 8));
fsm_newstate(fi, CONN_STATE_STARTWAIT);
break;
case 13:
@@ -927,7 +960,7 @@ static void conn_action_start(fsm_instance *fi, int event, void *arg)
dev_err(privptr->dev,
"z/VM guest %s has too many IUCV connections"
" to connect with the IUCV device\n",
- netiucv_printname(conn->userid));
+ netiucv_printname(conn->userid, 8));
fsm_newstate(fi, CONN_STATE_CONNERR);
break;
case 15:
@@ -972,7 +1005,7 @@ static void conn_action_stop(fsm_instance *fi, int event, void *arg)
netiucv_purge_skb_queue(&conn->collect_queue);
if (conn->path) {
IUCV_DBF_TEXT(trace, 5, "calling iucv_path_sever\n");
- iucv_path_sever(conn->path, iucvMagic);
+ iucv_path_sever(conn->path, conn->userdata);
kfree(conn->path);
conn->path = NULL;
}
@@ -1090,7 +1123,8 @@ dev_action_connup(fsm_instance *fi, int event, void *arg)
fsm_newstate(fi, DEV_STATE_RUNNING);
dev_info(privptr->dev,
"The IUCV device has been connected"
- " successfully to %s\n", privptr->conn->userid);
+ " successfully to %s\n",
+ netiucv_printuser(privptr->conn));
IUCV_DBF_TEXT(setup, 3,
"connection is up and running\n");
break;
@@ -1452,45 +1486,72 @@ static ssize_t user_show(struct device *dev, struct device_attribute *attr,
struct netiucv_priv *priv = dev_get_drvdata(dev);
IUCV_DBF_TEXT(trace, 5, __func__);
- return sprintf(buf, "%s\n", netiucv_printname(priv->conn->userid));
+ return sprintf(buf, "%s\n", netiucv_printuser(priv->conn));
}
-static ssize_t user_write(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static int netiucv_check_user(const char *buf, size_t count, char *username,
+ char *userdata)
{
- struct netiucv_priv *priv = dev_get_drvdata(dev);
- struct net_device *ndev = priv->conn->netdev;
- char *p;
- char *tmp;
- char username[9];
- int i;
- struct iucv_connection *cp;
+ const char *p;
+ int i;
- IUCV_DBF_TEXT(trace, 3, __func__);
- if (count > 9) {
- IUCV_DBF_TEXT_(setup, 2,
- "%d is length of username\n", (int) count);
+ p = strchr(buf, '.');
+ if ((p && ((count > 26) ||
+ ((p - buf) > 8) ||
+ (buf + count - p > 18))) ||
+ (!p && (count > 9))) {
+ IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n");
return -EINVAL;
}
- tmp = strsep((char **) &buf, "\n");
- for (i = 0, p = tmp; i < 8 && *p; i++, p++) {
- if (isalnum(*p) || (*p == '$')) {
- username[i]= toupper(*p);
+ for (i = 0, p = buf; i < 8 && *p && *p != '.'; i++, p++) {
+ if (isalnum(*p) || *p == '$') {
+ username[i] = toupper(*p);
continue;
}
- if (*p == '\n') {
+ if (*p == '\n')
/* trailing lf, grr */
break;
- }
IUCV_DBF_TEXT_(setup, 2,
- "username: invalid character %c\n", *p);
+ "conn_write: invalid character %02x\n", *p);
return -EINVAL;
}
while (i < 8)
username[i++] = ' ';
username[8] = '\0';
+ if (*p == '.') {
+ p++;
+ for (i = 0; i < 16 && *p; i++, p++) {
+ if (*p == '\n')
+ break;
+ userdata[i] = toupper(*p);
+ }
+ while (i > 0 && i < 16)
+ userdata[i++] = ' ';
+ } else
+ memcpy(userdata, iucvMagic_ascii, 16);
+ userdata[16] = '\0';
+ ASCEBC(userdata, 16);
+
+ return 0;
+}
+
+static ssize_t user_write(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct netiucv_priv *priv = dev_get_drvdata(dev);
+ struct net_device *ndev = priv->conn->netdev;
+ char username[9];
+ char userdata[17];
+ int rc;
+ struct iucv_connection *cp;
+
+ IUCV_DBF_TEXT(trace, 3, __func__);
+ rc = netiucv_check_user(buf, count, username, userdata);
+ if (rc)
+ return rc;
+
if (memcmp(username, priv->conn->userid, 9) &&
(ndev->flags & (IFF_UP | IFF_RUNNING))) {
/* username changed while the interface is active. */
@@ -1499,15 +1560,17 @@ static ssize_t user_write(struct device *dev, struct device_attribute *attr,
}
read_lock_bh(&iucv_connection_rwlock);
list_for_each_entry(cp, &iucv_connection_list, list) {
- if (!strncmp(username, cp->userid, 9) && cp->netdev != ndev) {
+ if (!strncmp(username, cp->userid, 9) &&
+ !strncmp(userdata, cp->userdata, 17) && cp->netdev != ndev) {
read_unlock_bh(&iucv_connection_rwlock);
- IUCV_DBF_TEXT_(setup, 2, "user_write: Connection "
- "to %s already exists\n", username);
+ IUCV_DBF_TEXT_(setup, 2, "user_write: Connection to %s "
+ "already exists\n", netiucv_printuser(cp));
return -EEXIST;
}
}
read_unlock_bh(&iucv_connection_rwlock);
memcpy(priv->conn->userid, username, 9);
+ memcpy(priv->conn->userdata, userdata, 17);
return count;
}
@@ -1537,7 +1600,8 @@ static ssize_t buffer_write (struct device *dev, struct device_attribute *attr,
bs1 = simple_strtoul(buf, &e, 0);
if (e && (!isspace(*e))) {
- IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %c\n", *e);
+ IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %02x\n",
+ *e);
return -EINVAL;
}
if (bs1 > NETIUCV_BUFSIZE_MAX) {
@@ -1864,7 +1928,8 @@ static void netiucv_unregister_device(struct device *dev)
* Add it to the list of netiucv connections;
*/
static struct iucv_connection *netiucv_new_connection(struct net_device *dev,
- char *username)
+ char *username,
+ char *userdata)
{
struct iucv_connection *conn;
@@ -1893,6 +1958,8 @@ static struct iucv_connection *netiucv_new_connection(struct net_device *dev,
fsm_settimer(conn->fsm, &conn->timer);
fsm_newstate(conn->fsm, CONN_STATE_INVALID);
+ if (userdata)
+ memcpy(conn->userdata, userdata, 17);
if (username) {
memcpy(conn->userid, username, 9);
fsm_newstate(conn->fsm, CONN_STATE_STOPPED);
@@ -1919,6 +1986,7 @@ out:
*/
static void netiucv_remove_connection(struct iucv_connection *conn)
{
+
IUCV_DBF_TEXT(trace, 3, __func__);
write_lock_bh(&iucv_connection_rwlock);
list_del_init(&conn->list);
@@ -1926,7 +1994,7 @@ static void netiucv_remove_connection(struct iucv_connection *conn)
fsm_deltimer(&conn->timer);
netiucv_purge_skb_queue(&conn->collect_queue);
if (conn->path) {
- iucv_path_sever(conn->path, iucvMagic);
+ iucv_path_sever(conn->path, conn->userdata);
kfree(conn->path);
conn->path = NULL;
}
@@ -1985,7 +2053,7 @@ static void netiucv_setup_netdevice(struct net_device *dev)
/**
* Allocate and initialize everything of a net device.
*/
-static struct net_device *netiucv_init_netdevice(char *username)
+static struct net_device *netiucv_init_netdevice(char *username, char *userdata)
{
struct netiucv_priv *privptr;
struct net_device *dev;
@@ -2004,7 +2072,7 @@ static struct net_device *netiucv_init_netdevice(char *username)
if (!privptr->fsm)
goto out_netdev;
- privptr->conn = netiucv_new_connection(dev, username);
+ privptr->conn = netiucv_new_connection(dev, username, userdata);
if (!privptr->conn) {
IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_new_connection\n");
goto out_fsm;
@@ -2022,47 +2090,31 @@ out_netdev:
static ssize_t conn_write(struct device_driver *drv,
const char *buf, size_t count)
{
- const char *p;
char username[9];
- int i, rc;
+ char userdata[17];
+ int rc;
struct net_device *dev;
struct netiucv_priv *priv;
struct iucv_connection *cp;
IUCV_DBF_TEXT(trace, 3, __func__);
- if (count>9) {
- IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n");
- return -EINVAL;
- }
-
- for (i = 0, p = buf; i < 8 && *p; i++, p++) {
- if (isalnum(*p) || *p == '$') {
- username[i] = toupper(*p);
- continue;
- }
- if (*p == '\n')
- /* trailing lf, grr */
- break;
- IUCV_DBF_TEXT_(setup, 2,
- "conn_write: invalid character %c\n", *p);
- return -EINVAL;
- }
- while (i < 8)
- username[i++] = ' ';
- username[8] = '\0';
+ rc = netiucv_check_user(buf, count, username, userdata);
+ if (rc)
+ return rc;
read_lock_bh(&iucv_connection_rwlock);
list_for_each_entry(cp, &iucv_connection_list, list) {
- if (!strncmp(username, cp->userid, 9)) {
+ if (!strncmp(username, cp->userid, 9) &&
+ !strncmp(userdata, cp->userdata, 17)) {
read_unlock_bh(&iucv_connection_rwlock);
- IUCV_DBF_TEXT_(setup, 2, "conn_write: Connection "
- "to %s already exists\n", username);
+ IUCV_DBF_TEXT_(setup, 2, "conn_write: Connection to %s "
+ "already exists\n", netiucv_printuser(cp));
return -EEXIST;
}
}
read_unlock_bh(&iucv_connection_rwlock);
- dev = netiucv_init_netdevice(username);
+ dev = netiucv_init_netdevice(username, userdata);
if (!dev) {
IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n");
return -ENODEV;
@@ -2083,8 +2135,9 @@ static ssize_t conn_write(struct device_driver *drv,
if (rc)
goto out_unreg;
- dev_info(priv->dev, "The IUCV interface to %s has been"
- " established successfully\n", netiucv_printname(username));
+ dev_info(priv->dev, "The IUCV interface to %s has been established "
+ "successfully\n",
+ netiucv_printuser(priv->conn));
return count;
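netiucv_check_user() above accepts either "USERID" or "USERID.userdata", upper-cases and blank-pads both parts, and converts the userdata to EBCDIC before storing it in the connection. A reduced sketch of just the splitting and padding step (no validation or EBCDIC conversion; buffer sizes mirror the driver's 8+16 layout):

#include <linux/ctype.h>
#include <linux/string.h>

/* split "USERID.userdata" into an 8-char and a 16-char blank-padded field */
static void split_user(const char *buf, char *id, char *dat)
{
	const char *dot = strchr(buf, '.');
	int i;

	for (i = 0; i < 8 && buf[i] && buf[i] != '.' && buf[i] != '\n'; i++)
		id[i] = toupper(buf[i]);
	while (i < 8)
		id[i++] = ' ';
	id[8] = '\0';

	for (i = 0; dot && i < 16 && dot[1 + i] && dot[1 + i] != '\n'; i++)
		dat[i] = toupper(dot[1 + i]);
	while (i < 16)
		dat[i++] = ' ';
	dat[16] = '\0';
}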
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index fff57de78943..9c3f38da4c01 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -66,7 +66,7 @@ static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf);
static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
struct qeth_qdio_out_buffer *buf,
enum qeth_qdio_buffer_states newbufstate);
-
+static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int);
static inline const char *qeth_get_cardname(struct qeth_card *card)
{
@@ -363,6 +363,9 @@ static inline enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
static inline void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q,
int bidx, int forced_cleanup)
{
+ if (q->card->options.cq != QETH_CQ_ENABLED)
+ return;
+
if (q->bufs[bidx]->next_pending != NULL) {
struct qeth_qdio_out_buffer *head = q->bufs[bidx];
struct qeth_qdio_out_buffer *c = q->bufs[bidx]->next_pending;
@@ -390,6 +393,13 @@ static inline void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q,
}
}
+ if (forced_cleanup && (atomic_read(&(q->bufs[bidx]->state)) ==
+ QETH_QDIO_BUF_HANDLED_DELAYED)) {
+ /* for recovery situations */
+ q->bufs[bidx]->aob = q->bufstates[bidx].aob;
+ qeth_init_qdio_out_buf(q, bidx);
+ QETH_CARD_TEXT(q->card, 2, "clprecov");
+ }
}
@@ -412,7 +422,6 @@ static inline void qeth_qdio_handle_aob(struct qeth_card *card,
notification = TX_NOTIFY_OK;
} else {
BUG_ON(atomic_read(&buffer->state) != QETH_QDIO_BUF_PENDING);
-
atomic_set(&buffer->state, QETH_QDIO_BUF_IN_CQ);
notification = TX_NOTIFY_DELAYED_OK;
}
@@ -425,7 +434,8 @@ static inline void qeth_qdio_handle_aob(struct qeth_card *card,
buffer->aob = NULL;
qeth_clear_output_buffer(buffer->q, buffer,
- QETH_QDIO_BUF_HANDLED_DELAYED);
+ QETH_QDIO_BUF_HANDLED_DELAYED);
+
/* from here on: do not touch buffer anymore */
qdio_release_aob(aob);
}
@@ -1113,11 +1123,25 @@ out:
static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf)
{
struct sk_buff *skb;
+ struct iucv_sock *iucv;
+ int notify_general_error = 0;
+
+ if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING)
+ notify_general_error = 1;
+
+ /* release may never happen from within CQ tasklet scope */
+ BUG_ON(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ);
skb = skb_dequeue(&buf->skb_list);
while (skb) {
QETH_CARD_TEXT(buf->q->card, 5, "skbr");
QETH_CARD_TEXT_(buf->q->card, 5, "%lx", (long) skb);
+ if (notify_general_error && skb->protocol == ETH_P_AF_IUCV) {
+ if (skb->sk) {
+ iucv = iucv_sk(skb->sk);
+ iucv->sk_txnotify(skb, TX_NOTIFY_GENERALERROR);
+ }
+ }
atomic_dec(&skb->users);
dev_kfree_skb_any(skb);
skb = skb_dequeue(&buf->skb_list);
@@ -1160,7 +1184,7 @@ static void qeth_clear_outq_buffers(struct qeth_qdio_out_q *q, int free)
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
if (!q->bufs[j])
continue;
- qeth_cleanup_handled_pending(q, j, free);
+ qeth_cleanup_handled_pending(q, j, 1);
qeth_clear_output_buffer(q, q->bufs[j], QETH_QDIO_BUF_EMPTY);
if (free) {
kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[j]);
@@ -1207,7 +1231,7 @@ static void qeth_free_qdio_buffers(struct qeth_card *card)
qeth_free_cq(card);
cancel_delayed_work_sync(&card->buffer_reclaim_work);
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
- kfree_skb(card->qdio.in_q->bufs[j].rx_skb);
+ dev_kfree_skb_any(card->qdio.in_q->bufs[j].rx_skb);
kfree(card->qdio.in_q);
card->qdio.in_q = NULL;
/* inbound buffer pool */
@@ -1329,6 +1353,7 @@ static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
static void qeth_start_kernel_thread(struct work_struct *work)
{
+ struct task_struct *ts;
struct qeth_card *card = container_of(work, struct qeth_card,
kernel_thread_starter);
QETH_CARD_TEXT(card , 2, "strthrd");
@@ -1336,9 +1361,15 @@ static void qeth_start_kernel_thread(struct work_struct *work)
if (card->read.state != CH_STATE_UP &&
card->write.state != CH_STATE_UP)
return;
- if (qeth_do_start_thread(card, QETH_RECOVER_THREAD))
- kthread_run(card->discipline.recover, (void *) card,
+ if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) {
+ ts = kthread_run(card->discipline.recover, (void *)card,
"qeth_recover");
+ if (IS_ERR(ts)) {
+ qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
+ qeth_clear_thread_running_bit(card,
+ QETH_RECOVER_THREAD);
+ }
+ }
}
static int qeth_setup_card(struct qeth_card *card)
@@ -4521,7 +4552,7 @@ static int qeth_qdio_establish(struct qeth_card *card)
init_data.no_output_qs = card->qdio.no_out_queues;
init_data.input_handler = card->discipline.input_handler;
init_data.output_handler = card->discipline.output_handler;
- init_data.queue_start_poll = queue_start_poll;
+ init_data.queue_start_poll_array = queue_start_poll;
init_data.int_parm = (unsigned long) card;
init_data.input_sbal_addr_array = (void **) in_sbal_ptrs;
init_data.output_sbal_addr_array = (void **) out_sbal_ptrs;
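
Two API updates are visible in the qeth core hunks above: the qdio initialization field is now called queue_start_poll_array, and qeth_start_kernel_thread() checks the result of kthread_run(). kthread_run() never returns NULL; on failure it returns an error pointer, so IS_ERR() is the correct test, and the driver rolls its thread-start/thread-running bits back in that case. A hedged, generic sketch of the pattern (the worker name and the rollback hook are illustrative):

    #include <linux/kthread.h>
    #include <linux/err.h>

    /*
     * Start a background worker and undo our bookkeeping if thread
     * creation fails.  kthread_run() returns ERR_PTR(-ENOMEM) or a
     * similar error pointer on failure, never NULL.
     */
    static int start_recovery_worker(int (*fn)(void *), void *data,
                                     void (*rollback)(void *))
    {
        struct task_struct *ts;

        ts = kthread_run(fn, data, "recovery_worker");
        if (IS_ERR(ts)) {
            rollback(data);     /* clear the "thread is starting" state */
            return PTR_ERR(ts);
        }
        return 0;
    }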
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index a21ae3d549db..c12967133114 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -301,21 +301,21 @@ static void qeth_l2_process_vlans(struct qeth_card *card)
spin_unlock_bh(&card->vlanlock);
}
-static void qeth_l2_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+static int qeth_l2_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
struct qeth_card *card = dev->ml_priv;
struct qeth_vlan_vid *id;
QETH_CARD_TEXT_(card, 4, "aid:%d", vid);
if (!vid)
- return;
+ return 0;
if (card->info.type == QETH_CARD_TYPE_OSM) {
QETH_CARD_TEXT(card, 3, "aidOSM");
- return;
+ return 0;
}
if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
QETH_CARD_TEXT(card, 3, "aidREC");
- return;
+ return 0;
}
id = kmalloc(sizeof(struct qeth_vlan_vid), GFP_ATOMIC);
if (id) {
@@ -324,10 +324,13 @@ static void qeth_l2_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
spin_lock_bh(&card->vlanlock);
list_add_tail(&id->list, &card->vid_list);
spin_unlock_bh(&card->vlanlock);
+ } else {
+ return -ENOMEM;
}
+ return 0;
}
-static void qeth_l2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
struct qeth_vlan_vid *id, *tmpid = NULL;
struct qeth_card *card = dev->ml_priv;
@@ -335,11 +338,11 @@ static void qeth_l2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
QETH_CARD_TEXT_(card, 4, "kid:%d", vid);
if (card->info.type == QETH_CARD_TYPE_OSM) {
QETH_CARD_TEXT(card, 3, "kidOSM");
- return;
+ return 0;
}
if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
QETH_CARD_TEXT(card, 3, "kidREC");
- return;
+ return 0;
}
spin_lock_bh(&card->vlanlock);
list_for_each_entry(id, &card->vid_list, list) {
@@ -355,6 +358,7 @@ static void qeth_l2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
kfree(tmpid);
}
qeth_l2_set_multicast_list(card->dev);
+ return 0;
}
static int qeth_l2_stop_card(struct qeth_card *card, int recovery_mode)
@@ -1169,6 +1173,7 @@ static void __exit qeth_l2_exit(void)
static void qeth_l2_shutdown(struct ccwgroup_device *gdev)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+ qeth_set_allowed_threads(card, 0, 1);
if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap)
qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
qeth_qdio_clear_card(card, 0);
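
The qeth_l2 VLAN hunks follow the 3.3 change to struct net_device_ops that turned the VLAN add/kill callbacks from void into int, so a driver can report failures such as -ENOMEM back to the 8021q layer instead of dropping them silently; the early-exit paths simply return 0. A hedged sketch of the new callback shape, with an illustrative driver-private list in place of qeth's bookkeeping (locking omitted for brevity):

    #include <linux/netdevice.h>
    #include <linux/list.h>
    #include <linux/slab.h>

    /* Illustrative per-device private data and per-VID entry. */
    struct my_priv {
        struct list_head vid_list;
    };

    struct my_vid_entry {
        struct list_head list;
        unsigned short vid;
    };

    /*
     * Since Linux 3.3, ndo_vlan_rx_add_vid() returns int, so an
     * allocation failure propagates to the caller.
     */
    static int my_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
    {
        struct my_priv *priv = netdev_priv(dev);
        struct my_vid_entry *e;

        if (!vid)
            return 0;           /* nothing to record for VID 0 */

        e = kmalloc(sizeof(*e), GFP_ATOMIC);
        if (!e)
            return -ENOMEM;
        e->vid = vid;
        list_add_tail(&e->list, &priv->vid_list);
        return 0;
    }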
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 4d5307ddbe55..9648e4e68337 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1869,15 +1869,15 @@ static void qeth_l3_free_vlan_addresses(struct qeth_card *card,
qeth_l3_free_vlan_addresses6(card, vid);
}
-static void qeth_l3_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+static int qeth_l3_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
struct qeth_card *card = dev->ml_priv;
set_bit(vid, card->active_vlans);
- return;
+ return 0;
}
-static void qeth_l3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
struct qeth_card *card = dev->ml_priv;
unsigned long flags;
@@ -1885,7 +1885,7 @@ static void qeth_l3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
QETH_CARD_TEXT_(card, 4, "kid:%d", vid);
if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
QETH_CARD_TEXT(card, 3, "kidREC");
- return;
+ return 0;
}
spin_lock_irqsave(&card->vlanlock, flags);
/* unregister IP addresses of vlan device */
@@ -1893,6 +1893,7 @@ static void qeth_l3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
clear_bit(vid, card->active_vlans);
spin_unlock_irqrestore(&card->vlanlock, flags);
qeth_l3_set_multicast_list(card->dev);
+ return 0;
}
static inline int qeth_l3_rebuild_skb(struct qeth_card *card,
@@ -2759,7 +2760,7 @@ int inline qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
rcu_read_lock();
dst = skb_dst(skb);
if (dst)
- n = dst_get_neighbour(dst);
+ n = dst_get_neighbour_noref(dst);
if (n) {
cast_type = n->type;
rcu_read_unlock();
@@ -2855,7 +2856,7 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
rcu_read_lock();
dst = skb_dst(skb);
if (dst)
- n = dst_get_neighbour(dst);
+ n = dst_get_neighbour_noref(dst);
if (ipv == 4) {
/* IPv4 */
hdr->hdr.l3.flags = qeth_l3_get_qeth_hdr_flags4(cast_type);
@@ -3209,7 +3210,8 @@ static int qeth_l3_stop(struct net_device *dev)
return 0;
}
-static u32 qeth_l3_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t qeth_l3_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
struct qeth_card *card = dev->ml_priv;
@@ -3223,7 +3225,8 @@ static u32 qeth_l3_fix_features(struct net_device *dev, u32 features)
return features;
}
-static int qeth_l3_set_features(struct net_device *dev, u32 features)
+static int qeth_l3_set_features(struct net_device *dev,
+ netdev_features_t features)
{
struct qeth_card *card = dev->ml_priv;
u32 changed = dev->features ^ features;
@@ -3489,14 +3492,13 @@ contin:
else
netif_carrier_off(card->dev);
if (recover_flag == CARD_STATE_RECOVER) {
+ rtnl_lock();
if (recovery_mode)
__qeth_l3_open(card->dev);
- else {
- rtnl_lock();
+ else
dev_open(card->dev);
- rtnl_unlock();
- }
qeth_l3_set_multicast_list(card->dev);
+ rtnl_unlock();
}
/* let user_space know that device is online */
kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
@@ -3542,6 +3544,11 @@ static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev,
card->info.hwtrap = 1;
}
qeth_l3_stop_card(card, recovery_mode);
+ if ((card->options.cq == QETH_CQ_ENABLED) && card->dev) {
+ rtnl_lock();
+ call_netdevice_notifiers(NETDEV_REBOOT, card->dev);
+ rtnl_unlock();
+ }
rc = ccw_device_set_offline(CARD_DDEV(card));
rc2 = ccw_device_set_offline(CARD_WDEV(card));
rc3 = ccw_device_set_offline(CARD_RDEV(card));
@@ -3596,6 +3603,7 @@ static int qeth_l3_recover(void *ptr)
static void qeth_l3_shutdown(struct ccwgroup_device *gdev)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+ qeth_set_allowed_threads(card, 0, 1);
if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap)
qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
qeth_qdio_clear_card(card, 0);
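
Besides the same VLAN return-code and shutdown changes as the L2 discipline, the L3 hunks pick up two further 3.3 API updates: dst_get_neighbour() becomes the reference-free dst_get_neighbour_noref(), which relies on the caller holding rcu_read_lock() as the surrounding code already does, and the feature callbacks switch from u32 to netdev_features_t, a wider type introduced so the feature bit space can grow beyond 32 bits. A hedged sketch of the new feature-callback prototypes (the conditions and the RXCSUM handling are illustrative only):

    #include <linux/netdevice.h>

    /*
     * ndo_fix_features() masks out what the device cannot currently do;
     * feature masks are netdev_features_t since 3.3.
     */
    static netdev_features_t my_fix_features(struct net_device *dev,
                                             netdev_features_t features)
    {
        if (!netif_running(dev))            /* illustrative condition */
            features &= ~NETIF_F_IP_CSUM;
        return features;
    }

    static int my_set_features(struct net_device *dev,
                               netdev_features_t features)
    {
        netdev_features_t changed = dev->features ^ features;

        if (changed & NETIF_F_RXCSUM) {
            /* reprogram hardware RX checksumming here */
        }
        return 0;
    }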
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 11f07f888223..b79576b64f45 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -55,6 +55,10 @@ static void zfcp_scsi_slave_destroy(struct scsi_device *sdev)
{
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+ /* if previous slave_alloc returned early, there is nothing to do */
+ if (!zfcp_sdev->port)
+ return;
+
zfcp_erp_lun_shutdown_wait(sdev, "scssd_1");
put_device(&zfcp_sdev->port->dev);
}
diff --git a/drivers/sbus/char/bbc_i2c.c b/drivers/sbus/char/bbc_i2c.c
index 5f94d22c491e..542668292900 100644
--- a/drivers/sbus/char/bbc_i2c.c
+++ b/drivers/sbus/char/bbc_i2c.c
@@ -233,13 +233,9 @@ int bbc_i2c_write_buf(struct bbc_i2c_client *client,
int ret = 0;
while (len > 0) {
- int err = bbc_i2c_writeb(client, *buf, off);
-
- if (err < 0) {
- ret = err;
+ ret = bbc_i2c_writeb(client, *buf, off);
+ if (ret < 0)
break;
- }
-
len--;
buf++;
off++;
@@ -253,11 +249,9 @@ int bbc_i2c_read_buf(struct bbc_i2c_client *client,
int ret = 0;
while (len > 0) {
- int err = bbc_i2c_readb(client, buf, off);
- if (err < 0) {
- ret = err;
+ ret = bbc_i2c_readb(client, buf, off);
+ if (ret < 0)
break;
- }
len--;
buf++;
off++;
@@ -422,17 +416,6 @@ static struct platform_driver bbc_i2c_driver = {
.remove = __devexit_p(bbc_i2c_remove),
};
-static int __init bbc_i2c_init(void)
-{
- return platform_driver_register(&bbc_i2c_driver);
-}
-
-static void __exit bbc_i2c_exit(void)
-{
- platform_driver_unregister(&bbc_i2c_driver);
-}
-
-module_init(bbc_i2c_init);
-module_exit(bbc_i2c_exit);
+module_platform_driver(bbc_i2c_driver);
MODULE_LICENSE("GPL");
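
This conversion, and the four that follow for display7seg, envctrl, flash and uctrl, replace hand-written module init/exit pairs with module_platform_driver(), a helper that expands to exactly that registration boilerplate. A sketch of what the macro stands for, with a placeholder driver (probe/remove callbacks omitted):

    #include <linux/module.h>
    #include <linux/platform_device.h>

    static struct platform_driver example_driver = {
        .driver = {
            .name  = "example",         /* placeholder name */
            .owner = THIS_MODULE,
        },
    };

    /*
     * module_platform_driver(example_driver) generates the equivalent of:
     *
     *   static int __init example_driver_init(void)
     *   {
     *           return platform_driver_register(&example_driver);
     *   }
     *   module_init(example_driver_init);
     *
     *   static void __exit example_driver_exit(void)
     *   {
     *           platform_driver_unregister(&example_driver);
     *   }
     *   module_exit(example_driver_exit);
     */
    module_platform_driver(example_driver);

    MODULE_LICENSE("GPL");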
diff --git a/drivers/sbus/char/display7seg.c b/drivers/sbus/char/display7seg.c
index 965a1fccd66a..4b9939726c34 100644
--- a/drivers/sbus/char/display7seg.c
+++ b/drivers/sbus/char/display7seg.c
@@ -275,15 +275,4 @@ static struct platform_driver d7s_driver = {
.remove = __devexit_p(d7s_remove),
};
-static int __init d7s_init(void)
-{
- return platform_driver_register(&d7s_driver);
-}
-
-static void __exit d7s_exit(void)
-{
- platform_driver_unregister(&d7s_driver);
-}
-
-module_init(d7s_init);
-module_exit(d7s_exit);
+module_platform_driver(d7s_driver);
diff --git a/drivers/sbus/char/envctrl.c b/drivers/sbus/char/envctrl.c
index be7b4e56154f..339fd6f65eda 100644
--- a/drivers/sbus/char/envctrl.c
+++ b/drivers/sbus/char/envctrl.c
@@ -1138,16 +1138,6 @@ static struct platform_driver envctrl_driver = {
.remove = __devexit_p(envctrl_remove),
};
-static int __init envctrl_init(void)
-{
- return platform_driver_register(&envctrl_driver);
-}
-
-static void __exit envctrl_exit(void)
-{
- platform_driver_unregister(&envctrl_driver);
-}
+module_platform_driver(envctrl_driver);
-module_init(envctrl_init);
-module_exit(envctrl_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/sbus/char/flash.c b/drivers/sbus/char/flash.c
index 73dd4e7afaaa..826157f38694 100644
--- a/drivers/sbus/char/flash.c
+++ b/drivers/sbus/char/flash.c
@@ -216,16 +216,6 @@ static struct platform_driver flash_driver = {
.remove = __devexit_p(flash_remove),
};
-static int __init flash_init(void)
-{
- return platform_driver_register(&flash_driver);
-}
-
-static void __exit flash_cleanup(void)
-{
- platform_driver_unregister(&flash_driver);
-}
+module_platform_driver(flash_driver);
-module_init(flash_init);
-module_exit(flash_cleanup);
MODULE_LICENSE("GPL");
diff --git a/drivers/sbus/char/uctrl.c b/drivers/sbus/char/uctrl.c
index ebce9639a26a..0b31658ccde5 100644
--- a/drivers/sbus/char/uctrl.c
+++ b/drivers/sbus/char/uctrl.c
@@ -435,16 +435,6 @@ static struct platform_driver uctrl_driver = {
};
-static int __init uctrl_init(void)
-{
- return platform_driver_register(&uctrl_driver);
-}
-
-static void __exit uctrl_exit(void)
-{
- platform_driver_unregister(&uctrl_driver);
-}
+module_platform_driver(uctrl_driver);
-module_init(uctrl_init);
-module_exit(uctrl_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 06ea3bcfdd2a..16570aa84aac 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -830,16 +830,11 @@ config SCSI_ISCI
tristate "Intel(R) C600 Series Chipset SAS Controller"
depends on PCI && SCSI
depends on X86
- # (temporary): known alpha quality driver
- depends on EXPERIMENTAL
select SCSI_SAS_LIBSAS
- select SCSI_SAS_HOST_SMP
---help---
This driver supports the 6Gb/s SAS capabilities of the storage
control unit found in the Intel(R) C600 series chipset.
- The experimental tag will be removed after the driver exits alpha
-
config SCSI_GENERIC_NCR5380
tristate "Generic NCR5380/53c400 SCSI PIO support"
depends on ISA && SCSI
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index 8a0b33033177..0bd38da4ada0 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -650,6 +650,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
AAC_OPT_NEW_COMM) ?
(dev->scsi_host_ptr->max_sectors << 9) :
65536)) {
+ kfree(usg);
rcode = -EINVAL;
goto cleanup;
}
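
The one-line aacraid change frees the copied user scatter-gather list (usg) on a validation failure that previously jumped to the cleanup label without releasing it, plugging a memory leak on that error path. The general rule it illustrates: every exit taken after an allocation must either free the object or branch to a label that does. A hedged, self-contained sketch with illustrative names:

    #include <linux/slab.h>
    #include <linux/errno.h>

    /*
     * Error-unwind pattern: the copy made at the top of the function is
     * released on every exit path, including validation failures that
     * are only discovered after the allocation.
     */
    static int process_copied_buffer(const void *src, size_t len, size_t max_len)
    {
        void *copy;
        int rc = 0;

        copy = kmemdup(src, len, GFP_KERNEL);
        if (!copy)
            return -ENOMEM;

        if (len > max_len) {            /* check that comes after the alloc */
            rc = -EINVAL;
            goto out_free;              /* do not leak the copy */
        }

        /* ...use the copy... */

    out_free:
        kfree(copy);
        return rc;
    }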
diff --git a/drivers/scsi/aha1542.c b/drivers/scsi/aha1542.c
index 195823a51aab..ed119cedaae0 100644
--- a/drivers/scsi/aha1542.c
+++ b/drivers/scsi/aha1542.c
@@ -102,7 +102,7 @@ static int setup_dmaspeed[MAXBOARDS] __initdata = { -1, -1, -1, -1 };
*/
#if defined(MODULE)
-static int isapnp = 0;
+static bool isapnp = 0;
static int aha1542[] = {0x330, 11, 4, -1};
module_param_array(aha1542, int, NULL, 0);
module_param(isapnp, bool, 0);
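
The aha1542 change is part of the 3.3-era cleanup that requires variables declared with module_param(..., bool, ...) to really have type bool; keeping them as int now produces a build warning. A minimal illustrative parameter declaration:

    #include <linux/module.h>
    #include <linux/moduleparam.h>

    /* module_param(..., bool, ...) expects a genuine bool variable. */
    static bool enable_feature;         /* illustrative parameter name */
    module_param(enable_feature, bool, 0444);
    MODULE_PARM_DESC(enable_feature, "Enable the illustrative feature");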
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm.c b/drivers/scsi/aic7xxx/aicasm/aicasm.c
index e4a778720301..2e3117aa382f 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm.c
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm.c
@@ -1,5 +1,5 @@
/*
- * Aic7xxx SCSI host adapter firmware asssembler
+ * Aic7xxx SCSI host adapter firmware assembler
*
* Copyright (c) 1997, 1998, 2000, 2001 Justin T. Gibbs.
* Copyright (c) 2001, 2002 Adaptec Inc.
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index 8b002f6db6ca..33c8f09c7ac1 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -733,7 +733,7 @@ void beiscsi_ep_disconnect(struct iscsi_endpoint *ep)
iscsi_destroy_endpoint(beiscsi_ep->openiscsi_ep);
}
-mode_t be2iscsi_attr_is_visible(int param_type, int param)
+umode_t be2iscsi_attr_is_visible(int param_type, int param)
{
switch (param_type) {
case ISCSI_HOST_PARAM:
diff --git a/drivers/scsi/be2iscsi/be_iscsi.h b/drivers/scsi/be2iscsi/be_iscsi.h
index 4a1f2e393f31..5c45be134501 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.h
+++ b/drivers/scsi/be2iscsi/be_iscsi.h
@@ -26,7 +26,7 @@
#define BE2_IPV4 0x1
#define BE2_IPV6 0x10
-mode_t be2iscsi_attr_is_visible(int param_type, int param);
+umode_t be2iscsi_attr_is_visible(int param_type, int param);
void beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
struct beiscsi_offload_params *params);
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 379c696dac19..375756fa95cf 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -325,9 +325,9 @@ static ssize_t beiscsi_show_boot_eth_info(void *data, int type, char *buf)
}
-static mode_t beiscsi_tgt_get_attr_visibility(void *data, int type)
+static umode_t beiscsi_tgt_get_attr_visibility(void *data, int type)
{
- int rc;
+ umode_t rc;
switch (type) {
case ISCSI_BOOT_TGT_NAME:
@@ -348,9 +348,9 @@ static mode_t beiscsi_tgt_get_attr_visibility(void *data, int type)
return rc;
}
-static mode_t beiscsi_ini_get_attr_visibility(void *data, int type)
+static umode_t beiscsi_ini_get_attr_visibility(void *data, int type)
{
- int rc;
+ umode_t rc;
switch (type) {
case ISCSI_BOOT_INI_INITIATOR_NAME:
@@ -364,9 +364,9 @@ static mode_t beiscsi_ini_get_attr_visibility(void *data, int type)
}
-static mode_t beiscsi_eth_get_attr_visibility(void *data, int type)
+static umode_t beiscsi_eth_get_attr_visibility(void *data, int type)
{
- int rc;
+ umode_t rc;
switch (type) {
case ISCSI_BOOT_ETH_FLAGS:
@@ -1105,7 +1105,6 @@ be_complete_io(struct beiscsi_conn *beiscsi_conn,
struct be_status_bhs *sts_bhs =
(struct be_status_bhs *)io_task->cmd_bhs;
struct iscsi_conn *conn = beiscsi_conn->conn;
- unsigned int sense_len;
unsigned char *sense;
u32 resid = 0, exp_cmdsn, max_cmdsn;
u8 rsp, status, flags;
@@ -1153,9 +1152,11 @@ be_complete_io(struct beiscsi_conn *beiscsi_conn,
}
if (status == SAM_STAT_CHECK_CONDITION) {
+ u16 sense_len;
unsigned short *slen = (unsigned short *)sts_bhs->sense_info;
+
sense = sts_bhs->sense_info + sizeof(unsigned short);
- sense_len = cpu_to_be16(*slen);
+ sense_len = be16_to_cpu(*slen);
memcpy(task->sc->sense_buffer, sense,
min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE));
}
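
Beyond the mode_t to umode_t conversions, the be2iscsi hunk above tidies up an endianness conversion: the sense length in the status BHS arrives in big-endian (wire) order, so reading it with be16_to_cpu() states the conversion in the correct direction (wire to host), where the old cpu_to_be16() was semantically backwards even though, for a 16-bit value, both perform the same byte swap. The length is now also held in a u16, matching the min_t(u16, ...) clamp. A tiny hedged sketch of the correct direction:

    #include <linux/types.h>
    #include <asm/byteorder.h>

    /*
     * Reading a 16-bit big-endian field supplied by firmware: convert
     * wire order to host order with be16_to_cpu().  cpu_to_be16() is the
     * reverse annotation, even though a 16-bit byte swap happens to be
     * its own inverse; using the right helper keeps sparse endianness
     * checking honest.
     */
    static u16 read_be16_field(const __be16 *wire_field)
    {
        return be16_to_cpu(*wire_field);
    }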
diff --git a/drivers/scsi/bfa/bfa_defs.h b/drivers/scsi/bfa/bfa_defs.h
index 7b3d235d20b4..b5a1595cc0a5 100644
--- a/drivers/scsi/bfa/bfa_defs.h
+++ b/drivers/scsi/bfa/bfa_defs.h
@@ -902,7 +902,7 @@ struct sfp_mem_s {
union sfp_xcvr_e10g_code_u {
u8 b;
struct {
-#ifdef __BIGENDIAN
+#ifdef __BIG_ENDIAN
u8 e10g_unall:1; /* 10G Ethernet compliance */
u8 e10g_lrm:1;
u8 e10g_lr:1;
@@ -982,7 +982,7 @@ union sfp_xcvr_fc2_code_u {
union sfp_xcvr_fc3_code_u {
u8 b;
struct {
-#ifdef __BIGENDIAN
+#ifdef __BIG_ENDIAN
u8 rsv4:1;
u8 mb800:1; /* 800 Mbytes/sec */
u8 mb1600:1; /* 1600 Mbytes/sec */
diff --git a/drivers/scsi/bfa/bfa_defs_svc.h b/drivers/scsi/bfa/bfa_defs_svc.h
index 863c6ba7d5eb..cb07c628b2f1 100644
--- a/drivers/scsi/bfa/bfa_defs_svc.h
+++ b/drivers/scsi/bfa/bfa_defs_svc.h
@@ -34,22 +34,22 @@
struct bfa_iocfc_intr_attr_s {
u8 coalesce; /* enable/disable coalescing */
u8 rsvd[3];
- __be16 latency; /* latency in microseconds */
- __be16 delay; /* delay in microseconds */
+ __be16 latency; /* latency in microseconds */
+ __be16 delay; /* delay in microseconds */
};
/*
* IOC firmware configuraton
*/
struct bfa_iocfc_fwcfg_s {
- u16 num_fabrics; /* number of fabrics */
- u16 num_lports; /* number of local lports */
- u16 num_rports; /* number of remote ports */
- u16 num_ioim_reqs; /* number of IO reqs */
- u16 num_tskim_reqs; /* task management requests */
- u16 num_fwtio_reqs; /* number of TM IO reqs in FW */
- u16 num_fcxp_reqs; /* unassisted FC exchanges */
- u16 num_uf_bufs; /* unsolicited recv buffers */
+ u16 num_fabrics; /* number of fabrics */
+ u16 num_lports; /* number of local lports */
+ u16 num_rports; /* number of remote ports */
+ u16 num_ioim_reqs; /* number of IO reqs */
+ u16 num_tskim_reqs; /* task management requests */
+ u16 num_fwtio_reqs; /* number of TM IO reqs in FW */
+ u16 num_fcxp_reqs; /* unassisted FC exchanges */
+ u16 num_uf_bufs; /* unsolicited recv buffers */
u8 num_cqs;
u8 fw_tick_res; /* FW clock resolution in ms */
u8 rsvd[2];
@@ -57,19 +57,19 @@ struct bfa_iocfc_fwcfg_s {
#pragma pack()
struct bfa_iocfc_drvcfg_s {
- u16 num_reqq_elems; /* number of req queue elements */
- u16 num_rspq_elems; /* number of rsp queue elements */
- u16 num_sgpgs; /* number of total SG pages */
- u16 num_sboot_tgts; /* number of SAN boot targets */
- u16 num_sboot_luns; /* number of SAN boot luns */
- u16 ioc_recover; /* IOC recovery mode */
- u16 min_cfg; /* minimum configuration */
- u16 path_tov; /* device path timeout */
- u16 num_tio_reqs; /*!< number of TM IO reqs */
+ u16 num_reqq_elems; /* number of req queue elements */
+ u16 num_rspq_elems; /* number of rsp queue elements */
+ u16 num_sgpgs; /* number of total SG pages */
+ u16 num_sboot_tgts; /* number of SAN boot targets */
+ u16 num_sboot_luns; /* number of SAN boot luns */
+ u16 ioc_recover; /* IOC recovery mode */
+ u16 min_cfg; /* minimum configuration */
+ u16 path_tov; /* device path timeout */
+ u16 num_tio_reqs; /* number of TM IO reqs */
u8 port_mode;
u8 rsvd_a;
- bfa_boolean_t delay_comp; /* delay completion of
- failed inflight IOs */
+ bfa_boolean_t delay_comp; /* delay completion of failed
+ * inflight IOs */
u16 num_ttsk_reqs; /* TM task management requests */
u32 rsvd;
};
@@ -101,8 +101,8 @@ struct bfa_fw_ioim_stats_s {
u32 fw_frm_drop; /* f/w drop the frame */
u32 rec_timeout; /* FW rec timed out */
- u32 error_rec; /* FW sending rec on
- * an error condition*/
+ u32 error_rec; /* FW sending rec on
+ * an error condition*/
u32 wait_for_si; /* FW wait for SI */
u32 rec_rsp_inval; /* REC rsp invalid */
u32 seqr_io_abort; /* target does not know cmd so abort */
@@ -124,9 +124,9 @@ struct bfa_fw_ioim_stats_s {
u32 unexp_fcp_rsp; /* fcp response in wrong state */
u32 fcp_rsp_under_run; /* fcp rsp IO underrun */
- u32 fcp_rsp_under_run_wr; /* fcp rsp IO underrun for write */
+ u32 fcp_rsp_under_run_wr; /* fcp rsp IO underrun for write */
u32 fcp_rsp_under_run_err; /* fcp rsp IO underrun error */
- u32 fcp_rsp_resid_inval; /* invalid residue */
+ u32 fcp_rsp_resid_inval; /* invalid residue */
u32 fcp_rsp_over_run; /* fcp rsp IO overrun */
u32 fcp_rsp_over_run_err; /* fcp rsp IO overrun error */
u32 fcp_rsp_proto_err; /* protocol error in fcp rsp */
@@ -142,21 +142,20 @@ struct bfa_fw_ioim_stats_s {
u32 ioh_hit_class2_event; /* IOH hit class2 */
u32 ioh_miss_other_event; /* IOH miss other */
u32 ioh_seq_cnt_err_event; /* IOH seq cnt error */
- u32 ioh_len_err_event; /* IOH len error - fcp_dl !=
- * bytes xfered */
+ u32 ioh_len_err_event; /* IOH len error - fcp_dl !=
+ * bytes xfered */
u32 ioh_seq_len_err_event; /* IOH seq len error */
u32 ioh_data_oor_event; /* Data out of range */
u32 ioh_ro_ooo_event; /* Relative offset out of range */
u32 ioh_cpu_owned_event; /* IOH hit -iost owned by f/w */
u32 ioh_unexp_frame_event; /* unexpected frame received
- * count */
+ * count */
u32 ioh_err_int; /* IOH error int during data-phase
- * for scsi write
- */
+ * for scsi write */
};
struct bfa_fw_tio_stats_s {
- u32 tio_conf_proc; /* TIO CONF processed */
+ u32 tio_conf_proc; /* TIO CONF processed */
u32 tio_conf_drop; /* TIO CONF dropped */
u32 tio_cleanup_req; /* TIO cleanup requested */
u32 tio_cleanup_comp; /* TIO cleanup completed */
@@ -164,34 +163,36 @@ struct bfa_fw_tio_stats_s {
u32 tio_abort_rsp_comp; /* TIO abort rsp completed */
u32 tio_abts_req; /* TIO ABTS requested */
u32 tio_abts_ack; /* TIO ABTS ack-ed */
- u32 tio_abts_ack_nocomp; /* TIO ABTS ack-ed but not completed */
+ u32 tio_abts_ack_nocomp;/* TIO ABTS ack-ed but not completed */
u32 tio_abts_tmo; /* TIO ABTS timeout */
u32 tio_snsdata_dma; /* TIO sense data DMA */
- u32 tio_rxwchan_wait; /* TIO waiting for RX wait channel */
- u32 tio_rxwchan_avail; /* TIO RX wait channel available */
+ u32 tio_rxwchan_wait; /* TIO waiting for RX wait channel */
+ u32 tio_rxwchan_avail; /* TIO RX wait channel available */
u32 tio_hit_bls; /* TIO IOH BLS event */
u32 tio_uf_recv; /* TIO received UF */
- u32 tio_rd_invalid_sm; /* TIO read reqst in wrong state machine */
- u32 tio_wr_invalid_sm;/* TIO write reqst in wrong state machine */
+ u32 tio_rd_invalid_sm; /* TIO read reqst in wrong state machine */
+ u32 tio_wr_invalid_sm; /* TIO write reqst in wrong state machine */
- u32 ds_rxwchan_wait; /* DS waiting for RX wait channel */
- u32 ds_rxwchan_avail; /* DS RX wait channel available */
+ u32 ds_rxwchan_wait; /* DS waiting for RX wait channel */
+ u32 ds_rxwchan_avail; /* DS RX wait channel available */
u32 ds_unaligned_rd; /* DS unaligned read */
- u32 ds_rdcomp_invalid_sm; /* DS read completed in wrong state machine */
- u32 ds_wrcomp_invalid_sm; /* DS write completed in wrong state machine */
+ u32 ds_rdcomp_invalid_sm; /* DS read completed in wrong state
+ * machine */
+ u32 ds_wrcomp_invalid_sm; /* DS write completed in wrong state
+ * machine */
u32 ds_flush_req; /* DS flush requested */
u32 ds_flush_comp; /* DS flush completed */
u32 ds_xfrdy_exp; /* DS XFER_RDY expired */
u32 ds_seq_cnt_err; /* DS seq cnt error */
u32 ds_seq_len_err; /* DS seq len error */
u32 ds_data_oor; /* DS data out of order */
- u32 ds_hit_bls; /* DS hit BLS */
+ u32 ds_hit_bls; /* DS hit BLS */
u32 ds_edtov_timer_exp; /* DS edtov expired */
u32 ds_cpu_owned; /* DS cpu owned */
u32 ds_hit_class2; /* DS hit class2 */
u32 ds_length_err; /* DS length error */
u32 ds_ro_ooo_err; /* DS relative offset out-of-order error */
- u32 ds_rectov_timer_exp; /* DS rectov expired */
+ u32 ds_rectov_timer_exp;/* DS rectov expired */
u32 ds_unexp_fr_err; /* DS unexp frame error */
};
@@ -208,119 +209,119 @@ struct bfa_fw_io_stats_s {
*/
struct bfa_fw_port_fpg_stats_s {
- u32 intr_evt;
- u32 intr;
- u32 intr_excess;
- u32 intr_cause0;
- u32 intr_other;
- u32 intr_other_ign;
- u32 sig_lost;
- u32 sig_regained;
- u32 sync_lost;
- u32 sync_to;
- u32 sync_regained;
- u32 div2_overflow;
- u32 div2_underflow;
- u32 efifo_overflow;
- u32 efifo_underflow;
- u32 idle_rx;
- u32 lrr_rx;
- u32 lr_rx;
- u32 ols_rx;
- u32 nos_rx;
- u32 lip_rx;
- u32 arbf0_rx;
- u32 arb_rx;
- u32 mrk_rx;
- u32 const_mrk_rx;
- u32 prim_unknown;
+ u32 intr_evt;
+ u32 intr;
+ u32 intr_excess;
+ u32 intr_cause0;
+ u32 intr_other;
+ u32 intr_other_ign;
+ u32 sig_lost;
+ u32 sig_regained;
+ u32 sync_lost;
+ u32 sync_to;
+ u32 sync_regained;
+ u32 div2_overflow;
+ u32 div2_underflow;
+ u32 efifo_overflow;
+ u32 efifo_underflow;
+ u32 idle_rx;
+ u32 lrr_rx;
+ u32 lr_rx;
+ u32 ols_rx;
+ u32 nos_rx;
+ u32 lip_rx;
+ u32 arbf0_rx;
+ u32 arb_rx;
+ u32 mrk_rx;
+ u32 const_mrk_rx;
+ u32 prim_unknown;
};
struct bfa_fw_port_lksm_stats_s {
- u32 hwsm_success; /* hwsm state machine success */
- u32 hwsm_fails; /* hwsm fails */
- u32 hwsm_wdtov; /* hwsm timed out */
- u32 swsm_success; /* swsm success */
- u32 swsm_fails; /* swsm fails */
- u32 swsm_wdtov; /* swsm timed out */
- u32 busybufs; /* link init failed due to busybuf */
- u32 buf_waits; /* bufwait state entries */
- u32 link_fails; /* link failures */
- u32 psp_errors; /* primitive sequence protocol errors */
- u32 lr_unexp; /* No. of times LR rx-ed unexpectedly */
- u32 lrr_unexp; /* No. of times LRR rx-ed unexpectedly */
- u32 lr_tx; /* No. of times LR tx started */
- u32 lrr_tx; /* No. of times LRR tx started */
- u32 ols_tx; /* No. of times OLS tx started */
- u32 nos_tx; /* No. of times NOS tx started */
- u32 hwsm_lrr_rx; /* No. of times LRR rx-ed by HWSM */
- u32 hwsm_lr_rx; /* No. of times LR rx-ed by HWSM */
- u32 bbsc_lr; /* LKSM LR tx for credit recovery */
+ u32 hwsm_success; /* hwsm state machine success */
+ u32 hwsm_fails; /* hwsm fails */
+ u32 hwsm_wdtov; /* hwsm timed out */
+ u32 swsm_success; /* swsm success */
+ u32 swsm_fails; /* swsm fails */
+ u32 swsm_wdtov; /* swsm timed out */
+ u32 busybufs; /* link init failed due to busybuf */
+ u32 buf_waits; /* bufwait state entries */
+ u32 link_fails; /* link failures */
+ u32 psp_errors; /* primitive sequence protocol errors */
+ u32 lr_unexp; /* No. of times LR rx-ed unexpectedly */
+ u32 lrr_unexp; /* No. of times LRR rx-ed unexpectedly */
+ u32 lr_tx; /* No. of times LR tx started */
+ u32 lrr_tx; /* No. of times LRR tx started */
+ u32 ols_tx; /* No. of times OLS tx started */
+ u32 nos_tx; /* No. of times NOS tx started */
+ u32 hwsm_lrr_rx; /* No. of times LRR rx-ed by HWSM */
+ u32 hwsm_lr_rx; /* No. of times LR rx-ed by HWSM */
+ u32 bbsc_lr; /* LKSM LR tx for credit recovery */
};
struct bfa_fw_port_snsm_stats_s {
- u32 hwsm_success; /* Successful hwsm terminations */
- u32 hwsm_fails; /* hwsm fail count */
- u32 hwsm_wdtov; /* hwsm timed out */
- u32 swsm_success; /* swsm success */
- u32 swsm_wdtov; /* swsm timed out */
- u32 error_resets; /* error resets initiated by upsm */
- u32 sync_lost; /* Sync loss count */
- u32 sig_lost; /* Signal loss count */
- u32 asn8g_attempts; /* SNSM HWSM at 8Gbps attempts */
+ u32 hwsm_success; /* Successful hwsm terminations */
+ u32 hwsm_fails; /* hwsm fail count */
+ u32 hwsm_wdtov; /* hwsm timed out */
+ u32 swsm_success; /* swsm success */
+ u32 swsm_wdtov; /* swsm timed out */
+ u32 error_resets; /* error resets initiated by upsm */
+ u32 sync_lost; /* Sync loss count */
+ u32 sig_lost; /* Signal loss count */
+ u32 asn8g_attempts; /* SNSM HWSM at 8Gbps attempts */
};
struct bfa_fw_port_physm_stats_s {
- u32 module_inserts; /* Module insert count */
- u32 module_xtracts; /* Module extracts count */
- u32 module_invalids; /* Invalid module inserted count */
- u32 module_read_ign; /* Module validation status ignored */
- u32 laser_faults; /* Laser fault count */
- u32 rsvd;
+ u32 module_inserts; /* Module insert count */
+ u32 module_xtracts; /* Module extracts count */
+ u32 module_invalids; /* Invalid module inserted count */
+ u32 module_read_ign; /* Module validation status ignored */
+ u32 laser_faults; /* Laser fault count */
+ u32 rsvd;
};
struct bfa_fw_fip_stats_s {
- u32 vlan_req; /* vlan discovery requests */
- u32 vlan_notify; /* vlan notifications */
- u32 vlan_err; /* vlan response error */
- u32 vlan_timeouts; /* vlan disvoery timeouts */
- u32 vlan_invalids; /* invalid vlan in discovery advert. */
- u32 disc_req; /* Discovery solicit requests */
- u32 disc_rsp; /* Discovery solicit response */
- u32 disc_err; /* Discovery advt. parse errors */
- u32 disc_unsol; /* Discovery unsolicited */
- u32 disc_timeouts; /* Discovery timeouts */
- u32 disc_fcf_unavail; /* Discovery FCF Not Avail. */
- u32 linksvc_unsupp; /* Unsupported link service req */
- u32 linksvc_err; /* Parse error in link service req */
- u32 logo_req; /* FIP logos received */
- u32 clrvlink_req; /* Clear virtual link req */
- u32 op_unsupp; /* Unsupported FIP operation */
- u32 untagged; /* Untagged frames (ignored) */
- u32 invalid_version; /* Invalid FIP version */
+ u32 vlan_req; /* vlan discovery requests */
+ u32 vlan_notify; /* vlan notifications */
+ u32 vlan_err; /* vlan response error */
+ u32 vlan_timeouts; /* vlan disvoery timeouts */
+ u32 vlan_invalids; /* invalid vlan in discovery advert. */
+ u32 disc_req; /* Discovery solicit requests */
+ u32 disc_rsp; /* Discovery solicit response */
+ u32 disc_err; /* Discovery advt. parse errors */
+ u32 disc_unsol; /* Discovery unsolicited */
+ u32 disc_timeouts; /* Discovery timeouts */
+ u32 disc_fcf_unavail; /* Discovery FCF Not Avail. */
+ u32 linksvc_unsupp; /* Unsupported link service req */
+ u32 linksvc_err; /* Parse error in link service req */
+ u32 logo_req; /* FIP logos received */
+ u32 clrvlink_req; /* Clear virtual link req */
+ u32 op_unsupp; /* Unsupported FIP operation */
+ u32 untagged; /* Untagged frames (ignored) */
+ u32 invalid_version; /* Invalid FIP version */
};
struct bfa_fw_lps_stats_s {
- u32 mac_invalids; /* Invalid mac assigned */
- u32 rsvd;
+ u32 mac_invalids; /* Invalid mac assigned */
+ u32 rsvd;
};
struct bfa_fw_fcoe_stats_s {
- u32 cee_linkups; /* CEE link up count */
- u32 cee_linkdns; /* CEE link down count */
- u32 fip_linkups; /* FIP link up count */
- u32 fip_linkdns; /* FIP link up count */
- u32 fip_fails; /* FIP fail count */
- u32 mac_invalids; /* Invalid mac assigned */
+ u32 cee_linkups; /* CEE link up count */
+ u32 cee_linkdns; /* CEE link down count */
+ u32 fip_linkups; /* FIP link up count */
+ u32 fip_linkdns; /* FIP link up count */
+ u32 fip_fails; /* FIP fail count */
+ u32 mac_invalids; /* Invalid mac assigned */
};
/*
* IOC firmware FCoE port stats
*/
struct bfa_fw_fcoe_port_stats_s {
- struct bfa_fw_fcoe_stats_s fcoe_stats;
- struct bfa_fw_fip_stats_s fip_stats;
+ struct bfa_fw_fcoe_stats_s fcoe_stats;
+ struct bfa_fw_fip_stats_s fip_stats;
};
/*
@@ -335,8 +336,8 @@ struct bfa_fw_fc_uport_stats_s {
* IOC firmware FC port stats
*/
union bfa_fw_fc_port_stats_s {
- struct bfa_fw_fc_uport_stats_s fc_stats;
- struct bfa_fw_fcoe_port_stats_s fcoe_stats;
+ struct bfa_fw_fc_uport_stats_s fc_stats;
+ struct bfa_fw_fcoe_port_stats_s fcoe_stats;
};
/*
@@ -366,25 +367,25 @@ struct bfa_fw_lpsm_stats_s {
*/
struct bfa_fw_trunk_stats_s {
u32 emt_recvd; /* Trunk EMT received */
- u32 emt_accepted; /* Trunk EMT Accepted */
- u32 emt_rejected; /* Trunk EMT rejected */
+ u32 emt_accepted; /* Trunk EMT Accepted */
+ u32 emt_rejected; /* Trunk EMT rejected */
u32 etp_recvd; /* Trunk ETP received */
- u32 etp_accepted; /* Trunk ETP Accepted */
- u32 etp_rejected; /* Trunk ETP rejected */
+ u32 etp_accepted; /* Trunk ETP Accepted */
+ u32 etp_rejected; /* Trunk ETP rejected */
u32 lr_recvd; /* Trunk LR received */
- u32 rsvd; /* padding for 64 bit alignment */
+ u32 rsvd; /* padding for 64 bit alignment */
};
struct bfa_fw_advsm_stats_s {
u32 flogi_sent; /* Flogi sent */
u32 flogi_acc_recvd; /* Flogi Acc received */
u32 flogi_rjt_recvd; /* Flogi rejects received */
- u32 flogi_retries; /* Flogi retries */
+ u32 flogi_retries; /* Flogi retries */
u32 elp_recvd; /* ELP received */
- u32 elp_accepted; /* ELP Accepted */
- u32 elp_rejected; /* ELP rejected */
- u32 elp_dropped; /* ELP dropped */
+ u32 elp_accepted; /* ELP Accepted */
+ u32 elp_rejected; /* ELP rejected */
+ u32 elp_dropped; /* ELP dropped */
};
/*
@@ -521,7 +522,7 @@ struct bfa_qos_vc_attr_s {
u16 total_vc_count; /* Total VC Count */
u16 shared_credit;
u32 elp_opmode_flags;
- struct bfa_qos_vc_info_s vc_info[BFA_QOS_MAX_VC]; /* as many as
+ struct bfa_qos_vc_info_s vc_info[BFA_QOS_MAX_VC]; /* as many as
* total_vc_count */
};
@@ -531,16 +532,16 @@ struct bfa_qos_vc_attr_s {
struct bfa_qos_stats_s {
u32 flogi_sent; /* QoS Flogi sent */
u32 flogi_acc_recvd; /* QoS Flogi Acc received */
- u32 flogi_rjt_recvd; /* QoS Flogi rejects received */
+ u32 flogi_rjt_recvd; /* QoS Flogi rejects received */
u32 flogi_retries; /* QoS Flogi retries */
u32 elp_recvd; /* QoS ELP received */
u32 elp_accepted; /* QoS ELP Accepted */
- u32 elp_rejected; /* QoS ELP rejected */
- u32 elp_dropped; /* QoS ELP dropped */
+ u32 elp_rejected; /* QoS ELP rejected */
+ u32 elp_dropped; /* QoS ELP dropped */
- u32 qos_rscn_recvd; /* QoS RSCN received */
- u32 rsvd; /* padding for 64 bit alignment */
+ u32 qos_rscn_recvd; /* QoS RSCN received */
+ u32 rsvd; /* padding for 64 bit alignment */
};
/*
@@ -548,9 +549,9 @@ struct bfa_qos_stats_s {
*/
struct bfa_fcoe_stats_s {
u64 secs_reset; /* Seconds since stats reset */
- u64 cee_linkups; /* CEE link up */
+ u64 cee_linkups; /* CEE link up */
u64 cee_linkdns; /* CEE link down */
- u64 fip_linkups; /* FIP link up */
+ u64 fip_linkups; /* FIP link up */
u64 fip_linkdns; /* FIP link down */
u64 fip_fails; /* FIP failures */
u64 mac_invalids; /* Invalid mac assignments */
@@ -560,38 +561,38 @@ struct bfa_fcoe_stats_s {
u64 vlan_timeouts; /* Vlan request timeouts */
u64 vlan_invalids; /* Vlan invalids */
u64 disc_req; /* Discovery requests */
- u64 disc_rsp; /* Discovery responses */
+ u64 disc_rsp; /* Discovery responses */
u64 disc_err; /* Discovery error frames */
u64 disc_unsol; /* Discovery unsolicited */
u64 disc_timeouts; /* Discovery timeouts */
u64 disc_fcf_unavail; /* Discovery FCF not avail */
- u64 linksvc_unsupp; /* FIP link service req unsupp. */
- u64 linksvc_err; /* FIP link service req errors */
+ u64 linksvc_unsupp; /* FIP link service req unsupp */
+ u64 linksvc_err; /* FIP link service req errors */
u64 logo_req; /* FIP logos received */
- u64 clrvlink_req; /* Clear virtual link requests */
+ u64 clrvlink_req; /* Clear virtual link requests */
u64 op_unsupp; /* FIP operation unsupp. */
- u64 untagged; /* FIP untagged frames */
+ u64 untagged; /* FIP untagged frames */
u64 txf_ucast; /* Tx FCoE unicast frames */
- u64 txf_ucast_vlan; /* Tx FCoE unicast vlan frames */
+ u64 txf_ucast_vlan; /* Tx FCoE unicast vlan frames */
u64 txf_ucast_octets; /* Tx FCoE unicast octets */
u64 txf_mcast; /* Tx FCoE multicast frames */
- u64 txf_mcast_vlan; /* Tx FCoE multicast vlan frames */
+ u64 txf_mcast_vlan; /* Tx FCoE multicast vlan frames */
u64 txf_mcast_octets; /* Tx FCoE multicast octets */
u64 txf_bcast; /* Tx FCoE broadcast frames */
- u64 txf_bcast_vlan; /* Tx FCoE broadcast vlan frames */
+ u64 txf_bcast_vlan; /* Tx FCoE broadcast vlan frames */
u64 txf_bcast_octets; /* Tx FCoE broadcast octets */
- u64 txf_timeout; /* Tx timeouts */
+ u64 txf_timeout; /* Tx timeouts */
u64 txf_parity_errors; /* Transmit parity err */
- u64 txf_fid_parity_errors; /* Transmit FID parity err */
+ u64 txf_fid_parity_errors; /* Transmit FID parity err */
u64 rxf_ucast_octets; /* Rx FCoE unicast octets */
u64 rxf_ucast; /* Rx FCoE unicast frames */
- u64 rxf_ucast_vlan; /* Rx FCoE unicast vlan frames */
+ u64 rxf_ucast_vlan; /* Rx FCoE unicast vlan frames */
u64 rxf_mcast_octets; /* Rx FCoE multicast octets */
u64 rxf_mcast; /* Rx FCoE multicast frames */
- u64 rxf_mcast_vlan; /* Rx FCoE multicast vlan frames */
+ u64 rxf_mcast_vlan; /* Rx FCoE multicast vlan frames */
u64 rxf_bcast_octets; /* Rx FCoE broadcast octets */
u64 rxf_bcast; /* Rx FCoE broadcast frames */
- u64 rxf_bcast_vlan; /* Rx FCoE broadcast vlan frames */
+ u64 rxf_bcast_vlan; /* Rx FCoE broadcast vlan frames */
};
/*
@@ -672,12 +673,7 @@ struct bfa_itnim_iostats_s {
u32 tm_iocdowns; /* TM cleaned-up due to IOC down */
u32 tm_cleanups; /* TM cleanup requests */
u32 tm_cleanup_comps; /* TM cleanup completions */
- u32 lm_lun_across_sg; /* LM lun is across sg data buf */
- u32 lm_lun_not_sup; /* LM lun not supported */
- u32 lm_rpl_data_changed; /* LM report-lun data changed */
- u32 lm_wire_residue_changed; /* LM report-lun rsp residue changed */
- u32 lm_small_buf_addresidue; /* LM buf smaller than reported cnt */
- u32 lm_lun_not_rdy; /* LM lun not ready */
+ u32 rsvd[6];
};
/* Modify char* port_stt[] in bfal_port.c if a new state was added */
@@ -852,12 +848,12 @@ struct bfa_port_cfg_s {
u8 tx_bbcredit; /* transmit buffer credits */
u8 ratelimit; /* ratelimit enabled or not */
u8 trl_def_speed; /* ratelimit default speed */
- u8 bb_scn; /* BB_SCN value from FLOGI Exchg */
- u8 bb_scn_state; /* Config state of BB_SCN */
- u8 faa_state; /* FAA enabled/disabled */
- u8 rsvd[1];
- u16 path_tov; /* device path timeout */
- u16 q_depth; /* SCSI Queue depth */
+ u8 bb_scn; /* BB_SCN value from FLOGI Exchg */
+ u8 bb_scn_state; /* Config state of BB_SCN */
+ u8 faa_state; /* FAA enabled/disabled */
+ u8 rsvd[1];
+ u16 path_tov; /* device path timeout */
+ u16 q_depth; /* SCSI Queue depth */
};
#pragma pack()
@@ -868,20 +864,21 @@ struct bfa_port_attr_s {
/*
* Static fields
*/
- wwn_t nwwn; /* node wwn */
- wwn_t pwwn; /* port wwn */
- wwn_t factorynwwn; /* factory node wwn */
- wwn_t factorypwwn; /* factory port wwn */
- enum fc_cos cos_supported; /* supported class of services */
- u32 rsvd;
+ wwn_t nwwn; /* node wwn */
+ wwn_t pwwn; /* port wwn */
+ wwn_t factorynwwn; /* factory node wwn */
+ wwn_t factorypwwn; /* factory port wwn */
+ enum fc_cos cos_supported; /* supported class of
+ * services */
+ u32 rsvd;
struct fc_symname_s port_symname; /* port symbolic name */
- enum bfa_port_speed speed_supported; /* supported speeds */
- bfa_boolean_t pbind_enabled;
+ enum bfa_port_speed speed_supported; /* supported speeds */
+ bfa_boolean_t pbind_enabled;
/*
* Configured values
*/
- struct bfa_port_cfg_s pport_cfg; /* pport cfg */
+ struct bfa_port_cfg_s pport_cfg; /* pport cfg */
/*
* Dynamic field - info from BFA
@@ -890,19 +887,20 @@ struct bfa_port_attr_s {
enum bfa_port_speed speed; /* current speed */
enum bfa_port_topology topology; /* current topology */
bfa_boolean_t beacon; /* current beacon status */
- bfa_boolean_t link_e2e_beacon; /* link beacon is on */
- bfa_boolean_t bbsc_op_status; /* fc credit recovery oper state */
+ bfa_boolean_t link_e2e_beacon; /* link beacon is on */
+ bfa_boolean_t bbsc_op_status; /* fc credit recovery oper
+ * state */
/*
* Dynamic field - info from FCS
*/
- u32 pid; /* port ID */
+ u32 pid; /* port ID */
enum bfa_port_type port_type; /* current topology */
- u32 loopback; /* external loopback */
- u32 authfail; /* auth fail state */
+ u32 loopback; /* external loopback */
+ u32 authfail; /* auth fail state */
/* FCoE specific */
- u16 fcoe_vlan;
+ u16 fcoe_vlan;
u8 rsvd1[2];
};
@@ -910,48 +908,48 @@ struct bfa_port_attr_s {
* Port FCP mappings.
*/
struct bfa_port_fcpmap_s {
- char osdevname[256];
+ char osdevname[256];
u32 bus;
u32 target;
u32 oslun;
u32 fcid;
- wwn_t nwwn;
- wwn_t pwwn;
+ wwn_t nwwn;
+ wwn_t pwwn;
u64 fcplun;
- char luid[256];
+ char luid[256];
};
/*
* Port RNID info.
*/
struct bfa_port_rnid_s {
- wwn_t wwn;
+ wwn_t wwn;
u32 unittype;
u32 portid;
u32 attached_nodes_num;
u16 ip_version;
u16 udp_port;
- u8 ipaddr[16];
+ u8 ipaddr[16];
u16 rsvd;
u16 topologydiscoveryflags;
};
#pragma pack(1)
struct bfa_fcport_fcf_s {
- wwn_t name; /* FCF name */
- wwn_t fabric_name; /* Fabric Name */
- u8 fipenabled; /* FIP enabled or not */
- u8 fipfailed; /* FIP failed or not */
- u8 resv[2];
- u8 pri; /* FCF priority */
- u8 version; /* FIP version used */
- u8 available; /* Available for login */
- u8 fka_disabled; /* FKA is disabled */
- u8 maxsz_verified; /* FCoE max size verified */
- u8 fc_map[3]; /* FC map */
- __be16 vlan; /* FCoE vlan tag/priority */
- u32 fka_adv_per; /* FIP ka advert. period */
- mac_t mac; /* FCF mac */
+ wwn_t name; /* FCF name */
+ wwn_t fabric_name; /* Fabric Name */
+ u8 fipenabled; /* FIP enabled or not */
+ u8 fipfailed; /* FIP failed or not */
+ u8 resv[2];
+ u8 pri; /* FCF priority */
+ u8 version; /* FIP version used */
+ u8 available; /* Available for login */
+ u8 fka_disabled; /* FKA is disabled */
+ u8 maxsz_verified; /* FCoE max size verified */
+ u8 fc_map[3]; /* FC map */
+ __be16 vlan; /* FCoE vlan tag/priority */
+ u32 fka_adv_per; /* FIP ka advert. period */
+ mac_t mac; /* FCF mac */
};
/*
@@ -981,7 +979,7 @@ struct bfa_port_link_s {
u8 linkstate_rsn; /* bfa_port_linkstate_rsn_t */
u8 topology; /* P2P/LOOP bfa_port_topology */
u8 speed; /* Link speed (1/2/4/8 G) */
- u32 linkstate_opt; /* Linkstate optional data (debug) */
+ u32 linkstate_opt; /* Linkstate optional data (debug) */
u8 trunked; /* Trunked or not (1 or 0) */
u8 resvd[3];
struct bfa_qos_attr_s qos_attr; /* QoS Attributes */
@@ -1035,7 +1033,7 @@ struct bfa_rport_hal_stats_s {
u32 sm_fwc_del; /* fw create: delete events */
u32 sm_fwc_off; /* fw create: offline events */
u32 sm_fwc_hwf; /* fw create: IOC down */
- u32 sm_fwc_unexp; /* fw create: exception events*/
+ u32 sm_fwc_unexp; /* fw create: exception events*/
u32 sm_on_off; /* online: offline events */
u32 sm_on_del; /* online: delete events */
u32 sm_on_hwf; /* online: IOC down events */
@@ -1043,25 +1041,25 @@ struct bfa_rport_hal_stats_s {
u32 sm_fwd_rsp; /* fw delete: fw responses */
u32 sm_fwd_del; /* fw delete: delete events */
u32 sm_fwd_hwf; /* fw delete: IOC down events */
- u32 sm_fwd_unexp; /* fw delete: exception events*/
+ u32 sm_fwd_unexp; /* fw delete: exception events*/
u32 sm_off_del; /* offline: delete events */
u32 sm_off_on; /* offline: online events */
u32 sm_off_hwf; /* offline: IOC down events */
- u32 sm_off_unexp; /* offline: exception events */
- u32 sm_del_fwrsp; /* delete: fw responses */
+ u32 sm_off_unexp; /* offline: exception events */
+ u32 sm_del_fwrsp; /* delete: fw responses */
u32 sm_del_hwf; /* delete: IOC down events */
- u32 sm_del_unexp; /* delete: exception events */
- u32 sm_delp_fwrsp; /* delete pend: fw responses */
+ u32 sm_del_unexp; /* delete: exception events */
+ u32 sm_delp_fwrsp; /* delete pend: fw responses */
u32 sm_delp_hwf; /* delete pend: IOC downs */
- u32 sm_delp_unexp; /* delete pend: exceptions */
- u32 sm_offp_fwrsp; /* off-pending: fw responses */
+ u32 sm_delp_unexp; /* delete pend: exceptions */
+ u32 sm_offp_fwrsp; /* off-pending: fw responses */
u32 sm_offp_del; /* off-pending: deletes */
u32 sm_offp_hwf; /* off-pending: IOC downs */
- u32 sm_offp_unexp; /* off-pending: exceptions */
+ u32 sm_offp_unexp; /* off-pending: exceptions */
u32 sm_iocd_off; /* IOC down: offline events */
u32 sm_iocd_del; /* IOC down: delete events */
u32 sm_iocd_on; /* IOC down: online events */
- u32 sm_iocd_unexp; /* IOC down: exceptions */
+ u32 sm_iocd_unexp; /* IOC down: exceptions */
u32 rsvd;
};
#pragma pack(1)
@@ -1069,9 +1067,9 @@ struct bfa_rport_hal_stats_s {
* Rport's QoS attributes
*/
struct bfa_rport_qos_attr_s {
- u8 qos_priority; /* rport's QoS priority */
- u8 rsvd[3];
- u32 qos_flow_id; /* QoS flow Id */
+ u8 qos_priority; /* rport's QoS priority */
+ u8 rsvd[3];
+ u32 qos_flow_id; /* QoS flow Id */
};
#pragma pack()
diff --git a/drivers/scsi/bfa/bfa_fc.h b/drivers/scsi/bfa/bfa_fc.h
index 50b6a1c86195..8d0b88f67a38 100644
--- a/drivers/scsi/bfa/bfa_fc.h
+++ b/drivers/scsi/bfa/bfa_fc.h
@@ -56,161 +56,6 @@ struct scsi_cdb_s {
#define SCSI_MAX_ALLOC_LEN 0xFF /* maximum allocarion length */
-#define SCSI_SENSE_CUR_ERR 0x70
-#define SCSI_SENSE_DEF_ERR 0x71
-
-/*
- * SCSI additional sense codes
- */
-#define SCSI_ASC_LUN_NOT_READY 0x04
-#define SCSI_ASC_LUN_NOT_SUPPORTED 0x25
-#define SCSI_ASC_TOCC 0x3F
-
-/*
- * SCSI additional sense code qualifiers
- */
-#define SCSI_ASCQ_MAN_INTR_REQ 0x03 /* manual intervention req */
-#define SCSI_ASCQ_RL_DATA_CHANGED 0x0E /* report luns data changed */
-
-/*
- * Methods of reporting informational exceptions
- */
-#define SCSI_MP_IEC_UNIT_ATTN 0x2 /* generate unit attention */
-
-struct scsi_report_luns_data_s {
- u32 lun_list_length; /* length of LUN list length */
- u32 reserved;
- struct scsi_lun lun[1]; /* first LUN in lun list */
-};
-
-struct scsi_inquiry_vendor_s {
- u8 vendor_id[8];
-};
-
-struct scsi_inquiry_prodid_s {
- u8 product_id[16];
-};
-
-struct scsi_inquiry_prodrev_s {
- u8 product_rev[4];
-};
-
-struct scsi_inquiry_data_s {
-#ifdef __BIG_ENDIAN
- u8 peripheral_qual:3; /* peripheral qualifier */
- u8 device_type:5; /* peripheral device type */
- u8 rmb:1; /* removable medium bit */
- u8 device_type_mod:7; /* device type modifier */
- u8 version;
- u8 aenc:1; /* async evt notification capability */
- u8 trm_iop:1; /* terminate I/O process */
- u8 norm_aca:1; /* normal ACA supported */
- u8 hi_support:1; /* SCSI-3: supports REPORT LUNS */
- u8 rsp_data_format:4;
- u8 additional_len;
- u8 sccs:1;
- u8 reserved1:7;
- u8 reserved2:1;
- u8 enc_serv:1; /* enclosure service component */
- u8 reserved3:1;
- u8 multi_port:1; /* multi-port device */
- u8 m_chngr:1; /* device in medium transport element */
- u8 ack_req_q:1; /* SIP specific bit */
- u8 addr32:1; /* SIP specific bit */
- u8 addr16:1; /* SIP specific bit */
- u8 rel_adr:1; /* relative address */
- u8 w_bus32:1;
- u8 w_bus16:1;
- u8 synchronous:1;
- u8 linked_commands:1;
- u8 trans_dis:1;
- u8 cmd_queue:1; /* command queueing supported */
- u8 soft_reset:1; /* soft reset alternative (VS) */
-#else
- u8 device_type:5; /* peripheral device type */
- u8 peripheral_qual:3; /* peripheral qualifier */
- u8 device_type_mod:7; /* device type modifier */
- u8 rmb:1; /* removable medium bit */
- u8 version;
- u8 rsp_data_format:4;
- u8 hi_support:1; /* SCSI-3: supports REPORT LUNS */
- u8 norm_aca:1; /* normal ACA supported */
- u8 terminate_iop:1;/* terminate I/O process */
- u8 aenc:1; /* async evt notification capability */
- u8 additional_len;
- u8 reserved1:7;
- u8 sccs:1;
- u8 addr16:1; /* SIP specific bit */
- u8 addr32:1; /* SIP specific bit */
- u8 ack_req_q:1; /* SIP specific bit */
- u8 m_chngr:1; /* device in medium transport element */
- u8 multi_port:1; /* multi-port device */
- u8 reserved3:1; /* TBD - Vendor Specific */
- u8 enc_serv:1; /* enclosure service component */
- u8 reserved2:1;
- u8 soft_seset:1; /* soft reset alternative (VS) */
- u8 cmd_queue:1; /* command queueing supported */
- u8 trans_dis:1;
- u8 linked_commands:1;
- u8 synchronous:1;
- u8 w_bus16:1;
- u8 w_bus32:1;
- u8 rel_adr:1; /* relative address */
-#endif
- struct scsi_inquiry_vendor_s vendor_id;
- struct scsi_inquiry_prodid_s product_id;
- struct scsi_inquiry_prodrev_s product_rev;
- u8 vendor_specific[20];
- u8 reserved4[40];
-};
-
-/*
- * SCSI sense data format
- */
-struct scsi_sense_s {
-#ifdef __BIG_ENDIAN
- u8 valid:1;
- u8 rsp_code:7;
-#else
- u8 rsp_code:7;
- u8 valid:1;
-#endif
- u8 seg_num;
-#ifdef __BIG_ENDIAN
- u8 file_mark:1;
- u8 eom:1; /* end of media */
- u8 ili:1; /* incorrect length indicator */
- u8 reserved:1;
- u8 sense_key:4;
-#else
- u8 sense_key:4;
- u8 reserved:1;
- u8 ili:1; /* incorrect length indicator */
- u8 eom:1; /* end of media */
- u8 file_mark:1;
-#endif
- u8 information[4]; /* device-type or cmd specific info */
- u8 add_sense_length; /* additional sense length */
- u8 command_info[4];/* command specific information */
- u8 asc; /* additional sense code */
- u8 ascq; /* additional sense code qualifier */
- u8 fru_code; /* field replaceable unit code */
-#ifdef __BIG_ENDIAN
- u8 sksv:1; /* sense key specific valid */
- u8 c_d:1; /* command/data bit */
- u8 res1:2;
- u8 bpv:1; /* bit pointer valid */
- u8 bpointer:3; /* bit pointer */
-#else
- u8 bpointer:3; /* bit pointer */
- u8 bpv:1; /* bit pointer valid */
- u8 res1:2;
- u8 c_d:1; /* command/data bit */
- u8 sksv:1; /* sense key specific valid */
-#endif
- u8 fpointer[2]; /* field pointer */
-};
-
/*
* Fibre Channel Header Structure (FCHS) definition
*/
diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
index e07bd4745d8b..f0f80e282e39 100644
--- a/drivers/scsi/bfa/bfa_fcpim.c
+++ b/drivers/scsi/bfa/bfa_fcpim.c
@@ -24,8 +24,6 @@ BFA_TRC_FILE(HAL, FCPIM);
* BFA ITNIM Related definitions
*/
static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);
-static bfa_boolean_t bfa_ioim_lm_proc_rpl_data(struct bfa_ioim_s *ioim);
-static bfa_boolean_t bfa_ioim_lm_proc_inq_data(struct bfa_ioim_s *ioim);
static void bfa_ioim_lm_init(struct bfa_s *bfa);
#define BFA_ITNIM_FROM_TAG(_fcpim, _tag) \
@@ -60,14 +58,6 @@ static void bfa_ioim_lm_init(struct bfa_s *bfa);
} \
} while (0)
-#define bfa_ioim_rp_wwn(__ioim) \
- (((struct bfa_fcs_rport_s *) \
- (__ioim)->itnim->rport->rport_drv)->pwwn)
-
-#define bfa_ioim_lp_wwn(__ioim) \
- ((BFA_LPS_FROM_TAG(BFA_LPS_MOD((__ioim)->bfa), \
- (__ioim)->itnim->rport->rport_info.lp_tag))->pwwn) \
-
#define bfa_itnim_sler_cb(__itnim) do { \
if ((__itnim)->bfa->fcs) \
bfa_cb_itnim_sler((__itnim)->ditn); \
@@ -77,13 +67,6 @@ static void bfa_ioim_lm_init(struct bfa_s *bfa);
} \
} while (0)
-enum bfa_ioim_lm_status {
- BFA_IOIM_LM_PRESENT = 1,
- BFA_IOIM_LM_LUN_NOT_SUP = 2,
- BFA_IOIM_LM_RPL_DATA_CHANGED = 3,
- BFA_IOIM_LM_LUN_NOT_RDY = 4,
-};
-
enum bfa_ioim_lm_ua_status {
BFA_IOIM_LM_UA_RESET = 0,
BFA_IOIM_LM_UA_SET = 1,
@@ -145,9 +128,6 @@ enum bfa_ioim_event {
BFA_IOIM_SM_TMDONE = 16, /* IO cleanup from tskim */
BFA_IOIM_SM_HWFAIL = 17, /* IOC h/w failure event */
BFA_IOIM_SM_IOTOV = 18, /* ITN offline TOV */
- BFA_IOIM_SM_LM_LUN_NOT_SUP = 19,/* lunmask lun not supported */
- BFA_IOIM_SM_LM_RPL_DC = 20, /* lunmask report-lun data changed */
- BFA_IOIM_SM_LM_LUN_NOT_RDY = 21,/* lunmask lun not ready */
};
@@ -245,9 +225,6 @@ static void __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete);
static bfa_boolean_t bfa_ioim_is_abortable(struct bfa_ioim_s *ioim);
-static void __bfa_cb_ioim_lm_lun_not_sup(void *cbarg, bfa_boolean_t complete);
-static void __bfa_cb_ioim_lm_rpl_dc(void *cbarg, bfa_boolean_t complete);
-static void __bfa_cb_ioim_lm_lun_not_rdy(void *cbarg, bfa_boolean_t complete);
/*
* forward declaration of BFA IO state machine
@@ -445,12 +422,6 @@ bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *lstats,
bfa_fcpim_add_iostats(lstats, rstats, output_reqs);
bfa_fcpim_add_iostats(lstats, rstats, rd_throughput);
bfa_fcpim_add_iostats(lstats, rstats, wr_throughput);
- bfa_fcpim_add_iostats(lstats, rstats, lm_lun_across_sg);
- bfa_fcpim_add_iostats(lstats, rstats, lm_lun_not_sup);
- bfa_fcpim_add_iostats(lstats, rstats, lm_rpl_data_changed);
- bfa_fcpim_add_iostats(lstats, rstats, lm_wire_residue_changed);
- bfa_fcpim_add_iostats(lstats, rstats, lm_small_buf_addresidue);
- bfa_fcpim_add_iostats(lstats, rstats, lm_lun_not_rdy);
}
bfa_status_t
@@ -1580,27 +1551,6 @@ bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
__bfa_cb_ioim_abort, ioim);
break;
- case BFA_IOIM_SM_LM_LUN_NOT_SUP:
- bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
- bfa_ioim_move_to_comp_q(ioim);
- bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
- __bfa_cb_ioim_lm_lun_not_sup, ioim);
- break;
-
- case BFA_IOIM_SM_LM_RPL_DC:
- bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
- bfa_ioim_move_to_comp_q(ioim);
- bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
- __bfa_cb_ioim_lm_rpl_dc, ioim);
- break;
-
- case BFA_IOIM_SM_LM_LUN_NOT_RDY:
- bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
- bfa_ioim_move_to_comp_q(ioim);
- bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
- __bfa_cb_ioim_lm_lun_not_rdy, ioim);
- break;
-
default:
bfa_sm_fault(ioim->bfa, event);
}
@@ -2160,243 +2110,6 @@ bfa_ioim_lm_init(struct bfa_s *bfa)
}
}
-/*
- * Validate LUN for LUN masking
- */
-static enum bfa_ioim_lm_status
-bfa_ioim_lm_check(struct bfa_ioim_s *ioim, struct bfa_lps_s *lps,
- struct bfa_rport_s *rp, struct scsi_lun lun)
-{
- u8 i;
- struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(ioim->bfa);
- struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
- struct scsi_cdb_s *cdb = (struct scsi_cdb_s *)cmnd->cmnd;
-
- if ((cdb->scsi_cdb[0] == REPORT_LUNS) &&
- (scsilun_to_int((struct scsi_lun *)&lun) == 0)) {
- ioim->proc_rsp_data = bfa_ioim_lm_proc_rpl_data;
- return BFA_IOIM_LM_PRESENT;
- }
-
- for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
-
- if (lun_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
- continue;
-
- if ((scsilun_to_int((struct scsi_lun *)&lun_list[i].lun) ==
- scsilun_to_int((struct scsi_lun *)&lun))
- && (rp->rport_tag == lun_list[i].rp_tag)
- && ((u8)ioim->itnim->rport->rport_info.lp_tag ==
- lun_list[i].lp_tag)) {
- bfa_trc(ioim->bfa, lun_list[i].rp_tag);
- bfa_trc(ioim->bfa, lun_list[i].lp_tag);
- bfa_trc(ioim->bfa, scsilun_to_int(
- (struct scsi_lun *)&lun_list[i].lun));
-
- if ((lun_list[i].ua == BFA_IOIM_LM_UA_SET) &&
- ((cdb->scsi_cdb[0] != INQUIRY) ||
- (cdb->scsi_cdb[0] != REPORT_LUNS))) {
- lun_list[i].ua = BFA_IOIM_LM_UA_RESET;
- return BFA_IOIM_LM_RPL_DATA_CHANGED;
- }
-
- if (cdb->scsi_cdb[0] == REPORT_LUNS)
- ioim->proc_rsp_data = bfa_ioim_lm_proc_rpl_data;
-
- return BFA_IOIM_LM_PRESENT;
- }
- }
-
- if ((cdb->scsi_cdb[0] == INQUIRY) &&
- (scsilun_to_int((struct scsi_lun *)&lun) == 0)) {
- ioim->proc_rsp_data = bfa_ioim_lm_proc_inq_data;
- return BFA_IOIM_LM_PRESENT;
- }
-
- if (cdb->scsi_cdb[0] == TEST_UNIT_READY)
- return BFA_IOIM_LM_LUN_NOT_RDY;
-
- return BFA_IOIM_LM_LUN_NOT_SUP;
-}
-
-static bfa_boolean_t
-bfa_ioim_lm_proc_rsp_data_dummy(struct bfa_ioim_s *ioim)
-{
- return BFA_TRUE;
-}
-
-static void
-bfa_ioim_lm_fetch_lun(struct bfa_ioim_s *ioim, u8 *rl_data, int offset,
- int buf_lun_cnt)
-{
- struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(ioim->bfa);
- struct scsi_lun *lun_data = (struct scsi_lun *)(rl_data + offset);
- struct scsi_lun lun;
- int i, j;
-
- bfa_trc(ioim->bfa, buf_lun_cnt);
- for (j = 0; j < buf_lun_cnt; j++) {
- lun = *((struct scsi_lun *)(lun_data + j));
- for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
- if (lun_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
- continue;
- if ((lun_list[i].rp_wwn == bfa_ioim_rp_wwn(ioim)) &&
- (lun_list[i].lp_wwn == bfa_ioim_lp_wwn(ioim)) &&
- (scsilun_to_int((struct scsi_lun *)&lun_list[i].lun)
- == scsilun_to_int((struct scsi_lun *)&lun))) {
- lun_list[i].state = BFA_IOIM_LUN_MASK_FETCHED;
- break;
- }
- } /* next lun in mask DB */
- } /* next lun in buf */
-}
-
-static int
-bfa_ioim_lm_update_lun_sg(struct bfa_ioim_s *ioim, u32 *pgdlen,
- struct scsi_report_luns_data_s *rl)
-{
- struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
- struct scatterlist *sg = scsi_sglist(cmnd);
- struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(ioim->bfa);
- struct scsi_lun *prev_rl_data = NULL, *base_rl_data;
- int i, j, sgeid, lun_fetched_cnt = 0, prev_sg_len = 0, base_count;
- int lun_across_sg_bytes, bytes_from_next_buf;
- u64 last_lun, temp_last_lun;
-
- /* fetch luns from the first sg element */
- bfa_ioim_lm_fetch_lun(ioim, (u8 *)(rl->lun), 0,
- (sg_dma_len(sg) / sizeof(struct scsi_lun)) - 1);
-
- /* fetch luns from multiple sg elements */
- scsi_for_each_sg(cmnd, sg, scsi_sg_count(cmnd), sgeid) {
- if (sgeid == 0) {
- prev_sg_len = sg_dma_len(sg);
- prev_rl_data = (struct scsi_lun *)
- phys_to_virt(sg_dma_address(sg));
- continue;
- }
-
- /* if the buf is having more data */
- lun_across_sg_bytes = prev_sg_len % sizeof(struct scsi_lun);
- if (lun_across_sg_bytes) {
- bfa_trc(ioim->bfa, lun_across_sg_bytes);
- bfa_stats(ioim->itnim, lm_lun_across_sg);
- bytes_from_next_buf = sizeof(struct scsi_lun) -
- lun_across_sg_bytes;
-
- /* from next buf take higher bytes */
- temp_last_lun = *((u64 *)
- phys_to_virt(sg_dma_address(sg)));
- last_lun |= temp_last_lun >>
- (lun_across_sg_bytes * BITS_PER_BYTE);
-
- /* from prev buf take higher bytes */
- temp_last_lun = *((u64 *)(prev_rl_data +
- (prev_sg_len - lun_across_sg_bytes)));
- temp_last_lun >>= bytes_from_next_buf * BITS_PER_BYTE;
- last_lun = last_lun | (temp_last_lun <<
- (bytes_from_next_buf * BITS_PER_BYTE));
-
- bfa_ioim_lm_fetch_lun(ioim, (u8 *)&last_lun, 0, 1);
- } else
- bytes_from_next_buf = 0;
-
- *pgdlen += sg_dma_len(sg);
- prev_sg_len = sg_dma_len(sg);
- prev_rl_data = (struct scsi_lun *)
- phys_to_virt(sg_dma_address(sg));
- bfa_ioim_lm_fetch_lun(ioim, (u8 *)prev_rl_data,
- bytes_from_next_buf,
- sg_dma_len(sg) / sizeof(struct scsi_lun));
- }
-
- /* update the report luns data - based on fetched luns */
- sg = scsi_sglist(cmnd);
- base_rl_data = (struct scsi_lun *)rl->lun;
- base_count = (sg_dma_len(sg) / sizeof(struct scsi_lun)) - 1;
- for (i = 0, j = 0; i < MAX_LUN_MASK_CFG; i++) {
- if (lun_list[i].state == BFA_IOIM_LUN_MASK_FETCHED) {
- base_rl_data[j] = lun_list[i].lun;
- lun_list[i].state = BFA_IOIM_LUN_MASK_ACTIVE;
- j++;
- lun_fetched_cnt++;
- }
-
- if (j > base_count) {
- j = 0;
- sg = sg_next(sg);
- base_rl_data = (struct scsi_lun *)
- phys_to_virt(sg_dma_address(sg));
- base_count = sg_dma_len(sg) / sizeof(struct scsi_lun);
- }
- }
-
- bfa_trc(ioim->bfa, lun_fetched_cnt);
- return lun_fetched_cnt;
-}
-
-static bfa_boolean_t
-bfa_ioim_lm_proc_inq_data(struct bfa_ioim_s *ioim)
-{
- struct scsi_inquiry_data_s *inq;
- struct scatterlist *sg = scsi_sglist((struct scsi_cmnd *)ioim->dio);
-
- ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
- inq = (struct scsi_inquiry_data_s *)phys_to_virt(sg_dma_address(sg));
-
- bfa_trc(ioim->bfa, inq->device_type);
- inq->peripheral_qual = SCSI_INQ_PQ_NOT_CON;
- return 0;
-}
-
-static bfa_boolean_t
-bfa_ioim_lm_proc_rpl_data(struct bfa_ioim_s *ioim)
-{
- struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
- struct scatterlist *sg = scsi_sglist(cmnd);
- struct bfi_ioim_rsp_s *m;
- struct scsi_report_luns_data_s *rl = NULL;
- int lun_count = 0, lun_fetched_cnt = 0;
- u32 residue, pgdlen = 0;
-
- ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
- if (bfa_get_lun_mask_status(ioim->bfa) != BFA_LUNMASK_ENABLED)
- return BFA_TRUE;
-
- m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg;
- if (m->scsi_status == SCSI_STATUS_CHECK_CONDITION)
- return BFA_TRUE;
-
- pgdlen = sg_dma_len(sg);
- bfa_trc(ioim->bfa, pgdlen);
- rl = (struct scsi_report_luns_data_s *)phys_to_virt(sg_dma_address(sg));
- lun_count = cpu_to_be32(rl->lun_list_length) / sizeof(struct scsi_lun);
- lun_fetched_cnt = bfa_ioim_lm_update_lun_sg(ioim, &pgdlen, rl);
-
- if (lun_count == lun_fetched_cnt)
- return BFA_TRUE;
-
- bfa_trc(ioim->bfa, lun_count);
- bfa_trc(ioim->bfa, lun_fetched_cnt);
- bfa_trc(ioim->bfa, be32_to_cpu(rl->lun_list_length));
-
- if (be32_to_cpu(rl->lun_list_length) <= pgdlen)
- rl->lun_list_length = be32_to_cpu(lun_fetched_cnt) *
- sizeof(struct scsi_lun);
- else
- bfa_stats(ioim->itnim, lm_small_buf_addresidue);
-
- bfa_trc(ioim->bfa, be32_to_cpu(rl->lun_list_length));
- bfa_trc(ioim->bfa, be32_to_cpu(m->residue));
-
- residue = be32_to_cpu(m->residue);
- residue += (lun_count - lun_fetched_cnt) * sizeof(struct scsi_lun);
- bfa_stats(ioim->itnim, lm_wire_residue_changed);
- m->residue = be32_to_cpu(residue);
- bfa_trc(ioim->bfa, ioim->nsges);
- return BFA_FALSE;
-}
-
static void
__bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete)
{
@@ -2454,83 +2167,6 @@ __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
m->scsi_status, sns_len, snsinfo, residue);
}
-static void
-__bfa_cb_ioim_lm_lun_not_sup(void *cbarg, bfa_boolean_t complete)
-{
- struct bfa_ioim_s *ioim = cbarg;
- int sns_len = 0xD;
- u32 residue = scsi_bufflen((struct scsi_cmnd *)ioim->dio);
- struct scsi_sense_s *snsinfo;
-
- if (!complete) {
- bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
- return;
- }
-
- snsinfo = (struct scsi_sense_s *)BFA_SNSINFO_FROM_TAG(
- ioim->fcpim->fcp, ioim->iotag);
- snsinfo->rsp_code = SCSI_SENSE_CUR_ERR;
- snsinfo->add_sense_length = 0xa;
- snsinfo->asc = SCSI_ASC_LUN_NOT_SUPPORTED;
- snsinfo->sense_key = ILLEGAL_REQUEST;
- bfa_trc(ioim->bfa, residue);
- bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_OK,
- SCSI_STATUS_CHECK_CONDITION, sns_len,
- (u8 *)snsinfo, residue);
-}
-
-static void
-__bfa_cb_ioim_lm_rpl_dc(void *cbarg, bfa_boolean_t complete)
-{
- struct bfa_ioim_s *ioim = cbarg;
- int sns_len = 0xD;
- u32 residue = scsi_bufflen((struct scsi_cmnd *)ioim->dio);
- struct scsi_sense_s *snsinfo;
-
- if (!complete) {
- bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
- return;
- }
-
- snsinfo = (struct scsi_sense_s *)BFA_SNSINFO_FROM_TAG(ioim->fcpim->fcp,
- ioim->iotag);
- snsinfo->rsp_code = SCSI_SENSE_CUR_ERR;
- snsinfo->sense_key = SCSI_MP_IEC_UNIT_ATTN;
- snsinfo->asc = SCSI_ASC_TOCC;
- snsinfo->add_sense_length = 0x6;
- snsinfo->ascq = SCSI_ASCQ_RL_DATA_CHANGED;
- bfa_trc(ioim->bfa, residue);
- bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_OK,
- SCSI_STATUS_CHECK_CONDITION, sns_len,
- (u8 *)snsinfo, residue);
-}
-
-static void
-__bfa_cb_ioim_lm_lun_not_rdy(void *cbarg, bfa_boolean_t complete)
-{
- struct bfa_ioim_s *ioim = cbarg;
- int sns_len = 0xD;
- u32 residue = scsi_bufflen((struct scsi_cmnd *)ioim->dio);
- struct scsi_sense_s *snsinfo;
-
- if (!complete) {
- bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
- return;
- }
-
- snsinfo = (struct scsi_sense_s *)BFA_SNSINFO_FROM_TAG(
- ioim->fcpim->fcp, ioim->iotag);
- snsinfo->rsp_code = SCSI_SENSE_CUR_ERR;
- snsinfo->add_sense_length = 0xa;
- snsinfo->sense_key = NOT_READY;
- snsinfo->asc = SCSI_ASC_LUN_NOT_READY;
- snsinfo->ascq = SCSI_ASCQ_MAN_INTR_REQ;
- bfa_trc(ioim->bfa, residue);
- bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_OK,
- SCSI_STATUS_CHECK_CONDITION, sns_len,
- (u8 *)snsinfo, residue);
-}
-
void
bfa_fcpim_lunmask_rp_update(struct bfa_s *bfa, wwn_t lp_wwn, wwn_t rp_wwn,
u16 rp_tag, u8 lp_tag)
@@ -2647,7 +2283,8 @@ bfa_fcpim_lunmask_add(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn,
if (port) {
*pwwn = port->port_cfg.pwwn;
rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn);
- rp = rp_fcs->bfa_rport;
+ if (rp_fcs)
+ rp = rp_fcs->bfa_rport;
}
lunm_list = bfa_get_lun_mask_list(bfa);
@@ -2715,7 +2352,8 @@ bfa_fcpim_lunmask_delete(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn,
if (port) {
*pwwn = port->port_cfg.pwwn;
rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn);
- rp = rp_fcs->bfa_rport;
+ if (rp_fcs)
+ rp = rp_fcs->bfa_rport;
}
}
@@ -2757,7 +2395,6 @@ __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete)
return;
}
- ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED,
0, 0, NULL, 0);
}
@@ -2773,7 +2410,6 @@ __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete)
return;
}
- ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV,
0, 0, NULL, 0);
}
@@ -2788,7 +2424,6 @@ __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete)
return;
}
- ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio);
}
@@ -3132,7 +2767,6 @@ bfa_ioim_attach(struct bfa_fcpim_s *fcpim)
ioim->bfa = fcpim->bfa;
ioim->fcpim = fcpim;
ioim->iosp = iosp;
- ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
INIT_LIST_HEAD(&ioim->sgpg_q);
bfa_reqq_winit(&ioim->iosp->reqq_wait,
bfa_ioim_qresume, ioim);
@@ -3170,7 +2804,6 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
evt = BFA_IOIM_SM_DONE;
else
evt = BFA_IOIM_SM_COMP;
- ioim->proc_rsp_data(ioim);
break;
case BFI_IOIM_STS_TIMEDOUT:
@@ -3206,7 +2839,6 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
if (rsp->abort_tag != ioim->abort_tag) {
bfa_trc(ioim->bfa, rsp->abort_tag);
bfa_trc(ioim->bfa, ioim->abort_tag);
- ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
return;
}
@@ -3225,7 +2857,6 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
WARN_ON(1);
}
- ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
bfa_sm_send_event(ioim, evt);
}
@@ -3244,15 +2875,7 @@ bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
bfa_ioim_cb_profile_comp(fcpim, ioim);
- if (bfa_get_lun_mask_status(bfa) != BFA_LUNMASK_ENABLED) {
- bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
- return;
- }
-
- if (ioim->proc_rsp_data(ioim) == BFA_TRUE)
- bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
- else
- bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP);
+ bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
}
/*
@@ -3364,35 +2987,6 @@ bfa_ioim_free(struct bfa_ioim_s *ioim)
void
bfa_ioim_start(struct bfa_ioim_s *ioim)
{
- struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
- struct bfa_lps_s *lps;
- enum bfa_ioim_lm_status status;
- struct scsi_lun scsilun;
-
- if (bfa_get_lun_mask_status(ioim->bfa) == BFA_LUNMASK_ENABLED) {
- lps = BFA_IOIM_TO_LPS(ioim);
- int_to_scsilun(cmnd->device->lun, &scsilun);
- status = bfa_ioim_lm_check(ioim, lps,
- ioim->itnim->rport, scsilun);
- if (status == BFA_IOIM_LM_LUN_NOT_RDY) {
- bfa_sm_send_event(ioim, BFA_IOIM_SM_LM_LUN_NOT_RDY);
- bfa_stats(ioim->itnim, lm_lun_not_rdy);
- return;
- }
-
- if (status == BFA_IOIM_LM_LUN_NOT_SUP) {
- bfa_sm_send_event(ioim, BFA_IOIM_SM_LM_LUN_NOT_SUP);
- bfa_stats(ioim->itnim, lm_lun_not_sup);
- return;
- }
-
- if (status == BFA_IOIM_LM_RPL_DATA_CHANGED) {
- bfa_sm_send_event(ioim, BFA_IOIM_SM_LM_RPL_DC);
- bfa_stats(ioim->itnim, lm_rpl_data_changed);
- return;
- }
- }
-
bfa_ioim_cb_profile_start(ioim->fcpim, ioim);
/*
diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
index 1080bcb81cb7..36f26da80f76 100644
--- a/drivers/scsi/bfa/bfa_fcpim.h
+++ b/drivers/scsi/bfa/bfa_fcpim.h
@@ -110,7 +110,6 @@ struct bfad_ioim_s;
struct bfad_tskim_s;
typedef void (*bfa_fcpim_profile_t) (struct bfa_ioim_s *ioim);
-typedef bfa_boolean_t (*bfa_ioim_lm_proc_rsp_data_t) (struct bfa_ioim_s *ioim);
struct bfa_fcpim_s {
struct bfa_s *bfa;
@@ -124,7 +123,6 @@ struct bfa_fcpim_s {
u32 path_tov;
u16 q_depth;
u8 reqq; /* Request queue to be used */
- u8 lun_masking_pending;
struct list_head itnim_q; /* queue of active itnim */
struct list_head ioim_resfree_q; /* IOs waiting for f/w */
struct list_head ioim_comp_q; /* IO global comp Q */
@@ -181,7 +179,6 @@ struct bfa_ioim_s {
u8 reqq; /* Request queue for I/O */
u8 mode; /* IO is passthrough or not */
u64 start_time; /* IO's Profile start val */
- bfa_ioim_lm_proc_rsp_data_t proc_rsp_data; /* RSP data adjust */
};
struct bfa_ioim_sp_s {
@@ -261,10 +258,6 @@ struct bfa_itnim_s {
(__ioim)->iotag |= k << BFA_IOIM_RETRY_TAG_OFFSET; \
} while (0)
-#define BFA_IOIM_TO_LPS(__ioim) \
- BFA_LPS_FROM_TAG(BFA_LPS_MOD(__ioim->bfa), \
- __ioim->itnim->rport->rport_info.lp_tag)
-
static inline bfa_boolean_t
bfa_ioim_maxretry_reached(struct bfa_ioim_s *ioim)
{
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index 1ac5aecf25a6..eca7ab78085b 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -3727,11 +3727,11 @@ bfa_sfp_media_get(struct bfa_sfp_s *sfp)
(xmtr_tech & SFP_XMTR_TECH_SA))
*media = BFA_SFP_MEDIA_SW;
	/* Check 10G Ethernet Compliance code */
- else if (e10g.b & 0x10)
+ else if (e10g.r.e10g_sr)
*media = BFA_SFP_MEDIA_SW;
- else if (e10g.b & 0x60)
+ else if (e10g.r.e10g_lrm && e10g.r.e10g_lr)
*media = BFA_SFP_MEDIA_LW;
- else if (e10g.r.e10g_unall & 0x80)
+ else if (e10g.r.e10g_unall)
*media = BFA_SFP_MEDIA_UNKNOWN;
else
bfa_trc(sfp, 0);
diff --git a/drivers/scsi/bfa/bfa_svc.h b/drivers/scsi/bfa/bfa_svc.h
index 95adb86d3769..b52cbb6bcd5a 100644
--- a/drivers/scsi/bfa/bfa_svc.h
+++ b/drivers/scsi/bfa/bfa_svc.h
@@ -582,11 +582,6 @@ void bfa_cb_rport_qos_scn_prio(void *rport,
#define BFA_LP_TAG_INVALID 0xff
void bfa_rport_set_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp);
void bfa_rport_unset_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp);
-bfa_boolean_t bfa_rport_lunmask_active(struct bfa_rport_s *rp);
-wwn_t bfa_rport_get_pwwn(struct bfa_s *bfa, struct bfa_rport_s *rp);
-struct bfa_rport_s *bfa_rport_get_by_wwn(struct bfa_s *bfa, u16 vf_id,
- wwn_t *lpwwn, wwn_t rpwwn);
-void *bfa_cb_get_rp_by_wwn(void *arg, u16 vf_id, wwn_t *lpwwn, wwn_t rpwwn);
/*
* bfa fcxp API functions
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index 66fb72531b34..404fd10ddb21 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -674,6 +674,7 @@ bfad_vport_create(struct bfad_s *bfad, u16 vf_id,
spin_lock_irqsave(&bfad->bfad_lock, flags);
bfa_fcs_vport_start(&vport->fcs_vport);
+ list_add_tail(&vport->list_entry, &bfad->vport_list);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
return BFA_STATUS_OK;
@@ -1404,6 +1405,7 @@ bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
bfad->ref_count = 0;
bfad->pport.bfad = bfad;
INIT_LIST_HEAD(&bfad->pbc_vport_list);
+ INIT_LIST_HEAD(&bfad->vport_list);
/* Setup the debugfs node for this bfad */
if (bfa_debugfs_enable)
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
index 9d95844ab463..1938fe0473e9 100644
--- a/drivers/scsi/bfa/bfad_attr.c
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -491,7 +491,7 @@ bfad_im_vport_delete(struct fc_vport *fc_vport)
free_scsi_host:
bfad_scsi_host_free(bfad, im_port);
-
+ list_del(&vport->list_entry);
kfree(vport);
return 0;
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index 06fc00caeb41..530de2b1200a 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -2394,6 +2394,21 @@ out:
return 0;
}
+/* Function to reset the LUN SCAN mode */
+static void
+bfad_iocmd_lunmask_reset_lunscan_mode(struct bfad_s *bfad, int lunmask_cfg)
+{
+ struct bfad_im_port_s *pport_im = bfad->pport.im_port;
+ struct bfad_vport_s *vport = NULL;
+
+ /* Set the scsi device LUN SCAN flags for base port */
+ bfad_reset_sdev_bflags(pport_im, lunmask_cfg);
+
+ /* Set the scsi device LUN SCAN flags for the vports */
+ list_for_each_entry(vport, &bfad->vport_list, list_entry)
+ bfad_reset_sdev_bflags(vport->drv_port.im_port, lunmask_cfg);
+}
+
int
bfad_iocmd_lunmask(struct bfad_s *bfad, void *pcmd, unsigned int v_cmd)
{
@@ -2401,11 +2416,17 @@ bfad_iocmd_lunmask(struct bfad_s *bfad, void *pcmd, unsigned int v_cmd)
unsigned long flags;
spin_lock_irqsave(&bfad->bfad_lock, flags);
- if (v_cmd == IOCMD_FCPIM_LUNMASK_ENABLE)
+ if (v_cmd == IOCMD_FCPIM_LUNMASK_ENABLE) {
iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_TRUE);
- else if (v_cmd == IOCMD_FCPIM_LUNMASK_DISABLE)
+ /* Set the LUN Scanning mode to be Sequential scan */
+ if (iocmd->status == BFA_STATUS_OK)
+ bfad_iocmd_lunmask_reset_lunscan_mode(bfad, BFA_TRUE);
+ } else if (v_cmd == IOCMD_FCPIM_LUNMASK_DISABLE) {
iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_FALSE);
- else if (v_cmd == IOCMD_FCPIM_LUNMASK_CLEAR)
+ /* Set the LUN Scanning mode to default REPORT_LUNS scan */
+ if (iocmd->status == BFA_STATUS_OK)
+ bfad_iocmd_lunmask_reset_lunscan_mode(bfad, BFA_FALSE);
+ } else if (v_cmd == IOCMD_FCPIM_LUNMASK_CLEAR)
iocmd->status = bfa_fcpim_lunmask_clear(&bfad->bfa);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
return 0;
diff --git a/drivers/scsi/bfa/bfad_debugfs.c b/drivers/scsi/bfa/bfad_debugfs.c
index dee1a094c2c2..439c012be763 100644
--- a/drivers/scsi/bfa/bfad_debugfs.c
+++ b/drivers/scsi/bfa/bfad_debugfs.c
@@ -472,7 +472,7 @@ static const struct file_operations bfad_debugfs_op_regwr = {
struct bfad_debugfs_entry {
const char *name;
- mode_t mode;
+ umode_t mode;
const struct file_operations *fops;
};
@@ -557,8 +557,7 @@ bfad_debugfs_exit(struct bfad_port_s *port)
}
}
- /*
- * Remove the pci_dev debugfs directory for the port */
+ /* Remove the pci_dev debugfs directory for the port */
if (port->port_debugfs_root) {
debugfs_remove(port->port_debugfs_root);
port->port_debugfs_root = NULL;
diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h
index 5e19a5f820ec..dc5b9d99c450 100644
--- a/drivers/scsi/bfa/bfad_drv.h
+++ b/drivers/scsi/bfa/bfad_drv.h
@@ -43,6 +43,7 @@
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_bsg_fc.h>
+#include <scsi/scsi_devinfo.h>
#include "bfa_modules.h"
#include "bfa_fcs.h"
@@ -227,6 +228,7 @@ struct bfad_s {
struct list_head active_aen_q;
struct bfa_aen_entry_s aen_list[BFA_AEN_MAX_ENTRY];
spinlock_t bfad_aen_spinlock;
+ struct list_head vport_list;
};
/* BFAD state machine events */
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index e5db649e8eb7..3153923f5b60 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -918,16 +918,70 @@ bfad_get_itnim(struct bfad_im_port_s *im_port, int id)
}
/*
+ * Function is invoked from the SCSI Host Template slave_alloc() entry point.
+ * Has the logic to query the LUN Mask database to check if this LUN needs to
+ * be made visible to the SCSI mid-layer or not.
+ *
+ * Returns BFA_STATUS_OK if this LUN needs to be added to the OS stack.
+ * Returns -ENXIO to notify SCSI mid-layer to not add this LUN to the OS stack.
+ */
+static int
+bfad_im_check_if_make_lun_visible(struct scsi_device *sdev,
+ struct fc_rport *rport)
+{
+ struct bfad_itnim_data_s *itnim_data =
+ (struct bfad_itnim_data_s *) rport->dd_data;
+ struct bfa_s *bfa = itnim_data->itnim->bfa_itnim->bfa;
+ struct bfa_rport_s *bfa_rport = itnim_data->itnim->bfa_itnim->rport;
+ struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(bfa);
+ int i = 0, ret = -ENXIO;
+
+ for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
+ if (lun_list[i].state == BFA_IOIM_LUN_MASK_ACTIVE &&
+ scsilun_to_int(&lun_list[i].lun) == sdev->lun &&
+ lun_list[i].rp_tag == bfa_rport->rport_tag &&
+ lun_list[i].lp_tag == (u8)bfa_rport->rport_info.lp_tag) {
+ ret = BFA_STATUS_OK;
+ break;
+ }
+ }
+ return ret;
+}
+
+/*
* Scsi_Host template entry slave_alloc
*/
static int
bfad_im_slave_alloc(struct scsi_device *sdev)
{
struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
+ struct bfad_itnim_data_s *itnim_data =
+ (struct bfad_itnim_data_s *) rport->dd_data;
+ struct bfa_s *bfa = itnim_data->itnim->bfa_itnim->bfa;
if (!rport || fc_remote_port_chkready(rport))
return -ENXIO;
+ if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_ENABLED) {
+ /*
+ * We should not mask LUN 0 - since this will translate
+		 * to no LUN / TARGET for SCSI ml, resulting in no scan.
+ */
+ if (sdev->lun == 0) {
+ sdev->sdev_bflags |= BLIST_NOREPORTLUN |
+ BLIST_SPARSELUN;
+ goto done;
+ }
+
+ /*
+ * Query LUN Mask configuration - to expose this LUN
+ * to the SCSI mid-layer or to mask it.
+ */
+ if (bfad_im_check_if_make_lun_visible(sdev, rport) !=
+ BFA_STATUS_OK)
+ return -ENXIO;
+ }
+done:
sdev->hostdata = rport->dd_data;
return 0;
@@ -1037,6 +1091,8 @@ bfad_im_fc_rport_add(struct bfad_im_port_s *im_port, struct bfad_itnim_s *itnim)
&& (fc_rport->scsi_target_id < MAX_FCP_TARGET))
itnim->scsi_tgt_id = fc_rport->scsi_target_id;
+ itnim->channel = fc_rport->channel;
+
return;
}
diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h
index 004b6cf848d9..0814367ef101 100644
--- a/drivers/scsi/bfa/bfad_im.h
+++ b/drivers/scsi/bfa/bfad_im.h
@@ -91,6 +91,7 @@ struct bfad_itnim_s {
struct fc_rport *fc_rport;
struct bfa_itnim_s *bfa_itnim;
u16 scsi_tgt_id;
+ u16 channel;
u16 queue_work;
unsigned long last_ramp_up_time;
unsigned long last_queue_full_time;
@@ -166,4 +167,30 @@ irqreturn_t bfad_intx(int irq, void *dev_id);
int bfad_im_bsg_request(struct fc_bsg_job *job);
int bfad_im_bsg_timeout(struct fc_bsg_job *job);
+/*
+ * Macro to set the SCSI device sdev_bflags - sdev_bflags are used by the
+ * SCSI mid-layer to choose LUN Scanning mode REPORT_LUNS vs. Sequential Scan
+ *
+ * Internally iterates over all the ITNIMs that are part of the im_port and
+ * sets the sdev_bflags for the scsi_device associated with LUN #0.
+ */
+#define bfad_reset_sdev_bflags(__im_port, __lunmask_cfg) do { \
+ struct scsi_device *__sdev = NULL; \
+ struct bfad_itnim_s *__itnim = NULL; \
+ u32 scan_flags = BLIST_NOREPORTLUN | BLIST_SPARSELUN; \
+ list_for_each_entry(__itnim, &((__im_port)->itnim_mapped_list), \
+ list_entry) { \
+ __sdev = scsi_device_lookup((__im_port)->shost, \
+ __itnim->channel, \
+ __itnim->scsi_tgt_id, 0); \
+ if (__sdev) { \
+ if ((__lunmask_cfg) == BFA_TRUE) \
+ __sdev->sdev_bflags |= scan_flags; \
+ else \
+ __sdev->sdev_bflags &= ~scan_flags; \
+ scsi_device_put(__sdev); \
+ } \
+ } \
+} while (0)
+
#endif
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index dba72a4e6a1c..1ad0b8225560 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -1906,18 +1906,19 @@ static int bnx2i_queue_scsi_cmd_resp(struct iscsi_session *session,
spin_lock(&session->lock);
task = iscsi_itt_to_task(bnx2i_conn->cls_conn->dd_data,
cqe->itt & ISCSI_CMD_RESPONSE_INDEX);
- if (!task) {
+ if (!task || !task->sc) {
spin_unlock(&session->lock);
return -EINVAL;
}
sc = task->sc;
- spin_unlock(&session->lock);
if (!blk_rq_cpu_valid(sc->request))
cpu = smp_processor_id();
else
cpu = sc->request->cpu;
+ spin_unlock(&session->lock);
+
p = &per_cpu(bnx2i_percpu, cpu);
spin_lock(&p->p_work_lock);
if (unlikely(!p->iothread)) {
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index d1e697190970..1a44b45e7bef 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -2177,7 +2177,7 @@ static int bnx2i_nl_set_path(struct Scsi_Host *shost, struct iscsi_path *params)
return 0;
}
-static mode_t bnx2i_attr_is_visible(int param_type, int param)
+static umode_t bnx2i_attr_is_visible(int param_type, int param)
{
switch (param_type) {
case ISCSI_HOST_PARAM:
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
index 000294a9df80..36739da8bc15 100644
--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
@@ -966,7 +966,7 @@ static int init_act_open(struct cxgbi_sock *csk)
csk->saddr.sin_addr.s_addr = chba->ipv4addr;
csk->rss_qid = 0;
- csk->l2t = t3_l2t_get(t3dev, dst_get_neighbour(dst), ndev);
+ csk->l2t = t3_l2t_get(t3dev, dst, ndev);
if (!csk->l2t) {
pr_err("NO l2t available.\n");
return -EINVAL;
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index ac7a9b1e3e23..5a4a3bfc60cf 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -1127,6 +1127,7 @@ static int init_act_open(struct cxgbi_sock *csk)
struct net_device *ndev = cdev->ports[csk->port_id];
struct port_info *pi = netdev_priv(ndev);
struct sk_buff *skb = NULL;
+ struct neighbour *n;
unsigned int step;
log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
@@ -1141,7 +1142,12 @@ static int init_act_open(struct cxgbi_sock *csk)
cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
cxgbi_sock_get(csk);
- csk->l2t = cxgb4_l2t_get(lldi->l2t, dst_get_neighbour(csk->dst), ndev, 0);
+ n = dst_get_neighbour_noref(csk->dst);
+ if (!n) {
+ pr_err("%s, can't get neighbour of csk->dst.\n", ndev->name);
+ goto rel_resource;
+ }
+ csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, 0);
if (!csk->l2t) {
pr_err("%s, cannot alloc l2t.\n", ndev->name);
goto rel_resource;
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index c10f74a566f2..d3ff9cd40234 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -472,6 +472,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
struct net_device *ndev;
struct cxgbi_device *cdev;
struct rtable *rt = NULL;
+ struct neighbour *n;
struct flowi4 fl4;
struct cxgbi_sock *csk = NULL;
unsigned int mtu = 0;
@@ -493,7 +494,12 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
goto err_out;
}
dst = &rt->dst;
- ndev = dst_get_neighbour(dst)->dev;
+ n = dst_get_neighbour_noref(dst);
+ if (!n) {
+ err = -ENODEV;
+ goto rel_rt;
+ }
+ ndev = n->dev;
if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
pr_info("multi-cast route %pI4, port %u, dev %s.\n",
@@ -507,7 +513,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
ndev = ip_dev_find(&init_net, daddr->sin_addr.s_addr);
mtu = ndev->mtu;
pr_info("rt dev %s, loopback -> %s, mtu %u.\n",
- dst_get_neighbour(dst)->dev->name, ndev->name, mtu);
+ n->dev->name, ndev->name, mtu);
}
cdev = cxgbi_device_find_by_netdev(ndev, &port);
@@ -1862,8 +1868,9 @@ int cxgbi_conn_alloc_pdu(struct iscsi_task *task, u8 opcode)
tdata->skb = alloc_skb(cdev->skb_tx_rsvd + headroom, GFP_ATOMIC);
if (!tdata->skb) {
- pr_warn("alloc skb %u+%u, opcode 0x%x failed.\n",
- cdev->skb_tx_rsvd, headroom, opcode);
+ struct cxgbi_sock *csk = cconn->cep->csk;
+ struct net_device *ndev = cdev->ports[csk->port_id];
+ ndev->stats.tx_dropped++;
return -ENOMEM;
}
@@ -2569,7 +2576,7 @@ void cxgbi_iscsi_cleanup(struct iscsi_transport *itp,
}
EXPORT_SYMBOL_GPL(cxgbi_iscsi_cleanup);
-mode_t cxgbi_attr_is_visible(int param_type, int param)
+umode_t cxgbi_attr_is_visible(int param_type, int param)
{
switch (param_type) {
case ISCSI_HOST_PARAM:
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h
index 20c88279c7a6..80fa99b3d384 100644
--- a/drivers/scsi/cxgbi/libcxgbi.h
+++ b/drivers/scsi/cxgbi/libcxgbi.h
@@ -709,7 +709,7 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *);
void cxgbi_cleanup_task(struct iscsi_task *task);
-mode_t cxgbi_attr_is_visible(int param_type, int param);
+umode_t cxgbi_attr_is_visible(int param_type, int param);
void cxgbi_get_conn_stats(struct iscsi_cls_conn *, struct iscsi_stats *);
int cxgbi_set_conn_param(struct iscsi_cls_conn *,
enum iscsi_param, char *, int);
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index f5b718d3c31b..13aeca3d51f2 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -546,7 +546,7 @@ static struct ParameterData __devinitdata cfg_data[] = {
* command line overrides will be used. If set to 1 then safe and
* slow settings will be used.
*/
-static int use_safe_settings = 0;
+static bool use_safe_settings = 0;
module_param_named(safe, use_safe_settings, bool, 0);
MODULE_PARM_DESC(safe, "Use safe and slow settings only. Default: false");
diff --git a/drivers/scsi/device_handler/scsi_dh.c b/drivers/scsi/device_handler/scsi_dh.c
index 23149b9e297c..48e46f5b77cc 100644
--- a/drivers/scsi/device_handler/scsi_dh.c
+++ b/drivers/scsi/device_handler/scsi_dh.c
@@ -28,7 +28,6 @@
static DEFINE_SPINLOCK(list_lock);
static LIST_HEAD(scsi_dh_list);
-static int scsi_dh_list_idx = 1;
static struct scsi_device_handler *get_device_handler(const char *name)
{
@@ -45,21 +44,6 @@ static struct scsi_device_handler *get_device_handler(const char *name)
return found;
}
-static struct scsi_device_handler *get_device_handler_by_idx(int idx)
-{
- struct scsi_device_handler *tmp, *found = NULL;
-
- spin_lock(&list_lock);
- list_for_each_entry(tmp, &scsi_dh_list, list) {
- if (tmp->idx == idx) {
- found = tmp;
- break;
- }
- }
- spin_unlock(&list_lock);
- return found;
-}
-
/*
* device_handler_match_function - Match a device handler to a device
* @sdev - SCSI device to be tested
@@ -84,23 +68,6 @@ device_handler_match_function(struct scsi_device *sdev)
}
/*
- * device_handler_match_devlist - Match a device handler to a device
- * @sdev - SCSI device to be tested
- *
- * Tests @sdev against all device_handler registered in the devlist.
- * Returns the found device handler or NULL if not found.
- */
-static struct scsi_device_handler *
-device_handler_match_devlist(struct scsi_device *sdev)
-{
- int idx;
-
- idx = scsi_get_device_flags_keyed(sdev, sdev->vendor, sdev->model,
- SCSI_DEVINFO_DH);
- return get_device_handler_by_idx(idx);
-}
-
-/*
* device_handler_match - Attach a device handler to a device
* @scsi_dh - The device handler to match against or NULL
* @sdev - SCSI device to be tested against @scsi_dh
@@ -116,8 +83,6 @@ device_handler_match(struct scsi_device_handler *scsi_dh,
struct scsi_device_handler *found_dh;
found_dh = device_handler_match_function(sdev);
- if (!found_dh)
- found_dh = device_handler_match_devlist(sdev);
if (scsi_dh && found_dh != scsi_dh)
found_dh = NULL;
@@ -361,25 +326,14 @@ static int scsi_dh_notifier_remove(struct device *dev, void *data)
*/
int scsi_register_device_handler(struct scsi_device_handler *scsi_dh)
{
- int i;
if (get_device_handler(scsi_dh->name))
return -EBUSY;
spin_lock(&list_lock);
- scsi_dh->idx = scsi_dh_list_idx++;
list_add(&scsi_dh->list, &scsi_dh_list);
spin_unlock(&list_lock);
- for (i = 0; scsi_dh->devlist && scsi_dh->devlist[i].vendor; i++) {
- scsi_dev_info_list_add_keyed(0,
- scsi_dh->devlist[i].vendor,
- scsi_dh->devlist[i].model,
- NULL,
- scsi_dh->idx,
- SCSI_DEVINFO_DH);
- }
-
bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh, scsi_dh_notifier_add);
printk(KERN_INFO "%s: device handler registered\n", scsi_dh->name);
@@ -396,7 +350,6 @@ EXPORT_SYMBOL_GPL(scsi_register_device_handler);
*/
int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh)
{
- int i;
if (!get_device_handler(scsi_dh->name))
return -ENODEV;
@@ -404,12 +357,6 @@ int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh)
bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh,
scsi_dh_notifier_remove);
- for (i = 0; scsi_dh->devlist && scsi_dh->devlist[i].vendor; i++) {
- scsi_dev_info_list_del_keyed(scsi_dh->devlist[i].vendor,
- scsi_dh->devlist[i].model,
- SCSI_DEVINFO_DH);
- }
-
spin_lock(&list_lock);
list_del(&scsi_dh->list);
spin_unlock(&list_lock);
@@ -588,10 +535,6 @@ static int __init scsi_dh_init(void)
{
int r;
- r = scsi_dev_info_add_list(SCSI_DEVINFO_DH, "SCSI Device Handler");
- if (r)
- return r;
-
r = bus_register_notifier(&scsi_bus_type, &scsi_dh_nb);
if (!r)
@@ -606,7 +549,6 @@ static void __exit scsi_dh_exit(void)
bus_for_each_dev(&scsi_bus_type, NULL, NULL,
scsi_dh_sysfs_attr_remove);
bus_unregister_notifier(&scsi_bus_type, &scsi_dh_nb);
- scsi_dev_info_remove_list(SCSI_DEVINFO_DH);
}
module_init(scsi_dh_init);
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 4ef021291a4d..04c5cea47a22 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -466,6 +466,11 @@ static int alua_check_sense(struct scsi_device *sdev,
* Power On, Reset, or Bus Device Reset, just retry.
*/
return ADD_TO_MLQUEUE;
+ if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x01)
+ /*
+ * Mode Parameters Changed
+ */
+ return ADD_TO_MLQUEUE;
if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x06)
/*
* ALUA state changed
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
index 591186cf1896..e1c8be06de9d 100644
--- a/drivers/scsi/device_handler/scsi_dh_emc.c
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -629,6 +629,24 @@ static const struct scsi_dh_devlist clariion_dev_list[] = {
{NULL, NULL},
};
+static bool clariion_match(struct scsi_device *sdev)
+{
+ int i;
+
+ if (scsi_device_tpgs(sdev))
+ return false;
+
+ for (i = 0; clariion_dev_list[i].vendor; i++) {
+ if (!strncmp(sdev->vendor, clariion_dev_list[i].vendor,
+ strlen(clariion_dev_list[i].vendor)) &&
+ !strncmp(sdev->model, clariion_dev_list[i].model,
+ strlen(clariion_dev_list[i].model))) {
+ return true;
+ }
+ }
+ return false;
+}
+
static int clariion_bus_attach(struct scsi_device *sdev);
static void clariion_bus_detach(struct scsi_device *sdev);
@@ -642,6 +660,7 @@ static struct scsi_device_handler clariion_dh = {
.activate = clariion_activate,
.prep_fn = clariion_prep_fn,
.set_params = clariion_set_params,
+ .match = clariion_match,
};
static int clariion_bus_attach(struct scsi_device *sdev)
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
index 0f86a18b157d..084062bb8ee9 100644
--- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c
+++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
@@ -320,6 +320,24 @@ static const struct scsi_dh_devlist hp_sw_dh_data_list[] = {
{NULL, NULL},
};
+static bool hp_sw_match(struct scsi_device *sdev)
+{
+ int i;
+
+ if (scsi_device_tpgs(sdev))
+ return false;
+
+ for (i = 0; hp_sw_dh_data_list[i].vendor; i++) {
+ if (!strncmp(sdev->vendor, hp_sw_dh_data_list[i].vendor,
+ strlen(hp_sw_dh_data_list[i].vendor)) &&
+ !strncmp(sdev->model, hp_sw_dh_data_list[i].model,
+ strlen(hp_sw_dh_data_list[i].model))) {
+ return true;
+ }
+ }
+ return false;
+}
+
static int hp_sw_bus_attach(struct scsi_device *sdev);
static void hp_sw_bus_detach(struct scsi_device *sdev);
@@ -331,6 +349,7 @@ static struct scsi_device_handler hp_sw_dh = {
.detach = hp_sw_bus_detach,
.activate = hp_sw_activate,
.prep_fn = hp_sw_prep_fn,
+ .match = hp_sw_match,
};
static int hp_sw_bus_attach(struct scsi_device *sdev)
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 1d3127920063..53a31c753cb1 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -820,6 +820,24 @@ static const struct scsi_dh_devlist rdac_dev_list[] = {
{NULL, NULL},
};
+static bool rdac_match(struct scsi_device *sdev)
+{
+ int i;
+
+ if (scsi_device_tpgs(sdev))
+ return false;
+
+ for (i = 0; rdac_dev_list[i].vendor; i++) {
+ if (!strncmp(sdev->vendor, rdac_dev_list[i].vendor,
+ strlen(rdac_dev_list[i].vendor)) &&
+ !strncmp(sdev->model, rdac_dev_list[i].model,
+ strlen(rdac_dev_list[i].model))) {
+ return true;
+ }
+ }
+ return false;
+}
+
static int rdac_bus_attach(struct scsi_device *sdev);
static void rdac_bus_detach(struct scsi_device *sdev);
@@ -832,6 +850,7 @@ static struct scsi_device_handler rdac_dh = {
.attach = rdac_bus_attach,
.detach = rdac_bus_detach,
.activate = rdac_activate,
+ .match = rdac_match,
};
static int rdac_bus_attach(struct scsi_device *sdev)
@@ -934,6 +953,8 @@ static int __init rdac_init(void)
if (!kmpath_rdacd) {
scsi_unregister_device_handler(&rdac_dh);
printk(KERN_ERR "kmpath_rdacd creation failed.\n");
+
+ r = -EINVAL;
}
done:
return r;
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index cefbe44bb84a..e9599600aa23 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -31,6 +31,8 @@
#include <linux/sysfs.h>
#include <linux/ctype.h>
#include <linux/workqueue.h>
+#include <net/dcbnl.h>
+#include <net/dcbevent.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_transport.h>
@@ -56,7 +58,11 @@ module_param_named(ddp_min, fcoe_ddp_min, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ddp_min, "Minimum I/O size in bytes for " \
"Direct Data Placement (DDP).");
-DEFINE_MUTEX(fcoe_config_mutex);
+unsigned int fcoe_debug_logging;
+module_param_named(debug_logging, fcoe_debug_logging, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
+
+static DEFINE_MUTEX(fcoe_config_mutex);
static struct workqueue_struct *fcoe_wq;
@@ -65,8 +71,8 @@ static DECLARE_COMPLETION(fcoe_flush_completion);
/* fcoe host list */
/* must only by accessed under the RTNL mutex */
-LIST_HEAD(fcoe_hostlist);
-DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu);
+static LIST_HEAD(fcoe_hostlist);
+static DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu);
/* Function Prototypes */
static int fcoe_reset(struct Scsi_Host *);
@@ -101,6 +107,8 @@ static int fcoe_ddp_done(struct fc_lport *, u16);
static int fcoe_ddp_target(struct fc_lport *, u16, struct scatterlist *,
unsigned int);
static int fcoe_cpu_callback(struct notifier_block *, unsigned long, void *);
+static int fcoe_dcb_app_notification(struct notifier_block *notifier,
+ ulong event, void *ptr);
static bool fcoe_match(struct net_device *netdev);
static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode);
@@ -129,6 +137,11 @@ static struct notifier_block fcoe_cpu_notifier = {
.notifier_call = fcoe_cpu_callback,
};
+/* notification function for DCB events */
+static struct notifier_block dcb_notifier = {
+ .notifier_call = fcoe_dcb_app_notification,
+};
+
static struct scsi_transport_template *fcoe_nport_scsi_transport;
static struct scsi_transport_template *fcoe_vport_scsi_transport;
@@ -148,7 +161,7 @@ static struct libfc_function_template fcoe_libfc_fcn_templ = {
.lport_set_port_id = fcoe_set_port_id,
};
-struct fc_function_template fcoe_nport_fc_functions = {
+static struct fc_function_template fcoe_nport_fc_functions = {
.show_host_node_name = 1,
.show_host_port_name = 1,
.show_host_supported_classes = 1,
@@ -188,7 +201,7 @@ struct fc_function_template fcoe_nport_fc_functions = {
.bsg_request = fc_lport_bsg_request,
};
-struct fc_function_template fcoe_vport_fc_functions = {
+static struct fc_function_template fcoe_vport_fc_functions = {
.show_host_node_name = 1,
.show_host_port_name = 1,
.show_host_supported_classes = 1,
@@ -424,7 +437,7 @@ static inline void fcoe_interface_put(struct fcoe_interface *fcoe)
*
* Caller must be holding the RTNL mutex
*/
-void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
+static void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
{
struct net_device *netdev = fcoe->netdev;
struct fcoe_ctlr *fip = &fcoe->ctlr;
@@ -739,7 +752,7 @@ static int fcoe_shost_config(struct fc_lport *lport, struct device *dev)
*
* Returns: True for read types I/O, otherwise returns false.
*/
-bool fcoe_oem_match(struct fc_frame *fp)
+static bool fcoe_oem_match(struct fc_frame *fp)
{
struct fc_frame_header *fh = fc_frame_header_get(fp);
struct fcp_cmnd *fcp;
@@ -747,11 +760,12 @@ bool fcoe_oem_match(struct fc_frame *fp)
if (fc_fcp_is_read(fr_fsp(fp)) &&
(fr_fsp(fp)->data_len > fcoe_ddp_min))
return true;
- else if (!(ntoh24(fh->fh_f_ctl) & FC_FC_EX_CTX)) {
+ else if ((fr_fsp(fp) == NULL) &&
+ (fh->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD) &&
+ (ntohs(fh->fh_rx_id) == FC_XID_UNKNOWN)) {
fcp = fc_frame_payload_get(fp, sizeof(*fcp));
- if (ntohs(fh->fh_rx_id) == FC_XID_UNKNOWN &&
- fcp && (ntohl(fcp->fc_dl) > fcoe_ddp_min) &&
- (fcp->fc_flags & FCP_CFL_WRDATA))
+ if ((fcp->fc_flags & FCP_CFL_WRDATA) &&
+ (ntohl(fcp->fc_dl) > fcoe_ddp_min))
return true;
}
return false;
@@ -1097,7 +1111,7 @@ static int __init fcoe_if_init(void)
*
* Returns: 0 on success
*/
-int __exit fcoe_if_exit(void)
+static int __exit fcoe_if_exit(void)
{
fc_release_transport(fcoe_nport_scsi_transport);
fc_release_transport(fcoe_vport_scsi_transport);
@@ -1286,7 +1300,7 @@ static inline unsigned int fcoe_select_cpu(void)
*
* Returns: 0 for success
*/
-int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
+static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
struct packet_type *ptype, struct net_device *olddev)
{
struct fc_lport *lport;
@@ -1442,7 +1456,7 @@ static int fcoe_alloc_paged_crc_eof(struct sk_buff *skb, int tlen)
*
* Return: 0 for success
*/
-int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
+static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
{
int wlen;
u32 crc;
@@ -1522,6 +1536,8 @@ int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
skb_reset_network_header(skb);
skb->mac_len = elen;
skb->protocol = htons(ETH_P_FCOE);
+ skb->priority = port->priority;
+
if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN &&
fcoe->realdev->features & NETIF_F_HW_VLAN_TX) {
skb->vlan_tci = VLAN_TAG_PRESENT |
@@ -1624,6 +1640,7 @@ static inline int fcoe_filter_frames(struct fc_lport *lport,
stats->InvalidCRCCount++;
if (stats->InvalidCRCCount < 5)
printk(KERN_WARNING "fcoe: dropping frame with CRC error\n");
+ put_cpu();
return -EINVAL;
}
@@ -1659,8 +1676,7 @@ static void fcoe_recv_frame(struct sk_buff *skb)
skb->dev ? skb->dev->name : "<NULL>");
port = lport_priv(lport);
- if (skb_is_nonlinear(skb))
- skb_linearize(skb); /* not ideal */
+	skb_linearize(skb); /* the check for skb_is_nonlinear() is done inside skb_linearize() */
/*
* Frame length checks and setting up the header pointers
@@ -1716,7 +1732,7 @@ drop:
*
* Return: 0 for success
*/
-int fcoe_percpu_receive_thread(void *arg)
+static int fcoe_percpu_receive_thread(void *arg)
{
struct fcoe_percpu_s *p = arg;
struct sk_buff *skb;
@@ -1746,6 +1762,7 @@ int fcoe_percpu_receive_thread(void *arg)
*/
static void fcoe_dev_setup(void)
{
+ register_dcbevent_notifier(&dcb_notifier);
register_netdevice_notifier(&fcoe_notifier);
}
@@ -1754,9 +1771,69 @@ static void fcoe_dev_setup(void)
*/
static void fcoe_dev_cleanup(void)
{
+ unregister_dcbevent_notifier(&dcb_notifier);
unregister_netdevice_notifier(&fcoe_notifier);
}
+static struct fcoe_interface *
+fcoe_hostlist_lookup_realdev_port(struct net_device *netdev)
+{
+ struct fcoe_interface *fcoe;
+ struct net_device *real_dev;
+
+ list_for_each_entry(fcoe, &fcoe_hostlist, list) {
+ if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN)
+ real_dev = vlan_dev_real_dev(fcoe->netdev);
+ else
+ real_dev = fcoe->netdev;
+
+ if (netdev == real_dev)
+ return fcoe;
+ }
+ return NULL;
+}
+
+static int fcoe_dcb_app_notification(struct notifier_block *notifier,
+ ulong event, void *ptr)
+{
+ struct dcb_app_type *entry = ptr;
+ struct fcoe_interface *fcoe;
+ struct net_device *netdev;
+ struct fcoe_port *port;
+ int prio;
+
+ if (entry->app.selector != DCB_APP_IDTYPE_ETHTYPE)
+ return NOTIFY_OK;
+
+ netdev = dev_get_by_index(&init_net, entry->ifindex);
+ if (!netdev)
+ return NOTIFY_OK;
+
+ fcoe = fcoe_hostlist_lookup_realdev_port(netdev);
+ dev_put(netdev);
+ if (!fcoe)
+ return NOTIFY_OK;
+
+ if (entry->dcbx & DCB_CAP_DCBX_VER_CEE)
+ prio = ffs(entry->app.priority) - 1;
+ else
+ prio = entry->app.priority;
+
+ if (prio < 0)
+ return NOTIFY_OK;
+
+ if (entry->app.protocol == ETH_P_FIP ||
+ entry->app.protocol == ETH_P_FCOE)
+ fcoe->ctlr.priority = prio;
+
+ if (entry->app.protocol == ETH_P_FCOE) {
+ port = lport_priv(fcoe->ctlr.lp);
+ port->priority = prio;
+ }
+
+ return NOTIFY_OK;
+}
+
/**
* fcoe_device_notification() - Handler for net device events
* @notifier: The context of the notification
@@ -1965,6 +2042,46 @@ static bool fcoe_match(struct net_device *netdev)
}
/**
+ * fcoe_dcb_create() - Initialize DCB attributes and hooks
+ * @netdev: The net_device object of the L2 link that should be queried
+ * @port: The fcoe_port to bind FCoE APP priority with
+ * @
+ */
+static void fcoe_dcb_create(struct fcoe_interface *fcoe)
+{
+#ifdef CONFIG_DCB
+ int dcbx;
+ u8 fup, up;
+ struct net_device *netdev = fcoe->realdev;
+ struct fcoe_port *port = lport_priv(fcoe->ctlr.lp);
+ struct dcb_app app = {
+ .priority = 0,
+ .protocol = ETH_P_FCOE
+ };
+
+ /* setup DCB priority attributes. */
+ if (netdev && netdev->dcbnl_ops && netdev->dcbnl_ops->getdcbx) {
+ dcbx = netdev->dcbnl_ops->getdcbx(netdev);
+
+ if (dcbx & DCB_CAP_DCBX_VER_IEEE) {
+ app.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE;
+ up = dcb_ieee_getapp_mask(netdev, &app);
+ app.protocol = ETH_P_FIP;
+ fup = dcb_ieee_getapp_mask(netdev, &app);
+ } else {
+ app.selector = DCB_APP_IDTYPE_ETHTYPE;
+ up = dcb_getapp(netdev, &app);
+ app.protocol = ETH_P_FIP;
+ fup = dcb_getapp(netdev, &app);
+ }
+
+ port->priority = ffs(up) ? ffs(up) - 1 : 0;
+ fcoe->ctlr.priority = ffs(fup) ? ffs(fup) - 1 : port->priority;
+ }
+#endif
+}
+
+/**
* fcoe_create() - Create a fcoe interface
* @netdev : The net_device object the Ethernet interface to create on
* @fip_mode: The FIP mode for this creation
@@ -2007,6 +2124,9 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
/* Make this the "master" N_Port */
fcoe->ctlr.lp = lport;
+ /* setup DCB priority attributes. */
+ fcoe_dcb_create(fcoe);
+
/* add to lports list */
fcoe_hostlist_add(lport);
@@ -2030,7 +2150,7 @@ out_nortnl:
* Returns: 0 if the ethtool query was successful
* -1 if the ethtool query failed
*/
-int fcoe_link_speed_update(struct fc_lport *lport)
+static int fcoe_link_speed_update(struct fc_lport *lport)
{
struct net_device *netdev = fcoe_netdev(lport);
struct ethtool_cmd ecmd;
@@ -2064,7 +2184,7 @@ int fcoe_link_speed_update(struct fc_lport *lport)
* Returns: 0 if link is UP and OK, -1 if not
*
*/
-int fcoe_link_ok(struct fc_lport *lport)
+static int fcoe_link_ok(struct fc_lport *lport)
{
struct net_device *netdev = fcoe_netdev(lport);
@@ -2084,7 +2204,7 @@ int fcoe_link_ok(struct fc_lport *lport)
 * there are no packets that will be handled by the lport, but also that any
 * threads already handling packets have returned.
*/
-void fcoe_percpu_clean(struct fc_lport *lport)
+static void fcoe_percpu_clean(struct fc_lport *lport)
{
struct fcoe_percpu_s *pp;
struct fcoe_rcv_info *fr;
@@ -2135,7 +2255,7 @@ void fcoe_percpu_clean(struct fc_lport *lport)
*
* Returns: Always 0 (return value required by FC transport template)
*/
-int fcoe_reset(struct Scsi_Host *shost)
+static int fcoe_reset(struct Scsi_Host *shost)
{
struct fc_lport *lport = shost_priv(shost);
struct fcoe_port *port = lport_priv(lport);
diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h
index 6c6884bcf840..bcc89e639495 100644
--- a/drivers/scsi/fcoe/fcoe.h
+++ b/drivers/scsi/fcoe/fcoe.h
@@ -40,9 +40,7 @@
#define FCOE_MIN_XID 0x0000 /* the min xid supported by fcoe_sw */
#define FCOE_MAX_XID 0x0FFF /* the max xid supported by fcoe_sw */
-unsigned int fcoe_debug_logging;
-module_param_named(debug_logging, fcoe_debug_logging, int, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
+extern unsigned int fcoe_debug_logging;
#define FCOE_LOGGING 0x01 /* General logging, not categorized */
#define FCOE_NETDEV_LOGGING 0x02 /* Netdevice logging */
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index c74c4b8e71ef..e7522dcc296e 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -320,6 +320,7 @@ static void fcoe_ctlr_solicit(struct fcoe_ctlr *fip, struct fcoe_fcf *fcf)
skb_put(skb, sizeof(*sol));
skb->protocol = htons(ETH_P_FIP);
+ skb->priority = fip->priority;
skb_reset_mac_header(skb);
skb_reset_network_header(skb);
fip->send(fip, skb);
@@ -474,6 +475,7 @@ static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip,
}
skb_put(skb, len);
skb->protocol = htons(ETH_P_FIP);
+ skb->priority = fip->priority;
skb_reset_mac_header(skb);
skb_reset_network_header(skb);
fip->send(fip, skb);
@@ -566,6 +568,7 @@ static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip, struct fc_lport *lport,
cap->fip.fip_dl_len = htons(dlen / FIP_BPW);
skb->protocol = htons(ETH_P_FIP);
+ skb->priority = fip->priority;
skb_reset_mac_header(skb);
skb_reset_network_header(skb);
return 0;
@@ -1911,6 +1914,7 @@ static void fcoe_ctlr_vn_send(struct fcoe_ctlr *fip,
skb_put(skb, len);
skb->protocol = htons(ETH_P_FIP);
+ skb->priority = fip->priority;
skb_reset_mac_header(skb);
skb_reset_network_header(skb);
diff --git a/drivers/scsi/gdth.h b/drivers/scsi/gdth.h
index d969855ac64a..d3e4d7c6f577 100644
--- a/drivers/scsi/gdth.h
+++ b/drivers/scsi/gdth.h
@@ -359,7 +359,7 @@ typedef struct {
u32 cmd_buff_addr2; /* physical address of cmd buffer 1 */
u32 cmd_buff_u_addr2; /* reserved for 64 bit addressing */
u32 cmd_buff_indx2; /* cmd buf addr1 unique identifier */
- u32 cmd_buff_size; /* size of each cmd bufer in bytes */
+ u32 cmd_buff_size; /* size of each cmd buffer in bytes */
u32 reserved1;
u32 reserved2;
} __attribute__((packed)) gdth_perf_modes;
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 865d452542be..b96962c39449 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -293,12 +293,14 @@ static u32 unresettable_controller[] = {
0x3215103C, /* Smart Array E200i */
0x3237103C, /* Smart Array E500 */
0x323D103C, /* Smart Array P700m */
+ 0x40800E11, /* Smart Array 5i */
0x409C0E11, /* Smart Array 6400 */
0x409D0E11, /* Smart Array 6400 EM */
};
/* List of controllers which cannot even be soft reset */
static u32 soft_unresettable_controller[] = {
+ 0x40800E11, /* Smart Array 5i */
/* Exclude 640x boards. These are two pci devices in one slot
* which share a battery backed cache module. One controls the
* cache, the other accesses the cache through the one that controls
@@ -4072,10 +4074,10 @@ static int hpsa_request_irq(struct ctlr_info *h,
if (h->msix_vector || h->msi_vector)
rc = request_irq(h->intr[h->intr_mode], msixhandler,
- IRQF_DISABLED, h->devname, h);
+ 0, h->devname, h);
else
rc = request_irq(h->intr[h->intr_mode], intxhandler,
- IRQF_DISABLED, h->devname, h);
+ IRQF_SHARED, h->devname, h);
if (rc) {
dev_err(&h->pdev->dev, "unable to get irq %d for %s\n",
h->intr[h->intr_mode], h->devname);
@@ -4269,7 +4271,9 @@ static void stop_controller_lockup_detector(struct ctlr_info *h)
remove_ctlr_from_lockup_detector_list(h);
/* If the list of ctlr's to monitor is empty, stop the thread */
if (list_empty(&hpsa_ctlr_list)) {
+ spin_unlock_irqrestore(&lockup_detector_lock, flags);
kthread_stop(hpsa_lockup_detector);
+ spin_lock_irqsave(&lockup_detector_lock, flags);
hpsa_lockup_detector = NULL;
}
spin_unlock_irqrestore(&lockup_detector_lock, flags);
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index fd860d952b28..67b169b7a5be 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -7638,8 +7638,12 @@ static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
**/
static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
{
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+
ENTER;
- pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
+ if (ioa_cfg->cfg_locked)
+ pci_cfg_access_unlock(ioa_cfg->pdev);
+ ioa_cfg->cfg_locked = 0;
ipr_cmd->job_step = ipr_reset_restore_cfg_space;
LEAVE;
return IPR_RC_JOB_CONTINUE;
@@ -7660,8 +7664,6 @@ static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
int rc = PCIBIOS_SUCCESSFUL;
ENTER;
- pci_block_user_cfg_access(ioa_cfg->pdev);
-
if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
writel(IPR_UPROCI_SIS64_START_BIST,
ioa_cfg->regs.set_uproc_interrupt_reg32);
@@ -7673,7 +7675,9 @@ static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
rc = IPR_RC_JOB_RETURN;
} else {
- pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
+ if (ioa_cfg->cfg_locked)
+ pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
+ ioa_cfg->cfg_locked = 0;
ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
rc = IPR_RC_JOB_CONTINUE;
}
@@ -7716,7 +7720,6 @@ static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
struct pci_dev *pdev = ioa_cfg->pdev;
ENTER;
- pci_block_user_cfg_access(pdev);
pci_set_pcie_reset_state(pdev, pcie_warm_reset);
ipr_cmd->job_step = ipr_reset_slot_reset_done;
ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);
@@ -7725,6 +7728,56 @@ static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
}
/**
+ * ipr_reset_block_config_access_wait - Wait for permission to block config access
+ * @ipr_cmd: ipr command struct
+ *
+ * Description: This attempts to block config access to the IOA.
+ *
+ * Return value:
+ * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
+ **/
+static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
+{
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+ int rc = IPR_RC_JOB_CONTINUE;
+
+ if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
+ ioa_cfg->cfg_locked = 1;
+ ipr_cmd->job_step = ioa_cfg->reset;
+ } else {
+ if (ipr_cmd->u.time_left) {
+ rc = IPR_RC_JOB_RETURN;
+ ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
+ ipr_reset_start_timer(ipr_cmd,
+ IPR_CHECK_FOR_RESET_TIMEOUT);
+ } else {
+ ipr_cmd->job_step = ioa_cfg->reset;
+ dev_err(&ioa_cfg->pdev->dev,
+ "Timed out waiting to lock config access. Resetting anyway.\n");
+ }
+ }
+
+ return rc;
+}
+
+/**
+ * ipr_reset_block_config_access - Block config access to the IOA
+ * @ipr_cmd: ipr command struct
+ *
+ * Description: This attempts to block config access to the IOA
+ *
+ * Return value:
+ * IPR_RC_JOB_CONTINUE
+ **/
+static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
+{
+ ipr_cmd->ioa_cfg->cfg_locked = 0;
+ ipr_cmd->job_step = ipr_reset_block_config_access_wait;
+ ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
+ return IPR_RC_JOB_CONTINUE;
+}
+
+/**
* ipr_reset_allowed - Query whether or not IOA can be reset
* @ioa_cfg: ioa config struct
*
@@ -7763,7 +7816,7 @@ static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
} else {
- ipr_cmd->job_step = ioa_cfg->reset;
+ ipr_cmd->job_step = ipr_reset_block_config_access;
rc = IPR_RC_JOB_CONTINUE;
}
@@ -7796,7 +7849,7 @@ static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
} else {
- ipr_cmd->job_step = ioa_cfg->reset;
+ ipr_cmd->job_step = ipr_reset_block_config_access;
}
ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index ac84736c1b9c..b13f9cc12279 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -1387,6 +1387,7 @@ struct ipr_ioa_cfg {
u8 msi_received:1;
u8 sis64:1;
u8 dump_timeout:1;
+ u8 cfg_locked:1;
u8 revid;
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index 218f71a8726e..d77891e5683b 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -4494,7 +4494,7 @@ ips_init_scb(ips_ha_t * ha, ips_scb_t * scb)
/* */
/* Initialize a CCB to default values */
/* */
-/* ASSUMED to be callled from within a lock */
+/* ASSUMED to be called from within a lock */
/* */
/****************************************************************************/
static ips_scb_t *
diff --git a/drivers/scsi/isci/firmware/Makefile b/drivers/scsi/isci/firmware/Makefile
deleted file mode 100644
index 5f54461cabc5..000000000000
--- a/drivers/scsi/isci/firmware/Makefile
+++ /dev/null
@@ -1,19 +0,0 @@
-# Makefile for create_fw
-#
-CC=gcc
-CFLAGS=-c -Wall -O2 -g
-LDFLAGS=
-SOURCES=create_fw.c
-OBJECTS=$(SOURCES:.cpp=.o)
-EXECUTABLE=create_fw
-
-all: $(SOURCES) $(EXECUTABLE)
-
-$(EXECUTABLE): $(OBJECTS)
- $(CC) $(LDFLAGS) $(OBJECTS) -o $@
-
-.c.o:
- $(CC) $(CFLAGS) $< -O $@
-
-clean:
- rm -f *.o $(EXECUTABLE)
diff --git a/drivers/scsi/isci/firmware/README b/drivers/scsi/isci/firmware/README
deleted file mode 100644
index 8056d2bd233b..000000000000
--- a/drivers/scsi/isci/firmware/README
+++ /dev/null
@@ -1,36 +0,0 @@
-This defines the temporary binary blob we are to pass to the SCU
-driver to emulate the binary firmware that we will eventually be
-able to access via NVRAM on the SCU controller.
-
-The current size of the binary blob is expected to be 149 bytes or larger
-
-Header Types:
-0x1: Phy Masks
-0x2: Phy Gens
-0x3: SAS Addrs
-0xff: End of Data
-
-ID string - u8[12]: "#SCU MAGIC#\0"
-Version - u8: 1
-SubVersion - u8: 0
-
-Header Type - u8: 0x1
-Size - u8: 8
-Phy Mask - u32[8]
-
-Header Type - u8: 0x2
-Size - u8: 8
-Phy Gen - u32[8]
-
-Header Type - u8: 0x3
-Size - u8: 8
-Sas Addr - u64[8]
-
-Header Type - u8: 0xf
-
-
-==============================================================================
-
-Place isci_firmware.bin in /lib/firmware
-Be sure to recreate the initramfs image to include the firmware.
-
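The README removed above describes the legacy blob layout only in prose; purely as
an illustration, it corresponds roughly to the C view sketched below. The struct
and field names here are invented for this sketch and are not taken from the
driver or the removed create_fw tool.

	#include <stdint.h>

	/* hypothetical header at the start of the blob described above */
	struct scu_fw_blob_header {
		char    id[12];      /* ID string: "#SCU MAGIC#\0" */
		uint8_t version;     /* 1 */
		uint8_t subversion;  /* 0 */
	};

	/* each data section starts with a type/size pair */
	struct scu_fw_section_hdr {
		uint8_t type;  /* 0x1 phy masks, 0x2 phy gens, 0x3 SAS addrs, 0xff end of data */
		uint8_t size;  /* entry count; 8 in the README's example */
		/* payload follows: u32[8] for types 0x1/0x2, u64[8] for type 0x3 */
	};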
diff --git a/drivers/scsi/isci/firmware/create_fw.c b/drivers/scsi/isci/firmware/create_fw.c
deleted file mode 100644
index c7a2887a7e95..000000000000
--- a/drivers/scsi/isci/firmware/create_fw.c
+++ /dev/null
@@ -1,99 +0,0 @@
-#include <stdio.h>
-#include <stdlib.h>
-#include <unistd.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <fcntl.h>
-#include <string.h>
-#include <errno.h>
-#include <asm/types.h>
-#include <strings.h>
-#include <stdint.h>
-
-#include "create_fw.h"
-#include "../probe_roms.h"
-
-int write_blob(struct isci_orom *isci_orom)
-{
- FILE *fd;
- int err;
- size_t count;
-
- fd = fopen(blob_name, "w+");
- if (!fd) {
- perror("Open file for write failed");
- fclose(fd);
- return -EIO;
- }
-
- count = fwrite(isci_orom, sizeof(struct isci_orom), 1, fd);
- if (count != 1) {
- perror("Write data failed");
- fclose(fd);
- return -EIO;
- }
-
- fclose(fd);
-
- return 0;
-}
-
-void set_binary_values(struct isci_orom *isci_orom)
-{
- int ctrl_idx, phy_idx, port_idx;
-
- /* setting OROM signature */
- strncpy(isci_orom->hdr.signature, sig, strlen(sig));
- isci_orom->hdr.version = version;
- isci_orom->hdr.total_block_length = sizeof(struct isci_orom);
- isci_orom->hdr.hdr_length = sizeof(struct sci_bios_oem_param_block_hdr);
- isci_orom->hdr.num_elements = num_elements;
-
- for (ctrl_idx = 0; ctrl_idx < 2; ctrl_idx++) {
- isci_orom->ctrl[ctrl_idx].controller.mode_type = mode_type;
- isci_orom->ctrl[ctrl_idx].controller.max_concurrent_dev_spin_up =
- max_num_concurrent_dev_spin_up;
- isci_orom->ctrl[ctrl_idx].controller.do_enable_ssc =
- enable_ssc;
-
- for (port_idx = 0; port_idx < 4; port_idx++)
- isci_orom->ctrl[ctrl_idx].ports[port_idx].phy_mask =
- phy_mask[ctrl_idx][port_idx];
-
- for (phy_idx = 0; phy_idx < 4; phy_idx++) {
- isci_orom->ctrl[ctrl_idx].phys[phy_idx].sas_address.high =
- (__u32)(sas_addr[ctrl_idx][phy_idx] >> 32);
- isci_orom->ctrl[ctrl_idx].phys[phy_idx].sas_address.low =
- (__u32)(sas_addr[ctrl_idx][phy_idx]);
-
- isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control0 =
- afe_tx_amp_control0;
- isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control1 =
- afe_tx_amp_control1;
- isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control2 =
- afe_tx_amp_control2;
- isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control3 =
- afe_tx_amp_control3;
- }
- }
-}
-
-int main(void)
-{
- int err;
- struct isci_orom *isci_orom;
-
- isci_orom = malloc(sizeof(struct isci_orom));
- memset(isci_orom, 0, sizeof(struct isci_orom));
-
- set_binary_values(isci_orom);
-
- err = write_blob(isci_orom);
- if (err < 0) {
- free(isci_orom);
- return err;
- }
-
- free(isci_orom);
- return 0;
-}
diff --git a/drivers/scsi/isci/firmware/create_fw.h b/drivers/scsi/isci/firmware/create_fw.h
deleted file mode 100644
index 5f298828d22e..000000000000
--- a/drivers/scsi/isci/firmware/create_fw.h
+++ /dev/null
@@ -1,77 +0,0 @@
-#ifndef _CREATE_FW_H_
-#define _CREATE_FW_H_
-#include "../probe_roms.h"
-
-
-/* we are configuring for 2 SCUs */
-static const int num_elements = 2;
-
-/*
- * For all defined arrays:
- * elements 0-3 are for SCU0, ports 0-3
- * elements 4-7 are for SCU1, ports 0-3
- *
- * valid configurations for one SCU are:
- * P0 P1 P2 P3
- * ----------------
- * 0xF,0x0,0x0,0x0 # 1 x4 port
- * 0x3,0x0,0x4,0x8 # Phys 0 and 1 are a x2 port, phy 2 and phy 3 are each x1
- * # ports
- * 0x1,0x2,0xC,0x0 # Phys 0 and 1 are each x1 ports, phy 2 and phy 3 are a x2
- * # port
- * 0x3,0x0,0xC,0x0 # Phys 0 and 1 are a x2 port, phy 2 and phy 3 are a x2 port
- * 0x1,0x2,0x4,0x8 # Each phy is a x1 port (this is the default configuration)
- *
- * if there is a port/phy on which you do not wish to override the default
- * values, use the value assigned to UNINIT_PARAM (255).
- */
-
-/* discovery mode type (port auto config mode by default ) */
-
-/*
- * if there is a port/phy on which you do not wish to override the default
- * values, use the value "0000000000000000". SAS address of zero's is
- * considered invalid and will not be used.
- */
-#ifdef MPC
-static const int mode_type = SCIC_PORT_MANUAL_CONFIGURATION_MODE;
-static const __u8 phy_mask[2][4] = { {1, 2, 4, 8},
- {1, 2, 4, 8} };
-static const unsigned long long sas_addr[2][4] = { { 0x5FCFFFFFF0000001ULL,
- 0x5FCFFFFFF0000002ULL,
- 0x5FCFFFFFF0000003ULL,
- 0x5FCFFFFFF0000004ULL },
- { 0x5FCFFFFFF0000005ULL,
- 0x5FCFFFFFF0000006ULL,
- 0x5FCFFFFFF0000007ULL,
- 0x5FCFFFFFF0000008ULL } };
-#else /* APC (default) */
-static const int mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE;
-static const __u8 phy_mask[2][4];
-static const unsigned long long sas_addr[2][4] = { { 0x5FCFFFFF00000001ULL,
- 0x5FCFFFFF00000001ULL,
- 0x5FCFFFFF00000001ULL,
- 0x5FCFFFFF00000001ULL },
- { 0x5FCFFFFF00000002ULL,
- 0x5FCFFFFF00000002ULL,
- 0x5FCFFFFF00000002ULL,
- 0x5FCFFFFF00000002ULL } };
-#endif
-
-/* Maximum number of concurrent device spin up */
-static const int max_num_concurrent_dev_spin_up = 1;
-
-/* enable of ssc operation */
-static const int enable_ssc;
-
-/* AFE_TX_AMP_CONTROL */
-static const unsigned int afe_tx_amp_control0 = 0x000bdd08;
-static const unsigned int afe_tx_amp_control1 = 0x000ffc00;
-static const unsigned int afe_tx_amp_control2 = 0x000b7c09;
-static const unsigned int afe_tx_amp_control3 = 0x000afc6e;
-
-static const char blob_name[] = "isci_firmware.bin";
-static const char sig[] = "ISCUOEMB";
-static const unsigned char version = 0x10;
-
-#endif
diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c
index e7fe9c4c85b8..1a65d6514237 100644
--- a/drivers/scsi/isci/host.c
+++ b/drivers/scsi/isci/host.c
@@ -899,7 +899,8 @@ static enum sci_status sci_controller_start_next_phy(struct isci_host *ihost)
*/
if ((iphy->is_in_link_training == false && state == SCI_PHY_INITIAL) ||
(iphy->is_in_link_training == false && state == SCI_PHY_STOPPED) ||
- (iphy->is_in_link_training == true && is_phy_starting(iphy))) {
+ (iphy->is_in_link_training == true && is_phy_starting(iphy)) ||
+ (ihost->port_agent.phy_ready_mask != ihost->port_agent.phy_configured_mask)) {
is_controller_start_complete = false;
break;
}
@@ -1666,6 +1667,9 @@ static void sci_controller_set_default_config_parameters(struct isci_host *ihost
/* Default to no SSC operation. */
ihost->oem_parameters.controller.do_enable_ssc = false;
+ /* Default to short cables on all phys. */
+ ihost->oem_parameters.controller.cable_selection_mask = 0;
+
/* Initialize all of the port parameter information to narrow ports. */
for (index = 0; index < SCI_MAX_PORTS; index++) {
ihost->oem_parameters.ports[index].phy_mask = 0;
@@ -1673,8 +1677,9 @@ static void sci_controller_set_default_config_parameters(struct isci_host *ihost
/* Initialize all of the phy parameter information. */
for (index = 0; index < SCI_MAX_PHYS; index++) {
- /* Default to 6G (i.e. Gen 3) for now. */
- ihost->user_parameters.phys[index].max_speed_generation = 3;
+ /* Default to 3G (i.e. Gen 2). */
+ ihost->user_parameters.phys[index].max_speed_generation =
+ SCIC_SDS_PARM_GEN2_SPEED;
/* the frequencies cannot be 0 */
ihost->user_parameters.phys[index].align_insertion_frequency = 0x7f;
@@ -1694,7 +1699,7 @@ static void sci_controller_set_default_config_parameters(struct isci_host *ihost
ihost->user_parameters.ssp_inactivity_timeout = 5;
ihost->user_parameters.stp_max_occupancy_timeout = 5;
ihost->user_parameters.ssp_max_occupancy_timeout = 20;
- ihost->user_parameters.no_outbound_task_timeout = 20;
+ ihost->user_parameters.no_outbound_task_timeout = 2;
}
static void controller_timeout(unsigned long data)
@@ -1759,7 +1764,7 @@ static enum sci_status sci_controller_construct(struct isci_host *ihost,
return sci_controller_reset(ihost);
}
-int sci_oem_parameters_validate(struct sci_oem_params *oem)
+int sci_oem_parameters_validate(struct sci_oem_params *oem, u8 version)
{
int i;
@@ -1791,18 +1796,61 @@ int sci_oem_parameters_validate(struct sci_oem_params *oem)
oem->controller.max_concurr_spin_up < 1)
return -EINVAL;
+ if (oem->controller.do_enable_ssc) {
+ if (version < ISCI_ROM_VER_1_1 && oem->controller.do_enable_ssc != 1)
+ return -EINVAL;
+
+ if (version >= ISCI_ROM_VER_1_1) {
+ u8 test = oem->controller.ssc_sata_tx_spread_level;
+
+ switch (test) {
+ case 0:
+ case 2:
+ case 3:
+ case 6:
+ case 7:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ test = oem->controller.ssc_sas_tx_spread_level;
+ if (oem->controller.ssc_sas_tx_type == 0) {
+ switch (test) {
+ case 0:
+ case 2:
+ case 3:
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else if (oem->controller.ssc_sas_tx_type == 1) {
+ switch (test) {
+ case 0:
+ case 3:
+ case 6:
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ }
+ }
+
return 0;
}
static enum sci_status sci_oem_parameters_set(struct isci_host *ihost)
{
u32 state = ihost->sm.current_state_id;
+ struct isci_pci_info *pci_info = to_pci_info(ihost->pdev);
if (state == SCIC_RESET ||
state == SCIC_INITIALIZING ||
state == SCIC_INITIALIZED) {
- if (sci_oem_parameters_validate(&ihost->oem_parameters))
+ if (sci_oem_parameters_validate(&ihost->oem_parameters,
+ pci_info->orom->hdr.version))
return SCI_FAILURE_INVALID_PARAMETER_VALUE;
return SCI_SUCCESS;
@@ -1857,6 +1905,31 @@ static void power_control_timeout(unsigned long data)
ihost->power_control.phys_waiting--;
ihost->power_control.phys_granted_power++;
sci_phy_consume_power_handler(iphy);
+
+ if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS) {
+ u8 j;
+
+ for (j = 0; j < SCI_MAX_PHYS; j++) {
+ struct isci_phy *requester = ihost->power_control.requesters[j];
+
+ /*
+ * Search the power_control queue to see if there are other phys
+ * attached to the same remote device. If found, take all of
+ * them out of await_sas_power state.
+ */
+ if (requester != NULL && requester != iphy) {
+ u8 other = memcmp(requester->frame_rcvd.iaf.sas_addr,
+ iphy->frame_rcvd.iaf.sas_addr,
+ sizeof(requester->frame_rcvd.iaf.sas_addr));
+
+ if (other == 0) {
+ ihost->power_control.requesters[j] = NULL;
+ ihost->power_control.phys_waiting--;
+ sci_phy_consume_power_handler(requester);
+ }
+ }
+ }
+ }
}
/*
@@ -1891,9 +1964,34 @@ void sci_controller_power_control_queue_insert(struct isci_host *ihost,
ihost->power_control.timer_started = true;
} else {
- /* Add the phy in the waiting list */
- ihost->power_control.requesters[iphy->phy_index] = iphy;
- ihost->power_control.phys_waiting++;
+ /*
+ * If another phy attached to the same SAS address as this phy is
+ * already in the READY state, this phy does not need to wait.
+ */
+ u8 i;
+ struct isci_phy *current_phy;
+
+ for (i = 0; i < SCI_MAX_PHYS; i++) {
+ u8 other;
+ current_phy = &ihost->phys[i];
+
+ other = memcmp(current_phy->frame_rcvd.iaf.sas_addr,
+ iphy->frame_rcvd.iaf.sas_addr,
+ sizeof(current_phy->frame_rcvd.iaf.sas_addr));
+
+ if (current_phy->sm.current_state_id == SCI_PHY_READY &&
+ current_phy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS &&
+ other == 0) {
+ sci_phy_consume_power_handler(iphy);
+ break;
+ }
+ }
+
+ if (i == SCI_MAX_PHYS) {
+ /* Add the phy in the waiting list */
+ ihost->power_control.requesters[iphy->phy_index] = iphy;
+ ihost->power_control.phys_waiting++;
+ }
}
}
@@ -1908,162 +2006,250 @@ void sci_controller_power_control_queue_remove(struct isci_host *ihost,
ihost->power_control.requesters[iphy->phy_index] = NULL;
}
+static int is_long_cable(int phy, unsigned char selection_byte)
+{
+ return !!(selection_byte & (1 << phy));
+}
+
+static int is_medium_cable(int phy, unsigned char selection_byte)
+{
+ return !!(selection_byte & (1 << (phy + 4)));
+}
+
+static enum cable_selections decode_selection_byte(
+ int phy,
+ unsigned char selection_byte)
+{
+ return ((selection_byte & (1 << phy)) ? 1 : 0)
+ + (selection_byte & (1 << (phy + 4)) ? 2 : 0);
+}
+
+static unsigned char *to_cable_select(struct isci_host *ihost)
+{
+ if (is_cable_select_overridden())
+ return ((unsigned char *)&cable_selection_override)
+ + ihost->id;
+ else
+ return &ihost->oem_parameters.controller.cable_selection_mask;
+}
+
+enum cable_selections decode_cable_selection(struct isci_host *ihost, int phy)
+{
+ return decode_selection_byte(phy, *to_cable_select(ihost));
+}
+
+char *lookup_cable_names(enum cable_selections selection)
+{
+ static char *cable_names[] = {
+ [short_cable] = "short",
+ [long_cable] = "long",
+ [medium_cable] = "medium",
+ [undefined_cable] = "<undefined, assumed long>" /* bit 0==1 */
+ };
+ return (selection <= undefined_cable) ? cable_names[selection]
+ : cable_names[undefined_cable];
+}
+
#define AFE_REGISTER_WRITE_DELAY 10
-/* Initialize the AFE for this phy index. We need to read the AFE setup from
- * the OEM parameters
- */
static void sci_controller_afe_initialization(struct isci_host *ihost)
{
+ struct scu_afe_registers __iomem *afe = &ihost->scu_registers->afe;
const struct sci_oem_params *oem = &ihost->oem_parameters;
struct pci_dev *pdev = ihost->pdev;
u32 afe_status;
u32 phy_id;
+ unsigned char cable_selection_mask = *to_cable_select(ihost);
/* Clear DFX Status registers */
- writel(0x0081000f, &ihost->scu_registers->afe.afe_dfx_master_control0);
+ writel(0x0081000f, &afe->afe_dfx_master_control0);
udelay(AFE_REGISTER_WRITE_DELAY);
- if (is_b0(pdev)) {
+ if (is_b0(pdev) || is_c0(pdev) || is_c1(pdev)) {
/* PM Rx Equalization Save, PM SPhy Rx Acknowledgement
- * Timer, PM Stagger Timer */
- writel(0x0007BFFF, &ihost->scu_registers->afe.afe_pmsn_master_control2);
+ * Timer, PM Stagger Timer
+ */
+ writel(0x0007FFFF, &afe->afe_pmsn_master_control2);
udelay(AFE_REGISTER_WRITE_DELAY);
}
/* Configure bias currents to normal */
if (is_a2(pdev))
- writel(0x00005A00, &ihost->scu_registers->afe.afe_bias_control);
+ writel(0x00005A00, &afe->afe_bias_control);
else if (is_b0(pdev) || is_c0(pdev))
- writel(0x00005F00, &ihost->scu_registers->afe.afe_bias_control);
+ writel(0x00005F00, &afe->afe_bias_control);
+ else if (is_c1(pdev))
+ writel(0x00005500, &afe->afe_bias_control);
udelay(AFE_REGISTER_WRITE_DELAY);
/* Enable PLL */
- if (is_b0(pdev) || is_c0(pdev))
- writel(0x80040A08, &ihost->scu_registers->afe.afe_pll_control0);
- else
- writel(0x80040908, &ihost->scu_registers->afe.afe_pll_control0);
+ if (is_a2(pdev))
+ writel(0x80040908, &afe->afe_pll_control0);
+ else if (is_b0(pdev) || is_c0(pdev))
+ writel(0x80040A08, &afe->afe_pll_control0);
+ else if (is_c1(pdev)) {
+ writel(0x80000B08, &afe->afe_pll_control0);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+ writel(0x00000B08, &afe->afe_pll_control0);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+ writel(0x80000B08, &afe->afe_pll_control0);
+ }
udelay(AFE_REGISTER_WRITE_DELAY);
/* Wait for the PLL to lock */
do {
- afe_status = readl(&ihost->scu_registers->afe.afe_common_block_status);
+ afe_status = readl(&afe->afe_common_block_status);
udelay(AFE_REGISTER_WRITE_DELAY);
} while ((afe_status & 0x00001000) == 0);
if (is_a2(pdev)) {
- /* Shorten SAS SNW lock time (RxLock timer value from 76 us to 50 us) */
- writel(0x7bcc96ad, &ihost->scu_registers->afe.afe_pmsn_master_control0);
+ /* Shorten SAS SNW lock time (RxLock timer value from 76
+ * us to 50 us)
+ */
+ writel(0x7bcc96ad, &afe->afe_pmsn_master_control0);
udelay(AFE_REGISTER_WRITE_DELAY);
}
for (phy_id = 0; phy_id < SCI_MAX_PHYS; phy_id++) {
+ struct scu_afe_transceiver *xcvr = &afe->scu_afe_xcvr[phy_id];
const struct sci_phy_oem_params *oem_phy = &oem->phys[phy_id];
+ int cable_length_long =
+ is_long_cable(phy_id, cable_selection_mask);
+ int cable_length_medium =
+ is_medium_cable(phy_id, cable_selection_mask);
- if (is_b0(pdev)) {
- /* Configure transmitter SSC parameters */
- writel(0x00030000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control);
+ if (is_a2(pdev)) {
+ /* All defaults, except the Receive Word
+ * Alignment/Comma Detect Enable....(0xe800)
+ */
+ writel(0x00004512, &xcvr->afe_xcvr_control0);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ writel(0x0050100F, &xcvr->afe_xcvr_control1);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+ } else if (is_b0(pdev)) {
+ /* Configure transmitter SSC parameters */
+ writel(0x00030000, &xcvr->afe_tx_ssc_control);
udelay(AFE_REGISTER_WRITE_DELAY);
} else if (is_c0(pdev)) {
- /* Configure transmitter SSC parameters */
- writel(0x0003000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control);
+ /* Configure transmitter SSC parameters */
+ writel(0x00010202, &xcvr->afe_tx_ssc_control);
udelay(AFE_REGISTER_WRITE_DELAY);
- /*
- * All defaults, except the Receive Word Alignament/Comma Detect
- * Enable....(0xe800) */
- writel(0x00004500, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
+ /* All defaults, except the Receive Word
+ * Alignment/Comma Detect Enable....(0xe800)
+ */
+ writel(0x00014500, &xcvr->afe_xcvr_control0);
udelay(AFE_REGISTER_WRITE_DELAY);
- } else {
- /*
- * All defaults, except the Receive Word Alignament/Comma Detect
- * Enable....(0xe800) */
- writel(0x00004512, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
+ } else if (is_c1(pdev)) {
+ /* Configure transmitter SSC parameters */
+ writel(0x00010202, &xcvr->afe_tx_ssc_control);
udelay(AFE_REGISTER_WRITE_DELAY);
- writel(0x0050100F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control1);
+ /* All defaults, except the Receive Word
+ * Alignment/Comma Detect Enable....(0xe800)
+ */
+ writel(0x0001C500, &xcvr->afe_xcvr_control0);
udelay(AFE_REGISTER_WRITE_DELAY);
}
- /*
- * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
- * & increase TX int & ext bias 20%....(0xe85c) */
+ /* Power up TX and RX out from power down (PWRDNTX and
+ * PWRDNRX) & increase TX int & ext bias 20%....(0xe85c)
+ */
if (is_a2(pdev))
- writel(0x000003F0, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
+ writel(0x000003F0, &xcvr->afe_channel_control);
else if (is_b0(pdev)) {
- /* Power down TX and RX (PWRDNTX and PWRDNRX) */
- writel(0x000003D7, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
+ writel(0x000003D7, &xcvr->afe_channel_control);
udelay(AFE_REGISTER_WRITE_DELAY);
- /*
- * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
- * & increase TX int & ext bias 20%....(0xe85c) */
- writel(0x000003D4, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
- } else {
- writel(0x000001E7, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
+ writel(0x000003D4, &xcvr->afe_channel_control);
+ } else if (is_c0(pdev)) {
+ writel(0x000001E7, &xcvr->afe_channel_control);
udelay(AFE_REGISTER_WRITE_DELAY);
- /*
- * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
- * & increase TX int & ext bias 20%....(0xe85c) */
- writel(0x000001E4, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
+ writel(0x000001E4, &xcvr->afe_channel_control);
+ } else if (is_c1(pdev)) {
+ writel(cable_length_long ? 0x000002F7 : 0x000001F7,
+ &xcvr->afe_channel_control);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ writel(cable_length_long ? 0x000002F4 : 0x000001F4,
+ &xcvr->afe_channel_control);
}
udelay(AFE_REGISTER_WRITE_DELAY);
if (is_a2(pdev)) {
/* Enable TX equalization (0xe824) */
- writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
+ writel(0x00040000, &xcvr->afe_tx_control);
udelay(AFE_REGISTER_WRITE_DELAY);
}
- /*
- * RDPI=0x0(RX Power On), RXOOBDETPDNC=0x0, TPD=0x0(TX Power On),
- * RDD=0x0(RX Detect Enabled) ....(0xe800) */
- writel(0x00004100, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
+ if (is_a2(pdev) || is_b0(pdev))
+ /* RDPI=0x0(RX Power On), RXOOBDETPDNC=0x0,
+ * TPD=0x0(TX Power On), RDD=0x0(RX Detect
+ * Enabled) ....(0xe800)
+ */
+ writel(0x00004100, &xcvr->afe_xcvr_control0);
+ else if (is_c0(pdev))
+ writel(0x00014100, &xcvr->afe_xcvr_control0);
+ else if (is_c1(pdev))
+ writel(0x0001C100, &xcvr->afe_xcvr_control0);
udelay(AFE_REGISTER_WRITE_DELAY);
/* Leave DFE/FFE on */
if (is_a2(pdev))
- writel(0x3F11103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
+ writel(0x3F11103F, &xcvr->afe_rx_ssc_control0);
else if (is_b0(pdev)) {
- writel(0x3F11103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
+ writel(0x3F11103F, &xcvr->afe_rx_ssc_control0);
udelay(AFE_REGISTER_WRITE_DELAY);
/* Enable TX equalization (0xe824) */
- writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
- } else {
- writel(0x0140DF0F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control1);
+ writel(0x00040000, &xcvr->afe_tx_control);
+ } else if (is_c0(pdev)) {
+ writel(0x01400C0F, &xcvr->afe_rx_ssc_control1);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ writel(0x3F6F103F, &xcvr->afe_rx_ssc_control0);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ /* Enable TX equalization (0xe824) */
+ writel(0x00040000, &xcvr->afe_tx_control);
+ } else if (is_c1(pdev)) {
+ writel(cable_length_long ? 0x01500C0C :
+ cable_length_medium ? 0x01400C0D : 0x02400C0D,
+ &xcvr->afe_xcvr_control1);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ writel(0x000003E0, &xcvr->afe_dfx_rx_control1);
udelay(AFE_REGISTER_WRITE_DELAY);
- writel(0x3F6F103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
+ writel(cable_length_long ? 0x33091C1F :
+ cable_length_medium ? 0x3315181F : 0x2B17161F,
+ &xcvr->afe_rx_ssc_control0);
udelay(AFE_REGISTER_WRITE_DELAY);
/* Enable TX equalization (0xe824) */
- writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
+ writel(0x00040000, &xcvr->afe_tx_control);
}
udelay(AFE_REGISTER_WRITE_DELAY);
- writel(oem_phy->afe_tx_amp_control0,
- &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control0);
+ writel(oem_phy->afe_tx_amp_control0, &xcvr->afe_tx_amp_control0);
udelay(AFE_REGISTER_WRITE_DELAY);
- writel(oem_phy->afe_tx_amp_control1,
- &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control1);
+ writel(oem_phy->afe_tx_amp_control1, &xcvr->afe_tx_amp_control1);
udelay(AFE_REGISTER_WRITE_DELAY);
- writel(oem_phy->afe_tx_amp_control2,
- &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control2);
+ writel(oem_phy->afe_tx_amp_control2, &xcvr->afe_tx_amp_control2);
udelay(AFE_REGISTER_WRITE_DELAY);
- writel(oem_phy->afe_tx_amp_control3,
- &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control3);
+ writel(oem_phy->afe_tx_amp_control3, &xcvr->afe_tx_amp_control3);
udelay(AFE_REGISTER_WRITE_DELAY);
}
/* Transfer control to the PEs */
- writel(0x00010f00, &ihost->scu_registers->afe.afe_dfx_master_control0);
+ writel(0x00010f00, &afe->afe_dfx_master_control0);
udelay(AFE_REGISTER_WRITE_DELAY);
}
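
The cable-selection helpers added above encode two bits per phy in one byte: bit <phy> marks a long cable and bit <phy + 4> a medium cable, which decode_selection_byte() folds into enum cable_selections (short/long/medium/undefined). A small illustrative sketch, not from the patch (the function name and the 0x21 value are invented for the example):

static void example_dump_cable_selection(void)
{
	unsigned char sel = 0x21;	/* bit 0: phy0 long, bit 5: phy1 medium */
	int phy;

	for (phy = 0; phy < 4; phy++)
		pr_debug("phy%d cable: %s\n", phy,
			 lookup_cable_names(decode_selection_byte(phy, sel)));

	/* Prints: phy0 "long", phy1 "medium", phy2 "short", phy3 "short". */
}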
diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h
index 646051afd3cb..5477f0fa8233 100644
--- a/drivers/scsi/isci/host.h
+++ b/drivers/scsi/isci/host.h
@@ -435,11 +435,36 @@ static inline bool is_b0(struct pci_dev *pdev)
static inline bool is_c0(struct pci_dev *pdev)
{
- if (pdev->revision >= 5)
+ if (pdev->revision == 5)
return true;
return false;
}
+static inline bool is_c1(struct pci_dev *pdev)
+{
+ if (pdev->revision >= 6)
+ return true;
+ return false;
+}
+
+enum cable_selections {
+ short_cable = 0,
+ long_cable = 1,
+ medium_cable = 2,
+ undefined_cable = 3
+};
+
+#define CABLE_OVERRIDE_DISABLED (0x10000)
+
+static inline int is_cable_select_overridden(void)
+{
+ return cable_selection_override < CABLE_OVERRIDE_DISABLED;
+}
+
+enum cable_selections decode_cable_selection(struct isci_host *ihost, int phy);
+void validate_cable_selections(struct isci_host *ihost);
+char *lookup_cable_names(enum cable_selections);
+
/* set hw control for 'activity', even though active enclosures seem to drive
* the activity led on their own. Skip setting FSENG control on 'status' due
* to unexpected operation and 'error' due to not being a supported automatic
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index a97edabcb85a..17c4c2c89c2e 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -65,7 +65,7 @@
#include "probe_roms.h"
#define MAJ 1
-#define MIN 0
+#define MIN 1
#define BUILD 0
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
__stringify(BUILD)
@@ -94,7 +94,7 @@ MODULE_DEVICE_TABLE(pci, isci_id_table);
/* linux isci specific settings */
-unsigned char no_outbound_task_to = 20;
+unsigned char no_outbound_task_to = 2;
module_param(no_outbound_task_to, byte, 0);
MODULE_PARM_DESC(no_outbound_task_to, "No Outbound Task Timeout (1us incr)");
@@ -114,7 +114,7 @@ u16 stp_inactive_to = 5;
module_param(stp_inactive_to, ushort, 0);
MODULE_PARM_DESC(stp_inactive_to, "STP inactivity timeout (100us incr)");
-unsigned char phy_gen = 3;
+unsigned char phy_gen = SCIC_SDS_PARM_GEN2_SPEED;
module_param(phy_gen, byte, 0);
MODULE_PARM_DESC(phy_gen, "PHY generation (1: 1.5Gbps 2: 3.0Gbps 3: 6.0Gbps)");
@@ -122,6 +122,14 @@ unsigned char max_concurr_spinup;
module_param(max_concurr_spinup, byte, 0);
MODULE_PARM_DESC(max_concurr_spinup, "Max concurrent device spinup");
+uint cable_selection_override = CABLE_OVERRIDE_DISABLED;
+module_param(cable_selection_override, uint, 0);
+
+MODULE_PARM_DESC(cable_selection_override,
+ "This field indicates length of the SAS/SATA cable between "
+ "host and device. If any bits > 15 are set (default) "
+ "indicates \"use platform defaults\"");
+
static ssize_t isci_show_id(struct device *dev, struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = container_of(dev, typeof(*shost), shost_dev);
@@ -412,6 +420,14 @@ static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id)
return NULL;
isci_host->shost = shost;
+ dev_info(&pdev->dev, "%sSCU controller %d: phy 3-0 cables: "
+ "{%s, %s, %s, %s}\n",
+ (is_cable_select_overridden() ? "* " : ""), isci_host->id,
+ lookup_cable_names(decode_cable_selection(isci_host, 3)),
+ lookup_cable_names(decode_cable_selection(isci_host, 2)),
+ lookup_cable_names(decode_cable_selection(isci_host, 1)),
+ lookup_cable_names(decode_cable_selection(isci_host, 0)));
+
err = isci_host_init(isci_host);
if (err)
goto err_shost;
@@ -466,7 +482,8 @@ static int __devinit isci_pci_probe(struct pci_dev *pdev, const struct pci_devic
orom = isci_request_oprom(pdev);
for (i = 0; orom && i < ARRAY_SIZE(orom->ctrl); i++) {
- if (sci_oem_parameters_validate(&orom->ctrl[i])) {
+ if (sci_oem_parameters_validate(&orom->ctrl[i],
+ orom->hdr.version)) {
dev_warn(&pdev->dev,
"[%d]: invalid oem parameters detected, falling back to firmware\n", i);
devm_kfree(&pdev->dev, orom);
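
The new cable_selection_override module parameter packs one selection byte per controller; to_cable_select() in host.c indexes it byte-wise with ihost->id, and the default of CABLE_OVERRIDE_DISABLED (0x10000) leaves the OEM parameters in force. On a little-endian host that byte indexing is equivalent to the shift in this hypothetical helper (the name and example value are illustrative only):

static unsigned char example_override_byte(uint packed, int host_id)
{
	/* e.g. cable_selection_override=0x0284 gives controller 0 the
	 * selection byte 0x84 and controller 1 the byte 0x02
	 * (little-endian assumption; mirrors to_cable_select()).
	 */
	return (packed >> (8 * host_id)) & 0xff;
}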
diff --git a/drivers/scsi/isci/isci.h b/drivers/scsi/isci/isci.h
index 8efeb6b08321..234ab46fce33 100644
--- a/drivers/scsi/isci/isci.h
+++ b/drivers/scsi/isci/isci.h
@@ -480,6 +480,7 @@ extern u16 ssp_inactive_to;
extern u16 stp_inactive_to;
extern unsigned char phy_gen;
extern unsigned char max_concurr_spinup;
+extern uint cable_selection_override;
irqreturn_t isci_msix_isr(int vec, void *data);
irqreturn_t isci_intx_isr(int vec, void *data);
diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c
index 35f50c2183e1..fe18acfd6eb3 100644
--- a/drivers/scsi/isci/phy.c
+++ b/drivers/scsi/isci/phy.c
@@ -91,22 +91,23 @@ sci_phy_transport_layer_initialization(struct isci_phy *iphy,
static enum sci_status
sci_phy_link_layer_initialization(struct isci_phy *iphy,
- struct scu_link_layer_registers __iomem *reg)
+ struct scu_link_layer_registers __iomem *llr)
{
struct isci_host *ihost = iphy->owning_port->owning_controller;
+ struct sci_phy_user_params *phy_user;
+ struct sci_phy_oem_params *phy_oem;
int phy_idx = iphy->phy_index;
- struct sci_phy_user_params *phy_user = &ihost->user_parameters.phys[phy_idx];
- struct sci_phy_oem_params *phy_oem =
- &ihost->oem_parameters.phys[phy_idx];
- u32 phy_configuration;
struct sci_phy_cap phy_cap;
+ u32 phy_configuration;
u32 parity_check = 0;
u32 parity_count = 0;
u32 llctl, link_rate;
u32 clksm_value = 0;
u32 sp_timeouts = 0;
- iphy->link_layer_registers = reg;
+ phy_user = &ihost->user_parameters.phys[phy_idx];
+ phy_oem = &ihost->oem_parameters.phys[phy_idx];
+ iphy->link_layer_registers = llr;
/* Set our IDENTIFY frame data */
#define SCI_END_DEVICE 0x01
@@ -116,32 +117,26 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,
SCU_SAS_TIID_GEN_BIT(STP_INITIATOR) |
SCU_SAS_TIID_GEN_BIT(DA_SATA_HOST) |
SCU_SAS_TIID_GEN_VAL(DEVICE_TYPE, SCI_END_DEVICE),
- &iphy->link_layer_registers->transmit_identification);
+ &llr->transmit_identification);
/* Write the device SAS Address */
- writel(0xFEDCBA98,
- &iphy->link_layer_registers->sas_device_name_high);
- writel(phy_idx, &iphy->link_layer_registers->sas_device_name_low);
+ writel(0xFEDCBA98, &llr->sas_device_name_high);
+ writel(phy_idx, &llr->sas_device_name_low);
/* Write the source SAS Address */
- writel(phy_oem->sas_address.high,
- &iphy->link_layer_registers->source_sas_address_high);
- writel(phy_oem->sas_address.low,
- &iphy->link_layer_registers->source_sas_address_low);
+ writel(phy_oem->sas_address.high, &llr->source_sas_address_high);
+ writel(phy_oem->sas_address.low, &llr->source_sas_address_low);
/* Clear and Set the PHY Identifier */
- writel(0, &iphy->link_layer_registers->identify_frame_phy_id);
- writel(SCU_SAS_TIPID_GEN_VALUE(ID, phy_idx),
- &iphy->link_layer_registers->identify_frame_phy_id);
+ writel(0, &llr->identify_frame_phy_id);
+ writel(SCU_SAS_TIPID_GEN_VALUE(ID, phy_idx), &llr->identify_frame_phy_id);
/* Change the initial state of the phy configuration register */
- phy_configuration =
- readl(&iphy->link_layer_registers->phy_configuration);
+ phy_configuration = readl(&llr->phy_configuration);
/* Hold OOB state machine in reset */
phy_configuration |= SCU_SAS_PCFG_GEN_BIT(OOB_RESET);
- writel(phy_configuration,
- &iphy->link_layer_registers->phy_configuration);
+ writel(phy_configuration, &llr->phy_configuration);
/* Configure the SNW capabilities */
phy_cap.all = 0;
@@ -149,15 +144,64 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,
phy_cap.gen3_no_ssc = 1;
phy_cap.gen2_no_ssc = 1;
phy_cap.gen1_no_ssc = 1;
- if (ihost->oem_parameters.controller.do_enable_ssc == true) {
+ if (ihost->oem_parameters.controller.do_enable_ssc) {
+ struct scu_afe_registers __iomem *afe = &ihost->scu_registers->afe;
+ struct scu_afe_transceiver *xcvr = &afe->scu_afe_xcvr[phy_idx];
+ struct isci_pci_info *pci_info = to_pci_info(ihost->pdev);
+ bool en_sas = false;
+ bool en_sata = false;
+ u32 sas_type = 0;
+ u32 sata_spread = 0x2;
+ u32 sas_spread = 0x2;
+
phy_cap.gen3_ssc = 1;
phy_cap.gen2_ssc = 1;
phy_cap.gen1_ssc = 1;
+
+ if (pci_info->orom->hdr.version < ISCI_ROM_VER_1_1)
+ en_sas = en_sata = true;
+ else {
+ sata_spread = ihost->oem_parameters.controller.ssc_sata_tx_spread_level;
+ sas_spread = ihost->oem_parameters.controller.ssc_sas_tx_spread_level;
+
+ if (sata_spread)
+ en_sata = true;
+
+ if (sas_spread) {
+ en_sas = true;
+ sas_type = ihost->oem_parameters.controller.ssc_sas_tx_type;
+ }
+
+ }
+
+ if (en_sas) {
+ u32 reg;
+
+ reg = readl(&xcvr->afe_xcvr_control0);
+ reg |= (0x00100000 | (sas_type << 19));
+ writel(reg, &xcvr->afe_xcvr_control0);
+
+ reg = readl(&xcvr->afe_tx_ssc_control);
+ reg |= sas_spread << 8;
+ writel(reg, &xcvr->afe_tx_ssc_control);
+ }
+
+ if (en_sata) {
+ u32 reg;
+
+ reg = readl(&xcvr->afe_tx_ssc_control);
+ reg |= sata_spread;
+ writel(reg, &xcvr->afe_tx_ssc_control);
+
+ reg = readl(&llr->stp_control);
+ reg |= 1 << 12;
+ writel(reg, &llr->stp_control);
+ }
}
- /*
- * The SAS specification indicates that the phy_capabilities that
- * are transmitted shall have an even parity. Calculate the parity. */
+ /* The SAS specification indicates that the phy_capabilities that
+ * are transmitted shall have an even parity. Calculate the parity.
+ */
parity_check = phy_cap.all;
while (parity_check != 0) {
if (parity_check & 0x1)
@@ -165,20 +209,20 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,
parity_check >>= 1;
}
- /*
- * If parity indicates there are an odd number of bits set, then
- * set the parity bit to 1 in the phy capabilities. */
+ /* If parity indicates there are an odd number of bits set, then
+ * set the parity bit to 1 in the phy capabilities.
+ */
if ((parity_count % 2) != 0)
phy_cap.parity = 1;
- writel(phy_cap.all, &iphy->link_layer_registers->phy_capabilities);
+ writel(phy_cap.all, &llr->phy_capabilities);
/* Set the enable spinup period but disable the ability to send
* notify enable spinup
*/
writel(SCU_ENSPINUP_GEN_VAL(COUNT,
phy_user->notify_enable_spin_up_insertion_frequency),
- &iphy->link_layer_registers->notify_enable_spinup_control);
+ &llr->notify_enable_spinup_control);
/* Write the ALIGN Insertion Ferequency for connected phy and
* inpendent of connected state
@@ -189,11 +233,13 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,
clksm_value |= SCU_ALIGN_INSERTION_FREQUENCY_GEN_VAL(GENERAL,
phy_user->align_insertion_frequency);
- writel(clksm_value, &iphy->link_layer_registers->clock_skew_management);
+ writel(clksm_value, &llr->clock_skew_management);
- /* @todo Provide a way to write this register correctly */
- writel(0x02108421,
- &iphy->link_layer_registers->afe_lookup_table_control);
+ if (is_c0(ihost->pdev) || is_c1(ihost->pdev)) {
+ writel(0x04210400, &llr->afe_lookup_table_control);
+ writel(0x020A7C05, &llr->sas_primitive_timeout);
+ } else
+ writel(0x02108421, &llr->afe_lookup_table_control);
llctl = SCU_SAS_LLCTL_GEN_VAL(NO_OUTBOUND_TASK_TIMEOUT,
(u8)ihost->user_parameters.no_outbound_task_timeout);
@@ -210,9 +256,9 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,
break;
}
llctl |= SCU_SAS_LLCTL_GEN_VAL(MAX_LINK_RATE, link_rate);
- writel(llctl, &iphy->link_layer_registers->link_layer_control);
+ writel(llctl, &llr->link_layer_control);
- sp_timeouts = readl(&iphy->link_layer_registers->sas_phy_timeouts);
+ sp_timeouts = readl(&llr->sas_phy_timeouts);
/* Clear the default 0x36 (54us) RATE_CHANGE timeout value. */
sp_timeouts &= ~SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0xFF);
@@ -222,20 +268,23 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,
*/
sp_timeouts |= SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0x3B);
- writel(sp_timeouts, &iphy->link_layer_registers->sas_phy_timeouts);
+ writel(sp_timeouts, &llr->sas_phy_timeouts);
if (is_a2(ihost->pdev)) {
- /* Program the max ARB time for the PHY to 700us so we inter-operate with
- * the PMC expander which shuts down PHYs if the expander PHY generates too
- * many breaks. This time value will guarantee that the initiator PHY will
- * generate the break.
+ /* Program the max ARB time for the PHY to 700us so we
+ * inter-operate with the PMC expander which shuts down
+ * PHYs if the expander PHY generates too many breaks.
+ * This time value will guarantee that the initiator PHY
+ * will generate the break.
*/
writel(SCIC_SDS_PHY_MAX_ARBITRATION_WAIT_TIME,
- &iphy->link_layer_registers->maximum_arbitration_wait_timer_timeout);
+ &llr->maximum_arbitration_wait_timer_timeout);
}
- /* Disable link layer hang detection, rely on the OS timeout for I/O timeouts. */
- writel(0, &iphy->link_layer_registers->link_layer_hang_detection_timeout);
+ /* Disable link layer hang detection, rely on the OS timeout for
+ * I/O timeouts.
+ */
+ writel(0, &llr->link_layer_hang_detection_timeout);
/* We can exit the initial state to the stopped state */
sci_change_state(&iphy->sm, SCI_PHY_STOPPED);
@@ -1049,24 +1098,25 @@ static void scu_link_layer_stop_protocol_engine(
writel(enable_spinup_value, &iphy->link_layer_registers->notify_enable_spinup_control);
}
-/**
- *
- *
- * This method will start the OOB/SN state machine for this struct isci_phy object.
- */
-static void scu_link_layer_start_oob(
- struct isci_phy *iphy)
+static void scu_link_layer_start_oob(struct isci_phy *iphy)
{
- u32 scu_sas_pcfg_value;
-
- scu_sas_pcfg_value =
- readl(&iphy->link_layer_registers->phy_configuration);
- scu_sas_pcfg_value |= SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE);
- scu_sas_pcfg_value &=
- ~(SCU_SAS_PCFG_GEN_BIT(OOB_RESET) |
- SCU_SAS_PCFG_GEN_BIT(HARD_RESET));
- writel(scu_sas_pcfg_value,
- &iphy->link_layer_registers->phy_configuration);
+ struct scu_link_layer_registers __iomem *ll = iphy->link_layer_registers;
+ u32 val;
+
+ /** Reset OOB sequence - start */
+ val = readl(&ll->phy_configuration);
+ val &= ~(SCU_SAS_PCFG_GEN_BIT(OOB_RESET) |
+ SCU_SAS_PCFG_GEN_BIT(HARD_RESET));
+ writel(val, &ll->phy_configuration);
+ readl(&ll->phy_configuration); /* flush */
+ /** Reset OOB sequence - end */
+
+ /** Start OOB sequence - start */
+ val = readl(&ll->phy_configuration);
+ val |= SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE);
+ writel(val, &ll->phy_configuration);
+ readl(&ll->phy_configuration); /* flush */
+ /** Start OOB sequence - end */
}
/**
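
The SAS specification requires the transmitted phy capabilities word to have even parity, which the loop above computes by shifting one bit at a time. A hypothetical equivalent using the kernel's population-count helper (a sketch only, not part of the patch):

#include <linux/bitops.h>

static bool example_phy_cap_needs_parity(u32 cap_all)
{
	/* Set phy_cap.parity when the remaining bits have an odd
	 * population count, so the word as transmitted has even parity.
	 */
	return hweight32(cap_all) & 1;
}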
diff --git a/drivers/scsi/isci/port.c b/drivers/scsi/isci/port.c
index ac7f27749f97..7c6ac58a5c4c 100644
--- a/drivers/scsi/isci/port.c
+++ b/drivers/scsi/isci/port.c
@@ -114,7 +114,7 @@ static u32 sci_port_get_phys(struct isci_port *iport)
* value is returned if the specified port is not valid. When this value is
* returned, no data is copied to the properties output parameter.
*/
-static enum sci_status sci_port_get_properties(struct isci_port *iport,
+enum sci_status sci_port_get_properties(struct isci_port *iport,
struct sci_port_properties *prop)
{
if (!iport || iport->logical_port_index == SCIC_SDS_DUMMY_PORT)
@@ -647,19 +647,26 @@ void sci_port_setup_transports(struct isci_port *iport, u32 device_id)
}
}
-static void sci_port_activate_phy(struct isci_port *iport, struct isci_phy *iphy,
- bool do_notify_user)
+static void sci_port_resume_phy(struct isci_port *iport, struct isci_phy *iphy)
+{
+ sci_phy_resume(iphy);
+ iport->enabled_phy_mask |= 1 << iphy->phy_index;
+}
+
+static void sci_port_activate_phy(struct isci_port *iport,
+ struct isci_phy *iphy,
+ u8 flags)
{
struct isci_host *ihost = iport->owning_controller;
- if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA)
+ if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA && (flags & PF_RESUME))
sci_phy_resume(iphy);
iport->active_phy_mask |= 1 << iphy->phy_index;
sci_controller_clear_invalid_phy(ihost, iphy);
- if (do_notify_user == true)
+ if (flags & PF_NOTIFY)
isci_port_link_up(ihost, iport, iphy);
}
@@ -669,14 +676,19 @@ void sci_port_deactivate_phy(struct isci_port *iport, struct isci_phy *iphy,
struct isci_host *ihost = iport->owning_controller;
iport->active_phy_mask &= ~(1 << iphy->phy_index);
+ iport->enabled_phy_mask &= ~(1 << iphy->phy_index);
if (!iport->active_phy_mask)
iport->last_active_phy = iphy->phy_index;
iphy->max_negotiated_speed = SAS_LINK_RATE_UNKNOWN;
- /* Re-assign the phy back to the LP as if it were a narrow port */
- writel(iphy->phy_index,
- &iport->port_pe_configuration_register[iphy->phy_index]);
+ /* Re-assign the phy back to the LP as if it were a narrow port for APC
+ * mode. For MPC mode, the phy will remain in the port.
+ */
+ if (iport->owning_controller->oem_parameters.controller.mode_type ==
+ SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE)
+ writel(iphy->phy_index,
+ &iport->port_pe_configuration_register[iphy->phy_index]);
if (do_notify_user == true)
isci_port_link_down(ihost, iphy, iport);
@@ -701,18 +713,16 @@ static void sci_port_invalid_link_up(struct isci_port *iport, struct isci_phy *i
* sci_port_general_link_up_handler - phy can be assigned to port?
* @sci_port: sci_port object for which has a phy that has gone link up.
* @sci_phy: This is the struct isci_phy object that has gone link up.
- * @do_notify_user: This parameter specifies whether to inform the user (via
- * sci_port_link_up()) as to the fact that a new phy as become ready.
+ * @flags: PF_RESUME, PF_NOTIFY to sci_port_activate_phy
*
- * Determine if this phy can be assigned to this
- * port . If the phy is not a valid PHY for
- * this port then the function will notify the user. A PHY can only be
- * part of a port if it's attached SAS ADDRESS is the same as all other PHYs in
- * the same port. none
+ * Determine if this phy can be assigned to this port. If the phy is
+ * not a valid PHY for this port then the function will notify the user.
+ * A PHY can only be part of a port if its attached SAS ADDRESS is the
+ * same as all other PHYs in the same port.
*/
static void sci_port_general_link_up_handler(struct isci_port *iport,
- struct isci_phy *iphy,
- bool do_notify_user)
+ struct isci_phy *iphy,
+ u8 flags)
{
struct sci_sas_address port_sas_address;
struct sci_sas_address phy_sas_address;
@@ -730,7 +740,7 @@ static void sci_port_general_link_up_handler(struct isci_port *iport,
iport->active_phy_mask == 0) {
struct sci_base_state_machine *sm = &iport->sm;
- sci_port_activate_phy(iport, iphy, do_notify_user);
+ sci_port_activate_phy(iport, iphy, flags);
if (sm->current_state_id == SCI_PORT_RESETTING)
port_state_machine_change(iport, SCI_PORT_READY);
} else
@@ -781,11 +791,16 @@ bool sci_port_link_detected(
struct isci_phy *iphy)
{
if ((iport->logical_port_index != SCIC_SDS_DUMMY_PORT) &&
- (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA) &&
- sci_port_is_wide(iport)) {
- sci_port_invalid_link_up(iport, iphy);
-
- return false;
+ (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA)) {
+ if (sci_port_is_wide(iport)) {
+ sci_port_invalid_link_up(iport, iphy);
+ return false;
+ } else {
+ struct isci_host *ihost = iport->owning_controller;
+ struct isci_port *dst_port = &(ihost->ports[iphy->phy_index]);
+ writel(iphy->phy_index,
+ &dst_port->port_pe_configuration_register[iphy->phy_index]);
+ }
}
return true;
@@ -975,6 +990,13 @@ static void sci_port_ready_substate_waiting_enter(struct sci_base_state_machine
}
}
+static void scic_sds_port_ready_substate_waiting_exit(
+ struct sci_base_state_machine *sm)
+{
+ struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+ sci_port_resume_port_task_scheduler(iport);
+}
+
static void sci_port_ready_substate_operational_enter(struct sci_base_state_machine *sm)
{
u32 index;
@@ -988,13 +1010,13 @@ static void sci_port_ready_substate_operational_enter(struct sci_base_state_mach
writel(iport->physical_port_index,
&iport->port_pe_configuration_register[
iport->phy_table[index]->phy_index]);
+ if (((iport->active_phy_mask^iport->enabled_phy_mask) & (1 << index)) != 0)
+ sci_port_resume_phy(iport, iport->phy_table[index]);
}
}
sci_port_update_viit_entry(iport);
- sci_port_resume_port_task_scheduler(iport);
-
/*
* Post the dummy task for the port so the hardware can schedule
* io correctly
@@ -1061,20 +1083,9 @@ static void sci_port_ready_substate_configuring_enter(struct sci_base_state_mach
if (iport->active_phy_mask == 0) {
isci_port_not_ready(ihost, iport);
- port_state_machine_change(iport,
- SCI_PORT_SUB_WAITING);
- } else if (iport->started_request_count == 0)
- port_state_machine_change(iport,
- SCI_PORT_SUB_OPERATIONAL);
-}
-
-static void sci_port_ready_substate_configuring_exit(struct sci_base_state_machine *sm)
-{
- struct isci_port *iport = container_of(sm, typeof(*iport), sm);
-
- sci_port_suspend_port_task_scheduler(iport);
- if (iport->ready_exit)
- sci_port_invalidate_dummy_remote_node(iport);
+ port_state_machine_change(iport, SCI_PORT_SUB_WAITING);
+ } else
+ port_state_machine_change(iport, SCI_PORT_SUB_OPERATIONAL);
}
enum sci_status sci_port_start(struct isci_port *iport)
@@ -1252,7 +1263,7 @@ enum sci_status sci_port_add_phy(struct isci_port *iport,
if (status != SCI_SUCCESS)
return status;
- sci_port_general_link_up_handler(iport, iphy, true);
+ sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY|PF_RESUME);
iport->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING;
port_state_machine_change(iport, SCI_PORT_SUB_CONFIGURING);
@@ -1262,7 +1273,7 @@ enum sci_status sci_port_add_phy(struct isci_port *iport,
if (status != SCI_SUCCESS)
return status;
- sci_port_general_link_up_handler(iport, iphy, true);
+ sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY);
/* Re-enter the configuring state since this may be the last phy in
* the port.
@@ -1338,13 +1349,13 @@ enum sci_status sci_port_link_up(struct isci_port *iport,
/* Since this is the first phy going link up for the port we
* can just enable it and continue
*/
- sci_port_activate_phy(iport, iphy, true);
+ sci_port_activate_phy(iport, iphy, PF_NOTIFY|PF_RESUME);
port_state_machine_change(iport,
SCI_PORT_SUB_OPERATIONAL);
return SCI_SUCCESS;
case SCI_PORT_SUB_OPERATIONAL:
- sci_port_general_link_up_handler(iport, iphy, true);
+ sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY|PF_RESUME);
return SCI_SUCCESS;
case SCI_PORT_RESETTING:
/* TODO We should make sure that the phy that has gone
@@ -1361,7 +1372,7 @@ enum sci_status sci_port_link_up(struct isci_port *iport,
/* In the resetting state we don't notify the user regarding
* link up and link down notifications.
*/
- sci_port_general_link_up_handler(iport, iphy, false);
+ sci_port_general_link_up_handler(iport, iphy, PF_RESUME);
return SCI_SUCCESS;
default:
dev_warn(sciport_to_dev(iport),
@@ -1584,14 +1595,14 @@ static const struct sci_base_state sci_port_state_table[] = {
},
[SCI_PORT_SUB_WAITING] = {
.enter_state = sci_port_ready_substate_waiting_enter,
+ .exit_state = scic_sds_port_ready_substate_waiting_exit,
},
[SCI_PORT_SUB_OPERATIONAL] = {
.enter_state = sci_port_ready_substate_operational_enter,
.exit_state = sci_port_ready_substate_operational_exit
},
[SCI_PORT_SUB_CONFIGURING] = {
- .enter_state = sci_port_ready_substate_configuring_enter,
- .exit_state = sci_port_ready_substate_configuring_exit
+ .enter_state = sci_port_ready_substate_configuring_enter
},
[SCI_PORT_RESETTING] = {
.exit_state = sci_port_resetting_state_exit
@@ -1609,6 +1620,7 @@ void sci_port_construct(struct isci_port *iport, u8 index,
iport->logical_port_index = SCIC_SDS_DUMMY_PORT;
iport->physical_port_index = index;
iport->active_phy_mask = 0;
+ iport->enabled_phy_mask = 0;
iport->last_active_phy = 0;
iport->ready_exit = false;
diff --git a/drivers/scsi/isci/port.h b/drivers/scsi/isci/port.h
index cb5ffbc38603..08116090eb70 100644
--- a/drivers/scsi/isci/port.h
+++ b/drivers/scsi/isci/port.h
@@ -63,6 +63,9 @@
#define SCIC_SDS_DUMMY_PORT 0xFF
+#define PF_NOTIFY (1 << 0)
+#define PF_RESUME (1 << 1)
+
struct isci_phy;
struct isci_host;
@@ -83,6 +86,8 @@ enum isci_status {
* @logical_port_index: software port index
* @physical_port_index: hardware port index
* @active_phy_mask: identifies phy members
+ * @enabled_phy_mask: mask of phys that have been resumed (enabled)
+ * and are already part of the port
* @reserved_tag:
* @reserved_rni: reserver for port task scheduler workaround
* @started_request_count: reference count for outstanding commands
@@ -104,6 +109,7 @@ struct isci_port {
u8 logical_port_index;
u8 physical_port_index;
u8 active_phy_mask;
+ u8 enabled_phy_mask;
u8 last_active_phy;
u16 reserved_rni;
u16 reserved_tag;
@@ -250,6 +256,10 @@ bool sci_port_link_detected(
struct isci_port *iport,
struct isci_phy *iphy);
+enum sci_status sci_port_get_properties(
+ struct isci_port *iport,
+ struct sci_port_properties *prop);
+
enum sci_status sci_port_link_up(struct isci_port *iport,
struct isci_phy *iphy);
enum sci_status sci_port_link_down(struct isci_port *iport,
diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c
index 38a99d281141..6d1e9544cbe5 100644
--- a/drivers/scsi/isci/port_config.c
+++ b/drivers/scsi/isci/port_config.c
@@ -57,7 +57,7 @@
#define SCIC_SDS_MPC_RECONFIGURATION_TIMEOUT (10)
#define SCIC_SDS_APC_RECONFIGURATION_TIMEOUT (10)
-#define SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION (100)
+#define SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION (250)
enum SCIC_SDS_APC_ACTIVITY {
SCIC_SDS_APC_SKIP_PHY,
@@ -466,6 +466,23 @@ sci_apc_agent_validate_phy_configuration(struct isci_host *ihost,
return sci_port_configuration_agent_validate_ports(ihost, port_agent);
}
+/*
+ * This routine will restart the automatic port configuration timeout
+ * timer for the next time period. This could be caused by either a link
+ * down event or a link up event where we can not yet tell to which a phy
+ * belongs.
+ */
+static void sci_apc_agent_start_timer(
+ struct sci_port_configuration_agent *port_agent,
+ u32 timeout)
+{
+ if (port_agent->timer_pending)
+ sci_del_timer(&port_agent->timer);
+
+ port_agent->timer_pending = true;
+ sci_mod_timer(&port_agent->timer, timeout);
+}
+
static void sci_apc_agent_configure_ports(struct isci_host *ihost,
struct sci_port_configuration_agent *port_agent,
struct isci_phy *iphy,
@@ -565,17 +582,8 @@ static void sci_apc_agent_configure_ports(struct isci_host *ihost,
break;
case SCIC_SDS_APC_START_TIMER:
- /*
- * This can occur for either a link down event, or a link
- * up event where we cannot yet tell the port to which a
- * phy belongs.
- */
- if (port_agent->timer_pending)
- sci_del_timer(&port_agent->timer);
-
- port_agent->timer_pending = true;
- sci_mod_timer(&port_agent->timer,
- SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION);
+ sci_apc_agent_start_timer(port_agent,
+ SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION);
break;
case SCIC_SDS_APC_SKIP_PHY:
@@ -607,7 +615,8 @@ static void sci_apc_agent_link_up(struct isci_host *ihost,
if (!iport) {
/* the phy is not the part of this port */
port_agent->phy_ready_mask |= 1 << phy_index;
- sci_apc_agent_configure_ports(ihost, port_agent, iphy, true);
+ sci_apc_agent_start_timer(port_agent,
+ SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION);
} else {
/* the phy is already the part of the port */
u32 port_state = iport->sm.current_state_id;
diff --git a/drivers/scsi/isci/probe_roms.c b/drivers/scsi/isci/probe_roms.c
index b5f4341de243..9b8117b9d756 100644
--- a/drivers/scsi/isci/probe_roms.c
+++ b/drivers/scsi/isci/probe_roms.c
@@ -147,7 +147,7 @@ struct isci_orom *isci_request_firmware(struct pci_dev *pdev, const struct firmw
memcpy(orom, fw->data, fw->size);
- if (is_c0(pdev))
+ if (is_c0(pdev) || is_c1(pdev))
goto out;
/*
diff --git a/drivers/scsi/isci/probe_roms.h b/drivers/scsi/isci/probe_roms.h
index 2c75248ca326..bb0e9d4d97c9 100644
--- a/drivers/scsi/isci/probe_roms.h
+++ b/drivers/scsi/isci/probe_roms.h
@@ -152,7 +152,7 @@ struct sci_user_parameters {
#define MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT 4
struct sci_oem_params;
-int sci_oem_parameters_validate(struct sci_oem_params *oem);
+int sci_oem_parameters_validate(struct sci_oem_params *oem, u8 version);
struct isci_orom;
struct isci_orom *isci_request_oprom(struct pci_dev *pdev);
@@ -191,6 +191,11 @@ struct isci_oem_hdr {
0x1a, 0x04, 0xc6)
#define ISCI_EFI_VAR_NAME "RstScuO"
+#define ISCI_ROM_VER_1_0 0x10
+#define ISCI_ROM_VER_1_1 0x11
+#define ISCI_ROM_VER_1_3 0x13
+#define ISCI_ROM_VER_LATEST ISCI_ROM_VER_1_3
+
/* Allowed PORT configuration modes APC Automatic PORT configuration mode is
* defined by the OEM configuration parameters providing no PHY_MASK parameters
* for any PORT. i.e. There are no phys assigned to any of the ports at start.
@@ -220,8 +225,86 @@ struct sci_oem_params {
struct {
uint8_t mode_type;
uint8_t max_concurr_spin_up;
- uint8_t do_enable_ssc;
- uint8_t reserved;
+ /*
+ * This bitfield indicates the OEM's desired default Tx
+ * Spread Spectrum Clocking (SSC) settings for SATA and SAS.
+ * NOTE: Default SSC Modulation Frequency is 31.5KHz.
+ */
+ union {
+ struct {
+ /*
+ * NOTE: Max spread for SATA is +0 / -5000 PPM.
+ * Down-spreading SSC (only method allowed for SATA):
+ * SATA SSC Tx Disabled = 0x0
+ * SATA SSC Tx at +0 / -1419 PPM Spread = 0x2
+ * SATA SSC Tx at +0 / -2129 PPM Spread = 0x3
+ * SATA SSC Tx at +0 / -4257 PPM Spread = 0x6
+ * SATA SSC Tx at +0 / -4967 PPM Spread = 0x7
+ */
+ uint8_t ssc_sata_tx_spread_level:4;
+ /*
+ * SAS SSC Tx Disabled = 0x0
+ *
+ * NOTE: Max spread for SAS down-spreading +0 /
+ * -2300 PPM
+ * Down-spreading SSC:
+ * SAS SSC Tx at +0 / -1419 PPM Spread = 0x2
+ * SAS SSC Tx at +0 / -2129 PPM Spread = 0x3
+ *
+ * NOTE: Max spread for SAS center-spreading +2300 /
+ * -2300 PPM
+ * Center-spreading SSC:
+ * SAS SSC Tx at +1064 / -1064 PPM Spread = 0x3
+ * SAS SSC Tx at +2129 / -2129 PPM Spread = 0x6
+ */
+ uint8_t ssc_sas_tx_spread_level:3;
+ /*
+ * NOTE: Refer to the SSC section of the SAS 2.x
+ * Specification for proper setting of this field.
+ * For standard SAS Initiator SAS PHY operation it
+ * should be 0 for Down-spreading.
+ * SAS SSC Tx spread type:
+ * Down-spreading SSC = 0
+ * Center-spreading SSC = 1
+ */
+ uint8_t ssc_sas_tx_type:1;
+ };
+ uint8_t do_enable_ssc;
+ };
+ /*
+ * This field indicates length of the SAS/SATA cable between
+ * host and device.
+ * This field is used to relate the analog parameters of the phy in
+ * the silicon to the length of the cable.
+ * Supported cable attenuation levels:
+ * "short"- up to 3m, "medium"-3m to 6m, and "long"- more than
+ * 6m.
+ *
+ * This is bit mask field:
+ *
+ * BIT: (MSB) 7 6 5 4
+ * ASSIGNMENT: <phy3><phy2><phy1><phy0> - Medium cable
+ * length assignment
+ * BIT: 3 2 1 0 (LSB)
+ * ASSIGNMENT: <phy3><phy2><phy1><phy0> - Long cable length
+ * assignment
+ *
+ * BITS 7-4 are set when the cable length is assigned to medium
+ * BITS 3-0 are set when the cable length is assigned to long
+ *
+ * The BIT positions are clear when the cable length is
+ * assigned to short.
+ *
+ * Setting the bits for both long and medium cable length is
+ * undefined.
+ *
+ * A value of 0x84 would assign
+ * phy3 - medium
+ * phy2 - long
+ * phy1 - short
+ * phy0 - short
+ */
+ uint8_t cable_selection_mask;
} controller;
struct {
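
The do_enable_ssc byte is now overlaid with three sub-fields for the SATA spread level, the SAS spread level and the SAS spread type. Assuming the usual GCC little-endian bitfield layout (low-order fields allocated first), a stored value decodes as in this illustrative overlay; the union and field names here are invented for the example:

union example_ssc_byte {
	struct {
		uint8_t sata_spread:4;	/* bits 0-3 */
		uint8_t sas_spread:3;	/* bits 4-6 */
		uint8_t sas_type:1;	/* bit 7    */
	};
	uint8_t raw;			/* aliases do_enable_ssc */
};

/* Example: raw == 0x32 decodes to sata_spread == 0x2 (SATA -1419 PPM),
 * sas_spread == 0x3 (SAS -2129 PPM) and sas_type == 0 (down-spreading),
 * a combination accepted by the checks added to
 * sci_oem_parameters_validate() in host.c.
 */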
diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c
index b207cd3b15a0..dd74b6ceeb82 100644
--- a/drivers/scsi/isci/remote_device.c
+++ b/drivers/scsi/isci/remote_device.c
@@ -53,6 +53,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <scsi/sas.h>
+#include <linux/bitops.h>
#include "isci.h"
#include "port.h"
#include "remote_device.h"
@@ -1101,6 +1102,7 @@ static enum sci_status sci_remote_device_da_construct(struct isci_port *iport,
struct isci_remote_device *idev)
{
enum sci_status status;
+ struct sci_port_properties properties;
struct domain_device *dev = idev->domain_dev;
sci_remote_device_construct(iport, idev);
@@ -1110,6 +1112,11 @@ static enum sci_status sci_remote_device_da_construct(struct isci_port *iport,
* entries will be needed to store the remote node.
*/
idev->is_direct_attached = true;
+
+ sci_port_get_properties(iport, &properties);
+ /* Get accurate port width from port's phy mask for a DA device. */
+ idev->device_port_width = hweight32(properties.phy_mask);
+
status = sci_controller_allocate_remote_node_context(iport->owning_controller,
idev,
&idev->rnc.remote_node_index);
@@ -1125,9 +1132,6 @@ static enum sci_status sci_remote_device_da_construct(struct isci_port *iport,
idev->connection_rate = sci_port_get_max_allowed_speed(iport);
- /* / @todo Should I assign the port width by reading all of the phys on the port? */
- idev->device_port_width = 1;
-
return SCI_SUCCESS;
}
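
The direct-attached device port width is now taken from the port's phy mask instead of being hard-coded to 1, so a wide DA port is reported accurately. A trivial worked example (values are illustrative):

static u32 example_da_port_width(u32 phy_mask)
{
	/* Phys 0 and 1 forming one wide port give phy_mask == 0x3,
	 * so hweight32(0x3) == 2 becomes the device_port_width.
	 */
	return hweight32(phy_mask);
}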
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
index 66ad3dc89498..f5a3f7d2bdab 100644
--- a/drivers/scsi/isci/task.c
+++ b/drivers/scsi/isci/task.c
@@ -496,7 +496,7 @@ static int isci_task_execute_tmf(struct isci_host *ihost,
}
}
- isci_print_tmf(tmf);
+ isci_print_tmf(ihost, tmf);
if (tmf->status == SCI_SUCCESS)
ret = TMF_RESP_FUNC_COMPLETE;
diff --git a/drivers/scsi/isci/task.h b/drivers/scsi/isci/task.h
index bc78c0a41d5c..1b27b3797c6c 100644
--- a/drivers/scsi/isci/task.h
+++ b/drivers/scsi/isci/task.h
@@ -106,7 +106,6 @@ struct isci_tmf {
} resp;
unsigned char lun[8];
u16 io_tag;
- struct isci_remote_device *device;
enum isci_tmf_function_codes tmf_code;
int status;
@@ -120,10 +119,10 @@ struct isci_tmf {
};
-static inline void isci_print_tmf(struct isci_tmf *tmf)
+static inline void isci_print_tmf(struct isci_host *ihost, struct isci_tmf *tmf)
{
if (SAS_PROTOCOL_SATA == tmf->proto)
- dev_dbg(&tmf->device->isci_port->isci_host->pdev->dev,
+ dev_dbg(&ihost->pdev->dev,
"%s: status = %x\n"
"tmf->resp.d2h_fis.status = %x\n"
"tmf->resp.d2h_fis.error = %x\n",
@@ -132,7 +131,7 @@ static inline void isci_print_tmf(struct isci_tmf *tmf)
tmf->resp.d2h_fis.status,
tmf->resp.d2h_fis.error);
else
- dev_dbg(&tmf->device->isci_port->isci_host->pdev->dev,
+ dev_dbg(&ihost->pdev->dev,
"%s: status = %x\n"
"tmf->resp.resp_iu.data_present = %x\n"
"tmf->resp.resp_iu.status = %x\n"
diff --git a/drivers/scsi/iscsi_boot_sysfs.c b/drivers/scsi/iscsi_boot_sysfs.c
index 89700cbca16e..14c1c8f6a95e 100644
--- a/drivers/scsi/iscsi_boot_sysfs.c
+++ b/drivers/scsi/iscsi_boot_sysfs.c
@@ -112,7 +112,7 @@ static struct attribute *target_attrs[] = {
NULL
};
-static mode_t iscsi_boot_tgt_attr_is_visible(struct kobject *kobj,
+static umode_t iscsi_boot_tgt_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int i)
{
struct iscsi_boot_kobj *boot_kobj =
@@ -193,7 +193,7 @@ static struct attribute *ethernet_attrs[] = {
NULL
};
-static mode_t iscsi_boot_eth_attr_is_visible(struct kobject *kobj,
+static umode_t iscsi_boot_eth_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int i)
{
struct iscsi_boot_kobj *boot_kobj =
@@ -265,7 +265,7 @@ static struct attribute *initiator_attrs[] = {
NULL
};
-static mode_t iscsi_boot_ini_attr_is_visible(struct kobject *kobj,
+static umode_t iscsi_boot_ini_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int i)
{
struct iscsi_boot_kobj *boot_kobj =
@@ -306,7 +306,7 @@ iscsi_boot_create_kobj(struct iscsi_boot_kset *boot_kset,
struct attribute_group *attr_group,
const char *name, int index, void *data,
ssize_t (*show) (void *data, int type, char *buf),
- mode_t (*is_visible) (void *data, int type),
+ umode_t (*is_visible) (void *data, int type),
void (*release) (void *data))
{
struct iscsi_boot_kobj *boot_kobj;
@@ -369,7 +369,7 @@ struct iscsi_boot_kobj *
iscsi_boot_create_target(struct iscsi_boot_kset *boot_kset, int index,
void *data,
ssize_t (*show) (void *data, int type, char *buf),
- mode_t (*is_visible) (void *data, int type),
+ umode_t (*is_visible) (void *data, int type),
void (*release) (void *data))
{
return iscsi_boot_create_kobj(boot_kset, &iscsi_boot_target_attr_group,
@@ -394,7 +394,7 @@ struct iscsi_boot_kobj *
iscsi_boot_create_initiator(struct iscsi_boot_kset *boot_kset, int index,
void *data,
ssize_t (*show) (void *data, int type, char *buf),
- mode_t (*is_visible) (void *data, int type),
+ umode_t (*is_visible) (void *data, int type),
void (*release) (void *data))
{
return iscsi_boot_create_kobj(boot_kset,
@@ -420,7 +420,7 @@ struct iscsi_boot_kobj *
iscsi_boot_create_ethernet(struct iscsi_boot_kset *boot_kset, int index,
void *data,
ssize_t (*show) (void *data, int type, char *buf),
- mode_t (*is_visible) (void *data, int type),
+ umode_t (*is_visible) (void *data, int type),
void (*release) (void *data))
{
return iscsi_boot_create_kobj(boot_kset,
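The iscsi_boot_sysfs and iscsi_tcp hunks above only change the return type of the visibility callbacks from mode_t to umode_t, the kernel's dedicated file-mode type. The callback contract itself is unchanged: return the mode under which the attribute should appear, or 0 to hide it. A self-contained sketch of that contract, with umode_t and S_IRUGO declared locally so it builds outside the kernel (the parameter names are illustrative):

#include <stdio.h>

typedef unsigned short umode_t;		/* mirrors include/linux/types.h */
#define S_IRUGO 0444			/* world-readable attribute */

enum boot_param { PARAM_TARGET_NAME, PARAM_CHAP_SECRET };

/* Return the mode used to expose a parameter, or 0 to hide it entirely. */
static umode_t boot_attr_is_visible(int type)
{
	switch (type) {
	case PARAM_TARGET_NAME:
		return S_IRUGO;
	case PARAM_CHAP_SECRET:		/* never export secrets */
	default:
		return 0;
	}
}

int main(void)
{
	printf("target name mode: %#o\n",
	       (unsigned int)boot_attr_is_visible(PARAM_TARGET_NAME));
	printf("chap secret mode: %#o\n",
	       (unsigned int)boot_attr_is_visible(PARAM_CHAP_SECRET));
	return 0;
}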
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 7c34d8e7cc75..db47158e0dde 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -873,7 +873,7 @@ static void iscsi_sw_tcp_session_destroy(struct iscsi_cls_session *cls_session)
iscsi_host_free(shost);
}
-static mode_t iscsi_sw_tcp_attr_is_visible(int param_type, int param)
+static umode_t iscsi_sw_tcp_attr_is_visible(int param_type, int param)
{
switch (param_type) {
case ISCSI_HOST_PARAM:
diff --git a/drivers/scsi/jazz_esp.c b/drivers/scsi/jazz_esp.c
index 08e26d4e3731..27cfb0cb186c 100644
--- a/drivers/scsi/jazz_esp.c
+++ b/drivers/scsi/jazz_esp.c
@@ -1,6 +1,6 @@
/* jazz_esp.c: ESP front-end for MIPS JAZZ systems.
*
- * Copyright (C) 2007 Thomas Bogendrfer (tsbogend@alpha.franken.de)
+ * Copyright (C) 2007 Thomas Bogendörfer (tsbogend@alpha.franken.de)
*/
#include <linux/kernel.h>
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
index 7269e928824a..1d1b0c9da29b 100644
--- a/drivers/scsi/libfc/fc_disc.c
+++ b/drivers/scsi/libfc/fc_disc.c
@@ -61,7 +61,7 @@ static void fc_disc_restart(struct fc_disc *);
* Locking Note: This function expects that the lport mutex is locked before
* calling it.
*/
-void fc_disc_stop_rports(struct fc_disc *disc)
+static void fc_disc_stop_rports(struct fc_disc *disc)
{
struct fc_lport *lport;
struct fc_rport_priv *rdata;
@@ -682,7 +682,7 @@ static int fc_disc_single(struct fc_lport *lport, struct fc_disc_port *dp)
* fc_disc_stop() - Stop discovery for a given lport
* @lport: The local port that discovery should stop on
*/
-void fc_disc_stop(struct fc_lport *lport)
+static void fc_disc_stop(struct fc_lport *lport)
{
struct fc_disc *disc = &lport->disc;
@@ -698,7 +698,7 @@ void fc_disc_stop(struct fc_lport *lport)
* This function will block until discovery has been
* completely stopped and all rports have been deleted.
*/
-void fc_disc_stop_final(struct fc_lport *lport)
+static void fc_disc_stop_final(struct fc_lport *lport)
{
fc_disc_stop(lport);
lport->tt.rport_flush_queue();
diff --git a/drivers/scsi/libfc/fc_elsct.c b/drivers/scsi/libfc/fc_elsct.c
index fb9161dc4ca6..e17a28d324d0 100644
--- a/drivers/scsi/libfc/fc_elsct.c
+++ b/drivers/scsi/libfc/fc_elsct.c
@@ -28,6 +28,7 @@
#include <scsi/fc/fc_els.h>
#include <scsi/libfc.h>
#include <scsi/fc_encode.h>
+#include "fc_libfc.h"
/**
* fc_elsct_send() - Send an ELS or CT frame
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 9de9db27e874..4d70d96fa5dc 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -91,7 +91,7 @@ struct fc_exch_pool {
* It manages the allocation of exchange IDs.
*/
struct fc_exch_mgr {
- struct fc_exch_pool *pool;
+ struct fc_exch_pool __percpu *pool;
mempool_t *ep_pool;
enum fc_class class;
struct kref kref;
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 221875ec3d7c..f607314810ac 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -155,6 +155,7 @@ static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lport, gfp_t gfp)
fsp->xfer_ddp = FC_XID_UNKNOWN;
atomic_set(&fsp->ref_cnt, 1);
init_timer(&fsp->timer);
+ fsp->timer.data = (unsigned long)fsp;
INIT_LIST_HEAD(&fsp->list);
spin_lock_init(&fsp->scsi_pkt_lock);
}
@@ -1850,9 +1851,6 @@ int fc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc_cmd)
}
put_cpu();
- init_timer(&fsp->timer);
- fsp->timer.data = (unsigned long)fsp;
-
/*
* send it to the lower layer
* if we get -1 return then put the request in the pending
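The fc_fcp change above moves the fsp->timer.data assignment into fc_fcp_pkt_alloc(), so the timer's callback context is bound once when the packet is allocated instead of being re-initialised for every queued command. A hedged user-space analog of the same idea, binding callback and context at allocation time (all names below are illustrative, not libfc's):

#include <stdio.h>
#include <stdlib.h>

struct pkt {
	void (*timeout_fn)(void *);	/* callback, bound once at allocation */
	void *timeout_arg;		/* context, bound once at allocation */
	int armed;
};

static void pkt_timeout(void *arg)
{
	struct pkt *p = arg;

	printf("timeout fired for pkt %p\n", (void *)p);
}

static struct pkt *pkt_alloc(void)
{
	struct pkt *p = calloc(1, sizeof(*p));

	if (!p)
		return NULL;
	/* Mirror of init_timer() plus timer.data: set callback and context here. */
	p->timeout_fn = pkt_timeout;
	p->timeout_arg = p;
	return p;
}

static void pkt_arm_timer(struct pkt *p)
{
	p->armed = 1;	/* arming later never has to repeat the binding */
}

int main(void)
{
	struct pkt *p = pkt_alloc();

	if (!p)
		return 1;
	pkt_arm_timer(p);
	p->timeout_fn(p->timeout_arg);	/* simulate expiry */
	free(p);
	return 0;
}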
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index e77094a587ed..83750ebb527f 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -677,7 +677,8 @@ EXPORT_SYMBOL(fc_set_mfs);
* @lport: The local port receiving the event
* @event: The discovery event
*/
-void fc_lport_disc_callback(struct fc_lport *lport, enum fc_disc_event event)
+static void fc_lport_disc_callback(struct fc_lport *lport,
+ enum fc_disc_event event)
{
switch (event) {
case DISC_EV_SUCCESS:
@@ -1568,7 +1569,7 @@ EXPORT_SYMBOL(fc_lport_flogi_resp);
* Locking Note: The lport lock is expected to be held before calling
* this routine.
*/
-void fc_lport_enter_flogi(struct fc_lport *lport)
+static void fc_lport_enter_flogi(struct fc_lport *lport)
{
struct fc_frame *fp;
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index b9e434844a69..83aa1efec875 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -391,7 +391,7 @@ static void fc_rport_work(struct work_struct *work)
* If it appears we are already logged in, ADISC is used to verify
* the setup.
*/
-int fc_rport_login(struct fc_rport_priv *rdata)
+static int fc_rport_login(struct fc_rport_priv *rdata)
{
mutex_lock(&rdata->rp_mutex);
@@ -451,7 +451,7 @@ static void fc_rport_enter_delete(struct fc_rport_priv *rdata,
* function will hold the rport lock, call an _enter_*
* function and then unlock the rport.
*/
-int fc_rport_logoff(struct fc_rport_priv *rdata)
+static int fc_rport_logoff(struct fc_rport_priv *rdata)
{
mutex_lock(&rdata->rp_mutex);
@@ -653,8 +653,8 @@ static int fc_rport_login_complete(struct fc_rport_priv *rdata,
* @fp: The FLOGI response frame
* @rp_arg: The remote port that received the FLOGI response
*/
-void fc_rport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
- void *rp_arg)
+static void fc_rport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
+ void *rp_arg)
{
struct fc_rport_priv *rdata = rp_arg;
struct fc_lport *lport = rdata->local_port;
@@ -1520,7 +1520,7 @@ reject:
*
* Locking Note: Called with the lport lock held.
*/
-void fc_rport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
+static void fc_rport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
{
struct fc_seq_els_data els_data;
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index bb4c8e0584e2..825f9307417a 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -247,18 +247,6 @@ struct lpfc_stats {
uint32_t fcpLocalErr;
};
-enum sysfs_mbox_state {
- SMBOX_IDLE,
- SMBOX_WRITING,
- SMBOX_READING
-};
-
-struct lpfc_sysfs_mbox {
- enum sysfs_mbox_state state;
- size_t offset;
- struct lpfcMboxq * mbox;
-};
-
struct lpfc_hba;
@@ -783,8 +771,6 @@ struct lpfc_hba {
uint64_t bg_apptag_err_cnt;
uint64_t bg_reftag_err_cnt;
- struct lpfc_sysfs_mbox sysfs_mbox;
-
/* fastpath list. */
spinlock_t scsi_buf_list_lock;
struct list_head lpfc_scsi_buf_list;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index d0ebaeb7ef60..f6697cb0e216 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -351,10 +351,23 @@ lpfc_fwrev_show(struct device *dev, struct device_attribute *attr,
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
+ uint32_t if_type;
+ uint8_t sli_family;
char fwrev[32];
+ int len;
lpfc_decode_firmware_rev(phba, fwrev, 1);
- return snprintf(buf, PAGE_SIZE, "%s, sli-%d\n", fwrev, phba->sli_rev);
+ if_type = phba->sli4_hba.pc_sli4_params.if_type;
+ sli_family = phba->sli4_hba.pc_sli4_params.sli_family;
+
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ len = snprintf(buf, PAGE_SIZE, "%s, sli-%d\n",
+ fwrev, phba->sli_rev);
+ else
+ len = snprintf(buf, PAGE_SIZE, "%s, sli-%d:%d:%x\n",
+ fwrev, phba->sli_rev, if_type, sli_family);
+
+ return len;
}
/**
@@ -488,6 +501,34 @@ lpfc_link_state_show(struct device *dev, struct device_attribute *attr,
}
/**
+ * lpfc_sli4_protocol_show - Return the fip mode of the HBA
+ * @dev: class unused variable.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the module description text.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_sli4_protocol_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ return snprintf(buf, PAGE_SIZE, "fc\n");
+
+ if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL) {
+ if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_GE)
+ return snprintf(buf, PAGE_SIZE, "fcoe\n");
+ if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC)
+ return snprintf(buf, PAGE_SIZE, "fc\n");
+ }
+ return snprintf(buf, PAGE_SIZE, "unknown\n");
+}
+
+/**
* lpfc_link_state_store - Transition the link_state on an HBA port
* @dev: class device that is converted into a Scsi_host.
* @attr: device attribute, not used.
@@ -773,7 +814,12 @@ lpfc_issue_reset(struct device *dev, struct device_attribute *attr,
* the readiness after performing a firmware reset.
*
* Returns:
- * zero for success
+ * zero for success, -EPERM when the port does not have the privilege to
+ * perform the reset, -EIO when the port times out recovering from the reset.
+ *
+ * Note:
+ * As the caller will interpret the return code by value, be careful when
+ * making changes or additions to the return codes.
**/
int
lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba *phba)
@@ -826,9 +872,11 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
{
struct completion online_compl;
struct pci_dev *pdev = phba->pcidev;
+ uint32_t before_fc_flag;
+ uint32_t sriov_nr_virtfn;
uint32_t reg_val;
- int status = 0;
- int rc;
+ int status = 0, rc = 0;
+ int job_posted = 1, sriov_err;
if (!phba->cfg_enable_hba_reset)
return -EACCES;
@@ -838,6 +886,10 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
LPFC_SLI_INTF_IF_TYPE_2))
return -EPERM;
+ /* Keep state if we need to restore back */
+ before_fc_flag = phba->pport->fc_flag;
+ sriov_nr_virtfn = phba->cfg_sriov_nr_virtfn;
+
/* Disable SR-IOV virtual functions if enabled */
if (phba->cfg_sriov_nr_virtfn) {
pci_disable_sriov(pdev);
@@ -869,21 +921,44 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
/* delay driver action following IF_TYPE_2 reset */
rc = lpfc_sli4_pdev_status_reg_wait(phba);
- if (rc)
+ if (rc == -EPERM) {
+ /* no privilege for reset, restore if needed */
+ if (before_fc_flag & FC_OFFLINE_MODE)
+ goto out;
+ } else if (rc == -EIO) {
+ /* reset failed, there is nothing more we can do */
return rc;
+ }
+
+ /* keep the original port state */
+ if (before_fc_flag & FC_OFFLINE_MODE)
+ goto out;
init_completion(&online_compl);
- rc = lpfc_workq_post_event(phba, &status, &online_compl,
- LPFC_EVT_ONLINE);
- if (rc == 0)
- return -ENOMEM;
+ job_posted = lpfc_workq_post_event(phba, &status, &online_compl,
+ LPFC_EVT_ONLINE);
+ if (!job_posted)
+ goto out;
wait_for_completion(&online_compl);
- if (status != 0)
- return -EIO;
+out:
+ /* in any case, restore the virtual functions enabled as before */
+ if (sriov_nr_virtfn) {
+ sriov_err =
+ lpfc_sli_probe_sriov_nr_virtfn(phba, sriov_nr_virtfn);
+ if (!sriov_err)
+ phba->cfg_sriov_nr_virtfn = sriov_nr_virtfn;
+ }
- return 0;
+ /* return proper error code */
+ if (!rc) {
+ if (!job_posted)
+ rc = -ENOMEM;
+ else if (status)
+ rc = -EIO;
+ }
+ return rc;
}
/**
@@ -955,33 +1030,38 @@ lpfc_board_mode_store(struct device *dev, struct device_attribute *attr,
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
struct completion online_compl;
- int status=0;
+ char *board_mode_str = NULL;
+ int status = 0;
int rc;
- if (!phba->cfg_enable_hba_reset)
- return -EACCES;
+ if (!phba->cfg_enable_hba_reset) {
+ status = -EACCES;
+ goto board_mode_out;
+ }
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "3050 lpfc_board_mode set to %s\n", buf);
+ "3050 lpfc_board_mode set to %s\n", buf);
init_completion(&online_compl);
if(strncmp(buf, "online", sizeof("online") - 1) == 0) {
rc = lpfc_workq_post_event(phba, &status, &online_compl,
LPFC_EVT_ONLINE);
- if (rc == 0)
- return -ENOMEM;
+ if (rc == 0) {
+ status = -ENOMEM;
+ goto board_mode_out;
+ }
wait_for_completion(&online_compl);
} else if (strncmp(buf, "offline", sizeof("offline") - 1) == 0)
status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
else if (strncmp(buf, "warm", sizeof("warm") - 1) == 0)
if (phba->sli_rev == LPFC_SLI_REV4)
- return -EINVAL;
+ status = -EINVAL;
else
status = lpfc_do_offline(phba, LPFC_EVT_WARM_START);
else if (strncmp(buf, "error", sizeof("error") - 1) == 0)
if (phba->sli_rev == LPFC_SLI_REV4)
- return -EINVAL;
+ status = -EINVAL;
else
status = lpfc_do_offline(phba, LPFC_EVT_KILL);
else if (strncmp(buf, "dump", sizeof("dump") - 1) == 0)
@@ -991,12 +1071,21 @@ lpfc_board_mode_store(struct device *dev, struct device_attribute *attr,
else if (strncmp(buf, "dv_reset", sizeof("dv_reset") - 1) == 0)
status = lpfc_sli4_pdev_reg_request(phba, LPFC_DV_RESET);
else
- return -EINVAL;
+ status = -EINVAL;
+board_mode_out:
if (!status)
return strlen(buf);
- else
+ else {
+ board_mode_str = strchr(buf, '\n');
+ if (board_mode_str)
+ *board_mode_str = '\0';
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "3097 Failed \"%s\", status(%d), "
+ "fc_flag(x%x)\n",
+ buf, status, phba->pport->fc_flag);
return status;
+ }
}
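The reworked lpfc_board_mode_store() above replaces its scattered early returns with a single board_mode_out exit so that every failure is logged with the offending keyword, the status and the port's fc_flag before returning. A small self-contained sketch of that single-exit error-handling pattern (the keywords and messages below are illustrative only):

#include <stdio.h>
#include <string.h>
#include <errno.h>

/* Parse a mode keyword; every error path funnels through one exit label. */
static int set_board_mode(const char *buf)
{
	int status = 0;

	if (!buf || !buf[0]) {
		status = -EACCES;
		goto board_mode_out;		/* early bail, still logged below */
	}

	if (strncmp(buf, "online", 6) == 0)
		status = 0;			/* pretend the transition worked */
	else if (strncmp(buf, "offline", 7) == 0)
		status = 0;
	else
		status = -EINVAL;		/* unknown keyword */

board_mode_out:
	if (!status)
		return (int)strlen(buf);	/* sysfs convention: bytes consumed */

	/* Single exit: one place to log every failure before returning. */
	fprintf(stderr, "failed \"%s\", status(%d)\n", buf ? buf : "", status);
	return status;
}

int main(void)
{
	printf("%d\n", set_board_mode("online"));
	printf("%d\n", set_board_mode("bogus"));
	return 0;
}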
/**
@@ -1942,6 +2031,7 @@ static DEVICE_ATTR(lpfc_fips_rev, S_IRUGO, lpfc_fips_rev_show, NULL);
static DEVICE_ATTR(lpfc_dss, S_IRUGO, lpfc_dss_show, NULL);
static DEVICE_ATTR(lpfc_sriov_hw_max_virtfn, S_IRUGO,
lpfc_sriov_hw_max_virtfn_show, NULL);
+static DEVICE_ATTR(protocol, S_IRUGO, lpfc_sli4_protocol_show, NULL);
static char *lpfc_soft_wwn_key = "C99G71SL8032A";
@@ -2687,6 +2777,14 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr,
if (val >= 0 && val <= 6) {
prev_val = phba->cfg_topology;
phba->cfg_topology = val;
+ if (phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G &&
+ val == 4) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "3113 Loop mode not supported at speed %d\n",
+ phba->cfg_link_speed);
+ phba->cfg_topology = prev_val;
+ return -EINVAL;
+ }
if (nolip)
return strlen(buf);
@@ -3132,6 +3230,14 @@ lpfc_link_speed_store(struct device *dev, struct device_attribute *attr,
val);
return -EINVAL;
}
+ if (val == LPFC_USER_LINK_SPEED_16G &&
+ phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3112 lpfc_link_speed attribute cannot be set "
+ "to %d. Speed is not supported in loop mode.\n",
+ val);
+ return -EINVAL;
+ }
if ((val >= 0) && (val <= LPFC_USER_LINK_SPEED_MAX) &&
(LPFC_USER_LINK_SPEED_BITMAP & (1 << val))) {
prev_val = phba->cfg_link_speed;
@@ -3176,6 +3282,13 @@ lpfc_param_show(link_speed)
static int
lpfc_link_speed_init(struct lpfc_hba *phba, int val)
{
+ if (val == LPFC_USER_LINK_SPEED_16G && phba->cfg_topology == 4) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3111 lpfc_link_speed of %d cannot "
+ "support loop mode, setting topology to default.\n",
+ val);
+ phba->cfg_topology = 0;
+ }
if ((val >= 0) && (val <= LPFC_USER_LINK_SPEED_MAX) &&
(LPFC_USER_LINK_SPEED_BITMAP & (1 << val))) {
phba->cfg_link_speed = val;
@@ -3830,6 +3943,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_fips_rev,
&dev_attr_lpfc_dss,
&dev_attr_lpfc_sriov_hw_max_virtfn,
+ &dev_attr_protocol,
NULL,
};
@@ -3988,23 +4102,6 @@ static struct bin_attribute sysfs_ctlreg_attr = {
};
/**
- * sysfs_mbox_idle - frees the sysfs mailbox
- * @phba: lpfc_hba pointer
- **/
-static void
-sysfs_mbox_idle(struct lpfc_hba *phba)
-{
- phba->sysfs_mbox.state = SMBOX_IDLE;
- phba->sysfs_mbox.offset = 0;
-
- if (phba->sysfs_mbox.mbox) {
- mempool_free(phba->sysfs_mbox.mbox,
- phba->mbox_mem_pool);
- phba->sysfs_mbox.mbox = NULL;
- }
-}
-
-/**
* sysfs_mbox_write - Write method for writing information via mbox
* @filp: open sysfs file
* @kobj: kernel kobject that contains the kernel class device.
@@ -4014,71 +4111,18 @@ sysfs_mbox_idle(struct lpfc_hba *phba)
* @count: bytes to transfer.
*
* Description:
- * Accessed via /sys/class/scsi_host/hostxxx/mbox.
- * Uses the sysfs mbox to send buf contents to the adapter.
+ * Deprecated function. All mailbox access from user space is performed via the
+ * bsg interface.
*
* Returns:
- * -ERANGE off and count combo out of range
- * -EINVAL off, count or buff address invalid
- * zero if count is zero
- * -EPERM adapter is offline
- * -ENOMEM failed to allocate memory for the mail box
- * -EAGAIN offset, state or mbox is NULL
- * count number of bytes transferred
+ * -EPERM operation not permitted
**/
static ssize_t
sysfs_mbox_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
- struct device *dev = container_of(kobj, struct device, kobj);
- struct Scsi_Host *shost = class_to_shost(dev);
- struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
- struct lpfc_hba *phba = vport->phba;
- struct lpfcMboxq *mbox = NULL;
-
- if ((count + off) > MAILBOX_CMD_SIZE)
- return -ERANGE;
-
- if (off % 4 || count % 4 || (unsigned long)buf % 4)
- return -EINVAL;
-
- if (count == 0)
- return 0;
-
- if (off == 0) {
- mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (!mbox)
- return -ENOMEM;
- memset(mbox, 0, sizeof (LPFC_MBOXQ_t));
- }
-
- spin_lock_irq(&phba->hbalock);
-
- if (off == 0) {
- if (phba->sysfs_mbox.mbox)
- mempool_free(mbox, phba->mbox_mem_pool);
- else
- phba->sysfs_mbox.mbox = mbox;
- phba->sysfs_mbox.state = SMBOX_WRITING;
- } else {
- if (phba->sysfs_mbox.state != SMBOX_WRITING ||
- phba->sysfs_mbox.offset != off ||
- phba->sysfs_mbox.mbox == NULL) {
- sysfs_mbox_idle(phba);
- spin_unlock_irq(&phba->hbalock);
- return -EAGAIN;
- }
- }
-
- memcpy((uint8_t *) &phba->sysfs_mbox.mbox->u.mb + off,
- buf, count);
-
- phba->sysfs_mbox.offset = off + count;
-
- spin_unlock_irq(&phba->hbalock);
-
- return count;
+ return -EPERM;
}
/**
@@ -4091,201 +4135,18 @@ sysfs_mbox_write(struct file *filp, struct kobject *kobj,
* @count: bytes to transfer.
*
* Description:
- * Accessed via /sys/class/scsi_host/hostxxx/mbox.
- * Uses the sysfs mbox to receive data from to the adapter.
+ * Deprecated function. All mailbox access from user space is performed via the
+ * bsg interface.
*
* Returns:
- * -ERANGE off greater than mailbox command size
- * -EINVAL off, count or buff address invalid
- * zero if off and count are zero
- * -EACCES adapter over temp
- * -EPERM garbage can value to catch a multitude of errors
- * -EAGAIN management IO not permitted, state or off error
- * -ETIME mailbox timeout
- * -ENODEV mailbox error
- * count number of bytes transferred
+ * -EPERM operation not permitted
**/
static ssize_t
sysfs_mbox_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
- struct device *dev = container_of(kobj, struct device, kobj);
- struct Scsi_Host *shost = class_to_shost(dev);
- struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
- struct lpfc_hba *phba = vport->phba;
- LPFC_MBOXQ_t *mboxq;
- MAILBOX_t *pmb;
- uint32_t mbox_tmo;
- int rc;
-
- if (off > MAILBOX_CMD_SIZE)
- return -ERANGE;
-
- if ((count + off) > MAILBOX_CMD_SIZE)
- count = MAILBOX_CMD_SIZE - off;
-
- if (off % 4 || count % 4 || (unsigned long)buf % 4)
- return -EINVAL;
-
- if (off && count == 0)
- return 0;
-
- spin_lock_irq(&phba->hbalock);
-
- if (phba->over_temp_state == HBA_OVER_TEMP) {
- sysfs_mbox_idle(phba);
- spin_unlock_irq(&phba->hbalock);
- return -EACCES;
- }
-
- if (off == 0 &&
- phba->sysfs_mbox.state == SMBOX_WRITING &&
- phba->sysfs_mbox.offset >= 2 * sizeof(uint32_t)) {
- mboxq = (LPFC_MBOXQ_t *)&phba->sysfs_mbox.mbox;
- pmb = &mboxq->u.mb;
- switch (pmb->mbxCommand) {
- /* Offline only */
- case MBX_INIT_LINK:
- case MBX_DOWN_LINK:
- case MBX_CONFIG_LINK:
- case MBX_CONFIG_RING:
- case MBX_RESET_RING:
- case MBX_UNREG_LOGIN:
- case MBX_CLEAR_LA:
- case MBX_DUMP_CONTEXT:
- case MBX_RUN_DIAGS:
- case MBX_RESTART:
- case MBX_SET_MASK:
- case MBX_SET_DEBUG:
- if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
- printk(KERN_WARNING "mbox_read:Command 0x%x "
- "is illegal in on-line state\n",
- pmb->mbxCommand);
- sysfs_mbox_idle(phba);
- spin_unlock_irq(&phba->hbalock);
- return -EPERM;
- }
- case MBX_WRITE_NV:
- case MBX_WRITE_VPARMS:
- case MBX_LOAD_SM:
- case MBX_READ_NV:
- case MBX_READ_CONFIG:
- case MBX_READ_RCONFIG:
- case MBX_READ_STATUS:
- case MBX_READ_XRI:
- case MBX_READ_REV:
- case MBX_READ_LNK_STAT:
- case MBX_DUMP_MEMORY:
- case MBX_DOWN_LOAD:
- case MBX_UPDATE_CFG:
- case MBX_KILL_BOARD:
- case MBX_LOAD_AREA:
- case MBX_LOAD_EXP_ROM:
- case MBX_BEACON:
- case MBX_DEL_LD_ENTRY:
- case MBX_SET_VARIABLE:
- case MBX_WRITE_WWN:
- case MBX_PORT_CAPABILITIES:
- case MBX_PORT_IOV_CONTROL:
- break;
- case MBX_SECURITY_MGMT:
- case MBX_AUTH_PORT:
- if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
- printk(KERN_WARNING "mbox_read:Command 0x%x "
- "is not permitted\n", pmb->mbxCommand);
- sysfs_mbox_idle(phba);
- spin_unlock_irq(&phba->hbalock);
- return -EPERM;
- }
- break;
- case MBX_READ_SPARM64:
- case MBX_READ_TOPOLOGY:
- case MBX_REG_LOGIN:
- case MBX_REG_LOGIN64:
- case MBX_CONFIG_PORT:
- case MBX_RUN_BIU_DIAG:
- printk(KERN_WARNING "mbox_read: Illegal Command 0x%x\n",
- pmb->mbxCommand);
- sysfs_mbox_idle(phba);
- spin_unlock_irq(&phba->hbalock);
- return -EPERM;
- default:
- printk(KERN_WARNING "mbox_read: Unknown Command 0x%x\n",
- pmb->mbxCommand);
- sysfs_mbox_idle(phba);
- spin_unlock_irq(&phba->hbalock);
- return -EPERM;
- }
-
- /* If HBA encountered an error attention, allow only DUMP
- * or RESTART mailbox commands until the HBA is restarted.
- */
- if (phba->pport->stopped &&
- pmb->mbxCommand != MBX_DUMP_MEMORY &&
- pmb->mbxCommand != MBX_RESTART &&
- pmb->mbxCommand != MBX_WRITE_VPARMS &&
- pmb->mbxCommand != MBX_WRITE_WWN)
- lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
- "1259 mbox: Issued mailbox cmd "
- "0x%x while in stopped state.\n",
- pmb->mbxCommand);
-
- phba->sysfs_mbox.mbox->vport = vport;
-
- /* Don't allow mailbox commands to be sent when blocked
- * or when in the middle of discovery
- */
- if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
- sysfs_mbox_idle(phba);
- spin_unlock_irq(&phba->hbalock);
- return -EAGAIN;
- }
-
- if ((vport->fc_flag & FC_OFFLINE_MODE) ||
- (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) {
-
- spin_unlock_irq(&phba->hbalock);
- rc = lpfc_sli_issue_mbox (phba,
- phba->sysfs_mbox.mbox,
- MBX_POLL);
- spin_lock_irq(&phba->hbalock);
-
- } else {
- spin_unlock_irq(&phba->hbalock);
- mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
- rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
- spin_lock_irq(&phba->hbalock);
- }
-
- if (rc != MBX_SUCCESS) {
- if (rc == MBX_TIMEOUT) {
- phba->sysfs_mbox.mbox = NULL;
- }
- sysfs_mbox_idle(phba);
- spin_unlock_irq(&phba->hbalock);
- return (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
- }
- phba->sysfs_mbox.state = SMBOX_READING;
- }
- else if (phba->sysfs_mbox.offset != off ||
- phba->sysfs_mbox.state != SMBOX_READING) {
- printk(KERN_WARNING "mbox_read: Bad State\n");
- sysfs_mbox_idle(phba);
- spin_unlock_irq(&phba->hbalock);
- return -EAGAIN;
- }
-
- memcpy(buf, (uint8_t *) &pmb + off, count);
-
- phba->sysfs_mbox.offset = off + count;
-
- if (phba->sysfs_mbox.offset == MAILBOX_CMD_SIZE)
- sysfs_mbox_idle(phba);
-
- spin_unlock_irq(&phba->hbalock);
-
- return count;
+ return -EPERM;
}
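The two stubs above keep the sysfs mbox binary attribute registered but refuse every read and write with -EPERM, steering user space toward the bsg interface while old tooling still gets a well-defined error rather than a missing file. A minimal sketch of that deprecate-but-keep-the-node approach (the function name and the hypothetical replacement path are illustrative):

#include <stdio.h>
#include <stddef.h>
#include <errno.h>

/*
 * Deprecated entry point kept only for compatibility: the node still exists,
 * but every access is rejected so callers migrate to the replacement
 * interface (in the driver's case, mailbox pass-through over bsg).
 */
static long legacy_mbox_rw(char *buf, size_t count, long off)
{
	(void)buf;
	(void)count;
	(void)off;
	return -EPERM;		/* operation not permitted, unconditionally */
}

int main(void)
{
	char buf[16];

	printf("legacy write -> %ld (expect %d)\n",
	       legacy_mbox_rw(buf, sizeof(buf), 0), -EPERM);
	return 0;
}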
static struct bin_attribute sysfs_mbox_attr = {
@@ -4429,8 +4290,13 @@ lpfc_get_host_port_state(struct Scsi_Host *shost)
case LPFC_LINK_UP:
case LPFC_CLEAR_LA:
case LPFC_HBA_READY:
- /* Links up, beyond this port_type reports state */
- fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
+ /* Links up, reports port state accordingly */
+ if (vport->port_state < LPFC_VPORT_READY)
+ fc_host_port_state(shost) =
+ FC_PORTSTATE_BYPASSED;
+ else
+ fc_host_port_state(shost) =
+ FC_PORTSTATE_ONLINE;
break;
case LPFC_HBA_ERROR:
fc_host_port_state(shost) = FC_PORTSTATE_ERROR;
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 6760c69f5253..56a86baece5b 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -916,9 +916,11 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
} else {
switch (cmd) {
case ELX_LOOPBACK_DATA:
- diag_cmd_data_free(phba,
- (struct lpfc_dmabufext *)
- dmabuf);
+ if (phba->sli_rev <
+ LPFC_SLI_REV4)
+ diag_cmd_data_free(phba,
+ (struct lpfc_dmabufext
+ *)dmabuf);
break;
case ELX_LOOPBACK_XRI_SETUP:
if ((phba->sli_rev ==
@@ -1000,7 +1002,8 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
error_ct_unsol_exit:
if (!list_empty(&head))
list_del(&head);
- if (evt_req_id == SLI_CT_ELX_LOOPBACK)
+ if ((phba->sli_rev < LPFC_SLI_REV4) &&
+ (evt_req_id == SLI_CT_ELX_LOOPBACK))
return 0;
return 1;
}
@@ -1566,7 +1569,7 @@ lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
struct diag_mode_set *loopback_mode;
uint32_t link_flags;
uint32_t timeout;
- LPFC_MBOXQ_t *pmboxq;
+ LPFC_MBOXQ_t *pmboxq = NULL;
int mbxstatus = MBX_SUCCESS;
int i = 0;
int rc = 0;
@@ -1615,7 +1618,6 @@ lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
rc = -ETIMEDOUT;
goto loopback_mode_exit;
}
-
msleep(10);
}
@@ -1635,7 +1637,9 @@ lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus))
rc = -ENODEV;
else {
+ spin_lock_irq(&phba->hbalock);
phba->link_flag |= LS_LOOPBACK_MODE;
+ spin_unlock_irq(&phba->hbalock);
/* wait for the link attention interrupt */
msleep(100);
@@ -1659,7 +1663,7 @@ loopback_mode_exit:
/*
* Let SLI layer release mboxq if mbox command completed after timeout.
*/
- if (mbxstatus != MBX_TIMEOUT)
+ if (pmboxq && mbxstatus != MBX_TIMEOUT)
mempool_free(pmboxq, phba->mbox_mem_pool);
job_error:
@@ -1700,11 +1704,16 @@ lpfc_sli4_bsg_set_link_diag_state(struct lpfc_hba *phba, uint32_t diag)
rc = -ENOMEM;
goto link_diag_state_set_out;
}
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "3128 Set link to diagnostic state:x%x (x%x/x%x)\n",
+ diag, phba->sli4_hba.lnk_info.lnk_tp,
+ phba->sli4_hba.lnk_info.lnk_no);
+
link_diag_state = &pmboxq->u.mqe.un.link_diag_state;
bf_set(lpfc_mbx_set_diag_state_link_num, &link_diag_state->u.req,
- phba->sli4_hba.link_state.number);
+ phba->sli4_hba.lnk_info.lnk_no);
bf_set(lpfc_mbx_set_diag_state_link_type, &link_diag_state->u.req,
- phba->sli4_hba.link_state.type);
+ phba->sli4_hba.lnk_info.lnk_tp);
if (diag)
bf_set(lpfc_mbx_set_diag_state_diag,
&link_diag_state->u.req, 1);
@@ -1727,6 +1736,79 @@ link_diag_state_set_out:
}
/**
+ * lpfc_sli4_bsg_set_internal_loopback - set sli4 internal loopback diagnostic
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is responsible for issuing a sli4 mailbox command for setting
+ * up internal loopback diagnostic.
+ */
+static int
+lpfc_sli4_bsg_set_internal_loopback(struct lpfc_hba *phba)
+{
+ LPFC_MBOXQ_t *pmboxq;
+ uint32_t req_len, alloc_len;
+ struct lpfc_mbx_set_link_diag_loopback *link_diag_loopback;
+ int mbxstatus = MBX_SUCCESS, rc = 0;
+
+ pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmboxq)
+ return -ENOMEM;
+ req_len = (sizeof(struct lpfc_mbx_set_link_diag_loopback) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
+ LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK,
+ req_len, LPFC_SLI4_MBX_EMBED);
+ if (alloc_len != req_len) {
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+ return -ENOMEM;
+ }
+ link_diag_loopback = &pmboxq->u.mqe.un.link_diag_loopback;
+ bf_set(lpfc_mbx_set_diag_state_link_num,
+ &link_diag_loopback->u.req, phba->sli4_hba.lnk_info.lnk_no);
+ bf_set(lpfc_mbx_set_diag_state_link_type,
+ &link_diag_loopback->u.req, phba->sli4_hba.lnk_info.lnk_tp);
+ bf_set(lpfc_mbx_set_diag_lpbk_type, &link_diag_loopback->u.req,
+ LPFC_DIAG_LOOPBACK_TYPE_SERDES);
+
+ mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
+ if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "3127 Failed setup loopback mode mailbox "
+ "command, rc:x%x, status:x%x\n", mbxstatus,
+ pmboxq->u.mb.mbxStatus);
+ rc = -ENODEV;
+ }
+ if (pmboxq && (mbxstatus != MBX_TIMEOUT))
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+ return rc;
+}
+
+/**
+ * lpfc_sli4_diag_fcport_reg_setup - setup port registrations for diagnostic
+ * @phba: Pointer to HBA context object.
+ *
+ * This function sets up SLI4 FC port registrations for a diagnostic run, which
+ * includes all the rpis, vfi, and also vpi.
+ */
+static int
+lpfc_sli4_diag_fcport_reg_setup(struct lpfc_hba *phba)
+{
+ int rc;
+
+ if (phba->pport->fc_flag & FC_VFI_REGISTERED) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "3136 Port still had vfi registered: "
+ "mydid:x%x, fcfi:%d, vfi:%d, vpi:%d\n",
+ phba->pport->fc_myDID, phba->fcf.fcfi,
+ phba->sli4_hba.vfi_ids[phba->pport->vfi],
+ phba->vpi_ids[phba->pport->vpi]);
+ return -EINVAL;
+ }
+ rc = lpfc_issue_reg_vfi(phba->pport);
+ return rc;
+}
+
+/**
* lpfc_sli4_bsg_diag_loopback_mode - process an sli4 bsg vendor command
* @phba: Pointer to HBA context object.
* @job: LPFC_BSG_VENDOR_DIAG_MODE
@@ -1738,10 +1820,8 @@ static int
lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
{
struct diag_mode_set *loopback_mode;
- uint32_t link_flags, timeout, req_len, alloc_len;
- struct lpfc_mbx_set_link_diag_loopback *link_diag_loopback;
- LPFC_MBOXQ_t *pmboxq = NULL;
- int mbxstatus = MBX_SUCCESS, i, rc = 0;
+ uint32_t link_flags, timeout;
+ int i, rc = 0;
/* no data to return just the return code */
job->reply->reply_payload_rcv_len = 0;
@@ -1762,65 +1842,100 @@ lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
if (rc)
goto job_error;
+ /* indicate we are in loopback diagnostic mode */
+ spin_lock_irq(&phba->hbalock);
+ phba->link_flag |= LS_LOOPBACK_MODE;
+ spin_unlock_irq(&phba->hbalock);
+
+ /* reset port to start from scratch */
+ rc = lpfc_selective_reset(phba);
+ if (rc)
+ goto job_error;
+
/* bring the link to diagnostic mode */
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "3129 Bring link to diagnostic state.\n");
loopback_mode = (struct diag_mode_set *)
job->request->rqst_data.h_vendor.vendor_cmd;
link_flags = loopback_mode->type;
timeout = loopback_mode->timeout * 100;
rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
- if (rc)
+ if (rc) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "3130 Failed to bring link to diagnostic "
+ "state, rc:x%x\n", rc);
goto loopback_mode_exit;
+ }
/* wait for link down before proceeding */
i = 0;
while (phba->link_state != LPFC_LINK_DOWN) {
if (i++ > timeout) {
rc = -ETIMEDOUT;
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "3131 Timeout waiting for link to "
+ "diagnostic mode, timeout:%d ms\n",
+ timeout * 10);
goto loopback_mode_exit;
}
msleep(10);
}
+
/* set up loopback mode */
- pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (!pmboxq) {
- rc = -ENOMEM;
- goto loopback_mode_exit;
- }
- req_len = (sizeof(struct lpfc_mbx_set_link_diag_loopback) -
- sizeof(struct lpfc_sli4_cfg_mhdr));
- alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
- LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK,
- req_len, LPFC_SLI4_MBX_EMBED);
- if (alloc_len != req_len) {
- rc = -ENOMEM;
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "3132 Set up loopback mode:x%x\n", link_flags);
+
+ if (link_flags == INTERNAL_LOOP_BACK)
+ rc = lpfc_sli4_bsg_set_internal_loopback(phba);
+ else if (link_flags == EXTERNAL_LOOP_BACK)
+ rc = lpfc_hba_init_link_fc_topology(phba,
+ FLAGS_TOPOLOGY_MODE_PT_PT,
+ MBX_NOWAIT);
+ else {
+ rc = -EINVAL;
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "3141 Loopback mode:x%x not supported\n",
+ link_flags);
goto loopback_mode_exit;
}
- link_diag_loopback = &pmboxq->u.mqe.un.link_diag_loopback;
- bf_set(lpfc_mbx_set_diag_state_link_num,
- &link_diag_loopback->u.req, phba->sli4_hba.link_state.number);
- bf_set(lpfc_mbx_set_diag_state_link_type,
- &link_diag_loopback->u.req, phba->sli4_hba.link_state.type);
- if (link_flags == INTERNAL_LOOP_BACK)
- bf_set(lpfc_mbx_set_diag_lpbk_type,
- &link_diag_loopback->u.req,
- LPFC_DIAG_LOOPBACK_TYPE_INTERNAL);
- else
- bf_set(lpfc_mbx_set_diag_lpbk_type,
- &link_diag_loopback->u.req,
- LPFC_DIAG_LOOPBACK_TYPE_EXTERNAL);
- mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
- if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus))
- rc = -ENODEV;
- else {
- phba->link_flag |= LS_LOOPBACK_MODE;
+ if (!rc) {
/* wait for the link attention interrupt */
msleep(100);
i = 0;
+ while (phba->link_state < LPFC_LINK_UP) {
+ if (i++ > timeout) {
+ rc = -ETIMEDOUT;
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "3137 Timeout waiting for link up "
+ "in loopback mode, timeout:%d ms\n",
+ timeout * 10);
+ break;
+ }
+ msleep(10);
+ }
+ }
+
+ /* port resource registration setup for loopback diagnostic */
+ if (!rc) {
+ /* set up a non-zero myDID for loopback test */
+ phba->pport->fc_myDID = 1;
+ rc = lpfc_sli4_diag_fcport_reg_setup(phba);
+ } else
+ goto loopback_mode_exit;
+
+ if (!rc) {
+ /* wait for the port ready */
+ msleep(100);
+ i = 0;
while (phba->link_state != LPFC_HBA_READY) {
if (i++ > timeout) {
rc = -ETIMEDOUT;
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "3133 Timeout waiting for port "
+ "loopback mode ready, timeout:%d ms\n",
+ timeout * 10);
break;
}
msleep(10);
@@ -1828,14 +1943,14 @@ lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
}
loopback_mode_exit:
+ /* clear loopback diagnostic mode */
+ if (rc) {
+ spin_lock_irq(&phba->hbalock);
+ phba->link_flag &= ~LS_LOOPBACK_MODE;
+ spin_unlock_irq(&phba->hbalock);
+ }
lpfc_bsg_diag_mode_exit(phba);
- /*
- * Let SLI layer release mboxq if mbox command completed after timeout.
- */
- if (pmboxq && (mbxstatus != MBX_TIMEOUT))
- mempool_free(pmboxq, phba->mbox_mem_pool);
-
job_error:
/* make error code available to userspace */
job->reply->result = rc;
@@ -1879,7 +1994,6 @@ lpfc_bsg_diag_loopback_mode(struct fc_bsg_job *job)
rc = -ENODEV;
return rc;
-
}
/**
@@ -1895,7 +2009,9 @@ lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job)
struct Scsi_Host *shost;
struct lpfc_vport *vport;
struct lpfc_hba *phba;
- int rc;
+ struct diag_mode_set *loopback_mode_end_cmd;
+ uint32_t timeout;
+ int rc, i;
shost = job->shost;
if (!shost)
@@ -1913,11 +2029,47 @@ lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job)
LPFC_SLI_INTF_IF_TYPE_2)
return -ENODEV;
+ /* clear loopback diagnostic mode */
+ spin_lock_irq(&phba->hbalock);
+ phba->link_flag &= ~LS_LOOPBACK_MODE;
+ spin_unlock_irq(&phba->hbalock);
+ loopback_mode_end_cmd = (struct diag_mode_set *)
+ job->request->rqst_data.h_vendor.vendor_cmd;
+ timeout = loopback_mode_end_cmd->timeout * 100;
+
rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "3139 Failed to bring link to diagnostic "
+ "state, rc:x%x\n", rc);
+ goto loopback_mode_end_exit;
+ }
- if (!rc)
- rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
+ /* wait for link down before proceeding */
+ i = 0;
+ while (phba->link_state != LPFC_LINK_DOWN) {
+ if (i++ > timeout) {
+ rc = -ETIMEDOUT;
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "3140 Timeout waiting for link to "
+ "diagnostic mode_end, timeout:%d ms\n",
+ timeout * 10);
+ /* there is nothing much we can do here */
+ break;
+ }
+ msleep(10);
+ }
+
+ /* reset port resource registrations */
+ rc = lpfc_selective_reset(phba);
+ phba->pport->fc_myDID = 0;
+loopback_mode_end_exit:
+ /* make return code available to userspace */
+ job->reply->result = rc;
+ /* complete the job back to userspace if no error */
+ if (rc == 0)
+ job->job_done(job);
return rc;
}
@@ -2012,9 +2164,9 @@ lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job)
}
run_link_diag_test = &pmboxq->u.mqe.un.link_diag_test;
bf_set(lpfc_mbx_run_diag_test_link_num, &run_link_diag_test->u.req,
- phba->sli4_hba.link_state.number);
+ phba->sli4_hba.lnk_info.lnk_no);
bf_set(lpfc_mbx_run_diag_test_link_type, &run_link_diag_test->u.req,
- phba->sli4_hba.link_state.type);
+ phba->sli4_hba.lnk_info.lnk_tp);
bf_set(lpfc_mbx_run_diag_test_test_id, &run_link_diag_test->u.req,
link_diag_test_cmd->test_id);
bf_set(lpfc_mbx_run_diag_test_loops, &run_link_diag_test->u.req,
@@ -2091,10 +2243,18 @@ static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t *rpi)
if (!mbox)
return -ENOMEM;
- if (phba->sli_rev == LPFC_SLI_REV4)
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ status = lpfc_reg_rpi(phba, 0, phba->pport->fc_myDID,
+ (uint8_t *)&phba->pport->fc_sparam,
+ mbox, *rpi);
+ else {
*rpi = lpfc_sli4_alloc_rpi(phba);
- status = lpfc_reg_rpi(phba, 0, phba->pport->fc_myDID,
- (uint8_t *)&phba->pport->fc_sparam, mbox, *rpi);
+ status = lpfc_reg_rpi(phba, phba->pport->vpi,
+ phba->pport->fc_myDID,
+ (uint8_t *)&phba->pport->fc_sparam,
+ mbox, *rpi);
+ }
+
if (status) {
mempool_free(mbox, phba->mbox_mem_pool);
if (phba->sli_rev == LPFC_SLI_REV4)
@@ -2117,7 +2277,8 @@ static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t *rpi)
return -ENODEV;
}
- *rpi = mbox->u.mb.un.varWords[0];
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ *rpi = mbox->u.mb.un.varWords[0];
lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
kfree(dmabuff);
@@ -2142,7 +2303,12 @@ static int lpfcdiag_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi)
if (mbox == NULL)
return -ENOMEM;
- lpfc_unreg_login(phba, 0, rpi, mbox);
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ lpfc_unreg_login(phba, 0, rpi, mbox);
+ else
+ lpfc_unreg_login(phba, phba->pport->vpi,
+ phba->sli4_hba.rpi_ids[rpi], mbox);
+
status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
@@ -2630,15 +2796,15 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
uint32_t full_size;
size_t segment_len = 0, segment_offset = 0, current_offset = 0;
uint16_t rpi = 0;
- struct lpfc_iocbq *cmdiocbq, *rspiocbq;
- IOCB_t *cmd, *rsp;
+ struct lpfc_iocbq *cmdiocbq, *rspiocbq = NULL;
+ IOCB_t *cmd, *rsp = NULL;
struct lpfc_sli_ct_request *ctreq;
struct lpfc_dmabuf *txbmp;
struct ulp_bde64 *txbpl = NULL;
struct lpfc_dmabufext *txbuffer = NULL;
struct list_head head;
struct lpfc_dmabuf *curr;
- uint16_t txxri, rxxri;
+ uint16_t txxri = 0, rxxri;
uint32_t num_bde;
uint8_t *ptr = NULL, *rx_databuf = NULL;
int rc = 0;
@@ -2665,7 +2831,6 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
rc = -EINVAL;
goto loopback_test_exit;
}
-
diag_mode = (struct diag_mode_test *)
job->request->rqst_data.h_vendor.vendor_cmd;
@@ -2720,18 +2885,19 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
if (rc)
goto loopback_test_exit;
- rc = lpfcdiag_loop_get_xri(phba, rpi, &txxri, &rxxri);
- if (rc) {
- lpfcdiag_loop_self_unreg(phba, rpi);
- goto loopback_test_exit;
- }
+ if (phba->sli_rev < LPFC_SLI_REV4) {
+ rc = lpfcdiag_loop_get_xri(phba, rpi, &txxri, &rxxri);
+ if (rc) {
+ lpfcdiag_loop_self_unreg(phba, rpi);
+ goto loopback_test_exit;
+ }
- rc = lpfcdiag_loop_post_rxbufs(phba, rxxri, full_size);
- if (rc) {
- lpfcdiag_loop_self_unreg(phba, rpi);
- goto loopback_test_exit;
+ rc = lpfcdiag_loop_post_rxbufs(phba, rxxri, full_size);
+ if (rc) {
+ lpfcdiag_loop_self_unreg(phba, rpi);
+ goto loopback_test_exit;
+ }
}
-
evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
SLI_CT_ELX_LOOPBACK);
if (!evt) {
@@ -2746,7 +2912,8 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
cmdiocbq = lpfc_sli_get_iocbq(phba);
- rspiocbq = lpfc_sli_get_iocbq(phba);
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ rspiocbq = lpfc_sli_get_iocbq(phba);
txbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
if (txbmp) {
@@ -2759,14 +2926,18 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
}
}
- if (!cmdiocbq || !rspiocbq || !txbmp || !txbpl || !txbuffer ||
- !txbmp->virt) {
+ if (!cmdiocbq || !txbmp || !txbpl || !txbuffer || !txbmp->virt) {
+ rc = -ENOMEM;
+ goto err_loopback_test_exit;
+ }
+ if ((phba->sli_rev < LPFC_SLI_REV4) && !rspiocbq) {
rc = -ENOMEM;
goto err_loopback_test_exit;
}
cmd = &cmdiocbq->iocb;
- rsp = &rspiocbq->iocb;
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ rsp = &rspiocbq->iocb;
INIT_LIST_HEAD(&head);
list_add_tail(&head, &txbuffer->dma.list);
@@ -2796,7 +2967,6 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
list_del(&head);
/* Build the XMIT_SEQUENCE iocb */
-
num_bde = (uint32_t)txbuffer->flag;
cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(txbmp->phys);
@@ -2813,16 +2983,27 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
cmd->ulpBdeCount = 1;
cmd->ulpLe = 1;
cmd->ulpClass = CLASS3;
- cmd->ulpContext = txxri;
+ if (phba->sli_rev < LPFC_SLI_REV4) {
+ cmd->ulpContext = txxri;
+ } else {
+ cmd->un.xseq64.bdl.ulpIoTag32 = 0;
+ cmd->un.ulpWord[3] = phba->sli4_hba.rpi_ids[rpi];
+ cmdiocbq->context3 = txbmp;
+ cmdiocbq->sli4_xritag = NO_XRI;
+ cmd->unsli3.rcvsli3.ox_id = 0xffff;
+ }
cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
cmdiocbq->vport = phba->pport;
-
iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
rspiocbq, (phba->fc_ratov * 2) +
LPFC_DRVR_TIMEOUT);
- if ((iocb_stat != IOCB_SUCCESS) || (rsp->ulpStatus != IOCB_SUCCESS)) {
+ if ((iocb_stat != IOCB_SUCCESS) || ((phba->sli_rev < LPFC_SLI_REV4) &&
+ (rsp->ulpStatus != IOCB_SUCCESS))) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "3126 Failed loopback test issue iocb: "
+ "iocb_stat:x%x\n", iocb_stat);
rc = -EIO;
goto err_loopback_test_exit;
}
@@ -2832,9 +3013,12 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
evt->wq, !list_empty(&evt->events_to_see),
((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ);
evt->waiting = 0;
- if (list_empty(&evt->events_to_see))
+ if (list_empty(&evt->events_to_see)) {
rc = (time_left) ? -EINTR : -ETIMEDOUT;
- else {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "3125 Not receiving unsolicited event, "
+ "rc:x%x\n", rc);
+ } else {
spin_lock_irqsave(&phba->ct_ev_lock, flags);
list_move(evt->events_to_see.prev, &evt->events_to_get);
evdat = list_entry(evt->events_to_get.prev,
@@ -2891,7 +3075,7 @@ loopback_test_exit:
job->reply->result = rc;
job->dd_data = NULL;
/* complete the job back to userspace if no error */
- if (rc == 0)
+ if (rc == IOCB_SUCCESS)
job->job_done(job);
return rc;
}
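Several of the new loopback paths above wait for a link-state transition by polling in 10 ms steps until a caller-supplied timeout (scaled by 100, so one unit corresponds to one second of 10 ms iterations) runs out, then fail with -ETIMEDOUT. A self-contained sketch of that bounded polling loop; the simulated state variable and the usleep() call stand in for phba->link_state and the kernel's msleep():

#include <stdio.h>
#include <errno.h>
#include <unistd.h>

enum link_state { LINK_DOWN, LINK_UP, HBA_READY };

static enum link_state fake_link_state;	/* stand-in for phba->link_state */

/* Poll every 10 ms until the wanted state is reached or the timeout expires. */
static int wait_for_state(enum link_state want, unsigned int timeout_s)
{
	unsigned int i = 0, limit = timeout_s * 100;	/* 100 * 10 ms per second */

	while (fake_link_state != want) {
		if (i++ > limit)
			return -ETIMEDOUT;
		usleep(10 * 1000);		/* driver uses msleep(10) */
		if (i == 5)
			fake_link_state = want;	/* simulate the transition */
	}
	return 0;
}

int main(void)
{
	fake_link_state = LINK_DOWN;
	printf("wait_for_state(HBA_READY) = %d\n", wait_for_state(HBA_READY, 2));
	return 0;
}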
@@ -3078,7 +3262,9 @@ static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
&& (mb->un.varWords[1] == 1)) {
phba->wait_4_mlo_maint_flg = 1;
} else if (mb->un.varWords[0] == SETVAR_MLORST) {
+ spin_lock_irq(&phba->hbalock);
phba->link_flag &= ~LS_LOOPBACK_MODE;
+ spin_unlock_irq(&phba->hbalock);
phba->fc_topology = LPFC_TOPOLOGY_PT_PT;
}
break;
@@ -3140,6 +3326,9 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
unsigned long flags;
uint32_t size;
int rc = 0;
+ struct lpfc_dmabuf *dmabuf;
+ struct lpfc_sli_config_mbox *sli_cfg_mbx;
+ uint8_t *pmbx;
spin_lock_irqsave(&phba->ct_ev_lock, flags);
dd_data = pmboxq->context1;
@@ -3156,7 +3345,19 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
*/
pmb = (uint8_t *)&pmboxq->u.mb;
pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
+ /* Copy the byte swapped response mailbox back to the user */
memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
+ /* if there is any non-embedded extended data copy that too */
+ dmabuf = phba->mbox_ext_buf_ctx.mbx_dmabuf;
+ sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
+ if (!bsg_bf_get(lpfc_mbox_hdr_emb,
+ &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) {
+ pmbx = (uint8_t *)dmabuf->virt;
+ /* byte swap the extended data following the mailbox command */
+ lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)],
+ &pmbx[sizeof(MAILBOX_t)],
+ sli_cfg_mbx->un.sli_config_emb0_subsys.mse[0].buf_len);
+ }
job = dd_data->context_un.mbox.set_job;
if (job) {
@@ -3519,6 +3720,18 @@ lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
/* state change */
phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
+ /*
+ * Non-embedded mailbox subcommand data gets byte swapped here because
+ * the lower level driver code only does the first 64 mailbox words.
+ */
+ if ((!bsg_bf_get(lpfc_mbox_hdr_emb,
+ &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) &&
+ (nemb_tp == nemb_mse))
+ lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)],
+ &pmbx[sizeof(MAILBOX_t)],
+ sli_cfg_mbx->un.sli_config_emb0_subsys.
+ mse[0].buf_len);
+
rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
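The completion handler and the issue path above both run lpfc_sli_pcimem_bcopy() over the non-embedded extension data because the generic mailbox code only byte swaps the first 64 mailbox words. The driver helper's body is not shown here, so the snippet below is only a generic in-place 32-bit byte swap over a buffer, illustrating the kind of conversion involved rather than reproducing the lpfc routine:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static uint32_t swab32(uint32_t x)
{
	return (x >> 24) | ((x >> 8) & 0x0000ff00) |
	       ((x << 8) & 0x00ff0000) | (x << 24);
}

/* Byte swap a buffer of 32-bit words in place; len is given in bytes. */
static void swab32_region(uint32_t *words, size_t len)
{
	size_t i;

	for (i = 0; i < len / sizeof(uint32_t); i++)
		words[i] = swab32(words[i]);
}

int main(void)
{
	uint32_t ext[2] = { 0x11223344, 0xaabbccdd };

	swab32_region(ext, sizeof(ext));
	printf("%08x %08x\n", ext[0], ext[1]);	/* prints 44332211 ddccbbaa */
	return 0;
}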
@@ -3575,7 +3788,7 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
&sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
- "2953 Handled SLI_CONFIG(mse) wr, "
+ "2953 Failed SLI_CONFIG(mse) wr, "
"ext_buf_cnt(%d) out of range(%d)\n",
ext_buf_cnt,
LPFC_MBX_SLI_CONFIG_MAX_MSE);
@@ -3593,7 +3806,7 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
- "2954 Handled SLI_CONFIG(hbd) wr, "
+ "2954 Failed SLI_CONFIG(hbd) wr, "
"ext_buf_cnt(%d) out of range(%d)\n",
ext_buf_cnt,
LPFC_MBX_SLI_CONFIG_MAX_HBD);
@@ -3687,6 +3900,7 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
"2956 Failed to issue SLI_CONFIG ext-buffer "
"maibox command, rc:x%x\n", rc);
rc = -EPIPE;
+ goto job_error;
}
/* wait for additional external buffers */
@@ -3721,7 +3935,7 @@ lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
uint32_t opcode;
int rc = SLI_CONFIG_NOT_HANDLED;
- /* state change */
+ /* state change on new multi-buffer pass-through mailbox command */
phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_HOST;
sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
@@ -3752,18 +3966,36 @@ lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
break;
default:
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
- "2959 Not handled SLI_CONFIG "
+ "2959 Reject SLI_CONFIG "
"subsys_fcoe, opcode:x%x\n",
opcode);
- rc = SLI_CONFIG_NOT_HANDLED;
+ rc = -EPERM;
+ break;
+ }
+ } else if (subsys == SLI_CONFIG_SUBSYS_COMN) {
+ switch (opcode) {
+ case COMN_OPCODE_GET_CNTL_ADDL_ATTRIBUTES:
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "3106 Handled SLI_CONFIG "
+ "subsys_fcoe, opcode:x%x\n",
+ opcode);
+ rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
+ nemb_mse, dmabuf);
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "3107 Reject SLI_CONFIG "
+ "subsys_fcoe, opcode:x%x\n",
+ opcode);
+ rc = -EPERM;
break;
}
} else {
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
- "2977 Handled SLI_CONFIG "
+ "2977 Reject SLI_CONFIG "
"subsys:x%d, opcode:x%x\n",
subsys, opcode);
- rc = SLI_CONFIG_NOT_HANDLED;
+ rc = -EPERM;
}
} else {
subsys = bsg_bf_get(lpfc_emb1_subcmnd_subsys,
@@ -3799,12 +4031,17 @@ lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
}
} else {
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
- "2978 Handled SLI_CONFIG "
+ "2978 Not handled SLI_CONFIG "
"subsys:x%d, opcode:x%x\n",
subsys, opcode);
rc = SLI_CONFIG_NOT_HANDLED;
}
}
+
+ /* reset state when a new multi-buffer mailbox command is not handled */
+ if (rc != SLI_CONFIG_HANDLED)
+ phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE;
+
return rc;
}
@@ -4262,11 +4499,8 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
/* extended mailbox commands will need an extended buffer */
if (mbox_req->inExtWLen || mbox_req->outExtWLen) {
- /* any data for the device? */
- if (mbox_req->inExtWLen) {
- from = pmbx;
- ext = from + sizeof(MAILBOX_t);
- }
+ from = pmbx;
+ ext = from + sizeof(MAILBOX_t);
pmboxq->context2 = ext;
pmboxq->in_ext_byte_len =
mbox_req->inExtWLen * sizeof(uint32_t);
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h
index c8c2b47ea886..edfe61fc52b1 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.h
+++ b/drivers/scsi/lpfc/lpfc_bsg.h
@@ -96,7 +96,7 @@ struct get_mgmt_rev {
};
#define MANAGEMENT_MAJOR_REV 1
-#define MANAGEMENT_MINOR_REV 0
+#define MANAGEMENT_MINOR_REV 1
/* the MgmtRevInfo structure */
struct MgmtRevInfo {
@@ -248,6 +248,7 @@ struct lpfc_sli_config_emb1_subsys {
#define COMN_OPCODE_WRITE_OBJECT 0xAC
#define COMN_OPCODE_READ_OBJECT_LIST 0xAD
#define COMN_OPCODE_DELETE_OBJECT 0xAE
+#define COMN_OPCODE_GET_CNTL_ADDL_ATTRIBUTES 0x79
uint32_t timeout;
uint32_t request_length;
uint32_t word9;
diff --git a/drivers/scsi/lpfc/lpfc_compat.h b/drivers/scsi/lpfc/lpfc_compat.h
index 75e2e569dede..c88e556ea62e 100644
--- a/drivers/scsi/lpfc/lpfc_compat.h
+++ b/drivers/scsi/lpfc/lpfc_compat.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2005 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2011 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -82,7 +82,8 @@ lpfc_memcpy_from_slim( void *dest, void __iomem *src, unsigned int bytes)
static inline void
lpfc_memcpy_to_slim( void __iomem *dest, void *src, unsigned int bytes)
{
- __iowrite32_copy(dest, src, bytes);
+ /* convert bytes in argument list to word count for copy function */
+ __iowrite32_copy(dest, src, bytes / sizeof(uint32_t));
}
static inline void
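The lpfc_compat.h hunk above fixes a units mismatch: __iowrite32_copy() expects a count of 32-bit words, while lpfc_memcpy_to_slim() receives a byte count, so the argument has to be divided by sizeof(uint32_t) before the call. A tiny self-contained illustration of the same bytes-to-words conversion, with an ordinary word-copy loop standing in for the MMIO helper:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Stand-in for __iowrite32_copy(): copies 'count' 32-bit words, not bytes. */
static void write32_copy(uint32_t *dst, const uint32_t *src, size_t count)
{
	size_t i;

	for (i = 0; i < count; i++)
		dst[i] = src[i];
}

static void memcpy_to_slim(uint32_t *dst, const void *src, unsigned int bytes)
{
	/* Convert the byte count from the argument list into a word count. */
	write32_copy(dst, src, bytes / sizeof(uint32_t));
}

int main(void)
{
	uint32_t slim[4] = { 0 };
	uint32_t mbox[4] = { 1, 2, 3, 4 };

	memcpy_to_slim(slim, mbox, sizeof(mbox));	/* 16 bytes -> 4 words */
	printf("%u %u %u %u\n", slim[0], slim[1], slim[2], slim[3]);
	return 0;
}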
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 60f95347babf..26924b7a6cde 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -26,7 +26,7 @@ void lpfc_sli_read_link_ste(struct lpfc_hba *);
void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t, uint16_t);
void lpfc_dump_wakeup_param(struct lpfc_hba *, LPFC_MBOXQ_t *);
int lpfc_dump_static_vport(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
-int lpfc_dump_fcoe_param(struct lpfc_hba *, struct lpfcMboxq *);
+int lpfc_sli4_dump_cfg_rg23(struct lpfc_hba *, struct lpfcMboxq *);
void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_config_async(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
@@ -78,6 +78,7 @@ void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_unregister_vfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_enqueue_node(struct lpfc_vport *, struct lpfc_nodelist *);
void lpfc_dequeue_node(struct lpfc_vport *, struct lpfc_nodelist *);
struct lpfc_nodelist *lpfc_enable_node(struct lpfc_vport *,
@@ -106,7 +107,7 @@ void lpfc_cleanup(struct lpfc_vport *);
void lpfc_disc_timeout(unsigned long);
struct lpfc_nodelist *__lpfc_findnode_rpi(struct lpfc_vport *, uint16_t);
-
+struct lpfc_nodelist *lpfc_findnode_rpi(struct lpfc_vport *, uint16_t);
void lpfc_worker_wake_up(struct lpfc_hba *);
int lpfc_workq_post_event(struct lpfc_hba *, void *, void *, uint32_t);
int lpfc_do_work(void *);
@@ -453,3 +454,11 @@ int lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *, int);
uint16_t lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *);
int lpfc_sli4_queue_create(struct lpfc_hba *);
void lpfc_sli4_queue_destroy(struct lpfc_hba *);
+void lpfc_sli4_abts_err_handler(struct lpfc_hba *, struct lpfc_nodelist *,
+ struct sli4_wcqe_xri_aborted *);
+int lpfc_hba_init_link_fc_topology(struct lpfc_hba *, uint32_t, uint32_t);
+int lpfc_issue_reg_vfi(struct lpfc_vport *);
+int lpfc_issue_unreg_vfi(struct lpfc_vport *);
+int lpfc_selective_reset(struct lpfc_hba *);
+int lpfc_sli4_read_config(struct lpfc_hba *phba);
+int lpfc_scsi_buf_update(struct lpfc_hba *phba);
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 28382596fb9a..3587a3fe8fcb 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -1997,7 +1997,8 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
/* Get slow-path event queue information */
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Slow-path EQ information:\n");
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ if (phba->sli4_hba.sp_eq) {
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"\tEQID[%02d], "
"QE-COUNT[%04d], QE-SIZE[%04d], "
"HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
@@ -2006,12 +2007,17 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
phba->sli4_hba.sp_eq->entry_size,
phba->sli4_hba.sp_eq->host_index,
phba->sli4_hba.sp_eq->hba_index);
+ }
/* Get fast-path event queue information */
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Fast-path EQ information:\n");
- for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) {
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ if (phba->sli4_hba.fp_eq) {
+ for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count;
+ fcp_qidx++) {
+ if (phba->sli4_hba.fp_eq[fcp_qidx]) {
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
"\tEQID[%02d], "
"QE-COUNT[%04d], QE-SIZE[%04d], "
"HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
@@ -2020,16 +2026,19 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
phba->sli4_hba.fp_eq[fcp_qidx]->entry_size,
phba->sli4_hba.fp_eq[fcp_qidx]->host_index,
phba->sli4_hba.fp_eq[fcp_qidx]->hba_index);
+ }
+ }
}
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
/* Get mailbox complete queue information */
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Slow-path MBX CQ information:\n");
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ if (phba->sli4_hba.mbx_cq) {
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Associated EQID[%02d]:\n",
phba->sli4_hba.mbx_cq->assoc_qid);
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"\tCQID[%02d], "
"QE-COUNT[%04d], QE-SIZE[%04d], "
"HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
@@ -2038,14 +2047,16 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
phba->sli4_hba.mbx_cq->entry_size,
phba->sli4_hba.mbx_cq->host_index,
phba->sli4_hba.mbx_cq->hba_index);
+ }
/* Get slow-path complete queue information */
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Slow-path ELS CQ information:\n");
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ if (phba->sli4_hba.els_cq) {
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Associated EQID[%02d]:\n",
phba->sli4_hba.els_cq->assoc_qid);
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"\tCQID [%02d], "
"QE-COUNT[%04d], QE-SIZE[%04d], "
"HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
@@ -2054,16 +2065,21 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
phba->sli4_hba.els_cq->entry_size,
phba->sli4_hba.els_cq->host_index,
phba->sli4_hba.els_cq->hba_index);
+ }
/* Get fast-path complete queue information */
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Fast-path FCP CQ information:\n");
fcp_qidx = 0;
- do {
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ if (phba->sli4_hba.fcp_cq) {
+ do {
+ if (phba->sli4_hba.fcp_cq[fcp_qidx]) {
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Associated EQID[%02d]:\n",
phba->sli4_hba.fcp_cq[fcp_qidx]->assoc_qid);
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
"\tCQID[%02d], "
"QE-COUNT[%04d], QE-SIZE[%04d], "
"HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
@@ -2072,16 +2088,20 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
phba->sli4_hba.fcp_cq[fcp_qidx]->entry_size,
phba->sli4_hba.fcp_cq[fcp_qidx]->host_index,
phba->sli4_hba.fcp_cq[fcp_qidx]->hba_index);
- } while (++fcp_qidx < phba->cfg_fcp_eq_count);
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+ }
+ } while (++fcp_qidx < phba->cfg_fcp_eq_count);
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+ }
/* Get mailbox queue information */
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Slow-path MBX MQ information:\n");
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ if (phba->sli4_hba.mbx_wq) {
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Associated CQID[%02d]:\n",
phba->sli4_hba.mbx_wq->assoc_qid);
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"\tWQID[%02d], "
"QE-COUNT[%04d], QE-SIZE[%04d], "
"HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
@@ -2090,14 +2110,16 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
phba->sli4_hba.mbx_wq->entry_size,
phba->sli4_hba.mbx_wq->host_index,
phba->sli4_hba.mbx_wq->hba_index);
+ }
/* Get slow-path work queue information */
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Slow-path ELS WQ information:\n");
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ if (phba->sli4_hba.els_wq) {
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Associated CQID[%02d]:\n",
phba->sli4_hba.els_wq->assoc_qid);
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"\tWQID[%02d], "
"QE-COUNT[%04d], QE-SIZE[%04d], "
"HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
@@ -2106,15 +2128,22 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
phba->sli4_hba.els_wq->entry_size,
phba->sli4_hba.els_wq->host_index,
phba->sli4_hba.els_wq->hba_index);
+ }
/* Get fast-path work queue information */
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Fast-path FCP WQ information:\n");
- for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++) {
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ if (phba->sli4_hba.fcp_wq) {
+ for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count;
+ fcp_qidx++) {
+ if (!phba->sli4_hba.fcp_wq[fcp_qidx])
+ continue;
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Associated CQID[%02d]:\n",
phba->sli4_hba.fcp_wq[fcp_qidx]->assoc_qid);
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
"\tWQID[%02d], "
"QE-COUNT[%04d], WQE-SIZE[%04d], "
"HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
@@ -2123,16 +2152,19 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
phba->sli4_hba.fcp_wq[fcp_qidx]->entry_size,
phba->sli4_hba.fcp_wq[fcp_qidx]->host_index,
phba->sli4_hba.fcp_wq[fcp_qidx]->hba_index);
+ }
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
}
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
/* Get receive queue information */
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Slow-path RQ information:\n");
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ if (phba->sli4_hba.hdr_rq && phba->sli4_hba.dat_rq) {
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Associated CQID[%02d]:\n",
phba->sli4_hba.hdr_rq->assoc_qid);
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"\tHQID[%02d], "
"QE-COUNT[%04d], QE-SIZE[%04d], "
"HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
@@ -2141,7 +2173,7 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
phba->sli4_hba.hdr_rq->entry_size,
phba->sli4_hba.hdr_rq->host_index,
phba->sli4_hba.hdr_rq->hba_index);
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"\tDQID[%02d], "
"QE-COUNT[%04d], QE-SIZE[%04d], "
"HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
@@ -2150,7 +2182,7 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
phba->sli4_hba.dat_rq->entry_size,
phba->sli4_hba.dat_rq->host_index,
phba->sli4_hba.dat_rq->hba_index);
-
+ }
return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
}
@@ -2360,7 +2392,8 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
switch (quetp) {
case LPFC_IDIAG_EQ:
/* Slow-path event queue */
- if (phba->sli4_hba.sp_eq->queue_id == queid) {
+ if (phba->sli4_hba.sp_eq &&
+ phba->sli4_hba.sp_eq->queue_id == queid) {
/* Sanity check */
rc = lpfc_idiag_que_param_check(
phba->sli4_hba.sp_eq, index, count);
@@ -2370,23 +2403,29 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
goto pass_check;
}
/* Fast-path event queue */
- for (qidx = 0; qidx < phba->cfg_fcp_eq_count; qidx++) {
- if (phba->sli4_hba.fp_eq[qidx]->queue_id == queid) {
- /* Sanity check */
- rc = lpfc_idiag_que_param_check(
+ if (phba->sli4_hba.fp_eq) {
+ for (qidx = 0; qidx < phba->cfg_fcp_eq_count; qidx++) {
+ if (phba->sli4_hba.fp_eq[qidx] &&
+ phba->sli4_hba.fp_eq[qidx]->queue_id ==
+ queid) {
+ /* Sanity check */
+ rc = lpfc_idiag_que_param_check(
phba->sli4_hba.fp_eq[qidx],
index, count);
- if (rc)
- goto error_out;
- idiag.ptr_private = phba->sli4_hba.fp_eq[qidx];
- goto pass_check;
+ if (rc)
+ goto error_out;
+ idiag.ptr_private =
+ phba->sli4_hba.fp_eq[qidx];
+ goto pass_check;
+ }
}
}
goto error_out;
break;
case LPFC_IDIAG_CQ:
/* MBX complete queue */
- if (phba->sli4_hba.mbx_cq->queue_id == queid) {
+ if (phba->sli4_hba.mbx_cq &&
+ phba->sli4_hba.mbx_cq->queue_id == queid) {
/* Sanity check */
rc = lpfc_idiag_que_param_check(
phba->sli4_hba.mbx_cq, index, count);
@@ -2396,7 +2435,8 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
goto pass_check;
}
/* ELS complete queue */
- if (phba->sli4_hba.els_cq->queue_id == queid) {
+ if (phba->sli4_hba.els_cq &&
+ phba->sli4_hba.els_cq->queue_id == queid) {
/* Sanity check */
rc = lpfc_idiag_que_param_check(
phba->sli4_hba.els_cq, index, count);
@@ -2406,25 +2446,30 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
goto pass_check;
}
/* FCP complete queue */
- qidx = 0;
- do {
- if (phba->sli4_hba.fcp_cq[qidx]->queue_id == queid) {
- /* Sanity check */
- rc = lpfc_idiag_que_param_check(
+ if (phba->sli4_hba.fcp_cq) {
+ qidx = 0;
+ do {
+ if (phba->sli4_hba.fcp_cq[qidx] &&
+ phba->sli4_hba.fcp_cq[qidx]->queue_id ==
+ queid) {
+ /* Sanity check */
+ rc = lpfc_idiag_que_param_check(
phba->sli4_hba.fcp_cq[qidx],
index, count);
- if (rc)
- goto error_out;
- idiag.ptr_private =
+ if (rc)
+ goto error_out;
+ idiag.ptr_private =
phba->sli4_hba.fcp_cq[qidx];
- goto pass_check;
- }
- } while (++qidx < phba->cfg_fcp_eq_count);
+ goto pass_check;
+ }
+ } while (++qidx < phba->cfg_fcp_eq_count);
+ }
goto error_out;
break;
case LPFC_IDIAG_MQ:
/* MBX work queue */
- if (phba->sli4_hba.mbx_wq->queue_id == queid) {
+ if (phba->sli4_hba.mbx_wq &&
+ phba->sli4_hba.mbx_wq->queue_id == queid) {
/* Sanity check */
rc = lpfc_idiag_que_param_check(
phba->sli4_hba.mbx_wq, index, count);
@@ -2433,10 +2478,12 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
idiag.ptr_private = phba->sli4_hba.mbx_wq;
goto pass_check;
}
+ goto error_out;
break;
case LPFC_IDIAG_WQ:
/* ELS work queue */
- if (phba->sli4_hba.els_wq->queue_id == queid) {
+ if (phba->sli4_hba.els_wq &&
+ phba->sli4_hba.els_wq->queue_id == queid) {
/* Sanity check */
rc = lpfc_idiag_que_param_check(
phba->sli4_hba.els_wq, index, count);
@@ -2446,24 +2493,30 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
goto pass_check;
}
/* FCP work queue */
- for (qidx = 0; qidx < phba->cfg_fcp_wq_count; qidx++) {
- if (phba->sli4_hba.fcp_wq[qidx]->queue_id == queid) {
- /* Sanity check */
- rc = lpfc_idiag_que_param_check(
+ if (phba->sli4_hba.fcp_wq) {
+ for (qidx = 0; qidx < phba->cfg_fcp_wq_count; qidx++) {
+ if (!phba->sli4_hba.fcp_wq[qidx])
+ continue;
+ if (phba->sli4_hba.fcp_wq[qidx]->queue_id ==
+ queid) {
+ /* Sanity check */
+ rc = lpfc_idiag_que_param_check(
phba->sli4_hba.fcp_wq[qidx],
index, count);
- if (rc)
- goto error_out;
- idiag.ptr_private =
- phba->sli4_hba.fcp_wq[qidx];
- goto pass_check;
+ if (rc)
+ goto error_out;
+ idiag.ptr_private =
+ phba->sli4_hba.fcp_wq[qidx];
+ goto pass_check;
+ }
}
}
goto error_out;
break;
case LPFC_IDIAG_RQ:
/* HDR queue */
- if (phba->sli4_hba.hdr_rq->queue_id == queid) {
+ if (phba->sli4_hba.hdr_rq &&
+ phba->sli4_hba.hdr_rq->queue_id == queid) {
/* Sanity check */
rc = lpfc_idiag_que_param_check(
phba->sli4_hba.hdr_rq, index, count);
@@ -2473,7 +2526,8 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
goto pass_check;
}
/* DAT queue */
- if (phba->sli4_hba.dat_rq->queue_id == queid) {
+ if (phba->sli4_hba.dat_rq &&
+ phba->sli4_hba.dat_rq->queue_id == queid) {
/* Sanity check */
rc = lpfc_idiag_que_param_check(
phba->sli4_hba.dat_rq, index, count);
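The two debugfs hunks above share one pattern: each SLI4 queue pointer (and each entry of the fcp_eq/fcp_cq/fcp_wq arrays) is checked before it is dereferenced, so a queue-info dump or queue-access request issued while the queues are unallocated or only partially set up no longer touches a NULL pointer. A minimal, self-contained sketch of that guard pattern follows; struct q_info and dump_one_queue() are illustrative stand-ins, not driver code, and only the fields printed in the hunks are modelled.

#include <stdio.h>

/* Stand-in for the driver's queue structure (illustration only). */
struct q_info {
	int queue_id;
	int entry_count;
	int entry_size;
};

/* Append one queue's summary to the dump buffer; a queue that was never
 * allocated is silently skipped instead of being dereferenced. */
static int dump_one_queue(char *buf, int len, int bufsz, const struct q_info *q)
{
	if (!q)
		return len;
	return len + snprintf(buf + len, bufsz - len,
			      "\tQID[%02d], QE-COUNT[%04d], QE-SIZE[%04d]\n",
			      q->queue_id, q->entry_count, q->entry_size);
}

int main(void)
{
	char buf[256];
	struct q_info eq = { 0, 256, 4 };
	int len = 0;

	len = dump_one_queue(buf, len, sizeof(buf), &eq);
	len = dump_one_queue(buf, len, sizeof(buf), NULL);	/* skipped safely */
	fputs(buf, stdout);
	return 0;
}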
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 445826a4c981..7afc757338de 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -421,13 +421,13 @@ fail:
* @vport: pointer to a host virtual N_Port data structure.
*
* This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for
- * the @vport. This mailbox command is necessary for FCoE only.
+ * the @vport. This mailbox command is necessary for SLI4 ports only.
*
* Return code
* 0 - successfully issued REG_VFI for @vport
* A failure code otherwise.
**/
-static int
+int
lpfc_issue_reg_vfi(struct lpfc_vport *vport)
{
struct lpfc_hba *phba = vport->phba;
@@ -438,10 +438,14 @@ lpfc_issue_reg_vfi(struct lpfc_vport *vport)
int rc = 0;
sp = &phba->fc_fabparam;
- ndlp = lpfc_findnode_did(vport, Fabric_DID);
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
- rc = -ENODEV;
- goto fail;
+ /* move forward in case of SLI4 FC port loopback test */
+ if ((phba->sli_rev == LPFC_SLI_REV4) &&
+ !(phba->link_flag & LS_LOOPBACK_MODE)) {
+ ndlp = lpfc_findnode_did(vport, Fabric_DID);
+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
+ rc = -ENODEV;
+ goto fail;
+ }
}
dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
@@ -487,6 +491,54 @@ fail:
}
/**
+ * lpfc_issue_unreg_vfi - Unregister VFI for this vport's fabric login
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine issues a UNREG_VFI mailbox with the vfi, vpi, fcfi triplet for
+ * the @vport. This mailbox command is necessary for SLI4 ports only.
+ *
+ * Return code
+ * 0 - successfully issued UNREG_VFI for @vport
+ * A failure code otherwise.
+ **/
+int
+lpfc_issue_unreg_vfi(struct lpfc_vport *vport)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct Scsi_Host *shost;
+ LPFC_MBOXQ_t *mboxq;
+ int rc;
+
+ mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mboxq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
+ "2556 UNREG_VFI mbox allocation failed"
+ "HBA state x%x\n", phba->pport->port_state);
+ return -ENOMEM;
+ }
+
+ lpfc_unreg_vfi(mboxq, vport);
+ mboxq->vport = vport;
+ mboxq->mbox_cmpl = lpfc_unregister_vfi_cmpl;
+
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
+ "2557 UNREG_VFI issue mbox failed rc x%x "
+ "HBA state x%x\n",
+ rc, phba->pport->port_state);
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ return -EIO;
+ }
+
+ shost = lpfc_shost_from_vport(vport);
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~FC_VFI_REGISTERED;
+ spin_unlock_irq(shost->host_lock);
+ return 0;
+}
+
+/**
* lpfc_check_clean_addr_bit - Check whether assigned FCID is clean.
* @vport: pointer to a host virtual N_Port data structure.
* @sp: pointer to service parameter data structure.
@@ -615,7 +667,9 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
"1816 FLOGI NPIV supported, "
"response data 0x%x\n",
sp->cmn.response_multiple_NPort);
+ spin_lock_irq(&phba->hbalock);
phba->link_flag |= LS_NPIV_FAB_SUPPORTED;
+ spin_unlock_irq(&phba->hbalock);
} else {
/* Because we asked f/w for NPIV it still expects us
to call reg_vnpid at least for the physical host */
@@ -623,7 +677,9 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
LOG_ELS | LOG_VPORT,
"1817 Fabric does not support NPIV "
"- configuring single port mode.\n");
+ spin_lock_irq(&phba->hbalock);
phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
+ spin_unlock_irq(&phba->hbalock);
}
}
@@ -686,11 +742,16 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_do_scr_ns_plogi(phba, vport);
} else if (vport->fc_flag & FC_VFI_REGISTERED)
lpfc_issue_init_vpi(vport);
- else
+ else {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "3135 Need register VFI: (x%x/%x)\n",
+ vport->fc_prevDID, vport->fc_myDID);
lpfc_issue_reg_vfi(vport);
+ }
}
return 0;
}
+
/**
* lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port
* @vport: pointer to a host virtual N_Port data structure.
@@ -907,17 +968,16 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* LPFC_MAX_DISC_THREADS (32). Scanning in the case of no
* alpa map would take too long otherwise.
*/
- if (phba->alpa_map[0] == 0) {
+ if (phba->alpa_map[0] == 0)
vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS;
- if ((phba->sli_rev == LPFC_SLI_REV4) &&
- (!(vport->fc_flag & FC_VFI_REGISTERED) ||
- (vport->fc_prevDID != vport->fc_myDID))) {
- if (vport->fc_flag & FC_VFI_REGISTERED)
- lpfc_sli4_unreg_all_rpis(vport);
- lpfc_issue_reg_vfi(vport);
- lpfc_nlp_put(ndlp);
- goto out;
- }
+ if ((phba->sli_rev == LPFC_SLI_REV4) &&
+ (!(vport->fc_flag & FC_VFI_REGISTERED) ||
+ (vport->fc_prevDID != vport->fc_myDID))) {
+ if (vport->fc_flag & FC_VFI_REGISTERED)
+ lpfc_sli4_unreg_all_rpis(vport);
+ lpfc_issue_reg_vfi(vport);
+ lpfc_nlp_put(ndlp);
+ goto out;
}
goto flogifail;
}
@@ -1075,6 +1135,7 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* Setup CSPs accordingly for Fabric */
sp->cmn.e_d_tov = 0;
sp->cmn.w2.r_a_tov = 0;
+ sp->cmn.virtual_fabric_support = 0;
sp->cls1.classValid = 0;
sp->cls2.seqDelivery = 1;
sp->cls3.seqDelivery = 1;
@@ -1163,8 +1224,7 @@ lpfc_els_abort_flogi(struct lpfc_hba *phba)
spin_lock_irq(&phba->hbalock);
list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
icmd = &iocb->iocb;
- if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR &&
- icmd->un.elsreq64.bdl.ulpIoTag32) {
+ if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR) {
ndlp = (struct lpfc_nodelist *)(iocb->context1);
if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
(ndlp->nlp_DID == Fabric_DID))
@@ -3066,17 +3126,22 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (did == FDMI_DID)
retry = 1;
- if (((cmd == ELS_CMD_FLOGI) || (cmd == ELS_CMD_FDISC)) &&
+ if ((cmd == ELS_CMD_FLOGI) &&
(phba->fc_topology != LPFC_TOPOLOGY_LOOP) &&
!lpfc_error_lost_link(irsp)) {
/* FLOGI retry policy */
retry = 1;
- /* retry forever */
+ /* retry FLOGI forever */
maxretry = 0;
if (cmdiocb->retry >= 100)
delay = 5000;
else if (cmdiocb->retry >= 32)
delay = 1000;
+ } else if ((cmd == ELS_CMD_FDISC) && !lpfc_error_lost_link(irsp)) {
+ /* retry FDISCs every second up to devloss */
+ retry = 1;
+ maxretry = vport->cfg_devloss_tmo;
+ delay = 1000;
}
cmdiocb->retry++;
@@ -3389,11 +3454,17 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/*
* The driver received a LOGO from the rport and has ACK'd it.
- * At this point, the driver is done so release the IOCB and
- * remove the ndlp reference.
+ * At this point, the driver is done so release the IOCB
*/
lpfc_els_free_iocb(phba, cmdiocb);
- lpfc_nlp_put(ndlp);
+
+ /*
+ * Remove the ndlp reference if it's a fabric node that has
+ * sent us an unsolicited LOGO.
+ */
+ if (ndlp->nlp_type & NLP_FABRIC)
+ lpfc_nlp_put(ndlp);
+
return;
}
@@ -4867,23 +4938,31 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
sizeof(struct lpfc_name));
if (!rc) {
- mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (!mbox)
+ if (phba->sli_rev < LPFC_SLI_REV4) {
+ mbox = mempool_alloc(phba->mbox_mem_pool,
+ GFP_KERNEL);
+ if (!mbox)
+ return 1;
+ lpfc_linkdown(phba);
+ lpfc_init_link(phba, mbox,
+ phba->cfg_topology,
+ phba->cfg_link_speed);
+ mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
+ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ mbox->vport = vport;
+ rc = lpfc_sli_issue_mbox(phba, mbox,
+ MBX_NOWAIT);
+ lpfc_set_loopback_flag(phba);
+ if (rc == MBX_NOT_FINISHED)
+ mempool_free(mbox, phba->mbox_mem_pool);
return 1;
-
- lpfc_linkdown(phba);
- lpfc_init_link(phba, mbox,
- phba->cfg_topology,
- phba->cfg_link_speed);
- mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
- mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
- mbox->vport = vport;
- rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
- lpfc_set_loopback_flag(phba);
- if (rc == MBX_NOT_FINISHED) {
- mempool_free(mbox, phba->mbox_mem_pool);
+ } else {
+ /* abort the flogi coming back to ourselves
+ * due to external loopback on the port.
+ */
+ lpfc_els_abort_flogi(phba);
+ return 0;
}
- return 1;
} else if (rc > 0) { /* greater than */
spin_lock_irq(shost->host_lock);
vport->fc_flag |= FC_PT2PT_PLOGI;
@@ -5838,8 +5917,12 @@ lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
vport->fc_myDID = vport->fc_prevDID;
if (phba->sli_rev < LPFC_SLI_REV4)
lpfc_issue_fabric_reglogin(vport);
- else
+ else {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "3138 Need register VFI: (x%x/%x)\n",
+ vport->fc_prevDID, vport->fc_myDID);
lpfc_issue_reg_vfi(vport);
+ }
}
}
return 0;
@@ -6596,56 +6679,6 @@ dropit:
}
/**
- * lpfc_find_vport_by_vpid - Find a vport on a HBA through vport identifier
- * @phba: pointer to lpfc hba data structure.
- * @vpi: host virtual N_Port identifier.
- *
- * This routine finds a vport on a HBA (referred by @phba) through a
- * @vpi. The function walks the HBA's vport list and returns the address
- * of the vport with the matching @vpi.
- *
- * Return code
- * NULL - No vport with the matching @vpi found
- * Otherwise - Address to the vport with the matching @vpi.
- **/
-struct lpfc_vport *
-lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
-{
- struct lpfc_vport *vport;
- unsigned long flags;
- int i = 0;
-
- /* The physical ports are always vpi 0 - translate is unnecessary. */
- if (vpi > 0) {
- /*
- * Translate the physical vpi to the logical vpi. The
- * vport stores the logical vpi.
- */
- for (i = 0; i < phba->max_vpi; i++) {
- if (vpi == phba->vpi_ids[i])
- break;
- }
-
- if (i >= phba->max_vpi) {
- lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
- "2936 Could not find Vport mapped "
- "to vpi %d\n", vpi);
- return NULL;
- }
- }
-
- spin_lock_irqsave(&phba->hbalock, flags);
- list_for_each_entry(vport, &phba->port_list, listentry) {
- if (vport->vpi == i) {
- spin_unlock_irqrestore(&phba->hbalock, flags);
- return vport;
- }
- }
- spin_unlock_irqrestore(&phba->hbalock, flags);
- return NULL;
-}
-
-/**
* lpfc_els_unsol_event - Process an unsolicited event from an els sli ring
* @phba: pointer to lpfc hba data structure.
* @pring: pointer to a SLI ring.
@@ -7281,6 +7314,7 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* Setup CSPs accordingly for Fabric */
sp->cmn.e_d_tov = 0;
sp->cmn.w2.r_a_tov = 0;
+ sp->cmn.virtual_fabric_support = 0;
sp->cls1.classValid = 0;
sp->cls2.seqDelivery = 1;
sp->cls3.seqDelivery = 1;
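The lpfc_els.c changes also split the ELS retry policy: a failed FLOGI is still retried indefinitely (backing off to one second after 32 attempts and to five seconds after 100), while a failed FDISC is now retried once a second only until the devloss timeout is reached. A small stand-alone sketch of that selection logic, assuming the same thresholds as the hunk above (els_retry_policy() itself is hypothetical, not a driver routine):

#include <stdio.h>

/* Pick (maxretry, delay) the way the patched retry handler does;
 * maxretry == 0 means "retry forever". */
static void els_retry_policy(int is_flogi, int retries, int devloss_tmo,
			     int *maxretry, int *delay_ms)
{
	if (is_flogi) {
		*maxretry = 0;			/* retry FLOGI forever */
		if (retries >= 100)
			*delay_ms = 5000;
		else if (retries >= 32)
			*delay_ms = 1000;
		else
			*delay_ms = 0;
	} else {
		*maxretry = devloss_tmo;	/* retry FDISC up to devloss */
		*delay_ms = 1000;		/* one attempt per second */
	}
}

int main(void)
{
	int maxretry, delay;

	els_retry_policy(0, 3, 30, &maxretry, &delay);
	printf("FDISC: up to %d retries, %d ms apart\n", maxretry, delay);
	return 0;
}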
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 091f68e5cb70..678a4b11059c 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1074,6 +1074,12 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
mempool_free(pmb, phba->mbox_mem_pool);
+ /* don't perform discovery for SLI4 loopback diagnostic test */
+ if ((phba->sli_rev == LPFC_SLI_REV4) &&
+ !(phba->hba_flag & HBA_FCOE_MODE) &&
+ (phba->link_flag & LS_LOOPBACK_MODE))
+ return;
+
if (phba->fc_topology == LPFC_TOPOLOGY_LOOP &&
vport->fc_flag & FC_PUBLIC_LOOP &&
!(vport->fc_flag & FC_LBIT)) {
@@ -2646,9 +2652,14 @@ lpfc_init_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
struct lpfc_vport *vport = mboxq->vport;
- /* VFI not supported on interface type 0, just do the flogi */
- if (mboxq->u.mb.mbxStatus && (bf_get(lpfc_sli_intf_if_type,
- &phba->sli4_hba.sli_intf) != LPFC_SLI_INTF_IF_TYPE_0)) {
+ /*
+ * VFI not supported on interface type 0, just do the flogi
+ * Also continue if the VFI is in use - just use the same one.
+ */
+ if (mboxq->u.mb.mbxStatus &&
+ (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+ LPFC_SLI_INTF_IF_TYPE_0) &&
+ mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) {
lpfc_printf_vlog(vport, KERN_ERR,
LOG_MBOX,
"2891 Init VFI mailbox failed 0x%x\n",
@@ -2842,10 +2853,10 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
lpfc_disc_list_loopmap(vport);
/* Start discovery */
lpfc_disc_start(vport);
- goto fail_free_mem;
+ goto out_free_mem;
}
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
- goto fail_free_mem;
+ goto out_free_mem;
}
/* The VPI is implicitly registered when the VFI is registered */
spin_lock_irq(shost->host_lock);
@@ -2855,10 +2866,16 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
spin_unlock_irq(shost->host_lock);
+ /* In case SLI4 FC loopback test, we are ready */
+ if ((phba->sli_rev == LPFC_SLI_REV4) &&
+ (phba->link_flag & LS_LOOPBACK_MODE)) {
+ phba->link_state = LPFC_HBA_READY;
+ goto out_free_mem;
+ }
+
if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
/* For private loop just start discovery and we are done. */
if ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) &&
- (phba->alpa_map[0] == 0) &&
!(vport->fc_flag & FC_PUBLIC_LOOP)) {
/* Use loop map to make discovery list */
lpfc_disc_list_loopmap(vport);
@@ -2870,7 +2887,7 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
}
}
-fail_free_mem:
+out_free_mem:
mempool_free(mboxq, phba->mbox_mem_pool);
lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
kfree(dmabuf);
@@ -2923,6 +2940,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
{
struct lpfc_vport *vport = phba->pport;
LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL;
+ struct Scsi_Host *shost;
int i;
struct lpfc_dmabuf *mp;
int rc;
@@ -2946,6 +2964,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
phba->fc_topology = bf_get(lpfc_mbx_read_top_topology, la);
phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
+ shost = lpfc_shost_from_vport(vport);
if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
@@ -2957,8 +2976,11 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
"1309 Link Up Event npiv not supported in loop "
"topology\n");
/* Get Loop Map information */
- if (bf_get(lpfc_mbx_read_top_il, la))
+ if (bf_get(lpfc_mbx_read_top_il, la)) {
+ spin_lock_irq(shost->host_lock);
vport->fc_flag |= FC_LBIT;
+ spin_unlock_irq(shost->host_lock);
+ }
vport->fc_myDID = bf_get(lpfc_mbx_read_top_alpa_granted, la);
i = la->lilpBde64.tus.f.bdeSize;
@@ -3003,11 +3025,13 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
} else {
if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
if (phba->max_vpi && phba->cfg_enable_npiv &&
- (phba->sli_rev == 3))
+ (phba->sli_rev >= LPFC_SLI_REV3))
phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
}
vport->fc_myDID = phba->fc_pref_DID;
+ spin_lock_irq(shost->host_lock);
vport->fc_flag |= FC_LBIT;
+ spin_unlock_irq(shost->host_lock);
}
spin_unlock_irq(&phba->hbalock);
@@ -3224,15 +3248,14 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
} else if (bf_get(lpfc_mbx_read_top_att_type, la) ==
LPFC_ATT_LINK_DOWN) {
phba->fc_stat.LinkDown++;
- if (phba->link_flag & LS_LOOPBACK_MODE) {
+ if (phba->link_flag & LS_LOOPBACK_MODE)
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
"1308 Link Down Event in loop back mode "
"x%x received "
"Data: x%x x%x x%x\n",
la->eventTag, phba->fc_eventTag,
phba->pport->port_state, vport->fc_flag);
- }
- else {
+ else
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
"1305 Link Down Event x%x received "
"Data: x%x x%x x%x x%x x%x\n",
@@ -3240,7 +3263,6 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
phba->pport->port_state, vport->fc_flag,
bf_get(lpfc_mbx_read_top_mm, la),
bf_get(lpfc_mbx_read_top_fa, la));
- }
lpfc_mbx_issue_link_down(phba);
}
if ((bf_get(lpfc_mbx_read_top_mm, la)) &&
@@ -3594,6 +3616,7 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
MAILBOX_t *mb = &pmb->u.mb;
struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
struct lpfc_nodelist *ndlp;
+ struct Scsi_Host *shost;
ndlp = (struct lpfc_nodelist *) pmb->context2;
pmb->context1 = NULL;
@@ -3639,8 +3662,12 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
* vport discovery */
if (!(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG))
lpfc_start_fdiscs(phba);
- else
+ else {
+ shost = lpfc_shost_from_vport(vport);
+ spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG ;
+ spin_unlock_irq(shost->host_lock);
+ }
lpfc_do_scr_ns_plogi(phba, vport);
}
@@ -5353,6 +5380,73 @@ lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn)
return ndlp;
}
+/*
+ * This routine looks up the ndlp lists for the given RPI. If the rpi
+ * is found, the routine returns the node element list pointer; otherwise
+ * it returns NULL.
+ */
+struct lpfc_nodelist *
+lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_nodelist *ndlp;
+
+ spin_lock_irq(shost->host_lock);
+ ndlp = __lpfc_findnode_rpi(vport, rpi);
+ spin_unlock_irq(shost->host_lock);
+ return ndlp;
+}
+
+/**
+ * lpfc_find_vport_by_vpid - Find a vport on a HBA through vport identifier
+ * @phba: pointer to lpfc hba data structure.
+ * @vpi: the physical host virtual N_Port identifier.
+ *
+ * This routine finds a vport on a HBA (referred by @phba) through a
+ * @vpi. The function walks the HBA's vport list and returns the address
+ * of the vport with the matching @vpi.
+ *
+ * Return code
+ * NULL - No vport with the matching @vpi found
+ * Otherwise - Address to the vport with the matching @vpi.
+ **/
+struct lpfc_vport *
+lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
+{
+ struct lpfc_vport *vport;
+ unsigned long flags;
+ int i = 0;
+
+ /* The physical ports are always vpi 0 - translate is unnecessary. */
+ if (vpi > 0) {
+ /*
+ * Translate the physical vpi to the logical vpi. The
+ * vport stores the logical vpi.
+ */
+ for (i = 0; i < phba->max_vpi; i++) {
+ if (vpi == phba->vpi_ids[i])
+ break;
+ }
+
+ if (i >= phba->max_vpi) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+ "2936 Could not find Vport mapped "
+ "to vpi %d\n", vpi);
+ return NULL;
+ }
+ }
+
+ spin_lock_irqsave(&phba->hbalock, flags);
+ list_for_each_entry(vport, &phba->port_list, listentry) {
+ if (vport->vpi == i) {
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ return vport;
+ }
+ }
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ return NULL;
+}
+
void
lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
uint32_t did)
@@ -5599,7 +5693,7 @@ out:
*
* This function frees memory associated with the mailbox command.
*/
-static void
+void
lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
struct lpfc_vport *vport = mboxq->vport;
@@ -5651,7 +5745,6 @@ lpfc_unregister_fcfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
int
lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
{
- LPFC_MBOXQ_t *mbox;
struct lpfc_vport **vports;
struct lpfc_nodelist *ndlp;
struct Scsi_Host *shost;
@@ -5687,35 +5780,9 @@ lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
/* Cleanup any outstanding ELS commands */
lpfc_els_flush_all_cmd(phba);
- /* Unregister VFI */
- mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (!mbox) {
- lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
- "2556 UNREG_VFI mbox allocation failed"
- "HBA state x%x\n", phba->pport->port_state);
- return -ENOMEM;
- }
-
- lpfc_unreg_vfi(mbox, phba->pport);
- mbox->vport = phba->pport;
- mbox->mbox_cmpl = lpfc_unregister_vfi_cmpl;
-
- rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
- if (rc == MBX_NOT_FINISHED) {
- lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
- "2557 UNREG_VFI issue mbox failed rc x%x "
- "HBA state x%x\n",
- rc, phba->pport->port_state);
- mempool_free(mbox, phba->mbox_mem_pool);
- return -EIO;
- }
-
- shost = lpfc_shost_from_vport(phba->pport);
- spin_lock_irq(shost->host_lock);
- phba->pport->fc_flag &= ~FC_VFI_REGISTERED;
- spin_unlock_irq(shost->host_lock);
-
- return 0;
+ /* Unregister the physical port VFI */
+ rc = lpfc_issue_unreg_vfi(phba->pport);
+ return rc;
}
/**
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 046edc4ab35f..7245bead3755 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -349,6 +349,12 @@ struct csp {
* Word 1 Bit 31 in FLOGI response is clean address bit
*/
#define clean_address_bit request_multiple_Nport /* Word 1, bit 31 */
+/*
+ * Word 1 Bit 30 in common service parameter is overloaded.
+ * Word 1 Bit 30 in FLOGI request is Virtual Fabrics
+ * Word 1 Bit 30 in PLOGI request is random offset
+ */
+#define virtual_fabric_support randomOffset /* Word 1, bit 30 */
#ifdef __BIG_ENDIAN_BITFIELD
uint16_t request_multiple_Nport:1; /* FC Word 1, bit 31 */
uint16_t randomOffset:1; /* FC Word 1, bit 30 */
@@ -1852,8 +1858,8 @@ typedef struct {
uint8_t fabric_AL_PA; /* If using a Fabric Assigned AL_PA */
#endif
-#define FLAGS_LOCAL_LB 0x01 /* link_flags (=1) ENDEC loopback */
#define FLAGS_TOPOLOGY_MODE_LOOP_PT 0x00 /* Attempt loop then pt-pt */
+#define FLAGS_LOCAL_LB 0x01 /* link_flags (=1) ENDEC loopback */
#define FLAGS_TOPOLOGY_MODE_PT_PT 0x02 /* Attempt pt-pt only */
#define FLAGS_TOPOLOGY_MODE_LOOP 0x04 /* Attempt loop only */
#define FLAGS_TOPOLOGY_MODE_PT_LOOP 0x06 /* Attempt pt-pt then loop */
@@ -2819,7 +2825,8 @@ typedef struct {
#ifdef __BIG_ENDIAN_BITFIELD
uint32_t rsvd1 : 19; /* Reserved */
uint32_t cdss : 1; /* Configure Data Security SLI */
- uint32_t rsvd2 : 3; /* Reserved */
+ uint32_t casabt : 1; /* Configure async abts status notice */
+ uint32_t rsvd2 : 2; /* Reserved */
uint32_t cbg : 1; /* Configure BlockGuard */
uint32_t cmv : 1; /* Configure Max VPIs */
uint32_t ccrp : 1; /* Config Command Ring Polling */
@@ -2839,14 +2846,16 @@ typedef struct {
uint32_t ccrp : 1; /* Config Command Ring Polling */
uint32_t cmv : 1; /* Configure Max VPIs */
uint32_t cbg : 1; /* Configure BlockGuard */
- uint32_t rsvd2 : 3; /* Reserved */
+ uint32_t rsvd2 : 2; /* Reserved */
+ uint32_t casabt : 1; /* Configure async abts status notice */
uint32_t cdss : 1; /* Configure Data Security SLI */
uint32_t rsvd1 : 19; /* Reserved */
#endif
#ifdef __BIG_ENDIAN_BITFIELD
uint32_t rsvd3 : 19; /* Reserved */
uint32_t gdss : 1; /* Configure Data Security SLI */
- uint32_t rsvd4 : 3; /* Reserved */
+ uint32_t gasabt : 1; /* Grant async abts status notice */
+ uint32_t rsvd4 : 2; /* Reserved */
uint32_t gbg : 1; /* Grant BlockGuard */
uint32_t gmv : 1; /* Grant Max VPIs */
uint32_t gcrp : 1; /* Grant Command Ring Polling */
@@ -2866,7 +2875,8 @@ typedef struct {
uint32_t gcrp : 1; /* Grant Command Ring Polling */
uint32_t gmv : 1; /* Grant Max VPIs */
uint32_t gbg : 1; /* Grant BlockGuard */
- uint32_t rsvd4 : 3; /* Reserved */
+ uint32_t rsvd4 : 2; /* Reserved */
+ uint32_t gasabt : 1; /* Grant async abts status notice */
uint32_t gdss : 1; /* Configure Data Security SLI */
uint32_t rsvd3 : 19; /* Reserved */
#endif
@@ -3465,6 +3475,7 @@ typedef struct {
} ASYNCSTAT_FIELDS;
#define ASYNC_TEMP_WARN 0x100
#define ASYNC_TEMP_SAFE 0x101
+#define ASYNC_STATUS_CN 0x102
/* IOCB Command template for CMD_IOCB_RCV_ELS64_CX (0xB7)
or CMD_IOCB_RCV_SEQ64_CX (0xB5) */
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 98d21521f539..e5bfa7f334e3 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -1351,11 +1351,11 @@ struct lpfc_mbx_set_link_diag_loopback {
struct {
uint32_t word0;
#define lpfc_mbx_set_diag_lpbk_type_SHIFT 0
-#define lpfc_mbx_set_diag_lpbk_type_MASK 0x00000001
+#define lpfc_mbx_set_diag_lpbk_type_MASK 0x00000003
#define lpfc_mbx_set_diag_lpbk_type_WORD word0
#define LPFC_DIAG_LOOPBACK_TYPE_DISABLE 0x0
#define LPFC_DIAG_LOOPBACK_TYPE_INTERNAL 0x1
-#define LPFC_DIAG_LOOPBACK_TYPE_EXTERNAL 0x2
+#define LPFC_DIAG_LOOPBACK_TYPE_SERDES 0x2
#define lpfc_mbx_set_diag_lpbk_link_num_SHIFT 16
#define lpfc_mbx_set_diag_lpbk_link_num_MASK 0x0000003F
#define lpfc_mbx_set_diag_lpbk_link_num_WORD word0
@@ -1830,6 +1830,8 @@ struct lpfc_mbx_init_vfi {
#define lpfc_init_vfi_hop_count_MASK 0x000000FF
#define lpfc_init_vfi_hop_count_WORD word4
};
+#define MBX_VFI_IN_USE 0x9F02
+
struct lpfc_mbx_reg_vfi {
uint32_t word1;
@@ -2104,6 +2106,8 @@ struct lpfc_mbx_read_config {
#define lpfc_mbx_rd_conf_lnk_type_SHIFT 6
#define lpfc_mbx_rd_conf_lnk_type_MASK 0x00000003
#define lpfc_mbx_rd_conf_lnk_type_WORD word2
+#define LPFC_LNK_TYPE_GE 0
+#define LPFC_LNK_TYPE_FC 1
#define lpfc_mbx_rd_conf_lnk_ldv_SHIFT 8
#define lpfc_mbx_rd_conf_lnk_ldv_MASK 0x00000001
#define lpfc_mbx_rd_conf_lnk_ldv_WORD word2
@@ -3320,6 +3324,9 @@ struct wqe_rctl_dfctl {
#define wqe_la_SHIFT 3
#define wqe_la_MASK 0x000000001
#define wqe_la_WORD word5
+#define wqe_xo_SHIFT 6
+#define wqe_xo_MASK 0x000000001
+#define wqe_xo_WORD word5
#define wqe_ls_SHIFT 7
#define wqe_ls_MASK 0x000000001
#define wqe_ls_WORD word5
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 55bc4fc7376f..dfea2dada02c 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -62,7 +62,6 @@ static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_verify(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
-static int lpfc_sli4_read_config(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_sgl_list(struct lpfc_hba *);
static int lpfc_init_sgl_list(struct lpfc_hba *);
@@ -475,27 +474,6 @@ lpfc_config_port_post(struct lpfc_hba *phba)
/* Get the default values for Model Name and Description */
lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
- if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_16G)
- || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G)
- && !(phba->lmt & LMT_1Gb))
- || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G)
- && !(phba->lmt & LMT_2Gb))
- || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G)
- && !(phba->lmt & LMT_4Gb))
- || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G)
- && !(phba->lmt & LMT_8Gb))
- || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G)
- && !(phba->lmt & LMT_10Gb))
- || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G)
- && !(phba->lmt & LMT_16Gb))) {
- /* Reset link speed to auto */
- lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
- "1302 Invalid speed for this board: "
- "Reset link speed to auto: x%x\n",
- phba->cfg_link_speed);
- phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
- }
-
phba->link_state = LPFC_LINK_DOWN;
/* Only process IOCBs on ELS ring till hba_state is READY */
@@ -585,28 +563,10 @@ lpfc_config_port_post(struct lpfc_hba *phba)
return -EIO;
}
} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
- lpfc_init_link(phba, pmb, phba->cfg_topology,
- phba->cfg_link_speed);
- pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
- lpfc_set_loopback_flag(phba);
- rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
- if (rc != MBX_SUCCESS) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0454 Adapter failed to init, mbxCmd x%x "
- "INIT_LINK, mbxStatus x%x\n",
- mb->mbxCommand, mb->mbxStatus);
-
- /* Clear all interrupt enable conditions */
- writel(0, phba->HCregaddr);
- readl(phba->HCregaddr); /* flush */
- /* Clear all pending interrupts */
- writel(0xffffffff, phba->HAregaddr);
- readl(phba->HAregaddr); /* flush */
- phba->link_state = LPFC_HBA_ERROR;
- if (rc != MBX_BUSY)
- mempool_free(pmb, phba->mbox_mem_pool);
- return -EIO;
- }
+ mempool_free(pmb, phba->mbox_mem_pool);
+ rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
+ if (rc)
+ return rc;
}
/* MBOX buffer will be freed in mbox compl */
pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -668,6 +628,28 @@ lpfc_config_port_post(struct lpfc_hba *phba)
int
lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
{
+ return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
+}
+
+/**
+ * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
+ * @phba: pointer to lpfc hba data structure.
+ * @fc_topology: desired fc topology.
+ * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
+ *
+ * This routine will issue the INIT_LINK mailbox command call.
+ * It is available to other drivers through the lpfc_hba data
+ * structure for use as a delayed link up mechanism with the
+ * module parameter lpfc_suppress_link_up.
+ *
+ * Return code
+ * 0 - success
+ * Any other value - error
+ **/
+int
+lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
+ uint32_t flag)
+{
struct lpfc_vport *vport = phba->pport;
LPFC_MBOXQ_t *pmb;
MAILBOX_t *mb;
@@ -681,9 +663,30 @@ lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
mb = &pmb->u.mb;
pmb->vport = vport;
- lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed);
+ if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
+ ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
+ !(phba->lmt & LMT_1Gb)) ||
+ ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
+ !(phba->lmt & LMT_2Gb)) ||
+ ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
+ !(phba->lmt & LMT_4Gb)) ||
+ ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
+ !(phba->lmt & LMT_8Gb)) ||
+ ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
+ !(phba->lmt & LMT_10Gb)) ||
+ ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
+ !(phba->lmt & LMT_16Gb))) {
+ /* Reset link speed to auto */
+ lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
+ "1302 Invalid speed for this board:%d "
+ "Reset link speed to auto.\n",
+ phba->cfg_link_speed);
+ phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
+ }
+ lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
- lpfc_set_loopback_flag(phba);
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ lpfc_set_loopback_flag(phba);
rc = lpfc_sli_issue_mbox(phba, pmb, flag);
if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -1437,7 +1440,10 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
uint32_t event_data;
struct Scsi_Host *shost;
uint32_t if_type;
- struct lpfc_register portstat_reg;
+ struct lpfc_register portstat_reg = {0};
+ uint32_t reg_err1, reg_err2;
+ uint32_t uerrlo_reg, uemasklo_reg;
+ uint32_t pci_rd_rc1, pci_rd_rc2;
int rc;
/* If the pci channel is offline, ignore possible errors, since
@@ -1449,38 +1455,52 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
if (!phba->cfg_enable_hba_reset)
return;
- /* Send an internal error event to mgmt application */
- lpfc_board_errevt_to_mgmt(phba);
-
- /* For now, the actual action for SLI4 device handling is not
- * specified yet, just treated it as adaptor hardware failure
- */
- event_data = FC_REG_DUMP_EVENT;
- shost = lpfc_shost_from_vport(vport);
- fc_host_post_vendor_event(shost, fc_get_event_number(),
- sizeof(event_data), (char *) &event_data,
- SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
-
if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
switch (if_type) {
case LPFC_SLI_INTF_IF_TYPE_0:
+ pci_rd_rc1 = lpfc_readl(
+ phba->sli4_hba.u.if_type0.UERRLOregaddr,
+ &uerrlo_reg);
+ pci_rd_rc2 = lpfc_readl(
+ phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
+ &uemasklo_reg);
+ /* consider PCI bus read error as pci_channel_offline */
+ if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
+ return;
lpfc_sli4_offline_eratt(phba);
break;
case LPFC_SLI_INTF_IF_TYPE_2:
- portstat_reg.word0 =
- readl(phba->sli4_hba.u.if_type2.STATUSregaddr);
-
+ pci_rd_rc1 = lpfc_readl(
+ phba->sli4_hba.u.if_type2.STATUSregaddr,
+ &portstat_reg.word0);
+ /* consider PCI bus read error as pci_channel_offline */
+ if (pci_rd_rc1 == -EIO)
+ return;
+ reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
+ reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
/* TODO: Register for Overtemp async events. */
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2889 Port Overtemperature event, "
- "taking port\n");
+ "taking port offline\n");
spin_lock_irq(&phba->hbalock);
phba->over_temp_state = HBA_OVER_TEMP;
spin_unlock_irq(&phba->hbalock);
lpfc_sli4_offline_eratt(phba);
- return;
+ break;
}
+ if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
+ reg_err2 == SLIPORT_ERR2_REG_FW_RESTART)
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3143 Port Down: Firmware Restarted\n");
+ else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
+ reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3144 Port Down: Debug Dump\n");
+ else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
+ reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3145 Port Down: Provisioning\n");
/*
* On error status condition, driver need to wait for port
* ready before performing reset.
@@ -1489,14 +1509,19 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
if (!rc) {
/* need reset: attempt for port recovery */
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2887 Port Error: Attempting "
- "Port Recovery\n");
+ "2887 Reset Needed: Attempting Port "
+ "Recovery...\n");
lpfc_offline_prep(phba);
lpfc_offline(phba);
lpfc_sli_brdrestart(phba);
if (lpfc_online(phba) == 0) {
lpfc_unblock_mgmt_io(phba);
- return;
+ /* don't report event on forced debug dump */
+ if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
+ reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
+ return;
+ else
+ break;
}
/* fall through for not able to recover */
}
@@ -1506,6 +1531,16 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
default:
break;
}
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "3123 Report dump event to upper layer\n");
+ /* Send an internal error event to mgmt application */
+ lpfc_board_errevt_to_mgmt(phba);
+
+ event_data = FC_REG_DUMP_EVENT;
+ shost = lpfc_shost_from_vport(vport);
+ fc_host_post_vendor_event(shost, fc_get_event_number(),
+ sizeof(event_data), (char *) &event_data,
+ SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
}
/**
@@ -2674,6 +2709,32 @@ lpfc_offline(struct lpfc_hba *phba)
}
/**
+ * lpfc_scsi_buf_update - Update the scsi_buffers that are already allocated.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine goes through all the scsi buffers in the system and updates the
+ * physical XRIs assigned to each SCSI buffer, because these may change after a
+ * firmware reset.
+ *
+ * Return codes
+ * 0 - successful (for now, it always returns 0)
+ **/
+int
+lpfc_scsi_buf_update(struct lpfc_hba *phba)
+{
+ struct lpfc_scsi_buf *sb, *sb_next;
+
+ spin_lock_irq(&phba->hbalock);
+ spin_lock(&phba->scsi_buf_list_lock);
+ list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list)
+ sb->cur_iocbq.sli4_xritag =
+ phba->sli4_hba.xri_ids[sb->cur_iocbq.sli4_lxritag];
+ spin_unlock(&phba->scsi_buf_list_lock);
+ spin_unlock_irq(&phba->hbalock);
+ return 0;
+}
+
+/**
* lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
* @phba: pointer to lpfc hba data structure.
*
@@ -5040,15 +5101,8 @@ lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
struct lpfc_rpi_hdr *rpi_hdr;
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
- /*
- * If the SLI4 port supports extents, posting the rpi header isn't
- * required. Set the expected maximum count and let the actual value
- * get set when extents are fully allocated.
- */
- if (!phba->sli4_hba.rpi_hdrs_in_use) {
- phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
+ if (!phba->sli4_hba.rpi_hdrs_in_use)
return rc;
- }
if (phba->sli4_hba.extents_in_use)
return -EIO;
@@ -5942,7 +5996,7 @@ lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
* -ENOMEM - No available memory
* -EIO - The mailbox failed to complete successfully.
**/
-static int
+int
lpfc_sli4_read_config(struct lpfc_hba *phba)
{
LPFC_MBOXQ_t *pmb;
@@ -5974,6 +6028,20 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
rc = -EIO;
} else {
rd_config = &pmb->u.mqe.un.rd_config;
+ if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
+ phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
+ phba->sli4_hba.lnk_info.lnk_tp =
+ bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
+ phba->sli4_hba.lnk_info.lnk_no =
+ bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "3081 lnk_type:%d, lnk_numb:%d\n",
+ phba->sli4_hba.lnk_info.lnk_tp,
+ phba->sli4_hba.lnk_info.lnk_no);
+ } else
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "3082 Mailbox (x%x) returned ldv:x0\n",
+ bf_get(lpfc_mqe_command, &pmb->u.mqe));
phba->sli4_hba.extents_in_use =
bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
phba->sli4_hba.max_cfg_param.max_xri =
@@ -6462,6 +6530,7 @@ out_free_fcp_wq:
phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL;
}
kfree(phba->sli4_hba.fcp_wq);
+ phba->sli4_hba.fcp_wq = NULL;
out_free_els_wq:
lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
phba->sli4_hba.els_wq = NULL;
@@ -6474,6 +6543,7 @@ out_free_fcp_cq:
phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
}
kfree(phba->sli4_hba.fcp_cq);
+ phba->sli4_hba.fcp_cq = NULL;
out_free_els_cq:
lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
phba->sli4_hba.els_cq = NULL;
@@ -6486,6 +6556,7 @@ out_free_fp_eq:
phba->sli4_hba.fp_eq[fcp_eqidx] = NULL;
}
kfree(phba->sli4_hba.fp_eq);
+ phba->sli4_hba.fp_eq = NULL;
out_free_sp_eq:
lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
phba->sli4_hba.sp_eq = NULL;
@@ -6519,8 +6590,10 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
phba->sli4_hba.els_wq = NULL;
/* Release FCP work queue */
- for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
- lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
+ if (phba->sli4_hba.fcp_wq != NULL)
+ for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count;
+ fcp_qidx++)
+ lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
kfree(phba->sli4_hba.fcp_wq);
phba->sli4_hba.fcp_wq = NULL;
@@ -6540,15 +6613,18 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
/* Release FCP response complete queue */
fcp_qidx = 0;
- do
- lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
- while (++fcp_qidx < phba->cfg_fcp_eq_count);
+ if (phba->sli4_hba.fcp_cq != NULL)
+ do
+ lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
+ while (++fcp_qidx < phba->cfg_fcp_eq_count);
kfree(phba->sli4_hba.fcp_cq);
phba->sli4_hba.fcp_cq = NULL;
/* Release fast-path event queue */
- for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
- lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
+ if (phba->sli4_hba.fp_eq != NULL)
+ for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count;
+ fcp_qidx++)
+ lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
kfree(phba->sli4_hba.fp_eq);
phba->sli4_hba.fp_eq = NULL;
@@ -6601,11 +6677,18 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
phba->sli4_hba.sp_eq->queue_id);
/* Set up fast-path event queue */
+ if (phba->cfg_fcp_eq_count && !phba->sli4_hba.fp_eq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3147 Fast-path EQs not allocated\n");
+ rc = -ENOMEM;
+ goto out_destroy_sp_eq;
+ }
for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
if (!phba->sli4_hba.fp_eq[fcp_eqidx]) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0522 Fast-path EQ (%d) not "
"allocated\n", fcp_eqidx);
+ rc = -ENOMEM;
goto out_destroy_fp_eq;
}
rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx],
@@ -6630,6 +6713,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
if (!phba->sli4_hba.mbx_cq) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0528 Mailbox CQ not allocated\n");
+ rc = -ENOMEM;
goto out_destroy_fp_eq;
}
rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq,
@@ -6649,6 +6733,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
if (!phba->sli4_hba.els_cq) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0530 ELS CQ not allocated\n");
+ rc = -ENOMEM;
goto out_destroy_mbx_cq;
}
rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq,
@@ -6665,12 +6750,20 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
phba->sli4_hba.sp_eq->queue_id);
/* Set up fast-path FCP Response Complete Queue */
+ if (!phba->sli4_hba.fcp_cq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3148 Fast-path FCP CQ array not "
+ "allocated\n");
+ rc = -ENOMEM;
+ goto out_destroy_els_cq;
+ }
fcp_cqidx = 0;
do {
if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0526 Fast-path FCP CQ (%d) not "
"allocated\n", fcp_cqidx);
+ rc = -ENOMEM;
goto out_destroy_fcp_cq;
}
if (phba->cfg_fcp_eq_count)
@@ -6709,6 +6802,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
if (!phba->sli4_hba.mbx_wq) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0538 Slow-path MQ not allocated\n");
+ rc = -ENOMEM;
goto out_destroy_fcp_cq;
}
rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
@@ -6728,6 +6822,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
if (!phba->sli4_hba.els_wq) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0536 Slow-path ELS WQ not allocated\n");
+ rc = -ENOMEM;
goto out_destroy_mbx_wq;
}
rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq,
@@ -6744,11 +6839,19 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
phba->sli4_hba.els_cq->queue_id);
/* Set up fast-path FCP Work Queue */
+ if (!phba->sli4_hba.fcp_wq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3149 Fast-path FCP WQ array not "
+ "allocated\n");
+ rc = -ENOMEM;
+ goto out_destroy_els_wq;
+ }
for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0534 Fast-path FCP WQ (%d) not "
"allocated\n", fcp_wqidx);
+ rc = -ENOMEM;
goto out_destroy_fcp_wq;
}
rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx],
@@ -6779,6 +6882,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0540 Receive Queue not allocated\n");
+ rc = -ENOMEM;
goto out_destroy_fcp_wq;
}
@@ -6805,18 +6909,21 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
out_destroy_fcp_wq:
for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
+out_destroy_els_wq:
lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
out_destroy_mbx_wq:
lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
out_destroy_fcp_cq:
for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
+out_destroy_els_cq:
lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
out_destroy_mbx_cq:
lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
out_destroy_fp_eq:
for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]);
+out_destroy_sp_eq:
lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
out_error:
return rc;
@@ -6853,13 +6960,18 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
/* Unset ELS complete queue */
lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
/* Unset FCP response complete queue */
- fcp_qidx = 0;
- do {
- lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
- } while (++fcp_qidx < phba->cfg_fcp_eq_count);
+ if (phba->sli4_hba.fcp_cq) {
+ fcp_qidx = 0;
+ do {
+ lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
+ } while (++fcp_qidx < phba->cfg_fcp_eq_count);
+ }
/* Unset fast-path event queue */
- for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
- lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]);
+ if (phba->sli4_hba.fp_eq) {
+ for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count;
+ fcp_qidx++)
+ lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]);
+ }
/* Unset slow-path event queue */
lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
}
@@ -7398,22 +7510,25 @@ out:
static void
lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
{
- struct pci_dev *pdev;
-
- /* Obtain PCI device reference */
- if (!phba->pcidev)
- return;
- else
- pdev = phba->pcidev;
-
- /* Free coherent DMA memory allocated */
-
- /* Unmap I/O memory space */
- iounmap(phba->sli4_hba.drbl_regs_memmap_p);
- iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
- iounmap(phba->sli4_hba.conf_regs_memmap_p);
+ uint32_t if_type;
+ if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
- return;
+ switch (if_type) {
+ case LPFC_SLI_INTF_IF_TYPE_0:
+ iounmap(phba->sli4_hba.drbl_regs_memmap_p);
+ iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
+ iounmap(phba->sli4_hba.conf_regs_memmap_p);
+ break;
+ case LPFC_SLI_INTF_IF_TYPE_2:
+ iounmap(phba->sli4_hba.conf_regs_memmap_p);
+ break;
+ case LPFC_SLI_INTF_IF_TYPE_1:
+ default:
+ dev_printk(KERN_ERR, &phba->pcidev->dev,
+ "FATAL - unsupported SLI4 interface type - %d\n",
+ if_type);
+ break;
+ }
}
/**
@@ -9198,12 +9313,15 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
/* Perform post initialization setup */
lpfc_post_init_setup(phba);
- /* check for firmware upgrade or downgrade */
- snprintf(file_name, 16, "%s.grp", phba->ModelName);
- error = request_firmware(&fw, file_name, &phba->pcidev->dev);
- if (!error) {
- lpfc_write_firmware(phba, fw);
- release_firmware(fw);
+ /* check for firmware upgrade or downgrade (if_type 2 only) */
+ if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
+ LPFC_SLI_INTF_IF_TYPE_2) {
+ snprintf(file_name, 16, "%s.grp", phba->ModelName);
+ error = request_firmware(&fw, file_name, &phba->pcidev->dev);
+ if (!error) {
+ lpfc_write_firmware(phba, fw);
+ release_firmware(fw);
+ }
}
/* Check if there are static vports to be created. */
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 2ebc7d2540c0..20336f09fb3c 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -1293,6 +1293,10 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
phba->sli_rev = LPFC_SLI_REV2;
mb->un.varCfgPort.sli_mode = phba->sli_rev;
+ /* If this is an SLI3 port, configure async status notification. */
+ if (phba->sli_rev == LPFC_SLI_REV3)
+ mb->un.varCfgPort.casabt = 1;
+
/* Now setup pcb */
phba->pcb->type = TYPE_NATIVE_SLI2;
phba->pcb->feature = FEATURE_INITIAL_SLI2;
@@ -2129,6 +2133,14 @@ lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam);
reg_vfi->bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
bf_set(lpfc_reg_vfi_nport_id, reg_vfi, vport->fc_myDID);
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_MBOX,
+ "3134 Register VFI, mydid:x%x, fcfi:%d, "
+ " vfi:%d, vpi:%d, fc_pname:%x%x\n",
+ vport->fc_myDID,
+ vport->phba->fcf.fcfi,
+ vport->phba->sli4_hba.vfi_ids[vport->vfi],
+ vport->phba->vpi_ids[vport->vpi],
+ reg_vfi->wwn[0], reg_vfi->wwn[1]);
}
/**
@@ -2175,16 +2187,15 @@ lpfc_unreg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
}
/**
- * lpfc_dump_fcoe_param - Dump config region 23 to get FCoe parameters.
+ * lpfc_sli4_dump_cfg_rg23 - Dump sli4 port config region 23
* @phba: pointer to the hba structure containing.
* @mbox: pointer to lpfc mbox command to initialize.
*
- * This function create a SLI4 dump mailbox command to dump FCoE
- * parameters stored in region 23.
+ * This function creates a SLI4 dump mailbox command to dump config
+ * region 23.
**/
int
-lpfc_dump_fcoe_param(struct lpfc_hba *phba,
- struct lpfcMboxq *mbox)
+lpfc_sli4_dump_cfg_rg23(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
{
struct lpfc_dmabuf *mp = NULL;
MAILBOX_t *mb;
@@ -2198,9 +2209,9 @@ lpfc_dump_fcoe_param(struct lpfc_hba *phba,
if (!mp || !mp->virt) {
kfree(mp);
- /* dump_fcoe_param failed to allocate memory */
+ /* dump config region 23 failed to allocate memory */
lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
- "2569 lpfc_dump_fcoe_param: memory"
+ "2569 lpfc dump config region 23: memory"
" allocation failed\n");
return 1;
}
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 10d5b5e41499..ade763d3930a 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -389,7 +389,7 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba)
{
struct hbq_dmabuf *hbqbp;
- hbqbp = kmalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
+ hbqbp = kzalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
if (!hbqbp)
return NULL;
@@ -441,7 +441,7 @@ lpfc_sli4_rb_alloc(struct lpfc_hba *phba)
{
struct hbq_dmabuf *dma_buf;
- dma_buf = kmalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
+ dma_buf = kzalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
if (!dma_buf)
return NULL;
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 2ddd02f7c603..e8bb00559943 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -783,6 +783,14 @@ lpfc_device_rm_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
}
static uint32_t
+lpfc_device_recov_unused_node(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ return ndlp->nlp_state;
+}
+
+static uint32_t
lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
@@ -2147,7 +2155,7 @@ static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT])
lpfc_disc_illegal, /* CMPL_ADISC */
lpfc_disc_illegal, /* CMPL_REG_LOGIN */
lpfc_device_rm_unused_node, /* DEVICE_RM */
- lpfc_disc_illegal, /* DEVICE_RECOVERY */
+ lpfc_device_recov_unused_node, /* DEVICE_RECOVERY */
lpfc_rcv_plogi_plogi_issue, /* RCV_PLOGI PLOGI_ISSUE */
lpfc_rcv_prli_plogi_issue, /* RCV_PRLI */
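The hunk above adds lpfc_device_recov_unused_node and points the DEVICE_RECOVERY slot of the lpfc_disc_action table at it. That table is a flat array of handlers indexed by node state and event. A minimal, self-contained sketch of the same dispatch pattern, with made-up state and handler names rather than the driver's, is:

	#include <stdint.h>
	#include <stdio.h>

	#define NUM_STATES 2
	#define NUM_EVENTS 2

	/* Handlers return the resulting state, as in the lpfc state machine. */
	static uint32_t handle_ignore(uint32_t state, uint32_t evt)
	{
		printf("state %u ignores event %u\n", (unsigned)state, (unsigned)evt);
		return state;
	}

	static uint32_t handle_recover(uint32_t state, uint32_t evt)
	{
		printf("state %u recovers on event %u\n", (unsigned)state, (unsigned)evt);
		return 0;	/* hypothetical: drop back to state 0 */
	}

	/* Flat table indexed by state * NUM_EVENTS + event. */
	static uint32_t (*const action[NUM_STATES * NUM_EVENTS])(uint32_t, uint32_t) = {
		handle_ignore, handle_ignore,	/* state 0 */
		handle_ignore, handle_recover,	/* state 1 */
	};

	int main(void)
	{
		uint32_t state = 1;

		state = action[state * NUM_EVENTS + 1](state, 1);
		printf("new state %u\n", (unsigned)state);
		return 0;
	}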
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 2e1e54e5c3ae..c60f5d0b3869 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -681,8 +681,10 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
rrq_empty = list_empty(&phba->active_rrq_list);
spin_unlock_irqrestore(&phba->hbalock, iflag);
- if (ndlp)
+ if (ndlp) {
lpfc_set_rrq_active(phba, ndlp, xri, rxid, 1);
+ lpfc_sli4_abts_err_handler(phba, ndlp, axri);
+ }
lpfc_release_scsi_buf_s4(phba, psb);
if (rrq_empty)
lpfc_worker_wake_up(phba);
@@ -2911,8 +2913,8 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
int_to_scsilun(lpfc_cmd->pCmd->device->lun,
&lpfc_cmd->fcp_cmnd->fcp_lun);
- memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, 16);
-
+ memset(&fcp_cmnd->fcpCdb[0], 0, LPFC_FCP_CDB_LEN);
+ memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
if (scsi_populate_tag_msg(scsi_cmnd, tag)) {
switch (tag[0]) {
case HEAD_OF_QUEUE_TAG:
@@ -3236,6 +3238,15 @@ lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
cmnd->result = err;
goto out_fail_command;
}
+ /*
+ * Do not let the mid-layer retry I/O too fast. If an I/O is retried
+ * without waiting a bit then indicate that the device is busy.
+ */
+ if (cmnd->retries &&
+ time_before(jiffies, (cmnd->jiffies_at_alloc +
+ msecs_to_jiffies(LPFC_RETRY_PAUSE *
+ cmnd->retries))))
+ return SCSI_MLQUEUE_DEVICE_BUSY;
ndlp = rdata->pnode;
if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) &&
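The new check in lpfc_queuecommand_lck above bounces a retried command with SCSI_MLQUEUE_DEVICE_BUSY when the retry arrives sooner than LPFC_RETRY_PAUSE milliseconds per prior retry after the command was allocated. A rough userspace sketch of that throttle, using a monotonic clock in place of jiffies (the names below are illustrative, not the driver's):

	#include <stdbool.h>
	#include <stdio.h>
	#include <time.h>

	#define RETRY_PAUSE_MS 300	/* mirrors the LPFC_RETRY_PAUSE idea */

	static long now_ms(void)
	{
		struct timespec ts;

		clock_gettime(CLOCK_MONOTONIC, &ts);
		return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
	}

	/* Return true when a retried command should be bounced as "busy". */
	static bool retry_too_soon(long alloc_ms, int retries)
	{
		if (retries == 0)
			return false;	/* first issue is never throttled */
		return now_ms() < alloc_ms + (long)RETRY_PAUSE_MS * retries;
	}

	int main(void)
	{
		long alloc_ms = now_ms();

		printf("retry 1 too soon: %d\n", retry_too_soon(alloc_ms, 1));
		printf("retry 0 too soon: %d\n", retry_too_soon(alloc_ms, 0));
		return 0;
	}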
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index ce645b20a6ad..9075a08cf781 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -21,6 +21,7 @@
#include <asm/byteorder.h>
struct lpfc_hba;
+#define LPFC_FCP_CDB_LEN 16
#define list_remove_head(list, entry, type, member) \
do { \
@@ -102,7 +103,7 @@ struct fcp_cmnd {
#define WRITE_DATA 0x01 /* Bit 0 */
#define READ_DATA 0x02 /* Bit 1 */
- uint8_t fcpCdb[16]; /* SRB cdb field is copied here */
+ uint8_t fcpCdb[LPFC_FCP_CDB_LEN]; /* SRB cdb field is copied here */
uint32_t fcpDl; /* Total transfer length */
};
@@ -153,5 +154,5 @@ struct lpfc_scsi_buf {
#define LPFC_SCSI_DMA_EXT_SIZE 264
#define LPFC_BPL_SIZE 1024
-
+#define LPFC_RETRY_PAUSE 300
#define MDAC_DIRECT_CMD 0x22
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 4d4104f38c98..23a27592388c 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -89,15 +89,20 @@ lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
static uint32_t
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
{
- union lpfc_wqe *temp_wqe = q->qe[q->host_index].wqe;
+ union lpfc_wqe *temp_wqe;
struct lpfc_register doorbell;
uint32_t host_index;
+ /* sanity check on queue memory */
+ if (unlikely(!q))
+ return -ENOMEM;
+ temp_wqe = q->qe[q->host_index].wqe;
+
/* If the host has not yet processed the next entry then we are done */
if (((q->host_index + 1) % q->entry_count) == q->hba_index)
return -ENOMEM;
/* set consumption flag every once in a while */
- if (!((q->host_index + 1) % LPFC_RELEASE_NOTIFICATION_INTERVAL))
+ if (!((q->host_index + 1) % q->entry_repost))
bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
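The lpfc_sli4_wq_put change above adds a NULL-queue guard and switches the consumption-notify interval from a fixed constant to q->entry_repost. The underlying producer logic is a circular queue that refuses a new entry when the consumer has not caught up. A simplified stand-alone sketch of that logic (hypothetical field names, no doorbell write):

	#include <errno.h>
	#include <stdio.h>

	#define ENTRY_COUNT 8
	#define ENTRY_REPOST 4		/* notify the consumer every N entries */

	struct ring {
		int entries[ENTRY_COUNT];
		unsigned int host_index;	/* producer slot */
		unsigned int hba_index;		/* consumer slot */
	};

	static int ring_put(struct ring *q, int value)
	{
		if (!q)
			return -ENOMEM;		/* sanity check, as in the patch */
		/* full if advancing the producer would land on the consumer */
		if (((q->host_index + 1) % ENTRY_COUNT) == q->hba_index)
			return -ENOMEM;
		q->entries[q->host_index] = value;
		if (!((q->host_index + 1) % ENTRY_REPOST))
			printf("set consumption-notify flag at slot %u\n", q->host_index);
		q->host_index = (q->host_index + 1) % ENTRY_COUNT;
		return 0;
	}

	int main(void)
	{
		struct ring q = { .host_index = 0, .hba_index = 0 };

		for (int i = 0; i < ENTRY_COUNT; i++)
			if (ring_put(&q, i))
				printf("queue full at entry %d\n", i);
		return 0;
	}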
@@ -134,6 +139,10 @@ lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
uint32_t released = 0;
+ /* sanity check on queue memory */
+ if (unlikely(!q))
+ return 0;
+
if (q->hba_index == index)
return 0;
do {
@@ -158,10 +167,15 @@ lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
static uint32_t
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
- struct lpfc_mqe *temp_mqe = q->qe[q->host_index].mqe;
+ struct lpfc_mqe *temp_mqe;
struct lpfc_register doorbell;
uint32_t host_index;
+ /* sanity check on queue memory */
+ if (unlikely(!q))
+ return -ENOMEM;
+ temp_mqe = q->qe[q->host_index].mqe;
+
/* If the host has not yet processed the next entry then we are done */
if (((q->host_index + 1) % q->entry_count) == q->hba_index)
return -ENOMEM;
@@ -195,6 +209,10 @@ lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
+ /* sanity check on queue memory */
+ if (unlikely(!q))
+ return 0;
+
/* Clear the mailbox pointer for completion */
q->phba->mbox = NULL;
q->hba_index = ((q->hba_index + 1) % q->entry_count);
@@ -213,7 +231,12 @@ lpfc_sli4_mq_release(struct lpfc_queue *q)
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
- struct lpfc_eqe *eqe = q->qe[q->hba_index].eqe;
+ struct lpfc_eqe *eqe;
+
+ /* sanity check on queue memory */
+ if (unlikely(!q))
+ return NULL;
+ eqe = q->qe[q->hba_index].eqe;
/* If the next EQE is not valid then we are done */
if (!bf_get_le32(lpfc_eqe_valid, eqe))
@@ -248,6 +271,10 @@ lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
struct lpfc_eqe *temp_eqe;
struct lpfc_register doorbell;
+ /* sanity check on queue memory */
+ if (unlikely(!q))
+ return 0;
+
/* while there are valid entries */
while (q->hba_index != q->host_index) {
temp_eqe = q->qe[q->host_index].eqe;
@@ -288,6 +315,10 @@ lpfc_sli4_cq_get(struct lpfc_queue *q)
{
struct lpfc_cqe *cqe;
+ /* sanity check on queue memory */
+ if (unlikely(!q))
+ return NULL;
+
/* If the next CQE is not valid then we are done */
if (!bf_get_le32(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
return NULL;
@@ -322,6 +353,9 @@ lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
struct lpfc_cqe *temp_qe;
struct lpfc_register doorbell;
+ /* sanity check on queue memory */
+ if (unlikely(!q))
+ return 0;
/* while there are valid entries */
while (q->hba_index != q->host_index) {
temp_qe = q->qe[q->host_index].cqe;
@@ -359,11 +393,17 @@ static int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
- struct lpfc_rqe *temp_hrqe = hq->qe[hq->host_index].rqe;
- struct lpfc_rqe *temp_drqe = dq->qe[dq->host_index].rqe;
+ struct lpfc_rqe *temp_hrqe;
+ struct lpfc_rqe *temp_drqe;
struct lpfc_register doorbell;
int put_index = hq->host_index;
+ /* sanity check on queue memory */
+ if (unlikely(!hq) || unlikely(!dq))
+ return -ENOMEM;
+ temp_hrqe = hq->qe[hq->host_index].rqe;
+ temp_drqe = dq->qe[dq->host_index].rqe;
+
if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
return -EINVAL;
if (hq->host_index != dq->host_index)
@@ -402,6 +442,10 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
static uint32_t
lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
{
+ /* sanity check on queue memory */
+ if (unlikely(!hq) || unlikely(!dq))
+ return 0;
+
if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
return 0;
hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
@@ -3575,8 +3619,8 @@ lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
* lpfc_reset_barrier - Make HBA ready for HBA reset
* @phba: Pointer to HBA context object.
*
- * This function is called before resetting an HBA. This
- * function requests HBA to quiesce DMAs before a reset.
+ * This function is called before resetting an HBA. This function is called
+ * with hbalock held and requests HBA to quiesce DMAs before a reset.
**/
void lpfc_reset_barrier(struct lpfc_hba *phba)
{
@@ -3851,7 +3895,6 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
{
struct lpfc_sli *psli = &phba->sli;
uint16_t cfg_value;
- uint8_t qindx;
/* Reset HBA */
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
@@ -3867,19 +3910,6 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
spin_lock_irq(&phba->hbalock);
psli->sli_flag &= ~(LPFC_PROCESS_LA);
phba->fcf.fcf_flag = 0;
- /* Clean up the child queue list for the CQs */
- list_del_init(&phba->sli4_hba.mbx_wq->list);
- list_del_init(&phba->sli4_hba.els_wq->list);
- list_del_init(&phba->sli4_hba.hdr_rq->list);
- list_del_init(&phba->sli4_hba.dat_rq->list);
- list_del_init(&phba->sli4_hba.mbx_cq->list);
- list_del_init(&phba->sli4_hba.els_cq->list);
- for (qindx = 0; qindx < phba->cfg_fcp_wq_count; qindx++)
- list_del_init(&phba->sli4_hba.fcp_wq[qindx]->list);
- qindx = 0;
- do
- list_del_init(&phba->sli4_hba.fcp_cq[qindx]->list);
- while (++qindx < phba->cfg_fcp_eq_count);
spin_unlock_irq(&phba->hbalock);
/* Now physically reset the device */
@@ -3892,6 +3922,7 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
/* Perform FCoE PCI function reset */
+ lpfc_sli4_queue_destroy(phba);
lpfc_pci_function_reset(phba);
/* Restore PCI cmd register */
@@ -4339,6 +4370,11 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
spin_unlock_irq(&phba->hbalock);
done = 1;
+
+ if ((pmb->u.mb.un.varCfgPort.casabt == 1) &&
+ (pmb->u.mb.un.varCfgPort.gasabt == 0))
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "3110 Port did not grant ASABT\n");
}
}
if (!done) {
@@ -4551,9 +4587,9 @@ lpfc_sli_hba_setup_error:
* data structure.
**/
static int
-lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba,
- LPFC_MBOXQ_t *mboxq)
+lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
{
+ LPFC_MBOXQ_t *mboxq;
struct lpfc_dmabuf *mp;
struct lpfc_mqe *mqe;
uint32_t data_length;
@@ -4565,10 +4601,16 @@ lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba,
phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
- mqe = &mboxq->u.mqe;
- if (lpfc_dump_fcoe_param(phba, mboxq))
+ mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mboxq)
return -ENOMEM;
+ mqe = &mboxq->u.mqe;
+ if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
+ rc = -ENOMEM;
+ goto out_free_mboxq;
+ }
+
mp = (struct lpfc_dmabuf *) mboxq->context1;
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
@@ -4596,19 +4638,25 @@ lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba,
if (rc) {
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
- return -EIO;
+ rc = -EIO;
+ goto out_free_mboxq;
}
data_length = mqe->un.mb_words[5];
if (data_length > DMP_RGN23_SIZE) {
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
- return -EIO;
+ rc = -EIO;
+ goto out_free_mboxq;
}
lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
- return 0;
+ rc = 0;
+
+out_free_mboxq:
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ return rc;
}
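lpfc_sli4_read_fcoe_params, reworked above, now allocates its own mailbox and routes every exit through the out_free_mboxq label so the mailbox always goes back to the pool. A generic sketch of that goto-cleanup shape in plain C (the resource names here are made up):

	#include <errno.h>
	#include <stdio.h>
	#include <stdlib.h>

	static int read_params(void)
	{
		int rc = 0;
		char *mbox, *buf = NULL;

		mbox = malloc(64);		/* stands in for mempool_alloc() */
		if (!mbox)
			return -ENOMEM;

		buf = malloc(256);		/* second resource, freed on all paths */
		if (!buf) {
			rc = -ENOMEM;
			goto out_free_mbox;
		}

		/* ... issue the command, parse the result, set rc on failure ... */

		free(buf);
	out_free_mbox:
		free(mbox);			/* single cleanup point, like out_free_mboxq */
		return rc;
	}

	int main(void)
	{
		printf("read_params() -> %d\n", read_params());
		return 0;
	}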
/**
@@ -4706,7 +4754,6 @@ static int
lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
{
LPFC_MBOXQ_t *mboxq;
- struct lpfc_mbx_read_config *rd_config;
struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
struct lpfc_controller_attribute *cntl_attr;
struct lpfc_mbx_get_port_name *get_port_name;
@@ -4724,33 +4771,11 @@ lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mboxq)
return -ENOMEM;
-
/* obtain link type and link number via READ_CONFIG */
- lpfc_read_config(phba, mboxq);
- rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
- if (rc == MBX_SUCCESS) {
- rd_config = &mboxq->u.mqe.un.rd_config;
- if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
- phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
- phba->sli4_hba.lnk_info.lnk_tp =
- bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
- phba->sli4_hba.lnk_info.lnk_no =
- bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
- lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
- "3081 lnk_type:%d, lnk_numb:%d\n",
- phba->sli4_hba.lnk_info.lnk_tp,
- phba->sli4_hba.lnk_info.lnk_no);
- goto retrieve_ppname;
- } else
- lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
- "3082 Mailbox (x%x) returned ldv:x0\n",
- bf_get(lpfc_mqe_command,
- &mboxq->u.mqe));
- } else
- lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
- "3083 Mailbox (x%x) failed, status:x%x\n",
- bf_get(lpfc_mqe_command, &mboxq->u.mqe),
- bf_get(lpfc_mqe_status, &mboxq->u.mqe));
+ phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
+ lpfc_sli4_read_config(phba);
+ if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
+ goto retrieve_ppname;
/* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
@@ -4875,14 +4900,19 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
fcp_eqidx = 0;
- do
- lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
- LPFC_QUEUE_REARM);
- while (++fcp_eqidx < phba->cfg_fcp_eq_count);
+ if (phba->sli4_hba.fcp_cq) {
+ do
+ lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
+ LPFC_QUEUE_REARM);
+ while (++fcp_eqidx < phba->cfg_fcp_eq_count);
+ }
lpfc_sli4_eq_release(phba->sli4_hba.sp_eq, LPFC_QUEUE_REARM);
- for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++)
- lpfc_sli4_eq_release(phba->sli4_hba.fp_eq[fcp_eqidx],
- LPFC_QUEUE_REARM);
+ if (phba->sli4_hba.fp_eq) {
+ for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count;
+ fcp_eqidx++)
+ lpfc_sli4_eq_release(phba->sli4_hba.fp_eq[fcp_eqidx],
+ LPFC_QUEUE_REARM);
+ }
}
/**
@@ -5457,6 +5487,8 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
uint16_t count, base;
unsigned long longs;
+ if (!phba->sli4_hba.rpi_hdrs_in_use)
+ phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
if (phba->sli4_hba.extents_in_use) {
/*
* The port supports resource extents. The XRI, VPI, VFI, RPI
@@ -5538,9 +5570,10 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
* need any action - just exit.
*/
if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
- LPFC_IDX_RSRC_RDY)
- return 0;
-
+ LPFC_IDX_RSRC_RDY) {
+ lpfc_sli4_dealloc_resource_identifiers(phba);
+ lpfc_sli4_remove_rpis(phba);
+ }
/* RPIs. */
count = phba->sli4_hba.max_cfg_param.max_rpi;
base = phba->sli4_hba.max_cfg_param.rpi_base;
@@ -5880,14 +5913,6 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
if (!mboxq)
return -ENOMEM;
- /*
- * Continue initialization with default values even if driver failed
- * to read FCoE param config regions
- */
- if (lpfc_sli4_read_fcoe_params(phba, mboxq))
- lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
- "2570 Failed to read FCoE parameters\n");
-
/* Issue READ_REV to collect vpd and FW information. */
vpd_size = SLI4_PAGE_SIZE;
vpd = kzalloc(vpd_size, GFP_KERNEL);
@@ -5925,6 +5950,16 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
}
/*
+	 * Continue initialization with default values even if the driver failed
+	 * to read the FCoE param config regions; only read the parameters if the
+	 * board is FCoE
+ */
+ if (phba->hba_flag & HBA_FCOE_MODE &&
+ lpfc_sli4_read_fcoe_params(phba))
+ lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
+ "2570 Failed to read FCoE parameters\n");
+
+ /*
* Retrieve sli4 device physical port name, failure of doing it
* is considered as non-fatal.
*/
@@ -6044,6 +6079,8 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
"rc = x%x\n", rc);
goto out_free_mbox;
}
+ /* update physical xri mappings in the scsi buffers */
+ lpfc_scsi_buf_update(phba);
/* Read the port's service parameters. */
rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
@@ -6205,7 +6242,11 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
rc = 0;
phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
&mboxq->u.mqe.un.reg_fcfi);
+
+ /* Check if the port is configured to be disabled */
+ lpfc_sli_read_link_ste(phba);
}
+
/*
* The port is ready, set the host's link state to LINK_DOWN
* in preparation for link interrupts.
@@ -6213,10 +6254,25 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
spin_lock_irq(&phba->hbalock);
phba->link_state = LPFC_LINK_DOWN;
spin_unlock_irq(&phba->hbalock);
- if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
- rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
- if (rc)
+ if (!(phba->hba_flag & HBA_FCOE_MODE) &&
+ (phba->hba_flag & LINK_DISABLED)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
+ "3103 Adapter Link is disabled.\n");
+ lpfc_down_link(phba, mboxq);
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ if (rc != MBX_SUCCESS) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
+ "3104 Adapter failed to issue "
+ "DOWN_LINK mbox cmd, rc:x%x\n", rc);
goto out_unset_queue;
+ }
+ } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
+ /* don't perform init_link on SLI4 FC port loopback test */
+ if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
+ rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
+ if (rc)
+ goto out_unset_queue;
+ }
}
mempool_free(mboxq, phba->mbox_mem_pool);
return rc;
@@ -7487,6 +7543,7 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
struct ulp_bde64 *bpl = NULL;
struct ulp_bde64 bde;
struct sli4_sge *sgl = NULL;
+ struct lpfc_dmabuf *dmabuf;
IOCB_t *icmd;
int numBdes = 0;
int i = 0;
@@ -7505,9 +7562,12 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
* have not been byteswapped yet so there is no
* need to swap them back.
*/
- bpl = (struct ulp_bde64 *)
- ((struct lpfc_dmabuf *)piocbq->context3)->virt;
+ if (piocbq->context3)
+ dmabuf = (struct lpfc_dmabuf *)piocbq->context3;
+ else
+ return xritag;
+ bpl = (struct ulp_bde64 *)dmabuf->virt;
if (!bpl)
return xritag;
@@ -7616,6 +7676,8 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
int numBdes, i;
struct ulp_bde64 bde;
struct lpfc_nodelist *ndlp;
+ uint32_t *pcmd;
+ uint32_t if_type;
fip = phba->hba_flag & HBA_FIP_SUPPORT;
/* The fcp commands will set command type */
@@ -7669,6 +7731,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
iocbq->iocb.ulpCommand);
return IOCB_ERROR;
}
+
wqe->els_req.payload_len = xmit_len;
/* Els_reguest64 has a TMO */
bf_set(wqe_tmo, &wqe->els_req.wqe_com,
@@ -7683,9 +7746,28 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
/* CCP CCPE PV PRI in word10 were set in the memcpy */
- if (command_type == ELS_COMMAND_FIP) {
+ if (command_type == ELS_COMMAND_FIP)
els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
>> LPFC_FIP_ELS_ID_SHIFT);
+ pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
+ iocbq->context2)->virt);
+ if_type = bf_get(lpfc_sli_intf_if_type,
+ &phba->sli4_hba.sli_intf);
+ if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
+ if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
+ *pcmd == ELS_CMD_SCR ||
+ *pcmd == ELS_CMD_PLOGI)) {
+ bf_set(els_req64_sp, &wqe->els_req, 1);
+ bf_set(els_req64_sid, &wqe->els_req,
+ iocbq->vport->fc_myDID);
+ bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
+ bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
+ phba->vpi_ids[phba->pport->vpi]);
+ } else if (iocbq->context1) {
+ bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
+ bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
+ phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
+ }
}
bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
@@ -7704,6 +7786,8 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
/* The entire sequence is transmitted for this IOCB */
xmit_len = total_len;
cmnd = CMD_XMIT_SEQUENCE64_CR;
+ if (phba->link_flag & LS_LOOPBACK_MODE)
+ bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
case CMD_XMIT_SEQUENCE64_CR:
/* word3 iocb=io_tag32 wqe=reserved */
wqe->xmit_sequence.rsvd3 = 0;
@@ -7846,6 +7930,16 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
+ pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
+ iocbq->context2)->virt);
+ if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
+ bf_set(els_req64_sp, &wqe->els_req, 1);
+ bf_set(els_req64_sid, &wqe->els_req,
+ iocbq->vport->fc_myDID);
+ bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
+ bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
+ phba->vpi_ids[phba->pport->vpi]);
+ }
command_type = OTHER_COMMAND;
break;
case CMD_CLOSE_XRI_CN:
@@ -8037,6 +8131,8 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
*/
if (piocb->iocb_flag & LPFC_IO_FCP)
piocb->fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba);
+ if (unlikely(!phba->sli4_hba.fcp_wq))
+ return IOCB_ERROR;
if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx],
&wqe))
return IOCB_ERROR;
@@ -8173,6 +8269,137 @@ lpfc_extra_ring_setup( struct lpfc_hba *phba)
return 0;
}
+/* lpfc_sli_abts_recover_port - Recover a port that failed an ABTS.
+ * @vport: pointer to virtual port object.
+ * @ndlp: nodelist pointer for the impacted rport.
+ *
+ * The driver calls this routine in response to a XRI ABORT CQE
+ * event from the port. In this event, the driver is required to
+ * recover its login to the rport even though its login may be valid
+ * from the driver's perspective. The failed ABTS notice from the
+ * port indicates the rport is not responding.
+ */
+static void
+lpfc_sli_abts_recover_port(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp)
+{
+ struct Scsi_Host *shost;
+ struct lpfc_hba *phba;
+ unsigned long flags = 0;
+
+ shost = lpfc_shost_from_vport(vport);
+ phba = vport->phba;
+ if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) {
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_SLI, "3093 No rport recovery needed. "
+ "rport in state 0x%x\n",
+ ndlp->nlp_state);
+ return;
+ }
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "3094 Start rport recovery on shost id 0x%x "
+ "fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x "
+ "flags 0x%x\n",
+ shost->host_no, ndlp->nlp_DID,
+ vport->vpi, ndlp->nlp_rpi, ndlp->nlp_state,
+ ndlp->nlp_flag);
+ /*
+ * The rport is not responding. Don't attempt ADISC recovery.
+ * Remove the FCP-2 flag to force a PLOGI.
+ */
+ spin_lock_irqsave(shost->host_lock, flags);
+ ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ NLP_EVT_DEVICE_RECOVERY);
+ lpfc_cancel_retry_delay_tmo(vport, ndlp);
+ spin_lock_irqsave(shost->host_lock, flags);
+ ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ lpfc_disc_start(vport);
+}
+
+/* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
+ * @phba: Pointer to HBA context object.
+ * @iocbq: Pointer to iocb object.
+ *
+ * The async_event handler calls this routine when it receives
+ * an ASYNC_STATUS_CN event from the port. The port generates
+ * this event when an Abort Sequence request to an rport fails
+ * twice in succession. The abort could be originated by the
+ * driver or by the port. The ABTS could have been for an ELS
+ * or FCP IO. The port only generates this event when an ABTS
+ * fails to complete after one retry.
+ */
+static void
+lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
+ struct lpfc_iocbq *iocbq)
+{
+ struct lpfc_nodelist *ndlp = NULL;
+ uint16_t rpi = 0, vpi = 0;
+ struct lpfc_vport *vport = NULL;
+
+ /* The rpi in the ulpContext is vport-sensitive. */
+ vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
+ rpi = iocbq->iocb.ulpContext;
+
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "3092 Port generated ABTS async event "
+ "on vpi %d rpi %d status 0x%x\n",
+ vpi, rpi, iocbq->iocb.ulpStatus);
+
+ vport = lpfc_find_vport_by_vpid(phba, vpi);
+ if (!vport)
+ goto err_exit;
+ ndlp = lpfc_findnode_rpi(vport, rpi);
+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+ goto err_exit;
+
+ if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
+ lpfc_sli_abts_recover_port(vport, ndlp);
+ return;
+
+ err_exit:
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "3095 Event Context not found, no "
+ "action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
+ iocbq->iocb.ulpContext, iocbq->iocb.ulpStatus,
+ vpi, rpi);
+}
+
+/* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
+ * @phba: pointer to HBA context object.
+ * @ndlp: nodelist pointer for the impacted rport.
+ * @axri: pointer to the wcqe containing the failed exchange.
+ *
+ * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the
+ * port. The port generates this event when an abort exchange request to an
+ * rport fails twice in succession with no reply. The abort could be originated
+ * by the driver or by the port. The ABTS could have been for an ELS or FCP IO.
+ */
+void
+lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
+ struct lpfc_nodelist *ndlp,
+ struct sli4_wcqe_xri_aborted *axri)
+{
+ struct lpfc_vport *vport;
+
+	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+				"3115 Node Context not found, driver "
+				"ignoring abts err event\n");
+		return;
+	}
+	vport = ndlp->vport;
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "3116 Port generated FCP XRI ABORT event on "
+ "vpi %d rpi %d xri x%x status 0x%x\n",
+ ndlp->vport->vpi, ndlp->nlp_rpi,
+ bf_get(lpfc_wcqe_xa_xri, axri),
+ bf_get(lpfc_wcqe_xa_status, axri));
+
+ if (bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT)
+ lpfc_sli_abts_recover_port(vport, ndlp);
+}
+
/**
* lpfc_sli_async_event_handler - ASYNC iocb handler function
* @phba: Pointer to HBA context object.
@@ -8192,63 +8419,58 @@ lpfc_sli_async_event_handler(struct lpfc_hba * phba,
{
IOCB_t *icmd;
uint16_t evt_code;
- uint16_t temp;
struct temp_event temp_event_data;
struct Scsi_Host *shost;
uint32_t *iocb_w;
icmd = &iocbq->iocb;
evt_code = icmd->un.asyncstat.evt_code;
- temp = icmd->ulpContext;
- if ((evt_code != ASYNC_TEMP_WARN) &&
- (evt_code != ASYNC_TEMP_SAFE)) {
+ switch (evt_code) {
+ case ASYNC_TEMP_WARN:
+ case ASYNC_TEMP_SAFE:
+ temp_event_data.data = (uint32_t) icmd->ulpContext;
+ temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
+ if (evt_code == ASYNC_TEMP_WARN) {
+ temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
+ lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
+ "0347 Adapter is very hot, please take "
+ "corrective action. temperature : %d Celsius\n",
+ (uint32_t) icmd->ulpContext);
+ } else {
+ temp_event_data.event_code = LPFC_NORMAL_TEMP;
+ lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
+ "0340 Adapter temperature is OK now. "
+ "temperature : %d Celsius\n",
+ (uint32_t) icmd->ulpContext);
+ }
+
+ /* Send temperature change event to applications */
+ shost = lpfc_shost_from_vport(phba->pport);
+ fc_host_post_vendor_event(shost, fc_get_event_number(),
+ sizeof(temp_event_data), (char *) &temp_event_data,
+ LPFC_NL_VENDOR_ID);
+ break;
+ case ASYNC_STATUS_CN:
+ lpfc_sli_abts_err_handler(phba, iocbq);
+ break;
+ default:
iocb_w = (uint32_t *) icmd;
- lpfc_printf_log(phba,
- KERN_ERR,
- LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0346 Ring %d handler: unexpected ASYNC_STATUS"
" evt_code 0x%x\n"
"W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n"
"W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n"
"W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n"
"W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
- pring->ringno,
- icmd->un.asyncstat.evt_code,
+ pring->ringno, icmd->un.asyncstat.evt_code,
iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
- return;
- }
- temp_event_data.data = (uint32_t)temp;
- temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
- if (evt_code == ASYNC_TEMP_WARN) {
- temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
- lpfc_printf_log(phba,
- KERN_ERR,
- LOG_TEMP,
- "0347 Adapter is very hot, please take "
- "corrective action. temperature : %d Celsius\n",
- temp);
- }
- if (evt_code == ASYNC_TEMP_SAFE) {
- temp_event_data.event_code = LPFC_NORMAL_TEMP;
- lpfc_printf_log(phba,
- KERN_ERR,
- LOG_TEMP,
- "0340 Adapter temperature is OK now. "
- "temperature : %d Celsius\n",
- temp);
+ break;
}
-
- /* Send temperature change event to applications */
- shost = lpfc_shost_from_vport(phba->pport);
- fc_host_post_vendor_event(shost, fc_get_event_number(),
- sizeof(temp_event_data), (char *) &temp_event_data,
- LPFC_NL_VENDOR_ID);
-
}
@@ -8823,12 +9045,14 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
{
IOCB_t *irsp = &rspiocb->iocb;
uint16_t abort_iotag, abort_context;
- struct lpfc_iocbq *abort_iocb;
- struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
-
- abort_iocb = NULL;
+ struct lpfc_iocbq *abort_iocb = NULL;
if (irsp->ulpStatus) {
+
+ /*
+ * Assume that the port already completed and returned, or
+ * will return the iocb. Just Log the message.
+	 * will return the iocb. Just log the message.
abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
@@ -8846,68 +9070,15 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
*/
abort_iocb = phba->sli.iocbq_lookup[abort_context];
- /*
- * If the iocb is not found in Firmware queue the iocb
- * might have completed already. Do not free it again.
- */
- if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
- if (irsp->un.ulpWord[4] != IOERR_NO_XRI) {
- spin_unlock_irq(&phba->hbalock);
- lpfc_sli_release_iocbq(phba, cmdiocb);
- return;
- }
- /* For SLI4 the ulpContext field for abort IOCB
- * holds the iotag of the IOCB being aborted so
- * the local abort_context needs to be reset to
- * match the aborted IOCBs ulpContext.
- */
- if (abort_iocb && phba->sli_rev == LPFC_SLI_REV4)
- abort_context = abort_iocb->iocb.ulpContext;
- }
-
lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
"0327 Cannot abort els iocb %p "
"with tag %x context %x, abort status %x, "
"abort code %x\n",
abort_iocb, abort_iotag, abort_context,
irsp->ulpStatus, irsp->un.ulpWord[4]);
- /*
- * make sure we have the right iocbq before taking it
- * off the txcmplq and try to call completion routine.
- */
- if (!abort_iocb ||
- abort_iocb->iocb.ulpContext != abort_context ||
- (abort_iocb->iocb_flag & LPFC_DRIVER_ABORTED) == 0)
- spin_unlock_irq(&phba->hbalock);
- else if (phba->sli_rev < LPFC_SLI_REV4) {
- /*
- * leave the SLI4 aborted command on the txcmplq
- * list and the command complete WCQE's XB bit
- * will tell whether the SGL (XRI) can be released
- * immediately or to the aborted SGL list for the
- * following abort XRI from the HBA.
- */
- list_del_init(&abort_iocb->list);
- if (abort_iocb->iocb_flag & LPFC_IO_ON_Q) {
- abort_iocb->iocb_flag &= ~LPFC_IO_ON_Q;
- pring->txcmplq_cnt--;
- }
- /* Firmware could still be in progress of DMAing
- * payload, so don't free data buffer till after
- * a hbeat.
- */
- abort_iocb->iocb_flag |= LPFC_DELAY_MEM_FREE;
- abort_iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
- spin_unlock_irq(&phba->hbalock);
-
- abort_iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
- abort_iocb->iocb.un.ulpWord[4] = IOERR_ABORT_REQUESTED;
- (abort_iocb->iocb_cmpl)(phba, abort_iocb, abort_iocb);
- } else
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irq(&phba->hbalock);
}
-
lpfc_sli_release_iocbq(phba, cmdiocb);
return;
}
@@ -9258,6 +9429,14 @@ void
lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "3096 ABORT_XRI_CN completing on xri x%x "
+ "original iotag x%x, abort cmd iotag x%x "
+ "status 0x%x, reason 0x%x\n",
+ cmdiocb->iocb.un.acxri.abortContextTag,
+ cmdiocb->iocb.un.acxri.abortIoTag,
+ cmdiocb->iotag, rspiocb->iocb.ulpStatus,
+ rspiocb->iocb.un.ulpWord[4]);
lpfc_sli_release_iocbq(phba, cmdiocb);
return;
}
@@ -9771,7 +9950,7 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba)
phba->work_status[1] =
readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2885 Port Error Detected: "
+ "2885 Port Status Event: "
"port status reg 0x%x, "
"port smphr reg 0x%x, "
"error 1=0x%x, error 2=0x%x\n",
@@ -10777,6 +10956,9 @@ static void
lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
struct lpfc_wcqe_release *wcqe)
{
+ /* sanity check on queue memory */
+ if (unlikely(!phba->sli4_hba.els_wq))
+ return;
/* Check for the slow-path ELS work queue */
if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
@@ -10866,6 +11048,10 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
uint32_t status, rq_id;
unsigned long iflags;
+ /* sanity check on queue memory */
+ if (unlikely(!hrq) || unlikely(!drq))
+ return workposted;
+
if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
else
@@ -11000,6 +11186,9 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
/* Search for completion queue pointer matching this cqid */
speq = phba->sli4_hba.sp_eq;
+ /* sanity check on queue memory */
+ if (unlikely(!speq))
+ return;
list_for_each_entry(childq, &speq->child_list, list) {
if (childq->queue_id == cqid) {
cq = childq;
@@ -11241,12 +11430,18 @@ lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
return;
}
+ if (unlikely(!phba->sli4_hba.fcp_cq)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "3146 Fast-path completion queues "
+				"do not exist\n");
+ return;
+ }
cq = phba->sli4_hba.fcp_cq[fcp_cqidx];
if (unlikely(!cq)) {
if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0367 Fast-path completion queue "
- "does not exist\n");
+ "(%d) does not exist\n", fcp_cqidx);
return;
}
@@ -11417,6 +11612,8 @@ lpfc_sli4_fp_intr_handler(int irq, void *dev_id)
/* Get to the EQ struct associated with this vector */
fpeq = phba->sli4_hba.fp_eq[fcp_eqidx];
+ if (unlikely(!fpeq))
+ return IRQ_NONE;
/* Check device state for handling interrupt */
if (unlikely(lpfc_intr_state_check(phba))) {
@@ -11635,6 +11832,9 @@ lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax)
uint16_t dmult;
uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+ /* sanity check on queue memory */
+ if (!eq)
+ return -ENODEV;
if (!phba->sli4_hba.pc_sli4_params.supported)
hw_page_size = SLI4_PAGE_SIZE;
@@ -11751,6 +11951,9 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
union lpfc_sli4_cfg_shdr *shdr;
uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+ /* sanity check on queue memory */
+ if (!cq || !eq)
+ return -ENODEV;
if (!phba->sli4_hba.pc_sli4_params.supported)
hw_page_size = SLI4_PAGE_SIZE;
@@ -11933,6 +12136,9 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
union lpfc_sli4_cfg_shdr *shdr;
uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+ /* sanity check on queue memory */
+ if (!mq || !cq)
+ return -ENODEV;
if (!phba->sli4_hba.pc_sli4_params.supported)
hw_page_size = SLI4_PAGE_SIZE;
@@ -12083,6 +12289,9 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
struct dma_address *page;
+ /* sanity check on queue memory */
+ if (!wq || !cq)
+ return -ENODEV;
if (!phba->sli4_hba.pc_sli4_params.supported)
hw_page_size = SLI4_PAGE_SIZE;
@@ -12151,6 +12360,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
wq->subtype = subtype;
wq->host_index = 0;
wq->hba_index = 0;
+ wq->entry_repost = LPFC_RELEASE_NOTIFICATION_INTERVAL;
/* link the wq onto the parent cq child list */
list_add_tail(&wq->list, &cq->child_list);
@@ -12174,6 +12384,9 @@ lpfc_rq_adjust_repost(struct lpfc_hba *phba, struct lpfc_queue *rq, int qno)
{
uint32_t cnt;
+ /* sanity check on queue memory */
+ if (!rq)
+ return;
cnt = lpfc_hbq_defs[qno]->entry_count;
/* Recalc repost for RQs based on buffers initially posted */
@@ -12219,6 +12432,9 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
union lpfc_sli4_cfg_shdr *shdr;
uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+ /* sanity check on queue memory */
+ if (!hrq || !drq || !cq)
+ return -ENODEV;
if (!phba->sli4_hba.pc_sli4_params.supported)
hw_page_size = SLI4_PAGE_SIZE;
@@ -12420,6 +12636,7 @@ lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
+ /* sanity check on queue memory */
if (!eq)
return -ENODEV;
mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
@@ -12475,6 +12692,7 @@ lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
+ /* sanity check on queue memory */
if (!cq)
return -ENODEV;
mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
@@ -12528,6 +12746,7 @@ lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
+ /* sanity check on queue memory */
if (!mq)
return -ENODEV;
mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
@@ -12581,6 +12800,7 @@ lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
+ /* sanity check on queue memory */
if (!wq)
return -ENODEV;
mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
@@ -12634,6 +12854,7 @@ lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
+ /* sanity check on queue memory */
if (!hrq || !drq)
return -ENODEV;
mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
@@ -15252,45 +15473,42 @@ lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
}
/**
- * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
+ * lpfc_sli_get_config_region23 - Get sli3 port region 23 data.
* @phba: pointer to lpfc hba data structure.
+ * @rgn23_data: pointer to config region 23 data.
*
- * This function read region 23 and parse TLV for port status to
- * decide if the user disaled the port. If the TLV indicates the
- * port is disabled, the hba_flag is set accordingly.
+ * This function gets SLI3 port config region 23 data through a memory dump
+ * mailbox command. When it successfully retrieves the data, it returns the
+ * size of the data; otherwise it returns 0.
**/
-void
-lpfc_sli_read_link_ste(struct lpfc_hba *phba)
+static uint32_t
+lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
{
LPFC_MBOXQ_t *pmb = NULL;
MAILBOX_t *mb;
- uint8_t *rgn23_data = NULL;
- uint32_t offset = 0, data_size, sub_tlv_len, tlv_offset;
+ uint32_t offset = 0;
int rc;
+ if (!rgn23_data)
+ return 0;
+
pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmb) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2600 lpfc_sli_read_serdes_param failed to"
- " allocate mailbox memory\n");
- goto out;
+ "2600 failed to allocate mailbox memory\n");
+ return 0;
}
mb = &pmb->u.mb;
- /* Get adapter Region 23 data */
- rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
- if (!rgn23_data)
- goto out;
-
do {
lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
if (rc != MBX_SUCCESS) {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "2601 lpfc_sli_read_link_ste failed to"
- " read config region 23 rc 0x%x Status 0x%x\n",
- rc, mb->mbxStatus);
+ "2601 failed to read config "
+ "region 23, rc 0x%x Status 0x%x\n",
+ rc, mb->mbxStatus);
mb->un.varDmp.word_cnt = 0;
}
/*
@@ -15303,13 +15521,96 @@ lpfc_sli_read_link_ste(struct lpfc_hba *phba)
mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;
lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
- rgn23_data + offset,
- mb->un.varDmp.word_cnt);
+ rgn23_data + offset,
+ mb->un.varDmp.word_cnt);
offset += mb->un.varDmp.word_cnt;
} while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);
- data_size = offset;
- offset = 0;
+ mempool_free(pmb, phba->mbox_mem_pool);
+ return offset;
+}
+
+/**
+ * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
+ * @phba: pointer to lpfc hba data structure.
+ * @rgn23_data: pointer to config region 23 data.
+ *
+ * This function gets SLI4 port config region 23 data through a memory dump
+ * mailbox command. When it successfully retrieves the data, it returns the
+ * size of the data; otherwise it returns 0.
+ **/
+static uint32_t
+lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
+{
+ LPFC_MBOXQ_t *mboxq = NULL;
+ struct lpfc_dmabuf *mp = NULL;
+ struct lpfc_mqe *mqe;
+ uint32_t data_length = 0;
+ int rc;
+
+ if (!rgn23_data)
+ return 0;
+
+ mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mboxq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3105 failed to allocate mailbox memory\n");
+ return 0;
+ }
+
+ if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
+ goto out;
+ mqe = &mboxq->u.mqe;
+ mp = (struct lpfc_dmabuf *) mboxq->context1;
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ if (rc)
+ goto out;
+ data_length = mqe->un.mb_words[5];
+ if (data_length == 0)
+ goto out;
+ if (data_length > DMP_RGN23_SIZE) {
+ data_length = 0;
+ goto out;
+ }
+ lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
+out:
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ if (mp) {
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ }
+ return data_length;
+}
+
+/**
+ * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This function reads region 23 and parses the TLV for port status to
+ * decide if the user disabled the port. If the TLV indicates the
+ * port is disabled, the hba_flag is set accordingly.
+ **/
+void
+lpfc_sli_read_link_ste(struct lpfc_hba *phba)
+{
+ uint8_t *rgn23_data = NULL;
+ uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
+ uint32_t offset = 0;
+
+ /* Get adapter Region 23 data */
+ rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
+ if (!rgn23_data)
+ goto out;
+
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
+ else {
+ if_type = bf_get(lpfc_sli_intf_if_type,
+ &phba->sli4_hba.sli_intf);
+ if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
+ goto out;
+ data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
+ }
if (!data_size)
goto out;
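lpfc_sli_read_link_ste, shown above, now obtains region 23 through the SLI-3 or SLI-4 helper and then walks it as TLV records to find the port-state entry; the walk itself is unchanged and outside this hunk. As a generic illustration of offset-based TLV scanning, here is a sketch with a made-up record layout that is not the real region 23 format:

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Hypothetical layout: [type:1][len:1][len bytes of value] ... 0xff = end */
	static int find_tlv(const uint8_t *data, size_t size, uint8_t want,
			    const uint8_t **value, uint8_t *len)
	{
		size_t off = 0;

		while (off + 2 <= size && data[off] != 0xff) {
			uint8_t type = data[off];
			uint8_t vlen = data[off + 1];

			if (off + 2 + vlen > size)
				return -1;		/* malformed record */
			if (type == want) {
				*value = &data[off + 2];
				*len = vlen;
				return 0;
			}
			off += 2 + vlen;		/* skip to the next record */
		}
		return -1;				/* not found */
	}

	int main(void)
	{
		/* type 0x10 (ignored), then type 0x20 carrying one status byte */
		const uint8_t rgn[] = { 0x10, 0x02, 0xaa, 0xbb, 0x20, 0x01, 0x01, 0xff };
		const uint8_t *val;
		uint8_t len;

		if (!find_tlv(rgn, sizeof(rgn), 0x20, &val, &len))
			printf("port-state TLV: len %u, value 0x%02x\n",
			       (unsigned)len, val[0]);
		return 0;
	}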
@@ -15373,9 +15674,8 @@ lpfc_sli_read_link_ste(struct lpfc_hba *phba)
goto out;
}
}
+
out:
- if (pmb)
- mempool_free(pmb, phba->mbox_mem_pool);
kfree(rgn23_data);
return;
}
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index d5cffd8af340..3f266e2c54e0 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -291,7 +291,7 @@ struct lpfc_bmbx {
#define LPFC_RQE_SIZE 8
#define LPFC_EQE_DEF_COUNT 1024
-#define LPFC_CQE_DEF_COUNT 256
+#define LPFC_CQE_DEF_COUNT 1024
#define LPFC_WQE_DEF_COUNT 256
#define LPFC_MQE_DEF_COUNT 16
#define LPFC_RQE_DEF_COUNT 512
@@ -420,7 +420,16 @@ struct lpfc_sli4_hba {
void __iomem *STATUSregaddr;
void __iomem *CTRLregaddr;
void __iomem *ERR1regaddr;
+#define SLIPORT_ERR1_REG_ERR_CODE_1 0x1
+#define SLIPORT_ERR1_REG_ERR_CODE_2 0x2
void __iomem *ERR2regaddr;
+#define SLIPORT_ERR2_REG_FW_RESTART 0x0
+#define SLIPORT_ERR2_REG_FUNC_PROVISON 0x1
+#define SLIPORT_ERR2_REG_FORCED_DUMP 0x2
+#define SLIPORT_ERR2_REG_FAILURE_EQ 0x3
+#define SLIPORT_ERR2_REG_FAILURE_CQ 0x4
+#define SLIPORT_ERR2_REG_FAILURE_BUS 0x5
+#define SLIPORT_ERR2_REG_FAILURE_RQ 0x6
} if_type2;
} u;
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index b0630e37f1ef..dd044d01a07f 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "8.3.27"
+#define LPFC_DRIVER_VERSION "8.3.28"
#define LPFC_DRIVER_NAME "lpfc"
#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index cff6ca67415c..0fe188e66000 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -774,10 +774,10 @@ lpfc_create_vport_work_array(struct lpfc_hba *phba)
return NULL;
spin_lock_irq(&phba->hbalock);
list_for_each_entry(port_iterator, &phba->port_list, listentry) {
+ if (port_iterator->load_flag & FC_UNLOADING)
+ continue;
if (!scsi_host_get(lpfc_shost_from_vport(port_iterator))) {
- if (!(port_iterator->load_flag & FC_UNLOADING))
- lpfc_printf_vlog(port_iterator, KERN_ERR,
- LOG_VPORT,
+ lpfc_printf_vlog(port_iterator, KERN_ERR, LOG_VPORT,
"1801 Create vport work array FAILED: "
"cannot do scsi_host_get\n");
continue;
diff --git a/drivers/scsi/mac_esp.c b/drivers/scsi/mac_esp.c
index 590ce1ef2016..4ceeace80453 100644
--- a/drivers/scsi/mac_esp.c
+++ b/drivers/scsi/mac_esp.c
@@ -25,6 +25,7 @@
#include <asm/dma.h>
#include <asm/macints.h>
#include <asm/macintosh.h>
+#include <asm/mac_via.h>
#include <scsi/scsi_host.h>
@@ -149,7 +150,7 @@ static inline int mac_esp_wait_for_dreq(struct esp *esp)
do {
if (mep->pdma_regs == NULL) {
- if (mac_irq_pending(IRQ_MAC_SCSIDRQ))
+ if (via2_scsi_drq_pending())
return 0;
} else {
if (nubus_readl(mep->pdma_regs) & 0x200)
diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c
index af3a6af97cc7..ea2bde206f7f 100644
--- a/drivers/scsi/mac_scsi.c
+++ b/drivers/scsi/mac_scsi.c
@@ -291,8 +291,7 @@ int __init macscsi_detect(struct scsi_host_template * tpnt)
((struct NCR5380_hostdata *)instance->hostdata)->ctrl = 0;
if (instance->irq != SCSI_IRQ_NONE)
- if (request_irq(instance->irq, NCR5380_intr, IRQ_FLG_SLOW,
- "ncr5380", instance)) {
+ if (request_irq(instance->irq, NCR5380_intr, 0, "ncr5380", instance)) {
printk(KERN_WARNING "scsi%d: IRQ%d not free, interrupts disabled\n",
instance->host_no, instance->irq);
instance->irq = SCSI_IRQ_NONE;
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 5c1776406c96..15eefa1d61fd 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -306,19 +306,22 @@ mega_query_adapter(adapter_t *adapter)
adapter->host->sg_tablesize = adapter->sglen;
- /* use HP firmware and bios version encoding */
+ /* use HP firmware and bios version encoding
+ Note: fw_version[0|1] and bios_version[0|1] were originally shifted
+ right 8 bits making them zero. This 0 value was hardcoded to fix
+ sparse warnings. */
if (adapter->product_info.subsysvid == HP_SUBSYS_VID) {
sprintf (adapter->fw_version, "%c%d%d.%d%d",
adapter->product_info.fw_version[2],
- adapter->product_info.fw_version[1] >> 8,
+ 0,
adapter->product_info.fw_version[1] & 0x0f,
- adapter->product_info.fw_version[0] >> 8,
+ 0,
adapter->product_info.fw_version[0] & 0x0f);
sprintf (adapter->bios_version, "%c%d%d.%d%d",
adapter->product_info.bios_version[2],
- adapter->product_info.bios_version[1] >> 8,
+ 0,
adapter->product_info.bios_version[1] & 0x0f,
- adapter->product_info.bios_version[0] >> 8,
+ 0,
adapter->product_info.bios_version[0] & 0x0f);
} else {
memcpy(adapter->fw_version,
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index dd94c7d574fb..e5f416f8042d 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -33,9 +33,9 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "00.00.06.12-rc1"
-#define MEGASAS_RELDATE "Oct. 5, 2011"
-#define MEGASAS_EXT_VERSION "Wed. Oct. 5 17:00:00 PDT 2011"
+#define MEGASAS_VERSION "00.00.06.14-rc1"
+#define MEGASAS_RELDATE "Jan. 6, 2012"
+#define MEGASAS_EXT_VERSION "Fri. Jan. 6 17:00:00 PDT 2012"
/*
* Device IDs
@@ -773,7 +773,6 @@ struct megasas_ctrl_info {
#define MFI_OB_INTR_STATUS_MASK 0x00000002
#define MFI_POLL_TIMEOUT_SECS 60
-#define MEGASAS_COMPLETION_TIMER_INTERVAL (HZ/10)
#define MFI_REPLY_1078_MESSAGE_INTERRUPT 0x80000000
#define MFI_REPLY_GEN2_MESSAGE_INTERRUPT 0x00000001
@@ -1353,7 +1352,6 @@ struct megasas_instance {
u32 mfiStatus;
u32 last_seq_num;
- struct timer_list io_completion_timer;
struct list_head internal_reset_pending_q;
/* Ptr to hba specific information */
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 29a994f9c4f1..8b300be44284 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -18,7 +18,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* FILE: megaraid_sas_base.c
- * Version : v00.00.06.12-rc1
+ * Version : v00.00.06.14-rc1
*
* Authors: LSI Corporation
* Sreenivas Bagalkote
@@ -59,14 +59,6 @@
#include "megaraid_sas.h"
/*
- * poll_mode_io:1- schedule complete completion from q cmd
- */
-static unsigned int poll_mode_io;
-module_param_named(poll_mode_io, poll_mode_io, int, 0);
-MODULE_PARM_DESC(poll_mode_io,
- "Complete cmds from IO path, (default=0)");
-
-/*
* Number of sectors per IO command
* Will be set in megasas_init_mfi if user does not provide
*/
@@ -1439,11 +1431,6 @@ megasas_build_and_issue_cmd(struct megasas_instance *instance,
instance->instancet->fire_cmd(instance, cmd->frame_phys_addr,
cmd->frame_count-1, instance->reg_set);
- /*
- * Check if we have pend cmds to be completed
- */
- if (poll_mode_io && atomic_read(&instance->fw_outstanding))
- tasklet_schedule(&instance->isr_tasklet);
return 0;
out_return_cmd:
@@ -3370,47 +3357,6 @@ fail_fw_init:
return -EINVAL;
}
-/**
- * megasas_start_timer - Initializes a timer object
- * @instance: Adapter soft state
- * @timer: timer object to be initialized
- * @fn: timer function
- * @interval: time interval between timer function call
- */
-static inline void
-megasas_start_timer(struct megasas_instance *instance,
- struct timer_list *timer,
- void *fn, unsigned long interval)
-{
- init_timer(timer);
- timer->expires = jiffies + interval;
- timer->data = (unsigned long)instance;
- timer->function = fn;
- add_timer(timer);
-}
-
-/**
- * megasas_io_completion_timer - Timer fn
- * @instance_addr: Address of adapter soft state
- *
- * Schedules tasklet for cmd completion
- * if poll_mode_io is set
- */
-static void
-megasas_io_completion_timer(unsigned long instance_addr)
-{
- struct megasas_instance *instance =
- (struct megasas_instance *)instance_addr;
-
- if (atomic_read(&instance->fw_outstanding))
- tasklet_schedule(&instance->isr_tasklet);
-
- /* Restart timer */
- if (poll_mode_io)
- mod_timer(&instance->io_completion_timer,
- jiffies + MEGASAS_COMPLETION_TIMER_INTERVAL);
-}
-
static u32
megasas_init_adapter_mfi(struct megasas_instance *instance)
{
@@ -3638,11 +3584,6 @@ static int megasas_init_fw(struct megasas_instance *instance)
tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
(unsigned long)instance);
- /* Initialize the cmd completion timer */
- if (poll_mode_io)
- megasas_start_timer(instance, &instance->io_completion_timer,
- megasas_io_completion_timer,
- MEGASAS_COMPLETION_TIMER_INTERVAL);
return 0;
fail_init_adapter:
@@ -4369,9 +4310,6 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state)
host = instance->host;
instance->unload = 1;
- if (poll_mode_io)
- del_timer_sync(&instance->io_completion_timer);
-
megasas_flush_cache(instance);
megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN);
@@ -4511,12 +4449,6 @@ megasas_resume(struct pci_dev *pdev)
}
instance->instancet->enable_intr(instance->reg_set);
-
- /* Initialize the cmd completion timer */
- if (poll_mode_io)
- megasas_start_timer(instance, &instance->io_completion_timer,
- megasas_io_completion_timer,
- MEGASAS_COMPLETION_TIMER_INTERVAL);
instance->unload = 0;
/*
@@ -4570,9 +4502,6 @@ static void __devexit megasas_detach_one(struct pci_dev *pdev)
host = instance->host;
fusion = instance->ctrl_context;
- if (poll_mode_io)
- del_timer_sync(&instance->io_completion_timer);
-
scsi_remove_host(instance->host);
megasas_flush_cache(instance);
megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
@@ -4773,6 +4702,8 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
cmd->frame->hdr.context = cmd->index;
cmd->frame->hdr.pad_0 = 0;
+ cmd->frame->hdr.flags &= ~(MFI_FRAME_IEEE | MFI_FRAME_SGL64 |
+ MFI_FRAME_SENSE64);
/*
* The management interface between applications and the fw uses
@@ -5219,60 +5150,6 @@ megasas_sysfs_set_dbg_lvl(struct device_driver *dd, const char *buf, size_t coun
static DRIVER_ATTR(dbg_lvl, S_IRUGO|S_IWUSR, megasas_sysfs_show_dbg_lvl,
megasas_sysfs_set_dbg_lvl);
-static ssize_t
-megasas_sysfs_show_poll_mode_io(struct device_driver *dd, char *buf)
-{
- return sprintf(buf, "%u\n", poll_mode_io);
-}
-
-static ssize_t
-megasas_sysfs_set_poll_mode_io(struct device_driver *dd,
- const char *buf, size_t count)
-{
- int retval = count;
- int tmp = poll_mode_io;
- int i;
- struct megasas_instance *instance;
-
- if (sscanf(buf, "%u", &poll_mode_io) < 1) {
- printk(KERN_ERR "megasas: could not set poll_mode_io\n");
- retval = -EINVAL;
- }
-
- /*
- * Check if poll_mode_io is already set or is same as previous value
- */
- if ((tmp && poll_mode_io) || (tmp == poll_mode_io))
- goto out;
-
- if (poll_mode_io) {
- /*
- * Start timers for all adapters
- */
- for (i = 0; i < megasas_mgmt_info.max_index; i++) {
- instance = megasas_mgmt_info.instance[i];
- if (instance) {
- megasas_start_timer(instance,
- &instance->io_completion_timer,
- megasas_io_completion_timer,
- MEGASAS_COMPLETION_TIMER_INTERVAL);
- }
- }
- } else {
- /*
- * Delete timers for all adapters
- */
- for (i = 0; i < megasas_mgmt_info.max_index; i++) {
- instance = megasas_mgmt_info.instance[i];
- if (instance)
- del_timer_sync(&instance->io_completion_timer);
- }
- }
-
-out:
- return retval;
-}
-
static void
megasas_aen_polling(struct work_struct *work)
{
@@ -5502,11 +5379,6 @@ megasas_aen_polling(struct work_struct *work)
kfree(ev);
}
-
-static DRIVER_ATTR(poll_mode_io, S_IRUGO|S_IWUSR,
- megasas_sysfs_show_poll_mode_io,
- megasas_sysfs_set_poll_mode_io);
-
/**
* megasas_init - Driver load entry point
*/
@@ -5566,11 +5438,6 @@ static int __init megasas_init(void)
if (rval)
goto err_dcf_dbg_lvl;
rval = driver_create_file(&megasas_pci_driver.driver,
- &driver_attr_poll_mode_io);
- if (rval)
- goto err_dcf_poll_mode_io;
-
- rval = driver_create_file(&megasas_pci_driver.driver,
&driver_attr_support_device_change);
if (rval)
goto err_dcf_support_device_change;
@@ -5579,10 +5446,6 @@ static int __init megasas_init(void)
err_dcf_support_device_change:
driver_remove_file(&megasas_pci_driver.driver,
- &driver_attr_poll_mode_io);
-
-err_dcf_poll_mode_io:
- driver_remove_file(&megasas_pci_driver.driver,
&driver_attr_dbg_lvl);
err_dcf_dbg_lvl:
driver_remove_file(&megasas_pci_driver.driver,
@@ -5607,8 +5470,6 @@ err_pcidrv:
static void __exit megasas_exit(void)
{
driver_remove_file(&megasas_pci_driver.driver,
- &driver_attr_poll_mode_io);
- driver_remove_file(&megasas_pci_driver.driver,
&driver_attr_dbg_lvl);
driver_remove_file(&megasas_pci_driver.driver,
&driver_attr_support_poll_for_event);
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index 5255dd688aca..294abb0defa6 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -282,7 +282,9 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
else {
*pDevHandle = MR_PD_INVALID; /* set dev handle as invalid. */
if ((raid->level >= 5) &&
- (instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER))
+ ((instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER &&
+ raid->regTypeReqOnRead != REGION_TYPE_UNUSED)))
pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
else if (raid->level == 1) {
/* Get alternate Pd. */
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2.h b/drivers/scsi/mpt2sas/mpi/mpi2.h
index 8dc1b32918dd..a01f0aa66f20 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2.h
@@ -8,7 +8,7 @@
* scatter/gather formats.
* Creation Date: June 21, 2006
*
- * mpi2.h Version: 02.00.20
+ * mpi2.h Version: 02.00.22
*
* Version History
* ---------------
@@ -69,6 +69,8 @@
* 02-23-11 02.00.19 Bumped MPI2_HEADER_VERSION_UNIT.
* Added MPI2_FUNCTION_SEND_HOST_MESSAGE.
* 03-09-11 02.00.20 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 05-25-11 02.00.21 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 08-24-11 02.00.22 Bumped MPI2_HEADER_VERSION_UNIT.
* --------------------------------------------------------------------------
*/
@@ -94,7 +96,7 @@
#define MPI2_VERSION_02_00 (0x0200)
/* versioning for this MPI header set */
-#define MPI2_HEADER_VERSION_UNIT (0x14)
+#define MPI2_HEADER_VERSION_UNIT (0x16)
#define MPI2_HEADER_VERSION_DEV (0x00)
#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00)
#define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
@@ -1073,8 +1075,10 @@ typedef struct _MPI2_IEEE_SGE_UNION
#define MPI2_IEEE_SGE_FLAGS_IOCPLB_ADDR (0x02)
#define MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR (0x03)
/* IEEE Simple Element only */
-#define MPI2_IEEE_SGE_FLAGS_SYSTEMPLBCPI_ADDR (0x03)
+#define MPI2_IEEE_SGE_FLAGS_SYSTEMPLBPCI_ADDR (0x03)
/* IEEE Chain Element only */
+#define MPI2_IEEE_SGE_FLAGS_SYSTEMPLBCPI_ADDR \
+ (MPI2_IEEE_SGE_FLAGS_SYSTEMPLBPCI_ADDR) /* typo in name */
/****************************************************************************
* IEEE SGE operation Macros
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
index cfd95b4e3004..3a023dad77a1 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
@@ -6,7 +6,7 @@
* Title: MPI Configuration messages and pages
* Creation Date: November 10, 2006
*
- * mpi2_cnfg.h Version: 02.00.19
+ * mpi2_cnfg.h Version: 02.00.21
*
* Version History
* ---------------
@@ -140,6 +140,13 @@
* Added SASNotifyPrimitiveMasks field to
* MPI2_CONFIG_PAGE_IOC_7.
* 03-09-11 02.00.19 Fixed IO Unit Page 10 (to match the spec).
+ * 05-25-11 02.00.20 Cleaned up a few comments.
+ * 08-24-11 02.00.21 Marked the IO Unit Page 7 PowerManagementCapabilities
+ * for PCIe link as obsolete.
+ * Added SpinupFlags field containing a Disable Spin-up
+ * bit to the MPI2_SAS_IOUNIT4_SPINUP_GROUP fields of
+ * SAS IO Unit Page 4.
+
* --------------------------------------------------------------------------
*/
@@ -904,8 +911,8 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_7 {
#define MPI2_IOUNITPAGE7_PMCAP_12_5_PCT_IOCSPEED (0x00000400)
#define MPI2_IOUNITPAGE7_PMCAP_25_0_PCT_IOCSPEED (0x00000200)
#define MPI2_IOUNITPAGE7_PMCAP_50_0_PCT_IOCSPEED (0x00000100)
-#define MPI2_IOUNITPAGE7_PMCAP_PCIE_WIDTH_CHANGE (0x00000008)
-#define MPI2_IOUNITPAGE7_PMCAP_PCIE_SPEED_CHANGE (0x00000004)
+#define MPI2_IOUNITPAGE7_PMCAP_PCIE_WIDTH_CHANGE (0x00000008) /* obsolete */
+#define MPI2_IOUNITPAGE7_PMCAP_PCIE_SPEED_CHANGE (0x00000004) /* obsolete */
/* defines for IO Unit Page 7 IOCTemperatureUnits field */
#define MPI2_IOUNITPAGE7_IOC_TEMP_NOT_PRESENT (0x00)
@@ -1970,10 +1977,14 @@ typedef struct _MPI2_SAS_IOUNIT4_SPINUP_GROUP
{
U8 MaxTargetSpinup; /* 0x00 */
U8 SpinupDelay; /* 0x01 */
- U16 Reserved1; /* 0x02 */
+ U8 SpinupFlags; /* 0x02 */
+ U8 Reserved1; /* 0x03 */
} MPI2_SAS_IOUNIT4_SPINUP_GROUP, MPI2_POINTER PTR_MPI2_SAS_IOUNIT4_SPINUP_GROUP,
Mpi2SasIOUnit4SpinupGroup_t, MPI2_POINTER pMpi2SasIOUnit4SpinupGroup_t;
+/* defines for SAS IO Unit Page 4 SpinupFlags */
+#define MPI2_SASIOUNIT4_SPINUP_DISABLE_FLAG (0x01)
+
/*
* Host code (drivers, BIOS, utilities, etc.) should leave this define set to
* one and check the value returned for NumPhys at runtime.
@@ -2321,13 +2332,12 @@ typedef struct _MPI2_CONFIG_PAGE_EXPANDER_1
/* use MPI2_SAS_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */
-/* use MPI2_SAS_APHYINFO_ defines for AttachedPhyInfo field */
-
/* values for SAS Expander Page 1 DiscoveryInfo field */
#define MPI2_SAS_EXPANDER1_DISCINFO_BAD_PHY_DISABLED (0x04)
#define MPI2_SAS_EXPANDER1_DISCINFO_LINK_STATUS_CHANGE (0x02)
#define MPI2_SAS_EXPANDER1_DISCINFO_NO_ROUTING_ENTRIES (0x01)
+/* use MPI2_SAS_APHYINFO_ defines for AttachedPhyInfo field */
/****************************************************************************
* SAS Device Config Pages
@@ -2447,6 +2457,8 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_0
#define MPI2_SASPHY0_PAGEVERSION (0x03)
+/* use MPI2_SAS_APHYINFO_ defines for AttachedPhyInfo field */
+
/* use MPI2_SAS_PRATE_ defines for the ProgrammedLinkRate field */
/* use MPI2_SAS_HWRATE_ defines for the HwLinkRate field */
@@ -2454,12 +2466,10 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_0
/* values for SAS PHY Page 0 Flags field */
#define MPI2_SAS_PHY0_FLAGS_SGPIO_DIRECT_ATTACH_ENC (0x01)
-/* use MPI2_SAS_APHYINFO_ defines for AttachedPhyInfo field */
+/* use MPI2_SAS_PHYINFO_ for the PhyInfo field */
/* use MPI2_SAS_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */
-/* use MPI2_SAS_PHYINFO_ for the PhyInfo field */
-
/* SAS PHY Page 1 */
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
index 93d9b6956d05..9a925c07a9ec 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
@@ -6,7 +6,7 @@
* Title: MPI IOC, Port, Event, FW Download, and FW Upload messages
* Creation Date: October 11, 2006
*
- * mpi2_ioc.h Version: 02.00.17
+ * mpi2_ioc.h Version: 02.00.19
*
* Version History
* ---------------
@@ -110,6 +110,13 @@
* Added Temperature Threshold Event.
* Added Host Message Event.
* Added Send Host Message request and reply.
+ * 05-25-11 02.00.18 For Extended Image Header, added
+ * MPI2_EXT_IMAGE_TYPE_MIN_PRODUCT_SPECIFIC and
+ * MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC defines.
+ * Deprecated MPI2_EXT_IMAGE_TYPE_MAX define.
+ * 08-24-11 02.00.19 Added PhysicalPort field to
+ * MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE structure.
+ * Marked MPI2_PM_CONTROL_FEATURE_PCIE_LINK as obsolete.
* --------------------------------------------------------------------------
*/
@@ -578,7 +585,7 @@ typedef struct _MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE
{
U16 TaskTag; /* 0x00 */
U8 ReasonCode; /* 0x02 */
- U8 Reserved1; /* 0x03 */
+ U8 PhysicalPort; /* 0x03 */
U8 ASC; /* 0x04 */
U8 ASCQ; /* 0x05 */
U16 DevHandle; /* 0x06 */
@@ -1366,16 +1373,18 @@ typedef struct _MPI2_EXT_IMAGE_HEADER
#define MPI2_EXT_IMAGE_HEADER_SIZE (0x40)
/* defines for the ImageType field */
-#define MPI2_EXT_IMAGE_TYPE_UNSPECIFIED (0x00)
-#define MPI2_EXT_IMAGE_TYPE_FW (0x01)
-#define MPI2_EXT_IMAGE_TYPE_NVDATA (0x03)
-#define MPI2_EXT_IMAGE_TYPE_BOOTLOADER (0x04)
-#define MPI2_EXT_IMAGE_TYPE_INITIALIZATION (0x05)
-#define MPI2_EXT_IMAGE_TYPE_FLASH_LAYOUT (0x06)
-#define MPI2_EXT_IMAGE_TYPE_SUPPORTED_DEVICES (0x07)
-#define MPI2_EXT_IMAGE_TYPE_MEGARAID (0x08)
-
-#define MPI2_EXT_IMAGE_TYPE_MAX (MPI2_EXT_IMAGE_TYPE_MEGARAID)
+#define MPI2_EXT_IMAGE_TYPE_UNSPECIFIED (0x00)
+#define MPI2_EXT_IMAGE_TYPE_FW (0x01)
+#define MPI2_EXT_IMAGE_TYPE_NVDATA (0x03)
+#define MPI2_EXT_IMAGE_TYPE_BOOTLOADER (0x04)
+#define MPI2_EXT_IMAGE_TYPE_INITIALIZATION (0x05)
+#define MPI2_EXT_IMAGE_TYPE_FLASH_LAYOUT (0x06)
+#define MPI2_EXT_IMAGE_TYPE_SUPPORTED_DEVICES (0x07)
+#define MPI2_EXT_IMAGE_TYPE_MEGARAID (0x08)
+#define MPI2_EXT_IMAGE_TYPE_MIN_PRODUCT_SPECIFIC (0x80)
+#define MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC (0xFF)
+#define MPI2_EXT_IMAGE_TYPE_MAX \
+ (MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC) /* deprecated */
@@ -1568,7 +1577,7 @@ typedef struct _MPI2_PWR_MGMT_CONTROL_REQUEST {
/* defines for the Feature field */
#define MPI2_PM_CONTROL_FEATURE_DA_PHY_POWER_COND (0x01)
#define MPI2_PM_CONTROL_FEATURE_PORT_WIDTH_MODULATION (0x02)
-#define MPI2_PM_CONTROL_FEATURE_PCIE_LINK (0x03)
+#define MPI2_PM_CONTROL_FEATURE_PCIE_LINK (0x03) /* obsolete */
#define MPI2_PM_CONTROL_FEATURE_IOC_SPEED (0x04)
#define MPI2_PM_CONTROL_FEATURE_MIN_PRODUCT_SPECIFIC (0x80)
#define MPI2_PM_CONTROL_FEATURE_MAX_PRODUCT_SPECIFIC (0xFF)
@@ -1597,14 +1606,14 @@ typedef struct _MPI2_PWR_MGMT_CONTROL_REQUEST {
/* parameter usage for the MPI2_PM_CONTROL_FEATURE_PCIE_LINK Feature */
/* Parameter1 indicates desired PCIe link speed using these defines */
-#define MPI2_PM_CONTROL_PARAM1_PCIE_2_5_GBPS (0x00)
-#define MPI2_PM_CONTROL_PARAM1_PCIE_5_0_GBPS (0x01)
-#define MPI2_PM_CONTROL_PARAM1_PCIE_8_0_GBPS (0x02)
+#define MPI2_PM_CONTROL_PARAM1_PCIE_2_5_GBPS (0x00) /* obsolete */
+#define MPI2_PM_CONTROL_PARAM1_PCIE_5_0_GBPS (0x01) /* obsolete */
+#define MPI2_PM_CONTROL_PARAM1_PCIE_8_0_GBPS (0x02) /* obsolete */
/* Parameter2 indicates desired PCIe link width using these defines */
-#define MPI2_PM_CONTROL_PARAM2_WIDTH_X1 (0x01)
-#define MPI2_PM_CONTROL_PARAM2_WIDTH_X2 (0x02)
-#define MPI2_PM_CONTROL_PARAM2_WIDTH_X4 (0x04)
-#define MPI2_PM_CONTROL_PARAM2_WIDTH_X8 (0x08)
+#define MPI2_PM_CONTROL_PARAM2_WIDTH_X1 (0x01) /* obsolete */
+#define MPI2_PM_CONTROL_PARAM2_WIDTH_X2 (0x02) /* obsolete */
+#define MPI2_PM_CONTROL_PARAM2_WIDTH_X4 (0x04) /* obsolete */
+#define MPI2_PM_CONTROL_PARAM2_WIDTH_X8 (0x08) /* obsolete */
/* Parameter3 and Parameter4 are reserved */
/* parameter usage for the MPI2_PM_CONTROL_FEATURE_IOC_SPEED Feature */
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_raid.h b/drivers/scsi/mpt2sas/mpi/mpi2_raid.h
index bd61a7b60a2b..0601612b875a 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_raid.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_raid.h
@@ -6,7 +6,7 @@
* Title: MPI Integrated RAID messages and structures
* Creation Date: April 26, 2007
*
- * mpi2_raid.h Version: 02.00.05
+ * mpi2_raid.h Version: 02.00.06
*
* Version History
* ---------------
@@ -23,6 +23,10 @@
* 07-30-09 02.00.04 Added proper define for the Use Default Settings bit of
* VolumeCreationFlags and marked the old one as obsolete.
* 05-12-10 02.00.05 Added MPI2_RAID_VOL_FLAGS_OP_MDC define.
+ * 08-24-10 02.00.06 Added MPI2_RAID_ACTION_COMPATIBILITY_CHECK along with
+ * related structures and defines.
+ * Added product-specific range to RAID Action values.
+
* --------------------------------------------------------------------------
*/
@@ -176,7 +180,9 @@ typedef struct _MPI2_RAID_ACTION_REQUEST
#define MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED (0x20)
#define MPI2_RAID_ACTION_START_RAID_FUNCTION (0x21)
#define MPI2_RAID_ACTION_STOP_RAID_FUNCTION (0x22)
-
+#define MPI2_RAID_ACTION_COMPATIBILITY_CHECK (0x23)
+#define MPI2_RAID_ACTION_MIN_PRODUCT_SPECIFIC (0x80)
+#define MPI2_RAID_ACTION_MAX_PRODUCT_SPECIFIC (0xFF)
/* RAID Volume Creation Structure */
@@ -244,6 +250,23 @@ typedef struct _MPI2_RAID_ONLINE_CAPACITY_EXPANSION
Mpi2RaidOnlineCapacityExpansion_t,
MPI2_POINTER pMpi2RaidOnlineCapacityExpansion_t;
+/* RAID Compatibility Input Structure */
+
+typedef struct _MPI2_RAID_COMPATIBILITY_INPUT_STRUCT {
+ U16 SourceDevHandle; /* 0x00 */
+ U16 CandidateDevHandle; /* 0x02 */
+ U32 Flags; /* 0x04 */
+ U32 Reserved1; /* 0x08 */
+ U32 Reserved2; /* 0x0C */
+} MPI2_RAID_COMPATIBILITY_INPUT_STRUCT,
+MPI2_POINTER PTR_MPI2_RAID_COMPATIBILITY_INPUT_STRUCT,
+Mpi2RaidCompatibilityInputStruct_t,
+MPI2_POINTER pMpi2RaidCompatibilityInputStruct_t;
+
+/* defines for RAID Compatibility Structure Flags field */
+#define MPI2_RAID_COMPAT_SOURCE_IS_VOLUME_FLAG (0x00000002)
+#define MPI2_RAID_COMPAT_REPORT_SOURCE_INFO_FLAG (0x00000001)
+
/* RAID Volume Indicator Structure */
@@ -263,15 +286,45 @@ typedef struct _MPI2_RAID_VOL_INDICATOR
#define MPI2_RAID_VOL_FLAGS_OP_RESYNC (0x00000003)
#define MPI2_RAID_VOL_FLAGS_OP_MDC (0x00000004)
+/* RAID Compatibility Result Structure */
+
+typedef struct _MPI2_RAID_COMPATIBILITY_RESULT_STRUCT {
+ U8 State; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U16 Reserved2; /* 0x02 */
+ U32 GenericAttributes; /* 0x04 */
+ U32 OEMSpecificAttributes; /* 0x08 */
+ U32 Reserved3; /* 0x0C */
+ U32 Reserved4; /* 0x10 */
+} MPI2_RAID_COMPATIBILITY_RESULT_STRUCT,
+MPI2_POINTER PTR_MPI2_RAID_COMPATIBILITY_RESULT_STRUCT,
+Mpi2RaidCompatibilityResultStruct_t,
+MPI2_POINTER pMpi2RaidCompatibilityResultStruct_t;
+
+/* defines for RAID Compatibility Result Structure State field */
+#define MPI2_RAID_COMPAT_STATE_COMPATIBLE (0x00)
+#define MPI2_RAID_COMPAT_STATE_NOT_COMPATIBLE (0x01)
+
+/* defines for RAID Compatibility Result Structure GenericAttributes field */
+#define MPI2_RAID_COMPAT_GENATTRIB_4K_SECTOR (0x00000010)
+
+#define MPI2_RAID_COMPAT_GENATTRIB_MEDIA_MASK (0x0000000C)
+#define MPI2_RAID_COMPAT_GENATTRIB_SOLID_STATE_DRIVE (0x00000008)
+#define MPI2_RAID_COMPAT_GENATTRIB_HARD_DISK_DRIVE (0x00000004)
+
+#define MPI2_RAID_COMPAT_GENATTRIB_PROTOCOL_MASK (0x00000003)
+#define MPI2_RAID_COMPAT_GENATTRIB_SAS_PROTOCOL (0x00000002)
+#define MPI2_RAID_COMPAT_GENATTRIB_SATA_PROTOCOL (0x00000001)
/* RAID Action Reply ActionData union */
typedef union _MPI2_RAID_ACTION_REPLY_DATA
{
- U32 Word[5];
- MPI2_RAID_VOL_INDICATOR RaidVolumeIndicator;
- U16 VolDevHandle;
- U8 VolumeState;
- U8 PhysDiskNum;
+ U32 Word[5];
+ MPI2_RAID_VOL_INDICATOR RaidVolumeIndicator;
+ U16 VolDevHandle;
+ U8 VolumeState;
+ U8 PhysDiskNum;
+ MPI2_RAID_COMPATIBILITY_RESULT_STRUCT RaidCompatibilityResult;
} MPI2_RAID_ACTION_REPLY_DATA, MPI2_POINTER PTR_MPI2_RAID_ACTION_REPLY_DATA,
Mpi2RaidActionReplyData_t, MPI2_POINTER pMpi2RaidActionReplyData_t;
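
The new compatibility-result layout above is mostly bitfields. A minimal userspace decoder for the GenericAttributes word, using only the mask and flag values introduced in this hunk (the surrounding MPI types and any firmware plumbing are omitted), might look like this:

#include <stdio.h>
#include <stdint.h>

/* Values copied from the defines added above. */
#define MPI2_RAID_COMPAT_GENATTRIB_4K_SECTOR          (0x00000010)
#define MPI2_RAID_COMPAT_GENATTRIB_MEDIA_MASK         (0x0000000C)
#define MPI2_RAID_COMPAT_GENATTRIB_SOLID_STATE_DRIVE  (0x00000008)
#define MPI2_RAID_COMPAT_GENATTRIB_HARD_DISK_DRIVE    (0x00000004)
#define MPI2_RAID_COMPAT_GENATTRIB_PROTOCOL_MASK      (0x00000003)
#define MPI2_RAID_COMPAT_GENATTRIB_SAS_PROTOCOL       (0x00000002)
#define MPI2_RAID_COMPAT_GENATTRIB_SATA_PROTOCOL      (0x00000001)

static void decode_generic_attributes(uint32_t attr)
{
	uint32_t media = attr & MPI2_RAID_COMPAT_GENATTRIB_MEDIA_MASK;
	uint32_t proto = attr & MPI2_RAID_COMPAT_GENATTRIB_PROTOCOL_MASK;

	printf("sector size : %s\n",
	       (attr & MPI2_RAID_COMPAT_GENATTRIB_4K_SECTOR) ? "4K" : "512");
	printf("media       : %s\n",
	       media == MPI2_RAID_COMPAT_GENATTRIB_SOLID_STATE_DRIVE ? "SSD" :
	       media == MPI2_RAID_COMPAT_GENATTRIB_HARD_DISK_DRIVE   ? "HDD" :
	       "unknown");
	printf("protocol    : %s\n",
	       proto == MPI2_RAID_COMPAT_GENATTRIB_SAS_PROTOCOL  ? "SAS" :
	       proto == MPI2_RAID_COMPAT_GENATTRIB_SATA_PROTOCOL ? "SATA" :
	       "unknown");
}

int main(void)
{
	/* Example: a 512-byte-sector SAS SSD as the firmware might report it. */
	decode_generic_attributes(MPI2_RAID_COMPAT_GENATTRIB_SOLID_STATE_DRIVE |
				  MPI2_RAID_COMPAT_GENATTRIB_SAS_PROTOCOL);
	return 0;
}
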
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_tool.h b/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
index 2a4bceda364b..3cbe677c6886 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
@@ -6,7 +6,7 @@
* Title: MPI diagnostic tool structures and definitions
* Creation Date: March 26, 2007
*
- * mpi2_tool.h Version: 02.00.06
+ * mpi2_tool.h Version: 02.00.07
*
* Version History
* ---------------
@@ -25,6 +25,8 @@
* 05-12-10 02.00.05 Added Diagnostic Data Upload tool.
* 08-11-10 02.00.06 Added defines that were missing for Diagnostic Buffer
* Post Request.
+ * 05-25-11 02.00.07 Added Flags field and related defines to
+ * MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST.
* --------------------------------------------------------------------------
*/
@@ -181,7 +183,7 @@ typedef struct _MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST {
U8 DevIndex; /* 0x14 */
U8 Action; /* 0x15 */
U8 SGLFlags; /* 0x16 */
- U8 Reserved7; /* 0x17 */
+ U8 Flags; /* 0x17 */
U16 TxDataLength; /* 0x18 */
U16 RxDataLength; /* 0x1A */
U32 Reserved8; /* 0x1C */
@@ -205,6 +207,9 @@ typedef struct _MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST {
/* use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */
+/* values for the Flags field */
+#define MPI2_TOOL_ISTWI_FLAG_AUTO_RESERVE_RELEASE (0x80)
+#define MPI2_TOOL_ISTWI_FLAG_PAGE_ADDR_MASK (0x07)
/* Toolbox ISTWI Read Write Tool reply message */
typedef struct _MPI2_TOOLBOX_ISTWI_REPLY {
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index beda04a8404b..0b2c95583660 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -57,6 +57,7 @@
#include <linux/sort.h>
#include <linux/io.h>
#include <linux/time.h>
+#include <linux/kthread.h>
#include <linux/aer.h>
#include "mpt2sas_base.h"
@@ -65,6 +66,8 @@ static MPT_CALLBACK mpt_callbacks[MPT_MAX_CALLBACKS];
#define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */
+#define MAX_HBA_QUEUE_DEPTH 30000
+#define MAX_CHAIN_DEPTH 100000
static int max_queue_depth = -1;
module_param(max_queue_depth, int, 0);
MODULE_PARM_DESC(max_queue_depth, " max controller queue depth ");
@@ -89,19 +92,6 @@ static int disable_discovery = -1;
module_param(disable_discovery, int, 0);
MODULE_PARM_DESC(disable_discovery, " disable discovery ");
-
-/* diag_buffer_enable is bitwise
- * bit 0 set = TRACE
- * bit 1 set = SNAPSHOT
- * bit 2 set = EXTENDED
- *
- * Either bit can be set, or both
- */
-static int diag_buffer_enable;
-module_param(diag_buffer_enable, int, 0);
-MODULE_PARM_DESC(diag_buffer_enable, " post diag buffers "
- "(TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)");
-
/**
* _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
*
@@ -120,10 +110,34 @@ _scsih_set_fwfault_debug(const char *val, struct kernel_param *kp)
ioc->fwfault_debug = mpt2sas_fwfault_debug;
return 0;
}
+
module_param_call(mpt2sas_fwfault_debug, _scsih_set_fwfault_debug,
param_get_int, &mpt2sas_fwfault_debug, 0644);
/**
+ * mpt2sas_remove_dead_ioc_func - kthread context to remove dead ioc
+ * @arg: input argument, used to derive ioc
+ *
+ * Return 0 if controller is removed from pci subsystem.
+ * Return -1 for other case.
+ */
+static int mpt2sas_remove_dead_ioc_func(void *arg)
+{
+ struct MPT2SAS_ADAPTER *ioc = (struct MPT2SAS_ADAPTER *)arg;
+ struct pci_dev *pdev;
+
+ if ((ioc == NULL))
+ return -1;
+
+ pdev = ioc->pdev;
+ if ((pdev == NULL))
+ return -1;
+ pci_remove_bus_device(pdev);
+ return 0;
+}
+
+
+/**
* _base_fault_reset_work - workq handling ioc fault conditions
* @work: input argument, used to derive ioc
* Context: sleep.
@@ -138,6 +152,7 @@ _base_fault_reset_work(struct work_struct *work)
unsigned long flags;
u32 doorbell;
int rc;
+ struct task_struct *p;
spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
if (ioc->shost_recovery)
@@ -145,6 +160,39 @@ _base_fault_reset_work(struct work_struct *work)
spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
doorbell = mpt2sas_base_get_iocstate(ioc, 0);
+ if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_MASK) {
+ printk(MPT2SAS_INFO_FMT "%s : SAS host is non-operational !!!!\n",
+ ioc->name, __func__);
+
+ /*
+ * Call _scsih_flush_pending_cmds callback so that we flush all
+	 * pending commands back to OS. This call is required to avoid
+ * deadlock at block layer. Dead IOC will fail to do diag reset,
+ * and this call is safe since dead ioc will never return any
+ * command back from HW.
+ */
+ ioc->schedule_dead_ioc_flush_running_cmds(ioc);
+ /*
+ * Set remove_host flag early since kernel thread will
+ * take some time to execute.
+ */
+ ioc->remove_host = 1;
+ /*Remove the Dead Host */
+ p = kthread_run(mpt2sas_remove_dead_ioc_func, ioc,
+ "mpt2sas_dead_ioc_%d", ioc->id);
+ if (IS_ERR(p)) {
+ printk(MPT2SAS_ERR_FMT
+ "%s: Running mpt2sas_dead_ioc thread failed !!!!\n",
+ ioc->name, __func__);
+ } else {
+ printk(MPT2SAS_ERR_FMT
+ "%s: Running mpt2sas_dead_ioc thread success !!!!\n",
+ ioc->name, __func__);
+ }
+
+ return; /* don't rearm timer */
+ }
+
if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
rc = mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
FORCE_BIG_HAMMER);
@@ -1346,7 +1394,7 @@ _base_enable_msix(struct MPT2SAS_ADAPTER *ioc)
if (_base_check_enable_msix(ioc) != 0)
goto try_ioapic;
- ioc->reply_queue_count = min_t(u8, ioc->cpu_count,
+ ioc->reply_queue_count = min_t(int, ioc->cpu_count,
ioc->msix_vector_count);
entries = kcalloc(ioc->reply_queue_count, sizeof(struct msix_entry),
@@ -1916,6 +1964,10 @@ _base_display_intel_branding(struct MPT2SAS_ADAPTER *ioc)
printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
MPT2SAS_INTEL_RMS2LL040_BRANDING);
break;
+ case MPT2SAS_INTEL_RAMSDALE_SSDID:
+ printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ MPT2SAS_INTEL_RAMSDALE_BRANDING);
+ break;
default:
break;
}
@@ -1925,6 +1977,22 @@ _base_display_intel_branding(struct MPT2SAS_ADAPTER *ioc)
printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
MPT2SAS_INTEL_RS25GB008_BRANDING);
break;
+ case MPT2SAS_INTEL_RMS25JB080_SSDID:
+ printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ MPT2SAS_INTEL_RMS25JB080_BRANDING);
+ break;
+ case MPT2SAS_INTEL_RMS25JB040_SSDID:
+ printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ MPT2SAS_INTEL_RMS25JB040_BRANDING);
+ break;
+ case MPT2SAS_INTEL_RMS25KB080_SSDID:
+ printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ MPT2SAS_INTEL_RMS25KB080_BRANDING);
+ break;
+ case MPT2SAS_INTEL_RMS25KB040_SSDID:
+ printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ MPT2SAS_INTEL_RMS25KB040_BRANDING);
+ break;
default:
break;
}
@@ -2311,8 +2379,6 @@ _base_release_memory_pools(struct MPT2SAS_ADAPTER *ioc)
}
if (ioc->chain_dma_pool)
pci_pool_destroy(ioc->chain_dma_pool);
- }
- if (ioc->chain_lookup) {
free_pages((ulong)ioc->chain_lookup, ioc->chain_pages);
ioc->chain_lookup = NULL;
}
@@ -2330,9 +2396,7 @@ static int
_base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
{
struct mpt2sas_facts *facts;
- u32 queue_size, queue_diff;
u16 max_sge_elements;
- u16 num_of_reply_frames;
u16 chains_needed_per_io;
u32 sz, total_sz, reply_post_free_sz;
u32 retry_sz;
@@ -2359,7 +2423,8 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
max_request_credit = (max_queue_depth < facts->RequestCredit)
? max_queue_depth : facts->RequestCredit;
else
- max_request_credit = facts->RequestCredit;
+ max_request_credit = min_t(u16, facts->RequestCredit,
+ MAX_HBA_QUEUE_DEPTH);
ioc->hba_queue_depth = max_request_credit;
ioc->hi_priority_depth = facts->HighPriorityCredit;
@@ -2400,50 +2465,25 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
}
ioc->chains_needed_per_io = chains_needed_per_io;
- /* reply free queue sizing - taking into account for events */
- num_of_reply_frames = ioc->hba_queue_depth + 32;
-
- /* number of replies frames can't be a multiple of 16 */
- /* decrease number of reply frames by 1 */
- if (!(num_of_reply_frames % 16))
- num_of_reply_frames--;
-
- /* calculate number of reply free queue entries
- * (must be multiple of 16)
- */
-
- /* (we know reply_free_queue_depth is not a multiple of 16) */
- queue_size = num_of_reply_frames;
- queue_size += 16 - (queue_size % 16);
- ioc->reply_free_queue_depth = queue_size;
-
- /* reply descriptor post queue sizing */
- /* this size should be the number of request frames + number of reply
- * frames
- */
-
- queue_size = ioc->hba_queue_depth + num_of_reply_frames + 1;
- /* round up to 16 byte boundary */
- if (queue_size % 16)
- queue_size += 16 - (queue_size % 16);
-
- /* check against IOC maximum reply post queue depth */
- if (queue_size > facts->MaxReplyDescriptorPostQueueDepth) {
- queue_diff = queue_size -
- facts->MaxReplyDescriptorPostQueueDepth;
+ /* reply free queue sizing - taking into account for 64 FW events */
+ ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
- /* round queue_diff up to multiple of 16 */
- if (queue_diff % 16)
- queue_diff += 16 - (queue_diff % 16);
-
- /* adjust hba_queue_depth, reply_free_queue_depth,
- * and queue_size
- */
- ioc->hba_queue_depth -= (queue_diff / 2);
- ioc->reply_free_queue_depth -= (queue_diff / 2);
- queue_size = facts->MaxReplyDescriptorPostQueueDepth;
+ /* align the reply post queue on the next 16 count boundary */
+ if (!ioc->reply_free_queue_depth % 16)
+ ioc->reply_post_queue_depth = ioc->reply_free_queue_depth + 16;
+ else
+ ioc->reply_post_queue_depth = ioc->reply_free_queue_depth +
+ 32 - (ioc->reply_free_queue_depth % 16);
+ if (ioc->reply_post_queue_depth >
+ facts->MaxReplyDescriptorPostQueueDepth) {
+ ioc->reply_post_queue_depth = min_t(u16,
+ (facts->MaxReplyDescriptorPostQueueDepth -
+ (facts->MaxReplyDescriptorPostQueueDepth % 16)),
+ (ioc->hba_queue_depth - (ioc->hba_queue_depth % 16)));
+ ioc->reply_free_queue_depth = ioc->reply_post_queue_depth - 16;
+ ioc->hba_queue_depth = ioc->reply_free_queue_depth - 64;
}
- ioc->reply_post_queue_depth = queue_size;
+
dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "scatter gather: "
"sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), "
@@ -2529,15 +2569,12 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
"depth(%d)\n", ioc->name, ioc->request,
ioc->scsiio_depth));
- /* loop till the allocation succeeds */
- do {
- sz = ioc->chain_depth * sizeof(struct chain_tracker);
- ioc->chain_pages = get_order(sz);
- ioc->chain_lookup = (struct chain_tracker *)__get_free_pages(
- GFP_KERNEL, ioc->chain_pages);
- if (ioc->chain_lookup == NULL)
- ioc->chain_depth -= 100;
- } while (ioc->chain_lookup == NULL);
+ ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH);
+ sz = ioc->chain_depth * sizeof(struct chain_tracker);
+ ioc->chain_pages = get_order(sz);
+
+ ioc->chain_lookup = (struct chain_tracker *)__get_free_pages(
+ GFP_KERNEL, ioc->chain_pages);
ioc->chain_dma_pool = pci_pool_create("chain pool", ioc->pdev,
ioc->request_sz, 16, 0);
if (!ioc->chain_dma_pool) {
@@ -3136,8 +3173,8 @@ mpt2sas_base_sas_iounit_control(struct MPT2SAS_ADAPTER *ioc,
if (mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET)
ioc->ioc_link_reset_in_progress = 1;
- mpt2sas_base_put_smid_default(ioc, smid);
init_completion(&ioc->base_cmds.done);
+ mpt2sas_base_put_smid_default(ioc, smid);
timeleft = wait_for_completion_timeout(&ioc->base_cmds.done,
msecs_to_jiffies(10000));
if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
@@ -3238,8 +3275,8 @@ mpt2sas_base_scsi_enclosure_processor(struct MPT2SAS_ADAPTER *ioc,
request = mpt2sas_base_get_msg_frame(ioc, smid);
ioc->base_cmds.smid = smid;
memcpy(request, mpi_request, sizeof(Mpi2SepReply_t));
- mpt2sas_base_put_smid_default(ioc, smid);
init_completion(&ioc->base_cmds.done);
+ mpt2sas_base_put_smid_default(ioc, smid);
timeleft = wait_for_completion_timeout(&ioc->base_cmds.done,
msecs_to_jiffies(10000));
if (!(ioc->base_cmds.status & MPT2_CMD_COMPLETE)) {
@@ -3746,8 +3783,8 @@ _base_event_notification(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
mpi_request->EventMasks[i] =
cpu_to_le32(ioc->event_masks[i]);
- mpt2sas_base_put_smid_default(ioc, smid);
init_completion(&ioc->base_cmds.done);
+ mpt2sas_base_put_smid_default(ioc, smid);
timeleft = wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
if (!(ioc->base_cmds.status & MPT2_CMD_COMPLETE)) {
printk(MPT2SAS_ERR_FMT "%s: timeout\n",
@@ -4062,7 +4099,8 @@ _base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
ioc->reply_free[i] = cpu_to_le32(reply_address);
/* initialize reply queues */
- _base_assign_reply_queues(ioc);
+ if (ioc->is_driver_loading)
+ _base_assign_reply_queues(ioc);
/* initialize Reply Post Free Queue */
reply_post_free = (long)ioc->reply_post_free;
@@ -4110,24 +4148,17 @@ _base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
if (ioc->is_driver_loading) {
-
-
-
- ioc->wait_for_discovery_to_complete =
- _base_determine_wait_on_discovery(ioc);
- return r; /* scan_start and scan_finished support */
- }
-
-
- if (ioc->wait_for_discovery_to_complete && ioc->is_warpdrive) {
- if (ioc->manu_pg10.OEMIdentifier == 0x80) {
+ if (ioc->is_warpdrive && ioc->manu_pg10.OEMIdentifier
+ == 0x80) {
hide_flag = (u8) (ioc->manu_pg10.OEMSpecificFlags0 &
MFG_PAGE10_HIDE_SSDS_MASK);
if (hide_flag != MFG_PAGE10_HIDE_SSDS_MASK)
ioc->mfg_pg10_hide_flag = hide_flag;
}
+ ioc->wait_for_discovery_to_complete =
+ _base_determine_wait_on_discovery(ioc);
+ return r; /* scan_start and scan_finished support */
}
-
r = _base_send_port_enable(ioc, sleep_flag);
if (r)
return r;
@@ -4206,7 +4237,7 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
r = mpt2sas_base_map_resources(ioc);
if (r)
- return r;
+ goto out_free_resources;
if (ioc->is_warpdrive) {
ioc->reply_post_host_index[0] =
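
To see what the reworked sizing hunk in _base_allocate_memory_pools computes, here is a standalone rendering of the same arithmetic (plain C, example credit values invented; the modulo test is written with explicit parentheses to spell out the intent of the kernel hunk): the reply free queue gets 64 extra slots for firmware events, the reply post queue is sized at least 16 entries larger and aligned to a 16-entry boundary, and everything is clamped against the IOC's advertised maximum.

#include <stdio.h>
#include <stdint.h>

#define MAX_HBA_QUEUE_DEPTH 30000

static void size_queues(uint16_t request_credit, uint16_t max_reply_post_depth)
{
	uint32_t hba_queue_depth, reply_free_depth, reply_post_depth;

	hba_queue_depth = request_credit < MAX_HBA_QUEUE_DEPTH ?
			  request_credit : MAX_HBA_QUEUE_DEPTH;

	/* 64 extra reply frames reserved for firmware events. */
	reply_free_depth = hba_queue_depth + 64;

	/* Pad the reply post queue past the next multiple of 16. */
	if ((reply_free_depth % 16) == 0)
		reply_post_depth = reply_free_depth + 16;
	else
		reply_post_depth = reply_free_depth +
				   32 - (reply_free_depth % 16);

	/* Clamp against the IOC maximum and pull the other depths down. */
	if (reply_post_depth > max_reply_post_depth) {
		uint32_t a = max_reply_post_depth - (max_reply_post_depth % 16);
		uint32_t b = hba_queue_depth - (hba_queue_depth % 16);

		reply_post_depth = a < b ? a : b;
		reply_free_depth = reply_post_depth - 16;
		hba_queue_depth  = reply_free_depth - 64;
	}

	printf("credit=%u -> hba=%u, reply_free=%u, reply_post=%u\n",
	       request_credit, hba_queue_depth, reply_free_depth,
	       reply_post_depth);
}

int main(void)
{
	size_queues(1000, 8192);   /* typical case, no clamping   */
	size_queues(10000, 8192);  /* clamped by the IOC maximum  */
	return 0;
}
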
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index 3c3babc7d260..c7459fdc06cc 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -69,8 +69,8 @@
#define MPT2SAS_DRIVER_NAME "mpt2sas"
#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>"
#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver"
-#define MPT2SAS_DRIVER_VERSION "10.100.00.00"
-#define MPT2SAS_MAJOR_VERSION 10
+#define MPT2SAS_DRIVER_VERSION "12.100.00.00"
+#define MPT2SAS_MAJOR_VERSION 12
#define MPT2SAS_MINOR_VERSION 100
#define MPT2SAS_BUILD_VERSION 00
#define MPT2SAS_RELEASE_VERSION 00
@@ -157,20 +157,33 @@
/*
* Intel HBA branding
*/
+#define MPT2SAS_INTEL_RMS25JB080_BRANDING \
+ "Intel(R) Integrated RAID Module RMS25JB080"
+#define MPT2SAS_INTEL_RMS25JB040_BRANDING \
+ "Intel(R) Integrated RAID Module RMS25JB040"
+#define MPT2SAS_INTEL_RMS25KB080_BRANDING \
+ "Intel(R) Integrated RAID Module RMS25KB080"
+#define MPT2SAS_INTEL_RMS25KB040_BRANDING \
+ "Intel(R) Integrated RAID Module RMS25KB040"
#define MPT2SAS_INTEL_RMS2LL080_BRANDING \
"Intel Integrated RAID Module RMS2LL080"
#define MPT2SAS_INTEL_RMS2LL040_BRANDING \
"Intel Integrated RAID Module RMS2LL040"
#define MPT2SAS_INTEL_RS25GB008_BRANDING \
"Intel(R) RAID Controller RS25GB008"
-
+#define MPT2SAS_INTEL_RAMSDALE_BRANDING \
+ "Intel 720 Series SSD"
/*
* Intel HBA SSDIDs
*/
+#define MPT2SAS_INTEL_RMS25JB080_SSDID 0x3516
+#define MPT2SAS_INTEL_RMS25JB040_SSDID 0x3517
+#define MPT2SAS_INTEL_RMS25KB080_SSDID 0x3518
+#define MPT2SAS_INTEL_RMS25KB040_SSDID 0x3519
#define MPT2SAS_INTEL_RMS2LL080_SSDID 0x350E
#define MPT2SAS_INTEL_RMS2LL040_SSDID 0x350F
#define MPT2SAS_INTEL_RS25GB008_SSDID 0x3000
-
+#define MPT2SAS_INTEL_RAMSDALE_SSDID 0x3700
/*
* HP HBA branding
@@ -373,6 +386,7 @@ struct _sas_device {
* @percent_complete: resync percent complete
* @direct_io_enabled: Whether direct io to PDs are allowed or not
* @stripe_exponent: X where 2powX is the stripe sz in blocks
+ * @block_exponent: X where 2powX is the block sz in bytes
* @max_lba: Maximum number of LBA in the volume
* @stripe_sz: Stripe Size of the volume
* @device_info: Device info of the volume member disk
@@ -394,6 +408,7 @@ struct _raid_device {
u8 percent_complete;
u8 direct_io_enabled;
u8 stripe_exponent;
+ u8 block_exponent;
u64 max_lba;
u32 stripe_sz;
u32 device_info;
@@ -623,6 +638,7 @@ enum mutex_type {
TM_MUTEX_ON = 1,
};
+typedef void (*MPT2SAS_FLUSH_RUNNING_CMDS)(struct MPT2SAS_ADAPTER *ioc);
/**
* struct MPT2SAS_ADAPTER - per adapter struct
* @list: ioc_list
@@ -665,6 +681,7 @@ enum mutex_type {
* @msix_vector_count: number msix vectors
* @cpu_msix_table: table for mapping cpus to msix index
* @cpu_msix_table_sz: table size
+ * @schedule_dead_ioc_flush_running_cmds: callback to flush pending commands
* @scsi_io_cb_idx: shost generated commands
* @tm_cb_idx: task management commands
* @scsih_cb_idx: scsih internal commands
@@ -816,6 +833,7 @@ struct MPT2SAS_ADAPTER {
resource_size_t **reply_post_host_index;
u16 cpu_msix_table_sz;
u32 ioc_reset_count;
+ MPT2SAS_FLUSH_RUNNING_CMDS schedule_dead_ioc_flush_running_cmds;
/* internal commands, callback index */
u8 scsi_io_cb_idx;
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
index aabcb911706e..7fceb899029e 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
@@ -818,6 +818,7 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
_ctl_display_some_debug(ioc, smid, "ctl_request", NULL);
#endif
+ init_completion(&ioc->ctl_cmds.done);
switch (mpi_request->Function) {
case MPI2_FUNCTION_SCSI_IO_REQUEST:
case MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
@@ -903,7 +904,6 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
timeout = MPT2_IOCTL_DEFAULT_TIMEOUT;
else
timeout = karg.timeout;
- init_completion(&ioc->ctl_cmds.done);
timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
timeout*HZ);
if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
@@ -1477,8 +1477,8 @@ _ctl_diag_register_2(struct MPT2SAS_ADAPTER *ioc,
mpi_request->ProductSpecific[i] =
cpu_to_le32(ioc->product_specific[buffer_type][i]);
- mpt2sas_base_put_smid_default(ioc, smid);
init_completion(&ioc->ctl_cmds.done);
+ mpt2sas_base_put_smid_default(ioc, smid);
timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
MPT2_IOCTL_DEFAULT_TIMEOUT*HZ);
@@ -1821,8 +1821,8 @@ _ctl_send_release(struct MPT2SAS_ADAPTER *ioc, u8 buffer_type, u8 *issue_reset)
mpi_request->VF_ID = 0; /* TODO */
mpi_request->VP_ID = 0;
- mpt2sas_base_put_smid_default(ioc, smid);
init_completion(&ioc->ctl_cmds.done);
+ mpt2sas_base_put_smid_default(ioc, smid);
timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
MPT2_IOCTL_DEFAULT_TIMEOUT*HZ);
@@ -2095,8 +2095,8 @@ _ctl_diag_read_buffer(void __user *arg, enum block_state state)
mpi_request->VF_ID = 0; /* TODO */
mpi_request->VP_ID = 0;
- mpt2sas_base_put_smid_default(ioc, smid);
init_completion(&ioc->ctl_cmds.done);
+ mpt2sas_base_put_smid_default(ioc, smid);
timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
MPT2_IOCTL_DEFAULT_TIMEOUT*HZ);
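
Several hunks in this file (and in mpt2sas_base.c above and mpt2sas_transport.c below) move init_completion() ahead of mpt2sas_base_put_smid_default(), so the waiter's completion object is fully set up before the request becomes visible to the hardware and a fast reply can race with the submitter. The standalone pthread sketch below illustrates the same ordering rule outside the kernel; the struct and helpers are local stand-ins, not the kernel's completion API:

#include <pthread.h>
#include <stdio.h>

/* Hypothetical stand-in for the driver's completion object. */
struct completion {
	pthread_mutex_t lock;
	pthread_cond_t  cond;
	int             done;
};

static void init_completion(struct completion *c)
{
	pthread_mutex_init(&c->lock, NULL);
	pthread_cond_init(&c->cond, NULL);
	c->done = 0;
}

static void complete(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = 1;
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->cond, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

/* Stand-in for the firmware reply arriving on another CPU. */
static void *reply_thread(void *arg)
{
	complete(arg);
	return NULL;
}

int main(void)
{
	struct completion done;
	pthread_t t;

	/* Initialize the completion *before* the request is posted, so a
	 * reply that races with the submitter cannot observe an
	 * uninitialized object -- the ordering the hunks above enforce. */
	init_completion(&done);
	pthread_create(&t, NULL, reply_thread, &done);	/* "put_smid" */
	wait_for_completion(&done);
	pthread_join(t, NULL);
	printf("reply consumed after race-free ordering\n");
	return 0;
}
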
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index 4e041f6d808c..193e33e28e49 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -99,7 +99,7 @@ MODULE_PARM_DESC(logging_level, " bits for enabling additional logging info "
static ushort max_sectors = 0xFFFF;
module_param(max_sectors, ushort, 0);
-MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 8192 default=8192");
+MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 32767 default=32767");
 /* scsi-mid layer global parameter is max_report_luns, which is 511 */
#define MPT2SAS_MAX_LUN (16895)
@@ -612,13 +612,17 @@ _scsih_sas_device_add(struct MPT2SAS_ADAPTER *ioc,
if (!mpt2sas_transport_port_add(ioc, sas_device->handle,
sas_device->sas_address_parent)) {
_scsih_sas_device_remove(ioc, sas_device);
- } else if (!sas_device->starget) {
- if (!ioc->is_driver_loading)
- mpt2sas_transport_port_remove(ioc,
- sas_device->sas_address,
- sas_device->sas_address_parent);
- _scsih_sas_device_remove(ioc, sas_device);
- }
+ } else if (!sas_device->starget) {
+ /* When asyn scanning is enabled, its not possible to remove
+ * devices while scanning is turned on due to an oops in
+ * scsi_sysfs_add_sdev()->add_device()->sysfs_addrm_start()
+ */
+ if (!ioc->is_driver_loading)
+ mpt2sas_transport_port_remove(ioc,
+ sas_device->sas_address,
+ sas_device->sas_address_parent);
+ _scsih_sas_device_remove(ioc, sas_device);
+ }
}
/**
@@ -1007,8 +1011,8 @@ _scsih_get_chain_buffer_tracker(struct MPT2SAS_ADAPTER *ioc, u16 smid)
spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
if (list_empty(&ioc->free_chain_list)) {
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
- printk(MPT2SAS_WARN_FMT "chain buffers not available\n",
- ioc->name);
+ dfailprintk(ioc, printk(MPT2SAS_WARN_FMT "chain buffers not "
+ "available\n", ioc->name));
return NULL;
}
chain_req = list_entry(ioc->free_chain_list.next,
@@ -1449,7 +1453,7 @@ _scsih_slave_destroy(struct scsi_device *sdev)
spin_lock_irqsave(&ioc->sas_device_lock, flags);
sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
sas_target_priv_data->sas_address);
- if (sas_device)
+ if (sas_device && !sas_target_priv_data->num_luns)
sas_device->starget = NULL;
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}
@@ -1776,11 +1780,9 @@ _scsih_init_warpdrive_properties(struct MPT2SAS_ADAPTER *ioc,
Mpi2ConfigReply_t mpi_reply;
u16 sz;
u8 num_pds, count;
- u64 mb = 1024 * 1024;
- u64 tb_2 = 2 * mb * mb;
- u64 capacity;
- u32 stripe_sz;
- u8 i, stripe_exp;
+ unsigned long stripe_sz, block_sz;
+ u8 stripe_exp, block_exp;
+ u64 dev_max_lba;
if (!ioc->is_warpdrive)
return;
@@ -1844,51 +1846,57 @@ _scsih_init_warpdrive_properties(struct MPT2SAS_ADAPTER *ioc,
vol_pg0->PhysDisk[count].PhysDiskNum);
goto out_error;
}
+ /* Disable direct I/O if member drive lba exceeds 4 bytes */
+ dev_max_lba = le64_to_cpu(pd_pg0.DeviceMaxLBA);
+ if (dev_max_lba >> 32) {
+ printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is "
+ "disabled for the drive with handle(0x%04x) member"
+ "handle (0x%04x) unsupported max lba 0x%016llx\n",
+ ioc->name, raid_device->handle,
+ le16_to_cpu(pd_pg0.DevHandle),
+ (unsigned long long)dev_max_lba);
+ goto out_error;
+ }
+
raid_device->pd_handle[count] = le16_to_cpu(pd_pg0.DevHandle);
}
/*
* Assumption for WD: Direct I/O is not supported if the volume is
- * not RAID0, if the stripe size is not 64KB, if the block size is
- * not 512 and if the volume size is >2TB
+ * not RAID0
*/
- if (raid_device->volume_type != MPI2_RAID_VOL_TYPE_RAID0 ||
- le16_to_cpu(vol_pg0->BlockSize) != 512) {
+ if (raid_device->volume_type != MPI2_RAID_VOL_TYPE_RAID0) {
printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is disabled "
"for the drive with handle(0x%04x): type=%d, "
"s_sz=%uK, blk_size=%u\n", ioc->name,
raid_device->handle, raid_device->volume_type,
- le32_to_cpu(vol_pg0->StripeSize)/2,
+ (le32_to_cpu(vol_pg0->StripeSize) *
+ le16_to_cpu(vol_pg0->BlockSize)) / 1024,
le16_to_cpu(vol_pg0->BlockSize));
goto out_error;
}
- capacity = (u64) le16_to_cpu(vol_pg0->BlockSize) *
- (le64_to_cpu(vol_pg0->MaxLBA) + 1);
-
- if (capacity > tb_2) {
+ stripe_sz = le32_to_cpu(vol_pg0->StripeSize);
+ stripe_exp = find_first_bit(&stripe_sz, 32);
+ if (stripe_exp == 32) {
printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is disabled "
- "for the drive with handle(0x%04x) since drive sz > 2TB\n",
- ioc->name, raid_device->handle);
+ "for the drive with handle(0x%04x) invalid stripe sz %uK\n",
+ ioc->name, raid_device->handle,
+ (le32_to_cpu(vol_pg0->StripeSize) *
+ le16_to_cpu(vol_pg0->BlockSize)) / 1024);
goto out_error;
}
-
- stripe_sz = le32_to_cpu(vol_pg0->StripeSize);
- stripe_exp = 0;
- for (i = 0; i < 32; i++) {
- if (stripe_sz & 1)
- break;
- stripe_exp++;
- stripe_sz >>= 1;
- }
- if (i == 32) {
+ raid_device->stripe_exponent = stripe_exp;
+ block_sz = le16_to_cpu(vol_pg0->BlockSize);
+ block_exp = find_first_bit(&block_sz, 16);
+ if (block_exp == 16) {
printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is disabled "
- "for the drive with handle(0x%04x) invalid stripe sz %uK\n",
+ "for the drive with handle(0x%04x) invalid block sz %u\n",
ioc->name, raid_device->handle,
- le32_to_cpu(vol_pg0->StripeSize)/2);
+ le16_to_cpu(vol_pg0->BlockSize));
goto out_error;
}
- raid_device->stripe_exponent = stripe_exp;
+ raid_device->block_exponent = block_exp;
raid_device->direct_io_enabled = 1;
printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is Enabled for the drive"
@@ -3804,8 +3812,9 @@ _scsih_setup_direct_io(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
{
u32 v_lba, p_lba, stripe_off, stripe_unit, column, io_size;
u32 stripe_sz, stripe_exp;
- u8 num_pds, *cdb_ptr, *tmp_ptr, *lba_ptr1, *lba_ptr2;
+ u8 num_pds, *cdb_ptr, i;
u8 cdb0 = scmd->cmnd[0];
+ u64 v_llba;
/*
	 * Try Direct I/O to RAID member disks
@@ -3816,15 +3825,11 @@ _scsih_setup_direct_io(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
if ((cdb0 < READ_16) || !(cdb_ptr[2] | cdb_ptr[3] | cdb_ptr[4]
| cdb_ptr[5])) {
- io_size = scsi_bufflen(scmd) >> 9;
+ io_size = scsi_bufflen(scmd) >>
+ raid_device->block_exponent;
+ i = (cdb0 < READ_16) ? 2 : 6;
/* get virtual lba */
- lba_ptr1 = lba_ptr2 = (cdb0 < READ_16) ? &cdb_ptr[2] :
- &cdb_ptr[6];
- tmp_ptr = (u8 *)&v_lba + 3;
- *tmp_ptr-- = *lba_ptr1++;
- *tmp_ptr-- = *lba_ptr1++;
- *tmp_ptr-- = *lba_ptr1++;
- *tmp_ptr = *lba_ptr1;
+ v_lba = be32_to_cpu(*(__be32 *)(&cdb_ptr[i]));
if (((u64)v_lba + (u64)io_size - 1) <=
(u32)raid_device->max_lba) {
@@ -3843,11 +3848,39 @@ _scsih_setup_direct_io(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
mpi_request->DevHandle =
cpu_to_le16(raid_device->
pd_handle[column]);
- tmp_ptr = (u8 *)&p_lba + 3;
- *lba_ptr2++ = *tmp_ptr--;
- *lba_ptr2++ = *tmp_ptr--;
- *lba_ptr2++ = *tmp_ptr--;
- *lba_ptr2 = *tmp_ptr;
+ (*(__be32 *)(&cdb_ptr[i])) =
+ cpu_to_be32(p_lba);
+ /*
+ * WD: To indicate this I/O is directI/O
+ */
+ _scsih_scsi_direct_io_set(ioc, smid, 1);
+ }
+ }
+ } else {
+ io_size = scsi_bufflen(scmd) >>
+ raid_device->block_exponent;
+ /* get virtual lba */
+ v_llba = be64_to_cpu(*(__be64 *)(&cdb_ptr[2]));
+
+ if ((v_llba + (u64)io_size - 1) <=
+ raid_device->max_lba) {
+ stripe_sz = raid_device->stripe_sz;
+ stripe_exp = raid_device->stripe_exponent;
+ stripe_off = (u32) (v_llba & (stripe_sz - 1));
+
+ /* Check whether IO falls within a stripe */
+ if ((stripe_off + io_size) <= stripe_sz) {
+ num_pds = raid_device->num_pds;
+ p_lba = (u32)(v_llba >> stripe_exp);
+ stripe_unit = p_lba / num_pds;
+ column = p_lba % num_pds;
+ p_lba = (stripe_unit << stripe_exp) +
+ stripe_off;
+ mpi_request->DevHandle =
+ cpu_to_le16(raid_device->
+ pd_handle[column]);
+ (*(__be64 *)(&cdb_ptr[2])) =
+ cpu_to_be64((u64)p_lba);
/*
* WD: To indicate this I/O is directI/O
*/
@@ -4335,7 +4368,7 @@ _scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
/* insert into event log */
sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
sizeof(Mpi2EventDataSasDeviceStatusChange_t);
- event_reply = kzalloc(sz, GFP_KERNEL);
+ event_reply = kzalloc(sz, GFP_ATOMIC);
if (!event_reply) {
printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
ioc->name, __FILE__, __LINE__, __func__);
@@ -4403,11 +4436,14 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
scmd->result = DID_NO_CONNECT << 16;
goto out;
}
+ ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
/*
* WARPDRIVE: If direct_io is set then it is directIO,
* the failed direct I/O should be redirected to volume
*/
- if (_scsih_scsi_direct_io_get(ioc, smid)) {
+ if (_scsih_scsi_direct_io_get(ioc, smid) &&
+ ((ioc_status & MPI2_IOCSTATUS_MASK)
+ != MPI2_IOCSTATUS_SCSI_TASK_TERMINATED)) {
spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
ioc->scsi_lookup[smid - 1].scmd = scmd;
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
@@ -4441,7 +4477,6 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt);
- ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
else
@@ -4485,6 +4520,8 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
scmd->result = DID_TRANSPORT_DISRUPTED << 16;
goto out;
}
+ scmd->result = DID_SOFT_ERROR << 16;
+ break;
case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
scmd->result = DID_RESET << 16;
@@ -6714,6 +6751,7 @@ _scsih_mark_responding_raid_device(struct MPT2SAS_ADAPTER *ioc, u64 wwid,
} else
sas_target_priv_data = NULL;
raid_device->responding = 1;
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
starget_printk(KERN_INFO, raid_device->starget,
"handle(0x%04x), wwid(0x%016llx)\n", handle,
(unsigned long long)raid_device->wwid);
@@ -6724,16 +6762,16 @@ _scsih_mark_responding_raid_device(struct MPT2SAS_ADAPTER *ioc, u64 wwid,
*/
_scsih_init_warpdrive_properties(ioc, raid_device);
if (raid_device->handle == handle)
- goto out;
+ return;
printk(KERN_INFO "\thandle changed from(0x%04x)!!!\n",
raid_device->handle);
raid_device->handle = handle;
if (sas_target_priv_data)
sas_target_priv_data->handle = handle;
- goto out;
+ return;
}
}
- out:
+
spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
}
@@ -7418,7 +7456,7 @@ static struct scsi_host_template scsih_driver_template = {
.can_queue = 1,
.this_id = -1,
.sg_tablesize = MPT2SAS_SG_DEPTH,
- .max_sectors = 8192,
+ .max_sectors = 32767,
.cmd_per_lun = 7,
.use_clustering = ENABLE_CLUSTERING,
.shost_attrs = mpt2sas_host_attrs,
@@ -7928,6 +7966,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ioc->tm_tr_volume_cb_idx = tm_tr_volume_cb_idx;
ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx;
ioc->logging_level = logging_level;
+ ioc->schedule_dead_ioc_flush_running_cmds = &_scsih_flush_running_cmds;
/* misc semaphores and spin locks */
mutex_init(&ioc->reset_in_progress_mutex);
spin_lock_init(&ioc->ioc_reset_in_progress_lock);
@@ -7958,11 +7997,11 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
printk(MPT2SAS_WARN_FMT "Invalid value %d passed "
"for max_sectors, range is 64 to 8192. Assigning "
"value of 64.\n", ioc->name, max_sectors);
- } else if (max_sectors > 8192) {
- shost->max_sectors = 8192;
+ } else if (max_sectors > 32767) {
+ shost->max_sectors = 32767;
printk(MPT2SAS_WARN_FMT "Invalid value %d passed "
"for max_sectors, range is 64 to 8192. Assigning "
- "default value of 8192.\n", ioc->name,
+ "default value of 32767.\n", ioc->name,
max_sectors);
} else {
shost->max_sectors = max_sectors & 0xFFFE;
@@ -8000,7 +8039,6 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto out_attach_fail;
}
- scsi_scan_host(shost);
if (ioc->is_warpdrive) {
if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_EXPOSE_ALL_DISKS)
ioc->hide_drives = 0;
@@ -8014,8 +8052,8 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
} else
ioc->hide_drives = 0;
+ scsi_scan_host(shost);
- _scsih_probe_devices(ioc);
return 0;
out_attach_fail:
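
The rewritten _scsih_setup_direct_io above maps a volume-relative LBA to a member-disk LBA with shift-and-mask arithmetic once the stripe and block sizes are known to be powers of two. A standalone walk-through of that mapping (example geometry invented: 4 member disks, 128-block stripes) is:

#include <stdio.h>
#include <stdint.h>

/* Map a virtual (volume) LBA to a physical (member disk) LBA for RAID0,
 * mirroring the arithmetic in _scsih_setup_direct_io.  Returns the member
 * disk index, or -1 if the I/O crosses a stripe boundary and must go
 * through the volume instead. */
static int map_direct_io(uint64_t v_lba, uint32_t io_size,
			 uint32_t stripe_exp, uint8_t num_pds,
			 uint64_t *p_lba)
{
	uint32_t stripe_sz  = 1u << stripe_exp;
	uint32_t stripe_off = (uint32_t)(v_lba & (stripe_sz - 1));
	uint64_t stripe_unit, column, p;

	if (stripe_off + io_size > stripe_sz)
		return -1;		/* spans two stripes: no direct I/O */

	p           = v_lba >> stripe_exp;	/* stripe number       */
	stripe_unit = p / num_pds;		/* row on each member  */
	column      = p % num_pds;		/* which member disk   */
	*p_lba      = (stripe_unit << stripe_exp) + stripe_off;
	return (int)column;
}

int main(void)
{
	uint64_t p_lba;
	/* 64K stripes of 512-byte blocks -> 128 blocks -> stripe_exp = 7. */
	int pd = map_direct_io(1000, 8, 7, 4, &p_lba);

	if (pd >= 0)
		printf("v_lba 1000 -> pd %d, p_lba %llu\n",
		       pd, (unsigned long long)p_lba);
	return 0;
}
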
diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c
index 230732241aa2..831047466a5a 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_transport.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c
@@ -398,8 +398,8 @@ _transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc,
dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT "report_manufacture - "
"send to sas_addr(0x%016llx)\n", ioc->name,
(unsigned long long)sas_address));
- mpt2sas_base_put_smid_default(ioc, smid);
init_completion(&ioc->transport_cmds.done);
+ mpt2sas_base_put_smid_default(ioc, smid);
timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done,
10*HZ);
@@ -1184,8 +1184,8 @@ _transport_get_expander_phy_error_log(struct MPT2SAS_ADAPTER *ioc,
dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT "phy_error_log - "
"send to sas_addr(0x%016llx), phy(%d)\n", ioc->name,
(unsigned long long)phy->identify.sas_address, phy->number));
- mpt2sas_base_put_smid_default(ioc, smid);
init_completion(&ioc->transport_cmds.done);
+ mpt2sas_base_put_smid_default(ioc, smid);
timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done,
10*HZ);
@@ -1509,8 +1509,9 @@ _transport_expander_phy_control(struct MPT2SAS_ADAPTER *ioc,
"send to sas_addr(0x%016llx), phy(%d), opcode(%d)\n", ioc->name,
(unsigned long long)phy->identify.sas_address, phy->number,
phy_operation));
- mpt2sas_base_put_smid_default(ioc, smid);
+
init_completion(&ioc->transport_cmds.done);
+ mpt2sas_base_put_smid_default(ioc, smid);
timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done,
10*HZ);
@@ -1949,8 +1950,8 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT "%s - "
"sending smp request\n", ioc->name, __func__));
- mpt2sas_base_put_smid_default(ioc, smid);
init_completion(&ioc->transport_cmds.done);
+ mpt2sas_base_put_smid_default(ioc, smid);
timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done,
10*HZ);
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c
index f6a50c98c36f..002924963cd8 100644
--- a/drivers/scsi/nsp32.c
+++ b/drivers/scsi/nsp32.c
@@ -59,11 +59,11 @@ MODULE_PARM_DESC(trans_mode, "transfer mode (0: BIOS(default) 1: Async 2: Ultra2
#define ASYNC_MODE 1
#define ULTRA20M_MODE 2
-static int auto_param = 0; /* default: ON */
+static bool auto_param = 0; /* default: ON */
module_param (auto_param, bool, 0);
MODULE_PARM_DESC(auto_param, "AutoParameter mode (0: ON(default) 1: OFF)");
-static int disc_priv = 1; /* default: OFF */
+static bool disc_priv = 1; /* default: OFF */
module_param (disc_priv, bool, 0);
MODULE_PARM_DESC(disc_priv, "disconnection privilege mode (0: ON 1: OFF(default))");
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c
index ca86721a71b9..b61a753eb896 100644
--- a/drivers/scsi/pcmcia/nsp_cs.c
+++ b/drivers/scsi/pcmcia/nsp_cs.c
@@ -70,7 +70,7 @@ module_param(nsp_burst_mode, int, 0);
MODULE_PARM_DESC(nsp_burst_mode, "Burst transfer mode (0=io8, 1=io32, 2=mem32(default))");
/* Release IO ports after configuration? */
-static int free_ports = 0;
+static bool free_ports = 0;
module_param(free_ports, bool, 0);
MODULE_PARM_DESC(free_ports, "Release IO ports after configuration? (default: 0 (=no))");
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 5163edb925cb..ea8a0b47d66d 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -4105,7 +4105,7 @@ static long pmcraid_chr_ioctl(
hdr = kmalloc(sizeof(struct pmcraid_ioctl_header), GFP_KERNEL);
if (!hdr) {
- pmcraid_err("faile to allocate memory for ioctl header\n");
+ pmcraid_err("failed to allocate memory for ioctl header\n");
return -ENOMEM;
}
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index ac326c41e931..a2f1b3043dfb 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -107,7 +107,7 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
break;
}
- return -EINVAL;
+ return count;
}
static struct bin_attribute sysfs_fw_dump_attr = {
@@ -387,7 +387,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
break;
case 3:
if (ha->optrom_state != QLA_SWRITING)
- return -ENOMEM;
+ return -EINVAL;
if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x7068,
@@ -667,7 +667,7 @@ qla2x00_sysfs_write_edc(struct file *filp, struct kobject *kobj,
dev, adr, len, opt);
if (rval != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x7074,
- "Unable to write EDC (%x) %02x:%04x:%02x:%02hhx\n",
+ "Unable to write EDC (%x) %02x:%04x:%02x:%02x:%02hhx\n",
rval, dev, adr, opt, len, buf[8]);
return -EIO;
}
@@ -724,7 +724,7 @@ qla2x00_sysfs_write_edc_status(struct file *filp, struct kobject *kobj,
dev, adr, len, opt);
if (rval != QLA_SUCCESS) {
ql_log(ql_log_info, vha, 0x7075,
- "Unable to write EDC status (%x) %02x:%04x:%02x.\n",
+ "Unable to write EDC status (%x) %02x:%04x:%02x:%02x.\n",
rval, dev, adr, opt, len);
return -EIO;
}
@@ -1762,12 +1762,31 @@ qla2x00_get_host_port_state(struct Scsi_Host *shost)
scsi_qla_host_t *vha = shost_priv(shost);
struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev);
- if (!base_vha->flags.online)
+ if (!base_vha->flags.online) {
fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
- else if (atomic_read(&base_vha->loop_state) == LOOP_TIMEOUT)
- fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
- else
+ return;
+ }
+
+ switch (atomic_read(&base_vha->loop_state)) {
+ case LOOP_UPDATE:
+ fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS;
+ break;
+ case LOOP_DOWN:
+ if (test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags))
+ fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS;
+ else
+ fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
+ break;
+ case LOOP_DEAD:
+ fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
+ break;
+ case LOOP_READY:
fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
+ break;
+ default:
+ fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
+ break;
+ }
}
static int
@@ -1952,8 +1971,8 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
"Queue delete failed.\n");
}
- scsi_host_put(vha->host);
ql_log(ql_log_info, vha, 0x7088, "VP[%d] deleted.\n", id);
+ scsi_host_put(vha->host);
return 0;
}
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 8b641a8a0c74..b1d0f936bf2d 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -31,6 +31,7 @@ qla2x00_get_ctx_bsg_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size)
memset(sp, 0, sizeof(*sp));
sp->fcport = fcport;
sp->ctx = ctx;
+ ctx->iocbs = 1;
done:
return sp;
}
@@ -102,7 +103,7 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
bsg_job->reply->reply_payload_rcv_len = 0;
- if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))) {
+ if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_QLA82XX(ha))) {
ret = -EINVAL;
goto exit_fcp_prio_cfg;
}
@@ -389,6 +390,20 @@ done:
return rval;
}
+inline uint16_t
+qla24xx_calc_ct_iocbs(uint16_t dsds)
+{
+ uint16_t iocbs;
+
+ iocbs = 1;
+ if (dsds > 2) {
+ iocbs += (dsds - 2) / 5;
+ if ((dsds - 2) % 5)
+ iocbs++;
+ }
+ return iocbs;
+}
+
static int
qla2x00_process_ct(struct fc_bsg_job *bsg_job)
{
@@ -489,6 +504,7 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
ct = sp->ctx;
ct->type = SRB_CT_CMD;
ct->name = "bsg_ct";
+ ct->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
ct->u.bsg_job = bsg_job;
ql_dbg(ql_dbg_user, vha, 0x7016,
@@ -1653,7 +1669,7 @@ qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
}
ql_dbg(ql_dbg_user, vha, 0x7000,
- "Entered %s msgcode=%d.\n", __func__, bsg_job->request->msgcode);
+ "Entered %s msgcode=0x%x.\n", __func__, bsg_job->request->msgcode);
switch (bsg_job->request->msgcode) {
case FC_BSG_RPT_ELS:
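
The helper qla24xx_calc_ct_iocbs() added above charges one IOCB for the first two data-segment descriptors and one more for every further group of five. A standalone check of that ceiling arithmetic (plain C, DSD counts chosen arbitrarily):

#include <stdio.h>
#include <stdint.h>

/* Same arithmetic as qla24xx_calc_ct_iocbs(): one IOCB covers the first
 * two DSDs, each continuation IOCB covers up to five more. */
static uint16_t calc_ct_iocbs(uint16_t dsds)
{
	uint16_t iocbs = 1;

	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return iocbs;
}

int main(void)
{
	uint16_t dsds;

	for (dsds = 1; dsds <= 13; dsds++)
		printf("dsds=%2u -> iocbs=%u\n", dsds, calc_ct_iocbs(dsds));
	/* 1..2 -> 1, 3..7 -> 2, 8..12 -> 3, 13 -> 4 */
	return 0;
}
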
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 9df4787715c0..7c54624b5b13 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -11,18 +11,20 @@
* ----------------------------------------------------------------------
* | Level | Last Value Used | Holes |
* ----------------------------------------------------------------------
- * | Module Init and Probe | 0x0116 | |
- * | Mailbox commands | 0x1129 | |
- * | Device Discovery | 0x2083 | |
- * | Queue Command and IO tracing | 0x302e | 0x3008 |
+ * | Module Init and Probe | 0x0116 | 0xfa |
+ * | Mailbox commands | 0x112b | |
+ * | Device Discovery | 0x2084 | |
+ * | Queue Command and IO tracing | 0x302f | 0x3008,0x302d, |
+ * | | | 0x302e |
* | DPC Thread | 0x401c | |
- * | Async Events | 0x5059 | |
- * | Timer Routines | 0x600d | |
- * | User Space Interactions | 0x709d | |
- * | Task Management | 0x8041 | |
+ * | Async Events | 0x5057 | 0x5052 |
+ * | Timer Routines | 0x6011 | 0x600e,0x600f |
+ * | User Space Interactions | 0x709e | |
+ * | Task Management | 0x803c | 0x8025-0x8026 |
+ * | | | 0x800b,0x8039 |
* | AER/EEH | 0x900f | |
* | Virtual Port | 0xa007 | |
- * | ISP82XX Specific | 0xb051 | |
+ * | ISP82XX Specific | 0xb052 | |
* | MultiQ | 0xc00b | |
* | Misc | 0xd00b | |
* ----------------------------------------------------------------------
@@ -368,7 +370,7 @@ qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
memcpy(iter_reg, ha->fce, ntohl(fcec->size));
- return iter_reg;
+ return (char *)iter_reg + ntohl(fcec->size);
}
static inline void *
@@ -1650,6 +1652,15 @@ qla81xx_fw_dump_failed:
/****************************************************************************/
/* Driver Debug Functions. */
/****************************************************************************/
+
+static inline int
+ql_mask_match(uint32_t level)
+{
+ if (ql2xextended_error_logging == 1)
+ ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
+ return (level & ql2xextended_error_logging) == level;
+}
+
/*
* This function is for formatting and logging debug information.
* It is to be used when vha is available. It formats the message
@@ -1664,34 +1675,31 @@ qla81xx_fw_dump_failed:
* msg: The message to be displayed.
*/
void
-ql_dbg(uint32_t level, scsi_qla_host_t *vha, int32_t id, char *msg, ...) {
-
- char pbuf[QL_DBG_BUF_LEN];
- va_list ap;
- uint32_t len;
- struct pci_dev *pdev = NULL;
-
- memset(pbuf, 0, QL_DBG_BUF_LEN);
-
- va_start(ap, msg);
-
- if ((level & ql2xextended_error_logging) == level) {
- if (vha != NULL) {
- pdev = vha->hw->pdev;
- /* <module-name> <pci-name> <msg-id>:<host> Message */
- sprintf(pbuf, "%s [%s]-%04x:%ld: ", QL_MSGHDR,
- dev_name(&(pdev->dev)), id + ql_dbg_offset,
- vha->host_no);
- } else
- sprintf(pbuf, "%s [%s]-%04x: : ", QL_MSGHDR,
- "0000:00:00.0", id + ql_dbg_offset);
-
- len = strlen(pbuf);
- vsprintf(pbuf+len, msg, ap);
- pr_warning("%s", pbuf);
+ql_dbg(uint32_t level, scsi_qla_host_t *vha, int32_t id, const char *fmt, ...)
+{
+ va_list va;
+ struct va_format vaf;
+
+ if (!ql_mask_match(level))
+ return;
+
+ va_start(va, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &va;
+
+ if (vha != NULL) {
+ const struct pci_dev *pdev = vha->hw->pdev;
+ /* <module-name> <pci-name> <msg-id>:<host> Message */
+ pr_warn("%s [%s]-%04x:%ld: %pV",
+ QL_MSGHDR, dev_name(&(pdev->dev)), id + ql_dbg_offset,
+ vha->host_no, &vaf);
+ } else {
+ pr_warn("%s [%s]-%04x: : %pV",
+ QL_MSGHDR, "0000:00:00.0", id + ql_dbg_offset, &vaf);
}
- va_end(ap);
+ va_end(va);
}
@@ -1710,31 +1718,27 @@ ql_dbg(uint32_t level, scsi_qla_host_t *vha, int32_t id, char *msg, ...) {
* msg: The message to be displayed.
*/
void
-ql_dbg_pci(uint32_t level, struct pci_dev *pdev, int32_t id, char *msg, ...) {
-
- char pbuf[QL_DBG_BUF_LEN];
- va_list ap;
- uint32_t len;
+ql_dbg_pci(uint32_t level, struct pci_dev *pdev, int32_t id,
+ const char *fmt, ...)
+{
+ va_list va;
+ struct va_format vaf;
if (pdev == NULL)
return;
+ if (!ql_mask_match(level))
+ return;
- memset(pbuf, 0, QL_DBG_BUF_LEN);
-
- va_start(ap, msg);
-
- if ((level & ql2xextended_error_logging) == level) {
- /* <module-name> <dev-name>:<msg-id> Message */
- sprintf(pbuf, "%s [%s]-%04x: : ", QL_MSGHDR,
- dev_name(&(pdev->dev)), id + ql_dbg_offset);
+ va_start(va, fmt);
- len = strlen(pbuf);
- vsprintf(pbuf+len, msg, ap);
- pr_warning("%s", pbuf);
- }
+ vaf.fmt = fmt;
+ vaf.va = &va;
- va_end(ap);
+ /* <module-name> <dev-name>:<msg-id> Message */
+ pr_warn("%s [%s]-%04x: : %pV",
+ QL_MSGHDR, dev_name(&(pdev->dev)), id + ql_dbg_offset, &vaf);
+ va_end(va);
}
/*
@@ -1751,47 +1755,47 @@ ql_dbg_pci(uint32_t level, struct pci_dev *pdev, int32_t id, char *msg, ...) {
* msg: The message to be displayed.
*/
void
-ql_log(uint32_t level, scsi_qla_host_t *vha, int32_t id, char *msg, ...) {
-
- char pbuf[QL_DBG_BUF_LEN];
- va_list ap;
- uint32_t len;
- struct pci_dev *pdev = NULL;
-
- memset(pbuf, 0, QL_DBG_BUF_LEN);
-
- va_start(ap, msg);
-
- if (level <= ql_errlev) {
- if (vha != NULL) {
- pdev = vha->hw->pdev;
- /* <module-name> <msg-id>:<host> Message */
- sprintf(pbuf, "%s [%s]-%04x:%ld: ", QL_MSGHDR,
- dev_name(&(pdev->dev)), id, vha->host_no);
- } else
- sprintf(pbuf, "%s [%s]-%04x: : ", QL_MSGHDR,
- "0000:00:00.0", id);
+ql_log(uint32_t level, scsi_qla_host_t *vha, int32_t id, const char *fmt, ...)
+{
+ va_list va;
+ struct va_format vaf;
+ char pbuf[128];
- len = strlen(pbuf);
- vsprintf(pbuf+len, msg, ap);
+ if (level > ql_errlev)
+ return;
- switch (level) {
- case 0: /* FATAL LOG */
- pr_crit("%s", pbuf);
- break;
- case 1:
- pr_err("%s", pbuf);
- break;
- case 2:
- pr_warn("%s", pbuf);
- break;
- default:
- pr_info("%s", pbuf);
- break;
- }
+ if (vha != NULL) {
+ const struct pci_dev *pdev = vha->hw->pdev;
+ /* <module-name> <msg-id>:<host> Message */
+ snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x:%ld: ",
+ QL_MSGHDR, dev_name(&(pdev->dev)), id, vha->host_no);
+ } else {
+ snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: : ",
+ QL_MSGHDR, "0000:00:00.0", id);
+ }
+ pbuf[sizeof(pbuf) - 1] = 0;
+
+ va_start(va, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &va;
+
+ switch (level) {
+ case 0: /* FATAL LOG */
+ pr_crit("%s%pV", pbuf, &vaf);
+ break;
+ case 1:
+ pr_err("%s%pV", pbuf, &vaf);
+ break;
+ case 2:
+ pr_warn("%s%pV", pbuf, &vaf);
+ break;
+ default:
+ pr_info("%s%pV", pbuf, &vaf);
+ break;
}
- va_end(ap);
+ va_end(va);
}
/*
@@ -1809,43 +1813,44 @@ ql_log(uint32_t level, scsi_qla_host_t *vha, int32_t id, char *msg, ...) {
* msg: The message to be displayed.
*/
void
-ql_log_pci(uint32_t level, struct pci_dev *pdev, int32_t id, char *msg, ...) {
-
- char pbuf[QL_DBG_BUF_LEN];
- va_list ap;
- uint32_t len;
+ql_log_pci(uint32_t level, struct pci_dev *pdev, int32_t id,
+ const char *fmt, ...)
+{
+ va_list va;
+ struct va_format vaf;
+ char pbuf[128];
if (pdev == NULL)
return;
+ if (level > ql_errlev)
+ return;
- memset(pbuf, 0, QL_DBG_BUF_LEN);
-
- va_start(ap, msg);
-
- if (level <= ql_errlev) {
- /* <module-name> <dev-name>:<msg-id> Message */
- sprintf(pbuf, "%s [%s]-%04x: : ", QL_MSGHDR,
- dev_name(&(pdev->dev)), id);
-
- len = strlen(pbuf);
- vsprintf(pbuf+len, msg, ap);
- switch (level) {
- case 0: /* FATAL LOG */
- pr_crit("%s", pbuf);
- break;
- case 1:
- pr_err("%s", pbuf);
- break;
- case 2:
- pr_warn("%s", pbuf);
- break;
- default:
- pr_info("%s", pbuf);
- break;
- }
+ /* <module-name> <dev-name>:<msg-id> Message */
+ snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: : ",
+ QL_MSGHDR, dev_name(&(pdev->dev)), id);
+ pbuf[sizeof(pbuf) - 1] = 0;
+
+ va_start(va, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &va;
+
+ switch (level) {
+ case 0: /* FATAL LOG */
+ pr_crit("%s%pV", pbuf, &vaf);
+ break;
+ case 1:
+ pr_err("%s%pV", pbuf, &vaf);
+ break;
+ case 2:
+ pr_warn("%s%pV", pbuf, &vaf);
+ break;
+ default:
+ pr_info("%s%pV", pbuf, &vaf);
+ break;
}
- va_end(ap);
+ va_end(va);
}
void
@@ -1858,20 +1863,20 @@ ql_dump_regs(uint32_t level, scsi_qla_host_t *vha, int32_t id)
struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
uint16_t __iomem *mbx_reg;
- if ((level & ql2xextended_error_logging) == level) {
-
- if (IS_QLA82XX(ha))
- mbx_reg = &reg82->mailbox_in[0];
- else if (IS_FWI2_CAPABLE(ha))
- mbx_reg = &reg24->mailbox0;
- else
- mbx_reg = MAILBOX_REG(ha, reg, 0);
+ if (!ql_mask_match(level))
+ return;
- ql_dbg(level, vha, id, "Mailbox registers:\n");
- for (i = 0; i < 6; i++)
- ql_dbg(level, vha, id,
- "mbox[%d] 0x%04x\n", i, RD_REG_WORD(mbx_reg++));
- }
+ if (IS_QLA82XX(ha))
+ mbx_reg = &reg82->mailbox_in[0];
+ else if (IS_FWI2_CAPABLE(ha))
+ mbx_reg = &reg24->mailbox0;
+ else
+ mbx_reg = MAILBOX_REG(ha, reg, 0);
+
+ ql_dbg(level, vha, id, "Mailbox registers:\n");
+ for (i = 0; i < 6; i++)
+ ql_dbg(level, vha, id,
+ "mbox[%d] 0x%04x\n", i, RD_REG_WORD(mbx_reg++));
}
@@ -1881,24 +1886,25 @@ ql_dump_buffer(uint32_t level, scsi_qla_host_t *vha, int32_t id,
{
uint32_t cnt;
uint8_t c;
- if ((level & ql2xextended_error_logging) == level) {
-
- ql_dbg(level, vha, id, " 0 1 2 3 4 5 6 7 8 "
- "9 Ah Bh Ch Dh Eh Fh\n");
- ql_dbg(level, vha, id, "----------------------------------"
- "----------------------------\n");
-
- ql_dbg(level, vha, id, "");
- for (cnt = 0; cnt < size;) {
- c = *b++;
- printk("%02x", (uint32_t) c);
- cnt++;
- if (!(cnt % 16))
- printk("\n");
- else
- printk(" ");
- }
- if (cnt % 16)
- ql_dbg(level, vha, id, "\n");
+
+ if (!ql_mask_match(level))
+ return;
+
+ ql_dbg(level, vha, id, " 0 1 2 3 4 5 6 7 8 "
+ "9 Ah Bh Ch Dh Eh Fh\n");
+ ql_dbg(level, vha, id, "----------------------------------"
+ "----------------------------\n");
+
+ ql_dbg(level, vha, id, " ");
+ for (cnt = 0; cnt < size;) {
+ c = *b++;
+ printk("%02x", (uint32_t) c);
+ cnt++;
+ if (!(cnt % 16))
+ printk("\n");
+ else
+ printk(" ");
}
+ if (cnt % 16)
+ ql_dbg(level, vha, id, "\n");
}
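A standalone userspace sketch of the same 16-bytes-per-row layout (illustrative only; the driver emits the header lines through ql_dbg and the bytes through printk):

#include <stdint.h>
#include <stdio.h>

static void dump_buffer(const uint8_t *b, uint32_t size)
{
	uint32_t cnt;

	for (cnt = 0; cnt < size; ) {
		printf("%02x", b[cnt]);
		cnt++;
		/* Newline after every 16th byte, a space otherwise. */
		putchar((cnt % 16) ? ' ' : '\n');
	}
	if (cnt % 16)
		putchar('\n');
}

int main(void)
{
	uint8_t buf[20];
	uint32_t i;

	for (i = 0; i < sizeof(buf); i++)
		buf[i] = (uint8_t)i;
	dump_buffer(buf, sizeof(buf));
	return 0;
}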
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index 98a377b99017..5f1b6d9c3dcb 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -232,6 +232,7 @@ struct qla2xxx_fw_dump {
};
#define QL_MSGHDR "qla2xxx"
+#define QL_DBG_DEFAULT1_MASK 0x1e400000
#define ql_log_fatal 0 /* display fatal errors */
#define ql_log_warn 1 /* display critical errors */
@@ -244,15 +245,15 @@ struct qla2xxx_fw_dump {
extern int ql_errlev;
-void
-ql_dbg(uint32_t, scsi_qla_host_t *vha, int32_t, char *, ...);
-void
-ql_dbg_pci(uint32_t, struct pci_dev *pdev, int32_t, char *, ...);
+void __attribute__((format (printf, 4, 5)))
+ql_dbg(uint32_t, scsi_qla_host_t *vha, int32_t, const char *fmt, ...);
+void __attribute__((format (printf, 4, 5)))
+ql_dbg_pci(uint32_t, struct pci_dev *pdev, int32_t, const char *fmt, ...);
-void
-ql_log(uint32_t, scsi_qla_host_t *vha, int32_t, char *, ...);
-void
-ql_log_pci(uint32_t, struct pci_dev *pdev, int32_t, char *, ...);
+void __attribute__((format (printf, 4, 5)))
+ql_log(uint32_t, scsi_qla_host_t *vha, int32_t, const char *fmt, ...);
+void __attribute__((format (printf, 4, 5)))
+ql_log_pci(uint32_t, struct pci_dev *pdev, int32_t, const char *fmt, ...);
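Marking the prototypes with the printf format attribute lets the compiler cross-check every call site's format string against its arguments. A minimal userspace illustration (hypothetical function name, not part of the driver):

#include <stdio.h>
#include <stdarg.h>

void my_log(int level, const char *fmt, ...)
	__attribute__((format(printf, 2, 3)));

void my_log(int level, const char *fmt, ...)
{
	va_list ap;

	(void)level;		/* severity ignored in this sketch */
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}

int main(void)
{
	my_log(1, "count=%d\n", 5);	/* OK */
	/* my_log(1, "count=%d\n", "x");  would trigger a -Wformat warning */
	return 0;
}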
/* Debug Levels */
/* The 0x40000000 is the max value any debug level can have
@@ -275,5 +276,3 @@ ql_log_pci(uint32_t, struct pci_dev *pdev, int32_t, char *, ...);
#define ql_dbg_misc 0x00010000 /* For dumping everything that is not
* not covered by upper categories
*/
-
-#define QL_DBG_BUF_LEN 512
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index fcf052c50bf5..a6a4eebce4a8 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -271,6 +271,7 @@ struct srb_iocb {
struct srb_ctx {
uint16_t type;
char *name;
+ int iocbs;
union {
struct srb_iocb *iocb_cmd;
struct fc_bsg_job *bsg_job;
@@ -2244,6 +2245,7 @@ struct isp_operations {
int (*get_flash_version) (struct scsi_qla_host *, void *);
int (*start_scsi) (srb_t *);
int (*abort_isp) (struct scsi_qla_host *);
+ int (*iospace_config)(struct qla_hw_data*);
};
/* MSI-X Support *************************************************************/
@@ -2978,10 +2980,6 @@ typedef struct scsi_qla_host {
atomic_dec(&__vha->vref_count); \
} while (0)
-
-#define qla_printk(level, ha, format, arg...) \
- dev_printk(level , &((ha)->pdev->dev) , format , ## arg)
-
/*
* qla2x00 local function return status codes
*/
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index ce32d8135c9e..408679be8fdf 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -572,12 +572,13 @@ extern void qla2x00_set_model_info(scsi_qla_host_t *, uint8_t *,
size_t, char *);
extern int qla82xx_mbx_intr_enable(scsi_qla_host_t *);
extern int qla82xx_mbx_intr_disable(scsi_qla_host_t *);
-extern void qla82xx_start_iocbs(srb_t *);
+extern void qla82xx_start_iocbs(scsi_qla_host_t *);
extern int qla82xx_fcoe_ctx_reset(scsi_qla_host_t *);
extern int qla82xx_check_md_needed(scsi_qla_host_t *);
extern void qla82xx_chip_reset_cleanup(scsi_qla_host_t *);
extern int qla82xx_mbx_beacon_ctl(scsi_qla_host_t *, int);
extern char *qdev_state(uint32_t);
+extern void qla82xx_clear_pending_mbx(scsi_qla_host_t *);
/* BSG related functions */
extern int qla24xx_bsg_request(struct fc_bsg_job *);
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 37937aa3c3b8..4aea4ae23300 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -758,7 +758,7 @@ qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
"GA_NXT Send SNS failed (%d).\n", rval);
} else if (sns_cmd->p.gan_data[8] != 0x80 ||
sns_cmd->p.gan_data[9] != 0x02) {
- ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207d,
+ ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2084,
"GA_NXT failed, rejected request ga_nxt_rsp:\n");
ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2074,
sns_cmd->p.gan_data, 16);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index f03e915f1877..1fa067e053d2 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -111,6 +111,7 @@ qla2x00_get_ctx_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size,
memset(sp, 0, sizeof(*sp));
sp->fcport = fcport;
sp->ctx = ctx;
+ ctx->iocbs = 1;
ctx->u.iocb_cmd = iocb;
iocb->free = qla2x00_ctx_sp_free;
@@ -154,8 +155,8 @@ qla2x00_async_iocb_timeout(srb_t *sp)
struct srb_ctx *ctx = sp->ctx;
ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
- "Async-%s timeout - portid=%02x%02x%02x.\n",
- ctx->name, fcport->d_id.b.domain, fcport->d_id.b.area,
+ "Async-%s timeout - hdl=%x portid=%02x%02x%02x.\n",
+ ctx->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
fcport->d_id.b.al_pa);
fcport->flags &= ~FCF_ASYNC_SENT;
@@ -211,9 +212,10 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
goto done_free_sp;
ql_dbg(ql_dbg_disc, vha, 0x2072,
- "Async-login - loopid=%x portid=%02x%02x%02x retries=%d.\n",
- fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
- fcport->d_id.b.al_pa, fcport->login_retry);
+ "Async-login - hdl=%x, loopid=%x portid=%02x%02x%02x "
+ "retries=%d.\n", sp->handle, fcport->loop_id,
+ fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
+ fcport->login_retry);
return rval;
done_free_sp:
@@ -258,9 +260,9 @@ qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
goto done_free_sp;
ql_dbg(ql_dbg_disc, vha, 0x2070,
- "Async-logout - loop-id=%x portid=%02x%02x%02x.\n",
- fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
- fcport->d_id.b.al_pa);
+ "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
+ sp->handle, fcport->loop_id, fcport->d_id.b.domain,
+ fcport->d_id.b.area, fcport->d_id.b.al_pa);
return rval;
done_free_sp:
@@ -308,9 +310,9 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
goto done_free_sp;
ql_dbg(ql_dbg_disc, vha, 0x206f,
- "Async-adisc - loopid=%x portid=%02x%02x%02x.\n",
- fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
- fcport->d_id.b.al_pa);
+ "Async-adisc - hdl=%x loopid=%x portid=%02x%02x%02x.\n",
+ sp->handle, fcport->loop_id, fcport->d_id.b.domain,
+ fcport->d_id.b.area, fcport->d_id.b.al_pa);
return rval;
done_free_sp:
@@ -360,9 +362,9 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
goto done_free_sp;
ql_dbg(ql_dbg_taskm, vha, 0x802f,
- "Async-tmf loop-id=%x portid=%02x%02x%02x.\n",
- fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
- fcport->d_id.b.al_pa);
+ "Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
+ sp->handle, fcport->loop_id, fcport->d_id.b.domain,
+ fcport->d_id.b.area, fcport->d_id.b.al_pa);
return rval;
done_free_sp:
@@ -514,7 +516,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
set_bit(0, ha->req_qid_map);
set_bit(0, ha->rsp_qid_map);
- ql_log(ql_log_info, vha, 0x0040,
+ ql_dbg(ql_dbg_init, vha, 0x0040,
"Configuring PCI space...\n");
rval = ha->isp_ops->pci_config(vha);
if (rval) {
@@ -533,7 +535,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
}
ha->isp_ops->get_flash_version(vha, req->ring);
- ql_log(ql_log_info, vha, 0x0061,
+ ql_dbg(ql_dbg_init, vha, 0x0061,
"Configure NVRAM parameters...\n");
ha->isp_ops->nvram_config(vha);
@@ -550,7 +552,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
return QLA_FUNCTION_FAILED;
}
- ql_log(ql_log_info, vha, 0x0078,
+ ql_dbg(ql_dbg_init, vha, 0x0078,
"Verifying loaded RISC code...\n");
if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) {
@@ -1294,7 +1296,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
ha->flags.fce_enabled = 0;
goto try_eft;
}
- ql_log(ql_log_info, vha, 0x00c0,
+ ql_dbg(ql_dbg_init, vha, 0x00c0,
"Allocate (%d KB) for FCE...\n", FCE_SIZE / 1024);
fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
@@ -1321,7 +1323,7 @@ try_eft:
tc_dma);
goto cont_alloc;
}
- ql_log(ql_log_info, vha, 0x00c3,
+ ql_dbg(ql_dbg_init, vha, 0x00c3,
"Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);
eft_size = EFT_SIZE;
@@ -1358,7 +1360,7 @@ cont_alloc:
}
return;
}
- ql_log(ql_log_info, vha, 0x00c5,
+ ql_dbg(ql_dbg_init, vha, 0x00c5,
"Allocated (%d KB) for firmware dump.\n", dump_size / 1024);
ha->fw_dump_len = dump_size;
@@ -1509,7 +1511,8 @@ enable_82xx_npiv:
&ha->fw_xcb_count, NULL, NULL,
&ha->max_npiv_vports, NULL);
- if (!fw_major_version && ql2xallocfwdump)
+ if (!fw_major_version && ql2xallocfwdump
+ && !IS_QLA82XX(ha))
qla2x00_alloc_fw_dump(vha);
}
} else {
@@ -1928,7 +1931,7 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
rval = qla84xx_init_chip(vha);
if (rval != QLA_SUCCESS) {
ql_log(ql_log_warn,
- vha, 0x8026,
+ vha, 0x8007,
"Init chip failed.\n");
break;
}
@@ -1937,7 +1940,7 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
cs84xx_time = jiffies - cs84xx_time;
wtime += cs84xx_time;
mtime += cs84xx_time;
- ql_dbg(ql_dbg_taskm, vha, 0x8025,
+ ql_dbg(ql_dbg_taskm, vha, 0x8008,
"Increasing wait time by %ld. "
"New time %ld.\n", cs84xx_time,
wtime);
@@ -1980,16 +1983,13 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
/* Delay for a while */
msleep(500);
-
- ql_dbg(ql_dbg_taskm, vha, 0x8039,
- "fw_state=%x curr time=%lx.\n", state[0], jiffies);
} while (1);
ql_dbg(ql_dbg_taskm, vha, 0x803a,
"fw_state=%x (%x, %x, %x, %x) " "curr time=%lx.\n", state[0],
state[1], state[2], state[3], state[4], jiffies);
- if (rval) {
+ if (rval && !(vha->device_flags & DFLG_NO_CABLE)) {
ql_log(ql_log_warn, vha, 0x803b,
"Firmware ready **** FAILED ****.\n");
}
@@ -2385,7 +2385,7 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
* internal driver logging.
*/
if (nv->host_p[0] & BIT_7)
- ql2xextended_error_logging = 0x7fffffff;
+ ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
ha->flags.disable_risc_code_load = ((nv->host_p[0] & BIT_4) ? 1 : 0);
/* Always load RISC code on non ISP2[12]00 chips. */
if (!IS_QLA2100(ha) && !IS_QLA2200(ha))
@@ -4187,7 +4187,8 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
spin_unlock_irqrestore(&ha->vport_slock, flags);
} else {
- ql_log(ql_log_warn, vha, 0x8023, "%s **** FAILED ****.\n");
+ ql_log(ql_log_warn, vha, 0x8023, "%s **** FAILED ****.\n",
+ __func__);
}
return(status);
@@ -4637,7 +4638,7 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
struct req_que *req = ha->req_q_map[0];
ql_dbg(ql_dbg_init, vha, 0x008b,
- "Loading firmware from flash (%x).\n", faddr);
+ "FW: Loading firmware from flash (%x).\n", faddr);
rval = QLA_SUCCESS;
@@ -4835,8 +4836,8 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
return QLA_FUNCTION_FAILED;
}
- ql_log(ql_log_info, vha, 0x0092,
- "Loading via request-firmware.\n");
+ ql_dbg(ql_dbg_init, vha, 0x0092,
+ "FW: Loading via request-firmware.\n");
rval = QLA_SUCCESS;
@@ -5424,7 +5425,7 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
if ((vha->device_flags & DFLG_NO_CABLE))
status = 0;
- ql_log(ql_log_info, vha, 0x803d,
+ ql_log(ql_log_info, vha, 0x8000,
"Configure loop done, status = 0x%x.\n", status);
}
@@ -5457,7 +5458,7 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
ha->fce_dma, ha->fce_bufs, ha->fce_mb,
&ha->fce_bufs);
if (rval) {
- ql_log(ql_log_warn, vha, 0x803e,
+ ql_log(ql_log_warn, vha, 0x8001,
"Unable to reinitialize FCE (%d).\n",
rval);
ha->flags.fce_enabled = 0;
@@ -5469,7 +5470,7 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
rval = qla2x00_enable_eft_trace(vha,
ha->eft_dma, EFT_NUM_BUFFERS);
if (rval) {
- ql_log(ql_log_warn, vha, 0x803f,
+ ql_log(ql_log_warn, vha, 0x8010,
"Unable to reinitialize EFT (%d).\n",
rval);
}
@@ -5477,7 +5478,7 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
}
if (!status) {
- ql_dbg(ql_dbg_taskm, vha, 0x8040,
+ ql_dbg(ql_dbg_taskm, vha, 0x8011,
"qla82xx_restart_isp succeeded.\n");
spin_lock_irqsave(&ha->vport_slock, flags);
@@ -5495,7 +5496,7 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
spin_unlock_irqrestore(&ha->vport_slock, flags);
} else {
- ql_log(ql_log_warn, vha, 0x8041,
+ ql_log(ql_log_warn, vha, 0x8016,
"qla82xx_restart_isp **** FAILED ****.\n");
}
@@ -5642,13 +5643,26 @@ qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
if (priority < 0)
return QLA_FUNCTION_FAILED;
+ if (IS_QLA82XX(vha->hw)) {
+ fcport->fcp_prio = priority & 0xf;
+ return QLA_SUCCESS;
+ }
+
ret = qla24xx_set_fcp_prio(vha, fcport->loop_id, priority, mb);
- if (ret == QLA_SUCCESS)
- fcport->fcp_prio = priority;
- else
+ if (ret == QLA_SUCCESS) {
+ if (fcport->fcp_prio != priority)
+ ql_dbg(ql_dbg_user, vha, 0x709e,
+ "Updated FCP_CMND priority - value=%d loop_id=%d "
+ "port_id=%02x%02x%02x.\n", priority,
+ fcport->loop_id, fcport->d_id.b.domain,
+ fcport->d_id.b.area, fcport->d_id.b.al_pa);
+ fcport->fcp_prio = priority & 0xf;
+ } else
ql_dbg(ql_dbg_user, vha, 0x704f,
- "Unable to activate fcp priority, ret=0x%x.\n", ret);
-
+ "Unable to update FCP_CMND priority - ret=0x%x for "
+ "loop_id=%d port_id=%02x%02x%02x.\n", ret, fcport->loop_id,
+ fcport->d_id.b.domain, fcport->d_id.b.area,
+ fcport->d_id.b.al_pa);
return ret;
}
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index dbec89622a0f..55a96761b5a4 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -11,8 +11,6 @@
#include <scsi/scsi_tcq.h>
-static void qla2x00_isp_cmd(struct scsi_qla_host *, struct req_que *);
-
static void qla25xx_set_que(srb_t *, struct rsp_que **);
/**
* qla2x00_get_cmd_direction() - Determine control_flag data direction.
@@ -120,11 +118,10 @@ qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
* Returns a pointer to the continuation type 1 IOCB packet.
*/
static inline cont_a64_entry_t *
-qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha)
+qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
cont_a64_entry_t *cont_pkt;
- struct req_que *req = vha->req;
/* Adjust ring index. */
req->ring_index++;
if (req->ring_index == req->length) {
@@ -292,7 +289,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
* Five DSDs are available in the Continuation
* Type 1 IOCB.
*/
- cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
+ cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
avail_dsds = 5;
}
@@ -469,6 +466,42 @@ queuing_error:
}
/**
+ * qla2x00_start_iocbs() - Execute the IOCB command
+ */
+static void
+qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
+{
+ struct qla_hw_data *ha = vha->hw;
+ device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
+ struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
+
+ if (IS_QLA82XX(ha)) {
+ qla82xx_start_iocbs(vha);
+ } else {
+ /* Adjust ring index. */
+ req->ring_index++;
+ if (req->ring_index == req->length) {
+ req->ring_index = 0;
+ req->ring_ptr = req->ring;
+ } else
+ req->ring_ptr++;
+
+ /* Set chip new ring index. */
+ if (ha->mqenable) {
+ WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
+ RD_REG_DWORD(&ioreg->hccr);
+ } else if (IS_FWI2_CAPABLE(ha)) {
+ WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
+ RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
+ } else {
+ WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
+ req->ring_index);
+ RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
+ }
+ }
+}
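The request ring is a fixed-size circular buffer, so advancing the producer side is an increment with wrap-around before the new index is written to the chip. A standalone sketch of that step (field names are illustrative):

#include <stdio.h>

struct ring {
	unsigned int index;	/* producer index */
	unsigned int length;	/* number of entries in the ring */
};

/* Advance the producer index, wrapping to 0 at the end of the ring. */
static void ring_advance(struct ring *r)
{
	r->index++;
	if (r->index == r->length)
		r->index = 0;
}

int main(void)
{
	struct ring r = { .index = 2, .length = 4 };

	ring_advance(&r);	/* 3 */
	ring_advance(&r);	/* wraps to 0 */
	printf("%u\n", r.index);
	return 0;
}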
+
+/**
* qla2x00_marker() - Send a marker IOCB to the firmware.
* @ha: HA context
* @loop_id: loop ID
@@ -490,6 +523,7 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
mrk24 = NULL;
+ req = ha->req_q_map[0];
mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, 0);
if (mrk == NULL) {
ql_log(ql_log_warn, base_vha, 0x3026,
@@ -516,7 +550,7 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
}
wmb();
- qla2x00_isp_cmd(vha, req);
+ qla2x00_start_iocbs(vha, req);
return (QLA_SUCCESS);
}
@@ -537,89 +571,140 @@ qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
}
/**
- * qla2x00_isp_cmd() - Modify the request ring pointer.
- * @ha: HA context
+ * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
+ * Continuation Type 1 IOCBs to allocate.
+ *
+ * @dsds: number of data segment descriptors needed
*
- * Note: The caller must hold the hardware lock before calling this routine.
+ * Returns the number of IOCB entries needed to store @dsds.
*/
-static void
-qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req)
+inline uint16_t
+qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
{
- struct qla_hw_data *ha = vha->hw;
- device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
- struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
+ uint16_t iocbs;
- ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x302d,
- "IOCB data:\n");
- ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302e,
- (uint8_t *)req->ring_ptr, REQUEST_ENTRY_SIZE);
+ iocbs = 1;
+ if (dsds > 1) {
+ iocbs += (dsds - 1) / 5;
+ if ((dsds - 1) % 5)
+ iocbs++;
+ }
+ return iocbs;
+}
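Worked example of the count above: the command IOCB itself carries one data segment and each Continuation Type 1 IOCB carries five, so twelve segments need 1 + ceil((12 - 1) / 5) = 4 IOCBs. A standalone check of the same arithmetic:

#include <stdio.h>

static unsigned short calc_iocbs(unsigned short dsds)
{
	unsigned short iocbs = 1;

	if (dsds > 1) {
		iocbs += (dsds - 1) / 5;
		if ((dsds - 1) % 5)
			iocbs++;
	}
	return iocbs;
}

int main(void)
{
	printf("%u\n", calc_iocbs(1));	/* 1: fits in the command IOCB */
	printf("%u\n", calc_iocbs(6));	/* 2: one continuation IOCB */
	printf("%u\n", calc_iocbs(12));	/* 4: three continuation IOCBs */
	return 0;
}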
- /* Adjust ring index. */
- req->ring_index++;
- if (req->ring_index == req->length) {
- req->ring_index = 0;
- req->ring_ptr = req->ring;
- } else
- req->ring_ptr++;
+static inline int
+qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
+ uint16_t tot_dsds)
+{
+ uint32_t *cur_dsd = NULL;
+ scsi_qla_host_t *vha;
+ struct qla_hw_data *ha;
+ struct scsi_cmnd *cmd;
+ struct scatterlist *cur_seg;
+ uint32_t *dsd_seg;
+ void *next_dsd;
+ uint8_t avail_dsds;
+ uint8_t first_iocb = 1;
+ uint32_t dsd_list_len;
+ struct dsd_dma *dsd_ptr;
+ struct ct6_dsd *ctx;
- /* Set chip new ring index. */
- if (IS_QLA82XX(ha)) {
- uint32_t dbval = 0x04 | (ha->portnum << 5);
+ cmd = sp->cmd;
- /* write, read and verify logic */
- dbval = dbval | (req->id << 8) | (req->ring_index << 16);
- if (ql2xdbwr)
- qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
- else {
- WRT_REG_DWORD(
- (unsigned long __iomem *)ha->nxdb_wr_ptr,
- dbval);
- wmb();
- while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
- WRT_REG_DWORD((unsigned long __iomem *)
- ha->nxdb_wr_ptr, dbval);
- wmb();
- }
- }
- } else if (ha->mqenable) {
- /* Set chip new ring index. */
- WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
- RD_REG_DWORD(&ioreg->hccr);
- } else {
- if (IS_FWI2_CAPABLE(ha)) {
- WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
- RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
+ /* Update entry type to indicate Command Type 3 IOCB */
+ *((uint32_t *)(&cmd_pkt->entry_type)) =
+ __constant_cpu_to_le32(COMMAND_TYPE_6);
+
+ /* No data transfer */
+ if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
+ cmd_pkt->byte_count = __constant_cpu_to_le32(0);
+ return 0;
+ }
+
+ vha = sp->fcport->vha;
+ ha = vha->hw;
+
+ /* Set transfer direction */
+ if (cmd->sc_data_direction == DMA_TO_DEVICE) {
+ cmd_pkt->control_flags =
+ __constant_cpu_to_le16(CF_WRITE_DATA);
+ ha->qla_stats.output_bytes += scsi_bufflen(cmd);
+ } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
+ cmd_pkt->control_flags =
+ __constant_cpu_to_le16(CF_READ_DATA);
+ ha->qla_stats.input_bytes += scsi_bufflen(cmd);
+ }
+
+ cur_seg = scsi_sglist(cmd);
+ ctx = sp->ctx;
+
+ while (tot_dsds) {
+ avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
+ QLA_DSDS_PER_IOCB : tot_dsds;
+ tot_dsds -= avail_dsds;
+ dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;
+
+ dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
+ struct dsd_dma, list);
+ next_dsd = dsd_ptr->dsd_addr;
+ list_del(&dsd_ptr->list);
+ ha->gbl_dsd_avail--;
+ list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
+ ctx->dsd_use_cnt++;
+ ha->gbl_dsd_inuse++;
+
+ if (first_iocb) {
+ first_iocb = 0;
+ dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
+ *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
+ *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
+ cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
} else {
- WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
- req->ring_index);
- RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
+ *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
+ *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
+ *cur_dsd++ = cpu_to_le32(dsd_list_len);
+ }
+ cur_dsd = (uint32_t *)next_dsd;
+ while (avail_dsds) {
+ dma_addr_t sle_dma;
+
+ sle_dma = sg_dma_address(cur_seg);
+ *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
+ cur_seg = sg_next(cur_seg);
+ avail_dsds--;
}
}
+ /* Null termination */
+ *cur_dsd++ = 0;
+ *cur_dsd++ = 0;
+ *cur_dsd++ = 0;
+ cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
+ return 0;
}
-/**
- * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
- * Continuation Type 1 IOCBs to allocate.
+/*
+ * qla24xx_calc_dsd_lists() - Determine number of DSD lists required
+ * for Command Type 6.
*
 * @dsds: number of data segment descriptors needed
*
- * Returns the number of IOCB entries needed to store @dsds.
+ * Returns the number of DSD lists needed to store @dsds.
*/
inline uint16_t
-qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
+qla24xx_calc_dsd_lists(uint16_t dsds)
{
- uint16_t iocbs;
+ uint16_t dsd_lists = 0;
- iocbs = 1;
- if (dsds > 1) {
- iocbs += (dsds - 1) / 5;
- if ((dsds - 1) % 5)
- iocbs++;
- }
- return iocbs;
+ dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
+ if (dsds % QLA_DSDS_PER_IOCB)
+ dsd_lists++;
+ return dsd_lists;
}
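The count above is a ceiling division of the segment total by QLA_DSDS_PER_IOCB, the per-list capacity defined elsewhere in the driver. A standalone check, assuming a capacity of 37 entries purely for illustration:

#include <stdio.h>

#define DSDS_PER_LIST 37	/* illustrative stand-in for QLA_DSDS_PER_IOCB */

static unsigned short calc_dsd_lists(unsigned short dsds)
{
	/* Equivalent to (dsds + DSDS_PER_LIST - 1) / DSDS_PER_LIST */
	return dsds / DSDS_PER_LIST + (dsds % DSDS_PER_LIST ? 1 : 0);
}

int main(void)
{
	printf("%u\n", calc_dsd_lists(37));	/* 1 */
	printf("%u\n", calc_dsd_lists(38));	/* 2 */
	return 0;
}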
+
/**
* qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
* IOCB types.
@@ -684,7 +769,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
* Five DSDs are available in the Continuation
* Type 1 IOCB.
*/
- cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
+ cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
avail_dsds = 5;
}
@@ -946,6 +1031,7 @@ alloc_and_fill:
*cur_dsd++ = 0;
return 0;
}
+
static int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
uint16_t tot_dsds)
@@ -1005,7 +1091,7 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
sle_dma = sg_dma_address(sg);
ql_dbg(ql_dbg_io, vha, 0x300a,
"sg entry %d - addr=0x%x 0x%x, " "len=%d for cmd=%p.\n",
- cur_dsd, i, LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg),
+ i, LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg),
sp->cmd);
*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
@@ -1732,6 +1818,7 @@ qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
uint32_t index, handle;
request_t *pkt;
uint16_t cnt, req_cnt;
+ struct srb_ctx *ctx;
pkt = NULL;
req_cnt = 1;
@@ -1760,6 +1847,12 @@ qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
req->outstanding_cmds[handle] = sp;
sp->handle = handle;
+ /* Adjust entry-counts as needed. */
+ if (sp->ctx) {
+ ctx = sp->ctx;
+ req_cnt = ctx->iocbs;
+ }
+
skip_cmd_array:
/* Check for room on request queue. */
if (req->cnt < req_cnt) {
@@ -1794,42 +1887,6 @@ queuing_error:
}
static void
-qla2x00_start_iocbs(srb_t *sp)
-{
- struct qla_hw_data *ha = sp->fcport->vha->hw;
- struct req_que *req = ha->req_q_map[0];
- device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
- struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
-
- if (IS_QLA82XX(ha)) {
- qla82xx_start_iocbs(sp);
- } else {
- /* Adjust ring index. */
- req->ring_index++;
- if (req->ring_index == req->length) {
- req->ring_index = 0;
- req->ring_ptr = req->ring;
- } else
- req->ring_ptr++;
-
- /* Set chip new ring index. */
- if (ha->mqenable) {
- WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
- RD_REG_DWORD(&ioreg->hccr);
- } else if (IS_QLA82XX(ha)) {
- qla82xx_start_iocbs(sp);
- } else if (IS_FWI2_CAPABLE(ha)) {
- WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
- RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
- } else {
- WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
- req->ring_index);
- RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
- }
- }
-}
-
-static void
qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
struct srb_ctx *ctx = sp->ctx;
@@ -2070,7 +2127,8 @@ qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
* Five DSDs are available in the Cont.
* Type 1 IOCB.
*/
- cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
+ cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
+ vha->hw->req_q_map[0]);
cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
avail_dsds = 5;
cont_iocb_prsnt = 1;
@@ -2096,6 +2154,7 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
int index;
uint16_t tot_dsds;
scsi_qla_host_t *vha = sp->fcport->vha;
+ struct qla_hw_data *ha = vha->hw;
struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
int loop_iterartion = 0;
int cont_iocb_prsnt = 0;
@@ -2141,7 +2200,8 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
* Five DSDs are available in the Cont.
* Type 1 IOCB.
*/
- cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
+ cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
+ ha->req_q_map[0]);
cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
avail_dsds = 5;
cont_iocb_prsnt = 1;
@@ -2158,6 +2218,381 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
ct_iocb->entry_count = entry_count;
}
+/*
+ * qla82xx_start_scsi() - Send a SCSI command to the ISP
+ * @sp: command to send to the ISP
+ *
+ * Returns non-zero if a failure occurred, else zero.
+ */
+int
+qla82xx_start_scsi(srb_t *sp)
+{
+ int ret, nseg;
+ unsigned long flags;
+ struct scsi_cmnd *cmd;
+ uint32_t *clr_ptr;
+ uint32_t index;
+ uint32_t handle;
+ uint16_t cnt;
+ uint16_t req_cnt;
+ uint16_t tot_dsds;
+ struct device_reg_82xx __iomem *reg;
+ uint32_t dbval;
+ uint32_t *fcp_dl;
+ uint8_t additional_cdb_len;
+ struct ct6_dsd *ctx;
+ struct scsi_qla_host *vha = sp->fcport->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req = NULL;
+ struct rsp_que *rsp = NULL;
+ char tag[2];
+
+ /* Setup device pointers. */
+ ret = 0;
+ reg = &ha->iobase->isp82;
+ cmd = sp->cmd;
+ req = vha->req;
+ rsp = ha->rsp_q_map[0];
+
+ /* So we know we haven't pci_map'ed anything yet */
+ tot_dsds = 0;
+
+ dbval = 0x04 | (ha->portnum << 5);
+
+ /* Send marker if required */
+ if (vha->marker_needed != 0) {
+ if (qla2x00_marker(vha, req,
+ rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x300c,
+ "qla2x00_marker failed for cmd=%p.\n", cmd);
+ return QLA_FUNCTION_FAILED;
+ }
+ vha->marker_needed = 0;
+ }
+
+ /* Acquire ring specific lock */
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ /* Check for room in outstanding command list. */
+ handle = req->current_outstanding_cmd;
+ for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
+ handle++;
+ if (handle == MAX_OUTSTANDING_COMMANDS)
+ handle = 1;
+ if (!req->outstanding_cmds[handle])
+ break;
+ }
+ if (index == MAX_OUTSTANDING_COMMANDS)
+ goto queuing_error;
+
+ /* Map the sg table so we have an accurate count of sg entries needed */
+ if (scsi_sg_count(cmd)) {
+ nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
+ scsi_sg_count(cmd), cmd->sc_data_direction);
+ if (unlikely(!nseg))
+ goto queuing_error;
+ } else
+ nseg = 0;
+
+ tot_dsds = nseg;
+
+ if (tot_dsds > ql2xshiftctondsd) {
+ struct cmd_type_6 *cmd_pkt;
+ uint16_t more_dsd_lists = 0;
+ struct dsd_dma *dsd_ptr;
+ uint16_t i;
+
+ more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
+ if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
+ ql_dbg(ql_dbg_io, vha, 0x300d,
+ "Num of DSD list %d is than %d for cmd=%p.\n",
+ more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
+ cmd);
+ goto queuing_error;
+ }
+
+ if (more_dsd_lists <= ha->gbl_dsd_avail)
+ goto sufficient_dsds;
+ else
+ more_dsd_lists -= ha->gbl_dsd_avail;
+
+ for (i = 0; i < more_dsd_lists; i++) {
+ dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
+ if (!dsd_ptr) {
+ ql_log(ql_log_fatal, vha, 0x300e,
+ "Failed to allocate memory for dsd_dma "
+ "for cmd=%p.\n", cmd);
+ goto queuing_error;
+ }
+
+ dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
+ GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
+ if (!dsd_ptr->dsd_addr) {
+ kfree(dsd_ptr);
+ ql_log(ql_log_fatal, vha, 0x300f,
+ "Failed to allocate memory for dsd_addr "
+ "for cmd=%p.\n", cmd);
+ goto queuing_error;
+ }
+ list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
+ ha->gbl_dsd_avail++;
+ }
+
+sufficient_dsds:
+ req_cnt = 1;
+
+ if (req->cnt < (req_cnt + 2)) {
+ cnt = (uint16_t)RD_REG_DWORD_RELAXED(
+ &reg->req_q_out[0]);
+ if (req->ring_index < cnt)
+ req->cnt = cnt - req->ring_index;
+ else
+ req->cnt = req->length -
+ (req->ring_index - cnt);
+ }
+
+ if (req->cnt < (req_cnt + 2))
+ goto queuing_error;
+
+ ctx = sp->ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
+ if (!sp->ctx) {
+ ql_log(ql_log_fatal, vha, 0x3010,
+ "Failed to allocate ctx for cmd=%p.\n", cmd);
+ goto queuing_error;
+ }
+ memset(ctx, 0, sizeof(struct ct6_dsd));
+ ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
+ GFP_ATOMIC, &ctx->fcp_cmnd_dma);
+ if (!ctx->fcp_cmnd) {
+ ql_log(ql_log_fatal, vha, 0x3011,
+ "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
+ goto queuing_error_fcp_cmnd;
+ }
+
+ /* Initialize the DSD list and dma handle */
+ INIT_LIST_HEAD(&ctx->dsd_list);
+ ctx->dsd_use_cnt = 0;
+
+ if (cmd->cmd_len > 16) {
+ additional_cdb_len = cmd->cmd_len - 16;
+ if ((cmd->cmd_len % 4) != 0) {
+ /* SCSI commands bigger than 16 bytes must be
+ * a multiple of 4
+ */
+ ql_log(ql_log_warn, vha, 0x3012,
+ "scsi cmd len %d not multiple of 4 "
+ "for cmd=%p.\n", cmd->cmd_len, cmd);
+ goto queuing_error_fcp_cmnd;
+ }
+ ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
+ } else {
+ additional_cdb_len = 0;
+ ctx->fcp_cmnd_len = 12 + 16 + 4;
+ }
+
+ cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
+ cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+
+ /* Zero out remaining portion of packet. */
+ /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
+ clr_ptr = (uint32_t *)cmd_pkt + 2;
+ memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
+ cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
+
+ /* Set NPORT-ID and LUN number*/
+ cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
+ cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
+ cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
+ cmd_pkt->vp_index = sp->fcport->vp_idx;
+
+ /* Build IOCB segments */
+ if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
+ goto queuing_error_fcp_cmnd;
+
+ int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
+ host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
+
+ /* build FCP_CMND IU */
+ memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
+ int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun);
+ ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
+
+ if (cmd->sc_data_direction == DMA_TO_DEVICE)
+ ctx->fcp_cmnd->additional_cdb_len |= 1;
+ else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
+ ctx->fcp_cmnd->additional_cdb_len |= 2;
+
+ /*
+ * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
+ */
+ if (scsi_populate_tag_msg(cmd, tag)) {
+ switch (tag[0]) {
+ case HEAD_OF_QUEUE_TAG:
+ ctx->fcp_cmnd->task_attribute =
+ TSK_HEAD_OF_QUEUE;
+ break;
+ case ORDERED_QUEUE_TAG:
+ ctx->fcp_cmnd->task_attribute =
+ TSK_ORDERED;
+ break;
+ }
+ }
+
+ /* Populate the FCP_PRIO. */
+ if (ha->flags.fcp_prio_enabled)
+ ctx->fcp_cmnd->task_attribute |=
+ sp->fcport->fcp_prio << 3;
+
+ memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
+
+ fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
+ additional_cdb_len);
+ *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
+
+ cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
+ cmd_pkt->fcp_cmnd_dseg_address[0] =
+ cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
+ cmd_pkt->fcp_cmnd_dseg_address[1] =
+ cpu_to_le32(MSD(ctx->fcp_cmnd_dma));
+
+ sp->flags |= SRB_FCP_CMND_DMA_VALID;
+ cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
+ /* Set total data segment count. */
+ cmd_pkt->entry_count = (uint8_t)req_cnt;
+ /* Specify response queue number where
+ * completion should happen
+ */
+ cmd_pkt->entry_status = (uint8_t) rsp->id;
+ } else {
+ struct cmd_type_7 *cmd_pkt;
+ req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+ if (req->cnt < (req_cnt + 2)) {
+ cnt = (uint16_t)RD_REG_DWORD_RELAXED(
+ &reg->req_q_out[0]);
+ if (req->ring_index < cnt)
+ req->cnt = cnt - req->ring_index;
+ else
+ req->cnt = req->length -
+ (req->ring_index - cnt);
+ }
+ if (req->cnt < (req_cnt + 2))
+ goto queuing_error;
+
+ cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
+ cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+
+ /* Zero out remaining portion of packet. */
+ /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
+ clr_ptr = (uint32_t *)cmd_pkt + 2;
+ memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
+ cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
+
+ /* Set NPORT-ID and LUN number*/
+ cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
+ cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
+ cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
+ cmd_pkt->vp_index = sp->fcport->vp_idx;
+
+ int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
+ host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
+ sizeof(cmd_pkt->lun));
+
+ /*
+ * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
+ */
+ if (scsi_populate_tag_msg(cmd, tag)) {
+ switch (tag[0]) {
+ case HEAD_OF_QUEUE_TAG:
+ cmd_pkt->task = TSK_HEAD_OF_QUEUE;
+ break;
+ case ORDERED_QUEUE_TAG:
+ cmd_pkt->task = TSK_ORDERED;
+ break;
+ }
+ }
+
+ /* Populate the FCP_PRIO. */
+ if (ha->flags.fcp_prio_enabled)
+ cmd_pkt->task |= sp->fcport->fcp_prio << 3;
+
+ /* Load SCSI command packet. */
+ memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
+ host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
+
+ cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
+
+ /* Build IOCB segments */
+ qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
+
+ /* Set total data segment count. */
+ cmd_pkt->entry_count = (uint8_t)req_cnt;
+ /* Specify response queue number where
+ * completion should happen.
+ */
+ cmd_pkt->entry_status = (uint8_t) rsp->id;
+
+ }
+ /* Build command packet. */
+ req->current_outstanding_cmd = handle;
+ req->outstanding_cmds[handle] = sp;
+ sp->handle = handle;
+ sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
+ req->cnt -= req_cnt;
+ wmb();
+
+ /* Adjust ring index. */
+ req->ring_index++;
+ if (req->ring_index == req->length) {
+ req->ring_index = 0;
+ req->ring_ptr = req->ring;
+ } else
+ req->ring_ptr++;
+
+ sp->flags |= SRB_DMA_VALID;
+
+ /* Set chip new ring index. */
+ /* write, read and verify logic */
+ dbval = dbval | (req->id << 8) | (req->ring_index << 16);
+ if (ql2xdbwr)
+ qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
+ else {
+ WRT_REG_DWORD(
+ (unsigned long __iomem *)ha->nxdb_wr_ptr,
+ dbval);
+ wmb();
+ while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
+ WRT_REG_DWORD(
+ (unsigned long __iomem *)ha->nxdb_wr_ptr,
+ dbval);
+ wmb();
+ }
+ }
+
+ /* Manage unprocessed RIO/ZIO commands in response queue. */
+ if (vha->flags.process_response_queue &&
+ rsp->ring_ptr->signature != RESPONSE_PROCESSED)
+ qla24xx_process_response_queue(vha, rsp);
+
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ return QLA_SUCCESS;
+
+queuing_error_fcp_cmnd:
+ dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
+queuing_error:
+ if (tot_dsds)
+ scsi_dma_unmap(cmd);
+
+ if (sp->ctx) {
+ mempool_free(sp->ctx, ha->ctx_mempool);
+ sp->ctx = NULL;
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ return QLA_FUNCTION_FAILED;
+}
+
int
qla2x00_start_sp(srb_t *sp)
{
@@ -2194,8 +2629,8 @@ qla2x00_start_sp(srb_t *sp)
break;
case SRB_CT_CMD:
IS_FWI2_CAPABLE(ha) ?
- qla24xx_ct_iocb(sp, pkt) :
- qla2x00_ct_iocb(sp, pkt);
+ qla24xx_ct_iocb(sp, pkt) :
+ qla2x00_ct_iocb(sp, pkt);
break;
case SRB_ADISC_CMD:
IS_FWI2_CAPABLE(ha) ?
@@ -2210,7 +2645,7 @@ qla2x00_start_sp(srb_t *sp)
}
wmb();
- qla2x00_start_iocbs(sp);
+ qla2x00_start_iocbs(sp->fcport->vha, ha->req_q_map[0]);
done:
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return rval;
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 2516adf1aeea..e804585cc59c 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -242,32 +242,34 @@ static void
qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
uint16_t cnt;
+ uint32_t mboxes;
uint16_t __iomem *wptr;
struct qla_hw_data *ha = vha->hw;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+ /* Read all mbox registers? */
+ mboxes = (1 << ha->mbx_count) - 1;
+ if (!ha->mcp)
+ ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
+ else
+ mboxes = ha->mcp->in_mb;
+
/* Load return mailbox registers. */
ha->flags.mbox_int = 1;
ha->mailbox_out[0] = mb0;
+ mboxes >>= 1;
wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);
for (cnt = 1; cnt < ha->mbx_count; cnt++) {
if (IS_QLA2200(ha) && cnt == 8)
wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
- if (cnt == 4 || cnt == 5)
+ if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0))
ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
- else
+ else if (mboxes & BIT_0)
ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
wptr++;
- }
-
- if (ha->mcp) {
- ql_dbg(ql_dbg_async, vha, 0x5000,
- "Got mbx completion. cmd=%x.\n", ha->mcp->mb[0]);
- } else {
- ql_dbg(ql_dbg_async, vha, 0x5001,
- "MBX pointer ERROR.\n");
+ mboxes >>= 1;
}
}
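The in_mb bitmap consulted above records which mailbox registers the outstanding command expects back; registers whose bit is clear are skipped rather than read. A standalone sketch of the same bit-walk (hypothetical register contents):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t regs[8] = { 7, 11, 22, 33, 44, 55, 66, 77 };
	uint16_t out[8] = { 0 };
	uint32_t mboxes = 0x15;		/* caller asked for mb0, mb2 and mb4 */
	unsigned int cnt;

	for (cnt = 0; cnt < 8; cnt++) {
		if (mboxes & 1)
			out[cnt] = regs[cnt];	/* "read" only wanted registers */
		mboxes >>= 1;
	}
	for (cnt = 0; cnt < 8; cnt++)
		printf("mb%u=%u\n", cnt, out[cnt]);
	return 0;
}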
@@ -298,7 +300,7 @@ qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
return;
ql_dbg(ql_dbg_async, vha, 0x5022,
- "Inter-Driver Commucation %s -- ACK timeout=%d.\n",
+ "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n",
vha->host_no, event[aen & 0xff], timeout);
rval = qla2x00_post_idc_ack_work(vha, mb);
@@ -453,7 +455,7 @@ skip_rio:
break;
case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */
- ql_log(ql_log_info, vha, 0x5009,
+ ql_dbg(ql_dbg_async, vha, 0x5009,
"LIP occurred (%x).\n", mb[1]);
if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
@@ -487,7 +489,7 @@ skip_rio:
ha->link_data_rate = mb[1];
}
- ql_log(ql_log_info, vha, 0x500a,
+ ql_dbg(ql_dbg_async, vha, 0x500a,
"LOOP UP detected (%s Gbps).\n", link_speed);
vha->flags.management_server_logged_in = 0;
@@ -497,7 +499,7 @@ skip_rio:
case MBA_LOOP_DOWN: /* Loop Down Event */
mbx = IS_QLA81XX(ha) ? RD_REG_WORD(&reg24->mailbox4) : 0;
mbx = IS_QLA82XX(ha) ? RD_REG_WORD(&reg82->mailbox_out[4]) : mbx;
- ql_log(ql_log_info, vha, 0x500b,
+ ql_dbg(ql_dbg_async, vha, 0x500b,
"LOOP DOWN detected (%x %x %x %x).\n",
mb[1], mb[2], mb[3], mbx);
@@ -519,7 +521,7 @@ skip_rio:
break;
case MBA_LIP_RESET: /* LIP reset occurred */
- ql_log(ql_log_info, vha, 0x500c,
+ ql_dbg(ql_dbg_async, vha, 0x500c,
"LIP reset occurred (%x).\n", mb[1]);
if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
@@ -587,7 +589,7 @@ skip_rio:
if (IS_QLA2100(ha))
break;
- ql_log(ql_log_info, vha, 0x500f,
+ ql_dbg(ql_dbg_async, vha, 0x500f,
"Configuration change detected: value=%x.\n", mb[1]);
if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
@@ -920,15 +922,15 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
QLA_LOGIO_LOGIN_RETRIED : 0;
if (mbx->entry_status) {
ql_dbg(ql_dbg_async, vha, 0x5043,
- "Async-%s error entry - portid=%02x%02x%02x "
+ "Async-%s error entry - hdl=%x portid=%02x%02x%02x "
"entry-status=%x status=%x state-flag=%x "
- "status-flags=%x.\n",
- type, fcport->d_id.b.domain, fcport->d_id.b.area,
+ "status-flags=%x.\n", type, sp->handle,
+ fcport->d_id.b.domain, fcport->d_id.b.area,
fcport->d_id.b.al_pa, mbx->entry_status,
le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
le16_to_cpu(mbx->status_flags));
- ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5057,
+ ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029,
(uint8_t *)mbx, sizeof(*mbx));
goto logio_done;
@@ -940,9 +942,10 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
status = 0;
if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
ql_dbg(ql_dbg_async, vha, 0x5045,
- "Async-%s complete - portid=%02x%02x%02x mbx1=%x.\n",
- type, fcport->d_id.b.domain, fcport->d_id.b.area,
- fcport->d_id.b.al_pa, le16_to_cpu(mbx->mb1));
+ "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n",
+ type, sp->handle, fcport->d_id.b.domain,
+ fcport->d_id.b.area, fcport->d_id.b.al_pa,
+ le16_to_cpu(mbx->mb1));
data[0] = MBS_COMMAND_COMPLETE;
if (ctx->type == SRB_LOGIN_CMD) {
@@ -968,11 +971,10 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
}
ql_log(ql_log_warn, vha, 0x5046,
- "Async-%s failed - portid=%02x%02x%02x status=%x "
- "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n",
- type, fcport->d_id.b.domain,
- fcport->d_id.b.area, fcport->d_id.b.al_pa, status,
- le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
+ "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x "
+ "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle,
+ fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
+ status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
le16_to_cpu(mbx->mb7));
@@ -1036,7 +1038,7 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
bsg_job->reply->result = DID_ERROR << 16;
bsg_job->reply->reply_payload_rcv_len = 0;
}
- ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5058,
+ ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
(uint8_t *)pkt, sizeof(*pkt));
} else {
bsg_job->reply->result = DID_OK << 16;
@@ -1111,9 +1113,9 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->total_byte_count);
ql_log(ql_log_info, vha, 0x503f,
- "ELS-CT pass-through-%s error comp_status-status=0x%x "
+ "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
"error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
- type, comp_status, fw_status[1], fw_status[2],
+ type, sp->handle, comp_status, fw_status[1], fw_status[2],
le16_to_cpu(((struct els_sts_entry_24xx *)
pkt)->total_byte_count));
fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
@@ -1121,9 +1123,9 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
}
else {
ql_log(ql_log_info, vha, 0x5040,
- "ELS-CT pass-through-%s error comp_status-status=0x%x "
+ "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
"error subcode 1=0x%x error subcode 2=0x%x.\n",
- type, comp_status,
+ type, sp->handle, comp_status,
le16_to_cpu(((struct els_sts_entry_24xx *)
pkt)->error_subcode_1),
le16_to_cpu(((struct els_sts_entry_24xx *)
@@ -1184,11 +1186,12 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
QLA_LOGIO_LOGIN_RETRIED : 0;
if (logio->entry_status) {
ql_log(ql_log_warn, vha, 0x5034,
- "Async-%s error entry - "
+ "Async-%s error entry - hdl=%x"
"portid=%02x%02x%02x entry-status=%x.\n",
- type, fcport->d_id.b.domain, fcport->d_id.b.area,
- fcport->d_id.b.al_pa, logio->entry_status);
- ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5059,
+ type, sp->handle, fcport->d_id.b.domain,
+ fcport->d_id.b.area, fcport->d_id.b.al_pa,
+ logio->entry_status);
+ ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
(uint8_t *)logio, sizeof(*logio));
goto logio_done;
@@ -1196,10 +1199,9 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
ql_dbg(ql_dbg_async, vha, 0x5036,
- "Async-%s complete - portid=%02x%02x%02x "
- "iop0=%x.\n",
- type, fcport->d_id.b.domain, fcport->d_id.b.area,
- fcport->d_id.b.al_pa,
+ "Async-%s complete - hdl=%x portid=%02x%02x%02x "
+ "iop0=%x.\n", type, sp->handle, fcport->d_id.b.domain,
+ fcport->d_id.b.area, fcport->d_id.b.al_pa,
le32_to_cpu(logio->io_parameter[0]));
data[0] = MBS_COMMAND_COMPLETE;
@@ -1238,9 +1240,8 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
}
ql_dbg(ql_dbg_async, vha, 0x5037,
- "Async-%s failed - portid=%02x%02x%02x comp=%x "
- "iop0=%x iop1=%x.\n",
- type, fcport->d_id.b.domain,
+ "Async-%s failed - hdl=%x portid=%02x%02x%02x comp=%x "
+ "iop0=%x iop1=%x.\n", type, sp->handle, fcport->d_id.b.domain,
fcport->d_id.b.area, fcport->d_id.b.al_pa,
le16_to_cpu(logio->comp_status),
le32_to_cpu(logio->io_parameter[0]),
@@ -1274,25 +1275,25 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
if (sts->entry_status) {
ql_log(ql_log_warn, vha, 0x5038,
- "Async-%s error - entry-status(%x).\n",
- type, sts->entry_status);
+ "Async-%s error - hdl=%x entry-status(%x).\n",
+ type, sp->handle, sts->entry_status);
} else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
ql_log(ql_log_warn, vha, 0x5039,
- "Async-%s error - completion status(%x).\n",
- type, sts->comp_status);
+ "Async-%s error - hdl=%x completion status(%x).\n",
+ type, sp->handle, sts->comp_status);
} else if (!(le16_to_cpu(sts->scsi_status) &
SS_RESPONSE_INFO_LEN_VALID)) {
ql_log(ql_log_warn, vha, 0x503a,
- "Async-%s error - no response info(%x).\n",
- type, sts->scsi_status);
+ "Async-%s error - hdl=%x no response info(%x).\n",
+ type, sp->handle, sts->scsi_status);
} else if (le32_to_cpu(sts->rsp_data_len) < 4) {
ql_log(ql_log_warn, vha, 0x503b,
- "Async-%s error - not enough response(%d).\n",
- type, sts->rsp_data_len);
+ "Async-%s error - hdl=%x not enough response(%d).\n",
+ type, sp->handle, sts->rsp_data_len);
} else if (sts->data[3]) {
ql_log(ql_log_warn, vha, 0x503c,
- "Async-%s error - response(%x).\n",
- type, sts->data[3]);
+ "Async-%s error - hdl=%x response(%x).\n",
+ type, sp->handle, sts->data[3]);
} else {
error = 0;
}
@@ -1337,9 +1338,6 @@ qla2x00_process_response_queue(struct rsp_que *rsp)
}
if (pkt->entry_status != 0) {
- ql_log(ql_log_warn, vha, 0x5035,
- "Process error entry.\n");
-
qla2x00_error_entry(vha, rsp, pkt);
((response_t *)pkt)->signature = RESPONSE_PROCESSED;
wmb();
@@ -1391,7 +1389,6 @@ qla2x00_process_response_queue(struct rsp_que *rsp)
}
static inline void
-
qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
uint32_t sense_len, struct rsp_que *rsp)
{
@@ -1413,13 +1410,14 @@ qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
if (sp->request_sense_length != 0)
rsp->status_srb = sp;
- ql_dbg(ql_dbg_io, vha, 0x301c,
- "Check condition Sense data, scsi(%ld:%d:%d:%d) cmd=%p.\n",
- sp->fcport->vha->host_no, cp->device->channel, cp->device->id,
- cp->device->lun, cp);
- if (sense_len)
+ if (sense_len) {
+ ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
+ "Check condition Sense data, nexus%ld:%d:%d cmd=%p.\n",
+ sp->fcport->vha->host_no, cp->device->id, cp->device->lun,
+ cp);
ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
cp->sense_buffer, sense_len);
+ }
}
struct scsi_dif_tuple {
@@ -1506,7 +1504,7 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
}
if (k != blocks_done) {
- qla_printk(KERN_WARNING, sp->fcport->vha->hw,
+ ql_log(ql_log_warn, vha, 0x302f,
"unexpected tag values tag:lba=%x:%llx)\n",
e_ref_tag, (unsigned long long)lba_s);
return 1;
@@ -1611,7 +1609,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
sp = NULL;
if (sp == NULL) {
- ql_log(ql_log_warn, vha, 0x3017,
+ ql_dbg(ql_dbg_io, vha, 0x3017,
"Invalid status handle (0x%x).\n", sts->handle);
if (IS_QLA82XX(ha))
@@ -1623,7 +1621,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
}
cp = sp->cmd;
if (cp == NULL) {
- ql_log(ql_log_warn, vha, 0x3018,
+ ql_dbg(ql_dbg_io, vha, 0x3018,
"Command already returned (0x%x/%p).\n",
sts->handle, sp);
@@ -1670,7 +1668,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
par_sense_len -= rsp_info_len;
}
if (rsp_info_len > 3 && rsp_info[3]) {
- ql_log(ql_log_warn, vha, 0x3019,
+ ql_dbg(ql_dbg_io, vha, 0x3019,
"FCP I/O protocol failure (0x%x/0x%x).\n",
rsp_info_len, rsp_info[3]);
@@ -1701,7 +1699,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
if (!lscsi_status &&
((unsigned)(scsi_bufflen(cp) - resid) <
cp->underflow)) {
- ql_log(ql_log_warn, vha, 0x301a,
+ ql_dbg(ql_dbg_io, vha, 0x301a,
"Mid-layer underflow "
"detected (0x%x of 0x%x bytes).\n",
resid, scsi_bufflen(cp));
@@ -1713,7 +1711,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
cp->result = DID_OK << 16 | lscsi_status;
if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
- ql_log(ql_log_warn, vha, 0x301b,
+ ql_dbg(ql_dbg_io, vha, 0x301b,
"QUEUE FULL detected.\n");
break;
}
@@ -1735,19 +1733,19 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
scsi_set_resid(cp, resid);
if (scsi_status & SS_RESIDUAL_UNDER) {
if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
- ql_log(ql_log_warn, vha, 0x301d,
+ ql_dbg(ql_dbg_io, vha, 0x301d,
"Dropped frame(s) detected "
"(0x%x of 0x%x bytes).\n",
resid, scsi_bufflen(cp));
cp->result = DID_ERROR << 16 | lscsi_status;
- break;
+ goto check_scsi_status;
}
if (!lscsi_status &&
((unsigned)(scsi_bufflen(cp) - resid) <
cp->underflow)) {
- ql_log(ql_log_warn, vha, 0x301e,
+ ql_dbg(ql_dbg_io, vha, 0x301e,
"Mid-layer underflow "
"detected (0x%x of 0x%x bytes).\n",
resid, scsi_bufflen(cp));
@@ -1756,7 +1754,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
break;
}
} else {
- ql_log(ql_log_warn, vha, 0x301f,
+ ql_dbg(ql_dbg_io, vha, 0x301f,
"Dropped frame(s) detected (0x%x "
"of 0x%x bytes).\n", resid, scsi_bufflen(cp));
@@ -1774,7 +1772,7 @@ check_scsi_status:
*/
if (lscsi_status != 0) {
if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
- ql_log(ql_log_warn, vha, 0x3020,
+ ql_dbg(ql_dbg_io, vha, 0x3020,
"QUEUE FULL detected.\n");
logit = 1;
break;
@@ -1838,10 +1836,15 @@ out:
if (logit)
ql_dbg(ql_dbg_io, vha, 0x3022,
"FCP command status: 0x%x-0x%x (0x%x) "
- "oxid=0x%x cdb=%02x%02x%02x len=0x%x "
+ "nexus=%ld:%d:%d portid=%02x%02x%02x oxid=0x%x "
+ "cdb=%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x len=0x%x "
"rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n",
- comp_status, scsi_status, cp->result, ox_id, cp->cmnd[0],
- cp->cmnd[1], cp->cmnd[2], scsi_bufflen(cp), rsp_info_len,
+ comp_status, scsi_status, cp->result, vha->host_no,
+ cp->device->id, cp->device->lun, fcport->d_id.b.domain,
+ fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
+ cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], cp->cmnd[3],
+ cp->cmnd[4], cp->cmnd[5], cp->cmnd[6], cp->cmnd[7],
+ cp->cmnd[8], cp->cmnd[9], scsi_bufflen(cp), rsp_info_len,
resid_len, fw_resid_len);
if (rsp->status_srb == NULL)
@@ -1899,6 +1902,45 @@ qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
}
}
+static int
+qla2x00_free_sp_ctx(scsi_qla_host_t *vha, srb_t *sp)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct srb_ctx *ctx;
+
+ if (!sp->ctx)
+ return 1;
+
+ ctx = sp->ctx;
+
+ if (ctx->type == SRB_LOGIN_CMD ||
+ ctx->type == SRB_LOGOUT_CMD ||
+ ctx->type == SRB_TM_CMD) {
+ ctx->u.iocb_cmd->done(sp);
+ return 0;
+ } else if (ctx->type == SRB_ADISC_CMD) {
+ ctx->u.iocb_cmd->free(sp);
+ return 0;
+ } else {
+ struct fc_bsg_job *bsg_job;
+
+ bsg_job = ctx->u.bsg_job;
+ if (ctx->type == SRB_ELS_CMD_HST ||
+ ctx->type == SRB_CT_CMD)
+ kfree(sp->fcport);
+
+ bsg_job->reply->reply_data.ctels_reply.status =
+ FC_CTELS_STATUS_OK;
+ bsg_job->reply->result = DID_ERROR << 16;
+ bsg_job->reply->reply_payload_rcv_len = 0;
+ kfree(sp->ctx);
+ mempool_free(sp, ha->srb_mempool);
+ bsg_job->job_done(bsg_job);
+ return 0;
+ }
+ return 1;
+}
+
/**
* qla2x00_error_entry() - Process an error entry.
* @ha: SCSI driver HA context
@@ -1909,7 +1951,7 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
{
srb_t *sp;
struct qla_hw_data *ha = vha->hw;
- uint32_t handle = LSW(pkt->handle);
+ const char func[] = "ERROR-IOCB";
uint16_t que = MSW(pkt->handle);
struct req_que *req = ha->req_q_map[que];
@@ -1932,28 +1974,20 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
ql_dbg(ql_dbg_async, vha, 0x502f,
"UNKNOWN flag error.\n");
- /* Validate handle. */
- if (handle < MAX_OUTSTANDING_COMMANDS)
- sp = req->outstanding_cmds[handle];
- else
- sp = NULL;
-
+ sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
if (sp) {
- /* Free outstanding command slot. */
- req->outstanding_cmds[handle] = NULL;
-
- /* Bad payload or header */
- if (pkt->entry_status &
- (RF_INV_E_ORDER | RF_INV_E_COUNT |
- RF_INV_E_PARAM | RF_INV_E_TYPE)) {
- sp->cmd->result = DID_ERROR << 16;
- } else if (pkt->entry_status & RF_BUSY) {
- sp->cmd->result = DID_BUS_BUSY << 16;
- } else {
- sp->cmd->result = DID_ERROR << 16;
+ if (qla2x00_free_sp_ctx(vha, sp)) {
+ if (pkt->entry_status &
+ (RF_INV_E_ORDER | RF_INV_E_COUNT |
+ RF_INV_E_PARAM | RF_INV_E_TYPE)) {
+ sp->cmd->result = DID_ERROR << 16;
+ } else if (pkt->entry_status & RF_BUSY) {
+ sp->cmd->result = DID_BUS_BUSY << 16;
+ } else {
+ sp->cmd->result = DID_ERROR << 16;
+ }
+ qla2x00_sp_compl(ha, sp);
}
- qla2x00_sp_compl(ha, sp);
-
} else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type ==
COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7
|| pkt->entry_type == COMMAND_TYPE_6) {
@@ -1977,26 +2011,30 @@ static void
qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
uint16_t cnt;
+ uint32_t mboxes;
uint16_t __iomem *wptr;
struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+ /* Read all mbox registers? */
+ mboxes = (1 << ha->mbx_count) - 1;
+ if (!ha->mcp)
+ ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
+ else
+ mboxes = ha->mcp->in_mb;
+
/* Load return mailbox registers. */
ha->flags.mbox_int = 1;
ha->mailbox_out[0] = mb0;
+ mboxes >>= 1;
wptr = (uint16_t __iomem *)&reg->mailbox1;
for (cnt = 1; cnt < ha->mbx_count; cnt++) {
- ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
- wptr++;
- }
+ if (mboxes & BIT_0)
+ ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
- if (ha->mcp) {
- ql_dbg(ql_dbg_async, vha, 0x504d,
- "Got mailbox completion. cmd=%x.\n", ha->mcp->mb[0]);
- } else {
- ql_dbg(ql_dbg_async, vha, 0x504e,
- "MBX pointer ERROR.\n");
+ mboxes >>= 1;
+ wptr++;
}
}
@@ -2025,9 +2063,6 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
}
if (pkt->entry_status != 0) {
- ql_dbg(ql_dbg_async, vha, 0x5029,
- "Process error entry.\n");
-
qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
((response_t *)pkt)->signature = RESPONSE_PROCESSED;
wmb();
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 3b3cec9f6ac2..34344d3f8658 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -79,8 +79,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
mcp->mb[0] = MBS_LINK_DOWN_ERROR;
ql_log(ql_log_warn, base_vha, 0x1004,
"FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
- rval = QLA_FUNCTION_FAILED;
- goto premature_exit;
+ return QLA_FUNCTION_TIMEOUT;
}
/*
@@ -163,6 +162,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
HINT_MBX_INT_PENDING) {
spin_unlock_irqrestore(&ha->hardware_lock,
flags);
+ ha->flags.mbox_busy = 0;
ql_dbg(ql_dbg_mbx, base_vha, 0x1010,
"Pending mailbox timeout, exiting.\n");
rval = QLA_FUNCTION_TIMEOUT;
@@ -188,6 +188,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
HINT_MBX_INT_PENDING) {
spin_unlock_irqrestore(&ha->hardware_lock,
flags);
+ ha->flags.mbox_busy = 0;
ql_dbg(ql_dbg_mbx, base_vha, 0x1012,
"Pending mailbox timeout, exiting.\n");
rval = QLA_FUNCTION_TIMEOUT;
@@ -302,7 +303,15 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
!test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
-
+ if (IS_QLA82XX(ha)) {
+ ql_dbg(ql_dbg_mbx, vha, 0x112a,
+ "disabling pause transmit on port "
+ "0 & 1.\n");
+ qla82xx_wr_32(ha,
+ QLA82XX_CRB_NIU + 0x98,
+ CRB_NIU_XG_PAUSE_CTL_P0|
+ CRB_NIU_XG_PAUSE_CTL_P1);
+ }
ql_log(ql_log_info, base_vha, 0x101c,
"Mailbox cmd timeout occured. "
"Scheduling ISP abort eeh_busy=0x%x.\n",
@@ -318,7 +327,15 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
!test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
-
+ if (IS_QLA82XX(ha)) {
+ ql_dbg(ql_dbg_mbx, vha, 0x112b,
+ "disabling pause transmit on port "
+ "0 & 1.\n");
+ qla82xx_wr_32(ha,
+ QLA82XX_CRB_NIU + 0x98,
+ CRB_NIU_XG_PAUSE_CTL_P0|
+ CRB_NIU_XG_PAUSE_CTL_P1);
+ }
ql_log(ql_log_info, base_vha, 0x101e,
"Mailbox cmd timeout occured. "
"Scheduling ISP abort.\n");
@@ -2870,7 +2887,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
if (vp_idx == 0 && (MSB(stat) != 1))
goto reg_needed;
- if (MSB(stat) == 1) {
+ if (MSB(stat) != 0) {
ql_dbg(ql_dbg_mbx, vha, 0x10ba,
"Could not acquire ID for VP[%d].\n", vp_idx);
return;
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 94bded5ddce4..1cd46cd7ff90 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -369,7 +369,7 @@ qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong *off)
ql_dbg(ql_dbg_p3p, vha, 0xb000,
"%s: Written crbwin (0x%x) "
"!= Read crbwin (0x%x), off=0x%lx.\n",
- ha->crb_win, win_read, *off);
+ __func__, ha->crb_win, win_read, *off);
}
*off = (*off & MASK(16)) + CRB_INDIRECT_2M + ha->nx_pcibase;
}
@@ -409,7 +409,7 @@ qla82xx_pci_set_crbwindow(struct qla_hw_data *ha, u64 off)
}
/* strange address given */
ql_dbg(ql_dbg_p3p, vha, 0xb001,
- "%x: Warning: unm_nic_pci_set_crbwindow "
+ "%s: Warning: unm_nic_pci_set_crbwindow "
"called with an unknown address(%llx).\n",
QLA2XXX_DRIVER_NAME, off);
return off;
@@ -1055,7 +1055,7 @@ ql82xx_rom_lock_d(struct qla_hw_data *ha)
"ROM lock failed.\n");
return -1;
}
- return 0;;
+ return 0;
}
static int
@@ -1711,12 +1711,12 @@ qla82xx_iospace_config(struct qla_hw_data *ha)
ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc006,
"nx_pci_base=%p iobase=%p "
"max_req_queues=%d msix_count=%d.\n",
- ha->nx_pcibase, ha->iobase,
+ (void *)ha->nx_pcibase, ha->iobase,
ha->max_req_queues, ha->msix_count);
ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0010,
"nx_pci_base=%p iobase=%p "
"max_req_queues=%d msix_count=%d.\n",
- ha->nx_pcibase, ha->iobase,
+ (void *)ha->nx_pcibase, ha->iobase,
ha->max_req_queues, ha->msix_count);
return 0;
@@ -1744,7 +1744,7 @@ qla82xx_pci_config(scsi_qla_host_t *vha)
ret = pci_set_mwi(ha->pdev);
ha->chip_revision = ha->pdev->revision;
ql_dbg(ql_dbg_init, vha, 0x0043,
- "Chip revision:%ld.\n",
+ "Chip revision:%d.\n",
ha->chip_revision);
return 0;
}
@@ -2023,13 +2023,9 @@ qla82xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
wptr++;
}
- if (ha->mcp) {
- ql_dbg(ql_dbg_async, vha, 0x5052,
- "Got mailbox completion. cmd=%x.\n", ha->mcp->mb[0]);
- } else {
+ if (!ha->mcp)
ql_dbg(ql_dbg_async, vha, 0x5053,
"MBX pointer ERROR.\n");
- }
}
/*
@@ -2543,484 +2539,6 @@ qla82xx_start_firmware(scsi_qla_host_t *vha)
return qla82xx_check_rcvpeg_state(ha);
}
-static inline int
-qla2xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
- uint16_t tot_dsds)
-{
- uint32_t *cur_dsd = NULL;
- scsi_qla_host_t *vha;
- struct qla_hw_data *ha;
- struct scsi_cmnd *cmd;
- struct scatterlist *cur_seg;
- uint32_t *dsd_seg;
- void *next_dsd;
- uint8_t avail_dsds;
- uint8_t first_iocb = 1;
- uint32_t dsd_list_len;
- struct dsd_dma *dsd_ptr;
- struct ct6_dsd *ctx;
-
- cmd = sp->cmd;
-
- /* Update entry type to indicate Command Type 3 IOCB */
- *((uint32_t *)(&cmd_pkt->entry_type)) =
- __constant_cpu_to_le32(COMMAND_TYPE_6);
-
- /* No data transfer */
- if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
- cmd_pkt->byte_count = __constant_cpu_to_le32(0);
- return 0;
- }
-
- vha = sp->fcport->vha;
- ha = vha->hw;
-
- /* Set transfer direction */
- if (cmd->sc_data_direction == DMA_TO_DEVICE) {
- cmd_pkt->control_flags =
- __constant_cpu_to_le16(CF_WRITE_DATA);
- ha->qla_stats.output_bytes += scsi_bufflen(cmd);
- } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
- cmd_pkt->control_flags =
- __constant_cpu_to_le16(CF_READ_DATA);
- ha->qla_stats.input_bytes += scsi_bufflen(cmd);
- }
-
- cur_seg = scsi_sglist(cmd);
- ctx = sp->ctx;
-
- while (tot_dsds) {
- avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
- QLA_DSDS_PER_IOCB : tot_dsds;
- tot_dsds -= avail_dsds;
- dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;
-
- dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
- struct dsd_dma, list);
- next_dsd = dsd_ptr->dsd_addr;
- list_del(&dsd_ptr->list);
- ha->gbl_dsd_avail--;
- list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
- ctx->dsd_use_cnt++;
- ha->gbl_dsd_inuse++;
-
- if (first_iocb) {
- first_iocb = 0;
- dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
- *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
- *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
- cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
- } else {
- *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
- *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
- *cur_dsd++ = cpu_to_le32(dsd_list_len);
- }
- cur_dsd = (uint32_t *)next_dsd;
- while (avail_dsds) {
- dma_addr_t sle_dma;
-
- sle_dma = sg_dma_address(cur_seg);
- *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
- *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
- *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
- cur_seg = sg_next(cur_seg);
- avail_dsds--;
- }
- }
-
- /* Null termination */
- *cur_dsd++ = 0;
- *cur_dsd++ = 0;
- *cur_dsd++ = 0;
- cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
- return 0;
-}
-
-/*
- * qla82xx_calc_dsd_lists() - Determine number of DSD list required
- * for Command Type 6.
- *
- * @dsds: number of data segment decriptors needed
- *
- * Returns the number of dsd list needed to store @dsds.
- */
-inline uint16_t
-qla82xx_calc_dsd_lists(uint16_t dsds)
-{
- uint16_t dsd_lists = 0;
-
- dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
- if (dsds % QLA_DSDS_PER_IOCB)
- dsd_lists++;
- return dsd_lists;
-}
-
-/*
- * qla82xx_start_scsi() - Send a SCSI command to the ISP
- * @sp: command to send to the ISP
- *
- * Returns non-zero if a failure occurred, else zero.
- */
-int
-qla82xx_start_scsi(srb_t *sp)
-{
- int ret, nseg;
- unsigned long flags;
- struct scsi_cmnd *cmd;
- uint32_t *clr_ptr;
- uint32_t index;
- uint32_t handle;
- uint16_t cnt;
- uint16_t req_cnt;
- uint16_t tot_dsds;
- struct device_reg_82xx __iomem *reg;
- uint32_t dbval;
- uint32_t *fcp_dl;
- uint8_t additional_cdb_len;
- struct ct6_dsd *ctx;
- struct scsi_qla_host *vha = sp->fcport->vha;
- struct qla_hw_data *ha = vha->hw;
- struct req_que *req = NULL;
- struct rsp_que *rsp = NULL;
- char tag[2];
-
- /* Setup device pointers. */
- ret = 0;
- reg = &ha->iobase->isp82;
- cmd = sp->cmd;
- req = vha->req;
- rsp = ha->rsp_q_map[0];
-
- /* So we know we haven't pci_map'ed anything yet */
- tot_dsds = 0;
-
- dbval = 0x04 | (ha->portnum << 5);
-
- /* Send marker if required */
- if (vha->marker_needed != 0) {
- if (qla2x00_marker(vha, req,
- rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
- ql_log(ql_log_warn, vha, 0x300c,
- "qla2x00_marker failed for cmd=%p.\n", cmd);
- return QLA_FUNCTION_FAILED;
- }
- vha->marker_needed = 0;
- }
-
- /* Acquire ring specific lock */
- spin_lock_irqsave(&ha->hardware_lock, flags);
-
- /* Check for room in outstanding command list. */
- handle = req->current_outstanding_cmd;
- for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
- handle++;
- if (handle == MAX_OUTSTANDING_COMMANDS)
- handle = 1;
- if (!req->outstanding_cmds[handle])
- break;
- }
- if (index == MAX_OUTSTANDING_COMMANDS)
- goto queuing_error;
-
- /* Map the sg table so we have an accurate count of sg entries needed */
- if (scsi_sg_count(cmd)) {
- nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
- scsi_sg_count(cmd), cmd->sc_data_direction);
- if (unlikely(!nseg))
- goto queuing_error;
- } else
- nseg = 0;
-
- tot_dsds = nseg;
-
- if (tot_dsds > ql2xshiftctondsd) {
- struct cmd_type_6 *cmd_pkt;
- uint16_t more_dsd_lists = 0;
- struct dsd_dma *dsd_ptr;
- uint16_t i;
-
- more_dsd_lists = qla82xx_calc_dsd_lists(tot_dsds);
- if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
- ql_dbg(ql_dbg_io, vha, 0x300d,
- "Num of DSD list %d is than %d for cmd=%p.\n",
- more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
- cmd);
- goto queuing_error;
- }
-
- if (more_dsd_lists <= ha->gbl_dsd_avail)
- goto sufficient_dsds;
- else
- more_dsd_lists -= ha->gbl_dsd_avail;
-
- for (i = 0; i < more_dsd_lists; i++) {
- dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
- if (!dsd_ptr) {
- ql_log(ql_log_fatal, vha, 0x300e,
- "Failed to allocate memory for dsd_dma "
- "for cmd=%p.\n", cmd);
- goto queuing_error;
- }
-
- dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
- GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
- if (!dsd_ptr->dsd_addr) {
- kfree(dsd_ptr);
- ql_log(ql_log_fatal, vha, 0x300f,
- "Failed to allocate memory for dsd_addr "
- "for cmd=%p.\n", cmd);
- goto queuing_error;
- }
- list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
- ha->gbl_dsd_avail++;
- }
-
-sufficient_dsds:
- req_cnt = 1;
-
- if (req->cnt < (req_cnt + 2)) {
- cnt = (uint16_t)RD_REG_DWORD_RELAXED(
- &reg->req_q_out[0]);
- if (req->ring_index < cnt)
- req->cnt = cnt - req->ring_index;
- else
- req->cnt = req->length -
- (req->ring_index - cnt);
- }
-
- if (req->cnt < (req_cnt + 2))
- goto queuing_error;
-
- ctx = sp->ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
- if (!sp->ctx) {
- ql_log(ql_log_fatal, vha, 0x3010,
- "Failed to allocate ctx for cmd=%p.\n", cmd);
- goto queuing_error;
- }
- memset(ctx, 0, sizeof(struct ct6_dsd));
- ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
- GFP_ATOMIC, &ctx->fcp_cmnd_dma);
- if (!ctx->fcp_cmnd) {
- ql_log(ql_log_fatal, vha, 0x3011,
- "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
- goto queuing_error_fcp_cmnd;
- }
-
- /* Initialize the DSD list and dma handle */
- INIT_LIST_HEAD(&ctx->dsd_list);
- ctx->dsd_use_cnt = 0;
-
- if (cmd->cmd_len > 16) {
- additional_cdb_len = cmd->cmd_len - 16;
- if ((cmd->cmd_len % 4) != 0) {
- /* SCSI command bigger than 16 bytes must be
- * multiple of 4
- */
- ql_log(ql_log_warn, vha, 0x3012,
- "scsi cmd len %d not multiple of 4 "
- "for cmd=%p.\n", cmd->cmd_len, cmd);
- goto queuing_error_fcp_cmnd;
- }
- ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
- } else {
- additional_cdb_len = 0;
- ctx->fcp_cmnd_len = 12 + 16 + 4;
- }
-
- cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
- cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
-
- /* Zero out remaining portion of packet. */
- /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
- clr_ptr = (uint32_t *)cmd_pkt + 2;
- memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
- cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
-
- /* Set NPORT-ID and LUN number*/
- cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
- cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
- cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
- cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
- cmd_pkt->vp_index = sp->fcport->vp_idx;
-
- /* Build IOCB segments */
- if (qla2xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
- goto queuing_error_fcp_cmnd;
-
- int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
- host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
-
- /* build FCP_CMND IU */
- memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
- int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun);
- ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
-
- if (cmd->sc_data_direction == DMA_TO_DEVICE)
- ctx->fcp_cmnd->additional_cdb_len |= 1;
- else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
- ctx->fcp_cmnd->additional_cdb_len |= 2;
-
- /*
- * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
- */
- if (scsi_populate_tag_msg(cmd, tag)) {
- switch (tag[0]) {
- case HEAD_OF_QUEUE_TAG:
- ctx->fcp_cmnd->task_attribute =
- TSK_HEAD_OF_QUEUE;
- break;
- case ORDERED_QUEUE_TAG:
- ctx->fcp_cmnd->task_attribute =
- TSK_ORDERED;
- break;
- }
- }
-
- memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
-
- fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
- additional_cdb_len);
- *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
-
- cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
- cmd_pkt->fcp_cmnd_dseg_address[0] =
- cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
- cmd_pkt->fcp_cmnd_dseg_address[1] =
- cpu_to_le32(MSD(ctx->fcp_cmnd_dma));
-
- sp->flags |= SRB_FCP_CMND_DMA_VALID;
- cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
- /* Set total data segment count. */
- cmd_pkt->entry_count = (uint8_t)req_cnt;
- /* Specify response queue number where
- * completion should happen
- */
- cmd_pkt->entry_status = (uint8_t) rsp->id;
- } else {
- struct cmd_type_7 *cmd_pkt;
- req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
- if (req->cnt < (req_cnt + 2)) {
- cnt = (uint16_t)RD_REG_DWORD_RELAXED(
- &reg->req_q_out[0]);
- if (req->ring_index < cnt)
- req->cnt = cnt - req->ring_index;
- else
- req->cnt = req->length -
- (req->ring_index - cnt);
- }
- if (req->cnt < (req_cnt + 2))
- goto queuing_error;
-
- cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
- cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
-
- /* Zero out remaining portion of packet. */
- /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
- clr_ptr = (uint32_t *)cmd_pkt + 2;
- memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
- cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
-
- /* Set NPORT-ID and LUN number*/
- cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
- cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
- cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
- cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
- cmd_pkt->vp_index = sp->fcport->vp_idx;
-
- int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
- host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
- sizeof(cmd_pkt->lun));
-
- /*
- * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
- */
- if (scsi_populate_tag_msg(cmd, tag)) {
- switch (tag[0]) {
- case HEAD_OF_QUEUE_TAG:
- cmd_pkt->task = TSK_HEAD_OF_QUEUE;
- break;
- case ORDERED_QUEUE_TAG:
- cmd_pkt->task = TSK_ORDERED;
- break;
- }
- }
-
- /* Load SCSI command packet. */
- memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
- host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
-
- cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
-
- /* Build IOCB segments */
- qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
-
- /* Set total data segment count. */
- cmd_pkt->entry_count = (uint8_t)req_cnt;
- /* Specify response queue number where
- * completion should happen.
- */
- cmd_pkt->entry_status = (uint8_t) rsp->id;
-
- }
- /* Build command packet. */
- req->current_outstanding_cmd = handle;
- req->outstanding_cmds[handle] = sp;
- sp->handle = handle;
- sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
- req->cnt -= req_cnt;
- wmb();
-
- /* Adjust ring index. */
- req->ring_index++;
- if (req->ring_index == req->length) {
- req->ring_index = 0;
- req->ring_ptr = req->ring;
- } else
- req->ring_ptr++;
-
- sp->flags |= SRB_DMA_VALID;
-
- /* Set chip new ring index. */
- /* write, read and verify logic */
- dbval = dbval | (req->id << 8) | (req->ring_index << 16);
- if (ql2xdbwr)
- qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
- else {
- WRT_REG_DWORD(
- (unsigned long __iomem *)ha->nxdb_wr_ptr,
- dbval);
- wmb();
- while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
- WRT_REG_DWORD(
- (unsigned long __iomem *)ha->nxdb_wr_ptr,
- dbval);
- wmb();
- }
- }
-
- /* Manage unprocessed RIO/ZIO commands in response queue. */
- if (vha->flags.process_response_queue &&
- rsp->ring_ptr->signature != RESPONSE_PROCESSED)
- qla24xx_process_response_queue(vha, rsp);
-
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
- return QLA_SUCCESS;
-
-queuing_error_fcp_cmnd:
- dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
-queuing_error:
- if (tot_dsds)
- scsi_dma_unmap(cmd);
-
- if (sp->ctx) {
- mempool_free(sp->ctx, ha->ctx_mempool);
- sp->ctx = NULL;
- }
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
-
- return QLA_FUNCTION_FAILED;
-}
-
static uint32_t *
qla82xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
uint32_t length)
@@ -3272,9 +2790,9 @@ qla82xx_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
}
void
-qla82xx_start_iocbs(srb_t *sp)
+qla82xx_start_iocbs(scsi_qla_host_t *vha)
{
- struct qla_hw_data *ha = sp->fcport->vha->hw;
+ struct qla_hw_data *ha = vha->hw;
struct req_que *req = ha->req_q_map[0];
struct device_reg_82xx __iomem *reg;
uint32_t dbval;
@@ -3659,11 +3177,10 @@ qla82xx_check_md_needed(scsi_qla_host_t *vha)
qla82xx_md_free(vha);
/* Allocate MiniDump resources */
qla82xx_md_prep(vha);
- } else
- ql_log(ql_log_info, vha, 0xb02e,
- "Firmware dump available to retrieve\n",
- vha->host_no);
- }
+ }
+ } else
+ ql_log(ql_log_info, vha, 0xb02e,
+ "Firmware dump available to retrieve\n");
}
return rval;
}
@@ -3758,7 +3275,6 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha)
switch (dev_state) {
case QLA82XX_DEV_READY:
- qla82xx_check_md_needed(vha);
ha->flags.isp82xx_reset_owner = 0;
goto exit;
case QLA82XX_DEV_COLD:
@@ -3817,6 +3333,20 @@ exit:
return rval;
}
+void qla82xx_clear_pending_mbx(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ if (ha->flags.mbox_busy) {
+ ha->flags.mbox_int = 1;
+ ha->flags.mbox_busy = 0;
+ ql_log(ql_log_warn, vha, 0x6010,
+ "Doing premature completion of mbx command.\n");
+ if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags))
+ complete(&ha->mbx_intr_comp);
+ }
+}
+
void qla82xx_watchdog(scsi_qla_host_t *vha)
{
uint32_t dev_state, halt_status;
@@ -3839,9 +3369,13 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
qla2xxx_wake_dpc(vha);
} else {
if (qla82xx_check_fw_alive(vha)) {
+ ql_dbg(ql_dbg_timer, vha, 0x6011,
+ "disabling pause transmit on port 0 & 1.\n");
+ qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
+ CRB_NIU_XG_PAUSE_CTL_P0|CRB_NIU_XG_PAUSE_CTL_P1);
halt_status = qla82xx_rd_32(ha,
QLA82XX_PEG_HALT_STATUS1);
- ql_dbg(ql_dbg_timer, vha, 0x6005,
+ ql_log(ql_log_info, vha, 0x6005,
"dumping hw/fw registers:.\n "
" PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,.\n "
" PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,.\n "
@@ -3858,6 +3392,11 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
QLA82XX_CRB_PEG_NET_3 + 0x3c),
qla82xx_rd_32(ha,
QLA82XX_CRB_PEG_NET_4 + 0x3c));
+ if (LSW(MSB(halt_status)) == 0x67)
+ ql_log(ql_log_warn, vha, 0xb052,
+ "Firmware aborted with "
+ "error code 0x00006700. Device is "
+ "being reset.\n");
if (halt_status & HALT_STATUS_UNRECOVERABLE) {
set_bit(ISP_UNRECOVERABLE,
&vha->dpc_flags);
@@ -3869,16 +3408,8 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
}
qla2xxx_wake_dpc(vha);
ha->flags.isp82xx_fw_hung = 1;
- if (ha->flags.mbox_busy) {
- ha->flags.mbox_int = 1;
- ql_log(ql_log_warn, vha, 0x6007,
- "Due to FW hung, doing "
- "premature completion of mbx "
- "command.\n");
- if (test_bit(MBX_INTR_WAIT,
- &ha->mbx_cmd_flags))
- complete(&ha->mbx_intr_comp);
- }
+ ql_log(ql_log_warn, vha, 0x6007, "Firmware hung.\n");
+ qla82xx_clear_pending_mbx(vha);
}
}
}
@@ -4052,7 +3583,7 @@ int qla2x00_wait_for_fcoe_ctx_reset(scsi_qla_host_t *vha)
}
}
ql_dbg(ql_dbg_p3p, vha, 0xb027,
- "%s status=%d.\n", status);
+ "%s: status=%d.\n", __func__, status);
return status;
}
@@ -4073,10 +3604,7 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
msleep(1000);
if (qla82xx_check_fw_alive(vha)) {
ha->flags.isp82xx_fw_hung = 1;
- if (ha->flags.mbox_busy) {
- ha->flags.mbox_int = 1;
- complete(&ha->mbx_intr_comp);
- }
+ qla82xx_clear_pending_mbx(vha);
break;
}
}
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h
index 57820c199bc2..57a226be339a 100644
--- a/drivers/scsi/qla2xxx/qla_nx.h
+++ b/drivers/scsi/qla2xxx/qla_nx.h
@@ -1173,4 +1173,8 @@ struct qla82xx_md_entry_queue {
static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8, 0x410000AC,
0x410000B8, 0x410000BC };
+
+#define CRB_NIU_XG_PAUSE_CTL_P0 0x1
+#define CRB_NIU_XG_PAUSE_CTL_P1 0x8
+
#endif
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index fd14c7bfc626..4ed1e4a96b95 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -83,6 +83,9 @@ MODULE_PARM_DESC(ql2xextended_error_logging,
"\t\t0x00080000 - P3P Specific. 0x00040000 - Virtual Port.\n"
"\t\t0x00020000 - Buffer Dump. 0x00010000 - Misc.\n"
"\t\t0x7fffffff - For enabling all logs, can be too many logs.\n"
+ "\t\t0x1e400000 - Preferred value for capturing essential "
+ "debug information (equivalent to old "
+ "ql2xextended_error_logging=1).\n"
"\t\tDo LOGICAL OR of the value to enable more than one level");
int ql2xshiftctondsd = 6;
@@ -199,14 +202,14 @@ int ql2xmdcapmask = 0x1F;
module_param(ql2xmdcapmask, int, S_IRUGO);
MODULE_PARM_DESC(ql2xmdcapmask,
"Set the Minidump driver capture mask level. "
- "Default is 0x7F - Can be set to 0x3, 0x7, 0xF, 0x1F, 0x7F.");
+ "Default is 0x1F - Can be set to 0x3, 0x7, 0xF, 0x1F, 0x7F.");
-int ql2xmdenable;
+int ql2xmdenable = 1;
module_param(ql2xmdenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xmdenable,
"Enable/disable MiniDump. "
- "0 (Default) - MiniDump disabled. "
- "1 - MiniDump enabled.");
+ "0 - MiniDump disabled. "
+ "1 (Default) - MiniDump enabled.");
/*
* SCSI host template entry points
@@ -423,6 +426,7 @@ fail2:
qla25xx_delete_queues(vha);
destroy_workqueue(ha->wq);
ha->wq = NULL;
+ vha->req = ha->req_q_map[0];
fail:
ha->mqenable = 0;
kfree(ha->req_q_map);
@@ -814,49 +818,6 @@ qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha)
return return_status;
}
-/*
- * qla2x00_wait_for_loop_ready
- * Wait for MAX_LOOP_TIMEOUT(5 min) value for loop
- * to be in LOOP_READY state.
- * Input:
- * ha - pointer to host adapter structure
- *
- * Note:
- * Does context switching-Release SPIN_LOCK
- * (if any) before calling this routine.
- *
- *
- * Return:
- * Success (LOOP_READY) : 0
- * Failed (LOOP_NOT_READY) : 1
- */
-static inline int
-qla2x00_wait_for_loop_ready(scsi_qla_host_t *vha)
-{
- int return_status = QLA_SUCCESS;
- unsigned long loop_timeout ;
- struct qla_hw_data *ha = vha->hw;
- scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
-
- /* wait for 5 min at the max for loop to be ready */
- loop_timeout = jiffies + (MAX_LOOP_TIMEOUT * HZ);
-
- while ((!atomic_read(&base_vha->loop_down_timer) &&
- atomic_read(&base_vha->loop_state) == LOOP_DOWN) ||
- atomic_read(&base_vha->loop_state) != LOOP_READY) {
- if (atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
- return_status = QLA_FUNCTION_FAILED;
- break;
- }
- msleep(1000);
- if (time_after_eq(jiffies, loop_timeout)) {
- return_status = QLA_FUNCTION_FAILED;
- break;
- }
- }
- return (return_status);
-}
-
static void
sp_get(struct srb *sp)
{
@@ -889,14 +850,10 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
int wait = 0;
struct qla_hw_data *ha = vha->hw;
- ql_dbg(ql_dbg_taskm, vha, 0x8000,
- "Entered %s for cmd=%p.\n", __func__, cmd);
if (!CMD_SP(cmd))
return SUCCESS;
ret = fc_block_scsi_eh(cmd);
- ql_dbg(ql_dbg_taskm, vha, 0x8001,
- "Return value of fc_block_scsi_eh=%d.\n", ret);
if (ret != 0)
return ret;
ret = SUCCESS;
@@ -912,7 +869,8 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
}
ql_dbg(ql_dbg_taskm, vha, 0x8002,
- "Aborting sp=%p cmd=%p from RISC ", sp, cmd);
+ "Aborting from RISC nexus=%ld:%d:%d sp=%p cmd=%p\n",
+ vha->host_no, id, lun, sp, cmd);
/* Get a reference to the sp and drop the lock.*/
sp_get(sp);
@@ -920,10 +878,10 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (ha->isp_ops->abort_command(sp)) {
ql_dbg(ql_dbg_taskm, vha, 0x8003,
- "Abort command mbx failed for cmd=%p.\n", cmd);
+ "Abort command mbx failed cmd=%p.\n", cmd);
} else {
ql_dbg(ql_dbg_taskm, vha, 0x8004,
- "Abort command mbx success.\n");
+ "Abort command mbx success cmd=%p.\n", cmd);
wait = 1;
}
@@ -939,13 +897,14 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
if (wait) {
if (qla2x00_eh_wait_on_command(cmd) != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x8006,
- "Abort handler timed out for cmd=%p.\n", cmd);
+ "Abort handler timed out cmd=%p.\n", cmd);
ret = FAILED;
}
}
ql_log(ql_log_info, vha, 0x801c,
- "Abort command issued -- %d %x.\n", wait, ret);
+ "Abort command issued nexus=%ld:%d:%d -- %d %x.\n",
+ vha->host_no, id, lun, wait, ret);
return ret;
}
@@ -1014,19 +973,15 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
int err;
if (!fcport) {
- ql_log(ql_log_warn, vha, 0x8007,
- "fcport is NULL.\n");
return FAILED;
}
err = fc_block_scsi_eh(cmd);
- ql_dbg(ql_dbg_taskm, vha, 0x8008,
- "fc_block_scsi_eh ret=%d.\n", err);
if (err != 0)
return err;
ql_log(ql_log_info, vha, 0x8009,
- "%s RESET ISSUED for id %d lun %d cmd=%p.\n", name,
+ "%s RESET ISSUED nexus=%ld:%d:%d cmd=%p.\n", name, vha->host_no,
cmd->device->id, cmd->device->lun, cmd);
err = 0;
@@ -1035,12 +990,6 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
"Wait for hba online failed for cmd=%p.\n", cmd);
goto eh_reset_failed;
}
- err = 1;
- if (qla2x00_wait_for_loop_ready(vha) != QLA_SUCCESS) {
- ql_log(ql_log_warn, vha, 0x800b,
- "Wait for loop ready failed for cmd=%p.\n", cmd);
- goto eh_reset_failed;
- }
err = 2;
if (do_reset(fcport, cmd->device->lun, cmd->request->cpu + 1)
!= QLA_SUCCESS) {
@@ -1057,15 +1006,16 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
}
ql_log(ql_log_info, vha, 0x800e,
- "%s RESET SUCCEEDED for id %d lun %d cmd=%p.\n", name,
- cmd->device->id, cmd->device->lun, cmd);
+ "%s RESET SUCCEEDED nexus:%ld:%d:%d cmd=%p.\n", name,
+ vha->host_no, cmd->device->id, cmd->device->lun, cmd);
return SUCCESS;
eh_reset_failed:
ql_log(ql_log_info, vha, 0x800f,
- "%s RESET FAILED: %s for id %d lun %d cmd=%p.\n", name,
- reset_errors[err], cmd->device->id, cmd->device->lun);
+ "%s RESET FAILED: %s nexus=%ld:%d:%d cmd=%p.\n", name,
+ reset_errors[err], vha->host_no, cmd->device->id, cmd->device->lun,
+ cmd);
return FAILED;
}
@@ -1116,20 +1066,16 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
lun = cmd->device->lun;
if (!fcport) {
- ql_log(ql_log_warn, vha, 0x8010,
- "fcport is NULL.\n");
return ret;
}
ret = fc_block_scsi_eh(cmd);
- ql_dbg(ql_dbg_taskm, vha, 0x8011,
- "fc_block_scsi_eh ret=%d.\n", ret);
if (ret != 0)
return ret;
ret = FAILED;
ql_log(ql_log_info, vha, 0x8012,
- "BUS RESET ISSUED for id %d lun %d.\n", id, lun);
+ "BUS RESET ISSUED nexus=%ld:%d%d.\n", vha->host_no, id, lun);
if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
ql_log(ql_log_fatal, vha, 0x8013,
@@ -1137,10 +1083,9 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
goto eh_bus_reset_done;
}
- if (qla2x00_wait_for_loop_ready(vha) == QLA_SUCCESS) {
- if (qla2x00_loop_reset(vha) == QLA_SUCCESS)
- ret = SUCCESS;
- }
+ if (qla2x00_loop_reset(vha) == QLA_SUCCESS)
+ ret = SUCCESS;
+
if (ret == FAILED)
goto eh_bus_reset_done;
@@ -1154,7 +1099,8 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
eh_bus_reset_done:
ql_log(ql_log_warn, vha, 0x802b,
- "BUS RESET %s.\n", (ret == FAILED) ? "FAILED" : "SUCCEDED");
+ "BUS RESET %s nexus=%ld:%d:%d.\n",
+ (ret == FAILED) ? "FAILED" : "SUCCEDED", vha->host_no, id, lun);
return ret;
}
@@ -1188,33 +1134,20 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
lun = cmd->device->lun;
if (!fcport) {
- ql_log(ql_log_warn, vha, 0x8016,
- "fcport is NULL.\n");
return ret;
}
ret = fc_block_scsi_eh(cmd);
- ql_dbg(ql_dbg_taskm, vha, 0x8017,
- "fc_block_scsi_eh ret=%d.\n", ret);
if (ret != 0)
return ret;
ret = FAILED;
ql_log(ql_log_info, vha, 0x8018,
- "ADAPTER RESET ISSUED for id %d lun %d.\n", id, lun);
+ "ADAPTER RESET ISSUED nexus=%ld:%d:%d.\n", vha->host_no, id, lun);
if (qla2x00_wait_for_reset_ready(vha) != QLA_SUCCESS)
goto eh_host_reset_lock;
- /*
- * Fixme-may be dpc thread is active and processing
- * loop_resync,so wait a while for it to
- * be completed and then issue big hammer.Otherwise
- * it may cause I/O failure as big hammer marks the
- * devices as lost kicking of the port_down_timer
- * while dpc is stuck for the mailbox to complete.
- */
- qla2x00_wait_for_loop_ready(vha);
if (vha != base_vha) {
if (qla2x00_vp_abort_isp(vha))
goto eh_host_reset_lock;
@@ -1251,8 +1184,9 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
ret = SUCCESS;
eh_host_reset_lock:
- qla_printk(KERN_INFO, ha, "%s: reset %s.\n", __func__,
- (ret == FAILED) ? "failed" : "succeeded");
+ ql_log(ql_log_info, vha, 0x8017,
+ "ADAPTER RESET %s nexus=%ld:%d:%d.\n",
+ (ret == FAILED) ? "FAILED" : "SUCCEEDED", vha->host_no, id, lun);
return ret;
}
@@ -1297,16 +1231,13 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
atomic_set(&vha->loop_state, LOOP_DOWN);
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
qla2x00_mark_all_devices_lost(vha, 0);
- qla2x00_wait_for_loop_ready(vha);
}
if (ha->flags.enable_lip_reset) {
ret = qla2x00_lip_reset(vha);
- if (ret != QLA_SUCCESS) {
+ if (ret != QLA_SUCCESS)
ql_dbg(ql_dbg_taskm, vha, 0x802e,
"lip_reset failed (%d).\n", ret);
- } else
- qla2x00_wait_for_loop_ready(vha);
}
/* Issue marker command only when we are going to start the I/O */
@@ -1405,10 +1336,8 @@ static void qla2x00_handle_queue_full(struct scsi_device *sdev, int qdepth)
return;
ql_dbg(ql_dbg_io, fcport->vha, 0x3029,
- "Queue depth adjusted-down "
- "to %d for scsi(%ld:%d:%d:%d).\n",
- sdev->queue_depth, fcport->vha->host_no,
- sdev->channel, sdev->id, sdev->lun);
+ "Queue depth adjusted-down to %d for nexus=%ld:%d:%d.\n",
+ sdev->queue_depth, fcport->vha->host_no, sdev->id, sdev->lun);
}
static void qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, int qdepth)
@@ -1430,10 +1359,8 @@ static void qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, int qdepth)
scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, qdepth);
ql_dbg(ql_dbg_io, vha, 0x302a,
- "Queue depth adjusted-up to %d for "
- "scsi(%ld:%d:%d:%d).\n",
- sdev->queue_depth, fcport->vha->host_no,
- sdev->channel, sdev->id, sdev->lun);
+ "Queue depth adjusted-up to %d for nexus=%ld:%d:%d.\n",
+ sdev->queue_depth, fcport->vha->host_no, sdev->id, sdev->lun);
}
static int
@@ -1557,6 +1484,118 @@ qla24xx_disable_intrs(struct qla_hw_data *ha)
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
+static int
+qla2x00_iospace_config(struct qla_hw_data *ha)
+{
+ resource_size_t pio;
+ uint16_t msix;
+ int cpus;
+
+ if (IS_QLA82XX(ha))
+ return qla82xx_iospace_config(ha);
+
+ if (pci_request_selected_regions(ha->pdev, ha->bars,
+ QLA2XXX_DRIVER_NAME)) {
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x0011,
+ "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
+ pci_name(ha->pdev));
+ goto iospace_error_exit;
+ }
+ if (!(ha->bars & 1))
+ goto skip_pio;
+
+ /* We only need PIO for Flash operations on ISP2312 v2 chips. */
+ pio = pci_resource_start(ha->pdev, 0);
+ if (pci_resource_flags(ha->pdev, 0) & IORESOURCE_IO) {
+ if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) {
+ ql_log_pci(ql_log_warn, ha->pdev, 0x0012,
+ "Invalid pci I/O region size (%s).\n",
+ pci_name(ha->pdev));
+ pio = 0;
+ }
+ } else {
+ ql_log_pci(ql_log_warn, ha->pdev, 0x0013,
+ "Region #0 no a PIO resource (%s).\n",
+ pci_name(ha->pdev));
+ pio = 0;
+ }
+ ha->pio_address = pio;
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0014,
+ "PIO address=%llu.\n",
+ (unsigned long long)ha->pio_address);
+
+skip_pio:
+ /* Use MMIO operations for all accesses. */
+ if (!(pci_resource_flags(ha->pdev, 1) & IORESOURCE_MEM)) {
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x0015,
+ "Region #1 not an MMIO resource (%s), aborting.\n",
+ pci_name(ha->pdev));
+ goto iospace_error_exit;
+ }
+ if (pci_resource_len(ha->pdev, 1) < MIN_IOBASE_LEN) {
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x0016,
+ "Invalid PCI mem region size (%s), aborting.\n",
+ pci_name(ha->pdev));
+ goto iospace_error_exit;
+ }
+
+ ha->iobase = ioremap(pci_resource_start(ha->pdev, 1), MIN_IOBASE_LEN);
+ if (!ha->iobase) {
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x0017,
+ "Cannot remap MMIO (%s), aborting.\n",
+ pci_name(ha->pdev));
+ goto iospace_error_exit;
+ }
+
+ /* Determine queue resources */
+ ha->max_req_queues = ha->max_rsp_queues = 1;
+ if ((ql2xmaxqueues <= 1 && !ql2xmultique_tag) ||
+ (ql2xmaxqueues > 1 && ql2xmultique_tag) ||
+ (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
+ goto mqiobase_exit;
+
+ ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
+ pci_resource_len(ha->pdev, 3));
+ if (ha->mqiobase) {
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0018,
+ "MQIO Base=%p.\n", ha->mqiobase);
+ /* Read MSIX vector size of the board */
+ pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix);
+ ha->msix_count = msix;
+ /* Max queues are bounded by available msix vectors */
+ /* queue 0 uses two msix vectors */
+ if (ql2xmultique_tag) {
+ cpus = num_online_cpus();
+ ha->max_rsp_queues = (ha->msix_count - 1 > cpus) ?
+ (cpus + 1) : (ha->msix_count - 1);
+ ha->max_req_queues = 2;
+ } else if (ql2xmaxqueues > 1) {
+ ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ?
+ QLA_MQ_SIZE : ql2xmaxqueues;
+ ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc008,
+ "QoS mode set, max no of request queues:%d.\n",
+ ha->max_req_queues);
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0019,
+ "QoS mode set, max no of request queues:%d.\n",
+ ha->max_req_queues);
+ }
+ ql_log_pci(ql_log_info, ha->pdev, 0x001a,
+ "MSI-X vector count: %d.\n", msix);
+ } else
+ ql_log_pci(ql_log_info, ha->pdev, 0x001b,
+ "BAR 3 not enabled.\n");
+
+mqiobase_exit:
+ ha->msix_count = ha->max_rsp_queues + 1;
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x001c,
+ "MSIX Count:%d.\n", ha->msix_count);
+ return (0);
+
+iospace_error_exit:
+ return (-ENOMEM);
+}
+
+
static struct isp_operations qla2100_isp_ops = {
.pci_config = qla2100_pci_config,
.reset_chip = qla2x00_reset_chip,
@@ -1591,6 +1630,7 @@ static struct isp_operations qla2100_isp_ops = {
.get_flash_version = qla2x00_get_flash_version,
.start_scsi = qla2x00_start_scsi,
.abort_isp = qla2x00_abort_isp,
+ .iospace_config = qla2x00_iospace_config,
};
static struct isp_operations qla2300_isp_ops = {
@@ -1627,6 +1667,7 @@ static struct isp_operations qla2300_isp_ops = {
.get_flash_version = qla2x00_get_flash_version,
.start_scsi = qla2x00_start_scsi,
.abort_isp = qla2x00_abort_isp,
+ .iospace_config = qla2x00_iospace_config,
};
static struct isp_operations qla24xx_isp_ops = {
@@ -1663,6 +1704,7 @@ static struct isp_operations qla24xx_isp_ops = {
.get_flash_version = qla24xx_get_flash_version,
.start_scsi = qla24xx_start_scsi,
.abort_isp = qla2x00_abort_isp,
+ .iospace_config = qla2x00_iospace_config,
};
static struct isp_operations qla25xx_isp_ops = {
@@ -1699,6 +1741,7 @@ static struct isp_operations qla25xx_isp_ops = {
.get_flash_version = qla24xx_get_flash_version,
.start_scsi = qla24xx_dif_start_scsi,
.abort_isp = qla2x00_abort_isp,
+ .iospace_config = qla2x00_iospace_config,
};
static struct isp_operations qla81xx_isp_ops = {
@@ -1735,6 +1778,7 @@ static struct isp_operations qla81xx_isp_ops = {
.get_flash_version = qla24xx_get_flash_version,
.start_scsi = qla24xx_dif_start_scsi,
.abort_isp = qla2x00_abort_isp,
+ .iospace_config = qla2x00_iospace_config,
};
static struct isp_operations qla82xx_isp_ops = {
@@ -1771,6 +1815,7 @@ static struct isp_operations qla82xx_isp_ops = {
.get_flash_version = qla24xx_get_flash_version,
.start_scsi = qla82xx_start_scsi,
.abort_isp = qla82xx_abort_isp,
+ .iospace_config = qla82xx_iospace_config,
};
static inline void
@@ -1880,121 +1925,10 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
else
ha->flags.port0 = 0;
ql_dbg_pci(ql_dbg_init, ha->pdev, 0x000b,
- "device_type=0x%x port=%d fw_srisc_address=%p.\n",
+ "device_type=0x%x port=%d fw_srisc_address=0x%x.\n",
ha->device_type, ha->flags.port0, ha->fw_srisc_address);
}
-static int
-qla2x00_iospace_config(struct qla_hw_data *ha)
-{
- resource_size_t pio;
- uint16_t msix;
- int cpus;
-
- if (IS_QLA82XX(ha))
- return qla82xx_iospace_config(ha);
-
- if (pci_request_selected_regions(ha->pdev, ha->bars,
- QLA2XXX_DRIVER_NAME)) {
- ql_log_pci(ql_log_fatal, ha->pdev, 0x0011,
- "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
- pci_name(ha->pdev));
- goto iospace_error_exit;
- }
- if (!(ha->bars & 1))
- goto skip_pio;
-
- /* We only need PIO for Flash operations on ISP2312 v2 chips. */
- pio = pci_resource_start(ha->pdev, 0);
- if (pci_resource_flags(ha->pdev, 0) & IORESOURCE_IO) {
- if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) {
- ql_log_pci(ql_log_warn, ha->pdev, 0x0012,
- "Invalid pci I/O region size (%s).\n",
- pci_name(ha->pdev));
- pio = 0;
- }
- } else {
- ql_log_pci(ql_log_warn, ha->pdev, 0x0013,
- "Region #0 no a PIO resource (%s).\n",
- pci_name(ha->pdev));
- pio = 0;
- }
- ha->pio_address = pio;
- ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0014,
- "PIO address=%p.\n",
- ha->pio_address);
-
-skip_pio:
- /* Use MMIO operations for all accesses. */
- if (!(pci_resource_flags(ha->pdev, 1) & IORESOURCE_MEM)) {
- ql_log_pci(ql_log_fatal, ha->pdev, 0x0015,
- "Region #1 not an MMIO resource (%s), aborting.\n",
- pci_name(ha->pdev));
- goto iospace_error_exit;
- }
- if (pci_resource_len(ha->pdev, 1) < MIN_IOBASE_LEN) {
- ql_log_pci(ql_log_fatal, ha->pdev, 0x0016,
- "Invalid PCI mem region size (%s), aborting.\n",
- pci_name(ha->pdev));
- goto iospace_error_exit;
- }
-
- ha->iobase = ioremap(pci_resource_start(ha->pdev, 1), MIN_IOBASE_LEN);
- if (!ha->iobase) {
- ql_log_pci(ql_log_fatal, ha->pdev, 0x0017,
- "Cannot remap MMIO (%s), aborting.\n",
- pci_name(ha->pdev));
- goto iospace_error_exit;
- }
-
- /* Determine queue resources */
- ha->max_req_queues = ha->max_rsp_queues = 1;
- if ((ql2xmaxqueues <= 1 && !ql2xmultique_tag) ||
- (ql2xmaxqueues > 1 && ql2xmultique_tag) ||
- (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
- goto mqiobase_exit;
-
- ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
- pci_resource_len(ha->pdev, 3));
- if (ha->mqiobase) {
- ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0018,
- "MQIO Base=%p.\n", ha->mqiobase);
- /* Read MSIX vector size of the board */
- pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix);
- ha->msix_count = msix;
- /* Max queues are bounded by available msix vectors */
- /* queue 0 uses two msix vectors */
- if (ql2xmultique_tag) {
- cpus = num_online_cpus();
- ha->max_rsp_queues = (ha->msix_count - 1 > cpus) ?
- (cpus + 1) : (ha->msix_count - 1);
- ha->max_req_queues = 2;
- } else if (ql2xmaxqueues > 1) {
- ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ?
- QLA_MQ_SIZE : ql2xmaxqueues;
- ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc008,
- "QoS mode set, max no of request queues:%d.\n",
- ha->max_req_queues);
- ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0019,
- "QoS mode set, max no of request queues:%d.\n",
- ha->max_req_queues);
- }
- ql_log_pci(ql_log_info, ha->pdev, 0x001a,
- "MSI-X vector count: %d.\n", msix);
- } else
- ql_log_pci(ql_log_info, ha->pdev, 0x001b,
- "BAR 3 not enabled.\n");
-
-mqiobase_exit:
- ha->msix_count = ha->max_rsp_queues + 1;
- ql_dbg_pci(ql_dbg_init, ha->pdev, 0x001c,
- "MSIX Count:%d.\n", ha->msix_count);
- return (0);
-
-iospace_error_exit:
- return (-ENOMEM);
-}
-
static void
qla2xxx_scan_start(struct Scsi_Host *shost)
{
@@ -2093,14 +2027,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
pdev->needs_freset = 1;
}
- /* Configure PCI I/O space */
- ret = qla2x00_iospace_config(ha);
- if (ret)
- goto probe_hw_failed;
-
- ql_log_pci(ql_log_info, pdev, 0x001d,
- "Found an ISP%04X irq %d iobase 0x%p.\n",
- pdev->device, pdev->irq, ha->iobase);
ha->prev_topology = 0;
ha->init_cb_size = sizeof(init_cb_t);
ha->link_data_rate = PORT_SPEED_UNKNOWN;
@@ -2213,6 +2139,15 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
"flash_data_off=%d, nvram_conf_off=%d, nvram_data_off=%d.\n",
ha->isp_ops, ha->flash_conf_off, ha->flash_data_off,
ha->nvram_conf_off, ha->nvram_data_off);
+
+ /* Configure PCI I/O space */
+ ret = ha->isp_ops->iospace_config(ha);
+ if (ret)
+ goto probe_hw_failed;
+
+ ql_log_pci(ql_log_info, pdev, 0x001d,
+ "Found an ISP%04X irq %d iobase 0x%p.\n",
+ pdev->device, pdev->irq, ha->iobase);
mutex_init(&ha->vport_lock);
init_completion(&ha->mbx_cmd_comp);
complete(&ha->mbx_cmd_comp);
@@ -2288,7 +2223,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ql_dbg(ql_dbg_init, base_vha, 0x0033,
"max_id=%d this_id=%d "
"cmd_per_len=%d unique_id=%d max_cmd_len=%d max_channel=%d "
- "max_lun=%d transportt=%p, vendor_id=%d.\n", host->max_id,
+ "max_lun=%d transportt=%p, vendor_id=%llu.\n", host->max_id,
host->this_id, host->cmd_per_lun, host->unique_id,
host->max_cmd_len, host->max_channel, host->max_lun,
host->transportt, sht->vendor_id);
@@ -2443,9 +2378,6 @@ skip_dpc:
qla2x00_dfs_setup(base_vha);
- ql_log(ql_log_info, base_vha, 0x00fa,
- "QLogic Fibre Channed HBA Driver: %s.\n",
- qla2x00_version_str);
ql_log(ql_log_info, base_vha, 0x00fb,
"QLogic %s - %s.\n",
ha->model_number, ha->model_desc ? ha->model_desc : "");
@@ -2894,7 +2826,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
if (!ha->sns_cmd)
goto fail_dma_pool;
ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0026,
- "sns_cmd.\n", ha->sns_cmd);
+ "sns_cmd: %p.\n", ha->sns_cmd);
} else {
/* Get consistent memory allocated for MS IOCB */
ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
@@ -3521,27 +3453,21 @@ qla2x00_do_dpc(void *data)
schedule();
__set_current_state(TASK_RUNNING);
- ql_dbg(ql_dbg_dpc, base_vha, 0x4001,
- "DPC handler waking up.\n");
- ql_dbg(ql_dbg_dpc, base_vha, 0x4002,
- "dpc_flags=0x%lx.\n", base_vha->dpc_flags);
-
- /* Initialization not yet finished. Don't do anything yet. */
- if (!base_vha->flags.init_done)
- continue;
+ if (!base_vha->flags.init_done || ha->flags.mbox_busy)
+ goto end_loop;
if (ha->flags.eeh_busy) {
ql_dbg(ql_dbg_dpc, base_vha, 0x4003,
"eeh_busy=%d.\n", ha->flags.eeh_busy);
- continue;
+ goto end_loop;
}
ha->dpc_active = 1;
- if (ha->flags.mbox_busy) {
- ha->dpc_active = 0;
- continue;
- }
+ ql_dbg(ql_dbg_dpc, base_vha, 0x4001,
+ "DPC handler waking up.\n");
+ ql_dbg(ql_dbg_dpc, base_vha, 0x4002,
+ "dpc_flags=0x%lx.\n", base_vha->dpc_flags);
qla2x00_do_work(base_vha);
@@ -3683,6 +3609,7 @@ qla2x00_do_dpc(void *data)
qla2x00_do_dpc_all_vps(base_vha);
ha->dpc_active = 0;
+end_loop:
set_current_state(TASK_INTERRUPTIBLE);
} /* End of while(1) */
__set_current_state(TASK_RUNNING);
@@ -3766,16 +3693,6 @@ qla2x00_sp_free_dma(srb_t *sp)
sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
}
- CMD_SP(cmd) = NULL;
-}
-
-static void
-qla2x00_sp_final_compl(struct qla_hw_data *ha, srb_t *sp)
-{
- struct scsi_cmnd *cmd = sp->cmd;
-
- qla2x00_sp_free_dma(sp);
-
if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
struct ct6_dsd *ctx = sp->ctx;
dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd,
@@ -3787,6 +3704,15 @@ qla2x00_sp_final_compl(struct qla_hw_data *ha, srb_t *sp)
sp->ctx = NULL;
}
+ CMD_SP(cmd) = NULL;
+}
+
+static void
+qla2x00_sp_final_compl(struct qla_hw_data *ha, srb_t *sp)
+{
+ struct scsi_cmnd *cmd = sp->cmd;
+
+ qla2x00_sp_free_dma(sp);
mempool_free(sp, ha->srb_mempool);
cmd->scsi_done(cmd);
}
@@ -4070,13 +3996,8 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
/* For ISP82XX complete any pending mailbox cmd */
if (IS_QLA82XX(ha)) {
ha->flags.isp82xx_fw_hung = 1;
- if (ha->flags.mbox_busy) {
- ha->flags.mbox_int = 1;
- ql_dbg(ql_dbg_aer, vha, 0x9001,
- "Due to pci channel io frozen, doing premature "
- "completion of mbx command.\n");
- complete(&ha->mbx_intr_comp);
- }
+ ql_dbg(ql_dbg_aer, vha, 0x9001, "Pci channel io frozen\n");
+ qla82xx_clear_pending_mbx(vha);
}
qla2x00_free_irqs(vha);
pci_disable_device(pdev);
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index eff13563c82d..16bc72844a97 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -904,8 +904,9 @@ no_flash_data:
}
done:
ql_dbg(ql_dbg_init, vha, 0x004d,
- "FDT[%x]: (0x%x/0x%x) erase=0x%x "
- "pr=%x upro=%x wrtd=0x%x blk=0x%x.\n", loc, mid, fid,
+ "FDT[%s]: (0x%x/0x%x) erase=0x%x "
+ "pr=%x wrtd=0x%x blk=0x%x.\n",
+ loc, mid, fid,
ha->fdt_erase_cmd, ha->fdt_protect_sec_cmd,
ha->fdt_wrt_disable, ha->fdt_block_size);
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 13b6357c1fa2..23f33a6d52d7 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "8.03.07.07-k"
+#define QLA2XXX_VERSION "8.03.07.12-k"
#define QLA_DRIVER_MAJOR_VER 8
#define QLA_DRIVER_MINOR_VER 3
diff --git a/drivers/scsi/qla4xxx/ql4_dbg.c b/drivers/scsi/qla4xxx/ql4_dbg.c
index af62c3cf8752..8d58ae274829 100644
--- a/drivers/scsi/qla4xxx/ql4_dbg.c
+++ b/drivers/scsi/qla4xxx/ql4_dbg.c
@@ -20,12 +20,12 @@ void qla4xxx_dump_buffer(void *b, uint32_t size)
printk("------------------------------------------------------------"
"--\n");
for (cnt = 0; cnt < size; c++) {
- printk(KERN_INFO "%02x", *c);
+ printk("%02x", *c);
if (!(++cnt % 16))
- printk(KERN_INFO "\n");
+ printk("\n");
else
- printk(KERN_INFO " ");
+ printk(" ");
}
printk(KERN_INFO "\n");
}
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index ace637bf254e..bfe68545203f 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -147,9 +147,11 @@
#define ISCSI_ALIAS_SIZE 32 /* ISCSI Alias name size */
#define ISCSI_NAME_SIZE 0xE0 /* ISCSI Name size */
-#define QL4_SESS_RECOVERY_TMO 30 /* iSCSI session */
+#define QL4_SESS_RECOVERY_TMO 120 /* iSCSI session */
/* recovery timeout */
+#define MSB(x) ((uint8_t)((uint16_t)(x) >> 8))
+#define LSW(x) ((uint16_t)(x))
#define LSDW(x) ((u32)((u64)(x)))
#define MSDW(x) ((u32)((((u64)(x)) >> 16) >> 16))
@@ -173,8 +175,11 @@
#define ISNS_DEREG_TOV 5
#define HBA_ONLINE_TOV 30
#define DISABLE_ACB_TOV 30
+#define IP_CONFIG_TOV 30
+#define LOGIN_TOV 12
#define MAX_RESET_HA_RETRIES 2
+#define FW_ALIVE_WAIT_TOV 3
#define CMD_SP(Cmnd) ((Cmnd)->SCp.ptr)
@@ -240,6 +245,45 @@ struct ddb_entry {
uint16_t fw_ddb_index; /* DDB firmware index */
uint32_t fw_ddb_device_state; /* F/W Device State -- see ql4_fw.h */
+ uint16_t ddb_type;
+#define FLASH_DDB 0x01
+
+ struct dev_db_entry fw_ddb_entry;
+ int (*unblock_sess)(struct iscsi_cls_session *cls_session);
+ int (*ddb_change)(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+ struct ddb_entry *ddb_entry, uint32_t state);
+
+ /* Driver Re-login */
+ unsigned long flags; /* DDB Flags */
+ uint16_t default_relogin_timeout; /* Max time to wait for
+ * relogin to complete */
+ atomic_t retry_relogin_timer; /* Min Time between relogins
+ * (4000 only) */
+ atomic_t relogin_timer; /* Max Time to wait for
+ * relogin to complete */
+ atomic_t relogin_retry_count; /* Num of times relogin has been
+ * retried */
+ uint32_t default_time2wait; /* Default Min time between
+ * relogins (+aens) */
+
+};
+
+struct qla_ddb_index {
+ struct list_head list;
+ uint16_t fw_ddb_idx;
+ struct dev_db_entry fw_ddb;
+};
+
+#define DDB_IPADDR_LEN 64
+
+struct ql4_tuple_ddb {
+ int port;
+ int tpgt;
+ char ip_addr[DDB_IPADDR_LEN];
+ char iscsi_name[ISCSI_NAME_SIZE];
+ uint16_t options;
+#define DDB_OPT_IPV6 0x0e0e
+#define DDB_OPT_IPV4 0x0f0f
};
/*
@@ -411,7 +455,7 @@ struct scsi_qla_host {
#define AF_FW_RECOVERY 19 /* 0x00080000 */
#define AF_EEH_BUSY 20 /* 0x00100000 */
#define AF_PCI_CHANNEL_IO_PERM_FAILURE 21 /* 0x00200000 */
-
+#define AF_BUILD_DDB_LIST 22 /* 0x00400000 */
unsigned long dpc_flags;
#define DPC_RESET_HA 1 /* 0x00000002 */
@@ -604,6 +648,7 @@ struct scsi_qla_host {
uint16_t bootload_minor;
uint16_t bootload_patch;
uint16_t bootload_build;
+ uint16_t def_timeout; /* Default login timeout */
uint32_t flash_state;
#define QLFLASH_WAITING 0
@@ -623,6 +668,12 @@ struct scsi_qla_host {
uint16_t iscsi_pci_func_cnt;
uint8_t model_name[16];
struct completion disable_acb_comp;
+ struct dma_pool *fw_ddb_dma_pool;
+#define DDB_DMA_BLOCK_SIZE 512
+ uint16_t pri_ddb_idx;
+ uint16_t sec_ddb_idx;
+ int is_reset;
+ uint16_t temperature;
};
struct ql4_task_data {
@@ -835,6 +886,10 @@ static inline int ql4xxx_reset_active(struct scsi_qla_host *ha)
/*---------------------------------------------------------------------------*/
/* Defines for qla4xxx_initialize_adapter() and qla4xxx_recover_adapter() */
+
+#define INIT_ADAPTER 0
+#define RESET_ADAPTER 1
+
#define PRESERVE_DDB_LIST 0
#define REBUILD_DDB_LIST 1
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index cbd5a20dbbd1..7825c141bc1a 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -12,6 +12,7 @@
#define MAX_PRST_DEV_DB_ENTRIES 64
#define MIN_DISC_DEV_DB_ENTRY MAX_PRST_DEV_DB_ENTRIES
#define MAX_DEV_DB_ENTRIES 512
+#define MAX_DEV_DB_ENTRIES_40XX 256
/*************************************************************************
*
@@ -604,6 +605,13 @@ struct addr_ctrl_blk {
uint8_t res14[140]; /* 274-2FF */
};
+#define IP_ADDR_COUNT 4 /* Total 4 IP address supported in one interface
+ * One IPv4, one IPv6 link local and 2 IPv6
+ */
+
+#define IP_STATE_MASK 0x0F000000
+#define IP_STATE_SHIFT 24
+
struct init_fw_ctrl_blk {
struct addr_ctrl_blk pri;
/* struct addr_ctrl_blk sec;*/
@@ -744,7 +752,7 @@ struct dev_db_entry {
uint8_t res4[0x36]; /* 8A-BF */
uint8_t iscsi_name[0xE0]; /* C0-19F : xxzzy Make this a
* pointer to a string so we
- * don't have to reserve soooo
+ * don't have to reserve so
* much RAM */
uint8_t link_local_ipv6_addr[0x10]; /* 1A0-1AF */
uint8_t res5[0x10]; /* 1B0-1BF */
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index 160db9d5ea21..d0dd4b330206 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -13,7 +13,7 @@ struct iscsi_cls_conn;
int qla4xxx_hw_reset(struct scsi_qla_host *ha);
int ql4xxx_lock_drvr_wait(struct scsi_qla_host *a);
int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb *srb);
-int qla4xxx_initialize_adapter(struct scsi_qla_host *ha);
+int qla4xxx_initialize_adapter(struct scsi_qla_host *ha, int is_reset);
int qla4xxx_soft_reset(struct scsi_qla_host *ha);
irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id);
@@ -153,10 +153,13 @@ int qla4xxx_req_ddb_entry(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
uint32_t *mbx_sts);
int qla4xxx_clear_ddb_entry(struct scsi_qla_host *ha, uint32_t fw_ddb_index);
int qla4xxx_send_passthru0(struct iscsi_task *task);
+void qla4xxx_free_ddb_index(struct scsi_qla_host *ha);
int qla4xxx_get_mgmt_data(struct scsi_qla_host *ha, uint16_t fw_ddb_index,
uint16_t stats_size, dma_addr_t stats_dma);
void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
struct ddb_entry *ddb_entry);
+void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha,
+ struct ddb_entry *ddb_entry);
int qla4xxx_bootdb_by_index(struct scsi_qla_host *ha,
struct dev_db_entry *fw_ddb_entry,
dma_addr_t fw_ddb_entry_dma, uint16_t ddb_index);
@@ -169,11 +172,22 @@ int qla4xxx_set_nvram(struct scsi_qla_host *ha, dma_addr_t nvram_dma,
int qla4xxx_restore_factory_defaults(struct scsi_qla_host *ha,
uint32_t region, uint32_t field0,
uint32_t field1);
+int qla4xxx_get_ddb_index(struct scsi_qla_host *ha, uint16_t *ddb_index);
+void qla4xxx_login_flash_ddb(struct iscsi_cls_session *cls_session);
+int qla4xxx_unblock_ddb(struct iscsi_cls_session *cls_session);
+int qla4xxx_unblock_flash_ddb(struct iscsi_cls_session *cls_session);
+int qla4xxx_flash_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+ struct ddb_entry *ddb_entry, uint32_t state);
+int qla4xxx_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+ struct ddb_entry *ddb_entry, uint32_t state);
+void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset);
/* BSG Functions */
int qla4xxx_bsg_request(struct bsg_job *bsg_job);
int qla4xxx_process_vendor_specific(struct bsg_job *bsg_job);
+void qla4xxx_arm_relogin_timer(struct ddb_entry *ddb_entry);
+
extern int ql4xextended_error_logging;
extern int ql4xdontresethba;
extern int ql4xenablemsix;
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 3075fbaef553..90614f38b55d 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -697,6 +697,9 @@ int qla4xxx_start_firmware(struct scsi_qla_host *ha)
writel(set_rmask(CSR_SCSI_PROCESSOR_INTR),
&ha->reg->ctrl_status);
readl(&ha->reg->ctrl_status);
+ writel(set_rmask(CSR_SCSI_COMPLETION_INTR),
+ &ha->reg->ctrl_status);
+ readl(&ha->reg->ctrl_status);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (qla4xxx_get_firmware_state(ha) == QLA_SUCCESS) {
DEBUG2(printk("scsi%ld: %s: Get firmware "
@@ -773,22 +776,24 @@ int qla4xxx_start_firmware(struct scsi_qla_host *ha)
* be freed so that when login happens from user space there are free DDB
* indices available.
**/
-static void qla4xxx_free_ddb_index(struct scsi_qla_host *ha)
+void qla4xxx_free_ddb_index(struct scsi_qla_host *ha)
{
int max_ddbs;
int ret;
uint32_t idx = 0, next_idx = 0;
uint32_t state = 0, conn_err = 0;
- max_ddbs = is_qla40XX(ha) ? MAX_PRST_DEV_DB_ENTRIES :
+ max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
MAX_DEV_DB_ENTRIES;
for (idx = 0; idx < max_ddbs; idx = next_idx) {
ret = qla4xxx_get_fwddb_entry(ha, idx, NULL, 0, NULL,
&next_idx, &state, &conn_err,
NULL, NULL);
- if (ret == QLA_ERROR)
+ if (ret == QLA_ERROR) {
+ next_idx++;
continue;
+ }
if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
state == DDB_DS_SESSION_FAILED) {
DEBUG2(ql4_printk(KERN_INFO, ha,
@@ -804,7 +809,6 @@ static void qla4xxx_free_ddb_index(struct scsi_qla_host *ha)
}
}
-
/**
* qla4xxx_initialize_adapter - initializes hba
* @ha: Pointer to host adapter structure.
@@ -812,7 +816,7 @@ static void qla4xxx_free_ddb_index(struct scsi_qla_host *ha)
* This routine performs all of the steps necessary to initialize the adapter.
*
**/
-int qla4xxx_initialize_adapter(struct scsi_qla_host *ha)
+int qla4xxx_initialize_adapter(struct scsi_qla_host *ha, int is_reset)
{
int status = QLA_ERROR;
@@ -840,7 +844,8 @@ int qla4xxx_initialize_adapter(struct scsi_qla_host *ha)
if (status == QLA_ERROR)
goto exit_init_hba;
- qla4xxx_free_ddb_index(ha);
+ if (is_reset == RESET_ADAPTER)
+ qla4xxx_build_ddb_list(ha, is_reset);
set_bit(AF_ONLINE, &ha->flags);
exit_init_hba:
@@ -855,38 +860,12 @@ exit_init_hba:
return status;
}
-/**
- * qla4xxx_process_ddb_changed - process ddb state change
- * @ha - Pointer to host adapter structure.
- * @fw_ddb_index - Firmware's device database index
- * @state - Device state
- *
- * This routine processes a Decive Database Changed AEN Event.
- **/
-int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
- uint32_t state, uint32_t conn_err)
+int qla4xxx_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+ struct ddb_entry *ddb_entry, uint32_t state)
{
- struct ddb_entry * ddb_entry;
uint32_t old_fw_ddb_device_state;
int status = QLA_ERROR;
- /* check for out of range index */
- if (fw_ddb_index >= MAX_DDB_ENTRIES)
- goto exit_ddb_event;
-
- /* Get the corresponging ddb entry */
- ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, fw_ddb_index);
- /* Device does not currently exist in our database. */
- if (ddb_entry == NULL) {
- ql4_printk(KERN_ERR, ha, "%s: No ddb_entry at FW index [%d]\n",
- __func__, fw_ddb_index);
-
- if (state == DDB_DS_NO_CONNECTION_ACTIVE)
- clear_bit(fw_ddb_index, ha->ddb_idx_map);
-
- goto exit_ddb_event;
- }
-
old_fw_ddb_device_state = ddb_entry->fw_ddb_device_state;
DEBUG2(ql4_printk(KERN_INFO, ha,
"%s: DDB - old state = 0x%x, new state = 0x%x for "
@@ -900,9 +879,7 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
switch (state) {
case DDB_DS_SESSION_ACTIVE:
case DDB_DS_DISCOVERY:
- iscsi_conn_start(ddb_entry->conn);
- iscsi_conn_login_event(ddb_entry->conn,
- ISCSI_CONN_STATE_LOGGED_IN);
+ ddb_entry->unblock_sess(ddb_entry->sess);
qla4xxx_update_session_conn_param(ha, ddb_entry);
status = QLA_SUCCESS;
break;
@@ -936,9 +913,7 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
switch (state) {
case DDB_DS_SESSION_ACTIVE:
case DDB_DS_DISCOVERY:
- iscsi_conn_start(ddb_entry->conn);
- iscsi_conn_login_event(ddb_entry->conn,
- ISCSI_CONN_STATE_LOGGED_IN);
+ ddb_entry->unblock_sess(ddb_entry->sess);
qla4xxx_update_session_conn_param(ha, ddb_entry);
status = QLA_SUCCESS;
break;
@@ -954,7 +929,198 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
__func__));
break;
}
+ return status;
+}
+
+void qla4xxx_arm_relogin_timer(struct ddb_entry *ddb_entry)
+{
+ /*
+ * This triggers a relogin. After the relogin_timer
+ * expires, the relogin gets scheduled. We must wait a
+ * minimum amount of time since receiving an 0x8014 AEN
+ * with failed device_state or a logout response before
+ * we can issue another relogin.
+ *
+	 * Firmware pads this timeout: (time2wait + 1).
+	 * The driver's retry-to-login interval should be longer than
+	 * the firmware's; otherwise the firmware will fail the
+	 * set_ddb() mbx cmd with status 0x4005 while it is still
+	 * counting down its time2wait.
+	 */
+ atomic_set(&ddb_entry->relogin_timer, 0);
+ atomic_set(&ddb_entry->retry_relogin_timer,
+ ddb_entry->default_time2wait + 4);
+
+}
+
+int qla4xxx_flash_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+ struct ddb_entry *ddb_entry, uint32_t state)
+{
+ uint32_t old_fw_ddb_device_state;
+ int status = QLA_ERROR;
+
+ old_fw_ddb_device_state = ddb_entry->fw_ddb_device_state;
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: DDB - old state = 0x%x, new state = 0x%x for "
+ "index [%d]\n", __func__,
+ ddb_entry->fw_ddb_device_state, state, fw_ddb_index));
+
+ ddb_entry->fw_ddb_device_state = state;
+
+ switch (old_fw_ddb_device_state) {
+ case DDB_DS_LOGIN_IN_PROCESS:
+ case DDB_DS_NO_CONNECTION_ACTIVE:
+ switch (state) {
+ case DDB_DS_SESSION_ACTIVE:
+ ddb_entry->unblock_sess(ddb_entry->sess);
+ qla4xxx_update_session_conn_fwddb_param(ha, ddb_entry);
+ status = QLA_SUCCESS;
+ break;
+ case DDB_DS_SESSION_FAILED:
+ iscsi_block_session(ddb_entry->sess);
+ if (!test_bit(DF_RELOGIN, &ddb_entry->flags))
+ qla4xxx_arm_relogin_timer(ddb_entry);
+ status = QLA_SUCCESS;
+ break;
+ }
+ break;
+ case DDB_DS_SESSION_ACTIVE:
+ switch (state) {
+ case DDB_DS_SESSION_FAILED:
+ iscsi_block_session(ddb_entry->sess);
+ if (!test_bit(DF_RELOGIN, &ddb_entry->flags))
+ qla4xxx_arm_relogin_timer(ddb_entry);
+ status = QLA_SUCCESS;
+ break;
+ }
+ break;
+ case DDB_DS_SESSION_FAILED:
+ switch (state) {
+ case DDB_DS_SESSION_ACTIVE:
+ ddb_entry->unblock_sess(ddb_entry->sess);
+ qla4xxx_update_session_conn_fwddb_param(ha, ddb_entry);
+ status = QLA_SUCCESS;
+ break;
+ case DDB_DS_SESSION_FAILED:
+ if (!test_bit(DF_RELOGIN, &ddb_entry->flags))
+ qla4xxx_arm_relogin_timer(ddb_entry);
+ status = QLA_SUCCESS;
+ break;
+ }
+ break;
+ default:
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Unknown Event\n",
+ __func__));
+ break;
+ }
+ return status;
+}
+
+/**
+ * qla4xxx_process_ddb_changed - process ddb state change
+ * @ha - Pointer to host adapter structure.
+ * @fw_ddb_index - Firmware's device database index
+ * @state - Device state
+ *
+ * This routine processes a Device Database Changed AEN Event.
+ **/
+int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha,
+ uint32_t fw_ddb_index,
+ uint32_t state, uint32_t conn_err)
+{
+ struct ddb_entry *ddb_entry;
+ int status = QLA_ERROR;
+
+ /* check for out of range index */
+ if (fw_ddb_index >= MAX_DDB_ENTRIES)
+ goto exit_ddb_event;
+
+	/* Get the corresponding ddb entry */
+ ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, fw_ddb_index);
+ /* Device does not currently exist in our database. */
+ if (ddb_entry == NULL) {
+ ql4_printk(KERN_ERR, ha, "%s: No ddb_entry at FW index [%d]\n",
+ __func__, fw_ddb_index);
+
+ if (state == DDB_DS_NO_CONNECTION_ACTIVE)
+ clear_bit(fw_ddb_index, ha->ddb_idx_map);
+
+ goto exit_ddb_event;
+ }
+
+ ddb_entry->ddb_change(ha, fw_ddb_index, ddb_entry, state);
exit_ddb_event:
return status;
}
+
+/**
+ * qla4xxx_login_flash_ddb - Login to target (DDB)
+ * @cls_session: Pointer to the session to login
+ *
+ * This routine logs in to the target.
+ * Issues the set_ddb and conn open mailbox commands.
+ **/
+void qla4xxx_login_flash_ddb(struct iscsi_cls_session *cls_session)
+{
+ struct iscsi_session *sess;
+ struct ddb_entry *ddb_entry;
+ struct scsi_qla_host *ha;
+ struct dev_db_entry *fw_ddb_entry = NULL;
+ dma_addr_t fw_ddb_dma;
+ uint32_t mbx_sts = 0;
+ int ret;
+
+ sess = cls_session->dd_data;
+ ddb_entry = sess->dd_data;
+ ha = ddb_entry->ha;
+
+ if (!test_bit(AF_LINK_UP, &ha->flags))
+ return;
+
+ if (ddb_entry->ddb_type != FLASH_DDB) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Skipping login to non FLASH DB"));
+ goto exit_login;
+ }
+
+ fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
+ &fw_ddb_dma);
+ if (fw_ddb_entry == NULL) {
+ DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
+ goto exit_login;
+ }
+
+ if (ddb_entry->fw_ddb_index == INVALID_ENTRY) {
+ ret = qla4xxx_get_ddb_index(ha, &ddb_entry->fw_ddb_index);
+ if (ret == QLA_ERROR)
+ goto exit_login;
+
+ ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry;
+ ha->tot_ddbs++;
+ }
+
+ memcpy(fw_ddb_entry, &ddb_entry->fw_ddb_entry,
+ sizeof(struct dev_db_entry));
+ ddb_entry->sess->target_id = ddb_entry->fw_ddb_index;
+
+ ret = qla4xxx_set_ddb_entry(ha, ddb_entry->fw_ddb_index,
+ fw_ddb_dma, &mbx_sts);
+ if (ret == QLA_ERROR) {
+ DEBUG2(ql4_printk(KERN_ERR, ha, "Set DDB failed\n"));
+ goto exit_login;
+ }
+
+ ddb_entry->fw_ddb_device_state = DDB_DS_LOGIN_IN_PROCESS;
+ ret = qla4xxx_conn_open(ha, ddb_entry->fw_ddb_index);
+ if (ret == QLA_ERROR) {
+ ql4_printk(KERN_ERR, ha, "%s: Login failed: %s\n", __func__,
+ sess->targetname);
+ goto exit_login;
+ }
+
+exit_login:
+ if (fw_ddb_entry)
+ dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
+}
+
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index 827e93078b94..95828862eea0 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -123,13 +123,13 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
srb = qla4xxx_del_from_active_array(ha, le32_to_cpu(sts_entry->handle));
if (!srb) {
- DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Status Entry invalid "
- "handle 0x%x, sp=%p. This cmd may have already "
- "been completed.\n", ha->host_no, __func__,
- le32_to_cpu(sts_entry->handle), srb));
- ql4_printk(KERN_WARNING, ha, "%s invalid status entry:"
- " handle=0x%0x\n", __func__, sts_entry->handle);
- set_bit(DPC_RESET_HA, &ha->dpc_flags);
+ ql4_printk(KERN_WARNING, ha, "%s invalid status entry: "
+ "handle=0x%0x, srb=%p\n", __func__,
+ sts_entry->handle, srb);
+ if (is_qla8022(ha))
+ set_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
+ else
+ set_bit(DPC_RESET_HA, &ha->dpc_flags);
return;
}
@@ -563,7 +563,11 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
case MBOX_ASTS_DHCP_LEASE_EXPIRED:
DEBUG2(printk("scsi%ld: AEN %04x, ERROR Status, "
"Reset HA\n", ha->host_no, mbox_status));
- set_bit(DPC_RESET_HA, &ha->dpc_flags);
+ if (is_qla8022(ha))
+ set_bit(DPC_RESET_HA_FW_CONTEXT,
+ &ha->dpc_flags);
+ else
+ set_bit(DPC_RESET_HA, &ha->dpc_flags);
break;
case MBOX_ASTS_LINK_UP:
@@ -617,9 +621,13 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
(mbox_sts[2] == ACB_STATE_ACQUIRING)))
set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags);
else if ((mbox_sts[3] == ACB_STATE_ACQUIRING) &&
- (mbox_sts[2] == ACB_STATE_VALID))
- set_bit(DPC_RESET_HA, &ha->dpc_flags);
- else if ((mbox_sts[3] == ACB_STATE_UNCONFIGURED))
+ (mbox_sts[2] == ACB_STATE_VALID)) {
+ if (is_qla8022(ha))
+ set_bit(DPC_RESET_HA_FW_CONTEXT,
+ &ha->dpc_flags);
+ else
+ set_bit(DPC_RESET_HA, &ha->dpc_flags);
+ } else if ((mbox_sts[3] == ACB_STATE_UNCONFIGURED))
complete(&ha->disable_acb_comp);
break;
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index 4c2b84870392..e1e66a45e4d0 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -41,6 +41,16 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
return status;
}
+ if (is_qla40XX(ha)) {
+ if (test_bit(AF_HA_REMOVAL, &ha->flags)) {
+ DEBUG2(ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: "
+ "prematurely completing mbx cmd as "
+ "adapter removal detected\n",
+ ha->host_no, __func__));
+ return status;
+ }
+ }
+
if (is_qla8022(ha)) {
if (test_bit(AF_FW_RECOVERY, &ha->flags)) {
DEBUG2(ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: "
@@ -209,6 +219,13 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
ha->mailbox_timeout_count++;
mbx_sts[0] = (-1);
set_bit(DPC_RESET_HA, &ha->dpc_flags);
+ if (is_qla8022(ha)) {
+ ql4_printk(KERN_INFO, ha,
+ "disabling pause transmit on port 0 & 1.\n");
+ qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
+ CRB_NIU_XG_PAUSE_CTL_P0 |
+ CRB_NIU_XG_PAUSE_CTL_P1);
+ }
goto mbox_exit;
}
@@ -413,6 +430,7 @@ qla4xxx_update_local_ifcb(struct scsi_qla_host *ha,
memcpy(ha->name_string, init_fw_cb->iscsi_name,
min(sizeof(ha->name_string),
sizeof(init_fw_cb->iscsi_name)));
+ ha->def_timeout = le16_to_cpu(init_fw_cb->def_timeout);
/*memcpy(ha->alias, init_fw_cb->Alias,
min(sizeof(ha->alias), sizeof(init_fw_cb->Alias)));*/
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index f484ff438199..78f1111158d7 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -1792,8 +1792,11 @@ int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha)
int rval = QLA_SUCCESS;
unsigned long dev_init_timeout;
- if (!test_bit(AF_INIT_DONE, &ha->flags))
+ if (!test_bit(AF_INIT_DONE, &ha->flags)) {
+ qla4_8xxx_idc_lock(ha);
qla4_8xxx_set_drv_active(ha);
+ qla4_8xxx_idc_unlock(ha);
+ }
dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
ql4_printk(KERN_INFO, ha, "1:Device state is 0x%x = %s\n", dev_state,
@@ -1802,8 +1805,8 @@ int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha)
/* wait for 30 seconds for device to go ready */
dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ);
+ qla4_8xxx_idc_lock(ha);
while (1) {
- qla4_8xxx_idc_lock(ha);
if (time_after_eq(jiffies, dev_init_timeout)) {
ql4_printk(KERN_WARNING, ha, "Device init failed!\n");
@@ -1819,15 +1822,14 @@ int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha)
/* NOTE: Make sure idc unlocked upon exit of switch statement */
switch (dev_state) {
case QLA82XX_DEV_READY:
- qla4_8xxx_idc_unlock(ha);
goto exit;
case QLA82XX_DEV_COLD:
rval = qla4_8xxx_device_bootstrap(ha);
- qla4_8xxx_idc_unlock(ha);
goto exit;
case QLA82XX_DEV_INITIALIZING:
qla4_8xxx_idc_unlock(ha);
msleep(1000);
+ qla4_8xxx_idc_lock(ha);
break;
case QLA82XX_DEV_NEED_RESET:
if (!ql4xdontresethba) {
@@ -1836,38 +1838,48 @@ int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha)
* reset handler */
dev_init_timeout = jiffies +
(ha->nx_dev_init_timeout * HZ);
+ } else {
+ qla4_8xxx_idc_unlock(ha);
+ msleep(1000);
+ qla4_8xxx_idc_lock(ha);
}
- qla4_8xxx_idc_unlock(ha);
break;
case QLA82XX_DEV_NEED_QUIESCENT:
- qla4_8xxx_idc_unlock(ha);
/* idc locked/unlocked in handler */
qla4_8xxx_need_qsnt_handler(ha);
- qla4_8xxx_idc_lock(ha);
- /* fall thru needs idc_locked */
+ break;
case QLA82XX_DEV_QUIESCENT:
qla4_8xxx_idc_unlock(ha);
msleep(1000);
+ qla4_8xxx_idc_lock(ha);
break;
case QLA82XX_DEV_FAILED:
qla4_8xxx_idc_unlock(ha);
qla4xxx_dead_adapter_cleanup(ha);
rval = QLA_ERROR;
+ qla4_8xxx_idc_lock(ha);
goto exit;
default:
qla4_8xxx_idc_unlock(ha);
qla4xxx_dead_adapter_cleanup(ha);
rval = QLA_ERROR;
+ qla4_8xxx_idc_lock(ha);
goto exit;
}
}
exit:
+ qla4_8xxx_idc_unlock(ha);
return rval;
}
int qla4_8xxx_load_risc(struct scsi_qla_host *ha)
{
int retval;
+
+ /* clear the interrupt */
+ writel(0, &ha->qla4_8xxx_reg->host_int);
+ readl(&ha->qla4_8xxx_reg->host_int);
+
retval = qla4_8xxx_device_state_handler(ha);
if (retval == QLA_SUCCESS && !test_bit(AF_INIT_DONE, &ha->flags))
diff --git a/drivers/scsi/qla4xxx/ql4_nx.h b/drivers/scsi/qla4xxx/ql4_nx.h
index 35376a1c3f1b..dc45ac923691 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.h
+++ b/drivers/scsi/qla4xxx/ql4_nx.h
@@ -19,12 +19,28 @@
#define PHAN_PEG_RCV_INITIALIZED 0xff01
/*CRB_RELATED*/
-#define QLA82XX_CRB_BASE QLA82XX_CAM_RAM(0x200)
-#define QLA82XX_REG(X) (QLA82XX_CRB_BASE+(X))
-
+#define QLA82XX_CRB_BASE (QLA82XX_CAM_RAM(0x200))
+#define QLA82XX_REG(X) (QLA82XX_CRB_BASE+(X))
#define CRB_CMDPEG_STATE QLA82XX_REG(0x50)
#define CRB_RCVPEG_STATE QLA82XX_REG(0x13c)
#define CRB_DMA_SHIFT QLA82XX_REG(0xcc)
+#define CRB_TEMP_STATE QLA82XX_REG(0x1b4)
+
+#define qla82xx_get_temp_val(x) ((x) >> 16)
+#define qla82xx_get_temp_state(x) ((x) & 0xffff)
+#define qla82xx_encode_temp(val, state) (((val) << 16) | (state))
+
+/*
+ * Temperature control.
+ */
+enum {
+ QLA82XX_TEMP_NORMAL = 0x1, /* Normal operating range */
+ QLA82XX_TEMP_WARN, /* Sound alert, temperature getting high */
+ QLA82XX_TEMP_PANIC /* Fatal error, hardware has shut down. */
+};
+
+#define CRB_NIU_XG_PAUSE_CTL_P0 0x1
+#define CRB_NIU_XG_PAUSE_CTL_P1 0x8
#define QLA82XX_HW_H0_CH_HUB_ADR 0x05
#define QLA82XX_HW_H1_CH_HUB_ADR 0x0E
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 30f31b127f33..ce6d3b7f0c61 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -8,6 +8,7 @@
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/iscsi_boot_sysfs.h>
+#include <linux/inet.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
@@ -31,39 +32,47 @@ static struct kmem_cache *srb_cachep;
/*
* Module parameter information and variables
*/
+int ql4xdisablesysfsboot = 1;
+module_param(ql4xdisablesysfsboot, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(ql4xdisablesysfsboot,
+ " Set to disable exporting boot targets to sysfs.\n"
+ "\t\t 0 - Export boot targets\n"
+ "\t\t 1 - Do not export boot targets (Default)");
+
int ql4xdontresethba = 0;
module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xdontresethba,
- "Don't reset the HBA for driver recovery \n"
- " 0 - It will reset HBA (Default)\n"
- " 1 - It will NOT reset HBA");
+ " Don't reset the HBA for driver recovery.\n"
+ "\t\t 0 - It will reset HBA (Default)\n"
+ "\t\t 1 - It will NOT reset HBA");
-int ql4xextended_error_logging = 0; /* 0 = off, 1 = log errors */
+int ql4xextended_error_logging;
module_param(ql4xextended_error_logging, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xextended_error_logging,
- "Option to enable extended error logging, "
- "Default is 0 - no logging, 1 - debug logging");
+ " Option to enable extended error logging.\n"
+ "\t\t 0 - no logging (Default)\n"
+ "\t\t 2 - debug logging");
int ql4xenablemsix = 1;
module_param(ql4xenablemsix, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql4xenablemsix,
- "Set to enable MSI or MSI-X interrupt mechanism.\n"
- " 0 = enable INTx interrupt mechanism.\n"
- " 1 = enable MSI-X interrupt mechanism (Default).\n"
- " 2 = enable MSI interrupt mechanism.");
+ " Set to enable MSI or MSI-X interrupt mechanism.\n"
+ "\t\t 0 = enable INTx interrupt mechanism.\n"
+ "\t\t 1 = enable MSI-X interrupt mechanism (Default).\n"
+ "\t\t 2 = enable MSI interrupt mechanism.");
#define QL4_DEF_QDEPTH 32
static int ql4xmaxqdepth = QL4_DEF_QDEPTH;
module_param(ql4xmaxqdepth, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xmaxqdepth,
- "Maximum queue depth to report for target devices.\n"
- " Default: 32.");
+ " Maximum queue depth to report for target devices.\n"
+ "\t\t Default: 32.");
static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO;
module_param(ql4xsess_recovery_tmo, int, S_IRUGO);
MODULE_PARM_DESC(ql4xsess_recovery_tmo,
"Target Session Recovery Timeout.\n"
- " Default: 30 sec.");
+ "\t\t Default: 120 sec.");
static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha);
/*
@@ -120,7 +129,7 @@ static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd);
static int qla4xxx_slave_alloc(struct scsi_device *device);
static int qla4xxx_slave_configure(struct scsi_device *device);
static void qla4xxx_slave_destroy(struct scsi_device *sdev);
-static mode_t ql4_attr_is_visible(int param_type, int param);
+static umode_t ql4_attr_is_visible(int param_type, int param);
static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type);
static struct qla4_8xxx_legacy_intr_set legacy_intr[] =
@@ -189,7 +198,7 @@ static struct iscsi_transport qla4xxx_iscsi_transport = {
static struct scsi_transport_template *qla4xxx_scsi_transport;
-static mode_t ql4_attr_is_visible(int param_type, int param)
+static umode_t ql4_attr_is_visible(int param_type, int param)
{
switch (param_type) {
case ISCSI_HOST_PARAM:
@@ -415,7 +424,7 @@ static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
qla_ep = ep->dd_data;
ha = to_qla_host(qla_ep->host);
- if (adapter_up(ha))
+ if (adapter_up(ha) && !test_bit(AF_BUILD_DDB_LIST, &ha->flags))
ret = 1;
return ret;
@@ -927,7 +936,16 @@ qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, uint32_t len)
goto exit_init_fw_cb;
}
- qla4xxx_disable_acb(ha);
+ rval = qla4xxx_disable_acb(ha);
+ if (rval != QLA_SUCCESS) {
+ ql4_printk(KERN_ERR, ha, "%s: disable acb mbx failed\n",
+ __func__);
+ rval = -EIO;
+ goto exit_init_fw_cb;
+ }
+
+ wait_for_completion_timeout(&ha->disable_acb_comp,
+ DISABLE_ACB_TOV * HZ);
qla4xxx_initcb_to_acb(init_fw_cb);
@@ -975,6 +993,150 @@ static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn,
}
+int qla4xxx_get_ddb_index(struct scsi_qla_host *ha, uint16_t *ddb_index)
+{
+ uint32_t mbx_sts = 0;
+ uint16_t tmp_ddb_index;
+ int ret;
+
+get_ddb_index:
+ tmp_ddb_index = find_first_zero_bit(ha->ddb_idx_map, MAX_DDB_ENTRIES);
+
+ if (tmp_ddb_index >= MAX_DDB_ENTRIES) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Free DDB index not available\n"));
+ ret = QLA_ERROR;
+ goto exit_get_ddb_index;
+ }
+
+ if (test_and_set_bit(tmp_ddb_index, ha->ddb_idx_map))
+ goto get_ddb_index;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Found a free DDB index at %d\n", tmp_ddb_index));
+ ret = qla4xxx_req_ddb_entry(ha, tmp_ddb_index, &mbx_sts);
+ if (ret == QLA_ERROR) {
+ if (mbx_sts == MBOX_STS_COMMAND_ERROR) {
+ ql4_printk(KERN_INFO, ha,
+ "DDB index = %d not available trying next\n",
+ tmp_ddb_index);
+ goto get_ddb_index;
+ }
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Free FW DDB not available\n"));
+ }
+
+ *ddb_index = tmp_ddb_index;
+
+exit_get_ddb_index:
+ return ret;
+}
+
+static int qla4xxx_match_ipaddress(struct scsi_qla_host *ha,
+ struct ddb_entry *ddb_entry,
+ char *existing_ipaddr,
+ char *user_ipaddr)
+{
+ uint8_t dst_ipaddr[IPv6_ADDR_LEN];
+ char formatted_ipaddr[DDB_IPADDR_LEN];
+ int status = QLA_SUCCESS, ret = 0;
+
+ if (ddb_entry->fw_ddb_entry.options & DDB_OPT_IPV6_DEVICE) {
+ ret = in6_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr,
+ '\0', NULL);
+ if (ret == 0) {
+ status = QLA_ERROR;
+ goto out_match;
+ }
+ ret = sprintf(formatted_ipaddr, "%pI6", dst_ipaddr);
+ } else {
+ ret = in4_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr,
+ '\0', NULL);
+ if (ret == 0) {
+ status = QLA_ERROR;
+ goto out_match;
+ }
+ ret = sprintf(formatted_ipaddr, "%pI4", dst_ipaddr);
+ }
+
+ if (strcmp(existing_ipaddr, formatted_ipaddr))
+ status = QLA_ERROR;
+
+out_match:
+ return status;
+}
+
+static int qla4xxx_match_fwdb_session(struct scsi_qla_host *ha,
+ struct iscsi_cls_conn *cls_conn)
+{
+ int idx = 0, max_ddbs, rval;
+ struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
+ struct iscsi_session *sess, *existing_sess;
+ struct iscsi_conn *conn, *existing_conn;
+ struct ddb_entry *ddb_entry;
+
+ sess = cls_sess->dd_data;
+ conn = cls_conn->dd_data;
+
+ if (sess->targetname == NULL ||
+ conn->persistent_address == NULL ||
+ conn->persistent_port == 0)
+ return QLA_ERROR;
+
+ max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
+ MAX_DEV_DB_ENTRIES;
+
+ for (idx = 0; idx < max_ddbs; idx++) {
+ ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
+ if (ddb_entry == NULL)
+ continue;
+
+ if (ddb_entry->ddb_type != FLASH_DDB)
+ continue;
+
+ existing_sess = ddb_entry->sess->dd_data;
+ existing_conn = ddb_entry->conn->dd_data;
+
+ if (existing_sess->targetname == NULL ||
+ existing_conn->persistent_address == NULL ||
+ existing_conn->persistent_port == 0)
+ continue;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "IQN = %s User IQN = %s\n",
+ existing_sess->targetname,
+ sess->targetname));
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "IP = %s User IP = %s\n",
+ existing_conn->persistent_address,
+ conn->persistent_address));
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Port = %d User Port = %d\n",
+ existing_conn->persistent_port,
+ conn->persistent_port));
+
+ if (strcmp(existing_sess->targetname, sess->targetname))
+ continue;
+ rval = qla4xxx_match_ipaddress(ha, ddb_entry,
+ existing_conn->persistent_address,
+ conn->persistent_address);
+ if (rval == QLA_ERROR)
+ continue;
+ if (existing_conn->persistent_port != conn->persistent_port)
+ continue;
+ break;
+ }
+
+ if (idx == max_ddbs)
+ return QLA_ERROR;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Match found in fwdb sessions\n"));
+ return QLA_SUCCESS;
+}
+
static struct iscsi_cls_session *
qla4xxx_session_create(struct iscsi_endpoint *ep,
uint16_t cmds_max, uint16_t qdepth,
@@ -984,8 +1146,7 @@ qla4xxx_session_create(struct iscsi_endpoint *ep,
struct scsi_qla_host *ha;
struct qla_endpoint *qla_ep;
struct ddb_entry *ddb_entry;
- uint32_t ddb_index;
- uint32_t mbx_sts = 0;
+ uint16_t ddb_index;
struct iscsi_session *sess;
struct sockaddr *dst_addr;
int ret;
@@ -1000,32 +1161,9 @@ qla4xxx_session_create(struct iscsi_endpoint *ep,
dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
ha = to_qla_host(qla_ep->host);
-get_ddb_index:
- ddb_index = find_first_zero_bit(ha->ddb_idx_map, MAX_DDB_ENTRIES);
-
- if (ddb_index >= MAX_DDB_ENTRIES) {
- DEBUG2(ql4_printk(KERN_INFO, ha,
- "Free DDB index not available\n"));
- return NULL;
- }
-
- if (test_and_set_bit(ddb_index, ha->ddb_idx_map))
- goto get_ddb_index;
-
- DEBUG2(ql4_printk(KERN_INFO, ha,
- "Found a free DDB index at %d\n", ddb_index));
- ret = qla4xxx_req_ddb_entry(ha, ddb_index, &mbx_sts);
- if (ret == QLA_ERROR) {
- if (mbx_sts == MBOX_STS_COMMAND_ERROR) {
- ql4_printk(KERN_INFO, ha,
- "DDB index = %d not available trying next\n",
- ddb_index);
- goto get_ddb_index;
- }
- DEBUG2(ql4_printk(KERN_INFO, ha,
- "Free FW DDB not available\n"));
+ ret = qla4xxx_get_ddb_index(ha, &ddb_index);
+ if (ret == QLA_ERROR)
return NULL;
- }
cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, qla_ep->host,
cmds_max, sizeof(struct ddb_entry),
@@ -1040,6 +1178,8 @@ get_ddb_index:
ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
ddb_entry->ha = ha;
ddb_entry->sess = cls_sess;
+ ddb_entry->unblock_sess = qla4xxx_unblock_ddb;
+ ddb_entry->ddb_change = qla4xxx_ddb_change;
cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry;
ha->tot_ddbs++;
@@ -1077,6 +1217,9 @@ qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx)
DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn),
conn_idx);
+ if (!cls_conn)
+ return NULL;
+
sess = cls_sess->dd_data;
ddb_entry = sess->dd_data;
ddb_entry->conn = cls_conn;
@@ -1109,7 +1252,7 @@ static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn)
struct iscsi_session *sess;
struct ddb_entry *ddb_entry;
struct scsi_qla_host *ha;
- struct dev_db_entry *fw_ddb_entry;
+ struct dev_db_entry *fw_ddb_entry = NULL;
dma_addr_t fw_ddb_entry_dma;
uint32_t mbx_sts = 0;
int ret = 0;
@@ -1120,12 +1263,25 @@ static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn)
ddb_entry = sess->dd_data;
ha = ddb_entry->ha;
+ /* Check if we have matching FW DDB, if yes then do not
+ * login to this target. This could cause target to logout previous
+ * connection
+ */
+ ret = qla4xxx_match_fwdb_session(ha, cls_conn);
+ if (ret == QLA_SUCCESS) {
+ ql4_printk(KERN_INFO, ha,
+ "Session already exist in FW.\n");
+ ret = -EEXIST;
+ goto exit_conn_start;
+ }
+
fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
&fw_ddb_entry_dma, GFP_KERNEL);
if (!fw_ddb_entry) {
ql4_printk(KERN_ERR, ha,
"%s: Unable to allocate dma buffer\n", __func__);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto exit_conn_start;
}
ret = qla4xxx_set_param_ddbentry(ha, ddb_entry, cls_conn, &mbx_sts);
@@ -1138,9 +1294,7 @@ static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn)
if (mbx_sts)
if (ddb_entry->fw_ddb_device_state ==
DDB_DS_SESSION_ACTIVE) {
- iscsi_conn_start(ddb_entry->conn);
- iscsi_conn_login_event(ddb_entry->conn,
- ISCSI_CONN_STATE_LOGGED_IN);
+ ddb_entry->unblock_sess(ddb_entry->sess);
goto exit_set_param;
}
@@ -1167,8 +1321,9 @@ exit_set_param:
ret = 0;
exit_conn_start:
- dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
- fw_ddb_entry, fw_ddb_entry_dma);
+ if (fw_ddb_entry)
+ dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ fw_ddb_entry, fw_ddb_entry_dma);
return ret;
}
@@ -1344,6 +1499,101 @@ static int qla4xxx_task_xmit(struct iscsi_task *task)
return -ENOSYS;
}
+static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha,
+ struct dev_db_entry *fw_ddb_entry,
+ struct iscsi_cls_session *cls_sess,
+ struct iscsi_cls_conn *cls_conn)
+{
+ int buflen = 0;
+ struct iscsi_session *sess;
+ struct iscsi_conn *conn;
+ char ip_addr[DDB_IPADDR_LEN];
+ uint16_t options = 0;
+
+ sess = cls_sess->dd_data;
+ conn = cls_conn->dd_data;
+
+ conn->max_recv_dlength = BYTE_UNITS *
+ le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
+
+ conn->max_xmit_dlength = BYTE_UNITS *
+ le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);
+
+ sess->initial_r2t_en =
+ (BIT_10 & le16_to_cpu(fw_ddb_entry->iscsi_options));
+
+ sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);
+
+ sess->imm_data_en = (BIT_11 & le16_to_cpu(fw_ddb_entry->iscsi_options));
+
+ sess->first_burst = BYTE_UNITS *
+ le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);
+
+ sess->max_burst = BYTE_UNITS *
+ le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);
+
+ sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
+
+ sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);
+
+ conn->persistent_port = le16_to_cpu(fw_ddb_entry->port);
+
+ sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
+
+ options = le16_to_cpu(fw_ddb_entry->options);
+ if (options & DDB_OPT_IPV6_DEVICE)
+ sprintf(ip_addr, "%pI6", fw_ddb_entry->ip_addr);
+ else
+ sprintf(ip_addr, "%pI4", fw_ddb_entry->ip_addr);
+
+ iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_NAME,
+ (char *)fw_ddb_entry->iscsi_name, buflen);
+ iscsi_set_param(cls_conn, ISCSI_PARAM_INITIATOR_NAME,
+ (char *)ha->name_string, buflen);
+ iscsi_set_param(cls_conn, ISCSI_PARAM_PERSISTENT_ADDRESS,
+ (char *)ip_addr, buflen);
+}
+
+void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha,
+ struct ddb_entry *ddb_entry)
+{
+ struct iscsi_cls_session *cls_sess;
+ struct iscsi_cls_conn *cls_conn;
+ uint32_t ddb_state;
+ dma_addr_t fw_ddb_entry_dma;
+ struct dev_db_entry *fw_ddb_entry;
+
+ fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ &fw_ddb_entry_dma, GFP_KERNEL);
+ if (!fw_ddb_entry) {
+ ql4_printk(KERN_ERR, ha,
+ "%s: Unable to allocate dma buffer\n", __func__);
+ goto exit_session_conn_fwddb_param;
+ }
+
+ if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry,
+ fw_ddb_entry_dma, NULL, NULL, &ddb_state,
+ NULL, NULL, NULL) == QLA_ERROR) {
+ DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
+ "get_ddb_entry for fw_ddb_index %d\n",
+ ha->host_no, __func__,
+ ddb_entry->fw_ddb_index));
+ goto exit_session_conn_fwddb_param;
+ }
+
+ cls_sess = ddb_entry->sess;
+
+ cls_conn = ddb_entry->conn;
+
+ /* Update params */
+ qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn);
+
+exit_session_conn_fwddb_param:
+ if (fw_ddb_entry)
+ dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ fw_ddb_entry, fw_ddb_entry_dma);
+}
+
void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
struct ddb_entry *ddb_entry)
{
@@ -1360,7 +1610,7 @@ void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
if (!fw_ddb_entry) {
ql4_printk(KERN_ERR, ha,
"%s: Unable to allocate dma buffer\n", __func__);
- return;
+ goto exit_session_conn_param;
}
if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry,
@@ -1370,7 +1620,7 @@ void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
"get_ddb_entry for fw_ddb_index %d\n",
ha->host_no, __func__,
ddb_entry->fw_ddb_index));
- return;
+ goto exit_session_conn_param;
}
cls_sess = ddb_entry->sess;
@@ -1379,6 +1629,14 @@ void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
cls_conn = ddb_entry->conn;
conn = cls_conn->dd_data;
+ /* Update timers after login */
+ ddb_entry->default_relogin_timeout =
+ (le16_to_cpu(fw_ddb_entry->def_timeout) > LOGIN_TOV) &&
+ (le16_to_cpu(fw_ddb_entry->def_timeout) < LOGIN_TOV * 10) ?
+ le16_to_cpu(fw_ddb_entry->def_timeout) : LOGIN_TOV;
+ ddb_entry->default_time2wait =
+ le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
+
/* Update params */
conn->max_recv_dlength = BYTE_UNITS *
le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
@@ -1407,6 +1665,11 @@ void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
memcpy(sess->initiatorname, ha->name_string,
min(sizeof(ha->name_string), sizeof(sess->initiatorname)));
+
+exit_session_conn_param:
+ if (fw_ddb_entry)
+ dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ fw_ddb_entry, fw_ddb_entry_dma);
}
/*
@@ -1607,6 +1870,9 @@ static void qla4xxx_mem_free(struct scsi_qla_host *ha)
vfree(ha->chap_list);
ha->chap_list = NULL;
+ if (ha->fw_ddb_dma_pool)
+ dma_pool_destroy(ha->fw_ddb_dma_pool);
+
/* release io space registers */
if (is_qla8022(ha)) {
if (ha->nx_pcibase)
@@ -1689,6 +1955,16 @@ static int qla4xxx_mem_alloc(struct scsi_qla_host *ha)
goto mem_alloc_error_exit;
}
+ ha->fw_ddb_dma_pool = dma_pool_create("ql4_fw_ddb", &ha->pdev->dev,
+ DDB_DMA_BLOCK_SIZE, 8, 0);
+
+ if (ha->fw_ddb_dma_pool == NULL) {
+ ql4_printk(KERN_WARNING, ha,
+ "%s: fw_ddb_dma_pool allocation failed..\n",
+ __func__);
+ goto mem_alloc_error_exit;
+ }
+
return QLA_SUCCESS;
mem_alloc_error_exit:
@@ -1697,14 +1973,51 @@ mem_alloc_error_exit:
}
/**
+ * qla4_8xxx_check_temp - Check the ISP82XX temperature.
+ * @ha: adapter block pointer.
+ *
+ * Note: The caller should not hold the idc lock.
+ **/
+static int qla4_8xxx_check_temp(struct scsi_qla_host *ha)
+{
+ uint32_t temp, temp_state, temp_val;
+ int status = QLA_SUCCESS;
+
+ temp = qla4_8xxx_rd_32(ha, CRB_TEMP_STATE);
+
+ temp_state = qla82xx_get_temp_state(temp);
+ temp_val = qla82xx_get_temp_val(temp);
+
+ if (temp_state == QLA82XX_TEMP_PANIC) {
+ ql4_printk(KERN_WARNING, ha, "Device temperature %d degrees C"
+ " exceeds maximum allowed. Hardware has been shut"
+ " down.\n", temp_val);
+ status = QLA_ERROR;
+ } else if (temp_state == QLA82XX_TEMP_WARN) {
+ if (ha->temperature == QLA82XX_TEMP_NORMAL)
+ ql4_printk(KERN_WARNING, ha, "Device temperature %d"
+ " degrees C exceeds operating range."
+ " Immediate action needed.\n", temp_val);
+ } else {
+ if (ha->temperature == QLA82XX_TEMP_WARN)
+ ql4_printk(KERN_INFO, ha, "Device temperature is"
+ " now %d degrees C in normal range.\n",
+ temp_val);
+ }
+ ha->temperature = temp_state;
+ return status;
+}
+
+/**
* qla4_8xxx_check_fw_alive - Check firmware health
* @ha: Pointer to host adapter structure.
*
* Context: Interrupt
**/
-static void qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
+static int qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
{
- uint32_t fw_heartbeat_counter, halt_status;
+ uint32_t fw_heartbeat_counter;
+ int status = QLA_SUCCESS;
fw_heartbeat_counter = qla4_8xxx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
/* If PEG_ALIVE_COUNTER is 0xffffffff, AER/EEH is in progress, ignore */
@@ -1712,7 +2025,7 @@ static void qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Device in frozen "
"state, QLA82XX_PEG_ALIVE_COUNTER is 0xffffffff\n",
ha->host_no, __func__));
- return;
+ return status;
}
if (ha->fw_heartbeat_counter == fw_heartbeat_counter) {
@@ -1720,8 +2033,6 @@ static void qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
/* FW not alive after 2 seconds */
if (ha->seconds_since_last_heartbeat == 2) {
ha->seconds_since_last_heartbeat = 0;
- halt_status = qla4_8xxx_rd_32(ha,
- QLA82XX_PEG_HALT_STATUS1);
ql4_printk(KERN_INFO, ha,
"scsi(%ld): %s, Dumping hw/fw registers:\n "
@@ -1729,7 +2040,9 @@ static void qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
" 0x%x,\n PEG_NET_0_PC: 0x%x, PEG_NET_1_PC:"
" 0x%x,\n PEG_NET_2_PC: 0x%x, PEG_NET_3_PC:"
" 0x%x,\n PEG_NET_4_PC: 0x%x\n",
- ha->host_no, __func__, halt_status,
+ ha->host_no, __func__,
+ qla4_8xxx_rd_32(ha,
+ QLA82XX_PEG_HALT_STATUS1),
qla4_8xxx_rd_32(ha,
QLA82XX_PEG_HALT_STATUS2),
qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_0 +
@@ -1742,24 +2055,13 @@ static void qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
0x3c),
qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_4 +
0x3c));
-
- /* Since we cannot change dev_state in interrupt
- * context, set appropriate DPC flag then wakeup
- * DPC */
- if (halt_status & HALT_STATUS_UNRECOVERABLE)
- set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
- else {
- printk("scsi%ld: %s: detect abort needed!\n",
- ha->host_no, __func__);
- set_bit(DPC_RESET_HA, &ha->dpc_flags);
- }
- qla4xxx_wake_dpc(ha);
- qla4xxx_mailbox_premature_completion(ha);
+ status = QLA_ERROR;
}
} else
ha->seconds_since_last_heartbeat = 0;
ha->fw_heartbeat_counter = fw_heartbeat_counter;
+ return status;
}
/**
@@ -1770,22 +2072,29 @@ static void qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
**/
void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
{
- uint32_t dev_state;
-
- dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+ uint32_t dev_state, halt_status;
/* don't poll if reset is going on */
if (!(test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags))) {
- if (dev_state == QLA82XX_DEV_NEED_RESET &&
+ dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+
+ if (qla4_8xxx_check_temp(ha)) {
+ ql4_printk(KERN_INFO, ha, "disabling pause"
+ " transmit on port 0 & 1.\n");
+ qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
+ CRB_NIU_XG_PAUSE_CTL_P0 |
+ CRB_NIU_XG_PAUSE_CTL_P1);
+ set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
+ qla4xxx_wake_dpc(ha);
+ } else if (dev_state == QLA82XX_DEV_NEED_RESET &&
!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
if (!ql4xdontresethba) {
ql4_printk(KERN_INFO, ha, "%s: HW State: "
"NEED RESET!\n", __func__);
set_bit(DPC_RESET_HA, &ha->dpc_flags);
qla4xxx_wake_dpc(ha);
- qla4xxx_mailbox_premature_completion(ha);
}
} else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT &&
!test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
@@ -1795,7 +2104,90 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
qla4xxx_wake_dpc(ha);
} else {
/* Check firmware health */
- qla4_8xxx_check_fw_alive(ha);
+ if (qla4_8xxx_check_fw_alive(ha)) {
+ ql4_printk(KERN_INFO, ha, "disabling pause"
+ " transmit on port 0 & 1.\n");
+ qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
+ CRB_NIU_XG_PAUSE_CTL_P0 |
+ CRB_NIU_XG_PAUSE_CTL_P1);
+ halt_status = qla4_8xxx_rd_32(ha,
+ QLA82XX_PEG_HALT_STATUS1);
+
+ if (LSW(MSB(halt_status)) == 0x67)
+ ql4_printk(KERN_ERR, ha, "%s:"
+ " Firmware aborted with"
+ " error code 0x00006700."
+ " Device is being reset\n",
+ __func__);
+
+ /* Since we cannot change dev_state in interrupt
+ * context, set appropriate DPC flag then wakeup
+ * DPC */
+ if (halt_status & HALT_STATUS_UNRECOVERABLE)
+ set_bit(DPC_HA_UNRECOVERABLE,
+ &ha->dpc_flags);
+ else {
+ ql4_printk(KERN_INFO, ha, "%s: detect "
+ "abort needed!\n", __func__);
+ set_bit(DPC_RESET_HA, &ha->dpc_flags);
+ }
+ qla4xxx_mailbox_premature_completion(ha);
+ qla4xxx_wake_dpc(ha);
+ }
+ }
+ }
+}
+
+static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
+{
+ struct iscsi_session *sess;
+ struct ddb_entry *ddb_entry;
+ struct scsi_qla_host *ha;
+
+ sess = cls_sess->dd_data;
+ ddb_entry = sess->dd_data;
+ ha = ddb_entry->ha;
+
+ if (!(ddb_entry->ddb_type == FLASH_DDB))
+ return;
+
+ if (adapter_up(ha) && !test_bit(DF_RELOGIN, &ddb_entry->flags) &&
+ !iscsi_is_session_online(cls_sess)) {
+ if (atomic_read(&ddb_entry->retry_relogin_timer) !=
+ INVALID_ENTRY) {
+ if (atomic_read(&ddb_entry->retry_relogin_timer) ==
+ 0) {
+ atomic_set(&ddb_entry->retry_relogin_timer,
+ INVALID_ENTRY);
+ set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
+ set_bit(DF_RELOGIN, &ddb_entry->flags);
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: index [%d] login device\n",
+ __func__, ddb_entry->fw_ddb_index));
+ } else
+ atomic_dec(&ddb_entry->retry_relogin_timer);
+ }
+ }
+
+ /* Wait for relogin to timeout */
+ if (atomic_read(&ddb_entry->relogin_timer) &&
+ (atomic_dec_and_test(&ddb_entry->relogin_timer) != 0)) {
+ /*
+ * If the relogin times out and the device is
+ * still NOT ONLINE then try and relogin again.
+ */
+ if (!iscsi_is_session_online(cls_sess)) {
+ /* Reset retry relogin timer */
+ atomic_inc(&ddb_entry->relogin_retry_count);
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: index[%d] relogin timed out-retrying"
+ " relogin (%d), retry (%d)\n", __func__,
+ ddb_entry->fw_ddb_index,
+ atomic_read(&ddb_entry->relogin_retry_count),
+ ddb_entry->default_time2wait + 4));
+ set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
+ atomic_set(&ddb_entry->retry_relogin_timer,
+ ddb_entry->default_time2wait + 4);
}
}
}
@@ -1809,6 +2201,8 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
int start_dpc = 0;
uint16_t w;
+ iscsi_host_for_each_session(ha->host, qla4xxx_check_relogin_flash_ddb);
+
/* If we are in the middle of AER/EEH processing
* skip any processing and reschedule the timer
*/
@@ -2078,7 +2472,12 @@ static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session)
sess = cls_session->dd_data;
ddb_entry = sess->dd_data;
ddb_entry->fw_ddb_device_state = DDB_DS_SESSION_FAILED;
- iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
+
+ if (ddb_entry->ddb_type == FLASH_DDB)
+ iscsi_block_session(ddb_entry->sess);
+ else
+ iscsi_session_failure(cls_session->dd_data,
+ ISCSI_ERR_CONN_FAILED);
}
/**
@@ -2089,6 +2488,8 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
{
int status = QLA_ERROR;
uint8_t reset_chip = 0;
+ uint32_t dev_state;
+ unsigned long wait;
/* Stall incoming I/O until we are done */
scsi_block_requests(ha->host);
@@ -2139,8 +2540,29 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
* or if stop_firmware fails for ISP-82xx.
* This is the default case for ISP-4xxx */
if (!is_qla8022(ha) || reset_chip) {
+ if (!is_qla8022(ha))
+ goto chip_reset;
+
+ /* Check if 82XX firmware is alive or not
+ * We may have arrived here from NEED_RESET
+ * detection only */
+ if (test_bit(AF_FW_RECOVERY, &ha->flags))
+ goto chip_reset;
+
+ wait = jiffies + (FW_ALIVE_WAIT_TOV * HZ);
+ while (time_before(jiffies, wait)) {
+ if (qla4_8xxx_check_fw_alive(ha)) {
+ qla4xxx_mailbox_premature_completion(ha);
+ break;
+ }
+
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(HZ);
+ }
+
if (!test_bit(AF_FW_RECOVERY, &ha->flags))
qla4xxx_cmd_wait(ha);
+chip_reset:
qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
DEBUG2(ql4_printk(KERN_INFO, ha,
@@ -2163,7 +2585,7 @@ recover_ha_init_adapter:
/* NOTE: AF_ONLINE flag set upon successful completion of
* qla4xxx_initialize_adapter */
- status = qla4xxx_initialize_adapter(ha);
+ status = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
}
/* Retry failed adapter initialization, if necessary
@@ -2176,6 +2598,25 @@ recover_ha_init_adapter:
* Since we don't want to block the DPC for too long
* with multiple resets in the same thread,
* utilize DPC to retry */
+ if (is_qla8022(ha)) {
+ qla4_8xxx_idc_lock(ha);
+ dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+ qla4_8xxx_idc_unlock(ha);
+ if (dev_state == QLA82XX_DEV_FAILED) {
+ ql4_printk(KERN_INFO, ha, "%s: don't retry "
+ "recover adapter. H/W is in Failed "
+ "state\n", __func__);
+ qla4xxx_dead_adapter_cleanup(ha);
+ clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
+ clear_bit(DPC_RESET_HA, &ha->dpc_flags);
+ clear_bit(DPC_RESET_HA_FW_CONTEXT,
+ &ha->dpc_flags);
+ status = QLA_ERROR;
+
+ goto exit_recover;
+ }
+ }
+
if (!test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags)) {
ha->retry_reset_ha_cnt = MAX_RESET_HA_RETRIES;
DEBUG2(printk("scsi%ld: recover adapter - retrying "
@@ -2214,6 +2655,7 @@ recover_ha_init_adapter:
clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
}
+exit_recover:
ha->adapter_error_count++;
if (test_bit(AF_ONLINE, &ha->flags))
@@ -2245,17 +2687,108 @@ static void qla4xxx_relogin_devices(struct iscsi_cls_session *cls_session)
iscsi_unblock_session(ddb_entry->sess);
} else {
/* Trigger relogin */
- iscsi_session_failure(cls_session->dd_data,
- ISCSI_ERR_CONN_FAILED);
+ if (ddb_entry->ddb_type == FLASH_DDB) {
+ if (!test_bit(DF_RELOGIN, &ddb_entry->flags))
+ qla4xxx_arm_relogin_timer(ddb_entry);
+ } else
+ iscsi_session_failure(cls_session->dd_data,
+ ISCSI_ERR_CONN_FAILED);
}
}
}
+int qla4xxx_unblock_flash_ddb(struct iscsi_cls_session *cls_session)
+{
+ struct iscsi_session *sess;
+ struct ddb_entry *ddb_entry;
+ struct scsi_qla_host *ha;
+
+ sess = cls_session->dd_data;
+ ddb_entry = sess->dd_data;
+ ha = ddb_entry->ha;
+ ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
+ " unblock session\n", ha->host_no, __func__,
+ ddb_entry->fw_ddb_index);
+
+ iscsi_unblock_session(ddb_entry->sess);
+
+ /* Start scan target */
+ if (test_bit(AF_ONLINE, &ha->flags)) {
+ ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
+ " start scan\n", ha->host_no, __func__,
+ ddb_entry->fw_ddb_index);
+ scsi_queue_work(ha->host, &ddb_entry->sess->scan_work);
+ }
+ return QLA_SUCCESS;
+}
+
+int qla4xxx_unblock_ddb(struct iscsi_cls_session *cls_session)
+{
+ struct iscsi_session *sess;
+ struct ddb_entry *ddb_entry;
+ struct scsi_qla_host *ha;
+
+ sess = cls_session->dd_data;
+ ddb_entry = sess->dd_data;
+ ha = ddb_entry->ha;
+ ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
+ " unblock user space session\n", ha->host_no, __func__,
+ ddb_entry->fw_ddb_index);
+ iscsi_conn_start(ddb_entry->conn);
+ iscsi_conn_login_event(ddb_entry->conn,
+ ISCSI_CONN_STATE_LOGGED_IN);
+
+ return QLA_SUCCESS;
+}
+
static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha)
{
iscsi_host_for_each_session(ha->host, qla4xxx_relogin_devices);
}
+static void qla4xxx_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
+{
+ uint16_t relogin_timer;
+ struct iscsi_session *sess;
+ struct ddb_entry *ddb_entry;
+ struct scsi_qla_host *ha;
+
+ sess = cls_sess->dd_data;
+ ddb_entry = sess->dd_data;
+ ha = ddb_entry->ha;
+
+ relogin_timer = max(ddb_entry->default_relogin_timeout,
+ (uint16_t)RELOGIN_TOV);
+ atomic_set(&ddb_entry->relogin_timer, relogin_timer);
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "scsi%ld: Relogin index [%d]. TOV=%d\n", ha->host_no,
+ ddb_entry->fw_ddb_index, relogin_timer));
+
+ qla4xxx_login_flash_ddb(cls_sess);
+}
+
+static void qla4xxx_dpc_relogin(struct iscsi_cls_session *cls_sess)
+{
+ struct iscsi_session *sess;
+ struct ddb_entry *ddb_entry;
+ struct scsi_qla_host *ha;
+
+ sess = cls_sess->dd_data;
+ ddb_entry = sess->dd_data;
+ ha = ddb_entry->ha;
+
+ if (!(ddb_entry->ddb_type == FLASH_DDB))
+ return;
+
+ if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags) &&
+ !iscsi_is_session_online(cls_sess)) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "relogin issued\n"));
+ qla4xxx_relogin_flash_ddb(cls_sess);
+ }
+}
+
void qla4xxx_wake_dpc(struct scsi_qla_host *ha)
{
if (ha->dpc_thread)
@@ -2356,6 +2889,12 @@ dpc_post_reset_ha:
if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags))
qla4xxx_get_dhcp_ip_address(ha);
+ /* ---- relogin device? --- */
+ if (adapter_up(ha) &&
+ test_and_clear_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags)) {
+ iscsi_host_for_each_session(ha->host, qla4xxx_dpc_relogin);
+ }
+
/* ---- link change? --- */
if (test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) {
if (!test_bit(AF_LINK_UP, &ha->flags)) {
@@ -2368,8 +2907,12 @@ dpc_post_reset_ha:
* fatal error recovery. Therefore, the driver must
* manually relogin to devices when recovering from
* connection failures, logouts, expired KATO, etc. */
-
- qla4xxx_relogin_all_devices(ha);
+ if (test_and_clear_bit(AF_BUILD_DDB_LIST, &ha->flags)) {
+ qla4xxx_build_ddb_list(ha, ha->is_reset);
+ iscsi_host_for_each_session(ha->host,
+ qla4xxx_login_flash_ddb);
+ } else
+ qla4xxx_relogin_all_devices(ha);
}
}
}
@@ -2380,6 +2923,7 @@ dpc_post_reset_ha:
**/
static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
{
+ qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
if (test_bit(AF_INTERRUPTS_ON, &ha->flags)) {
/* Turn-off interrupts on the card. */
@@ -2613,7 +3157,7 @@ static ssize_t qla4xxx_show_boot_eth_info(void *data, int type, char *buf)
return rc;
}
-static mode_t qla4xxx_eth_get_attr_visibility(void *data, int type)
+static umode_t qla4xxx_eth_get_attr_visibility(void *data, int type)
{
int rc;
@@ -2647,7 +3191,7 @@ static ssize_t qla4xxx_show_boot_ini_info(void *data, int type, char *buf)
return rc;
}
-static mode_t qla4xxx_ini_get_attr_visibility(void *data, int type)
+static umode_t qla4xxx_ini_get_attr_visibility(void *data, int type)
{
int rc;
@@ -2734,7 +3278,7 @@ static ssize_t qla4xxx_show_boot_tgt_sec_info(void *data, int type, char *buf)
return qla4xxx_show_boot_tgt_info(boot_sess, type, buf);
}
-static mode_t qla4xxx_tgt_get_attr_visibility(void *data, int type)
+static umode_t qla4xxx_tgt_get_attr_visibility(void *data, int type)
{
int rc;
@@ -2867,6 +3411,9 @@ static int get_fw_boot_info(struct scsi_qla_host *ha, uint16_t ddb_index[])
" target ID %d\n", __func__, ddb_index[0],
ddb_index[1]));
+ ha->pri_ddb_idx = ddb_index[0];
+ ha->sec_ddb_idx = ddb_index[1];
+
exit_boot_info_free:
dma_free_coherent(&ha->pdev->dev, size, buf, buf_dma);
exit_boot_info:
@@ -3034,6 +3581,9 @@ static int qla4xxx_get_boot_info(struct scsi_qla_host *ha)
return ret;
}
+ if (ql4xdisablesysfsboot)
+ return QLA_SUCCESS;
+
if (ddb_index[0] == 0xffff)
goto sec_target;
@@ -3066,7 +3616,15 @@ static int qla4xxx_setup_boot_info(struct scsi_qla_host *ha)
struct iscsi_boot_kobj *boot_kobj;
if (qla4xxx_get_boot_info(ha) != QLA_SUCCESS)
- return 0;
+ return QLA_ERROR;
+
+ if (ql4xdisablesysfsboot) {
+ ql4_printk(KERN_INFO, ha,
+ "%s: syfsboot disabled - driver will trigger login"
+ "and publish session for discovery .\n", __func__);
+ return QLA_SUCCESS;
+ }
+
ha->boot_kset = iscsi_boot_create_host_kset(ha->host->host_no);
if (!ha->boot_kset)
@@ -3108,7 +3666,7 @@ static int qla4xxx_setup_boot_info(struct scsi_qla_host *ha)
if (!boot_kobj)
goto put_host;
- return 0;
+ return QLA_SUCCESS;
put_host:
scsi_host_put(ha->host);
@@ -3174,7 +3732,586 @@ static void qla4xxx_create_chap_list(struct scsi_qla_host *ha)
exit_chap_list:
dma_free_coherent(&ha->pdev->dev, chap_size,
chap_flash_data, chap_dma);
- return;
+}
+
+static void qla4xxx_get_param_ddb(struct ddb_entry *ddb_entry,
+ struct ql4_tuple_ddb *tddb)
+{
+ struct scsi_qla_host *ha;
+ struct iscsi_cls_session *cls_sess;
+ struct iscsi_cls_conn *cls_conn;
+ struct iscsi_session *sess;
+ struct iscsi_conn *conn;
+
+ DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
+ ha = ddb_entry->ha;
+ cls_sess = ddb_entry->sess;
+ sess = cls_sess->dd_data;
+ cls_conn = ddb_entry->conn;
+ conn = cls_conn->dd_data;
+
+ tddb->tpgt = sess->tpgt;
+ tddb->port = conn->persistent_port;
+ strncpy(tddb->iscsi_name, sess->targetname, ISCSI_NAME_SIZE);
+ strncpy(tddb->ip_addr, conn->persistent_address, DDB_IPADDR_LEN);
+}
+
+static void qla4xxx_convert_param_ddb(struct dev_db_entry *fw_ddb_entry,
+ struct ql4_tuple_ddb *tddb)
+{
+ uint16_t options = 0;
+
+ tddb->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
+ memcpy(&tddb->iscsi_name[0], &fw_ddb_entry->iscsi_name[0],
+ min(sizeof(tddb->iscsi_name), sizeof(fw_ddb_entry->iscsi_name)));
+
+ options = le16_to_cpu(fw_ddb_entry->options);
+ if (options & DDB_OPT_IPV6_DEVICE)
+ sprintf(tddb->ip_addr, "%pI6", fw_ddb_entry->ip_addr);
+ else
+ sprintf(tddb->ip_addr, "%pI4", fw_ddb_entry->ip_addr);
+
+ tddb->port = le16_to_cpu(fw_ddb_entry->port);
+}
+
+static int qla4xxx_compare_tuple_ddb(struct scsi_qla_host *ha,
+ struct ql4_tuple_ddb *old_tddb,
+ struct ql4_tuple_ddb *new_tddb)
+{
+ if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name))
+ return QLA_ERROR;
+
+ if (strcmp(old_tddb->ip_addr, new_tddb->ip_addr))
+ return QLA_ERROR;
+
+ if (old_tddb->port != new_tddb->port)
+ return QLA_ERROR;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Match Found, fw[%d,%d,%s,%s], [%d,%d,%s,%s]",
+ old_tddb->port, old_tddb->tpgt, old_tddb->ip_addr,
+ old_tddb->iscsi_name, new_tddb->port, new_tddb->tpgt,
+ new_tddb->ip_addr, new_tddb->iscsi_name));
+
+ return QLA_SUCCESS;
+}
+
+static int qla4xxx_is_session_exists(struct scsi_qla_host *ha,
+ struct dev_db_entry *fw_ddb_entry)
+{
+ struct ddb_entry *ddb_entry;
+ struct ql4_tuple_ddb *fw_tddb = NULL;
+ struct ql4_tuple_ddb *tmp_tddb = NULL;
+ int idx;
+ int ret = QLA_ERROR;
+
+ fw_tddb = vzalloc(sizeof(*fw_tddb));
+ if (!fw_tddb) {
+ DEBUG2(ql4_printk(KERN_WARNING, ha,
+ "Memory Allocation failed.\n"));
+ ret = QLA_SUCCESS;
+ goto exit_check;
+ }
+
+ tmp_tddb = vzalloc(sizeof(*tmp_tddb));
+ if (!tmp_tddb) {
+ DEBUG2(ql4_printk(KERN_WARNING, ha,
+ "Memory Allocation failed.\n"));
+ ret = QLA_SUCCESS;
+ goto exit_check;
+ }
+
+ qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb);
+
+ for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
+ ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
+ if (ddb_entry == NULL)
+ continue;
+
+ qla4xxx_get_param_ddb(ddb_entry, tmp_tddb);
+ if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb)) {
+ ret = QLA_SUCCESS; /* found */
+ goto exit_check;
+ }
+ }
+
+exit_check:
+ if (fw_tddb)
+ vfree(fw_tddb);
+ if (tmp_tddb)
+ vfree(tmp_tddb);
+ return ret;
+}
+
+static int qla4xxx_is_flash_ddb_exists(struct scsi_qla_host *ha,
+ struct list_head *list_nt,
+ struct dev_db_entry *fw_ddb_entry)
+{
+ struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp;
+ struct ql4_tuple_ddb *fw_tddb = NULL;
+ struct ql4_tuple_ddb *tmp_tddb = NULL;
+ int ret = QLA_ERROR;
+
+ fw_tddb = vzalloc(sizeof(*fw_tddb));
+ if (!fw_tddb) {
+ DEBUG2(ql4_printk(KERN_WARNING, ha,
+ "Memory Allocation failed.\n"));
+ ret = QLA_SUCCESS;
+ goto exit_check;
+ }
+
+ tmp_tddb = vzalloc(sizeof(*tmp_tddb));
+ if (!tmp_tddb) {
+ DEBUG2(ql4_printk(KERN_WARNING, ha,
+ "Memory Allocation failed.\n"));
+ ret = QLA_SUCCESS;
+ goto exit_check;
+ }
+
+ qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb);
+
+ list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
+ qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb);
+ if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb)) {
+ ret = QLA_SUCCESS; /* found */
+ goto exit_check;
+ }
+ }
+
+exit_check:
+ if (fw_tddb)
+ vfree(fw_tddb);
+ if (tmp_tddb)
+ vfree(tmp_tddb);
+ return ret;
+}
+
+static void qla4xxx_free_ddb_list(struct list_head *list_ddb)
+{
+ struct qla_ddb_index *ddb_idx, *ddb_idx_tmp;
+
+ list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) {
+ list_del_init(&ddb_idx->list);
+ vfree(ddb_idx);
+ }
+}
+
+static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha,
+ struct dev_db_entry *fw_ddb_entry)
+{
+ struct iscsi_endpoint *ep;
+ struct sockaddr_in *addr;
+ struct sockaddr_in6 *addr6;
+ struct sockaddr *dst_addr;
+ char *ip;
+
+	/* TODO: the iscsi_endpoint needs to be destroyed on unload */
+ dst_addr = vmalloc(sizeof(*dst_addr));
+ if (!dst_addr)
+ return NULL;
+
+ if (fw_ddb_entry->options & DDB_OPT_IPV6_DEVICE) {
+ dst_addr->sa_family = AF_INET6;
+ addr6 = (struct sockaddr_in6 *)dst_addr;
+ ip = (char *)&addr6->sin6_addr;
+ memcpy(ip, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN);
+ addr6->sin6_port = htons(le16_to_cpu(fw_ddb_entry->port));
+
+ } else {
+ dst_addr->sa_family = AF_INET;
+ addr = (struct sockaddr_in *)dst_addr;
+ ip = (char *)&addr->sin_addr;
+ memcpy(ip, fw_ddb_entry->ip_addr, IP_ADDR_LEN);
+ addr->sin_port = htons(le16_to_cpu(fw_ddb_entry->port));
+ }
+
+ ep = qla4xxx_ep_connect(ha->host, dst_addr, 0);
+ vfree(dst_addr);
+ return ep;
+}
+
+static int qla4xxx_verify_boot_idx(struct scsi_qla_host *ha, uint16_t idx)
+{
+ if (ql4xdisablesysfsboot)
+ return QLA_SUCCESS;
+ if (idx == ha->pri_ddb_idx || idx == ha->sec_ddb_idx)
+ return QLA_ERROR;
+ return QLA_SUCCESS;
+}
+
+static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
+ struct ddb_entry *ddb_entry)
+{
+ uint16_t def_timeout;
+
+ ddb_entry->ddb_type = FLASH_DDB;
+ ddb_entry->fw_ddb_index = INVALID_ENTRY;
+ ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
+ ddb_entry->ha = ha;
+ ddb_entry->unblock_sess = qla4xxx_unblock_flash_ddb;
+ ddb_entry->ddb_change = qla4xxx_flash_ddb_change;
+
+ atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
+ atomic_set(&ddb_entry->relogin_timer, 0);
+ atomic_set(&ddb_entry->relogin_retry_count, 0);
+ def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
+ ddb_entry->default_relogin_timeout =
+ (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
+ def_timeout : LOGIN_TOV;
+ ddb_entry->default_time2wait =
+ le16_to_cpu(ddb_entry->fw_ddb_entry.iscsi_def_time2wait);
+}
+
+static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha)
+{
+ uint32_t idx = 0;
+ uint32_t ip_idx[IP_ADDR_COUNT] = {0, 1, 2, 3}; /* 4 IP interfaces */
+ uint32_t sts[MBOX_REG_COUNT];
+ uint32_t ip_state;
+ unsigned long wtime;
+ int ret;
+
+ wtime = jiffies + (HZ * IP_CONFIG_TOV);
+ do {
+ for (idx = 0; idx < IP_ADDR_COUNT; idx++) {
+ if (ip_idx[idx] == -1)
+ continue;
+
+ ret = qla4xxx_get_ip_state(ha, 0, ip_idx[idx], sts);
+
+ if (ret == QLA_ERROR) {
+ ip_idx[idx] = -1;
+ continue;
+ }
+
+ ip_state = (sts[1] & IP_STATE_MASK) >> IP_STATE_SHIFT;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Waiting for IP state for idx = %d, state = 0x%x\n",
+ ip_idx[idx], ip_state));
+ if (ip_state == IP_ADDRSTATE_UNCONFIGURED ||
+ ip_state == IP_ADDRSTATE_INVALID ||
+ ip_state == IP_ADDRSTATE_PREFERRED ||
+ ip_state == IP_ADDRSTATE_DEPRICATED ||
+ ip_state == IP_ADDRSTATE_DISABLING)
+ ip_idx[idx] = -1;
+ }
+
+ /* Break if all IP states checked */
+ if ((ip_idx[0] == -1) &&
+ (ip_idx[1] == -1) &&
+ (ip_idx[2] == -1) &&
+ (ip_idx[3] == -1))
+ break;
+ schedule_timeout_uninterruptible(HZ);
+ } while (time_after(wtime, jiffies));
+}
+
+static void qla4xxx_build_st_list(struct scsi_qla_host *ha,
+ struct list_head *list_st)
+{
+ struct qla_ddb_index *st_ddb_idx;
+ int max_ddbs;
+ int fw_idx_size;
+ struct dev_db_entry *fw_ddb_entry;
+ dma_addr_t fw_ddb_dma;
+ int ret;
+ uint32_t idx = 0, next_idx = 0;
+ uint32_t state = 0, conn_err = 0;
+ uint16_t conn_id = 0;
+
+ fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
+ &fw_ddb_dma);
+ if (fw_ddb_entry == NULL) {
+ DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
+ goto exit_st_list;
+ }
+
+ max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
+ MAX_DEV_DB_ENTRIES;
+ fw_idx_size = sizeof(struct qla_ddb_index);
+
+ for (idx = 0; idx < max_ddbs; idx = next_idx) {
+ ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma,
+ NULL, &next_idx, &state,
+ &conn_err, NULL, &conn_id);
+ if (ret == QLA_ERROR)
+ break;
+
+ /* Check if ST; if so, add it to list_st */
+ if (strlen((char *) fw_ddb_entry->iscsi_name) != 0)
+ goto continue_next_st;
+
+ st_ddb_idx = vzalloc(fw_idx_size);
+ if (!st_ddb_idx)
+ break;
+
+ st_ddb_idx->fw_ddb_idx = idx;
+
+ list_add_tail(&st_ddb_idx->list, list_st);
+continue_next_st:
+ if (next_idx == 0)
+ break;
+ }
+
+exit_st_list:
+ if (fw_ddb_entry)
+ dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
+}
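The ST/NT split used here and again in qla4xxx_build_nt_list below hinges on a single property of the firmware record: a sendtarget (ST) entry carries an empty iscsi_name, a normal target (NT) entry a non-empty one. An illustrative classifier, with a simplified stand-in for struct dev_db_entry:

#include <stdio.h>
#include <string.h>

/* Simplified stand-in for the firmware DDB record; only the field used here. */
struct fake_ddb_entry {
	char iscsi_name[224];
};

/* ST entries have no iscsi_name, NT entries do. */
static const char *classify(const struct fake_ddb_entry *e)
{
	return strlen(e->iscsi_name) == 0 ? "ST (sendtarget)" : "NT (normal target)";
}

int main(void)
{
	struct fake_ddb_entry st = { .iscsi_name = "" };
	struct fake_ddb_entry nt = { .iscsi_name = "iqn.1992-08.com.example:target0" };

	printf("entry 0: %s\n", classify(&st));
	printf("entry 1: %s\n", classify(&nt));
	return 0;
}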
+
+/**
+ * qla4xxx_remove_failed_ddb - Remove inactive or failed ddb from list
+ * @ha: pointer to adapter structure
+ * @list_ddb: List from which failed DDBs are to be removed
+ *
+ * Iterate over the list of DDBs and find and remove DDBs that are either in
+ * no connection active state or failed state
+ **/
+static void qla4xxx_remove_failed_ddb(struct scsi_qla_host *ha,
+ struct list_head *list_ddb)
+{
+ struct qla_ddb_index *ddb_idx, *ddb_idx_tmp;
+ uint32_t next_idx = 0;
+ uint32_t state = 0, conn_err = 0;
+ int ret;
+
+ list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) {
+ ret = qla4xxx_get_fwddb_entry(ha, ddb_idx->fw_ddb_idx,
+ NULL, 0, NULL, &next_idx, &state,
+ &conn_err, NULL, NULL);
+ if (ret == QLA_ERROR)
+ continue;
+
+ if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
+ state == DDB_DS_SESSION_FAILED) {
+ list_del_init(&ddb_idx->list);
+ vfree(ddb_idx);
+ }
+ }
+}
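The helper above depends on the _safe flavour of the list iterator so that an entry can be unlinked and freed while the walk continues. A hedged userspace sketch of the same idea with a plain singly-linked list, where a 'failed' flag stands in for the firmware state check:

#include <stdio.h>
#include <stdlib.h>

struct node {
	int idx;
	int failed;	/* stands in for the DDB_DS_* state check */
	struct node *next;
};

/* Unlink and free failed nodes during the walk; the successor is read
 * before the node is freed, which is the same guarantee that
 * list_for_each_entry_safe provides by prefetching the next entry. */
static void remove_failed(struct node **head)
{
	struct node **link = head;

	while (*link) {
		struct node *cur = *link;

		if (cur->failed) {
			*link = cur->next;	/* unlink before freeing */
			free(cur);
		} else {
			link = &cur->next;
		}
	}
}

static struct node *push(struct node *head, int idx, int failed)
{
	struct node *n = malloc(sizeof(*n));

	if (!n)
		exit(1);
	n->idx = idx;
	n->failed = failed;
	n->next = head;
	return n;
}

int main(void)
{
	struct node *head = NULL;

	head = push(head, 0, 0);
	head = push(head, 1, 1);
	head = push(head, 2, 0);

	remove_failed(&head);
	for (struct node *n = head; n; n = n->next)
		printf("kept idx %d\n", n->idx);	/* prints 2, then 0 */
	return 0;
}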
+
+static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha,
+ struct dev_db_entry *fw_ddb_entry,
+ int is_reset)
+{
+ struct iscsi_cls_session *cls_sess;
+ struct iscsi_session *sess;
+ struct iscsi_cls_conn *cls_conn;
+ struct iscsi_endpoint *ep;
+ uint16_t cmds_max = 32;
+ uint16_t conn_id = 0;
+ uint32_t initial_cmdsn = 0;
+ int ret = QLA_SUCCESS;
+
+ struct ddb_entry *ddb_entry = NULL;
+
+ /* Create session object with INVALID_ENTRY; the target_id
+ * gets set when we issue the login
+ */
+ cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, ha->host,
+ cmds_max, sizeof(struct ddb_entry),
+ sizeof(struct ql4_task_data),
+ initial_cmdsn, INVALID_ENTRY);
+ if (!cls_sess) {
+ ret = QLA_ERROR;
+ goto exit_setup;
+ }
+
+ /*
+ * Session setup takes a reference on the driver module; call
+ * module_put() here to drop it so the driver can still be unloaded.
+ **/
+ module_put(qla4xxx_iscsi_transport.owner);
+ sess = cls_sess->dd_data;
+ ddb_entry = sess->dd_data;
+ ddb_entry->sess = cls_sess;
+
+ cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
+ memcpy(&ddb_entry->fw_ddb_entry, fw_ddb_entry,
+ sizeof(struct dev_db_entry));
+
+ qla4xxx_setup_flash_ddb_entry(ha, ddb_entry);
+
+ cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn), conn_id);
+
+ if (!cls_conn) {
+ ret = QLA_ERROR;
+ goto exit_setup;
+ }
+
+ ddb_entry->conn = cls_conn;
+
+ /* Set up ep for displaying attributes in sysfs */
+ ep = qla4xxx_get_ep_fwdb(ha, fw_ddb_entry);
+ if (ep) {
+ ep->conn = cls_conn;
+ cls_conn->ep = ep;
+ } else {
+ DEBUG2(ql4_printk(KERN_ERR, ha, "Unable to get ep\n"));
+ ret = QLA_ERROR;
+ goto exit_setup;
+ }
+
+ /* Update sess/conn params */
+ qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn);
+
+ if (is_reset == RESET_ADAPTER) {
+ iscsi_block_session(cls_sess);
+ /* Use the relogin path to discover new devices by
+ * short-circuiting the relogin timer logic - instead set the
+ * flags to initiate login right away.
+ */
+ set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
+ set_bit(DF_RELOGIN, &ddb_entry->flags);
+ }
+
+exit_setup:
+ return ret;
+}
+
+static void qla4xxx_build_nt_list(struct scsi_qla_host *ha,
+ struct list_head *list_nt, int is_reset)
+{
+ struct dev_db_entry *fw_ddb_entry;
+ dma_addr_t fw_ddb_dma;
+ int max_ddbs;
+ int fw_idx_size;
+ int ret;
+ uint32_t idx = 0, next_idx = 0;
+ uint32_t state = 0, conn_err = 0;
+ uint16_t conn_id = 0;
+ struct qla_ddb_index *nt_ddb_idx;
+
+ fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
+ &fw_ddb_dma);
+ if (fw_ddb_entry == NULL) {
+ DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
+ goto exit_nt_list;
+ }
+ max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
+ MAX_DEV_DB_ENTRIES;
+ fw_idx_size = sizeof(struct qla_ddb_index);
+
+ for (idx = 0; idx < max_ddbs; idx = next_idx) {
+ ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma,
+ NULL, &next_idx, &state,
+ &conn_err, NULL, &conn_id);
+ if (ret == QLA_ERROR)
+ break;
+
+ if (qla4xxx_verify_boot_idx(ha, idx) != QLA_SUCCESS)
+ goto continue_next_nt;
+
+ /* Check if NT, then add it to the list */
+ if (strlen((char *) fw_ddb_entry->iscsi_name) == 0)
+ goto continue_next_nt;
+
+ if (!(state == DDB_DS_NO_CONNECTION_ACTIVE ||
+ state == DDB_DS_SESSION_FAILED))
+ goto continue_next_nt;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Adding DDB to session = 0x%x\n", idx));
+ if (is_reset == INIT_ADAPTER) {
+ nt_ddb_idx = vmalloc(fw_idx_size);
+ if (!nt_ddb_idx)
+ break;
+
+ nt_ddb_idx->fw_ddb_idx = idx;
+
+ memcpy(&nt_ddb_idx->fw_ddb, fw_ddb_entry,
+ sizeof(struct dev_db_entry));
+
+ if (qla4xxx_is_flash_ddb_exists(ha, list_nt,
+ fw_ddb_entry) == QLA_SUCCESS) {
+ vfree(nt_ddb_idx);
+ goto continue_next_nt;
+ }
+ list_add_tail(&nt_ddb_idx->list, list_nt);
+ } else if (is_reset == RESET_ADAPTER) {
+ if (qla4xxx_is_session_exists(ha, fw_ddb_entry) ==
+ QLA_SUCCESS)
+ goto continue_next_nt;
+ }
+
+ ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, is_reset);
+ if (ret == QLA_ERROR)
+ goto exit_nt_list;
+
+continue_next_nt:
+ if (next_idx == 0)
+ break;
+ }
+
+exit_nt_list:
+ if (fw_ddb_entry)
+ dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
+}
+
+/**
+ * qla4xxx_build_ddb_list - Build ddb list and setup sessions
+ * @ha: pointer to adapter structure
+ * @is_reset: Is this init path or reset path
+ *
+ * Create a list of sendtargets (st) from firmware DDBs, issue the send
+ * targets using connection open, then create the list of normal targets
+ * (nt) from firmware DDBs. Based on the nt list, set up session and
+ * connection objects.
+ **/
+void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset)
+{
+ uint16_t tmo = 0;
+ struct list_head list_st, list_nt;
+ struct qla_ddb_index *st_ddb_idx, *st_ddb_idx_tmp;
+ unsigned long wtime;
+
+ if (!test_bit(AF_LINK_UP, &ha->flags)) {
+ set_bit(AF_BUILD_DDB_LIST, &ha->flags);
+ ha->is_reset = is_reset;
+ return;
+ }
+
+ INIT_LIST_HEAD(&list_st);
+ INIT_LIST_HEAD(&list_nt);
+
+ qla4xxx_build_st_list(ha, &list_st);
+
+ /* Before issuing the conn open mbox, ensure all IP states are
+ * configured. Note: conn open fails if the IPs are not configured.
+ */
+ qla4xxx_wait_for_ip_configuration(ha);
+
+ /* Go thru the STs and fire the sendtargets by issuing conn open mbx */
+ list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) {
+ qla4xxx_conn_open(ha, st_ddb_idx->fw_ddb_idx);
+ }
+
+ /* Wait for the sendtargets to complete; wait a minimum of 12 sec */
+ tmo = ((ha->def_timeout > LOGIN_TOV) &&
+ (ha->def_timeout < LOGIN_TOV * 10) ?
+ ha->def_timeout : LOGIN_TOV);
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Default time to wait for build ddb %d\n", tmo));
+
+ wtime = jiffies + (HZ * tmo);
+ do {
+ if (list_empty(&list_st))
+ break;
+
+ qla4xxx_remove_failed_ddb(ha, &list_st);
+ schedule_timeout_uninterruptible(HZ / 10);
+ } while (time_after(wtime, jiffies));
+
+ /* Free up the sendtargets list */
+ qla4xxx_free_ddb_list(&list_st);
+
+ qla4xxx_build_nt_list(ha, &list_nt, is_reset);
+
+ qla4xxx_free_ddb_list(&list_nt);
+
+ qla4xxx_free_ddb_index(ha);
}
/**
@@ -3298,7 +4435,7 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
* firmware
* NOTE: interrupts enabled upon successful completion
*/
- status = qla4xxx_initialize_adapter(ha);
+ status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
while ((!test_bit(AF_ONLINE, &ha->flags)) &&
init_retry_count++ < MAX_INIT_RETRIES) {
@@ -3319,7 +4456,7 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
if (ha->isp_ops->reset_chip(ha) == QLA_ERROR)
continue;
- status = qla4xxx_initialize_adapter(ha);
+ status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
}
if (!test_bit(AF_ONLINE, &ha->flags)) {
@@ -3386,12 +4523,16 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
ha->host_no, ha->firmware_version[0], ha->firmware_version[1],
ha->patch_number, ha->build_number);
- qla4xxx_create_chap_list(ha);
-
if (qla4xxx_setup_boot_info(ha))
ql4_printk(KERN_ERR, ha, "%s:ISCSI boot info setup failed\n",
__func__);
+ /* Perform the build ddb list and login to each */
+ qla4xxx_build_ddb_list(ha, INIT_ADAPTER);
+ iscsi_host_for_each_session(ha->host, qla4xxx_login_flash_ddb);
+
+ qla4xxx_create_chap_list(ha);
+
qla4xxx_create_ifaces(ha);
return 0;
@@ -3449,6 +4590,38 @@ static void qla4xxx_prevent_other_port_reinit(struct scsi_qla_host *ha)
}
}
+static void qla4xxx_destroy_fw_ddb_session(struct scsi_qla_host *ha)
+{
+ struct ddb_entry *ddb_entry;
+ int options;
+ int idx;
+
+ for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
+
+ ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
+ if ((ddb_entry != NULL) &&
+ (ddb_entry->ddb_type == FLASH_DDB)) {
+
+ options = LOGOUT_OPTION_CLOSE_SESSION;
+ if (qla4xxx_session_logout_ddb(ha, ddb_entry, options)
+ == QLA_ERROR)
+ ql4_printk(KERN_ERR, ha, "%s: Logout failed\n",
+ __func__);
+
+ qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
+ /*
+ * The driver module reference count was decremented at session
+ * setup so the driver could be unloaded seamlessly without
+ * destroying the session; take the reference back here.
+ **/
+ try_module_get(qla4xxx_iscsi_transport.owner);
+ iscsi_destroy_endpoint(ddb_entry->conn->ep);
+ qla4xxx_free_ddb(ha, ddb_entry);
+ iscsi_session_teardown(ddb_entry->sess);
+ }
+ }
+}
/**
* qla4xxx_remove_adapter - callback function to remove adapter.
* @pci_dev: PCI device pointer
@@ -3465,9 +4638,11 @@ static void __devexit qla4xxx_remove_adapter(struct pci_dev *pdev)
/* destroy iface from sysfs */
qla4xxx_destroy_ifaces(ha);
- if (ha->boot_kset)
+ if ((!ql4xdisablesysfsboot) && ha->boot_kset)
iscsi_boot_destroy_kset(ha->boot_kset);
+ qla4xxx_destroy_fw_ddb_session(ha);
+
scsi_remove_host(ha->host);
qla4xxx_free_adapter(ha);
@@ -3840,6 +5015,20 @@ static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd)
}
/**
+ * qla4xxx_is_eh_active - check if error handler is running
+ * @shost: Pointer to SCSI Host struct
+ *
+ * This routine determines whether the host reset was invoked from the
+ * EH path or from an application such as sg_reset
+ **/
+static int qla4xxx_is_eh_active(struct Scsi_Host *shost)
+{
+ if (shost->shost_state == SHOST_RECOVERY)
+ return 1;
+ return 0;
+}
+
+/**
* qla4xxx_eh_host_reset - kernel callback
* @cmd: Pointer to Linux's SCSI command structure
*
@@ -3856,6 +5045,11 @@ static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd)
if (ql4xdontresethba) {
DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
ha->host_no, __func__));
+
+ /* Clear outstanding srb in queues */
+ if (qla4xxx_is_eh_active(cmd->device->host))
+ qla4xxx_abort_active_cmds(ha, DID_ABORT << 16);
+
return FAILED;
}
@@ -4115,7 +5309,7 @@ static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
qla4_8xxx_idc_unlock(ha);
clear_bit(AF_FW_RECOVERY, &ha->flags);
- rval = qla4xxx_initialize_adapter(ha);
+ rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
qla4_8xxx_idc_lock(ha);
if (rval != QLA_SUCCESS) {
@@ -4151,7 +5345,7 @@ static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
if ((qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE) ==
QLA82XX_DEV_READY)) {
clear_bit(AF_FW_RECOVERY, &ha->flags);
- rval = qla4xxx_initialize_adapter(ha);
+ rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
if (rval == QLA_SUCCESS) {
ret = qla4xxx_request_irqs(ha);
if (ret) {
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index c15347d3f532..133989b3a9f4 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,4 +5,4 @@
* See LICENSE.qla4xxx for copyright and licensing details.
*/
-#define QLA4XXX_DRIVER_VERSION "5.02.00-k8"
+#define QLA4XXX_DRIVER_VERSION "5.02.00-k12"
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index dc6131e6a1ba..5f84a148eb14 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -1812,7 +1812,7 @@ int scsi_error_handler(void *data)
* what we need to do to get it up and online again (if we can).
* If we fail, we end up taking the thing offline.
*/
- if (scsi_autopm_get_host(shost) != 0) {
+ if (!shost->eh_noresume && scsi_autopm_get_host(shost) != 0) {
SCSI_LOG_ERROR_RECOVERY(1,
printk(KERN_ERR "Error handler scsi_eh_%d "
"unable to autoresume\n",
@@ -1833,7 +1833,8 @@ int scsi_error_handler(void *data)
* which are still online.
*/
scsi_restart_operations(shost);
- scsi_autopm_put_host(shost);
+ if (!shost->eh_noresume)
+ scsi_autopm_put_host(shost);
set_current_state(TASK_INTERRUPTIBLE);
}
__set_current_state(TASK_RUNNING);
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index f85cfa6c47b5..b2c95dbe9d65 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1316,15 +1316,10 @@ static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
}
if (scsi_target_is_busy(starget)) {
- if (list_empty(&sdev->starved_entry))
- list_add_tail(&sdev->starved_entry,
- &shost->starved_list);
+ list_move_tail(&sdev->starved_entry, &shost->starved_list);
return 0;
}
- /* We're OK to process the command, so we can't be starved */
- if (!list_empty(&sdev->starved_entry))
- list_del_init(&sdev->starved_entry);
return 1;
}
diff --git a/drivers/scsi/scsi_netlink.c b/drivers/scsi/scsi_netlink.c
index 44f76e8b58af..c77628afbf9f 100644
--- a/drivers/scsi/scsi_netlink.c
+++ b/drivers/scsi/scsi_netlink.c
@@ -112,7 +112,7 @@ scsi_nl_rcv_msg(struct sk_buff *skb)
goto next_msg;
}
- if (security_netlink_recv(skb, CAP_SYS_ADMIN)) {
+ if (!capable(CAP_SYS_ADMIN)) {
err = -EPERM;
goto next_msg;
}
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index d329f8b12e2b..bf8bf79e6a1f 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -49,8 +49,22 @@ static int scsi_bus_suspend_common(struct device *dev, pm_message_t msg)
{
int err = 0;
- if (scsi_is_sdev_device(dev))
+ if (scsi_is_sdev_device(dev)) {
+ /*
+ * sd is the only high-level SCSI driver to implement runtime
+ * PM, and sd treats runtime suspend, system suspend, and
+ * system hibernate identically (but not system freeze).
+ */
+ if (pm_runtime_suspended(dev)) {
+ if (msg.event == PM_EVENT_SUSPEND ||
+ msg.event == PM_EVENT_HIBERNATE)
+ return 0; /* already suspended */
+
+ /* wake up device so that FREEZE will succeed */
+ pm_runtime_resume(dev);
+ }
err = scsi_dev_type_suspend(dev, msg);
+ }
return err;
}
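A small decision-table sketch of the suspend-path logic added above, under the assumption spelled out in the comment: for sd, runtime suspend, system suspend and hibernate are equivalent, so an already runtime-suspended disk can skip those events, but must be woken first for a freeze:

#include <stdio.h>
#include <stdbool.h>

enum pm_event { EV_SUSPEND, EV_HIBERNATE, EV_FREEZE };

/* Returns true when the device-type suspend call can be skipped entirely. */
static bool can_skip_suspend(bool runtime_suspended, enum pm_event ev)
{
	if (!runtime_suspended)
		return false;
	/* runtime suspend == system suspend == hibernate for sd, so there
	 * is nothing more to do for those two events */
	return ev == EV_SUSPEND || ev == EV_HIBERNATE;
}

int main(void)
{
	const char *names[] = { "SUSPEND", "HIBERNATE", "FREEZE" };

	for (int rs = 0; rs <= 1; rs++)
		for (int ev = EV_SUSPEND; ev <= EV_FREEZE; ev++)
			printf("runtime_suspended=%d event=%-9s skip=%d\n",
			       rs, names[ev], can_skip_suspend(rs, ev));
	return 0;
}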
@@ -58,8 +72,17 @@ static int scsi_bus_resume_common(struct device *dev)
{
int err = 0;
- if (scsi_is_sdev_device(dev))
+ if (scsi_is_sdev_device(dev)) {
+ /*
+ * The parent device may have runtime suspended again as soon as
+ * it was woken up during the system resume.
+ *
+ * Resume it on behalf of the child.
+ */
+ pm_runtime_get_sync(dev->parent);
err = scsi_dev_type_resume(dev);
+ pm_runtime_put_sync(dev->parent);
+ }
if (err == 0) {
pm_runtime_disable(dev);
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 2a588955423a..68eadd1c67fd 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -45,7 +45,6 @@ static inline void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
enum {
SCSI_DEVINFO_GLOBAL = 0,
SCSI_DEVINFO_SPI,
- SCSI_DEVINFO_DH,
};
extern int scsi_get_device_flags(struct scsi_device *sdev,
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index b3c6d957fbd8..89da43f73c00 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -297,7 +297,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
kfree(sdev);
goto out;
}
- blk_get_queue(sdev->request_queue);
+ WARN_ON_ONCE(!blk_get_queue(sdev->request_queue));
sdev->request_queue->queuedata = sdev;
scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 1b214910b714..f59d4a05ecd7 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -3048,7 +3048,8 @@ fc_remote_port_rolechg(struct fc_rport *rport, u32 roles)
spin_lock_irqsave(shost->host_lock, flags);
rport->flags &= ~(FC_RPORT_FAST_FAIL_TIMEDOUT |
- FC_RPORT_DEVLOSS_PENDING);
+ FC_RPORT_DEVLOSS_PENDING |
+ FC_RPORT_DEVLOSS_CALLBK_DONE);
spin_unlock_irqrestore(shost->host_lock, flags);
/* ensure any stgt delete functions are done */
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 96029e6d027f..cfd491437239 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -328,7 +328,7 @@ iscsi_iface_net_attr(iface, vlan_enabled, ISCSI_NET_PARAM_VLAN_ENABLED);
iscsi_iface_net_attr(iface, mtu, ISCSI_NET_PARAM_MTU);
iscsi_iface_net_attr(iface, port, ISCSI_NET_PARAM_PORT);
-static mode_t iscsi_iface_attr_is_visible(struct kobject *kobj,
+static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int i)
{
struct device *dev = container_of(kobj, struct device, kobj);
@@ -1030,6 +1030,7 @@ iscsi_alloc_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
return NULL;
session->transport = transport;
+ session->creator = -1;
session->recovery_tmo = 120;
session->state = ISCSI_SESSION_FREE;
INIT_DELAYED_WORK(&session->recovery_work, session_recovery_timedout);
@@ -1634,8 +1635,9 @@ EXPORT_SYMBOL_GPL(iscsi_session_event);
static int
iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_endpoint *ep,
- struct iscsi_uevent *ev, uint32_t initial_cmdsn,
- uint16_t cmds_max, uint16_t queue_depth)
+ struct iscsi_uevent *ev, pid_t pid,
+ uint32_t initial_cmdsn, uint16_t cmds_max,
+ uint16_t queue_depth)
{
struct iscsi_transport *transport = priv->iscsi_transport;
struct iscsi_cls_session *session;
@@ -1646,6 +1648,7 @@ iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_endpoint *ep,
if (!session)
return -ENOMEM;
+ session->creator = pid;
shost = iscsi_session_to_shost(session);
ev->r.c_session_ret.host_no = shost->host_no;
ev->r.c_session_ret.sid = session->sid;
@@ -1938,6 +1941,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
switch (nlh->nlmsg_type) {
case ISCSI_UEVENT_CREATE_SESSION:
err = iscsi_if_create_session(priv, ep, ev,
+ NETLINK_CREDS(skb)->pid,
ev->u.c_session.initial_cmdsn,
ev->u.c_session.cmds_max,
ev->u.c_session.queue_depth);
@@ -1950,6 +1954,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
}
err = iscsi_if_create_session(priv, ep, ev,
+ NETLINK_CREDS(skb)->pid,
ev->u.c_bound_session.initial_cmdsn,
ev->u.c_bound_session.cmds_max,
ev->u.c_bound_session.queue_depth);
@@ -2199,7 +2204,7 @@ static struct attribute *iscsi_conn_attrs[] = {
NULL,
};
-static mode_t iscsi_conn_attr_is_visible(struct kobject *kobj,
+static umode_t iscsi_conn_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int i)
{
struct device *cdev = container_of(kobj, struct device, kobj);
@@ -2298,6 +2303,15 @@ show_priv_session_state(struct device *dev, struct device_attribute *attr,
}
static ISCSI_CLASS_ATTR(priv_sess, state, S_IRUGO, show_priv_session_state,
NULL);
+static ssize_t
+show_priv_session_creator(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent);
+ return sprintf(buf, "%d\n", session->creator);
+}
+static ISCSI_CLASS_ATTR(priv_sess, creator, S_IRUGO, show_priv_session_creator,
+ NULL);
#define iscsi_priv_session_attr_show(field, format) \
static ssize_t \
@@ -2367,10 +2381,11 @@ static struct attribute *iscsi_session_attrs[] = {
&dev_attr_sess_targetalias.attr,
&dev_attr_priv_sess_recovery_tmo.attr,
&dev_attr_priv_sess_state.attr,
+ &dev_attr_priv_sess_creator.attr,
NULL,
};
-static mode_t iscsi_session_attr_is_visible(struct kobject *kobj,
+static umode_t iscsi_session_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int i)
{
struct device *cdev = container_of(kobj, struct device, kobj);
@@ -2424,6 +2439,8 @@ static mode_t iscsi_session_attr_is_visible(struct kobject *kobj,
return S_IRUGO | S_IWUSR;
else if (attr == &dev_attr_priv_sess_state.attr)
return S_IRUGO;
+ else if (attr == &dev_attr_priv_sess_creator.attr)
+ return S_IRUGO;
else {
WARN_ONCE(1, "Invalid session attr");
return 0;
@@ -2468,7 +2485,7 @@ static struct attribute *iscsi_host_attrs[] = {
NULL,
};
-static mode_t iscsi_host_attr_is_visible(struct kobject *kobj,
+static umode_t iscsi_host_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int i)
{
struct device *cdev = container_of(kobj, struct device, kobj);
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index 5fbeadd96819..a2715c31e754 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -1434,7 +1434,7 @@ static int spi_host_configure(struct transport_container *tc,
(si->f->show_##name ? S_IRUGO : 0) | \
(si->f->set_##name ? S_IWUSR : 0)
-static mode_t target_attribute_is_visible(struct kobject *kobj,
+static umode_t target_attribute_is_visible(struct kobject *kobj,
struct attribute *attr, int i)
{
struct device *cdev = container_of(kobj, struct device, kobj);
diff --git a/drivers/scsi/scsicam.c b/drivers/scsi/scsicam.c
index 6803b1e26ecc..92d24d6dcb39 100644
--- a/drivers/scsi/scsicam.c
+++ b/drivers/scsi/scsicam.c
@@ -16,7 +16,6 @@
#include <linux/genhd.h>
#include <linux/kernel.h>
#include <linux/blkdev.h>
-#include <linux/buffer_head.h>
#include <asm/unaligned.h>
#include <scsi/scsicam.h>
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index fa3a5918009c..c691fb50e6cb 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -50,6 +50,7 @@
#include <linux/string_helpers.h>
#include <linux/async.h>
#include <linux/slab.h>
+#include <linux/pm_runtime.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>
@@ -1074,6 +1075,10 @@ static int sd_ioctl(struct block_device *bdev, fmode_t mode,
SCSI_LOG_IOCTL(1, sd_printk(KERN_INFO, sdkp, "sd_ioctl: disk=%s, "
"cmd=0x%x\n", disk->disk_name, cmd));
+ error = scsi_verify_blk_ioctl(bdev, cmd);
+ if (error < 0)
+ return error;
+
/*
* If we are in the middle of error recovery, don't let anyone
* else try and use this device. Also, if error recovery fails, it
@@ -1096,7 +1101,7 @@ static int sd_ioctl(struct block_device *bdev, fmode_t mode,
error = scsi_ioctl(sdp, cmd, p);
break;
default:
- error = scsi_cmd_ioctl(disk->queue, disk, mode, cmd, p);
+ error = scsi_cmd_blk_ioctl(bdev, mode, cmd, p);
if (error != -ENOTTY)
break;
error = scsi_ioctl(sdp, cmd, p);
@@ -1266,6 +1271,11 @@ static int sd_compat_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
struct scsi_device *sdev = scsi_disk(bdev->bd_disk)->device;
+ int ret;
+
+ ret = scsi_verify_blk_ioctl(bdev, cmd);
+ if (ret < 0)
+ return ret;
/*
* If we are in the middle of error recovery, don't let anyone
@@ -1277,8 +1287,6 @@ static int sd_compat_ioctl(struct block_device *bdev, fmode_t mode,
return -ENODEV;
if (sdev->host->hostt->compat_ioctl) {
- int ret;
-
ret = sdev->host->hostt->compat_ioctl(sdev, cmd, (void __user *)arg);
return ret;
@@ -2741,6 +2749,9 @@ static void sd_shutdown(struct device *dev)
if (!sdkp)
return; /* this can happen */
+ if (pm_runtime_suspended(dev))
+ goto exit;
+
if (sdkp->WCE) {
sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
sd_sync_cache(sdkp);
@@ -2751,6 +2762,7 @@ static void sd_shutdown(struct device *dev)
sd_start_stop_device(sdkp, 0);
}
+exit:
scsi_disk_put(sdkp);
}
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 441a1c5b8974..eacd46bb36b9 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -2325,16 +2325,15 @@ static struct sg_proc_leaf sg_proc_leaf_arr[] = {
static int
sg_proc_init(void)
{
- int k, mask;
int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
- struct sg_proc_leaf * leaf;
+ int k;
sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
if (!sg_proc_sgp)
return 1;
for (k = 0; k < num_leaves; ++k) {
- leaf = &sg_proc_leaf_arr[k];
- mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO;
+ struct sg_proc_leaf *leaf = &sg_proc_leaf_arr[k];
+ umode_t mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO;
proc_create(leaf->name, mask, sg_proc_sgp, leaf->fops);
}
return 0;
@@ -2369,16 +2368,15 @@ static ssize_t
sg_proc_write_adio(struct file *filp, const char __user *buffer,
size_t count, loff_t *off)
{
- int num;
- char buff[11];
+ int err;
+ unsigned long num;
if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
return -EACCES;
- num = (count < 10) ? count : 10;
- if (copy_from_user(buff, buffer, num))
- return -EFAULT;
- buff[num] = '\0';
- sg_allow_dio = simple_strtoul(buff, NULL, 10) ? 1 : 0;
+ err = kstrtoul_from_user(buffer, count, 0, &num);
+ if (err)
+ return err;
+ sg_allow_dio = num ? 1 : 0;
return count;
}
@@ -2391,17 +2389,15 @@ static ssize_t
sg_proc_write_dressz(struct file *filp, const char __user *buffer,
size_t count, loff_t *off)
{
- int num;
+ int err;
unsigned long k = ULONG_MAX;
- char buff[11];
if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
return -EACCES;
- num = (count < 10) ? count : 10;
- if (copy_from_user(buff, buffer, num))
- return -EFAULT;
- buff[num] = '\0';
- k = simple_strtoul(buff, NULL, 10);
+
+ err = kstrtoul_from_user(buffer, count, 0, &k);
+ if (err)
+ return err;
if (k <= 1048576) { /* limit "big buff" to 1 MB */
sg_big_buff = k;
return count;
diff --git a/drivers/scsi/sni_53c710.c b/drivers/scsi/sni_53c710.c
index 9acc2b2a3601..cf51432f8e72 100644
--- a/drivers/scsi/sni_53c710.c
+++ b/drivers/scsi/sni_53c710.c
@@ -51,7 +51,7 @@
#include "53c700.h"
-MODULE_AUTHOR("Thomas Bogendrfer");
+MODULE_AUTHOR("Thomas Bogendörfer");
MODULE_DESCRIPTION("SNI RM 53c710 SCSI Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:snirm_53c710");
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index b4543f575f46..36d1ed7817eb 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -839,6 +839,10 @@ static void sym53c8xx_slave_destroy(struct scsi_device *sdev)
struct sym_lcb *lp = sym_lp(tp, sdev->lun);
unsigned long flags;
+ /* if slave_alloc returned before allocating a sym_lcb, return */
+ if (!lp)
+ return;
+
spin_lock_irqsave(np->s.host->host_lock, flags);
if (lp->busy_itlq || lp->busy_itl) {
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index a18996d24466..7264116185d5 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -1144,7 +1144,7 @@ static void pvscsi_release_resources(struct pvscsi_adapter *adapter)
*
* These are statically allocated. Trying to be clever was not worth it.
*
- * Dynamic allocation can fail, and we can't go deeep into the memory
+ * Dynamic allocation can fail, and we can't go deep into the memory
* allocator, since we're a SCSI driver, and trying too hard to allocate
* memory might generate disk I/O. We also don't want to fail disk I/O
* in that case because we can't get an allocation - the I/O could be
diff --git a/drivers/sh/Makefile b/drivers/sh/Makefile
index 67e272ab1623..7139ad2f2086 100644
--- a/drivers/sh/Makefile
+++ b/drivers/sh/Makefile
@@ -7,11 +7,4 @@ obj-$(CONFIG_HAVE_CLK) += clk/
obj-$(CONFIG_MAPLE) += maple/
obj-$(CONFIG_SUPERHYWAY) += superhyway/
obj-$(CONFIG_GENERIC_GPIO) += pfc.o
-
-#
-# For the moment we only use this framework for ARM-based SH/R-Mobile
-# platforms and generic SH. SH-based SH-Mobile platforms are still using
-# an older framework that is pending up-porting, at which point this
-# special casing can go away.
-#
-obj-$(CONFIG_SUPERH)$(CONFIG_ARCH_SHMOBILE) += pm_runtime.o
+obj-y += pm_runtime.o
diff --git a/drivers/sh/clk/core.c b/drivers/sh/clk/core.c
index db257a35e71a..7715de2629c1 100644
--- a/drivers/sh/clk/core.c
+++ b/drivers/sh/clk/core.c
@@ -355,7 +355,7 @@ static int clk_establish_mapping(struct clk *clk)
*/
if (!clk->parent) {
clk->mapping = &dummy_mapping;
- return 0;
+ goto out;
}
/*
@@ -384,6 +384,9 @@ static int clk_establish_mapping(struct clk *clk)
}
clk->mapping = mapping;
+out:
+ clk->mapped_reg = clk->mapping->base;
+ clk->mapped_reg += (phys_addr_t)clk->enable_reg - clk->mapping->phys;
return 0;
}
@@ -402,10 +405,12 @@ static void clk_teardown_mapping(struct clk *clk)
/* Nothing to do */
if (mapping == &dummy_mapping)
- return;
+ goto out;
kref_put(&mapping->ref, clk_destroy_mapping);
clk->mapping = NULL;
+out:
+ clk->mapped_reg = NULL;
}
int clk_register(struct clk *clk)
diff --git a/drivers/sh/clk/cpg.c b/drivers/sh/clk/cpg.c
index 82dd6fb17838..45fee368b092 100644
--- a/drivers/sh/clk/cpg.c
+++ b/drivers/sh/clk/cpg.c
@@ -15,15 +15,15 @@
static int sh_clk_mstp32_enable(struct clk *clk)
{
- __raw_writel(__raw_readl(clk->enable_reg) & ~(1 << clk->enable_bit),
- clk->enable_reg);
+ iowrite32(ioread32(clk->mapped_reg) & ~(1 << clk->enable_bit),
+ clk->mapped_reg);
return 0;
}
static void sh_clk_mstp32_disable(struct clk *clk)
{
- __raw_writel(__raw_readl(clk->enable_reg) | (1 << clk->enable_bit),
- clk->enable_reg);
+ iowrite32(ioread32(clk->mapped_reg) | (1 << clk->enable_bit),
+ clk->mapped_reg);
}
static struct clk_ops sh_clk_mstp32_clk_ops = {
@@ -72,7 +72,7 @@ static unsigned long sh_clk_div6_recalc(struct clk *clk)
clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
table, NULL);
- idx = __raw_readl(clk->enable_reg) & 0x003f;
+ idx = ioread32(clk->mapped_reg) & 0x003f;
return clk->freq_table[idx].frequency;
}
@@ -98,10 +98,10 @@ static int sh_clk_div6_set_parent(struct clk *clk, struct clk *parent)
if (ret < 0)
return ret;
- value = __raw_readl(clk->enable_reg) &
+ value = ioread32(clk->mapped_reg) &
~(((1 << clk->src_width) - 1) << clk->src_shift);
- __raw_writel(value | (i << clk->src_shift), clk->enable_reg);
+ iowrite32(value | (i << clk->src_shift), clk->mapped_reg);
/* Rebuild the frequency table */
clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
@@ -119,10 +119,10 @@ static int sh_clk_div6_set_rate(struct clk *clk, unsigned long rate)
if (idx < 0)
return idx;
- value = __raw_readl(clk->enable_reg);
+ value = ioread32(clk->mapped_reg);
value &= ~0x3f;
value |= idx;
- __raw_writel(value, clk->enable_reg);
+ iowrite32(value, clk->mapped_reg);
return 0;
}
@@ -133,9 +133,9 @@ static int sh_clk_div6_enable(struct clk *clk)
ret = sh_clk_div6_set_rate(clk, clk->rate);
if (ret == 0) {
- value = __raw_readl(clk->enable_reg);
+ value = ioread32(clk->mapped_reg);
value &= ~0x100; /* clear stop bit to enable clock */
- __raw_writel(value, clk->enable_reg);
+ iowrite32(value, clk->mapped_reg);
}
return ret;
}
@@ -144,10 +144,10 @@ static void sh_clk_div6_disable(struct clk *clk)
{
unsigned long value;
- value = __raw_readl(clk->enable_reg);
+ value = ioread32(clk->mapped_reg);
value |= 0x100; /* stop clock */
value |= 0x3f; /* VDIV bits must be non-zero, overwrite divider */
- __raw_writel(value, clk->enable_reg);
+ iowrite32(value, clk->mapped_reg);
}
static struct clk_ops sh_clk_div6_clk_ops = {
@@ -167,6 +167,38 @@ static struct clk_ops sh_clk_div6_reparent_clk_ops = {
.set_parent = sh_clk_div6_set_parent,
};
+static int __init sh_clk_init_parent(struct clk *clk)
+{
+ u32 val;
+
+ if (clk->parent)
+ return 0;
+
+ if (!clk->parent_table || !clk->parent_num)
+ return 0;
+
+ if (!clk->src_width) {
+ pr_err("sh_clk_init_parent: cannot select parent clock\n");
+ return -EINVAL;
+ }
+
+ val = (ioread32(clk->mapped_reg) >> clk->src_shift);
+ val &= (1 << clk->src_width) - 1;
+
+ if (val >= clk->parent_num) {
+ pr_err("sh_clk_init_parent: parent table size failed\n");
+ return -EINVAL;
+ }
+
+ clk->parent = clk->parent_table[val];
+ if (!clk->parent) {
+ pr_err("sh_clk_init_parent: unable to set parent");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int __init sh_clk_div6_register_ops(struct clk *clks, int nr,
struct clk_ops *ops)
{
@@ -190,8 +222,11 @@ static int __init sh_clk_div6_register_ops(struct clk *clks, int nr,
clkp->ops = ops;
clkp->freq_table = freq_table + (k * freq_table_size);
clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;
-
ret = clk_register(clkp);
+ if (ret < 0)
+ break;
+
+ ret = sh_clk_init_parent(clkp);
}
return ret;
@@ -217,7 +252,7 @@ static unsigned long sh_clk_div4_recalc(struct clk *clk)
clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
table, &clk->arch_flags);
- idx = (__raw_readl(clk->enable_reg) >> clk->enable_bit) & 0x000f;
+ idx = (ioread32(clk->mapped_reg) >> clk->enable_bit) & 0x000f;
return clk->freq_table[idx].frequency;
}
@@ -235,15 +270,15 @@ static int sh_clk_div4_set_parent(struct clk *clk, struct clk *parent)
*/
if (parent->flags & CLK_ENABLE_ON_INIT)
- value = __raw_readl(clk->enable_reg) & ~(1 << 7);
+ value = ioread32(clk->mapped_reg) & ~(1 << 7);
else
- value = __raw_readl(clk->enable_reg) | (1 << 7);
+ value = ioread32(clk->mapped_reg) | (1 << 7);
ret = clk_reparent(clk, parent);
if (ret < 0)
return ret;
- __raw_writel(value, clk->enable_reg);
+ iowrite32(value, clk->mapped_reg);
/* Rebuild the frequency table */
clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
@@ -260,10 +295,10 @@ static int sh_clk_div4_set_rate(struct clk *clk, unsigned long rate)
if (idx < 0)
return idx;
- value = __raw_readl(clk->enable_reg);
+ value = ioread32(clk->mapped_reg);
value &= ~(0xf << clk->enable_bit);
value |= (idx << clk->enable_bit);
- __raw_writel(value, clk->enable_reg);
+ iowrite32(value, clk->mapped_reg);
if (d4t->kick)
d4t->kick(clk);
@@ -273,13 +308,13 @@ static int sh_clk_div4_set_rate(struct clk *clk, unsigned long rate)
static int sh_clk_div4_enable(struct clk *clk)
{
- __raw_writel(__raw_readl(clk->enable_reg) & ~(1 << 8), clk->enable_reg);
+ iowrite32(ioread32(clk->mapped_reg) & ~(1 << 8), clk->mapped_reg);
return 0;
}
static void sh_clk_div4_disable(struct clk *clk)
{
- __raw_writel(__raw_readl(clk->enable_reg) | (1 << 8), clk->enable_reg);
+ iowrite32(ioread32(clk->mapped_reg) | (1 << 8), clk->mapped_reg);
}
static struct clk_ops sh_clk_div4_clk_ops = {
diff --git a/drivers/sh/intc/core.c b/drivers/sh/intc/core.c
index 8b7a141ff35e..e53e449b4eca 100644
--- a/drivers/sh/intc/core.c
+++ b/drivers/sh/intc/core.c
@@ -25,7 +25,7 @@
#include <linux/stat.h>
#include <linux/interrupt.h>
#include <linux/sh_intc.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/syscore_ops.h>
#include <linux/list.h>
#include <linux/spinlock.h>
@@ -354,6 +354,8 @@ int __init register_intc_controller(struct intc_desc *desc)
if (desc->force_enable)
intc_enable_disable_enum(desc, d, desc->force_enable, 1);
+ d->skip_suspend = desc->skip_syscore_suspend;
+
nr_intc_controllers++;
return 0;
@@ -386,6 +388,9 @@ static int intc_suspend(void)
list_for_each_entry(d, &intc_list, list) {
int irq;
+ if (d->skip_suspend)
+ continue;
+
/* enable wakeup irqs belonging to this intc controller */
for_each_active_irq(irq) {
struct irq_data *data;
@@ -409,6 +414,9 @@ static void intc_resume(void)
list_for_each_entry(d, &intc_list, list) {
int irq;
+ if (d->skip_suspend)
+ continue;
+
for_each_active_irq(irq) {
struct irq_data *data;
struct irq_chip *chip;
@@ -434,46 +442,47 @@ struct syscore_ops intc_syscore_ops = {
.resume = intc_resume,
};
-struct sysdev_class intc_sysdev_class = {
+struct bus_type intc_subsys = {
.name = "intc",
+ .dev_name = "intc",
};
static ssize_t
-show_intc_name(struct sys_device *dev, struct sysdev_attribute *attr, char *buf)
+show_intc_name(struct device *dev, struct device_attribute *attr, char *buf)
{
struct intc_desc_int *d;
- d = container_of(dev, struct intc_desc_int, sysdev);
+ d = container_of(dev, struct intc_desc_int, dev);
return sprintf(buf, "%s\n", d->chip.name);
}
-static SYSDEV_ATTR(name, S_IRUGO, show_intc_name, NULL);
+static DEVICE_ATTR(name, S_IRUGO, show_intc_name, NULL);
-static int __init register_intc_sysdevs(void)
+static int __init register_intc_devs(void)
{
struct intc_desc_int *d;
int error;
register_syscore_ops(&intc_syscore_ops);
- error = sysdev_class_register(&intc_sysdev_class);
+ error = subsys_system_register(&intc_subsys, NULL);
if (!error) {
list_for_each_entry(d, &intc_list, list) {
- d->sysdev.id = d->index;
- d->sysdev.cls = &intc_sysdev_class;
- error = sysdev_register(&d->sysdev);
+ d->dev.id = d->index;
+ d->dev.bus = &intc_subsys;
+ error = device_register(&d->dev);
if (error == 0)
- error = sysdev_create_file(&d->sysdev,
- &attr_name);
+ error = device_create_file(&d->dev,
+ &dev_attr_name);
if (error)
break;
}
}
if (error)
- pr_err("sysdev registration error\n");
+ pr_err("device registration error\n");
return error;
}
-device_initcall(register_intc_sysdevs);
+device_initcall(register_intc_devs);
diff --git a/drivers/sh/intc/internals.h b/drivers/sh/intc/internals.h
index 5b934851efa8..b0e9155ff739 100644
--- a/drivers/sh/intc/internals.h
+++ b/drivers/sh/intc/internals.h
@@ -4,7 +4,7 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/radix-tree.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#define _INTC_MK(fn, mode, addr_e, addr_d, width, shift) \
((shift) | ((width) << 5) | ((fn) << 9) | ((mode) << 13) | \
@@ -51,7 +51,7 @@ struct intc_subgroup_entry {
struct intc_desc_int {
struct list_head list;
- struct sys_device sysdev;
+ struct device dev;
struct radix_tree_root tree;
raw_spinlock_t lock;
unsigned int index;
@@ -67,6 +67,7 @@ struct intc_desc_int {
struct intc_window *window;
unsigned int nr_windows;
struct irq_chip chip;
+ bool skip_suspend;
};
@@ -157,7 +158,7 @@ void _intc_enable(struct irq_data *data, unsigned long handle);
extern struct list_head intc_list;
extern raw_spinlock_t intc_big_lock;
extern unsigned int nr_intc_controllers;
-extern struct sysdev_class intc_sysdev_class;
+extern struct bus_type intc_subsys;
unsigned int intc_get_dfl_prio_level(void);
unsigned int intc_get_prio_level(unsigned int irq);
diff --git a/drivers/sh/intc/userimask.c b/drivers/sh/intc/userimask.c
index 56bf9336b92b..e649ceaaa410 100644
--- a/drivers/sh/intc/userimask.c
+++ b/drivers/sh/intc/userimask.c
@@ -10,7 +10,7 @@
#define pr_fmt(fmt) "intc: " fmt
#include <linux/errno.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/stat.h>
@@ -20,15 +20,15 @@
static void __iomem *uimask;
static ssize_t
-show_intc_userimask(struct sysdev_class *cls,
- struct sysdev_class_attribute *attr, char *buf)
+show_intc_userimask(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", (__raw_readl(uimask) >> 4) & 0xf);
}
static ssize_t
-store_intc_userimask(struct sysdev_class *cls,
- struct sysdev_class_attribute *attr,
+store_intc_userimask(struct device *dev,
+ struct device_attribute *attr,
const char *buf, size_t count)
{
unsigned long level;
@@ -55,8 +55,8 @@ store_intc_userimask(struct sysdev_class *cls,
return count;
}
-static SYSDEV_CLASS_ATTR(userimask, S_IRUSR | S_IWUSR,
- show_intc_userimask, store_intc_userimask);
+static DEVICE_ATTR(userimask, S_IRUSR | S_IWUSR,
+ show_intc_userimask, store_intc_userimask);
static int __init userimask_sysdev_init(void)
@@ -64,7 +64,7 @@ static int __init userimask_sysdev_init(void)
if (unlikely(!uimask))
return -ENXIO;
- return sysdev_class_create_file(&intc_sysdev_class, &attr_userimask);
+ return device_create_file(intc_subsys.dev_root, &dev_attr_userimask);
}
late_initcall(userimask_sysdev_init);
diff --git a/drivers/sh/pfc.c b/drivers/sh/pfc.c
index e67fe170d8d5..522c6c46d1be 100644
--- a/drivers/sh/pfc.c
+++ b/drivers/sh/pfc.c
@@ -19,6 +19,75 @@
#include <linux/irq.h>
#include <linux/bitops.h>
#include <linux/gpio.h>
+#include <linux/slab.h>
+#include <linux/ioport.h>
+
+static void pfc_iounmap(struct pinmux_info *pip)
+{
+ int k;
+
+ for (k = 0; k < pip->num_resources; k++)
+ if (pip->window[k].virt)
+ iounmap(pip->window[k].virt);
+
+ kfree(pip->window);
+ pip->window = NULL;
+}
+
+static int pfc_ioremap(struct pinmux_info *pip)
+{
+ struct resource *res;
+ int k;
+
+ if (!pip->num_resources)
+ return 0;
+
+ pip->window = kzalloc(pip->num_resources * sizeof(*pip->window),
+ GFP_NOWAIT);
+ if (!pip->window)
+ goto err1;
+
+ for (k = 0; k < pip->num_resources; k++) {
+ res = pip->resource + k;
+ WARN_ON(resource_type(res) != IORESOURCE_MEM);
+ pip->window[k].phys = res->start;
+ pip->window[k].size = resource_size(res);
+ pip->window[k].virt = ioremap_nocache(res->start,
+ resource_size(res));
+ if (!pip->window[k].virt)
+ goto err2;
+ }
+
+ return 0;
+
+err2:
+ pfc_iounmap(pip);
+err1:
+ return -1;
+}
+
+static void __iomem *pfc_phys_to_virt(struct pinmux_info *pip,
+ unsigned long address)
+{
+ struct pfc_window *window;
+ int k;
+
+ /* scan through physical windows and convert address */
+ for (k = 0; k < pip->num_resources; k++) {
+ window = pip->window + k;
+
+ if (address < window->phys)
+ continue;
+
+ if (address >= (window->phys + window->size))
+ continue;
+
+ return window->virt + (address - window->phys);
+ }
+
+ /* no windows defined, register must be 1:1 mapped virt:phys */
+ return (void __iomem *)address;
+}
static int enum_in_range(pinmux_enum_t enum_id, struct pinmux_range *r)
{
@@ -31,41 +100,54 @@ static int enum_in_range(pinmux_enum_t enum_id, struct pinmux_range *r)
return 1;
}
-static unsigned long gpio_read_raw_reg(unsigned long reg,
+static unsigned long gpio_read_raw_reg(void __iomem *mapped_reg,
unsigned long reg_width)
{
switch (reg_width) {
case 8:
- return __raw_readb(reg);
+ return ioread8(mapped_reg);
case 16:
- return __raw_readw(reg);
+ return ioread16(mapped_reg);
case 32:
- return __raw_readl(reg);
+ return ioread32(mapped_reg);
}
BUG();
return 0;
}
-static void gpio_write_raw_reg(unsigned long reg,
+static void gpio_write_raw_reg(void __iomem *mapped_reg,
unsigned long reg_width,
unsigned long data)
{
switch (reg_width) {
case 8:
- __raw_writeb(data, reg);
+ iowrite8(data, mapped_reg);
return;
case 16:
- __raw_writew(data, reg);
+ iowrite16(data, mapped_reg);
return;
case 32:
- __raw_writel(data, reg);
+ iowrite32(data, mapped_reg);
return;
}
BUG();
}
+static int gpio_read_bit(struct pinmux_data_reg *dr,
+ unsigned long in_pos)
+{
+ unsigned long pos;
+
+ pos = dr->reg_width - (in_pos + 1);
+
+ pr_debug("read_bit: addr = %lx, pos = %ld, "
+ "r_width = %ld\n", dr->reg, pos, dr->reg_width);
+
+ return (gpio_read_raw_reg(dr->mapped_reg, dr->reg_width) >> pos) & 1;
+}
+
static void gpio_write_bit(struct pinmux_data_reg *dr,
unsigned long in_pos, unsigned long value)
{
@@ -82,53 +164,72 @@ static void gpio_write_bit(struct pinmux_data_reg *dr,
else
clear_bit(pos, &dr->reg_shadow);
- gpio_write_raw_reg(dr->reg, dr->reg_width, dr->reg_shadow);
+ gpio_write_raw_reg(dr->mapped_reg, dr->reg_width, dr->reg_shadow);
}
-static int gpio_read_reg(unsigned long reg, unsigned long reg_width,
- unsigned long field_width, unsigned long in_pos)
+static void config_reg_helper(struct pinmux_info *gpioc,
+ struct pinmux_cfg_reg *crp,
+ unsigned long in_pos,
+ void __iomem **mapped_regp,
+ unsigned long *maskp,
+ unsigned long *posp)
{
- unsigned long data, mask, pos;
+ int k;
+
+ *mapped_regp = pfc_phys_to_virt(gpioc, crp->reg);
- data = 0;
- mask = (1 << field_width) - 1;
- pos = reg_width - ((in_pos + 1) * field_width);
+ if (crp->field_width) {
+ *maskp = (1 << crp->field_width) - 1;
+ *posp = crp->reg_width - ((in_pos + 1) * crp->field_width);
+ } else {
+ *maskp = (1 << crp->var_field_width[in_pos]) - 1;
+ *posp = crp->reg_width;
+ for (k = 0; k <= in_pos; k++)
+ *posp -= crp->var_field_width[k];
+ }
+}
- pr_debug("read_reg: addr = %lx, pos = %ld, "
+static int read_config_reg(struct pinmux_info *gpioc,
+ struct pinmux_cfg_reg *crp,
+ unsigned long field)
+{
+ void __iomem *mapped_reg;
+ unsigned long mask, pos;
+
+ config_reg_helper(gpioc, crp, field, &mapped_reg, &mask, &pos);
+
+ pr_debug("read_reg: addr = %lx, field = %ld, "
"r_width = %ld, f_width = %ld\n",
- reg, pos, reg_width, field_width);
+ crp->reg, field, crp->reg_width, crp->field_width);
- data = gpio_read_raw_reg(reg, reg_width);
- return (data >> pos) & mask;
+ return (gpio_read_raw_reg(mapped_reg, crp->reg_width) >> pos) & mask;
}
-static void gpio_write_reg(unsigned long reg, unsigned long reg_width,
- unsigned long field_width, unsigned long in_pos,
- unsigned long value)
+static void write_config_reg(struct pinmux_info *gpioc,
+ struct pinmux_cfg_reg *crp,
+ unsigned long field, unsigned long value)
{
- unsigned long mask, pos;
+ void __iomem *mapped_reg;
+ unsigned long mask, pos, data;
- mask = (1 << field_width) - 1;
- pos = reg_width - ((in_pos + 1) * field_width);
+ config_reg_helper(gpioc, crp, field, &mapped_reg, &mask, &pos);
- pr_debug("write_reg addr = %lx, value = %ld, pos = %ld, "
+ pr_debug("write_reg addr = %lx, value = %ld, field = %ld, "
"r_width = %ld, f_width = %ld\n",
- reg, value, pos, reg_width, field_width);
+ crp->reg, value, field, crp->reg_width, crp->field_width);
mask = ~(mask << pos);
value = value << pos;
- switch (reg_width) {
- case 8:
- __raw_writeb((__raw_readb(reg) & mask) | value, reg);
- break;
- case 16:
- __raw_writew((__raw_readw(reg) & mask) | value, reg);
- break;
- case 32:
- __raw_writel((__raw_readl(reg) & mask) | value, reg);
- break;
- }
+ data = gpio_read_raw_reg(mapped_reg, crp->reg_width);
+ data &= mask;
+ data |= value;
+
+ if (gpioc->unlock_reg)
+ gpio_write_raw_reg(pfc_phys_to_virt(gpioc, gpioc->unlock_reg),
+ 32, ~data);
+
+ gpio_write_raw_reg(mapped_reg, crp->reg_width, data);
}
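config_reg_helper above computes a field's mask and bit position in one of two ways: a fixed field_width divides the register evenly, while var_field_width gives each field its own width and the position is obtained by subtracting the widths up to and including the requested field. A hedged worked sketch with made-up widths:

#include <stdio.h>

/* Fixed-width case: every field is field_width bits wide. */
static void fixed_field(unsigned int reg_width, unsigned int field_width,
			unsigned int in_pos, unsigned long *mask,
			unsigned int *pos)
{
	*mask = (1UL << field_width) - 1;
	*pos = reg_width - ((in_pos + 1) * field_width);
}

/* Variable-width case: widths[] lists each field's width, MSB first. */
static void var_field(unsigned int reg_width, const unsigned int *widths,
		      unsigned int in_pos, unsigned long *mask,
		      unsigned int *pos)
{
	*mask = (1UL << widths[in_pos]) - 1;
	*pos = reg_width;
	for (unsigned int k = 0; k <= in_pos; k++)
		*pos -= widths[k];
}

int main(void)
{
	/* hypothetical 32-bit register split into 4-, 3- and 2-bit fields */
	unsigned int widths[] = { 4, 3, 2 };
	unsigned long mask;
	unsigned int pos;

	fixed_field(32, 4, 1, &mask, &pos);
	printf("fixed:    field 1 -> mask 0x%lx at bit %u\n", mask, pos); /* 0xf at 24 */

	var_field(32, widths, 1, &mask, &pos);
	printf("variable: field 1 -> mask 0x%lx at bit %u\n", mask, pos); /* 0x7 at 25 */
	return 0;
}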
static int setup_data_reg(struct pinmux_info *gpioc, unsigned gpio)
@@ -147,6 +248,8 @@ static int setup_data_reg(struct pinmux_info *gpioc, unsigned gpio)
if (!data_reg->reg_width)
break;
+ data_reg->mapped_reg = pfc_phys_to_virt(gpioc, data_reg->reg);
+
for (n = 0; n < data_reg->reg_width; n++) {
if (data_reg->enum_ids[n] == gpiop->enum_id) {
gpiop->flags &= ~PINMUX_FLAG_DREG;
@@ -179,7 +282,8 @@ static void setup_data_regs(struct pinmux_info *gpioc)
if (!drp->reg_width)
break;
- drp->reg_shadow = gpio_read_raw_reg(drp->reg, drp->reg_width);
+ drp->reg_shadow = gpio_read_raw_reg(drp->mapped_reg,
+ drp->reg_width);
k++;
}
}
@@ -201,12 +305,13 @@ static int get_data_reg(struct pinmux_info *gpioc, unsigned gpio,
}
static int get_config_reg(struct pinmux_info *gpioc, pinmux_enum_t enum_id,
- struct pinmux_cfg_reg **crp, int *indexp,
+ struct pinmux_cfg_reg **crp,
+ int *fieldp, int *valuep,
unsigned long **cntp)
{
struct pinmux_cfg_reg *config_reg;
- unsigned long r_width, f_width;
- int k, n;
+ unsigned long r_width, f_width, curr_width, ncomb;
+ int k, m, n, pos, bit_pos;
k = 0;
while (1) {
@@ -217,13 +322,27 @@ static int get_config_reg(struct pinmux_info *gpioc, pinmux_enum_t enum_id,
if (!r_width)
break;
- for (n = 0; n < (r_width / f_width) * (1 << f_width); n++) {
- if (config_reg->enum_ids[n] == enum_id) {
- *crp = config_reg;
- *indexp = n;
- *cntp = &config_reg->cnt[n / (1 << f_width)];
- return 0;
+
+ pos = 0;
+ m = 0;
+ for (bit_pos = 0; bit_pos < r_width; bit_pos += curr_width) {
+ if (f_width)
+ curr_width = f_width;
+ else
+ curr_width = config_reg->var_field_width[m];
+
+ ncomb = 1 << curr_width;
+ for (n = 0; n < ncomb; n++) {
+ if (config_reg->enum_ids[pos + n] == enum_id) {
+ *crp = config_reg;
+ *fieldp = m;
+ *valuep = n;
+ *cntp = &config_reg->cnt[m];
+ return 0;
+ }
}
+ pos += ncomb;
+ m++;
}
k++;
}
@@ -261,36 +380,6 @@ static int get_gpio_enum_id(struct pinmux_info *gpioc, unsigned gpio,
return -1;
}
-static void write_config_reg(struct pinmux_info *gpioc,
- struct pinmux_cfg_reg *crp,
- int index)
-{
- unsigned long ncomb, pos, value;
-
- ncomb = 1 << crp->field_width;
- pos = index / ncomb;
- value = index % ncomb;
-
- gpio_write_reg(crp->reg, crp->reg_width, crp->field_width, pos, value);
-}
-
-static int check_config_reg(struct pinmux_info *gpioc,
- struct pinmux_cfg_reg *crp,
- int index)
-{
- unsigned long ncomb, pos, value;
-
- ncomb = 1 << crp->field_width;
- pos = index / ncomb;
- value = index % ncomb;
-
- if (gpio_read_reg(crp->reg, crp->reg_width,
- crp->field_width, pos) == value)
- return 0;
-
- return -1;
-}
-
enum { GPIO_CFG_DRYRUN, GPIO_CFG_REQ, GPIO_CFG_FREE };
static int pinmux_config_gpio(struct pinmux_info *gpioc, unsigned gpio,
@@ -299,7 +388,7 @@ static int pinmux_config_gpio(struct pinmux_info *gpioc, unsigned gpio,
struct pinmux_cfg_reg *cr = NULL;
pinmux_enum_t enum_id;
struct pinmux_range *range;
- int in_range, pos, index;
+ int in_range, pos, field, value;
unsigned long *cntp;
switch (pinmux_type) {
@@ -330,7 +419,8 @@ static int pinmux_config_gpio(struct pinmux_info *gpioc, unsigned gpio,
pos = 0;
enum_id = 0;
- index = 0;
+ field = 0;
+ value = 0;
while (1) {
pos = get_gpio_enum_id(gpioc, gpio, pos, &enum_id);
if (pos <= 0)
@@ -377,17 +467,19 @@ static int pinmux_config_gpio(struct pinmux_info *gpioc, unsigned gpio,
if (!in_range)
continue;
- if (get_config_reg(gpioc, enum_id, &cr, &index, &cntp) != 0)
+ if (get_config_reg(gpioc, enum_id, &cr,
+ &field, &value, &cntp) != 0)
goto out_err;
switch (cfg_mode) {
case GPIO_CFG_DRYRUN:
- if (!*cntp || !check_config_reg(gpioc, cr, index))
+ if (!*cntp ||
+ (read_config_reg(gpioc, cr, field) != value))
continue;
break;
case GPIO_CFG_REQ:
- write_config_reg(gpioc, cr, index);
+ write_config_reg(gpioc, cr, field, value);
*cntp = *cntp + 1;
break;
@@ -564,7 +656,7 @@ static int sh_gpio_get_value(struct pinmux_info *gpioc, unsigned gpio)
if (!gpioc || get_data_reg(gpioc, gpio, &dr, &bit) != 0)
return -EINVAL;
- return gpio_read_reg(dr->reg, dr->reg_width, 1, bit);
+ return gpio_read_bit(dr, bit);
}
static int sh_gpio_get(struct gpio_chip *chip, unsigned offset)
@@ -606,10 +698,15 @@ static int sh_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
int register_pinmux(struct pinmux_info *pip)
{
struct gpio_chip *chip = &pip->chip;
+ int ret;
pr_info("%s handling gpio %d -> %d\n",
pip->name, pip->first_gpio, pip->last_gpio);
+ ret = pfc_ioremap(pip);
+ if (ret < 0)
+ return ret;
+
setup_data_regs(pip);
chip->request = sh_gpio_request;
@@ -627,12 +724,16 @@ int register_pinmux(struct pinmux_info *pip)
chip->base = pip->first_gpio;
chip->ngpio = (pip->last_gpio - pip->first_gpio) + 1;
- return gpiochip_add(chip);
+ ret = gpiochip_add(chip);
+ if (ret < 0)
+ pfc_iounmap(pip);
+
+ return ret;
}
int unregister_pinmux(struct pinmux_info *pip)
{
pr_info("%s deregistering\n", pip->name);
-
+ pfc_iounmap(pip);
return gpiochip_remove(&pip->chip);
}
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index a1fd73df5416..3f9a47ec67dc 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -87,12 +87,12 @@ config SPI_BFIN_SPORT
Enable support for a SPI bus via the Blackfin SPORT peripheral.
config SPI_AU1550
- tristate "Au1550/Au12x0 SPI Controller"
+ tristate "Au1550/Au1200/Au1300 SPI Controller"
depends on MIPS_ALCHEMY && EXPERIMENTAL
select SPI_BITBANG
help
If you say yes to this option, support will be included for the
- Au1550 SPI controller (may also work with Au1200,Au1210,Au1250).
+ PSC SPI controller found on Au1550, Au1200 and Au1300 series.
config SPI_BITBANG
tristate "Utilities for Bitbanging SPI masters"
@@ -174,8 +174,7 @@ config SPI_LM70_LLP
config SPI_MPC52xx
tristate "Freescale MPC52xx SPI (non-PSC) controller support"
- depends on PPC_MPC52xx && SPI
- select SPI_MASTER_OF
+ depends on PPC_MPC52xx
help
This drivers supports the MPC52xx SPI controller in master SPI
mode.
@@ -199,7 +198,7 @@ config SPI_FSL_LIB
depends on FSL_SOC
config SPI_FSL_SPI
- tristate "Freescale SPI controller"
+ bool "Freescale SPI controller"
depends on FSL_SOC
select SPI_FSL_LIB
help
@@ -208,7 +207,7 @@ config SPI_FSL_SPI
MPC8569 uses the controller in QE mode, MPC8610 in cpu mode.
config SPI_FSL_ESPI
- tristate "Freescale eSPI controller"
+ bool "Freescale eSPI controller"
depends on FSL_SOC
select SPI_FSL_LIB
help
@@ -333,8 +332,7 @@ config SPI_STMP3XXX
config SPI_TEGRA
tristate "Nvidia Tegra SPI controller"
- depends on ARCH_TEGRA
- select TEGRA_SYSTEM_DMA
+ depends on ARCH_TEGRA && TEGRA_SYSTEM_DMA
help
SPI driver for NVidia Tegra SoCs
@@ -346,14 +344,14 @@ config SPI_TI_SSP
serial port.
config SPI_TOPCLIFF_PCH
- tristate "Intel EG20T PCH/OKI SEMICONDUCTOR ML7213 IOH SPI controller"
+ tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) SPI"
depends on PCI
help
SPI driver for the Topcliff PCH (Platform Controller Hub) SPI bus
used in some x86 embedded processors.
- This driver also supports the ML7213, a companion chip for the
- Atom E6xx series and compatible with the Intel EG20T PCH.
+ This driver also supports the ML7213/ML7223/ML7831, a companion chip
+ for the Atom E6xx series and compatible with the Intel EG20T PCH.
config SPI_TXX9
tristate "Toshiba TXx9 SPI controller"
diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c
index 024b48aed5ca..acc88b4d2869 100644
--- a/drivers/spi/spi-ath79.c
+++ b/drivers/spi/spi-ath79.c
@@ -13,6 +13,7 @@
*/
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c
index e743a45ee92c..8418eb036651 100644
--- a/drivers/spi/spi-dw-mid.c
+++ b/drivers/spi/spi-dw-mid.c
@@ -131,7 +131,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
rxchan = dws->rxchan;
/* 2. Prepare the TX dma transfer */
- txconf.direction = DMA_TO_DEVICE;
+ txconf.direction = DMA_MEM_TO_DEV;
txconf.dst_addr = dws->dma_addr;
txconf.dst_maxburst = LNW_DMA_MSIZE_16;
txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
@@ -147,13 +147,13 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
txdesc = txchan->device->device_prep_slave_sg(txchan,
&dws->tx_sgl,
1,
- DMA_TO_DEVICE,
+ DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP);
txdesc->callback = dw_spi_dma_done;
txdesc->callback_param = dws;
/* 3. Prepare the RX dma transfer */
- rxconf.direction = DMA_FROM_DEVICE;
+ rxconf.direction = DMA_DEV_TO_MEM;
rxconf.src_addr = dws->dma_addr;
rxconf.src_maxburst = LNW_DMA_MSIZE_16;
rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
@@ -169,7 +169,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
rxdesc = rxchan->device->device_prep_slave_sg(rxchan,
&dws->rx_sgl,
1,
- DMA_FROM_DEVICE,
+ DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP);
rxdesc->callback = dw_spi_dma_done;
rxdesc->callback_param = dws;
diff --git a/drivers/spi/spi-ep93xx.c b/drivers/spi/spi-ep93xx.c
index 0a282e5fcc9c..d46e55c720b7 100644
--- a/drivers/spi/spi-ep93xx.c
+++ b/drivers/spi/spi-ep93xx.c
@@ -551,6 +551,7 @@ ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir)
struct dma_async_tx_descriptor *txd;
enum dma_slave_buswidth buswidth;
struct dma_slave_config conf;
+ enum dma_transfer_direction slave_dirn;
struct scatterlist *sg;
struct sg_table *sgt;
struct dma_chan *chan;
@@ -573,6 +574,7 @@ ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir)
conf.src_addr = espi->sspdr_phys;
conf.src_addr_width = buswidth;
+ slave_dirn = DMA_DEV_TO_MEM;
} else {
chan = espi->dma_tx;
buf = t->tx_buf;
@@ -580,6 +582,7 @@ ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir)
conf.dst_addr = espi->sspdr_phys;
conf.dst_addr_width = buswidth;
+ slave_dirn = DMA_MEM_TO_DEV;
}
ret = dmaengine_slave_config(chan, &conf);
@@ -631,7 +634,7 @@ ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir)
return ERR_PTR(-ENOMEM);
txd = chan->device->device_prep_slave_sg(chan, sgt->sgl, nents,
- dir, DMA_CTRL_ACK);
+ slave_dirn, DMA_CTRL_ACK);
if (!txd) {
dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
return ERR_PTR(-ENOMEM);
@@ -979,7 +982,7 @@ static int ep93xx_spi_setup_dma(struct ep93xx_spi *espi)
dma_cap_set(DMA_SLAVE, mask);
espi->dma_rx_data.port = EP93XX_DMA_SSP;
- espi->dma_rx_data.direction = DMA_FROM_DEVICE;
+ espi->dma_rx_data.direction = DMA_DEV_TO_MEM;
espi->dma_rx_data.name = "ep93xx-spi-rx";
espi->dma_rx = dma_request_channel(mask, ep93xx_spi_dma_filter,
@@ -990,7 +993,7 @@ static int ep93xx_spi_setup_dma(struct ep93xx_spi *espi)
}
espi->dma_tx_data.port = EP93XX_DMA_SSP;
- espi->dma_tx_data.direction = DMA_TO_DEVICE;
+ espi->dma_tx_data.direction = DMA_MEM_TO_DEV;
espi->dma_tx_data.name = "ep93xx-spi-tx";
espi->dma_tx = dma_request_channel(mask, ep93xx_spi_dma_filter,
diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
index e093d3ec41ba..0094c645ff0d 100644
--- a/drivers/spi/spi-gpio.c
+++ b/drivers/spi/spi-gpio.c
@@ -256,7 +256,7 @@ static void spi_gpio_cleanup(struct spi_device *spi)
spi_bitbang_cleanup(spi);
}
-static int __init spi_gpio_alloc(unsigned pin, const char *label, bool is_in)
+static int __devinit spi_gpio_alloc(unsigned pin, const char *label, bool is_in)
{
int value;
@@ -270,7 +270,7 @@ static int __init spi_gpio_alloc(unsigned pin, const char *label, bool is_in)
return value;
}
-static int __init
+static int __devinit
spi_gpio_request(struct spi_gpio_platform_data *pdata, const char *label,
u16 *res_flags)
{
diff --git a/drivers/spi/spi-nuc900.c b/drivers/spi/spi-nuc900.c
index e763254741c2..182e9c873822 100644
--- a/drivers/spi/spi-nuc900.c
+++ b/drivers/spi/spi-nuc900.c
@@ -8,6 +8,7 @@
*
*/
+#include <linux/module.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
@@ -426,7 +427,7 @@ static int __devinit nuc900_spi_probe(struct platform_device *pdev)
goto err_clk;
}
- mfp_set_groupg(&pdev->dev);
+ mfp_set_groupg(&pdev->dev, NULL);
nuc900_init_spi(hw);
err = spi_bitbang_start(&hw->bitbang);
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 322be7aea8b4..0b0dfb71c640 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -121,6 +121,7 @@ struct omap2_mcspi {
/* SPI1 has 4 channels, while SPI2 has 2 */
struct omap2_mcspi_dma *dma_channels;
struct device *dev;
+ struct workqueue_struct *wq;
};
struct omap2_mcspi_cs {
@@ -143,8 +144,6 @@ struct omap2_mcspi_regs {
static struct omap2_mcspi_regs omap2_mcspi_ctx[OMAP2_MCSPI_MAX_CTRL];
-static struct workqueue_struct *omap2_mcspi_wq;
-
#define MOD_REG_BIT(val, mask, set) do { \
if (set) \
val |= mask; \
@@ -1043,7 +1042,7 @@ static int omap2_mcspi_transfer(struct spi_device *spi, struct spi_message *m)
spin_lock_irqsave(&mcspi->lock, flags);
list_add_tail(&m->queue, &mcspi->msg_queue);
- queue_work(omap2_mcspi_wq, &mcspi->work);
+ queue_work(mcspi->wq, &mcspi->work);
spin_unlock_irqrestore(&mcspi->lock, flags);
return 0;
@@ -1088,6 +1087,7 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev)
struct omap2_mcspi *mcspi;
struct resource *r;
int status = 0, i;
+ char wq_name[20];
master = spi_alloc_master(&pdev->dev, sizeof *mcspi);
if (master == NULL) {
@@ -1111,10 +1111,17 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev)
mcspi = spi_master_get_devdata(master);
mcspi->master = master;
+ sprintf(wq_name, "omap2_mcspi/%d", master->bus_num);
+ mcspi->wq = alloc_workqueue(wq_name, WQ_MEM_RECLAIM, 1);
+ if (mcspi->wq == NULL) {
+ status = -ENOMEM;
+ goto free_master;
+ }
+
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (r == NULL) {
status = -ENODEV;
- goto err1;
+ goto free_master;
}
r->start += pdata->regs_offset;
@@ -1123,14 +1130,14 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev)
if (!request_mem_region(r->start, resource_size(r),
dev_name(&pdev->dev))) {
status = -EBUSY;
- goto err1;
+ goto free_master;
}
mcspi->base = ioremap(r->start, resource_size(r));
if (!mcspi->base) {
dev_dbg(&pdev->dev, "can't ioremap MCSPI\n");
status = -ENOMEM;
- goto err2;
+ goto release_region;
}
mcspi->dev = &pdev->dev;
@@ -1145,7 +1152,7 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev)
GFP_KERNEL);
if (mcspi->dma_channels == NULL)
- goto err2;
+ goto unmap_io;
for (i = 0; i < master->num_chipselect; i++) {
char dma_ch_name[14];
@@ -1175,25 +1182,33 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev)
mcspi->dma_channels[i].dma_tx_sync_dev = dma_res->start;
}
+ if (status < 0)
+ goto dma_chnl_free;
+
pm_runtime_enable(&pdev->dev);
if (status || omap2_mcspi_master_setup(mcspi) < 0)
- goto err3;
+ goto disable_pm;
status = spi_register_master(master);
if (status < 0)
- goto err4;
+ goto err_spi_register;
return status;
-err4:
+err_spi_register:
spi_master_put(master);
-err3:
+disable_pm:
+ pm_runtime_disable(&pdev->dev);
+dma_chnl_free:
kfree(mcspi->dma_channels);
-err2:
- release_mem_region(r->start, resource_size(r));
+unmap_io:
iounmap(mcspi->base);
-err1:
+release_region:
+ release_mem_region(r->start, resource_size(r));
+free_master:
+ kfree(master);
+ platform_set_drvdata(pdev, NULL);
return status;
}
@@ -1210,6 +1225,7 @@ static int __exit omap2_mcspi_remove(struct platform_device *pdev)
dma_channels = mcspi->dma_channels;
omap2_mcspi_disable_clocks(mcspi);
+ pm_runtime_disable(&pdev->dev);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(r->start, resource_size(r));
@@ -1217,6 +1233,8 @@ static int __exit omap2_mcspi_remove(struct platform_device *pdev)
spi_unregister_master(master);
iounmap(base);
kfree(dma_channels);
+ destroy_workqueue(mcspi->wq);
+ platform_set_drvdata(pdev, NULL);
return 0;
}
@@ -1275,10 +1293,6 @@ static struct platform_driver omap2_mcspi_driver = {
static int __init omap2_mcspi_init(void)
{
- omap2_mcspi_wq = create_singlethread_workqueue(
- omap2_mcspi_driver.driver.name);
- if (omap2_mcspi_wq == NULL)
- return -1;
return platform_driver_probe(&omap2_mcspi_driver, omap2_mcspi_probe);
}
subsys_initcall(omap2_mcspi_init);
@@ -1287,7 +1301,6 @@ static void __exit omap2_mcspi_exit(void)
{
platform_driver_unregister(&omap2_mcspi_driver);
- destroy_workqueue(omap2_mcspi_wq);
}
module_exit(omap2_mcspi_exit);
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index 5559b2299198..2f9cb43a2398 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -340,6 +340,10 @@ struct vendor_data {
* @cur_msg: Pointer to current spi_message being processed
* @cur_transfer: Pointer to current spi_transfer
* @cur_chip: pointer to current clients chip(assigned from controller_state)
+ * @next_msg_cs_active: the next message in the queue has been examined
+ * and it was found that it uses the same chip select as the previous
+ * message, so we left it active after the previous transfer, and it's
+ * active already.
* @tx: current position in TX buffer to be read
* @tx_end: end position in TX buffer to be read
* @rx: current position in RX buffer to be written
@@ -373,6 +377,7 @@ struct pl022 {
struct spi_message *cur_msg;
struct spi_transfer *cur_transfer;
struct chip_data *cur_chip;
+ bool next_msg_cs_active;
void *tx;
void *tx_end;
void *rx;
@@ -445,23 +450,9 @@ static void giveback(struct pl022 *pl022)
struct spi_transfer *last_transfer;
unsigned long flags;
struct spi_message *msg;
- void (*curr_cs_control) (u32 command);
+ pl022->next_msg_cs_active = false;
- /*
- * This local reference to the chip select function
- * is needed because we set curr_chip to NULL
- * as a step toward termininating the message.
- */
- curr_cs_control = pl022->cur_chip->cs_control;
- spin_lock_irqsave(&pl022->queue_lock, flags);
- msg = pl022->cur_msg;
- pl022->cur_msg = NULL;
- pl022->cur_transfer = NULL;
- pl022->cur_chip = NULL;
- queue_work(pl022->workqueue, &pl022->pump_messages);
- spin_unlock_irqrestore(&pl022->queue_lock, flags);
-
- last_transfer = list_entry(msg->transfers.prev,
+ last_transfer = list_entry(pl022->cur_msg->transfers.prev,
struct spi_transfer,
transfer_list);
@@ -473,18 +464,13 @@ static void giveback(struct pl022 *pl022)
*/
udelay(last_transfer->delay_usecs);
- /*
- * Drop chip select UNLESS cs_change is true or we are returning
- * a message with an error, or next message is for another chip
- */
- if (!last_transfer->cs_change)
- curr_cs_control(SSP_CHIP_DESELECT);
- else {
+ if (!last_transfer->cs_change) {
struct spi_message *next_msg;
- /* Holding of cs was hinted, but we need to make sure
- * the next message is for the same chip. Don't waste
- * time with the following tests unless this was hinted.
+ /*
+ * cs_change was not set. We can keep the chip select
+ * enabled if there is message in the queue and it is
+ * for the same spi device.
*
* We cannot postpone this until pump_messages, because
* after calling msg->complete (below) the driver that
@@ -501,19 +487,29 @@ static void giveback(struct pl022 *pl022)
struct spi_message, queue);
spin_unlock_irqrestore(&pl022->queue_lock, flags);
- /* see if the next and current messages point
- * to the same chip
+ /*
+ * see if the next and current messages point
+ * to the same spi device.
*/
- if (next_msg && next_msg->spi != msg->spi)
+ if (next_msg && next_msg->spi != pl022->cur_msg->spi)
next_msg = NULL;
- if (!next_msg || msg->state == STATE_ERROR)
- curr_cs_control(SSP_CHIP_DESELECT);
+ if (!next_msg || pl022->cur_msg->state == STATE_ERROR)
+ pl022->cur_chip->cs_control(SSP_CHIP_DESELECT);
+ else
+ pl022->next_msg_cs_active = true;
}
+
+ spin_lock_irqsave(&pl022->queue_lock, flags);
+ msg = pl022->cur_msg;
+ pl022->cur_msg = NULL;
+ pl022->cur_transfer = NULL;
+ pl022->cur_chip = NULL;
+ queue_work(pl022->workqueue, &pl022->pump_messages);
+ spin_unlock_irqrestore(&pl022->queue_lock, flags);
+
msg->state = NULL;
if (msg->complete)
msg->complete(msg->context);
- /* This message is completed, so let's turn off the clocks & power */
- pm_runtime_put(&pl022->adev->dev);
}
/**
@@ -904,11 +900,11 @@ static int configure_dma(struct pl022 *pl022)
{
struct dma_slave_config rx_conf = {
.src_addr = SSP_DR(pl022->phybase),
- .direction = DMA_FROM_DEVICE,
+ .direction = DMA_DEV_TO_MEM,
};
struct dma_slave_config tx_conf = {
.dst_addr = SSP_DR(pl022->phybase),
- .direction = DMA_TO_DEVICE,
+ .direction = DMA_MEM_TO_DEV,
};
unsigned int pages;
int ret;
@@ -1045,7 +1041,7 @@ static int configure_dma(struct pl022 *pl022)
rxdesc = rxchan->device->device_prep_slave_sg(rxchan,
pl022->sgt_rx.sgl,
rx_sglen,
- DMA_FROM_DEVICE,
+ DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!rxdesc)
goto err_rxdesc;
@@ -1053,7 +1049,7 @@ static int configure_dma(struct pl022 *pl022)
txdesc = txchan->device->device_prep_slave_sg(txchan,
pl022->sgt_tx.sgl,
tx_sglen,
- DMA_TO_DEVICE,
+ DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!txdesc)
goto err_txdesc;
@@ -1244,9 +1240,9 @@ static irqreturn_t pl022_interrupt_handler(int irq, void *dev_id)
if ((pl022->tx == pl022->tx_end) && (flag == 0)) {
flag = 1;
- /* Disable Transmit interrupt */
- writew(readw(SSP_IMSC(pl022->virtbase)) &
- (~SSP_IMSC_MASK_TXIM),
+ /* Disable Transmit interrupt, enable receive interrupt */
+ writew((readw(SSP_IMSC(pl022->virtbase)) &
+ ~SSP_IMSC_MASK_TXIM) | SSP_IMSC_MASK_RXIM,
SSP_IMSC(pl022->virtbase));
}
@@ -1352,7 +1348,7 @@ static void pump_transfers(unsigned long data)
*/
udelay(previous->delay_usecs);
- /* Drop chip select only if cs_change is requested */
+ /* Reselect chip select only if cs_change was requested */
if (previous->cs_change)
pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
} else {
@@ -1379,15 +1375,22 @@ static void pump_transfers(unsigned long data)
}
err_config_dma:
- writew(ENABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase));
+ /* enable all interrupts except RX */
+ writew(ENABLE_ALL_INTERRUPTS & ~SSP_IMSC_MASK_RXIM, SSP_IMSC(pl022->virtbase));
}
static void do_interrupt_dma_transfer(struct pl022 *pl022)
{
- u32 irqflags = ENABLE_ALL_INTERRUPTS;
+ /*
+ * Default is to enable all interrupts except RX -
+ * this will be enabled once TX is complete
+ */
+ u32 irqflags = ENABLE_ALL_INTERRUPTS & ~SSP_IMSC_MASK_RXIM;
+
+ /* Enable target chip, if not already active */
+ if (!pl022->next_msg_cs_active)
+ pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
- /* Enable target chip */
- pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
if (set_up_next_transfer(pl022, pl022->cur_transfer)) {
/* Error path */
pl022->cur_msg->state = STATE_ERROR;
@@ -1442,7 +1445,8 @@ static void do_polling_transfer(struct pl022 *pl022)
} else {
/* STATE_START */
message->state = STATE_RUNNING;
- pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
+ if (!pl022->next_msg_cs_active)
+ pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
}
/* Configuration Changing Per Transfer */
@@ -1504,14 +1508,28 @@ static void pump_messages(struct work_struct *work)
struct pl022 *pl022 =
container_of(work, struct pl022, pump_messages);
unsigned long flags;
+ bool was_busy = false;
/* Lock queue and check for queue work */
spin_lock_irqsave(&pl022->queue_lock, flags);
if (list_empty(&pl022->queue) || !pl022->running) {
+ if (pl022->busy) {
+ /* nothing more to do - disable spi/ssp and power off */
+ writew((readw(SSP_CR1(pl022->virtbase)) &
+ (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));
+
+ if (pl022->master_info->autosuspend_delay > 0) {
+ pm_runtime_mark_last_busy(&pl022->adev->dev);
+ pm_runtime_put_autosuspend(&pl022->adev->dev);
+ } else {
+ pm_runtime_put(&pl022->adev->dev);
+ }
+ }
pl022->busy = false;
spin_unlock_irqrestore(&pl022->queue_lock, flags);
return;
}
+
/* Make sure we are not already running a message */
if (pl022->cur_msg) {
spin_unlock_irqrestore(&pl022->queue_lock, flags);
@@ -1522,7 +1540,10 @@ static void pump_messages(struct work_struct *work)
list_entry(pl022->queue.next, struct spi_message, queue);
list_del_init(&pl022->cur_msg->queue);
- pl022->busy = true;
+ if (pl022->busy)
+ was_busy = true;
+ else
+ pl022->busy = true;
spin_unlock_irqrestore(&pl022->queue_lock, flags);
/* Initial message state */
@@ -1532,12 +1553,14 @@ static void pump_messages(struct work_struct *work)
/* Setup the SPI using the per chip configuration */
pl022->cur_chip = spi_get_ctldata(pl022->cur_msg->spi);
- /*
- * We enable the core voltage and clocks here, then the clocks
- * and core will be disabled when giveback() is called in each method
- * (poll/interrupt/DMA)
- */
- pm_runtime_get_sync(&pl022->adev->dev);
+ if (!was_busy)
+ /*
+ * We enable the core voltage and clocks here, then the clocks
+ * and core will be disabled when this workqueue is run again
+ * and there is no more work to be done.
+ */
+ pm_runtime_get_sync(&pl022->adev->dev);
+
restore_state(pl022);
flush(pl022);
@@ -1582,6 +1605,7 @@ static int start_queue(struct pl022 *pl022)
pl022->cur_msg = NULL;
pl022->cur_transfer = NULL;
pl022->cur_chip = NULL;
+ pl022->next_msg_cs_active = false;
spin_unlock_irqrestore(&pl022->queue_lock, flags);
queue_work(pl022->workqueue, &pl022->pump_messages);
@@ -1881,7 +1905,7 @@ static int pl022_setup(struct spi_device *spi)
{
struct pl022_config_chip const *chip_info;
struct chip_data *chip;
- struct ssp_clock_params clk_freq = {0, };
+ struct ssp_clock_params clk_freq = { .cpsdvsr = 0, .scr = 0};
int status = 0;
struct pl022 *pl022 = spi_master_get_devdata(spi->master);
unsigned int bits = spi->bits_per_word;
@@ -2231,7 +2255,17 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id)
dev_dbg(dev, "probe succeeded\n");
/* let runtime pm put suspend */
- pm_runtime_put(dev);
+ if (platform_info->autosuspend_delay > 0) {
+ dev_info(&adev->dev,
+ "will use autosuspend for runtime pm, delay %dms\n",
+ platform_info->autosuspend_delay);
+ pm_runtime_set_autosuspend_delay(dev,
+ platform_info->autosuspend_delay);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_put_autosuspend(dev);
+ } else {
+ pm_runtime_put(dev);
+ }
return 0;
err_spi_register:
@@ -2305,11 +2339,6 @@ static int pl022_suspend(struct device *dev)
return status;
}
- amba_vcore_enable(pl022->adev);
- amba_pclk_enable(pl022->adev);
- load_ssp_default_config(pl022);
- amba_pclk_disable(pl022->adev);
- amba_vcore_disable(pl022->adev);
dev_dbg(dev, "suspended\n");
return 0;
}
@@ -2432,6 +2461,8 @@ static struct amba_id pl022_ids[] = {
{ 0, 0 },
};
+MODULE_DEVICE_TABLE(amba, pl022_ids);
+
static struct amba_driver pl022_driver = {
.drv = {
.name = "ssp-pl022",
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 019a7163572f..dcf7e1006426 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -971,6 +971,7 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
struct s3c64xx_spi_info *sci;
struct spi_master *master;
int ret;
+ char clk_name[16];
if (pdev->id < 0) {
dev_err(&pdev->dev,
@@ -984,11 +985,6 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
}
sci = pdev->dev.platform_data;
- if (!sci->src_clk_name) {
- dev_err(&pdev->dev,
- "Board init must call s3c64xx_spi_set_info()\n");
- return -EINVAL;
- }
/* Check for availability of necessary resource */
@@ -1073,17 +1069,17 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
goto err4;
}
- sdd->src_clk = clk_get(&pdev->dev, sci->src_clk_name);
+ sprintf(clk_name, "spi_busclk%d", sci->src_clk_nr);
+ sdd->src_clk = clk_get(&pdev->dev, clk_name);
if (IS_ERR(sdd->src_clk)) {
dev_err(&pdev->dev,
- "Unable to acquire clock '%s'\n", sci->src_clk_name);
+ "Unable to acquire clock '%s'\n", clk_name);
ret = PTR_ERR(sdd->src_clk);
goto err5;
}
if (clk_enable(sdd->src_clk)) {
- dev_err(&pdev->dev, "Couldn't enable clock '%s'\n",
- sci->src_clk_name);
+ dev_err(&pdev->dev, "Couldn't enable clock '%s'\n", clk_name);
ret = -EBUSY;
goto err6;
}
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
index 6a80749391db..2a6429d8c363 100644
--- a/drivers/spi/spi-topcliff-pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -1,7 +1,7 @@
/*
* SPI bus driver for the Topcliff PCH used by Intel SoCs
*
- * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
+ * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -95,16 +95,18 @@
#define PCH_CLOCK_HZ 50000000
#define PCH_MAX_SPBR 1023
-/* Definition for ML7213 by OKI SEMICONDUCTOR */
+/* Definition for ML7213/ML7223/ML7831 by LAPIS Semiconductor */
#define PCI_VENDOR_ID_ROHM 0x10DB
#define PCI_DEVICE_ID_ML7213_SPI 0x802c
#define PCI_DEVICE_ID_ML7223_SPI 0x800F
+#define PCI_DEVICE_ID_ML7831_SPI 0x8816
/*
* Set the number of SPI instance max
* Intel EG20T PCH : 1ch
- * OKI SEMICONDUCTOR ML7213 IOH : 2ch
- * OKI SEMICONDUCTOR ML7223 IOH : 1ch
+ * LAPIS Semiconductor ML7213 IOH : 2ch
+ * LAPIS Semiconductor ML7223 IOH : 1ch
+ * LAPIS Semiconductor ML7831 IOH : 1ch
*/
#define PCH_SPI_MAX_DEV 2
@@ -218,6 +220,7 @@ static struct pci_device_id pch_spi_pcidev_id[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_GE_SPI), 1, },
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_SPI), 2, },
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_SPI), 1, },
+ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_SPI), 1, },
{ }
};
@@ -1076,7 +1079,7 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
}
sg = dma->sg_rx_p;
desc_rx = dma->chan_rx->device->device_prep_slave_sg(dma->chan_rx, sg,
- num, DMA_FROM_DEVICE,
+ num, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc_rx) {
dev_err(&data->master->dev, "%s:device_prep_slave_sg Failed\n",
@@ -1121,7 +1124,7 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
}
sg = dma->sg_tx_p;
desc_tx = dma->chan_tx->device->device_prep_slave_sg(dma->chan_tx,
- sg, num, DMA_TO_DEVICE,
+ sg, num, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc_tx) {
dev_err(&data->master->dev, "%s:device_prep_slave_sg Failed\n",
@@ -1753,4 +1756,4 @@ MODULE_PARM_DESC(use_dma,
"to use DMA for data transfers pass 1 else 0; default 1");
MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Intel EG20T PCH/OKI SEMICONDUCTOR ML7xxx IOH SPI Driver");
+MODULE_DESCRIPTION("Intel EG20T PCH/LAPIS Semiconductor ML7xxx IOH SPI Driver");
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 77eae99af11c..b2ccdea30cb9 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -319,7 +319,7 @@ struct spi_device *spi_alloc_device(struct spi_master *master)
}
spi->master = master;
- spi->dev.parent = dev;
+ spi->dev.parent = &master->dev;
spi->dev.bus = &spi_bus_type;
spi->dev.release = spidev_release;
device_initialize(&spi->dev);
diff --git a/drivers/ssb/driver_pcicore.c b/drivers/ssb/driver_pcicore.c
index 84c934c0a545..520e8286db28 100644
--- a/drivers/ssb/driver_pcicore.c
+++ b/drivers/ssb/driver_pcicore.c
@@ -517,10 +517,14 @@ static void ssb_pcicore_pcie_setup_workarounds(struct ssb_pcicore *pc)
static void __devinit ssb_pcicore_init_clientmode(struct ssb_pcicore *pc)
{
- ssb_pcicore_fix_sprom_core_index(pc);
+ struct ssb_device *pdev = pc->dev;
+ struct ssb_bus *bus = pdev->bus;
+
+ if (bus->bustype == SSB_BUSTYPE_PCI)
+ ssb_pcicore_fix_sprom_core_index(pc);
/* Disable PCI interrupts. */
- ssb_write32(pc->dev, SSB_INTVEC, 0);
+ ssb_write32(pdev, SSB_INTVEC, 0);
/* Additional PCIe always once-executed workarounds */
if (pc->dev->id.coreid == SSB_DEV_PCIE) {
diff --git a/drivers/ssb/pci.c b/drivers/ssb/pci.c
index 34c3bab90b9a..973223f5de8e 100644
--- a/drivers/ssb/pci.c
+++ b/drivers/ssb/pci.c
@@ -607,6 +607,29 @@ static void sprom_extract_r8(struct ssb_sprom *out, const u16 *in)
memcpy(&out->antenna_gain.ghz5, &out->antenna_gain.ghz24,
sizeof(out->antenna_gain.ghz5));
+ /* Extract FEM info */
+ SPEX(fem.ghz2.tssipos, SSB_SPROM8_FEM2G,
+ SSB_SROM8_FEM_TSSIPOS, SSB_SROM8_FEM_TSSIPOS_SHIFT);
+ SPEX(fem.ghz2.extpa_gain, SSB_SPROM8_FEM2G,
+ SSB_SROM8_FEM_EXTPA_GAIN, SSB_SROM8_FEM_EXTPA_GAIN_SHIFT);
+ SPEX(fem.ghz2.pdet_range, SSB_SPROM8_FEM2G,
+ SSB_SROM8_FEM_PDET_RANGE, SSB_SROM8_FEM_PDET_RANGE_SHIFT);
+ SPEX(fem.ghz2.tr_iso, SSB_SPROM8_FEM2G,
+ SSB_SROM8_FEM_TR_ISO, SSB_SROM8_FEM_TR_ISO_SHIFT);
+ SPEX(fem.ghz2.antswlut, SSB_SPROM8_FEM2G,
+ SSB_SROM8_FEM_ANTSWLUT, SSB_SROM8_FEM_ANTSWLUT_SHIFT);
+
+ SPEX(fem.ghz5.tssipos, SSB_SPROM8_FEM5G,
+ SSB_SROM8_FEM_TSSIPOS, SSB_SROM8_FEM_TSSIPOS_SHIFT);
+ SPEX(fem.ghz5.extpa_gain, SSB_SPROM8_FEM5G,
+ SSB_SROM8_FEM_EXTPA_GAIN, SSB_SROM8_FEM_EXTPA_GAIN_SHIFT);
+ SPEX(fem.ghz5.pdet_range, SSB_SPROM8_FEM5G,
+ SSB_SROM8_FEM_PDET_RANGE, SSB_SROM8_FEM_PDET_RANGE_SHIFT);
+ SPEX(fem.ghz5.tr_iso, SSB_SPROM8_FEM5G,
+ SSB_SROM8_FEM_TR_ISO, SSB_SROM8_FEM_TR_ISO_SHIFT);
+ SPEX(fem.ghz5.antswlut, SSB_SPROM8_FEM5G,
+ SSB_SROM8_FEM_ANTSWLUT, SSB_SROM8_FEM_ANTSWLUT_SHIFT);
+
sprom_extract_r458(out, in);
/* TODO - get remaining rev 8 stuff needed */
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 25cdff36a78a..21e2f4b87f14 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -72,8 +72,6 @@ source "drivers/staging/octeon/Kconfig"
source "drivers/staging/serqt_usb2/Kconfig"
-source "drivers/staging/spectra/Kconfig"
-
source "drivers/staging/quatech_usb2/Kconfig"
source "drivers/staging/vt6655/Kconfig"
@@ -116,8 +114,6 @@ source "drivers/staging/bcm/Kconfig"
source "drivers/staging/ft1000/Kconfig"
-source "drivers/staging/intel_sst/Kconfig"
-
source "drivers/staging/speakup/Kconfig"
source "drivers/staging/cptm1217/Kconfig"
@@ -132,4 +128,8 @@ source "drivers/staging/nvec/Kconfig"
source "drivers/staging/media/Kconfig"
+source "drivers/staging/omapdrm/Kconfig"
+
+source "drivers/staging/android/Kconfig"
+
endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index a25f3f26c7ff..7c5808d7212d 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -21,7 +21,6 @@ obj-$(CONFIG_RTL8192E) += rtl8192e/
obj-$(CONFIG_R8712U) += rtl8712/
obj-$(CONFIG_RTS_PSTOR) += rts_pstor/
obj-$(CONFIG_RTS5139) += rts5139/
-obj-$(CONFIG_SPECTRA) += spectra/
obj-$(CONFIG_TRANZPORT) += frontier/
obj-$(CONFIG_POHMELFS) += pohmelfs/
obj-$(CONFIG_IDE_PHISON) += phison/
@@ -50,10 +49,11 @@ obj-$(CONFIG_SBE_2T3E3) += sbe-2t3e3/
obj-$(CONFIG_USB_ENESTORAGE) += keucr/
obj-$(CONFIG_BCM_WIMAX) += bcm/
obj-$(CONFIG_FT1000) += ft1000/
-obj-$(CONFIG_SND_INTEL_SST) += intel_sst/
obj-$(CONFIG_SPEAKUP) += speakup/
obj-$(CONFIG_TOUCHSCREEN_CLEARPAD_TM1217) += cptm1217/
obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4) += ste_rmi4/
obj-$(CONFIG_DRM_PSB) += gma500/
obj-$(CONFIG_INTEL_MEI) += mei/
obj-$(CONFIG_MFD_NVEC) += nvec/
+obj-$(CONFIG_DRM_OMAP) += omapdrm/
+obj-$(CONFIG_ANDROID) += android/
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
new file mode 100644
index 000000000000..becf711117ef
--- /dev/null
+++ b/drivers/staging/android/Kconfig
@@ -0,0 +1,110 @@
+menu "Android"
+
+config ANDROID
+ bool "Android Drivers"
+ default n
+ ---help---
+ Enable support for various drivers needed on the Android platform
+
+if ANDROID
+
+config ANDROID_BINDER_IPC
+ bool "Android Binder IPC Driver"
+ default n
+
+config ASHMEM
+ bool "Enable the Anonymous Shared Memory Subsystem"
+ default n
+ depends on SHMEM || TINY_SHMEM
+ help
+ The ashmem subsystem is a new shared memory allocator, similar to
+ POSIX SHM but with different behavior and sporting a simpler
+ file-based API.
+
+config ANDROID_LOGGER
+ tristate "Android log driver"
+ default n
+
+config ANDROID_RAM_CONSOLE
+ bool "Android RAM buffer console"
+ default n
+
+config ANDROID_RAM_CONSOLE_ENABLE_VERBOSE
+ bool "Enable verbose console messages on Android RAM console"
+ default y
+ depends on ANDROID_RAM_CONSOLE
+
+menuconfig ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+ bool "Android RAM Console Enable error correction"
+ default n
+ depends on ANDROID_RAM_CONSOLE
+ depends on !ANDROID_RAM_CONSOLE_EARLY_INIT
+ select REED_SOLOMON
+ select REED_SOLOMON_ENC8
+ select REED_SOLOMON_DEC8
+
+if ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+
+config ANDROID_RAM_CONSOLE_ERROR_CORRECTION_DATA_SIZE
+ int "Android RAM Console Data size"
+ default 128
+ help
+ Must be a power of 2.
+
+config ANDROID_RAM_CONSOLE_ERROR_CORRECTION_ECC_SIZE
+ int "Android RAM Console ECC size"
+ default 16
+
+config ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE
+ int "Android RAM Console Symbol size"
+ default 8
+
+config ANDROID_RAM_CONSOLE_ERROR_CORRECTION_POLYNOMIAL
+ hex "Android RAM Console Polynomial"
+ default 0x19 if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 4)
+ default 0x29 if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 5)
+ default 0x61 if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 6)
+ default 0x89 if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 7)
+ default 0x11d if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 8)
+
+endif # ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+
+config ANDROID_RAM_CONSOLE_EARLY_INIT
+ bool "Start Android RAM console early"
+ default n
+ depends on ANDROID_RAM_CONSOLE
+
+config ANDROID_RAM_CONSOLE_EARLY_ADDR
+ hex "Android RAM console virtual address"
+ default 0
+ depends on ANDROID_RAM_CONSOLE_EARLY_INIT
+
+config ANDROID_RAM_CONSOLE_EARLY_SIZE
+ hex "Android RAM console buffer size"
+ default 0
+ depends on ANDROID_RAM_CONSOLE_EARLY_INIT
+
+config ANDROID_TIMED_OUTPUT
+ bool "Timed output class driver"
+ default y
+
+config ANDROID_TIMED_GPIO
+ tristate "Android timed gpio driver"
+ depends on GENERIC_GPIO && ANDROID_TIMED_OUTPUT
+ default n
+
+config ANDROID_LOW_MEMORY_KILLER
+ bool "Android Low Memory Killer"
+ default n
+ ---help---
+ Register processes to be killed when memory is low
+
+config ANDROID_PMEM
+ bool "Android pmem allocator"
+ depends on ARM
+
+source "drivers/staging/android/switch/Kconfig"
+
+endif # if ANDROID
+
+endmenu
diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile
new file mode 100644
index 000000000000..eaed1ff64f0f
--- /dev/null
+++ b/drivers/staging/android/Makefile
@@ -0,0 +1,9 @@
+obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o
+obj-$(CONFIG_ASHMEM) += ashmem.o
+obj-$(CONFIG_ANDROID_LOGGER) += logger.o
+obj-$(CONFIG_ANDROID_RAM_CONSOLE) += ram_console.o
+obj-$(CONFIG_ANDROID_TIMED_OUTPUT) += timed_output.o
+obj-$(CONFIG_ANDROID_TIMED_GPIO) += timed_gpio.o
+obj-$(CONFIG_ANDROID_LOW_MEMORY_KILLER) += lowmemorykiller.o
+obj-$(CONFIG_ANDROID_PMEM) += pmem.o
+obj-$(CONFIG_ANDROID_SWITCH) += switch/
diff --git a/drivers/staging/android/TODO b/drivers/staging/android/TODO
new file mode 100644
index 000000000000..e59c5be4be2b
--- /dev/null
+++ b/drivers/staging/android/TODO
@@ -0,0 +1,10 @@
+TODO:
+ - checkpatch.pl cleanups
+ - sparse fixes
+ - rename files to be not so "generic"
+ - make sure things build as modules properly
+ - add proper arch dependencies as needed
+ - audit userspace interfaces to make sure they are sane
+
+Please send patches to Greg Kroah-Hartman <greg@kroah.com> and Cc:
+Brian Swetland <swetland@google.com>
diff --git a/drivers/staging/android/android_pmem.h b/drivers/staging/android/android_pmem.h
new file mode 100644
index 000000000000..f633621f5be3
--- /dev/null
+++ b/drivers/staging/android/android_pmem.h
@@ -0,0 +1,93 @@
+/* include/linux/android_pmem.h
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _ANDROID_PMEM_H_
+#define _ANDROID_PMEM_H_
+
+#define PMEM_IOCTL_MAGIC 'p'
+#define PMEM_GET_PHYS _IOW(PMEM_IOCTL_MAGIC, 1, unsigned int)
+#define PMEM_MAP _IOW(PMEM_IOCTL_MAGIC, 2, unsigned int)
+#define PMEM_GET_SIZE _IOW(PMEM_IOCTL_MAGIC, 3, unsigned int)
+#define PMEM_UNMAP _IOW(PMEM_IOCTL_MAGIC, 4, unsigned int)
+/* This ioctl will allocate pmem space, backing the file, it will fail
+ * if the file already has an allocation, pass it the len as the argument
+ * to the ioctl */
+#define PMEM_ALLOCATE _IOW(PMEM_IOCTL_MAGIC, 5, unsigned int)
+/* This will connect one pmem file to another, pass the file that is already
+ * backed in memory as the argument to the ioctl
+ */
+#define PMEM_CONNECT _IOW(PMEM_IOCTL_MAGIC, 6, unsigned int)
+/* Returns the total size of the pmem region it is sent to as a pmem_region
+ * struct (with offset set to 0).
+ */
+#define PMEM_GET_TOTAL_SIZE _IOW(PMEM_IOCTL_MAGIC, 7, unsigned int)
+#define PMEM_CACHE_FLUSH _IOW(PMEM_IOCTL_MAGIC, 8, unsigned int)
+
+struct android_pmem_platform_data
+{
+ const char* name;
+ /* starting physical address of memory region */
+ unsigned long start;
+ /* size of memory region */
+ unsigned long size;
+ /* set to indicate the region should not be managed with an allocator */
+ unsigned no_allocator;
+ /* set to indicate maps of this region should be cached, if a mix of
+ * cached and uncached is desired, set this and open the device with
+ * O_SYNC to get an uncached region */
+ unsigned cached;
+ /* The MSM7k has bits to enable a write buffer in the bus controller*/
+ unsigned buffered;
+};
+
+struct pmem_region {
+ unsigned long offset;
+ unsigned long len;
+};
+
+#ifdef CONFIG_ANDROID_PMEM
+int is_pmem_file(struct file *file);
+int get_pmem_file(int fd, unsigned long *start, unsigned long *vstart,
+ unsigned long *end, struct file **filp);
+int get_pmem_user_addr(struct file *file, unsigned long *start,
+ unsigned long *end);
+void put_pmem_file(struct file* file);
+void flush_pmem_file(struct file *file, unsigned long start, unsigned long len);
+int pmem_setup(struct android_pmem_platform_data *pdata,
+ long (*ioctl)(struct file *, unsigned int, unsigned long),
+ int (*release)(struct inode *, struct file *));
+int pmem_remap(struct pmem_region *region, struct file *file,
+ unsigned operation);
+
+#else
+static inline int is_pmem_file(struct file *file) { return 0; }
+static inline int get_pmem_file(int fd, unsigned long *start,
+ unsigned long *vstart, unsigned long *end,
+ struct file **filp) { return -ENOSYS; }
+static inline int get_pmem_user_addr(struct file *file, unsigned long *start,
+ unsigned long *end) { return -ENOSYS; }
+static inline void put_pmem_file(struct file* file) { return; }
+static inline void flush_pmem_file(struct file *file, unsigned long start,
+ unsigned long len) { return; }
+static inline int pmem_setup(struct android_pmem_platform_data *pdata,
+ long (*ioctl)(struct file *, unsigned int, unsigned long),
+ int (*release)(struct inode *, struct file *)) { return -ENOSYS; }
+
+static inline int pmem_remap(struct pmem_region *region, struct file *file,
+ unsigned operation) { return -ENOSYS; }
+#endif
+
+#endif //_ANDROID_PMEM_H_
+
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
new file mode 100644
index 000000000000..99052bfd3a2d
--- /dev/null
+++ b/drivers/staging/android/ashmem.c
@@ -0,0 +1,752 @@
+/* mm/ashmem.c
+**
+** Anonymous Shared Memory Subsystem, ashmem
+**
+** Copyright (C) 2008 Google, Inc.
+**
+** Robert Love <rlove@google.com>
+**
+** This software is licensed under the terms of the GNU General Public
+** License version 2, as published by the Free Software Foundation, and
+** may be copied, distributed, and modified under those terms.
+**
+** This program is distributed in the hope that it will be useful,
+** but WITHOUT ANY WARRANTY; without even the implied warranty of
+** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+** GNU General Public License for more details.
+*/
+
+#include <linux/module.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/security.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/uaccess.h>
+#include <linux/personality.h>
+#include <linux/bitops.h>
+#include <linux/mutex.h>
+#include <linux/shmem_fs.h>
+#include "ashmem.h"
+
+#define ASHMEM_NAME_PREFIX "dev/ashmem/"
+#define ASHMEM_NAME_PREFIX_LEN (sizeof(ASHMEM_NAME_PREFIX) - 1)
+#define ASHMEM_FULL_NAME_LEN (ASHMEM_NAME_LEN + ASHMEM_NAME_PREFIX_LEN)
+
+/*
+ * ashmem_area - anonymous shared memory area
+ * Lifecycle: From our parent file's open() until its release()
+ * Locking: Protected by `ashmem_mutex'
+ * Big Note: Mappings do NOT pin this structure; it dies on close()
+ */
+struct ashmem_area {
+ char name[ASHMEM_FULL_NAME_LEN]; /* optional name in /proc/pid/maps */
+ struct list_head unpinned_list; /* list of all ashmem areas */
+ struct file *file; /* the shmem-based backing file */
+ size_t size; /* size of the mapping, in bytes */
+ unsigned long prot_mask; /* allowed prot bits, as vm_flags */
+};
+
+/*
+ * ashmem_range - represents an interval of unpinned (evictable) pages
+ * Lifecycle: From unpin to pin
+ * Locking: Protected by `ashmem_mutex'
+ */
+struct ashmem_range {
+ struct list_head lru; /* entry in LRU list */
+ struct list_head unpinned; /* entry in its area's unpinned list */
+ struct ashmem_area *asma; /* associated area */
+ size_t pgstart; /* starting page, inclusive */
+ size_t pgend; /* ending page, inclusive */
+ unsigned int purged; /* ASHMEM_NOT or ASHMEM_WAS_PURGED */
+};
+
+/* LRU list of unpinned pages, protected by ashmem_mutex */
+static LIST_HEAD(ashmem_lru_list);
+
+/* Count of pages on our LRU list, protected by ashmem_mutex */
+static unsigned long lru_count;
+
+/*
+ * ashmem_mutex - protects the list of and each individual ashmem_area
+ *
+ * Lock Ordering: ashmem_mutex -> i_mutex -> i_alloc_sem
+ */
+static DEFINE_MUTEX(ashmem_mutex);
+
+static struct kmem_cache *ashmem_area_cachep __read_mostly;
+static struct kmem_cache *ashmem_range_cachep __read_mostly;
+
+#define range_size(range) \
+ ((range)->pgend - (range)->pgstart + 1)
+
+#define range_on_lru(range) \
+ ((range)->purged == ASHMEM_NOT_PURGED)
+
+#define page_range_subsumes_range(range, start, end) \
+ (((range)->pgstart >= (start)) && ((range)->pgend <= (end)))
+
+#define page_range_subsumed_by_range(range, start, end) \
+ (((range)->pgstart <= (start)) && ((range)->pgend >= (end)))
+
+#define page_in_range(range, page) \
+ (((range)->pgstart <= (page)) && ((range)->pgend >= (page)))
+
+#define page_range_in_range(range, start, end) \
+ (page_in_range(range, start) || page_in_range(range, end) || \
+ page_range_subsumes_range(range, start, end))
+
+#define range_before_page(range, page) \
+ ((range)->pgend < (page))
+
+#define PROT_MASK (PROT_EXEC | PROT_READ | PROT_WRITE)
+
+static inline void lru_add(struct ashmem_range *range)
+{
+ list_add_tail(&range->lru, &ashmem_lru_list);
+ lru_count += range_size(range);
+}
+
+static inline void lru_del(struct ashmem_range *range)
+{
+ list_del(&range->lru);
+ lru_count -= range_size(range);
+}
+
+/*
+ * range_alloc - allocate and initialize a new ashmem_range structure
+ *
+ * 'asma' - associated ashmem_area
+ * 'prev_range' - the previous ashmem_range in the sorted asma->unpinned list
+ * 'purged' - initial purge value (ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
+ * 'start' - starting page, inclusive
+ * 'end' - ending page, inclusive
+ *
+ * Caller must hold ashmem_mutex.
+ */
+static int range_alloc(struct ashmem_area *asma,
+ struct ashmem_range *prev_range, unsigned int purged,
+ size_t start, size_t end)
+{
+ struct ashmem_range *range;
+
+ range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
+ if (unlikely(!range))
+ return -ENOMEM;
+
+ range->asma = asma;
+ range->pgstart = start;
+ range->pgend = end;
+ range->purged = purged;
+
+ list_add_tail(&range->unpinned, &prev_range->unpinned);
+
+ if (range_on_lru(range))
+ lru_add(range);
+
+ return 0;
+}
+
+static void range_del(struct ashmem_range *range)
+{
+ list_del(&range->unpinned);
+ if (range_on_lru(range))
+ lru_del(range);
+ kmem_cache_free(ashmem_range_cachep, range);
+}
+
+/*
+ * range_shrink - shrinks a range
+ *
+ * Caller must hold ashmem_mutex.
+ */
+static inline void range_shrink(struct ashmem_range *range,
+ size_t start, size_t end)
+{
+ size_t pre = range_size(range);
+
+ range->pgstart = start;
+ range->pgend = end;
+
+ if (range_on_lru(range))
+ lru_count -= pre - range_size(range);
+}
+
+static int ashmem_open(struct inode *inode, struct file *file)
+{
+ struct ashmem_area *asma;
+ int ret;
+
+ ret = generic_file_open(inode, file);
+ if (unlikely(ret))
+ return ret;
+
+ asma = kmem_cache_zalloc(ashmem_area_cachep, GFP_KERNEL);
+ if (unlikely(!asma))
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&asma->unpinned_list);
+ memcpy(asma->name, ASHMEM_NAME_PREFIX, ASHMEM_NAME_PREFIX_LEN);
+ asma->prot_mask = PROT_MASK;
+ file->private_data = asma;
+
+ return 0;
+}
+
+static int ashmem_release(struct inode *ignored, struct file *file)
+{
+ struct ashmem_area *asma = file->private_data;
+ struct ashmem_range *range, *next;
+
+ mutex_lock(&ashmem_mutex);
+ list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned)
+ range_del(range);
+ mutex_unlock(&ashmem_mutex);
+
+ if (asma->file)
+ fput(asma->file);
+ kmem_cache_free(ashmem_area_cachep, asma);
+
+ return 0;
+}
+
+static ssize_t ashmem_read(struct file *file, char __user *buf,
+ size_t len, loff_t *pos)
+{
+ struct ashmem_area *asma = file->private_data;
+ int ret = 0;
+
+ mutex_lock(&ashmem_mutex);
+
+ /* If size is not set, or set to 0, always return EOF. */
+ if (asma->size == 0)
+ goto out;
+
+ if (!asma->file) {
+ ret = -EBADF;
+ goto out;
+ }
+
+ ret = asma->file->f_op->read(asma->file, buf, len, pos);
+ if (ret < 0)
+ goto out;
+
+ /** Update backing file pos, since f_ops->read() doesn't */
+ asma->file->f_pos = *pos;
+
+out:
+ mutex_unlock(&ashmem_mutex);
+ return ret;
+}
+
+static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin)
+{
+ struct ashmem_area *asma = file->private_data;
+ int ret;
+
+ mutex_lock(&ashmem_mutex);
+
+ if (asma->size == 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!asma->file) {
+ ret = -EBADF;
+ goto out;
+ }
+
+ ret = asma->file->f_op->llseek(asma->file, offset, origin);
+ if (ret < 0)
+ goto out;
+
+ /** Copy f_pos from backing file, since f_ops->llseek() sets it */
+ file->f_pos = asma->file->f_pos;
+
+out:
+ mutex_unlock(&ashmem_mutex);
+ return ret;
+}
+
+static inline unsigned long calc_vm_may_flags(unsigned long prot)
+{
+ return _calc_vm_trans(prot, PROT_READ, VM_MAYREAD) |
+ _calc_vm_trans(prot, PROT_WRITE, VM_MAYWRITE) |
+ _calc_vm_trans(prot, PROT_EXEC, VM_MAYEXEC);
+}
+
+static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct ashmem_area *asma = file->private_data;
+ int ret = 0;
+
+ mutex_lock(&ashmem_mutex);
+
+ /* user needs to SET_SIZE before mapping */
+ if (unlikely(!asma->size)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* requested protection bits must match our allowed protection mask */
+ if (unlikely((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask)) &
+ calc_vm_prot_bits(PROT_MASK))) {
+ ret = -EPERM;
+ goto out;
+ }
+ vma->vm_flags &= ~calc_vm_may_flags(~asma->prot_mask);
+
+ if (!asma->file) {
+ char *name = ASHMEM_NAME_DEF;
+ struct file *vmfile;
+
+ if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0')
+ name = asma->name;
+
+ /* ... and allocate the backing shmem file */
+ vmfile = shmem_file_setup(name, asma->size, vma->vm_flags);
+ if (unlikely(IS_ERR(vmfile))) {
+ ret = PTR_ERR(vmfile);
+ goto out;
+ }
+ asma->file = vmfile;
+ }
+ get_file(asma->file);
+
+ /*
+ * XXX - Reworked to use shmem_zero_setup() instead of
+ * shmem_set_file while we're in staging. -jstultz
+ */
+ if (vma->vm_flags & VM_SHARED) {
+ ret = shmem_zero_setup(vma);
+ if (ret) {
+ fput(asma->file);
+ goto out;
+ }
+ }
+
+ if (vma->vm_file)
+ fput(vma->vm_file);
+ vma->vm_file = asma->file;
+ vma->vm_flags |= VM_CAN_NONLINEAR;
+
+out:
+ mutex_unlock(&ashmem_mutex);
+ return ret;
+}
+
+/*
+ * ashmem_shrink - our cache shrinker, called from mm/vmscan.c :: shrink_slab
+ *
+ * 'nr_to_scan' is the number of objects (pages) to prune, or 0 to query how
+ * many objects (pages) we have in total.
+ *
+ * 'gfp_mask' is the mask of the allocation that got us into this mess.
+ *
+ * Return value is the number of objects (pages) remaining, or -1 if we cannot
+ * proceed without risk of deadlock (due to gfp_mask).
+ *
+ * We approximate LRU via least-recently-unpinned, jettisoning unpinned partial
+ * chunks of ashmem regions LRU-wise one-at-a-time until we hit 'nr_to_scan'
+ * pages freed.
+ */
+static int ashmem_shrink(struct shrinker *s, struct shrink_control *sc)
+{
+ struct ashmem_range *range, *next;
+
+ /* We might recurse into filesystem code, so bail out if necessary */
+ if (sc->nr_to_scan && !(sc->gfp_mask & __GFP_FS))
+ return -1;
+ if (!sc->nr_to_scan)
+ return lru_count;
+
+ mutex_lock(&ashmem_mutex);
+ list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
+ struct inode *inode = range->asma->file->f_dentry->d_inode;
+ loff_t start = range->pgstart * PAGE_SIZE;
+ loff_t end = (range->pgend + 1) * PAGE_SIZE - 1;
+
+ vmtruncate_range(inode, start, end);
+ range->purged = ASHMEM_WAS_PURGED;
+ lru_del(range);
+
+ sc->nr_to_scan -= range_size(range);
+ if (sc->nr_to_scan <= 0)
+ break;
+ }
+ mutex_unlock(&ashmem_mutex);
+
+ return lru_count;
+}
+
+static struct shrinker ashmem_shrinker = {
+ .shrink = ashmem_shrink,
+ .seeks = DEFAULT_SEEKS * 4,
+};
+
+static int set_prot_mask(struct ashmem_area *asma, unsigned long prot)
+{
+ int ret = 0;
+
+ mutex_lock(&ashmem_mutex);
+
+ /* the user can only remove, not add, protection bits */
+ if (unlikely((asma->prot_mask & prot) != prot)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* does the application expect PROT_READ to imply PROT_EXEC? */
+ if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
+ prot |= PROT_EXEC;
+
+ asma->prot_mask = prot;
+
+out:
+ mutex_unlock(&ashmem_mutex);
+ return ret;
+}
+
+static int set_name(struct ashmem_area *asma, void __user *name)
+{
+ int ret = 0;
+
+ mutex_lock(&ashmem_mutex);
+
+ /* cannot change an existing mapping's name */
+ if (unlikely(asma->file)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (unlikely(copy_from_user(asma->name + ASHMEM_NAME_PREFIX_LEN,
+ name, ASHMEM_NAME_LEN)))
+ ret = -EFAULT;
+ asma->name[ASHMEM_FULL_NAME_LEN-1] = '\0';
+
+out:
+ mutex_unlock(&ashmem_mutex);
+
+ return ret;
+}
+
+static int get_name(struct ashmem_area *asma, void __user *name)
+{
+ int ret = 0;
+
+ mutex_lock(&ashmem_mutex);
+ if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') {
+ size_t len;
+
+ /*
+ * Copying only `len', instead of ASHMEM_NAME_LEN, bytes
+ * prevents us from revealing one user's stack to another.
+ */
+ len = strlen(asma->name + ASHMEM_NAME_PREFIX_LEN) + 1;
+ if (unlikely(copy_to_user(name,
+ asma->name + ASHMEM_NAME_PREFIX_LEN, len)))
+ ret = -EFAULT;
+ } else {
+ if (unlikely(copy_to_user(name, ASHMEM_NAME_DEF,
+ sizeof(ASHMEM_NAME_DEF))))
+ ret = -EFAULT;
+ }
+ mutex_unlock(&ashmem_mutex);
+
+ return ret;
+}
+
+/*
+ * ashmem_pin - pin the given ashmem region, returning whether it was
+ * previously purged (ASHMEM_WAS_PURGED) or not (ASHMEM_NOT_PURGED).
+ *
+ * Caller must hold ashmem_mutex.
+ */
+static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
+{
+ struct ashmem_range *range, *next;
+ int ret = ASHMEM_NOT_PURGED;
+
+ list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
+ /* moved past last applicable page; we can short circuit */
+ if (range_before_page(range, pgstart))
+ break;
+
+ /*
+ * The user can ask us to pin pages that span multiple ranges,
+ * or to pin pages that aren't even unpinned, so this is messy.
+ *
+ * Four cases:
+ * 1. The requested range subsumes an existing range, so we
+ * just remove the entire matching range.
+ * 2. The requested range overlaps the start of an existing
+ * range, so we just update that range.
+ * 3. The requested range overlaps the end of an existing
+ * range, so we just update that range.
+ * 4. The requested range punches a hole in an existing range,
+ * so we have to update one side of the range and then
+ * create a new range for the other side.
+ */
+ if (page_range_in_range(range, pgstart, pgend)) {
+ ret |= range->purged;
+
+ /* Case #1: Easy. Just nuke the whole thing. */
+ if (page_range_subsumes_range(range, pgstart, pgend)) {
+ range_del(range);
+ continue;
+ }
+
+ /* Case #2: We overlap from the start, so adjust it */
+ if (range->pgstart >= pgstart) {
+ range_shrink(range, pgend + 1, range->pgend);
+ continue;
+ }
+
+ /* Case #3: We overlap from the rear, so adjust it */
+ if (range->pgend <= pgend) {
+ range_shrink(range, range->pgstart, pgstart-1);
+ continue;
+ }
+
+ /*
+ * Case #4: We eat a chunk out of the middle. A bit
+ * more complicated, we allocate a new range for the
+ * second half and adjust the first chunk's endpoint.
+ */
+ range_alloc(asma, range, range->purged,
+ pgend + 1, range->pgend);
+ range_shrink(range, range->pgstart, pgstart - 1);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+/*
+ * ashmem_unpin - unpin the given range of pages. Returns zero on success.
+ *
+ * Caller must hold ashmem_mutex.
+ */
+static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
+{
+ struct ashmem_range *range, *next;
+ unsigned int purged = ASHMEM_NOT_PURGED;
+
+restart:
+ list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
+ /* short circuit: this is our insertion point */
+ if (range_before_page(range, pgstart))
+ break;
+
+ /*
+ * The user can ask us to unpin pages that are already entirely
+ * or partially pinned. We handle those two cases here.
+ */
+ if (page_range_subsumed_by_range(range, pgstart, pgend))
+ return 0;
+ if (page_range_in_range(range, pgstart, pgend)) {
+ pgstart = min_t(size_t, range->pgstart, pgstart),
+ pgend = max_t(size_t, range->pgend, pgend);
+ purged |= range->purged;
+ range_del(range);
+ goto restart;
+ }
+ }
+
+ return range_alloc(asma, range, purged, pgstart, pgend);
+}
+
+/*
+ * ashmem_get_pin_status - Returns ASHMEM_IS_UNPINNED if _any_ pages in the
+ * given interval are unpinned and ASHMEM_IS_PINNED otherwise.
+ *
+ * Caller must hold ashmem_mutex.
+ */
+static int ashmem_get_pin_status(struct ashmem_area *asma, size_t pgstart,
+ size_t pgend)
+{
+ struct ashmem_range *range;
+ int ret = ASHMEM_IS_PINNED;
+
+ list_for_each_entry(range, &asma->unpinned_list, unpinned) {
+ if (range_before_page(range, pgstart))
+ break;
+ if (page_range_in_range(range, pgstart, pgend)) {
+ ret = ASHMEM_IS_UNPINNED;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
+ void __user *p)
+{
+ struct ashmem_pin pin;
+ size_t pgstart, pgend;
+ int ret = -EINVAL;
+
+ if (unlikely(!asma->file))
+ return -EINVAL;
+
+ if (unlikely(copy_from_user(&pin, p, sizeof(pin))))
+ return -EFAULT;
+
+ /* per custom, you can pass zero for len to mean "everything onward" */
+ if (!pin.len)
+ pin.len = PAGE_ALIGN(asma->size) - pin.offset;
+
+ if (unlikely((pin.offset | pin.len) & ~PAGE_MASK))
+ return -EINVAL;
+
+ if (unlikely(((__u32) -1) - pin.offset < pin.len))
+ return -EINVAL;
+
+ if (unlikely(PAGE_ALIGN(asma->size) < pin.offset + pin.len))
+ return -EINVAL;
+
+ pgstart = pin.offset / PAGE_SIZE;
+ pgend = pgstart + (pin.len / PAGE_SIZE) - 1;
+
+ mutex_lock(&ashmem_mutex);
+
+ switch (cmd) {
+ case ASHMEM_PIN:
+ ret = ashmem_pin(asma, pgstart, pgend);
+ break;
+ case ASHMEM_UNPIN:
+ ret = ashmem_unpin(asma, pgstart, pgend);
+ break;
+ case ASHMEM_GET_PIN_STATUS:
+ ret = ashmem_get_pin_status(asma, pgstart, pgend);
+ break;
+ }
+
+ mutex_unlock(&ashmem_mutex);
+
+ return ret;
+}
+
+static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct ashmem_area *asma = file->private_data;
+ long ret = -ENOTTY;
+
+ switch (cmd) {
+ case ASHMEM_SET_NAME:
+ ret = set_name(asma, (void __user *) arg);
+ break;
+ case ASHMEM_GET_NAME:
+ ret = get_name(asma, (void __user *) arg);
+ break;
+ case ASHMEM_SET_SIZE:
+ ret = -EINVAL;
+ if (!asma->file) {
+ ret = 0;
+ asma->size = (size_t) arg;
+ }
+ break;
+ case ASHMEM_GET_SIZE:
+ ret = asma->size;
+ break;
+ case ASHMEM_SET_PROT_MASK:
+ ret = set_prot_mask(asma, arg);
+ break;
+ case ASHMEM_GET_PROT_MASK:
+ ret = asma->prot_mask;
+ break;
+ case ASHMEM_PIN:
+ case ASHMEM_UNPIN:
+ case ASHMEM_GET_PIN_STATUS:
+ ret = ashmem_pin_unpin(asma, cmd, (void __user *) arg);
+ break;
+ case ASHMEM_PURGE_ALL_CACHES:
+ ret = -EPERM;
+ if (capable(CAP_SYS_ADMIN)) {
+ struct shrink_control sc = {
+ .gfp_mask = GFP_KERNEL,
+ .nr_to_scan = 0,
+ };
+ ret = ashmem_shrink(&ashmem_shrinker, &sc);
+ sc.nr_to_scan = ret;
+ ashmem_shrink(&ashmem_shrinker, &sc);
+ }
+ break;
+ }
+
+ return ret;
+}
+
+static struct file_operations ashmem_fops = {
+ .owner = THIS_MODULE,
+ .open = ashmem_open,
+ .release = ashmem_release,
+ .read = ashmem_read,
+ .llseek = ashmem_llseek,
+ .mmap = ashmem_mmap,
+ .unlocked_ioctl = ashmem_ioctl,
+ .compat_ioctl = ashmem_ioctl,
+};
+
+static struct miscdevice ashmem_misc = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "ashmem",
+ .fops = &ashmem_fops,
+};
+
+static int __init ashmem_init(void)
+{
+ int ret;
+
+ ashmem_area_cachep = kmem_cache_create("ashmem_area_cache",
+ sizeof(struct ashmem_area),
+ 0, 0, NULL);
+ if (unlikely(!ashmem_area_cachep)) {
+ printk(KERN_ERR "ashmem: failed to create slab cache\n");
+ return -ENOMEM;
+ }
+
+ ashmem_range_cachep = kmem_cache_create("ashmem_range_cache",
+ sizeof(struct ashmem_range),
+ 0, 0, NULL);
+ if (unlikely(!ashmem_range_cachep)) {
+ printk(KERN_ERR "ashmem: failed to create slab cache\n");
+ return -ENOMEM;
+ }
+
+ ret = misc_register(&ashmem_misc);
+ if (unlikely(ret)) {
+ printk(KERN_ERR "ashmem: failed to register misc device!\n");
+ return ret;
+ }
+
+ register_shrinker(&ashmem_shrinker);
+
+ printk(KERN_INFO "ashmem: initialized\n");
+
+ return 0;
+}
+
+static void __exit ashmem_exit(void)
+{
+ int ret;
+
+ unregister_shrinker(&ashmem_shrinker);
+
+ ret = misc_deregister(&ashmem_misc);
+ if (unlikely(ret))
+ printk(KERN_ERR "ashmem: failed to unregister misc device!\n");
+
+ kmem_cache_destroy(ashmem_range_cachep);
+ kmem_cache_destroy(ashmem_area_cachep);
+
+ printk(KERN_INFO "ashmem: unloaded\n");
+}
+
+module_init(ashmem_init);
+module_exit(ashmem_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/android/ashmem.h b/drivers/staging/android/ashmem.h
new file mode 100644
index 000000000000..1976b10ef93e
--- /dev/null
+++ b/drivers/staging/android/ashmem.h
@@ -0,0 +1,48 @@
+/*
+ * include/linux/ashmem.h
+ *
+ * Copyright 2008 Google Inc.
+ * Author: Robert Love
+ *
+ * This file is dual licensed. It may be redistributed and/or modified
+ * under the terms of the Apache 2.0 License OR version 2 of the GNU
+ * General Public License.
+ */
+
+#ifndef _LINUX_ASHMEM_H
+#define _LINUX_ASHMEM_H
+
+#include <linux/limits.h>
+#include <linux/ioctl.h>
+
+#define ASHMEM_NAME_LEN 256
+
+#define ASHMEM_NAME_DEF "dev/ashmem"
+
+/* Return values from ASHMEM_PIN: Was the mapping purged while unpinned? */
+#define ASHMEM_NOT_PURGED 0
+#define ASHMEM_WAS_PURGED 1
+
+/* Return values from ASHMEM_GET_PIN_STATUS: Is the mapping pinned? */
+#define ASHMEM_IS_UNPINNED 0
+#define ASHMEM_IS_PINNED 1
+
+struct ashmem_pin {
+ __u32 offset; /* offset into region, in bytes, page-aligned */
+ __u32 len; /* length forward from offset, in bytes, page-aligned */
+};
+
+#define __ASHMEMIOC 0x77
+
+#define ASHMEM_SET_NAME _IOW(__ASHMEMIOC, 1, char[ASHMEM_NAME_LEN])
+#define ASHMEM_GET_NAME _IOR(__ASHMEMIOC, 2, char[ASHMEM_NAME_LEN])
+#define ASHMEM_SET_SIZE _IOW(__ASHMEMIOC, 3, size_t)
+#define ASHMEM_GET_SIZE _IO(__ASHMEMIOC, 4)
+#define ASHMEM_SET_PROT_MASK _IOW(__ASHMEMIOC, 5, unsigned long)
+#define ASHMEM_GET_PROT_MASK _IO(__ASHMEMIOC, 6)
+#define ASHMEM_PIN _IOW(__ASHMEMIOC, 7, struct ashmem_pin)
+#define ASHMEM_UNPIN _IOW(__ASHMEMIOC, 8, struct ashmem_pin)
+#define ASHMEM_GET_PIN_STATUS _IO(__ASHMEMIOC, 9)
+#define ASHMEM_PURGE_ALL_CACHES _IO(__ASHMEMIOC, 10)
+
+#endif /* _LINUX_ASHMEM_H */
diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
new file mode 100644
index 000000000000..7491801a661c
--- /dev/null
+++ b/drivers/staging/android/binder.c
@@ -0,0 +1,3600 @@
+/* binder.c
+ *
+ * Android IPC Subsystem
+ *
+ * Copyright (C) 2007-2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <asm/cacheflush.h>
+#include <linux/fdtable.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/miscdevice.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/nsproxy.h>
+#include <linux/poll.h>
+#include <linux/debugfs.h>
+#include <linux/rbtree.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+
+#include "binder.h"
+
+static DEFINE_MUTEX(binder_lock);
+static DEFINE_MUTEX(binder_deferred_lock);
+
+static HLIST_HEAD(binder_procs);
+static HLIST_HEAD(binder_deferred_list);
+static HLIST_HEAD(binder_dead_nodes);
+
+static struct dentry *binder_debugfs_dir_entry_root;
+static struct dentry *binder_debugfs_dir_entry_proc;
+static struct binder_node *binder_context_mgr_node;
+static uid_t binder_context_mgr_uid = -1;
+static int binder_last_id;
+static struct workqueue_struct *binder_deferred_workqueue;
+
+#define BINDER_DEBUG_ENTRY(name) \
+static int binder_##name##_open(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, binder_##name##_show, inode->i_private); \
+} \
+\
+static const struct file_operations binder_##name##_fops = { \
+ .owner = THIS_MODULE, \
+ .open = binder_##name##_open, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+}
+
+static int binder_proc_show(struct seq_file *m, void *unused);
+BINDER_DEBUG_ENTRY(proc);
+
+/* This is only defined in include/asm-arm/sizes.h */
+#ifndef SZ_1K
+#define SZ_1K 0x400
+#endif
+
+#ifndef SZ_4M
+#define SZ_4M 0x400000
+#endif
+
+#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)
+
+#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)
+
+enum {
+ BINDER_DEBUG_USER_ERROR = 1U << 0,
+ BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
+ BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2,
+ BINDER_DEBUG_OPEN_CLOSE = 1U << 3,
+ BINDER_DEBUG_DEAD_BINDER = 1U << 4,
+ BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5,
+ BINDER_DEBUG_READ_WRITE = 1U << 6,
+ BINDER_DEBUG_USER_REFS = 1U << 7,
+ BINDER_DEBUG_THREADS = 1U << 8,
+ BINDER_DEBUG_TRANSACTION = 1U << 9,
+ BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
+ BINDER_DEBUG_FREE_BUFFER = 1U << 11,
+ BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
+ BINDER_DEBUG_BUFFER_ALLOC = 1U << 13,
+ BINDER_DEBUG_PRIORITY_CAP = 1U << 14,
+ BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 15,
+};
+static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
+ BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
+module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
+
+static int binder_debug_no_lock;
+module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO);
+
+static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
+static int binder_stop_on_user_error;
+
+static int binder_set_stop_on_user_error(const char *val,
+ struct kernel_param *kp)
+{
+ int ret;
+ ret = param_set_int(val, kp);
+ if (binder_stop_on_user_error < 2)
+ wake_up(&binder_user_error_wait);
+ return ret;
+}
+module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
+ param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);
+
+#define binder_debug(mask, x...) \
+ do { \
+ if (binder_debug_mask & mask) \
+ printk(KERN_INFO x); \
+ } while (0)
+
+#define binder_user_error(x...) \
+ do { \
+ if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
+ printk(KERN_INFO x); \
+ if (binder_stop_on_user_error) \
+ binder_stop_on_user_error = 2; \
+ } while (0)
+
+enum binder_stat_types {
+ BINDER_STAT_PROC,
+ BINDER_STAT_THREAD,
+ BINDER_STAT_NODE,
+ BINDER_STAT_REF,
+ BINDER_STAT_DEATH,
+ BINDER_STAT_TRANSACTION,
+ BINDER_STAT_TRANSACTION_COMPLETE,
+ BINDER_STAT_COUNT
+};
+
+struct binder_stats {
+ int br[_IOC_NR(BR_FAILED_REPLY) + 1];
+ int bc[_IOC_NR(BC_DEAD_BINDER_DONE) + 1];
+ int obj_created[BINDER_STAT_COUNT];
+ int obj_deleted[BINDER_STAT_COUNT];
+};
+
+static struct binder_stats binder_stats;
+
+static inline void binder_stats_deleted(enum binder_stat_types type)
+{
+ binder_stats.obj_deleted[type]++;
+}
+
+static inline void binder_stats_created(enum binder_stat_types type)
+{
+ binder_stats.obj_created[type]++;
+}
+
+struct binder_transaction_log_entry {
+ int debug_id;
+ int call_type;
+ int from_proc;
+ int from_thread;
+ int target_handle;
+ int to_proc;
+ int to_thread;
+ int to_node;
+ int data_size;
+ int offsets_size;
+};
+struct binder_transaction_log {
+ int next;
+ int full;
+ struct binder_transaction_log_entry entry[32];
+};
+static struct binder_transaction_log binder_transaction_log;
+static struct binder_transaction_log binder_transaction_log_failed;
+
+static struct binder_transaction_log_entry *binder_transaction_log_add(
+ struct binder_transaction_log *log)
+{
+ struct binder_transaction_log_entry *e;
+ e = &log->entry[log->next];
+ memset(e, 0, sizeof(*e));
+ log->next++;
+ if (log->next == ARRAY_SIZE(log->entry)) {
+ log->next = 0;
+ log->full = 1;
+ }
+ return e;
+}
+
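+/*
+ * A binder_work item is queued on a thread or process todo list; the type
+ * field tells binder_thread_read() which BR_* command to return for it.
+ */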
+struct binder_work {
+ struct list_head entry;
+ enum {
+ BINDER_WORK_TRANSACTION = 1,
+ BINDER_WORK_TRANSACTION_COMPLETE,
+ BINDER_WORK_NODE,
+ BINDER_WORK_DEAD_BINDER,
+ BINDER_WORK_DEAD_BINDER_AND_CLEAR,
+ BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
+ } type;
+};
+
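+/*
+ * A binder_node represents a binder object living in its owning process
+ * (proc); other processes reach it through binder_ref entries chained on the
+ * refs list. The bitfields track which reference state userspace has already
+ * been told about.
+ */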
+struct binder_node {
+ int debug_id;
+ struct binder_work work;
+ union {
+ struct rb_node rb_node;
+ struct hlist_node dead_node;
+ };
+ struct binder_proc *proc;
+ struct hlist_head refs;
+ int internal_strong_refs;
+ int local_weak_refs;
+ int local_strong_refs;
+ void __user *ptr;
+ void __user *cookie;
+ unsigned has_strong_ref:1;
+ unsigned pending_strong_ref:1;
+ unsigned has_weak_ref:1;
+ unsigned pending_weak_ref:1;
+ unsigned has_async_transaction:1;
+ unsigned accept_fds:1;
+ unsigned min_priority:8;
+ struct list_head async_todo;
+};
+
+struct binder_ref_death {
+ struct binder_work work;
+ void __user *cookie;
+};
+
+struct binder_ref {
+ /* Lookups needed: */
+ /* node + proc => ref (transaction) */
+ /* desc + proc => ref (transaction, inc/dec ref) */
+ /* node => refs + procs (proc exit) */
+ int debug_id;
+ struct rb_node rb_node_desc;
+ struct rb_node rb_node_node;
+ struct hlist_node node_entry;
+ struct binder_proc *proc;
+ struct binder_node *node;
+ uint32_t desc;
+ int strong;
+ int weak;
+ struct binder_ref_death *death;
+};
+
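+/*
+ * A binder_buffer is a slice of the mmap'ed transaction area. All buffers
+ * are linked by address on proc->buffers; free buffers also sit in a
+ * size-ordered rbtree and allocated buffers in an address-ordered rbtree.
+ */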
+struct binder_buffer {
+	struct list_head entry; /* free and allocated entries by address */
+ struct rb_node rb_node; /* free entry by size or allocated entry */
+ /* by address */
+ unsigned free:1;
+ unsigned allow_user_free:1;
+ unsigned async_transaction:1;
+ unsigned debug_id:29;
+
+ struct binder_transaction *transaction;
+
+ struct binder_node *target_node;
+ size_t data_size;
+ size_t offsets_size;
+ uint8_t data[0];
+};
+
+enum binder_deferred_state {
+ BINDER_DEFERRED_PUT_FILES = 0x01,
+ BINDER_DEFERRED_FLUSH = 0x02,
+ BINDER_DEFERRED_RELEASE = 0x04,
+};
+
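+/*
+ * Per-process state, created when a process opens the binder device. It owns
+ * the thread, node and ref rbtrees as well as the mmap'ed buffer area
+ * described by buffer, buffer_size and pages.
+ */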
+struct binder_proc {
+ struct hlist_node proc_node;
+ struct rb_root threads;
+ struct rb_root nodes;
+ struct rb_root refs_by_desc;
+ struct rb_root refs_by_node;
+ int pid;
+ struct vm_area_struct *vma;
+ struct task_struct *tsk;
+ struct files_struct *files;
+ struct hlist_node deferred_work_node;
+ int deferred_work;
+ void *buffer;
+ ptrdiff_t user_buffer_offset;
+
+ struct list_head buffers;
+ struct rb_root free_buffers;
+ struct rb_root allocated_buffers;
+ size_t free_async_space;
+
+ struct page **pages;
+ size_t buffer_size;
+ uint32_t buffer_free;
+ struct list_head todo;
+ wait_queue_head_t wait;
+ struct binder_stats stats;
+ struct list_head delivered_death;
+ int max_threads;
+ int requested_threads;
+ int requested_threads_started;
+ int ready_threads;
+ long default_priority;
+ struct dentry *debugfs_entry;
+};
+
+enum {
+ BINDER_LOOPER_STATE_REGISTERED = 0x01,
+ BINDER_LOOPER_STATE_ENTERED = 0x02,
+ BINDER_LOOPER_STATE_EXITED = 0x04,
+ BINDER_LOOPER_STATE_INVALID = 0x08,
+ BINDER_LOOPER_STATE_WAITING = 0x10,
+ BINDER_LOOPER_STATE_NEED_RETURN = 0x20
+};
+
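+/* Per-thread state, looked up by pid in proc->threads for each caller. */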
+struct binder_thread {
+ struct binder_proc *proc;
+ struct rb_node rb_node;
+ int pid;
+ int looper;
+ struct binder_transaction *transaction_stack;
+ struct list_head todo;
+ uint32_t return_error; /* Write failed, return error code in read buf */
+ uint32_t return_error2; /* Write failed, return error code in read */
+ /* buffer. Used when sending a reply to a dead process that */
+ /* we are also waiting on */
+ wait_queue_head_t wait;
+ struct binder_stats stats;
+};
+
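+/*
+ * An in-flight transaction. from_parent/to_parent chain transactions into
+ * per-thread stacks so that replies can be routed back to the right caller.
+ */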
+struct binder_transaction {
+ int debug_id;
+ struct binder_work work;
+ struct binder_thread *from;
+ struct binder_transaction *from_parent;
+ struct binder_proc *to_proc;
+ struct binder_thread *to_thread;
+ struct binder_transaction *to_parent;
+ unsigned need_reply:1;
+ /* unsigned is_dead:1; */ /* not used at the moment */
+
+ struct binder_buffer *buffer;
+ unsigned int code;
+ unsigned int flags;
+ long priority;
+ long saved_priority;
+ uid_t sender_euid;
+};
+
+static void
+binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
+
+/*
+ * copied from get_unused_fd_flags
+ */
+int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
+{
+ struct files_struct *files = proc->files;
+ int fd, error;
+ struct fdtable *fdt;
+ unsigned long rlim_cur;
+ unsigned long irqs;
+
+ if (files == NULL)
+ return -ESRCH;
+
+ error = -EMFILE;
+ spin_lock(&files->file_lock);
+
+repeat:
+ fdt = files_fdtable(files);
+ fd = find_next_zero_bit(fdt->open_fds->fds_bits, fdt->max_fds,
+ files->next_fd);
+
+ /*
+ * N.B. For clone tasks sharing a files structure, this test
+ * will limit the total number of files that can be opened.
+ */
+ rlim_cur = 0;
+ if (lock_task_sighand(proc->tsk, &irqs)) {
+ rlim_cur = proc->tsk->signal->rlim[RLIMIT_NOFILE].rlim_cur;
+ unlock_task_sighand(proc->tsk, &irqs);
+ }
+ if (fd >= rlim_cur)
+ goto out;
+
+ /* Do we need to expand the fd array or fd set? */
+ error = expand_files(files, fd);
+ if (error < 0)
+ goto out;
+
+ if (error) {
+ /*
+ * If we needed to expand the fs array we
+ * might have blocked - try again.
+ */
+ error = -EMFILE;
+ goto repeat;
+ }
+
+ FD_SET(fd, fdt->open_fds);
+ if (flags & O_CLOEXEC)
+ FD_SET(fd, fdt->close_on_exec);
+ else
+ FD_CLR(fd, fdt->close_on_exec);
+ files->next_fd = fd + 1;
+#if 1
+ /* Sanity check */
+ if (fdt->fd[fd] != NULL) {
+ printk(KERN_WARNING "get_unused_fd: slot %d not NULL!\n", fd);
+ fdt->fd[fd] = NULL;
+ }
+#endif
+ error = fd;
+
+out:
+ spin_unlock(&files->file_lock);
+ return error;
+}
+
+/*
+ * copied from fd_install
+ */
+static void task_fd_install(
+ struct binder_proc *proc, unsigned int fd, struct file *file)
+{
+ struct files_struct *files = proc->files;
+ struct fdtable *fdt;
+
+ if (files == NULL)
+ return;
+
+ spin_lock(&files->file_lock);
+ fdt = files_fdtable(files);
+ BUG_ON(fdt->fd[fd] != NULL);
+ rcu_assign_pointer(fdt->fd[fd], file);
+ spin_unlock(&files->file_lock);
+}
+
+/*
+ * copied from __put_unused_fd in open.c
+ */
+static void __put_unused_fd(struct files_struct *files, unsigned int fd)
+{
+ struct fdtable *fdt = files_fdtable(files);
+ __FD_CLR(fd, fdt->open_fds);
+ if (fd < files->next_fd)
+ files->next_fd = fd;
+}
+
+/*
+ * copied from sys_close
+ */
+static long task_close_fd(struct binder_proc *proc, unsigned int fd)
+{
+ struct file *filp;
+ struct files_struct *files = proc->files;
+ struct fdtable *fdt;
+ int retval;
+
+ if (files == NULL)
+ return -ESRCH;
+
+ spin_lock(&files->file_lock);
+ fdt = files_fdtable(files);
+ if (fd >= fdt->max_fds)
+ goto out_unlock;
+ filp = fdt->fd[fd];
+ if (!filp)
+ goto out_unlock;
+ rcu_assign_pointer(fdt->fd[fd], NULL);
+ FD_CLR(fd, fdt->close_on_exec);
+ __put_unused_fd(files, fd);
+ spin_unlock(&files->file_lock);
+ retval = filp_close(filp, files);
+
+ /* can't restart close syscall because file table entry was cleared */
+ if (unlikely(retval == -ERESTARTSYS ||
+ retval == -ERESTARTNOINTR ||
+ retval == -ERESTARTNOHAND ||
+ retval == -ERESTART_RESTARTBLOCK))
+ retval = -EINTR;
+
+ return retval;
+
+out_unlock:
+ spin_unlock(&files->file_lock);
+ return -EBADF;
+}
+
+static void binder_set_nice(long nice)
+{
+ long min_nice;
+ if (can_nice(current, nice)) {
+ set_user_nice(current, nice);
+ return;
+ }
+ min_nice = 20 - current->signal->rlim[RLIMIT_NICE].rlim_cur;
+ binder_debug(BINDER_DEBUG_PRIORITY_CAP,
+		     "binder: %d: nice value %ld not allowed, using "
+		     "%ld instead\n", current->pid, nice, min_nice);
+ set_user_nice(current, min_nice);
+ if (min_nice < 20)
+ return;
+ binder_user_error("binder: %d RLIMIT_NICE not set\n", current->pid);
+}
+
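+/*
+ * Buffers are laid out back to back in the mmap'ed area, so the usable size
+ * of a buffer's data[] is the distance to the next buffer by address (or to
+ * the end of the area for the last buffer).
+ */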
+static size_t binder_buffer_size(struct binder_proc *proc,
+ struct binder_buffer *buffer)
+{
+ if (list_is_last(&buffer->entry, &proc->buffers))
+ return proc->buffer + proc->buffer_size - (void *)buffer->data;
+ else
+ return (size_t)list_entry(buffer->entry.next,
+ struct binder_buffer, entry) - (size_t)buffer->data;
+}
+
+static void binder_insert_free_buffer(struct binder_proc *proc,
+ struct binder_buffer *new_buffer)
+{
+ struct rb_node **p = &proc->free_buffers.rb_node;
+ struct rb_node *parent = NULL;
+ struct binder_buffer *buffer;
+ size_t buffer_size;
+ size_t new_buffer_size;
+
+ BUG_ON(!new_buffer->free);
+
+ new_buffer_size = binder_buffer_size(proc, new_buffer);
+
+ binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "binder: %d: add free buffer, size %zd, "
+ "at %p\n", proc->pid, new_buffer_size, new_buffer);
+
+ while (*p) {
+ parent = *p;
+ buffer = rb_entry(parent, struct binder_buffer, rb_node);
+ BUG_ON(!buffer->free);
+
+ buffer_size = binder_buffer_size(proc, buffer);
+
+ if (new_buffer_size < buffer_size)
+ p = &parent->rb_left;
+ else
+ p = &parent->rb_right;
+ }
+ rb_link_node(&new_buffer->rb_node, parent, p);
+ rb_insert_color(&new_buffer->rb_node, &proc->free_buffers);
+}
+
+static void binder_insert_allocated_buffer(struct binder_proc *proc,
+ struct binder_buffer *new_buffer)
+{
+ struct rb_node **p = &proc->allocated_buffers.rb_node;
+ struct rb_node *parent = NULL;
+ struct binder_buffer *buffer;
+
+ BUG_ON(new_buffer->free);
+
+ while (*p) {
+ parent = *p;
+ buffer = rb_entry(parent, struct binder_buffer, rb_node);
+ BUG_ON(buffer->free);
+
+ if (new_buffer < buffer)
+ p = &parent->rb_left;
+ else if (new_buffer > buffer)
+ p = &parent->rb_right;
+ else
+ BUG();
+ }
+ rb_link_node(&new_buffer->rb_node, parent, p);
+ rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers);
+}
+
+static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc,
+ void __user *user_ptr)
+{
+ struct rb_node *n = proc->allocated_buffers.rb_node;
+ struct binder_buffer *buffer;
+ struct binder_buffer *kern_ptr;
+
+ kern_ptr = user_ptr - proc->user_buffer_offset
+ - offsetof(struct binder_buffer, data);
+
+ while (n) {
+ buffer = rb_entry(n, struct binder_buffer, rb_node);
+ BUG_ON(buffer->free);
+
+ if (kern_ptr < buffer)
+ n = n->rb_left;
+ else if (kern_ptr > buffer)
+ n = n->rb_right;
+ else
+ return buffer;
+ }
+ return NULL;
+}
+
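+/*
+ * Allocate (allocate != 0) or free the physical pages backing [start, end)
+ * of the buffer area, mapping each page both into the kernel and into the
+ * process's vma at the corresponding user address.
+ */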
+static int binder_update_page_range(struct binder_proc *proc, int allocate,
+ void *start, void *end,
+ struct vm_area_struct *vma)
+{
+ void *page_addr;
+ unsigned long user_page_addr;
+ struct vm_struct tmp_area;
+ struct page **page;
+ struct mm_struct *mm;
+
+ binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "binder: %d: %s pages %p-%p\n", proc->pid,
+ allocate ? "allocate" : "free", start, end);
+
+ if (end <= start)
+ return 0;
+
+ if (vma)
+ mm = NULL;
+ else
+ mm = get_task_mm(proc->tsk);
+
+ if (mm) {
+ down_write(&mm->mmap_sem);
+ vma = proc->vma;
+ }
+
+ if (allocate == 0)
+ goto free_range;
+
+ if (vma == NULL) {
+ printk(KERN_ERR "binder: %d: binder_alloc_buf failed to "
+ "map pages in userspace, no vma\n", proc->pid);
+ goto err_no_vma;
+ }
+
+ for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
+ int ret;
+ struct page **page_array_ptr;
+ page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
+
+ BUG_ON(*page);
+ *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (*page == NULL) {
+ printk(KERN_ERR "binder: %d: binder_alloc_buf failed "
+ "for page at %p\n", proc->pid, page_addr);
+ goto err_alloc_page_failed;
+ }
+ tmp_area.addr = page_addr;
+ tmp_area.size = PAGE_SIZE + PAGE_SIZE /* guard page? */;
+ page_array_ptr = page;
+ ret = map_vm_area(&tmp_area, PAGE_KERNEL, &page_array_ptr);
+ if (ret) {
+ printk(KERN_ERR "binder: %d: binder_alloc_buf failed "
+ "to map page at %p in kernel\n",
+ proc->pid, page_addr);
+ goto err_map_kernel_failed;
+ }
+ user_page_addr =
+ (uintptr_t)page_addr + proc->user_buffer_offset;
+ ret = vm_insert_page(vma, user_page_addr, page[0]);
+ if (ret) {
+ printk(KERN_ERR "binder: %d: binder_alloc_buf failed "
+ "to map page at %lx in userspace\n",
+ proc->pid, user_page_addr);
+ goto err_vm_insert_page_failed;
+ }
+ /* vm_insert_page does not seem to increment the refcount */
+ }
+ if (mm) {
+ up_write(&mm->mmap_sem);
+ mmput(mm);
+ }
+ return 0;
+
+free_range:
+ for (page_addr = end - PAGE_SIZE; page_addr >= start;
+ page_addr -= PAGE_SIZE) {
+ page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
+ if (vma)
+ zap_page_range(vma, (uintptr_t)page_addr +
+ proc->user_buffer_offset, PAGE_SIZE, NULL);
+err_vm_insert_page_failed:
+ unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
+err_map_kernel_failed:
+ __free_page(*page);
+ *page = NULL;
+err_alloc_page_failed:
+ ;
+ }
+err_no_vma:
+ if (mm) {
+ up_write(&mm->mmap_sem);
+ mmput(mm);
+ }
+ return -ENOMEM;
+}
+
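+/*
+ * Best-fit allocation from proc->free_buffers. The chosen free buffer is
+ * split when there is room left over, and its backing pages are populated
+ * via binder_update_page_range().
+ */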
+static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
+ size_t data_size,
+ size_t offsets_size, int is_async)
+{
+ struct rb_node *n = proc->free_buffers.rb_node;
+ struct binder_buffer *buffer;
+ size_t buffer_size;
+ struct rb_node *best_fit = NULL;
+ void *has_page_addr;
+ void *end_page_addr;
+ size_t size;
+
+ if (proc->vma == NULL) {
+ printk(KERN_ERR "binder: %d: binder_alloc_buf, no vma\n",
+ proc->pid);
+ return NULL;
+ }
+
+ size = ALIGN(data_size, sizeof(void *)) +
+ ALIGN(offsets_size, sizeof(void *));
+
+ if (size < data_size || size < offsets_size) {
+ binder_user_error("binder: %d: got transaction with invalid "
+ "size %zd-%zd\n", proc->pid, data_size, offsets_size);
+ return NULL;
+ }
+
+ if (is_async &&
+ proc->free_async_space < size + sizeof(struct binder_buffer)) {
+ binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+			     "binder: %d: binder_alloc_buf size %zd "
+ "failed, no async space left\n", proc->pid, size);
+ return NULL;
+ }
+
+ while (n) {
+ buffer = rb_entry(n, struct binder_buffer, rb_node);
+ BUG_ON(!buffer->free);
+ buffer_size = binder_buffer_size(proc, buffer);
+
+ if (size < buffer_size) {
+ best_fit = n;
+ n = n->rb_left;
+ } else if (size > buffer_size)
+ n = n->rb_right;
+ else {
+ best_fit = n;
+ break;
+ }
+ }
+ if (best_fit == NULL) {
+ printk(KERN_ERR "binder: %d: binder_alloc_buf size %zd failed, "
+ "no address space\n", proc->pid, size);
+ return NULL;
+ }
+ if (n == NULL) {
+ buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
+ buffer_size = binder_buffer_size(proc, buffer);
+ }
+
+ binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+		     "binder: %d: binder_alloc_buf size %zd got "
+		     "buffer %p size %zd\n", proc->pid, size, buffer, buffer_size);
+
+ has_page_addr =
+ (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
+ if (n == NULL) {
+ if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
+ buffer_size = size; /* no room for other buffers */
+ else
+ buffer_size = size + sizeof(struct binder_buffer);
+ }
+ end_page_addr =
+ (void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
+ if (end_page_addr > has_page_addr)
+ end_page_addr = has_page_addr;
+ if (binder_update_page_range(proc, 1,
+ (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL))
+ return NULL;
+
+ rb_erase(best_fit, &proc->free_buffers);
+ buffer->free = 0;
+ binder_insert_allocated_buffer(proc, buffer);
+ if (buffer_size != size) {
+ struct binder_buffer *new_buffer = (void *)buffer->data + size;
+ list_add(&new_buffer->entry, &buffer->entry);
+ new_buffer->free = 1;
+ binder_insert_free_buffer(proc, new_buffer);
+ }
+ binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "binder: %d: binder_alloc_buf size %zd got "
+ "%p\n", proc->pid, size, buffer);
+ buffer->data_size = data_size;
+ buffer->offsets_size = offsets_size;
+ buffer->async_transaction = is_async;
+ if (is_async) {
+ proc->free_async_space -= size + sizeof(struct binder_buffer);
+ binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
+ "binder: %d: binder_alloc_buf size %zd "
+ "async free %zd\n", proc->pid, size,
+ proc->free_async_space);
+ }
+
+ return buffer;
+}
+
+static void *buffer_start_page(struct binder_buffer *buffer)
+{
+ return (void *)((uintptr_t)buffer & PAGE_MASK);
+}
+
+static void *buffer_end_page(struct binder_buffer *buffer)
+{
+ return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
+}
+
+static void binder_delete_free_buffer(struct binder_proc *proc,
+ struct binder_buffer *buffer)
+{
+ struct binder_buffer *prev, *next = NULL;
+ int free_page_end = 1;
+ int free_page_start = 1;
+
+ BUG_ON(proc->buffers.next == &buffer->entry);
+ prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
+ BUG_ON(!prev->free);
+ if (buffer_end_page(prev) == buffer_start_page(buffer)) {
+ free_page_start = 0;
+ if (buffer_end_page(prev) == buffer_end_page(buffer))
+ free_page_end = 0;
+ binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "binder: %d: merge free, buffer %p "
+ "share page with %p\n", proc->pid, buffer, prev);
+ }
+
+ if (!list_is_last(&buffer->entry, &proc->buffers)) {
+ next = list_entry(buffer->entry.next,
+ struct binder_buffer, entry);
+ if (buffer_start_page(next) == buffer_end_page(buffer)) {
+ free_page_end = 0;
+ if (buffer_start_page(next) ==
+ buffer_start_page(buffer))
+ free_page_start = 0;
+ binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "binder: %d: merge free, buffer"
+ " %p share page with %p\n", proc->pid,
+				     buffer, next);
+ }
+ }
+ list_del(&buffer->entry);
+ if (free_page_start || free_page_end) {
+ binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "binder: %d: merge free, buffer %p do "
+			     "not share page%s%s with %p or %p\n",
+ proc->pid, buffer, free_page_start ? "" : " end",
+ free_page_end ? "" : " start", prev, next);
+ binder_update_page_range(proc, 0, free_page_start ?
+ buffer_start_page(buffer) : buffer_end_page(buffer),
+ (free_page_end ? buffer_end_page(buffer) :
+ buffer_start_page(buffer)) + PAGE_SIZE, NULL);
+ }
+}
+
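+/*
+ * Return a buffer to the free tree, releasing its backing pages and merging
+ * it with free neighbours so no two adjacent buffers on the address list are
+ * both free.
+ */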
+static void binder_free_buf(struct binder_proc *proc,
+ struct binder_buffer *buffer)
+{
+ size_t size, buffer_size;
+
+ buffer_size = binder_buffer_size(proc, buffer);
+
+ size = ALIGN(buffer->data_size, sizeof(void *)) +
+ ALIGN(buffer->offsets_size, sizeof(void *));
+
+ binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "binder: %d: binder_free_buf %p size %zd buffer"
+ "_size %zd\n", proc->pid, buffer, size, buffer_size);
+
+ BUG_ON(buffer->free);
+ BUG_ON(size > buffer_size);
+ BUG_ON(buffer->transaction != NULL);
+ BUG_ON((void *)buffer < proc->buffer);
+ BUG_ON((void *)buffer > proc->buffer + proc->buffer_size);
+
+ if (buffer->async_transaction) {
+ proc->free_async_space += size + sizeof(struct binder_buffer);
+
+ binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
+ "binder: %d: binder_free_buf size %zd "
+ "async free %zd\n", proc->pid, size,
+ proc->free_async_space);
+ }
+
+ binder_update_page_range(proc, 0,
+ (void *)PAGE_ALIGN((uintptr_t)buffer->data),
+ (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
+ NULL);
+ rb_erase(&buffer->rb_node, &proc->allocated_buffers);
+ buffer->free = 1;
+ if (!list_is_last(&buffer->entry, &proc->buffers)) {
+ struct binder_buffer *next = list_entry(buffer->entry.next,
+ struct binder_buffer, entry);
+ if (next->free) {
+ rb_erase(&next->rb_node, &proc->free_buffers);
+ binder_delete_free_buffer(proc, next);
+ }
+ }
+ if (proc->buffers.next != &buffer->entry) {
+ struct binder_buffer *prev = list_entry(buffer->entry.prev,
+ struct binder_buffer, entry);
+ if (prev->free) {
+ binder_delete_free_buffer(proc, buffer);
+ rb_erase(&prev->rb_node, &proc->free_buffers);
+ buffer = prev;
+ }
+ }
+ binder_insert_free_buffer(proc, buffer);
+}
+
+static struct binder_node *binder_get_node(struct binder_proc *proc,
+ void __user *ptr)
+{
+ struct rb_node *n = proc->nodes.rb_node;
+ struct binder_node *node;
+
+ while (n) {
+ node = rb_entry(n, struct binder_node, rb_node);
+
+ if (ptr < node->ptr)
+ n = n->rb_left;
+ else if (ptr > node->ptr)
+ n = n->rb_right;
+ else
+ return node;
+ }
+ return NULL;
+}
+
+static struct binder_node *binder_new_node(struct binder_proc *proc,
+ void __user *ptr,
+ void __user *cookie)
+{
+ struct rb_node **p = &proc->nodes.rb_node;
+ struct rb_node *parent = NULL;
+ struct binder_node *node;
+
+ while (*p) {
+ parent = *p;
+ node = rb_entry(parent, struct binder_node, rb_node);
+
+ if (ptr < node->ptr)
+ p = &(*p)->rb_left;
+ else if (ptr > node->ptr)
+ p = &(*p)->rb_right;
+ else
+ return NULL;
+ }
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (node == NULL)
+ return NULL;
+ binder_stats_created(BINDER_STAT_NODE);
+ rb_link_node(&node->rb_node, parent, p);
+ rb_insert_color(&node->rb_node, &proc->nodes);
+ node->debug_id = ++binder_last_id;
+ node->proc = proc;
+ node->ptr = ptr;
+ node->cookie = cookie;
+ node->work.type = BINDER_WORK_NODE;
+ INIT_LIST_HEAD(&node->work.entry);
+ INIT_LIST_HEAD(&node->async_todo);
+ binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+ "binder: %d:%d node %d u%p c%p created\n",
+ proc->pid, current->pid, node->debug_id,
+ node->ptr, node->cookie);
+ return node;
+}
+
+static int binder_inc_node(struct binder_node *node, int strong, int internal,
+ struct list_head *target_list)
+{
+ if (strong) {
+ if (internal) {
+ if (target_list == NULL &&
+ node->internal_strong_refs == 0 &&
+ !(node == binder_context_mgr_node &&
+ node->has_strong_ref)) {
+ printk(KERN_ERR "binder: invalid inc strong "
+ "node for %d\n", node->debug_id);
+ return -EINVAL;
+ }
+ node->internal_strong_refs++;
+ } else
+ node->local_strong_refs++;
+ if (!node->has_strong_ref && target_list) {
+ list_del_init(&node->work.entry);
+ list_add_tail(&node->work.entry, target_list);
+ }
+ } else {
+ if (!internal)
+ node->local_weak_refs++;
+ if (!node->has_weak_ref && list_empty(&node->work.entry)) {
+ if (target_list == NULL) {
+ printk(KERN_ERR "binder: invalid inc weak node "
+ "for %d\n", node->debug_id);
+ return -EINVAL;
+ }
+ list_add_tail(&node->work.entry, target_list);
+ }
+ }
+ return 0;
+}
+
+static int binder_dec_node(struct binder_node *node, int strong, int internal)
+{
+ if (strong) {
+ if (internal)
+ node->internal_strong_refs--;
+ else
+ node->local_strong_refs--;
+ if (node->local_strong_refs || node->internal_strong_refs)
+ return 0;
+ } else {
+ if (!internal)
+ node->local_weak_refs--;
+ if (node->local_weak_refs || !hlist_empty(&node->refs))
+ return 0;
+ }
+ if (node->proc && (node->has_strong_ref || node->has_weak_ref)) {
+ if (list_empty(&node->work.entry)) {
+ list_add_tail(&node->work.entry, &node->proc->todo);
+ wake_up_interruptible(&node->proc->wait);
+ }
+ } else {
+ if (hlist_empty(&node->refs) && !node->local_strong_refs &&
+ !node->local_weak_refs) {
+ list_del_init(&node->work.entry);
+ if (node->proc) {
+ rb_erase(&node->rb_node, &node->proc->nodes);
+ binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+ "binder: refless node %d deleted\n",
+ node->debug_id);
+ } else {
+ hlist_del(&node->dead_node);
+ binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+ "binder: dead node %d deleted\n",
+ node->debug_id);
+ }
+ kfree(node);
+ binder_stats_deleted(BINDER_STAT_NODE);
+ }
+ }
+
+ return 0;
+}
+
+
+static struct binder_ref *binder_get_ref(struct binder_proc *proc,
+ uint32_t desc)
+{
+ struct rb_node *n = proc->refs_by_desc.rb_node;
+ struct binder_ref *ref;
+
+ while (n) {
+ ref = rb_entry(n, struct binder_ref, rb_node_desc);
+
+ if (desc < ref->desc)
+ n = n->rb_left;
+ else if (desc > ref->desc)
+ n = n->rb_right;
+ else
+ return ref;
+ }
+ return NULL;
+}
+
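+/*
+ * Find proc's existing ref to node, or create one with the lowest unused
+ * descriptor (descriptor 0 is reserved for the context manager).
+ */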
+static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
+ struct binder_node *node)
+{
+ struct rb_node *n;
+ struct rb_node **p = &proc->refs_by_node.rb_node;
+ struct rb_node *parent = NULL;
+ struct binder_ref *ref, *new_ref;
+
+ while (*p) {
+ parent = *p;
+ ref = rb_entry(parent, struct binder_ref, rb_node_node);
+
+ if (node < ref->node)
+ p = &(*p)->rb_left;
+ else if (node > ref->node)
+ p = &(*p)->rb_right;
+ else
+ return ref;
+ }
+ new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
+ if (new_ref == NULL)
+ return NULL;
+ binder_stats_created(BINDER_STAT_REF);
+ new_ref->debug_id = ++binder_last_id;
+ new_ref->proc = proc;
+ new_ref->node = node;
+ rb_link_node(&new_ref->rb_node_node, parent, p);
+ rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
+
+ new_ref->desc = (node == binder_context_mgr_node) ? 0 : 1;
+ for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
+ ref = rb_entry(n, struct binder_ref, rb_node_desc);
+ if (ref->desc > new_ref->desc)
+ break;
+ new_ref->desc = ref->desc + 1;
+ }
+
+ p = &proc->refs_by_desc.rb_node;
+ while (*p) {
+ parent = *p;
+ ref = rb_entry(parent, struct binder_ref, rb_node_desc);
+
+ if (new_ref->desc < ref->desc)
+ p = &(*p)->rb_left;
+ else if (new_ref->desc > ref->desc)
+ p = &(*p)->rb_right;
+ else
+ BUG();
+ }
+ rb_link_node(&new_ref->rb_node_desc, parent, p);
+ rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
+ if (node) {
+ hlist_add_head(&new_ref->node_entry, &node->refs);
+
+ binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+ "binder: %d new ref %d desc %d for "
+ "node %d\n", proc->pid, new_ref->debug_id,
+ new_ref->desc, node->debug_id);
+ } else {
+ binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+ "binder: %d new ref %d desc %d for "
+ "dead node\n", proc->pid, new_ref->debug_id,
+ new_ref->desc);
+ }
+ return new_ref;
+}
+
+static void binder_delete_ref(struct binder_ref *ref)
+{
+ binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+ "binder: %d delete ref %d desc %d for "
+ "node %d\n", ref->proc->pid, ref->debug_id,
+ ref->desc, ref->node->debug_id);
+
+ rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
+ rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
+ if (ref->strong)
+ binder_dec_node(ref->node, 1, 1);
+ hlist_del(&ref->node_entry);
+ binder_dec_node(ref->node, 0, 1);
+ if (ref->death) {
+ binder_debug(BINDER_DEBUG_DEAD_BINDER,
+ "binder: %d delete ref %d desc %d "
+ "has death notification\n", ref->proc->pid,
+ ref->debug_id, ref->desc);
+ list_del(&ref->death->work.entry);
+ kfree(ref->death);
+ binder_stats_deleted(BINDER_STAT_DEATH);
+ }
+ kfree(ref);
+ binder_stats_deleted(BINDER_STAT_REF);
+}
+
+static int binder_inc_ref(struct binder_ref *ref, int strong,
+ struct list_head *target_list)
+{
+ int ret;
+ if (strong) {
+ if (ref->strong == 0) {
+ ret = binder_inc_node(ref->node, 1, 1, target_list);
+ if (ret)
+ return ret;
+ }
+ ref->strong++;
+ } else {
+ if (ref->weak == 0) {
+ ret = binder_inc_node(ref->node, 0, 1, target_list);
+ if (ret)
+ return ret;
+ }
+ ref->weak++;
+ }
+ return 0;
+}
+
+
+static int binder_dec_ref(struct binder_ref *ref, int strong)
+{
+ if (strong) {
+ if (ref->strong == 0) {
+ binder_user_error("binder: %d invalid dec strong, "
+ "ref %d desc %d s %d w %d\n",
+ ref->proc->pid, ref->debug_id,
+ ref->desc, ref->strong, ref->weak);
+ return -EINVAL;
+ }
+ ref->strong--;
+ if (ref->strong == 0) {
+ int ret;
+ ret = binder_dec_node(ref->node, strong, 1);
+ if (ret)
+ return ret;
+ }
+ } else {
+ if (ref->weak == 0) {
+ binder_user_error("binder: %d invalid dec weak, "
+ "ref %d desc %d s %d w %d\n",
+ ref->proc->pid, ref->debug_id,
+ ref->desc, ref->strong, ref->weak);
+ return -EINVAL;
+ }
+ ref->weak--;
+ }
+ if (ref->strong == 0 && ref->weak == 0)
+ binder_delete_ref(ref);
+ return 0;
+}
+
+static void binder_pop_transaction(struct binder_thread *target_thread,
+ struct binder_transaction *t)
+{
+ if (target_thread) {
+ BUG_ON(target_thread->transaction_stack != t);
+ BUG_ON(target_thread->transaction_stack->from != target_thread);
+ target_thread->transaction_stack =
+ target_thread->transaction_stack->from_parent;
+ t->from = NULL;
+ }
+ t->need_reply = 0;
+ if (t->buffer)
+ t->buffer->transaction = NULL;
+ kfree(t);
+ binder_stats_deleted(BINDER_STAT_TRANSACTION);
+}
+
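+/*
+ * Deliver error_code to the first live caller on the transaction stack,
+ * discarding intermediate transactions whose sending threads have exited.
+ */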
+static void binder_send_failed_reply(struct binder_transaction *t,
+ uint32_t error_code)
+{
+ struct binder_thread *target_thread;
+ BUG_ON(t->flags & TF_ONE_WAY);
+ while (1) {
+ target_thread = t->from;
+ if (target_thread) {
+ if (target_thread->return_error != BR_OK &&
+ target_thread->return_error2 == BR_OK) {
+ target_thread->return_error2 =
+ target_thread->return_error;
+ target_thread->return_error = BR_OK;
+ }
+ if (target_thread->return_error == BR_OK) {
+ binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
+ "binder: send failed reply for "
+ "transaction %d to %d:%d\n",
+ t->debug_id, target_thread->proc->pid,
+ target_thread->pid);
+
+ binder_pop_transaction(target_thread, t);
+ target_thread->return_error = error_code;
+ wake_up_interruptible(&target_thread->wait);
+ } else {
+ printk(KERN_ERR "binder: reply failed, target "
+ "thread, %d:%d, has error code %d "
+ "already\n", target_thread->proc->pid,
+ target_thread->pid,
+ target_thread->return_error);
+ }
+ return;
+ } else {
+ struct binder_transaction *next = t->from_parent;
+
+ binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
+ "binder: send failed reply "
+ "for transaction %d, target dead\n",
+ t->debug_id);
+
+ binder_pop_transaction(target_thread, t);
+ if (next == NULL) {
+ binder_debug(BINDER_DEBUG_DEAD_BINDER,
+ "binder: reply failed,"
+ " no target thread at root\n");
+ return;
+ }
+ t = next;
+ binder_debug(BINDER_DEBUG_DEAD_BINDER,
+ "binder: reply failed, no target "
+ "thread -- retry %d\n", t->debug_id);
+ }
+ }
+}
+
+static void binder_transaction_buffer_release(struct binder_proc *proc,
+ struct binder_buffer *buffer,
+ size_t *failed_at)
+{
+ size_t *offp, *off_end;
+ int debug_id = buffer->debug_id;
+
+ binder_debug(BINDER_DEBUG_TRANSACTION,
+ "binder: %d buffer release %d, size %zd-%zd, failed at %p\n",
+ proc->pid, buffer->debug_id,
+ buffer->data_size, buffer->offsets_size, failed_at);
+
+ if (buffer->target_node)
+ binder_dec_node(buffer->target_node, 1, 0);
+
+ offp = (size_t *)(buffer->data + ALIGN(buffer->data_size, sizeof(void *)));
+ if (failed_at)
+ off_end = failed_at;
+ else
+ off_end = (void *)offp + buffer->offsets_size;
+ for (; offp < off_end; offp++) {
+ struct flat_binder_object *fp;
+ if (*offp > buffer->data_size - sizeof(*fp) ||
+ buffer->data_size < sizeof(*fp) ||
+ !IS_ALIGNED(*offp, sizeof(void *))) {
+			printk(KERN_ERR "binder: transaction release %d bad "
+ "offset %zd, size %zd\n", debug_id,
+ *offp, buffer->data_size);
+ continue;
+ }
+ fp = (struct flat_binder_object *)(buffer->data + *offp);
+ switch (fp->type) {
+ case BINDER_TYPE_BINDER:
+ case BINDER_TYPE_WEAK_BINDER: {
+ struct binder_node *node = binder_get_node(proc, fp->binder);
+ if (node == NULL) {
+ printk(KERN_ERR "binder: transaction release %d"
+ " bad node %p\n", debug_id, fp->binder);
+ break;
+ }
+ binder_debug(BINDER_DEBUG_TRANSACTION,
+ " node %d u%p\n",
+ node->debug_id, node->ptr);
+ binder_dec_node(node, fp->type == BINDER_TYPE_BINDER, 0);
+ } break;
+ case BINDER_TYPE_HANDLE:
+ case BINDER_TYPE_WEAK_HANDLE: {
+ struct binder_ref *ref = binder_get_ref(proc, fp->handle);
+ if (ref == NULL) {
+ printk(KERN_ERR "binder: transaction release %d"
+ " bad handle %ld\n", debug_id,
+ fp->handle);
+ break;
+ }
+ binder_debug(BINDER_DEBUG_TRANSACTION,
+ " ref %d desc %d (node %d)\n",
+ ref->debug_id, ref->desc, ref->node->debug_id);
+ binder_dec_ref(ref, fp->type == BINDER_TYPE_HANDLE);
+ } break;
+
+ case BINDER_TYPE_FD:
+ binder_debug(BINDER_DEBUG_TRANSACTION,
+ " fd %ld\n", fp->handle);
+ if (failed_at)
+ task_close_fd(proc, fp->handle);
+ break;
+
+ default:
+ printk(KERN_ERR "binder: transaction release %d bad "
+ "object type %lx\n", debug_id, fp->type);
+ break;
+ }
+ }
+}
+
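+/*
+ * Core transaction path: resolve the target, allocate a buffer in the
+ * target's address space, copy data and offsets in from the sender, and
+ * translate each flat_binder_object so node pointers become handles (and
+ * vice versa) and file descriptors are reinstalled in the target process.
+ * The work is then queued and the target woken.
+ */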
+static void binder_transaction(struct binder_proc *proc,
+ struct binder_thread *thread,
+ struct binder_transaction_data *tr, int reply)
+{
+ struct binder_transaction *t;
+ struct binder_work *tcomplete;
+ size_t *offp, *off_end;
+ struct binder_proc *target_proc;
+ struct binder_thread *target_thread = NULL;
+ struct binder_node *target_node = NULL;
+ struct list_head *target_list;
+ wait_queue_head_t *target_wait;
+ struct binder_transaction *in_reply_to = NULL;
+ struct binder_transaction_log_entry *e;
+ uint32_t return_error;
+
+ e = binder_transaction_log_add(&binder_transaction_log);
+ e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
+ e->from_proc = proc->pid;
+ e->from_thread = thread->pid;
+ e->target_handle = tr->target.handle;
+ e->data_size = tr->data_size;
+ e->offsets_size = tr->offsets_size;
+
+ if (reply) {
+ in_reply_to = thread->transaction_stack;
+ if (in_reply_to == NULL) {
+ binder_user_error("binder: %d:%d got reply transaction "
+ "with no transaction stack\n",
+ proc->pid, thread->pid);
+ return_error = BR_FAILED_REPLY;
+ goto err_empty_call_stack;
+ }
+ binder_set_nice(in_reply_to->saved_priority);
+ if (in_reply_to->to_thread != thread) {
+ binder_user_error("binder: %d:%d got reply transaction "
+ "with bad transaction stack,"
+ " transaction %d has target %d:%d\n",
+ proc->pid, thread->pid, in_reply_to->debug_id,
+ in_reply_to->to_proc ?
+ in_reply_to->to_proc->pid : 0,
+ in_reply_to->to_thread ?
+ in_reply_to->to_thread->pid : 0);
+ return_error = BR_FAILED_REPLY;
+ in_reply_to = NULL;
+ goto err_bad_call_stack;
+ }
+ thread->transaction_stack = in_reply_to->to_parent;
+ target_thread = in_reply_to->from;
+ if (target_thread == NULL) {
+ return_error = BR_DEAD_REPLY;
+ goto err_dead_binder;
+ }
+ if (target_thread->transaction_stack != in_reply_to) {
+ binder_user_error("binder: %d:%d got reply transaction "
+ "with bad target transaction stack %d, "
+ "expected %d\n",
+ proc->pid, thread->pid,
+ target_thread->transaction_stack ?
+ target_thread->transaction_stack->debug_id : 0,
+ in_reply_to->debug_id);
+ return_error = BR_FAILED_REPLY;
+ in_reply_to = NULL;
+ target_thread = NULL;
+ goto err_dead_binder;
+ }
+ target_proc = target_thread->proc;
+ } else {
+ if (tr->target.handle) {
+ struct binder_ref *ref;
+ ref = binder_get_ref(proc, tr->target.handle);
+ if (ref == NULL) {
+ binder_user_error("binder: %d:%d got "
+ "transaction to invalid handle\n",
+ proc->pid, thread->pid);
+ return_error = BR_FAILED_REPLY;
+ goto err_invalid_target_handle;
+ }
+ target_node = ref->node;
+ } else {
+ target_node = binder_context_mgr_node;
+ if (target_node == NULL) {
+ return_error = BR_DEAD_REPLY;
+ goto err_no_context_mgr_node;
+ }
+ }
+ e->to_node = target_node->debug_id;
+ target_proc = target_node->proc;
+ if (target_proc == NULL) {
+ return_error = BR_DEAD_REPLY;
+ goto err_dead_binder;
+ }
+ if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
+ struct binder_transaction *tmp;
+ tmp = thread->transaction_stack;
+ if (tmp->to_thread != thread) {
+ binder_user_error("binder: %d:%d got new "
+ "transaction with bad transaction stack"
+ ", transaction %d has target %d:%d\n",
+ proc->pid, thread->pid, tmp->debug_id,
+ tmp->to_proc ? tmp->to_proc->pid : 0,
+ tmp->to_thread ?
+ tmp->to_thread->pid : 0);
+ return_error = BR_FAILED_REPLY;
+ goto err_bad_call_stack;
+ }
+ while (tmp) {
+ if (tmp->from && tmp->from->proc == target_proc)
+ target_thread = tmp->from;
+ tmp = tmp->from_parent;
+ }
+ }
+ }
+ if (target_thread) {
+ e->to_thread = target_thread->pid;
+ target_list = &target_thread->todo;
+ target_wait = &target_thread->wait;
+ } else {
+ target_list = &target_proc->todo;
+ target_wait = &target_proc->wait;
+ }
+ e->to_proc = target_proc->pid;
+
+ /* TODO: reuse incoming transaction for reply */
+ t = kzalloc(sizeof(*t), GFP_KERNEL);
+ if (t == NULL) {
+ return_error = BR_FAILED_REPLY;
+ goto err_alloc_t_failed;
+ }
+ binder_stats_created(BINDER_STAT_TRANSACTION);
+
+ tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
+ if (tcomplete == NULL) {
+ return_error = BR_FAILED_REPLY;
+ goto err_alloc_tcomplete_failed;
+ }
+ binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
+
+ t->debug_id = ++binder_last_id;
+ e->debug_id = t->debug_id;
+
+ if (reply)
+ binder_debug(BINDER_DEBUG_TRANSACTION,
+ "binder: %d:%d BC_REPLY %d -> %d:%d, "
+ "data %p-%p size %zd-%zd\n",
+ proc->pid, thread->pid, t->debug_id,
+ target_proc->pid, target_thread->pid,
+ tr->data.ptr.buffer, tr->data.ptr.offsets,
+ tr->data_size, tr->offsets_size);
+ else
+ binder_debug(BINDER_DEBUG_TRANSACTION,
+ "binder: %d:%d BC_TRANSACTION %d -> "
+ "%d - node %d, data %p-%p size %zd-%zd\n",
+ proc->pid, thread->pid, t->debug_id,
+ target_proc->pid, target_node->debug_id,
+ tr->data.ptr.buffer, tr->data.ptr.offsets,
+ tr->data_size, tr->offsets_size);
+
+ if (!reply && !(tr->flags & TF_ONE_WAY))
+ t->from = thread;
+ else
+ t->from = NULL;
+ t->sender_euid = proc->tsk->cred->euid;
+ t->to_proc = target_proc;
+ t->to_thread = target_thread;
+ t->code = tr->code;
+ t->flags = tr->flags;
+ t->priority = task_nice(current);
+ t->buffer = binder_alloc_buf(target_proc, tr->data_size,
+ tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
+ if (t->buffer == NULL) {
+ return_error = BR_FAILED_REPLY;
+ goto err_binder_alloc_buf_failed;
+ }
+ t->buffer->allow_user_free = 0;
+ t->buffer->debug_id = t->debug_id;
+ t->buffer->transaction = t;
+ t->buffer->target_node = target_node;
+ if (target_node)
+ binder_inc_node(target_node, 1, 0, NULL);
+
+ offp = (size_t *)(t->buffer->data + ALIGN(tr->data_size, sizeof(void *)));
+
+ if (copy_from_user(t->buffer->data, tr->data.ptr.buffer, tr->data_size)) {
+ binder_user_error("binder: %d:%d got transaction with invalid "
+ "data ptr\n", proc->pid, thread->pid);
+ return_error = BR_FAILED_REPLY;
+ goto err_copy_data_failed;
+ }
+ if (copy_from_user(offp, tr->data.ptr.offsets, tr->offsets_size)) {
+ binder_user_error("binder: %d:%d got transaction with invalid "
+ "offsets ptr\n", proc->pid, thread->pid);
+ return_error = BR_FAILED_REPLY;
+ goto err_copy_data_failed;
+ }
+ if (!IS_ALIGNED(tr->offsets_size, sizeof(size_t))) {
+ binder_user_error("binder: %d:%d got transaction with "
+ "invalid offsets size, %zd\n",
+ proc->pid, thread->pid, tr->offsets_size);
+ return_error = BR_FAILED_REPLY;
+ goto err_bad_offset;
+ }
+ off_end = (void *)offp + tr->offsets_size;
+ for (; offp < off_end; offp++) {
+ struct flat_binder_object *fp;
+ if (*offp > t->buffer->data_size - sizeof(*fp) ||
+ t->buffer->data_size < sizeof(*fp) ||
+ !IS_ALIGNED(*offp, sizeof(void *))) {
+ binder_user_error("binder: %d:%d got transaction with "
+ "invalid offset, %zd\n",
+ proc->pid, thread->pid, *offp);
+ return_error = BR_FAILED_REPLY;
+ goto err_bad_offset;
+ }
+ fp = (struct flat_binder_object *)(t->buffer->data + *offp);
+ switch (fp->type) {
+ case BINDER_TYPE_BINDER:
+ case BINDER_TYPE_WEAK_BINDER: {
+ struct binder_ref *ref;
+ struct binder_node *node = binder_get_node(proc, fp->binder);
+ if (node == NULL) {
+ node = binder_new_node(proc, fp->binder, fp->cookie);
+ if (node == NULL) {
+ return_error = BR_FAILED_REPLY;
+ goto err_binder_new_node_failed;
+ }
+ node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
+ node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
+ }
+ if (fp->cookie != node->cookie) {
+ binder_user_error("binder: %d:%d sending u%p "
+ "node %d, cookie mismatch %p != %p\n",
+ proc->pid, thread->pid,
+ fp->binder, node->debug_id,
+ fp->cookie, node->cookie);
+ goto err_binder_get_ref_for_node_failed;
+ }
+ ref = binder_get_ref_for_node(target_proc, node);
+ if (ref == NULL) {
+ return_error = BR_FAILED_REPLY;
+ goto err_binder_get_ref_for_node_failed;
+ }
+ if (fp->type == BINDER_TYPE_BINDER)
+ fp->type = BINDER_TYPE_HANDLE;
+ else
+ fp->type = BINDER_TYPE_WEAK_HANDLE;
+ fp->handle = ref->desc;
+ binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
+ &thread->todo);
+
+ binder_debug(BINDER_DEBUG_TRANSACTION,
+ " node %d u%p -> ref %d desc %d\n",
+ node->debug_id, node->ptr, ref->debug_id,
+ ref->desc);
+ } break;
+ case BINDER_TYPE_HANDLE:
+ case BINDER_TYPE_WEAK_HANDLE: {
+ struct binder_ref *ref = binder_get_ref(proc, fp->handle);
+ if (ref == NULL) {
+ binder_user_error("binder: %d:%d got "
+ "transaction with invalid "
+ "handle, %ld\n", proc->pid,
+ thread->pid, fp->handle);
+ return_error = BR_FAILED_REPLY;
+ goto err_binder_get_ref_failed;
+ }
+ if (ref->node->proc == target_proc) {
+ if (fp->type == BINDER_TYPE_HANDLE)
+ fp->type = BINDER_TYPE_BINDER;
+ else
+ fp->type = BINDER_TYPE_WEAK_BINDER;
+ fp->binder = ref->node->ptr;
+ fp->cookie = ref->node->cookie;
+ binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL);
+ binder_debug(BINDER_DEBUG_TRANSACTION,
+ " ref %d desc %d -> node %d u%p\n",
+ ref->debug_id, ref->desc, ref->node->debug_id,
+ ref->node->ptr);
+ } else {
+ struct binder_ref *new_ref;
+ new_ref = binder_get_ref_for_node(target_proc, ref->node);
+ if (new_ref == NULL) {
+ return_error = BR_FAILED_REPLY;
+ goto err_binder_get_ref_for_node_failed;
+ }
+ fp->handle = new_ref->desc;
+ binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
+ binder_debug(BINDER_DEBUG_TRANSACTION,
+ " ref %d desc %d -> ref %d desc %d (node %d)\n",
+ ref->debug_id, ref->desc, new_ref->debug_id,
+ new_ref->desc, ref->node->debug_id);
+ }
+ } break;
+
+ case BINDER_TYPE_FD: {
+ int target_fd;
+ struct file *file;
+
+ if (reply) {
+ if (!(in_reply_to->flags & TF_ACCEPT_FDS)) {
+ binder_user_error("binder: %d:%d got reply with fd, %ld, but target does not allow fds\n",
+ proc->pid, thread->pid, fp->handle);
+ return_error = BR_FAILED_REPLY;
+ goto err_fd_not_allowed;
+ }
+ } else if (!target_node->accept_fds) {
+ binder_user_error("binder: %d:%d got transaction with fd, %ld, but target does not allow fds\n",
+ proc->pid, thread->pid, fp->handle);
+ return_error = BR_FAILED_REPLY;
+ goto err_fd_not_allowed;
+ }
+
+ file = fget(fp->handle);
+ if (file == NULL) {
+ binder_user_error("binder: %d:%d got transaction with invalid fd, %ld\n",
+ proc->pid, thread->pid, fp->handle);
+ return_error = BR_FAILED_REPLY;
+ goto err_fget_failed;
+ }
+ target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
+ if (target_fd < 0) {
+ fput(file);
+ return_error = BR_FAILED_REPLY;
+ goto err_get_unused_fd_failed;
+ }
+ task_fd_install(target_proc, target_fd, file);
+ binder_debug(BINDER_DEBUG_TRANSACTION,
+ " fd %ld -> %d\n", fp->handle, target_fd);
+ /* TODO: fput? */
+ fp->handle = target_fd;
+ } break;
+
+ default:
+ binder_user_error("binder: %d:%d got transactio"
+ "n with invalid object type, %lx\n",
+ proc->pid, thread->pid, fp->type);
+ return_error = BR_FAILED_REPLY;
+ goto err_bad_object_type;
+ }
+ }
+ if (reply) {
+ BUG_ON(t->buffer->async_transaction != 0);
+ binder_pop_transaction(target_thread, in_reply_to);
+ } else if (!(t->flags & TF_ONE_WAY)) {
+ BUG_ON(t->buffer->async_transaction != 0);
+ t->need_reply = 1;
+ t->from_parent = thread->transaction_stack;
+ thread->transaction_stack = t;
+ } else {
+ BUG_ON(target_node == NULL);
+ BUG_ON(t->buffer->async_transaction != 1);
+ if (target_node->has_async_transaction) {
+ target_list = &target_node->async_todo;
+ target_wait = NULL;
+ } else
+ target_node->has_async_transaction = 1;
+ }
+ t->work.type = BINDER_WORK_TRANSACTION;
+ list_add_tail(&t->work.entry, target_list);
+ tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
+ list_add_tail(&tcomplete->entry, &thread->todo);
+ if (target_wait)
+ wake_up_interruptible(target_wait);
+ return;
+
+err_get_unused_fd_failed:
+err_fget_failed:
+err_fd_not_allowed:
+err_binder_get_ref_for_node_failed:
+err_binder_get_ref_failed:
+err_binder_new_node_failed:
+err_bad_object_type:
+err_bad_offset:
+err_copy_data_failed:
+ binder_transaction_buffer_release(target_proc, t->buffer, offp);
+ t->buffer->transaction = NULL;
+ binder_free_buf(target_proc, t->buffer);
+err_binder_alloc_buf_failed:
+ kfree(tcomplete);
+ binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
+err_alloc_tcomplete_failed:
+ kfree(t);
+ binder_stats_deleted(BINDER_STAT_TRANSACTION);
+err_alloc_t_failed:
+err_bad_call_stack:
+err_empty_call_stack:
+err_dead_binder:
+err_invalid_target_handle:
+err_no_context_mgr_node:
+ binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
+ "binder: %d:%d transaction failed %d, size %zd-%zd\n",
+ proc->pid, thread->pid, return_error,
+ tr->data_size, tr->offsets_size);
+
+ {
+ struct binder_transaction_log_entry *fe;
+ fe = binder_transaction_log_add(&binder_transaction_log_failed);
+ *fe = *e;
+ }
+
+ BUG_ON(thread->return_error != BR_OK);
+ if (in_reply_to) {
+ thread->return_error = BR_TRANSACTION_COMPLETE;
+ binder_send_failed_reply(in_reply_to, return_error);
+ } else
+ thread->return_error = return_error;
+}
+
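+/*
+ * Consume BC_* commands from the userspace write buffer, advancing *consumed
+ * as each command is handled.
+ */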
+int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
+ void __user *buffer, int size, signed long *consumed)
+{
+ uint32_t cmd;
+ void __user *ptr = buffer + *consumed;
+ void __user *end = buffer + size;
+
+ while (ptr < end && thread->return_error == BR_OK) {
+ if (get_user(cmd, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+ if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
+ binder_stats.bc[_IOC_NR(cmd)]++;
+ proc->stats.bc[_IOC_NR(cmd)]++;
+ thread->stats.bc[_IOC_NR(cmd)]++;
+ }
+ switch (cmd) {
+ case BC_INCREFS:
+ case BC_ACQUIRE:
+ case BC_RELEASE:
+ case BC_DECREFS: {
+ uint32_t target;
+ struct binder_ref *ref;
+ const char *debug_string;
+
+ if (get_user(target, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+ if (target == 0 && binder_context_mgr_node &&
+ (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) {
+ ref = binder_get_ref_for_node(proc,
+ binder_context_mgr_node);
+ if (ref->desc != target) {
+ binder_user_error("binder: %d:"
+ "%d tried to acquire "
+ "reference to desc 0, "
+ "got %d instead\n",
+ proc->pid, thread->pid,
+ ref->desc);
+ }
+ } else
+ ref = binder_get_ref(proc, target);
+ if (ref == NULL) {
+ binder_user_error("binder: %d:%d refcou"
+ "nt change on invalid ref %d\n",
+ proc->pid, thread->pid, target);
+ break;
+ }
+ switch (cmd) {
+ case BC_INCREFS:
+ debug_string = "IncRefs";
+ binder_inc_ref(ref, 0, NULL);
+ break;
+ case BC_ACQUIRE:
+ debug_string = "Acquire";
+ binder_inc_ref(ref, 1, NULL);
+ break;
+ case BC_RELEASE:
+ debug_string = "Release";
+ binder_dec_ref(ref, 1);
+ break;
+ case BC_DECREFS:
+ default:
+ debug_string = "DecRefs";
+ binder_dec_ref(ref, 0);
+ break;
+ }
+ binder_debug(BINDER_DEBUG_USER_REFS,
+ "binder: %d:%d %s ref %d desc %d s %d w %d for node %d\n",
+ proc->pid, thread->pid, debug_string, ref->debug_id,
+ ref->desc, ref->strong, ref->weak, ref->node->debug_id);
+ break;
+ }
+ case BC_INCREFS_DONE:
+ case BC_ACQUIRE_DONE: {
+ void __user *node_ptr;
+ void *cookie;
+ struct binder_node *node;
+
+ if (get_user(node_ptr, (void * __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(void *);
+ if (get_user(cookie, (void * __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(void *);
+ node = binder_get_node(proc, node_ptr);
+ if (node == NULL) {
+ binder_user_error("binder: %d:%d "
+ "%s u%p no match\n",
+ proc->pid, thread->pid,
+ cmd == BC_INCREFS_DONE ?
+ "BC_INCREFS_DONE" :
+ "BC_ACQUIRE_DONE",
+ node_ptr);
+ break;
+ }
+ if (cookie != node->cookie) {
+ binder_user_error("binder: %d:%d %s u%p node %d"
+ " cookie mismatch %p != %p\n",
+ proc->pid, thread->pid,
+ cmd == BC_INCREFS_DONE ?
+ "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
+ node_ptr, node->debug_id,
+ cookie, node->cookie);
+ break;
+ }
+ if (cmd == BC_ACQUIRE_DONE) {
+ if (node->pending_strong_ref == 0) {
+ binder_user_error("binder: %d:%d "
+ "BC_ACQUIRE_DONE node %d has "
+ "no pending acquire request\n",
+ proc->pid, thread->pid,
+ node->debug_id);
+ break;
+ }
+ node->pending_strong_ref = 0;
+ } else {
+ if (node->pending_weak_ref == 0) {
+ binder_user_error("binder: %d:%d "
+ "BC_INCREFS_DONE node %d has "
+ "no pending increfs request\n",
+ proc->pid, thread->pid,
+ node->debug_id);
+ break;
+ }
+ node->pending_weak_ref = 0;
+ }
+ binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0);
+ binder_debug(BINDER_DEBUG_USER_REFS,
+ "binder: %d:%d %s node %d ls %d lw %d\n",
+ proc->pid, thread->pid,
+ cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
+ node->debug_id, node->local_strong_refs, node->local_weak_refs);
+ break;
+ }
+ case BC_ATTEMPT_ACQUIRE:
+ printk(KERN_ERR "binder: BC_ATTEMPT_ACQUIRE not supported\n");
+ return -EINVAL;
+ case BC_ACQUIRE_RESULT:
+ printk(KERN_ERR "binder: BC_ACQUIRE_RESULT not supported\n");
+ return -EINVAL;
+
+ case BC_FREE_BUFFER: {
+ void __user *data_ptr;
+ struct binder_buffer *buffer;
+
+ if (get_user(data_ptr, (void * __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(void *);
+
+ buffer = binder_buffer_lookup(proc, data_ptr);
+ if (buffer == NULL) {
+ binder_user_error("binder: %d:%d "
+ "BC_FREE_BUFFER u%p no match\n",
+ proc->pid, thread->pid, data_ptr);
+ break;
+ }
+ if (!buffer->allow_user_free) {
+ binder_user_error("binder: %d:%d "
+ "BC_FREE_BUFFER u%p matched "
+ "unreturned buffer\n",
+ proc->pid, thread->pid, data_ptr);
+ break;
+ }
+ binder_debug(BINDER_DEBUG_FREE_BUFFER,
+ "binder: %d:%d BC_FREE_BUFFER u%p found buffer %d for %s transaction\n",
+ proc->pid, thread->pid, data_ptr, buffer->debug_id,
+ buffer->transaction ? "active" : "finished");
+
+ if (buffer->transaction) {
+ buffer->transaction->buffer = NULL;
+ buffer->transaction = NULL;
+ }
+ if (buffer->async_transaction && buffer->target_node) {
+ BUG_ON(!buffer->target_node->has_async_transaction);
+ if (list_empty(&buffer->target_node->async_todo))
+ buffer->target_node->has_async_transaction = 0;
+ else
+ list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
+ }
+ binder_transaction_buffer_release(proc, buffer, NULL);
+ binder_free_buf(proc, buffer);
+ break;
+ }
+
+ case BC_TRANSACTION:
+ case BC_REPLY: {
+ struct binder_transaction_data tr;
+
+ if (copy_from_user(&tr, ptr, sizeof(tr)))
+ return -EFAULT;
+ ptr += sizeof(tr);
+ binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
+ break;
+ }
+
+ case BC_REGISTER_LOOPER:
+ binder_debug(BINDER_DEBUG_THREADS,
+ "binder: %d:%d BC_REGISTER_LOOPER\n",
+ proc->pid, thread->pid);
+ if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
+ thread->looper |= BINDER_LOOPER_STATE_INVALID;
+ binder_user_error("binder: %d:%d ERROR:"
+ " BC_REGISTER_LOOPER called "
+ "after BC_ENTER_LOOPER\n",
+ proc->pid, thread->pid);
+ } else if (proc->requested_threads == 0) {
+ thread->looper |= BINDER_LOOPER_STATE_INVALID;
+ binder_user_error("binder: %d:%d ERROR:"
+ " BC_REGISTER_LOOPER called "
+ "without request\n",
+ proc->pid, thread->pid);
+ } else {
+ proc->requested_threads--;
+ proc->requested_threads_started++;
+ }
+ thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
+ break;
+ case BC_ENTER_LOOPER:
+ binder_debug(BINDER_DEBUG_THREADS,
+ "binder: %d:%d BC_ENTER_LOOPER\n",
+ proc->pid, thread->pid);
+ if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
+ thread->looper |= BINDER_LOOPER_STATE_INVALID;
+ binder_user_error("binder: %d:%d ERROR:"
+ " BC_ENTER_LOOPER called after "
+ "BC_REGISTER_LOOPER\n",
+ proc->pid, thread->pid);
+ }
+ thread->looper |= BINDER_LOOPER_STATE_ENTERED;
+ break;
+ case BC_EXIT_LOOPER:
+ binder_debug(BINDER_DEBUG_THREADS,
+ "binder: %d:%d BC_EXIT_LOOPER\n",
+ proc->pid, thread->pid);
+ thread->looper |= BINDER_LOOPER_STATE_EXITED;
+ break;
+
+ case BC_REQUEST_DEATH_NOTIFICATION:
+ case BC_CLEAR_DEATH_NOTIFICATION: {
+ uint32_t target;
+ void __user *cookie;
+ struct binder_ref *ref;
+ struct binder_ref_death *death;
+
+ if (get_user(target, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+ if (get_user(cookie, (void __user * __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(void *);
+ ref = binder_get_ref(proc, target);
+ if (ref == NULL) {
+ binder_user_error("binder: %d:%d %s "
+ "invalid ref %d\n",
+ proc->pid, thread->pid,
+ cmd == BC_REQUEST_DEATH_NOTIFICATION ?
+ "BC_REQUEST_DEATH_NOTIFICATION" :
+ "BC_CLEAR_DEATH_NOTIFICATION",
+ target);
+ break;
+ }
+
+ binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
+ "binder: %d:%d %s %p ref %d desc %d s %d w %d for node %d\n",
+ proc->pid, thread->pid,
+ cmd == BC_REQUEST_DEATH_NOTIFICATION ?
+ "BC_REQUEST_DEATH_NOTIFICATION" :
+ "BC_CLEAR_DEATH_NOTIFICATION",
+ cookie, ref->debug_id, ref->desc,
+ ref->strong, ref->weak, ref->node->debug_id);
+
+ if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
+ if (ref->death) {
+ binder_user_error("binder: %d:%"
+ "d BC_REQUEST_DEATH_NOTI"
+ "FICATION death notific"
+ "ation already set\n",
+ proc->pid, thread->pid);
+ break;
+ }
+ death = kzalloc(sizeof(*death), GFP_KERNEL);
+ if (death == NULL) {
+ thread->return_error = BR_ERROR;
+ binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
+ "binder: %d:%d "
+ "BC_REQUEST_DEATH_NOTIFICATION failed\n",
+ proc->pid, thread->pid);
+ break;
+ }
+ binder_stats_created(BINDER_STAT_DEATH);
+ INIT_LIST_HEAD(&death->work.entry);
+ death->cookie = cookie;
+ ref->death = death;
+ if (ref->node->proc == NULL) {
+ ref->death->work.type = BINDER_WORK_DEAD_BINDER;
+ if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
+ list_add_tail(&ref->death->work.entry, &thread->todo);
+ } else {
+ list_add_tail(&ref->death->work.entry, &proc->todo);
+ wake_up_interruptible(&proc->wait);
+ }
+ }
+ } else {
+ if (ref->death == NULL) {
+ binder_user_error("binder: %d:%"
+ "d BC_CLEAR_DEATH_NOTIFI"
+ "CATION death notificat"
+ "ion not active\n",
+ proc->pid, thread->pid);
+ break;
+ }
+ death = ref->death;
+ if (death->cookie != cookie) {
+ binder_user_error("binder: %d:%"
+ "d BC_CLEAR_DEATH_NOTIFI"
+ "CATION death notificat"
+ "ion cookie mismatch "
+ "%p != %p\n",
+ proc->pid, thread->pid,
+ death->cookie, cookie);
+ break;
+ }
+ ref->death = NULL;
+ if (list_empty(&death->work.entry)) {
+ death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
+ if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
+ list_add_tail(&death->work.entry, &thread->todo);
+ } else {
+ list_add_tail(&death->work.entry, &proc->todo);
+ wake_up_interruptible(&proc->wait);
+ }
+ } else {
+ BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
+ death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
+ }
+ }
+ } break;
+ case BC_DEAD_BINDER_DONE: {
+ struct binder_work *w;
+ void __user *cookie;
+ struct binder_ref_death *death = NULL;
+ if (get_user(cookie, (void __user * __user *)ptr))
+ return -EFAULT;
+
+ ptr += sizeof(void *);
+ list_for_each_entry(w, &proc->delivered_death, entry) {
+ struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
+ if (tmp_death->cookie == cookie) {
+ death = tmp_death;
+ break;
+ }
+ }
+ binder_debug(BINDER_DEBUG_DEAD_BINDER,
+ "binder: %d:%d BC_DEAD_BINDER_DONE %p found %p\n",
+ proc->pid, thread->pid, cookie, death);
+ if (death == NULL) {
+ binder_user_error("binder: %d:%d BC_DEAD"
+ "_BINDER_DONE %p not found\n",
+ proc->pid, thread->pid, cookie);
+ break;
+ }
+
+ list_del_init(&death->work.entry);
+ if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
+ death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
+ if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
+ list_add_tail(&death->work.entry, &thread->todo);
+ } else {
+ list_add_tail(&death->work.entry, &proc->todo);
+ wake_up_interruptible(&proc->wait);
+ }
+ }
+ } break;
+
+ default:
+ printk(KERN_ERR "binder: %d:%d unknown command %d\n",
+ proc->pid, thread->pid, cmd);
+ return -EINVAL;
+ }
+ *consumed = ptr - buffer;
+ }
+ return 0;
+}
+
+void binder_stat_br(struct binder_proc *proc, struct binder_thread *thread,
+ uint32_t cmd)
+{
+ if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
+ binder_stats.br[_IOC_NR(cmd)]++;
+ proc->stats.br[_IOC_NR(cmd)]++;
+ thread->stats.br[_IOC_NR(cmd)]++;
+ }
+}
+
+static int binder_has_proc_work(struct binder_proc *proc,
+ struct binder_thread *thread)
+{
+ return !list_empty(&proc->todo) ||
+ (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
+}
+
+static int binder_has_thread_work(struct binder_thread *thread)
+{
+ return !list_empty(&thread->todo) || thread->return_error != BR_OK ||
+ (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
+}
+
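+/*
+ * Fill the userspace read buffer with BR_* commands for pending work,
+ * sleeping (unless non_block) until there is thread or process work.
+ */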
+static int binder_thread_read(struct binder_proc *proc,
+ struct binder_thread *thread,
+ void __user *buffer, int size,
+ signed long *consumed, int non_block)
+{
+ void __user *ptr = buffer + *consumed;
+ void __user *end = buffer + size;
+
+ int ret = 0;
+ int wait_for_proc_work;
+
+ if (*consumed == 0) {
+ if (put_user(BR_NOOP, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+ }
+
+retry:
+ wait_for_proc_work = thread->transaction_stack == NULL &&
+ list_empty(&thread->todo);
+
+ if (thread->return_error != BR_OK && ptr < end) {
+ if (thread->return_error2 != BR_OK) {
+ if (put_user(thread->return_error2, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+ if (ptr == end)
+ goto done;
+ thread->return_error2 = BR_OK;
+ }
+ if (put_user(thread->return_error, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+ thread->return_error = BR_OK;
+ goto done;
+ }
+
+
+ thread->looper |= BINDER_LOOPER_STATE_WAITING;
+ if (wait_for_proc_work)
+ proc->ready_threads++;
+ mutex_unlock(&binder_lock);
+ if (wait_for_proc_work) {
+ if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
+ BINDER_LOOPER_STATE_ENTERED))) {
+ binder_user_error("binder: %d:%d ERROR: Thread waiting "
+ "for process work before calling BC_REGISTER_"
+ "LOOPER or BC_ENTER_LOOPER (state %x)\n",
+ proc->pid, thread->pid, thread->looper);
+ wait_event_interruptible(binder_user_error_wait,
+ binder_stop_on_user_error < 2);
+ }
+ binder_set_nice(proc->default_priority);
+ if (non_block) {
+ if (!binder_has_proc_work(proc, thread))
+ ret = -EAGAIN;
+ } else
+ ret = wait_event_interruptible_exclusive(proc->wait, binder_has_proc_work(proc, thread));
+ } else {
+ if (non_block) {
+ if (!binder_has_thread_work(thread))
+ ret = -EAGAIN;
+ } else
+ ret = wait_event_interruptible(thread->wait, binder_has_thread_work(thread));
+ }
+ mutex_lock(&binder_lock);
+ if (wait_for_proc_work)
+ proc->ready_threads--;
+ thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
+
+ if (ret)
+ return ret;
+
+ while (1) {
+ uint32_t cmd;
+ struct binder_transaction_data tr;
+ struct binder_work *w;
+ struct binder_transaction *t = NULL;
+
+ if (!list_empty(&thread->todo))
+ w = list_first_entry(&thread->todo, struct binder_work, entry);
+ else if (!list_empty(&proc->todo) && wait_for_proc_work)
+ w = list_first_entry(&proc->todo, struct binder_work, entry);
+ else {
+ if (ptr - buffer == 4 && !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN)) /* no data added */
+ goto retry;
+ break;
+ }
+
+ if (end - ptr < sizeof(tr) + 4)
+ break;
+
+ switch (w->type) {
+ case BINDER_WORK_TRANSACTION: {
+ t = container_of(w, struct binder_transaction, work);
+ } break;
+ case BINDER_WORK_TRANSACTION_COMPLETE: {
+ cmd = BR_TRANSACTION_COMPLETE;
+ if (put_user(cmd, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+
+ binder_stat_br(proc, thread, cmd);
+ binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
+ "binder: %d:%d BR_TRANSACTION_COMPLETE\n",
+ proc->pid, thread->pid);
+
+ list_del(&w->entry);
+ kfree(w);
+ binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
+ } break;
+ case BINDER_WORK_NODE: {
+ struct binder_node *node = container_of(w, struct binder_node, work);
+ uint32_t cmd = BR_NOOP;
+ const char *cmd_name;
+ int strong = node->internal_strong_refs || node->local_strong_refs;
+ int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong;
+ if (weak && !node->has_weak_ref) {
+ cmd = BR_INCREFS;
+ cmd_name = "BR_INCREFS";
+ node->has_weak_ref = 1;
+ node->pending_weak_ref = 1;
+ node->local_weak_refs++;
+ } else if (strong && !node->has_strong_ref) {
+ cmd = BR_ACQUIRE;
+ cmd_name = "BR_ACQUIRE";
+ node->has_strong_ref = 1;
+ node->pending_strong_ref = 1;
+ node->local_strong_refs++;
+ } else if (!strong && node->has_strong_ref) {
+ cmd = BR_RELEASE;
+ cmd_name = "BR_RELEASE";
+ node->has_strong_ref = 0;
+ } else if (!weak && node->has_weak_ref) {
+ cmd = BR_DECREFS;
+ cmd_name = "BR_DECREFS";
+ node->has_weak_ref = 0;
+ }
+ if (cmd != BR_NOOP) {
+ if (put_user(cmd, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+ if (put_user(node->ptr, (void * __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(void *);
+ if (put_user(node->cookie, (void * __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(void *);
+
+ binder_stat_br(proc, thread, cmd);
+ binder_debug(BINDER_DEBUG_USER_REFS,
+ "binder: %d:%d %s %d u%p c%p\n",
+ proc->pid, thread->pid, cmd_name, node->debug_id, node->ptr, node->cookie);
+ } else {
+ list_del_init(&w->entry);
+ if (!weak && !strong) {
+ binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+ "binder: %d:%d node %d u%p c%p deleted\n",
+ proc->pid, thread->pid, node->debug_id,
+ node->ptr, node->cookie);
+ rb_erase(&node->rb_node, &proc->nodes);
+ kfree(node);
+ binder_stats_deleted(BINDER_STAT_NODE);
+ } else {
+ binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+ "binder: %d:%d node %d u%p c%p state unchanged\n",
+ proc->pid, thread->pid, node->debug_id, node->ptr,
+ node->cookie);
+ }
+ }
+ } break;
+ case BINDER_WORK_DEAD_BINDER:
+ case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
+ case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
+ struct binder_ref_death *death;
+ uint32_t cmd;
+
+ death = container_of(w, struct binder_ref_death, work);
+ if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
+ cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
+ else
+ cmd = BR_DEAD_BINDER;
+ if (put_user(cmd, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+ if (put_user(death->cookie, (void * __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(void *);
+ binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
+ "binder: %d:%d %s %p\n",
+ proc->pid, thread->pid,
+ cmd == BR_DEAD_BINDER ?
+ "BR_DEAD_BINDER" :
+ "BR_CLEAR_DEATH_NOTIFICATION_DONE",
+ death->cookie);
+
+ if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
+ list_del(&w->entry);
+ kfree(death);
+ binder_stats_deleted(BINDER_STAT_DEATH);
+ } else
+ list_move(&w->entry, &proc->delivered_death);
+ if (cmd == BR_DEAD_BINDER)
+ goto done; /* DEAD_BINDER notifications can cause transactions */
+ } break;
+ }
+
+ if (!t)
+ continue;
+
+ BUG_ON(t->buffer == NULL);
+ if (t->buffer->target_node) {
+ struct binder_node *target_node = t->buffer->target_node;
+ tr.target.ptr = target_node->ptr;
+ tr.cookie = target_node->cookie;
+ t->saved_priority = task_nice(current);
+ if (t->priority < target_node->min_priority &&
+ !(t->flags & TF_ONE_WAY))
+ binder_set_nice(t->priority);
+ else if (!(t->flags & TF_ONE_WAY) ||
+ t->saved_priority > target_node->min_priority)
+ binder_set_nice(target_node->min_priority);
+ cmd = BR_TRANSACTION;
+ } else {
+ tr.target.ptr = NULL;
+ tr.cookie = NULL;
+ cmd = BR_REPLY;
+ }
+ tr.code = t->code;
+ tr.flags = t->flags;
+ tr.sender_euid = t->sender_euid;
+
+ if (t->from) {
+ struct task_struct *sender = t->from->proc->tsk;
+ tr.sender_pid = task_tgid_nr_ns(sender,
+ current->nsproxy->pid_ns);
+ } else {
+ tr.sender_pid = 0;
+ }
+
+ tr.data_size = t->buffer->data_size;
+ tr.offsets_size = t->buffer->offsets_size;
+ tr.data.ptr.buffer = (void *)t->buffer->data +
+ proc->user_buffer_offset;
+ tr.data.ptr.offsets = tr.data.ptr.buffer +
+ ALIGN(t->buffer->data_size,
+ sizeof(void *));
+
+ if (put_user(cmd, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+ if (copy_to_user(ptr, &tr, sizeof(tr)))
+ return -EFAULT;
+ ptr += sizeof(tr);
+
+ binder_stat_br(proc, thread, cmd);
+ binder_debug(BINDER_DEBUG_TRANSACTION,
+ "binder: %d:%d %s %d %d:%d, cmd %d"
+			" size %zd-%zd ptr %p-%p\n",
+ proc->pid, thread->pid,
+ (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
+ "BR_REPLY",
+ t->debug_id, t->from ? t->from->proc->pid : 0,
+ t->from ? t->from->pid : 0, cmd,
+ t->buffer->data_size, t->buffer->offsets_size,
+ tr.data.ptr.buffer, tr.data.ptr.offsets);
+
+ list_del(&t->work.entry);
+ t->buffer->allow_user_free = 1;
+ if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
+ t->to_parent = thread->transaction_stack;
+ t->to_thread = thread;
+ thread->transaction_stack = t;
+ } else {
+ t->buffer->transaction = NULL;
+ kfree(t);
+ binder_stats_deleted(BINDER_STAT_TRANSACTION);
+ }
+ break;
+ }
+
+done:
+
+ *consumed = ptr - buffer;
+ if (proc->requested_threads + proc->ready_threads == 0 &&
+ proc->requested_threads_started < proc->max_threads &&
+ (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
+ BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
+ /*spawn a new thread if we leave this out */) {
+ proc->requested_threads++;
+ binder_debug(BINDER_DEBUG_THREADS,
+ "binder: %d:%d BR_SPAWN_LOOPER\n",
+ proc->pid, thread->pid);
+ if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static void binder_release_work(struct list_head *list)
+{
+ struct binder_work *w;
+ while (!list_empty(list)) {
+ w = list_first_entry(list, struct binder_work, entry);
+ list_del_init(&w->entry);
+ switch (w->type) {
+ case BINDER_WORK_TRANSACTION: {
+ struct binder_transaction *t;
+
+ t = container_of(w, struct binder_transaction, work);
+ if (t->buffer->target_node && !(t->flags & TF_ONE_WAY))
+ binder_send_failed_reply(t, BR_DEAD_REPLY);
+ } break;
+ case BINDER_WORK_TRANSACTION_COMPLETE: {
+ kfree(w);
+ binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
+ } break;
+ default:
+ break;
+ }
+ }
+
+}
+
+static struct binder_thread *binder_get_thread(struct binder_proc *proc)
+{
+ struct binder_thread *thread = NULL;
+ struct rb_node *parent = NULL;
+ struct rb_node **p = &proc->threads.rb_node;
+
+ while (*p) {
+ parent = *p;
+ thread = rb_entry(parent, struct binder_thread, rb_node);
+
+ if (current->pid < thread->pid)
+ p = &(*p)->rb_left;
+ else if (current->pid > thread->pid)
+ p = &(*p)->rb_right;
+ else
+ break;
+ }
+ if (*p == NULL) {
+ thread = kzalloc(sizeof(*thread), GFP_KERNEL);
+ if (thread == NULL)
+ return NULL;
+ binder_stats_created(BINDER_STAT_THREAD);
+ thread->proc = proc;
+ thread->pid = current->pid;
+ init_waitqueue_head(&thread->wait);
+ INIT_LIST_HEAD(&thread->todo);
+ rb_link_node(&thread->rb_node, parent, p);
+ rb_insert_color(&thread->rb_node, &proc->threads);
+ thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
+ thread->return_error = BR_OK;
+ thread->return_error2 = BR_OK;
+ }
+ return thread;
+}
+
+static int binder_free_thread(struct binder_proc *proc,
+ struct binder_thread *thread)
+{
+ struct binder_transaction *t;
+ struct binder_transaction *send_reply = NULL;
+ int active_transactions = 0;
+
+ rb_erase(&thread->rb_node, &proc->threads);
+ t = thread->transaction_stack;
+ if (t && t->to_thread == thread)
+ send_reply = t;
+ while (t) {
+ active_transactions++;
+ binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
+ "binder: release %d:%d transaction %d "
+ "%s, still active\n", proc->pid, thread->pid,
+ t->debug_id,
+ (t->to_thread == thread) ? "in" : "out");
+
+ if (t->to_thread == thread) {
+ t->to_proc = NULL;
+ t->to_thread = NULL;
+ if (t->buffer) {
+ t->buffer->transaction = NULL;
+ t->buffer = NULL;
+ }
+ t = t->to_parent;
+ } else if (t->from == thread) {
+ t->from = NULL;
+ t = t->from_parent;
+ } else
+ BUG();
+ }
+ if (send_reply)
+ binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
+ binder_release_work(&thread->todo);
+ kfree(thread);
+ binder_stats_deleted(BINDER_STAT_THREAD);
+ return active_transactions;
+}
+
+static unsigned int binder_poll(struct file *filp,
+ struct poll_table_struct *wait)
+{
+ struct binder_proc *proc = filp->private_data;
+ struct binder_thread *thread = NULL;
+ int wait_for_proc_work;
+
+ mutex_lock(&binder_lock);
+ thread = binder_get_thread(proc);
+
+ wait_for_proc_work = thread->transaction_stack == NULL &&
+ list_empty(&thread->todo) && thread->return_error == BR_OK;
+ mutex_unlock(&binder_lock);
+
+ if (wait_for_proc_work) {
+ if (binder_has_proc_work(proc, thread))
+ return POLLIN;
+ poll_wait(filp, &proc->wait, wait);
+ if (binder_has_proc_work(proc, thread))
+ return POLLIN;
+ } else {
+ if (binder_has_thread_work(thread))
+ return POLLIN;
+ poll_wait(filp, &thread->wait, wait);
+ if (binder_has_thread_work(thread))
+ return POLLIN;
+ }
+ return 0;
+}
+
+static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ int ret;
+ struct binder_proc *proc = filp->private_data;
+ struct binder_thread *thread;
+ unsigned int size = _IOC_SIZE(cmd);
+ void __user *ubuf = (void __user *)arg;
+
+ /*printk(KERN_INFO "binder_ioctl: %d:%d %x %lx\n", proc->pid, current->pid, cmd, arg);*/
+
+ ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
+ if (ret)
+ return ret;
+
+ mutex_lock(&binder_lock);
+ thread = binder_get_thread(proc);
+ if (thread == NULL) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ switch (cmd) {
+ case BINDER_WRITE_READ: {
+ struct binder_write_read bwr;
+ if (size != sizeof(struct binder_write_read)) {
+ ret = -EINVAL;
+ goto err;
+ }
+ if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
+ ret = -EFAULT;
+ goto err;
+ }
+ binder_debug(BINDER_DEBUG_READ_WRITE,
+ "binder: %d:%d write %ld at %08lx, read %ld at %08lx\n",
+ proc->pid, thread->pid, bwr.write_size, bwr.write_buffer,
+ bwr.read_size, bwr.read_buffer);
+
+ if (bwr.write_size > 0) {
+ ret = binder_thread_write(proc, thread, (void __user *)bwr.write_buffer, bwr.write_size, &bwr.write_consumed);
+ if (ret < 0) {
+ bwr.read_consumed = 0;
+ if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
+ ret = -EFAULT;
+ goto err;
+ }
+ }
+ if (bwr.read_size > 0) {
+ ret = binder_thread_read(proc, thread, (void __user *)bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK);
+ if (!list_empty(&proc->todo))
+ wake_up_interruptible(&proc->wait);
+ if (ret < 0) {
+ if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
+ ret = -EFAULT;
+ goto err;
+ }
+ }
+ binder_debug(BINDER_DEBUG_READ_WRITE,
+ "binder: %d:%d wrote %ld of %ld, read return %ld of %ld\n",
+ proc->pid, thread->pid, bwr.write_consumed, bwr.write_size,
+ bwr.read_consumed, bwr.read_size);
+ if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
+ ret = -EFAULT;
+ goto err;
+ }
+ break;
+ }
+ case BINDER_SET_MAX_THREADS:
+ if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
+ ret = -EINVAL;
+ goto err;
+ }
+ break;
+ case BINDER_SET_CONTEXT_MGR:
+ if (binder_context_mgr_node != NULL) {
+ printk(KERN_ERR "binder: BINDER_SET_CONTEXT_MGR already set\n");
+ ret = -EBUSY;
+ goto err;
+ }
+ if (binder_context_mgr_uid != -1) {
+ if (binder_context_mgr_uid != current->cred->euid) {
+ printk(KERN_ERR "binder: BINDER_SET_"
+ "CONTEXT_MGR bad uid %d != %d\n",
+ current->cred->euid,
+ binder_context_mgr_uid);
+ ret = -EPERM;
+ goto err;
+ }
+ } else
+ binder_context_mgr_uid = current->cred->euid;
+ binder_context_mgr_node = binder_new_node(proc, NULL, NULL);
+ if (binder_context_mgr_node == NULL) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ binder_context_mgr_node->local_weak_refs++;
+ binder_context_mgr_node->local_strong_refs++;
+ binder_context_mgr_node->has_strong_ref = 1;
+ binder_context_mgr_node->has_weak_ref = 1;
+ break;
+ case BINDER_THREAD_EXIT:
+ binder_debug(BINDER_DEBUG_THREADS, "binder: %d:%d exit\n",
+ proc->pid, thread->pid);
+ binder_free_thread(proc, thread);
+ thread = NULL;
+ break;
+ case BINDER_VERSION:
+ if (size != sizeof(struct binder_version)) {
+ ret = -EINVAL;
+ goto err;
+ }
+ if (put_user(BINDER_CURRENT_PROTOCOL_VERSION, &((struct binder_version *)ubuf)->protocol_version)) {
+ ret = -EINVAL;
+ goto err;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ goto err;
+ }
+ ret = 0;
+err:
+ if (thread)
+ thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
+ mutex_unlock(&binder_lock);
+ wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
+ if (ret && ret != -ERESTARTSYS)
+ printk(KERN_INFO "binder: %d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
+ return ret;
+}
+
+static void binder_vma_open(struct vm_area_struct *vma)
+{
+ struct binder_proc *proc = vma->vm_private_data;
+ binder_debug(BINDER_DEBUG_OPEN_CLOSE,
+ "binder: %d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
+ proc->pid, vma->vm_start, vma->vm_end,
+ (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
+ (unsigned long)pgprot_val(vma->vm_page_prot));
+ dump_stack();
+}
+
+static void binder_vma_close(struct vm_area_struct *vma)
+{
+ struct binder_proc *proc = vma->vm_private_data;
+ binder_debug(BINDER_DEBUG_OPEN_CLOSE,
+ "binder: %d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
+ proc->pid, vma->vm_start, vma->vm_end,
+ (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
+ (unsigned long)pgprot_val(vma->vm_page_prot));
+ proc->vma = NULL;
+ binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
+}
+
+static struct vm_operations_struct binder_vm_ops = {
+ .open = binder_vma_open,
+ .close = binder_vma_close,
+};
+
+static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ int ret;
+ struct vm_struct *area;
+ struct binder_proc *proc = filp->private_data;
+ const char *failure_string;
+ struct binder_buffer *buffer;
+
+ if ((vma->vm_end - vma->vm_start) > SZ_4M)
+ vma->vm_end = vma->vm_start + SZ_4M;
+
+ binder_debug(BINDER_DEBUG_OPEN_CLOSE,
+ "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
+ proc->pid, vma->vm_start, vma->vm_end,
+ (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
+ (unsigned long)pgprot_val(vma->vm_page_prot));
+
+ if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
+ ret = -EPERM;
+ failure_string = "bad vm_flags";
+ goto err_bad_arg;
+ }
+ vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
+
+ if (proc->buffer) {
+ ret = -EBUSY;
+ failure_string = "already mapped";
+ goto err_already_mapped;
+ }
+
+ area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
+ if (area == NULL) {
+ ret = -ENOMEM;
+ failure_string = "get_vm_area";
+ goto err_get_vm_area_failed;
+ }
+ proc->buffer = area->addr;
+ proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
+
+#ifdef CONFIG_CPU_CACHE_VIPT
+ if (cache_is_vipt_aliasing()) {
+ while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) {
+ printk(KERN_INFO "binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);
+ vma->vm_start += PAGE_SIZE;
+ }
+ }
+#endif
+ proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);
+ if (proc->pages == NULL) {
+ ret = -ENOMEM;
+ failure_string = "alloc page array";
+ goto err_alloc_pages_failed;
+ }
+ proc->buffer_size = vma->vm_end - vma->vm_start;
+
+ vma->vm_ops = &binder_vm_ops;
+ vma->vm_private_data = proc;
+
+ if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
+ ret = -ENOMEM;
+ failure_string = "alloc small buf";
+ goto err_alloc_small_buf_failed;
+ }
+ buffer = proc->buffer;
+ INIT_LIST_HEAD(&proc->buffers);
+ list_add(&buffer->entry, &proc->buffers);
+ buffer->free = 1;
+ binder_insert_free_buffer(proc, buffer);
+ proc->free_async_space = proc->buffer_size / 2;
+ barrier();
+ proc->files = get_files_struct(current);
+ proc->vma = vma;
+
+ /*printk(KERN_INFO "binder_mmap: %d %lx-%lx maps %p\n",
+ proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/
+ return 0;
+
+err_alloc_small_buf_failed:
+ kfree(proc->pages);
+ proc->pages = NULL;
+err_alloc_pages_failed:
+ vfree(proc->buffer);
+ proc->buffer = NULL;
+err_get_vm_area_failed:
+err_already_mapped:
+err_bad_arg:
+ printk(KERN_ERR "binder_mmap: %d %lx-%lx %s failed %d\n",
+ proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
+ return ret;
+}
+
+static int binder_open(struct inode *nodp, struct file *filp)
+{
+ struct binder_proc *proc;
+
+ binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
+ current->group_leader->pid, current->pid);
+
+ proc = kzalloc(sizeof(*proc), GFP_KERNEL);
+ if (proc == NULL)
+ return -ENOMEM;
+ get_task_struct(current);
+ proc->tsk = current;
+ INIT_LIST_HEAD(&proc->todo);
+ init_waitqueue_head(&proc->wait);
+ proc->default_priority = task_nice(current);
+ mutex_lock(&binder_lock);
+ binder_stats_created(BINDER_STAT_PROC);
+ hlist_add_head(&proc->proc_node, &binder_procs);
+ proc->pid = current->group_leader->pid;
+ INIT_LIST_HEAD(&proc->delivered_death);
+ filp->private_data = proc;
+ mutex_unlock(&binder_lock);
+
+ if (binder_debugfs_dir_entry_proc) {
+ char strbuf[11];
+ snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
+ proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
+ binder_debugfs_dir_entry_proc, proc, &binder_proc_fops);
+ }
+
+ return 0;
+}
+
+static int binder_flush(struct file *filp, fl_owner_t id)
+{
+ struct binder_proc *proc = filp->private_data;
+
+ binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
+
+ return 0;
+}
+
+static void binder_deferred_flush(struct binder_proc *proc)
+{
+ struct rb_node *n;
+ int wake_count = 0;
+ for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
+ struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
+ thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
+ if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
+ wake_up_interruptible(&thread->wait);
+ wake_count++;
+ }
+ }
+ wake_up_interruptible_all(&proc->wait);
+
+ binder_debug(BINDER_DEBUG_OPEN_CLOSE,
+ "binder_flush: %d woke %d threads\n", proc->pid,
+ wake_count);
+}
+
+static int binder_release(struct inode *nodp, struct file *filp)
+{
+ struct binder_proc *proc = filp->private_data;
+ debugfs_remove(proc->debugfs_entry);
+ binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
+
+ return 0;
+}
+
+static void binder_deferred_release(struct binder_proc *proc)
+{
+ struct hlist_node *pos;
+ struct binder_transaction *t;
+ struct rb_node *n;
+ int threads, nodes, incoming_refs, outgoing_refs, buffers, active_transactions, page_count;
+
+ BUG_ON(proc->vma);
+ BUG_ON(proc->files);
+
+ hlist_del(&proc->proc_node);
+ if (binder_context_mgr_node && binder_context_mgr_node->proc == proc) {
+ binder_debug(BINDER_DEBUG_DEAD_BINDER,
+ "binder_release: %d context_mgr_node gone\n",
+ proc->pid);
+ binder_context_mgr_node = NULL;
+ }
+
+ threads = 0;
+ active_transactions = 0;
+ while ((n = rb_first(&proc->threads))) {
+ struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
+ threads++;
+ active_transactions += binder_free_thread(proc, thread);
+ }
+ nodes = 0;
+ incoming_refs = 0;
+ while ((n = rb_first(&proc->nodes))) {
+ struct binder_node *node = rb_entry(n, struct binder_node, rb_node);
+
+ nodes++;
+ rb_erase(&node->rb_node, &proc->nodes);
+ list_del_init(&node->work.entry);
+ if (hlist_empty(&node->refs)) {
+ kfree(node);
+ binder_stats_deleted(BINDER_STAT_NODE);
+ } else {
+ struct binder_ref *ref;
+ int death = 0;
+
+ node->proc = NULL;
+ node->local_strong_refs = 0;
+ node->local_weak_refs = 0;
+ hlist_add_head(&node->dead_node, &binder_dead_nodes);
+
+ hlist_for_each_entry(ref, pos, &node->refs, node_entry) {
+ incoming_refs++;
+ if (ref->death) {
+ death++;
+ if (list_empty(&ref->death->work.entry)) {
+ ref->death->work.type = BINDER_WORK_DEAD_BINDER;
+ list_add_tail(&ref->death->work.entry, &ref->proc->todo);
+ wake_up_interruptible(&ref->proc->wait);
+ } else
+ BUG();
+ }
+ }
+ binder_debug(BINDER_DEBUG_DEAD_BINDER,
+ "binder: node %d now dead, "
+ "refs %d, death %d\n", node->debug_id,
+ incoming_refs, death);
+ }
+ }
+ outgoing_refs = 0;
+ while ((n = rb_first(&proc->refs_by_desc))) {
+ struct binder_ref *ref = rb_entry(n, struct binder_ref,
+ rb_node_desc);
+ outgoing_refs++;
+ binder_delete_ref(ref);
+ }
+ binder_release_work(&proc->todo);
+ buffers = 0;
+
+ while ((n = rb_first(&proc->allocated_buffers))) {
+ struct binder_buffer *buffer = rb_entry(n, struct binder_buffer,
+ rb_node);
+ t = buffer->transaction;
+ if (t) {
+ t->buffer = NULL;
+ buffer->transaction = NULL;
+ printk(KERN_ERR "binder: release proc %d, "
+ "transaction %d, not freed\n",
+ proc->pid, t->debug_id);
+ /*BUG();*/
+ }
+ binder_free_buf(proc, buffer);
+ buffers++;
+ }
+
+ binder_stats_deleted(BINDER_STAT_PROC);
+
+ page_count = 0;
+ if (proc->pages) {
+ int i;
+ for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) {
+ if (proc->pages[i]) {
+ void *page_addr = proc->buffer + i * PAGE_SIZE;
+ binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "binder_release: %d: "
+ "page %d at %p not freed\n",
+ proc->pid, i,
+ page_addr);
+ unmap_kernel_range((unsigned long)page_addr,
+ PAGE_SIZE);
+ __free_page(proc->pages[i]);
+ page_count++;
+ }
+ }
+ kfree(proc->pages);
+ vfree(proc->buffer);
+ }
+
+ put_task_struct(proc->tsk);
+
+ binder_debug(BINDER_DEBUG_OPEN_CLOSE,
+ "binder_release: %d threads %d, nodes %d (ref %d), "
+ "refs %d, active transactions %d, buffers %d, "
+ "pages %d\n",
+ proc->pid, threads, nodes, incoming_refs, outgoing_refs,
+ active_transactions, buffers, page_count);
+
+ kfree(proc);
+}
+
+static void binder_deferred_func(struct work_struct *work)
+{
+ struct binder_proc *proc;
+ struct files_struct *files;
+
+ int defer;
+ do {
+ mutex_lock(&binder_lock);
+ mutex_lock(&binder_deferred_lock);
+ if (!hlist_empty(&binder_deferred_list)) {
+ proc = hlist_entry(binder_deferred_list.first,
+ struct binder_proc, deferred_work_node);
+ hlist_del_init(&proc->deferred_work_node);
+ defer = proc->deferred_work;
+ proc->deferred_work = 0;
+ } else {
+ proc = NULL;
+ defer = 0;
+ }
+ mutex_unlock(&binder_deferred_lock);
+
+ files = NULL;
+ if (defer & BINDER_DEFERRED_PUT_FILES) {
+ files = proc->files;
+ if (files)
+ proc->files = NULL;
+ }
+
+ if (defer & BINDER_DEFERRED_FLUSH)
+ binder_deferred_flush(proc);
+
+ if (defer & BINDER_DEFERRED_RELEASE)
+ binder_deferred_release(proc); /* frees proc */
+
+ mutex_unlock(&binder_lock);
+ if (files)
+ put_files_struct(files);
+ } while (proc);
+}
+static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
+
+static void
+binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
+{
+ mutex_lock(&binder_deferred_lock);
+ proc->deferred_work |= defer;
+ if (hlist_unhashed(&proc->deferred_work_node)) {
+ hlist_add_head(&proc->deferred_work_node,
+ &binder_deferred_list);
+ queue_work(binder_deferred_workqueue, &binder_deferred_work);
+ }
+ mutex_unlock(&binder_deferred_lock);
+}
+
+static void print_binder_transaction(struct seq_file *m, const char *prefix,
+ struct binder_transaction *t)
+{
+ seq_printf(m,
+ "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
+ prefix, t->debug_id, t,
+ t->from ? t->from->proc->pid : 0,
+ t->from ? t->from->pid : 0,
+ t->to_proc ? t->to_proc->pid : 0,
+ t->to_thread ? t->to_thread->pid : 0,
+ t->code, t->flags, t->priority, t->need_reply);
+ if (t->buffer == NULL) {
+ seq_puts(m, " buffer free\n");
+ return;
+ }
+ if (t->buffer->target_node)
+ seq_printf(m, " node %d",
+ t->buffer->target_node->debug_id);
+ seq_printf(m, " size %zd:%zd data %p\n",
+ t->buffer->data_size, t->buffer->offsets_size,
+ t->buffer->data);
+}
+
+static void print_binder_buffer(struct seq_file *m, const char *prefix,
+ struct binder_buffer *buffer)
+{
+ seq_printf(m, "%s %d: %p size %zd:%zd %s\n",
+ prefix, buffer->debug_id, buffer->data,
+ buffer->data_size, buffer->offsets_size,
+ buffer->transaction ? "active" : "delivered");
+}
+
+static void print_binder_work(struct seq_file *m, const char *prefix,
+ const char *transaction_prefix,
+ struct binder_work *w)
+{
+ struct binder_node *node;
+ struct binder_transaction *t;
+
+ switch (w->type) {
+ case BINDER_WORK_TRANSACTION:
+ t = container_of(w, struct binder_transaction, work);
+ print_binder_transaction(m, transaction_prefix, t);
+ break;
+ case BINDER_WORK_TRANSACTION_COMPLETE:
+ seq_printf(m, "%stransaction complete\n", prefix);
+ break;
+ case BINDER_WORK_NODE:
+ node = container_of(w, struct binder_node, work);
+ seq_printf(m, "%snode work %d: u%p c%p\n",
+ prefix, node->debug_id, node->ptr, node->cookie);
+ break;
+ case BINDER_WORK_DEAD_BINDER:
+ seq_printf(m, "%shas dead binder\n", prefix);
+ break;
+ case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
+ seq_printf(m, "%shas cleared dead binder\n", prefix);
+ break;
+ case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
+ seq_printf(m, "%shas cleared death notification\n", prefix);
+ break;
+ default:
+ seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
+ break;
+ }
+}
+
+static void print_binder_thread(struct seq_file *m,
+ struct binder_thread *thread,
+ int print_always)
+{
+ struct binder_transaction *t;
+ struct binder_work *w;
+ size_t start_pos = m->count;
+ size_t header_pos;
+
+ seq_printf(m, " thread %d: l %02x\n", thread->pid, thread->looper);
+ header_pos = m->count;
+ t = thread->transaction_stack;
+ while (t) {
+ if (t->from == thread) {
+ print_binder_transaction(m,
+ " outgoing transaction", t);
+ t = t->from_parent;
+ } else if (t->to_thread == thread) {
+ print_binder_transaction(m,
+ " incoming transaction", t);
+ t = t->to_parent;
+ } else {
+ print_binder_transaction(m, " bad transaction", t);
+ t = NULL;
+ }
+ }
+ list_for_each_entry(w, &thread->todo, entry) {
+ print_binder_work(m, " ", " pending transaction", w);
+ }
+ if (!print_always && m->count == header_pos)
+ m->count = start_pos;
+}
+
+static void print_binder_node(struct seq_file *m, struct binder_node *node)
+{
+ struct binder_ref *ref;
+ struct hlist_node *pos;
+ struct binder_work *w;
+ int count;
+
+ count = 0;
+ hlist_for_each_entry(ref, pos, &node->refs, node_entry)
+ count++;
+
+ seq_printf(m, " node %d: u%p c%p hs %d hw %d ls %d lw %d is %d iw %d",
+ node->debug_id, node->ptr, node->cookie,
+ node->has_strong_ref, node->has_weak_ref,
+ node->local_strong_refs, node->local_weak_refs,
+ node->internal_strong_refs, count);
+ if (count) {
+ seq_puts(m, " proc");
+ hlist_for_each_entry(ref, pos, &node->refs, node_entry)
+ seq_printf(m, " %d", ref->proc->pid);
+ }
+ seq_puts(m, "\n");
+ list_for_each_entry(w, &node->async_todo, entry)
+ print_binder_work(m, " ",
+ " pending async transaction", w);
+}
+
+static void print_binder_ref(struct seq_file *m, struct binder_ref *ref)
+{
+ seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %p\n",
+ ref->debug_id, ref->desc, ref->node->proc ? "" : "dead ",
+ ref->node->debug_id, ref->strong, ref->weak, ref->death);
+}
+
+static void print_binder_proc(struct seq_file *m,
+ struct binder_proc *proc, int print_all)
+{
+ struct binder_work *w;
+ struct rb_node *n;
+ size_t start_pos = m->count;
+ size_t header_pos;
+
+ seq_printf(m, "proc %d\n", proc->pid);
+ header_pos = m->count;
+
+ for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
+ print_binder_thread(m, rb_entry(n, struct binder_thread,
+ rb_node), print_all);
+ for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
+ struct binder_node *node = rb_entry(n, struct binder_node,
+ rb_node);
+ if (print_all || node->has_async_transaction)
+ print_binder_node(m, node);
+ }
+ if (print_all) {
+ for (n = rb_first(&proc->refs_by_desc);
+ n != NULL;
+ n = rb_next(n))
+ print_binder_ref(m, rb_entry(n, struct binder_ref,
+ rb_node_desc));
+ }
+ for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
+ print_binder_buffer(m, " buffer",
+ rb_entry(n, struct binder_buffer, rb_node));
+ list_for_each_entry(w, &proc->todo, entry)
+ print_binder_work(m, " ", " pending transaction", w);
+ list_for_each_entry(w, &proc->delivered_death, entry) {
+ seq_puts(m, " has delivered dead binder\n");
+ break;
+ }
+ if (!print_all && m->count == header_pos)
+ m->count = start_pos;
+}
+
+static const char *binder_return_strings[] = {
+ "BR_ERROR",
+ "BR_OK",
+ "BR_TRANSACTION",
+ "BR_REPLY",
+ "BR_ACQUIRE_RESULT",
+ "BR_DEAD_REPLY",
+ "BR_TRANSACTION_COMPLETE",
+ "BR_INCREFS",
+ "BR_ACQUIRE",
+ "BR_RELEASE",
+ "BR_DECREFS",
+ "BR_ATTEMPT_ACQUIRE",
+ "BR_NOOP",
+ "BR_SPAWN_LOOPER",
+ "BR_FINISHED",
+ "BR_DEAD_BINDER",
+ "BR_CLEAR_DEATH_NOTIFICATION_DONE",
+ "BR_FAILED_REPLY"
+};
+
+static const char *binder_command_strings[] = {
+ "BC_TRANSACTION",
+ "BC_REPLY",
+ "BC_ACQUIRE_RESULT",
+ "BC_FREE_BUFFER",
+ "BC_INCREFS",
+ "BC_ACQUIRE",
+ "BC_RELEASE",
+ "BC_DECREFS",
+ "BC_INCREFS_DONE",
+ "BC_ACQUIRE_DONE",
+ "BC_ATTEMPT_ACQUIRE",
+ "BC_REGISTER_LOOPER",
+ "BC_ENTER_LOOPER",
+ "BC_EXIT_LOOPER",
+ "BC_REQUEST_DEATH_NOTIFICATION",
+ "BC_CLEAR_DEATH_NOTIFICATION",
+ "BC_DEAD_BINDER_DONE"
+};
+
+static const char *binder_objstat_strings[] = {
+ "proc",
+ "thread",
+ "node",
+ "ref",
+ "death",
+ "transaction",
+ "transaction_complete"
+};
+
+static void print_binder_stats(struct seq_file *m, const char *prefix,
+ struct binder_stats *stats)
+{
+ int i;
+
+ BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
+ ARRAY_SIZE(binder_command_strings));
+ for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
+ if (stats->bc[i])
+ seq_printf(m, "%s%s: %d\n", prefix,
+ binder_command_strings[i], stats->bc[i]);
+ }
+
+ BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
+ ARRAY_SIZE(binder_return_strings));
+ for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
+ if (stats->br[i])
+ seq_printf(m, "%s%s: %d\n", prefix,
+ binder_return_strings[i], stats->br[i]);
+ }
+
+ BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
+ ARRAY_SIZE(binder_objstat_strings));
+ BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
+ ARRAY_SIZE(stats->obj_deleted));
+ for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
+ if (stats->obj_created[i] || stats->obj_deleted[i])
+ seq_printf(m, "%s%s: active %d total %d\n", prefix,
+ binder_objstat_strings[i],
+ stats->obj_created[i] - stats->obj_deleted[i],
+ stats->obj_created[i]);
+ }
+}
+
+static void print_binder_proc_stats(struct seq_file *m,
+ struct binder_proc *proc)
+{
+ struct binder_work *w;
+ struct rb_node *n;
+ int count, strong, weak;
+
+ seq_printf(m, "proc %d\n", proc->pid);
+ count = 0;
+ for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
+ count++;
+ seq_printf(m, " threads: %d\n", count);
+ seq_printf(m, " requested threads: %d+%d/%d\n"
+ " ready threads %d\n"
+ " free async space %zd\n", proc->requested_threads,
+ proc->requested_threads_started, proc->max_threads,
+ proc->ready_threads, proc->free_async_space);
+ count = 0;
+ for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
+ count++;
+ seq_printf(m, " nodes: %d\n", count);
+ count = 0;
+ strong = 0;
+ weak = 0;
+ for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
+ struct binder_ref *ref = rb_entry(n, struct binder_ref,
+ rb_node_desc);
+ count++;
+ strong += ref->strong;
+ weak += ref->weak;
+ }
+ seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
+
+ count = 0;
+ for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
+ count++;
+ seq_printf(m, " buffers: %d\n", count);
+
+ count = 0;
+ list_for_each_entry(w, &proc->todo, entry) {
+ switch (w->type) {
+ case BINDER_WORK_TRANSACTION:
+ count++;
+ break;
+ default:
+ break;
+ }
+ }
+ seq_printf(m, " pending transactions: %d\n", count);
+
+ print_binder_stats(m, " ", &proc->stats);
+}
+
+
+static int binder_state_show(struct seq_file *m, void *unused)
+{
+ struct binder_proc *proc;
+ struct hlist_node *pos;
+ struct binder_node *node;
+ int do_lock = !binder_debug_no_lock;
+
+ if (do_lock)
+ mutex_lock(&binder_lock);
+
+ seq_puts(m, "binder state:\n");
+
+ if (!hlist_empty(&binder_dead_nodes))
+ seq_puts(m, "dead nodes:\n");
+ hlist_for_each_entry(node, pos, &binder_dead_nodes, dead_node)
+ print_binder_node(m, node);
+
+ hlist_for_each_entry(proc, pos, &binder_procs, proc_node)
+ print_binder_proc(m, proc, 1);
+ if (do_lock)
+ mutex_unlock(&binder_lock);
+ return 0;
+}
+
+static int binder_stats_show(struct seq_file *m, void *unused)
+{
+ struct binder_proc *proc;
+ struct hlist_node *pos;
+ int do_lock = !binder_debug_no_lock;
+
+ if (do_lock)
+ mutex_lock(&binder_lock);
+
+ seq_puts(m, "binder stats:\n");
+
+ print_binder_stats(m, "", &binder_stats);
+
+ hlist_for_each_entry(proc, pos, &binder_procs, proc_node)
+ print_binder_proc_stats(m, proc);
+ if (do_lock)
+ mutex_unlock(&binder_lock);
+ return 0;
+}
+
+static int binder_transactions_show(struct seq_file *m, void *unused)
+{
+	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
+	     BINDER_LOOPER_STATE_ENTERED))
+	    /* the user-space code fails to spawn a new thread if we leave this out */) {
+
+ if (do_lock)
+ mutex_lock(&binder_lock);
+
+ seq_puts(m, "binder transactions:\n");
+ hlist_for_each_entry(proc, pos, &binder_procs, proc_node)
+ print_binder_proc(m, proc, 0);
+ if (do_lock)
+ mutex_unlock(&binder_lock);
+ return 0;
+}
+
+static int binder_proc_show(struct seq_file *m, void *unused)
+{
+ struct binder_proc *proc = m->private;
+ int do_lock = !binder_debug_no_lock;
+
+ if (do_lock)
+ mutex_lock(&binder_lock);
+ seq_puts(m, "binder proc state:\n");
+ print_binder_proc(m, proc, 1);
+ if (do_lock)
+ mutex_unlock(&binder_lock);
+ return 0;
+}
+
+static void print_binder_transaction_log_entry(struct seq_file *m,
+ struct binder_transaction_log_entry *e)
+{
+ seq_printf(m,
+ "%d: %s from %d:%d to %d:%d node %d handle %d size %d:%d\n",
+ e->debug_id, (e->call_type == 2) ? "reply" :
+ ((e->call_type == 1) ? "async" : "call "), e->from_proc,
+ e->from_thread, e->to_proc, e->to_thread, e->to_node,
+ e->target_handle, e->data_size, e->offsets_size);
+}
+
+static int binder_transaction_log_show(struct seq_file *m, void *unused)
+{
+ struct binder_transaction_log *log = m->private;
+ int i;
+
+ if (log->full) {
+ for (i = log->next; i < ARRAY_SIZE(log->entry); i++)
+ print_binder_transaction_log_entry(m, &log->entry[i]);
+ }
+ for (i = 0; i < log->next; i++)
+ print_binder_transaction_log_entry(m, &log->entry[i]);
+ return 0;
+}
+
+static const struct file_operations binder_fops = {
+ .owner = THIS_MODULE,
+ .poll = binder_poll,
+ .unlocked_ioctl = binder_ioctl,
+ .mmap = binder_mmap,
+ .open = binder_open,
+ .flush = binder_flush,
+ .release = binder_release,
+};
+
+static struct miscdevice binder_miscdev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "binder",
+ .fops = &binder_fops
+};
+
+BINDER_DEBUG_ENTRY(state);
+BINDER_DEBUG_ENTRY(stats);
+BINDER_DEBUG_ENTRY(transactions);
+BINDER_DEBUG_ENTRY(transaction_log);
+
+static int __init binder_init(void)
+{
+ int ret;
+
+ binder_deferred_workqueue = create_singlethread_workqueue("binder");
+ if (!binder_deferred_workqueue)
+ return -ENOMEM;
+
+ binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
+ if (binder_debugfs_dir_entry_root)
+ binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
+ binder_debugfs_dir_entry_root);
+ ret = misc_register(&binder_miscdev);
+ if (binder_debugfs_dir_entry_root) {
+ debugfs_create_file("state",
+ S_IRUGO,
+ binder_debugfs_dir_entry_root,
+ NULL,
+ &binder_state_fops);
+ debugfs_create_file("stats",
+ S_IRUGO,
+ binder_debugfs_dir_entry_root,
+ NULL,
+ &binder_stats_fops);
+ debugfs_create_file("transactions",
+ S_IRUGO,
+ binder_debugfs_dir_entry_root,
+ NULL,
+ &binder_transactions_fops);
+ debugfs_create_file("transaction_log",
+ S_IRUGO,
+ binder_debugfs_dir_entry_root,
+ &binder_transaction_log,
+ &binder_transaction_log_fops);
+ debugfs_create_file("failed_transaction_log",
+ S_IRUGO,
+ binder_debugfs_dir_entry_root,
+ &binder_transaction_log_failed,
+ &binder_transaction_log_fops);
+ }
+ return ret;
+}
+
+device_initcall(binder_init);
+
+MODULE_LICENSE("GPL v2");
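
For illustration only (this sketch is not part of the patch): the file operations above tie the driver together — user space opens /dev/binder, maps the transaction buffer with mmap() (binder_mmap() clamps the mapping to 4 MB and the mapping here is read-only, matching what it expects), and then drives everything through ioctl(), most importantly BINDER_WRITE_READ. A minimal, hypothetical user-space sequence, assuming the ioctl numbers and structures from the binder.h header added below are visible to user space, might look like:

    /* Hypothetical user-space sketch, not part of this commit. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <unistd.h>
    #include "binder.h"		/* the header added below */

    int main(void)
    {
    	struct binder_version vers;
    	void *map;
    	int fd;

    	fd = open("/dev/binder", O_RDWR);
    	if (fd < 0)
    		return 1;

    	/* binder_mmap() caps the buffer at 4 MB; map it read-only. */
    	map = mmap(NULL, 1024 * 1024, PROT_READ, MAP_PRIVATE, fd, 0);
    	if (map == MAP_FAILED) {
    		close(fd);
    		return 1;
    	}

    	/* binder_ioctl(BINDER_VERSION) fills in the protocol version. */
    	if (ioctl(fd, BINDER_VERSION, &vers) == 0)
    		printf("binder protocol %ld\n", vers.protocol_version);

    	close(fd);
    	return 0;
    }
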
diff --git a/drivers/staging/android/binder.h b/drivers/staging/android/binder.h
new file mode 100644
index 000000000000..25ab6f2759e9
--- /dev/null
+++ b/drivers/staging/android/binder.h
@@ -0,0 +1,330 @@
+/*
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * Based on, but no longer compatible with, the original
+ * OpenBinder.org binder driver interface, which is:
+ *
+ * Copyright (c) 2005 Palmsource, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_BINDER_H
+#define _LINUX_BINDER_H
+
+#include <linux/ioctl.h>
+
+#define B_PACK_CHARS(c1, c2, c3, c4) \
+ ((((c1)<<24)) | (((c2)<<16)) | (((c3)<<8)) | (c4))
+#define B_TYPE_LARGE 0x85
+
+enum {
+ BINDER_TYPE_BINDER = B_PACK_CHARS('s', 'b', '*', B_TYPE_LARGE),
+ BINDER_TYPE_WEAK_BINDER = B_PACK_CHARS('w', 'b', '*', B_TYPE_LARGE),
+ BINDER_TYPE_HANDLE = B_PACK_CHARS('s', 'h', '*', B_TYPE_LARGE),
+ BINDER_TYPE_WEAK_HANDLE = B_PACK_CHARS('w', 'h', '*', B_TYPE_LARGE),
+ BINDER_TYPE_FD = B_PACK_CHARS('f', 'd', '*', B_TYPE_LARGE),
+};
+
+enum {
+ FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff,
+ FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100,
+};
+
+/*
+ * This is the flattened representation of a Binder object for transfer
+ * between processes. The 'offsets' supplied as part of a binder transaction
+ * contains offsets into the data where these structures occur. The Binder
+ * driver takes care of re-writing the structure type and data as it moves
+ * between processes.
+ */
+struct flat_binder_object {
+ /* 8 bytes for large_flat_header. */
+ unsigned long type;
+ unsigned long flags;
+
+ /* 8 bytes of data. */
+ union {
+ void *binder; /* local object */
+ signed long handle; /* remote object */
+ };
+
+ /* extra data associated with local object */
+ void *cookie;
+};
+
+/*
+ * On 64-bit platforms where user code may run in 32-bit mode, the driver must
+ * translate the buffer (and local binder) addresses appropriately.
+ */
+
+struct binder_write_read {
+ signed long write_size; /* bytes to write */
+ signed long write_consumed; /* bytes consumed by driver */
+ unsigned long write_buffer;
+ signed long read_size; /* bytes to read */
+ signed long read_consumed; /* bytes consumed by driver */
+ unsigned long read_buffer;
+};
+
+/* Use with BINDER_VERSION, driver fills in fields. */
+struct binder_version {
+ /* driver protocol version -- increment with incompatible change */
+ signed long protocol_version;
+};
+
+/* This is the current protocol version. */
+#define BINDER_CURRENT_PROTOCOL_VERSION 7
+
+#define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read)
+#define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, int64_t)
+#define BINDER_SET_MAX_THREADS _IOW('b', 5, size_t)
+#define BINDER_SET_IDLE_PRIORITY _IOW('b', 6, int)
+#define BINDER_SET_CONTEXT_MGR _IOW('b', 7, int)
+#define BINDER_THREAD_EXIT _IOW('b', 8, int)
+#define BINDER_VERSION _IOWR('b', 9, struct binder_version)
+
+/*
+ * NOTE: Two special error codes you should check for when calling
+ * in to the driver are:
+ *
+ * EINTR -- The operation has been interrupted.  This should be
+ * handled by retrying the ioctl() until a different error code
+ * is returned.
+ *
+ * ECONNREFUSED -- The driver is no longer accepting operations
+ * from your process. That is, the process is being destroyed.
+ * You should handle this by exiting from your process. Note
+ * that once this error code is returned, all further calls to
+ * the driver from any thread will return this same code.
+ */
+
+enum transaction_flags {
+ TF_ONE_WAY = 0x01, /* this is a one-way call: async, no return */
+ TF_ROOT_OBJECT = 0x04, /* contents are the component's root object */
+ TF_STATUS_CODE = 0x08, /* contents are a 32-bit status code */
+ TF_ACCEPT_FDS = 0x10, /* allow replies with file descriptors */
+};
+
+struct binder_transaction_data {
+ /* The first two are only used for bcTRANSACTION and brTRANSACTION,
+ * identifying the target and contents of the transaction.
+ */
+ union {
+ size_t handle; /* target descriptor of command transaction */
+ void *ptr; /* target descriptor of return transaction */
+ } target;
+ void *cookie; /* target object cookie */
+ unsigned int code; /* transaction command */
+
+ /* General information about the transaction. */
+ unsigned int flags;
+ pid_t sender_pid;
+ uid_t sender_euid;
+ size_t data_size; /* number of bytes of data */
+ size_t offsets_size; /* number of bytes of offsets */
+
+ /* If this transaction is inline, the data immediately
+ * follows here; otherwise, it ends with a pointer to
+ * the data buffer.
+ */
+ union {
+ struct {
+ /* transaction data */
+ const void *buffer;
+ /* offsets from buffer to flat_binder_object structs */
+ const void *offsets;
+ } ptr;
+ uint8_t buf[8];
+ } data;
+};
+
+struct binder_ptr_cookie {
+ void *ptr;
+ void *cookie;
+};
+
+struct binder_pri_desc {
+ int priority;
+ int desc;
+};
+
+struct binder_pri_ptr_cookie {
+ int priority;
+ void *ptr;
+ void *cookie;
+};
+
+enum BinderDriverReturnProtocol {
+ BR_ERROR = _IOR('r', 0, int),
+ /*
+ * int: error code
+ */
+
+ BR_OK = _IO('r', 1),
+ /* No parameters! */
+
+ BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data),
+ BR_REPLY = _IOR('r', 3, struct binder_transaction_data),
+ /*
+ * binder_transaction_data: the received command.
+ */
+
+ BR_ACQUIRE_RESULT = _IOR('r', 4, int),
+ /*
+ * not currently supported
+ * int: 0 if the last bcATTEMPT_ACQUIRE was not successful.
+ * Else the remote object has acquired a primary reference.
+ */
+
+ BR_DEAD_REPLY = _IO('r', 5),
+ /*
+ * The target of the last transaction (either a bcTRANSACTION or
+ * a bcATTEMPT_ACQUIRE) is no longer with us. No parameters.
+ */
+
+ BR_TRANSACTION_COMPLETE = _IO('r', 6),
+ /*
+ * No parameters... always refers to the last transaction requested
+ * (including replies). Note that this will be sent even for
+ * asynchronous transactions.
+ */
+
+ BR_INCREFS = _IOR('r', 7, struct binder_ptr_cookie),
+ BR_ACQUIRE = _IOR('r', 8, struct binder_ptr_cookie),
+ BR_RELEASE = _IOR('r', 9, struct binder_ptr_cookie),
+ BR_DECREFS = _IOR('r', 10, struct binder_ptr_cookie),
+ /*
+ * void *: ptr to binder
+ * void *: cookie for binder
+ */
+
+ BR_ATTEMPT_ACQUIRE = _IOR('r', 11, struct binder_pri_ptr_cookie),
+ /*
+ * not currently supported
+ * int: priority
+ * void *: ptr to binder
+ * void *: cookie for binder
+ */
+
+ BR_NOOP = _IO('r', 12),
+ /*
+ * No parameters. Do nothing and examine the next command. It exists
+ * primarily so that we can replace it with a BR_SPAWN_LOOPER command.
+ */
+
+ BR_SPAWN_LOOPER = _IO('r', 13),
+ /*
+ * No parameters. The driver has determined that a process has no
+ * threads waiting to service incoming transactions.  When a process
+ * receives this command, it must spawn a new service thread and
+ * register it via bcENTER_LOOPER.
+ */
+
+ BR_FINISHED = _IO('r', 14),
+ /*
+ * not currently supported
+ * stop threadpool thread
+ */
+
+ BR_DEAD_BINDER = _IOR('r', 15, void *),
+ /*
+ * void *: cookie
+ */
+ BR_CLEAR_DEATH_NOTIFICATION_DONE = _IOR('r', 16, void *),
+ /*
+ * void *: cookie
+ */
+
+ BR_FAILED_REPLY = _IO('r', 17),
+ /*
+ * The last transaction (either a bcTRANSACTION or
+ * a bcATTEMPT_ACQUIRE) failed (e.g. out of memory). No parameters.
+ */
+};
+
+enum BinderDriverCommandProtocol {
+ BC_TRANSACTION = _IOW('c', 0, struct binder_transaction_data),
+ BC_REPLY = _IOW('c', 1, struct binder_transaction_data),
+ /*
+ * binder_transaction_data: the sent command.
+ */
+
+ BC_ACQUIRE_RESULT = _IOW('c', 2, int),
+ /*
+ * not currently supported
+ * int: 0 if the last BR_ATTEMPT_ACQUIRE was not successful.
+ * Else you have acquired a primary reference on the object.
+ */
+
+ BC_FREE_BUFFER = _IOW('c', 3, int),
+ /*
+ * void *: ptr to transaction data received on a read
+ */
+
+ BC_INCREFS = _IOW('c', 4, int),
+ BC_ACQUIRE = _IOW('c', 5, int),
+ BC_RELEASE = _IOW('c', 6, int),
+ BC_DECREFS = _IOW('c', 7, int),
+ /*
+ * int: descriptor
+ */
+
+ BC_INCREFS_DONE = _IOW('c', 8, struct binder_ptr_cookie),
+ BC_ACQUIRE_DONE = _IOW('c', 9, struct binder_ptr_cookie),
+ /*
+ * void *: ptr to binder
+ * void *: cookie for binder
+ */
+
+ BC_ATTEMPT_ACQUIRE = _IOW('c', 10, struct binder_pri_desc),
+ /*
+ * not currently supported
+ * int: priority
+ * int: descriptor
+ */
+
+ BC_REGISTER_LOOPER = _IO('c', 11),
+ /*
+ * No parameters.
+ * Register a spawned looper thread with the device.
+ */
+
+ BC_ENTER_LOOPER = _IO('c', 12),
+ BC_EXIT_LOOPER = _IO('c', 13),
+ /*
+ * No parameters.
+ * These two commands are sent as an application-level thread
+ * enters and exits the binder loop, respectively. They are
+ * used so the binder can have an accurate count of the number
+ * of looping threads it has available.
+ */
+
+ BC_REQUEST_DEATH_NOTIFICATION = _IOW('c', 14, struct binder_ptr_cookie),
+ /*
+ * void *: ptr to binder
+ * void *: cookie
+ */
+
+ BC_CLEAR_DEATH_NOTIFICATION = _IOW('c', 15, struct binder_ptr_cookie),
+ /*
+ * void *: ptr to binder
+ * void *: cookie
+ */
+
+ BC_DEAD_BINDER_DONE = _IOW('c', 16, void *),
+ /*
+ * void *: cookie
+ */
+};
+
+#endif /* _LINUX_BINDER_H */
+
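
For illustration only (not part of this patch): the NOTE above says EINTR from the driver should be handled by retrying the ioctl(), and BC_ENTER_LOOPER is how a thread announces that it has entered the binder read loop. A hypothetical user-space helper along those lines, assuming the definitions from this header and with the function names invented for the example, might look like:

    /* Hypothetical user-space sketch, not part of this commit. */
    #include <errno.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <sys/ioctl.h>
    #include "binder.h"

    /* Send a block of BC_* commands; retry while the driver reports EINTR. */
    static int binder_write(int fd, void *data, size_t len)
    {
    	struct binder_write_read bwr = {
    		.write_size = len,
    		.write_consumed = 0,
    		.write_buffer = (unsigned long)data,
    		.read_size = 0,
    		.read_consumed = 0,
    		.read_buffer = 0,
    	};
    	int ret;

    	do {
    		ret = ioctl(fd, BINDER_WRITE_READ, &bwr);
    	} while (ret < 0 && errno == EINTR);

    	return ret;
    }

    /* Tell the driver this thread has entered its read loop. */
    static int binder_enter_looper(int fd)
    {
    	uint32_t cmd = BC_ENTER_LOOPER;

    	return binder_write(fd, &cmd, sizeof(cmd));
    }
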
diff --git a/drivers/staging/android/logger.c b/drivers/staging/android/logger.c
new file mode 100644
index 000000000000..ffc2d043dd8e
--- /dev/null
+++ b/drivers/staging/android/logger.c
@@ -0,0 +1,616 @@
+/*
+ * drivers/misc/logger.c
+ *
+ * A Logging Subsystem
+ *
+ * Copyright (C) 2007-2008 Google, Inc.
+ *
+ * Robert Love <rlove@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/time.h>
+#include "logger.h"
+
+#include <asm/ioctls.h>
+
+/*
+ * struct logger_log - represents a specific log, such as 'main' or 'radio'
+ *
+ * This structure lives from module insertion until module removal, so it does
+ * not need additional reference counting. The structure is protected by the
+ * mutex 'mutex'.
+ */
+struct logger_log {
+ unsigned char *buffer;/* the ring buffer itself */
+ struct miscdevice misc; /* misc device representing the log */
+ wait_queue_head_t wq; /* wait queue for readers */
+ struct list_head readers; /* this log's readers */
+ struct mutex mutex; /* mutex protecting buffer */
+ size_t w_off; /* current write head offset */
+ size_t head; /* new readers start here */
+ size_t size; /* size of the log */
+};
+
+/*
+ * struct logger_reader - a logging device open for reading
+ *
+ * This object lives from open to release, so we don't need additional
+ * reference counting. The structure is protected by log->mutex.
+ */
+struct logger_reader {
+ struct logger_log *log; /* associated log */
+ struct list_head list; /* entry in logger_log's list */
+ size_t r_off; /* current read head offset */
+};
+
+/* logger_offset - returns index 'n' into the log via (optimized) modulus */
+#define logger_offset(n) ((n) & (log->size - 1))
+
+/*
+ * file_get_log - Given a file structure, return the associated log
+ *
+ * This isn't aesthetic. We have several goals:
+ *
+ * 1) Need to quickly obtain the associated log during an I/O operation
+ * 2) Readers need to maintain state (logger_reader)
+ * 3) Writers need to be very fast (open() should be a near no-op)
+ *
+ * In the reader case, we can trivially go file->logger_reader->logger_log.
+ * For a writer, we don't want to maintain a logger_reader, so we just go
+ * file->logger_log. Thus what file->private_data points at depends on whether
+ * or not the file was opened for reading. This function hides that dirtiness.
+ */
+static inline struct logger_log *file_get_log(struct file *file)
+{
+ if (file->f_mode & FMODE_READ) {
+ struct logger_reader *reader = file->private_data;
+ return reader->log;
+ } else
+ return file->private_data;
+}
+
+/*
+ * get_entry_len - Grabs the length (header plus payload) of the next entry
+ * starting from 'off'.
+ *
+ * Caller needs to hold log->mutex.
+ */
+static __u32 get_entry_len(struct logger_log *log, size_t off)
+{
+ __u16 val;
+
+ switch (log->size - off) {
+ case 1:
+ memcpy(&val, log->buffer + off, 1);
+ memcpy(((char *) &val) + 1, log->buffer, 1);
+ break;
+ default:
+ memcpy(&val, log->buffer + off, 2);
+ }
+
+ return sizeof(struct logger_entry) + val;
+}
+
+/*
+ * do_read_log_to_user - reads exactly 'count' bytes from 'log' into the
+ * user-space buffer 'buf'. Returns 'count' on success.
+ *
+ * Caller must hold log->mutex.
+ */
+static ssize_t do_read_log_to_user(struct logger_log *log,
+ struct logger_reader *reader,
+ char __user *buf,
+ size_t count)
+{
+ size_t len;
+
+ /*
+ * We read from the log in two disjoint operations. First, we read from
+ * the current read head offset up to 'count' bytes or to the end of
+ * the log, whichever comes first.
+ */
+ len = min(count, log->size - reader->r_off);
+ if (copy_to_user(buf, log->buffer + reader->r_off, len))
+ return -EFAULT;
+
+ /*
+ * Second, we read any remaining bytes, starting back at the head of
+ * the log.
+ */
+ if (count != len)
+ if (copy_to_user(buf + len, log->buffer, count - len))
+ return -EFAULT;
+
+ reader->r_off = logger_offset(reader->r_off + count);
+
+ return count;
+}
+
+/*
+ * logger_read - our log's read() method
+ *
+ * Behavior:
+ *
+ * - O_NONBLOCK works
+ * - If there are no log entries to read, blocks until log is written to
+ * - Atomically reads exactly one log entry
+ *
+ * Optimal read size is LOGGER_ENTRY_MAX_LEN. Will set errno to EINVAL if read
+ * buffer is insufficient to hold next entry.
+ */
+static ssize_t logger_read(struct file *file, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct logger_reader *reader = file->private_data;
+ struct logger_log *log = reader->log;
+ ssize_t ret;
+ DEFINE_WAIT(wait);
+
+start:
+ while (1) {
+ prepare_to_wait(&log->wq, &wait, TASK_INTERRUPTIBLE);
+
+ mutex_lock(&log->mutex);
+ ret = (log->w_off == reader->r_off);
+ mutex_unlock(&log->mutex);
+ if (!ret)
+ break;
+
+ if (file->f_flags & O_NONBLOCK) {
+ ret = -EAGAIN;
+ break;
+ }
+
+ if (signal_pending(current)) {
+ ret = -EINTR;
+ break;
+ }
+
+ schedule();
+ }
+
+ finish_wait(&log->wq, &wait);
+ if (ret)
+ return ret;
+
+ mutex_lock(&log->mutex);
+
+ /* is there still something to read or did we race? */
+ if (unlikely(log->w_off == reader->r_off)) {
+ mutex_unlock(&log->mutex);
+ goto start;
+ }
+
+ /* get the size of the next entry */
+ ret = get_entry_len(log, reader->r_off);
+ if (count < ret) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* get exactly one entry from the log */
+ ret = do_read_log_to_user(log, reader, buf, ret);
+
+out:
+ mutex_unlock(&log->mutex);
+
+ return ret;
+}
+
+/*
+ * get_next_entry - return the offset of the first valid entry at least 'len'
+ * bytes after 'off'.
+ *
+ * Caller must hold log->mutex.
+ */
+static size_t get_next_entry(struct logger_log *log, size_t off, size_t len)
+{
+ size_t count = 0;
+
+ do {
+ size_t nr = get_entry_len(log, off);
+ off = logger_offset(off + nr);
+ count += nr;
+ } while (count < len);
+
+ return off;
+}
+
+/*
+ * clock_interval - is a < c < b in mod-space? Put another way, does the line
+ * from a to b cross c?
+ */
+static inline int clock_interval(size_t a, size_t b, size_t c)
+{
+ if (b < a) {
+ if (a < c || b >= c)
+ return 1;
+ } else {
+ if (a < c && b >= c)
+ return 1;
+ }
+
+ return 0;
+}
+
+/*
+ * fix_up_readers - walk the list of all readers and "fix up" any who were
+ * lapped by the writer; also do the same for the default "start head".
+ * We do this by "pulling forward" the readers and start head to the first
+ * entry after the new write head.
+ *
+ * The caller needs to hold log->mutex.
+ */
+static void fix_up_readers(struct logger_log *log, size_t len)
+{
+ size_t old = log->w_off;
+ size_t new = logger_offset(old + len);
+ struct logger_reader *reader;
+
+ if (clock_interval(old, new, log->head))
+ log->head = get_next_entry(log, log->head, len);
+
+ list_for_each_entry(reader, &log->readers, list)
+ if (clock_interval(old, new, reader->r_off))
+ reader->r_off = get_next_entry(log, reader->r_off, len);
+}
+
+/*
+ * do_write_log - writes 'count' bytes from 'buf' to 'log'
+ *
+ * The caller needs to hold log->mutex.
+ */
+static void do_write_log(struct logger_log *log, const void *buf, size_t count)
+{
+ size_t len;
+
+ len = min(count, log->size - log->w_off);
+ memcpy(log->buffer + log->w_off, buf, len);
+
+ if (count != len)
+ memcpy(log->buffer, buf + len, count - len);
+
+ log->w_off = logger_offset(log->w_off + count);
+
+}
+
+/*
+ * do_write_log_from_user - writes 'count' bytes from the user-space buffer
+ * 'buf' to the log 'log'
+ *
+ * The caller needs to hold log->mutex.
+ *
+ * Returns 'count' on success, negative error code on failure.
+ */
+static ssize_t do_write_log_from_user(struct logger_log *log,
+ const void __user *buf, size_t count)
+{
+ size_t len;
+
+ len = min(count, log->size - log->w_off);
+ if (len && copy_from_user(log->buffer + log->w_off, buf, len))
+ return -EFAULT;
+
+ if (count != len)
+ if (copy_from_user(log->buffer, buf + len, count - len))
+ return -EFAULT;
+
+ log->w_off = logger_offset(log->w_off + count);
+
+ return count;
+}
+
+/*
+ * logger_aio_write - our write method, implementing support for write(),
+ * writev(), and aio_write(). Writes are our fast path, and we try to optimize
+ * them above all else.
+ */
+ssize_t logger_aio_write(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long nr_segs, loff_t ppos)
+{
+ struct logger_log *log = file_get_log(iocb->ki_filp);
+ size_t orig = log->w_off;
+ struct logger_entry header;
+ struct timespec now;
+ ssize_t ret = 0;
+
+ now = current_kernel_time();
+
+ header.pid = current->tgid;
+ header.tid = current->pid;
+ header.sec = now.tv_sec;
+ header.nsec = now.tv_nsec;
+ header.len = min_t(size_t, iocb->ki_left, LOGGER_ENTRY_MAX_PAYLOAD);
+
+ /* null writes succeed, return zero */
+ if (unlikely(!header.len))
+ return 0;
+
+ mutex_lock(&log->mutex);
+
+ /*
+ * Fix up any readers, pulling them forward to the first readable
+ * entry after (what will be) the new write offset. We do this now
+ * because if we partially fail, we can end up with clobbered log
+	 * entries that encroach on the readable buffer.
+ */
+ fix_up_readers(log, sizeof(struct logger_entry) + header.len);
+
+ do_write_log(log, &header, sizeof(struct logger_entry));
+
+ while (nr_segs-- > 0) {
+ size_t len;
+ ssize_t nr;
+
+ /* figure out how much of this vector we can keep */
+ len = min_t(size_t, iov->iov_len, header.len - ret);
+
+ /* write out this segment's payload */
+ nr = do_write_log_from_user(log, iov->iov_base, len);
+ if (unlikely(nr < 0)) {
+ log->w_off = orig;
+ mutex_unlock(&log->mutex);
+ return nr;
+ }
+
+ iov++;
+ ret += nr;
+ }
+
+ mutex_unlock(&log->mutex);
+
+ /* wake up any blocked readers */
+ wake_up_interruptible(&log->wq);
+
+ return ret;
+}
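As a rough user-space illustration of the write path: one writev() call becomes exactly one atomic log entry, with the header filled in by the kernel. The device node path and the priority/tag/message layout below are Android userspace conventions assumed for the sketch, not anything this driver enforces:

#include <fcntl.h>
#include <string.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/log_main", O_WRONLY);   /* illustrative node path */
	unsigned char prio = 4;                     /* e.g. "info" */
	const char *tag = "example";
	const char *msg = "hello from user space";
	struct iovec vec[3] = {
		{ &prio, 1 },
		{ (void *)tag, strlen(tag) + 1 },
		{ (void *)msg, strlen(msg) + 1 },
	};

	if (fd >= 0) {
		writev(fd, vec, 3);   /* all three segments land in one entry */
		close(fd);
	}
	return 0;
}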
+
+static struct logger_log *get_log_from_minor(int);
+
+/*
+ * logger_open - the log's open() file operation
+ *
+ * Note how near a no-op this is in the write-only case. Keep it that way!
+ */
+static int logger_open(struct inode *inode, struct file *file)
+{
+ struct logger_log *log;
+ int ret;
+
+ ret = nonseekable_open(inode, file);
+ if (ret)
+ return ret;
+
+ log = get_log_from_minor(MINOR(inode->i_rdev));
+ if (!log)
+ return -ENODEV;
+
+ if (file->f_mode & FMODE_READ) {
+ struct logger_reader *reader;
+
+ reader = kmalloc(sizeof(struct logger_reader), GFP_KERNEL);
+ if (!reader)
+ return -ENOMEM;
+
+ reader->log = log;
+ INIT_LIST_HEAD(&reader->list);
+
+ mutex_lock(&log->mutex);
+ reader->r_off = log->head;
+ list_add_tail(&reader->list, &log->readers);
+ mutex_unlock(&log->mutex);
+
+ file->private_data = reader;
+ } else
+ file->private_data = log;
+
+ return 0;
+}
+
+/*
+ * logger_release - the log's release file operation
+ *
+ * Note this is a total no-op in the write-only case. Keep it that way!
+ */
+static int logger_release(struct inode *ignored, struct file *file)
+{
+ if (file->f_mode & FMODE_READ) {
+ struct logger_reader *reader = file->private_data;
+ list_del(&reader->list);
+ kfree(reader);
+ }
+
+ return 0;
+}
+
+/*
+ * logger_poll - the log's poll file operation, for poll/select/epoll
+ *
+ * Note we always return POLLOUT, because you can always write() to the log.
+ * Note also that, strictly speaking, a return value of POLLIN does not
+ * guarantee that the log is readable without blocking, as there is a small
+ * chance that the writer can lap the reader in the interim between poll()
+ * returning and the read() request.
+ */
+static unsigned int logger_poll(struct file *file, poll_table *wait)
+{
+ struct logger_reader *reader;
+ struct logger_log *log;
+ unsigned int ret = POLLOUT | POLLWRNORM;
+
+ if (!(file->f_mode & FMODE_READ))
+ return ret;
+
+ reader = file->private_data;
+ log = reader->log;
+
+ poll_wait(file, &log->wq, wait);
+
+ mutex_lock(&log->mutex);
+ if (log->w_off != reader->r_off)
+ ret |= POLLIN | POLLRDNORM;
+ mutex_unlock(&log->mutex);
+
+ return ret;
+}
+
+static long logger_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct logger_log *log = file_get_log(file);
+ struct logger_reader *reader;
+ long ret = -ENOTTY;
+
+ mutex_lock(&log->mutex);
+
+ switch (cmd) {
+ case LOGGER_GET_LOG_BUF_SIZE:
+ ret = log->size;
+ break;
+ case LOGGER_GET_LOG_LEN:
+ if (!(file->f_mode & FMODE_READ)) {
+ ret = -EBADF;
+ break;
+ }
+ reader = file->private_data;
+ if (log->w_off >= reader->r_off)
+ ret = log->w_off - reader->r_off;
+ else
+ ret = (log->size - reader->r_off) + log->w_off;
+ break;
+ case LOGGER_GET_NEXT_ENTRY_LEN:
+ if (!(file->f_mode & FMODE_READ)) {
+ ret = -EBADF;
+ break;
+ }
+ reader = file->private_data;
+ if (log->w_off != reader->r_off)
+ ret = get_entry_len(log, reader->r_off);
+ else
+ ret = 0;
+ break;
+ case LOGGER_FLUSH_LOG:
+ if (!(file->f_mode & FMODE_WRITE)) {
+ ret = -EBADF;
+ break;
+ }
+ list_for_each_entry(reader, &log->readers, list)
+ reader->r_off = log->w_off;
+ log->head = log->w_off;
+ ret = 0;
+ break;
+ }
+
+ mutex_unlock(&log->mutex);
+
+ return ret;
+}
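A hedged user-space sketch of the reader-side ioctls defined in logger.h; the device path is illustrative:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "logger.h"

int main(void)
{
	int fd = open("/dev/log_main", O_RDONLY);

	if (fd < 0)
		return 1;
	long buf_size = ioctl(fd, LOGGER_GET_LOG_BUF_SIZE);   /* total ring size */
	long used     = ioctl(fd, LOGGER_GET_LOG_LEN);        /* bytes behind this reader */
	long next     = ioctl(fd, LOGGER_GET_NEXT_ENTRY_LEN); /* next entry size, 0 if none */

	printf("ring %ld bytes, %ld unread, next entry %ld bytes\n",
	       buf_size, used, next);
	close(fd);
	return 0;
}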
+
+static const struct file_operations logger_fops = {
+ .owner = THIS_MODULE,
+ .read = logger_read,
+ .aio_write = logger_aio_write,
+ .poll = logger_poll,
+ .unlocked_ioctl = logger_ioctl,
+ .compat_ioctl = logger_ioctl,
+ .open = logger_open,
+ .release = logger_release,
+};
+
+/*
+ * Defines a log structure with name 'NAME' and a size of 'SIZE' bytes, which
+ * must be a power of two, greater than LOGGER_ENTRY_MAX_LEN, and less than
+ * LONG_MAX minus LOGGER_ENTRY_MAX_LEN.
+ */
+#define DEFINE_LOGGER_DEVICE(VAR, NAME, SIZE) \
+static unsigned char _buf_ ## VAR[SIZE]; \
+static struct logger_log VAR = { \
+ .buffer = _buf_ ## VAR, \
+ .misc = { \
+ .minor = MISC_DYNAMIC_MINOR, \
+ .name = NAME, \
+ .fops = &logger_fops, \
+ .parent = NULL, \
+ }, \
+ .wq = __WAIT_QUEUE_HEAD_INITIALIZER(VAR .wq), \
+ .readers = LIST_HEAD_INIT(VAR .readers), \
+ .mutex = __MUTEX_INITIALIZER(VAR .mutex), \
+ .w_off = 0, \
+ .head = 0, \
+ .size = SIZE, \
+};
+
+DEFINE_LOGGER_DEVICE(log_main, LOGGER_LOG_MAIN, 256*1024)
+DEFINE_LOGGER_DEVICE(log_events, LOGGER_LOG_EVENTS, 256*1024)
+DEFINE_LOGGER_DEVICE(log_radio, LOGGER_LOG_RADIO, 256*1024)
+DEFINE_LOGGER_DEVICE(log_system, LOGGER_LOG_SYSTEM, 256*1024)
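All four Android log devices above are 256 KiB rings. For illustration only, a fifth device would be one more use of the macro plus an init_log() call; the name and size here are hypothetical and get_log_from_minor() would also need a matching branch:

/* Hypothetical extra device: a 64 KiB crash log (not part of this patch). */
DEFINE_LOGGER_DEVICE(log_crash, "log_crash", 64*1024)

/* ...and in logger_init():
 *	ret = init_log(&log_crash);
 *	if (unlikely(ret))
 *		goto out;
 */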
+
+static struct logger_log *get_log_from_minor(int minor)
+{
+ if (log_main.misc.minor == minor)
+ return &log_main;
+ if (log_events.misc.minor == minor)
+ return &log_events;
+ if (log_radio.misc.minor == minor)
+ return &log_radio;
+ if (log_system.misc.minor == minor)
+ return &log_system;
+ return NULL;
+}
+
+static int __init init_log(struct logger_log *log)
+{
+ int ret;
+
+ ret = misc_register(&log->misc);
+ if (unlikely(ret)) {
+ printk(KERN_ERR "logger: failed to register misc "
+ "device for log '%s'!\n", log->misc.name);
+ return ret;
+ }
+
+ printk(KERN_INFO "logger: created %luK log '%s'\n",
+ (unsigned long) log->size >> 10, log->misc.name);
+
+ return 0;
+}
+
+static int __init logger_init(void)
+{
+ int ret;
+
+ ret = init_log(&log_main);
+ if (unlikely(ret))
+ goto out;
+
+ ret = init_log(&log_events);
+ if (unlikely(ret))
+ goto out;
+
+ ret = init_log(&log_radio);
+ if (unlikely(ret))
+ goto out;
+
+ ret = init_log(&log_system);
+ if (unlikely(ret))
+ goto out;
+
+out:
+ return ret;
+}
+device_initcall(logger_init);
diff --git a/drivers/staging/android/logger.h b/drivers/staging/android/logger.h
new file mode 100644
index 000000000000..2cb06e9d8f98
--- /dev/null
+++ b/drivers/staging/android/logger.h
@@ -0,0 +1,49 @@
+/* include/linux/logger.h
+ *
+ * Copyright (C) 2007-2008 Google, Inc.
+ * Author: Robert Love <rlove@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_LOGGER_H
+#define _LINUX_LOGGER_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+struct logger_entry {
+ __u16 len; /* length of the payload */
+ __u16 __pad; /* no matter what, we get 2 bytes of padding */
+ __s32 pid; /* generating process's pid */
+ __s32 tid; /* generating process's tid */
+ __s32 sec; /* seconds since Epoch */
+ __s32 nsec; /* nanoseconds */
+ char msg[0]; /* the entry's payload */
+};
+
+#define LOGGER_LOG_RADIO "log_radio" /* radio-related messages */
+#define LOGGER_LOG_EVENTS "log_events" /* system/hardware events */
+#define LOGGER_LOG_SYSTEM "log_system" /* system/framework messages */
+#define LOGGER_LOG_MAIN "log_main" /* everything else */
+
+#define LOGGER_ENTRY_MAX_LEN (4*1024)
+#define LOGGER_ENTRY_MAX_PAYLOAD \
+ (LOGGER_ENTRY_MAX_LEN - sizeof(struct logger_entry))
+
+#define __LOGGERIO 0xAE
+
+#define LOGGER_GET_LOG_BUF_SIZE _IO(__LOGGERIO, 1) /* size of log */
+#define LOGGER_GET_LOG_LEN _IO(__LOGGERIO, 2) /* used log len */
+#define LOGGER_GET_NEXT_ENTRY_LEN _IO(__LOGGERIO, 3) /* next entry len */
+#define LOGGER_FLUSH_LOG _IO(__LOGGERIO, 4) /* flush log */
+
+#endif /* _LINUX_LOGGER_H */
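For completeness, a hedged user-space sketch of the read side: each read() returns one struct logger_entry header followed by header.len payload bytes (device path illustrative):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include "logger.h"

int main(void)
{
	unsigned char buf[LOGGER_ENTRY_MAX_LEN];
	int fd = open("/dev/log_main", O_RDONLY);

	if (fd < 0)
		return 1;
	for (;;) {
		ssize_t n = read(fd, buf, sizeof(buf));  /* blocks until an entry arrives */
		struct logger_entry *e = (struct logger_entry *)buf;

		if (n < (ssize_t)sizeof(*e))
			break;
		printf("pid %d tid %d %d.%09d: %d payload bytes\n",
		       e->pid, e->tid, e->sec, e->nsec, e->len);
	}
	close(fd);
	return 0;
}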
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
new file mode 100644
index 000000000000..2d8d2b796101
--- /dev/null
+++ b/drivers/staging/android/lowmemorykiller.c
@@ -0,0 +1,219 @@
+/* drivers/misc/lowmemorykiller.c
+ *
+ * The lowmemorykiller driver lets user-space specify a set of memory thresholds
+ * at which processes with a range of oom_adj values will get killed. Specify
+ * the minimum oom_adj values in /sys/module/lowmemorykiller/parameters/adj and
+ * the corresponding numbers of free pages in
+ * /sys/module/lowmemorykiller/parameters/minfree. Both files take a
+ * comma-separated list of numbers in ascending order.
+ *
+ * For example, write "0,8" to /sys/module/lowmemorykiller/parameters/adj and
+ * "1024,4096" to /sys/module/lowmemorykiller/parameters/minfree to kill
+ * processes with an oom_adj value of 8 or higher when the free memory drops
+ * below 4096 pages, and processes with an oom_adj value of 0 or higher when
+ * the free memory drops below 1024 pages. (A small user-space sketch of
+ * setting these follows the header comment.)
+ *
+ * The driver considers memory used for caches to be free, but if a large
+ * percentage of the cached memory is locked, this can be very inaccurate and
+ * processes may not get killed until the normal OOM killer is triggered.
+ *
+ * Copyright (C) 2007-2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
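A minimal user-space sketch of programming the thresholds from the example in the header comment; the paths are the module parameter files named there:

#include <stdio.h>

static int write_param(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	write_param("/sys/module/lowmemorykiller/parameters/adj", "0,8");
	write_param("/sys/module/lowmemorykiller/parameters/minfree", "1024,4096");
	return 0;
}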
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/oom.h>
+#include <linux/sched.h>
+#include <linux/profile.h>
+#include <linux/notifier.h>
+
+static uint32_t lowmem_debug_level = 2;
+static int lowmem_adj[6] = {
+ 0,
+ 1,
+ 6,
+ 12,
+};
+static int lowmem_adj_size = 4;
+static size_t lowmem_minfree[6] = {
+ 3 * 512, /* 6MB */
+ 2 * 1024, /* 8MB */
+ 4 * 1024, /* 16MB */
+ 16 * 1024, /* 64MB */
+};
+static int lowmem_minfree_size = 4;
+
+static struct task_struct *lowmem_deathpending;
+
+#define lowmem_print(level, x...) \
+ do { \
+ if (lowmem_debug_level >= (level)) \
+ printk(x); \
+ } while (0)
+
+static int
+task_notify_func(struct notifier_block *self, unsigned long val, void *data);
+
+static struct notifier_block task_nb = {
+ .notifier_call = task_notify_func,
+};
+
+static int
+task_notify_func(struct notifier_block *self, unsigned long val, void *data)
+{
+ struct task_struct *task = data;
+ if (task == lowmem_deathpending) {
+ lowmem_deathpending = NULL;
+ task_handoff_unregister(&task_nb);
+ }
+ return NOTIFY_OK;
+}
+
+static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
+{
+ struct task_struct *p;
+ struct task_struct *selected = NULL;
+ int rem = 0;
+ int tasksize;
+ int i;
+ int min_adj = OOM_ADJUST_MAX + 1;
+ int selected_tasksize = 0;
+ int selected_oom_adj;
+ int array_size = ARRAY_SIZE(lowmem_adj);
+ int other_free = global_page_state(NR_FREE_PAGES);
+ int other_file = global_page_state(NR_FILE_PAGES) -
+ global_page_state(NR_SHMEM);
+
+ /*
+ * If we already have a death outstanding, then
+ * bail out right away; indicating to vmscan
+ * that we have nothing further to offer on
+ * this pass.
+ *
+ * Note: Currently you need CONFIG_PROFILING
+ * for this to work correctly.
+ */
+ if (lowmem_deathpending)
+ return 0;
+
+ if (lowmem_adj_size < array_size)
+ array_size = lowmem_adj_size;
+ if (lowmem_minfree_size < array_size)
+ array_size = lowmem_minfree_size;
+ for (i = 0; i < array_size; i++) {
+ if (other_free < lowmem_minfree[i] &&
+ other_file < lowmem_minfree[i]) {
+ min_adj = lowmem_adj[i];
+ break;
+ }
+ }
+ if (sc->nr_to_scan > 0)
+ lowmem_print(3, "lowmem_shrink %lu, %x, ofree %d %d, ma %d\n",
+ sc->nr_to_scan, sc->gfp_mask, other_free,
+ other_file, min_adj);
+ rem = global_page_state(NR_ACTIVE_ANON) +
+ global_page_state(NR_ACTIVE_FILE) +
+ global_page_state(NR_INACTIVE_ANON) +
+ global_page_state(NR_INACTIVE_FILE);
+ if (sc->nr_to_scan <= 0 || min_adj == OOM_ADJUST_MAX + 1) {
+ lowmem_print(5, "lowmem_shrink %lu, %x, return %d\n",
+ sc->nr_to_scan, sc->gfp_mask, rem);
+ return rem;
+ }
+ selected_oom_adj = min_adj;
+
+ read_lock(&tasklist_lock);
+ for_each_process(p) {
+ struct mm_struct *mm;
+ struct signal_struct *sig;
+ int oom_adj;
+
+ task_lock(p);
+ mm = p->mm;
+ sig = p->signal;
+ if (!mm || !sig) {
+ task_unlock(p);
+ continue;
+ }
+ oom_adj = sig->oom_adj;
+ if (oom_adj < min_adj) {
+ task_unlock(p);
+ continue;
+ }
+ tasksize = get_mm_rss(mm);
+ task_unlock(p);
+ if (tasksize <= 0)
+ continue;
+ if (selected) {
+ if (oom_adj < selected_oom_adj)
+ continue;
+ if (oom_adj == selected_oom_adj &&
+ tasksize <= selected_tasksize)
+ continue;
+ }
+ selected = p;
+ selected_tasksize = tasksize;
+ selected_oom_adj = oom_adj;
+ lowmem_print(2, "select %d (%s), adj %d, size %d, to kill\n",
+ p->pid, p->comm, oom_adj, tasksize);
+ }
+ if (selected) {
+ lowmem_print(1, "send sigkill to %d (%s), adj %d, size %d\n",
+ selected->pid, selected->comm,
+ selected_oom_adj, selected_tasksize);
+ /*
+ * If CONFIG_PROFILING is off, then task_handoff_register()
+ * is a nop. In that case we don't want to stall the killer
+ * by setting lowmem_deathpending.
+ */
+#ifdef CONFIG_PROFILING
+ lowmem_deathpending = selected;
+ task_handoff_register(&task_nb);
+#endif
+ force_sig(SIGKILL, selected);
+ rem -= selected_tasksize;
+ }
+ lowmem_print(4, "lowmem_shrink %lu, %x, return %d\n",
+ sc->nr_to_scan, sc->gfp_mask, rem);
+ read_unlock(&tasklist_lock);
+ return rem;
+}
+
+static struct shrinker lowmem_shrinker = {
+ .shrink = lowmem_shrink,
+ .seeks = DEFAULT_SEEKS * 16
+};
+
+static int __init lowmem_init(void)
+{
+ register_shrinker(&lowmem_shrinker);
+ return 0;
+}
+
+static void __exit lowmem_exit(void)
+{
+ unregister_shrinker(&lowmem_shrinker);
+}
+
+module_param_named(cost, lowmem_shrinker.seeks, int, S_IRUGO | S_IWUSR);
+module_param_array_named(adj, lowmem_adj, int, &lowmem_adj_size,
+ S_IRUGO | S_IWUSR);
+module_param_array_named(minfree, lowmem_minfree, uint, &lowmem_minfree_size,
+ S_IRUGO | S_IWUSR);
+module_param_named(debug_level, lowmem_debug_level, uint, S_IRUGO | S_IWUSR);
+
+module_init(lowmem_init);
+module_exit(lowmem_exit);
+
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/staging/android/pmem.c b/drivers/staging/android/pmem.c
new file mode 100644
index 000000000000..7d97032c6508
--- /dev/null
+++ b/drivers/staging/android/pmem.c
@@ -0,0 +1,1345 @@
+/* pmem.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/miscdevice.h>
+#include <linux/platform_device.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/mm.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/debugfs.h>
+#include <linux/mempolicy.h>
+#include <linux/sched.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/cacheflush.h>
+#include "android_pmem.h"
+
+#define PMEM_MAX_DEVICES 10
+#define PMEM_MAX_ORDER 128
+#define PMEM_MIN_ALLOC PAGE_SIZE
+
+#define PMEM_DEBUG 1
+
+/* indicates that a reference to this file has been taken via get_pmem_file,
+ * the file should not be released until put_pmem_file is called */
+#define PMEM_FLAGS_BUSY 0x1
+/* indicates that this is a suballocation of a larger master range */
+#define PMEM_FLAGS_CONNECTED 0x1 << 1
+/* indicates this is a master and not a sub allocation and that it is mmaped */
+#define PMEM_FLAGS_MASTERMAP 0x1 << 2
+/* submap and unsubmap flags indicate:
+ * 00: subregion has never been mmaped
+ * 10: subregion has been mmaped, reference to the mm was taken
+ * 11: subregion has been released, reference to the mm still held
+ * 01: subregion has been released, reference to the mm has been released
+ */
+#define PMEM_FLAGS_SUBMAP 0x1 << 3
+#define PMEM_FLAGS_UNSUBMAP 0x1 << 4
+
+
+struct pmem_data {
+ /* in alloc mode: an index into the bitmap
+ * in no_alloc mode: the size of the allocation */
+ int index;
+ /* see flags above for descriptions */
+ unsigned int flags;
+	/* protects this data field; if the mm mmap_sem will be held at the
+	 * same time as this sem, the mm sem must be taken first (as this is
+	 * the order for the vma_open and vma_close ops) */
+ struct rw_semaphore sem;
+ /* info about the mmaping process */
+ struct vm_area_struct *vma;
+ /* task struct of the mapping process */
+ struct task_struct *task;
+	/* process id of the mapping process */
+ pid_t pid;
+ /* file descriptor of the master */
+ int master_fd;
+ /* file struct of the master */
+ struct file *master_file;
+ /* a list of currently available regions if this is a suballocation */
+ struct list_head region_list;
+ /* a linked list of data so we can access them for debugging */
+ struct list_head list;
+#if PMEM_DEBUG
+ int ref;
+#endif
+};
+
+struct pmem_bits {
+ unsigned allocated:1; /* 1 if allocated, 0 if free */
+ unsigned order:7; /* size of the region in pmem space */
+};
+
+struct pmem_region_node {
+ struct pmem_region region;
+ struct list_head list;
+};
+
+#define PMEM_DEBUG_MSGS 0
+#if PMEM_DEBUG_MSGS
+#define DLOG(fmt,args...) \
+ do { printk(KERN_INFO "[%s:%s:%d] "fmt, __FILE__, __func__, __LINE__, \
+ ##args); } \
+ while (0)
+#else
+#define DLOG(x...) do {} while (0)
+#endif
+
+struct pmem_info {
+ struct miscdevice dev;
+	/* physical start address of the remapped pmem space */
+	unsigned long base;
+	/* virtual start address of the remapped pmem space */
+ unsigned char __iomem *vbase;
+ /* total size of the pmem space */
+ unsigned long size;
+ /* number of entries in the pmem space */
+ unsigned long num_entries;
+ /* pfn of the garbage page in memory */
+ unsigned long garbage_pfn;
+ /* index of the garbage page in the pmem space */
+ int garbage_index;
+ /* the bitmap for the region indicating which entries are allocated
+ * and which are free */
+ struct pmem_bits *bitmap;
+ /* indicates the region should not be managed with an allocator */
+ unsigned no_allocator;
+ /* indicates maps of this region should be cached, if a mix of
+ * cached and uncached is desired, set this and open the device with
+ * O_SYNC to get an uncached region */
+ unsigned cached;
+ unsigned buffered;
+ /* in no_allocator mode the first mapper gets the whole space and sets
+ * this flag */
+ unsigned allocated;
+	/* for debugging, a list of pmem file structs; the
+ * data_list_lock should be taken before pmem_data->sem if both are
+ * needed */
+ struct mutex data_list_lock;
+ struct list_head data_list;
+ /* pmem_sem protects the bitmap array
+ * a write lock should be held when modifying entries in bitmap
+ * a read lock should be held when reading data from bits or
+ * dereferencing a pointer into bitmap
+ *
+ * pmem_data->sem protects the pmem data of a particular file
+	 * Many of the functions that require the pmem_data->sem have a non-
+ * locking version for when the caller is already holding that sem.
+ *
+ * IF YOU TAKE BOTH LOCKS TAKE THEM IN THIS ORDER:
+ * down(pmem_data->sem) => down(bitmap_sem)
+ */
+ struct rw_semaphore bitmap_sem;
+
+ long (*ioctl)(struct file *, unsigned int, unsigned long);
+ int (*release)(struct inode *, struct file *);
+};
+
+static struct pmem_info pmem[PMEM_MAX_DEVICES];
+static int id_count;
+
+#define PMEM_IS_FREE(id, index) !(pmem[id].bitmap[index].allocated)
+#define PMEM_ORDER(id, index) pmem[id].bitmap[index].order
+#define PMEM_BUDDY_INDEX(id, index) (index ^ (1 << PMEM_ORDER(id, index)))
+#define PMEM_NEXT_INDEX(id, index) (index + (1 << PMEM_ORDER(id, index)))
+#define PMEM_OFFSET(index) (index * PMEM_MIN_ALLOC)
+#define PMEM_START_ADDR(id, index) (PMEM_OFFSET(index) + pmem[id].base)
+#define PMEM_LEN(id, index) ((1 << PMEM_ORDER(id, index)) * PMEM_MIN_ALLOC)
+#define PMEM_END_ADDR(id, index) (PMEM_START_ADDR(id, index) + \
+ PMEM_LEN(id, index))
+#define PMEM_START_VADDR(id, index) (PMEM_OFFSET(index) + pmem[id].vbase)
+#define PMEM_END_VADDR(id, index) (PMEM_START_VADDR(id, index) + \
+ PMEM_LEN(id, index))
+#define PMEM_REVOKED(data) (data->flags & PMEM_FLAGS_REVOKED)
+#define PMEM_IS_PAGE_ALIGNED(addr) (!((addr) & (~PAGE_MASK)))
+#define PMEM_IS_SUBMAP(data) ((data->flags & PMEM_FLAGS_SUBMAP) && \
+ (!(data->flags & PMEM_FLAGS_UNSUBMAP)))
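The slot macros implement a classic binary buddy scheme over PMEM_MIN_ALLOC-sized entries. A standalone sketch of the index/length arithmetic, assuming a 4096-byte PMEM_MIN_ALLOC:

#include <assert.h>

#define TOY_MIN_ALLOC 4096

static int  toy_buddy_index(int index, int order) { return index ^ (1 << order); }
static long toy_len(int order)                    { return (1L << order) * TOY_MIN_ALLOC; }

int main(void)
{
	/* an order-2 slot at index 4 spans 4 entries (16 KiB)... */
	assert(toy_len(2) == 16384);
	/* ...and its buddy is the neighbouring order-2 slot at index 0 */
	assert(toy_buddy_index(4, 2) == 0);
	/* splitting it once gives two order-1 buddies at indices 4 and 6 */
	assert(toy_buddy_index(4, 1) == 6);
	return 0;
}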
+
+static int pmem_release(struct inode *, struct file *);
+static int pmem_mmap(struct file *, struct vm_area_struct *);
+static int pmem_open(struct inode *, struct file *);
+static long pmem_ioctl(struct file *, unsigned int, unsigned long);
+
+struct file_operations pmem_fops = {
+ .release = pmem_release,
+ .mmap = pmem_mmap,
+ .open = pmem_open,
+ .unlocked_ioctl = pmem_ioctl,
+};
+
+static int get_id(struct file *file)
+{
+ return MINOR(file->f_dentry->d_inode->i_rdev);
+}
+
+int is_pmem_file(struct file *file)
+{
+ int id;
+
+ if (unlikely(!file || !file->f_dentry || !file->f_dentry->d_inode))
+ return 0;
+ id = get_id(file);
+ if (unlikely(id >= PMEM_MAX_DEVICES))
+ return 0;
+ if (unlikely(file->f_dentry->d_inode->i_rdev !=
+ MKDEV(MISC_MAJOR, pmem[id].dev.minor)))
+ return 0;
+ return 1;
+}
+
+static int has_allocation(struct file *file)
+{
+ struct pmem_data *data;
+ /* check is_pmem_file first if not accessed via pmem_file_ops */
+
+ if (unlikely(!file->private_data))
+ return 0;
+ data = (struct pmem_data *)file->private_data;
+ if (unlikely(data->index < 0))
+ return 0;
+ return 1;
+}
+
+static int is_master_owner(struct file *file)
+{
+ struct file *master_file;
+ struct pmem_data *data;
+ int put_needed, ret = 0;
+
+ if (!is_pmem_file(file) || !has_allocation(file))
+ return 0;
+ data = (struct pmem_data *)file->private_data;
+ if (PMEM_FLAGS_MASTERMAP & data->flags)
+ return 1;
+ master_file = fget_light(data->master_fd, &put_needed);
+ if (master_file && data->master_file == master_file)
+ ret = 1;
+ fput_light(master_file, put_needed);
+ return ret;
+}
+
+static int pmem_free(int id, int index)
+{
+ /* caller should hold the write lock on pmem_sem! */
+ int buddy, curr = index;
+ DLOG("index %d\n", index);
+
+ if (pmem[id].no_allocator) {
+ pmem[id].allocated = 0;
+ return 0;
+ }
+ /* clean up the bitmap, merging any buddies */
+ pmem[id].bitmap[curr].allocated = 0;
+	/* find a slot's buddy: Buddy# = Slot# ^ (1 << order)
+ * if the buddy is also free merge them
+ * repeat until the buddy is not free or end of the bitmap is reached
+ */
+ do {
+ buddy = PMEM_BUDDY_INDEX(id, curr);
+ if (PMEM_IS_FREE(id, buddy) &&
+ PMEM_ORDER(id, buddy) == PMEM_ORDER(id, curr)) {
+ PMEM_ORDER(id, buddy)++;
+ PMEM_ORDER(id, curr)++;
+ curr = min(buddy, curr);
+ } else {
+ break;
+ }
+ } while (curr < pmem[id].num_entries);
+
+ return 0;
+}
+
+static void pmem_revoke(struct file *file, struct pmem_data *data);
+
+static int pmem_release(struct inode *inode, struct file *file)
+{
+ struct pmem_data *data = (struct pmem_data *)file->private_data;
+ struct pmem_region_node *region_node;
+ struct list_head *elt, *elt2;
+ int id = get_id(file), ret = 0;
+
+
+ mutex_lock(&pmem[id].data_list_lock);
+ /* if this file is a master, revoke all the memory in the connected
+ * files */
+ if (PMEM_FLAGS_MASTERMAP & data->flags) {
+ struct pmem_data *sub_data;
+ list_for_each(elt, &pmem[id].data_list) {
+ sub_data = list_entry(elt, struct pmem_data, list);
+ down_read(&sub_data->sem);
+ if (PMEM_IS_SUBMAP(sub_data) &&
+ file == sub_data->master_file) {
+ up_read(&sub_data->sem);
+ pmem_revoke(file, sub_data);
+ } else
+ up_read(&sub_data->sem);
+ }
+ }
+ list_del(&data->list);
+ mutex_unlock(&pmem[id].data_list_lock);
+
+
+ down_write(&data->sem);
+
+	/* if it's not a connected file and it has an allocation, free it */
+ if (!(PMEM_FLAGS_CONNECTED & data->flags) && has_allocation(file)) {
+ down_write(&pmem[id].bitmap_sem);
+ ret = pmem_free(id, data->index);
+ up_write(&pmem[id].bitmap_sem);
+ }
+
+ /* if this file is a submap (mapped, connected file), downref the
+ * task struct */
+ if (PMEM_FLAGS_SUBMAP & data->flags)
+ if (data->task) {
+ put_task_struct(data->task);
+ data->task = NULL;
+ }
+
+ file->private_data = NULL;
+
+ list_for_each_safe(elt, elt2, &data->region_list) {
+ region_node = list_entry(elt, struct pmem_region_node, list);
+ list_del(elt);
+ kfree(region_node);
+ }
+ BUG_ON(!list_empty(&data->region_list));
+
+ up_write(&data->sem);
+ kfree(data);
+ if (pmem[id].release)
+ ret = pmem[id].release(inode, file);
+
+ return ret;
+}
+
+static int pmem_open(struct inode *inode, struct file *file)
+{
+ struct pmem_data *data;
+ int id = get_id(file);
+ int ret = 0;
+
+ DLOG("current %u file %p(%d)\n", current->pid, file, file_count(file));
+	/* set up file->private_data to indicate it's unmapped */
+ /* you can only open a pmem device one time */
+ if (file->private_data != NULL)
+ return -1;
+ data = kmalloc(sizeof(struct pmem_data), GFP_KERNEL);
+ if (!data) {
+		printk(KERN_ERR "pmem: unable to allocate memory for pmem metadata.\n");
+ return -1;
+ }
+ data->flags = 0;
+ data->index = -1;
+ data->task = NULL;
+ data->vma = NULL;
+ data->pid = 0;
+ data->master_file = NULL;
+#if PMEM_DEBUG
+ data->ref = 0;
+#endif
+ INIT_LIST_HEAD(&data->region_list);
+ init_rwsem(&data->sem);
+
+ file->private_data = data;
+ INIT_LIST_HEAD(&data->list);
+
+ mutex_lock(&pmem[id].data_list_lock);
+ list_add(&data->list, &pmem[id].data_list);
+ mutex_unlock(&pmem[id].data_list_lock);
+ return ret;
+}
+
+static unsigned long pmem_order(unsigned long len)
+{
+ int i;
+
+ len = (len + PMEM_MIN_ALLOC - 1)/PMEM_MIN_ALLOC;
+ len--;
+ for (i = 0; i < sizeof(len)*8; i++)
+ if (len >> i == 0)
+ break;
+ return i;
+}
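pmem_order() is a ceiling log2 of the request measured in PMEM_MIN_ALLOC-sized pages. A standalone sketch of the same computation with a few worked values (4096-byte pages assumed):

#include <assert.h>

static unsigned long toy_order(unsigned long len, unsigned long min_alloc)
{
	unsigned long i;

	len = (len + min_alloc - 1) / min_alloc;   /* pages, rounded up */
	len--;
	for (i = 0; i < sizeof(len) * 8; i++)
		if (len >> i == 0)
			break;
	return i;
}

int main(void)
{
	assert(toy_order(4096, 4096) == 0);    /* 1 page  -> order 0 */
	assert(toy_order(6000, 4096) == 1);    /* 2 pages -> order 1 */
	assert(toy_order(20000, 4096) == 3);   /* 5 pages -> order 3 */
	return 0;
}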
+
+static int pmem_allocate(int id, unsigned long len)
+{
+ /* caller should hold the write lock on pmem_sem! */
+	/* returns the index of the corresponding bitmap[] entry */
+ int curr = 0;
+ int end = pmem[id].num_entries;
+ int best_fit = -1;
+ unsigned long order = pmem_order(len);
+
+ if (pmem[id].no_allocator) {
+ DLOG("no allocator");
+ if ((len > pmem[id].size) || pmem[id].allocated)
+ return -1;
+ pmem[id].allocated = 1;
+ return len;
+ }
+
+ if (order > PMEM_MAX_ORDER)
+ return -1;
+ DLOG("order %lx\n", order);
+
+ /* look through the bitmap:
+ * if you find a free slot of the correct order use it
+ * otherwise, use the best fit (smallest with size > order) slot
+ */
+ while (curr < end) {
+ if (PMEM_IS_FREE(id, curr)) {
+ if (PMEM_ORDER(id, curr) == (unsigned char)order) {
+ /* set the not free bit and clear others */
+ best_fit = curr;
+ break;
+ }
+ if (PMEM_ORDER(id, curr) > (unsigned char)order &&
+ (best_fit < 0 ||
+ PMEM_ORDER(id, curr) < PMEM_ORDER(id, best_fit)))
+ best_fit = curr;
+ }
+ curr = PMEM_NEXT_INDEX(id, curr);
+ }
+
+ /* if best_fit < 0, there are no suitable slots,
+ * return an error
+ */
+ if (best_fit < 0) {
+ printk("pmem: no space left to allocate!\n");
+ return -1;
+ }
+
+ /* now partition the best fit:
+ * split the slot into 2 buddies of order - 1
+ * repeat until the slot is of the correct order
+ */
+ while (PMEM_ORDER(id, best_fit) > (unsigned char)order) {
+ int buddy;
+ PMEM_ORDER(id, best_fit) -= 1;
+ buddy = PMEM_BUDDY_INDEX(id, best_fit);
+ PMEM_ORDER(id, buddy) = PMEM_ORDER(id, best_fit);
+ }
+ pmem[id].bitmap[best_fit].allocated = 1;
+ return best_fit;
+}
+
+static pgprot_t pmem_access_prot(struct file *file, pgprot_t vma_prot)
+{
+ int id = get_id(file);
+#ifdef pgprot_noncached
+ if (pmem[id].cached == 0 || file->f_flags & O_SYNC)
+ return pgprot_noncached(vma_prot);
+#endif
+#ifdef pgprot_ext_buffered
+ else if (pmem[id].buffered)
+ return pgprot_ext_buffered(vma_prot);
+#endif
+ return vma_prot;
+}
+
+static unsigned long pmem_start_addr(int id, struct pmem_data *data)
+{
+ if (pmem[id].no_allocator)
+ return PMEM_START_ADDR(id, 0);
+ else
+ return PMEM_START_ADDR(id, data->index);
+
+}
+
+static void *pmem_start_vaddr(int id, struct pmem_data *data)
+{
+ return pmem_start_addr(id, data) - pmem[id].base + pmem[id].vbase;
+}
+
+static unsigned long pmem_len(int id, struct pmem_data *data)
+{
+ if (pmem[id].no_allocator)
+ return data->index;
+ else
+ return PMEM_LEN(id, data->index);
+}
+
+static int pmem_map_garbage(int id, struct vm_area_struct *vma,
+ struct pmem_data *data, unsigned long offset,
+ unsigned long len)
+{
+ int i, garbage_pages = len >> PAGE_SHIFT;
+
+ vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP | VM_SHARED | VM_WRITE;
+ for (i = 0; i < garbage_pages; i++) {
+ if (vm_insert_pfn(vma, vma->vm_start + offset + (i * PAGE_SIZE),
+ pmem[id].garbage_pfn))
+ return -EAGAIN;
+ }
+ return 0;
+}
+
+static int pmem_unmap_pfn_range(int id, struct vm_area_struct *vma,
+ struct pmem_data *data, unsigned long offset,
+ unsigned long len)
+{
+ int garbage_pages;
+ DLOG("unmap offset %lx len %lx\n", offset, len);
+
+ BUG_ON(!PMEM_IS_PAGE_ALIGNED(len));
+
+ garbage_pages = len >> PAGE_SHIFT;
+ zap_page_range(vma, vma->vm_start + offset, len, NULL);
+ pmem_map_garbage(id, vma, data, offset, len);
+ return 0;
+}
+
+static int pmem_map_pfn_range(int id, struct vm_area_struct *vma,
+ struct pmem_data *data, unsigned long offset,
+ unsigned long len)
+{
+ DLOG("map offset %lx len %lx\n", offset, len);
+ BUG_ON(!PMEM_IS_PAGE_ALIGNED(vma->vm_start));
+ BUG_ON(!PMEM_IS_PAGE_ALIGNED(vma->vm_end));
+ BUG_ON(!PMEM_IS_PAGE_ALIGNED(len));
+ BUG_ON(!PMEM_IS_PAGE_ALIGNED(offset));
+
+ if (io_remap_pfn_range(vma, vma->vm_start + offset,
+ (pmem_start_addr(id, data) + offset) >> PAGE_SHIFT,
+ len, vma->vm_page_prot)) {
+ return -EAGAIN;
+ }
+ return 0;
+}
+
+static int pmem_remap_pfn_range(int id, struct vm_area_struct *vma,
+ struct pmem_data *data, unsigned long offset,
+ unsigned long len)
+{
+	/* hold the mm sem for the vma you are modifying when you call this */
+ BUG_ON(!vma);
+ zap_page_range(vma, vma->vm_start + offset, len, NULL);
+ return pmem_map_pfn_range(id, vma, data, offset, len);
+}
+
+static void pmem_vma_open(struct vm_area_struct *vma)
+{
+ struct file *file = vma->vm_file;
+ struct pmem_data *data = file->private_data;
+ int id = get_id(file);
+ /* this should never be called as we don't support copying pmem
+ * ranges via fork */
+ BUG_ON(!has_allocation(file));
+ down_write(&data->sem);
+ /* remap the garbage pages, forkers don't get access to the data */
+	pmem_unmap_pfn_range(id, vma, data, 0, vma->vm_end - vma->vm_start);
+ up_write(&data->sem);
+}
+
+static void pmem_vma_close(struct vm_area_struct *vma)
+{
+ struct file *file = vma->vm_file;
+ struct pmem_data *data = file->private_data;
+
+ DLOG("current %u ppid %u file %p count %d\n", current->pid,
+ current->parent->pid, file, file_count(file));
+ if (unlikely(!is_pmem_file(file) || !has_allocation(file))) {
+ printk(KERN_WARNING "pmem: something is very wrong, you are "
+ "closing a vm backing an allocation that doesn't "
+ "exist!\n");
+ return;
+ }
+ down_write(&data->sem);
+ if (data->vma == vma) {
+ data->vma = NULL;
+ if ((data->flags & PMEM_FLAGS_CONNECTED) &&
+ (data->flags & PMEM_FLAGS_SUBMAP))
+ data->flags |= PMEM_FLAGS_UNSUBMAP;
+ }
+ /* the kernel is going to free this vma now anyway */
+ up_write(&data->sem);
+}
+
+static struct vm_operations_struct vm_ops = {
+ .open = pmem_vma_open,
+ .close = pmem_vma_close,
+};
+
+static int pmem_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct pmem_data *data;
+ int index;
+ unsigned long vma_size = vma->vm_end - vma->vm_start;
+ int ret = 0, id = get_id(file);
+
+ if (vma->vm_pgoff || !PMEM_IS_PAGE_ALIGNED(vma_size)) {
+#if PMEM_DEBUG
+		printk(KERN_ERR "pmem: mmaps must be at offset zero, aligned"
+		       " and a multiple of the page size.\n");
+#endif
+ return -EINVAL;
+ }
+
+ data = (struct pmem_data *)file->private_data;
+ down_write(&data->sem);
+ /* check this file isn't already mmaped, for submaps check this file
+ * has never been mmaped */
+ if ((data->flags & PMEM_FLAGS_SUBMAP) ||
+ (data->flags & PMEM_FLAGS_UNSUBMAP)) {
+#if PMEM_DEBUG
+ printk(KERN_ERR "pmem: you can only mmap a pmem file once, "
+ "this file is already mmaped. %x\n", data->flags);
+#endif
+ ret = -EINVAL;
+ goto error;
+ }
+	/* if file->private_data is unallocated, allocate */
+ if (data && data->index == -1) {
+ down_write(&pmem[id].bitmap_sem);
+ index = pmem_allocate(id, vma->vm_end - vma->vm_start);
+ up_write(&pmem[id].bitmap_sem);
+ data->index = index;
+ }
+	/* either no space was available or an error occurred */
+ if (!has_allocation(file)) {
+ ret = -EINVAL;
+ printk("pmem: could not find allocation for map.\n");
+ goto error;
+ }
+
+ if (pmem_len(id, data) < vma_size) {
+#if PMEM_DEBUG
+		printk(KERN_WARNING "pmem: mmap size [%lu] does not match "
+		       "size of backing region [%lu].\n", vma_size,
+ pmem_len(id, data));
+#endif
+ ret = -EINVAL;
+ goto error;
+ }
+
+ vma->vm_pgoff = pmem_start_addr(id, data) >> PAGE_SHIFT;
+ vma->vm_page_prot = pmem_access_prot(file, vma->vm_page_prot);
+
+ if (data->flags & PMEM_FLAGS_CONNECTED) {
+ struct pmem_region_node *region_node;
+ struct list_head *elt;
+ if (pmem_map_garbage(id, vma, data, 0, vma_size)) {
+ printk("pmem: mmap failed in kernel!\n");
+ ret = -EAGAIN;
+ goto error;
+ }
+ list_for_each(elt, &data->region_list) {
+ region_node = list_entry(elt, struct pmem_region_node,
+ list);
+ DLOG("remapping file: %p %lx %lx\n", file,
+ region_node->region.offset,
+ region_node->region.len);
+ if (pmem_remap_pfn_range(id, vma, data,
+ region_node->region.offset,
+ region_node->region.len)) {
+ ret = -EAGAIN;
+ goto error;
+ }
+ }
+ data->flags |= PMEM_FLAGS_SUBMAP;
+ get_task_struct(current->group_leader);
+ data->task = current->group_leader;
+ data->vma = vma;
+#if PMEM_DEBUG
+ data->pid = current->pid;
+#endif
+ DLOG("submmapped file %p vma %p pid %u\n", file, vma,
+ current->pid);
+ } else {
+ if (pmem_map_pfn_range(id, vma, data, 0, vma_size)) {
+ printk(KERN_INFO "pmem: mmap failed in kernel!\n");
+ ret = -EAGAIN;
+ goto error;
+ }
+ data->flags |= PMEM_FLAGS_MASTERMAP;
+ data->pid = current->pid;
+ }
+ vma->vm_ops = &vm_ops;
+error:
+ up_write(&data->sem);
+ return ret;
+}
+
+/* the following are the API for accessing pmem regions from other drivers
+ * inside the kernel */
+int get_pmem_user_addr(struct file *file, unsigned long *start,
+ unsigned long *len)
+{
+ struct pmem_data *data;
+ if (!is_pmem_file(file) || !has_allocation(file)) {
+#if PMEM_DEBUG
+		printk(KERN_INFO "pmem: requested pmem data from invalid "
+				  "file.\n");
+#endif
+ return -1;
+ }
+ data = (struct pmem_data *)file->private_data;
+ down_read(&data->sem);
+ if (data->vma) {
+ *start = data->vma->vm_start;
+ *len = data->vma->vm_end - data->vma->vm_start;
+ } else {
+ *start = 0;
+ *len = 0;
+ }
+ up_read(&data->sem);
+ return 0;
+}
+
+int get_pmem_addr(struct file *file, unsigned long *start,
+ unsigned long *vstart, unsigned long *len)
+{
+ struct pmem_data *data;
+ int id;
+
+ if (!is_pmem_file(file) || !has_allocation(file)) {
+ return -1;
+ }
+
+ data = (struct pmem_data *)file->private_data;
+	if (data->index == -1) {
+#if PMEM_DEBUG
+		printk(KERN_INFO "pmem: requested pmem data from file with no "
+		       "allocation.\n");
+#endif
+		return -1;
+	}
+ id = get_id(file);
+
+ down_read(&data->sem);
+ *start = pmem_start_addr(id, data);
+ *len = pmem_len(id, data);
+ *vstart = (unsigned long)pmem_start_vaddr(id, data);
+ up_read(&data->sem);
+#if PMEM_DEBUG
+ down_write(&data->sem);
+ data->ref++;
+ up_write(&data->sem);
+#endif
+ return 0;
+}
+
+int get_pmem_file(int fd, unsigned long *start, unsigned long *vstart,
+ unsigned long *len, struct file **filp)
+{
+ struct file *file;
+
+ file = fget(fd);
+ if (unlikely(file == NULL)) {
+		printk(KERN_INFO "pmem: requested data from file descriptor "
+		       "that doesn't exist.\n");
+ return -1;
+ }
+
+ if (get_pmem_addr(file, start, vstart, len))
+ goto end;
+
+ if (filp)
+ *filp = file;
+ return 0;
+end:
+ fput(file);
+ return -1;
+}
+
+void put_pmem_file(struct file *file)
+{
+ struct pmem_data *data;
+ int id;
+
+ if (!is_pmem_file(file))
+ return;
+ id = get_id(file);
+ data = (struct pmem_data *)file->private_data;
+#if PMEM_DEBUG
+ down_write(&data->sem);
+ if (data->ref == 0) {
+ printk("pmem: pmem_put > pmem_get %s (pid %d)\n",
+ pmem[id].dev.name, data->pid);
+ BUG();
+ }
+ data->ref--;
+ up_write(&data->sem);
+#endif
+ fput(file);
+}
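Taken together, get_pmem_file()/flush_pmem_file()/put_pmem_file() form the in-kernel API referred to above. A hedged sketch of how another driver might use it; the fd handed over from user space and the hardware-programming step are placeholders:

#include <linux/errno.h>
#include "android_pmem.h"	/* declares get_pmem_file() and friends */

static int example_use_pmem_buffer(int fd)
{
	unsigned long phys, virt, len;
	struct file *filp;

	if (get_pmem_file(fd, &phys, &virt, &len, &filp))
		return -EINVAL;

	/* ... program hardware with 'phys'/'len', or touch (void *)virt ... */

	flush_pmem_file(filp, 0, len);   /* write back CPU caches for the range */
	put_pmem_file(filp);             /* balances the fget() taken above */
	return 0;
}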
+
+void flush_pmem_file(struct file *file, unsigned long offset, unsigned long len)
+{
+ struct pmem_data *data;
+ int id;
+ void *vaddr;
+ struct pmem_region_node *region_node;
+ struct list_head *elt;
+ void *flush_start, *flush_end;
+
+ if (!is_pmem_file(file) || !has_allocation(file)) {
+ return;
+ }
+
+ id = get_id(file);
+ data = (struct pmem_data *)file->private_data;
+ if (!pmem[id].cached || file->f_flags & O_SYNC)
+ return;
+
+ down_read(&data->sem);
+ vaddr = pmem_start_vaddr(id, data);
+ /* if this isn't a submmapped file, flush the whole thing */
+ if (unlikely(!(data->flags & PMEM_FLAGS_CONNECTED))) {
+ dmac_flush_range(vaddr, vaddr + pmem_len(id, data));
+ goto end;
+ }
+ /* otherwise, flush the region of the file we are drawing */
+ list_for_each(elt, &data->region_list) {
+ region_node = list_entry(elt, struct pmem_region_node, list);
+ if ((offset >= region_node->region.offset) &&
+ ((offset + len) <= (region_node->region.offset +
+ region_node->region.len))) {
+ flush_start = vaddr + region_node->region.offset;
+ flush_end = flush_start + region_node->region.len;
+ dmac_flush_range(flush_start, flush_end);
+ break;
+ }
+ }
+end:
+ up_read(&data->sem);
+}
+
+static int pmem_connect(unsigned long connect, struct file *file)
+{
+ struct pmem_data *data = (struct pmem_data *)file->private_data;
+ struct pmem_data *src_data;
+ struct file *src_file;
+ int ret = 0, put_needed;
+
+ down_write(&data->sem);
+ /* retrieve the src file and check it is a pmem file with an alloc */
+ src_file = fget_light(connect, &put_needed);
+ DLOG("connect %p to %p\n", file, src_file);
+ if (!src_file) {
+ printk("pmem: src file not found!\n");
+ ret = -EINVAL;
+ goto err_no_file;
+ }
+ if (unlikely(!is_pmem_file(src_file) || !has_allocation(src_file))) {
+ printk(KERN_INFO "pmem: src file is not a pmem file or has no "
+ "alloc!\n");
+ ret = -EINVAL;
+ goto err_bad_file;
+ }
+ src_data = (struct pmem_data *)src_file->private_data;
+
+ if (has_allocation(file) && (data->index != src_data->index)) {
+ printk("pmem: file is already mapped but doesn't match this"
+ " src_file!\n");
+ ret = -EINVAL;
+ goto err_bad_file;
+ }
+ data->index = src_data->index;
+ data->flags |= PMEM_FLAGS_CONNECTED;
+ data->master_fd = connect;
+ data->master_file = src_file;
+
+err_bad_file:
+ fput_light(src_file, put_needed);
+err_no_file:
+ up_write(&data->sem);
+ return ret;
+}
+
+static void pmem_unlock_data_and_mm(struct pmem_data *data,
+ struct mm_struct *mm)
+{
+ up_write(&data->sem);
+ if (mm != NULL) {
+ up_write(&mm->mmap_sem);
+ mmput(mm);
+ }
+}
+
+static int pmem_lock_data_and_mm(struct file *file, struct pmem_data *data,
+ struct mm_struct **locked_mm)
+{
+ int ret = 0;
+ struct mm_struct *mm = NULL;
+ *locked_mm = NULL;
+lock_mm:
+ down_read(&data->sem);
+ if (PMEM_IS_SUBMAP(data)) {
+ mm = get_task_mm(data->task);
+ if (!mm) {
+#if PMEM_DEBUG
+			printk("pmem: can't remap, task is gone!\n");
+#endif
+ up_read(&data->sem);
+ return -1;
+ }
+ }
+ up_read(&data->sem);
+
+ if (mm)
+ down_write(&mm->mmap_sem);
+
+ down_write(&data->sem);
+ /* check that the file didn't get mmaped before we could take the
+ * data sem, this should be safe b/c you can only submap each file
+ * once */
+	if (PMEM_IS_SUBMAP(data) && !mm) {
+		/* pmem_unlock_data_and_mm() already drops data->sem */
+		pmem_unlock_data_and_mm(data, mm);
+		goto lock_mm;
+	}
+ /* now check that vma.mm is still there, it could have been
+ * deleted by vma_close before we could get the data->sem */
+ if ((data->flags & PMEM_FLAGS_UNSUBMAP) && (mm != NULL)) {
+ /* might as well release this */
+ if (data->flags & PMEM_FLAGS_SUBMAP) {
+ put_task_struct(data->task);
+ data->task = NULL;
+ /* lower the submap flag to show the mm is gone */
+ data->flags &= ~(PMEM_FLAGS_SUBMAP);
+ }
+ pmem_unlock_data_and_mm(data, mm);
+ return -1;
+ }
+ *locked_mm = mm;
+ return ret;
+}
+
+int pmem_remap(struct pmem_region *region, struct file *file,
+ unsigned operation)
+{
+ int ret;
+ struct pmem_region_node *region_node;
+ struct mm_struct *mm = NULL;
+ struct list_head *elt, *elt2;
+ int id = get_id(file);
+ struct pmem_data *data = (struct pmem_data *)file->private_data;
+
+	/* pmem region must be aligned on a page boundary */
+ if (unlikely(!PMEM_IS_PAGE_ALIGNED(region->offset) ||
+ !PMEM_IS_PAGE_ALIGNED(region->len))) {
+#if PMEM_DEBUG
+ printk("pmem: request for unaligned pmem suballocation "
+ "%lx %lx\n", region->offset, region->len);
+#endif
+ return -EINVAL;
+ }
+
+ /* if userspace requests a region of len 0, there's nothing to do */
+ if (region->len == 0)
+ return 0;
+
+ /* lock the mm and data */
+ ret = pmem_lock_data_and_mm(file, data, &mm);
+ if (ret)
+ return 0;
+
+	/* only the owner of the master file can remap the client fds
+	 * that map into it */
+ if (!is_master_owner(file)) {
+#if PMEM_DEBUG
+ printk("pmem: remap requested from non-master process\n");
+#endif
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* check that the requested range is within the src allocation */
+ if (unlikely((region->offset > pmem_len(id, data)) ||
+ (region->len > pmem_len(id, data)) ||
+ (region->offset + region->len > pmem_len(id, data)))) {
+#if PMEM_DEBUG
+ printk(KERN_INFO "pmem: suballoc doesn't fit in src_file!\n");
+#endif
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (operation == PMEM_MAP) {
+ region_node = kmalloc(sizeof(struct pmem_region_node),
+ GFP_KERNEL);
+ if (!region_node) {
+ ret = -ENOMEM;
+#if PMEM_DEBUG
+			printk(KERN_INFO "No space to allocate metadata!\n");
+#endif
+ goto err;
+ }
+ region_node->region = *region;
+ list_add(&region_node->list, &data->region_list);
+ } else if (operation == PMEM_UNMAP) {
+ int found = 0;
+ list_for_each_safe(elt, elt2, &data->region_list) {
+ region_node = list_entry(elt, struct pmem_region_node,
+ list);
+ if (region->len == 0 ||
+ (region_node->region.offset == region->offset &&
+ region_node->region.len == region->len)) {
+ list_del(elt);
+ kfree(region_node);
+ found = 1;
+ }
+ }
+ if (!found) {
+#if PMEM_DEBUG
+			printk("pmem: Unmap region does not map any mapped "
+				"region!\n");
+#endif
+ ret = -EINVAL;
+ goto err;
+ }
+ }
+
+ if (data->vma && PMEM_IS_SUBMAP(data)) {
+ if (operation == PMEM_MAP)
+ ret = pmem_remap_pfn_range(id, data->vma, data,
+ region->offset, region->len);
+ else if (operation == PMEM_UNMAP)
+ ret = pmem_unmap_pfn_range(id, data->vma, data,
+ region->offset, region->len);
+ }
+
+err:
+ pmem_unlock_data_and_mm(data, mm);
+ return ret;
+}
+
+static void pmem_revoke(struct file *file, struct pmem_data *data)
+{
+ struct pmem_region_node *region_node;
+ struct list_head *elt, *elt2;
+ struct mm_struct *mm = NULL;
+ int id = get_id(file);
+ int ret = 0;
+
+ data->master_file = NULL;
+ ret = pmem_lock_data_and_mm(file, data, &mm);
+	/* if lock_data_and_mm fails, either the task that mapped the fd or
+	 * the vma that mapped it has already gone away; nothing more
+	 * needs to be done */
+ if (ret)
+ return;
+ /* unmap everything */
+	/* delete the regions and region list; nothing is mapped any more */
+ if (data->vma)
+ list_for_each_safe(elt, elt2, &data->region_list) {
+ region_node = list_entry(elt, struct pmem_region_node,
+ list);
+ pmem_unmap_pfn_range(id, data->vma, data,
+ region_node->region.offset,
+ region_node->region.len);
+ list_del(elt);
+ kfree(region_node);
+ }
+ /* delete the master file */
+ pmem_unlock_data_and_mm(data, mm);
+}
+
+static void pmem_get_size(struct pmem_region *region, struct file *file)
+{
+ struct pmem_data *data = (struct pmem_data *)file->private_data;
+ int id = get_id(file);
+
+ if (!has_allocation(file)) {
+ region->offset = 0;
+ region->len = 0;
+ return;
+ } else {
+ region->offset = pmem_start_addr(id, data);
+ region->len = pmem_len(id, data);
+ }
+ DLOG("offset %lx len %lx\n", region->offset, region->len);
+}
+
+
+static long pmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct pmem_data *data;
+ int id = get_id(file);
+
+ switch (cmd) {
+ case PMEM_GET_PHYS:
+ {
+ struct pmem_region region;
+ DLOG("get_phys\n");
+ if (!has_allocation(file)) {
+ region.offset = 0;
+ region.len = 0;
+ } else {
+ data = (struct pmem_data *)file->private_data;
+ region.offset = pmem_start_addr(id, data);
+ region.len = pmem_len(id, data);
+ }
+ printk(KERN_INFO "pmem: request for physical address of pmem region "
+ "from process %d.\n", current->pid);
+ if (copy_to_user((void __user *)arg, &region,
+ sizeof(struct pmem_region)))
+ return -EFAULT;
+ break;
+ }
+ case PMEM_MAP:
+ {
+ struct pmem_region region;
+ if (copy_from_user(&region, (void __user *)arg,
+ sizeof(struct pmem_region)))
+ return -EFAULT;
+ data = (struct pmem_data *)file->private_data;
+ return pmem_remap(&region, file, PMEM_MAP);
+ }
+ break;
+ case PMEM_UNMAP:
+ {
+ struct pmem_region region;
+ if (copy_from_user(&region, (void __user *)arg,
+ sizeof(struct pmem_region)))
+ return -EFAULT;
+ data = (struct pmem_data *)file->private_data;
+ return pmem_remap(&region, file, PMEM_UNMAP);
+ break;
+ }
+ case PMEM_GET_SIZE:
+ {
+ struct pmem_region region;
+ DLOG("get_size\n");
+ pmem_get_size(&region, file);
+ if (copy_to_user((void __user *)arg, &region,
+ sizeof(struct pmem_region)))
+ return -EFAULT;
+ break;
+ }
+ case PMEM_GET_TOTAL_SIZE:
+ {
+ struct pmem_region region;
+ DLOG("get total size\n");
+ region.offset = 0;
+ region.len = pmem[id].size;
+ if (copy_to_user((void __user *)arg, &region,
+ sizeof(struct pmem_region)))
+ return -EFAULT;
+ break;
+ }
+ case PMEM_ALLOCATE:
+ {
+ if (has_allocation(file))
+ return -EINVAL;
+ data = (struct pmem_data *)file->private_data;
+ data->index = pmem_allocate(id, arg);
+ break;
+ }
+ case PMEM_CONNECT:
+ DLOG("connect\n");
+ return pmem_connect(arg, file);
+ break;
+ case PMEM_CACHE_FLUSH:
+ {
+ struct pmem_region region;
+ DLOG("flush\n");
+ if (copy_from_user(&region, (void __user *)arg,
+ sizeof(struct pmem_region)))
+ return -EFAULT;
+ flush_pmem_file(file, region.offset, region.len);
+ break;
+ }
+ default:
+ if (pmem[id].ioctl)
+ return pmem[id].ioctl(file, cmd, arg);
+ return -EINVAL;
+ }
+ return 0;
+}
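A hedged user-space sketch of the master-file flow: mmap() performs the allocation, then PMEM_GET_PHYS reports the backing physical range. PMEM_GET_PHYS and struct pmem_region come from android_pmem.h, which is outside this hunk, and the device name is board-specific:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include "android_pmem.h"

int main(void)
{
	struct pmem_region region;
	int fd = open("/dev/pmem", O_RDWR);     /* illustrative device name */
	void *base;

	if (fd < 0)
		return 1;
	/* mmap() triggers the allocation for a master file */
	base = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (base == MAP_FAILED || ioctl(fd, PMEM_GET_PHYS, &region) < 0) {
		close(fd);
		return 1;
	}
	printf("pmem region: phys 0x%lx, len 0x%lx\n", region.offset, region.len);
	close(fd);
	return 0;
}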
+
+#if PMEM_DEBUG
+static int debug_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t debug_read(struct file *file, char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ struct list_head *elt, *elt2;
+ struct pmem_data *data;
+ struct pmem_region_node *region_node;
+ int id = (int)file->private_data;
+ const int debug_bufmax = 4096;
+ static char buffer[4096];
+ int n = 0;
+
+	DLOG("debug read\n");
+ n = scnprintf(buffer, debug_bufmax,
+ "pid #: mapped regions (offset, len) (offset,len)...\n");
+
+ mutex_lock(&pmem[id].data_list_lock);
+ list_for_each(elt, &pmem[id].data_list) {
+ data = list_entry(elt, struct pmem_data, list);
+ down_read(&data->sem);
+ n += scnprintf(buffer + n, debug_bufmax - n, "pid %u:",
+ data->pid);
+ list_for_each(elt2, &data->region_list) {
+ region_node = list_entry(elt2, struct pmem_region_node,
+ list);
+ n += scnprintf(buffer + n, debug_bufmax - n,
+ "(%lx,%lx) ",
+ region_node->region.offset,
+ region_node->region.len);
+ }
+ n += scnprintf(buffer + n, debug_bufmax - n, "\n");
+ up_read(&data->sem);
+ }
+ mutex_unlock(&pmem[id].data_list_lock);
+
+ n++;
+ buffer[n] = 0;
+ return simple_read_from_buffer(buf, count, ppos, buffer, n);
+}
+
+static struct file_operations debug_fops = {
+ .read = debug_read,
+ .open = debug_open,
+};
+#endif
+
+#if 0
+static struct miscdevice pmem_dev = {
+ .name = "pmem",
+ .fops = &pmem_fops,
+};
+#endif
+
+int pmem_setup(struct android_pmem_platform_data *pdata,
+ long (*ioctl)(struct file *, unsigned int, unsigned long),
+ int (*release)(struct inode *, struct file *))
+{
+ int err = 0;
+ int i, index = 0;
+ int id = id_count;
+ id_count++;
+
+ pmem[id].no_allocator = pdata->no_allocator;
+ pmem[id].cached = pdata->cached;
+ pmem[id].buffered = pdata->buffered;
+ pmem[id].base = pdata->start;
+ pmem[id].size = pdata->size;
+ pmem[id].ioctl = ioctl;
+ pmem[id].release = release;
+ init_rwsem(&pmem[id].bitmap_sem);
+ mutex_init(&pmem[id].data_list_lock);
+ INIT_LIST_HEAD(&pmem[id].data_list);
+ pmem[id].dev.name = pdata->name;
+ pmem[id].dev.minor = id;
+ pmem[id].dev.fops = &pmem_fops;
+ printk(KERN_INFO "%s: %d init\n", pdata->name, pdata->cached);
+
+ err = misc_register(&pmem[id].dev);
+ if (err) {
+ printk(KERN_ALERT "Unable to register pmem driver!\n");
+ goto err_cant_register_device;
+ }
+ pmem[id].num_entries = pmem[id].size / PMEM_MIN_ALLOC;
+
+ pmem[id].bitmap = kmalloc(pmem[id].num_entries *
+ sizeof(struct pmem_bits), GFP_KERNEL);
+ if (!pmem[id].bitmap)
+ goto err_no_mem_for_metadata;
+
+ memset(pmem[id].bitmap, 0, sizeof(struct pmem_bits) *
+ pmem[id].num_entries);
+
+ for (i = sizeof(pmem[id].num_entries) * 8 - 1; i >= 0; i--) {
+ if ((pmem[id].num_entries) & 1<<i) {
+ PMEM_ORDER(id, index) = i;
+ index = PMEM_NEXT_INDEX(id, index);
+ }
+ }
+
+ if (pmem[id].cached)
+ pmem[id].vbase = ioremap_cached(pmem[id].base,
+ pmem[id].size);
+#ifdef ioremap_ext_buffered
+ else if (pmem[id].buffered)
+ pmem[id].vbase = ioremap_ext_buffered(pmem[id].base,
+ pmem[id].size);
+#endif
+ else
+ pmem[id].vbase = ioremap(pmem[id].base, pmem[id].size);
+
+ if (pmem[id].vbase == 0)
+ goto error_cant_remap;
+
+ pmem[id].garbage_pfn = page_to_pfn(alloc_page(GFP_KERNEL));
+ if (pmem[id].no_allocator)
+ pmem[id].allocated = 0;
+
+#if PMEM_DEBUG
+ debugfs_create_file(pdata->name, S_IFREG | S_IRUGO, NULL, (void *)id,
+ &debug_fops);
+#endif
+ return 0;
+error_cant_remap:
+ kfree(pmem[id].bitmap);
+err_no_mem_for_metadata:
+ misc_deregister(&pmem[id].dev);
+err_cant_register_device:
+ return -1;
+}
+
+static int pmem_probe(struct platform_device *pdev)
+{
+ struct android_pmem_platform_data *pdata;
+
+ if (!pdev || !pdev->dev.platform_data) {
+ printk(KERN_ALERT "Unable to probe pmem!\n");
+ return -1;
+ }
+ pdata = pdev->dev.platform_data;
+ return pmem_setup(pdata, NULL, NULL);
+}
+
+
+static int pmem_remove(struct platform_device *pdev)
+{
+ int id = pdev->id;
+ __free_page(pfn_to_page(pmem[id].garbage_pfn));
+ misc_deregister(&pmem[id].dev);
+ return 0;
+}
+
+static struct platform_driver pmem_driver = {
+ .probe = pmem_probe,
+ .remove = pmem_remove,
+ .driver = { .name = "android_pmem" }
+};
+
+
+static int __init pmem_init(void)
+{
+ return platform_driver_register(&pmem_driver);
+}
+
+static void __exit pmem_exit(void)
+{
+ platform_driver_unregister(&pmem_driver);
+}
+
+module_init(pmem_init);
+module_exit(pmem_exit);
+
diff --git a/drivers/staging/android/ram_console.c b/drivers/staging/android/ram_console.c
new file mode 100644
index 000000000000..6d4d67924f22
--- /dev/null
+++ b/drivers/staging/android/ram_console.c
@@ -0,0 +1,443 @@
+/* drivers/android/ram_console.c
+ *
+ * Copyright (C) 2007-2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/console.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/proc_fs.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include "ram_console.h"
+
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+#include <linux/rslib.h>
+#endif
+
+struct ram_console_buffer {
+ uint32_t sig;
+ uint32_t start;
+ uint32_t size;
+ uint8_t data[0];
+};
+
+#define RAM_CONSOLE_SIG (0x43474244) /* DBGC */
+
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT
+static char __initdata
+ ram_console_old_log_init_buffer[CONFIG_ANDROID_RAM_CONSOLE_EARLY_SIZE];
+#endif
+static char *ram_console_old_log;
+static size_t ram_console_old_log_size;
+
+static struct ram_console_buffer *ram_console_buffer;
+static size_t ram_console_buffer_size;
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+static char *ram_console_par_buffer;
+static struct rs_control *ram_console_rs_decoder;
+static int ram_console_corrected_bytes;
+static int ram_console_bad_blocks;
+#define ECC_BLOCK_SIZE CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_DATA_SIZE
+#define ECC_SIZE CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_ECC_SIZE
+#define ECC_SYMSIZE CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE
+#define ECC_POLY CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_POLYNOMIAL
+#endif
+
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+static void ram_console_encode_rs8(uint8_t *data, size_t len, uint8_t *ecc)
+{
+ int i;
+ uint16_t par[ECC_SIZE];
+ /* Initialize the parity buffer */
+ memset(par, 0, sizeof(par));
+ encode_rs8(ram_console_rs_decoder, data, len, par, 0);
+ for (i = 0; i < ECC_SIZE; i++)
+ ecc[i] = par[i];
+}
+
+static int ram_console_decode_rs8(void *data, size_t len, uint8_t *ecc)
+{
+ int i;
+ uint16_t par[ECC_SIZE];
+ for (i = 0; i < ECC_SIZE; i++)
+ par[i] = ecc[i];
+ return decode_rs8(ram_console_rs_decoder, data, par, len,
+ NULL, 0, NULL, 0, NULL);
+}
+#endif
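The helpers above encode and decode one Reed-Solomon parity record per block of log data. The records sit consecutively in ram_console_par_buffer: one ECC_SIZE record per ECC_BLOCK_SIZE bytes of log, plus a final record for the buffer header. A small sketch of that offset arithmetic with purely illustrative sizes:

#include <stdio.h>

#define TOY_BLOCK_SIZE 128   /* stand-in for ECC_BLOCK_SIZE */
#define TOY_ECC_SIZE   16    /* stand-in for ECC_SIZE */

static size_t toy_par_offset(size_t byte_off)
{
	/* parity record covering the block that contains byte_off */
	return (byte_off / TOY_BLOCK_SIZE) * TOY_ECC_SIZE;
}

int main(void)
{
	printf("byte 0    -> parity at %zu\n", toy_par_offset(0));     /* 0   */
	printf("byte 130  -> parity at %zu\n", toy_par_offset(130));   /* 16  */
	printf("byte 4000 -> parity at %zu\n", toy_par_offset(4000));  /* 496 */
	return 0;
}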
+
+static void ram_console_update(const char *s, unsigned int count)
+{
+ struct ram_console_buffer *buffer = ram_console_buffer;
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+ uint8_t *buffer_end = buffer->data + ram_console_buffer_size;
+ uint8_t *block;
+ uint8_t *par;
+ int size = ECC_BLOCK_SIZE;
+#endif
+ memcpy(buffer->data + buffer->start, s, count);
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+ block = buffer->data + (buffer->start & ~(ECC_BLOCK_SIZE - 1));
+ par = ram_console_par_buffer +
+ (buffer->start / ECC_BLOCK_SIZE) * ECC_SIZE;
+ do {
+ if (block + ECC_BLOCK_SIZE > buffer_end)
+ size = buffer_end - block;
+ ram_console_encode_rs8(block, size, par);
+ block += ECC_BLOCK_SIZE;
+ par += ECC_SIZE;
+ } while (block < buffer->data + buffer->start + count);
+#endif
+}
+
+static void ram_console_update_header(void)
+{
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+ struct ram_console_buffer *buffer = ram_console_buffer;
+ uint8_t *par;
+ par = ram_console_par_buffer +
+ DIV_ROUND_UP(ram_console_buffer_size, ECC_BLOCK_SIZE) * ECC_SIZE;
+ ram_console_encode_rs8((uint8_t *)buffer, sizeof(*buffer), par);
+#endif
+}
+
+static void
+ram_console_write(struct console *console, const char *s, unsigned int count)
+{
+ int rem;
+ struct ram_console_buffer *buffer = ram_console_buffer;
+
+ if (count > ram_console_buffer_size) {
+ s += count - ram_console_buffer_size;
+ count = ram_console_buffer_size;
+ }
+ rem = ram_console_buffer_size - buffer->start;
+ if (rem < count) {
+ ram_console_update(s, rem);
+ s += rem;
+ count -= rem;
+ buffer->start = 0;
+ buffer->size = ram_console_buffer_size;
+ }
+ ram_console_update(s, count);
+
+ buffer->start += count;
+ if (buffer->size < ram_console_buffer_size)
+ buffer->size += count;
+ ram_console_update_header();
+}
+
+static struct console ram_console = {
+ .name = "ram",
+ .write = ram_console_write,
+ .flags = CON_PRINTBUFFER | CON_ENABLED,
+ .index = -1,
+};
+
+void ram_console_enable_console(int enabled)
+{
+ if (enabled)
+ ram_console.flags |= CON_ENABLED;
+ else
+ ram_console.flags &= ~CON_ENABLED;
+}
+
+static void __init
+ram_console_save_old(struct ram_console_buffer *buffer, const char *bootinfo,
+ char *dest)
+{
+ size_t old_log_size = buffer->size;
+ size_t bootinfo_size = 0;
+ size_t total_size = old_log_size;
+ char *ptr;
+ const char *bootinfo_label = "Boot info:\n";
+
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+ uint8_t *block;
+ uint8_t *par;
+ char strbuf[80];
+ int strbuf_len = 0;
+
+ block = buffer->data;
+ par = ram_console_par_buffer;
+ while (block < buffer->data + buffer->size) {
+ int numerr;
+ int size = ECC_BLOCK_SIZE;
+ if (block + size > buffer->data + ram_console_buffer_size)
+ size = buffer->data + ram_console_buffer_size - block;
+ numerr = ram_console_decode_rs8(block, size, par);
+ if (numerr > 0) {
+#if 0
+ printk(KERN_INFO "ram_console: error in block %p, %d\n",
+ block, numerr);
+#endif
+ ram_console_corrected_bytes += numerr;
+ } else if (numerr < 0) {
+#if 0
+ printk(KERN_INFO "ram_console: uncorrectable error in "
+ "block %p\n", block);
+#endif
+ ram_console_bad_blocks++;
+ }
+ block += ECC_BLOCK_SIZE;
+ par += ECC_SIZE;
+ }
+ if (ram_console_corrected_bytes || ram_console_bad_blocks)
+ strbuf_len = snprintf(strbuf, sizeof(strbuf),
+ "\n%d Corrected bytes, %d unrecoverable blocks\n",
+ ram_console_corrected_bytes, ram_console_bad_blocks);
+ else
+ strbuf_len = snprintf(strbuf, sizeof(strbuf),
+ "\nNo errors detected\n");
+ if (strbuf_len >= sizeof(strbuf))
+ strbuf_len = sizeof(strbuf) - 1;
+ total_size += strbuf_len;
+#endif
+
+ if (bootinfo)
+ bootinfo_size = strlen(bootinfo) + strlen(bootinfo_label);
+ total_size += bootinfo_size;
+
+ if (dest == NULL) {
+ dest = kmalloc(total_size, GFP_KERNEL);
+ if (dest == NULL) {
+ printk(KERN_ERR
+ "ram_console: failed to allocate buffer\n");
+ return;
+ }
+ }
+
+ ram_console_old_log = dest;
+ ram_console_old_log_size = total_size;
+ memcpy(ram_console_old_log,
+ &buffer->data[buffer->start], buffer->size - buffer->start);
+ memcpy(ram_console_old_log + buffer->size - buffer->start,
+ &buffer->data[0], buffer->start);
+ ptr = ram_console_old_log + old_log_size;
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+ memcpy(ptr, strbuf, strbuf_len);
+ ptr += strbuf_len;
+#endif
+ if (bootinfo) {
+ memcpy(ptr, bootinfo_label, strlen(bootinfo_label));
+ ptr += strlen(bootinfo_label);
+ memcpy(ptr, bootinfo, bootinfo_size);
+ ptr += bootinfo_size;
+ }
+}
+
+static int __init ram_console_init(struct ram_console_buffer *buffer,
+ size_t buffer_size, const char *bootinfo,
+ char *old_buf)
+{
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+ int numerr;
+ uint8_t *par;
+#endif
+ ram_console_buffer = buffer;
+ ram_console_buffer_size =
+ buffer_size - sizeof(struct ram_console_buffer);
+
+ if (ram_console_buffer_size > buffer_size) {
+ pr_err("ram_console: buffer %p, invalid size %zu, "
+ "datasize %zu\n", buffer, buffer_size,
+ ram_console_buffer_size);
+ return 0;
+ }
+
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+ ram_console_buffer_size -= (DIV_ROUND_UP(ram_console_buffer_size,
+ ECC_BLOCK_SIZE) + 1) * ECC_SIZE;
+
+ if (ram_console_buffer_size > buffer_size) {
+ pr_err("ram_console: buffer %p, invalid size %zu, "
+ "non-ecc datasize %zu\n",
+ buffer, buffer_size, ram_console_buffer_size);
+ return 0;
+ }
+
+ ram_console_par_buffer = buffer->data + ram_console_buffer_size;
+
+
+ /* first consecutive root is 0
+ * primitive element to generate roots = 1
+ */
+ ram_console_rs_decoder = init_rs(ECC_SYMSIZE, ECC_POLY, 0, 1, ECC_SIZE);
+ if (ram_console_rs_decoder == NULL) {
+ printk(KERN_INFO "ram_console: init_rs failed\n");
+ return 0;
+ }
+
+ ram_console_corrected_bytes = 0;
+ ram_console_bad_blocks = 0;
+
+ par = ram_console_par_buffer +
+ DIV_ROUND_UP(ram_console_buffer_size, ECC_BLOCK_SIZE) * ECC_SIZE;
+
+ numerr = ram_console_decode_rs8(buffer, sizeof(*buffer), par);
+ if (numerr > 0) {
+ printk(KERN_INFO "ram_console: error in header, %d\n", numerr);
+ ram_console_corrected_bytes += numerr;
+ } else if (numerr < 0) {
+ printk(KERN_INFO
+ "ram_console: uncorrectable error in header\n");
+ ram_console_bad_blocks++;
+ }
+#endif
+
+ if (buffer->sig == RAM_CONSOLE_SIG) {
+ if (buffer->size > ram_console_buffer_size
+ || buffer->start > buffer->size)
+ printk(KERN_INFO "ram_console: found existing invalid "
+ "buffer, size %d, start %d\n",
+ buffer->size, buffer->start);
+ else {
+ printk(KERN_INFO "ram_console: found existing buffer, "
+ "size %d, start %d\n",
+ buffer->size, buffer->start);
+ ram_console_save_old(buffer, bootinfo, old_buf);
+ }
+ } else {
+ printk(KERN_INFO "ram_console: no valid data in buffer "
+ "(sig = 0x%08x)\n", buffer->sig);
+ }
+
+ buffer->sig = RAM_CONSOLE_SIG;
+ buffer->start = 0;
+ buffer->size = 0;
+
+ register_console(&ram_console);
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ENABLE_VERBOSE
+ console_verbose();
+#endif
+ return 0;
+}
+
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT
+static int __init ram_console_early_init(void)
+{
+ return ram_console_init((struct ram_console_buffer *)
+ CONFIG_ANDROID_RAM_CONSOLE_EARLY_ADDR,
+ CONFIG_ANDROID_RAM_CONSOLE_EARLY_SIZE,
+ NULL,
+ ram_console_old_log_init_buffer);
+}
+#else
+static int ram_console_driver_probe(struct platform_device *pdev)
+{
+ struct resource *res = pdev->resource;
+ size_t start;
+ size_t buffer_size;
+ void *buffer;
+ const char *bootinfo = NULL;
+ struct ram_console_platform_data *pdata = pdev->dev.platform_data;
+
+ if (res == NULL || pdev->num_resources != 1 ||
+ !(res->flags & IORESOURCE_MEM)) {
+ printk(KERN_ERR "ram_console: invalid resource, %p %d flags "
+ "%lx\n", res, pdev->num_resources, res ? res->flags : 0);
+ return -ENXIO;
+ }
+ buffer_size = res->end - res->start + 1;
+ start = res->start;
+ printk(KERN_INFO "ram_console: got buffer at %zx, size %zx\n",
+ start, buffer_size);
+ buffer = ioremap(res->start, buffer_size);
+ if (buffer == NULL) {
+ printk(KERN_ERR "ram_console: failed to map memory\n");
+ return -ENOMEM;
+ }
+
+ if (pdata)
+ bootinfo = pdata->bootinfo;
+
+ return ram_console_init(buffer, buffer_size, bootinfo, NULL/* allocate */);
+}
+
+static struct platform_driver ram_console_driver = {
+ .probe = ram_console_driver_probe,
+ .driver = {
+ .name = "ram_console",
+ },
+};
+
+static int __init ram_console_module_init(void)
+{
+ int err;
+ err = platform_driver_register(&ram_console_driver);
+ return err;
+}
+#endif
+
+static ssize_t ram_console_read_old(struct file *file, char __user *buf,
+ size_t len, loff_t *offset)
+{
+ loff_t pos = *offset;
+ ssize_t count;
+
+ if (pos >= ram_console_old_log_size)
+ return 0;
+
+ count = min(len, (size_t)(ram_console_old_log_size - pos));
+ if (copy_to_user(buf, ram_console_old_log + pos, count))
+ return -EFAULT;
+
+ *offset += count;
+ return count;
+}
+
+static const struct file_operations ram_console_file_ops = {
+ .owner = THIS_MODULE,
+ .read = ram_console_read_old,
+};
+
+static int __init ram_console_late_init(void)
+{
+ struct proc_dir_entry *entry;
+
+ if (ram_console_old_log == NULL)
+ return 0;
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT
+ ram_console_old_log = kmalloc(ram_console_old_log_size, GFP_KERNEL);
+ if (ram_console_old_log == NULL) {
+ printk(KERN_ERR
+ "ram_console: failed to allocate buffer for old log\n");
+ ram_console_old_log_size = 0;
+ return 0;
+ }
+ memcpy(ram_console_old_log,
+ ram_console_old_log_init_buffer, ram_console_old_log_size);
+#endif
+ entry = create_proc_entry("last_kmsg", S_IFREG | S_IRUGO, NULL);
+ if (!entry) {
+ printk(KERN_ERR "ram_console: failed to create proc entry\n");
+ kfree(ram_console_old_log);
+ ram_console_old_log = NULL;
+ return 0;
+ }
+
+ entry->proc_fops = &ram_console_file_ops;
+ entry->size = ram_console_old_log_size;
+ return 0;
+}
+
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT
+console_initcall(ram_console_early_init);
+#else
+postcore_initcall(ram_console_module_init);
+#endif
+late_initcall(ram_console_late_init);
+
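
The ECC bookkeeping in ram_console_init() above reserves, at the end of the
shared region, one ECC_SIZE parity record per ECC_BLOCK_SIZE block of log data
plus one extra record for the header, which is why the usable size is reduced
by (DIV_ROUND_UP(size, ECC_BLOCK_SIZE) + 1) * ECC_SIZE. A minimal user-space
sketch of that arithmetic, using made-up sizes rather than the real
CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_* values:

    /* ecc_layout.c - standalone sketch of the ram_console ECC layout math.
     * The 64 KiB region, 8-byte header and 128/16 ECC parameters are
     * illustrative stand-ins, not values taken from the driver's Kconfig.
     */
    #include <stdio.h>
    #include <stddef.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            size_t buffer_size = 64 * 1024;  /* total reserved RAM */
            size_t header_size = 8;          /* stand-in for sizeof(struct ram_console_buffer) */
            size_t ecc_block = 128;          /* stand-in for ECC_BLOCK_SIZE */
            size_t ecc_size = 16;            /* stand-in for ECC_SIZE */
            size_t data_size = buffer_size - header_size;
            size_t parity_size;

            /* One parity record per data block, plus one for the header,
             * subtracted in a single pass exactly as ram_console_init() does. */
            parity_size = (DIV_ROUND_UP(data_size, ecc_block) + 1) * ecc_size;
            data_size -= parity_size;

            printf("log data: %zu bytes, parity: %zu bytes\n",
                   data_size, parity_size);
            printf("header parity record at parity offset %zu\n",
                   DIV_ROUND_UP(data_size, ecc_block) * ecc_size);
            return 0;
    }

With these stand-in numbers a 64 KiB region ends up with 57320 bytes of log
data and 8208 bytes of parity; the real split depends entirely on the Kconfig
options.
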
diff --git a/drivers/staging/android/ram_console.h b/drivers/staging/android/ram_console.h
new file mode 100644
index 000000000000..9f1125c11066
--- /dev/null
+++ b/drivers/staging/android/ram_console.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _INCLUDE_LINUX_PLATFORM_DATA_RAM_CONSOLE_H_
+#define _INCLUDE_LINUX_PLATFORM_DATA_RAM_CONSOLE_H_
+
+struct ram_console_platform_data {
+ const char *bootinfo;
+};
+
+#endif /* _INCLUDE_LINUX_PLATFORM_DATA_RAM_CONSOLE_H_ */
diff --git a/drivers/staging/android/switch/Kconfig b/drivers/staging/android/switch/Kconfig
new file mode 100644
index 000000000000..36846f62f4bc
--- /dev/null
+++ b/drivers/staging/android/switch/Kconfig
@@ -0,0 +1,11 @@
+menuconfig ANDROID_SWITCH
+ tristate "Android Switch class support"
+ help
+ Say Y here to enable Android switch class support. This allows
+ monitoring switches by userspace via sysfs and uevent.
+
+config ANDROID_SWITCH_GPIO
+ tristate "Android GPIO Switch support"
+ depends on GENERIC_GPIO && ANDROID_SWITCH
+ help
+ Say Y here to enable GPIO based switch support.
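
The help text above refers to the sysfs and uevent interface implemented by
switch_class.c further down in this patch: every registered switch appears as
/sys/class/switch/<name>/ with "state" and "name" attributes, and state
changes also emit a KOBJ_CHANGE uevent carrying SWITCH_NAME and SWITCH_STATE.
A small user-space sketch that reads the state attribute; the "h2w" switch
name is only an example and depends on what the board registers:

    /* read_switch_state.c - illustrative reader for an Android switch
     * class device; the "h2w" name is a placeholder.
     */
    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/sys/class/switch/h2w/state", "r");
            int state;

            if (!f) {
                    perror("open state attribute");
                    return 1;
            }
            if (fscanf(f, "%d", &state) == 1)
                    printf("switch state: %d\n", state);
            fclose(f);
            return 0;
    }
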
diff --git a/drivers/staging/android/switch/Makefile b/drivers/staging/android/switch/Makefile
new file mode 100644
index 000000000000..d76bfdcedfaf
--- /dev/null
+++ b/drivers/staging/android/switch/Makefile
@@ -0,0 +1,4 @@
+# Android Switch Class Driver
+obj-$(CONFIG_ANDROID_SWITCH) += switch_class.o
+obj-$(CONFIG_ANDROID_SWITCH_GPIO) += switch_gpio.o
+
diff --git a/drivers/staging/android/switch/switch.h b/drivers/staging/android/switch/switch.h
new file mode 100644
index 000000000000..4fcb3109875a
--- /dev/null
+++ b/drivers/staging/android/switch/switch.h
@@ -0,0 +1,53 @@
+/*
+ * Switch class driver
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+*/
+
+#ifndef __LINUX_SWITCH_H__
+#define __LINUX_SWITCH_H__
+
+struct switch_dev {
+ const char *name;
+ struct device *dev;
+ int index;
+ int state;
+
+ ssize_t (*print_name)(struct switch_dev *sdev, char *buf);
+ ssize_t (*print_state)(struct switch_dev *sdev, char *buf);
+};
+
+struct gpio_switch_platform_data {
+ const char *name;
+ unsigned gpio;
+
+ /* if NULL, switch_dev.name will be printed */
+ const char *name_on;
+ const char *name_off;
+ /* if NULL, "0" or "1" will be printed */
+ const char *state_on;
+ const char *state_off;
+};
+
+extern int switch_dev_register(struct switch_dev *sdev);
+extern void switch_dev_unregister(struct switch_dev *sdev);
+
+static inline int switch_get_state(struct switch_dev *sdev)
+{
+ return sdev->state;
+}
+
+extern void switch_set_state(struct switch_dev *sdev, int state);
+
+#endif /* __LINUX_SWITCH_H__ */
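
The platform data declared above is consumed by switch_gpio.c later in this
patch, which binds to platform devices named "switch-gpio". A hedged sketch of
board code providing that data; the GPIO number and the "h2w" name are
invented for illustration, and the include path would follow wherever this
header actually ends up in the tree:

    /* board_headset_switch.c - sketch of board wiring for switch-gpio. */
    #include <linux/platform_device.h>
    #include "switch.h"              /* staging path is an assumption */

    static struct gpio_switch_platform_data headset_switch_data = {
            .name = "h2w",           /* becomes /sys/class/switch/h2w/ */
            .gpio = 42,              /* board-specific detect line     */
            /* name_on/name_off and state_on/state_off left NULL, so the
             * defaults described in switch.h are used.                 */
    };

    static struct platform_device headset_switch_device = {
            .name = "switch-gpio",   /* matches gpio_switch_driver     */
            .id   = -1,
            .dev  = {
                    .platform_data = &headset_switch_data,
            },
    };

    /* Board init would then call:
     *      platform_device_register(&headset_switch_device);
     */
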
diff --git a/drivers/staging/android/switch/switch_class.c b/drivers/staging/android/switch/switch_class.c
new file mode 100644
index 000000000000..74680446fc66
--- /dev/null
+++ b/drivers/staging/android/switch/switch_class.c
@@ -0,0 +1,174 @@
+/*
+ * switch_class.c
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+*/
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/err.h>
+#include "switch.h"
+
+struct class *switch_class;
+static atomic_t device_count;
+
+static ssize_t state_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct switch_dev *sdev = (struct switch_dev *)
+ dev_get_drvdata(dev);
+
+ if (sdev->print_state) {
+ int ret = sdev->print_state(sdev, buf);
+ if (ret >= 0)
+ return ret;
+ }
+ return sprintf(buf, "%d\n", sdev->state);
+}
+
+static ssize_t name_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct switch_dev *sdev = (struct switch_dev *)
+ dev_get_drvdata(dev);
+
+ if (sdev->print_name) {
+ int ret = sdev->print_name(sdev, buf);
+ if (ret >= 0)
+ return ret;
+ }
+ return sprintf(buf, "%s\n", sdev->name);
+}
+
+static DEVICE_ATTR(state, S_IRUGO | S_IWUSR, state_show, NULL);
+static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, name_show, NULL);
+
+void switch_set_state(struct switch_dev *sdev, int state)
+{
+ char name_buf[120];
+ char state_buf[120];
+ char *prop_buf;
+ char *envp[3];
+ int env_offset = 0;
+ int length;
+
+ if (sdev->state != state) {
+ sdev->state = state;
+
+ prop_buf = (char *)get_zeroed_page(GFP_KERNEL);
+ if (prop_buf) {
+ length = name_show(sdev->dev, NULL, prop_buf);
+ if (length > 0) {
+ if (prop_buf[length - 1] == '\n')
+ prop_buf[length - 1] = 0;
+ snprintf(name_buf, sizeof(name_buf),
+ "SWITCH_NAME=%s", prop_buf);
+ envp[env_offset++] = name_buf;
+ }
+ length = state_show(sdev->dev, NULL, prop_buf);
+ if (length > 0) {
+ if (prop_buf[length - 1] == '\n')
+ prop_buf[length - 1] = 0;
+ snprintf(state_buf, sizeof(state_buf),
+ "SWITCH_STATE=%s", prop_buf);
+ envp[env_offset++] = state_buf;
+ }
+ envp[env_offset] = NULL;
+ kobject_uevent_env(&sdev->dev->kobj, KOBJ_CHANGE, envp);
+ free_page((unsigned long)prop_buf);
+ } else {
+ printk(KERN_ERR "out of memory in switch_set_state\n");
+ kobject_uevent(&sdev->dev->kobj, KOBJ_CHANGE);
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(switch_set_state);
+
+static int create_switch_class(void)
+{
+ if (!switch_class) {
+ switch_class = class_create(THIS_MODULE, "switch");
+ if (IS_ERR(switch_class))
+ return PTR_ERR(switch_class);
+ atomic_set(&device_count, 0);
+ }
+
+ return 0;
+}
+
+int switch_dev_register(struct switch_dev *sdev)
+{
+ int ret;
+
+ if (!switch_class) {
+ ret = create_switch_class();
+ if (ret < 0)
+ return ret;
+ }
+
+ sdev->index = atomic_inc_return(&device_count);
+ sdev->dev = device_create(switch_class, NULL,
+ MKDEV(0, sdev->index), NULL, sdev->name);
+ if (IS_ERR(sdev->dev))
+ return PTR_ERR(sdev->dev);
+
+ ret = device_create_file(sdev->dev, &dev_attr_state);
+ if (ret < 0)
+ goto err_create_file_1;
+ ret = device_create_file(sdev->dev, &dev_attr_name);
+ if (ret < 0)
+ goto err_create_file_2;
+
+ dev_set_drvdata(sdev->dev, sdev);
+ sdev->state = 0;
+ return 0;
+
+err_create_file_2:
+ device_remove_file(sdev->dev, &dev_attr_state);
+err_create_file_1:
+ device_destroy(switch_class, MKDEV(0, sdev->index));
+ printk(KERN_ERR "switch: Failed to register driver %s\n", sdev->name);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(switch_dev_register);
+
+void switch_dev_unregister(struct switch_dev *sdev)
+{
+ device_remove_file(sdev->dev, &dev_attr_name);
+ device_remove_file(sdev->dev, &dev_attr_state);
+ device_destroy(switch_class, MKDEV(0, sdev->index));
+ dev_set_drvdata(sdev->dev, NULL);
+}
+EXPORT_SYMBOL_GPL(switch_dev_unregister);
+
+static int __init switch_class_init(void)
+{
+ return create_switch_class();
+}
+
+static void __exit switch_class_exit(void)
+{
+ class_destroy(switch_class);
+}
+
+module_init(switch_class_init);
+module_exit(switch_class_exit);
+
+MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>");
+MODULE_DESCRIPTION("Switch class driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/android/switch/switch_gpio.c b/drivers/staging/android/switch/switch_gpio.c
new file mode 100644
index 000000000000..38b2c2f6004e
--- /dev/null
+++ b/drivers/staging/android/switch/switch_gpio.c
@@ -0,0 +1,172 @@
+/*
+ * switch_gpio.c
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+*/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/gpio.h>
+#include "switch.h"
+
+struct gpio_switch_data {
+ struct switch_dev sdev;
+ unsigned gpio;
+ const char *name_on;
+ const char *name_off;
+ const char *state_on;
+ const char *state_off;
+ int irq;
+ struct work_struct work;
+};
+
+static void gpio_switch_work(struct work_struct *work)
+{
+ int state;
+ struct gpio_switch_data *data =
+ container_of(work, struct gpio_switch_data, work);
+
+ state = gpio_get_value(data->gpio);
+ switch_set_state(&data->sdev, state);
+}
+
+static irqreturn_t gpio_irq_handler(int irq, void *dev_id)
+{
+ struct gpio_switch_data *switch_data =
+ (struct gpio_switch_data *)dev_id;
+
+ schedule_work(&switch_data->work);
+ return IRQ_HANDLED;
+}
+
+static ssize_t switch_gpio_print_state(struct switch_dev *sdev, char *buf)
+{
+ struct gpio_switch_data *switch_data =
+ container_of(sdev, struct gpio_switch_data, sdev);
+ const char *state;
+ if (switch_get_state(sdev))
+ state = switch_data->state_on;
+ else
+ state = switch_data->state_off;
+
+ if (state)
+ return sprintf(buf, "%s\n", state);
+ return -1;
+}
+
+static int gpio_switch_probe(struct platform_device *pdev)
+{
+ struct gpio_switch_platform_data *pdata = pdev->dev.platform_data;
+ struct gpio_switch_data *switch_data;
+ int ret = 0;
+
+ if (!pdata)
+ return -EBUSY;
+
+ switch_data = kzalloc(sizeof(struct gpio_switch_data), GFP_KERNEL);
+ if (!switch_data)
+ return -ENOMEM;
+
+ switch_data->sdev.name = pdata->name;
+ switch_data->gpio = pdata->gpio;
+ switch_data->name_on = pdata->name_on;
+ switch_data->name_off = pdata->name_off;
+ switch_data->state_on = pdata->state_on;
+ switch_data->state_off = pdata->state_off;
+ switch_data->sdev.print_state = switch_gpio_print_state;
+
+ ret = switch_dev_register(&switch_data->sdev);
+ if (ret < 0)
+ goto err_switch_dev_register;
+
+ ret = gpio_request(switch_data->gpio, pdev->name);
+ if (ret < 0)
+ goto err_request_gpio;
+
+ ret = gpio_direction_input(switch_data->gpio);
+ if (ret < 0)
+ goto err_set_gpio_input;
+
+ INIT_WORK(&switch_data->work, gpio_switch_work);
+
+ switch_data->irq = gpio_to_irq(switch_data->gpio);
+ if (switch_data->irq < 0) {
+ ret = switch_data->irq;
+ goto err_detect_irq_num_failed;
+ }
+
+ ret = request_irq(switch_data->irq, gpio_irq_handler,
+ IRQF_TRIGGER_LOW, pdev->name, switch_data);
+ if (ret < 0)
+ goto err_request_irq;
+
+ /* Perform initial detection */
+ gpio_switch_work(&switch_data->work);
+
+ return 0;
+
+err_request_irq:
+err_detect_irq_num_failed:
+err_set_gpio_input:
+ gpio_free(switch_data->gpio);
+err_request_gpio:
+ switch_dev_unregister(&switch_data->sdev);
+err_switch_dev_register:
+ kfree(switch_data);
+
+ return ret;
+}
+
+static int __devexit gpio_switch_remove(struct platform_device *pdev)
+{
+ struct gpio_switch_data *switch_data = platform_get_drvdata(pdev);
+
+ cancel_work_sync(&switch_data->work);
+ gpio_free(switch_data->gpio);
+ switch_dev_unregister(&switch_data->sdev);
+ kfree(switch_data);
+
+ return 0;
+}
+
+static struct platform_driver gpio_switch_driver = {
+ .probe = gpio_switch_probe,
+ .remove = __devexit_p(gpio_switch_remove),
+ .driver = {
+ .name = "switch-gpio",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init gpio_switch_init(void)
+{
+ return platform_driver_register(&gpio_switch_driver);
+}
+
+static void __exit gpio_switch_exit(void)
+{
+ platform_driver_unregister(&gpio_switch_driver);
+}
+
+module_init(gpio_switch_init);
+module_exit(gpio_switch_exit);
+
+MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>");
+MODULE_DESCRIPTION("GPIO Switch driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/android/timed_gpio.c b/drivers/staging/android/timed_gpio.c
new file mode 100644
index 000000000000..a64481c3e86d
--- /dev/null
+++ b/drivers/staging/android/timed_gpio.c
@@ -0,0 +1,176 @@
+/* drivers/misc/timed_gpio.c
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/hrtimer.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+
+#include "timed_output.h"
+#include "timed_gpio.h"
+
+
+struct timed_gpio_data {
+ struct timed_output_dev dev;
+ struct hrtimer timer;
+ spinlock_t lock;
+ unsigned gpio;
+ int max_timeout;
+ u8 active_low;
+};
+
+static enum hrtimer_restart gpio_timer_func(struct hrtimer *timer)
+{
+ struct timed_gpio_data *data =
+ container_of(timer, struct timed_gpio_data, timer);
+
+ gpio_direction_output(data->gpio, data->active_low ? 1 : 0);
+ return HRTIMER_NORESTART;
+}
+
+static int gpio_get_time(struct timed_output_dev *dev)
+{
+ struct timed_gpio_data *data =
+ container_of(dev, struct timed_gpio_data, dev);
+
+ if (hrtimer_active(&data->timer)) {
+ ktime_t r = hrtimer_get_remaining(&data->timer);
+ struct timeval t = ktime_to_timeval(r);
+ return t.tv_sec * 1000 + t.tv_usec / 1000;
+ } else
+ return 0;
+}
+
+static void gpio_enable(struct timed_output_dev *dev, int value)
+{
+ struct timed_gpio_data *data =
+ container_of(dev, struct timed_gpio_data, dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&data->lock, flags);
+
+ /* cancel previous timer and set GPIO according to value */
+ hrtimer_cancel(&data->timer);
+ gpio_direction_output(data->gpio, data->active_low ? !value : !!value);
+
+ if (value > 0) {
+ if (value > data->max_timeout)
+ value = data->max_timeout;
+
+ hrtimer_start(&data->timer,
+ ktime_set(value / 1000, (value % 1000) * 1000000),
+ HRTIMER_MODE_REL);
+ }
+
+ spin_unlock_irqrestore(&data->lock, flags);
+}
+
+static int timed_gpio_probe(struct platform_device *pdev)
+{
+ struct timed_gpio_platform_data *pdata = pdev->dev.platform_data;
+ struct timed_gpio *cur_gpio;
+ struct timed_gpio_data *gpio_data, *gpio_dat;
+ int i, j, ret = 0;
+
+ if (!pdata)
+ return -EBUSY;
+
+ gpio_data = kzalloc(sizeof(struct timed_gpio_data) * pdata->num_gpios,
+ GFP_KERNEL);
+ if (!gpio_data)
+ return -ENOMEM;
+
+ for (i = 0; i < pdata->num_gpios; i++) {
+ cur_gpio = &pdata->gpios[i];
+ gpio_dat = &gpio_data[i];
+
+ hrtimer_init(&gpio_dat->timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ gpio_dat->timer.function = gpio_timer_func;
+ spin_lock_init(&gpio_dat->lock);
+
+ gpio_dat->dev.name = cur_gpio->name;
+ gpio_dat->dev.get_time = gpio_get_time;
+ gpio_dat->dev.enable = gpio_enable;
+ ret = gpio_request(cur_gpio->gpio, cur_gpio->name);
+ if (ret >= 0) {
+ ret = timed_output_dev_register(&gpio_dat->dev);
+ if (ret < 0)
+ gpio_free(cur_gpio->gpio);
+ }
+ if (ret < 0) {
+ for (j = 0; j < i; j++) {
+ timed_output_dev_unregister(&gpio_data[j].dev);
+ gpio_free(gpio_data[j].gpio);
+ }
+ kfree(gpio_data);
+ return ret;
+ }
+
+ gpio_dat->gpio = cur_gpio->gpio;
+ gpio_dat->max_timeout = cur_gpio->max_timeout;
+ gpio_dat->active_low = cur_gpio->active_low;
+ gpio_direction_output(gpio_dat->gpio, gpio_dat->active_low);
+ }
+
+ platform_set_drvdata(pdev, gpio_data);
+
+ return 0;
+}
+
+static int timed_gpio_remove(struct platform_device *pdev)
+{
+ struct timed_gpio_platform_data *pdata = pdev->dev.platform_data;
+ struct timed_gpio_data *gpio_data = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < pdata->num_gpios; i++) {
+ timed_output_dev_unregister(&gpio_data[i].dev);
+ gpio_free(gpio_data[i].gpio);
+ }
+
+ kfree(gpio_data);
+
+ return 0;
+}
+
+static struct platform_driver timed_gpio_driver = {
+ .probe = timed_gpio_probe,
+ .remove = timed_gpio_remove,
+ .driver = {
+ .name = TIMED_GPIO_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init timed_gpio_init(void)
+{
+ return platform_driver_register(&timed_gpio_driver);
+}
+
+static void __exit timed_gpio_exit(void)
+{
+ platform_driver_unregister(&timed_gpio_driver);
+}
+
+module_init(timed_gpio_init);
+module_exit(timed_gpio_exit);
+
+MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>");
+MODULE_DESCRIPTION("timed gpio driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/android/timed_gpio.h b/drivers/staging/android/timed_gpio.h
new file mode 100644
index 000000000000..a0e15f8be3f7
--- /dev/null
+++ b/drivers/staging/android/timed_gpio.h
@@ -0,0 +1,33 @@
+/* include/linux/timed_gpio.h
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+*/
+
+#ifndef _LINUX_TIMED_GPIO_H
+#define _LINUX_TIMED_GPIO_H
+
+#define TIMED_GPIO_NAME "timed-gpio"
+
+struct timed_gpio {
+ const char *name;
+ unsigned gpio;
+ int max_timeout;
+ u8 active_low;
+};
+
+struct timed_gpio_platform_data {
+ int num_gpios;
+ struct timed_gpio *gpios;
+};
+
+#endif
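
This platform data is consumed by timed_gpio.c above, which binds to platform
devices named TIMED_GPIO_NAME ("timed-gpio") and exposes each entry through
the timed_output class added below. A sketch of board code describing a single
vibrator line; the GPIO number and timeout are illustrative only:

    /* board_vibrator.c - sketch of board wiring for timed-gpio. */
    #include <linux/platform_device.h>
    #include "timed_gpio.h"          /* staging path is an assumption */

    static struct timed_gpio vibrator_gpio = {
            .name        = "vibrator",  /* /sys/class/timed_output/vibrator */
            .gpio        = 56,          /* board-specific motor enable line */
            .max_timeout = 15000,       /* clamp requests to 15 seconds     */
            .active_low  = 0,
    };

    static struct timed_gpio_platform_data vibrator_pdata = {
            .num_gpios = 1,
            .gpios     = &vibrator_gpio,
    };

    static struct platform_device vibrator_device = {
            .name = TIMED_GPIO_NAME,
            .id   = -1,
            .dev  = { .platform_data = &vibrator_pdata },
    };

    /* Board init would then call:
     *      platform_device_register(&vibrator_device);
     */
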
diff --git a/drivers/staging/android/timed_output.c b/drivers/staging/android/timed_output.c
new file mode 100644
index 000000000000..f373422308e0
--- /dev/null
+++ b/drivers/staging/android/timed_output.c
@@ -0,0 +1,123 @@
+/* drivers/misc/timed_output.c
+ *
+ * Copyright (C) 2009 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/err.h>
+
+#include "timed_output.h"
+
+static struct class *timed_output_class;
+static atomic_t device_count;
+
+static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct timed_output_dev *tdev = dev_get_drvdata(dev);
+ int remaining = tdev->get_time(tdev);
+
+ return sprintf(buf, "%d\n", remaining);
+}
+
+static ssize_t enable_store(
+ struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct timed_output_dev *tdev = dev_get_drvdata(dev);
+ int value;
+
+ if (sscanf(buf, "%d", &value) != 1)
+ return -EINVAL;
+
+ tdev->enable(tdev, value);
+
+ return size;
+}
+
+static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR, enable_show, enable_store);
+
+static int create_timed_output_class(void)
+{
+ if (!timed_output_class) {
+ timed_output_class = class_create(THIS_MODULE, "timed_output");
+ if (IS_ERR(timed_output_class))
+ return PTR_ERR(timed_output_class);
+ atomic_set(&device_count, 0);
+ }
+
+ return 0;
+}
+
+int timed_output_dev_register(struct timed_output_dev *tdev)
+{
+ int ret;
+
+ if (!tdev || !tdev->name || !tdev->enable || !tdev->get_time)
+ return -EINVAL;
+
+ ret = create_timed_output_class();
+ if (ret < 0)
+ return ret;
+
+ tdev->index = atomic_inc_return(&device_count);
+ tdev->dev = device_create(timed_output_class, NULL,
+ MKDEV(0, tdev->index), NULL, tdev->name);
+ if (IS_ERR(tdev->dev))
+ return PTR_ERR(tdev->dev);
+
+ ret = device_create_file(tdev->dev, &dev_attr_enable);
+ if (ret < 0)
+ goto err_create_file;
+
+ dev_set_drvdata(tdev->dev, tdev);
+ tdev->state = 0;
+ return 0;
+
+err_create_file:
+ device_destroy(timed_output_class, MKDEV(0, tdev->index));
+ printk(KERN_ERR "timed_output: Failed to register driver %s\n",
+ tdev->name);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(timed_output_dev_register);
+
+void timed_output_dev_unregister(struct timed_output_dev *tdev)
+{
+ device_remove_file(tdev->dev, &dev_attr_enable);
+ device_destroy(timed_output_class, MKDEV(0, tdev->index));
+ dev_set_drvdata(tdev->dev, NULL);
+}
+EXPORT_SYMBOL_GPL(timed_output_dev_unregister);
+
+static int __init timed_output_init(void)
+{
+ return create_timed_output_class();
+}
+
+static void __exit timed_output_exit(void)
+{
+ class_destroy(timed_output_class);
+}
+
+module_init(timed_output_init);
+module_exit(timed_output_exit);
+
+MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>");
+MODULE_DESCRIPTION("timed output class driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/android/timed_output.h b/drivers/staging/android/timed_output.h
new file mode 100644
index 000000000000..ec907ab2ff54
--- /dev/null
+++ b/drivers/staging/android/timed_output.h
@@ -0,0 +1,37 @@
+/* include/linux/timed_output.h
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+*/
+
+#ifndef _LINUX_TIMED_OUTPUT_H
+#define _LINUX_TIMED_OUTPUT_H
+
+struct timed_output_dev {
+ const char *name;
+
+ /* enable the output and set the timer */
+ void (*enable)(struct timed_output_dev *sdev, int timeout);
+
+ /* returns the current number of milliseconds remaining on the timer */
+ int (*get_time)(struct timed_output_dev *sdev);
+
+ /* private data */
+ struct device *dev;
+ int index;
+ int state;
+};
+
+extern int timed_output_dev_register(struct timed_output_dev *dev);
+extern void timed_output_dev_unregister(struct timed_output_dev *dev);
+
+#endif
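
From user space the class implemented in timed_output.c above reduces to a
single "enable" attribute per device: writing a decimal value starts the
output for that many milliseconds (enable_store), and reading it back returns
the time remaining on the timer (enable_show). A minimal sketch, assuming a
device registered under the name "vibrator":

    /* vibrate.c - drive a timed_output device for 500 ms; the
     * "vibrator" device name is only an example.
     */
    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/sys/class/timed_output/vibrator/enable", "w");

            if (!f) {
                    perror("open enable attribute");
                    return 1;
            }
            fprintf(f, "%d\n", 500);  /* timeout in milliseconds */
            fclose(f);
            return 0;
    }
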
diff --git a/drivers/staging/asus_oled/asus_oled.c b/drivers/staging/asus_oled/asus_oled.c
index 7bb7da7959a2..e77e4e0396cf 100644
--- a/drivers/staging/asus_oled/asus_oled.c
+++ b/drivers/staging/asus_oled/asus_oled.c
@@ -201,7 +201,7 @@ static ssize_t set_enabled(struct device *dev, struct device_attribute *attr,
struct usb_interface *intf = to_usb_interface(dev);
struct asus_oled_dev *odev = usb_get_intfdata(intf);
unsigned long value;
- if (strict_strtoul(buf, 10, &value))
+ if (kstrtoul(buf, 10, &value))
return -EINVAL;
enable_oled(odev, value);
@@ -217,7 +217,7 @@ static ssize_t class_set_enabled(struct device *device,
(struct asus_oled_dev *) dev_get_drvdata(device);
unsigned long value;
- if (strict_strtoul(buf, 10, &value))
+ if (kstrtoul(buf, 10, &value))
return -EINVAL;
enable_oled(odev, value);
diff --git a/drivers/staging/bcm/Bcmchar.c b/drivers/staging/bcm/Bcmchar.c
index 2fa658eb74dc..179707b5e7c7 100644
--- a/drivers/staging/bcm/Bcmchar.c
+++ b/drivers/staging/bcm/Bcmchar.c
@@ -161,6 +161,7 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
INT Status = STATUS_FAILURE;
int timeout = 0;
IOCTL_BUFFER IoBuffer;
+ int bytes;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Parameters Passed to control IOCTL cmd=0x%X arg=0x%lX", cmd, arg);
@@ -230,11 +231,16 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
if (!temp_buff)
return -ENOMEM;
- Status = rdmalt(Adapter, (UINT)sRdmBuffer.Register,
+ bytes = rdmalt(Adapter, (UINT)sRdmBuffer.Register,
(PUINT)temp_buff, Bufflen);
- if (Status == STATUS_SUCCESS) {
- if (copy_to_user(IoBuffer.OutputBuffer, temp_buff, IoBuffer.OutputLength))
- Status = -EFAULT;
+ if (bytes > 0) {
+ Status = STATUS_SUCCESS;
+ if (copy_to_user(IoBuffer.OutputBuffer, temp_buff, bytes)) {
+ kfree(temp_buff);
+ return -EFAULT;
+ }
+ } else {
+ Status = bytes;
}
kfree(temp_buff);
@@ -302,7 +308,11 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
if (copy_from_user(&sRdmBuffer, IoBuffer.InputBuffer, IoBuffer.InputLength))
return -EFAULT;
- /* FIXME: don't trust user supplied length */
+ if (IoBuffer.OutputLength > USHRT_MAX ||
+ IoBuffer.OutputLength == 0) {
+ return -EINVAL;
+ }
+
temp_buff = kmalloc(IoBuffer.OutputLength, GFP_KERNEL);
if (!temp_buff)
return STATUS_FAILURE;
@@ -318,11 +328,17 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
}
uiTempVar = sRdmBuffer.Register & EEPROM_REJECT_MASK;
- Status = rdmaltWithLock(Adapter, (UINT)sRdmBuffer.Register, (PUINT)temp_buff, IoBuffer.OutputLength);
+ bytes = rdmaltWithLock(Adapter, (UINT)sRdmBuffer.Register, (PUINT)temp_buff, IoBuffer.OutputLength);
- if (Status == STATUS_SUCCESS)
- if (copy_to_user(IoBuffer.OutputBuffer, temp_buff, IoBuffer.OutputLength))
- Status = -EFAULT;
+ if (bytes > 0) {
+ Status = STATUS_SUCCESS;
+ if (copy_to_user(IoBuffer.OutputBuffer, temp_buff, bytes)) {
+ kfree(temp_buff);
+ return -EFAULT;
+ }
+ } else {
+ Status = bytes;
+ }
kfree(temp_buff);
break;
@@ -437,12 +453,14 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
}
}
- Status = rdmaltWithLock(Adapter, (UINT)GPIO_MODE_REGISTER, (PUINT)ucResetValue, sizeof(UINT));
-
- if (STATUS_SUCCESS != Status) {
+ bytes = rdmaltWithLock(Adapter, (UINT)GPIO_MODE_REGISTER, (PUINT)ucResetValue, sizeof(UINT));
+ if (bytes < 0) {
+ Status = bytes;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
"GPIO_MODE_REGISTER read failed");
break;
+ } else {
+ Status = STATUS_SUCCESS;
}
/* Set the gpio mode register to output */
@@ -519,12 +537,15 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
uiBit = gpio_info.uiGpioNumber;
/* Set the gpio output register */
- Status = rdmaltWithLock(Adapter, (UINT)GPIO_PIN_STATE_REGISTER,
+ bytes = rdmaltWithLock(Adapter, (UINT)GPIO_PIN_STATE_REGISTER,
(PUINT)ucRead, sizeof(UINT));
- if (Status != STATUS_SUCCESS) {
+ if (bytes < 0) {
+ Status = bytes;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "RDM Failed\n");
return Status;
+ } else {
+ Status = STATUS_SUCCESS;
}
}
break;
@@ -590,11 +611,14 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
}
if (pgpio_multi_info[WIMAX_IDX].uiGPIOMask) {
- Status = rdmaltWithLock(Adapter, (UINT)GPIO_PIN_STATE_REGISTER, (PUINT)ucResetValue, sizeof(UINT));
+ bytes = rdmaltWithLock(Adapter, (UINT)GPIO_PIN_STATE_REGISTER, (PUINT)ucResetValue, sizeof(UINT));
- if (Status != STATUS_SUCCESS) {
+ if (bytes < 0) {
+ Status = bytes;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "RDM to GPIO_PIN_STATE_REGISTER Failed.");
return Status;
+ } else {
+ Status = STATUS_SUCCESS;
}
pgpio_multi_info[WIMAX_IDX].uiGPIOValue = (*(UINT *)ucResetValue &
@@ -605,7 +629,7 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
if (Status) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
"Failed while copying Content to IOBufer for user space err:%d", Status);
- break;
+ return -EFAULT;
}
}
break;
@@ -629,11 +653,14 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
if (copy_from_user(&gpio_multi_mode, IoBuffer.InputBuffer, IoBuffer.InputLength))
return -EFAULT;
- Status = rdmaltWithLock(Adapter, (UINT)GPIO_MODE_REGISTER, (PUINT)ucResetValue, sizeof(UINT));
+ bytes = rdmaltWithLock(Adapter, (UINT)GPIO_MODE_REGISTER, (PUINT)ucResetValue, sizeof(UINT));
- if (STATUS_SUCCESS != Status) {
+ if (bytes < 0) {
+ Status = bytes;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Read of GPIO_MODE_REGISTER failed");
return Status;
+ } else {
+ Status = STATUS_SUCCESS;
}
/* Validating the request */
@@ -678,7 +705,7 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
if (Status) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
"Failed while copying Content to IOBufer for user space err:%d", Status);
- break;
+ return -EFAULT;
}
}
break;
@@ -706,9 +733,8 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
return -ENOMEM;
if (copy_from_user(pvBuffer, IoBuffer.InputBuffer, IoBuffer.InputLength)) {
- Status = -EFAULT;
kfree(pvBuffer);
- break;
+ return -EFAULT;
}
down(&Adapter->LowPowerModeSync);
@@ -733,8 +759,7 @@ cntrlEnd:
}
case IOCTL_BCM_BUFFER_DOWNLOAD_START: {
- INT NVMAccess = down_trylock(&Adapter->NVMRdmWrmLock);
- if (NVMAccess) {
+ if (down_trylock(&Adapter->NVMRdmWrmLock)) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
"IOCTL_BCM_CHIP_RESET not allowed as EEPROM Read/Write is in progress\n");
return -EACCES;
@@ -743,157 +768,162 @@ cntrlEnd:
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
"Starting the firmware download PID =0x%x!!!!\n", current->pid);
- if (!down_trylock(&Adapter->fw_download_sema)) {
- Adapter->bBinDownloaded = FALSE;
- Adapter->fw_download_process_pid = current->pid;
- Adapter->bCfgDownloaded = FALSE;
- Adapter->fw_download_done = FALSE;
- netif_carrier_off(Adapter->dev);
- netif_stop_queue(Adapter->dev);
- Status = reset_card_proc(Adapter);
- if (Status) {
- pr_err(PFX "%s: reset_card_proc Failed!\n", Adapter->dev->name);
- up(&Adapter->fw_download_sema);
- up(&Adapter->NVMRdmWrmLock);
- break;
- }
- mdelay(10);
- } else {
- Status = -EBUSY;
+ if (down_trylock(&Adapter->fw_download_sema))
+ return -EBUSY;
+
+ Adapter->bBinDownloaded = FALSE;
+ Adapter->fw_download_process_pid = current->pid;
+ Adapter->bCfgDownloaded = FALSE;
+ Adapter->fw_download_done = FALSE;
+ netif_carrier_off(Adapter->dev);
+ netif_stop_queue(Adapter->dev);
+ Status = reset_card_proc(Adapter);
+ if (Status) {
+ pr_err(PFX "%s: reset_card_proc Failed!\n", Adapter->dev->name);
+ up(&Adapter->fw_download_sema);
+ up(&Adapter->NVMRdmWrmLock);
+ return Status;
}
+ mdelay(10);
up(&Adapter->NVMRdmWrmLock);
- break;
+ return Status;
}
case IOCTL_BCM_BUFFER_DOWNLOAD: {
FIRMWARE_INFO *psFwInfo = NULL;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Starting the firmware download PID =0x%x!!!!\n", current->pid);
- do {
- if (!down_trylock(&Adapter->fw_download_sema)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
- "Invalid way to download buffer. Use Start and then call this!!!\n");
- Status = -EINVAL;
- break;
- }
-
- /* Copy Ioctl Buffer structure */
- if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
- return -EFAULT;
+ if (!down_trylock(&Adapter->fw_download_sema)) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
- "Length for FW DLD is : %lx\n", IoBuffer.InputLength);
+ "Invalid way to download buffer. Use Start and then call this!!!\n");
+ up(&Adapter->fw_download_sema);
+ Status = -EINVAL;
+ return Status;
+ }
- if (IoBuffer.InputLength > sizeof(FIRMWARE_INFO))
- return -EINVAL;
+ /* Copy Ioctl Buffer structure */
+ if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER))) {
+ up(&Adapter->fw_download_sema);
+ return -EFAULT;
+ }
- psFwInfo = kmalloc(sizeof(*psFwInfo), GFP_KERNEL);
- if (!psFwInfo)
- return -ENOMEM;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Length for FW DLD is : %lx\n", IoBuffer.InputLength);
- if (copy_from_user(psFwInfo, IoBuffer.InputBuffer, IoBuffer.InputLength))
- return -EFAULT;
+ if (IoBuffer.InputLength > sizeof(FIRMWARE_INFO)) {
+ up(&Adapter->fw_download_sema);
+ return -EINVAL;
+ }
- if (!psFwInfo->pvMappedFirmwareAddress ||
- (psFwInfo->u32FirmwareLength == 0)) {
+ psFwInfo = kmalloc(sizeof(*psFwInfo), GFP_KERNEL);
+ if (!psFwInfo) {
+ up(&Adapter->fw_download_sema);
+ return -ENOMEM;
+ }
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Something else is wrong %lu\n",
- psFwInfo->u32FirmwareLength);
- Status = -EINVAL;
- break;
- }
+ if (copy_from_user(psFwInfo, IoBuffer.InputBuffer, IoBuffer.InputLength)) {
+ up(&Adapter->fw_download_sema);
+ return -EFAULT;
+ }
- Status = bcm_ioctl_fw_download(Adapter, psFwInfo);
+ if (!psFwInfo->pvMappedFirmwareAddress ||
+ (psFwInfo->u32FirmwareLength == 0)) {
- if (Status != STATUS_SUCCESS) {
- if (psFwInfo->u32StartingAddress == CONFIG_BEGIN_ADDR)
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "IOCTL: Configuration File Upload Failed\n");
- else
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "IOCTL: Firmware File Upload Failed\n");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Something else is wrong %lu\n",
+ psFwInfo->u32FirmwareLength);
+ up(&Adapter->fw_download_sema);
+ Status = -EINVAL;
+ return Status;
+ }
- /* up(&Adapter->fw_download_sema); */
+ Status = bcm_ioctl_fw_download(Adapter, psFwInfo);
- if (Adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY) {
- Adapter->DriverState = DRIVER_INIT;
- Adapter->LEDInfo.bLedInitDone = FALSE;
- wake_up(&Adapter->LEDInfo.notify_led_event);
- }
- }
- break;
+ if (Status != STATUS_SUCCESS) {
+ if (psFwInfo->u32StartingAddress == CONFIG_BEGIN_ADDR)
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "IOCTL: Configuration File Upload Failed\n");
+ else
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "IOCTL: Firmware File Upload Failed\n");
+
+ /* up(&Adapter->fw_download_sema); */
- } while (0);
+ if (Adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY) {
+ Adapter->DriverState = DRIVER_INIT;
+ Adapter->LEDInfo.bLedInitDone = FALSE;
+ wake_up(&Adapter->LEDInfo.notify_led_event);
+ }
+ }
if (Status != STATUS_SUCCESS)
up(&Adapter->fw_download_sema);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, OSAL_DBG, DBG_LVL_ALL, "IOCTL: Firmware File Uploaded\n");
kfree(psFwInfo);
- break;
+ return Status;
}
case IOCTL_BCM_BUFFER_DOWNLOAD_STOP: {
- INT NVMAccess = down_trylock(&Adapter->NVMRdmWrmLock);
+ if (!down_trylock(&Adapter->fw_download_sema)) {
+ up(&Adapter->fw_download_sema);
+ return -EINVAL;
+ }
- if (NVMAccess) {
+ if (down_trylock(&Adapter->NVMRdmWrmLock)) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
"FW download blocked as EEPROM Read/Write is in progress\n");
up(&Adapter->fw_download_sema);
return -EACCES;
}
- if (down_trylock(&Adapter->fw_download_sema)) {
- Adapter->bBinDownloaded = TRUE;
- Adapter->bCfgDownloaded = TRUE;
- atomic_set(&Adapter->CurrNumFreeTxDesc, 0);
- Adapter->CurrNumRecvDescs = 0;
- Adapter->downloadDDR = 0;
-
- /* setting the Mips to Run */
- Status = run_card_proc(Adapter);
+ Adapter->bBinDownloaded = TRUE;
+ Adapter->bCfgDownloaded = TRUE;
+ atomic_set(&Adapter->CurrNumFreeTxDesc, 0);
+ Adapter->CurrNumRecvDescs = 0;
+ Adapter->downloadDDR = 0;
- if (Status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Firm Download Failed\n");
- up(&Adapter->fw_download_sema);
- up(&Adapter->NVMRdmWrmLock);
- break;
- } else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
- DBG_LVL_ALL, "Firm Download Over...\n");
- }
-
- mdelay(10);
-
- /* Wait for MailBox Interrupt */
- if (StartInterruptUrb((PS_INTERFACE_ADAPTER)Adapter->pvInterfaceAdapter))
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Unable to send interrupt...\n");
-
- timeout = 5*HZ;
- Adapter->waiting_to_fw_download_done = FALSE;
- wait_event_timeout(Adapter->ioctl_fw_dnld_wait_queue,
- Adapter->waiting_to_fw_download_done, timeout);
- Adapter->fw_download_process_pid = INVALID_PID;
- Adapter->fw_download_done = TRUE;
- atomic_set(&Adapter->CurrNumFreeTxDesc, 0);
- Adapter->CurrNumRecvDescs = 0;
- Adapter->PrevNumRecvDescs = 0;
- atomic_set(&Adapter->cntrlpktCnt, 0);
- Adapter->LinkUpStatus = 0;
- Adapter->LinkStatus = 0;
+ /* setting the Mips to Run */
+ Status = run_card_proc(Adapter);
- if (Adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY) {
- Adapter->DriverState = FW_DOWNLOAD_DONE;
- wake_up(&Adapter->LEDInfo.notify_led_event);
- }
-
- if (!timeout)
- Status = -ENODEV;
+ if (Status) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Firm Download Failed\n");
+ up(&Adapter->fw_download_sema);
+ up(&Adapter->NVMRdmWrmLock);
+ return Status;
} else {
- Status = -EINVAL;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
+ DBG_LVL_ALL, "Firm Download Over...\n");
+ }
+
+ mdelay(10);
+
+ /* Wait for MailBox Interrupt */
+ if (StartInterruptUrb((PS_INTERFACE_ADAPTER)Adapter->pvInterfaceAdapter))
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Unable to send interrupt...\n");
+
+ timeout = 5*HZ;
+ Adapter->waiting_to_fw_download_done = FALSE;
+ wait_event_timeout(Adapter->ioctl_fw_dnld_wait_queue,
+ Adapter->waiting_to_fw_download_done, timeout);
+ Adapter->fw_download_process_pid = INVALID_PID;
+ Adapter->fw_download_done = TRUE;
+ atomic_set(&Adapter->CurrNumFreeTxDesc, 0);
+ Adapter->CurrNumRecvDescs = 0;
+ Adapter->PrevNumRecvDescs = 0;
+ atomic_set(&Adapter->cntrlpktCnt, 0);
+ Adapter->LinkUpStatus = 0;
+ Adapter->LinkStatus = 0;
+
+ if (Adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY) {
+ Adapter->DriverState = FW_DOWNLOAD_DONE;
+ wake_up(&Adapter->LEDInfo.notify_led_event);
}
+ if (!timeout)
+ Status = -ENODEV;
+
up(&Adapter->fw_download_sema);
up(&Adapter->NVMRdmWrmLock);
- break;
+ return Status;
}
case IOCTL_BE_BUCKET_SIZE:
@@ -969,11 +999,15 @@ cntrlEnd:
}
case IOCTL_BCM_GET_DRIVER_VERSION: {
+ ulong len;
+
/* Copy Ioctl Buffer structure */
if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
return -EFAULT;
- if (copy_to_user(IoBuffer.OutputBuffer, VER_FILEVERSION_STR, IoBuffer.OutputLength))
+ len = min_t(ulong, IoBuffer.OutputLength, strlen(VER_FILEVERSION_STR) + 1);
+
+ if (copy_to_user(IoBuffer.OutputBuffer, VER_FILEVERSION_STR, len))
return -EFAULT;
Status = STATUS_SUCCESS;
break;
@@ -985,8 +1019,7 @@ cntrlEnd:
/* Copy Ioctl Buffer structure */
if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER))) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "copy_from_user failed..\n");
- Status = -EFAULT;
- break;
+ return -EFAULT;
}
if (IoBuffer.OutputLength != sizeof(link_state)) {
@@ -1001,8 +1034,7 @@ cntrlEnd:
if (copy_to_user(IoBuffer.OutputBuffer, &link_state, min_t(size_t, sizeof(link_state), IoBuffer.OutputLength))) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy_to_user Failed..\n");
- Status = -EFAULT;
- break;
+ return -EFAULT;
}
Status = STATUS_SUCCESS;
break;
@@ -1068,8 +1100,10 @@ cntrlEnd:
GetDroppedAppCntrlPktMibs(temp_buff, pTarang);
if (Status != STATUS_FAILURE)
- if (copy_to_user(IoBuffer.OutputBuffer, temp_buff, sizeof(S_MIBS_HOST_STATS_MIBS)))
- Status = -EFAULT;
+ if (copy_to_user(IoBuffer.OutputBuffer, temp_buff, sizeof(S_MIBS_HOST_STATS_MIBS))) {
+ kfree(temp_buff);
+ return -EFAULT;
+ }
kfree(temp_buff);
break;
@@ -1103,7 +1137,9 @@ cntrlEnd:
if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
return -EFAULT;
- /* FIXME: restrict length */
+ if (IoBuffer.InputLength < sizeof(ULONG) * 2)
+ return -EINVAL;
+
pvBuffer = kmalloc(IoBuffer.InputLength, GFP_KERNEL);
if (!pvBuffer)
return -ENOMEM;
@@ -1111,8 +1147,7 @@ cntrlEnd:
/* Get WrmBuffer structure */
if (copy_from_user(pvBuffer, IoBuffer.InputBuffer, IoBuffer.InputLength)) {
kfree(pvBuffer);
- Status = -EFAULT;
- break;
+ return -EFAULT;
}
pBulkBuffer = (PBULKWRM_BUFFER)pvBuffer;
@@ -1242,8 +1277,7 @@ cntrlEnd:
memset(&tv1, 0, sizeof(struct timeval));
if ((Adapter->eNVMType == NVM_FLASH) && (Adapter->uiFlashLayoutMajorVersion == 0)) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "The Flash Control Section is Corrupted. Hence Rejection on NVM Read/Write\n");
- Status = -EFAULT;
- break;
+ return -EFAULT;
}
if (IsFlash2x(Adapter)) {
@@ -1252,7 +1286,7 @@ cntrlEnd:
(Adapter->eActiveDSD != DSD2)) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "No DSD is active..hence NVM Command is blocked");
- return STATUS_FAILURE ;
+ return STATUS_FAILURE;
}
}
@@ -1271,8 +1305,7 @@ cntrlEnd:
if ((stNVMReadWrite.uiOffset + stNVMReadWrite.uiNumBytes) > Adapter->uiNVMDSDSize) {
/* BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Can't allow access beyond NVM Size: 0x%x 0x%x\n", stNVMReadWrite.uiOffset, stNVMReadWrite.uiNumBytes); */
- Status = STATUS_FAILURE;
- break;
+ return STATUS_FAILURE;
}
pReadData = kzalloc(stNVMReadWrite.uiNumBytes, GFP_KERNEL);
@@ -1280,9 +1313,8 @@ cntrlEnd:
return -ENOMEM;
if (copy_from_user(pReadData, stNVMReadWrite.pBuffer, stNVMReadWrite.uiNumBytes)) {
- Status = -EFAULT;
kfree(pReadData);
- break;
+ return -EFAULT;
}
do_gettimeofday(&tv0);
@@ -1309,7 +1341,7 @@ cntrlEnd:
if (copy_to_user(stNVMReadWrite.pBuffer, pReadData, stNVMReadWrite.uiNumBytes)) {
kfree(pReadData);
- Status = -EFAULT;
+ return -EFAULT;
}
} else {
down(&Adapter->NVMRdmWrmLock);
@@ -1377,9 +1409,8 @@ cntrlEnd:
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, " timetaken by Write/read :%ld msec\n", (tv1.tv_sec - tv0.tv_sec)*1000 + (tv1.tv_usec - tv0.tv_usec)/1000);
kfree(pReadData);
- Status = STATUS_SUCCESS;
+ return STATUS_SUCCESS;
}
- break;
case IOCTL_BCM_FLASH2X_SECTION_READ: {
FLASH2X_READWRITE sFlash2xRead = {0};
@@ -1456,7 +1487,9 @@ cntrlEnd:
Status = copy_to_user(OutPutBuff, pReadBuff, ReadBytes);
if (Status) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Copy to use failed with status :%d", Status);
- break;
+ up(&Adapter->NVMRdmWrmLock);
+ kfree(pReadBuff);
+ return -EFAULT;
}
NOB = NOB - ReadBytes;
if (NOB) {
@@ -1548,7 +1581,9 @@ cntrlEnd:
Status = copy_from_user(pWriteBuff, InputAddr, WriteBytes);
if (Status) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy to user failed with status :%d", Status);
- break;
+ up(&Adapter->NVMRdmWrmLock);
+ kfree(pWriteBuff);
+ return -EFAULT;
}
BCM_DEBUG_PRINT_BUFFER(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, pWriteBuff, WriteBytes);
@@ -1608,8 +1643,10 @@ cntrlEnd:
BcmGetFlash2xSectionalBitMap(Adapter, psFlash2xBitMap);
up(&Adapter->NVMRdmWrmLock);
- if (copy_to_user(IoBuffer.OutputBuffer, psFlash2xBitMap, sizeof(FLASH2X_BITMAP)))
- Status = -EFAULT;
+ if (copy_to_user(IoBuffer.OutputBuffer, psFlash2xBitMap, sizeof(FLASH2X_BITMAP))) {
+ kfree(psFlash2xBitMap);
+ return -EFAULT;
+ }
kfree(psFlash2xBitMap);
}
@@ -1627,13 +1664,13 @@ cntrlEnd:
Status = copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER));
if (Status) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy of IOCTL BUFFER failed");
- return Status;
+ return -EFAULT;
}
Status = copy_from_user(&eFlash2xSectionVal, IoBuffer.InputBuffer, sizeof(INT));
if (Status) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy of flash section val failed");
- return Status;
+ return -EFAULT;
}
down(&Adapter->NVMRdmWrmLock);
@@ -1677,13 +1714,13 @@ cntrlEnd:
Status = copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER));
if (Status) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy of IOCTL BUFFER failed Status :%d", Status);
- return Status;
+ return -EFAULT;
}
Status = copy_from_user(&sCopySectStrut, IoBuffer.InputBuffer, sizeof(FLASH2X_COPY_SECTION));
if (Status) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy of Copy_Section_Struct failed with Status :%d", Status);
- return Status;
+ return -EFAULT;
}
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Source SEction :%x", sCopySectStrut.SrcSection);
@@ -1744,7 +1781,7 @@ cntrlEnd:
Status = copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER));
if (Status) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy of IOCTL BUFFER failed");
- break;
+ return -EFAULT;
}
if (Adapter->eNVMType != NVM_FLASH) {
@@ -1783,12 +1820,12 @@ cntrlEnd:
Status = copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER));
if (Status) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy of IOCTL BUFFER failed");
- return Status;
+ return -EFAULT;
}
Status = copy_from_user(&eFlash2xSectionVal, IoBuffer.InputBuffer, sizeof(INT));
if (Status) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy of flash section val failed");
- return Status;
+ return -EFAULT;
}
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Read Section :%d", eFlash2xSectionVal);
@@ -1830,8 +1867,7 @@ cntrlEnd:
/* Copy Ioctl Buffer structure */
if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER))) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "copy_from_user 1 failed\n");
- Status = -EFAULT;
- break;
+ return -EFAULT;
}
if (copy_from_user(&stNVMRead, IoBuffer.OutputBuffer, sizeof(NVM_READWRITE)))
@@ -1886,7 +1922,9 @@ cntrlEnd:
Status = copy_to_user(OutPutBuff, pReadBuff, ReadBytes);
if (Status) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy to use failed with status :%d", Status);
- break;
+ up(&Adapter->NVMRdmWrmLock);
+ kfree(pReadBuff);
+ return -EFAULT;
}
NOB = NOB - ReadBytes;
if (NOB) {
@@ -1907,8 +1945,7 @@ cntrlEnd:
Status = copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER));
if (Status) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "copy of Ioctl buffer is failed from user space");
- Status = -EFAULT;
- break;
+ return -EFAULT;
}
if (IoBuffer.InputLength != sizeof(unsigned long)) {
@@ -1919,8 +1956,7 @@ cntrlEnd:
Status = copy_from_user(&RxCntrlMsgBitMask, IoBuffer.InputBuffer, IoBuffer.InputLength);
if (Status) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "copy of control bit mask failed from user space");
- Status = -EFAULT;
- break;
+ return -EFAULT;
}
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "\n Got user defined cntrl msg bit mask :%lx", RxCntrlMsgBitMask);
pTarang->RxCntrlMsgBitMask = RxCntrlMsgBitMask;
diff --git a/drivers/staging/bcm/HandleControlPacket.c b/drivers/staging/bcm/HandleControlPacket.c
index 2b1e9e17e11c..b058e30b2ca6 100644
--- a/drivers/staging/bcm/HandleControlPacket.c
+++ b/drivers/staging/bcm/HandleControlPacket.c
@@ -1,195 +1,208 @@
/**
-@file HandleControlPacket.c
-This file contains the routines to deal with
-sending and receiving of control packets.
-*/
+ * @file HandleControlPacket.c
+ * This file contains the routines to deal with
+ * sending and receiving of control packets.
+ */
#include "headers.h"
/**
-When a control packet is received, analyze the
-"status" and call appropriate response function.
-Enqueue the control packet for Application.
-@return None
-*/
+ * When a control packet is received, analyze the
+ * "status" and call appropriate response function.
+ * Enqueue the control packet for Application.
+ * @return None
+ */
static VOID handle_rx_control_packet(PMINI_ADAPTER Adapter, struct sk_buff *skb)
{
- PPER_TARANG_DATA pTarang = NULL;
+ PPER_TARANG_DATA pTarang = NULL;
BOOLEAN HighPriorityMessage = FALSE;
- struct sk_buff * newPacket = NULL;
+ struct sk_buff *newPacket = NULL;
CHAR cntrl_msg_mask_bit = 0;
- BOOLEAN drop_pkt_flag = TRUE ;
+ BOOLEAN drop_pkt_flag = TRUE;
USHORT usStatus = *(PUSHORT)(skb->data);
if (netif_msg_pktdata(Adapter))
print_hex_dump(KERN_DEBUG, PFX "rx control: ", DUMP_PREFIX_NONE,
- 16, 1, skb->data, skb->len, 0);
-
- switch(usStatus)
- {
- case CM_RESPONSES: // 0xA0
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CP_CTRL_PKT, DBG_LVL_ALL, "MAC Version Seems to be Non Multi-Classifier, rejected by Driver");
- HighPriorityMessage = TRUE ;
- break;
- case CM_CONTROL_NEWDSX_MULTICLASSIFIER_RESP:
- HighPriorityMessage = TRUE ;
- if(Adapter->LinkStatus==LINKUP_DONE)
- {
- CmControlResponseMessage(Adapter,(skb->data +sizeof(USHORT)));
- }
- break;
- case LINK_CONTROL_RESP: //0xA2
- case STATUS_RSP: //0xA1
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CP_CTRL_PKT, DBG_LVL_ALL,"LINK_CONTROL_RESP");
- HighPriorityMessage = TRUE ;
- LinkControlResponseMessage(Adapter,(skb->data + sizeof(USHORT)));
- break;
- case STATS_POINTER_RESP: //0xA6
- HighPriorityMessage = TRUE ;
- StatisticsResponse(Adapter, (skb->data + sizeof(USHORT)));
- break;
- case IDLE_MODE_STATUS: //0xA3
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CP_CTRL_PKT, DBG_LVL_ALL,"IDLE_MODE_STATUS Type Message Got from F/W");
- InterfaceIdleModeRespond(Adapter, (PUINT)(skb->data +
- sizeof(USHORT)));
- HighPriorityMessage = TRUE ;
- break;
-
- case AUTH_SS_HOST_MSG:
- HighPriorityMessage = TRUE ;
- break;
-
- default:
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CP_CTRL_PKT, DBG_LVL_ALL,"Got Default Response");
- /* Let the Application Deal with This Packet */
- break;
+ 16, 1, skb->data, skb->len, 0);
+
+ switch (usStatus) {
+ case CM_RESPONSES: /* 0xA0 */
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CP_CTRL_PKT,
+ DBG_LVL_ALL,
+ "MAC Version Seems to be Non Multi-Classifier, rejected by Driver");
+ HighPriorityMessage = TRUE;
+ break;
+ case CM_CONTROL_NEWDSX_MULTICLASSIFIER_RESP:
+ HighPriorityMessage = TRUE;
+ if (Adapter->LinkStatus == LINKUP_DONE)
+ CmControlResponseMessage(Adapter,
+ (skb->data + sizeof(USHORT)));
+ break;
+ case LINK_CONTROL_RESP: /* 0xA2 */
+ case STATUS_RSP: /* 0xA1 */
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CP_CTRL_PKT,
+ DBG_LVL_ALL, "LINK_CONTROL_RESP");
+ HighPriorityMessage = TRUE;
+ LinkControlResponseMessage(Adapter,
+ (skb->data + sizeof(USHORT)));
+ break;
+ case STATS_POINTER_RESP: /* 0xA6 */
+ HighPriorityMessage = TRUE;
+ StatisticsResponse(Adapter, (skb->data + sizeof(USHORT)));
+ break;
+ case IDLE_MODE_STATUS: /* 0xA3 */
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CP_CTRL_PKT,
+ DBG_LVL_ALL,
+ "IDLE_MODE_STATUS Type Message Got from F/W");
+ InterfaceIdleModeRespond(Adapter, (PUINT)(skb->data +
+ sizeof(USHORT)));
+ HighPriorityMessage = TRUE;
+ break;
+
+ case AUTH_SS_HOST_MSG:
+ HighPriorityMessage = TRUE;
+ break;
+
+ default:
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CP_CTRL_PKT,
+ DBG_LVL_ALL, "Got Default Response");
+ /* Let the Application Deal with This Packet */
+ break;
}
- //Queue The Control Packet to The Application Queues
+ /* Queue The Control Packet to The Application Queues */
down(&Adapter->RxAppControlQueuelock);
- for (pTarang = Adapter->pTarangs; pTarang; pTarang = pTarang->next)
- {
- if(Adapter->device_removed)
- {
+ for (pTarang = Adapter->pTarangs; pTarang; pTarang = pTarang->next) {
+ if (Adapter->device_removed)
break;
- }
- drop_pkt_flag = TRUE ;
+ drop_pkt_flag = TRUE;
/*
- There are cntrl msg from A0 to AC. It has been mapped to 0 to C bit in the cntrl mask.
- Also, by default AD to BF has been masked to the rest of the bits... which wil be ON by default.
- if mask bit is enable to particular pkt status, send it out to app else stop it.
- */
+ * There are cntrl msg from A0 to AC. It has been mapped to 0 to
+ * C bit in the cntrl mask.
+ * Also, by default AD to BF has been masked to the rest of the
+ * bits... which wil be ON by default.
+ * if mask bit is enable to particular pkt status, send it out
+ * to app else stop it.
+ */
cntrl_msg_mask_bit = (usStatus & 0x1F);
- //printk("\ninew msg mask bit which is disable in mask:%X", cntrl_msg_mask_bit);
- if(pTarang->RxCntrlMsgBitMask & (1<<cntrl_msg_mask_bit))
- drop_pkt_flag = FALSE;
-
- if ((drop_pkt_flag == TRUE) || (pTarang->AppCtrlQueueLen > MAX_APP_QUEUE_LEN) ||
- ((pTarang->AppCtrlQueueLen > MAX_APP_QUEUE_LEN/2) && (HighPriorityMessage == FALSE)))
- {
+ /*
+ * printk("\ninew msg mask bit which is disable in mask:%X",
+ * cntrl_msg_mask_bit);
+ */
+ if (pTarang->RxCntrlMsgBitMask & (1 << cntrl_msg_mask_bit))
+ drop_pkt_flag = FALSE;
+
+ if ((drop_pkt_flag == TRUE) ||
+ (pTarang->AppCtrlQueueLen > MAX_APP_QUEUE_LEN)
+ || ((pTarang->AppCtrlQueueLen >
+ MAX_APP_QUEUE_LEN / 2) &&
+ (HighPriorityMessage == FALSE))) {
/*
- Assumption:-
- 1. every tarang manages it own dropped pkt statitistics
- 2. Total packet dropped per tarang will be equal to the sum of all types of dropped
- pkt by that tarang only.
-
- */
- switch(*(PUSHORT)skb->data)
- {
- case CM_RESPONSES:
- pTarang->stDroppedAppCntrlMsgs.cm_responses++;
- break;
- case CM_CONTROL_NEWDSX_MULTICLASSIFIER_RESP:
- pTarang->stDroppedAppCntrlMsgs.cm_control_newdsx_multiclassifier_resp++;
- break;
- case LINK_CONTROL_RESP:
- pTarang->stDroppedAppCntrlMsgs.link_control_resp++;
- break;
- case STATUS_RSP:
- pTarang->stDroppedAppCntrlMsgs.status_rsp++;
- break;
- case STATS_POINTER_RESP:
- pTarang->stDroppedAppCntrlMsgs.stats_pointer_resp++;
- break;
- case IDLE_MODE_STATUS:
- pTarang->stDroppedAppCntrlMsgs.idle_mode_status++ ;
- break;
- case AUTH_SS_HOST_MSG:
- pTarang->stDroppedAppCntrlMsgs.auth_ss_host_msg++ ;
- break;
+ * Assumption:-
+ * 1. every tarang manages it own dropped pkt
+ * statitistics
+ * 2. Total packet dropped per tarang will be equal to
+ * the sum of all types of dropped pkt by that
+ * tarang only.
+ */
+ switch (*(PUSHORT)skb->data) {
+ case CM_RESPONSES:
+ pTarang->stDroppedAppCntrlMsgs.cm_responses++;
+ break;
+ case CM_CONTROL_NEWDSX_MULTICLASSIFIER_RESP:
+ pTarang->stDroppedAppCntrlMsgs.cm_control_newdsx_multiclassifier_resp++;
+ break;
+ case LINK_CONTROL_RESP:
+ pTarang->stDroppedAppCntrlMsgs.link_control_resp++;
+ break;
+ case STATUS_RSP:
+ pTarang->stDroppedAppCntrlMsgs.status_rsp++;
+ break;
+ case STATS_POINTER_RESP:
+ pTarang->stDroppedAppCntrlMsgs.stats_pointer_resp++;
+ break;
+ case IDLE_MODE_STATUS:
+ pTarang->stDroppedAppCntrlMsgs.idle_mode_status++;
+ break;
+ case AUTH_SS_HOST_MSG:
+ pTarang->stDroppedAppCntrlMsgs.auth_ss_host_msg++;
+ break;
default:
- pTarang->stDroppedAppCntrlMsgs.low_priority_message++ ;
- break;
+ pTarang->stDroppedAppCntrlMsgs.low_priority_message++;
+ break;
}
continue;
}
- newPacket = skb_clone(skb, GFP_KERNEL);
- if (!newPacket)
- break;
- ENQUEUEPACKET(pTarang->RxAppControlHead,pTarang->RxAppControlTail,
- newPacket);
- pTarang->AppCtrlQueueLen++;
- }
+ newPacket = skb_clone(skb, GFP_KERNEL);
+ if (!newPacket)
+ break;
+ ENQUEUEPACKET(pTarang->RxAppControlHead,
+ pTarang->RxAppControlTail, newPacket);
+ pTarang->AppCtrlQueueLen++;
+ }
up(&Adapter->RxAppControlQueuelock);
- wake_up(&Adapter->process_read_wait_queue);
- dev_kfree_skb(skb);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CP_CTRL_PKT, DBG_LVL_ALL, "After wake_up_interruptible");
+ wake_up(&Adapter->process_read_wait_queue);
+ dev_kfree_skb(skb);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CP_CTRL_PKT, DBG_LVL_ALL,
+ "After wake_up_interruptible");
}
/**
-@ingroup ctrl_pkt_functions
-Thread to handle control pkt reception
-*/
-int control_packet_handler (PMINI_ADAPTER Adapter /**< pointer to adapter object*/
- )
+ * @ingroup ctrl_pkt_functions
+ * Thread to handle control pkt reception
+ */
+int control_packet_handler(PMINI_ADAPTER Adapter /* pointer to adapter object*/)
{
- struct sk_buff *ctrl_packet= NULL;
+ struct sk_buff *ctrl_packet = NULL;
unsigned long flags = 0;
- //struct timeval tv ;
- //int *puiBuffer = NULL ;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CP_CTRL_PKT, DBG_LVL_ALL, "Entering to make thread wait on control packet event!");
- while(1)
- {
+ /* struct timeval tv; */
+ /* int *puiBuffer = NULL; */
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CP_CTRL_PKT, DBG_LVL_ALL,
+ "Entering to make thread wait on control packet event!");
+ while (1) {
wait_event_interruptible(Adapter->process_rx_cntrlpkt,
- atomic_read(&Adapter->cntrlpktCnt) ||
- Adapter->bWakeUpDevice ||
- kthread_should_stop()
- );
+ atomic_read(&Adapter->cntrlpktCnt) ||
+ Adapter->bWakeUpDevice ||
+ kthread_should_stop());
- if(kthread_should_stop())
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CP_CTRL_PKT, DBG_LVL_ALL, "Exiting \n");
+ if (kthread_should_stop()) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CP_CTRL_PKT,
+ DBG_LVL_ALL, "Exiting\n");
return 0;
}
- if(TRUE == Adapter->bWakeUpDevice)
- {
+ if (TRUE == Adapter->bWakeUpDevice) {
Adapter->bWakeUpDevice = FALSE;
- if((FALSE == Adapter->bTriedToWakeUpFromlowPowerMode) &&
- ((TRUE == Adapter->IdleMode)|| (TRUE == Adapter->bShutStatus)))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CP_CTRL_PKT, DBG_LVL_ALL, "Calling InterfaceAbortIdlemode\n");
- // Adapter->bTriedToWakeUpFromlowPowerMode = TRUE;
- InterfaceIdleModeWakeup (Adapter);
+ if ((FALSE == Adapter->bTriedToWakeUpFromlowPowerMode)
+ && ((TRUE == Adapter->IdleMode) ||
+ (TRUE == Adapter->bShutStatus))) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
+ CP_CTRL_PKT, DBG_LVL_ALL,
+ "Calling InterfaceAbortIdlemode\n");
+ /*
+ * Adapter->bTriedToWakeUpFromlowPowerMode
+ * = TRUE;
+ */
+ InterfaceIdleModeWakeup(Adapter);
}
continue;
}
- while(atomic_read(&Adapter->cntrlpktCnt))
- {
+ while (atomic_read(&Adapter->cntrlpktCnt)) {
spin_lock_irqsave(&Adapter->control_queue_lock, flags);
ctrl_packet = Adapter->RxControlHead;
- if(ctrl_packet)
- {
- DEQUEUEPACKET(Adapter->RxControlHead,Adapter->RxControlTail);
-// Adapter->RxControlHead=ctrl_packet->next;
+ if (ctrl_packet) {
+ DEQUEUEPACKET(Adapter->RxControlHead,
+ Adapter->RxControlTail);
+ /* Adapter->RxControlHead=ctrl_packet->next; */
}
- spin_unlock_irqrestore (&Adapter->control_queue_lock, flags);
- handle_rx_control_packet(Adapter, ctrl_packet);
+ spin_unlock_irqrestore(&Adapter->control_queue_lock,
+ flags);
+ handle_rx_control_packet(Adapter, ctrl_packet);
atomic_dec(&Adapter->cntrlpktCnt);
}
@@ -201,22 +214,22 @@ int control_packet_handler (PMINI_ADAPTER Adapter /**< pointer to adapter obje
INT flushAllAppQ(void)
{
PMINI_ADAPTER Adapter = GET_BCM_ADAPTER(gblpnetdev);
- PPER_TARANG_DATA pTarang = NULL;
+ PPER_TARANG_DATA pTarang = NULL;
struct sk_buff *PacketToDrop = NULL;
- for(pTarang = Adapter->pTarangs; pTarang; pTarang = pTarang->next)
- {
- while(pTarang->RxAppControlHead != NULL)
- {
- PacketToDrop=pTarang->RxAppControlHead;
- DEQUEUEPACKET(pTarang->RxAppControlHead,pTarang->RxAppControlTail);
+ for (pTarang = Adapter->pTarangs; pTarang; pTarang = pTarang->next) {
+ while (pTarang->RxAppControlHead != NULL) {
+ PacketToDrop = pTarang->RxAppControlHead;
+ DEQUEUEPACKET(pTarang->RxAppControlHead,
+ pTarang->RxAppControlTail);
dev_kfree_skb(PacketToDrop);
}
pTarang->AppCtrlQueueLen = 0;
- //dropped contrl packet statistics also should be reset.
- memset((PVOID)&pTarang->stDroppedAppCntrlMsgs, 0, sizeof(S_MIBS_DROPPED_APP_CNTRL_MESSAGES));
+ /* dropped contrl packet statistics also should be reset. */
+ memset((PVOID)&pTarang->stDroppedAppCntrlMsgs, 0,
+ sizeof(S_MIBS_DROPPED_APP_CNTRL_MESSAGES));
}
- return STATUS_SUCCESS ;
+ return STATUS_SUCCESS;
}
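
The filtering decision in handle_rx_control_packet() above reduces to one mask lookup: control statuses 0xA0 through 0xAC land on bits 0 through 12 of the per-application RxCntrlMsgBitMask (via the low five bits of the status word), and a packet is only queued for a tarang whose mask has that bit set; anything that fails the test, or would overflow the application queue, is only counted in stDroppedAppCntrlMsgs. A sketch of just that test; should_deliver() is a hypothetical helper name, the other identifiers follow the driver:

	static int should_deliver(unsigned short usStatus, unsigned long RxCntrlMsgBitMask)
	{
		unsigned int bit = usStatus & 0x1F;	/* 0xA0 -> bit 0 ... 0xAC -> bit 12 */

		return (RxCntrlMsgBitMask & (1UL << bit)) != 0;
	}
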
diff --git a/drivers/staging/bcm/InterfaceDld.c b/drivers/staging/bcm/InterfaceDld.c
index bcd86bbef2fd..65c352f35681 100644
--- a/drivers/staging/bcm/InterfaceDld.c
+++ b/drivers/staging/bcm/InterfaceDld.c
@@ -62,6 +62,7 @@ int InterfaceFileReadbackFromChip(PVOID arg, struct file *flp, unsigned int on_c
static int fw_down;
INT Status = STATUS_SUCCESS;
PS_INTERFACE_ADAPTER psIntfAdapter = (PS_INTERFACE_ADAPTER)arg;
+ int bytes;
buff = kmalloc(MAX_TRANSFER_CTRL_BYTE_USB, GFP_DMA);
buff_readback = kmalloc(MAX_TRANSFER_CTRL_BYTE_USB , GFP_DMA);
@@ -94,8 +95,9 @@ int InterfaceFileReadbackFromChip(PVOID arg, struct file *flp, unsigned int on_c
break;
}
- Status = InterfaceRDM(psIntfAdapter, on_chip_loc, buff_readback, len);
- if (Status) {
+ bytes = InterfaceRDM(psIntfAdapter, on_chip_loc, buff_readback, len);
+ if (bytes < 0) {
+ Status = bytes;
BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "RDM of len %d Failed! %d", len, reg);
goto exit;
}
@@ -302,6 +304,7 @@ static INT buffRdbkVerify(PMINI_ADAPTER Adapter, PUCHAR mappedbuffer, UINT u32Fi
UINT len = u32FirmwareLength;
INT retval = STATUS_SUCCESS;
PUCHAR readbackbuff = kzalloc(MAX_TRANSFER_CTRL_BYTE_USB, GFP_KERNEL);
+ int bytes;
if (NULL == readbackbuff) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "MEMORY ALLOCATION FAILED");
@@ -310,9 +313,10 @@ static INT buffRdbkVerify(PMINI_ADAPTER Adapter, PUCHAR mappedbuffer, UINT u32Fi
while (u32FirmwareLength && !retval) {
len = MIN_VAL(u32FirmwareLength, MAX_TRANSFER_CTRL_BYTE_USB);
- retval = rdm(Adapter, u32StartingAddress, readbackbuff, len);
+ bytes = rdm(Adapter, u32StartingAddress, readbackbuff, len);
- if (retval) {
+ if (bytes < 0) {
+ retval = bytes;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "rdm failed with status %d", retval);
break;
}
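
The InterfaceDld.c hunks adopt the return convention used throughout this series: rdm() and InterfaceRDM() hand back the number of bytes transferred (zero or more) or a negative errno, so callers keep that count in its own int and treat only negative values as failure. A sketch of the caller side under that assumption; addr, buf and len stand in for the real arguments:

	int bytes = rdm(Adapter, addr, buf, len);
	if (bytes < 0) {
		retval = bytes;		/* propagate the errno rather than a generic failure code */
		goto exit;
	}
	/* bytes >= 0: that many bytes were read into buf */
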
diff --git a/drivers/staging/bcm/InterfaceIdleMode.c b/drivers/staging/bcm/InterfaceIdleMode.c
index 96fa4ead7930..faeb03e62c06 100644
--- a/drivers/staging/bcm/InterfaceIdleMode.c
+++ b/drivers/staging/bcm/InterfaceIdleMode.c
@@ -46,6 +46,7 @@ int InterfaceIdleModeRespond(PMINI_ADAPTER Adapter, unsigned int* puiBuffer)
{
int status = STATUS_SUCCESS;
unsigned int uiRegRead = 0;
+ int bytes;
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL,"SubType of Message :0x%X", ntohl(*puiBuffer));
@@ -77,16 +78,16 @@ int InterfaceIdleModeRespond(PMINI_ADAPTER Adapter, unsigned int* puiBuffer)
else if(Adapter->ulPowerSaveMode != DEVICE_POWERSAVE_MODE_AS_PROTOCOL_IDLE_MODE)
{
//clear on read Register
- status = rdmalt(Adapter, DEVICE_INT_OUT_EP_REG0, &uiRegRead, sizeof(uiRegRead));
- if(status)
- {
+ bytes = rdmalt(Adapter, DEVICE_INT_OUT_EP_REG0, &uiRegRead, sizeof(uiRegRead));
+ if (bytes < 0) {
+ status = bytes;
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "rdm failed while clearing H/W Abort Reg0");
return status;
}
//clear on read Register
- status = rdmalt (Adapter, DEVICE_INT_OUT_EP_REG1, &uiRegRead, sizeof(uiRegRead));
- if(status)
- {
+ bytes = rdmalt(Adapter, DEVICE_INT_OUT_EP_REG1, &uiRegRead, sizeof(uiRegRead));
+ if (bytes < 0) {
+ status = bytes;
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "rdm failed while clearing H/W Abort Reg1");
return status;
}
@@ -117,9 +118,9 @@ int InterfaceIdleModeRespond(PMINI_ADAPTER Adapter, unsigned int* puiBuffer)
Adapter->chip_id== BCS220_3)
{
- status = rdmalt(Adapter, HPM_CONFIG_MSW, &uiRegRead, sizeof(uiRegRead));
- if(status)
- {
+ bytes = rdmalt(Adapter, HPM_CONFIG_MSW, &uiRegRead, sizeof(uiRegRead));
+ if (bytes < 0) {
+ status = bytes;
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "rdm failed while Reading HPM_CONFIG_LDO145 Reg 0\n");
return status;
}
@@ -266,6 +267,8 @@ void InterfaceHandleShutdownModeWakeup(PMINI_ADAPTER Adapter)
{
unsigned int uiRegVal = 0;
INT Status = 0;
+ int bytes;
+
if(Adapter->ulPowerSaveMode == DEVICE_POWERSAVE_MODE_AS_MANUAL_CLOCK_GATING)
{
// clear idlemode interrupt.
@@ -282,16 +285,16 @@ void InterfaceHandleShutdownModeWakeup(PMINI_ADAPTER Adapter)
{
//clear Interrupt EP registers.
- Status = rdmalt(Adapter,DEVICE_INT_OUT_EP_REG0, &uiRegVal, sizeof(uiRegVal));
- if(Status)
- {
+ bytes = rdmalt(Adapter,DEVICE_INT_OUT_EP_REG0, &uiRegVal, sizeof(uiRegVal));
+ if (bytes < 0) {
+ Status = bytes;
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"RDM of DEVICE_INT_OUT_EP_REG0 failed with Err :%d", Status);
return;
}
- Status = rdmalt(Adapter,DEVICE_INT_OUT_EP_REG1, &uiRegVal, sizeof(uiRegVal));
- if(Status)
- {
+ bytes = rdmalt(Adapter,DEVICE_INT_OUT_EP_REG1, &uiRegVal, sizeof(uiRegVal));
+ if (bytes < 0) {
+ Status = bytes;
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"RDM of DEVICE_INT_OUT_EP_REG1 failed with Err :%d", Status);
return;
}
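
In both idle-mode paths above, DEVICE_INT_OUT_EP_REG0 and REG1 are read purely for their side effect: the comments mark them as clear-on-read registers, so a successful rdmalt() discards the value but acknowledges the pending hardware abort, and only a negative byte count aborts the wake-up. Sketch, with uiRegRead as in the driver:

	/* clear-on-read: the value is thrown away, the read itself is the point */
	bytes = rdmalt(Adapter, DEVICE_INT_OUT_EP_REG0, &uiRegRead, sizeof(uiRegRead));
	if (bytes < 0)
		return bytes;	/* could not acknowledge the abort */
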
diff --git a/drivers/staging/bcm/InterfaceInit.c b/drivers/staging/bcm/InterfaceInit.c
index a09d35108f04..8e3c586a699c 100644
--- a/drivers/staging/bcm/InterfaceInit.c
+++ b/drivers/staging/bcm/InterfaceInit.c
@@ -68,7 +68,7 @@ static void InterfaceAdapterFree(PS_INTERFACE_ADAPTER psIntfAdapter)
static void ConfigureEndPointTypesThroughEEPROM(PMINI_ADAPTER Adapter)
{
unsigned long ulReg = 0;
- int ret;
+ int bytes;
/* Program EP2 MAX_PKT_SIZE */
ulReg = ntohl(EP2_MPS_REG);
@@ -94,8 +94,8 @@ static void ConfigureEndPointTypesThroughEEPROM(PMINI_ADAPTER Adapter)
BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x140, 4, TRUE);
/* Program TX EP as interrupt(Alternate Setting) */
- ret = rdmalt(Adapter, 0x0F0110F8, (u32 *)&ulReg, sizeof(u32));
- if (ret) {
+ bytes = rdmalt(Adapter, 0x0F0110F8, (u32 *)&ulReg, sizeof(u32));
+ if (bytes < 0) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
"reading of Tx EP failed\n");
return;
@@ -430,6 +430,7 @@ static int InterfaceAdapterInit(PS_INTERFACE_ADAPTER psIntfAdapter)
int usedIntOutForBulkTransfer = 0 ;
BOOLEAN bBcm16 = FALSE;
UINT uiData = 0;
+ int bytes;
/* Store the usb dev into interface adapter */
psIntfAdapter->udev = usb_get_dev(interface_to_usbdev(psIntfAdapter->interface));
@@ -438,9 +439,10 @@ static int InterfaceAdapterInit(PS_INTERFACE_ADAPTER psIntfAdapter)
psIntfAdapter->psAdapter->interface_rdm = BcmRDM;
psIntfAdapter->psAdapter->interface_wrm = BcmWRM;
- retval = rdmalt(psIntfAdapter->psAdapter, CHIP_ID_REG,
+ bytes = rdmalt(psIntfAdapter->psAdapter, CHIP_ID_REG,
(u32 *)&(psIntfAdapter->psAdapter->chip_id), sizeof(u32));
- if (retval) {
+ if (bytes < 0) {
+ retval = bytes;
BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_PRINTK, 0, 0, "CHIP ID Read Failed\n");
return retval;
}
diff --git a/drivers/staging/bcm/InterfaceMisc.c b/drivers/staging/bcm/InterfaceMisc.c
index 61f878b4f56c..2218faeaf8ac 100644
--- a/drivers/staging/bcm/InterfaceMisc.c
+++ b/drivers/staging/bcm/InterfaceMisc.c
@@ -5,7 +5,7 @@ INT InterfaceRDM(PS_INTERFACE_ADAPTER psIntfAdapter,
PVOID buff,
INT len)
{
- int retval = 0;
+ int bytes;
USHORT usRetries = 0;
if (psIntfAdapter == NULL) {
@@ -30,7 +30,7 @@ INT InterfaceRDM(PS_INTERFACE_ADAPTER psIntfAdapter,
psIntfAdapter->psAdapter->DeviceAccess = TRUE;
do {
- retval = usb_control_msg(psIntfAdapter->udev,
+ bytes = usb_control_msg(psIntfAdapter->udev,
usb_rcvctrlpipe(psIntfAdapter->udev, 0),
0x02,
0xC2,
@@ -41,22 +41,20 @@ INT InterfaceRDM(PS_INTERFACE_ADAPTER psIntfAdapter,
5000);
usRetries++;
- if (-ENODEV == retval) {
+ if (-ENODEV == bytes) {
psIntfAdapter->psAdapter->device_removed = TRUE;
break;
}
- } while ((retval < 0) && (usRetries < MAX_RDM_WRM_RETIRES));
+ } while ((bytes < 0) && (usRetries < MAX_RDM_WRM_RETIRES));
- if (retval < 0) {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_OTHERS, RDM, DBG_LVL_ALL, "RDM failed status :%d, retires :%d", retval, usRetries);
- psIntfAdapter->psAdapter->DeviceAccess = FALSE;
- return retval;
- } else {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_OTHERS, RDM, DBG_LVL_ALL, "RDM sent %d", retval);
- psIntfAdapter->psAdapter->DeviceAccess = FALSE;
- return STATUS_SUCCESS;
- }
+ if (bytes < 0)
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_OTHERS, RDM, DBG_LVL_ALL, "RDM failed status :%d, retires :%d", bytes, usRetries);
+ else
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_OTHERS, RDM, DBG_LVL_ALL, "RDM sent %d", bytes);
+
+ psIntfAdapter->psAdapter->DeviceAccess = FALSE;
+ return bytes;
}
INT InterfaceWRM(PS_INTERFACE_ADAPTER psIntfAdapter,
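
InterfaceRDM() is the producing side of that convention: instead of mapping a successful usb_control_msg() transfer to STATUS_SUCCESS, it now returns the raw byte count (or the negative errno), which is what lets every caller above test bytes < 0. A compact view of the retry contract; read_once() is a hypothetical stand-in for the usb_control_msg() call shown in the hunk, and the retry limit is illustrative:

	#include <errno.h>			/* the driver gets ENODEV from kernel headers */

	#define MAX_RDM_WRM_RETIRES 16		/* illustrative; the driver's value is not shown here */

	static int read_once(void);		/* stand-in for the usb_control_msg() read */

	static int rdm_with_retries(void)
	{
		int bytes;
		unsigned short retries = 0;

		do {
			bytes = read_once();	/* bytes transferred or -errno */
			retries++;
			if (bytes == -ENODEV)
				break;		/* device unplugged: stop retrying */
		} while (bytes < 0 && retries < MAX_RDM_WRM_RETIRES);

		return bytes;			/* callers treat < 0 as failure */
	}
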
diff --git a/drivers/staging/bcm/Misc.c b/drivers/staging/bcm/Misc.c
index e9f29d597518..c7725e141fd5 100644
--- a/drivers/staging/bcm/Misc.c
+++ b/drivers/staging/bcm/Misc.c
@@ -814,6 +814,7 @@ int reset_card_proc(PMINI_ADAPTER ps_adapter)
PMINI_ADAPTER Adapter = GET_BCM_ADAPTER(gblpnetdev);
PS_INTERFACE_ADAPTER psIntfAdapter = NULL;
unsigned int value = 0, uiResetValue = 0;
+ int bytes;
psIntfAdapter = ((PS_INTERFACE_ADAPTER)(ps_adapter->pvInterfaceAdapter));
ps_adapter->bDDRInitDone = FALSE;
@@ -848,8 +849,9 @@ int reset_card_proc(PMINI_ADAPTER ps_adapter)
ps_adapter->chip_id == BCS250_BC ||
ps_adapter->chip_id == BCS220_3) {
- retval = rdmalt(ps_adapter, HPM_CONFIG_LDO145, &value, sizeof(value));
- if (retval < 0) {
+ bytes = rdmalt(ps_adapter, HPM_CONFIG_LDO145, &value, sizeof(value));
+ if (bytes < 0) {
+ retval = bytes;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "read failed with status :%d", retval);
goto err_exit;
}
@@ -862,8 +864,9 @@ int reset_card_proc(PMINI_ADAPTER ps_adapter)
}
}
} else {
- retval = rdmalt(ps_adapter, 0x0f007018, &value, sizeof(value));
- if (retval < 0) {
+ bytes = rdmalt(ps_adapter, 0x0f007018, &value, sizeof(value));
+ if (bytes < 0) {
+ retval = bytes;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "read failed with status :%d", retval);
goto err_exit;
}
@@ -925,11 +928,16 @@ err_exit:
int run_card_proc(PMINI_ADAPTER ps_adapter)
{
+ int status = STATUS_SUCCESS;
+ int bytes;
+
unsigned int value = 0;
{
- if (rdmalt(ps_adapter, CLOCK_RESET_CNTRL_REG_1, &value, sizeof(value)) < 0) {
+ bytes = rdmalt(ps_adapter, CLOCK_RESET_CNTRL_REG_1, &value, sizeof(value));
+ if (bytes < 0) {
+ status = bytes;
BCM_DEBUG_PRINT(ps_adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "%s:%d\n", __func__, __LINE__);
- return STATUS_FAILURE;
+ return status;
}
if (ps_adapter->bFlashBoot)
@@ -942,7 +950,7 @@ int run_card_proc(PMINI_ADAPTER ps_adapter)
return STATUS_FAILURE;
}
}
- return STATUS_SUCCESS;
+ return status;
}
int InitCardAndDownloadFirmware(PMINI_ADAPTER ps_adapter)
@@ -1215,6 +1223,7 @@ static unsigned char *ReadMacAddrEEPROM(PMINI_ADAPTER Adapter, ulong dwAddress)
int status = 0, i = 0;
unsigned int temp = 0;
unsigned char *pucmacaddr = kmalloc(MAC_ADDRESS_SIZE, GFP_KERNEL);
+ int bytes;
if (!pucmacaddr) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "No Buffers to Read the EEPROM Address\n");
@@ -1231,8 +1240,9 @@ static unsigned char *ReadMacAddrEEPROM(PMINI_ADAPTER Adapter, ulong dwAddress)
}
for (i = 0; i < MAC_ADDRESS_SIZE; i++) {
- status = rdmalt(Adapter, EEPROM_READ_DATA_Q_REG, &temp, sizeof(temp));
- if (status != STATUS_SUCCESS) {
+ bytes = rdmalt(Adapter, EEPROM_READ_DATA_Q_REG, &temp, sizeof(temp));
+ if (bytes < 0) {
+ status = bytes;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "rdm Failed..\n");
kfree(pucmacaddr);
pucmacaddr = NULL;
@@ -1574,11 +1584,13 @@ void update_per_sf_desc_cnts(PMINI_ADAPTER Adapter)
{
INT iIndex = 0;
u32 uibuff[MAX_TARGET_DSX_BUFFERS];
+ int bytes;
if (!atomic_read(&Adapter->uiMBupdate))
return;
- if (rdmaltWithLock(Adapter, TARGET_SFID_TXDESC_MAP_LOC, (PUINT)uibuff, sizeof(UINT) * MAX_TARGET_DSX_BUFFERS) < 0) {
+ bytes = rdmaltWithLock(Adapter, TARGET_SFID_TXDESC_MAP_LOC, (PUINT)uibuff, sizeof(UINT) * MAX_TARGET_DSX_BUFFERS);
+ if (bytes < 0) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "rdm failed\n");
return;
}
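
run_card_proc() above applies the same idea one level up: a failed read of CLOCK_RESET_CNTRL_REG_1 now propagates the negative errno from rdmalt() instead of collapsing it into the driver-local STATUS_FAILURE, so the caller that kicked off the card start can tell what actually went wrong. The shape, with the driver's names:

	bytes = rdmalt(ps_adapter, CLOCK_RESET_CNTRL_REG_1, &value, sizeof(value));
	if (bytes < 0)
		return bytes;	/* the real errno, not a generic STATUS_FAILURE */
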
diff --git a/drivers/staging/bcm/hostmibs.c b/drivers/staging/bcm/hostmibs.c
index c13ea5c9a2aa..101c4e31249e 100644
--- a/drivers/staging/bcm/hostmibs.c
+++ b/drivers/staging/bcm/hostmibs.c
@@ -1,4 +1,3 @@
-
/*
* File Name: hostmibs.c
*
@@ -6,73 +5,72 @@
*
* Abstract: This file contains the routines to copy the statistics used by
* the driver to the Host MIBS structure and giving the same to Application.
- *
*/
+
#include "headers.h"
-INT ProcessGetHostMibs(PMINI_ADAPTER Adapter, S_MIBS_HOST_STATS_MIBS *pstHostMibs)
+INT ProcessGetHostMibs(PMINI_ADAPTER Adapter, S_MIBS_HOST_STATS_MIBS *pstHostMibs)
{
- S_SERVICEFLOW_ENTRY *pstServiceFlowEntry = NULL;
- S_PHS_RULE *pstPhsRule = NULL;
- S_CLASSIFIER_TABLE *pstClassifierTable = NULL;
- S_CLASSIFIER_ENTRY *pstClassifierRule = NULL;
- PPHS_DEVICE_EXTENSION pDeviceExtension = (PPHS_DEVICE_EXTENSION)&Adapter->stBCMPhsContext;
+ S_SERVICEFLOW_ENTRY *pstServiceFlowEntry = NULL;
+ S_PHS_RULE *pstPhsRule = NULL;
+ S_CLASSIFIER_TABLE *pstClassifierTable = NULL;
+ S_CLASSIFIER_ENTRY *pstClassifierRule = NULL;
+ PPHS_DEVICE_EXTENSION pDeviceExtension = (PPHS_DEVICE_EXTENSION) &Adapter->stBCMPhsContext;
- UINT nClassifierIndex = 0, nPhsTableIndex = 0,nSfIndex = 0, uiIndex = 0;
+ UINT nClassifierIndex = 0, nPhsTableIndex = 0, nSfIndex = 0, uiIndex = 0;
- if(pDeviceExtension == NULL)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, HOST_MIBS, DBG_LVL_ALL, "Invalid Device Extension\n");
+ if (pDeviceExtension == NULL) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, HOST_MIBS, DBG_LVL_ALL, "Invalid Device Extension\n");
return STATUS_FAILURE;
}
- //Copy the classifier Table
- for(nClassifierIndex=0; nClassifierIndex < MAX_CLASSIFIERS;
- nClassifierIndex++)
- {
- if(Adapter->astClassifierTable[nClassifierIndex].bUsed == TRUE)
- memcpy((PVOID)&pstHostMibs->astClassifierTable[nClassifierIndex],
- (PVOID)&Adapter->astClassifierTable[nClassifierIndex],
- sizeof(S_MIBS_CLASSIFIER_RULE));
+ /* Copy the classifier Table */
+ for (nClassifierIndex = 0; nClassifierIndex < MAX_CLASSIFIERS; nClassifierIndex++) {
+ if (Adapter->astClassifierTable[nClassifierIndex].bUsed == TRUE)
+ memcpy((PVOID) & pstHostMibs->
+ astClassifierTable[nClassifierIndex],
+ (PVOID) & Adapter->
+ astClassifierTable[nClassifierIndex],
+ sizeof(S_MIBS_CLASSIFIER_RULE));
}
- //Copy the SF Table
- for(nSfIndex=0; nSfIndex < NO_OF_QUEUES ; nSfIndex++)
- {
- if(Adapter->PackInfo[nSfIndex].bValid)
- {
- memcpy((PVOID)&pstHostMibs->astSFtable[nSfIndex],(PVOID)&Adapter->PackInfo[nSfIndex],sizeof(S_MIBS_SERVICEFLOW_TABLE));
- }
- else
- {
- //if index in not valid, don't process this for the PHS table. Go For the next entry.
- continue ;
- }
+ /* Copy the SF Table */
+ for (nSfIndex = 0; nSfIndex < NO_OF_QUEUES; nSfIndex++) {
+ if (Adapter->PackInfo[nSfIndex].bValid) {
+ memcpy((PVOID) & pstHostMibs->astSFtable[nSfIndex],
+ (PVOID) & Adapter->PackInfo[nSfIndex],
+ sizeof(S_MIBS_SERVICEFLOW_TABLE));
+ } else {
+ /* If index in not valid,
+ * don't process this for the PHS table.
+ * Go For the next entry.
+ */
+ continue;
+ }
- //Retrieve the SFID Entry Index for requested Service Flow
- if(PHS_INVALID_TABLE_INDEX == GetServiceFlowEntry(pDeviceExtension->pstServiceFlowPhsRulesTable,
- Adapter->PackInfo[nSfIndex].usVCID_Value ,&pstServiceFlowEntry))
- {
+ /* Retrieve the SFID Entry Index for requested Service Flow */
+ if (PHS_INVALID_TABLE_INDEX ==
+ GetServiceFlowEntry(pDeviceExtension->
+ pstServiceFlowPhsRulesTable,
+ Adapter->PackInfo[nSfIndex].
+ usVCID_Value, &pstServiceFlowEntry))
- continue;
- }
+ continue;
pstClassifierTable = pstServiceFlowEntry->pstClassifierTable;
-
- for(uiIndex = 0; uiIndex < MAX_PHSRULE_PER_SF; uiIndex++)
- {
+ for (uiIndex = 0; uiIndex < MAX_PHSRULE_PER_SF; uiIndex++) {
pstClassifierRule = &pstClassifierTable->stActivePhsRulesList[uiIndex];
- if(pstClassifierRule->bUsed)
- {
- pstPhsRule = pstClassifierRule->pstPhsRule;
+ if (pstClassifierRule->bUsed) {
+ pstPhsRule = pstClassifierRule->pstPhsRule;
- pstHostMibs->astPhsRulesTable[nPhsTableIndex].ulSFID = Adapter->PackInfo[nSfIndex].ulSFID;
+ pstHostMibs->astPhsRulesTable[nPhsTableIndex].
+ ulSFID = Adapter->PackInfo[nSfIndex].ulSFID;
- memcpy(&pstHostMibs->astPhsRulesTable[nPhsTableIndex].u8PHSI,
- &pstPhsRule->u8PHSI,
- sizeof(S_PHS_RULE));
+ memcpy(&pstHostMibs->
+ astPhsRulesTable[nPhsTableIndex].u8PHSI,
+ &pstPhsRule->u8PHSI, sizeof(S_PHS_RULE));
nPhsTableIndex++;
}
@@ -81,65 +79,63 @@ INT ProcessGetHostMibs(PMINI_ADAPTER Adapter, S_MIBS_HOST_STATS_MIBS *pstHostMi
}
-
- //copy other Host Statistics parameters
+ /* Copy other Host Statistics parameters */
pstHostMibs->stHostInfo.GoodTransmits = Adapter->dev->stats.tx_packets;
pstHostMibs->stHostInfo.GoodReceives = Adapter->dev->stats.rx_packets;
- pstHostMibs->stHostInfo.CurrNumFreeDesc =
- atomic_read(&Adapter->CurrNumFreeTxDesc);
+ pstHostMibs->stHostInfo.CurrNumFreeDesc = atomic_read(&Adapter->CurrNumFreeTxDesc);
pstHostMibs->stHostInfo.BEBucketSize = Adapter->BEBucketSize;
pstHostMibs->stHostInfo.rtPSBucketSize = Adapter->rtPSBucketSize;
pstHostMibs->stHostInfo.TimerActive = Adapter->TimerActive;
pstHostMibs->stHostInfo.u32TotalDSD = Adapter->u32TotalDSD;
- memcpy(pstHostMibs->stHostInfo.aTxPktSizeHist,Adapter->aTxPktSizeHist,sizeof(UINT32)*MIBS_MAX_HIST_ENTRIES);
- memcpy(pstHostMibs->stHostInfo.aRxPktSizeHist,Adapter->aRxPktSizeHist,sizeof(UINT32)*MIBS_MAX_HIST_ENTRIES);
+ memcpy(pstHostMibs->stHostInfo.aTxPktSizeHist, Adapter->aTxPktSizeHist, sizeof(UINT32) * MIBS_MAX_HIST_ENTRIES);
+ memcpy(pstHostMibs->stHostInfo.aRxPktSizeHist, Adapter->aRxPktSizeHist, sizeof(UINT32) * MIBS_MAX_HIST_ENTRIES);
return STATUS_SUCCESS;
}
-
VOID GetDroppedAppCntrlPktMibs(S_MIBS_HOST_STATS_MIBS *pstHostMibs, const PPER_TARANG_DATA pTarang)
{
memcpy(&(pstHostMibs->stDroppedAppCntrlMsgs),
- &(pTarang->stDroppedAppCntrlMsgs),sizeof(S_MIBS_DROPPED_APP_CNTRL_MESSAGES));
+ &(pTarang->stDroppedAppCntrlMsgs),
+ sizeof(S_MIBS_DROPPED_APP_CNTRL_MESSAGES));
}
-
-VOID CopyMIBSExtendedSFParameters(PMINI_ADAPTER Adapter,
- CServiceFlowParamSI *psfLocalSet, UINT uiSearchRuleIndex)
+VOID CopyMIBSExtendedSFParameters(PMINI_ADAPTER Adapter, CServiceFlowParamSI *psfLocalSet, UINT uiSearchRuleIndex)
{
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfSfid = psfLocalSet->u32SFID;
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsMaxSustainedRate = psfLocalSet->u32MaxSustainedTrafficRate;
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsMaxTrafficBurst = psfLocalSet->u32MaxTrafficBurst;
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsMinReservedRate = psfLocalSet->u32MinReservedTrafficRate;
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsToleratedJitter = psfLocalSet->u32ToleratedJitter;
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsMaxLatency = psfLocalSet->u32MaximumLatency;
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsFixedVsVariableSduInd = psfLocalSet->u8FixedLengthVSVariableLengthSDUIndicator;
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsFixedVsVariableSduInd = ntohl(Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsFixedVsVariableSduInd);
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsSduSize = psfLocalSet->u8SDUSize;
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsSduSize = ntohl(Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsSduSize);
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsSfSchedulingType = psfLocalSet->u8ServiceFlowSchedulingType;
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsSfSchedulingType = ntohl(Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsSfSchedulingType);
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqEnable = psfLocalSet->u8ARQEnable;
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqEnable = ntohl(Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqEnable);
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqWindowSize = ntohs(psfLocalSet->u16ARQWindowSize);
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqWindowSize = ntohl(Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqWindowSize);
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqBlockLifetime = ntohs(psfLocalSet->u16ARQBlockLifeTime);
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqBlockLifetime = ntohl(Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqBlockLifetime);
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqSyncLossTimeout = ntohs(psfLocalSet->u16ARQSyncLossTimeOut);
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqSyncLossTimeout = ntohl(Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqSyncLossTimeout);
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqDeliverInOrder = psfLocalSet->u8ARQDeliverInOrder;
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqDeliverInOrder = ntohl(Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqDeliverInOrder);
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqRxPurgeTimeout = ntohs(psfLocalSet->u16ARQRxPurgeTimeOut);
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqRxPurgeTimeout = ntohl(Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqRxPurgeTimeout);
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqBlockSize = ntohs(psfLocalSet->u16ARQBlockSize);
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqBlockSize = ntohl(Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqBlockSize);
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsReqTxPolicy = psfLocalSet->u8RequesttransmissionPolicy;
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsReqTxPolicy = ntohl(Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsReqTxPolicy);
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnSfCsSpecification = psfLocalSet->u8CSSpecification;
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnSfCsSpecification = ntohl(Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnSfCsSpecification);
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsTargetSaid = ntohs(psfLocalSet->u16TargetSAID);
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsTargetSaid = ntohl(Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsTargetSaid);
+ S_MIBS_EXTSERVICEFLOW_PARAMETERS *t = &Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable;
+
+ t->wmanIfSfid = psfLocalSet->u32SFID;
+ t->wmanIfCmnCpsMaxSustainedRate = psfLocalSet->u32MaxSustainedTrafficRate;
+ t->wmanIfCmnCpsMaxTrafficBurst = psfLocalSet->u32MaxTrafficBurst;
+ t->wmanIfCmnCpsMinReservedRate = psfLocalSet->u32MinReservedTrafficRate;
+ t->wmanIfCmnCpsToleratedJitter = psfLocalSet->u32ToleratedJitter;
+ t->wmanIfCmnCpsMaxLatency = psfLocalSet->u32MaximumLatency;
+ t->wmanIfCmnCpsFixedVsVariableSduInd = psfLocalSet->u8FixedLengthVSVariableLengthSDUIndicator;
+ t->wmanIfCmnCpsFixedVsVariableSduInd = ntohl(t->wmanIfCmnCpsFixedVsVariableSduInd);
+ t->wmanIfCmnCpsSduSize = psfLocalSet->u8SDUSize;
+ t->wmanIfCmnCpsSduSize = ntohl(t->wmanIfCmnCpsSduSize);
+ t->wmanIfCmnCpsSfSchedulingType = psfLocalSet->u8ServiceFlowSchedulingType;
+ t->wmanIfCmnCpsSfSchedulingType = ntohl(t->wmanIfCmnCpsSfSchedulingType);
+ t->wmanIfCmnCpsArqEnable = psfLocalSet->u8ARQEnable;
+ t->wmanIfCmnCpsArqEnable = ntohl(t->wmanIfCmnCpsArqEnable);
+ t->wmanIfCmnCpsArqWindowSize = ntohs(psfLocalSet->u16ARQWindowSize);
+ t->wmanIfCmnCpsArqWindowSize = ntohl(t->wmanIfCmnCpsArqWindowSize);
+ t->wmanIfCmnCpsArqBlockLifetime = ntohs(psfLocalSet->u16ARQBlockLifeTime);
+ t->wmanIfCmnCpsArqBlockLifetime = ntohl(t->wmanIfCmnCpsArqBlockLifetime);
+ t->wmanIfCmnCpsArqSyncLossTimeout = ntohs(psfLocalSet->u16ARQSyncLossTimeOut);
+ t->wmanIfCmnCpsArqSyncLossTimeout = ntohl(t->wmanIfCmnCpsArqSyncLossTimeout);
+ t->wmanIfCmnCpsArqDeliverInOrder = psfLocalSet->u8ARQDeliverInOrder;
+ t->wmanIfCmnCpsArqDeliverInOrder = ntohl(t->wmanIfCmnCpsArqDeliverInOrder);
+ t->wmanIfCmnCpsArqRxPurgeTimeout = ntohs(psfLocalSet->u16ARQRxPurgeTimeOut);
+ t->wmanIfCmnCpsArqRxPurgeTimeout = ntohl(t->wmanIfCmnCpsArqRxPurgeTimeout);
+ t->wmanIfCmnCpsArqBlockSize = ntohs(psfLocalSet->u16ARQBlockSize);
+ t->wmanIfCmnCpsArqBlockSize = ntohl(t->wmanIfCmnCpsArqBlockSize);
+ t->wmanIfCmnCpsReqTxPolicy = psfLocalSet->u8RequesttransmissionPolicy;
+ t->wmanIfCmnCpsReqTxPolicy = ntohl(t->wmanIfCmnCpsReqTxPolicy);
+ t->wmanIfCmnSfCsSpecification = psfLocalSet->u8CSSpecification;
+ t->wmanIfCmnSfCsSpecification = ntohl(t->wmanIfCmnSfCsSpecification);
+ t->wmanIfCmnCpsTargetSaid = ntohs(psfLocalSet->u16TargetSAID);
+ t->wmanIfCmnCpsTargetSaid = ntohl(t->wmanIfCmnCpsTargetSaid);
}
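
The hostmibs.c rewrite of CopyMIBSExtendedSFParameters() is a pure readability refactor: grab one local pointer to the deeply nested per-service-flow MIBs entry, then assign through it, so each field fits on a line and the ntohs()/ntohl() byte-order fix-ups sit right next to the value they adjust. The pattern in isolation, using the driver's type and field names (only two fields shown):

	S_MIBS_EXTSERVICEFLOW_PARAMETERS *t =
		&Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable;

	t->wmanIfSfid = psfLocalSet->u32SFID;
	t->wmanIfCmnCpsArqWindowSize = ntohs(psfLocalSet->u16ARQWindowSize);
	t->wmanIfCmnCpsArqWindowSize = ntohl(t->wmanIfCmnCpsArqWindowSize);
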
diff --git a/drivers/staging/bcm/led_control.c b/drivers/staging/bcm/led_control.c
index 16e939fa15d6..c7f488629722 100644
--- a/drivers/staging/bcm/led_control.c
+++ b/drivers/staging/bcm/led_control.c
@@ -5,65 +5,69 @@
static B_UINT16 CFG_CalculateChecksum(B_UINT8 *pu8Buffer, B_UINT32 u32Size)
{
- B_UINT16 u16CheckSum=0;
- while(u32Size--) {
+ B_UINT16 u16CheckSum = 0;
+ while (u32Size--) {
u16CheckSum += (B_UINT8)~(*pu8Buffer);
- pu8Buffer++;
+ pu8Buffer++;
}
return u16CheckSum;
}
+
BOOLEAN IsReqGpioIsLedInNVM(PMINI_ADAPTER Adapter, UINT gpios)
{
- INT Status ;
- Status = (Adapter->gpioBitMap & gpios) ^ gpios ;
- if(Status)
+ INT Status;
+ Status = (Adapter->gpioBitMap & gpios) ^ gpios;
+ if (Status)
return FALSE;
else
return TRUE;
}
-static INT LED_Blink(PMINI_ADAPTER Adapter, UINT GPIO_Num, UCHAR uiLedIndex, ULONG timeout, INT num_of_time, LedEventInfo_t currdriverstate)
+static INT LED_Blink(PMINI_ADAPTER Adapter, UINT GPIO_Num, UCHAR uiLedIndex,
+ ULONG timeout, INT num_of_time, LedEventInfo_t currdriverstate)
{
int Status = STATUS_SUCCESS;
BOOLEAN bInfinite = FALSE;
- /*Check if num_of_time is -ve. If yes, blink led in infinite loop*/
- if(num_of_time < 0)
- {
+ /* Check if num_of_time is -ve. If yes, blink led in infinite loop */
+ if (num_of_time < 0) {
bInfinite = TRUE;
num_of_time = 1;
}
- while(num_of_time)
- {
-
- if(currdriverstate == Adapter->DriverState)
+ while (num_of_time) {
+ if (currdriverstate == Adapter->DriverState)
TURN_ON_LED(GPIO_Num, uiLedIndex);
- /*Wait for timeout after setting on the LED*/
- Status = wait_event_interruptible_timeout(Adapter->LEDInfo.notify_led_event,
- currdriverstate != Adapter->DriverState || kthread_should_stop(),
- msecs_to_jiffies(timeout));
-
- if(kthread_should_stop())
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL, "Led thread got signal to exit..hence exiting");
- Adapter->LEDInfo.led_thread_running= BCM_LED_THREAD_DISABLED;
+ /* Wait for timeout after setting on the LED */
+ Status = wait_event_interruptible_timeout(
+ Adapter->LEDInfo.notify_led_event,
+ currdriverstate != Adapter->DriverState ||
+ kthread_should_stop(),
+ msecs_to_jiffies(timeout));
+
+ if (kthread_should_stop()) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
+ DBG_LVL_ALL,
+ "Led thread got signal to exit..hence exiting");
+ Adapter->LEDInfo.led_thread_running =
+ BCM_LED_THREAD_DISABLED;
TURN_OFF_LED(GPIO_Num, uiLedIndex);
- Status=EVENT_SIGNALED;
+ Status = EVENT_SIGNALED;
break;
}
- if(Status)
- {
+ if (Status) {
TURN_OFF_LED(GPIO_Num, uiLedIndex);
- Status=EVENT_SIGNALED;
+ Status = EVENT_SIGNALED;
break;
}
TURN_OFF_LED(GPIO_Num, uiLedIndex);
- Status = wait_event_interruptible_timeout(Adapter->LEDInfo.notify_led_event,
- currdriverstate!= Adapter->DriverState || kthread_should_stop(),
- msecs_to_jiffies(timeout));
- if(bInfinite == FALSE)
+ Status = wait_event_interruptible_timeout(
+ Adapter->LEDInfo.notify_led_event,
+ currdriverstate != Adapter->DriverState ||
+ kthread_should_stop(),
+ msecs_to_jiffies(timeout));
+ if (bInfinite == FALSE)
num_of_time--;
}
return Status;
@@ -71,19 +75,19 @@ static INT LED_Blink(PMINI_ADAPTER Adapter, UINT GPIO_Num, UCHAR uiLedIndex, ULO
static INT ScaleRateofTransfer(ULONG rate)
{
- if(rate <= 3)
+ if (rate <= 3)
return rate;
- else if((rate > 3) && (rate <= 100))
+ else if ((rate > 3) && (rate <= 100))
return 5;
- else if((rate > 100) && (rate <= 200))
+ else if ((rate > 100) && (rate <= 200))
return 6;
- else if((rate > 200) && (rate <= 300))
+ else if ((rate > 200) && (rate <= 300))
return 7;
- else if((rate > 300) && (rate <= 400))
+ else if ((rate > 300) && (rate <= 400))
return 8;
- else if((rate > 400) && (rate <= 500))
+ else if ((rate > 400) && (rate <= 500))
return 9;
- else if((rate > 500) && (rate <= 600))
+ else if ((rate > 500) && (rate <= 600))
return 10;
else
return MAX_NUM_OF_BLINKS;
@@ -92,215 +96,232 @@ static INT ScaleRateofTransfer(ULONG rate)
static INT LED_Proportional_Blink(PMINI_ADAPTER Adapter, UCHAR GPIO_Num_tx,
- UCHAR uiTxLedIndex, UCHAR GPIO_Num_rx, UCHAR uiRxLedIndex, LedEventInfo_t currdriverstate)
+ UCHAR uiTxLedIndex, UCHAR GPIO_Num_rx, UCHAR uiRxLedIndex,
+ LedEventInfo_t currdriverstate)
{
- /* Initial values of TX and RX packets*/
+ /* Initial values of TX and RX packets */
ULONG64 Initial_num_of_packts_tx = 0, Initial_num_of_packts_rx = 0;
- /*values of TX and RX packets after 1 sec*/
+ /* values of TX and RX packets after 1 sec */
ULONG64 Final_num_of_packts_tx = 0, Final_num_of_packts_rx = 0;
- /*Rate of transfer of Tx and Rx in 1 sec*/
+ /* Rate of transfer of Tx and Rx in 1 sec */
ULONG64 rate_of_transfer_tx = 0, rate_of_transfer_rx = 0;
int Status = STATUS_SUCCESS;
INT num_of_time = 0, num_of_time_tx = 0, num_of_time_rx = 0;
UINT remDelay = 0;
BOOLEAN bBlinkBothLED = TRUE;
- //UINT GPIO_num = DISABLE_GPIO_NUM;
+ /* UINT GPIO_num = DISABLE_GPIO_NUM; */
ulong timeout = 0;
- /*Read initial value of packets sent/received */
+ /* Read initial value of packets sent/received */
Initial_num_of_packts_tx = Adapter->dev->stats.tx_packets;
Initial_num_of_packts_rx = Adapter->dev->stats.rx_packets;
- /*Scale the rate of transfer to no of blinks.*/
- num_of_time_tx= ScaleRateofTransfer((ULONG)rate_of_transfer_tx);
- num_of_time_rx= ScaleRateofTransfer((ULONG)rate_of_transfer_rx);
+ /* Scale the rate of transfer to no of blinks. */
+ num_of_time_tx = ScaleRateofTransfer((ULONG)rate_of_transfer_tx);
+ num_of_time_rx = ScaleRateofTransfer((ULONG)rate_of_transfer_rx);
- while((Adapter->device_removed == FALSE))
- {
+ while ((Adapter->device_removed == FALSE)) {
timeout = 50;
- /*Blink Tx and Rx LED when both Tx and Rx is in normal bandwidth*/
- if(bBlinkBothLED)
- {
- /*Assign minimum number of blinks of either Tx or Rx.*/
- if(num_of_time_tx > num_of_time_rx)
+ /*
+ * Blink Tx and Rx LED when both Tx and Rx is
+ * in normal bandwidth
+ */
+ if (bBlinkBothLED) {
+ /*
+ * Assign minimum number of blinks of
+ * either Tx or Rx.
+ */
+ if (num_of_time_tx > num_of_time_rx)
num_of_time = num_of_time_rx;
else
num_of_time = num_of_time_tx;
- if(num_of_time > 0)
- {
- /*Blink both Tx and Rx LEDs*/
- if(LED_Blink(Adapter, 1<<GPIO_Num_tx, uiTxLedIndex, timeout, num_of_time,currdriverstate)
+ if (num_of_time > 0) {
+ /* Blink both Tx and Rx LEDs */
+ if (LED_Blink(Adapter, 1 << GPIO_Num_tx,
+ uiTxLedIndex, timeout,
+ num_of_time, currdriverstate)
== EVENT_SIGNALED)
- {
return EVENT_SIGNALED;
- }
- if(LED_Blink(Adapter, 1<<GPIO_Num_rx, uiRxLedIndex, timeout, num_of_time,currdriverstate)
+
+ if (LED_Blink(Adapter, 1 << GPIO_Num_rx,
+ uiRxLedIndex, timeout,
+ num_of_time, currdriverstate)
== EVENT_SIGNALED)
- {
return EVENT_SIGNALED;
- }
}
- if(num_of_time == num_of_time_tx)
- {
- /*Blink pending rate of Rx*/
- if(LED_Blink(Adapter, (1 << GPIO_Num_rx), uiRxLedIndex, timeout,
- num_of_time_rx-num_of_time,currdriverstate) == EVENT_SIGNALED)
- {
+ if (num_of_time == num_of_time_tx) {
+ /* Blink pending rate of Rx */
+ if (LED_Blink(Adapter, (1 << GPIO_Num_rx),
+ uiRxLedIndex, timeout,
+ num_of_time_rx-num_of_time,
+ currdriverstate)
+ == EVENT_SIGNALED)
return EVENT_SIGNALED;
- }
+
num_of_time = num_of_time_rx;
- }
- else
- {
- /*Blink pending rate of Tx*/
- if(LED_Blink(Adapter, 1<<GPIO_Num_tx, uiTxLedIndex, timeout,
- num_of_time_tx-num_of_time,currdriverstate) == EVENT_SIGNALED)
- {
+ } else {
+ /* Blink pending rate of Tx */
+ if (LED_Blink(Adapter, 1 << GPIO_Num_tx,
+ uiTxLedIndex, timeout,
+ num_of_time_tx-num_of_time,
+ currdriverstate)
+ == EVENT_SIGNALED)
return EVENT_SIGNALED;
- }
+
num_of_time = num_of_time_tx;
}
- }
- else
- {
- if(num_of_time == num_of_time_tx)
- {
- /*Blink pending rate of Rx*/
- if(LED_Blink(Adapter, 1<<GPIO_Num_tx, uiTxLedIndex, timeout, num_of_time,currdriverstate)
+ } else {
+ if (num_of_time == num_of_time_tx) {
+ /* Blink pending rate of Rx */
+ if (LED_Blink(Adapter, 1 << GPIO_Num_tx,
+ uiTxLedIndex, timeout,
+ num_of_time, currdriverstate)
== EVENT_SIGNALED)
- {
return EVENT_SIGNALED;
- }
- }
- else
- {
- /*Blink pending rate of Tx*/
- if(LED_Blink(Adapter, 1<<GPIO_Num_rx, uiRxLedIndex, timeout,
- num_of_time,currdriverstate) == EVENT_SIGNALED)
- {
+ } else {
+ /* Blink pending rate of Tx */
+ if (LED_Blink(Adapter, 1 << GPIO_Num_rx,
+ uiRxLedIndex, timeout,
+ num_of_time, currdriverstate)
+ == EVENT_SIGNALED)
return EVENT_SIGNALED;
- }
}
}
- /* If Tx/Rx rate is less than maximum blinks per second,
- * wait till delay completes to 1 second
- */
+
+ /*
+ * If Tx/Rx rate is less than maximum blinks per second,
+ * wait till delay completes to 1 second
+ */
remDelay = MAX_NUM_OF_BLINKS - num_of_time;
- if(remDelay > 0)
- {
- timeout= 100 * remDelay;
- Status = wait_event_interruptible_timeout(Adapter->LEDInfo.notify_led_event,
- currdriverstate!= Adapter->DriverState ||kthread_should_stop() ,
- msecs_to_jiffies (timeout));
-
- if(kthread_should_stop())
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL, "Led thread got signal to exit..hence exiting");
- Adapter->LEDInfo.led_thread_running= BCM_LED_THREAD_DISABLED;
+ if (remDelay > 0) {
+ timeout = 100 * remDelay;
+ Status = wait_event_interruptible_timeout(
+ Adapter->LEDInfo.notify_led_event,
+ currdriverstate != Adapter->DriverState
+ || kthread_should_stop(),
+ msecs_to_jiffies(timeout));
+
+ if (kthread_should_stop()) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
+ LED_DUMP_INFO, DBG_LVL_ALL,
+ "Led thread got signal to exit..hence exiting");
+ Adapter->LEDInfo.led_thread_running =
+ BCM_LED_THREAD_DISABLED;
return EVENT_SIGNALED;
}
- if(Status)
+ if (Status)
return EVENT_SIGNALED;
}
- /*Turn off both Tx and Rx LEDs before next second*/
- TURN_OFF_LED(1<<GPIO_Num_tx, uiTxLedIndex);
- TURN_OFF_LED(1<<GPIO_Num_rx, uiTxLedIndex);
+ /* Turn off both Tx and Rx LEDs before next second */
+ TURN_OFF_LED(1 << GPIO_Num_tx, uiTxLedIndex);
+ TURN_OFF_LED(1 << GPIO_Num_rx, uiTxLedIndex);
/*
- * Read the Tx & Rx packets transmission after 1 second and
- * calculate rate of transfer
- */
+ * Read the Tx & Rx packets transmission after 1 second and
+ * calculate rate of transfer
+ */
Final_num_of_packts_tx = Adapter->dev->stats.tx_packets;
Final_num_of_packts_rx = Adapter->dev->stats.rx_packets;
- rate_of_transfer_tx = Final_num_of_packts_tx - Initial_num_of_packts_tx;
- rate_of_transfer_rx = Final_num_of_packts_rx - Initial_num_of_packts_rx;
+ rate_of_transfer_tx = Final_num_of_packts_tx -
+ Initial_num_of_packts_tx;
+ rate_of_transfer_rx = Final_num_of_packts_rx -
+ Initial_num_of_packts_rx;
- /*Read initial value of packets sent/received */
+ /* Read initial value of packets sent/received */
Initial_num_of_packts_tx = Final_num_of_packts_tx;
- Initial_num_of_packts_rx = Final_num_of_packts_rx ;
+ Initial_num_of_packts_rx = Final_num_of_packts_rx;
- /*Scale the rate of transfer to no of blinks.*/
- num_of_time_tx= ScaleRateofTransfer((ULONG)rate_of_transfer_tx);
- num_of_time_rx= ScaleRateofTransfer((ULONG)rate_of_transfer_rx);
+ /* Scale the rate of transfer to no of blinks. */
+ num_of_time_tx =
+ ScaleRateofTransfer((ULONG)rate_of_transfer_tx);
+ num_of_time_rx =
+ ScaleRateofTransfer((ULONG)rate_of_transfer_rx);
}
return Status;
}
-
-//-----------------------------------------------------------------------------
-// Procedure: ValidateDSDParamsChecksum
-//
-// Description: Reads DSD Params and validates checkusm.
-//
-// Arguments:
-// Adapter - Pointer to Adapter structure.
-// ulParamOffset - Start offset of the DSD parameter to be read and validated.
-// usParamLen - Length of the DSD Parameter.
-//
-// Returns:
-// <OSAL_STATUS_CODE>
-//-----------------------------------------------------------------------------
-
-static INT ValidateDSDParamsChecksum(
- PMINI_ADAPTER Adapter,
- ULONG ulParamOffset,
- USHORT usParamLen )
+/*
+ * -----------------------------------------------------------------------------
+ * Procedure: ValidateDSDParamsChecksum
+ *
+ * Description: Reads DSD Params and validates checkusm.
+ *
+ * Arguments:
+ * Adapter - Pointer to Adapter structure.
+ * ulParamOffset - Start offset of the DSD parameter to be read and
+ * validated.
+ * usParamLen - Length of the DSD Parameter.
+ *
+ * Returns:
+ * <OSAL_STATUS_CODE>
+ * -----------------------------------------------------------------------------
+ */
+static INT ValidateDSDParamsChecksum(PMINI_ADAPTER Adapter, ULONG ulParamOffset,
+ USHORT usParamLen)
{
INT Status = STATUS_SUCCESS;
- PUCHAR puBuffer = NULL;
- USHORT usChksmOrg = 0;
+ PUCHAR puBuffer = NULL;
+ USHORT usChksmOrg = 0;
USHORT usChecksumCalculated = 0;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,"LED Thread:ValidateDSDParamsChecksum: 0x%lx 0x%X",ulParamOffset, usParamLen);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,
+ "LED Thread:ValidateDSDParamsChecksum: 0x%lx 0x%X",
+ ulParamOffset, usParamLen);
puBuffer = kmalloc(usParamLen, GFP_KERNEL);
- if(!puBuffer)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,"LED Thread: ValidateDSDParamsChecksum Allocation failed");
+ if (!puBuffer) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
+ DBG_LVL_ALL,
+ "LED Thread: ValidateDSDParamsChecksum Allocation failed");
return -ENOMEM;
}
- //
- // Read the DSD data from the parameter offset.
- //
- if(STATUS_SUCCESS != BeceemNVMRead(Adapter,(PUINT)puBuffer,ulParamOffset,usParamLen))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,"LED Thread: ValidateDSDParamsChecksum BeceemNVMRead failed");
- Status=STATUS_IMAGE_CHECKSUM_MISMATCH;
+ /* Read the DSD data from the parameter offset. */
+ if (STATUS_SUCCESS != BeceemNVMRead(Adapter, (PUINT)puBuffer,
+ ulParamOffset, usParamLen)) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
+ DBG_LVL_ALL,
+ "LED Thread: ValidateDSDParamsChecksum BeceemNVMRead failed");
+ Status = STATUS_IMAGE_CHECKSUM_MISMATCH;
goto exit;
}
- //
- // Calculate the checksum of the data read from the DSD parameter.
- //
- usChecksumCalculated = CFG_CalculateChecksum(puBuffer,usParamLen);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,"LED Thread: usCheckSumCalculated = 0x%x\n", usChecksumCalculated);
-
- //
- // End of the DSD parameter will have a TWO bytes checksum stored in it. Read it and compare with the calculated
- // Checksum.
- //
- if(STATUS_SUCCESS != BeceemNVMRead(Adapter,(PUINT)&usChksmOrg,ulParamOffset+usParamLen,2))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,"LED Thread: ValidateDSDParamsChecksum BeceemNVMRead failed");
- Status=STATUS_IMAGE_CHECKSUM_MISMATCH;
+ /* Calculate the checksum of the data read from the DSD parameter. */
+ usChecksumCalculated = CFG_CalculateChecksum(puBuffer, usParamLen);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,
+ "LED Thread: usCheckSumCalculated = 0x%x\n",
+ usChecksumCalculated);
+
+ /*
+ * End of the DSD parameter will have a TWO bytes checksum stored in it.
+ * Read it and compare with the calculated Checksum.
+ */
+ if (STATUS_SUCCESS != BeceemNVMRead(Adapter, (PUINT)&usChksmOrg,
+ ulParamOffset+usParamLen, 2)) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
+ DBG_LVL_ALL,
+ "LED Thread: ValidateDSDParamsChecksum BeceemNVMRead failed");
+ Status = STATUS_IMAGE_CHECKSUM_MISMATCH;
goto exit;
}
usChksmOrg = ntohs(usChksmOrg);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,"LED Thread: usChksmOrg = 0x%x", usChksmOrg);
-
- //
- // Compare the checksum calculated with the checksum read from DSD section
- //
- if(usChecksumCalculated ^ usChksmOrg)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,"LED Thread: ValidateDSDParamsChecksum: Checksums don't match");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,
+ "LED Thread: usChksmOrg = 0x%x", usChksmOrg);
+
+ /*
+ * Compare the checksum calculated with the checksum read
+ * from DSD section
+ */
+ if (usChecksumCalculated ^ usChksmOrg) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
+ DBG_LVL_ALL,
+ "LED Thread: ValidateDSDParamsChecksum: Checksums don't match");
Status = STATUS_IMAGE_CHECKSUM_MISMATCH;
goto exit;
}
@@ -311,523 +332,526 @@ exit:
}
-//-----------------------------------------------------------------------------
-// Procedure: ValidateHWParmStructure
-//
-// Description: Validates HW Parameters.
-//
-// Arguments:
-// Adapter - Pointer to Adapter structure.
-// ulHwParamOffset - Start offset of the HW parameter Section to be read and validated.
-//
-// Returns:
-// <OSAL_STATUS_CODE>
-//-----------------------------------------------------------------------------
-
+/*
+ * -----------------------------------------------------------------------------
+ * Procedure: ValidateHWParmStructure
+ *
+ * Description: Validates HW Parameters.
+ *
+ * Arguments:
+ * Adapter - Pointer to Adapter structure.
+ * ulHwParamOffset - Start offset of the HW parameter Section to be read
+ * and validated.
+ *
+ * Returns:
+ * <OSAL_STATUS_CODE>
+ * -----------------------------------------------------------------------------
+ */
static INT ValidateHWParmStructure(PMINI_ADAPTER Adapter, ULONG ulHwParamOffset)
{
- INT Status = STATUS_SUCCESS ;
+ INT Status = STATUS_SUCCESS;
USHORT HwParamLen = 0;
- // Add DSD start offset to the hwParamOffset to get the actual address.
+ /*
+ * Add DSD start offset to the hwParamOffset to get
+ * the actual address.
+ */
ulHwParamOffset += DSD_START_OFFSET;
- /*Read the Length of HW_PARAM structure*/
- BeceemNVMRead(Adapter,(PUINT)&HwParamLen,ulHwParamOffset,2);
+ /* Read the Length of HW_PARAM structure */
+ BeceemNVMRead(Adapter, (PUINT)&HwParamLen, ulHwParamOffset, 2);
HwParamLen = ntohs(HwParamLen);
- if(0==HwParamLen || HwParamLen > Adapter->uiNVMDSDSize)
- {
+ if (0 == HwParamLen || HwParamLen > Adapter->uiNVMDSDSize)
return STATUS_IMAGE_CHECKSUM_MISMATCH;
- }
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL, "LED Thread:HwParamLen = 0x%x", HwParamLen);
- Status =ValidateDSDParamsChecksum(Adapter,ulHwParamOffset,HwParamLen);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,
+ "LED Thread:HwParamLen = 0x%x", HwParamLen);
+ Status = ValidateDSDParamsChecksum(Adapter, ulHwParamOffset,
+ HwParamLen);
return Status;
} /* ValidateHWParmStructure() */
-static int ReadLEDInformationFromEEPROM(PMINI_ADAPTER Adapter, UCHAR GPIO_Array[])
+static int ReadLEDInformationFromEEPROM(PMINI_ADAPTER Adapter,
+ UCHAR GPIO_Array[])
{
int Status = STATUS_SUCCESS;
- ULONG dwReadValue = 0;
- USHORT usHwParamData = 0;
- USHORT usEEPROMVersion = 0;
- UCHAR ucIndex = 0;
- UCHAR ucGPIOInfo[32] = {0};
+ ULONG dwReadValue = 0;
+ USHORT usHwParamData = 0;
+ USHORT usEEPROMVersion = 0;
+ UCHAR ucIndex = 0;
+ UCHAR ucGPIOInfo[32] = {0};
- BeceemNVMRead(Adapter,(PUINT)&usEEPROMVersion,EEPROM_VERSION_OFFSET,2);
+ BeceemNVMRead(Adapter, (PUINT)&usEEPROMVersion,
+ EEPROM_VERSION_OFFSET, 2);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,"usEEPROMVersion: Minor:0x%X Major:0x%x",usEEPROMVersion&0xFF, ((usEEPROMVersion>>8)&0xFF));
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,
+ "usEEPROMVersion: Minor:0x%X Major:0x%x",
+ usEEPROMVersion&0xFF, ((usEEPROMVersion>>8)&0xFF));
- if(((usEEPROMVersion>>8)&0xFF) < EEPROM_MAP5_MAJORVERSION)
- {
- BeceemNVMRead(Adapter,(PUINT)&usHwParamData,EEPROM_HW_PARAM_POINTER_ADDRESS,2);
+ if (((usEEPROMVersion>>8)&0xFF) < EEPROM_MAP5_MAJORVERSION) {
+ BeceemNVMRead(Adapter, (PUINT)&usHwParamData,
+ EEPROM_HW_PARAM_POINTER_ADDRESS, 2);
usHwParamData = ntohs(usHwParamData);
dwReadValue = usHwParamData;
- }
- else
- {
- //
- // Validate Compatibility section and then read HW param if compatibility section is valid.
- //
+ } else {
+ /*
+ * Validate Compatibility section and then read HW param
+ * if compatibility section is valid.
+ */
Status = ValidateDSDParamsChecksum(Adapter,
- DSD_START_OFFSET,
- COMPATIBILITY_SECTION_LENGTH_MAP5);
+ DSD_START_OFFSET,
+ COMPATIBILITY_SECTION_LENGTH_MAP5);
- if(Status != STATUS_SUCCESS)
- {
+ if (Status != STATUS_SUCCESS)
return Status;
- }
- BeceemNVMRead(Adapter,(PUINT)&dwReadValue,EEPROM_HW_PARAM_POINTER_ADDRRES_MAP5,4);
+
+ BeceemNVMRead(Adapter, (PUINT)&dwReadValue,
+ EEPROM_HW_PARAM_POINTER_ADDRRES_MAP5, 4);
dwReadValue = ntohl(dwReadValue);
}
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,"LED Thread: Start address of HW_PARAM structure = 0x%lx",dwReadValue);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,
+ "LED Thread: Start address of HW_PARAM structure = 0x%lx",
+ dwReadValue);
- //
- // Validate if the address read out is within the DSD.
- // Adapter->uiNVMDSDSize gives whole DSD size inclusive of Autoinit.
- // lower limit should be above DSD_START_OFFSET and
- // upper limit should be below (Adapter->uiNVMDSDSize-DSD_START_OFFSET)
- //
- if(dwReadValue < DSD_START_OFFSET ||
- dwReadValue > (Adapter->uiNVMDSDSize-DSD_START_OFFSET))
- {
+ /*
+ * Validate if the address read out is within the DSD.
+ * Adapter->uiNVMDSDSize gives whole DSD size inclusive of Autoinit.
+ * lower limit should be above DSD_START_OFFSET and
+ * upper limit should be below (Adapter->uiNVMDSDSize-DSD_START_OFFSET)
+ */
+ if (dwReadValue < DSD_START_OFFSET ||
+ dwReadValue > (Adapter->uiNVMDSDSize-DSD_START_OFFSET))
return STATUS_IMAGE_CHECKSUM_MISMATCH;
- }
Status = ValidateHWParmStructure(Adapter, dwReadValue);
- if(Status){
+ if (Status)
return Status;
- }
/*
- Add DSD_START_OFFSET to the offset read from the EEPROM.
- This will give the actual start HW Parameters start address.
- To read GPIO section, add GPIO offset further.
- */
-
- dwReadValue += DSD_START_OFFSET; // = start address of hw param section.
- dwReadValue += GPIO_SECTION_START_OFFSET; // = GPIO start offset within HW Param section.
-
- /* Read the GPIO values for 32 GPIOs from EEPROM and map the function
- * number to GPIO pin number to GPIO_Array
- */
- BeceemNVMRead(Adapter, (UINT *)ucGPIOInfo,dwReadValue,32);
- for(ucIndex = 0; ucIndex < 32; ucIndex++)
- {
-
- switch(ucGPIOInfo[ucIndex])
- {
- case RED_LED:
- {
- GPIO_Array[RED_LED] = ucIndex;
- Adapter->gpioBitMap |= (1<<ucIndex);
- break;
- }
- case BLUE_LED:
- {
- GPIO_Array[BLUE_LED] = ucIndex;
- Adapter->gpioBitMap |= (1<<ucIndex);
- break;
- }
- case YELLOW_LED:
- {
- GPIO_Array[YELLOW_LED] = ucIndex;
- Adapter->gpioBitMap |= (1<<ucIndex);
- break;
- }
- case GREEN_LED:
- {
- GPIO_Array[GREEN_LED] = ucIndex;
- Adapter->gpioBitMap |= (1<<ucIndex);
- break;
- }
- default:
- break;
- }
+ * Add DSD_START_OFFSET to the offset read from the EEPROM.
+ * This gives the actual start address of the HW Parameters section.
+ * To read the GPIO section, add the GPIO offset as well.
+ */
+
+ dwReadValue +=
+ DSD_START_OFFSET; /* = start address of hw param section. */
+ dwReadValue += GPIO_SECTION_START_OFFSET;
+ /* = GPIO start offset within HW Param section. */
+ /*
+ * Read the GPIO values for 32 GPIOs from EEPROM and map each function
+ * number to its GPIO pin number in GPIO_Array
+ */
+ BeceemNVMRead(Adapter, (UINT *)ucGPIOInfo, dwReadValue, 32);
+ for (ucIndex = 0; ucIndex < 32; ucIndex++) {
+
+ switch (ucGPIOInfo[ucIndex]) {
+ case RED_LED:
+ GPIO_Array[RED_LED] = ucIndex;
+ Adapter->gpioBitMap |= (1 << ucIndex);
+ break;
+ case BLUE_LED:
+ GPIO_Array[BLUE_LED] = ucIndex;
+ Adapter->gpioBitMap |= (1 << ucIndex);
+ break;
+ case YELLOW_LED:
+ GPIO_Array[YELLOW_LED] = ucIndex;
+ Adapter->gpioBitMap |= (1 << ucIndex);
+ break;
+ case GREEN_LED:
+ GPIO_Array[GREEN_LED] = ucIndex;
+ Adapter->gpioBitMap |= (1 << ucIndex);
+ break;
+ default:
+ break;
}
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,"GPIO's bit map correspond to LED :0x%X",Adapter->gpioBitMap);
- return Status;
+
+ }
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,
+ "GPIO's bit map correspond to LED :0x%X", Adapter->gpioBitMap);
+ return Status;
}
-static int ReadConfigFileStructure(PMINI_ADAPTER Adapter, BOOLEAN *bEnableThread)
+static int ReadConfigFileStructure(PMINI_ADAPTER Adapter,
+ BOOLEAN *bEnableThread)
{
int Status = STATUS_SUCCESS;
- UCHAR GPIO_Array[NUM_OF_LEDS+1]; /*Array to store GPIO numbers from EEPROM*/
+ /* Array to store GPIO numbers from EEPROM */
+ UCHAR GPIO_Array[NUM_OF_LEDS+1];
UINT uiIndex = 0;
UINT uiNum_of_LED_Type = 0;
PUCHAR puCFGData = NULL;
UCHAR bData = 0;
memset(GPIO_Array, DISABLE_GPIO_NUM, NUM_OF_LEDS+1);
- if(!Adapter->pstargetparams || IS_ERR(Adapter->pstargetparams))
- {
- BCM_DEBUG_PRINT (Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL, "Target Params not Avail.\n");
+ if (!Adapter->pstargetparams || IS_ERR(Adapter->pstargetparams)) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
+ DBG_LVL_ALL, "Target Params not Avail.\n");
return -ENOENT;
}
- /*Populate GPIO_Array with GPIO numbers for LED functions*/
- /*Read the GPIO numbers from EEPROM*/
+ /* Populate GPIO_Array with GPIO numbers for LED functions */
+ /* Read the GPIO numbers from EEPROM */
Status = ReadLEDInformationFromEEPROM(Adapter, GPIO_Array);
- if(Status == STATUS_IMAGE_CHECKSUM_MISMATCH)
- {
+ if (Status == STATUS_IMAGE_CHECKSUM_MISMATCH) {
*bEnableThread = FALSE;
return STATUS_SUCCESS;
- }
- else if(Status)
- {
+ } else if (Status) {
*bEnableThread = FALSE;
return Status;
}
- /*
- * CONFIG file read successfully. Deallocate the memory of
- * uiFileNameBufferSize
- */
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,"LED Thread: Config file read successfully\n");
+
+ /*
+ * CONFIG file read successfully. Deallocate the memory of
+ * uiFileNameBufferSize
+ */
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,
+ "LED Thread: Config file read successfully\n");
puCFGData = (PUCHAR) &Adapter->pstargetparams->HostDrvrConfig1;
/*
- * Offset for HostDrvConfig1, HostDrvConfig2, HostDrvConfig3 which
- * will have the information of LED type, LED on state for different
- * driver state and LED blink state.
- */
+ * Offset for HostDrvConfig1, HostDrvConfig2, HostDrvConfig3 which
+ * will have the information of LED type, LED on state for different
+ * driver state and LED blink state.
+ */
- for(uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++)
- {
+ for (uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++) {
bData = *puCFGData;
- /*Check Bit 8 for polarity. If it is set, polarity is reverse polarity*/
- if(bData & 0x80)
- {
+ /*
+ * Check Bit 8 for polarity. If it is set,
+ * polarity is reverse polarity
+ */
+ if (bData & 0x80) {
Adapter->LEDInfo.LEDState[uiIndex].BitPolarity = 0;
- /*unset the bit 8*/
+ /* unset the bit 8 */
bData = bData & 0x7f;
}
Adapter->LEDInfo.LEDState[uiIndex].LED_Type = bData;
- if(bData <= NUM_OF_LEDS)
- Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num = GPIO_Array[bData];
+ if (bData <= NUM_OF_LEDS)
+ Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num =
+ GPIO_Array[bData];
else
- Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num = DISABLE_GPIO_NUM;
+ Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num =
+ DISABLE_GPIO_NUM;
puCFGData++;
bData = *puCFGData;
Adapter->LEDInfo.LEDState[uiIndex].LED_On_State = bData;
puCFGData++;
bData = *puCFGData;
- Adapter->LEDInfo.LEDState[uiIndex].LED_Blink_State= bData;
+ Adapter->LEDInfo.LEDState[uiIndex].LED_Blink_State = bData;
puCFGData++;
}
- /*Check if all the LED settings are disabled. If it is disabled, dont launch the LED control thread.*/
- for(uiIndex = 0; uiIndex<NUM_OF_LEDS; uiIndex++)
- {
- if((Adapter->LEDInfo.LEDState[uiIndex].LED_Type == DISABLE_GPIO_NUM) ||
- (Adapter->LEDInfo.LEDState[uiIndex].LED_Type == 0x7f) ||
+ /*
+ * Check if all the LED settings are disabled. If they are disabled,
+ * don't launch the LED control thread.
+ */
+ for (uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++) {
+ if ((Adapter->LEDInfo.LEDState[uiIndex].LED_Type == DISABLE_GPIO_NUM) ||
+ (Adapter->LEDInfo.LEDState[uiIndex].LED_Type == 0x7f) ||
(Adapter->LEDInfo.LEDState[uiIndex].LED_Type == 0))
uiNum_of_LED_Type++;
}
- if(uiNum_of_LED_Type >= NUM_OF_LEDS)
+ if (uiNum_of_LED_Type >= NUM_OF_LEDS)
*bEnableThread = FALSE;
return Status;
}
-//--------------------------------------------------------------------------
-// Procedure: LedGpioInit
-//
-// Description: Initializes LED GPIOs. Makes the LED GPIOs to OUTPUT mode and make the
-// initial state to be OFF.
-//
-// Arguments:
-// Adapter - Pointer to MINI_ADAPTER structure.
-//
-// Returns: VOID
-//
-//-----------------------------------------------------------------------------
+/*
+ * -----------------------------------------------------------------------------
+ * Procedure: LedGpioInit
+ *
+ * Description: Initializes LED GPIOs. Sets the LED GPIOs to OUTPUT mode
+ * and the initial state to OFF.
+ *
+ * Arguments:
+ * Adapter - Pointer to MINI_ADAPTER structure.
+ *
+ * Returns: VOID
+ *
+ * -----------------------------------------------------------------------------
+ */
static VOID LedGpioInit(PMINI_ADAPTER Adapter)
{
UINT uiResetValue = 0;
UINT uiIndex = 0;
/* Set all LED GPIO Mode to output mode */
- if(rdmalt(Adapter, GPIO_MODE_REGISTER, &uiResetValue, sizeof(uiResetValue)) <0)
- BCM_DEBUG_PRINT (Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,"LED Thread: RDM Failed\n");
- for(uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++)
- {
- if(Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num != DISABLE_GPIO_NUM)
+ if (rdmalt(Adapter, GPIO_MODE_REGISTER, &uiResetValue,
+ sizeof(uiResetValue)) < 0)
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
+ DBG_LVL_ALL, "LED Thread: RDM Failed\n");
+ for (uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++) {
+ if (Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num !=
+ DISABLE_GPIO_NUM)
uiResetValue |= (1 << Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num);
- TURN_OFF_LED(1<<Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num,uiIndex);
+ TURN_OFF_LED(1 << Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num,
+ uiIndex);
}
- if(wrmalt(Adapter, GPIO_MODE_REGISTER, &uiResetValue, sizeof(uiResetValue)) < 0)
- BCM_DEBUG_PRINT (Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,"LED Thread: WRM Failed\n");
+ if (wrmalt(Adapter, GPIO_MODE_REGISTER, &uiResetValue,
+ sizeof(uiResetValue)) < 0)
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
+ DBG_LVL_ALL, "LED Thread: WRM Failed\n");
- Adapter->LEDInfo.bIdle_led_off = FALSE;
+ Adapter->LEDInfo.bIdle_led_off = FALSE;
}
-//-----------------------------------------------------------------------------
-static INT BcmGetGPIOPinInfo(PMINI_ADAPTER Adapter, UCHAR *GPIO_num_tx, UCHAR *GPIO_num_rx ,UCHAR *uiLedTxIndex, UCHAR *uiLedRxIndex,LedEventInfo_t currdriverstate)
+static INT BcmGetGPIOPinInfo(PMINI_ADAPTER Adapter, UCHAR *GPIO_num_tx,
+ UCHAR *GPIO_num_rx, UCHAR *uiLedTxIndex, UCHAR *uiLedRxIndex,
+ LedEventInfo_t currdriverstate)
{
UINT uiIndex = 0;
*GPIO_num_tx = DISABLE_GPIO_NUM;
*GPIO_num_rx = DISABLE_GPIO_NUM;
- for(uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++)
- {
+ for (uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++) {
- if((currdriverstate == NORMAL_OPERATION)||
- (currdriverstate == IDLEMODE_EXIT)||
- (currdriverstate == FW_DOWNLOAD))
- {
- if(Adapter->LEDInfo.LEDState[uiIndex].LED_Blink_State & currdriverstate)
- {
- if(Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num != DISABLE_GPIO_NUM)
- {
- if(*GPIO_num_tx == DISABLE_GPIO_NUM)
- {
+ if ((currdriverstate == NORMAL_OPERATION) ||
+ (currdriverstate == IDLEMODE_EXIT) ||
+ (currdriverstate == FW_DOWNLOAD)) {
+ if (Adapter->LEDInfo.LEDState[uiIndex].LED_Blink_State &
+ currdriverstate) {
+ if (Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num
+ != DISABLE_GPIO_NUM) {
+ if (*GPIO_num_tx == DISABLE_GPIO_NUM) {
*GPIO_num_tx = Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num;
*uiLedTxIndex = uiIndex;
- }
- else
- {
+ } else {
*GPIO_num_rx = Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num;
*uiLedRxIndex = uiIndex;
}
}
}
- }
- else
- {
- if(Adapter->LEDInfo.LEDState[uiIndex].LED_On_State & currdriverstate)
- {
- if(Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num != DISABLE_GPIO_NUM)
- {
+ } else {
+ if (Adapter->LEDInfo.LEDState[uiIndex].LED_On_State
+ & currdriverstate) {
+ if (Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num
+ != DISABLE_GPIO_NUM) {
*GPIO_num_tx = Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num;
*uiLedTxIndex = uiIndex;
}
}
}
}
- return STATUS_SUCCESS ;
+ return STATUS_SUCCESS;
}
static VOID LEDControlThread(PMINI_ADAPTER Adapter)
{
UINT uiIndex = 0;
UCHAR GPIO_num = 0;
- UCHAR uiLedIndex = 0 ;
+ UCHAR uiLedIndex = 0;
UINT uiResetValue = 0;
LedEventInfo_t currdriverstate = 0;
ulong timeout = 0;
INT Status = 0;
- UCHAR dummyGPIONum = 0;
- UCHAR dummyIndex = 0;
+ UCHAR dummyGPIONum = 0;
+ UCHAR dummyIndex = 0;
- //currdriverstate = Adapter->DriverState;
+ /* currdriverstate = Adapter->DriverState; */
Adapter->LEDInfo.bIdleMode_tx_from_host = FALSE;
- /*Wait till event is triggered*/
- //wait_event(Adapter->LEDInfo.notify_led_event,
- // currdriverstate!= Adapter->DriverState);
+ /*
+ * Wait till event is triggered
+ *
+ * wait_event(Adapter->LEDInfo.notify_led_event,
+ * currdriverstate!= Adapter->DriverState);
+ */
- GPIO_num = DISABLE_GPIO_NUM ;
+ GPIO_num = DISABLE_GPIO_NUM;
- while(TRUE)
- {
- /*Wait till event is triggered*/
- if( (GPIO_num == DISABLE_GPIO_NUM)
+ while (TRUE) {
+ /* Wait till event is triggered */
+ if ((GPIO_num == DISABLE_GPIO_NUM)
||
- ((currdriverstate != FW_DOWNLOAD) &&
- (currdriverstate != NORMAL_OPERATION) &&
- (currdriverstate != LOWPOWER_MODE_ENTER))
- ||
- (currdriverstate == LED_THREAD_INACTIVE) )
- {
- Status = wait_event_interruptible(Adapter->LEDInfo.notify_led_event,
- currdriverstate != Adapter->DriverState || kthread_should_stop());
- }
-
- if(kthread_should_stop() || Adapter->device_removed )
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL, "Led thread got signal to exit..hence exiting");
- Adapter->LEDInfo.led_thread_running = BCM_LED_THREAD_DISABLED;
- TURN_OFF_LED(1<<GPIO_num, uiLedIndex);
- return ;//STATUS_FAILURE;
+ ((currdriverstate != FW_DOWNLOAD) &&
+ (currdriverstate != NORMAL_OPERATION) &&
+ (currdriverstate != LOWPOWER_MODE_ENTER))
+ ||
+ (currdriverstate == LED_THREAD_INACTIVE))
+ Status = wait_event_interruptible(
+ Adapter->LEDInfo.notify_led_event,
+ currdriverstate != Adapter->DriverState
+ || kthread_should_stop());
+
+ if (kthread_should_stop() || Adapter->device_removed) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
+ DBG_LVL_ALL,
+ "Led thread got signal to exit..hence exiting");
+ Adapter->LEDInfo.led_thread_running =
+ BCM_LED_THREAD_DISABLED;
+ TURN_OFF_LED(1 << GPIO_num, uiLedIndex);
+ return; /* STATUS_FAILURE; */
}
- if(GPIO_num != DISABLE_GPIO_NUM)
- {
- TURN_OFF_LED(1<<GPIO_num, uiLedIndex);
- }
+ if (GPIO_num != DISABLE_GPIO_NUM)
+ TURN_OFF_LED(1 << GPIO_num, uiLedIndex);
- if(Adapter->LEDInfo.bLedInitDone == FALSE)
- {
+ if (Adapter->LEDInfo.bLedInitDone == FALSE) {
LedGpioInit(Adapter);
Adapter->LEDInfo.bLedInitDone = TRUE;
}
- switch(Adapter->DriverState)
- {
- case DRIVER_INIT:
- {
- currdriverstate = DRIVER_INIT;//Adapter->DriverState;
- BcmGetGPIOPinInfo(Adapter, &GPIO_num, &dummyGPIONum, &uiLedIndex, &dummyIndex, currdriverstate);
+ switch (Adapter->DriverState) {
+ case DRIVER_INIT:
+ currdriverstate = DRIVER_INIT;
+ /* Adapter->DriverState; */
+ BcmGetGPIOPinInfo(Adapter, &GPIO_num, &dummyGPIONum,
+ &uiLedIndex, &dummyIndex, currdriverstate);
+
+ if (GPIO_num != DISABLE_GPIO_NUM)
+ TURN_ON_LED(1 << GPIO_num, uiLedIndex);
- if(GPIO_num != DISABLE_GPIO_NUM)
- {
- TURN_ON_LED(1<<GPIO_num, uiLedIndex);
- }
- }
break;
- case FW_DOWNLOAD:
- {
- //BCM_DEBUG_PRINT (Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,"LED Thread: FW_DN_DONE called\n");
- currdriverstate = FW_DOWNLOAD;
- BcmGetGPIOPinInfo(Adapter, &GPIO_num, &dummyGPIONum, &uiLedIndex, &dummyIndex, currdriverstate);
-
- if(GPIO_num != DISABLE_GPIO_NUM)
- {
- timeout = 50;
- LED_Blink(Adapter, 1<<GPIO_num, uiLedIndex, timeout, -1,currdriverstate);
- }
+ case FW_DOWNLOAD:
+ /*
+ * BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
+ * LED_DUMP_INFO, DBG_LVL_ALL,
+ * "LED Thread: FW_DN_DONE called\n");
+ */
+ currdriverstate = FW_DOWNLOAD;
+ BcmGetGPIOPinInfo(Adapter, &GPIO_num, &dummyGPIONum,
+ &uiLedIndex, &dummyIndex, currdriverstate);
+
+ if (GPIO_num != DISABLE_GPIO_NUM) {
+ timeout = 50;
+ LED_Blink(Adapter, 1 << GPIO_num, uiLedIndex,
+ timeout, -1, currdriverstate);
}
break;
- case FW_DOWNLOAD_DONE:
- {
- currdriverstate = FW_DOWNLOAD_DONE;
- BcmGetGPIOPinInfo(Adapter, &GPIO_num, &dummyGPIONum, &uiLedIndex, &dummyIndex,currdriverstate);
- if(GPIO_num != DISABLE_GPIO_NUM)
- {
- TURN_ON_LED(1<<GPIO_num, uiLedIndex);
- }
- }
+ case FW_DOWNLOAD_DONE:
+ currdriverstate = FW_DOWNLOAD_DONE;
+ BcmGetGPIOPinInfo(Adapter, &GPIO_num, &dummyGPIONum,
+ &uiLedIndex, &dummyIndex, currdriverstate);
+ if (GPIO_num != DISABLE_GPIO_NUM)
+ TURN_ON_LED(1 << GPIO_num, uiLedIndex);
break;
- case SHUTDOWN_EXIT:
- //no break, continue to NO_NETWORK_ENTRY state as well.
-
- case NO_NETWORK_ENTRY:
- {
- currdriverstate = NO_NETWORK_ENTRY;
- BcmGetGPIOPinInfo(Adapter, &GPIO_num, &dummyGPIONum, &uiLedIndex,&dummyGPIONum,currdriverstate);
- if(GPIO_num != DISABLE_GPIO_NUM)
- {
- TURN_ON_LED(1<<GPIO_num, uiLedIndex);
- }
- }
+ case SHUTDOWN_EXIT:
+ /*
+ * no break, continue to NO_NETWORK_ENTRY
+ * state as well.
+ */
+ case NO_NETWORK_ENTRY:
+ currdriverstate = NO_NETWORK_ENTRY;
+ BcmGetGPIOPinInfo(Adapter, &GPIO_num, &dummyGPIONum,
+ &uiLedIndex, &dummyGPIONum, currdriverstate);
+ if (GPIO_num != DISABLE_GPIO_NUM)
+ TURN_ON_LED(1 << GPIO_num, uiLedIndex);
break;
- case NORMAL_OPERATION:
+ case NORMAL_OPERATION:
{
UCHAR GPIO_num_tx = DISABLE_GPIO_NUM;
UCHAR GPIO_num_rx = DISABLE_GPIO_NUM;
UCHAR uiLEDTx = 0;
UCHAR uiLEDRx = 0;
currdriverstate = NORMAL_OPERATION;
- Adapter->LEDInfo.bIdle_led_off = FALSE;
-
- BcmGetGPIOPinInfo(Adapter, &GPIO_num_tx, &GPIO_num_rx, &uiLEDTx,&uiLEDRx,currdriverstate);
- if((GPIO_num_tx == DISABLE_GPIO_NUM) && (GPIO_num_rx == DISABLE_GPIO_NUM))
- {
- GPIO_num = DISABLE_GPIO_NUM ;
- }
- else
- {
- /*If single LED is selected, use same for both Tx and Rx*/
- if(GPIO_num_tx == DISABLE_GPIO_NUM)
- {
+ Adapter->LEDInfo.bIdle_led_off = FALSE;
+
+ BcmGetGPIOPinInfo(Adapter, &GPIO_num_tx,
+ &GPIO_num_rx, &uiLEDTx, &uiLEDRx,
+ currdriverstate);
+ if ((GPIO_num_tx == DISABLE_GPIO_NUM) &&
+ (GPIO_num_rx ==
+ DISABLE_GPIO_NUM)) {
+ GPIO_num = DISABLE_GPIO_NUM;
+ } else {
+ /*
+ * If a single LED is selected, use the
+ * same one for both Tx and Rx
+ */
+ if (GPIO_num_tx == DISABLE_GPIO_NUM) {
GPIO_num_tx = GPIO_num_rx;
uiLEDTx = uiLEDRx;
- }
- else if(GPIO_num_rx == DISABLE_GPIO_NUM)
- {
+ } else if (GPIO_num_rx ==
+ DISABLE_GPIO_NUM) {
GPIO_num_rx = GPIO_num_tx;
uiLEDRx = uiLEDTx;
}
- /*Blink the LED in proportionate to Tx and Rx transmissions.*/
- LED_Proportional_Blink(Adapter, GPIO_num_tx, uiLEDTx, GPIO_num_rx, uiLEDRx,currdriverstate);
+ /*
+ * Blink the LED in proportion
+ * to Tx and Rx transmissions.
+ */
+ LED_Proportional_Blink(Adapter,
+ GPIO_num_tx, uiLEDTx,
+ GPIO_num_rx, uiLEDRx,
+ currdriverstate);
}
}
break;
- case LOWPOWER_MODE_ENTER:
- {
- currdriverstate = LOWPOWER_MODE_ENTER;
- if( DEVICE_POWERSAVE_MODE_AS_MANUAL_CLOCK_GATING == Adapter->ulPowerSaveMode)
- {
- /* Turn OFF all the LED */
- uiResetValue = 0;
- for(uiIndex =0; uiIndex < NUM_OF_LEDS; uiIndex++)
- {
- if(Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num != DISABLE_GPIO_NUM)
- TURN_OFF_LED((1<<Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num),uiIndex);
- }
-
+ case LOWPOWER_MODE_ENTER:
+ currdriverstate = LOWPOWER_MODE_ENTER;
+ if (DEVICE_POWERSAVE_MODE_AS_MANUAL_CLOCK_GATING ==
+ Adapter->ulPowerSaveMode) {
+ /* Turn OFF all the LED */
+ uiResetValue = 0;
+ for (uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++) {
+ if (Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num != DISABLE_GPIO_NUM)
+ TURN_OFF_LED((1 << Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num), uiIndex);
}
- /* Turn off LED And WAKE-UP for Sendinf IDLE mode ACK */
- Adapter->LEDInfo.bLedInitDone = FALSE;
- Adapter->LEDInfo.bIdle_led_off = TRUE;
- wake_up(&Adapter->LEDInfo.idleModeSyncEvent);
- GPIO_num = DISABLE_GPIO_NUM;
- break;
- }
- case IDLEMODE_CONTINUE:
- {
- currdriverstate = IDLEMODE_CONTINUE;
- GPIO_num = DISABLE_GPIO_NUM;
+
}
+ /* Turn off LED and wake up for sending IDLE mode ACK */
+ Adapter->LEDInfo.bLedInitDone = FALSE;
+ Adapter->LEDInfo.bIdle_led_off = TRUE;
+ wake_up(&Adapter->LEDInfo.idleModeSyncEvent);
+ GPIO_num = DISABLE_GPIO_NUM;
break;
- case IDLEMODE_EXIT:
- {
- }
+ case IDLEMODE_CONTINUE:
+ currdriverstate = IDLEMODE_CONTINUE;
+ GPIO_num = DISABLE_GPIO_NUM;
break;
- case DRIVER_HALT:
- {
- currdriverstate = DRIVER_HALT;
- GPIO_num = DISABLE_GPIO_NUM;
- for(uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++)
- {
- if(Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num !=
- DISABLE_GPIO_NUM)
- TURN_OFF_LED((1<<Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num),uiIndex);
- }
- //Adapter->DriverState = DRIVER_INIT;
+ case IDLEMODE_EXIT:
+ break;
+ case DRIVER_HALT:
+ currdriverstate = DRIVER_HALT;
+ GPIO_num = DISABLE_GPIO_NUM;
+ for (uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++) {
+ if (Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num
+ != DISABLE_GPIO_NUM)
+ TURN_OFF_LED((1 << Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num), uiIndex);
}
+ /* Adapter->DriverState = DRIVER_INIT; */
break;
- case LED_THREAD_INACTIVE :
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,"InActivating LED thread...");
- currdriverstate = LED_THREAD_INACTIVE;
- Adapter->LEDInfo.led_thread_running = BCM_LED_THREAD_RUNNING_INACTIVELY ;
- Adapter->LEDInfo.bLedInitDone = FALSE ;
- //disable ALL LED
- for(uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++)
- {
- if(Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num !=
- DISABLE_GPIO_NUM)
- TURN_OFF_LED((1<<Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num),uiIndex);
- }
+ case LED_THREAD_INACTIVE:
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
+ DBG_LVL_ALL, "InActivating LED thread...");
+ currdriverstate = LED_THREAD_INACTIVE;
+ Adapter->LEDInfo.led_thread_running =
+ BCM_LED_THREAD_RUNNING_INACTIVELY;
+ Adapter->LEDInfo.bLedInitDone = FALSE;
+ /* disable ALL LEDs */
+ for (uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++) {
+ if (Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num
+ != DISABLE_GPIO_NUM)
+ TURN_OFF_LED((1 << Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num), uiIndex);
}
break;
- case LED_THREAD_ACTIVE :
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,"Activating LED thread again...");
- if(Adapter->LinkUpStatus == FALSE)
- Adapter->DriverState = NO_NETWORK_ENTRY;
- else
- Adapter->DriverState = NORMAL_OPERATION;
+ case LED_THREAD_ACTIVE:
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
+ DBG_LVL_ALL, "Activating LED thread again...");
+ if (Adapter->LinkUpStatus == FALSE)
+ Adapter->DriverState = NO_NETWORK_ENTRY;
+ else
+ Adapter->DriverState = NORMAL_OPERATION;
- Adapter->LEDInfo.led_thread_running = BCM_LED_THREAD_RUNNING_ACTIVELY ;
- }
+ Adapter->LEDInfo.led_thread_running =
+ BCM_LED_THREAD_RUNNING_ACTIVELY;
+ break;
+ /* return; */
+ default:
break;
- //return;
- default:
- break;
}
}
Adapter->LEDInfo.led_thread_running = BCM_LED_THREAD_DISABLED;
@@ -839,49 +863,54 @@ int InitLedSettings(PMINI_ADAPTER Adapter)
BOOLEAN bEnableThread = TRUE;
UCHAR uiIndex = 0;
- /*Initially set BitPolarity to normal polarity. The bit 8 of LED type
- * is used to change the polarity of the LED.*/
+ /*
+ * Initially set BitPolarity to normal polarity. Bit 8 of the LED type
+ * is used to change the polarity of the LED.
+ */
- for(uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++) {
+ for (uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++)
Adapter->LEDInfo.LEDState[uiIndex].BitPolarity = 1;
- }
- /*Read the LED settings of CONFIG file and map it to GPIO numbers in EEPROM*/
+ /*
+ * Read the LED settings from the CONFIG file and map them
+ * to GPIO numbers in the EEPROM
+ */
Status = ReadConfigFileStructure(Adapter, &bEnableThread);
- if(STATUS_SUCCESS != Status)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,"LED Thread: FAILED in ReadConfigFileStructure\n");
+ if (STATUS_SUCCESS != Status) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
+ DBG_LVL_ALL,
+ "LED Thread: FAILED in ReadConfigFileStructure\n");
return Status;
}
- if(Adapter->LEDInfo.led_thread_running)
- {
- if(bEnableThread)
+ if (Adapter->LEDInfo.led_thread_running) {
+ if (bEnableThread) {
;
- else
- {
+ } else {
Adapter->DriverState = DRIVER_HALT;
wake_up(&Adapter->LEDInfo.notify_led_event);
- Adapter->LEDInfo.led_thread_running = BCM_LED_THREAD_DISABLED;
+ Adapter->LEDInfo.led_thread_running =
+ BCM_LED_THREAD_DISABLED;
}
- }
-
- else if(bEnableThread)
- {
- /*Create secondary thread to handle the LEDs*/
+ } else if (bEnableThread) {
+ /* Create secondary thread to handle the LEDs */
init_waitqueue_head(&Adapter->LEDInfo.notify_led_event);
init_waitqueue_head(&Adapter->LEDInfo.idleModeSyncEvent);
- Adapter->LEDInfo.led_thread_running = BCM_LED_THREAD_RUNNING_ACTIVELY;
- Adapter->LEDInfo.bIdle_led_off = FALSE;
- Adapter->LEDInfo.led_cntrl_threadid = kthread_run((int (*)(void *))
- LEDControlThread, Adapter, "led_control_thread");
- if(IS_ERR(Adapter->LEDInfo.led_cntrl_threadid))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL, "Not able to spawn Kernel Thread\n");
- Adapter->LEDInfo.led_thread_running = BCM_LED_THREAD_DISABLED;
- return PTR_ERR(Adapter->LEDInfo.led_cntrl_threadid);
- }
+ Adapter->LEDInfo.led_thread_running =
+ BCM_LED_THREAD_RUNNING_ACTIVELY;
+ Adapter->LEDInfo.bIdle_led_off = FALSE;
+ Adapter->LEDInfo.led_cntrl_threadid =
+ kthread_run((int (*)(void *)) LEDControlThread,
+ Adapter, "led_control_thread");
+ if (IS_ERR(Adapter->LEDInfo.led_cntrl_threadid)) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
+ DBG_LVL_ALL,
+ "Not able to spawn Kernel Thread\n");
+ Adapter->LEDInfo.led_thread_running =
+ BCM_LED_THREAD_DISABLED;
+ return PTR_ERR(Adapter->LEDInfo.led_cntrl_threadid);
+ }
}
return Status;
}
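
The HostDrvrConfig bytes walked by ReadConfigFileStructure() pack two fields per LED: bit 0x80 (the "bit 8" of the comments) selects reverse polarity, and the low seven bits give the LED type, which also indexes GPIO_Array. A minimal standalone sketch of that decoding, separate from the patch; the struct and helper names below are made up for illustration:

#include <stdio.h>

struct led_cfg {
	unsigned char polarity;   /* 1 = normal, 0 = reverse (bit 0x80 set) */
	unsigned char type;       /* low 7 bits of the config byte */
};

/* Decode one HostDrvrConfig byte the way ReadConfigFileStructure() does. */
static struct led_cfg decode_led_byte(unsigned char b)
{
	struct led_cfg cfg;

	cfg.polarity = (b & 0x80) ? 0 : 1;  /* bit 0x80 flips the polarity */
	cfg.type = b & 0x7f;                /* remaining bits are the LED type */
	return cfg;
}

int main(void)
{
	struct led_cfg cfg = decode_led_byte(0x83); /* reverse polarity, type 3 */

	printf("polarity=%u type=%u\n", cfg.polarity, cfg.type);
	return 0;
}
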
diff --git a/drivers/staging/bcm/nvm.c b/drivers/staging/bcm/nvm.c
index 3de0daf5edb2..7d703cb3c5e0 100644
--- a/drivers/staging/bcm/nvm.c
+++ b/drivers/staging/bcm/nvm.c
@@ -78,7 +78,7 @@ static UCHAR ReadEEPROMStatusRegister( PMINI_ADAPTER Adapter )
{
value=0;
uiStatus = 0 ;
- rdmalt( Adapter, EEPROM_SPI_Q_STATUS1_REG,&uiStatus, sizeof(uiStatus));
+ rdmalt(Adapter, EEPROM_SPI_Q_STATUS1_REG, &uiStatus, sizeof(uiStatus));
if(Adapter->device_removed == TRUE)
{
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Modem has got removed hence exiting....");
@@ -93,7 +93,7 @@ static UCHAR ReadEEPROMStatusRegister( PMINI_ADAPTER Adapter )
wrmalt( Adapter, EEPROM_SPI_Q_STATUS1_REG, &value, sizeof(value));
value =0;
- rdmalt(Adapter, EEPROM_READ_DATAQ_REG,&value, sizeof(value));
+ rdmalt(Adapter, EEPROM_READ_DATAQ_REG, &value, sizeof(value));
uiData = (UCHAR)value;
break;
@@ -102,8 +102,8 @@ static UCHAR ReadEEPROMStatusRegister( PMINI_ADAPTER Adapter )
dwRetries-- ;
if ( dwRetries == 0 )
{
- rdmalt(Adapter, EEPROM_SPI_Q_STATUS1_REG,&value, sizeof(value));
- rdmalt(Adapter, EEPROM_SPI_Q_STATUS_REG,&value1, sizeof(value1));
+ rdmalt(Adapter, EEPROM_SPI_Q_STATUS1_REG, &value, sizeof(value));
+ rdmalt(Adapter, EEPROM_SPI_Q_STATUS_REG, &value1, sizeof(value1));
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"0x3004 = %x 0x3008 = %x, retries = %d failed.\n",value,value1, MAX_EEPROM_RETRIES*RETRIES_PER_DELAY);
return uiData;
}
@@ -158,7 +158,7 @@ INT ReadBeceemEEPROMBulk( PMINI_ADAPTER Adapter,
{
uiStatus = 0;
- rdmalt( Adapter, EEPROM_SPI_Q_STATUS1_REG, &uiStatus, sizeof(uiStatus));
+ rdmalt(Adapter, EEPROM_SPI_Q_STATUS1_REG, &uiStatus, sizeof(uiStatus));
if(Adapter->device_removed == TRUE)
{
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Modem has got Removed.hence exiting from loop...");
@@ -202,8 +202,8 @@ INT ReadBeceemEEPROMBulk( PMINI_ADAPTER Adapter,
{
value=0;
value1=0;
- rdmalt(Adapter, EEPROM_SPI_Q_STATUS1_REG,&value, sizeof(value));
- rdmalt(Adapter, EEPROM_SPI_Q_STATUS_REG,&value1, sizeof(value1));
+ rdmalt(Adapter, EEPROM_SPI_Q_STATUS1_REG, &value, sizeof(value));
+ rdmalt(Adapter, EEPROM_SPI_Q_STATUS_REG, &value1, sizeof(value1));
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "dwNumWords %d 0x3004 = %x 0x3008 = %x retries = %d failed.\n", dwNumWords, value, value1, MAX_EEPROM_RETRIES*RETRIES_PER_DELAY);
return STATUS_FAILURE;
}
@@ -217,22 +217,22 @@ INT ReadBeceemEEPROMBulk( PMINI_ADAPTER Adapter,
pvalue = (PUCHAR)(pdwData + dwIndex);
value =0;
- rdmalt(Adapter, EEPROM_READ_DATAQ_REG,&value, sizeof(value));
+ rdmalt(Adapter, EEPROM_READ_DATAQ_REG, &value, sizeof(value));
pvalue[0] = value;
value = 0;
- rdmalt(Adapter, EEPROM_READ_DATAQ_REG,&value, sizeof(value));
+ rdmalt(Adapter, EEPROM_READ_DATAQ_REG, &value, sizeof(value));
pvalue[1] = value;
value =0;
- rdmalt(Adapter, EEPROM_READ_DATAQ_REG,&value, sizeof(value));
+ rdmalt(Adapter, EEPROM_READ_DATAQ_REG, &value, sizeof(value));
pvalue[2] = value;
value = 0;
- rdmalt(Adapter, EEPROM_READ_DATAQ_REG,&value, sizeof(value));
+ rdmalt(Adapter, EEPROM_READ_DATAQ_REG, &value, sizeof(value));
pvalue[3] = value;
}
@@ -445,6 +445,7 @@ static INT BeceemFlashBulkRead(
UINT uiBytesToRead = uiNumBytes;
INT Status = 0;
UINT uiPartOffset = 0;
+ int bytes;
if(Adapter->device_removed )
{
@@ -469,9 +470,9 @@ static INT BeceemFlashBulkRead(
uiBytesToRead = MAX_RW_SIZE - (uiOffset%MAX_RW_SIZE);
uiBytesToRead = MIN(uiNumBytes,uiBytesToRead);
- if(rdm(Adapter,uiPartOffset, (PCHAR)pBuffer+uiIndex,uiBytesToRead))
- {
- Status = -1;
+ bytes = rdm(Adapter, uiPartOffset, (PCHAR)pBuffer+uiIndex, uiBytesToRead);
+ if (bytes < 0) {
+ Status = bytes;
Adapter->SelectedChip = RESET_CHIP_SELECT;
return Status;
}
@@ -488,9 +489,9 @@ static INT BeceemFlashBulkRead(
uiBytesToRead = MIN(uiNumBytes,MAX_RW_SIZE);
- if(rdm(Adapter,uiPartOffset, (PCHAR)pBuffer+uiIndex,uiBytesToRead))
- {
- Status = -1;
+ bytes = rdm(Adapter, uiPartOffset, (PCHAR)pBuffer+uiIndex, uiBytesToRead);
+ if (bytes < 0) {
+ Status = bytes;
break;
}
@@ -613,6 +614,7 @@ static INT FlashSectorErase(PMINI_ADAPTER Adapter,
UINT iIndex = 0, iRetries = 0;
UINT uiStatus = 0;
UINT value;
+ int bytes;
for(iIndex=0;iIndex<numOfSectors;iIndex++)
{
@@ -632,10 +634,11 @@ static INT FlashSectorErase(PMINI_ADAPTER Adapter,
return STATUS_FAILURE;
}
- if(rdmalt(Adapter, FLASH_SPI_READQ_REG, &uiStatus, sizeof(uiStatus)) < 0 )
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Reading status of FLASH_SPI_READQ_REG fails");
- return STATUS_FAILURE;
+ bytes = rdmalt(Adapter, FLASH_SPI_READQ_REG, &uiStatus, sizeof(uiStatus));
+ if (bytes < 0) {
+ uiStatus = bytes;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Reading status of FLASH_SPI_READQ_REG fails");
+ return uiStatus;
}
iRetries++;
//After every try lets make the CPU free for 10 ms. generally time taken by the
@@ -679,6 +682,7 @@ static INT flashByteWrite(
UINT value;
ULONG ulData = *(PUCHAR)pData;
+ int bytes;
//
// need not write 0xFF because write requires an erase and erase will
@@ -720,10 +724,11 @@ static INT flashByteWrite(
return STATUS_FAILURE;
}
//__udelay(1);
- if(rdmalt(Adapter, FLASH_SPI_READQ_REG, &uiStatus, sizeof(uiStatus)) < 0)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Reading status of FLASH_SPI_READQ_REG fails");
- return STATUS_FAILURE;
+ bytes = rdmalt(Adapter, FLASH_SPI_READQ_REG, &uiStatus, sizeof(uiStatus));
+ if (bytes < 0) {
+ uiStatus = bytes;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Reading status of FLASH_SPI_READQ_REG fails");
+ return uiStatus;
}
iRetries--;
if( iRetries && ((iRetries % FLASH_PER_RETRIES_DELAY) == 0))
@@ -771,6 +776,7 @@ static INT flashWrite(
UINT value;
UINT uiErasePattern[4] = {0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF};
+ int bytes;
//
// need not write 0xFFFFFFFF because write requires an erase and erase will
// make whole sector 0xFFFFFFFF.
@@ -803,10 +809,11 @@ static INT flashWrite(
return STATUS_FAILURE;
}
//__udelay(1);
- if(rdmalt(Adapter, FLASH_SPI_READQ_REG, &uiStatus, sizeof(uiStatus)) < 0 )
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Reading status of FLASH_SPI_READQ_REG fails");
- return STATUS_FAILURE;
+ bytes = rdmalt(Adapter, FLASH_SPI_READQ_REG, &uiStatus, sizeof(uiStatus));
+ if (bytes < 0) {
+ uiStatus = bytes;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Reading status of FLASH_SPI_READQ_REG fails");
+ return uiStatus;
}
iRetries--;
@@ -849,6 +856,7 @@ static INT flashByteWriteStatus(
INT iRetries = MAX_FLASH_RETRIES * FLASH_PER_RETRIES_DELAY; //3
ULONG ulData = *(PUCHAR)pData;
UINT value;
+ int bytes;
//
// need not write 0xFFFFFFFF because write requires an erase and erase will
@@ -891,10 +899,11 @@ static INT flashByteWriteStatus(
return STATUS_FAILURE;
}
//__udelay(1);
- if(rdmalt(Adapter, FLASH_SPI_READQ_REG, &uiStatus, sizeof(uiStatus)) < 0)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Reading status of FLASH_SPI_READQ_REG fails");
- return STATUS_FAILURE;
+ bytes = rdmalt(Adapter, FLASH_SPI_READQ_REG, &uiStatus, sizeof(uiStatus));
+ if (bytes < 0) {
+ uiStatus = bytes;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Reading status of FLASH_SPI_READQ_REG fails");
+ return uiStatus;
}
iRetries--;
@@ -935,6 +944,7 @@ static INT flashWriteStatus(
//UINT uiReadBack = 0;
UINT value;
UINT uiErasePattern[4] = {0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF};
+ int bytes;
//
// need not write 0xFFFFFFFF because write requires an erase and erase will
@@ -967,10 +977,11 @@ static INT flashWriteStatus(
return STATUS_FAILURE;
}
//__udelay(1);
- if(rdmalt(Adapter, FLASH_SPI_READQ_REG, &uiStatus, sizeof(uiStatus)) < 0)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Reading status of FLASH_SPI_READQ_REG fails");
- return STATUS_FAILURE;
+ bytes = rdmalt(Adapter, FLASH_SPI_READQ_REG, &uiStatus, sizeof(uiStatus));
+ if (bytes < 0) {
+ uiStatus = bytes;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Reading status of FLASH_SPI_READQ_REG fails");
+ return uiStatus;
}
iRetries--;
//this will ensure that in there will be no changes in the current path.
@@ -1841,7 +1852,7 @@ static INT BeceemEEPROMWritePage( PMINI_ADAPTER Adapter, UINT uiData[], UINT uiO
* What we are checking if the previous write has completed, and this
* may take time. We should wait till the Empty bit is set. */
uiStatus = 0;
- rdmalt( Adapter, EEPROM_SPI_Q_STATUS1_REG,&uiStatus, sizeof(uiStatus)) ;
+ rdmalt(Adapter, EEPROM_SPI_Q_STATUS1_REG, &uiStatus, sizeof(uiStatus));
while ( ( uiStatus & EEPROM_WRITE_QUEUE_EMPTY ) == 0 )
{
uiRetries--;
@@ -1855,7 +1866,7 @@ static INT BeceemEEPROMWritePage( PMINI_ADAPTER Adapter, UINT uiData[], UINT uiO
msleep(1);
uiStatus = 0;
- rdmalt( Adapter, EEPROM_SPI_Q_STATUS1_REG,&uiStatus, sizeof(uiStatus)) ;
+ rdmalt(Adapter, EEPROM_SPI_Q_STATUS1_REG, &uiStatus, sizeof(uiStatus));
if(Adapter->device_removed == TRUE)
{
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Modem got removed hence exiting from loop....");
@@ -2500,7 +2511,7 @@ static ULONG BcmReadFlashRDID(PMINI_ADAPTER Adapter)
// Read SPI READQ REG. The output will be WWXXYYZZ.
// The ID is 3Bytes long and is WWXXYY. ZZ needs to be Ignored.
//
- rdmalt(Adapter, FLASH_SPI_READQ_REG,(PUINT)&ulRDID, sizeof(ulRDID));
+ rdmalt(Adapter, FLASH_SPI_READQ_REG, (PUINT)&ulRDID, sizeof(ulRDID));
return (ulRDID >>8);
@@ -4735,8 +4746,8 @@ static INT BcmDoChipSelect(PMINI_ADAPTER Adapter, UINT offset)
Adapter->SelectedChip = ChipNum ;
//bit[13..12] will select the appropriate chip
- rdmalt(Adapter,FLASH_CONFIG_REG, &FlashConfig, 4);
- rdmalt(Adapter,FLASH_GPIO_CONFIG_REG, &GPIOConfig, 4);
+ rdmalt(Adapter, FLASH_CONFIG_REG, &FlashConfig, 4);
+ rdmalt(Adapter, FLASH_GPIO_CONFIG_REG, &GPIOConfig, 4);
{
switch(ChipNum)
diff --git a/drivers/staging/bcm/target_params.h b/drivers/staging/bcm/target_params.h
index 2d8b8a367b38..14876388b879 100644
--- a/drivers/staging/bcm/target_params.h
+++ b/drivers/staging/bcm/target_params.h
@@ -72,8 +72,8 @@ typedef struct _TARGET_PARAMS
// removed SHUT down related 'unused' params from here to sync 4.x and 5.x CFG files..
//BAMC Related Parameters
- //Bit 0-15 Band AMC signaling configuration: Bit 1 = 1 Enable Band AMC signaling.
- //bit 16-31 Band AMC Data configuration: Bit 16 = 1 Band AMC 2x3 support.
+ //Bit 0-15 Band AMC signaling configuration: Bit 1 = 1 – Enable Band AMC signaling.
+ //bit 16-31 Band AMC Data configuration: Bit 16 = 1 – Band AMC 2x3 support.
B_UINT32 m_u32BandAMCEnable;
} stTargetParams,TARGET_PARAMS,*PTARGET_PARAMS, STARGETPARAMS, *PSTARGETPARAMS;
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index 21d8c1c16cd8..9bcf87ae4c00 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -61,7 +61,7 @@ EXPORT_SYMBOL(comedi_debug);
module_param(comedi_debug, int, 0644);
#endif
-int comedi_autoconfig = 1;
+bool comedi_autoconfig = 1;
module_param(comedi_autoconfig, bool, 0444);
static int comedi_num_legacy_minors;
@@ -671,7 +671,7 @@ static int do_insnlist_ioctl(struct comedi_device *dev,
}
insns =
- kmalloc(sizeof(struct comedi_insn) * insnlist.n_insns, GFP_KERNEL);
+ kcalloc(insnlist.n_insns, sizeof(struct comedi_insn), GFP_KERNEL);
if (!insns) {
DPRINTK("kmalloc failed\n");
ret = -ENOMEM;
@@ -1432,7 +1432,21 @@ static int do_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
return ret;
}
-static void comedi_unmap(struct vm_area_struct *area)
+
+static void comedi_vm_open(struct vm_area_struct *area)
+{
+ struct comedi_async *async;
+ struct comedi_device *dev;
+
+ async = area->vm_private_data;
+ dev = async->subdevice->device;
+
+ mutex_lock(&dev->mutex);
+ async->mmap_count++;
+ mutex_unlock(&dev->mutex);
+}
+
+static void comedi_vm_close(struct vm_area_struct *area)
{
struct comedi_async *async;
struct comedi_device *dev;
@@ -1446,15 +1460,13 @@ static void comedi_unmap(struct vm_area_struct *area)
}
static struct vm_operations_struct comedi_vm_ops = {
- .close = comedi_unmap,
+ .open = comedi_vm_open,
+ .close = comedi_vm_close,
};
static int comedi_mmap(struct file *file, struct vm_area_struct *vma)
{
const unsigned minor = iminor(file->f_dentry->d_inode);
- struct comedi_device_file_info *dev_file_info =
- comedi_get_device_file_info(minor);
- struct comedi_device *dev = dev_file_info->device;
struct comedi_async *async = NULL;
unsigned long start = vma->vm_start;
unsigned long size;
@@ -1462,6 +1474,15 @@ static int comedi_mmap(struct file *file, struct vm_area_struct *vma)
int i;
int retval;
struct comedi_subdevice *s;
+ struct comedi_device_file_info *dev_file_info;
+ struct comedi_device *dev;
+
+ dev_file_info = comedi_get_device_file_info(minor);
+ if (dev_file_info == NULL)
+ return -ENODEV;
+ dev = dev_file_info->device;
+ if (dev == NULL)
+ return -ENODEV;
mutex_lock(&dev->mutex);
if (!dev->attached) {
@@ -1528,11 +1549,17 @@ static unsigned int comedi_poll(struct file *file, poll_table * wait)
{
unsigned int mask = 0;
const unsigned minor = iminor(file->f_dentry->d_inode);
- struct comedi_device_file_info *dev_file_info =
- comedi_get_device_file_info(minor);
- struct comedi_device *dev = dev_file_info->device;
struct comedi_subdevice *read_subdev;
struct comedi_subdevice *write_subdev;
+ struct comedi_device_file_info *dev_file_info;
+ struct comedi_device *dev;
+ dev_file_info = comedi_get_device_file_info(minor);
+
+ if (dev_file_info == NULL)
+ return -ENODEV;
+ dev = dev_file_info->device;
+ if (dev == NULL)
+ return -ENODEV;
mutex_lock(&dev->mutex);
if (!dev->attached) {
@@ -1578,9 +1605,15 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
int n, m, count = 0, retval = 0;
DECLARE_WAITQUEUE(wait, current);
const unsigned minor = iminor(file->f_dentry->d_inode);
- struct comedi_device_file_info *dev_file_info =
- comedi_get_device_file_info(minor);
- struct comedi_device *dev = dev_file_info->device;
+ struct comedi_device_file_info *dev_file_info;
+ struct comedi_device *dev;
+ dev_file_info = comedi_get_device_file_info(minor);
+
+ if (dev_file_info == NULL)
+ return -ENODEV;
+ dev = dev_file_info->device;
+ if (dev == NULL)
+ return -ENODEV;
if (!dev->attached) {
DPRINTK("no driver configured on comedi%i\n", dev->minor);
@@ -1640,11 +1673,11 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
retval = -EAGAIN;
break;
}
+ schedule();
if (signal_pending(current)) {
retval = -ERESTARTSYS;
break;
}
- schedule();
if (!s->busy)
break;
if (s->busy != file) {
@@ -1683,9 +1716,15 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
int n, m, count = 0, retval = 0;
DECLARE_WAITQUEUE(wait, current);
const unsigned minor = iminor(file->f_dentry->d_inode);
- struct comedi_device_file_info *dev_file_info =
- comedi_get_device_file_info(minor);
- struct comedi_device *dev = dev_file_info->device;
+ struct comedi_device_file_info *dev_file_info;
+ struct comedi_device *dev;
+ dev_file_info = comedi_get_device_file_info(minor);
+
+ if (dev_file_info == NULL)
+ return -ENODEV;
+ dev = dev_file_info->device;
+ if (dev == NULL)
+ return -ENODEV;
if (!dev->attached) {
DPRINTK("no driver configured on comedi%i\n", dev->minor);
@@ -1741,11 +1780,11 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
retval = -EAGAIN;
break;
}
+ schedule();
if (signal_pending(current)) {
retval = -ERESTARTSYS;
break;
}
- schedule();
if (!s->busy) {
retval = 0;
break;
@@ -1885,11 +1924,17 @@ ok:
static int comedi_close(struct inode *inode, struct file *file)
{
const unsigned minor = iminor(inode);
- struct comedi_device_file_info *dev_file_info =
- comedi_get_device_file_info(minor);
- struct comedi_device *dev = dev_file_info->device;
struct comedi_subdevice *s = NULL;
int i;
+ struct comedi_device_file_info *dev_file_info;
+ struct comedi_device *dev;
+ dev_file_info = comedi_get_device_file_info(minor);
+
+ if (dev_file_info == NULL)
+ return -ENODEV;
+ dev = dev_file_info->device;
+ if (dev == NULL)
+ return -ENODEV;
mutex_lock(&dev->mutex);
@@ -1923,10 +1968,15 @@ static int comedi_close(struct inode *inode, struct file *file)
static int comedi_fasync(int fd, struct file *file, int on)
{
const unsigned minor = iminor(file->f_dentry->d_inode);
- struct comedi_device_file_info *dev_file_info =
- comedi_get_device_file_info(minor);
+ struct comedi_device_file_info *dev_file_info;
+ struct comedi_device *dev;
+ dev_file_info = comedi_get_device_file_info(minor);
- struct comedi_device *dev = dev_file_info->device;
+ if (dev_file_info == NULL)
+ return -ENODEV;
+ dev = dev_file_info->device;
+ if (dev == NULL)
+ return -ENODEV;
return fasync_helper(fd, file, on, &dev->async_queue);
}
@@ -2429,18 +2479,18 @@ static ssize_t store_max_read_buffer_kb(struct device *dev,
const char *buf, size_t count)
{
struct comedi_device_file_info *info = dev_get_drvdata(dev);
- unsigned long new_max_size_kb;
- uint64_t new_max_size;
+ unsigned int new_max_size_kb;
+ unsigned int new_max_size;
+ int ret;
struct comedi_subdevice *const read_subdevice =
comedi_get_read_subdevice(info);
- if (strict_strtoul(buf, 10, &new_max_size_kb))
- return -EINVAL;
- if (new_max_size_kb != (uint32_t) new_max_size_kb)
- return -EINVAL;
- new_max_size = ((uint64_t) new_max_size_kb) * bytes_per_kibi;
- if (new_max_size != (uint32_t) new_max_size)
+ ret = kstrtouint(buf, 10, &new_max_size_kb);
+ if (ret)
+ return ret;
+ if (new_max_size_kb > (UINT_MAX / bytes_per_kibi))
return -EINVAL;
+ new_max_size = new_max_size_kb * bytes_per_kibi;
mutex_lock(&info->device->mutex);
if (read_subdevice == NULL ||
@@ -2490,19 +2540,19 @@ static ssize_t store_read_buffer_kb(struct device *dev,
const char *buf, size_t count)
{
struct comedi_device_file_info *info = dev_get_drvdata(dev);
- unsigned long new_size_kb;
- uint64_t new_size;
+ unsigned int new_size_kb;
+ unsigned int new_size;
int retval;
+ int ret;
struct comedi_subdevice *const read_subdevice =
comedi_get_read_subdevice(info);
- if (strict_strtoul(buf, 10, &new_size_kb))
- return -EINVAL;
- if (new_size_kb != (uint32_t) new_size_kb)
- return -EINVAL;
- new_size = ((uint64_t) new_size_kb) * bytes_per_kibi;
- if (new_size != (uint32_t) new_size)
+ ret = kstrtouint(buf, 10, &new_size_kb);
+ if (ret)
+ return ret;
+ if (new_size_kb > (UINT_MAX / bytes_per_kibi))
return -EINVAL;
+ new_size = new_size_kb * bytes_per_kibi;
mutex_lock(&info->device->mutex);
if (read_subdevice == NULL ||
@@ -2556,18 +2606,18 @@ static ssize_t store_max_write_buffer_kb(struct device *dev,
const char *buf, size_t count)
{
struct comedi_device_file_info *info = dev_get_drvdata(dev);
- unsigned long new_max_size_kb;
- uint64_t new_max_size;
+ unsigned int new_max_size_kb;
+ unsigned int new_max_size;
+ int ret;
struct comedi_subdevice *const write_subdevice =
comedi_get_write_subdevice(info);
- if (strict_strtoul(buf, 10, &new_max_size_kb))
- return -EINVAL;
- if (new_max_size_kb != (uint32_t) new_max_size_kb)
- return -EINVAL;
- new_max_size = ((uint64_t) new_max_size_kb) * bytes_per_kibi;
- if (new_max_size != (uint32_t) new_max_size)
+ ret = kstrtouint(buf, 10, &new_max_size_kb);
+ if (ret)
+ return ret;
+ if (new_max_size_kb > (UINT_MAX / bytes_per_kibi))
return -EINVAL;
+ new_max_size = new_max_size_kb * bytes_per_kibi;
mutex_lock(&info->device->mutex);
if (write_subdevice == NULL ||
@@ -2617,19 +2667,19 @@ static ssize_t store_write_buffer_kb(struct device *dev,
const char *buf, size_t count)
{
struct comedi_device_file_info *info = dev_get_drvdata(dev);
- unsigned long new_size_kb;
- uint64_t new_size;
+ unsigned int new_size_kb;
+ unsigned int new_size;
int retval;
+ int ret;
struct comedi_subdevice *const write_subdevice =
comedi_get_write_subdevice(info);
- if (strict_strtoul(buf, 10, &new_size_kb))
- return -EINVAL;
- if (new_size_kb != (uint32_t) new_size_kb)
+ ret = kstrtouint(buf, 10, &new_size_kb);
+ if (ret)
+ return ret;
+ if (new_size_kb > (UINT_MAX / bytes_per_kibi))
return -EINVAL;
new_size = ((uint64_t) new_size_kb) * bytes_per_kibi;
- if (new_size != (uint32_t) new_size)
- return -EINVAL;
mutex_lock(&info->device->mutex);
if (write_subdevice == NULL ||
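
The buffer-size store handlers above switch from strict_strtoul() plus 64-bit round-trip checks to kstrtouint() with a simple multiply-overflow guard: reject the input when new_size_kb > UINT_MAX / bytes_per_kibi, then compute the byte count. A standalone sketch of the same parse-and-guard pattern in plain C; bytes_per_kibi is assumed to be 1024 here and the function name is made up:

#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define BYTES_PER_KIBI 1024U  /* assumed value of comedi's bytes_per_kibi */

/* Parse a kB count and convert it to bytes, rejecting values that would
 * overflow an unsigned int -- the same check the patched handlers use. */
static int parse_buffer_kb(const char *buf, unsigned int *out_bytes)
{
	char *end;
	unsigned long kb = strtoul(buf, &end, 10);

	if (end == buf || kb > UINT_MAX)
		return -EINVAL;
	if (kb > UINT_MAX / BYTES_PER_KIBI)   /* kb * 1024 would overflow */
		return -EINVAL;
	*out_bytes = (unsigned int)kb * BYTES_PER_KIBI;
	return 0;
}

int main(void)
{
	unsigned int bytes;

	if (parse_buffer_kb("64", &bytes) == 0)
		printf("64 kB -> %u bytes\n", bytes);
	if (parse_buffer_kb("5000000000", &bytes) != 0)
		printf("oversized value rejected\n");
	return 0;
}
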
diff --git a/drivers/staging/comedi/comedi_fops.h b/drivers/staging/comedi/comedi_fops.h
index da4b4f5553f5..006cf14c577a 100644
--- a/drivers/staging/comedi/comedi_fops.h
+++ b/drivers/staging/comedi/comedi_fops.h
@@ -1,10 +1,11 @@
#ifndef _COMEDI_FOPS_H
#define _COMEDI_FOPS_H
+#include <linux/types.h>
extern struct class *comedi_class;
extern const struct file_operations comedi_fops;
-extern int comedi_autoconfig;
+extern bool comedi_autoconfig;
extern struct comedi_driver *comedi_drivers;
#endif /* _COMEDI_FOPS_H */
diff --git a/drivers/staging/comedi/drivers/addi-data/addi_common.c b/drivers/staging/comedi/drivers/addi-data/addi_common.c
index 6fb7594319c6..ca5bd9b8704a 100644
--- a/drivers/staging/comedi/drivers/addi-data/addi_common.c
+++ b/drivers/staging/comedi/drivers/addi-data/addi_common.c
@@ -145,78 +145,77 @@ void fpu_end(void)
static DEFINE_PCI_DEVICE_TABLE(addi_apci_tbl) = {
#ifdef CONFIG_APCI_3120
- {APCI3120_BOARD_VENDOR_ID, 0x818D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_DEVICE(APCI3120_BOARD_VENDOR_ID, 0x818D)},
#endif
#ifdef CONFIG_APCI_1032
- {APCI1032_BOARD_VENDOR_ID, 0x1003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_DEVICE(APCI1032_BOARD_VENDOR_ID, 0x1003)},
#endif
#ifdef CONFIG_APCI_1516
- {APCI1516_BOARD_VENDOR_ID, 0x1001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_DEVICE(APCI1516_BOARD_VENDOR_ID, 0x1001)},
#endif
#ifdef CONFIG_APCI_2016
- {APCI2016_BOARD_VENDOR_ID, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_DEVICE(APCI2016_BOARD_VENDOR_ID, 0x1002)},
#endif
#ifdef CONFIG_APCI_2032
- {APCI2032_BOARD_VENDOR_ID, 0x1004, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_DEVICE(APCI2032_BOARD_VENDOR_ID, 0x1004)},
#endif
#ifdef CONFIG_APCI_2200
- {APCI2200_BOARD_VENDOR_ID, 0x1005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_DEVICE(APCI2200_BOARD_VENDOR_ID, 0x1005)},
#endif
#ifdef CONFIG_APCI_1564
- {APCI1564_BOARD_VENDOR_ID, 0x1006, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_DEVICE(APCI1564_BOARD_VENDOR_ID, 0x1006)},
#endif
#ifdef CONFIG_APCI_1500
- {APCI1500_BOARD_VENDOR_ID, 0x80fc, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_DEVICE(APCI1500_BOARD_VENDOR_ID, 0x80fc)},
#endif
#ifdef CONFIG_APCI_3001
- {APCI3120_BOARD_VENDOR_ID, 0x828D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_DEVICE(APCI3120_BOARD_VENDOR_ID, 0x828D)},
#endif
#ifdef CONFIG_APCI_3501
- {APCI3501_BOARD_VENDOR_ID, 0x3001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_DEVICE(APCI3501_BOARD_VENDOR_ID, 0x3001)},
#endif
#ifdef CONFIG_APCI_035
- {APCI035_BOARD_VENDOR_ID, 0x0300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_DEVICE(APCI035_BOARD_VENDOR_ID, 0x0300)},
#endif
#ifdef CONFIG_APCI_3200
- {APCI3200_BOARD_VENDOR_ID, 0x3000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_DEVICE(APCI3200_BOARD_VENDOR_ID, 0x3000)},
#endif
#ifdef CONFIG_APCI_3300
- {APCI3200_BOARD_VENDOR_ID, 0x3007, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_DEVICE(APCI3200_BOARD_VENDOR_ID, 0x3007)},
#endif
#ifdef CONFIG_APCI_1710
- {APCI1710_BOARD_VENDOR_ID, APCI1710_BOARD_DEVICE_ID,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_DEVICE(APCI1710_BOARD_VENDOR_ID, APCI1710_BOARD_DEVICE_ID)},
#endif
#ifdef CONFIG_APCI_16XX
- {0x15B8, 0x1009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x100A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x1009)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x100A)},
#endif
#ifdef CONFIG_APCI_3XXX
- {0x15B8, 0x3010, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x300F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x300E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x3013, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x3014, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x3015, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x3016, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x3017, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x3018, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x3019, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x301A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x301B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x301C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x301D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x301E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x301F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x3020, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x3021, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x3022, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x3023, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x300B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x3002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x3003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x3004, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x3024, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3010)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x300F)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x300E)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3013)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3014)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3015)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3016)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3017)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3018)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3019)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x301A)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x301B)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x301C)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x301D)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x301E)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x301F)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3020)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3021)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3022)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3023)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x300B)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3002)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3003)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3004)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3024)},
#endif
{0}
};
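
The table entries above replace the open-coded seven-field initializers with PCI_DEVICE(), which fills the vendor and device IDs and wildcards the subsystem IDs; the remaining class, class_mask and driver_data fields are left zero by the designated initializer. A rough, self-contained illustration of that expansion follows; the demo struct and macro names are made up, so check include/linux/pci.h in the tree you build against:

#include <stdio.h>

#define PCI_ANY_ID_DEMO (~0U)

struct pci_device_id_demo {
	unsigned int vendor, device;        /* vendor and device IDs */
	unsigned int subvendor, subdevice;  /* subsystem IDs, usually wildcarded */
	unsigned int class, class_mask;     /* left zero by the entries above */
	unsigned long driver_data;
};

/* Same shape as the kernel's PCI_DEVICE(): designated initializers that
 * set four fields and leave the rest zero. */
#define PCI_DEVICE_DEMO(vend, dev) \
	.vendor = (vend), .device = (dev), \
	.subvendor = PCI_ANY_ID_DEMO, .subdevice = PCI_ANY_ID_DEMO

static const struct pci_device_id_demo tbl[] = {
	{ PCI_DEVICE_DEMO(0x15B8, 0x1009) }, /* like the PCI_VENDOR_ID_ADDIDATA/0x1009 entry */
	{ 0 }
};

int main(void)
{
	printf("vendor=0x%04X device=0x%04X subvendor=0x%X\n",
	       tbl[0].vendor, tbl[0].device, tbl[0].subvendor);
	return 0;
}
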
@@ -1019,7 +1018,7 @@ static const struct addi_board boardtypes[] = {
#endif
#ifdef CONFIG_APCI_16XX
{"apci1648",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x1009,
128,
0,
@@ -1075,7 +1074,7 @@ static const struct addi_board boardtypes[] = {
i_APCI16XX_InsnBitsWriteTTLIO},
{"apci1696",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x100A,
128,
0,
@@ -1132,7 +1131,7 @@ static const struct addi_board boardtypes[] = {
#endif
#ifdef CONFIG_APCI_3XXX
{"apci3000-16",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x3010,
256,
256,
@@ -1188,7 +1187,7 @@ static const struct addi_board boardtypes[] = {
i_APCI3XXX_InsnWriteTTLIO},
{"apci3000-8",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x300F,
256,
256,
@@ -1244,7 +1243,7 @@ static const struct addi_board boardtypes[] = {
i_APCI3XXX_InsnWriteTTLIO},
{"apci3000-4",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x300E,
256,
256,
@@ -1300,7 +1299,7 @@ static const struct addi_board boardtypes[] = {
i_APCI3XXX_InsnWriteTTLIO},
{"apci3006-16",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x3013,
256,
256,
@@ -1356,7 +1355,7 @@ static const struct addi_board boardtypes[] = {
i_APCI3XXX_InsnWriteTTLIO},
{"apci3006-8",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x3014,
256,
256,
@@ -1412,7 +1411,7 @@ static const struct addi_board boardtypes[] = {
i_APCI3XXX_InsnWriteTTLIO},
{"apci3006-4",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x3015,
256,
256,
@@ -1468,7 +1467,7 @@ static const struct addi_board boardtypes[] = {
i_APCI3XXX_InsnWriteTTLIO},
{"apci3010-16",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x3016,
256,
256,
@@ -1524,7 +1523,7 @@ static const struct addi_board boardtypes[] = {
i_APCI3XXX_InsnWriteTTLIO},
{"apci3010-8",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x3017,
256,
256,
@@ -1580,7 +1579,7 @@ static const struct addi_board boardtypes[] = {
i_APCI3XXX_InsnWriteTTLIO},
{"apci3010-4",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x3018,
256,
256,
@@ -1636,7 +1635,7 @@ static const struct addi_board boardtypes[] = {
i_APCI3XXX_InsnWriteTTLIO},
{"apci3016-16",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x3019,
256,
256,
@@ -1692,7 +1691,7 @@ static const struct addi_board boardtypes[] = {
i_APCI3XXX_InsnWriteTTLIO},
{"apci3016-8",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x301A,
256,
256,
@@ -1748,7 +1747,7 @@ static const struct addi_board boardtypes[] = {
i_APCI3XXX_InsnWriteTTLIO},
{"apci3016-4",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x301B,
256,
256,
@@ -1804,7 +1803,7 @@ static const struct addi_board boardtypes[] = {
i_APCI3XXX_InsnWriteTTLIO},
{"apci3100-16-4",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x301C,
256,
256,
@@ -1860,7 +1859,7 @@ static const struct addi_board boardtypes[] = {
i_APCI3XXX_InsnWriteTTLIO},
{"apci3100-8-4",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x301D,
256,
256,
@@ -1916,7 +1915,7 @@ static const struct addi_board boardtypes[] = {
i_APCI3XXX_InsnWriteTTLIO},
{"apci3106-16-4",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x301E,
256,
256,
@@ -1972,7 +1971,7 @@ static const struct addi_board boardtypes[] = {
i_APCI3XXX_InsnWriteTTLIO},
{"apci3106-8-4",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x301F,
256,
256,
@@ -2028,7 +2027,7 @@ static const struct addi_board boardtypes[] = {
i_APCI3XXX_InsnWriteTTLIO},
{"apci3110-16-4",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x3020,
256,
256,
@@ -2084,7 +2083,7 @@ static const struct addi_board boardtypes[] = {
i_APCI3XXX_InsnWriteTTLIO},
{"apci3110-8-4",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x3021,
256,
256,
@@ -2140,7 +2139,7 @@ static const struct addi_board boardtypes[] = {
i_APCI3XXX_InsnWriteTTLIO},
{"apci3116-16-4",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x3022,
256,
256,
@@ -2196,7 +2195,7 @@ static const struct addi_board boardtypes[] = {
i_APCI3XXX_InsnWriteTTLIO},
{"apci3116-8-4",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x3023,
256,
256,
@@ -2252,7 +2251,7 @@ static const struct addi_board boardtypes[] = {
i_APCI3XXX_InsnWriteTTLIO},
{"apci3003",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x300B,
256,
256,
@@ -2307,7 +2306,7 @@ static const struct addi_board boardtypes[] = {
NULL},
{"apci3002-16",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x3002,
256,
256,
@@ -2362,7 +2361,7 @@ static const struct addi_board boardtypes[] = {
NULL},
{"apci3002-8",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x3003,
256,
256,
@@ -2417,7 +2416,7 @@ static const struct addi_board boardtypes[] = {
NULL},
{"apci3002-4",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x3004,
256,
256,
@@ -2472,7 +2471,7 @@ static const struct addi_board boardtypes[] = {
NULL},
{"apci3500",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x3024,
256,
256,
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c
index c75a1a1fd775..f9545b064eaf 100644
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c
+++ b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c
@@ -3598,7 +3598,7 @@ int i_APCI3200_InterruptHandleEos(struct comedi_device *dev)
n = comedi_buf_write_alloc(s->async,
(7 + 12) * sizeof(unsigned int));
- /* If not enougth memory available, event is set to Comedi Buffer Errror */
+ /* If not enough memory available, event is set to Comedi Buffer Error */
if (n > ((7 + 12) * sizeof(unsigned int))) {
printk("\ncomedi_buf_write_alloc n = %i", n);
s->async->events |= COMEDI_CB_ERROR;
diff --git a/drivers/staging/comedi/drivers/adl_pci7230.c b/drivers/staging/comedi/drivers/adl_pci7230.c
index 72a7258b5b92..20d570554fa4 100644
--- a/drivers/staging/comedi/drivers/adl_pci7230.c
+++ b/drivers/staging/comedi/drivers/adl_pci7230.c
@@ -44,15 +44,7 @@ Configuration Options:
#define PCI_DEVICE_ID_PCI7230 0x7230
static DEFINE_PCI_DEVICE_TABLE(adl_pci7230_pci_table) = {
- {
- PCI_VENDOR_ID_ADLINK,
- PCI_DEVICE_ID_PCI7230,
- PCI_ANY_ID,
- PCI_ANY_ID,
- 0,
- 0,
- 0
- },
+ { PCI_DEVICE(PCI_VENDOR_ID_ADLINK, PCI_DEVICE_ID_PCI7230) },
{0}
};
diff --git a/drivers/staging/comedi/drivers/adl_pci7296.c b/drivers/staging/comedi/drivers/adl_pci7296.c
index f28fe6bec050..9a2320537bdb 100644
--- a/drivers/staging/comedi/drivers/adl_pci7296.c
+++ b/drivers/staging/comedi/drivers/adl_pci7296.c
@@ -49,10 +49,8 @@ Configuration Options:
#define PCI_DEVICE_ID_PCI7296 0x7296
static DEFINE_PCI_DEVICE_TABLE(adl_pci7296_pci_table) = {
- {
- PCI_VENDOR_ID_ADLINK, PCI_DEVICE_ID_PCI7296, PCI_ANY_ID,
- PCI_ANY_ID, 0, 0, 0}, {
- 0}
+ { PCI_DEVICE(PCI_VENDOR_ID_ADLINK, PCI_DEVICE_ID_PCI7296) },
+ {0}
};
MODULE_DEVICE_TABLE(pci, adl_pci7296_pci_table);
diff --git a/drivers/staging/comedi/drivers/adl_pci7432.c b/drivers/staging/comedi/drivers/adl_pci7432.c
index 262da7b29b28..86ee21976041 100644
--- a/drivers/staging/comedi/drivers/adl_pci7432.c
+++ b/drivers/staging/comedi/drivers/adl_pci7432.c
@@ -44,10 +44,8 @@ Configuration Options:
#define PCI_DEVICE_ID_PCI7432 0x7432
static DEFINE_PCI_DEVICE_TABLE(adl_pci7432_pci_table) = {
- {
- PCI_VENDOR_ID_ADLINK, PCI_DEVICE_ID_PCI7432, PCI_ANY_ID,
- PCI_ANY_ID, 0, 0, 0}, {
- 0}
+ { PCI_DEVICE(PCI_VENDOR_ID_ADLINK, PCI_DEVICE_ID_PCI7432) },
+ {0}
};
MODULE_DEVICE_TABLE(pci, adl_pci7432_pci_table);
diff --git a/drivers/staging/comedi/drivers/adl_pci8164.c b/drivers/staging/comedi/drivers/adl_pci8164.c
index 767a594935c7..3b83d65bc1bc 100644
--- a/drivers/staging/comedi/drivers/adl_pci8164.c
+++ b/drivers/staging/comedi/drivers/adl_pci8164.c
@@ -57,10 +57,8 @@ Configuration Options:
#define PCI_DEVICE_ID_PCI8164 0x8164
static DEFINE_PCI_DEVICE_TABLE(adl_pci8164_pci_table) = {
- {
- PCI_VENDOR_ID_ADLINK, PCI_DEVICE_ID_PCI8164, PCI_ANY_ID,
- PCI_ANY_ID, 0, 0, 0}, {
- 0}
+ { PCI_DEVICE(PCI_VENDOR_ID_ADLINK, PCI_DEVICE_ID_PCI8164) },
+ {0}
};
MODULE_DEVICE_TABLE(pci, adl_pci8164_pci_table);
diff --git a/drivers/staging/comedi/drivers/adl_pci9111.c b/drivers/staging/comedi/drivers/adl_pci9111.c
index fc48bae42ab7..2a9bd88a4abb 100644
--- a/drivers/staging/comedi/drivers/adl_pci9111.c
+++ b/drivers/staging/comedi/drivers/adl_pci9111.c
@@ -311,10 +311,8 @@ static const struct comedi_lrange pci9111_hr_ai_range = {
};
static DEFINE_PCI_DEVICE_TABLE(pci9111_pci_table) = {
- { PCI_VENDOR_ID_ADLINK, PCI9111_HR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,
- 0, 0, 0 },
- /* { PCI_VENDOR_ID_ADLINK, PCI9111_HG_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,
- * 0, 0, 0 }, */
+ { PCI_DEVICE(PCI_VENDOR_ID_ADLINK, PCI9111_HR_DEVICE_ID) },
+ /* { PCI_DEVICE(PCI_VENDOR_ID_ADLINK, PCI9111_HG_DEVICE_ID) }, */
{ 0 }
};
diff --git a/drivers/staging/comedi/drivers/adv_pci1710.c b/drivers/staging/comedi/drivers/adv_pci1710.c
index da2b75b15d4e..8318c82a555a 100644
--- a/drivers/staging/comedi/drivers/adv_pci1710.c
+++ b/drivers/staging/comedi/drivers/adv_pci1710.c
@@ -1382,16 +1382,14 @@ static int pci1710_attach(struct comedi_device *dev,
int i;
int board_index;
- printk("comedi%d: adv_pci1710: ", dev->minor);
+ dev_info(dev->hw_dev, "comedi%d: adv_pci1710:\n", dev->minor);
opt_bus = it->options[0];
opt_slot = it->options[1];
ret = alloc_private(dev, sizeof(struct pci1710_private));
- if (ret < 0) {
- printk(" - Allocation failed!\n");
+ if (ret < 0)
return -ENOMEM;
- }
/* Look for matching PCI device */
errstr = "not found!";
@@ -1436,10 +1434,10 @@ static int pci1710_attach(struct comedi_device *dev,
if (!pcidev) {
if (opt_bus || opt_slot) {
- printk(" - Card at b:s %d:%d %s\n",
- opt_bus, opt_slot, errstr);
+ dev_err(dev->hw_dev, "- Card at b:s %d:%d %s\n",
+ opt_bus, opt_slot, errstr);
} else {
- printk(" - Card %s\n", errstr);
+ dev_err(dev->hw_dev, "- Card %s\n", errstr);
}
return -EIO;
}
@@ -1450,8 +1448,8 @@ static int pci1710_attach(struct comedi_device *dev,
irq = pcidev->irq;
iobase = pci_resource_start(pcidev, 2);
- printk(", b:s:f=%d:%d:%d, io=0x%4lx", pci_bus, pci_slot, pci_func,
- iobase);
+ dev_dbg(dev->hw_dev, "b:s:f=%d:%d:%d, io=0x%4lx\n", pci_bus, pci_slot,
+ pci_func, iobase);
dev->iobase = iobase;
@@ -1471,10 +1469,8 @@ static int pci1710_attach(struct comedi_device *dev,
n_subdevices++;
ret = alloc_subdevices(dev, n_subdevices);
- if (ret < 0) {
- printk(" - Allocation failed!\n");
+ if (ret < 0)
return ret;
- }
pci1710_reset(dev);
@@ -1483,24 +1479,20 @@ static int pci1710_attach(struct comedi_device *dev,
if (request_irq(irq, interrupt_service_pci1710,
IRQF_SHARED, "Advantech PCI-1710",
dev)) {
- printk
- (", unable to allocate IRQ %d, DISABLING IT",
- irq);
+ dev_dbg(dev->hw_dev, "unable to allocate IRQ %d, DISABLING IT",
+ irq);
irq = 0; /* Can't use IRQ */
} else {
- printk(", irq=%u", irq);
+ dev_dbg(dev->hw_dev, "irq=%u", irq);
}
} else {
- printk(", IRQ disabled");
+ dev_dbg(dev->hw_dev, "IRQ disabled");
}
} else {
irq = 0;
}
dev->irq = irq;
-
- printk(".\n");
-
subdev = 0;
if (this_board->n_aichan) {
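
The adv_pci1710 hunk above converts bare printk() calls in the attach path to the dev_info()/dev_err()/dev_dbg() helpers, which carry an explicit severity and prefix each message with the device name taken from dev->hw_dev; dev_dbg() output is also compiled out unless DEBUG or dynamic debug is enabled. A minimal sketch of the pattern, assuming the usual comedi headers are in scope — the function name and message strings are placeholders, not code from the driver:

#include <linux/device.h>
#include "../comedidev.h"	/* struct comedi_device, as included by these drivers */

static int example_attach(struct comedi_device *dev)
{
	/* old style: no log level, no device context */
	printk("comedi%d: example: ", dev->minor);

	/* new style: severity chosen per message, device name added automatically */
	dev_info(dev->hw_dev, "comedi%d: example driver\n", dev->minor);
	dev_err(dev->hw_dev, "resource allocation failed\n");
	dev_dbg(dev->hw_dev, "irq=%u\n", dev->irq);

	return 0;
}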
diff --git a/drivers/staging/comedi/drivers/adv_pci_dio.c b/drivers/staging/comedi/drivers/adv_pci_dio.c
index 69334f6f64e8..537e58534275 100644
--- a/drivers/staging/comedi/drivers/adv_pci_dio.c
+++ b/drivers/staging/comedi/drivers/adv_pci_dio.c
@@ -1106,13 +1106,10 @@ static int pci_dio_attach(struct comedi_device *dev,
unsigned long iobase;
struct pci_dev *pcidev = NULL;
- printk("comedi%d: adv_pci_dio: ", dev->minor);
ret = alloc_private(dev, sizeof(struct pci_dio_private));
- if (ret < 0) {
- printk(", Error: Cann't allocate private memory!\n");
+ if (ret < 0)
return -ENOMEM;
- }
for_each_pci_dev(pcidev) {
/* loop through cards supported by this driver */
@@ -1140,19 +1137,18 @@ static int pci_dio_attach(struct comedi_device *dev,
}
if (!dev->board_ptr) {
- printk(", Error: Requested type of the card was not found!\n");
+ dev_err(dev->hw_dev, "Error: Requested type of the card was not found!\n");
return -EIO;
}
if (comedi_pci_enable(pcidev, driver_pci_dio.driver_name)) {
- printk
- (", Error: Can't enable PCI device and request regions!\n");
+ dev_err(dev->hw_dev, "Error: Can't enable PCI device and request regions!\n");
return -EIO;
}
iobase = pci_resource_start(pcidev, this_board->main_pci_region);
- printk(", b:s:f=%d:%d:%d, io=0x%4lx",
- pcidev->bus->number, PCI_SLOT(pcidev->devfn),
- PCI_FUNC(pcidev->devfn), iobase);
+ dev_dbg(dev->hw_dev, "b:s:f=%d:%d:%d, io=0x%4lx\n",
+ pcidev->bus->number, PCI_SLOT(pcidev->devfn),
+ PCI_FUNC(pcidev->devfn), iobase);
dev->iobase = iobase;
dev->board_name = this_board->name;
@@ -1177,15 +1173,10 @@ static int pci_dio_attach(struct comedi_device *dev,
}
ret = alloc_subdevices(dev, n_subdevices);
- if (ret < 0) {
- printk(", Error: Cann't allocate subdevice memory!\n");
+ if (ret < 0)
return ret;
- }
-
- printk(".\n");
subdev = 0;
-
for (i = 0; i < MAX_DI_SUBDEVS; i++)
if (this_board->sdi[i].chans) {
s = dev->subdevices + subdev;
diff --git a/drivers/staging/comedi/drivers/amplc_dio200.c b/drivers/staging/comedi/drivers/amplc_dio200.c
index 93bbe4ec318d..566cc4411452 100644
--- a/drivers/staging/comedi/drivers/amplc_dio200.c
+++ b/drivers/staging/comedi/drivers/amplc_dio200.c
@@ -421,12 +421,9 @@ static const struct dio200_layout_struct dio200_layouts[] = {
#ifdef CONFIG_COMEDI_PCI
static DEFINE_PCI_DEVICE_TABLE(dio200_pci_table) = {
- {
- PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_AMPLICON_PCI215,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_AMPLICON_PCI272,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- 0}
+ { PCI_DEVICE(PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_AMPLICON_PCI215) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_AMPLICON_PCI272) },
+ {0}
};
MODULE_DEVICE_TABLE(pci, dio200_pci_table);
diff --git a/drivers/staging/comedi/drivers/amplc_pc236.c b/drivers/staging/comedi/drivers/amplc_pc236.c
index 48246cd50d47..7972cadd403e 100644
--- a/drivers/staging/comedi/drivers/amplc_pc236.c
+++ b/drivers/staging/comedi/drivers/amplc_pc236.c
@@ -134,10 +134,8 @@ static const struct pc236_board pc236_boards[] = {
#ifdef CONFIG_COMEDI_PCI
static DEFINE_PCI_DEVICE_TABLE(pc236_pci_table) = {
- {
- PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_AMPLICON_PCI236,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- 0}
+ { PCI_DEVICE(PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_AMPLICON_PCI236) },
+ {0}
};
MODULE_DEVICE_TABLE(pci, pc236_pci_table);
diff --git a/drivers/staging/comedi/drivers/amplc_pc263.c b/drivers/staging/comedi/drivers/amplc_pc263.c
index 8a3388079094..191ac0d23ce7 100644
--- a/drivers/staging/comedi/drivers/amplc_pc263.c
+++ b/drivers/staging/comedi/drivers/amplc_pc263.c
@@ -101,10 +101,8 @@ static const struct pc263_board pc263_boards[] = {
#ifdef CONFIG_COMEDI_PCI
static DEFINE_PCI_DEVICE_TABLE(pc263_pci_table) = {
- {
- PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_AMPLICON_PCI263,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- 0}
+ { PCI_DEVICE(PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_AMPLICON_PCI263) },
+ {0}
};
MODULE_DEVICE_TABLE(pci, pc263_pci_table);
diff --git a/drivers/staging/comedi/drivers/amplc_pci224.c b/drivers/staging/comedi/drivers/amplc_pci224.c
index 1b5ba1c27259..b278917cec25 100644
--- a/drivers/staging/comedi/drivers/amplc_pci224.c
+++ b/drivers/staging/comedi/drivers/amplc_pci224.c
@@ -384,12 +384,9 @@ static const struct pci224_board pci224_boards[] = {
*/
static DEFINE_PCI_DEVICE_TABLE(pci224_pci_table) = {
- {
- PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_AMPLICON_PCI224,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_AMPLICON_PCI234,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- 0}
+ { PCI_DEVICE(PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_AMPLICON_PCI224) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_AMPLICON_PCI234) },
+ {0}
};
MODULE_DEVICE_TABLE(pci, pci224_pci_table);
diff --git a/drivers/staging/comedi/drivers/amplc_pci230.c b/drivers/staging/comedi/drivers/amplc_pci230.c
index 7edeb1103dc8..538979551c8e 100644
--- a/drivers/staging/comedi/drivers/amplc_pci230.c
+++ b/drivers/staging/comedi/drivers/amplc_pci230.c
@@ -501,12 +501,9 @@ static const struct pci230_board pci230_boards[] = {
};
static DEFINE_PCI_DEVICE_TABLE(pci230_pci_table) = {
- {
- PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_PCI230, PCI_ANY_ID,
- PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_PCI260, PCI_ANY_ID,
- PCI_ANY_ID, 0, 0, 0}, {
- 0}
+ { PCI_DEVICE(PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_PCI230) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_PCI260) },
+ {0}
};
MODULE_DEVICE_TABLE(pci, pci230_pci_table);
diff --git a/drivers/staging/comedi/drivers/cb_das16_cs.c b/drivers/staging/comedi/drivers/cb_das16_cs.c
index e171c56112d8..49404f49f7b7 100644
--- a/drivers/staging/comedi/drivers/cb_das16_cs.c
+++ b/drivers/staging/comedi/drivers/cb_das16_cs.c
@@ -99,7 +99,7 @@ static struct comedi_driver driver_das16cs = {
.detach = das16cs_detach,
};
-static struct pcmcia_device *cur_dev = NULL;
+static struct pcmcia_device *cur_dev;
static const struct comedi_lrange das16cs_ai_range = { 4, {
RANGE(-10, 10),
@@ -150,7 +150,7 @@ static const struct das16cs_board *das16cs_probe(struct comedi_device *dev,
return das16cs_boards + i;
}
- printk("unknown board!\n");
+ dev_dbg(dev->hw_dev, "unknown board!\n");
return NULL;
}
@@ -163,20 +163,19 @@ static int das16cs_attach(struct comedi_device *dev,
int ret;
int i;
- printk("comedi%d: cb_das16_cs: ", dev->minor);
+ dev_dbg(dev->hw_dev, "comedi%d: cb_das16_cs: attached\n", dev->minor);
link = cur_dev; /* XXX hack */
if (!link)
return -EIO;
dev->iobase = link->resource[0]->start;
- printk("I/O base=0x%04lx ", dev->iobase);
+ dev_dbg(dev->hw_dev, "I/O base=0x%04lx\n", dev->iobase);
- printk("fingerprint:\n");
+ dev_dbg(dev->hw_dev, "fingerprint:\n");
for (i = 0; i < 48; i += 2)
- printk("%04x ", inw(dev->iobase + i));
+ dev_dbg(dev->hw_dev, "%04x\n", inw(dev->iobase + i));
- printk("\n");
ret = request_irq(link->irq, das16cs_interrupt,
IRQF_SHARED, "cb_das16_cs", dev);
@@ -185,7 +184,7 @@ static int das16cs_attach(struct comedi_device *dev,
dev->irq = link->irq;
- printk("irq=%u ", dev->irq);
+ dev_dbg(dev->hw_dev, "irq=%u\n", dev->irq);
dev->board_ptr = das16cs_probe(dev, link);
if (!dev->board_ptr)
@@ -252,14 +251,13 @@ static int das16cs_attach(struct comedi_device *dev,
s->type = COMEDI_SUBD_UNUSED;
}
- printk("attached\n");
return 1;
}
static int das16cs_detach(struct comedi_device *dev)
{
- printk("comedi%d: das16cs: remove\n", dev->minor);
+ dev_dbg(dev->hw_dev, "comedi%d: das16cs: remove\n", dev->minor);
if (dev->irq)
free_irq(dev->irq, dev);
@@ -312,7 +310,7 @@ static int das16cs_ai_rinsn(struct comedi_device *dev,
break;
}
if (to == TIMEOUT) {
- printk("cb_das16_cs: ai timeout\n");
+ dev_dbg(dev->hw_dev, "cb_das16_cs: ai timeout\n");
return -ETIME;
}
data[i] = (unsigned short)inw(dev->iobase + 0);
diff --git a/drivers/staging/comedi/drivers/cb_pcidas.c b/drivers/staging/comedi/drivers/cb_pcidas.c
index 61968a505f24..7e4ffcfdac62 100644
--- a/drivers/staging/comedi/drivers/cb_pcidas.c
+++ b/drivers/staging/comedi/drivers/cb_pcidas.c
@@ -565,8 +565,6 @@ static int cb_pcidas_attach(struct comedi_device *dev,
int index;
int i;
- printk("comedi%d: cb_pcidas: ", dev->minor);
-
/*
* Allocate the private structure area.
*/
@@ -576,7 +574,6 @@ static int cb_pcidas_attach(struct comedi_device *dev,
/*
* Probe the device to determine what device in the series it is.
*/
- printk("\n");
for_each_pci_dev(pcidev) {
/* is it not a computer boards card? */
@@ -600,20 +597,20 @@ static int cb_pcidas_attach(struct comedi_device *dev,
}
}
- printk("No supported ComputerBoards/MeasurementComputing card found on "
- "requested position\n");
+ dev_err(dev->hw_dev, "No supported ComputerBoards/MeasurementComputing card found on requested position\n");
return -EIO;
found:
- printk("Found %s on bus %i, slot %i\n", cb_pcidas_boards[index].name,
- pcidev->bus->number, PCI_SLOT(pcidev->devfn));
+ dev_dbg(dev->hw_dev, "Found %s on bus %i, slot %i\n",
+ cb_pcidas_boards[index].name, pcidev->bus->number,
+ PCI_SLOT(pcidev->devfn));
/*
* Enable PCI device and reserve I/O ports.
*/
if (comedi_pci_enable(pcidev, "cb_pcidas")) {
- printk(" Failed to enable PCI device and request regions\n");
+ dev_err(dev->hw_dev, "Failed to enable PCI device and request regions\n");
return -EIO;
}
/*
@@ -639,7 +636,8 @@ found:
/* get irq */
if (request_irq(devpriv->pci_dev->irq, cb_pcidas_interrupt,
IRQF_SHARED, "cb_pcidas", dev)) {
- printk(" unable to allocate irq %d\n", devpriv->pci_dev->irq);
+ dev_dbg(dev->hw_dev, "unable to allocate irq %d\n",
+ devpriv->pci_dev->irq);
return -EINVAL;
}
dev->irq = devpriv->pci_dev->irq;
@@ -768,7 +766,6 @@ found:
*/
static int cb_pcidas_detach(struct comedi_device *dev)
{
- printk("comedi%d: cb_pcidas: remove\n", dev->minor);
if (devpriv) {
if (devpriv->s5933_config) {
@@ -776,8 +773,8 @@ static int cb_pcidas_detach(struct comedi_device *dev)
outl(INTCSR_INBOX_INTR_STATUS,
devpriv->s5933_config + AMCC_OP_REG_INTCSR);
#ifdef CB_PCIDAS_DEBUG
- printk("detaching, incsr is 0x%x\n",
- inl(devpriv->s5933_config + AMCC_OP_REG_INTCSR));
+ dev_dbg(dev->hw_dev, "detaching, incsr is 0x%x\n",
+ inl(devpriv->s5933_config + AMCC_OP_REG_INTCSR));
#endif
}
}
@@ -858,7 +855,8 @@ static int ai_config_calibration_source(struct comedi_device *dev,
unsigned int source = data[1];
if (source >= num_calibration_sources) {
- printk("invalid calibration source: %i\n", source);
+ dev_err(dev->hw_dev, "invalid calibration source: %i\n",
+ source);
return -EINVAL;
}
@@ -1279,7 +1277,7 @@ static int cb_pcidas_ai_cmd(struct comedi_device *dev,
outw(bits, devpriv->control_status + ADCMUX_CONT);
#ifdef CB_PCIDAS_DEBUG
- printk("comedi: sent 0x%x to adcmux control\n", bits);
+ dev_dbg(dev->hw_dev, "comedi: sent 0x%x to adcmux control\n", bits);
#endif
/* load counters */
@@ -1306,7 +1304,8 @@ static int cb_pcidas_ai_cmd(struct comedi_device *dev,
devpriv->adc_fifo_bits |= INT_FHF; /* interrupt fifo half full */
}
#ifdef CB_PCIDAS_DEBUG
- printk("comedi: adc_fifo_bits are 0x%x\n", devpriv->adc_fifo_bits);
+ dev_dbg(dev->hw_dev, "comedi: adc_fifo_bits are 0x%x\n",
+ devpriv->adc_fifo_bits);
#endif
/* enable (and clear) interrupts */
outw(devpriv->adc_fifo_bits | EOAI | INT | LADFUL,
@@ -1332,7 +1331,7 @@ static int cb_pcidas_ai_cmd(struct comedi_device *dev,
bits |= BURSTE;
outw(bits, devpriv->control_status + TRIG_CONTSTAT);
#ifdef CB_PCIDAS_DEBUG
- printk("comedi: sent 0x%x to trig control\n", bits);
+ dev_dbg(dev->hw_dev, "comedi: sent 0x%x to trig control\n", bits);
#endif
return 0;
@@ -1549,7 +1548,8 @@ static int cb_pcidas_ao_inttrig(struct comedi_device *dev,
spin_lock_irqsave(&dev->spinlock, flags);
devpriv->adc_fifo_bits |= DAEMIE | DAHFIE;
#ifdef CB_PCIDAS_DEBUG
- printk("comedi: adc_fifo_bits are 0x%x\n", devpriv->adc_fifo_bits);
+ dev_dbg(dev->hw_dev, "comedi: adc_fifo_bits are 0x%x\n",
+ devpriv->adc_fifo_bits);
#endif
/* enable and clear interrupts */
outw(devpriv->adc_fifo_bits | DAEMI | DAHFI,
@@ -1559,7 +1559,8 @@ static int cb_pcidas_ao_inttrig(struct comedi_device *dev,
devpriv->ao_control_bits |= DAC_START | DACEN | DAC_EMPTY;
outw(devpriv->ao_control_bits, devpriv->control_status + DAC_CSR);
#ifdef CB_PCIDAS_DEBUG
- printk("comedi: sent 0x%x to dac control\n", devpriv->ao_control_bits);
+ dev_dbg(dev->hw_dev, "comedi: sent 0x%x to dac control\n",
+ devpriv->ao_control_bits);
#endif
spin_unlock_irqrestore(&dev->spinlock, flags);
@@ -1587,8 +1588,9 @@ static irqreturn_t cb_pcidas_interrupt(int irq, void *d)
s5933_status = inl(devpriv->s5933_config + AMCC_OP_REG_INTCSR);
#ifdef CB_PCIDAS_DEBUG
- printk("intcsr 0x%x\n", s5933_status);
- printk("mbef 0x%x\n", inl(devpriv->s5933_config + AMCC_OP_REG_MBEF));
+ dev_dbg(dev->hw_dev, "intcsr 0x%x\n", s5933_status);
+ dev_dbg(dev->hw_dev, "mbef 0x%x\n",
+ inl(devpriv->s5933_config + AMCC_OP_REG_MBEF));
#endif
if ((INTCSR_INTR_ASSERTED & s5933_status) == 0)
diff --git a/drivers/staging/comedi/drivers/cb_pcidas64.c b/drivers/staging/comedi/drivers/cb_pcidas64.c
index 1e324198996c..c9e8c4785768 100644
--- a/drivers/staging/comedi/drivers/cb_pcidas64.c
+++ b/drivers/staging/comedi/drivers/cb_pcidas64.c
@@ -1739,8 +1739,6 @@ static int attach(struct comedi_device *dev, struct comedi_devconfig *it)
uint32_t local_range, local_decode;
int retval;
- printk("comedi%d: cb_pcidas64\n", dev->minor);
-
/*
* Allocate the private structure area.
*/
@@ -1781,12 +1779,11 @@ static int attach(struct comedi_device *dev, struct comedi_devconfig *it)
return -EIO;
}
- printk("Found %s on bus %i, slot %i\n", board(dev)->name,
- pcidev->bus->number, PCI_SLOT(pcidev->devfn));
+ dev_dbg(dev->hw_dev, "Found %s on bus %i, slot %i\n", board(dev)->name,
+ pcidev->bus->number, PCI_SLOT(pcidev->devfn));
if (comedi_pci_enable(pcidev, driver_cb_pcidas.driver_name)) {
- printk(KERN_WARNING
- " failed to enable PCI device and request regions\n");
+ dev_warn(dev->hw_dev, "failed to enable PCI device and request regions\n");
return -EIO;
}
pci_set_master(pcidev);
@@ -1814,7 +1811,7 @@ static int attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (!priv(dev)->plx9080_iobase || !priv(dev)->main_iobase
|| !priv(dev)->dio_counter_iobase) {
- printk(" failed to remap io memory\n");
+ dev_warn(dev->hw_dev, "failed to remap io memory\n");
return -ENOMEM;
}
@@ -1850,17 +1847,19 @@ static int attach(struct comedi_device *dev, struct comedi_devconfig *it)
priv(dev)->hw_revision =
hw_revision(dev, readw(priv(dev)->main_iobase + HW_STATUS_REG));
- printk(" stc hardware revision %i\n", priv(dev)->hw_revision);
+ dev_dbg(dev->hw_dev, "stc hardware revision %i\n",
+ priv(dev)->hw_revision);
init_plx9080(dev);
init_stc_registers(dev);
/* get irq */
if (request_irq(pcidev->irq, handle_interrupt, IRQF_SHARED,
"cb_pcidas64", dev)) {
- printk(" unable to allocate irq %u\n", pcidev->irq);
+ dev_dbg(dev->hw_dev, "unable to allocate irq %u\n",
+ pcidev->irq);
return -EINVAL;
}
dev->irq = pcidev->irq;
- printk(" irq %u\n", dev->irq);
+ dev_dbg(dev->hw_dev, "irq %u\n", dev->irq);
retval = setup_subdevices(dev);
if (retval < 0)
@@ -1882,8 +1881,6 @@ static int detach(struct comedi_device *dev)
{
unsigned int i;
- printk("comedi%d: cb_pcidas: remove\n", dev->minor);
-
if (dev->irq)
free_irq(dev->irq, dev);
if (priv(dev)) {
@@ -2093,7 +2090,8 @@ static int ai_config_calibration_source(struct comedi_device *dev,
else
num_calibration_sources = 8;
if (source >= num_calibration_sources) {
- printk("invalid calibration source: %i\n", source);
+ dev_dbg(dev->hw_dev, "invalid calibration source: %i\n",
+ source);
return -EINVAL;
}
@@ -2924,7 +2922,7 @@ static void pio_drain_ai_fifo_16(struct comedi_device *dev)
}
if (num_samples < 0) {
- printk(" cb_pcidas64: bug! num_samples < 0\n");
+ dev_err(dev->hw_dev, "cb_pcidas64: bug! num_samples < 0\n");
break;
}
diff --git a/drivers/staging/comedi/drivers/cb_pcidda.c b/drivers/staging/comedi/drivers/cb_pcidda.c
index 49102b3a6c4a..abba220a767f 100644
--- a/drivers/staging/comedi/drivers/cb_pcidda.c
+++ b/drivers/staging/comedi/drivers/cb_pcidda.c
@@ -282,7 +282,6 @@ static int cb_pcidda_attach(struct comedi_device *dev,
struct pci_dev *pcidev = NULL;
int index;
- printk("comedi%d: cb_pcidda: ", dev->minor);
/*
* Allocate the private structure area.
@@ -293,7 +292,6 @@ static int cb_pcidda_attach(struct comedi_device *dev,
/*
* Probe the device to determine what device in the series it is.
*/
- printk("\n");
for_each_pci_dev(pcidev) {
if (pcidev->vendor == PCI_VENDOR_ID_CB) {
@@ -312,22 +310,21 @@ static int cb_pcidda_attach(struct comedi_device *dev,
}
}
if (!pcidev) {
- printk
- ("Not a ComputerBoards/MeasurementComputing card on requested position\n");
+ dev_err(dev->hw_dev, "Not a ComputerBoards/MeasurementComputing card on requested position\n");
return -EIO;
}
found:
devpriv->pci_dev = pcidev;
dev->board_ptr = cb_pcidda_boards + index;
/* "thisboard" macro can be used from here. */
- printk("Found %s at requested position\n", thisboard->name);
+ dev_dbg(dev->hw_dev, "Found %s at requested position\n",
+ thisboard->name);
/*
* Enable PCI device and request regions.
*/
if (comedi_pci_enable(pcidev, thisboard->name)) {
- printk
- ("cb_pcidda: failed to enable PCI device and request regions\n");
+ dev_err(dev->hw_dev, "cb_pcidda: failed to enable PCI device and request regions\n");
return -EIO;
}
@@ -377,12 +374,11 @@ found:
s = dev->subdevices + 2;
subdev_8255_init(dev, s, NULL, devpriv->digitalio + PORT2A);
- printk(" eeprom:");
+ dev_dbg(dev->hw_dev, "eeprom:\n");
for (index = 0; index < EEPROM_SIZE; index++) {
devpriv->eeprom_data[index] = cb_pcidda_read_eeprom(dev, index);
- printk(" %i:0x%x ", index, devpriv->eeprom_data[index]);
+ dev_dbg(dev->hw_dev, "%i:0x%x\n", index, devpriv->eeprom_data[index]);
}
- printk("\n");
/* set calibrations dacs */
for (index = 0; index < thisboard->ao_chans; index++)
@@ -417,8 +413,6 @@ static int cb_pcidda_detach(struct comedi_device *dev)
subdev_8255_cleanup(dev, dev->subdevices + 2);
}
- printk("comedi%d: cb_pcidda: remove\n", dev->minor);
-
return 0;
}
diff --git a/drivers/staging/comedi/drivers/cb_pcidio.c b/drivers/staging/comedi/drivers/cb_pcidio.c
index 79477a595ef9..8f3215239a15 100644
--- a/drivers/staging/comedi/drivers/cb_pcidio.c
+++ b/drivers/staging/comedi/drivers/cb_pcidio.c
@@ -184,8 +184,6 @@ static int pcidio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
int index;
int i;
- printk("comedi%d: cb_pcidio: \n", dev->minor);
-
/*
* Allocate the private structure area. alloc_private() is a
* convenient macro defined in comedidev.h.
@@ -223,8 +221,7 @@ static int pcidio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
}
}
- printk("No supported ComputerBoards/MeasurementComputing card found on "
- "requested position\n");
+ dev_err(dev->hw_dev, "No supported ComputerBoards/MeasurementComputing card found on requested position\n");
return -EIO;
found:
@@ -236,14 +233,12 @@ found:
dev->board_name = thisboard->name;
devpriv->pci_dev = pcidev;
- printk("Found %s on bus %i, slot %i\n", thisboard->name,
- devpriv->pci_dev->bus->number,
- PCI_SLOT(devpriv->pci_dev->devfn));
- if (comedi_pci_enable(pcidev, thisboard->name)) {
- printk
- ("cb_pcidio: failed to enable PCI device and request regions\n");
+ dev_dbg(dev->hw_dev, "Found %s on bus %i, slot %i\n", thisboard->name,
+ devpriv->pci_dev->bus->number,
+ PCI_SLOT(devpriv->pci_dev->devfn));
+ if (comedi_pci_enable(pcidev, thisboard->name))
return -EIO;
- }
+
devpriv->dio_reg_base
=
pci_resource_start(devpriv->pci_dev,
@@ -259,11 +254,10 @@ found:
for (i = 0; i < thisboard->n_8255; i++) {
subdev_8255_init(dev, dev->subdevices + i,
NULL, devpriv->dio_reg_base + i * 4);
- printk(" subdev %d: base = 0x%lx\n", i,
- devpriv->dio_reg_base + i * 4);
+ dev_dbg(dev->hw_dev, "subdev %d: base = 0x%lx\n", i,
+ devpriv->dio_reg_base + i * 4);
}
- printk("attached\n");
return 1;
}
@@ -277,7 +271,6 @@ found:
*/
static int pcidio_detach(struct comedi_device *dev)
{
- printk("comedi%d: cb_pcidio: remove\n", dev->minor);
if (devpriv) {
if (devpriv->pci_dev) {
if (devpriv->dio_reg_base)
diff --git a/drivers/staging/comedi/drivers/cb_pcimdas.c b/drivers/staging/comedi/drivers/cb_pcimdas.c
index b1b832b65bc1..8ba694263bd3 100644
--- a/drivers/staging/comedi/drivers/cb_pcimdas.c
+++ b/drivers/staging/comedi/drivers/cb_pcimdas.c
@@ -212,8 +212,6 @@ static int cb_pcimdas_attach(struct comedi_device *dev,
int index;
/* int i; */
- printk("comedi%d: cb_pcimdas: ", dev->minor);
-
/*
* Allocate the private structure area.
*/
@@ -223,7 +221,6 @@ static int cb_pcimdas_attach(struct comedi_device *dev,
/*
* Probe the device to determine what device in the series it is.
*/
- printk("\n");
for_each_pci_dev(pcidev) {
/* is it not a computer boards card? */
@@ -248,26 +245,26 @@ static int cb_pcimdas_attach(struct comedi_device *dev,
}
}
- printk("No supported ComputerBoards/MeasurementComputing card found on "
- "requested position\n");
+ dev_err(dev->hw_dev, "No supported ComputerBoards/MeasurementComputing card found on requested position\n");
return -EIO;
found:
- printk("Found %s on bus %i, slot %i\n", cb_pcimdas_boards[index].name,
- pcidev->bus->number, PCI_SLOT(pcidev->devfn));
+ dev_dbg(dev->hw_dev, "Found %s on bus %i, slot %i\n",
+ cb_pcimdas_boards[index].name, pcidev->bus->number,
+ PCI_SLOT(pcidev->devfn));
/* Warn about non-tested features */
switch (thisboard->device_id) {
case 0x56:
break;
default:
- printk("THIS CARD IS UNSUPPORTED.\n"
- "PLEASE REPORT USAGE TO <mocelet@sucs.org>\n");
+ dev_dbg(dev->hw_dev, "THIS CARD IS UNSUPPORTED.\n"
+ "PLEASE REPORT USAGE TO <mocelet@sucs.org>\n");
}
if (comedi_pci_enable(pcidev, "cb_pcimdas")) {
- printk(" Failed to enable PCI device and request regions\n");
+ dev_err(dev->hw_dev, "Failed to enable PCI device and request regions\n");
return -EIO;
}
@@ -277,13 +274,11 @@ found:
devpriv->BADR3 = pci_resource_start(devpriv->pci_dev, 3);
devpriv->BADR4 = pci_resource_start(devpriv->pci_dev, 4);
-#ifdef CBPCIMDAS_DEBUG
- printk("devpriv->BADR0 = 0x%lx\n", devpriv->BADR0);
- printk("devpriv->BADR1 = 0x%lx\n", devpriv->BADR1);
- printk("devpriv->BADR2 = 0x%lx\n", devpriv->BADR2);
- printk("devpriv->BADR3 = 0x%lx\n", devpriv->BADR3);
- printk("devpriv->BADR4 = 0x%lx\n", devpriv->BADR4);
-#endif
+ dev_dbg(dev->hw_dev, "devpriv->BADR0 = 0x%lx\n", devpriv->BADR0);
+ dev_dbg(dev->hw_dev, "devpriv->BADR1 = 0x%lx\n", devpriv->BADR1);
+ dev_dbg(dev->hw_dev, "devpriv->BADR2 = 0x%lx\n", devpriv->BADR2);
+ dev_dbg(dev->hw_dev, "devpriv->BADR3 = 0x%lx\n", devpriv->BADR3);
+ dev_dbg(dev->hw_dev, "devpriv->BADR4 = 0x%lx\n", devpriv->BADR4);
/* Dont support IRQ yet */
/* get irq */
@@ -333,8 +328,6 @@ found:
else
s->type = COMEDI_SUBD_UNUSED;
- printk("attached\n");
-
return 1;
}
@@ -348,16 +341,19 @@ found:
*/
static int cb_pcimdas_detach(struct comedi_device *dev)
{
-#ifdef CBPCIMDAS_DEBUG
if (devpriv) {
- printk("devpriv->BADR0 = 0x%lx\n", devpriv->BADR0);
- printk("devpriv->BADR1 = 0x%lx\n", devpriv->BADR1);
- printk("devpriv->BADR2 = 0x%lx\n", devpriv->BADR2);
- printk("devpriv->BADR3 = 0x%lx\n", devpriv->BADR3);
- printk("devpriv->BADR4 = 0x%lx\n", devpriv->BADR4);
+ dev_dbg(dev->hw_dev, "devpriv->BADR0 = 0x%lx\n",
+ devpriv->BADR0);
+ dev_dbg(dev->hw_dev, "devpriv->BADR1 = 0x%lx\n",
+ devpriv->BADR1);
+ dev_dbg(dev->hw_dev, "devpriv->BADR2 = 0x%lx\n",
+ devpriv->BADR2);
+ dev_dbg(dev->hw_dev, "devpriv->BADR3 = 0x%lx\n",
+ devpriv->BADR3);
+ dev_dbg(dev->hw_dev, "devpriv->BADR4 = 0x%lx\n",
+ devpriv->BADR4);
}
-#endif
- printk("comedi%d: cb_pcimdas: remove\n", dev->minor);
+
if (dev->irq)
free_irq(dev->irq, dev);
if (devpriv) {
diff --git a/drivers/staging/comedi/drivers/cb_pcimdda.c b/drivers/staging/comedi/drivers/cb_pcimdda.c
index 8c981a89ab63..40bddfa22220 100644
--- a/drivers/staging/comedi/drivers/cb_pcimdda.c
+++ b/drivers/staging/comedi/drivers/cb_pcimdda.c
@@ -105,7 +105,8 @@ struct board_struct {
int ao_bits;
int dio_chans;
int dio_method;
- int dio_offset; /* how many bytes into the BADR are the DIO ports */
+ /* how many bytes into the BADR are the DIO ports */
+ int dio_offset;
int regs_badrindex; /* IO Region for the control, analog output,
and DIO registers */
int reg_sz; /* number of bytes of registers in io region */
@@ -144,17 +145,18 @@ static const struct board_struct boards[] = {
/* Please add your PCI vendor ID to comedidev.h, and it will be forwarded
* upstream. */
static DEFINE_PCI_DEVICE_TABLE(pci_table) = {
- {
- PCI_VENDOR_ID_COMPUTERBOARDS, PCI_ID_PCIM_DDA06_16, PCI_ANY_ID,
- PCI_ANY_ID, 0, 0, 0}, {
- 0}
+ { PCI_DEVICE(PCI_VENDOR_ID_COMPUTERBOARDS, PCI_ID_PCIM_DDA06_16) },
+ {0}
};
MODULE_DEVICE_TABLE(pci, pci_table);
-/* this structure is for data unique to this hardware driver. If
- several hardware drivers keep similar information in this structure,
- feel free to suggest moving the variable to the struct comedi_device struct. */
+/*
+ * this structure is for data unique to this hardware driver. If
+ * several hardware drivers keep similar information in this structure,
+ * feel free to suggest moving the variable to the struct comedi_device
+ * struct.
+ */
struct board_private_struct {
unsigned long registers; /* set by probe */
unsigned long dio_registers;
@@ -335,7 +337,10 @@ static int attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (thisboard->dio_chans) {
switch (thisboard->dio_method) {
case DIO_8255:
- /* this is a straight 8255, so register us with the 8255 driver */
+ /*
+ * this is a straight 8255, so register us with
+ * the 8255 driver
+ */
subdev_8255_init(dev, s, NULL, devpriv->dio_registers);
devpriv->attached_to_8255 = 1;
break;
@@ -436,8 +441,11 @@ static int ao_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
for (i = 0; i < insn->n; i++) {
inw(devpriv->registers + chan * 2);
- /* should I set data[i] to the result of the actual read on the register
- or the cached unsigned int in devpriv->ao_readback[]? */
+ /*
+ * should I set data[i] to the result of the actual read
+ * on the register or the cached unsigned int in
+ * devpriv->ao_readback[]?
+ */
data[i] = devpriv->ao_readback[chan];
}
diff --git a/drivers/staging/comedi/drivers/contec_pci_dio.c b/drivers/staging/comedi/drivers/contec_pci_dio.c
index 871f109bcfa3..e3659bd6e85e 100644
--- a/drivers/staging/comedi/drivers/contec_pci_dio.c
+++ b/drivers/staging/comedi/drivers/contec_pci_dio.c
@@ -57,10 +57,9 @@ static const struct contec_board contec_boards[] = {
#define PCI_DEVICE_ID_PIO1616L 0x8172
static DEFINE_PCI_DEVICE_TABLE(contec_pci_table) = {
- {
- PCI_VENDOR_ID_CONTEC, PCI_DEVICE_ID_PIO1616L, PCI_ANY_ID,
- PCI_ANY_ID, 0, 0, PIO1616L}, {
- 0}
+ { PCI_DEVICE(PCI_VENDOR_ID_CONTEC, PCI_DEVICE_ID_PIO1616L),
+ .driver_data = PIO1616L },
+ {0}
};
MODULE_DEVICE_TABLE(pci, contec_pci_table);
@@ -197,8 +196,8 @@ static int contec_do_insn_bits(struct comedi_device *dev,
struct comedi_insn *insn, unsigned int *data)
{
- printk("contec_do_insn_bits called\n");
- printk(" data: %d %d\n", data[0], data[1]);
+ dev_dbg(dev->hw_dev, "contec_do_insn_bits called\n");
+ dev_dbg(dev->hw_dev, "data: %d %d\n", data[0], data[1]);
if (insn->n != 2)
return -EINVAL;
@@ -206,8 +205,8 @@ static int contec_do_insn_bits(struct comedi_device *dev,
if (data[0]) {
s->state &= ~data[0];
s->state |= data[0] & data[1];
- printk(" out: %d on %lx\n", s->state,
- dev->iobase + thisboard->out_offs);
+ dev_dbg(dev->hw_dev, "out: %d on %lx\n", s->state,
+ dev->iobase + thisboard->out_offs);
outw(s->state, dev->iobase + thisboard->out_offs);
}
return 2;
@@ -218,8 +217,8 @@ static int contec_di_insn_bits(struct comedi_device *dev,
struct comedi_insn *insn, unsigned int *data)
{
- printk("contec_di_insn_bits called\n");
- printk(" data: %d %d\n", data[0], data[1]);
+ dev_dbg(dev->hw_dev, "contec_di_insn_bits called\n");
+ dev_dbg(dev->hw_dev, "data: %d %d\n", data[0], data[1]);
if (insn->n != 2)
return -EINVAL;
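
The contec hunk above shows the same PCI-table cleanup for an entry that also carries board data: because PCI_DEVICE() expands to designated initializers for the vendor/device/subvendor/subdevice fields, it can be combined with an explicit .driver_data initializer, preserving the board index the old positional entry passed in its last field. A minimal sketch, not part of the commit; the board-index value is a stand-in:

#include <linux/pci.h>

#define PCI_DEVICE_ID_PIO1616L	0x8172
#define PIO1616L		1	/* stand-in for the driver's board index */

static DEFINE_PCI_DEVICE_TABLE(example_contec_table) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CONTEC, PCI_DEVICE_ID_PIO1616L),
	  .driver_data = PIO1616L },
	{ 0 }
};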
diff --git a/drivers/staging/comedi/drivers/daqboard2000.c b/drivers/staging/comedi/drivers/daqboard2000.c
index 82be77daa7d7..e61c6a8f2857 100644
--- a/drivers/staging/comedi/drivers/daqboard2000.c
+++ b/drivers/staging/comedi/drivers/daqboard2000.c
@@ -325,9 +325,8 @@ static const struct daq200_boardtype boardtypes[] = {
#define this_board ((const struct daq200_boardtype *)dev->board_ptr)
static DEFINE_PCI_DEVICE_TABLE(daqboard2000_pci_table) = {
- {
- 0x1616, 0x0409, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- 0}
+ { PCI_DEVICE(0x1616, 0x0409) },
+ {0}
};
MODULE_DEVICE_TABLE(pci, daqboard2000_pci_table);
@@ -430,16 +429,14 @@ static int daqboard2000_ai_insn_read(struct comedi_device *dev,
/* Enable reading from the scanlist FIFO */
fpga->acqControl = DAQBOARD2000_SeqStartScanList;
for (timeout = 0; timeout < 20; timeout++) {
- if (fpga->acqControl & DAQBOARD2000_AcqConfigPipeFull) {
+ if (fpga->acqControl & DAQBOARD2000_AcqConfigPipeFull)
break;
- }
/* udelay(2); */
}
fpga->acqControl = DAQBOARD2000_AdcPacerEnable;
for (timeout = 0; timeout < 20; timeout++) {
- if (fpga->acqControl & DAQBOARD2000_AcqLogicScanning) {
+ if (fpga->acqControl & DAQBOARD2000_AcqLogicScanning)
break;
- }
/* udelay(2); */
}
for (timeout = 0; timeout < 20; timeout++) {
@@ -465,9 +462,8 @@ static int daqboard2000_ao_insn_read(struct comedi_device *dev,
int i;
int chan = CR_CHAN(insn->chanspec);
- for (i = 0; i < insn->n; i++) {
+ for (i = 0; i < insn->n; i++)
data[i] = devpriv->ao_readback[chan];
- }
return i;
}
@@ -490,9 +486,8 @@ static int daqboard2000_ao_insn_write(struct comedi_device *dev,
/* fpga->dacControl = (chan + 2) * 0x0010 | 0x0001; udelay(1000); */
fpga->dacSetting[chan] = data[i];
for (timeout = 0; timeout < 20; timeout++) {
- if ((fpga->dacControl & ((chan + 1) * 0x0010)) == 0) {
+ if ((fpga->dacControl & ((chan + 1) * 0x0010)) == 0)
break;
- }
/* udelay(2); */
}
devpriv->ao_readback[chan] = data[i];
@@ -507,7 +502,7 @@ static int daqboard2000_ao_insn_write(struct comedi_device *dev,
static void daqboard2000_resetLocalBus(struct comedi_device *dev)
{
- printk("daqboard2000_resetLocalBus\n");
+ dev_dbg(dev->hw_dev, "daqboard2000_resetLocalBus\n");
writel(DAQBOARD2000_SECRLocalBusHi, devpriv->plx + 0x6c);
udelay(10000);
writel(DAQBOARD2000_SECRLocalBusLo, devpriv->plx + 0x6c);
@@ -516,7 +511,7 @@ static void daqboard2000_resetLocalBus(struct comedi_device *dev)
static void daqboard2000_reloadPLX(struct comedi_device *dev)
{
- printk("daqboard2000_reloadPLX\n");
+ dev_dbg(dev->hw_dev, "daqboard2000_reloadPLX\n");
writel(DAQBOARD2000_SECRReloadLo, devpriv->plx + 0x6c);
udelay(10000);
writel(DAQBOARD2000_SECRReloadHi, devpriv->plx + 0x6c);
@@ -527,7 +522,7 @@ static void daqboard2000_reloadPLX(struct comedi_device *dev)
static void daqboard2000_pulseProgPin(struct comedi_device *dev)
{
- printk("daqboard2000_pulseProgPin 1\n");
+ dev_dbg(dev->hw_dev, "daqboard2000_pulseProgPin 1\n");
writel(DAQBOARD2000_SECRProgPinHi, devpriv->plx + 0x6c);
udelay(10000);
writel(DAQBOARD2000_SECRProgPinLo, devpriv->plx + 0x6c);
@@ -579,14 +574,14 @@ static int initialize_daqboard2000(struct comedi_device *dev,
secr = readl(devpriv->plx + 0x6c);
if (!(secr & DAQBOARD2000_EEPROM_PRESENT)) {
#ifdef DEBUG_EEPROM
- printk("no serial eeprom\n");
+ dev_dbg(dev->hw_dev, "no serial eeprom\n");
#endif
return -EIO;
}
for (retry = 0; retry < 3; retry++) {
#ifdef DEBUG_EEPROM
- printk("Programming EEPROM try %x\n", retry);
+ dev_dbg(dev->hw_dev, "Programming EEPROM try %x\n", retry);
#endif
daqboard2000_resetLocalBus(dev);
@@ -597,7 +592,8 @@ static int initialize_daqboard2000(struct comedi_device *dev,
if (cpld_array[i] == 0xff
&& cpld_array[i + 1] == 0x20) {
#ifdef DEBUG_EEPROM
- printk("Preamble found at %d\n", i);
+ dev_dbg(dev->hw_dev, "Preamble found at %d\n",
+ i);
#endif
break;
}
@@ -605,13 +601,12 @@ static int initialize_daqboard2000(struct comedi_device *dev,
for (; i < len; i += 2) {
int data =
(cpld_array[i] << 8) + cpld_array[i + 1];
- if (!daqboard2000_writeCPLD(dev, data)) {
+ if (!daqboard2000_writeCPLD(dev, data))
break;
- }
}
if (i >= len) {
#ifdef DEBUG_EEPROM
- printk("Programmed\n");
+ dev_dbg(dev->hw_dev, "Programmed\n");
#endif
daqboard2000_resetLocalBus(dev);
daqboard2000_reloadPLX(dev);
@@ -658,9 +653,8 @@ static void daqboard2000_activateReferenceDacs(struct comedi_device *dev)
/* Set the + reference dac value in the FPGA */
fpga->refDacs = 0x80 | DAQBOARD2000_PosRefDacSelect;
for (timeout = 0; timeout < 20; timeout++) {
- if ((fpga->dacControl & DAQBOARD2000_RefBusy) == 0) {
+ if ((fpga->dacControl & DAQBOARD2000_RefBusy) == 0)
break;
- }
udelay(2);
}
/* printk("DAQBOARD2000_PosRefDacSelect %d\n", timeout);*/
@@ -668,9 +662,8 @@ static void daqboard2000_activateReferenceDacs(struct comedi_device *dev)
/* Set the - reference dac value in the FPGA */
fpga->refDacs = 0x80 | DAQBOARD2000_NegRefDacSelect;
for (timeout = 0; timeout < 20; timeout++) {
- if ((fpga->dacControl & DAQBOARD2000_RefBusy) == 0) {
+ if ((fpga->dacControl & DAQBOARD2000_RefBusy) == 0)
break;
- }
udelay(2);
}
/* printk("DAQBOARD2000_NegRefDacSelect %d\n", timeout);*/
@@ -737,15 +730,13 @@ static int daqboard2000_attach(struct comedi_device *dev,
unsigned int aux_len;
int bus, slot;
- printk("comedi%d: daqboard2000:", dev->minor);
-
bus = it->options[0];
slot = it->options[1];
result = alloc_private(dev, sizeof(struct daqboard2000_private));
- if (result < 0) {
+ if (result < 0)
return -ENOMEM;
- }
+
for (card = pci_get_device(0x1616, 0x0409, NULL);
card != NULL; card = pci_get_device(0x1616, 0x0409, card)) {
if (bus || slot) {
@@ -759,10 +750,10 @@ static int daqboard2000_attach(struct comedi_device *dev,
}
if (!card) {
if (bus || slot)
- printk(" no daqboard2000 found at bus/slot: %d/%d\n",
- bus, slot);
+ dev_err(dev->hw_dev, "no daqboard2000 found at bus/slot: %d/%d\n",
+ bus, slot);
else
- printk(" no daqboard2000 found\n");
+ dev_err(dev->hw_dev, "no daqboard2000 found\n");
return -EIO;
} else {
u32 id;
@@ -772,7 +763,8 @@ static int daqboard2000_attach(struct comedi_device *dev,
subsystem_device << 16) | card->subsystem_vendor;
for (i = 0; i < n_boardtypes; i++) {
if (boardtypes[i].id == id) {
- printk(" %s", boardtypes[i].name);
+ dev_dbg(dev->hw_dev, "%s\n",
+ boardtypes[i].name);
dev->board_ptr = boardtypes + i;
}
}
@@ -786,7 +778,7 @@ static int daqboard2000_attach(struct comedi_device *dev,
result = comedi_pci_enable(card, "daqboard2000");
if (result < 0) {
- printk(" failed to enable PCI device and request regions\n");
+ dev_err(dev->hw_dev, "failed to enable PCI device and request regions\n");
return -EIO;
}
devpriv->got_regions = 1;
@@ -794,9 +786,8 @@ static int daqboard2000_attach(struct comedi_device *dev,
ioremap(pci_resource_start(card, 0), DAQBOARD2000_PLX_SIZE);
devpriv->daq =
ioremap(pci_resource_start(card, 2), DAQBOARD2000_DAQ_SIZE);
- if (!devpriv->plx || !devpriv->daq) {
+ if (!devpriv->plx || !devpriv->daq)
return -ENOMEM;
- }
result = alloc_subdevices(dev, 3);
if (result < 0)
@@ -817,7 +808,7 @@ static int daqboard2000_attach(struct comedi_device *dev,
if (aux_data && aux_len) {
result = initialize_daqboard2000(dev, aux_data, aux_len);
} else {
- printk("no FPGA initialization code, aborting\n");
+ dev_dbg(dev->hw_dev, "no FPGA initialization code, aborting\n");
result = -EIO;
}
if (result < 0)
@@ -857,30 +848,26 @@ static int daqboard2000_attach(struct comedi_device *dev,
result = subdev_8255_init(dev, s, daqboard2000_8255_cb,
(unsigned long)(dev->iobase + 0x40));
- printk("\n");
out:
return result;
}
static int daqboard2000_detach(struct comedi_device *dev)
{
- printk("comedi%d: daqboard2000: remove\n", dev->minor);
-
if (dev->subdevices)
subdev_8255_cleanup(dev, dev->subdevices + 2);
- if (dev->irq) {
+ if (dev->irq)
free_irq(dev->irq, dev);
- }
+
if (devpriv) {
if (devpriv->daq)
iounmap(devpriv->daq);
if (devpriv->plx)
iounmap(devpriv->plx);
if (devpriv->pci_dev) {
- if (devpriv->got_regions) {
+ if (devpriv->got_regions)
comedi_pci_disable(devpriv->pci_dev);
- }
pci_dev_put(devpriv->pci_dev);
}
}
diff --git a/drivers/staging/comedi/drivers/das08.c b/drivers/staging/comedi/drivers/das08.c
index 3141dc80fe74..c2dd0ed36a73 100644
--- a/drivers/staging/comedi/drivers/das08.c
+++ b/drivers/staging/comedi/drivers/das08.c
@@ -506,10 +506,8 @@ struct das08_board_struct das08_cs_boards[NUM_DAS08_CS_BOARDS] = {
#ifdef CONFIG_COMEDI_PCI
static DEFINE_PCI_DEVICE_TABLE(das08_pci_table) = {
- {
- PCI_VENDOR_ID_COMPUTERBOARDS, PCI_DEVICE_ID_PCIDAS08,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- 0}
+ { PCI_DEVICE(PCI_VENDOR_ID_COMPUTERBOARDS, PCI_DEVICE_ID_PCIDAS08) },
+ {0}
};
MODULE_DEVICE_TABLE(pci, das08_pci_table);
diff --git a/drivers/staging/comedi/drivers/das08_cs.c b/drivers/staging/comedi/drivers/das08_cs.c
index 6d91d3028178..4ad398aad72c 100644
--- a/drivers/staging/comedi/drivers/das08_cs.c
+++ b/drivers/staging/comedi/drivers/das08_cs.c
@@ -79,22 +79,20 @@ static int das08_cs_attach(struct comedi_device *dev,
if (ret < 0)
return ret;
- printk("comedi%d: das08_cs: ", dev->minor);
+ dev_info(dev->hw_dev, "comedi%d: das08_cs:\n", dev->minor);
/* deal with a pci board */
if (thisboard->bustype == pcmcia) {
if (link == NULL) {
- printk(" no pcmcia cards found\n");
+ dev_err(dev->hw_dev, "no pcmcia cards found\n");
return -EIO;
}
iobase = link->resource[0]->start;
} else {
- printk(" bug! board does not have PCMCIA bustype\n");
+ dev_err(dev->hw_dev, "bug! board does not have PCMCIA bustype\n");
return -EINVAL;
}
- printk("\n");
-
return das08_common_attach(dev, iobase);
}
diff --git a/drivers/staging/comedi/drivers/das16m1.c b/drivers/staging/comedi/drivers/das16m1.c
index a5ce3b2abe4a..5376e718e3d7 100644
--- a/drivers/staging/comedi/drivers/das16m1.c
+++ b/drivers/staging/comedi/drivers/das16m1.c
@@ -384,20 +384,20 @@ static int das16m1_cmd_exec(struct comedi_device *dev,
byte = 0;
/* if we are using external start trigger (also board dislikes having
* both start and conversion triggers external simultaneously) */
- if (cmd->start_src == TRIG_EXT && cmd->convert_src != TRIG_EXT) {
+ if (cmd->start_src == TRIG_EXT && cmd->convert_src != TRIG_EXT)
byte |= EXT_TRIG_BIT;
- }
+
outb(byte, dev->iobase + DAS16M1_CS);
/* clear interrupt bit */
outb(0, dev->iobase + DAS16M1_CLEAR_INTR);
/* enable interrupts and internal pacer */
devpriv->control_state &= ~PACER_MASK;
- if (cmd->convert_src == TRIG_TIMER) {
+ if (cmd->convert_src == TRIG_TIMER)
devpriv->control_state |= INT_PACER;
- } else {
+ else
devpriv->control_state |= EXT_PACER;
- }
+
devpriv->control_state |= INTE;
outb(devpriv->control_state, dev->iobase + DAS16M1_INTR_CONTROL);
@@ -531,9 +531,8 @@ static void munge_sample_array(short *array, unsigned int num_elements)
{
unsigned int i;
- for (i = 0; i < num_elements; i++) {
+ for (i = 0; i < num_elements; i++)
array[i] = munge_sample(array[i]);
- }
}
static void das16m1_handler(struct comedi_device *dev, unsigned int status)
@@ -668,25 +667,20 @@ static int das16m1_attach(struct comedi_device *dev,
iobase = it->options[0];
- printk("comedi%d: das16m1:", dev->minor);
-
ret = alloc_private(dev, sizeof(struct das16m1_private_struct));
if (ret < 0)
return ret;
dev->board_name = thisboard->name;
- printk(" io 0x%lx-0x%lx 0x%lx-0x%lx",
- iobase, iobase + DAS16M1_SIZE,
- iobase + DAS16M1_82C55, iobase + DAS16M1_82C55 + DAS16M1_SIZE2);
if (!request_region(iobase, DAS16M1_SIZE, driver_das16m1.driver_name)) {
- printk(" I/O port conflict\n");
+ comedi_error(dev, "I/O port conflict\n");
return -EIO;
}
if (!request_region(iobase + DAS16M1_82C55, DAS16M1_SIZE2,
driver_das16m1.driver_name)) {
release_region(iobase, DAS16M1_SIZE);
- printk(" I/O port conflict\n");
+ comedi_error(dev, "I/O port conflict\n");
return -EIO;
}
dev->iobase = iobase;
@@ -697,17 +691,17 @@ static int das16m1_attach(struct comedi_device *dev,
if (das16m1_irq_bits(irq) >= 0) {
ret = request_irq(irq, das16m1_interrupt, 0,
driver_das16m1.driver_name, dev);
- if (ret < 0) {
- printk(", irq unavailable\n");
+ if (ret < 0)
return ret;
- }
dev->irq = irq;
- printk(", irq %u\n", irq);
+ printk
+ ("irq %u\n", irq);
} else if (irq == 0) {
- printk(", no irq\n");
+ printk
+ (", no irq\n");
} else {
- printk(", invalid irq\n"
- " valid irqs are 2, 3, 5, 7, 10, 11, 12, or 15\n");
+ comedi_error(dev, "invalid irq\n"
+ " valid irqs are 2, 3, 5, 7, 10, 11, 12, or 15\n");
return -EINVAL;
}
@@ -771,7 +765,6 @@ static int das16m1_attach(struct comedi_device *dev,
static int das16m1_detach(struct comedi_device *dev)
{
- printk("comedi%d: das16m1: remove\n", dev->minor);
/* das16m1_reset(dev); */
diff --git a/drivers/staging/comedi/drivers/das1800.c b/drivers/staging/comedi/drivers/das1800.c
index a6df30b7fd7c..99ada5a53b9e 100644
--- a/drivers/staging/comedi/drivers/das1800.c
+++ b/drivers/staging/comedi/drivers/das1800.c
@@ -573,22 +573,23 @@ static int das1800_init_dma(struct comedi_device *dev, unsigned int dma0,
devpriv->dma_bits |= DMA_CH7_CH5;
break;
default:
- printk(" only supports dma channels 5 through 7\n"
- " Dual dma only allows the following combinations:\n"
- " dma 5,6 / 6,7 / or 7,5\n");
+ dev_err(dev->hw_dev, " only supports dma channels 5 through 7\n"
+ " Dual dma only allows the following combinations:\n"
+ " dma 5,6 / 6,7 / or 7,5\n");
return -EINVAL;
break;
}
if (request_dma(dma0, driver_das1800.driver_name)) {
- printk(" failed to allocate dma channel %i\n", dma0);
+ dev_err(dev->hw_dev, "failed to allocate dma channel %i\n",
+ dma0);
return -EINVAL;
}
devpriv->dma0 = dma0;
devpriv->dma_current = dma0;
if (dma1) {
if (request_dma(dma1, driver_das1800.driver_name)) {
- printk(" failed to allocate dma channel %i\n",
- dma1);
+ dev_err(dev->hw_dev, "failed to allocate dma channel %i\n",
+ dma1);
return -EINVAL;
}
devpriv->dma1 = dma1;
@@ -631,20 +632,20 @@ static int das1800_attach(struct comedi_device *dev,
if (alloc_private(dev, sizeof(struct das1800_private)) < 0)
return -ENOMEM;
- printk("comedi%d: %s: io 0x%lx", dev->minor, driver_das1800.driver_name,
- iobase);
+ printk(KERN_DEBUG "comedi%d: %s: io 0x%lx", dev->minor,
+ driver_das1800.driver_name, iobase);
if (irq) {
- printk(", irq %u", irq);
+ printk(KERN_CONT ", irq %u", irq);
if (dma0) {
- printk(", dma %u", dma0);
+ printk(KERN_CONT ", dma %u", dma0);
if (dma1)
- printk(" and %u", dma1);
+ printk(KERN_CONT " and %u", dma1);
}
}
- printk("\n");
+ printk(KERN_CONT "\n");
if (iobase == 0) {
- printk(" io base address required\n");
+ dev_err(dev->hw_dev, "io base address required\n");
return -EINVAL;
}
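
In the das1800_attach hunk just above, the multi-part startup banner stays a single console line but each fragment now carries a log level: the first printk gets KERN_DEBUG and the follow-on fragments are tagged KERN_CONT, which marks them as continuations of the previous record rather than new, unprefixed messages. A minimal sketch of that pattern with placeholder arguments — not part of the commit:

#include <linux/kernel.h>

static void example_banner(int minor, unsigned long iobase, unsigned int irq)
{
	/* first fragment sets the severity of the whole line */
	printk(KERN_DEBUG "comedi%d: example: io 0x%lx", minor, iobase);
	/* continuations are tagged so they append to the same line */
	if (irq)
		printk(KERN_CONT ", irq %u", irq);
	printk(KERN_CONT "\n");
}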
@@ -659,7 +660,7 @@ static int das1800_attach(struct comedi_device *dev,
board = das1800_probe(dev);
if (board < 0) {
- printk(" unable to determine board type\n");
+ dev_err(dev->hw_dev, "unable to determine board type\n");
return -ENODEV;
}
@@ -683,7 +684,8 @@ static int das1800_attach(struct comedi_device *dev,
if (irq) {
if (request_irq(irq, das1800_interrupt, 0,
driver_das1800.driver_name, dev)) {
- printk(" unable to allocate irq %u\n", irq);
+ dev_dbg(dev->hw_dev, "unable to allocate irq %u\n",
+ irq);
return -EINVAL;
}
}
@@ -712,7 +714,7 @@ static int das1800_attach(struct comedi_device *dev,
devpriv->irq_dma_bits |= 0x38;
break;
default:
- printk(" irq out of range\n");
+ dev_err(dev->hw_dev, "irq out of range\n");
return -EINVAL;
break;
}
@@ -813,8 +815,8 @@ static int das1800_detach(struct comedi_device *dev)
kfree(devpriv->ai_buf1);
}
- printk("comedi%d: %s: remove\n", dev->minor,
- driver_das1800.driver_name);
+ dev_dbg(dev->hw_dev, "comedi%d: %s: remove\n", dev->minor,
+ driver_das1800.driver_name);
return 0;
};
@@ -833,8 +835,8 @@ static int das1800_probe(struct comedi_device *dev)
case 0x3:
if (board == das1801st_da || board == das1802st_da ||
board == das1701st_da || board == das1702st_da) {
- printk(" Board model: %s\n",
- das1800_boards[board].name);
+ dev_dbg(dev->hw_dev, "Board model: %s\n",
+ das1800_boards[board].name);
return board;
}
printk
@@ -843,8 +845,8 @@ static int das1800_probe(struct comedi_device *dev)
break;
case 0x4:
if (board == das1802hr_da || board == das1702hr_da) {
- printk(" Board model: %s\n",
- das1800_boards[board].name);
+ dev_dbg(dev->hw_dev, "Board model: %s\n",
+ das1800_boards[board].name);
return board;
}
printk
@@ -854,8 +856,8 @@ static int das1800_probe(struct comedi_device *dev)
case 0x5:
if (board == das1801ao || board == das1802ao ||
board == das1701ao || board == das1702ao) {
- printk(" Board model: %s\n",
- das1800_boards[board].name);
+ dev_dbg(dev->hw_dev, "Board model: %s\n",
+ das1800_boards[board].name);
return board;
}
printk
@@ -864,18 +866,19 @@ static int das1800_probe(struct comedi_device *dev)
break;
case 0x6:
if (board == das1802hr || board == das1702hr) {
- printk(" Board model: %s\n",
- das1800_boards[board].name);
+ dev_dbg(dev->hw_dev, "Board model: %s\n",
+ das1800_boards[board].name);
return board;
}
- printk(" Board model (probed, not recommended): das-1802hr\n");
+ printk
+ (" Board model (probed, not recommended): das-1802hr\n");
return das1802hr;
break;
case 0x7:
if (board == das1801st || board == das1802st ||
board == das1701st || board == das1702st) {
- printk(" Board model: %s\n",
- das1800_boards[board].name);
+ dev_dbg(dev->hw_dev, "Board model: %s\n",
+ das1800_boards[board].name);
return board;
}
printk
@@ -884,8 +887,8 @@ static int das1800_probe(struct comedi_device *dev)
break;
case 0x8:
if (board == das1801hc || board == das1802hc) {
- printk(" Board model: %s\n",
- das1800_boards[board].name);
+ dev_dbg(dev->hw_dev, "Board model: %s\n",
+ das1800_boards[board].name);
return board;
}
printk
diff --git a/drivers/staging/comedi/drivers/das6402.c b/drivers/staging/comedi/drivers/das6402.c
index 6328f5280b66..f25684145e84 100644
--- a/drivers/staging/comedi/drivers/das6402.c
+++ b/drivers/staging/comedi/drivers/das6402.c
@@ -171,7 +171,7 @@ static irqreturn_t intr_handler(int irq, void *d)
struct comedi_subdevice *s = dev->subdevices;
if (!dev->attached || devpriv->das6402_ignoreirq) {
- printk("das6402: BUG: spurious interrupt\n");
+ dev_warn(dev->hw_dev, "BUG: spurious interrupt\n");
return IRQ_HANDLED;
}
#ifdef DEBUG
@@ -228,9 +228,7 @@ static int das6402_ai_cancel(struct comedi_device *dev,
*/
devpriv->das6402_ignoreirq = 1;
-#ifdef DEBUG
- printk("das6402: Stopping acquisition\n");
-#endif
+ dev_dbg(dev->hw_dev, "Stopping acquisition\n");
devpriv->das6402_ignoreirq = 1;
outb_p(0x02, dev->iobase + 10); /* disable external trigging */
outw_p(SCANL, dev->iobase + 2); /* resets the card fifo */
@@ -246,10 +244,7 @@ static int das6402_ai_mode2(struct comedi_device *dev,
struct comedi_subdevice *s, comedi_trig * it)
{
devpriv->das6402_ignoreirq = 1;
-
-#ifdef DEBUG
- printk("das6402: Starting acquisition\n");
-#endif
+ dev_dbg(dev->hw_dev, "Starting acquisition\n");
outb_p(0x03, dev->iobase + 10); /* enable external trigging */
outw_p(SCANL, dev->iobase + 2); /* resets the card fifo */
outb_p(IRQ | CONVSRC | BURSTEN | INTE, dev->iobase + 9);
@@ -329,10 +324,8 @@ static int das6402_attach(struct comedi_device *dev,
if (iobase == 0)
iobase = 0x300;
- printk("comedi%d: das6402: 0x%04lx", dev->minor, iobase);
-
if (!request_region(iobase, DAS6402_SIZE, "das6402")) {
- printk(" I/O port conflict\n");
+ dev_err(dev->hw_dev, "I/O port conflict\n");
return -EIO;
}
dev->iobase = iobase;
@@ -340,14 +333,12 @@ static int das6402_attach(struct comedi_device *dev,
/* should do a probe here */
irq = it->options[0];
- printk(" ( irq = %u )", irq);
+ dev_dbg(dev->hw_dev, "( irq = %u )\n", irq);
ret = request_irq(irq, intr_handler, 0, "das6402", dev);
- if (ret < 0) {
- printk("irq conflict\n");
+ if (ret < 0)
return ret;
- }
- dev->irq = irq;
+ dev->irq = irq;
ret = alloc_private(dev, sizeof(struct das6402_private));
if (ret < 0)
return ret;
diff --git a/drivers/staging/comedi/drivers/das800.c b/drivers/staging/comedi/drivers/das800.c
index 96d41ad76956..6e347b40fe61 100644
--- a/drivers/staging/comedi/drivers/das800.c
+++ b/drivers/staging/comedi/drivers/das800.c
@@ -296,47 +296,47 @@ static int das800_probe(struct comedi_device *dev)
switch (id_bits) {
case 0x0:
if (board == das800) {
- printk(" Board model: DAS-800\n");
+ dev_dbg(dev->hw_dev, "Board model: DAS-800\n");
return board;
}
if (board == ciodas800) {
- printk(" Board model: CIO-DAS800\n");
+ dev_dbg(dev->hw_dev, "Board model: CIO-DAS800\n");
return board;
}
- printk(" Board model (probed): DAS-800\n");
+ dev_dbg(dev->hw_dev, "Board model (probed): DAS-800\n");
return das800;
break;
case 0x2:
if (board == das801) {
- printk(" Board model: DAS-801\n");
+ dev_dbg(dev->hw_dev, "Board model: DAS-801\n");
return board;
}
if (board == ciodas801) {
- printk(" Board model: CIO-DAS801\n");
+ dev_dbg(dev->hw_dev, "Board model: CIO-DAS801\n");
return board;
}
- printk(" Board model (probed): DAS-801\n");
+ dev_dbg(dev->hw_dev, "Board model (probed): DAS-801\n");
return das801;
break;
case 0x3:
if (board == das802) {
- printk(" Board model: DAS-802\n");
+ dev_dbg(dev->hw_dev, "Board model: DAS-802\n");
return board;
}
if (board == ciodas802) {
- printk(" Board model: CIO-DAS802\n");
+ dev_dbg(dev->hw_dev, "Board model: CIO-DAS802\n");
return board;
}
if (board == ciodas80216) {
- printk(" Board model: CIO-DAS802/16\n");
+ dev_dbg(dev->hw_dev, "Board model: CIO-DAS802/16\n");
return board;
}
- printk(" Board model (probed): DAS-802\n");
+ dev_dbg(dev->hw_dev, "Board model (probed): DAS-802\n");
return das802;
break;
default:
- printk(" Board model: probe returned 0x%x (unknown)\n",
- id_bits);
+ dev_dbg(dev->hw_dev, "Board model: probe returned 0x%x (unknown)\n",
+ id_bits);
return board;
break;
}
@@ -466,42 +466,43 @@ static int das800_attach(struct comedi_device *dev, struct comedi_devconfig *it)
unsigned long irq_flags;
int board;
- printk("comedi%d: das800: io 0x%lx", dev->minor, iobase);
+ dev_info(dev->hw_dev, "comedi%d: das800: io 0x%lx\n", dev->minor,
+ iobase);
if (irq)
- printk(", irq %u", irq);
- printk("\n");
+ dev_dbg(dev->hw_dev, "irq %u\n", irq);
/* allocate and initialize dev->private */
if (alloc_private(dev, sizeof(struct das800_private)) < 0)
return -ENOMEM;
if (iobase == 0) {
- printk("io base address required for das800\n");
+ dev_err(dev->hw_dev, "io base address required for das800\n");
return -EINVAL;
}
/* check if io addresses are available */
if (!request_region(iobase, DAS800_SIZE, "das800")) {
- printk("I/O port conflict\n");
+ dev_err(dev->hw_dev, "I/O port conflict\n");
return -EIO;
}
dev->iobase = iobase;
board = das800_probe(dev);
if (board < 0) {
- printk("unable to determine board type\n");
+ dev_dbg(dev->hw_dev, "unable to determine board type\n");
return -ENODEV;
}
dev->board_ptr = das800_boards + board;
/* grab our IRQ */
if (irq == 1 || irq > 7) {
- printk("irq out of range\n");
+ dev_err(dev->hw_dev, "irq out of range\n");
return -EINVAL;
}
if (irq) {
if (request_irq(irq, das800_interrupt, 0, "das800", dev)) {
- printk("unable to allocate irq %u\n", irq);
+ dev_err(dev->hw_dev, "unable to allocate irq %u\n",
+ irq);
return -EINVAL;
}
}
@@ -557,7 +558,7 @@ static int das800_attach(struct comedi_device *dev, struct comedi_devconfig *it)
static int das800_detach(struct comedi_device *dev)
{
- printk("comedi%d: das800: remove\n", dev->minor);
+ dev_info(dev->hw_dev, "comedi%d: das800: remove\n", dev->minor);
/* only free stuff if it has been allocated by _attach */
if (dev->iobase)
diff --git a/drivers/staging/comedi/drivers/dt3000.c b/drivers/staging/comedi/drivers/dt3000.c
index 6170f7bac46e..0a7979e52999 100644
--- a/drivers/staging/comedi/drivers/dt3000.c
+++ b/drivers/staging/comedi/drivers/dt3000.c
@@ -352,7 +352,8 @@ static int dt3k_send_cmd(struct comedi_device *dev, unsigned int cmd)
if ((status & DT3000_COMPLETION_MASK) == DT3000_NOERROR)
return 0;
- printk("dt3k_send_cmd() timeout/error status=0x%04x\n", status);
+ dev_dbg(dev->hw_dev, "dt3k_send_cmd() timeout/error status=0x%04x\n",
+ status);
return -ETIME;
}
@@ -383,7 +384,7 @@ static void dt3k_writesingle(struct comedi_device *dev, unsigned int subsys,
dt3k_send_cmd(dev, CMD_WRITESINGLE);
}
-static int debug_n_ints = 0;
+static int debug_n_ints;
/* FIXME! Assumes shared interrupt is for this card. */
/* What's this debug_n_ints stuff? Obviously needs some work... */
@@ -429,12 +430,12 @@ static char *intr_flags[] = {
static void debug_intr_flags(unsigned int flags)
{
int i;
- printk("dt3k: intr_flags:");
+ printk(KERN_DEBUG "dt3k: intr_flags:");
for (i = 0; i < 8; i++) {
if (flags & (1 << i))
- printk(" %s", intr_flags[i]);
+ printk(KERN_CONT " %s", intr_flags[i]);
}
- printk("\n");
+ printk(KERN_CONT "\n");
}
#endif
@@ -452,7 +453,7 @@ static void dt3k_ai_empty_fifo(struct comedi_device *dev,
if (count < 0)
count += AI_FIFO_DEPTH;
- printk("reading %d samples\n", count);
+ dev_dbg(dev->hw_dev, "reading %d samples\n", count);
rear = devpriv->ai_rear;
@@ -640,7 +641,7 @@ static int dt3k_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
int ret;
unsigned int mode;
- printk("dt3k_ai_cmd:\n");
+ dev_dbg(dev->hw_dev, "dt3k_ai_cmd:\n");
for (i = 0; i < cmd->chanlist_len; i++) {
chan = CR_CHAN(cmd->chanlist[i]);
range = CR_RANGE(cmd->chanlist[i]);
@@ -651,15 +652,15 @@ static int dt3k_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
aref = CR_AREF(cmd->chanlist[0]);
writew(cmd->scan_end_arg, devpriv->io_addr + DPR_Params(0));
- printk("param[0]=0x%04x\n", cmd->scan_end_arg);
+ dev_dbg(dev->hw_dev, "param[0]=0x%04x\n", cmd->scan_end_arg);
if (cmd->convert_src == TRIG_TIMER) {
divider = dt3k_ns_to_timer(50, &cmd->convert_arg,
cmd->flags & TRIG_ROUND_MASK);
writew((divider >> 16), devpriv->io_addr + DPR_Params(1));
- printk("param[1]=0x%04x\n", divider >> 16);
+ dev_dbg(dev->hw_dev, "param[1]=0x%04x\n", divider >> 16);
writew((divider & 0xffff), devpriv->io_addr + DPR_Params(2));
- printk("param[2]=0x%04x\n", divider & 0xffff);
+ dev_dbg(dev->hw_dev, "param[2]=0x%04x\n", divider & 0xffff);
} else {
/* not supported */
}
@@ -668,21 +669,21 @@ static int dt3k_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
tscandiv = dt3k_ns_to_timer(100, &cmd->scan_begin_arg,
cmd->flags & TRIG_ROUND_MASK);
writew((tscandiv >> 16), devpriv->io_addr + DPR_Params(3));
- printk("param[3]=0x%04x\n", tscandiv >> 16);
+ dev_dbg(dev->hw_dev, "param[3]=0x%04x\n", tscandiv >> 16);
writew((tscandiv & 0xffff), devpriv->io_addr + DPR_Params(4));
- printk("param[4]=0x%04x\n", tscandiv & 0xffff);
+ dev_dbg(dev->hw_dev, "param[4]=0x%04x\n", tscandiv & 0xffff);
} else {
/* not supported */
}
mode = DT3000_AD_RETRIG_INTERNAL | 0 | 0;
writew(mode, devpriv->io_addr + DPR_Params(5));
- printk("param[5]=0x%04x\n", mode);
+ dev_dbg(dev->hw_dev, "param[5]=0x%04x\n", mode);
writew(aref == AREF_DIFF, devpriv->io_addr + DPR_Params(6));
- printk("param[6]=0x%04x\n", aref == AREF_DIFF);
+ dev_dbg(dev->hw_dev, "param[6]=0x%04x\n", aref == AREF_DIFF);
writew(AI_FIFO_DEPTH / 2, devpriv->io_addr + DPR_Params(7));
- printk("param[7]=0x%04x\n", AI_FIFO_DEPTH / 2);
+ dev_dbg(dev->hw_dev, "param[7]=0x%04x\n", AI_FIFO_DEPTH / 2);
writew(SUBS_AI, devpriv->io_addr + DPR_SubSys);
ret = dt3k_send_cmd(dev, CMD_CONFIG);
@@ -848,7 +849,7 @@ static int dt3000_attach(struct comedi_device *dev, struct comedi_devconfig *it)
int bus, slot;
int ret = 0;
- printk("dt3000:");
+ dev_dbg(dev->hw_dev, "dt3000:\n");
bus = it->options[0];
slot = it->options[1];
@@ -860,7 +861,7 @@ static int dt3000_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (ret < 0)
return ret;
if (ret == 0) {
- printk(" no DT board found\n");
+ dev_warn(dev->hw_dev, "no DT board found\n");
return -ENODEV;
}
@@ -868,7 +869,8 @@ static int dt3000_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (request_irq(devpriv->pci_dev->irq, dt3k_interrupt, IRQF_SHARED,
"dt3000", dev)) {
- printk(" unable to allocate IRQ %u\n", devpriv->pci_dev->irq);
+ dev_err(dev->hw_dev, "unable to allocate IRQ %u\n",
+ devpriv->pci_dev->irq);
return -EINVAL;
}
dev->irq = devpriv->pci_dev->irq;
diff --git a/drivers/staging/comedi/drivers/jr3_pci.c b/drivers/staging/comedi/drivers/jr3_pci.c
index 8d98cf412709..6a79ba10630d 100644
--- a/drivers/staging/comedi/drivers/jr3_pci.c
+++ b/drivers/staging/comedi/drivers/jr3_pci.c
@@ -71,18 +71,12 @@ static struct comedi_driver driver_jr3_pci = {
};
static DEFINE_PCI_DEVICE_TABLE(jr3_pci_pci_table) = {
- {
- PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_1_CHANNEL,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_1_CHANNEL_NEW,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_2_CHANNEL,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_3_CHANNEL,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_4_CHANNEL,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- 0}
+ { PCI_DEVICE(PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_1_CHANNEL) },
+ { PCI_DEVICE(PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_1_CHANNEL_NEW) },
+ { PCI_DEVICE(PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_2_CHANNEL) },
+ { PCI_DEVICE(PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_3_CHANNEL) },
+ { PCI_DEVICE(PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_4_CHANNEL) },
+ {0}
};
MODULE_DEVICE_TABLE(pci, jr3_pci_pci_table);
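Several tables in this commit (jr3_pci above, ke_counter and me_daq below) are rewritten with the PCI_DEVICE() helper, which fills in the vendor/device fields and wildcards the subsystem IDs. A sketch with made-up IDs:

#include <linux/module.h>
#include <linux/pci.h>

#define EXAMPLE_VENDOR_ID	0x1234	/* hypothetical */
#define EXAMPLE_DEVICE_ID	0x5678	/* hypothetical */

static DEFINE_PCI_DEVICE_TABLE(example_pci_table) = {
	{ PCI_DEVICE(EXAMPLE_VENDOR_ID, EXAMPLE_DEVICE_ID) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, example_pci_table);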
@@ -378,14 +372,14 @@ static int jr3_pci_open(struct comedi_device *dev)
int i;
struct jr3_pci_dev_private *devpriv = dev->private;
- printk("jr3_pci_open\n");
+ dev_dbg(dev->hw_dev, "jr3_pci_open\n");
for (i = 0; i < devpriv->n_channels; i++) {
struct jr3_pci_subdev_private *p;
p = dev->subdevices[i].private;
if (p) {
- printk("serial: %p %d (%d)\n", p, p->serial_no,
- p->channel_no);
+ dev_dbg(dev->hw_dev, "serial: %p %d (%d)\n", p,
+ p->serial_no, p->channel_no);
}
}
return 0;
@@ -463,8 +457,8 @@ static int jr3_download_firmware(struct comedi_device *dev, const u8 * data,
break;
more = more
&& read_idm_word(data, size, &pos, &addr);
- printk("Loading#%d %4.4x bytes at %4.4x\n", i,
- count, addr);
+ dev_dbg(dev->hw_dev, "Loading#%d %4.4x bytes at %4.4x\n",
+ i, count, addr);
while (more && count > 0) {
if (addr & 0x4000) {
/* 16 bit data, never seen in real life!! */
@@ -599,24 +593,24 @@ static struct poll_delay_t jr3_pci_poll_subdevice(struct comedi_subdevice *s)
min_full_scale =
get_min_full_scales(channel);
printk("Obtained Min. Full Scales:\n");
- printk("%i ", (min_full_scale).fx);
- printk("%i ", (min_full_scale).fy);
- printk("%i ", (min_full_scale).fz);
- printk("%i ", (min_full_scale).mx);
- printk("%i ", (min_full_scale).my);
- printk("%i ", (min_full_scale).mz);
- printk("\n");
+ printk(KERN_DEBUG "%i ", (min_full_scale).fx);
+ printk(KERN_CONT "%i ", (min_full_scale).fy);
+ printk(KERN_CONT "%i ", (min_full_scale).fz);
+ printk(KERN_CONT "%i ", (min_full_scale).mx);
+ printk(KERN_CONT "%i ", (min_full_scale).my);
+ printk(KERN_CONT "%i ", (min_full_scale).mz);
+ printk(KERN_CONT "\n");
max_full_scale =
get_max_full_scales(channel);
printk("Obtained Max. Full Scales:\n");
- printk("%i ", (max_full_scale).fx);
- printk("%i ", (max_full_scale).fy);
- printk("%i ", (max_full_scale).fz);
- printk("%i ", (max_full_scale).mx);
- printk("%i ", (max_full_scale).my);
- printk("%i ", (max_full_scale).mz);
- printk("\n");
+ printk(KERN_DEBUG "%i ", (max_full_scale).fx);
+ printk(KERN_CONT "%i ", (max_full_scale).fy);
+ printk(KERN_CONT "%i ", (max_full_scale).fz);
+ printk(KERN_CONT "%i ", (max_full_scale).mx);
+ printk(KERN_CONT "%i ", (max_full_scale).my);
+ printk(KERN_CONT "%i ", (max_full_scale).mz);
+ printk(KERN_CONT "\n");
set_full_scales(channel,
max_full_scale);
@@ -779,14 +773,12 @@ static int jr3_pci_attach(struct comedi_device *dev,
int opt_bus, opt_slot, i;
struct jr3_pci_dev_private *devpriv;
- printk("comedi%d: jr3_pci\n", dev->minor);
-
opt_bus = it->options[0];
opt_slot = it->options[1];
if (sizeof(struct jr3_channel) != 0xc00) {
- printk("sizeof(struct jr3_channel) = %x [expected %x]\n",
- (unsigned)sizeof(struct jr3_channel), 0xc00);
+ dev_err(dev->hw_dev, "sizeof(struct jr3_channel) = %x [expected %x]\n",
+ (unsigned)sizeof(struct jr3_channel), 0xc00);
return -EINVAL;
}
@@ -840,7 +832,7 @@ static int jr3_pci_attach(struct comedi_device *dev,
}
}
if (!card) {
- printk(" no jr3_pci found\n");
+ dev_err(dev->hw_dev, "no jr3_pci found\n");
return -EIO;
} else {
devpriv->pci_dev = card;
@@ -875,10 +867,10 @@ static int jr3_pci_attach(struct comedi_device *dev,
p = dev->subdevices[i].private;
p->channel = &devpriv->iobase->channel[i].data;
- printk("p->channel %p %p (%tx)\n",
- p->channel, devpriv->iobase,
- ((char *)(p->channel) -
- (char *)(devpriv->iobase)));
+ dev_dbg(dev->hw_dev, "p->channel %p %p (%tx)\n",
+ p->channel, devpriv->iobase,
+ ((char *)(p->channel) -
+ (char *)(devpriv->iobase)));
p->channel_no = i;
for (j = 0; j < 8; j++) {
int k;
@@ -916,7 +908,7 @@ static int jr3_pci_attach(struct comedi_device *dev,
devpriv->iobase->channel[0].reset = 0;
result = comedi_load_firmware(dev, "jr3pci.idm", jr3_download_firmware);
- printk("Firmare load %d\n", result);
+ dev_dbg(dev->hw_dev, "Firmware load %d\n", result);
if (result < 0)
goto out;
@@ -934,9 +926,9 @@ static int jr3_pci_attach(struct comedi_device *dev,
*/
msleep_interruptible(25);
for (i = 0; i < 0x18; i++) {
- printk("%c",
- get_u16(&devpriv->iobase->channel[0].
- data.copyright[i]) >> 8);
+ dev_dbg(dev->hw_dev, "%c\n",
+ get_u16(&devpriv->iobase->channel[0].
+ data.copyright[i]) >> 8);
}
/* Start card timer */
@@ -963,7 +955,6 @@ static int jr3_pci_detach(struct comedi_device *dev)
int i;
struct jr3_pci_dev_private *devpriv = dev->private;
- printk("comedi%d: jr3_pci: remove\n", dev->minor);
if (devpriv) {
del_timer_sync(&devpriv->timer);
diff --git a/drivers/staging/comedi/drivers/ke_counter.c b/drivers/staging/comedi/drivers/ke_counter.c
index 286093bca3fb..4e9e9a078652 100644
--- a/drivers/staging/comedi/drivers/ke_counter.c
+++ b/drivers/staging/comedi/drivers/ke_counter.c
@@ -52,10 +52,8 @@ static int cnt_attach(struct comedi_device *dev, struct comedi_devconfig *it);
static int cnt_detach(struct comedi_device *dev);
static DEFINE_PCI_DEVICE_TABLE(cnt_pci_table) = {
- {
- PCI_VENDOR_ID_KOLTER, CNT_CARD_DEVICE_ID, PCI_ANY_ID,
- PCI_ANY_ID, 0, 0, 0}, {
- 0}
+ { PCI_DEVICE(PCI_VENDOR_ID_KOLTER, CNT_CARD_DEVICE_ID) },
+ {0}
};
MODULE_DEVICE_TABLE(pci, cnt_pci_table);
diff --git a/drivers/staging/comedi/drivers/me_daq.c b/drivers/staging/comedi/drivers/me_daq.c
index cda4b224b30f..8b812e41c52b 100644
--- a/drivers/staging/comedi/drivers/me_daq.c
+++ b/drivers/staging/comedi/drivers/me_daq.c
@@ -188,12 +188,9 @@ static const struct comedi_lrange me2600_ao_range = {
};
static DEFINE_PCI_DEVICE_TABLE(me_pci_table) = {
- {
- PCI_VENDOR_ID_MEILHAUS, ME2600_DEVICE_ID, PCI_ANY_ID,
- PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_MEILHAUS, ME2000_DEVICE_ID, PCI_ANY_ID,
- PCI_ANY_ID, 0, 0, 0}, {
- 0}
+ { PCI_DEVICE(PCI_VENDOR_ID_MEILHAUS, ME2600_DEVICE_ID) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MEILHAUS, ME2000_DEVICE_ID) },
+ {0}
};
MODULE_DEVICE_TABLE(pci, me_pci_table);
diff --git a/drivers/staging/comedi/drivers/ni_at_a2150.c b/drivers/staging/comedi/drivers/ni_at_a2150.c
index 32e675e3f0b9..c25e44c1905e 100644
--- a/drivers/staging/comedi/drivers/ni_at_a2150.c
+++ b/drivers/staging/comedi/drivers/ni_at_a2150.c
@@ -731,9 +731,8 @@ static int a2150_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
outw(trigger_bits, dev->iobase + TRIGGER_REG);
/* start acquisition for soft trigger */
- if (cmd->start_src == TRIG_NOW) {
+ if (cmd->start_src == TRIG_NOW)
outw(0, dev->iobase + FIFO_START_REG);
- }
#ifdef A2150_DEBUG
ni_dump_regs(dev);
#endif
@@ -860,11 +859,10 @@ static int a2150_get_timing(struct comedi_device *dev, unsigned int *period,
case TRIG_ROUND_NEAREST:
default:
/* if least upper bound is better approximation */
- if (lub - *period < *period - glb) {
+ if (lub - *period < *period - glb)
*period = lub;
- } else {
+ else
*period = glb;
- }
break;
case TRIG_ROUND_UP:
*period = lub;
diff --git a/drivers/staging/comedi/drivers/ni_daq_dio24.c b/drivers/staging/comedi/drivers/ni_daq_dio24.c
index 49b824c7bd2e..c0423a8c3e36 100644
--- a/drivers/staging/comedi/drivers/ni_daq_dio24.c
+++ b/drivers/staging/comedi/drivers/ni_daq_dio24.c
@@ -52,7 +52,7 @@ the PCMCIA interface.
#include <pcmcia/cisreg.h>
#include <pcmcia/ds.h>
-static struct pcmcia_device *pcmcia_cur_dev = NULL;
+static struct pcmcia_device *pcmcia_cur_dev;
#define DIO24_SIZE 4 /* size of io region used by board */
@@ -133,22 +133,19 @@ static int dio24_attach(struct comedi_device *dev, struct comedi_devconfig *it)
#endif
break;
default:
- printk("bug! couldn't determine board type\n");
+ pr_err("bug! couldn't determine board type\n");
return -EINVAL;
break;
}
- printk("comedi%d: ni_daq_dio24: %s, io 0x%lx", dev->minor,
- thisboard->name, iobase);
+ pr_debug("comedi%d: ni_daq_dio24: %s, io 0x%lx", dev->minor,
+ thisboard->name, iobase);
#ifdef incomplete
- if (irq) {
- printk(", irq %u", irq);
- }
+ if (irq)
+ pr_debug("irq %u\n", irq);
#endif
- printk("\n");
-
if (iobase == 0) {
- printk("io base address is zero!\n");
+ pr_err("io base address is zero!\n");
return -EINVAL;
}
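Where no struct device is available yet (as in the PCMCIA attach path above), the commit falls back to pr_err()/pr_debug(); combined with a pr_fmt() definition these still carry a module prefix. A brief sketch (hypothetical function):

/* pr_fmt() must be defined before any include so every pr_*() call
 * in the file picks up the same prefix. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/errno.h>

static int example_check_iobase(unsigned long iobase)
{
	if (iobase == 0) {
		pr_err("io base address is zero!\n");
		return -EINVAL;
	}
	pr_debug("using io base 0x%lx\n", iobase);
	return 0;
}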
@@ -173,7 +170,7 @@ static int dio24_attach(struct comedi_device *dev, struct comedi_devconfig *it)
static int dio24_detach(struct comedi_device *dev)
{
- printk("comedi%d: ni_daq_dio24: remove\n", dev->minor);
+ dev_info(dev->hw_dev, "comedi%d: ni_daq_dio24: remove\n", dev->minor);
if (dev->subdevices)
subdev_8255_cleanup(dev, dev->subdevices + 0);
diff --git a/drivers/staging/comedi/drivers/ni_labpc_cs.c b/drivers/staging/comedi/drivers/ni_labpc_cs.c
index 832a5178b638..ff3840544dd4 100644
--- a/drivers/staging/comedi/drivers/ni_labpc_cs.c
+++ b/drivers/staging/comedi/drivers/ni_labpc_cs.c
@@ -145,7 +145,7 @@ static int labpc_attach(struct comedi_device *dev, struct comedi_devconfig *it)
irq = link->irq;
break;
default:
- printk("bug! couldn't determine board type\n");
+ pr_err("bug! couldn't determine board type\n");
return -EINVAL;
break;
}
diff --git a/drivers/staging/comedi/drivers/ni_pcimio.c b/drivers/staging/comedi/drivers/ni_pcimio.c
index 9148abdad074..0f0d995f137c 100644
--- a/drivers/staging/comedi/drivers/ni_pcimio.c
+++ b/drivers/staging/comedi/drivers/ni_pcimio.c
@@ -1470,7 +1470,7 @@ static void m_series_stc_writew(struct comedi_device *dev, uint16_t data,
/* FIXME: DIO_Output_Register (16 bit reg) is replaced by M_Offset_Static_Digital_Output (32 bit)
and M_Offset_SCXI_Serial_Data_Out (8 bit) */
default:
- printk("%s: bug! unhandled register=0x%x in switch.\n",
+ printk(KERN_WARNING "%s: bug! unhandled register=0x%x in switch.\n",
__func__, reg);
BUG();
return;
@@ -1505,7 +1505,7 @@ static uint16_t m_series_stc_readw(struct comedi_device *dev, int reg)
offset = M_Offset_G01_Status;
break;
default:
- printk("%s: bug! unhandled register=0x%x in switch.\n",
+ printk(KERN_WARNING "%s: bug! unhandled register=0x%x in switch.\n",
__func__, reg);
BUG();
return 0;
@@ -1547,7 +1547,7 @@ static void m_series_stc_writel(struct comedi_device *dev, uint32_t data,
offset = M_Offset_G1_Load_B;
break;
default:
- printk("%s: bug! unhandled register=0x%x in switch.\n",
+ printk(KERN_WARNING "%s: bug! unhandled register=0x%x in switch.\n",
__func__, reg);
BUG();
return;
@@ -1573,7 +1573,7 @@ static uint32_t m_series_stc_readl(struct comedi_device *dev, int reg)
offset = M_Offset_G1_Save;
break;
default:
- printk("%s: bug! unhandled register=0x%x in switch.\n",
+ printk(KERN_WARNING "%s: bug! unhandled register=0x%x in switch.\n",
__func__, reg);
BUG();
return 0;
@@ -1632,9 +1632,8 @@ static void m_series_init_eeprom_buffer(struct comedi_device *dev)
}
devpriv->serial_number = be32_to_cpu(devpriv->serial_number);
- for (i = 0; i < M_SERIES_EEPROM_SIZE; ++i) {
+ for (i = 0; i < M_SERIES_EEPROM_SIZE; ++i)
devpriv->eeprom_buffer[i] = ni_readb(Start_Cal_EEPROM + i);
- }
writel(old_iodwbsr1_bits, devpriv->mite->mite_io_addr + MITE_IODWBSR_1);
writel(old_iodwbsr_bits, devpriv->mite->mite_io_addr + MITE_IODWBSR);
@@ -1665,9 +1664,9 @@ static void init_6143(struct comedi_device *dev)
static int pcimio_detach(struct comedi_device *dev)
{
mio_common_detach(dev);
- if (dev->irq) {
+ if (dev->irq)
free_irq(dev->irq, dev);
- }
+
if (dev->private) {
mite_free_ring(devpriv->ai_mite_ring);
mite_free_ring(devpriv->ao_mite_ring);
@@ -1685,7 +1684,7 @@ static int pcimio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
int ret;
- printk("comedi%d: ni_pcimio:", dev->minor);
+ dev_info(dev->hw_dev, "comedi%d: ni_pcimio:\n", dev->minor);
ret = ni_alloc_private(dev);
if (ret < 0)
@@ -1695,7 +1694,7 @@ static int pcimio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (ret < 0)
return ret;
- printk(" %s", boardtype.name);
+ dev_dbg(dev->hw_dev, "%s\n", boardtype.name);
dev->board_name = boardtype.name;
if (boardtype.reg_type & ni_reg_m_series_mask) {
@@ -1712,7 +1711,7 @@ static int pcimio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
ret = mite_setup(devpriv->mite);
if (ret < 0) {
- printk(" error setting up mite\n");
+ pr_warn("error setting up mite\n");
return ret;
}
comedi_set_hw_dev(dev, &devpriv->mite->pcidev->dev);
@@ -1740,13 +1739,13 @@ static int pcimio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
dev->irq = mite_irq(devpriv->mite);
if (dev->irq == 0) {
- printk(" unknown irq (bad)\n");
+ pr_warn("unknown irq (bad)\n");
} else {
- printk(" ( irq = %u )", dev->irq);
+ pr_debug("( irq = %u )\n", dev->irq);
ret = request_irq(dev->irq, ni_E_interrupt, NI_E_IRQ_FLAGS,
DRV_NAME, dev);
if (ret < 0) {
- printk(" irq not available\n");
+ pr_warn("irq not available\n");
dev->irq = 0;
}
}
@@ -1787,7 +1786,7 @@ static int pcimio_find_device(struct comedi_device *dev, int bus, int slot)
}
}
}
- printk("no device found\n");
+ pr_warn("no device found\n");
mite_list_devices();
return -EIO;
}
diff --git a/drivers/staging/comedi/drivers/pcl816.c b/drivers/staging/comedi/drivers/pcl816.c
index 0b9bee36eb5f..96cd7ec2ad53 100644
--- a/drivers/staging/comedi/drivers/pcl816.c
+++ b/drivers/staging/comedi/drivers/pcl816.c
@@ -155,8 +155,8 @@ static int pcl816_attach(struct comedi_device *dev,
static int pcl816_detach(struct comedi_device *dev);
#ifdef unused
-static int RTC_lock = 0; /* RTC lock */
-static int RTC_timer_lock = 0; /* RTC int lock */
+static int RTC_lock; /* RTC lock */
+static int RTC_timer_lock; /* RTC int lock */
#endif
static struct comedi_driver driver_pcl816 = {
diff --git a/drivers/staging/comedi/drivers/pcl818.c b/drivers/staging/comedi/drivers/pcl818.c
index b45a9bd8b489..7344a53a81c4 100644
--- a/drivers/staging/comedi/drivers/pcl818.c
+++ b/drivers/staging/comedi/drivers/pcl818.c
@@ -252,8 +252,8 @@ static int pcl818_attach(struct comedi_device *dev,
static int pcl818_detach(struct comedi_device *dev);
#ifdef unused
-static int RTC_lock = 0; /* RTC lock */
-static int RTC_timer_lock = 0; /* RTC int lock */
+static int RTC_lock; /* RTC lock */
+static int RTC_timer_lock; /* RTC int lock */
#endif
struct pcl818_board {
@@ -463,9 +463,8 @@ static int pcl818_ao_insn_read(struct comedi_device *dev,
int n;
int chan = CR_CHAN(insn->chanspec);
- for (n = 0; n < insn->n; n++) {
+ for (n = 0; n < insn->n; n++)
data[n] = devpriv->ao_readback[chan];
- }
return n;
}
@@ -571,9 +570,9 @@ conv_finish:
return IRQ_HANDLED;
}
devpriv->act_chanlist_pos++;
- if (devpriv->act_chanlist_pos >= devpriv->act_chanlist_len) {
+ if (devpriv->act_chanlist_pos >= devpriv->act_chanlist_len)
devpriv->act_chanlist_pos = 0;
- }
+
s->async->cur_chan++;
if (s->async->cur_chan >= devpriv->ai_n_chan) {
/* printk("E"); */
@@ -645,9 +644,9 @@ static irqreturn_t interrupt_pcl818_ai_mode13_dma(int irq, void *d)
comedi_buf_put(s->async, ptr[bufptr++] >> 4); /* get one sample */
devpriv->act_chanlist_pos++;
- if (devpriv->act_chanlist_pos >= devpriv->act_chanlist_len) {
+ if (devpriv->act_chanlist_pos >= devpriv->act_chanlist_len)
devpriv->act_chanlist_pos = 0;
- }
+
s->async->cur_chan++;
if (s->async->cur_chan >= devpriv->ai_n_chan) {
s->async->cur_chan = 0;
@@ -805,11 +804,10 @@ static irqreturn_t interrupt_pcl818_ai_mode13_fifo(int irq, void *d)
return IRQ_HANDLED;
}
- if (lo & 2) {
+ if (lo & 2)
len = 512;
- } else {
+ else
len = 0;
- }
for (i = 0; i < len; i++) {
lo = inb(dev->iobase + PCL818_FI_DATALO);
@@ -827,9 +825,9 @@ static irqreturn_t interrupt_pcl818_ai_mode13_fifo(int irq, void *d)
comedi_buf_put(s->async, (lo >> 4) | (inb(dev->iobase + PCL818_FI_DATAHI) << 4)); /* get one sample */
devpriv->act_chanlist_pos++;
- if (devpriv->act_chanlist_pos >= devpriv->act_chanlist_len) {
+ if (devpriv->act_chanlist_pos >= devpriv->act_chanlist_len)
devpriv->act_chanlist_pos = 0;
- }
+
s->async->cur_chan++;
if (s->async->cur_chan >= devpriv->ai_n_chan) {
s->async->cur_chan = 0;
@@ -1009,7 +1007,7 @@ static int pcl818_ai_cmd_mode(int mode, struct comedi_device *dev,
int divisor1 = 0, divisor2 = 0;
unsigned int seglen;
- printk("pcl818_ai_cmd_mode()\n");
+ dev_dbg(dev->hw_dev, "pcl818_ai_cmd_mode()\n");
if ((!dev->irq) && (!devpriv->dma_rtc)) {
comedi_error(dev, "IRQ not defined!");
return -EINVAL;
@@ -1112,7 +1110,7 @@ static int pcl818_ai_cmd_mode(int mode, struct comedi_device *dev,
break;
}
#endif
- printk("pcl818_ai_cmd_mode() end\n");
+ dev_dbg(dev->hw_dev, "pcl818_ai_cmd_mode() end\n");
return 0;
}
@@ -1309,11 +1307,9 @@ static void setup_channel_list(struct comedi_device *dev,
*/
static int check_single_ended(unsigned int port)
{
- if (inb(port + PCL818_STATUS) & 0x20) {
+ if (inb(port + PCL818_STATUS) & 0x20)
return 1;
- } else {
- return 0;
- }
+ return 0;
}
/*
@@ -1352,9 +1348,8 @@ static int ai_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s,
if (!cmd->stop_src || tmp != cmd->stop_src)
err++;
- if (err) {
+ if (err)
return 1;
- }
/* step 2: make sure trigger sources are unique and mutually compatible */
@@ -1377,9 +1372,8 @@ static int ai_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s,
if (cmd->stop_src != TRIG_NONE && cmd->stop_src != TRIG_COUNT)
err++;
- if (err) {
+ if (err)
return 2;
- }
/* step 3: make sure arguments are trivially compatible */
@@ -1421,9 +1415,8 @@ static int ai_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s,
}
}
- if (err) {
+ if (err)
return 3;
- }
/* step 4: fix up any arguments */
@@ -1438,9 +1431,8 @@ static int ai_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s,
err++;
}
- if (err) {
+ if (err)
return 4;
- }
/* step 5: complain about special chanlist considerations */
@@ -1461,7 +1453,7 @@ static int ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
struct comedi_cmd *cmd = &s->async->cmd;
int retval;
- printk("pcl818_ai_cmd()\n");
+ dev_dbg(dev->hw_dev, "pcl818_ai_cmd()\n");
devpriv->ai_n_chan = cmd->chanlist_len;
devpriv->ai_chanlist = cmd->chanlist;
devpriv->ai_flags = cmd->flags;
@@ -1470,17 +1462,16 @@ static int ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
devpriv->ai_timer1 = 0;
devpriv->ai_timer2 = 0;
- if (cmd->stop_src == TRIG_COUNT) {
+ if (cmd->stop_src == TRIG_COUNT)
devpriv->ai_scans = cmd->stop_arg;
- } else {
+ else
devpriv->ai_scans = 0;
- }
if (cmd->scan_begin_src == TRIG_FOLLOW) { /* mode 1, 3 */
if (cmd->convert_src == TRIG_TIMER) { /* mode 1 */
devpriv->ai_timer1 = cmd->convert_arg;
retval = pcl818_ai_cmd_mode(1, dev, s);
- printk("pcl818_ai_cmd() end\n");
+ dev_dbg(dev->hw_dev, "pcl818_ai_cmd() end\n");
return retval;
}
if (cmd->convert_src == TRIG_EXT) { /* mode 3 */
@@ -1499,7 +1490,7 @@ static int pcl818_ai_cancel(struct comedi_device *dev,
struct comedi_subdevice *s)
{
if (devpriv->irq_blocked > 0) {
- printk("pcl818_ai_cancel()\n");
+ dev_dbg(dev->hw_dev, "pcl818_ai_cancel()\n");
devpriv->irq_was_now_closed = 1;
switch (devpriv->ai_mode) {
@@ -1549,7 +1540,7 @@ static int pcl818_ai_cancel(struct comedi_device *dev,
}
end:
- printk("pcl818_ai_cancel() end\n");
+ dev_dbg(dev->hw_dev, "pcl818_ai_cancel() end\n");
return 0;
}
@@ -1633,11 +1624,11 @@ static int set_rtc_irq_bit(unsigned char bit)
save_flags(flags);
cli();
val = CMOS_READ(RTC_CONTROL);
- if (bit) {
+ if (bit)
val |= RTC_PIE;
- } else {
+ else
val &= ~RTC_PIE;
- }
+
CMOS_WRITE(val, RTC_CONTROL);
CMOS_READ(RTC_INTR_FLAGS);
restore_flags(flags);
@@ -1754,22 +1745,23 @@ static int pcl818_attach(struct comedi_device *dev, struct comedi_devconfig *it)
/* claim our I/O space */
iobase = it->options[0];
- printk("comedi%d: pcl818: board=%s, ioport=0x%03lx",
- dev->minor, this_board->name, iobase);
+ printk
+ ("comedi%d: pcl818: board=%s, ioport=0x%03lx",
+ dev->minor, this_board->name, iobase);
devpriv->io_range = this_board->io_range;
if ((this_board->fifo) && (it->options[2] == -1)) { /* we've board with FIFO and we want to use FIFO */
devpriv->io_range = PCLx1xFIFO_RANGE;
devpriv->usefifo = 1;
}
if (!request_region(iobase, devpriv->io_range, "pcl818")) {
- printk("I/O port conflict\n");
+ comedi_error(dev, "I/O port conflict\n");
return -EIO;
}
dev->iobase = iobase;
if (pcl818_check(iobase)) {
- printk(", I can't detect board. FAIL!\n");
+ comedi_error(dev, "I can't detect board. FAIL!\n");
return -EIO;
}
@@ -1793,19 +1785,18 @@ static int pcl818_attach(struct comedi_device *dev, struct comedi_devconfig *it)
irq);
irq = 0; /* Can't use IRQ */
} else {
- printk(", irq=%u", irq);
+ printk(KERN_DEBUG "irq=%u", irq);
}
}
}
}
dev->irq = irq;
- if (irq) {
- devpriv->irq_free = 1;
- } /* 1=we have allocated irq */
- else {
+ if (irq)
+ devpriv->irq_free = 1; /* 1=we have allocated irq */
+ else
devpriv->irq_free = 0;
- }
+
devpriv->irq_blocked = 0; /* number of subdevice which use IRQ */
devpriv->ai_mode = 0; /* mode of irq */
@@ -1825,7 +1816,7 @@ static int pcl818_attach(struct comedi_device *dev, struct comedi_devconfig *it)
"pcl818 DMA (RTC)", dev)) {
devpriv->dma_rtc = 1;
devpriv->rtc_irq = RTC_IRQ;
- printk(", dma_irq=%u", devpriv->rtc_irq);
+ printk(KERN_DEBUG "dma_irq=%u", devpriv->rtc_irq);
} else {
RTC_lock--;
if (RTC_lock == 0) {
@@ -1850,34 +1841,26 @@ no_rtc:
if (dma < 1)
goto no_dma; /* DMA disabled */
if (((1 << dma) & this_board->DMAbits) == 0) {
- printk(", DMA is out of allowed range, FAIL!\n");
+ printk(KERN_ERR "DMA is out of allowed range, FAIL!\n");
return -EINVAL; /* Bad DMA */
}
ret = request_dma(dma, "pcl818");
- if (ret) {
- printk(", unable to allocate DMA %u, FAIL!\n", dma);
+ if (ret)
return -EBUSY; /* DMA isn't free */
- }
devpriv->dma = dma;
- printk(", dma=%u", dma);
pages = 2; /* we need 16KB */
devpriv->dmabuf[0] = __get_dma_pages(GFP_KERNEL, pages);
- if (!devpriv->dmabuf[0]) {
- printk(", unable to allocate DMA buffer, FAIL!\n");
+ if (!devpriv->dmabuf[0])
/* maybe experiment with try_to_free_pages() will help .... */
return -EBUSY; /* no buffer :-( */
- }
devpriv->dmapages[0] = pages;
devpriv->hwdmaptr[0] = virt_to_bus((void *)devpriv->dmabuf[0]);
devpriv->hwdmasize[0] = (1 << pages) * PAGE_SIZE;
/* printk("%d %d %ld, ",devpriv->dmapages[0],devpriv->hwdmasize[0],PAGE_SIZE); */
if (devpriv->dma_rtc == 0) { /* we must do duble buff :-( */
devpriv->dmabuf[1] = __get_dma_pages(GFP_KERNEL, pages);
- if (!devpriv->dmabuf[1]) {
- printk
- (", unable to allocate DMA buffer, FAIL!\n");
+ if (!devpriv->dmabuf[1])
return -EBUSY;
- }
devpriv->dmapages[1] = pages;
devpriv->hwdmaptr[1] =
virt_to_bus((void *)devpriv->dmabuf[1]);
@@ -2017,11 +2000,10 @@ no_dma:
}
/* select 1/10MHz oscilator */
- if ((it->options[3] == 0) || (it->options[3] == 10)) {
+ if ((it->options[3] == 0) || (it->options[3] == 10))
devpriv->i8253_osc_base = 100;
- } else {
+ else
devpriv->i8253_osc_base = 1000;
- }
/* max sampling speed */
devpriv->ns_min = this_board->ns_min;
diff --git a/drivers/staging/comedi/drivers/pcmmio.c b/drivers/staging/comedi/drivers/pcmmio.c
index 3ad04aaa1e3c..eddac00e4e29 100644
--- a/drivers/staging/comedi/drivers/pcmmio.c
+++ b/drivers/staging/comedi/drivers/pcmmio.c
@@ -371,15 +371,15 @@ static int pcmmio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
iobase = it->options[0];
irq[0] = it->options[1];
- printk(KERN_INFO "comedi%d: %s: io: %lx ", dev->minor, driver.driver_name,
- iobase);
+ printk(KERN_INFO "comedi%d: %s: io: %lx attaching...\n", dev->minor,
+ driver.driver_name, iobase);
dev->iobase = iobase;
if (!iobase || !request_region(iobase,
thisboard->total_iosize,
driver.driver_name)) {
- printk(KERN_ERR "I/O port conflict\n");
+ printk(KERN_ERR "comedi%d: I/O port conflict\n", dev->minor);
return -EIO;
}
@@ -394,7 +394,8 @@ static int pcmmio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
* convenient macro defined in comedidev.h.
*/
if (alloc_private(dev, sizeof(struct pcmmio_private)) < 0) {
- printk(KERN_ERR "cannot allocate private data structure\n");
+ printk(KERN_ERR "comedi%d: cannot allocate private data structure\n",
+ dev->minor);
return -ENOMEM;
}
@@ -417,7 +418,8 @@ static int pcmmio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
kcalloc(n_subdevs, sizeof(struct pcmmio_subdev_private),
GFP_KERNEL);
if (!devpriv->sprivs) {
- printk(KERN_ERR "cannot allocate subdevice private data structures\n");
+ printk(KERN_ERR "comedi%d: cannot allocate subdevice private data structures\n",
+ dev->minor);
return -ENOMEM;
}
/*
@@ -427,7 +429,8 @@ static int pcmmio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
* Allocate 1 AI + 1 AO + 2 DIO subdevs (24 lines per DIO)
*/
if (alloc_subdevices(dev, n_subdevs) < 0) {
- printk(KERN_ERR "cannot allocate subdevice data structures\n");
+ printk(KERN_ERR "comedi%d: cannot allocate subdevice data structures\n",
+ dev->minor);
return -ENOMEM;
}
@@ -557,14 +560,15 @@ static int pcmmio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
*/
if (irq[0]) {
- printk(KERN_DEBUG "irq: %u ", irq[0]);
+ printk(KERN_DEBUG "comedi%d: irq: %u\n", dev->minor, irq[0]);
if (thisboard->dio_num_asics == 2 && irq[1])
- printk(KERN_DEBUG "second ASIC irq: %u ", irq[1]);
+ printk(KERN_DEBUG "comedi%d: second ASIC irq: %u\n",
+ dev->minor, irq[1]);
} else {
- printk(KERN_INFO "(IRQ mode disabled) ");
+ printk(KERN_INFO "comedi%d: (IRQ mode disabled)\n", dev->minor);
}
- printk(KERN_INFO "attached\n");
+ printk(KERN_INFO "comedi%d: attached\n", dev->minor);
return 1;
}
@@ -663,7 +667,7 @@ static int pcmmio_dio_insn_bits(struct comedi_device *dev,
}
#ifdef DAMMIT_ITS_BROKEN
/* DEBUG */
- printk(KERN_DEBUG "data_out_byte %02x\n", (unsigned)byte);
+ printk("data_out_byte %02x\n", (unsigned)byte);
#endif
/* save the digital input lines for this byte.. */
s->state |= ((unsigned int)byte) << offset;
diff --git a/drivers/staging/comedi/drivers/pcmuio.c b/drivers/staging/comedi/drivers/pcmuio.c
index b2c2c8971a32..661ba2e03892 100644
--- a/drivers/staging/comedi/drivers/pcmuio.c
+++ b/drivers/staging/comedi/drivers/pcmuio.c
@@ -295,15 +295,15 @@ static int pcmuio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
irq[0] = it->options[1];
irq[1] = it->options[2];
- printk("comedi%d: %s: io: %lx ", dev->minor, driver.driver_name,
- iobase);
+ dev_dbg(dev->hw_dev, "comedi%d: %s: io: %lx attached\n", dev->minor,
+ driver.driver_name, iobase);
dev->iobase = iobase;
if (!iobase || !request_region(iobase,
thisboard->num_asics * ASIC_IOSIZE,
driver.driver_name)) {
- printk("I/O port conflict\n");
+ dev_err(dev->hw_dev, "I/O port conflict\n");
return -EIO;
}
@@ -318,7 +318,7 @@ static int pcmuio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
* convenient macro defined in comedidev.h.
*/
if (alloc_private(dev, sizeof(struct pcmuio_private)) < 0) {
- printk("cannot allocate private data structure\n");
+ dev_warn(dev->hw_dev, "cannot allocate private data structure\n");
return -ENOMEM;
}
@@ -337,7 +337,7 @@ static int pcmuio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
kcalloc(n_subdevs, sizeof(struct pcmuio_subdev_private),
GFP_KERNEL);
if (!devpriv->sprivs) {
- printk("cannot allocate subdevice private data structures\n");
+ dev_warn(dev->hw_dev, "cannot allocate subdevice private data structures\n");
return -ENOMEM;
}
/*
@@ -348,7 +348,7 @@ static int pcmuio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
* 96-channel version of the board.
*/
if (alloc_subdevices(dev, n_subdevs) < 0) {
- printk("cannot allocate subdevice data structures\n");
+ dev_dbg(dev->hw_dev, "cannot allocate subdevice data structures\n");
return -ENOMEM;
}
@@ -436,14 +436,13 @@ static int pcmuio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
irqs.. */
if (irq[0]) {
- printk("irq: %u ", irq[0]);
+ dev_dbg(dev->hw_dev, "irq: %u\n", irq[0]);
if (irq[1] && thisboard->num_asics == 2)
- printk("second ASIC irq: %u ", irq[1]);
+ dev_dbg(dev->hw_dev, "second ASIC irq: %u\n", irq[1]);
} else {
- printk("(IRQ mode disabled) ");
+ dev_dbg(dev->hw_dev, "(IRQ mode disabled)\n");
}
- printk("attached\n");
return 1;
}
@@ -460,7 +459,8 @@ static int pcmuio_detach(struct comedi_device *dev)
{
int i;
- printk("comedi%d: %s: remove\n", dev->minor, driver.driver_name);
+ dev_dbg(dev->hw_dev, "comedi%d: %s: remove\n", dev->minor,
+ driver.driver_name);
if (dev->iobase)
release_region(dev->iobase, ASIC_IOSIZE * thisboard->num_asics);
@@ -501,7 +501,8 @@ static int pcmuio_dio_insn_bits(struct comedi_device *dev,
#ifdef DAMMIT_ITS_BROKEN
/* DEBUG */
- printk("write mask: %08x data: %08x\n", data[0], data[1]);
+ dev_dbg(dev->hw_dev, "write mask: %08x data: %08x\n", data[0],
+ data[1]);
#endif
s->state = 0;
@@ -537,7 +538,7 @@ static int pcmuio_dio_insn_bits(struct comedi_device *dev,
}
#ifdef DAMMIT_ITS_BROKEN
/* DEBUG */
- printk("data_out_byte %02x\n", (unsigned)byte);
+ dev_dbg(dev->hw_dev, "data_out_byte %02x\n", (unsigned)byte);
#endif
/* save the digital input lines for this byte.. */
s->state |= ((unsigned int)byte) << offset;
@@ -548,7 +549,8 @@ static int pcmuio_dio_insn_bits(struct comedi_device *dev,
#ifdef DAMMIT_ITS_BROKEN
/* DEBUG */
- printk("s->state %08x data_out %08x\n", s->state, data[1]);
+ dev_dbg(dev->hw_dev, "s->state %08x data_out %08x\n", s->state,
+ data[1]);
#endif
return 2;
@@ -951,14 +953,13 @@ pcmuio_inttrig_start_intr(struct comedi_device *dev, struct comedi_subdevice *s,
spin_lock_irqsave(&subpriv->intr.spinlock, flags);
s->async->inttrig = 0;
- if (subpriv->intr.active) {
+ if (subpriv->intr.active)
event = pcmuio_start_intr(dev, s);
- }
+
spin_unlock_irqrestore(&subpriv->intr.spinlock, flags);
- if (event) {
+ if (event)
comedi_event(dev, s);
- }
return 1;
}
@@ -1000,9 +1001,8 @@ static int pcmuio_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
}
spin_unlock_irqrestore(&subpriv->intr.spinlock, flags);
- if (event) {
+ if (event)
comedi_event(dev, s);
- }
return 0;
}
diff --git a/drivers/staging/comedi/drivers/serial2002.c b/drivers/staging/comedi/drivers/serial2002.c
index ade2202b6231..d880c2f6fbc1 100644
--- a/drivers/staging/comedi/drivers/serial2002.c
+++ b/drivers/staging/comedi/drivers/serial2002.c
@@ -824,7 +824,7 @@ static int serial2002_attach(struct comedi_device *dev,
{
struct comedi_subdevice *s;
- printk("comedi%d: serial2002: ", dev->minor);
+ dev_dbg(dev->hw_dev, "comedi%d: attached\n", dev->minor);
dev->board_name = thisboard->name;
if (alloc_private(dev, sizeof(struct serial2002_private)) < 0)
return -ENOMEM;
@@ -832,7 +832,8 @@ static int serial2002_attach(struct comedi_device *dev,
dev->close = serial_2002_close;
devpriv->port = it->options[0];
devpriv->speed = it->options[1];
- printk("/dev/ttyS%d @ %d\n", devpriv->port, devpriv->speed);
+ dev_dbg(dev->hw_dev, "/dev/ttyS%d @ %d\n", devpriv->port,
+ devpriv->speed);
if (alloc_subdevices(dev, 5) < 0)
return -ENOMEM;
@@ -891,7 +892,7 @@ static int serial2002_detach(struct comedi_device *dev)
struct comedi_subdevice *s;
int i;
- printk("comedi%d: serial2002: remove\n", dev->minor);
+ dev_dbg(dev->hw_dev, "comedi%d: remove\n", dev->minor);
for (i = 0; i < 5; i++) {
s = &dev->subdevices[i];
kfree(s->maxdata_list);
diff --git a/drivers/staging/comedi/drivers/usbduxsigma.c b/drivers/staging/comedi/drivers/usbduxsigma.c
index a8fea9a91733..ca6bcf8b0231 100644
--- a/drivers/staging/comedi/drivers/usbduxsigma.c
+++ b/drivers/staging/comedi/drivers/usbduxsigma.c
@@ -1,4 +1,4 @@
-#define DRIVER_VERSION "v0.5"
+#define DRIVER_VERSION "v0.6"
#define DRIVER_AUTHOR "Bernd Porr, BerndPorr@f2s.com"
#define DRIVER_DESC "Stirling/ITL USB-DUX SIGMA -- Bernd.Porr@f2s.com"
/*
@@ -25,7 +25,7 @@ Driver: usbduxsigma
Description: University of Stirling USB DAQ & INCITE Technology Limited
Devices: [ITL] USB-DUX (usbduxsigma.o)
Author: Bernd Porr <BerndPorr@f2s.com>
-Updated: 21 Jul 2011
+Updated: 8 Nov 2011
Status: testing
*/
/*
@@ -44,6 +44,7 @@ Status: testing
* 0.3: proper vendor ID and driver name
* 0.4: fixed D/A voltage range
* 0.5: various bug fixes, health check at startup
+ * 0.6: corrected wrong input range
*/
/* generates loads of debug info */
@@ -175,7 +176,7 @@ Status: testing
/* comedi constants */
static const struct comedi_lrange range_usbdux_ai_range = { 1, {
BIP_RANGE
- (2.65)
+ (2.65/2.0)
}
};
@@ -1523,15 +1524,17 @@ static int usbdux_ao_inttrig(struct comedi_device *dev,
return -EFAULT;
down(&this_usbduxsub->sem);
+
if (!(this_usbduxsub->probed)) {
- up(&this_usbduxsub->sem);
- return -ENODEV;
+ ret = -ENODEV;
+ goto out;
}
if (trignum != 0) {
dev_err(&this_usbduxsub->interface->dev,
"comedi%d: usbdux_ao_inttrig: invalid trignum\n",
dev->minor);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
if (!(this_usbduxsub->ao_cmd_running)) {
this_usbduxsub->ao_cmd_running = 1;
@@ -1541,8 +1544,7 @@ static int usbdux_ao_inttrig(struct comedi_device *dev,
"comedi%d: usbdux_ao_inttrig: submitURB: "
"err=%d\n", dev->minor, ret);
this_usbduxsub->ao_cmd_running = 0;
- up(&this_usbduxsub->sem);
- return ret;
+ goto out;
}
s->async->inttrig = NULL;
} else {
@@ -1550,8 +1552,10 @@ static int usbdux_ao_inttrig(struct comedi_device *dev,
"comedi%d: ao_inttrig but acqu is already running.\n",
dev->minor);
}
+ ret = 1;
+out:
up(&this_usbduxsub->sem);
- return 1;
+ return ret;
}
static int usbdux_ao_cmdtest(struct comedi_device *dev,
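The usbdux_ao_inttrig() rework above funnels every error path through a single out: label so the semaphore is always released on exit. The shape of that pattern, reduced to a hypothetical device type:

#include <linux/errno.h>
#include <linux/semaphore.h>

struct example_dev {
	struct semaphore sem;
	int probed;
};

static int example_inttrig(struct example_dev *d, unsigned int trignum)
{
	int ret;

	down(&d->sem);
	if (!d->probed) {
		ret = -ENODEV;
		goto out;
	}
	if (trignum != 0) {
		ret = -EINVAL;
		goto out;
	}
	/* ... start the acquisition here ... */
	ret = 1;
out:
	up(&d->sem);	/* single unlock covers every return path */
	return ret;
}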
@@ -2688,6 +2692,7 @@ static int usbduxsigma_attach(struct comedi_device *dev,
if (ret < 0) {
dev_err(&udev->interface->dev,
"comedi%d: no space for subdev\n", dev->minor);
+ up(&udev->sem);
up(&start_stop_sem);
return ret;
}
diff --git a/drivers/staging/crystalhd/bc_dts_defs.h b/drivers/staging/crystalhd/bc_dts_defs.h
index fde5b0627f6b..8cd51a7aad8e 100644
--- a/drivers/staging/crystalhd/bc_dts_defs.h
+++ b/drivers/staging/crystalhd/bc_dts_defs.h
@@ -296,43 +296,79 @@ enum {
vdecColourPrimariesSMPTE240M,
vdecColourPrimariesGenericFilm,
};
-
+/**
+ * @vdecRESOLUTION_CUSTOM: custom
+ * @vdecRESOLUTION_480i: 480i
+ * @vdecRESOLUTION_1080i: 1080i (1920x1080, 60i)
+ * @vdecRESOLUTION_NTSC: NTSC (720x483, 60i)
+ * @vdecRESOLUTION_480p: 480p (720x480, 60p)
+ * @vdecRESOLUTION_720p: 720p (1280x720, 60p)
+ * @vdecRESOLUTION_PAL1: PAL_1 (720x576, 50i)
+ * @vdecRESOLUTION_1080i25: 1080i25 (1920x1080, 50i)
+ * @vdecRESOLUTION_720p50: 720p50 (1280x720, 50p)
+ * @vdecRESOLUTION_576p: 576p (720x576, 50p)
+ * @vdecRESOLUTION_1080i29_97: 1080i (1920x1080, 59.94i)
+ * @vdecRESOLUTION_720p59_94: 720p (1280x720, 59.94p)
+ * @vdecRESOLUTION_SD_DVD: SD DVD (720x483, 60i)
+ * @vdecRESOLUTION_480p656: 480p (720x480, 60p),
+ * output bus width 8 bit, clock 74.25MHz
+ * @vdecRESOLUTION_1080p23_976: 1080p23_976 (1920x1080, 23.976p)
+ * @vdecRESOLUTION_720p23_976: 720p23_976 (1280x720p, 23.976p)
+ * @vdecRESOLUTION_240p29_97: 240p (1440x240, 29.97p )
+ * @vdecRESOLUTION_240p30: 240p (1440x240, 30p)
+ * @vdecRESOLUTION_288p25: 288p (1440x288p, 25p)
+ * @vdecRESOLUTION_1080p29_97: 1080p29_97 (1920x1080, 29.97p)
+ * @vdecRESOLUTION_1080p30: 1080p30 (1920x1080, 30p)
+ * @vdecRESOLUTION_1080p24: 1080p24 (1920x1080, 24p)
+ * @vdecRESOLUTION_1080p25: 1080p25 (1920x1080, 25p)
+ * @vdecRESOLUTION_720p24: 720p24 (1280x720, 25p)
+ * @vdecRESOLUTION_720p29_97: 720p29.97 (1280x720, 29.97p)
+ * @vdecRESOLUTION_480p23_976: 480p23.976 (720*480, 23.976)
+ * @vdecRESOLUTION_480p29_97: 480p29.976 (720*480, 29.97p)
+ * @vdecRESOLUTION_576p25: 576p25 (720*576, 25p)
+ * @vdecRESOLUTION_480p0: 480p (720x480, 0p)
+ * @vdecRESOLUTION_480i0: 480i (720x480, 0i)
+ * @vdecRESOLUTION_576p0: 576p (720x576, 0p)
+ * @vdecRESOLUTION_720p0: 720p (1280x720, 0p)
+ * @vdecRESOLUTION_1080p0: 1080p (1920x1080, 0p)
+ * @vdecRESOLUTION_1080i0: 1080i (1920x1080, 0i)
+ */
enum {
- vdecRESOLUTION_CUSTOM = 0x00000000, /* custom */
- vdecRESOLUTION_480i = 0x00000001, /* 480i */
- vdecRESOLUTION_1080i = 0x00000002, /* 1080i (1920x1080, 60i) */
- vdecRESOLUTION_NTSC = 0x00000003, /* NTSC (720x483, 60i) */
- vdecRESOLUTION_480p = 0x00000004, /* 480p (720x480, 60p) */
- vdecRESOLUTION_720p = 0x00000005, /* 720p (1280x720, 60p) */
- vdecRESOLUTION_PAL1 = 0x00000006, /* PAL_1 (720x576, 50i) */
- vdecRESOLUTION_1080i25 = 0x00000007, /* 1080i25 (1920x1080, 50i) */
- vdecRESOLUTION_720p50 = 0x00000008, /* 720p50 (1280x720, 50p) */
- vdecRESOLUTION_576p = 0x00000009, /* 576p (720x576, 50p) */
- vdecRESOLUTION_1080i29_97 = 0x0000000A, /* 1080i (1920x1080, 59.94i) */
- vdecRESOLUTION_720p59_94 = 0x0000000B, /* 720p (1280x720, 59.94p) */
- vdecRESOLUTION_SD_DVD = 0x0000000C, /* SD DVD (720x483, 60i) */
- vdecRESOLUTION_480p656 = 0x0000000D, /* 480p (720x480, 60p), output bus width 8 bit, clock 74.25MHz */
- vdecRESOLUTION_1080p23_976 = 0x0000000E, /* 1080p23_976 (1920x1080, 23.976p) */
- vdecRESOLUTION_720p23_976 = 0x0000000F, /* 720p23_976 (1280x720p, 23.976p) */
- vdecRESOLUTION_240p29_97 = 0x00000010, /* 240p (1440x240, 29.97p ) */
- vdecRESOLUTION_240p30 = 0x00000011, /* 240p (1440x240, 30p) */
- vdecRESOLUTION_288p25 = 0x00000012, /* 288p (1440x288p, 25p) */
- vdecRESOLUTION_1080p29_97 = 0x00000013, /* 1080p29_97 (1920x1080, 29.97p) */
- vdecRESOLUTION_1080p30 = 0x00000014, /* 1080p30 (1920x1080, 30p) */
- vdecRESOLUTION_1080p24 = 0x00000015, /* 1080p24 (1920x1080, 24p) */
- vdecRESOLUTION_1080p25 = 0x00000016, /* 1080p25 (1920x1080, 25p) */
- vdecRESOLUTION_720p24 = 0x00000017, /* 720p24 (1280x720, 25p) */
- vdecRESOLUTION_720p29_97 = 0x00000018, /* 720p29.97 (1280x720, 29.97p) */
- vdecRESOLUTION_480p23_976 = 0x00000019, /* 480p23.976 (720*480, 23.976) */
- vdecRESOLUTION_480p29_97 = 0x0000001A, /* 480p29.976 (720*480, 29.97p) */
- vdecRESOLUTION_576p25 = 0x0000001B, /* 576p25 (720*576, 25p) */
+ vdecRESOLUTION_CUSTOM = 0x00000000,
+ vdecRESOLUTION_480i = 0x00000001,
+ vdecRESOLUTION_1080i = 0x00000002,
+ vdecRESOLUTION_NTSC = 0x00000003,
+ vdecRESOLUTION_480p = 0x00000004,
+ vdecRESOLUTION_720p = 0x00000005,
+ vdecRESOLUTION_PAL1 = 0x00000006,
+ vdecRESOLUTION_1080i25 = 0x00000007,
+ vdecRESOLUTION_720p50 = 0x00000008,
+ vdecRESOLUTION_576p = 0x00000009,
+ vdecRESOLUTION_1080i29_97 = 0x0000000A,
+ vdecRESOLUTION_720p59_94 = 0x0000000B,
+ vdecRESOLUTION_SD_DVD = 0x0000000C,
+ vdecRESOLUTION_480p656 = 0x0000000D,
+ vdecRESOLUTION_1080p23_976 = 0x0000000E,
+ vdecRESOLUTION_720p23_976 = 0x0000000F,
+ vdecRESOLUTION_240p29_97 = 0x00000010,
+ vdecRESOLUTION_240p30 = 0x00000011,
+ vdecRESOLUTION_288p25 = 0x00000012,
+ vdecRESOLUTION_1080p29_97 = 0x00000013,
+ vdecRESOLUTION_1080p30 = 0x00000014,
+ vdecRESOLUTION_1080p24 = 0x00000015,
+ vdecRESOLUTION_1080p25 = 0x00000016,
+ vdecRESOLUTION_720p24 = 0x00000017,
+ vdecRESOLUTION_720p29_97 = 0x00000018,
+ vdecRESOLUTION_480p23_976 = 0x00000019,
+ vdecRESOLUTION_480p29_97 = 0x0000001A,
+ vdecRESOLUTION_576p25 = 0x0000001B,
/* For Zero Frame Rate */
- vdecRESOLUTION_480p0 = 0x0000001C, /* 480p (720x480, 0p) */
- vdecRESOLUTION_480i0 = 0x0000001D, /* 480i (720x480, 0i) */
- vdecRESOLUTION_576p0 = 0x0000001E, /* 576p (720x576, 0p) */
- vdecRESOLUTION_720p0 = 0x0000001F, /* 720p (1280x720, 0p) */
- vdecRESOLUTION_1080p0 = 0x00000020, /* 1080p (1920x1080, 0p) */
- vdecRESOLUTION_1080i0 = 0x00000021, /* 1080i (1920x1080, 0i) */
+ vdecRESOLUTION_480p0 = 0x0000001C,
+ vdecRESOLUTION_480i0 = 0x0000001D,
+ vdecRESOLUTION_576p0 = 0x0000001E,
+ vdecRESOLUTION_720p0 = 0x0000001F,
+ vdecRESOLUTION_1080p0 = 0x00000020,
+ vdecRESOLUTION_1080i0 = 0x00000021,
};
/* Bit definitions for 'flags' field */
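The crystalhd hunk above moves the trailing per-enumerator comments into a kernel-doc block, which scripts/kernel-doc can then pick up. A compact illustration of the same format (hypothetical enum):

/**
 * enum example_resolution - decoder output resolutions (illustrative only)
 * @EXAMPLE_RES_CUSTOM: custom mode
 * @EXAMPLE_RES_480I:   480i (720x480, 60i)
 * @EXAMPLE_RES_1080I:  1080i (1920x1080, 60i)
 */
enum example_resolution {
	EXAMPLE_RES_CUSTOM	= 0x00,
	EXAMPLE_RES_480I	= 0x01,
	EXAMPLE_RES_1080I	= 0x02,
};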
@@ -359,14 +395,23 @@ enum _BC_OUTPUT_FORMAT {
MODE422_YUY2 = 0x1,
MODE422_UYVY = 0x2,
};
-
+/**
+ * struct BC_PIC_INFO_BLOCK
+ * @timeStamp: Timestamp
+ * @picture_number: Ordinal display number
+ * @width: pixels
+ * @height: pixels
+ * @chroma_format: 0x420, 0x422 or 0x444
+ * @n_drop: number of non-reference frames
+ * remaining to be dropped
+ */
struct BC_PIC_INFO_BLOCK {
/* Common fields. */
- uint64_t timeStamp; /* Timestamp */
- uint32_t picture_number; /* Ordinal display number */
- uint32_t width; /* pixels */
- uint32_t height; /* pixels */
- uint32_t chroma_format; /* 0x420, 0x422 or 0x444 */
+ uint64_t timeStamp;
+ uint32_t picture_number;
+ uint32_t width;
+ uint32_t height;
+ uint32_t chroma_format;
uint32_t pulldown;
uint32_t flags;
uint32_t frame_rate;
@@ -376,7 +421,8 @@ struct BC_PIC_INFO_BLOCK {
uint32_t sess_num;
uint32_t ycom;
uint32_t custom_aspect_ratio_width_height;
- uint32_t n_drop; /* number of non-reference frames remaining to be dropped */
+ uint32_t n_drop; /* number of non-reference frames
+ remaining to be dropped */
/* Protocol-specific extensions. */
union {
@@ -390,43 +436,71 @@ struct BC_PIC_INFO_BLOCK {
/*------------------------------------------------------*
* ProcOut Info *
*------------------------------------------------------*/
-/* Optional flags for ProcOut Interface.*/
+
+/**
+ * enum POUT_OPTIONAL_IN_FLAGS - Optional flags for ProcOut Interface.
+ * @BC_POUT_FLAGS_YV12: Copy Data in YV12 format
+ * @BC_POUT_FLAGS_STRIDE: Stride size is valid.
+ * @BC_POUT_FLAGS_SIZE: Take size information from Application
+ * @BC_POUT_FLAGS_INTERLACED: copy only half the bytes
+ * @BC_POUT_FLAGS_INTERLEAVED: interleaved frame
+ * @BC_POUT_FLAGS_FMT_CHANGE: Data is not VALID when this flag is set
+ * @BC_POUT_FLAGS_PIB_VALID: PIB Information valid
+ * @BC_POUT_FLAGS_ENCRYPTED: Data is encrypted.
+ * @BC_POUT_FLAGS_FLD_BOT: Bottom Field data
+ */
enum POUT_OPTIONAL_IN_FLAGS_ {
/* Flags from App to Device */
- BC_POUT_FLAGS_YV12 = 0x01, /* Copy Data in YV12 format */
- BC_POUT_FLAGS_STRIDE = 0x02, /* Stride size is valid. */
- BC_POUT_FLAGS_SIZE = 0x04, /* Take size information from Application */
- BC_POUT_FLAGS_INTERLACED = 0x08, /* copy only half the bytes */
- BC_POUT_FLAGS_INTERLEAVED = 0x10, /* interleaved frame */
+ BC_POUT_FLAGS_YV12 = 0x01,
+ BC_POUT_FLAGS_STRIDE = 0x02,
+ BC_POUT_FLAGS_SIZE = 0x04,
+ BC_POUT_FLAGS_INTERLACED = 0x08,
+ BC_POUT_FLAGS_INTERLEAVED = 0x10,
/* Flags from Device to APP */
- BC_POUT_FLAGS_FMT_CHANGE = 0x10000, /* Data is not VALID when this flag is set */
- BC_POUT_FLAGS_PIB_VALID = 0x20000, /* PIB Information valid */
- BC_POUT_FLAGS_ENCRYPTED = 0x40000, /* Data is encrypted. */
- BC_POUT_FLAGS_FLD_BOT = 0x80000, /* Bottom Field data */
+ BC_POUT_FLAGS_FMT_CHANGE = 0x10000,
+ BC_POUT_FLAGS_PIB_VALID = 0x20000,
+ BC_POUT_FLAGS_ENCRYPTED = 0x40000,
+ BC_POUT_FLAGS_FLD_BOT = 0x80000,
};
-typedef enum BC_STATUS(*dts_pout_callback)(void *shnd, uint32_t width, uint32_t height, uint32_t stride, void *pOut);
+typedef enum BC_STATUS(*dts_pout_callback)(void *shnd, uint32_t width,
+ uint32_t height, uint32_t stride, void *pOut);
/* Line 21 Closed Caption */
/* User Data */
#define MAX_UD_SIZE 1792 /* 1920 - 128 */
+/**
+ * struct BC_DTS_PROC_OUT
+ * @Ybuff: Caller Supplied buffer for Y data
+ * @YbuffSz: Caller Supplied Y buffer size
+ * @YBuffDoneSz: Transferred Y datasize
+ * @UVbuff: Caller Supplied buffer for UV data
+ * @UVbuffSz: Caller Supplied UV buffer size
+ * @UVBuffDoneSz: Transferred UV data size
+ * @StrideSz: Caller supplied Stride Size
+ * @PoutFlags: Call IN Flags
+ * @discCnt: Picture discontinuity count
+ * @PicInfo: Picture Information Block Data
+ * @b422Mode: Picture output Mode
+ * @bPibEnc: PIB encrypted
+ */
struct BC_DTS_PROC_OUT {
- uint8_t *Ybuff; /* Caller Supplied buffer for Y data */
- uint32_t YbuffSz; /* Caller Supplied Y buffer size */
- uint32_t YBuffDoneSz; /* Transferred Y datasize */
+ uint8_t *Ybuff;
+ uint32_t YbuffSz;
+ uint32_t YBuffDoneSz;
- uint8_t *UVbuff; /* Caller Supplied buffer for UV data */
- uint32_t UVbuffSz; /* Caller Supplied UV buffer size */
- uint32_t UVBuffDoneSz; /* Transferred UV data size */
+ uint8_t *UVbuff;
+ uint32_t UVbuffSz;
+ uint32_t UVBuffDoneSz;
- uint32_t StrideSz; /* Caller supplied Stride Size */
- uint32_t PoutFlags; /* Call IN Flags */
+ uint32_t StrideSz;
+ uint32_t PoutFlags;
- uint32_t discCnt; /* Picture discontinuity count */
+ uint32_t discCnt;
- struct BC_PIC_INFO_BLOCK PicInfo; /* Picture Information Block Data */
+ struct BC_PIC_INFO_BLOCK PicInfo;
/* Line 21 Closed Caption */
/* User Data */
@@ -436,39 +510,47 @@ struct BC_DTS_PROC_OUT {
void *hnd;
dts_pout_callback AppCallBack;
uint8_t DropFrames;
- uint8_t b422Mode; /* Picture output Mode */
- uint8_t bPibEnc; /* PIB encrypted */
+ uint8_t b422Mode;
+ uint8_t bPibEnc;
uint8_t bRevertScramble;
};
-
+/**
+ * struct BC_DTS_STATUS
+ * @ReadyListCount: Number of frames in ready list (reported by driver)
+ * @PowerStateChange: Number of active state power
+ * transitions (reported by driver)
+ * @FramesDropped: Number of frames dropped. (reported by DIL)
+ * @FramesCaptured: Number of frames captured. (reported by DIL)
+ * @FramesRepeated: Number of frames repeated. (reported by DIL)
+ * @InputCount: Times compressed video has been sent to the HW.
+ * i.e. Successful DtsProcInput() calls (reported by DIL)
+ * @InputTotalSize: Amount of compressed video that has been sent to the HW.
+ * (reported by DIL)
+ * @InputBusyCount: Times compressed video has attempted to be sent to the HW
+ * but the input FIFO was full. (reported by DIL)
+ * @PIBMissCount: Amount of times a PIB is invalid. (reported by DIL)
+ * @cpbEmptySize: supported only for H.264, specifically changed for
+ * Adobe. Report size of CPB buffer available. (reported by DIL)
+ * @NextTimeStamp: TimeStamp of the next picture that will be returned
+ * by a call to ProcOutput. Added for Adobe. Reported
+ * back from the driver
+ */
struct BC_DTS_STATUS {
- uint8_t ReadyListCount; /* Number of frames in ready list (reported by driver) */
- uint8_t FreeListCount; /* Number of frame buffers free. (reported by driver) */
- uint8_t PowerStateChange; /* Number of active state power transitions (reported by driver) */
+ uint8_t ReadyListCount;
+ uint8_t FreeListCount;
+ uint8_t PowerStateChange;
uint8_t reserved_[1];
-
- uint32_t FramesDropped; /* Number of frames dropped. (reported by DIL) */
- uint32_t FramesCaptured; /* Number of frames captured. (reported by DIL) */
- uint32_t FramesRepeated; /* Number of frames repeated. (reported by DIL) */
-
- uint32_t InputCount; /* Times compressed video has been sent to the HW.
- * i.e. Successful DtsProcInput() calls (reported by DIL) */
- uint64_t InputTotalSize; /* Amount of compressed video that has been sent to the HW.
- * (reported by DIL) */
- uint32_t InputBusyCount; /* Times compressed video has attempted to be sent to the HW
- * but the input FIFO was full. (reported by DIL) */
-
- uint32_t PIBMissCount; /* Amount of times a PIB is invalid. (reported by DIL) */
-
- uint32_t cpbEmptySize; /* supported only for H.264, specifically changed for
- * Adobe. Report size of CPB buffer available.
- * Reported by DIL */
- uint64_t NextTimeStamp; /* TimeStamp of the next picture that will be returned
- * by a call to ProcOutput. Added for Adobe. Reported
- * back from the driver */
+ uint32_t FramesDropped;
+ uint32_t FramesCaptured;
+ uint32_t FramesRepeated;
+ uint32_t InputCount;
+ uint64_t InputTotalSize;
+ uint32_t InputBusyCount;
+ uint32_t PIBMissCount;
+ uint32_t cpbEmptySize;
+ uint64_t NextTimeStamp;
uint8_t reserved__[16];
-
};
#define BC_SWAP32(_v) \
diff --git a/drivers/staging/cxt1e1/comet.h b/drivers/staging/cxt1e1/comet.h
index 5cb3afda0112..e06da4a6f6f6 100644
--- a/drivers/staging/cxt1e1/comet.h
+++ b/drivers/staging/cxt1e1/comet.h
@@ -1,7 +1,3 @@
-/*
- * $Id: comet.h,v 1.3 2005/09/28 00:10:07 rickd PMCC4_3_1B $
- */
-
#ifndef _INC_COMET_H_
#define _INC_COMET_H_
@@ -23,27 +19,9 @@
* For further information, contact via email: support@sbei.com
* SBE, Inc. San Ramon, California U.S.A.
*-----------------------------------------------------------------------------
- * RCS info:
- * RCS revision: $Revision: 1.3 $
- * Last changed on $Date: 2005/09/28 00:10:07 $
- * Changed by $Author: rickd $
- *-----------------------------------------------------------------------------
- * $Log: comet.h,v $
- * Revision 1.3 2005/09/28 00:10:07 rickd
- * Add RCS header. Switch to structure usage.
- *
- * Revision 1.2 2005/04/28 23:43:03 rickd
- * Add RCS tracking heading.
- *
- *-----------------------------------------------------------------------------
*/
-#if defined(__FreeBSD__) || defined (__NetBSD__)
-#include <sys/types.h>
-#else
#include <linux/types.h>
-#endif
-
#define VINT32 volatile u_int32_t
diff --git a/drivers/staging/cxt1e1/comet_tables.c b/drivers/staging/cxt1e1/comet_tables.c
index db1293c71a6d..84931117da35 100644
--- a/drivers/staging/cxt1e1/comet_tables.c
+++ b/drivers/staging/cxt1e1/comet_tables.c
@@ -1,7 +1,3 @@
-/*
- * $Id: comet_tables.c,v 1.2 2005/10/17 23:55:27 rickd PMCC4_3_1B $
- */
-
/*-----------------------------------------------------------------------------
* comet_tables.c - waveform tables for the PM4351 'COMET'
*
@@ -20,28 +16,8 @@
* For further information, contact via email: support@sbei.com
* SBE, Inc. San Ramon, California U.S.A.
*-----------------------------------------------------------------------------
- * RCS info:
- * RCS revision: $Revision: 1.2 $
- * Last changed on $Date: 2005/10/17 23:55:27 $
- * Changed by $Author: rickd $
- *-----------------------------------------------------------------------------
- * $Log: comet_tables.c,v $
- * Revision 1.2 2005/10/17 23:55:27 rickd
- * Note that 75 Ohm transmit waveform is not supported on PMCC4.
- *
- * Revision 1.1 2005/09/28 00:10:05 rickd
- * Cosmetic alignment of tables for readability.
- *
- * Revision 1.0 2005/05/10 22:47:53 rickd
- * Initial revision
- *
- *-----------------------------------------------------------------------------
*/
-char SBEid_pmcc4_comet_tblc[] =
- "@(#)comet_tables.c - $Revision: 1.2 $ (c) Copyright 2004-2005 SBE, Inc.";
-
-
#include <linux/types.h>
/*****************************************************************************
diff --git a/drivers/staging/cxt1e1/comet_tables.h b/drivers/staging/cxt1e1/comet_tables.h
index 80424a26a169..3e2e5badf787 100644
--- a/drivers/staging/cxt1e1/comet_tables.h
+++ b/drivers/staging/cxt1e1/comet_tables.h
@@ -1,7 +1,3 @@
-/*
- * $Id: comet_tables.h,v 1.5 2006/01/02 22:37:31 rickd PMCC4_3_1B $
- */
-
#ifndef _INC_COMET_TBLS_H_
#define _INC_COMET_TBLS_H_
@@ -23,26 +19,6 @@
* For further information, contact via email: support@sbei.com
* SBE, Inc. San Ramon, California U.S.A.
*-----------------------------------------------------------------------------
- * RCS info:
- * RCS revision: $Revision: 1.5 $
- * Last changed on $Date: 2006/01/02 22:37:31 $
- * Changed by $Author: rickd $
- *-----------------------------------------------------------------------------
- * $Log: comet_tables.h,v $
- * Revision 1.5 2006/01/02 22:37:31 rickd
- * Double indexed arrays need sizings to avoid CC errors under
- * gcc 4.0.0
- *
- * Revision 1.4 2005/10/17 23:55:28 rickd
- * The 75 Ohm transmit waveform is not supported on PMCC4.
- *
- * Revision 1.3 2005/09/28 00:10:08 rickd
- * Add GNU License info. Structures moved to -C- file.
- *
- * Revision 1.2 2005/04/28 23:43:04 rickd
- * Add RCS tracking heading.
- *
- *-----------------------------------------------------------------------------
*/
diff --git a/drivers/staging/cxt1e1/libsbew.h b/drivers/staging/cxt1e1/libsbew.h
index 5c99646cd103..4254c0426db9 100644
--- a/drivers/staging/cxt1e1/libsbew.h
+++ b/drivers/staging/cxt1e1/libsbew.h
@@ -1,7 +1,3 @@
-/*
- * $Id: libsbew.h,v 2.1 2005/10/27 18:54:19 rickd PMCC4_3_1B $
- */
-
#ifndef _INC_LIBSBEW_H_
#define _INC_LIBSBEW_H_
@@ -25,32 +21,8 @@
* For further information, contact via email: support@sbei.com
* SBE, Inc. San Ramon, California U.S.A.
*-----------------------------------------------------------------------------
- * RCS info:
- * RCS revision: $Revision: 2.1 $
- * Last changed on $Date: 2005/10/27 18:54:19 $
- * Changed by $Author: rickd $
- *-----------------------------------------------------------------------------
- * $Log: libsbew.h,v $
- * Revision 2.1 2005/10/27 18:54:19 rickd
- * Add E1PLAIN support.
- *
- * Revision 2.0 2005/09/28 00:10:08 rickd
- * Customized for PMCC4 comet-per-port design.
- *
- * Revision 1.15 2005/03/29 00:51:31 rickd
- * File imported from C1T3 port, Revision 1.15
- *-----------------------------------------------------------------------------
*/
-#ifndef __KERNEL__
-#include <sys/types.h>
-#endif
-
-#ifdef __cplusplus
-extern "C"
-{
-#endif
-
/********************************/
/** set driver logging level **/
/********************************/
@@ -323,7 +295,7 @@ struct sbecom_port_param
#define CFG_CH_DINV_TX 0x02
-/* Posssible resettable chipsets/functions */
+/* Possible resettable chipsets/functions */
#define RESET_DEV_TEMUX 1
#define RESET_DEV_TECT3 RESET_DEV_TEMUX
#define RESET_DEV_PLL 2
@@ -574,8 +546,4 @@ struct sbecom_port_param
extern int wancfg_set_tsioc (wcfg_t *, struct wanc1t3_ts_param *);
#endif
-#ifdef __cplusplus
-}
-#endif
-
#endif /*** _INC_LIBSBEW_H_ ***/
diff --git a/drivers/staging/cxt1e1/musycc.c b/drivers/staging/cxt1e1/musycc.c
index 5cc3423a6646..90c0f1e30f65 100644
--- a/drivers/staging/cxt1e1/musycc.c
+++ b/drivers/staging/cxt1e1/musycc.c
@@ -1,7 +1,3 @@
-/*
- * $Id: musycc.c,v 2.1 2007/08/15 23:32:17 rickd PMCC4_3_1B $
- */
-
unsigned int max_intcnt = 0;
unsigned int max_bh = 0;
@@ -24,53 +20,8 @@ unsigned int max_bh = 0;
* For further information, contact via email: support@onestopsystems.com
* One Stop Systems, Inc. Escondido, California U.S.A.
*-----------------------------------------------------------------------------
- * RCS info:
- * RCS revision: $Revision: 2.1 $
- * Last changed on $Date: 2007/08/15 23:32:17 $
- * Changed by $Author: rickd $
- *-----------------------------------------------------------------------------
- * $Log: musycc.c,v $
- * Revision 2.1 2007/08/15 23:32:17 rickd
- * Use 'if 0' instead of GNU comment delimeter to avoid line wrap induced compiler errors.
- *
- * Revision 2.0 2007/08/15 22:13:20 rickd
- * Update to printf pointer %p usage and correct some UINT to ULONG for
- * 64bit comptibility.
- *
- * Revision 1.7 2006/04/21 00:56:40 rickd
- * workqueue files now prefixed with <sbecom> prefix.
- *
- * Revision 1.6 2005/10/27 18:54:19 rickd
- * Clean out old code. Default to HDLC_FCS16, not TRANS.
- *
- * Revision 1.5 2005/10/17 23:55:28 rickd
- * Initial port of NCOMM support patches from original work found
- * in pmc_c4t1e1 as updated by NCOMM. Ref: CONFIG_SBE_PMCC4_NCOMM.
- *
- * Revision 1.4 2005/10/13 20:35:25 rickd
- * Cleanup warning for unused <flags> variable.
- *
- * Revision 1.3 2005/10/13 19:19:22 rickd
- * Disable redundant driver removal cleanup code.
- *
- * Revision 1.2 2005/10/11 18:36:16 rickd
- * Clean up warning messages caused by de-implemented some <flags> associated
- * with spin_lock() removals.
- *
- * Revision 1.1 2005/10/05 00:45:28 rickd
- * Re-enable xmit on flow-controlled and full channel to fix restart hang.
- * Add some temp spin-lock debug code (rld_spin_owner).
- *
- * Revision 1.0 2005/09/28 00:10:06 rickd
- * Initial release for C4T1E1 support. Lots of transparent
- * mode updates.
- *
- *-----------------------------------------------------------------------------
*/
-char SBEid_pmcc4_musyccc[] =
-"@(#)musycc.c - $Revision: 2.1 $ (c) Copyright 2004-2006 SBE, Inc.";
-
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/types.h>
diff --git a/drivers/staging/cxt1e1/musycc.h b/drivers/staging/cxt1e1/musycc.h
index 68f3660f4477..cf6b54e15689 100644
--- a/drivers/staging/cxt1e1/musycc.h
+++ b/drivers/staging/cxt1e1/musycc.h
@@ -1,7 +1,3 @@
-/*
- * $Id: musycc.h,v 1.3 2005/09/28 00:10:08 rickd PMCC4_3_1B $
- */
-
#ifndef _INC_MUSYCC_H_
#define _INC_MUSYCC_H_
@@ -24,36 +20,13 @@
* For further information, contact via email: support@sbei.com
* SBE, Inc. San Ramon, California U.S.A.
*-----------------------------------------------------------------------------
- * RCS info:
- * RCS revision: $Revision: 1.3 $
- * Last changed on $Date: 2005/09/28 00:10:08 $
- * Changed by $Author: rickd $
- *-----------------------------------------------------------------------------
- * $Log: musycc.h,v $
- * Revision 1.3 2005/09/28 00:10:08 rickd
- * Add GNU license info. Add PMCC4 PCI/DevIDs. Implement new
- * musycc reg&bits namings. Use PORTMAP_0 GCD grouping.
- *
- * Revision 1.2 2005/04/28 23:43:04 rickd
- * Add RCS tracking heading.
- *
- *-----------------------------------------------------------------------------
*/
-#if defined (__FreeBSD__) || defined (__NetBSD__)
-#include <sys/types.h>
-#else
#include <linux/types.h>
-#endif
#define VINT8 volatile u_int8_t
#define VINT32 volatile u_int32_t
-#ifdef __cplusplus
-extern "C"
-{
-#endif
-
#include "pmcc4_defs.h"
@@ -448,10 +421,6 @@ extern "C"
/* This must be defined on an entire channel group (Port) basis */
#define SUERM_THRESHOLD 0x1f
-#ifdef __cplusplus
-}
-#endif
-
#undef VINT32
#undef VINT8
diff --git a/drivers/staging/cxt1e1/ossiRelease.c b/drivers/staging/cxt1e1/ossiRelease.c
index a56029866c2d..f17a902e95e3 100644
--- a/drivers/staging/cxt1e1/ossiRelease.c
+++ b/drivers/staging/cxt1e1/ossiRelease.c
@@ -1,7 +1,3 @@
-/*
- * $Id: ossiRelease.c,v 1.2 2008/05/08 20:14:03 rdobbs PMCC4_3_1B $
- */
-
/*-----------------------------------------------------------------------------
* ossiRelease.c -
*
@@ -26,14 +22,8 @@
* One Stop Systems, Inc. Escondido, California U.S.A.
*
*-----------------------------------------------------------------------------
- * RCS info:
- * RCS revision: $Revision: 1.2 $
- * Last changed on $Date: 2008/05/08 20:14:03 $
- * Changed by $Author: rdobbs $
- *-----------------------------------------------------------------------------
*/
-
char pmcc4_OSSI_release[] = "$Release: PMCC4_3_1B, Copyright (c) 2008 One Stop Systems$";
/*** End-of-File ***/
diff --git a/drivers/staging/cxt1e1/pmc93x6_eeprom.h b/drivers/staging/cxt1e1/pmc93x6_eeprom.h
index c3ada87efd26..96c48cb83260 100644
--- a/drivers/staging/cxt1e1/pmc93x6_eeprom.h
+++ b/drivers/staging/cxt1e1/pmc93x6_eeprom.h
@@ -1,7 +1,3 @@
-/*
- * $Id: pmc93x6_eeprom.h,v 1.1 2005/09/28 00:10:08 rickd PMCC4_3_1B $
- */
-
#ifndef _INC_PMC93X6_EEPROM_H_
#define _INC_PMC93X6_EEPROM_H_
@@ -23,26 +19,9 @@
* For further information, contact via email: support@sbei.com
* SBE, Inc. San Ramon, California U.S.A.
*-----------------------------------------------------------------------------
- * RCS info:
- *-----------------------------------------------------------------------------
- * $Log: pmc93x6_eeprom.h,v $
- * Revision 1.1 2005/09/28 00:10:08 rickd
- * pmc_verify_cksum return value is char.
- *
- * Revision 1.0 2005/05/04 17:20:51 rickd
- * Initial revision
- *
- * Revision 1.0 2005/04/22 23:48:48 rickd
- * Initial revision
- *
- *-----------------------------------------------------------------------------
*/
-#if defined (__FreeBSD__) || defined (__NetBSD__)
-#include <sys/types.h>
-#else
#include <linux/types.h>
-#endif
#ifdef __KERNEL__
diff --git a/drivers/staging/cxt1e1/pmcc4.h b/drivers/staging/cxt1e1/pmcc4.h
index e046b87763a2..b0ed4ad13011 100644
--- a/drivers/staging/cxt1e1/pmcc4.h
+++ b/drivers/staging/cxt1e1/pmcc4.h
@@ -1,7 +1,3 @@
-/*
- * $Id: pmcc4.h,v 1.4 2005/11/01 19:24:48 rickd PMCC4_3_1B $
- */
-
#ifndef _INC_PMCC4_H_
#define _INC_PMCC4_H_
@@ -23,49 +19,15 @@
* For further information, contact via email: support@sbei.com
* SBE, Inc. San Ramon, California U.S.A.
*-----------------------------------------------------------------------------
- * RCS info:
- * RCS revision: $Revision: 1.4 $
- * Last changed on $Date: 2005/11/01 19:24:48 $
- * Changed by $Author: rickd $
- *-----------------------------------------------------------------------------
- * $Log: pmcc4.h,v $
- * Revision 1.4 2005/11/01 19:24:48 rickd
- * Remove de-implement function prototypes. Several <int> to
- * <status_t> changes for consistent usage of same.
- *
- * Revision 1.3 2005/09/28 00:10:08 rickd
- * Add GNU license info. Use config params from libsbew.h
- *
- * Revision 1.2 2005/04/28 23:43:03 rickd
- * Add RCS tracking heading.
- *
- *-----------------------------------------------------------------------------
*/
-
-#if defined(__FreeBSD__) || defined(__NetBSD__)
-#include <sys/types.h>
-#else
-#ifndef __KERNEL__
-#include <sys/types.h>
-#else
#include <linux/types.h>
-#endif
-#endif
-
-
typedef int status_t;
#define SBE_DRVR_FAIL 0
#define SBE_DRVR_SUCCESS 1
-#ifdef __cplusplus
-extern "C"
-{
-#endif
-
-
/********************/
/* PMCC4 memory Map */
/********************/
@@ -105,10 +67,6 @@ extern "C"
#define sbeE1errSMF 0x02
#define sbeE1CRC 0x01
-#ifdef __cplusplus
-}
-#endif
-
#ifdef __KERNEL__
/*
diff --git a/drivers/staging/cxt1e1/pmcc4_cpld.h b/drivers/staging/cxt1e1/pmcc4_cpld.h
index 6d8f0337aa3e..a51209bc5274 100644
--- a/drivers/staging/cxt1e1/pmcc4_cpld.h
+++ b/drivers/staging/cxt1e1/pmcc4_cpld.h
@@ -1,7 +1,3 @@
-/*
- * $Id: pmcc4_cpld.h,v 1.0 2005/09/28 00:10:08 rickd PMCC4_3_1B $
- */
-
#ifndef _INC_PMCC4_CPLD_H_
#define _INC_PMCC4_CPLD_H_
@@ -23,34 +19,9 @@
* For further information, contact via email: support@sbei.com
* SBE, Inc. San Ramon, California U.S.A.
*-----------------------------------------------------------------------------
- * RCS info:
- * RCS revision: $Revision: 1.0 $
- * Last changed on $Date: 2005/09/28 00:10:08 $
- * Changed by $Author: rickd $
- *-----------------------------------------------------------------------------
- * $Log: pmcc4_cpld.h,v $
- * Revision 1.0 2005/09/28 00:10:08 rickd
- * Initial revision
- *
- *-----------------------------------------------------------------------------
*/
-
-#if defined(__FreeBSD__) || defined(__NetBSD__)
-#include <sys/types.h>
-#else
-#ifndef __KERNEL__
-#include <sys/types.h>
-#else
#include <linux/types.h>
-#endif
-#endif
-
-#ifdef __cplusplus
-extern "C"
-{
-#endif
-
/********************************/
/* iSPLD control chip registers */
@@ -117,8 +88,4 @@ extern "C"
#define PMCC4_CPLD_INTR_CMT_3 0x04
#define PMCC4_CPLD_INTR_CMT_4 0x08
-#ifdef __cplusplus
-}
-#endif
-
#endif /* _INC_PMCC4_CPLD_H_ */
diff --git a/drivers/staging/cxt1e1/pmcc4_defs.h b/drivers/staging/cxt1e1/pmcc4_defs.h
index a39505f45c29..83ceae4324b2 100644
--- a/drivers/staging/cxt1e1/pmcc4_defs.h
+++ b/drivers/staging/cxt1e1/pmcc4_defs.h
@@ -1,7 +1,3 @@
-/*
- * $Id: pmcc4_defs.h,v 1.0 2005/09/28 00:10:09 rickd PMCC4_3_1B $
- */
-
#ifndef _INC_PMCC4_DEFS_H_
#define _INC_PMCC4_DEFS_H_
@@ -25,16 +21,6 @@
* For further information, contact via email: support@sbei.com
* SBE, Inc. San Ramon, California U.S.A.
*-----------------------------------------------------------------------------
- * RCS info:
- * RCS revision: $Revision: 1.0 $
- * Last changed on $Date: 2005/09/28 00:10:09 $
- * Changed by $Author: rickd $
- *-----------------------------------------------------------------------------
- * $Log: pmcc4_defs.h,v $
- * Revision 1.0 2005/09/28 00:10:09 rickd
- * Initial revision
- *
- *-----------------------------------------------------------------------------
*/
diff --git a/drivers/staging/cxt1e1/pmcc4_drv.c b/drivers/staging/cxt1e1/pmcc4_drv.c
index e1f07fabd22d..8a7b3a646451 100644
--- a/drivers/staging/cxt1e1/pmcc4_drv.c
+++ b/drivers/staging/cxt1e1/pmcc4_drv.c
@@ -1,8 +1,3 @@
-/*
- * $Id: pmcc4_drv.c,v 3.1 2007/08/15 23:32:17 rickd PMCC4_3_1B $
- */
-
-
/*-----------------------------------------------------------------------------
* pmcc4_drv.c -
*
@@ -22,74 +17,10 @@
* For further information, contact via email: support@onestopsystems.com
* One Stop Systems, Inc. Escondido, California U.S.A.
*-----------------------------------------------------------------------------
- * RCS info:
- * RCS revision: $Revision: 3.1 $
- * Last changed on $Date: 2007/08/15 23:32:17 $
- * Changed by $Author: rickd $
- *-----------------------------------------------------------------------------
- * $Log: pmcc4_drv.c,v $
- * Revision 3.1 2007/08/15 23:32:17 rickd
- * Use 'if 0' instead of GNU comment delimeter to avoid line wrap induced compiler errors.
- *
- * Revision 3.0 2007/08/15 22:19:55 rickd
- * Correct sizeof() castings and pi->regram to support 64bit compatibility.
- *
- * Revision 2.10 2006/04/21 00:56:40 rickd
- * workqueue files now prefixed with <sbecom> prefix.
- *
- * Revision 2.9 2005/11/01 19:22:49 rickd
- * Add sanity checks against max_port for ioctl functions.
- *
- * Revision 2.8 2005/10/27 18:59:25 rickd
- * Code cleanup. Default channel config to HDLC_FCS16.
- *
- * Revision 2.7 2005/10/18 18:16:30 rickd
- * Further NCOMM code repairs - (1) interrupt matrix usage inconsistent
- * for indexing into nciInterrupt[][], code missing double parameters.
- * (2) check input of ncomm interrupt registration cardID for correct
- * boundary values.
- *
- * Revision 2.6 2005/10/17 23:55:28 rickd
- * Initial port of NCOMM support patches from original work found
- * in pmc_c4t1e1 as updated by NCOMM. Ref: CONFIG_SBE_PMCC4_NCOMM.
- * Corrected NCOMMs wanpmcC4T1E1_getBaseAddress() to correctly handle
- * multiple boards.
- *
- * Revision 2.5 2005/10/13 23:01:28 rickd
- * Correct panic for illegal address reference w/in get_brdinfo on
- * first_if/last_if name acquistion under Linux 2.6
- *
- * Revision 2.4 2005/10/13 21:20:19 rickd
- * Correction of c4_cleanup() wherein next should be acquired before
- * ci_t structure is free'd.
- *
- * Revision 2.3 2005/10/13 19:20:10 rickd
- * Correct driver removal cleanup code for multiple boards.
- *
- * Revision 2.2 2005/10/11 18:34:04 rickd
- * New routine added to determine number of ports (comets) on board.
- *
- * Revision 2.1 2005/10/05 00:48:13 rickd
- * Add some RX activation trace code.
- *
- * Revision 2.0 2005/09/28 00:10:06 rickd
- * Implement 2.6 workqueue for TX/RX restart. Correction to
- * hardware register boundary checks allows expanded access of MUSYCC.
- * Implement new musycc reg&bits namings.
- *
- *-----------------------------------------------------------------------------
*/
-char OSSIid_pmcc4_drvc[] =
-"@(#)pmcc4_drv.c - $Revision: 3.1 $ (c) Copyright 2002-2007 One Stop Systems, Inc.";
-
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#if defined (__FreeBSD__) || defined (__NetBSD__)
-#include <sys/param.h>
-#include <sys/systm.h>
-#include <sys/errno.h>
-#else
#include <linux/types.h>
#include "pmcc4_sysdep.h"
#include <linux/errno.h>
@@ -98,7 +29,6 @@ char OSSIid_pmcc4_drvc[] =
#include <linux/timer.h> /* include for timer */
#include <linux/hdlc.h>
#include <asm/io.h>
-#endif
#include "sbecom_inline_linux.h"
#include "libsbew.h"
diff --git a/drivers/staging/cxt1e1/pmcc4_ioctls.h b/drivers/staging/cxt1e1/pmcc4_ioctls.h
index 6b8d65673c78..56a1ee39be18 100644
--- a/drivers/staging/cxt1e1/pmcc4_ioctls.h
+++ b/drivers/staging/cxt1e1/pmcc4_ioctls.h
@@ -1,6 +1,3 @@
-/* RCSid: $Header: /home/rickd/projects/pmcc4/include/pmcc4_ioctls.h,v 2.0 2005/09/28 00:10:09 rickd PMCC4_3_1B $
- */
-
#ifndef _INC_PMCC4_IOCTLS_H_
#define _INC_PMCC4_IOCTLS_H_
@@ -22,19 +19,6 @@
* For further information, contact via email: support@sbei.com
* SBE, Inc. San Ramon, California U.S.A.
*-----------------------------------------------------------------------------
- * RCS info:
- * RCS revision: $Revision: 2.0 $
- * Last changed on $Date: 2005/09/28 00:10:09 $
- * Changed by $Author: rickd $
- *-----------------------------------------------------------------------------
- * $Log: pmcc4_ioctls.h,v $
- * Revision 2.0 2005/09/28 00:10:09 rickd
- * Add GNU license info. Switch Ioctls to sbe_ioc.h usage.
- *
- * Revision 1.2 2005/04/28 23:43:03 rickd
- * Add RCS tracking heading.
- *
- *-----------------------------------------------------------------------------
*/
#include "sbew_ioc.h"
diff --git a/drivers/staging/cxt1e1/sbe_bid.h b/drivers/staging/cxt1e1/sbe_bid.h
index 1f49b4061fb7..abc2e55f62fc 100644
--- a/drivers/staging/cxt1e1/sbe_bid.h
+++ b/drivers/staging/cxt1e1/sbe_bid.h
@@ -1,7 +1,3 @@
-/*
- * $Id: sbe_bid.h,v 1.0 2005/09/28 00:10:09 rickd PMCC4_3_1B $
- */
-
#ifndef _INC_SBEBID_H_
#define _INC_SBEBID_H_
@@ -24,16 +20,6 @@
* SBE, Inc. San Ramon, California U.S.A.
*
*-----------------------------------------------------------------------------
- * RCS info:
- * RCS revision: $Revision: 1.0 $
- * Last changed on $Date: 2005/09/28 00:10:09 $
- * Changed by $Author: rickd $
- *-----------------------------------------------------------------------------
- * $Log: sbe_bid.h,v $
- * Revision 1.0 2005/09/28 00:10:09 rickd
- * Initial revision
- *
- *-----------------------------------------------------------------------------
*/
#define SBE_BID_REG 0x00000000 /* Board ID Register */
diff --git a/drivers/staging/cxt1e1/sbe_promformat.h b/drivers/staging/cxt1e1/sbe_promformat.h
index 746f81b15c73..aad411d185f5 100644
--- a/drivers/staging/cxt1e1/sbe_promformat.h
+++ b/drivers/staging/cxt1e1/sbe_promformat.h
@@ -1,7 +1,3 @@
-/*
- * $Id: sbe_promformat.h,v 2.2 2005/09/28 00:10:09 rickd PMCC4_3_1B $
- */
-
#ifndef _INC_SBE_PROMFORMAT_H_
#define _INC_SBE_PROMFORMAT_H_
@@ -24,19 +20,6 @@
* SBE, Inc. San Ramon, California U.S.A.
*
*-----------------------------------------------------------------------------
- * RCS info:
- * RCS revision: $Revision: 2.2 $
- * Last changed on $Date: 2005/09/28 00:10:09 $
- * Changed by $Author: rickd $
- *-----------------------------------------------------------------------------
- * $Log: sbe_promformat.h,v $
- * Revision 2.2 2005/09/28 00:10:09 rickd
- * Add EEPROM sample from C4T1E1 board.
- *
- * Revision 2.1 2005/05/04 17:18:24 rickd
- * Initial CI.
- *
- *-----------------------------------------------------------------------------
*/
@@ -85,12 +68,6 @@
*
*/
-#ifdef __cplusplus
-extern "C"
-{
-#endif
-
-
#define STRUCT_OFFSET(type, symbol) ((long)&(((type *)0)->symbol))
/*------------------------------------------------------------------------
@@ -150,8 +127,4 @@ extern "C"
FLD_TYPE2 fldType2;
} PROMFORMAT;
-#ifdef __cplusplus
-}
-#endif
-
#endif /*** _INC_SBE_PROMFORMAT_H_ ***/
diff --git a/drivers/staging/cxt1e1/sbecom_inline_linux.h b/drivers/staging/cxt1e1/sbecom_inline_linux.h
index 9ea2c0c2061f..68ed445ab0cb 100644
--- a/drivers/staging/cxt1e1/sbecom_inline_linux.h
+++ b/drivers/staging/cxt1e1/sbecom_inline_linux.h
@@ -1,7 +1,3 @@
-/*
- * $Id: sbecom_inline_linux.h,v 1.2 2007/08/15 22:51:35 rickd PMCC4_3_1B $
- */
-
#ifndef _INC_SBECOM_INLNX_H_
#define _INC_SBECOM_INLNX_H_
@@ -24,22 +20,6 @@
* For further information, contact via email: support@onestopsystems.com
* One Stop Systems, Inc. Escondido, California U.S.A.
*-----------------------------------------------------------------------------
- * RCS info:
- * RCS revision: $Revision: 1.2 $
- * Last changed on $Date: 2007/08/15 22:51:35 $
- * Changed by $Author: rickd $
- *-----------------------------------------------------------------------------
- * $Log: sbecom_inline_linux.h,v $
- * Revision 1.2 2007/08/15 22:51:35 rickd
- * Remove duplicate version.h entry.
- *
- * Revision 1.1 2007/08/15 22:50:29 rickd
- * Update linux/config for 2.6.18 and later.
- *
- * Revision 1.0 2005/09/28 00:10:09 rickd
- * Initial revision
- *
- *-----------------------------------------------------------------------------
*/
diff --git a/drivers/staging/cxt1e1/sbeproc.h b/drivers/staging/cxt1e1/sbeproc.h
index 4aa53f44ec0b..e82be6afd1e8 100644
--- a/drivers/staging/cxt1e1/sbeproc.h
+++ b/drivers/staging/cxt1e1/sbeproc.h
@@ -1,7 +1,3 @@
-/*
- * $Id: sbeproc.h,v 1.2 2005/10/17 23:55:28 rickd PMCC4_3_1B $
- */
-
#ifndef _INC_SBEPROC_H_
#define _INC_SBEPROC_H_
@@ -23,22 +19,6 @@
* For further information, contact via email: support@sbei.com
* SBE, Inc. San Ramon, California U.S.A.
*-----------------------------------------------------------------------------
- * RCS info:
- * RCS revision: $Revision: 1.2 $
- * Last changed on $Date: 2005/10/17 23:55:28 $
- * Changed by $Author: rickd $
- *-----------------------------------------------------------------------------
- * $Log: sbeproc.h,v $
- * Revision 1.2 2005/10/17 23:55:28 rickd
- * sbecom_proc_brd_init() is an declared an __init function.
- *
- * Revision 1.1 2005/09/28 00:10:09 rickd
- * Remove unneeded inclusion of c4_private.h.
- *
- * Revision 1.0 2005/05/10 22:21:46 rickd
- * Initial check-in.
- *
- *-----------------------------------------------------------------------------
*/
diff --git a/drivers/staging/cxt1e1/sbew_ioc.h b/drivers/staging/cxt1e1/sbew_ioc.h
index 14d371904d1f..ce9b15c71894 100644
--- a/drivers/staging/cxt1e1/sbew_ioc.h
+++ b/drivers/staging/cxt1e1/sbew_ioc.h
@@ -1,7 +1,3 @@
-/*
- * $Id: sbew_ioc.h,v 1.0 2005/09/28 00:10:10 rickd PMCC4_3_1B $
- */
-
#ifndef _INC_SBEWIOC_H_
#define _INC_SBEWIOC_H_
@@ -24,55 +20,9 @@
* SBE, Inc. San Ramon, California U.S.A.
*
*-----------------------------------------------------------------------------
- * RCS info:
- * RCS revision: $Revision: 1.0 $
- * Last changed on $Date: 2005/09/28 00:10:10 $
- * Changed by $Author: rickd $
- *-----------------------------------------------------------------------------
- * $Log: sbew_ioc.h,v $
- * Revision 1.0 2005/09/28 00:10:10 rickd
- * Initial revision
- *
- * Revision 1.6 2005/01/11 18:41:01 rickd
- * Add BRDADDR_GET Ioctl.
- *
- * Revision 1.5 2004/09/16 18:55:59 rickd
- * Start setting up for generic framer configuration Ioctl by switch
- * from tect3_framer_param[] to sbecom_framer_param[].
- *
- * Revision 1.4 2004/06/28 17:58:15 rickd
- * Rename IOC_TSMAP_[GS] to IOC_TSIOC_[GS] to support need for
- * multiple formats of data when setting up TimeSlots.
- *
- * Revision 1.3 2004/06/22 21:18:13 rickd
- * read_vec now() ONLY handles a single common wrt_vec array.
- *
- * Revision 1.1 2004/06/10 18:11:34 rickd
- * Add IID_GET Ioctl reference.
- *
- * Revision 1.0 2004/06/08 22:59:38 rickd
- * Initial revision
- *
- * Revision 2.0 2004/06/07 17:49:47 rickd
- * Initial library release following merge of wanc1t3/wan256 into
- * common elements for lib.
- *
- *-----------------------------------------------------------------------------
*/
-#ifndef __KERNEL__
-#include <sys/types.h>
-#endif
-#ifdef SunOS
-#include <sys/ioccom.h>
-#else
#include <linux/ioctl.h>
-#endif
-
-#ifdef __cplusplus
-extern "C"
-{
-#endif
#define SBE_LOCKFILE "/tmp/.sbewan.LCK"
@@ -128,9 +78,4 @@ extern "C"
#define SBE_IOC_MAXVEC 1
-
-#ifdef __cplusplus
-}
-#endif
-
#endif /*** _INC_SBEWIOC_H_ ***/
diff --git a/drivers/staging/et131x/et131x.c b/drivers/staging/et131x/et131x.c
index 0c1c6ca8c379..2c4069fcd981 100644
--- a/drivers/staging/et131x/et131x.c
+++ b/drivers/staging/et131x/et131x.c
@@ -155,7 +155,6 @@ MODULE_DESCRIPTION("10/100/1000 Base-T Ethernet Driver "
#define fMP_ADAPTER_FAIL_SEND_MASK 0x3ff00000
/* Some offsets in PCI config space that are actually used. */
-#define ET1310_PCI_MAX_PYLD 0x4C
#define ET1310_PCI_MAC_ADDRESS 0xA4
#define ET1310_PCI_EEPROM_STATUS 0xB2
#define ET1310_PCI_ACK_NACK 0xC0
@@ -178,9 +177,7 @@ MODULE_DESCRIPTION("10/100/1000 Base-T Ethernet Driver "
/* RX defines */
#define USE_FBR0 1
-
#define FBR_CHUNKS 32
-
#define MAX_DESC_PER_RING_RX 1024
/* number of RFDs - default and min */
@@ -195,7 +192,6 @@ MODULE_DESCRIPTION("10/100/1000 Base-T Ethernet Driver "
#endif
#define NIC_MIN_NUM_RFD 64
-
#define NUM_PACKETS_HANDLED 256
#define ALCATEL_MULTICAST_PKT 0x01000000
@@ -303,8 +299,8 @@ struct fbr_lookup {
dma_addr_t ring_physaddr;
void *mem_virtaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS];
dma_addr_t mem_physaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS];
- uint64_t real_physaddr;
- uint64_t offset;
+ u64 real_physaddr;
+ u64 offset;
u32 local_full;
u32 num_entries;
u32 buffsize;
@@ -427,7 +423,6 @@ struct tx_ring {
int since_irq;
};
-/* ADAPTER defines */
/*
* Do not change these values: if changed, then change also in respective
* TXdma and Rxdma engines
@@ -576,8 +571,6 @@ struct et131x_adapter {
struct net_device_stats net_stats;
};
-/* EEPROM functions */
-
static int eeprom_wait_ready(struct pci_dev *pdev, u32 *status)
{
u32 reg;
@@ -795,7 +788,7 @@ static int eeprom_read(struct et131x_adapter *adapter, u32 addr, u8 *pdata)
return (status & LBCIF_STATUS_ACK_ERROR) ? -EIO : 0;
}
-int et131x_init_eeprom(struct et131x_adapter *adapter)
+static int et131x_init_eeprom(struct et131x_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
u8 eestatus;
@@ -868,7 +861,7 @@ int et131x_init_eeprom(struct et131x_adapter *adapter)
* et131x_rx_dma_enable - re-start of Rx_DMA on the ET1310.
* @adapter: pointer to our adapter structure
*/
-void et131x_rx_dma_enable(struct et131x_adapter *adapter)
+static void et131x_rx_dma_enable(struct et131x_adapter *adapter)
{
/* Setup the receive dma configuration register for normal operation */
u32 csr = 0x2000; /* FBR1 enable */
@@ -906,7 +899,7 @@ void et131x_rx_dma_enable(struct et131x_adapter *adapter)
* et131x_rx_dma_disable - Stop of Rx_DMA on the ET1310
* @adapter: pointer to our adapter structure
*/
-void et131x_rx_dma_disable(struct et131x_adapter *adapter)
+static void et131x_rx_dma_disable(struct et131x_adapter *adapter)
{
u32 csr;
/* Setup the receive dma configuration register */
@@ -928,7 +921,7 @@ void et131x_rx_dma_disable(struct et131x_adapter *adapter)
*
* Mainly used after a return to the D0 (full-power) state from a lower state.
*/
-void et131x_tx_dma_enable(struct et131x_adapter *adapter)
+static void et131x_tx_dma_enable(struct et131x_adapter *adapter)
{
/* Setup the transmit dma configuration register for normal
* operation
@@ -948,23 +941,10 @@ static inline void add_12bit(u32 *v, int n)
}
/**
- * nic_rx_pkts - Checks the hardware for available packets
- * @adapter: pointer to our adapter
- *
- * Returns rfd, a pointer to our MPRFD.
- *
- * Checks the hardware for available packets, using completion ring
- * If packets are available, it gets an RFD from the recv_list, attaches
- * the packet to it, puts the RFD in the RecvPendList, and also returns
- * the pointer to the RFD.
- */
-/* MAC functions */
-
-/**
* et1310_config_mac_regs1 - Initialize the first part of MAC regs
* @adapter: pointer to our adapter structure
*/
-void et1310_config_mac_regs1(struct et131x_adapter *adapter)
+static void et1310_config_mac_regs1(struct et131x_adapter *adapter)
{
struct mac_regs __iomem *macregs = &adapter->regs->mac;
u32 station1;
@@ -1024,7 +1004,7 @@ void et1310_config_mac_regs1(struct et131x_adapter *adapter)
* et1310_config_mac_regs2 - Initialize the second part of MAC regs
* @adapter: pointer to our adapter structure
*/
-void et1310_config_mac_regs2(struct et131x_adapter *adapter)
+static void et1310_config_mac_regs2(struct et131x_adapter *adapter)
{
int32_t delay = 0;
struct mac_regs __iomem *mac = &adapter->regs->mac;
@@ -1105,7 +1085,7 @@ void et1310_config_mac_regs2(struct et131x_adapter *adapter)
*
* Returns 0 if the device is not in phy coma, 1 if it is in phy coma
*/
-int et1310_in_phy_coma(struct et131x_adapter *adapter)
+static int et1310_in_phy_coma(struct et131x_adapter *adapter)
{
u32 pmcsr;
@@ -1114,15 +1094,13 @@ int et1310_in_phy_coma(struct et131x_adapter *adapter)
return ET_PM_PHY_SW_COMA & pmcsr ? 1 : 0;
}
-void et1310_setup_device_for_multicast(struct et131x_adapter *adapter)
+static void et1310_setup_device_for_multicast(struct et131x_adapter *adapter)
{
struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
- uint32_t nIndex;
- uint32_t result;
- uint32_t hash1 = 0;
- uint32_t hash2 = 0;
- uint32_t hash3 = 0;
- uint32_t hash4 = 0;
+ u32 hash1 = 0;
+ u32 hash2 = 0;
+ u32 hash3 = 0;
+ u32 hash4 = 0;
u32 pm_csr;
/* If ET131X_PACKET_TYPE_MULTICAST is specified, then we provision
@@ -1131,10 +1109,13 @@ void et1310_setup_device_for_multicast(struct et131x_adapter *adapter)
* driver.
*/
if (adapter->packet_filter & ET131X_PACKET_TYPE_MULTICAST) {
+ int i;
+
/* Loop through our multicast array and set up the device */
- for (nIndex = 0; nIndex < adapter->multicast_addr_count;
- nIndex++) {
- result = ether_crc(6, adapter->multicast_list[nIndex]);
+ for (i = 0; i < adapter->multicast_addr_count; i++) {
+ u32 result;
+
+ result = ether_crc(6, adapter->multicast_list[i]);
result = (result & 0x3F800000) >> 23;
@@ -1163,7 +1144,7 @@ void et1310_setup_device_for_multicast(struct et131x_adapter *adapter)
}
}
-void et1310_setup_device_for_unicast(struct et131x_adapter *adapter)
+static void et1310_setup_device_for_unicast(struct et131x_adapter *adapter)
{
struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
u32 uni_pf1;
@@ -1203,7 +1184,7 @@ void et1310_setup_device_for_unicast(struct et131x_adapter *adapter)
}
}
-void et1310_config_rxmac_regs(struct et131x_adapter *adapter)
+static void et1310_config_rxmac_regs(struct et131x_adapter *adapter)
{
struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
struct phy_device *phydev = adapter->phydev;
@@ -1334,7 +1315,7 @@ void et1310_config_rxmac_regs(struct et131x_adapter *adapter)
writel(0x9, &rxmac->ctrl);
}
-void et1310_config_txmac_regs(struct et131x_adapter *adapter)
+static void et1310_config_txmac_regs(struct et131x_adapter *adapter)
{
struct txmac_regs __iomem *txmac = &adapter->regs->txmac;
@@ -1348,7 +1329,7 @@ void et1310_config_txmac_regs(struct et131x_adapter *adapter)
writel(0x40, &txmac->cf_param);
}
-void et1310_config_macstat_regs(struct et131x_adapter *adapter)
+static void et1310_config_macstat_regs(struct et131x_adapter *adapter)
{
struct macstat_regs __iomem *macstat =
&adapter->regs->macstat;
@@ -1422,7 +1403,7 @@ void et1310_config_macstat_regs(struct et131x_adapter *adapter)
*
* Returns 0 on success, errno on failure (as defined in errno.h)
*/
-int et131x_phy_mii_read(struct et131x_adapter *adapter, u8 addr,
+static int et131x_phy_mii_read(struct et131x_adapter *adapter, u8 addr,
u8 reg, u16 *value)
{
struct mac_regs __iomem *mac = &adapter->regs->mac;
@@ -1478,7 +1459,7 @@ int et131x_phy_mii_read(struct et131x_adapter *adapter, u8 addr,
return status;
}
-int et131x_mii_read(struct et131x_adapter *adapter, u8 reg, u16 *value)
+static int et131x_mii_read(struct et131x_adapter *adapter, u8 reg, u16 *value)
{
struct phy_device *phydev = adapter->phydev;
@@ -1498,7 +1479,7 @@ int et131x_mii_read(struct et131x_adapter *adapter, u8 reg, u16 *value)
*
* Return 0 on success, errno on failure (as defined in errno.h)
*/
-int et131x_mii_write(struct et131x_adapter *adapter, u8 reg, u16 value)
+static int et131x_mii_write(struct et131x_adapter *adapter, u8 reg, u16 value)
{
struct mac_regs __iomem *mac = &adapter->regs->mac;
struct phy_device *phydev = adapter->phydev;
@@ -1564,8 +1545,9 @@ int et131x_mii_write(struct et131x_adapter *adapter, u8 reg, u16 value)
}
/* Still used from _mac for BIT_READ */
-void et1310_phy_access_mii_bit(struct et131x_adapter *adapter, u16 action,
- u16 regnum, u16 bitnum, u8 *value)
+static void et1310_phy_access_mii_bit(struct et131x_adapter *adapter,
+ u16 action, u16 regnum, u16 bitnum,
+ u8 *value)
{
u16 reg;
u16 mask = 0x0001 << bitnum;
@@ -1591,7 +1573,7 @@ void et1310_phy_access_mii_bit(struct et131x_adapter *adapter, u16 action,
}
}
-void et1310_config_flow_control(struct et131x_adapter *adapter)
+static void et1310_config_flow_control(struct et131x_adapter *adapter)
{
struct phy_device *phydev = adapter->phydev;
@@ -1632,7 +1614,7 @@ void et1310_config_flow_control(struct et131x_adapter *adapter)
* et1310_update_macstat_host_counters - Update the local copy of the statistics
* @adapter: pointer to the adapter structure
*/
-void et1310_update_macstat_host_counters(struct et131x_adapter *adapter)
+static void et1310_update_macstat_host_counters(struct et131x_adapter *adapter)
{
struct ce_stats *stats = &adapter->stats;
struct macstat_regs __iomem *macstat =
@@ -1664,7 +1646,7 @@ void et1310_update_macstat_host_counters(struct et131x_adapter *adapter)
* the statistics held in the adapter structure, checking the "wrap"
* bit for each counter.
*/
-void et1310_handle_macstat_interrupt(struct et131x_adapter *adapter)
+static void et1310_handle_macstat_interrupt(struct et131x_adapter *adapter)
{
u32 carry_reg1;
u32 carry_reg2;
@@ -1714,9 +1696,7 @@ void et1310_handle_macstat_interrupt(struct et131x_adapter *adapter)
adapter->stats.tx_collisions += COUNTER_WRAP_12_BIT;
}
-/* PHY functions */
-
-int et131x_mdio_read(struct mii_bus *bus, int phy_addr, int reg)
+static int et131x_mdio_read(struct mii_bus *bus, int phy_addr, int reg)
{
struct net_device *netdev = bus->priv;
struct et131x_adapter *adapter = netdev_priv(netdev);
@@ -1731,7 +1711,7 @@ int et131x_mdio_read(struct mii_bus *bus, int phy_addr, int reg)
return value;
}
-int et131x_mdio_write(struct mii_bus *bus, int phy_addr, int reg, u16 value)
+static int et131x_mdio_write(struct mii_bus *bus, int phy_addr, int reg, u16 value)
{
struct net_device *netdev = bus->priv;
struct et131x_adapter *adapter = netdev_priv(netdev);
@@ -1739,7 +1719,7 @@ int et131x_mdio_write(struct mii_bus *bus, int phy_addr, int reg, u16 value)
return et131x_mii_write(adapter, reg, value);
}
-int et131x_mdio_reset(struct mii_bus *bus)
+static int et131x_mdio_reset(struct mii_bus *bus)
{
struct net_device *netdev = bus->priv;
struct et131x_adapter *adapter = netdev_priv(netdev);
@@ -1759,7 +1739,7 @@ int et131x_mdio_reset(struct mii_bus *bus)
* Can't you see that this code processed
* Phy power, phy power..
*/
-void et1310_phy_power_down(struct et131x_adapter *adapter, bool down)
+static void et1310_phy_power_down(struct et131x_adapter *adapter, bool down)
{
u16 data;
@@ -1775,7 +1755,7 @@ void et1310_phy_power_down(struct et131x_adapter *adapter, bool down)
* @adapter: pointer to our private adapter structure
*
*/
-void et131x_xcvr_init(struct et131x_adapter *adapter)
+static void et131x_xcvr_init(struct et131x_adapter *adapter)
{
u16 imr;
u16 isr;
@@ -1822,7 +1802,7 @@ void et131x_xcvr_init(struct et131x_adapter *adapter)
*
* Used to configure the global registers on the JAGCore
*/
-void et131x_configure_global_regs(struct et131x_adapter *adapter)
+static void et131x_configure_global_regs(struct et131x_adapter *adapter)
{
struct global_regs __iomem *regs = &adapter->regs->global;
@@ -1863,13 +1843,11 @@ void et131x_configure_global_regs(struct et131x_adapter *adapter)
writel(0, &regs->watchdog_timer);
}
-/* PM functions */
-
/**
* et131x_config_rx_dma_regs - Start of Rx_DMA init sequence
* @adapter: pointer to our adapter structure
*/
-void et131x_config_rx_dma_regs(struct et131x_adapter *adapter)
+static void et131x_config_rx_dma_regs(struct et131x_adapter *adapter)
{
struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma;
struct rx_ring *rx_local = &adapter->rx_ring;
@@ -1987,7 +1965,7 @@ void et131x_config_rx_dma_regs(struct et131x_adapter *adapter)
* Configure the transmit engine with the ring buffers we have created
* and prepare it for use.
*/
-void et131x_config_tx_dma_regs(struct et131x_adapter *adapter)
+static void et131x_config_tx_dma_regs(struct et131x_adapter *adapter)
{
struct txdma_regs __iomem *txdma = &adapter->regs->txdma;
@@ -2017,7 +1995,7 @@ void et131x_config_tx_dma_regs(struct et131x_adapter *adapter)
*
* Returns 0 on success, errno on failure (as defined in errno.h)
*/
-void et131x_adapter_setup(struct et131x_adapter *adapter)
+static void et131x_adapter_setup(struct et131x_adapter *adapter)
{
/* Configure the JAGCore */
et131x_configure_global_regs(adapter);
@@ -2044,7 +2022,7 @@ void et131x_adapter_setup(struct et131x_adapter *adapter)
* et131x_soft_reset - Issue a soft reset to the hardware, complete for ET1310
* @adapter: pointer to our private adapter structure
*/
-void et131x_soft_reset(struct et131x_adapter *adapter)
+static void et131x_soft_reset(struct et131x_adapter *adapter)
{
/* Disable MAC Core */
writel(0xc00f0000, &adapter->regs->mac.cfg1);
@@ -2062,7 +2040,7 @@ void et131x_soft_reset(struct et131x_adapter *adapter)
* Enable the appropriate interrupts on the ET131x according to our
* configuration
*/
-void et131x_enable_interrupts(struct et131x_adapter *adapter)
+static void et131x_enable_interrupts(struct et131x_adapter *adapter)
{
u32 mask;
@@ -2082,7 +2060,7 @@ void et131x_enable_interrupts(struct et131x_adapter *adapter)
*
* Block all interrupts from the et131x device at the device itself
*/
-void et131x_disable_interrupts(struct et131x_adapter *adapter)
+static void et131x_disable_interrupts(struct et131x_adapter *adapter)
{
/* Disable all global interrupts */
writel(INT_MASK_DISABLE, &adapter->regs->global.int_mask);
@@ -2092,7 +2070,7 @@ void et131x_disable_interrupts(struct et131x_adapter *adapter)
* et131x_tx_dma_disable - Stop of Tx_DMA on the ET1310
* @adapter: pointer to our adapter structure
*/
-void et131x_tx_dma_disable(struct et131x_adapter *adapter)
+static void et131x_tx_dma_disable(struct et131x_adapter *adapter)
{
/* Setup the tramsmit dma configuration register */
writel(ET_TXDMA_CSR_HALT|ET_TXDMA_SNGL_EPKT,
@@ -2103,7 +2081,7 @@ void et131x_tx_dma_disable(struct et131x_adapter *adapter)
* et131x_enable_txrx - Enable tx/rx queues
* @netdev: device to be enabled
*/
-void et131x_enable_txrx(struct net_device *netdev)
+static void et131x_enable_txrx(struct net_device *netdev)
{
struct et131x_adapter *adapter = netdev_priv(netdev);
@@ -2123,7 +2101,7 @@ void et131x_enable_txrx(struct net_device *netdev)
* et131x_disable_txrx - Disable tx/rx queues
* @netdev: device to be disabled
*/
-void et131x_disable_txrx(struct net_device *netdev)
+static void et131x_disable_txrx(struct net_device *netdev)
{
struct et131x_adapter *adapter = netdev_priv(netdev);
@@ -2142,7 +2120,7 @@ void et131x_disable_txrx(struct net_device *netdev)
* et131x_init_send - Initialize send data structures
* @adapter: pointer to our private adapter structure
*/
-void et131x_init_send(struct et131x_adapter *adapter)
+static void et131x_init_send(struct et131x_adapter *adapter)
{
struct tcb *tcb;
u32 ct;
@@ -2192,7 +2170,7 @@ void et131x_init_send(struct et131x_adapter *adapter)
* indicating linkup status, call the MPDisablePhyComa routine to
* restore JAGCore and gigE PHY
*/
-void et1310_enable_phy_coma(struct et131x_adapter *adapter)
+static void et1310_enable_phy_coma(struct et131x_adapter *adapter)
{
unsigned long flags;
u32 pmcsr;
@@ -2231,7 +2209,7 @@ void et1310_enable_phy_coma(struct et131x_adapter *adapter)
* et1310_disable_phy_coma - Disable the Phy Coma Mode
* @adapter: pointer to our adapter structure
*/
-void et1310_disable_phy_coma(struct et131x_adapter *adapter)
+static void et1310_disable_phy_coma(struct et131x_adapter *adapter)
{
u32 pmcsr;
@@ -2269,8 +2247,6 @@ void et1310_disable_phy_coma(struct et131x_adapter *adapter)
et131x_enable_txrx(adapter->netdev);
}
-/* RX functions */
-
static inline u32 bump_free_buff_ring(u32 *free_buff_ring, u32 limit)
{
u32 tmp_free_buff_ring = *free_buff_ring;
@@ -2296,16 +2272,14 @@ static inline u32 bump_free_buff_ring(u32 *free_buff_ring, u32 limit)
* @offset: pointer to the offset variable
* @mask: correct mask
*/
-void et131x_align_allocated_memory(struct et131x_adapter *adapter,
- uint64_t *phys_addr,
- uint64_t *offset, uint64_t mask)
+static void et131x_align_allocated_memory(struct et131x_adapter *adapter,
+ u64 *phys_addr, u64 *offset,
+ u64 mask)
{
- uint64_t new_addr;
+ u64 new_addr = *phys_addr & ~mask;
*offset = 0;
- new_addr = *phys_addr & ~mask;
-
if (new_addr != *phys_addr) {
/* Move to next aligned block */
new_addr += mask + 1;
@@ -2325,7 +2299,7 @@ void et131x_align_allocated_memory(struct et131x_adapter *adapter,
* Allocates Free buffer ring 1 for sure, free buffer ring 0 if required,
* and the Packet Status Ring.
*/
-int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
+static int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
{
u32 i, j;
u32 bufsize;
@@ -2454,8 +2428,8 @@ int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
rx_ring->fbr[1]->offset);
#endif
for (i = 0; i < (rx_ring->fbr[0]->num_entries / FBR_CHUNKS); i++) {
- u64 fbr1_offset;
u64 fbr1_tmp_physaddr;
+ u64 fbr1_offset;
u32 fbr1_align;
/* This code allocates an area of memory big enough for N
@@ -2520,8 +2494,8 @@ int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
#ifdef USE_FBR0
/* Same for FBR0 (if in use) */
for (i = 0; i < (rx_ring->fbr[1]->num_entries / FBR_CHUNKS); i++) {
- u64 fbr0_offset;
u64 fbr0_tmp_physaddr;
+ u64 fbr0_offset;
fbr_chunksize =
((FBR_CHUNKS + 1) * rx_ring->fbr[1]->buffsize) - 1;
@@ -2629,7 +2603,7 @@ int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
* et131x_rx_dma_memory_free - Free all memory allocated within this module.
* @adapter: pointer to our private adapter structure
*/
-void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
+static void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
{
u32 index;
u32 bufsize;
@@ -2774,7 +2748,7 @@ void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
*
* Returns 0 on success and errno on failure (as defined in errno.h)
*/
-int et131x_init_recv(struct et131x_adapter *adapter)
+static int et131x_init_recv(struct et131x_adapter *adapter)
{
int status = -ENOMEM;
struct rfd *rfd = NULL;
@@ -2824,7 +2798,7 @@ int et131x_init_recv(struct et131x_adapter *adapter)
* et131x_set_rx_dma_timer - Set the heartbeat timer according to line rate.
* @adapter: pointer to our adapter structure
*/
-void et131x_set_rx_dma_timer(struct et131x_adapter *adapter)
+static void et131x_set_rx_dma_timer(struct et131x_adapter *adapter)
{
struct phy_device *phydev = adapter->phydev;
@@ -2918,6 +2892,17 @@ static void nic_return_rfd(struct et131x_adapter *adapter, struct rfd *rfd)
WARN_ON(rx_local->num_ready_recv > rx_local->num_rfd);
}
+/**
+ * nic_rx_pkts - Checks the hardware for available packets
+ * @adapter: pointer to our adapter
+ *
+ * Returns rfd, a pointer to our MPRFD.
+ *
+ * Checks the hardware for available packets, using completion ring
+ * If packets are available, it gets an RFD from the recv_list, attaches
+ * the packet to it, puts the RFD in the RecvPendList, and also returns
+ * the pointer to the RFD.
+ */
static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter)
{
struct rx_ring *rx_local = &adapter->rx_ring;
@@ -3139,7 +3124,7 @@ static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter)
*
* Assumption, Rcv spinlock has been acquired.
*/
-void et131x_handle_recv_interrupt(struct et131x_adapter *adapter)
+static void et131x_handle_recv_interrupt(struct et131x_adapter *adapter)
{
struct rfd *rfd = NULL;
u32 count = 0;
@@ -3188,8 +3173,6 @@ void et131x_handle_recv_interrupt(struct et131x_adapter *adapter)
adapter->rx_ring.unfinished_receives = false;
}
-/* TX functions */
-
/**
* et131x_tx_dma_memory_alloc
* @adapter: pointer to our private adapter structure
@@ -3202,7 +3185,7 @@ void et131x_handle_recv_interrupt(struct et131x_adapter *adapter)
* memory. The device will update the "status" in memory each time it xmits a
* packet.
*/
-int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
+static int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
{
int desc_size = 0;
struct tx_ring *tx_ring = &adapter->tx_ring;
@@ -3256,7 +3239,7 @@ int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
*
* Returns 0 on success and errno on failure (as defined in errno.h).
*/
-void et131x_tx_dma_memory_free(struct et131x_adapter *adapter)
+static void et131x_tx_dma_memory_free(struct et131x_adapter *adapter)
{
int desc_size = 0;
@@ -3578,7 +3561,7 @@ static int send_packet(struct sk_buff *skb, struct et131x_adapter *adapter)
*
* Return 0 in almost all cases; non-zero value in extreme hard failure only
*/
-int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev)
+static int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev)
{
int status = 0;
struct et131x_adapter *adapter = netdev_priv(netdev);
@@ -3696,7 +3679,7 @@ static inline void free_send_packet(struct et131x_adapter *adapter,
*
* Assumption - Send spinlock has been acquired
*/
-void et131x_free_busy_send_packets(struct et131x_adapter *adapter)
+static void et131x_free_busy_send_packets(struct et131x_adapter *adapter)
{
struct tcb *tcb;
unsigned long flags;
@@ -3743,7 +3726,7 @@ void et131x_free_busy_send_packets(struct et131x_adapter *adapter)
*
* Assumption - Send spinlock has been acquired
*/
-void et131x_handle_send_interrupt(struct et131x_adapter *adapter)
+static void et131x_handle_send_interrupt(struct et131x_adapter *adapter)
{
unsigned long flags;
u32 serviced;
@@ -3798,8 +3781,6 @@ void et131x_handle_send_interrupt(struct et131x_adapter *adapter)
spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
}
-/* ETHTOOL functions */
-
static int et131x_get_settings(struct net_device *netdev,
struct ethtool_cmd *cmd)
{
@@ -3965,18 +3946,16 @@ static struct ethtool_ops et131x_ethtool_ops = {
.get_link = ethtool_op_get_link,
};
-void et131x_set_ethtool_ops(struct net_device *netdev)
+static void et131x_set_ethtool_ops(struct net_device *netdev)
{
SET_ETHTOOL_OPS(netdev, &et131x_ethtool_ops);
}
-/* PCI functions */
-
/**
* et131x_hwaddr_init - set up the MAC Address on the ET1310
* @adapter: pointer to our private adapter structure
*/
-void et131x_hwaddr_init(struct et131x_adapter *adapter)
+static void et131x_hwaddr_init(struct et131x_adapter *adapter)
{
/* If have our default mac from init and no mac address from
* EEPROM then we need to generate the last octet and set it on the
@@ -4022,24 +4001,31 @@ void et131x_hwaddr_init(struct et131x_adapter *adapter)
static int et131x_pci_init(struct et131x_adapter *adapter,
struct pci_dev *pdev)
{
- int i;
- u8 max_payload;
- u8 read_size_reg;
+ int cap = pci_pcie_cap(pdev);
+ u16 max_payload;
+ u16 ctl;
+ int i, rc;
- if (et131x_init_eeprom(adapter) < 0)
- return -EIO;
+ rc = et131x_init_eeprom(adapter);
+ if (rc < 0)
+ goto out;
+ if (!cap) {
+ dev_err(&pdev->dev, "Missing PCIe capabilities\n");
+ goto err_out;
+ }
+
/* Let's set up the PORT LOGIC Register. First we need to know what
* the max_payload_size is
*/
- if (pci_read_config_byte(pdev, ET1310_PCI_MAX_PYLD, &max_payload)) {
+ if (pci_read_config_word(pdev, cap + PCI_EXP_DEVCAP, &max_payload)) {
dev_err(&pdev->dev,
"Could not read PCI config space for Max Payload Size\n");
- return -EIO;
+ goto err_out;
}
/* Program the Ack/Nak latency and replay timers */
- max_payload &= 0x07; /* Only the lower 3 bits are valid */
+ max_payload &= 0x07;
if (max_payload < 2) {
static const u16 acknak[2] = { 0x76, 0xD0 };
@@ -4049,13 +4035,13 @@ static int et131x_pci_init(struct et131x_adapter *adapter,
acknak[max_payload])) {
dev_err(&pdev->dev,
"Could not write PCI config space for ACK/NAK\n");
- return -EIO;
+ goto err_out;
}
if (pci_write_config_word(pdev, ET1310_PCI_REPLAY,
replay[max_payload])) {
dev_err(&pdev->dev,
"Could not write PCI config space for Replay Timer\n");
- return -EIO;
+ goto err_out;
}
}
@@ -4065,23 +4051,22 @@ static int et131x_pci_init(struct et131x_adapter *adapter,
if (pci_write_config_byte(pdev, ET1310_PCI_L0L1LATENCY, 0x11)) {
dev_err(&pdev->dev,
"Could not write PCI config space for Latency Timers\n");
- return -EIO;
+ goto err_out;
}
/* Change the max read size to 2k */
- if (pci_read_config_byte(pdev, 0x51, &read_size_reg)) {
+ if (pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &ctl)) {
dev_err(&pdev->dev,
"Could not read PCI config space for Max read size\n");
- return -EIO;
+ goto err_out;
}
- read_size_reg &= 0x8f;
- read_size_reg |= 0x40;
+ ctl = (ctl & ~PCI_EXP_DEVCTL_READRQ) | ( 0x04 << 12);
- if (pci_write_config_byte(pdev, 0x51, read_size_reg)) {
+ if (pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, ctl)) {
dev_err(&pdev->dev,
"Could not write PCI config space for Max read size\n");
- return -EIO;
+ goto err_out;
}
/* Get MAC address from config space if an eeprom exists, otherwise
@@ -4096,11 +4081,15 @@ static int et131x_pci_init(struct et131x_adapter *adapter,
if (pci_read_config_byte(pdev, ET1310_PCI_MAC_ADDRESS + i,
adapter->rom_addr + i)) {
dev_err(&pdev->dev, "Could not read PCI config space for MAC address\n");
- return -EIO;
+ goto err_out;
}
}
memcpy(adapter->addr, adapter->rom_addr, ETH_ALEN);
- return 0;
+out:
+ return rc;
+err_out:
+ rc = -EIO;
+ goto out;
}
/**
@@ -4110,7 +4099,7 @@ static int et131x_pci_init(struct et131x_adapter *adapter,
* The routine called when the error timer expires, to track the number of
* recurring errors.
*/
-void et131x_error_timer_handler(unsigned long data)
+static void et131x_error_timer_handler(unsigned long data)
{
struct et131x_adapter *adapter = (struct et131x_adapter *) data;
struct phy_device *phydev = adapter->phydev;
@@ -4153,7 +4142,7 @@ void et131x_error_timer_handler(unsigned long data)
*
* Allocate all the memory blocks for send, receive and others.
*/
-int et131x_adapter_memory_alloc(struct et131x_adapter *adapter)
+static int et131x_adapter_memory_alloc(struct et131x_adapter *adapter)
{
int status;
@@ -4188,7 +4177,7 @@ int et131x_adapter_memory_alloc(struct et131x_adapter *adapter)
* et131x_adapter_memory_free - Free all memory allocated for use by Tx & Rx
* @adapter: pointer to our private adapter structure
*/
-void et131x_adapter_memory_free(struct et131x_adapter *adapter)
+static void et131x_adapter_memory_free(struct et131x_adapter *adapter)
{
/* Free DMA memory */
et131x_tx_dma_memory_free(adapter);
@@ -4364,10 +4353,6 @@ static struct et131x_adapter *et131x_adapter_init(struct net_device *netdev,
adapter->pdev = pci_dev_get(pdev);
adapter->netdev = netdev;
- /* Do the same for the netdev struct */
- netdev->irq = pdev->irq;
- netdev->base_addr = pci_resource_start(pdev, 0);
-
/* Initialize spinlocks here */
spin_lock_init(&adapter->lock);
spin_lock_init(&adapter->tcb_send_qlock);
@@ -4400,6 +4385,7 @@ static void __devexit et131x_pci_remove(struct pci_dev *pdev)
struct et131x_adapter *adapter = netdev_priv(netdev);
unregister_netdev(netdev);
+ phy_disconnect(adapter->phydev);
mdiobus_unregister(adapter->mii_bus);
kfree(adapter->mii_bus->irq);
mdiobus_free(adapter->mii_bus);
@@ -4417,7 +4403,7 @@ static void __devexit et131x_pci_remove(struct pci_dev *pdev)
* et131x_up - Bring up a device for use.
* @netdev: device to be opened
*/
-void et131x_up(struct net_device *netdev)
+static void et131x_up(struct net_device *netdev)
{
struct et131x_adapter *adapter = netdev_priv(netdev);
@@ -4429,7 +4415,7 @@ void et131x_up(struct net_device *netdev)
* et131x_down - Bring down the device
* @netdev: device to be broght down
*/
-void et131x_down(struct net_device *netdev)
+static void et131x_down(struct net_device *netdev)
{
struct et131x_adapter *adapter = netdev_priv(netdev);
@@ -4475,8 +4461,6 @@ static SIMPLE_DEV_PM_OPS(et131x_pm_ops, et131x_suspend, et131x_resume);
#define ET131X_PM_OPS NULL
#endif
-/* ISR functions */
-
/**
* et131x_isr - The Interrupt Service Routine for the driver.
* @irq: the IRQ on which the interrupt was received.
@@ -4573,7 +4557,7 @@ out:
* scheduled to run in a deferred context by the ISR. This is where the ISR's
* work actually gets done.
*/
-void et131x_isr_handler(struct work_struct *work)
+static void et131x_isr_handler(struct work_struct *work)
{
struct et131x_adapter *adapter =
container_of(work, struct et131x_adapter, task);
@@ -4774,8 +4758,6 @@ void et131x_isr_handler(struct work_struct *work)
et131x_enable_interrupts(adapter);
}
-/* NETDEV functions */
-
/**
* et131x_stats - Return the current device statistics.
* @netdev: device whose stats are being queried
@@ -4829,10 +4811,12 @@ static struct net_device_stats *et131x_stats(struct net_device *netdev)
*
* Returns 0 on success, errno on failure (as defined in errno.h)
*/
-int et131x_open(struct net_device *netdev)
+static int et131x_open(struct net_device *netdev)
{
- int result = 0;
struct et131x_adapter *adapter = netdev_priv(netdev);
+ struct pci_dev *pdev = adapter->pdev;
+ unsigned int irq = pdev->irq;
+ int result;
/* Start the timer to track NIC errors */
init_timer(&adapter->error_timer);
@@ -4841,12 +4825,9 @@ int et131x_open(struct net_device *netdev)
adapter->error_timer.data = (unsigned long)adapter;
add_timer(&adapter->error_timer);
- /* Register our IRQ */
- result = request_irq(netdev->irq, et131x_isr, IRQF_SHARED,
- netdev->name, netdev);
+ result = request_irq(irq, et131x_isr, IRQF_SHARED, netdev->name, netdev);
if (result) {
- dev_err(&adapter->pdev->dev, "could not register IRQ %d\n",
- netdev->irq);
+ dev_err(&pdev->dev, "could not register IRQ %d\n", irq);
return result;
}
@@ -4863,14 +4844,14 @@ int et131x_open(struct net_device *netdev)
*
* Returns 0 on success, errno on failure (as defined in errno.h)
*/
-int et131x_close(struct net_device *netdev)
+static int et131x_close(struct net_device *netdev)
{
struct et131x_adapter *adapter = netdev_priv(netdev);
et131x_down(netdev);
adapter->flags &= ~fMP_ADAPTER_INTERRUPT_IN_USE;
- free_irq(netdev->irq, netdev);
+ free_irq(adapter->pdev->irq, netdev);
/* Stop the error timer */
return del_timer_sync(&adapter->error_timer);
@@ -4905,8 +4886,8 @@ static int et131x_ioctl(struct net_device *netdev, struct ifreq *reqbuf,
*/
static int et131x_set_packet_filter(struct et131x_adapter *adapter)
{
+ int filter = adapter->packet_filter;
int status = 0;
- uint32_t filter = adapter->packet_filter;
u32 ctrl;
u32 pf_ctrl;
@@ -4968,7 +4949,7 @@ static int et131x_set_packet_filter(struct et131x_adapter *adapter)
static void et131x_multicast(struct net_device *netdev)
{
struct et131x_adapter *adapter = netdev_priv(netdev);
- uint32_t packet_filter = 0;
+ int packet_filter;
unsigned long flags;
struct netdev_hw_addr *ha;
int i;
@@ -5249,40 +5230,6 @@ static const struct net_device_ops et131x_netdev_ops = {
};
/**
- * et131x_device_alloc
- *
- * Returns pointer to the allocated and initialized net_device struct for
- * this device.
- *
- * Create instances of net_device and wl_private for the new adapter and
- * register the device's entry points in the net_device structure.
- */
-struct net_device *et131x_device_alloc(void)
-{
- struct net_device *netdev;
-
- /* Alloc net_device and adapter structs */
- netdev = alloc_etherdev(sizeof(struct et131x_adapter));
-
- if (!netdev) {
- printk(KERN_ERR "et131x: Alloc of net_device struct failed\n");
- return NULL;
- }
-
- /*
- * Setup the function registration table (and other data) for a
- * net_device
- */
- netdev->watchdog_timeo = ET131X_TX_TIMEOUT;
- netdev->netdev_ops = &et131x_netdev_ops;
-
- /* Poll? */
- /* netdev->poll = &et131x_poll; */
- /* netdev->poll_controller = &et131x_poll_controller; */
- return netdev;
-}
-
-/**
* et131x_pci_setup - Perform device initialization
* @pdev: a pointer to the device's pci_dev structure
* @ent: this device's entry in the pci_device_id table
@@ -5297,24 +5244,26 @@ struct net_device *et131x_device_alloc(void)
static int __devinit et131x_pci_setup(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
- int result;
struct net_device *netdev;
struct et131x_adapter *adapter;
+ int rc;
int ii;
- result = pci_enable_device(pdev);
- if (result) {
+ rc = pci_enable_device(pdev);
+ if (rc < 0) {
dev_err(&pdev->dev, "pci_enable_device() failed\n");
- goto err_out;
+ goto out;
}
/* Perform some basic PCI checks */
if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
dev_err(&pdev->dev, "Can't find PCI device's base address\n");
+ rc = -ENODEV;
goto err_disable;
}
- if (pci_request_regions(pdev, DRIVER_NAME)) {
+ rc = pci_request_regions(pdev, DRIVER_NAME);
+ if (rc < 0) {
dev_err(&pdev->dev, "Can't get PCI resources\n");
goto err_disable;
}
@@ -5323,46 +5272,50 @@ static int __devinit et131x_pci_setup(struct pci_dev *pdev,
/* Check the DMA addressing support of this device */
if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
- result = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
- if (result) {
+ rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+ if (rc < 0) {
dev_err(&pdev->dev,
"Unable to obtain 64 bit DMA for consistent allocations\n");
goto err_release_res;
}
} else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
- result = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (result) {
+ rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (rc < 0) {
dev_err(&pdev->dev,
"Unable to obtain 32 bit DMA for consistent allocations\n");
goto err_release_res;
}
} else {
dev_err(&pdev->dev, "No usable DMA addressing method\n");
- result = -EIO;
+ rc = -EIO;
goto err_release_res;
}
/* Allocate netdev and private adapter structs */
- netdev = et131x_device_alloc();
+ netdev = alloc_etherdev(sizeof(struct et131x_adapter));
if (!netdev) {
dev_err(&pdev->dev, "Couldn't alloc netdev struct\n");
- result = -ENOMEM;
+ rc = -ENOMEM;
goto err_release_res;
}
+ netdev->watchdog_timeo = ET131X_TX_TIMEOUT;
+ netdev->netdev_ops = &et131x_netdev_ops;
+
SET_NETDEV_DEV(netdev, &pdev->dev);
et131x_set_ethtool_ops(netdev);
adapter = et131x_adapter_init(netdev, pdev);
- /* Initialise the PCI setup for the device */
- et131x_pci_init(adapter, pdev);
+ rc = et131x_pci_init(adapter, pdev);
+ if (rc < 0)
+ goto err_free_dev;
/* Map the bus-relative registers to system virtual memory */
adapter->regs = pci_ioremap_bar(pdev, 0);
if (!adapter->regs) {
dev_err(&pdev->dev, "Cannot map device registers\n");
- result = -ENOMEM;
+ rc = -ENOMEM;
goto err_free_dev;
}
@@ -5376,8 +5329,8 @@ static int __devinit et131x_pci_setup(struct pci_dev *pdev,
et131x_disable_interrupts(adapter);
/* Allocate DMA memory */
- result = et131x_adapter_memory_alloc(adapter);
- if (result) {
+ rc = et131x_adapter_memory_alloc(adapter);
+ if (rc < 0) {
dev_err(&pdev->dev, "Could not alloc adapater memory (DMA)\n");
goto err_iounmap;
}
@@ -5395,6 +5348,8 @@ static int __devinit et131x_pci_setup(struct pci_dev *pdev,
adapter->boot_coma = 0;
et1310_disable_phy_coma(adapter);
+ rc = -ENOMEM;
+
/* Setup the mii_bus struct */
adapter->mii_bus = mdiobus_alloc();
if (!adapter->mii_bus) {
@@ -5418,13 +5373,14 @@ static int __devinit et131x_pci_setup(struct pci_dev *pdev,
for (ii = 0; ii < PHY_MAX_ADDR; ii++)
adapter->mii_bus->irq[ii] = PHY_POLL;
- if (mdiobus_register(adapter->mii_bus)) {
+ rc = mdiobus_register(adapter->mii_bus);
+ if (rc < 0) {
dev_err(&pdev->dev, "failed to register MII bus\n");
- mdiobus_free(adapter->mii_bus);
goto err_mdio_free_irq;
}
- if (et131x_mii_probe(netdev)) {
+ rc = et131x_mii_probe(netdev);
+ if (rc < 0) {
dev_err(&pdev->dev, "failed to probe MII bus\n");
goto err_mdio_unregister;
}
@@ -5440,10 +5396,10 @@ static int __devinit et131x_pci_setup(struct pci_dev *pdev,
*/
/* Register the net_device struct with the Linux network layer */
- result = register_netdev(netdev);
- if (result != 0) {
+ rc = register_netdev(netdev);
+ if (rc < 0) {
dev_err(&pdev->dev, "register_netdev() failed\n");
- goto err_mdio_unregister;
+ goto err_phy_disconnect;
}
/* Register the net_device struct with the PCI subsystem. Save a copy
@@ -5451,10 +5407,11 @@ static int __devinit et131x_pci_setup(struct pci_dev *pdev,
* been initialized, just in case it needs to be quickly restored.
*/
pci_set_drvdata(pdev, netdev);
- pci_save_state(adapter->pdev);
-
- return result;
+out:
+ return rc;
+err_phy_disconnect:
+ phy_disconnect(adapter->phydev);
err_mdio_unregister:
mdiobus_unregister(adapter->mii_bus);
err_mdio_free_irq:
@@ -5472,8 +5429,7 @@ err_release_res:
pci_release_regions(pdev);
err_disable:
pci_disable_device(pdev);
-err_out:
- return result;
+ goto out;
}
static DEFINE_PCI_DEVICE_TABLE(et131x_pci_table) = {
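
[Illustrative sketch, not part of the patch.] The et131x_pci_init() hunk above stops poking the bare config offset 0x51 and instead programs the Max Read Request Size field of the PCIe Device Control register, located through pci_pcie_cap(). That field occupies bits 14:12 (PCI_EXP_DEVCTL_READRQ) and encodes the size as 128 << field, so the value 0x04 the patch writes selects 2048 bytes; on kernels that provide it, pcie_set_readrq(pdev, 2048) performs the same read-modify-write. The helper name below is invented for the example.

#include <linux/pci.h>

/* Sketch: set the PCIe Max Read Request Size to 2048 bytes by rewriting
 * bits 14:12 of the Device Control register, mirroring the approach used
 * in the et131x_pci_init() hunk above.
 */
static int example_set_readrq_2k(struct pci_dev *pdev)
{
	int cap = pci_pcie_cap(pdev);	/* offset of the PCIe capability, 0 if absent */
	u16 ctl;

	if (!cap)
		return -EINVAL;

	if (pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &ctl))
		return -EIO;

	/* Size is encoded as 128 << field, so field 4 means 2048 bytes. */
	ctl = (ctl & ~PCI_EXP_DEVCTL_READRQ) | (0x04 << 12);

	if (pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, ctl))
		return -EIO;

	return 0;
}
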
diff --git a/drivers/staging/frontier/tranzport.c b/drivers/staging/frontier/tranzport.c
index c263284ddc0e..cf47a5d191fc 100644
--- a/drivers/staging/frontier/tranzport.c
+++ b/drivers/staging/frontier/tranzport.c
@@ -199,7 +199,7 @@ static void usb_tranzport_abort_transfers(struct usb_tranzport *dev)
struct usb_interface *intf = to_usb_interface(dev); \
struct usb_tranzport *t = usb_get_intfdata(intf); \
unsigned long temp; \
- if (strict_strtoul(buf, 10, &temp)) \
+ if (kstrtoul(buf, 10, &temp)) \
return -EINVAL; \
t->value = temp; \
return count; \
diff --git a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c
index b3d743a3d308..917bbb082a6e 100644
--- a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c
+++ b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c
@@ -344,10 +344,10 @@ static void ft1000_reset_asic(struct net_device *dev)
}
mdelay(1);
if (info->AsicID == ELECTRABUZZ_ID) {
- // set watermark to -1 in order to not generate an interrrupt
+ // set watermark to -1 in order to not generate an interrupt
ft1000_write_reg(dev, FT1000_REG_WATERMARK, 0xffff);
} else {
- // set watermark to -1 in order to not generate an interrrupt
+ // set watermark to -1 in order to not generate an interrupt
ft1000_write_reg(dev, FT1000_REG_MAG_WATERMARK, 0xffff);
}
// clear interrupts
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c b/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c
index aaf44c359827..43b1d363107e 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c
@@ -601,7 +601,7 @@ static void ft1000_reset_asic(struct net_device *dev)
mdelay(1);
- /* set watermark to -1 in order to not generate an interrrupt */
+ /* set watermark to -1 in order to not generate an interrupt */
ft1000_write_register(ft1000dev, 0xffff, FT1000_REG_MAG_WATERMARK);
/* clear interrupts */
diff --git a/drivers/staging/gma500/Kconfig b/drivers/staging/gma500/Kconfig
index bfe2166acda6..c7a2b3bc0a18 100644
--- a/drivers/staging/gma500/Kconfig
+++ b/drivers/staging/gma500/Kconfig
@@ -1,6 +1,6 @@
config DRM_PSB
tristate "Intel GMA5/600 KMS Framebuffer"
- depends on DRM && PCI && X86
+ depends on DRM && PCI && X86 && BROKEN
select FB_CFB_COPYAREA
select FB_CFB_FILLRECT
select FB_CFB_IMAGEBLIT
diff --git a/drivers/staging/gma500/accel_2d.c b/drivers/staging/gma500/accel_2d.c
index 114b99a1ce19..b8f78ebbb145 100644
--- a/drivers/staging/gma500/accel_2d.c
+++ b/drivers/staging/gma500/accel_2d.c
@@ -253,7 +253,7 @@ static void psbfb_copyarea_accel(struct fb_info *info,
return;
offset = psbfb->gtt->offset;
- stride = fb->pitch;
+ stride = fb->pitches[0];
switch (fb->depth) {
case 8:
diff --git a/drivers/staging/gma500/cdv_intel_display.c b/drivers/staging/gma500/cdv_intel_display.c
index 7b97c600eff0..c63a32776a9e 100644
--- a/drivers/staging/gma500/cdv_intel_display.c
+++ b/drivers/staging/gma500/cdv_intel_display.c
@@ -507,9 +507,9 @@ int cdv_intel_pipe_set_base(struct drm_crtc *crtc,
if (ret < 0)
goto psb_intel_pipe_set_base_exit;
start = psbfb->gtt->offset;
- offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
+ offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
- REG_WRITE(dspstride, crtc->fb->pitch);
+ REG_WRITE(dspstride, crtc->fb->pitches[0]);
dspcntr = REG_READ(dspcntr_reg);
dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
diff --git a/drivers/staging/gma500/framebuffer.c b/drivers/staging/gma500/framebuffer.c
index 3f39a37456fc..b00761cba144 100644
--- a/drivers/staging/gma500/framebuffer.c
+++ b/drivers/staging/gma500/framebuffer.c
@@ -32,6 +32,7 @@
#include <drm/drmP.h>
#include <drm/drm.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_fb_helper.h>
#include "psb_drv.h"
#include "psb_intel_reg.h"
@@ -273,14 +274,17 @@ static struct fb_ops psbfb_unaccel_ops = {
*/
static int psb_framebuffer_init(struct drm_device *dev,
struct psb_framebuffer *fb,
- struct drm_mode_fb_cmd *mode_cmd,
+ struct drm_mode_fb_cmd2 *mode_cmd,
struct gtt_range *gt)
{
+ u32 bpp, depth;
int ret;
- if (mode_cmd->pitch & 63)
+ drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
+
+ if (mode_cmd->pitches[0] & 63)
return -EINVAL;
- switch (mode_cmd->bpp) {
+ switch (bpp) {
case 8:
case 16:
case 24:
@@ -313,7 +317,7 @@ static int psb_framebuffer_init(struct drm_device *dev,
static struct drm_framebuffer *psb_framebuffer_create
(struct drm_device *dev,
- struct drm_mode_fb_cmd *mode_cmd,
+ struct drm_mode_fb_cmd2 *mode_cmd,
struct gtt_range *gt)
{
struct psb_framebuffer *fb;
@@ -387,27 +391,28 @@ static int psbfb_create(struct psb_fbdev *fbdev,
struct fb_info *info;
struct drm_framebuffer *fb;
struct psb_framebuffer *psbfb = &fbdev->pfb;
- struct drm_mode_fb_cmd mode_cmd;
+ struct drm_mode_fb_cmd2 mode_cmd;
struct device *device = &dev->pdev->dev;
int size;
int ret;
struct gtt_range *backing;
int gtt_roll = 1;
+ u32 bpp, depth;
mode_cmd.width = sizes->surface_width;
mode_cmd.height = sizes->surface_height;
- mode_cmd.bpp = sizes->surface_bpp;
+ bpp = sizes->surface_bpp;
/* No 24bit packed */
- if (mode_cmd.bpp == 24)
- mode_cmd.bpp = 32;
+ if (bpp == 24)
+ bpp = 32;
/* Acceleration via the GTT requires pitch to be 4096 byte aligned
(ie 1024 or 2048 pixels in normal use) */
- mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 7) / 8), 4096);
- mode_cmd.depth = sizes->surface_depth;
+ mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((bpp + 7) / 8), 4096);
+ depth = sizes->surface_depth;
- size = mode_cmd.pitch * mode_cmd.height;
+ size = mode_cmd.pitches[0] * mode_cmd.height;
size = ALIGN(size, PAGE_SIZE);
/* Allocate the framebuffer in the GTT with stolen page backing */
@@ -421,10 +426,10 @@ static int psbfb_create(struct psb_fbdev *fbdev,
gtt_roll = 0; /* Don't use GTT accelerated scrolling */
- mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 7) / 8), 64);
- mode_cmd.depth = sizes->surface_depth;
+ mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((bpp + 7) / 8), 64);
+ depth = sizes->surface_depth;
- size = mode_cmd.pitch * mode_cmd.height;
+ size = mode_cmd.pitches[0] * mode_cmd.height;
size = ALIGN(size, PAGE_SIZE);
/* Allocate the framebuffer in the GTT with stolen page
@@ -443,6 +448,8 @@ static int psbfb_create(struct psb_fbdev *fbdev,
}
info->par = fbdev;
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
+
ret = psb_framebuffer_init(dev, psbfb, &mode_cmd, backing);
if (ret)
goto out_unref;
@@ -504,7 +511,7 @@ static int psbfb_create(struct psb_fbdev *fbdev,
info->apertures->ranges[0].size = dev_priv->gtt.stolen_size;
}
- drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
+ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
drm_fb_helper_fill_var(info, &fbdev->psb_fb_helper,
sizes->fb_width, sizes->fb_height);
@@ -546,7 +553,7 @@ out_err1:
*/
static struct drm_framebuffer *psb_user_framebuffer_create
(struct drm_device *dev, struct drm_file *filp,
- struct drm_mode_fb_cmd *cmd)
+ struct drm_mode_fb_cmd2 *cmd)
{
struct gtt_range *r;
struct drm_gem_object *obj;
@@ -555,7 +562,7 @@ static struct drm_framebuffer *psb_user_framebuffer_create
* Find the GEM object and thus the gtt range object that is
* to back this space
*/
- obj = drm_gem_object_lookup(dev, filp, cmd->handle);
+ obj = drm_gem_object_lookup(dev, filp, cmd->handles[0]);
if (obj == NULL)
return ERR_PTR(-ENOENT);
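[Editor's note] The gma500 framebuffer changes above track the DRM core's move from struct drm_mode_fb_cmd (bpp/depth/pitch) to struct drm_mode_fb_cmd2 (pixel_format/pitches[]); the pitch arithmetic itself is unchanged. A standalone sketch of the 4096-byte alignment used for GTT-accelerated scrolling in psbfb_create() (illustrative mode values only, not driver code):

    #include <stdio.h>

    /* Round x up to a power-of-two boundary a, as the kernel's ALIGN() does. */
    #define ALIGN(x, a)     (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
            unsigned int width = 1366, bpp = 32;    /* example mode only */

            /* bytes per scanline, padded to 4096 as in the hunk above */
            unsigned int pitch = ALIGN(width * ((bpp + 7) / 8), 4096);

            printf("pitches[0] = %u bytes\n", pitch);
            return 0;
    }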
diff --git a/drivers/staging/gma500/mdfld_intel_display.c b/drivers/staging/gma500/mdfld_intel_display.c
index 8eb827ecc3d3..0b37b7b6b02a 100644
--- a/drivers/staging/gma500/mdfld_intel_display.c
+++ b/drivers/staging/gma500/mdfld_intel_display.c
@@ -390,9 +390,9 @@ int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, struct drm_f
goto psb_intel_pipe_set_base_exit;
start = psbfb->gtt->offset;
- offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
+ offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
- REG_WRITE(dspstride, crtc->fb->pitch);
+ REG_WRITE(dspstride, crtc->fb->pitches[0]);
dspcntr = REG_READ(dspcntr_reg);
dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
diff --git a/drivers/staging/gma500/mrst_crtc.c b/drivers/staging/gma500/mrst_crtc.c
index c9311a573c28..980837e37d80 100644
--- a/drivers/staging/gma500/mrst_crtc.c
+++ b/drivers/staging/gma500/mrst_crtc.c
@@ -543,9 +543,9 @@ int mrst_pipe_set_base(struct drm_crtc *crtc,
return 0;
start = psbfb->gtt->offset;
- offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
+ offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
- REG_WRITE(dspstride, crtc->fb->pitch);
+ REG_WRITE(dspstride, crtc->fb->pitches[0]);
dspcntr = REG_READ(dspcntr_reg);
dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
diff --git a/drivers/staging/gma500/power.c b/drivers/staging/gma500/power.c
index 436fe9733b16..408257038335 100644
--- a/drivers/staging/gma500/power.c
+++ b/drivers/staging/gma500/power.c
@@ -266,7 +266,7 @@ bool gma_power_begin(struct drm_device *dev, bool force_on)
ret = gma_resume_pci(dev->pdev);
if (ret == 0) {
/* FIXME: we want to defer this for Medfield/Oaktrail */
- gma_resume_display(dev);
+ gma_resume_display(dev->pdev);
psb_irq_preinstall(dev);
psb_irq_postinstall(dev);
pm_runtime_get(&dev->pdev->dev);
diff --git a/drivers/staging/gma500/psb_drv.c b/drivers/staging/gma500/psb_drv.c
index 986a04d16ba8..95816808f867 100644
--- a/drivers/staging/gma500/psb_drv.c
+++ b/drivers/staging/gma500/psb_drv.c
@@ -1151,6 +1151,17 @@ static struct vm_operations_struct psb_gem_vm_ops = {
.close = drm_gem_vm_close,
};
+static const struct file_operations gma500_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = psb_unlocked_ioctl,
+ .mmap = drm_gem_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+ .read = drm_read,
+};
+
static struct drm_driver driver = {
.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | \
DRIVER_IRQ_VBL | DRIVER_MODESET | DRIVER_GEM ,
@@ -1179,17 +1190,7 @@ static struct drm_driver driver = {
.dumb_create = psb_gem_dumb_create,
.dumb_map_offset = psb_gem_dumb_map_gtt,
.dumb_destroy = psb_gem_dumb_destroy,
-
- .fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = psb_unlocked_ioctl,
- .mmap = drm_gem_mmap,
- .poll = drm_poll,
- .fasync = drm_fasync,
- .read = drm_read,
- },
+ .fops = &gma500_driver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = PSB_DRM_DRIVER_DATE,
diff --git a/drivers/staging/gma500/psb_intel_display.c b/drivers/staging/gma500/psb_intel_display.c
index caa9d86f26d8..85659613ae62 100644
--- a/drivers/staging/gma500/psb_intel_display.c
+++ b/drivers/staging/gma500/psb_intel_display.c
@@ -367,9 +367,9 @@ int psb_intel_pipe_set_base(struct drm_crtc *crtc,
goto psb_intel_pipe_set_base_exit;
start = psbfb->gtt->offset;
- offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
+ offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
- REG_WRITE(dspstride, crtc->fb->pitch);
+ REG_WRITE(dspstride, crtc->fb->pitches[0]);
dspcntr = REG_READ(dspcntr_reg);
dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
diff --git a/drivers/staging/hv/Kconfig b/drivers/staging/hv/Kconfig
index 072185ebe95b..60ac479a2909 100644
--- a/drivers/staging/hv/Kconfig
+++ b/drivers/staging/hv/Kconfig
@@ -3,15 +3,3 @@ config HYPERV_STORAGE
depends on HYPERV && SCSI
help
Select this option to enable the Hyper-V virtual storage driver.
-
-config HYPERV_NET
- tristate "Microsoft Hyper-V virtual network driver"
- depends on HYPERV && NET
- help
- Select this option to enable the Hyper-V virtual network driver.
-
-config HYPERV_MOUSE
- tristate "Microsoft Hyper-V mouse driver"
- depends on HYPERV && HID
- help
- Select this option to enable the Hyper-V mouse driver.
diff --git a/drivers/staging/hv/Makefile b/drivers/staging/hv/Makefile
index 0f55ceee919b..af95a6b7e436 100644
--- a/drivers/staging/hv/Makefile
+++ b/drivers/staging/hv/Makefile
@@ -1,6 +1,3 @@
obj-$(CONFIG_HYPERV_STORAGE) += hv_storvsc.o
-obj-$(CONFIG_HYPERV_NET) += hv_netvsc.o
-obj-$(CONFIG_HYPERV_MOUSE) += hv_mouse.o
hv_storvsc-y := storvsc_drv.o
-hv_netvsc-y := netvsc_drv.o netvsc.o rndis_filter.o
diff --git a/drivers/staging/hv/TODO b/drivers/staging/hv/TODO
index ed4d63619df2..dea7d92dfdc1 100644
--- a/drivers/staging/hv/TODO
+++ b/drivers/staging/hv/TODO
@@ -1,7 +1,5 @@
TODO:
- - audit the network driver
- audit the scsi driver
Please send patches for this code to Greg Kroah-Hartman <gregkh@suse.de>,
-Hank Janssen <hjanssen@microsoft.com>, Haiyang Zhang <haiyangz@microsoft.com>,
-K. Y. Srinivasan <kys@microsoft.com>
+Haiyang Zhang <haiyangz@microsoft.com>, and K. Y. Srinivasan <kys@microsoft.com>
diff --git a/drivers/staging/hv/storvsc_drv.c b/drivers/staging/hv/storvsc_drv.c
index ae8c33e7849c..eb853f71089a 100644
--- a/drivers/staging/hv/storvsc_drv.c
+++ b/drivers/staging/hv/storvsc_drv.c
@@ -32,6 +32,7 @@
#include <linux/module.h>
#include <linux/device.h>
#include <linux/hyperv.h>
+#include <linux/mempool.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
@@ -42,6 +43,7 @@
#include <scsi/scsi_dbg.h>
+#define STORVSC_MIN_BUF_NR 64
#define STORVSC_RING_BUFFER_SIZE (20*PAGE_SIZE)
static int storvsc_ringbuffer_size = STORVSC_RING_BUFFER_SIZE;
@@ -78,7 +80,7 @@ MODULE_PARM_DESC(storvsc_ringbuffer_size, "Ring buffer size (bytes)");
/* V1 Beta 0.1 */
/* V1 RC < 2008/1/31 1.0 */
/* V1 RC > 2008/1/31 2.0 */
-#define VMSTOR_PROTOCOL_VERSION_CURRENT VMSTOR_PROTOCOL_VERSION(2, 0)
+#define VMSTOR_PROTOCOL_VERSION_CURRENT VMSTOR_PROTOCOL_VERSION(4, 2)
@@ -104,7 +106,8 @@ enum vstor_packet_operation {
VSTOR_OPERATION_END_INITIALIZATION = 8,
VSTOR_OPERATION_QUERY_PROTOCOL_VERSION = 9,
VSTOR_OPERATION_QUERY_PROPERTIES = 10,
- VSTOR_OPERATION_MAXIMUM = 10
+ VSTOR_OPERATION_ENUMERATE_BUS = 11,
+ VSTOR_OPERATION_MAXIMUM = 11
};
/*
@@ -234,8 +237,6 @@ struct vstor_packet {
#define STORVSC_MAX_CHANNELS 1
#define STORVSC_MAX_CMD_LEN 16
-struct hv_storvsc_request;
-
/* Matches Windows-end */
enum storvsc_request_type {
WRITE_TYPE,
@@ -284,9 +285,13 @@ struct storvsc_device {
struct hv_storvsc_request reset_request;
};
+struct stor_mem_pools {
+ struct kmem_cache *request_pool;
+ mempool_t *request_mempool;
+};
+
struct hv_host_device {
struct hv_device *dev;
- struct kmem_cache *request_pool;
unsigned int port;
unsigned char path;
unsigned char target;
@@ -302,6 +307,51 @@ struct storvsc_cmd_request {
struct hv_storvsc_request request;
};
+struct storvsc_scan_work {
+ struct work_struct work;
+ struct Scsi_Host *host;
+ uint lun;
+};
+
+static void storvsc_bus_scan(struct work_struct *work)
+{
+ struct storvsc_scan_work *wrk;
+ int id, order_id;
+
+ wrk = container_of(work, struct storvsc_scan_work, work);
+ for (id = 0; id < wrk->host->max_id; ++id) {
+ if (wrk->host->reverse_ordering)
+ order_id = wrk->host->max_id - id - 1;
+ else
+ order_id = id;
+
+ scsi_scan_target(&wrk->host->shost_gendev, 0,
+ order_id, SCAN_WILD_CARD, 1);
+ }
+ kfree(wrk);
+}
+
+static void storvsc_remove_lun(struct work_struct *work)
+{
+ struct storvsc_scan_work *wrk;
+ struct scsi_device *sdev;
+
+ wrk = container_of(work, struct storvsc_scan_work, work);
+ if (!scsi_host_get(wrk->host))
+ goto done;
+
+ sdev = scsi_device_lookup(wrk->host, 0, 0, wrk->lun);
+
+ if (sdev) {
+ scsi_remove_device(sdev);
+ scsi_device_put(sdev);
+ }
+ scsi_host_put(wrk->host);
+
+done:
+ kfree(wrk);
+}
+
static inline struct storvsc_device *get_out_stor_device(
struct hv_device *device)
{
@@ -549,11 +599,25 @@ static void storvsc_on_receive(struct hv_device *device,
struct vstor_packet *vstor_packet,
struct hv_storvsc_request *request)
{
+ struct storvsc_scan_work *work;
+ struct storvsc_device *stor_device;
+
switch (vstor_packet->operation) {
case VSTOR_OPERATION_COMPLETE_IO:
storvsc_on_io_completion(device, vstor_packet, request);
break;
+
case VSTOR_OPERATION_REMOVE_DEVICE:
+ case VSTOR_OPERATION_ENUMERATE_BUS:
+ stor_device = get_in_stor_device(device);
+ work = kmalloc(sizeof(struct storvsc_scan_work), GFP_ATOMIC);
+ if (!work)
+ return;
+
+ INIT_WORK(&work->work, storvsc_bus_scan);
+ work->host = stor_device->host;
+ schedule_work(&work->work);
+ break;
default:
break;
@@ -729,19 +793,48 @@ static void storvsc_get_ide_info(struct hv_device *dev, int *target, int *path)
static int storvsc_device_alloc(struct scsi_device *sdevice)
{
- /*
- * This enables luns to be located sparsely. Otherwise, we may not
- * discovered them.
- */
- sdevice->sdev_bflags |= BLIST_SPARSELUN | BLIST_LARGELUN;
+ struct stor_mem_pools *memp;
+ int number = STORVSC_MIN_BUF_NR;
+
+ memp = kzalloc(sizeof(struct stor_mem_pools), GFP_KERNEL);
+ if (!memp)
+ return -ENOMEM;
+
+ memp->request_pool =
+ kmem_cache_create(dev_name(&sdevice->sdev_dev),
+ sizeof(struct storvsc_cmd_request), 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+
+ if (!memp->request_pool)
+ goto err0;
+
+ memp->request_mempool = mempool_create(number, mempool_alloc_slab,
+ mempool_free_slab,
+ memp->request_pool);
+
+ if (!memp->request_mempool)
+ goto err1;
+
+ sdevice->hostdata = memp;
+
return 0;
+
+err1:
+ kmem_cache_destroy(memp->request_pool);
+
+err0:
+ kfree(memp);
+ return -ENOMEM;
}
-static int storvsc_merge_bvec(struct request_queue *q,
- struct bvec_merge_data *bmd, struct bio_vec *bvec)
+static void storvsc_device_destroy(struct scsi_device *sdevice)
{
- /* checking done by caller. */
- return bvec->bv_len;
+ struct stor_mem_pools *memp = sdevice->hostdata;
+
+ mempool_destroy(memp->request_mempool);
+ kmem_cache_destroy(memp->request_pool);
+ kfree(memp);
+ sdevice->hostdata = NULL;
}
static int storvsc_device_configure(struct scsi_device *sdevice)
@@ -751,8 +844,6 @@ static int storvsc_device_configure(struct scsi_device *sdevice)
blk_queue_max_segment_size(sdevice->request_queue, PAGE_SIZE);
- blk_queue_merge_bvec(sdevice->request_queue, storvsc_merge_bvec);
-
blk_queue_bounce_limit(sdevice->request_queue, BLK_BOUNCE_ANY);
return 0;
@@ -802,12 +893,14 @@ static int do_bounce_buffer(struct scatterlist *sgl, unsigned int sg_count)
static struct scatterlist *create_bounce_buffer(struct scatterlist *sgl,
unsigned int sg_count,
- unsigned int len)
+ unsigned int len,
+ int write)
{
int i;
int num_pages;
struct scatterlist *bounce_sgl;
struct page *page_buf;
+ unsigned int buf_len = ((write == WRITE_TYPE) ? 0 : PAGE_SIZE);
num_pages = ALIGN(len, PAGE_SIZE) >> PAGE_SHIFT;
@@ -819,7 +912,7 @@ static struct scatterlist *create_bounce_buffer(struct scatterlist *sgl,
page_buf = alloc_page(GFP_ATOMIC);
if (!page_buf)
goto cleanup;
- sg_set_page(&bounce_sgl[i], page_buf, 0, 0);
+ sg_set_page(&bounce_sgl[i], page_buf, buf_len, 0);
}
return bounce_sgl;
@@ -833,7 +926,8 @@ cleanup:
/* Assume the original sgl has enough room */
static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
struct scatterlist *bounce_sgl,
- unsigned int orig_sgl_count)
+ unsigned int orig_sgl_count,
+ unsigned int bounce_sgl_count)
{
int i;
int j = 0;
@@ -874,6 +968,24 @@ static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
kunmap_atomic((void *)bounce_addr, KM_IRQ0);
j++;
+ /*
+ * It is possible that the number of elements
+ * in the bounce buffer may not be equal to
+ * the number of elements in the original
+ * scatter list. Handle this correctly.
+ */
+
+ if (j == bounce_sgl_count) {
+ /*
+ * We are done; cleanup and return.
+ */
+ kunmap_atomic((void *)(dest_addr -
+ orig_sgl[i].offset),
+ KM_IRQ0);
+ local_irq_restore(flags);
+ return total_copied;
+ }
+
/* if we need to use another bounce buffer */
if (destlen || i != orig_sgl_count - 1)
bounce_addr =
@@ -965,18 +1077,13 @@ static int storvsc_remove(struct hv_device *dev)
{
struct storvsc_device *stor_device = hv_get_drvdata(dev);
struct Scsi_Host *host = stor_device->host;
- struct hv_host_device *host_dev =
- (struct hv_host_device *)host->hostdata;
scsi_remove_host(host);
scsi_host_put(host);
storvsc_dev_remove(dev);
- if (host_dev->request_pool) {
- kmem_cache_destroy(host_dev->request_pool);
- host_dev->request_pool = NULL;
- }
+
return 0;
}
@@ -1014,7 +1121,7 @@ static int storvsc_host_reset(struct hv_device *device)
stor_device = get_out_stor_device(device);
if (!stor_device)
- return -ENODEV;
+ return FAILED;
request = &stor_device->reset_request;
vstor_packet = &request->vstor_packet;
@@ -1031,13 +1138,11 @@ static int storvsc_host_reset(struct hv_device *device)
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
if (ret != 0)
- goto cleanup;
+ return FAILED;
t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
- if (t == 0) {
- ret = -ETIMEDOUT;
- goto cleanup;
- }
+ if (t == 0)
+ return TIMEOUT_ERROR;
/*
@@ -1045,8 +1150,7 @@ static int storvsc_host_reset(struct hv_device *device)
* should have been flushed out and return to us
*/
-cleanup:
- return ret;
+ return SUCCESS;
}
@@ -1055,16 +1159,10 @@ cleanup:
*/
static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
{
- int ret;
- struct hv_host_device *host_dev =
- (struct hv_host_device *)scmnd->device->host->hostdata;
+ struct hv_host_device *host_dev = shost_priv(scmnd->device->host);
struct hv_device *dev = host_dev->dev;
- ret = storvsc_host_reset(dev);
- if (ret != 0)
- return ret;
-
- return ret;
+ return storvsc_host_reset(dev);
}
@@ -1076,21 +1174,22 @@ static void storvsc_command_completion(struct hv_storvsc_request *request)
struct storvsc_cmd_request *cmd_request =
(struct storvsc_cmd_request *)request->context;
struct scsi_cmnd *scmnd = cmd_request->cmd;
- struct hv_host_device *host_dev =
- (struct hv_host_device *)scmnd->device->host->hostdata;
+ struct hv_host_device *host_dev = shost_priv(scmnd->device->host);
void (*scsi_done_fn)(struct scsi_cmnd *);
struct scsi_sense_hdr sense_hdr;
struct vmscsi_request *vm_srb;
+ struct storvsc_scan_work *wrk;
+ struct stor_mem_pools *memp = scmnd->device->hostdata;
vm_srb = &request->vstor_packet.vm_srb;
if (cmd_request->bounce_sgl_count) {
- if (vm_srb->data_in == READ_TYPE) {
+ if (vm_srb->data_in == READ_TYPE)
copy_from_bounce_buffer(scsi_sglist(scmnd),
cmd_request->bounce_sgl,
- scsi_sg_count(scmnd));
- destroy_bounce_buffer(cmd_request->bounce_sgl,
+ scsi_sg_count(scmnd),
+ cmd_request->bounce_sgl_count);
+ destroy_bounce_buffer(cmd_request->bounce_sgl,
cmd_request->bounce_sgl_count);
- }
}
/*
@@ -1103,6 +1202,29 @@ static void storvsc_command_completion(struct hv_storvsc_request *request)
else
scmnd->result = vm_srb->scsi_status;
+ /*
+ * If the LUN is invalid; remove the device.
+ */
+ if (vm_srb->srb_status == 0x20) {
+ struct storvsc_device *stor_dev;
+ struct hv_device *dev = host_dev->dev;
+ struct Scsi_Host *host;
+
+ stor_dev = get_in_stor_device(dev);
+ host = stor_dev->host;
+
+ wrk = kmalloc(sizeof(struct storvsc_scan_work),
+ GFP_ATOMIC);
+ if (!wrk) {
+ scmnd->result = DID_TARGET_FAILURE << 16;
+ } else {
+ wrk->host = host;
+ wrk->lun = vm_srb->lun;
+ INIT_WORK(&wrk->work, storvsc_remove_lun);
+ schedule_work(&wrk->work);
+ }
+ }
+
if (scmnd->result) {
if (scsi_normalize_sense(scmnd->sense_buffer,
SCSI_SENSE_BUFFERSIZE, &sense_hdr))
@@ -1120,7 +1242,7 @@ static void storvsc_command_completion(struct hv_storvsc_request *request)
scsi_done_fn(scmnd);
- kmem_cache_free(host_dev->request_pool, cmd_request);
+ mempool_free(cmd_request, memp->request_mempool);
}
static bool storvsc_check_scsi_cmd(struct scsi_cmnd *scmnd)
@@ -1131,7 +1253,7 @@ static bool storvsc_check_scsi_cmd(struct scsi_cmnd *scmnd)
switch (scsi_op) {
/* smartd sends this command, which will offline the device */
case SET_WINDOW:
- scmnd->result = DID_ERROR << 16;
+ scmnd->result = ILLEGAL_REQUEST << 16;
allowed = false;
break;
default:
@@ -1143,12 +1265,10 @@ static bool storvsc_check_scsi_cmd(struct scsi_cmnd *scmnd)
/*
* storvsc_queuecommand - Initiate command processing
*/
-static int storvsc_queuecommand_lck(struct scsi_cmnd *scmnd,
- void (*done)(struct scsi_cmnd *))
+static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
{
int ret;
- struct hv_host_device *host_dev =
- (struct hv_host_device *)scmnd->device->host->hostdata;
+ struct hv_host_device *host_dev = shost_priv(host);
struct hv_device *dev = host_dev->dev;
struct hv_storvsc_request *request;
struct storvsc_cmd_request *cmd_request;
@@ -1157,9 +1277,10 @@ static int storvsc_queuecommand_lck(struct scsi_cmnd *scmnd,
struct scatterlist *sgl;
unsigned int sg_count = 0;
struct vmscsi_request *vm_srb;
+ struct stor_mem_pools *memp = scmnd->device->hostdata;
if (storvsc_check_scsi_cmd(scmnd) == false) {
- done(scmnd);
+ scmnd->scsi_done(scmnd);
return 0;
}
@@ -1172,16 +1293,14 @@ static int storvsc_queuecommand_lck(struct scsi_cmnd *scmnd,
goto retry_request;
}
- scmnd->scsi_done = done;
-
request_size = sizeof(struct storvsc_cmd_request);
- cmd_request = kmem_cache_zalloc(host_dev->request_pool,
+ cmd_request = mempool_alloc(memp->request_mempool,
GFP_ATOMIC);
- if (!cmd_request) {
- scmnd->scsi_done = NULL;
+ if (!cmd_request)
return SCSI_MLQUEUE_DEVICE_BUSY;
- }
+
+ memset(cmd_request, 0, sizeof(struct storvsc_cmd_request));
/* Setup the cmd request */
cmd_request->bounce_sgl_count = 0;
@@ -1231,12 +1350,12 @@ static int storvsc_queuecommand_lck(struct scsi_cmnd *scmnd,
if (do_bounce_buffer(sgl, scsi_sg_count(scmnd)) != -1) {
cmd_request->bounce_sgl =
create_bounce_buffer(sgl, scsi_sg_count(scmnd),
- scsi_bufflen(scmnd));
+ scsi_bufflen(scmnd),
+ vm_srb->data_in);
if (!cmd_request->bounce_sgl) {
- scmnd->scsi_done = NULL;
scmnd->host_scribble = NULL;
- kmem_cache_free(host_dev->request_pool,
- cmd_request);
+ mempool_free(cmd_request,
+ memp->request_mempool);
return SCSI_MLQUEUE_HOST_BUSY;
}
@@ -1278,9 +1397,8 @@ retry_request:
destroy_bounce_buffer(cmd_request->bounce_sgl,
cmd_request->bounce_sgl_count);
- kmem_cache_free(host_dev->request_pool, cmd_request);
+ mempool_free(cmd_request, memp->request_mempool);
- scmnd->scsi_done = NULL;
scmnd->host_scribble = NULL;
ret = SCSI_MLQUEUE_DEVICE_BUSY;
@@ -1289,9 +1407,6 @@ retry_request:
return ret;
}
-static DEF_SCSI_QCMD(storvsc_queuecommand)
-
-
/* Scsi driver */
static struct scsi_host_template scsi_driver = {
.module = THIS_MODULE,
@@ -1300,6 +1415,7 @@ static struct scsi_host_template scsi_driver = {
.queuecommand = storvsc_queuecommand,
.eh_host_reset_handler = storvsc_host_reset_handler,
.slave_alloc = storvsc_device_alloc,
+ .slave_destroy = storvsc_device_destroy,
.slave_configure = storvsc_device_configure,
.cmd_per_lun = 1,
/* 64 max_queue * 1 target */
@@ -1308,14 +1424,7 @@ static struct scsi_host_template scsi_driver = {
/* no use setting to 0 since ll_blk_rw reset it to 1 */
/* currently 32 */
.sg_tablesize = MAX_MULTIPAGE_BUFFER_COUNT,
- /*
- * ENABLE_CLUSTERING allows mutiple physically contig bio_vecs to merge
- * into 1 sg element. If set, we must limit the max_segment_size to
- * PAGE_SIZE, otherwise we may get 1 sg element that represents
- * multiple
- */
- /* physically contig pfns (ie sg[x].length > PAGE_SIZE). */
- .use_clustering = ENABLE_CLUSTERING,
+ .use_clustering = DISABLE_CLUSTERING,
/* Make sure we dont get a sg segment crosses a page boundary */
.dma_boundary = PAGE_SIZE-1,
};
@@ -1360,27 +1469,17 @@ static int storvsc_probe(struct hv_device *device,
if (!host)
return -ENOMEM;
- host_dev = (struct hv_host_device *)host->hostdata;
+ host_dev = shost_priv(host);
memset(host_dev, 0, sizeof(struct hv_host_device));
host_dev->port = host->host_no;
host_dev->dev = device;
- host_dev->request_pool =
- kmem_cache_create(dev_name(&device->device),
- sizeof(struct storvsc_cmd_request), 0,
- SLAB_HWCACHE_ALIGN, NULL);
-
- if (!host_dev->request_pool) {
- scsi_host_put(host);
- return -ENOMEM;
- }
stor_device = kzalloc(sizeof(struct storvsc_device), GFP_KERNEL);
if (!stor_device) {
- kmem_cache_destroy(host_dev->request_pool);
- scsi_host_put(host);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_out0;
}
stor_device->destroy = false;
@@ -1391,12 +1490,8 @@ static int storvsc_probe(struct hv_device *device,
stor_device->port_number = host->host_no;
ret = storvsc_connect_to_vsp(device, storvsc_ringbuffer_size);
- if (ret) {
- kmem_cache_destroy(host_dev->request_pool);
- scsi_host_put(host);
- kfree(stor_device);
- return ret;
- }
+ if (ret)
+ goto err_out1;
if (dev_is_ide)
storvsc_get_ide_info(device, &target, &path);
@@ -1416,7 +1511,7 @@ static int storvsc_probe(struct hv_device *device,
/* Register the HBA and start the scsi bus scan */
ret = scsi_add_host(host, &device->device);
if (ret != 0)
- goto err_out;
+ goto err_out2;
if (!dev_is_ide) {
scsi_scan_host(host);
@@ -1425,21 +1520,32 @@ static int storvsc_probe(struct hv_device *device,
ret = scsi_add_device(host, 0, target, 0);
if (ret) {
scsi_remove_host(host);
- goto err_out;
+ goto err_out2;
}
return 0;
-err_out:
+err_out2:
+ /*
+ * Once we have connected with the host, we would need to
+ * invoke storvsc_dev_remove() to roll back this state and
+ * this call also frees up the stor_device; hence the jump around
+ * err_out1 label.
+ */
storvsc_dev_remove(device);
- kmem_cache_destroy(host_dev->request_pool);
+ goto err_out0;
+
+err_out1:
+ kfree(stor_device);
+
+err_out0:
scsi_host_put(host);
- return -ENODEV;
+ return ret;
}
/* The one and only one */
static struct hv_driver storvsc_drv = {
- .name = "storvsc",
+ .name = KBUILD_MODNAME,
.id_table = id_table,
.probe = storvsc_probe,
.remove = storvsc_remove,
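[Editor's note] The storvsc changes above drop the single per-host request cache and give each scsi_device its own kmem_cache backed by a mempool, so every LUN keeps at least STORVSC_MIN_BUF_NR request structures allocatable under memory pressure. A condensed sketch of that pairing, following the calls visible in storvsc_device_alloc()/storvsc_device_destroy() above (kernel-style C, not compilable outside a kernel build; names are illustrative):

    #include <linux/slab.h>
    #include <linux/mempool.h>

    struct stor_mem_pools {
            struct kmem_cache *request_pool;
            mempool_t *request_mempool;
    };

    /* Create a slab cache for fixed-size requests and a mempool that keeps
     * min_nr objects reserved from it; tear down in the reverse order. */
    static struct stor_mem_pools *pools_create(const char *name, size_t size,
                                               int min_nr)
    {
            struct stor_mem_pools *memp;

            memp = kzalloc(sizeof(*memp), GFP_KERNEL);
            if (!memp)
                    return NULL;

            memp->request_pool = kmem_cache_create(name, size, 0,
                                                   SLAB_HWCACHE_ALIGN, NULL);
            if (!memp->request_pool)
                    goto err_free;

            memp->request_mempool = mempool_create(min_nr, mempool_alloc_slab,
                                                   mempool_free_slab,
                                                   memp->request_pool);
            if (!memp->request_mempool)
                    goto err_cache;

            return memp;

    err_cache:
            kmem_cache_destroy(memp->request_pool);
    err_free:
            kfree(memp);
            return NULL;
    }

    static void pools_destroy(struct stor_mem_pools *memp)
    {
            mempool_destroy(memp->request_mempool);
            kmem_cache_destroy(memp->request_pool);
            kfree(memp);
    }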
diff --git a/drivers/staging/iio/Documentation/generic_buffer.c b/drivers/staging/iio/Documentation/generic_buffer.c
index d58095321491..69a05b9456d6 100644
--- a/drivers/staging/iio/Documentation/generic_buffer.c
+++ b/drivers/staging/iio/Documentation/generic_buffer.c
@@ -28,6 +28,7 @@
#include <linux/types.h>
#include <string.h>
#include <poll.h>
+#include <endian.h>
#include "iio_utils.h"
/**
@@ -56,6 +57,13 @@ int size_from_channelarray(struct iio_channel_info *channels, int num_channels)
void print2byte(int input, struct iio_channel_info *info)
{
+ /* First swap if incorrect endian */
+
+ if (info->be)
+ input = be16toh((uint16_t)input);
+ else
+ input = le16toh((uint16_t)input);
+
/* shift before conversion to avoid sign extension
of left aligned data */
input = input >> info->shift;
diff --git a/drivers/staging/iio/Documentation/iio_utils.h b/drivers/staging/iio/Documentation/iio_utils.h
index 75938b2412ff..6f3a392297ec 100644
--- a/drivers/staging/iio/Documentation/iio_utils.h
+++ b/drivers/staging/iio/Documentation/iio_utils.h
@@ -13,6 +13,7 @@
#include <ctype.h>
#include <stdio.h>
#include <stdint.h>
+#include <dirent.h>
#define IIO_MAX_NAME_LENGTH 30
@@ -73,6 +74,7 @@ struct iio_channel_info {
unsigned bits_used;
unsigned shift;
uint64_t mask;
+ unsigned be;
unsigned is_signed;
unsigned enabled;
unsigned location;
@@ -83,6 +85,7 @@ struct iio_channel_info {
* @is_signed: output whether channel is signed
* @bytes: output how many bytes the channel storage occupies
* @mask: output a bit mask for the raw data
+ * @be: big endian
* @device_dir: the iio device directory
* @name: the channel name
* @generic_name: the channel type name
@@ -92,6 +95,7 @@ inline int iioutils_get_type(unsigned *is_signed,
unsigned *bits_used,
unsigned *shift,
uint64_t *mask,
+ unsigned *be,
const char *device_dir,
const char *name,
const char *generic_name)
@@ -100,7 +104,7 @@ inline int iioutils_get_type(unsigned *is_signed,
int ret;
DIR *dp;
char *scan_el_dir, *builtname, *builtname_generic, *filename = 0;
- char signchar;
+ char signchar, endianchar;
unsigned padint;
const struct dirent *ent;
@@ -144,9 +148,18 @@ inline int iioutils_get_type(unsigned *is_signed,
ret = -errno;
goto error_free_filename;
}
- fscanf(sysfsfp,
- "%c%u/%u>>%u", &signchar, bits_used,
- &padint, shift);
+
+ ret = fscanf(sysfsfp,
+ "%ce:%c%u/%u>>%u",
+ &endianchar,
+ &signchar,
+ bits_used,
+ &padint, shift);
+ if (ret < 0) {
+ printf("failed to pass scan type description\n");
+ return ret;
+ }
+ *be = (endianchar == 'b');
*bytes = padint / 8;
if (*bits_used == 64)
*mask = ~0;
@@ -156,6 +169,10 @@ inline int iioutils_get_type(unsigned *is_signed,
*is_signed = 1;
else
*is_signed = 0;
+ fclose(sysfsfp);
+ free(filename);
+
+ filename = 0;
}
error_free_filename:
if (filename)
@@ -386,6 +403,7 @@ inline int build_channel_array(const char *device_dir,
&current->bits_used,
&current->shift,
&current->mask,
+ &current->be,
device_dir,
current->name,
current->generic_name);
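[Editor's note] The iio_utils.h change above extends the scan-element type parser so sysfs strings such as "be:s14/16>>2" carry an endianness prefix ahead of the sign, bits-used, storage-bits and shift fields. A standalone sketch of the same format string in userspace C (the type string here is an example only, not read from a real device):

    #include <stdio.h>

    int main(void)
    {
            const char *typestr = "be:s14/16>>2";   /* example only */
            char endianchar, signchar;
            unsigned bits_used, padint, shift, be;

            /* Same conversion specification as the fscanf() in the hunk above. */
            if (sscanf(typestr, "%ce:%c%u/%u>>%u",
                       &endianchar, &signchar, &bits_used, &padint, &shift) != 5)
                    return 1;

            be = (endianchar == 'b');
            printf("be=%u signed=%d bits=%u storage=%u shift=%u\n",
                   be, signchar == 's', bits_used, padint, shift);
            return 0;
    }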
diff --git a/drivers/staging/iio/Documentation/ring.txt b/drivers/staging/iio/Documentation/ring.txt
index 7e99ef2b7bc0..e33807761cd7 100644
--- a/drivers/staging/iio/Documentation/ring.txt
+++ b/drivers/staging/iio/Documentation/ring.txt
@@ -23,10 +23,6 @@ The function pointers within here are used to allow the core to handle
as much buffer functionality as possible. Note almost all of these
are optional.
-mark_in_use, unmark_in_use
- Basically indicate that not changes should be made to the buffer state that
- will effect the form of the data being captures (e.g. scan elements or length)
-
store_to
If possible, push data to the buffer.
@@ -39,8 +35,6 @@ rip_first_n
The primary buffer reading function. Note that it may well not return
as much data as requested.
-mark_param_changed
- Used to indicate that something has changed. Used in conjunction with
request_update
If parameters have changed that require reinitialization or configuration of
the buffer this will trigger it.
@@ -51,7 +45,3 @@ get_bytes_per_datum, set_bytes_per_datum
get_length / set_length
Get/set the number of complete scans that may be held by the buffer.
-is_enabled
- Query if ring buffer is in use
-enable
- Start the ring buffer.
diff --git a/drivers/staging/iio/Documentation/sysfs-bus-iio b/drivers/staging/iio/Documentation/sysfs-bus-iio
index 0d6823d19b61..46a995d6d261 100644
--- a/drivers/staging/iio/Documentation/sysfs-bus-iio
+++ b/drivers/staging/iio/Documentation/sysfs-bus-iio
@@ -276,6 +276,16 @@ Description:
If a discrete set of scale values are available, they
are listed in this attribute.
+What: /sys/.../in_accel_filter_low_pass_3db_frequency
+What: /sys/.../in_magn_filter_low_pass_3db_frequency
+What: /sys/.../in_anglvel_filter_low_pass_3db_frequency
+KernelVersion: 3.2
+Contact: linux-iio@vger.kernel.org
+Description:
+ If a known or controllable low pass filter is applied
+ to the underlying data channel, then this parameter
+ gives the 3dB frequency of the filter in Hz.
+
What: /sys/bus/iio/devices/iio:deviceX/out_voltageY_raw
KernelVersion: 2.6.37
Contact: linux-iio@vger.kernel.org
diff --git a/drivers/staging/iio/Kconfig b/drivers/staging/iio/Kconfig
index 4ec9118955f9..90162aa8b2df 100644
--- a/drivers/staging/iio/Kconfig
+++ b/drivers/staging/iio/Kconfig
@@ -76,7 +76,6 @@ config IIO_DUMMY_EVGEN
config IIO_SIMPLE_DUMMY
tristate "An example driver with no hardware requirements"
- select IIO_SIMPLE_DUMMY_EVGEN if IIO_SIMPLE_DUMMY_EVENTS
help
Driver intended mainly as documentation for how to write
a driver. May also be useful for testing userspace code
diff --git a/drivers/staging/iio/accel/adis16201_core.c b/drivers/staging/iio/accel/adis16201_core.c
index 97f747eac647..d439e45d07fa 100644
--- a/drivers/staging/iio/accel/adis16201_core.c
+++ b/drivers/staging/iio/accel/adis16201_core.c
@@ -17,7 +17,7 @@
#include "../iio.h"
#include "../sysfs.h"
-#include "../buffer_generic.h"
+#include "../buffer.h"
#include "adis16201.h"
@@ -322,8 +322,7 @@ static int adis16201_read_raw(struct iio_dev *indio_dev,
*val = val16;
mutex_unlock(&indio_dev->mlock);
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
case IIO_VOLTAGE:
*val = 0;
@@ -348,10 +347,10 @@ static int adis16201_read_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
break;
- case (1 << IIO_CHAN_INFO_OFFSET_SEPARATE):
+ case IIO_CHAN_INFO_OFFSET:
*val = 25;
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
+ case IIO_CHAN_INFO_CALIBBIAS:
switch (chan->type) {
case IIO_ACCEL:
bits = 12;
@@ -388,7 +387,7 @@ static int adis16201_write_raw(struct iio_dev *indio_dev,
s16 val16;
u8 addr;
switch (mask) {
- case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
+ case IIO_CHAN_INFO_CALIBBIAS:
switch (chan->type) {
case IIO_ACCEL:
bits = 12;
@@ -408,36 +407,36 @@ static int adis16201_write_raw(struct iio_dev *indio_dev,
static struct iio_chan_spec adis16201_channels[] = {
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, "supply", 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
in_supply, ADIS16201_SCAN_SUPPLY,
IIO_ST('u', 12, 16, 0), 0),
IIO_CHAN(IIO_TEMP, 0, 1, 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE) |
- (1 << IIO_CHAN_INFO_OFFSET_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT |
+ IIO_CHAN_INFO_OFFSET_SEPARATE_BIT,
temp, ADIS16201_SCAN_TEMP,
IIO_ST('u', 12, 16, 0), 0),
IIO_CHAN(IIO_ACCEL, 1, 0, 0, NULL, 0, IIO_MOD_X,
- (1 << IIO_CHAN_INFO_SCALE_SHARED) |
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT,
accel_x, ADIS16201_SCAN_ACC_X,
IIO_ST('s', 14, 16, 0), 0),
IIO_CHAN(IIO_ACCEL, 1, 0, 0, NULL, 0, IIO_MOD_Y,
- (1 << IIO_CHAN_INFO_SCALE_SHARED) |
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT,
accel_y, ADIS16201_SCAN_ACC_Y,
IIO_ST('s', 14, 16, 0), 0),
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 1, 0,
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
in_aux, ADIS16201_SCAN_AUX_ADC,
IIO_ST('u', 12, 16, 0), 0),
IIO_CHAN(IIO_INCLI, 1, 0, 0, NULL, 0, IIO_MOD_X,
- (1 << IIO_CHAN_INFO_SCALE_SHARED) |
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT,
incli_x, ADIS16201_SCAN_INCLI_X,
IIO_ST('s', 14, 16, 0), 0),
IIO_CHAN(IIO_INCLI, 1, 0, 0, NULL, 0, IIO_MOD_Y,
- (1 << IIO_CHAN_INFO_SCALE_SHARED) |
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT,
incli_y, ADIS16201_SCAN_INCLI_Y,
IIO_ST('s', 14, 16, 0), 0),
IIO_CHAN_SOFT_TIMESTAMP(7)
@@ -554,3 +553,4 @@ module_spi_driver(adis16201_driver);
MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
MODULE_DESCRIPTION("Analog Devices ADIS16201 Programmable Digital Vibration Sensor driver");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:adis16201");
diff --git a/drivers/staging/iio/accel/adis16201_ring.c b/drivers/staging/iio/accel/adis16201_ring.c
index 0016ed378e3a..26c610faee3f 100644
--- a/drivers/staging/iio/accel/adis16201_ring.c
+++ b/drivers/staging/iio/accel/adis16201_ring.c
@@ -74,11 +74,11 @@ static irqreturn_t adis16201_trigger_handler(int irq, void *p)
return -ENOMEM;
}
- if (ring->scan_count)
- if (adis16201_read_ring_data(indio_dev, st->rx) >= 0)
- for (; i < ring->scan_count; i++)
- data[i] = be16_to_cpup(
- (__be16 *)&(st->rx[i*2]));
+ if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength)
+ && adis16201_read_ring_data(indio_dev, st->rx) >= 0)
+ for (; i < bitmap_weight(indio_dev->active_scan_mask,
+ indio_dev->masklength); i++)
+ data[i] = be16_to_cpup((__be16 *)&(st->rx[i*2]));
/* Guaranteed to be aligned with 8 byte boundary */
if (ring->scan_timestamp)
@@ -116,11 +116,9 @@ int adis16201_configure_ring(struct iio_dev *indio_dev)
}
indio_dev->buffer = ring;
/* Effectively select the ring buffer implementation */
- ring->bpe = 2;
ring->scan_timestamp = true;
ring->access = &ring_sw_access_funcs;
- ring->setup_ops = &adis16201_ring_setup_ops;
- ring->owner = THIS_MODULE;
+ indio_dev->setup_ops = &adis16201_ring_setup_ops;
indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
&adis16201_trigger_handler,
diff --git a/drivers/staging/iio/accel/adis16203_core.c b/drivers/staging/iio/accel/adis16203_core.c
index a6d6d27f3c97..1a5140f9e3f4 100644
--- a/drivers/staging/iio/accel/adis16203_core.c
+++ b/drivers/staging/iio/accel/adis16203_core.c
@@ -17,7 +17,7 @@
#include "../iio.h"
#include "../sysfs.h"
-#include "../buffer_generic.h"
+#include "../buffer.h"
#include "adis16203.h"
@@ -329,8 +329,7 @@ static int adis16203_read_raw(struct iio_dev *indio_dev,
*val = val16;
mutex_unlock(&indio_dev->mlock);
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
case IIO_VOLTAGE:
*val = 0;
@@ -350,10 +349,10 @@ static int adis16203_read_raw(struct iio_dev *indio_dev,
default:
return -EINVAL;
}
- case (1 << IIO_CHAN_INFO_OFFSET_SEPARATE):
+ case IIO_CHAN_INFO_OFFSET:
*val = 25;
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
+ case IIO_CHAN_INFO_CALIBBIAS:
bits = 14;
mutex_lock(&indio_dev->mlock);
addr = adis16203_addresses[chan->address][1];
@@ -374,26 +373,26 @@ static int adis16203_read_raw(struct iio_dev *indio_dev,
static struct iio_chan_spec adis16203_channels[] = {
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, "supply", 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
in_supply, ADIS16203_SCAN_SUPPLY,
IIO_ST('u', 12, 16, 0), 0),
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 1, 0,
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
in_aux, ADIS16203_SCAN_AUX_ADC,
IIO_ST('u', 12, 16, 0), 0),
IIO_CHAN(IIO_INCLI, 1, 0, 0, NULL, 0, IIO_MOD_X,
- (1 << IIO_CHAN_INFO_SCALE_SHARED) |
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT,
incli_x, ADIS16203_SCAN_INCLI_X,
IIO_ST('s', 14, 16, 0), 0),
/* Fixme: Not what it appears to be - see data sheet */
IIO_CHAN(IIO_INCLI, 1, 0, 0, NULL, 0, IIO_MOD_Y,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
incli_y, ADIS16203_SCAN_INCLI_Y,
IIO_ST('s', 14, 16, 0), 0),
IIO_CHAN(IIO_TEMP, 0, 1, 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE) |
- (1 << IIO_CHAN_INFO_OFFSET_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT |
+ IIO_CHAN_INFO_OFFSET_SEPARATE_BIT,
temp, ADIS16203_SCAN_TEMP,
IIO_ST('u', 12, 16, 0), 0),
IIO_CHAN_SOFT_TIMESTAMP(5),
@@ -509,3 +508,4 @@ module_spi_driver(adis16203_driver);
MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
MODULE_DESCRIPTION("Analog Devices ADIS16203 Programmable Digital Vibration Sensor driver");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:adis16203");
diff --git a/drivers/staging/iio/accel/adis16203_ring.c b/drivers/staging/iio/accel/adis16203_ring.c
index 1fdfe6f6ac6e..064640d15e41 100644
--- a/drivers/staging/iio/accel/adis16203_ring.c
+++ b/drivers/staging/iio/accel/adis16203_ring.c
@@ -74,11 +74,11 @@ static irqreturn_t adis16203_trigger_handler(int irq, void *p)
return -ENOMEM;
}
- if (ring->scan_count)
- if (adis16203_read_ring_data(&indio_dev->dev, st->rx) >= 0)
- for (; i < ring->scan_count; i++)
- data[i] = be16_to_cpup(
- (__be16 *)&(st->rx[i*2]));
+ if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength) &&
+ adis16203_read_ring_data(&indio_dev->dev, st->rx) >= 0)
+ for (; i < bitmap_weight(indio_dev->active_scan_mask,
+ indio_dev->masklength); i++)
+ data[i] = be16_to_cpup((__be16 *)&(st->rx[i*2]));
/* Guaranteed to be aligned with 8 byte boundary */
if (ring->scan_timestamp)
@@ -118,11 +118,9 @@ int adis16203_configure_ring(struct iio_dev *indio_dev)
}
indio_dev->buffer = ring;
/* Effectively select the ring buffer implementation */
- ring->bpe = 2;
ring->scan_timestamp = true;
ring->access = &ring_sw_access_funcs;
- ring->setup_ops = &adis16203_ring_setup_ops;
- ring->owner = THIS_MODULE;
+ indio_dev->setup_ops = &adis16203_ring_setup_ops;
indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
&adis16203_trigger_handler,
diff --git a/drivers/staging/iio/accel/adis16204_core.c b/drivers/staging/iio/accel/adis16204_core.c
index 7ac5b4c533d8..fa89364b841e 100644
--- a/drivers/staging/iio/accel/adis16204_core.c
+++ b/drivers/staging/iio/accel/adis16204_core.c
@@ -20,7 +20,7 @@
#include "../iio.h"
#include "../sysfs.h"
-#include "../buffer_generic.h"
+#include "../buffer.h"
#include "adis16204.h"
@@ -366,7 +366,7 @@ static int adis16204_read_raw(struct iio_dev *indio_dev,
*val = val16;
mutex_unlock(&indio_dev->mlock);
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
+ case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
case IIO_VOLTAGE:
*val = 0;
@@ -390,12 +390,12 @@ static int adis16204_read_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
break;
- case (1 << IIO_CHAN_INFO_OFFSET_SEPARATE):
+ case IIO_CHAN_INFO_OFFSET:
*val = 25;
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
- case (1 << IIO_CHAN_INFO_PEAK_SEPARATE):
- if (mask == (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE)) {
+ case IIO_CHAN_INFO_CALIBBIAS:
+ case IIO_CHAN_INFO_PEAK:
+ if (mask == IIO_CHAN_INFO_CALIBBIAS) {
bits = 12;
addrind = 1;
} else { /* PEAK_SEPARATE */
@@ -428,7 +428,7 @@ static int adis16204_write_raw(struct iio_dev *indio_dev,
s16 val16;
u8 addr;
switch (mask) {
- case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
+ case IIO_CHAN_INFO_CALIBBIAS:
switch (chan->type) {
case IIO_ACCEL:
bits = 12;
@@ -445,28 +445,28 @@ static int adis16204_write_raw(struct iio_dev *indio_dev,
static struct iio_chan_spec adis16204_channels[] = {
IIO_CHAN(IIO_VOLTAGE, 0, 0, 0, "supply", 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
in_supply, ADIS16204_SCAN_SUPPLY,
IIO_ST('u', 12, 16, 0), 0),
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 1, 0,
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
in_aux, ADIS16204_SCAN_AUX_ADC,
IIO_ST('u', 12, 16, 0), 0),
IIO_CHAN(IIO_TEMP, 0, 1, 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE) |
- (1 << IIO_CHAN_INFO_OFFSET_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT |
+ IIO_CHAN_INFO_OFFSET_SEPARATE_BIT,
temp, ADIS16204_SCAN_TEMP,
IIO_ST('u', 12, 16, 0), 0),
IIO_CHAN(IIO_ACCEL, 1, 0, 0, NULL, 0, IIO_MOD_X,
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE) |
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_PEAK_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT |
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_PEAK_SEPARATE_BIT,
accel_x, ADIS16204_SCAN_ACC_X,
IIO_ST('s', 14, 16, 0), 0),
IIO_CHAN(IIO_ACCEL, 1, 0, 0, NULL, 0, IIO_MOD_Y,
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE) |
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_PEAK_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT |
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_PEAK_SEPARATE_BIT,
accel_y, ADIS16204_SCAN_ACC_Y,
IIO_ST('s', 14, 16, 0), 0),
IIO_CHAN_SOFT_TIMESTAMP(5),
@@ -582,3 +582,4 @@ module_spi_driver(adis16204_driver);
MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
MODULE_DESCRIPTION("ADIS16204 High-g Digital Impact Sensor and Recorder");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:adis16204");
diff --git a/drivers/staging/iio/accel/adis16204_ring.c b/drivers/staging/iio/accel/adis16204_ring.c
index 6fd3d8f51f2c..4081179dfa5c 100644
--- a/drivers/staging/iio/accel/adis16204_ring.c
+++ b/drivers/staging/iio/accel/adis16204_ring.c
@@ -71,11 +71,11 @@ static irqreturn_t adis16204_trigger_handler(int irq, void *p)
return -ENOMEM;
}
- if (ring->scan_count)
- if (adis16204_read_ring_data(&indio_dev->dev, st->rx) >= 0)
- for (; i < ring->scan_count; i++)
- data[i] = be16_to_cpup(
- (__be16 *)&(st->rx[i*2]));
+ if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength) &&
+ adis16204_read_ring_data(&indio_dev->dev, st->rx) >= 0)
+ for (; i < bitmap_weight(indio_dev->active_scan_mask,
+ indio_dev->masklength); i++)
+ data[i] = be16_to_cpup((__be16 *)&(st->rx[i*2]));
/* Guaranteed to be aligned with 8 byte boundary */
if (ring->scan_timestamp)
@@ -114,10 +114,8 @@ int adis16204_configure_ring(struct iio_dev *indio_dev)
indio_dev->buffer = ring;
/* Effectively select the ring buffer implementation */
ring->access = &ring_sw_access_funcs;
- ring->bpe = 2;
ring->scan_timestamp = true;
- ring->setup_ops = &adis16204_ring_setup_ops;
- ring->owner = THIS_MODULE;
+ indio_dev->setup_ops = &adis16204_ring_setup_ops;
indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
&adis16204_trigger_handler,
diff --git a/drivers/staging/iio/accel/adis16209_core.c b/drivers/staging/iio/accel/adis16209_core.c
index c03afbf5bbdc..a98715f6bd6d 100644
--- a/drivers/staging/iio/accel/adis16209_core.c
+++ b/drivers/staging/iio/accel/adis16209_core.c
@@ -18,7 +18,7 @@
#include "../iio.h"
#include "../sysfs.h"
-#include "../buffer_generic.h"
+#include "../buffer.h"
#include "adis16209.h"
@@ -304,7 +304,7 @@ static int adis16209_write_raw(struct iio_dev *indio_dev,
s16 val16;
u8 addr;
switch (mask) {
- case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
+ case IIO_CHAN_INFO_CALIBBIAS:
switch (chan->type) {
case IIO_ACCEL:
case IIO_INCLI:
@@ -355,8 +355,7 @@ static int adis16209_read_raw(struct iio_dev *indio_dev,
*val = val16;
mutex_unlock(&indio_dev->mlock);
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
case IIO_VOLTAGE:
*val = 0;
@@ -381,10 +380,10 @@ static int adis16209_read_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
break;
- case (1 << IIO_CHAN_INFO_OFFSET_SEPARATE):
+ case IIO_CHAN_INFO_OFFSET:
*val = 25;
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
+ case IIO_CHAN_INFO_CALIBBIAS:
switch (chan->type) {
case IIO_ACCEL:
bits = 14;
@@ -410,34 +409,34 @@ static int adis16209_read_raw(struct iio_dev *indio_dev,
static struct iio_chan_spec adis16209_channels[] = {
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
in_supply, ADIS16209_SCAN_SUPPLY,
IIO_ST('u', 14, 16, 0), 0),
IIO_CHAN(IIO_TEMP, 0, 1, 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE) |
- (1 << IIO_CHAN_INFO_OFFSET_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT |
+ IIO_CHAN_INFO_OFFSET_SEPARATE_BIT,
temp, ADIS16209_SCAN_TEMP,
IIO_ST('u', 12, 16, 0), 0),
IIO_CHAN(IIO_ACCEL, 1, 0, 0, NULL, 0, IIO_MOD_X,
- (1 << IIO_CHAN_INFO_SCALE_SHARED) |
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT,
accel_x, ADIS16209_SCAN_ACC_X,
IIO_ST('s', 14, 16, 0), 0),
IIO_CHAN(IIO_ACCEL, 1, 0, 0, NULL, 0, IIO_MOD_Y,
- (1 << IIO_CHAN_INFO_SCALE_SHARED) |
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT,
accel_y, ADIS16209_SCAN_ACC_Y,
IIO_ST('s', 14, 16, 0), 0),
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 1, 0,
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
in_aux, ADIS16209_SCAN_AUX_ADC,
IIO_ST('u', 12, 16, 0), 0),
IIO_CHAN(IIO_INCLI, 1, 0, 0, NULL, 0, IIO_MOD_X,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
incli_x, ADIS16209_SCAN_INCLI_X,
IIO_ST('s', 14, 16, 0), 0),
IIO_CHAN(IIO_INCLI, 1, 0, 0, NULL, 0, IIO_MOD_Y,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
incli_y, ADIS16209_SCAN_INCLI_Y,
IIO_ST('s', 14, 16, 0), 0),
IIO_CHAN(IIO_ROT, 0, 1, 0, NULL, 0, IIO_MOD_X,
@@ -558,3 +557,4 @@ module_spi_driver(adis16209_driver);
MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
MODULE_DESCRIPTION("Analog Devices ADIS16209 Digital Vibration Sensor driver");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:adis16209");
diff --git a/drivers/staging/iio/accel/adis16209_ring.c b/drivers/staging/iio/accel/adis16209_ring.c
index d17e39d95459..2a6fd334f5f1 100644
--- a/drivers/staging/iio/accel/adis16209_ring.c
+++ b/drivers/staging/iio/accel/adis16209_ring.c
@@ -72,9 +72,10 @@ static irqreturn_t adis16209_trigger_handler(int irq, void *p)
return -ENOMEM;
}
- if (ring->scan_count &&
+ if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength) &&
adis16209_read_ring_data(&indio_dev->dev, st->rx) >= 0)
- for (; i < ring->scan_count; i++)
+ for (; i < bitmap_weight(indio_dev->active_scan_mask,
+ indio_dev->masklength); i++)
data[i] = be16_to_cpup((__be16 *)&(st->rx[i*2]));
/* Guaranteed to be aligned with 8 byte boundary */
@@ -114,10 +115,8 @@ int adis16209_configure_ring(struct iio_dev *indio_dev)
indio_dev->buffer = ring;
/* Effectively select the ring buffer implementation */
ring->access = &ring_sw_access_funcs;
- ring->bpe = 2;
ring->scan_timestamp = true;
- ring->setup_ops = &adis16209_ring_setup_ops;
- ring->owner = THIS_MODULE;
+ indio_dev->setup_ops = &adis16209_ring_setup_ops;
indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
&adis16209_trigger_handler,
diff --git a/drivers/staging/iio/accel/adis16220_core.c b/drivers/staging/iio/accel/adis16220_core.c
index 73298e7849e6..51a852d45482 100644
--- a/drivers/staging/iio/accel/adis16220_core.c
+++ b/drivers/staging/iio/accel/adis16220_core.c
@@ -167,9 +167,9 @@ static ssize_t adis16220_write_16bit(struct device *dev,
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
int ret;
- long val;
+ u16 val;
- ret = strict_strtol(buf, 10, &val);
+ ret = kstrtou16(buf, 10, &val);
if (ret)
goto error_ret;
ret = adis16220_spi_write_reg_16(indio_dev, this_attr->address, val);
@@ -510,17 +510,17 @@ static int adis16220_read_raw(struct iio_dev *indio_dev,
case 0:
addrind = 0;
break;
- case (1 << IIO_CHAN_INFO_OFFSET_SEPARATE):
+ case IIO_CHAN_INFO_OFFSET:
if (chan->type == IIO_TEMP) {
*val = 25;
return IIO_VAL_INT;
}
addrind = 1;
break;
- case (1 << IIO_CHAN_INFO_PEAK_SEPARATE):
+ case IIO_CHAN_INFO_PEAK:
addrind = 2;
break;
- case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
+ case IIO_CHAN_INFO_SCALE:
*val = 0;
switch (chan->type) {
case IIO_TEMP:
@@ -575,27 +575,27 @@ static const struct iio_chan_spec adis16220_channels[] = {
.indexed = 1,
.channel = 0,
.extend_name = "supply",
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
.address = in_supply,
}, {
.type = IIO_ACCEL,
- .info_mask = (1 << IIO_CHAN_INFO_OFFSET_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE) |
- (1 << IIO_CHAN_INFO_PEAK_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_OFFSET_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT |
+ IIO_CHAN_INFO_PEAK_SEPARATE_BIT,
.address = accel,
}, {
.type = IIO_TEMP,
.indexed = 1,
.channel = 0,
- .info_mask = (1 << IIO_CHAN_INFO_OFFSET_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_OFFSET_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
.address = temp,
}, {
.type = IIO_VOLTAGE,
.indexed = 1,
.channel = 1,
- .info_mask = (1 << IIO_CHAN_INFO_OFFSET_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_OFFSET_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
.address = in_1,
}, {
.type = IIO_VOLTAGE,
@@ -713,3 +713,4 @@ module_spi_driver(adis16220_driver);
MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
MODULE_DESCRIPTION("Analog Devices ADIS16220 Digital Vibration Sensor");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:adis16220");
diff --git a/drivers/staging/iio/accel/adis16240_core.c b/drivers/staging/iio/accel/adis16240_core.c
index 88881b9919ef..17f77fef7f2b 100644
--- a/drivers/staging/iio/accel/adis16240_core.c
+++ b/drivers/staging/iio/accel/adis16240_core.c
@@ -21,7 +21,7 @@
#include "../iio.h"
#include "../sysfs.h"
-#include "../buffer_generic.h"
+#include "../buffer.h"
#include "adis16240.h"
@@ -389,8 +389,7 @@ static int adis16240_read_raw(struct iio_dev *indio_dev,
*val = val16;
mutex_unlock(&indio_dev->mlock);
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
case IIO_VOLTAGE:
*val = 0;
@@ -411,14 +410,14 @@ static int adis16240_read_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
break;
- case (1 << IIO_CHAN_INFO_PEAK_SCALE_SHARED):
+ case IIO_CHAN_INFO_PEAK_SCALE:
*val = 6;
*val2 = 629295;
return IIO_VAL_INT_PLUS_MICRO;
- case (1 << IIO_CHAN_INFO_OFFSET_SEPARATE):
+ case IIO_CHAN_INFO_OFFSET:
*val = 25;
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
+ case IIO_CHAN_INFO_CALIBBIAS:
bits = 10;
mutex_lock(&indio_dev->mlock);
addr = adis16240_addresses[chan->address][1];
@@ -432,7 +431,7 @@ static int adis16240_read_raw(struct iio_dev *indio_dev,
*val = val16;
mutex_unlock(&indio_dev->mlock);
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_PEAK_SEPARATE):
+ case IIO_CHAN_INFO_PEAK:
bits = 10;
mutex_lock(&indio_dev->mlock);
addr = adis16240_addresses[chan->address][2];
@@ -460,7 +459,7 @@ static int adis16240_write_raw(struct iio_dev *indio_dev,
s16 val16;
u8 addr;
switch (mask) {
- case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
+ case IIO_CHAN_INFO_CALIBBIAS:
val16 = val & ((1 << bits) - 1);
addr = adis16240_addresses[chan->address][1];
return adis16240_spi_write_reg_16(indio_dev, addr, val16);
@@ -470,7 +469,7 @@ static int adis16240_write_raw(struct iio_dev *indio_dev,
static struct iio_chan_spec adis16240_channels[] = {
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, "supply", 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
in_supply, ADIS16240_SCAN_SUPPLY,
IIO_ST('u', 10, 16, 0), 0),
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 1, 0,
@@ -478,22 +477,22 @@ static struct iio_chan_spec adis16240_channels[] = {
in_aux, ADIS16240_SCAN_AUX_ADC,
IIO_ST('u', 10, 16, 0), 0),
IIO_CHAN(IIO_ACCEL, 1, 0, 0, NULL, 0, IIO_MOD_X,
- (1 << IIO_CHAN_INFO_SCALE_SHARED) |
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT,
accel_x, ADIS16240_SCAN_ACC_X,
IIO_ST('s', 10, 16, 0), 0),
IIO_CHAN(IIO_ACCEL, 1, 0, 0, NULL, 0, IIO_MOD_Y,
- (1 << IIO_CHAN_INFO_SCALE_SHARED) |
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT,
accel_y, ADIS16240_SCAN_ACC_Y,
IIO_ST('s', 10, 16, 0), 0),
IIO_CHAN(IIO_ACCEL, 1, 0, 0, NULL, 0, IIO_MOD_Z,
- (1 << IIO_CHAN_INFO_SCALE_SHARED) |
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT,
accel_z, ADIS16240_SCAN_ACC_Z,
IIO_ST('s', 10, 16, 0), 0),
IIO_CHAN(IIO_TEMP, 0, 1, 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
temp, ADIS16240_SCAN_TEMP,
IIO_ST('u', 10, 16, 0), 0),
IIO_CHAN_SOFT_TIMESTAMP(6)
@@ -611,3 +610,4 @@ module_spi_driver(adis16240_driver);
MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
MODULE_DESCRIPTION("Analog Devices Programmable Impact Sensor and Recorder");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:adis16240");
diff --git a/drivers/staging/iio/accel/adis16240_ring.c b/drivers/staging/iio/accel/adis16240_ring.c
index b907ca3f4fdf..e23622d96f9f 100644
--- a/drivers/staging/iio/accel/adis16240_ring.c
+++ b/drivers/staging/iio/accel/adis16240_ring.c
@@ -69,9 +69,10 @@ static irqreturn_t adis16240_trigger_handler(int irq, void *p)
return -ENOMEM;
}
- if (ring->scan_count &&
+ if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength) &&
adis16240_read_ring_data(&indio_dev->dev, st->rx) >= 0)
- for (; i < ring->scan_count; i++)
+ for (; i < bitmap_weight(indio_dev->active_scan_mask,
+ indio_dev->masklength); i++)
data[i] = be16_to_cpup((__be16 *)&(st->rx[i*2]));
/* Guaranteed to be aligned with 8 byte boundary */
@@ -111,10 +112,8 @@ int adis16240_configure_ring(struct iio_dev *indio_dev)
indio_dev->buffer = ring;
/* Effectively select the ring buffer implementation */
ring->access = &ring_sw_access_funcs;
- ring->bpe = 2;
ring->scan_timestamp = true;
- ring->setup_ops = &adis16240_ring_setup_ops;
- ring->owner = THIS_MODULE;
+ indio_dev->setup_ops = &adis16240_ring_setup_ops;
indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
&adis16240_trigger_handler,
diff --git a/drivers/staging/iio/accel/kxsd9.c b/drivers/staging/iio/accel/kxsd9.c
index cfce21c2eddc..d13d7215ff6e 100644
--- a/drivers/staging/iio/accel/kxsd9.c
+++ b/drivers/staging/iio/accel/kxsd9.c
@@ -140,7 +140,7 @@ static int kxsd9_write_raw(struct iio_dev *indio_dev,
{
int ret = -EINVAL;
- if (mask == (1 << IIO_CHAN_INFO_SCALE_SHARED)) {
+ if (mask == IIO_CHAN_INFO_SCALE) {
/* Check no integer component */
if (val)
return -EINVAL;
@@ -164,7 +164,7 @@ static int kxsd9_read_raw(struct iio_dev *indio_dev,
goto error_ret;
*val = ret;
break;
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ case IIO_CHAN_INFO_SCALE:
ret = spi_w8r8(st->us, KXSD9_READ(KXSD9_REG_CTRL_C));
if (ret)
goto error_ret;
@@ -181,7 +181,7 @@ error_ret:
.type = IIO_ACCEL, \
.modified = 1, \
.channel2 = IIO_MOD_##axis, \
- .info_mask = 1 << IIO_CHAN_INFO_SCALE_SHARED, \
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT, \
.address = KXSD9_REG_##axis, \
}
@@ -268,8 +268,10 @@ static int __devexit kxsd9_remove(struct spi_device *spi)
}
static const struct spi_device_id kxsd9_id[] = {
- {"kxsd9", 0}
+ {"kxsd9", 0},
+ { },
};
+MODULE_DEVICE_TABLE(spi, kxsd9_id);
static struct spi_driver kxsd9_driver = {
.driver = {
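The kxsd9 change also shows the module-autoload fix that recurs throughout this series: the spi_device_id table gains a terminating empty entry and is exported with MODULE_DEVICE_TABLE(), while several of the drivers additionally gain a MODULE_ALIAS("spi:<name>") line. A sketch of the pattern with an illustrative device name:

	static const struct spi_device_id example_id[] = {
		{"exampledev", 0},
		{ },				/* sentinel entry */
	};
	MODULE_DEVICE_TABLE(spi, example_id);	/* enables modalias-based autoloading */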
diff --git a/drivers/staging/iio/accel/lis3l02dq.h b/drivers/staging/iio/accel/lis3l02dq.h
index 7237a9ab61a8..2db383fc2743 100644
--- a/drivers/staging/iio/accel/lis3l02dq.h
+++ b/drivers/staging/iio/accel/lis3l02dq.h
@@ -181,11 +181,6 @@ int lis3l02dq_disable_all_events(struct iio_dev *indio_dev);
void lis3l02dq_remove_trigger(struct iio_dev *indio_dev);
int lis3l02dq_probe_trigger(struct iio_dev *indio_dev);
-ssize_t lis3l02dq_read_accel_from_buffer(struct iio_buffer *buffer,
- int index,
- int *val);
-
-
int lis3l02dq_configure_buffer(struct iio_dev *indio_dev);
void lis3l02dq_unconfigure_buffer(struct iio_dev *indio_dev);
@@ -212,13 +207,6 @@ static inline int lis3l02dq_probe_trigger(struct iio_dev *indio_dev)
{
return 0;
}
-static inline ssize_t
-lis3l02dq_read_accel_from_buffer(struct iio_buffer *buffer,
- int index,
- int *val)
-{
- return 0;
-}
static int lis3l02dq_configure_buffer(struct iio_dev *indio_dev)
{
diff --git a/drivers/staging/iio/accel/lis3l02dq_core.c b/drivers/staging/iio/accel/lis3l02dq_core.c
index 6877521ec173..376da5137967 100644
--- a/drivers/staging/iio/accel/lis3l02dq_core.c
+++ b/drivers/staging/iio/accel/lis3l02dq_core.c
@@ -25,7 +25,8 @@
#include "../iio.h"
#include "../sysfs.h"
-#include "../buffer_generic.h"
+#include "../events.h"
+#include "../buffer.h"
#include "lis3l02dq.h"
@@ -226,14 +227,14 @@ static int lis3l02dq_write_raw(struct iio_dev *indio_dev,
u8 uval;
s8 sval;
switch (mask) {
- case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
+ case IIO_CHAN_INFO_CALIBBIAS:
if (val > 255 || val < -256)
return -EINVAL;
sval = val;
reg = lis3l02dq_axis_map[LIS3L02DQ_BIAS][chan->address];
ret = lis3l02dq_spi_write_reg_8(indio_dev, reg, sval);
break;
- case (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE):
+ case IIO_CHAN_INFO_CALIBSCALE:
if (val & ~0xFF)
return -EINVAL;
uval = val;
@@ -259,23 +260,20 @@ static int lis3l02dq_read_raw(struct iio_dev *indio_dev,
case 0:
/* Take the iio_dev status lock */
mutex_lock(&indio_dev->mlock);
- if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED)
- ret = lis3l02dq_read_accel_from_buffer(indio_dev->
- buffer,
- chan->scan_index,
- val);
- else {
+ if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
+ ret = -EBUSY;
+ } else {
reg = lis3l02dq_axis_map
[LIS3L02DQ_ACCEL][chan->address];
ret = lis3l02dq_read_reg_s16(indio_dev, reg, val);
}
mutex_unlock(&indio_dev->mlock);
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ case IIO_CHAN_INFO_SCALE:
*val = 0;
*val2 = 9580;
return IIO_VAL_INT_PLUS_MICRO;
- case (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE):
+ case IIO_CHAN_INFO_CALIBSCALE:
reg = lis3l02dq_axis_map[LIS3L02DQ_GAIN][chan->address];
ret = lis3l02dq_spi_read_reg_8(indio_dev, reg, &utemp);
if (ret)
@@ -284,7 +282,7 @@ static int lis3l02dq_read_raw(struct iio_dev *indio_dev,
*val = utemp;
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
+ case IIO_CHAN_INFO_CALIBBIAS:
reg = lis3l02dq_axis_map[LIS3L02DQ_BIAS][chan->address];
ret = lis3l02dq_spi_read_reg_8(indio_dev, reg, (u8 *)&stemp);
/* to match with what previous code does */
@@ -331,11 +329,11 @@ static ssize_t lis3l02dq_write_frequency(struct device *dev,
size_t len)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- long val;
+ unsigned long val;
int ret;
u8 t;
- ret = strict_strtol(buf, 10, &val);
+ ret = kstrtoul(buf, 10, &val);
if (ret)
return ret;
@@ -515,9 +513,9 @@ static irqreturn_t lis3l02dq_event_handler(int irq, void *private)
}
#define LIS3L02DQ_INFO_MASK \
- ((1 << IIO_CHAN_INFO_SCALE_SHARED) | \
- (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE) | \
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE))
+ (IIO_CHAN_INFO_SCALE_SHARED_BIT | \
+ IIO_CHAN_INFO_CALIBSCALE_SEPARATE_BIT | \
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT)
#define LIS3L02DQ_EVENT_MASK \
(IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING) | \
@@ -534,7 +532,7 @@ static struct iio_chan_spec lis3l02dq_channels[] = {
};
-static ssize_t lis3l02dq_read_event_config(struct iio_dev *indio_dev,
+static int lis3l02dq_read_event_config(struct iio_dev *indio_dev,
u64 event_code)
{
@@ -809,3 +807,4 @@ module_spi_driver(lis3l02dq_driver);
MODULE_AUTHOR("Jonathan Cameron <jic23@cam.ac.uk>");
MODULE_DESCRIPTION("ST LIS3L02DQ Accelerometer SPI driver");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:lis3l02dq");
diff --git a/drivers/staging/iio/accel/lis3l02dq_ring.c b/drivers/staging/iio/accel/lis3l02dq_ring.c
index 89527af8f4c5..98c5c92d3450 100644
--- a/drivers/staging/iio/accel/lis3l02dq_ring.c
+++ b/drivers/staging/iio/accel/lis3l02dq_ring.c
@@ -38,38 +38,6 @@ irqreturn_t lis3l02dq_data_rdy_trig_poll(int irq, void *private)
return IRQ_WAKE_THREAD;
}
-/**
- * lis3l02dq_read_accel_from_buffer() individual acceleration read from buffer
- **/
-ssize_t lis3l02dq_read_accel_from_buffer(struct iio_buffer *buffer,
- int index,
- int *val)
-{
- int ret;
- s16 *data;
-
- if (!iio_scan_mask_query(buffer, index))
- return -EINVAL;
-
- if (!buffer->access->read_last)
- return -EBUSY;
-
- data = kmalloc(buffer->access->get_bytes_per_datum(buffer),
- GFP_KERNEL);
- if (data == NULL)
- return -ENOMEM;
-
- ret = buffer->access->read_last(buffer, (u8 *)data);
- if (ret)
- goto error_free_data;
- *val = data[bitmap_weight(buffer->scan_mask, index)];
-error_free_data:
-
- kfree(data);
-
- return ret;
-}
-
static const u8 read_all_tx_array[] = {
LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_X_L_ADDR), 0,
LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_X_H_ADDR), 0,
@@ -87,21 +55,21 @@ static const u8 read_all_tx_array[] = {
**/
static int lis3l02dq_read_all(struct iio_dev *indio_dev, u8 *rx_array)
{
- struct iio_buffer *buffer = indio_dev->buffer;
struct lis3l02dq_state *st = iio_priv(indio_dev);
struct spi_transfer *xfers;
struct spi_message msg;
int ret, i, j = 0;
- xfers = kzalloc((buffer->scan_count) * 2
- * sizeof(*xfers), GFP_KERNEL);
+ xfers = kcalloc(bitmap_weight(indio_dev->active_scan_mask,
+ indio_dev->masklength) * 2,
+ sizeof(*xfers), GFP_KERNEL);
if (!xfers)
return -ENOMEM;
mutex_lock(&st->buf_lock);
for (i = 0; i < ARRAY_SIZE(read_all_tx_array)/4; i++)
- if (test_bit(i, buffer->scan_mask)) {
+ if (test_bit(i, indio_dev->active_scan_mask)) {
/* lower byte */
xfers[j].tx_buf = st->tx + 2*j;
st->tx[2*j] = read_all_tx_array[i*4];
@@ -129,7 +97,8 @@ static int lis3l02dq_read_all(struct iio_dev *indio_dev, u8 *rx_array)
* values in alternate bytes
*/
spi_message_init(&msg);
- for (j = 0; j < buffer->scan_count * 2; j++)
+ for (j = 0; j < bitmap_weight(indio_dev->active_scan_mask,
+ indio_dev->masklength) * 2; j++)
spi_message_add_tail(&xfers[j], &msg);
ret = spi_sync(st->us, &msg);
@@ -145,14 +114,16 @@ static int lis3l02dq_get_buffer_element(struct iio_dev *indio_dev,
int ret, i;
u8 *rx_array ;
s16 *data = (s16 *)buf;
+ int scan_count = bitmap_weight(indio_dev->active_scan_mask,
+ indio_dev->masklength);
- rx_array = kzalloc(4 * (indio_dev->buffer->scan_count), GFP_KERNEL);
+ rx_array = kzalloc(4 * scan_count, GFP_KERNEL);
if (rx_array == NULL)
return -ENOMEM;
ret = lis3l02dq_read_all(indio_dev, rx_array);
if (ret < 0)
return ret;
- for (i = 0; i < indio_dev->buffer->scan_count; i++)
+ for (i = 0; i < scan_count; i++)
data[i] = combine_8_to_16(rx_array[i*4+1],
rx_array[i*4+3]);
kfree(rx_array);
@@ -175,7 +146,7 @@ static irqreturn_t lis3l02dq_trigger_handler(int irq, void *p)
return -ENOMEM;
}
- if (buffer->scan_count)
+ if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
len = lis3l02dq_get_buffer_element(indio_dev, data);
/* Guaranteed to be aligned with 8 byte boundary */
@@ -363,17 +334,17 @@ static int lis3l02dq_buffer_postenable(struct iio_dev *indio_dev)
if (ret)
goto error_ret;
- if (iio_scan_mask_query(indio_dev->buffer, 0)) {
+ if (test_bit(0, indio_dev->active_scan_mask)) {
t |= LIS3L02DQ_REG_CTRL_1_AXES_X_ENABLE;
oneenabled = true;
} else
t &= ~LIS3L02DQ_REG_CTRL_1_AXES_X_ENABLE;
- if (iio_scan_mask_query(indio_dev->buffer, 1)) {
+ if (test_bit(1, indio_dev->active_scan_mask)) {
t |= LIS3L02DQ_REG_CTRL_1_AXES_Y_ENABLE;
oneenabled = true;
} else
t &= ~LIS3L02DQ_REG_CTRL_1_AXES_Y_ENABLE;
- if (iio_scan_mask_query(indio_dev->buffer, 2)) {
+ if (test_bit(2, indio_dev->active_scan_mask)) {
t |= LIS3L02DQ_REG_CTRL_1_AXES_Z_ENABLE;
oneenabled = true;
} else
@@ -437,11 +408,9 @@ int lis3l02dq_configure_buffer(struct iio_dev *indio_dev)
indio_dev->buffer = buffer;
/* Effectively select the buffer implementation */
indio_dev->buffer->access = &lis3l02dq_access_funcs;
- buffer->bpe = 2;
buffer->scan_timestamp = true;
- buffer->setup_ops = &lis3l02dq_buffer_setup_ops;
- buffer->owner = THIS_MODULE;
+ indio_dev->setup_ops = &lis3l02dq_buffer_setup_ops;
/* Functions are NULL as we set handler below */
indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
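The ring/buffer hunks in these files replace the old per-buffer scan_count and scan_mask fields with the device-level indio_dev->active_scan_mask bitmap, queried through the generic bitmap helpers (bitmap_empty() is used for the "nothing enabled" check). A sketch of the idiom as two small helpers, with hypothetical names:

	static bool example_channel_enabled(struct iio_dev *indio_dev, int bit)
	{
		/* formerly iio_scan_mask_query(indio_dev->buffer, bit) */
		return test_bit(bit, indio_dev->active_scan_mask);
	}

	static int example_scan_count(struct iio_dev *indio_dev)
	{
		/* formerly indio_dev->buffer->scan_count */
		return bitmap_weight(indio_dev->active_scan_mask,
				     indio_dev->masklength);
	}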
diff --git a/drivers/staging/iio/accel/sca3000_core.c b/drivers/staging/iio/accel/sca3000_core.c
index 6c359074a06d..49764fb7181c 100644
--- a/drivers/staging/iio/accel/sca3000_core.c
+++ b/drivers/staging/iio/accel/sca3000_core.c
@@ -20,7 +20,8 @@
#include <linux/module.h>
#include "../iio.h"
#include "../sysfs.h"
-#include "../buffer_generic.h"
+#include "../events.h"
+#include "../buffer.h"
#include "sca3000.h"
@@ -381,13 +382,17 @@ sca3000_store_measurement_mode(struct device *dev,
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct sca3000_state *st = iio_priv(indio_dev);
int ret;
- int mask = 0x03;
- long val;
+ u8 mask = 0x03;
+ u8 val;
mutex_lock(&st->lock);
- ret = strict_strtol(buf, 10, &val);
+ ret = kstrtou8(buf, 10, &val);
if (ret)
goto error_ret;
+ if (val > 3) {
+ ret = -EINVAL;
+ goto error_ret;
+ }
ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1);
if (ret)
goto error_ret;
@@ -424,7 +429,7 @@ static IIO_DEVICE_ATTR(measurement_mode, S_IRUGO | S_IWUSR,
static IIO_DEVICE_ATTR(revision, S_IRUGO, sca3000_show_rev, NULL, 0);
#define SCA3000_INFO_MASK \
- (1 << IIO_CHAN_INFO_SCALE_SHARED)
+ IIO_CHAN_INFO_SCALE_SHARED_BIT
#define SCA3000_EVENT_MASK \
(IIO_EV_BIT(IIO_EV_TYPE_MAG, IIO_EV_DIR_RISING))
@@ -474,7 +479,7 @@ static int sca3000_read_raw(struct iio_dev *indio_dev,
(sizeof(*val)*8 - 13);
mutex_unlock(&st->lock);
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ case IIO_CHAN_INFO_SCALE:
*val = 0;
if (chan->type == IIO_ACCEL)
*val2 = st->info->scale;
@@ -1161,9 +1166,9 @@ static int __devinit sca3000_probe(struct spi_device *spi)
if (ret < 0)
goto error_unregister_dev;
if (indio_dev->buffer) {
- iio_scan_mask_set(indio_dev->buffer, 0);
- iio_scan_mask_set(indio_dev->buffer, 1);
- iio_scan_mask_set(indio_dev->buffer, 2);
+ iio_scan_mask_set(indio_dev, indio_dev->buffer, 0);
+ iio_scan_mask_set(indio_dev, indio_dev->buffer, 1);
+ iio_scan_mask_set(indio_dev, indio_dev->buffer, 2);
}
if (spi->irq) {
@@ -1240,6 +1245,7 @@ static const struct spi_device_id sca3000_id[] = {
{"sca3000_e05", e05},
{}
};
+MODULE_DEVICE_TABLE(spi, sca3000_id);
static struct spi_driver sca3000_driver = {
.driver = {
diff --git a/drivers/staging/iio/accel/sca3000_ring.c b/drivers/staging/iio/accel/sca3000_ring.c
index 4a9a01dccd0c..6b824a11f7f4 100644
--- a/drivers/staging/iio/accel/sca3000_ring.c
+++ b/drivers/staging/iio/accel/sca3000_ring.c
@@ -20,7 +20,7 @@
#include "../iio.h"
#include "../sysfs.h"
-#include "../buffer_generic.h"
+#include "../buffer.h"
#include "../ring_hw.h"
#include "sca3000.h"
@@ -146,7 +146,6 @@ static int sca3000_ring_get_bytes_per_datum(struct iio_buffer *r)
}
static IIO_BUFFER_ENABLE_ATTR;
-static IIO_BUFFER_BYTES_PER_DATUM_ATTR;
static IIO_BUFFER_LENGTH_ATTR;
/**
@@ -158,8 +157,7 @@ static ssize_t sca3000_query_ring_int(struct device *dev,
{
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
int ret, val;
- struct iio_buffer *ring = dev_get_drvdata(dev);
- struct iio_dev *indio_dev = ring->indio_dev;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct sca3000_state *st = iio_priv(indio_dev);
mutex_lock(&st->lock);
@@ -180,8 +178,7 @@ static ssize_t sca3000_set_ring_int(struct device *dev,
const char *buf,
size_t len)
{
- struct iio_buffer *ring = dev_get_drvdata(dev);
- struct iio_dev *indio_dev = ring->indio_dev;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct sca3000_state *st = iio_priv(indio_dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
long val;
@@ -222,8 +219,7 @@ static ssize_t sca3000_show_buffer_scale(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct iio_buffer *ring = dev_get_drvdata(dev);
- struct iio_dev *indio_dev = ring->indio_dev;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct sca3000_state *st = iio_priv(indio_dev);
return sprintf(buf, "0.%06d\n", 4*st->info->scale);
@@ -243,7 +239,6 @@ static IIO_DEVICE_ATTR(in_accel_scale,
*/
static struct attribute *sca3000_ring_attributes[] = {
&dev_attr_length.attr,
- &dev_attr_bytes_per_datum.attr,
&dev_attr_enable.attr,
&iio_dev_attr_50_percent.dev_attr.attr,
&iio_dev_attr_75_percent.dev_attr.attr,
@@ -269,7 +264,7 @@ static struct iio_buffer *sca3000_rb_allocate(struct iio_dev *indio_dev)
buf = &ring->buf;
buf->stufftoread = 0;
buf->attrs = &sca3000_ring_attr;
- iio_buffer_init(buf, indio_dev);
+ iio_buffer_init(buf);
return buf;
}
@@ -350,7 +345,7 @@ static const struct iio_buffer_setup_ops sca3000_ring_setup_ops = {
void sca3000_register_ring_funcs(struct iio_dev *indio_dev)
{
- indio_dev->buffer->setup_ops = &sca3000_ring_setup_ops;
+ indio_dev->setup_ops = &sca3000_ring_setup_ops;
}
/**
diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c
index a6a4a4e1f183..45f4504ed927 100644
--- a/drivers/staging/iio/adc/ad7192.c
+++ b/drivers/staging/iio/adc/ad7192.c
@@ -19,7 +19,7 @@
#include "../iio.h"
#include "../sysfs.h"
-#include "../buffer_generic.h"
+#include "../buffer.h"
#include "../ring_sw.h"
#include "../trigger.h"
#include "../trigger_consumer.h"
@@ -453,25 +453,6 @@ out:
return ret;
}
-static int ad7192_scan_from_ring(struct ad7192_state *st, unsigned ch, int *val)
-{
- struct iio_buffer *ring = iio_priv_to_dev(st)->buffer;
- int ret;
- s64 dat64[2];
- u32 *dat32 = (u32 *)dat64;
-
- if (!(test_bit(ch, ring->scan_mask)))
- return -EBUSY;
-
- ret = ring->access->read_last(ring, (u8 *) &dat64);
- if (ret)
- return ret;
-
- *val = *dat32;
-
- return 0;
-}
-
static int ad7192_ring_preenable(struct iio_dev *indio_dev)
{
struct ad7192_state *st = iio_priv(indio_dev);
@@ -479,12 +460,14 @@ static int ad7192_ring_preenable(struct iio_dev *indio_dev)
size_t d_size;
unsigned channel;
- if (!ring->scan_count)
+ if (bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
return -EINVAL;
- channel = find_first_bit(ring->scan_mask, indio_dev->masklength);
+ channel = find_first_bit(indio_dev->active_scan_mask,
+ indio_dev->masklength);
- d_size = ring->scan_count *
+ d_size = bitmap_weight(indio_dev->active_scan_mask,
+ indio_dev->masklength) *
indio_dev->channels[0].scan_type.storagebits / 8;
if (ring->scan_timestamp) {
@@ -544,7 +527,7 @@ static irqreturn_t ad7192_trigger_handler(int irq, void *p)
s64 dat64[2];
s32 *dat32 = (s32 *)dat64;
- if (ring->scan_count)
+ if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
__ad7192_read_reg(st, 1, 1, AD7192_REG_DATA,
dat32,
indio_dev->channels[0].scan_type.realbits/8);
@@ -592,7 +575,7 @@ static int ad7192_register_ring_funcs_and_init(struct iio_dev *indio_dev)
}
/* Ring buffer functions - here trigger setup related */
- indio_dev->buffer->setup_ops = &ad7192_ring_setup_ops;
+ indio_dev->setup_ops = &ad7192_ring_setup_ops;
/* Flag that polled ring buffering is possible */
indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
@@ -626,6 +609,10 @@ static irqreturn_t ad7192_data_rdy_trig_poll(int irq, void *private)
return IRQ_HANDLED;
}
+static struct iio_trigger_ops ad7192_trigger_ops = {
+ .owner = THIS_MODULE,
+};
+
static int ad7192_probe_trigger(struct iio_dev *indio_dev)
{
struct ad7192_state *st = iio_priv(indio_dev);
@@ -638,7 +625,7 @@ static int ad7192_probe_trigger(struct iio_dev *indio_dev)
ret = -ENOMEM;
goto error_ret;
}
-
+ st->trig->ops = &ad7192_trigger_ops;
ret = request_irq(st->spi->irq,
ad7192_data_rdy_trig_poll,
IRQF_TRIGGER_LOW,
@@ -650,7 +637,6 @@ static int ad7192_probe_trigger(struct iio_dev *indio_dev)
disable_irq_nosync(st->spi->irq);
st->irq_dis = true;
st->trig->dev.parent = &st->spi->dev;
- st->trig->owner = THIS_MODULE;
st->trig->private_data = indio_dev;
ret = iio_trigger_register(st->trig);
@@ -795,7 +781,7 @@ static ssize_t ad7192_set(struct device *dev,
return -EBUSY;
}
- switch (this_attr->address) {
+ switch ((u32) this_attr->address) {
case AD7192_REG_GPOCON:
if (val)
st->gpocon |= AD7192_GPOCON_BPDSW;
@@ -838,14 +824,14 @@ static struct attribute *ad7192_attributes[] = {
NULL
};
-static mode_t ad7192_attr_is_visible(struct kobject *kobj,
+static umode_t ad7192_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct ad7192_state *st = iio_priv(indio_dev);
- mode_t mode = attr->mode;
+ umode_t mode = attr->mode;
if ((st->devid != ID_AD7195) &&
(attr == &iio_dev_attr_ac_excitation_en.dev_attr.attr))
@@ -873,8 +859,7 @@ static int ad7192_read_raw(struct iio_dev *indio_dev,
case 0:
mutex_lock(&indio_dev->mlock);
if (iio_buffer_enabled(indio_dev))
- ret = ad7192_scan_from_ring(st,
- chan->scan_index, &smpl);
+ ret = -EBUSY;
else
ret = ad7192_read(st, chan->address,
chan->scan_type.realbits / 8, &smpl);
@@ -901,18 +886,20 @@ static int ad7192_read_raw(struct iio_dev *indio_dev,
}
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
- mutex_lock(&indio_dev->mlock);
- *val = st->scale_avail[AD7192_CONF_GAIN(st->conf)][0];
- *val2 = st->scale_avail[AD7192_CONF_GAIN(st->conf)][1];
- mutex_unlock(&indio_dev->mlock);
-
- return IIO_VAL_INT_PLUS_NANO;
-
- case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
- *val = 1000;
-
- return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ mutex_lock(&indio_dev->mlock);
+ *val = st->scale_avail[AD7192_CONF_GAIN(st->conf)][0];
+ *val2 = st->scale_avail[AD7192_CONF_GAIN(st->conf)][1];
+ mutex_unlock(&indio_dev->mlock);
+ return IIO_VAL_INT_PLUS_NANO;
+ case IIO_TEMP:
+ *val = 1000;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
}
return -EINVAL;
@@ -935,7 +922,7 @@ static int ad7192_write_raw(struct iio_dev *indio_dev,
}
switch (mask) {
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ case IIO_CHAN_INFO_SCALE:
ret = -EINVAL;
for (i = 0; i < ARRAY_SIZE(st->scale_avail); i++)
if (val2 == st->scale_avail[i][1]) {
@@ -992,7 +979,7 @@ static const struct iio_info ad7192_info = {
.extend_name = _name, \
.channel = _chan, \
.channel2 = _chan2, \
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED), \
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT, \
.address = _address, \
.scan_index = _si, \
.scan_type = IIO_ST('s', 24, 32, 0)}
@@ -1001,7 +988,7 @@ static const struct iio_info ad7192_info = {
{ .type = IIO_VOLTAGE, \
.indexed = 1, \
.channel = _chan, \
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED), \
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT, \
.address = _address, \
.scan_index = _si, \
.scan_type = IIO_ST('s', 24, 32, 0)}
@@ -1010,7 +997,7 @@ static const struct iio_info ad7192_info = {
{ .type = IIO_TEMP, \
.indexed = 1, \
.channel = _chan, \
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SEPARATE), \
+ .info_mask = IIO_CHAN_INFO_SCALE_SEPARATE_BIT, \
.address = _address, \
.scan_index = _si, \
.scan_type = IIO_ST('s', 24, 32, 0)}
@@ -1151,6 +1138,7 @@ static const struct spi_device_id ad7192_id[] = {
{"ad7195", ID_AD7195},
{}
};
+MODULE_DEVICE_TABLE(spi, ad7192_id);
static struct spi_driver ad7192_driver = {
.driver = {
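ad7192 (and ad7793 further down) also pick up the new trigger-ops structure: the owner field that used to be set directly on the trigger now lives in a struct iio_trigger_ops attached through trig->ops. A sketch of the pattern, with hypothetical names and assuming the staging IIO trigger headers are included:

	static struct iio_trigger_ops example_trigger_ops = {
		.owner = THIS_MODULE,
	};

	static void example_init_trigger(struct iio_trigger *trig)
	{
		/* replaces the removed "trig->owner = THIS_MODULE" assignment */
		trig->ops = &example_trigger_ops;
	}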
diff --git a/drivers/staging/iio/adc/ad7280a.c b/drivers/staging/iio/adc/ad7280a.c
index dbaeae81e873..7dbd6812c240 100644
--- a/drivers/staging/iio/adc/ad7280a.c
+++ b/drivers/staging/iio/adc/ad7280a.c
@@ -18,6 +18,7 @@
#include "../iio.h"
#include "../sysfs.h"
+#include "../events.h"
#include "ad7280a.h"
@@ -455,10 +456,10 @@ static ssize_t ad7280_store_balance_timer(struct device *dev,
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct ad7280_state *st = iio_priv(indio_dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- long val;
+ unsigned long val;
int ret;
- ret = strict_strtoul(buf, 10, &val);
+ ret = kstrtoul(buf, 10, &val);
if (ret)
return ret;
@@ -487,8 +488,8 @@ static int ad7280_channel_init(struct ad7280_state *st)
{
int dev, ch, cnt;
- st->channels = kzalloc(sizeof(*st->channels) *
- ((st->slave_num + 1) * 12 + 2), GFP_KERNEL);
+ st->channels = kcalloc((st->slave_num + 1) * 12 + 2,
+ sizeof(*st->channels), GFP_KERNEL);
if (st->channels == NULL)
return -ENOMEM;
@@ -507,7 +508,7 @@ static int ad7280_channel_init(struct ad7280_state *st)
}
st->channels[cnt].indexed = 1;
st->channels[cnt].info_mask =
- (1 << IIO_CHAN_INFO_SCALE_SHARED);
+ IIO_CHAN_INFO_SCALE_SHARED_BIT;
st->channels[cnt].address =
AD7280A_DEVADDR(dev) << 8 | ch;
st->channels[cnt].scan_index = cnt;
@@ -523,7 +524,7 @@ static int ad7280_channel_init(struct ad7280_state *st)
st->channels[cnt].channel2 = dev * 6;
st->channels[cnt].address = AD7280A_ALL_CELLS;
st->channels[cnt].indexed = 1;
- st->channels[cnt].info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED);
+ st->channels[cnt].info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT;
st->channels[cnt].scan_index = cnt;
st->channels[cnt].scan_type.sign = 'u';
st->channels[cnt].scan_type.realbits = 32;
@@ -600,7 +601,7 @@ static ssize_t ad7280_read_channel_config(struct device *dev,
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
unsigned val;
- switch (this_attr->address) {
+ switch ((u32) this_attr->address) {
case AD7280A_CELL_OVERVOLTAGE:
val = 1000 + (st->cell_threshhigh * 1568) / 100;
break;
@@ -636,7 +637,7 @@ static ssize_t ad7280_write_channel_config(struct device *dev,
if (ret)
return ret;
- switch (this_attr->address) {
+ switch ((u32) this_attr->address) {
case AD7280A_CELL_OVERVOLTAGE:
case AD7280A_CELL_UNDERVOLTAGE:
val = ((val - 1000) * 100) / 1568; /* LSB 15.68mV */
@@ -652,7 +653,7 @@ static ssize_t ad7280_write_channel_config(struct device *dev,
val = clamp(val, 0L, 0xFFL);
mutex_lock(&indio_dev->mlock);
- switch (this_attr->address) {
+ switch ((u32) this_attr->address) {
case AD7280A_CELL_OVERVOLTAGE:
st->cell_threshhigh = val;
break;
@@ -682,13 +683,13 @@ static irqreturn_t ad7280_event_handler(int irq, void *private)
unsigned *channels;
int i, ret;
- channels = kzalloc(sizeof(*channels) * st->scan_cnt, GFP_KERNEL);
+ channels = kcalloc(st->scan_cnt, sizeof(*channels), GFP_KERNEL);
if (channels == NULL)
return IRQ_HANDLED;
ret = ad7280_read_all_channels(st, st->scan_cnt, channels);
if (ret < 0)
- return IRQ_HANDLED;
+ goto out;
for (i = 0; i < st->scan_cnt; i++) {
if (((channels[i] >> 23) & 0xF) <= AD7280A_CELL_VOLTAGE_6) {
@@ -731,6 +732,7 @@ static irqreturn_t ad7280_event_handler(int irq, void *private)
}
}
+out:
kfree(channels);
return IRQ_HANDLED;
@@ -801,7 +803,7 @@ static int ad7280_read_raw(struct iio_dev *indio_dev,
*val = ret;
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ case IIO_CHAN_INFO_SCALE:
if ((chan->address & 0xFF) <= AD7280A_CELL_VOLTAGE_6)
scale_uv = (4000 * 1000) >> AD7280A_BITS;
else
@@ -968,11 +970,11 @@ static const struct spi_device_id ad7280_id[] = {
{"ad7280a", 0},
{}
};
+MODULE_DEVICE_TABLE(spi, ad7280_id);
static struct spi_driver ad7280_driver = {
.driver = {
.name = "ad7280",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = ad7280_probe,
diff --git a/drivers/staging/iio/adc/ad7291.c b/drivers/staging/iio/adc/ad7291.c
index aa44a52a7adb..0a13616e3db9 100644
--- a/drivers/staging/iio/adc/ad7291.c
+++ b/drivers/staging/iio/adc/ad7291.c
@@ -19,6 +19,7 @@
#include "../iio.h"
#include "../sysfs.h"
+#include "../events.h"
/*
* Simplified handling
@@ -500,7 +501,7 @@ static int ad7291_read_raw(struct iio_dev *indio_dev,
default:
return -EINVAL;
}
- case (1 << IIO_CHAN_INFO_AVERAGE_RAW_SEPARATE):
+ case IIO_CHAN_INFO_AVERAGE_RAW:
ret = i2c_smbus_read_word_data(chip->client,
AD7291_T_AVERAGE);
if (ret < 0)
@@ -509,18 +510,24 @@ static int ad7291_read_raw(struct iio_dev *indio_dev,
AD7291_VALUE_MASK) << 4) >> 4;
*val = signval;
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
- scale_uv = (chip->int_vref_mv * 1000) >> AD7291_BITS;
- *val = scale_uv / 1000;
- *val2 = (scale_uv % 1000) * 1000;
- return IIO_VAL_INT_PLUS_MICRO;
- case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
- /*
- * One LSB of the ADC corresponds to 0.25 deg C.
- * The temperature reading is in 12-bit twos complement format
- */
- *val = 250;
- return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ scale_uv = (chip->int_vref_mv * 1000) >> AD7291_BITS;
+ *val = scale_uv / 1000;
+ *val2 = (scale_uv % 1000) * 1000;
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_TEMP:
+ /*
+ * One LSB of the ADC corresponds to 0.25 deg C.
+ * The temperature reading is in 12-bit twos
+ * complement format
+ */
+ *val = 250;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
default:
return -EINVAL;
}
@@ -529,7 +536,7 @@ static int ad7291_read_raw(struct iio_dev *indio_dev,
#define AD7291_VOLTAGE_CHAN(_chan) \
{ \
.type = IIO_VOLTAGE, \
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED), \
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT, \
.indexed = 1, \
.channel = _chan, \
.event_mask = IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING)|\
@@ -547,8 +554,8 @@ static const struct iio_chan_spec ad7291_channels[] = {
AD7291_VOLTAGE_CHAN(7),
{
.type = IIO_TEMP,
- .info_mask = (1 << IIO_CHAN_INFO_AVERAGE_RAW_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_AVERAGE_RAW_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
.indexed = 1,
.channel = 0,
.event_mask =
diff --git a/drivers/staging/iio/adc/ad7298.h b/drivers/staging/iio/adc/ad7298.h
index 9ddf5cf0e519..a0e5dea415ef 100644
--- a/drivers/staging/iio/adc/ad7298.h
+++ b/drivers/staging/iio/adc/ad7298.h
@@ -54,14 +54,9 @@ struct ad7298_state {
};
#ifdef CONFIG_IIO_BUFFER
-int ad7298_scan_from_ring(struct iio_dev *indio_dev, long ch);
int ad7298_register_ring_funcs_and_init(struct iio_dev *indio_dev);
void ad7298_ring_cleanup(struct iio_dev *indio_dev);
#else /* CONFIG_IIO_BUFFER */
-static inline int ad7298_scan_from_ring(struct iio_dev *indio_dev, long ch)
-{
- return 0;
-}
static inline int
ad7298_register_ring_funcs_and_init(struct iio_dev *indio_dev)
diff --git a/drivers/staging/iio/adc/ad7298_core.c b/drivers/staging/iio/adc/ad7298_core.c
index a799bd1922dc..8dd6aa9cf928 100644
--- a/drivers/staging/iio/adc/ad7298_core.c
+++ b/drivers/staging/iio/adc/ad7298_core.c
@@ -18,37 +18,37 @@
#include "../iio.h"
#include "../sysfs.h"
-#include "../buffer_generic.h"
+#include "../buffer.h"
#include "ad7298.h"
static struct iio_chan_spec ad7298_channels[] = {
IIO_CHAN(IIO_TEMP, 0, 1, 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
9, AD7298_CH_TEMP, IIO_ST('s', 32, 32, 0), 0),
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
0, 0, IIO_ST('u', 12, 16, 0), 0),
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 1, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
1, 1, IIO_ST('u', 12, 16, 0), 0),
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 2, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
2, 2, IIO_ST('u', 12, 16, 0), 0),
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 3, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
3, 3, IIO_ST('u', 12, 16, 0), 0),
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 4, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
4, 4, IIO_ST('u', 12, 16, 0), 0),
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 5, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
5, 5, IIO_ST('u', 12, 16, 0), 0),
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 6, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
6, 6, IIO_ST('u', 12, 16, 0), 0),
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 7, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
7, 7, IIO_ST('u', 12, 16, 0), 0),
IIO_CHAN_SOFT_TIMESTAMP(8),
};
@@ -69,27 +69,28 @@ static int ad7298_scan_direct(struct ad7298_state *st, unsigned ch)
static int ad7298_scan_temp(struct ad7298_state *st, int *val)
{
int tmp, ret;
+ __be16 buf;
- tmp = cpu_to_be16(AD7298_WRITE | AD7298_TSENSE |
+ buf = cpu_to_be16(AD7298_WRITE | AD7298_TSENSE |
AD7298_TAVG | st->ext_ref);
- ret = spi_write(st->spi, (u8 *)&tmp, 2);
+ ret = spi_write(st->spi, (u8 *)&buf, 2);
if (ret)
return ret;
- tmp = 0;
+ buf = cpu_to_be16(0);
- ret = spi_write(st->spi, (u8 *)&tmp, 2);
+ ret = spi_write(st->spi, (u8 *)&buf, 2);
if (ret)
return ret;
usleep_range(101, 1000); /* sleep > 100us */
- ret = spi_read(st->spi, (u8 *)&tmp, 2);
+ ret = spi_read(st->spi, (u8 *)&buf, 2);
if (ret)
return ret;
- tmp = be16_to_cpu(tmp) & RES_MASK(AD7298_BITS);
+ tmp = be16_to_cpu(buf) & RES_MASK(AD7298_BITS);
/*
* One LSB of the ADC corresponds to 0.25 deg C.
@@ -122,12 +123,8 @@ static int ad7298_read_raw(struct iio_dev *indio_dev,
switch (m) {
case 0:
mutex_lock(&indio_dev->mlock);
- if (iio_buffer_enabled(indio_dev)) {
- if (chan->address == AD7298_CH_TEMP)
- ret = -ENODEV;
- else
- ret = ad7298_scan_from_ring(indio_dev,
- chan->address);
+ if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
+ ret = -EBUSY;
} else {
if (chan->address == AD7298_CH_TEMP)
ret = ad7298_scan_temp(st, val);
@@ -143,15 +140,20 @@ static int ad7298_read_raw(struct iio_dev *indio_dev,
*val = ret & RES_MASK(AD7298_BITS);
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
- scale_uv = (st->int_vref_mv * 1000) >> AD7298_BITS;
- *val = scale_uv / 1000;
- *val2 = (scale_uv % 1000) * 1000;
- return IIO_VAL_INT_PLUS_MICRO;
- case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
- *val = 1;
- *val2 = 0;
- return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ scale_uv = (st->int_vref_mv * 1000) >> AD7298_BITS;
+ *val = scale_uv / 1000;
+ *val2 = (scale_uv % 1000) * 1000;
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_TEMP:
+ *val = 1;
+ *val2 = 0;
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
}
return -EINVAL;
}
@@ -265,11 +267,11 @@ static const struct spi_device_id ad7298_id[] = {
{"ad7298", 0},
{}
};
+MODULE_DEVICE_TABLE(spi, ad7298_id);
static struct spi_driver ad7298_driver = {
.driver = {
.name = "ad7298",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = ad7298_probe,
@@ -281,4 +283,3 @@ module_spi_driver(ad7298_driver);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("Analog Devices AD7298 ADC");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("spi:ad7298");
diff --git a/drivers/staging/iio/adc/ad7298_ring.c b/drivers/staging/iio/adc/ad7298_ring.c
index 47630d506a63..d1a12dd015e2 100644
--- a/drivers/staging/iio/adc/ad7298_ring.c
+++ b/drivers/staging/iio/adc/ad7298_ring.c
@@ -12,41 +12,12 @@
#include <linux/spi/spi.h>
#include "../iio.h"
-#include "../buffer_generic.h"
+#include "../buffer.h"
#include "../ring_sw.h"
#include "../trigger_consumer.h"
#include "ad7298.h"
-int ad7298_scan_from_ring(struct iio_dev *indio_dev, long ch)
-{
- struct iio_buffer *ring = indio_dev->buffer;
- int ret;
- u16 *ring_data;
-
- if (!(test_bit(ch, ring->scan_mask))) {
- ret = -EBUSY;
- goto error_ret;
- }
-
- ring_data = kmalloc(ring->access->get_bytes_per_datum(ring),
- GFP_KERNEL);
- if (ring_data == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
- ret = ring->access->read_last(ring, (u8 *) ring_data);
- if (ret)
- goto error_free_ring_data;
-
- ret = be16_to_cpu(ring_data[ch]);
-
-error_free_ring_data:
- kfree(ring_data);
-error_ret:
- return ret;
-}
-
/**
* ad7298_ring_preenable() setup the parameters of the ring before enabling
*
@@ -61,8 +32,9 @@ static int ad7298_ring_preenable(struct iio_dev *indio_dev)
size_t d_size;
int i, m;
unsigned short command;
-
- d_size = ring->scan_count * (AD7298_STORAGE_BITS / 8);
+ int scan_count = bitmap_weight(indio_dev->active_scan_mask,
+ indio_dev->masklength);
+ d_size = scan_count * (AD7298_STORAGE_BITS / 8);
if (ring->scan_timestamp) {
d_size += sizeof(s64);
@@ -79,7 +51,7 @@ static int ad7298_ring_preenable(struct iio_dev *indio_dev)
command = AD7298_WRITE | st->ext_ref;
for (i = 0, m = AD7298_CH(0); i < AD7298_MAX_CHAN; i++, m >>= 1)
- if (test_bit(i, ring->scan_mask))
+ if (test_bit(i, indio_dev->active_scan_mask))
command |= m;
st->tx_buf[0] = cpu_to_be16(command);
@@ -96,7 +68,7 @@ static int ad7298_ring_preenable(struct iio_dev *indio_dev)
spi_message_add_tail(&st->ring_xfer[0], &st->ring_msg);
spi_message_add_tail(&st->ring_xfer[1], &st->ring_msg);
- for (i = 0; i < ring->scan_count; i++) {
+ for (i = 0; i < scan_count; i++) {
st->ring_xfer[i + 2].rx_buf = &st->rx_buf[i];
st->ring_xfer[i + 2].len = 2;
st->ring_xfer[i + 2].cs_change = 1;
@@ -134,7 +106,8 @@ static irqreturn_t ad7298_trigger_handler(int irq, void *p)
&time_ns, sizeof(time_ns));
}
- for (i = 0; i < ring->scan_count; i++)
+ for (i = 0; i < bitmap_weight(indio_dev->active_scan_mask,
+ indio_dev->masklength); i++)
buf[i] = be16_to_cpu(st->rx_buf[i]);
indio_dev->buffer->access->store_to(ring, (u8 *)buf, time_ns);
@@ -174,7 +147,7 @@ int ad7298_register_ring_funcs_and_init(struct iio_dev *indio_dev)
}
/* Ring buffer functions - here trigger setup related */
- indio_dev->buffer->setup_ops = &ad7298_ring_setup_ops;
+ indio_dev->setup_ops = &ad7298_ring_setup_ops;
indio_dev->buffer->scan_timestamp = true;
/* Flag that polled ring buffering is possible */
diff --git a/drivers/staging/iio/adc/ad7476.h b/drivers/staging/iio/adc/ad7476.h
index 60142926565f..27f696c75cc4 100644
--- a/drivers/staging/iio/adc/ad7476.h
+++ b/drivers/staging/iio/adc/ad7476.h
@@ -50,14 +50,9 @@ enum ad7476_supported_device_ids {
};
#ifdef CONFIG_IIO_BUFFER
-int ad7476_scan_from_ring(struct iio_dev *indio_dev);
int ad7476_register_ring_funcs_and_init(struct iio_dev *indio_dev);
void ad7476_ring_cleanup(struct iio_dev *indio_dev);
#else /* CONFIG_IIO_BUFFER */
-static inline int ad7476_scan_from_ring(struct iio_dev *indio_dev)
-{
- return 0;
-}
static inline int
ad7476_register_ring_funcs_and_init(struct iio_dev *indio_dev)
diff --git a/drivers/staging/iio/adc/ad7476_core.c b/drivers/staging/iio/adc/ad7476_core.c
index 0b5852030ab6..0c064d1c3927 100644
--- a/drivers/staging/iio/adc/ad7476_core.c
+++ b/drivers/staging/iio/adc/ad7476_core.c
@@ -17,7 +17,7 @@
#include "../iio.h"
#include "../sysfs.h"
-#include "../buffer_generic.h"
+#include "../buffer.h"
#include "ad7476.h"
@@ -46,7 +46,7 @@ static int ad7476_read_raw(struct iio_dev *indio_dev,
case 0:
mutex_lock(&indio_dev->mlock);
if (iio_buffer_enabled(indio_dev))
- ret = ad7476_scan_from_ring(indio_dev);
+ ret = -EBUSY;
else
ret = ad7476_scan_direct(st);
mutex_unlock(&indio_dev->mlock);
@@ -56,7 +56,7 @@ static int ad7476_read_raw(struct iio_dev *indio_dev,
*val = (ret >> st->chip_info->channel[0].scan_type.shift) &
RES_MASK(st->chip_info->channel[0].scan_type.realbits);
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ case IIO_CHAN_INFO_SCALE:
scale_uv = (st->int_vref_mv * 1000)
>> st->chip_info->channel[0].scan_type.realbits;
*val = scale_uv/1000;
@@ -69,49 +69,49 @@ static int ad7476_read_raw(struct iio_dev *indio_dev,
static const struct ad7476_chip_info ad7476_chip_info_tbl[] = {
[ID_AD7466] = {
.channel[0] = IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
0, 0, IIO_ST('u', 12, 16, 0), 0),
.channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
},
[ID_AD7467] = {
.channel[0] = IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
0, 0, IIO_ST('u', 10, 16, 2), 0),
.channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
},
[ID_AD7468] = {
.channel[0] = IIO_CHAN(IIO_VOLTAGE, 0, 1 , 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
0, 0, IIO_ST('u', 8, 16, 4), 0),
.channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
},
[ID_AD7475] = {
.channel[0] = IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
0, 0, IIO_ST('u', 12, 16, 0), 0),
.channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
},
[ID_AD7476] = {
.channel[0] = IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
0, 0, IIO_ST('u', 12, 16, 0), 0),
.channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
},
[ID_AD7477] = {
.channel[0] = IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
0, 0, IIO_ST('u', 10, 16, 2), 0),
.channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
},
[ID_AD7478] = {
.channel[0] = IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
0, 0, IIO_ST('u', 8, 16, 4), 0),
.channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
},
[ID_AD7495] = {
.channel[0] = IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
0, 0, IIO_ST('u', 12, 16, 0), 0),
.channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
.int_vref_mv = 2500,
@@ -237,11 +237,11 @@ static const struct spi_device_id ad7476_id[] = {
{"ad7495", ID_AD7495},
{}
};
+MODULE_DEVICE_TABLE(spi, ad7476_id);
static struct spi_driver ad7476_driver = {
.driver = {
.name = "ad7476",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = ad7476_probe,
@@ -253,4 +253,3 @@ module_spi_driver(ad7476_driver);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("Analog Devices AD7475/6/7/8(A) AD7466/7/8 ADC");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("spi:ad7476");
diff --git a/drivers/staging/iio/adc/ad7476_ring.c b/drivers/staging/iio/adc/ad7476_ring.c
index e82c1a433f4f..4e298b2a05b2 100644
--- a/drivers/staging/iio/adc/ad7476_ring.c
+++ b/drivers/staging/iio/adc/ad7476_ring.c
@@ -14,36 +14,12 @@
#include <linux/spi/spi.h>
#include "../iio.h"
-#include "../buffer_generic.h"
+#include "../buffer.h"
#include "../ring_sw.h"
#include "../trigger_consumer.h"
#include "ad7476.h"
-int ad7476_scan_from_ring(struct iio_dev *indio_dev)
-{
- struct iio_buffer *ring = indio_dev->buffer;
- int ret;
- u8 *ring_data;
-
- ring_data = kmalloc(ring->access->get_bytes_per_datum(ring),
- GFP_KERNEL);
- if (ring_data == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
- ret = ring->access->read_last(ring, ring_data);
- if (ret)
- goto error_free_ring_data;
-
- ret = (ring_data[0] << 8) | ring_data[1];
-
-error_free_ring_data:
- kfree(ring_data);
-error_ret:
- return ret;
-}
-
/**
* ad7476_ring_preenable() setup the parameters of the ring before enabling
*
@@ -56,7 +32,8 @@ static int ad7476_ring_preenable(struct iio_dev *indio_dev)
struct ad7476_state *st = iio_priv(indio_dev);
struct iio_buffer *ring = indio_dev->buffer;
- st->d_size = ring->scan_count *
+ st->d_size = bitmap_weight(indio_dev->active_scan_mask,
+ indio_dev->masklength) *
st->chip_info->channel[0].scan_type.storagebits / 8;
if (ring->scan_timestamp) {
@@ -137,7 +114,7 @@ int ad7476_register_ring_funcs_and_init(struct iio_dev *indio_dev)
}
/* Ring buffer functions - here trigger setup related */
- indio_dev->buffer->setup_ops = &ad7476_ring_setup_ops;
+ indio_dev->setup_ops = &ad7476_ring_setup_ops;
indio_dev->buffer->scan_timestamp = true;
/* Flag that polled ring buffering is possible */
diff --git a/drivers/staging/iio/adc/ad7606.h b/drivers/staging/iio/adc/ad7606.h
index 35018c3f518e..10f59896597f 100644
--- a/drivers/staging/iio/adc/ad7606.h
+++ b/drivers/staging/iio/adc/ad7606.h
@@ -99,7 +99,6 @@ enum ad7606_supported_device_ids {
ID_AD7606_4
};
-int ad7606_scan_from_ring(struct iio_dev *indio_dev, unsigned ch);
int ad7606_register_ring_funcs_and_init(struct iio_dev *indio_dev);
void ad7606_ring_cleanup(struct iio_dev *indio_dev);
#endif /* IIO_ADC_AD7606_H_ */
diff --git a/drivers/staging/iio/adc/ad7606_core.c b/drivers/staging/iio/adc/ad7606_core.c
index 54423ab196fe..ddb7ef92f5c1 100644
--- a/drivers/staging/iio/adc/ad7606_core.c
+++ b/drivers/staging/iio/adc/ad7606_core.c
@@ -20,7 +20,7 @@
#include "../iio.h"
#include "../sysfs.h"
-#include "../buffer_generic.h"
+#include "../buffer.h"
#include "ad7606.h"
@@ -91,7 +91,7 @@ static int ad7606_read_raw(struct iio_dev *indio_dev,
case 0:
mutex_lock(&indio_dev->mlock);
if (iio_buffer_enabled(indio_dev))
- ret = ad7606_scan_from_ring(indio_dev, chan->address);
+ ret = -EBUSY;
else
ret = ad7606_scan_direct(indio_dev, chan->address);
mutex_unlock(&indio_dev->mlock);
@@ -100,7 +100,7 @@ static int ad7606_read_raw(struct iio_dev *indio_dev,
return ret;
*val = (short) ret;
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ case IIO_CHAN_INFO_SCALE:
scale_uv = (st->range * 1000 * 2)
>> st->chip_info->channels[0].scan_type.realbits;
*val = scale_uv / 1000;
@@ -205,14 +205,14 @@ static struct attribute *ad7606_attributes[] = {
NULL,
};
-static mode_t ad7606_attr_is_visible(struct kobject *kobj,
+static umode_t ad7606_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct ad7606_state *st = iio_priv(indio_dev);
- mode_t mode = attr->mode;
+ umode_t mode = attr->mode;
if (!(gpio_is_valid(st->pdata->gpio_os0) &&
gpio_is_valid(st->pdata->gpio_os1) &&
diff --git a/drivers/staging/iio/adc/ad7606_par.c b/drivers/staging/iio/adc/ad7606_par.c
index 688632edd3d7..cff97568189e 100644
--- a/drivers/staging/iio/adc/ad7606_par.c
+++ b/drivers/staging/iio/adc/ad7606_par.c
@@ -189,4 +189,3 @@ module_exit(ad7606_cleanup);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("Analog Devices AD7606 ADC");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:ad7606_par");
diff --git a/drivers/staging/iio/adc/ad7606_ring.c b/drivers/staging/iio/adc/ad7606_ring.c
index 20927fd53728..e8f94a18a943 100644
--- a/drivers/staging/iio/adc/ad7606_ring.c
+++ b/drivers/staging/iio/adc/ad7606_ring.c
@@ -12,36 +12,12 @@
#include <linux/slab.h>
#include "../iio.h"
-#include "../buffer_generic.h"
+#include "../buffer.h"
#include "../ring_sw.h"
#include "../trigger_consumer.h"
#include "ad7606.h"
-int ad7606_scan_from_ring(struct iio_dev *indio_dev, unsigned ch)
-{
- struct iio_buffer *ring = indio_dev->buffer;
- int ret;
- u16 *ring_data;
-
- ring_data = kmalloc(ring->access->get_bytes_per_datum(ring),
- GFP_KERNEL);
- if (ring_data == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
- ret = ring->access->read_last(ring, (u8 *) ring_data);
- if (ret)
- goto error_free_ring_data;
-
- ret = ring_data[ch];
-
-error_free_ring_data:
- kfree(ring_data);
-error_ret:
- return ret;
-}
-
/**
* ad7606_trigger_handler_th() th/bh of trigger launched polling to ring buffer
*
@@ -136,8 +112,6 @@ int ad7606_register_ring_funcs_and_init(struct iio_dev *indio_dev)
/* Effectively select the ring buffer implementation */
indio_dev->buffer->access = &ring_sw_access_funcs;
- indio_dev->buffer->bpe =
- st->chip_info->channels[0].scan_type.storagebits / 8;
indio_dev->pollfunc = iio_alloc_pollfunc(&ad7606_trigger_handler_th_bh,
&ad7606_trigger_handler_th_bh,
0,
@@ -152,7 +126,7 @@ int ad7606_register_ring_funcs_and_init(struct iio_dev *indio_dev)
/* Ring buffer functions - here trigger setup related */
- indio_dev->buffer->setup_ops = &ad7606_ring_setup_ops;
+ indio_dev->setup_ops = &ad7606_ring_setup_ops;
indio_dev->buffer->scan_timestamp = true ;
INIT_WORK(&st->poll_work, &ad7606_poll_bh_to_ring);
diff --git a/drivers/staging/iio/adc/ad7606_spi.c b/drivers/staging/iio/adc/ad7606_spi.c
index b984bd2048b6..237f1c44d296 100644
--- a/drivers/staging/iio/adc/ad7606_spi.c
+++ b/drivers/staging/iio/adc/ad7606_spi.c
@@ -97,11 +97,11 @@ static const struct spi_device_id ad7606_id[] = {
{"ad7606-4", ID_AD7606_4},
{}
};
+MODULE_DEVICE_TABLE(spi, ad7606_id);
static struct spi_driver ad7606_driver = {
.driver = {
.name = "ad7606",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
.pm = AD7606_SPI_PM_OPS,
},
@@ -114,4 +114,3 @@ module_spi_driver(ad7606_driver);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("Analog Devices AD7606 ADC");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("spi:ad7606_spi");
diff --git a/drivers/staging/iio/adc/ad7780.c b/drivers/staging/iio/adc/ad7780.c
index ec90261cbc3c..a13e58c814e6 100644
--- a/drivers/staging/iio/adc/ad7780.c
+++ b/drivers/staging/iio/adc/ad7780.c
@@ -114,7 +114,7 @@ static int ad7780_read_raw(struct iio_dev *indio_dev,
*val *= 128;
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ case IIO_CHAN_INFO_SCALE:
scale_uv = (st->int_vref_mv * 100000)
>> (channel.scan_type.realbits - 1);
*val = scale_uv / 100000;
@@ -127,12 +127,12 @@ static int ad7780_read_raw(struct iio_dev *indio_dev,
static const struct ad7780_chip_info ad7780_chip_info_tbl[] = {
[ID_AD7780] = {
.channel = IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
0, 0, IIO_ST('s', 24, 32, 8), 0),
},
[ID_AD7781] = {
.channel = IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
0, 0, IIO_ST('s', 20, 32, 12), 0),
},
};
@@ -272,11 +272,11 @@ static const struct spi_device_id ad7780_id[] = {
{"ad7781", ID_AD7781},
{}
};
+MODULE_DEVICE_TABLE(spi, ad7780_id);
static struct spi_driver ad7780_driver = {
.driver = {
.name = "ad7780",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = ad7780_probe,
diff --git a/drivers/staging/iio/adc/ad7793.c b/drivers/staging/iio/adc/ad7793.c
index 1c5588e88cdf..6a058b19c49a 100644
--- a/drivers/staging/iio/adc/ad7793.c
+++ b/drivers/staging/iio/adc/ad7793.c
@@ -20,7 +20,7 @@
#include "../iio.h"
#include "../sysfs.h"
-#include "../buffer_generic.h"
+#include "../buffer.h"
#include "../ring_sw.h"
#include "../trigger.h"
#include "../trigger_consumer.h"
@@ -316,25 +316,6 @@ out:
return ret;
}
-static int ad7793_scan_from_ring(struct ad7793_state *st, unsigned ch, int *val)
-{
- struct iio_buffer *ring = iio_priv_to_dev(st)->buffer;
- int ret;
- s64 dat64[2];
- u32 *dat32 = (u32 *)dat64;
-
- if (!(test_bit(ch, ring->scan_mask)))
- return -EBUSY;
-
- ret = ring->access->read_last(ring, (u8 *) &dat64);
- if (ret)
- return ret;
-
- *val = *dat32;
-
- return 0;
-}
-
static int ad7793_ring_preenable(struct iio_dev *indio_dev)
{
struct ad7793_state *st = iio_priv(indio_dev);
@@ -342,14 +323,15 @@ static int ad7793_ring_preenable(struct iio_dev *indio_dev)
size_t d_size;
unsigned channel;
- if (!ring->scan_count)
+ if (bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
return -EINVAL;
- channel = find_first_bit(ring->scan_mask,
+ channel = find_first_bit(indio_dev->active_scan_mask,
indio_dev->masklength);
- d_size = ring->scan_count *
- indio_dev->channels[0].scan_type.storagebits / 8;
+ d_size = bitmap_weight(indio_dev->active_scan_mask,
+ indio_dev->masklength) *
+ indio_dev->channels[0].scan_type.storagebits / 8;
if (ring->scan_timestamp) {
d_size += sizeof(s64);
@@ -411,7 +393,7 @@ static irqreturn_t ad7793_trigger_handler(int irq, void *p)
s64 dat64[2];
s32 *dat32 = (s32 *)dat64;
- if (ring->scan_count)
+ if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
__ad7793_read_reg(st, 1, 1, AD7793_REG_DATA,
dat32,
indio_dev->channels[0].scan_type.realbits/8);
@@ -459,7 +441,7 @@ static int ad7793_register_ring_funcs_and_init(struct iio_dev *indio_dev)
}
/* Ring buffer functions - here trigger setup related */
- indio_dev->buffer->setup_ops = &ad7793_ring_setup_ops;
+ indio_dev->setup_ops = &ad7793_ring_setup_ops;
/* Flag that polled ring buffering is possible */
indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
@@ -493,6 +475,10 @@ static irqreturn_t ad7793_data_rdy_trig_poll(int irq, void *private)
return IRQ_HANDLED;
}
+static struct iio_trigger_ops ad7793_trigger_ops = {
+ .owner = THIS_MODULE,
+};
+
static int ad7793_probe_trigger(struct iio_dev *indio_dev)
{
struct ad7793_state *st = iio_priv(indio_dev);
@@ -505,6 +491,7 @@ static int ad7793_probe_trigger(struct iio_dev *indio_dev)
ret = -ENOMEM;
goto error_ret;
}
+ st->trig->ops = &ad7793_trigger_ops;
ret = request_irq(st->spi->irq,
ad7793_data_rdy_trig_poll,
@@ -517,7 +504,6 @@ static int ad7793_probe_trigger(struct iio_dev *indio_dev)
disable_irq_nosync(st->spi->irq);
st->irq_dis = true;
st->trig->dev.parent = &st->spi->dev;
- st->trig->owner = THIS_MODULE;
st->trig->private_data = indio_dev;
ret = iio_trigger_register(st->trig);
@@ -649,8 +635,7 @@ static int ad7793_read_raw(struct iio_dev *indio_dev,
case 0:
mutex_lock(&indio_dev->mlock);
if (iio_buffer_enabled(indio_dev))
- ret = ad7793_scan_from_ring(st,
- chan->scan_index, &smpl);
+ ret = -EBUSY;
else
ret = ad7793_read(st, chan->address,
chan->scan_type.realbits / 8, &smpl);
@@ -667,19 +652,21 @@ static int ad7793_read_raw(struct iio_dev *indio_dev,
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
- *val = st->scale_avail[(st->conf >> 8) & 0x7][0];
- *val2 = st->scale_avail[(st->conf >> 8) & 0x7][1];
-
- return IIO_VAL_INT_PLUS_NANO;
-
- case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
+ case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
case IIO_VOLTAGE:
- /* 1170mV / 2^23 * 6 */
- scale_uv = (1170ULL * 100000000ULL * 6ULL)
- >> (chan->scan_type.realbits -
- (unipolar ? 0 : 1));
+ if (chan->differential) {
+ *val = st->
+ scale_avail[(st->conf >> 8) & 0x7][0];
+ *val2 = st->
+ scale_avail[(st->conf >> 8) & 0x7][1];
+ return IIO_VAL_INT_PLUS_NANO;
+ } else {
+ /* 1170mV / 2^23 * 6 */
+ scale_uv = (1170ULL * 100000000ULL * 6ULL)
+ >> (chan->scan_type.realbits -
+ (unipolar ? 0 : 1));
+ }
break;
case IIO_TEMP:
/* Always uses unity gain and internal ref */
@@ -716,7 +703,7 @@ static int ad7793_write_raw(struct iio_dev *indio_dev,
}
switch (mask) {
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ case IIO_CHAN_INFO_SCALE:
ret = -EINVAL;
for (i = 0; i < ARRAY_SIZE(st->scale_avail); i++)
if (val2 == st->scale_avail[i][1]) {
@@ -775,7 +762,7 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = {
.channel = 0,
.channel2 = 0,
.address = AD7793_CH_AIN1P_AIN1M,
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT,
.scan_index = 0,
.scan_type = IIO_ST('s', 24, 32, 0)
},
@@ -786,7 +773,7 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = {
.channel = 1,
.channel2 = 1,
.address = AD7793_CH_AIN2P_AIN2M,
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT,
.scan_index = 1,
.scan_type = IIO_ST('s', 24, 32, 0)
},
@@ -797,7 +784,7 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = {
.channel = 2,
.channel2 = 2,
.address = AD7793_CH_AIN3P_AIN3M,
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT,
.scan_index = 2,
.scan_type = IIO_ST('s', 24, 32, 0)
},
@@ -809,7 +796,7 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = {
.channel = 2,
.channel2 = 2,
.address = AD7793_CH_AIN1M_AIN1M,
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT,
.scan_index = 2,
.scan_type = IIO_ST('s', 24, 32, 0)
},
@@ -818,7 +805,7 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = {
.indexed = 1,
.channel = 0,
.address = AD7793_CH_TEMP,
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
.scan_index = 4,
.scan_type = IIO_ST('s', 24, 32, 0),
},
@@ -828,7 +815,7 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = {
.indexed = 1,
.channel = 4,
.address = AD7793_CH_AVDD_MONITOR,
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
.scan_index = 5,
.scan_type = IIO_ST('s', 24, 32, 0),
},
@@ -842,7 +829,7 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = {
.channel = 0,
.channel2 = 0,
.address = AD7793_CH_AIN1P_AIN1M,
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT,
.scan_index = 0,
.scan_type = IIO_ST('s', 16, 32, 0)
},
@@ -853,7 +840,7 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = {
.channel = 1,
.channel2 = 1,
.address = AD7793_CH_AIN2P_AIN2M,
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT,
.scan_index = 1,
.scan_type = IIO_ST('s', 16, 32, 0)
},
@@ -864,7 +851,7 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = {
.channel = 2,
.channel2 = 2,
.address = AD7793_CH_AIN3P_AIN3M,
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT,
.scan_index = 2,
.scan_type = IIO_ST('s', 16, 32, 0)
},
@@ -876,7 +863,7 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = {
.channel = 2,
.channel2 = 2,
.address = AD7793_CH_AIN1M_AIN1M,
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT,
.scan_index = 2,
.scan_type = IIO_ST('s', 16, 32, 0)
},
@@ -885,7 +872,7 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = {
.indexed = 1,
.channel = 0,
.address = AD7793_CH_TEMP,
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
.scan_index = 4,
.scan_type = IIO_ST('s', 16, 32, 0),
},
@@ -895,7 +882,7 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = {
.indexed = 1,
.channel = 4,
.address = AD7793_CH_AVDD_MONITOR,
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
.scan_index = 5,
.scan_type = IIO_ST('s', 16, 32, 0),
},
@@ -1034,11 +1021,11 @@ static const struct spi_device_id ad7793_id[] = {
{"ad7793", ID_AD7793},
{}
};
+MODULE_DEVICE_TABLE(spi, ad7793_id);
static struct spi_driver ad7793_driver = {
.driver = {
.name = "ad7793",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = ad7793_probe,
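The conversion pattern above repeats across every driver touched in this series: .info_mask entries move from open-coded shifts such as (1 << IIO_CHAN_INFO_SCALE_SHARED) to the IIO_CHAN_INFO_*_SHARED_BIT / *_SEPARATE_BIT helper macros, and the mask handed to read_raw()/write_raw() is now the plain IIO_CHAN_INFO_* enum value rather than a bit position, so the case labels lose the shift as well. A minimal sketch of a converted read_raw() and channel table; the foo_* names and the cached scale_uv field are purely illustrative, not part of the patch:

struct foo_state {
	int raw_cache;	/* last conversion result */
	int scale_uv;	/* full-scale span in microvolts */
};

static int foo_read_raw(struct iio_dev *indio_dev,
			struct iio_chan_spec const *chan,
			int *val, int *val2, long mask)
{
	struct foo_state *st = iio_priv(indio_dev);

	switch (mask) {
	case 0:				/* raw reading */
		*val = st->raw_cache;
		return IIO_VAL_INT;
	case IIO_CHAN_INFO_SCALE:	/* enum value, no longer a bit */
		*val = st->scale_uv / 1000;
		*val2 = (st->scale_uv % 1000) * 1000;
		return IIO_VAL_INT_PLUS_MICRO;
	default:
		return -EINVAL;
	}
}

static const struct iio_chan_spec foo_channels[] = {
	{
		.type = IIO_VOLTAGE,
		.indexed = 1,
		.channel = 0,
		.info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT,
	},
};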
diff --git a/drivers/staging/iio/adc/ad7816.c b/drivers/staging/iio/adc/ad7816.c
index acbf9363132b..52b720e2b03a 100644
--- a/drivers/staging/iio/adc/ad7816.c
+++ b/drivers/staging/iio/adc/ad7816.c
@@ -18,6 +18,7 @@
#include "../iio.h"
#include "../sysfs.h"
+#include "../events.h"
/*
* AD7816 config masks
@@ -459,7 +460,6 @@ MODULE_DEVICE_TABLE(spi, ad7816_id);
static struct spi_driver ad7816_driver = {
.driver = {
.name = "ad7816",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = ad7816_probe,
diff --git a/drivers/staging/iio/adc/ad7887.h b/drivers/staging/iio/adc/ad7887.h
index 3452d1819077..bc53b6532121 100644
--- a/drivers/staging/iio/adc/ad7887.h
+++ b/drivers/staging/iio/adc/ad7887.h
@@ -83,14 +83,9 @@ enum ad7887_supported_device_ids {
};
#ifdef CONFIG_IIO_BUFFER
-int ad7887_scan_from_ring(struct ad7887_state *st, int channum);
int ad7887_register_ring_funcs_and_init(struct iio_dev *indio_dev);
void ad7887_ring_cleanup(struct iio_dev *indio_dev);
#else /* CONFIG_IIO_BUFFER */
-static inline int ad7887_scan_from_ring(struct ad7887_state *st, int channum)
-{
- return 0;
-}
static inline int
ad7887_register_ring_funcs_and_init(struct iio_dev *indio_dev)
diff --git a/drivers/staging/iio/adc/ad7887_core.c b/drivers/staging/iio/adc/ad7887_core.c
index 91b8fb09d92b..e9bbc3eed15d 100644
--- a/drivers/staging/iio/adc/ad7887_core.c
+++ b/drivers/staging/iio/adc/ad7887_core.c
@@ -17,7 +17,7 @@
#include "../iio.h"
#include "../sysfs.h"
-#include "../buffer_generic.h"
+#include "../buffer.h"
#include "ad7887.h"
@@ -45,7 +45,7 @@ static int ad7887_read_raw(struct iio_dev *indio_dev,
case 0:
mutex_lock(&indio_dev->mlock);
if (iio_buffer_enabled(indio_dev))
- ret = ad7887_scan_from_ring(st, 1 << chan->address);
+ ret = -EBUSY;
else
ret = ad7887_scan_direct(st, chan->address);
mutex_unlock(&indio_dev->mlock);
@@ -55,7 +55,7 @@ static int ad7887_read_raw(struct iio_dev *indio_dev,
*val = (ret >> st->chip_info->channel[0].scan_type.shift) &
RES_MASK(st->chip_info->channel[0].scan_type.realbits);
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ case IIO_CHAN_INFO_SCALE:
scale_uv = (st->int_vref_mv * 1000)
>> st->chip_info->channel[0].scan_type.realbits;
*val = scale_uv/1000;
@@ -75,7 +75,7 @@ static const struct ad7887_chip_info ad7887_chip_info_tbl[] = {
.type = IIO_VOLTAGE,
.indexed = 1,
.channel = 1,
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT,
.address = 1,
.scan_index = 1,
.scan_type = IIO_ST('u', 12, 16, 0),
@@ -84,7 +84,7 @@ static const struct ad7887_chip_info ad7887_chip_info_tbl[] = {
.type = IIO_VOLTAGE,
.indexed = 1,
.channel = 0,
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT,
.address = 0,
.scan_index = 0,
.scan_type = IIO_ST('u', 12, 16, 0),
@@ -246,11 +246,11 @@ static const struct spi_device_id ad7887_id[] = {
{"ad7887", ID_AD7887},
{}
};
+MODULE_DEVICE_TABLE(spi, ad7887_id);
static struct spi_driver ad7887_driver = {
.driver = {
.name = "ad7887",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = ad7887_probe,
@@ -262,4 +262,3 @@ module_spi_driver(ad7887_driver);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("Analog Devices AD7887 ADC");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("spi:ad7887");
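Two mechanical cleanups also recur in the SPI drivers of this series. Dropping .bus = &spi_bus_type is safe because spi_register_driver() already assigns the bus, and once a MODULE_DEVICE_TABLE(spi, ...) entry exists the hand-written MODULE_ALIAS("spi:...") becomes redundant, since modpost generates the module aliases from the device table. The resulting boilerplate, sketched for a hypothetical foo driver (foo_probe/foo_remove assumed to exist elsewhere):

static const struct spi_device_id foo_id[] = {
	{ "foo", 0 },
	{ }
};
MODULE_DEVICE_TABLE(spi, foo_id);

static struct spi_driver foo_driver = {
	.driver = {
		.name = "foo",
		.owner = THIS_MODULE,
	},
	.probe = foo_probe,
	.remove = __devexit_p(foo_remove),
	.id_table = foo_id,
};
module_spi_driver(foo_driver);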
diff --git a/drivers/staging/iio/adc/ad7887_ring.c b/drivers/staging/iio/adc/ad7887_ring.c
index cb74cada5611..85076cd962e7 100644
--- a/drivers/staging/iio/adc/ad7887_ring.c
+++ b/drivers/staging/iio/adc/ad7887_ring.c
@@ -13,46 +13,12 @@
#include <linux/spi/spi.h>
#include "../iio.h"
-#include "../buffer_generic.h"
+#include "../buffer.h"
#include "../ring_sw.h"
#include "../trigger_consumer.h"
#include "ad7887.h"
-int ad7887_scan_from_ring(struct ad7887_state *st, int channum)
-{
- struct iio_buffer *ring = iio_priv_to_dev(st)->buffer;
- int count = 0, ret;
- u16 *ring_data;
-
- if (!(test_bit(channum, ring->scan_mask))) {
- ret = -EBUSY;
- goto error_ret;
- }
-
- ring_data = kmalloc(ring->access->get_bytes_per_datum(ring),
- GFP_KERNEL);
- if (ring_data == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
- ret = ring->access->read_last(ring, (u8 *) ring_data);
- if (ret)
- goto error_free_ring_data;
-
- /* for single channel scan the result is stored with zero offset */
- if ((test_bit(1, ring->scan_mask) || test_bit(0, ring->scan_mask)) &&
- (channum == 1))
- count = 1;
-
- ret = be16_to_cpu(ring_data[count]);
-
-error_free_ring_data:
- kfree(ring_data);
-error_ret:
- return ret;
-}
-
/**
* ad7887_ring_preenable() setup the parameters of the ring before enabling
*
@@ -65,7 +31,8 @@ static int ad7887_ring_preenable(struct iio_dev *indio_dev)
struct ad7887_state *st = iio_priv(indio_dev);
struct iio_buffer *ring = indio_dev->buffer;
- st->d_size = ring->scan_count *
+ st->d_size = bitmap_weight(indio_dev->active_scan_mask,
+ indio_dev->masklength) *
st->chip_info->channel[0].scan_type.storagebits / 8;
if (ring->scan_timestamp) {
@@ -80,7 +47,7 @@ static int ad7887_ring_preenable(struct iio_dev *indio_dev)
set_bytes_per_datum(indio_dev->buffer, st->d_size);
/* We know this is a single long so can 'cheat' */
- switch (*ring->scan_mask) {
+ switch (*indio_dev->active_scan_mask) {
case (1 << 0):
st->ring_msg = &st->msg[AD7887_CH0];
break;
@@ -121,7 +88,8 @@ static irqreturn_t ad7887_trigger_handler(int irq, void *p)
__u8 *buf;
int b_sent;
- unsigned int bytes = ring->scan_count *
+ unsigned int bytes = bitmap_weight(indio_dev->active_scan_mask,
+ indio_dev->masklength) *
st->chip_info->channel[0].scan_type.storagebits / 8;
buf = kzalloc(st->d_size, GFP_KERNEL);
@@ -176,7 +144,7 @@ int ad7887_register_ring_funcs_and_init(struct iio_dev *indio_dev)
goto error_deallocate_sw_rb;
}
/* Ring buffer functions - here trigger setup related */
- indio_dev->buffer->setup_ops = &ad7887_ring_setup_ops;
+ indio_dev->setup_ops = &ad7887_ring_setup_ops;
/* Flag that polled ring buffering is possible */
indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
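With scan_count removed from struct iio_buffer, per-scan sizing is now derived from the core-managed active_scan_mask, and drivers no longer peek into the ring for a "last value" while capture is running; direct reads simply return -EBUSY. The sizing arithmetic used above, pulled out into a helper for clarity (foo_scan_bytes is illustrative, assuming 16-bit storage per channel as in ad7887):

static unsigned int foo_scan_bytes(struct iio_dev *indio_dev,
				   bool scan_timestamp)
{
	unsigned int bytes;

	bytes = bitmap_weight(indio_dev->active_scan_mask,
			      indio_dev->masklength) * 2;	/* 2 bytes/chan */
	if (scan_timestamp) {
		bytes += sizeof(s64);
		/* keep the 64-bit timestamp naturally aligned */
		if (bytes % sizeof(s64))
			bytes += sizeof(s64) - (bytes % sizeof(s64));
	}
	return bytes;
}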
diff --git a/drivers/staging/iio/adc/ad799x.h b/drivers/staging/iio/adc/ad799x.h
index eda01d5bab7d..356f690a76fb 100644
--- a/drivers/staging/iio/adc/ad799x.h
+++ b/drivers/staging/iio/adc/ad799x.h
@@ -124,15 +124,9 @@ struct ad799x_platform_data {
int ad7997_8_set_scan_mode(struct ad799x_state *st, unsigned mask);
#ifdef CONFIG_AD799X_RING_BUFFER
-int ad799x_single_channel_from_ring(struct iio_dev *indio_dev, int channum);
int ad799x_register_ring_funcs_and_init(struct iio_dev *indio_dev);
void ad799x_ring_cleanup(struct iio_dev *indio_dev);
#else /* CONFIG_AD799X_RING_BUFFER */
-int ad799x_single_channel_from_ring(struct iio_dev *indio_dev, int channum)
-{
- return -EINVAL;
-}
-
static inline int
ad799x_register_ring_funcs_and_init(struct iio_dev *indio_dev)
diff --git a/drivers/staging/iio/adc/ad799x_core.c b/drivers/staging/iio/adc/ad799x_core.c
index c0d2f886ea2c..d5b581d8bc2b 100644
--- a/drivers/staging/iio/adc/ad799x_core.c
+++ b/drivers/staging/iio/adc/ad799x_core.c
@@ -35,7 +35,8 @@
#include "../iio.h"
#include "../sysfs.h"
-#include "../buffer_generic.h"
+#include "../events.h"
+#include "../buffer.h"
#include "ad799x.h"
@@ -150,8 +151,7 @@ static int ad799x_read_raw(struct iio_dev *indio_dev,
case 0:
mutex_lock(&indio_dev->mlock);
if (iio_buffer_enabled(indio_dev))
- ret = ad799x_single_channel_from_ring(indio_dev,
- chan->scan_index);
+ ret = -EBUSY;
else
ret = ad799x_scan_direct(st, chan->scan_index);
mutex_unlock(&indio_dev->mlock);
@@ -161,7 +161,7 @@ static int ad799x_read_raw(struct iio_dev *indio_dev,
*val = (ret >> chan->scan_type.shift) &
RES_MASK(chan->scan_type.realbits);
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ case IIO_CHAN_INFO_SCALE:
scale_uv = (st->int_vref_mv * 1000) >> chan->scan_type.realbits;
*val = scale_uv / 1000;
*val2 = (scale_uv % 1000) * 1000;
@@ -934,4 +934,3 @@ module_i2c_driver(ad799x_driver);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("Analog Devices AD799x ADC");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("i2c:ad799x");
diff --git a/drivers/staging/iio/adc/ad799x_ring.c b/drivers/staging/iio/adc/ad799x_ring.c
index e3f4698d7815..5dded9e7820a 100644
--- a/drivers/staging/iio/adc/ad799x_ring.c
+++ b/drivers/staging/iio/adc/ad799x_ring.c
@@ -17,42 +17,12 @@
#include <linux/bitops.h>
#include "../iio.h"
-#include "../buffer_generic.h"
+#include "../buffer.h"
#include "../ring_sw.h"
#include "../trigger_consumer.h"
#include "ad799x.h"
-int ad799x_single_channel_from_ring(struct iio_dev *indio_dev, int channum)
-{
- struct iio_buffer *ring = indio_dev->buffer;
- int count = 0, ret;
- u16 *ring_data;
-
- if (!(test_bit(channum, ring->scan_mask))) {
- ret = -EBUSY;
- goto error_ret;
- }
-
- ring_data = kmalloc(ring->access->get_bytes_per_datum(ring),
- GFP_KERNEL);
- if (ring_data == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
- ret = ring->access->read_last(ring, (u8 *) ring_data);
- if (ret)
- goto error_free_ring_data;
- /* Need a count of channels prior to this one */
- count = bitmap_weight(ring->scan_mask, channum);
- ret = be16_to_cpu(ring_data[count]);
-
-error_free_ring_data:
- kfree(ring_data);
-error_ret:
- return ret;
-}
-
/**
* ad799x_ring_preenable() setup the parameters of the ring before enabling
*
@@ -71,9 +41,10 @@ static int ad799x_ring_preenable(struct iio_dev *indio_dev)
*/
if (st->id == ad7997 || st->id == ad7998)
- ad7997_8_set_scan_mode(st, *ring->scan_mask);
+ ad7997_8_set_scan_mode(st, *indio_dev->active_scan_mask);
- st->d_size = ring->scan_count * 2;
+ st->d_size = bitmap_weight(indio_dev->active_scan_mask,
+ indio_dev->masklength) * 2;
if (ring->scan_timestamp) {
st->d_size += sizeof(s64);
@@ -115,12 +86,13 @@ static irqreturn_t ad799x_trigger_handler(int irq, void *p)
case ad7991:
case ad7995:
case ad7999:
- cmd = st->config | (*ring->scan_mask << AD799X_CHANNEL_SHIFT);
+ cmd = st->config |
+ (*indio_dev->active_scan_mask << AD799X_CHANNEL_SHIFT);
break;
case ad7992:
case ad7993:
case ad7994:
- cmd = (*ring->scan_mask << AD799X_CHANNEL_SHIFT) |
+ cmd = (*indio_dev->active_scan_mask << AD799X_CHANNEL_SHIFT) |
AD7998_CONV_RES_REG;
break;
case ad7997:
@@ -132,7 +104,8 @@ static irqreturn_t ad799x_trigger_handler(int irq, void *p)
}
b_sent = i2c_smbus_read_i2c_block_data(st->client,
- cmd, ring->scan_count * 2, rxbuf);
+ cmd, bitmap_weight(indio_dev->active_scan_mask,
+ indio_dev->masklength) * 2, rxbuf);
if (b_sent < 0)
goto done;
@@ -183,7 +156,7 @@ int ad799x_register_ring_funcs_and_init(struct iio_dev *indio_dev)
}
/* Ring buffer functions - here trigger setup related */
- indio_dev->buffer->setup_ops = &ad799x_buf_setup_ops;
+ indio_dev->setup_ops = &ad799x_buf_setup_ops;
indio_dev->buffer->scan_timestamp = true;
/* Flag that polled ring buffering is possible */
diff --git a/drivers/staging/iio/adc/adt7310.c b/drivers/staging/iio/adc/adt7310.c
index bc307f3b024e..eec2f325d549 100644
--- a/drivers/staging/iio/adc/adt7310.c
+++ b/drivers/staging/iio/adc/adt7310.c
@@ -17,7 +17,7 @@
#include "../iio.h"
#include "../sysfs.h"
-
+#include "../events.h"
/*
* ADT7310 registers definition
*/
@@ -877,7 +877,6 @@ MODULE_DEVICE_TABLE(spi, adt7310_id);
static struct spi_driver adt7310_driver = {
.driver = {
.name = "adt7310",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = adt7310_probe,
diff --git a/drivers/staging/iio/adc/adt7410.c b/drivers/staging/iio/adc/adt7410.c
index 3481cf6f7574..c62248ceb37a 100644
--- a/drivers/staging/iio/adc/adt7410.c
+++ b/drivers/staging/iio/adc/adt7410.c
@@ -17,6 +17,7 @@
#include "../iio.h"
#include "../sysfs.h"
+#include "../events.h"
/*
* ADT7410 registers definition
diff --git a/drivers/staging/iio/adc/max1363.h b/drivers/staging/iio/adc/max1363.h
index cbcb08a2cb50..2cd0112067b2 100644
--- a/drivers/staging/iio/adc/max1363.h
+++ b/drivers/staging/iio/adc/max1363.h
@@ -146,22 +146,22 @@ struct max1363_state {
};
const struct max1363_mode
-*max1363_match_mode(unsigned long *mask, const struct max1363_chip_info *ci);
+*max1363_match_mode(const unsigned long *mask,
+ const struct max1363_chip_info *ci);
int max1363_set_scan_mode(struct max1363_state *st);
#ifdef CONFIG_MAX1363_RING_BUFFER
-
-int max1363_single_channel_from_ring(const long *mask,
- struct max1363_state *st);
+int max1363_update_scan_mode(struct iio_dev *indio_dev,
+ const unsigned long *scan_mask);
int max1363_register_ring_funcs_and_init(struct iio_dev *indio_dev);
void max1363_ring_cleanup(struct iio_dev *indio_dev);
#else /* CONFIG_MAX1363_RING_BUFFER */
-
-int max1363_single_channel_from_ring(long mask, struct max1363_state *st)
+int max1363_update_scan_mode(struct iio_dev *indio_dev,
+ const long *scan_mask)
{
- return -EINVAL;
+ return 0;
}
static inline int
diff --git a/drivers/staging/iio/adc/max1363_core.c b/drivers/staging/iio/adc/max1363_core.c
index 3f28f1ab52a2..b92cb4af18ce 100644
--- a/drivers/staging/iio/adc/max1363_core.c
+++ b/drivers/staging/iio/adc/max1363_core.c
@@ -34,7 +34,8 @@
#include "../iio.h"
#include "../sysfs.h"
-#include "../buffer_generic.h"
+#include "../events.h"
+#include "../buffer.h"
#include "max1363.h"
@@ -147,7 +148,8 @@ static const struct max1363_mode max1363_mode_table[] = {
};
const struct max1363_mode
-*max1363_match_mode(unsigned long *mask, const struct max1363_chip_info *ci)
+*max1363_match_mode(const unsigned long *mask,
+const struct max1363_chip_info *ci)
{
int i;
if (mask)
@@ -189,7 +191,6 @@ static int max1363_read_single_chan(struct iio_dev *indio_dev,
int ret = 0;
s32 data;
char rxbuf[2];
- const unsigned long *mask;
struct max1363_state *st = iio_priv(indio_dev);
struct i2c_client *client = st->client;
@@ -198,46 +199,38 @@ static int max1363_read_single_chan(struct iio_dev *indio_dev,
* If monitor mode is enabled, the method for reading a single
* channel will have to be rather different and has not yet
* been implemented.
+ *
+ * Also, cannot read directly if buffered capture enabled.
*/
- if (st->monitor_on) {
+ if (st->monitor_on || iio_buffer_enabled(indio_dev)) {
ret = -EBUSY;
goto error_ret;
}
- /* If ring buffer capture is occurring, query the buffer */
- if (iio_buffer_enabled(indio_dev)) {
- mask = max1363_mode_table[chan->address].modemask;
- data = max1363_single_channel_from_ring(mask, st);
+ /* Check to see if current scan mode is correct */
+ if (st->current_mode != &max1363_mode_table[chan->address]) {
+ /* Update scan mode if needed */
+ st->current_mode = &max1363_mode_table[chan->address];
+ ret = max1363_set_scan_mode(st);
+ if (ret < 0)
+ goto error_ret;
+ }
+ if (st->chip_info->bits != 8) {
+ /* Get reading */
+ data = i2c_master_recv(client, rxbuf, 2);
if (data < 0) {
ret = data;
goto error_ret;
}
+ data = (s32)(rxbuf[1]) | ((s32)(rxbuf[0] & 0x0F)) << 8;
} else {
- /* Check to see if current scan mode is correct */
- if (st->current_mode != &max1363_mode_table[chan->address]) {
- /* Update scan mode if needed */
- st->current_mode = &max1363_mode_table[chan->address];
- ret = max1363_set_scan_mode(st);
- if (ret < 0)
- goto error_ret;
- }
- if (st->chip_info->bits != 8) {
- /* Get reading */
- data = i2c_master_recv(client, rxbuf, 2);
- if (data < 0) {
- ret = data;
- goto error_ret;
- }
- data = (s32)(rxbuf[1]) | ((s32)(rxbuf[0] & 0x0F)) << 8;
- } else {
- /* Get reading */
- data = i2c_master_recv(client, rxbuf, 1);
- if (data < 0) {
- ret = data;
- goto error_ret;
- }
- data = rxbuf[0];
+ /* Get reading */
+ data = i2c_master_recv(client, rxbuf, 1);
+ if (data < 0) {
+ ret = data;
+ goto error_ret;
}
+ data = rxbuf[0];
}
*val = data;
error_ret:
@@ -260,7 +253,7 @@ static int max1363_read_raw(struct iio_dev *indio_dev,
if (ret < 0)
return ret;
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ case IIO_CHAN_INFO_SCALE:
if ((1 << (st->chip_info->bits + 1)) >
st->chip_info->int_vref_mv) {
*val = 0;
@@ -288,7 +281,7 @@ static const enum max1363_modes max1363_mode_list[] = {
#define MAX1363_EV_M \
(IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING) \
| IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING))
-#define MAX1363_INFO_MASK (1 << IIO_CHAN_INFO_SCALE_SHARED)
+#define MAX1363_INFO_MASK IIO_CHAN_INFO_SCALE_SHARED_BIT
#define MAX1363_CHAN_U(num, addr, si, bits, evmask) \
{ \
.type = IIO_VOLTAGE, \
@@ -296,7 +289,13 @@ static const enum max1363_modes max1363_mode_list[] = {
.channel = num, \
.address = addr, \
.info_mask = MAX1363_INFO_MASK, \
- .scan_type = IIO_ST('u', bits, (bits > 8) ? 16 : 8, 0), \
+ .datasheet_name = "AIN"#num, \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = bits, \
+ .storagebits = (bits > 8) ? 16 : 8, \
+ .endianness = IIO_BE, \
+ }, \
.scan_index = si, \
.event_mask = evmask, \
}
@@ -311,7 +310,13 @@ static const enum max1363_modes max1363_mode_list[] = {
.channel2 = num2, \
.address = addr, \
.info_mask = MAX1363_INFO_MASK, \
- .scan_type = IIO_ST('u', bits, (bits > 8) ? 16 : 8, 0), \
+ .datasheet_name = "AIN"#num"-AIN"#num2, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = bits, \
+ .storagebits = (bits > 8) ? 16 : 8, \
+ .endianness = IIO_BE, \
+ }, \
.scan_index = si, \
.event_mask = evmask, \
}
@@ -833,6 +838,7 @@ static const struct iio_info max1363_info = {
.read_event_config = &max1363_read_event_config,
.write_event_config = &max1363_write_event_config,
.read_raw = &max1363_read_raw,
+ .update_scan_mode = &max1363_update_scan_mode,
.driver_module = THIS_MODULE,
.event_attrs = &max1363_event_attribute_group,
};
diff --git a/drivers/staging/iio/adc/max1363_ring.c b/drivers/staging/iio/adc/max1363_ring.c
index df6893e058cc..f730b3fb971a 100644
--- a/drivers/staging/iio/adc/max1363_ring.c
+++ b/drivers/staging/iio/adc/max1363_ring.c
@@ -15,88 +15,25 @@
#include <linux/bitops.h>
#include "../iio.h"
-#include "../buffer_generic.h"
+#include "../buffer.h"
#include "../ring_sw.h"
#include "../trigger_consumer.h"
#include "max1363.h"
-int max1363_single_channel_from_ring(const long *mask, struct max1363_state *st)
-{
- struct iio_buffer *ring = iio_priv_to_dev(st)->buffer;
- int count = 0, ret, index;
- u8 *ring_data;
- index = find_first_bit(mask, MAX1363_MAX_CHANNELS);
-
- if (!(test_bit(index, st->current_mode->modemask))) {
- ret = -EBUSY;
- goto error_ret;
- }
-
- ring_data = kmalloc(ring->access->get_bytes_per_datum(ring),
- GFP_KERNEL);
- if (ring_data == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
- ret = ring->access->read_last(ring, ring_data);
- if (ret)
- goto error_free_ring_data;
- /* Need a count of channels prior to this one */
-
- count = bitmap_weight(mask, index - 1);
- if (st->chip_info->bits != 8)
- ret = ((int)(ring_data[count*2 + 0] & 0x0F) << 8)
- + (int)(ring_data[count*2 + 1]);
- else
- ret = ring_data[count];
-
-error_free_ring_data:
- kfree(ring_data);
-error_ret:
- return ret;
-}
-
-
-/**
- * max1363_ring_preenable() - setup the parameters of the ring before enabling
- *
- * The complex nature of the setting of the nuber of bytes per datum is due
- * to this driver currently ensuring that the timestamp is stored at an 8
- * byte boundary.
- **/
-static int max1363_ring_preenable(struct iio_dev *indio_dev)
+int max1363_update_scan_mode(struct iio_dev *indio_dev,
+ const unsigned long *scan_mask)
{
struct max1363_state *st = iio_priv(indio_dev);
- struct iio_buffer *ring = indio_dev->buffer;
- size_t d_size = 0;
- unsigned long numvals;
/*
* Need to figure out the current mode based upon the requested
* scan mask in iio_dev
*/
- st->current_mode = max1363_match_mode(ring->scan_mask,
- st->chip_info);
+ st->current_mode = max1363_match_mode(scan_mask, st->chip_info);
if (!st->current_mode)
return -EINVAL;
-
max1363_set_scan_mode(st);
-
- numvals = bitmap_weight(st->current_mode->modemask,
- indio_dev->masklength);
- if (ring->access->set_bytes_per_datum) {
- if (ring->scan_timestamp)
- d_size += sizeof(s64);
- if (st->chip_info->bits != 8)
- d_size += numvals*2;
- else
- d_size += numvals;
- if (ring->scan_timestamp && (d_size % 8))
- d_size += 8 - (d_size % 8);
- ring->access->set_bytes_per_datum(ring, d_size);
- }
-
return 0;
}
@@ -114,12 +51,14 @@ static irqreturn_t max1363_trigger_handler(int irq, void *p)
/* Ensure the timestamp is 8 byte aligned */
if (st->chip_info->bits != 8)
- d_size = numvals*2 + sizeof(s64);
+ d_size = numvals*2;
else
- d_size = numvals + sizeof(s64);
- if (d_size % sizeof(s64))
- d_size += sizeof(s64) - (d_size % sizeof(s64));
-
+ d_size = numvals;
+ if (indio_dev->buffer->scan_timestamp) {
+ d_size += sizeof(s64);
+ if (d_size % sizeof(s64))
+ d_size += sizeof(s64) - (d_size % sizeof(s64));
+ }
/* Monitor mode prevents reading. Whilst not currently implemented
* might as well have this test in here in the meantime as it does
* no harm.
@@ -139,9 +78,10 @@ static irqreturn_t max1363_trigger_handler(int irq, void *p)
time_ns = iio_get_time_ns();
- memcpy(rxbuf + d_size - sizeof(s64), &time_ns, sizeof(time_ns));
+ if (indio_dev->buffer->scan_timestamp)
+ memcpy(rxbuf + d_size - sizeof(s64), &time_ns, sizeof(time_ns));
+ iio_push_to_buffer(indio_dev->buffer, rxbuf, time_ns);
- indio_dev->buffer->access->store_to(indio_dev->buffer, rxbuf, time_ns);
done:
iio_trigger_notify_done(indio_dev->trig);
kfree(rxbuf);
@@ -151,7 +91,7 @@ done:
static const struct iio_buffer_setup_ops max1363_ring_setup_ops = {
.postenable = &iio_triggered_buffer_postenable,
- .preenable = &max1363_ring_preenable,
+ .preenable = &iio_sw_buffer_preenable,
.predisable = &iio_triggered_buffer_predisable,
};
@@ -179,7 +119,7 @@ int max1363_register_ring_funcs_and_init(struct iio_dev *indio_dev)
/* Effectively select the ring buffer implementation */
indio_dev->buffer->access = &ring_sw_access_funcs;
/* Ring buffer functions - here trigger setup related */
- indio_dev->buffer->setup_ops = &max1363_ring_setup_ops;
+ indio_dev->setup_ops = &max1363_ring_setup_ops;
/* Flag that polled ring buffering is possible */
indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
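The max1363 hunks above show where the responsibilities end up: reacting to a scan-mask change becomes an update_scan_mode() callback registered through struct iio_info, the generic iio_sw_buffer_preenable() takes over the bytes-per-datum setup that the driver preenable used to do, and the setup ops move from the buffer to indio_dev->setup_ops. A sketch of the wiring, with foo_* names standing in for a real driver:

static int foo_update_scan_mode(struct iio_dev *indio_dev,
				const unsigned long *scan_mask)
{
	struct foo_state *st = iio_priv(indio_dev);

	/* program the device for the channels selected in scan_mask */
	return foo_set_scan_mode(st, scan_mask);
}

static const struct iio_info foo_info = {
	.read_raw = &foo_read_raw,
	.update_scan_mode = &foo_update_scan_mode,
	.driver_module = THIS_MODULE,
};

static const struct iio_buffer_setup_ops foo_ring_setup_ops = {
	.preenable = &iio_sw_buffer_preenable,
	.postenable = &iio_triggered_buffer_postenable,
	.predisable = &iio_triggered_buffer_predisable,
};

/* in the ring setup path, after allocating indio_dev->buffer: */
/*	indio_dev->setup_ops = &foo_ring_setup_ops; */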
diff --git a/drivers/staging/iio/addac/adt7316-spi.c b/drivers/staging/iio/addac/adt7316-spi.c
index 1e93c7b7aed9..1ea3cd06299d 100644
--- a/drivers/staging/iio/addac/adt7316-spi.c
+++ b/drivers/staging/iio/addac/adt7316-spi.c
@@ -151,7 +151,6 @@ static int adt7316_spi_resume(struct spi_device *spi_dev)
static struct spi_driver adt7316_driver = {
.driver = {
.name = "adt7316",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = adt7316_spi_probe,
diff --git a/drivers/staging/iio/addac/adt7316.c b/drivers/staging/iio/addac/adt7316.c
index 8df24704ff26..13c39292d3f2 100644
--- a/drivers/staging/iio/addac/adt7316.c
+++ b/drivers/staging/iio/addac/adt7316.c
@@ -20,6 +20,7 @@
#include <linux/module.h>
#include "../iio.h"
+#include "../events.h"
#include "../sysfs.h"
#include "adt7316.h"
diff --git a/drivers/staging/iio/buffer_generic.h b/drivers/staging/iio/buffer.h
index 9e8f01009979..6fb6e64181a5 100644
--- a/drivers/staging/iio/buffer_generic.h
+++ b/drivers/staging/iio/buffer.h
@@ -11,7 +11,6 @@
#define _IIO_BUFFER_GENERIC_H_
#include <linux/sysfs.h>
#include "iio.h"
-#include "chrdev.h"
#ifdef CONFIG_IIO_BUFFER
@@ -19,22 +18,14 @@ struct iio_buffer;
/**
* struct iio_buffer_access_funcs - access functions for buffers.
- * @mark_in_use: reference counting, typically to prevent module removal
- * @unmark_in_use: reduce reference count when no longer using buffer
* @store_to: actually store stuff to the buffer
- * @read_last: get the last element stored
- * @read_first_n: try to get a specified number of elements (must exist)
- * @mark_param_change: notify buffer that some relevant parameter has changed
- * Often this means the underlying storage may need to
- * change.
+ * @read_first_n: try to get a specified number of bytes (must exist)
* @request_update: if a parameter change has been marked, update underlying
* storage.
* @get_bytes_per_datum:get current bytes per datum
* @set_bytes_per_datum:set number of bytes per datum
* @get_length: get number of datums in buffer
* @set_length: set number of datums in buffer
- * @is_enabled: query if buffer is currently being used
- * @enable: enable the buffer
*
* The purpose of this structure is to make the buffer element
* modular as event for a given driver, different usecases may require
@@ -45,85 +36,60 @@ struct iio_buffer;
* any of them not existing.
**/
struct iio_buffer_access_funcs {
- void (*mark_in_use)(struct iio_buffer *buffer);
- void (*unmark_in_use)(struct iio_buffer *buffer);
-
int (*store_to)(struct iio_buffer *buffer, u8 *data, s64 timestamp);
- int (*read_last)(struct iio_buffer *buffer, u8 *data);
int (*read_first_n)(struct iio_buffer *buffer,
size_t n,
char __user *buf);
- int (*mark_param_change)(struct iio_buffer *buffer);
int (*request_update)(struct iio_buffer *buffer);
int (*get_bytes_per_datum)(struct iio_buffer *buffer);
int (*set_bytes_per_datum)(struct iio_buffer *buffer, size_t bpd);
int (*get_length)(struct iio_buffer *buffer);
int (*set_length)(struct iio_buffer *buffer, int length);
-
- int (*is_enabled)(struct iio_buffer *buffer);
- int (*enable)(struct iio_buffer *buffer);
-};
-
-/**
- * struct iio_buffer_setup_ops - buffer setup related callbacks
- * @preenable: [DRIVER] function to run prior to marking buffer enabled
- * @postenable: [DRIVER] function to run after marking buffer enabled
- * @predisable: [DRIVER] function to run prior to marking buffer
- * disabled
- * @postdisable: [DRIVER] function to run after marking buffer disabled
- */
-struct iio_buffer_setup_ops {
- int (*preenable)(struct iio_dev *);
- int (*postenable)(struct iio_dev *);
- int (*predisable)(struct iio_dev *);
- int (*postdisable)(struct iio_dev *);
};
/**
* struct iio_buffer - general buffer structure
- * @indio_dev: industrial I/O device structure
- * @owner: module that owns the buffer (for ref counting)
* @length: [DEVICE] number of datums in buffer
* @bytes_per_datum: [DEVICE] size of individual datum including timestamp
- * @bpe: [DEVICE] size of individual channel value
* @scan_el_attrs: [DRIVER] control of scan elements if that scan mode
* control method is used
- * @scan_count: [INTERN] the number of elements in the current scan mode
* @scan_mask: [INTERN] bitmask used in masking scan mode elements
+ * @scan_index_timestamp:[INTERN] cache of the index to the timestamp
* @scan_timestamp: [INTERN] does the scan mode include a timestamp
* @access: [DRIVER] buffer access functions associated with the
* implementation.
- * @flags: [INTERN] file ops related flags including busy flag.
+ * @scan_el_dev_attr_list:[INTERN] list of scan element related attributes.
+ * @scan_el_group: [DRIVER] attribute group for those attributes not
+ * created from the iio_chan_info array.
+ * @pollq: [INTERN] wait queue to allow for polling on the buffer.
+ * @stufftoread: [INTERN] flag to indicate new data.
+ * @demux_list: [INTERN] list of operations required to demux the scan.
+ * @demux_bounce: [INTERN] buffer for doing gather from incoming scan.
**/
struct iio_buffer {
- struct iio_dev *indio_dev;
- struct module *owner;
int length;
int bytes_per_datum;
- int bpe;
struct attribute_group *scan_el_attrs;
- int scan_count;
long *scan_mask;
bool scan_timestamp;
+ unsigned scan_index_timestamp;
const struct iio_buffer_access_funcs *access;
- const struct iio_buffer_setup_ops *setup_ops;
struct list_head scan_el_dev_attr_list;
struct attribute_group scan_el_group;
wait_queue_head_t pollq;
bool stufftoread;
- unsigned long flags;
const struct attribute_group *attrs;
+ struct list_head demux_list;
+ unsigned char *demux_bounce;
};
/**
* iio_buffer_init() - Initialize the buffer structure
* @buffer: buffer to be initialized
- * @indio_dev: the iio device the buffer is assocated with
**/
-void iio_buffer_init(struct iio_buffer *buffer,
- struct iio_dev *indio_dev);
+void iio_buffer_init(struct iio_buffer *buffer);
void iio_buffer_deinit(struct iio_buffer *buffer);
@@ -140,17 +106,27 @@ static inline void __iio_update_buffer(struct iio_buffer *buffer,
buffer->length = length;
}
-int iio_scan_mask_query(struct iio_buffer *buffer, int bit);
+int iio_scan_mask_query(struct iio_dev *indio_dev,
+ struct iio_buffer *buffer, int bit);
/**
* iio_scan_mask_set() - set particular bit in the scan mask
* @buffer: the buffer whose scan mask we are interested in
* @bit: the bit to be set.
**/
-int iio_scan_mask_set(struct iio_buffer *buffer, int bit);
+int iio_scan_mask_set(struct iio_dev *indio_dev,
+ struct iio_buffer *buffer, int bit);
-#define to_iio_buffer(d) \
- container_of(d, struct iio_buffer, dev)
+/**
+ * iio_push_to_buffer() - push to a registered buffer.
+ * @buffer: IIO buffer structure for device
+ * @data:		Full scan to push to the buffer.
+ * @timestamp:	Timestamp of the scan.
+ */
+int iio_push_to_buffer(struct iio_buffer *buffer, unsigned char *data,
+ s64 timestamp);
+
+int iio_update_demux(struct iio_dev *indio_dev);
/**
* iio_buffer_register() - register the buffer with IIO core
@@ -180,12 +156,6 @@ ssize_t iio_buffer_write_length(struct device *dev,
const char *buf,
size_t len);
/**
- * iio_buffer_read_bytes_per_datum() - attr for number of bytes in whole datum
- **/
-ssize_t iio_buffer_read_bytes_per_datum(struct device *dev,
- struct device_attribute *attr,
- char *buf);
-/**
* iio_buffer_store_enable() - attr to turn the buffer on
**/
ssize_t iio_buffer_store_enable(struct device *dev,
@@ -201,9 +171,6 @@ ssize_t iio_buffer_show_enable(struct device *dev,
#define IIO_BUFFER_LENGTH_ATTR DEVICE_ATTR(length, S_IRUGO | S_IWUSR, \
iio_buffer_read_length, \
iio_buffer_write_length)
-#define IIO_BUFFER_BYTES_PER_DATUM_ATTR \
- DEVICE_ATTR(bytes_per_datum, S_IRUGO | S_IWUSR, \
- iio_buffer_read_bytes_per_datum, NULL)
#define IIO_BUFFER_ENABLE_ATTR DEVICE_ATTR(enable, S_IRUGO | S_IWUSR, \
iio_buffer_show_enable, \
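The reworked buffer.h also replaces direct buffer->access->store_to() calls with iio_push_to_buffer(), which lets the core run the new demux machinery (demux_list/demux_bounce) between what the driver captures and the layout each consumer requested. A trimmed trigger-handler sketch using the new entry point; the foo_* names and the fixed-size scan[] are illustrative only:

static irqreturn_t foo_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	u8 scan[16];		/* sized for this sketch only */
	s64 time_ns;

	/* device-specific acquisition into scan[] would go here */

	time_ns = iio_get_time_ns();
	iio_push_to_buffer(indio_dev->buffer, scan, time_ns);

	iio_trigger_notify_done(indio_dev->trig);
	return IRQ_HANDLED;
}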
diff --git a/drivers/staging/iio/cdc/ad7150.c b/drivers/staging/iio/cdc/ad7150.c
index 47181875e113..b73007dcf4b3 100644
--- a/drivers/staging/iio/cdc/ad7150.c
+++ b/drivers/staging/iio/cdc/ad7150.c
@@ -15,7 +15,7 @@
#include "../iio.h"
#include "../sysfs.h"
-
+#include "../events.h"
/*
* AD7150 registers definition
*/
@@ -111,7 +111,7 @@ static int ad7150_read_raw(struct iio_dev *indio_dev,
return ret;
*val = swab16(ret);
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_AVERAGE_RAW_SEPARATE):
+ case IIO_CHAN_INFO_AVERAGE_RAW:
ret = i2c_smbus_read_word_data(chip->client,
ad7150_addresses[chan->channel][1]);
if (ret < 0)
@@ -429,7 +429,7 @@ static const struct iio_chan_spec ad7150_channels[] = {
.type = IIO_CAPACITANCE,
.indexed = 1,
.channel = 0,
- .info_mask = (1 << IIO_CHAN_INFO_AVERAGE_RAW_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_AVERAGE_RAW_SEPARATE_BIT,
.event_mask =
IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING) |
IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING) |
@@ -441,7 +441,7 @@ static const struct iio_chan_spec ad7150_channels[] = {
.type = IIO_CAPACITANCE,
.indexed = 1,
.channel = 1,
- .info_mask = (1 << IIO_CHAN_INFO_AVERAGE_RAW_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_AVERAGE_RAW_SEPARATE_BIT,
.event_mask =
IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING) |
IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING) |
diff --git a/drivers/staging/iio/cdc/ad7152.c b/drivers/staging/iio/cdc/ad7152.c
index 152d3be6d97d..fdb83c35e6dd 100644
--- a/drivers/staging/iio/cdc/ad7152.c
+++ b/drivers/staging/iio/cdc/ad7152.c
@@ -259,7 +259,7 @@ static int ad7152_write_raw(struct iio_dev *indio_dev,
mutex_lock(&indio_dev->mlock);
switch (mask) {
- case (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE):
+ case IIO_CHAN_INFO_CALIBSCALE:
if (val != 1) {
ret = -EINVAL;
goto out;
@@ -276,7 +276,7 @@ static int ad7152_write_raw(struct iio_dev *indio_dev,
ret = 0;
break;
- case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
+ case IIO_CHAN_INFO_CALIBBIAS:
if ((val < 0) | (val > 0xFFFF)) {
ret = -EINVAL;
goto out;
@@ -289,7 +289,7 @@ static int ad7152_write_raw(struct iio_dev *indio_dev,
ret = 0;
break;
- case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
+ case IIO_CHAN_INFO_SCALE:
if (val != 0) {
ret = -EINVAL;
goto out;
@@ -372,7 +372,7 @@ static int ad7152_read_raw(struct iio_dev *indio_dev,
ret = IIO_VAL_INT;
break;
- case (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE):
+ case IIO_CHAN_INFO_CALIBSCALE:
ret = i2c_smbus_read_word_data(chip->client,
ad7152_addresses[chan->channel][AD7152_GAIN]);
@@ -384,7 +384,7 @@ static int ad7152_read_raw(struct iio_dev *indio_dev,
ret = IIO_VAL_INT_PLUS_MICRO;
break;
- case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
+ case IIO_CHAN_INFO_CALIBBIAS:
ret = i2c_smbus_read_word_data(chip->client,
ad7152_addresses[chan->channel][AD7152_OFFS]);
if (ret < 0)
@@ -393,7 +393,7 @@ static int ad7152_read_raw(struct iio_dev *indio_dev,
ret = IIO_VAL_INT;
break;
- case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
+ case IIO_CHAN_INFO_SCALE:
ret = i2c_smbus_read_byte_data(chip->client,
ad7152_addresses[chan->channel][AD7152_SETUP]);
if (ret < 0)
@@ -416,7 +416,7 @@ static int ad7152_write_raw_get_fmt(struct iio_dev *indio_dev,
long mask)
{
switch (mask) {
- case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
+ case IIO_CHAN_INFO_SCALE:
return IIO_VAL_INT_PLUS_NANO;
default:
return IIO_VAL_INT_PLUS_MICRO;
@@ -436,34 +436,34 @@ static const struct iio_chan_spec ad7152_channels[] = {
.type = IIO_CAPACITANCE,
.indexed = 1,
.channel = 0,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE) |
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_CALIBSCALE_SEPARATE_BIT |
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
}, {
.type = IIO_CAPACITANCE,
.differential = 1,
.indexed = 1,
.channel = 0,
.channel2 = 2,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE) |
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_CALIBSCALE_SEPARATE_BIT |
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
}, {
.type = IIO_CAPACITANCE,
.indexed = 1,
.channel = 1,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE) |
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_CALIBSCALE_SEPARATE_BIT |
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
}, {
.type = IIO_CAPACITANCE,
.differential = 1,
.indexed = 1,
.channel = 1,
.channel2 = 3,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE) |
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_CALIBSCALE_SEPARATE_BIT |
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
}
};
/*
diff --git a/drivers/staging/iio/cdc/ad7746.c b/drivers/staging/iio/cdc/ad7746.c
index 9df590891a69..40b8512cbc32 100644
--- a/drivers/staging/iio/cdc/ad7746.c
+++ b/drivers/staging/iio/cdc/ad7746.c
@@ -123,7 +123,7 @@ static const struct iio_chan_spec ad7746_channels[] = {
.type = IIO_VOLTAGE,
.indexed = 1,
.channel = 0,
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT,
.address = AD7746_REG_VT_DATA_HIGH << 8 |
AD7746_VTSETUP_VTMD_EXT_VIN,
},
@@ -132,7 +132,7 @@ static const struct iio_chan_spec ad7746_channels[] = {
.indexed = 1,
.channel = 1,
.extend_name = "supply",
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT,
.address = AD7746_REG_VT_DATA_HIGH << 8 |
AD7746_VTSETUP_VTMD_VDD_MON,
},
@@ -156,10 +156,10 @@ static const struct iio_chan_spec ad7746_channels[] = {
.type = IIO_CAPACITANCE,
.indexed = 1,
.channel = 0,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE) |
- (1 << IIO_CHAN_INFO_CALIBBIAS_SHARED) |
- (1 << IIO_CHAN_INFO_OFFSET_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBSCALE_SEPARATE_BIT |
+ IIO_CHAN_INFO_CALIBBIAS_SHARED_BIT |
+ IIO_CHAN_INFO_OFFSET_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
.address = AD7746_REG_CAP_DATA_HIGH << 8,
},
[CIN1_DIFF] = {
@@ -168,10 +168,10 @@ static const struct iio_chan_spec ad7746_channels[] = {
.indexed = 1,
.channel = 0,
.channel2 = 2,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE) |
- (1 << IIO_CHAN_INFO_CALIBBIAS_SHARED) |
- (1 << IIO_CHAN_INFO_OFFSET_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBSCALE_SEPARATE_BIT |
+ IIO_CHAN_INFO_CALIBBIAS_SHARED_BIT |
+ IIO_CHAN_INFO_OFFSET_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
.address = AD7746_REG_CAP_DATA_HIGH << 8 |
AD7746_CAPSETUP_CAPDIFF
},
@@ -179,10 +179,10 @@ static const struct iio_chan_spec ad7746_channels[] = {
.type = IIO_CAPACITANCE,
.indexed = 1,
.channel = 1,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE) |
- (1 << IIO_CHAN_INFO_CALIBBIAS_SHARED) |
- (1 << IIO_CHAN_INFO_OFFSET_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBSCALE_SEPARATE_BIT |
+ IIO_CHAN_INFO_CALIBBIAS_SHARED_BIT |
+ IIO_CHAN_INFO_OFFSET_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
.address = AD7746_REG_CAP_DATA_HIGH << 8 |
AD7746_CAPSETUP_CIN2,
},
@@ -192,10 +192,10 @@ static const struct iio_chan_spec ad7746_channels[] = {
.indexed = 1,
.channel = 1,
.channel2 = 3,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE) |
- (1 << IIO_CHAN_INFO_CALIBBIAS_SHARED) |
- (1 << IIO_CHAN_INFO_OFFSET_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBSCALE_SEPARATE_BIT |
+ IIO_CHAN_INFO_CALIBBIAS_SHARED_BIT |
+ IIO_CHAN_INFO_OFFSET_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
.address = AD7746_REG_CAP_DATA_HIGH << 8 |
AD7746_CAPSETUP_CAPDIFF | AD7746_CAPSETUP_CIN2,
}
@@ -477,7 +477,7 @@ static int ad7746_write_raw(struct iio_dev *indio_dev,
mutex_lock(&indio_dev->mlock);
switch (mask) {
- case (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE):
+ case IIO_CHAN_INFO_CALIBSCALE:
if (val != 1) {
ret = -EINVAL;
goto out;
@@ -503,7 +503,7 @@ static int ad7746_write_raw(struct iio_dev *indio_dev,
ret = 0;
break;
- case (1 << IIO_CHAN_INFO_CALIBBIAS_SHARED):
+ case IIO_CHAN_INFO_CALIBBIAS:
if ((val < 0) | (val > 0xFFFF)) {
ret = -EINVAL;
goto out;
@@ -515,7 +515,7 @@ static int ad7746_write_raw(struct iio_dev *indio_dev,
ret = 0;
break;
- case (1 << IIO_CHAN_INFO_OFFSET_SEPARATE):
+ case IIO_CHAN_INFO_OFFSET:
if ((val < 0) | (val > 43008000)) { /* 21pF */
ret = -EINVAL;
goto out;
@@ -612,7 +612,7 @@ static int ad7746_read_raw(struct iio_dev *indio_dev,
ret = IIO_VAL_INT;
break;
- case (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE):
+ case IIO_CHAN_INFO_CALIBSCALE:
switch (chan->type) {
case IIO_CAPACITANCE:
reg = AD7746_REG_CAP_GAINH;
@@ -634,7 +634,7 @@ static int ad7746_read_raw(struct iio_dev *indio_dev,
ret = IIO_VAL_INT_PLUS_MICRO;
break;
- case (1 << IIO_CHAN_INFO_CALIBBIAS_SHARED):
+ case IIO_CHAN_INFO_CALIBBIAS:
ret = i2c_smbus_read_word_data(chip->client,
AD7746_REG_CAP_OFFH);
if (ret < 0)
@@ -643,13 +643,13 @@ static int ad7746_read_raw(struct iio_dev *indio_dev,
ret = IIO_VAL_INT;
break;
- case (1 << IIO_CHAN_INFO_OFFSET_SEPARATE):
+ case IIO_CHAN_INFO_OFFSET:
*val = AD7746_CAPDAC_DACP(chip->capdac[chan->channel]
[chan->differential]) * 338646;
ret = IIO_VAL_INT;
break;
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
case IIO_CAPACITANCE:
/* 8.192pf / 2^24 */
diff --git a/drivers/staging/iio/chrdev.h b/drivers/staging/iio/chrdev.h
deleted file mode 100644
index d8e736f60522..000000000000
--- a/drivers/staging/iio/chrdev.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/* The industrial I/O core - character device related
- *
- * Copyright (c) 2008 Jonathan Cameron
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- */
-
-#ifndef _IIO_CHRDEV_H_
-#define _IIO_CHRDEV_H_
-
-/**
- * struct iio_event_data - The actual event being pushed to userspace
- * @id: event identifier
- * @timestamp: best estimate of time of event occurrence (often from
- * the interrupt handler)
- */
-struct iio_event_data {
- u64 id;
- s64 timestamp;
-};
-
-#define IIO_GET_EVENT_FD_IOCTL _IOR('i', 0x90, int)
-#endif
diff --git a/drivers/staging/iio/dac/Kconfig b/drivers/staging/iio/dac/Kconfig
index fac8549dc8e7..13e27979df24 100644
--- a/drivers/staging/iio/dac/Kconfig
+++ b/drivers/staging/iio/dac/Kconfig
@@ -24,6 +24,29 @@ config AD5360
To compile this driver as module choose M here: the module will be called
ad5360.
+config AD5380
+ tristate "Analog Devices AD5380/81/82/83/84/90/91/92 DAC driver"
+ depends on (SPI_MASTER || I2C)
+ select REGMAP_I2C if I2C
+ select REGMAP_SPI if SPI_MASTER
+ help
+ Say yes here to build support for Analog Devices AD5380, AD5381,
+ AD5382, AD5383, AD5384, AD5390, AD5391, AD5392 multi-channel
+ Digital to Analog Converters (DAC).
+
+ To compile this driver as module choose M here: the module will be called
+ ad5380.
+
+config AD5421
+ tristate "Analog Devices AD5421 DAC driver"
+ depends on SPI
+ help
+ Say yes here to build support for Analog Devices AD5421 loop-powered
+	  digital-to-analog converter (DAC).
+
+ To compile this driver as module choose M here: the module will be called
+ ad5421.
+
config AD5624R_SPI
tristate "Analog Devices AD5624/44/64R DAC spi driver"
depends on SPI
@@ -37,7 +60,7 @@ config AD5446
help
Say yes here to build support for Analog Devices AD5444, AD5446,
AD5512A, AD5542A, AD5543, AD5553, AD5601, AD5611, AD5620, AD5621,
- AD5640, AD5660 DACs.
+ AD5640, AD5660, AD5662 DACs.
To compile this driver as a module, choose M here: the
module will be called ad5446.
@@ -52,12 +75,22 @@ config AD5504
To compile this driver as a module, choose M here: the
module will be called ad5504.
+config AD5764
+ tristate "Analog Devices AD5764/64R/44/44R DAC driver"
+ depends on SPI_MASTER
+ help
+ Say yes here to build support for Analog Devices AD5764, AD5764R, AD5744,
+ AD5744R Digital to Analog Converter.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad5764.
+
config AD5791
- tristate "Analog Devices AD5760/AD5780/AD5781/AD5791 DAC SPI driver"
+ tristate "Analog Devices AD5760/AD5780/AD5781/AD5790/AD5791 DAC SPI driver"
depends on SPI
help
Say yes here to build support for Analog Devices AD5760, AD5780,
- AD5781, AD5791 High Resolution Voltage Output Digital to
+ AD5781, AD5790, AD5791 High Resolution Voltage Output Digital to
Analog Converter.
To compile this driver as a module, choose M here: the
diff --git a/drivers/staging/iio/dac/Makefile b/drivers/staging/iio/dac/Makefile
index 07b6f5ebdb52..8ab1d264aab7 100644
--- a/drivers/staging/iio/dac/Makefile
+++ b/drivers/staging/iio/dac/Makefile
@@ -3,10 +3,13 @@
#
obj-$(CONFIG_AD5360) += ad5360.o
+obj-$(CONFIG_AD5380) += ad5380.o
+obj-$(CONFIG_AD5421) += ad5421.o
obj-$(CONFIG_AD5624R_SPI) += ad5624r_spi.o
obj-$(CONFIG_AD5064) += ad5064.o
obj-$(CONFIG_AD5504) += ad5504.o
obj-$(CONFIG_AD5446) += ad5446.o
+obj-$(CONFIG_AD5764) += ad5764.o
obj-$(CONFIG_AD5791) += ad5791.o
obj-$(CONFIG_AD5686) += ad5686.o
obj-$(CONFIG_MAX517) += max517.o
diff --git a/drivers/staging/iio/dac/ad5064.c b/drivers/staging/iio/dac/ad5064.c
index 39cfe6cb02a2..049a855039c2 100644
--- a/drivers/staging/iio/dac/ad5064.c
+++ b/drivers/staging/iio/dac/ad5064.c
@@ -91,7 +91,7 @@ enum ad5064_type {
.indexed = 1, \
.output = 1, \
.channel = (chan), \
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SEPARATE), \
+ .info_mask = IIO_CHAN_INFO_SCALE_SEPARATE_BIT, \
.address = AD5064_ADDR_DAC(chan), \
.scan_type = IIO_ST('u', (bits), 16, 20 - (bits)) \
}
@@ -280,14 +280,14 @@ static int ad5064_read_raw(struct iio_dev *indio_dev,
long m)
{
struct ad5064_state *st = iio_priv(indio_dev);
- unsigned long scale_uv;
unsigned int vref;
+ int scale_uv;
switch (m) {
case 0:
*val = st->dac_cache[chan->channel];
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
+ case IIO_CHAN_INFO_SCALE:
vref = st->chip_info->shared_vref ? 0 : chan->channel;
scale_uv = regulator_get_voltage(st->vref_reg[vref].consumer);
if (scale_uv < 0)
diff --git a/drivers/staging/iio/dac/ad5360.c b/drivers/staging/iio/dac/ad5360.c
index bc0459e32d04..710b256affcc 100644
--- a/drivers/staging/iio/dac/ad5360.c
+++ b/drivers/staging/iio/dac/ad5360.c
@@ -103,10 +103,10 @@ enum ad5360_type {
.type = IIO_VOLTAGE, \
.indexed = 1, \
.output = 1, \
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SEPARATE) | \
- (1 << IIO_CHAN_INFO_OFFSET_SEPARATE) | \
- (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE) | \
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE), \
+ .info_mask = IIO_CHAN_INFO_SCALE_SEPARATE_BIT | \
+ IIO_CHAN_INFO_OFFSET_SEPARATE_BIT | \
+ IIO_CHAN_INFO_CALIBSCALE_SEPARATE_BIT | \
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT, \
.scan_type = IIO_ST('u', (bits), 16, 16 - (bits)) \
}
@@ -326,21 +326,21 @@ static int ad5360_write_raw(struct iio_dev *indio_dev,
return ad5360_write(indio_dev, AD5360_CMD_WRITE_DATA,
chan->address, val, chan->scan_type.shift);
- case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
+ case IIO_CHAN_INFO_CALIBBIAS:
if (val >= max_val || val < 0)
return -EINVAL;
return ad5360_write(indio_dev, AD5360_CMD_WRITE_OFFSET,
chan->address, val, chan->scan_type.shift);
- case (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE):
+ case IIO_CHAN_INFO_CALIBSCALE:
if (val >= max_val || val < 0)
return -EINVAL;
return ad5360_write(indio_dev, AD5360_CMD_WRITE_GAIN,
chan->address, val, chan->scan_type.shift);
- case (1 << IIO_CHAN_INFO_OFFSET_SEPARATE):
+ case IIO_CHAN_INFO_OFFSET:
if (val <= -max_val || val > 0)
return -EINVAL;
@@ -371,8 +371,8 @@ static int ad5360_read_raw(struct iio_dev *indio_dev,
long m)
{
struct ad5360_state *st = iio_priv(indio_dev);
- unsigned long scale_uv;
unsigned int ofs_index;
+ int scale_uv;
int ret;
switch (m) {
@@ -383,7 +383,7 @@ static int ad5360_read_raw(struct iio_dev *indio_dev,
return ret;
*val = ret >> chan->scan_type.shift;
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
+ case IIO_CHAN_INFO_SCALE:
/* vout = 4 * vref * dac_code */
scale_uv = ad5360_get_channel_vref(st, chan->channel) * 4 * 100;
if (scale_uv < 0)
@@ -393,21 +393,21 @@ static int ad5360_read_raw(struct iio_dev *indio_dev,
*val = scale_uv / 100000;
*val2 = (scale_uv % 100000) * 10;
return IIO_VAL_INT_PLUS_MICRO;
- case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
+ case IIO_CHAN_INFO_CALIBBIAS:
ret = ad5360_read(indio_dev, AD5360_READBACK_OFFSET,
chan->address);
if (ret < 0)
return ret;
*val = ret;
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE):
+ case IIO_CHAN_INFO_CALIBSCALE:
ret = ad5360_read(indio_dev, AD5360_READBACK_GAIN,
chan->address);
if (ret < 0)
return ret;
*val = ret;
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_OFFSET_SEPARATE):
+ case IIO_CHAN_INFO_OFFSET:
ofs_index = ad5360_get_channel_vref_index(st, chan->channel);
ret = ad5360_read(indio_dev, AD5360_READBACK_SF,
AD5360_REG_SF_OFS(ofs_index));
diff --git a/drivers/staging/iio/dac/ad5380.c b/drivers/staging/iio/dac/ad5380.c
new file mode 100644
index 000000000000..eff97ae05c4b
--- /dev/null
+++ b/drivers/staging/iio/dac/ad5380.c
@@ -0,0 +1,676 @@
+/*
+ * Analog Devices AD5380, AD5381, AD5382, AD5383, AD5384, AD5390, AD5391, AD5392
+ * multi-channel Digital to Analog Converters driver
+ *
+ * Copyright 2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "dac.h"
+
+
+#define AD5380_REG_DATA(x) (((x) << 2) | 3)
+#define AD5380_REG_OFFSET(x) (((x) << 2) | 2)
+#define AD5380_REG_GAIN(x) (((x) << 2) | 1)
+#define AD5380_REG_SF_PWR_DOWN (8 << 2)
+#define AD5380_REG_SF_PWR_UP (9 << 2)
+#define AD5380_REG_SF_CTRL (12 << 2)
+
+#define AD5380_CTRL_PWR_DOWN_MODE_OFFSET 13
+#define AD5380_CTRL_INT_VREF_2V5 BIT(12)
+#define AD5380_CTRL_INT_VREF_EN BIT(10)
+
+/**
+ * struct ad5380_chip_info - chip specific information
+ * @channel_template: channel specification template
+ * @num_channels: number of channels
+ * @int_vref: internal vref in uV
+*/
+
+struct ad5380_chip_info {
+ struct iio_chan_spec channel_template;
+ unsigned int num_channels;
+ unsigned int int_vref;
+};
+
+/**
+ * struct ad5380_state - driver instance specific data
+ * @regmap: regmap instance used by the device
+ * @chip_info: chip model specific constants, available modes etc
+ * @vref_reg: vref supply regulator
+ * @vref:		actual reference voltage used in uV
+ * @pwr_down: whether the chip is currently in power down mode
+ */
+
+struct ad5380_state {
+ struct regmap *regmap;
+ const struct ad5380_chip_info *chip_info;
+ struct regulator *vref_reg;
+ int vref;
+ bool pwr_down;
+};
+
+enum ad5380_type {
+ ID_AD5380_3,
+ ID_AD5380_5,
+ ID_AD5381_3,
+ ID_AD5381_5,
+ ID_AD5382_3,
+ ID_AD5382_5,
+ ID_AD5383_3,
+ ID_AD5383_5,
+ ID_AD5390_3,
+ ID_AD5390_5,
+ ID_AD5391_3,
+ ID_AD5391_5,
+ ID_AD5392_3,
+ ID_AD5392_5,
+};
+
+#define AD5380_CHANNEL(_bits) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .output = 1, \
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT | \
+ IIO_CHAN_INFO_CALIBSCALE_SEPARATE_BIT | \
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT, \
+ .scan_type = IIO_ST('u', (_bits), 16, 14 - (_bits)) \
+}
+
+static const struct ad5380_chip_info ad5380_chip_info_tbl[] = {
+ [ID_AD5380_3] = {
+ .channel_template = AD5380_CHANNEL(14),
+ .num_channels = 40,
+ .int_vref = 1250000,
+ },
+ [ID_AD5380_5] = {
+ .channel_template = AD5380_CHANNEL(14),
+ .num_channels = 40,
+ .int_vref = 2500000,
+ },
+ [ID_AD5381_3] = {
+ .channel_template = AD5380_CHANNEL(12),
+ .num_channels = 16,
+ .int_vref = 1250000,
+ },
+ [ID_AD5381_5] = {
+ .channel_template = AD5380_CHANNEL(12),
+ .num_channels = 16,
+ .int_vref = 2500000,
+ },
+ [ID_AD5382_3] = {
+ .channel_template = AD5380_CHANNEL(14),
+ .num_channels = 32,
+ .int_vref = 1250000,
+ },
+ [ID_AD5382_5] = {
+ .channel_template = AD5380_CHANNEL(14),
+ .num_channels = 32,
+ .int_vref = 2500000,
+ },
+ [ID_AD5383_3] = {
+ .channel_template = AD5380_CHANNEL(12),
+ .num_channels = 32,
+ .int_vref = 1250000,
+ },
+ [ID_AD5383_5] = {
+ .channel_template = AD5380_CHANNEL(12),
+ .num_channels = 32,
+ .int_vref = 2500000,
+ },
+ [ID_AD5390_3] = {
+ .channel_template = AD5380_CHANNEL(14),
+ .num_channels = 16,
+ .int_vref = 1250000,
+ },
+ [ID_AD5390_5] = {
+ .channel_template = AD5380_CHANNEL(14),
+ .num_channels = 16,
+ .int_vref = 2500000,
+ },
+ [ID_AD5391_3] = {
+ .channel_template = AD5380_CHANNEL(12),
+ .num_channels = 16,
+ .int_vref = 1250000,
+ },
+ [ID_AD5391_5] = {
+ .channel_template = AD5380_CHANNEL(12),
+ .num_channels = 16,
+ .int_vref = 2500000,
+ },
+ [ID_AD5392_3] = {
+ .channel_template = AD5380_CHANNEL(14),
+ .num_channels = 8,
+ .int_vref = 1250000,
+ },
+ [ID_AD5392_5] = {
+ .channel_template = AD5380_CHANNEL(14),
+ .num_channels = 8,
+ .int_vref = 2500000,
+ },
+};
+
+static ssize_t ad5380_read_dac_powerdown(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad5380_state *st = iio_priv(indio_dev);
+
+ return sprintf(buf, "%d\n", st->pwr_down);
+}
+
+static ssize_t ad5380_write_dac_powerdown(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad5380_state *st = iio_priv(indio_dev);
+ bool pwr_down;
+ int ret;
+
+ ret = strtobool(buf, &pwr_down);
+ if (ret)
+ return ret;
+
+ mutex_lock(&indio_dev->mlock);
+
+ if (pwr_down)
+ ret = regmap_write(st->regmap, AD5380_REG_SF_PWR_DOWN, 0);
+ else
+ ret = regmap_write(st->regmap, AD5380_REG_SF_PWR_UP, 0);
+
+ st->pwr_down = pwr_down;
+
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret ? ret : len;
+}
+
+static IIO_DEVICE_ATTR(out_voltage_powerdown,
+ S_IRUGO | S_IWUSR,
+ ad5380_read_dac_powerdown,
+ ad5380_write_dac_powerdown, 0);
+
+static const char ad5380_powerdown_modes[][15] = {
+ [0] = "100kohm_to_gnd",
+ [1] = "three_state",
+};
+
+static ssize_t ad5380_read_powerdown_mode(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad5380_state *st = iio_priv(indio_dev);
+ unsigned int mode;
+ int ret;
+
+ ret = regmap_read(st->regmap, AD5380_REG_SF_CTRL, &mode);
+ if (ret)
+ return ret;
+
+ mode = (mode >> AD5380_CTRL_PWR_DOWN_MODE_OFFSET) & 1;
+
+ return sprintf(buf, "%s\n", ad5380_powerdown_modes[mode]);
+}
+
+static ssize_t ad5380_write_powerdown_mode(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad5380_state *st = iio_priv(indio_dev);
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(ad5380_powerdown_modes); ++i) {
+ if (sysfs_streq(buf, ad5380_powerdown_modes[i]))
+ break;
+ }
+
+ if (i == ARRAY_SIZE(ad5380_powerdown_modes))
+ return -EINVAL;
+
+ ret = regmap_update_bits(st->regmap, AD5380_REG_SF_CTRL,
+ 1 << AD5380_CTRL_PWR_DOWN_MODE_OFFSET,
+ i << AD5380_CTRL_PWR_DOWN_MODE_OFFSET);
+
+ return ret ? ret : len;
+}
+
+static IIO_DEVICE_ATTR(out_voltage_powerdown_mode,
+ S_IRUGO | S_IWUSR,
+ ad5380_read_powerdown_mode,
+ ad5380_write_powerdown_mode, 0);
+
+static IIO_CONST_ATTR(out_voltage_powerdown_mode_available,
+ "100kohm_to_gnd three_state");
+
+static struct attribute *ad5380_attributes[] = {
+ &iio_dev_attr_out_voltage_powerdown.dev_attr.attr,
+ &iio_dev_attr_out_voltage_powerdown_mode.dev_attr.attr,
+ &iio_const_attr_out_voltage_powerdown_mode_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group ad5380_attribute_group = {
+ .attrs = ad5380_attributes,
+};
+
+static unsigned int ad5380_info_to_reg(struct iio_chan_spec const *chan,
+ long info)
+{
+ switch (info) {
+ case 0:
+ return AD5380_REG_DATA(chan->address);
+ case IIO_CHAN_INFO_CALIBBIAS:
+ return AD5380_REG_OFFSET(chan->address);
+ case IIO_CHAN_INFO_CALIBSCALE:
+ return AD5380_REG_GAIN(chan->address);
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int ad5380_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val, int val2, long info)
+{
+ const unsigned int max_val = (1 << chan->scan_type.realbits);
+ struct ad5380_state *st = iio_priv(indio_dev);
+
+ switch (info) {
+ case 0:
+ case IIO_CHAN_INFO_CALIBSCALE:
+ if (val >= max_val || val < 0)
+ return -EINVAL;
+
+ return regmap_write(st->regmap,
+ ad5380_info_to_reg(chan, info),
+ val << chan->scan_type.shift);
+ case IIO_CHAN_INFO_CALIBBIAS:
+ val += (1 << chan->scan_type.realbits) / 2;
+ if (val >= max_val || val < 0)
+ return -EINVAL;
+
+ return regmap_write(st->regmap,
+ AD5380_REG_OFFSET(chan->address),
+ val << chan->scan_type.shift);
+ default:
+ break;
+ }
+ return -EINVAL;
+}
+
+static int ad5380_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val, int *val2, long info)
+{
+ struct ad5380_state *st = iio_priv(indio_dev);
+ unsigned long scale_uv;
+ int ret;
+
+ switch (info) {
+ case 0:
+ case IIO_CHAN_INFO_CALIBSCALE:
+ ret = regmap_read(st->regmap, ad5380_info_to_reg(chan, info),
+ val);
+ if (ret)
+ return ret;
+ *val >>= chan->scan_type.shift;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_CALIBBIAS:
+ ret = regmap_read(st->regmap, AD5380_REG_OFFSET(chan->address),
+ val);
+ if (ret)
+ return ret;
+ *val >>= chan->scan_type.shift;
+		*val -= (1 << chan->scan_type.realbits) / 2;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ scale_uv = ((2 * st->vref) >> chan->scan_type.realbits) * 100;
+ *val = scale_uv / 100000;
+ *val2 = (scale_uv % 100000) * 10;
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static const struct iio_info ad5380_info = {
+ .read_raw = ad5380_read_raw,
+ .write_raw = ad5380_write_raw,
+ .attrs = &ad5380_attribute_group,
+ .driver_module = THIS_MODULE,
+};
+
+static int __devinit ad5380_alloc_channels(struct iio_dev *indio_dev)
+{
+ struct ad5380_state *st = iio_priv(indio_dev);
+ struct iio_chan_spec *channels;
+ unsigned int i;
+
+	channels = kcalloc(st->chip_info->num_channels,
+			   sizeof(struct iio_chan_spec), GFP_KERNEL);
+
+ if (!channels)
+ return -ENOMEM;
+
+ for (i = 0; i < st->chip_info->num_channels; ++i) {
+ channels[i] = st->chip_info->channel_template;
+ channels[i].channel = i;
+ channels[i].address = i;
+ }
+
+ indio_dev->channels = channels;
+
+ return 0;
+}
+
+static int __devinit ad5380_probe(struct device *dev, struct regmap *regmap,
+ enum ad5380_type type, const char *name)
+{
+ struct iio_dev *indio_dev;
+ struct ad5380_state *st;
+ unsigned int ctrl = 0;
+ int ret;
+
+ indio_dev = iio_allocate_device(sizeof(*st));
+ if (indio_dev == NULL) {
+ dev_err(dev, "Failed to allocate iio device\n");
+ ret = -ENOMEM;
+ goto error_regmap_exit;
+ }
+
+ st = iio_priv(indio_dev);
+ dev_set_drvdata(dev, indio_dev);
+
+ st->chip_info = &ad5380_chip_info_tbl[type];
+ st->regmap = regmap;
+
+ indio_dev->dev.parent = dev;
+ indio_dev->name = name;
+ indio_dev->info = &ad5380_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->num_channels = st->chip_info->num_channels;
+
+ ret = ad5380_alloc_channels(indio_dev);
+ if (ret) {
+ dev_err(dev, "Failed to allocate channel spec: %d\n", ret);
+ goto error_free;
+ }
+
+ if (st->chip_info->int_vref == 2500000)
+ ctrl |= AD5380_CTRL_INT_VREF_2V5;
+
+ st->vref_reg = regulator_get(dev, "vref");
+ if (!IS_ERR(st->vref_reg)) {
+ ret = regulator_enable(st->vref_reg);
+ if (ret) {
+ dev_err(dev, "Failed to enable vref regulators: %d\n",
+ ret);
+ goto error_free_reg;
+ }
+
+ st->vref = regulator_get_voltage(st->vref_reg);
+ } else {
+ st->vref = st->chip_info->int_vref;
+ ctrl |= AD5380_CTRL_INT_VREF_EN;
+ }
+
+ ret = regmap_write(st->regmap, AD5380_REG_SF_CTRL, ctrl);
+ if (ret) {
+ dev_err(dev, "Failed to write to device: %d\n", ret);
+ goto error_disable_reg;
+ }
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(dev, "Failed to register iio device: %d\n", ret);
+ goto error_disable_reg;
+ }
+
+ return 0;
+
+error_disable_reg:
+ if (!IS_ERR(st->vref_reg))
+ regulator_disable(st->vref_reg);
+error_free_reg:
+ if (!IS_ERR(st->vref_reg))
+ regulator_put(st->vref_reg);
+
+ kfree(indio_dev->channels);
+error_free:
+ iio_free_device(indio_dev);
+error_regmap_exit:
+ regmap_exit(regmap);
+
+ return ret;
+}
+
+static int __devexit ad5380_remove(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad5380_state *st = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+
+ kfree(indio_dev->channels);
+
+ if (!IS_ERR(st->vref_reg)) {
+ regulator_disable(st->vref_reg);
+ regulator_put(st->vref_reg);
+ }
+
+ regmap_exit(st->regmap);
+ iio_free_device(indio_dev);
+
+ return 0;
+}
+
+static bool ad5380_reg_false(struct device *dev, unsigned int reg)
+{
+ return false;
+}
+
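+/*
+ * All registers are marked neither volatile nor readable, so reads are
+ * served from the register cache rather than from the bus.
+ */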
+static const struct regmap_config ad5380_regmap_config = {
+ .reg_bits = 10,
+ .val_bits = 14,
+
+ .max_register = AD5380_REG_DATA(40),
+ .cache_type = REGCACHE_RBTREE,
+
+ .volatile_reg = ad5380_reg_false,
+ .readable_reg = ad5380_reg_false,
+};
+
+#if IS_ENABLED(CONFIG_SPI_MASTER)
+
+static int __devinit ad5380_spi_probe(struct spi_device *spi)
+{
+ const struct spi_device_id *id = spi_get_device_id(spi);
+ struct regmap *regmap;
+
+ regmap = regmap_init_spi(spi, &ad5380_regmap_config);
+
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ return ad5380_probe(&spi->dev, regmap, id->driver_data, id->name);
+}
+
+static int __devexit ad5380_spi_remove(struct spi_device *spi)
+{
+ return ad5380_remove(&spi->dev);
+}
+
+static const struct spi_device_id ad5380_spi_ids[] = {
+ { "ad5380-3", ID_AD5380_3 },
+ { "ad5380-5", ID_AD5380_5 },
+ { "ad5381-3", ID_AD5381_3 },
+ { "ad5381-5", ID_AD5381_5 },
+ { "ad5382-3", ID_AD5382_3 },
+ { "ad5382-5", ID_AD5382_5 },
+ { "ad5383-3", ID_AD5383_3 },
+ { "ad5383-5", ID_AD5383_5 },
+ { "ad5384-3", ID_AD5380_3 },
+ { "ad5384-5", ID_AD5380_5 },
+ { "ad5390-3", ID_AD5390_3 },
+ { "ad5390-5", ID_AD5390_5 },
+ { "ad5391-3", ID_AD5391_3 },
+ { "ad5391-5", ID_AD5391_5 },
+ { "ad5392-3", ID_AD5392_3 },
+ { "ad5392-5", ID_AD5392_5 },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, ad5380_spi_ids);
+
+static struct spi_driver ad5380_spi_driver = {
+ .driver = {
+ .name = "ad5380",
+ .owner = THIS_MODULE,
+ },
+ .probe = ad5380_spi_probe,
+ .remove = __devexit_p(ad5380_spi_remove),
+ .id_table = ad5380_spi_ids,
+};
+
+static inline int ad5380_spi_register_driver(void)
+{
+ return spi_register_driver(&ad5380_spi_driver);
+}
+
+static inline void ad5380_spi_unregister_driver(void)
+{
+ spi_unregister_driver(&ad5380_spi_driver);
+}
+
+#else
+
+static inline int ad5380_spi_register_driver(void)
+{
+ return 0;
+}
+
+static inline void ad5380_spi_unregister_driver(void)
+{
+}
+
+#endif
+
+#if IS_ENABLED(CONFIG_I2C)
+
+static int __devinit ad5380_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct regmap *regmap;
+
+ regmap = regmap_init_i2c(i2c, &ad5380_regmap_config);
+
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ return ad5380_probe(&i2c->dev, regmap, id->driver_data, id->name);
+}
+
+static int __devexit ad5380_i2c_remove(struct i2c_client *i2c)
+{
+ return ad5380_remove(&i2c->dev);
+}
+
+static const struct i2c_device_id ad5380_i2c_ids[] = {
+ { "ad5380-3", ID_AD5380_3 },
+ { "ad5380-5", ID_AD5380_5 },
+ { "ad5381-3", ID_AD5381_3 },
+ { "ad5381-5", ID_AD5381_5 },
+ { "ad5382-3", ID_AD5382_3 },
+ { "ad5382-5", ID_AD5382_5 },
+ { "ad5383-3", ID_AD5383_3 },
+ { "ad5383-5", ID_AD5383_5 },
+ { "ad5384-3", ID_AD5380_3 },
+ { "ad5384-5", ID_AD5380_5 },
+ { "ad5390-3", ID_AD5390_3 },
+ { "ad5390-5", ID_AD5390_5 },
+ { "ad5391-3", ID_AD5391_3 },
+ { "ad5391-5", ID_AD5391_5 },
+ { "ad5392-3", ID_AD5392_3 },
+ { "ad5392-5", ID_AD5392_5 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ad5380_i2c_ids);
+
+static struct i2c_driver ad5380_i2c_driver = {
+ .driver = {
+ .name = "ad5380",
+ .owner = THIS_MODULE,
+ },
+ .probe = ad5380_i2c_probe,
+ .remove = __devexit_p(ad5380_i2c_remove),
+ .id_table = ad5380_i2c_ids,
+};
+
+static inline int ad5380_i2c_register_driver(void)
+{
+ return i2c_add_driver(&ad5380_i2c_driver);
+}
+
+static inline void ad5380_i2c_unregister_driver(void)
+{
+ i2c_del_driver(&ad5380_i2c_driver);
+}
+
+#else
+
+static inline int ad5380_i2c_register_driver(void)
+{
+ return 0;
+}
+
+static inline void ad5380_i2c_unregister_driver(void)
+{
+}
+
+#endif
+
+static int __init ad5380_spi_init(void)
+{
+ int ret;
+
+ ret = ad5380_spi_register_driver();
+ if (ret)
+ return ret;
+
+ ret = ad5380_i2c_register_driver();
+ if (ret) {
+ ad5380_spi_unregister_driver();
+ return ret;
+ }
+
+ return 0;
+}
+module_init(ad5380_spi_init);
+
+static void __exit ad5380_spi_exit(void)
+{
+ ad5380_i2c_unregister_driver();
+ ad5380_spi_unregister_driver();
+
+}
+module_exit(ad5380_spi_exit);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("Analog Devices AD5380/81/82/83/84/90/91/92 DAC");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/dac/ad5421.c b/drivers/staging/iio/dac/ad5421.c
new file mode 100644
index 000000000000..71ee86824763
--- /dev/null
+++ b/drivers/staging/iio/dac/ad5421.c
@@ -0,0 +1,555 @@
+/*
+ * AD5421 Digital to analog converters driver
+ *
+ * Copyright 2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "../events.h"
+#include "dac.h"
+#include "ad5421.h"
+
+
+#define AD5421_REG_DAC_DATA 0x1
+#define AD5421_REG_CTRL 0x2
+#define AD5421_REG_OFFSET 0x3
+#define AD5421_REG_GAIN 0x4
+/* The load DAC and fault registers share the same register number. Writing to
+ * it issues a DAC load command, reading from it returns the fault status. */
+#define AD5421_REG_LOAD_DAC 0x5
+#define AD5421_REG_FAULT 0x5
+#define AD5421_REG_FORCE_ALARM_CURRENT 0x6
+#define AD5421_REG_RESET 0x7
+#define AD5421_REG_START_CONVERSION 0x8
+#define AD5421_REG_NOOP 0x9
+
+#define AD5421_CTRL_WATCHDOG_DISABLE BIT(12)
+#define AD5421_CTRL_AUTO_FAULT_READBACK BIT(11)
+#define AD5421_CTRL_MIN_CURRENT BIT(9)
+#define AD5421_CTRL_ADC_SOURCE_TEMP BIT(8)
+#define AD5421_CTRL_ADC_ENABLE BIT(7)
+#define AD5421_CTRL_PWR_DOWN_INT_VREF BIT(6)
+
+#define AD5421_FAULT_SPI BIT(15)
+#define AD5421_FAULT_PEC BIT(14)
+#define AD5421_FAULT_OVER_CURRENT BIT(13)
+#define AD5421_FAULT_UNDER_CURRENT BIT(12)
+#define AD5421_FAULT_TEMP_OVER_140 BIT(11)
+#define AD5421_FAULT_TEMP_OVER_100 BIT(10)
+#define AD5421_FAULT_UNDER_VOLTAGE_6V BIT(9)
+#define AD5421_FAULT_UNDER_VOLTAGE_12V BIT(8)
+
+/* These bits will cause the fault pin to go high */
+#define AD5421_FAULT_TRIGGER_IRQ \
+ (AD5421_FAULT_SPI | AD5421_FAULT_PEC | AD5421_FAULT_OVER_CURRENT | \
+ AD5421_FAULT_UNDER_CURRENT | AD5421_FAULT_TEMP_OVER_140)
+
+/**
+ * struct ad5421_state - driver instance specific data
+ * @spi: spi_device
+ * @ctrl: control register cache
+ * @current_range: current range which the device is configured for
+ * @fault_mask:	software masking of events
+ * @data:	spi transfer buffers
+ */
+struct ad5421_state {
+ struct spi_device *spi;
+ unsigned int ctrl;
+ enum ad5421_current_range current_range;
+ unsigned int fault_mask;
+
+ /*
+ * DMA (thus cache coherency maintenance) requires the
+ * transfer buffers to live in their own cache lines.
+ */
+ union {
+ u32 d32;
+ u8 d8[4];
+ } data[2] ____cacheline_aligned;
+};
+
+static const struct iio_chan_spec ad5421_channels[] = {
+ {
+ .type = IIO_CURRENT,
+ .indexed = 1,
+ .output = 1,
+ .channel = 0,
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_OFFSET_SHARED_BIT |
+ IIO_CHAN_INFO_CALIBSCALE_SEPARATE_BIT |
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT,
+ .scan_type = IIO_ST('u', 16, 16, 0),
+ .event_mask = IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING) |
+ IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING),
+ },
+ {
+ .type = IIO_TEMP,
+ .channel = -1,
+ .event_mask = IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING),
+ },
+};
+
+static int ad5421_write_unlocked(struct iio_dev *indio_dev,
+ unsigned int reg, unsigned int val)
+{
+ struct ad5421_state *st = iio_priv(indio_dev);
+
+ st->data[0].d32 = cpu_to_be32((reg << 16) | val);
+
+ return spi_write(st->spi, &st->data[0].d8[1], 3);
+}
+
+static int ad5421_write(struct iio_dev *indio_dev, unsigned int reg,
+ unsigned int val)
+{
+ int ret;
+
+ mutex_lock(&indio_dev->mlock);
+ ret = ad5421_write_unlocked(indio_dev, reg, val);
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret;
+}
+
+static int ad5421_read(struct iio_dev *indio_dev, unsigned int reg)
+{
+ struct ad5421_state *st = iio_priv(indio_dev);
+ struct spi_message m;
+ int ret;
+ struct spi_transfer t[] = {
+ {
+ .tx_buf = &st->data[0].d8[1],
+ .len = 3,
+ .cs_change = 1,
+ }, {
+ .rx_buf = &st->data[1].d8[1],
+ .len = 3,
+ },
+ };
+
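+	/*
+	 * A read is a two-frame transaction: the first frame sends the read
+	 * command (bit 23 set), the second frame clocks the register value out.
+	 */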
+ spi_message_init(&m);
+ spi_message_add_tail(&t[0], &m);
+ spi_message_add_tail(&t[1], &m);
+
+ mutex_lock(&indio_dev->mlock);
+
+ st->data[0].d32 = cpu_to_be32((1 << 23) | (reg << 16));
+
+ ret = spi_sync(st->spi, &m);
+ if (ret >= 0)
+ ret = be32_to_cpu(st->data[1].d32) & 0xffff;
+
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret;
+}
+
+static int ad5421_update_ctrl(struct iio_dev *indio_dev, unsigned int set,
+ unsigned int clr)
+{
+ struct ad5421_state *st = iio_priv(indio_dev);
+	int ret;
+
+ mutex_lock(&indio_dev->mlock);
+
+ st->ctrl &= ~clr;
+ st->ctrl |= set;
+
+ ret = ad5421_write_unlocked(indio_dev, AD5421_REG_CTRL, st->ctrl);
+
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret;
+}
+
+static irqreturn_t ad5421_fault_handler(int irq, void *data)
+{
+ struct iio_dev *indio_dev = data;
+ struct ad5421_state *st = iio_priv(indio_dev);
+ unsigned int fault;
+ unsigned int old_fault = 0;
+ unsigned int events;
+
+ fault = ad5421_read(indio_dev, AD5421_REG_FAULT);
+ if (!fault)
+ return IRQ_NONE;
+
+ /* If we had a fault, this might mean that the DAC has lost its state
+ * and has been reset. Make sure that the control register actually
+ * contains what we expect it to contain. Otherwise the watchdog might
+ * be enabled and we get watchdog timeout faults, which will render the
+ * DAC unusable. */
+ ad5421_update_ctrl(indio_dev, 0, 0);
+
+
+ /* The fault pin stays high as long as a fault condition is present and
+ * it is not possible to mask fault conditions. For certain fault
+	 * conditions, such as over-temperature, it takes some time until the
+	 * fault condition disappears. If we exited the interrupt handler
+	 * immediately after handling the event, it would be entered again
+	 * instantly. Thus we fall back to polling in case we detect that
+	 * an interrupt condition is still present.
+ */
+ do {
+		/* 0xffff is an invalid value for the register and will only be
+ * read if there has been a communication error */
+ if (fault == 0xffff)
+ fault = 0;
+
+ /* we are only interested in new events */
+ events = (old_fault ^ fault) & fault;
+ events &= st->fault_mask;
+
+ if (events & AD5421_FAULT_OVER_CURRENT) {
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_CURRENT,
+ 0,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_RISING),
+ iio_get_time_ns());
+ }
+
+ if (events & AD5421_FAULT_UNDER_CURRENT) {
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_CURRENT,
+ 0,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_FALLING),
+ iio_get_time_ns());
+ }
+
+ if (events & AD5421_FAULT_TEMP_OVER_140) {
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_TEMP,
+ 0,
+						    IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_RISING),
+ iio_get_time_ns());
+ }
+
+ old_fault = fault;
+ fault = ad5421_read(indio_dev, AD5421_REG_FAULT);
+
+ /* still active? go to sleep for some time */
+ if (fault & AD5421_FAULT_TRIGGER_IRQ)
+ msleep(1000);
+
+ } while (fault & AD5421_FAULT_TRIGGER_IRQ);
+
+
+ return IRQ_HANDLED;
+}
+
+static void ad5421_get_current_min_max(struct ad5421_state *st,
+ unsigned int *min, unsigned int *max)
+{
+ /* The current range is configured using external pins, which are
+ * usually hard-wired and not run-time switchable. */
+ switch (st->current_range) {
+ case AD5421_CURRENT_RANGE_4mA_20mA:
+ *min = 4000;
+ *max = 20000;
+ break;
+ case AD5421_CURRENT_RANGE_3mA8_21mA:
+ *min = 3800;
+ *max = 21000;
+ break;
+ case AD5421_CURRENT_RANGE_3mA2_24mA:
+ *min = 3200;
+ *max = 24000;
+ break;
+ default:
+ *min = 0;
+ *max = 1;
+ break;
+ }
+}
+
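+/*
+ * Offset and scale are derived from the configured current range so that
+ * (raw + offset) * scale maps a DAC code to the output current.
+ */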
+static inline unsigned int ad5421_get_offset(struct ad5421_state *st)
+{
+ unsigned int min, max;
+
+ ad5421_get_current_min_max(st, &min, &max);
+ return (min * (1 << 16)) / (max - min);
+}
+
+static inline unsigned int ad5421_get_scale(struct ad5421_state *st)
+{
+ unsigned int min, max;
+
+ ad5421_get_current_min_max(st, &min, &max);
+ return ((max - min) * 1000) / (1 << 16);
+}
+
+static int ad5421_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val, int *val2, long m)
+{
+ struct ad5421_state *st = iio_priv(indio_dev);
+ int ret;
+
+ if (chan->type != IIO_CURRENT)
+ return -EINVAL;
+
+ switch (m) {
+ case 0:
+ ret = ad5421_read(indio_dev, AD5421_REG_DAC_DATA);
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = 0;
+ *val2 = ad5421_get_scale(st);
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_OFFSET:
+ *val = ad5421_get_offset(st);
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_CALIBBIAS:
+ ret = ad5421_read(indio_dev, AD5421_REG_OFFSET);
+ if (ret < 0)
+ return ret;
+ *val = ret - 32768;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_CALIBSCALE:
+ ret = ad5421_read(indio_dev, AD5421_REG_GAIN);
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ return IIO_VAL_INT;
+ }
+
+ return -EINVAL;
+}
+
+static int ad5421_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val, int val2, long mask)
+{
+ const unsigned int max_val = 1 << 16;
+
+ switch (mask) {
+ case 0:
+ if (val >= max_val || val < 0)
+ return -EINVAL;
+
+ return ad5421_write(indio_dev, AD5421_REG_DAC_DATA, val);
+ case IIO_CHAN_INFO_CALIBBIAS:
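+		/* The offset register is offset-binary, 0x8000 means zero bias */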
+ val += 32768;
+ if (val >= max_val || val < 0)
+ return -EINVAL;
+
+ return ad5421_write(indio_dev, AD5421_REG_OFFSET, val);
+ case IIO_CHAN_INFO_CALIBSCALE:
+ if (val >= max_val || val < 0)
+ return -EINVAL;
+
+ return ad5421_write(indio_dev, AD5421_REG_GAIN, val);
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static int ad5421_write_event_config(struct iio_dev *indio_dev,
+ u64 event_code, int state)
+{
+ struct ad5421_state *st = iio_priv(indio_dev);
+ unsigned int mask;
+
+ switch (IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(event_code)) {
+ case IIO_CURRENT:
+ if (IIO_EVENT_CODE_EXTRACT_DIR(event_code) ==
+ IIO_EV_DIR_RISING)
+ mask = AD5421_FAULT_OVER_CURRENT;
+ else
+ mask = AD5421_FAULT_UNDER_CURRENT;
+ break;
+ case IIO_TEMP:
+ mask = AD5421_FAULT_TEMP_OVER_140;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ mutex_lock(&indio_dev->mlock);
+ if (state)
+ st->fault_mask |= mask;
+ else
+ st->fault_mask &= ~mask;
+ mutex_unlock(&indio_dev->mlock);
+
+ return 0;
+}
+
+static int ad5421_read_event_config(struct iio_dev *indio_dev,
+ u64 event_code)
+{
+ struct ad5421_state *st = iio_priv(indio_dev);
+ unsigned int mask;
+
+ switch (IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(event_code)) {
+ case IIO_CURRENT:
+ if (IIO_EVENT_CODE_EXTRACT_DIR(event_code) ==
+ IIO_EV_DIR_RISING)
+ mask = AD5421_FAULT_OVER_CURRENT;
+ else
+ mask = AD5421_FAULT_UNDER_CURRENT;
+ break;
+ case IIO_TEMP:
+ mask = AD5421_FAULT_TEMP_OVER_140;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return (bool)(st->fault_mask & mask);
+}
+
+static int ad5421_read_event_value(struct iio_dev *indio_dev, u64 event_code,
+ int *val)
+{
+ int ret;
+
+ switch (IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(event_code)) {
+ case IIO_CURRENT:
+ ret = ad5421_read(indio_dev, AD5421_REG_DAC_DATA);
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ break;
+ case IIO_TEMP:
+ *val = 140000;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct iio_info ad5421_info = {
+ .read_raw = ad5421_read_raw,
+ .write_raw = ad5421_write_raw,
+ .read_event_config = ad5421_read_event_config,
+ .write_event_config = ad5421_write_event_config,
+ .read_event_value = ad5421_read_event_value,
+ .driver_module = THIS_MODULE,
+};
+
+static int __devinit ad5421_probe(struct spi_device *spi)
+{
+ struct ad5421_platform_data *pdata = dev_get_platdata(&spi->dev);
+ struct iio_dev *indio_dev;
+ struct ad5421_state *st;
+ int ret;
+
+ indio_dev = iio_allocate_device(sizeof(*st));
+ if (indio_dev == NULL) {
+ dev_err(&spi->dev, "Failed to allocate iio device\n");
+ return -ENOMEM;
+ }
+
+ st = iio_priv(indio_dev);
+ spi_set_drvdata(spi, indio_dev);
+
+ st->spi = spi;
+
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->name = "ad5421";
+ indio_dev->info = &ad5421_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = ad5421_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ad5421_channels);
+
+ st->ctrl = AD5421_CTRL_WATCHDOG_DISABLE |
+ AD5421_CTRL_AUTO_FAULT_READBACK;
+
+ if (pdata) {
+ st->current_range = pdata->current_range;
+ if (pdata->external_vref)
+ st->ctrl |= AD5421_CTRL_PWR_DOWN_INT_VREF;
+ } else {
+ st->current_range = AD5421_CURRENT_RANGE_4mA_20mA;
+ }
+
+ /* write initial ctrl register value */
+ ad5421_update_ctrl(indio_dev, 0, 0);
+
+ if (spi->irq) {
+ ret = request_threaded_irq(spi->irq,
+ NULL,
+ ad5421_fault_handler,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ "ad5421 fault",
+ indio_dev);
+ if (ret)
+ goto error_free;
+ }
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(&spi->dev, "Failed to register iio device: %d\n", ret);
+ goto error_free_irq;
+ }
+
+ return 0;
+
+error_free_irq:
+ if (spi->irq)
+ free_irq(spi->irq, indio_dev);
+error_free:
+ iio_free_device(indio_dev);
+
+ return ret;
+}
+
+static int __devexit ad5421_remove(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+
+ iio_device_unregister(indio_dev);
+ if (spi->irq)
+ free_irq(spi->irq, indio_dev);
+ iio_free_device(indio_dev);
+
+ return 0;
+}
+
+static struct spi_driver ad5421_driver = {
+ .driver = {
+ .name = "ad5421",
+ .owner = THIS_MODULE,
+ },
+ .probe = ad5421_probe,
+ .remove = __devexit_p(ad5421_remove),
+};
+
+static __init int ad5421_init(void)
+{
+ return spi_register_driver(&ad5421_driver);
+}
+module_init(ad5421_init);
+
+static __exit void ad5421_exit(void)
+{
+ spi_unregister_driver(&ad5421_driver);
+}
+module_exit(ad5421_exit);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("Analog Devices AD5421 DAC");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:ad5421");
diff --git a/drivers/staging/iio/dac/ad5421.h b/drivers/staging/iio/dac/ad5421.h
new file mode 100644
index 000000000000..cd2bb84ff1b0
--- /dev/null
+++ b/drivers/staging/iio/dac/ad5421.h
@@ -0,0 +1,32 @@
+#ifndef __IIO_DAC_AD5421_H__
+#define __IIO_DAC_AD5421_H__
+
+/*
+ * TODO: This file needs to go into include/linux/iio
+ */
+
+/**
+ * enum ad5421_current_range - Current range the AD5421 is configured for.
+ * @AD5421_CURRENT_RANGE_4mA_20mA: 4 mA to 20 mA (RANGE1,0 pins = 00)
+ * @AD5421_CURRENT_RANGE_3mA8_21mA: 3.8 mA to 21 mA (RANGE1,0 pins = x1)
+ * @AD5421_CURRENT_RANGE_3mA2_24mA: 3.2 mA to 24 mA (RANGE1,0 pins = 10)
+ */
+
+enum ad5421_current_range {
+ AD5421_CURRENT_RANGE_4mA_20mA,
+ AD5421_CURRENT_RANGE_3mA8_21mA,
+ AD5421_CURRENT_RANGE_3mA2_24mA,
+};
+
+/**
+ * struct ad5421_platform_data - AD5421 DAC driver platform data
+ * @external_vref: whether an external reference voltage is used or not
+ * @current_range: Current range the AD5421 is configured for
+ */
+
+struct ad5421_platform_data {
+ bool external_vref;
+ enum ad5421_current_range current_range;
+};
+
+#endif
diff --git a/drivers/staging/iio/dac/ad5446.c b/drivers/staging/iio/dac/ad5446.c
index ac3bc5f52dcc..693e7482524c 100644
--- a/drivers/staging/iio/dac/ad5446.c
+++ b/drivers/staging/iio/dac/ad5446.c
@@ -26,19 +26,17 @@
static void ad5446_store_sample(struct ad5446_state *st, unsigned val)
{
- st->data.d16 = cpu_to_be16(AD5446_LOAD |
- (val << st->chip_info->left_shift));
+ st->data.d16 = cpu_to_be16(AD5446_LOAD | val);
}
static void ad5542_store_sample(struct ad5446_state *st, unsigned val)
{
- st->data.d16 = cpu_to_be16(val << st->chip_info->left_shift);
+ st->data.d16 = cpu_to_be16(val);
}
static void ad5620_store_sample(struct ad5446_state *st, unsigned val)
{
- st->data.d16 = cpu_to_be16(AD5620_LOAD |
- (val << st->chip_info->left_shift));
+ st->data.d16 = cpu_to_be16(AD5620_LOAD | val);
}
static void ad5660_store_sample(struct ad5446_state *st, unsigned val)
@@ -63,50 +61,6 @@ static void ad5660_store_pwr_down(struct ad5446_state *st, unsigned mode)
st->data.d24[2] = val & 0xFF;
}
-static ssize_t ad5446_write(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ad5446_state *st = iio_priv(indio_dev);
- int ret;
- long val;
-
- ret = strict_strtol(buf, 10, &val);
- if (ret)
- goto error_ret;
-
- if (val > RES_MASK(st->chip_info->bits)) {
- ret = -EINVAL;
- goto error_ret;
- }
-
- mutex_lock(&indio_dev->mlock);
- st->cached_val = val;
- st->chip_info->store_sample(st, val);
- ret = spi_sync(st->spi, &st->msg);
- mutex_unlock(&indio_dev->mlock);
-
-error_ret:
- return ret ? ret : len;
-}
-
-static IIO_DEV_ATTR_OUT_RAW(0, ad5446_write, 0);
-
-static ssize_t ad5446_show_scale(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ad5446_state *st = iio_priv(indio_dev);
- /* Corresponds to Vref / 2^(bits) */
- unsigned int scale_uv = (st->vref_mv * 1000) >> st->chip_info->bits;
-
- return sprintf(buf, "%d.%03d\n", scale_uv / 1000, scale_uv % 1000);
-}
-static IIO_DEVICE_ATTR(out_voltage_scale, S_IRUGO, ad5446_show_scale, NULL, 0);
-
static ssize_t ad5446_write_powerdown_mode(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
@@ -189,22 +143,20 @@ static IIO_DEVICE_ATTR(out_voltage0_powerdown, S_IRUGO | S_IWUSR,
ad5446_write_dac_powerdown, 0);
static struct attribute *ad5446_attributes[] = {
- &iio_dev_attr_out_voltage0_raw.dev_attr.attr,
- &iio_dev_attr_out_voltage_scale.dev_attr.attr,
&iio_dev_attr_out_voltage0_powerdown.dev_attr.attr,
&iio_dev_attr_out_voltage_powerdown_mode.dev_attr.attr,
&iio_const_attr_out_voltage_powerdown_mode_available.dev_attr.attr,
NULL,
};
-static mode_t ad5446_attr_is_visible(struct kobject *kobj,
+static umode_t ad5446_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct ad5446_state *st = iio_priv(indio_dev);
- mode_t mode = attr->mode;
+ umode_t mode = attr->mode;
if (!st->chip_info->store_pwr_down &&
(attr == &iio_dev_attr_out_voltage0_powerdown.dev_attr.attr ||
@@ -223,121 +175,148 @@ static const struct attribute_group ad5446_attribute_group = {
.is_visible = ad5446_attr_is_visible,
};
+#define AD5446_CHANNEL(bits, storage, shift) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .output = 1, \
+ .channel = 0, \
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT, \
+ .scan_type = IIO_ST('u', (bits), (storage), (shift)) \
+}
+
static const struct ad5446_chip_info ad5446_chip_info_tbl[] = {
[ID_AD5444] = {
- .bits = 12,
- .storagebits = 16,
- .left_shift = 2,
+ .channel = AD5446_CHANNEL(12, 16, 2),
.store_sample = ad5446_store_sample,
},
[ID_AD5446] = {
- .bits = 14,
- .storagebits = 16,
- .left_shift = 0,
+ .channel = AD5446_CHANNEL(14, 16, 0),
.store_sample = ad5446_store_sample,
},
[ID_AD5541A] = {
- .bits = 16,
- .storagebits = 16,
- .left_shift = 0,
+ .channel = AD5446_CHANNEL(16, 16, 0),
.store_sample = ad5542_store_sample,
},
[ID_AD5542A] = {
- .bits = 16,
- .storagebits = 16,
- .left_shift = 0,
+ .channel = AD5446_CHANNEL(16, 16, 0),
.store_sample = ad5542_store_sample,
},
[ID_AD5543] = {
- .bits = 16,
- .storagebits = 16,
- .left_shift = 0,
+ .channel = AD5446_CHANNEL(16, 16, 0),
.store_sample = ad5542_store_sample,
},
[ID_AD5512A] = {
- .bits = 12,
- .storagebits = 16,
- .left_shift = 4,
+ .channel = AD5446_CHANNEL(12, 16, 4),
.store_sample = ad5542_store_sample,
},
[ID_AD5553] = {
- .bits = 14,
- .storagebits = 16,
- .left_shift = 0,
+ .channel = AD5446_CHANNEL(14, 16, 0),
.store_sample = ad5542_store_sample,
},
[ID_AD5601] = {
- .bits = 8,
- .storagebits = 16,
- .left_shift = 6,
+ .channel = AD5446_CHANNEL(8, 16, 6),
.store_sample = ad5542_store_sample,
.store_pwr_down = ad5620_store_pwr_down,
},
[ID_AD5611] = {
- .bits = 10,
- .storagebits = 16,
- .left_shift = 4,
+ .channel = AD5446_CHANNEL(10, 16, 4),
.store_sample = ad5542_store_sample,
.store_pwr_down = ad5620_store_pwr_down,
},
[ID_AD5621] = {
- .bits = 12,
- .storagebits = 16,
- .left_shift = 2,
+ .channel = AD5446_CHANNEL(12, 16, 2),
.store_sample = ad5542_store_sample,
.store_pwr_down = ad5620_store_pwr_down,
},
[ID_AD5620_2500] = {
- .bits = 12,
- .storagebits = 16,
- .left_shift = 2,
+ .channel = AD5446_CHANNEL(12, 16, 2),
.int_vref_mv = 2500,
.store_sample = ad5620_store_sample,
.store_pwr_down = ad5620_store_pwr_down,
},
[ID_AD5620_1250] = {
- .bits = 12,
- .storagebits = 16,
- .left_shift = 2,
+ .channel = AD5446_CHANNEL(12, 16, 2),
.int_vref_mv = 1250,
.store_sample = ad5620_store_sample,
.store_pwr_down = ad5620_store_pwr_down,
},
[ID_AD5640_2500] = {
- .bits = 14,
- .storagebits = 16,
- .left_shift = 0,
+ .channel = AD5446_CHANNEL(14, 16, 0),
.int_vref_mv = 2500,
.store_sample = ad5620_store_sample,
.store_pwr_down = ad5620_store_pwr_down,
},
[ID_AD5640_1250] = {
- .bits = 14,
- .storagebits = 16,
- .left_shift = 0,
+ .channel = AD5446_CHANNEL(14, 16, 0),
.int_vref_mv = 1250,
.store_sample = ad5620_store_sample,
.store_pwr_down = ad5620_store_pwr_down,
},
[ID_AD5660_2500] = {
- .bits = 16,
- .storagebits = 24,
- .left_shift = 0,
+ .channel = AD5446_CHANNEL(16, 16, 0),
.int_vref_mv = 2500,
.store_sample = ad5660_store_sample,
.store_pwr_down = ad5660_store_pwr_down,
},
[ID_AD5660_1250] = {
- .bits = 16,
- .storagebits = 24,
- .left_shift = 0,
+ .channel = AD5446_CHANNEL(16, 16, 0),
.int_vref_mv = 1250,
.store_sample = ad5660_store_sample,
.store_pwr_down = ad5660_store_pwr_down,
},
};
+static int ad5446_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long m)
+{
+ struct ad5446_state *st = iio_priv(indio_dev);
+ unsigned long scale_uv;
+
+ switch (m) {
+ case IIO_CHAN_INFO_SCALE:
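+		/* Corresponds to Vref / 2^(bits) */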
+ scale_uv = (st->vref_mv * 1000) >> chan->scan_type.realbits;
+ *val = scale_uv / 1000;
+ *val2 = (scale_uv % 1000) * 1000;
+ return IIO_VAL_INT_PLUS_MICRO;
+
+ }
+ return -EINVAL;
+}
+
+static int ad5446_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long mask)
+{
+ struct ad5446_state *st = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case 0:
+ if (val >= (1 << chan->scan_type.realbits) || val < 0)
+ return -EINVAL;
+
+ val <<= chan->scan_type.shift;
+ mutex_lock(&indio_dev->mlock);
+ st->cached_val = val;
+ st->chip_info->store_sample(st, val);
+ ret = spi_sync(st->spi, &st->msg);
+ mutex_unlock(&indio_dev->mlock);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
static const struct iio_info ad5446_info = {
+ .read_raw = ad5446_read_raw,
+ .write_raw = ad5446_write_raw,
.attrs = &ad5446_attribute_group,
.driver_module = THIS_MODULE,
};
@@ -376,11 +355,13 @@ static int __devinit ad5446_probe(struct spi_device *spi)
indio_dev->name = spi_get_device_id(spi)->name;
indio_dev->info = &ad5446_info;
indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = &st->chip_info->channel;
+ indio_dev->num_channels = 1;
/* Setup default message */
st->xfer.tx_buf = &st->data;
- st->xfer.len = st->chip_info->storagebits / 8;
+ st->xfer.len = st->chip_info->channel.scan_type.storagebits / 8;
spi_message_init(&st->msg);
spi_message_add_tail(&st->xfer, &st->msg);
@@ -454,11 +435,11 @@ static const struct spi_device_id ad5446_id[] = {
{"ad5660-1250", ID_AD5660_1250},
{}
};
+MODULE_DEVICE_TABLE(spi, ad5446_id);
static struct spi_driver ad5446_driver = {
.driver = {
.name = "ad5446",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = ad5446_probe,
@@ -470,4 +451,3 @@ module_spi_driver(ad5446_driver);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("Analog Devices AD5444/AD5446 DAC");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("spi:ad5446");
diff --git a/drivers/staging/iio/dac/ad5446.h b/drivers/staging/iio/dac/ad5446.h
index 7118d653ac3e..4ea3476fb065 100644
--- a/drivers/staging/iio/dac/ad5446.h
+++ b/drivers/staging/iio/dac/ad5446.h
@@ -25,8 +25,6 @@
#define AD5660_PWRDWN_100k (0x2 << 16) /* Power-down: 100kOhm to GND */
#define AD5660_PWRDWN_TRISTATE (0x3 << 16) /* Power-down: Three-state */
-#define RES_MASK(bits) ((1 << (bits)) - 1)
-
#define MODE_PWRDWN_1k 0x1
#define MODE_PWRDWN_100k 0x2
#define MODE_PWRDWN_TRISTATE 0x3
@@ -62,18 +60,14 @@ struct ad5446_state {
/**
* struct ad5446_chip_info - chip specific information
- * @bits: accuracy of the DAC in bits
- * @storagebits: number of bits written to the DAC
- * @left_shift: number of bits the datum must be shifted
+ * @channel: channel spec for the DAC
* @int_vref_mv: AD5620/40/60: the internal reference voltage
* @store_sample: chip specific helper function to store the datum
 * @store_pwr_down:	chip specific helper function to store the power-down cmd
*/
struct ad5446_chip_info {
- u8 bits;
- u8 storagebits;
- u8 left_shift;
+ struct iio_chan_spec channel;
u16 int_vref_mv;
void (*store_sample) (struct ad5446_state *st, unsigned val);
void (*store_pwr_down) (struct ad5446_state *st, unsigned mode);
diff --git a/drivers/staging/iio/dac/ad5504.c b/drivers/staging/iio/dac/ad5504.c
index 57539ce8e6cf..bc17205fe722 100644
--- a/drivers/staging/iio/dac/ad5504.c
+++ b/drivers/staging/iio/dac/ad5504.c
@@ -18,9 +18,27 @@
#include "../iio.h"
#include "../sysfs.h"
+#include "../events.h"
#include "dac.h"
#include "ad5504.h"
+#define AD5504_CHANNEL(_chan) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .output = 1, \
+ .channel = (_chan), \
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT, \
+ .address = AD5504_ADDR_DAC(_chan), \
+ .scan_type = IIO_ST('u', 12, 16, 0), \
+}
+
+static const struct iio_chan_spec ad5504_channels[] = {
+ AD5504_CHANNEL(0),
+ AD5504_CHANNEL(1),
+ AD5504_CHANNEL(2),
+ AD5504_CHANNEL(3),
+};
+
static int ad5504_spi_write(struct spi_device *spi, u8 addr, u16 val)
{
u16 tmp = cpu_to_be16(AD5504_CMD_WRITE |
@@ -30,13 +48,14 @@ static int ad5504_spi_write(struct spi_device *spi, u8 addr, u16 val)
return spi_write(spi, (u8 *)&tmp, 2);
}
-static int ad5504_spi_read(struct spi_device *spi, u8 addr, u16 *val)
+static int ad5504_spi_read(struct spi_device *spi, u8 addr)
{
u16 tmp = cpu_to_be16(AD5504_CMD_READ | AD5504_ADDR(addr));
+ u16 val;
int ret;
struct spi_transfer t = {
.tx_buf = &tmp,
- .rx_buf = val,
+ .rx_buf = &val,
.len = 2,
};
struct spi_message m;
@@ -45,44 +64,61 @@ static int ad5504_spi_read(struct spi_device *spi, u8 addr, u16 *val)
spi_message_add_tail(&t, &m);
ret = spi_sync(spi, &m);
- *val = be16_to_cpu(*val) & AD5504_RES_MASK;
+ if (ret < 0)
+ return ret;
- return ret;
+ return be16_to_cpu(val) & AD5504_RES_MASK;
}
-static ssize_t ad5504_write_dac(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
+static int ad5504_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long m)
{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct ad5504_state *st = iio_priv(indio_dev);
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- long readin;
+ unsigned long scale_uv;
int ret;
- ret = strict_strtol(buf, 10, &readin);
- if (ret)
- return ret;
+ switch (m) {
+ case 0:
+ ret = ad5504_spi_read(st->spi, chan->address);
+ if (ret < 0)
+ return ret;
- ret = ad5504_spi_write(st->spi, this_attr->address, readin);
- return ret ? ret : len;
+ *val = ret;
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
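+		/* Corresponds to Vref / 2^(bits) */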
+ scale_uv = (st->vref_mv * 1000) >> chan->scan_type.realbits;
+ *val = scale_uv / 1000;
+ *val2 = (scale_uv % 1000) * 1000;
+ return IIO_VAL_INT_PLUS_MICRO;
+
+ }
+ return -EINVAL;
}
-static ssize_t ad5504_read_dac(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static int ad5504_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long mask)
{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct ad5504_state *st = iio_priv(indio_dev);
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
int ret;
- u16 val;
- ret = ad5504_spi_read(st->spi, this_attr->address, &val);
- if (ret)
- return ret;
+ switch (mask) {
+ case 0:
+ if (val >= (1 << chan->scan_type.realbits) || val < 0)
+ return -EINVAL;
- return sprintf(buf, "%d\n", val);
+ return ad5504_spi_write(st->spi, chan->address, val);
+ default:
+		ret = -EINVAL;
+	}
+
+	return ret;
}
static ssize_t ad5504_read_powerdown_mode(struct device *dev,
@@ -157,32 +193,6 @@ static ssize_t ad5504_write_dac_powerdown(struct device *dev,
return ret ? ret : len;
}
-static ssize_t ad5504_show_scale(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ad5504_state *st = iio_priv(indio_dev);
- /* Corresponds to Vref / 2^(bits) */
- unsigned int scale_uv = (st->vref_mv * 1000) >> AD5505_BITS;
-
- return sprintf(buf, "%d.%03d\n", scale_uv / 1000, scale_uv % 1000);
-}
-static IIO_DEVICE_ATTR(out_voltage_scale, S_IRUGO, ad5504_show_scale, NULL, 0);
-
-#define IIO_DEV_ATTR_OUT_RW_RAW(_num, _show, _store, _addr) \
- IIO_DEVICE_ATTR(out_voltage##_num##_raw, \
- S_IRUGO | S_IWUSR, _show, _store, _addr)
-
-static IIO_DEV_ATTR_OUT_RW_RAW(0, ad5504_read_dac,
- ad5504_write_dac, AD5504_ADDR_DAC0);
-static IIO_DEV_ATTR_OUT_RW_RAW(1, ad5504_read_dac,
- ad5504_write_dac, AD5504_ADDR_DAC1);
-static IIO_DEV_ATTR_OUT_RW_RAW(2, ad5504_read_dac,
- ad5504_write_dac, AD5504_ADDR_DAC2);
-static IIO_DEV_ATTR_OUT_RW_RAW(3, ad5504_read_dac,
- ad5504_write_dac, AD5504_ADDR_DAC3);
-
static IIO_DEVICE_ATTR(out_voltage_powerdown_mode, S_IRUGO |
S_IWUSR, ad5504_read_powerdown_mode,
ad5504_write_powerdown_mode, 0);
@@ -203,17 +213,12 @@ static IIO_DEV_ATTR_DAC_POWERDOWN(3, ad5504_read_dac_powerdown,
ad5504_write_dac_powerdown, 3);
static struct attribute *ad5504_attributes[] = {
- &iio_dev_attr_out_voltage0_raw.dev_attr.attr,
- &iio_dev_attr_out_voltage1_raw.dev_attr.attr,
- &iio_dev_attr_out_voltage2_raw.dev_attr.attr,
- &iio_dev_attr_out_voltage3_raw.dev_attr.attr,
&iio_dev_attr_out_voltage0_powerdown.dev_attr.attr,
&iio_dev_attr_out_voltage1_powerdown.dev_attr.attr,
&iio_dev_attr_out_voltage2_powerdown.dev_attr.attr,
&iio_dev_attr_out_voltage3_powerdown.dev_attr.attr,
&iio_dev_attr_out_voltage_powerdown_mode.dev_attr.attr,
&iio_const_attr_out_voltage_powerdown_mode_available.dev_attr.attr,
- &iio_dev_attr_out_voltage_scale.dev_attr.attr,
NULL,
};
@@ -222,11 +227,9 @@ static const struct attribute_group ad5504_attribute_group = {
};
static struct attribute *ad5501_attributes[] = {
- &iio_dev_attr_out_voltage0_raw.dev_attr.attr,
&iio_dev_attr_out_voltage0_powerdown.dev_attr.attr,
&iio_dev_attr_out_voltage_powerdown_mode.dev_attr.attr,
&iio_const_attr_out_voltage_powerdown_mode_available.dev_attr.attr,
- &iio_dev_attr_out_voltage_scale.dev_attr.attr,
NULL,
};
@@ -261,12 +264,16 @@ static irqreturn_t ad5504_event_handler(int irq, void *private)
}
static const struct iio_info ad5504_info = {
+ .write_raw = ad5504_write_raw,
+ .read_raw = ad5504_read_raw,
.attrs = &ad5504_attribute_group,
.event_attrs = &ad5504_ev_attribute_group,
.driver_module = THIS_MODULE,
};
static const struct iio_info ad5501_info = {
+ .write_raw = ad5504_write_raw,
+ .read_raw = ad5504_read_raw,
.attrs = &ad5501_attribute_group,
.event_attrs = &ad5504_ev_attribute_group,
.driver_module = THIS_MODULE,
@@ -307,10 +314,14 @@ static int __devinit ad5504_probe(struct spi_device *spi)
st->spi = spi;
indio_dev->dev.parent = &spi->dev;
indio_dev->name = spi_get_device_id(st->spi)->name;
- if (spi_get_device_id(st->spi)->driver_data == ID_AD5501)
+ if (spi_get_device_id(st->spi)->driver_data == ID_AD5501) {
indio_dev->info = &ad5501_info;
- else
+ indio_dev->num_channels = 1;
+ } else {
indio_dev->info = &ad5504_info;
+ indio_dev->num_channels = 4;
+ }
+ indio_dev->channels = ad5504_channels;
indio_dev->modes = INDIO_DIRECT_MODE;
if (spi->irq) {
@@ -367,6 +378,7 @@ static const struct spi_device_id ad5504_id[] = {
{"ad5501", ID_AD5501},
{}
};
+MODULE_DEVICE_TABLE(spi, ad5504_id);
static struct spi_driver ad5504_driver = {
.driver = {
diff --git a/drivers/staging/iio/dac/ad5504.h b/drivers/staging/iio/dac/ad5504.h
index 85beb1dd29b9..afe09522f53c 100644
--- a/drivers/staging/iio/dac/ad5504.h
+++ b/drivers/staging/iio/dac/ad5504.h
@@ -18,10 +18,7 @@
/* Registers */
#define AD5504_ADDR_NOOP 0
-#define AD5504_ADDR_DAC0 1
-#define AD5504_ADDR_DAC1 2
-#define AD5504_ADDR_DAC2 3
-#define AD5504_ADDR_DAC3 4
+#define AD5504_ADDR_DAC(x) ((x) + 1)
#define AD5504_ADDR_ALL_DAC 5
#define AD5504_ADDR_CTRL 7
diff --git a/drivers/staging/iio/dac/ad5624r.h b/drivers/staging/iio/dac/ad5624r.h
index b71c6a03e780..5dca3028cdfd 100644
--- a/drivers/staging/iio/dac/ad5624r.h
+++ b/drivers/staging/iio/dac/ad5624r.h
@@ -32,12 +32,12 @@
/**
* struct ad5624r_chip_info - chip specific information
- * @bits: accuracy of the DAC in bits
+ * @channels: channel spec for the DAC
* @int_vref_mv: AD5620/40/60: the internal reference voltage
*/
struct ad5624r_chip_info {
- u8 bits;
+ const struct iio_chan_spec *channels;
u16 int_vref_mv;
};
diff --git a/drivers/staging/iio/dac/ad5624r_spi.c b/drivers/staging/iio/dac/ad5624r_spi.c
index 6e05f0dbae0b..10c7484366ef 100644
--- a/drivers/staging/iio/dac/ad5624r_spi.c
+++ b/drivers/staging/iio/dac/ad5624r_spi.c
@@ -21,29 +21,51 @@
#include "dac.h"
#include "ad5624r.h"
+#define AD5624R_CHANNEL(_chan, _bits) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .output = 1, \
+ .channel = (_chan), \
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT, \
+ .address = (_chan), \
+ .scan_type = IIO_ST('u', (_bits), 16, 16 - (_bits)), \
+}
+
+#define DECLARE_AD5624R_CHANNELS(_name, _bits) \
+ const struct iio_chan_spec _name##_channels[] = { \
+ AD5624R_CHANNEL(0, _bits), \
+ AD5624R_CHANNEL(1, _bits), \
+ AD5624R_CHANNEL(2, _bits), \
+ AD5624R_CHANNEL(3, _bits), \
+}
+
+static DECLARE_AD5624R_CHANNELS(ad5624r, 12);
+static DECLARE_AD5624R_CHANNELS(ad5644r, 14);
+static DECLARE_AD5624R_CHANNELS(ad5664r, 16);
+
static const struct ad5624r_chip_info ad5624r_chip_info_tbl[] = {
[ID_AD5624R3] = {
- .bits = 12,
- .int_vref_mv = 1250,
- },
- [ID_AD5644R3] = {
- .bits = 14,
- .int_vref_mv = 1250,
- },
- [ID_AD5664R3] = {
- .bits = 16,
+ .channels = ad5624r_channels,
.int_vref_mv = 1250,
},
[ID_AD5624R5] = {
- .bits = 12,
+ .channels = ad5624r_channels,
.int_vref_mv = 2500,
},
+ [ID_AD5644R3] = {
+ .channels = ad5644r_channels,
+ .int_vref_mv = 1250,
+ },
[ID_AD5644R5] = {
- .bits = 14,
+ .channels = ad5644r_channels,
.int_vref_mv = 2500,
},
+ [ID_AD5664R3] = {
+ .channels = ad5664r_channels,
+ .int_vref_mv = 1250,
+ },
[ID_AD5664R5] = {
- .bits = 16,
+ .channels = ad5664r_channels,
.int_vref_mv = 2500,
},
};
@@ -70,24 +92,49 @@ static int ad5624r_spi_write(struct spi_device *spi,
return spi_write(spi, msg, 3);
}
-static ssize_t ad5624r_write_dac(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
+static int ad5624r_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long m)
{
- long readin;
- int ret;
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct ad5624r_state *st = iio_priv(indio_dev);
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ unsigned long scale_uv;
- ret = strict_strtol(buf, 10, &readin);
- if (ret)
- return ret;
+ switch (m) {
+ case IIO_CHAN_INFO_SCALE:
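+		/* Corresponds to Vref / 2^(bits) */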
+ scale_uv = (st->vref_mv * 1000) >> chan->scan_type.realbits;
+ *val = scale_uv / 1000;
+ *val2 = (scale_uv % 1000) * 1000;
+ return IIO_VAL_INT_PLUS_MICRO;
- ret = ad5624r_spi_write(st->us, AD5624R_CMD_WRITE_INPUT_N_UPDATE_N,
- this_attr->address, readin,
- st->chip_info->bits);
- return ret ? ret : len;
+ }
+ return -EINVAL;
+}
+
+static int ad5624r_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long mask)
+{
+ struct ad5624r_state *st = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case 0:
+ if (val >= (1 << chan->scan_type.realbits) || val < 0)
+ return -EINVAL;
+
+ return ad5624r_spi_write(st->us,
+ AD5624R_CMD_WRITE_INPUT_N_UPDATE_N,
+ chan->address, val,
+				chan->scan_type.realbits);
+ default:
+		ret = -EINVAL;
+	}
+
+	return ret;
}
static ssize_t ad5624r_read_powerdown_mode(struct device *dev,
@@ -161,24 +208,6 @@ static ssize_t ad5624r_write_dac_powerdown(struct device *dev,
return ret ? ret : len;
}
-static ssize_t ad5624r_show_scale(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ad5624r_state *st = iio_priv(indio_dev);
- /* Corresponds to Vref / 2^(bits) */
- unsigned int scale_uv = (st->vref_mv * 1000) >> st->chip_info->bits;
-
- return sprintf(buf, "%d.%03d\n", scale_uv / 1000, scale_uv % 1000);
-}
-static IIO_DEVICE_ATTR(out_voltage_scale, S_IRUGO, ad5624r_show_scale, NULL, 0);
-
-static IIO_DEV_ATTR_OUT_RAW(0, ad5624r_write_dac, AD5624R_ADDR_DAC0);
-static IIO_DEV_ATTR_OUT_RAW(1, ad5624r_write_dac, AD5624R_ADDR_DAC1);
-static IIO_DEV_ATTR_OUT_RAW(2, ad5624r_write_dac, AD5624R_ADDR_DAC2);
-static IIO_DEV_ATTR_OUT_RAW(3, ad5624r_write_dac, AD5624R_ADDR_DAC3);
-
static IIO_DEVICE_ATTR(out_voltage_powerdown_mode, S_IRUGO |
S_IWUSR, ad5624r_read_powerdown_mode,
ad5624r_write_powerdown_mode, 0);
@@ -200,17 +229,12 @@ static IIO_DEV_ATTR_DAC_POWERDOWN(3, ad5624r_read_dac_powerdown,
ad5624r_write_dac_powerdown, 3);
static struct attribute *ad5624r_attributes[] = {
- &iio_dev_attr_out_voltage0_raw.dev_attr.attr,
- &iio_dev_attr_out_voltage1_raw.dev_attr.attr,
- &iio_dev_attr_out_voltage2_raw.dev_attr.attr,
- &iio_dev_attr_out_voltage3_raw.dev_attr.attr,
&iio_dev_attr_out_voltage0_powerdown.dev_attr.attr,
&iio_dev_attr_out_voltage1_powerdown.dev_attr.attr,
&iio_dev_attr_out_voltage2_powerdown.dev_attr.attr,
&iio_dev_attr_out_voltage3_powerdown.dev_attr.attr,
&iio_dev_attr_out_voltage_powerdown_mode.dev_attr.attr,
&iio_const_attr_out_voltage_powerdown_mode_available.dev_attr.attr,
- &iio_dev_attr_out_voltage_scale.dev_attr.attr,
NULL,
};
@@ -219,6 +243,8 @@ static const struct attribute_group ad5624r_attribute_group = {
};
static const struct iio_info ad5624r_info = {
+ .write_raw = ad5624r_write_raw,
+ .read_raw = ad5624r_read_raw,
.attrs = &ad5624r_attribute_group,
.driver_module = THIS_MODULE,
};
@@ -259,6 +285,8 @@ static int __devinit ad5624r_probe(struct spi_device *spi)
indio_dev->name = spi_get_device_id(spi)->name;
indio_dev->info = &ad5624r_info;
indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = st->chip_info->channels;
+ indio_dev->num_channels = AD5624R_DAC_CHANNELS;
ret = ad5624r_spi_write(spi, AD5624R_CMD_INTERNAL_REFER_SETUP, 0,
!!voltage_uv, 16);
@@ -307,6 +335,7 @@ static const struct spi_device_id ad5624r_id[] = {
{"ad5664r5", ID_AD5664R5},
{}
};
+MODULE_DEVICE_TABLE(spi, ad5624r_id);
static struct spi_driver ad5624r_driver = {
.driver = {
diff --git a/drivers/staging/iio/dac/ad5686.c b/drivers/staging/iio/dac/ad5686.c
index e72db2fbfedf..ce2d6193dd89 100644
--- a/drivers/staging/iio/dac/ad5686.c
+++ b/drivers/staging/iio/dac/ad5686.c
@@ -99,7 +99,7 @@ enum ad5686_supported_device_ids {
.indexed = 1, \
.output = 1, \
.channel = chan, \
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED), \
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT, \
.address = AD5686_ADDR_DAC(chan), \
.scan_type = IIO_ST('u', bits, 16, shift) \
}
@@ -306,7 +306,7 @@ static int ad5686_read_raw(struct iio_dev *indio_dev,
*val = ret;
return IIO_VAL_INT;
break;
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ case IIO_CHAN_INFO_SCALE:
scale_uv = (st->vref_mv * 100000)
>> (chan->scan_type.realbits);
*val = scale_uv / 100000;
@@ -437,6 +437,7 @@ static const struct spi_device_id ad5686_id[] = {
{"ad5686", ID_AD5686},
{}
};
+MODULE_DEVICE_TABLE(spi, ad5686_id);
static struct spi_driver ad5686_driver = {
.driver = {
diff --git a/drivers/staging/iio/dac/ad5764.c b/drivers/staging/iio/dac/ad5764.c
new file mode 100644
index 000000000000..ff91480ae65c
--- /dev/null
+++ b/drivers/staging/iio/dac/ad5764.c
@@ -0,0 +1,393 @@
+/*
+ * Analog devices AD5764, AD5764R, AD5744, AD5744R quad-channel
+ * Digital to Analog Converters driver
+ *
+ * Copyright 2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/regulator/consumer.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "dac.h"
+
+#define AD5764_REG_SF_NOP 0x0
+#define AD5764_REG_SF_CONFIG 0x1
+#define AD5764_REG_SF_CLEAR 0x4
+#define AD5764_REG_SF_LOAD 0x5
+#define AD5764_REG_DATA(x) ((2 << 3) | (x))
+#define AD5764_REG_COARSE_GAIN(x) ((3 << 3) | (x))
+#define AD5764_REG_FINE_GAIN(x) ((4 << 3) | (x))
+#define AD5764_REG_OFFSET(x) ((5 << 3) | (x))
+
+#define AD5764_NUM_CHANNELS 4
+
+/**
+ * struct ad5764_chip_info - chip specific information
+ * @int_vref:	Value of the internal reference voltage in uV, or 0 if an
+ *		external reference voltage is used
+ * @channels:	channel specification
+ */
+
+struct ad5764_chip_info {
+ unsigned long int_vref;
+ const struct iio_chan_spec *channels;
+};
+
+/**
+ * struct ad5764_state - driver instance specific data
+ * @spi: spi_device
+ * @chip_info: chip info
+ * @vref_reg: vref supply regulators
+ * @data: spi transfer buffers
+ */
+
+struct ad5764_state {
+ struct spi_device *spi;
+ const struct ad5764_chip_info *chip_info;
+ struct regulator_bulk_data vref_reg[2];
+
+ /*
+ * DMA (thus cache coherency maintenance) requires the
+ * transfer buffers to live in their own cache lines.
+ */
+ union {
+ __be32 d32;
+ u8 d8[4];
+ } data[2] ____cacheline_aligned;
+};
+
+enum ad5764_type {
+ ID_AD5744,
+ ID_AD5744R,
+ ID_AD5764,
+ ID_AD5764R,
+};
+
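+/* Output voltage channel template, data left-aligned in the 16-bit word */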
+#define AD5764_CHANNEL(_chan, _bits) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .output = 1, \
+ .channel = (_chan), \
+ .address = (_chan), \
+ .info_mask = IIO_CHAN_INFO_OFFSET_SHARED_BIT | \
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT | \
+ IIO_CHAN_INFO_CALIBSCALE_SEPARATE_BIT | \
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT, \
+ .scan_type = IIO_ST('u', (_bits), 16, 16 - (_bits)) \
+}
+
+#define DECLARE_AD5764_CHANNELS(_name, _bits) \
+const struct iio_chan_spec _name##_channels[] = { \
+ AD5764_CHANNEL(0, (_bits)), \
+ AD5764_CHANNEL(1, (_bits)), \
+ AD5764_CHANNEL(2, (_bits)), \
+ AD5764_CHANNEL(3, (_bits)), \
+};
+
+static DECLARE_AD5764_CHANNELS(ad5764, 16);
+static DECLARE_AD5764_CHANNELS(ad5744, 14);
+
+static const struct ad5764_chip_info ad5764_chip_infos[] = {
+ [ID_AD5744] = {
+ .int_vref = 0,
+ .channels = ad5744_channels,
+ },
+ [ID_AD5744R] = {
+ .int_vref = 5000000,
+ .channels = ad5744_channels,
+ },
+ [ID_AD5764] = {
+ .int_vref = 0,
+ .channels = ad5764_channels,
+ },
+ [ID_AD5764R] = {
+ .int_vref = 5000000,
+ .channels = ad5764_channels,
+ },
+};
+
+static int ad5764_write(struct iio_dev *indio_dev, unsigned int reg,
+ unsigned int val)
+{
+ struct ad5764_state *st = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&indio_dev->mlock);
+ st->data[0].d32 = cpu_to_be32((reg << 16) | val);
+
+ ret = spi_write(st->spi, &st->data[0].d8[1], 3);
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret;
+}
+
+static int ad5764_read(struct iio_dev *indio_dev, unsigned int reg,
+ unsigned int *val)
+{
+ struct ad5764_state *st = iio_priv(indio_dev);
+ struct spi_message m;
+ int ret;
+ struct spi_transfer t[] = {
+ {
+ .tx_buf = &st->data[0].d8[1],
+ .len = 3,
+ .cs_change = 1,
+ }, {
+ .rx_buf = &st->data[1].d8[1],
+ .len = 3,
+ },
+ };
+
+ spi_message_init(&m);
+ spi_message_add_tail(&t[0], &m);
+ spi_message_add_tail(&t[1], &m);
+
+ mutex_lock(&indio_dev->mlock);
+
+ st->data[0].d32 = cpu_to_be32((1 << 23) | (reg << 16));
+
+ ret = spi_sync(st->spi, &m);
+ if (ret >= 0)
+ *val = be32_to_cpu(st->data[1].d32) & 0xffff;
+
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret;
+}
+
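+/*
+ * Map an IIO channel info attribute to the matching per-channel register;
+ * an info mask of 0 selects the DAC data register.
+ */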
+static int ad5764_chan_info_to_reg(struct iio_chan_spec const *chan, long info)
+{
+ switch (info) {
+ case 0:
+ return AD5764_REG_DATA(chan->address);
+ case IIO_CHAN_INFO_CALIBBIAS:
+ return AD5764_REG_OFFSET(chan->address);
+ case IIO_CHAN_INFO_CALIBSCALE:
+ return AD5764_REG_FINE_GAIN(chan->address);
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int ad5764_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val, int val2, long info)
+{
+ const int max_val = (1 << chan->scan_type.realbits);
+ unsigned int reg;
+
+ switch (info) {
+ case 0:
+ if (val >= max_val || val < 0)
+ return -EINVAL;
+ val <<= chan->scan_type.shift;
+ break;
+ case IIO_CHAN_INFO_CALIBBIAS:
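+		/* The offset register holds an 8-bit two's complement value */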
+ if (val >= 128 || val < -128)
+ return -EINVAL;
+ break;
+ case IIO_CHAN_INFO_CALIBSCALE:
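+		/* The fine gain register holds a 6-bit two's complement value */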
+ if (val >= 32 || val < -32)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ reg = ad5764_chan_info_to_reg(chan, info);
+ return ad5764_write(indio_dev, reg, (u16)val);
+}
+
+static int ad5764_get_channel_vref(struct ad5764_state *st,
+ unsigned int channel)
+{
+ if (st->chip_info->int_vref)
+ return st->chip_info->int_vref;
+ else
+ return regulator_get_voltage(st->vref_reg[channel / 2].consumer);
+}
+
+static int ad5764_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val, int *val2, long info)
+{
+ struct ad5764_state *st = iio_priv(indio_dev);
+ unsigned long scale_uv;
+ unsigned int reg;
+ int vref;
+ int ret;
+
+ switch (info) {
+ case 0:
+ reg = AD5764_REG_DATA(chan->address);
+ ret = ad5764_read(indio_dev, reg, val);
+ if (ret < 0)
+ return ret;
+ *val >>= chan->scan_type.shift;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_CALIBBIAS:
+ reg = AD5764_REG_OFFSET(chan->address);
+ ret = ad5764_read(indio_dev, reg, val);
+ if (ret < 0)
+ return ret;
+ *val = sign_extend32(*val, 7);
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_CALIBSCALE:
+ reg = AD5764_REG_FINE_GAIN(chan->address);
+ ret = ad5764_read(indio_dev, reg, val);
+ if (ret < 0)
+ return ret;
+ *val = sign_extend32(*val, 5);
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+		/* vout = 4 * vref * ((dac_code / 65536) - 0.5) */
+ vref = ad5764_get_channel_vref(st, chan->channel);
+ if (vref < 0)
+ return vref;
+
+ scale_uv = (vref * 4 * 100) >> chan->scan_type.realbits;
+ *val = scale_uv / 100000;
+ *val2 = (scale_uv % 100000) * 10;
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_OFFSET:
+ *val = -(1 << chan->scan_type.realbits) / 2;
+ return IIO_VAL_INT;
+ }
+
+ return -EINVAL;
+}
+
+static const struct iio_info ad5764_info = {
+ .read_raw = ad5764_read_raw,
+ .write_raw = ad5764_write_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static int __devinit ad5764_probe(struct spi_device *spi)
+{
+ enum ad5764_type type = spi_get_device_id(spi)->driver_data;
+ struct iio_dev *indio_dev;
+ struct ad5764_state *st;
+ int ret;
+
+ indio_dev = iio_allocate_device(sizeof(*st));
+ if (indio_dev == NULL) {
+ dev_err(&spi->dev, "Failed to allocate iio device\n");
+ return -ENOMEM;
+ }
+
+ st = iio_priv(indio_dev);
+ spi_set_drvdata(spi, indio_dev);
+
+ st->spi = spi;
+ st->chip_info = &ad5764_chip_infos[type];
+
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->name = spi_get_device_id(spi)->name;
+ indio_dev->info = &ad5764_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->num_channels = AD5764_NUM_CHANNELS;
+ indio_dev->channels = st->chip_info->channels;
+
+ if (st->chip_info->int_vref == 0) {
+ st->vref_reg[0].supply = "vrefAB";
+ st->vref_reg[1].supply = "vrefCD";
+
+ ret = regulator_bulk_get(&st->spi->dev,
+ ARRAY_SIZE(st->vref_reg), st->vref_reg);
+ if (ret) {
+ dev_err(&spi->dev, "Failed to request vref regulators: %d\n",
+ ret);
+ goto error_free;
+ }
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(st->vref_reg),
+ st->vref_reg);
+ if (ret) {
+ dev_err(&spi->dev, "Failed to enable vref regulators: %d\n",
+ ret);
+ goto error_free_reg;
+ }
+ }
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(&spi->dev, "Failed to register iio device: %d\n", ret);
+ goto error_disable_reg;
+ }
+
+ return 0;
+
+error_disable_reg:
+ if (st->chip_info->int_vref == 0)
+ regulator_bulk_disable(ARRAY_SIZE(st->vref_reg), st->vref_reg);
+error_free_reg:
+ if (st->chip_info->int_vref == 0)
+ regulator_bulk_free(ARRAY_SIZE(st->vref_reg), st->vref_reg);
+error_free:
+ iio_free_device(indio_dev);
+
+ return ret;
+}
+
+static int __devexit ad5764_remove(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ struct ad5764_state *st = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+
+ if (st->chip_info->int_vref == 0) {
+ regulator_bulk_disable(ARRAY_SIZE(st->vref_reg), st->vref_reg);
+ regulator_bulk_free(ARRAY_SIZE(st->vref_reg), st->vref_reg);
+ }
+
+ iio_free_device(indio_dev);
+
+ return 0;
+}
+
+static const struct spi_device_id ad5764_ids[] = {
+ { "ad5744", ID_AD5744 },
+ { "ad5744r", ID_AD5744R },
+ { "ad5764", ID_AD5764 },
+ { "ad5764r", ID_AD5764R },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, ad5764_ids);
+
+static struct spi_driver ad5764_driver = {
+ .driver = {
+ .name = "ad5764",
+ .owner = THIS_MODULE,
+ },
+ .probe = ad5764_probe,
+ .remove = __devexit_p(ad5764_remove),
+ .id_table = ad5764_ids,
+};
+
+static int __init ad5764_spi_init(void)
+{
+ return spi_register_driver(&ad5764_driver);
+}
+module_init(ad5764_spi_init);
+
+static void __exit ad5764_spi_exit(void)
+{
+ spi_unregister_driver(&ad5764_driver);
+}
+module_exit(ad5764_spi_exit);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("Analog Devices AD5744/AD5744R/AD5764/AD5764R DAC");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/dac/ad5791.c b/drivers/staging/iio/dac/ad5791.c
index 4a80fd822231..ac45636a8d72 100644
--- a/drivers/staging/iio/dac/ad5791.c
+++ b/drivers/staging/iio/dac/ad5791.c
@@ -1,5 +1,6 @@
/*
- * AD5760, AD5780, AD5781, AD5791 Voltage Output Digital to Analog Converter
+ * AD5760, AD5780, AD5781, AD5790, AD5791 Voltage Output Digital to Analog
+ * Converter
*
* Copyright 2011 Analog Devices Inc.
*
@@ -77,8 +78,8 @@ static int ad5791_spi_read(struct spi_device *spi, u8 addr, u32 *val)
.indexed = 1, \
.address = AD5791_ADDR_DAC0, \
.channel = 0, \
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED) | \
- (1 << IIO_CHAN_INFO_OFFSET_SHARED), \
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT | \
+ IIO_CHAN_INFO_OFFSET_SHARED_BIT, \
.scan_type = IIO_ST('u', bits, 24, shift) \
}
@@ -237,11 +238,11 @@ static int ad5791_read_raw(struct iio_dev *indio_dev,
*val &= AD5791_DAC_MASK;
*val >>= chan->scan_type.shift;
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ case IIO_CHAN_INFO_SCALE:
*val = 0;
*val2 = (((u64)st->vref_mv) * 1000000ULL) >> chan->scan_type.realbits;
return IIO_VAL_INT_PLUS_MICRO;
- case (1 << IIO_CHAN_INFO_OFFSET_SHARED):
+ case IIO_CHAN_INFO_OFFSET:
val64 = (((u64)st->vref_neg_mv) << chan->scan_type.realbits);
do_div(val64, st->vref_mv);
*val = -val64;
@@ -397,9 +398,11 @@ static const struct spi_device_id ad5791_id[] = {
{"ad5760", ID_AD5760},
{"ad5780", ID_AD5780},
{"ad5781", ID_AD5781},
+ {"ad5790", ID_AD5791},
{"ad5791", ID_AD5791},
{}
};
+MODULE_DEVICE_TABLE(spi, ad5791_id);
static struct spi_driver ad5791_driver = {
.driver = {
@@ -413,5 +416,5 @@ static struct spi_driver ad5791_driver = {
module_spi_driver(ad5791_driver);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
-MODULE_DESCRIPTION("Analog Devices AD5760/AD5780/AD5781/AD5791 DAC");
+MODULE_DESCRIPTION("Analog Devices AD5760/AD5780/AD5781/AD5790/AD5791 DAC");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/dds/ad5930.c b/drivers/staging/iio/dds/ad5930.c
index 4a360d044a36..9c32d1beae25 100644
--- a/drivers/staging/iio/dds/ad5930.c
+++ b/drivers/staging/iio/dds/ad5930.c
@@ -148,3 +148,4 @@ module_spi_driver(ad5930_driver);
MODULE_AUTHOR("Cliff Cai");
MODULE_DESCRIPTION("Analog Devices ad5930 driver");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:" DRV_NAME);
diff --git a/drivers/staging/iio/dds/ad9832.c b/drivers/staging/iio/dds/ad9832.c
index cc32fd65b8b3..2ccf25dd9289 100644
--- a/drivers/staging/iio/dds/ad9832.c
+++ b/drivers/staging/iio/dds/ad9832.c
@@ -88,7 +88,7 @@ static ssize_t ad9832_write(struct device *dev,
goto error_ret;
mutex_lock(&indio_dev->mlock);
- switch (this_attr->address) {
+ switch ((u32) this_attr->address) {
case AD9832_FREQ0HM:
case AD9832_FREQ1HM:
ret = ad9832_write_frequency(st, this_attr->address, val);
@@ -344,11 +344,11 @@ static const struct spi_device_id ad9832_id[] = {
{"ad9835", 0},
{}
};
+MODULE_DEVICE_TABLE(spi, ad9832_id);
static struct spi_driver ad9832_driver = {
.driver = {
.name = "ad9832",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = ad9832_probe,
@@ -360,4 +360,3 @@ module_spi_driver(ad9832_driver);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("Analog Devices AD9832/AD9835 DDS");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("spi:ad9832");
diff --git a/drivers/staging/iio/dds/ad9834.c b/drivers/staging/iio/dds/ad9834.c
index 2b31e3524826..5e67104fea18 100644
--- a/drivers/staging/iio/dds/ad9834.c
+++ b/drivers/staging/iio/dds/ad9834.c
@@ -77,7 +77,7 @@ static ssize_t ad9834_write(struct device *dev,
goto error_ret;
mutex_lock(&indio_dev->mlock);
- switch (this_attr->address) {
+ switch ((u32) this_attr->address) {
case AD9834_REG_FREQ0:
case AD9834_REG_FREQ1:
ret = ad9834_write_frequency(st, this_attr->address, val);
@@ -153,7 +153,7 @@ static ssize_t ad9834_store_wavetype(struct device *dev,
mutex_lock(&indio_dev->mlock);
- switch (this_attr->address) {
+ switch ((u32) this_attr->address) {
case 0:
if (sysfs_streq(buf, "sine")) {
st->control &= ~AD9834_MODE;
@@ -281,14 +281,14 @@ static struct attribute *ad9834_attributes[] = {
NULL,
};
-static mode_t ad9834_attr_is_visible(struct kobject *kobj,
+static umode_t ad9834_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct ad9834_state *st = iio_priv(indio_dev);
- mode_t mode = attr->mode;
+ umode_t mode = attr->mode;
if (((st->devid == ID_AD9833) || (st->devid == ID_AD9837)) &&
((attr == &iio_dev_attr_dds0_out1_enable.dev_attr.attr) ||
@@ -435,11 +435,11 @@ static const struct spi_device_id ad9834_id[] = {
{"ad9838", ID_AD9838},
{}
};
+MODULE_DEVICE_TABLE(spi, ad9834_id);
static struct spi_driver ad9834_driver = {
.driver = {
.name = "ad9834",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = ad9834_probe,
@@ -451,4 +451,3 @@ module_spi_driver(ad9834_driver);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("Analog Devices AD9833/AD9834/AD9837/AD9838 DDS");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("spi:ad9834");
diff --git a/drivers/staging/iio/dds/ad9850.c b/drivers/staging/iio/dds/ad9850.c
index f9c96afcb996..f4f731bb2191 100644
--- a/drivers/staging/iio/dds/ad9850.c
+++ b/drivers/staging/iio/dds/ad9850.c
@@ -134,3 +134,4 @@ module_spi_driver(ad9850_driver);
MODULE_AUTHOR("Cliff Cai");
MODULE_DESCRIPTION("Analog Devices ad9850 driver");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:" DRV_NAME);
diff --git a/drivers/staging/iio/dds/ad9852.c b/drivers/staging/iio/dds/ad9852.c
index 9fc73fdc9c3b..554266c615a8 100644
--- a/drivers/staging/iio/dds/ad9852.c
+++ b/drivers/staging/iio/dds/ad9852.c
@@ -285,3 +285,4 @@ module_spi_driver(ad9852_driver);
MODULE_AUTHOR("Cliff Cai");
MODULE_DESCRIPTION("Analog Devices ad9852 driver");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:" DRV_NAME);
diff --git a/drivers/staging/iio/dds/ad9910.c b/drivers/staging/iio/dds/ad9910.c
index 57046b03121f..3985766d6f87 100644
--- a/drivers/staging/iio/dds/ad9910.c
+++ b/drivers/staging/iio/dds/ad9910.c
@@ -418,3 +418,4 @@ module_spi_driver(ad9910_driver);
MODULE_AUTHOR("Cliff Cai");
MODULE_DESCRIPTION("Analog Devices ad9910 driver");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:" DRV_NAME);
diff --git a/drivers/staging/iio/dds/ad9951.c b/drivers/staging/iio/dds/ad9951.c
index d29130e9acdf..4d150048002a 100644
--- a/drivers/staging/iio/dds/ad9951.c
+++ b/drivers/staging/iio/dds/ad9951.c
@@ -229,3 +229,4 @@ module_spi_driver(ad9951_driver);
MODULE_AUTHOR("Cliff Cai");
MODULE_DESCRIPTION("Analog Devices ad9951 driver");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:" DRV_NAME);
diff --git a/drivers/staging/iio/events.h b/drivers/staging/iio/events.h
new file mode 100644
index 000000000000..bfb63400fa60
--- /dev/null
+++ b/drivers/staging/iio/events.h
@@ -0,0 +1,103 @@
+/* The industrial I/O - event passing to userspace
+ *
+ * Copyright (c) 2008-2011 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+#ifndef _IIO_EVENTS_H_
+#define _IIO_EVENTS_H_
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+#include "types.h"
+
+/**
+ * struct iio_event_data - The actual event being pushed to userspace
+ * @id: event identifier
+ * @timestamp: best estimate of time of event occurrence (often from
+ * the interrupt handler)
+ */
+struct iio_event_data {
+ __u64 id;
+ __s64 timestamp;
+};
+
+#define IIO_GET_EVENT_FD_IOCTL _IOR('i', 0x90, int)
+
+enum iio_event_type {
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_TYPE_MAG,
+ IIO_EV_TYPE_ROC,
+ IIO_EV_TYPE_THRESH_ADAPTIVE,
+ IIO_EV_TYPE_MAG_ADAPTIVE,
+};
+
+enum iio_event_direction {
+ IIO_EV_DIR_EITHER,
+ IIO_EV_DIR_RISING,
+ IIO_EV_DIR_FALLING,
+};
+
+/**
+ * IIO_EVENT_CODE() - create event identifier
+ * @chan_type: Type of the channel. Should be one of enum iio_chan_type.
+ * @diff: Whether the event is for a differential channel or not.
+ * @modifier: Modifier for the channel. Should be one of enum iio_modifier.
+ * @direction: Direction of the event. One of enum iio_event_direction.
+ * @type: Type of the event. Should be one of enum iio_event_type.
+ * @chan: Channel number for non-differential channels.
+ * @chan1: First channel number for differential channels.
+ * @chan2: Second channel number for differential channels.
+ */
+
+#define IIO_EVENT_CODE(chan_type, diff, modifier, direction, \
+ type, chan, chan1, chan2) \
+ (((u64)type << 56) | ((u64)diff << 55) | \
+ ((u64)direction << 48) | ((u64)modifier << 40) | \
+ ((u64)chan_type << 32) | (((u16)chan2) << 16) | ((u16)chan1) | \
+ ((u16)chan))
+
+
+#define IIO_EV_DIR_MAX 4
+#define IIO_EV_BIT(type, direction) \
+ (1 << (type*IIO_EV_DIR_MAX + direction))
+
+/**
+ * IIO_MOD_EVENT_CODE() - create event identifier for modified channels
+ * @chan_type: Type of the channel. Should be one of enum iio_chan_type.
+ * @number: Channel number.
+ * @modifier: Modifier for the channel. Should be one of enum iio_modifier.
+ * @type: Type of the event. Should be one of enum iio_event_type.
+ * @direction: Direction of the event. One of enum iio_event_direction.
+ */
+
+#define IIO_MOD_EVENT_CODE(chan_type, number, modifier, \
+ type, direction) \
+ IIO_EVENT_CODE(chan_type, 0, modifier, direction, type, number, 0, 0)
+
+/**
+ * IIO_UNMOD_EVENT_CODE() - create event identifier for unmodified channels
+ * @chan_type: Type of the channel. Should be one of enum iio_chan_type.
+ * @number: Channel number.
+ * @type: Type of the event. Should be one of enum iio_event_type.
+ * @direction: Direction of the event. One of enum iio_event_direction.
+ */
+
+#define IIO_UNMOD_EVENT_CODE(chan_type, number, type, direction) \
+ IIO_EVENT_CODE(chan_type, 0, 0, direction, type, number, 0, 0)
+
+#define IIO_EVENT_CODE_EXTRACT_TYPE(mask) ((mask >> 56) & 0xFF)
+
+#define IIO_EVENT_CODE_EXTRACT_DIR(mask) ((mask >> 48) & 0xCF)
+
+#define IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(mask) ((mask >> 32) & 0xFF)
+
+/* Event code number extraction depends on which type of event we have.
+ * Perhaps review this function in the future. */
+#define IIO_EVENT_CODE_EXTRACT_NUM(mask) ((__s16)(mask & 0xFFFF))
+
+#define IIO_EVENT_CODE_EXTRACT_MODIFIER(mask) ((mask >> 40) & 0xFF)
+
+#endif
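
As a quick illustration of the new events.h interface, a driver can pack an event into a single 64-bit code and a consumer can unpack it again with the extract macros. This is only a sketch: iio_push_event() and iio_get_time_ns() are assumed to be the IIO core helpers of this kernel series, and the channel and type values are illustrative.

    /* Sketch: report a rising threshold event on voltage channel 0. */
    u64 code = IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, 0,
                                    IIO_EV_TYPE_THRESH,
                                    IIO_EV_DIR_RISING);

    /* Typically pushed from an interrupt handler (assumed core helper): */
    iio_push_event(indio_dev, code, iio_get_time_ns());

    /* The receiver can recover the individual fields again: */
    int type = IIO_EVENT_CODE_EXTRACT_TYPE(code); /* IIO_EV_TYPE_THRESH */
    int dir  = IIO_EVENT_CODE_EXTRACT_DIR(code);  /* IIO_EV_DIR_RISING */
    int chan = IIO_EVENT_CODE_EXTRACT_NUM(code);  /* 0 */
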
diff --git a/drivers/staging/iio/gyro/Kconfig b/drivers/staging/iio/gyro/Kconfig
index 22aea5b4e61d..ea295b25308c 100644
--- a/drivers/staging/iio/gyro/Kconfig
+++ b/drivers/staging/iio/gyro/Kconfig
@@ -37,11 +37,11 @@ config ADIS16260
will be called adis16260.
config ADXRS450
- tristate "Analog Devices ADXRS450 Digital Output Gyroscope SPI driver"
+ tristate "Analog Devices ADXRS450/3 Digital Output Gyroscope SPI driver"
depends on SPI
help
- Say yes here to build support for Analog Devices ADXRS450 programmable
- digital output gyroscope.
+ Say yes here to build support for Analog Devices ADXRS450 and ADXRS453
+ programmable digital output gyroscope.
This driver can also be built as a module. If so, the module
will be called adxrs450.
diff --git a/drivers/staging/iio/gyro/adis16060_core.c b/drivers/staging/iio/gyro/adis16060_core.c
index ff1b5a82b3d6..c0ca7093e0ed 100644
--- a/drivers/staging/iio/gyro/adis16060_core.c
+++ b/drivers/staging/iio/gyro/adis16060_core.c
@@ -98,11 +98,11 @@ static int adis16060_read_raw(struct iio_dev *indio_dev,
mutex_unlock(&indio_dev->mlock);
*val = tval;
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_OFFSET_SEPARATE):
+ case IIO_CHAN_INFO_OFFSET:
*val = -7;
*val2 = 461117;
return IIO_VAL_INT_PLUS_MICRO;
- case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
+ case IIO_CHAN_INFO_SCALE:
*val = 0;
*val2 = 34000;
return IIO_VAL_INT_PLUS_MICRO;
@@ -136,8 +136,8 @@ static const struct iio_chan_spec adis16060_channels[] = {
.type = IIO_TEMP,
.indexed = 1,
.channel = 0,
- .info_mask = (1 << IIO_CHAN_INFO_OFFSET_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_OFFSET_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
.address = ADIS16060_TEMP_OUT,
}
};
diff --git a/drivers/staging/iio/gyro/adis16080_core.c b/drivers/staging/iio/gyro/adis16080_core.c
index 9405f2d368ee..1815490db8b4 100644
--- a/drivers/staging/iio/gyro/adis16080_core.c
+++ b/drivers/staging/iio/gyro/adis16080_core.c
@@ -194,3 +194,4 @@ module_spi_driver(adis16080_driver);
MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
MODULE_DESCRIPTION("Analog Devices ADIS16080/100 Yaw Rate Gyroscope Driver");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:adis16080");
diff --git a/drivers/staging/iio/gyro/adis16130_core.c b/drivers/staging/iio/gyro/adis16130_core.c
index c9aaca9631f4..947eb86f05d8 100644
--- a/drivers/staging/iio/gyro/adis16130_core.c
+++ b/drivers/staging/iio/gyro/adis16130_core.c
@@ -173,3 +173,4 @@ module_spi_driver(adis16130_driver);
MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
MODULE_DESCRIPTION("Analog Devices ADIS16130 High Precision Angular Rate");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:adis16130");
diff --git a/drivers/staging/iio/gyro/adis16260_core.c b/drivers/staging/iio/gyro/adis16260_core.c
index 886dddf867ef..8f6af47e9559 100644
--- a/drivers/staging/iio/gyro/adis16260_core.c
+++ b/drivers/staging/iio/gyro/adis16260_core.c
@@ -20,7 +20,7 @@
#include "../iio.h"
#include "../sysfs.h"
-#include "../buffer_generic.h"
+#include "../buffer.h"
#include "adis16260.h"
@@ -390,9 +390,9 @@ enum adis16260_channel {
#define ADIS16260_GYRO_CHANNEL_SET(axis, mod) \
struct iio_chan_spec adis16260_channels_##axis[] = { \
IIO_CHAN(IIO_ANGL_VEL, 1, 0, 0, NULL, 0, mod, \
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) | \
- (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE) | \
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE), \
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT | \
+ IIO_CHAN_INFO_CALIBSCALE_SEPARATE_BIT | \
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT, \
gyro, ADIS16260_SCAN_GYRO, \
IIO_ST('s', 14, 16, 0), 0), \
IIO_CHAN(IIO_ANGL, 1, 0, 0, NULL, 0, mod, \
@@ -400,16 +400,16 @@ enum adis16260_channel {
angle, ADIS16260_SCAN_ANGL, \
IIO_ST('u', 14, 16, 0), 0), \
IIO_CHAN(IIO_TEMP, 0, 1, 0, NULL, 0, 0, \
- (1 << IIO_CHAN_INFO_OFFSET_SEPARATE) | \
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE), \
+ IIO_CHAN_INFO_OFFSET_SEPARATE_BIT | \
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT, \
temp, ADIS16260_SCAN_TEMP, \
IIO_ST('u', 12, 16, 0), 0), \
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, "supply", 0, 0, \
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE), \
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT, \
in_supply, ADIS16260_SCAN_SUPPLY, \
IIO_ST('u', 12, 16, 0), 0), \
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 1, 0, \
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE), \
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT, \
in_aux, ADIS16260_SCAN_AUX_ADC, \
IIO_ST('u', 12, 16, 0), 0), \
IIO_CHAN_SOFT_TIMESTAMP(5) \
@@ -464,8 +464,7 @@ static int adis16260_read_raw(struct iio_dev *indio_dev,
*val = val16;
mutex_unlock(&indio_dev->mlock);
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
case IIO_ANGL_VEL:
*val = 0;
@@ -489,10 +488,10 @@ static int adis16260_read_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
break;
- case (1 << IIO_CHAN_INFO_OFFSET_SEPARATE):
+ case IIO_CHAN_INFO_OFFSET:
*val = 25;
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
+ case IIO_CHAN_INFO_CALIBBIAS:
switch (chan->type) {
case IIO_ANGL_VEL:
bits = 12;
@@ -512,7 +511,7 @@ static int adis16260_read_raw(struct iio_dev *indio_dev,
*val = val16;
mutex_unlock(&indio_dev->mlock);
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE):
+ case IIO_CHAN_INFO_CALIBSCALE:
switch (chan->type) {
case IIO_ANGL_VEL:
bits = 12;
@@ -544,11 +543,11 @@ static int adis16260_write_raw(struct iio_dev *indio_dev,
s16 val16;
u8 addr;
switch (mask) {
- case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
+ case IIO_CHAN_INFO_CALIBBIAS:
val16 = val & ((1 << bits) - 1);
addr = adis16260_addresses[chan->address][1];
return adis16260_spi_write_reg_16(indio_dev, addr, val16);
- case (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE):
+ case IIO_CHAN_INFO_CALIBSCALE:
val16 = val & ((1 << bits) - 1);
addr = adis16260_addresses[chan->address][2];
return adis16260_spi_write_reg_16(indio_dev, addr, val16);
@@ -633,11 +632,16 @@ static int __devinit adis16260_probe(struct spi_device *spi)
}
if (indio_dev->buffer) {
/* Set default scan mode */
- iio_scan_mask_set(indio_dev->buffer, ADIS16260_SCAN_SUPPLY);
- iio_scan_mask_set(indio_dev->buffer, ADIS16260_SCAN_GYRO);
- iio_scan_mask_set(indio_dev->buffer, ADIS16260_SCAN_AUX_ADC);
- iio_scan_mask_set(indio_dev->buffer, ADIS16260_SCAN_TEMP);
- iio_scan_mask_set(indio_dev->buffer, ADIS16260_SCAN_ANGL);
+ iio_scan_mask_set(indio_dev, indio_dev->buffer,
+ ADIS16260_SCAN_SUPPLY);
+ iio_scan_mask_set(indio_dev, indio_dev->buffer,
+ ADIS16260_SCAN_GYRO);
+ iio_scan_mask_set(indio_dev, indio_dev->buffer,
+ ADIS16260_SCAN_AUX_ADC);
+ iio_scan_mask_set(indio_dev, indio_dev->buffer,
+ ADIS16260_SCAN_TEMP);
+ iio_scan_mask_set(indio_dev, indio_dev->buffer,
+ ADIS16260_SCAN_ANGL);
}
if (spi->irq) {
ret = adis16260_probe_trigger(indio_dev);
@@ -701,6 +705,7 @@ static const struct spi_device_id adis16260_id[] = {
{"adis16251", 1},
{}
};
+MODULE_DEVICE_TABLE(spi, adis16260_id);
static struct spi_driver adis16260_driver = {
.driver = {
diff --git a/drivers/staging/iio/gyro/adis16260_ring.c b/drivers/staging/iio/gyro/adis16260_ring.c
index 52a9e784e7c8..699a6152c409 100644
--- a/drivers/staging/iio/gyro/adis16260_ring.c
+++ b/drivers/staging/iio/gyro/adis16260_ring.c
@@ -74,9 +74,10 @@ static irqreturn_t adis16260_trigger_handler(int irq, void *p)
return -ENOMEM;
}
- if (ring->scan_count &&
+ if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength) &&
adis16260_read_ring_data(&indio_dev->dev, st->rx) >= 0)
- for (; i < ring->scan_count; i++)
+ for (; i < bitmap_weight(indio_dev->active_scan_mask,
+ indio_dev->masklength); i++)
data[i] = be16_to_cpup((__be16 *)&(st->rx[i*2]));
/* Guaranteed to be aligned with 8 byte boundary */
@@ -116,10 +117,8 @@ int adis16260_configure_ring(struct iio_dev *indio_dev)
indio_dev->buffer = ring;
/* Effectively select the ring buffer implementation */
ring->access = &ring_sw_access_funcs;
- ring->bpe = 2;
ring->scan_timestamp = true;
- ring->setup_ops = &adis16260_ring_setup_ops;
- ring->owner = THIS_MODULE;
+ indio_dev->setup_ops = &adis16260_ring_setup_ops;
indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
&adis16260_trigger_handler,
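
The ring handler above no longer relies on the removed scan_count field; the number of enabled channels is derived from the device's active_scan_mask instead. A minimal sketch of that idiom, with a hypothetical read_channel() helper and scratch buffer standing in for the device-specific read:

    /* Sketch: walk the channels currently enabled for buffered capture. */
    int scan_count = bitmap_weight(indio_dev->active_scan_mask,
                                   indio_dev->masklength); /* replaces ring->scan_count */
    u16 data[16];                           /* illustrative scratch buffer */
    int bit, i = 0;

    if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
            for_each_set_bit(bit, indio_dev->active_scan_mask,
                             indio_dev->masklength)
                    data[i++] = read_channel(bit); /* read_channel() is hypothetical */
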
diff --git a/drivers/staging/iio/gyro/adxrs450.h b/drivers/staging/iio/gyro/adxrs450.h
index b6b682876406..af0c870100b6 100644
--- a/drivers/staging/iio/gyro/adxrs450.h
+++ b/drivers/staging/iio/gyro/adxrs450.h
@@ -39,6 +39,11 @@
#define ADXRS450_GET_ST(a) ((a >> 26) & 0x3)
+enum {
+ ID_ADXRS450,
+ ID_ADXRS453,
+};
+
/**
* struct adxrs450_state - device instance specific data
* @us: actual spi_device
diff --git a/drivers/staging/iio/gyro/adxrs450_core.c b/drivers/staging/iio/gyro/adxrs450_core.c
index 70fd468b6850..15e2496f70c8 100644
--- a/drivers/staging/iio/gyro/adxrs450_core.c
+++ b/drivers/staging/iio/gyro/adxrs450_core.c
@@ -1,5 +1,5 @@
/*
- * ADXRS450 Digital Output Gyroscope Driver
+ * ADXRS450/ADXRS453 Digital Output Gyroscope Driver
*
* Copyright 2011 Analog Devices Inc.
*
@@ -243,7 +243,7 @@ static int adxrs450_write_raw(struct iio_dev *indio_dev,
{
int ret;
switch (mask) {
- case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
+ case IIO_CHAN_INFO_CALIBBIAS:
ret = adxrs450_spi_write_reg_16(indio_dev,
ADXRS450_DNC1,
val & 0x3FF);
@@ -263,7 +263,7 @@ static int adxrs450_read_raw(struct iio_dev *indio_dev,
{
int ret;
s16 t;
- u16 ut;
+
switch (mask) {
case 0:
switch (chan->type) {
@@ -276,10 +276,10 @@ static int adxrs450_read_raw(struct iio_dev *indio_dev,
break;
case IIO_TEMP:
ret = adxrs450_spi_read_reg_16(indio_dev,
- ADXRS450_TEMP1, &ut);
+ ADXRS450_TEMP1, &t);
if (ret)
break;
- *val = ut;
+ *val = (t >> 6) + 225;
ret = IIO_VAL_INT;
break;
default:
@@ -287,13 +287,34 @@ static int adxrs450_read_raw(struct iio_dev *indio_dev,
break;
}
break;
- case (1 << IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW_SEPARATE):
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_ANGL_VEL:
+ *val = 0;
+ *val2 = 218166;
+ return IIO_VAL_INT_PLUS_NANO;
+ case IIO_TEMP:
+ *val = 200;
+ *val2 = 0;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW:
ret = adxrs450_spi_read_reg_16(indio_dev, ADXRS450_QUAD1, &t);
if (ret)
break;
*val = t;
ret = IIO_VAL_INT;
break;
+ case IIO_CHAN_INFO_CALIBBIAS:
+ ret = adxrs450_spi_read_reg_16(indio_dev, ADXRS450_DNC1, &t);
+ if (ret)
+ break;
+ *val = t;
+ ret = IIO_VAL_INT;
+ break;
default:
ret = -EINVAL;
break;
@@ -302,18 +323,36 @@ static int adxrs450_read_raw(struct iio_dev *indio_dev,
return ret;
}
-static const struct iio_chan_spec adxrs450_channels[] = {
- {
- .type = IIO_ANGL_VEL,
- .modified = 1,
- .channel2 = IIO_MOD_Z,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW_SEPARATE)
- }, {
- .type = IIO_TEMP,
- .indexed = 1,
- .channel = 0,
- }
+static const struct iio_chan_spec adxrs450_channels[2][2] = {
+ [ID_ADXRS450] = {
+ {
+ .type = IIO_ANGL_VEL,
+ .modified = 1,
+ .channel2 = IIO_MOD_Z,
+ .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
+ }, {
+ .type = IIO_TEMP,
+ .indexed = 1,
+ .channel = 0,
+ .info_mask = IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
+ }
+ },
+ [ID_ADXRS453] = {
+ {
+ .type = IIO_ANGL_VEL,
+ .modified = 1,
+ .channel2 = IIO_MOD_Z,
+ .info_mask = IIO_CHAN_INFO_SCALE_SEPARATE_BIT |
+ IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW_SEPARATE_BIT,
+ }, {
+ .type = IIO_TEMP,
+ .indexed = 1,
+ .channel = 0,
+ .info_mask = IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
+ }
+ },
};
static const struct iio_info adxrs450_info = {
@@ -343,7 +382,8 @@ static int __devinit adxrs450_probe(struct spi_device *spi)
indio_dev->dev.parent = &spi->dev;
indio_dev->info = &adxrs450_info;
indio_dev->modes = INDIO_DIRECT_MODE;
- indio_dev->channels = adxrs450_channels;
+ indio_dev->channels =
+ adxrs450_channels[spi_get_device_id(spi)->driver_data];
indio_dev->num_channels = ARRAY_SIZE(adxrs450_channels);
indio_dev->name = spi->dev.driver->name;
@@ -373,6 +413,13 @@ static int adxrs450_remove(struct spi_device *spi)
return 0;
}
+static const struct spi_device_id adxrs450_id[] = {
+ {"adxrs450", ID_ADXRS450},
+ {"adxrs453", ID_ADXRS453},
+ {}
+};
+MODULE_DEVICE_TABLE(spi, adxrs450_id);
+
static struct spi_driver adxrs450_driver = {
.driver = {
.name = "adxrs450",
@@ -380,9 +427,10 @@ static struct spi_driver adxrs450_driver = {
},
.probe = adxrs450_probe,
.remove = __devexit_p(adxrs450_remove),
+ .id_table = adxrs450_id,
};
module_spi_driver(adxrs450_driver);
MODULE_AUTHOR("Cliff Cai <cliff.cai@xxxxxxxxxx>");
-MODULE_DESCRIPTION("Analog Devices ADXRS450 Gyroscope SPI driver");
+MODULE_DESCRIPTION("Analog Devices ADXRS450/ADXRS453 Gyroscope SPI driver");
MODULE_LICENSE("GPL v2");
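
The probe change above selects the channel table through the SPI id table's driver_data. Stripped down to the pattern itself, and assuming a foo_channels[][2] array shaped like adxrs450_channels above plus the staging-era iio_allocate_device() helper (all foo_* names are hypothetical):

    /* Sketch: pick a per-variant channel table from spi_device_id->driver_data. */
    static const struct spi_device_id foo_ids[] = {
            { "foo450", ID_FOO450 },        /* names are hypothetical */
            { "foo453", ID_FOO453 },
            { }
    };

    static int __devinit foo_probe(struct spi_device *spi)
    {
            const struct spi_device_id *id = spi_get_device_id(spi);
            struct iio_dev *indio_dev = iio_allocate_device(0);

            if (indio_dev == NULL)
                    return -ENOMEM;
            indio_dev->channels = foo_channels[id->driver_data];
            indio_dev->num_channels = ARRAY_SIZE(foo_channels[id->driver_data]);
            return iio_device_register(indio_dev);
    }
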
diff --git a/drivers/staging/iio/iio.h b/drivers/staging/iio/iio.h
index f3d88cd7e8a0..be6ced31f65e 100644
--- a/drivers/staging/iio/iio.h
+++ b/drivers/staging/iio/iio.h
@@ -7,13 +7,12 @@
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*/
-
#ifndef _INDUSTRIAL_IO_H_
#define _INDUSTRIAL_IO_H_
#include <linux/device.h>
#include <linux/cdev.h>
-
+#include "types.h"
/* IIO TODO LIST */
/*
* Provide means of adjusting timer accuracy.
@@ -25,62 +24,64 @@ enum iio_data_type {
IIO_PROCESSED,
};
-enum iio_chan_type {
- /* real channel types */
- IIO_VOLTAGE,
- IIO_CURRENT,
- IIO_POWER,
- IIO_ACCEL,
- IIO_ANGL_VEL,
- IIO_MAGN,
- IIO_LIGHT,
- IIO_INTENSITY,
- IIO_PROXIMITY,
- IIO_TEMP,
- IIO_INCLI,
- IIO_ROT,
- IIO_ANGL,
- IIO_TIMESTAMP,
- IIO_CAPACITANCE,
-};
-
-enum iio_modifier {
- IIO_NO_MOD,
- IIO_MOD_X,
- IIO_MOD_Y,
- IIO_MOD_Z,
- IIO_MOD_X_AND_Y,
- IIO_MOD_X_ANX_Z,
- IIO_MOD_Y_AND_Z,
- IIO_MOD_X_AND_Y_AND_Z,
- IIO_MOD_X_OR_Y,
- IIO_MOD_X_OR_Z,
- IIO_MOD_Y_OR_Z,
- IIO_MOD_X_OR_Y_OR_Z,
- IIO_MOD_LIGHT_BOTH,
- IIO_MOD_LIGHT_IR,
-};
-
/* Could add the raw attributes as well - allowing buffer only devices */
enum iio_chan_info_enum {
- IIO_CHAN_INFO_SCALE_SHARED,
- IIO_CHAN_INFO_SCALE_SEPARATE,
- IIO_CHAN_INFO_OFFSET_SHARED,
- IIO_CHAN_INFO_OFFSET_SEPARATE,
- IIO_CHAN_INFO_CALIBSCALE_SHARED,
- IIO_CHAN_INFO_CALIBSCALE_SEPARATE,
- IIO_CHAN_INFO_CALIBBIAS_SHARED,
- IIO_CHAN_INFO_CALIBBIAS_SEPARATE,
- IIO_CHAN_INFO_PEAK_SHARED,
- IIO_CHAN_INFO_PEAK_SEPARATE,
- IIO_CHAN_INFO_PEAK_SCALE_SHARED,
- IIO_CHAN_INFO_PEAK_SCALE_SEPARATE,
- IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW_SHARED,
- IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW_SEPARATE,
- IIO_CHAN_INFO_AVERAGE_RAW_SHARED,
- IIO_CHAN_INFO_AVERAGE_RAW_SEPARATE,
+	/* 0 is reserved for raw attributes */
+ IIO_CHAN_INFO_SCALE = 1,
+ IIO_CHAN_INFO_OFFSET,
+ IIO_CHAN_INFO_CALIBSCALE,
+ IIO_CHAN_INFO_CALIBBIAS,
+ IIO_CHAN_INFO_PEAK,
+ IIO_CHAN_INFO_PEAK_SCALE,
+ IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW,
+ IIO_CHAN_INFO_AVERAGE_RAW,
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY,
};
+#define IIO_CHAN_INFO_SHARED_BIT(type) BIT(type*2)
+#define IIO_CHAN_INFO_SEPARATE_BIT(type) BIT(type*2 + 1)
+
+#define IIO_CHAN_INFO_SCALE_SEPARATE_BIT \
+ IIO_CHAN_INFO_SEPARATE_BIT(IIO_CHAN_INFO_SCALE)
+#define IIO_CHAN_INFO_SCALE_SHARED_BIT \
+ IIO_CHAN_INFO_SHARED_BIT(IIO_CHAN_INFO_SCALE)
+#define IIO_CHAN_INFO_OFFSET_SEPARATE_BIT \
+ IIO_CHAN_INFO_SEPARATE_BIT(IIO_CHAN_INFO_OFFSET)
+#define IIO_CHAN_INFO_OFFSET_SHARED_BIT \
+ IIO_CHAN_INFO_SHARED_BIT(IIO_CHAN_INFO_OFFSET)
+#define IIO_CHAN_INFO_CALIBSCALE_SEPARATE_BIT \
+ IIO_CHAN_INFO_SEPARATE_BIT(IIO_CHAN_INFO_CALIBSCALE)
+#define IIO_CHAN_INFO_CALIBSCALE_SHARED_BIT \
+ IIO_CHAN_INFO_SHARED_BIT(IIO_CHAN_INFO_CALIBSCALE)
+#define IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT \
+ IIO_CHAN_INFO_SEPARATE_BIT(IIO_CHAN_INFO_CALIBBIAS)
+#define IIO_CHAN_INFO_CALIBBIAS_SHARED_BIT \
+ IIO_CHAN_INFO_SHARED_BIT(IIO_CHAN_INFO_CALIBBIAS)
+#define IIO_CHAN_INFO_PEAK_SEPARATE_BIT \
+ IIO_CHAN_INFO_SEPARATE_BIT(IIO_CHAN_INFO_PEAK)
+#define IIO_CHAN_INFO_PEAK_SHARED_BIT \
+ IIO_CHAN_INFO_SHARED_BIT(IIO_CHAN_INFO_PEAK)
+#define IIO_CHAN_INFO_PEAKSCALE_SEPARATE_BIT \
+ IIO_CHAN_INFO_SEPARATE_BIT(IIO_CHAN_INFO_PEAKSCALE)
+#define IIO_CHAN_INFO_PEAKSCALE_SHARED_BIT \
+ IIO_CHAN_INFO_SHARED_BIT(IIO_CHAN_INFO_PEAKSCALE)
+#define IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW_SEPARATE_BIT \
+ IIO_CHAN_INFO_SEPARATE_BIT( \
+ IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW)
+#define IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW_SHARED_BIT \
+ IIO_CHAN_INFO_SHARED_BIT( \
+ IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW)
+#define IIO_CHAN_INFO_AVERAGE_RAW_SEPARATE_BIT \
+ IIO_CHAN_INFO_SEPARATE_BIT(IIO_CHAN_INFO_AVERAGE_RAW)
+#define IIO_CHAN_INFO_AVERAGE_RAW_SHARED_BIT \
+ IIO_CHAN_INFO_SHARED_BIT(IIO_CHAN_INFO_AVERAGE_RAW)
+#define IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT \
+ IIO_CHAN_INFO_SHARED_BIT( \
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY)
+#define IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SEPARATE_BIT \
+ IIO_CHAN_INFO_SEPARATE_BIT( \
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY)
+
enum iio_endian {
IIO_CPU,
IIO_BE,
@@ -109,6 +110,10 @@ enum iio_endian {
* @extend_name: Allows labeling of channel attributes with an
 * informative name. Note this has no effect on event codes etc,
* unlike modifiers.
+ * @datasheet_name: A name used in in-kernel mapping of channels. It should
+ *		correspond to the first name that the channel is referred
+ * to by in the datasheet (e.g. IND), or the nearest
+ * possible compound name (e.g. IND-INC).
* @processed_val: Flag to specify the data access attribute should be
* *_input rather than *_raw.
* @modified: Does a modifier apply to this channel. What these are
@@ -137,6 +142,7 @@ struct iio_chan_spec {
long info_mask;
long event_mask;
char *extend_name;
+ const char *datasheet_name;
unsigned processed_val:1;
unsigned modified:1;
unsigned indexed:1;
@@ -261,7 +267,23 @@ struct iio_info {
int val);
int (*validate_trigger)(struct iio_dev *indio_dev,
struct iio_trigger *trig);
+ int (*update_scan_mode)(struct iio_dev *indio_dev,
+ const unsigned long *scan_mask);
+};
+/**
+ * struct iio_buffer_setup_ops - buffer setup related callbacks
+ * @preenable: [DRIVER] function to run prior to marking buffer enabled
+ * @postenable: [DRIVER] function to run after marking buffer enabled
+ * @predisable: [DRIVER] function to run prior to marking buffer
+ * disabled
+ * @postdisable: [DRIVER] function to run after marking buffer disabled
+ */
+struct iio_buffer_setup_ops {
+ int (*preenable)(struct iio_dev *);
+ int (*postenable)(struct iio_dev *);
+ int (*predisable)(struct iio_dev *);
+ int (*postdisable)(struct iio_dev *);
};
/**
@@ -278,6 +300,7 @@ struct iio_info {
* @available_scan_masks: [DRIVER] optional array of allowed bitmasks
* @masklength: [INTERN] the length of the mask established from
* channels
+ * @active_scan_mask: [INTERN] union of all scan masks requested by buffers
* @trig: [INTERN] current device trigger (buffer modes)
* @pollfunc: [DRIVER] function run on trigger being received
* @channels: [DRIVER] channel specification structure table
@@ -290,6 +313,7 @@ struct iio_info {
* @chrdev: [INTERN] associated character device
* @groups: [INTERN] attribute groups
* @groupcounter: [INTERN] index of next attribute group
+ * @flags: [INTERN] file ops related flags including busy flag.
**/
struct iio_dev {
int id;
@@ -305,6 +329,7 @@ struct iio_dev {
unsigned long *available_scan_masks;
unsigned masklength;
+ unsigned long *active_scan_mask;
struct iio_trigger *trig;
struct iio_poll_func *pollfunc;
@@ -315,13 +340,24 @@ struct iio_dev {
struct attribute_group chan_attr_group;
const char *name;
const struct iio_info *info;
+ const struct iio_buffer_setup_ops *setup_ops;
struct cdev chrdev;
#define IIO_MAX_GROUPS 6
const struct attribute_group *groups[IIO_MAX_GROUPS + 1];
int groupcounter;
+
+ unsigned long flags;
};
/**
+ * iio_find_channel_from_si() - get channel from its scan index
+ * @indio_dev: device
+ * @si: scan index to match
+ */
+const struct iio_chan_spec
+*iio_find_channel_from_si(struct iio_dev *indio_dev, int si);
+
+/**
* iio_device_register() - register a device with the IIO subsystem
* @indio_dev: Device structure filled by the device driver
**/
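
With the rework above, each enum iio_chan_info_enum entry owns a shared/separate bit pair in info_mask (BIT(type * 2) and BIT(type * 2 + 1)), while read_raw()/write_raw() now receive the plain enum value as the mask argument. A minimal sketch of both sides, with illustrative numbers only:

    /* Sketch: declare scale as shared and offset as separate for one channel. */
    static const struct iio_chan_spec foo_channel = {
            .type = IIO_VOLTAGE,
            .indexed = 1,
            .channel = 0,
            .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT |
                         IIO_CHAN_INFO_OFFSET_SEPARATE_BIT,
    };

    /* The core maps the bit back to the enum before calling read_raw(): */
    static int foo_read_raw(struct iio_dev *indio_dev,
                            struct iio_chan_spec const *chan,
                            int *val, int *val2, long mask)
    {
            switch (mask) {
            case 0:                         /* raw reading */
                    *val = 42;              /* placeholder value */
                    return IIO_VAL_INT;
            case IIO_CHAN_INFO_SCALE:
                    *val = 0;
                    *val2 = 1333;           /* 0.001333, illustrative */
                    return IIO_VAL_INT_PLUS_MICRO;
            default:
                    return -EINVAL;
            }
    }
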
diff --git a/drivers/staging/iio/iio_core.h b/drivers/staging/iio/iio_core.h
index 36159e0dbfc3..107cfb1cbb01 100644
--- a/drivers/staging/iio/iio_core.h
+++ b/drivers/staging/iio/iio_core.h
@@ -33,9 +33,6 @@ int __iio_add_chan_devattr(const char *postfix,
#ifdef CONFIG_IIO_BUFFER
struct poll_table_struct;
-int iio_chrdev_buffer_open(struct iio_dev *indio_dev);
-void iio_chrdev_buffer_release(struct iio_dev *indio_dev);
-
unsigned int iio_buffer_poll(struct file *filp,
struct poll_table_struct *wait);
ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
@@ -47,14 +44,6 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
#else
-static inline int iio_chrdev_buffer_open(struct iio_dev *indio_dev)
-{
- return -EINVAL;
-}
-
-static inline void iio_chrdev_buffer_release(struct iio_dev *indio_dev)
-{}
-
#define iio_buffer_poll_addr NULL
#define iio_buffer_read_first_n_outer_addr NULL
diff --git a/drivers/staging/iio/iio_core_trigger.h b/drivers/staging/iio/iio_core_trigger.h
index 523c288b776b..6f7c56fcbe78 100644
--- a/drivers/staging/iio/iio_core_trigger.h
+++ b/drivers/staging/iio/iio_core_trigger.h
@@ -13,8 +13,7 @@
* iio_device_register_trigger_consumer() - set up an iio_dev to use triggers
* @indio_dev: iio_dev associated with the device that will consume the trigger
**/
-
-int iio_device_register_trigger_consumer(struct iio_dev *indio_dev);
+void iio_device_register_trigger_consumer(struct iio_dev *indio_dev);
/**
* iio_device_unregister_trigger_consumer() - reverse the registration process
diff --git a/drivers/staging/iio/iio_dummy_evgen.c b/drivers/staging/iio/iio_dummy_evgen.c
index da657d10471d..cdbf289bfe2d 100644
--- a/drivers/staging/iio/iio_dummy_evgen.c
+++ b/drivers/staging/iio/iio_dummy_evgen.c
@@ -102,6 +102,10 @@ static int iio_dummy_evgen_create(void)
int iio_dummy_evgen_get_irq(void)
{
int i, ret = 0;
+
+ if (iio_evgen == NULL)
+ return -ENODEV;
+
mutex_lock(&iio_evgen->lock);
for (i = 0; i < IIO_EVENTGEN_NO; i++)
if (iio_evgen->inuse[i] == false) {
diff --git a/drivers/staging/iio/iio_simple_dummy.c b/drivers/staging/iio/iio_simple_dummy.c
index af0c99236d4f..e3a94572bb40 100644
--- a/drivers/staging/iio/iio_simple_dummy.c
+++ b/drivers/staging/iio/iio_simple_dummy.c
@@ -21,7 +21,8 @@
#include "iio.h"
#include "sysfs.h"
-#include "buffer_generic.h"
+#include "events.h"
+#include "buffer.h"
#include "iio_simple_dummy.h"
/*
@@ -76,13 +77,13 @@ static struct iio_chan_spec iio_dummy_channels[] = {
* Offset for userspace to apply prior to scale
* when converting to standard units (microvolts)
*/
- (1 << IIO_CHAN_INFO_OFFSET_SEPARATE) |
+ IIO_CHAN_INFO_OFFSET_SEPARATE_BIT |
/*
* in_voltage0_scale
 * Multiplier for userspace to apply post offset
* when converting to standard units (microvolts)
*/
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
/* The ordering of elements in the buffer via an enum */
.scan_index = voltage0,
.scan_type = { /* Description of storage in buffer */
@@ -117,7 +118,7 @@ static struct iio_chan_spec iio_dummy_channels[] = {
* Shared version of scale - shared by differential
* input channels of type IIO_VOLTAGE.
*/
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
.scan_index = diffvoltage1m2,
.scan_type = { /* Description of storage in buffer */
.sign = 's', /* signed */
@@ -134,7 +135,7 @@ static struct iio_chan_spec iio_dummy_channels[] = {
.channel = 3,
.channel2 = 4,
.info_mask =
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
.scan_index = diffvoltage3m4,
.scan_type = {
.sign = 's',
@@ -159,7 +160,7 @@ static struct iio_chan_spec iio_dummy_channels[] = {
* seeing the readings. Typically part of hardware
* calibration.
*/
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE),
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT,
.scan_index = accelx,
.scan_type = { /* Description of storage in buffer */
.sign = 's', /* signed */
@@ -228,29 +229,32 @@ static int iio_dummy_read_raw(struct iio_dev *indio_dev,
break;
}
break;
- case (1 << IIO_CHAN_INFO_OFFSET_SEPARATE):
+ case IIO_CHAN_INFO_OFFSET:
/* only single ended adc -> 7 */
*val = 7;
ret = IIO_VAL_INT;
break;
- case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
- /* only single ended adc -> 0.001333 */
- *val = 0;
- *val2 = 1333;
- ret = IIO_VAL_INT_PLUS_MICRO;
- break;
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
- /* all differential adc channels -> 0.000001344 */
- *val = 0;
- *val2 = 1344;
- ret = IIO_VAL_INT_PLUS_NANO;
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->differential) {
+ case 0:
+ /* only single ended adc -> 0.001333 */
+ *val = 0;
+ *val2 = 1333;
+ ret = IIO_VAL_INT_PLUS_MICRO;
+ break;
+ case 1:
+ /* all differential adc channels -> 0.000001344 */
+ *val = 0;
+ *val2 = 1344;
+ ret = IIO_VAL_INT_PLUS_NANO;
+ }
break;
- case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
+ case IIO_CHAN_INFO_CALIBBIAS:
/* only the acceleration axis - read from cache */
*val = st->accel_calibbias;
ret = IIO_VAL_INT;
break;
- case (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE):
+ case IIO_CHAN_INFO_CALIBSCALE:
*val = st->accel_calibscale->val;
*val2 = st->accel_calibscale->val2;
ret = IIO_VAL_INT_PLUS_MICRO;
@@ -295,7 +299,7 @@ static int iio_dummy_write_raw(struct iio_dev *indio_dev,
st->dac_val = val;
mutex_unlock(&st->lock);
return 0;
- case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
+ case IIO_CHAN_INFO_CALIBBIAS:
mutex_lock(&st->lock);
/* Compare against table - hard matching here */
for (i = 0; i < ARRAY_SIZE(dummy_scales); i++)
@@ -514,7 +518,8 @@ static __init int iio_dummy_init(void)
return -EINVAL;
}
/* Fake a bus */
- iio_dummy_devs = kzalloc(sizeof(*iio_dummy_devs)*instances, GFP_KERNEL);
+ iio_dummy_devs = kcalloc(instances, sizeof(*iio_dummy_devs),
+ GFP_KERNEL);
/* Here we have no actual device so call probe */
for (i = 0; i < instances; i++) {
ret = iio_dummy_probe(i);
diff --git a/drivers/staging/iio/iio_simple_dummy_buffer.c b/drivers/staging/iio/iio_simple_dummy_buffer.c
index edad0e7b4f4d..d6a1c0e82a5b 100644
--- a/drivers/staging/iio/iio_simple_dummy_buffer.c
+++ b/drivers/staging/iio/iio_simple_dummy_buffer.c
@@ -57,7 +57,7 @@ static irqreturn_t iio_simple_dummy_trigger_h(int irq, void *p)
if (data == NULL)
return -ENOMEM;
- if (buffer->scan_count) {
+ if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength)) {
/*
* Three common options here:
* hardware scans: certain combinations of channels make
@@ -75,7 +75,10 @@ static irqreturn_t iio_simple_dummy_trigger_h(int irq, void *p)
* in the constant table fakedata.
*/
int i, j;
- for (i = 0, j = 0; i < buffer->scan_count; i++) {
+ for (i = 0, j = 0;
+ i < bitmap_weight(indio_dev->active_scan_mask,
+ indio_dev->masklength);
+ i++) {
j = find_next_bit(buffer->scan_mask,
indio_dev->masklength, j + 1);
/* random access read form the 'device' */
@@ -142,8 +145,6 @@ int iio_simple_dummy_configure_buffer(struct iio_dev *indio_dev)
/* Tell the core how to access the buffer */
buffer->access = &kfifo_access_funcs;
- /* Number of bytes per element */
- buffer->bpe = 2;
/* Enable timestamps by default */
buffer->scan_timestamp = true;
@@ -151,8 +152,7 @@ int iio_simple_dummy_configure_buffer(struct iio_dev *indio_dev)
* Tell the core what device type specific functions should
* be run on either side of buffer capture enable / disable.
*/
- buffer->setup_ops = &iio_simple_dummy_buffer_setup_ops;
- buffer->owner = THIS_MODULE;
+ indio_dev->setup_ops = &iio_simple_dummy_buffer_setup_ops;
/*
* Configure a polling function.
diff --git a/drivers/staging/iio/iio_simple_dummy_events.c b/drivers/staging/iio/iio_simple_dummy_events.c
index 9f00cff7ddd5..449c7a5ece80 100644
--- a/drivers/staging/iio/iio_simple_dummy_events.c
+++ b/drivers/staging/iio/iio_simple_dummy_events.c
@@ -14,6 +14,7 @@
#include "iio.h"
#include "sysfs.h"
+#include "events.h"
#include "iio_simple_dummy.h"
/* Evgen 'fakes' interrupt events for this example */
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
index 454d131455de..9a2ca55625f4 100644
--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
+++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
@@ -21,7 +21,7 @@
#include "../iio.h"
#include "../sysfs.h"
-#include "../buffer_generic.h"
+#include "../buffer.h"
#include "../ring_sw.h"
#include "ad5933.h"
@@ -113,10 +113,10 @@ static struct iio_chan_spec ad5933_channels[] = {
0, AD5933_REG_TEMP_DATA, IIO_ST('s', 14, 16, 0), 0),
/* Ring Channels */
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, "real_raw", 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
AD5933_REG_REAL_DATA, 0, IIO_ST('s', 16, 16, 0), 0),
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, "imag_raw", 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
AD5933_REG_IMAG_DATA, 1, IIO_ST('s', 16, 16, 0), 0),
};
@@ -329,7 +329,7 @@ static ssize_t ad5933_show(struct device *dev,
int ret = 0, len = 0;
mutex_lock(&indio_dev->mlock);
- switch (this_attr->address) {
+ switch ((u32) this_attr->address) {
case AD5933_OUT_RANGE:
len = sprintf(buf, "%d\n",
st->range_avail[(st->ctrl_hb >> 1) & 0x3]);
@@ -380,7 +380,7 @@ static ssize_t ad5933_store(struct device *dev,
}
mutex_lock(&indio_dev->mlock);
- switch (this_attr->address) {
+ switch ((u32) this_attr->address) {
case AD5933_OUT_RANGE:
for (i = 0; i < 4; i++)
if (val == st->range_avail[i]) {
@@ -537,14 +537,14 @@ static const struct iio_info ad5933_info = {
static int ad5933_ring_preenable(struct iio_dev *indio_dev)
{
struct ad5933_state *st = iio_priv(indio_dev);
- struct iio_buffer *ring = indio_dev->buffer;
size_t d_size;
int ret;
- if (!ring->scan_count)
+ if (bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
return -EINVAL;
- d_size = ring->scan_count *
+ d_size = bitmap_weight(indio_dev->active_scan_mask,
+ indio_dev->masklength) *
ad5933_channels[1].scan_type.storagebits / 8;
if (indio_dev->buffer->access->set_bytes_per_datum)
@@ -611,7 +611,7 @@ static int ad5933_register_ring_funcs_and_init(struct iio_dev *indio_dev)
indio_dev->buffer->access = &ring_sw_access_funcs;
/* Ring buffer functions - here trigger setup related */
- indio_dev->buffer->setup_ops = &ad5933_ring_setup_ops;
+ indio_dev->setup_ops = &ad5933_ring_setup_ops;
indio_dev->modes |= INDIO_BUFFER_HARDWARE;
@@ -640,12 +640,14 @@ static void ad5933_work(struct work_struct *work)
ad5933_i2c_read(st->client, AD5933_REG_STATUS, 1, &status);
if (status & AD5933_STAT_DATA_VALID) {
+ int scan_count = bitmap_weight(indio_dev->active_scan_mask,
+ indio_dev->masklength);
ad5933_i2c_read(st->client,
- test_bit(1, ring->scan_mask) ?
+ test_bit(1, indio_dev->active_scan_mask) ?
AD5933_REG_REAL_DATA : AD5933_REG_IMAG_DATA,
- ring->scan_count * 2, (u8 *)buf);
+ scan_count * 2, (u8 *)buf);
- if (ring->scan_count == 2) {
+ if (scan_count == 2) {
buf[0] = be16_to_cpu(buf[0]);
buf[1] = be16_to_cpu(buf[1]);
} else {
@@ -734,8 +736,8 @@ static int __devinit ad5933_probe(struct i2c_client *client,
goto error_unreg_ring;
/* enable both REAL and IMAG channels by default */
- iio_scan_mask_set(indio_dev->buffer, 0);
- iio_scan_mask_set(indio_dev->buffer, 1);
+ iio_scan_mask_set(indio_dev, indio_dev->buffer, 0);
+ iio_scan_mask_set(indio_dev, indio_dev->buffer, 1);
ret = ad5933_setup(st);
if (ret)
diff --git a/drivers/staging/iio/imu/adis16400.h b/drivers/staging/iio/imu/adis16400.h
index f3546ee910a2..83d133efaac6 100644
--- a/drivers/staging/iio/imu/adis16400.h
+++ b/drivers/staging/iio/imu/adis16400.h
@@ -148,12 +148,14 @@ struct adis16400_chip_info {
* @tx: transmit buffer
* @rx: receive buffer
* @buf_lock: mutex to protect tx and rx
+ * @filt_int: integer part of requested filter frequency
**/
struct adis16400_state {
struct spi_device *us;
struct iio_trigger *trig;
struct mutex buf_lock;
struct adis16400_chip_info *variant;
+ int filt_int;
u8 tx[ADIS16400_MAX_TX] ____cacheline_aligned;
u8 rx[ADIS16400_MAX_RX] ____cacheline_aligned;
diff --git a/drivers/staging/iio/imu/adis16400_core.c b/drivers/staging/iio/imu/adis16400_core.c
index efc0f6529008..e73ad7818d85 100644
--- a/drivers/staging/iio/imu/adis16400_core.c
+++ b/drivers/staging/iio/imu/adis16400_core.c
@@ -28,7 +28,7 @@
#include "../iio.h"
#include "../sysfs.h"
-#include "../buffer_generic.h"
+#include "../buffer.h"
#include "adis16400.h"
enum adis16400_chip_variant {
@@ -161,25 +161,65 @@ error_ret:
return ret;
}
+static int adis16400_get_freq(struct iio_dev *indio_dev)
+{
+ u16 t;
+ int sps, ret;
+
+ ret = adis16400_spi_read_reg_16(indio_dev, ADIS16400_SMPL_PRD, &t);
+ if (ret < 0)
+ return ret;
+ sps = (t & ADIS16400_SMPL_PRD_TIME_BASE) ? 53 : 1638;
+ sps /= (t & ADIS16400_SMPL_PRD_DIV_MASK) + 1;
+
+ return sps;
+}
+
static ssize_t adis16400_read_frequency(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
int ret, len = 0;
- u16 t;
- int sps;
- ret = adis16400_spi_read_reg_16(indio_dev,
- ADIS16400_SMPL_PRD,
- &t);
- if (ret)
+ ret = adis16400_get_freq(indio_dev);
+ if (ret < 0)
return ret;
- sps = (t & ADIS16400_SMPL_PRD_TIME_BASE) ? 53 : 1638;
- sps /= (t & ADIS16400_SMPL_PRD_DIV_MASK) + 1;
- len = sprintf(buf, "%d SPS\n", sps);
+ len = sprintf(buf, "%d SPS\n", ret);
return len;
}
+static const unsigned adis16400_3db_divisors[] = {
+ [0] = 2, /* Special case */
+ [1] = 5,
+ [2] = 10,
+ [3] = 50,
+ [4] = 200,
+};
+
+static int adis16400_set_filter(struct iio_dev *indio_dev, int sps, int val)
+{
+ int i, ret;
+ u16 val16;
+ for (i = ARRAY_SIZE(adis16400_3db_divisors) - 1; i >= 0; i--)
+ if (sps/adis16400_3db_divisors[i] > val)
+ break;
+ if (i == -1)
+ ret = -EINVAL;
+ else {
+ ret = adis16400_spi_read_reg_16(indio_dev,
+ ADIS16400_SENS_AVG,
+ &val16);
+ if (ret < 0)
+ goto error_ret;
+
+ ret = adis16400_spi_write_reg_16(indio_dev,
+ ADIS16400_SENS_AVG,
+ (val16 & ~0x03) | i);
+ }
+error_ret:
+ return ret;
+}
+
static ssize_t adis16400_write_frequency(struct device *dev,
struct device_attribute *attr,
const char *buf,
@@ -210,6 +250,7 @@ static ssize_t adis16400_write_frequency(struct device *dev,
ADIS16400_SMPL_PRD,
t);
+ /* Also update the filter */
mutex_unlock(&indio_dev->mlock);
return ret ? ret : len;
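
To make the interaction of adis16400_get_freq() and adis16400_set_filter() concrete, with purely illustrative register values: with ADIS16400_SMPL_PRD_TIME_BASE clear and a divider field of 1, the device runs at 1638 / 2 = 819 SPS. Requesting a 100 Hz low-pass 3dB frequency then walks the divisor table from 200 downwards and stops at divisor 5 (819 / 5 = 163 Hz is the first result above the request), so index 1 is written into SENS_AVG; requesting 15 Hz would stop at divisor 50 (819 / 50 = 16 Hz) and write index 3.
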
@@ -455,22 +496,39 @@ static u8 adis16400_addresses[17][2] = {
[incli_y] = { ADIS16300_ROLL_OUT }
};
+
static int adis16400_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int val,
int val2,
long mask)
{
- int ret;
+ struct adis16400_state *st = iio_priv(indio_dev);
+ int ret, sps;
switch (mask) {
- case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
+ case IIO_CHAN_INFO_CALIBBIAS:
mutex_lock(&indio_dev->mlock);
ret = adis16400_spi_write_reg_16(indio_dev,
adis16400_addresses[chan->address][1],
val);
mutex_unlock(&indio_dev->mlock);
return ret;
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ /* Need to cache values so we can update if the frequency
+ changes */
+ mutex_lock(&indio_dev->mlock);
+ st->filt_int = val;
+ /* Work out update to current value */
+ sps = adis16400_get_freq(indio_dev);
+ if (sps < 0) {
+ mutex_unlock(&indio_dev->mlock);
+ return sps;
+ }
+
+ ret = adis16400_set_filter(indio_dev, sps, val);
+ mutex_unlock(&indio_dev->mlock);
+ return ret;
default:
return -EINVAL;
}
@@ -504,8 +562,7 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
*val = val16;
mutex_unlock(&indio_dev->mlock);
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
- case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
+ case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
case IIO_ANGL_VEL:
*val = 0;
@@ -533,7 +590,7 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
default:
return -EINVAL;
}
- case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
+ case IIO_CHAN_INFO_CALIBBIAS:
mutex_lock(&indio_dev->mlock);
ret = adis16400_spi_read_reg_16(indio_dev,
adis16400_addresses[chan->address][1],
@@ -544,11 +601,29 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
val16 = ((val16 & 0xFFF) << 4) >> 4;
*val = val16;
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_OFFSET_SEPARATE):
+ case IIO_CHAN_INFO_OFFSET:
/* currently only temperature */
*val = 198;
*val2 = 160000;
return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ mutex_lock(&indio_dev->mlock);
+ /* Need both the number of taps and the sampling frequency */
+ ret = adis16400_spi_read_reg_16(indio_dev,
+ ADIS16400_SENS_AVG,
+ &val16);
+ if (ret < 0) {
+ mutex_unlock(&indio_dev->mlock);
+ return ret;
+ }
+ ret = adis16400_get_freq(indio_dev);
+ if (ret > 0)
+ *val = ret/adis16400_3db_divisors[val16 & 0x03];
+ *val2 = 0;
+ mutex_unlock(&indio_dev->mlock);
+ if (ret < 0)
+ return ret;
+ return IIO_VAL_INT_PLUS_MICRO;
default:
return -EINVAL;
}
@@ -560,7 +635,7 @@ static struct iio_chan_spec adis16400_channels[] = {
.indexed = 1,
.channel = 0,
.extend_name = "supply",
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
.address = in_supply,
.scan_index = ADIS16400_SCAN_SUPPLY,
.scan_type = IIO_ST('u', 14, 16, 0)
@@ -568,8 +643,9 @@ static struct iio_chan_spec adis16400_channels[] = {
.type = IIO_ANGL_VEL,
.modified = 1,
.channel2 = IIO_MOD_X,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = gyro_x,
.scan_index = ADIS16400_SCAN_GYRO_X,
.scan_type = IIO_ST('s', 14, 16, 0)
@@ -577,8 +653,9 @@ static struct iio_chan_spec adis16400_channels[] = {
.type = IIO_ANGL_VEL,
.modified = 1,
.channel2 = IIO_MOD_Y,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = gyro_y,
.scan_index = ADIS16400_SCAN_GYRO_Y,
.scan_type = IIO_ST('s', 14, 16, 0),
@@ -586,8 +663,9 @@ static struct iio_chan_spec adis16400_channels[] = {
.type = IIO_ANGL_VEL,
.modified = 1,
.channel2 = IIO_MOD_Z,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = gyro_z,
.scan_index = ADIS16400_SCAN_GYRO_Z,
.scan_type = IIO_ST('s', 14, 16, 0),
@@ -595,8 +673,9 @@ static struct iio_chan_spec adis16400_channels[] = {
.type = IIO_ACCEL,
.modified = 1,
.channel2 = IIO_MOD_X,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = accel_x,
.scan_index = ADIS16400_SCAN_ACC_X,
.scan_type = IIO_ST('s', 14, 16, 0),
@@ -604,8 +683,9 @@ static struct iio_chan_spec adis16400_channels[] = {
.type = IIO_ACCEL,
.modified = 1,
.channel2 = IIO_MOD_Y,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = accel_y,
.scan_index = ADIS16400_SCAN_ACC_Y,
.scan_type = IIO_ST('s', 14, 16, 0),
@@ -613,8 +693,9 @@ static struct iio_chan_spec adis16400_channels[] = {
.type = IIO_ACCEL,
.modified = 1,
.channel2 = IIO_MOD_Z,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = accel_z,
.scan_index = ADIS16400_SCAN_ACC_Z,
.scan_type = IIO_ST('s', 14, 16, 0),
@@ -622,7 +703,8 @@ static struct iio_chan_spec adis16400_channels[] = {
.type = IIO_MAGN,
.modified = 1,
.channel2 = IIO_MOD_X,
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = magn_x,
.scan_index = ADIS16400_SCAN_MAGN_X,
.scan_type = IIO_ST('s', 14, 16, 0),
@@ -630,7 +712,8 @@ static struct iio_chan_spec adis16400_channels[] = {
.type = IIO_MAGN,
.modified = 1,
.channel2 = IIO_MOD_Y,
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = magn_y,
.scan_index = ADIS16400_SCAN_MAGN_Y,
.scan_type = IIO_ST('s', 14, 16, 0),
@@ -638,7 +721,8 @@ static struct iio_chan_spec adis16400_channels[] = {
.type = IIO_MAGN,
.modified = 1,
.channel2 = IIO_MOD_Z,
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = magn_z,
.scan_index = ADIS16400_SCAN_MAGN_Z,
.scan_type = IIO_ST('s', 14, 16, 0),
@@ -646,8 +730,8 @@ static struct iio_chan_spec adis16400_channels[] = {
.type = IIO_TEMP,
.indexed = 1,
.channel = 0,
- .info_mask = (1 << IIO_CHAN_INFO_OFFSET_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_OFFSET_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
.address = temp,
.scan_index = ADIS16400_SCAN_TEMP,
.scan_type = IIO_ST('s', 12, 16, 0),
@@ -655,7 +739,7 @@ static struct iio_chan_spec adis16400_channels[] = {
.type = IIO_VOLTAGE,
.indexed = 1,
.channel = 1,
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
.address = in1,
.scan_index = ADIS16400_SCAN_ADC_0,
.scan_type = IIO_ST('s', 12, 16, 0),
@@ -669,7 +753,7 @@ static struct iio_chan_spec adis16350_channels[] = {
.indexed = 1,
.channel = 0,
.extend_name = "supply",
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
.address = in_supply,
.scan_index = ADIS16400_SCAN_SUPPLY,
.scan_type = IIO_ST('u', 12, 16, 0)
@@ -677,8 +761,9 @@ static struct iio_chan_spec adis16350_channels[] = {
.type = IIO_ANGL_VEL,
.modified = 1,
.channel2 = IIO_MOD_X,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = gyro_x,
.scan_index = ADIS16400_SCAN_GYRO_X,
.scan_type = IIO_ST('s', 14, 16, 0)
@@ -686,8 +771,9 @@ static struct iio_chan_spec adis16350_channels[] = {
.type = IIO_ANGL_VEL,
.modified = 1,
.channel2 = IIO_MOD_Y,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = gyro_y,
.scan_index = ADIS16400_SCAN_GYRO_Y,
.scan_type = IIO_ST('s', 14, 16, 0),
@@ -695,8 +781,9 @@ static struct iio_chan_spec adis16350_channels[] = {
.type = IIO_ANGL_VEL,
.modified = 1,
.channel2 = IIO_MOD_Z,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = gyro_z,
.scan_index = ADIS16400_SCAN_GYRO_Z,
.scan_type = IIO_ST('s', 14, 16, 0),
@@ -704,8 +791,9 @@ static struct iio_chan_spec adis16350_channels[] = {
.type = IIO_ACCEL,
.modified = 1,
.channel2 = IIO_MOD_X,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = accel_x,
.scan_index = ADIS16400_SCAN_ACC_X,
.scan_type = IIO_ST('s', 14, 16, 0),
@@ -713,8 +801,9 @@ static struct iio_chan_spec adis16350_channels[] = {
.type = IIO_ACCEL,
.modified = 1,
.channel2 = IIO_MOD_Y,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = accel_y,
.scan_index = ADIS16400_SCAN_ACC_Y,
.scan_type = IIO_ST('s', 14, 16, 0),
@@ -722,8 +811,9 @@ static struct iio_chan_spec adis16350_channels[] = {
.type = IIO_ACCEL,
.modified = 1,
.channel2 = IIO_MOD_Z,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = accel_z,
.scan_index = ADIS16400_SCAN_ACC_Z,
.scan_type = IIO_ST('s', 14, 16, 0),
@@ -732,8 +822,9 @@ static struct iio_chan_spec adis16350_channels[] = {
.indexed = 1,
.channel = 0,
.extend_name = "x",
- .info_mask = (1 << IIO_CHAN_INFO_OFFSET_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_OFFSET_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = temp0,
.scan_index = ADIS16350_SCAN_TEMP_X,
.scan_type = IIO_ST('s', 12, 16, 0),
@@ -742,8 +833,9 @@ static struct iio_chan_spec adis16350_channels[] = {
.indexed = 1,
.channel = 1,
.extend_name = "y",
- .info_mask = (1 << IIO_CHAN_INFO_OFFSET_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_OFFSET_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = temp1,
.scan_index = ADIS16350_SCAN_TEMP_Y,
.scan_type = IIO_ST('s', 12, 16, 0),
@@ -752,8 +844,8 @@ static struct iio_chan_spec adis16350_channels[] = {
.indexed = 1,
.channel = 2,
.extend_name = "z",
- .info_mask = (1 << IIO_CHAN_INFO_OFFSET_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_OFFSET_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
.address = temp2,
.scan_index = ADIS16350_SCAN_TEMP_Z,
.scan_type = IIO_ST('s', 12, 16, 0),
@@ -761,7 +853,7 @@ static struct iio_chan_spec adis16350_channels[] = {
.type = IIO_VOLTAGE,
.indexed = 1,
.channel = 1,
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
.address = in1,
.scan_index = ADIS16350_SCAN_ADC_0,
.scan_type = IIO_ST('s', 12, 16, 0),
@@ -775,7 +867,7 @@ static struct iio_chan_spec adis16300_channels[] = {
.indexed = 1,
.channel = 0,
.extend_name = "supply",
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
.address = in_supply,
.scan_index = ADIS16400_SCAN_SUPPLY,
.scan_type = IIO_ST('u', 12, 16, 0)
@@ -783,8 +875,9 @@ static struct iio_chan_spec adis16300_channels[] = {
.type = IIO_ANGL_VEL,
.modified = 1,
.channel2 = IIO_MOD_X,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = gyro_x,
.scan_index = ADIS16400_SCAN_GYRO_X,
.scan_type = IIO_ST('s', 14, 16, 0),
@@ -792,8 +885,9 @@ static struct iio_chan_spec adis16300_channels[] = {
.type = IIO_ACCEL,
.modified = 1,
.channel2 = IIO_MOD_X,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = accel_x,
.scan_index = ADIS16400_SCAN_ACC_X,
.scan_type = IIO_ST('s', 14, 16, 0),
@@ -801,8 +895,9 @@ static struct iio_chan_spec adis16300_channels[] = {
.type = IIO_ACCEL,
.modified = 1,
.channel2 = IIO_MOD_Y,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = accel_y,
.scan_index = ADIS16400_SCAN_ACC_Y,
.scan_type = IIO_ST('s', 14, 16, 0),
@@ -810,8 +905,9 @@ static struct iio_chan_spec adis16300_channels[] = {
.type = IIO_ACCEL,
.modified = 1,
.channel2 = IIO_MOD_Z,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = accel_z,
.scan_index = ADIS16400_SCAN_ACC_Z,
.scan_type = IIO_ST('s', 14, 16, 0),
@@ -819,8 +915,8 @@ static struct iio_chan_spec adis16300_channels[] = {
.type = IIO_TEMP,
.indexed = 1,
.channel = 0,
- .info_mask = (1 << IIO_CHAN_INFO_OFFSET_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_OFFSET_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
.address = temp,
.scan_index = ADIS16400_SCAN_TEMP,
.scan_type = IIO_ST('s', 12, 16, 0),
@@ -828,7 +924,7 @@ static struct iio_chan_spec adis16300_channels[] = {
.type = IIO_VOLTAGE,
.indexed = 1,
.channel = 1,
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
.address = in1,
.scan_index = ADIS16350_SCAN_ADC_0,
.scan_type = IIO_ST('s', 12, 16, 0),
@@ -836,7 +932,7 @@ static struct iio_chan_spec adis16300_channels[] = {
.type = IIO_INCLI,
.modified = 1,
.channel2 = IIO_MOD_X,
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT,
.address = incli_x,
.scan_index = ADIS16300_SCAN_INCLI_X,
.scan_type = IIO_ST('s', 13, 16, 0),
@@ -844,7 +940,7 @@ static struct iio_chan_spec adis16300_channels[] = {
.type = IIO_INCLI,
.modified = 1,
.channel2 = IIO_MOD_Y,
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT,
.address = incli_y,
.scan_index = ADIS16300_SCAN_INCLI_Y,
.scan_type = IIO_ST('s', 13, 16, 0),
@@ -857,8 +953,9 @@ static const struct iio_chan_spec adis16334_channels[] = {
.type = IIO_ANGL_VEL,
.modified = 1,
.channel2 = IIO_MOD_X,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = gyro_x,
.scan_index = ADIS16400_SCAN_GYRO_X,
.scan_type = IIO_ST('s', 14, 16, 0),
@@ -866,8 +963,9 @@ static const struct iio_chan_spec adis16334_channels[] = {
.type = IIO_ANGL_VEL,
.modified = 1,
.channel2 = IIO_MOD_Y,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = gyro_y,
.scan_index = ADIS16400_SCAN_GYRO_Y,
.scan_type = IIO_ST('s', 14, 16, 0),
@@ -875,8 +973,9 @@ static const struct iio_chan_spec adis16334_channels[] = {
.type = IIO_ANGL_VEL,
.modified = 1,
.channel2 = IIO_MOD_Z,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = gyro_z,
.scan_index = ADIS16400_SCAN_GYRO_Z,
.scan_type = IIO_ST('s', 14, 16, 0),
@@ -884,8 +983,9 @@ static const struct iio_chan_spec adis16334_channels[] = {
.type = IIO_ACCEL,
.modified = 1,
.channel2 = IIO_MOD_X,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = accel_x,
.scan_index = ADIS16400_SCAN_ACC_X,
.scan_type = IIO_ST('s', 14, 16, 0),
@@ -893,8 +993,9 @@ static const struct iio_chan_spec adis16334_channels[] = {
.type = IIO_ACCEL,
.modified = 1,
.channel2 = IIO_MOD_Y,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = accel_y,
.scan_index = ADIS16400_SCAN_ACC_Y,
.scan_type = IIO_ST('s', 14, 16, 0),
@@ -902,8 +1003,9 @@ static const struct iio_chan_spec adis16334_channels[] = {
.type = IIO_ACCEL,
.modified = 1,
.channel2 = IIO_MOD_Z,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = accel_z,
.scan_index = ADIS16400_SCAN_ACC_Z,
.scan_type = IIO_ST('s', 14, 16, 0),
@@ -911,8 +1013,8 @@ static const struct iio_chan_spec adis16334_channels[] = {
.type = IIO_TEMP,
.indexed = 1,
.channel = 0,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
.address = accel_z,
.scan_index = ADIS16400_SCAN_ACC_Z,
.scan_type = IIO_ST('s', 14, 16, 0),
@@ -1118,6 +1220,7 @@ static const struct spi_device_id adis16400_id[] = {
{"adis16405", ADIS16400},
{}
};
+MODULE_DEVICE_TABLE(spi, adis16400_id);
static struct spi_driver adis16400_driver = {
.driver = {
diff --git a/drivers/staging/iio/imu/adis16400_ring.c b/drivers/staging/iio/imu/adis16400_ring.c
index fd886bf51a6d..ac22de573f3e 100644
--- a/drivers/staging/iio/imu/adis16400_ring.c
+++ b/drivers/staging/iio/imu/adis16400_ring.c
@@ -79,14 +79,16 @@ static int adis16350_spi_read_all(struct device *dev, u8 *rx)
struct spi_message msg;
int i, j = 0, ret;
struct spi_transfer *xfers;
+ int scan_count = bitmap_weight(indio_dev->active_scan_mask,
+ indio_dev->masklength);
- xfers = kzalloc(sizeof(*xfers)*indio_dev->buffer->scan_count + 1,
+ xfers = kzalloc(sizeof(*xfers)*(scan_count + 1),
GFP_KERNEL);
if (xfers == NULL)
return -ENOMEM;
for (i = 0; i < ARRAY_SIZE(read_all_tx_array); i++)
- if (test_bit(i, indio_dev->buffer->scan_mask)) {
+ if (test_bit(i, indio_dev->active_scan_mask)) {
xfers[j].tx_buf = &read_all_tx_array[i];
xfers[j].bits_per_word = 16;
xfers[j].len = 2;
@@ -97,7 +99,7 @@ static int adis16350_spi_read_all(struct device *dev, u8 *rx)
xfers[j].len = 2;
spi_message_init(&msg);
- for (j = 0; j < indio_dev->buffer->scan_count + 1; j++)
+ for (j = 0; j < scan_count + 1; j++)
spi_message_add_tail(&xfers[j], &msg);
ret = spi_sync(st->us, &msg);
@@ -119,26 +121,27 @@ static irqreturn_t adis16400_trigger_handler(int irq, void *p)
s16 *data;
size_t datasize = ring->access->get_bytes_per_datum(ring);
/* Assumption that long is enough for maximum channels */
- unsigned long mask = *ring->scan_mask;
-
+ unsigned long mask = *indio_dev->active_scan_mask;
+ int scan_count = bitmap_weight(indio_dev->active_scan_mask,
+ indio_dev->masklength);
data = kmalloc(datasize , GFP_KERNEL);
if (data == NULL) {
dev_err(&st->us->dev, "memory alloc failed in ring bh");
return -ENOMEM;
}
- if (ring->scan_count) {
+ if (scan_count) {
if (st->variant->flags & ADIS16400_NO_BURST) {
ret = adis16350_spi_read_all(&indio_dev->dev, st->rx);
if (ret < 0)
goto err;
- for (; i < ring->scan_count; i++)
+ for (; i < scan_count; i++)
data[i] = *(s16 *)(st->rx + i*2);
} else {
ret = adis16400_spi_read_burst(&indio_dev->dev, st->rx);
if (ret < 0)
goto err;
- for (; i < indio_dev->buffer->scan_count; i++) {
+ for (; i < scan_count; i++) {
j = __ffs(mask);
mask &= ~(1 << j);
data[i] = be16_to_cpup(
@@ -186,10 +189,8 @@ int adis16400_configure_ring(struct iio_dev *indio_dev)
indio_dev->buffer = ring;
/* Effectively select the ring buffer implementation */
ring->access = &ring_sw_access_funcs;
- ring->bpe = 2;
ring->scan_timestamp = true;
- ring->setup_ops = &adis16400_ring_setup_ops;
- ring->owner = THIS_MODULE;
+ indio_dev->setup_ops = &adis16400_ring_setup_ops;
indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
&adis16400_trigger_handler,
diff --git a/drivers/staging/iio/industrialio-buffer.c b/drivers/staging/iio/industrialio-buffer.c
index 9df0ce81dade..d7b1e9e435ae 100644
--- a/drivers/staging/iio/industrialio-buffer.c
+++ b/drivers/staging/iio/industrialio-buffer.c
@@ -24,7 +24,7 @@
#include "iio.h"
#include "iio_core.h"
#include "sysfs.h"
-#include "buffer_generic.h"
+#include "buffer.h"
static const char * const iio_endian_prefix[] = {
[IIO_BE] = "be",
@@ -43,7 +43,7 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
struct iio_dev *indio_dev = filp->private_data;
struct iio_buffer *rb = indio_dev->buffer;
- if (!rb->access->read_first_n)
+ if (!rb || !rb->access->read_first_n)
return -EINVAL;
return rb->access->read_first_n(rb, n, buf);
}
@@ -64,28 +64,9 @@ unsigned int iio_buffer_poll(struct file *filp,
return 0;
}
-int iio_chrdev_buffer_open(struct iio_dev *indio_dev)
+void iio_buffer_init(struct iio_buffer *buffer)
{
- struct iio_buffer *rb = indio_dev->buffer;
- if (!rb)
- return -EINVAL;
- if (rb->access->mark_in_use)
- rb->access->mark_in_use(rb);
- return 0;
-}
-
-void iio_chrdev_buffer_release(struct iio_dev *indio_dev)
-{
- struct iio_buffer *rb = indio_dev->buffer;
-
- clear_bit(IIO_BUSY_BIT_POS, &rb->flags);
- if (rb->access->unmark_in_use)
- rb->access->unmark_in_use(rb);
-}
-
-void iio_buffer_init(struct iio_buffer *buffer, struct iio_dev *indio_dev)
-{
- buffer->indio_dev = indio_dev;
+ INIT_LIST_HEAD(&buffer->demux_list);
init_waitqueue_head(&buffer->pollq);
}
EXPORT_SYMBOL(iio_buffer_init);
@@ -126,17 +107,15 @@ static ssize_t iio_scan_el_show(struct device *dev,
int ret;
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- ret = iio_scan_mask_query(indio_dev->buffer,
- to_iio_dev_attr(attr)->address);
- if (ret < 0)
- return ret;
+ ret = test_bit(to_iio_dev_attr(attr)->address,
+ indio_dev->buffer->scan_mask);
+
return sprintf(buf, "%d\n", ret);
}
static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
clear_bit(bit, buffer->scan_mask);
- buffer->scan_count--;
return 0;
}
@@ -153,11 +132,11 @@ static ssize_t iio_scan_el_store(struct device *dev,
state = !(buf[0] == '0');
mutex_lock(&indio_dev->mlock);
- if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
+ if (iio_buffer_enabled(indio_dev)) {
ret = -EBUSY;
goto error_ret;
}
- ret = iio_scan_mask_query(buffer, this_attr->address);
+ ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
if (ret < 0)
goto error_ret;
if (!state && ret) {
@@ -165,7 +144,7 @@ static ssize_t iio_scan_el_store(struct device *dev,
if (ret)
goto error_ret;
} else if (state && !ret) {
- ret = iio_scan_mask_set(buffer, this_attr->address);
+ ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
if (ret)
goto error_ret;
}
@@ -173,7 +152,7 @@ static ssize_t iio_scan_el_store(struct device *dev,
error_ret:
mutex_unlock(&indio_dev->mlock);
- return ret ? ret : len;
+ return ret < 0 ? ret : len;
}
@@ -196,7 +175,7 @@ static ssize_t iio_scan_el_ts_store(struct device *dev,
state = !(buf[0] == '0');
mutex_lock(&indio_dev->mlock);
- if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
+ if (iio_buffer_enabled(indio_dev)) {
ret = -EBUSY;
goto error_ret;
}
@@ -311,12 +290,14 @@ int iio_buffer_register(struct iio_dev *indio_dev,
if (ret < 0)
goto error_cleanup_dynamic;
attrcount += ret;
+ if (channels[i].type == IIO_TIMESTAMP)
+ buffer->scan_index_timestamp =
+ channels[i].scan_index;
}
if (indio_dev->masklength && buffer->scan_mask == NULL) {
- buffer->scan_mask
- = kzalloc(sizeof(*buffer->scan_mask)*
- BITS_TO_LONGS(indio_dev->masklength),
- GFP_KERNEL);
+ buffer->scan_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
+ sizeof(*buffer->scan_mask),
+ GFP_KERNEL);
if (buffer->scan_mask == NULL) {
ret = -ENOMEM;
goto error_cleanup_dynamic;
@@ -326,10 +307,9 @@ int iio_buffer_register(struct iio_dev *indio_dev,
buffer->scan_el_group.name = iio_scan_elements_group_name;
- buffer->scan_el_group.attrs
- = kzalloc(sizeof(buffer->scan_el_group.attrs[0])*
- (attrcount + 1),
- GFP_KERNEL);
+ buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
+ sizeof(buffer->scan_el_group.attrs[0]),
+ GFP_KERNEL);
if (buffer->scan_el_group.attrs == NULL) {
ret = -ENOMEM;
goto error_free_scan_mask;
@@ -395,31 +375,20 @@ ssize_t iio_buffer_write_length(struct device *dev,
if (val == buffer->access->get_length(buffer))
return len;
- if (buffer->access->set_length) {
- buffer->access->set_length(buffer, val);
- if (buffer->access->mark_param_change)
- buffer->access->mark_param_change(buffer);
+ mutex_lock(&indio_dev->mlock);
+ if (iio_buffer_enabled(indio_dev)) {
+ ret = -EBUSY;
+ } else {
+ if (buffer->access->set_length)
+ buffer->access->set_length(buffer, val);
+ ret = 0;
}
+ mutex_unlock(&indio_dev->mlock);
- return len;
+ return ret ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_write_length);
-ssize_t iio_buffer_read_bytes_per_datum(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct iio_buffer *buffer = indio_dev->buffer;
-
- if (buffer->access->get_bytes_per_datum)
- return sprintf(buf, "%d\n",
- buffer->access->get_bytes_per_datum(buffer));
-
- return 0;
-}
-EXPORT_SYMBOL(iio_buffer_read_bytes_per_datum);
-
ssize_t iio_buffer_store_enable(struct device *dev,
struct device_attribute *attr,
const char *buf,
@@ -434,14 +403,14 @@ ssize_t iio_buffer_store_enable(struct device *dev,
mutex_lock(&indio_dev->mlock);
previous_mode = indio_dev->currentmode;
requested_state = !(buf[0] == '0');
- current_state = !!(previous_mode & INDIO_ALL_BUFFER_MODES);
+ current_state = iio_buffer_enabled(indio_dev);
if (current_state == requested_state) {
printk(KERN_INFO "iio-buffer, current state requested again\n");
goto done;
}
if (requested_state) {
- if (buffer->setup_ops->preenable) {
- ret = buffer->setup_ops->preenable(indio_dev);
+ if (indio_dev->setup_ops->preenable) {
+ ret = indio_dev->setup_ops->preenable(indio_dev);
if (ret) {
printk(KERN_ERR
"Buffer not started:"
@@ -458,16 +427,12 @@ ssize_t iio_buffer_store_enable(struct device *dev,
goto error_ret;
}
}
- if (buffer->access->mark_in_use)
- buffer->access->mark_in_use(buffer);
/* Definitely possible for devices to support both of these.*/
if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) {
if (!indio_dev->trig) {
printk(KERN_INFO
"Buffer not started: no trigger\n");
ret = -EINVAL;
- if (buffer->access->unmark_in_use)
- buffer->access->unmark_in_use(buffer);
goto error_ret;
}
indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
@@ -478,32 +443,28 @@ ssize_t iio_buffer_store_enable(struct device *dev,
goto error_ret;
}
- if (buffer->setup_ops->postenable) {
- ret = buffer->setup_ops->postenable(indio_dev);
+ if (indio_dev->setup_ops->postenable) {
+ ret = indio_dev->setup_ops->postenable(indio_dev);
if (ret) {
printk(KERN_INFO
"Buffer not started:"
"postenable failed\n");
- if (buffer->access->unmark_in_use)
- buffer->access->unmark_in_use(buffer);
indio_dev->currentmode = previous_mode;
- if (buffer->setup_ops->postdisable)
- buffer->setup_ops->
+ if (indio_dev->setup_ops->postdisable)
+ indio_dev->setup_ops->
postdisable(indio_dev);
goto error_ret;
}
}
} else {
- if (buffer->setup_ops->predisable) {
- ret = buffer->setup_ops->predisable(indio_dev);
+ if (indio_dev->setup_ops->predisable) {
+ ret = indio_dev->setup_ops->predisable(indio_dev);
if (ret)
goto error_ret;
}
- if (buffer->access->unmark_in_use)
- buffer->access->unmark_in_use(buffer);
indio_dev->currentmode = INDIO_DIRECT_MODE;
- if (buffer->setup_ops->postdisable) {
- ret = buffer->setup_ops->postdisable(indio_dev);
+ if (indio_dev->setup_ops->postdisable) {
+ ret = indio_dev->setup_ops->postdisable(indio_dev);
if (ret)
goto error_ret;
}
@@ -523,37 +484,10 @@ ssize_t iio_buffer_show_enable(struct device *dev,
char *buf)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- return sprintf(buf, "%d\n", !!(indio_dev->currentmode
- & INDIO_ALL_BUFFER_MODES));
+ return sprintf(buf, "%d\n", iio_buffer_enabled(indio_dev));
}
EXPORT_SYMBOL(iio_buffer_show_enable);
-int iio_sw_buffer_preenable(struct iio_dev *indio_dev)
-{
- struct iio_buffer *buffer = indio_dev->buffer;
- size_t size;
- dev_dbg(&indio_dev->dev, "%s\n", __func__);
- /* Check if there are any scan elements enabled, if not fail*/
- if (!(buffer->scan_count || buffer->scan_timestamp))
- return -EINVAL;
- if (buffer->scan_timestamp)
- if (buffer->scan_count)
- /* Timestamp (aligned to s64) and data */
- size = (((buffer->scan_count * buffer->bpe)
- + sizeof(s64) - 1)
- & ~(sizeof(s64) - 1))
- + sizeof(s64);
- else /* Timestamp only */
- size = sizeof(s64);
- else /* Data only */
- size = buffer->scan_count * buffer->bpe;
- buffer->access->set_bytes_per_datum(buffer, size);
-
- return 0;
-}
-EXPORT_SYMBOL(iio_sw_buffer_preenable);
-
-
/* note NULL used as error indicator as it doesn't make sense. */
static unsigned long *iio_scan_mask_match(unsigned long *av_masks,
unsigned int masklength,
@@ -569,14 +503,57 @@ static unsigned long *iio_scan_mask_match(unsigned long *av_masks,
return NULL;
}
+int iio_sw_buffer_preenable(struct iio_dev *indio_dev)
+{
+ struct iio_buffer *buffer = indio_dev->buffer;
+ const struct iio_chan_spec *ch;
+ unsigned bytes = 0;
+ int length, i;
+ dev_dbg(&indio_dev->dev, "%s\n", __func__);
+
+ /* How much space will the demuxed element take? */
+ for_each_set_bit(i, buffer->scan_mask,
+ indio_dev->masklength) {
+ ch = iio_find_channel_from_si(indio_dev, i);
+ length = ch->scan_type.storagebits/8;
+ bytes = ALIGN(bytes, length);
+ bytes += length;
+ }
+ if (buffer->scan_timestamp) {
+ ch = iio_find_channel_from_si(indio_dev,
+ buffer->scan_index_timestamp);
+ length = ch->scan_type.storagebits/8;
+ bytes = ALIGN(bytes, length);
+ bytes += length;
+ }
+ buffer->access->set_bytes_per_datum(buffer, bytes);
+
+ /* What scan mask do we actually have? */
+ if (indio_dev->available_scan_masks)
+ indio_dev->active_scan_mask =
+ iio_scan_mask_match(indio_dev->available_scan_masks,
+ indio_dev->masklength,
+ buffer->scan_mask);
+ else
+ indio_dev->active_scan_mask = buffer->scan_mask;
+ iio_update_demux(indio_dev);
+
+ if (indio_dev->info->update_scan_mode)
+ return indio_dev->info
+ ->update_scan_mode(indio_dev,
+ indio_dev->active_scan_mask);
+ return 0;
+}
+EXPORT_SYMBOL(iio_sw_buffer_preenable);
+
/**
* iio_scan_mask_set() - set particular bit in the scan mask
* @buffer: the buffer whose scan mask we are interested in
* @bit: the bit to be set.
**/
-int iio_scan_mask_set(struct iio_buffer *buffer, int bit)
+int iio_scan_mask_set(struct iio_dev *indio_dev,
+ struct iio_buffer *buffer, int bit)
{
- struct iio_dev *indio_dev = buffer->indio_dev;
unsigned long *mask;
unsigned long *trialmask;
@@ -604,7 +581,6 @@ int iio_scan_mask_set(struct iio_buffer *buffer, int bit)
}
}
bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);
- buffer->scan_count++;
kfree(trialmask);
@@ -612,25 +588,147 @@ int iio_scan_mask_set(struct iio_buffer *buffer, int bit)
};
EXPORT_SYMBOL_GPL(iio_scan_mask_set);
-int iio_scan_mask_query(struct iio_buffer *buffer, int bit)
+int iio_scan_mask_query(struct iio_dev *indio_dev,
+ struct iio_buffer *buffer, int bit)
{
- struct iio_dev *indio_dev = buffer->indio_dev;
- long *mask;
-
if (bit > indio_dev->masklength)
return -EINVAL;
if (!buffer->scan_mask)
return 0;
- if (indio_dev->available_scan_masks)
- mask = iio_scan_mask_match(indio_dev->available_scan_masks,
- indio_dev->masklength,
- buffer->scan_mask);
- else
- mask = buffer->scan_mask;
- if (!mask)
- return 0;
- return test_bit(bit, mask);
+ return test_bit(bit, buffer->scan_mask);
};
EXPORT_SYMBOL_GPL(iio_scan_mask_query);
+
+/**
+ * struct iio_demux_table - table describing demux memcpy ops
+ * @from: index to copy from
+ * @to: index to copy to
+ * @length: how many bytes to copy
+ * @l: list head used for management
+ */
+struct iio_demux_table {
+ unsigned from;
+ unsigned to;
+ unsigned length;
+ struct list_head l;
+};
+
+static unsigned char *iio_demux(struct iio_buffer *buffer,
+ unsigned char *datain)
+{
+ struct iio_demux_table *t;
+
+ if (list_empty(&buffer->demux_list))
+ return datain;
+ list_for_each_entry(t, &buffer->demux_list, l)
+ memcpy(buffer->demux_bounce + t->to,
+ datain + t->from, t->length);
+
+ return buffer->demux_bounce;
+}
+
+int iio_push_to_buffer(struct iio_buffer *buffer, unsigned char *data,
+ s64 timestamp)
+{
+ unsigned char *dataout = iio_demux(buffer, data);
+
+ return buffer->access->store_to(buffer, dataout, timestamp);
+}
+EXPORT_SYMBOL_GPL(iio_push_to_buffer);
+
+int iio_update_demux(struct iio_dev *indio_dev)
+{
+ const struct iio_chan_spec *ch;
+ struct iio_buffer *buffer = indio_dev->buffer;
+ int ret, in_ind = -1, out_ind, length;
+ unsigned in_loc = 0, out_loc = 0;
+ struct iio_demux_table *p, *q;
+
+ /* Clear out any old demux */
+ list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
+ list_del(&p->l);
+ kfree(p);
+ }
+ kfree(buffer->demux_bounce);
+ buffer->demux_bounce = NULL;
+
+ /* First work out which scan mode we will actually have */
+ if (bitmap_equal(indio_dev->active_scan_mask,
+ buffer->scan_mask,
+ indio_dev->masklength))
+ return 0;
+
+ /* Now we have the two masks, work from least sig and build up sizes */
+ for_each_set_bit(out_ind,
+ indio_dev->active_scan_mask,
+ indio_dev->masklength) {
+ in_ind = find_next_bit(indio_dev->active_scan_mask,
+ indio_dev->masklength,
+ in_ind + 1);
+ while (in_ind != out_ind) {
+ in_ind = find_next_bit(indio_dev->active_scan_mask,
+ indio_dev->masklength,
+ in_ind + 1);
+ ch = iio_find_channel_from_si(indio_dev, in_ind);
+ length = ch->scan_type.storagebits/8;
+ /* Make sure we are aligned */
+ in_loc += length;
+ if (in_loc % length)
+ in_loc += length - in_loc % length;
+ }
+ p = kmalloc(sizeof(*p), GFP_KERNEL);
+ if (p == NULL) {
+ ret = -ENOMEM;
+ goto error_clear_mux_table;
+ }
+ ch = iio_find_channel_from_si(indio_dev, in_ind);
+ length = ch->scan_type.storagebits/8;
+ if (out_loc % length)
+ out_loc += length - out_loc % length;
+ if (in_loc % length)
+ in_loc += length - in_loc % length;
+ p->from = in_loc;
+ p->to = out_loc;
+ p->length = length;
+ list_add_tail(&p->l, &buffer->demux_list);
+ out_loc += length;
+ in_loc += length;
+ }
+ /* Relies on scan_timestamp being last */
+ if (buffer->scan_timestamp) {
+ p = kmalloc(sizeof(*p), GFP_KERNEL);
+ if (p == NULL) {
+ ret = -ENOMEM;
+ goto error_clear_mux_table;
+ }
+ ch = iio_find_channel_from_si(indio_dev,
+ buffer->scan_index_timestamp);
+ length = ch->scan_type.storagebits/8;
+ if (out_loc % length)
+ out_loc += length - out_loc % length;
+ if (in_loc % length)
+ in_loc += length - in_loc % length;
+ p->from = in_loc;
+ p->to = out_loc;
+ p->length = length;
+ list_add_tail(&p->l, &buffer->demux_list);
+ out_loc += length;
+ in_loc += length;
+ }
+ buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
+ if (buffer->demux_bounce == NULL) {
+ ret = -ENOMEM;
+ goto error_clear_mux_table;
+ }
+ return 0;
+
+error_clear_mux_table:
+ list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
+ list_del(&p->l);
+ kfree(p);
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iio_update_demux);
diff --git a/drivers/staging/iio/industrialio-core.c b/drivers/staging/iio/industrialio-core.c
index aec9311b108c..19f897f3c85e 100644
--- a/drivers/staging/iio/industrialio-core.c
+++ b/drivers/staging/iio/industrialio-core.c
@@ -25,8 +25,8 @@
#include "iio.h"
#include "iio_core.h"
#include "iio_core_trigger.h"
-#include "chrdev.h"
#include "sysfs.h"
+#include "events.h"
/* IDA to assign each registered device a unique id*/
static DEFINE_IDA(iio_ida);
@@ -77,17 +77,29 @@ static const char * const iio_modifier_names[] = {
/* relies on pairs of these shared then separate */
static const char * const iio_chan_info_postfix[] = {
- [IIO_CHAN_INFO_SCALE_SHARED/2] = "scale",
- [IIO_CHAN_INFO_OFFSET_SHARED/2] = "offset",
- [IIO_CHAN_INFO_CALIBSCALE_SHARED/2] = "calibscale",
- [IIO_CHAN_INFO_CALIBBIAS_SHARED/2] = "calibbias",
- [IIO_CHAN_INFO_PEAK_SHARED/2] = "peak_raw",
- [IIO_CHAN_INFO_PEAK_SCALE_SHARED/2] = "peak_scale",
- [IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW_SHARED/2]
- = "quadrature_correction_raw",
- [IIO_CHAN_INFO_AVERAGE_RAW_SHARED/2] = "mean_raw",
+ [IIO_CHAN_INFO_SCALE] = "scale",
+ [IIO_CHAN_INFO_OFFSET] = "offset",
+ [IIO_CHAN_INFO_CALIBSCALE] = "calibscale",
+ [IIO_CHAN_INFO_CALIBBIAS] = "calibbias",
+ [IIO_CHAN_INFO_PEAK] = "peak_raw",
+ [IIO_CHAN_INFO_PEAK_SCALE] = "peak_scale",
+ [IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW] = "quadrature_correction_raw",
+ [IIO_CHAN_INFO_AVERAGE_RAW] = "mean_raw",
+ [IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY]
+ = "filter_low_pass_3db_frequency",
};
+const struct iio_chan_spec
+*iio_find_channel_from_si(struct iio_dev *indio_dev, int si)
+{
+ int i;
+
+ for (i = 0; i < indio_dev->num_channels; i++)
+ if (indio_dev->channels[i].scan_index == si)
+ return &indio_dev->channels[i];
+ return NULL;
+}
+
/**
* struct iio_detected_event_list - list element for events that have occurred
* @list: linked list header
@@ -169,8 +181,11 @@ static ssize_t iio_event_chrdev_read(struct file *filep,
{
struct iio_event_interface *ev_int = filep->private_data;
struct iio_detected_event_list *el;
+ size_t len = sizeof(el->ev);
int ret;
- size_t len;
+
+ if (count < len)
+ return -EINVAL;
mutex_lock(&ev_int->event_list_lock);
if (list_empty(&ev_int->det_events)) {
@@ -192,7 +207,6 @@ static ssize_t iio_event_chrdev_read(struct file *filep,
el = list_first_entry(&ev_int->det_events,
struct iio_detected_event_list,
list);
- len = sizeof el->ev;
if (copy_to_user(buf, &(el->ev), len)) {
ret = -EFAULT;
goto error_mutex_unlock;
@@ -415,7 +429,7 @@ int __iio_device_attr_init(struct device_attribute *dev_attr,
sysfs_attr_init(&dev_attr->attr);
/* Build up postfix of <extend_name>_<modifier>_postfix */
- if (chan->modified) {
+ if (chan->modified && !generic) {
if (chan->extend_name)
full_postfix = kasprintf(GFP_KERNEL, "%s_%s_%s",
iio_modifier_names[chan
@@ -600,7 +614,7 @@ static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev,
chan,
&iio_read_channel_info,
&iio_write_channel_info,
- (1 << i),
+ i/2,
!(i%2),
&indio_dev->dev,
&indio_dev->channel_attr_list);
@@ -665,10 +679,9 @@ static int iio_device_register_sysfs(struct iio_dev *indio_dev)
if (indio_dev->name)
attrcount++;
- indio_dev->chan_attr_group.attrs
- = kzalloc(sizeof(indio_dev->chan_attr_group.attrs[0])*
- (attrcount + 1),
- GFP_KERNEL);
+ indio_dev->chan_attr_group.attrs = kcalloc(attrcount + 1,
+ sizeof(indio_dev->chan_attr_group.attrs[0]),
+ GFP_KERNEL);
if (indio_dev->chan_attr_group.attrs == NULL) {
ret = -ENOMEM;
goto error_clear_attrs;
@@ -788,6 +801,9 @@ static ssize_t iio_ev_value_store(struct device *dev,
unsigned long val;
int ret;
+ if (!indio_dev->info->write_event_value)
+ return -EINVAL;
+
ret = strict_strtoul(buf, 10, &val);
if (ret)
return ret;
@@ -958,10 +974,9 @@ static int iio_device_register_eventset(struct iio_dev *indio_dev)
}
indio_dev->event_interface->group.name = iio_event_group_name;
- indio_dev->event_interface->group.attrs =
- kzalloc(sizeof(indio_dev->event_interface->group.attrs[0])
- *(attrcount + 1),
- GFP_KERNEL);
+ indio_dev->event_interface->group.attrs = kcalloc(attrcount + 1,
+ sizeof(indio_dev->event_interface->group.attrs[0]),
+ GFP_KERNEL);
if (indio_dev->event_interface->group.attrs == NULL) {
ret = -ENOMEM;
goto error_free_setup_event_lines;
@@ -1068,9 +1083,13 @@ static int iio_chrdev_open(struct inode *inode, struct file *filp)
{
struct iio_dev *indio_dev = container_of(inode->i_cdev,
struct iio_dev, chrdev);
+
+ if (test_and_set_bit(IIO_BUSY_BIT_POS, &indio_dev->flags))
+ return -EBUSY;
+
filp->private_data = indio_dev;
- return iio_chrdev_buffer_open(indio_dev);
+ return 0;
}
/**
@@ -1078,8 +1097,9 @@ static int iio_chrdev_open(struct inode *inode, struct file *filp)
**/
static int iio_chrdev_release(struct inode *inode, struct file *filp)
{
- iio_chrdev_buffer_release(container_of(inode->i_cdev,
- struct iio_dev, chrdev));
+ struct iio_dev *indio_dev = container_of(inode->i_cdev,
+ struct iio_dev, chrdev);
+ clear_bit(IIO_BUSY_BIT_POS, &indio_dev->flags);
return 0;
}
diff --git a/drivers/staging/iio/industrialio-trigger.c b/drivers/staging/iio/industrialio-trigger.c
index 2c626e0cb29c..47ecadd4818d 100644
--- a/drivers/staging/iio/industrialio-trigger.c
+++ b/drivers/staging/iio/industrialio-trigger.c
@@ -159,13 +159,12 @@ EXPORT_SYMBOL(iio_trigger_generic_data_rdy_poll);
void iio_trigger_poll_chained(struct iio_trigger *trig, s64 time)
{
int i;
- if (!trig->use_count) {
+ if (!trig->use_count)
for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++)
if (trig->subirqs[i].enabled) {
trig->use_count++;
handle_nested_irq(trig->subirq_base + i);
}
- }
}
EXPORT_SYMBOL(iio_trigger_poll_chained);
@@ -173,10 +172,9 @@ void iio_trigger_notify_done(struct iio_trigger *trig)
{
trig->use_count--;
if (trig->use_count == 0 && trig->ops && trig->ops->try_reenable)
- if (trig->ops->try_reenable(trig)) {
+ if (trig->ops->try_reenable(trig))
/* Missed an interrupt so launch new poll now */
iio_trigger_poll(trig, 0);
- }
}
EXPORT_SYMBOL(iio_trigger_notify_done);
@@ -222,8 +220,16 @@ static int iio_trigger_attach_poll_func(struct iio_trigger *trig,
ret = request_threaded_irq(pf->irq, pf->h, pf->thread,
pf->type, pf->name,
pf);
- if (trig->ops && trig->ops->set_trigger_state && notinuse)
+ if (ret < 0) {
+ module_put(pf->indio_dev->info->driver_module);
+ return ret;
+ }
+
+ if (trig->ops && trig->ops->set_trigger_state && notinuse) {
ret = trig->ops->set_trigger_state(trig, true);
+ if (ret < 0)
+ module_put(pf->indio_dev->info->driver_module);
+ }
return ret;
}
@@ -295,7 +301,7 @@ void iio_dealloc_pollfunc(struct iio_poll_func *pf)
EXPORT_SYMBOL_GPL(iio_dealloc_pollfunc);
/**
- * iio_trigger_read_currrent() - trigger consumer sysfs query which trigger
+ * iio_trigger_read_current() - trigger consumer sysfs query which trigger
*
* For trigger consumers the current_trigger interface allows the trigger
* used by the device to be queried.
@@ -336,6 +342,8 @@ static ssize_t iio_trigger_write_current(struct device *dev,
mutex_unlock(&indio_dev->mlock);
trig = iio_trigger_find_by_name(buf, len);
+ if (oldtrig == trig)
+ return len;
if (trig && indio_dev->info->validate_trigger) {
ret = indio_dev->info->validate_trigger(indio_dev, trig);
@@ -473,12 +481,10 @@ void iio_free_trigger(struct iio_trigger *trig)
}
EXPORT_SYMBOL(iio_free_trigger);
-int iio_device_register_trigger_consumer(struct iio_dev *indio_dev)
+void iio_device_register_trigger_consumer(struct iio_dev *indio_dev)
{
indio_dev->groups[indio_dev->groupcounter++] =
&iio_trigger_consumer_attr_group;
-
- return 0;
}
void iio_device_unregister_trigger_consumer(struct iio_dev *indio_dev)
@@ -490,18 +496,14 @@ void iio_device_unregister_trigger_consumer(struct iio_dev *indio_dev)
int iio_triggered_buffer_postenable(struct iio_dev *indio_dev)
{
- return indio_dev->trig
- ? iio_trigger_attach_poll_func(indio_dev->trig,
- indio_dev->pollfunc)
- : 0;
+ return iio_trigger_attach_poll_func(indio_dev->trig,
+ indio_dev->pollfunc);
}
EXPORT_SYMBOL(iio_triggered_buffer_postenable);
int iio_triggered_buffer_predisable(struct iio_dev *indio_dev)
{
- return indio_dev->trig
- ? iio_trigger_dettach_poll_func(indio_dev->trig,
- indio_dev->pollfunc)
- : 0;
+ return iio_trigger_dettach_poll_func(indio_dev->trig,
+ indio_dev->pollfunc);
}
EXPORT_SYMBOL(iio_triggered_buffer_predisable);
diff --git a/drivers/staging/iio/kfifo_buf.c b/drivers/staging/iio/kfifo_buf.c
index e8c234bb18f0..e1e9c06cde4a 100644
--- a/drivers/staging/iio/kfifo_buf.c
+++ b/drivers/staging/iio/kfifo_buf.c
@@ -11,9 +11,7 @@
struct iio_kfifo {
struct iio_buffer buffer;
struct kfifo kf;
- int use_count;
int update_needed;
- struct mutex use_lock;
};
#define iio_to_kfifo(r) container_of(r, struct iio_kfifo, buffer)
@@ -33,54 +31,25 @@ static int iio_request_update_kfifo(struct iio_buffer *r)
int ret = 0;
struct iio_kfifo *buf = iio_to_kfifo(r);
- mutex_lock(&buf->use_lock);
if (!buf->update_needed)
goto error_ret;
- if (buf->use_count) {
- ret = -EAGAIN;
- goto error_ret;
- }
kfifo_free(&buf->kf);
ret = __iio_allocate_kfifo(buf, buf->buffer.bytes_per_datum,
buf->buffer.length);
error_ret:
- mutex_unlock(&buf->use_lock);
return ret;
}
-static void iio_mark_kfifo_in_use(struct iio_buffer *r)
-{
- struct iio_kfifo *buf = iio_to_kfifo(r);
- mutex_lock(&buf->use_lock);
- buf->use_count++;
- mutex_unlock(&buf->use_lock);
-}
-
-static void iio_unmark_kfifo_in_use(struct iio_buffer *r)
-{
- struct iio_kfifo *buf = iio_to_kfifo(r);
- mutex_lock(&buf->use_lock);
- buf->use_count--;
- mutex_unlock(&buf->use_lock);
-}
-
static int iio_get_length_kfifo(struct iio_buffer *r)
{
return r->length;
}
-static inline void __iio_init_kfifo(struct iio_kfifo *kf)
-{
- mutex_init(&kf->use_lock);
-}
-
static IIO_BUFFER_ENABLE_ATTR;
-static IIO_BUFFER_BYTES_PER_DATUM_ATTR;
static IIO_BUFFER_LENGTH_ATTR;
static struct attribute *iio_kfifo_attributes[] = {
&dev_attr_length.attr,
- &dev_attr_bytes_per_datum.attr,
&dev_attr_enable.attr,
NULL,
};
@@ -98,9 +67,8 @@ struct iio_buffer *iio_kfifo_allocate(struct iio_dev *indio_dev)
if (!kf)
return NULL;
kf->update_needed = true;
- iio_buffer_init(&kf->buffer, indio_dev);
+ iio_buffer_init(&kf->buffer);
kf->buffer.attrs = &iio_kfifo_attribute_group;
- __iio_init_kfifo(kf);
return &kf->buffer;
}
@@ -111,20 +79,19 @@ static int iio_get_bytes_per_datum_kfifo(struct iio_buffer *r)
return r->bytes_per_datum;
}
-static int iio_set_bytes_per_datum_kfifo(struct iio_buffer *r, size_t bpd)
+static int iio_mark_update_needed_kfifo(struct iio_buffer *r)
{
- if (r->bytes_per_datum != bpd) {
- r->bytes_per_datum = bpd;
- if (r->access->mark_param_change)
- r->access->mark_param_change(r);
- }
+ struct iio_kfifo *kf = iio_to_kfifo(r);
+ kf->update_needed = true;
return 0;
}
-static int iio_mark_update_needed_kfifo(struct iio_buffer *r)
+static int iio_set_bytes_per_datum_kfifo(struct iio_buffer *r, size_t bpd)
{
- struct iio_kfifo *kf = iio_to_kfifo(r);
- kf->update_needed = true;
+ if (r->bytes_per_datum != bpd) {
+ r->bytes_per_datum = bpd;
+ iio_mark_update_needed_kfifo(r);
+ }
return 0;
}
@@ -132,8 +99,7 @@ static int iio_set_length_kfifo(struct iio_buffer *r, int length)
{
if (r->length != length) {
r->length = length;
- if (r->access->mark_param_change)
- r->access->mark_param_change(r);
+ iio_mark_update_needed_kfifo(r);
}
return 0;
}
@@ -150,16 +116,9 @@ static int iio_store_to_kfifo(struct iio_buffer *r,
{
int ret;
struct iio_kfifo *kf = iio_to_kfifo(r);
- u8 *datal = kmalloc(r->bytes_per_datum, GFP_KERNEL);
- memcpy(datal, data, r->bytes_per_datum - sizeof(timestamp));
- memcpy(datal + r->bytes_per_datum - sizeof(timestamp),
- &timestamp, sizeof(timestamp));
ret = kfifo_in(&kf->kf, data, r->bytes_per_datum);
- if (ret != r->bytes_per_datum) {
- kfree(datal);
+ if (ret != r->bytes_per_datum)
return -EBUSY;
- }
- kfree(datal);
return 0;
}
@@ -169,17 +128,18 @@ static int iio_read_first_n_kfifo(struct iio_buffer *r,
int ret, copied;
struct iio_kfifo *kf = iio_to_kfifo(r);
- ret = kfifo_to_user(&kf->kf, buf, r->bytes_per_datum*n, &copied);
+ if (n < r->bytes_per_datum)
+ return -EINVAL;
+
+ n = rounddown(n, r->bytes_per_datum);
+ ret = kfifo_to_user(&kf->kf, buf, n, &copied);
return copied;
}
const struct iio_buffer_access_funcs kfifo_access_funcs = {
- .mark_in_use = &iio_mark_kfifo_in_use,
- .unmark_in_use = &iio_unmark_kfifo_in_use,
.store_to = &iio_store_to_kfifo,
.read_first_n = &iio_read_first_n_kfifo,
- .mark_param_change = &iio_mark_update_needed_kfifo,
.request_update = &iio_request_update_kfifo,
.get_bytes_per_datum = &iio_get_bytes_per_datum_kfifo,
.set_bytes_per_datum = &iio_set_bytes_per_datum_kfifo,
diff --git a/drivers/staging/iio/kfifo_buf.h b/drivers/staging/iio/kfifo_buf.h
index a15598bb9fdb..cc2bd9a1ccfe 100644
--- a/drivers/staging/iio/kfifo_buf.h
+++ b/drivers/staging/iio/kfifo_buf.h
@@ -1,7 +1,7 @@
#include <linux/kfifo.h>
#include "iio.h"
-#include "buffer_generic.h"
+#include "buffer.h"
extern const struct iio_buffer_access_funcs kfifo_access_funcs;
diff --git a/drivers/staging/iio/light/isl29018.c b/drivers/staging/iio/light/isl29018.c
index 47638362224b..849d6a564afa 100644
--- a/drivers/staging/iio/light/isl29018.c
+++ b/drivers/staging/iio/light/isl29018.c
@@ -362,8 +362,7 @@ static int isl29018_write_raw(struct iio_dev *indio_dev,
int ret = -EINVAL;
mutex_lock(&chip->lock);
- if (mask == (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE) &&
- chan->type == IIO_LIGHT) {
+ if (mask == IIO_CHAN_INFO_CALIBSCALE && chan->type == IIO_LIGHT) {
chip->lux_scale = val;
ret = 0;
}
@@ -402,7 +401,7 @@ static int isl29018_read_raw(struct iio_dev *indio_dev,
if (!ret)
ret = IIO_VAL_INT;
break;
- case (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE):
+ case IIO_CHAN_INFO_CALIBSCALE:
if (chan->type == IIO_LIGHT) {
*val = chip->lux_scale;
ret = IIO_VAL_INT;
@@ -421,7 +420,7 @@ static const struct iio_chan_spec isl29018_channels[] = {
.indexed = 1,
.channel = 0,
.processed_val = IIO_PROCESSED,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_CALIBSCALE_SEPARATE_BIT,
}, {
.type = IIO_INTENSITY,
.modified = 1,
diff --git a/drivers/staging/iio/light/tsl2563.c b/drivers/staging/iio/light/tsl2563.c
index 1942db13b03b..ffca85e81ef5 100644
--- a/drivers/staging/iio/light/tsl2563.c
+++ b/drivers/staging/iio/light/tsl2563.c
@@ -37,6 +37,7 @@
#include "../iio.h"
#include "../sysfs.h"
+#include "../events.h"
#include "tsl2563.h"
/* Use this many bits for fraction part. */
@@ -226,6 +227,8 @@ static int tsl2563_read_id(struct tsl2563_chip *chip, u8 *id)
if (ret < 0)
return ret;
+ *id = ret;
+
return 0;
}
@@ -510,7 +513,7 @@ static int tsl2563_read_raw(struct iio_dev *indio_dev,
}
break;
- case (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE):
+ case IIO_CHAN_INFO_CALIBSCALE:
if (chan->channel == 0)
*val = calib_to_sysfs(chip->calib0);
else
@@ -536,7 +539,7 @@ static const struct iio_chan_spec tsl2563_channels[] = {
.type = IIO_INTENSITY,
.modified = 1,
.channel2 = IIO_MOD_LIGHT_BOTH,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_CALIBSCALE_SEPARATE_BIT,
.event_mask = (IIO_EV_BIT(IIO_EV_TYPE_THRESH,
IIO_EV_DIR_RISING) |
IIO_EV_BIT(IIO_EV_TYPE_THRESH,
@@ -544,8 +547,8 @@ static const struct iio_chan_spec tsl2563_channels[] = {
}, {
.type = IIO_INTENSITY,
.modified = 1,
- .channel2 = IIO_MOD_LIGHT_BOTH,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE),
+ .channel2 = IIO_MOD_LIGHT_IR,
+ .info_mask = IIO_CHAN_INFO_CALIBSCALE_SEPARATE_BIT,
}
};
diff --git a/drivers/staging/iio/light/tsl2583.c b/drivers/staging/iio/light/tsl2583.c
index 3836f73a5296..5b6455a238d8 100644
--- a/drivers/staging/iio/light/tsl2583.c
+++ b/drivers/staging/iio/light/tsl2583.c
@@ -194,6 +194,7 @@ static int taos_get_lux(struct iio_dev *indio_dev)
{
u16 ch0, ch1; /* separated ch0/ch1 data from device */
u32 lux; /* raw lux calculated from device data */
+ u64 lux64;
u32 ratio;
u8 buf[5];
struct taos_lux *p;
@@ -297,9 +298,19 @@ static int taos_get_lux(struct iio_dev *indio_dev)
lux = (lux + (chip->als_time_scale >> 1)) /
chip->als_time_scale;
- /* adjust for active gain scale */
- lux >>= 13; /* tables have factor of 8192 builtin for accuracy */
- lux = (lux * chip->taos_settings.als_gain_trim + 500) / 1000;
+ /* Adjust for active gain scale.
+ * The taos_device_lux tables above have a factor of 8192 built in,
+ * so we need to shift right.
+ * User-specified gain provides a multiplier.
+ * Apply user-specified gain before shifting right to retain precision.
+ * Use 64 bits to avoid overflow on multiplication.
+ * Then go back to 32 bits before division to avoid using div_u64().
+ */
+ lux64 = lux;
+ lux64 = lux64 * chip->taos_settings.als_gain_trim;
+ lux64 >>= 13;
+ lux = lux64;
+ lux = (lux + 500) / 1000;
if (lux > TSL258X_LUX_CALC_OVER_FLOW) { /* check for overflow */
return_max:
lux = TSL258X_LUX_CALC_OVER_FLOW;
diff --git a/drivers/staging/iio/magnetometer/ak8975.c b/drivers/staging/iio/magnetometer/ak8975.c
index db31d6d0e5b6..3158f12cb051 100644
--- a/drivers/staging/iio/magnetometer/ak8975.c
+++ b/drivers/staging/iio/magnetometer/ak8975.c
@@ -431,7 +431,7 @@ static int ak8975_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case 0:
return ak8975_read_axis(indio_dev, chan->address, val);
- case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
+ case IIO_CHAN_INFO_SCALE:
*val = data->raw_to_gauss[chan->address];
return IIO_VAL_INT;
}
@@ -443,7 +443,7 @@ static int ak8975_read_raw(struct iio_dev *indio_dev,
.type = IIO_MAGN, \
.modified = 1, \
.channel2 = IIO_MOD_##axis, \
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SEPARATE), \
+ .info_mask = IIO_CHAN_INFO_SCALE_SEPARATE_BIT, \
.address = index, \
}
diff --git a/drivers/staging/iio/magnetometer/hmc5843.c b/drivers/staging/iio/magnetometer/hmc5843.c
index 7bb1bc605136..f2e85a9cf196 100644
--- a/drivers/staging/iio/magnetometer/hmc5843.c
+++ b/drivers/staging/iio/magnetometer/hmc5843.c
@@ -463,7 +463,7 @@ static int hmc5843_read_raw(struct iio_dev *indio_dev,
return hmc5843_read_measurement(indio_dev,
chan->address,
val);
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ case IIO_CHAN_INFO_SCALE:
*val = 0;
*val2 = hmc5843_regval_to_nanoscale[data->range];
return IIO_VAL_INT_PLUS_NANO;
@@ -476,7 +476,7 @@ static int hmc5843_read_raw(struct iio_dev *indio_dev,
.type = IIO_MAGN, \
.modified = 1, \
.channel2 = IIO_MOD_##axis, \
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED), \
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT, \
.address = add \
}
@@ -605,6 +605,7 @@ static const struct i2c_device_id hmc5843_id[] = {
{ "hmc5843", 0 },
{ }
};
+MODULE_DEVICE_TABLE(i2c, hmc5843_id);
static struct i2c_driver hmc5843_driver = {
.driver = {
diff --git a/drivers/staging/iio/meter/ade7753.c b/drivers/staging/iio/meter/ade7753.c
index 4c7b0cbf49fa..57baac6c0d40 100644
--- a/drivers/staging/iio/meter/ade7753.c
+++ b/drivers/staging/iio/meter/ade7753.c
@@ -582,3 +582,4 @@ module_spi_driver(ade7753_driver);
MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
MODULE_DESCRIPTION("Analog Devices ADE7753/6 Single-Phase Multifunction Meter");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:ade7753");
diff --git a/drivers/staging/iio/meter/ade7754.c b/drivers/staging/iio/meter/ade7754.c
index 15c98cde76d1..8d81c92007e9 100644
--- a/drivers/staging/iio/meter/ade7754.c
+++ b/drivers/staging/iio/meter/ade7754.c
@@ -605,3 +605,4 @@ module_spi_driver(ade7754_driver);
MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
MODULE_DESCRIPTION("Analog Devices ADE7754 Polyphase Multifunction Energy Metering IC Driver");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:ad7754");
diff --git a/drivers/staging/iio/meter/ade7758_core.c b/drivers/staging/iio/meter/ade7758_core.c
index 39338bcb1872..dcb20294dfe8 100644
--- a/drivers/staging/iio/meter/ade7758_core.c
+++ b/drivers/staging/iio/meter/ade7758_core.c
@@ -20,7 +20,7 @@
#include "../iio.h"
#include "../sysfs.h"
-#include "../buffer_generic.h"
+#include "../buffer.h"
#include "meter.h"
#include "ade7758.h"
@@ -663,63 +663,63 @@ static const struct attribute_group ade7758_attribute_group = {
static struct iio_chan_spec ade7758_channels[] = {
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, "raw", 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
AD7758_WT(AD7758_PHASE_A, AD7758_VOLTAGE),
0, IIO_ST('s', 24, 32, 0), 0),
IIO_CHAN(IIO_CURRENT, 0, 1, 0, "raw", 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
AD7758_WT(AD7758_PHASE_A, AD7758_CURRENT),
1, IIO_ST('s', 24, 32, 0), 0),
IIO_CHAN(IIO_POWER, 0, 1, 0, "apparent_raw", 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
AD7758_WT(AD7758_PHASE_A, AD7758_APP_PWR),
2, IIO_ST('s', 24, 32, 0), 0),
IIO_CHAN(IIO_POWER, 0, 1, 0, "active_raw", 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
AD7758_WT(AD7758_PHASE_A, AD7758_ACT_PWR),
3, IIO_ST('s', 24, 32, 0), 0),
IIO_CHAN(IIO_POWER, 0, 1, 0, "reactive_raw", 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
AD7758_WT(AD7758_PHASE_A, AD7758_REACT_PWR),
4, IIO_ST('s', 24, 32, 0), 0),
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, "raw", 1, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
AD7758_WT(AD7758_PHASE_B, AD7758_VOLTAGE),
5, IIO_ST('s', 24, 32, 0), 0),
IIO_CHAN(IIO_CURRENT, 0, 1, 0, "raw", 1, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
AD7758_WT(AD7758_PHASE_B, AD7758_CURRENT),
6, IIO_ST('s', 24, 32, 0), 0),
IIO_CHAN(IIO_POWER, 0, 1, 0, "apparent_raw", 1, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
AD7758_WT(AD7758_PHASE_B, AD7758_APP_PWR),
7, IIO_ST('s', 24, 32, 0), 0),
IIO_CHAN(IIO_POWER, 0, 1, 0, "active_raw", 1, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
AD7758_WT(AD7758_PHASE_B, AD7758_ACT_PWR),
8, IIO_ST('s', 24, 32, 0), 0),
IIO_CHAN(IIO_POWER, 0, 1, 0, "reactive_raw", 1, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
AD7758_WT(AD7758_PHASE_B, AD7758_REACT_PWR),
9, IIO_ST('s', 24, 32, 0), 0),
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, "raw", 2, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
AD7758_WT(AD7758_PHASE_C, AD7758_VOLTAGE),
10, IIO_ST('s', 24, 32, 0), 0),
IIO_CHAN(IIO_CURRENT, 0, 1, 0, "raw", 2, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
AD7758_WT(AD7758_PHASE_C, AD7758_CURRENT),
11, IIO_ST('s', 24, 32, 0), 0),
IIO_CHAN(IIO_POWER, 0, 1, 0, "apparent_raw", 2, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
AD7758_WT(AD7758_PHASE_C, AD7758_APP_PWR),
12, IIO_ST('s', 24, 32, 0), 0),
IIO_CHAN(IIO_POWER, 0, 1, 0, "active_raw", 2, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
AD7758_WT(AD7758_PHASE_C, AD7758_ACT_PWR),
13, IIO_ST('s', 24, 32, 0), 0),
IIO_CHAN(IIO_POWER, 0, 1, 0, "reactive_raw", 2, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
AD7758_WT(AD7758_PHASE_C, AD7758_REACT_PWR),
14, IIO_ST('s', 24, 32, 0), 0),
IIO_CHAN_SOFT_TIMESTAMP(15),
@@ -746,12 +746,12 @@ static int __devinit ade7758_probe(struct spi_device *spi)
spi_set_drvdata(spi, indio_dev);
/* Allocate the comms buffers */
- st->rx = kzalloc(sizeof(*st->rx)*ADE7758_MAX_RX, GFP_KERNEL);
+ st->rx = kcalloc(ADE7758_MAX_RX, sizeof(*st->rx), GFP_KERNEL);
if (st->rx == NULL) {
ret = -ENOMEM;
goto error_free_dev;
}
- st->tx = kzalloc(sizeof(*st->tx)*ADE7758_MAX_TX, GFP_KERNEL);
+ st->tx = kcalloc(ADE7758_MAX_TX, sizeof(*st->tx), GFP_KERNEL);
if (st->tx == NULL) {
ret = -ENOMEM;
goto error_free_rx;
@@ -843,6 +843,7 @@ static const struct spi_device_id ade7758_id[] = {
{"ade7758", 0},
{}
};
+MODULE_DEVICE_TABLE(spi, ade7758_id);
static struct spi_driver ade7758_driver = {
.driver = {
diff --git a/drivers/staging/iio/meter/ade7758_ring.c b/drivers/staging/iio/meter/ade7758_ring.c
index 00fa2ac5c459..f29f2b278fe4 100644
--- a/drivers/staging/iio/meter/ade7758_ring.c
+++ b/drivers/staging/iio/meter/ade7758_ring.c
@@ -67,7 +67,7 @@ static irqreturn_t ade7758_trigger_handler(int irq, void *p)
s64 dat64[2];
u32 *dat32 = (u32 *)dat64;
- if (ring->scan_count)
+ if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
if (ade7758_spi_read_burst(&indio_dev->dev) >= 0)
*dat32 = get_unaligned_be32(&st->rx_buf[5]) & 0xFFFFFF;
@@ -96,10 +96,11 @@ static int ade7758_ring_preenable(struct iio_dev *indio_dev)
size_t d_size;
unsigned channel;
- if (!ring->scan_count)
+ if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
return -EINVAL;
- channel = find_first_bit(ring->scan_mask, indio_dev->masklength);
+ channel = find_first_bit(indio_dev->active_scan_mask,
+ indio_dev->masklength);
d_size = st->ade7758_ring_channels[channel].scan_type.storagebits / 8;
@@ -145,8 +146,7 @@ int ade7758_configure_ring(struct iio_dev *indio_dev)
/* Effectively select the ring buffer implementation */
indio_dev->buffer->access = &ring_sw_access_funcs;
- indio_dev->buffer->setup_ops = &ade7758_ring_setup_ops;
- indio_dev->buffer->owner = THIS_MODULE;
+ indio_dev->setup_ops = &ade7758_ring_setup_ops;
indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
&ade7758_trigger_handler,
diff --git a/drivers/staging/iio/meter/ade7759.c b/drivers/staging/iio/meter/ade7759.c
index cfa2a5eff122..0beab478dcd9 100644
--- a/drivers/staging/iio/meter/ade7759.c
+++ b/drivers/staging/iio/meter/ade7759.c
@@ -526,3 +526,4 @@ module_spi_driver(ade7759_driver);
MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
MODULE_DESCRIPTION("Analog Devices ADE7759 Active Energy Metering IC Driver");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:ad7759");
diff --git a/drivers/staging/iio/meter/ade7854-spi.c b/drivers/staging/iio/meter/ade7854-spi.c
index c485a79aeec3..81121862c1bd 100644
--- a/drivers/staging/iio/meter/ade7854-spi.c
+++ b/drivers/staging/iio/meter/ade7854-spi.c
@@ -343,6 +343,7 @@ static const struct spi_device_id ade7854_id[] = {
{ "ade7878", 0 },
{ }
};
+MODULE_DEVICE_TABLE(spi, ade7854_id);
static struct spi_driver ade7854_driver = {
.driver = {
diff --git a/drivers/staging/iio/resolver/ad2s1200.c b/drivers/staging/iio/resolver/ad2s1200.c
index 1c6a02bfd45d..d8ce854c1897 100644
--- a/drivers/staging/iio/resolver/ad2s1200.c
+++ b/drivers/staging/iio/resolver/ad2s1200.c
@@ -160,6 +160,7 @@ static const struct spi_device_id ad2s1200_id[] = {
{ "ad2s1205" },
{}
};
+MODULE_DEVICE_TABLE(spi, ad2s1200_id);
static struct spi_driver ad2s1200_driver = {
.driver = {
diff --git a/drivers/staging/iio/resolver/ad2s1210.c b/drivers/staging/iio/resolver/ad2s1210.c
index ff1b3316d016..c439fcf72be7 100644
--- a/drivers/staging/iio/resolver/ad2s1210.c
+++ b/drivers/staging/iio/resolver/ad2s1210.c
@@ -749,6 +749,7 @@ static const struct spi_device_id ad2s1210_id[] = {
{ "ad2s1210" },
{}
};
+MODULE_DEVICE_TABLE(spi, ad2s1210_id);
static struct spi_driver ad2s1210_driver = {
.driver = {
diff --git a/drivers/staging/iio/resolver/ad2s90.c b/drivers/staging/iio/resolver/ad2s90.c
index 6d0794389e74..2a86f582ddf1 100644
--- a/drivers/staging/iio/resolver/ad2s90.c
+++ b/drivers/staging/iio/resolver/ad2s90.c
@@ -109,6 +109,7 @@ static const struct spi_device_id ad2s90_id[] = {
{ "ad2s90" },
{}
};
+MODULE_DEVICE_TABLE(spi, ad2s90_id);
static struct spi_driver ad2s90_driver = {
.driver = {
diff --git a/drivers/staging/iio/ring_sw.c b/drivers/staging/iio/ring_sw.c
index 66a34addb75f..3e24ec455854 100644
--- a/drivers/staging/iio/ring_sw.c
+++ b/drivers/staging/iio/ring_sw.c
@@ -23,11 +23,8 @@
* @data: the ring buffer memory
* @read_p: read pointer (oldest available)
* @write_p: write pointer
- * @last_written_p: read pointer (newest available)
* @half_p: half buffer length behind write_p (event generation)
- * @use_count: reference count to prevent resizing when in use
* @update_needed: flag to indicate a change in size requested
- * @use_lock: lock to prevent change in size when in use
*
* Note that the first element of all ring buffers must be a
* struct iio_buffer.
@@ -37,12 +34,9 @@ struct iio_sw_ring_buffer {
unsigned char *data;
unsigned char *read_p;
unsigned char *write_p;
- unsigned char *last_written_p;
/* used to act as a point at which to signal an event */
unsigned char *half_p;
- int use_count;
int update_needed;
- spinlock_t use_lock;
};
#define iio_to_sw_ring(r) container_of(r, struct iio_sw_ring_buffer, buf)
@@ -56,38 +50,15 @@ static inline int __iio_allocate_sw_ring_buffer(struct iio_sw_ring_buffer *ring,
ring->data = kmalloc(length*ring->buf.bytes_per_datum, GFP_ATOMIC);
ring->read_p = NULL;
ring->write_p = NULL;
- ring->last_written_p = NULL;
ring->half_p = NULL;
return ring->data ? 0 : -ENOMEM;
}
-static inline void __iio_init_sw_ring_buffer(struct iio_sw_ring_buffer *ring)
-{
- spin_lock_init(&ring->use_lock);
-}
-
static inline void __iio_free_sw_ring_buffer(struct iio_sw_ring_buffer *ring)
{
kfree(ring->data);
}
-static void iio_mark_sw_rb_in_use(struct iio_buffer *r)
-{
- struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
- spin_lock(&ring->use_lock);
- ring->use_count++;
- spin_unlock(&ring->use_lock);
-}
-
-static void iio_unmark_sw_rb_in_use(struct iio_buffer *r)
-{
- struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
- spin_lock(&ring->use_lock);
- ring->use_count--;
- spin_unlock(&ring->use_lock);
-}
-
-
/* Ring buffer related functionality */
/* Store to ring is typically called in the bh of a data ready interrupt handler
* in the device driver */
@@ -115,7 +86,6 @@ static int iio_store_to_sw_ring(struct iio_sw_ring_buffer *ring,
* Always valid as either points to latest or second latest value.
* Before this runs it is null and read attempts fail with -EAGAIN.
*/
- ring->last_written_p = ring->write_p;
barrier();
/* temp_ptr used to ensure we never have an invalid pointer
* it may be slightly lagging, but never invalid
@@ -174,6 +144,7 @@ static int iio_read_first_n_sw_rb(struct iio_buffer *r,
u8 *initial_read_p, *initial_write_p, *current_read_p, *end_read_p;
u8 *data;
int ret, max_copied, bytes_to_rip, dead_offset;
+ size_t data_available, buffer_size;
/* A userspace program has probably made an error if it tries to
* read something that is not a whole number of bpds.
@@ -186,9 +157,11 @@ static int iio_read_first_n_sw_rb(struct iio_buffer *r,
n, ring->buf.bytes_per_datum);
goto error_ret;
}
+
+ buffer_size = ring->buf.bytes_per_datum*ring->buf.length;
+
/* Limit size to whole of ring buffer */
- bytes_to_rip = min((size_t)(ring->buf.bytes_per_datum*ring->buf.length),
- n);
+ bytes_to_rip = min_t(size_t, buffer_size, n);
data = kmalloc(bytes_to_rip, GFP_KERNEL);
if (data == NULL) {
@@ -217,38 +190,24 @@ static int iio_read_first_n_sw_rb(struct iio_buffer *r,
goto error_free_data_cpy;
}
- if (initial_write_p >= initial_read_p + bytes_to_rip) {
- /* write_p is greater than necessary, all is easy */
- max_copied = bytes_to_rip;
- memcpy(data, initial_read_p, max_copied);
- end_read_p = initial_read_p + max_copied;
- } else if (initial_write_p > initial_read_p) {
- /*not enough data to cpy */
- max_copied = initial_write_p - initial_read_p;
+ if (initial_write_p >= initial_read_p)
+ data_available = initial_write_p - initial_read_p;
+ else
+ data_available = buffer_size - (initial_read_p - initial_write_p);
+
+ if (data_available < bytes_to_rip)
+ bytes_to_rip = data_available;
+
+ if (initial_read_p + bytes_to_rip >= ring->data + buffer_size) {
+ max_copied = ring->data + buffer_size - initial_read_p;
memcpy(data, initial_read_p, max_copied);
- end_read_p = initial_write_p;
+ memcpy(data + max_copied, ring->data, bytes_to_rip - max_copied);
+ end_read_p = ring->data + bytes_to_rip - max_copied;
} else {
- /* going through 'end' of ring buffer */
- max_copied = ring->data
- + ring->buf.length*ring->buf.bytes_per_datum - initial_read_p;
- memcpy(data, initial_read_p, max_copied);
- /* possible we are done if we align precisely with end */
- if (max_copied == bytes_to_rip)
- end_read_p = ring->data;
- else if (initial_write_p
- > ring->data + bytes_to_rip - max_copied) {
- /* enough data to finish */
- memcpy(data + max_copied, ring->data,
- bytes_to_rip - max_copied);
- max_copied = bytes_to_rip;
- end_read_p = ring->data + (bytes_to_rip - max_copied);
- } else { /* not enough data */
- memcpy(data + max_copied, ring->data,
- initial_write_p - ring->data);
- max_copied += initial_write_p - ring->data;
- end_read_p = initial_write_p;
- }
+ memcpy(data, initial_read_p, bytes_to_rip);
+ end_read_p = initial_read_p + bytes_to_rip;
}
+
/* Now to verify which section was cleanly copied - i.e. how far
* read pointer has been pushed */
current_read_p = ring->read_p;
@@ -256,15 +215,14 @@ static int iio_read_first_n_sw_rb(struct iio_buffer *r,
if (initial_read_p <= current_read_p)
dead_offset = current_read_p - initial_read_p;
else
- dead_offset = ring->buf.length*ring->buf.bytes_per_datum
- - (initial_read_p - current_read_p);
+ dead_offset = buffer_size - (initial_read_p - current_read_p);
/* possible issue if the initial write has been lapped or indeed
* the point we were reading to has been passed */
/* No valid data read.
* In this case the read pointer is already correct having been
* pushed further than we would look. */
- if (max_copied - dead_offset < 0) {
+ if (bytes_to_rip - dead_offset < 0) {
ret = 0;
goto error_free_data_cpy;
}
@@ -280,7 +238,7 @@ static int iio_read_first_n_sw_rb(struct iio_buffer *r,
while (ring->read_p != end_read_p)
ring->read_p = end_read_p;
- ret = max_copied - dead_offset;
+ ret = bytes_to_rip - dead_offset;
if (copy_to_user(buf, data + dead_offset, ret)) {
ret = -EFAULT;
@@ -305,52 +263,18 @@ static int iio_store_to_sw_rb(struct iio_buffer *r,
return iio_store_to_sw_ring(ring, data, timestamp);
}
-static int iio_read_last_from_sw_ring(struct iio_sw_ring_buffer *ring,
- unsigned char *data)
-{
- unsigned char *last_written_p_copy;
-
- iio_mark_sw_rb_in_use(&ring->buf);
-again:
- barrier();
- last_written_p_copy = ring->last_written_p;
- barrier(); /*unnessecary? */
- /* Check there is anything here */
- if (last_written_p_copy == NULL)
- return -EAGAIN;
- memcpy(data, last_written_p_copy, ring->buf.bytes_per_datum);
-
- if (unlikely(ring->last_written_p != last_written_p_copy))
- goto again;
-
- iio_unmark_sw_rb_in_use(&ring->buf);
- return 0;
-}
-
-static int iio_read_last_from_sw_rb(struct iio_buffer *r,
- unsigned char *data)
-{
- return iio_read_last_from_sw_ring(iio_to_sw_ring(r), data);
-}
-
static int iio_request_update_sw_rb(struct iio_buffer *r)
{
int ret = 0;
struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
r->stufftoread = false;
- spin_lock(&ring->use_lock);
if (!ring->update_needed)
goto error_ret;
- if (ring->use_count) {
- ret = -EAGAIN;
- goto error_ret;
- }
__iio_free_sw_ring_buffer(ring);
ret = __iio_allocate_sw_ring_buffer(ring, ring->buf.bytes_per_datum,
ring->buf.length);
error_ret:
- spin_unlock(&ring->use_lock);
return ret;
}
@@ -360,12 +284,18 @@ static int iio_get_bytes_per_datum_sw_rb(struct iio_buffer *r)
return ring->buf.bytes_per_datum;
}
+static int iio_mark_update_needed_sw_rb(struct iio_buffer *r)
+{
+ struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
+ ring->update_needed = true;
+ return 0;
+}
+
static int iio_set_bytes_per_datum_sw_rb(struct iio_buffer *r, size_t bpd)
{
if (r->bytes_per_datum != bpd) {
r->bytes_per_datum = bpd;
- if (r->access->mark_param_change)
- r->access->mark_param_change(r);
+ iio_mark_update_needed_sw_rb(r);
}
return 0;
}
@@ -379,27 +309,17 @@ static int iio_set_length_sw_rb(struct iio_buffer *r, int length)
{
if (r->length != length) {
r->length = length;
- if (r->access->mark_param_change)
- r->access->mark_param_change(r);
+ iio_mark_update_needed_sw_rb(r);
}
return 0;
}
-static int iio_mark_update_needed_sw_rb(struct iio_buffer *r)
-{
- struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
- ring->update_needed = true;
- return 0;
-}
-
static IIO_BUFFER_ENABLE_ATTR;
-static IIO_BUFFER_BYTES_PER_DATUM_ATTR;
static IIO_BUFFER_LENGTH_ATTR;
/* Standard set of ring buffer attributes */
static struct attribute *iio_ring_attributes[] = {
&dev_attr_length.attr,
- &dev_attr_bytes_per_datum.attr,
&dev_attr_enable.attr,
NULL,
};
@@ -419,8 +339,7 @@ struct iio_buffer *iio_sw_rb_allocate(struct iio_dev *indio_dev)
return NULL;
ring->update_needed = true;
buf = &ring->buf;
- iio_buffer_init(buf, indio_dev);
- __iio_init_sw_ring_buffer(ring);
+ iio_buffer_init(buf);
buf->attrs = &iio_ring_attribute_group;
return buf;
@@ -434,12 +353,8 @@ void iio_sw_rb_free(struct iio_buffer *r)
EXPORT_SYMBOL(iio_sw_rb_free);
const struct iio_buffer_access_funcs ring_sw_access_funcs = {
- .mark_in_use = &iio_mark_sw_rb_in_use,
- .unmark_in_use = &iio_unmark_sw_rb_in_use,
.store_to = &iio_store_to_sw_rb,
- .read_last = &iio_read_last_from_sw_rb,
.read_first_n = &iio_read_first_n_sw_rb,
- .mark_param_change = &iio_mark_update_needed_sw_rb,
.request_update = &iio_request_update_sw_rb,
.get_bytes_per_datum = &iio_get_bytes_per_datum_sw_rb,
.set_bytes_per_datum = &iio_set_bytes_per_datum_sw_rb,
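
For reference, a minimal user-space sketch of the wrap-around copy that the reworked read path above performs: copy up to the end of the backing store, then continue from its start. All names (ring_read, buf, read_off, avail) are illustrative, not the driver's own identifiers, and the sketch assumes read_off is already within the buffer.

#include <stddef.h>
#include <string.h>

/* Wrap-aware read from a circular buffer backed by 'buf' of 'buf_size'
 * bytes. 'read_off' is the current read offset, 'avail' the number of
 * valid bytes, 'n' the caller's request. Returns the bytes copied.
 */
static size_t ring_read(const unsigned char *buf, size_t buf_size,
			size_t read_off, size_t avail,
			unsigned char *dst, size_t n)
{
	size_t to_copy = n < avail ? n : avail;	/* clamp to available data */
	size_t first = buf_size - read_off;	/* bytes before the wrap point */

	if (first >= to_copy) {
		/* request fits without crossing the end of the buffer */
		memcpy(dst, buf + read_off, to_copy);
	} else {
		/* copy the tail, then wrap to the start for the rest */
		memcpy(dst, buf + read_off, first);
		memcpy(dst + first, buf, to_copy - first);
	}
	return to_copy;
}

Computing the available data once and clamping the request up front is what lets the patched code collapse the earlier multi-branch copy logic into a single wrap/no-wrap decision.
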
diff --git a/drivers/staging/iio/ring_sw.h b/drivers/staging/iio/ring_sw.h
index a3e15784c225..e6a6e2c40960 100644
--- a/drivers/staging/iio/ring_sw.h
+++ b/drivers/staging/iio/ring_sw.h
@@ -23,7 +23,7 @@
#ifndef _IIO_RING_SW_H_
#define _IIO_RING_SW_H_
-#include "buffer_generic.h"
+#include "buffer.h"
/**
* ring_sw_access_funcs - access functions for a software ring buffer
diff --git a/drivers/staging/iio/sysfs.h b/drivers/staging/iio/sysfs.h
index 868952b5ba63..bfedb73b850e 100644
--- a/drivers/staging/iio/sysfs.h
+++ b/drivers/staging/iio/sysfs.h
@@ -114,47 +114,4 @@ struct iio_const_attr {
#define IIO_CONST_ATTR_TEMP_SCALE(_string) \
IIO_CONST_ATTR(in_temp_scale, _string)
-enum iio_event_type {
- IIO_EV_TYPE_THRESH,
- IIO_EV_TYPE_MAG,
- IIO_EV_TYPE_ROC,
- IIO_EV_TYPE_THRESH_ADAPTIVE,
- IIO_EV_TYPE_MAG_ADAPTIVE,
-};
-
-enum iio_event_direction {
- IIO_EV_DIR_EITHER,
- IIO_EV_DIR_RISING,
- IIO_EV_DIR_FALLING,
-};
-
-#define IIO_EVENT_CODE(chan_type, diff, modifier, direction, \
- type, chan, chan1, chan2) \
- (((u64)type << 56) | ((u64)diff << 55) | \
- ((u64)direction << 48) | ((u64)modifier << 40) | \
- ((u64)chan_type << 32) | (chan2 << 16) | chan1 | chan)
-
-#define IIO_EV_DIR_MAX 4
-#define IIO_EV_BIT(type, direction) \
- (1 << (type*IIO_EV_DIR_MAX + direction))
-
-#define IIO_MOD_EVENT_CODE(channelclass, number, modifier, \
- type, direction) \
- IIO_EVENT_CODE(channelclass, 0, modifier, direction, type, number, 0, 0)
-
-#define IIO_UNMOD_EVENT_CODE(channelclass, number, type, direction) \
- IIO_EVENT_CODE(channelclass, 0, 0, direction, type, number, 0, 0)
-
-#define IIO_EVENT_CODE_EXTRACT_TYPE(mask) ((mask >> 56) & 0xFF)
-
-#define IIO_EVENT_CODE_EXTRACT_DIR(mask) ((mask >> 48) & 0xCF)
-
-#define IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(mask) ((mask >> 32) & 0xFF)
-
-/* Event code number extraction depends on which type of event we have.
- * Perhaps review this function in the future*/
-#define IIO_EVENT_CODE_EXTRACT_NUM(mask) (mask & 0xFFFF)
-
-#define IIO_EVENT_CODE_EXTRACT_MODIFIER(mask) ((mask >> 40) & 0xFF)
-
#endif /* _INDUSTRIAL_IO_SYSFS_H_ */
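
The macros removed above pack an event description into a single 64-bit code, roughly: event type in the top byte, direction below it, then modifier, then channel type, with the channel number in the low bits. The following is a simplified, stand-alone illustration of that packing and unpacking; the field values are made up for the example and the layout is abbreviated (the real macros also carry a differential bit and a wider channel encoding).

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* illustrative field values only */
	uint64_t type = 1, dir = 2, mod = 0, chan_type = 3, chan = 5;

	/* pack fields into one 64-bit event code */
	uint64_t code = (type << 56) | (dir << 48) | (mod << 40) |
			(chan_type << 32) | chan;

	/* unpack them again, mirroring the EXTRACT-style helpers */
	printf("type=%llu dir=%llu chan=%llu\n",
	       (unsigned long long)((code >> 56) & 0xFF),
	       (unsigned long long)((code >> 48) & 0xFF),
	       (unsigned long long)(code & 0xFFFF));
	return 0;
}
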
diff --git a/drivers/staging/iio/trigger.h b/drivers/staging/iio/trigger.h
index 5cc42a655c88..1cfca231db8f 100644
--- a/drivers/staging/iio/trigger.h
+++ b/drivers/staging/iio/trigger.h
@@ -46,7 +46,6 @@ struct iio_trigger_ops {
* @private_data: [DRIVER] device specific data
* @list: [INTERN] used in maintenance of global trigger list
* @alloc_list: [DRIVER] used for driver specific trigger list
- * @owner: [DRIVER] used to monitor usage count of the trigger.
* @use_count: use count for the trigger
* @subirq_chip: [INTERN] associate 'virtual' irq chip.
* @subirq_base: [INTERN] base number for irqs provided by trigger.
@@ -63,7 +62,6 @@ struct iio_trigger {
void *private_data;
struct list_head list;
struct list_head alloc_list;
- struct module *owner;
int use_count;
struct irq_chip subirq_chip;
diff --git a/drivers/staging/iio/trigger/iio-trig-periodic-rtc.c b/drivers/staging/iio/trigger/iio-trig-periodic-rtc.c
index d35d085da949..bd7416b2c561 100644
--- a/drivers/staging/iio/trigger/iio-trig-periodic-rtc.c
+++ b/drivers/staging/iio/trigger/iio-trig-periodic-rtc.c
@@ -125,7 +125,6 @@ static int iio_trig_periodic_rtc_probe(struct platform_device *dev)
goto error_put_trigger_and_remove_from_list;
}
trig->private_data = trig_info;
- trig->owner = THIS_MODULE;
trig->ops = &iio_prtc_trigger_ops;
/* RTC access */
trig_info->rtc
diff --git a/drivers/staging/iio/types.h b/drivers/staging/iio/types.h
new file mode 100644
index 000000000000..b7d26474ad06
--- /dev/null
+++ b/drivers/staging/iio/types.h
@@ -0,0 +1,49 @@
+/* industrial I/O data types needed both in and out of kernel
+ *
+ * Copyright (c) 2008 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef _IIO_TYPES_H_
+#define _IIO_TYPES_H_
+
+enum iio_chan_type {
+ /* real channel types */
+ IIO_VOLTAGE,
+ IIO_CURRENT,
+ IIO_POWER,
+ IIO_ACCEL,
+ IIO_ANGL_VEL,
+ IIO_MAGN,
+ IIO_LIGHT,
+ IIO_INTENSITY,
+ IIO_PROXIMITY,
+ IIO_TEMP,
+ IIO_INCLI,
+ IIO_ROT,
+ IIO_ANGL,
+ IIO_TIMESTAMP,
+ IIO_CAPACITANCE,
+};
+
+enum iio_modifier {
+ IIO_NO_MOD,
+ IIO_MOD_X,
+ IIO_MOD_Y,
+ IIO_MOD_Z,
+ IIO_MOD_X_AND_Y,
+ IIO_MOD_X_AND_Z,
+ IIO_MOD_Y_AND_Z,
+ IIO_MOD_X_AND_Y_AND_Z,
+ IIO_MOD_X_OR_Y,
+ IIO_MOD_X_OR_Z,
+ IIO_MOD_Y_OR_Z,
+ IIO_MOD_X_OR_Y_OR_Z,
+ IIO_MOD_LIGHT_BOTH,
+ IIO_MOD_LIGHT_IR,
+};
+
+#endif /* _IIO_TYPES_H_ */
diff --git a/drivers/staging/intel_sst/Kconfig b/drivers/staging/intel_sst/Kconfig
deleted file mode 100644
index 82391077b384..000000000000
--- a/drivers/staging/intel_sst/Kconfig
+++ /dev/null
@@ -1,19 +0,0 @@
-config SND_INTEL_SST
- tristate "Intel SST (LPE) Driver"
- depends on X86 && INTEL_SCU_IPC
- default n
- help
- Say Y here to include support for the Intel(R) MID SST DSP driver
- On other PC platforms if you are unsure answer 'N'
-
-config SND_INTELMID
- tristate "Intel MID sound card driver"
- depends on SOUND && SND
- select SND_PCM
- select SND_SEQUENCER
- select SND_JACK
- depends on SND_INTEL_SST
- default n
- help
- Say Y here to include support for the Intel(R) MID sound card driver
- On other PC platforms if you are unsure answer 'N'
diff --git a/drivers/staging/intel_sst/Makefile b/drivers/staging/intel_sst/Makefile
deleted file mode 100644
index 9eb7c158bb65..000000000000
--- a/drivers/staging/intel_sst/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-#
-# Makefile for Intel MID Audio drivers
-#
-snd-intel-sst-y := intel_sst.o intel_sst_ipc.o intel_sst_stream.o intel_sst_drv_interface.o intel_sst_dsp.o intel_sst_pvt.o intel_sst_stream_encoded.o intel_sst_app_interface.o
-snd-intelmid-y := intelmid.o intelmid_msic_control.o intelmid_ctrl.o intelmid_pvt.o intelmid_v0_control.o intelmid_v1_control.o intelmid_v2_control.o
-obj-$(CONFIG_SND_INTEL_SST) += snd-intel-sst.o
-obj-$(CONFIG_SND_INTELMID) += snd-intelmid.o
diff --git a/drivers/staging/intel_sst/TODO b/drivers/staging/intel_sst/TODO
deleted file mode 100644
index c733d7011091..000000000000
--- a/drivers/staging/intel_sst/TODO
+++ /dev/null
@@ -1,13 +0,0 @@
-TODO
-----
-
-Get the memrar driver cleaned up and upstream (dependency blocking SST)
-Replace long/short press with two virtual buttons
-Review the printks and kill off any left over ST_ERR: messages
-Review the misc device ioctls for 32/64bit safety and sanity
-Review the misc device ioctls for size safety depending on config and decide
- if space/unused areas should be left
-What the sound folks turn up on full review
-Using the ALSA frameworks properly
-
-
diff --git a/drivers/staging/intel_sst/intel_sst.c b/drivers/staging/intel_sst/intel_sst.c
deleted file mode 100644
index ff9aaec0557f..000000000000
--- a/drivers/staging/intel_sst/intel_sst.c
+++ /dev/null
@@ -1,649 +0,0 @@
-/*
- * intel_sst.c - Intel SST Driver for audio engine
- *
- * Copyright (C) 2008-10 Intel Corp
- * Authors: Vinod Koul <vinod.koul@intel.com>
- * Harsha Priya <priya.harsha@intel.com>
- * Dharageswari R <dharageswari.r@intel.com>
- * KP Jeeja <jeeja.kp@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This driver exposes the audio engine functionalities to the ALSA
- * and middleware.
- *
- * This file contains all init functions
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/pci.h>
-#include <linux/fs.h>
-#include <linux/interrupt.h>
-#include <linux/firmware.h>
-#include <linux/miscdevice.h>
-#include <linux/pm_runtime.h>
-#include <linux/module.h>
-#include <asm/mrst.h>
-#include "intel_sst.h"
-#include "intel_sst_ioctl.h"
-#include "intel_sst_fw_ipc.h"
-#include "intel_sst_common.h"
-
-
-MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>");
-MODULE_AUTHOR("Harsha Priya <priya.harsha@intel.com>");
-MODULE_AUTHOR("Dharageswari R <dharageswari.r@intel.com>");
-MODULE_AUTHOR("KP Jeeja <jeeja.kp@intel.com>");
-MODULE_DESCRIPTION("Intel (R) SST(R) Audio Engine Driver");
-MODULE_LICENSE("GPL v2");
-MODULE_VERSION(SST_DRIVER_VERSION);
-
-struct intel_sst_drv *sst_drv_ctx;
-static struct mutex drv_ctx_lock;
-struct class *sst_class;
-
-/* fops Routines */
-static const struct file_operations intel_sst_fops = {
- .owner = THIS_MODULE,
- .open = intel_sst_open,
- .release = intel_sst_release,
- .read = intel_sst_read,
- .write = intel_sst_write,
- .unlocked_ioctl = intel_sst_ioctl,
- .mmap = intel_sst_mmap,
- .aio_read = intel_sst_aio_read,
- .aio_write = intel_sst_aio_write,
-};
-static const struct file_operations intel_sst_fops_cntrl = {
- .owner = THIS_MODULE,
- .open = intel_sst_open_cntrl,
- .release = intel_sst_release_cntrl,
- .unlocked_ioctl = intel_sst_ioctl,
-};
-
-static struct miscdevice lpe_dev = {
- .minor = MISC_DYNAMIC_MINOR,/* dynamic allocation */
- .name = "intel_sst",/* /dev/intel_sst */
- .fops = &intel_sst_fops
-};
-
-
-static struct miscdevice lpe_ctrl = {
- .minor = MISC_DYNAMIC_MINOR,/* dynamic allocation */
- .name = "intel_sst_ctrl",/* /dev/intel_sst_ctrl */
- .fops = &intel_sst_fops_cntrl
-};
-
-/**
-* intel_sst_interrupt - Interrupt service routine for SST
-*
-* @irq: irq number of interrupt
-* @context: pointer to device structre
-*
-* This function is called by OS when SST device raises
-* an interrupt. This will be result of write in IPC register
-* Source can be busy or done interrupt
-*/
-static irqreturn_t intel_sst_interrupt(int irq, void *context)
-{
- union interrupt_reg isr;
- union ipc_header header;
- union interrupt_reg imr;
- struct intel_sst_drv *drv = (struct intel_sst_drv *) context;
- unsigned int size = 0, str_id;
- struct stream_info *stream ;
-
- /* Do not handle interrupt in suspended state */
- if (drv->sst_state == SST_SUSPENDED)
- return IRQ_NONE;
- /* Interrupt arrived, check src */
- isr.full = sst_shim_read(drv->shim, SST_ISRX);
-
- if (isr.part.busy_interrupt) {
- header.full = sst_shim_read(drv->shim, SST_IPCD);
- if (header.part.msg_id == IPC_SST_PERIOD_ELAPSED) {
- sst_clear_interrupt();
- str_id = header.part.str_id;
- stream = &sst_drv_ctx->streams[str_id];
- if (stream->period_elapsed)
- stream->period_elapsed(stream->pcm_substream);
- return IRQ_HANDLED;
- }
- if (header.part.large)
- size = header.part.data;
- if (header.part.msg_id & REPLY_MSG) {
- sst_drv_ctx->ipc_process_msg.header = header;
- memcpy_fromio(sst_drv_ctx->ipc_process_msg.mailbox,
- drv->mailbox + SST_MAILBOX_RCV, size);
- queue_work(sst_drv_ctx->process_msg_wq,
- &sst_drv_ctx->ipc_process_msg.wq);
- } else {
- sst_drv_ctx->ipc_process_reply.header = header;
- memcpy_fromio(sst_drv_ctx->ipc_process_reply.mailbox,
- drv->mailbox + SST_MAILBOX_RCV, size);
- queue_work(sst_drv_ctx->process_reply_wq,
- &sst_drv_ctx->ipc_process_reply.wq);
- }
- /* mask busy inetrrupt */
- imr.full = sst_shim_read(drv->shim, SST_IMRX);
- imr.part.busy_interrupt = 1;
- sst_shim_write(sst_drv_ctx->shim, SST_IMRX, imr.full);
- return IRQ_HANDLED;
- } else if (isr.part.done_interrupt) {
- /* Clear done bit */
- header.full = sst_shim_read(drv->shim, SST_IPCX);
- header.part.done = 0;
- sst_shim_write(sst_drv_ctx->shim, SST_IPCX, header.full);
- /* write 1 to clear status register */;
- isr.part.done_interrupt = 1;
- /* dummy register for shim workaround */
- sst_shim_write(sst_drv_ctx->shim, SST_ISRX, isr.full);
- queue_work(sst_drv_ctx->post_msg_wq,
- &sst_drv_ctx->ipc_post_msg.wq);
- return IRQ_HANDLED;
- } else
- return IRQ_NONE;
-
-}
-
-
-/*
-* intel_sst_probe - PCI probe function
-*
-* @pci: PCI device structure
-* @pci_id: PCI device ID structure
-*
-* This function is called by OS when a device is found
-* This enables the device, interrupt etc
-*/
-static int __devinit intel_sst_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
-{
- int i, ret = 0;
-
- pr_debug("Probe for DID %x\n", pci->device);
- mutex_lock(&drv_ctx_lock);
- if (sst_drv_ctx) {
- pr_err("Only one sst handle is supported\n");
- mutex_unlock(&drv_ctx_lock);
- return -EBUSY;
- }
-
- sst_drv_ctx = kzalloc(sizeof(*sst_drv_ctx), GFP_KERNEL);
- if (!sst_drv_ctx) {
- pr_err("malloc fail\n");
- mutex_unlock(&drv_ctx_lock);
- return -ENOMEM;
- }
- mutex_unlock(&drv_ctx_lock);
-
- sst_drv_ctx->pci_id = pci->device;
-
- mutex_init(&sst_drv_ctx->stream_lock);
- mutex_init(&sst_drv_ctx->sst_lock);
- sst_drv_ctx->pmic_state = SND_MAD_UN_INIT;
-
- sst_drv_ctx->stream_cnt = 0;
- sst_drv_ctx->encoded_cnt = 0;
- sst_drv_ctx->am_cnt = 0;
- sst_drv_ctx->pb_streams = 0;
- sst_drv_ctx->cp_streams = 0;
- sst_drv_ctx->unique_id = 0;
- sst_drv_ctx->pmic_port_instance = SST_DEFAULT_PMIC_PORT;
-
- INIT_LIST_HEAD(&sst_drv_ctx->ipc_dispatch_list);
- INIT_WORK(&sst_drv_ctx->ipc_post_msg.wq, sst_post_message);
- INIT_WORK(&sst_drv_ctx->ipc_process_msg.wq, sst_process_message);
- INIT_WORK(&sst_drv_ctx->ipc_process_reply.wq, sst_process_reply);
- INIT_WORK(&sst_drv_ctx->mad_ops.wq, sst_process_mad_ops);
- init_waitqueue_head(&sst_drv_ctx->wait_queue);
-
- sst_drv_ctx->mad_wq = create_workqueue("sst_mad_wq");
- if (!sst_drv_ctx->mad_wq)
- goto do_free_drv_ctx;
- sst_drv_ctx->post_msg_wq = create_workqueue("sst_post_msg_wq");
- if (!sst_drv_ctx->post_msg_wq)
- goto free_mad_wq;
- sst_drv_ctx->process_msg_wq = create_workqueue("sst_process_msg_wqq");
- if (!sst_drv_ctx->process_msg_wq)
- goto free_post_msg_wq;
- sst_drv_ctx->process_reply_wq = create_workqueue("sst_proces_reply_wq");
- if (!sst_drv_ctx->process_reply_wq)
- goto free_process_msg_wq;
-
- for (i = 0; i < MAX_ACTIVE_STREAM; i++) {
- sst_drv_ctx->alloc_block[i].sst_id = BLOCK_UNINIT;
- sst_drv_ctx->alloc_block[i].ops_block.condition = false;
- }
- spin_lock_init(&sst_drv_ctx->list_spin_lock);
-
- sst_drv_ctx->max_streams = pci_id->driver_data;
- pr_debug("Got drv data max stream %d\n",
- sst_drv_ctx->max_streams);
- for (i = 1; i <= sst_drv_ctx->max_streams; i++) {
- struct stream_info *stream = &sst_drv_ctx->streams[i];
- INIT_LIST_HEAD(&stream->bufs);
- mutex_init(&stream->lock);
- spin_lock_init(&stream->pcm_lock);
- }
- if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID) {
- sst_drv_ctx->mmap_mem = NULL;
- sst_drv_ctx->mmap_len = SST_MMAP_PAGES * PAGE_SIZE;
- while (sst_drv_ctx->mmap_len > 0) {
- sst_drv_ctx->mmap_mem =
- kzalloc(sst_drv_ctx->mmap_len, GFP_KERNEL);
- if (sst_drv_ctx->mmap_mem) {
- pr_debug("Got memory %p size 0x%x\n",
- sst_drv_ctx->mmap_mem,
- sst_drv_ctx->mmap_len);
- break;
- }
- if (sst_drv_ctx->mmap_len < (SST_MMAP_STEP*PAGE_SIZE)) {
- pr_err("mem alloc fail...abort!!\n");
- ret = -ENOMEM;
- goto free_process_reply_wq;
- }
- sst_drv_ctx->mmap_len -= (SST_MMAP_STEP * PAGE_SIZE);
- pr_debug("mem alloc failed...trying %d\n",
- sst_drv_ctx->mmap_len);
- }
- }
-
- /* Init the device */
- ret = pci_enable_device(pci);
- if (ret) {
- pr_err("device can't be enabled\n");
- goto do_free_mem;
- }
- sst_drv_ctx->pci = pci_dev_get(pci);
- ret = pci_request_regions(pci, SST_DRV_NAME);
- if (ret)
- goto do_disable_device;
- /* map registers */
- /* SST Shim */
- sst_drv_ctx->shim_phy_add = pci_resource_start(pci, 1);
- sst_drv_ctx->shim = pci_ioremap_bar(pci, 1);
- if (!sst_drv_ctx->shim)
- goto do_release_regions;
- pr_debug("SST Shim Ptr %p\n", sst_drv_ctx->shim);
-
- /* Shared SRAM */
- sst_drv_ctx->mailbox = pci_ioremap_bar(pci, 2);
- if (!sst_drv_ctx->mailbox)
- goto do_unmap_shim;
- pr_debug("SRAM Ptr %p\n", sst_drv_ctx->mailbox);
-
- /* IRAM */
- sst_drv_ctx->iram = pci_ioremap_bar(pci, 3);
- if (!sst_drv_ctx->iram)
- goto do_unmap_sram;
- pr_debug("IRAM Ptr %p\n", sst_drv_ctx->iram);
-
- /* DRAM */
- sst_drv_ctx->dram = pci_ioremap_bar(pci, 4);
- if (!sst_drv_ctx->dram)
- goto do_unmap_iram;
- pr_debug("DRAM Ptr %p\n", sst_drv_ctx->dram);
-
- mutex_lock(&sst_drv_ctx->sst_lock);
- sst_drv_ctx->sst_state = SST_UN_INIT;
- mutex_unlock(&sst_drv_ctx->sst_lock);
- /* Register the ISR */
- ret = request_irq(pci->irq, intel_sst_interrupt,
- IRQF_SHARED, SST_DRV_NAME, sst_drv_ctx);
- if (ret)
- goto do_unmap_dram;
- pr_debug("Registered IRQ 0x%x\n", pci->irq);
-
- /*Register LPE Control as misc driver*/
- ret = misc_register(&lpe_ctrl);
- if (ret) {
- pr_err("couldn't register control device\n");
- goto do_free_irq;
- }
-
- if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID) {
- ret = misc_register(&lpe_dev);
- if (ret) {
- pr_err("couldn't register LPE device\n");
- goto do_free_misc;
- }
- } else if (sst_drv_ctx->pci_id == SST_MFLD_PCI_ID) {
- u32 csr;
-
- /*allocate mem for fw context save during suspend*/
- sst_drv_ctx->fw_cntx = kzalloc(FW_CONTEXT_MEM, GFP_KERNEL);
- if (!sst_drv_ctx->fw_cntx) {
- ret = -ENOMEM;
- goto do_free_misc;
- }
- /*setting zero as that is valid mem to restore*/
- sst_drv_ctx->fw_cntx_size = 0;
-
- /*set lpe start clock and ram size*/
- csr = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
- csr |= 0x30060; /*remove the clock ratio after fw fix*/
- sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr);
- }
- sst_drv_ctx->lpe_stalled = 0;
- pci_set_drvdata(pci, sst_drv_ctx);
- pm_runtime_allow(&pci->dev);
- pm_runtime_put_noidle(&pci->dev);
- pr_debug("...successfully done!!!\n");
- return ret;
-
-do_free_misc:
- misc_deregister(&lpe_ctrl);
-do_free_irq:
- free_irq(pci->irq, sst_drv_ctx);
-do_unmap_dram:
- iounmap(sst_drv_ctx->dram);
-do_unmap_iram:
- iounmap(sst_drv_ctx->iram);
-do_unmap_sram:
- iounmap(sst_drv_ctx->mailbox);
-do_unmap_shim:
- iounmap(sst_drv_ctx->shim);
-do_release_regions:
- pci_release_regions(pci);
-do_disable_device:
- pci_disable_device(pci);
-do_free_mem:
- kfree(sst_drv_ctx->mmap_mem);
-free_process_reply_wq:
- destroy_workqueue(sst_drv_ctx->process_reply_wq);
-free_process_msg_wq:
- destroy_workqueue(sst_drv_ctx->process_msg_wq);
-free_post_msg_wq:
- destroy_workqueue(sst_drv_ctx->post_msg_wq);
-free_mad_wq:
- destroy_workqueue(sst_drv_ctx->mad_wq);
-do_free_drv_ctx:
- kfree(sst_drv_ctx);
- sst_drv_ctx = NULL;
- pr_err("Probe failed with %d\n", ret);
- return ret;
-}
-
-/**
-* intel_sst_remove - PCI remove function
-*
-* @pci: PCI device structure
-*
-* This function is called by OS when a device is unloaded
-* This frees the interrupt etc
-*/
-static void __devexit intel_sst_remove(struct pci_dev *pci)
-{
- pm_runtime_get_noresume(&pci->dev);
- pm_runtime_forbid(&pci->dev);
- pci_dev_put(sst_drv_ctx->pci);
- mutex_lock(&sst_drv_ctx->sst_lock);
- sst_drv_ctx->sst_state = SST_UN_INIT;
- mutex_unlock(&sst_drv_ctx->sst_lock);
- misc_deregister(&lpe_ctrl);
- free_irq(pci->irq, sst_drv_ctx);
- iounmap(sst_drv_ctx->dram);
- iounmap(sst_drv_ctx->iram);
- iounmap(sst_drv_ctx->mailbox);
- iounmap(sst_drv_ctx->shim);
- sst_drv_ctx->pmic_state = SND_MAD_UN_INIT;
- if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID) {
- misc_deregister(&lpe_dev);
- kfree(sst_drv_ctx->mmap_mem);
- } else
- kfree(sst_drv_ctx->fw_cntx);
- flush_scheduled_work();
- destroy_workqueue(sst_drv_ctx->process_reply_wq);
- destroy_workqueue(sst_drv_ctx->process_msg_wq);
- destroy_workqueue(sst_drv_ctx->post_msg_wq);
- destroy_workqueue(sst_drv_ctx->mad_wq);
- kfree(pci_get_drvdata(pci));
- sst_drv_ctx = NULL;
- pci_release_regions(pci);
- pci_disable_device(pci);
- pci_set_drvdata(pci, NULL);
-}
-
-void sst_save_dsp_context(void)
-{
- struct snd_sst_ctxt_params fw_context;
- unsigned int pvt_id, i;
- struct ipc_post *msg = NULL;
-
- /*check cpu type*/
- if (sst_drv_ctx->pci_id != SST_MFLD_PCI_ID)
- return;
- /*not supported for rest*/
- if (sst_drv_ctx->sst_state != SST_FW_RUNNING) {
- pr_debug("fw not running no context save ...\n");
- return;
- }
-
- /*send msg to fw*/
- if (sst_create_large_msg(&msg))
- return;
- pvt_id = sst_assign_pvt_id(sst_drv_ctx);
- i = sst_get_block_stream(sst_drv_ctx);
- sst_drv_ctx->alloc_block[i].sst_id = pvt_id;
- sst_fill_header(&msg->header, IPC_IA_GET_FW_CTXT, 1, pvt_id);
- msg->header.part.data = sizeof(fw_context) + sizeof(u32);
- fw_context.address = virt_to_phys((void *)sst_drv_ctx->fw_cntx);
- fw_context.size = FW_CONTEXT_MEM;
- memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
- memcpy(msg->mailbox_data + sizeof(u32),
- &fw_context, sizeof(fw_context));
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- /*wait for reply*/
- if (sst_wait_timeout(sst_drv_ctx, &sst_drv_ctx->alloc_block[i]))
- pr_debug("err fw context save timeout ...\n");
- sst_drv_ctx->alloc_block[i].sst_id = BLOCK_UNINIT;
- pr_debug("fw context saved ...\n");
- return;
-}
-
-/* Power Management */
-/*
-* intel_sst_suspend - PCI suspend function
-*
-* @pci: PCI device structure
-* @state: PM message
-*
-* This function is called by OS when a power event occurs
-*/
-int intel_sst_suspend(struct pci_dev *pci, pm_message_t state)
-{
- union config_status_reg csr;
-
- pr_debug("intel_sst_suspend called\n");
-
- if (sst_drv_ctx->stream_cnt) {
- pr_err("active streams,not able to suspend\n");
- return -EBUSY;
- }
- /*save fw context*/
- sst_save_dsp_context();
- /*Assert RESET on LPE Processor*/
- csr.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
- csr.full = csr.full | 0x2;
- /* Move the SST state to Suspended */
- mutex_lock(&sst_drv_ctx->sst_lock);
- sst_drv_ctx->sst_state = SST_SUSPENDED;
- sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
- mutex_unlock(&sst_drv_ctx->sst_lock);
- pci_set_drvdata(pci, sst_drv_ctx);
- pci_save_state(pci);
- pci_disable_device(pci);
- pci_set_power_state(pci, PCI_D3hot);
- return 0;
-}
-
-/**
-* intel_sst_resume - PCI resume function
-*
-* @pci: PCI device structure
-*
-* This function is called by OS when a power event occurs
-*/
-int intel_sst_resume(struct pci_dev *pci)
-{
- int ret = 0;
-
- pr_debug("intel_sst_resume called\n");
- if (sst_drv_ctx->sst_state != SST_SUSPENDED) {
- pr_err("SST is not in suspended state\n");
- return 0;
- }
- sst_drv_ctx = pci_get_drvdata(pci);
- pci_set_power_state(pci, PCI_D0);
- pci_restore_state(pci);
- ret = pci_enable_device(pci);
- if (ret)
- pr_err("device can't be enabled\n");
-
- mutex_lock(&sst_drv_ctx->sst_lock);
- sst_drv_ctx->sst_state = SST_UN_INIT;
- mutex_unlock(&sst_drv_ctx->sst_lock);
- return 0;
-}
-
-/* The runtime_suspend/resume is pretty much similar to the legacy
- * suspend/resume with the noted exception below:
- * The PCI core takes care of taking the system through D3hot and
- * restoring it back to D0 and so there is no need to duplicate
- * that here.
- */
-static int intel_sst_runtime_suspend(struct device *dev)
-{
- union config_status_reg csr;
-
- pr_debug("intel_sst_runtime_suspend called\n");
- if (sst_drv_ctx->stream_cnt) {
- pr_err("active streams,not able to suspend\n");
- return -EBUSY;
- }
- /*save fw context*/
- sst_save_dsp_context();
- /*Assert RESET on LPE Processor*/
- csr.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
- csr.full = csr.full | 0x2;
- /* Move the SST state to Suspended */
- mutex_lock(&sst_drv_ctx->sst_lock);
- sst_drv_ctx->sst_state = SST_SUSPENDED;
-
- /* Only needed by Medfield */
- if (sst_drv_ctx->pci_id != SST_MRST_PCI_ID)
- sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
- mutex_unlock(&sst_drv_ctx->sst_lock);
- return 0;
-}
-
-static int intel_sst_runtime_resume(struct device *dev)
-{
-
- pr_debug("intel_sst_runtime_resume called\n");
- if (sst_drv_ctx->sst_state != SST_SUSPENDED) {
- pr_err("SST is not in suspended state\n");
- return 0;
- }
-
- mutex_lock(&sst_drv_ctx->sst_lock);
- sst_drv_ctx->sst_state = SST_UN_INIT;
- mutex_unlock(&sst_drv_ctx->sst_lock);
- return 0;
-}
-
-static int intel_sst_runtime_idle(struct device *dev)
-{
- pr_debug("runtime_idle called\n");
- if (sst_drv_ctx->stream_cnt == 0 && sst_drv_ctx->am_cnt == 0)
- pm_schedule_suspend(dev, SST_SUSPEND_DELAY);
- return -EBUSY;
-}
-
-static const struct dev_pm_ops intel_sst_pm = {
- .runtime_suspend = intel_sst_runtime_suspend,
- .runtime_resume = intel_sst_runtime_resume,
- .runtime_idle = intel_sst_runtime_idle,
-};
-
-/* PCI Routines */
-static struct pci_device_id intel_sst_ids[] = {
- { PCI_VDEVICE(INTEL, SST_MRST_PCI_ID), 3},
- { PCI_VDEVICE(INTEL, SST_MFLD_PCI_ID), 6},
- { 0, }
-};
-MODULE_DEVICE_TABLE(pci, intel_sst_ids);
-
-static struct pci_driver driver = {
- .name = SST_DRV_NAME,
- .id_table = intel_sst_ids,
- .probe = intel_sst_probe,
- .remove = __devexit_p(intel_sst_remove),
-#ifdef CONFIG_PM
- .suspend = intel_sst_suspend,
- .resume = intel_sst_resume,
- .driver = {
- .pm = &intel_sst_pm,
- },
-#endif
-};
-
-/**
-* intel_sst_init - Module init function
-*
-* Registers with PCI
-* Registers with /dev
-* Init all data strutures
-*/
-static int __init intel_sst_init(void)
-{
- /* Init all variables, data structure etc....*/
- int ret = 0;
- pr_debug("INFO: ******** SST DRIVER loading.. Ver: %s\n",
- SST_DRIVER_VERSION);
-
- mutex_init(&drv_ctx_lock);
- /* Register with PCI */
- ret = pci_register_driver(&driver);
- if (ret)
- pr_err("PCI register failed\n");
- return ret;
-}
-
-/**
-* intel_sst_exit - Module exit function
-*
-* Unregisters with PCI
-* Unregisters with /dev
-* Frees all data strutures
-*/
-static void __exit intel_sst_exit(void)
-{
- pci_unregister_driver(&driver);
-
- pr_debug("driver unloaded\n");
- sst_drv_ctx = NULL;
- return;
-}
-
-module_init(intel_sst_init);
-module_exit(intel_sst_exit);
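
The probe path deleted above relies on the usual goto-based unwind pattern: each failure jumps to a label that releases only what has been acquired so far, in reverse order. A minimal sketch of that pattern, with invented names and plain malloc/free standing in for the driver's resources:

#include <stdlib.h>

static int example_probe(void)
{
	void *a, *b;
	int ret = -1;

	a = malloc(32);
	if (!a)
		goto out;		/* nothing acquired yet */
	b = malloc(64);
	if (!b)
		goto free_a;		/* undo only the first allocation */

	/* ... device setup would go here ... */
	free(b);
	free(a);
	return 0;

free_a:
	free(a);
out:
	return ret;
}
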
diff --git a/drivers/staging/intel_sst/intel_sst.h b/drivers/staging/intel_sst/intel_sst.h
deleted file mode 100644
index 4ad2829105a7..000000000000
--- a/drivers/staging/intel_sst/intel_sst.h
+++ /dev/null
@@ -1,162 +0,0 @@
-#ifndef __INTEL_SST_H__
-#define __INTEL_SST_H__
-/*
- * intel_sst.h - Intel SST Driver for audio engine
- *
- * Copyright (C) 2008-10 Intel Corporation
- * Authors: Vinod Koul <vinod.koul@intel.com>
- * Harsha Priya <priya.harsha@intel.com>
- * Dharageswari R <dharageswari.r@intel.com>
- * KP Jeeja <jeeja.kp@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This driver exposes the audio engine functionalities to the ALSA
- * and middleware.
- * This file is shared between the SST and MAD drivers
- */
-#include "intel_sst_ioctl.h"
-#include <sound/jack.h>
-
-#define SST_CARD_NAMES "intel_mid_card"
-
-#define MFLD_MAX_HW_CH 4
-/* control list Pmic & Lpe */
-/* Input controls */
-enum port_status {
- ACTIVATE = 1,
- DEACTIVATE,
-};
-
-/* Card states */
-enum sst_card_states {
- SND_CARD_UN_INIT = 0,
- SND_CARD_INIT_DONE,
-};
-
-enum sst_controls {
- SST_SND_ALLOC = 0x1000,
- SST_SND_PAUSE = 0x1001,
- SST_SND_RESUME = 0x1002,
- SST_SND_DROP = 0x1003,
- SST_SND_FREE = 0x1004,
- SST_SND_BUFFER_POINTER = 0x1005,
- SST_SND_STREAM_INIT = 0x1006,
- SST_SND_START = 0x1007,
- SST_SND_STREAM_PROCESS = 0x1008,
- SST_MAX_CONTROLS = 0x1008,
- SST_CONTROL_BASE = 0x1000,
- SST_ENABLE_RX_TIME_SLOT = 0x1009,
-};
-
-enum SND_CARDS {
- SND_FS = 0,
- SND_MX,
- SND_NC,
- SND_MSIC
-};
-
-struct pcm_stream_info {
- int str_id;
- void *mad_substream;
- void (*period_elapsed) (void *mad_substream);
- unsigned long long buffer_ptr;
- int sfreq;
-};
-
-struct snd_pmic_ops {
- int card_status;
- int master_mute;
- int num_channel;
- int input_dev_id;
- int mute_status;
- struct mutex lock;
- int pb_on, pbhs_on;
- int cap_on;
- int output_dev_id;
- int lineout_dev_id, line_out_names_cnt;
- int prev_lineout_dev_id;
- bool jack_interrupt_status;
- int (*set_input_dev) (u8 value);
- int (*set_output_dev) (u8 value);
- int (*set_lineout_dev) (u8 value);
- int (*set_mute) (int dev_id, u8 value);
- int (*get_mute) (int dev_id, u8 *value);
-
- int (*set_vol) (int dev_id, int value);
- int (*get_vol) (int dev_id, int *value);
-
- int (*init_card) (void);
- int (*set_pcm_audio_params)
- (int sfreq, int word_size , int num_channel);
- int (*set_pcm_voice_params) (void);
- int (*set_voice_port) (int status);
- int (*set_audio_port) (int status);
-
- int (*power_up_pmic_pb) (unsigned int port);
- int (*power_up_pmic_cp) (unsigned int port);
- int (*power_down_pmic_pb) (unsigned int device);
- int (*power_down_pmic_cp) (unsigned int device);
- int (*power_down_pmic) (void);
- void (*pmic_irq_cb) (void *cb_data, u8 value);
- void (*pmic_irq_enable)(void *data);
- int (*pmic_jack_enable) (void);
- int (*pmic_get_mic_bias)(void *intelmaddata);
- int (*pmic_set_headset_state)(int state);
-
- unsigned int hw_dmic_map[MFLD_MAX_HW_CH];
- unsigned int available_dmics;
- int (*set_hw_dmic_route) (u8 index);
-
- int gpio_amp;
-};
-
-extern void sst_mad_send_jack_report(struct snd_jack *jack,
- int buttonpressevent,
- int status);
-
-
-int intemad_set_headset_state(int state);
-int intelmad_get_mic_bias(void);
-
-struct intel_sst_pcm_control {
- int (*open) (struct snd_sst_params *str_param);
- int (*device_control) (int cmd, void *arg);
- int (*close) (unsigned int str_id);
-};
-struct intel_sst_card_ops {
- char *module_name;
- unsigned int vendor_id;
- struct intel_sst_pcm_control *pcm_control;
- struct snd_pmic_ops *scard_ops;
-};
-
-/* modified for generic access */
-struct sc_reg_access {
- u16 reg_addr;
- u8 value;
- u8 mask;
-};
-enum sc_reg_access_type {
- PMIC_READ = 0,
- PMIC_WRITE,
- PMIC_READ_MODIFY,
-};
-
-int register_sst_card(struct intel_sst_card_ops *card);
-void unregister_sst_card(struct intel_sst_card_ops *card);
-#endif /* __INTEL_SST_H__ */
diff --git a/drivers/staging/intel_sst/intel_sst_app_interface.c b/drivers/staging/intel_sst/intel_sst_app_interface.c
deleted file mode 100644
index 93b41a284d83..000000000000
--- a/drivers/staging/intel_sst/intel_sst_app_interface.c
+++ /dev/null
@@ -1,1460 +0,0 @@
-/*
- * intel_sst_interface.c - Intel SST Driver for audio engine
- *
- * Copyright (C) 2008-10 Intel Corp
- * Authors: Vinod Koul <vinod.koul@intel.com>
- * Harsha Priya <priya.harsha@intel.com>
- * Dharageswari R <dharageswari.r@intel.com>
- * Jeeja KP <jeeja.kp@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * This driver exposes the audio engine functionalities to the ALSA
- * and middleware.
- * Upper layer interfaces (MAD driver, MMF) to SST driver
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/pci.h>
-#include <linux/fs.h>
-#include <linux/uio.h>
-#include <linux/aio.h>
-#include <linux/uaccess.h>
-#include <linux/firmware.h>
-#include <linux/pm_runtime.h>
-#include <linux/ioctl.h>
-#ifdef CONFIG_MRST_RAR_HANDLER
-#include <linux/rar_register.h>
-#include "../../../drivers/staging/memrar/memrar.h"
-#endif
-#include "intel_sst.h"
-#include "intel_sst_ioctl.h"
-#include "intel_sst_fw_ipc.h"
-#include "intel_sst_common.h"
-
-#define AM_MODULE 1
-#define STREAM_MODULE 0
-
-
-/**
-* intel_sst_check_device - checks SST device
-*
-* This utility function checks the state of SST device and downlaods FW if
-* not done, or resumes the device if suspended
-*/
-
-static int intel_sst_check_device(void)
-{
- int retval = 0;
- if (sst_drv_ctx->pmic_state != SND_MAD_INIT_DONE) {
- pr_warn("Sound card not available\n");
- return -EIO;
- }
- if (sst_drv_ctx->sst_state == SST_SUSPENDED) {
- pr_debug("Resuming from Suspended state\n");
- retval = intel_sst_resume(sst_drv_ctx->pci);
- if (retval) {
- pr_debug("Resume Failed= %#x,abort\n", retval);
- return retval;
- }
- }
-
- if (sst_drv_ctx->sst_state == SST_UN_INIT) {
- /* FW is not downloaded */
- retval = sst_download_fw();
- if (retval)
- return -ENODEV;
- if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID) {
- retval = sst_drv_ctx->rx_time_slot_status;
- if (retval != RX_TIMESLOT_UNINIT
- && sst_drv_ctx->pmic_vendor != SND_NC)
- sst_enable_rx_timeslot(retval);
- }
- }
- return 0;
-}
-
-/**
- * intel_sst_open - opens a handle to driver
- *
- * @i_node: inode structure
- * @file_ptr:pointer to file
- *
- * This function is called by OS when a user space component
- * tries to get a driver handle. Only one handle at a time
- * will be allowed
- */
-int intel_sst_open(struct inode *i_node, struct file *file_ptr)
-{
- unsigned int retval;
-
- mutex_lock(&sst_drv_ctx->stream_lock);
- pm_runtime_get_sync(&sst_drv_ctx->pci->dev);
- retval = intel_sst_check_device();
- if (retval) {
- pm_runtime_put(&sst_drv_ctx->pci->dev);
- mutex_unlock(&sst_drv_ctx->stream_lock);
- return retval;
- }
-
- if (sst_drv_ctx->encoded_cnt < MAX_ENC_STREAM) {
- struct ioctl_pvt_data *data =
- kzalloc(sizeof(struct ioctl_pvt_data), GFP_KERNEL);
- if (!data) {
- pm_runtime_put(&sst_drv_ctx->pci->dev);
- mutex_unlock(&sst_drv_ctx->stream_lock);
- return -ENOMEM;
- }
-
- sst_drv_ctx->encoded_cnt++;
- mutex_unlock(&sst_drv_ctx->stream_lock);
- data->pvt_id = sst_assign_pvt_id(sst_drv_ctx);
- data->str_id = 0;
- file_ptr->private_data = (void *)data;
- pr_debug("pvt_id handle = %d!\n", data->pvt_id);
- } else {
- retval = -EUSERS;
- pm_runtime_put(&sst_drv_ctx->pci->dev);
- mutex_unlock(&sst_drv_ctx->stream_lock);
- }
- return retval;
-}
-
-/**
- * intel_sst_open_cntrl - opens a handle to driver
- *
- * @i_node: inode structure
- * @file_ptr:pointer to file
- *
- * This function is called by OS when a user space component
- * tries to get a driver handle to /dev/intel_sst_control.
- * Only one handle at a time will be allowed
- * This is for control operations only
- */
-int intel_sst_open_cntrl(struct inode *i_node, struct file *file_ptr)
-{
- unsigned int retval;
-
- /* audio manager open */
- mutex_lock(&sst_drv_ctx->stream_lock);
- pm_runtime_get_sync(&sst_drv_ctx->pci->dev);
- retval = intel_sst_check_device();
- if (retval) {
- pm_runtime_put(&sst_drv_ctx->pci->dev);
- mutex_unlock(&sst_drv_ctx->stream_lock);
- return retval;
- }
-
- if (sst_drv_ctx->am_cnt < MAX_AM_HANDLES) {
- sst_drv_ctx->am_cnt++;
- pr_debug("AM handle opened...\n");
- file_ptr->private_data = NULL;
- } else {
- retval = -EACCES;
- pm_runtime_put(&sst_drv_ctx->pci->dev);
- }
-
- mutex_unlock(&sst_drv_ctx->stream_lock);
- return retval;
-}
-
-/**
- * intel_sst_release - releases a handle to driver
- *
- * @i_node: inode structure
- * @file_ptr: pointer to file
- *
- * This function is called by OS when a user space component
- * tries to release a driver handle.
- */
-int intel_sst_release(struct inode *i_node, struct file *file_ptr)
-{
- struct ioctl_pvt_data *data = file_ptr->private_data;
-
- pr_debug("Release called, closing app handle\n");
- mutex_lock(&sst_drv_ctx->stream_lock);
- sst_drv_ctx->encoded_cnt--;
- sst_drv_ctx->stream_cnt--;
- pm_runtime_put(&sst_drv_ctx->pci->dev);
- mutex_unlock(&sst_drv_ctx->stream_lock);
- free_stream_context(data->str_id);
- kfree(data);
- return 0;
-}
-
-int intel_sst_release_cntrl(struct inode *i_node, struct file *file_ptr)
-{
- /* audio manager close */
- mutex_lock(&sst_drv_ctx->stream_lock);
- sst_drv_ctx->am_cnt--;
- pm_runtime_put(&sst_drv_ctx->pci->dev);
- mutex_unlock(&sst_drv_ctx->stream_lock);
- pr_debug("AM handle closed\n");
- return 0;
-}
-
-/**
-* intel_sst_mmap - mmaps a kernel buffer to user space for copying data
-*
-* @vma: vm area structure instance
-* @file_ptr: pointer to file
-*
-* This function is called by OS when a user space component
-* tries to get mmap memory from driver
-*/
-int intel_sst_mmap(struct file *file_ptr, struct vm_area_struct *vma)
-{
- int retval, length;
- struct ioctl_pvt_data *data =
- (struct ioctl_pvt_data *)file_ptr->private_data;
- int str_id = data->str_id;
- void *mem_area;
-
- retval = sst_validate_strid(str_id);
- if (retval)
- return -EINVAL;
-
- length = vma->vm_end - vma->vm_start;
- pr_debug("called for stream %d length 0x%x\n", str_id, length);
-
- if (length > sst_drv_ctx->mmap_len)
- return -ENOMEM;
- if (!sst_drv_ctx->mmap_mem)
- return -EIO;
-
- /* round it up to the page boundary */
- /*mem_area = (void *)((((unsigned long)sst_drv_ctx->mmap_mem)
- + PAGE_SIZE - 1) & PAGE_MASK);*/
- mem_area = (void *) PAGE_ALIGN((unsigned int) sst_drv_ctx->mmap_mem);
-
- /* map the whole physically contiguous area in one piece */
- retval = remap_pfn_range(vma,
- vma->vm_start,
- virt_to_phys((void *)mem_area) >> PAGE_SHIFT,
- length,
- vma->vm_page_prot);
- if (retval)
- sst_drv_ctx->streams[str_id].mmapped = false;
- else
- sst_drv_ctx->streams[str_id].mmapped = true;
-
- pr_debug("mmap ret 0x%x\n", retval);
- return retval;
-}
-
-/* sets mmap data buffers to play/capture*/
-static int intel_sst_mmap_play_capture(u32 str_id,
- struct snd_sst_mmap_buffs *mmap_buf)
-{
- struct sst_stream_bufs *bufs;
- int retval, i;
- struct stream_info *stream;
- struct snd_sst_mmap_buff_entry *buf_entry;
- struct snd_sst_mmap_buff_entry *tmp_buf;
-
- pr_debug("called for str_id %d\n", str_id);
- retval = sst_validate_strid(str_id);
- if (retval)
- return -EINVAL;
-
- stream = &sst_drv_ctx->streams[str_id];
- if (stream->mmapped != true)
- return -EIO;
-
- if (stream->status == STREAM_UN_INIT ||
- stream->status == STREAM_DECODE) {
- return -EBADRQC;
- }
- stream->curr_bytes = 0;
- stream->cumm_bytes = 0;
-
- tmp_buf = kcalloc(mmap_buf->entries, sizeof(*tmp_buf), GFP_KERNEL);
- if (!tmp_buf)
- return -ENOMEM;
- if (copy_from_user(tmp_buf, (void __user *)mmap_buf->buff,
- mmap_buf->entries * sizeof(*tmp_buf))) {
- retval = -EFAULT;
- goto out_free;
- }
-
- pr_debug("new buffers count %d status %d\n",
- mmap_buf->entries, stream->status);
- buf_entry = tmp_buf;
- for (i = 0; i < mmap_buf->entries; i++) {
- bufs = kzalloc(sizeof(*bufs), GFP_KERNEL);
- if (!bufs) {
- retval = -ENOMEM;
- goto out_free;
- }
- bufs->size = buf_entry->size;
- bufs->offset = buf_entry->offset;
- bufs->addr = sst_drv_ctx->mmap_mem;
- bufs->in_use = false;
- buf_entry++;
- /* locking here */
- mutex_lock(&stream->lock);
- list_add_tail(&bufs->node, &stream->bufs);
- mutex_unlock(&stream->lock);
- }
-
- mutex_lock(&stream->lock);
- stream->data_blk.condition = false;
- stream->data_blk.ret_code = 0;
- if (stream->status == STREAM_INIT &&
- stream->prev != STREAM_UN_INIT &&
- stream->need_draining != true) {
- stream->prev = stream->status;
- stream->status = STREAM_RUNNING;
- if (stream->ops == STREAM_OPS_PLAYBACK) {
- if (sst_play_frame(str_id) < 0) {
- pr_warn("play frames fail\n");
- mutex_unlock(&stream->lock);
- retval = -EIO;
- goto out_free;
- }
- } else if (stream->ops == STREAM_OPS_CAPTURE) {
- if (sst_capture_frame(str_id) < 0) {
- pr_warn("capture frame fail\n");
- mutex_unlock(&stream->lock);
- retval = -EIO;
- goto out_free;
- }
- }
- }
- mutex_unlock(&stream->lock);
- /* Block the call for reply */
- if (!list_empty(&stream->bufs)) {
- stream->data_blk.on = true;
- retval = sst_wait_interruptible(sst_drv_ctx,
- &stream->data_blk);
- }
-
- if (retval >= 0)
- retval = stream->cumm_bytes;
- pr_debug("end of play/rec ioctl bytes = %d!!\n", retval);
-
-out_free:
- kfree(tmp_buf);
- return retval;
-}
-
-/*sets user data buffers to play/capture*/
-static int intel_sst_play_capture(struct stream_info *stream, int str_id)
-{
- int retval;
-
- stream->data_blk.ret_code = 0;
- stream->data_blk.on = true;
- stream->data_blk.condition = false;
-
- mutex_lock(&stream->lock);
- if (stream->status == STREAM_INIT && stream->prev != STREAM_UN_INIT) {
- /* stream is started */
- stream->prev = stream->status;
- stream->status = STREAM_RUNNING;
- }
-
- if (stream->status == STREAM_INIT && stream->prev == STREAM_UN_INIT) {
- /* stream is not started yet */
- pr_debug("Stream isn't in started state %d, prev %d\n",
- stream->status, stream->prev);
- } else if ((stream->status == STREAM_RUNNING ||
- stream->status == STREAM_PAUSED) &&
- stream->need_draining != true) {
- /* stream is started */
- if (stream->ops == STREAM_OPS_PLAYBACK ||
- stream->ops == STREAM_OPS_PLAYBACK_DRM) {
- if (sst_play_frame(str_id) < 0) {
- pr_warn("play frames failed\n");
- mutex_unlock(&stream->lock);
- return -EIO;
- }
- } else if (stream->ops == STREAM_OPS_CAPTURE) {
- if (sst_capture_frame(str_id) < 0) {
- pr_warn("capture frames failed\n");
- mutex_unlock(&stream->lock);
- return -EIO;
- }
- }
- } else {
- mutex_unlock(&stream->lock);
- return -EIO;
- }
- mutex_unlock(&stream->lock);
- /* Block the call for reply */
-
- retval = sst_wait_interruptible(sst_drv_ctx, &stream->data_blk);
- if (retval) {
- stream->status = STREAM_INIT;
- pr_debug("wait returned error...\n");
- }
- return retval;
-}
-
-/* fills kernel list with buffer addresses for SST DSP driver to process*/
-static int snd_sst_fill_kernel_list(struct stream_info *stream,
- const struct iovec *iovec, unsigned long nr_segs,
- struct list_head *copy_to_list)
-{
- struct sst_stream_bufs *stream_bufs;
- unsigned long index, mmap_len;
- unsigned char __user *bufp;
- unsigned long size, copied_size;
- int retval = 0, add_to_list = 0;
- static int sent_offset;
- static unsigned long sent_index;
-
-#ifdef CONFIG_MRST_RAR_HANDLER
- if (stream->ops == STREAM_OPS_PLAYBACK_DRM) {
- for (index = stream->sg_index; index < nr_segs; index++) {
- __u32 rar_handle;
- struct sst_stream_bufs *stream_bufs =
- kzalloc(sizeof(*stream_bufs), GFP_KERNEL);
-
- stream->sg_index = index;
- if (!stream_bufs)
- return -ENOMEM;
- if (copy_from_user((void *) &rar_handle,
- iovec[index].iov_base,
- sizeof(__u32))) {
- kfree(stream_bufs);
- return -EFAULT;
- }
- stream_bufs->addr = (char *)rar_handle;
- stream_bufs->in_use = false;
- stream_bufs->size = iovec[0].iov_len;
- /* locking here */
- mutex_lock(&stream->lock);
- list_add_tail(&stream_bufs->node, &stream->bufs);
- mutex_unlock(&stream->lock);
- }
- stream->sg_index = index;
- return retval;
- }
-#endif
- stream_bufs = kzalloc(sizeof(*stream_bufs), GFP_KERNEL);
- if (!stream_bufs)
- return -ENOMEM;
- stream_bufs->addr = sst_drv_ctx->mmap_mem;
- mmap_len = sst_drv_ctx->mmap_len;
- stream_bufs->addr = sst_drv_ctx->mmap_mem;
- bufp = stream->cur_ptr;
-
- copied_size = 0;
-
- if (!stream->sg_index)
- sent_index = sent_offset = 0;
-
- for (index = stream->sg_index; index < nr_segs; index++) {
- stream->sg_index = index;
- if (!stream->cur_ptr)
- bufp = iovec[index].iov_base;
-
- size = ((unsigned long)iovec[index].iov_base
- + iovec[index].iov_len) - (unsigned long) bufp;
-
- if ((copied_size + size) > mmap_len)
- size = mmap_len - copied_size;
-
-
- if (stream->ops == STREAM_OPS_PLAYBACK) {
- if (copy_from_user((void *)
- (stream_bufs->addr + copied_size),
- bufp, size)) {
- /* Clean up the list and return error code */
- retval = -EFAULT;
- break;
- }
- } else if (stream->ops == STREAM_OPS_CAPTURE) {
- struct snd_sst_user_cap_list *entry =
- kzalloc(sizeof(*entry), GFP_KERNEL);
-
- if (!entry) {
- kfree(stream_bufs);
- return -ENOMEM;
- }
- entry->iov_index = index;
- entry->iov_offset = (unsigned long) bufp -
- (unsigned long)iovec[index].iov_base;
- entry->offset = copied_size;
- entry->size = size;
- list_add_tail(&entry->node, copy_to_list);
- }
-
- stream->cur_ptr = bufp + size;
-
- if (((unsigned long)iovec[index].iov_base
- + iovec[index].iov_len) <
- ((unsigned long)iovec[index].iov_base)) {
- pr_debug("Buffer overflows\n");
- kfree(stream_bufs);
- return -EINVAL;
- }
-
- if (((unsigned long)iovec[index].iov_base
- + iovec[index].iov_len) ==
- (unsigned long)stream->cur_ptr) {
- stream->cur_ptr = NULL;
- stream->sg_index++;
- }
-
- copied_size += size;
- pr_debug("copied_size - %lx\n", copied_size);
- if ((copied_size >= mmap_len) ||
- (stream->sg_index == nr_segs)) {
- add_to_list = 1;
- }
-
- if (add_to_list) {
- stream_bufs->in_use = false;
- stream_bufs->size = copied_size;
- /* locking here */
- mutex_lock(&stream->lock);
- list_add_tail(&stream_bufs->node, &stream->bufs);
- mutex_unlock(&stream->lock);
- break;
- }
- }
- return retval;
-}
-
-/* This function copies the captured data returned from SST DSP engine
- * to the user buffers*/
-static int snd_sst_copy_userbuf_capture(struct stream_info *stream,
- const struct iovec *iovec,
- struct list_head *copy_to_list)
-{
- struct snd_sst_user_cap_list *entry, *_entry;
- struct sst_stream_bufs *kbufs = NULL, *_kbufs;
- int retval = 0;
-
- /* copy sent buffers */
- pr_debug("capture stream copying to user now...\n");
- list_for_each_entry_safe(kbufs, _kbufs, &stream->bufs, node) {
- if (kbufs->in_use == true) {
- /* copy to user */
- list_for_each_entry_safe(entry, _entry,
- copy_to_list, node) {
- if (copy_to_user(iovec[entry->iov_index].iov_base + entry->iov_offset,
- kbufs->addr + entry->offset,
- entry->size)) {
- /* Clean up the list and return error */
- retval = -EFAULT;
- break;
- }
- list_del(&entry->node);
- kfree(entry);
- }
- }
- }
- pr_debug("end of cap copy\n");
- return retval;
-}
-
-/*
- * snd_sst_userbufs_play_cap - constructs the list from user buffers
- *
- * @iovec:pointer to iovec structure
- * @nr_segs:number entries in the iovec structure
- * @str_id:stream id
- * @stream:pointer to stream_info structure
- *
- * This function will traverse the user list and copy the data to the kernel
- * space buffers.
- */
-static int snd_sst_userbufs_play_cap(const struct iovec *iovec,
- unsigned long nr_segs, unsigned int str_id,
- struct stream_info *stream)
-{
- int retval;
- LIST_HEAD(copy_to_list);
-
-
- retval = snd_sst_fill_kernel_list(stream, iovec, nr_segs,
- &copy_to_list);
-
- retval = intel_sst_play_capture(stream, str_id);
- if (retval < 0)
- return retval;
-
- if (stream->ops == STREAM_OPS_CAPTURE) {
- retval = snd_sst_copy_userbuf_capture(stream, iovec,
- &copy_to_list);
- }
- return retval;
-}
-
-/* This function is common function across read/write
- for user buffers called from system calls*/
-static int intel_sst_read_write(unsigned int str_id, char __user *buf,
- size_t count)
-{
- int retval;
- struct stream_info *stream;
- struct iovec iovec;
- unsigned long nr_segs;
-
- retval = sst_validate_strid(str_id);
- if (retval)
- return -EINVAL;
- stream = &sst_drv_ctx->streams[str_id];
- if (stream->mmapped == true) {
- pr_warn("user write and stream is mapped\n");
- return -EIO;
- }
- if (!count)
- return -EINVAL;
- stream->curr_bytes = 0;
- stream->cumm_bytes = 0;
- /* copy user buf details */
- pr_debug("new buffers %p, copy size %d, status %d\n" ,
- buf, (int) count, (int) stream->status);
-
- stream->buf_type = SST_BUF_USER_STATIC;
- iovec.iov_base = buf;
- iovec.iov_len = count;
- nr_segs = 1;
-
- do {
- retval = snd_sst_userbufs_play_cap(
- &iovec, nr_segs, str_id, stream);
- if (retval < 0)
- break;
-
- } while (stream->sg_index < nr_segs);
-
- stream->sg_index = 0;
- stream->cur_ptr = NULL;
- if (retval >= 0)
- retval = stream->cumm_bytes;
- pr_debug("end of play/rec bytes = %d!!\n", retval);
- return retval;
-}
-
-/***
- * intel_sst_write - This function is called when user tries to play out data
- *
- * @file_ptr:pointer to file
- * @buf:user buffer to be played out
- * @count:size of tthe buffer
- * @offset:offset to start from
- *
- * writes the encoded data into DSP
- */
-int intel_sst_write(struct file *file_ptr, const char __user *buf,
- size_t count, loff_t *offset)
-{
- struct ioctl_pvt_data *data = file_ptr->private_data;
- int str_id = data->str_id;
- struct stream_info *stream = &sst_drv_ctx->streams[str_id];
-
- pr_debug("called for %d\n", str_id);
- if (stream->status == STREAM_UN_INIT ||
- stream->status == STREAM_DECODE) {
- return -EBADRQC;
- }
- return intel_sst_read_write(str_id, (char __user *)buf, count);
-}
-
-/*
- * intel_sst_aio_write - write buffers
- *
- * @kiocb:pointer to a structure containing file pointer
- * @iov:list of user buffer to be played out
- * @nr_segs:number of entries
- * @offset:offset to start from
- *
- * This function is called when user tries to play out multiple data buffers
- */
-ssize_t intel_sst_aio_write(struct kiocb *kiocb, const struct iovec *iov,
- unsigned long nr_segs, loff_t offset)
-{
- int retval;
- struct ioctl_pvt_data *data = kiocb->ki_filp->private_data;
- int str_id = data->str_id;
- struct stream_info *stream;
-
- pr_debug("entry - %ld\n", nr_segs);
-
- if (is_sync_kiocb(kiocb) == false)
- return -EINVAL;
-
- pr_debug("called for str_id %d\n", str_id);
- retval = sst_validate_strid(str_id);
- if (retval)
- return -EINVAL;
- stream = &sst_drv_ctx->streams[str_id];
- if (stream->mmapped == true)
- return -EIO;
- if (stream->status == STREAM_UN_INIT ||
- stream->status == STREAM_DECODE) {
- return -EBADRQC;
- }
- stream->curr_bytes = 0;
- stream->cumm_bytes = 0;
- pr_debug("new segs %ld, offset %d, status %d\n" ,
- nr_segs, (int) offset, (int) stream->status);
- stream->buf_type = SST_BUF_USER_STATIC;
- do {
- retval = snd_sst_userbufs_play_cap(iov, nr_segs,
- str_id, stream);
- if (retval < 0)
- break;
-
- } while (stream->sg_index < nr_segs);
-
- stream->sg_index = 0;
- stream->cur_ptr = NULL;
- if (retval >= 0)
- retval = stream->cumm_bytes;
- pr_debug("end of play/rec bytes = %d!!\n", retval);
- return retval;
-}
-
-/*
- * intel_sst_read - read the encoded data
- *
- * @file_ptr: pointer to file
- * @buf: user buffer to be filled with captured data
- * @count: size of tthe buffer
- * @offset: offset to start from
- *
- * This function is called when user tries to capture data
- */
-int intel_sst_read(struct file *file_ptr, char __user *buf,
- size_t count, loff_t *offset)
-{
- struct ioctl_pvt_data *data = file_ptr->private_data;
- int str_id = data->str_id;
- struct stream_info *stream = &sst_drv_ctx->streams[str_id];
-
- pr_debug("called for %d\n", str_id);
- if (stream->status == STREAM_UN_INIT ||
- stream->status == STREAM_DECODE)
- return -EBADRQC;
- return intel_sst_read_write(str_id, buf, count);
-}
-
-/*
- * intel_sst_aio_read - aio read
- *
- * @kiocb: pointer to a structure containing file pointer
- * @iov: list of user buffer to be filled with captured
- * @nr_segs: number of entries
- * @offset: offset to start from
- *
- * This function is called when user tries to capture out multiple data buffers
- */
-ssize_t intel_sst_aio_read(struct kiocb *kiocb, const struct iovec *iov,
- unsigned long nr_segs, loff_t offset)
-{
- int retval;
- struct ioctl_pvt_data *data = kiocb->ki_filp->private_data;
- int str_id = data->str_id;
- struct stream_info *stream;
-
- pr_debug("entry - %ld\n", nr_segs);
-
- if (is_sync_kiocb(kiocb) == false) {
- pr_debug("aio_read from user space is not allowed\n");
- return -EINVAL;
- }
-
- pr_debug("called for str_id %d\n", str_id);
- retval = sst_validate_strid(str_id);
- if (retval)
- return -EINVAL;
- stream = &sst_drv_ctx->streams[str_id];
- if (stream->mmapped == true)
- return -EIO;
- if (stream->status == STREAM_UN_INIT ||
- stream->status == STREAM_DECODE)
- return -EBADRQC;
- stream->curr_bytes = 0;
- stream->cumm_bytes = 0;
-
- pr_debug("new segs %ld, offset %d, status %d\n" ,
- nr_segs, (int) offset, (int) stream->status);
- stream->buf_type = SST_BUF_USER_STATIC;
- do {
- retval = snd_sst_userbufs_play_cap(iov, nr_segs,
- str_id, stream);
- if (retval < 0)
- break;
-
- } while (stream->sg_index < nr_segs);
-
- stream->sg_index = 0;
- stream->cur_ptr = NULL;
- if (retval >= 0)
- retval = stream->cumm_bytes;
- pr_debug("end of play/rec bytes = %d!!\n", retval);
- return retval;
-}
-
-/* sst_print_stream_params - prints the stream parameters (debug fn)*/
-static void sst_print_stream_params(struct snd_sst_get_stream_params *get_prm)
-{
- pr_debug("codec params:result = %d\n",
- get_prm->codec_params.result);
- pr_debug("codec params:stream = %d\n",
- get_prm->codec_params.stream_id);
- pr_debug("codec params:codec = %d\n",
- get_prm->codec_params.codec);
- pr_debug("codec params:ops = %d\n",
- get_prm->codec_params.ops);
- pr_debug("codec params:stream_type = %d\n",
- get_prm->codec_params.stream_type);
- pr_debug("pcmparams:sfreq = %d\n",
- get_prm->pcm_params.sfreq);
- pr_debug("pcmparams:num_chan = %d\n",
- get_prm->pcm_params.num_chan);
- pr_debug("pcmparams:pcm_wd_sz = %d\n",
- get_prm->pcm_params.pcm_wd_sz);
- return;
-}
-
-/**
- * sst_create_algo_ipc - create ipc msg for algorithm parameters
- *
- * @algo_params: Algorithm parameters
- * @msg: post msg pointer
- *
- * This function is called to create ipc msg
- */
-int sst_create_algo_ipc(struct snd_ppp_params *algo_params,
- struct ipc_post **msg)
-{
- if (sst_create_large_msg(msg))
- return -ENOMEM;
- sst_fill_header(&(*msg)->header,
- IPC_IA_ALG_PARAMS, 1, algo_params->str_id);
- (*msg)->header.part.data = sizeof(u32) +
- sizeof(*algo_params) + algo_params->size;
- memcpy((*msg)->mailbox_data, &(*msg)->header, sizeof(u32));
- memcpy((*msg)->mailbox_data + sizeof(u32),
- algo_params, sizeof(*algo_params));
- return 0;
-}
-
-/**
- * sst_send_algo_ipc - send ipc msg for algorithm parameters
- *
- * @msg: post msg pointer
- *
- * This function is called to send ipc msg
- */
-int sst_send_algo_ipc(struct ipc_post **msg)
-{
- sst_drv_ctx->ppp_params_blk.condition = false;
- sst_drv_ctx->ppp_params_blk.ret_code = 0;
- sst_drv_ctx->ppp_params_blk.on = true;
- sst_drv_ctx->ppp_params_blk.data = NULL;
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&(*msg)->node, &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- return sst_wait_interruptible_timeout(sst_drv_ctx,
- &sst_drv_ctx->ppp_params_blk, SST_BLOCK_TIMEOUT);
-}
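/*
 * Minimal usage sketch for sst_create_algo_ipc()/sst_send_algo_ipc() above.
 * Illustrative only: 'ap' is an assumed, caller-populated struct
 * snd_ppp_params whose 'params' member points at 'ap.size' bytes of user
 * memory (see the SNDRV_SST_SET_ALGO handler below for the real sequence):
 *
 *	struct ipc_post *msg;
 *	int ret;
 *
 *	ret = sst_create_algo_ipc(&ap, &msg);
 *	if (ret)
 *		return ret;
 *	if (copy_from_user(msg->mailbox_data + sizeof(ap), ap.params, ap.size))
 *		return -EFAULT;
 *	ret = sst_send_algo_ipc(&msg);
 *
 * Resulting mailbox layout: | header (u32) | snd_ppp_params | payload bytes |
 */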
-
-/**
- * intel_sst_ioctl_dsp - receives the DSP device ioctls
- *
- * @cmd: IOCTL command
- * @arg: command argument
- *
- * This function is called when a user space component
- * sends a DSP ioctl to the SST driver
- */
-long intel_sst_ioctl_dsp(unsigned int cmd, unsigned long arg)
-{
- int retval = 0;
- struct snd_ppp_params algo_params;
- struct snd_ppp_params *algo_params_copied;
- struct ipc_post *msg;
-
- switch (_IOC_NR(cmd)) {
- case _IOC_NR(SNDRV_SST_SET_ALGO):
- if (copy_from_user(&algo_params, (void __user *)arg,
- sizeof(algo_params)))
- return -EFAULT;
- if (algo_params.size > SST_MAILBOX_SIZE)
- return -EMSGSIZE;
-
- pr_debug("Algo ID %d Str id %d Enable %d Size %d\n",
- algo_params.algo_id, algo_params.str_id,
- algo_params.enable, algo_params.size);
- retval = sst_create_algo_ipc(&algo_params, &msg);
- if (retval)
- break;
- algo_params.reserved = 0;
- if (copy_from_user(msg->mailbox_data + sizeof(algo_params),
- algo_params.params, algo_params.size))
- return -EFAULT;
-
- retval = sst_send_algo_ipc(&msg);
- if (retval) {
- pr_debug("Error in sst_set_algo = %d\n", retval);
- retval = -EIO;
- }
- break;
-
- case _IOC_NR(SNDRV_SST_GET_ALGO):
- if (copy_from_user(&algo_params, (void __user *)arg,
- sizeof(algo_params)))
- return -EFAULT;
- pr_debug("Algo ID %d Str id %d Enable %d Size %d\n",
- algo_params.algo_id, algo_params.str_id,
- algo_params.enable, algo_params.size);
- retval = sst_create_algo_ipc(&algo_params, &msg);
- if (retval)
- break;
- algo_params.reserved = 1;
- retval = sst_send_algo_ipc(&msg);
- if (retval) {
- pr_debug("Error in sst_get_algo = %d\n", retval);
- retval = -EIO;
- break;
- }
- algo_params_copied = (struct snd_ppp_params *)
- sst_drv_ctx->ppp_params_blk.data;
- if (algo_params_copied->size > algo_params.size) {
- pr_debug("mem insufficient to copy\n");
- retval = -EMSGSIZE;
- goto free_mem;
- } else {
- char __user *tmp;
-
- if (copy_to_user(algo_params.params,
- algo_params_copied->params,
- algo_params_copied->size)) {
- retval = -EFAULT;
- goto free_mem;
- }
- tmp = (char __user *)arg + offsetof(
- struct snd_ppp_params, size);
- if (copy_to_user(tmp, &algo_params_copied->size,
- sizeof(__u32))) {
- retval = -EFAULT;
- goto free_mem;
- }
-
- }
-free_mem:
- kfree(algo_params_copied->params);
- kfree(algo_params_copied);
- break;
- }
- return retval;
-}
-
-
-int sst_ioctl_tuning_params(unsigned long arg)
-{
- struct snd_sst_tuning_params params;
- struct ipc_post *msg;
-
- if (copy_from_user(&params, (void __user *)arg, sizeof(params)))
- return -EFAULT;
- if (params.size > SST_MAILBOX_SIZE)
- return -ENOMEM;
- pr_debug("Parameter %d, Stream %d, Size %d\n", params.type,
- params.str_id, params.size);
- if (sst_create_large_msg(&msg))
- return -ENOMEM;
-
- sst_fill_header(&msg->header, IPC_IA_TUNING_PARAMS, 1, params.str_id);
- msg->header.part.data = sizeof(u32) + sizeof(params) + params.size;
- memcpy(msg->mailbox_data, &msg->header.full, sizeof(u32));
- memcpy(msg->mailbox_data + sizeof(u32), &params, sizeof(params));
- if (copy_from_user(msg->mailbox_data + sizeof(params),
- (void __user *)(unsigned long)params.addr,
- params.size)) {
- kfree(msg->mailbox_data);
- kfree(msg);
- return -EFAULT;
- }
- return sst_send_algo_ipc(&msg);
-}
-/**
- * intel_sst_ioctl - receives the device ioctls
- * @file_ptr: pointer to file
- * @cmd: IOCTL command
- * @arg: command argument
- *
- * This function is called by the OS when a user space component
- * sends an ioctl to the SST driver
- */
-long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
-{
- int retval = 0;
- struct ioctl_pvt_data *data = NULL;
- int str_id = 0, minor = 0;
-
- data = file_ptr->private_data;
- if (data) {
- minor = 0;
- str_id = data->str_id;
- } else
- minor = 1;
-
- if (sst_drv_ctx->sst_state != SST_FW_RUNNING)
- return -EBUSY;
-
- switch (_IOC_NR(cmd)) {
- case _IOC_NR(SNDRV_SST_STREAM_PAUSE):
- pr_debug("IOCTL_PAUSE received for %d!\n", str_id);
- if (minor != STREAM_MODULE) {
- retval = -EBADRQC;
- break;
- }
- retval = sst_pause_stream(str_id);
- break;
-
- case _IOC_NR(SNDRV_SST_STREAM_RESUME):
- pr_debug("SNDRV_SST_IOCTL_RESUME received!\n");
- if (minor != STREAM_MODULE) {
- retval = -EBADRQC;
- break;
- }
- retval = sst_resume_stream(str_id);
- break;
-
- case _IOC_NR(SNDRV_SST_STREAM_SET_PARAMS): {
- struct snd_sst_params str_param;
-
- pr_debug("IOCTL_SET_PARAMS received!\n");
- if (minor != STREAM_MODULE) {
- retval = -EBADRQC;
- break;
- }
-
- if (copy_from_user(&str_param, (void __user *)arg,
- sizeof(str_param))) {
- retval = -EFAULT;
- break;
- }
-
- if (!str_id) {
-
- retval = sst_get_stream(&str_param);
- if (retval > 0) {
- struct stream_info *str_info;
- char __user *dest;
-
- sst_drv_ctx->stream_cnt++;
- data->str_id = retval;
- str_info = &sst_drv_ctx->streams[retval];
- str_info->src = SST_DRV;
- dest = (char __user *)arg + offsetof(struct snd_sst_params, stream_id);
- retval = copy_to_user(dest, &retval, sizeof(__u32));
- if (retval)
- retval = -EFAULT;
- } else {
- if (retval == -SST_ERR_INVALID_PARAMS)
- retval = -EINVAL;
- }
- } else {
- pr_debug("SET_STREAM_PARAMS received!\n");
- /* allocated set params only */
- retval = sst_set_stream_param(str_id, &str_param);
- /* Block the call for reply */
- if (!retval) {
- int sfreq = 0, word_size = 0, num_channel = 0;
- sfreq = str_param.sparams.uc.pcm_params.sfreq;
- word_size = str_param.sparams.uc.pcm_params.pcm_wd_sz;
- num_channel = str_param.sparams.uc.pcm_params.num_chan;
- if (str_param.ops == STREAM_OPS_CAPTURE) {
- sst_drv_ctx->scard_ops->\
- set_pcm_audio_params(sfreq,
- word_size, num_channel);
- }
- }
- }
- break;
- }
- case _IOC_NR(SNDRV_SST_SET_VOL): {
- struct snd_sst_vol set_vol;
-
- if (copy_from_user(&set_vol, (void __user *)arg,
- sizeof(set_vol))) {
- pr_debug("copy failed\n");
- retval = -EFAULT;
- break;
- }
- pr_debug("SET_VOLUME received for %d!\n",
- set_vol.stream_id);
- if (minor == STREAM_MODULE && set_vol.stream_id == 0) {
- pr_debug("invalid operation!\n");
- retval = -EPERM;
- break;
- }
- retval = sst_set_vol(&set_vol);
- break;
- }
- case _IOC_NR(SNDRV_SST_GET_VOL): {
- struct snd_sst_vol get_vol;
-
- if (copy_from_user(&get_vol, (void __user *)arg,
- sizeof(get_vol))) {
- retval = -EFAULT;
- break;
- }
- pr_debug("IOCTL_GET_VOLUME received for stream = %d!\n",
- get_vol.stream_id);
- if (minor == STREAM_MODULE && get_vol.stream_id == 0) {
- pr_debug("invalid operation!\n");
- retval = -EPERM;
- break;
- }
- retval = sst_get_vol(&get_vol);
- if (retval) {
- retval = -EIO;
- break;
- }
- pr_debug("id:%d\n, vol:%d, ramp_dur:%d, ramp_type:%d\n",
- get_vol.stream_id, get_vol.volume,
- get_vol.ramp_duration, get_vol.ramp_type);
- if (copy_to_user((struct snd_sst_vol __user *)arg,
- &get_vol, sizeof(get_vol))) {
- retval = -EFAULT;
- break;
- }
- /*sst_print_get_vol_info(str_id, &get_vol);*/
- break;
- }
-
- case _IOC_NR(SNDRV_SST_MUTE): {
- struct snd_sst_mute set_mute;
-
- if (copy_from_user(&set_mute, (void __user *)arg,
- sizeof(set_mute))) {
- retval = -EFAULT;
- break;
- }
- pr_debug("SNDRV_SST_SET_VOLUME received for %d!\n",
- set_mute.stream_id);
- if (minor == STREAM_MODULE && set_mute.stream_id == 0) {
- retval = -EPERM;
- break;
- }
- retval = sst_set_mute(&set_mute);
- break;
- }
- case _IOC_NR(SNDRV_SST_STREAM_GET_PARAMS): {
- struct snd_sst_get_stream_params get_params;
-
- pr_debug("IOCTL_GET_PARAMS received!\n");
- if (minor != 0) {
- retval = -EBADRQC;
- break;
- }
-
- retval = sst_get_stream_params(str_id, &get_params);
- if (retval) {
- retval = -EIO;
- break;
- }
- if (copy_to_user((struct snd_sst_get_stream_params __user *)arg,
- &get_params, sizeof(get_params))) {
- retval = -EFAULT;
- break;
- }
- sst_print_stream_params(&get_params);
- break;
- }
-
- case _IOC_NR(SNDRV_SST_MMAP_PLAY):
- case _IOC_NR(SNDRV_SST_MMAP_CAPTURE): {
- struct snd_sst_mmap_buffs mmap_buf;
-
- pr_debug("SNDRV_SST_MMAP_PLAY/CAPTURE received!\n");
- if (minor != STREAM_MODULE) {
- retval = -EBADRQC;
- break;
- }
- if (copy_from_user(&mmap_buf, (void __user *)arg,
- sizeof(mmap_buf))) {
- retval = -EFAULT;
- break;
- }
- retval = intel_sst_mmap_play_capture(str_id, &mmap_buf);
- break;
- }
- case _IOC_NR(SNDRV_SST_STREAM_DROP):
- pr_debug("SNDRV_SST_IOCTL_DROP received!\n");
- if (minor != STREAM_MODULE) {
- retval = -EINVAL;
- break;
- }
- retval = sst_drop_stream(str_id);
- break;
-
- case _IOC_NR(SNDRV_SST_STREAM_GET_TSTAMP): {
- struct snd_sst_tstamp tstamp = {0};
- unsigned long long time, freq, mod;
-
- pr_debug("SNDRV_SST_STREAM_GET_TSTAMP received!\n");
- if (minor != STREAM_MODULE) {
- retval = -EBADRQC;
- break;
- }
- memcpy_fromio(&tstamp,
- sst_drv_ctx->mailbox + SST_TIME_STAMP + str_id * sizeof(tstamp),
- sizeof(tstamp));
- time = tstamp.samples_rendered;
- freq = (unsigned long long) tstamp.sampling_frequency;
- time = time * 1000; /* converting it to ms */
- mod = do_div(time, freq);
- if (copy_to_user((void __user *)arg, &time,
- sizeof(unsigned long long)))
- retval = -EFAULT;
- break;
- }
-
- case _IOC_NR(SNDRV_SST_STREAM_START):{
- struct stream_info *stream;
-
- pr_debug("SNDRV_SST_STREAM_START received!\n");
- if (minor != STREAM_MODULE) {
- retval = -EINVAL;
- break;
- }
- retval = sst_validate_strid(str_id);
- if (retval)
- break;
- stream = &sst_drv_ctx->streams[str_id];
- mutex_lock(&stream->lock);
- if (stream->status == STREAM_INIT &&
- stream->need_draining != true) {
- stream->prev = stream->status;
- stream->status = STREAM_RUNNING;
- if (stream->ops == STREAM_OPS_PLAYBACK ||
- stream->ops == STREAM_OPS_PLAYBACK_DRM) {
- retval = sst_play_frame(str_id);
- } else if (stream->ops == STREAM_OPS_CAPTURE)
- retval = sst_capture_frame(str_id);
- else {
- retval = -EINVAL;
- mutex_unlock(&stream->lock);
- break;
- }
- if (retval < 0) {
- stream->status = STREAM_INIT;
- mutex_unlock(&stream->lock);
- break;
- }
- } else {
- retval = -EINVAL;
- }
- mutex_unlock(&stream->lock);
- break;
- }
-
- case _IOC_NR(SNDRV_SST_SET_TARGET_DEVICE): {
- struct snd_sst_target_device target_device;
-
- pr_debug("SET_TARGET_DEVICE received!\n");
- if (copy_from_user(&target_device, (void __user *)arg,
- sizeof(target_device))) {
- retval = -EFAULT;
- break;
- }
- if (minor != AM_MODULE) {
- retval = -EBADRQC;
- break;
- }
- retval = sst_target_device_select(&target_device);
- break;
- }
-
- case _IOC_NR(SNDRV_SST_DRIVER_INFO): {
- struct snd_sst_driver_info info;
-
- pr_debug("SNDRV_SST_DRIVER_INFO received\n");
- info.version = SST_VERSION_NUM;
- /* hard coded for now, should be obtained properly later */
- info.active_pcm_streams = sst_drv_ctx->stream_cnt -
- sst_drv_ctx->encoded_cnt;
- info.active_enc_streams = sst_drv_ctx->encoded_cnt;
- info.max_pcm_streams = MAX_ACTIVE_STREAM - MAX_ENC_STREAM;
- info.max_enc_streams = MAX_ENC_STREAM;
- info.buf_per_stream = sst_drv_ctx->mmap_len;
- if (copy_to_user((void __user *)arg, &info,
- sizeof(info)))
- retval = -EFAULT;
- break;
- }
-
- case _IOC_NR(SNDRV_SST_STREAM_DECODE): {
- struct snd_sst_dbufs param;
- struct snd_sst_dbufs dbufs_local;
- struct snd_sst_buffs ibufs, obufs;
- struct snd_sst_buff_entry *ibuf_tmp, *obuf_tmp;
- char __user *dest;
-
- pr_debug("SNDRV_SST_STREAM_DECODE received\n");
- if (minor != STREAM_MODULE) {
- retval = -EBADRQC;
- break;
- }
- if (copy_from_user(&param, (void __user *)arg,
- sizeof(param))) {
- retval = -EFAULT;
- break;
- }
-
- dbufs_local.input_bytes_consumed = param.input_bytes_consumed;
- dbufs_local.output_bytes_produced =
- param.output_bytes_produced;
-
- if (copy_from_user(&ibufs, (void __user *)param.ibufs, sizeof(ibufs))) {
- retval = -EFAULT;
- break;
- }
- if (copy_from_user(&obufs, (void __user *)param.obufs, sizeof(obufs))) {
- retval = -EFAULT;
- break;
- }
-
- ibuf_tmp = kcalloc(ibufs.entries, sizeof(*ibuf_tmp), GFP_KERNEL);
- obuf_tmp = kcalloc(obufs.entries, sizeof(*obuf_tmp), GFP_KERNEL);
- if (!ibuf_tmp || !obuf_tmp) {
- retval = -ENOMEM;
- goto free_iobufs;
- }
-
- if (copy_from_user(ibuf_tmp, (void __user *)ibufs.buff_entry,
- ibufs.entries * sizeof(*ibuf_tmp))) {
- retval = -EFAULT;
- goto free_iobufs;
- }
- ibufs.buff_entry = ibuf_tmp;
- dbufs_local.ibufs = &ibufs;
-
- if (copy_from_user(obuf_tmp, (void __user *)obufs.buff_entry,
- obufs.entries * sizeof(*obuf_tmp))) {
- retval = -EFAULT;
- goto free_iobufs;
- }
- obufs.buff_entry = obuf_tmp;
- dbufs_local.obufs = &obufs;
-
- retval = sst_decode(str_id, &dbufs_local);
- if (retval) {
- retval = -EAGAIN;
- goto free_iobufs;
- }
-
- dest = (char __user *)arg + offsetof(struct snd_sst_dbufs, input_bytes_consumed);
- if (copy_to_user(dest,
- &dbufs_local.input_bytes_consumed,
- sizeof(unsigned long long))) {
- retval = -EFAULT;
- goto free_iobufs;
- }
-
- dest = (char __user *)arg + offsetof(struct snd_sst_dbufs, output_bytes_produced);
- if (copy_to_user(dest,
- &dbufs_local.output_bytes_produced,
- sizeof(unsigned long long))) {
- retval = -EFAULT;
- goto free_iobufs;
- }
-free_iobufs:
- kfree(ibuf_tmp);
- kfree(obuf_tmp);
- break;
- }
-
- case _IOC_NR(SNDRV_SST_STREAM_DRAIN):
- pr_debug("SNDRV_SST_STREAM_DRAIN received\n");
- if (minor != STREAM_MODULE) {
- retval = -EINVAL;
- break;
- }
- retval = sst_drain_stream(str_id);
- break;
-
- case _IOC_NR(SNDRV_SST_STREAM_BYTES_DECODED): {
- unsigned long long __user *bytes = (unsigned long long __user *)arg;
- struct snd_sst_tstamp tstamp = {0};
-
- pr_debug("STREAM_BYTES_DECODED received!\n");
- if (minor != STREAM_MODULE) {
- retval = -EINVAL;
- break;
- }
- memcpy_fromio(&tstamp,
- sst_drv_ctx->mailbox + SST_TIME_STAMP + str_id * sizeof(tstamp),
- sizeof(tstamp));
- if (copy_to_user(bytes, &tstamp.bytes_processed,
- sizeof(*bytes)))
- retval = -EFAULT;
- break;
- }
- case _IOC_NR(SNDRV_SST_FW_INFO): {
- struct snd_sst_fw_info *fw_info;
-
- pr_debug("SNDRV_SST_FW_INFO received\n");
-
- fw_info = kzalloc(sizeof(*fw_info), GFP_ATOMIC);
- if (!fw_info) {
- retval = -ENOMEM;
- break;
- }
- retval = sst_get_fw_info(fw_info);
- if (retval) {
- retval = -EIO;
- kfree(fw_info);
- break;
- }
- if (copy_to_user((struct snd_sst_dbufs __user *)arg,
- fw_info, sizeof(*fw_info))) {
- kfree(fw_info);
- retval = -EFAULT;
- break;
- }
- /*sst_print_fw_info(fw_info);*/
- kfree(fw_info);
- break;
- }
- case _IOC_NR(SNDRV_SST_GET_ALGO):
- case _IOC_NR(SNDRV_SST_SET_ALGO):
- if (minor != AM_MODULE) {
- retval = -EBADRQC;
- break;
- }
- retval = intel_sst_ioctl_dsp(cmd, arg);
- break;
-
- case _IOC_NR(SNDRV_SST_TUNING_PARAMS):
- if (minor != AM_MODULE) {
- retval = -EBADRQC;
- break;
- }
- retval = sst_ioctl_tuning_params(arg);
- break;
-
- default:
- retval = -EINVAL;
- }
- pr_debug("intel_sst_ioctl:complete ret code = %d\n", retval);
- return retval;
-}
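/*
 * Rough user-space sketch of driving the ioctls handled above. Purely
 * illustrative: SST_DEV is a placeholder for the stream char device path
 * (the driver exposes /dev/lpe style nodes, see intel_sst_common.h), and
 * error handling is omitted.
 *
 *	struct snd_sst_params p = { ... };
 *	int fd = open(SST_DEV, O_RDWR);
 *
 *	ioctl(fd, SNDRV_SST_STREAM_SET_PARAMS, &p);
 *	ioctl(fd, SNDRV_SST_STREAM_START, 0);
 *	ioctl(fd, SNDRV_SST_STREAM_DROP, 0);
 */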
-
diff --git a/drivers/staging/intel_sst/intel_sst_common.h b/drivers/staging/intel_sst/intel_sst_common.h
deleted file mode 100644
index 870981ba3c97..000000000000
--- a/drivers/staging/intel_sst/intel_sst_common.h
+++ /dev/null
@@ -1,623 +0,0 @@
-#ifndef __INTEL_SST_COMMON_H__
-#define __INTEL_SST_COMMON_H__
-/*
- * intel_sst_common.h - Intel SST Driver for audio engine
- *
- * Copyright (C) 2008-10 Intel Corporation
- * Authors: Vinod Koul <vinod.koul@intel.com>
- * Harsha Priya <priya.harsha@intel.com>
- * Dharageswari R <dharageswari.r@intel.com>
- * KP Jeeja <jeeja.kp@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * Common private declarations for SST
- */
-
-#define SST_DRIVER_VERSION "1.2.17"
-#define SST_VERSION_NUM 0x1217
-
-/* driver names */
-#define SST_DRV_NAME "intel_sst_driver"
-#define SST_MRST_PCI_ID 0x080A
-#define SST_MFLD_PCI_ID 0x082F
-#define PCI_ID_LENGTH 4
-#define SST_SUSPEND_DELAY 2000
-#define FW_CONTEXT_MEM (64*1024)
-
-enum sst_states {
- SST_FW_LOADED = 1,
- SST_FW_RUNNING,
- SST_UN_INIT,
- SST_ERROR,
- SST_SUSPENDED
-};
-
-#define MAX_ACTIVE_STREAM 3
-#define MAX_ENC_STREAM 1
-#define MAX_AM_HANDLES 1
-#define ALLOC_TIMEOUT 5000
-/* SST numbers */
-#define SST_BLOCK_TIMEOUT 5000
-#define TARGET_DEV_BLOCK_TIMEOUT 5000
-
-#define BLOCK_UNINIT -1
-#define RX_TIMESLOT_UNINIT -1
-
-/* SST register map */
-#define SST_CSR 0x00
-#define SST_PISR 0x08
-#define SST_PIMR 0x10
-#define SST_ISRX 0x18
-#define SST_IMRX 0x28
-#define SST_IPCX 0x38 /* IPC IA-SST */
-#define SST_IPCD 0x40 /* IPC SST-IA */
-#define SST_ISRD 0x20 /* dummy register for shim workaround */
-#define SST_SHIM_SIZE 0X44
-
-#define SPI_MODE_ENABLE_BASE_ADDR 0xffae4000
-#define FW_SIGNATURE_SIZE 4
-
-/* PMIC and SST hardware states */
-enum sst_mad_states {
- SND_MAD_UN_INIT = 0,
- SND_MAD_INIT_DONE,
-};
-
-/* stream states */
-enum sst_stream_states {
- STREAM_UN_INIT = 0, /* Freed/Not used stream */
- STREAM_RUNNING = 1, /* Running */
- STREAM_PAUSED = 2, /* Paused stream */
- STREAM_DECODE = 3, /* stream is in decoding only state */
- STREAM_INIT = 4, /* stream init, waiting for data */
-};
-
-
-enum sst_ram_type {
- SST_IRAM = 1,
- SST_DRAM = 2,
-};
-/* SST shim registers to structure mapping */
-union config_status_reg {
- struct {
- u32 mfld_strb:1;
- u32 sst_reset:1;
- u32 hw_rsvd:3;
- u32 sst_clk:2;
- u32 bypass:3;
- u32 run_stall:1;
- u32 rsvd1:2;
- u32 strb_cntr_rst:1;
- u32 rsvd:18;
- } part;
- u32 full;
-};
-
-union interrupt_reg {
- struct {
- u32 done_interrupt:1;
- u32 busy_interrupt:1;
- u32 rsvd:30;
- } part;
- u32 full;
-};
-
-union sst_pisr_reg {
- struct {
- u32 pssp0:1;
- u32 pssp1:1;
- u32 rsvd0:3;
- u32 dmac:1;
- u32 rsvd1:26;
- } part;
- u32 full;
-};
-
-union sst_pimr_reg {
- struct {
- u32 ssp0:1;
- u32 ssp1:1;
- u32 rsvd0:3;
- u32 dmac:1;
- u32 rsvd1:10;
- u32 ssp0_sc:1;
- u32 ssp1_sc:1;
- u32 rsvd2:3;
- u32 dmac_sc:1;
- u32 rsvd3:10;
- } part;
- u32 full;
-};
-
-
-struct sst_stream_bufs {
- struct list_head node;
- u32 size;
- const char *addr;
- u32 data_copied;
- bool in_use;
- u32 offset;
-};
-
-struct snd_sst_user_cap_list {
- unsigned int iov_index; /* index of iov */
- unsigned long iov_offset; /* offset in iov */
- unsigned long offset; /* offset in kmem */
- unsigned long size; /* size copied */
- struct list_head node;
-};
-/*
- * This structure is used to block a user/FW call until the
- * corresponding FW/user response arrives
- */
-struct sst_block {
- bool condition; /* condition for blocking check */
- int ret_code; /* ret code when block is released */
- void *data; /* data to be passed for the block, if any */
- bool on;
-};
-
-enum snd_sst_buf_type {
- SST_BUF_USER_STATIC = 1,
- SST_BUF_USER_DYNAMIC,
- SST_BUF_MMAP_STATIC,
- SST_BUF_MMAP_DYNAMIC,
-};
-
-enum snd_src {
- SST_DRV = 1,
- MAD_DRV = 2
-};
-
-/**
- * struct stream_info - structure that holds the stream information
- *
- * @status : stream current state
- * @prev : stream prev state
- * @codec : stream codec
- * @sst_id : stream id
- * @ops : stream operation pb/cp/drm...
- * @bufs: stream buffer list
- * @lock : stream mutex for protecting state
- * @pcm_lock : spinlock for pcm path only
- * @mmapped : is stream mmapped
- * @sg_index : current stream user buffer index
- * @cur_ptr : stream user buffer pointer
- * @buf_entry : current user buffer
- * @data_blk : stream block for data operations
- * @ctrl_blk : stream block for ctrl operations
- * @buf_type : stream user buffer type
- * @pcm_substream : PCM substream
- * @period_elapsed : PCM period elapsed callback
- * @sfreq : stream sampling freq
- * @decode_ibuf : Decoded i/p buffers pointer
- * @decode_obuf : Decoded o/p buffers pointer
- * @decode_isize : Decoded i/p buffers size
- * @decode_osize : Decoded o/p buffers size
- * @decode_ibuf_type : Decoded i/p buffer type
- * @decode_obuf_type : Decoded o/p buffer type
- * @idecode_alloc : Decode alloc index
- * @need_draining : stream set for drain
- * @str_type : stream type
- * @curr_bytes : current bytes decoded
- * @cumm_bytes : cumulative bytes decoded
- * @src : stream source
- * @device : output device type (medfield only)
- * @pcm_slot : pcm slot value
- */
-struct stream_info {
- unsigned int status;
- unsigned int prev;
- u8 codec;
- unsigned int sst_id;
- unsigned int ops;
- struct list_head bufs;
- struct mutex lock; /* mutex */
- spinlock_t pcm_lock;
- bool mmapped;
- unsigned int sg_index; /* current buf Index */
- unsigned char __user *cur_ptr; /* Current static bufs */
- struct snd_sst_buf_entry __user *buf_entry;
- struct sst_block data_blk; /* stream ops block */
- struct sst_block ctrl_blk; /* stream control cmd block */
- enum snd_sst_buf_type buf_type;
- void *pcm_substream;
- void (*period_elapsed) (void *pcm_substream);
- unsigned int sfreq;
- void *decode_ibuf, *decode_obuf;
- unsigned int decode_isize, decode_osize;
- u8 decode_ibuf_type, decode_obuf_type;
- unsigned int idecode_alloc;
- unsigned int need_draining;
- unsigned int str_type;
- u32 curr_bytes;
- u32 cumm_bytes;
- u32 src;
- enum snd_sst_audio_device_type device;
- u8 pcm_slot;
-};
-
-/*
- * struct stream_alloc_block - this structure is used to block the user's
- * alloc call until the FW responds to it
- *
- * @sst_id : session id of blocked stream
- * @ops_block : ops block structure
- */
-struct stream_alloc_block {
- int sst_id; /* session id of blocked stream */
- struct sst_block ops_block; /* ops block structure */
-};
-
-#define SST_FW_SIGN "$SST"
-#define SST_FW_LIB_SIGN "$LIB"
-
-/*
- * struct fw_header - FW file headers
- *
- * @signature : FW signature
- * @modules : # of modules
- * @file_format : version of header format
- * @reserved : reserved fields
- */
-struct fw_header {
- unsigned char signature[FW_SIGNATURE_SIZE]; /* FW signature */
- u32 file_size; /* size of fw minus this header */
- u32 modules; /* # of modules */
- u32 file_format; /* version of header format */
- u32 reserved[4];
-};
-
-struct fw_module_header {
- unsigned char signature[FW_SIGNATURE_SIZE]; /* module signature */
- u32 mod_size; /* size of module */
- u32 blocks; /* # of blocks */
- u32 type; /* codec type, pp lib */
- u32 entry_point;
-};
-
-struct dma_block_info {
- enum sst_ram_type type; /* IRAM/DRAM */
- u32 size; /* Bytes */
- u32 ram_offset; /* Offset in I/DRAM */
- u32 rsvd; /* Reserved field */
-};
-
-struct ioctl_pvt_data {
- int str_id;
- int pvt_id;
-};
-
-struct sst_ipc_msg_wq {
- union ipc_header header;
- char mailbox[SST_MAILBOX_SIZE];
- struct work_struct wq;
-};
-
-struct mad_ops_wq {
- int stream_id;
- enum sst_controls control_op;
- struct work_struct wq;
-
-};
-
-#define SST_MMAP_PAGES (640*1024 / PAGE_SIZE)
-#define SST_MMAP_STEP (40*1024 / PAGE_SIZE)
-
-/**
- * struct intel_sst_drv - driver ops
- *
- * @pmic_state : pmic state
- * @pmic_vendor : pmic vendor detected
- * @sst_state : current sst device state
- * @pci_id : PCI device id loaded
- * @shim : SST shim pointer
- * @mailbox : SST mailbox pointer
- * @iram : SST IRAM pointer
- * @dram : SST DRAM pointer
- * @shim_phy_add : SST shim phy addr
- * @ipc_dispatch_list : ipc messages dispatched
- * @ipc_post_msg_wq : wq to post IPC messages context
- * @ipc_process_msg : wq to process msgs from FW context
- * @ipc_process_reply : wq to process reply from FW context
- * @ipc_post_msg : wq to post reply from FW context
- * @mad_ops : MAD driver operations registered
- * @mad_wq : MAD driver wq
- * @post_msg_wq : wq to post IPC messages
- * @process_msg_wq : wq to process msgs from FW
- * @process_reply_wq : wq to process reply from FW
- * @streams : sst stream contexts
- * @alloc_block : block structure for alloc
- * @tgt_dev_blk : block structure for target device
- * @fw_info_blk : block structure for fw info block
- * @vol_info_blk : block structure for vol info block
- * @mute_info_blk : block structure for mute info block
- * @hs_info_blk : block structure for hs info block
- * @list_lock : sst driver list lock (deprecated)
- * @list_spin_lock : sst driver spin lock block
- * @scard_ops : sst card ops
- * @pci : sst pci device structure
- * @active_streams : sst active streams
- * @sst_lock : sst device lock
- * @stream_lock : sst stream lock
- * @unique_id : sst unique id
- * @stream_cnt : total sst active stream count
- * @pb_streams : total active pb streams
- * @cp_streams : total active cp streams
- * @lpe_stalled : lpe stall status
- * @pmic_port_instance : active pmic port instance
- * @rx_time_slot_status : active rx slot
- * @lpaudio_start : lpaudio status
- * @audio_start : audio status
- * @devt_d : pointer to /dev/lpe node
- * @devt_c : pointer to /dev/lpe_ctrl node
- * @max_streams : max streams allowed
- */
-struct intel_sst_drv {
- bool pmic_state;
- int pmic_vendor;
- int sst_state;
- unsigned int pci_id;
- void __iomem *shim;
- void __iomem *mailbox;
- void __iomem *iram;
- void __iomem *dram;
- unsigned int shim_phy_add;
- struct list_head ipc_dispatch_list;
- struct work_struct ipc_post_msg_wq;
- struct sst_ipc_msg_wq ipc_process_msg;
- struct sst_ipc_msg_wq ipc_process_reply;
- struct sst_ipc_msg_wq ipc_post_msg;
- struct mad_ops_wq mad_ops;
- wait_queue_head_t wait_queue;
- struct workqueue_struct *mad_wq;
- struct workqueue_struct *post_msg_wq;
- struct workqueue_struct *process_msg_wq;
- struct workqueue_struct *process_reply_wq;
-
- struct stream_info streams[MAX_NUM_STREAMS];
- struct stream_alloc_block alloc_block[MAX_ACTIVE_STREAM];
- struct sst_block tgt_dev_blk, fw_info_blk, ppp_params_blk,
- vol_info_blk, mute_info_blk, hs_info_blk;
- struct mutex list_lock;/* mutex for IPC list locking */
- spinlock_t list_spin_lock; /* spin lock for IPC list locking */
- struct snd_pmic_ops *scard_ops;
- struct pci_dev *pci;
- int active_streams[MAX_NUM_STREAMS];
- void *mmap_mem;
- struct mutex sst_lock;
- struct mutex stream_lock;
- unsigned int mmap_len;
- unsigned int unique_id;
- unsigned int stream_cnt; /* total streams */
- unsigned int encoded_cnt; /* encoded streams only */
- unsigned int am_cnt;
- unsigned int pb_streams; /* pb streams active */
- unsigned int cp_streams; /* cp streams active */
- unsigned int lpe_stalled; /* LPE is stalled or not */
- unsigned int pmic_port_instance; /*pmic port instance*/
- int rx_time_slot_status;
- unsigned int lpaudio_start;
- /* 1 - LPA stream(MP3 pb) in progress*/
- unsigned int audio_start;
- dev_t devt_d, devt_c;
- unsigned int max_streams;
- unsigned int *fw_cntx;
- unsigned int fw_cntx_size;
-
- unsigned int fw_downloaded;
-};
-
-extern struct intel_sst_drv *sst_drv_ctx;
-
-#define CHIP_REV_REG 0xff108000
-#define CHIP_REV_ADDR 0x78
-
-/* misc definitions */
-#define FW_DWNL_ID 0xFF
-#define LOOP1 0x11111111
-#define LOOP2 0x22222222
-#define LOOP3 0x33333333
-#define LOOP4 0x44444444
-
-#define SST_DEFAULT_PMIC_PORT 1 /*audio port*/
-/* NOTE: status will have +ve for good cases and -ve for error ones */
-#define MAX_STREAM_FIELD 255
-
-int sst_alloc_stream(char *params, unsigned int stream_ops, u8 codec,
- unsigned int session_id);
-int sst_alloc_stream_response(unsigned int str_id,
- struct snd_sst_alloc_response *response);
-int sst_stalled(void);
-int sst_pause_stream(int id);
-int sst_resume_stream(int id);
-int sst_enable_rx_timeslot(int status);
-int sst_drop_stream(int id);
-int sst_free_stream(int id);
-int sst_start_stream(int streamID);
-int sst_play_frame(int streamID);
-int sst_pcm_play_frame(int str_id, struct sst_stream_bufs *sst_buf);
-int sst_capture_frame(int streamID);
-int sst_set_stream_param(int streamID, struct snd_sst_params *str_param);
-int sst_target_device_select(struct snd_sst_target_device *target_device);
-int sst_decode(int str_id, struct snd_sst_dbufs *dbufs);
-int sst_get_decoded_bytes(int str_id, unsigned long long *bytes);
-int sst_get_fw_info(struct snd_sst_fw_info *info);
-int sst_get_stream_params(int str_id,
- struct snd_sst_get_stream_params *get_params);
-int sst_get_stream(struct snd_sst_params *str_param);
-int sst_get_stream_allocated(struct snd_sst_params *str_param,
- struct snd_sst_lib_download **lib_dnld);
-int sst_drain_stream(int str_id);
-int sst_get_vol(struct snd_sst_vol *set_vol);
-int sst_set_vol(struct snd_sst_vol *set_vol);
-int sst_set_mute(struct snd_sst_mute *set_mute);
-
-
-void sst_post_message(struct work_struct *work);
-void sst_process_message(struct work_struct *work);
-void sst_process_reply(struct work_struct *work);
-void sst_process_mad_ops(struct work_struct *work);
-void sst_process_mad_jack_detection(struct work_struct *work);
-
-long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd,
- unsigned long arg);
-int intel_sst_open(struct inode *i_node, struct file *file_ptr);
-int intel_sst_open_cntrl(struct inode *i_node, struct file *file_ptr);
-int intel_sst_release(struct inode *i_node, struct file *file_ptr);
-int intel_sst_release_cntrl(struct inode *i_node, struct file *file_ptr);
-int intel_sst_read(struct file *file_ptr, char __user *buf,
- size_t count, loff_t *ppos);
-int intel_sst_write(struct file *file_ptr, const char __user *buf,
- size_t count, loff_t *ppos);
-int intel_sst_mmap(struct file *fp, struct vm_area_struct *vma);
-ssize_t intel_sst_aio_write(struct kiocb *kiocb, const struct iovec *iov,
- unsigned long nr_segs, loff_t offset);
-ssize_t intel_sst_aio_read(struct kiocb *kiocb, const struct iovec *iov,
- unsigned long nr_segs, loff_t offset);
-
-int sst_load_fw(const struct firmware *fw, void *context);
-int sst_load_library(struct snd_sst_lib_download *lib, u8 ops);
-int sst_spi_mode_enable(void);
-int sst_get_block_stream(struct intel_sst_drv *sst_drv_ctx);
-
-int sst_wait_interruptible(struct intel_sst_drv *sst_drv_ctx,
- struct sst_block *block);
-int sst_wait_interruptible_timeout(struct intel_sst_drv *sst_drv_ctx,
- struct sst_block *block, int timeout);
-int sst_wait_timeout(struct intel_sst_drv *sst_drv_ctx,
- struct stream_alloc_block *block);
-int sst_create_large_msg(struct ipc_post **arg);
-int sst_create_short_msg(struct ipc_post **arg);
-void sst_wake_up_alloc_block(struct intel_sst_drv *sst_drv_ctx,
- u8 sst_id, int status, void *data);
-void sst_clear_interrupt(void);
-int intel_sst_resume(struct pci_dev *pci);
-int sst_download_fw(void);
-void free_stream_context(unsigned int str_id);
-void sst_clean_stream(struct stream_info *stream);
-
-/*
- * sst_fill_header - inline to fill sst header
- *
- * @header : ipc header
- * @msg : IPC message to be sent
- * @large : is ipc large msg
- * @str_id : stream id
- *
- * this function is an inline function that sets the headers before
- * sending a message
- */
-static inline void sst_fill_header(union ipc_header *header,
- int msg, int large, int str_id)
-{
- header->part.msg_id = msg;
- header->part.str_id = str_id;
- header->part.large = large;
- header->part.done = 0;
- header->part.busy = 1;
- header->part.data = 0;
-}
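/*
 * Typical send path built on sst_fill_header() (a sketch mirroring
 * sst_download_library()/sst_send_algo_ipc() elsewhere in the driver;
 * 'str_id' is assumed to be a valid stream id):
 *
 *	struct ipc_post *msg;
 *
 *	if (sst_create_large_msg(&msg))
 *		return -ENOMEM;
 *	sst_fill_header(&msg->header, IPC_IA_ALG_PARAMS, 1, str_id);
 *	spin_lock(&sst_drv_ctx->list_spin_lock);
 *	list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
 *	spin_unlock(&sst_drv_ctx->list_spin_lock);
 *	sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
 */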
-
-/*
- * sst_assign_pvt_id - assign a pvt id for stream
- *
- * @sst_drv_ctx : driver context
- *
- * this inline function assigns a private id for calls that don't have a
- * stream context yet; it should be called with the lock held
- */
-static inline unsigned int sst_assign_pvt_id(struct intel_sst_drv *sst_drv_ctx)
-{
- sst_drv_ctx->unique_id++;
- if (sst_drv_ctx->unique_id >= MAX_NUM_STREAMS)
- sst_drv_ctx->unique_id = 1;
- return sst_drv_ctx->unique_id;
-}
-
-/*
- * sst_init_stream - this function initializes the stream context
- *
- * @stream : stream structure
- * @codec : codec for stream
- * @sst_id : stream id
- * @ops : stream operation
- * @slot : stream pcm slot
- * @device : device type
- *
- * this inline function initializes the stream context for an allocated stream
- */
-static inline void sst_init_stream(struct stream_info *stream,
- int codec, int sst_id, int ops, u8 slot,
- enum snd_sst_audio_device_type device)
-{
- stream->status = STREAM_INIT;
- stream->prev = STREAM_UN_INIT;
- stream->codec = codec;
- stream->sst_id = sst_id;
- stream->str_type = 0;
- stream->ops = ops;
- stream->data_blk.on = false;
- stream->data_blk.condition = false;
- stream->data_blk.ret_code = 0;
- stream->data_blk.data = NULL;
- stream->ctrl_blk.on = false;
- stream->ctrl_blk.condition = false;
- stream->ctrl_blk.ret_code = 0;
- stream->ctrl_blk.data = NULL;
- stream->need_draining = false;
- stream->decode_ibuf = NULL;
- stream->decode_isize = 0;
- stream->mmapped = false;
- stream->pcm_slot = slot;
- stream->device = device;
-}
-
-
-/*
- * sst_validate_strid - this function validates the stream id
- *
- * @str_id : stream id to be validated
- *
- * returns 0 if valid stream
- */
-static inline int sst_validate_strid(int str_id)
-{
- if (str_id <= 0 || str_id > sst_drv_ctx->max_streams) {
- pr_err("SST ERR: invalid stream id : %d MAX_STREAMS:%d\n",
- str_id, sst_drv_ctx->max_streams);
- return -EINVAL;
- } else
- return 0;
-}
-
-static inline int sst_shim_write(void __iomem *addr, int offset, int value)
-{
-
- if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID)
- writel(value, addr + SST_ISRD); /*dummy*/
- writel(value, addr + offset);
- return 0;
-}
-
-static inline int sst_shim_read(void __iomem *addr, int offset)
-{
- return readl(addr + offset);
-}
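/*
 * The shim register unions above are used with sst_shim_read()/
 * sst_shim_write() in a read-modify-write pattern, e.g. (as done in
 * intel_sst_dsp.c):
 *
 *	union config_status_reg csr;
 *
 *	csr.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
 *	csr.part.run_stall = 1;
 *	sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
 */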
-#endif /* __INTEL_SST_COMMON_H__ */
diff --git a/drivers/staging/intel_sst/intel_sst_drv_interface.c b/drivers/staging/intel_sst/intel_sst_drv_interface.c
deleted file mode 100644
index 22bd29c0c439..000000000000
--- a/drivers/staging/intel_sst/intel_sst_drv_interface.c
+++ /dev/null
@@ -1,564 +0,0 @@
-/*
- * intel_sst_drv_interface.c - Intel SST Driver for audio engine
- *
- * Copyright (C) 2008-10 Intel Corp
- * Authors: Vinod Koul <vinod.koul@intel.com>
- * Harsha Priya <priya.harsha@intel.com>
- * Dharageswari R <dharageswari.r@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * This driver exposes the audio engine functionalities to the ALSA
- * and middleware.
- * Upper layer interfaces (MAD driver, MMF) to SST driver
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/delay.h>
-#include <linux/pci.h>
-#include <linux/fs.h>
-#include <linux/firmware.h>
-#include <linux/pm_runtime.h>
-#include <linux/export.h>
-#include "intel_sst.h"
-#include "intel_sst_ioctl.h"
-#include "intel_sst_fw_ipc.h"
-#include "intel_sst_common.h"
-
-
-/*
- * sst_download_fw - download the audio firmware to DSP
- *
- * This function is called when the FW needs to be downloaded to SST DSP engine
- */
-int sst_download_fw(void)
-{
- int retval;
- const struct firmware *fw_sst;
- char name[20];
-
- if (sst_drv_ctx->sst_state != SST_UN_INIT)
- return -EPERM;
-
- /* Reloading the firmware is not needed for MRST */
- if ((sst_drv_ctx->pci_id == SST_MRST_PCI_ID) && sst_drv_ctx->fw_downloaded) {
- pr_debug("FW already downloaded, skip for MRST platform\n");
- sst_drv_ctx->sst_state = SST_FW_RUNNING;
- return 0;
- }
-
- snprintf(name, sizeof(name), "%s%04x%s", "fw_sst_",
- sst_drv_ctx->pci_id, ".bin");
-
- pr_debug("Downloading %s FW now...\n", name);
- retval = request_firmware(&fw_sst, name, &sst_drv_ctx->pci->dev);
- if (retval) {
- pr_err("request fw failed %d\n", retval);
- return retval;
- }
- sst_drv_ctx->alloc_block[0].sst_id = FW_DWNL_ID;
- sst_drv_ctx->alloc_block[0].ops_block.condition = false;
- retval = sst_load_fw(fw_sst, NULL);
- if (retval)
- goto end_restore;
-
- retval = sst_wait_timeout(sst_drv_ctx, &sst_drv_ctx->alloc_block[0]);
- if (retval)
- pr_err("fw download failed %d\n" , retval);
- else
- sst_drv_ctx->fw_downloaded = 1;
-
-end_restore:
- release_firmware(fw_sst);
- sst_drv_ctx->alloc_block[0].sst_id = BLOCK_UNINIT;
- return retval;
-}
-
-
-/*
- * sst_stalled - this function checks if the lpe is in stalled state
- */
-int sst_stalled(void)
-{
- int retry = 1000;
- int retval = -1;
-
- while (retry) {
- if (!sst_drv_ctx->lpe_stalled)
- return 0;
- /*wait for time and re-check*/
- msleep(1);
-
- retry--;
- }
- pr_debug("in Stalled State\n");
- return retval;
-}
-
-void free_stream_context(unsigned int str_id)
-{
- struct stream_info *stream;
-
- if (!sst_validate_strid(str_id)) {
- /* str_id is valid, so stream is allocated */
- stream = &sst_drv_ctx->streams[str_id];
- if (sst_free_stream(str_id))
- sst_clean_stream(&sst_drv_ctx->streams[str_id]);
- if (stream->ops == STREAM_OPS_PLAYBACK ||
- stream->ops == STREAM_OPS_PLAYBACK_DRM) {
- sst_drv_ctx->pb_streams--;
- if (sst_drv_ctx->pci_id == SST_MFLD_PCI_ID)
- sst_drv_ctx->scard_ops->power_down_pmic_pb(
- stream->device);
- else {
- if (sst_drv_ctx->pb_streams == 0)
- sst_drv_ctx->scard_ops->
- power_down_pmic_pb(stream->device);
- }
- } else if (stream->ops == STREAM_OPS_CAPTURE) {
- sst_drv_ctx->cp_streams--;
- if (sst_drv_ctx->cp_streams == 0)
- sst_drv_ctx->scard_ops->power_down_pmic_cp(
- stream->device);
- }
- if (sst_drv_ctx->pb_streams == 0
- && sst_drv_ctx->cp_streams == 0)
- sst_drv_ctx->scard_ops->power_down_pmic();
- }
-}
-
-/*
- * sst_get_stream_allocated - this function gets a stream allocated with
- * the given params
- *
- * @str_param : stream params
- * @lib_dnld : pointer to pointer of lib download struct
- *
- * This creates new stream id for a stream, in case lib is to be downloaded to
- * DSP, it downloads that
- */
-int sst_get_stream_allocated(struct snd_sst_params *str_param,
- struct snd_sst_lib_download **lib_dnld)
-{
- int retval, str_id;
- struct stream_info *str_info;
-
- retval = sst_alloc_stream((char *) &str_param->sparams, str_param->ops,
- str_param->codec, str_param->device_type);
- if (retval < 0) {
- pr_err("sst_alloc_stream failed %d\n", retval);
- return retval;
- }
- pr_debug("Stream allocated %d\n", retval);
- str_id = retval;
- str_info = &sst_drv_ctx->streams[str_id];
- /* Block the call for reply */
- retval = sst_wait_interruptible_timeout(sst_drv_ctx,
- &str_info->ctrl_blk, SST_BLOCK_TIMEOUT);
- if ((retval != 0) || (str_info->ctrl_blk.ret_code != 0)) {
- pr_debug("FW alloc failed retval %d, ret_code %d\n",
- retval, str_info->ctrl_blk.ret_code);
- str_id = -str_info->ctrl_blk.ret_code; /*return error*/
- *lib_dnld = str_info->ctrl_blk.data;
- sst_clean_stream(str_info);
- } else
- pr_debug("FW Stream allocated success\n");
- return str_id; /*will ret either error (in above if) or correct str id*/
-}
-
-/*
- * sst_get_sfreq - this function returns the frequency of the stream
- *
- * @str_param : stream params
- */
-static int sst_get_sfreq(struct snd_sst_params *str_param)
-{
- switch (str_param->codec) {
- case SST_CODEC_TYPE_PCM:
- return 48000; /*str_param->sparams.uc.pcm_params.sfreq;*/
- case SST_CODEC_TYPE_MP3:
- return str_param->sparams.uc.mp3_params.sfreq;
- case SST_CODEC_TYPE_AAC:
- return str_param->sparams.uc.aac_params.sfreq;
- case SST_CODEC_TYPE_WMA9:
- return str_param->sparams.uc.wma_params.sfreq;
- default:
- return 0;
- }
-}
-
-/*
- * sst_get_stream - this function prepares for stream allocation
- *
- * @str_param : stream param
- */
-int sst_get_stream(struct snd_sst_params *str_param)
-{
- int i, retval;
- struct stream_info *str_info;
- struct snd_sst_lib_download *lib_dnld;
-
- /* stream is not allocated, we are allocating */
- retval = sst_get_stream_allocated(str_param, &lib_dnld);
- if (retval == -(SST_LIB_ERR_LIB_DNLD_REQUIRED)) {
- /* codec download is required */
- struct snd_sst_alloc_response *response;
-
- pr_debug("Codec is required.... trying that\n");
- if (lib_dnld == NULL) {
- pr_err("lib download null!!! abort\n");
- return -EIO;
- }
- i = sst_get_block_stream(sst_drv_ctx);
- pr_debug("alloc block allocated = %d\n", i);
- if (i < 0) {
- kfree(lib_dnld);
- return -ENOMEM;
- }
- response = sst_drv_ctx->alloc_block[i].ops_block.data;
- retval = sst_load_library(lib_dnld, str_param->ops);
- kfree(lib_dnld);
-
- sst_drv_ctx->alloc_block[i].sst_id = BLOCK_UNINIT;
- if (!retval) {
- pr_debug("codec was downloaded successfully\n");
-
- retval = sst_get_stream_allocated(str_param, &lib_dnld);
- if (retval <= 0)
- goto err;
-
- pr_debug("Alloc done stream id %d\n", retval);
- } else {
- pr_debug("codec download failed\n");
- retval = -EIO;
- goto err;
- }
- } else if (retval <= 0)
- goto err;
- /*else
- set_port_params(str_param, str_param->ops);*/
-
- /* store sampling freq */
- str_info = &sst_drv_ctx->streams[retval];
- str_info->sfreq = sst_get_sfreq(str_param);
-
- /* power on the analog, if reqd */
- if (str_param->ops == STREAM_OPS_PLAYBACK ||
- str_param->ops == STREAM_OPS_PLAYBACK_DRM) {
- if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID)
- sst_drv_ctx->scard_ops->power_up_pmic_pb(
- sst_drv_ctx->pmic_port_instance);
- else
- sst_drv_ctx->scard_ops->power_up_pmic_pb(
- str_info->device);
- /*Only if the playback is MP3 - Send a message*/
- sst_drv_ctx->pb_streams++;
- } else if (str_param->ops == STREAM_OPS_CAPTURE) {
-
- sst_drv_ctx->scard_ops->power_up_pmic_cp(
- sst_drv_ctx->pmic_port_instance);
- /* Send a message if not sent already */
- sst_drv_ctx->cp_streams++;
- }
-
-err:
- return retval;
-}
-
-void sst_process_mad_ops(struct work_struct *work)
-{
-
- struct mad_ops_wq *mad_ops =
- container_of(work, struct mad_ops_wq, wq);
- int retval = 0;
-
- switch (mad_ops->control_op) {
- case SST_SND_PAUSE:
- retval = sst_pause_stream(mad_ops->stream_id);
- break;
- case SST_SND_RESUME:
- retval = sst_resume_stream(mad_ops->stream_id);
- break;
- case SST_SND_DROP:
- retval = sst_drop_stream(mad_ops->stream_id);
- break;
- case SST_SND_START:
- pr_debug("SST Debug: start stream\n");
- retval = sst_start_stream(mad_ops->stream_id);
- break;
- case SST_SND_STREAM_PROCESS:
- pr_debug("play/capt frames...\n");
- break;
- default:
- pr_err(" wrong control_ops reported\n");
- }
- return;
-}
-
-void send_intial_rx_timeslot(void)
-{
- if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID &&
- sst_drv_ctx->rx_time_slot_status != RX_TIMESLOT_UNINIT
- && sst_drv_ctx->pmic_vendor != SND_NC)
- sst_enable_rx_timeslot(sst_drv_ctx->rx_time_slot_status);
-}
-
-/*
- * sst_open_pcm_stream - Open PCM interface
- *
- * @str_param: parameters of pcm stream
- *
- * This function is called by MID sound card driver to open
- * a new pcm interface
- */
-int sst_open_pcm_stream(struct snd_sst_params *str_param)
-{
- struct stream_info *str_info;
- int retval;
-
- pm_runtime_get_sync(&sst_drv_ctx->pci->dev);
-
- if (sst_drv_ctx->sst_state == SST_SUSPENDED) {
- /* LPE is suspended, resume it before proceeding*/
- pr_debug("Resuming from Suspended state\n");
- retval = intel_sst_resume(sst_drv_ctx->pci);
- if (retval) {
- pr_err("Resume Failed = %#x, abort\n", retval);
- pm_runtime_put(&sst_drv_ctx->pci->dev);
- return retval;
- }
- }
- if (sst_drv_ctx->sst_state == SST_UN_INIT) {
- /* FW is not downloaded */
- pr_debug("DSP Downloading FW now...\n");
- retval = sst_download_fw();
- if (retval) {
- pr_err("FW download fail %x, abort\n", retval);
- pm_runtime_put(&sst_drv_ctx->pci->dev);
- return retval;
- }
- send_intial_rx_timeslot();
- }
-
- if (!str_param) {
- pm_runtime_put(&sst_drv_ctx->pci->dev);
- return -EINVAL;
- }
-
- retval = sst_get_stream(str_param);
- if (retval > 0) {
- sst_drv_ctx->stream_cnt++;
- str_info = &sst_drv_ctx->streams[retval];
- str_info->src = MAD_DRV;
- } else
- pm_runtime_put(&sst_drv_ctx->pci->dev);
-
- return retval;
-}
-
-/*
- * sst_close_pcm_stream - Close PCM interface
- *
- * @str_id: stream id to be closed
- *
- * This function is called by MID sound card driver to close
- * an existing pcm interface
- */
-int sst_close_pcm_stream(unsigned int str_id)
-{
- struct stream_info *stream;
-
- pr_debug("sst: stream free called\n");
- if (sst_validate_strid(str_id))
- return -EINVAL;
- stream = &sst_drv_ctx->streams[str_id];
- free_stream_context(str_id);
- stream->pcm_substream = NULL;
- stream->status = STREAM_UN_INIT;
- stream->period_elapsed = NULL;
- sst_drv_ctx->stream_cnt--;
- pr_debug("sst: will call runtime put now\n");
- pm_runtime_put(&sst_drv_ctx->pci->dev);
- return 0;
-}
-
-/*
- * sst_device_control - Set Control params
- *
- * @cmd: control cmd to be set
- * @arg: command argument
- *
- * This function is called by MID sound card driver to set
- * SST/Sound card controls for an opened stream.
- * This is registered with MID driver
- */
-int sst_device_control(int cmd, void *arg)
-{
- int retval = 0, str_id = 0;
-
- switch (cmd) {
- case SST_SND_PAUSE:
- case SST_SND_RESUME:
- case SST_SND_DROP:
- case SST_SND_START:
- sst_drv_ctx->mad_ops.control_op = cmd;
- sst_drv_ctx->mad_ops.stream_id = *(int *)arg;
- queue_work(sst_drv_ctx->mad_wq, &sst_drv_ctx->mad_ops.wq);
- break;
-
- case SST_SND_STREAM_INIT: {
- struct pcm_stream_info *str_info;
- struct stream_info *stream;
-
- pr_debug("stream init called\n");
- str_info = (struct pcm_stream_info *)arg;
- str_id = str_info->str_id;
- retval = sst_validate_strid(str_id);
- if (retval)
- break;
-
- stream = &sst_drv_ctx->streams[str_id];
- pr_debug("setting the period ptrs\n");
- stream->pcm_substream = str_info->mad_substream;
- stream->period_elapsed = str_info->period_elapsed;
- stream->sfreq = str_info->sfreq;
- stream->prev = stream->status;
- stream->status = STREAM_INIT;
- break;
- }
-
- case SST_SND_BUFFER_POINTER: {
- struct pcm_stream_info *stream_info;
- struct snd_sst_tstamp fw_tstamp = {0,};
- struct stream_info *stream;
-
-
- stream_info = (struct pcm_stream_info *)arg;
- str_id = stream_info->str_id;
- retval = sst_validate_strid(str_id);
- if (retval)
- break;
- stream = &sst_drv_ctx->streams[str_id];
-
- if (!stream->pcm_substream)
- break;
- memcpy_fromio(&fw_tstamp,
- ((void *)(sst_drv_ctx->mailbox + SST_TIME_STAMP)
- +(str_id * sizeof(fw_tstamp))),
- sizeof(fw_tstamp));
-
- pr_debug("Pointer Query on strid = %d ops %d\n",
- str_id, stream->ops);
-
- if (stream->ops == STREAM_OPS_PLAYBACK)
- stream_info->buffer_ptr = fw_tstamp.samples_rendered;
- else
- stream_info->buffer_ptr = fw_tstamp.samples_processed;
- pr_debug("Samples rendered = %llu, buffer ptr %llu\n",
- fw_tstamp.samples_rendered, stream_info->buffer_ptr);
- break;
- }
- case SST_ENABLE_RX_TIME_SLOT: {
- int status = *(int *)arg;
- sst_drv_ctx->rx_time_slot_status = status ;
- sst_enable_rx_timeslot(status);
- break;
- }
- default:
- /* Illegal case */
- pr_warn("illegal req\n");
- return -EINVAL;
- }
-
- return retval;
-}
-
-
-struct intel_sst_pcm_control pcm_ops = {
- .open = sst_open_pcm_stream,
- .device_control = sst_device_control,
- .close = sst_close_pcm_stream,
-};
-
-struct intel_sst_card_ops sst_pmic_ops = {
- .pcm_control = &pcm_ops,
-};
-
-/*
- * register_sst_card - function for sound card to register
- *
- * @card: pointer to structure of operations
- *
- * This function is called when the card driver loads and is ready for registration
- */
-int register_sst_card(struct intel_sst_card_ops *card)
-{
- if (!sst_drv_ctx) {
- pr_err("No SST driver register card reject\n");
- return -ENODEV;
- }
-
- if (!card || !card->module_name) {
- pr_err("Null Pointer Passed\n");
- return -EINVAL;
- }
- if (sst_drv_ctx->pmic_state == SND_MAD_UN_INIT) {
- /* register this driver */
- if ((strncmp(SST_CARD_NAMES, card->module_name,
- strlen(SST_CARD_NAMES))) == 0) {
- sst_drv_ctx->pmic_vendor = card->vendor_id;
- sst_drv_ctx->scard_ops = card->scard_ops;
- sst_pmic_ops.module_name = card->module_name;
- sst_drv_ctx->pmic_state = SND_MAD_INIT_DONE;
- sst_drv_ctx->rx_time_slot_status = 0; /*default AMIC*/
- card->pcm_control = sst_pmic_ops.pcm_control;
- return 0;
- } else {
- pr_err("strcmp fail %s\n", card->module_name);
- return -EINVAL;
- }
-
- } else {
- /* already registered a driver */
- pr_err("Repeat for registration..denied\n");
- return -EBADRQC;
- }
- /* The ASoC code doesn't set scard_ops */
- if (sst_drv_ctx->scard_ops)
- sst_drv_ctx->scard_ops->card_status = SND_CARD_UN_INIT;
- return 0;
-}
-EXPORT_SYMBOL_GPL(register_sst_card);
-
-/*
- * unregister_sst_card- function for sound card to un-register
- *
- * @card: pointer to structure of operations
- *
- * This function is called when card driver unloads
- */
-void unregister_sst_card(struct intel_sst_card_ops *card)
-{
- if (sst_pmic_ops.pcm_control == card->pcm_control) {
- /* unreg */
- sst_pmic_ops.module_name = "";
- sst_drv_ctx->pmic_state = SND_MAD_UN_INIT;
- pr_debug("Unregistered %s\n", card->module_name);
- }
- return;
-}
-EXPORT_SYMBOL_GPL(unregister_sst_card);
diff --git a/drivers/staging/intel_sst/intel_sst_dsp.c b/drivers/staging/intel_sst/intel_sst_dsp.c
deleted file mode 100644
index 426d2b92073a..000000000000
--- a/drivers/staging/intel_sst/intel_sst_dsp.c
+++ /dev/null
@@ -1,496 +0,0 @@
-/*
- * intel_sst_dsp.c - Intel SST Driver for audio engine
- *
- * Copyright (C) 2008-10 Intel Corp
- * Authors: Vinod Koul <vinod.koul@intel.com>
- * Harsha Priya <priya.harsha@intel.com>
- * Dharageswari R <dharageswari.r@intel.com>
- * KP Jeeja <jeeja.kp@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This driver exposes the audio engine functionalities to the ALSA
- * and middleware.
- *
- * This file contains all dsp controlling functions like firmware download,
- * setting/resetting dsp cores, etc
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/pci.h>
-#include <linux/fs.h>
-#include <linux/firmware.h>
-#include "intel_sst.h"
-#include "intel_sst_ioctl.h"
-#include "intel_sst_fw_ipc.h"
-#include "intel_sst_common.h"
-
-
-/**
- * intel_sst_reset_dsp_mrst - Resetting SST DSP
- *
- * This resets the DSP in case of MRST platforms
- */
-static int intel_sst_reset_dsp_mrst(void)
-{
- union config_status_reg csr;
-
- pr_debug("Resetting the DSP in mrst\n");
- csr.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
- csr.full |= 0x382;
- sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
- csr.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
- csr.part.strb_cntr_rst = 0;
- csr.part.run_stall = 0x1;
- csr.part.bypass = 0x7;
- csr.part.sst_reset = 0x1;
- sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
- return 0;
-}
-
-/**
- * intel_sst_reset_dsp_medfield - Resetting SST DSP
- *
- * This resets the DSP in case of Medfield platforms
- */
-static int intel_sst_reset_dsp_medfield(void)
-{
- union config_status_reg csr;
-
- pr_debug("Resetting the DSP in medfield\n");
- csr.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
- csr.full |= 0x382;
- sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
-
- return 0;
-}
-
-/**
- * sst_start_mrst - Start the SST DSP processor
- *
- * This starts the DSP on MRST platforms
- */
-static int sst_start_mrst(void)
-{
- union config_status_reg csr;
-
- csr.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
- csr.part.bypass = 0;
- sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
- csr.part.run_stall = 0;
- csr.part.sst_reset = 0;
- csr.part.strb_cntr_rst = 1;
- pr_debug("Setting SST to execute_mrst 0x%x\n", csr.full);
- sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
-
- return 0;
-}
-
-/**
- * sst_start_medfield - Start the SST DSP processor
- *
- * This starts the DSP on Medfield platforms
- */
-static int sst_start_medfield(void)
-{
- union config_status_reg csr;
-
- csr.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
- csr.part.bypass = 0;
- sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
- csr.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
- csr.part.mfld_strb = 1;
- sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
- csr.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
- csr.part.run_stall = 0;
- csr.part.sst_reset = 0;
- pr_debug("Starting the DSP_medfld %x\n", csr.full);
- sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
- pr_debug("Starting the DSP_medfld\n");
-
- return 0;
-}
-
-/**
- * sst_parse_module - Parse audio FW modules
- *
- * @module: FW module header
- *
- * Parses modules that need to be placed in SST IRAM and DRAM
- * returns error or 0 if module sizes are proper
- */
-static int sst_parse_module(struct fw_module_header *module)
-{
- struct dma_block_info *block;
- u32 count;
- void __iomem *ram;
-
- pr_debug("module sign %s size %x blocks %x type %x\n",
- module->signature, module->mod_size,
- module->blocks, module->type);
- pr_debug("module entrypoint 0x%x\n", module->entry_point);
-
- block = (void *)module + sizeof(*module);
-
- for (count = 0; count < module->blocks; count++) {
- if (block->size <= 0) {
- pr_err("block size invalid\n");
- return -EINVAL;
- }
- switch (block->type) {
- case SST_IRAM:
- ram = sst_drv_ctx->iram;
- break;
- case SST_DRAM:
- ram = sst_drv_ctx->dram;
- break;
- default:
- pr_err("wrong ram type0x%x in block0x%x\n",
- block->type, count);
- return -EINVAL;
- }
- memcpy_toio(ram + block->ram_offset,
- (void *)block + sizeof(*block), block->size);
- block = (void *)block + sizeof(*block) + block->size;
- }
- return 0;
-}
-
-/**
- * sst_parse_fw_image - parse and load FW
- *
- * @sst_fw: pointer to audio fw
- *
- * This function is called to parse and download the FW image
- */
-static int sst_parse_fw_image(const struct firmware *sst_fw)
-{
- struct fw_header *header;
- u32 count;
- int ret_val;
- struct fw_module_header *module;
-
- BUG_ON(!sst_fw);
-
- /* Read the header information from the data pointer */
- header = (struct fw_header *)sst_fw->data;
-
- /* verify FW */
- if ((strncmp(header->signature, SST_FW_SIGN, 4) != 0) ||
- (sst_fw->size != header->file_size + sizeof(*header))) {
- /* Invalid FW signature */
- pr_err("Invalid FW sign/filesize mismatch\n");
- return -EINVAL;
- }
- pr_debug("header sign=%s size=%x modules=%x fmt=%x size=%x\n",
- header->signature, header->file_size, header->modules,
- header->file_format, sizeof(*header));
- module = (void *)sst_fw->data + sizeof(*header);
- for (count = 0; count < header->modules; count++) {
- /* module */
- ret_val = sst_parse_module(module);
- if (ret_val)
- return ret_val;
- module = (void *)module + sizeof(*module) + module->mod_size ;
- }
-
- return 0;
-}
-
-/**
- * sst_load_fw - function to load FW into DSP
- *
- * @fw: Pointer to driver loaded FW
- * @context: driver context
- *
- * This function is called by the OS when the FW is loaded into the kernel
- */
-int sst_load_fw(const struct firmware *fw, void *context)
-{
- int ret_val;
-
- pr_debug("load_fw called\n");
- BUG_ON(!fw);
-
- if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID)
- ret_val = intel_sst_reset_dsp_mrst();
- else if (sst_drv_ctx->pci_id == SST_MFLD_PCI_ID)
- ret_val = intel_sst_reset_dsp_medfield();
- if (ret_val)
- return ret_val;
-
- ret_val = sst_parse_fw_image(fw);
- if (ret_val)
- return ret_val;
- mutex_lock(&sst_drv_ctx->sst_lock);
- sst_drv_ctx->sst_state = SST_FW_LOADED;
- mutex_unlock(&sst_drv_ctx->sst_lock);
- /* 7. ask scu to reset the bypass bits */
- /* 8. bring sst out of reset */
- if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID)
- ret_val = sst_start_mrst();
- else if (sst_drv_ctx->pci_id == SST_MFLD_PCI_ID)
- ret_val = sst_start_medfield();
- if (ret_val)
- return ret_val;
-
- pr_debug("fw loaded successful!!!\n");
- return ret_val;
-}
-
-/* This function is called when any codec/post-processing library
- needs to be downloaded */
-static int sst_download_library(const struct firmware *fw_lib,
- struct snd_sst_lib_download_info *lib)
-{
- /* send IPC message and wait */
- int i;
- u8 pvt_id;
- struct ipc_post *msg = NULL;
- union config_status_reg csr;
- struct snd_sst_str_type str_type = {0};
- int retval = 0;
-
- if (sst_create_large_msg(&msg))
- return -ENOMEM;
-
- pvt_id = sst_assign_pvt_id(sst_drv_ctx);
- i = sst_get_block_stream(sst_drv_ctx);
- pr_debug("alloc block allocated = %d, pvt_id %d\n", i, pvt_id);
- if (i < 0) {
- kfree(msg);
- return -ENOMEM;
- }
- sst_drv_ctx->alloc_block[i].sst_id = pvt_id;
- sst_fill_header(&msg->header, IPC_IA_PREP_LIB_DNLD, 1, pvt_id);
- msg->header.part.data = sizeof(u32) + sizeof(str_type);
- str_type.codec_type = lib->dload_lib.lib_info.lib_type;
- /*str_type.pvt_id = pvt_id;*/
- memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
- memcpy(msg->mailbox_data + sizeof(u32), &str_type, sizeof(str_type));
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- retval = sst_wait_timeout(sst_drv_ctx, &sst_drv_ctx->alloc_block[i]);
- if (retval) {
- /* error */
- sst_drv_ctx->alloc_block[i].sst_id = BLOCK_UNINIT;
- pr_err("Prep codec downloaded failed %d\n",
- retval);
- return -EIO;
- }
- pr_debug("FW responded, ready for download now...\n");
- /* downloading on success */
- mutex_lock(&sst_drv_ctx->sst_lock);
- sst_drv_ctx->sst_state = SST_FW_LOADED;
- mutex_unlock(&sst_drv_ctx->sst_lock);
- csr.full = readl(sst_drv_ctx->shim + SST_CSR);
- csr.part.run_stall = 1;
- sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
-
- csr.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
- csr.part.bypass = 0x7;
- sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
-
- sst_parse_fw_image(fw_lib);
-
- /* set the FW to running again */
- csr.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
- csr.part.bypass = 0x0;
- sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
-
- csr.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
- csr.part.run_stall = 0;
- sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
-
- /* send download complete and wait */
- if (sst_create_large_msg(&msg)) {
- sst_drv_ctx->alloc_block[i].sst_id = BLOCK_UNINIT;
- return -ENOMEM;
- }
-
- sst_fill_header(&msg->header, IPC_IA_LIB_DNLD_CMPLT, 1, pvt_id);
- sst_drv_ctx->alloc_block[i].sst_id = pvt_id;
- msg->header.part.data = sizeof(u32) + sizeof(*lib);
- lib->pvt_id = pvt_id;
- memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
- memcpy(msg->mailbox_data + sizeof(u32), lib, sizeof(*lib));
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- pr_debug("Waiting for FW response Download complete\n");
- sst_drv_ctx->alloc_block[i].ops_block.condition = false;
- retval = sst_wait_timeout(sst_drv_ctx, &sst_drv_ctx->alloc_block[i]);
- if (retval) {
- /* error */
- mutex_lock(&sst_drv_ctx->sst_lock);
- sst_drv_ctx->sst_state = SST_UN_INIT;
- mutex_unlock(&sst_drv_ctx->sst_lock);
- sst_drv_ctx->alloc_block[i].sst_id = BLOCK_UNINIT;
- return -EIO;
- }
-
- pr_debug("FW success on Download complete\n");
- sst_drv_ctx->alloc_block[i].sst_id = BLOCK_UNINIT;
- mutex_lock(&sst_drv_ctx->sst_lock);
- sst_drv_ctx->sst_state = SST_FW_RUNNING;
- mutex_unlock(&sst_drv_ctx->sst_lock);
- return 0;
-
-}
-
-/* This function validates the codec/postprocessing library before it
-is downloaded to the SST DSP */
-static int sst_validate_library(const struct firmware *fw_lib,
- struct lib_slot_info *slot,
- u32 *entry_point)
-{
- struct fw_header *header;
- struct fw_module_header *module;
- struct dma_block_info *block;
- unsigned int n_blk, isize = 0, dsize = 0;
- int err = 0;
-
- header = (struct fw_header *)fw_lib->data;
- if (header->modules != 1) {
- pr_err("Module no mismatch found\n");
- err = -EINVAL;
- goto exit;
- }
- module = (void *)fw_lib->data + sizeof(*header);
- *entry_point = module->entry_point;
- pr_debug("Module entry point 0x%x\n", *entry_point);
- pr_debug("Module Sign %s, Size 0x%x, Blocks 0x%x Type 0x%x\n",
- module->signature, module->mod_size,
- module->blocks, module->type);
-
- block = (void *)module + sizeof(*module);
- for (n_blk = 0; n_blk < module->blocks; n_blk++) {
- switch (block->type) {
- case SST_IRAM:
- isize += block->size;
- break;
- case SST_DRAM:
- dsize += block->size;
- break;
- default:
- pr_err("Invalid block type for 0x%x\n", n_blk);
- err = -EINVAL;
- goto exit;
- }
- block = (void *)block + sizeof(*block) + block->size;
- }
- if (isize > slot->iram_size || dsize > slot->dram_size) {
- pr_err("library exceeds size allocated\n");
- err = -EINVAL;
- goto exit;
- } else
- pr_debug("Library is safe for download...\n");
-
- pr_debug("iram 0x%x, dram 0x%x, iram 0x%x, dram 0x%x\n",
- isize, dsize, slot->iram_size, slot->dram_size);
-exit:
- return err;
-
-}
-
-/* This function is called when the FW requests a particular library download.
-It prepares the library for download */
-int sst_load_library(struct snd_sst_lib_download *lib, u8 ops)
-{
- char buf[20];
- const char *type, *dir;
- int len = 0, error = 0;
- u32 entry_point;
- const struct firmware *fw_lib;
- struct snd_sst_lib_download_info dload_info = {{{0},},};
-
- memset(buf, 0, sizeof(buf));
-
- pr_debug("Lib Type 0x%x, Slot 0x%x, ops 0x%x\n",
- lib->lib_info.lib_type, lib->slot_info.slot_num, ops);
- pr_debug("Version 0x%x, name %s, caps 0x%x media type 0x%x\n",
- lib->lib_info.lib_version, lib->lib_info.lib_name,
- lib->lib_info.lib_caps, lib->lib_info.media_type);
-
- pr_debug("IRAM Size 0x%x, offset 0x%x\n",
- lib->slot_info.iram_size, lib->slot_info.iram_offset);
- pr_debug("DRAM Size 0x%x, offset 0x%x\n",
- lib->slot_info.dram_size, lib->slot_info.dram_offset);
-
- switch (lib->lib_info.lib_type) {
- case SST_CODEC_TYPE_MP3:
- type = "mp3_";
- break;
- case SST_CODEC_TYPE_AAC:
- type = "aac_";
- break;
- case SST_CODEC_TYPE_AACP:
- type = "aac_v1_";
- break;
- case SST_CODEC_TYPE_eAACP:
- type = "aac_v2_";
- break;
- case SST_CODEC_TYPE_WMA9:
- type = "wma9_";
- break;
- default:
- pr_err("Invalid codec type\n");
- error = -EINVAL;
- goto wake;
- }
-
- if (ops == STREAM_OPS_CAPTURE)
- dir = "enc_";
- else
- dir = "dec_";
- len = strlen(type) + strlen(dir);
- strncpy(buf, type, sizeof(buf)-1);
- strncpy(buf + strlen(type), dir, sizeof(buf)-strlen(type)-1);
- len += snprintf(buf + len, sizeof(buf) - len, "%d",
- lib->slot_info.slot_num);
- len += snprintf(buf + len, sizeof(buf) - len, ".bin");
-
- pr_debug("Requesting %s\n", buf);
-
- error = request_firmware(&fw_lib, buf, &sst_drv_ctx->pci->dev);
- if (error) {
- pr_err("library load failed %d\n", error);
- goto wake;
- }
- error = sst_validate_library(fw_lib, &lib->slot_info, &entry_point);
- if (error)
- goto wake_free;
-
- lib->mod_entry_pt = entry_point;
- memcpy(&dload_info.dload_lib, lib, sizeof(*lib));
- error = sst_download_library(fw_lib, &dload_info);
- if (error)
- goto wake_free;
-
- /* lib is downloaded and init send alloc again */
- pr_debug("Library is downloaded now...\n");
-wake_free:
- /* sst_wake_up_alloc_block(sst_drv_ctx, pvt_id, error, NULL); */
- release_firmware(fw_lib);
-wake:
- return error;
-}
-
diff --git a/drivers/staging/intel_sst/intel_sst_fw_ipc.h b/drivers/staging/intel_sst/intel_sst_fw_ipc.h
deleted file mode 100644
index 5d0cc56aaef9..000000000000
--- a/drivers/staging/intel_sst/intel_sst_fw_ipc.h
+++ /dev/null
@@ -1,416 +0,0 @@
-#ifndef __INTEL_SST_FW_IPC_H__
-#define __INTEL_SST_FW_IPC_H__
-/*
-* intel_sst_fw_ipc.h - Intel SST Driver for audio engine
-*
-* Copyright (C) 2008-10 Intel Corporation
-* Author: Vinod Koul <vinod.koul@intel.com>
-* Harsha Priya <priya.harsha@intel.com>
-* Dharageswari R <dharageswari.r@intel.com>
-* KP Jeeja <jeeja.kp@intel.com>
-* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-*
-* This program is free software; you can redistribute it and/or modify
-* it under the terms of the GNU General Public License as published by
-* the Free Software Foundation; version 2 of the License.
-*
-* This program is distributed in the hope that it will be useful, but
-* WITHOUT ANY WARRANTY; without even the implied warranty of
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-* General Public License for more details.
-*
-* You should have received a copy of the GNU General Public License along
-* with this program; if not, write to the Free Software Foundation, Inc.,
-* 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
-*
-* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-*
-* This driver exposes the audio engine functionalities to the ALSA
-* and middleware.
-* This file has definitions shared between the firmware and driver
-*/
-
-#define MAX_NUM_STREAMS_MRST 3
-#define MAX_NUM_STREAMS_MFLD 6
-#define MAX_NUM_STREAMS 6
-#define MAX_DBG_RW_BYTES 80
-#define MAX_NUM_SCATTER_BUFFERS 8
-#define MAX_LOOP_BACK_DWORDS 8
-/* IPC base address and mailbox, timestamp offsets */
-#define SST_MAILBOX_SIZE 0x0400
-#define SST_MAILBOX_SEND 0x0000
-#define SST_MAILBOX_RCV 0x0804
-#define SST_TIME_STAMP 0x1800
-#define SST_RESERVED_OFFSET 0x1A00
-#define SST_CHEKPOINT_OFFSET 0x1C00
-#define REPLY_MSG 0x80
-
-/* Message ID's for IPC messages */
-/* Bits B7: SST or IA/SC ; B6-B4: Msg Category; B3-B0: Msg Type */
-
-/* I2L Firmware/Codec Download msgs */
-#define IPC_IA_PREP_LIB_DNLD 0x01
-#define IPC_IA_LIB_DNLD_CMPLT 0x02
-
-#define IPC_IA_SET_PMIC_TYPE 0x03
-#define IPC_IA_GET_FW_VERSION 0x04
-#define IPC_IA_GET_FW_BUILD_INF 0x05
-#define IPC_IA_GET_FW_INFO 0x06
-#define IPC_IA_GET_FW_CTXT 0x07
-#define IPC_IA_SET_FW_CTXT 0x08
-
-/* I2L Codec Config/control msgs */
-#define IPC_IA_SET_CODEC_PARAMS 0x10
-#define IPC_IA_GET_CODEC_PARAMS 0x11
-#define IPC_IA_SET_PPP_PARAMS 0x12
-#define IPC_IA_GET_PPP_PARAMS 0x13
-#define IPC_IA_PLAY_FRAMES 0x14
-#define IPC_IA_CAPT_FRAMES 0x15
-#define IPC_IA_PLAY_VOICE 0x16
-#define IPC_IA_CAPT_VOICE 0x17
-#define IPC_IA_DECODE_FRAMES 0x18
-
-#define IPC_IA_ALG_PARAMS 0x1A
-#define IPC_IA_TUNING_PARAMS 0x1B
-
-/* I2L Stream config/control msgs */
-#define IPC_IA_ALLOC_STREAM 0x20 /* Allocate a stream ID */
-#define IPC_IA_FREE_STREAM 0x21 /* Free the stream ID */
-#define IPC_IA_SET_STREAM_PARAMS 0x22
-#define IPC_IA_GET_STREAM_PARAMS 0x23
-#define IPC_IA_PAUSE_STREAM 0x24
-#define IPC_IA_RESUME_STREAM 0x25
-#define IPC_IA_DROP_STREAM 0x26
-#define IPC_IA_DRAIN_STREAM 0x27 /* Short msg with str_id */
-#define IPC_IA_TARGET_DEV_SELECT 0x28
-#define IPC_IA_CONTROL_ROUTING 0x29
-
-#define IPC_IA_SET_STREAM_VOL 0x2A /*Vol for stream, pre mixer */
-#define IPC_IA_GET_STREAM_VOL 0x2B
-#define IPC_IA_SET_STREAM_MUTE 0x2C
-#define IPC_IA_GET_STREAM_MUTE 0x2D
-#define IPC_IA_ENABLE_RX_TIME_SLOT 0x2E /* Enable Rx time slot 0 or 1 */
-
-#define IPC_IA_START_STREAM 0x30 /* Short msg with str_id */
-
-/* Debug msgs */
-#define IPC_IA_DBG_MEM_READ 0x40
-#define IPC_IA_DBG_MEM_WRITE 0x41
-#define IPC_IA_DBG_LOOP_BACK 0x42
-
-/* L2I Firmware/Codec Download msgs */
-#define IPC_IA_FW_INIT_CMPLT 0x81
-#define IPC_IA_LPE_GETTING_STALLED 0x82
-#define IPC_IA_LPE_UNSTALLED 0x83
-
-/* L2I Codec Config/control msgs */
-#define IPC_SST_GET_PLAY_FRAMES 0x90 /* Request IA more data */
-#define IPC_SST_GET_CAPT_FRAMES 0x91 /* Request IA more data */
-#define IPC_SST_BUF_UNDER_RUN 0x92 /* PB Under run and stopped */
-#define IPC_SST_BUF_OVER_RUN 0x93 /* CAP overrun and stopped */
-#define IPC_SST_DRAIN_END 0x94 /* PB Drain complete and stopped */
-#define IPC_SST_CHNGE_SSP_PARAMS 0x95 /* PB SSP parameters changed */
-#define IPC_SST_STREAM_PROCESS_FATAL_ERR 0x96/* error in processing a stream */
-#define IPC_SST_PERIOD_ELAPSED 0x97 /* period elapsed */
-#define IPC_IA_TARGET_DEV_CHNGD 0x98 /* target device changed */
-
-#define IPC_SST_ERROR_EVENT 0x99 /* Buffer over run occurred */
-/* L2S messages */
-#define IPC_SC_DDR_LINK_UP 0xC0
-#define IPC_SC_DDR_LINK_DOWN 0xC1
-#define IPC_SC_SET_LPECLK_REQ 0xC2
-#define IPC_SC_SSP_BIT_BANG 0xC3
-
-/* L2I Error reporting msgs */
-#define IPC_IA_MEM_ALLOC_FAIL 0xE0
-#define IPC_IA_PROC_ERR 0xE1 /* error in processing a
- stream; can be used by playback and
- capture modules */
-
-/* L2I Debug msgs */
-#define IPC_IA_PRINT_STRING 0xF0
-
-
-
-/* The response or acknowledge message for any IPC message carries the
- * same message ID and stream ID as the original message.
- * There is no specific ack message ID; the data field carries the
- * response value.
- */
-enum ackData {
- IPC_ACK_SUCCESS = 0,
- IPC_ACK_FAILURE
-};
-
-
-enum sst_error_codes {
- /* Error code,response to msgId: Description */
- /* Common error codes */
- SST_SUCCESS = 0, /* Success */
- SST_ERR_INVALID_STREAM_ID = 1,
- SST_ERR_INVALID_MSG_ID = 2,
- SST_ERR_INVALID_STREAM_OP = 3,
- SST_ERR_INVALID_PARAMS = 4,
- SST_ERR_INVALID_CODEC = 5,
- SST_ERR_INVALID_MEDIA_TYPE = 6,
- SST_ERR_STREAM_ERR = 7,
-
- /* IPC specific error codes */
- SST_IPC_ERR_CALL_BACK_NOT_REGD = 8,
- SST_IPC_ERR_STREAM_NOT_ALLOCATED = 9,
- SST_IPC_ERR_STREAM_ALLOC_FAILED = 10,
- SST_IPC_ERR_GET_STREAM_FAILED = 11,
- SST_ERR_MOD_NOT_AVAIL = 12,
- SST_ERR_MOD_DNLD_RQD = 13,
- SST_ERR_STREAM_STOPPED = 14,
- SST_ERR_STREAM_IN_USE = 15,
-
- /* Capture specific error codes */
- SST_CAP_ERR_INCMPLTE_CAPTURE_MSG = 16,
- SST_CAP_ERR_CAPTURE_FAIL = 17,
- SST_CAP_ERR_GET_DDR_NEW_SGLIST = 18,
- SST_CAP_ERR_UNDER_RUN = 19,
- SST_CAP_ERR_OVERFLOW = 20,
-
- /* Playback specific error codes*/
- SST_PB_ERR_INCMPLTE_PLAY_MSG = 21,
- SST_PB_ERR_PLAY_FAIL = 22,
- SST_PB_ERR_GET_DDR_NEW_SGLIST = 23,
-
- /* Codec manager specific error codes */
- SST_LIB_ERR_LIB_DNLD_REQUIRED = 24,
- SST_LIB_ERR_LIB_NOT_SUPPORTED = 25,
-
- /* Library manager specific error codes */
- SST_SCC_ERR_PREP_DNLD_FAILED = 26,
- SST_SCC_ERR_LIB_DNLD_RES_FAILED = 27,
- /* Scheduler specific error codes */
- SST_SCH_ERR_FAIL = 28,
-
- /* DMA specific error codes */
- SST_DMA_ERR_NO_CHNL_AVAILABLE = 29,
- SST_DMA_ERR_INVALID_INPUT_PARAMS = 30,
- SST_DMA_ERR_CHNL_ALREADY_SUSPENDED = 31,
- SST_DMA_ERR_CHNL_ALREADY_STARTED = 32,
- SST_DMA_ERR_CHNL_NOT_ENABLED = 33,
- SST_DMA_ERR_TRANSFER_FAILED = 34,
-
- SST_SSP_ERR_ALREADY_ENABLED = 35,
- SST_SSP_ERR_ALREADY_DISABLED = 36,
- SST_SSP_ERR_NOT_INITIALIZED = 37,
- SST_SSP_ERR_SRAM_NO_DMA_DATA = 38,
-
- /* Other error codes */
- SST_ERR_MOD_INIT_FAIL = 39,
-
- /* FW init error codes */
- SST_RDR_ERR_IO_DEV_SEL_NOT_ALLOWED = 40,
- SST_RDR_ERR_ROUTE_ALREADY_STARTED = 41,
- SST_RDR_ERR_IO_DEV_SEL_FAILED = 42,
- SST_RDR_PREP_CODEC_DNLD_FAILED = 43,
-
- /* Memory debug error codes */
- SST_ERR_DBG_MEM_READ_FAIL = 44,
- SST_ERR_DBG_MEM_WRITE_FAIL = 45,
- SST_ERR_INSUFFICIENT_INPUT_SG_LIST = 46,
- SST_ERR_INSUFFICIENT_OUTPUT_SG_LIST = 47,
-
- SST_ERR_BUFFER_NOT_AVAILABLE = 48,
- SST_ERR_BUFFER_NOT_ALLOCATED = 49,
- SST_ERR_INVALID_REGION_TYPE = 50,
- SST_ERR_NULL_PTR = 51,
- SST_ERR_INVALID_BUFFER_SIZE = 52,
- SST_ERR_INVALID_BUFFER_INDEX = 53,
-
- /*IIPC specific error codes */
- SST_IIPC_QUEUE_FULL = 54,
- SST_IIPC_ERR_MSG_SND_FAILED = 55,
- SST_PB_ERR_UNDERRUN_OCCURED = 56,
- SST_RDR_INSUFFICIENT_MIXER_BUFFER = 57,
- SST_INVALID_TIME_SLOTS = 58,
-};
-
-enum dbg_mem_data_type {
- /* Data type of debug read/write */
- DATA_TYPE_U32,
- DATA_TYPE_U16,
- DATA_TYPE_U8,
-};
-
-/* CAUTION NOTE: All IPC message bodies must be a multiple of 32 bits. */
-
-/* IPC Header */
-union ipc_header {
- struct {
- u32 msg_id:8; /* Message ID - Max 256 Message Types */
- u32 str_id:5;
- u32 large:1; /* Large Message if large = 1 */
- u32 reserved:2; /* Reserved for future use */
- u32 data:14; /* Ack/Info for msg, size of msg in Mailbox */
- u32 done:1; /* bit 30 */
- u32 busy:1; /* bit 31 */
- } part;
- u32 full;
-} __attribute__ ((packed));
-
-/* Firmware build info */
-struct sst_fw_build_info {
- unsigned char date[16]; /* Firmware build date */
- unsigned char time[16]; /* Firmware build time */
-} __attribute__ ((packed));
-
-struct ipc_header_fw_init {
- struct snd_sst_fw_version fw_version;/* Firmware version details */
- struct sst_fw_build_info build_info;
- u16 result; /* Fw init result */
- u8 module_id; /* Module ID in case of error */
- u8 debug_info; /* Debug info from Module ID in case of fail */
-} __attribute__ ((packed));
-
-/* Address and size info of a frame buffer in DDR */
-struct sst_address_info {
- u32 addr; /* Address at IA */
- u32 size; /* Size of the buffer */
-} __attribute__ ((packed));
-
-/* Time stamp */
-struct snd_sst_tstamp {
- u64 samples_processed;/* capture - data in DDR */
- u64 samples_rendered;/* playback - data rendered */
- u64 bytes_processed;/* bytes decoded or encoded */
- u32 sampling_frequency;/* eg: 48000, 44100 */
- u32 dma_base_address;/* DMA base address */
- u16 dma_channel_no;/* DMA Channel used for the data transfer*/
- u16 reserved;/* 32 bit alignment */
-};
-
-/* Frame info to play or capture */
-struct sst_frame_info {
- u16 num_entries; /* number of entries to follow */
- u16 rsrvd;
- struct sst_address_info addr[MAX_NUM_SCATTER_BUFFERS];
-} __attribute__ ((packed));
-
-/* Frames info for decode */
-struct snd_sst_decode_info {
- unsigned long long input_bytes_consumed;
- unsigned long long output_bytes_produced;
- struct sst_frame_info frames_in;
- struct sst_frame_info frames_out;
-} __attribute__ ((packed));
-
-/* SST to IA print debug message*/
-struct ipc_sst_ia_print_params {
- u32 string_size;/* Max value is 160 */
- u8 prt_string[160];/* Null terminated Char string */
-} __attribute__ ((packed));
-
-/* Voice data message */
-struct snd_sst_voice_data {
- u16 num_bytes;/* Number of valid voice data bytes */
- u8 pcm_wd_size;/* 0=8 bit, 1=16 bit 2=32 bit */
- u8 reserved;/* Reserved */
- u8 voice_data_buf[0];/* Voice data buffer in bytes, little endian */
-} __attribute__ ((packed));
-
-/* SST to IA memory read debug message */
-struct ipc_sst_ia_dbg_mem_rw {
- u16 num_bytes;/* Maximum of MAX_DBG_RW_BYTES */
- u16 data_type;/* enum: dbg_mem_data_type */
- u32 address; /* Memory address of data memory of data_type */
- u8 rw_bytes[MAX_DBG_RW_BYTES];/* Maximum of 64 bytes can be RW */
-} __attribute__ ((packed));
-
-struct ipc_sst_ia_dbg_loop_back {
- u16 num_dwords; /* Maximum of MAX_LOOP_BACK_DWORDS */
- u16 increment_val;/* Increments dwords by this value, 0- no increment */
- u32 lpbk_dwords[MAX_LOOP_BACK_DWORDS];/* Maximum of 8 dwords loopback */
-} __attribute__ ((packed));
-
-/* Stream type params structure for Alloc stream */
-struct snd_sst_str_type {
- u8 codec_type; /* Codec type */
- u8 str_type; /* 1 = voice 2 = music */
- u8 operation; /* Playback or Capture */
- u8 protected_str; /* 0=Non DRM, 1=DRM */
- u8 time_slots;
- u8 reserved; /* Reserved */
- u16 result; /* Result used for acknowledgment */
-} __attribute__ ((packed));
-
-/* Library info structure */
-struct module_info {
- u32 lib_version;
- u32 lib_type;/*TBD- KLOCKWORK u8 lib_type;*/
- u32 media_type;
- u8 lib_name[12];
- u32 lib_caps;
- unsigned char b_date[16]; /* Lib build date */
- unsigned char b_time[16]; /* Lib build time */
-} __attribute__ ((packed));
-
-/* Library slot info */
-struct lib_slot_info {
- u8 slot_num; /* 1 or 2 */
- u8 reserved1;
- u16 reserved2;
- u32 iram_size; /* slot size in IRAM */
- u32 dram_size; /* slot size in DRAM */
- u32 iram_offset; /* starting offset of slot in IRAM */
- u32 dram_offset; /* starting offset of slot in DRAM */
-} __attribute__ ((packed));
-
-struct snd_sst_lib_download {
- struct module_info lib_info; /* library info type, capabilities etc */
- struct lib_slot_info slot_info; /* slot info to be downloaded */
- u32 mod_entry_pt;
-};
-
-struct snd_sst_lib_download_info {
- struct snd_sst_lib_download dload_lib;
- u16 result; /* Result used for acknowledgment */
- u8 pvt_id; /* Private ID */
- u8 reserved; /* for alignment */
-};
-
-/* Alloc stream params structure */
-struct snd_sst_alloc_params {
- struct snd_sst_str_type str_type;
- struct snd_sst_stream_params stream_params;
-};
-
-struct snd_sst_fw_get_stream_params {
- struct snd_sst_stream_params codec_params;
- struct snd_sst_pmic_config pcm_params;
-};
-
-/* Alloc stream response message */
-struct snd_sst_alloc_response {
- struct snd_sst_str_type str_type; /* Stream type for allocation */
- struct snd_sst_lib_download lib_dnld; /* Valid only for codec dnld */
-};
-
-/* Drop response */
-struct snd_sst_drop_response {
- u32 result;
- u32 bytes;
-};
-
-/* CSV Voice call routing structure */
-struct snd_sst_control_routing {
- u8 control; /* 0=start, 1=Stop */
- u8 reserved[3]; /* Reserved- for 32 bit alignment */
-};
-
-
-struct ipc_post {
- struct list_head node;
- union ipc_header header; /* driver specific */
- char *mailbox_data;
-};
-
-struct snd_sst_ctxt_params {
- u32 address; /* Physical Address in DDR where the context is stored */
- u32 size; /* size of the context */
-};
-#endif /* __INTEL_SST_FW_IPC_H__ */
diff --git a/drivers/staging/intel_sst/intel_sst_ioctl.h b/drivers/staging/intel_sst/intel_sst_ioctl.h
deleted file mode 100644
index 5da5ee092c69..000000000000
--- a/drivers/staging/intel_sst/intel_sst_ioctl.h
+++ /dev/null
@@ -1,440 +0,0 @@
-#ifndef __INTEL_SST_IOCTL_H__
-#define __INTEL_SST_IOCTL_H__
-/*
- * intel_sst_ioctl.h - Intel SST Driver for audio engine
- *
- * Copyright (C) 2008-10 Intel Corporation
- * Authors: Vinod Koul <vinod.koul@intel.com>
- * Harsha Priya <priya.harsha@intel.com>
- * Dharageswari R <dharageswari.r@intel.com>
- * KP Jeeja <jeeja.kp@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This file defines all sst ioctls
- */
-
-/* codec and post/pre processing related info */
-
-#include <linux/types.h>
-
-enum sst_codec_types {
-/* AUDIO/MUSIC CODEC Type Definitions */
- SST_CODEC_TYPE_UNKNOWN = 0,
- SST_CODEC_TYPE_PCM, /* Pass through Audio codec */
- SST_CODEC_TYPE_MP3,
- SST_CODEC_TYPE_MP24,
- SST_CODEC_TYPE_AAC,
- SST_CODEC_TYPE_AACP,
- SST_CODEC_TYPE_eAACP,
- SST_CODEC_TYPE_WMA9,
- SST_CODEC_TYPE_WMA10,
- SST_CODEC_TYPE_WMA10P,
- SST_CODEC_TYPE_RA,
- SST_CODEC_TYPE_DDAC3,
- SST_CODEC_TYPE_STEREO_TRUE_HD,
- SST_CODEC_TYPE_STEREO_HD_PLUS,
-
- /* VOICE CODEC Type Definitions */
- SST_CODEC_TYPE_VOICE_PCM = 0x21, /* Pass through voice codec */
-};
-
-enum sst_algo_types {
- SST_CODEC_SRC = 0x64,
- SST_CODEC_MIXER = 0x65,
- SST_CODEC_DOWN_MIXER = 0x66,
- SST_CODEC_VOLUME_CONTROL = 0x67,
- SST_CODEC_OEM1 = 0xC8,
- SST_CODEC_OEM2 = 0xC9,
-};
-
-enum snd_sst_stream_ops {
- STREAM_OPS_PLAYBACK = 0, /* Decode */
- STREAM_OPS_CAPTURE, /* Encode */
- STREAM_OPS_PLAYBACK_DRM, /* Play Audio/Voice */
- STREAM_OPS_PLAYBACK_ALERT, /* Play Audio/Voice */
- STREAM_OPS_CAPTURE_VOICE_CALL, /* CSV Voice recording */
-};
-
-enum stream_mode {
- SST_STREAM_MODE_NONE = 0,
- SST_STREAM_MODE_DNR = 1,
- SST_STREAM_MODE_FNF = 2,
- SST_STREAM_MODE_CAPTURE = 3
-};
-
-enum stream_type {
- SST_STREAM_TYPE_NONE = 0,
- SST_STREAM_TYPE_MUSIC = 1,
- SST_STREAM_TYPE_NORMAL = 2,
- SST_STREAM_TYPE_LONG_PB = 3,
- SST_STREAM_TYPE_LOW_LATENCY = 4,
-};
-
-enum snd_sst_audio_device_type {
- SND_SST_DEVICE_HEADSET = 1,
- SND_SST_DEVICE_IHF,
- SND_SST_DEVICE_VIBRA,
- SND_SST_DEVICE_HAPTIC,
- SND_SST_DEVICE_CAPTURE,
-};
-
-/* Firmware Version info */
-struct snd_sst_fw_version {
- __u8 build; /* build number*/
- __u8 minor; /* minor number*/
- __u8 major; /* major number*/
- __u8 type; /* build type */
-};
-
-/* Port info structure */
-struct snd_sst_port_info {
- __u16 port_type;
- __u16 reserved;
-};
-
-/* Mixer info structure */
-struct snd_sst_mix_info {
- __u16 max_streams;
- __u16 reserved;
-};
-
-/* PCM Parameters */
-struct snd_pcm_params {
- __u16 codec; /* codec type */
- __u8 num_chan; /* 1=Mono, 2=Stereo */
- __u8 pcm_wd_sz; /* 16/24 - bit*/
- __u32 reserved; /* Bitrate in bits per second */
- __u32 sfreq; /* Sampling rate in Hz */
- __u32 ring_buffer_size;
- __u32 period_count; /* period elapsed in samples*/
- __u32 ring_buffer_addr;
-};
-
-/* MP3 Music Parameters Message */
-struct snd_mp3_params {
- __u16 codec;
- __u8 num_chan; /* 1=Mono, 2=Stereo */
- __u8 pcm_wd_sz; /* 16/24 - bit*/
- __u32 brate; /* Use the hard coded value. */
- __u32 sfreq; /* Sampling freq e.g. 8000, 44100, 48000 */
- __u8 crc_check; /* crc_check - disable (0) or enable (1) */
- __u8 op_align; /* op align 0- 16 bit, 1- MSB, 2 LSB*/
- __u16 reserved; /* Unused */
-};
-
-#define AAC_BIT_STREAM_ADTS 0
-#define AAC_BIT_STREAM_ADIF 1
-#define AAC_BIT_STREAM_RAW 2
-
-/* AAC Music Parameters Message */
-struct snd_aac_params {
- __u16 codec;
- __u8 num_chan; /* 1=Mono, 2=Stereo*/
- __u8 pcm_wd_sz; /* 16/24 - bit*/
- __u32 brate;
- __u32 sfreq; /* Sampling freq e.g. 8000, 44100, 48000 */
- __u32 aac_srate; /* Plain AAC decoder operating sample rate */
- __u8 mpg_id; /* 0=MPEG-2, 1=MPEG-4 */
- __u8 bs_format; /* input bit stream format adts=0, adif=1, raw=2 */
- __u8 aac_profile; /* 0=Main Profile, 1=LC profile, 3=SSR profile */
- __u8 ext_chl; /* No.of external channels */
- __u8 aot; /* Audio object type. 1=Main , 2=LC , 3=SSR, 4=SBR*/
- __u8 op_align; /* output alignment 0=16 bit , 1=MSB, 2= LSB align */
- __u8 brate_type; /* 0=CBR, 1=VBR */
- __u8 crc_check; /* crc check 0= disable, 1=enable */
- __s8 bit_stream_format[8]; /* input bit stream format adts/adif/raw */
- __u8 jstereo; /* Joint stereo Flag */
- __u8 sbr_present; /* 1 = SBR Present, 0 = SBR absent, for RAW */
- __u8 downsample; /* 1 = Downsampling ON, 0 = Downsampling OFF */
- __u8 num_syntc_elems; /* 1- Mono/stereo, 0 - Dual Mono, 0 - for raw */
- __s8 syntc_id[2]; /* 0 for ID_SCE (Dual Mono), -1 for raw */
- __s8 syntc_tag[2]; /* raw - -1 and 0 -16 for rest of the streams */
- __u8 pce_present; /* Flag. 1- present 0 - not present, for RAW */
- __u8 sbr_type; /* sbr_type: 0-plain aac, 1-aac-v1, 2-aac-v2 */
- __u8 outchmode; /*0- mono, 1-stereo, 2-dual mono 3-Parametric stereo */
- __u8 ps_present;
-};
-
-/* WMA Music Parameters Message */
-struct snd_wma_params {
- __u16 codec;
- __u8 num_chan; /* 1=Mono, 2=Stereo */
- __u8 pcm_wd_sz; /* 16/24 - bit*/
- __u32 brate; /* Use the hard coded value. */
- __u32 sfreq; /* Sampling freq e.g. 8000, 44100, 48000 */
- __u32 channel_mask; /* Channel Mask */
- __u16 format_tag; /* Format Tag */
- __u16 block_align; /* packet size */
- __u16 wma_encode_opt;/* Encoder option */
- __u8 op_align; /* op align 0- 16 bit, 1- MSB, 2 LSB */
- __u8 pcm_src; /* input pcm bit width */
-};
-
-/* Pre processing param structure */
-struct snd_prp_params {
- __u32 reserved; /* No pre-processing defined yet */
-};
-
-/* Pre and post processing params structure */
-struct snd_ppp_params {
- __u8 algo_id;/* Post/Pre processing algorithm ID */
- __u8 str_id; /*Only 5 bits used 0 - 31 are valid*/
- __u8 enable; /* 0= disable, 1= enable*/
- __u8 reserved;
- __u32 size; /*Size of parameters for all blocks*/
- void *params;
-} __attribute__ ((packed));
-
-struct snd_sst_postproc_info {
- __u32 src_min; /* Supported SRC Min sampling freq */
- __u32 src_max; /* Supported SRC Max sampling freq */
- __u8 src; /* 0=Not supported, 1=Supported */
- __u8 bass_boost; /* 0=Not Supported, 1=Supported */
- __u8 stereo_widening; /* 0=Not Supported, 1=Supported */
- __u8 volume_control; /* 0=Not Supported, 1=Supported */
- __s16 min_vol; /* Minimum value of Volume in dB */
- __s16 max_vol; /* Maximum value of Volume in dB */
- __u8 mute_control; /* 0=No Mute, 1=Mute */
- __u8 reserved1;
- __u16 reserved2;
-};
-
-/* pre processing Capability info structure */
-struct snd_sst_prp_info {
- __s16 min_vol; /* Minimum value of Volume in dB */
- __s16 max_vol; /* Maximum value of Volume in dB */
- __u8 volume_control; /* 0=Not Supported, 1=Supported */
- __u8 reserved1; /* for 32 bit alignment */
- __u16 reserved2; /* for 32 bit alignment */
-} __attribute__ ((packed));
-
-/*Pre / Post processing algorithms support*/
-struct snd_sst_ppp_info {
- __u32 src:1; /* 0=Not supported, 1=Supported */
- __u32 mixer:1; /* 0=Not supported, 1=Supported */
- __u32 volume_control:1; /* 0=Not Supported, 1=Supported */
- __u32 mute_control:1; /* 0=Not Supported, 1=Supported */
- __u32 anc:1; /* 0=Not Supported, 1=Supported */
- __u32 side_tone:1; /* 0=Not Supported, 1=Supported */
- __u32 dc_removal:1; /* 0=Not Supported, 1=Supported */
- __u32 equalizer:1; /* 0=Not Supported, 1=Supported */
- __u32 spkr_prot:1; /* 0=Not Supported, 1=Supported */
- __u32 bass_boost:1; /* 0=Not Supported, 1=Supported */
- __u32 stereo_widening:1;/* 0=Not Supported, 1=Supported */
- __u32 rsvd1:21;
- __u32 rsvd2;
-};
-
-/* Firmware capabilities info */
-struct snd_sst_fw_info {
- struct snd_sst_fw_version fw_version; /* Firmware version */
- __u8 audio_codecs_supported[8]; /* Codecs supported by FW */
- __u32 recommend_min_duration; /* Min duration for Lowpower Playback */
- __u8 max_pcm_streams_supported; /* Max num of PCM streams supported */
- __u8 max_enc_streams_supported; /* Max number of Encoded streams */
- __u16 reserved; /* 32 bit alignment*/
- struct snd_sst_ppp_info ppp_info; /* pre_processing mod cap info */
- struct snd_sst_postproc_info pop_info; /* Post processing cap info*/
- struct snd_sst_port_info port_info[3]; /* Port info */
- struct snd_sst_mix_info mix_info;/* Mixer info */
- __u32 min_input_buf; /* minimum input buffer for decode */
-};
-
-/* Codec params structure */
-union snd_sst_codec_params {
- struct snd_pcm_params pcm_params;
- struct snd_mp3_params mp3_params;
- struct snd_aac_params aac_params;
- struct snd_wma_params wma_params;
-};
-
-
-struct snd_sst_stream_params {
- union snd_sst_codec_params uc;
-} __attribute__ ((packed));
-
-struct snd_sst_params {
- __u32 result;
- __u32 stream_id;
- __u8 codec;
- __u8 ops;
- __u8 stream_type;
- __u8 device_type;
- struct snd_sst_stream_params sparams;
-};
-
-struct snd_sst_vol {
- __u32 stream_id;
- __s32 volume;
- __u32 ramp_duration;
- __u32 ramp_type; /* Ramp type, default=0 */
-};
-
-struct snd_sst_mute {
- __u32 stream_id;
- __u32 mute;
-};
-
-/* ioctl related stuff here */
-struct snd_sst_pmic_config {
- __u32 sfreq; /* Sampling rate in Hz */
- __u16 num_chan; /* Mono =1 or Stereo =2 */
- __u16 pcm_wd_sz; /* Number of bits per sample */
-} __attribute__ ((packed));
-
-struct snd_sst_get_stream_params {
- struct snd_sst_params codec_params;
- struct snd_sst_pmic_config pcm_params;
-};
-
-enum snd_sst_target_type {
- SND_SST_TARGET_PMIC = 1,
- SND_SST_TARGET_LPE,
- SND_SST_TARGET_MODEM,
- SND_SST_TARGET_BT,
- SND_SST_TARGET_FM,
- SND_SST_TARGET_NONE,
-};
-
-enum snd_sst_device_type {
- SND_SST_DEVICE_SSP = 1,
- SND_SST_DEVICE_PCM,
- SND_SST_DEVICE_OTHER,
-};
-
-enum snd_sst_device_mode {
-
- SND_SST_DEV_MODE_PCM_MODE1 = 1, /*(16-bit word, bit-length frame sync)*/
- SND_SST_DEV_MODE_PCM_MODE2,
- SND_SST_DEV_MODE_PCM_MODE3,
- SND_SST_DEV_MODE_PCM_MODE4_RIGHT_JUSTIFIED,
- SND_SST_DEV_MODE_PCM_MODE4_LEFT_JUSTIFIED,
- SND_SST_DEV_MODE_PCM_MODE4_I2S, /*(I2S mode, 16-bit words)*/
- SND_SST_DEV_MODE_PCM_MODE5,
- SND_SST_DEV_MODE_PCM_MODE6,
-};
-
-enum snd_sst_port_action {
- SND_SST_PORT_PREPARE = 1,
- SND_SST_PORT_ACTIVATE,
-};
-
-/* Target selection per device structure */
-struct snd_sst_slot_info {
- __u8 mix_enable; /* Mixer enable or disable */
- __u8 device_type;
- __u8 device_instance; /* 0, 1, 2 */
- __u8 target_device;
- __u16 target_sink;
- __u8 slot[2];
- __u8 master;
- __u8 action;
- __u8 device_mode;
- __u8 reserved;
- struct snd_sst_pmic_config pcm_params;
-} __attribute__ ((packed));
-
-#define SST_MAX_TARGET_DEVICES 3
-/* Target device list structure */
-struct snd_sst_target_device {
- __u32 device_route;
- struct snd_sst_slot_info devices[SST_MAX_TARGET_DEVICES];
-} __attribute__ ((packed));
-
-struct snd_sst_driver_info {
- __u32 version; /* Version of the driver */
- __u32 active_pcm_streams;
- __u32 active_enc_streams;
- __u32 max_pcm_streams;
- __u32 max_enc_streams;
- __u32 buf_per_stream;
-};
-
-enum snd_sst_buff_type {
- SST_BUF_USER = 1,
- SST_BUF_MMAP,
- SST_BUF_RAR,
-};
-
-struct snd_sst_mmap_buff_entry {
- unsigned int offset;
- unsigned int size;
-};
-
-struct snd_sst_mmap_buffs {
- unsigned int entries;
- enum snd_sst_buff_type type;
- struct snd_sst_mmap_buff_entry *buff;
-};
-
-struct snd_sst_buff_entry {
- void *buffer;
- unsigned int size;
-};
-
-struct snd_sst_buffs {
- unsigned int entries;
- __u8 type;
- struct snd_sst_buff_entry *buff_entry;
-};
-
-struct snd_sst_dbufs {
- unsigned long long input_bytes_consumed;
- unsigned long long output_bytes_produced;
- struct snd_sst_buffs *ibufs;
- struct snd_sst_buffs *obufs;
-};
-
-struct snd_sst_tuning_params {
- __u8 type;
- __u8 str_id;
- __u8 size;
- __u8 rsvd;
- __aligned_u64 addr;
-} __attribute__ ((packed));
-/* IOCTLs defined here */
-/* SST MMF IOCTLs only */
-#define SNDRV_SST_STREAM_SET_PARAMS _IOR('L', 0x00, \
- struct snd_sst_stream_params *)
-#define SNDRV_SST_STREAM_GET_PARAMS _IOWR('L', 0x01, \
- struct snd_sst_get_stream_params *)
-#define SNDRV_SST_STREAM_GET_TSTAMP _IOWR('L', 0x02, __u64 *)
-#define SNDRV_SST_STREAM_DECODE _IOWR('L', 0x03, struct snd_sst_dbufs *)
-#define SNDRV_SST_STREAM_BYTES_DECODED _IOWR('L', 0x04, __u64 *)
-#define SNDRV_SST_STREAM_START _IO('A', 0x42)
-#define SNDRV_SST_STREAM_DROP _IO('A', 0x43)
-#define SNDRV_SST_STREAM_DRAIN _IO('A', 0x44)
-#define SNDRV_SST_STREAM_PAUSE _IOW('A', 0x45, int)
-#define SNDRV_SST_STREAM_RESUME _IO('A', 0x47)
-#define SNDRV_SST_MMAP_PLAY _IOW('L', 0x05, struct snd_sst_mmap_buffs *)
-#define SNDRV_SST_MMAP_CAPTURE _IOW('L', 0x06, struct snd_sst_mmap_buffs *)
-/*SST common ioctls */
-#define SNDRV_SST_DRIVER_INFO _IOR('L', 0x10, struct snd_sst_driver_info *)
-#define SNDRV_SST_SET_VOL _IOW('L', 0x11, struct snd_sst_vol *)
-#define SNDRV_SST_GET_VOL _IOW('L', 0x12, struct snd_sst_vol *)
-#define SNDRV_SST_MUTE _IOW('L', 0x13, struct snd_sst_mute *)
-/* AM ioctls only */
-#define SNDRV_SST_FW_INFO _IOR('L', 0x20, struct snd_sst_fw_info *)
-#define SNDRV_SST_SET_TARGET_DEVICE _IOW('L', 0x21, \
- struct snd_sst_target_device *)
-/*DSP Ioctls on /dev/intel_sst_ctrl only*/
-#define SNDRV_SST_SET_ALGO _IOW('L', 0x30, struct snd_ppp_params *)
-#define SNDRV_SST_GET_ALGO _IOWR('L', 0x31, struct snd_ppp_params *)
-#define SNDRV_SST_TUNING_PARAMS _IOW('L', 0x32, struct snd_sst_tuning_params *)
-
-#endif /* __INTEL_SST_IOCTL_H__ */
diff --git a/drivers/staging/intel_sst/intel_sst_ipc.c b/drivers/staging/intel_sst/intel_sst_ipc.c
deleted file mode 100644
index 5c3444f6ab41..000000000000
--- a/drivers/staging/intel_sst/intel_sst_ipc.c
+++ /dev/null
@@ -1,774 +0,0 @@
-/*
- * intel_sst_ipc.c - Intel SST Driver for audio engine
- *
- * Copyright (C) 2008-10 Intel Corporation
- * Authors: Vinod Koul <vinod.koul@intel.com>
- * Harsha Priya <priya.harsha@intel.com>
- * Dharageswari R <dharageswari.r@intel.com>
- * KP Jeeja <jeeja.kp@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This file defines all ipc functions
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/pci.h>
-#include <linux/firmware.h>
-#include <linux/sched.h>
-#include "intel_sst.h"
-#include "intel_sst_ioctl.h"
-#include "intel_sst_fw_ipc.h"
-#include "intel_sst_common.h"
-
-/*
- * sst_send_sound_card_type - send sound card type
- *
- * this function sends the sound card type to sst dsp engine
- */
-static void sst_send_sound_card_type(void)
-{
- struct ipc_post *msg = NULL;
-
- if (sst_create_short_msg(&msg))
- return;
-
- sst_fill_header(&msg->header, IPC_IA_SET_PMIC_TYPE, 0, 0);
- msg->header.part.data = sst_drv_ctx->pmic_vendor;
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- return;
-}
-
-/**
-* sst_post_message - Posts message to SST
-*
-* @work: Pointer to work structure
-*
-* This function is called by any component in the driver that
-* wants to send an IPC message. It posts the message only if the
-* busy bit is free
-*/
-void sst_post_message(struct work_struct *work)
-{
- struct ipc_post *msg;
- union ipc_header header;
- union interrupt_reg imr;
- int retval = 0;
- imr.full = 0;
-
- /*To check if LPE is in stalled state.*/
- retval = sst_stalled();
- if (retval < 0) {
- pr_err("in stalled state\n");
- return;
- }
- pr_debug("post message called\n");
- spin_lock(&sst_drv_ctx->list_spin_lock);
-
- /* check list */
- if (list_empty(&sst_drv_ctx->ipc_dispatch_list)) {
- /* list is empty, mask imr */
- pr_debug("Empty msg queue... masking\n");
- imr.full = readl(sst_drv_ctx->shim + SST_IMRX);
- imr.part.done_interrupt = 1;
- /* dummy register for shim workaround */
- sst_shim_write(sst_drv_ctx->shim, SST_IMRX, imr.full);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- return;
- }
-
- /* check busy bit */
- header.full = sst_shim_read(sst_drv_ctx->shim, SST_IPCX);
- if (header.part.busy) {
- /* busy, unmask */
- pr_debug("Busy not free... unmasking\n");
- imr.full = readl(sst_drv_ctx->shim + SST_IMRX);
- imr.part.done_interrupt = 0;
- /* dummy register for shim workaround */
- sst_shim_write(sst_drv_ctx->shim, SST_IMRX, imr.full);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- return;
- }
- /* copy msg from list */
- msg = list_entry(sst_drv_ctx->ipc_dispatch_list.next,
- struct ipc_post, node);
- list_del(&msg->node);
- pr_debug("Post message: header = %x\n", msg->header.full);
- pr_debug("size: = %x\n", msg->header.part.data);
- if (msg->header.part.large)
- memcpy_toio(sst_drv_ctx->mailbox + SST_MAILBOX_SEND,
- msg->mailbox_data, msg->header.part.data);
- /* dummy register for shim workaround */
-
- sst_shim_write(sst_drv_ctx->shim, SST_IPCX, msg->header.full);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
-
- kfree(msg->mailbox_data);
- kfree(msg);
- return;
-}
-
-/*
- * sst_clear_interrupt - clear the SST FW interrupt
- *
- * This function clears the interrupt register after the interrupt
- * bottom half is complete allowing next interrupt to arrive
- */
-void sst_clear_interrupt(void)
-{
- union interrupt_reg isr;
- union interrupt_reg imr;
- union ipc_header clear_ipc;
-
- imr.full = sst_shim_read(sst_drv_ctx->shim, SST_IMRX);
- isr.full = sst_shim_read(sst_drv_ctx->shim, SST_ISRX);
- /* write 1 to clear */
- isr.part.busy_interrupt = 1;
- sst_shim_write(sst_drv_ctx->shim, SST_ISRX, isr.full);
- /* Set IA done bit */
- clear_ipc.full = sst_shim_read(sst_drv_ctx->shim, SST_IPCD);
- clear_ipc.part.busy = 0;
- clear_ipc.part.done = 1;
- clear_ipc.part.data = IPC_ACK_SUCCESS;
- sst_shim_write(sst_drv_ctx->shim, SST_IPCD, clear_ipc.full);
- /* un mask busy interrupt */
- imr.part.busy_interrupt = 0;
- sst_shim_write(sst_drv_ctx->shim, SST_IMRX, imr.full);
-}
-
-void sst_restore_fw_context(void)
-{
- struct snd_sst_ctxt_params fw_context;
- struct ipc_post *msg = NULL;
-
- pr_debug("restore_fw_context\n");
- /*check cpu type*/
- if (sst_drv_ctx->pci_id != SST_MFLD_PCI_ID)
- return;
- /*not supported for rest*/
- if (!sst_drv_ctx->fw_cntx_size)
- return;
- /*nothing to restore*/
- pr_debug("restoring context......\n");
- /*send msg to fw*/
- if (sst_create_large_msg(&msg))
- return;
-
- sst_fill_header(&msg->header, IPC_IA_SET_FW_CTXT, 1, 0);
- msg->header.part.data = sizeof(fw_context) + sizeof(u32);
- fw_context.address = virt_to_phys((void *)sst_drv_ctx->fw_cntx);
- fw_context.size = sst_drv_ctx->fw_cntx_size;
- memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
- memcpy(msg->mailbox_data + sizeof(u32),
- &fw_context, sizeof(fw_context));
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- return;
-}
-/*
- * process_fw_init - process the FW init msg
- *
- * @msg: IPC message from FW
- *
- * This function processes the FW init msg from the FW,
- * marks the FW state and prints debug info about the loaded FW
- */
-int process_fw_init(struct sst_ipc_msg_wq *msg)
-{
- struct ipc_header_fw_init *init =
- (struct ipc_header_fw_init *)msg->mailbox;
- int retval = 0;
-
- pr_debug("*** FW Init msg came***\n");
- if (init->result) {
- mutex_lock(&sst_drv_ctx->sst_lock);
- sst_drv_ctx->sst_state = SST_ERROR;
- mutex_unlock(&sst_drv_ctx->sst_lock);
- pr_debug("FW Init failed, Error %x\n", init->result);
- pr_err("FW Init failed, Error %x\n", init->result);
- retval = -init->result;
- return retval;
- }
- if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID)
- sst_send_sound_card_type();
- mutex_lock(&sst_drv_ctx->sst_lock);
- sst_drv_ctx->sst_state = SST_FW_RUNNING;
- sst_drv_ctx->lpe_stalled = 0;
- mutex_unlock(&sst_drv_ctx->sst_lock);
- pr_debug("FW Version %02x.%02x.%02x\n", init->fw_version.major,
- init->fw_version.minor, init->fw_version.build);
- pr_debug("Build Type %x\n", init->fw_version.type);
- pr_debug(" Build date %s Time %s\n",
- init->build_info.date, init->build_info.time);
- sst_wake_up_alloc_block(sst_drv_ctx, FW_DWNL_ID, retval, NULL);
- sst_restore_fw_context();
- return retval;
-}
-/**
-* sst_process_message - Processes message from SST
-*
-* @work: Pointer to work structure
-*
-* This function is scheduled by ISR
-* It takes a msg from the process_queue and acts on it
-*/
-void sst_process_message(struct work_struct *work)
-{
- struct sst_ipc_msg_wq *msg =
- container_of(work, struct sst_ipc_msg_wq, wq);
- int str_id = msg->header.part.str_id;
-
- pr_debug("IPC process for %x\n", msg->header.full);
-
- /* based on msg in list call respective handler */
- switch (msg->header.part.msg_id) {
- case IPC_SST_BUF_UNDER_RUN:
- case IPC_SST_BUF_OVER_RUN:
- if (sst_validate_strid(str_id)) {
- pr_err("stream id %d invalid\n", str_id);
- break;
- }
- pr_err("Buffer under/overrun for %d\n",
- msg->header.part.str_id);
- pr_err("Got Underrun & not to send data...ignore\n");
- break;
-
- case IPC_SST_GET_PLAY_FRAMES:
- if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID) {
- struct stream_info *stream ;
-
- if (sst_validate_strid(str_id)) {
- pr_err("strid %d invalid\n", str_id);
- break;
- }
- /* call sst_play_frame */
- stream = &sst_drv_ctx->streams[str_id];
- pr_debug("sst_play_frames for %d\n",
- msg->header.part.str_id);
- mutex_lock(&sst_drv_ctx->streams[str_id].lock);
- sst_play_frame(msg->header.part.str_id);
- mutex_unlock(&sst_drv_ctx->streams[str_id].lock);
- break;
- } else
- pr_err("sst_play_frames for Penwell!!\n");
-
- case IPC_SST_GET_CAPT_FRAMES:
- if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID) {
- struct stream_info *stream;
- /* call sst_capture_frame */
- if (sst_validate_strid(str_id)) {
- pr_err("str id %d invalid\n", str_id);
- break;
- }
- stream = &sst_drv_ctx->streams[str_id];
- pr_debug("sst_capture_frames for %d\n",
- msg->header.part.str_id);
- mutex_lock(&stream->lock);
- if (stream->mmapped == false &&
- stream->src == SST_DRV) {
- pr_debug("waking up block for copy.\n");
- stream->data_blk.ret_code = 0;
- stream->data_blk.condition = true;
- stream->data_blk.on = false;
- wake_up(&sst_drv_ctx->wait_queue);
- } else
- sst_capture_frame(msg->header.part.str_id);
- mutex_unlock(&stream->lock);
- } else
- pr_err("sst_play_frames for Penwell!!\n");
- break;
-
- case IPC_IA_PRINT_STRING:
- pr_debug("been asked to print something by fw\n");
- /* TBD */
- break;
-
- case IPC_IA_FW_INIT_CMPLT: {
- /* send next data to FW */
- process_fw_init(msg);
- break;
- }
-
- case IPC_SST_STREAM_PROCESS_FATAL_ERR:
- if (sst_validate_strid(str_id)) {
- pr_err("stream id %d invalid\n", str_id);
- break;
- }
- pr_err("codec fatal error %x stream %d...\n",
- msg->header.full, msg->header.part.str_id);
- pr_err("Dropping the stream\n");
- sst_drop_stream(msg->header.part.str_id);
- break;
- case IPC_IA_LPE_GETTING_STALLED:
- sst_drv_ctx->lpe_stalled = 1;
- break;
- case IPC_IA_LPE_UNSTALLED:
- sst_drv_ctx->lpe_stalled = 0;
- break;
- default:
- /* Illegal case */
- pr_err("Unhandled msg %x header %x\n",
- msg->header.part.msg_id, msg->header.full);
- }
- sst_clear_interrupt();
- return;
-}
-
-/**
-* sst_process_reply - Processes reply message from SST
-*
-* @work: Pointer to work structure
-*
-* This function is scheduled by ISR
-* It takes a reply msg from the response_queue and
-* acts on it
-*/
-void sst_process_reply(struct work_struct *work)
-{
- struct sst_ipc_msg_wq *msg =
- container_of(work, struct sst_ipc_msg_wq, wq);
-
- int str_id = msg->header.part.str_id;
- struct stream_info *str_info;
-
- switch (msg->header.part.msg_id) {
- case IPC_IA_TARGET_DEV_SELECT:
- if (!msg->header.part.data) {
- sst_drv_ctx->tgt_dev_blk.ret_code = 0;
- } else {
- pr_err(" Msg %x reply error %x\n",
- msg->header.part.msg_id, msg->header.part.data);
- sst_drv_ctx->tgt_dev_blk.ret_code =
- -msg->header.part.data;
- }
-
- if (sst_drv_ctx->tgt_dev_blk.on == true) {
- sst_drv_ctx->tgt_dev_blk.condition = true;
- wake_up(&sst_drv_ctx->wait_queue);
- }
- break;
- case IPC_IA_ALG_PARAMS: {
- pr_debug("sst:IPC_ALG_PARAMS response %x\n", msg->header.full);
- pr_debug("sst: data value %x\n", msg->header.part.data);
- pr_debug("sst: large value %x\n", msg->header.part.large);
-
- if (!msg->header.part.large) {
- if (!msg->header.part.data) {
- pr_debug("sst: alg set success\n");
- sst_drv_ctx->ppp_params_blk.ret_code = 0;
- } else {
- pr_debug("sst: alg set failed\n");
- sst_drv_ctx->ppp_params_blk.ret_code =
- -msg->header.part.data;
- }
-
- } else if (msg->header.part.data) {
- struct snd_ppp_params *mailbox_params, *get_params;
- char *params;
-
- pr_debug("sst: alg get success\n");
- mailbox_params = (struct snd_ppp_params *)msg->mailbox;
- get_params = kzalloc(sizeof(*get_params), GFP_KERNEL);
- if (get_params == NULL) {
- pr_err("sst: out of memory for ALG PARAMS");
- break;
- }
- memcpy_fromio(get_params, mailbox_params,
- sizeof(*get_params));
- get_params->params = kzalloc(mailbox_params->size,
- GFP_KERNEL);
- if (get_params->params == NULL) {
- kfree(get_params);
- pr_err("sst: out of memory for ALG PARAMS block");
- break;
- }
- params = msg->mailbox;
- params = params + sizeof(*mailbox_params) - sizeof(u32);
- memcpy_fromio(get_params->params, params,
- get_params->size);
- sst_drv_ctx->ppp_params_blk.ret_code = 0;
- sst_drv_ctx->ppp_params_blk.data = get_params;
- }
-
- if (sst_drv_ctx->ppp_params_blk.on == true) {
- sst_drv_ctx->ppp_params_blk.condition = true;
- wake_up(&sst_drv_ctx->wait_queue);
- }
- break;
- }
-
- case IPC_IA_TUNING_PARAMS: {
- pr_debug("sst:IPC_TUNING_PARAMS resp: %x\n", msg->header.full);
- pr_debug("data value %x\n", msg->header.part.data);
- if (msg->header.part.large) {
- pr_debug("alg set failed\n");
- sst_drv_ctx->ppp_params_blk.ret_code =
- -msg->header.part.data;
- } else {
- pr_debug("alg set success\n");
- sst_drv_ctx->ppp_params_blk.ret_code = 0;
- }
- if (sst_drv_ctx->ppp_params_blk.on == true) {
- sst_drv_ctx->ppp_params_blk.condition = true;
- wake_up(&sst_drv_ctx->wait_queue);
- }
- }
-
- case IPC_IA_GET_FW_INFO: {
- struct snd_sst_fw_info *fw_info =
- (struct snd_sst_fw_info *)msg->mailbox;
- if (msg->header.part.large) {
- int major = fw_info->fw_version.major;
- int minor = fw_info->fw_version.minor;
- int build = fw_info->fw_version.build;
- pr_debug("Msg succeeded %x\n",
- msg->header.part.msg_id);
- pr_debug("INFO: ***FW*** = %02d.%02d.%02d\n",
- major, minor, build);
- memcpy_fromio(sst_drv_ctx->fw_info_blk.data,
- ((struct snd_sst_fw_info *)(msg->mailbox)),
- sizeof(struct snd_sst_fw_info));
- sst_drv_ctx->fw_info_blk.ret_code = 0;
- } else {
- pr_err(" Msg %x reply error %x\n",
- msg->header.part.msg_id, msg->header.part.data);
- sst_drv_ctx->fw_info_blk.ret_code =
- -msg->header.part.data;
- }
- if (sst_drv_ctx->fw_info_blk.on == true) {
- pr_debug("Memcopy succeeded\n");
- sst_drv_ctx->fw_info_blk.on = false;
- sst_drv_ctx->fw_info_blk.condition = true;
- wake_up(&sst_drv_ctx->wait_queue);
- }
- break;
- }
- case IPC_IA_SET_STREAM_MUTE:
- if (!msg->header.part.data) {
- pr_debug("Msg succeeded %x\n",
- msg->header.part.msg_id);
- sst_drv_ctx->mute_info_blk.ret_code = 0;
- } else {
- pr_err(" Msg %x reply error %x\n",
- msg->header.part.msg_id, msg->header.part.data);
- sst_drv_ctx->mute_info_blk.ret_code =
- -msg->header.part.data;
-
- }
- if (sst_drv_ctx->mute_info_blk.on == true) {
- sst_drv_ctx->mute_info_blk.on = false;
- sst_drv_ctx->mute_info_blk.condition = true;
- wake_up(&sst_drv_ctx->wait_queue);
- }
- break;
- case IPC_IA_SET_STREAM_VOL:
- if (!msg->header.part.data) {
- pr_debug("Msg succeeded %x\n",
- msg->header.part.msg_id);
- sst_drv_ctx->vol_info_blk.ret_code = 0;
- } else {
- pr_err(" Msg %x reply error %x\n",
- msg->header.part.msg_id,
- msg->header.part.data);
- sst_drv_ctx->vol_info_blk.ret_code =
- -msg->header.part.data;
-
- }
-
- if (sst_drv_ctx->vol_info_blk.on == true) {
- sst_drv_ctx->vol_info_blk.on = false;
- sst_drv_ctx->vol_info_blk.condition = true;
- wake_up(&sst_drv_ctx->wait_queue);
- }
- break;
- case IPC_IA_GET_STREAM_VOL:
- if (msg->header.part.large) {
- pr_debug("Large Msg Received Successfully\n");
- pr_debug("Msg succeeded %x\n",
- msg->header.part.msg_id);
- memcpy_fromio(sst_drv_ctx->vol_info_blk.data,
- (void *) msg->mailbox,
- sizeof(struct snd_sst_vol));
- sst_drv_ctx->vol_info_blk.ret_code = 0;
- } else {
- pr_err("Msg %x reply error %x\n",
- msg->header.part.msg_id, msg->header.part.data);
- sst_drv_ctx->vol_info_blk.ret_code =
- -msg->header.part.data;
- }
- if (sst_drv_ctx->vol_info_blk.on == true) {
- sst_drv_ctx->vol_info_blk.on = false;
- sst_drv_ctx->vol_info_blk.condition = true;
- wake_up(&sst_drv_ctx->wait_queue);
- }
- break;
-
- case IPC_IA_GET_STREAM_PARAMS:
- if (sst_validate_strid(str_id)) {
- pr_err("stream id %d invalid\n", str_id);
- break;
- }
- str_info = &sst_drv_ctx->streams[str_id];
- if (msg->header.part.large) {
- pr_debug("Get stream large success\n");
- memcpy_fromio(str_info->ctrl_blk.data,
- ((void *)(msg->mailbox)),
- sizeof(struct snd_sst_fw_get_stream_params));
- str_info->ctrl_blk.ret_code = 0;
- } else {
- pr_err("Msg %x reply error %x\n",
- msg->header.part.msg_id, msg->header.part.data);
- str_info->ctrl_blk.ret_code = -msg->header.part.data;
- }
- if (str_info->ctrl_blk.on == true) {
- str_info->ctrl_blk.on = false;
- str_info->ctrl_blk.condition = true;
- wake_up(&sst_drv_ctx->wait_queue);
- }
- break;
- case IPC_IA_DECODE_FRAMES:
- if (sst_validate_strid(str_id)) {
- pr_err("stream id %d invalid\n", str_id);
- break;
- }
- str_info = &sst_drv_ctx->streams[str_id];
- if (msg->header.part.large) {
- pr_debug("Msg succeeded %x\n",
- msg->header.part.msg_id);
- memcpy_fromio(str_info->data_blk.data,
- ((void *)(msg->mailbox)),
- sizeof(struct snd_sst_decode_info));
- str_info->data_blk.ret_code = 0;
- } else {
- pr_err("Msg %x reply error %x\n",
- msg->header.part.msg_id, msg->header.part.data);
- str_info->data_blk.ret_code = -msg->header.part.data;
- }
- if (str_info->data_blk.on == true) {
- str_info->data_blk.on = false;
- str_info->data_blk.condition = true;
- wake_up(&sst_drv_ctx->wait_queue);
- }
- break;
- case IPC_IA_DRAIN_STREAM:
- if (sst_validate_strid(str_id)) {
- pr_err("stream id %d invalid\n", str_id);
- break;
- }
- str_info = &sst_drv_ctx->streams[str_id];
- if (!msg->header.part.data) {
- pr_debug("Msg succeeded %x\n",
- msg->header.part.msg_id);
- str_info->ctrl_blk.ret_code = 0;
-
- } else {
- pr_err(" Msg %x reply error %x\n",
- msg->header.part.msg_id, msg->header.part.data);
- str_info->ctrl_blk.ret_code = -msg->header.part.data;
-
- }
- str_info = &sst_drv_ctx->streams[str_id];
- if (str_info->data_blk.on == true) {
- str_info->data_blk.on = false;
- str_info->data_blk.condition = true;
- wake_up(&sst_drv_ctx->wait_queue);
- }
- break;
-
- case IPC_IA_DROP_STREAM:
- if (sst_validate_strid(str_id)) {
- pr_err("str id %d invalid\n", str_id);
- break;
- }
- str_info = &sst_drv_ctx->streams[str_id];
- if (msg->header.part.large) {
- struct snd_sst_drop_response *drop_resp =
- (struct snd_sst_drop_response *)msg->mailbox;
-
- pr_debug("Drop ret bytes %x\n", drop_resp->bytes);
-
- str_info->curr_bytes = drop_resp->bytes;
- str_info->ctrl_blk.ret_code = 0;
- } else {
- pr_err(" Msg %x reply error %x\n",
- msg->header.part.msg_id, msg->header.part.data);
- str_info->ctrl_blk.ret_code = -msg->header.part.data;
- }
- if (str_info->ctrl_blk.on == true) {
- str_info->ctrl_blk.on = false;
- str_info->ctrl_blk.condition = true;
- wake_up(&sst_drv_ctx->wait_queue);
- }
- break;
- case IPC_IA_ENABLE_RX_TIME_SLOT:
- if (!msg->header.part.data) {
- pr_debug("RX_TIME_SLOT success\n");
- sst_drv_ctx->hs_info_blk.ret_code = 0;
- } else {
- pr_err(" Msg %x reply error %x\n",
- msg->header.part.msg_id,
- msg->header.part.data);
- sst_drv_ctx->hs_info_blk.ret_code =
- -msg->header.part.data;
- }
- if (sst_drv_ctx->hs_info_blk.on == true) {
- sst_drv_ctx->hs_info_blk.on = false;
- sst_drv_ctx->hs_info_blk.condition = true;
- wake_up(&sst_drv_ctx->wait_queue);
- }
- break;
- case IPC_IA_PAUSE_STREAM:
- case IPC_IA_RESUME_STREAM:
- case IPC_IA_SET_STREAM_PARAMS:
- str_info = &sst_drv_ctx->streams[str_id];
- if (!msg->header.part.data) {
- pr_debug("Msg succeeded %x\n",
- msg->header.part.msg_id);
- str_info->ctrl_blk.ret_code = 0;
- } else {
- pr_err(" Msg %x reply error %x\n",
- msg->header.part.msg_id,
- msg->header.part.data);
- str_info->ctrl_blk.ret_code = -msg->header.part.data;
- }
- if (sst_validate_strid(str_id)) {
- pr_err(" stream id %d invalid\n", str_id);
- break;
- }
-
- if (str_info->ctrl_blk.on == true) {
- str_info->ctrl_blk.on = false;
- str_info->ctrl_blk.condition = true;
- wake_up(&sst_drv_ctx->wait_queue);
- }
- break;
-
- case IPC_IA_FREE_STREAM:
- str_info = &sst_drv_ctx->streams[str_id];
- if (!msg->header.part.data) {
- pr_debug("Stream %d freed\n", str_id);
- } else {
- pr_err("Free for %d ret error %x\n",
- str_id, msg->header.part.data);
- }
- if (str_info->ctrl_blk.on == true) {
- str_info->ctrl_blk.on = false;
- str_info->ctrl_blk.condition = true;
- wake_up(&sst_drv_ctx->wait_queue);
- }
- break;
- case IPC_IA_ALLOC_STREAM: {
- /* map to stream, call play */
- struct snd_sst_alloc_response *resp =
- (struct snd_sst_alloc_response *)msg->mailbox;
- if (resp->str_type.result)
- pr_err("error alloc stream = %x\n",
- resp->str_type.result);
- sst_alloc_stream_response(str_id, resp);
- break;
- }
-
- case IPC_IA_PLAY_FRAMES:
- case IPC_IA_CAPT_FRAMES:
- if (sst_validate_strid(str_id)) {
- pr_err("stream id %d invalid\n", str_id);
- break;
- }
- pr_debug("Ack for play/capt frames received\n");
- break;
-
- case IPC_IA_PREP_LIB_DNLD: {
- struct snd_sst_str_type *str_type =
- (struct snd_sst_str_type *)msg->mailbox;
- pr_debug("Prep Lib download %x\n",
- msg->header.part.msg_id);
- if (str_type->result)
- pr_err("Prep lib download %x\n", str_type->result);
- else
- pr_debug("Can download codec now...\n");
- sst_wake_up_alloc_block(sst_drv_ctx, str_id,
- str_type->result, NULL);
- break;
- }
-
- case IPC_IA_LIB_DNLD_CMPLT: {
- struct snd_sst_lib_download_info *resp =
- (struct snd_sst_lib_download_info *)msg->mailbox;
- int retval = resp->result;
-
- pr_debug("Lib downloaded %x\n", msg->header.part.msg_id);
- if (resp->result) {
- pr_err("err in lib dload %x\n", resp->result);
- } else {
- pr_debug("Codec download complete...\n");
- pr_debug("codec Type %d Ver %d Built %s: %s\n",
- resp->dload_lib.lib_info.lib_type,
- resp->dload_lib.lib_info.lib_version,
- resp->dload_lib.lib_info.b_date,
- resp->dload_lib.lib_info.b_time);
- }
- sst_wake_up_alloc_block(sst_drv_ctx, str_id,
- retval, NULL);
- break;
- }
-
- case IPC_IA_GET_FW_VERSION: {
- struct ipc_header_fw_init *version =
- (struct ipc_header_fw_init *)msg->mailbox;
- int major = version->fw_version.major;
- int minor = version->fw_version.minor;
- int build = version->fw_version.build;
- dev_info(&sst_drv_ctx->pci->dev,
- "INFO: ***LOADED SST FW VERSION*** = %02d.%02d.%02d\n",
- major, minor, build);
- break;
- }
- case IPC_IA_GET_FW_BUILD_INF: {
- struct sst_fw_build_info *build =
- (struct sst_fw_build_info *)msg->mailbox;
-		pr_debug("Build date:%s Time:%s\n", build->date, build->time);
- break;
- }
- case IPC_IA_SET_PMIC_TYPE:
- break;
- case IPC_IA_START_STREAM:
- pr_debug("reply for START STREAM %x\n", msg->header.full);
- break;
-
- case IPC_IA_GET_FW_CTXT:
- pr_debug("reply for get fw ctxt %x\n", msg->header.full);
- if (msg->header.part.data)
- sst_drv_ctx->fw_cntx_size = 0;
- else
- sst_drv_ctx->fw_cntx_size = *sst_drv_ctx->fw_cntx;
- pr_debug("fw copied data %x\n", sst_drv_ctx->fw_cntx_size);
- sst_wake_up_alloc_block(
- sst_drv_ctx, str_id, msg->header.part.data, NULL);
- break;
- default:
- /* Illegal case */
- pr_err("process reply:default = %x\n", msg->header.full);
- }
- sst_clear_interrupt();
- return;
-}
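
The reply handler deleted above implements the completion half of the driver's blocking IPC: for each message id it records the firmware status (negated) in the matching block's ret_code, marks the block completed, and wakes the sender sleeping in one of the sst_wait_* helpers. A condensed sketch of that per-case completion step (the surrounding switch and stream-id validation omitted; "blk" is only a stand-in for str_info->ctrl_blk, str_info->data_blk or one of the sst_drv_ctx->*_info_blk fields, not a variable in the removed code):

	if (!msg->header.part.data)
		blk->ret_code = 0;				/* firmware reported success */
	else
		blk->ret_code = -msg->header.part.data;		/* firmware error code, negated */

	if (blk->on == true) {
		blk->on = false;
		blk->condition = true;				/* completion flag tested by the waiter */
		wake_up(&sst_drv_ctx->wait_queue);		/* wake the blocked sender */
	}
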
diff --git a/drivers/staging/intel_sst/intel_sst_pvt.c b/drivers/staging/intel_sst/intel_sst_pvt.c
deleted file mode 100644
index e034bea56f14..000000000000
--- a/drivers/staging/intel_sst/intel_sst_pvt.c
+++ /dev/null
@@ -1,313 +0,0 @@
-/*
- * intel_sst_pvt.c - Intel SST Driver for audio engine
- *
- * Copyright (C) 2008-10 Intel Corp
- * Authors: Vinod Koul <vinod.koul@intel.com>
- * Harsha Priya <priya.harsha@intel.com>
- * Dharageswari R <dharageswari.r@intel.com>
- * KP Jeeja <jeeja.kp@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This driver exposes the audio engine functionalities to the ALSA
- * and middleware.
- *
- * This file contains all private functions
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/pci.h>
-#include <linux/fs.h>
-#include <linux/firmware.h>
-#include <linux/sched.h>
-#include "intel_sst.h"
-#include "intel_sst_ioctl.h"
-#include "intel_sst_fw_ipc.h"
-#include "intel_sst_common.h"
-
-/*
- * sst_get_block_stream - get a new block stream
- *
- * @sst_drv_ctx: Driver context structure
- *
- * This function assigns a block for calls that do not yet have a stream context.
- * The blocks are used to wait for the firmware's response to an operation.
- * Should be called with the stream lock held.
- */
-int sst_get_block_stream(struct intel_sst_drv *sst_drv_ctx)
-{
- int i;
-
- for (i = 0; i < MAX_ACTIVE_STREAM; i++) {
- if (sst_drv_ctx->alloc_block[i].sst_id == BLOCK_UNINIT) {
- sst_drv_ctx->alloc_block[i].ops_block.condition = false;
- sst_drv_ctx->alloc_block[i].ops_block.ret_code = 0;
- sst_drv_ctx->alloc_block[i].sst_id = 0;
- break;
- }
- }
- if (i == MAX_ACTIVE_STREAM) {
- pr_err("max alloc_stream reached\n");
- i = -EBUSY; /* active stream limit reached */
- }
- return i;
-}
-
-/*
- * sst_wait_interruptible - wait on event
- *
- * @sst_drv_ctx: Driver context
- * @block: Driver block to wait on
- *
- * This function waits without a timeout (and is interruptible) for a
- * given block event
- */
-int sst_wait_interruptible(struct intel_sst_drv *sst_drv_ctx,
- struct sst_block *block)
-{
- int retval = 0;
-
- if (!wait_event_interruptible(sst_drv_ctx->wait_queue,
- block->condition)) {
- /* event wake */
- if (block->ret_code < 0) {
- pr_err("stream failed %d\n", block->ret_code);
- retval = -EBUSY;
- } else {
- pr_debug("event up\n");
- retval = 0;
- }
- } else {
- pr_err("signal interrupted\n");
- retval = -EINTR;
- }
- return retval;
-
-}
-
-
-/*
- * sst_wait_interruptible_timeout - wait on event, interruptible
- *
- * @sst_drv_ctx: Driver context
- * @block: Driver block to wait on
- * @timeout: time to wait, in milliseconds
- *
- * This function waits with a timeout value (and is interruptible) on a
- * given block event
- */
-int sst_wait_interruptible_timeout(
- struct intel_sst_drv *sst_drv_ctx,
- struct sst_block *block, int timeout)
-{
- int retval = 0;
-
- pr_debug("sst_wait_interruptible_timeout - waiting....\n");
- if (wait_event_interruptible_timeout(sst_drv_ctx->wait_queue,
- block->condition,
- msecs_to_jiffies(timeout))) {
- if (block->ret_code < 0)
- pr_err("stream failed %d\n", block->ret_code);
- else
- pr_debug("event up\n");
- retval = block->ret_code;
- } else {
- block->on = false;
- pr_err("timeout occurred...\n");
- /*setting firmware state as uninit so that the
- firmware will get re-downloaded on next request
-		this is because the firmware not responding for 5 sec
-		is equivalent to some unrecoverable error of FW
- sst_drv_ctx->sst_state = SST_UN_INIT;*/
- retval = -EBUSY;
- }
- return retval;
-
-}
-
-
-/*
- * sst_wait_timeout - wait on event for timeout
- *
- * @sst_drv_ctx: Driver context
- * @block: Driver block to wait on
- *
- * This function waits with a timeout value (and is not interruptible) on a
- * given block event
- */
-int sst_wait_timeout(struct intel_sst_drv *sst_drv_ctx,
- struct stream_alloc_block *block)
-{
- int retval = 0;
-
- /* NOTE:
- Observed that FW processes the alloc msg and replies even
- before the alloc thread has finished execution */
- pr_debug("waiting for %x, condition %x\n",
- block->sst_id, block->ops_block.condition);
- if (wait_event_interruptible_timeout(sst_drv_ctx->wait_queue,
- block->ops_block.condition,
- msecs_to_jiffies(SST_BLOCK_TIMEOUT))) {
- /* event wake */
- pr_debug("Event wake %x\n", block->ops_block.condition);
- pr_debug("message ret: %d\n", block->ops_block.ret_code);
- retval = block->ops_block.ret_code;
- } else {
- block->ops_block.on = false;
- pr_err("Wait timed-out %x\n", block->ops_block.condition);
-		/* setting firmware state as uninit so that the
-		firmware will get re-downloaded on next request
-		this is because the firmware not responding for 5 sec
-		is equivalent to some unrecoverable error of FW
- sst_drv_ctx->sst_state = SST_UN_INIT;*/
- retval = -EBUSY;
- }
- return retval;
-
-}
-
-/*
- * sst_create_large_msg - create a large IPC message
- *
- * @arg: ipc message
- *
- * this function allocates structures to send a large message to the firmware
- */
-int sst_create_large_msg(struct ipc_post **arg)
-{
- struct ipc_post *msg;
-
- msg = kzalloc(sizeof(struct ipc_post), GFP_ATOMIC);
- if (!msg) {
- pr_err("kzalloc msg failed\n");
- return -ENOMEM;
- }
-
- msg->mailbox_data = kzalloc(SST_MAILBOX_SIZE, GFP_ATOMIC);
- if (!msg->mailbox_data) {
- kfree(msg);
-		pr_err("kzalloc mailbox_data failed\n");
- return -ENOMEM;
- }
- *arg = msg;
- return 0;
-}
-
-/*
- * sst_create_short_msg - create a short IPC message
- *
- * @arg: ipc message
- *
- * this function allocates structures to send a short message to the firmware
- */
-int sst_create_short_msg(struct ipc_post **arg)
-{
- struct ipc_post *msg;
-
- msg = kzalloc(sizeof(*msg), GFP_ATOMIC);
- if (!msg) {
- pr_err("kzalloc msg failed\n");
- return -ENOMEM;
- }
- msg->mailbox_data = NULL;
- *arg = msg;
- return 0;
-}
-
-/*
- * sst_clean_stream - clean the stream context
- *
- * @stream: stream structure
- *
- * this function resets the stream contexts
- * should be called in free
- */
-void sst_clean_stream(struct stream_info *stream)
-{
- struct sst_stream_bufs *bufs = NULL, *_bufs;
- stream->status = STREAM_UN_INIT;
- stream->prev = STREAM_UN_INIT;
- mutex_lock(&stream->lock);
- list_for_each_entry_safe(bufs, _bufs, &stream->bufs, node) {
- list_del(&bufs->node);
- kfree(bufs);
- }
- mutex_unlock(&stream->lock);
-
- if (stream->ops != STREAM_OPS_PLAYBACK_DRM)
- kfree(stream->decode_ibuf);
-}
-
-/*
- * sst_wake_up_alloc_block - wake up waiting block
- *
- * @sst_drv_ctx: Driver context
- * @sst_id: stream id
- * @status: status of wakeup
- * @data: data pointer of wakeup
- *
- * This function wakes up a sleeping block event based on the response
- */
-void sst_wake_up_alloc_block(struct intel_sst_drv *sst_drv_ctx,
- u8 sst_id, int status, void *data)
-{
- int i;
-
- /* Unblock with retval code */
- for (i = 0; i < MAX_ACTIVE_STREAM; i++) {
- if (sst_id == sst_drv_ctx->alloc_block[i].sst_id) {
- sst_drv_ctx->alloc_block[i].ops_block.condition = true;
- sst_drv_ctx->alloc_block[i].ops_block.ret_code = status;
- sst_drv_ctx->alloc_block[i].ops_block.data = data;
- wake_up(&sst_drv_ctx->wait_queue);
- break;
- }
- }
-}
-
-/*
- * sst_enable_rx_timeslot - Send msg to enable the RX time slot
- * @status: rx timeslot to be enabled
- *
- * This function is called when the RX timeslot is required to be enabled
- */
-int sst_enable_rx_timeslot(int status)
-{
- int retval = 0;
- struct ipc_post *msg = NULL;
-
- if (sst_create_short_msg(&msg)) {
- pr_err("mem allocation failed\n");
- return -ENOMEM;
- }
- pr_debug("ipc message sending: ENABLE_RX_TIME_SLOT\n");
- sst_fill_header(&msg->header, IPC_IA_ENABLE_RX_TIME_SLOT, 0, 0);
- msg->header.part.data = status;
- sst_drv_ctx->hs_info_blk.condition = false;
- sst_drv_ctx->hs_info_blk.ret_code = 0;
- sst_drv_ctx->hs_info_blk.on = true;
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&msg->node,
- &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- retval = sst_wait_interruptible_timeout(sst_drv_ctx,
- &sst_drv_ctx->hs_info_blk, SST_BLOCK_TIMEOUT);
- return retval;
-}
-
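
intel_sst_pvt.c, removed above, supplied the sending half of that handshake: sst_create_short_msg()/sst_create_large_msg() allocate the ipc_post, and the sst_wait_* helpers sleep on sst_drv_ctx->wait_queue until a block's condition becomes true. Callers throughout the removed files repeat the same arm/queue/post/wait sequence; a minimal sketch of it as a hypothetical wrapper (send_and_wait() is illustrative only and did not exist in the driver):

	static int send_and_wait(struct intel_sst_drv *ctx, struct sst_block *blk,
				 struct ipc_post *msg)
	{
		/* arm the block before posting so the reply cannot be missed */
		blk->condition = false;
		blk->ret_code = 0;
		blk->on = true;

		/* queue the message for the dispatch worker */
		spin_lock(&ctx->list_spin_lock);
		list_add_tail(&msg->node, &ctx->ipc_dispatch_list);
		spin_unlock(&ctx->list_spin_lock);

		sst_post_message(&ctx->ipc_post_msg_wq);

		/* block until the reply handler completes the block or we time out */
		return sst_wait_interruptible_timeout(ctx, blk, SST_BLOCK_TIMEOUT);
	}
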
diff --git a/drivers/staging/intel_sst/intel_sst_stream.c b/drivers/staging/intel_sst/intel_sst_stream.c
deleted file mode 100644
index be4565e74f8c..000000000000
--- a/drivers/staging/intel_sst/intel_sst_stream.c
+++ /dev/null
@@ -1,583 +0,0 @@
-/*
- * intel_sst_stream.c - Intel SST Driver for audio engine
- *
- * Copyright (C) 2008-10 Intel Corp
- * Authors: Vinod Koul <vinod.koul@intel.com>
- * Harsha Priya <priya.harsha@intel.com>
- * Dharageswari R <dharageswari.r@intel.com>
- * KP Jeeja <jeeja.kp@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This file contains the stream operations of SST driver
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/pci.h>
-#include <linux/firmware.h>
-#include <linux/sched.h>
-#include <linux/delay.h>
-#include "intel_sst_ioctl.h"
-#include "intel_sst.h"
-#include "intel_sst_fw_ipc.h"
-#include "intel_sst_common.h"
-
-/*
- * sst_check_device_type - Check the medfield device type
- *
- * @device: Device to be checked
- * @num_chan: Number of channels queried
- * @pcm_slot: slot to be enabled for this device
- *
- * This checks the device against the map and calculates the pcm_slot value
- */
-int sst_check_device_type(u32 device, u32 num_chan, u32 *pcm_slot)
-{
- if (device >= MAX_NUM_STREAMS_MFLD) {
- pr_debug("device type invalid %d\n", device);
- return -EINVAL;
- }
- if (sst_drv_ctx->streams[device].status == STREAM_UN_INIT) {
- if (device == SND_SST_DEVICE_VIBRA && num_chan == 1)
- *pcm_slot = 0x10;
- else if (device == SND_SST_DEVICE_HAPTIC && num_chan == 1)
- *pcm_slot = 0x20;
- else if (device == SND_SST_DEVICE_IHF && num_chan == 1)
- *pcm_slot = 0x04;
- else if (device == SND_SST_DEVICE_IHF && num_chan == 2)
- *pcm_slot = 0x0C;
- else if (device == SND_SST_DEVICE_HEADSET && num_chan == 1)
- *pcm_slot = 0x01;
- else if (device == SND_SST_DEVICE_HEADSET && num_chan == 2)
- *pcm_slot = 0x03;
- else if (device == SND_SST_DEVICE_CAPTURE && num_chan == 1)
- *pcm_slot = 0x01;
- else if (device == SND_SST_DEVICE_CAPTURE && num_chan == 2)
- *pcm_slot = 0x03;
- else if (device == SND_SST_DEVICE_CAPTURE && num_chan == 3)
- *pcm_slot = 0x07;
- else if (device == SND_SST_DEVICE_CAPTURE && num_chan == 4)
- *pcm_slot = 0x0F;
- else if (device == SND_SST_DEVICE_CAPTURE && num_chan > 4)
- *pcm_slot = 0x1F;
- else {
- pr_debug("No condition satisfied.. ret err\n");
- return -EINVAL;
- }
- } else {
-		pr_debug("this stream state is not un-init, is %d\n",
- sst_drv_ctx->streams[device].status);
- return -EBADRQC;
- }
- pr_debug("returning slot %x\n", *pcm_slot);
- return 0;
-}
-/**
- * get_mrst_stream_id - gets a new stream id for use
- *
- * This function searches the current streams and allocates an empty stream.
- * The stream_lock must be held before calling this.
- */
-static unsigned int get_mrst_stream_id(void)
-{
- int i;
-
- for (i = 1; i <= MAX_NUM_STREAMS_MRST; i++) {
- if (sst_drv_ctx->streams[i].status == STREAM_UN_INIT)
- return i;
- }
- pr_debug("Didn't find empty stream for mrst\n");
- return -EBUSY;
-}
-
-/**
- * sst_alloc_stream - Send msg for a new stream ID
- *
- * @params: stream params
- * @stream_ops: operation of stream PB/capture
- * @codec: codec for stream
- * @device: device stream to be allocated for
- *
- * This function is called by any function which wants to start
- * a new stream. It also checks if an idle stream exists and, if so,
- * initializes that idle stream id for this request.
- */
-int sst_alloc_stream(char *params, unsigned int stream_ops,
- u8 codec, unsigned int device)
-{
- struct ipc_post *msg = NULL;
- struct snd_sst_alloc_params alloc_param;
- unsigned int pcm_slot = 0, num_ch;
- int str_id;
- struct snd_sst_stream_params *sparams;
- struct stream_info *str_info;
-
- pr_debug("SST DBG:entering sst_alloc_stream\n");
- pr_debug("SST DBG:%d %d %d\n", stream_ops, codec, device);
-
- BUG_ON(!params);
- sparams = (struct snd_sst_stream_params *)params;
- num_ch = sparams->uc.pcm_params.num_chan;
- /*check the device type*/
- if (sst_drv_ctx->pci_id == SST_MFLD_PCI_ID) {
- if (sst_check_device_type(device, num_ch, &pcm_slot))
- return -EINVAL;
- mutex_lock(&sst_drv_ctx->stream_lock);
- str_id = device;
- mutex_unlock(&sst_drv_ctx->stream_lock);
- pr_debug("SST_DBG: slot %x\n", pcm_slot);
- } else {
- mutex_lock(&sst_drv_ctx->stream_lock);
- str_id = get_mrst_stream_id();
- mutex_unlock(&sst_drv_ctx->stream_lock);
- if (str_id <= 0)
- return -EBUSY;
- }
- /*allocate device type context*/
- sst_init_stream(&sst_drv_ctx->streams[str_id], codec,
- str_id, stream_ops, pcm_slot, device);
- /* send msg to FW to allocate a stream */
- if (sst_create_large_msg(&msg))
- return -ENOMEM;
-
- sst_fill_header(&msg->header, IPC_IA_ALLOC_STREAM, 1, str_id);
- msg->header.part.data = sizeof(alloc_param) + sizeof(u32);
- alloc_param.str_type.codec_type = codec;
- alloc_param.str_type.str_type = SST_STREAM_TYPE_MUSIC;
- alloc_param.str_type.operation = stream_ops;
- alloc_param.str_type.protected_str = 0; /* non drm */
- alloc_param.str_type.time_slots = pcm_slot;
- alloc_param.str_type.result = alloc_param.str_type.reserved = 0;
- memcpy(&alloc_param.stream_params, params,
- sizeof(struct snd_sst_stream_params));
-
- memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
- memcpy(msg->mailbox_data + sizeof(u32), &alloc_param,
- sizeof(alloc_param));
- str_info = &sst_drv_ctx->streams[str_id];
- str_info->ctrl_blk.condition = false;
- str_info->ctrl_blk.ret_code = 0;
- str_info->ctrl_blk.on = true;
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- pr_debug("SST DBG:alloc stream done\n");
- return str_id;
-}
-
-
-/*
- * sst_alloc_stream_response - process alloc reply
- *
- * @str_id: stream id for which the stream has been allocated
- * @resp: the stream response from firmware
- *
- * This function is called on receiving the firmware's response to a stream
- * allocation request
- */
-int sst_alloc_stream_response(unsigned int str_id,
- struct snd_sst_alloc_response *resp)
-{
- int retval = 0;
- struct stream_info *str_info;
- struct snd_sst_lib_download *lib_dnld;
-
- pr_debug("SST DEBUG: stream number given = %d\n", str_id);
- str_info = &sst_drv_ctx->streams[str_id];
- if (resp->str_type.result == SST_LIB_ERR_LIB_DNLD_REQUIRED) {
- lib_dnld = kzalloc(sizeof(*lib_dnld), GFP_KERNEL);
- memcpy(lib_dnld, &resp->lib_dnld, sizeof(*lib_dnld));
- } else
- lib_dnld = NULL;
- if (str_info->ctrl_blk.on == true) {
- str_info->ctrl_blk.on = false;
- str_info->ctrl_blk.data = lib_dnld;
- str_info->ctrl_blk.condition = true;
- str_info->ctrl_blk.ret_code = resp->str_type.result;
- pr_debug("SST DEBUG: sst_alloc_stream_response: waking up.\n");
- wake_up(&sst_drv_ctx->wait_queue);
- }
- return retval;
-}
-
-
-/**
-* sst_get_fw_info - Send msg to query for firmware configurations
-* @info: out param that holds the firmware configurations
-*
-* This function is called when the firmware configurations are queried for
-*/
-int sst_get_fw_info(struct snd_sst_fw_info *info)
-{
- int retval = 0;
- struct ipc_post *msg = NULL;
-
- pr_debug("SST DBG:sst_get_fw_info called\n");
-
- if (sst_create_short_msg(&msg)) {
- pr_err("SST ERR: message creation failed\n");
- return -ENOMEM;
- }
-
- sst_fill_header(&msg->header, IPC_IA_GET_FW_INFO, 0, 0);
- sst_drv_ctx->fw_info_blk.condition = false;
- sst_drv_ctx->fw_info_blk.ret_code = 0;
- sst_drv_ctx->fw_info_blk.on = true;
- sst_drv_ctx->fw_info_blk.data = info;
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- retval = sst_wait_interruptible_timeout(sst_drv_ctx,
- &sst_drv_ctx->fw_info_blk, SST_BLOCK_TIMEOUT);
- if (retval) {
- pr_err("SST ERR: error in fw_info = %d\n", retval);
- retval = -EIO;
- }
- return retval;
-}
-
-
-/**
-* sst_start_stream - Send msg for starting stream
-* @str_id: stream ID
-*
-* This function is called by any function which wants to start
-* an already allocated stream.
-*/
-int sst_start_stream(int str_id)
-{
- int retval = 0;
- struct ipc_post *msg = NULL;
- struct stream_info *str_info;
-
- pr_debug("sst_start_stream for %d\n", str_id);
- retval = sst_validate_strid(str_id);
- if (retval)
- return retval;
- str_info = &sst_drv_ctx->streams[str_id];
- if (str_info->status != STREAM_INIT)
- return -EBADRQC;
- if (sst_create_short_msg(&msg))
- return -ENOMEM;
-
- sst_fill_header(&msg->header, IPC_IA_START_STREAM, 0, str_id);
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- return retval;
-}
-
-/*
- * sst_pause_stream - Send msg for pausing a stream
- * @str_id: stream ID
- *
- * This function is called by any function which wants to pause
- * an already running stream.
- */
-int sst_pause_stream(int str_id)
-{
- int retval = 0;
- struct ipc_post *msg = NULL;
- struct stream_info *str_info;
-
- pr_debug("SST DBG:sst_pause_stream for %d\n", str_id);
- retval = sst_validate_strid(str_id);
- if (retval)
- return retval;
- str_info = &sst_drv_ctx->streams[str_id];
- if (str_info->status == STREAM_PAUSED)
- return 0;
- if (str_info->status == STREAM_RUNNING ||
- str_info->status == STREAM_INIT) {
- if (str_info->prev == STREAM_UN_INIT)
- return -EBADRQC;
- if (str_info->ctrl_blk.on == true) {
- pr_err("SST ERR: control path is in use\n");
- return -EINVAL;
- }
- if (sst_create_short_msg(&msg))
- return -ENOMEM;
-
- sst_fill_header(&msg->header, IPC_IA_PAUSE_STREAM, 0, str_id);
- str_info->ctrl_blk.condition = false;
- str_info->ctrl_blk.ret_code = 0;
- str_info->ctrl_blk.on = true;
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&msg->node,
- &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- retval = sst_wait_interruptible_timeout(sst_drv_ctx,
- &str_info->ctrl_blk, SST_BLOCK_TIMEOUT);
- if (retval == 0) {
- str_info->prev = str_info->status;
- str_info->status = STREAM_PAUSED;
- } else if (retval == SST_ERR_INVALID_STREAM_ID) {
- retval = -EINVAL;
- mutex_lock(&sst_drv_ctx->stream_lock);
- sst_clean_stream(str_info);
- mutex_unlock(&sst_drv_ctx->stream_lock);
- }
- } else {
- retval = -EBADRQC;
- pr_err("SST ERR: BADQRC for stream\n");
- }
-
- return retval;
-}
-
-/**
- * sst_resume_stream - Send msg for resuming stream
- * @str_id: stream ID
- *
- * This function is called by any function which wants to resume
- * an already paused stream.
- */
-int sst_resume_stream(int str_id)
-{
- int retval = 0;
- struct ipc_post *msg = NULL;
- struct stream_info *str_info;
-
- pr_debug("SST DBG:sst_resume_stream for %d\n", str_id);
- retval = sst_validate_strid(str_id);
- if (retval)
- return retval;
- str_info = &sst_drv_ctx->streams[str_id];
- if (str_info->status == STREAM_RUNNING)
- return 0;
- if (str_info->status == STREAM_PAUSED) {
- if (str_info->ctrl_blk.on == true) {
- pr_err("SST ERR: control path in use\n");
- return -EINVAL;
- }
- if (sst_create_short_msg(&msg)) {
- pr_err("SST ERR: mem allocation failed\n");
- return -ENOMEM;
- }
- sst_fill_header(&msg->header, IPC_IA_RESUME_STREAM, 0, str_id);
- str_info->ctrl_blk.condition = false;
- str_info->ctrl_blk.ret_code = 0;
- str_info->ctrl_blk.on = true;
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&msg->node,
- &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- retval = sst_wait_interruptible_timeout(sst_drv_ctx,
- &str_info->ctrl_blk, SST_BLOCK_TIMEOUT);
- if (!retval) {
- if (str_info->prev == STREAM_RUNNING)
- str_info->status = STREAM_RUNNING;
- else
- str_info->status = STREAM_INIT;
- str_info->prev = STREAM_PAUSED;
- } else if (retval == -SST_ERR_INVALID_STREAM_ID) {
- retval = -EINVAL;
- mutex_lock(&sst_drv_ctx->stream_lock);
- sst_clean_stream(str_info);
- mutex_unlock(&sst_drv_ctx->stream_lock);
- }
- } else {
- retval = -EBADRQC;
- pr_err("SST ERR: BADQRC for stream\n");
- }
-
- return retval;
-}
-
-
-/**
- * sst_drop_stream - Send msg for stopping stream
- * @str_id: stream ID
- *
- * This function is called by any function which wants to stop
- * a stream.
- */
-int sst_drop_stream(int str_id)
-{
- int retval = 0;
- struct ipc_post *msg = NULL;
- struct sst_stream_bufs *bufs = NULL, *_bufs;
- struct stream_info *str_info;
-
- pr_debug("SST DBG:sst_drop_stream for %d\n", str_id);
- retval = sst_validate_strid(str_id);
- if (retval)
- return retval;
- str_info = &sst_drv_ctx->streams[str_id];
-
- if (str_info->status != STREAM_UN_INIT &&
- str_info->status != STREAM_DECODE) {
- if (str_info->ctrl_blk.on == true) {
- pr_err("SST ERR: control path in use\n");
- return -EINVAL;
- }
- if (sst_create_short_msg(&msg)) {
- pr_err("SST ERR: mem allocation failed\n");
- return -ENOMEM;
- }
- sst_fill_header(&msg->header, IPC_IA_DROP_STREAM, 0, str_id);
- str_info->ctrl_blk.condition = false;
- str_info->ctrl_blk.ret_code = 0;
- str_info->ctrl_blk.on = true;
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&msg->node,
- &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- retval = sst_wait_interruptible_timeout(sst_drv_ctx,
- &str_info->ctrl_blk, SST_BLOCK_TIMEOUT);
- if (!retval) {
- pr_debug("SST DBG:drop success\n");
- str_info->prev = STREAM_UN_INIT;
- str_info->status = STREAM_INIT;
- if (str_info->src != MAD_DRV) {
- mutex_lock(&str_info->lock);
- list_for_each_entry_safe(bufs, _bufs,
- &str_info->bufs, node) {
- list_del(&bufs->node);
- kfree(bufs);
- }
- mutex_unlock(&str_info->lock);
- }
- str_info->cumm_bytes += str_info->curr_bytes;
- } else if (retval == -SST_ERR_INVALID_STREAM_ID) {
- retval = -EINVAL;
- mutex_lock(&sst_drv_ctx->stream_lock);
- sst_clean_stream(str_info);
- mutex_unlock(&sst_drv_ctx->stream_lock);
- }
- if (str_info->data_blk.on == true) {
- str_info->data_blk.condition = true;
- str_info->data_blk.ret_code = retval;
- wake_up(&sst_drv_ctx->wait_queue);
- }
- } else {
- retval = -EBADRQC;
- pr_err("SST ERR: BADQRC for stream\n");
- }
- return retval;
-}
-
-/**
-* sst_drain_stream - Send msg for draining stream
-* @str_id: stream ID
-*
-* This function is called by any function which wants to drain
-* a stream.
-*/
-int sst_drain_stream(int str_id)
-{
- int retval = 0;
- struct ipc_post *msg = NULL;
- struct stream_info *str_info;
-
- pr_debug("SST DBG:sst_drain_stream for %d\n", str_id);
- retval = sst_validate_strid(str_id);
- if (retval)
- return retval;
- str_info = &sst_drv_ctx->streams[str_id];
-
- if (str_info->status != STREAM_RUNNING &&
- str_info->status != STREAM_INIT &&
- str_info->status != STREAM_PAUSED) {
- pr_err("SST ERR: BADQRC for stream = %d\n",
- str_info->status);
- return -EBADRQC;
- }
-
- if (str_info->status == STREAM_INIT) {
- if (sst_create_short_msg(&msg)) {
- pr_err("SST ERR: mem allocation failed\n");
- return -ENOMEM;
- }
- sst_fill_header(&msg->header, IPC_IA_DRAIN_STREAM, 0, str_id);
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- } else
- str_info->need_draining = true;
- str_info->data_blk.condition = false;
- str_info->data_blk.ret_code = 0;
- str_info->data_blk.on = true;
- retval = sst_wait_interruptible(sst_drv_ctx, &str_info->data_blk);
- str_info->need_draining = false;
- return retval;
-}
-
-/**
- * sst_free_stream - Frees a stream
- * @str_id: stream ID
- *
- * This function is called by any function which wants to free
- * a stream.
- */
-int sst_free_stream(int str_id)
-{
- int retval = 0;
- struct ipc_post *msg = NULL;
- struct stream_info *str_info;
-
- pr_debug("SST DBG:sst_free_stream for %d\n", str_id);
-
- retval = sst_validate_strid(str_id);
- if (retval)
- return retval;
- str_info = &sst_drv_ctx->streams[str_id];
-
- if (str_info->status != STREAM_UN_INIT) {
- if (sst_create_short_msg(&msg)) {
- pr_err("SST ERR: mem allocation failed\n");
- return -ENOMEM;
- }
- sst_fill_header(&msg->header, IPC_IA_FREE_STREAM, 0, str_id);
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- str_info->prev = str_info->status;
- str_info->status = STREAM_UN_INIT;
- if (str_info->data_blk.on == true) {
- str_info->data_blk.condition = true;
- str_info->data_blk.ret_code = 0;
- wake_up(&sst_drv_ctx->wait_queue);
- }
- str_info->data_blk.on = true;
- str_info->data_blk.condition = false;
- retval = sst_wait_interruptible_timeout(sst_drv_ctx,
- &str_info->ctrl_blk, SST_BLOCK_TIMEOUT);
- pr_debug("wait for free returned %d\n", retval);
- msleep(100);
- mutex_lock(&sst_drv_ctx->stream_lock);
- sst_clean_stream(str_info);
- mutex_unlock(&sst_drv_ctx->stream_lock);
- pr_debug("SST DBG:Stream freed\n");
- } else {
- retval = -EBADRQC;
- pr_debug("SST DBG:BADQRC for stream\n");
- }
-
- return retval;
-}
-
-
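
The entry points in intel_sst_stream.c, removed above, drive a per-stream state machine kept in stream_info->status: sst_alloc_stream() picks a stream id and posts IPC_IA_ALLOC_STREAM, sst_start_stream() only accepts a stream in STREAM_INIT, sst_pause_stream()/sst_resume_stream() move it to and from STREAM_PAUSED, sst_drop_stream() returns it to STREAM_INIT, and sst_free_stream() puts it back to STREAM_UN_INIT. A rough caller-side sketch of the intended ordering (not taken from the driver; params, stream_ops, codec and device are assumed to have been prepared by the caller, and only the allocation return value is checked):

	/* params, stream_ops, codec and device are assumed to be set up by
	 * the caller; error handling after allocation is omitted. */
	int str_id = sst_alloc_stream(params, stream_ops, codec, device);
	if (str_id < 0)
		return str_id;

	sst_start_stream(str_id);	/* requires STREAM_INIT */
	sst_pause_stream(str_id);	/* running or init -> STREAM_PAUSED */
	sst_resume_stream(str_id);	/* STREAM_PAUSED -> previous state */
	sst_drop_stream(str_id);	/* stop; back to STREAM_INIT */
	sst_free_stream(str_id);	/* release; STREAM_UN_INIT */
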
diff --git a/drivers/staging/intel_sst/intel_sst_stream_encoded.c b/drivers/staging/intel_sst/intel_sst_stream_encoded.c
deleted file mode 100644
index 2be58c5cba02..000000000000
--- a/drivers/staging/intel_sst/intel_sst_stream_encoded.c
+++ /dev/null
@@ -1,1273 +0,0 @@
-/*
- * intel_sst_stream_encoded.c - Intel SST Driver for audio engine
- *
- * Copyright (C) 2008-10 Intel Corp
- * Authors: Vinod Koul <vinod.koul@intel.com>
- * Harsha Priya <priya.harsha@intel.com>
- * Dharageswari R <dharageswari.r@intel.com>
- * KP Jeeja <jeeja.kp@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This file contains the stream operations of SST driver
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/pci.h>
-#include <linux/syscalls.h>
-#include <linux/firmware.h>
-#include <linux/sched.h>
-#ifdef CONFIG_MRST_RAR_HANDLER
-#include <linux/rar_register.h>
-#include "../memrar/memrar.h"
-#endif
-#include "intel_sst_ioctl.h"
-#include "intel_sst.h"
-#include "intel_sst_fw_ipc.h"
-#include "intel_sst_common.h"
-/**
-* sst_get_stream_params - Send msg to query for stream parameters
-* @str_id: stream id for which the parameters are queried
-* @get_params: out parameter to which the parameters are copied
-*
-* This function is called when the stream parameters are queried for
-*/
-int sst_get_stream_params(int str_id,
- struct snd_sst_get_stream_params *get_params)
-{
- int retval = 0;
- struct ipc_post *msg = NULL;
- struct stream_info *str_info;
- struct snd_sst_fw_get_stream_params *fw_params;
-
- pr_debug("get_stream for %d\n", str_id);
- retval = sst_validate_strid(str_id);
- if (retval)
- return retval;
-
- str_info = &sst_drv_ctx->streams[str_id];
- if (str_info->status != STREAM_UN_INIT) {
- if (str_info->ctrl_blk.on == true) {
- pr_err("control path in use\n");
- return -EINVAL;
- }
- if (sst_create_short_msg(&msg)) {
- pr_err("message creation failed\n");
- return -ENOMEM;
- }
- fw_params = kzalloc(sizeof(*fw_params), GFP_ATOMIC);
- if (!fw_params) {
- pr_err("mem allocation failed\n");
- kfree(msg);
- return -ENOMEM;
- }
-
- sst_fill_header(&msg->header, IPC_IA_GET_STREAM_PARAMS,
- 0, str_id);
- str_info->ctrl_blk.condition = false;
- str_info->ctrl_blk.ret_code = 0;
- str_info->ctrl_blk.on = true;
- str_info->ctrl_blk.data = (void *) fw_params;
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- retval = sst_wait_interruptible_timeout(sst_drv_ctx,
- &str_info->ctrl_blk, SST_BLOCK_TIMEOUT);
- if (retval) {
- get_params->codec_params.result = retval;
- kfree(fw_params);
- return -EIO;
- }
- memcpy(&get_params->pcm_params, &fw_params->pcm_params,
- sizeof(fw_params->pcm_params));
- memcpy(&get_params->codec_params.sparams,
- &fw_params->codec_params,
- sizeof(fw_params->codec_params));
- get_params->codec_params.result = 0;
- get_params->codec_params.stream_id = str_id;
- get_params->codec_params.codec = str_info->codec;
- get_params->codec_params.ops = str_info->ops;
- get_params->codec_params.stream_type = str_info->str_type;
- kfree(fw_params);
- } else {
- pr_debug("Stream is not in the init state\n");
- }
- return retval;
-}
-
-/**
- * sst_set_stream_param - Send msg for setting stream parameters
- *
- * @str_id: stream id
- * @str_param: stream params
- *
- * This function sets stream params during runtime
- */
-int sst_set_stream_param(int str_id, struct snd_sst_params *str_param)
-{
- int retval = 0;
- struct ipc_post *msg = NULL;
- struct stream_info *str_info;
-
- BUG_ON(!str_param);
- if (sst_drv_ctx->streams[str_id].ops != str_param->ops) {
- pr_err("Invalid operation\n");
- return -EINVAL;
- }
- retval = sst_validate_strid(str_id);
- if (retval)
- return retval;
- pr_debug("set_stream for %d\n", str_id);
- str_info = &sst_drv_ctx->streams[str_id];
- if (sst_drv_ctx->streams[str_id].status == STREAM_INIT) {
- if (str_info->ctrl_blk.on == true) {
- pr_err("control path in use\n");
- return -EAGAIN;
- }
- if (sst_create_large_msg(&msg))
- return -ENOMEM;
-
- sst_fill_header(&msg->header,
- IPC_IA_SET_STREAM_PARAMS, 1, str_id);
- str_info->ctrl_blk.condition = false;
- str_info->ctrl_blk.ret_code = 0;
- str_info->ctrl_blk.on = true;
- msg->header.part.data = sizeof(u32) +
- sizeof(str_param->sparams);
- memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
- memcpy(msg->mailbox_data + sizeof(u32), &str_param->sparams,
- sizeof(str_param->sparams));
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- retval = sst_wait_interruptible_timeout(sst_drv_ctx,
- &str_info->ctrl_blk, SST_BLOCK_TIMEOUT);
- if (retval < 0) {
- retval = -EIO;
- sst_clean_stream(str_info);
- }
- } else {
- retval = -EBADRQC;
- pr_err("BADQRC for stream\n");
- }
- return retval;
-}
-
-/**
-* sst_get_vol - Get the premix gain or the gain of a stream
-*
-* @get_vol: this is an output param through which the volume
-* structure is passed back to user
-*
-* This function is called when the premix gain or stream gain is queried for
-*/
-int sst_get_vol(struct snd_sst_vol *get_vol)
-{
- int retval = 0;
- struct ipc_post *msg = NULL;
- struct snd_sst_vol *fw_get_vol;
- int str_id = get_vol->stream_id;
-
- pr_debug("get vol called\n");
-
- if (sst_create_short_msg(&msg))
- return -ENOMEM;
-
- sst_fill_header(&msg->header,
- IPC_IA_GET_STREAM_VOL, 0, str_id);
- sst_drv_ctx->vol_info_blk.condition = false;
- sst_drv_ctx->vol_info_blk.ret_code = 0;
- sst_drv_ctx->vol_info_blk.on = true;
- fw_get_vol = kzalloc(sizeof(*fw_get_vol), GFP_ATOMIC);
- if (!fw_get_vol) {
- pr_err("mem allocation failed\n");
- kfree(msg);
- return -ENOMEM;
- }
- sst_drv_ctx->vol_info_blk.data = (void *)fw_get_vol;
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- retval = sst_wait_interruptible_timeout(sst_drv_ctx,
- &sst_drv_ctx->vol_info_blk, SST_BLOCK_TIMEOUT);
- if (retval)
- retval = -EIO;
- else {
- pr_debug("stream id %d\n", fw_get_vol->stream_id);
- pr_debug("volume %d\n", fw_get_vol->volume);
- pr_debug("ramp duration %d\n", fw_get_vol->ramp_duration);
- pr_debug("ramp_type %d\n", fw_get_vol->ramp_type);
- memcpy(get_vol, fw_get_vol, sizeof(*fw_get_vol));
- }
- return retval;
-}
-
-/**
-* sst_set_vol - Set the premix gain or the gain of a stream
-*
-* @set_vol: this holds the volume structure that needs to be set
-*
-* This function is called when premix gain or stream gain is requested to be set
-*/
-int sst_set_vol(struct snd_sst_vol *set_vol)
-{
-
- int retval = 0;
- struct ipc_post *msg = NULL;
-
- pr_debug("set vol called\n");
-
- if (sst_create_large_msg(&msg)) {
- pr_err("message creation failed\n");
- return -ENOMEM;
- }
- sst_fill_header(&msg->header, IPC_IA_SET_STREAM_VOL, 1,
- set_vol->stream_id);
-
- msg->header.part.data = sizeof(u32) + sizeof(*set_vol);
- memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
- memcpy(msg->mailbox_data + sizeof(u32), set_vol, sizeof(*set_vol));
- sst_drv_ctx->vol_info_blk.condition = false;
- sst_drv_ctx->vol_info_blk.ret_code = 0;
- sst_drv_ctx->vol_info_blk.on = true;
- sst_drv_ctx->vol_info_blk.data = set_vol;
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- retval = sst_wait_interruptible_timeout(sst_drv_ctx,
- &sst_drv_ctx->vol_info_blk, SST_BLOCK_TIMEOUT);
- if (retval) {
- pr_err("error in set_vol = %d\n", retval);
- retval = -EIO;
- }
- return retval;
-}
-
-/**
-* sst_set_mute - This function sets premix mute or soft mute of a stream
-*
-* @set_mute: this holds the mute structure that needs to be set
-*
-* This function is called when premix mute or stream mute is requested to be set
-*/
-int sst_set_mute(struct snd_sst_mute *set_mute)
-{
-
- int retval = 0;
- struct ipc_post *msg = NULL;
-
- pr_debug("set mute called\n");
-
- if (sst_create_large_msg(&msg)) {
- pr_err("message creation failed\n");
- return -ENOMEM;
- }
- sst_fill_header(&msg->header, IPC_IA_SET_STREAM_MUTE, 1,
- set_mute->stream_id);
- sst_drv_ctx->mute_info_blk.condition = false;
- sst_drv_ctx->mute_info_blk.ret_code = 0;
- sst_drv_ctx->mute_info_blk.on = true;
- sst_drv_ctx->mute_info_blk.data = set_mute;
-
- msg->header.part.data = sizeof(u32) + sizeof(*set_mute);
- memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
- memcpy(msg->mailbox_data + sizeof(u32), set_mute,
- sizeof(*set_mute));
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- retval = sst_wait_interruptible_timeout(sst_drv_ctx,
- &sst_drv_ctx->mute_info_blk, SST_BLOCK_TIMEOUT);
- if (retval) {
- pr_err("error in set_mute = %d\n", retval);
- retval = -EIO;
- }
- return retval;
-}
-
-int sst_prepare_target(struct snd_sst_slot_info *slot)
-{
- if (slot->target_device == SND_SST_TARGET_PMIC
- && slot->device_instance == 1) {
- /*music mode*/
- if (sst_drv_ctx->pmic_port_instance == 0)
- sst_drv_ctx->scard_ops->set_voice_port(
- DEACTIVATE);
- } else if ((slot->target_device == SND_SST_TARGET_PMIC ||
- slot->target_device == SND_SST_TARGET_MODEM) &&
- slot->device_instance == 0) {
- /*voip mode where pcm0 is active*/
- if (sst_drv_ctx->pmic_port_instance == 1)
- sst_drv_ctx->scard_ops->set_audio_port(
- DEACTIVATE);
- }
- return 0;
-}
-
-int sst_activate_target(struct snd_sst_slot_info *slot)
-{
- if (slot->target_device == SND_SST_TARGET_PMIC &&
- slot->device_instance == 1) {
- /*music mode*/
- sst_drv_ctx->pmic_port_instance = 1;
- sst_drv_ctx->scard_ops->set_audio_port(ACTIVATE);
- sst_drv_ctx->scard_ops->set_pcm_audio_params(
- slot->pcm_params.sfreq,
- slot->pcm_params.pcm_wd_sz,
- slot->pcm_params.num_chan);
- if (sst_drv_ctx->pb_streams)
- sst_drv_ctx->scard_ops->power_up_pmic_pb(1);
- if (sst_drv_ctx->cp_streams)
- sst_drv_ctx->scard_ops->power_up_pmic_cp(1);
- } else if ((slot->target_device == SND_SST_TARGET_PMIC ||
- slot->target_device == SND_SST_TARGET_MODEM) &&
- slot->device_instance == 0) {
- /*voip mode where pcm0 is active*/
- sst_drv_ctx->pmic_port_instance = 0;
- sst_drv_ctx->scard_ops->set_voice_port(
- ACTIVATE);
- sst_drv_ctx->scard_ops->power_up_pmic_pb(0);
- /*sst_drv_ctx->scard_ops->power_up_pmic_cp(0);*/
- }
- return 0;
-}
-
-int sst_parse_target(struct snd_sst_slot_info *slot)
-{
- int retval = 0;
-
- if (slot->action == SND_SST_PORT_ACTIVATE &&
- slot->device_type == SND_SST_DEVICE_PCM) {
- retval = sst_activate_target(slot);
- if (retval)
- pr_err("SST_Activate_target_fail\n");
- else
- pr_err("SST_Activate_target_pass\n");
- } else if (slot->action == SND_SST_PORT_PREPARE &&
- slot->device_type == SND_SST_DEVICE_PCM) {
- retval = sst_prepare_target(slot);
- if (retval)
- pr_err("SST_prepare_target_fail\n");
- else
- pr_err("SST_prepare_target_pass\n");
- } else {
- pr_err("slot_action : %d, device_type: %d\n",
- slot->action, slot->device_type);
- }
- return retval;
-}
-
-int sst_send_target(struct snd_sst_target_device *target)
-{
- int retval;
- struct ipc_post *msg;
-
- if (sst_create_large_msg(&msg)) {
- pr_err("message creation failed\n");
- return -ENOMEM;
- }
- sst_fill_header(&msg->header, IPC_IA_TARGET_DEV_SELECT, 1, 0);
- sst_drv_ctx->tgt_dev_blk.condition = false;
- sst_drv_ctx->tgt_dev_blk.ret_code = 0;
- sst_drv_ctx->tgt_dev_blk.on = true;
-
- msg->header.part.data = sizeof(u32) + sizeof(*target);
- memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
- memcpy(msg->mailbox_data + sizeof(u32), target,
- sizeof(*target));
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- pr_debug("message sent- waiting\n");
- retval = sst_wait_interruptible_timeout(sst_drv_ctx,
- &sst_drv_ctx->tgt_dev_blk, TARGET_DEV_BLOCK_TIMEOUT);
- if (retval)
- pr_err("target device ipc failed = 0x%x\n", retval);
- return retval;
-
-}
-
-int sst_target_device_validate(struct snd_sst_target_device *target)
-{
- int retval = 0;
- int i;
-
- for (i = 0; i < SST_MAX_TARGET_DEVICES; i++) {
- if (target->devices[i].device_type == SND_SST_DEVICE_PCM) {
- /*pcm device, check params*/
- if (target->devices[i].device_instance == 1) {
- if ((target->devices[i].device_mode !=
- SND_SST_DEV_MODE_PCM_MODE4_I2S) &&
- (target->devices[i].device_mode !=
- SND_SST_DEV_MODE_PCM_MODE4_RIGHT_JUSTIFIED)
- && (target->devices[i].device_mode !=
- SND_SST_DEV_MODE_PCM_MODE1))
- goto err;
- } else if (target->devices[i].device_instance == 0) {
- if ((target->devices[i].device_mode !=
- SND_SST_DEV_MODE_PCM_MODE2)
- && (target->devices[i].device_mode !=
- SND_SST_DEV_MODE_PCM_MODE4_I2S)
- && (target->devices[i].device_mode !=
- SND_SST_DEV_MODE_PCM_MODE1))
- goto err;
- if (target->devices[i].pcm_params.sfreq != 8000
- || target->devices[i].pcm_params.num_chan != 1
- || target->devices[i].pcm_params.pcm_wd_sz !=
- 16)
- goto err;
- } else {
-err:
- pr_err("i/p params incorrect\n");
- return -EINVAL;
- }
- }
- }
- return retval;
-}
-
-/**
- * sst_target_device_select - This function sets the target device configurations
- *
- * @target: this parameter holds the configurations to be set
- *
- * This function is called when the user layer wants to change the target
- * device's configurations
- */
-
-int sst_target_device_select(struct snd_sst_target_device *target)
-{
- int retval, i, prepare_count = 0;
-
- pr_debug("Target Device Select\n");
-
- if (target->device_route < 0 || target->device_route > 2) {
- pr_err("device route is invalid\n");
- return -EINVAL;
- }
-
- if (target->device_route != 0) {
- pr_err("Unsupported config\n");
- return -EIO;
- }
- retval = sst_target_device_validate(target);
- if (retval)
- return retval;
-
- retval = sst_send_target(target);
- if (retval)
- return retval;
- for (i = 0; i < SST_MAX_TARGET_DEVICES; i++) {
- if (target->devices[i].action == SND_SST_PORT_ACTIVATE) {
- pr_debug("activate called in %d\n", i);
- retval = sst_parse_target(&target->devices[i]);
- if (retval)
- return retval;
- } else if (target->devices[i].action == SND_SST_PORT_PREPARE) {
- pr_debug("PREPARE in %d, Forwarding\n", i);
- retval = sst_parse_target(&target->devices[i]);
- if (retval) {
- pr_err("Parse Target fail %d\n", retval);
- return retval;
- }
- pr_debug("Parse Target successful %d\n", retval);
- if (target->devices[i].device_type ==
- SND_SST_DEVICE_PCM)
- prepare_count++;
- }
- }
- if (target->devices[0].action == SND_SST_PORT_PREPARE &&
- prepare_count == 0)
- sst_drv_ctx->scard_ops->power_down_pmic();
-
- return retval;
-}
-#ifdef CONFIG_MRST_RAR_HANDLER
-/*This function gets the physical address of the secure memory from the handle*/
-static inline int sst_get_RAR(struct RAR_buffer *buffers, int count)
-{
- int retval = 0, rar_status = 0;
-
- rar_status = rar_handle_to_bus(buffers, count);
-
- if (count != rar_status) {
- pr_err("The rar CALL Failed");
- retval = -EIO;
- }
- if (buffers->info.type != RAR_TYPE_AUDIO) {
- pr_err("Invalid RAR type\n");
- return -EINVAL;
- }
- return retval;
-}
-
-#endif
-
-/* This function creates the scatter-gather list to be sent to the firmware
-for capture/playback data */
-static int sst_create_sg_list(struct stream_info *stream,
- struct sst_frame_info *sg_list)
-{
- struct sst_stream_bufs *kbufs = NULL;
-#ifdef CONFIG_MRST_RAR_HANDLER
- struct RAR_buffer rar_buffers;
- int retval = 0;
-#endif
- int i = 0;
- list_for_each_entry(kbufs, &stream->bufs, node) {
- if (kbufs->in_use == false) {
-#ifdef CONFIG_MRST_RAR_HANDLER
- if (stream->ops == STREAM_OPS_PLAYBACK_DRM) {
- pr_debug("DRM playback handling\n");
- rar_buffers.info.handle = (__u32)kbufs->addr;
- rar_buffers.info.size = kbufs->size;
- pr_debug("rar handle 0x%x size=0x%x\n",
- rar_buffers.info.handle,
- rar_buffers.info.size);
- retval = sst_get_RAR(&rar_buffers, 1);
-
- if (retval)
- return retval;
- sg_list->addr[i].addr = rar_buffers.bus_address;
- /* rar_buffers.info.size; */
- sg_list->addr[i].size = (__u32)kbufs->size;
- pr_debug("phyaddr[%d] 0x%x Size:0x%x\n"
- , i, sg_list->addr[i].addr,
- sg_list->addr[i].size);
- }
-#endif
- if (stream->ops != STREAM_OPS_PLAYBACK_DRM) {
- sg_list->addr[i].addr =
- virt_to_phys((void *)
- kbufs->addr + kbufs->offset);
- sg_list->addr[i].size = kbufs->size;
- pr_debug("phyaddr[%d]:0x%x Size:0x%x\n"
- , i , sg_list->addr[i].addr, kbufs->size);
- }
- stream->curr_bytes += sg_list->addr[i].size;
- kbufs->in_use = true;
- i++;
- }
- if (i >= MAX_NUM_SCATTER_BUFFERS)
- break;
- }
-
- sg_list->num_entries = i;
- pr_debug("sg list entries = %d\n", sg_list->num_entries);
- return i;
-}
-
-
-/**
- * sst_play_frame - Send msg for sending stream frames
- *
- * @str_id: ID of stream
- *
- * This function is called to send data to the firmware
- * to be played out
- */
-int sst_play_frame(int str_id)
-{
- int i = 0, retval = 0;
- struct ipc_post *msg = NULL;
- struct sst_frame_info sg_list = {0};
- struct sst_stream_bufs *kbufs = NULL, *_kbufs;
- struct stream_info *stream;
-
- pr_debug("play frame for %d\n", str_id);
- retval = sst_validate_strid(str_id);
- if (retval)
- return retval;
-
- stream = &sst_drv_ctx->streams[str_id];
- /* clear prev sent buffers */
- list_for_each_entry_safe(kbufs, _kbufs, &stream->bufs, node) {
- if (kbufs->in_use == true) {
- spin_lock(&stream->pcm_lock);
- list_del(&kbufs->node);
- spin_unlock(&stream->pcm_lock);
- kfree(kbufs);
- }
- }
- /* update bytes sent */
- stream->cumm_bytes += stream->curr_bytes;
- stream->curr_bytes = 0;
- if (list_empty(&stream->bufs)) {
- /* no user buffer available */
- pr_debug("Null buffer stream status %d\n", stream->status);
- stream->prev = stream->status;
- stream->status = STREAM_INIT;
- pr_debug("new stream status = %d\n", stream->status);
- if (stream->need_draining == true) {
- pr_debug("draining stream\n");
- if (sst_create_short_msg(&msg)) {
- pr_err("mem allocation failed\n");
- return -ENOMEM;
- }
- sst_fill_header(&msg->header, IPC_IA_DRAIN_STREAM,
- 0, str_id);
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&msg->node,
- &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- } else if (stream->data_blk.on == true) {
- pr_debug("user list empty.. wake\n");
- /* unblock */
- stream->data_blk.ret_code = 0;
- stream->data_blk.condition = true;
- stream->data_blk.on = false;
- wake_up(&sst_drv_ctx->wait_queue);
- }
- return 0;
- }
-
- /* create list */
- i = sst_create_sg_list(stream, &sg_list);
-
- /* post msg */
- if (sst_create_large_msg(&msg))
- return -ENOMEM;
-
- sst_fill_header(&msg->header, IPC_IA_PLAY_FRAMES, 1, str_id);
- msg->header.part.data = sizeof(u32) + sizeof(sg_list);
- memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
- memcpy(msg->mailbox_data + sizeof(u32), &sg_list, sizeof(sg_list));
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- return 0;
-
-}
-
-/**
- * sst_capture_frame - Send msg for capturing stream frames
- *
- * @str_id: ID of stream
- *
- * This function is called to capture data from the firmware
- */
-int sst_capture_frame(int str_id)
-{
- int i = 0, retval = 0;
- struct ipc_post *msg = NULL;
- struct sst_frame_info sg_list = {0};
- struct sst_stream_bufs *kbufs = NULL, *_kbufs;
- struct stream_info *stream;
-
-
- pr_debug("capture frame for %d\n", str_id);
- retval = sst_validate_strid(str_id);
- if (retval)
- return retval;
- stream = &sst_drv_ctx->streams[str_id];
- /* clear prev sent buffers */
- list_for_each_entry_safe(kbufs, _kbufs, &stream->bufs, node) {
- if (kbufs->in_use == true) {
- list_del(&kbufs->node);
- kfree(kbufs);
- pr_debug("del node\n");
- }
- }
- if (list_empty(&stream->bufs)) {
- /* no user buffer available */
- pr_debug("Null buffer!!!!stream status %d\n",
- stream->status);
- stream->prev = stream->status;
- stream->status = STREAM_INIT;
- pr_debug("new stream status = %d\n",
- stream->status);
- if (stream->data_blk.on == true) {
- pr_debug("user list empty.. wake\n");
- /* unblock */
- stream->data_blk.ret_code = 0;
- stream->data_blk.condition = true;
- stream->data_blk.on = false;
- wake_up(&sst_drv_ctx->wait_queue);
-
- }
- return 0;
- }
- /* create new sg list */
- i = sst_create_sg_list(stream, &sg_list);
-
- /* post msg */
- if (sst_create_large_msg(&msg))
- return -ENOMEM;
-
- sst_fill_header(&msg->header, IPC_IA_CAPT_FRAMES, 1, str_id);
- msg->header.part.data = sizeof(u32) + sizeof(sg_list);
- memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
- memcpy(msg->mailbox_data + sizeof(u32), &sg_list, sizeof(sg_list));
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
-
-
-	/*update bytes received*/
- stream->cumm_bytes += stream->curr_bytes;
- stream->curr_bytes = 0;
-
- pr_debug("Cum bytes = %d\n", stream->cumm_bytes);
- return 0;
-}
-
-/* This function calculates the minimum size of the given input buffers */
-static unsigned int calculate_min_size(struct snd_sst_buffs *bufs)
-{
- int i, min_val = bufs->buff_entry[0].size;
- for (i = 1 ; i < bufs->entries; i++) {
- if (bufs->buff_entry[i].size < min_val)
- min_val = bufs->buff_entry[i].size;
- }
- pr_debug("min_val = %d\n", min_val);
- return min_val;
-}
-
-static unsigned int calculate_max_size(struct snd_sst_buffs *bufs)
-{
- int i, max_val = bufs->buff_entry[0].size;
- for (i = 1 ; i < bufs->entries; i++) {
- if (bufs->buff_entry[i].size > max_val)
- max_val = bufs->buff_entry[i].size;
- }
- pr_debug("max_val = %d\n", max_val);
- return max_val;
-}
-
-/*This function is used to allocate input and output buffers to be sent to
-the firmware that will take encoded data and return decoded data*/
-static int sst_allocate_decode_buf(struct stream_info *str_info,
- struct snd_sst_dbufs *dbufs,
- unsigned int cum_input_given,
- unsigned int cum_output_given)
-{
-#ifdef CONFIG_MRST_RAR_HANDLER
- if (str_info->ops == STREAM_OPS_PLAYBACK_DRM) {
-
- if (dbufs->ibufs->type == SST_BUF_RAR &&
- dbufs->obufs->type == SST_BUF_RAR) {
- if (dbufs->ibufs->entries == dbufs->obufs->entries)
- return 0;
- else {
-				pr_err("RAR entries don't match\n");
- return -EINVAL;
- }
- } else
- str_info->decode_osize = cum_output_given;
- return 0;
-
- }
-#endif
- if (!str_info->decode_ibuf) {
- pr_debug("no i/p buffers, trying full size\n");
- str_info->decode_isize = cum_input_given;
- str_info->decode_ibuf = kzalloc(str_info->decode_isize,
- GFP_KERNEL);
- str_info->idecode_alloc = str_info->decode_isize;
- }
- if (!str_info->decode_ibuf) {
- pr_debug("buff alloc failed, try max size\n");
- str_info->decode_isize = calculate_max_size(dbufs->ibufs);
- str_info->decode_ibuf = kzalloc(
- str_info->decode_isize, GFP_KERNEL);
- str_info->idecode_alloc = str_info->decode_isize;
- }
- if (!str_info->decode_ibuf) {
- pr_debug("buff alloc failed, try min size\n");
- str_info->decode_isize = calculate_min_size(dbufs->ibufs);
- str_info->decode_ibuf = kzalloc(str_info->decode_isize,
- GFP_KERNEL);
- if (!str_info->decode_ibuf) {
- pr_err("mem allocation failed\n");
- return -ENOMEM;
- }
- str_info->idecode_alloc = str_info->decode_isize;
- }
- str_info->decode_osize = cum_output_given;
- if (str_info->decode_osize > sst_drv_ctx->mmap_len)
- str_info->decode_osize = sst_drv_ctx->mmap_len;
- return 0;
-}
-
-/*This function is used to send the message to firmware to decode the data*/
-static int sst_send_decode_mess(int str_id, struct stream_info *str_info,
- struct snd_sst_decode_info *dec_info)
-{
- struct ipc_post *msg = NULL;
- int retval = 0;
-
-	pr_debug("SST DBG:sst_send_decode_mess called\n");
-
- if (str_info->decode_ibuf_type == SST_BUF_RAR) {
-#ifdef CONFIG_MRST_RAR_HANDLER
- dec_info->frames_in.addr[0].addr =
- (unsigned long)str_info->decode_ibuf;
- dec_info->frames_in.addr[0].size =
- str_info->decode_isize;
-#endif
-
- } else {
- dec_info->frames_in.addr[0].addr = virt_to_phys((void *)
- str_info->decode_ibuf);
- dec_info->frames_in.addr[0].size = str_info->decode_isize;
- }
-
-
- if (str_info->decode_obuf_type == SST_BUF_RAR) {
-#ifdef CONFIG_MRST_RAR_HANDLER
- dec_info->frames_out.addr[0].addr =
- (unsigned long)str_info->decode_obuf;
- dec_info->frames_out.addr[0].size = str_info->decode_osize;
-#endif
-
- } else {
- dec_info->frames_out.addr[0].addr = virt_to_phys((void *)
- str_info->decode_obuf) ;
- dec_info->frames_out.addr[0].size = str_info->decode_osize;
- }
-
- dec_info->frames_in.num_entries = 1;
- dec_info->frames_out.num_entries = 1;
- dec_info->frames_in.rsrvd = 0;
- dec_info->frames_out.rsrvd = 0;
- dec_info->input_bytes_consumed = 0;
- dec_info->output_bytes_produced = 0;
- if (sst_create_large_msg(&msg)) {
- pr_err("message creation failed\n");
- return -ENOMEM;
- }
-
- sst_fill_header(&msg->header, IPC_IA_DECODE_FRAMES, 1, str_id);
- msg->header.part.data = sizeof(u32) + sizeof(*dec_info);
- memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
- memcpy(msg->mailbox_data + sizeof(u32), dec_info,
- sizeof(*dec_info));
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- str_info->data_blk.condition = false;
- str_info->data_blk.ret_code = 0;
- str_info->data_blk.on = true;
- str_info->data_blk.data = dec_info;
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- retval = sst_wait_interruptible(sst_drv_ctx, &str_info->data_blk);
- return retval;
-}
-
-#ifdef CONFIG_MRST_RAR_HANDLER
-static int sst_prepare_input_buffers_rar(struct stream_info *str_info,
- struct snd_sst_dbufs *dbufs,
- int *input_index, int *in_copied,
- int *input_index_valid_size, int *new_entry_flag)
-{
- int retval = 0, i;
-
- if (str_info->ops == STREAM_OPS_PLAYBACK_DRM) {
- struct RAR_buffer rar_buffers;
- __u32 info;
- retval = copy_from_user((void *) &info,
- dbufs->ibufs->buff_entry[i].buffer,
- sizeof(__u32));
- if (retval) {
- pr_err("cpy from user fail\n");
- return -EAGAIN;
- }
- rar_buffers.info.type = dbufs->ibufs->type;
- rar_buffers.info.size = dbufs->ibufs->buff_entry[i].size;
- rar_buffers.info.handle = info;
- pr_debug("rar in DnR(input buffer function)=0x%x size=0x%x",
- rar_buffers.info.handle,
- rar_buffers.info.size);
- retval = sst_get_RAR(&rar_buffers, 1);
- if (retval) {
- pr_debug("SST ERR: RAR API failed\n");
- return retval;
- }
- str_info->decode_ibuf =
- (void *) ((unsigned long) rar_buffers.bus_address);
- pr_debug("RAR buf addr in DnR (input buffer function)0x%lu",
- (unsigned long) str_info->decode_ibuf);
- pr_debug("rar in DnR decode function/output b_add rar =0x%lu",
- (unsigned long) rar_buffers.bus_address);
- *input_index = i + 1;
- str_info->decode_isize = dbufs->ibufs->buff_entry[i].size;
- str_info->decode_ibuf_type = dbufs->ibufs->type;
- *in_copied = str_info->decode_isize;
- }
- return retval;
-}
-#endif
-/* This function is used to prepare the kernel input buffers with contents
-   before sending them for decode */
-static int sst_prepare_input_buffers(struct stream_info *str_info,
- struct snd_sst_dbufs *dbufs,
- int *input_index, int *in_copied,
- int *input_index_valid_size, int *new_entry_flag)
-{
- int i, cpy_size, retval = 0;
-
- pr_debug("input_index = %d, input entries = %d\n",
- *input_index, dbufs->ibufs->entries);
- for (i = *input_index; i < dbufs->ibufs->entries; i++) {
-#ifdef CONFIG_MRST_RAR_HANDLER
- retval = sst_prepare_input_buffers_rar(str_info,
- dbufs, input_index, in_copied,
- input_index_valid_size, new_entry_flag);
- if (retval) {
- pr_err("In prepare input buffers for RAR\n");
- return -EIO;
- }
-#endif
- *input_index = i;
- if (*input_index_valid_size == 0)
- *input_index_valid_size =
- dbufs->ibufs->buff_entry[i].size;
- pr_debug("inout addr = %p, size = %d\n",
- dbufs->ibufs->buff_entry[i].buffer,
- *input_index_valid_size);
- pr_debug("decode_isize = %d, in_copied %d\n",
- str_info->decode_isize, *in_copied);
- if (*input_index_valid_size <=
- (str_info->decode_isize - *in_copied))
- cpy_size = *input_index_valid_size;
- else
- cpy_size = str_info->decode_isize - *in_copied;
-
- pr_debug("cpy size = %d\n", cpy_size);
- if (!dbufs->ibufs->buff_entry[i].buffer) {
- pr_err("i/p buffer is null\n");
- return -EINVAL;
- }
- pr_debug("Try copy To %p, From %p, size %d\n",
- str_info->decode_ibuf + *in_copied,
- dbufs->ibufs->buff_entry[i].buffer, cpy_size);
-
- retval =
- copy_from_user((void *)(str_info->decode_ibuf + *in_copied),
- (void *) dbufs->ibufs->buff_entry[i].buffer,
- cpy_size);
- if (retval) {
- pr_err("copy from user failed\n");
- return -EIO;
- }
- *in_copied += cpy_size;
- *input_index_valid_size -= cpy_size;
- pr_debug("in buff size = %d, in_copied = %d\n",
- *input_index_valid_size, *in_copied);
- if (*input_index_valid_size != 0) {
- pr_debug("more input buffers left\n");
- dbufs->ibufs->buff_entry[i].buffer += cpy_size;
- break;
- }
- if (*in_copied == str_info->decode_isize &&
- *input_index_valid_size == 0 &&
- (i+1) <= dbufs->ibufs->entries) {
- pr_debug("all input buffers copied\n");
- *new_entry_flag = true;
- *input_index = i + 1;
- break;
- }
- }
- return retval;
-}
-
-/* This function is used to copy the decoded data from the kernel buffers to
-   the user output buffers after decode */
-static int sst_prepare_output_buffers(struct stream_info *str_info,
- struct snd_sst_dbufs *dbufs,
- int *output_index, int output_size,
- int *out_copied)
-
-{
- int i, cpy_size, retval = 0;
- pr_debug("output_index = %d, output entries = %d\n",
- *output_index,
- dbufs->obufs->entries);
- for (i = *output_index; i < dbufs->obufs->entries; i++) {
- *output_index = i;
- pr_debug("output addr = %p, size = %d\n",
- dbufs->obufs->buff_entry[i].buffer,
- dbufs->obufs->buff_entry[i].size);
- pr_debug("output_size = %d, out_copied = %d\n",
- output_size, *out_copied);
- if (dbufs->obufs->buff_entry[i].size <
- (output_size - *out_copied))
- cpy_size = dbufs->obufs->buff_entry[i].size;
- else
- cpy_size = output_size - *out_copied;
- pr_debug("cpy size = %d\n", cpy_size);
- pr_debug("Try copy To: %p, From %p, size %d\n",
- dbufs->obufs->buff_entry[i].buffer,
- sst_drv_ctx->mmap_mem + *out_copied,
- cpy_size);
- retval = copy_to_user(dbufs->obufs->buff_entry[i].buffer,
- sst_drv_ctx->mmap_mem + *out_copied,
- cpy_size);
- if (retval) {
- pr_err("copy to user failed\n");
- return -EIO;
- } else
- pr_debug("copy to user passed\n");
- *out_copied += cpy_size;
- dbufs->obufs->buff_entry[i].size -= cpy_size;
- pr_debug("o/p buff size %d, out_copied %d\n",
- dbufs->obufs->buff_entry[i].size, *out_copied);
- if (dbufs->obufs->buff_entry[i].size != 0) {
- *output_index = i;
- dbufs->obufs->buff_entry[i].buffer += cpy_size;
- break;
- } else if (*out_copied == output_size) {
- *output_index = i + 1;
- break;
- }
- }
- return retval;
-}
-
-/**
- * sst_decode - Send msg for decoding frames
- *
- * @str_id: ID of stream
- * @dbufs: structure that holds the user input and output buffers and sizes
- *
- * This function is called to send data to the firmware for decoding
- */
-int sst_decode(int str_id, struct snd_sst_dbufs *dbufs)
-{
- int retval = 0, i;
- unsigned long long total_input = 0 , total_output = 0;
- unsigned int cum_input_given = 0 , cum_output_given = 0;
- int copy_in_done = false, copy_out_done = false;
- int input_index = 0, output_index = 0;
- int input_index_valid_size = 0;
- int in_copied, out_copied;
- int new_entry_flag;
- u64 output_size;
- struct stream_info *str_info;
- struct snd_sst_decode_info dec_info;
- unsigned long long input_bytes, output_bytes;
-
- sst_drv_ctx->scard_ops->power_down_pmic();
- pr_debug("Powering_down_PMIC...\n");
-
- retval = sst_validate_strid(str_id);
- if (retval)
- return retval;
-
- str_info = &sst_drv_ctx->streams[str_id];
- if (str_info->status != STREAM_INIT) {
- pr_err("invalid stream state = %d\n",
- str_info->status);
- return -EINVAL;
- }
-
- str_info->prev = str_info->status;
- str_info->status = STREAM_DECODE;
-
- for (i = 0; i < dbufs->ibufs->entries; i++)
- cum_input_given += dbufs->ibufs->buff_entry[i].size;
- for (i = 0; i < dbufs->obufs->entries; i++)
- cum_output_given += dbufs->obufs->buff_entry[i].size;
-
- /* input and output buffer allocation */
- retval = sst_allocate_decode_buf(str_info, dbufs,
- cum_input_given, cum_output_given);
- if (retval) {
- pr_err("mem allocation failed, abort!!!\n");
- retval = -ENOMEM;
- goto finish;
- }
-
- str_info->decode_isize = str_info->idecode_alloc;
- str_info->decode_ibuf_type = dbufs->ibufs->type;
- str_info->decode_obuf_type = dbufs->obufs->type;
-
- while ((copy_out_done == false) && (copy_in_done == false)) {
- in_copied = 0;
- new_entry_flag = false;
- retval = sst_prepare_input_buffers(str_info,
- dbufs, &input_index, &in_copied,
- &input_index_valid_size, &new_entry_flag);
- if (retval) {
- pr_err("prepare in buffers failed\n");
- goto finish;
- }
-
- if (str_info->ops != STREAM_OPS_PLAYBACK_DRM)
- str_info->decode_obuf = sst_drv_ctx->mmap_mem;
-
-#ifdef CONFIG_MRST_RAR_HANDLER
- else {
- if (dbufs->obufs->type == SST_BUF_RAR) {
- struct RAR_buffer rar_buffers;
- __u32 info;
-
- pr_debug("DRM");
- retval = copy_from_user((void *) &info,
- dbufs->obufs->
- buff_entry[output_index].buffer,
- sizeof(__u32));
-
- rar_buffers.info.size = dbufs->obufs->
- buff_entry[output_index].size;
- rar_buffers.info.handle = info;
- retval = sst_get_RAR(&rar_buffers, 1);
- if (retval)
- return retval;
-
- str_info->decode_obuf = (void *)((unsigned long)
- rar_buffers.bus_address);
- str_info->decode_osize = dbufs->obufs->
- buff_entry[output_index].size;
- str_info->decode_obuf_type = dbufs->obufs->type;
- pr_debug("DRM handling\n");
- pr_debug("o/p_add=0x%lu Size=0x%x\n",
- (unsigned long) str_info->decode_obuf,
- str_info->decode_osize);
- } else {
- str_info->decode_obuf = sst_drv_ctx->mmap_mem;
- str_info->decode_osize = dbufs->obufs->
- buff_entry[output_index].size;
-
- }
- }
-#endif
- if (str_info->ops != STREAM_OPS_PLAYBACK_DRM) {
- if (str_info->decode_isize > in_copied) {
- str_info->decode_isize = in_copied;
- pr_debug("i/p size = %d\n",
- str_info->decode_isize);
- }
- }
-
-
- retval = sst_send_decode_mess(str_id, str_info, &dec_info);
- if (retval || dec_info.input_bytes_consumed == 0) {
- pr_err("SST ERR: mess failed or no input consumed\n");
- goto finish;
- }
- input_bytes = dec_info.input_bytes_consumed;
- output_bytes = dec_info.output_bytes_produced;
-
- pr_debug("in_copied=%d, con=%lld, prod=%lld\n",
- in_copied, input_bytes, output_bytes);
- if (dbufs->obufs->type == SST_BUF_RAR) {
- output_index += 1;
- if (output_index == dbufs->obufs->entries) {
- copy_in_done = true;
- pr_debug("all i/p cpy done\n");
- }
- total_output += output_bytes;
- } else {
- out_copied = 0;
- output_size = output_bytes;
- retval = sst_prepare_output_buffers(str_info, dbufs,
- &output_index, output_size, &out_copied);
- if (retval) {
- pr_err("prep out buff fail\n");
- goto finish;
- }
- if (str_info->ops != STREAM_OPS_PLAYBACK_DRM) {
- if (in_copied != input_bytes) {
- int bytes_left = in_copied -
- input_bytes;
- pr_debug("bytes %d\n",
- bytes_left);
- if (new_entry_flag == true)
- input_index--;
- while (bytes_left) {
- struct snd_sst_buffs *ibufs;
- struct snd_sst_buff_entry
- *buff_entry;
- unsigned int size_sent;
-
- ibufs = dbufs->ibufs;
- buff_entry =
- &ibufs->buff_entry[input_index];
- size_sent = buff_entry->size -
- input_index_valid_size;
- if (bytes_left == size_sent) {
- bytes_left = 0;
- } else if (bytes_left <
- size_sent) {
- buff_entry->buffer +=
- (size_sent -
- bytes_left);
- buff_entry->size -=
- (size_sent -
- bytes_left);
- bytes_left = 0;
- } else {
- bytes_left -= size_sent;
- input_index--;
- input_index_valid_size =
- 0;
- }
- }
-
- }
- }
-
- total_output += out_copied;
- if (str_info->decode_osize != out_copied) {
- str_info->decode_osize -= out_copied;
- pr_debug("output size modified = %d\n",
- str_info->decode_osize);
- }
- }
- total_input += input_bytes;
-
- if (str_info->ops == STREAM_OPS_PLAYBACK_DRM) {
- if (total_input == cum_input_given)
- copy_in_done = true;
- copy_out_done = true;
-
- } else {
- if (total_output == cum_output_given) {
- copy_out_done = true;
- pr_debug("all o/p cpy done\n");
- }
-
- if (total_input == cum_input_given) {
- copy_in_done = true;
- pr_debug("all i/p cpy done\n");
- }
- }
-
- pr_debug("copy_out = %d, copy_in = %d\n",
- copy_out_done, copy_in_done);
- }
-
-finish:
- dbufs->input_bytes_consumed = total_input;
- dbufs->output_bytes_produced = total_output;
- str_info->status = str_info->prev;
- str_info->prev = STREAM_DECODE;
- kfree(str_info->decode_ibuf);
- str_info->decode_ibuf = NULL;
- return retval;
-}
diff --git a/drivers/staging/intel_sst/intelmid.c b/drivers/staging/intel_sst/intelmid.c
deleted file mode 100644
index 492b660246b4..000000000000
--- a/drivers/staging/intel_sst/intelmid.c
+++ /dev/null
@@ -1,1022 +0,0 @@
-/*
- * intelmid.c - Intel Sound card driver for MID
- *
- * Copyright (C) 2008-10 Intel Corp
- * Authors: Harsha Priya <priya.harsha@intel.com>
- * Vinod Koul <vinod.koul@intel.com>
- * Dharageswari R <dharageswari.r@intel.com>
- * KP Jeeja <jeeja.kp@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * ALSA driver for Intel MID sound card chipset
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/io.h>
-#include <linux/platform_device.h>
-#include <linux/interrupt.h>
-#include <linux/sched.h>
-#include <linux/firmware.h>
-#include <linux/input.h>
-#include <sound/control.h>
-#include <asm/mrst.h>
-#include <sound/pcm.h>
-#include <sound/jack.h>
-#include <sound/pcm_params.h>
-#include <sound/initval.h>
-#include <linux/gpio.h>
-#include "intel_sst.h"
-#include "intel_sst_ioctl.h"
-#include "intel_sst_fw_ipc.h"
-#include "intel_sst_common.h"
-#include "intelmid_snd_control.h"
-#include "intelmid_adc_control.h"
-#include "intelmid.h"
-
-MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>");
-MODULE_AUTHOR("Harsha Priya <priya.harsha@intel.com>");
-MODULE_AUTHOR("Dharageswari R <dharageswari.r@intel.com>");
-MODULE_AUTHOR("KP Jeeja <jeeja.kp@intel.com>");
-MODULE_DESCRIPTION("Intel MAD Sound card driver");
-MODULE_LICENSE("GPL v2");
-MODULE_SUPPORTED_DEVICE("{Intel,Intel_MAD}");
-
-
-static int card_index = SNDRV_DEFAULT_IDX1;/* Index 0-MAX */
-static char *card_id = SNDRV_DEFAULT_STR1; /* ID for this card */
-
-module_param(card_index, int, 0444);
-MODULE_PARM_DESC(card_index, "Index value for INTELMAD soundcard.");
-module_param(card_id, charp, 0444);
-MODULE_PARM_DESC(card_id, "ID string for INTELMAD soundcard.");
-
-int sst_card_vendor_id;
-int intelmid_audio_interrupt_enable;/*checkpatch fix*/
-struct snd_intelmad *intelmad_drv;
-
-#define INFO(_cpu_id, _irq_cache, _size) \
- ((kernel_ulong_t)&(struct snd_intelmad_probe_info) { \
- .cpu_id = (_cpu_id), \
- .irq_cache = (_irq_cache), \
- .size = (_size), \
- })
-/* Data path functionalities */
-static struct snd_pcm_hardware snd_intelmad_stream = {
- .info = (SNDRV_PCM_INFO_INTERLEAVED |
- SNDRV_PCM_INFO_DOUBLE |
- SNDRV_PCM_INFO_PAUSE |
- SNDRV_PCM_INFO_RESUME |
- SNDRV_PCM_INFO_MMAP|
- SNDRV_PCM_INFO_MMAP_VALID |
- SNDRV_PCM_INFO_BLOCK_TRANSFER |
- SNDRV_PCM_INFO_SYNC_START),
- .formats = (SNDRV_PCM_FMTBIT_S16 | SNDRV_PCM_FMTBIT_U16 |
- SNDRV_PCM_FMTBIT_S24 | SNDRV_PCM_FMTBIT_U24 |
- SNDRV_PCM_FMTBIT_S32 | SNDRV_PCM_FMTBIT_U32),
- .rates = (SNDRV_PCM_RATE_8000|
- SNDRV_PCM_RATE_44100 |
- SNDRV_PCM_RATE_48000),
- .rate_min = MIN_RATE,
-
- .rate_max = MAX_RATE,
- .channels_min = MIN_CHANNEL,
- .channels_max = MAX_CHANNEL_AMIC,
- .buffer_bytes_max = MAX_BUFFER,
- .period_bytes_min = MIN_PERIOD_BYTES,
- .period_bytes_max = MAX_PERIOD_BYTES,
- .periods_min = MIN_PERIODS,
- .periods_max = MAX_PERIODS,
- .fifo_size = FIFO_SIZE,
-};
-
-
-/**
- * snd_intelmad_pcm_trigger - stream activities are handled here
- *
- * @substream: substream for which the stream function is called
- * @cmd: the stream command requested from the upper layer
- *
- * This function is called whenever a stream activity is invoked
- */
-static int snd_intelmad_pcm_trigger(struct snd_pcm_substream *substream,
- int cmd)
-{
- int ret_val = 0, str_id;
- struct snd_intelmad *intelmaddata;
- struct mad_stream_pvt *stream;
- struct intel_sst_pcm_control *sst_ops;
-
- WARN_ON(!substream);
-
- intelmaddata = snd_pcm_substream_chip(substream);
- stream = substream->runtime->private_data;
-
- WARN_ON(!intelmaddata->sstdrv_ops);
- WARN_ON(!intelmaddata->sstdrv_ops->scard_ops);
- sst_ops = intelmaddata->sstdrv_ops->pcm_control;
- str_id = stream->stream_info.str_id;
-
- switch (cmd) {
- case SNDRV_PCM_TRIGGER_START:
- pr_debug("Trigger Start\n");
- ret_val = sst_ops->device_control(SST_SND_START, &str_id);
- if (ret_val)
- return ret_val;
- stream->stream_status = RUNNING;
- stream->substream = substream;
- break;
- case SNDRV_PCM_TRIGGER_STOP:
- pr_debug("in stop\n");
- ret_val = sst_ops->device_control(SST_SND_DROP, &str_id);
- if (ret_val)
- return ret_val;
- stream->stream_status = DROPPED;
- break;
- case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
- pr_debug("in pause\n");
- ret_val = sst_ops->device_control(SST_SND_PAUSE, &str_id);
- if (ret_val)
- return ret_val;
- stream->stream_status = PAUSED;
- break;
- case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
- pr_debug("in pause release\n");
- ret_val = sst_ops->device_control(SST_SND_RESUME, &str_id);
- if (ret_val)
- return ret_val;
- stream->stream_status = RUNNING;
- break;
- default:
- return -EINVAL;
- }
- return ret_val;
-}
-
-/**
- * snd_intelmad_pcm_prepare - internal preparation before starting a stream
- *
- * @substream: substream for which the function is called
- *
- * This function is called when a stream is started, for internal preparation.
- */
-static int snd_intelmad_pcm_prepare(struct snd_pcm_substream *substream)
-{
- struct mad_stream_pvt *stream;
- int ret_val = 0;
- struct snd_intelmad *intelmaddata;
-
- pr_debug("pcm_prepare called\n");
-
- WARN_ON(!substream);
- stream = substream->runtime->private_data;
- intelmaddata = snd_pcm_substream_chip(substream);
- pr_debug("pb cnt = %d cap cnt = %d\n",\
- intelmaddata->playback_cnt,
- intelmaddata->capture_cnt);
-
- if (stream->stream_info.str_id) {
- pr_debug("Prepare called for already set stream\n");
- ret_val = intelmaddata->sstdrv_ops->pcm_control->device_control(
- SST_SND_DROP, &stream->stream_info.str_id);
- return ret_val;
- }
-
- ret_val = snd_intelmad_alloc_stream(substream);
- if (ret_val < 0)
- return ret_val;
- stream->dbg_cum_bytes = 0;
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
- intelmaddata->playback_cnt++;
- else
- intelmaddata->capture_cnt++;
- /* return back the stream id */
- snprintf(substream->pcm->id, sizeof(substream->pcm->id),
- "%d", stream->stream_info.str_id);
- pr_debug("stream id to user = %s\n",
- substream->pcm->id);
-
- ret_val = snd_intelmad_init_stream(substream);
- if (ret_val)
- return ret_val;
- substream->runtime->hw.info = SNDRV_PCM_INFO_BLOCK_TRANSFER;
- return ret_val;
-}
-
-static int snd_intelmad_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *hw_params)
-{
- int ret_val;
-
- pr_debug("snd_intelmad_hw_params called\n");
- ret_val = snd_pcm_lib_malloc_pages(substream,
- params_buffer_bytes(hw_params));
- if (ret_val < 0)
- return ret_val;
- memset(substream->runtime->dma_area, 0,
- params_buffer_bytes(hw_params));
-
- return ret_val;
-}
-
-static int snd_intelmad_hw_free(struct snd_pcm_substream *substream)
-{
- pr_debug("snd_intelmad_hw_free called\n");
- return snd_pcm_lib_free_pages(substream);
-}
-
-/**
- * snd_intelmad_pcm_pointer - return the current buffer pointer processed by hw
- *
- * @substream: substream for which the function is called
- *
- * This function is called by the ALSA framework to get the current hw buffer
- * pointer when a period has elapsed
- */
-static snd_pcm_uframes_t snd_intelmad_pcm_pointer
- (struct snd_pcm_substream *substream)
-{
- /* struct snd_pcm_runtime *runtime = substream->runtime; */
- struct mad_stream_pvt *stream;
- struct snd_intelmad *intelmaddata;
- int ret_val;
-
- WARN_ON(!substream);
-
- intelmaddata = snd_pcm_substream_chip(substream);
- stream = substream->runtime->private_data;
- if (stream->stream_status == INIT)
- return 0;
-
- ret_val = intelmaddata->sstdrv_ops->pcm_control->device_control(
- SST_SND_BUFFER_POINTER, &stream->stream_info);
- if (ret_val) {
- pr_err("error code = 0x%x\n", ret_val);
- return ret_val;
- }
- pr_debug("samples reported out 0x%llx\n",
- stream->stream_info.buffer_ptr);
- pr_debug("Frame bits:: %d period_count :: %d\n",
- (int)substream->runtime->frame_bits,
- (int)substream->runtime->period_size);
-
- return stream->stream_info.buffer_ptr;
-
-}
-
-/**
- * snd_intelmad_close - free parameters when the stream is stopped
- *
- * @substream: substream for which the function is called
- *
- * This function is called by the ALSA framework when the stream is stopped
- */
-static int snd_intelmad_close(struct snd_pcm_substream *substream)
-{
- struct snd_intelmad *intelmaddata;
- struct mad_stream_pvt *stream;
- int ret_val = 0, str_id;
-
- WARN_ON(!substream);
-
- stream = substream->runtime->private_data;
- str_id = stream->stream_info.str_id;
-
- pr_debug("sst: snd_intelmad_close called for %d\n", str_id);
- intelmaddata = snd_pcm_substream_chip(substream);
-
- pr_debug("str id = %d\n", stream->stream_info.str_id);
- if (stream->stream_info.str_id) {
- /* SST API to actually stop/free the stream */
- ret_val = intelmaddata->sstdrv_ops->pcm_control->close(str_id);
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
- intelmaddata->playback_cnt--;
- else
- intelmaddata->capture_cnt--;
- }
- pr_debug("snd_intelmad_close : pb cnt = %d cap cnt = %d\n",
- intelmaddata->playback_cnt, intelmaddata->capture_cnt);
- kfree(substream->runtime->private_data);
- return ret_val;
-}
-
-/**
- * snd_intelmad_open - set runtime parameters during stream start
- *
- * @substream: substream for which the function is called
- * @type: audio device type
- *
- * This function is called by ALSA framework when stream is started
- */
-static int snd_intelmad_open(struct snd_pcm_substream *substream,
- enum snd_sst_audio_device_type type)
-{
- struct snd_intelmad *intelmaddata;
- struct snd_pcm_runtime *runtime;
- struct mad_stream_pvt *stream;
-
- WARN_ON(!substream);
-
- pr_debug("snd_intelmad_open called\n");
-
- intelmaddata = snd_pcm_substream_chip(substream);
- runtime = substream->runtime;
- /* set the runtime hw parameter with local snd_pcm_hardware struct */
- runtime->hw = snd_intelmad_stream;
- if (intelmaddata->cpu_id == CPU_CHIP_LINCROFT) {
- /*
- * MRST firmware currently denies stereo recording requests.
- */
- if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
- runtime->hw.formats = (SNDRV_PCM_FMTBIT_S16 |
- SNDRV_PCM_FMTBIT_U16);
- runtime->hw.channels_max = 1;
- }
- }
- if (intelmaddata->cpu_id == CPU_CHIP_PENWELL) {
- runtime->hw = snd_intelmad_stream;
- runtime->hw.rates = SNDRV_PCM_RATE_48000;
- runtime->hw.rate_min = MAX_RATE;
- runtime->hw.formats = (SNDRV_PCM_FMTBIT_S24 |
- SNDRV_PCM_FMTBIT_U24);
- if (intelmaddata->sstdrv_ops->scard_ops->input_dev_id == AMIC)
- runtime->hw.channels_max = MAX_CHANNEL_AMIC;
- else
- runtime->hw.channels_max = MAX_CHANNEL_DMIC;
-
- }
- /* set up the internal data structure stream pointers based on whether
- this is a playback or capture stream */
- stream = kzalloc(sizeof(*stream), GFP_KERNEL);
- if (!stream)
- return -ENOMEM;
- stream->stream_info.str_id = 0;
- stream->device = type;
- stream->stream_status = INIT;
- runtime->private_data = stream;
- return snd_pcm_hw_constraint_integer(runtime,
- SNDRV_PCM_HW_PARAM_PERIODS);
-}
-
-static int snd_intelmad_headset_open(struct snd_pcm_substream *substream)
-{
- return snd_intelmad_open(substream, SND_SST_DEVICE_HEADSET);
-}
-
-static int snd_intelmad_ihf_open(struct snd_pcm_substream *substream)
-{
- return snd_intelmad_open(substream, SND_SST_DEVICE_IHF);
-}
-
-static int snd_intelmad_vibra_open(struct snd_pcm_substream *substream)
-{
- return snd_intelmad_open(substream, SND_SST_DEVICE_VIBRA);
-}
-
-static int snd_intelmad_haptic_open(struct snd_pcm_substream *substream)
-{
- return snd_intelmad_open(substream, SND_SST_DEVICE_HAPTIC);
-}
-
-static struct snd_pcm_ops snd_intelmad_headset_ops = {
- .open = snd_intelmad_headset_open,
- .close = snd_intelmad_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = snd_intelmad_hw_params,
- .hw_free = snd_intelmad_hw_free,
- .prepare = snd_intelmad_pcm_prepare,
- .trigger = snd_intelmad_pcm_trigger,
- .pointer = snd_intelmad_pcm_pointer,
-};
-
-static struct snd_pcm_ops snd_intelmad_ihf_ops = {
- .open = snd_intelmad_ihf_open,
- .close = snd_intelmad_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = snd_intelmad_hw_params,
- .hw_free = snd_intelmad_hw_free,
- .prepare = snd_intelmad_pcm_prepare,
- .trigger = snd_intelmad_pcm_trigger,
- .pointer = snd_intelmad_pcm_pointer,
-};
-
-static struct snd_pcm_ops snd_intelmad_vibra_ops = {
- .open = snd_intelmad_vibra_open,
- .close = snd_intelmad_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = snd_intelmad_hw_params,
- .hw_free = snd_intelmad_hw_free,
- .prepare = snd_intelmad_pcm_prepare,
- .trigger = snd_intelmad_pcm_trigger,
- .pointer = snd_intelmad_pcm_pointer,
-};
-
-static struct snd_pcm_ops snd_intelmad_haptic_ops = {
- .open = snd_intelmad_haptic_open,
- .close = snd_intelmad_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = snd_intelmad_hw_params,
- .hw_free = snd_intelmad_hw_free,
- .prepare = snd_intelmad_pcm_prepare,
- .trigger = snd_intelmad_pcm_trigger,
- .pointer = snd_intelmad_pcm_pointer,
-};
-
-static struct snd_pcm_ops snd_intelmad_capture_ops = {
- .open = snd_intelmad_headset_open,
- .close = snd_intelmad_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = snd_intelmad_hw_params,
- .hw_free = snd_intelmad_hw_free,
- .prepare = snd_intelmad_pcm_prepare,
- .trigger = snd_intelmad_pcm_trigger,
- .pointer = snd_intelmad_pcm_pointer,
-};
-
-int intelmad_get_mic_bias(void)
-{
- struct snd_pmic_ops *pmic_ops;
-
- if (!intelmad_drv || !intelmad_drv->sstdrv_ops)
- return -ENODEV;
- pmic_ops = intelmad_drv->sstdrv_ops->scard_ops;
- if (pmic_ops && pmic_ops->pmic_get_mic_bias)
- return pmic_ops->pmic_get_mic_bias(intelmad_drv);
- else
- return -ENODEV;
-}
-EXPORT_SYMBOL_GPL(intelmad_get_mic_bias);
-
-int intelmad_set_headset_state(int state)
-{
- struct snd_pmic_ops *pmic_ops;
-
- if (!intelmad_drv || !intelmad_drv->sstdrv_ops)
- return -ENODEV;
- pmic_ops = intelmad_drv->sstdrv_ops->scard_ops;
- if (pmic_ops && pmic_ops->pmic_set_headset_state)
- return pmic_ops->pmic_set_headset_state(state);
- else
- return -ENODEV;
-}
-EXPORT_SYMBOL_GPL(intelmad_set_headset_state);
-
-void sst_process_mad_jack_detection(struct work_struct *work)
-{
- u8 interrupt_status;
- struct mad_jack_msg_wq *mad_jack_detect =
- container_of(work, struct mad_jack_msg_wq, wq);
-
- struct snd_intelmad *intelmaddata =
- mad_jack_detect->intelmaddata;
-
- if (!intelmaddata)
- return;
-
- interrupt_status = mad_jack_detect->intsts;
- if (intelmaddata->sstdrv_ops && intelmaddata->sstdrv_ops->scard_ops
- && intelmaddata->sstdrv_ops->scard_ops->pmic_irq_cb) {
- intelmaddata->sstdrv_ops->scard_ops->pmic_irq_cb(
- (void *)intelmaddata, interrupt_status);
- intelmaddata->sstdrv_ops->scard_ops->pmic_jack_enable();
- }
- kfree(mad_jack_detect);
-}
-/**
- * snd_intelmad_intr_handler - interrupt handler
- *
- * @irq : irq number of the interrupt received
- * @dev: device context
- *
- * This function is called when an interrupt is raised at the sound card
- */
-static irqreturn_t snd_intelmad_intr_handler(int irq, void *dev)
-{
- struct snd_intelmad *intelmaddata =
- (struct snd_intelmad *)dev;
- u8 interrupt_status;
- struct mad_jack_msg_wq *mad_jack_msg;
- memcpy_fromio(&interrupt_status,
- ((void *)(intelmaddata->int_base)),
- sizeof(u8));
-
- mad_jack_msg = kzalloc(sizeof(*mad_jack_msg), GFP_ATOMIC);
- if (!mad_jack_msg)
- return IRQ_HANDLED;
- mad_jack_msg->intsts = interrupt_status;
- mad_jack_msg->intelmaddata = intelmaddata;
- INIT_WORK(&mad_jack_msg->wq, sst_process_mad_jack_detection);
- queue_work(intelmaddata->mad_jack_wq, &mad_jack_msg->wq);
-
- return IRQ_HANDLED;
-}
-
-void sst_mad_send_jack_report(struct snd_jack *jack,
- int buttonpressevent , int status)
-{
-
- if (!jack) {
- pr_debug("MAD error jack empty\n");
-
- } else {
- snd_jack_report(jack, status);
- /* button pressed and released */
- if (buttonpressevent)
- snd_jack_report(jack, 0);
- pr_debug("MAD sending jack report Done !!!\n");
- }
-}
-
-static int __devinit snd_intelmad_register_irq(
- struct snd_intelmad *intelmaddata, unsigned int regbase,
- unsigned int regsize)
-{
- int ret_val;
- char *drv_name;
-
- pr_debug("irq reg regbase 0x%x, regsize 0x%x\n",
- regbase, regsize);
- intelmaddata->int_base = ioremap_nocache(regbase, regsize);
- if (!intelmaddata->int_base)
- pr_err("Mapping of cache failed\n");
- pr_debug("irq = 0x%x\n", intelmaddata->irq);
- if (intelmaddata->cpu_id == CPU_CHIP_PENWELL)
- drv_name = DRIVER_NAME_MFLD;
- else
- drv_name = DRIVER_NAME_MRST;
- ret_val = request_irq(intelmaddata->irq,
- snd_intelmad_intr_handler,
- IRQF_SHARED, drv_name,
- intelmaddata);
- if (ret_val)
- pr_err("cannot register IRQ\n");
- return ret_val;
-}
-
-static int __devinit snd_intelmad_sst_register(
- struct snd_intelmad *intelmaddata)
-{
- int ret_val = 0;
- struct snd_pmic_ops *intelmad_vendor_ops[MAX_VENDORS] = {
- &snd_pmic_ops_fs,
- &snd_pmic_ops_mx,
- &snd_pmic_ops_nc,
- &snd_msic_ops
- };
-
- struct sc_reg_access vendor_addr = {0x00, 0x00, 0x00};
-
- if (intelmaddata->cpu_id == CPU_CHIP_LINCROFT) {
- ret_val = sst_sc_reg_access(&vendor_addr, PMIC_READ, 1);
- if (ret_val)
- return ret_val;
- sst_card_vendor_id = (vendor_addr.value & (MASK2|MASK1|MASK0));
- pr_debug("original n extrated vendor id = 0x%x %d\n",
- vendor_addr.value, sst_card_vendor_id);
- if (sst_card_vendor_id < 0 || sst_card_vendor_id > 2) {
- pr_err("vendor card not supported!!\n");
- return -EIO;
- }
- } else
- sst_card_vendor_id = 0x3;
-
- intelmaddata->sstdrv_ops->module_name = SST_CARD_NAMES;
- intelmaddata->sstdrv_ops->vendor_id = sst_card_vendor_id;
- BUG_ON(!intelmad_vendor_ops[sst_card_vendor_id]);
- intelmaddata->sstdrv_ops->scard_ops =
- intelmad_vendor_ops[sst_card_vendor_id];
-
- if (intelmaddata->cpu_id == CPU_CHIP_PENWELL) {
- intelmaddata->sstdrv_ops->scard_ops->pb_on = 0;
- intelmaddata->sstdrv_ops->scard_ops->cap_on = 0;
- intelmaddata->sstdrv_ops->scard_ops->input_dev_id = DMIC;
- intelmaddata->sstdrv_ops->scard_ops->output_dev_id =
- STEREO_HEADPHONE;
- intelmaddata->sstdrv_ops->scard_ops->lineout_dev_id = NONE;
- }
-
- /* registering with SST driver to get access to SST APIs to use */
- ret_val = register_sst_card(intelmaddata->sstdrv_ops);
- if (ret_val) {
- pr_err("sst card registration failed\n");
- return ret_val;
- }
- sst_card_vendor_id = intelmaddata->sstdrv_ops->vendor_id;
- intelmaddata->pmic_status = PMIC_UNINIT;
- return ret_val;
-}
-
-static void snd_intelmad_page_free(struct snd_pcm *pcm)
-{
- snd_pcm_lib_preallocate_free_for_all(pcm);
-}
-/* Driver Init/exit functionalities */
-/**
- * snd_intelmad_pcm_new - to setup pcm for the card
- *
- * @card: pointer to the sound card structure
- * @intelmaddata: pointer to internal context
- * @pb: playback count for this card
- * @cap: capture count for this card
- * @index: device index
- *
- * This function is called from probe function to set up pcm params
- * and functions
- */
-static int __devinit snd_intelmad_pcm_new(struct snd_card *card,
- struct snd_intelmad *intelmaddata,
- unsigned int pb, unsigned int cap, unsigned int index)
-{
- int ret_val = 0;
- struct snd_pcm *pcm;
- char name[32] = INTEL_MAD;
- struct snd_pcm_ops *pb_ops = NULL, *cap_ops = NULL;
-
- pr_debug("called for pb %d, cp %d, idx %d\n", pb, cap, index);
- ret_val = snd_pcm_new(card, name, index, pb, cap, &pcm);
- if (ret_val)
- return ret_val;
- /* setup the ops for playback and capture streams */
- switch (index) {
- case 0:
- pb_ops = &snd_intelmad_headset_ops;
- cap_ops = &snd_intelmad_capture_ops;
- break;
- case 1:
- pb_ops = &snd_intelmad_ihf_ops;
- cap_ops = &snd_intelmad_capture_ops;
- break;
- case 2:
- pb_ops = &snd_intelmad_vibra_ops;
- cap_ops = &snd_intelmad_capture_ops;
- break;
- case 3:
- pb_ops = &snd_intelmad_haptic_ops;
- cap_ops = &snd_intelmad_capture_ops;
- break;
- }
- if (pb)
- snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, pb_ops);
- if (cap)
- snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, cap_ops);
- /* setup private data which can be retrieved when required */
- pcm->private_data = intelmaddata;
- pcm->private_free = snd_intelmad_page_free;
- pcm->info_flags = 0;
- strncpy(pcm->name, card->shortname, strlen(card->shortname));
- /* allocate dma pages for ALSA stream operations */
- snd_pcm_lib_preallocate_pages_for_all(pcm,
- SNDRV_DMA_TYPE_CONTINUOUS,
- snd_dma_continuous_data(GFP_KERNEL),
- MIN_BUFFER, MAX_BUFFER);
- return ret_val;
-}
-
-static int __devinit snd_intelmad_pcm(struct snd_card *card,
- struct snd_intelmad *intelmaddata)
-{
- int ret_val = 0;
-
- WARN_ON(!card);
- WARN_ON(!intelmaddata);
- pr_debug("snd_intelmad_pcm called\n");
- ret_val = snd_intelmad_pcm_new(card, intelmaddata, 1, 1, 0);
- if (intelmaddata->cpu_id == CPU_CHIP_LINCROFT)
- return ret_val;
- ret_val = snd_intelmad_pcm_new(card, intelmaddata, 1, 0, 1);
- if (ret_val)
- return ret_val;
- ret_val = snd_intelmad_pcm_new(card, intelmaddata, 1, 0, 2);
- if (ret_val)
- return ret_val;
- return snd_intelmad_pcm_new(card, intelmaddata, 1, 0, 3);
-}
-
-/**
- * snd_intelmad_jack - set up the jack settings of the card
- *
- * @intelmaddata: pointer to internal context
- *
- * This function is called to send jack events
- */
-static int snd_intelmad_jack(struct snd_intelmad *intelmaddata)
-{
- struct snd_jack *jack;
- int retval;
-
- pr_debug("snd_intelmad_jack called\n");
- jack = &intelmaddata->jack[0].jack;
- snd_jack_set_key(jack, SND_JACK_BTN_0, KEY_PHONE);
- retval = snd_jack_new(intelmaddata->card, "Intel(R) MID Audio Jack",
- SND_JACK_HEADPHONE | SND_JACK_HEADSET |
- SW_JACK_PHYSICAL_INSERT | SND_JACK_BTN_0
- | SND_JACK_BTN_1, &jack);
- pr_debug("snd_intelmad_jack called\n");
- if (retval < 0)
- return retval;
- snd_jack_report(jack, 0);
-
- jack->private_data = jack;
- intelmaddata->jack[0].jack = *jack;
-
- return retval;
-}
-
-/**
- * snd_intelmad_mixer- to setup mixer settings of the card
- *
- * @intelmaddata: pointer to internal context
- *
- * This function is called from probe function to set up mixer controls
- */
-static int __devinit snd_intelmad_mixer(struct snd_intelmad *intelmaddata)
-{
- struct snd_card *card;
- unsigned int idx;
- int ret_val = 0, max_controls = 0;
- char *mixername = "IntelMAD Controls";
- struct snd_kcontrol_new *controls;
-
- WARN_ON(!intelmaddata);
-
- card = intelmaddata->card;
- strncpy(card->mixername, mixername, sizeof(card->mixername)-1);
- /* add all widget controls and expose the same */
- if (intelmaddata->cpu_id == CPU_CHIP_PENWELL) {
- max_controls = MAX_CTRL_MFLD;
- controls = snd_intelmad_controls_mfld;
- } else {
- max_controls = MAX_CTRL_MRST;
- controls = snd_intelmad_controls_mrst;
- }
- for (idx = 0; idx < max_controls; idx++) {
- ret_val = snd_ctl_add(card,
- snd_ctl_new1(&controls[idx],
- intelmaddata));
- pr_debug("mixer[idx]=%d added\n", idx);
- if (ret_val) {
- pr_err("in adding of control index = %d\n", idx);
- break;
- }
- }
- return ret_val;
-}
-
-static int snd_intelmad_dev_free(struct snd_device *device)
-{
- struct snd_intelmad *intelmaddata;
-
- WARN_ON(!device);
-
- intelmaddata = device->device_data;
-
- pr_debug("snd_intelmad_dev_free called\n");
- unregister_sst_card(intelmaddata->sstdrv_ops);
-
- /* free allocated memory for internal context */
- destroy_workqueue(intelmaddata->mad_jack_wq);
- device->device_data = NULL;
- kfree(intelmaddata->sstdrv_ops);
- kfree(intelmaddata);
-
- return 0;
-}
-
-static int __devinit snd_intelmad_create(
- struct snd_intelmad *intelmaddata,
- struct snd_card *card)
-{
- int ret_val;
- static struct snd_device_ops ops = {
- .dev_free = snd_intelmad_dev_free,
- };
-
- WARN_ON(!intelmaddata);
- WARN_ON(!card);
- /* ALSA api to register for the device */
- ret_val = snd_device_new(card, SNDRV_DEV_LOWLEVEL, intelmaddata, &ops);
- return ret_val;
-}
-
-/**
- * snd_intelmad_probe - function registered for init
- * @pdev: pointer to the device structure
- *
- * This function is called when the device is initialized
- */
-int __devinit snd_intelmad_probe(struct platform_device *pdev)
-{
- struct snd_card *card;
- int ret_val;
- struct snd_intelmad *intelmaddata;
- const struct platform_device_id *id = platform_get_device_id(pdev);
- struct snd_intelmad_probe_info *info = (void *)id->driver_data;
-
- pr_debug("probe for %s cpu_id %d\n", pdev->name, info->cpu_id);
- pr_debug("rq_chache %x of size %x\n", info->irq_cache, info->size);
- if (!strcmp(pdev->name, DRIVER_NAME_MRST))
- pr_debug("detected MRST\n");
- else if (!strcmp(pdev->name, DRIVER_NAME_MFLD))
- pr_debug("detected MFLD\n");
- else {
- pr_err("detected unknown device abort!!\n");
- return -EIO;
- }
- if ((info->cpu_id < CPU_CHIP_LINCROFT) ||
- (info->cpu_id > CPU_CHIP_PENWELL)) {
- pr_err("detected unknown cpu_id abort!!\n");
- return -EIO;
- }
- /* allocate memory for saving internal context and working */
- intelmaddata = kzalloc(sizeof(*intelmaddata), GFP_KERNEL);
- if (!intelmaddata) {
- pr_debug("mem alloctn fail\n");
- return -ENOMEM;
- }
- intelmad_drv = intelmaddata;
-
- /* allocate memory for LPE API set */
- intelmaddata->sstdrv_ops = kzalloc(sizeof(struct intel_sst_card_ops),
- GFP_KERNEL);
- if (!intelmaddata->sstdrv_ops) {
- pr_err("mem allocation for ops fail\n");
- kfree(intelmaddata);
- return -ENOMEM;
- }
-
- intelmaddata->cpu_id = info->cpu_id;
- /* create a card instance with ALSA framework */
- ret_val = snd_card_create(card_index, card_id, THIS_MODULE, 0, &card);
- if (ret_val) {
- pr_err("snd_card_create fail\n");
- goto free_allocs;
- }
-
- intelmaddata->pdev = pdev;
- intelmaddata->irq = platform_get_irq(pdev, 0);
- platform_set_drvdata(pdev, intelmaddata);
- intelmaddata->card = card;
- intelmaddata->card_id = card_id;
- intelmaddata->card_index = card_index;
- intelmaddata->master_mute = UNMUTE;
- intelmaddata->playback_cnt = intelmaddata->capture_cnt = 0;
- strncpy(card->driver, INTEL_MAD, strlen(INTEL_MAD));
- strncpy(card->shortname, INTEL_MAD, strlen(INTEL_MAD));
-
- intelmaddata->sstdrv_ops->module_name = SST_CARD_NAMES;
- /* registering with LPE driver to get access to SST APIs to use */
- ret_val = snd_intelmad_sst_register(intelmaddata);
- if (ret_val) {
- pr_err("snd_intelmad_sst_register failed\n");
- goto set_null_data;
- }
-
- intelmaddata->pmic_status = PMIC_INIT;
-
- ret_val = snd_intelmad_pcm(card, intelmaddata);
- if (ret_val) {
- pr_err("snd_intelmad_pcm failed\n");
- goto free_sst;
- }
-
- ret_val = snd_intelmad_mixer(intelmaddata);
- if (ret_val) {
- pr_err("snd_intelmad_mixer failed\n");
- goto free_card;
- }
-
- ret_val = snd_intelmad_jack(intelmaddata);
- if (ret_val) {
- pr_err("snd_intelmad_jack failed\n");
- goto free_card;
- }
- intelmaddata->adc_address = mid_initialize_adc();
-
- /*create work queue for jack interrupt*/
- INIT_WORK(&intelmaddata->mad_jack_msg.wq,
- sst_process_mad_jack_detection);
-
- intelmaddata->mad_jack_wq = create_workqueue("sst_mad_jack_wq");
- if (!intelmaddata->mad_jack_wq)
- goto free_card;
-
- ret_val = snd_intelmad_register_irq(intelmaddata,
- info->irq_cache, info->size);
- if (ret_val) {
- pr_err("snd_intelmad_register_irq fail\n");
- goto free_mad_jack_wq;
- }
-
- /* internal function call to register device with ALSA */
- ret_val = snd_intelmad_create(intelmaddata, card);
- if (ret_val) {
- pr_err("snd_intelmad_create failed\n");
- goto set_pvt_data;
- }
- card->private_data = intelmaddata;
- snd_card_set_dev(card, &pdev->dev);
- ret_val = snd_card_register(card);
- if (ret_val) {
- pr_err("snd_card_register failed\n");
- goto set_pvt_data;
- }
- if (pdev->dev.platform_data) {
- int gpio_amp = *(int *)pdev->dev.platform_data;
- if (gpio_request_one(gpio_amp, GPIOF_OUT_INIT_LOW, "amp power"))
- gpio_amp = 0;
- intelmaddata->sstdrv_ops->scard_ops->gpio_amp = gpio_amp;
- }
-
- pr_debug("snd_intelmad_probe complete\n");
- return ret_val;
-
-set_pvt_data:
- card->private_data = NULL;
-free_mad_jack_wq:
- destroy_workqueue(intelmaddata->mad_jack_wq);
-free_card:
- snd_card_free(intelmaddata->card);
-free_sst:
- unregister_sst_card(intelmaddata->sstdrv_ops);
-set_null_data:
- platform_set_drvdata(pdev, NULL);
-free_allocs:
- pr_err("probe failed\n");
- snd_card_free(card);
- kfree(intelmaddata->sstdrv_ops);
- kfree(intelmaddata);
- return ret_val;
-}
-
-
-static int snd_intelmad_remove(struct platform_device *pdev)
-{
- struct snd_intelmad *intelmaddata = platform_get_drvdata(pdev);
-
- if (intelmaddata) {
- if (intelmaddata->sstdrv_ops->scard_ops->gpio_amp)
- gpio_free(intelmaddata->sstdrv_ops->scard_ops->gpio_amp);
- free_irq(intelmaddata->irq, intelmaddata);
- snd_card_free(intelmaddata->card);
- }
- intelmad_drv = NULL;
- platform_set_drvdata(pdev, NULL);
- return 0;
-}
-
-/*********************************************************************
- * Driver initialization and exit
- *********************************************************************/
-static const struct platform_device_id snd_intelmad_ids[] = {
- {DRIVER_NAME_MRST, INFO(CPU_CHIP_LINCROFT, AUDINT_BASE, 1)},
- {DRIVER_NAME_MFLD, INFO(CPU_CHIP_PENWELL, 0xFFFF7FCD, 1)},
- {"", 0},
-
-};
-
-static struct platform_driver snd_intelmad_driver = {
- .driver = {
- .owner = THIS_MODULE,
- .name = "intel_mid_sound_card",
- },
- .id_table = snd_intelmad_ids,
- .probe = snd_intelmad_probe,
- .remove = __devexit_p(snd_intelmad_remove),
-};
-
-/*
- * alsa_card_intelmad_init - driver init function
- *
- * This function is called when driver module is inserted
- */
-static int __init alsa_card_intelmad_init(void)
-{
- pr_debug("mad_init called\n");
- return platform_driver_register(&snd_intelmad_driver);
-}
-
-/**
- * alsa_card_intelmad_exit - driver exit function
- *
- * This function is called when driver module is removed
- */
-static void __exit alsa_card_intelmad_exit(void)
-{
- pr_debug("mad_exit called\n");
- platform_driver_unregister(&snd_intelmad_driver);
-}
-
-module_init(alsa_card_intelmad_init)
-module_exit(alsa_card_intelmad_exit)
-
diff --git a/drivers/staging/intel_sst/intelmid.h b/drivers/staging/intel_sst/intelmid.h
deleted file mode 100644
index 14a7ba078b7c..000000000000
--- a/drivers/staging/intel_sst/intelmid.h
+++ /dev/null
@@ -1,209 +0,0 @@
-/*
- * intelmid.h - Intel Sound card driver for MID
- *
- * Copyright (C) 2008-10 Intel Corp
- * Authors: Harsha Priya <priya.harsha@intel.com>
- * Vinod Koul <vinod.koul@intel.com>
- * Dharageswari R <dharageswari.r@intel.com>
- * KP Jeeja <jeeja.kp@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * ALSA driver header for Intel MAD chipset
- */
-#ifndef __INTELMID_H
-#define __INTELMID_H
-
-#include <linux/time.h>
-#include <sound/jack.h>
-
-#define DRIVER_NAME_MFLD "msic_audio"
-#define DRIVER_NAME_MRST "pmic_audio"
-#define DRIVER_NAME "intelmid_audio"
-#define PMIC_SOUND_IRQ_TYPE_MASK (1 << 15)
-#define AUDINT_BASE (0xFFFFEFF8 + (6 * sizeof(u8)))
-#define REG_IRQ
-/* values #defined */
-/* will differ for different hw - to be taken from config */
-#define MAX_DEVICES 1
-#define MIN_RATE 8000
-#define MAX_RATE 48000
-#define MAX_BUFFER (800*1024) /* for PCM */
-#define MIN_BUFFER (800*1024)
-#define MAX_PERIODS (1024*2)
-#define MIN_PERIODS 2
-#define MAX_PERIOD_BYTES MAX_BUFFER
-#define MIN_PERIOD_BYTES 32
-/*#define MIN_PERIOD_BYTES 160*/
-#define MAX_MUTE 1
-#define MIN_MUTE 0
-#define MONO_CNTL 1
-#define STEREO_CNTL 2
-#define MIN_CHANNEL 1
-#define MAX_CHANNEL_AMIC 2
-#define MAX_CHANNEL_DMIC 5
-#define FIFO_SIZE 0 /* fifo not being used */
-#define INTEL_MAD "Intel MAD"
-#define MAX_CTRL_MRST 8
-#define MAX_CTRL_MFLD 7
-#define MAX_CTRL 8
-#define MAX_VENDORS 4
-/* TODO +6 db */
-#define MAX_VOL 64
-/* TODO -57 db */
-#define MIN_VOL 0
-#define PLAYBACK_COUNT 1
-#define CAPTURE_COUNT 1
-#define ADC_ONE_LSB_MULTIPLIER 2346
-
-#define MID_JACK_HS_LONG_PRESS SND_JACK_BTN_0
-#define MID_JACK_HS_SHORT_PRESS SND_JACK_BTN_1
-
-extern int sst_card_vendor_id;
-
-struct mad_jack {
- struct snd_jack jack;
- int jack_status;
- int jack_dev_state;
- struct timeval buttonpressed;
- struct timeval buttonreleased;
-};
-
-struct mad_jack_msg_wq {
- u8 intsts;
- struct snd_intelmad *intelmaddata;
- struct work_struct wq;
-
-};
-
-struct snd_intelmad_probe_info {
- unsigned int cpu_id;
- unsigned int irq_cache;
- unsigned int size;
-};
-
-/**
- * struct snd_intelmad - intelmad driver structure
- *
- * @card: ptr to the card details
- * @card_index: sound card index
- * @card_id: sound card id detected
- * @sstdrv_ops: ptr to sst driver ops
- * @pdev: ptr to platform device
- * @irq: interrupt number detected
- * @pmic_status: Device status of sound card
- * @int_base: ptr to MMIO interrupt region
- * @output_sel: device selected as o/p
- * @input_sel: device selected as i/p
- * @master_mute: master mute status
- * @jack: jack status
- * @playback_cnt: active pb streams
- * @capture_cnt: active cp streams
- * @mad_jack_msg: wq struct for jack interrupt processing
- * @mad_jack_wq: wq for jack interrupt processing
- * @jack_prev_state: Previous state of jack detected
- * @cpu_id: current cpu id loaded for
- */
-struct snd_intelmad {
- struct snd_card *card; /* ptr to the card details */
- int card_index;/* card index */
- char *card_id; /* card id */
- struct intel_sst_card_ops *sstdrv_ops;/* ptr to sst driver ops */
- struct platform_device *pdev;
- int irq;
- int pmic_status;
- void __iomem *int_base;
- int output_sel;
- int input_sel;
- int lineout_sel;
- int master_mute;
- struct mad_jack jack[4];
- int playback_cnt;
- int capture_cnt;
- u16 adc_address;
- struct mad_jack_msg_wq mad_jack_msg;
- struct workqueue_struct *mad_jack_wq;
- u8 jack_prev_state;
- unsigned int cpu_id;
-};
-
-struct snd_control_val {
- int playback_vol_max;
- int playback_vol_min;
- int capture_vol_max;
- int capture_vol_min;
- int master_vol_max;
- int master_vol_min;
-};
-
-struct mad_stream_pvt {
- int stream_status;
- int stream_ops;
- struct snd_pcm_substream *substream;
- struct pcm_stream_info stream_info;
- ssize_t dbg_cum_bytes;
- enum snd_sst_device_type device;
-};
-
-enum mad_drv_status {
- INIT = 1,
- STARTED,
- RUNNING,
- PAUSED,
- DROPPED,
-};
-
-enum mad_pmic_status {
- PMIC_UNINIT = 1,
- PMIC_INIT,
-};
-enum _widget_ctrl {
- OUTPUT_SEL = 1,
- INPUT_SEL,
- PLAYBACK_VOL,
- PLAYBACK_MUTE,
- CAPTURE_VOL,
- CAPTURE_MUTE,
- MASTER_VOL,
- MASTER_MUTE
-};
-enum _widget_ctrl_mfld {
- LINEOUT_SEL_MFLD = 3,
-};
-enum hw_chs {
- HW_CH0 = 0,
- HW_CH1,
- HW_CH2,
- HW_CH3
-};
-
-void period_elapsed(void *mad_substream);
-int snd_intelmad_alloc_stream(struct snd_pcm_substream *substream);
-int snd_intelmad_init_stream(struct snd_pcm_substream *substream);
-
-int sst_sc_reg_access(struct sc_reg_access *sc_access,
- int type, int num_val);
-#define CPU_CHIP_LINCROFT 1 /* System running lincroft */
-#define CPU_CHIP_PENWELL 2 /* System running penwell */
-
-extern struct snd_control_val intelmad_ctrl_val[];
-extern struct snd_kcontrol_new snd_intelmad_controls_mrst[];
-extern struct snd_kcontrol_new snd_intelmad_controls_mfld[];
-extern struct snd_pmic_ops *intelmad_vendor_ops[];
-void sst_mad_send_jack_report(struct snd_jack *jack,
- int buttonpressevent , int status);
-
-#endif /* __INTELMID_H */
diff --git a/drivers/staging/intel_sst/intelmid_adc_control.h b/drivers/staging/intel_sst/intelmid_adc_control.h
deleted file mode 100644
index 65d5c3988762..000000000000
--- a/drivers/staging/intel_sst/intelmid_adc_control.h
+++ /dev/null
@@ -1,193 +0,0 @@
-#ifndef __INTELMID_ADC_CONTROL_H__
-#define __INTELMID_ADC_CONTROL_H__
-/*
- * intelmid_adc_control.h - Intel SST Driver for audio engine
- *
- * Copyright (C) 2008-10 Intel Corporation
- * Authors: R Durgadadoss <r.durgadoss@intel.com>
- * Dharageswari R <dharageswari.r@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * Common private ADC declarations for SST
- */
-
-
-#define MSIC_ADC1CNTL1 0x1C0
-#define MSIC_ADC_ENBL 0x10
-#define MSIC_ADC_START 0x08
-
-#define MSIC_ADC1CNTL3 0x1C2
-#define MSIC_ADCTHERM_ENBL 0x04
-#define MSIC_ADCRRDATA_ENBL 0x05
-
-#define MSIC_STOPBIT_MASK 16
-#define MSIC_ADCTHERM_MASK 4
-
-#define ADC_CHANLS_MAX 15 /* Number of ADC channels */
-#define ADC_LOOP_MAX (ADC_CHANLS_MAX - 1)
-
-/* ADC channel code values */
-#define AUDIO_DETECT_CODE 0x06
-
-/* ADC base addresses */
-#define ADC_CHNL_START_ADDR 0x1C5 /* increments by 1 */
-#define ADC_DATA_START_ADDR 0x1D4 /* increments by 2 */
-
-
-/**
- * configure_adc - enables/disables the ADC for conversion
- * @val: zero disables the ADC, non-zero enables the ADC
- *
- * Enable/Disable the ADC depending on the argument
- *
- * Can sleep
- */
-static inline int configure_adc(int val)
-{
- int ret;
- struct sc_reg_access sc_access = {0,};
-
-
- sc_access.reg_addr = MSIC_ADC1CNTL1;
- ret = sst_sc_reg_access(&sc_access, PMIC_READ, 1);
- if (ret)
- return ret;
-
- if (val)
- /* Enable and start the ADC */
- sc_access.value |= (MSIC_ADC_ENBL | MSIC_ADC_START);
- else
- /* Just stop the ADC */
- sc_access.value &= (~MSIC_ADC_START);
- sc_access.reg_addr = MSIC_ADC1CNTL1;
- return sst_sc_reg_access(&sc_access, PMIC_WRITE, 1);
-}
-
-/**
- * reset_stopbit - sets the stop bit to 0 on the given channel
- * @addr: address of the channel
- *
- * Can sleep
- */
-static inline int reset_stopbit(uint16_t addr)
-{
- int ret;
- struct sc_reg_access sc_access = {0,};
- sc_access.reg_addr = addr;
- ret = sst_sc_reg_access(&sc_access, PMIC_READ, 1);
- if (ret)
- return ret;
- /* Set the stop bit to zero */
- sc_access.reg_addr = addr;
- sc_access.value = (sc_access.value) & 0xEF;
- return sst_sc_reg_access(&sc_access, PMIC_WRITE, 1);
-}
-
-/**
- * find_free_channel - finds an empty channel for conversion
- *
- * If the ADC is not enabled then start using 0th channel
- * itself. Otherwise find an empty channel by looking for a
- * channel in which the stopbit is set to 1. Returns the index
- * of the first free channel on success or an error code.
- *
- * Context: can sleep
- *
- */
-static inline int find_free_channel(void)
-{
- int ret;
- int i;
-
- struct sc_reg_access sc_access = {0,};
-
- /* check whether ADC is enabled */
- sc_access.reg_addr = MSIC_ADC1CNTL1;
- ret = sst_sc_reg_access(&sc_access, PMIC_READ, 1);
- if (ret)
- return ret;
-
- if ((sc_access.value & MSIC_ADC_ENBL) == 0)
- return 0;
-
- /* ADC is already enabled; Looking for an empty channel */
- for (i = 0; i < ADC_CHANLS_MAX; i++) {
-
- sc_access.reg_addr = ADC_CHNL_START_ADDR + i;
- ret = sst_sc_reg_access(&sc_access, PMIC_READ, 1);
- if (ret)
- return ret;
-
- if (sc_access.value & MSIC_STOPBIT_MASK) {
- ret = i;
- break;
- }
- }
- return (ret > ADC_LOOP_MAX) ? (-EINVAL) : ret;
-}
-
-/**
- * mid_initialize_adc - initialize the ADC
- *
- * Initialize the ADC channel used for audio detection. Can sleep.
- */
-static inline int mid_initialize_adc(void)
-{
- int base_addr, chnl_addr;
- int ret;
- static int channel_index;
- struct sc_reg_access sc_access = {0,};
-
- /* Index of the first channel in which the stop bit is set */
- channel_index = find_free_channel();
- if (channel_index < 0) {
- pr_err("No free ADC channels");
- return channel_index;
- }
-
- base_addr = ADC_CHNL_START_ADDR + channel_index;
-
- if (!(channel_index == 0 || channel_index == ADC_LOOP_MAX)) {
- /* Reset stop bit for channels other than 0 and 12 */
- ret = reset_stopbit(base_addr);
- if (ret)
- return ret;
-
- /* Index of the first free channel */
- base_addr++;
- channel_index++;
- }
-
- /* Since this is the last channel, set the stop bit
- to 1 by ORing the AUDIO_DETECT_CODE with 0x10 */
- sc_access.reg_addr = base_addr;
- sc_access.value = AUDIO_DETECT_CODE | 0x10;
- ret = sst_sc_reg_access(&sc_access, PMIC_WRITE, 1);
- if (ret) {
- pr_err("unable to enable ADC");
- return ret;
- }
-
- chnl_addr = ADC_DATA_START_ADDR + 2 * channel_index;
- pr_debug("mid_initialize : %x", chnl_addr);
- configure_adc(1);
- return chnl_addr;
-}
-#endif
-
diff --git a/drivers/staging/intel_sst/intelmid_ctrl.c b/drivers/staging/intel_sst/intelmid_ctrl.c
deleted file mode 100644
index 19ec474b362a..000000000000
--- a/drivers/staging/intel_sst/intelmid_ctrl.c
+++ /dev/null
@@ -1,921 +0,0 @@
-/*
- * intelmid_ctrl.c - Intel Sound card driver for MID
- *
- * Copyright (C) 2008-10 Intel Corp
- * Authors: Harsha Priya <priya.harsha@intel.com>
- * Vinod Koul <vinod.koul@intel.com>
- * Dharageswari R <dharageswari.r@intel.com>
- * KP Jeeja <jeeja.kp@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * ALSA driver handling mixer controls for Intel MAD chipset
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <sound/core.h>
-#include <sound/control.h>
-#include "intel_sst.h"
-#include "intel_sst_ioctl.h"
-#include "intelmid_snd_control.h"
-#include "intelmid.h"
-
-#define HW_CH_BASE 4
-
-
-#define HW_CH_0 "Hw1"
-#define HW_CH_1 "Hw2"
-#define HW_CH_2 "Hw3"
-#define HW_CH_3 "Hw4"
-
-static char *router_dmics[] = { "DMIC1",
- "DMIC2",
- "DMIC3",
- "DMIC4",
- "DMIC5",
- "DMIC6"
- };
-
-static char *out_names_mrst[] = {"Headphones",
- "Internal speakers"};
-static char *in_names_mrst[] = {"AMIC",
- "DMIC",
- "HS_MIC"};
-static char *line_out_names_mfld[] = {"Headset",
- "IHF ",
- "Vibra1 ",
- "Vibra2 ",
- "NONE "};
-static char *out_names_mfld[] = {"Headset ",
- "EarPiece "};
-static char *in_names_mfld[] = {"AMIC",
- "DMIC"};
-
-struct snd_control_val intelmad_ctrl_val[MAX_VENDORS] = {
- {
- .playback_vol_max = 63,
- .playback_vol_min = 0,
- .capture_vol_max = 63,
- .capture_vol_min = 0,
- },
- {
- .playback_vol_max = 0,
- .playback_vol_min = -31,
- .capture_vol_max = 0,
- .capture_vol_min = -20,
- },
- {
- .playback_vol_max = 0,
- .playback_vol_min = -31,
- .capture_vol_max = 0,
- .capture_vol_min = -31,
- .master_vol_max = 0,
- .master_vol_min = -126,
- },
-};
-
-/* control path functionalities */
-
-static inline int snd_intelmad_volume_info(struct snd_ctl_elem_info *uinfo,
- int control_type, int max, int min)
-{
- WARN_ON(!uinfo);
-
- uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
- uinfo->count = control_type;
- uinfo->value.integer.min = min;
- uinfo->value.integer.max = max;
- return 0;
-}
-
-/**
- * snd_intelmad_mute_info - provides information about the mute controls
- *
- * @kcontrol: pointer to the control
- * @uinfo: pointer to the structure where the control's info needs
- * to be filled
- *
- * This function is called when a mixer application requests the control's info
- */
-static int snd_intelmad_mute_info(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_info *uinfo)
-{
- WARN_ON(!uinfo);
- WARN_ON(!kcontrol);
-
- /* set up the mute as a boolean mono control with min-max values */
- uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
- uinfo->count = MONO_CNTL;
- uinfo->value.integer.min = MIN_MUTE;
- uinfo->value.integer.max = MAX_MUTE;
- return 0;
-}
-
-/**
- * snd_intelmad_capture_volume_info - provides info about the volume control
- *
- * @kcontrol: pointer to the control
- * @uinfo: pointer to the structure where the control's info needs
- * to be filled
- *
- * This function is called when a mixer application requests the control's info
- */
-static int snd_intelmad_capture_volume_info(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_info *uinfo)
-{
- snd_intelmad_volume_info(uinfo, MONO_CNTL,
- intelmad_ctrl_val[sst_card_vendor_id].capture_vol_max,
- intelmad_ctrl_val[sst_card_vendor_id].capture_vol_min);
- return 0;
-}
-
-/**
- * snd_intelmad_playback_volume_info - provides info about the volume control
- *
- * @kcontrol: pointer to the control
- * @uinfo: pointer to the structure where the control's info needs
- * to be filled
- *
- * This function is called when a mixer application requests the control's info
- */
-static int snd_intelmad_playback_volume_info(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_info *uinfo)
-{
- snd_intelmad_volume_info(uinfo, STEREO_CNTL,
- intelmad_ctrl_val[sst_card_vendor_id].playback_vol_max,
- intelmad_ctrl_val[sst_card_vendor_id].playback_vol_min);
- return 0;
-}
-
-static int snd_intelmad_master_volume_info(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_info *uinfo)
-{
- snd_intelmad_volume_info(uinfo, STEREO_CNTL,
- intelmad_ctrl_val[sst_card_vendor_id].master_vol_max,
- intelmad_ctrl_val[sst_card_vendor_id].master_vol_min);
- return 0;
-}
-
-/**
-* snd_intelmad_device_info_mrst - provides information about the devices available
-*
-* @kcontrol: pointer to the control
-* @uinfo: pointer to the structure where the device's info needs
-* to be filled
-*
-* This function is called when a mixer application requests the device's info
-*/
-static int snd_intelmad_device_info_mrst(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_info *uinfo)
-{
-
- WARN_ON(!kcontrol);
- WARN_ON(!uinfo);
-
-	/* set up device select as a drop-down control with different values */
- if (kcontrol->id.numid == OUTPUT_SEL)
- uinfo->value.enumerated.items = ARRAY_SIZE(out_names_mrst);
- else
- uinfo->value.enumerated.items = ARRAY_SIZE(in_names_mrst);
- uinfo->count = MONO_CNTL;
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
-
- if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
- uinfo->value.enumerated.item = 1;
- if (kcontrol->id.numid == OUTPUT_SEL)
- strncpy(uinfo->value.enumerated.name,
- out_names_mrst[uinfo->value.enumerated.item],
- sizeof(uinfo->value.enumerated.name)-1);
- else
- strncpy(uinfo->value.enumerated.name,
- in_names_mrst[uinfo->value.enumerated.item],
- sizeof(uinfo->value.enumerated.name)-1);
- return 0;
-}
-
-static int snd_intelmad_device_info_mfld(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_info *uinfo)
-{
- struct snd_pmic_ops *scard_ops;
- struct snd_intelmad *intelmaddata;
-
- WARN_ON(!kcontrol);
- WARN_ON(!uinfo);
-
- intelmaddata = kcontrol->private_data;
-
- WARN_ON(!intelmaddata->sstdrv_ops);
-
- scard_ops = intelmaddata->sstdrv_ops->scard_ops;
-	/* set up device select as a drop-down control with different values */
- if (kcontrol->id.numid == OUTPUT_SEL)
- uinfo->value.enumerated.items = ARRAY_SIZE(out_names_mfld);
- else if (kcontrol->id.numid == INPUT_SEL)
- uinfo->value.enumerated.items = ARRAY_SIZE(in_names_mfld);
- else if (kcontrol->id.numid == LINEOUT_SEL_MFLD) {
- uinfo->value.enumerated.items = ARRAY_SIZE(line_out_names_mfld);
- scard_ops->line_out_names_cnt = uinfo->value.enumerated.items;
- } else
- return -EINVAL;
- uinfo->count = MONO_CNTL;
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
-
- if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
- uinfo->value.enumerated.item = 1;
- if (kcontrol->id.numid == OUTPUT_SEL)
- strncpy(uinfo->value.enumerated.name,
- out_names_mfld[uinfo->value.enumerated.item],
- sizeof(uinfo->value.enumerated.name)-1);
- else if (kcontrol->id.numid == INPUT_SEL)
- strncpy(uinfo->value.enumerated.name,
- in_names_mfld[uinfo->value.enumerated.item],
- sizeof(uinfo->value.enumerated.name)-1);
- else if (kcontrol->id.numid == LINEOUT_SEL_MFLD)
- strncpy(uinfo->value.enumerated.name,
- line_out_names_mfld[uinfo->value.enumerated.item],
- sizeof(uinfo->value.enumerated.name)-1);
- else
- return -EINVAL;
- return 0;
-}
-
-/**
-* snd_intelmad_volume_get - gets the current volume for the control
-*
-* @kcontrol: pointer to the control
-* @uval: pointer to the structure where the control's info needs
-* to be filled
-*
-* This function is called when the .get function of a control is invoked by the app
-*/
-static int snd_intelmad_volume_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *uval)
-{
- int ret_val = 0, cntl_list[2] = {0,};
- int value = 0;
- struct snd_intelmad *intelmaddata;
- struct snd_pmic_ops *scard_ops;
-
- pr_debug("snd_intelmad_volume_get called\n");
-
- WARN_ON(!uval);
- WARN_ON(!kcontrol);
-
- intelmaddata = kcontrol->private_data;
-
- WARN_ON(!intelmaddata->sstdrv_ops);
-
- scard_ops = intelmaddata->sstdrv_ops->scard_ops;
-
- WARN_ON(!scard_ops);
-
- switch (kcontrol->id.numid) {
- case PLAYBACK_VOL:
- cntl_list[0] = PMIC_SND_RIGHT_PB_VOL;
- cntl_list[1] = PMIC_SND_LEFT_PB_VOL;
- break;
-
- case CAPTURE_VOL:
- cntl_list[0] = PMIC_SND_CAPTURE_VOL;
- break;
-
- case MASTER_VOL:
- cntl_list[0] = PMIC_SND_RIGHT_MASTER_VOL;
- cntl_list[1] = PMIC_SND_LEFT_MASTER_VOL;
- break;
- default:
- return -EINVAL;
- }
-
- ret_val = scard_ops->get_vol(cntl_list[0], &value);
- uval->value.integer.value[0] = value;
-
- if (ret_val)
- return ret_val;
-
- if (kcontrol->id.numid == PLAYBACK_VOL ||
- kcontrol->id.numid == MASTER_VOL) {
- ret_val = scard_ops->get_vol(cntl_list[1], &value);
- uval->value.integer.value[1] = value;
- }
- return ret_val;
-}
-
-/**
-* snd_intelmad_mute_get - gets the current mute status for the control
-*
-* @kcontrol: pointer to the control
-* @uval: pointer to the structure where the control's info needs
-* to be filled
-*
-* This function is called when the .get function of a control is invoked by the app
-*/
-static int snd_intelmad_mute_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *uval)
-{
-
- int cntl_list = 0, ret_val = 0;
- u8 value = 0;
- struct snd_intelmad *intelmaddata;
- struct snd_pmic_ops *scard_ops;
-
- pr_debug("Mute_get called\n");
-
- WARN_ON(!uval);
- WARN_ON(!kcontrol);
-
- intelmaddata = kcontrol->private_data;
-
- WARN_ON(!intelmaddata->sstdrv_ops);
-
- scard_ops = intelmaddata->sstdrv_ops->scard_ops;
-
- WARN_ON(!scard_ops);
-
- switch (kcontrol->id.numid) {
- case PLAYBACK_MUTE:
- if (intelmaddata->output_sel == STEREO_HEADPHONE)
- cntl_list = PMIC_SND_LEFT_HP_MUTE;
- else if ((intelmaddata->output_sel == INTERNAL_SPKR) ||
- (intelmaddata->output_sel == MONO_EARPIECE))
- cntl_list = PMIC_SND_LEFT_SPEAKER_MUTE;
- break;
-
- case CAPTURE_MUTE:
- if (intelmaddata->input_sel == DMIC)
- cntl_list = PMIC_SND_DMIC_MUTE;
- else if (intelmaddata->input_sel == AMIC)
- cntl_list = PMIC_SND_AMIC_MUTE;
- else if (intelmaddata->input_sel == HS_MIC)
- cntl_list = PMIC_SND_HP_MIC_MUTE;
- break;
- case MASTER_MUTE:
- uval->value.integer.value[0] = intelmaddata->master_mute;
- return 0;
- default:
- return -EINVAL;
- }
-
- ret_val = scard_ops->get_mute(cntl_list, &value);
- uval->value.integer.value[0] = value;
- return ret_val;
-}
-
-/**
-* snd_intelmad_volume_set - sets the volume control's info
-*
-* @kcontrol: pointer to the control
-* @uval: pointer to the structure where the control's info is
-* available to be set
-*
-* This function is called when the .set function of a control is invoked by the app
-*/
-static int snd_intelmad_volume_set(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *uval)
-{
-
- int ret_val, cntl_list[2] = {0,};
- struct snd_intelmad *intelmaddata;
- struct snd_pmic_ops *scard_ops;
-
- pr_debug("volume set called:%ld %ld\n",
- uval->value.integer.value[0],
- uval->value.integer.value[1]);
-
- WARN_ON(!uval);
- WARN_ON(!kcontrol);
-
- intelmaddata = kcontrol->private_data;
-
- WARN_ON(!intelmaddata->sstdrv_ops);
-
- scard_ops = intelmaddata->sstdrv_ops->scard_ops;
-
- WARN_ON(!scard_ops);
-
- switch (kcontrol->id.numid) {
- case PLAYBACK_VOL:
- cntl_list[0] = PMIC_SND_LEFT_PB_VOL;
- cntl_list[1] = PMIC_SND_RIGHT_PB_VOL;
- break;
-
- case CAPTURE_VOL:
- cntl_list[0] = PMIC_SND_CAPTURE_VOL;
- break;
-
- case MASTER_VOL:
- cntl_list[0] = PMIC_SND_LEFT_MASTER_VOL;
- cntl_list[1] = PMIC_SND_RIGHT_MASTER_VOL;
- break;
-
- default:
- return -EINVAL;
- }
-
- ret_val = scard_ops->set_vol(cntl_list[0],
- uval->value.integer.value[0]);
- if (ret_val)
- return ret_val;
-
- if (kcontrol->id.numid == PLAYBACK_VOL ||
- kcontrol->id.numid == MASTER_VOL)
- ret_val = scard_ops->set_vol(cntl_list[1],
- uval->value.integer.value[1]);
- return ret_val;
-}
-
-/**
-* snd_intelmad_mute_set - sets the mute control's info
-*
-* @kcontrol: pointer to the control
-* @uval: pointer to the structure where the control's info is
-* available to be set
-*
-* This function is called when the .set function of a control is invoked by the app
-*/
-static int snd_intelmad_mute_set(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *uval)
-{
- int cntl_list[2] = {0,}, ret_val;
- struct snd_intelmad *intelmaddata;
- struct snd_pmic_ops *scard_ops;
-
- pr_debug("snd_intelmad_mute_set called\n");
-
- WARN_ON(!uval);
- WARN_ON(!kcontrol);
-
- intelmaddata = kcontrol->private_data;
-
- WARN_ON(!intelmaddata->sstdrv_ops);
-
- scard_ops = intelmaddata->sstdrv_ops->scard_ops;
-
- WARN_ON(!scard_ops);
-
- kcontrol->private_value = uval->value.integer.value[0];
-
- switch (kcontrol->id.numid) {
- case PLAYBACK_MUTE:
- if (intelmaddata->output_sel == STEREO_HEADPHONE) {
- cntl_list[0] = PMIC_SND_LEFT_HP_MUTE;
- cntl_list[1] = PMIC_SND_RIGHT_HP_MUTE;
- } else if ((intelmaddata->output_sel == INTERNAL_SPKR) ||
- (intelmaddata->output_sel == MONO_EARPIECE)) {
- cntl_list[0] = PMIC_SND_LEFT_SPEAKER_MUTE;
- cntl_list[1] = PMIC_SND_RIGHT_SPEAKER_MUTE;
- }
- break;
-
-	case CAPTURE_MUTE: /* mute the input device based on the selected device */
- if (intelmaddata->input_sel == DMIC)
- cntl_list[0] = PMIC_SND_DMIC_MUTE;
- else if (intelmaddata->input_sel == AMIC)
- cntl_list[0] = PMIC_SND_AMIC_MUTE;
- else if (intelmaddata->input_sel == HS_MIC)
- cntl_list[0] = PMIC_SND_HP_MIC_MUTE;
- break;
- case MASTER_MUTE:
- cntl_list[0] = PMIC_SND_MUTE_ALL;
- intelmaddata->master_mute = uval->value.integer.value[0];
- break;
- default:
- return -EINVAL;
- }
-
- ret_val = scard_ops->set_mute(cntl_list[0],
- uval->value.integer.value[0]);
- if (ret_val)
- return ret_val;
-
- if (kcontrol->id.numid == PLAYBACK_MUTE)
- ret_val = scard_ops->set_mute(cntl_list[1],
- uval->value.integer.value[0]);
- return ret_val;
-}
-
-/**
-* snd_intelmad_device_get - get the device select control's info
-*
-* @kcontrol: pointer to the control
-* @uval: pointer to the structure where the control's info is
-* to be filled
-*
-* This function is called when the .get function of a control is invoked by the app
-*/
-static int snd_intelmad_device_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *uval)
-{
- struct snd_intelmad *intelmaddata;
- struct snd_pmic_ops *scard_ops;
- pr_debug("device_get called\n");
-
- WARN_ON(!uval);
- WARN_ON(!kcontrol);
-
- intelmaddata = kcontrol->private_data;
- scard_ops = intelmaddata->sstdrv_ops->scard_ops;
- if (intelmaddata->cpu_id == CPU_CHIP_PENWELL) {
- if (kcontrol->id.numid == OUTPUT_SEL)
- uval->value.enumerated.item[0] =
- scard_ops->output_dev_id;
- else if (kcontrol->id.numid == INPUT_SEL)
- uval->value.enumerated.item[0] =
- scard_ops->input_dev_id;
- else if (kcontrol->id.numid == LINEOUT_SEL_MFLD)
- uval->value.enumerated.item[0] =
- scard_ops->lineout_dev_id;
- else
- return -EINVAL;
- } else if (intelmaddata->cpu_id == CPU_CHIP_LINCROFT) {
- if (kcontrol->id.numid == OUTPUT_SEL)
- /* There is a mismatch here.
- * ALSA expects 1 for internal speaker.
- * But internally, we may give 2 for internal speaker.
- */
- if (scard_ops->output_dev_id == MONO_EARPIECE ||
- scard_ops->output_dev_id == INTERNAL_SPKR)
- uval->value.enumerated.item[0] = MONO_EARPIECE;
- else if (scard_ops->output_dev_id == STEREO_HEADPHONE)
- uval->value.enumerated.item[0] =
- STEREO_HEADPHONE;
- else
- return -EINVAL;
- else if (kcontrol->id.numid == INPUT_SEL)
- uval->value.enumerated.item[0] =
- scard_ops->input_dev_id;
- else
- return -EINVAL;
- } else
- uval->value.enumerated.item[0] = kcontrol->private_value;
- return 0;
-}
-
-/**
-* snd_intelmad_device_set - set the device select control's info
-*
-* @kcontrol: pointer to the control
-* @uval: pointer to the structure where the control's info is
-* available to be set
-*
-* This function is called when the .set function of a control is invoked by the app
-*/
-static int snd_intelmad_device_set(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *uval)
-{
- struct snd_intelmad *intelmaddata;
- struct snd_pmic_ops *scard_ops;
- int ret_val = 0, vendor, status;
- struct intel_sst_pcm_control *pcm_control;
-
- pr_debug("snd_intelmad_device_set called\n");
-
- WARN_ON(!uval);
- WARN_ON(!kcontrol);
- status = -1;
-
- intelmaddata = kcontrol->private_data;
-
- WARN_ON(!intelmaddata->sstdrv_ops);
-
- scard_ops = intelmaddata->sstdrv_ops->scard_ops;
-
- WARN_ON(!scard_ops);
-
- /* store value with driver */
- kcontrol->private_value = uval->value.enumerated.item[0];
-
- switch (kcontrol->id.numid) {
- case OUTPUT_SEL:
- ret_val = scard_ops->set_output_dev(
- uval->value.enumerated.item[0]);
- intelmaddata->output_sel = uval->value.enumerated.item[0];
- break;
- case INPUT_SEL:
- vendor = intelmaddata->sstdrv_ops->vendor_id;
- if ((vendor == SND_MX) || (vendor == SND_FS)) {
- pcm_control = intelmaddata->sstdrv_ops->pcm_control;
- if (uval->value.enumerated.item[0] == HS_MIC)
- status = 1;
- else
- status = 0;
- pcm_control->device_control(
- SST_ENABLE_RX_TIME_SLOT, &status);
- }
- ret_val = scard_ops->set_input_dev(
- uval->value.enumerated.item[0]);
- intelmaddata->input_sel = uval->value.enumerated.item[0];
- break;
- case LINEOUT_SEL_MFLD:
- ret_val = scard_ops->set_lineout_dev(
- uval->value.enumerated.item[0]);
- intelmaddata->lineout_sel = uval->value.enumerated.item[0];
- break;
- default:
- return -EINVAL;
- }
- kcontrol->private_value = uval->value.enumerated.item[0];
- return ret_val;
-}
-
-static int snd_intelmad_device_dmic_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *uval)
-{
- struct snd_intelmad *intelmaddata;
- struct snd_pmic_ops *scard_ops;
-
- WARN_ON(!uval);
- WARN_ON(!kcontrol);
-
- intelmaddata = kcontrol->private_data;
- scard_ops = intelmaddata->sstdrv_ops->scard_ops;
-
- if (scard_ops->input_dev_id != DMIC) {
- pr_debug("input dev = 0x%x\n", scard_ops->input_dev_id);
- return 0;
- }
-
- if (intelmaddata->cpu_id == CPU_CHIP_PENWELL)
- uval->value.enumerated.item[0] = kcontrol->private_value;
- else
-		pr_debug("CPU id = 0x%x is invalid.\n",
- intelmaddata->cpu_id);
- return 0;
-}
-
-void msic_set_bit(u8 index, unsigned int *available_dmics)
-{
- *available_dmics |= (1 << index);
-}
-
-void msic_clear_bit(u8 index, unsigned int *available_dmics)
-{
- *available_dmics &= ~(1 << index);
-}
-
-int msic_is_set_bit(u8 index, unsigned int *available_dmics)
-{
- int ret_val;
-
- ret_val = (*available_dmics & (1 << index));
- return ret_val;
-}
-
-static int snd_intelmad_device_dmic_set(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *uval)
-{
- struct snd_intelmad *intelmaddata;
- struct snd_pmic_ops *scard_ops;
- int i, dmic_index;
- unsigned int available_dmics;
- int jump_count;
- int max_dmics = ARRAY_SIZE(router_dmics);
-
- WARN_ON(!uval);
- WARN_ON(!kcontrol);
-
- intelmaddata = kcontrol->private_data;
- WARN_ON(!intelmaddata->sstdrv_ops);
-
- scard_ops = intelmaddata->sstdrv_ops->scard_ops;
- WARN_ON(!scard_ops);
-
- if (scard_ops->input_dev_id != DMIC) {
- pr_debug("input dev = 0x%x\n", scard_ops->input_dev_id);
- return 0;
- }
-
- available_dmics = scard_ops->available_dmics;
-
- if (kcontrol->private_value > uval->value.enumerated.item[0]) {
- pr_debug("jump count -1.\n");
- jump_count = -1;
- } else {
- pr_debug("jump count 1.\n");
- jump_count = 1;
- }
-
- dmic_index = uval->value.enumerated.item[0];
- pr_debug("set function. dmic_index = %d, avl_dmic = 0x%x\n",
- dmic_index, available_dmics);
- for (i = 0; i < max_dmics; i++) {
- pr_debug("set function. loop index = 0x%x. dmic_index = 0x%x\n",
- i, dmic_index);
- if (!msic_is_set_bit(dmic_index, &available_dmics)) {
- msic_clear_bit(kcontrol->private_value,
- &available_dmics);
- msic_set_bit(dmic_index, &available_dmics);
- kcontrol->private_value = dmic_index;
- scard_ops->available_dmics = available_dmics;
- scard_ops->hw_dmic_map[kcontrol->id.numid-HW_CH_BASE] =
- kcontrol->private_value;
- scard_ops->set_hw_dmic_route
- (kcontrol->id.numid-HW_CH_BASE);
- return 0;
- }
-
- dmic_index += jump_count;
-
- if (dmic_index > (max_dmics - 1) && jump_count == 1) {
-			pr_debug("Resetting the dmic index to 0.\n");
- dmic_index = 0;
- } else if (dmic_index == -1 && jump_count == -1) {
- pr_debug("Resetting the dmic index to 5.\n");
- dmic_index = max_dmics - 1;
- }
- }
-
- return -EINVAL;
-}
-
-static int snd_intelmad_device_dmic_info_mfld(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_info *uinfo)
-{
- struct snd_intelmad *intelmaddata;
- struct snd_pmic_ops *scard_ops;
-
- uinfo->count = MONO_CNTL;
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->value.enumerated.items = ARRAY_SIZE(router_dmics);
-
- intelmaddata = kcontrol->private_data;
- WARN_ON(!intelmaddata->sstdrv_ops);
-
- scard_ops = intelmaddata->sstdrv_ops->scard_ops;
- WARN_ON(!scard_ops);
-
- if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
- uinfo->value.enumerated.item =
- uinfo->value.enumerated.items - 1;
-
- strncpy(uinfo->value.enumerated.name,
- router_dmics[uinfo->value.enumerated.item],
- sizeof(uinfo->value.enumerated.name)-1);
-
-
- msic_set_bit(kcontrol->private_value, &scard_ops->available_dmics);
-	pr_debug("info function. avl_dmic = 0x%x\n",
- scard_ops->available_dmics);
-
- scard_ops->hw_dmic_map[kcontrol->id.numid-HW_CH_BASE] =
- kcontrol->private_value;
-
- return 0;
-}
-
-struct snd_kcontrol_new snd_intelmad_controls_mrst[MAX_CTRL] __devinitdata = {
-{
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "PCM Playback Source",
- .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
- .info = snd_intelmad_device_info_mrst,
- .get = snd_intelmad_device_get,
- .put = snd_intelmad_device_set,
- .private_value = 0,
-},
-{
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "PCM Capture Source",
- .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
- .info = snd_intelmad_device_info_mrst,
- .get = snd_intelmad_device_get,
- .put = snd_intelmad_device_set,
- .private_value = 0,
-},
-{
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "PCM Playback Volume",
- .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
- .info = snd_intelmad_playback_volume_info,
- .get = snd_intelmad_volume_get,
- .put = snd_intelmad_volume_set,
- .private_value = 0,
-},
-{
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "PCM Playback Switch",
- .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
- .info = snd_intelmad_mute_info,
- .get = snd_intelmad_mute_get,
- .put = snd_intelmad_mute_set,
- .private_value = 0,
-},
-{
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "PCM Capture Volume",
- .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
- .info = snd_intelmad_capture_volume_info,
- .get = snd_intelmad_volume_get,
- .put = snd_intelmad_volume_set,
- .private_value = 0,
-},
-{
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "PCM Capture Switch",
- .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
- .info = snd_intelmad_mute_info,
- .get = snd_intelmad_mute_get,
- .put = snd_intelmad_mute_set,
- .private_value = 0,
-},
-{
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "Master Playback Volume",
- .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
- .info = snd_intelmad_master_volume_info,
- .get = snd_intelmad_volume_get,
- .put = snd_intelmad_volume_set,
- .private_value = 0,
-},
-{
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "Master Playback Switch",
- .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
- .info = snd_intelmad_mute_info,
- .get = snd_intelmad_mute_get,
- .put = snd_intelmad_mute_set,
- .private_value = 0,
-},
-};
-
-struct snd_kcontrol_new
-snd_intelmad_controls_mfld[MAX_CTRL_MFLD] __devinitdata = {
-{
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "PCM Playback Source",
- .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
- .info = snd_intelmad_device_info_mfld,
- .get = snd_intelmad_device_get,
- .put = snd_intelmad_device_set,
- .private_value = 0,
-},
-{
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "PCM Capture Source",
- .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
- .info = snd_intelmad_device_info_mfld,
- .get = snd_intelmad_device_get,
- .put = snd_intelmad_device_set,
- .private_value = 0,
-},
-{
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "Line out",
- .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
- .info = snd_intelmad_device_info_mfld,
- .get = snd_intelmad_device_get,
- .put = snd_intelmad_device_set,
- .private_value = 0,
-},
-{
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = HW_CH_0,
- .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
- .info = snd_intelmad_device_dmic_info_mfld,
- .get = snd_intelmad_device_dmic_get,
- .put = snd_intelmad_device_dmic_set,
- .private_value = 0
-},
-{
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = HW_CH_1,
- .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
- .info = snd_intelmad_device_dmic_info_mfld,
- .get = snd_intelmad_device_dmic_get,
- .put = snd_intelmad_device_dmic_set,
- .private_value = 1
-},
-{
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = HW_CH_2,
- .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
- .info = snd_intelmad_device_dmic_info_mfld,
- .get = snd_intelmad_device_dmic_get,
- .put = snd_intelmad_device_dmic_set,
- .private_value = 2
-},
-{
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = HW_CH_3,
- .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
- .info = snd_intelmad_device_dmic_info_mfld,
- .get = snd_intelmad_device_dmic_get,
- .put = snd_intelmad_device_dmic_set,
- .private_value = 3
-}
-};
-
diff --git a/drivers/staging/intel_sst/intelmid_msic_control.c b/drivers/staging/intel_sst/intelmid_msic_control.c
deleted file mode 100644
index 70cdb1697815..000000000000
--- a/drivers/staging/intel_sst/intelmid_msic_control.c
+++ /dev/null
@@ -1,1047 +0,0 @@
-/*
- * intelmid_msic_control.c - Intel Sound card driver for MID
- *
- * Copyright (C) 2010 Intel Corp
- * Authors: Vinod Koul <vinod.koul@intel.com>
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This file contains the control operations of msic vendors
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/pci.h>
-#include <linux/file.h>
-#include <linux/delay.h>
-#include <sound/control.h>
-#include "intel_sst.h"
-#include <linux/input.h>
-#include "intelmid_snd_control.h"
-#include "intelmid.h"
-
-#define AUDIOMUX12 0x24c
-#define AUDIOMUX34 0x24d
-
-static int msic_init_card(void)
-{
- struct sc_reg_access sc_access[] = {
- /* dmic configuration */
- {0x241, 0x85, 0},
- {0x242, 0x02, 0},
- /* audio paths config */
- {0x24C, 0x10, 0},
- {0x24D, 0x32, 0},
- /* PCM2 interface slots */
- /* preconfigured slots for 0-5 both tx, rx */
- {0x272, 0x10, 0},
- {0x273, 0x32, 0},
- {0x274, 0xFF, 0},
- {0x275, 0x10, 0},
- {0x276, 0x32, 0},
- {0x277, 0x54, 0},
- /*Sinc5 decimator*/
- {0x24E, 0x28, 0},
- /*TI vibra w/a settings*/
- {0x384, 0x80, 0},
- {0x385, 0x80, 0},
- {0x267, 0x00, 0},
- {0x261, 0x00, 0},
- /* pcm port setting */
- {0x278, 0x00, 0},
- {0x27B, 0x01, 0},
- {0x27C, 0x0a, 0},
- /* Set vol HSLRVOLCTRL, IHFVOL */
- {0x259, 0x08, 0},
- {0x25A, 0x08, 0},
- {0x25B, 0x08, 0},
- {0x25C, 0x08, 0},
- /* HSEPRXCTRL Enable the headset left and right FIR filters */
- {0x250, 0x30, 0},
- /* HSMIXER */
- {0x256, 0x11, 0},
- /* amic configuration */
- {0x249, 0x01, 0x0},
- {0x24A, 0x01, 0x0},
- /* unmask ocaudio/accdet interrupts */
- {0x1d, 0x00, 0x00},
- {0x1e, 0x00, 0x00},
- };
- snd_msic_ops.card_status = SND_CARD_INIT_DONE;
- sst_sc_reg_access(sc_access, PMIC_WRITE, 28);
- snd_msic_ops.pb_on = 0;
- snd_msic_ops.pbhs_on = 0;
- snd_msic_ops.cap_on = 0;
- snd_msic_ops.input_dev_id = DMIC; /*def dev*/
- snd_msic_ops.output_dev_id = STEREO_HEADPHONE;
- snd_msic_ops.jack_interrupt_status = false;
- pr_debug("msic init complete!!\n");
- return 0;
-}
-static int msic_line_out_restore(u8 value)
-{
- struct sc_reg_access hs_drv_en[] = {
- {0x25d, 0x03, 0x03},
- };
- struct sc_reg_access ep_drv_en[] = {
- {0x25d, 0x40, 0x40},
- };
- struct sc_reg_access ihf_drv_en[] = {
- {0x25d, 0x0c, 0x0c},
- };
- struct sc_reg_access vib1_drv_en[] = {
- {0x25d, 0x10, 0x10},
- };
- struct sc_reg_access vib2_drv_en[] = {
- {0x25d, 0x20, 0x20},
- };
- struct sc_reg_access pmode_enable[] = {
- {0x381, 0x10, 0x10},
- };
- int retval = 0;
-
- pr_debug("msic_lineout_restore_lineout_dev:%d\n", value);
-
- switch (value) {
- case HEADSET:
- pr_debug("Selecting Lineout-HEADSET-restore\n");
- if (snd_msic_ops.output_dev_id == STEREO_HEADPHONE)
- retval = sst_sc_reg_access(hs_drv_en,
- PMIC_READ_MODIFY, 1);
- else
- retval = sst_sc_reg_access(ep_drv_en,
- PMIC_READ_MODIFY, 1);
- break;
- case IHF:
- pr_debug("Selecting Lineout-IHF-restore\n");
- retval = sst_sc_reg_access(ihf_drv_en, PMIC_READ_MODIFY, 1);
- if (retval)
- return retval;
- retval = sst_sc_reg_access(pmode_enable, PMIC_READ_MODIFY, 1);
- break;
- case VIBRA1:
- pr_debug("Selecting Lineout-Vibra1-restore\n");
- retval = sst_sc_reg_access(vib1_drv_en, PMIC_READ_MODIFY, 1);
- break;
- case VIBRA2:
- pr_debug("Selecting Lineout-VIBRA2-restore\n");
- retval = sst_sc_reg_access(vib2_drv_en, PMIC_READ_MODIFY, 1);
- break;
- case NONE:
- pr_debug("Selecting Lineout-NONE-restore\n");
- break;
- default:
- return -EINVAL;
- }
- return retval;
-}
-static int msic_get_lineout_prvstate(void)
-{
- struct sc_reg_access hs_ihf_drv[2] = {
- {0x257, 0x0, 0x0},
- {0x25d, 0x0, 0x0},
- };
- struct sc_reg_access vib1drv[2] = {
- {0x264, 0x0, 0x0},
- {0x25D, 0x0, 0x0},
- };
- struct sc_reg_access vib2drv[2] = {
- {0x26A, 0x0, 0x0},
- {0x25D, 0x0, 0x0},
- };
- int retval = 0, drv_en, dac_en, dev_id, mask;
- for (dev_id = 0; dev_id < snd_msic_ops.line_out_names_cnt; dev_id++) {
- switch (dev_id) {
- case HEADSET:
- pr_debug("msic_get_lineout_prvs_state: HEADSET\n");
- sst_sc_reg_access(hs_ihf_drv, PMIC_READ, 2);
-
- mask = (MASK0|MASK1);
- dac_en = (hs_ihf_drv[0].value) & mask;
-
- mask = ((MASK0|MASK1)|MASK6);
- drv_en = (hs_ihf_drv[1].value) & mask;
-
- if (dac_en && (!drv_en)) {
- snd_msic_ops.prev_lineout_dev_id = HEADSET;
- return retval;
- }
- break;
- case IHF:
- pr_debug("msic_get_lineout_prvstate: IHF\n");
- sst_sc_reg_access(hs_ihf_drv, PMIC_READ, 2);
-
- mask = (MASK2 | MASK3);
- dac_en = (hs_ihf_drv[0].value) & mask;
-
- mask = (MASK2 | MASK3);
- drv_en = (hs_ihf_drv[1].value) & mask;
-
- if (dac_en && (!drv_en)) {
- snd_msic_ops.prev_lineout_dev_id = IHF;
- return retval;
- }
- break;
- case VIBRA1:
- pr_debug("msic_get_lineout_prvstate: vibra1\n");
- sst_sc_reg_access(vib1drv, PMIC_READ, 2);
-
- mask = MASK1;
- dac_en = (vib1drv[0].value) & mask;
-
- mask = MASK4;
- drv_en = (vib1drv[1].value) & mask;
-
- if (dac_en && (!drv_en)) {
- snd_msic_ops.prev_lineout_dev_id = VIBRA1;
- return retval;
- }
- break;
- case VIBRA2:
- pr_debug("msic_get_lineout_prvstate: vibra2\n");
- sst_sc_reg_access(vib2drv, PMIC_READ, 2);
-
- mask = MASK1;
- dac_en = (vib2drv[0].value) & mask;
-
- mask = MASK5;
- drv_en = ((vib2drv[1].value) & mask);
-
- if (dac_en && (!drv_en)) {
- snd_msic_ops.prev_lineout_dev_id = VIBRA2;
- return retval;
- }
- break;
- case NONE:
- pr_debug("msic_get_lineout_prvstate: NONE\n");
- snd_msic_ops.prev_lineout_dev_id = NONE;
- return retval;
- default:
- pr_debug("Invalid device id\n");
- snd_msic_ops.prev_lineout_dev_id = NONE;
- return -EINVAL;
- }
- }
- return retval;
-}
-static int msic_set_selected_lineout_dev(u8 value)
-{
- struct sc_reg_access lout_hs[] = {
- {0x25e, 0x33, 0xFF},
- {0x25d, 0x0, 0x43},
- };
- struct sc_reg_access lout_ihf[] = {
- {0x25e, 0x55, 0xff},
- {0x25d, 0x0, 0x0c},
- };
- struct sc_reg_access lout_vibra1[] = {
-
- {0x25e, 0x61, 0xff},
- {0x25d, 0x0, 0x10},
- };
- struct sc_reg_access lout_vibra2[] = {
-
- {0x25e, 0x16, 0xff},
- {0x25d, 0x0, 0x20},
- };
- struct sc_reg_access lout_def[] = {
- {0x25e, 0x66, 0x0},
- };
- struct sc_reg_access pmode_disable[] = {
- {0x381, 0x00, 0x10},
- };
- struct sc_reg_access pmode_enable[] = {
- {0x381, 0x10, 0x10},
- };
- int retval = 0;
-
- pr_debug("msic_set_selected_lineout_dev:%d\n", value);
- msic_get_lineout_prvstate();
- msic_line_out_restore(snd_msic_ops.prev_lineout_dev_id);
- snd_msic_ops.lineout_dev_id = value;
-
- switch (value) {
- case HEADSET:
- pr_debug("Selecting Lineout-HEADSET\n");
- if (snd_msic_ops.pb_on)
- retval = sst_sc_reg_access(lout_hs,
- PMIC_READ_MODIFY, 2);
- if (retval)
- return retval;
- retval = sst_sc_reg_access(pmode_disable,
- PMIC_READ_MODIFY, 1);
- break;
- case IHF:
- pr_debug("Selecting Lineout-IHF\n");
- if (snd_msic_ops.pb_on)
- retval = sst_sc_reg_access(lout_ihf,
- PMIC_READ_MODIFY, 2);
- if (retval)
- return retval;
- retval = sst_sc_reg_access(pmode_enable,
- PMIC_READ_MODIFY, 1);
- break;
- case VIBRA1:
- pr_debug("Selecting Lineout-Vibra1\n");
- if (snd_msic_ops.pb_on)
- retval = sst_sc_reg_access(lout_vibra1,
- PMIC_READ_MODIFY, 2);
- if (retval)
- return retval;
- retval = sst_sc_reg_access(pmode_disable,
- PMIC_READ_MODIFY, 1);
- break;
- case VIBRA2:
- pr_debug("Selecting Lineout-VIBRA2\n");
- if (snd_msic_ops.pb_on)
- retval = sst_sc_reg_access(lout_vibra2,
- PMIC_READ_MODIFY, 2);
- if (retval)
- return retval;
- retval = sst_sc_reg_access(pmode_disable,
- PMIC_READ_MODIFY, 1);
- break;
- case NONE:
- pr_debug("Selecting Lineout-NONE\n");
- retval = sst_sc_reg_access(lout_def,
- PMIC_WRITE, 1);
- if (retval)
- return retval;
- retval = sst_sc_reg_access(pmode_disable,
- PMIC_READ_MODIFY, 1);
- break;
- default:
- return -EINVAL;
- }
- return retval;
-}
-
-
-static int msic_power_up_pb(unsigned int device)
-{
- struct sc_reg_access vaud[] = {
- /* turn on the audio power supplies */
- {0x0DB, 0x07, 0},
- };
- struct sc_reg_access pll[] = {
- /* turn on PLL */
- {0x240, 0x20, 0},
- };
- struct sc_reg_access vhs[] = {
- /* VHSP */
- {0x0DC, 0x3D, 0},
- /* VHSN */
- {0x0DD, 0x3F, 0},
- };
- struct sc_reg_access hsdac[] = {
- {0x382, 0x40, 0x40},
- /* disable driver */
- {0x25D, 0x0, 0x43},
- /* DAC CONFIG ; both HP, LP on */
- {0x257, 0x03, 0x03},
- };
- struct sc_reg_access hs_filter[] = {
- /* HSEPRXCTRL Enable the headset left and right FIR filters */
- {0x250, 0x30, 0},
- /* HSMIXER */
- {0x256, 0x11, 0},
- };
- struct sc_reg_access hs_enable[] = {
- /* enable driver */
- {0x25D, 0x3, 0x3},
- {0x26C, 0x0, 0x2},
- /* unmute the headset */
- { 0x259, 0x80, 0x80},
- { 0x25A, 0x80, 0x80},
- };
- struct sc_reg_access vihf[] = {
- /* VIHF ON */
- {0x0C9, 0x27, 0x00},
- };
- struct sc_reg_access ihf_filter[] = {
- /* disable driver */
- {0x25D, 0x00, 0x0C},
- /*Filer DAC enable*/
- {0x251, 0x03, 0x03},
- {0x257, 0x0C, 0x0C},
- };
- struct sc_reg_access ihf_en[] = {
- /*enable drv*/
- {0x25D, 0x0C, 0x0c},
- };
- struct sc_reg_access ihf_unmute[] = {
-		/* unmute the IHF */
- {0x25B, 0x80, 0x80},
- {0x25C, 0x80, 0x80},
- };
- struct sc_reg_access epdac[] = {
- /* disable driver */
- {0x25D, 0x0, 0x43},
- /* DAC CONFIG ; both HP, LP on */
- {0x257, 0x03, 0x03},
- };
- struct sc_reg_access ep_enable[] = {
- /* enable driver */
- {0x25D, 0x40, 0x40},
- /* unmute the headset */
- { 0x259, 0x80, 0x80},
- { 0x25A, 0x80, 0x80},
- };
- struct sc_reg_access vib1_en[] = {
- /* enable driver, ADC */
- {0x25D, 0x10, 0x10},
- {0x264, 0x02, 0x82},
- };
- struct sc_reg_access vib2_en[] = {
- /* enable driver, ADC */
- {0x25D, 0x20, 0x20},
- {0x26A, 0x02, 0x82},
- };
- struct sc_reg_access pcm2_en[] = {
- /* enable pcm 2 */
- {0x27C, 0x1, 0x1},
- };
- int retval = 0;
-
- if (snd_msic_ops.card_status == SND_CARD_UN_INIT) {
- retval = msic_init_card();
- if (retval)
- return retval;
- }
-
- pr_debug("powering up pb.... Device %d\n", device);
- sst_sc_reg_access(vaud, PMIC_WRITE, 1);
- msleep(1);
- sst_sc_reg_access(pll, PMIC_WRITE, 1);
- msleep(1);
- switch (device) {
- case SND_SST_DEVICE_HEADSET:
- snd_msic_ops.pb_on = 1;
- snd_msic_ops.pbhs_on = 1;
- if (snd_msic_ops.output_dev_id == STEREO_HEADPHONE) {
- sst_sc_reg_access(vhs, PMIC_WRITE, 2);
- sst_sc_reg_access(hsdac, PMIC_READ_MODIFY, 3);
- sst_sc_reg_access(hs_filter, PMIC_WRITE, 2);
- sst_sc_reg_access(hs_enable, PMIC_READ_MODIFY, 4);
- } else {
- sst_sc_reg_access(epdac, PMIC_READ_MODIFY, 2);
- sst_sc_reg_access(hs_filter, PMIC_WRITE, 2);
- sst_sc_reg_access(ep_enable, PMIC_READ_MODIFY, 3);
- }
- if (snd_msic_ops.lineout_dev_id == HEADSET)
- msic_set_selected_lineout_dev(HEADSET);
- break;
- case SND_SST_DEVICE_IHF:
- snd_msic_ops.pb_on = 1;
- sst_sc_reg_access(vihf, PMIC_WRITE, 1);
- sst_sc_reg_access(ihf_filter, PMIC_READ_MODIFY, 3);
- sst_sc_reg_access(ihf_en, PMIC_READ_MODIFY, 1);
- sst_sc_reg_access(ihf_unmute, PMIC_READ_MODIFY, 2);
- if (snd_msic_ops.lineout_dev_id == IHF)
- msic_set_selected_lineout_dev(IHF);
- break;
-
- case SND_SST_DEVICE_VIBRA:
- snd_msic_ops.pb_on = 1;
- sst_sc_reg_access(vib1_en, PMIC_READ_MODIFY, 2);
- if (snd_msic_ops.lineout_dev_id == VIBRA1)
- msic_set_selected_lineout_dev(VIBRA1);
- break;
-
- case SND_SST_DEVICE_HAPTIC:
- snd_msic_ops.pb_on = 1;
- sst_sc_reg_access(vib2_en, PMIC_READ_MODIFY, 2);
- if (snd_msic_ops.lineout_dev_id == VIBRA2)
- msic_set_selected_lineout_dev(VIBRA2);
- break;
-
- default:
- pr_warn("Wrong Device %d, selected %d\n",
- device, snd_msic_ops.output_dev_id);
- }
- return sst_sc_reg_access(pcm2_en, PMIC_READ_MODIFY, 1);
-}
-
-static int msic_power_up_cp(unsigned int device)
-{
- struct sc_reg_access vaud[] = {
- /* turn on the audio power supplies */
- {0x0DB, 0x07, 0},
- };
- struct sc_reg_access pll[] = {
- /* turn on PLL */
- {0x240, 0x20, 0},
- };
- struct sc_reg_access dmic_bias[] = {
- /* Turn on AMIC supply */
- {0x247, 0xA0, 0xA0},
- };
- struct sc_reg_access dmic[] = {
- /* mic demux enable */
- {0x245, 0x3F, 0x3F},
- {0x246, 0x07, 0x07},
-
- };
- struct sc_reg_access amic_bias[] = {
- /* Turn on AMIC supply */
- {0x247, 0xFC, 0xFC},
- };
- struct sc_reg_access amic[] = {
- /*MIC EN*/
- {0x249, 0x01, 0x01},
- {0x24A, 0x01, 0x01},
- /*ADC EN*/
- {0x248, 0x05, 0x0F},
-
- };
- struct sc_reg_access pcm2[] = {
- /* enable pcm 2 */
- {0x27C, 0x1, 0x1},
- };
- struct sc_reg_access tx_on[] = {
-		/* wait for mic to stabilize before turning on audio channels */
- {0x24F, 0x3C, 0x0},
- };
- int retval = 0;
-
- if (snd_msic_ops.card_status == SND_CARD_UN_INIT) {
- retval = msic_init_card();
- if (retval)
- return retval;
- }
-
- pr_debug("powering up cp....%d\n", snd_msic_ops.input_dev_id);
- sst_sc_reg_access(vaud, PMIC_WRITE, 1);
-	msleep(500); /* FIXME: need optimized value here */
- sst_sc_reg_access(pll, PMIC_WRITE, 1);
- msleep(1);
- snd_msic_ops.cap_on = 1;
- if (snd_msic_ops.input_dev_id == AMIC) {
- sst_sc_reg_access(amic_bias, PMIC_READ_MODIFY, 1);
- msleep(1);
- sst_sc_reg_access(amic, PMIC_READ_MODIFY, 3);
- } else {
- sst_sc_reg_access(dmic_bias, PMIC_READ_MODIFY, 1);
- msleep(1);
- sst_sc_reg_access(dmic, PMIC_READ_MODIFY, 2);
- }
- msleep(1);
- sst_sc_reg_access(tx_on, PMIC_WRITE, 1);
- return sst_sc_reg_access(pcm2, PMIC_READ_MODIFY, 1);
-}
-
-static int msic_power_down(void)
-{
- struct sc_reg_access power_dn[] = {
- /* VHSP */
- {0x0DC, 0xC4, 0},
- /* VHSN */
- {0x0DD, 0x04, 0},
- /* VIHF */
- {0x0C9, 0x24, 0},
- };
- struct sc_reg_access pll[] = {
- /* turn off PLL*/
- {0x240, 0x00, 0x0},
- };
- struct sc_reg_access vaud[] = {
- /* turn off VAUD*/
- {0x0DB, 0x04, 0},
- };
-
- pr_debug("powering dn msic\n");
- snd_msic_ops.pbhs_on = 0;
- snd_msic_ops.pb_on = 0;
- snd_msic_ops.cap_on = 0;
- sst_sc_reg_access(power_dn, PMIC_WRITE, 3);
- msleep(1);
- sst_sc_reg_access(pll, PMIC_WRITE, 1);
- msleep(1);
- sst_sc_reg_access(vaud, PMIC_WRITE, 1);
- return 0;
-}
-
-static int msic_power_down_pb(unsigned int device)
-{
- struct sc_reg_access drv_enable[] = {
- {0x25D, 0x00, 0x00},
- };
- struct sc_reg_access hs_mute[] = {
- {0x259, 0x80, 0x80},
- {0x25A, 0x80, 0x80},
- {0x26C, 0x02, 0x02},
- };
- struct sc_reg_access hs_off[] = {
- {0x257, 0x00, 0x03},
- {0x250, 0x00, 0x30},
- {0x382, 0x00, 0x40},
- };
- struct sc_reg_access ihf_mute[] = {
- {0x25B, 0x80, 0x80},
- {0x25C, 0x80, 0x80},
- };
- struct sc_reg_access ihf_off[] = {
- {0x257, 0x00, 0x0C},
- {0x251, 0x00, 0x03},
- };
- struct sc_reg_access vib1_off[] = {
- {0x264, 0x00, 0x82},
- };
- struct sc_reg_access vib2_off[] = {
- {0x26A, 0x00, 0x82},
- };
- struct sc_reg_access lout_off[] = {
- {0x25e, 0x66, 0x00},
- };
- struct sc_reg_access pmode_disable[] = {
- {0x381, 0x00, 0x10},
- };
-
-
-
- pr_debug("powering dn pb for device %d\n", device);
- switch (device) {
- case SND_SST_DEVICE_HEADSET:
- snd_msic_ops.pbhs_on = 0;
- sst_sc_reg_access(hs_mute, PMIC_READ_MODIFY, 3);
- drv_enable[0].mask = 0x43;
- sst_sc_reg_access(drv_enable, PMIC_READ_MODIFY, 1);
- sst_sc_reg_access(hs_off, PMIC_READ_MODIFY, 3);
- if (snd_msic_ops.lineout_dev_id == HEADSET)
- sst_sc_reg_access(lout_off, PMIC_WRITE, 1);
- break;
-
- case SND_SST_DEVICE_IHF:
- sst_sc_reg_access(ihf_mute, PMIC_READ_MODIFY, 2);
- drv_enable[0].mask = 0x0C;
- sst_sc_reg_access(drv_enable, PMIC_READ_MODIFY, 1);
- sst_sc_reg_access(ihf_off, PMIC_READ_MODIFY, 2);
- if (snd_msic_ops.lineout_dev_id == IHF) {
- sst_sc_reg_access(lout_off, PMIC_WRITE, 1);
- sst_sc_reg_access(pmode_disable, PMIC_READ_MODIFY, 1);
- }
- break;
-
- case SND_SST_DEVICE_VIBRA:
- sst_sc_reg_access(vib1_off, PMIC_READ_MODIFY, 1);
- drv_enable[0].mask = 0x10;
- sst_sc_reg_access(drv_enable, PMIC_READ_MODIFY, 1);
- if (snd_msic_ops.lineout_dev_id == VIBRA1)
- sst_sc_reg_access(lout_off, PMIC_WRITE, 1);
- break;
-
- case SND_SST_DEVICE_HAPTIC:
- sst_sc_reg_access(vib2_off, PMIC_READ_MODIFY, 1);
- drv_enable[0].mask = 0x20;
- sst_sc_reg_access(drv_enable, PMIC_READ_MODIFY, 1);
- if (snd_msic_ops.lineout_dev_id == VIBRA2)
- sst_sc_reg_access(lout_off, PMIC_WRITE, 1);
- break;
- }
- return 0;
-}
-
-static int msic_power_down_cp(unsigned int device)
-{
- struct sc_reg_access dmic[] = {
- {0x247, 0x00, 0xA0},
- {0x245, 0x00, 0x38},
- {0x246, 0x00, 0x07},
- };
- struct sc_reg_access amic[] = {
- {0x248, 0x00, 0x05},
- {0x249, 0x00, 0x01},
- {0x24A, 0x00, 0x01},
- {0x247, 0x00, 0xA3},
- };
- struct sc_reg_access tx_off[] = {
- {0x24F, 0x00, 0x3C},
- };
-
- pr_debug("powering dn cp....\n");
- snd_msic_ops.cap_on = 0;
- sst_sc_reg_access(tx_off, PMIC_READ_MODIFY, 1);
- if (snd_msic_ops.input_dev_id == DMIC)
- sst_sc_reg_access(dmic, PMIC_READ_MODIFY, 3);
- else
- sst_sc_reg_access(amic, PMIC_READ_MODIFY, 4);
- return 0;
-}
-
-static int msic_set_selected_output_dev(u8 value)
-{
- int retval = 0;
-
- pr_debug("msic set selected output:%d\n", value);
- snd_msic_ops.output_dev_id = value;
- if (snd_msic_ops.pbhs_on)
- msic_power_up_pb(SND_SST_DEVICE_HEADSET);
- return retval;
-}
-
-static int msic_set_selected_input_dev(u8 value)
-{
-
- struct sc_reg_access sc_access_dmic[] = {
- {0x24C, 0x10, 0x0},
- };
- struct sc_reg_access sc_access_amic[] = {
- {0x24C, 0x76, 0x0},
-
- };
- int retval = 0;
-
- pr_debug("msic_set_selected_input_dev:%d\n", value);
- snd_msic_ops.input_dev_id = value;
- switch (value) {
- case AMIC:
- pr_debug("Selecting AMIC1\n");
- retval = sst_sc_reg_access(sc_access_amic, PMIC_WRITE, 1);
- break;
- case DMIC:
- pr_debug("Selecting DMIC1\n");
- retval = sst_sc_reg_access(sc_access_dmic, PMIC_WRITE, 1);
- break;
- default:
- return -EINVAL;
-
- }
- if (snd_msic_ops.cap_on)
- retval = msic_power_up_cp(SND_SST_DEVICE_CAPTURE);
- return retval;
-}
-
-static int msic_set_hw_dmic_route(u8 hw_ch_index)
-{
- struct sc_reg_access sc_access_router;
- int retval = -EINVAL;
-
- switch (hw_ch_index) {
- case HW_CH0:
- sc_access_router.reg_addr = AUDIOMUX12;
- sc_access_router.value = snd_msic_ops.hw_dmic_map[0];
- sc_access_router.mask = (MASK2 | MASK1 | MASK0);
- pr_debug("hw_ch0. value = 0x%x\n",
- sc_access_router.value);
- retval = sst_sc_reg_access(&sc_access_router,
- PMIC_READ_MODIFY, 1);
- break;
-
- case HW_CH1:
- sc_access_router.reg_addr = AUDIOMUX12;
- sc_access_router.value = (snd_msic_ops.hw_dmic_map[1]) << 4;
- sc_access_router.mask = (MASK6 | MASK5 | MASK4);
- pr_debug("### hw_ch1. value = 0x%x\n",
- sc_access_router.value);
- retval = sst_sc_reg_access(&sc_access_router,
- PMIC_READ_MODIFY, 1);
- break;
-
- case HW_CH2:
- sc_access_router.reg_addr = AUDIOMUX34;
- sc_access_router.value = snd_msic_ops.hw_dmic_map[2];
- sc_access_router.mask = (MASK2 | MASK1 | MASK0);
- pr_debug("hw_ch2. value = 0x%x\n",
- sc_access_router.value);
- retval = sst_sc_reg_access(&sc_access_router,
- PMIC_READ_MODIFY, 1);
- break;
-
- case HW_CH3:
- sc_access_router.reg_addr = AUDIOMUX34;
- sc_access_router.value = (snd_msic_ops.hw_dmic_map[3]) << 4;
- sc_access_router.mask = (MASK6 | MASK5 | MASK4);
- pr_debug("hw_ch3. value = 0x%x\n",
- sc_access_router.value);
- retval = sst_sc_reg_access(&sc_access_router,
- PMIC_READ_MODIFY, 1);
- break;
- }
-
- return retval;
-}
-
-
-static int msic_set_pcm_voice_params(void)
-{
- return 0;
-}
-
-static int msic_set_pcm_audio_params(int sfreq, int word_size, int num_channel)
-{
- return 0;
-}
-
-static int msic_set_audio_port(int status)
-{
- return 0;
-}
-
-static int msic_set_voice_port(int status)
-{
- return 0;
-}
-
-static int msic_set_mute(int dev_id, u8 value)
-{
- return 0;
-}
-
-static int msic_set_vol(int dev_id, int value)
-{
- return 0;
-}
-
-static int msic_get_mute(int dev_id, u8 *value)
-{
- return 0;
-}
-
-static int msic_get_vol(int dev_id, int *value)
-{
- return 0;
-}
-
-static int msic_set_headset_state(int state)
-{
- struct sc_reg_access hs_enable[] = {
- {0x25D, 0x03, 0x03},
- };
-
- if (state)
- /*enable*/
- sst_sc_reg_access(hs_enable, PMIC_READ_MODIFY, 1);
- else {
- hs_enable[0].value = 0;
- sst_sc_reg_access(hs_enable, PMIC_READ_MODIFY, 1);
- }
- return 0;
-}
-
-static int msic_enable_mic_bias(void)
-{
- struct sc_reg_access jack_interrupt_reg[] = {
- {0x0DB, 0x07, 0x00},
-
- };
- struct sc_reg_access jack_bias_reg[] = {
- {0x247, 0x0C, 0x0C},
- };
-
- sst_sc_reg_access(jack_interrupt_reg, PMIC_WRITE, 1);
- sst_sc_reg_access(jack_bias_reg, PMIC_READ_MODIFY, 1);
- return 0;
-}
-
-static int msic_disable_mic_bias(void)
-{
- if (snd_msic_ops.jack_interrupt_status == true)
- return 0;
- if (!(snd_msic_ops.pb_on || snd_msic_ops.cap_on))
- msic_power_down();
- return 0;
-}
-
-static int msic_disable_jack_btn(void)
-{
- struct sc_reg_access btn_disable[] = {
- {0x26C, 0x00, 0x01}
- };
-
- if (!(snd_msic_ops.pb_on || snd_msic_ops.cap_on))
- msic_power_down();
- snd_msic_ops.jack_interrupt_status = false;
- return sst_sc_reg_access(btn_disable, PMIC_READ_MODIFY, 1);
-}
-
-static int msic_enable_jack_btn(void)
-{
- struct sc_reg_access btn_enable[] = {
- {0x26b, 0x77, 0x00},
- {0x26C, 0x01, 0x00},
- };
- return sst_sc_reg_access(btn_enable, PMIC_WRITE, 2);
-}
-static int msic_convert_adc_to_mvolt(unsigned int mic_bias)
-{
- return (ADC_ONE_LSB_MULTIPLIER * mic_bias) / 1000;
-}
-int msic_get_headset_state(int mic_bias)
-{
- struct sc_reg_access msic_hs_toggle[] = {
- {0x070, 0x00, 0x01},
- };
- if (mic_bias >= 0 && mic_bias < 400) {
-
- pr_debug("Detected Headphone!!!\n");
- sst_sc_reg_access(msic_hs_toggle, PMIC_READ_MODIFY, 1);
-
- } else if (mic_bias > 400 && mic_bias < 650) {
-
- pr_debug("Detected American headset\n");
- msic_hs_toggle[0].value = 0x01;
- sst_sc_reg_access(msic_hs_toggle, PMIC_READ_MODIFY, 1);
-
- } else if (mic_bias >= 650 && mic_bias < 2000) {
-
- pr_debug("Detected Headset!!!\n");
- sst_sc_reg_access(msic_hs_toggle, PMIC_READ_MODIFY, 1);
- /*power on jack and btn*/
- snd_msic_ops.jack_interrupt_status = true;
- msic_enable_jack_btn();
- msic_enable_mic_bias();
- return SND_JACK_HEADSET;
-
- } else
- pr_debug("Detected Open Cable!!!\n");
-
- return SND_JACK_HEADPHONE;
-}
-
-static int msic_get_mic_bias(void *arg)
-{
- struct snd_intelmad *intelmad_drv = (struct snd_intelmad *)arg;
- u16 adc_adr = intelmad_drv->adc_address;
- u16 adc_val;
- int ret;
- struct sc_reg_access adc_ctrl3[2] = {
- {0x1C2, 0x05, 0x0},
- };
-
- struct sc_reg_access audio_adc_reg1 = {0,};
- struct sc_reg_access audio_adc_reg2 = {0,};
-
- msic_enable_mic_bias();
- /* Enable the msic for conversion before reading */
- ret = sst_sc_reg_access(adc_ctrl3, PMIC_WRITE, 1);
- if (ret)
- return ret;
- adc_ctrl3[0].value = 0x04;
- /* Re-toggle the RRDATARD bit */
- ret = sst_sc_reg_access(adc_ctrl3, PMIC_WRITE, 1);
- if (ret)
- return ret;
-
- audio_adc_reg1.reg_addr = adc_adr;
- /* Read the higher bits of data */
- msleep(1000);
- ret = sst_sc_reg_access(&audio_adc_reg1, PMIC_READ, 1);
- if (ret)
- return ret;
-	pr_debug("adc read value %x\n", audio_adc_reg1.value);
-
-	/* Shift bits to accommodate the lower two data bits */
- adc_val = (audio_adc_reg1.value << 2);
- adc_adr++;
-	audio_adc_reg2.reg_addr = adc_adr;
- ret = sst_sc_reg_access(&audio_adc_reg2, PMIC_READ, 1);
- if (ret)
- return ret;
- pr_debug("adc read value %x", audio_adc_reg2.value);
-
- /* Adding lower two bits to the higher bits */
- audio_adc_reg2.value &= 03;
- adc_val += audio_adc_reg2.value;
-
- pr_debug("ADC value 0x%x", adc_val);
- msic_disable_mic_bias();
- return adc_val;
-}
-
-static void msic_pmic_irq_cb(void *cb_data, u8 intsts)
-{
- struct mad_jack *mjack = NULL;
- unsigned int present = 0, jack_event_flag = 0, buttonpressflag = 0;
- struct snd_intelmad *intelmaddata = cb_data;
- int retval = 0;
-
- pr_debug("value returned = 0x%x\n", intsts);
-
- if (snd_msic_ops.card_status == SND_CARD_UN_INIT) {
- retval = msic_init_card();
- if (retval)
- return;
- }
-
- mjack = &intelmaddata->jack[0];
- if (intsts & 0x1) {
- pr_debug("MAD short_push detected\n");
- present = SND_JACK_BTN_0;
- jack_event_flag = buttonpressflag = 1;
- mjack->jack.type = SND_JACK_BTN_0;
- mjack->jack.key[0] = BTN_0 ;
- }
-
- if (intsts & 0x2) {
- pr_debug(":MAD long_push detected\n");
- jack_event_flag = buttonpressflag = 1;
- mjack->jack.type = present = SND_JACK_BTN_1;
- mjack->jack.key[1] = BTN_1;
- }
-
- if (intsts & 0x4) {
- unsigned int mic_bias;
- jack_event_flag = 1;
- buttonpressflag = 0;
- mic_bias = msic_get_mic_bias(intelmaddata);
- pr_debug("mic_bias = %d\n", mic_bias);
- mic_bias = msic_convert_adc_to_mvolt(mic_bias);
- pr_debug("mic_bias after conversion = %d mV\n", mic_bias);
- mjack->jack_dev_state = msic_get_headset_state(mic_bias);
- mjack->jack.type = present = mjack->jack_dev_state;
- }
-
- if (intsts & 0x8) {
- mjack->jack.type = mjack->jack_dev_state;
- present = 0;
- jack_event_flag = 1;
- buttonpressflag = 0;
- msic_disable_jack_btn();
- msic_disable_mic_bias();
- }
- if (jack_event_flag)
- sst_mad_send_jack_report(&mjack->jack,
- buttonpressflag, present);
-}
-
-
-
-struct snd_pmic_ops snd_msic_ops = {
- .set_input_dev = msic_set_selected_input_dev,
- .set_output_dev = msic_set_selected_output_dev,
- .set_lineout_dev = msic_set_selected_lineout_dev,
- .set_hw_dmic_route = msic_set_hw_dmic_route,
- .set_mute = msic_set_mute,
- .get_mute = msic_get_mute,
- .set_vol = msic_set_vol,
- .get_vol = msic_get_vol,
- .init_card = msic_init_card,
- .set_pcm_audio_params = msic_set_pcm_audio_params,
- .set_pcm_voice_params = msic_set_pcm_voice_params,
- .set_voice_port = msic_set_voice_port,
- .set_audio_port = msic_set_audio_port,
- .power_up_pmic_pb = msic_power_up_pb,
- .power_up_pmic_cp = msic_power_up_cp,
- .power_down_pmic_pb = msic_power_down_pb,
- .power_down_pmic_cp = msic_power_down_cp,
- .power_down_pmic = msic_power_down,
- .pmic_irq_cb = msic_pmic_irq_cb,
- .pmic_jack_enable = msic_enable_mic_bias,
- .pmic_get_mic_bias = msic_get_mic_bias,
- .pmic_set_headset_state = msic_set_headset_state,
-};
diff --git a/drivers/staging/intel_sst/intelmid_pvt.c b/drivers/staging/intel_sst/intelmid_pvt.c
deleted file mode 100644
index 90e0e64c0ab0..000000000000
--- a/drivers/staging/intel_sst/intelmid_pvt.c
+++ /dev/null
@@ -1,173 +0,0 @@
-/*
- * intelmid_pvt.c - Intel Sound card driver for MID
- *
- * Copyright (C) 2008-10 Intel Corp
- * Authors: Harsha Priya <priya.harsha@intel.com>
- * Vinod Koul <vinod.koul@intel.com>
- * KP Jeeja <jeeja.kp@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * ALSA driver for Intel MID sound card chipset - private helper functions
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/io.h>
-#include <asm/intel_scu_ipc.h>
-#include <sound/core.h>
-#include <sound/control.h>
-#include <sound/pcm.h>
-#include "intel_sst.h"
-#include "intel_sst_ioctl.h"
-#include "intelmid_snd_control.h"
-#include "intelmid.h"
-
-
-void period_elapsed(void *mad_substream)
-{
- struct snd_pcm_substream *substream = mad_substream;
- struct mad_stream_pvt *stream;
-
-
-
- if (!substream || !substream->runtime)
- return;
- stream = substream->runtime->private_data;
- if (!stream)
- return;
-
- if (stream->stream_status != RUNNING)
- return;
- pr_debug("calling period elapsed\n");
- snd_pcm_period_elapsed(substream);
- return;
-}
-
-
-int snd_intelmad_alloc_stream(struct snd_pcm_substream *substream)
-{
- struct snd_intelmad *intelmaddata = snd_pcm_substream_chip(substream);
- struct mad_stream_pvt *stream = substream->runtime->private_data;
- struct snd_sst_stream_params param = {{{0,},},};
- struct snd_sst_params str_params = {0};
- int ret_val;
-
- /* set codec params and inform SST driver the same */
-
- param.uc.pcm_params.codec = SST_CODEC_TYPE_PCM;
- param.uc.pcm_params.num_chan = (u8) substream->runtime->channels;
- param.uc.pcm_params.pcm_wd_sz = substream->runtime->sample_bits;
- param.uc.pcm_params.reserved = 0;
- param.uc.pcm_params.sfreq = substream->runtime->rate;
- param.uc.pcm_params.ring_buffer_size =
- snd_pcm_lib_buffer_bytes(substream);
- param.uc.pcm_params.period_count = substream->runtime->period_size;
- param.uc.pcm_params.ring_buffer_addr =
- virt_to_phys(substream->runtime->dma_area);
- pr_debug("period_cnt = %d\n", param.uc.pcm_params.period_count);
- pr_debug("sfreq= %d, wd_sz = %d\n",
- param.uc.pcm_params.sfreq, param.uc.pcm_params.pcm_wd_sz);
-
- str_params.sparams = param;
- str_params.codec = SST_CODEC_TYPE_PCM;
-
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
- str_params.ops = STREAM_OPS_PLAYBACK;
-		pr_debug("Playback stream, Device %d\n", stream->device);
- } else {
- str_params.ops = STREAM_OPS_CAPTURE;
- stream->device = SND_SST_DEVICE_CAPTURE;
-		pr_debug("Capture stream, Device %d\n", stream->device);
- }
- str_params.device_type = stream->device;
- ret_val = intelmaddata->sstdrv_ops->pcm_control->open(&str_params);
- pr_debug("sst: SST_SND_PLAY/CAPTURE ret_val = %x\n", ret_val);
- if (ret_val < 0)
- return ret_val;
-
- stream->stream_info.str_id = ret_val;
- stream->stream_status = INIT;
- stream->stream_info.buffer_ptr = 0;
- pr_debug("str id : %d\n", stream->stream_info.str_id);
-
- return ret_val;
-}
-
-int snd_intelmad_init_stream(struct snd_pcm_substream *substream)
-{
- struct mad_stream_pvt *stream = substream->runtime->private_data;
- struct snd_intelmad *intelmaddata = snd_pcm_substream_chip(substream);
- int ret_val;
-
- pr_debug("setting buffer ptr param\n");
- stream->stream_info.period_elapsed = period_elapsed;
- stream->stream_info.mad_substream = substream;
- stream->stream_info.buffer_ptr = 0;
- stream->stream_info.sfreq = substream->runtime->rate;
- ret_val = intelmaddata->sstdrv_ops->pcm_control->device_control(
- SST_SND_STREAM_INIT, &stream->stream_info);
- if (ret_val)
- pr_err("control_set ret error %d\n", ret_val);
- return ret_val;
-
-}
-
-
-/**
- * sst_sc_reg_access - IPC read/write wrapper
- *
- * @sc_access: array of data, addresses and mask
- * @type: operation type
- * @num_val: number of registers to operate on
- *
- * Read/write/read-modify-write operations on registers accessed through the
- * SCU (sound card and a few SST DSP registers that are not accessible to the IA)
- */
-int sst_sc_reg_access(struct sc_reg_access *sc_access,
- int type, int num_val)
-{
- int i, retval = 0;
- if (type == PMIC_WRITE) {
- for (i = 0; i < num_val; i++) {
- retval = intel_scu_ipc_iowrite8(sc_access[i].reg_addr,
- sc_access[i].value);
- if (retval)
- goto err;
- }
- } else if (type == PMIC_READ) {
- for (i = 0; i < num_val; i++) {
- retval = intel_scu_ipc_ioread8(sc_access[i].reg_addr,
- &(sc_access[i].value));
- if (retval)
- goto err;
- }
- } else {
- for (i = 0; i < num_val; i++) {
- retval = intel_scu_ipc_update_register(
- sc_access[i].reg_addr, sc_access[i].value,
- sc_access[i].mask);
- if (retval)
- goto err;
- }
- }
- return 0;
-err:
-	pr_err("IPC failed for cmd %d, error %d\n", type, retval);
-	pr_err("reg addr:0x%2x value:0x%2x\n",
-		sc_access[i].reg_addr, sc_access[i].value);
- return retval;
-}
diff --git a/drivers/staging/intel_sst/intelmid_snd_control.h b/drivers/staging/intel_sst/intelmid_snd_control.h
deleted file mode 100644
index 06ad3a10099c..000000000000
--- a/drivers/staging/intel_sst/intelmid_snd_control.h
+++ /dev/null
@@ -1,123 +0,0 @@
-#ifndef __INTELMID_SND_CTRL_H__
-#define __INTELMID_SND_CTRL_H__
-/*
- * intelmid_snd_control.h - Intel Sound card driver for MID
- *
- * Copyright (C) 2008-10 Intel Corporation
- * Authors: Vinod Koul <vinod.koul@intel.com>
- * Harsha Priya <priya.harsha@intel.com>
- * Dharageswari R <dharageswari.r@intel.com>
- * KP Jeeja <jeeja.kp@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This file defines all snd control functions
- */
-
-/*
-Mask bits
-*/
-#define MASK0 0x01 /* 0000 0001 */
-#define MASK1 0x02 /* 0000 0010 */
-#define MASK2 0x04 /* 0000 0100 */
-#define MASK3 0x08 /* 0000 1000 */
-#define MASK4 0x10 /* 0001 0000 */
-#define MASK5 0x20 /* 0010 0000 */
-#define MASK6 0x40 /* 0100 0000 */
-#define MASK7 0x80 /* 1000 0000 */
-/*
-value bits
-*/
-#define VALUE0 0x01 /* 0000 0001 */
-#define VALUE1 0x02 /* 0000 0010 */
-#define VALUE2 0x04 /* 0000 0100 */
-#define VALUE3 0x08 /* 0000 1000 */
-#define VALUE4 0x10 /* 0001 0000 */
-#define VALUE5 0x20 /* 0010 0000 */
-#define VALUE6 0x40 /* 0100 0000 */
-#define VALUE7 0x80 /* 1000 0000 */
-
-#define MUTE 0 /* ALSA Passes 0 for mute */
-#define UNMUTE 1 /* ALSA Passes 1 for unmute */
-
-#define MAX_VOL_PMIC_VENDOR0 0x3f /* max vol in dB for stereo & voice DAC */
-#define MIN_VOL_PMIC_VENDOR0 0 /* min vol in dB for stereo & voice DAC */
-/* Headphone volume control */
-#define MAX_HP_VOL_PMIC_VENDOR1 6 /* max volume in dB for HP */
-#define MIN_HP_VOL_PMIC_VENDOR1 (-84) /* min volume in dB for HP */
-#define MAX_HP_VOL_INDX_PMIC_VENDOR1 40 /* Number of HP volume control values */
-
-/* Mono Earpiece Volume control */
-#define MAX_EP_VOL_PMIC_VENDOR1 0 /* max volume in dB for EP */
-#define MIN_EP_VOL_PMIC_VENDOR1 (-75) /* min volume in dB for EP */
-#define MAX_EP_VOL_INDX_PMIC_VENDOR1 32 /* Number of EP volume control values */
-
-int sst_sc_reg_access(struct sc_reg_access *sc_access,
- int type, int num_val);
-extern struct snd_pmic_ops snd_pmic_ops_fs;
-extern struct snd_pmic_ops snd_pmic_ops_mx;
-extern struct snd_pmic_ops snd_pmic_ops_nc;
-extern struct snd_pmic_ops snd_msic_ops;
-
-/* device */
-enum SND_INPUT_DEVICE {
- AMIC,
- DMIC,
- HS_MIC,
- IN_UNDEFINED
-};
-enum SND_LINE_OUT_DEVICE {
- HEADSET,
- IHF,
- VIBRA1,
- VIBRA2,
- NONE,
-};
-
-enum SND_OUTPUT_DEVICE {
- STEREO_HEADPHONE,
- MONO_EARPIECE,
-
- INTERNAL_SPKR,
- RECEIVER,
- OUT_UNDEFINED
-};
-
-enum pmic_controls {
- PMIC_SND_HP_MIC_MUTE = 0x0001,
- PMIC_SND_AMIC_MUTE = 0x0002,
- PMIC_SND_DMIC_MUTE = 0x0003,
- PMIC_SND_CAPTURE_VOL = 0x0004,
-/* Output controls */
- PMIC_SND_LEFT_PB_VOL = 0x0010,
- PMIC_SND_RIGHT_PB_VOL = 0x0011,
- PMIC_SND_LEFT_HP_MUTE = 0x0012,
- PMIC_SND_RIGHT_HP_MUTE = 0x0013,
- PMIC_SND_LEFT_SPEAKER_MUTE = 0x0014,
- PMIC_SND_RIGHT_SPEAKER_MUTE = 0x0015,
- PMIC_SND_RECEIVER_VOL = 0x0016,
- PMIC_SND_RECEIVER_MUTE = 0x0017,
- PMIC_SND_LEFT_MASTER_VOL = 0x0018,
- PMIC_SND_RIGHT_MASTER_VOL = 0x0019,
-/* Other controls */
- PMIC_SND_MUTE_ALL = 0x0020,
- PMIC_MAX_CONTROLS = 0x0020,
-};
-
-#endif /* __INTELMID_SND_CTRL_H__ */
-
-
diff --git a/drivers/staging/intel_sst/intelmid_v0_control.c b/drivers/staging/intel_sst/intelmid_v0_control.c
deleted file mode 100644
index b8dfdb9bc1aa..000000000000
--- a/drivers/staging/intel_sst/intelmid_v0_control.c
+++ /dev/null
@@ -1,866 +0,0 @@
-/*
- * intelmid_v0_control.c - Intel SST Driver for audio engine
- *
- * Copyright (C) 2008-10 Intel Corporation
- * Authors: Vinod Koul <vinod.koul@intel.com>
- * Harsha Priya <priya.harsha@intel.com>
- * Dharageswari R <dharageswari.r@intel.com>
- * KP Jeeja <jeeja.kp@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This file contains the control operations of vendor 1
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/pci.h>
-#include <linux/file.h>
-#include <sound/control.h>
-#include "intel_sst.h"
-#include "intelmid_snd_control.h"
-#include "intelmid.h"
-
-enum _reg_v1 {
- VOICEPORT1 = 0x180,
- VOICEPORT2 = 0x181,
- AUDIOPORT1 = 0x182,
- AUDIOPORT2 = 0x183,
- MISCVOICECTRL = 0x184,
- MISCAUDCTRL = 0x185,
- DMICCTRL1 = 0x186,
- AUDIOBIAS = 0x187,
- MICCTRL = 0x188,
- MICLICTRL1 = 0x189,
- MICLICTRL2 = 0x18A,
- MICLICTRL3 = 0x18B,
- VOICEDACCTRL1 = 0x18C,
- STEREOADCCTRL = 0x18D,
- AUD15 = 0x18E,
- AUD16 = 0x18F,
- AUD17 = 0x190,
- AUD18 = 0x191,
- RMIXOUTSEL = 0x192,
- ANALOGLBR = 0x193,
- ANALOGLBL = 0x194,
- POWERCTRL1 = 0x195,
- POWERCTRL2 = 0x196,
- HEADSETDETECTINT = 0x197,
- HEADSETDETECTINTMASK = 0x198,
- TRIMENABLE = 0x199,
-};
-
-int rev_id = 0x20;
-static bool jack_det_enabled;
-
-/**
- * fs_init_card - initialize the sound card
- *
- * This initializes the audio paths to known values for this sound card
- */
-static int fs_init_card(void)
-{
- struct sc_reg_access sc_access[] = {
- {0x180, 0x00, 0x0},
- {0x181, 0x00, 0x0},
- {0x182, 0xF8, 0x0},
- {0x183, 0x08, 0x0},
- {0x184, 0x00, 0x0},
- {0x185, 0x40, 0x0},
- {0x186, 0x06, 0x0},
- {0x187, 0x80, 0x0},
- {0x188, 0x40, 0x0},
- {0x189, 0x39, 0x0},
- {0x18a, 0x39, 0x0},
- {0x18b, 0x1F, 0x0},
- {0x18c, 0x00, 0x0},
- {0x18d, 0x00, 0x0},
- {0x18e, 0x39, 0x0},
- {0x18f, 0x39, 0x0},
- {0x190, 0x39, 0x0},
- {0x191, 0x11, 0x0},
- {0x192, 0x0E, 0x0},
- {0x193, 0x00, 0x0},
- {0x194, 0x00, 0x0},
- {0x195, 0x00, 0x0},
- {0x196, 0x7C, 0x0},
- {0x197, 0x00, 0x0},
- {0x198, 0x0B, 0x0},
- {0x199, 0x00, 0x0},
- {0x037, 0x3F, 0x0},
- };
-
- snd_pmic_ops_fs.card_status = SND_CARD_INIT_DONE;
- snd_pmic_ops_fs.master_mute = UNMUTE;
- snd_pmic_ops_fs.mute_status = UNMUTE;
- snd_pmic_ops_fs.num_channel = 2;
- return sst_sc_reg_access(sc_access, PMIC_WRITE, 27);
-}
-
-static int fs_enable_audiodac(int value)
-{
- struct sc_reg_access sc_access[3];
- sc_access[0].reg_addr = AUD16;
- sc_access[1].reg_addr = AUD17;
- sc_access[2].reg_addr = AUD15;
- sc_access[0].mask = sc_access[1].mask = sc_access[2].mask = MASK7;
-
- if (snd_pmic_ops_fs.mute_status == MUTE)
- return 0;
- if (value == MUTE) {
- sc_access[0].value = sc_access[1].value =
- sc_access[2].value = 0x80;
-
- } else {
- sc_access[0].value = sc_access[1].value =
- sc_access[2].value = 0x0;
- }
- if (snd_pmic_ops_fs.num_channel == 1)
- sc_access[1].value = sc_access[2].value = 0x80;
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 3);
-
-}
-
-static int fs_power_up_pb(unsigned int port)
-{
- struct sc_reg_access sc_access[] = {
- {AUDIOBIAS, 0x00, MASK7},
- {POWERCTRL1, 0xC6, 0xC6},
- {POWERCTRL2, 0x30, 0x30},
-
- };
- int retval = 0;
-
- if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
- retval = fs_init_card();
- if (retval)
- return retval;
- retval = fs_enable_audiodac(MUTE);
- retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 3);
-
- if (retval)
- return retval;
-
- pr_debug("in fs power up pb\n");
- return fs_enable_audiodac(UNMUTE);
-}
-
-static int fs_power_down_pb(unsigned int device)
-{
- struct sc_reg_access sc_access[] = {
- {POWERCTRL1, 0x00, 0xC6},
- {POWERCTRL2, 0x00, 0x30},
- };
- int retval = 0;
-
- if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
- retval = fs_init_card();
- if (retval)
- return retval;
- retval = fs_enable_audiodac(MUTE);
- retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
-
- if (retval)
- return retval;
-
-	pr_debug("in fs power down pb\n");
- return fs_enable_audiodac(UNMUTE);
-}
-
-static int fs_power_up_cp(unsigned int port)
-{
- struct sc_reg_access sc_access[] = {
-		/* NOTE: power up the A ADC only, as turning on the V ADC causes noise */
-		{POWERCTRL2, 0x32, 0x32},
-		{AUDIOBIAS, 0x00, MASK7},
- };
- int retval = 0;
-
- if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
- retval = fs_init_card();
- if (retval)
- return retval;
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
-}
-
-static int fs_power_down_cp(unsigned int device)
-{
- struct sc_reg_access sc_access[] = {
- {POWERCTRL2, 0x00, 0x03},
- };
- int retval = 0;
-
- if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
- retval = fs_init_card();
- if (retval)
- return retval;
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
-}
-
-static int fs_power_down(void)
-{
- int retval = 0;
- struct sc_reg_access sc_access[] = {
- {AUDIOBIAS, MASK7, MASK7},
- };
-
- if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
- retval = fs_init_card();
- if (retval)
- return retval;
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
-}
-
-static int fs_set_pcm_voice_params(void)
-{
- struct sc_reg_access sc_access[] = {
- {0x180, 0xA0, 0},
- {0x181, 0x04, 0},
- {0x182, 0x0, 0},
- {0x183, 0x0, 0},
- {0x184, 0x18, 0},
- {0x185, 0x40, 0},
- {0x186, 0x06, 0},
- {0x187, 0x0, 0},
- {0x188, 0x10, 0},
- {0x189, 0x39, 0},
- {0x18a, 0x39, 0},
- {0x18b, 0x02, 0},
- {0x18c, 0x0, 0},
- {0x18d, 0x0, 0},
- {0x18e, 0x39, 0},
- {0x18f, 0x0, 0},
- {0x190, 0x0, 0},
- {0x191, 0x20, 0},
- {0x192, 0x20, 0},
- {0x193, 0x0, 0},
- {0x194, 0x0, 0},
- {0x195, 0x06, 0},
- {0x196, 0x25, 0},
- {0x197, 0x0, 0},
- {0x198, 0xF, 0},
- {0x199, 0x0, 0},
- };
- int retval = 0;
-
- if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
- retval = fs_init_card();
- if (retval)
- return retval;
- return sst_sc_reg_access(sc_access, PMIC_WRITE, 26);
-}
-
-static int fs_set_audio_port(int status)
-{
- struct sc_reg_access sc_access[2];
- int retval = 0;
-
- if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
- retval = fs_init_card();
- if (retval)
- return retval;
- if (status == DEACTIVATE) {
- /* Deactivate audio port-tristate and power */
- sc_access[0].value = 0x00;
- sc_access[0].mask = MASK6|MASK7;
- sc_access[0].reg_addr = AUDIOPORT1;
- sc_access[1].value = 0x00;
- sc_access[1].mask = MASK4|MASK5;
- sc_access[1].reg_addr = POWERCTRL2;
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
- } else if (status == ACTIVATE) {
- /* activate audio port */
- sc_access[0].value = 0xC0;
- sc_access[0].mask = MASK6|MASK7;
- sc_access[0].reg_addr = AUDIOPORT1;
- sc_access[1].value = 0x30;
- sc_access[1].mask = MASK4|MASK5;
- sc_access[1].reg_addr = POWERCTRL2;
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
- } else
- return -EINVAL;
-}
-
-static int fs_set_voice_port(int status)
-{
- struct sc_reg_access sc_access[2];
- int retval = 0;
-
- if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
- retval = fs_init_card();
- if (retval)
- return retval;
- if (status == DEACTIVATE) {
-		/* Deactivate voice port-tristate and power */
- sc_access[0].value = 0x00;
- sc_access[0].mask = MASK6|MASK7;
- sc_access[0].reg_addr = VOICEPORT1;
- sc_access[1].value = 0x00;
- sc_access[1].mask = MASK0|MASK1;
- sc_access[1].reg_addr = POWERCTRL2;
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
- } else if (status == ACTIVATE) {
-		/* activate voice port */
- sc_access[0].value = 0xC0;
- sc_access[0].mask = MASK6|MASK7;
- sc_access[0].reg_addr = VOICEPORT1;
- sc_access[1].value = 0x03;
- sc_access[1].mask = MASK0|MASK1;
- sc_access[1].reg_addr = POWERCTRL2;
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
- } else
- return -EINVAL;
-}
-
-static int fs_set_pcm_audio_params(int sfreq, int word_size, int num_channel)
-{
- u8 config1 = 0;
- struct sc_reg_access sc_access[4];
- int retval = 0, num_value = 0;
-
- if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
- retval = fs_init_card();
- if (retval)
- return retval;
- switch (sfreq) {
- case 8000:
- config1 = 0x00;
- break;
- case 11025:
- config1 = 0x01;
- break;
- case 12000:
- config1 = 0x02;
- break;
- case 16000:
- config1 = 0x03;
- break;
- case 22050:
- config1 = 0x04;
- break;
- case 24000:
- config1 = 0x05;
- break;
- case 26000:
- config1 = 0x06;
- break;
- case 32000:
- config1 = 0x07;
- break;
- case 44100:
- config1 = 0x08;
- break;
- case 48000:
- config1 = 0x09;
- break;
- }
- snd_pmic_ops_fs.num_channel = num_channel;
- if (snd_pmic_ops_fs.num_channel == 1) {
- sc_access[0].reg_addr = AUD17;
- sc_access[1].reg_addr = AUD15;
- sc_access[0].mask = sc_access[1].mask = MASK7;
- sc_access[0].value = sc_access[1].value = 0x80;
- sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
-
- } else {
- sc_access[0].reg_addr = AUD17;
- sc_access[1].reg_addr = AUD15;
- sc_access[0].mask = sc_access[1].mask = MASK7;
- sc_access[0].value = sc_access[1].value = 0x00;
- sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
-
- }
- pr_debug("sfreq:%d,Register value = %x\n", sfreq, config1);
-
- if (word_size == 24) {
- sc_access[0].reg_addr = AUDIOPORT1;
- sc_access[0].mask = MASK0|MASK1|MASK2|MASK3;
- sc_access[0].value = 0xFB;
-
-
- sc_access[1].reg_addr = AUDIOPORT2;
- sc_access[1].value = config1 | 0x10;
- sc_access[1].mask = MASK0 | MASK1 | MASK2 | MASK3
- | MASK4 | MASK5 | MASK6;
-
- sc_access[2].reg_addr = MISCAUDCTRL;
- sc_access[2].value = 0x02;
- sc_access[2].mask = 0x02;
-
- num_value = 3 ;
-
- } else {
-
- sc_access[0].reg_addr = AUDIOPORT2;
- sc_access[0].value = config1;
- sc_access[0].mask = MASK0|MASK1|MASK2|MASK3;
-
- sc_access[1].reg_addr = MISCAUDCTRL;
- sc_access[1].value = 0x00;
- sc_access[1].mask = 0x02;
- num_value = 2;
- }
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, num_value);
-
-}
-
-static int fs_set_selected_input_dev(u8 value)
-{
- struct sc_reg_access sc_access_dmic[] = {
- {MICCTRL, 0x81, 0xf7},
- {MICLICTRL3, 0x00, 0xE0},
- };
- struct sc_reg_access sc_access_mic[] = {
- {MICCTRL, 0x40, MASK2|MASK4|MASK5|MASK6|MASK7},
- {MICLICTRL3, 0x00, 0xE0},
- };
- struct sc_reg_access sc_access_hsmic[] = {
- {MICCTRL, 0x10, MASK2|MASK4|MASK5|MASK6|MASK7},
- {MICLICTRL3, 0x00, 0xE0},
- };
-
- int retval = 0;
-
- if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
- retval = fs_init_card();
- if (retval)
- return retval;
-
- switch (value) {
- case AMIC:
- pr_debug("Selecting amic not supported in mono cfg\n");
- return sst_sc_reg_access(sc_access_mic, PMIC_READ_MODIFY, 2);
- break;
-
- case HS_MIC:
- pr_debug("Selecting hsmic\n");
- return sst_sc_reg_access(sc_access_hsmic,
- PMIC_READ_MODIFY, 2);
- break;
-
- case DMIC:
- pr_debug("Selecting dmic\n");
- return sst_sc_reg_access(sc_access_dmic, PMIC_READ_MODIFY, 2);
- break;
-
- default:
- return -EINVAL;
-
- }
-}
-
-static int fs_set_selected_output_dev(u8 value)
-{
- struct sc_reg_access sc_access_hp[] = {
- {0x191, 0x11, 0x0},
- {0x192, 0x0E, 0x0},
- };
- struct sc_reg_access sc_access_is[] = {
- {0x191, 0x17, 0xFF},
- {0x192, 0x08, 0xFF},
- };
- int retval = 0;
-
- if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
- retval = fs_init_card();
- if (retval)
- return retval;
-
- switch (value) {
- case STEREO_HEADPHONE:
- pr_debug("SST DBG:Selecting headphone\n");
- return sst_sc_reg_access(sc_access_hp, PMIC_WRITE, 2);
- break;
- case MONO_EARPIECE:
- case INTERNAL_SPKR:
- pr_debug("SST DBG:Selecting internal spkr\n");
- return sst_sc_reg_access(sc_access_is, PMIC_READ_MODIFY, 2);
- break;
-
- default:
- return -EINVAL;
-
- }
-}
-
-static int fs_set_mute(int dev_id, u8 value)
-{
- struct sc_reg_access sc_access[6] = {{0,},};
- int reg_num = 0;
- int retval = 0;
-
- if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
- retval = fs_init_card();
- if (retval)
- return retval;
-
-
- pr_debug("dev_id:0x%x value:0x%x\n", dev_id, value);
- switch (dev_id) {
- case PMIC_SND_DMIC_MUTE:
- sc_access[0].reg_addr = MICCTRL;
- sc_access[1].reg_addr = MICLICTRL1;
- sc_access[2].reg_addr = MICLICTRL2;
- sc_access[0].mask = MASK5;
- sc_access[1].mask = sc_access[2].mask = MASK6;
- if (value == MUTE) {
- sc_access[0].value = 0x20;
- sc_access[2].value = sc_access[1].value = 0x40;
- } else
- sc_access[0].value = sc_access[1].value
- = sc_access[2].value = 0x0;
- reg_num = 3;
- break;
- case PMIC_SND_HP_MIC_MUTE:
- case PMIC_SND_AMIC_MUTE:
- sc_access[0].reg_addr = MICLICTRL1;
- sc_access[1].reg_addr = MICLICTRL2;
- sc_access[0].mask = sc_access[1].mask = MASK6;
- if (value == MUTE)
- sc_access[0].value = sc_access[1].value = 0x40;
- else
- sc_access[0].value = sc_access[1].value = 0x0;
- reg_num = 2;
- break;
- case PMIC_SND_LEFT_SPEAKER_MUTE:
- case PMIC_SND_LEFT_HP_MUTE:
- sc_access[0].reg_addr = AUD16;
- sc_access[1].reg_addr = AUD15;
-
- sc_access[0].mask = sc_access[1].mask = MASK7;
- if (value == MUTE)
- sc_access[0].value = sc_access[1].value = 0x80;
- else
- sc_access[0].value = sc_access[1].value = 0x0;
- reg_num = 2;
- snd_pmic_ops_fs.mute_status = value;
- break;
- case PMIC_SND_RIGHT_HP_MUTE:
- case PMIC_SND_RIGHT_SPEAKER_MUTE:
- sc_access[0].reg_addr = AUD17;
- sc_access[1].reg_addr = AUD15;
- sc_access[0].mask = sc_access[1].mask = MASK7;
- if (value == MUTE)
- sc_access[0].value = sc_access[1].value = 0x80;
- else
- sc_access[0].value = sc_access[1].value = 0x0;
- snd_pmic_ops_fs.mute_status = value;
- if (snd_pmic_ops_fs.num_channel == 1)
- sc_access[0].value = sc_access[1].value = 0x80;
- reg_num = 2;
- break;
- case PMIC_SND_MUTE_ALL:
- sc_access[0].reg_addr = AUD16;
- sc_access[1].reg_addr = AUD17;
- sc_access[2].reg_addr = AUD15;
- sc_access[3].reg_addr = MICCTRL;
- sc_access[4].reg_addr = MICLICTRL1;
- sc_access[5].reg_addr = MICLICTRL2;
- sc_access[0].mask = sc_access[1].mask =
- sc_access[2].mask = MASK7;
- sc_access[3].mask = MASK5;
- sc_access[4].mask = sc_access[5].mask = MASK6;
-
- if (value == MUTE) {
- sc_access[0].value =
- sc_access[1].value = sc_access[2].value = 0x80;
- sc_access[3].value = 0x20;
- sc_access[4].value = sc_access[5].value = 0x40;
-
- } else {
- sc_access[0].value = sc_access[1].value =
- sc_access[2].value = sc_access[3].value =
- sc_access[4].value = sc_access[5].value = 0x0;
- }
- if (snd_pmic_ops_fs.num_channel == 1)
- sc_access[1].value = sc_access[2].value = 0x80;
- reg_num = 6;
- snd_pmic_ops_fs.mute_status = value;
- snd_pmic_ops_fs.master_mute = value;
- break;
-
- }
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, reg_num);
-}
-
-static int fs_set_vol(int dev_id, int value)
-{
- struct sc_reg_access sc_acces, sc_access[4] = {{0},};
- int reg_num = 0;
- int retval = 0;
-
- if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
- retval = fs_init_card();
- if (retval)
- return retval;
-
- switch (dev_id) {
- case PMIC_SND_LEFT_PB_VOL:
- pr_debug("PMIC_SND_LEFT_PB_VOL:%d\n", value);
- sc_access[0].value = sc_access[1].value = value;
- sc_access[0].reg_addr = AUD16;
- sc_access[1].reg_addr = AUD15;
- sc_access[0].mask = sc_access[1].mask =
- (MASK0|MASK1|MASK2|MASK3|MASK4|MASK5);
- reg_num = 2;
- break;
-
- case PMIC_SND_RIGHT_PB_VOL:
- pr_debug("PMIC_SND_RIGHT_PB_VOL:%d\n", value);
- sc_access[0].value = sc_access[1].value = value;
- sc_access[0].reg_addr = AUD17;
- sc_access[1].reg_addr = AUD15;
- sc_access[0].mask = sc_access[1].mask =
- (MASK0|MASK1|MASK2|MASK3|MASK4|MASK5);
- if (snd_pmic_ops_fs.num_channel == 1) {
- sc_access[0].value = sc_access[1].value = 0x80;
- sc_access[0].mask = sc_access[1].mask = MASK7;
- }
- reg_num = 2;
- break;
- case PMIC_SND_CAPTURE_VOL:
- pr_debug("PMIC_SND_CAPTURE_VOL:%d\n", value);
- sc_access[0].reg_addr = MICLICTRL1;
- sc_access[1].reg_addr = MICLICTRL2;
- sc_access[2].reg_addr = DMICCTRL1;
- sc_access[2].value = value;
- sc_access[0].value = sc_access[1].value = value;
- sc_acces.reg_addr = MICLICTRL3;
- sc_acces.value = value;
- sc_acces.mask = (MASK0|MASK1|MASK2|MASK3|MASK5|MASK6|MASK7);
- retval = sst_sc_reg_access(&sc_acces, PMIC_READ_MODIFY, 1);
- sc_access[0].mask = sc_access[1].mask =
- sc_access[2].mask = (MASK0|MASK1|MASK2|MASK3|MASK4|MASK5);
- reg_num = 3;
- break;
-
- default:
- return -EINVAL;
- }
-
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, reg_num);
-}
-
-static int fs_get_mute(int dev_id, u8 *value)
-{
- struct sc_reg_access sc_access[6] = {{0,},};
-
- int retval = 0, temp_value = 0, mask = 0;
-
- if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
- retval = fs_init_card();
- if (retval)
- return retval;
-
- switch (dev_id) {
-
- case PMIC_SND_AMIC_MUTE:
- case PMIC_SND_HP_MIC_MUTE:
- sc_access[0].reg_addr = MICLICTRL1;
- mask = MASK6;
- retval = sst_sc_reg_access(sc_access, PMIC_READ, 1);
- if (sc_access[0].value & mask)
- *value = MUTE;
- else
- *value = UNMUTE;
- break;
- case PMIC_SND_DMIC_MUTE:
- sc_access[0].reg_addr = MICCTRL;
- mask = MASK5;
- retval = sst_sc_reg_access(sc_access, PMIC_READ, 1);
- temp_value = (sc_access[0].value & mask);
- if (temp_value == 0)
- *value = UNMUTE;
- else
- *value = MUTE;
- break;
-
- case PMIC_SND_LEFT_HP_MUTE:
- case PMIC_SND_LEFT_SPEAKER_MUTE:
- sc_access[0].reg_addr = AUD16;
- mask = MASK7;
- retval = sst_sc_reg_access(sc_access, PMIC_READ, 1);
- temp_value = sc_access[0].value & mask;
- if (temp_value == 0)
- *value = UNMUTE;
- else
- *value = MUTE;
- break;
- case PMIC_SND_RIGHT_HP_MUTE:
- case PMIC_SND_RIGHT_SPEAKER_MUTE:
- sc_access[0].reg_addr = AUD17;
- mask = MASK7;
- retval = sst_sc_reg_access(sc_access, PMIC_READ, 1);
- temp_value = sc_access[0].value & mask;
- if (temp_value == 0)
- *value = UNMUTE;
- else
- *value = MUTE;
- break;
- default:
- return -EINVAL;
- }
-
- return retval;
-}
-
-static int fs_get_vol(int dev_id, int *value)
-{
- struct sc_reg_access sc_access = {0,};
- int retval = 0, mask = 0;
-
- if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
- retval = fs_init_card();
- if (retval)
- return retval;
-
- switch (dev_id) {
- case PMIC_SND_CAPTURE_VOL:
- pr_debug("PMIC_SND_CAPTURE_VOL\n");
- sc_access.reg_addr = MICLICTRL1;
- mask = (MASK5|MASK4|MASK3|MASK2|MASK1|MASK0);
- break;
- case PMIC_SND_LEFT_PB_VOL:
- pr_debug("PMIC_SND_LEFT_PB_VOL\n");
- sc_access.reg_addr = AUD16;
- mask = (MASK5|MASK4|MASK3|MASK2|MASK1|MASK0);
- break;
- case PMIC_SND_RIGHT_PB_VOL:
- pr_debug("PMIC_SND_RT_PB_VOL\n");
- sc_access.reg_addr = AUD17;
- mask = (MASK5|MASK4|MASK3|MASK2|MASK1|MASK0);
- break;
- default:
- return -EINVAL;
- }
-
- retval = sst_sc_reg_access(&sc_access, PMIC_READ, 1);
- pr_debug("value read = 0x%x\n", sc_access.value);
- *value = (int) (sc_access.value & mask);
- pr_debug("value returned = 0x%x\n", *value);
- return retval;
-}
-
-static void fs_pmic_irq_enable(void *data)
-{
- struct snd_intelmad *intelmaddata = data;
- struct sc_reg_access sc_access[] = {
- {0x187, 0x00, MASK7},
- {0x188, 0x10, MASK4},
- {0x18b, 0x10, MASK4},
- };
-
- struct sc_reg_access sc_access_write[] = {
- {0x198, 0x00, 0x0},
- };
- pr_debug("Audio interrupt enable\n");
- sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 3);
- sst_sc_reg_access(sc_access_write, PMIC_WRITE, 1);
-
- intelmaddata->jack[0].jack_status = 0;
- /*intelmaddata->jack[1].jack_status = 0;*/
-
- jack_det_enabled = true;
- return;
-}
-
-static void fs_pmic_irq_cb(void *cb_data, u8 value)
-{
- struct mad_jack *mjack = NULL;
- struct snd_intelmad *intelmaddata = cb_data;
- unsigned int present = 0, jack_event_flag = 0, buttonpressflag = 0;
-
- mjack = &intelmaddata->jack[0];
-
- if (value & 0x4) {
- if (!jack_det_enabled)
- fs_pmic_irq_enable(intelmaddata);
-
- /* send headphone detect */
- pr_debug(":MAD headphone %d\n", value & 0x4);
- present = !(mjack->jack_status);
- mjack->jack_status = present;
- jack_event_flag = 1;
- mjack->jack.type = SND_JACK_HEADPHONE;
- }
-
- if (value & 0x2) {
- /* send short push */
- pr_debug(":MAD short push %d\n", value & 0x2);
- present = 1;
- jack_event_flag = 1;
- buttonpressflag = 1;
- mjack->jack.type = MID_JACK_HS_SHORT_PRESS;
- }
-
- if (value & 0x1) {
- /* send long push */
- pr_debug(":MAD long push %d\n", value & 0x1);
- present = 1;
- jack_event_flag = 1;
- buttonpressflag = 1;
- mjack->jack.type = MID_JACK_HS_LONG_PRESS;
- }
-
- if (value & 0x8) {
- if (!jack_det_enabled)
- fs_pmic_irq_enable(intelmaddata);
- /* send headset detect */
- pr_debug(":MAD headset = %d\n", value & 0x8);
- present = !(mjack->jack_status);
- mjack->jack_status = present;
- jack_event_flag = 1;
- mjack->jack.type = SND_JACK_HEADSET;
- }
-
-
- if (jack_event_flag)
- sst_mad_send_jack_report(&mjack->jack,
- buttonpressflag, present);
-
- return;
-}
-static int fs_jack_enable(void)
-{
- return 0;
-}
-
-struct snd_pmic_ops snd_pmic_ops_fs = {
- .set_input_dev = fs_set_selected_input_dev,
- .set_output_dev = fs_set_selected_output_dev,
- .set_mute = fs_set_mute,
- .get_mute = fs_get_mute,
- .set_vol = fs_set_vol,
- .get_vol = fs_get_vol,
- .init_card = fs_init_card,
- .set_pcm_audio_params = fs_set_pcm_audio_params,
- .set_pcm_voice_params = fs_set_pcm_voice_params,
- .set_voice_port = fs_set_voice_port,
- .set_audio_port = fs_set_audio_port,
- .power_up_pmic_pb = fs_power_up_pb,
- .power_up_pmic_cp = fs_power_up_cp,
- .power_down_pmic_pb = fs_power_down_pb,
- .power_down_pmic_cp = fs_power_down_cp,
- .power_down_pmic = fs_power_down,
- .pmic_irq_cb = fs_pmic_irq_cb,
- /*
- * Jack detection enabling
- * need be delayed till first IRQ happen.
- */
- .pmic_irq_enable = NULL,
- .pmic_jack_enable = fs_jack_enable,
-};
diff --git a/drivers/staging/intel_sst/intelmid_v1_control.c b/drivers/staging/intel_sst/intelmid_v1_control.c
deleted file mode 100644
index 9d00728d8deb..000000000000
--- a/drivers/staging/intel_sst/intelmid_v1_control.c
+++ /dev/null
@@ -1,978 +0,0 @@
-/* intelmid_v1_control.c - Intel SST Driver for audio engine
- *
- * Copyright (C) 2008-10 Intel Corp
- * Authors: Vinod Koul <vinod.koul@intel.com>
- * Harsha Priya <priya.harsha@intel.com>
- * Dharageswari R <dharageswari.r@intel.com>
- * KP Jeeja <jeeja.kp@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This file contains the control operations of vendor 2
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/file.h>
-#include <asm/mrst.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include <sound/control.h>
-#include <sound/initval.h>
-#include "intel_sst.h"
-#include "intel_sst_ioctl.h"
-#include "intelmid.h"
-#include "intelmid_snd_control.h"
-
-#include <linux/gpio.h>
-#define KOSKI_VOICE_CODEC_ENABLE 46
-
-enum _reg_v2 {
-
- MASTER_CLOCK_PRESCALAR = 0x205,
- SET_MASTER_AND_LR_CLK1 = 0x20b,
- SET_MASTER_AND_LR_CLK2 = 0x20c,
- MASTER_MODE_AND_DATA_DELAY = 0x20d,
- DIGITAL_INTERFACE_TO_DAI2 = 0x20e,
- CLK_AND_FS1 = 0x208,
- CLK_AND_FS2 = 0x209,
- DAI2_TO_DAC_HP = 0x210,
- HP_OP_SINGLE_ENDED = 0x224,
- ENABLE_OPDEV_CTRL = 0x226,
- ENABLE_DEV_AND_USE_XTAL = 0x227,
-
- /* Max audio subsystem (PQ49) MAX 8921 */
- AS_IP_MODE_CTL = 0xF9,
- AS_LEFT_SPKR_VOL_CTL = 0xFA, /* Mono Earpiece volume control */
- AS_RIGHT_SPKR_VOL_CTL = 0xFB,
- AS_LEFT_HP_VOL_CTL = 0xFC,
- AS_RIGHT_HP_VOL_CTL = 0xFD,
- AS_OP_MIX_CTL = 0xFE,
- AS_CONFIG = 0xFF,
-
- /* Headphone volume control & mute registers */
- VOL_CTRL_LT = 0x21c,
- VOL_CTRL_RT = 0x21d,
-
-};
-/**
- * mx_init_card - initialize the sound card
- *
- * This initializes the audio paths to known values for this sound card
- */
-static int mx_init_card(void)
-{
- struct sc_reg_access sc_access[] = {
- {0x200, 0x80, 0x00},
- {0x201, 0xC0, 0x00},
- {0x202, 0x00, 0x00},
- {0x203, 0x00, 0x00},
- {0x204, 0x02, 0x00},
- {0x205, 0x10, 0x00},
- {0x206, 0x60, 0x00},
- {0x207, 0x00, 0x00},
- {0x208, 0x90, 0x00},
- {0x209, 0x51, 0x00},
- {0x20a, 0x00, 0x00},
- {0x20b, 0x10, 0x00},
- {0x20c, 0x00, 0x00},
- {0x20d, 0x00, 0x00},
- {0x20e, 0x21, 0x00},
- {0x20f, 0x00, 0x00},
- {0x210, 0x84, 0x00},
- {0x211, 0xB3, 0x00},
- {0x212, 0x00, 0x00},
- {0x213, 0x00, 0x00},
- {0x214, 0x41, 0x00},
- {0x215, 0x00, 0x00},
- {0x216, 0x00, 0x00},
- {0x217, 0x00, 0x00},
- {0x218, 0x03, 0x00},
- {0x219, 0x03, 0x00},
- {0x21a, 0x00, 0x00},
- {0x21b, 0x00, 0x00},
- {0x21c, 0x00, 0x00},
- {0x21d, 0x00, 0x00},
- {0x21e, 0x00, 0x00},
- {0x21f, 0x00, 0x00},
- {0x220, 0x20, 0x00},
- {0x221, 0x20, 0x00},
- {0x222, 0x51, 0x00},
- {0x223, 0x20, 0x00},
- {0x224, 0x04, 0x00},
- {0x225, 0x80, 0x00},
- {0x226, 0x0F, 0x00},
- {0x227, 0x08, 0x00},
- {0xf9, 0x40, 0x00},
- {0xfa, 0x1f, 0x00},
- {0xfb, 0x1f, 0x00},
- {0xfc, 0x1f, 0x00},
- {0xfd, 0x1f, 0x00},
- {0xfe, 0x00, 0x00},
- {0xff, 0x0c, 0x00},
- };
- snd_pmic_ops_mx.card_status = SND_CARD_INIT_DONE;
- snd_pmic_ops_mx.num_channel = 2;
- snd_pmic_ops_mx.master_mute = UNMUTE;
- snd_pmic_ops_mx.mute_status = UNMUTE;
- return sst_sc_reg_access(sc_access, PMIC_WRITE, 47);
-}
-
-static int mx_enable_audiodac(int value)
-{
- struct sc_reg_access sc_access[3];
- int mute_val = 0;
- int mute_val1 = 0;
- int retval = 0;
-
- sc_access[0].reg_addr = AS_LEFT_HP_VOL_CTL;
- sc_access[1].reg_addr = AS_RIGHT_HP_VOL_CTL;
-
- if (value == UNMUTE) {
- mute_val = 0x1F;
- mute_val1 = 0x00;
- } else {
- mute_val = 0x00;
- mute_val1 = 0x40;
- }
- sc_access[0].mask = sc_access[1].mask = MASK0|MASK1|MASK2|MASK3|MASK4;
- sc_access[0].value = sc_access[1].value = (u8)mute_val;
- retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
- if (retval)
- return retval;
- pr_debug("mute status = %d\n", snd_pmic_ops_mx.mute_status);
- if (snd_pmic_ops_mx.mute_status == MUTE ||
- snd_pmic_ops_mx.master_mute == MUTE)
- return retval;
-
- sc_access[0].reg_addr = VOL_CTRL_LT;
- sc_access[1].reg_addr = VOL_CTRL_RT;
- sc_access[0].mask = sc_access[1].mask = MASK6;
- sc_access[0].value = sc_access[1].value = mute_val1;
- if (snd_pmic_ops_mx.num_channel == 1)
- sc_access[1].value = 0x40;
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
-}
-
-static int mx_power_up_pb(unsigned int port)
-{
-
- int retval = 0;
- struct sc_reg_access sc_access[3];
-
- if (snd_pmic_ops_mx.card_status == SND_CARD_UN_INIT) {
- retval = mx_init_card();
- if (retval)
- return retval;
- }
- retval = mx_enable_audiodac(MUTE);
- if (retval)
- return retval;
-
- msleep(10);
-
- sc_access[0].reg_addr = AS_CONFIG;
- sc_access[0].mask = MASK7;
- sc_access[0].value = 0x80;
- retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
- if (retval)
- return retval;
-
- sc_access[0].reg_addr = ENABLE_OPDEV_CTRL;
- sc_access[0].mask = 0xff;
- sc_access[0].value = 0x3C;
- retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
- if (retval)
- return retval;
-
- sc_access[0].reg_addr = ENABLE_DEV_AND_USE_XTAL;
- sc_access[0].mask = 0x80;
- sc_access[0].value = 0x80;
- retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
- if (retval)
- return retval;
-
- return mx_enable_audiodac(UNMUTE);
-}
-
-static int mx_power_down_pb(unsigned int device)
-{
- struct sc_reg_access sc_access[3];
- int retval = 0;
-
- if (snd_pmic_ops_mx.card_status == SND_CARD_UN_INIT) {
- retval = mx_init_card();
- if (retval)
- return retval;
- }
-
- retval = mx_enable_audiodac(MUTE);
- if (retval)
- return retval;
-
- sc_access[0].reg_addr = ENABLE_OPDEV_CTRL;
- sc_access[0].mask = MASK3|MASK2;
- sc_access[0].value = 0x00;
-
- retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
- if (retval)
- return retval;
-
- return mx_enable_audiodac(UNMUTE);
-}
-
-static int mx_power_up_cp(unsigned int port)
-{
- int retval = 0;
- struct sc_reg_access sc_access[] = {
- {ENABLE_DEV_AND_USE_XTAL, 0x80, MASK7},
- {ENABLE_OPDEV_CTRL, 0x3, 0x3},
- };
-
- if (snd_pmic_ops_mx.card_status == SND_CARD_UN_INIT) {
- retval = mx_init_card();
- if (retval)
- return retval;
- }
-
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
-}
-
-static int mx_power_down_cp(unsigned int device)
-{
- struct sc_reg_access sc_access[] = {
- {ENABLE_OPDEV_CTRL, 0x00, MASK1|MASK0},
- };
- int retval = 0;
-
- if (snd_pmic_ops_mx.card_status == SND_CARD_UN_INIT) {
- retval = mx_init_card();
- if (retval)
- return retval;
- }
-
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
-}
-
-static int mx_power_down(void)
-{
- int retval = 0;
- struct sc_reg_access sc_access[3];
-
- if (snd_pmic_ops_mx.card_status == SND_CARD_UN_INIT) {
- retval = mx_init_card();
- if (retval)
- return retval;
- }
-
- retval = mx_enable_audiodac(MUTE);
- if (retval)
- return retval;
-
- sc_access[0].reg_addr = AS_CONFIG;
- sc_access[0].mask = MASK7;
- sc_access[0].value = 0x00;
- retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
- if (retval)
- return retval;
-
- sc_access[0].reg_addr = ENABLE_DEV_AND_USE_XTAL;
- sc_access[0].mask = MASK7;
- sc_access[0].value = 0x00;
- retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
- if (retval)
- return retval;
-
- sc_access[0].reg_addr = ENABLE_OPDEV_CTRL;
- sc_access[0].mask = MASK3|MASK2;
- sc_access[0].value = 0x00;
- retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
- if (retval)
- return retval;
-
- return mx_enable_audiodac(UNMUTE);
-}
-
-static int mx_set_pcm_voice_params(void)
-{
- int retval = 0;
- struct sc_reg_access sc_access[] = {
- {0x200, 0x80, 0x00},
- {0x201, 0xC0, 0x00},
- {0x202, 0x00, 0x00},
- {0x203, 0x00, 0x00},
- {0x204, 0x0e, 0x00},
- {0x205, 0x20, 0x00},
- {0x206, 0x8f, 0x00},
- {0x207, 0x21, 0x00},
- {0x208, 0x18, 0x00},
- {0x209, 0x32, 0x00},
- {0x20a, 0x00, 0x00},
- {0x20b, 0x5A, 0x00},
- {0x20c, 0xBE, 0x00},/* 0x00 -> 0xBE Koski */
- {0x20d, 0x00, 0x00}, /* DAI2 'off' */
- {0x20e, 0x40, 0x00},
- {0x20f, 0x00, 0x00},
- {0x210, 0x84, 0x00},
- {0x211, 0x33, 0x00}, /* Voice filter */
- {0x212, 0x00, 0x00},
- {0x213, 0x00, 0x00},
- {0x214, 0x41, 0x00},
- {0x215, 0x00, 0x00},
- {0x216, 0x00, 0x00},
- {0x217, 0x20, 0x00},
- {0x218, 0x00, 0x00},
- {0x219, 0x00, 0x00},
- {0x21a, 0x40, 0x00},
- {0x21b, 0x40, 0x00},
- {0x21c, 0x09, 0x00},
- {0x21d, 0x09, 0x00},
- {0x21e, 0x00, 0x00},
- {0x21f, 0x00, 0x00},
- {0x220, 0x00, 0x00}, /* Microphone configurations */
- {0x221, 0x00, 0x00}, /* Microphone configurations */
- {0x222, 0x50, 0x00}, /* Microphone configurations */
- {0x223, 0x21, 0x00}, /* Microphone configurations */
- {0x224, 0x00, 0x00},
- {0x225, 0x80, 0x00},
- {0xf9, 0x40, 0x00},
- {0xfa, 0x19, 0x00},
- {0xfb, 0x19, 0x00},
- {0xfc, 0x12, 0x00},
- {0xfd, 0x12, 0x00},
- {0xfe, 0x00, 0x00},
- };
-
- if (snd_pmic_ops_mx.card_status == SND_CARD_UN_INIT) {
- retval = mx_init_card();
- if (retval)
- return retval;
- }
- pr_debug("SST DBG:mx_set_pcm_voice_params called\n");
- return sst_sc_reg_access(sc_access, PMIC_WRITE, 44);
-}
-
-static int mx_set_pcm_audio_params(int sfreq, int word_size, int num_channel)
-{
- int retval = 0;
-
- int config1 = 0, config2 = 0, filter = 0xB3;
- struct sc_reg_access sc_access[5];
-
- if (snd_pmic_ops_mx.card_status == SND_CARD_UN_INIT) {
- retval = mx_init_card();
- if (retval)
- return retval;
- }
-
- switch (sfreq) {
- case 8000:
- config1 = 0x10;
- config2 = 0x00;
- filter = 0x33;
- break;
- case 11025:
- config1 = 0x16;
- config2 = 0x0d;
- break;
- case 12000:
- config1 = 0x18;
- config2 = 0x00;
- break;
- case 16000:
- config1 = 0x20;
- config2 = 0x00;
- break;
- case 22050:
- config1 = 0x2c;
- config2 = 0x1a;
- break;
- case 24000:
- config1 = 0x30;
- config2 = 0x00;
- break;
- case 32000:
- config1 = 0x40;
- config2 = 0x00;
- break;
- case 44100:
- config1 = 0x58;
- config2 = 0x33;
- break;
- case 48000:
- config1 = 0x60;
- config2 = 0x00;
- break;
- }
- snd_pmic_ops_mx.num_channel = num_channel;
- /*mute the right channel if MONO*/
- if (snd_pmic_ops_mx.num_channel == 1) {
- sc_access[0].reg_addr = VOL_CTRL_RT;
- sc_access[0].value = 0x40;
- sc_access[0].mask = MASK6;
-
- sc_access[1].reg_addr = 0x224;
- sc_access[1].value = 0x05;
- sc_access[1].mask = MASK0|MASK1|MASK2;
-
- retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
- if (retval)
- return retval;
- } else {
- sc_access[0].reg_addr = VOL_CTRL_RT;
- sc_access[0].value = 0x00;
- sc_access[0].mask = MASK6;
-
- sc_access[1].reg_addr = 0x224;
- sc_access[1].value = 0x04;
- sc_access[1].mask = MASK0|MASK1|MASK2;
-
- retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
- if (retval)
- return retval;
- }
- sc_access[0].reg_addr = 0x206;
- sc_access[0].value = config1;
- sc_access[1].reg_addr = 0x207;
- sc_access[1].value = config2;
-
- if (word_size == 16) {
- sc_access[2].value = 0x51;
- sc_access[3].value = 0x31;
- } else if (word_size == 24) {
- sc_access[2].value = 0x52;
- sc_access[3].value = 0x92;
- }
-
- sc_access[2].reg_addr = 0x209;
- sc_access[3].reg_addr = 0x20e;
-
- sc_access[4].reg_addr = 0x211;
- sc_access[4].value = filter;
-
- return sst_sc_reg_access(sc_access, PMIC_WRITE, 5);
-}
-
-static int mx_set_selected_output_dev(u8 dev_id)
-{
- struct sc_reg_access sc_access[2];
- int num_reg = 0;
- int retval = 0;
-
- if (snd_pmic_ops_mx.card_status == SND_CARD_UN_INIT) {
- retval = mx_init_card();
- if (retval)
- return retval;
- }
-
- pr_debug("mx_set_selected_output_dev dev_id:0x%x\n", dev_id);
- snd_pmic_ops_mx.output_dev_id = dev_id;
- switch (dev_id) {
- case STEREO_HEADPHONE:
- sc_access[0].reg_addr = 0xFF;
- sc_access[0].value = 0x8C;
- sc_access[0].mask =
- MASK2|MASK3|MASK5|MASK6|MASK4;
-
- num_reg = 1;
- break;
- case MONO_EARPIECE:
- case INTERNAL_SPKR:
- sc_access[0].reg_addr = 0xFF;
- sc_access[0].value = 0xb0;
- sc_access[0].mask = MASK2|MASK3|MASK5|MASK6|MASK4;
-
- num_reg = 1;
- break;
- case RECEIVER:
- pr_debug("RECEIVER Koski selected\n");
-
- /* configuration - AS enable, receiver enable */
- sc_access[0].reg_addr = 0xFF;
- sc_access[0].value = 0x81;
- sc_access[0].mask = 0xff;
-
- num_reg = 1;
- break;
- default:
- pr_err("Not a valid output dev\n");
- return 0;
- }
- return sst_sc_reg_access(sc_access, PMIC_WRITE, num_reg);
-}
-
-
-static int mx_set_voice_port(int status)
-{
- int retval = 0;
-
- if (snd_pmic_ops_mx.card_status == SND_CARD_UN_INIT) {
- retval = mx_init_card();
- if (retval)
- return retval;
- }
- if (status == ACTIVATE)
- retval = mx_set_pcm_voice_params();
-
- return retval;
-}
-
-static int mx_set_audio_port(int status)
-{
- return 0;
-}
-
-static int mx_set_selected_input_dev(u8 dev_id)
-{
- struct sc_reg_access sc_access[2];
- int num_reg = 0;
- int retval = 0;
-
- if (snd_pmic_ops_mx.card_status == SND_CARD_UN_INIT) {
- retval = mx_init_card();
- if (retval)
- return retval;
- }
- snd_pmic_ops_mx.input_dev_id = dev_id;
- pr_debug("mx_set_selected_input_dev dev_id:0x%x\n", dev_id);
-
- switch (dev_id) {
- case AMIC:
- sc_access[0].reg_addr = 0x223;
- sc_access[0].value = 0x00;
- sc_access[0].mask = MASK7|MASK6|MASK5|MASK4|MASK0;
- sc_access[1].reg_addr = 0x222;
- sc_access[1].value = 0x50;
- sc_access[1].mask = MASK7|MASK6|MASK5|MASK4;
- num_reg = 2;
- break;
-
- case HS_MIC:
- sc_access[0].reg_addr = 0x223;
- sc_access[0].value = 0x20;
- sc_access[0].mask = MASK7|MASK6|MASK5|MASK4|MASK0;
- sc_access[1].reg_addr = 0x222;
- sc_access[1].value = 0x51;
- sc_access[1].mask = MASK7|MASK6|MASK5|MASK4;
- num_reg = 2;
- break;
- case DMIC:
- sc_access[1].reg_addr = 0x222;
- sc_access[1].value = 0x00;
- sc_access[1].mask = MASK7|MASK6|MASK5|MASK4|MASK0;
- sc_access[0].reg_addr = 0x223;
- sc_access[0].value = 0x20;
- sc_access[0].mask = MASK7|MASK6|MASK5|MASK4|MASK0;
- num_reg = 2;
- break;
- }
- return sst_sc_reg_access(sc_access, PMIC_WRITE, num_reg);
-}
-
-static int mx_set_mute(int dev_id, u8 value)
-{
- struct sc_reg_access sc_access[5];
- int num_reg = 0;
- int retval = 0;
-
- if (snd_pmic_ops_mx.card_status == SND_CARD_UN_INIT) {
- retval = mx_init_card();
- if (retval)
- return retval;
- }
-
-
- pr_debug("set_mute dev_id:0x%x , value:%d\n", dev_id, value);
-
- switch (dev_id) {
- case PMIC_SND_DMIC_MUTE:
- case PMIC_SND_AMIC_MUTE:
- case PMIC_SND_HP_MIC_MUTE:
- sc_access[0].reg_addr = 0x220;
- sc_access[1].reg_addr = 0x221;
- sc_access[2].reg_addr = 0x223;
- if (value == MUTE) {
- sc_access[0].value = 0x00;
- sc_access[1].value = 0x00;
- if (snd_pmic_ops_mx.input_dev_id == DMIC)
- sc_access[2].value = 0x00;
- else
- sc_access[2].value = 0x20;
- } else {
- sc_access[0].value = 0x20;
- sc_access[1].value = 0x20;
- if (snd_pmic_ops_mx.input_dev_id == DMIC)
- sc_access[2].value = 0x20;
- else
- sc_access[2].value = 0x00;
- }
- sc_access[0].mask = MASK5|MASK6;
- sc_access[1].mask = MASK5|MASK6;
- sc_access[2].mask = MASK5|MASK6;
- num_reg = 3;
- break;
- case PMIC_SND_LEFT_SPEAKER_MUTE:
- case PMIC_SND_LEFT_HP_MUTE:
- sc_access[0].reg_addr = VOL_CTRL_LT;
- if (value == MUTE)
- sc_access[0].value = 0x40;
- else
- sc_access[0].value = 0x00;
- sc_access[0].mask = MASK6;
- num_reg = 1;
- snd_pmic_ops_mx.mute_status = value;
- break;
- case PMIC_SND_RIGHT_SPEAKER_MUTE:
- case PMIC_SND_RIGHT_HP_MUTE:
- sc_access[0].reg_addr = VOL_CTRL_RT;
- if (snd_pmic_ops_mx.num_channel == 1)
- value = MUTE;
- if (value == MUTE)
- sc_access[0].value = 0x40;
- else
- sc_access[0].value = 0x00;
- sc_access[0].mask = MASK6;
- num_reg = 1;
- snd_pmic_ops_mx.mute_status = value;
- break;
- case PMIC_SND_MUTE_ALL:
- sc_access[0].reg_addr = VOL_CTRL_RT;
- sc_access[1].reg_addr = VOL_CTRL_LT;
- sc_access[2].reg_addr = 0x220;
- sc_access[3].reg_addr = 0x221;
- sc_access[4].reg_addr = 0x223;
- snd_pmic_ops_mx.master_mute = value;
- if (value == MUTE) {
- sc_access[0].value = sc_access[1].value = 0x40;
- sc_access[2].value = 0x00;
- sc_access[3].value = 0x00;
- if (snd_pmic_ops_mx.input_dev_id == DMIC)
- sc_access[4].value = 0x00;
- else
- sc_access[4].value = 0x20;
-
- } else {
- sc_access[0].value = sc_access[1].value = 0x00;
- sc_access[2].value = sc_access[3].value = 0x20;
- sc_access[4].value = 0x20;
- if (snd_pmic_ops_mx.input_dev_id == DMIC)
- sc_access[4].value = 0x20;
- else
- sc_access[4].value = 0x00;
-
-
- }
- if (snd_pmic_ops_mx.num_channel == 1)
- sc_access[0].value = 0x40;
- sc_access[0].mask = sc_access[1].mask = MASK6;
- sc_access[2].mask = MASK5|MASK6;
- sc_access[3].mask = MASK5|MASK6|MASK2|MASK4;
- sc_access[4].mask = MASK5|MASK6|MASK4;
-
- num_reg = 5;
- break;
- case PMIC_SND_RECEIVER_MUTE:
- sc_access[0].reg_addr = VOL_CTRL_RT;
- if (value == MUTE)
- sc_access[0].value = 0x40;
- else
- sc_access[0].value = 0x00;
- sc_access[0].mask = MASK6;
- num_reg = 1;
- break;
- }
-
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, num_reg);
-}
-
-static int mx_set_vol(int dev_id, int value)
-{
- struct sc_reg_access sc_access[2] = {{0},};
- int num_reg = 0;
- int retval = 0;
-
- if (snd_pmic_ops_mx.card_status == SND_CARD_UN_INIT) {
- retval = mx_init_card();
- if (retval)
- return retval;
- }
- pr_debug("set_vol dev_id:0x%x ,value:%d\n", dev_id, value);
- switch (dev_id) {
- case PMIC_SND_RECEIVER_VOL:
- return 0;
- break;
- case PMIC_SND_CAPTURE_VOL:
- sc_access[0].reg_addr = 0x220;
- sc_access[1].reg_addr = 0x221;
- sc_access[0].value = sc_access[1].value = -value;
- sc_access[0].mask = sc_access[1].mask =
- (MASK0|MASK1|MASK2|MASK3|MASK4);
- num_reg = 2;
- break;
- case PMIC_SND_LEFT_PB_VOL:
- sc_access[0].value = -value;
- sc_access[0].reg_addr = VOL_CTRL_LT;
- sc_access[0].mask = (MASK0|MASK1|MASK2|MASK3|MASK4|MASK5);
- num_reg = 1;
- break;
- case PMIC_SND_RIGHT_PB_VOL:
- sc_access[0].value = -value;
- sc_access[0].reg_addr = VOL_CTRL_RT;
- sc_access[0].mask = (MASK0|MASK1|MASK2|MASK3|MASK4|MASK5);
- if (snd_pmic_ops_mx.num_channel == 1) {
- sc_access[0].value = 0x40;
- sc_access[0].mask = MASK6;
- sc_access[0].reg_addr = VOL_CTRL_RT;
- }
- num_reg = 1;
- break;
- }
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, num_reg);
-}
-
-static int mx_get_mute(int dev_id, u8 *value)
-{
- struct sc_reg_access sc_access[4] = {{0},};
- int retval = 0, num_reg = 0, mask = 0;
-
- if (snd_pmic_ops_mx.card_status == SND_CARD_UN_INIT) {
- retval = mx_init_card();
- if (retval)
- return retval;
- }
- switch (dev_id) {
- case PMIC_SND_DMIC_MUTE:
- case PMIC_SND_AMIC_MUTE:
- case PMIC_SND_HP_MIC_MUTE:
- sc_access[0].reg_addr = 0x220;
- mask = MASK5|MASK6;
- num_reg = 1;
- retval = sst_sc_reg_access(sc_access, PMIC_READ, num_reg);
- if (retval)
- return retval;
- *value = sc_access[0].value & mask;
- if (*value)
- *value = UNMUTE;
- else
- *value = MUTE;
- return retval;
- case PMIC_SND_LEFT_HP_MUTE:
- case PMIC_SND_LEFT_SPEAKER_MUTE:
- sc_access[0].reg_addr = VOL_CTRL_LT;
- num_reg = 1;
- mask = MASK6;
- break;
- case PMIC_SND_RIGHT_HP_MUTE:
- case PMIC_SND_RIGHT_SPEAKER_MUTE:
- sc_access[0].reg_addr = VOL_CTRL_RT;
- num_reg = 1;
- mask = MASK6;
- break;
- }
- retval = sst_sc_reg_access(sc_access, PMIC_READ, num_reg);
- if (retval)
- return retval;
- *value = sc_access[0].value & mask;
- if (*value)
- *value = MUTE;
- else
- *value = UNMUTE;
- return retval;
-}
-
-static int mx_get_vol(int dev_id, int *value)
-{
- struct sc_reg_access sc_access = {0,};
- int retval = 0, mask = 0, num_reg = 0;
-
- if (snd_pmic_ops_mx.card_status == SND_CARD_UN_INIT) {
- retval = mx_init_card();
- if (retval)
- return retval;
- }
- switch (dev_id) {
- case PMIC_SND_CAPTURE_VOL:
- sc_access.reg_addr = 0x220;
- mask = MASK0|MASK1|MASK2|MASK3|MASK4;
- num_reg = 1;
- break;
- case PMIC_SND_LEFT_PB_VOL:
- sc_access.reg_addr = VOL_CTRL_LT;
- mask = MASK0|MASK1|MASK2|MASK3|MASK4|MASK5;
- num_reg = 1;
- break;
- case PMIC_SND_RIGHT_PB_VOL:
- sc_access.reg_addr = VOL_CTRL_RT;
- mask = MASK0|MASK1|MASK2|MASK3|MASK4|MASK5;
- num_reg = 1;
- break;
- }
- retval = sst_sc_reg_access(&sc_access, PMIC_READ, num_reg);
- if (retval)
- return retval;
- *value = -(sc_access.value & mask);
- pr_debug("get volume value extracted %d\n", *value);
- return retval;
-}
-
-static u8 mx_get_jack_status(void)
-{
- struct sc_reg_access sc_access_read = {0,};
-
- sc_access_read.reg_addr = 0x201;
- sst_sc_reg_access(&sc_access_read, PMIC_READ, 1);
- pr_debug("value returned = 0x%x\n", sc_access_read.value);
- return sc_access_read.value;
-}
-
-static void mx_pmic_irq_enable(void *data)
-{
- struct snd_intelmad *intelmaddata = data;
-
- intelmaddata->jack_prev_state = 0xc0;
- return;
-}
-
-static void mx_pmic_irq_cb(void *cb_data, u8 intsts)
-{
- u8 jack_cur_status, jack_prev_state = 0;
- struct mad_jack *mjack = NULL;
- unsigned int present = 0, jack_event_flag = 0, buttonpressflag = 0;
- time_t timediff;
- struct snd_intelmad *intelmaddata = cb_data;
-
- mjack = &intelmaddata->jack[0];
- if (intsts & 0x2) {
- jack_cur_status = mx_get_jack_status();
- jack_prev_state = intelmaddata->jack_prev_state;
- if ((jack_prev_state == 0xc0) && (jack_cur_status == 0x40)) {
- /*headset insert detected. */
- pr_debug("MAD headset inserted\n");
- present = 1;
- jack_event_flag = 1;
- mjack->jack_status = 1;
- mjack->jack.type = SND_JACK_HEADSET;
- }
-
- if ((jack_prev_state == 0xc0) && (jack_cur_status == 0x00)) {
- /* headphone insert detected. */
- pr_debug("MAD headphone inserted\n");
- present = 1;
- jack_event_flag = 1;
- mjack->jack.type = SND_JACK_HEADPHONE;
- }
-
- if ((jack_prev_state == 0x40) && (jack_cur_status == 0xc0)) {
- /* headset remove detected. */
- pr_debug("MAD headset removed\n");
-
- present = 0;
- jack_event_flag = 1;
- mjack->jack_status = 0;
- mjack->jack.type = SND_JACK_HEADSET;
- }
-
- if ((jack_prev_state == 0x00) && (jack_cur_status == 0xc0)) {
- /* headphone remove detected. */
- pr_debug("MAD headphone removed\n");
- present = 0;
- jack_event_flag = 1;
- mjack->jack.type = SND_JACK_HEADPHONE;
- }
-
- if ((jack_prev_state == 0x40) && (jack_cur_status == 0x00)) {
- /* button pressed */
- do_gettimeofday(&mjack->buttonpressed);
- pr_debug("MAD button press detected\n");
- }
-
- if ((jack_prev_state == 0x00) && (jack_cur_status == 0x40)) {
- if (mjack->jack_status) {
-				/* button released */
-				do_gettimeofday(
-					&mjack->buttonreleased);
- pr_debug("MAD Button Released detected\n");
- timediff = mjack->buttonreleased.tv_sec -
- mjack->buttonpressed.tv_sec;
- buttonpressflag = 1;
-
- if (timediff > 1) {
- pr_debug("MAD long press dtd\n");
- /* send headphone detect/undetect */
- present = 1;
- jack_event_flag = 1;
- mjack->jack.type =
- MID_JACK_HS_LONG_PRESS;
- } else {
- pr_debug("MAD short press dtd\n");
- /* send headphone detect/undetect */
- present = 1;
- jack_event_flag = 1;
- mjack->jack.type =
- MID_JACK_HS_SHORT_PRESS;
- }
- } else {
-				/* Workaround for a Maxim hw
-				issue: 0x00 to 0x40 is not a
-				valid transition for headset insertion */
- /*headset insert detected. */
- pr_debug("MAD headset inserted\n");
- present = 1;
- jack_event_flag = 1;
- mjack->jack_status = 1;
- mjack->jack.type = SND_JACK_HEADSET;
- }
- }
- intelmaddata->jack_prev_state = jack_cur_status;
- pr_debug("mx_pmic_irq_cb prv_state= 0x%x\n",
- intelmaddata->jack_prev_state);
- }
-
- if (jack_event_flag)
- sst_mad_send_jack_report(&mjack->jack,
- buttonpressflag, present);
-}
-static int mx_jack_enable(void)
-{
- return 0;
-}
-
-struct snd_pmic_ops snd_pmic_ops_mx = {
- .set_input_dev = mx_set_selected_input_dev,
- .set_output_dev = mx_set_selected_output_dev,
- .set_mute = mx_set_mute,
- .get_mute = mx_get_mute,
- .set_vol = mx_set_vol,
- .get_vol = mx_get_vol,
- .init_card = mx_init_card,
- .set_pcm_audio_params = mx_set_pcm_audio_params,
- .set_pcm_voice_params = mx_set_pcm_voice_params,
- .set_voice_port = mx_set_voice_port,
- .set_audio_port = mx_set_audio_port,
- .power_up_pmic_pb = mx_power_up_pb,
- .power_up_pmic_cp = mx_power_up_cp,
- .power_down_pmic_pb = mx_power_down_pb,
- .power_down_pmic_cp = mx_power_down_cp,
- .power_down_pmic = mx_power_down,
- .pmic_irq_cb = mx_pmic_irq_cb,
- .pmic_irq_enable = mx_pmic_irq_enable,
- .pmic_jack_enable = mx_jack_enable,
-};
-
diff --git a/drivers/staging/intel_sst/intelmid_v2_control.c b/drivers/staging/intel_sst/intelmid_v2_control.c
deleted file mode 100644
index 46ab55eb8097..000000000000
--- a/drivers/staging/intel_sst/intelmid_v2_control.c
+++ /dev/null
@@ -1,1156 +0,0 @@
-/*
- * intelmid_v2_control.c - Intel Sound card driver for MID
- *
- * Copyright (C) 2008-10 Intel Corp
- * Authors: Vinod Koul <vinod.koul@intel.com>
- * Harsha Priya <priya.harsha@intel.com>
- * KP Jeeja <jeeja.kp@intel.com>
- * Dharageswari R <dharageswari.r@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This file contains the control operations of vendor 3
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/gpio.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/file.h>
-#include <sound/control.h>
-#include "intel_sst.h"
-#include "intelmid_snd_control.h"
-#include "intelmid.h"
-
-enum reg_v3 {
- VAUDIOCNT = 0x51,
- VOICEPORT1 = 0x100,
- VOICEPORT2 = 0x101,
- AUDIOPORT1 = 0x102,
- AUDIOPORT2 = 0x103,
- ADCSAMPLERATE = 0x104,
- DMICCTRL1 = 0x105,
- DMICCTRL2 = 0x106,
- MICCTRL = 0x107,
- MICSELVOL = 0x108,
- LILSEL = 0x109,
- LIRSEL = 0x10a,
- VOICEVOL = 0x10b,
- AUDIOLVOL = 0x10c,
- AUDIORVOL = 0x10d,
- LMUTE = 0x10e,
- RMUTE = 0x10f,
- POWERCTRL1 = 0x110,
- POWERCTRL2 = 0x111,
- DRVPOWERCTRL = 0x112,
- VREFPLL = 0x113,
- PCMBUFCTRL = 0x114,
- SOFTMUTE = 0x115,
- DTMFPATH = 0x116,
- DTMFVOL = 0x117,
- DTMFFREQ = 0x118,
- DTMFHFREQ = 0x119,
- DTMFLFREQ = 0x11a,
- DTMFCTRL = 0x11b,
- DTMFASON = 0x11c,
- DTMFASOFF = 0x11d,
- DTMFASINUM = 0x11e,
- CLASSDVOL = 0x11f,
- VOICEDACAVOL = 0x120,
- AUDDACAVOL = 0x121,
- LOMUTEVOL = 0x122,
- HPLVOL = 0x123,
- HPRVOL = 0x124,
- MONOVOL = 0x125,
- LINEOUTMIXVOL = 0x126,
- EPMIXVOL = 0x127,
- LINEOUTLSEL = 0x128,
- LINEOUTRSEL = 0x129,
- EPMIXOUTSEL = 0x12a,
- HPLMIXSEL = 0x12b,
- HPRMIXSEL = 0x12c,
- LOANTIPOP = 0x12d,
- AUXDBNC = 0x12f,
-};
-
-static void nc_set_amp_power(int power)
-{
- if (snd_pmic_ops_nc.gpio_amp)
- gpio_set_value(snd_pmic_ops_nc.gpio_amp, power);
-}
-
-/**
- * nc_init_card - initialize the sound card
- *
- * This initializes the audio paths to known values for this sound card
- */
-static int nc_init_card(void)
-{
- struct sc_reg_access sc_access[] = {
- {VAUDIOCNT, 0x25, 0},
- {VOICEPORT1, 0x00, 0},
- {VOICEPORT2, 0x00, 0},
- {AUDIOPORT1, 0x98, 0},
- {AUDIOPORT2, 0x09, 0},
- {AUDIOLVOL, 0x00, 0},
- {AUDIORVOL, 0x00, 0},
- {LMUTE, 0x03, 0},
- {RMUTE, 0x03, 0},
- {POWERCTRL1, 0x00, 0},
- {POWERCTRL2, 0x00, 0},
- {DRVPOWERCTRL, 0x00, 0},
- {VREFPLL, 0x10, 0},
- {HPLMIXSEL, 0xee, 0},
- {HPRMIXSEL, 0xf6, 0},
- {PCMBUFCTRL, 0x0, 0},
- {VOICEVOL, 0x0e, 0},
- {HPLVOL, 0x06, 0},
- {HPRVOL, 0x06, 0},
- {MICCTRL, 0x51, 0x00},
- {ADCSAMPLERATE, 0x8B, 0x00},
- {MICSELVOL, 0x5B, 0x00},
- {LILSEL, 0x06, 0},
- {LIRSEL, 0x46, 0},
- {LOANTIPOP, 0x00, 0},
- {DMICCTRL1, 0x40, 0},
- {AUXDBNC, 0xff, 0},
- };
- snd_pmic_ops_nc.card_status = SND_CARD_INIT_DONE;
- snd_pmic_ops_nc.master_mute = UNMUTE;
- snd_pmic_ops_nc.mute_status = UNMUTE;
- sst_sc_reg_access(sc_access, PMIC_WRITE, 27);
- mutex_init(&snd_pmic_ops_nc.lock);
- pr_debug("init complete!!\n");
- return 0;
-}
-
-static int nc_enable_audiodac(int value)
-{
- struct sc_reg_access sc_access[3];
- int mute_val = 0;
-
- if (snd_pmic_ops_nc.mute_status == MUTE)
- return 0;
-
- if (((snd_pmic_ops_nc.output_dev_id == MONO_EARPIECE) ||
- (snd_pmic_ops_nc.output_dev_id == INTERNAL_SPKR)) &&
- (value == UNMUTE))
- return 0;
- if (value == UNMUTE) {
-		/* UNMUTE: clear the mute bit (bit 2) */
- mute_val = 0x00;
- } else {
-		/* MUTE: set the mute bit (bit 2) */
- mute_val = 0x04;
-
- }
- sc_access[0].reg_addr = LMUTE;
- sc_access[1].reg_addr = RMUTE;
- sc_access[0].mask = sc_access[1].mask = MASK2;
- sc_access[0].value = sc_access[1].value = mute_val;
-
- if (snd_pmic_ops_nc.num_channel == 1)
- sc_access[1].value = 0x04;
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
-
-}
-
-static int nc_power_up_pb(unsigned int port)
-{
- struct sc_reg_access sc_access[7];
- int retval = 0;
-
- if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
- retval = nc_init_card();
- if (retval)
- return retval;
- if (port == 0xFF)
- return 0;
- mutex_lock(&snd_pmic_ops_nc.lock);
- nc_enable_audiodac(MUTE);
- msleep(30);
-
- pr_debug("powering up pb....\n");
-
- sc_access[0].reg_addr = VAUDIOCNT;
- sc_access[0].value = 0x27;
- sc_access[0].mask = 0x27;
- sc_access[1].reg_addr = VREFPLL;
- if (port == 0) {
- sc_access[1].value = 0x3A;
- sc_access[1].mask = 0x3A;
- } else if (port == 1) {
- sc_access[1].value = 0x35;
- sc_access[1].mask = 0x35;
- }
- retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
-
-
-
- sc_access[0].reg_addr = POWERCTRL1;
- if (port == 0) {
- sc_access[0].value = 0x40;
- sc_access[0].mask = 0x40;
- } else if (port == 1) {
- sc_access[0].value = 0x01;
- sc_access[0].mask = 0x01;
- }
- sc_access[1].reg_addr = POWERCTRL2;
- sc_access[1].value = 0x0C;
- sc_access[1].mask = 0x0C;
-
- sc_access[2].reg_addr = DRVPOWERCTRL;
- sc_access[2].value = 0x86;
- sc_access[2].mask = 0x86;
-
- sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 3);
-
- msleep(30);
-
- snd_pmic_ops_nc.pb_on = 1;
-
- /*
- * There is a mismatch between Playback Sources and the enumerated
-	 * values of output sources. This mismatch causes the ALSA upper layer
-	 * to send item 1 for Internal Speaker, while the expected enumeration
-	 * is 2. For now, treat MONO_EARPIECE and INTERNAL_SPKR identically and
-	 * power up the needed resources.
- */
- if (snd_pmic_ops_nc.output_dev_id == MONO_EARPIECE ||
- snd_pmic_ops_nc.output_dev_id == INTERNAL_SPKR)
- nc_set_amp_power(1);
- nc_enable_audiodac(UNMUTE);
- mutex_unlock(&snd_pmic_ops_nc.lock);
- return 0;
-}
-
-static int nc_power_up_cp(unsigned int port)
-{
- struct sc_reg_access sc_access[5];
- int retval = 0;
-
-
- if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
- retval = nc_init_card();
- if (retval)
- return retval;
-
-
- pr_debug("powering up cp....\n");
-
- if (port == 0xFF)
- return 0;
- sc_access[0].reg_addr = VAUDIOCNT;
- sc_access[0].value = 0x27;
- sc_access[0].mask = 0x27;
- sc_access[1].reg_addr = VREFPLL;
- if (port == 0) {
- sc_access[1].value = 0x3E;
- sc_access[1].mask = 0x3E;
- } else if (port == 1) {
- sc_access[1].value = 0x35;
- sc_access[1].mask = 0x35;
- }
-
- retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
-
-
- sc_access[0].reg_addr = POWERCTRL1;
- if (port == 0) {
- sc_access[0].value = 0xB4;
- sc_access[0].mask = 0xB4;
- } else if (port == 1) {
- sc_access[0].value = 0xBF;
- sc_access[0].mask = 0xBF;
- }
- sc_access[1].reg_addr = POWERCTRL2;
- if (port == 0) {
- sc_access[1].value = 0x0C;
- sc_access[1].mask = 0x0C;
- } else if (port == 1) {
- sc_access[1].value = 0x02;
- sc_access[1].mask = 0x02;
- }
-
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
-
-}
-
-static int nc_power_down(void)
-{
- int retval = 0;
- struct sc_reg_access sc_access[5];
-
- if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
- retval = nc_init_card();
- if (retval)
- return retval;
- nc_enable_audiodac(MUTE);
-
-
- pr_debug("powering dn nc_power_down ....\n");
-
- if (snd_pmic_ops_nc.output_dev_id == MONO_EARPIECE ||
- snd_pmic_ops_nc.output_dev_id == INTERNAL_SPKR)
- nc_set_amp_power(0);
-
- msleep(30);
-
- sc_access[0].reg_addr = DRVPOWERCTRL;
- sc_access[0].value = 0x00;
- sc_access[0].mask = 0x00;
-
- sst_sc_reg_access(sc_access, PMIC_WRITE, 1);
-
- sc_access[0].reg_addr = POWERCTRL1;
- sc_access[0].value = 0x00;
- sc_access[0].mask = 0x00;
-
- sc_access[1].reg_addr = POWERCTRL2;
- sc_access[1].value = 0x00;
- sc_access[1].mask = 0x00;
-
-
-
- sst_sc_reg_access(sc_access, PMIC_WRITE, 2);
-
- msleep(30);
- sc_access[0].reg_addr = VREFPLL;
- sc_access[0].value = 0x10;
- sc_access[0].mask = 0x10;
-
- sc_access[1].reg_addr = VAUDIOCNT;
- sc_access[1].value = 0x25;
- sc_access[1].mask = 0x25;
-
-
- retval = sst_sc_reg_access(sc_access, PMIC_WRITE, 2);
-
- msleep(30);
- return nc_enable_audiodac(UNMUTE);
-}
-
-static int nc_power_down_pb(unsigned int device)
-{
-
- int retval = 0;
- struct sc_reg_access sc_access[5];
-
- if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
- retval = nc_init_card();
- if (retval)
- return retval;
-
- pr_debug("powering dn pb....\n");
- mutex_lock(&snd_pmic_ops_nc.lock);
- nc_enable_audiodac(MUTE);
-
-
- msleep(30);
-
-
- sc_access[0].reg_addr = DRVPOWERCTRL;
- sc_access[0].value = 0x00;
- sc_access[0].mask = 0x00;
-
- sst_sc_reg_access(sc_access, PMIC_WRITE, 1);
-
- msleep(30);
-
- sc_access[0].reg_addr = POWERCTRL1;
- sc_access[0].value = 0x00;
- sc_access[0].mask = 0x41;
-
- sc_access[1].reg_addr = POWERCTRL2;
- sc_access[1].value = 0x00;
- sc_access[1].mask = 0x0C;
-
- sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
-
- msleep(30);
-
- snd_pmic_ops_nc.pb_on = 0;
-
- nc_enable_audiodac(UNMUTE);
- mutex_unlock(&snd_pmic_ops_nc.lock);
- return 0;
-}
-
-static int nc_power_down_cp(unsigned int device)
-{
- struct sc_reg_access sc_access[] = {
- {POWERCTRL1, 0x00, 0xBE},
- {POWERCTRL2, 0x00, 0x02},
- };
- int retval = 0;
-
- if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
- retval = nc_init_card();
- if (retval)
- return retval;
-
- pr_debug("powering dn cp....\n");
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
-}
-
-static int nc_set_pcm_voice_params(void)
-{
- struct sc_reg_access sc_access[] = {
- {0x100, 0xD5, 0},
- {0x101, 0x08, 0},
- {0x104, 0x03, 0},
- {0x107, 0x10, 0},
- {0x10B, 0x0E, 0},
- {0x10E, 0x03, 0},
- {0x10F, 0x03, 0},
- {0x114, 0x13, 0},
- {0x115, 0x00, 0},
- {0x128, 0xFE, 0},
- {0x129, 0xFE, 0},
- {0x12A, 0xFE, 0},
- {0x12B, 0xDE, 0},
- {0x12C, 0xDE, 0},
- };
- int retval = 0;
-
- if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
- retval = nc_init_card();
- if (retval)
- return retval;
-
- sst_sc_reg_access(sc_access, PMIC_WRITE, 14);
- pr_debug("Voice parameters set successfully!!\n");
- return 0;
-}
-
-
-static int nc_set_pcm_audio_params(int sfreq, int word_size, int num_channel)
-{
- int config2 = 0;
- struct sc_reg_access sc_access;
- int retval = 0;
-
- if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
- retval = nc_init_card();
- if (retval)
- return retval;
-
- switch (sfreq) {
- case 8000:
- config2 = 0x00;
- break;
- case 11025:
- config2 = 0x01;
- break;
- case 12000:
- config2 = 0x02;
- break;
- case 16000:
- config2 = 0x03;
- break;
- case 22050:
- config2 = 0x04;
- break;
- case 24000:
- config2 = 0x05;
- break;
- case 32000:
- config2 = 0x07;
- break;
- case 44100:
- config2 = 0x08;
- break;
- case 48000:
- config2 = 0x09;
- break;
- }
-
- snd_pmic_ops_nc.num_channel = num_channel;
- if (snd_pmic_ops_nc.num_channel == 1) {
-
- sc_access.value = 0x07;
- sc_access.reg_addr = RMUTE;
- pr_debug("RIGHT_HP_MUTE value%d\n", sc_access.value);
- sc_access.mask = MASK2;
- sst_sc_reg_access(&sc_access, PMIC_READ_MODIFY, 1);
- } else {
- sc_access.value = 0x00;
- sc_access.reg_addr = RMUTE;
- pr_debug("RIGHT_HP_MUTE value %d\n", sc_access.value);
- sc_access.mask = MASK2;
- sst_sc_reg_access(&sc_access, PMIC_READ_MODIFY, 1);
-
-
- }
-
- pr_debug("word_size = %d\n", word_size);
-
- if (word_size == 24) {
- sc_access.reg_addr = AUDIOPORT2;
- sc_access.value = config2 | 0x10;
- sc_access.mask = 0x1F;
- } else {
- sc_access.value = config2;
- sc_access.mask = 0x1F;
- sc_access.reg_addr = AUDIOPORT2;
- }
- sst_sc_reg_access(&sc_access, PMIC_READ_MODIFY, 1);
-
- pr_debug("word_size = %d\n", word_size);
- sc_access.reg_addr = AUDIOPORT1;
- sc_access.mask = MASK5|MASK4|MASK1|MASK0;
- if (word_size == 16)
- sc_access.value = 0x98;
- else if (word_size == 24)
- sc_access.value = 0xAB;
-
- return sst_sc_reg_access(&sc_access, PMIC_READ_MODIFY, 1);
-
-
-
-}
-
-static int nc_set_selected_output_dev(u8 value)
-{
- struct sc_reg_access sc_access_HP[] = {
- {LMUTE, 0x02, 0x06},
- {RMUTE, 0x02, 0x06},
- {DRVPOWERCTRL, 0x06, 0x06},
- };
- struct sc_reg_access sc_access_IS[] = {
- {LMUTE, 0x04, 0x06},
- {RMUTE, 0x04, 0x06},
- {DRVPOWERCTRL, 0x00, 0x06},
- };
- int retval = 0;
-
- snd_pmic_ops_nc.output_dev_id = value;
- if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
- retval = nc_init_card();
- if (retval)
- return retval;
- pr_debug("nc set selected output:%d\n", value);
- mutex_lock(&snd_pmic_ops_nc.lock);
- switch (value) {
- case STEREO_HEADPHONE:
- if (snd_pmic_ops_nc.pb_on)
- sst_sc_reg_access(sc_access_HP+2, PMIC_WRITE, 1);
- retval = sst_sc_reg_access(sc_access_HP, PMIC_WRITE, 2);
- nc_set_amp_power(0);
- break;
- case MONO_EARPIECE:
- case INTERNAL_SPKR:
- retval = sst_sc_reg_access(sc_access_IS, PMIC_WRITE, 3);
- if (snd_pmic_ops_nc.pb_on)
- nc_set_amp_power(1);
- break;
- default:
- pr_err("rcvd illegal request: %d\n", value);
- mutex_unlock(&snd_pmic_ops_nc.lock);
- return -EINVAL;
- }
- mutex_unlock(&snd_pmic_ops_nc.lock);
- return retval;
-}
-
-static int nc_audio_init(void)
-{
- struct sc_reg_access sc_acces, sc_access[] = {
- {0x100, 0x00, 0},
- {0x101, 0x00, 0},
- {0x104, 0x8B, 0},
- {0x107, 0x11, 0},
- {0x10B, 0x0E, 0},
- {0x114, 0x00, 0},
- {0x115, 0x00, 0},
- {0x128, 0x00, 0},
- {0x129, 0x00, 0},
- {0x12A, 0x00, 0},
- {0x12B, 0xee, 0},
- {0x12C, 0xf6, 0},
- };
-
- sst_sc_reg_access(sc_access, PMIC_WRITE, 12);
- pr_debug("Audio Init successfully!!\n");
-
- /*set output device */
- nc_set_selected_output_dev(snd_pmic_ops_nc.output_dev_id);
-
- if (snd_pmic_ops_nc.num_channel == 1) {
- sc_acces.value = 0x07;
- sc_acces.reg_addr = RMUTE;
- pr_debug("RIGHT_HP_MUTE value%d\n", sc_acces.value);
- sc_acces.mask = MASK2;
- sst_sc_reg_access(&sc_acces, PMIC_READ_MODIFY, 1);
- } else {
- sc_acces.value = 0x00;
- sc_acces.reg_addr = RMUTE;
- pr_debug("RIGHT_HP_MUTE value%d\n", sc_acces.value);
- sc_acces.mask = MASK2;
- sst_sc_reg_access(&sc_acces, PMIC_READ_MODIFY, 1);
- }
-
- return 0;
-}
-
-static int nc_set_audio_port(int status)
-{
- struct sc_reg_access sc_access[2] = {{0,},};
- int retval = 0;
-
- if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
- retval = nc_init_card();
- if (retval)
- return retval;
-
- if (status == DEACTIVATE) {
- /* Deactivate audio port-tristate and power */
- sc_access[0].value = 0x00;
- sc_access[0].mask = MASK4|MASK5;
- sc_access[0].reg_addr = AUDIOPORT1;
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
- } else if (status == ACTIVATE) {
- /* activate audio port */
- nc_audio_init();
- sc_access[0].value = 0x10;
- sc_access[0].mask = MASK4|MASK5 ;
- sc_access[0].reg_addr = AUDIOPORT1;
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
- } else
- return -EINVAL;
-
-}
-
-static int nc_set_voice_port(int status)
-{
- struct sc_reg_access sc_access[2] = {{0,},};
- int retval = 0;
-
- if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
- retval = nc_init_card();
- if (retval)
- return retval;
-
- if (status == DEACTIVATE) {
- /* Activate Voice port */
- sc_access[0].value = 0x00;
- sc_access[0].mask = MASK4;
- sc_access[0].reg_addr = VOICEPORT1;
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
- } else if (status == ACTIVATE) {
- /* Deactivate voice port */
- nc_set_pcm_voice_params();
- sc_access[0].value = 0x10;
- sc_access[0].mask = MASK4;
- sc_access[0].reg_addr = VOICEPORT1;
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
- } else
- return -EINVAL;
-}
-
-static int nc_set_mute(int dev_id, u8 value)
-{
- struct sc_reg_access sc_access[3];
- u8 mute_val, cap_mute;
- int retval = 0;
-
- if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
- retval = nc_init_card();
- if (retval)
- return retval;
-
- pr_debug("set device id::%d, value %d\n", dev_id, value);
-
- switch (dev_id) {
- case PMIC_SND_MUTE_ALL:
- pr_debug("PMIC_SND_MUTE_ALL value %d\n", value);
- snd_pmic_ops_nc.mute_status = value;
- snd_pmic_ops_nc.master_mute = value;
- if (value == UNMUTE) {
- /* unmute the system, set the 7th bit to zero */
- mute_val = cap_mute = 0x00;
- } else {
- /* MUTE:Set the seventh bit */
- mute_val = 0x80;
- cap_mute = 0x40;
- }
- sc_access[0].reg_addr = AUDIOLVOL;
- sc_access[1].reg_addr = AUDIORVOL;
- sc_access[0].mask = sc_access[1].mask = MASK7;
- sc_access[0].value = sc_access[1].value = mute_val;
- if (snd_pmic_ops_nc.num_channel == 1)
- sc_access[1].value = 0x80;
- if (!sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2)) {
- sc_access[0].reg_addr = 0x109;
- sc_access[1].reg_addr = 0x10a;
- sc_access[2].reg_addr = 0x105;
- sc_access[0].mask = sc_access[1].mask =
- sc_access[2].mask = MASK6;
- sc_access[0].value = sc_access[1].value =
- sc_access[2].value = cap_mute;
-
- if ((snd_pmic_ops_nc.input_dev_id == AMIC) ||
- (snd_pmic_ops_nc.input_dev_id == DMIC))
- sc_access[1].value = 0x40;
- if (snd_pmic_ops_nc.input_dev_id == HS_MIC)
- sc_access[0].value = 0x40;
- retval = sst_sc_reg_access(sc_access,
- PMIC_READ_MODIFY, 3);
- }
- break;
- case PMIC_SND_HP_MIC_MUTE:
- pr_debug("PMIC_SND_HPMIC_MUTE value %d\n", value);
- if (value == UNMUTE) {
- /* unmute the system, set the 6th bit to one */
- sc_access[0].value = 0x00;
- } else {
- /* mute the system, reset the 6th bit to zero */
- sc_access[0].value = 0x40;
- }
- sc_access[0].reg_addr = LIRSEL;
- sc_access[0].mask = MASK6;
- retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
- break;
- case PMIC_SND_AMIC_MUTE:
- pr_debug("PMIC_SND_AMIC_MUTE value %d\n", value);
- if (value == UNMUTE) {
- /* unmute the system, set the 6th bit to one */
- sc_access[0].value = 0x00;
- } else {
- /* mute the system, reset the 6th bit to zero */
- sc_access[0].value = 0x40;
- }
- sc_access[0].reg_addr = LILSEL;
- sc_access[0].mask = MASK6;
- retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
- break;
-
- case PMIC_SND_DMIC_MUTE:
- pr_debug("INPUT_MUTE_DMIC value%d\n", value);
- if (value == UNMUTE) {
- /* unmute the system, set the 6th bit to one */
- sc_access[1].value = 0x00;
- sc_access[0].value = 0x00;
- } else {
- /* mute the system, reset the 6th bit to zero */
- sc_access[1].value = 0x40;
- sc_access[0].value = 0x40;
- }
- sc_access[0].reg_addr = DMICCTRL1;
- sc_access[0].mask = MASK6;
- sc_access[1].reg_addr = LILSEL;
- sc_access[1].mask = MASK6;
- retval = sst_sc_reg_access(sc_access,
- PMIC_READ_MODIFY, 2);
- break;
-
- case PMIC_SND_LEFT_HP_MUTE:
- case PMIC_SND_RIGHT_HP_MUTE:
- snd_pmic_ops_nc.mute_status = value;
- if (value == UNMUTE)
- sc_access[0].value = 0x0;
- else
- sc_access[0].value = 0x04;
-
- if (dev_id == PMIC_SND_LEFT_HP_MUTE) {
- sc_access[0].reg_addr = LMUTE;
- pr_debug("LEFT_HP_MUTE value %d\n",
- sc_access[0].value);
- } else {
- if (snd_pmic_ops_nc.num_channel == 1)
- sc_access[0].value = 0x04;
- sc_access[0].reg_addr = RMUTE;
- pr_debug("RIGHT_HP_MUTE value %d\n",
- sc_access[0].value);
- }
- sc_access[0].mask = MASK2;
- retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
- break;
- case PMIC_SND_LEFT_SPEAKER_MUTE:
- case PMIC_SND_RIGHT_SPEAKER_MUTE:
- if (value == UNMUTE)
- sc_access[0].value = 0x00;
- else
- sc_access[0].value = 0x03;
- sc_access[0].reg_addr = LMUTE;
- pr_debug("SPEAKER_MUTE %d\n", sc_access[0].value);
- sc_access[0].mask = MASK1;
- retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
- break;
- default:
- return -EINVAL;
- }
- return retval ;
-
-}
-
-static int nc_set_vol(int dev_id, int value)
-{
- struct sc_reg_access sc_access[3];
- int retval = 0, entries = 0;
-
- if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
- retval = nc_init_card();
- if (retval)
- return retval;
-
- pr_debug("set volume:%d\n", dev_id);
- switch (dev_id) {
- case PMIC_SND_CAPTURE_VOL:
- pr_debug("PMIC_SND_CAPTURE_VOL:value::%d\n", value);
- sc_access[0].value = sc_access[1].value =
- sc_access[2].value = -value;
- sc_access[0].mask = sc_access[1].mask = sc_access[2].mask =
- (MASK0|MASK1|MASK2|MASK3|MASK4|MASK5);
- sc_access[0].reg_addr = 0x10a;
- sc_access[1].reg_addr = 0x109;
- sc_access[2].reg_addr = 0x105;
- entries = 3;
- break;
-
- case PMIC_SND_LEFT_PB_VOL:
- pr_debug("PMIC_SND_LEFT_HP_VOL %d\n", value);
- sc_access[0].value = -value;
- sc_access[0].reg_addr = HPLVOL;
- sc_access[0].mask = (MASK0|MASK1|MASK2|MASK3|MASK4);
- entries = 1;
- break;
-
- case PMIC_SND_RIGHT_PB_VOL:
- pr_debug("PMIC_SND_RIGHT_HP_VOL value %d\n", value);
- if (snd_pmic_ops_nc.num_channel == 1) {
- sc_access[0].value = 0x04;
- sc_access[0].reg_addr = RMUTE;
- sc_access[0].mask = MASK2;
- } else {
- sc_access[0].value = -value;
- sc_access[0].reg_addr = HPRVOL;
- sc_access[0].mask = (MASK0|MASK1|MASK2|MASK3|MASK4);
- }
- entries = 1;
- break;
-
- case PMIC_SND_LEFT_MASTER_VOL:
- pr_debug("PMIC_SND_LEFT_MASTER_VOL value %d\n", value);
- sc_access[0].value = -value;
- sc_access[0].reg_addr = AUDIOLVOL;
- sc_access[0].mask =
- (MASK0|MASK1|MASK2|MASK3|MASK4|MASK5|MASK6);
- entries = 1;
- break;
-
- case PMIC_SND_RIGHT_MASTER_VOL:
- pr_debug("PMIC_SND_RIGHT_MASTER_VOL value %d\n", value);
- sc_access[0].value = -value;
- sc_access[0].reg_addr = AUDIORVOL;
- sc_access[0].mask =
- (MASK0|MASK1|MASK2|MASK3|MASK4|MASK5|MASK6);
- entries = 1;
- break;
-
- default:
- return -EINVAL;
-
- }
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, entries);
-}
-
-static int nc_set_selected_input_dev(u8 value)
-{
- struct sc_reg_access sc_access[6];
- u8 num_val;
- int retval = 0;
-
- if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
- retval = nc_init_card();
- if (retval)
- return retval;
- snd_pmic_ops_nc.input_dev_id = value;
-
- pr_debug("nc set selected input:%d\n", value);
-
- switch (value) {
- case AMIC:
- pr_debug("Selecting AMIC\n");
- sc_access[0].reg_addr = 0x107;
- sc_access[0].value = 0x40;
- sc_access[0].mask = MASK6|MASK3|MASK1|MASK0;
- sc_access[1].reg_addr = 0x10a;
- sc_access[1].value = 0x40;
- sc_access[1].mask = MASK6;
- sc_access[2].reg_addr = 0x109;
- sc_access[2].value = 0x00;
- sc_access[2].mask = MASK6;
- sc_access[3].reg_addr = 0x105;
- sc_access[3].value = 0x40;
- sc_access[3].mask = MASK6;
- num_val = 4;
- break;
-
- case HS_MIC:
- pr_debug("Selecting HS_MIC\n");
- sc_access[0].reg_addr = MICCTRL;
- sc_access[0].mask = MASK6|MASK3|MASK1|MASK0;
- sc_access[0].value = 0x00;
- sc_access[1].reg_addr = 0x109;
- sc_access[1].mask = MASK6;
- sc_access[1].value = 0x40;
- sc_access[2].reg_addr = 0x10a;
- sc_access[2].mask = MASK6;
- sc_access[2].value = 0x00;
- sc_access[3].reg_addr = 0x105;
- sc_access[3].value = 0x40;
- sc_access[3].mask = MASK6;
- sc_access[4].reg_addr = ADCSAMPLERATE;
- sc_access[4].mask = MASK7|MASK6|MASK5|MASK4|MASK3;
- sc_access[4].value = 0xc8;
- num_val = 5;
- break;
-
- case DMIC:
- pr_debug("DMIC\n");
- sc_access[0].reg_addr = MICCTRL;
- sc_access[0].mask = MASK6|MASK3|MASK1|MASK0;
- sc_access[0].value = 0x0B;
- sc_access[1].reg_addr = 0x105;
- sc_access[1].value = 0x80;
- sc_access[1].mask = MASK7|MASK6;
- sc_access[2].reg_addr = 0x10a;
- sc_access[2].value = 0x40;
- sc_access[2].mask = MASK6;
- sc_access[3].reg_addr = LILSEL;
- sc_access[3].mask = MASK6;
- sc_access[3].value = 0x00;
- sc_access[4].reg_addr = ADCSAMPLERATE;
- sc_access[4].mask = MASK7|MASK6|MASK5|MASK4|MASK3;
- sc_access[4].value = 0x33;
- num_val = 5;
- break;
- default:
- return -EINVAL;
- }
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, num_val);
-}
-
-static int nc_get_mute(int dev_id, u8 *value)
-{
- int retval = 0, mask = 0;
- struct sc_reg_access sc_access = {0,};
-
- if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
- retval = nc_init_card();
- if (retval)
- return retval;
-
- pr_debug("get mute::%d\n", dev_id);
-
- switch (dev_id) {
- case PMIC_SND_AMIC_MUTE:
- pr_debug("PMIC_SND_INPUT_MUTE_MIC1\n");
- sc_access.reg_addr = LILSEL;
- mask = MASK6;
- break;
- case PMIC_SND_HP_MIC_MUTE:
- pr_debug("PMIC_SND_INPUT_MUTE_MIC2\n");
- sc_access.reg_addr = LIRSEL;
- mask = MASK6;
- break;
- case PMIC_SND_LEFT_HP_MUTE:
- case PMIC_SND_RIGHT_HP_MUTE:
- mask = MASK2;
- pr_debug("PMIC_SN_LEFT/RIGHT_HP_MUTE\n");
- if (dev_id == PMIC_SND_RIGHT_HP_MUTE)
- sc_access.reg_addr = RMUTE;
- else
- sc_access.reg_addr = LMUTE;
- break;
-
- case PMIC_SND_LEFT_SPEAKER_MUTE:
- pr_debug("PMIC_MONO_EARPIECE_MUTE\n");
- sc_access.reg_addr = RMUTE;
- mask = MASK1;
- break;
- case PMIC_SND_DMIC_MUTE:
- pr_debug("PMIC_SND_INPUT_MUTE_DMIC\n");
- sc_access.reg_addr = 0x105;
- mask = MASK6;
- break;
- default:
- return -EINVAL;
-
- }
- retval = sst_sc_reg_access(&sc_access, PMIC_READ, 1);
- pr_debug("reg value = %d\n", sc_access.value);
- if (retval)
- return retval;
- *value = (sc_access.value) & mask;
- pr_debug("masked value = %d\n", *value);
- if (*value)
- *value = 0;
- else
- *value = 1;
- pr_debug("value returned = 0x%x\n", *value);
- return retval;
-}
-
-static int nc_get_vol(int dev_id, int *value)
-{
- int retval = 0, mask = 0;
- struct sc_reg_access sc_access = {0,};
-
- if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
- retval = nc_init_card();
- if (retval)
- return retval;
-
- switch (dev_id) {
- case PMIC_SND_CAPTURE_VOL:
- pr_debug("PMIC_SND_INPUT_CAPTURE_VOL\n");
- sc_access.reg_addr = LILSEL;
- mask = (MASK0|MASK1|MASK2|MASK3|MASK4|MASK5);
- break;
-
- case PMIC_SND_LEFT_MASTER_VOL:
- pr_debug("GET_VOLUME_PMIC_LEFT_MASTER_VOL\n");
- sc_access.reg_addr = AUDIOLVOL;
- mask = (MASK0|MASK1|MASK2|MASK3|MASK4|MASK5|MASK6);
- break;
-
- case PMIC_SND_RIGHT_MASTER_VOL:
- pr_debug("GET_VOLUME_PMIC_RIGHT_MASTER_VOL\n");
- sc_access.reg_addr = AUDIORVOL;
- mask = (MASK0|MASK1|MASK2|MASK3|MASK4|MASK5|MASK6);
- break;
-
- case PMIC_SND_RIGHT_PB_VOL:
- pr_debug("GET_VOLUME_PMIC_RIGHT_HP_VOL\n");
- sc_access.reg_addr = HPRVOL;
- mask = (MASK0|MASK1|MASK2|MASK3|MASK4);
- break;
-
- case PMIC_SND_LEFT_PB_VOL:
- pr_debug("GET_VOLUME_PMIC_LEFT_HP_VOL\n");
- sc_access.reg_addr = HPLVOL;
- mask = (MASK0|MASK1|MASK2|MASK3|MASK4);
- break;
-
- default:
- return -EINVAL;
-
- }
- retval = sst_sc_reg_access(&sc_access, PMIC_READ, 1);
- pr_debug("value read = 0x%x\n", sc_access.value);
- *value = -((sc_access.value) & mask);
- pr_debug("get vol value returned = %d\n", *value);
- return retval;
-}
-
-static void hp_automute(enum snd_jack_types type, int present)
-{
- u8 in = DMIC;
- u8 out = INTERNAL_SPKR;
- if (present) {
- if (type == SND_JACK_HEADSET)
- in = HS_MIC;
- out = STEREO_HEADPHONE;
- }
- nc_set_selected_input_dev(in);
- nc_set_selected_output_dev(out);
-}
-
-static void nc_pmic_irq_cb(void *cb_data, u8 intsts)
-{
- u8 value = 0;
- struct mad_jack *mjack = NULL;
- unsigned int present = 0, jack_event_flag = 0, buttonpressflag = 0;
- struct snd_intelmad *intelmaddata = cb_data;
- struct sc_reg_access sc_access_read = {0,};
-
- sc_access_read.reg_addr = 0x132;
- sst_sc_reg_access(&sc_access_read, PMIC_READ, 1);
- value = (sc_access_read.value);
- pr_debug("value returned = 0x%x\n", value);
-
- mjack = &intelmaddata->jack[0];
- if (intsts & 0x1) {
- pr_debug("SST DBG:MAD headset detected\n");
- /* send headset detect/undetect */
- present = (value == 0x1) ? 3 : 0;
- jack_event_flag = 1;
- mjack->jack.type = SND_JACK_HEADSET;
- hp_automute(SND_JACK_HEADSET, present);
- }
-
- if (intsts & 0x2) {
- pr_debug(":MAD headphone detected\n");
- /* send headphone detect/undetect */
- present = (value == 0x2) ? 1 : 0;
- jack_event_flag = 1;
- mjack->jack.type = SND_JACK_HEADPHONE;
- hp_automute(SND_JACK_HEADPHONE, present);
- }
-
- if (intsts & 0x4) {
- pr_debug("MAD short push detected\n");
- /* send short push */
- present = 1;
- jack_event_flag = 1;
- buttonpressflag = 1;
- mjack->jack.type = MID_JACK_HS_SHORT_PRESS;
- }
-
- if (intsts & 0x8) {
- pr_debug(":MAD long push detected\n");
- /* send long push */
- present = 1;
- jack_event_flag = 1;
- buttonpressflag = 1;
- mjack->jack.type = MID_JACK_HS_LONG_PRESS;
- }
-
- if (jack_event_flag)
- sst_mad_send_jack_report(&mjack->jack,
- buttonpressflag, present);
-}
-static int nc_jack_enable(void)
-{
- return 0;
-}
-
-struct snd_pmic_ops snd_pmic_ops_nc = {
- .input_dev_id = DMIC,
- .output_dev_id = INTERNAL_SPKR,
- .set_input_dev = nc_set_selected_input_dev,
- .set_output_dev = nc_set_selected_output_dev,
- .set_mute = nc_set_mute,
- .get_mute = nc_get_mute,
- .set_vol = nc_set_vol,
- .get_vol = nc_get_vol,
- .init_card = nc_init_card,
- .set_pcm_audio_params = nc_set_pcm_audio_params,
- .set_pcm_voice_params = nc_set_pcm_voice_params,
- .set_voice_port = nc_set_voice_port,
- .set_audio_port = nc_set_audio_port,
- .power_up_pmic_pb = nc_power_up_pb,
- .power_up_pmic_cp = nc_power_up_cp,
- .power_down_pmic_pb = nc_power_down_pb,
- .power_down_pmic_cp = nc_power_down_cp,
- .power_down_pmic = nc_power_down,
- .pmic_irq_cb = nc_pmic_irq_cb,
- .pmic_jack_enable = nc_jack_enable,
-};
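Note on the file removed above: it exposed every PMIC callback through the snd_pmic_ops_nc table, and the intelmid core dispatched indirectly through that ops pointer. A minimal sketch of that dispatch pattern, assuming the struct snd_pmic_ops layout implied by the table; the caller below is hypothetical and not code from this tree:

/* Illustrative only: dispatching through a snd_pmic_ops table.
 * 'ops' would point at snd_pmic_ops_nc (or another PMIC variant);
 * pick_output() is a made-up caller for the sketch. */
static int pick_output(struct snd_pmic_ops *ops, u8 dev_id)
{
	if (!ops || !ops->set_output_dev)
		return -ENODEV;
	return ops->set_output_dev(dev_id);	/* e.g. INTERNAL_SPKR */
}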
diff --git a/drivers/staging/keucr/smilmain.c b/drivers/staging/keucr/smilmain.c
index 31f7813cab0d..cc49038e55d6 100644
--- a/drivers/staging/keucr/smilmain.c
+++ b/drivers/staging/keucr/smilmain.c
@@ -148,7 +148,7 @@ int Media_D_ReadSector(struct us_data *us, DWORD start,WORD count,BYTE *buf)
{
WORD len, bn;
- //if (Check_D_MediaPower()) ; b 6250 don't care
+ //if (Check_D_MediaPower()) ; on the 6250 we don't care
// return(ErrCode);
//if (Check_D_MediaFmt(fdoExt)) ;
// return(ErrCode);
@@ -594,7 +594,7 @@ int Media_D_OneSectWriteFlush(PFDO_DEVICE_EXTENSION fdoExt)
// if (Check_D_CardStsChg())
// MediaChange = ERROR;
// //usleep(56*1024);
-// if ((!Check_D_CntPower())&&(!MediaChange)) // power & Media SQ change, h return success
+// if ((!Check_D_CntPower())&&(!MediaChange)) // have power & Media not changed, then return success
// return(SMSUCCESS);
// //usleep(56*1024);
//
diff --git a/drivers/staging/line6/Makefile b/drivers/staging/line6/Makefile
index de6bd12e9736..34a2ddacc7e9 100644
--- a/drivers/staging/line6/Makefile
+++ b/drivers/staging/line6/Makefile
@@ -12,4 +12,5 @@ line6usb-y := \
playback.o \
pod.o \
toneport.o \
- variax.o
+ variax.o \
+ podhd.o
diff --git a/drivers/staging/line6/capture.c b/drivers/staging/line6/capture.c
index 9647154a4923..127f95247749 100644
--- a/drivers/staging/line6/capture.c
+++ b/drivers/staging/line6/capture.c
@@ -9,6 +9,7 @@
*
*/
+#include <linux/slab.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
@@ -192,6 +193,31 @@ void line6_capture_check_period(struct snd_line6_pcm *line6pcm, int length)
}
}
+int line6_alloc_capture_buffer(struct snd_line6_pcm *line6pcm)
+{
+ /* We may be invoked multiple times in a row so allocate once only */
+ if (line6pcm->buffer_in)
+ return 0;
+
+ line6pcm->buffer_in =
+ kmalloc(LINE6_ISO_BUFFERS * LINE6_ISO_PACKETS *
+ line6pcm->max_packet_size, GFP_KERNEL);
+
+ if (!line6pcm->buffer_in) {
+ dev_err(line6pcm->line6->ifcdev,
+ "cannot malloc capture buffer\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void line6_free_capture_buffer(struct snd_line6_pcm *line6pcm)
+{
+ kfree(line6pcm->buffer_in);
+ line6pcm->buffer_in = NULL;
+}
+
/*
* Callback for completed capture URB.
*/
@@ -243,11 +269,7 @@ static void audio_in_callback(struct urb *urb)
length += fsize;
/* the following assumes LINE6_ISO_PACKETS == 1: */
-#if LINE6_BACKUP_MONITOR_SIGNAL
- memcpy(line6pcm->prev_fbuf, fbuf, fsize);
-#else
line6pcm->prev_fbuf = fbuf;
-#endif
line6pcm->prev_fsize = fsize;
#ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE
@@ -319,6 +341,13 @@ static int snd_line6_capture_hw_params(struct snd_pcm_substream *substream,
}
/* -- [FD] end */
+ if ((line6pcm->flags & MASK_CAPTURE) == 0) {
+ ret = line6_alloc_capture_buffer(line6pcm);
+
+ if (ret < 0)
+ return ret;
+ }
+
ret = snd_pcm_lib_malloc_pages(substream,
params_buffer_bytes(hw_params));
if (ret < 0)
@@ -331,6 +360,13 @@ static int snd_line6_capture_hw_params(struct snd_pcm_substream *substream,
/* hw_free capture callback */
static int snd_line6_capture_hw_free(struct snd_pcm_substream *substream)
{
+ struct snd_line6_pcm *line6pcm = snd_pcm_substream_chip(substream);
+
+ if ((line6pcm->flags & MASK_CAPTURE) == 0) {
+ line6_unlink_wait_clear_audio_in_urbs(line6pcm);
+ line6_free_capture_buffer(line6pcm);
+ }
+
return snd_pcm_lib_free_pages(substream);
}
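For orientation (not part of the patch itself): the new helpers size the capture buffer as LINE6_ISO_BUFFERS URBs times LINE6_ISO_PACKETS packets of max_packet_size bytes, and line6_alloc_capture_buffer() can be reached from both hw_params and line6_pcm_start() because it returns early once the buffer exists. A compact sketch of that allocate-once pattern with generic, made-up names:

/* Sketch of the idempotent allocation used by line6_alloc_capture_buffer().
 * 'buf' and 'nbytes' are placeholders, not symbols from the driver. */
static int alloc_once(unsigned char **buf, size_t nbytes)
{
	if (*buf)			/* already allocated by another path */
		return 0;
	*buf = kmalloc(nbytes, GFP_KERNEL);
	return *buf ? 0 : -ENOMEM;
}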
diff --git a/drivers/staging/line6/capture.h b/drivers/staging/line6/capture.h
index a7509fbbb954..366cbaa7c88d 100644
--- a/drivers/staging/line6/capture.h
+++ b/drivers/staging/line6/capture.h
@@ -19,11 +19,13 @@
extern struct snd_pcm_ops snd_line6_capture_ops;
+extern int line6_alloc_capture_buffer(struct snd_line6_pcm *line6pcm);
extern void line6_capture_copy(struct snd_line6_pcm *line6pcm, char *fbuf,
int fsize);
extern void line6_capture_check_period(struct snd_line6_pcm *line6pcm,
int length);
extern int line6_create_audio_in_urbs(struct snd_line6_pcm *line6pcm);
+extern void line6_free_capture_buffer(struct snd_line6_pcm *line6pcm);
extern int line6_submit_audio_in_all_urbs(struct snd_line6_pcm *line6pcm);
extern void line6_unlink_audio_in_urbs(struct snd_line6_pcm *line6pcm);
extern void line6_unlink_wait_clear_audio_in_urbs(struct snd_line6_pcm
diff --git a/drivers/staging/line6/driver.c b/drivers/staging/line6/driver.c
index 851b762319cf..6a1959e16e00 100644
--- a/drivers/staging/line6/driver.c
+++ b/drivers/staging/line6/driver.c
@@ -21,6 +21,7 @@
#include "midi.h"
#include "playback.h"
#include "pod.h"
+#include "podhd.h"
#include "revision.h"
#include "toneport.h"
#include "usbdefs.h"
@@ -37,6 +38,8 @@ static const struct usb_device_id line6_id_table[] = {
{USB_DEVICE(LINE6_VENDOR_ID, LINE6_DEVID_BASSPODXTPRO)},
{USB_DEVICE(LINE6_VENDOR_ID, LINE6_DEVID_GUITARPORT)},
{USB_DEVICE(LINE6_VENDOR_ID, LINE6_DEVID_POCKETPOD)},
+ {USB_DEVICE(LINE6_VENDOR_ID, LINE6_DEVID_PODHD300)},
+ {USB_DEVICE(LINE6_VENDOR_ID, LINE6_DEVID_PODHD500)},
{USB_DEVICE(LINE6_VENDOR_ID, LINE6_DEVID_PODSTUDIO_GX)},
{USB_DEVICE(LINE6_VENDOR_ID, LINE6_DEVID_PODSTUDIO_UX1)},
{USB_DEVICE(LINE6_VENDOR_ID, LINE6_DEVID_PODSTUDIO_UX2)},
@@ -56,23 +59,25 @@ MODULE_DEVICE_TABLE(usb, line6_id_table);
/* *INDENT-OFF* */
static struct line6_properties line6_properties_table[] = {
- { "BassPODxt", "BassPODxt", LINE6_BIT_BASSPODXT, LINE6_BIT_CONTROL_PCM_HWMON },
- { "BassPODxtLive", "BassPODxt Live", LINE6_BIT_BASSPODXTLIVE, LINE6_BIT_CONTROL_PCM_HWMON },
- { "BassPODxtPro", "BassPODxt Pro", LINE6_BIT_BASSPODXTPRO, LINE6_BIT_CONTROL_PCM_HWMON },
- { "GuitarPort", "GuitarPort", LINE6_BIT_GUITARPORT, LINE6_BIT_PCM },
- { "PocketPOD", "Pocket POD", LINE6_BIT_POCKETPOD, LINE6_BIT_CONTROL },
- { "PODStudioGX", "POD Studio GX", LINE6_BIT_PODSTUDIO_GX, LINE6_BIT_PCM },
- { "PODStudioUX1", "POD Studio UX1", LINE6_BIT_PODSTUDIO_UX1, LINE6_BIT_PCM },
- { "PODStudioUX2", "POD Studio UX2", LINE6_BIT_PODSTUDIO_UX2, LINE6_BIT_PCM },
- { "PODX3", "POD X3", LINE6_BIT_PODX3, LINE6_BIT_PCM },
- { "PODX3Live", "POD X3 Live", LINE6_BIT_PODX3LIVE, LINE6_BIT_PCM },
- { "PODxt", "PODxt", LINE6_BIT_PODXT, LINE6_BIT_CONTROL_PCM_HWMON },
- { "PODxtLive", "PODxt Live", LINE6_BIT_PODXTLIVE, LINE6_BIT_CONTROL_PCM_HWMON },
- { "PODxtPro", "PODxt Pro", LINE6_BIT_PODXTPRO, LINE6_BIT_CONTROL_PCM_HWMON },
- { "TonePortGX", "TonePort GX", LINE6_BIT_TONEPORT_GX, LINE6_BIT_PCM },
- { "TonePortUX1", "TonePort UX1", LINE6_BIT_TONEPORT_UX1, LINE6_BIT_PCM },
- { "TonePortUX2", "TonePort UX2", LINE6_BIT_TONEPORT_UX2, LINE6_BIT_PCM },
- { "Variax", "Variax Workbench", LINE6_BIT_VARIAX, LINE6_BIT_CONTROL }
+ { LINE6_BIT_BASSPODXT, "BassPODxt", "BassPODxt", LINE6_BIT_CONTROL_PCM_HWMON },
+ { LINE6_BIT_BASSPODXTLIVE, "BassPODxtLive", "BassPODxt Live", LINE6_BIT_CONTROL_PCM_HWMON },
+ { LINE6_BIT_BASSPODXTPRO, "BassPODxtPro", "BassPODxt Pro", LINE6_BIT_CONTROL_PCM_HWMON },
+ { LINE6_BIT_GUITARPORT, "GuitarPort", "GuitarPort", LINE6_BIT_PCM },
+ { LINE6_BIT_POCKETPOD, "PocketPOD", "Pocket POD", LINE6_BIT_CONTROL },
+ { LINE6_BIT_PODHD300, "PODHD300", "POD HD300", LINE6_BIT_CONTROL_PCM_HWMON },
+ { LINE6_BIT_PODHD500, "PODHD500", "POD HD500", LINE6_BIT_CONTROL_PCM_HWMON },
+ { LINE6_BIT_PODSTUDIO_GX, "PODStudioGX", "POD Studio GX", LINE6_BIT_PCM },
+ { LINE6_BIT_PODSTUDIO_UX1, "PODStudioUX1", "POD Studio UX1", LINE6_BIT_PCM },
+ { LINE6_BIT_PODSTUDIO_UX2, "PODStudioUX2", "POD Studio UX2", LINE6_BIT_PCM },
+ { LINE6_BIT_PODX3, "PODX3", "POD X3", LINE6_BIT_PCM },
+ { LINE6_BIT_PODX3LIVE, "PODX3Live", "POD X3 Live", LINE6_BIT_PCM },
+ { LINE6_BIT_PODXT, "PODxt", "PODxt", LINE6_BIT_CONTROL_PCM_HWMON },
+ { LINE6_BIT_PODXTLIVE, "PODxtLive", "PODxt Live", LINE6_BIT_CONTROL_PCM_HWMON },
+ { LINE6_BIT_PODXTPRO, "PODxtPro", "PODxt Pro", LINE6_BIT_CONTROL_PCM_HWMON },
+ { LINE6_BIT_TONEPORT_GX, "TonePortGX", "TonePort GX", LINE6_BIT_PCM },
+ { LINE6_BIT_TONEPORT_UX1, "TonePortUX1", "TonePort UX1", LINE6_BIT_PCM },
+ { LINE6_BIT_TONEPORT_UX2, "TonePortUX2", "TonePort UX2", LINE6_BIT_PCM },
+ { LINE6_BIT_VARIAX, "Variax", "Variax Workbench", LINE6_BIT_CONTROL },
};
/* *INDENT-ON* */
@@ -437,6 +442,10 @@ static void line6_data_received(struct urb *urb)
line6);
break;
+ case LINE6_DEVID_PODHD300:
+ case LINE6_DEVID_PODHD500:
+ break; /* let userspace handle MIDI */
+
case LINE6_DEVID_PODXTLIVE:
switch (line6->interface_number) {
case PODXTLIVE_INTERFACE_POD:
@@ -720,8 +729,8 @@ static int line6_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
int devtype;
- struct usb_device *usbdev = NULL;
- struct usb_line6 *line6 = NULL;
+ struct usb_device *usbdev;
+ struct usb_line6 *line6;
const struct line6_properties *properties;
int devnum;
int interface_number, alternate = 0;
@@ -794,6 +803,7 @@ static int line6_probe(struct usb_interface *interface,
}
break;
+ case LINE6_DEVID_PODHD500:
case LINE6_DEVID_PODX3:
case LINE6_DEVID_PODX3LIVE:
switch (interface_number) {
@@ -812,6 +822,7 @@ static int line6_probe(struct usb_interface *interface,
case LINE6_DEVID_BASSPODXTPRO:
case LINE6_DEVID_PODXT:
case LINE6_DEVID_PODXTPRO:
+ case LINE6_DEVID_PODHD300:
alternate = 5;
break;
@@ -865,6 +876,18 @@ static int line6_probe(struct usb_interface *interface,
ep_write = 0x03;
break;
+ case LINE6_DEVID_PODHD300:
+ size = sizeof(struct usb_line6_podhd);
+ ep_read = 0x84;
+ ep_write = 0x03;
+ break;
+
+ case LINE6_DEVID_PODHD500:
+ size = sizeof(struct usb_line6_podhd);
+ ep_read = 0x81;
+ ep_write = 0x01;
+ break;
+
case LINE6_DEVID_POCKETPOD:
size = sizeof(struct usb_line6_pod);
ep_read = 0x82;
@@ -923,7 +946,7 @@ static int line6_probe(struct usb_interface *interface,
}
if (size == 0) {
- dev_err(line6->ifcdev,
+ dev_err(&interface->dev,
"driver bug: interface data size not set\n");
ret = -ENODEV;
goto err_put;
@@ -1017,6 +1040,12 @@ static int line6_probe(struct usb_interface *interface,
ret = line6_pod_init(interface, (struct usb_line6_pod *)line6);
break;
+ case LINE6_DEVID_PODHD300:
+ case LINE6_DEVID_PODHD500:
+ ret = line6_podhd_init(interface,
+ (struct usb_line6_podhd *)line6);
+ break;
+
case LINE6_DEVID_PODXTLIVE:
switch (interface_number) {
case PODXTLIVE_INTERFACE_POD:
@@ -1139,6 +1168,11 @@ static void line6_disconnect(struct usb_interface *interface)
line6_pod_disconnect(interface);
break;
+ case LINE6_DEVID_PODHD300:
+ case LINE6_DEVID_PODHD500:
+ line6_podhd_disconnect(interface);
+ break;
+
case LINE6_DEVID_PODXTLIVE:
switch (interface_number) {
case PODXTLIVE_INTERFACE_POD:
diff --git a/drivers/staging/line6/driver.h b/drivers/staging/line6/driver.h
index 553192f49317..117bf9943568 100644
--- a/drivers/staging/line6/driver.h
+++ b/drivers/staging/line6/driver.h
@@ -88,6 +88,11 @@ static const int SYSEX_EXTRA_SIZE = sizeof(line6_midi_id) + 4;
*/
struct line6_properties {
/**
+ Bit identifying this device in the line6usb driver.
+ */
+ int device_bit;
+
+ /**
Card id string (maximum 16 characters).
This can be used to address the device in ALSA programs as
"default:CARD=<id>"
@@ -100,11 +105,6 @@ struct line6_properties {
const char *name;
/**
- Bit identifying this device in the line6usb driver.
- */
- int device_bit;
-
- /**
Bit vector defining this device's capabilities in the
line6usb driver.
*/
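The field move above keeps driver.h in step with the reordered line6_properties_table in driver.c, which fills the struct with positional initializers, so declaration order and table order must match. As an aside, a hedged sketch of the designated-initializer form of one entry, which would make the order irrelevant; the .id and .capabilities field names are assumed from the surrounding comments rather than quoted from the patch:

/* Sketch only, not part of the patch: */
{ .device_bit   = LINE6_BIT_PODHD300,
  .id           = "PODHD300",
  .name         = "POD HD300",
  .capabilities = LINE6_BIT_CONTROL_PCM_HWMON },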
diff --git a/drivers/staging/line6/midi.c b/drivers/staging/line6/midi.c
index e554a2da643a..13d02939c3cb 100644
--- a/drivers/staging/line6/midi.c
+++ b/drivers/staging/line6/midi.c
@@ -135,7 +135,7 @@ static int send_midi_async(struct usb_line6 *line6, unsigned char *data,
line6_write_hexdump(line6, 'S', data, length);
#endif
- transfer_buffer = kmalloc(length, GFP_ATOMIC);
+ transfer_buffer = kmemdup(data, length, GFP_ATOMIC);
if (transfer_buffer == NULL) {
usb_free_urb(urb);
@@ -143,7 +143,6 @@ static int send_midi_async(struct usb_line6 *line6, unsigned char *data,
return -ENOMEM;
}
- memcpy(transfer_buffer, data, length);
usb_fill_int_urb(urb, line6->usbdev,
usb_sndbulkpipe(line6->usbdev,
line6->ep_control_write),
@@ -173,6 +172,8 @@ static int send_midi_async(struct usb_line6 *line6, unsigned char *data,
break;
case LINE6_DEVID_VARIAX:
+ case LINE6_DEVID_PODHD300:
+ case LINE6_DEVID_PODHD500:
break;
default:
@@ -307,10 +308,10 @@ static ssize_t midi_set_midi_mask_transmit(struct device *dev,
{
struct usb_interface *interface = to_usb_interface(dev);
struct usb_line6 *line6 = usb_get_intfdata(interface);
- unsigned long value;
+ unsigned short value;
int ret;
- ret = strict_strtoul(buf, 10, &value);
+ ret = kstrtou16(buf, 10, &value);
if (ret)
return ret;
@@ -339,10 +340,10 @@ static ssize_t midi_set_midi_mask_receive(struct device *dev,
{
struct usb_interface *interface = to_usb_interface(dev);
struct usb_line6 *line6 = usb_get_intfdata(interface);
- unsigned long value;
+ unsigned short value;
int ret;
- ret = strict_strtoul(buf, 10, &value);
+ ret = kstrtou16(buf, 10, &value);
if (ret)
return ret;
@@ -391,16 +392,32 @@ int line6_init_midi(struct usb_line6 *line6)
return -ENOMEM;
err = line6_midibuf_init(&line6midi->midibuf_in, MIDI_BUFFER_SIZE, 0);
- if (err < 0)
+ if (err < 0) {
+ kfree(line6midi);
return err;
+ }
err = line6_midibuf_init(&line6midi->midibuf_out, MIDI_BUFFER_SIZE, 1);
- if (err < 0)
+ if (err < 0) {
+ kfree(line6midi->midibuf_in.buf);
+ kfree(line6midi);
return err;
+ }
line6midi->line6 = line6;
- line6midi->midi_mask_transmit = 1;
- line6midi->midi_mask_receive = 4;
+
+ switch (line6->product) {
+ case LINE6_DEVID_PODHD300:
+ case LINE6_DEVID_PODHD500:
+ line6midi->midi_mask_transmit = 1;
+ line6midi->midi_mask_receive = 1;
+ break;
+
+ default:
+ line6midi->midi_mask_transmit = 1;
+ line6midi->midi_mask_receive = 4;
+ }
+
line6->line6midi = line6midi;
err = snd_device_new(line6->card, SNDRV_DEV_RAWMIDI, line6midi,
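Two small patterns in the midi.c hunks above are worth spelling out: kmemdup() replaces the kmalloc()+memcpy() pair, and kstrtou16() replaces the deprecated strict_strtoul(), rejecting input that does not fit the 16-bit channel masks. A brief sketch of both, assuming an ordinary sysfs store handler; the names here are illustrative, not from the driver:

/* Illustrative sketch, not driver code. */
static ssize_t example_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	u16 mask;
	int ret = kstrtou16(buf, 10, &mask);	/* errors on junk or overflow */

	if (ret)
		return ret;
	/* ... use mask ... */
	return count;
}

static void *example_dup(const void *data, size_t len)
{
	return kmemdup(data, len, GFP_ATOMIC);	/* kmalloc + memcpy in one call */
}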
diff --git a/drivers/staging/line6/midi.h b/drivers/staging/line6/midi.h
index b73a025d8be9..4a9e9f947297 100644
--- a/drivers/staging/line6/midi.h
+++ b/drivers/staging/line6/midi.h
@@ -57,12 +57,12 @@ struct snd_line6_midi {
/**
Bit mask for output MIDI channels.
*/
- int midi_mask_transmit;
+ unsigned short midi_mask_transmit;
/**
Bit mask for input MIDI channels.
*/
- int midi_mask_receive;
+ unsigned short midi_mask_receive;
/**
Buffer for incoming MIDI stream.
diff --git a/drivers/staging/line6/pcm.c b/drivers/staging/line6/pcm.c
index 9d4c8a606eea..37675e66da81 100644
--- a/drivers/staging/line6/pcm.c
+++ b/drivers/staging/line6/pcm.c
@@ -86,31 +86,22 @@ static DEVICE_ATTR(impulse_period, S_IWUSR | S_IRUGO, pcm_get_impulse_period,
#endif
+static bool test_flags(unsigned long flags0, unsigned long flags1,
+ unsigned long mask)
+{
+ return ((flags0 & mask) == 0) && ((flags1 & mask) != 0);
+}
+
int line6_pcm_start(struct snd_line6_pcm *line6pcm, int channels)
{
unsigned long flags_old =
__sync_fetch_and_or(&line6pcm->flags, channels);
unsigned long flags_new = flags_old | channels;
int err = 0;
-
-#if LINE6_BACKUP_MONITOR_SIGNAL
- if (!(line6pcm->line6->properties->capabilities & LINE6_BIT_HWMON)) {
- line6pcm->prev_fbuf =
- kmalloc(LINE6_ISO_PACKETS * line6pcm->max_packet_size,
- GFP_KERNEL);
-
- if (!line6pcm->prev_fbuf) {
- dev_err(line6pcm->line6->ifcdev,
- "cannot malloc monitor buffer\n");
- return -ENOMEM;
- }
- }
-#else
+
line6pcm->prev_fbuf = NULL;
-#endif
- if (((flags_old & MASK_CAPTURE) == 0) &&
- ((flags_new & MASK_CAPTURE) != 0)) {
+ if (test_flags(flags_old, flags_new, MASK_CAPTURE)) {
/*
Waiting for completion of active URBs in the stop handler is
a bug, we therefore report an error if capturing is restarted
@@ -119,54 +110,47 @@ int line6_pcm_start(struct snd_line6_pcm *line6pcm, int channels)
if (line6pcm->active_urb_in | line6pcm->unlink_urb_in)
return -EBUSY;
- line6pcm->buffer_in =
- kmalloc(LINE6_ISO_BUFFERS * LINE6_ISO_PACKETS *
- line6pcm->max_packet_size, GFP_KERNEL);
+ if (!(flags_new & MASK_PCM_ALSA_CAPTURE)) {
+ err = line6_alloc_capture_buffer(line6pcm);
- if (!line6pcm->buffer_in) {
- dev_err(line6pcm->line6->ifcdev,
- "cannot malloc capture buffer\n");
- return -ENOMEM;
+ if (err < 0)
+ goto pcm_start_error;
}
line6pcm->count_in = 0;
line6pcm->prev_fsize = 0;
err = line6_submit_audio_in_all_urbs(line6pcm);
- if (err < 0) {
- __sync_fetch_and_and(&line6pcm->flags, ~channels);
- return err;
- }
+ if (err < 0)
+ goto pcm_start_error;
}
- if (((flags_old & MASK_PLAYBACK) == 0) &&
- ((flags_new & MASK_PLAYBACK) != 0)) {
+ if (test_flags(flags_old, flags_new, MASK_PLAYBACK)) {
/*
See comment above regarding PCM restart.
*/
if (line6pcm->active_urb_out | line6pcm->unlink_urb_out)
return -EBUSY;
- line6pcm->buffer_out =
- kmalloc(LINE6_ISO_BUFFERS * LINE6_ISO_PACKETS *
- line6pcm->max_packet_size, GFP_KERNEL);
+ if (!(flags_new & MASK_PCM_ALSA_PLAYBACK)) {
+ err = line6_alloc_playback_buffer(line6pcm);
- if (!line6pcm->buffer_out) {
- dev_err(line6pcm->line6->ifcdev,
- "cannot malloc playback buffer\n");
- return -ENOMEM;
+ if (err < 0)
+ goto pcm_start_error;
}
line6pcm->count_out = 0;
err = line6_submit_audio_out_all_urbs(line6pcm);
- if (err < 0) {
- __sync_fetch_and_and(&line6pcm->flags, ~channels);
- return err;
- }
+ if (err < 0)
+ goto pcm_start_error;
}
return 0;
+
+pcm_start_error:
+ __sync_fetch_and_and(&line6pcm->flags, ~channels);
+ return err;
}
int line6_pcm_stop(struct snd_line6_pcm *line6pcm, int channels)
@@ -175,22 +159,19 @@ int line6_pcm_stop(struct snd_line6_pcm *line6pcm, int channels)
__sync_fetch_and_and(&line6pcm->flags, ~channels);
unsigned long flags_new = flags_old & ~channels;
- if (((flags_old & MASK_CAPTURE) != 0) &&
- ((flags_new & MASK_CAPTURE) == 0)) {
+ if (test_flags(flags_new, flags_old, MASK_CAPTURE)) {
line6_unlink_audio_in_urbs(line6pcm);
- kfree(line6pcm->buffer_in);
- line6pcm->buffer_in = NULL;
+
+ if (!(flags_old & MASK_PCM_ALSA_CAPTURE))
+ line6_free_capture_buffer(line6pcm);
}
- if (((flags_old & MASK_PLAYBACK) != 0) &&
- ((flags_new & MASK_PLAYBACK) == 0)) {
+ if (test_flags(flags_new, flags_old, MASK_PLAYBACK)) {
line6_unlink_audio_out_urbs(line6pcm);
- kfree(line6pcm->buffer_out);
- line6pcm->buffer_out = NULL;
+
+ if (!(flags_old & MASK_PCM_ALSA_PLAYBACK))
+ line6_free_playback_buffer(line6pcm);
}
-#if LINE6_BACKUP_MONITOR_SIGNAL
- kfree(line6pcm->prev_fbuf);
-#endif
return 0;
}
@@ -403,10 +384,12 @@ int line6_init_pcm(struct usb_line6 *line6,
case LINE6_DEVID_PODXT:
case LINE6_DEVID_PODXTLIVE:
case LINE6_DEVID_PODXTPRO:
+ case LINE6_DEVID_PODHD300:
ep_read = 0x82;
ep_write = 0x01;
break;
+ case LINE6_DEVID_PODHD500:
case LINE6_DEVID_PODX3:
case LINE6_DEVID_PODX3LIVE:
ep_read = 0x86;
@@ -451,9 +434,14 @@ int line6_init_pcm(struct usb_line6 *line6,
line6pcm->line6 = line6;
line6pcm->ep_audio_read = ep_read;
line6pcm->ep_audio_write = ep_write;
- line6pcm->max_packet_size = usb_maxpacket(line6->usbdev,
- usb_rcvintpipe(line6->usbdev,
- ep_read), 0);
+
+ /* Read and write buffers are sized identically, so choose minimum */
+ line6pcm->max_packet_size = min(
+ usb_maxpacket(line6->usbdev,
+ usb_rcvisocpipe(line6->usbdev, ep_read), 0),
+ usb_maxpacket(line6->usbdev,
+ usb_sndisocpipe(line6->usbdev, ep_write), 1));
+
line6pcm->properties = properties;
line6->line6pcm = line6pcm;
@@ -508,6 +496,23 @@ int snd_line6_prepare(struct snd_pcm_substream *substream)
{
struct snd_line6_pcm *line6pcm = snd_pcm_substream_chip(substream);
+ switch (substream->stream) {
+ case SNDRV_PCM_STREAM_PLAYBACK:
+ if ((line6pcm->flags & MASK_PLAYBACK) == 0)
+ line6_unlink_wait_clear_audio_out_urbs(line6pcm);
+
+ break;
+
+ case SNDRV_PCM_STREAM_CAPTURE:
+ if ((line6pcm->flags & MASK_CAPTURE) == 0)
+ line6_unlink_wait_clear_audio_in_urbs(line6pcm);
+
+ break;
+
+ default:
+ MISSING_CASE;
+ }
+
if (!test_and_set_bit(BIT_PREPARED, &line6pcm->flags)) {
line6pcm->count_out = 0;
line6pcm->pos_out = 0;
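The test_flags() helper introduced above is a readability refactor of the repeated transition checks: it is true only when none of the masked bits are set in the first snapshot and at least one is set in the second, i.e. the masked state went from off to on; line6_pcm_stop() swaps the arguments to detect the on-to-off edge. A small usage sketch, where start_streaming() is a hypothetical helper:

/* Sketch of reading test_flags(): detect an off -> on transition
 * of the bits in 'mask' between two flag snapshots. */
static void example_start_check(struct snd_line6_pcm *line6pcm, int channels)
{
	unsigned long before = line6pcm->flags;
	unsigned long after  = before | channels;

	if (test_flags(before, after, MASK_CAPTURE))
		start_streaming(line6pcm);	/* hypothetical: first capture user appeared */
}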
diff --git a/drivers/staging/line6/pcm.h b/drivers/staging/line6/pcm.h
index 77055b3724ad..55d8297dd3d9 100644
--- a/drivers/staging/line6/pcm.h
+++ b/drivers/staging/line6/pcm.h
@@ -39,9 +39,6 @@
#define LINE6_IMPULSE_DEFAULT_PERIOD 100
#endif
-#define LINE6_BACKUP_MONITOR_SIGNAL 0
-#define LINE6_REUSE_DMA_AREA_FOR_PLAYBACK 0
-
/*
Get substream from Line6 PCM data structure
*/
@@ -149,11 +146,6 @@ struct snd_line6_pcm {
unsigned char *buffer_in;
/**
- Temporary buffer index for playback.
- */
- int index_out;
-
- /**
Previously captured frame (for software monitoring).
*/
unsigned char *prev_fbuf;
diff --git a/drivers/staging/line6/playback.c b/drivers/staging/line6/playback.c
index 10c543836583..4152db2328b7 100644
--- a/drivers/staging/line6/playback.c
+++ b/drivers/staging/line6/playback.c
@@ -9,6 +9,7 @@
*
*/
+#include <linux/slab.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
@@ -191,13 +192,10 @@ static int submit_audio_out_urb(struct snd_line6_pcm *line6pcm)
urb_frames = urb_size / bytes_per_frame;
urb_out->transfer_buffer =
line6pcm->buffer_out +
- line6pcm->max_packet_size * line6pcm->index_out;
+ index * LINE6_ISO_PACKETS * line6pcm->max_packet_size;
urb_out->transfer_buffer_length = urb_size;
urb_out->context = line6pcm;
- if (++line6pcm->index_out == LINE6_ISO_BUFFERS)
- line6pcm->index_out = 0;
-
if (test_bit(BIT_PCM_ALSA_PLAYBACK, &line6pcm->flags) &&
!test_bit(BIT_PAUSE_PLAYBACK, &line6pcm->flags)) {
struct snd_pcm_runtime *runtime =
@@ -222,18 +220,10 @@ static int submit_audio_out_urb(struct snd_line6_pcm *line6pcm)
} else
dev_err(line6pcm->line6->ifcdev, "driver bug: len = %d\n", len); /* this is somewhat paranoid */
} else {
-#if LINE6_REUSE_DMA_AREA_FOR_PLAYBACK
- /* set the buffer pointer */
- urb_out->transfer_buffer =
- runtime->dma_area +
- line6pcm->pos_out * bytes_per_frame;
-#else
- /* copy data */
memcpy(urb_out->transfer_buffer,
runtime->dma_area +
line6pcm->pos_out * bytes_per_frame,
urb_out->transfer_buffer_length);
-#endif
}
line6pcm->pos_out += urb_frames;
@@ -361,6 +351,31 @@ void line6_unlink_wait_clear_audio_out_urbs(struct snd_line6_pcm *line6pcm)
wait_clear_audio_out_urbs(line6pcm);
}
+int line6_alloc_playback_buffer(struct snd_line6_pcm *line6pcm)
+{
+ /* We may be invoked multiple times in a row so allocate once only */
+ if (line6pcm->buffer_out)
+ return 0;
+
+ line6pcm->buffer_out =
+ kmalloc(LINE6_ISO_BUFFERS * LINE6_ISO_PACKETS *
+ line6pcm->max_packet_size, GFP_KERNEL);
+
+ if (!line6pcm->buffer_out) {
+ dev_err(line6pcm->line6->ifcdev,
+ "cannot malloc playback buffer\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void line6_free_playback_buffer(struct snd_line6_pcm *line6pcm)
+{
+ kfree(line6pcm->buffer_out);
+ line6pcm->buffer_out = NULL;
+}
+
/*
Callback for completed playback URB.
*/
@@ -469,6 +484,13 @@ static int snd_line6_playback_hw_params(struct snd_pcm_substream *substream,
}
/* -- [FD] end */
+ if ((line6pcm->flags & MASK_PLAYBACK) == 0) {
+ ret = line6_alloc_playback_buffer(line6pcm);
+
+ if (ret < 0)
+ return ret;
+ }
+
ret = snd_pcm_lib_malloc_pages(substream,
params_buffer_bytes(hw_params));
if (ret < 0)
@@ -481,6 +503,13 @@ static int snd_line6_playback_hw_params(struct snd_pcm_substream *substream,
/* hw_free playback callback */
static int snd_line6_playback_hw_free(struct snd_pcm_substream *substream)
{
+ struct snd_line6_pcm *line6pcm = snd_pcm_substream_chip(substream);
+
+ if ((line6pcm->flags & MASK_PLAYBACK) == 0) {
+ line6_unlink_wait_clear_audio_out_urbs(line6pcm);
+ line6_free_playback_buffer(line6pcm);
+ }
+
return snd_pcm_lib_free_pages(substream);
}
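The playback hunk above drops the separate index_out cursor: each URB now addresses its slot in buffer_out directly from its own index, so submission order and buffer slots stay decoupled. The layout it assumes, written out as a tiny helper (a sketch, not code from the patch):

/* buffer_out is one allocation of LINE6_ISO_BUFFERS slots, each slot
 * holding LINE6_ISO_PACKETS packets of max_packet_size bytes. */
static unsigned char *urb_slot(unsigned char *buffer_out, int index,
			       int max_packet_size)
{
	return buffer_out + index * LINE6_ISO_PACKETS * max_packet_size;
}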
diff --git a/drivers/staging/line6/playback.h b/drivers/staging/line6/playback.h
index f2fc8c0526e3..02487ff24538 100644
--- a/drivers/staging/line6/playback.h
+++ b/drivers/staging/line6/playback.h
@@ -29,7 +29,9 @@
extern struct snd_pcm_ops snd_line6_playback_ops;
+extern int line6_alloc_playback_buffer(struct snd_line6_pcm *line6pcm);
extern int line6_create_audio_out_urbs(struct snd_line6_pcm *line6pcm);
+extern void line6_free_playback_buffer(struct snd_line6_pcm *line6pcm);
extern int line6_submit_audio_out_all_urbs(struct snd_line6_pcm *line6pcm);
extern void line6_unlink_audio_out_urbs(struct snd_line6_pcm *line6pcm);
extern void line6_unlink_wait_clear_audio_out_urbs(struct snd_line6_pcm
diff --git a/drivers/staging/line6/pod.c b/drivers/staging/line6/pod.c
index d9b30212585c..4dadc571d961 100644
--- a/drivers/staging/line6/pod.c
+++ b/drivers/staging/line6/pod.c
@@ -1149,14 +1149,10 @@ static struct snd_kcontrol_new pod_control_monitor = {
static void pod_destruct(struct usb_interface *interface)
{
struct usb_line6_pod *pod = usb_get_intfdata(interface);
- struct usb_line6 *line6;
if (pod == NULL)
return;
- line6 = &pod->line6;
- if (line6 == NULL)
- return;
- line6_cleanup_audio(line6);
+ line6_cleanup_audio(&pod->line6);
del_timer(&pod->startup_timer);
cancel_work_sync(&pod->startup_work);
diff --git a/drivers/staging/line6/podhd.c b/drivers/staging/line6/podhd.c
new file mode 100644
index 000000000000..7ef45437b4f2
--- /dev/null
+++ b/drivers/staging/line6/podhd.c
@@ -0,0 +1,154 @@
+/*
+ * Line6 Pod HD
+ *
+ * Copyright (C) 2011 Stefan Hajnoczi <stefanha@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2.
+ *
+ */
+
+#include <sound/core.h>
+#include <sound/pcm.h>
+
+#include "audio.h"
+#include "driver.h"
+#include "pcm.h"
+#include "podhd.h"
+
+#define PODHD_BYTES_PER_FRAME 6 /* 24bit audio (stereo) */
+
+static struct snd_ratden podhd_ratden = {
+ .num_min = 48000,
+ .num_max = 48000,
+ .num_step = 1,
+ .den = 1,
+};
+
+static struct line6_pcm_properties podhd_pcm_properties = {
+ .snd_line6_playback_hw = {
+ .info = (SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_BLOCK_TRANSFER |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_PAUSE |
+#ifdef CONFIG_PM
+ SNDRV_PCM_INFO_RESUME |
+#endif
+ SNDRV_PCM_INFO_SYNC_START),
+ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
+ .rates = SNDRV_PCM_RATE_48000,
+ .rate_min = 48000,
+ .rate_max = 48000,
+ .channels_min = 2,
+ .channels_max = 2,
+ .buffer_bytes_max = 60000,
+ .period_bytes_min = 64,
+ .period_bytes_max = 8192,
+ .periods_min = 1,
+ .periods_max = 1024},
+ .snd_line6_capture_hw = {
+ .info = (SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_BLOCK_TRANSFER |
+ SNDRV_PCM_INFO_MMAP_VALID |
+#ifdef CONFIG_PM
+ SNDRV_PCM_INFO_RESUME |
+#endif
+ SNDRV_PCM_INFO_SYNC_START),
+ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
+ .rates = SNDRV_PCM_RATE_48000,
+ .rate_min = 48000,
+ .rate_max = 48000,
+ .channels_min = 2,
+ .channels_max = 2,
+ .buffer_bytes_max = 60000,
+ .period_bytes_min = 64,
+ .period_bytes_max = 8192,
+ .periods_min = 1,
+ .periods_max = 1024},
+ .snd_line6_rates = {
+ .nrats = 1,
+ .rats = &podhd_ratden},
+ .bytes_per_frame = PODHD_BYTES_PER_FRAME
+};
+
+/*
+ POD HD destructor.
+*/
+static void podhd_destruct(struct usb_interface *interface)
+{
+ struct usb_line6_podhd *podhd = usb_get_intfdata(interface);
+
+ if (podhd == NULL)
+ return;
+ line6_cleanup_audio(&podhd->line6);
+}
+
+/*
+ Try to init POD HD device.
+*/
+static int podhd_try_init(struct usb_interface *interface,
+ struct usb_line6_podhd *podhd)
+{
+ int err;
+ struct usb_line6 *line6 = &podhd->line6;
+
+ if ((interface == NULL) || (podhd == NULL))
+ return -ENODEV;
+
+ /* initialize audio system: */
+ err = line6_init_audio(line6);
+ if (err < 0)
+ return err;
+
+ /* initialize MIDI subsystem: */
+ err = line6_init_midi(line6);
+ if (err < 0)
+ return err;
+
+ /* initialize PCM subsystem: */
+ err = line6_init_pcm(line6, &podhd_pcm_properties);
+ if (err < 0)
+ return err;
+
+ /* register USB audio system: */
+ err = line6_register_audio(line6);
+ return err;
+}
+
+/*
+ Init POD HD device (and clean up in case of failure).
+*/
+int line6_podhd_init(struct usb_interface *interface,
+ struct usb_line6_podhd *podhd)
+{
+ int err = podhd_try_init(interface, podhd);
+
+ if (err < 0)
+ podhd_destruct(interface);
+
+ return err;
+}
+
+/*
+ POD HD device disconnected.
+*/
+void line6_podhd_disconnect(struct usb_interface *interface)
+{
+ struct usb_line6_podhd *podhd;
+
+ if (interface == NULL)
+ return;
+ podhd = usb_get_intfdata(interface);
+
+ if (podhd != NULL) {
+ struct snd_line6_pcm *line6pcm = podhd->line6.line6pcm;
+
+ if (line6pcm != NULL)
+ line6_pcm_disconnect(line6pcm);
+ }
+
+ podhd_destruct(interface);
+}
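A note on the constants in the new file: the hardware parameters pin the stream to 48 kHz stereo S24_3LE, so a frame is two channels of 3-byte packed samples, which is where PODHD_BYTES_PER_FRAME = 6 comes from. Spelled out below; the two example macros are illustrative and do not exist in the file:

/* 24-bit little-endian samples are packed into 3 bytes (S24_3LE),
 * and the POD HD streams stereo: */
#define PODHD_EXAMPLE_SAMPLE_BYTES	3
#define PODHD_EXAMPLE_CHANNELS		2
/* 3 * 2 == 6 == PODHD_BYTES_PER_FRAME */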
diff --git a/drivers/staging/line6/podhd.h b/drivers/staging/line6/podhd.h
new file mode 100644
index 000000000000..652f74056bb9
--- /dev/null
+++ b/drivers/staging/line6/podhd.h
@@ -0,0 +1,30 @@
+/*
+ * Line6 Pod HD
+ *
+ * Copyright (C) 2011 Stefan Hajnoczi <stefanha@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2.
+ *
+ */
+
+#ifndef PODHD_H
+#define PODHD_H
+
+#include <linux/usb.h>
+
+#include "driver.h"
+
+struct usb_line6_podhd {
+ /**
+ Generic Line6 USB data.
+ */
+ struct usb_line6 line6;
+};
+
+extern void line6_podhd_disconnect(struct usb_interface *interface);
+extern int line6_podhd_init(struct usb_interface *interface,
+ struct usb_line6_podhd *podhd);
+
+#endif /* PODHD_H */
diff --git a/drivers/staging/line6/revision.h b/drivers/staging/line6/revision.h
index 350d0dfff8f8..b4eee2b73831 100644
--- a/drivers/staging/line6/revision.h
+++ b/drivers/staging/line6/revision.h
@@ -1,4 +1,4 @@
#ifndef DRIVER_REVISION
/* current subversion revision */
-#define DRIVER_REVISION " (revision 690)"
+#define DRIVER_REVISION " (904)"
#endif
diff --git a/drivers/staging/line6/toneport.c b/drivers/staging/line6/toneport.c
index 879e6992bbc6..f31057830dbc 100644
--- a/drivers/staging/line6/toneport.c
+++ b/drivers/staging/line6/toneport.c
@@ -295,14 +295,10 @@ static struct snd_kcontrol_new toneport_control_source = {
static void toneport_destruct(struct usb_interface *interface)
{
struct usb_line6_toneport *toneport = usb_get_intfdata(interface);
- struct usb_line6 *line6;
if (toneport == NULL)
return;
- line6 = &toneport->line6;
- if (line6 == NULL)
- return;
- line6_cleanup_audio(line6);
+ line6_cleanup_audio(&toneport->line6);
}
/*
diff --git a/drivers/staging/line6/usbdefs.h b/drivers/staging/line6/usbdefs.h
index c6dffe6bc1a5..aff9e5caea46 100644
--- a/drivers/staging/line6/usbdefs.h
+++ b/drivers/staging/line6/usbdefs.h
@@ -24,6 +24,8 @@
#define LINE6_DEVID_BASSPODXTPRO 0x4252
#define LINE6_DEVID_GUITARPORT 0x4750
#define LINE6_DEVID_POCKETPOD 0x5051
+#define LINE6_DEVID_PODHD300 0x5057
+#define LINE6_DEVID_PODHD500 0x414D
#define LINE6_DEVID_PODSTUDIO_GX 0x4153
#define LINE6_DEVID_PODSTUDIO_UX1 0x4150
#define LINE6_DEVID_PODSTUDIO_UX2 0x4151
@@ -37,48 +39,71 @@
#define LINE6_DEVID_TONEPORT_UX2 0x4142
#define LINE6_DEVID_VARIAX 0x534d
-#define LINE6_BIT_BASSPODXT (1 << 0)
-#define LINE6_BIT_BASSPODXTLIVE (1 << 1)
-#define LINE6_BIT_BASSPODXTPRO (1 << 2)
-#define LINE6_BIT_GUITARPORT (1 << 3)
-#define LINE6_BIT_POCKETPOD (1 << 4)
-#define LINE6_BIT_PODSTUDIO_GX (1 << 5)
-#define LINE6_BIT_PODSTUDIO_UX1 (1 << 6)
-#define LINE6_BIT_PODSTUDIO_UX2 (1 << 7)
-#define LINE6_BIT_PODX3 (1 << 8)
-#define LINE6_BIT_PODX3LIVE (1 << 9)
-#define LINE6_BIT_PODXT (1 << 10)
-#define LINE6_BIT_PODXTLIVE (1 << 11)
-#define LINE6_BIT_PODXTPRO (1 << 12)
-#define LINE6_BIT_TONEPORT_GX (1 << 13)
-#define LINE6_BIT_TONEPORT_UX1 (1 << 14)
-#define LINE6_BIT_TONEPORT_UX2 (1 << 15)
-#define LINE6_BIT_VARIAX (1 << 16)
+enum {
+ LINE6_ID_BASSPODXT,
+ LINE6_ID_BASSPODXTLIVE,
+ LINE6_ID_BASSPODXTPRO,
+ LINE6_ID_GUITARPORT,
+ LINE6_ID_POCKETPOD,
+ LINE6_ID_PODHD300,
+ LINE6_ID_PODHD500,
+ LINE6_ID_PODSTUDIO_GX,
+ LINE6_ID_PODSTUDIO_UX1,
+ LINE6_ID_PODSTUDIO_UX2,
+ LINE6_ID_PODX3,
+ LINE6_ID_PODX3LIVE,
+ LINE6_ID_PODXT,
+ LINE6_ID_PODXTLIVE,
+ LINE6_ID_PODXTPRO,
+ LINE6_ID_TONEPORT_GX,
+ LINE6_ID_TONEPORT_UX1,
+ LINE6_ID_TONEPORT_UX2,
+ LINE6_ID_VARIAX
+};
-#define LINE6_BITS_PRO (LINE6_BIT_BASSPODXTPRO | \
- LINE6_BIT_PODXTPRO)
-#define LINE6_BITS_LIVE (LINE6_BIT_BASSPODXTLIVE | \
- LINE6_BIT_PODXTLIVE | \
- LINE6_BIT_PODX3LIVE)
-#define LINE6_BITS_PODXTALL (LINE6_BIT_PODXT | \
- LINE6_BIT_PODXTLIVE | \
- LINE6_BIT_PODXTPRO)
-#define LINE6_BITS_BASSPODXTALL (LINE6_BIT_BASSPODXT | \
- LINE6_BIT_BASSPODXTLIVE | \
- LINE6_BIT_BASSPODXTPRO)
+#define LINE6_BIT(x) LINE6_BIT_ ## x = 1 << LINE6_ID_ ## x
+
+enum {
+ LINE6_BIT(BASSPODXT),
+ LINE6_BIT(BASSPODXTLIVE),
+ LINE6_BIT(BASSPODXTPRO),
+ LINE6_BIT(GUITARPORT),
+ LINE6_BIT(POCKETPOD),
+ LINE6_BIT(PODHD300),
+ LINE6_BIT(PODHD500),
+ LINE6_BIT(PODSTUDIO_GX),
+ LINE6_BIT(PODSTUDIO_UX1),
+ LINE6_BIT(PODSTUDIO_UX2),
+ LINE6_BIT(PODX3),
+ LINE6_BIT(PODX3LIVE),
+ LINE6_BIT(PODXT),
+ LINE6_BIT(PODXTLIVE),
+ LINE6_BIT(PODXTPRO),
+ LINE6_BIT(TONEPORT_GX),
+ LINE6_BIT(TONEPORT_UX1),
+ LINE6_BIT(TONEPORT_UX2),
+ LINE6_BIT(VARIAX),
+
+ LINE6_BITS_PRO = LINE6_BIT_BASSPODXTPRO | LINE6_BIT_PODXTPRO,
+ LINE6_BITS_LIVE = LINE6_BIT_BASSPODXTLIVE | LINE6_BIT_PODXTLIVE | LINE6_BIT_PODX3LIVE,
+ LINE6_BITS_PODXTALL = LINE6_BIT_PODXT | LINE6_BIT_PODXTLIVE | LINE6_BIT_PODXTPRO,
+ LINE6_BITS_PODX3ALL = LINE6_BIT_PODX3 | LINE6_BIT_PODX3LIVE,
+ LINE6_BITS_PODHDALL = LINE6_BIT_PODHD300 | LINE6_BIT_PODHD500,
+ LINE6_BITS_BASSPODXTALL = LINE6_BIT_BASSPODXT | LINE6_BIT_BASSPODXTLIVE | LINE6_BIT_BASSPODXTPRO
+};
/* device supports settings parameter via USB */
-#define LINE6_BIT_CONTROL (1 << 0)
+#define LINE6_BIT_CONTROL (1 << 0)
/* device supports PCM input/output via USB */
-#define LINE6_BIT_PCM (1 << 1)
+#define LINE6_BIT_PCM (1 << 1)
/* device support hardware monitoring */
-#define LINE6_BIT_HWMON (1 << 2)
+#define LINE6_BIT_HWMON (1 << 2)
#define LINE6_BIT_CONTROL_PCM_HWMON (LINE6_BIT_CONTROL | \
LINE6_BIT_PCM | \
LINE6_BIT_HWMON)
-#define LINE6_FALLBACK_INTERVAL 10
-#define LINE6_FALLBACK_MAXPACKETSIZE 16
+#define LINE6_FALLBACK_INTERVAL 10
+#define LINE6_FALLBACK_MAXPACKETSIZE 16
#endif
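The usbdefs.h rewrite above replaces the hand-numbered bit masks with an ID enum plus a token-pasting helper, so a new device only needs to be added to the two lists and its bit value follows from its position. For reference, how one enumerator expands; this expansion is shown for illustration and is not part of the header:

/* LINE6_BIT(PODHD300) expands, via the ## paste operator, to: */
LINE6_BIT_PODHD300 = 1 << LINE6_ID_PODHD300,
/* i.e. bit 5, since LINE6_ID_PODHD300 is the sixth entry of the ID enum. */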
diff --git a/drivers/staging/line6/variax.c b/drivers/staging/line6/variax.c
index 81241cdf1be9..d36622228b2d 100644
--- a/drivers/staging/line6/variax.c
+++ b/drivers/staging/line6/variax.c
@@ -572,14 +572,10 @@ static DEVICE_ATTR(raw2, S_IWUSR, line6_nop_read, variax_set_raw2);
static void variax_destruct(struct usb_interface *interface)
{
struct usb_line6_variax *variax = usb_get_intfdata(interface);
- struct usb_line6 *line6;
if (variax == NULL)
return;
- line6 = &variax->line6;
- if (line6 == NULL)
- return;
- line6_cleanup_audio(line6);
+ line6_cleanup_audio(&variax->line6);
del_timer(&variax->startup_timer1);
del_timer(&variax->startup_timer2);
diff --git a/drivers/staging/media/as102/Kconfig b/drivers/staging/media/as102/Kconfig
index 5865029db0f6..28aba00dc629 100644
--- a/drivers/staging/media/as102/Kconfig
+++ b/drivers/staging/media/as102/Kconfig
@@ -1,6 +1,7 @@
config DVB_AS102
tristate "Abilis AS102 DVB receiver"
depends on DVB_CORE && USB && I2C && INPUT
+ select FW_LOADER
help
Choose Y or M here if you have a device containing an AS102
diff --git a/drivers/staging/media/as102/Makefile b/drivers/staging/media/as102/Makefile
index e7dbb6f814d5..1bca43e847c7 100644
--- a/drivers/staging/media/as102/Makefile
+++ b/drivers/staging/media/as102/Makefile
@@ -3,4 +3,4 @@ dvb-as102-objs := as102_drv.o as102_fw.o as10x_cmd.o as10x_cmd_stream.o \
obj-$(CONFIG_DVB_AS102) += dvb-as102.o
-EXTRA_CFLAGS += -DCONFIG_AS102_USB -Idrivers/media/dvb/dvb-core
+EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core
diff --git a/drivers/staging/media/as102/as102_drv.c b/drivers/staging/media/as102/as102_drv.c
index 828526d4c289..aae0505a36c4 100644
--- a/drivers/staging/media/as102/as102_drv.c
+++ b/drivers/staging/media/as102/as102_drv.c
@@ -24,7 +24,7 @@
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kref.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/usb.h>
/* header file for Usb device driver*/
@@ -56,13 +56,11 @@ int elna_enable = 1;
module_param_named(elna_enable, elna_enable, int, 0644);
MODULE_PARM_DESC(elna_enable, "Activate eLNA (default: on)");
-#ifdef DVB_DEFINE_MOD_OPT_ADAPTER_NR
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
-#endif
static void as102_stop_stream(struct as102_dev_t *dev)
{
- struct as102_bus_adapter_t *bus_adap;
+ struct as10x_bus_adapter_t *bus_adap;
if (dev != NULL)
bus_adap = &dev->bus_adap;
@@ -85,7 +83,7 @@ static void as102_stop_stream(struct as102_dev_t *dev)
static int as102_start_stream(struct as102_dev_t *dev)
{
- struct as102_bus_adapter_t *bus_adap;
+ struct as10x_bus_adapter_t *bus_adap;
int ret = -EFAULT;
if (dev != NULL)
@@ -111,7 +109,7 @@ static int as102_start_stream(struct as102_dev_t *dev)
static int as10x_pid_filter(struct as102_dev_t *dev,
int index, u16 pid, int onoff) {
- struct as102_bus_adapter_t *bus_adap = &dev->bus_adap;
+ struct as10x_bus_adapter_t *bus_adap = &dev->bus_adap;
int ret = -EFAULT;
ENTER();
@@ -123,22 +121,22 @@ static int as10x_pid_filter(struct as102_dev_t *dev,
switch (onoff) {
case 0:
- ret = as10x_cmd_del_PID_filter(bus_adap, (uint16_t) pid);
- dprintk(debug, "DEL_PID_FILTER([%02d] 0x%04x) ret = %d\n",
- index, pid, ret);
- break;
+ ret = as10x_cmd_del_PID_filter(bus_adap, (uint16_t) pid);
+ dprintk(debug, "DEL_PID_FILTER([%02d] 0x%04x) ret = %d\n",
+ index, pid, ret);
+ break;
case 1:
{
- struct as10x_ts_filter filter;
+ struct as10x_ts_filter filter;
- filter.type = TS_PID_TYPE_TS;
- filter.idx = 0xFF;
- filter.pid = pid;
+ filter.type = TS_PID_TYPE_TS;
+ filter.idx = 0xFF;
+ filter.pid = pid;
- ret = as10x_cmd_add_PID_filter(bus_adap, &filter);
- dprintk(debug, "ADD_PID_FILTER([%02d -> %02d], 0x%04x) ret = %d\n",
- index, filter.idx, filter.pid, ret);
- break;
+ ret = as10x_cmd_add_PID_filter(bus_adap, &filter);
+ dprintk(debug, "ADD_PID_FILTER([%02d -> %02d], 0x%04x) ret = %d\n",
+ index, filter.idx, filter.pid, ret);
+ break;
}
}
@@ -159,10 +157,9 @@ static int as102_dvb_dmx_start_feed(struct dvb_demux_feed *dvbdmxfeed)
if (mutex_lock_interruptible(&as102_dev->sem))
return -ERESTARTSYS;
- if (pid_filtering) {
- as10x_pid_filter(as102_dev,
- dvbdmxfeed->index, dvbdmxfeed->pid, 1);
- }
+ if (pid_filtering)
+ as10x_pid_filter(as102_dev, dvbdmxfeed->index,
+ dvbdmxfeed->pid, 1);
if (as102_dev->streaming++ == 0)
ret = as102_start_stream(as102_dev);
@@ -185,10 +182,9 @@ static int as102_dvb_dmx_stop_feed(struct dvb_demux_feed *dvbdmxfeed)
if (--as102_dev->streaming == 0)
as102_stop_stream(as102_dev);
- if (pid_filtering) {
- as10x_pid_filter(as102_dev,
- dvbdmxfeed->index, dvbdmxfeed->pid, 0);
- }
+ if (pid_filtering)
+ as10x_pid_filter(as102_dev, dvbdmxfeed->index,
+ dvbdmxfeed->pid, 0);
mutex_unlock(&as102_dev->sem);
LEAVE();
@@ -197,27 +193,16 @@ static int as102_dvb_dmx_stop_feed(struct dvb_demux_feed *dvbdmxfeed)
int as102_dvb_register(struct as102_dev_t *as102_dev)
{
- int ret = 0;
- ENTER();
+ struct device *dev = &as102_dev->bus_adap.usb_dev->dev;
+ int ret;
ret = dvb_register_adapter(&as102_dev->dvb_adap,
- as102_dev->name,
- THIS_MODULE,
-#if defined(CONFIG_AS102_USB)
- &as102_dev->bus_adap.usb_dev->dev
-#elif defined(CONFIG_AS102_SPI)
- &as102_dev->bus_adap.spi_dev->dev
-#else
-#error >>> dvb_register_adapter <<<
-#endif
-#ifdef DVB_DEFINE_MOD_OPT_ADAPTER_NR
- , adapter_nr
-#endif
- );
+ as102_dev->name, THIS_MODULE,
+ dev, adapter_nr);
if (ret < 0) {
- err("%s: dvb_register_adapter() failed (errno = %d)",
- __func__, ret);
- goto failed;
+ dev_err(dev, "%s: dvb_register_adapter() failed: %d\n",
+ __func__, ret);
+ return ret;
}
as102_dev->dvb_dmx.priv = as102_dev;
@@ -235,22 +220,22 @@ int as102_dvb_register(struct as102_dev_t *as102_dev)
ret = dvb_dmx_init(&as102_dev->dvb_dmx);
if (ret < 0) {
- err("%s: dvb_dmx_init() failed (errno = %d)", __func__, ret);
- goto failed;
+ dev_err(dev, "%s: dvb_dmx_init() failed: %d\n", __func__, ret);
+ goto edmxinit;
}
ret = dvb_dmxdev_init(&as102_dev->dvb_dmxdev, &as102_dev->dvb_adap);
if (ret < 0) {
- err("%s: dvb_dmxdev_init() failed (errno = %d)", __func__,
- ret);
- goto failed;
+ dev_err(dev, "%s: dvb_dmxdev_init() failed: %d\n",
+ __func__, ret);
+ goto edmxdinit;
}
ret = as102_dvb_register_fe(as102_dev, &as102_dev->dvb_fe);
if (ret < 0) {
- err("%s: as102_dvb_register_frontend() failed (errno = %d)",
+ dev_err(dev, "%s: as102_dvb_register_frontend() failed: %d",
__func__, ret);
- goto failed;
+ goto efereg;
}
/* init bus mutex for token locking */
@@ -259,7 +244,6 @@ int as102_dvb_register(struct as102_dev_t *as102_dev)
/* init start / stop stream mutex */
mutex_init(&as102_dev->sem);
-#if defined(CONFIG_FW_LOADER) || defined(CONFIG_FW_LOADER_MODULE)
/*
* try to load as102 firmware. If firmware upload failed, we'll be
* able to upload it later.
@@ -267,18 +251,21 @@ int as102_dvb_register(struct as102_dev_t *as102_dev)
if (fw_upload)
try_then_request_module(as102_fw_upload(&as102_dev->bus_adap),
"firmware_class");
-#endif
-failed:
- LEAVE();
- /* FIXME: free dvb_XXX */
+ pr_info("Registered device %s", as102_dev->name);
+ return 0;
+
+efereg:
+ dvb_dmxdev_release(&as102_dev->dvb_dmxdev);
+edmxdinit:
+ dvb_dmx_release(&as102_dev->dvb_dmx);
+edmxinit:
+ dvb_unregister_adapter(&as102_dev->dvb_adap);
return ret;
}
void as102_dvb_unregister(struct as102_dev_t *as102_dev)
{
- ENTER();
-
/* unregister as102 frontend */
as102_dvb_unregister_fe(&as102_dev->dvb_fe);
@@ -289,28 +276,18 @@ void as102_dvb_unregister(struct as102_dev_t *as102_dev)
/* unregister dvb adapter */
dvb_unregister_adapter(&as102_dev->dvb_adap);
- LEAVE();
+ pr_info("Unregistered device %s", as102_dev->name);
}
static int __init as102_driver_init(void)
{
- int ret = 0;
-
- ENTER();
+ int ret;
/* register this driver with the low level subsystem */
-#if defined(CONFIG_AS102_USB)
ret = usb_register(&as102_usb_driver);
if (ret)
err("usb_register failed (ret = %d)", ret);
-#endif
-#if defined(CONFIG_AS102_SPI)
- ret = spi_register_driver(&as102_spi_driver);
- if (ret)
- printk(KERN_ERR "spi_register failed (ret = %d)", ret);
-#endif
- LEAVE();
return ret;
}
@@ -327,15 +304,8 @@ module_init(as102_driver_init);
*/
static void __exit as102_driver_exit(void)
{
- ENTER();
/* deregister this driver with the low level bus subsystem */
-#if defined(CONFIG_AS102_USB)
usb_deregister(&as102_usb_driver);
-#endif
-#if defined(CONFIG_AS102_SPI)
- spi_unregister_driver(&as102_spi_driver);
-#endif
- LEAVE();
}
/*
@@ -347,5 +317,3 @@ module_exit(as102_driver_exit);
MODULE_DESCRIPTION(DRIVER_FULL_NAME);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pierrick Hascoet <pierrick.hascoet@abilis.com>");
-
-/* EOF - vim: set textwidth=80 ts=8 sw=8 sts=8 noet: */
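
Editor's note: the as102_dvb_register() rework above replaces the single catch-all failed: label (which leaked everything, as the old FIXME admitted) with one unwind label per initialised resource. Below is a hedged, compilable sketch of that staged-unwind idiom; step_a/step_b/step_c are placeholders standing in for dvb_register_adapter(), dvb_dmx_init() and dvb_dmxdev_init(), not the driver's real helpers:

#include <stdio.h>

/* Stubs standing in for the DVB registration/teardown calls. */
static int step_a_init(void) { return 0; }
static void step_a_exit(void) { }
static int step_b_init(void) { return 0; }
static void step_b_exit(void) { }
static int step_c_init(void) { return -1; }	/* force a failure */

static int example_register(void)
{
	int ret;

	ret = step_a_init();
	if (ret < 0)
		return ret;		/* nothing to undo yet */

	ret = step_b_init();
	if (ret < 0)
		goto err_a;		/* undo step a only */

	ret = step_c_init();
	if (ret < 0)
		goto err_b;		/* undo step b, then step a */

	return 0;

err_b:
	step_b_exit();
err_a:
	step_a_exit();
	return ret;
}

int main(void)
{
	printf("register: %d\n", example_register());
	return 0;
}
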
diff --git a/drivers/staging/media/as102/as102_drv.h b/drivers/staging/media/as102/as102_drv.h
index fd33f5a12dcc..957f0ed0d81a 100644
--- a/drivers/staging/media/as102/as102_drv.h
+++ b/drivers/staging/media/as102/as102_drv.h
@@ -17,38 +17,30 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#if defined(CONFIG_AS102_USB)
#include <linux/usb.h>
-extern struct usb_driver as102_usb_driver;
-#endif
-
-#if defined(CONFIG_AS102_SPI)
-#include <linux/platform_device.h>
-#include <linux/spi/spi.h>
-#include <linux/cdev.h>
-
-extern struct spi_driver as102_spi_driver;
-#endif
-
-#include "dvb_demux.h"
-#include "dvb_frontend.h"
-#include "dmxdev.h"
+#include <dvb_demux.h>
+#include <dvb_frontend.h>
+#include <dmxdev.h>
+#include "as10x_cmd.h"
+#include "as102_usb_drv.h"
#define DRIVER_FULL_NAME "Abilis Systems as10x usb driver"
#define DRIVER_NAME "as10x_usb"
extern int as102_debug;
#define debug as102_debug
+extern struct usb_driver as102_usb_driver;
+extern int elna_enable;
#define dprintk(debug, args...) \
do { if (debug) { \
- printk(KERN_DEBUG "%s: ",__FUNCTION__); \
+ pr_debug("%s: ", __func__); \
printk(args); \
} } while (0)
#ifdef TRACE
-#define ENTER() printk(">> enter %s\n", __FUNCTION__)
-#define LEAVE() printk("<< leave %s\n", __FUNCTION__)
+#define ENTER() pr_debug(">> enter %s\n", __func__)
+#define LEAVE() pr_debug("<< leave %s\n", __func__)
#else
#define ENTER()
#define LEAVE()
@@ -59,39 +51,14 @@ extern int as102_debug;
#define AS102_USB_BUF_SIZE 512
#define MAX_STREAM_URB 32
-#include "as10x_cmd.h"
-
-#if defined(CONFIG_AS102_USB)
-#include "as102_usb_drv.h"
-#endif
-
-#if defined(CONFIG_AS102_SPI)
-#include "as10x_spi_drv.h"
-#endif
-
-
-struct as102_bus_adapter_t {
-#if defined(CONFIG_AS102_USB)
+struct as10x_bus_adapter_t {
struct usb_device *usb_dev;
-#elif defined(CONFIG_AS102_SPI)
- struct spi_device *spi_dev;
- struct cdev cdev; /* spidev raw device */
-
- struct timer_list timer;
- struct completion xfer_done;
-#endif
/* bus token lock */
struct mutex lock;
/* low level interface for bus adapter */
union as10x_bus_token_t {
-#if defined(CONFIG_AS102_USB)
/* usb token */
struct as10x_usb_token_cmd_t usb;
-#endif
-#if defined(CONFIG_AS102_SPI)
- /* spi token */
- struct as10x_spi_token_cmd_t spi;
-#endif
} token;
/* token cmd xfer id */
@@ -106,7 +73,7 @@ struct as102_bus_adapter_t {
struct as102_dev_t {
const char *name;
- struct as102_bus_adapter_t bus_adap;
+ struct as10x_bus_adapter_t bus_adap;
struct list_head device_entry;
struct kref kref;
unsigned long minor;
@@ -138,5 +105,3 @@ void as102_dvb_unregister(struct as102_dev_t *dev);
int as102_dvb_register_fe(struct as102_dev_t *dev, struct dvb_frontend *fe);
int as102_dvb_unregister_fe(struct dvb_frontend *dev);
-
-/* EOF - vim: set textwidth=80 ts=8 sw=8 sts=8 noet: */
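
Editor's note: the as102_drv.h hunk keeps the dprintk() wrapper but switches it to pr_debug()/__func__. The do { ... } while (0) shell is what makes such a multi-statement macro safe inside unbraced if/else bodies. A small, self-contained sketch of the idiom with made-up names (example_dprintk is not a kernel macro):

#include <stdio.h>

static int example_debug = 1;

#define example_dprintk(fmt, ...)					\
	do {								\
		if (example_debug)					\
			printf("%s: " fmt, __func__, ##__VA_ARGS__);	\
	} while (0)

int main(void)
{
	if (example_debug)
		example_dprintk("value = %d\n", 42);	/* expands to one statement */
	else
		printf("debug off\n");
	return 0;
}
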
diff --git a/drivers/staging/media/as102/as102_fe.c b/drivers/staging/media/as102/as102_fe.c
index 3550f905367e..bdc5a38cddf7 100644
--- a/drivers/staging/media/as102/as102_fe.c
+++ b/drivers/staging/media/as102/as102_fe.c
@@ -23,17 +23,15 @@
#include "as10x_types.h"
#include "as10x_cmd.h"
-extern int elna_enable;
-
-static void as10x_fe_copy_tps_parameters(struct dvb_frontend_parameters *dst,
+static void as10x_fe_copy_tps_parameters(struct dtv_frontend_properties *dst,
struct as10x_tps *src);
static void as102_fe_copy_tune_parameters(struct as10x_tune_args *dst,
- struct dvb_frontend_parameters *src);
+ struct dtv_frontend_properties *src);
-static int as102_fe_set_frontend(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *params)
+static int as102_fe_set_frontend(struct dvb_frontend *fe)
{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
int ret = 0;
struct as102_dev_t *dev;
struct as10x_tune_args tune_args = { 0 };
@@ -47,7 +45,7 @@ static int as102_fe_set_frontend(struct dvb_frontend *fe,
if (mutex_lock_interruptible(&dev->bus_adap.lock))
return -EBUSY;
- as102_fe_copy_tune_parameters(&tune_args, params);
+ as102_fe_copy_tune_parameters(&tune_args, p);
/* send abilis command: SET_TUNE */
ret = as10x_cmd_set_tune(&dev->bus_adap, &tune_args);
@@ -60,8 +58,9 @@ static int as102_fe_set_frontend(struct dvb_frontend *fe,
return (ret < 0) ? -EINVAL : 0;
}
-static int as102_fe_get_frontend(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *p) {
+static int as102_fe_get_frontend(struct dvb_frontend *fe)
+{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
int ret = 0;
struct as102_dev_t *dev;
struct as10x_tps tps = { 0 };
@@ -280,9 +279,9 @@ static int as102_fe_ts_bus_ctrl(struct dvb_frontend *fe, int acquire)
}
static struct dvb_frontend_ops as102_fe_ops = {
+ .delsys = { SYS_DVBT },
.info = {
.name = "Unknown AS102 device",
- .type = FE_OFDM,
.frequency_min = 174000000,
.frequency_max = 862000000,
.frequency_stepsize = 166667,
@@ -346,38 +345,36 @@ int as102_dvb_register_fe(struct as102_dev_t *as102_dev,
return errno;
}
-static void as10x_fe_copy_tps_parameters(struct dvb_frontend_parameters *dst,
+static void as10x_fe_copy_tps_parameters(struct dtv_frontend_properties *fe_tps,
struct as10x_tps *as10x_tps)
{
- struct dvb_ofdm_parameters *fe_tps = &dst->u.ofdm;
-
/* extract consteallation */
- switch (as10x_tps->constellation) {
+ switch (as10x_tps->modulation) {
case CONST_QPSK:
- fe_tps->constellation = QPSK;
+ fe_tps->modulation = QPSK;
break;
case CONST_QAM16:
- fe_tps->constellation = QAM_16;
+ fe_tps->modulation = QAM_16;
break;
case CONST_QAM64:
- fe_tps->constellation = QAM_64;
+ fe_tps->modulation = QAM_64;
break;
}
/* extract hierarchy */
switch (as10x_tps->hierarchy) {
case HIER_NONE:
- fe_tps->hierarchy_information = HIERARCHY_NONE;
+ fe_tps->hierarchy = HIERARCHY_NONE;
break;
case HIER_ALPHA_1:
- fe_tps->hierarchy_information = HIERARCHY_1;
+ fe_tps->hierarchy = HIERARCHY_1;
break;
case HIER_ALPHA_2:
- fe_tps->hierarchy_information = HIERARCHY_2;
+ fe_tps->hierarchy = HIERARCHY_2;
break;
case HIER_ALPHA_4:
- fe_tps->hierarchy_information = HIERARCHY_4;
+ fe_tps->hierarchy = HIERARCHY_4;
break;
}
@@ -475,7 +472,7 @@ static uint8_t as102_fe_get_code_rate(fe_code_rate_t arg)
}
static void as102_fe_copy_tune_parameters(struct as10x_tune_args *tune_args,
- struct dvb_frontend_parameters *params)
+ struct dtv_frontend_properties *params)
{
/* set frequency */
@@ -484,21 +481,21 @@ static void as102_fe_copy_tune_parameters(struct as10x_tune_args *tune_args,
/* fix interleaving_mode */
tune_args->interleaving_mode = INTLV_NATIVE;
- switch (params->u.ofdm.bandwidth) {
- case BANDWIDTH_8_MHZ:
+ switch (params->bandwidth_hz) {
+ case 8000000:
tune_args->bandwidth = BW_8_MHZ;
break;
- case BANDWIDTH_7_MHZ:
+ case 7000000:
tune_args->bandwidth = BW_7_MHZ;
break;
- case BANDWIDTH_6_MHZ:
+ case 6000000:
tune_args->bandwidth = BW_6_MHZ;
break;
default:
tune_args->bandwidth = BW_8_MHZ;
}
- switch (params->u.ofdm.guard_interval) {
+ switch (params->guard_interval) {
case GUARD_INTERVAL_1_32:
tune_args->guard_interval = GUARD_INT_1_32;
break;
@@ -517,22 +514,22 @@ static void as102_fe_copy_tune_parameters(struct as10x_tune_args *tune_args,
break;
}
- switch (params->u.ofdm.constellation) {
+ switch (params->modulation) {
case QPSK:
- tune_args->constellation = CONST_QPSK;
+ tune_args->modulation = CONST_QPSK;
break;
case QAM_16:
- tune_args->constellation = CONST_QAM16;
+ tune_args->modulation = CONST_QAM16;
break;
case QAM_64:
- tune_args->constellation = CONST_QAM64;
+ tune_args->modulation = CONST_QAM64;
break;
default:
- tune_args->constellation = CONST_UNKNOWN;
+ tune_args->modulation = CONST_UNKNOWN;
break;
}
- switch (params->u.ofdm.transmission_mode) {
+ switch (params->transmission_mode) {
case TRANSMISSION_MODE_2K:
tune_args->transmission_mode = TRANS_MODE_2K;
break;
@@ -543,7 +540,7 @@ static void as102_fe_copy_tune_parameters(struct as10x_tune_args *tune_args,
tune_args->transmission_mode = TRANS_MODE_UNKNOWN;
}
- switch (params->u.ofdm.hierarchy_information) {
+ switch (params->hierarchy) {
case HIERARCHY_NONE:
tune_args->hierarchy = HIER_NONE;
break;
@@ -571,19 +568,19 @@ static void as102_fe_copy_tune_parameters(struct as10x_tune_args *tune_args,
* if HP/LP are both set to FEC_NONE, HP will be selected.
*/
if ((tune_args->hierarchy != HIER_NONE) &&
- ((params->u.ofdm.code_rate_LP == FEC_NONE) ||
- (params->u.ofdm.code_rate_HP == FEC_NONE))) {
+ ((params->code_rate_LP == FEC_NONE) ||
+ (params->code_rate_HP == FEC_NONE))) {
- if (params->u.ofdm.code_rate_LP == FEC_NONE) {
+ if (params->code_rate_LP == FEC_NONE) {
tune_args->hier_select = HIER_HIGH_PRIORITY;
tune_args->code_rate =
- as102_fe_get_code_rate(params->u.ofdm.code_rate_HP);
+ as102_fe_get_code_rate(params->code_rate_HP);
}
- if (params->u.ofdm.code_rate_HP == FEC_NONE) {
+ if (params->code_rate_HP == FEC_NONE) {
tune_args->hier_select = HIER_LOW_PRIORITY;
tune_args->code_rate =
- as102_fe_get_code_rate(params->u.ofdm.code_rate_LP);
+ as102_fe_get_code_rate(params->code_rate_LP);
}
dprintk(debug, "\thierarchy: 0x%02x "
@@ -596,8 +593,6 @@ static void as102_fe_copy_tune_parameters(struct as10x_tune_args *tune_args,
tune_args->code_rate);
} else {
tune_args->code_rate =
- as102_fe_get_code_rate(params->u.ofdm.code_rate_HP);
+ as102_fe_get_code_rate(params->code_rate_HP);
}
}
-
-/* EOF - vim: set textwidth=80 ts=8 sw=8 sts=8 noet: */
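
Editor's note: as102_fe.c is converted above from the DVBv3 per-call struct dvb_frontend_parameters to the DVBv5 property cache (fe->dtv_property_cache), so the bandwidth now arrives as a value in Hz instead of a BANDWIDTH_* enum. A compilable sketch of the resulting mapping; the BW_* values here are stand-ins, not the constants from as10x_types.h:

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-ins for the driver's BW_* constants. */
enum { BW_8_MHZ = 1, BW_7_MHZ = 2, BW_6_MHZ = 3 };

/* DVBv5 carries bandwidth in plain Hz, so the switch is on numbers. */
static uint8_t map_bandwidth(uint32_t bandwidth_hz)
{
	switch (bandwidth_hz) {
	case 8000000:
		return BW_8_MHZ;
	case 7000000:
		return BW_7_MHZ;
	case 6000000:
		return BW_6_MHZ;
	default:
		return BW_8_MHZ;	/* driver falls back to 8 MHz */
	}
}

int main(void)
{
	printf("7 MHz -> %u\n", map_bandwidth(7000000));
	return 0;
}
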
diff --git a/drivers/staging/media/as102/as102_fw.c b/drivers/staging/media/as102/as102_fw.c
index c019df933cc9..43ebc43e6b9a 100644
--- a/drivers/staging/media/as102/as102_fw.c
+++ b/drivers/staging/media/as102/as102_fw.c
@@ -26,7 +26,6 @@
#include "as102_drv.h"
#include "as102_fw.h"
-#if defined(CONFIG_FW_LOADER) || defined(CONFIG_FW_LOADER_MODULE)
char as102_st_fw1[] = "as102_data1_st.hex";
char as102_st_fw2[] = "as102_data2_st.hex";
char as102_dt_fw1[] = "as102_data1_dt.hex";
@@ -59,7 +58,7 @@ static int parse_hex_line(unsigned char *fw_data, unsigned char *addr,
unsigned char *src, dst;
if (*fw_data++ != ':') {
- printk(KERN_ERR "invalid firmware file\n");
+ pr_err("invalid firmware file\n");
return -EFAULT;
}
@@ -102,7 +101,7 @@ static int parse_hex_line(unsigned char *fw_data, unsigned char *addr,
return (count * 2) + 2;
}
-static int as102_firmware_upload(struct as102_bus_adapter_t *bus_adap,
+static int as102_firmware_upload(struct as10x_bus_adapter_t *bus_adap,
unsigned char *cmd,
const struct firmware *firmware) {
@@ -163,19 +162,14 @@ error:
return (errno == 0) ? total_read_bytes : errno;
}
-int as102_fw_upload(struct as102_bus_adapter_t *bus_adap)
+int as102_fw_upload(struct as10x_bus_adapter_t *bus_adap)
{
int errno = -EFAULT;
const struct firmware *firmware;
unsigned char *cmd_buf = NULL;
char *fw1, *fw2;
-
-#if defined(CONFIG_AS102_USB)
struct usb_device *dev = bus_adap->usb_dev;
-#endif
-#if defined(CONFIG_AS102_SPI)
- struct spi_device *dev = bus_adap->spi_dev;
-#endif
+
ENTER();
/* select fw file to upload */
@@ -187,7 +181,6 @@ int as102_fw_upload(struct as102_bus_adapter_t *bus_adap)
fw2 = as102_st_fw2;
}
-#if defined(CONFIG_FW_LOADER) || defined(CONFIG_FW_LOADER_MODULE)
/* allocate buffer to store firmware upload command and data */
cmd_buf = kzalloc(MAX_FW_PKT_SIZE, GFP_KERNEL);
if (cmd_buf == NULL) {
@@ -198,21 +191,21 @@ int as102_fw_upload(struct as102_bus_adapter_t *bus_adap)
/* request kernel to locate firmware file: part1 */
errno = request_firmware(&firmware, fw1, &dev->dev);
if (errno < 0) {
- printk(KERN_ERR "%s: unable to locate firmware file: %s\n",
- DRIVER_NAME, fw1);
+ pr_err("%s: unable to locate firmware file: %s\n",
+ DRIVER_NAME, fw1);
goto error;
}
/* initiate firmware upload */
errno = as102_firmware_upload(bus_adap, cmd_buf, firmware);
if (errno < 0) {
- printk(KERN_ERR "%s: error during firmware upload part1\n",
- DRIVER_NAME);
+ pr_err("%s: error during firmware upload part1\n",
+ DRIVER_NAME);
goto error;
}
- printk(KERN_INFO "%s: fimrware: %s loaded with success\n",
- DRIVER_NAME, fw1);
+ pr_info("%s: firmware: %s loaded with success\n",
+ DRIVER_NAME, fw1);
release_firmware(firmware);
/* wait for boot to complete */
@@ -221,31 +214,28 @@ int as102_fw_upload(struct as102_bus_adapter_t *bus_adap)
/* request kernel to locate firmware file: part2 */
errno = request_firmware(&firmware, fw2, &dev->dev);
if (errno < 0) {
- printk(KERN_ERR "%s: unable to locate firmware file: %s\n",
- DRIVER_NAME, fw2);
+ pr_err("%s: unable to locate firmware file: %s\n",
+ DRIVER_NAME, fw2);
goto error;
}
/* initiate firmware upload */
errno = as102_firmware_upload(bus_adap, cmd_buf, firmware);
if (errno < 0) {
- printk(KERN_ERR "%s: error during firmware upload part2\n",
- DRIVER_NAME);
+ pr_err("%s: error during firmware upload part2\n",
+ DRIVER_NAME);
goto error;
}
- printk(KERN_INFO "%s: fimrware: %s loaded with success\n",
- DRIVER_NAME, fw2);
+ pr_info("%s: firmware: %s loaded with success\n",
+ DRIVER_NAME, fw2);
error:
/* free data buffer */
kfree(cmd_buf);
/* release firmware if needed */
if (firmware != NULL)
release_firmware(firmware);
-#endif
+
LEAVE();
return errno;
}
-#endif
-
-/* EOF - vim: set textwidth=80 ts=8 sw=8 sts=8 noet: */
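
Editor's note: with FW_LOADER now selected unconditionally (see the Kconfig hunk earlier), as102_fw.c can call request_firmware() without the CONFIG_FW_LOADER guards. A hedged, kernel-context sketch of the core request/release pattern; the firmware name and the upload step are placeholders, not the driver's real symbols:

#include <linux/firmware.h>
#include <linux/device.h>

static int example_load_fw(struct device *dev)
{
	const struct firmware *fw;
	int ret;

	ret = request_firmware(&fw, "example_fw.bin", dev);
	if (ret < 0) {
		dev_err(dev, "firmware example_fw.bin not found: %d\n", ret);
		return ret;
	}

	/* fw->data / fw->size hold the blob; push it to the device here */
	dev_info(dev, "loaded %zu bytes of firmware\n", fw->size);

	release_firmware(fw);
	return 0;
}
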
diff --git a/drivers/staging/media/as102/as102_fw.h b/drivers/staging/media/as102/as102_fw.h
index 27e5347e2e19..bd21f0554392 100644
--- a/drivers/staging/media/as102/as102_fw.h
+++ b/drivers/staging/media/as102/as102_fw.h
@@ -20,11 +20,10 @@
extern int dual_tuner;
-#pragma pack(1)
struct as10x_raw_fw_pkt {
unsigned char address[4];
unsigned char data[MAX_FW_PKT_SIZE - 6];
-};
+} __packed;
struct as10x_fw_pkt_t {
union {
@@ -32,11 +31,8 @@ struct as10x_fw_pkt_t {
unsigned char length[2];
} u;
struct as10x_raw_fw_pkt raw;
-};
-#pragma pack()
+} __packed;
#ifdef __KERNEL__
-int as102_fw_upload(struct as102_bus_adapter_t *bus_adap);
+int as102_fw_upload(struct as10x_bus_adapter_t *bus_adap);
#endif
-
-/* EOF - vim: set textwidth=80 ts=8 sw=8 sts=8 noet: */
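
Editor's note: as102_fw.h swaps #pragma pack(1)/#pragma pack() for per-struct __packed annotations, the kernel's preferred spelling of __attribute__((packed)), which keeps the packing scoped to the structs that actually describe a wire format. A small compilable sketch of the layout difference; the field names are illustrative only:

#include <stdio.h>
#include <stdint.h>

#define __packed __attribute__((packed))

struct fw_pkt_plain {		/* natural alignment: padding after "len" */
	uint8_t len;
	uint32_t addr;
};

struct fw_pkt_packed {		/* __packed: no padding, wire-format layout */
	uint8_t len;
	uint32_t addr;
} __packed;

int main(void)
{
	printf("plain=%zu packed=%zu\n",
	       sizeof(struct fw_pkt_plain), sizeof(struct fw_pkt_packed));
	return 0;
}
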
diff --git a/drivers/staging/media/as102/as102_usb_drv.c b/drivers/staging/media/as102/as102_usb_drv.c
index 264be2dbd2a4..d775be0173ea 100644
--- a/drivers/staging/media/as102/as102_usb_drv.c
+++ b/drivers/staging/media/as102/as102_usb_drv.c
@@ -42,30 +42,32 @@ static struct usb_device_id as102_usb_id_table[] = {
{ USB_DEVICE(PCTV_74E_USB_VID, PCTV_74E_USB_PID) },
{ USB_DEVICE(ELGATO_EYETV_DTT_USB_VID, ELGATO_EYETV_DTT_USB_PID) },
{ USB_DEVICE(NBOX_DVBT_DONGLE_USB_VID, NBOX_DVBT_DONGLE_USB_PID) },
+ { USB_DEVICE(SKY_IT_DIGITAL_KEY_USB_VID, SKY_IT_DIGITAL_KEY_USB_PID) },
{ } /* Terminating entry */
};
/* Note that this table must always have the same number of entries as the
as102_usb_id_table struct */
-static const char *as102_device_names[] = {
+static const char * const as102_device_names[] = {
AS102_REFERENCE_DESIGN,
AS102_PCTV_74E,
AS102_ELGATO_EYETV_DTT_NAME,
AS102_NBOX_DVBT_DONGLE_NAME,
+ AS102_SKY_IT_DIGITAL_KEY_NAME,
NULL /* Terminating entry */
};
struct usb_driver as102_usb_driver = {
- .name = DRIVER_FULL_NAME,
- .probe = as102_usb_probe,
- .disconnect = as102_usb_disconnect,
- .id_table = as102_usb_id_table
+ .name = DRIVER_FULL_NAME,
+ .probe = as102_usb_probe,
+ .disconnect = as102_usb_disconnect,
+ .id_table = as102_usb_id_table
};
static const struct file_operations as102_dev_fops = {
- .owner = THIS_MODULE,
- .open = as102_open,
- .release = as102_release,
+ .owner = THIS_MODULE,
+ .open = as102_open,
+ .release = as102_release,
};
static struct usb_class_driver as102_usb_class_driver = {
@@ -74,7 +76,7 @@ static struct usb_class_driver as102_usb_class_driver = {
.minor_base = AS102_DEVICE_MAJOR,
};
-static int as102_usb_xfer_cmd(struct as102_bus_adapter_t *bus_adap,
+static int as102_usb_xfer_cmd(struct as10x_bus_adapter_t *bus_adap,
unsigned char *send_buf, int send_buf_len,
unsigned char *recv_buf, int recv_buf_len)
{
@@ -131,7 +133,7 @@ static int as102_usb_xfer_cmd(struct as102_bus_adapter_t *bus_adap,
return ret;
}
-static int as102_send_ep1(struct as102_bus_adapter_t *bus_adap,
+static int as102_send_ep1(struct as10x_bus_adapter_t *bus_adap,
unsigned char *send_buf,
int send_buf_len,
int swap32)
@@ -154,7 +156,7 @@ static int as102_send_ep1(struct as102_bus_adapter_t *bus_adap,
return ret ? ret : actual_len;
}
-static int as102_read_ep2(struct as102_bus_adapter_t *bus_adap,
+static int as102_read_ep2(struct as10x_bus_adapter_t *bus_adap,
unsigned char *recv_buf, int recv_buf_len)
{
int ret = 0, actual_len;
@@ -337,7 +339,7 @@ static void as102_usb_disconnect(struct usb_interface *intf)
/* decrement usage counter */
kref_put(&as102_dev->kref, as102_usb_release);
- printk(KERN_INFO "%s: device has been disconnected\n", DRIVER_NAME);
+ pr_info("%s: device has been disconnected\n", DRIVER_NAME);
LEAVE();
}
@@ -351,19 +353,19 @@ static int as102_usb_probe(struct usb_interface *intf,
ENTER();
- as102_dev = kzalloc(sizeof(struct as102_dev_t), GFP_KERNEL);
- if (as102_dev == NULL) {
- err("%s: kzalloc failed", __func__);
- return -ENOMEM;
- }
-
/* This should never actually happen */
if ((sizeof(as102_usb_id_table) / sizeof(struct usb_device_id)) !=
(sizeof(as102_device_names) / sizeof(const char *))) {
- printk(KERN_ERR "Device names table invalid size");
+ pr_err("Device names table invalid size");
return -EINVAL;
}
+ as102_dev = kzalloc(sizeof(struct as102_dev_t), GFP_KERNEL);
+ if (as102_dev == NULL) {
+ err("%s: kzalloc failed", __func__);
+ return -ENOMEM;
+ }
+
/* Assign the user-friendly device name */
for (i = 0; i < (sizeof(as102_usb_id_table) /
sizeof(struct usb_device_id)); i++) {
@@ -399,7 +401,7 @@ static int as102_usb_probe(struct usb_interface *intf,
goto failed;
}
- printk(KERN_INFO "%s: device has been detected\n", DRIVER_NAME);
+ pr_info("%s: device has been detected\n", DRIVER_NAME);
/* request buffer allocation for streaming */
ret = as102_alloc_usb_stream_buffer(as102_dev);
@@ -432,8 +434,8 @@ static int as102_open(struct inode *inode, struct file *file)
/* fetch device from usb interface */
intf = usb_find_interface(&as102_usb_driver, minor);
if (intf == NULL) {
- printk(KERN_ERR "%s: can't find device for minor %d\n",
- __func__, minor);
+ pr_err("%s: can't find device for minor %d\n",
+ __func__, minor);
ret = -ENODEV;
goto exit;
}
@@ -474,5 +476,3 @@ static int as102_release(struct inode *inode, struct file *file)
}
MODULE_DEVICE_TABLE(usb, as102_usb_id_table);
-
-/* EOF - vim: set textwidth=80 ts=8 sw=8 sts=8 noet: */
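
Editor's note: the as102_usb_drv.c hunk adds a fifth entry to the device-ID table plus its matching name string. A minimal sketch of how such a usb_device_id table is declared and exported for module autoloading; only the two VID/PID pairs taken from this patch are real, the table and variable names are placeholders:

#include <linux/module.h>
#include <linux/usb.h>

static const struct usb_device_id example_id_table[] = {
	{ USB_DEVICE(0x0b89, 0x0007) },	/* nBox DVB-T dongle (existing) */
	{ USB_DEVICE(0x2137, 0x0001) },	/* Sky IT Digital Key (added here) */
	{ }				/* terminating entry */
};
/* exported so udev/modprobe can autoload the module on hotplug;
 * the driver's struct usb_driver points its .id_table at this array */
MODULE_DEVICE_TABLE(usb, example_id_table);
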
diff --git a/drivers/staging/media/as102/as102_usb_drv.h b/drivers/staging/media/as102/as102_usb_drv.h
index fb1fc41dcd79..fc2884ab02a2 100644
--- a/drivers/staging/media/as102/as102_usb_drv.h
+++ b/drivers/staging/media/as102/as102_usb_drv.h
@@ -47,6 +47,11 @@
#define NBOX_DVBT_DONGLE_USB_VID 0x0b89
#define NBOX_DVBT_DONGLE_USB_PID 0x0007
+/* Sky Italia: Digital Key (green led) */
+#define AS102_SKY_IT_DIGITAL_KEY_NAME "Sky IT Digital Key (green led)"
+#define SKY_IT_DIGITAL_KEY_USB_VID 0x2137
+#define SKY_IT_DIGITAL_KEY_USB_PID 0x0001
+
void as102_urb_stream_irq(struct urb *urb);
struct as10x_usb_token_cmd_t {
@@ -56,4 +61,3 @@ struct as10x_usb_token_cmd_t {
struct as10x_cmd_t r;
};
#endif
-/* EOF - vim: set textwidth=80 ts=8 sw=8 sts=8 noet: */
diff --git a/drivers/staging/media/as102/as10x_cmd.c b/drivers/staging/media/as102/as10x_cmd.c
index 0dcba8065780..262bb94ad27e 100644
--- a/drivers/staging/media/as102/as10x_cmd.c
+++ b/drivers/staging/media/as102/as10x_cmd.c
@@ -25,35 +25,35 @@
/**
* as10x_cmd_turn_on - send turn on command to AS10x
- * @phandle: pointer to AS10x handle
+ * @adap: pointer to AS10x bus adapter
*
* Return 0 when no error, < 0 in case of error.
*/
-int as10x_cmd_turn_on(as10x_handle_t *phandle)
+int as10x_cmd_turn_on(struct as10x_bus_adapter_t *adap)
{
int error;
struct as10x_cmd_t *pcmd, *prsp;
ENTER();
- pcmd = phandle->cmd;
- prsp = phandle->rsp;
+ pcmd = adap->cmd;
+ prsp = adap->rsp;
/* prepare command */
- as10x_cmd_build(pcmd, (++phandle->cmd_xid),
+ as10x_cmd_build(pcmd, (++adap->cmd_xid),
sizeof(pcmd->body.turn_on.req));
/* fill command */
pcmd->body.turn_on.req.proc_id = cpu_to_le16(CONTROL_PROC_TURNON);
/* send command */
- if (phandle->ops->xfer_cmd) {
- error = phandle->ops->xfer_cmd(phandle, (uint8_t *) pcmd,
- sizeof(pcmd->body.turn_on.req) +
- HEADER_SIZE,
- (uint8_t *) prsp,
- sizeof(prsp->body.turn_on.rsp) +
- HEADER_SIZE);
+ if (adap->ops->xfer_cmd) {
+ error = adap->ops->xfer_cmd(adap, (uint8_t *) pcmd,
+ sizeof(pcmd->body.turn_on.req) +
+ HEADER_SIZE,
+ (uint8_t *) prsp,
+ sizeof(prsp->body.turn_on.rsp) +
+ HEADER_SIZE);
} else {
error = AS10X_CMD_ERROR;
}
@@ -71,31 +71,31 @@ out:
/**
* as10x_cmd_turn_off - send turn off command to AS10x
- * @phandle: pointer to AS10x handle
+ * @adap: pointer to AS10x bus adapter
*
* Return 0 on success or negative value in case of error.
*/
-int as10x_cmd_turn_off(as10x_handle_t *phandle)
+int as10x_cmd_turn_off(struct as10x_bus_adapter_t *adap)
{
int error;
struct as10x_cmd_t *pcmd, *prsp;
ENTER();
- pcmd = phandle->cmd;
- prsp = phandle->rsp;
+ pcmd = adap->cmd;
+ prsp = adap->rsp;
/* prepare command */
- as10x_cmd_build(pcmd, (++phandle->cmd_xid),
+ as10x_cmd_build(pcmd, (++adap->cmd_xid),
sizeof(pcmd->body.turn_off.req));
/* fill command */
pcmd->body.turn_off.req.proc_id = cpu_to_le16(CONTROL_PROC_TURNOFF);
/* send command */
- if (phandle->ops->xfer_cmd) {
- error = phandle->ops->xfer_cmd(
- phandle, (uint8_t *) pcmd,
+ if (adap->ops->xfer_cmd) {
+ error = adap->ops->xfer_cmd(
+ adap, (uint8_t *) pcmd,
sizeof(pcmd->body.turn_off.req) + HEADER_SIZE,
(uint8_t *) prsp,
sizeof(prsp->body.turn_off.rsp) + HEADER_SIZE);
@@ -116,23 +116,24 @@ out:
/**
* as10x_cmd_set_tune - send set tune command to AS10x
- * @phandle: pointer to AS10x handle
+ * @adap: pointer to AS10x bus adapter
* @ptune: tune parameters
*
* Return 0 on success or negative value in case of error.
*/
-int as10x_cmd_set_tune(as10x_handle_t *phandle, struct as10x_tune_args *ptune)
+int as10x_cmd_set_tune(struct as10x_bus_adapter_t *adap,
+ struct as10x_tune_args *ptune)
{
int error;
struct as10x_cmd_t *preq, *prsp;
ENTER();
- preq = phandle->cmd;
- prsp = phandle->rsp;
+ preq = adap->cmd;
+ prsp = adap->rsp;
/* prepare command */
- as10x_cmd_build(preq, (++phandle->cmd_xid),
+ as10x_cmd_build(preq, (++adap->cmd_xid),
sizeof(preq->body.set_tune.req));
/* fill command */
@@ -140,7 +141,7 @@ int as10x_cmd_set_tune(as10x_handle_t *phandle, struct as10x_tune_args *ptune)
preq->body.set_tune.req.args.freq = cpu_to_le32(ptune->freq);
preq->body.set_tune.req.args.bandwidth = ptune->bandwidth;
preq->body.set_tune.req.args.hier_select = ptune->hier_select;
- preq->body.set_tune.req.args.constellation = ptune->constellation;
+ preq->body.set_tune.req.args.modulation = ptune->modulation;
preq->body.set_tune.req.args.hierarchy = ptune->hierarchy;
preq->body.set_tune.req.args.interleaving_mode =
ptune->interleaving_mode;
@@ -150,14 +151,14 @@ int as10x_cmd_set_tune(as10x_handle_t *phandle, struct as10x_tune_args *ptune)
ptune->transmission_mode;
/* send command */
- if (phandle->ops->xfer_cmd) {
- error = phandle->ops->xfer_cmd(phandle,
- (uint8_t *) preq,
- sizeof(preq->body.set_tune.req)
- + HEADER_SIZE,
- (uint8_t *) prsp,
- sizeof(prsp->body.set_tune.rsp)
- + HEADER_SIZE);
+ if (adap->ops->xfer_cmd) {
+ error = adap->ops->xfer_cmd(adap,
+ (uint8_t *) preq,
+ sizeof(preq->body.set_tune.req)
+ + HEADER_SIZE,
+ (uint8_t *) prsp,
+ sizeof(prsp->body.set_tune.rsp)
+ + HEADER_SIZE);
} else {
error = AS10X_CMD_ERROR;
}
@@ -175,12 +176,12 @@ out:
/**
* as10x_cmd_get_tune_status - send get tune status command to AS10x
- * @phandle: pointer to AS10x handle
+ * @adap: pointer to AS10x bus adapter
* @pstatus: pointer to updated status structure of the current tune
*
* Return 0 on success or negative value in case of error.
*/
-int as10x_cmd_get_tune_status(as10x_handle_t *phandle,
+int as10x_cmd_get_tune_status(struct as10x_bus_adapter_t *adap,
struct as10x_tune_status *pstatus)
{
int error;
@@ -188,11 +189,11 @@ int as10x_cmd_get_tune_status(as10x_handle_t *phandle,
ENTER();
- preq = phandle->cmd;
- prsp = phandle->rsp;
+ preq = adap->cmd;
+ prsp = adap->rsp;
/* prepare command */
- as10x_cmd_build(preq, (++phandle->cmd_xid),
+ as10x_cmd_build(preq, (++adap->cmd_xid),
sizeof(preq->body.get_tune_status.req));
/* fill command */
@@ -200,9 +201,9 @@ int as10x_cmd_get_tune_status(as10x_handle_t *phandle,
cpu_to_le16(CONTROL_PROC_GETTUNESTAT);
/* send command */
- if (phandle->ops->xfer_cmd) {
- error = phandle->ops->xfer_cmd(
- phandle,
+ if (adap->ops->xfer_cmd) {
+ error = adap->ops->xfer_cmd(
+ adap,
(uint8_t *) preq,
sizeof(preq->body.get_tune_status.req) + HEADER_SIZE,
(uint8_t *) prsp,
@@ -232,24 +233,24 @@ out:
}
/**
- * send get TPS command to AS10x
- * @phandle: pointer to AS10x handle
+ * as10x_cmd_get_tps - send get TPS command to AS10x
+ * @adap: pointer to AS10x handle
+ * @adap: pointer to AS10x bus adapter
* @ptps: pointer to TPS parameters structure
*
* Return 0 on success or negative value in case of error.
*/
-int as10x_cmd_get_tps(as10x_handle_t *phandle, struct as10x_tps *ptps)
+int as10x_cmd_get_tps(struct as10x_bus_adapter_t *adap, struct as10x_tps *ptps)
{
int error;
struct as10x_cmd_t *pcmd, *prsp;
ENTER();
- pcmd = phandle->cmd;
- prsp = phandle->rsp;
+ pcmd = adap->cmd;
+ prsp = adap->rsp;
/* prepare command */
- as10x_cmd_build(pcmd, (++phandle->cmd_xid),
+ as10x_cmd_build(pcmd, (++adap->cmd_xid),
sizeof(pcmd->body.get_tps.req));
/* fill command */
@@ -257,14 +258,14 @@ int as10x_cmd_get_tps(as10x_handle_t *phandle, struct as10x_tps *ptps)
cpu_to_le16(CONTROL_PROC_GETTPS);
/* send command */
- if (phandle->ops->xfer_cmd) {
- error = phandle->ops->xfer_cmd(phandle,
- (uint8_t *) pcmd,
- sizeof(pcmd->body.get_tps.req) +
- HEADER_SIZE,
- (uint8_t *) prsp,
- sizeof(prsp->body.get_tps.rsp) +
- HEADER_SIZE);
+ if (adap->ops->xfer_cmd) {
+ error = adap->ops->xfer_cmd(adap,
+ (uint8_t *) pcmd,
+ sizeof(pcmd->body.get_tps.req) +
+ HEADER_SIZE,
+ (uint8_t *) prsp,
+ sizeof(prsp->body.get_tps.rsp) +
+ HEADER_SIZE);
} else {
error = AS10X_CMD_ERROR;
}
@@ -278,7 +279,7 @@ int as10x_cmd_get_tps(as10x_handle_t *phandle, struct as10x_tps *ptps)
goto out;
/* Response OK -> get response data */
- ptps->constellation = prsp->body.get_tps.rsp.tps.constellation;
+ ptps->modulation = prsp->body.get_tps.rsp.tps.modulation;
ptps->hierarchy = prsp->body.get_tps.rsp.tps.hierarchy;
ptps->interleaving_mode = prsp->body.get_tps.rsp.tps.interleaving_mode;
ptps->code_rate_HP = prsp->body.get_tps.rsp.tps.code_rate_HP;
@@ -296,12 +297,12 @@ out:
/**
* as10x_cmd_get_demod_stats - send get demod stats command to AS10x
- * @phandle: pointer to AS10x handle
+ * @adap: pointer to AS10x bus adapter
* @pdemod_stats: pointer to demod stats parameters structure
*
* Return 0 on success or negative value in case of error.
*/
-int as10x_cmd_get_demod_stats(as10x_handle_t *phandle,
+int as10x_cmd_get_demod_stats(struct as10x_bus_adapter_t *adap,
struct as10x_demod_stats *pdemod_stats)
{
int error;
@@ -309,11 +310,11 @@ int as10x_cmd_get_demod_stats(as10x_handle_t *phandle,
ENTER();
- pcmd = phandle->cmd;
- prsp = phandle->rsp;
+ pcmd = adap->cmd;
+ prsp = adap->rsp;
/* prepare command */
- as10x_cmd_build(pcmd, (++phandle->cmd_xid),
+ as10x_cmd_build(pcmd, (++adap->cmd_xid),
sizeof(pcmd->body.get_demod_stats.req));
/* fill command */
@@ -321,8 +322,8 @@ int as10x_cmd_get_demod_stats(as10x_handle_t *phandle,
cpu_to_le16(CONTROL_PROC_GET_DEMOD_STATS);
/* send command */
- if (phandle->ops->xfer_cmd) {
- error = phandle->ops->xfer_cmd(phandle,
+ if (adap->ops->xfer_cmd) {
+ error = adap->ops->xfer_cmd(adap,
(uint8_t *) pcmd,
sizeof(pcmd->body.get_demod_stats.req)
+ HEADER_SIZE,
@@ -360,13 +361,13 @@ out:
/**
* as10x_cmd_get_impulse_resp - send get impulse response command to AS10x
- * @phandle: pointer to AS10x handle
+ * @adap: pointer to AS10x bus adapter
* @is_ready: pointer to value indicating when impulse
* response data is ready
*
* Return 0 on success or negative value in case of error.
*/
-int as10x_cmd_get_impulse_resp(as10x_handle_t *phandle,
+int as10x_cmd_get_impulse_resp(struct as10x_bus_adapter_t *adap,
uint8_t *is_ready)
{
int error;
@@ -374,11 +375,11 @@ int as10x_cmd_get_impulse_resp(as10x_handle_t *phandle,
ENTER();
- pcmd = phandle->cmd;
- prsp = phandle->rsp;
+ pcmd = adap->cmd;
+ prsp = adap->rsp;
/* prepare command */
- as10x_cmd_build(pcmd, (++phandle->cmd_xid),
+ as10x_cmd_build(pcmd, (++adap->cmd_xid),
sizeof(pcmd->body.get_impulse_rsp.req));
/* fill command */
@@ -386,8 +387,8 @@ int as10x_cmd_get_impulse_resp(as10x_handle_t *phandle,
cpu_to_le16(CONTROL_PROC_GET_IMPULSE_RESP);
/* send command */
- if (phandle->ops->xfer_cmd) {
- error = phandle->ops->xfer_cmd(phandle,
+ if (adap->ops->xfer_cmd) {
+ error = adap->ops->xfer_cmd(adap,
(uint8_t *) pcmd,
sizeof(pcmd->body.get_impulse_rsp.req)
+ HEADER_SIZE,
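
Editor's note: throughout as10x_cmd.c the opaque as10x_handle_t is replaced by struct as10x_bus_adapter_t, but the call shape stays the same: every command helper goes through adap->ops->xfer_cmd, so the command code stays independent of the transport behind the ops table. A compilable userspace sketch of that indirection with simplified stand-in types (none of these names are the driver's):

#include <stdio.h>
#include <stdint.h>

struct bus_adapter;

struct bus_ops {
	int (*xfer_cmd)(struct bus_adapter *adap,
			uint8_t *send, int send_len,
			uint8_t *recv, int recv_len);
};

struct bus_adapter {
	const struct bus_ops *ops;
};

/* command helper: only ever talks to the bus through the ops table */
static int send_cmd(struct bus_adapter *adap, uint8_t *req, int req_len,
		    uint8_t *rsp, int rsp_len)
{
	if (!adap->ops->xfer_cmd)
		return -1;	/* analogous to AS10X_CMD_ERROR */
	return adap->ops->xfer_cmd(adap, req, req_len, rsp, rsp_len);
}

/* one possible transport implementation */
static int usb_xfer(struct bus_adapter *adap, uint8_t *send, int send_len,
		    uint8_t *recv, int recv_len)
{
	(void)adap; (void)send; (void)recv;
	printf("xfer %d bytes out, %d bytes in\n", send_len, recv_len);
	return 0;
}

static const struct bus_ops usb_ops = { .xfer_cmd = usb_xfer };

int main(void)
{
	struct bus_adapter adap = { .ops = &usb_ops };
	uint8_t req[4] = { 0 }, rsp[4];

	return send_cmd(&adap, req, sizeof(req), rsp, sizeof(rsp));
}
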
diff --git a/drivers/staging/media/as102/as10x_cmd.h b/drivers/staging/media/as102/as10x_cmd.h
index 01a716380e0a..4ea249e7adab 100644
--- a/drivers/staging/media/as102/as10x_cmd.h
+++ b/drivers/staging/media/as102/as10x_cmd.h
@@ -28,459 +28,456 @@
/*********************************/
/* MACRO DEFINITIONS */
/*********************************/
-#define AS10X_CMD_ERROR -1
+#define AS10X_CMD_ERROR -1
-#define SERVICE_PROG_ID 0x0002
-#define SERVICE_PROG_VERSION 0x0001
+#define SERVICE_PROG_ID 0x0002
+#define SERVICE_PROG_VERSION 0x0001
-#define HIER_NONE 0x00
-#define HIER_LOW_PRIORITY 0x01
+#define HIER_NONE 0x00
+#define HIER_LOW_PRIORITY 0x01
#define HEADER_SIZE (sizeof(struct as10x_cmd_header_t))
/* context request types */
-#define GET_CONTEXT_DATA 1
-#define SET_CONTEXT_DATA 2
+#define GET_CONTEXT_DATA 1
+#define SET_CONTEXT_DATA 2
/* ODSP suspend modes */
-#define CFG_MODE_ODSP_RESUME 0
-#define CFG_MODE_ODSP_SUSPEND 1
+#define CFG_MODE_ODSP_RESUME 0
+#define CFG_MODE_ODSP_SUSPEND 1
/* Dump memory size */
-#define DUMP_BLOCK_SIZE_MAX 0x20
+#define DUMP_BLOCK_SIZE_MAX 0x20
/*********************************/
/* TYPE DEFINITION */
/*********************************/
-typedef enum {
- CONTROL_PROC_TURNON = 0x0001,
- CONTROL_PROC_TURNON_RSP = 0x0100,
- CONTROL_PROC_SET_REGISTER = 0x0002,
- CONTROL_PROC_SET_REGISTER_RSP = 0x0200,
- CONTROL_PROC_GET_REGISTER = 0x0003,
- CONTROL_PROC_GET_REGISTER_RSP = 0x0300,
- CONTROL_PROC_SETTUNE = 0x000A,
- CONTROL_PROC_SETTUNE_RSP = 0x0A00,
- CONTROL_PROC_GETTUNESTAT = 0x000B,
- CONTROL_PROC_GETTUNESTAT_RSP = 0x0B00,
- CONTROL_PROC_GETTPS = 0x000D,
- CONTROL_PROC_GETTPS_RSP = 0x0D00,
- CONTROL_PROC_SETFILTER = 0x000E,
- CONTROL_PROC_SETFILTER_RSP = 0x0E00,
- CONTROL_PROC_REMOVEFILTER = 0x000F,
- CONTROL_PROC_REMOVEFILTER_RSP = 0x0F00,
- CONTROL_PROC_GET_IMPULSE_RESP = 0x0012,
- CONTROL_PROC_GET_IMPULSE_RESP_RSP = 0x1200,
- CONTROL_PROC_START_STREAMING = 0x0013,
- CONTROL_PROC_START_STREAMING_RSP = 0x1300,
- CONTROL_PROC_STOP_STREAMING = 0x0014,
- CONTROL_PROC_STOP_STREAMING_RSP = 0x1400,
- CONTROL_PROC_GET_DEMOD_STATS = 0x0015,
- CONTROL_PROC_GET_DEMOD_STATS_RSP = 0x1500,
- CONTROL_PROC_ELNA_CHANGE_MODE = 0x0016,
- CONTROL_PROC_ELNA_CHANGE_MODE_RSP = 0x1600,
- CONTROL_PROC_ODSP_CHANGE_MODE = 0x0017,
- CONTROL_PROC_ODSP_CHANGE_MODE_RSP = 0x1700,
- CONTROL_PROC_AGC_CHANGE_MODE = 0x0018,
- CONTROL_PROC_AGC_CHANGE_MODE_RSP = 0x1800,
-
- CONTROL_PROC_CONTEXT = 0x00FC,
- CONTROL_PROC_CONTEXT_RSP = 0xFC00,
- CONTROL_PROC_DUMP_MEMORY = 0x00FD,
- CONTROL_PROC_DUMP_MEMORY_RSP = 0xFD00,
- CONTROL_PROC_DUMPLOG_MEMORY = 0x00FE,
- CONTROL_PROC_DUMPLOG_MEMORY_RSP = 0xFE00,
- CONTROL_PROC_TURNOFF = 0x00FF,
- CONTROL_PROC_TURNOFF_RSP = 0xFF00
-} control_proc;
-
-
-#pragma pack(1)
-typedef union {
- /* request */
- struct {
- /* request identifier */
- uint16_t proc_id;
- } req;
- /* response */
- struct {
- /* response identifier */
- uint16_t proc_id;
- /* error */
- uint8_t error;
- } rsp;
-} TURN_ON;
-
-typedef union {
- /* request */
- struct {
- /* request identifier */
- uint16_t proc_id;
- } req;
- /* response */
- struct {
- /* response identifier */
- uint16_t proc_id;
- /* error */
- uint8_t err;
- } rsp;
-} TURN_OFF;
-
-typedef union {
- /* request */
- struct {
- /* request identifier */
- uint16_t proc_id;
- /* tune params */
- struct as10x_tune_args args;
- } req;
- /* response */
- struct {
- /* response identifier */
- uint16_t proc_id;
- /* response error */
- uint8_t error;
- } rsp;
-} SET_TUNE;
-
-typedef union {
- /* request */
- struct {
- /* request identifier */
- uint16_t proc_id;
- } req;
- /* response */
- struct {
- /* response identifier */
- uint16_t proc_id;
- /* response error */
- uint8_t error;
- /* tune status */
- struct as10x_tune_status sts;
- } rsp;
-} GET_TUNE_STATUS;
-
-typedef union {
- /* request */
- struct {
- /* request identifier */
- uint16_t proc_id;
- } req;
- /* response */
- struct {
- /* response identifier */
- uint16_t proc_id;
- /* response error */
- uint8_t error;
- /* tps details */
- struct as10x_tps tps;
- } rsp;
-} GET_TPS;
-
-typedef union {
- /* request */
- struct {
- /* request identifier */
- uint16_t proc_id;
- } req;
- /* response */
- struct {
- /* response identifier */
- uint16_t proc_id;
- /* response error */
- uint8_t error;
- } rsp;
-} COMMON;
-
-typedef union {
- /* request */
- struct {
- /* request identifier */
- uint16_t proc_id;
- /* PID to filter */
- uint16_t pid;
- /* stream type (MPE, PSI/SI or PES )*/
- uint8_t stream_type;
- /* PID index in filter table */
- uint8_t idx;
- } req;
- /* response */
- struct {
- /* response identifier */
- uint16_t proc_id;
- /* response error */
- uint8_t error;
- /* Filter id */
- uint8_t filter_id;
- } rsp;
-} ADD_PID_FILTER;
-
-typedef union {
- /* request */
- struct {
- /* request identifier */
- uint16_t proc_id;
- /* PID to remove */
- uint16_t pid;
- } req;
- /* response */
- struct {
- /* response identifier */
- uint16_t proc_id;
- /* response error */
- uint8_t error;
- } rsp;
-} DEL_PID_FILTER;
-
-typedef union {
- /* request */
- struct {
- /* request identifier */
- uint16_t proc_id;
- } req;
- /* response */
- struct {
- /* response identifier */
- uint16_t proc_id;
- /* error */
- uint8_t error;
- } rsp;
-} START_STREAMING;
-
-typedef union {
- /* request */
- struct {
- /* request identifier */
- uint16_t proc_id;
- } req;
- /* response */
- struct {
- /* response identifier */
- uint16_t proc_id;
- /* error */
- uint8_t error;
- } rsp;
-} STOP_STREAMING;
-
-typedef union {
- /* request */
- struct {
- /* request identifier */
- uint16_t proc_id;
- } req;
- /* response */
- struct {
- /* response identifier */
- uint16_t proc_id;
- /* error */
- uint8_t error;
- /* demod stats */
- struct as10x_demod_stats stats;
- } rsp;
-} GET_DEMOD_STATS;
-
-typedef union {
- /* request */
- struct {
- /* request identifier */
- uint16_t proc_id;
- } req;
- /* response */
- struct {
- /* response identifier */
- uint16_t proc_id;
- /* error */
- uint8_t error;
- /* impulse response ready */
- uint8_t is_ready;
- } rsp;
-} GET_IMPULSE_RESP;
-
-typedef union {
- /* request */
- struct {
- /* request identifier */
- uint16_t proc_id;
- /* value to write (for set context)*/
- struct as10x_register_value reg_val;
- /* context tag */
- uint16_t tag;
- /* context request type */
- uint16_t type;
- } req;
- /* response */
- struct {
- /* response identifier */
- uint16_t proc_id;
- /* value read (for get context) */
- struct as10x_register_value reg_val;
- /* context request type */
- uint16_t type;
- /* error */
- uint8_t error;
- } rsp;
-} FW_CONTEXT;
-
-typedef union {
- /* request */
- struct {
- /* response identifier */
- uint16_t proc_id;
- /* register description */
- struct as10x_register_addr reg_addr;
- /* register content */
- struct as10x_register_value reg_val;
- } req;
- /* response */
- struct {
- /* response identifier */
- uint16_t proc_id;
- /* error */
- uint8_t error;
- } rsp;
-} SET_REGISTER;
-
-typedef union {
- /* request */
- struct {
- /* response identifier */
- uint16_t proc_id;
- /* register description */
- struct as10x_register_addr reg_addr;
- } req;
- /* response */
- struct {
- /* response identifier */
- uint16_t proc_id;
- /* error */
- uint8_t error;
- /* register content */
- struct as10x_register_value reg_val;
- } rsp;
-} GET_REGISTER;
-
-typedef union {
- /* request */
- struct {
- /* request identifier */
- uint16_t proc_id;
- /* mode */
- uint8_t mode;
- } req;
- /* response */
- struct {
- /* response identifier */
- uint16_t proc_id;
- /* error */
- uint8_t error;
- } rsp;
-} CFG_CHANGE_MODE;
+enum control_proc {
+ CONTROL_PROC_TURNON = 0x0001,
+ CONTROL_PROC_TURNON_RSP = 0x0100,
+ CONTROL_PROC_SET_REGISTER = 0x0002,
+ CONTROL_PROC_SET_REGISTER_RSP = 0x0200,
+ CONTROL_PROC_GET_REGISTER = 0x0003,
+ CONTROL_PROC_GET_REGISTER_RSP = 0x0300,
+ CONTROL_PROC_SETTUNE = 0x000A,
+ CONTROL_PROC_SETTUNE_RSP = 0x0A00,
+ CONTROL_PROC_GETTUNESTAT = 0x000B,
+ CONTROL_PROC_GETTUNESTAT_RSP = 0x0B00,
+ CONTROL_PROC_GETTPS = 0x000D,
+ CONTROL_PROC_GETTPS_RSP = 0x0D00,
+ CONTROL_PROC_SETFILTER = 0x000E,
+ CONTROL_PROC_SETFILTER_RSP = 0x0E00,
+ CONTROL_PROC_REMOVEFILTER = 0x000F,
+ CONTROL_PROC_REMOVEFILTER_RSP = 0x0F00,
+ CONTROL_PROC_GET_IMPULSE_RESP = 0x0012,
+ CONTROL_PROC_GET_IMPULSE_RESP_RSP = 0x1200,
+ CONTROL_PROC_START_STREAMING = 0x0013,
+ CONTROL_PROC_START_STREAMING_RSP = 0x1300,
+ CONTROL_PROC_STOP_STREAMING = 0x0014,
+ CONTROL_PROC_STOP_STREAMING_RSP = 0x1400,
+ CONTROL_PROC_GET_DEMOD_STATS = 0x0015,
+ CONTROL_PROC_GET_DEMOD_STATS_RSP = 0x1500,
+ CONTROL_PROC_ELNA_CHANGE_MODE = 0x0016,
+ CONTROL_PROC_ELNA_CHANGE_MODE_RSP = 0x1600,
+ CONTROL_PROC_ODSP_CHANGE_MODE = 0x0017,
+ CONTROL_PROC_ODSP_CHANGE_MODE_RSP = 0x1700,
+ CONTROL_PROC_AGC_CHANGE_MODE = 0x0018,
+ CONTROL_PROC_AGC_CHANGE_MODE_RSP = 0x1800,
+
+ CONTROL_PROC_CONTEXT = 0x00FC,
+ CONTROL_PROC_CONTEXT_RSP = 0xFC00,
+ CONTROL_PROC_DUMP_MEMORY = 0x00FD,
+ CONTROL_PROC_DUMP_MEMORY_RSP = 0xFD00,
+ CONTROL_PROC_DUMPLOG_MEMORY = 0x00FE,
+ CONTROL_PROC_DUMPLOG_MEMORY_RSP = 0xFE00,
+ CONTROL_PROC_TURNOFF = 0x00FF,
+ CONTROL_PROC_TURNOFF_RSP = 0xFF00
+};
+
+union as10x_turn_on {
+ /* request */
+ struct {
+ /* request identifier */
+ uint16_t proc_id;
+ } req;
+ /* response */
+ struct {
+ /* response identifier */
+ uint16_t proc_id;
+ /* error */
+ uint8_t error;
+ } rsp;
+} __packed;
+
+union as10x_turn_off {
+ /* request */
+ struct {
+ /* request identifier */
+ uint16_t proc_id;
+ } req;
+ /* response */
+ struct {
+ /* response identifier */
+ uint16_t proc_id;
+ /* error */
+ uint8_t err;
+ } rsp;
+} __packed;
+
+union as10x_set_tune {
+ /* request */
+ struct {
+ /* request identifier */
+ uint16_t proc_id;
+ /* tune params */
+ struct as10x_tune_args args;
+ } req;
+ /* response */
+ struct {
+ /* response identifier */
+ uint16_t proc_id;
+ /* response error */
+ uint8_t error;
+ } rsp;
+} __packed;
+
+union as10x_get_tune_status {
+ /* request */
+ struct {
+ /* request identifier */
+ uint16_t proc_id;
+ } req;
+ /* response */
+ struct {
+ /* response identifier */
+ uint16_t proc_id;
+ /* response error */
+ uint8_t error;
+ /* tune status */
+ struct as10x_tune_status sts;
+ } rsp;
+} __packed;
+
+union as10x_get_tps {
+ /* request */
+ struct {
+ /* request identifier */
+ uint16_t proc_id;
+ } req;
+ /* response */
+ struct {
+ /* response identifier */
+ uint16_t proc_id;
+ /* response error */
+ uint8_t error;
+ /* tps details */
+ struct as10x_tps tps;
+ } rsp;
+} __packed;
+
+union as10x_common {
+ /* request */
+ struct {
+ /* request identifier */
+ uint16_t proc_id;
+ } req;
+ /* response */
+ struct {
+ /* response identifier */
+ uint16_t proc_id;
+ /* response error */
+ uint8_t error;
+ } rsp;
+} __packed;
+
+union as10x_add_pid_filter {
+ /* request */
+ struct {
+ /* request identifier */
+ uint16_t proc_id;
+ /* PID to filter */
+ uint16_t pid;
+ /* stream type (MPE, PSI/SI or PES )*/
+ uint8_t stream_type;
+ /* PID index in filter table */
+ uint8_t idx;
+ } req;
+ /* response */
+ struct {
+ /* response identifier */
+ uint16_t proc_id;
+ /* response error */
+ uint8_t error;
+ /* Filter id */
+ uint8_t filter_id;
+ } rsp;
+} __packed;
+
+union as10x_del_pid_filter {
+ /* request */
+ struct {
+ /* request identifier */
+ uint16_t proc_id;
+ /* PID to remove */
+ uint16_t pid;
+ } req;
+ /* response */
+ struct {
+ /* response identifier */
+ uint16_t proc_id;
+ /* response error */
+ uint8_t error;
+ } rsp;
+} __packed;
+
+union as10x_start_streaming {
+ /* request */
+ struct {
+ /* request identifier */
+ uint16_t proc_id;
+ } req;
+ /* response */
+ struct {
+ /* response identifier */
+ uint16_t proc_id;
+ /* error */
+ uint8_t error;
+ } rsp;
+} __packed;
+
+union as10x_stop_streaming {
+ /* request */
+ struct {
+ /* request identifier */
+ uint16_t proc_id;
+ } req;
+ /* response */
+ struct {
+ /* response identifier */
+ uint16_t proc_id;
+ /* error */
+ uint8_t error;
+ } rsp;
+} __packed;
+
+union as10x_get_demod_stats {
+ /* request */
+ struct {
+ /* request identifier */
+ uint16_t proc_id;
+ } req;
+ /* response */
+ struct {
+ /* response identifier */
+ uint16_t proc_id;
+ /* error */
+ uint8_t error;
+ /* demod stats */
+ struct as10x_demod_stats stats;
+ } rsp;
+} __packed;
+
+union as10x_get_impulse_resp {
+ /* request */
+ struct {
+ /* request identifier */
+ uint16_t proc_id;
+ } req;
+ /* response */
+ struct {
+ /* response identifier */
+ uint16_t proc_id;
+ /* error */
+ uint8_t error;
+ /* impulse response ready */
+ uint8_t is_ready;
+ } rsp;
+} __packed;
+
+union as10x_fw_context {
+ /* request */
+ struct {
+ /* request identifier */
+ uint16_t proc_id;
+ /* value to write (for set context)*/
+ struct as10x_register_value reg_val;
+ /* context tag */
+ uint16_t tag;
+ /* context request type */
+ uint16_t type;
+ } req;
+ /* response */
+ struct {
+ /* response identifier */
+ uint16_t proc_id;
+ /* value read (for get context) */
+ struct as10x_register_value reg_val;
+ /* context request type */
+ uint16_t type;
+ /* error */
+ uint8_t error;
+ } rsp;
+} __packed;
+
+union as10x_set_register {
+ /* request */
+ struct {
+ /* response identifier */
+ uint16_t proc_id;
+ /* register description */
+ struct as10x_register_addr reg_addr;
+ /* register content */
+ struct as10x_register_value reg_val;
+ } req;
+ /* response */
+ struct {
+ /* response identifier */
+ uint16_t proc_id;
+ /* error */
+ uint8_t error;
+ } rsp;
+} __packed;
+
+union as10x_get_register {
+ /* request */
+ struct {
+ /* response identifier */
+ uint16_t proc_id;
+ /* register description */
+ struct as10x_register_addr reg_addr;
+ } req;
+ /* response */
+ struct {
+ /* response identifier */
+ uint16_t proc_id;
+ /* error */
+ uint8_t error;
+ /* register content */
+ struct as10x_register_value reg_val;
+ } rsp;
+} __packed;
+
+union as10x_cfg_change_mode {
+ /* request */
+ struct {
+ /* request identifier */
+ uint16_t proc_id;
+ /* mode */
+ uint8_t mode;
+ } req;
+ /* response */
+ struct {
+ /* response identifier */
+ uint16_t proc_id;
+ /* error */
+ uint8_t error;
+ } rsp;
+} __packed;
struct as10x_cmd_header_t {
- uint16_t req_id;
- uint16_t prog;
- uint16_t version;
- uint16_t data_len;
-};
+ uint16_t req_id;
+ uint16_t prog;
+ uint16_t version;
+ uint16_t data_len;
+} __packed;
#define DUMP_BLOCK_SIZE 16
-typedef union {
- /* request */
- struct {
- /* request identifier */
- uint16_t proc_id;
- /* dump memory type request */
- uint8_t dump_req;
- /* register description */
- struct as10x_register_addr reg_addr;
- /* nb blocks to read */
- uint16_t num_blocks;
- } req;
- /* response */
- struct {
- /* response identifier */
- uint16_t proc_id;
- /* error */
- uint8_t error;
- /* dump response */
- uint8_t dump_rsp;
- /* data */
- union {
- uint8_t data8[DUMP_BLOCK_SIZE];
- uint16_t data16[DUMP_BLOCK_SIZE / sizeof(uint16_t)];
- uint32_t data32[DUMP_BLOCK_SIZE / sizeof(uint32_t)];
- } u;
- } rsp;
-} DUMP_MEMORY;
-
-typedef union {
- struct {
- /* request identifier */
- uint16_t proc_id;
- /* dump memory type request */
- uint8_t dump_req;
- } req;
- struct {
- /* request identifier */
- uint16_t proc_id;
- /* error */
- uint8_t error;
- /* dump response */
- uint8_t dump_rsp;
- /* dump data */
- uint8_t data[DUMP_BLOCK_SIZE];
- } rsp;
-} DUMPLOG_MEMORY;
-
-typedef union {
- /* request */
- struct {
- uint16_t proc_id;
- uint8_t data[64 - sizeof(struct as10x_cmd_header_t) -2 /* proc_id */];
- } req;
- /* response */
- struct {
- uint16_t proc_id;
- uint8_t error;
- uint8_t data[64 - sizeof(struct as10x_cmd_header_t) /* header */
- - 2 /* proc_id */ - 1 /* rc */];
- } rsp;
-} RAW_DATA;
+
+union as10x_dump_memory {
+ /* request */
+ struct {
+ /* request identifier */
+ uint16_t proc_id;
+ /* dump memory type request */
+ uint8_t dump_req;
+ /* register description */
+ struct as10x_register_addr reg_addr;
+ /* nb blocks to read */
+ uint16_t num_blocks;
+ } req;
+ /* response */
+ struct {
+ /* response identifier */
+ uint16_t proc_id;
+ /* error */
+ uint8_t error;
+ /* dump response */
+ uint8_t dump_rsp;
+ /* data */
+ union {
+ uint8_t data8[DUMP_BLOCK_SIZE];
+ uint16_t data16[DUMP_BLOCK_SIZE / sizeof(uint16_t)];
+ uint32_t data32[DUMP_BLOCK_SIZE / sizeof(uint32_t)];
+ } u;
+ } rsp;
+} __packed;
+
+union as10x_dumplog_memory {
+ struct {
+ /* request identifier */
+ uint16_t proc_id;
+ /* dump memory type request */
+ uint8_t dump_req;
+ } req;
+ struct {
+ /* request identifier */
+ uint16_t proc_id;
+ /* error */
+ uint8_t error;
+ /* dump response */
+ uint8_t dump_rsp;
+ /* dump data */
+ uint8_t data[DUMP_BLOCK_SIZE];
+ } rsp;
+} __packed;
+
+union as10x_raw_data {
+ /* request */
+ struct {
+ uint16_t proc_id;
+ uint8_t data[64 - sizeof(struct as10x_cmd_header_t)
+ - 2 /* proc_id */];
+ } req;
+ /* response */
+ struct {
+ uint16_t proc_id;
+ uint8_t error;
+ uint8_t data[64 - sizeof(struct as10x_cmd_header_t)
+ - 2 /* proc_id */ - 1 /* rc */];
+ } rsp;
+} __packed;
struct as10x_cmd_t {
- /* header */
- struct as10x_cmd_header_t header;
- /* body */
- union {
- TURN_ON turn_on;
- TURN_OFF turn_off;
- SET_TUNE set_tune;
- GET_TUNE_STATUS get_tune_status;
- GET_TPS get_tps;
- COMMON common;
- ADD_PID_FILTER add_pid_filter;
- DEL_PID_FILTER del_pid_filter;
- START_STREAMING start_streaming;
- STOP_STREAMING stop_streaming;
- GET_DEMOD_STATS get_demod_stats;
- GET_IMPULSE_RESP get_impulse_rsp;
- FW_CONTEXT context;
- SET_REGISTER set_register;
- GET_REGISTER get_register;
- CFG_CHANGE_MODE cfg_change_mode;
- DUMP_MEMORY dump_memory;
- DUMPLOG_MEMORY dumplog_memory;
- RAW_DATA raw_data;
- } body;
-};
+ struct as10x_cmd_header_t header;
+ union {
+ union as10x_turn_on turn_on;
+ union as10x_turn_off turn_off;
+ union as10x_set_tune set_tune;
+ union as10x_get_tune_status get_tune_status;
+ union as10x_get_tps get_tps;
+ union as10x_common common;
+ union as10x_add_pid_filter add_pid_filter;
+ union as10x_del_pid_filter del_pid_filter;
+ union as10x_start_streaming start_streaming;
+ union as10x_stop_streaming stop_streaming;
+ union as10x_get_demod_stats get_demod_stats;
+ union as10x_get_impulse_resp get_impulse_rsp;
+ union as10x_fw_context context;
+ union as10x_set_register set_register;
+ union as10x_get_register get_register;
+ union as10x_cfg_change_mode cfg_change_mode;
+ union as10x_dump_memory dump_memory;
+ union as10x_dumplog_memory dumplog_memory;
+ union as10x_raw_data raw_data;
+ } body;
+} __packed;
struct as10x_token_cmd_t {
- /* token cmd */
- struct as10x_cmd_t c;
- /* token response */
- struct as10x_cmd_t r;
-};
-#pragma pack()
+ /* token cmd */
+ struct as10x_cmd_t c;
+ /* token response */
+ struct as10x_cmd_t r;
+} __packed;
/**************************/
@@ -491,50 +488,42 @@ void as10x_cmd_build(struct as10x_cmd_t *pcmd, uint16_t proc_id,
uint16_t cmd_len);
int as10x_rsp_parse(struct as10x_cmd_t *r, uint16_t proc_id);
-#ifdef __cplusplus
-extern "C" {
-#endif
-
/* as10x cmd */
-int as10x_cmd_turn_on(as10x_handle_t *phandle);
-int as10x_cmd_turn_off(as10x_handle_t *phandle);
+int as10x_cmd_turn_on(struct as10x_bus_adapter_t *adap);
+int as10x_cmd_turn_off(struct as10x_bus_adapter_t *adap);
-int as10x_cmd_set_tune(as10x_handle_t *phandle,
+int as10x_cmd_set_tune(struct as10x_bus_adapter_t *adap,
struct as10x_tune_args *ptune);
-int as10x_cmd_get_tune_status(as10x_handle_t *phandle,
+int as10x_cmd_get_tune_status(struct as10x_bus_adapter_t *adap,
struct as10x_tune_status *pstatus);
-int as10x_cmd_get_tps(as10x_handle_t *phandle,
+int as10x_cmd_get_tps(struct as10x_bus_adapter_t *adap,
struct as10x_tps *ptps);
-int as10x_cmd_get_demod_stats(as10x_handle_t *phandle,
+int as10x_cmd_get_demod_stats(struct as10x_bus_adapter_t *adap,
struct as10x_demod_stats *pdemod_stats);
-int as10x_cmd_get_impulse_resp(as10x_handle_t *phandle,
+int as10x_cmd_get_impulse_resp(struct as10x_bus_adapter_t *adap,
uint8_t *is_ready);
/* as10x cmd stream */
-int as10x_cmd_add_PID_filter(as10x_handle_t *phandle,
+int as10x_cmd_add_PID_filter(struct as10x_bus_adapter_t *adap,
struct as10x_ts_filter *filter);
-int as10x_cmd_del_PID_filter(as10x_handle_t *phandle,
+int as10x_cmd_del_PID_filter(struct as10x_bus_adapter_t *adap,
uint16_t pid_value);
-int as10x_cmd_start_streaming(as10x_handle_t *phandle);
-int as10x_cmd_stop_streaming(as10x_handle_t *phandle);
+int as10x_cmd_start_streaming(struct as10x_bus_adapter_t *adap);
+int as10x_cmd_stop_streaming(struct as10x_bus_adapter_t *adap);
/* as10x cmd cfg */
-int as10x_cmd_set_context(as10x_handle_t *phandle,
+int as10x_cmd_set_context(struct as10x_bus_adapter_t *adap,
uint16_t tag,
uint32_t value);
-int as10x_cmd_get_context(as10x_handle_t *phandle,
+int as10x_cmd_get_context(struct as10x_bus_adapter_t *adap,
uint16_t tag,
uint32_t *pvalue);
-int as10x_cmd_eLNA_change_mode(as10x_handle_t *phandle, uint8_t mode);
+int as10x_cmd_eLNA_change_mode(struct as10x_bus_adapter_t *adap, uint8_t mode);
int as10x_context_rsp_parse(struct as10x_cmd_t *prsp, uint16_t proc_id);
-#ifdef __cplusplus
-}
-#endif
#endif
-/* EOF - vim: set textwidth=80 ts=3 sw=3 sts=3 et: */
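
Editor's note: the as10x_cmd.h rewrite above turns the CamelCase typedef'd unions (TURN_ON, SET_TUNE, ...) into plainly named union as10x_* types marked __packed, matching kernel coding style. A trimmed-down, compilable sketch of the new shape; only the proc_id/error fields are kept and the name is suffixed _example to flag it as illustrative:

#include <stdint.h>

#define __packed __attribute__((packed))

/* old style (removed by the patch):
 * typedef union { struct { uint16_t proc_id; } req; ... } TURN_ON;
 */

/* new style: a named, packed union, referred to as "union as10x_turn_on" */
union as10x_turn_on_example {
	struct {
		uint16_t proc_id;	/* request identifier */
	} req;
	struct {
		uint16_t proc_id;	/* response identifier */
		uint8_t error;		/* response error */
	} rsp;
} __packed;

int main(void)
{
	union as10x_turn_on_example c = { .req = { .proc_id = 0x0001 } };

	return c.req.proc_id == 0x0001 ? 0 : 1;
}
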
diff --git a/drivers/staging/media/as102/as10x_cmd_cfg.c b/drivers/staging/media/as102/as10x_cmd_cfg.c
index ec6f69fcf399..d2a4bce89623 100644
--- a/drivers/staging/media/as102/as10x_cmd_cfg.c
+++ b/drivers/staging/media/as102/as10x_cmd_cfg.c
@@ -28,13 +28,13 @@
/**
* as10x_cmd_get_context - Send get context command to AS10x
- * @phandle: pointer to AS10x handle
+ * @adap: pointer to AS10x bus adapter
* @tag: context tag
* @pvalue: pointer where to store context value read
*
* Return 0 on success or negative value in case of error.
*/
-int as10x_cmd_get_context(as10x_handle_t *phandle, uint16_t tag,
+int as10x_cmd_get_context(struct as10x_bus_adapter_t *adap, uint16_t tag,
uint32_t *pvalue)
{
int error;
@@ -42,11 +42,11 @@ int as10x_cmd_get_context(as10x_handle_t *phandle, uint16_t tag,
ENTER();
- pcmd = phandle->cmd;
- prsp = phandle->rsp;
+ pcmd = adap->cmd;
+ prsp = adap->rsp;
/* prepare command */
- as10x_cmd_build(pcmd, (++phandle->cmd_xid),
+ as10x_cmd_build(pcmd, (++adap->cmd_xid),
sizeof(pcmd->body.context.req));
/* fill command */
@@ -55,14 +55,14 @@ int as10x_cmd_get_context(as10x_handle_t *phandle, uint16_t tag,
pcmd->body.context.req.type = cpu_to_le16(GET_CONTEXT_DATA);
/* send command */
- if (phandle->ops->xfer_cmd) {
- error = phandle->ops->xfer_cmd(phandle,
- (uint8_t *) pcmd,
- sizeof(pcmd->body.context.req)
- + HEADER_SIZE,
- (uint8_t *) prsp,
- sizeof(prsp->body.context.rsp)
- + HEADER_SIZE);
+ if (adap->ops->xfer_cmd) {
+ error = adap->ops->xfer_cmd(adap,
+ (uint8_t *) pcmd,
+ sizeof(pcmd->body.context.req)
+ + HEADER_SIZE,
+ (uint8_t *) prsp,
+ sizeof(prsp->body.context.rsp)
+ + HEADER_SIZE);
} else {
error = AS10X_CMD_ERROR;
}
@@ -87,13 +87,13 @@ out:
/**
* as10x_cmd_set_context - send set context command to AS10x
- * @phandle: pointer to AS10x handle
+ * @adap: pointer to AS10x bus adapter
* @tag: context tag
* @value: value to set in context
*
* Return 0 on success or negative value in case of error.
*/
-int as10x_cmd_set_context(as10x_handle_t *phandle, uint16_t tag,
+int as10x_cmd_set_context(struct as10x_bus_adapter_t *adap, uint16_t tag,
uint32_t value)
{
int error;
@@ -101,11 +101,11 @@ int as10x_cmd_set_context(as10x_handle_t *phandle, uint16_t tag,
ENTER();
- pcmd = phandle->cmd;
- prsp = phandle->rsp;
+ pcmd = adap->cmd;
+ prsp = adap->rsp;
/* prepare command */
- as10x_cmd_build(pcmd, (++phandle->cmd_xid),
+ as10x_cmd_build(pcmd, (++adap->cmd_xid),
sizeof(pcmd->body.context.req));
/* fill command */
@@ -116,14 +116,14 @@ int as10x_cmd_set_context(as10x_handle_t *phandle, uint16_t tag,
pcmd->body.context.req.type = cpu_to_le16(SET_CONTEXT_DATA);
/* send command */
- if (phandle->ops->xfer_cmd) {
- error = phandle->ops->xfer_cmd(phandle,
- (uint8_t *) pcmd,
- sizeof(pcmd->body.context.req)
- + HEADER_SIZE,
- (uint8_t *) prsp,
- sizeof(prsp->body.context.rsp)
- + HEADER_SIZE);
+ if (adap->ops->xfer_cmd) {
+ error = adap->ops->xfer_cmd(adap,
+ (uint8_t *) pcmd,
+ sizeof(pcmd->body.context.req)
+ + HEADER_SIZE,
+ (uint8_t *) prsp,
+ sizeof(prsp->body.context.rsp)
+ + HEADER_SIZE);
} else {
error = AS10X_CMD_ERROR;
}
@@ -142,7 +142,7 @@ out:
/**
* as10x_cmd_eLNA_change_mode - send eLNA change mode command to AS10x
- * @phandle: pointer to AS10x handle
+ * @adap: pointer to AS10x bus adapter
* @mode: mode selected:
* - ON : 0x0 => eLNA always ON
* - OFF : 0x1 => eLNA always OFF
@@ -151,18 +151,18 @@ out:
*
* Return 0 on success or negative value in case of error.
*/
-int as10x_cmd_eLNA_change_mode(as10x_handle_t *phandle, uint8_t mode)
+int as10x_cmd_eLNA_change_mode(struct as10x_bus_adapter_t *adap, uint8_t mode)
{
int error;
struct as10x_cmd_t *pcmd, *prsp;
ENTER();
- pcmd = phandle->cmd;
- prsp = phandle->rsp;
+ pcmd = adap->cmd;
+ prsp = adap->rsp;
/* prepare command */
- as10x_cmd_build(pcmd, (++phandle->cmd_xid),
+ as10x_cmd_build(pcmd, (++adap->cmd_xid),
sizeof(pcmd->body.cfg_change_mode.req));
/* fill command */
@@ -171,8 +171,8 @@ int as10x_cmd_eLNA_change_mode(as10x_handle_t *phandle, uint8_t mode)
pcmd->body.cfg_change_mode.req.mode = mode;
/* send command */
- if (phandle->ops->xfer_cmd) {
- error = phandle->ops->xfer_cmd(phandle, (uint8_t *) pcmd,
+ if (adap->ops->xfer_cmd) {
+ error = adap->ops->xfer_cmd(adap, (uint8_t *) pcmd,
sizeof(pcmd->body.cfg_change_mode.req)
+ HEADER_SIZE, (uint8_t *) prsp,
sizeof(prsp->body.cfg_change_mode.rsp)
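Every command in this file follows the same build/fill/transfer flow shown in the hunks above. A condensed restatement of that pattern as one sketch function (this is not a new firmware command, just the shape of the existing ones):

static int example_cmd_send(struct as10x_bus_adapter_t *adap)
{
	struct as10x_cmd_t *pcmd = adap->cmd;
	struct as10x_cmd_t *prsp = adap->rsp;
	int error;

	/* 1) stamp the header with a fresh transaction id and the body length */
	as10x_cmd_build(pcmd, ++adap->cmd_xid, sizeof(pcmd->body.context.req));

	/* 2) fill the request body */
	pcmd->body.context.req.type = cpu_to_le16(GET_CONTEXT_DATA);

	/* 3) hand request and response buffers to the bus backend */
	if (adap->ops->xfer_cmd)
		error = adap->ops->xfer_cmd(adap, (uint8_t *)pcmd,
					    sizeof(pcmd->body.context.req) + HEADER_SIZE,
					    (uint8_t *)prsp,
					    sizeof(prsp->body.context.rsp) + HEADER_SIZE);
	else
		error = AS10X_CMD_ERROR;

	return error;
}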
diff --git a/drivers/staging/media/as102/as10x_cmd_stream.c b/drivers/staging/media/as102/as10x_cmd_stream.c
index 045c70683193..6d000f60fb0e 100644
--- a/drivers/staging/media/as102/as10x_cmd_stream.c
+++ b/drivers/staging/media/as102/as10x_cmd_stream.c
@@ -23,12 +23,12 @@
/**
* as10x_cmd_add_PID_filter - send add filter command to AS10x
- * @phandle: pointer to AS10x handle
+ * @adap: pointer to AS10x bus adapter
* @filter: TSFilter filter for DVB-T
*
* Return 0 on success or negative value in case of error.
*/
-int as10x_cmd_add_PID_filter(as10x_handle_t *phandle,
+int as10x_cmd_add_PID_filter(struct as10x_bus_adapter_t *adap,
struct as10x_ts_filter *filter)
{
int error;
@@ -36,11 +36,11 @@ int as10x_cmd_add_PID_filter(as10x_handle_t *phandle,
ENTER();
- pcmd = phandle->cmd;
- prsp = phandle->rsp;
+ pcmd = adap->cmd;
+ prsp = adap->rsp;
/* prepare command */
- as10x_cmd_build(pcmd, (++phandle->cmd_xid),
+ as10x_cmd_build(pcmd, (++adap->cmd_xid),
sizeof(pcmd->body.add_pid_filter.req));
/* fill command */
@@ -55,8 +55,8 @@ int as10x_cmd_add_PID_filter(as10x_handle_t *phandle,
pcmd->body.add_pid_filter.req.idx = 0xFF;
/* send command */
- if (phandle->ops->xfer_cmd) {
- error = phandle->ops->xfer_cmd(phandle, (uint8_t *) pcmd,
+ if (adap->ops->xfer_cmd) {
+ error = adap->ops->xfer_cmd(adap, (uint8_t *) pcmd,
sizeof(pcmd->body.add_pid_filter.req)
+ HEADER_SIZE, (uint8_t *) prsp,
sizeof(prsp->body.add_pid_filter.rsp)
@@ -83,12 +83,12 @@ out:
/**
* as10x_cmd_del_PID_filter - Send delete filter command to AS10x
- * @phandle: pointer to AS10x handle
+ * @adap: pointer to AS10x bus adapter
* @pid_value: PID to delete
*
* Return 0 on success or negative value in case of error.
*/
-int as10x_cmd_del_PID_filter(as10x_handle_t *phandle,
+int as10x_cmd_del_PID_filter(struct as10x_bus_adapter_t *adap,
uint16_t pid_value)
{
int error;
@@ -96,11 +96,11 @@ int as10x_cmd_del_PID_filter(as10x_handle_t *phandle,
ENTER();
- pcmd = phandle->cmd;
- prsp = phandle->rsp;
+ pcmd = adap->cmd;
+ prsp = adap->rsp;
/* prepare command */
- as10x_cmd_build(pcmd, (++phandle->cmd_xid),
+ as10x_cmd_build(pcmd, (++adap->cmd_xid),
sizeof(pcmd->body.del_pid_filter.req));
/* fill command */
@@ -109,8 +109,8 @@ int as10x_cmd_del_PID_filter(as10x_handle_t *phandle,
pcmd->body.del_pid_filter.req.pid = cpu_to_le16(pid_value);
/* send command */
- if (phandle->ops->xfer_cmd) {
- error = phandle->ops->xfer_cmd(phandle, (uint8_t *) pcmd,
+ if (adap->ops->xfer_cmd) {
+ error = adap->ops->xfer_cmd(adap, (uint8_t *) pcmd,
sizeof(pcmd->body.del_pid_filter.req)
+ HEADER_SIZE, (uint8_t *) prsp,
sizeof(prsp->body.del_pid_filter.rsp)
@@ -132,22 +132,22 @@ out:
/**
* as10x_cmd_start_streaming - Send start streaming command to AS10x
- * @phandle: pointer to AS10x handle
+ * @adap: pointer to AS10x bus adapter
*
* Return 0 on success or negative value in case of error.
*/
-int as10x_cmd_start_streaming(as10x_handle_t *phandle)
+int as10x_cmd_start_streaming(struct as10x_bus_adapter_t *adap)
{
int error;
struct as10x_cmd_t *pcmd, *prsp;
ENTER();
- pcmd = phandle->cmd;
- prsp = phandle->rsp;
+ pcmd = adap->cmd;
+ prsp = adap->rsp;
/* prepare command */
- as10x_cmd_build(pcmd, (++phandle->cmd_xid),
+ as10x_cmd_build(pcmd, (++adap->cmd_xid),
sizeof(pcmd->body.start_streaming.req));
/* fill command */
@@ -155,8 +155,8 @@ int as10x_cmd_start_streaming(as10x_handle_t *phandle)
cpu_to_le16(CONTROL_PROC_START_STREAMING);
/* send command */
- if (phandle->ops->xfer_cmd) {
- error = phandle->ops->xfer_cmd(phandle, (uint8_t *) pcmd,
+ if (adap->ops->xfer_cmd) {
+ error = adap->ops->xfer_cmd(adap, (uint8_t *) pcmd,
sizeof(pcmd->body.start_streaming.req)
+ HEADER_SIZE, (uint8_t *) prsp,
sizeof(prsp->body.start_streaming.rsp)
@@ -178,22 +178,22 @@ out:
/**
* as10x_cmd_stop_streaming - Send stop streaming command to AS10x
- * @phandle: pointer to AS10x handle
+ * @adap: pointer to AS10x bus adapter
*
* Return 0 on success or negative value in case of error.
*/
-int as10x_cmd_stop_streaming(as10x_handle_t *phandle)
+int as10x_cmd_stop_streaming(struct as10x_bus_adapter_t *adap)
{
int8_t error;
struct as10x_cmd_t *pcmd, *prsp;
ENTER();
- pcmd = phandle->cmd;
- prsp = phandle->rsp;
+ pcmd = adap->cmd;
+ prsp = adap->rsp;
/* prepare command */
- as10x_cmd_build(pcmd, (++phandle->cmd_xid),
+ as10x_cmd_build(pcmd, (++adap->cmd_xid),
sizeof(pcmd->body.stop_streaming.req));
/* fill command */
@@ -201,8 +201,8 @@ int as10x_cmd_stop_streaming(as10x_handle_t *phandle)
cpu_to_le16(CONTROL_PROC_STOP_STREAMING);
/* send command */
- if (phandle->ops->xfer_cmd) {
- error = phandle->ops->xfer_cmd(phandle, (uint8_t *) pcmd,
+ if (adap->ops->xfer_cmd) {
+ error = adap->ops->xfer_cmd(adap, (uint8_t *) pcmd,
sizeof(pcmd->body.stop_streaming.req)
+ HEADER_SIZE, (uint8_t *) prsp,
sizeof(prsp->body.stop_streaming.rsp)
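Taken together, the filter and streaming commands are typically issued in sequence. A minimal sketch of that order, assuming a caller that already has an adapter and a PID to pass through (the filter values are illustrative):

static int example_start(struct as10x_bus_adapter_t *adap, uint16_t pid)
{
	struct as10x_ts_filter filter = {
		.pid  = pid,
		.type = TS_PID_TYPE_TS,	/* from as10x_types.h */
		.idx  = 0,		/* firmware assigns the real index */
	};
	int ret;

	ret = as10x_cmd_add_PID_filter(adap, &filter);
	if (ret < 0)
		return ret;

	return as10x_cmd_start_streaming(adap);
}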
diff --git a/drivers/staging/media/as102/as10x_handle.h b/drivers/staging/media/as102/as10x_handle.h
index 4f01a76e9829..62b9795ee424 100644
--- a/drivers/staging/media/as102/as10x_handle.h
+++ b/drivers/staging/media/as102/as10x_handle.h
@@ -17,41 +17,37 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifdef __KERNEL__
-struct as102_bus_adapter_t;
+struct as10x_bus_adapter_t;
struct as102_dev_t;
-#define as10x_handle_t struct as102_bus_adapter_t
#include "as10x_cmd.h"
/* values for "mode" field */
-#define REGMODE8 8
-#define REGMODE16 16
-#define REGMODE32 32
+#define REGMODE8 8
+#define REGMODE16 16
+#define REGMODE32 32
struct as102_priv_ops_t {
- int (*upload_fw_pkt) (struct as102_bus_adapter_t *bus_adap,
+ int (*upload_fw_pkt) (struct as10x_bus_adapter_t *bus_adap,
unsigned char *buf, int buflen, int swap32);
- int (*send_cmd) (struct as102_bus_adapter_t *bus_adap,
+ int (*send_cmd) (struct as10x_bus_adapter_t *bus_adap,
unsigned char *buf, int buflen);
- int (*xfer_cmd) (struct as102_bus_adapter_t *bus_adap,
+ int (*xfer_cmd) (struct as10x_bus_adapter_t *bus_adap,
unsigned char *send_buf, int send_buf_len,
unsigned char *recv_buf, int recv_buf_len);
-/*
- int (*pid_filter) (struct as102_bus_adapter_t *bus_adap,
- int index, u16 pid, int onoff);
-*/
+
int (*start_stream) (struct as102_dev_t *dev);
void (*stop_stream) (struct as102_dev_t *dev);
- int (*reset_target) (struct as102_bus_adapter_t *bus_adap);
+ int (*reset_target) (struct as10x_bus_adapter_t *bus_adap);
- int (*read_write)(struct as102_bus_adapter_t *bus_adap, uint8_t mode,
+ int (*read_write)(struct as10x_bus_adapter_t *bus_adap, uint8_t mode,
uint32_t rd_addr, uint16_t rd_len,
uint32_t wr_addr, uint16_t wr_len);
- int (*as102_read_ep2) (struct as102_bus_adapter_t *bus_adap,
+ int (*as102_read_ep2) (struct as10x_bus_adapter_t *bus_adap,
unsigned char *recv_buf,
int recv_buf_len);
};
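With the forward declaration renamed to struct as10x_bus_adapter_t, each bus backend supplies an as102_priv_ops_t that the command layer calls through. A hypothetical backend wiring with placeholder names (the real callbacks live in the bus-specific files, which are not part of this hunk):

static int example_xfer_cmd(struct as10x_bus_adapter_t *bus_adap,
			    unsigned char *send_buf, int send_buf_len,
			    unsigned char *recv_buf, int recv_buf_len)
{
	/* placeholder: a real backend would perform a USB or SPI transfer here */
	return 0;
}

static struct as102_priv_ops_t example_ops = {
	.xfer_cmd = example_xfer_cmd,
	/* .upload_fw_pkt, .send_cmd, .start_stream, ... are filled the same way */
};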
diff --git a/drivers/staging/media/as102/as10x_types.h b/drivers/staging/media/as102/as10x_types.h
index 3dedb3c1420a..fde8140ae88b 100644
--- a/drivers/staging/media/as102/as10x_types.h
+++ b/drivers/staging/media/as102/as10x_types.h
@@ -26,173 +26,169 @@
/*********************************/
/* bandwidth constant values */
-#define BW_5_MHZ 0x00
-#define BW_6_MHZ 0x01
-#define BW_7_MHZ 0x02
-#define BW_8_MHZ 0x03
+#define BW_5_MHZ 0x00
+#define BW_6_MHZ 0x01
+#define BW_7_MHZ 0x02
+#define BW_8_MHZ 0x03
/* hierarchy priority selection values */
-#define HIER_NO_PRIORITY 0x00
-#define HIER_LOW_PRIORITY 0x01
-#define HIER_HIGH_PRIORITY 0x02
+#define HIER_NO_PRIORITY 0x00
+#define HIER_LOW_PRIORITY 0x01
+#define HIER_HIGH_PRIORITY 0x02
/* constellation available values */
-#define CONST_QPSK 0x00
-#define CONST_QAM16 0x01
-#define CONST_QAM64 0x02
-#define CONST_UNKNOWN 0xFF
+#define CONST_QPSK 0x00
+#define CONST_QAM16 0x01
+#define CONST_QAM64 0x02
+#define CONST_UNKNOWN 0xFF
/* hierarchy available values */
-#define HIER_NONE 0x00
-#define HIER_ALPHA_1 0x01
-#define HIER_ALPHA_2 0x02
-#define HIER_ALPHA_4 0x03
-#define HIER_UNKNOWN 0xFF
+#define HIER_NONE 0x00
+#define HIER_ALPHA_1 0x01
+#define HIER_ALPHA_2 0x02
+#define HIER_ALPHA_4 0x03
+#define HIER_UNKNOWN 0xFF
/* interleaving available values */
-#define INTLV_NATIVE 0x00
-#define INTLV_IN_DEPTH 0x01
-#define INTLV_UNKNOWN 0xFF
+#define INTLV_NATIVE 0x00
+#define INTLV_IN_DEPTH 0x01
+#define INTLV_UNKNOWN 0xFF
/* code rate available values */
-#define CODE_RATE_1_2 0x00
-#define CODE_RATE_2_3 0x01
-#define CODE_RATE_3_4 0x02
-#define CODE_RATE_5_6 0x03
-#define CODE_RATE_7_8 0x04
-#define CODE_RATE_UNKNOWN 0xFF
+#define CODE_RATE_1_2 0x00
+#define CODE_RATE_2_3 0x01
+#define CODE_RATE_3_4 0x02
+#define CODE_RATE_5_6 0x03
+#define CODE_RATE_7_8 0x04
+#define CODE_RATE_UNKNOWN 0xFF
/* guard interval available values */
-#define GUARD_INT_1_32 0x00
-#define GUARD_INT_1_16 0x01
-#define GUARD_INT_1_8 0x02
-#define GUARD_INT_1_4 0x03
-#define GUARD_UNKNOWN 0xFF
+#define GUARD_INT_1_32 0x00
+#define GUARD_INT_1_16 0x01
+#define GUARD_INT_1_8 0x02
+#define GUARD_INT_1_4 0x03
+#define GUARD_UNKNOWN 0xFF
/* transmission mode available values */
-#define TRANS_MODE_2K 0x00
-#define TRANS_MODE_8K 0x01
-#define TRANS_MODE_4K 0x02
-#define TRANS_MODE_UNKNOWN 0xFF
+#define TRANS_MODE_2K 0x00
+#define TRANS_MODE_8K 0x01
+#define TRANS_MODE_4K 0x02
+#define TRANS_MODE_UNKNOWN 0xFF
/* DVBH signalling available values */
-#define TIMESLICING_PRESENT 0x01
-#define MPE_FEC_PRESENT 0x02
+#define TIMESLICING_PRESENT 0x01
+#define MPE_FEC_PRESENT 0x02
/* tune state available */
-#define TUNE_STATUS_NOT_TUNED 0x00
-#define TUNE_STATUS_IDLE 0x01
-#define TUNE_STATUS_LOCKING 0x02
-#define TUNE_STATUS_SIGNAL_DVB_OK 0x03
-#define TUNE_STATUS_STREAM_DETECTED 0x04
-#define TUNE_STATUS_STREAM_TUNED 0x05
-#define TUNE_STATUS_ERROR 0xFF
+#define TUNE_STATUS_NOT_TUNED 0x00
+#define TUNE_STATUS_IDLE 0x01
+#define TUNE_STATUS_LOCKING 0x02
+#define TUNE_STATUS_SIGNAL_DVB_OK 0x03
+#define TUNE_STATUS_STREAM_DETECTED 0x04
+#define TUNE_STATUS_STREAM_TUNED 0x05
+#define TUNE_STATUS_ERROR 0xFF
/* available TS FID filter types */
-#define TS_PID_TYPE_TS 0
-#define TS_PID_TYPE_PSI_SI 1
-#define TS_PID_TYPE_MPE 2
+#define TS_PID_TYPE_TS 0
+#define TS_PID_TYPE_PSI_SI 1
+#define TS_PID_TYPE_MPE 2
/* number of echos available */
-#define MAX_ECHOS 15
+#define MAX_ECHOS 15
/* Context types */
-#define CONTEXT_LNA 1010
-#define CONTEXT_ELNA_HYSTERESIS 4003
-#define CONTEXT_ELNA_GAIN 4004
-#define CONTEXT_MER_THRESHOLD 5005
-#define CONTEXT_MER_OFFSET 5006
-#define CONTEXT_IR_STATE 7000
-#define CONTEXT_TSOUT_MSB_FIRST 7004
-#define CONTEXT_TSOUT_FALLING_EDGE 7005
+#define CONTEXT_LNA 1010
+#define CONTEXT_ELNA_HYSTERESIS 4003
+#define CONTEXT_ELNA_GAIN 4004
+#define CONTEXT_MER_THRESHOLD 5005
+#define CONTEXT_MER_OFFSET 5006
+#define CONTEXT_IR_STATE 7000
+#define CONTEXT_TSOUT_MSB_FIRST 7004
+#define CONTEXT_TSOUT_FALLING_EDGE 7005
/* Configuration modes */
-#define CFG_MODE_ON 0
-#define CFG_MODE_OFF 1
-#define CFG_MODE_AUTO 2
+#define CFG_MODE_ON 0
+#define CFG_MODE_OFF 1
+#define CFG_MODE_AUTO 2
-#pragma pack(1)
struct as10x_tps {
- uint8_t constellation;
- uint8_t hierarchy;
- uint8_t interleaving_mode;
- uint8_t code_rate_HP;
- uint8_t code_rate_LP;
- uint8_t guard_interval;
- uint8_t transmission_mode;
- uint8_t DVBH_mask_HP;
- uint8_t DVBH_mask_LP;
- uint16_t cell_ID;
-};
+ uint8_t modulation;
+ uint8_t hierarchy;
+ uint8_t interleaving_mode;
+ uint8_t code_rate_HP;
+ uint8_t code_rate_LP;
+ uint8_t guard_interval;
+ uint8_t transmission_mode;
+ uint8_t DVBH_mask_HP;
+ uint8_t DVBH_mask_LP;
+ uint16_t cell_ID;
+} __packed;
struct as10x_tune_args {
- /* frequency */
- uint32_t freq;
- /* bandwidth */
- uint8_t bandwidth;
- /* hierarchy selection */
- uint8_t hier_select;
- /* constellation */
- uint8_t constellation;
- /* hierarchy */
- uint8_t hierarchy;
- /* interleaving mode */
- uint8_t interleaving_mode;
- /* code rate */
- uint8_t code_rate;
- /* guard interval */
- uint8_t guard_interval;
- /* transmission mode */
- uint8_t transmission_mode;
-};
+ /* frequency */
+ uint32_t freq;
+ /* bandwidth */
+ uint8_t bandwidth;
+ /* hierarchy selection */
+ uint8_t hier_select;
+ /* constellation */
+ uint8_t modulation;
+ /* hierarchy */
+ uint8_t hierarchy;
+ /* interleaving mode */
+ uint8_t interleaving_mode;
+ /* code rate */
+ uint8_t code_rate;
+ /* guard interval */
+ uint8_t guard_interval;
+ /* transmission mode */
+ uint8_t transmission_mode;
+} __packed;
struct as10x_tune_status {
- /* tune status */
- uint8_t tune_state;
- /* signal strength */
- int16_t signal_strength;
- /* packet error rate 10^-4 */
- uint16_t PER;
- /* bit error rate 10^-4 */
- uint16_t BER;
-};
+ /* tune status */
+ uint8_t tune_state;
+ /* signal strength */
+ int16_t signal_strength;
+ /* packet error rate 10^-4 */
+ uint16_t PER;
+ /* bit error rate 10^-4 */
+ uint16_t BER;
+} __packed;
struct as10x_demod_stats {
- /* frame counter */
- uint32_t frame_count;
- /* Bad frame counter */
- uint32_t bad_frame_count;
- /* Number of wrong bytes fixed by Reed-Solomon */
- uint32_t bytes_fixed_by_rs;
- /* Averaged MER */
- uint16_t mer;
- /* statistics calculation state indicator (started or not) */
- uint8_t has_started;
-};
+ /* frame counter */
+ uint32_t frame_count;
+ /* Bad frame counter */
+ uint32_t bad_frame_count;
+ /* Number of wrong bytes fixed by Reed-Solomon */
+ uint32_t bytes_fixed_by_rs;
+ /* Averaged MER */
+ uint16_t mer;
+ /* statistics calculation state indicator (started or not) */
+ uint8_t has_started;
+} __packed;
struct as10x_ts_filter {
- uint16_t pid; /** valid PID value 0x00 : 0x2000 */
- uint8_t type; /** Red TS_PID_TYPE_<N> values */
- uint8_t idx; /** index in filtering table */
-};
+ uint16_t pid; /* valid PID value 0x00 : 0x2000 */
+ uint8_t type; /* Red TS_PID_TYPE_<N> values */
+ uint8_t idx; /* index in filtering table */
+} __packed;
struct as10x_register_value {
- uint8_t mode;
- union {
- uint8_t value8; /* 8 bit value */
- uint16_t value16; /* 16 bit value */
- uint32_t value32; /* 32 bit value */
- }u;
-};
-
-#pragma pack()
+ uint8_t mode;
+ union {
+ uint8_t value8; /* 8 bit value */
+ uint16_t value16; /* 16 bit value */
+ uint32_t value32; /* 32 bit value */
+ } u;
+} __packed;
struct as10x_register_addr {
- /* register addr */
- uint32_t addr;
- /* register mode access */
- uint8_t mode;
+ /* register addr */
+ uint32_t addr;
+ /* register mode access */
+ uint8_t mode;
};
-
#endif
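The hunk above drops the #pragma pack(1)/#pragma pack() bracket and marks each wire structure __packed instead, which scopes the packing to exactly the structures that need it. A minimal illustration of the difference, with a hypothetical struct name:

/* old style: packing applies to everything between the pragmas */
#pragma pack(1)
struct example_wire_hdr_old {
	uint8_t  type;
	uint16_t len;	/* no padding inserted before this field */
};
#pragma pack()

/* new style: the attribute is attached only to the structure that needs it */
struct example_wire_hdr_new {
	uint8_t  type;
	uint16_t len;
} __packed;	/* kernel shorthand for __attribute__((packed)) */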
diff --git a/drivers/staging/media/dt3155v4l/dt3155v4l.c b/drivers/staging/media/dt3155v4l/dt3155v4l.c
index 04e93c49f03a..280c84ec4cc2 100644
--- a/drivers/staging/media/dt3155v4l/dt3155v4l.c
+++ b/drivers/staging/media/dt3155v4l/dt3155v4l.c
@@ -218,9 +218,10 @@ dt3155_start_acq(struct dt3155_priv *pd)
* driver-specific callbacks (vb2_ops)
*/
static int
-dt3155_queue_setup(struct vb2_queue *q, unsigned int *num_buffers,
- unsigned int *num_planes, unsigned long sizes[],
- void *alloc_ctxs[])
+dt3155_queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
+ unsigned int *num_buffers, unsigned int *num_planes,
+ unsigned int sizes[], void *alloc_ctxs[])
+
{
struct dt3155_priv *pd = vb2_get_drv_priv(q);
void *ret;
@@ -262,12 +263,6 @@ dt3155_buf_prepare(struct vb2_buffer *vb)
}
static int
-dt3155_start_streaming(struct vb2_queue *q)
-{
- return 0;
-}
-
-static int
dt3155_stop_streaming(struct vb2_queue *q)
{
struct dt3155_priv *pd = vb2_get_drv_priv(q);
@@ -308,7 +303,6 @@ const struct vb2_ops q_ops = {
.wait_prepare = dt3155_wait_prepare,
.wait_finish = dt3155_wait_finish,
.buf_prepare = dt3155_buf_prepare,
- .start_streaming = dt3155_start_streaming,
.stop_streaming = dt3155_stop_streaming,
.buf_queue = dt3155_buf_queue,
};
@@ -914,9 +908,10 @@ dt3155_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_req_region;
pd->regs = pci_iomap(pdev, 0, pci_resource_len(pd->pdev, 0));
- if (!pd->regs)
+ if (!pd->regs) {
err = -ENOMEM;
goto err_pci_iomap;
+ }
err = dt3155_init_board(pdev);
if (err)
goto err_init_board;
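The dt3155 hunk adapts queue_setup to the vb2 callback signature used here, adding a const struct v4l2_format * argument and switching sizes[] to unsigned int. A skeletal callback matching that signature; EXAMPLE_BUF_SIZE is a hypothetical stand-in for the driver's real frame size:

static int example_queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
			       unsigned int *num_buffers, unsigned int *num_planes,
			       unsigned int sizes[], void *alloc_ctxs[])
{
	if (*num_buffers < 2)		/* hypothetical minimum buffer count */
		*num_buffers = 2;
	*num_planes = 1;
	sizes[0] = EXAMPLE_BUF_SIZE;	/* hypothetical per-frame size */
	return 0;
}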
diff --git a/drivers/staging/media/easycap/easycap.h b/drivers/staging/media/easycap/easycap.h
index 7b256a948c27..a007e7442be8 100644
--- a/drivers/staging/media/easycap/easycap.h
+++ b/drivers/staging/media/easycap/easycap.h
@@ -98,7 +98,6 @@
#define EASYCAP_DRIVER_VERSION "0.9.01"
#define EASYCAP_DRIVER_DESCRIPTION "easycapdc60"
-#define USB_SKEL_MINOR_BASE 192
#define DONGLE_MANY 8
#define INPUT_MANY 6
/*---------------------------------------------------------------------------*/
@@ -324,8 +323,6 @@ struct easycap {
int lost[INPUT_MANY];
int merit[180];
- long long int dnbydt;
-
int video_interface;
int video_altsetting_on;
int video_altsetting_off;
@@ -353,7 +350,6 @@ struct easycap {
u8 *pcache;
int video_mt;
int audio_mt;
- long long audio_bytes;
u32 isequence;
int vma_many;
@@ -450,9 +446,6 @@ struct easycap {
* SOUND PROPERTIES
*/
/*---------------------------------------------------------------------------*/
-
- int audio_buffer_many;
-
int allocation_audio_urb;
int allocation_audio_page;
int allocation_audio_struct;
@@ -469,72 +462,53 @@ struct easycap {
* VIDEO FUNCTION PROTOTYPES
*/
/*^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^*/
+int easycap_newinput(struct easycap *, int);
+void easycap_testcard(struct easycap *, int);
+int easycap_isdongle(struct easycap *);
+
long easycap_unlocked_ioctl(struct file *, unsigned int, unsigned long);
-int easycap_dqbuf(struct easycap *, int);
-int submit_video_urbs(struct easycap *);
-int kill_video_urbs(struct easycap *);
-int field2frame(struct easycap *);
-int redaub(struct easycap *, void *, void *,
- int, int, u8, u8, bool);
-void easycap_testcard(struct easycap *, int);
-int fillin_formats(void);
-int newinput(struct easycap *, int);
-int adjust_standard(struct easycap *, v4l2_std_id);
-int adjust_format(struct easycap *, u32, u32, u32,
- int, bool);
-int adjust_brightness(struct easycap *, int);
-int adjust_contrast(struct easycap *, int);
-int adjust_saturation(struct easycap *, int);
-int adjust_hue(struct easycap *, int);
-int adjust_volume(struct easycap *, int);
+
+int easycap_video_dqbuf(struct easycap *, int);
+int easycap_video_submit_urbs(struct easycap *);
+int easycap_video_kill_urbs(struct easycap *);
+int easycap_video_fillin_formats(void);
+
+int adjust_standard(struct easycap *, v4l2_std_id);
+int adjust_format(struct easycap *, u32, u32, u32, int, bool);
+int adjust_brightness(struct easycap *, int);
+int adjust_contrast(struct easycap *, int);
+int adjust_saturation(struct easycap *, int);
+int adjust_hue(struct easycap *, int);
/*---------------------------------------------------------------------------*/
/*
* AUDIO FUNCTION PROTOTYPES
*/
/*---------------------------------------------------------------------------*/
-int easycap_alsa_probe(struct easycap *);
-void easycap_alsa_complete(struct urb *);
-
-int easycap_sound_setup(struct easycap *);
-int submit_audio_urbs(struct easycap *);
-int kill_audio_urbs(struct easycap *);
-void easyoss_testtone(struct easycap *, int);
-int audio_setup(struct easycap *);
+int easycap_alsa_probe(struct easycap *);
+int easycap_audio_kill_urbs(struct easycap *);
+void easycap_alsa_complete(struct urb *);
/*---------------------------------------------------------------------------*/
/*
* LOW-LEVEL FUNCTION PROTOTYPES
*/
/*---------------------------------------------------------------------------*/
-int audio_gainget(struct usb_device *);
-int audio_gainset(struct usb_device *, s8);
+int easycap_audio_gainset(struct usb_device *, s8);
+int easycap_audio_setup(struct easycap *);
-int set_interface(struct usb_device *, u16);
-int wakeup_device(struct usb_device *);
-int confirm_resolution(struct usb_device *);
-int confirm_stream(struct usb_device *);
+int easycap_wakeup_device(struct usb_device *);
-int setup_stk(struct usb_device *, bool);
-int setup_saa(struct usb_device *, bool);
-int setup_vt(struct usb_device *);
-int check_stk(struct usb_device *, bool);
-int check_saa(struct usb_device *, bool);
-int ready_saa(struct usb_device *);
-int merit_saa(struct usb_device *);
-int check_vt(struct usb_device *);
-int select_input(struct usb_device *, int, int);
-int set_resolution(struct usb_device *,
- u16, u16, u16, u16);
+int setup_stk(struct usb_device *, bool);
+int setup_saa(struct usb_device *, bool);
+int ready_saa(struct usb_device *);
+int merit_saa(struct usb_device *);
+int check_vt(struct usb_device *);
+int select_input(struct usb_device *, int, int);
+int set_resolution(struct usb_device *, u16, u16, u16, u16);
-int read_saa(struct usb_device *, u16);
-int read_stk(struct usb_device *, u32);
-int write_saa(struct usb_device *, u16, u16);
-int write_000(struct usb_device *, u16, u16);
-int start_100(struct usb_device *);
-int stop_100(struct usb_device *);
-int write_300(struct usb_device *);
-int read_vt(struct usb_device *, u16);
-int write_vt(struct usb_device *, u16, u16);
-int isdongle(struct easycap *);
+int read_saa(struct usb_device *, u16);
+int write_saa(struct usb_device *, u16, u16);
+int start_100(struct usb_device *);
+int stop_100(struct usb_device *);
/*---------------------------------------------------------------------------*/
@@ -588,7 +562,6 @@ extern bool easycap_readback;
extern const struct easycap_standard easycap_standard[];
extern struct easycap_format easycap_format[];
extern struct v4l2_queryctrl easycap_control[];
-extern struct usb_driver easycap_usb_driver;
extern struct easycap_dongle easycapdc60_dongle[];
#endif /* !__EASYCAP_H__ */
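The prototype churn above follows one rule: symbols still shared between easycap source files gain an easycap_ prefix, and helpers used by only one file lose their header entry and become static there. A toy illustration of that split, with hypothetical names:

/* shared across the driver: declared in the header, defined once */
int easycap_example_shared(struct easycap *peasycap);

/* single-file helper: no header entry, defined as static in its .c file */
static int example_local_helper(struct usb_device *p)
{
	return p ? 0 : -ENODEV;
}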
diff --git a/drivers/staging/media/easycap/easycap_ioctl.c b/drivers/staging/media/easycap/easycap_ioctl.c
index c99addfb6242..9413b37490c2 100644
--- a/drivers/staging/media/easycap/easycap_ioctl.c
+++ b/drivers/staging/media/easycap/easycap_ioctl.c
@@ -25,7 +25,6 @@
*/
/*****************************************************************************/
-#include <linux/version.h>
#include "easycap.h"
/*--------------------------------------------------------------------------*/
@@ -125,7 +124,7 @@ int adjust_standard(struct easycap *peasycap, v4l2_std_id std_id)
}
if (peasycap->video_isoc_streaming) {
resubmit = true;
- kill_video_urbs(peasycap);
+ easycap_video_kill_urbs(peasycap);
} else
resubmit = false;
/*--------------------------------------------------------------------------*/
@@ -331,7 +330,7 @@ int adjust_standard(struct easycap *peasycap, v4l2_std_id std_id)
"from 0x%02X to 0x%02X\n", reg, itwas, isnow);
}
if (resubmit)
- submit_video_urbs(peasycap);
+ easycap_video_submit_urbs(peasycap);
return 0;
}
/*****************************************************************************/
@@ -558,7 +557,7 @@ int adjust_format(struct easycap *peasycap,
peasycap->bytesperpixel * peasycap->width * peasycap->height;
if (peasycap->video_isoc_streaming) {
resubmit = true;
- kill_video_urbs(peasycap);
+ easycap_video_kill_urbs(peasycap);
} else
resubmit = false;
/*---------------------------------------------------------------------------*/
@@ -622,7 +621,7 @@ int adjust_format(struct easycap *peasycap,
}
/*---------------------------------------------------------------------------*/
if (resubmit)
- submit_video_urbs(peasycap);
+ easycap_video_submit_urbs(peasycap);
return peasycap_best_format - easycap_format;
}
@@ -667,16 +666,15 @@ int adjust_brightness(struct easycap *peasycap, int value)
peasycap->inputset[peasycap->input].brightness_ok = 1;
} else
JOM(8, "%i=peasycap->input\n", peasycap->input);
+
mood = 0x00FF & (unsigned int)peasycap->brightness;
- if (!write_saa(peasycap->pusb_device, 0x0A, mood)) {
- SAM("adjusting brightness to 0x%02X\n", mood);
- return 0;
- } else {
+ if (write_saa(peasycap->pusb_device, 0x0A, mood)) {
SAM("WARNING: failed to adjust brightness "
"to 0x%02X\n", mood);
return -ENOENT;
}
- break;
+ SAM("adjusting brightness to 0x%02X\n", mood);
+ return 0;
}
i1++;
}
@@ -726,15 +724,13 @@ int adjust_contrast(struct easycap *peasycap, int value)
JOM(8, "%i=peasycap->input\n", peasycap->input);
mood = 0x00FF & (unsigned int) (peasycap->contrast - 128);
- if (!write_saa(peasycap->pusb_device, 0x0B, mood)) {
- SAM("adjusting contrast to 0x%02X\n", mood);
- return 0;
- } else {
+ if (write_saa(peasycap->pusb_device, 0x0B, mood)) {
SAM("WARNING: failed to adjust contrast to "
"0x%02X\n", mood);
return -ENOENT;
}
- break;
+ SAM("adjusting contrast to 0x%02X\n", mood);
+ return 0;
}
i1++;
}
@@ -784,14 +780,13 @@ int adjust_saturation(struct easycap *peasycap, int value)
} else
JOM(8, "%i=peasycap->input\n", peasycap->input);
mood = 0x00FF & (unsigned int) (peasycap->saturation - 128);
- if (!write_saa(peasycap->pusb_device, 0x0C, mood)) {
- SAM("adjusting saturation to 0x%02X\n", mood);
- return 0;
- } else {
+ if (write_saa(peasycap->pusb_device, 0x0C, mood)) {
SAM("WARNING: failed to adjust saturation to "
"0x%02X\n", mood);
return -ENOENT;
}
+ SAM("adjusting saturation to 0x%02X\n", mood);
+ return 0;
break;
}
i1++;
@@ -839,13 +834,12 @@ int adjust_hue(struct easycap *peasycap, int value)
JOM(8, "%i=peasycap->input\n", peasycap->input);
i2 = peasycap->hue - 128;
mood = 0x00FF & ((int) i2);
- if (!write_saa(peasycap->pusb_device, 0x0D, mood)) {
- SAM("adjusting hue to 0x%02X\n", mood);
- return 0;
- } else {
+ if (write_saa(peasycap->pusb_device, 0x0D, mood)) {
SAM("WARNING: failed to adjust hue to 0x%02X\n", mood);
return -ENOENT;
}
+ SAM("adjusting hue to 0x%02X\n", mood);
+ return 0;
break;
}
i1++;
@@ -854,7 +848,7 @@ int adjust_hue(struct easycap *peasycap, int value)
return -ENOENT;
}
/*****************************************************************************/
-int adjust_volume(struct easycap *peasycap, int value)
+static int adjust_volume(struct easycap *peasycap, int value)
{
s8 mood;
int i1;
@@ -885,15 +879,13 @@ int adjust_volume(struct easycap *peasycap, int value)
mood = (16 > peasycap->volume) ? 16 :
((31 < peasycap->volume) ? 31 :
(s8) peasycap->volume);
- if (!audio_gainset(peasycap->pusb_device, mood)) {
- SAM("adjusting volume to 0x%02X\n", mood);
- return 0;
- } else {
+ if (!easycap_audio_gainset(peasycap->pusb_device, mood)) {
SAM("WARNING: failed to adjust volume to "
"0x%2X\n", mood);
return -ENOENT;
}
- break;
+ SAM("adjusting volume to 0x%02X\n", mood);
+ return 0;
}
i1++;
}
@@ -971,7 +963,7 @@ long easycap_unlocked_ioctl(struct file *file,
SAM("ERROR: peasycap->pusb_device is NULL\n");
return -EFAULT;
}
- kd = isdongle(peasycap);
+ kd = easycap_isdongle(peasycap);
if (0 <= kd && DONGLE_MANY > kd) {
if (mutex_lock_interruptible(&easycapdc60_dongle[kd].mutex_video)) {
SAY("ERROR: cannot lock "
@@ -986,7 +978,7 @@ long easycap_unlocked_ioctl(struct file *file,
* IF NECESSARY, BAIL OUT.
*/
/*---------------------------------------------------------------------------*/
- if (kd != isdongle(peasycap))
+ if (kd != easycap_isdongle(peasycap))
return -ERESTARTSYS;
if (!file) {
SAY("ERROR: file is NULL\n");
@@ -1226,7 +1218,7 @@ long easycap_unlocked_ioctl(struct file *file,
return -EINVAL;
}
- rc = newinput(peasycap, (int)index);
+ rc = easycap_newinput(peasycap, (int)index);
if (0 == rc) {
JOM(8, "newinput(.,%i) OK\n", (int)index);
} else {
@@ -2209,7 +2201,7 @@ long easycap_unlocked_ioctl(struct file *file,
if (!peasycap->polled) {
do {
- rcdq = easycap_dqbuf(peasycap, 0);
+ rcdq = easycap_video_dqbuf(peasycap, 0);
if (-EIO == rcdq) {
JOM(8, "returning -EIO because "
"dqbuf() returned -EIO\n");
@@ -2313,7 +2305,7 @@ long easycap_unlocked_ioctl(struct file *file,
mutex_unlock(&easycapdc60_dongle[kd].mutex_video);
return -EFAULT;
}
- submit_video_urbs(peasycap);
+ easycap_video_submit_urbs(peasycap);
peasycap->video_idle = 0;
peasycap->audio_idle = 0;
peasycap->video_eof = 0;
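The adjust_* hunks above all invert the write_saa() test so the failure path returns first and the success path falls through, which lets the unreachable break go away; only the adjust_volume hunk keeps a negated call, so its two branches read the opposite way from the others. The refactored shape in isolation, as a standalone sketch using the driver's own write_saa() and SAM() (register and value are illustrative):

static int example_adjust(struct usb_device *p, u16 reg, u16 val)
{
	if (write_saa(p, reg, val)) {	/* non-zero return means failure */
		SAM("WARNING: failed to adjust to 0x%02X\n", val);
		return -ENOENT;
	}
	SAM("adjusting to 0x%02X\n", val);
	return 0;
}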
diff --git a/drivers/staging/media/easycap/easycap_low.c b/drivers/staging/media/easycap/easycap_low.c
index 0385735ac6df..0380babed22c 100644
--- a/drivers/staging/media/easycap/easycap_low.c
+++ b/drivers/staging/media/easycap/easycap_low.c
@@ -40,6 +40,7 @@
#include "easycap.h"
+
#define GET(X, Y, Z) do { \
int __rc; \
*(Z) = (u16)0; \
@@ -59,9 +60,9 @@
/*--------------------------------------------------------------------------*/
static const struct stk1160config {
- int reg;
- int set;
-} stk1160configPAL[256] = {
+ u16 reg;
+ u16 set;
+} stk1160configPAL[] = {
{0x000, 0x0098},
{0x002, 0x0093},
@@ -103,7 +104,7 @@ static const struct stk1160config {
{0xFFF, 0xFFFF}
};
/*--------------------------------------------------------------------------*/
-static const struct stk1160config stk1160configNTSC[256] = {
+static const struct stk1160config stk1160configNTSC[] = {
{0x000, 0x0098},
{0x002, 0x0093},
@@ -146,9 +147,9 @@ static const struct stk1160config stk1160configNTSC[256] = {
};
/*--------------------------------------------------------------------------*/
static const struct saa7113config {
- int reg;
- int set;
-} saa7113configPAL[256] = {
+ u8 reg;
+ u8 set;
+} saa7113configPAL[] = {
{0x01, 0x08},
{0x02, 0x80},
{0x03, 0x33},
@@ -202,7 +203,7 @@ static const struct saa7113config {
{0xFF, 0xFF}
};
/*--------------------------------------------------------------------------*/
-static const struct saa7113config saa7113configNTSC[256] = {
+static const struct saa7113config saa7113configNTSC[] = {
{0x01, 0x08},
{0x02, 0x80},
{0x03, 0x33},
@@ -355,101 +356,6 @@ static int wait_i2c(struct usb_device *p)
}
/****************************************************************************/
-int confirm_resolution(struct usb_device *p)
-{
- u8 get0, get1, get2, get3, get4, get5, get6, get7;
-
- if (!p)
- return -ENODEV;
- GET(p, 0x0110, &get0);
- GET(p, 0x0111, &get1);
- GET(p, 0x0112, &get2);
- GET(p, 0x0113, &get3);
- GET(p, 0x0114, &get4);
- GET(p, 0x0115, &get5);
- GET(p, 0x0116, &get6);
- GET(p, 0x0117, &get7);
- JOT(8, "0x%03X, 0x%03X, "
- "0x%03X, 0x%03X, "
- "0x%03X, 0x%03X, "
- "0x%03X, 0x%03X\n",
- get0, get1, get2, get3, get4, get5, get6, get7);
- JOT(8, "....cf PAL_720x526: "
- "0x%03X, 0x%03X, "
- "0x%03X, 0x%03X, "
- "0x%03X, 0x%03X, "
- "0x%03X, 0x%03X\n",
- 0x000, 0x000, 0x001, 0x000, 0x5A0, 0x005, 0x121, 0x001);
- JOT(8, "....cf PAL_704x526: "
- "0x%03X, 0x%03X, "
- "0x%03X, 0x%03X, "
- "0x%03X, 0x%03X, "
- "0x%03X, 0x%03X\n",
- 0x004, 0x000, 0x001, 0x000, 0x584, 0x005, 0x121, 0x001);
- JOT(8, "....cf VGA_640x480: "
- "0x%03X, 0x%03X, "
- "0x%03X, 0x%03X, "
- "0x%03X, 0x%03X, "
- "0x%03X, 0x%03X\n",
- 0x008, 0x000, 0x020, 0x000, 0x508, 0x005, 0x110, 0x001);
- return 0;
-}
-/****************************************************************************/
-int confirm_stream(struct usb_device *p)
-{
- u16 get2;
- u8 igot;
-
- if (!p)
- return -ENODEV;
- GET(p, 0x0100, &igot); get2 = 0x80 & igot;
- if (0x80 == get2)
- JOT(8, "confirm_stream: OK\n");
- else
- JOT(8, "confirm_stream: STUCK\n");
- return 0;
-}
-/****************************************************************************/
-int setup_stk(struct usb_device *p, bool ntsc)
-{
- int i;
- const struct stk1160config *cfg;
- if (!p)
- return -ENODEV;
- cfg = (ntsc) ? stk1160configNTSC : stk1160configPAL;
- for (i = 0; cfg[i].reg != 0xFFF; i++)
- SET(p, cfg[i].reg, cfg[i].set);
-
- write_300(p);
-
- return 0;
-}
-/****************************************************************************/
-int setup_saa(struct usb_device *p, bool ntsc)
-{
- int i, ir;
- const struct saa7113config *cfg;
- if (!p)
- return -ENODEV;
- cfg = (ntsc) ? saa7113configNTSC : saa7113configPAL;
- for (i = 0; cfg[i].reg != 0xFF; i++)
- ir = write_saa(p, cfg[i].reg, cfg[i].set);
- return 0;
-}
-/****************************************************************************/
-int write_000(struct usb_device *p, u16 set2, u16 set0)
-{
- u8 igot0, igot2;
-
- if (!p)
- return -ENODEV;
- GET(p, 0x0002, &igot2);
- GET(p, 0x0000, &igot0);
- SET(p, 0x0002, set2);
- SET(p, 0x0000, set0);
- return 0;
-}
-/****************************************************************************/
int write_saa(struct usb_device *p, u16 reg0, u16 set0)
{
if (!p)
@@ -470,8 +376,7 @@ int write_saa(struct usb_device *p, u16 reg0, u16 set0)
* REGISTER 504: TARGET ADDRESS ON VT1612A
*/
/*--------------------------------------------------------------------------*/
-int
-write_vt(struct usb_device *p, u16 reg0, u16 set0)
+static int write_vt(struct usb_device *p, u16 reg0, u16 set0)
{
u8 igot;
u16 got502, got503;
@@ -508,7 +413,7 @@ write_vt(struct usb_device *p, u16 reg0, u16 set0)
* REGISTER 504: TARGET ADDRESS ON VT1612A
*/
/*--------------------------------------------------------------------------*/
-int read_vt(struct usb_device *p, u16 reg0)
+static int read_vt(struct usb_device *p, u16 reg0)
{
u8 igot;
u16 got502, got503;
@@ -532,7 +437,7 @@ int read_vt(struct usb_device *p, u16 reg0)
* THESE APPEAR TO HAVE NO EFFECT ON EITHER VIDEO OR AUDIO.
*/
/*--------------------------------------------------------------------------*/
-int write_300(struct usb_device *p)
+static int write_300(struct usb_device *p)
{
if (!p)
return -ENODEV;
@@ -545,32 +450,36 @@ int write_300(struct usb_device *p)
return 0;
}
/****************************************************************************/
-/*--------------------------------------------------------------------------*/
-/*
- * NOTE: THE FOLLOWING IS NOT CHECKED:
- * REGISTER 0x0F, WHICH IS INVOLVED IN CHROMINANCE AUTOMATIC GAIN CONTROL.
- */
-/*--------------------------------------------------------------------------*/
-int check_saa(struct usb_device *p, bool ntsc)
+/****************************************************************************/
+int setup_stk(struct usb_device *p, bool ntsc)
{
- int i, ir, rc = 0;
- struct saa7113config const *cfg;
+ int i;
+ const struct stk1160config *cfg;
if (!p)
return -ENODEV;
+ cfg = (ntsc) ? stk1160configNTSC : stk1160configPAL;
+ for (i = 0; cfg[i].reg != 0xFFF; i++)
+ SET(p, cfg[i].reg, cfg[i].set);
+
+ write_300(p);
- cfg = (ntsc) ? saa7113configNTSC : saa7113configPAL;
+ return 0;
+}
+/****************************************************************************/
+int setup_saa(struct usb_device *p, bool ntsc)
+{
+ int i, rc;
+ const struct saa7113config *cfg;
+ if (!p)
+ return -ENODEV;
+ cfg = (ntsc) ? saa7113configNTSC : saa7113configPAL;
for (i = 0; cfg[i].reg != 0xFF; i++) {
- if (0x0F == cfg[i].reg)
- continue;
- ir = read_saa(p, cfg[i].reg);
- if (ir != cfg[i].set) {
- SAY("SAA register 0x%02X has 0x%02X, expected 0x%02X\n",
- cfg[i].reg, ir, cfg[i].set);
- rc--;
- }
+ rc = write_saa(p, cfg[i].reg, cfg[i].set);
+ if (rc)
+ dev_err(&p->dev,
+ "Failed to set SAA register %d", cfg[i].reg);
}
-
- return (rc < -8) ? rc : 0;
+ return 0;
}
/****************************************************************************/
int merit_saa(struct usb_device *p)
@@ -609,60 +518,22 @@ int ready_saa(struct usb_device *p)
msleep(marktime);
j++;
}
+
if (max == j)
return -1;
- else {
- if (0x20 & rc) {
- rate = 2;
- JOT(8, "hardware detects 60 Hz\n");
- } else {
- rate = 0;
- JOT(8, "hardware detects 50 Hz\n");
- }
- if (0x80 & rc)
- JOT(8, "hardware detects interlacing\n");
- else {
- rate++;
- JOT(8, "hardware detects no interlacing\n");
- }
- }
- return 0;
-}
-/****************************************************************************/
-/*--------------------------------------------------------------------------*/
-/*
- * NOTE: THE FOLLOWING ARE NOT CHECKED:
- * REGISTERS 0x000, 0x002: FUNCTIONALITY IS NOT KNOWN
- * REGISTER 0x100: ACCEPT ALSO (0x80 | stk1160config....[.].set)
- */
-/*--------------------------------------------------------------------------*/
-int check_stk(struct usb_device *p, bool ntsc)
-{
- int i, ir;
- const struct stk1160config *cfg;
-
- if (!p)
- return -ENODEV;
- cfg = (ntsc) ? stk1160configNTSC : stk1160configPAL;
-
- for (i = 0; 0xFFF != cfg[i].reg; i++) {
- if (0x000 == cfg[i].reg || 0x002 == cfg[i].reg)
- continue;
-
- ir = read_stk(p, cfg[i].reg);
- if (0x100 == cfg[i].reg) {
- if ((ir != (0xFF & cfg[i].set)) &&
- (ir != (0x80 | (0xFF & cfg[i].set))) &&
- (0xFFFF != cfg[i].set)) {
- SAY("STK reg[0x%03X]=0x%02X expected 0x%02X\n",
- cfg[i].reg, ir, cfg[i].set);
- }
- continue;
- }
- if ((ir != (0xFF & cfg[i].set)) && (0xFFFF != cfg[i].set))
- SAY("STK register 0x%03X has 0x%02X,expected 0x%02X\n",
- cfg[i].reg, ir, cfg[i].set);
+ if (0x20 & rc) {
+ rate = 2;
+ JOT(8, "hardware detects 60 Hz\n");
+ } else {
+ rate = 0;
+ JOT(8, "hardware detects 50 Hz\n");
+ }
+ if (0x80 & rc)
+ JOT(8, "hardware detects interlacing\n");
+ else {
+ rate++;
+ JOT(8, "hardware detects no interlacing\n");
}
return 0;
}
@@ -682,7 +553,7 @@ int read_saa(struct usb_device *p, u16 reg0)
return igot;
}
/****************************************************************************/
-int read_stk(struct usb_device *p, u32 reg0)
+static int read_stk(struct usb_device *p, u32 reg0)
{
u8 igot;
@@ -692,27 +563,7 @@ int read_stk(struct usb_device *p, u32 reg0)
GET(p, reg0, &igot);
return igot;
}
-/****************************************************************************/
-/*--------------------------------------------------------------------------*/
-/*
- * HARDWARE USERSPACE INPUT NUMBER PHYSICAL INPUT DRIVER input VALUE
- *
- * CVBS+S-VIDEO 0 or 1 CVBS 1
- * FOUR-CVBS 0 or 1 CVBS1 1
- * FOUR-CVBS 2 CVBS2 2
- * FOUR-CVBS 3 CVBS3 3
- * FOUR-CVBS 4 CVBS4 4
- * CVBS+S-VIDEO 5 S-VIDEO 5
- *
- * WHEN 5==input THE ARGUMENT mode MUST ALSO BE SUPPLIED:
- *
- * mode 7 => GAIN TO BE SET EXPLICITLY USING REGISTER 0x05 (UNTESTED)
- * mode 9 => USE AUTOMATIC GAIN CONTROL (DEFAULT)
- *
-*/
-/*---------------------------------------------------------------------------*/
-int
-select_input(struct usb_device *p, int input, int mode)
+int select_input(struct usb_device *p, int input, int mode)
{
int ir;
@@ -877,10 +728,11 @@ int stop_100(struct usb_device *p)
/****************************************************************************/
/****************************************************************************/
/*****************************************************************************/
-int wakeup_device(struct usb_device *pusb_device)
+int easycap_wakeup_device(struct usb_device *pusb_device)
{
if (!pusb_device)
return -ENODEV;
+
return usb_control_msg(pusb_device, usb_sndctrlpipe(pusb_device, 0),
USB_REQ_SET_FEATURE,
USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
@@ -888,8 +740,7 @@ int wakeup_device(struct usb_device *pusb_device)
0, NULL, 0, 50000);
}
/*****************************************************************************/
-int
-audio_setup(struct easycap *peasycap)
+int easycap_audio_setup(struct easycap *peasycap)
{
struct usb_device *pusb_device;
u8 buffer[1];
@@ -970,7 +821,7 @@ audio_setup(struct easycap *peasycap)
* SELECT AUDIO SOURCE "LINE IN" AND SET THE AUDIO GAIN.
*/
/*---------------------------------------------------------------------------*/
- if (0 != audio_gainset(pusb_device, peasycap->gain))
+ if (easycap_audio_gainset(pusb_device, peasycap->gain))
SAY("ERROR: audio_gainset() failed\n");
check_vt(pusb_device);
return 0;
@@ -1047,7 +898,7 @@ int check_vt(struct usb_device *pusb_device)
* 31 12.0 22.5 34.5
*/
/*---------------------------------------------------------------------------*/
-int audio_gainset(struct usb_device *pusb_device, s8 loud)
+int easycap_audio_gainset(struct usb_device *pusb_device, s8 loud)
{
int igot;
u8 tmp;
@@ -1115,15 +966,3 @@ int audio_gainset(struct usb_device *pusb_device, s8 loud)
return 0;
}
/*****************************************************************************/
-int audio_gainget(struct usb_device *pusb_device)
-{
- int igot;
-
- if (!pusb_device)
- return -ENODEV;
- igot = read_vt(pusb_device, 0x001C);
- if (0 > igot)
- SAY("ERROR: failed to read VT1612A register 0x1C\n");
- return igot;
-}
-/*****************************************************************************/
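easycap_low.c also shrinks the fixed [256] register tables to sentinel-terminated arrays and walks them until the 0xFFF (or 0xFF) end marker. The idiom in isolation, reusing the file's SET() register-write macro; the table contents here are illustrative:

static const struct {
	u16 reg;
	u16 set;
} example_cfg[] = {
	{0x000, 0x0098},
	{0x002, 0x0093},
	{0xFFF, 0xFFFF},	/* sentinel: terminates the walk */
};

static void example_apply(struct usb_device *p)
{
	int i;

	for (i = 0; example_cfg[i].reg != 0xFFF; i++)
		SET(p, example_cfg[i].reg, example_cfg[i].set);
}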
diff --git a/drivers/staging/media/easycap/easycap_main.c b/drivers/staging/media/easycap/easycap_main.c
index a45c0b507067..8ff5f38ea196 100644
--- a/drivers/staging/media/easycap/easycap_main.c
+++ b/drivers/staging/media/easycap/easycap_main.c
@@ -66,6 +66,10 @@ struct easycap_dongle easycapdc60_dongle[DONGLE_MANY];
static struct mutex mutex_dongle;
static void easycap_complete(struct urb *purb);
static int reset(struct easycap *peasycap);
+static int field2frame(struct easycap *peasycap);
+static int redaub(struct easycap *peasycap,
+ void *pad, void *pex, int much, int more,
+ u8 mask, u8 margin, bool isuy);
const char *strerror(int err)
{
@@ -109,23 +113,13 @@ const char *strerror(int err)
#undef ERRNOSTR
}
-/*---------------------------------------------------------------------------*/
-/*
- * PARAMETERS USED WHEN REGISTERING THE VIDEO INTERFACE
- *
- * NOTE: SOME KERNELS IGNORE usb_class_driver.minor_base, AS MENTIONED BY
- * CORBET ET AL. "LINUX DEVICE DRIVERS", 3rd EDITION, PAGE 253.
- * THIS IS THE CASE FOR OpenSUSE.
- */
-/*---------------------------------------------------------------------------*/
-/*^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^*/
/****************************************************************************/
/*---------------------------------------------------------------------------*/
/*
* THIS ROUTINE DOES NOT DETECT DUPLICATE OCCURRENCES OF POINTER peasycap
*/
/*---------------------------------------------------------------------------*/
-int isdongle(struct easycap *peasycap)
+int easycap_isdongle(struct easycap *peasycap)
{
int k;
if (!peasycap)
@@ -161,14 +155,13 @@ static int easycap_open(struct inode *inode, struct file *file)
if (!peasycap->pusb_device) {
SAM("ERROR: peasycap->pusb_device is NULL\n");
return -EFAULT;
- } else {
- JOM(16, "peasycap->pusb_device=%p\n", peasycap->pusb_device);
}
+
+ JOM(16, "peasycap->pusb_device=%p\n", peasycap->pusb_device);
+
file->private_data = peasycap;
- rc = wakeup_device(peasycap->pusb_device);
- if (0 == rc)
- JOM(8, "wakeup_device() OK\n");
- else {
+ rc = easycap_wakeup_device(peasycap->pusb_device);
+ if (rc) {
SAM("ERROR: wakeup_device() rc = %i\n", rc);
if (-ENODEV == rc)
SAM("ERROR: wakeup_device() returned -ENODEV\n");
@@ -176,6 +169,7 @@ static int easycap_open(struct inode *inode, struct file *file)
SAM("ERROR: wakeup_device() rc = %i\n", rc);
return rc;
}
+ JOM(8, "wakeup_device() OK\n");
peasycap->input = 0;
rc = reset(peasycap);
if (rc) {
@@ -303,7 +297,7 @@ static int reset(struct easycap *peasycap)
peasycap->saturation = -8192;
peasycap->hue = -8192;
- rc = newinput(peasycap, input);
+ rc = easycap_newinput(peasycap, input);
if (rc) {
SAM("ERROR: newinput(.,%i) rc = %i\n", rc, input);
@@ -364,8 +358,7 @@ static int reset(struct easycap *peasycap)
* SO IT SHOULD WRITE ONLY SPARINGLY TO THE LOGFILE.
*/
/*---------------------------------------------------------------------------*/
-int
-newinput(struct easycap *peasycap, int input)
+int easycap_newinput(struct easycap *peasycap, int input)
{
int rc, k, m, mood, off;
int inputnow, video_idlenow, audio_idlenow;
@@ -397,7 +390,7 @@ newinput(struct easycap *peasycap, int input)
peasycap->audio_idle = 1;
if (peasycap->video_isoc_streaming) {
resubmit = true;
- kill_video_urbs(peasycap);
+ easycap_video_kill_urbs(peasycap);
} else {
resubmit = false;
}
@@ -532,7 +525,7 @@ newinput(struct easycap *peasycap, int input)
return -EFAULT;
}
if (resubmit)
- submit_video_urbs(peasycap);
+ easycap_video_submit_urbs(peasycap);
peasycap->video_isoc_sequence = VIDEO_ISOC_BUFFER_MANY - 1;
peasycap->video_idle = video_idlenow;
@@ -542,7 +535,7 @@ newinput(struct easycap *peasycap, int input)
return 0;
}
/*****************************************************************************/
-int submit_video_urbs(struct easycap *peasycap)
+int easycap_video_submit_urbs(struct easycap *peasycap)
{
struct data_urb *pdata_urb;
struct urb *purb;
@@ -616,43 +609,53 @@ int submit_video_urbs(struct easycap *peasycap)
peasycap->video_eof = 1;
}
- if (isbad) {
- JOM(4, "attempting cleanup instead of submitting\n");
- list_for_each(plist_head, (peasycap->purb_video_head)) {
- pdata_urb = list_entry(plist_head,
- struct data_urb, list_head);
- if (pdata_urb) {
- purb = pdata_urb->purb;
- if (purb)
- usb_kill_urb(purb);
- }
- }
- peasycap->video_isoc_streaming = 0;
- } else {
+ if (isbad)
+ easycap_video_kill_urbs(peasycap);
+ else
peasycap->video_isoc_streaming = 1;
- JOM(4, "submitted %i video urbs\n", m);
- }
} else {
JOM(4, "already streaming video urbs\n");
}
return 0;
}
/*****************************************************************************/
-int kill_video_urbs(struct easycap *peasycap)
+int easycap_audio_kill_urbs(struct easycap *peasycap)
{
int m;
struct list_head *plist_head;
struct data_urb *pdata_urb;
- if (!peasycap) {
- SAY("ERROR: peasycap is NULL\n");
+ if (!peasycap->audio_isoc_streaming)
+ return 0;
+
+ if (!peasycap->purb_audio_head) {
+ SAM("ERROR: peasycap->purb_audio_head is NULL\n");
return -EFAULT;
}
- if (!peasycap->video_isoc_streaming) {
- JOM(8, "%i=video_isoc_streaming, no video urbs killed\n",
- peasycap->video_isoc_streaming);
- return 0;
+
+ peasycap->audio_isoc_streaming = 0;
+ m = 0;
+ list_for_each(plist_head, peasycap->purb_audio_head) {
+ pdata_urb = list_entry(plist_head, struct data_urb, list_head);
+ if (pdata_urb && pdata_urb->purb) {
+ usb_kill_urb(pdata_urb->purb);
+ m++;
+ }
}
+
+ JOM(4, "%i audio urbs killed\n", m);
+
+ return 0;
+}
+int easycap_video_kill_urbs(struct easycap *peasycap)
+{
+ int m;
+ struct list_head *plist_head;
+ struct data_urb *pdata_urb;
+
+ if (!peasycap->video_isoc_streaming)
+ return 0;
+
if (!peasycap->purb_video_head) {
SAM("ERROR: peasycap->purb_video_head is NULL\n");
return -EFAULT;
@@ -690,8 +693,8 @@ static int videodev_release(struct video_device *pvideo_device)
SAY("ending unsuccessfully\n");
return -EFAULT;
}
- if (0 != kill_video_urbs(peasycap)) {
- SAM("ERROR: kill_video_urbs() failed\n");
+ if (easycap_video_kill_urbs(peasycap)) {
+ SAM("ERROR: easycap_video_kill_urbs() failed\n");
return -EFAULT;
}
JOM(4, "ending successfully\n");
@@ -727,27 +730,22 @@ static void easycap_delete(struct kref *pkref)
SAM("ERROR: peasycap is NULL: cannot perform deletions\n");
return;
}
- kd = isdongle(peasycap);
+ kd = easycap_isdongle(peasycap);
/*---------------------------------------------------------------------------*/
/*
* FREE VIDEO.
*/
/*---------------------------------------------------------------------------*/
if (peasycap->purb_video_head) {
- JOM(4, "freeing video urbs\n");
m = 0;
- list_for_each(plist_head, (peasycap->purb_video_head)) {
+ list_for_each(plist_head, peasycap->purb_video_head) {
pdata_urb = list_entry(plist_head,
struct data_urb, list_head);
- if (!pdata_urb) {
- JOM(4, "ERROR: pdata_urb is NULL\n");
- } else {
- if (pdata_urb->purb) {
- usb_free_urb(pdata_urb->purb);
- pdata_urb->purb = NULL;
- peasycap->allocation_video_urb -= 1;
- m++;
- }
+ if (pdata_urb && pdata_urb->purb) {
+ usb_free_urb(pdata_urb->purb);
+ pdata_urb->purb = NULL;
+ peasycap->allocation_video_urb--;
+ m++;
}
}
@@ -763,7 +761,6 @@ static void easycap_delete(struct kref *pkref)
peasycap->allocation_video_struct -=
sizeof(struct data_urb);
kfree(pdata_urb);
- pdata_urb = NULL;
m++;
}
}
@@ -828,15 +825,11 @@ static void easycap_delete(struct kref *pkref)
list_for_each(plist_head, (peasycap->purb_audio_head)) {
pdata_urb = list_entry(plist_head,
struct data_urb, list_head);
- if (!pdata_urb)
- JOM(4, "ERROR: pdata_urb is NULL\n");
- else {
- if (pdata_urb->purb) {
- usb_free_urb(pdata_urb->purb);
- pdata_urb->purb = NULL;
- peasycap->allocation_audio_urb -= 1;
- m++;
- }
+ if (pdata_urb && pdata_urb->purb) {
+ usb_free_urb(pdata_urb->purb);
+ pdata_urb->purb = NULL;
+ peasycap->allocation_audio_urb--;
+ m++;
}
}
JOM(4, "%i audio urbs freed\n", m);
@@ -851,7 +844,6 @@ static void easycap_delete(struct kref *pkref)
peasycap->allocation_audio_struct -=
sizeof(struct data_urb);
kfree(pdata_urb);
- pdata_urb = NULL;
m++;
}
}
@@ -940,7 +932,7 @@ static unsigned int easycap_poll(struct file *file, poll_table *wait)
return -EFAULT;
}
/*---------------------------------------------------------------------------*/
- kd = isdongle(peasycap);
+ kd = easycap_isdongle(peasycap);
if (0 <= kd && DONGLE_MANY > kd) {
if (mutex_lock_interruptible(&easycapdc60_dongle[kd].mutex_video)) {
SAY("ERROR: cannot down dongle[%i].mutex_video\n", kd);
@@ -952,7 +944,7 @@ static unsigned int easycap_poll(struct file *file, poll_table *wait)
* peasycap, IN WHICH CASE A REPEAT CALL TO isdongle() WILL FAIL.
* IF NECESSARY, BAIL OUT.
*/
- if (kd != isdongle(peasycap)) {
+ if (kd != easycap_isdongle(peasycap)) {
mutex_unlock(&easycapdc60_dongle[kd].mutex_video);
return -ERESTARTSYS;
}
@@ -980,21 +972,21 @@ static unsigned int easycap_poll(struct file *file, poll_table *wait)
*/
return -ERESTARTSYS;
/*---------------------------------------------------------------------------*/
- rc = easycap_dqbuf(peasycap, 0);
+ rc = easycap_video_dqbuf(peasycap, 0);
peasycap->polled = 1;
mutex_unlock(&easycapdc60_dongle[kd].mutex_video);
- if (0 == rc)
- return POLLIN | POLLRDNORM;
- else
+ if (rc)
return POLLERR;
- }
+
+ return POLLIN | POLLRDNORM;
+}
/*****************************************************************************/
/*---------------------------------------------------------------------------*/
/*
* IF mode IS NONZERO THIS ROUTINE RETURNS -EAGAIN RATHER THAN BLOCKING.
*/
/*---------------------------------------------------------------------------*/
-int easycap_dqbuf(struct easycap *peasycap, int mode)
+int easycap_video_dqbuf(struct easycap *peasycap, int mode)
{
int input, ifield, miss, rc;
@@ -1080,7 +1072,7 @@ int easycap_dqbuf(struct easycap *peasycap, int mode)
JOM(8, " ... failed returning -EIO\n");
peasycap->video_eof = 1;
peasycap->audio_eof = 1;
- kill_video_urbs(peasycap);
+ easycap_video_kill_urbs(peasycap);
return -EIO;
}
peasycap->status = 0;
@@ -1090,7 +1082,7 @@ int easycap_dqbuf(struct easycap *peasycap, int mode)
#endif /*PERSEVERE*/
peasycap->video_eof = 1;
peasycap->audio_eof = 1;
- kill_video_urbs(peasycap);
+ easycap_video_kill_urbs(peasycap);
JOM(8, "returning -EIO\n");
return -EIO;
}
@@ -1143,7 +1135,7 @@ int easycap_dqbuf(struct easycap *peasycap, int mode)
JOM(8, " ... failed returning -EIO\n");
peasycap->video_eof = 1;
peasycap->audio_eof = 1;
- kill_video_urbs(peasycap);
+ easycap_video_kill_urbs(peasycap);
return -EIO;
}
peasycap->status = 0;
@@ -1153,7 +1145,7 @@ int easycap_dqbuf(struct easycap *peasycap, int mode)
#endif /*PERSEVERE*/
peasycap->video_eof = 1;
peasycap->audio_eof = 1;
- kill_video_urbs(peasycap);
+ easycap_video_kill_urbs(peasycap);
JOM(8, "returning -EIO\n");
return -EIO;
}
@@ -1207,12 +1199,9 @@ int easycap_dqbuf(struct easycap *peasycap, int mode)
* WHEN BOOLEAN PARAMETER decimatepixel IS true, ONLY THE FIELD FOR WHICH
* odd==false IS TRANSFERRED TO THE FRAME BUFFER.
*
- * THE BOOLEAN PARAMETER offerfields IS true ONLY WHEN THE USER PROGRAM
- * CHOOSES THE OPTION V4L2_FIELD_INTERLACED.
*/
/*---------------------------------------------------------------------------*/
-int
-field2frame(struct easycap *peasycap)
+static int field2frame(struct easycap *peasycap)
{
void *pex, *pad;
@@ -1221,7 +1210,7 @@ field2frame(struct easycap *peasycap)
int rc, bytesperpixel, multiplier;
int much, more, over, rump, caches, input;
u8 mask, margin;
- bool odd, isuy, decimatepixel, offerfields, badinput;
+ bool odd, isuy, decimatepixel, badinput;
if (!peasycap) {
SAY("ERROR: peasycap is NULL\n");
@@ -1237,8 +1226,6 @@ field2frame(struct easycap *peasycap)
peasycap->field_buffer[peasycap->field_read][0].input,
peasycap->field_read, peasycap->frame_fill);
JOM(8, "===== %i=bytesperpixel\n", peasycap->bytesperpixel);
- if (peasycap->offerfields)
- JOM(8, "===== offerfields\n");
/*---------------------------------------------------------------------------*/
/*
@@ -1260,7 +1247,6 @@ field2frame(struct easycap *peasycap)
#endif /*EASYCAP_TESTCARD*/
/*---------------------------------------------------------------------------*/
- offerfields = peasycap->offerfields;
bytesperpixel = peasycap->bytesperpixel;
decimatepixel = peasycap->decimatepixel;
@@ -1601,9 +1587,9 @@ field2frame(struct easycap *peasycap)
* REDUCE CODE LENGTH WILL GENERALLY IMPAIR RUNTIME PERFORMANCE. BEWARE.
*/
/*---------------------------------------------------------------------------*/
-int
-redaub(struct easycap *peasycap, void *pad, void *pex, int much, int more,
- u8 mask, u8 margin, bool isuy)
+static int redaub(struct easycap *peasycap,
+ void *pad, void *pex, int much, int more,
+ u8 mask, u8 margin, bool isuy)
{
static s32 ay[256], bu[256], rv[256], gu[256], gv[256];
u8 *pcache;
@@ -2855,20 +2841,7 @@ static void easycap_complete(struct urb *purb)
}
return;
}
-static const struct file_operations easycap_fops = {
- .owner = THIS_MODULE,
- .open = easycap_open,
- .unlocked_ioctl = easycap_unlocked_ioctl,
- .poll = easycap_poll,
- .mmap = easycap_mmap,
- .llseek = no_llseek,
-};
-static const struct usb_class_driver easycap_class = {
- .name = "usb/easycap%d",
- .fops = &easycap_fops,
- .minor_base = USB_SKEL_MINOR_BASE,
-};
-/*vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv*/
+
static const struct v4l2_file_operations v4l2_fops = {
.owner = THIS_MODULE,
.open = easycap_open_noinode,
@@ -2917,6 +2890,7 @@ static int easycap_usb_probe(struct usb_interface *intf,
SAY("ERROR: usb_host_interface not found\n");
return -EFAULT;
}
+
interface = &alt->desc;
if (!interface) {
SAY("ERROR: intf_descriptor is NULL\n");
@@ -2976,44 +2950,31 @@ static int easycap_usb_probe(struct usb_interface *intf,
if (mutex_lock_interruptible(&mutex_dongle)) {
SAY("ERROR: cannot down mutex_dongle\n");
return -ERESTARTSYS;
- } else {
-/*---------------------------------------------------------------------------*/
- /*
- * FOR INTERFACES 1 AND 2 THE POINTER peasycap WILL NEED TO
- * TO BE THE SAME AS THAT ALLOCATED NOW FOR INTERFACE 0.
- *
- * NORMALLY ndong WILL NOT HAVE CHANGED SINCE INTERFACE 0 WAS
- * PROBED, BUT THIS MAY NOT BE THE CASE IF, FOR EXAMPLE, TWO
- * EASYCAPs ARE PLUGGED IN SIMULTANEOUSLY.
- */
-/*---------------------------------------------------------------------------*/
- for (ndong = 0; ndong < DONGLE_MANY; ndong++) {
- if ((!easycapdc60_dongle[ndong].peasycap) &&
- (!mutex_is_locked(&easycapdc60_dongle
- [ndong].mutex_video)) &&
- (!mutex_is_locked(&easycapdc60_dongle
- [ndong].mutex_audio))) {
- easycapdc60_dongle[ndong].peasycap = peasycap;
- peasycap->isdongle = ndong;
- JOM(8, "intf[%i]: peasycap-->easycap"
- "_dongle[%i].peasycap\n",
- bInterfaceNumber, ndong);
- break;
- }
- }
- if (DONGLE_MANY <= ndong) {
- SAM("ERROR: too many dongles\n");
- mutex_unlock(&mutex_dongle);
- return -ENOMEM;
+ }
+
+ for (ndong = 0; ndong < DONGLE_MANY; ndong++) {
+ if ((!easycapdc60_dongle[ndong].peasycap) &&
+ (!mutex_is_locked(&easycapdc60_dongle
+ [ndong].mutex_video)) &&
+ (!mutex_is_locked(&easycapdc60_dongle
+ [ndong].mutex_audio))) {
+ easycapdc60_dongle[ndong].peasycap = peasycap;
+ peasycap->isdongle = ndong;
+ JOM(8, "intf[%i]: peasycap-->easycap"
+ "_dongle[%i].peasycap\n",
+ bInterfaceNumber, ndong);
+ break;
}
+ }
+
+ if (DONGLE_MANY <= ndong) {
+ SAM("ERROR: too many dongles\n");
mutex_unlock(&mutex_dongle);
+ return -ENOMEM;
}
+ mutex_unlock(&mutex_dongle);
+
peasycap->allocation_video_struct = sizeof(struct easycap);
- peasycap->allocation_video_page = 0;
- peasycap->allocation_video_urb = 0;
- peasycap->allocation_audio_struct = 0;
- peasycap->allocation_audio_page = 0;
- peasycap->allocation_audio_urb = 0;
/*---------------------------------------------------------------------------*/
/*
@@ -3023,7 +2984,6 @@ static int easycap_usb_probe(struct usb_interface *intf,
peasycap->pusb_device = usbdev;
peasycap->pusb_interface = intf;
- peasycap->ilk = 0;
peasycap->microphone = false;
peasycap->video_interface = -1;
@@ -3042,38 +3002,21 @@ static int easycap_usb_probe(struct usb_interface *intf,
peasycap->frame_buffer_many = FRAME_BUFFER_MANY;
- for (k = 0; k < INPUT_MANY; k++)
- peasycap->lost[k] = 0;
- peasycap->skip = 0;
- peasycap->skipped = 0;
- peasycap->offerfields = 0;
/*---------------------------------------------------------------------------*/
/*
* DYNAMICALLY FILL IN THE AVAILABLE FORMATS ...
*/
/*---------------------------------------------------------------------------*/
- rc = fillin_formats();
+ rc = easycap_video_fillin_formats();
if (0 > rc) {
SAM("ERROR: fillin_formats() rc = %i\n", rc);
return -EFAULT;
}
JOM(4, "%i formats available\n", rc);
-/*---------------------------------------------------------------------------*/
-/*
- * ... AND POPULATE easycap.inputset[]
-*/
-/*---------------------------------------------------------------------------*/
- /* FIXME: maybe we just use memset 0 */
+
+ /* ... AND POPULATE easycap.inputset[] */
+
inputset = peasycap->inputset;
- for (k = 0; k < INPUT_MANY; k++) {
- inputset[k].input_ok = 0;
- inputset[k].standard_offset_ok = 0;
- inputset[k].format_offset_ok = 0;
- inputset[k].brightness_ok = 0;
- inputset[k].contrast_ok = 0;
- inputset[k].saturation_ok = 0;
- inputset[k].hue_ok = 0;
- }
fmtidx = peasycap->ntsc ? NTSC_M : PAL_BGHIN;
m = 0;
@@ -3390,11 +3333,10 @@ static int easycap_usb_probe(struct usb_interface *intf,
if (!isokalt) {
SAM("ERROR: no viable video_altsetting_on\n");
return -ENOENT;
- } else {
- peasycap->video_altsetting_on = okalt[isokalt - 1];
- JOM(4, "%i=video_altsetting_on <====\n",
- peasycap->video_altsetting_on);
}
+ peasycap->video_altsetting_on = okalt[isokalt - 1];
+ JOM(4, "%i=video_altsetting_on <====\n",
+ peasycap->video_altsetting_on);
/*---------------------------------------------------------------------------*/
/*
* DECIDE THE VIDEO STREAMING PARAMETERS
@@ -3480,8 +3422,9 @@ static int easycap_usb_probe(struct usb_interface *intf,
SAM("ERROR: Could not allocate frame "
"buffer %i page %i\n", k, m);
return -ENOMEM;
- } else
- peasycap->allocation_video_page += 1;
+ }
+
+ peasycap->allocation_video_page += 1;
peasycap->frame_buffer[k][m].pgo = pbuf;
}
peasycap->frame_buffer[k][m].pto =
@@ -3510,11 +3453,11 @@ static int easycap_usb_probe(struct usb_interface *intf,
SAM("ERROR: Could not allocate field"
" buffer %i page %i\n", k, m);
return -ENOMEM;
- }
- else
- peasycap->allocation_video_page += 1;
- peasycap->field_buffer[k][m].pgo = pbuf;
}
+
+ peasycap->allocation_video_page += 1;
+ peasycap->field_buffer[k][m].pgo = pbuf;
+ }
peasycap->field_buffer[k][m].pto =
peasycap->field_buffer[k][m].pgo;
}
@@ -3538,9 +3481,9 @@ static int easycap_usb_probe(struct usb_interface *intf,
SAM("ERROR: Could not allocate isoc video buffer "
"%i\n", k);
return -ENOMEM;
- } else
- peasycap->allocation_video_page +=
- BIT(VIDEO_ISOC_ORDER);
+ }
+ peasycap->allocation_video_page +=
+ BIT(VIDEO_ISOC_ORDER);
peasycap->video_isoc_buffer[k].pgo = pbuf;
peasycap->video_isoc_buffer[k].pto =
@@ -3569,15 +3512,17 @@ static int easycap_usb_probe(struct usb_interface *intf,
SAM("ERROR: usb_alloc_urb returned NULL for buffer "
"%i\n", k);
return -ENOMEM;
- } else
- peasycap->allocation_video_urb += 1;
+ }
+
+ peasycap->allocation_video_urb += 1;
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
pdata_urb = kzalloc(sizeof(struct data_urb), GFP_KERNEL);
if (!pdata_urb) {
SAM("ERROR: Could not allocate struct data_urb.\n");
return -ENOMEM;
- } else
- peasycap->allocation_video_struct +=
+ }
+
+ peasycap->allocation_video_struct +=
sizeof(struct data_urb);
pdata_urb->purb = purb;
@@ -3694,13 +3639,12 @@ static int easycap_usb_probe(struct usb_interface *intf,
err("Not able to register with videodev");
videodev_release(&(peasycap->video_device));
return -ENODEV;
- } else {
- (peasycap->registered_video)++;
- SAM("registered with videodev: %i=minor\n",
- peasycap->video_device.minor);
- peasycap->minor = peasycap->video_device.minor;
}
-/*^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^*/
+
+ peasycap->registered_video++;
+ SAM("registered with videodev: %i=minor\n",
+ peasycap->video_device.minor);
+ peasycap->minor = peasycap->video_device.minor;
break;
}
@@ -3734,11 +3678,10 @@ static int easycap_usb_probe(struct usb_interface *intf,
if (!isokalt) {
SAM("ERROR: no viable audio_altsetting_on\n");
return -ENOENT;
- } else {
- peasycap->audio_altsetting_on = okalt[isokalt - 1];
- JOM(4, "%i=audio_altsetting_on <====\n",
- peasycap->audio_altsetting_on);
}
+ peasycap->audio_altsetting_on = okalt[isokalt - 1];
+ JOM(4, "%i=audio_altsetting_on <====\n",
+ peasycap->audio_altsetting_on);
peasycap->audio_endpointnumber = okepn[isokalt - 1];
JOM(4, "%i=audio_endpointnumber\n", peasycap->audio_endpointnumber);
@@ -3847,8 +3790,8 @@ static int easycap_usb_probe(struct usb_interface *intf,
SAM("ERROR: Could not allocate isoc audio buffer "
"%i\n", k);
return -ENOMEM;
- } else
- peasycap->allocation_audio_page +=
+ }
+ peasycap->allocation_audio_page +=
BIT(AUDIO_ISOC_ORDER);
peasycap->audio_isoc_buffer[k].pgo = pbuf;
@@ -3996,12 +3939,9 @@ static void easycap_usb_disconnect(struct usb_interface *pusb_interface)
{
struct usb_host_interface *pusb_host_interface;
struct usb_interface_descriptor *pusb_interface_descriptor;
- u8 bInterfaceNumber;
struct easycap *peasycap;
-
- struct list_head *plist_head;
- struct data_urb *pdata_urb;
- int minor, m, kd;
+ int minor, kd;
+ u8 bInterfaceNumber;
JOT(4, "\n");
@@ -4036,45 +3976,14 @@ static void easycap_usb_disconnect(struct usb_interface *pusb_interface)
peasycap->audio_eof = 1;
wake_up_interruptible(&(peasycap->wq_video));
wake_up_interruptible(&(peasycap->wq_audio));
-/*---------------------------------------------------------------------------*/
+
switch (bInterfaceNumber) {
- case 0: {
- if (peasycap->purb_video_head) {
- JOM(4, "killing video urbs\n");
- m = 0;
- list_for_each(plist_head, peasycap->purb_video_head) {
- pdata_urb = list_entry(plist_head,
- struct data_urb, list_head);
- if (pdata_urb) {
- if (pdata_urb->purb) {
- usb_kill_urb(pdata_urb->purb);
- m++;
- }
- }
- }
- JOM(4, "%i video urbs killed\n", m);
- }
+ case 0:
+ easycap_video_kill_urbs(peasycap);
break;
- }
-/*---------------------------------------------------------------------------*/
- case 2: {
- if (peasycap->purb_audio_head) {
- JOM(4, "killing audio urbs\n");
- m = 0;
- list_for_each(plist_head, peasycap->purb_audio_head) {
- pdata_urb = list_entry(plist_head,
- struct data_urb, list_head);
- if (pdata_urb) {
- if (pdata_urb->purb) {
- usb_kill_urb(pdata_urb->purb);
- m++;
- }
- }
- }
- JOM(4, "%i audio urbs killed\n", m);
- }
+ case 2:
+ easycap_audio_kill_urbs(peasycap);
break;
- }
default:
break;
}
@@ -4087,7 +3996,7 @@ static void easycap_usb_disconnect(struct usb_interface *pusb_interface)
* AN EasyCAP IS UNPLUGGED WHILE THE URBS ARE RUNNING. BEWARE.
*/
/*--------------------------------------------------------------------------*/
- kd = isdongle(peasycap);
+ kd = easycap_isdongle(peasycap);
switch (bInterfaceNumber) {
case 0: {
if (0 <= kd && DONGLE_MANY > kd) {
@@ -4212,7 +4121,7 @@ static struct usb_device_id easycap_usb_device_id_table[] = {
};
MODULE_DEVICE_TABLE(usb, easycap_usb_device_id_table);
-struct usb_driver easycap_usb_driver = {
+static struct usb_driver easycap_usb_driver = {
.name = "easycap",
.id_table = easycap_usb_device_id_table,
.probe = easycap_usb_probe,
diff --git a/drivers/staging/media/easycap/easycap_settings.c b/drivers/staging/media/easycap/easycap_settings.c
index 70f59b13c34d..3f5f5b3e5a35 100644
--- a/drivers/staging/media/easycap/easycap_settings.c
+++ b/drivers/staging/media/easycap/easycap_settings.c
@@ -313,7 +313,7 @@ const struct easycap_standard easycap_standard[] = {
struct easycap_format easycap_format[1 + SETTINGS_MANY];
-int fillin_formats(void)
+int easycap_video_fillin_formats(void)
{
const char *name1, *name2, *name3, *name4;
struct v4l2_format *fmt;
diff --git a/drivers/staging/media/easycap/easycap_sound.c b/drivers/staging/media/easycap/easycap_sound.c
index b22bb39b5f69..8c8bcae8ded8 100644
--- a/drivers/staging/media/easycap/easycap_sound.c
+++ b/drivers/staging/media/easycap/easycap_sound.c
@@ -56,6 +56,141 @@ static const struct snd_pcm_hardware alsa_hardware = {
};
+/*---------------------------------------------------------------------------*/
+/*
+ * SUBMIT ALL AUDIO URBS.
+ */
+/*---------------------------------------------------------------------------*/
+static int easycap_audio_submit_urbs(struct easycap *peasycap)
+{
+ struct data_urb *pdata_urb;
+ struct urb *purb;
+ struct list_head *plist_head;
+ int j, isbad, nospc, m, rc;
+ int isbuf;
+
+ if (!peasycap->purb_audio_head) {
+ SAM("ERROR: peasycap->purb_audio_head uninitialized\n");
+ return -EFAULT;
+ }
+ if (!peasycap->pusb_device) {
+ SAM("ERROR: peasycap->pusb_device is NULL\n");
+ return -EFAULT;
+ }
+
+ if (peasycap->audio_isoc_streaming) {
+ JOM(4, "already streaming audio urbs\n");
+ return 0;
+ }
+
+ JOM(4, "initial submission of all audio urbs\n");
+ rc = usb_set_interface(peasycap->pusb_device,
+ peasycap->audio_interface,
+ peasycap->audio_altsetting_on);
+ JOM(8, "usb_set_interface(.,%i,%i) returned %i\n",
+ peasycap->audio_interface,
+ peasycap->audio_altsetting_on, rc);
+
+ isbad = 0;
+ nospc = 0;
+ m = 0;
+ list_for_each(plist_head, peasycap->purb_audio_head) {
+ pdata_urb = list_entry(plist_head, struct data_urb, list_head);
+ if (pdata_urb && pdata_urb->purb) {
+ purb = pdata_urb->purb;
+ isbuf = pdata_urb->isbuf;
+
+ purb->interval = 1;
+ purb->dev = peasycap->pusb_device;
+ purb->pipe = usb_rcvisocpipe(peasycap->pusb_device,
+ peasycap->audio_endpointnumber);
+ purb->transfer_flags = URB_ISO_ASAP;
+ purb->transfer_buffer = peasycap->audio_isoc_buffer[isbuf].pgo;
+ purb->transfer_buffer_length = peasycap->audio_isoc_buffer_size;
+ purb->complete = easycap_alsa_complete;
+ purb->context = peasycap;
+ purb->start_frame = 0;
+ purb->number_of_packets = peasycap->audio_isoc_framesperdesc;
+ for (j = 0; j < peasycap->audio_isoc_framesperdesc; j++) {
+ purb->iso_frame_desc[j].offset = j * peasycap->audio_isoc_maxframesize;
+ purb->iso_frame_desc[j].length = peasycap->audio_isoc_maxframesize;
+ }
+
+ rc = usb_submit_urb(purb, GFP_KERNEL);
+ if (rc) {
+ isbad++;
+ SAM("ERROR: usb_submit_urb() failed"
+ " for urb with rc: -%s: %d\n",
+ strerror(rc), rc);
+ } else {
+ m++;
+ }
+ } else {
+ isbad++;
+ }
+ }
+ if (nospc) {
+ SAM("-ENOSPC=usb_submit_urb() for %i urbs\n", nospc);
+ SAM("..... possibly inadequate USB bandwidth\n");
+ peasycap->audio_eof = 1;
+ }
+
+ if (isbad)
+ easycap_audio_kill_urbs(peasycap);
+ else
+ peasycap->audio_isoc_streaming = m;
+
+ return 0;
+}
+/*---------------------------------------------------------------------------*/
+/*
+ * COMMON AUDIO INITIALIZATION
+ */
+/*---------------------------------------------------------------------------*/
+static int easycap_sound_setup(struct easycap *peasycap)
+{
+ int rc;
+
+ JOM(4, "starting initialization\n");
+
+ if (!peasycap) {
+ SAY("ERROR: peasycap is NULL.\n");
+ return -EFAULT;
+ }
+ if (!peasycap->pusb_device) {
+ SAM("ERROR: peasycap->pusb_device is NULL\n");
+ return -ENODEV;
+ }
+ JOM(16, "0x%08lX=peasycap->pusb_device\n", (long int)peasycap->pusb_device);
+
+ rc = easycap_audio_setup(peasycap);
+ JOM(8, "audio_setup() returned %i\n", rc);
+
+ if (!peasycap->pusb_device) {
+ SAM("ERROR: peasycap->pusb_device has become NULL\n");
+ return -ENODEV;
+ }
+/*---------------------------------------------------------------------------*/
+ if (!peasycap->pusb_device) {
+ SAM("ERROR: peasycap->pusb_device has become NULL\n");
+ return -ENODEV;
+ }
+ rc = usb_set_interface(peasycap->pusb_device, peasycap->audio_interface,
+ peasycap->audio_altsetting_on);
+ JOM(8, "usb_set_interface(.,%i,%i) returned %i\n", peasycap->audio_interface,
+ peasycap->audio_altsetting_on, rc);
+
+ rc = easycap_wakeup_device(peasycap->pusb_device);
+ JOM(8, "wakeup_device() returned %i\n", rc);
+
+ peasycap->audio_eof = 0;
+ peasycap->audio_idle = 0;
+
+ easycap_audio_submit_urbs(peasycap);
+
+ JOM(4, "finished initialization\n");
+ return 0;
+}
/*****************************************************************************/
/*---------------------------------------------------------------------------*/
/*
@@ -64,8 +199,7 @@ static const struct snd_pcm_hardware alsa_hardware = {
* IT IS RESUBMITTED PROVIDED peasycap->audio_isoc_streaming IS NOT ZERO.
*/
/*---------------------------------------------------------------------------*/
-void
-easycap_alsa_complete(struct urb *purb)
+void easycap_alsa_complete(struct urb *purb)
{
struct easycap *peasycap;
struct snd_pcm_substream *pss;
@@ -458,7 +592,6 @@ static int easycap_alsa_ack(struct snd_pcm_substream *pss)
static int easycap_alsa_trigger(struct snd_pcm_substream *pss, int cmd)
{
struct easycap *peasycap;
- int retval;
JOT(4, "%i=cmd cf %i=START %i=STOP\n", cmd, SNDRV_PCM_TRIGGER_START,
SNDRV_PCM_TRIGGER_STOP);
@@ -481,7 +614,7 @@ static int easycap_alsa_trigger(struct snd_pcm_substream *pss, int cmd)
break;
}
default:
- retval = -EINVAL;
+ return -EINVAL;
}
return 0;
}
@@ -615,202 +748,3 @@ int easycap_alsa_probe(struct easycap *peasycap)
return 0;
}
-/*****************************************************************************/
-/*****************************************************************************/
-/*****************************************************************************/
-/*****************************************************************************/
-/*****************************************************************************/
-/*****************************************************************************/
-/*---------------------------------------------------------------------------*/
-/*
- * COMMON AUDIO INITIALIZATION
- */
-/*---------------------------------------------------------------------------*/
-int
-easycap_sound_setup(struct easycap *peasycap)
-{
- int rc;
-
- JOM(4, "starting initialization\n");
-
- if (!peasycap) {
- SAY("ERROR: peasycap is NULL.\n");
- return -EFAULT;
- }
- if (!peasycap->pusb_device) {
- SAM("ERROR: peasycap->pusb_device is NULL\n");
- return -ENODEV;
- }
- JOM(16, "0x%08lX=peasycap->pusb_device\n", (long int)peasycap->pusb_device);
-
- rc = audio_setup(peasycap);
- JOM(8, "audio_setup() returned %i\n", rc);
-
- if (!peasycap->pusb_device) {
- SAM("ERROR: peasycap->pusb_device has become NULL\n");
- return -ENODEV;
- }
-/*---------------------------------------------------------------------------*/
- if (!peasycap->pusb_device) {
- SAM("ERROR: peasycap->pusb_device has become NULL\n");
- return -ENODEV;
- }
- rc = usb_set_interface(peasycap->pusb_device, peasycap->audio_interface,
- peasycap->audio_altsetting_on);
- JOM(8, "usb_set_interface(.,%i,%i) returned %i\n", peasycap->audio_interface,
- peasycap->audio_altsetting_on, rc);
-
- rc = wakeup_device(peasycap->pusb_device);
- JOM(8, "wakeup_device() returned %i\n", rc);
-
- peasycap->audio_eof = 0;
- peasycap->audio_idle = 0;
-
- submit_audio_urbs(peasycap);
-
- JOM(4, "finished initialization\n");
- return 0;
-}
-/*****************************************************************************/
-/*---------------------------------------------------------------------------*/
-/*
- * SUBMIT ALL AUDIO URBS.
- */
-/*---------------------------------------------------------------------------*/
-int
-submit_audio_urbs(struct easycap *peasycap)
-{
- struct data_urb *pdata_urb;
- struct urb *purb;
- struct list_head *plist_head;
- int j, isbad, nospc, m, rc;
- int isbuf;
-
- if (!peasycap) {
- SAY("ERROR: peasycap is NULL\n");
- return -EFAULT;
- }
- if (!peasycap->purb_audio_head) {
- SAM("ERROR: peasycap->urb_audio_head uninitialized\n");
- return -EFAULT;
- }
- if (!peasycap->pusb_device) {
- SAM("ERROR: peasycap->pusb_device is NULL\n");
- return -EFAULT;
- }
-
- if (peasycap->audio_isoc_streaming) {
- JOM(4, "already streaming audio urbs\n");
- return 0;
- }
-
- JOM(4, "initial submission of all audio urbs\n");
- rc = usb_set_interface(peasycap->pusb_device,
- peasycap->audio_interface,
- peasycap->audio_altsetting_on);
- JOM(8, "usb_set_interface(.,%i,%i) returned %i\n",
- peasycap->audio_interface,
- peasycap->audio_altsetting_on, rc);
-
- isbad = 0;
- nospc = 0;
- m = 0;
- list_for_each(plist_head, peasycap->purb_audio_head) {
- pdata_urb = list_entry(plist_head, struct data_urb, list_head);
- if (pdata_urb && pdata_urb->purb) {
- purb = pdata_urb->purb;
- isbuf = pdata_urb->isbuf;
-
- purb->interval = 1;
- purb->dev = peasycap->pusb_device;
- purb->pipe = usb_rcvisocpipe(peasycap->pusb_device,
- peasycap->audio_endpointnumber);
- purb->transfer_flags = URB_ISO_ASAP;
- purb->transfer_buffer = peasycap->audio_isoc_buffer[isbuf].pgo;
- purb->transfer_buffer_length = peasycap->audio_isoc_buffer_size;
- purb->complete = easycap_alsa_complete;
- purb->context = peasycap;
- purb->start_frame = 0;
- purb->number_of_packets = peasycap->audio_isoc_framesperdesc;
- for (j = 0; j < peasycap->audio_isoc_framesperdesc; j++) {
- purb->iso_frame_desc[j].offset = j * peasycap->audio_isoc_maxframesize;
- purb->iso_frame_desc[j].length = peasycap->audio_isoc_maxframesize;
- }
-
- rc = usb_submit_urb(purb, GFP_KERNEL);
- if (rc) {
- isbad++;
- SAM("ERROR: usb_submit_urb() failed"
- " for urb with rc: -%s: %d\n",
- strerror(rc), rc);
- } else {
- m++;
- }
- } else {
- isbad++;
- }
- }
- if (nospc) {
- SAM("-ENOSPC=usb_submit_urb() for %i urbs\n", nospc);
- SAM("..... possibly inadequate USB bandwidth\n");
- peasycap->audio_eof = 1;
- }
- if (isbad) {
- JOM(4, "attempting cleanup instead of submitting\n");
- list_for_each(plist_head, (peasycap->purb_audio_head)) {
- pdata_urb = list_entry(plist_head, struct data_urb, list_head);
- if (pdata_urb && pdata_urb->purb)
- usb_kill_urb(pdata_urb->purb);
- }
- peasycap->audio_isoc_streaming = 0;
- } else {
- peasycap->audio_isoc_streaming = m;
- JOM(4, "submitted %i audio urbs\n", m);
- }
-
- return 0;
-}
-/*****************************************************************************/
-/*---------------------------------------------------------------------------*/
-/*
- * KILL ALL AUDIO URBS.
- */
-/*---------------------------------------------------------------------------*/
-int
-kill_audio_urbs(struct easycap *peasycap)
-{
- int m;
- struct list_head *plist_head;
- struct data_urb *pdata_urb;
-
- if (!peasycap) {
- SAY("ERROR: peasycap is NULL\n");
- return -EFAULT;
- }
-
- if (!peasycap->audio_isoc_streaming) {
- JOM(8, "%i=audio_isoc_streaming, no audio urbs killed\n",
- peasycap->audio_isoc_streaming);
- return 0;
- }
-
- if (!peasycap->purb_audio_head) {
- SAM("ERROR: peasycap->purb_audio_head is NULL\n");
- return -EFAULT;
- }
-
- peasycap->audio_isoc_streaming = 0;
- JOM(4, "killing audio urbs\n");
- m = 0;
- list_for_each(plist_head, (peasycap->purb_audio_head)) {
- pdata_urb = list_entry(plist_head, struct data_urb, list_head);
- if (pdata_urb && pdata_urb->purb) {
- usb_kill_urb(pdata_urb->purb);
- m++;
- }
- }
- JOM(4, "%i audio urbs killed\n", m);
-
- return 0;
-}
-/*****************************************************************************/
diff --git a/drivers/staging/media/go7007/go7007-usb.c b/drivers/staging/media/go7007/go7007-usb.c
index b7175fe1b15f..70e006b50f29 100644
--- a/drivers/staging/media/go7007/go7007-usb.c
+++ b/drivers/staging/media/go7007/go7007-usb.c
@@ -1054,7 +1054,13 @@ static int go7007_usb_probe(struct usb_interface *intf,
else
go->hpi_ops = &go7007_usb_onboard_hpi_ops;
go->hpi_context = usb;
- usb_fill_int_urb(usb->intr_urb, usb->usbdev,
+ if (go->board_id == GO7007_BOARDID_SENSORAY_2250)
+ usb_fill_bulk_urb(usb->intr_urb, usb->usbdev,
+ usb_rcvbulkpipe(usb->usbdev, 4),
+ usb->intr_urb->transfer_buffer, 2*sizeof(u16),
+ go7007_usb_readinterrupt_complete, go);
+ else
+ usb_fill_int_urb(usb->intr_urb, usb->usbdev,
usb_rcvintpipe(usb->usbdev, 4),
usb->intr_urb->transfer_buffer, 2*sizeof(u16),
go7007_usb_readinterrupt_complete, go, 8);
diff --git a/drivers/staging/media/go7007/snd-go7007.c b/drivers/staging/media/go7007/snd-go7007.c
index deac938d8505..d071c838ac2a 100644
--- a/drivers/staging/media/go7007/snd-go7007.c
+++ b/drivers/staging/media/go7007/snd-go7007.c
@@ -38,7 +38,7 @@
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;
static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;
-static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP;
+static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP;
module_param_array(index, int, NULL, 0444);
module_param_array(id, charp, NULL, 0444);
diff --git a/drivers/staging/media/lirc/lirc_bt829.c b/drivers/staging/media/lirc/lirc_bt829.c
index c5a0d27a02dc..4d20e9f74118 100644
--- a/drivers/staging/media/lirc/lirc_bt829.c
+++ b/drivers/staging/media/lirc/lirc_bt829.c
@@ -53,7 +53,7 @@ static unsigned char do_get_bits(void);
#define DRIVER_NAME "lirc_bt829"
-static int debug;
+static bool debug;
#define dprintk(fmt, args...) \
do { \
if (debug) \
diff --git a/drivers/staging/media/lirc/lirc_igorplugusb.c b/drivers/staging/media/lirc/lirc_igorplugusb.c
index 6cd4cd67a1dd..7a2501776679 100644
--- a/drivers/staging/media/lirc/lirc_igorplugusb.c
+++ b/drivers/staging/media/lirc/lirc_igorplugusb.c
@@ -62,9 +62,9 @@
/* debugging support */
#ifdef CONFIG_USB_DEBUG
-static int debug = 1;
+static bool debug = 1;
#else
-static int debug;
+static bool debug;
#endif
#define dprintk(fmt, args...) \
diff --git a/drivers/staging/media/lirc/lirc_imon.c b/drivers/staging/media/lirc/lirc_imon.c
index f68218012f23..5f7f8cd3a661 100644
--- a/drivers/staging/media/lirc/lirc_imon.c
+++ b/drivers/staging/media/lirc/lirc_imon.c
@@ -63,7 +63,7 @@ static int display_open(struct inode *inode, struct file *file);
static int display_close(struct inode *inode, struct file *file);
/* VFD write operation */
-static ssize_t vfd_write(struct file *file, const char *buf,
+static ssize_t vfd_write(struct file *file, const char __user *buf,
size_t n_bytes, loff_t *pos);
/* LIRC driver function prototypes */
@@ -369,7 +369,7 @@ static int send_packet(struct imon_context *context)
* than 32 bytes are provided spaces will be appended to
* generate a full screen.
*/
-static ssize_t vfd_write(struct file *file, const char *buf,
+static ssize_t vfd_write(struct file *file, const char __user *buf,
size_t n_bytes, loff_t *pos)
{
int i;
diff --git a/drivers/staging/media/lirc/lirc_parallel.c b/drivers/staging/media/lirc/lirc_parallel.c
index 792aac0a8e7b..dd2bca7b56fa 100644
--- a/drivers/staging/media/lirc/lirc_parallel.c
+++ b/drivers/staging/media/lirc/lirc_parallel.c
@@ -63,8 +63,8 @@
/*** Global Variables ***/
-static int debug;
-static int check_pselecd;
+static bool debug;
+static bool check_pselecd;
unsigned int irq = LIRC_IRQ;
unsigned int io = LIRC_PORT;
@@ -752,4 +752,4 @@ module_param(debug, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable debugging messages");
module_param(check_pselecd, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(debug, "Check for printer (default: 0)");
+MODULE_PARM_DESC(check_pselecd, "Check for printer (default: 0)");
diff --git a/drivers/staging/media/lirc/lirc_serial.c b/drivers/staging/media/lirc/lirc_serial.c
index 8a060a8a7224..8dd8897ad860 100644
--- a/drivers/staging/media/lirc/lirc_serial.c
+++ b/drivers/staging/media/lirc/lirc_serial.c
@@ -107,13 +107,13 @@ struct lirc_serial {
static int type;
static int io;
static int irq;
-static int iommap;
+static bool iommap;
static int ioshift;
-static int softcarrier = 1;
-static int share_irq;
-static int debug;
+static bool softcarrier = 1;
+static bool share_irq;
+static bool debug;
static int sense = -1; /* -1 = auto, 0 = active high, 1 = active low */
-static int txsense; /* 0 = active high, 1 = active low */
+static bool txsense; /* 0 = active high, 1 = active low */
#define dprintk(fmt, args...) \
do { \
@@ -773,7 +773,7 @@ static int hardware_init_port(void)
/* we fail, there's nothing here */
printk(KERN_ERR LIRC_DRIVER_NAME ": port existence test "
"failed, cannot continue\n");
- return -EINVAL;
+ return -ENODEV;
}
@@ -836,25 +836,22 @@ static int hardware_init_port(void)
return 0;
}
-static int init_port(void)
+static int __devinit lirc_serial_probe(struct platform_device *dev)
{
int i, nlow, nhigh, result;
result = request_irq(irq, irq_handler,
(share_irq ? IRQF_SHARED : 0),
LIRC_DRIVER_NAME, (void *)&hardware);
-
- switch (result) {
- case -EBUSY:
- printk(KERN_ERR LIRC_DRIVER_NAME ": IRQ %d busy\n", irq);
- return -EBUSY;
- case -EINVAL:
- printk(KERN_ERR LIRC_DRIVER_NAME
- ": Bad irq number or handler\n");
- return -EINVAL;
- default:
- break;
- };
+ if (result < 0) {
+ if (result == -EBUSY)
+ printk(KERN_ERR LIRC_DRIVER_NAME ": IRQ %d busy\n",
+ irq);
+ else if (result == -EINVAL)
+ printk(KERN_ERR LIRC_DRIVER_NAME
+ ": Bad irq number or handler\n");
+ return result;
+ }
/* Reserve io region. */
/*
@@ -875,11 +872,13 @@ static int init_port(void)
": or compile the serial port driver as module and\n");
printk(KERN_WARNING LIRC_DRIVER_NAME
": make sure this module is loaded first\n");
- return -EBUSY;
+ result = -EBUSY;
+ goto exit_free_irq;
}
- if (hardware_init_port() < 0)
- return -EINVAL;
+ result = hardware_init_port();
+ if (result < 0)
+ goto exit_release_region;
/* Initialize pulse/space widths */
init_timing_params(duty_cycle, freq);
@@ -911,6 +910,28 @@ static int init_port(void)
dprintk("Interrupt %d, port %04x obtained\n", irq, io);
return 0;
+
+exit_release_region:
+ if (iommap != 0)
+ release_mem_region(iommap, 8 << ioshift);
+ else
+ release_region(io, 8);
+exit_free_irq:
+ free_irq(irq, (void *)&hardware);
+
+ return result;
+}
+
+static int __devexit lirc_serial_remove(struct platform_device *dev)
+{
+ free_irq(irq, (void *)&hardware);
+
+ if (iommap != 0)
+ release_mem_region(iommap, 8 << ioshift);
+ else
+ release_region(io, 8);
+
+ return 0;
}
static int set_use_inc(void *data)
@@ -955,7 +976,7 @@ static ssize_t lirc_write(struct file *file, const char *buf,
int *wbuf;
if (!(hardware[type].features & LIRC_CAN_SEND_PULSE))
- return -EBADF;
+ return -EPERM;
count = n / sizeof(int);
if (n % sizeof(int) || count % 2 == 0)
@@ -1006,11 +1027,11 @@ static long lirc_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
return result;
/* only LIRC_MODE_PULSE supported */
if (value != LIRC_MODE_PULSE)
- return -ENOSYS;
+ return -EINVAL;
break;
case LIRC_GET_LENGTH:
- return -ENOSYS;
+ return -ENOIOCTLCMD;
break;
case LIRC_SET_SEND_DUTY_CYCLE:
@@ -1076,16 +1097,6 @@ static struct lirc_driver driver = {
static struct platform_device *lirc_serial_dev;
-static int __devinit lirc_serial_probe(struct platform_device *dev)
-{
- return 0;
-}
-
-static int __devexit lirc_serial_remove(struct platform_device *dev)
-{
- return 0;
-}
-
static int lirc_serial_suspend(struct platform_device *dev,
pm_message_t state)
{
@@ -1111,11 +1122,11 @@ static void lirc_serial_exit(void);
static int lirc_serial_resume(struct platform_device *dev)
{
unsigned long flags;
+ int result;
- if (hardware_init_port() < 0) {
- lirc_serial_exit();
- return -EINVAL;
- }
+ result = hardware_init_port();
+ if (result < 0)
+ return result;
spin_lock_irqsave(&hardware[type].lock, flags);
/* Enable Interrupt */
@@ -1148,7 +1159,7 @@ static int __init lirc_serial_init(void)
/* Init read buffer. */
result = lirc_buffer_init(&rbuf, sizeof(int), RBUF_LEN);
if (result < 0)
- return -ENOMEM;
+ return result;
result = platform_driver_register(&lirc_serial_driver);
if (result) {
@@ -1188,10 +1199,6 @@ static int __init lirc_serial_init_module(void)
{
int result;
- result = lirc_serial_init();
- if (result)
- return result;
-
switch (type) {
case LIRC_HOMEBREW:
case LIRC_IRDEO:
@@ -1211,8 +1218,7 @@ static int __init lirc_serial_init_module(void)
break;
#endif
default:
- result = -EINVAL;
- goto exit_serial_exit;
+ return -EINVAL;
}
if (!softcarrier) {
switch (type) {
@@ -1228,37 +1234,26 @@ static int __init lirc_serial_init_module(void)
}
}
- result = init_port();
- if (result < 0)
- goto exit_serial_exit;
+ result = lirc_serial_init();
+ if (result)
+ return result;
+
driver.features = hardware[type].features;
driver.dev = &lirc_serial_dev->dev;
driver.minor = lirc_register_driver(&driver);
if (driver.minor < 0) {
printk(KERN_ERR LIRC_DRIVER_NAME
": register_chrdev failed!\n");
- result = -EIO;
- goto exit_release;
+ lirc_serial_exit();
+ return driver.minor;
}
return 0;
-exit_release:
- release_region(io, 8);
-exit_serial_exit:
- lirc_serial_exit();
- return result;
}
static void __exit lirc_serial_exit_module(void)
{
- lirc_serial_exit();
-
- free_irq(irq, (void *)&hardware);
-
- if (iommap != 0)
- release_mem_region(iommap, 8 << ioshift);
- else
- release_region(io, 8);
lirc_unregister_driver(driver.minor);
+ lirc_serial_exit();
dprintk("cleaned up module\n");
}
diff --git a/drivers/staging/media/lirc/lirc_sir.c b/drivers/staging/media/lirc/lirc_sir.c
index 6903d3992eca..c94382b917ac 100644
--- a/drivers/staging/media/lirc/lirc_sir.c
+++ b/drivers/staging/media/lirc/lirc_sir.c
@@ -173,7 +173,7 @@ static DEFINE_SPINLOCK(hardware_lock);
static int rx_buf[RBUF_LEN];
static unsigned int rx_tail, rx_head;
-static int debug;
+static bool debug;
#define dprintk(fmt, args...) \
do { \
if (debug) \
diff --git a/drivers/staging/media/lirc/lirc_zilog.c b/drivers/staging/media/lirc/lirc_zilog.c
index 0302d82a12f7..76ea4a8f2c75 100644
--- a/drivers/staging/media/lirc/lirc_zilog.c
+++ b/drivers/staging/media/lirc/lirc_zilog.c
@@ -155,8 +155,8 @@ static struct mutex tx_data_lock;
#define zilog_info(s, args...) printk(KERN_INFO KBUILD_MODNAME ": " s, ## args)
/* module parameters */
-static int debug; /* debug output */
-static int tx_only; /* only handle the IR Tx function */
+static bool debug; /* debug output */
+static bool tx_only; /* only handle the IR Tx function */
static int minor = -1; /* minor number */
#define dprintk(fmt, args...) \
diff --git a/drivers/staging/media/solo6x10/Makefile b/drivers/staging/media/solo6x10/Makefile
index 72816cf16704..337e38c3a0f0 100644
--- a/drivers/staging/media/solo6x10/Makefile
+++ b/drivers/staging/media/solo6x10/Makefile
@@ -1,3 +1,3 @@
solo6x10-y := core.o i2c.o p2m.o v4l2.o tw28.o gpio.o disp.o enc.o v4l2-enc.o g723.o
-obj-$(CONFIG_SOLO6X10) := solo6x10.o
+obj-$(CONFIG_SOLO6X10) += solo6x10.o
diff --git a/drivers/staging/media/solo6x10/jpeg.h b/drivers/staging/media/solo6x10/solo6x10-jpeg.h
index 50defec318cc..50defec318cc 100644
--- a/drivers/staging/media/solo6x10/jpeg.h
+++ b/drivers/staging/media/solo6x10/solo6x10-jpeg.h
diff --git a/drivers/staging/media/solo6x10/v4l2-enc.c b/drivers/staging/media/solo6x10/v4l2-enc.c
index bee7280bbed9..f8f0da952288 100644
--- a/drivers/staging/media/solo6x10/v4l2-enc.c
+++ b/drivers/staging/media/solo6x10/v4l2-enc.c
@@ -26,7 +26,7 @@
#include <media/videobuf-dma-sg.h>
#include "solo6x10.h"
#include "tw28.h"
-#include "jpeg.h"
+#include "solo6x10-jpeg.h"
#define MIN_VID_BUFFERS 4
#define FRAME_BUF_SIZE (128 * 1024)
diff --git a/drivers/staging/mei/init.c b/drivers/staging/mei/init.c
index 8bf34794489c..4ac3696883cb 100644
--- a/drivers/staging/mei/init.c
+++ b/drivers/staging/mei/init.c
@@ -38,7 +38,6 @@ void mei_io_list_init(struct mei_io_list *list)
{
/* initialize our queue list */
INIT_LIST_HEAD(&list->mei_cb.cb_list);
- list->status = 0;
}
/**
@@ -49,22 +48,15 @@ void mei_io_list_init(struct mei_io_list *list)
*/
void mei_io_list_flush(struct mei_io_list *list, struct mei_cl *cl)
{
- struct mei_cl_cb *cb_pos = NULL;
- struct mei_cl_cb *cb_next = NULL;
+ struct mei_cl_cb *pos;
+ struct mei_cl_cb *next;
- if (list->status != 0)
- return;
-
- if (list_empty(&list->mei_cb.cb_list))
- return;
-
- list_for_each_entry_safe(cb_pos, cb_next,
- &list->mei_cb.cb_list, cb_list) {
- if (cb_pos) {
+ list_for_each_entry_safe(pos, next, &list->mei_cb.cb_list, cb_list) {
+ if (pos->file_private) {
struct mei_cl *cl_tmp;
- cl_tmp = (struct mei_cl *)cb_pos->file_private;
+ cl_tmp = (struct mei_cl *)pos->file_private;
if (mei_cl_cmp_id(cl, cl_tmp))
- list_del(&cb_pos->cb_list);
+ list_del(&pos->cb_list);
}
}
}
@@ -338,16 +330,10 @@ void mei_reset(struct mei_device *dev, int interrupts_enabled)
}
}
/* remove all waiting requests */
- if (dev->write_list.status == 0 &&
- !list_empty(&dev->write_list.mei_cb.cb_list)) {
- list_for_each_entry_safe(cb_pos, cb_next,
- &dev->write_list.mei_cb.cb_list, cb_list) {
- if (cb_pos) {
- list_del(&cb_pos->cb_list);
- mei_free_cb_private(cb_pos);
- cb_pos = NULL;
- }
- }
+ list_for_each_entry_safe(cb_pos, cb_next,
+ &dev->write_list.mei_cb.cb_list, cb_list) {
+ list_del(&cb_pos->cb_list);
+ mei_free_cb_private(cb_pos);
}
}
@@ -380,8 +366,7 @@ void mei_host_start_message(struct mei_device *dev)
host_start_req->host_version.major_version = HBM_MAJOR_VERSION;
host_start_req->host_version.minor_version = HBM_MINOR_VERSION;
dev->recvd_msg = false;
- if (!mei_write_message(dev, mei_hdr,
- (unsigned char *) (host_start_req),
+ if (!mei_write_message(dev, mei_hdr, (unsigned char *)host_start_req,
mei_hdr->length)) {
dev_dbg(&dev->pdev->dev, "write send version message to FW fail.\n");
dev->mei_state = MEI_RESETING;
@@ -414,8 +399,7 @@ void mei_host_enum_clients_message(struct mei_device *dev)
host_enum_req = (struct hbm_host_enum_request *) &dev->wr_msg_buf[1];
memset(host_enum_req, 0, sizeof(struct hbm_host_enum_request));
host_enum_req->cmd.cmd = HOST_ENUM_REQ_CMD;
- if (!mei_write_message(dev, mei_hdr,
- (unsigned char *) (host_enum_req),
+ if (!mei_write_message(dev, mei_hdr, (unsigned char *)host_enum_req,
mei_hdr->length)) {
dev->mei_state = MEI_RESETING;
dev_dbg(&dev->pdev->dev, "write send enumeration request message to FW fail.\n");
@@ -605,15 +589,10 @@ void mei_host_init_iamthif(struct mei_device *dev)
return;
}
- /* Do not render the system unusable when iamthif_mtu is not equal to
- the value received from ME.
- Assign iamthif_mtu to the value received from ME in order to solve the
- hardware macro incompatibility. */
+ /* Assign iamthif_mtu to the value received from ME */
- dev_dbg(&dev->pdev->dev, "[DEFAULT] IAMTHIF = %d\n", dev->iamthif_mtu);
dev->iamthif_mtu = dev->me_clients[i].props.max_msg_length;
- dev_dbg(&dev->pdev->dev,
- "IAMTHIF = %d\n",
+ dev_dbg(&dev->pdev->dev, "IAMTHIF_MTU = %d\n",
dev->me_clients[i].props.max_msg_length);
kfree(dev->iamthif_msg_buf);
diff --git a/drivers/staging/mei/interface.c b/drivers/staging/mei/interface.c
index a65dacf00213..eb5df7fc2269 100644
--- a/drivers/staging/mei/interface.c
+++ b/drivers/staging/mei/interface.c
@@ -128,9 +128,9 @@ int mei_count_empty_write_slots(struct mei_device *dev)
* returns 1 if success, 0 - otherwise.
*/
int mei_write_message(struct mei_device *dev,
- struct mei_msg_hdr *header,
- unsigned char *write_buffer,
- unsigned long write_length)
+ struct mei_msg_hdr *header,
+ unsigned char *write_buffer,
+ unsigned long write_length)
{
u32 temp_msg = 0;
unsigned long bytes_written = 0;
@@ -216,7 +216,7 @@ int mei_count_full_read_slots(struct mei_device *dev)
* @buffer_length: message size will be read
*/
void mei_read_slots(struct mei_device *dev,
- unsigned char *buffer, unsigned long buffer_length)
+ unsigned char *buffer, unsigned long buffer_length)
{
u32 i = 0;
unsigned char temp_buf[sizeof(u32)];
diff --git a/drivers/staging/mei/interface.h b/drivers/staging/mei/interface.h
index 7bd38ae2c233..aeae511419c7 100644
--- a/drivers/staging/mei/interface.h
+++ b/drivers/staging/mei/interface.h
@@ -52,6 +52,17 @@ int mei_wd_send(struct mei_device *dev);
int mei_wd_stop(struct mei_device *dev, bool preserve);
bool mei_wd_host_init(struct mei_device *dev);
void mei_wd_set_start_timeout(struct mei_device *dev, u16 timeout);
+/*
+ * mei_watchdog_register - Registering watchdog interface
+ * once we get a connection to the WD Client
+ * @dev - mei device
+ */
+void mei_watchdog_register(struct mei_device *dev);
+/*
+ * mei_watchdog_unregister - Unregistering watchdog interface
+ * @dev - mei device
+ */
+void mei_watchdog_unregister(struct mei_device *dev);
int mei_flow_ctrl_reduce(struct mei_device *dev, struct mei_cl *cl);
diff --git a/drivers/staging/mei/interrupt.c b/drivers/staging/mei/interrupt.c
index 882d106d54e5..3544fee34e48 100644
--- a/drivers/staging/mei/interrupt.c
+++ b/drivers/staging/mei/interrupt.c
@@ -198,8 +198,7 @@ static int mei_irq_thread_read_client_message(struct mei_io_list *complete_list,
unsigned char *buffer = NULL;
dev_dbg(&dev->pdev->dev, "start client msg\n");
- if (!(dev->read_list.status == 0 &&
- !list_empty(&dev->read_list.mei_cb.cb_list)))
+ if (list_empty(&dev->read_list.mei_cb.cb_list))
goto quit;
list_for_each_entry_safe(cb_pos, cb_next,
@@ -210,9 +209,6 @@ static int mei_irq_thread_read_client_message(struct mei_io_list *complete_list,
buffer = (unsigned char *)
(cb_pos->response_buffer.data +
cb_pos->information);
- BUG_ON(cb_pos->response_buffer.size <
- mei_hdr->length +
- cb_pos->information);
if (cb_pos->response_buffer.size <
mei_hdr->length + cb_pos->information) {
@@ -390,24 +386,10 @@ static void mei_client_connect_response(struct mei_device *dev,
/* if WD or iamthif client treat specially */
if (is_treat_specially_client(&(dev->wd_cl), rs)) {
- dev_dbg(&dev->pdev->dev, "dev->wd_timeout =%d.\n",
- dev->wd_timeout);
-
- dev->wd_due_counter = (dev->wd_timeout) ? 1 : 0;
-
dev_dbg(&dev->pdev->dev, "successfully connected to WD client.\n");
+ mei_watchdog_register(dev);
- /* Registering watchdog interface device once we got connection
- to the WD Client
- */
- if (watchdog_register_device(&amt_wd_dev)) {
- printk(KERN_ERR "mei: unable to register watchdog device.\n");
- dev->wd_interface_reg = false;
- } else {
- dev_dbg(&dev->pdev->dev, "successfully register watchdog interface.\n");
- dev->wd_interface_reg = true;
- }
-
+ /* next step in the state machine */
mei_host_init_iamthif(dev);
return;
}
@@ -416,22 +398,20 @@ static void mei_client_connect_response(struct mei_device *dev,
dev->iamthif_state = MEI_IAMTHIF_IDLE;
return;
}
- if (!dev->ctrl_rd_list.status &&
- !list_empty(&dev->ctrl_rd_list.mei_cb.cb_list)) {
- list_for_each_entry_safe(cb_pos, cb_next,
- &dev->ctrl_rd_list.mei_cb.cb_list, cb_list) {
- cl = (struct mei_cl *)cb_pos->file_private;
- if (!cl) {
+ list_for_each_entry_safe(cb_pos, cb_next,
+ &dev->ctrl_rd_list.mei_cb.cb_list, cb_list) {
+
+ cl = (struct mei_cl *)cb_pos->file_private;
+ if (!cl) {
+ list_del(&cb_pos->cb_list);
+ return;
+ }
+ if (MEI_IOCTL == cb_pos->major_file_operations) {
+ if (is_treat_specially_client(cl, rs)) {
list_del(&cb_pos->cb_list);
- return;
- }
- if (MEI_IOCTL == cb_pos->major_file_operations) {
- if (is_treat_specially_client(cl, rs)) {
- list_del(&cb_pos->cb_list);
- cl->status = 0;
- cl->timer_count = 0;
- break;
- }
+ cl->status = 0;
+ cl->timer_count = 0;
+ break;
}
}
}
@@ -458,29 +438,26 @@ static void mei_client_disconnect_response(struct mei_device *dev,
rs->host_addr,
rs->status);
- if (!dev->ctrl_rd_list.status &&
- !list_empty(&dev->ctrl_rd_list.mei_cb.cb_list)) {
- list_for_each_entry_safe(cb_pos, cb_next,
- &dev->ctrl_rd_list.mei_cb.cb_list, cb_list) {
- cl = (struct mei_cl *)cb_pos->file_private;
+ list_for_each_entry_safe(cb_pos, cb_next,
+ &dev->ctrl_rd_list.mei_cb.cb_list, cb_list) {
+ cl = (struct mei_cl *)cb_pos->file_private;
- if (!cl) {
- list_del(&cb_pos->cb_list);
- return;
- }
+ if (!cl) {
+ list_del(&cb_pos->cb_list);
+ return;
+ }
- dev_dbg(&dev->pdev->dev, "list_for_each_entry_safe in ctrl_rd_list.\n");
- if (cl->host_client_id == rs->host_addr &&
- cl->me_client_id == rs->me_addr) {
+ dev_dbg(&dev->pdev->dev, "list_for_each_entry_safe in ctrl_rd_list.\n");
+ if (cl->host_client_id == rs->host_addr &&
+ cl->me_client_id == rs->me_addr) {
- list_del(&cb_pos->cb_list);
- if (!rs->status)
- cl->state = MEI_FILE_DISCONNECTED;
+ list_del(&cb_pos->cb_list);
+ if (!rs->status)
+ cl->state = MEI_FILE_DISCONNECTED;
- cl->status = 0;
- cl->timer_count = 0;
- break;
- }
+ cl->status = 0;
+ cl->timer_count = 0;
+ break;
}
}
}
@@ -718,7 +695,7 @@ static void mei_irq_thread_read_bus_message(struct mei_device *dev,
case CLIENT_DISCONNECT_RES_CMD:
disconnect_res =
(struct hbm_client_connect_response *) mei_msg;
- mei_client_disconnect_response(dev, disconnect_res);
+ mei_client_disconnect_response(dev, disconnect_res);
dev_dbg(&dev->pdev->dev, "client disconnect response message received.\n");
wake_up(&dev->wait_recvd_msg);
break;
@@ -736,7 +713,7 @@ static void mei_irq_thread_read_bus_message(struct mei_device *dev,
mei_reset(dev, 1);
return;
}
- if (dev->me_clients[dev->me_client_presentation_num]
+ if (dev->me_clients[dev->me_client_presentation_num]
.client_id == props_res->address) {
dev->me_clients[dev->me_client_presentation_num].props
@@ -1228,7 +1205,7 @@ static int mei_irq_thread_write_handler(struct mei_io_list *cmpl_list,
{
struct mei_cl *cl;
- struct mei_cl_cb *cb_pos = NULL, *cb_next = NULL;
+ struct mei_cl_cb *pos = NULL, *next = NULL;
struct mei_io_list *list;
int ret;
@@ -1241,36 +1218,31 @@ static int mei_irq_thread_write_handler(struct mei_io_list *cmpl_list,
dev_dbg(&dev->pdev->dev, "complete all waiting for write cb.\n");
list = &dev->write_waiting_list;
- if (!list->status && !list_empty(&list->mei_cb.cb_list)) {
- list_for_each_entry_safe(cb_pos, cb_next,
- &list->mei_cb.cb_list, cb_list) {
- cl = (struct mei_cl *)cb_pos->file_private;
- if (cl) {
- cl->status = 0;
- list_del(&cb_pos->cb_list);
- if (MEI_WRITING == cl->writing_state &&
- (cb_pos->major_file_operations ==
- MEI_WRITE) &&
- (cl != &dev->iamthif_cl)) {
- dev_dbg(&dev->pdev->dev,
- "MEI WRITE COMPLETE\n");
- cl->writing_state =
- MEI_WRITE_COMPLETE;
- list_add_tail(&cb_pos->cb_list,
- &cmpl_list->mei_cb.cb_list);
- }
- if (cl == &dev->iamthif_cl) {
- dev_dbg(&dev->pdev->dev, "check iamthif flow control.\n");
- if (dev->iamthif_flow_control_pending) {
- ret =
- _mei_irq_thread_iamthif_read(
- dev, slots);
- if (ret)
- return ret;
- }
- }
+ list_for_each_entry_safe(pos, next,
+ &list->mei_cb.cb_list, cb_list) {
+ cl = (struct mei_cl *)pos->file_private;
+ if (cl == NULL)
+ continue;
+
+ cl->status = 0;
+ list_del(&pos->cb_list);
+ if (MEI_WRITING == cl->writing_state &&
+ (pos->major_file_operations == MEI_WRITE) &&
+ (cl != &dev->iamthif_cl)) {
+ dev_dbg(&dev->pdev->dev,
+ "MEI WRITE COMPLETE\n");
+ cl->writing_state = MEI_WRITE_COMPLETE;
+ list_add_tail(&pos->cb_list,
+ &cmpl_list->mei_cb.cb_list);
+ }
+ if (cl == &dev->iamthif_cl) {
+ dev_dbg(&dev->pdev->dev, "check iamthif flow control.\n");
+ if (dev->iamthif_flow_control_pending) {
+ ret = _mei_irq_thread_iamthif_read(
+ dev, slots);
+ if (ret)
+ return ret;
}
-
}
}
@@ -1317,101 +1289,88 @@ static int mei_irq_thread_write_handler(struct mei_io_list *cmpl_list,
return ~ENODEV;
/* complete control write list CB */
- if (!dev->ctrl_wr_list.status) {
- /* complete control write list CB */
- dev_dbg(&dev->pdev->dev, "complete control write list cb.\n");
- list_for_each_entry_safe(cb_pos, cb_next,
+ dev_dbg(&dev->pdev->dev, "complete control write list cb.\n");
+ list_for_each_entry_safe(pos, next,
&dev->ctrl_wr_list.mei_cb.cb_list, cb_list) {
- cl = (struct mei_cl *)
- cb_pos->file_private;
- if (!cl) {
- list_del(&cb_pos->cb_list);
- return -ENODEV;
- }
- switch (cb_pos->major_file_operations) {
- case MEI_CLOSE:
- /* send disconnect message */
- ret = _mei_irq_thread_close(dev, slots,
- cb_pos, cl, cmpl_list);
- if (ret)
- return ret;
-
- break;
- case MEI_READ:
- /* send flow control message */
- ret = _mei_irq_thread_read(dev, slots,
- cb_pos, cl, cmpl_list);
- if (ret)
- return ret;
+ cl = (struct mei_cl *) pos->file_private;
+ if (!cl) {
+ list_del(&pos->cb_list);
+ return -ENODEV;
+ }
+ switch (pos->major_file_operations) {
+ case MEI_CLOSE:
+ /* send disconnect message */
+ ret = _mei_irq_thread_close(dev, slots, pos, cl, cmpl_list);
+ if (ret)
+ return ret;
- break;
- case MEI_IOCTL:
- /* connect message */
- if (!mei_other_client_is_connecting(dev,
- cl))
- continue;
- ret = _mei_irq_thread_ioctl(dev, slots,
- cb_pos, cl, cmpl_list);
- if (ret)
- return ret;
+ break;
+ case MEI_READ:
+ /* send flow control message */
+ ret = _mei_irq_thread_read(dev, slots, pos, cl, cmpl_list);
+ if (ret)
+ return ret;
- break;
+ break;
+ case MEI_IOCTL:
+ /* connect message */
+ if (mei_other_client_is_connecting(dev, cl))
+ continue;
+ ret = _mei_irq_thread_ioctl(dev, slots, pos, cl, cmpl_list);
+ if (ret)
+ return ret;
- default:
- BUG();
- }
+ break;
+ default:
+ BUG();
}
+
}
/* complete write list CB */
- if (!dev->write_list.status &&
- !list_empty(&dev->write_list.mei_cb.cb_list)) {
- dev_dbg(&dev->pdev->dev, "complete write list cb.\n");
- list_for_each_entry_safe(cb_pos, cb_next,
- &dev->write_list.mei_cb.cb_list, cb_list) {
- cl = (struct mei_cl *)cb_pos->file_private;
-
- if (cl) {
- if (cl != &dev->iamthif_cl) {
- if (!mei_flow_ctrl_creds(dev,
- cl)) {
- dev_dbg(&dev->pdev->dev,
- "No flow control"
- " credentials for client"
- " %d, not sending.\n",
- cl->host_client_id);
- continue;
- }
- ret = _mei_irq_thread_cmpl(dev, slots,
- cb_pos,
- cl, cmpl_list);
- if (ret)
- return ret;
-
- } else if (cl == &dev->iamthif_cl) {
- /* IAMTHIF IOCTL */
- dev_dbg(&dev->pdev->dev, "complete amthi write cb.\n");
- if (!mei_flow_ctrl_creds(dev,
- cl)) {
- dev_dbg(&dev->pdev->dev,
- "No flow control"
- " credentials for amthi"
- " client %d.\n",
- cl->host_client_id);
- continue;
- }
- ret = _mei_irq_thread_cmpl_iamthif(dev,
- slots,
- cb_pos,
- cl,
- cmpl_list);
- if (ret)
- return ret;
-
- }
+ dev_dbg(&dev->pdev->dev, "complete write list cb.\n");
+ list_for_each_entry_safe(pos, next,
+ &dev->write_list.mei_cb.cb_list, cb_list) {
+ cl = (struct mei_cl *)pos->file_private;
+ if (cl == NULL)
+ continue;
+
+ if (cl != &dev->iamthif_cl) {
+ if (!mei_flow_ctrl_creds(dev, cl)) {
+ dev_dbg(&dev->pdev->dev,
+ "No flow control"
+ " credentials for client"
+ " %d, not sending.\n",
+ cl->host_client_id);
+ continue;
+ }
+ ret = _mei_irq_thread_cmpl(dev, slots,
+ pos,
+ cl, cmpl_list);
+ if (ret)
+ return ret;
+
+ } else if (cl == &dev->iamthif_cl) {
+ /* IAMTHIF IOCTL */
+ dev_dbg(&dev->pdev->dev, "complete amthi write cb.\n");
+ if (!mei_flow_ctrl_creds(dev, cl)) {
+ dev_dbg(&dev->pdev->dev,
+ "No flow control"
+ " credentials for amthi"
+ " client %d.\n",
+ cl->host_client_id);
+ continue;
}
+ ret = _mei_irq_thread_cmpl_iamthif(dev,
+ slots,
+ pos,
+ cl,
+ cmpl_list);
+ if (ret)
+ return ret;
}
+
}
return 0;
}
@@ -1502,18 +1461,13 @@ void mei_timer(struct work_struct *work)
amthi_complete_list = &dev->amthi_read_complete_list.
mei_cb.cb_list;
- if (!list_empty(amthi_complete_list)) {
-
- list_for_each_entry_safe(cb_pos, cb_next,
- amthi_complete_list,
- cb_list) {
+ list_for_each_entry_safe(cb_pos, cb_next, amthi_complete_list, cb_list) {
- cl_pos = cb_pos->file_object->private_data;
+ cl_pos = cb_pos->file_object->private_data;
- /* Finding the AMTHI entry. */
- if (cl_pos == &dev->iamthif_cl)
- list_del(&cb_pos->cb_list);
- }
+ /* Finding the AMTHI entry. */
+ if (cl_pos == &dev->iamthif_cl)
+ list_del(&cb_pos->cb_list);
}
if (dev->iamthif_current_cb)
mei_free_cb_private(dev->iamthif_current_cb);
@@ -1527,8 +1481,8 @@ void mei_timer(struct work_struct *work)
}
}
out:
- schedule_delayed_work(&dev->timer_work, 2 * HZ);
- mutex_unlock(&dev->device_lock);
+ schedule_delayed_work(&dev->timer_work, 2 * HZ);
+ mutex_unlock(&dev->device_lock);
}
/**
@@ -1624,7 +1578,7 @@ end:
wake_up_interruptible(&dev->wait_recvd_msg);
bus_message_received = false;
}
- if (complete_list.status || list_empty(&complete_list.mei_cb.cb_list))
+ if (list_empty(&complete_list.mei_cb.cb_list))
return IRQ_HANDLED;
diff --git a/drivers/staging/mei/iorw.c b/drivers/staging/mei/iorw.c
index 8a61d1266515..0752ead4269a 100644
--- a/drivers/staging/mei/iorw.c
+++ b/drivers/staging/mei/iorw.c
@@ -228,18 +228,15 @@ struct mei_cl_cb *find_amthi_read_list_entry(
struct file *file)
{
struct mei_cl *cl_temp;
- struct mei_cl_cb *cb_pos = NULL;
- struct mei_cl_cb *cb_next = NULL;
-
- if (!dev->amthi_read_complete_list.status &&
- !list_empty(&dev->amthi_read_complete_list.mei_cb.cb_list)) {
- list_for_each_entry_safe(cb_pos, cb_next,
- &dev->amthi_read_complete_list.mei_cb.cb_list, cb_list) {
- cl_temp = (struct mei_cl *)cb_pos->file_private;
- if (cl_temp && cl_temp == &dev->iamthif_cl &&
- cb_pos->file_object == file)
- return cb_pos;
- }
+ struct mei_cl_cb *pos = NULL;
+ struct mei_cl_cb *next = NULL;
+
+ list_for_each_entry_safe(pos, next,
+ &dev->amthi_read_complete_list.mei_cb.cb_list, cb_list) {
+ cl_temp = (struct mei_cl *)pos->file_private;
+ if (cl_temp && cl_temp == &dev->iamthif_cl &&
+ pos->file_object == file)
+ return pos;
}
return NULL;
}
@@ -262,7 +259,7 @@ struct mei_cl_cb *find_amthi_read_list_entry(
* negative on failure.
*/
int amthi_read(struct mei_device *dev, struct file *file,
- char __user *ubuf, size_t length, loff_t *offset)
+ char __user *ubuf, size_t length, loff_t *offset)
{
int rets;
int wait_ret;
@@ -334,8 +331,7 @@ int amthi_read(struct mei_device *dev, struct file *file,
}
}
/* if the whole message will fit remove it from the list */
- if (cb->information >= *offset &&
- length >= (cb->information - *offset))
+ if (cb->information >= *offset && length >= (cb->information - *offset))
list_del(&cb->cb_list);
else if (cb->information > 0 && cb->information <= *offset) {
/* end of the message has been reached */
@@ -356,9 +352,7 @@ int amthi_read(struct mei_device *dev, struct file *file,
* the information may be longer */
length = min_t(size_t, length, (cb->information - *offset));
- if (copy_to_user(ubuf,
- cb->response_buffer.data + *offset,
- length))
+ if (copy_to_user(ubuf, cb->response_buffer.data + *offset, length))
rets = -EFAULT;
else {
rets = length;
@@ -427,7 +421,7 @@ int mei_start_read(struct mei_device *dev, struct mei_cl *cl)
cb->response_buffer.size = dev->me_clients[i].props.max_msg_length;
cb->response_buffer.data =
- kmalloc(cb->response_buffer.size, GFP_KERNEL);
+ kmalloc(cb->response_buffer.size, GFP_KERNEL);
if (!cb->response_buffer.data) {
rets = -ENOMEM;
goto unlock;
@@ -448,8 +442,7 @@ int mei_start_read(struct mei_device *dev, struct mei_cl *cl)
&dev->read_list.mei_cb.cb_list);
}
} else {
- list_add_tail(&cb->cb_list,
- &dev->ctrl_wr_list.mei_cb.cb_list);
+ list_add_tail(&cb->cb_list, &dev->ctrl_wr_list.mei_cb.cb_list);
}
return rets;
unlock:
@@ -482,7 +475,7 @@ int amthi_write(struct mei_device *dev, struct mei_cl_cb *cb)
dev->iamthif_ioctl = true;
dev->iamthif_msg_buf_size = cb->request_buffer.size;
memcpy(dev->iamthif_msg_buf, cb->request_buffer.data,
- cb->request_buffer.size);
+ cb->request_buffer.size);
ret = mei_flow_ctrl_creds(dev, &dev->iamthif_cl);
if (ret < 0)
@@ -534,8 +527,7 @@ int amthi_write(struct mei_device *dev, struct mei_cl_cb *cb)
dev_dbg(&dev->pdev->dev, "No flow control credentials, "
"so add iamthif cb to write list.\n");
- list_add_tail(&cb->cb_list,
- &dev->write_list.mei_cb.cb_list);
+ list_add_tail(&cb->cb_list, &dev->write_list.mei_cb.cb_list);
}
return 0;
}
@@ -550,8 +542,8 @@ int amthi_write(struct mei_device *dev, struct mei_cl_cb *cb)
void mei_run_next_iamthif_cmd(struct mei_device *dev)
{
struct mei_cl *cl_tmp;
- struct mei_cl_cb *cb_pos = NULL;
- struct mei_cl_cb *cb_next = NULL;
+ struct mei_cl_cb *pos = NULL;
+ struct mei_cl_cb *next = NULL;
int status;
if (!dev)
@@ -565,25 +557,22 @@ void mei_run_next_iamthif_cmd(struct mei_device *dev)
dev->iamthif_timer = 0;
dev->iamthif_file_object = NULL;
- if (dev->amthi_cmd_list.status == 0 &&
- !list_empty(&dev->amthi_cmd_list.mei_cb.cb_list)) {
- dev_dbg(&dev->pdev->dev, "complete amthi cmd_list cb.\n");
-
- list_for_each_entry_safe(cb_pos, cb_next,
- &dev->amthi_cmd_list.mei_cb.cb_list, cb_list) {
- list_del(&cb_pos->cb_list);
- cl_tmp = (struct mei_cl *)cb_pos->file_private;
-
- if (cl_tmp && cl_tmp == &dev->iamthif_cl) {
- status = amthi_write(dev, cb_pos);
- if (status) {
- dev_dbg(&dev->pdev->dev,
- "amthi write failed status = %d\n",
- status);
- return;
- }
- break;
+ dev_dbg(&dev->pdev->dev, "complete amthi cmd_list cb.\n");
+
+ list_for_each_entry_safe(pos, next,
+ &dev->amthi_cmd_list.mei_cb.cb_list, cb_list) {
+ list_del(&pos->cb_list);
+ cl_tmp = (struct mei_cl *)pos->file_private;
+
+ if (cl_tmp && cl_tmp == &dev->iamthif_cl) {
+ status = amthi_write(dev, pos);
+ if (status) {
+ dev_dbg(&dev->pdev->dev,
+ "amthi write failed status = %d\n",
+ status);
+ return;
}
+ break;
}
}
}
diff --git a/drivers/staging/mei/main.c b/drivers/staging/mei/main.c
index eb05c36f45d4..1e1a9f996e7c 100644
--- a/drivers/staging/mei/main.c
+++ b/drivers/staging/mei/main.c
@@ -33,6 +33,7 @@
#include <linux/compat.h>
#include <linux/jiffies.h>
#include <linux/interrupt.h>
+#include <linux/miscdevice.h>
#include "mei_dev.h"
#include "mei.h"
@@ -51,18 +52,10 @@ static char mei_driver_name[] = MEI_DRIVER_NAME;
static const char mei_driver_string[] = "Intel(R) Management Engine Interface";
static const char mei_driver_version[] = MEI_DRIVER_VERSION;
-/* mei char device for registration */
-static struct cdev mei_cdev;
-
-/* major number for device */
-static int mei_major;
/* The device pointer */
/* Currently this driver works as long as there is only a single AMT device. */
struct pci_dev *mei_device;
-static struct class *mei_class;
-
-
/* mei_pci_tbl - PCI Device ID Table */
static DEFINE_PCI_DEVICE_TABLE(mei_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82946GZ)},
@@ -105,173 +98,6 @@ MODULE_DEVICE_TABLE(pci, mei_pci_tbl);
static DEFINE_MUTEX(mei_mutex);
-/**
- * mei_probe - Device Initialization Routine
- *
- * @pdev: PCI device structure
- * @ent: entry in kcs_pci_tbl
- *
- * returns 0 on success, <0 on failure.
- */
-static int __devinit mei_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- struct mei_device *dev;
- int err;
-
- mutex_lock(&mei_mutex);
- if (mei_device) {
- err = -EEXIST;
- goto end;
- }
- /* enable pci dev */
- err = pci_enable_device(pdev);
- if (err) {
- printk(KERN_ERR "mei: Failed to enable pci device.\n");
- goto end;
- }
- /* set PCI host mastering */
- pci_set_master(pdev);
- /* pci request regions for mei driver */
- err = pci_request_regions(pdev, mei_driver_name);
- if (err) {
- printk(KERN_ERR "mei: Failed to get pci regions.\n");
- goto disable_device;
- }
- /* allocates and initializes the mei dev structure */
- dev = mei_device_init(pdev);
- if (!dev) {
- err = -ENOMEM;
- goto release_regions;
- }
- /* mapping IO device memory */
- dev->mem_addr = pci_iomap(pdev, 0, 0);
- if (!dev->mem_addr) {
- printk(KERN_ERR "mei: mapping I/O device memory failure.\n");
- err = -ENOMEM;
- goto free_device;
- }
- pci_enable_msi(pdev);
-
- /* request and enable interrupt */
- if (pci_dev_msi_enabled(pdev))
- err = request_threaded_irq(pdev->irq,
- NULL,
- mei_interrupt_thread_handler,
- 0, mei_driver_name, dev);
- else
- err = request_threaded_irq(pdev->irq,
- mei_interrupt_quick_handler,
- mei_interrupt_thread_handler,
- IRQF_SHARED, mei_driver_name, dev);
-
- if (err) {
- printk(KERN_ERR "mei: request_threaded_irq failure. irq = %d\n",
- pdev->irq);
- goto unmap_memory;
- }
- INIT_DELAYED_WORK(&dev->timer_work, mei_timer);
- if (mei_hw_init(dev)) {
- printk(KERN_ERR "mei: Init hw failure.\n");
- err = -ENODEV;
- goto release_irq;
- }
- mei_device = pdev;
- pci_set_drvdata(pdev, dev);
- schedule_delayed_work(&dev->timer_work, HZ);
-
- mutex_unlock(&mei_mutex);
-
- pr_debug("mei: Driver initialization successful.\n");
-
- return 0;
-
-release_irq:
- /* disable interrupts */
- dev->host_hw_state = mei_hcsr_read(dev);
- mei_disable_interrupts(dev);
- flush_scheduled_work();
- free_irq(pdev->irq, dev);
- pci_disable_msi(pdev);
-unmap_memory:
- pci_iounmap(pdev, dev->mem_addr);
-free_device:
- kfree(dev);
-release_regions:
- pci_release_regions(pdev);
-disable_device:
- pci_disable_device(pdev);
-end:
- mutex_unlock(&mei_mutex);
- printk(KERN_ERR "mei: Driver initialization failed.\n");
- return err;
-}
-
-/**
- * mei_remove - Device Removal Routine
- *
- * @pdev: PCI device structure
- *
- * mei_remove is called by the PCI subsystem to alert the driver
- * that it should release a PCI device.
- */
-static void __devexit mei_remove(struct pci_dev *pdev)
-{
- struct mei_device *dev;
-
- if (mei_device != pdev)
- return;
-
- dev = pci_get_drvdata(pdev);
- if (!dev)
- return;
-
- mutex_lock(&dev->device_lock);
-
- mei_wd_stop(dev, false);
-
- mei_device = NULL;
-
- if (dev->iamthif_cl.state == MEI_FILE_CONNECTED) {
- dev->iamthif_cl.state = MEI_FILE_DISCONNECTING;
- mei_disconnect_host_client(dev, &dev->iamthif_cl);
- }
- if (dev->wd_cl.state == MEI_FILE_CONNECTED) {
- dev->wd_cl.state = MEI_FILE_DISCONNECTING;
- mei_disconnect_host_client(dev, &dev->wd_cl);
- }
-
- /* Unregistering watchdog device */
- if (dev->wd_interface_reg)
- watchdog_unregister_device(&amt_wd_dev);
-
- /* remove entry if already in list */
- dev_dbg(&pdev->dev, "list del iamthif and wd file list.\n");
- mei_remove_client_from_file_list(dev, dev->wd_cl.host_client_id);
- mei_remove_client_from_file_list(dev, dev->iamthif_cl.host_client_id);
-
- dev->iamthif_current_cb = NULL;
- dev->me_clients_num = 0;
-
- mutex_unlock(&dev->device_lock);
-
- flush_scheduled_work();
-
- /* disable interrupts */
- mei_disable_interrupts(dev);
-
- free_irq(pdev->irq, dev);
- pci_disable_msi(pdev);
- pci_set_drvdata(pdev, NULL);
-
- if (dev->mem_addr)
- pci_iounmap(pdev, dev->mem_addr);
-
- kfree(dev);
-
- pci_release_regions(pdev);
- pci_disable_device(pdev);
-}
/**
* mei_clear_list - removes all callbacks associated with file
@@ -372,21 +198,17 @@ static struct mei_cl_cb *find_read_list_entry(
struct mei_device *dev,
struct mei_cl *cl)
{
- struct mei_cl_cb *cb_pos = NULL;
- struct mei_cl_cb *cb_next = NULL;
-
- if (!dev->read_list.status &&
- !list_empty(&dev->read_list.mei_cb.cb_list)) {
+ struct mei_cl_cb *pos = NULL;
+ struct mei_cl_cb *next = NULL;
- dev_dbg(&dev->pdev->dev, "remove read_list CB\n");
- list_for_each_entry_safe(cb_pos, cb_next,
- &dev->read_list.mei_cb.cb_list, cb_list) {
- struct mei_cl *cl_temp;
- cl_temp = (struct mei_cl *)cb_pos->file_private;
+ dev_dbg(&dev->pdev->dev, "remove read_list CB\n");
+ list_for_each_entry_safe(pos, next,
+ &dev->read_list.mei_cb.cb_list, cb_list) {
+ struct mei_cl *cl_temp;
+ cl_temp = (struct mei_cl *)pos->file_private;
- if (mei_cl_cmp_id(cl, cl_temp))
- return cb_pos;
- }
+ if (mei_cl_cmp_id(cl, cl_temp))
+ return pos;
}
return NULL;
}
@@ -402,15 +224,16 @@ static struct mei_cl_cb *find_read_list_entry(
static int mei_open(struct inode *inode, struct file *file)
{
struct mei_cl *cl;
- int if_num = iminor(inode), err;
struct mei_device *dev;
+ unsigned long cl_id;
+ int err;
err = -ENODEV;
if (!mei_device)
goto out;
dev = pci_get_drvdata(mei_device);
- if (if_num != MEI_MINOR_NUMBER || !dev)
+ if (!dev)
goto out;
mutex_lock(&dev->device_lock);
@@ -429,14 +252,16 @@ static int mei_open(struct inode *inode, struct file *file)
if (dev->open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT)
goto out_unlock;
- cl->host_client_id = find_first_zero_bit(dev->host_clients_map,
- MEI_CLIENTS_MAX);
- if (cl->host_client_id > MEI_CLIENTS_MAX)
+ cl_id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX);
+ if (cl_id >= MEI_CLIENTS_MAX)
goto out_unlock;
+ cl->host_client_id = cl_id;
+
dev_dbg(&dev->pdev->dev, "client_id = %d\n", cl->host_client_id);
dev->open_handle_count++;
+
list_add_tail(&cl->link, &dev->file_list);
set_bit(cl->host_client_id, dev->host_clients_map);
@@ -446,7 +271,7 @@ static int mei_open(struct inode *inode, struct file *file)
file->private_data = cl;
mutex_unlock(&dev->device_lock);
- return 0;
+ return nonseekable_open(inode, file);
out_unlock:
mutex_unlock(&dev->device_lock);
@@ -492,8 +317,7 @@ static int mei_release(struct inode *inode, struct file *file)
cl->me_client_id);
if (dev->open_handle_count > 0) {
- clear_bit(cl->host_client_id,
- dev->host_clients_map);
+ clear_bit(cl->host_client_id, dev->host_clients_map);
dev->open_handle_count--;
}
mei_remove_client_from_file_list(dev, cl->host_client_id);
@@ -554,7 +378,7 @@ static int mei_release(struct inode *inode, struct file *file)
* returns >=0 data length on success , <0 on error
*/
static ssize_t mei_read(struct file *file, char __user *ubuf,
- size_t length, loff_t *offset)
+ size_t length, loff_t *offset)
{
struct mei_cl *cl = file->private_data;
struct mei_cl_cb *cb_pos = NULL;
@@ -673,9 +497,7 @@ copy_buffer:
/* information size may be longer */
length = min_t(size_t, length, (cb->information - *offset));
- if (copy_to_user(ubuf,
- cb->response_buffer.data + *offset,
- length)) {
+ if (copy_to_user(ubuf, cb->response_buffer.data + *offset, length)) {
rets = -EFAULT;
goto free;
}
@@ -711,7 +533,7 @@ out:
* returns >=0 data length on success , <0 on error
*/
static ssize_t mei_write(struct file *file, const char __user *ubuf,
- size_t length, loff_t *offset)
+ size_t length, loff_t *offset)
{
struct mei_cl *cl = file->private_data;
struct mei_cl_cb *write_cb = NULL;
@@ -762,8 +584,7 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
cl->read_cb = NULL;
cl->read_pending = 0;
}
- } else if (cl->reading_state == MEI_IDLE &&
- !cl->read_pending)
+ } else if (cl->reading_state == MEI_IDLE && !cl->read_pending)
*offset = 0;
@@ -1034,7 +855,7 @@ out:
*/
#ifdef CONFIG_COMPAT
static long mei_compat_ioctl(struct file *file,
- unsigned int cmd, unsigned long data)
+ unsigned int cmd, unsigned long data)
{
return mei_ioctl(file, cmd, (unsigned long)compat_ptr(data));
}
@@ -1090,6 +911,206 @@ out:
return mask;
}
+/*
+ * file operations structure will be used for mei char device.
+ */
+static const struct file_operations mei_fops = {
+ .owner = THIS_MODULE,
+ .read = mei_read,
+ .unlocked_ioctl = mei_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = mei_compat_ioctl,
+#endif
+ .open = mei_open,
+ .release = mei_release,
+ .write = mei_write,
+ .poll = mei_poll,
+ .llseek = no_llseek
+};
+
+
+/*
+ * Misc Device Struct
+ */
+static struct miscdevice mei_misc_device = {
+ .name = MEI_DRIVER_NAME,
+ .fops = &mei_fops,
+ .minor = MISC_DYNAMIC_MINOR,
+};
+
+/**
+ * mei_probe - Device Initialization Routine
+ *
+ * @pdev: PCI device structure
+ * @ent: entry in kcs_pci_tbl
+ *
+ * returns 0 on success, <0 on failure.
+ */
+static int __devinit mei_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct mei_device *dev;
+ int err;
+
+ mutex_lock(&mei_mutex);
+ if (mei_device) {
+ err = -EEXIST;
+ goto end;
+ }
+ /* enable pci dev */
+ err = pci_enable_device(pdev);
+ if (err) {
+ printk(KERN_ERR "mei: Failed to enable pci device.\n");
+ goto end;
+ }
+ /* set PCI host mastering */
+ pci_set_master(pdev);
+ /* pci request regions for mei driver */
+ err = pci_request_regions(pdev, mei_driver_name);
+ if (err) {
+ printk(KERN_ERR "mei: Failed to get pci regions.\n");
+ goto disable_device;
+ }
+ /* allocates and initializes the mei dev structure */
+ dev = mei_device_init(pdev);
+ if (!dev) {
+ err = -ENOMEM;
+ goto release_regions;
+ }
+ /* mapping IO device memory */
+ dev->mem_addr = pci_iomap(pdev, 0, 0);
+ if (!dev->mem_addr) {
+ printk(KERN_ERR "mei: mapping I/O device memory failure.\n");
+ err = -ENOMEM;
+ goto free_device;
+ }
+ pci_enable_msi(pdev);
+
+ /* request and enable interrupt */
+ if (pci_dev_msi_enabled(pdev))
+ err = request_threaded_irq(pdev->irq,
+ NULL,
+ mei_interrupt_thread_handler,
+ 0, mei_driver_name, dev);
+ else
+ err = request_threaded_irq(pdev->irq,
+ mei_interrupt_quick_handler,
+ mei_interrupt_thread_handler,
+ IRQF_SHARED, mei_driver_name, dev);
+
+ if (err) {
+ printk(KERN_ERR "mei: request_threaded_irq failure. irq = %d\n",
+ pdev->irq);
+ goto unmap_memory;
+ }
+ INIT_DELAYED_WORK(&dev->timer_work, mei_timer);
+ if (mei_hw_init(dev)) {
+ printk(KERN_ERR "mei: Init hw failure.\n");
+ err = -ENODEV;
+ goto release_irq;
+ }
+
+ err = misc_register(&mei_misc_device);
+ if (err)
+ goto release_irq;
+
+ mei_device = pdev;
+ pci_set_drvdata(pdev, dev);
+
+
+ schedule_delayed_work(&dev->timer_work, HZ);
+
+ mutex_unlock(&mei_mutex);
+
+ pr_debug("mei: Driver initialization successful.\n");
+
+ return 0;
+
+release_irq:
+ /* disable interrupts */
+ dev->host_hw_state = mei_hcsr_read(dev);
+ mei_disable_interrupts(dev);
+ flush_scheduled_work();
+ free_irq(pdev->irq, dev);
+ pci_disable_msi(pdev);
+unmap_memory:
+ pci_iounmap(pdev, dev->mem_addr);
+free_device:
+ kfree(dev);
+release_regions:
+ pci_release_regions(pdev);
+disable_device:
+ pci_disable_device(pdev);
+end:
+ mutex_unlock(&mei_mutex);
+ printk(KERN_ERR "mei: Driver initialization failed.\n");
+ return err;
+}
+
+/**
+ * mei_remove - Device Removal Routine
+ *
+ * @pdev: PCI device structure
+ *
+ * mei_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device.
+ */
+static void __devexit mei_remove(struct pci_dev *pdev)
+{
+ struct mei_device *dev;
+
+ if (mei_device != pdev)
+ return;
+
+ dev = pci_get_drvdata(pdev);
+ if (!dev)
+ return;
+
+ mutex_lock(&dev->device_lock);
+
+ mei_wd_stop(dev, false);
+
+ mei_device = NULL;
+
+ if (dev->iamthif_cl.state == MEI_FILE_CONNECTED) {
+ dev->iamthif_cl.state = MEI_FILE_DISCONNECTING;
+ mei_disconnect_host_client(dev, &dev->iamthif_cl);
+ }
+ if (dev->wd_cl.state == MEI_FILE_CONNECTED) {
+ dev->wd_cl.state = MEI_FILE_DISCONNECTING;
+ mei_disconnect_host_client(dev, &dev->wd_cl);
+ }
+
+ /* Unregistering watchdog device */
+ mei_watchdog_unregister(dev);
+
+ /* remove entry if already in list */
+ dev_dbg(&pdev->dev, "list del iamthif and wd file list.\n");
+ mei_remove_client_from_file_list(dev, dev->wd_cl.host_client_id);
+ mei_remove_client_from_file_list(dev, dev->iamthif_cl.host_client_id);
+
+ dev->iamthif_current_cb = NULL;
+ dev->me_clients_num = 0;
+
+ mutex_unlock(&dev->device_lock);
+
+ flush_scheduled_work();
+
+ /* disable interrupts */
+ mei_disable_interrupts(dev);
+
+ free_irq(pdev->irq, dev);
+ pci_disable_msi(pdev);
+ pci_set_drvdata(pdev, NULL);
+
+ if (dev->mem_addr)
+ pci_iounmap(pdev, dev->mem_addr);
+
+ kfree(dev);
+
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+}
#ifdef CONFIG_PM
static int mei_pci_suspend(struct device *device)
{
@@ -1173,131 +1194,6 @@ static struct pci_driver mei_driver = {
.driver.pm = MEI_PM_OPS,
};
-/*
- * file operations structure will be used for mei char device.
- */
-static const struct file_operations mei_fops = {
- .owner = THIS_MODULE,
- .read = mei_read,
- .unlocked_ioctl = mei_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = mei_compat_ioctl,
-#endif
- .open = mei_open,
- .release = mei_release,
- .write = mei_write,
- .poll = mei_poll,
-};
-
-/**
- * mei_registration_cdev - sets up the cdev structure for mei device.
- *
- * @dev: char device struct
- * @hminor: minor number for registration char device
- * @fops: file operations structure
- *
- * returns 0 on success, <0 on failure.
- */
-static int mei_registration_cdev(struct cdev *dev, int hminor,
- const struct file_operations *fops)
-{
- int ret, devno = MKDEV(mei_major, hminor);
-
- cdev_init(dev, fops);
- dev->owner = THIS_MODULE;
- ret = cdev_add(dev, devno, 1);
- /* Fail gracefully if need be */
- if (ret)
- printk(KERN_ERR "mei: Error %d registering mei device %d\n",
- ret, hminor);
- return ret;
-}
-
-/**
- * mei_register_cdev - registers mei char device
- *
- * returns 0 on success, <0 on failure.
- */
-static int mei_register_cdev(void)
-{
- int ret;
- dev_t dev;
-
- /* registration of char devices */
- ret = alloc_chrdev_region(&dev, MEI_MINORS_BASE, MEI_MINORS_COUNT,
- MEI_DRIVER_NAME);
- if (ret) {
- printk(KERN_ERR "mei: Error allocating char device region.\n");
- return ret;
- }
-
- mei_major = MAJOR(dev);
-
- ret = mei_registration_cdev(&mei_cdev, MEI_MINOR_NUMBER,
- &mei_fops);
- if (ret)
- unregister_chrdev_region(MKDEV(mei_major, MEI_MINORS_BASE),
- MEI_MINORS_COUNT);
-
- return ret;
-}
-
-/**
- * mei_unregister_cdev - unregisters mei char device
- */
-static void mei_unregister_cdev(void)
-{
- cdev_del(&mei_cdev);
- unregister_chrdev_region(MKDEV(mei_major, MEI_MINORS_BASE),
- MEI_MINORS_COUNT);
-}
-
-/**
- * mei_sysfs_device_create - adds device entry to sysfs
- *
- * returns 0 on success, <0 on failure.
- */
-static int mei_sysfs_device_create(void)
-{
- struct class *class;
- void *tmphdev;
- int err;
-
- class = class_create(THIS_MODULE, MEI_DRIVER_NAME);
- if (IS_ERR(class)) {
- err = PTR_ERR(class);
- printk(KERN_ERR "mei: Error creating mei class.\n");
- goto err_out;
- }
-
- tmphdev = device_create(class, NULL, mei_cdev.dev, NULL,
- MEI_DEV_NAME);
- if (IS_ERR(tmphdev)) {
- err = PTR_ERR(tmphdev);
- goto err_destroy;
- }
-
- mei_class = class;
- return 0;
-
-err_destroy:
- class_destroy(class);
-err_out:
- return err;
-}
-
-/**
- * mei_sysfs_device_remove - unregisters the device entry on sysfs
- */
-static void mei_sysfs_device_remove(void)
-{
- if (IS_ERR_OR_NULL(mei_class))
- return;
-
- device_destroy(mei_class, mei_cdev.dev);
- class_destroy(mei_class);
-}
-
/**
* mei_init_module - Driver Registration Routine
*
@@ -1314,26 +1210,9 @@ static int __init mei_init_module(void)
mei_driver_string, mei_driver_version);
/* init pci module */
ret = pci_register_driver(&mei_driver);
- if (ret < 0) {
+ if (ret < 0)
printk(KERN_ERR "mei: Error registering driver.\n");
- goto end;
- }
- ret = mei_register_cdev();
- if (ret)
- goto unregister_pci;
-
- ret = mei_sysfs_device_create();
- if (ret)
- goto unregister_cdev;
-
- return ret;
-
-unregister_cdev:
- mei_unregister_cdev();
-unregister_pci:
- pci_unregister_driver(&mei_driver);
-end:
return ret;
}
@@ -1347,8 +1226,7 @@ module_init(mei_init_module);
*/
static void __exit mei_exit_module(void)
{
- mei_sysfs_device_remove();
- mei_unregister_cdev();
+ misc_deregister(&mei_misc_device);
pci_unregister_driver(&mei_driver);
pr_debug("mei: Driver unloaded successfully.\n");
diff --git a/drivers/staging/mei/mei.txt b/drivers/staging/mei/mei.txt
index 17302ad2531f..516bfe7319a6 100644
--- a/drivers/staging/mei/mei.txt
+++ b/drivers/staging/mei/mei.txt
@@ -1,78 +1,74 @@
-Intel MEI
+Intel(R) Management Engine Interface (Intel(R) MEI)
=======================
Introduction
=======================
-The Intel Management Engine (Intel ME) is an isolated and
-protected computing resource (Coprocessor) residing inside
-Intel chipsets. The Intel ME provides support for computer/IT
-management features.
-The Feature set depends on the Intel chipset SKU.
+The Intel Management Engine (Intel ME) is an isolated and protected computing
+resource (Co-processor) residing inside certain Intel chipsets. The Intel ME
+provides support for computer/IT management features. The feature set
+depends on the Intel chipset SKU.
-The Intel Management Engine Interface (Intel MEI, previously known
-as HECI) is the interface between the Host and Intel ME.
-This interface is exposed to the host as a PCI device.
-The Intel MEI Driver is in charge of the communication channel
-between a host application and the ME feature.
+The Intel Management Engine Interface (Intel MEI, previously known as HECI)
+is the interface between the Host and Intel ME. This interface is exposed
+to the host as a PCI device. The Intel MEI Driver is in charge of the
+communication channel between a host application and the Intel ME feature.
-Each Intel ME feature (Intel ME Client) is addressed by
-GUID/UUID and each feature defines its own protocol.
-The protocol is message-based with a header and payload up to
-512 bytes.
+Each Intel ME feature (Intel ME Client) is addressed by a GUID/UUID and
+each client has its own protocol. The protocol is message-based with a
+header and payload up to 512 bytes.
-[place holder to URL to protocol definitions]
-
-Prominent usage of the Interface is to communicate with
-Intel Active Management Technology (Intel AMT)
-implemented in firmware running on the Intel ME.
+Prominent usage of the Intel ME Interface is to communicate with Intel(R)
+Active Management Technology (Intel AMT) implemented in firmware running on
+the Intel ME.
Intel AMT provides the ability to manage a host remotely out-of-band (OOB)
-even when the host processor has crashed or is in a sleep state.
+even when the operating system running on the host processor has crashed or
+is in a sleep state.
Some examples of Intel AMT usage are:
- Monitoring hardware state and platform components
- - Remote power off/on (useful for green computing or overnight IT maintenance)
+ - Remote power off/on (useful for green computing or overnight IT
+ maintenance)
- OS updates
- Storage of useful platform information such as software assets
- - built-in hardware KVM
- - selective network isolation of Ethernet and IP protocol flows based on
- policies set by a remote management console
+ - Built-in hardware KVM
+ - Selective network isolation of Ethernet and IP protocol flows based
+ on policies set by a remote management console
- IDE device redirection from remote management console
Intel AMT (OOB) communication is based on SOAP (deprecated
-starting with Release 6.0) over HTTP/HTTPS or WS-Management protocol
-over HTTP and HTTPS that are received from a remote
-management console application.
+starting with Release 6.0) over HTTP/S or WS-Management protocol over
+HTTP/S that are received from a remote management console application.
For more information about Intel AMT:
-http://software.intel.com/sites/manageability/AMT_Implementation_and_Reference_Guide/WordDocuments/aboutintelamt.htm
-
+http://software.intel.com/sites/manageability/AMT_Implementation_and_Reference_Guide
-MEI Driver
+Intel MEI Driver
=======================
-The driver exposes a character device called /dev/mei.
+The driver exposes a misc device called /dev/mei.
-An application maintains communication with an ME feature while
-/dev/mei is open. The binding to a specific features is performed
-by calling MEI_CONNECT_CLIENT_IOCTL, which passes the desired UUID.
-The number of instances of an ME feature that can be opened
-at the same time depends on the ME feature, but most of the
+An application maintains communication with an Intel ME feature while
+/dev/mei is open. The binding to a specific feature is performed by calling
+IOCTL_MEI_CONNECT_CLIENT, which passes the desired UUID.
+The number of instances of an Intel ME feature that can be opened
+at the same time depends on the Intel ME feature, but most of the
features allow only a single instance.
-
-The Intel AMT Host Interface (AMTHI) feature requires multiple
-simultaneous user applications, therefore the MEI driver handles
+The Intel AMT Host Interface (Intel AMTHI) feature supports multiple
+simultaneous user applications. Therefore, the Intel MEI driver handles
this internally by maintaining request queues for the applications.
-The driver is oblivious to data that are passed between
+The driver is oblivious to the data that is passed between the firmware
+feature and the host application.
-Because some of the ME features can change the system
-configuration, the driver by default allows only privileged
+Because some of the Intel ME features can change the system
+configuration, the driver by default allows only a privileged
user to access it.
-A Code snippet for application communicating with AMTHI client:
+A code snippet for an application communicating with an
+Intel AMTHI client:
struct mei_connect_client_data data;
fd = open(MEI_DEVICE);
@@ -80,7 +76,7 @@ A Code snippet for application communicating with AMTHI client:
ioctl(fd, IOCTL_MEI_CONNECT_CLIENT, &data);
- printf(“Ver=%d, MaxLen=%ld\n”,
+ printf("Ver=%d, MaxLen=%ld\n",
data.d.in_client_uuid.protocol_version,
data.d.in_client_uuid.max_msg_length);
@@ -95,76 +91,106 @@ A Code snippet for application communicating with AMTHI client:
[...]
close(fd);
-ME Applications:
+IOCTL:
+======
+The Intel MEI Driver supports the following IOCTL command:
+ IOCTL_MEI_CONNECT_CLIENT Connect to firmware Feature (client).
+
+ usage:
+ struct mei_connect_client_data clientData;
+ ioctl(fd, IOCTL_MEI_CONNECT_CLIENT, &clientData);
+
+ inputs:
+ mei_connect_client_data struct contains the following
+ input field:
+
+ in_client_uuid - UUID of the FW feature to connect to.
+ outputs:
+ out_client_properties - Client Properties: MTU and Protocol Version.
+
+ error returns:
+ EINVAL Wrong IOCTL Number
+ ENODEV Device or Connection is not initialized or ready.
+ (e.g. Wrong UUID)
+ ENOMEM Unable to allocate memory for client internal data.
+ EFAULT Fatal Error (e.g. Unable to access user input data)
+ EBUSY Connection Already Open
+
+ Notes:
+ max_msg_length (MTU) in client properties describes the maximum
+ data that can be sent or received (e.g. if MTU=2K, requests of up
+ to 2k bytes can be sent and responses of up to 2k bytes received).
+
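+ Below is a minimal sketch of a request/response exchange after a
+ successful connect. Error handling is trimmed, MY_CLIENT_UUID is a
+ placeholder for a real firmware client UUID, and the header name and
+ the data.d member layout follow the snippet above and the driver
+ header in this tree:
+
+	#include <string.h>
+	#include <fcntl.h>
+	#include <unistd.h>
+	#include <sys/ioctl.h>
+	#include "mei.h"	/* IOCTL_MEI_CONNECT_CLIENT and connect data struct */
+
+	int mei_send_receive(void)
+	{
+		struct mei_connect_client_data data;
+		unsigned char req[128], rsp[2048];
+		int fd;
+
+		fd = open("/dev/mei", O_RDWR);
+		if (fd < 0)
+			return -1;
+
+		memset(&data, 0, sizeof(data));
+		data.d.in_client_uuid = MY_CLIENT_UUID;	/* placeholder FW client UUID */
+		if (ioctl(fd, IOCTL_MEI_CONNECT_CLIENT, &data) < 0) {
+			close(fd);
+			return -1;
+		}
+
+		/* fill req with a client-specific request; a single message
+		 * must not exceed max_msg_length (MTU) reported at connect */
+		memset(req, 0, sizeof(req));
+		write(fd, req, sizeof(req));
+		read(fd, rsp, sizeof(rsp));
+
+		close(fd);
+		return 0;
+	}
+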
+Intel ME Applications:
==============
1) Intel Local Management Service (Intel LMS)
- Applications running locally on the platform communicate with
- Intel AMT Release 2.0 and later releases in the same way
- that network applications do via SOAP over HTTP (deprecated
- starting with Release 6.0) or with WS-Management over SOAP over
- HTTP. which means that some Intel AMT feature can be access
- from a local application using same Network interface as for
- remote application.
-
- When a local application sends a message addressed to the local
- Intel AMT host name, the Local Manageability Service (LMS),
- which listens for traffic directed to the host name, intercepts
- the message and routes it to the Intel Management Engine Interface.
+
+ Applications running locally on the platform communicate with Intel AMT Release
+ 2.0 and later releases in the same way that network applications do via SOAP
+ over HTTP (deprecated starting with Release 6.0) or with WS-Management over
+ SOAP over HTTP. This means that some Intel AMT features can be accessed from a
+ local application using the same network interface as a remote application
+ communicating with Intel AMT over the network.
+
+ When a local application sends a message addressed to the local Intel AMT host
+ name, the Intel LMS, which listens for traffic directed to the host name,
+ intercepts the message and routes it to the Intel MEI.
For more information:
- http://software.intel.com/sites/manageability/AMT_Implementation_and_
- Reference_Guide/WordDocuments/localaccess1.htm
-
- The LMS opens a connection using the MEI driver to the LMS
- FW feature using a defined UUID and then communicates with the
- feature using a protocol
- called Intel(R) AMT Port Forwarding Protocol (APF protocol).
- The protocol is used to maintain multiple sessions with
- Intel AMT from a single application.
- See the protocol specification in
- the Intel(R) AMT Implementation and Reference Guide
- http://software.intel.com/sites/manageability/AMT_Implementation_and_Reference_Guide/HTMLDocuments/MPSDocuments/Intel%20AMT%20Port%20Forwarding%20Protocol%20Reference%20Manual.pdf
-
- 2) Intel AMT Remote configuration using a Local Agent:
+ http://software.intel.com/sites/manageability/AMT_Implementation_and_Reference_Guide
+ Under "About Intel AMT" => "Local Access"
+
+ For downloading Intel LMS:
+ http://software.intel.com/en-us/articles/download-the-latest-intel-amt-open-source-drivers/
+
+ The Intel LMS opens a connection using the Intel MEI driver to the Intel LMS
+ firmware feature using a defined UUID and then communicates with the feature
+ using a protocol called Intel AMT Port Forwarding Protocol (Intel APF protocol).
+ The protocol is used to maintain multiple sessions with Intel AMT from a
+ single application.
+
+ See the protocol specification in the Intel AMT Software Development Kit (SDK)
+ http://software.intel.com/sites/manageability/AMT_Implementation_and_Reference_Guide
+ Under "SDK Resources" => "Intel(R) vPro(TM) Gateway(MPS)"
+ => "Information for Intel(R) vPro(TM) Gateway Developers"
+ => "Description of the Intel AMT Port Forwarding (APF) Protocol"
+
+ 2) Intel AMT Remote configuration using a Local Agent
A Local Agent enables IT personnel to configure Intel AMT out-of-the-box
- without requiring installing additional data to enable setup.
- The remote configuration process may involve an ISV-developed remote
- configuration agent that runs on the host.
+ without requiring the installation of additional data to enable setup. The remote
+ configuration process may involve an ISV-developed remote configuration
+ agent that runs on the host.
For more information:
- http://software.intel.com/sites/manageability/AMT_Implementation_and_Reference_Guide/WordDocuments/remoteconfigurationwithalocalagent.htm
+ http://software.intel.com/sites/manageability/AMT_Implementation_and_Reference_Guide
+ Under "Setup and Configuration of Intel AMT" =>
+ "SDK Tools Supporting Setup and Configuration" =>
+ "Using the Local Agent Sample"
+
+ An open source Intel AMT configuration utility, implementing a local agent
+ that accesses the Intel MEI driver, can be found here:
+ http://software.intel.com/en-us/articles/download-the-latest-intel-amt-open-source-drivers/
- How the Local Agent Works (including Command structs):
- http://software.intel.com/sites/manageability/AMT_Implementation_and_Reference_Guide/WordDocuments/howthelocalagentsampleworks.htm
Intel AMT OS Health Watchdog:
=============================
The Intel AMT Watchdog is an OS Health (Hang/Crash) watchdog.
Whenever the OS hangs or crashes, Intel AMT will send an event
-to whoever subscribed to this event. This mechanism means that
-IT knows when a platform crashes even when there is a hard failure
-on the host.
-The AMT Watchdog is composed of two parts:
- 1) FW Feature - that receives the heartbeats
- and sends an event when the heartbeats stop.
- 2) MEI driver – connects to the watchdog (WD) feature,
- configures the watchdog and sends the heartbeats.
-
-The MEI driver configures the Watchdog to expire by default
-every 120sec unless set by the user using module parameters.
-The Driver then sends heartbeats every 2sec.
+to any subscriber to this event. This mechanism means that
+IT knows when a platform crashes even when there is a hard failure on the host.
-If WD feature does not exist (i.e. the connection failed),
-the MEI driver will disable the sending of heartbeats.
+The Intel AMT Watchdog is composed of two parts:
+ 1) Firmware feature - receives the heartbeats
+ and sends an event when the heartbeats stop.
+ 2) Intel MEI driver - connects to the watchdog feature, configures the
+ watchdog and sends the heartbeats.
-Module Parameters
-=================
-watchdog_timeout - the user can use this module parameter
-to change the watchdog timeout setting.
+The Intel MEI driver uses the kernel watchdog to configure the Intel AMT
+Watchdog and to send heartbeats to it. The default timeout of the
+watchdog is 120 seconds.
-This value sets the Intel AMT watchdog timeout interval in seconds;
-the default value is 120sec.
-in order to disable the watchdog activites set the value to 0.
-Normal values should be between 120 and 65535
+If the Intel AMT Watchdog feature does not exist (i.e. the connection failed),
+the Intel MEI driver will disable the sending of heartbeats.
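+
+A minimal user-space sketch of keeping a registered kernel watchdog alive
+(the node name /dev/watchdog and the heartbeat interval are assumptions
+here and depend on the system configuration):
+
+	#include <fcntl.h>
+	#include <unistd.h>
+	#include <sys/ioctl.h>
+	#include <linux/watchdog.h>
+
+	int main(void)
+	{
+		int fd = open("/dev/watchdog", O_WRONLY);
+
+		if (fd < 0)
+			return 1;
+
+		for (;;) {
+			ioctl(fd, WDIOC_KEEPALIVE, 0);	/* send a heartbeat */
+			sleep(60);	/* well inside the 120 second timeout */
+		}
+	}
+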
Supported Chipsets:
==================
diff --git a/drivers/staging/mei/mei_dev.h b/drivers/staging/mei/mei_dev.h
index af4b1af9eeac..82bacfc624c5 100644
--- a/drivers/staging/mei/mei_dev.h
+++ b/drivers/staging/mei/mei_dev.h
@@ -23,13 +23,6 @@
#include "hw.h"
/*
- * MEI Char Driver Minors
- */
-#define MEI_MINORS_BASE 1
-#define MEI_MINORS_COUNT 1
-#define MEI_MINOR_NUMBER 1
-
-/*
* watch dog definition
*/
#define MEI_WATCHDOG_DATA_SIZE 16
@@ -42,11 +35,6 @@
*/
extern struct pci_dev *mei_device;
-/*
- * AMT Watchdog Device
- */
-#define INTEL_AMT_WATCHDOG_ID "INTCAMT"
-extern struct watchdog_device amt_wd_dev;
/*
* AMTHI Client UUID
@@ -175,7 +163,6 @@ struct mei_cl {
struct mei_io_list {
struct mei_cl_cb mei_cb;
- int status;
};
/* MEI private device struct */
diff --git a/drivers/staging/mei/wd.c b/drivers/staging/mei/wd.c
index ffca7ca32658..8094941a98f1 100644
--- a/drivers/staging/mei/wd.c
+++ b/drivers/staging/mei/wd.c
@@ -35,12 +35,16 @@ const u8 mei_wd_state_independence_msg[3][4] = {
{0x07, 0x02, 0x01, 0x10}
};
+/*
+ * AMT Watchdog Device
+ */
+#define INTEL_AMT_WATCHDOG_ID "INTCAMT"
+
/* UUIDs for AMT F/W clients */
const uuid_le mei_wd_guid = UUID_LE(0x05B79A6F, 0x4628, 0x4D7F, 0x89,
0x9D, 0xA9, 0x15, 0x14, 0xCB,
0x32, 0xAB);
-
void mei_wd_set_start_timeout(struct mei_device *dev, u16 timeout)
{
dev_dbg(&dev->pdev->dev, "timeout=%d.\n", timeout);
@@ -331,14 +335,14 @@ static int mei_wd_ops_set_timeout(struct watchdog_device *wd_dev, unsigned int t
/*
* Watchdog Device structs
*/
-const struct watchdog_ops wd_ops = {
+static const struct watchdog_ops wd_ops = {
.owner = THIS_MODULE,
.start = mei_wd_ops_start,
.stop = mei_wd_ops_stop,
.ping = mei_wd_ops_ping,
.set_timeout = mei_wd_ops_set_timeout,
};
-const struct watchdog_info wd_info = {
+static const struct watchdog_info wd_info = {
.identity = INTEL_AMT_WATCHDOG_ID,
.options = WDIOF_KEEPALIVEPING,
};
@@ -352,3 +356,25 @@ struct watchdog_device amt_wd_dev = {
};
+void mei_watchdog_register(struct mei_device *dev)
+{
+ dev_dbg(&dev->pdev->dev, "dev->wd_timeout =%d.\n", dev->wd_timeout);
+
+ dev->wd_due_counter = !!dev->wd_timeout;
+
+ if (watchdog_register_device(&amt_wd_dev)) {
+ dev_err(&dev->pdev->dev, "unable to register watchdog device.\n");
+ dev->wd_interface_reg = false;
+ } else {
+ dev_dbg(&dev->pdev->dev, "successfully registered watchdog interface.\n");
+ dev->wd_interface_reg = true;
+ }
+}
+
+void mei_watchdog_unregister(struct mei_device *dev)
+{
+ if (dev->wd_interface_reg)
+ watchdog_unregister_device(&amt_wd_dev);
+ dev->wd_interface_reg = false;
+}
+
diff --git a/drivers/staging/nvec/nvec.c b/drivers/staging/nvec/nvec.c
index e06b867d1e0c..fafdfa25e139 100644
--- a/drivers/staging/nvec/nvec.c
+++ b/drivers/staging/nvec/nvec.c
@@ -27,6 +27,8 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
#include <linux/list.h>
#include <linux/mfd/core.h>
#include <linux/mutex.h>
@@ -727,8 +729,24 @@ static int __devinit tegra_nvec_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, nvec);
nvec->dev = &pdev->dev;
- nvec->gpio = pdata->gpio;
- nvec->i2c_addr = pdata->i2c_addr;
+
+ if (pdata) {
+ nvec->gpio = pdata->gpio;
+ nvec->i2c_addr = pdata->i2c_addr;
+ } else if (nvec->dev->of_node) {
+ nvec->gpio = of_get_named_gpio(nvec->dev->of_node, "request-gpios", 0);
+ if (nvec->gpio < 0) {
+ dev_err(&pdev->dev, "no gpio specified");
+ goto failed;
+ }
+ if (of_property_read_u32(nvec->dev->of_node, "slave-addr", &nvec->i2c_addr)) {
+ dev_err(&pdev->dev, "no i2c address specified");
+ goto failed;
+ }
+ } else {
+ dev_err(&pdev->dev, "no platform data\n");
+ goto failed;
+ }
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
@@ -893,6 +911,13 @@ static int tegra_nvec_resume(struct platform_device *pdev)
#define tegra_nvec_resume NULL
#endif
+/* Match table for of_platform binding */
+static const struct of_device_id nvidia_nvec_of_match[] __devinitconst = {
+ { .compatible = "nvidia,nvec", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, nvidia_nvec_of_match);
+
static struct platform_driver nvec_device_driver = {
.probe = tegra_nvec_probe,
.remove = __devexit_p(tegra_nvec_remove),
@@ -901,6 +926,7 @@ static struct platform_driver nvec_device_driver = {
.driver = {
.name = "nvec",
.owner = THIS_MODULE,
+ .of_match_table = nvidia_nvec_of_match,
}
};
diff --git a/drivers/staging/octeon/Makefile b/drivers/staging/octeon/Makefile
index fc850bac88c1..9012dee0c348 100644
--- a/drivers/staging/octeon/Makefile
+++ b/drivers/staging/octeon/Makefile
@@ -20,9 +20,4 @@ octeon-ethernet-y += ethernet-sgmii.o
octeon-ethernet-y += ethernet-spi.o
octeon-ethernet-y += ethernet-tx.o
octeon-ethernet-y += ethernet-xaui.o
-octeon-ethernet-y += cvmx-pko.o cvmx-spi.o cvmx-cmd-queue.o \
- cvmx-helper-board.o cvmx-helper.o cvmx-helper-xaui.o \
- cvmx-helper-rgmii.o cvmx-helper-sgmii.o cvmx-helper-npi.o \
- cvmx-helper-loop.o cvmx-helper-spi.o cvmx-helper-util.o \
- cvmx-interrupt-decodes.o cvmx-interrupt-rsl.o
diff --git a/drivers/staging/octeon/cvmx-address.h b/drivers/staging/octeon/cvmx-address.h
deleted file mode 100644
index 3c74d826e2e6..000000000000
--- a/drivers/staging/octeon/cvmx-address.h
+++ /dev/null
@@ -1,274 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2009 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/**
- * Typedefs and defines for working with Octeon physical addresses.
- *
- */
-#ifndef __CVMX_ADDRESS_H__
-#define __CVMX_ADDRESS_H__
-
-#if 0
-typedef enum {
- CVMX_MIPS_SPACE_XKSEG = 3LL,
- CVMX_MIPS_SPACE_XKPHYS = 2LL,
- CVMX_MIPS_SPACE_XSSEG = 1LL,
- CVMX_MIPS_SPACE_XUSEG = 0LL
-} cvmx_mips_space_t;
-#endif
-
-typedef enum {
- CVMX_MIPS_XKSEG_SPACE_KSEG0 = 0LL,
- CVMX_MIPS_XKSEG_SPACE_KSEG1 = 1LL,
- CVMX_MIPS_XKSEG_SPACE_SSEG = 2LL,
- CVMX_MIPS_XKSEG_SPACE_KSEG3 = 3LL
-} cvmx_mips_xkseg_space_t;
-
-/* decodes <14:13> of a kseg3 window address */
-typedef enum {
- CVMX_ADD_WIN_SCR = 0L,
- /* see cvmx_add_win_dma_dec_t for further decode */
- CVMX_ADD_WIN_DMA = 1L,
- CVMX_ADD_WIN_UNUSED = 2L,
- CVMX_ADD_WIN_UNUSED2 = 3L
-} cvmx_add_win_dec_t;
-
-/* decode within DMA space */
-typedef enum {
- /*
- * Add store data to the write buffer entry, allocating it if
- * necessary.
- */
- CVMX_ADD_WIN_DMA_ADD = 0L,
- /* send out the write buffer entry to DRAM */
- CVMX_ADD_WIN_DMA_SENDMEM = 1L,
- /* store data must be normal DRAM memory space address in this case */
- /* send out the write buffer entry as an IOBDMA command */
- CVMX_ADD_WIN_DMA_SENDDMA = 2L,
- /* see CVMX_ADD_WIN_DMA_SEND_DEC for data contents */
- /* send out the write buffer entry as an IO write */
- CVMX_ADD_WIN_DMA_SENDIO = 3L,
- /* store data must be normal IO space address in this case */
- /* send out a single-tick command on the NCB bus */
- CVMX_ADD_WIN_DMA_SENDSINGLE = 4L,
- /* no write buffer data needed/used */
-} cvmx_add_win_dma_dec_t;
-
-/*
- * Physical Address Decode
- *
- * Octeon-I HW never interprets this X (<39:36> reserved
- * for future expansion), software should set to 0.
- *
- * - 0x0 XXX0 0000 0000 to DRAM Cached
- * - 0x0 XXX0 0FFF FFFF
- *
- * - 0x0 XXX0 1000 0000 to Boot Bus Uncached (Converted to 0x1 00X0 1000 0000
- * - 0x0 XXX0 1FFF FFFF + EJTAG to 0x1 00X0 1FFF FFFF)
- *
- * - 0x0 XXX0 2000 0000 to DRAM Cached
- * - 0x0 XXXF FFFF FFFF
- *
- * - 0x1 00X0 0000 0000 to Boot Bus Uncached
- * - 0x1 00XF FFFF FFFF
- *
- * - 0x1 01X0 0000 0000 to Other NCB Uncached
- * - 0x1 FFXF FFFF FFFF devices
- *
- * Decode of all Octeon addresses
- */
-typedef union {
-
- uint64_t u64;
- /* mapped or unmapped virtual address */
- struct {
- uint64_t R:2;
- uint64_t offset:62;
- } sva;
-
- /* mapped USEG virtual addresses (typically) */
- struct {
- uint64_t zeroes:33;
- uint64_t offset:31;
- } suseg;
-
- /* mapped or unmapped virtual address */
- struct {
- uint64_t ones:33;
- uint64_t sp:2;
- uint64_t offset:29;
- } sxkseg;
-
- /*
- * physical address accessed through xkphys unmapped virtual
- * address.
- */
- struct {
- uint64_t R:2; /* CVMX_MIPS_SPACE_XKPHYS in this case */
- uint64_t cca:3; /* ignored by octeon */
- uint64_t mbz:10;
- uint64_t pa:49; /* physical address */
- } sxkphys;
-
- /* physical address */
- struct {
- uint64_t mbz:15;
- /* if set, the address is uncached and resides on MCB bus */
- uint64_t is_io:1;
- /*
- * the hardware ignores this field when is_io==0, else
- * device ID.
- */
- uint64_t did:8;
- /* the hardware ignores <39:36> in Octeon I */
- uint64_t unaddr:4;
- uint64_t offset:36;
- } sphys;
-
- /* physical mem address */
- struct {
- /* techically, <47:40> are dont-cares */
- uint64_t zeroes:24;
- /* the hardware ignores <39:36> in Octeon I */
- uint64_t unaddr:4;
- uint64_t offset:36;
- } smem;
-
- /* physical IO address */
- struct {
- uint64_t mem_region:2;
- uint64_t mbz:13;
- /* 1 in this case */
- uint64_t is_io:1;
- /*
- * The hardware ignores this field when is_io==0, else
- * device ID.
- */
- uint64_t did:8;
- /* the hardware ignores <39:36> in Octeon I */
- uint64_t unaddr:4;
- uint64_t offset:36;
- } sio;
-
- /*
- * Scratchpad virtual address - accessed through a window at
- * the end of kseg3
- */
- struct {
- uint64_t ones:49;
- /* CVMX_ADD_WIN_SCR (0) in this case */
- cvmx_add_win_dec_t csrdec:2;
- uint64_t addr:13;
- } sscr;
-
- /* there should only be stores to IOBDMA space, no loads */
- /*
- * IOBDMA virtual address - accessed through a window at the
- * end of kseg3
- */
- struct {
- uint64_t ones:49;
- uint64_t csrdec:2; /* CVMX_ADD_WIN_DMA (1) in this case */
- uint64_t unused2:3;
- uint64_t type:3;
- uint64_t addr:7;
- } sdma;
-
- struct {
- uint64_t didspace:24;
- uint64_t unused:40;
- } sfilldidspace;
-
-} cvmx_addr_t;
-
-/* These macros for used by 32 bit applications */
-
-#define CVMX_MIPS32_SPACE_KSEG0 1l
-#define CVMX_ADD_SEG32(segment, add) \
- (((int32_t)segment << 31) | (int32_t)(add))
-
-/*
- * Currently all IOs are performed using XKPHYS addressing. Linux uses
- * the CvmMemCtl register to enable XKPHYS addressing to IO space from
- * user mode. Future OSes may need to change the upper bits of IO
- * addresses. The following define controls the upper two bits for all
- * IO addresses generated by the simple executive library.
- */
-#define CVMX_IO_SEG CVMX_MIPS_SPACE_XKPHYS
-
-/* These macros simplify the process of creating common IO addresses */
-#define CVMX_ADD_SEG(segment, add) ((((uint64_t)segment) << 62) | (add))
-#ifndef CVMX_ADD_IO_SEG
-#define CVMX_ADD_IO_SEG(add) CVMX_ADD_SEG(CVMX_IO_SEG, (add))
-#endif
-#define CVMX_ADDR_DIDSPACE(did) (((CVMX_IO_SEG) << 22) | ((1ULL) << 8) | (did))
-#define CVMX_ADDR_DID(did) (CVMX_ADDR_DIDSPACE(did) << 40)
-#define CVMX_FULL_DID(did, subdid) (((did) << 3) | (subdid))
-
- /* from include/ncb_rsl_id.v */
-#define CVMX_OCT_DID_MIS 0ULL /* misc stuff */
-#define CVMX_OCT_DID_GMX0 1ULL
-#define CVMX_OCT_DID_GMX1 2ULL
-#define CVMX_OCT_DID_PCI 3ULL
-#define CVMX_OCT_DID_KEY 4ULL
-#define CVMX_OCT_DID_FPA 5ULL
-#define CVMX_OCT_DID_DFA 6ULL
-#define CVMX_OCT_DID_ZIP 7ULL
-#define CVMX_OCT_DID_RNG 8ULL
-#define CVMX_OCT_DID_IPD 9ULL
-#define CVMX_OCT_DID_PKT 10ULL
-#define CVMX_OCT_DID_TIM 11ULL
-#define CVMX_OCT_DID_TAG 12ULL
- /* the rest are not on the IO bus */
-#define CVMX_OCT_DID_L2C 16ULL
-#define CVMX_OCT_DID_LMC 17ULL
-#define CVMX_OCT_DID_SPX0 18ULL
-#define CVMX_OCT_DID_SPX1 19ULL
-#define CVMX_OCT_DID_PIP 20ULL
-#define CVMX_OCT_DID_ASX0 22ULL
-#define CVMX_OCT_DID_ASX1 23ULL
-#define CVMX_OCT_DID_IOB 30ULL
-
-#define CVMX_OCT_DID_PKT_SEND CVMX_FULL_DID(CVMX_OCT_DID_PKT, 2ULL)
-#define CVMX_OCT_DID_TAG_SWTAG CVMX_FULL_DID(CVMX_OCT_DID_TAG, 0ULL)
-#define CVMX_OCT_DID_TAG_TAG1 CVMX_FULL_DID(CVMX_OCT_DID_TAG, 1ULL)
-#define CVMX_OCT_DID_TAG_TAG2 CVMX_FULL_DID(CVMX_OCT_DID_TAG, 2ULL)
-#define CVMX_OCT_DID_TAG_TAG3 CVMX_FULL_DID(CVMX_OCT_DID_TAG, 3ULL)
-#define CVMX_OCT_DID_TAG_NULL_RD CVMX_FULL_DID(CVMX_OCT_DID_TAG, 4ULL)
-#define CVMX_OCT_DID_TAG_CSR CVMX_FULL_DID(CVMX_OCT_DID_TAG, 7ULL)
-#define CVMX_OCT_DID_FAU_FAI CVMX_FULL_DID(CVMX_OCT_DID_IOB, 0ULL)
-#define CVMX_OCT_DID_TIM_CSR CVMX_FULL_DID(CVMX_OCT_DID_TIM, 0ULL)
-#define CVMX_OCT_DID_KEY_RW CVMX_FULL_DID(CVMX_OCT_DID_KEY, 0ULL)
-#define CVMX_OCT_DID_PCI_6 CVMX_FULL_DID(CVMX_OCT_DID_PCI, 6ULL)
-#define CVMX_OCT_DID_MIS_BOO CVMX_FULL_DID(CVMX_OCT_DID_MIS, 0ULL)
-#define CVMX_OCT_DID_PCI_RML CVMX_FULL_DID(CVMX_OCT_DID_PCI, 0ULL)
-#define CVMX_OCT_DID_IPD_CSR CVMX_FULL_DID(CVMX_OCT_DID_IPD, 7ULL)
-#define CVMX_OCT_DID_DFA_CSR CVMX_FULL_DID(CVMX_OCT_DID_DFA, 7ULL)
-#define CVMX_OCT_DID_MIS_CSR CVMX_FULL_DID(CVMX_OCT_DID_MIS, 7ULL)
-#define CVMX_OCT_DID_ZIP_CSR CVMX_FULL_DID(CVMX_OCT_DID_ZIP, 0ULL)
-
-#endif /* __CVMX_ADDRESS_H__ */
diff --git a/drivers/staging/octeon/cvmx-asxx-defs.h b/drivers/staging/octeon/cvmx-asxx-defs.h
deleted file mode 100644
index 91415a85e8d2..000000000000
--- a/drivers/staging/octeon/cvmx-asxx-defs.h
+++ /dev/null
@@ -1,475 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-#ifndef __CVMX_ASXX_DEFS_H__
-#define __CVMX_ASXX_DEFS_H__
-
-#define CVMX_ASXX_GMII_RX_CLK_SET(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000180ull + (((block_id) & 0) * 0x8000000ull))
-#define CVMX_ASXX_GMII_RX_DAT_SET(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000188ull + (((block_id) & 0) * 0x8000000ull))
-#define CVMX_ASXX_INT_EN(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000018ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_ASXX_INT_REG(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000010ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_ASXX_MII_RX_DAT_SET(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000190ull + (((block_id) & 0) * 0x8000000ull))
-#define CVMX_ASXX_PRT_LOOP(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000040ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_ASXX_RLD_BYPASS(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000248ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_ASXX_RLD_BYPASS_SETTING(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000250ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_ASXX_RLD_COMP(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000220ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_ASXX_RLD_DATA_DRV(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000218ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_ASXX_RLD_FCRAM_MODE(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000210ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_ASXX_RLD_NCTL_STRONG(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000230ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_ASXX_RLD_NCTL_WEAK(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000240ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_ASXX_RLD_PCTL_STRONG(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000228ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_ASXX_RLD_PCTL_WEAK(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000238ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_ASXX_RLD_SETTING(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000258ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_ASXX_RX_CLK_SETX(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000020ull + (((offset) & 3) * 8) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_ASXX_RX_PRT_EN(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000000ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_ASXX_RX_WOL(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000100ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_ASXX_RX_WOL_MSK(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000108ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_ASXX_RX_WOL_POWOK(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000118ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_ASXX_RX_WOL_SIG(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000110ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_ASXX_TX_CLK_SETX(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000048ull + (((offset) & 3) * 8) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_ASXX_TX_COMP_BYP(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000068ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_ASXX_TX_HI_WATERX(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000080ull + (((offset) & 3) * 8) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_ASXX_TX_PRT_EN(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000008ull + (((block_id) & 1) * 0x8000000ull))
-
-union cvmx_asxx_gmii_rx_clk_set {
- uint64_t u64;
- struct cvmx_asxx_gmii_rx_clk_set_s {
- uint64_t reserved_5_63:59;
- uint64_t setting:5;
- } s;
- struct cvmx_asxx_gmii_rx_clk_set_s cn30xx;
- struct cvmx_asxx_gmii_rx_clk_set_s cn31xx;
- struct cvmx_asxx_gmii_rx_clk_set_s cn50xx;
-};
-
-union cvmx_asxx_gmii_rx_dat_set {
- uint64_t u64;
- struct cvmx_asxx_gmii_rx_dat_set_s {
- uint64_t reserved_5_63:59;
- uint64_t setting:5;
- } s;
- struct cvmx_asxx_gmii_rx_dat_set_s cn30xx;
- struct cvmx_asxx_gmii_rx_dat_set_s cn31xx;
- struct cvmx_asxx_gmii_rx_dat_set_s cn50xx;
-};
-
-union cvmx_asxx_int_en {
- uint64_t u64;
- struct cvmx_asxx_int_en_s {
- uint64_t reserved_12_63:52;
- uint64_t txpsh:4;
- uint64_t txpop:4;
- uint64_t ovrflw:4;
- } s;
- struct cvmx_asxx_int_en_cn30xx {
- uint64_t reserved_11_63:53;
- uint64_t txpsh:3;
- uint64_t reserved_7_7:1;
- uint64_t txpop:3;
- uint64_t reserved_3_3:1;
- uint64_t ovrflw:3;
- } cn30xx;
- struct cvmx_asxx_int_en_cn30xx cn31xx;
- struct cvmx_asxx_int_en_s cn38xx;
- struct cvmx_asxx_int_en_s cn38xxp2;
- struct cvmx_asxx_int_en_cn30xx cn50xx;
- struct cvmx_asxx_int_en_s cn58xx;
- struct cvmx_asxx_int_en_s cn58xxp1;
-};
-
-union cvmx_asxx_int_reg {
- uint64_t u64;
- struct cvmx_asxx_int_reg_s {
- uint64_t reserved_12_63:52;
- uint64_t txpsh:4;
- uint64_t txpop:4;
- uint64_t ovrflw:4;
- } s;
- struct cvmx_asxx_int_reg_cn30xx {
- uint64_t reserved_11_63:53;
- uint64_t txpsh:3;
- uint64_t reserved_7_7:1;
- uint64_t txpop:3;
- uint64_t reserved_3_3:1;
- uint64_t ovrflw:3;
- } cn30xx;
- struct cvmx_asxx_int_reg_cn30xx cn31xx;
- struct cvmx_asxx_int_reg_s cn38xx;
- struct cvmx_asxx_int_reg_s cn38xxp2;
- struct cvmx_asxx_int_reg_cn30xx cn50xx;
- struct cvmx_asxx_int_reg_s cn58xx;
- struct cvmx_asxx_int_reg_s cn58xxp1;
-};
-
-union cvmx_asxx_mii_rx_dat_set {
- uint64_t u64;
- struct cvmx_asxx_mii_rx_dat_set_s {
- uint64_t reserved_5_63:59;
- uint64_t setting:5;
- } s;
- struct cvmx_asxx_mii_rx_dat_set_s cn30xx;
- struct cvmx_asxx_mii_rx_dat_set_s cn50xx;
-};
-
-union cvmx_asxx_prt_loop {
- uint64_t u64;
- struct cvmx_asxx_prt_loop_s {
- uint64_t reserved_8_63:56;
- uint64_t ext_loop:4;
- uint64_t int_loop:4;
- } s;
- struct cvmx_asxx_prt_loop_cn30xx {
- uint64_t reserved_7_63:57;
- uint64_t ext_loop:3;
- uint64_t reserved_3_3:1;
- uint64_t int_loop:3;
- } cn30xx;
- struct cvmx_asxx_prt_loop_cn30xx cn31xx;
- struct cvmx_asxx_prt_loop_s cn38xx;
- struct cvmx_asxx_prt_loop_s cn38xxp2;
- struct cvmx_asxx_prt_loop_cn30xx cn50xx;
- struct cvmx_asxx_prt_loop_s cn58xx;
- struct cvmx_asxx_prt_loop_s cn58xxp1;
-};
-
-union cvmx_asxx_rld_bypass {
- uint64_t u64;
- struct cvmx_asxx_rld_bypass_s {
- uint64_t reserved_1_63:63;
- uint64_t bypass:1;
- } s;
- struct cvmx_asxx_rld_bypass_s cn38xx;
- struct cvmx_asxx_rld_bypass_s cn38xxp2;
- struct cvmx_asxx_rld_bypass_s cn58xx;
- struct cvmx_asxx_rld_bypass_s cn58xxp1;
-};
-
-union cvmx_asxx_rld_bypass_setting {
- uint64_t u64;
- struct cvmx_asxx_rld_bypass_setting_s {
- uint64_t reserved_5_63:59;
- uint64_t setting:5;
- } s;
- struct cvmx_asxx_rld_bypass_setting_s cn38xx;
- struct cvmx_asxx_rld_bypass_setting_s cn38xxp2;
- struct cvmx_asxx_rld_bypass_setting_s cn58xx;
- struct cvmx_asxx_rld_bypass_setting_s cn58xxp1;
-};
-
-union cvmx_asxx_rld_comp {
- uint64_t u64;
- struct cvmx_asxx_rld_comp_s {
- uint64_t reserved_9_63:55;
- uint64_t pctl:5;
- uint64_t nctl:4;
- } s;
- struct cvmx_asxx_rld_comp_cn38xx {
- uint64_t reserved_8_63:56;
- uint64_t pctl:4;
- uint64_t nctl:4;
- } cn38xx;
- struct cvmx_asxx_rld_comp_cn38xx cn38xxp2;
- struct cvmx_asxx_rld_comp_s cn58xx;
- struct cvmx_asxx_rld_comp_s cn58xxp1;
-};
-
-union cvmx_asxx_rld_data_drv {
- uint64_t u64;
- struct cvmx_asxx_rld_data_drv_s {
- uint64_t reserved_8_63:56;
- uint64_t pctl:4;
- uint64_t nctl:4;
- } s;
- struct cvmx_asxx_rld_data_drv_s cn38xx;
- struct cvmx_asxx_rld_data_drv_s cn38xxp2;
- struct cvmx_asxx_rld_data_drv_s cn58xx;
- struct cvmx_asxx_rld_data_drv_s cn58xxp1;
-};
-
-union cvmx_asxx_rld_fcram_mode {
- uint64_t u64;
- struct cvmx_asxx_rld_fcram_mode_s {
- uint64_t reserved_1_63:63;
- uint64_t mode:1;
- } s;
- struct cvmx_asxx_rld_fcram_mode_s cn38xx;
- struct cvmx_asxx_rld_fcram_mode_s cn38xxp2;
-};
-
-union cvmx_asxx_rld_nctl_strong {
- uint64_t u64;
- struct cvmx_asxx_rld_nctl_strong_s {
- uint64_t reserved_5_63:59;
- uint64_t nctl:5;
- } s;
- struct cvmx_asxx_rld_nctl_strong_s cn38xx;
- struct cvmx_asxx_rld_nctl_strong_s cn38xxp2;
- struct cvmx_asxx_rld_nctl_strong_s cn58xx;
- struct cvmx_asxx_rld_nctl_strong_s cn58xxp1;
-};
-
-union cvmx_asxx_rld_nctl_weak {
- uint64_t u64;
- struct cvmx_asxx_rld_nctl_weak_s {
- uint64_t reserved_5_63:59;
- uint64_t nctl:5;
- } s;
- struct cvmx_asxx_rld_nctl_weak_s cn38xx;
- struct cvmx_asxx_rld_nctl_weak_s cn38xxp2;
- struct cvmx_asxx_rld_nctl_weak_s cn58xx;
- struct cvmx_asxx_rld_nctl_weak_s cn58xxp1;
-};
-
-union cvmx_asxx_rld_pctl_strong {
- uint64_t u64;
- struct cvmx_asxx_rld_pctl_strong_s {
- uint64_t reserved_5_63:59;
- uint64_t pctl:5;
- } s;
- struct cvmx_asxx_rld_pctl_strong_s cn38xx;
- struct cvmx_asxx_rld_pctl_strong_s cn38xxp2;
- struct cvmx_asxx_rld_pctl_strong_s cn58xx;
- struct cvmx_asxx_rld_pctl_strong_s cn58xxp1;
-};
-
-union cvmx_asxx_rld_pctl_weak {
- uint64_t u64;
- struct cvmx_asxx_rld_pctl_weak_s {
- uint64_t reserved_5_63:59;
- uint64_t pctl:5;
- } s;
- struct cvmx_asxx_rld_pctl_weak_s cn38xx;
- struct cvmx_asxx_rld_pctl_weak_s cn38xxp2;
- struct cvmx_asxx_rld_pctl_weak_s cn58xx;
- struct cvmx_asxx_rld_pctl_weak_s cn58xxp1;
-};
-
-union cvmx_asxx_rld_setting {
- uint64_t u64;
- struct cvmx_asxx_rld_setting_s {
- uint64_t reserved_13_63:51;
- uint64_t dfaset:5;
- uint64_t dfalag:1;
- uint64_t dfalead:1;
- uint64_t dfalock:1;
- uint64_t setting:5;
- } s;
- struct cvmx_asxx_rld_setting_cn38xx {
- uint64_t reserved_5_63:59;
- uint64_t setting:5;
- } cn38xx;
- struct cvmx_asxx_rld_setting_cn38xx cn38xxp2;
- struct cvmx_asxx_rld_setting_s cn58xx;
- struct cvmx_asxx_rld_setting_s cn58xxp1;
-};
-
-union cvmx_asxx_rx_clk_setx {
- uint64_t u64;
- struct cvmx_asxx_rx_clk_setx_s {
- uint64_t reserved_5_63:59;
- uint64_t setting:5;
- } s;
- struct cvmx_asxx_rx_clk_setx_s cn30xx;
- struct cvmx_asxx_rx_clk_setx_s cn31xx;
- struct cvmx_asxx_rx_clk_setx_s cn38xx;
- struct cvmx_asxx_rx_clk_setx_s cn38xxp2;
- struct cvmx_asxx_rx_clk_setx_s cn50xx;
- struct cvmx_asxx_rx_clk_setx_s cn58xx;
- struct cvmx_asxx_rx_clk_setx_s cn58xxp1;
-};
-
-union cvmx_asxx_rx_prt_en {
- uint64_t u64;
- struct cvmx_asxx_rx_prt_en_s {
- uint64_t reserved_4_63:60;
- uint64_t prt_en:4;
- } s;
- struct cvmx_asxx_rx_prt_en_cn30xx {
- uint64_t reserved_3_63:61;
- uint64_t prt_en:3;
- } cn30xx;
- struct cvmx_asxx_rx_prt_en_cn30xx cn31xx;
- struct cvmx_asxx_rx_prt_en_s cn38xx;
- struct cvmx_asxx_rx_prt_en_s cn38xxp2;
- struct cvmx_asxx_rx_prt_en_cn30xx cn50xx;
- struct cvmx_asxx_rx_prt_en_s cn58xx;
- struct cvmx_asxx_rx_prt_en_s cn58xxp1;
-};
-
-union cvmx_asxx_rx_wol {
- uint64_t u64;
- struct cvmx_asxx_rx_wol_s {
- uint64_t reserved_2_63:62;
- uint64_t status:1;
- uint64_t enable:1;
- } s;
- struct cvmx_asxx_rx_wol_s cn38xx;
- struct cvmx_asxx_rx_wol_s cn38xxp2;
-};
-
-union cvmx_asxx_rx_wol_msk {
- uint64_t u64;
- struct cvmx_asxx_rx_wol_msk_s {
- uint64_t msk:64;
- } s;
- struct cvmx_asxx_rx_wol_msk_s cn38xx;
- struct cvmx_asxx_rx_wol_msk_s cn38xxp2;
-};
-
-union cvmx_asxx_rx_wol_powok {
- uint64_t u64;
- struct cvmx_asxx_rx_wol_powok_s {
- uint64_t reserved_1_63:63;
- uint64_t powerok:1;
- } s;
- struct cvmx_asxx_rx_wol_powok_s cn38xx;
- struct cvmx_asxx_rx_wol_powok_s cn38xxp2;
-};
-
-union cvmx_asxx_rx_wol_sig {
- uint64_t u64;
- struct cvmx_asxx_rx_wol_sig_s {
- uint64_t reserved_32_63:32;
- uint64_t sig:32;
- } s;
- struct cvmx_asxx_rx_wol_sig_s cn38xx;
- struct cvmx_asxx_rx_wol_sig_s cn38xxp2;
-};
-
-union cvmx_asxx_tx_clk_setx {
- uint64_t u64;
- struct cvmx_asxx_tx_clk_setx_s {
- uint64_t reserved_5_63:59;
- uint64_t setting:5;
- } s;
- struct cvmx_asxx_tx_clk_setx_s cn30xx;
- struct cvmx_asxx_tx_clk_setx_s cn31xx;
- struct cvmx_asxx_tx_clk_setx_s cn38xx;
- struct cvmx_asxx_tx_clk_setx_s cn38xxp2;
- struct cvmx_asxx_tx_clk_setx_s cn50xx;
- struct cvmx_asxx_tx_clk_setx_s cn58xx;
- struct cvmx_asxx_tx_clk_setx_s cn58xxp1;
-};
-
-union cvmx_asxx_tx_comp_byp {
- uint64_t u64;
- struct cvmx_asxx_tx_comp_byp_s {
- uint64_t reserved_0_63:64;
- } s;
- struct cvmx_asxx_tx_comp_byp_cn30xx {
- uint64_t reserved_9_63:55;
- uint64_t bypass:1;
- uint64_t pctl:4;
- uint64_t nctl:4;
- } cn30xx;
- struct cvmx_asxx_tx_comp_byp_cn30xx cn31xx;
- struct cvmx_asxx_tx_comp_byp_cn38xx {
- uint64_t reserved_8_63:56;
- uint64_t pctl:4;
- uint64_t nctl:4;
- } cn38xx;
- struct cvmx_asxx_tx_comp_byp_cn38xx cn38xxp2;
- struct cvmx_asxx_tx_comp_byp_cn50xx {
- uint64_t reserved_17_63:47;
- uint64_t bypass:1;
- uint64_t reserved_13_15:3;
- uint64_t pctl:5;
- uint64_t reserved_5_7:3;
- uint64_t nctl:5;
- } cn50xx;
- struct cvmx_asxx_tx_comp_byp_cn58xx {
- uint64_t reserved_13_63:51;
- uint64_t pctl:5;
- uint64_t reserved_5_7:3;
- uint64_t nctl:5;
- } cn58xx;
- struct cvmx_asxx_tx_comp_byp_cn58xx cn58xxp1;
-};
-
-union cvmx_asxx_tx_hi_waterx {
- uint64_t u64;
- struct cvmx_asxx_tx_hi_waterx_s {
- uint64_t reserved_4_63:60;
- uint64_t mark:4;
- } s;
- struct cvmx_asxx_tx_hi_waterx_cn30xx {
- uint64_t reserved_3_63:61;
- uint64_t mark:3;
- } cn30xx;
- struct cvmx_asxx_tx_hi_waterx_cn30xx cn31xx;
- struct cvmx_asxx_tx_hi_waterx_s cn38xx;
- struct cvmx_asxx_tx_hi_waterx_s cn38xxp2;
- struct cvmx_asxx_tx_hi_waterx_cn30xx cn50xx;
- struct cvmx_asxx_tx_hi_waterx_s cn58xx;
- struct cvmx_asxx_tx_hi_waterx_s cn58xxp1;
-};
-
-union cvmx_asxx_tx_prt_en {
- uint64_t u64;
- struct cvmx_asxx_tx_prt_en_s {
- uint64_t reserved_4_63:60;
- uint64_t prt_en:4;
- } s;
- struct cvmx_asxx_tx_prt_en_cn30xx {
- uint64_t reserved_3_63:61;
- uint64_t prt_en:3;
- } cn30xx;
- struct cvmx_asxx_tx_prt_en_cn30xx cn31xx;
- struct cvmx_asxx_tx_prt_en_s cn38xx;
- struct cvmx_asxx_tx_prt_en_s cn38xxp2;
- struct cvmx_asxx_tx_prt_en_cn30xx cn50xx;
- struct cvmx_asxx_tx_prt_en_s cn58xx;
- struct cvmx_asxx_tx_prt_en_s cn58xxp1;
-};
-
-#endif
diff --git a/drivers/staging/octeon/cvmx-cmd-queue.c b/drivers/staging/octeon/cvmx-cmd-queue.c
deleted file mode 100644
index e9809d375162..000000000000
--- a/drivers/staging/octeon/cvmx-cmd-queue.c
+++ /dev/null
@@ -1,306 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/*
- * Support functions for managing command queues used for
- * various hardware blocks.
- */
-
-#include <linux/kernel.h>
-
-#include <asm/octeon/octeon.h>
-
-#include "cvmx-config.h"
-#include "cvmx-fpa.h"
-#include "cvmx-cmd-queue.h"
-
-#include <asm/octeon/cvmx-npei-defs.h>
-#include <asm/octeon/cvmx-pexp-defs.h>
-#include "cvmx-pko-defs.h"
-
-/**
- * All applications use this pointer to access the global queue
- * state. It points to a bootmem named block.
- */
-__cvmx_cmd_queue_all_state_t *__cvmx_cmd_queue_state_ptr;
-
-/**
- * Initialize the Global queue state pointer.
- *
- * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
- */
-static cvmx_cmd_queue_result_t __cvmx_cmd_queue_init_state_ptr(void)
-{
- char *alloc_name = "cvmx_cmd_queues";
-#if defined(CONFIG_CAVIUM_RESERVE32) && CONFIG_CAVIUM_RESERVE32
- extern uint64_t octeon_reserve32_memory;
-#endif
-
- if (likely(__cvmx_cmd_queue_state_ptr))
- return CVMX_CMD_QUEUE_SUCCESS;
-
-#if defined(CONFIG_CAVIUM_RESERVE32) && CONFIG_CAVIUM_RESERVE32
- if (octeon_reserve32_memory)
- __cvmx_cmd_queue_state_ptr =
- cvmx_bootmem_alloc_named_range(sizeof(*__cvmx_cmd_queue_state_ptr),
- octeon_reserve32_memory,
- octeon_reserve32_memory +
- (CONFIG_CAVIUM_RESERVE32 <<
- 20) - 1, 128, alloc_name);
- else
-#endif
- __cvmx_cmd_queue_state_ptr =
- cvmx_bootmem_alloc_named(sizeof(*__cvmx_cmd_queue_state_ptr),
- 128,
- alloc_name);
- if (__cvmx_cmd_queue_state_ptr)
- memset(__cvmx_cmd_queue_state_ptr, 0,
- sizeof(*__cvmx_cmd_queue_state_ptr));
- else {
- struct cvmx_bootmem_named_block_desc *block_desc =
- cvmx_bootmem_find_named_block(alloc_name);
- if (block_desc)
- __cvmx_cmd_queue_state_ptr =
- cvmx_phys_to_ptr(block_desc->base_addr);
- else {
- cvmx_dprintf
- ("ERROR: cvmx_cmd_queue_initialize: Unable to get named block %s.\n",
- alloc_name);
- return CVMX_CMD_QUEUE_NO_MEMORY;
- }
- }
- return CVMX_CMD_QUEUE_SUCCESS;
-}
-
-/**
- * Initialize a command queue for use. The initial FPA buffer is
- * allocated and the hardware unit is configured to point to the
- * new command queue.
- *
- * @queue_id: Hardware command queue to initialize.
- * @max_depth: Maximum outstanding commands that can be queued.
- * @fpa_pool: FPA pool the command queues should come from.
- * @pool_size: Size of each buffer in the FPA pool (bytes)
- *
- * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
- */
-cvmx_cmd_queue_result_t cvmx_cmd_queue_initialize(cvmx_cmd_queue_id_t queue_id,
- int max_depth, int fpa_pool,
- int pool_size)
-{
- __cvmx_cmd_queue_state_t *qstate;
- cvmx_cmd_queue_result_t result = __cvmx_cmd_queue_init_state_ptr();
- if (result != CVMX_CMD_QUEUE_SUCCESS)
- return result;
-
- qstate = __cvmx_cmd_queue_get_state(queue_id);
- if (qstate == NULL)
- return CVMX_CMD_QUEUE_INVALID_PARAM;
-
- /*
- * We artificially limit max_depth to 1<<20 words. It is an
- * arbitrary limit.
- */
- if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH) {
- if ((max_depth < 0) || (max_depth > 1 << 20))
- return CVMX_CMD_QUEUE_INVALID_PARAM;
- } else if (max_depth != 0)
- return CVMX_CMD_QUEUE_INVALID_PARAM;
-
- if ((fpa_pool < 0) || (fpa_pool > 7))
- return CVMX_CMD_QUEUE_INVALID_PARAM;
- if ((pool_size < 128) || (pool_size > 65536))
- return CVMX_CMD_QUEUE_INVALID_PARAM;
-
- /* See if someone else has already initialized the queue */
- if (qstate->base_ptr_div128) {
- if (max_depth != (int)qstate->max_depth) {
- cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
- "Queue already initialized with different "
- "max_depth (%d).\n",
- (int)qstate->max_depth);
- return CVMX_CMD_QUEUE_INVALID_PARAM;
- }
- if (fpa_pool != qstate->fpa_pool) {
- cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
- "Queue already initialized with different "
- "FPA pool (%u).\n",
- qstate->fpa_pool);
- return CVMX_CMD_QUEUE_INVALID_PARAM;
- }
- if ((pool_size >> 3) - 1 != qstate->pool_size_m1) {
- cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
- "Queue already initialized with different "
- "FPA pool size (%u).\n",
- (qstate->pool_size_m1 + 1) << 3);
- return CVMX_CMD_QUEUE_INVALID_PARAM;
- }
- CVMX_SYNCWS;
- return CVMX_CMD_QUEUE_ALREADY_SETUP;
- } else {
- union cvmx_fpa_ctl_status status;
- void *buffer;
-
- status.u64 = cvmx_read_csr(CVMX_FPA_CTL_STATUS);
- if (!status.s.enb) {
- cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
- "FPA is not enabled.\n");
- return CVMX_CMD_QUEUE_NO_MEMORY;
- }
- buffer = cvmx_fpa_alloc(fpa_pool);
- if (buffer == NULL) {
- cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
- "Unable to allocate initial buffer.\n");
- return CVMX_CMD_QUEUE_NO_MEMORY;
- }
-
- memset(qstate, 0, sizeof(*qstate));
- qstate->max_depth = max_depth;
- qstate->fpa_pool = fpa_pool;
- qstate->pool_size_m1 = (pool_size >> 3) - 1;
- qstate->base_ptr_div128 = cvmx_ptr_to_phys(buffer) / 128;
- /*
- * We zeroed the now serving field so we need to also
- * zero the ticket.
- */
- __cvmx_cmd_queue_state_ptr->
- ticket[__cvmx_cmd_queue_get_index(queue_id)] = 0;
- CVMX_SYNCWS;
- return CVMX_CMD_QUEUE_SUCCESS;
- }
-}
-
-/**
- * Shutdown a queue and free its command buffers to the FPA. The
- * hardware connected to the queue must be stopped before this
- * function is called.
- *
- * @queue_id: Queue to shutdown
- *
- * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
- */
-cvmx_cmd_queue_result_t cvmx_cmd_queue_shutdown(cvmx_cmd_queue_id_t queue_id)
-{
- __cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
- if (qptr == NULL) {
- cvmx_dprintf("ERROR: cvmx_cmd_queue_shutdown: Unable to "
- "get queue information.\n");
- return CVMX_CMD_QUEUE_INVALID_PARAM;
- }
-
- if (cvmx_cmd_queue_length(queue_id) > 0) {
- cvmx_dprintf("ERROR: cvmx_cmd_queue_shutdown: Queue still "
- "has data in it.\n");
- return CVMX_CMD_QUEUE_FULL;
- }
-
- __cvmx_cmd_queue_lock(queue_id, qptr);
- if (qptr->base_ptr_div128) {
- cvmx_fpa_free(cvmx_phys_to_ptr
- ((uint64_t) qptr->base_ptr_div128 << 7),
- qptr->fpa_pool, 0);
- qptr->base_ptr_div128 = 0;
- }
- __cvmx_cmd_queue_unlock(qptr);
-
- return CVMX_CMD_QUEUE_SUCCESS;
-}
-
-/**
- * Return the number of command words pending in the queue. This
- * function may be relatively slow for some hardware units.
- *
- * @queue_id: Hardware command queue to query
- *
- * Returns Number of outstanding commands
- */
-int cvmx_cmd_queue_length(cvmx_cmd_queue_id_t queue_id)
-{
- if (CVMX_ENABLE_PARAMETER_CHECKING) {
- if (__cvmx_cmd_queue_get_state(queue_id) == NULL)
- return CVMX_CMD_QUEUE_INVALID_PARAM;
- }
-
- /*
-	 * The cast is here so gcc will check that all values in the
-	 * cvmx_cmd_queue_id_t enumeration are handled here.
- */
- switch ((cvmx_cmd_queue_id_t) (queue_id & 0xff0000)) {
- case CVMX_CMD_QUEUE_PKO_BASE:
- /*
- * FIXME: Need atomic lock on
- * CVMX_PKO_REG_READ_IDX. Right now we are normally
- * called with the queue lock, so that is a SLIGHT
- * amount of protection.
- */
- cvmx_write_csr(CVMX_PKO_REG_READ_IDX, queue_id & 0xffff);
- if (OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
- union cvmx_pko_mem_debug9 debug9;
- debug9.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG9);
- return debug9.cn38xx.doorbell;
- } else {
- union cvmx_pko_mem_debug8 debug8;
- debug8.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG8);
- return debug8.cn58xx.doorbell;
- }
- case CVMX_CMD_QUEUE_ZIP:
- case CVMX_CMD_QUEUE_DFA:
- case CVMX_CMD_QUEUE_RAID:
- /* FIXME: Implement other lengths */
- return 0;
- case CVMX_CMD_QUEUE_DMA_BASE:
- {
- union cvmx_npei_dmax_counts dmax_counts;
- dmax_counts.u64 =
- cvmx_read_csr(CVMX_PEXP_NPEI_DMAX_COUNTS
- (queue_id & 0x7));
- return dmax_counts.s.dbell;
- }
- case CVMX_CMD_QUEUE_END:
- return CVMX_CMD_QUEUE_INVALID_PARAM;
- }
- return CVMX_CMD_QUEUE_INVALID_PARAM;
-}
-
-/**
- * Return the command buffer to be written to. The purpose of this
- * function is to allow CVMX routines access to the low level buffer
- * for initial hardware setup. User applications should not call this
- * function directly.
- *
- * @queue_id: Command queue to query
- *
- * Returns Command buffer or NULL on failure
- */
-void *cvmx_cmd_queue_buffer(cvmx_cmd_queue_id_t queue_id)
-{
- __cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
- if (qptr && qptr->base_ptr_div128)
- return cvmx_phys_to_ptr((uint64_t) qptr->base_ptr_div128 << 7);
- else
- return NULL;
-}
diff --git a/drivers/staging/octeon/cvmx-cmd-queue.h b/drivers/staging/octeon/cvmx-cmd-queue.h
deleted file mode 100644
index 614653b686a0..000000000000
--- a/drivers/staging/octeon/cvmx-cmd-queue.h
+++ /dev/null
@@ -1,617 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/*
- *
- * Support functions for managing command queues used for
- * various hardware blocks.
- *
- * The common command queue infrastructure abstracts out the
- * software necessary for adding to Octeon's chained queue
- * structures. These structures are used for commands to the
- * PKO, ZIP, DFA, RAID, and DMA engine blocks. Although each
- * hardware unit takes commands and CSRs of different types,
- * they all use basic linked command buffers to store the
- * pending request. In general, users of the CVMX API don't
- * call cvmx-cmd-queue functions directly. Instead the hardware
- * unit specific wrapper should be used. The wrappers perform
- * unit specific validation and CSR writes to submit the
- * commands.
- *
- * Even though most software will never directly interact with
- * cvmx-cmd-queue, knowledge of its internal workings can help
- * in diagnosing performance problems and help with debugging.
- *
- * Command queue pointers are stored in a global named block
- * called "cvmx_cmd_queues". Except for the PKO queues, each
- * hardware queue is stored in its own cache line to reduce SMP
- * contention on spin locks. The PKO queues are stored such that
- * every 16th queue is next to each other in memory. This scheme
- * allows for queues being in separate cache lines when there
- * is a low number of queues per port. With 16 queues per port,
- * the first queue for each port is in the same cache area. The
- * second queues for each port are in another area, etc. This
- * allows software to implement very efficient lockless PKO with
- * 16 queues per port using a minimum of cache lines per core.
- * All queues for a given core will be isolated in the same
- * cache area.
- *
- * In addition to the memory pointer layout, cvmx-cmd-queue
- * provides an optimized fair ll/sc locking mechanism for the
- * queues. The lock uses a "ticket / now serving" model to
- * maintain fair order on contended locks. In addition, it uses
- * predicted locking time to limit cache contention. When a core
- * knows it must wait in line for a lock, it spins on the
- * internal cycle counter to completely eliminate any causes of
- * bus traffic.
- *
- */
-
-#ifndef __CVMX_CMD_QUEUE_H__
-#define __CVMX_CMD_QUEUE_H__
-
-#include <linux/prefetch.h>
-
-#include "cvmx-fpa.h"
-/**
- * By default we disable the max depth support. Most programs
- * don't use it and it slows down the command queue processing
- * significantly.
- */
-#ifndef CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH
-#define CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH 0
-#endif
-
-/**
- * Enumeration representing all hardware blocks that use command
- * queues. Each hardware block has up to 65536 sub identifiers for
- * multiple command queues. Not all chips support all hardware
- * units.
- */
-typedef enum {
- CVMX_CMD_QUEUE_PKO_BASE = 0x00000,
-
-#define CVMX_CMD_QUEUE_PKO(queue) \
- ((cvmx_cmd_queue_id_t)(CVMX_CMD_QUEUE_PKO_BASE + (0xffff&(queue))))
-
- CVMX_CMD_QUEUE_ZIP = 0x10000,
- CVMX_CMD_QUEUE_DFA = 0x20000,
- CVMX_CMD_QUEUE_RAID = 0x30000,
- CVMX_CMD_QUEUE_DMA_BASE = 0x40000,
-
-#define CVMX_CMD_QUEUE_DMA(queue) \
- ((cvmx_cmd_queue_id_t)(CVMX_CMD_QUEUE_DMA_BASE + (0xffff&(queue))))
-
- CVMX_CMD_QUEUE_END = 0x50000,
-} cvmx_cmd_queue_id_t;
-
-/**
- * Command write operations can fail if the command queue needs
- * a new buffer and the associated FPA pool is empty. It can also
- * fail if the number of queued command words reaches the maximum
- * set at initialization.
- */
-typedef enum {
- CVMX_CMD_QUEUE_SUCCESS = 0,
- CVMX_CMD_QUEUE_NO_MEMORY = -1,
- CVMX_CMD_QUEUE_FULL = -2,
- CVMX_CMD_QUEUE_INVALID_PARAM = -3,
- CVMX_CMD_QUEUE_ALREADY_SETUP = -4,
-} cvmx_cmd_queue_result_t;
-
-typedef struct {
- /* You have lock when this is your ticket */
- uint8_t now_serving;
- uint64_t unused1:24;
- /* Maximum outstanding command words */
- uint32_t max_depth;
- /* FPA pool buffers come from */
- uint64_t fpa_pool:3;
- /* Top of command buffer pointer shifted 7 */
- uint64_t base_ptr_div128:29;
- uint64_t unused2:6;
- /* FPA buffer size in 64bit words minus 1 */
- uint64_t pool_size_m1:13;
- /* Number of commands already used in buffer */
- uint64_t index:13;
-} __cvmx_cmd_queue_state_t;
-
-/**
- * This structure contains the global state of all command queues.
- * It is stored in a bootmem named block and shared by all
- * applications running on Octeon. Tickets are stored in a different
- * cache line than the queue information to reduce the contention on
- * the ll/sc used to get a ticket. If this were not the case, updates
- * of the queue state would cause the ll/sc to fail quite often.
- */
-typedef struct {
- uint64_t ticket[(CVMX_CMD_QUEUE_END >> 16) * 256];
- __cvmx_cmd_queue_state_t state[(CVMX_CMD_QUEUE_END >> 16) * 256];
-} __cvmx_cmd_queue_all_state_t;
-
-/**
- * Initialize a command queue for use. The initial FPA buffer is
- * allocated and the hardware unit is configured to point to the
- * new command queue.
- *
- * @queue_id: Hardware command queue to initialize.
- * @max_depth: Maximum outstanding commands that can be queued.
- * @fpa_pool: FPA pool the command queues should come from.
- * @pool_size: Size of each buffer in the FPA pool (bytes)
- *
- * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
- */
-cvmx_cmd_queue_result_t cvmx_cmd_queue_initialize(cvmx_cmd_queue_id_t queue_id,
- int max_depth, int fpa_pool,
- int pool_size);
-
-/**
- * Shutdown a queue and free its command buffers to the FPA. The
- * hardware connected to the queue must be stopped before this
- * function is called.
- *
- * @queue_id: Queue to shutdown
- *
- * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
- */
-cvmx_cmd_queue_result_t cvmx_cmd_queue_shutdown(cvmx_cmd_queue_id_t queue_id);
-
-/**
- * Return the number of command words pending in the queue. This
- * function may be relatively slow for some hardware units.
- *
- * @queue_id: Hardware command queue to query
- *
- * Returns Number of outstanding commands
- */
-int cvmx_cmd_queue_length(cvmx_cmd_queue_id_t queue_id);
-
-/**
- * Return the command buffer to be written to. The purpose of this
- * function is to allow CVMX routines access to the low level buffer
- * for initial hardware setup. User applications should not call this
- * function directly.
- *
- * @queue_id: Command queue to query
- *
- * Returns Command buffer or NULL on failure
- */
-void *cvmx_cmd_queue_buffer(cvmx_cmd_queue_id_t queue_id);
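-
-/*
- * Usage sketch (illustrative only; the queue number, pool choice and
- * two-word command are examples, not a prescribed sequence): a caller
- * typically sets up a queue once, submits commands through it, and
- * tears it down when the hardware block is idle.
- *
- *	if (cvmx_cmd_queue_initialize(CVMX_CMD_QUEUE_PKO(0), 0,
- *				      CVMX_FPA_OUTPUT_BUFFER_POOL,
- *				      CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE) ==
- *	    CVMX_CMD_QUEUE_SUCCESS) {
- *		cvmx_cmd_queue_write2(CVMX_CMD_QUEUE_PKO(0), 1, cmd1, cmd2);
- *		...
- *		cvmx_cmd_queue_shutdown(CVMX_CMD_QUEUE_PKO(0));
- *	}
- */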
-
-/**
- * Get the index into the state arrays for the supplied queue id.
- *
- * @queue_id: Queue ID to get an index for
- *
- * Returns Index into the state arrays
- */
-static inline int __cvmx_cmd_queue_get_index(cvmx_cmd_queue_id_t queue_id)
-{
- /*
- * Warning: This code currently only works with devices that
- * have 256 queues or less. Devices with more than 16 queues
- * are laid out in memory to allow cores quick access to
- * every 16th queue. This reduces cache thrashing when you are
- * running 16 queues per port to support lockless operation.
- */
- int unit = queue_id >> 16;
- int q = (queue_id >> 4) & 0xf;
- int core = queue_id & 0xf;
- return unit * 256 + core * 16 + q;
-}
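-
-/*
- * For example, CVMX_CMD_QUEUE_PKO(35) is queue_id 0x0023: unit = 0,
- * q = 2, core = 3, giving index 0*256 + 3*16 + 2 = 50. The next
- * queue, PKO(36), maps to index 66, so consecutive queue numbers sit
- * 16 slots apart, while PKO(51) (35 + 16) maps to the adjacent
- * index 51.
- */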
-
-/**
- * Lock the supplied queue so nobody else is updating it at the same
- * time as us.
- *
- * @queue_id: Queue ID to lock
- * @qptr: Pointer to the queue's global state
- */
-static inline void __cvmx_cmd_queue_lock(cvmx_cmd_queue_id_t queue_id,
- __cvmx_cmd_queue_state_t *qptr)
-{
- extern __cvmx_cmd_queue_all_state_t
- *__cvmx_cmd_queue_state_ptr;
- int tmp;
- int my_ticket;
- prefetch(qptr);
- asm volatile (
- ".set push\n"
- ".set noreorder\n"
- "1:\n"
- /* Atomic add one to ticket_ptr */
- "ll %[my_ticket], %[ticket_ptr]\n"
- /* and store the original value */
- "li %[ticket], 1\n"
- /* in my_ticket */
- "baddu %[ticket], %[my_ticket]\n"
- "sc %[ticket], %[ticket_ptr]\n"
- "beqz %[ticket], 1b\n"
- " nop\n"
- /* Load the current now_serving ticket */
- "lbu %[ticket], %[now_serving]\n"
- "2:\n"
- /* Jump out if now_serving == my_ticket */
- "beq %[ticket], %[my_ticket], 4f\n"
- /* Find out how many tickets are in front of me */
- " subu %[ticket], %[my_ticket], %[ticket]\n"
- /* Use tickets in front of me minus one to delay */
- "subu %[ticket], 1\n"
- /* Delay will be ((tickets in front)-1)*32 loops */
- "cins %[ticket], %[ticket], 5, 7\n"
- "3:\n"
- /* Loop here until our ticket might be up */
- "bnez %[ticket], 3b\n"
- " subu %[ticket], 1\n"
- /* Jump back up to check out ticket again */
- "b 2b\n"
- /* Load the current now_serving ticket */
- " lbu %[ticket], %[now_serving]\n"
- "4:\n"
- ".set pop\n" :
- [ticket_ptr] "=m"(__cvmx_cmd_queue_state_ptr->ticket[__cvmx_cmd_queue_get_index(queue_id)]),
- [now_serving] "=m"(qptr->now_serving), [ticket] "=r"(tmp),
- [my_ticket] "=r"(my_ticket)
- );
-}
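-
-/*
- * In spirit the assembly above is the following C sketch; the real
- * code must stay ll/sc so the ticket grab remains atomic:
- *
- *	my_ticket = atomic_fetch_add(&ticket[index], 1) & 0xff;
- *	while ((t = qptr->now_serving) != my_ticket)
- *		spin for roughly ((my_ticket - t - 1) & 0xff) * 32
- *		loop iterations before re-reading now_serving;
- */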
-
-/**
- * Unlock the queue, flushing all writes.
- *
- * @qptr: Queue to unlock
- */
-static inline void __cvmx_cmd_queue_unlock(__cvmx_cmd_queue_state_t *qptr)
-{
- qptr->now_serving++;
- CVMX_SYNCWS;
-}
-
-/**
- * Get the queue state structure for the given queue id
- *
- * @queue_id: Queue id to get
- *
- * Returns Queue structure or NULL on failure
- */
-static inline __cvmx_cmd_queue_state_t
- *__cvmx_cmd_queue_get_state(cvmx_cmd_queue_id_t queue_id)
-{
- extern __cvmx_cmd_queue_all_state_t
- *__cvmx_cmd_queue_state_ptr;
- return &__cvmx_cmd_queue_state_ptr->
- state[__cvmx_cmd_queue_get_index(queue_id)];
-}
-
-/**
- * Write an arbitrary number of command words to a command queue.
- * This is a generic function; the fixed number of command word
- * functions yield higher performance.
- *
- * @queue_id: Hardware command queue to write to
- * @use_locking:
- * Use internal locking to ensure exclusive access for queue
- * updates. If you don't use this locking you must ensure
- * exclusivity some other way. Locking is strongly recommended.
- * @cmd_count: Number of command words to write
- * @cmds: Array of commands to write
- *
- * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
- */
-static inline cvmx_cmd_queue_result_t cvmx_cmd_queue_write(cvmx_cmd_queue_id_t
- queue_id,
- int use_locking,
- int cmd_count,
- uint64_t *cmds)
-{
- __cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
-
- /* Make sure nobody else is updating the same queue */
- if (likely(use_locking))
- __cvmx_cmd_queue_lock(queue_id, qptr);
-
- /*
- * If a max queue length was specified then make sure we don't
- * exceed it. If any part of the command would be below the
- * limit we allow it.
- */
- if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH && unlikely(qptr->max_depth)) {
- if (unlikely
- (cvmx_cmd_queue_length(queue_id) > (int)qptr->max_depth)) {
- if (likely(use_locking))
- __cvmx_cmd_queue_unlock(qptr);
- return CVMX_CMD_QUEUE_FULL;
- }
- }
-
- /*
- * Normally there is plenty of room in the current buffer for
- * the command.
- */
- if (likely(qptr->index + cmd_count < qptr->pool_size_m1)) {
- uint64_t *ptr =
- (uint64_t *) cvmx_phys_to_ptr((uint64_t) qptr->
- base_ptr_div128 << 7);
- ptr += qptr->index;
- qptr->index += cmd_count;
- while (cmd_count--)
- *ptr++ = *cmds++;
- } else {
- uint64_t *ptr;
- int count;
- /*
- * We need a new command buffer. Fail if there isn't
- * one available.
- */
- uint64_t *new_buffer =
- (uint64_t *) cvmx_fpa_alloc(qptr->fpa_pool);
- if (unlikely(new_buffer == NULL)) {
- if (likely(use_locking))
- __cvmx_cmd_queue_unlock(qptr);
- return CVMX_CMD_QUEUE_NO_MEMORY;
- }
- ptr =
- (uint64_t *) cvmx_phys_to_ptr((uint64_t) qptr->
- base_ptr_div128 << 7);
- /*
- * Figure out how many command words will fit in this
- * buffer. One location will be needed for the next
- * buffer pointer.
- */
- count = qptr->pool_size_m1 - qptr->index;
- ptr += qptr->index;
- cmd_count -= count;
- while (count--)
- *ptr++ = *cmds++;
- *ptr = cvmx_ptr_to_phys(new_buffer);
- /*
- * The current buffer is full and has a link to the
- * next buffer. Time to write the rest of the commands
- * into the new buffer.
- */
- qptr->base_ptr_div128 = *ptr >> 7;
- qptr->index = cmd_count;
- ptr = new_buffer;
- while (cmd_count--)
- *ptr++ = *cmds++;
- }
-
- /* All updates are complete. Release the lock and return */
- if (likely(use_locking))
- __cvmx_cmd_queue_unlock(qptr);
- return CVMX_CMD_QUEUE_SUCCESS;
-}
-
-/**
- * Simple function to write two command words to a command
- * queue.
- *
- * @queue_id: Hardware command queue to write to
- * @use_locking:
- * Use internal locking to ensure exclusive access for queue
- * updates. If you don't use this locking you must ensure
- * exclusivity some other way. Locking is strongly recommended.
- * @cmd1: Command
- * @cmd2: Command
- *
- * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
- */
-static inline cvmx_cmd_queue_result_t cvmx_cmd_queue_write2(cvmx_cmd_queue_id_t
- queue_id,
- int use_locking,
- uint64_t cmd1,
- uint64_t cmd2)
-{
- __cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
-
- /* Make sure nobody else is updating the same queue */
- if (likely(use_locking))
- __cvmx_cmd_queue_lock(queue_id, qptr);
-
- /*
- * If a max queue length was specified then make sure we don't
- * exceed it. If any part of the command would be below the
- * limit we allow it.
- */
- if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH && unlikely(qptr->max_depth)) {
- if (unlikely
- (cvmx_cmd_queue_length(queue_id) > (int)qptr->max_depth)) {
- if (likely(use_locking))
- __cvmx_cmd_queue_unlock(qptr);
- return CVMX_CMD_QUEUE_FULL;
- }
- }
-
- /*
- * Normally there is plenty of room in the current buffer for
- * the command.
- */
- if (likely(qptr->index + 2 < qptr->pool_size_m1)) {
- uint64_t *ptr =
- (uint64_t *) cvmx_phys_to_ptr((uint64_t) qptr->
- base_ptr_div128 << 7);
- ptr += qptr->index;
- qptr->index += 2;
- ptr[0] = cmd1;
- ptr[1] = cmd2;
- } else {
- uint64_t *ptr;
- /*
- * Figure out how many command words will fit in this
- * buffer. One location will be needed for the next
- * buffer pointer.
- */
- int count = qptr->pool_size_m1 - qptr->index;
- /*
- * We need a new command buffer. Fail if there isn't
- * one available.
- */
- uint64_t *new_buffer =
- (uint64_t *) cvmx_fpa_alloc(qptr->fpa_pool);
- if (unlikely(new_buffer == NULL)) {
- if (likely(use_locking))
- __cvmx_cmd_queue_unlock(qptr);
- return CVMX_CMD_QUEUE_NO_MEMORY;
- }
- count--;
- ptr =
- (uint64_t *) cvmx_phys_to_ptr((uint64_t) qptr->
- base_ptr_div128 << 7);
- ptr += qptr->index;
- *ptr++ = cmd1;
- if (likely(count))
- *ptr++ = cmd2;
- *ptr = cvmx_ptr_to_phys(new_buffer);
- /*
- * The current buffer is full and has a link to the
- * next buffer. Time to write the rest of the commands
- * into the new buffer.
- */
- qptr->base_ptr_div128 = *ptr >> 7;
- qptr->index = 0;
- if (unlikely(count == 0)) {
- qptr->index = 1;
- new_buffer[0] = cmd2;
- }
- }
-
- /* All updates are complete. Release the lock and return */
- if (likely(use_locking))
- __cvmx_cmd_queue_unlock(qptr);
- return CVMX_CMD_QUEUE_SUCCESS;
-}
-
-/**
- * Simple function to write three command words to a command
- * queue.
- *
- * @queue_id: Hardware command queue to write to
- * @use_locking:
- * Use internal locking to ensure exclusive access for queue
- * updates. If you don't use this locking you must ensure
- * exclusivity some other way. Locking is strongly recommended.
- * @cmd1: Command
- * @cmd2: Command
- * @cmd3: Command
- *
- * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
- */
-static inline cvmx_cmd_queue_result_t cvmx_cmd_queue_write3(cvmx_cmd_queue_id_t
- queue_id,
- int use_locking,
- uint64_t cmd1,
- uint64_t cmd2,
- uint64_t cmd3)
-{
- __cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
-
- /* Make sure nobody else is updating the same queue */
- if (likely(use_locking))
- __cvmx_cmd_queue_lock(queue_id, qptr);
-
- /*
- * If a max queue length was specified then make sure we don't
- * exceed it. If any part of the command would be below the
- * limit we allow it.
- */
- if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH && unlikely(qptr->max_depth)) {
- if (unlikely
- (cvmx_cmd_queue_length(queue_id) > (int)qptr->max_depth)) {
- if (likely(use_locking))
- __cvmx_cmd_queue_unlock(qptr);
- return CVMX_CMD_QUEUE_FULL;
- }
- }
-
- /*
- * Normally there is plenty of room in the current buffer for
- * the command.
- */
- if (likely(qptr->index + 3 < qptr->pool_size_m1)) {
- uint64_t *ptr =
- (uint64_t *) cvmx_phys_to_ptr((uint64_t) qptr->
- base_ptr_div128 << 7);
- ptr += qptr->index;
- qptr->index += 3;
- ptr[0] = cmd1;
- ptr[1] = cmd2;
- ptr[2] = cmd3;
- } else {
- uint64_t *ptr;
- /*
- * Figure out how many command words will fit in this
- * buffer. One location will be needed for the next
- * buffer pointer
- */
- int count = qptr->pool_size_m1 - qptr->index;
- /*
- * We need a new command buffer. Fail if there isn't
- * one available
- */
- uint64_t *new_buffer =
- (uint64_t *) cvmx_fpa_alloc(qptr->fpa_pool);
- if (unlikely(new_buffer == NULL)) {
- if (likely(use_locking))
- __cvmx_cmd_queue_unlock(qptr);
- return CVMX_CMD_QUEUE_NO_MEMORY;
- }
- count--;
- ptr =
- (uint64_t *) cvmx_phys_to_ptr((uint64_t) qptr->
- base_ptr_div128 << 7);
- ptr += qptr->index;
- *ptr++ = cmd1;
- if (count) {
- *ptr++ = cmd2;
- if (count > 1)
- *ptr++ = cmd3;
- }
- *ptr = cvmx_ptr_to_phys(new_buffer);
- /*
- * The current buffer is full and has a link to the
- * next buffer. Time to write the rest of the commands
- * into the new buffer.
- */
- qptr->base_ptr_div128 = *ptr >> 7;
- qptr->index = 0;
- ptr = new_buffer;
- if (count == 0) {
- *ptr++ = cmd2;
- qptr->index++;
- }
- if (count < 2) {
- *ptr++ = cmd3;
- qptr->index++;
- }
- }
-
- /* All updates are complete. Release the lock and return */
- if (likely(use_locking))
- __cvmx_cmd_queue_unlock(qptr);
- return CVMX_CMD_QUEUE_SUCCESS;
-}
-
-#endif /* __CVMX_CMD_QUEUE_H__ */
diff --git a/drivers/staging/octeon/cvmx-config.h b/drivers/staging/octeon/cvmx-config.h
deleted file mode 100644
index 078a520481cf..000000000000
--- a/drivers/staging/octeon/cvmx-config.h
+++ /dev/null
@@ -1,169 +0,0 @@
-#ifndef __CVMX_CONFIG_H__
-#define __CVMX_CONFIG_H__
-
-/************************* Config Specific Defines ************************/
-#define CVMX_LLM_NUM_PORTS 1
-#define CVMX_NULL_POINTER_PROTECT 1
-#define CVMX_ENABLE_DEBUG_PRINTS 1
-/* PKO queues per port for interface 0 (ports 0-15) */
-#define CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 1
-/* PKO queues per port for interface 1 (ports 16-31) */
-#define CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 1
-/* Limit on the number of PKO ports enabled for interface 0 */
-#define CVMX_PKO_MAX_PORTS_INTERFACE0 CVMX_HELPER_PKO_MAX_PORTS_INTERFACE0
-/* Limit on the number of PKO ports enabled for interface 1 */
-#define CVMX_PKO_MAX_PORTS_INTERFACE1 CVMX_HELPER_PKO_MAX_PORTS_INTERFACE1
-/* PKO queues per port for PCI (ports 32-35) */
-#define CVMX_PKO_QUEUES_PER_PORT_PCI 1
-/* PKO queues per port for Loop devices (ports 36-39) */
-#define CVMX_PKO_QUEUES_PER_PORT_LOOP 1
-
-/************************* FPA allocation *********************************/
-/* Pool sizes in bytes, must be multiple of a cache line */
-#define CVMX_FPA_POOL_0_SIZE (16 * CVMX_CACHE_LINE_SIZE)
-#define CVMX_FPA_POOL_1_SIZE (1 * CVMX_CACHE_LINE_SIZE)
-#define CVMX_FPA_POOL_2_SIZE (8 * CVMX_CACHE_LINE_SIZE)
-#define CVMX_FPA_POOL_3_SIZE (0 * CVMX_CACHE_LINE_SIZE)
-#define CVMX_FPA_POOL_4_SIZE (0 * CVMX_CACHE_LINE_SIZE)
-#define CVMX_FPA_POOL_5_SIZE (0 * CVMX_CACHE_LINE_SIZE)
-#define CVMX_FPA_POOL_6_SIZE (0 * CVMX_CACHE_LINE_SIZE)
-#define CVMX_FPA_POOL_7_SIZE (0 * CVMX_CACHE_LINE_SIZE)
-
-/* Pools in use */
-/* Packet buffers */
-#define CVMX_FPA_PACKET_POOL (0)
-#define CVMX_FPA_PACKET_POOL_SIZE CVMX_FPA_POOL_0_SIZE
-/* Work queue entries */
-#define CVMX_FPA_WQE_POOL (1)
-#define CVMX_FPA_WQE_POOL_SIZE CVMX_FPA_POOL_1_SIZE
-/* PKO queue command buffers */
-#define CVMX_FPA_OUTPUT_BUFFER_POOL (2)
-#define CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE CVMX_FPA_POOL_2_SIZE
-
-/************************* FAU allocation ********************************/
-/* The fetch and add registers are allocated here. They are arranged
- * in order of descending size so that all alignment constraints are
- * automatically met. The enums are linked so that the following enum
- * continues allocating where the previous one left off, so the
- * numbering within each enum always starts with zero. The macros
- * take care of the address increment size, so the values entered
- * always increase by 1. FAU registers are accessed with byte
- * addresses.
- */
-
-#define CVMX_FAU_REG_64_ADDR(x) ((x << 3) + CVMX_FAU_REG_64_START)
-typedef enum {
- CVMX_FAU_REG_64_START = 0,
- CVMX_FAU_REG_64_END = CVMX_FAU_REG_64_ADDR(0),
-} cvmx_fau_reg_64_t;
-
-#define CVMX_FAU_REG_32_ADDR(x) ((x << 2) + CVMX_FAU_REG_32_START)
-typedef enum {
- CVMX_FAU_REG_32_START = CVMX_FAU_REG_64_END,
- CVMX_FAU_REG_32_END = CVMX_FAU_REG_32_ADDR(0),
-} cvmx_fau_reg_32_t;
-
-#define CVMX_FAU_REG_16_ADDR(x) ((x << 1) + CVMX_FAU_REG_16_START)
-typedef enum {
- CVMX_FAU_REG_16_START = CVMX_FAU_REG_32_END,
- CVMX_FAU_REG_16_END = CVMX_FAU_REG_16_ADDR(0),
-} cvmx_fau_reg_16_t;
-
-#define CVMX_FAU_REG_8_ADDR(x) ((x) + CVMX_FAU_REG_8_START)
-typedef enum {
- CVMX_FAU_REG_8_START = CVMX_FAU_REG_16_END,
- CVMX_FAU_REG_8_END = CVMX_FAU_REG_8_ADDR(0),
-} cvmx_fau_reg_8_t;
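-
-/*
- * Sketch of how the chaining above is used (the register names are
- * hypothetical): adding two 64-bit counters to the first enum
- *
- *	CVMX_FAU_REG_64_START = 0,
- *	CVMX_FAU_REG_64_PACKETS_RX = CVMX_FAU_REG_64_ADDR(0),
- *	CVMX_FAU_REG_64_PACKETS_TX = CVMX_FAU_REG_64_ADDR(1),
- *	CVMX_FAU_REG_64_END = CVMX_FAU_REG_64_ADDR(2),
- *
- * moves CVMX_FAU_REG_64_END to byte address 16, and the 32/16/8 bit
- * enums continue allocating from that new end automatically.
- */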
-
-/*
- * The name CVMX_FAU_REG_AVAIL_BASE is provided to indicate the first
- * available FAU address that is not allocated in cvmx-config.h. This
- * is 64 bit aligned.
- */
-#define CVMX_FAU_REG_AVAIL_BASE ((CVMX_FAU_REG_8_END + 0x7) & (~0x7ULL))
-#define CVMX_FAU_REG_END (2048)
-
-/********************** scratch memory allocation *************************/
-/* Scratchpad memory allocation. Note that these are byte memory
- * addresses. Some uses of scratchpad (IOBDMA for example) require
- * the use of 8-byte aligned addresses, so proper alignment needs to
- * be taken into account.
- */
-/* Generic scratch iobdma area */
-#define CVMX_SCR_SCRATCH (0)
-/* First location available after cvmx-config.h allocated region. */
-#define CVMX_SCR_REG_AVAIL_BASE (8)
-
-/*
- * CVMX_HELPER_FIRST_MBUFF_SKIP is the number of bytes to reserve
- * before the beginning of the packet. If necessary, override the
- * default here. See the IPD section of the hardware manual for MBUFF
- * SKIP details.
- */
-#define CVMX_HELPER_FIRST_MBUFF_SKIP 184
-
-/*
- * CVMX_HELPER_NOT_FIRST_MBUFF_SKIP is the number of bytes to reserve
- * in each chained packet element. If necessary, override the default
- * here.
- */
-#define CVMX_HELPER_NOT_FIRST_MBUFF_SKIP 0
-
-/*
- * CVMX_HELPER_ENABLE_BACK_PRESSURE controls whether back pressure is
- * enabled for all input ports. This controls if IPD sends
- * backpressure to all ports if Octeon's FPA pools don't have enough
- * packet or work queue entries. Even when this is off, it is still
- * possible to get backpressure from individual hardware ports. When
- * configuring backpressure, also check
- * CVMX_HELPER_DISABLE_*_BACKPRESSURE below. If necessary, override
- * the default here.
- */
-#define CVMX_HELPER_ENABLE_BACK_PRESSURE 1
-
-/*
- * CVMX_HELPER_ENABLE_IPD controls if the IPD is enabled in the helper
- * function. Once it is enabled the hardware starts accepting
- * packets. You might want to skip the IPD enable if configuration
- * changes are needed from the default helper setup. If necessary,
- * override the default here.
- */
-#define CVMX_HELPER_ENABLE_IPD 0
-
-/*
- * CVMX_HELPER_INPUT_TAG_TYPE selects the type of tag that the IPD assigns
- * to incoming packets.
- */
-#define CVMX_HELPER_INPUT_TAG_TYPE CVMX_POW_TAG_TYPE_ORDERED
-
-#define CVMX_ENABLE_PARAMETER_CHECKING 0
-
-/*
- * The following select which fields are used by the PIP to generate
- * the tag on INPUT
- * 0: don't include
- * 1: include
- */
-#define CVMX_HELPER_INPUT_TAG_IPV6_SRC_IP 0
-#define CVMX_HELPER_INPUT_TAG_IPV6_DST_IP 0
-#define CVMX_HELPER_INPUT_TAG_IPV6_SRC_PORT 0
-#define CVMX_HELPER_INPUT_TAG_IPV6_DST_PORT 0
-#define CVMX_HELPER_INPUT_TAG_IPV6_NEXT_HEADER 0
-#define CVMX_HELPER_INPUT_TAG_IPV4_SRC_IP 0
-#define CVMX_HELPER_INPUT_TAG_IPV4_DST_IP 0
-#define CVMX_HELPER_INPUT_TAG_IPV4_SRC_PORT 0
-#define CVMX_HELPER_INPUT_TAG_IPV4_DST_PORT 0
-#define CVMX_HELPER_INPUT_TAG_IPV4_PROTOCOL 0
-#define CVMX_HELPER_INPUT_TAG_INPUT_PORT 1
-
-/* Select skip mode for input ports */
-#define CVMX_HELPER_INPUT_PORT_SKIP_MODE CVMX_PIP_PORT_CFG_MODE_SKIPL2
-
-/*
- * Force backpressure to be disabled. This overrides all other
- * backpressure configuration.
- */
-#define CVMX_HELPER_DISABLE_RGMII_BACKPRESSURE 0
-
-#endif /* __CVMX_CONFIG_H__ */
-
diff --git a/drivers/staging/octeon/cvmx-dbg-defs.h b/drivers/staging/octeon/cvmx-dbg-defs.h
deleted file mode 100644
index abbf42d05e5a..000000000000
--- a/drivers/staging/octeon/cvmx-dbg-defs.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-#ifndef __CVMX_DBG_DEFS_H__
-#define __CVMX_DBG_DEFS_H__
-
-#define CVMX_DBG_DATA \
- CVMX_ADD_IO_SEG(0x00011F00000001E8ull)
-
-union cvmx_dbg_data {
- uint64_t u64;
- struct cvmx_dbg_data_s {
- uint64_t reserved_23_63:41;
- uint64_t c_mul:5;
- uint64_t dsel_ext:1;
- uint64_t data:17;
- } s;
- struct cvmx_dbg_data_cn30xx {
- uint64_t reserved_31_63:33;
- uint64_t pll_mul:3;
- uint64_t reserved_23_27:5;
- uint64_t c_mul:5;
- uint64_t dsel_ext:1;
- uint64_t data:17;
- } cn30xx;
- struct cvmx_dbg_data_cn30xx cn31xx;
- struct cvmx_dbg_data_cn38xx {
- uint64_t reserved_29_63:35;
- uint64_t d_mul:4;
- uint64_t dclk_mul2:1;
- uint64_t cclk_div2:1;
- uint64_t c_mul:5;
- uint64_t dsel_ext:1;
- uint64_t data:17;
- } cn38xx;
- struct cvmx_dbg_data_cn38xx cn38xxp2;
- struct cvmx_dbg_data_cn30xx cn50xx;
- struct cvmx_dbg_data_cn58xx {
- uint64_t reserved_29_63:35;
- uint64_t rem:6;
- uint64_t c_mul:5;
- uint64_t dsel_ext:1;
- uint64_t data:17;
- } cn58xx;
- struct cvmx_dbg_data_cn58xx cn58xxp1;
-};
-
-#endif
diff --git a/drivers/staging/octeon/cvmx-fau.h b/drivers/staging/octeon/cvmx-fau.h
deleted file mode 100644
index a6939fc8ba18..000000000000
--- a/drivers/staging/octeon/cvmx-fau.h
+++ /dev/null
@@ -1,597 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/*
- * Interface to the hardware Fetch and Add Unit.
- */
-
-#ifndef __CVMX_FAU_H__
-#define __CVMX_FAU_H__
-
-/*
- * Octeon Fetch and Add Unit (FAU)
- */
-
-#define CVMX_FAU_LOAD_IO_ADDRESS cvmx_build_io_address(0x1e, 0)
-#define CVMX_FAU_BITS_SCRADDR 63, 56
-#define CVMX_FAU_BITS_LEN 55, 48
-#define CVMX_FAU_BITS_INEVAL 35, 14
-#define CVMX_FAU_BITS_TAGWAIT 13, 13
-#define CVMX_FAU_BITS_NOADD 13, 13
-#define CVMX_FAU_BITS_SIZE 12, 11
-#define CVMX_FAU_BITS_REGISTER 10, 0
-
-typedef enum {
- CVMX_FAU_OP_SIZE_8 = 0,
- CVMX_FAU_OP_SIZE_16 = 1,
- CVMX_FAU_OP_SIZE_32 = 2,
- CVMX_FAU_OP_SIZE_64 = 3
-} cvmx_fau_op_size_t;
-
-/**
- * Tagwait return definition. If a timeout occurs, the error
- * bit will be set. Otherwise the value of the register before
- * the update will be returned.
- */
-typedef struct {
- uint64_t error:1;
- int64_t value:63;
-} cvmx_fau_tagwait64_t;
-
-/**
- * Tagwait return definition. If a timeout occurs, the error
- * bit will be set. Otherwise the value of the register before
- * the update will be returned.
- */
-typedef struct {
- uint64_t error:1;
- int32_t value:31;
-} cvmx_fau_tagwait32_t;
-
-/**
- * Tagwait return definition. If a timeout occurs, the error
- * bit will be set. Otherwise the value of the register before
- * the update will be returned.
- */
-typedef struct {
- uint64_t error:1;
- int16_t value:15;
-} cvmx_fau_tagwait16_t;
-
-/**
- * Tagwait return definition. If a timeout occurs, the error
- * bit will be set. Otherwise the value of the register before
- * the update will be returned.
- */
-typedef struct {
- uint64_t error:1;
- int8_t value:7;
-} cvmx_fau_tagwait8_t;
-
-/**
- * Asynchronous tagwait return definition. If a timeout occurs,
- * the error bit will be set. Otherwise the value of the
- * register before the update will be returned.
- */
-typedef union {
- uint64_t u64;
- struct {
- uint64_t invalid:1;
- uint64_t data:63; /* unpredictable if invalid is set */
- } s;
-} cvmx_fau_async_tagwait_result_t;
-
-/**
- * Builds a store I/O address for writing to the FAU
- *
- * @noadd: 0 = Store value is atomically added to the current value
- * 1 = Store value is atomically written over the current value
- * @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 2 for 16 bit access.
- * - Step by 4 for 32 bit access.
- * - Step by 8 for 64 bit access.
- * Returns Address to store for atomic update
- */
-static inline uint64_t __cvmx_fau_store_address(uint64_t noadd, uint64_t reg)
-{
- return CVMX_ADD_IO_SEG(CVMX_FAU_LOAD_IO_ADDRESS) |
- cvmx_build_bits(CVMX_FAU_BITS_NOADD, noadd) |
- cvmx_build_bits(CVMX_FAU_BITS_REGISTER, reg);
-}
-
-/**
- * Builds a I/O address for accessing the FAU
- *
- * @tagwait: Should the atomic add wait for the current tag switch
- * operation to complete.
- * - 0 = Don't wait
- * - 1 = Wait for tag switch to complete
- * @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 2 for 16 bit access.
- * - Step by 4 for 32 bit access.
- * - Step by 8 for 64 bit access.
- * @value: Signed value to add.
- * Note: When performing 32 and 64 bit access, only the low
- * 22 bits are available.
- * Returns Address to read from for atomic update
- */
-static inline uint64_t __cvmx_fau_atomic_address(uint64_t tagwait, uint64_t reg,
- int64_t value)
-{
- return CVMX_ADD_IO_SEG(CVMX_FAU_LOAD_IO_ADDRESS) |
- cvmx_build_bits(CVMX_FAU_BITS_INEVAL, value) |
- cvmx_build_bits(CVMX_FAU_BITS_TAGWAIT, tagwait) |
- cvmx_build_bits(CVMX_FAU_BITS_REGISTER, reg);
-}
-
-/**
- * Perform an atomic 64 bit add
- *
- * @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 8 for 64 bit access.
- * @value: Signed value to add.
- * Note: Only the low 22 bits are available.
- * Returns Value of the register before the update
- */
-static inline int64_t cvmx_fau_fetch_and_add64(cvmx_fau_reg_64_t reg,
- int64_t value)
-{
- return cvmx_read64_int64(__cvmx_fau_atomic_address(0, reg, value));
-}
-
-/**
- * Perform an atomic 32 bit add
- *
- * @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 4 for 32 bit access.
- * @value: Signed value to add.
- * Note: Only the low 22 bits are available.
- * Returns Value of the register before the update
- */
-static inline int32_t cvmx_fau_fetch_and_add32(cvmx_fau_reg_32_t reg,
- int32_t value)
-{
- return cvmx_read64_int32(__cvmx_fau_atomic_address(0, reg, value));
-}
-
-/**
- * Perform an atomic 16 bit add
- *
- * @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 2 for 16 bit access.
- * @value: Signed value to add.
- * Returns Value of the register before the update
- */
-static inline int16_t cvmx_fau_fetch_and_add16(cvmx_fau_reg_16_t reg,
- int16_t value)
-{
- return cvmx_read64_int16(__cvmx_fau_atomic_address(0, reg, value));
-}
-
-/**
- * Perform an atomic 8 bit add
- *
- * @reg: FAU atomic register to access. 0 <= reg < 2048.
- * @value: Signed value to add.
- * Returns Value of the register before the update
- */
-static inline int8_t cvmx_fau_fetch_and_add8(cvmx_fau_reg_8_t reg, int8_t value)
-{
- return cvmx_read64_int8(__cvmx_fau_atomic_address(0, reg, value));
-}
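-
-/*
- * Typical use (sketch; CVMX_FAU_REG_EXAMPLE is a hypothetical 32-bit
- * register allocated in cvmx-config.h): bump a shared counter and
- * read its previous value in one atomic I/O operation.
- *
- *	int32_t old = cvmx_fau_fetch_and_add32(CVMX_FAU_REG_EXAMPLE, 1);
- */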
-
-/**
- * Perform an atomic 64 bit add after the current tag switch
- * completes
- *
- * @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 8 for 64 bit access.
- * @value: Signed value to add.
- * Note: Only the low 22 bits are available.
- * Returns If a timeout occurs, the error bit will be set. Otherwise
- * the value of the register before the update will be
- * returned
- */
-static inline cvmx_fau_tagwait64_t
-cvmx_fau_tagwait_fetch_and_add64(cvmx_fau_reg_64_t reg, int64_t value)
-{
- union {
- uint64_t i64;
- cvmx_fau_tagwait64_t t;
- } result;
- result.i64 =
- cvmx_read64_int64(__cvmx_fau_atomic_address(1, reg, value));
- return result.t;
-}
-
-/**
- * Perform an atomic 32 bit add after the current tag switch
- * completes
- *
- * @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 4 for 32 bit access.
- * @value: Signed value to add.
- * Note: Only the low 22 bits are available.
- * Returns If a timeout occurs, the error bit will be set. Otherwise
- * the value of the register before the update will be
- * returned
- */
-static inline cvmx_fau_tagwait32_t
-cvmx_fau_tagwait_fetch_and_add32(cvmx_fau_reg_32_t reg, int32_t value)
-{
- union {
- uint64_t i32;
- cvmx_fau_tagwait32_t t;
- } result;
- result.i32 =
- cvmx_read64_int32(__cvmx_fau_atomic_address(1, reg, value));
- return result.t;
-}
-
-/**
- * Perform an atomic 16 bit add after the current tag switch
- * completes
- *
- * @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 2 for 16 bit access.
- * @value: Signed value to add.
- * Returns If a timeout occurs, the error bit will be set. Otherwise
- * the value of the register before the update will be
- * returned
- */
-static inline cvmx_fau_tagwait16_t
-cvmx_fau_tagwait_fetch_and_add16(cvmx_fau_reg_16_t reg, int16_t value)
-{
- union {
- uint64_t i16;
- cvmx_fau_tagwait16_t t;
- } result;
- result.i16 =
- cvmx_read64_int16(__cvmx_fau_atomic_address(1, reg, value));
- return result.t;
-}
-
-/**
- * Perform an atomic 8 bit add after the current tag switch
- * completes
- *
- * @reg: FAU atomic register to access. 0 <= reg < 2048.
- * @value: Signed value to add.
- * Returns If a timeout occurs, the error bit will be set. Otherwise
- * the value of the register before the update will be
- * returned
- */
-static inline cvmx_fau_tagwait8_t
-cvmx_fau_tagwait_fetch_and_add8(cvmx_fau_reg_8_t reg, int8_t value)
-{
- union {
- uint64_t i8;
- cvmx_fau_tagwait8_t t;
- } result;
- result.i8 = cvmx_read64_int8(__cvmx_fau_atomic_address(1, reg, value));
- return result.t;
-}
-
-/**
- * Builds I/O data for async operations
- *
- * @scraddr: Scratch pad byte address to write to. Must be 8 byte aligned
- * @value: Signed value to add.
- * Note: When performing 32 and 64 bit access, only the low
- * 22 bits are available.
- * @tagwait: Should the atomic add wait for the current tag switch
- * operation to complete.
- * - 0 = Don't wait
- * - 1 = Wait for tag switch to complete
- * @size: The size of the operation:
- * - CVMX_FAU_OP_SIZE_8 (0) = 8 bits
- * - CVMX_FAU_OP_SIZE_16 (1) = 16 bits
- * - CVMX_FAU_OP_SIZE_32 (2) = 32 bits
- * - CVMX_FAU_OP_SIZE_64 (3) = 64 bits
- * @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 2 for 16 bit access.
- * - Step by 4 for 32 bit access.
- * - Step by 8 for 64 bit access.
- * Returns Data to write using cvmx_send_single
- */
-static inline uint64_t __cvmx_fau_iobdma_data(uint64_t scraddr, int64_t value,
- uint64_t tagwait,
- cvmx_fau_op_size_t size,
- uint64_t reg)
-{
- return CVMX_FAU_LOAD_IO_ADDRESS |
- cvmx_build_bits(CVMX_FAU_BITS_SCRADDR, scraddr >> 3) |
- cvmx_build_bits(CVMX_FAU_BITS_LEN, 1) |
- cvmx_build_bits(CVMX_FAU_BITS_INEVAL, value) |
- cvmx_build_bits(CVMX_FAU_BITS_TAGWAIT, tagwait) |
- cvmx_build_bits(CVMX_FAU_BITS_SIZE, size) |
- cvmx_build_bits(CVMX_FAU_BITS_REGISTER, reg);
-}
-
-/**
- * Perform an async atomic 64 bit add. The old value is
- * placed in the scratch memory at byte address scraddr.
- *
- * @scraddr: Scratch memory byte address to put response in.
- * Must be 8 byte aligned.
- * @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 8 for 64 bit access.
- * @value: Signed value to add.
- * Note: Only the low 22 bits are available.
- * Returns Placed in the scratch pad register
- */
-static inline void cvmx_fau_async_fetch_and_add64(uint64_t scraddr,
- cvmx_fau_reg_64_t reg,
- int64_t value)
-{
- cvmx_send_single(__cvmx_fau_iobdma_data
- (scraddr, value, 0, CVMX_FAU_OP_SIZE_64, reg));
-}
-
-/**
- * Perform an async atomic 32 bit add. The old value is
- * placed in the scratch memory at byte address scraddr.
- *
- * @scraddr: Scratch memory byte address to put response in.
- * Must be 8 byte aligned.
- * @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 4 for 32 bit access.
- * @value: Signed value to add.
- * Note: Only the low 22 bits are available.
- * Returns Placed in the scratch pad register
- */
-static inline void cvmx_fau_async_fetch_and_add32(uint64_t scraddr,
- cvmx_fau_reg_32_t reg,
- int32_t value)
-{
- cvmx_send_single(__cvmx_fau_iobdma_data
- (scraddr, value, 0, CVMX_FAU_OP_SIZE_32, reg));
-}
-
-/**
- * Perform an async atomic 16 bit add. The old value is
- * placed in the scratch memory at byte address scraddr.
- *
- * @scraddr: Scratch memory byte address to put response in.
- * Must be 8 byte aligned.
- * @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 2 for 16 bit access.
- * @value: Signed value to add.
- * Returns Placed in the scratch pad register
- */
-static inline void cvmx_fau_async_fetch_and_add16(uint64_t scraddr,
- cvmx_fau_reg_16_t reg,
- int16_t value)
-{
- cvmx_send_single(__cvmx_fau_iobdma_data
- (scraddr, value, 0, CVMX_FAU_OP_SIZE_16, reg));
-}
-
-/**
- * Perform an async atomic 8 bit add. The old value is
- * placed in the scratch memory at byte address scraddr.
- *
- * @scraddr: Scratch memory byte address to put response in.
- * Must be 8 byte aligned.
- * @reg: FAU atomic register to access. 0 <= reg < 2048.
- * @value: Signed value to add.
- * Returns Placed in the scratch pad register
- */
-static inline void cvmx_fau_async_fetch_and_add8(uint64_t scraddr,
- cvmx_fau_reg_8_t reg,
- int8_t value)
-{
- cvmx_send_single(__cvmx_fau_iobdma_data
- (scraddr, value, 0, CVMX_FAU_OP_SIZE_8, reg));
-}
-
-/**
- * Perform an async atomic 64 bit add after the current tag
- * switch completes.
- *
- * @scraddr: Scratch memory byte address to put response in. Must be
- * 8 byte aligned. If a timeout occurs, the error bit (63)
- * will be set. Otherwise the value of the register before
- * the update will be returned
- *
- * @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 8 for 64 bit access.
- * @value: Signed value to add.
- * Note: Only the low 22 bits are available.
- * Returns Placed in the scratch pad register
- */
-static inline void cvmx_fau_async_tagwait_fetch_and_add64(uint64_t scraddr,
- cvmx_fau_reg_64_t reg,
- int64_t value)
-{
- cvmx_send_single(__cvmx_fau_iobdma_data
- (scraddr, value, 1, CVMX_FAU_OP_SIZE_64, reg));
-}
-
-/**
- * Perform an async atomic 32 bit add after the current tag
- * switch completes.
- *
- * @scraddr: Scratch memory byte address to put response in. Must be
- * 8 byte aligned. If a timeout occurs, the error bit (63)
- * will be set. Otherwise the value of the register before
- * the update will be returned
- *
- * @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 4 for 32 bit access.
- * @value: Signed value to add.
- * Note: Only the low 22 bits are available.
- * Returns Placed in the scratch pad register
- */
-static inline void cvmx_fau_async_tagwait_fetch_and_add32(uint64_t scraddr,
- cvmx_fau_reg_32_t reg,
- int32_t value)
-{
- cvmx_send_single(__cvmx_fau_iobdma_data
- (scraddr, value, 1, CVMX_FAU_OP_SIZE_32, reg));
-}
-
-/**
- * Perform an async atomic 16 bit add after the current tag
- * switch completes.
- *
- * @scraddr: Scratch memory byte address to put response in. Must be
- * 8 byte aligned. If a timeout occurs, the error bit (63)
- * will be set. Otherwise the value of the register before
- * the update will be returned
- *
- * @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 2 for 16 bit access.
- * @value: Signed value to add.
- *
- * Returns Placed in the scratch pad register
- */
-static inline void cvmx_fau_async_tagwait_fetch_and_add16(uint64_t scraddr,
- cvmx_fau_reg_16_t reg,
- int16_t value)
-{
- cvmx_send_single(__cvmx_fau_iobdma_data
- (scraddr, value, 1, CVMX_FAU_OP_SIZE_16, reg));
-}
-
-/**
- * Perform an async atomic 8 bit add after the current tag
- * switch completes.
- *
- * @scraddr: Scratch memory byte address to put response in. Must be
- * 8 byte aligned. If a timeout occurs, the error bit (63)
- * will be set. Otherwise the value of the register before
- * the update will be returned
- *
- * @reg: FAU atomic register to access. 0 <= reg < 2048.
- * @value: Signed value to add.
- *
- * Returns Placed in the scratch pad register
- */
-static inline void cvmx_fau_async_tagwait_fetch_and_add8(uint64_t scraddr,
- cvmx_fau_reg_8_t reg,
- int8_t value)
-{
- cvmx_send_single(__cvmx_fau_iobdma_data
- (scraddr, value, 1, CVMX_FAU_OP_SIZE_8, reg));
-}
-
-/**
- * Perform an atomic 64 bit add
- *
- * @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 8 for 64 bit access.
- * @value: Signed value to add.
- */
-static inline void cvmx_fau_atomic_add64(cvmx_fau_reg_64_t reg, int64_t value)
-{
- cvmx_write64_int64(__cvmx_fau_store_address(0, reg), value);
-}
-
-/**
- * Perform an atomic 32 bit add
- *
- * @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 4 for 32 bit access.
- * @value: Signed value to add.
- */
-static inline void cvmx_fau_atomic_add32(cvmx_fau_reg_32_t reg, int32_t value)
-{
- cvmx_write64_int32(__cvmx_fau_store_address(0, reg), value);
-}
-
-/**
- * Perform an atomic 16 bit add
- *
- * @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 2 for 16 bit access.
- * @value: Signed value to add.
- */
-static inline void cvmx_fau_atomic_add16(cvmx_fau_reg_16_t reg, int16_t value)
-{
- cvmx_write64_int16(__cvmx_fau_store_address(0, reg), value);
-}
-
-/**
- * Perform an atomic 8 bit add
- *
- * @reg: FAU atomic register to access. 0 <= reg < 2048.
- * @value: Signed value to add.
- */
-static inline void cvmx_fau_atomic_add8(cvmx_fau_reg_8_t reg, int8_t value)
-{
- cvmx_write64_int8(__cvmx_fau_store_address(0, reg), value);
-}
-
-/**
- * Perform an atomic 64 bit write
- *
- * @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 8 for 64 bit access.
- * @value: Signed value to write.
- */
-static inline void cvmx_fau_atomic_write64(cvmx_fau_reg_64_t reg, int64_t value)
-{
- cvmx_write64_int64(__cvmx_fau_store_address(1, reg), value);
-}
-
-/**
- * Perform an atomic 32 bit write
- *
- * @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 4 for 32 bit access.
- * @value: Signed value to write.
- */
-static inline void cvmx_fau_atomic_write32(cvmx_fau_reg_32_t reg, int32_t value)
-{
- cvmx_write64_int32(__cvmx_fau_store_address(1, reg), value);
-}
-
-/**
- * Perform an atomic 16 bit write
- *
- * @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 2 for 16 bit access.
- * @value: Signed value to write.
- */
-static inline void cvmx_fau_atomic_write16(cvmx_fau_reg_16_t reg, int16_t value)
-{
- cvmx_write64_int16(__cvmx_fau_store_address(1, reg), value);
-}
-
-/**
- * Perform an atomic 8 bit write
- *
- * @reg: FAU atomic register to access. 0 <= reg < 2048.
- * @value: Signed value to write.
- */
-static inline void cvmx_fau_atomic_write8(cvmx_fau_reg_8_t reg, int8_t value)
-{
- cvmx_write64_int8(__cvmx_fau_store_address(1, reg), value);
-}
-
-#endif /* __CVMX_FAU_H__ */
diff --git a/drivers/staging/octeon/cvmx-fpa-defs.h b/drivers/staging/octeon/cvmx-fpa-defs.h
deleted file mode 100644
index bf5546b90110..000000000000
--- a/drivers/staging/octeon/cvmx-fpa-defs.h
+++ /dev/null
@@ -1,403 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-#ifndef __CVMX_FPA_DEFS_H__
-#define __CVMX_FPA_DEFS_H__
-
-#define CVMX_FPA_BIST_STATUS \
- CVMX_ADD_IO_SEG(0x00011800280000E8ull)
-#define CVMX_FPA_CTL_STATUS \
- CVMX_ADD_IO_SEG(0x0001180028000050ull)
-#define CVMX_FPA_FPF0_MARKS \
- CVMX_ADD_IO_SEG(0x0001180028000000ull)
-#define CVMX_FPA_FPF0_SIZE \
- CVMX_ADD_IO_SEG(0x0001180028000058ull)
-#define CVMX_FPA_FPF1_MARKS \
- CVMX_ADD_IO_SEG(0x0001180028000008ull)
-#define CVMX_FPA_FPF2_MARKS \
- CVMX_ADD_IO_SEG(0x0001180028000010ull)
-#define CVMX_FPA_FPF3_MARKS \
- CVMX_ADD_IO_SEG(0x0001180028000018ull)
-#define CVMX_FPA_FPF4_MARKS \
- CVMX_ADD_IO_SEG(0x0001180028000020ull)
-#define CVMX_FPA_FPF5_MARKS \
- CVMX_ADD_IO_SEG(0x0001180028000028ull)
-#define CVMX_FPA_FPF6_MARKS \
- CVMX_ADD_IO_SEG(0x0001180028000030ull)
-#define CVMX_FPA_FPF7_MARKS \
- CVMX_ADD_IO_SEG(0x0001180028000038ull)
-#define CVMX_FPA_FPFX_MARKS(offset) \
- CVMX_ADD_IO_SEG(0x0001180028000008ull + (((offset) & 7) * 8) - 8 * 1)
-#define CVMX_FPA_FPFX_SIZE(offset) \
- CVMX_ADD_IO_SEG(0x0001180028000060ull + (((offset) & 7) * 8) - 8 * 1)
-#define CVMX_FPA_INT_ENB \
- CVMX_ADD_IO_SEG(0x0001180028000048ull)
-#define CVMX_FPA_INT_SUM \
- CVMX_ADD_IO_SEG(0x0001180028000040ull)
-#define CVMX_FPA_QUE0_PAGE_INDEX \
- CVMX_ADD_IO_SEG(0x00011800280000F0ull)
-#define CVMX_FPA_QUE1_PAGE_INDEX \
- CVMX_ADD_IO_SEG(0x00011800280000F8ull)
-#define CVMX_FPA_QUE2_PAGE_INDEX \
- CVMX_ADD_IO_SEG(0x0001180028000100ull)
-#define CVMX_FPA_QUE3_PAGE_INDEX \
- CVMX_ADD_IO_SEG(0x0001180028000108ull)
-#define CVMX_FPA_QUE4_PAGE_INDEX \
- CVMX_ADD_IO_SEG(0x0001180028000110ull)
-#define CVMX_FPA_QUE5_PAGE_INDEX \
- CVMX_ADD_IO_SEG(0x0001180028000118ull)
-#define CVMX_FPA_QUE6_PAGE_INDEX \
- CVMX_ADD_IO_SEG(0x0001180028000120ull)
-#define CVMX_FPA_QUE7_PAGE_INDEX \
- CVMX_ADD_IO_SEG(0x0001180028000128ull)
-#define CVMX_FPA_QUEX_AVAILABLE(offset) \
- CVMX_ADD_IO_SEG(0x0001180028000098ull + (((offset) & 7) * 8))
-#define CVMX_FPA_QUEX_PAGE_INDEX(offset) \
- CVMX_ADD_IO_SEG(0x00011800280000F0ull + (((offset) & 7) * 8))
-#define CVMX_FPA_QUE_ACT \
- CVMX_ADD_IO_SEG(0x0001180028000138ull)
-#define CVMX_FPA_QUE_EXP \
- CVMX_ADD_IO_SEG(0x0001180028000130ull)
-#define CVMX_FPA_WART_CTL \
- CVMX_ADD_IO_SEG(0x00011800280000D8ull)
-#define CVMX_FPA_WART_STATUS \
- CVMX_ADD_IO_SEG(0x00011800280000E0ull)
-
-union cvmx_fpa_bist_status {
- uint64_t u64;
- struct cvmx_fpa_bist_status_s {
- uint64_t reserved_5_63:59;
- uint64_t frd:1;
- uint64_t fpf0:1;
- uint64_t fpf1:1;
- uint64_t ffr:1;
- uint64_t fdr:1;
- } s;
- struct cvmx_fpa_bist_status_s cn30xx;
- struct cvmx_fpa_bist_status_s cn31xx;
- struct cvmx_fpa_bist_status_s cn38xx;
- struct cvmx_fpa_bist_status_s cn38xxp2;
- struct cvmx_fpa_bist_status_s cn50xx;
- struct cvmx_fpa_bist_status_s cn52xx;
- struct cvmx_fpa_bist_status_s cn52xxp1;
- struct cvmx_fpa_bist_status_s cn56xx;
- struct cvmx_fpa_bist_status_s cn56xxp1;
- struct cvmx_fpa_bist_status_s cn58xx;
- struct cvmx_fpa_bist_status_s cn58xxp1;
-};
-
-union cvmx_fpa_ctl_status {
- uint64_t u64;
- struct cvmx_fpa_ctl_status_s {
- uint64_t reserved_18_63:46;
- uint64_t reset:1;
- uint64_t use_ldt:1;
- uint64_t use_stt:1;
- uint64_t enb:1;
- uint64_t mem1_err:7;
- uint64_t mem0_err:7;
- } s;
- struct cvmx_fpa_ctl_status_s cn30xx;
- struct cvmx_fpa_ctl_status_s cn31xx;
- struct cvmx_fpa_ctl_status_s cn38xx;
- struct cvmx_fpa_ctl_status_s cn38xxp2;
- struct cvmx_fpa_ctl_status_s cn50xx;
- struct cvmx_fpa_ctl_status_s cn52xx;
- struct cvmx_fpa_ctl_status_s cn52xxp1;
- struct cvmx_fpa_ctl_status_s cn56xx;
- struct cvmx_fpa_ctl_status_s cn56xxp1;
- struct cvmx_fpa_ctl_status_s cn58xx;
- struct cvmx_fpa_ctl_status_s cn58xxp1;
-};
-
-union cvmx_fpa_fpfx_marks {
- uint64_t u64;
- struct cvmx_fpa_fpfx_marks_s {
- uint64_t reserved_22_63:42;
- uint64_t fpf_wr:11;
- uint64_t fpf_rd:11;
- } s;
- struct cvmx_fpa_fpfx_marks_s cn38xx;
- struct cvmx_fpa_fpfx_marks_s cn38xxp2;
- struct cvmx_fpa_fpfx_marks_s cn56xx;
- struct cvmx_fpa_fpfx_marks_s cn56xxp1;
- struct cvmx_fpa_fpfx_marks_s cn58xx;
- struct cvmx_fpa_fpfx_marks_s cn58xxp1;
-};
-
-union cvmx_fpa_fpfx_size {
- uint64_t u64;
- struct cvmx_fpa_fpfx_size_s {
- uint64_t reserved_11_63:53;
- uint64_t fpf_siz:11;
- } s;
- struct cvmx_fpa_fpfx_size_s cn38xx;
- struct cvmx_fpa_fpfx_size_s cn38xxp2;
- struct cvmx_fpa_fpfx_size_s cn56xx;
- struct cvmx_fpa_fpfx_size_s cn56xxp1;
- struct cvmx_fpa_fpfx_size_s cn58xx;
- struct cvmx_fpa_fpfx_size_s cn58xxp1;
-};
-
-union cvmx_fpa_fpf0_marks {
- uint64_t u64;
- struct cvmx_fpa_fpf0_marks_s {
- uint64_t reserved_24_63:40;
- uint64_t fpf_wr:12;
- uint64_t fpf_rd:12;
- } s;
- struct cvmx_fpa_fpf0_marks_s cn38xx;
- struct cvmx_fpa_fpf0_marks_s cn38xxp2;
- struct cvmx_fpa_fpf0_marks_s cn56xx;
- struct cvmx_fpa_fpf0_marks_s cn56xxp1;
- struct cvmx_fpa_fpf0_marks_s cn58xx;
- struct cvmx_fpa_fpf0_marks_s cn58xxp1;
-};
-
-union cvmx_fpa_fpf0_size {
- uint64_t u64;
- struct cvmx_fpa_fpf0_size_s {
- uint64_t reserved_12_63:52;
- uint64_t fpf_siz:12;
- } s;
- struct cvmx_fpa_fpf0_size_s cn38xx;
- struct cvmx_fpa_fpf0_size_s cn38xxp2;
- struct cvmx_fpa_fpf0_size_s cn56xx;
- struct cvmx_fpa_fpf0_size_s cn56xxp1;
- struct cvmx_fpa_fpf0_size_s cn58xx;
- struct cvmx_fpa_fpf0_size_s cn58xxp1;
-};
-
-union cvmx_fpa_int_enb {
- uint64_t u64;
- struct cvmx_fpa_int_enb_s {
- uint64_t reserved_28_63:36;
- uint64_t q7_perr:1;
- uint64_t q7_coff:1;
- uint64_t q7_und:1;
- uint64_t q6_perr:1;
- uint64_t q6_coff:1;
- uint64_t q6_und:1;
- uint64_t q5_perr:1;
- uint64_t q5_coff:1;
- uint64_t q5_und:1;
- uint64_t q4_perr:1;
- uint64_t q4_coff:1;
- uint64_t q4_und:1;
- uint64_t q3_perr:1;
- uint64_t q3_coff:1;
- uint64_t q3_und:1;
- uint64_t q2_perr:1;
- uint64_t q2_coff:1;
- uint64_t q2_und:1;
- uint64_t q1_perr:1;
- uint64_t q1_coff:1;
- uint64_t q1_und:1;
- uint64_t q0_perr:1;
- uint64_t q0_coff:1;
- uint64_t q0_und:1;
- uint64_t fed1_dbe:1;
- uint64_t fed1_sbe:1;
- uint64_t fed0_dbe:1;
- uint64_t fed0_sbe:1;
- } s;
- struct cvmx_fpa_int_enb_s cn30xx;
- struct cvmx_fpa_int_enb_s cn31xx;
- struct cvmx_fpa_int_enb_s cn38xx;
- struct cvmx_fpa_int_enb_s cn38xxp2;
- struct cvmx_fpa_int_enb_s cn50xx;
- struct cvmx_fpa_int_enb_s cn52xx;
- struct cvmx_fpa_int_enb_s cn52xxp1;
- struct cvmx_fpa_int_enb_s cn56xx;
- struct cvmx_fpa_int_enb_s cn56xxp1;
- struct cvmx_fpa_int_enb_s cn58xx;
- struct cvmx_fpa_int_enb_s cn58xxp1;
-};
-
-union cvmx_fpa_int_sum {
- uint64_t u64;
- struct cvmx_fpa_int_sum_s {
- uint64_t reserved_28_63:36;
- uint64_t q7_perr:1;
- uint64_t q7_coff:1;
- uint64_t q7_und:1;
- uint64_t q6_perr:1;
- uint64_t q6_coff:1;
- uint64_t q6_und:1;
- uint64_t q5_perr:1;
- uint64_t q5_coff:1;
- uint64_t q5_und:1;
- uint64_t q4_perr:1;
- uint64_t q4_coff:1;
- uint64_t q4_und:1;
- uint64_t q3_perr:1;
- uint64_t q3_coff:1;
- uint64_t q3_und:1;
- uint64_t q2_perr:1;
- uint64_t q2_coff:1;
- uint64_t q2_und:1;
- uint64_t q1_perr:1;
- uint64_t q1_coff:1;
- uint64_t q1_und:1;
- uint64_t q0_perr:1;
- uint64_t q0_coff:1;
- uint64_t q0_und:1;
- uint64_t fed1_dbe:1;
- uint64_t fed1_sbe:1;
- uint64_t fed0_dbe:1;
- uint64_t fed0_sbe:1;
- } s;
- struct cvmx_fpa_int_sum_s cn30xx;
- struct cvmx_fpa_int_sum_s cn31xx;
- struct cvmx_fpa_int_sum_s cn38xx;
- struct cvmx_fpa_int_sum_s cn38xxp2;
- struct cvmx_fpa_int_sum_s cn50xx;
- struct cvmx_fpa_int_sum_s cn52xx;
- struct cvmx_fpa_int_sum_s cn52xxp1;
- struct cvmx_fpa_int_sum_s cn56xx;
- struct cvmx_fpa_int_sum_s cn56xxp1;
- struct cvmx_fpa_int_sum_s cn58xx;
- struct cvmx_fpa_int_sum_s cn58xxp1;
-};
-
-union cvmx_fpa_quex_available {
- uint64_t u64;
- struct cvmx_fpa_quex_available_s {
- uint64_t reserved_29_63:35;
- uint64_t que_siz:29;
- } s;
- struct cvmx_fpa_quex_available_s cn30xx;
- struct cvmx_fpa_quex_available_s cn31xx;
- struct cvmx_fpa_quex_available_s cn38xx;
- struct cvmx_fpa_quex_available_s cn38xxp2;
- struct cvmx_fpa_quex_available_s cn50xx;
- struct cvmx_fpa_quex_available_s cn52xx;
- struct cvmx_fpa_quex_available_s cn52xxp1;
- struct cvmx_fpa_quex_available_s cn56xx;
- struct cvmx_fpa_quex_available_s cn56xxp1;
- struct cvmx_fpa_quex_available_s cn58xx;
- struct cvmx_fpa_quex_available_s cn58xxp1;
-};
-
-union cvmx_fpa_quex_page_index {
- uint64_t u64;
- struct cvmx_fpa_quex_page_index_s {
- uint64_t reserved_25_63:39;
- uint64_t pg_num:25;
- } s;
- struct cvmx_fpa_quex_page_index_s cn30xx;
- struct cvmx_fpa_quex_page_index_s cn31xx;
- struct cvmx_fpa_quex_page_index_s cn38xx;
- struct cvmx_fpa_quex_page_index_s cn38xxp2;
- struct cvmx_fpa_quex_page_index_s cn50xx;
- struct cvmx_fpa_quex_page_index_s cn52xx;
- struct cvmx_fpa_quex_page_index_s cn52xxp1;
- struct cvmx_fpa_quex_page_index_s cn56xx;
- struct cvmx_fpa_quex_page_index_s cn56xxp1;
- struct cvmx_fpa_quex_page_index_s cn58xx;
- struct cvmx_fpa_quex_page_index_s cn58xxp1;
-};
-
-union cvmx_fpa_que_act {
- uint64_t u64;
- struct cvmx_fpa_que_act_s {
- uint64_t reserved_29_63:35;
- uint64_t act_que:3;
- uint64_t act_indx:26;
- } s;
- struct cvmx_fpa_que_act_s cn30xx;
- struct cvmx_fpa_que_act_s cn31xx;
- struct cvmx_fpa_que_act_s cn38xx;
- struct cvmx_fpa_que_act_s cn38xxp2;
- struct cvmx_fpa_que_act_s cn50xx;
- struct cvmx_fpa_que_act_s cn52xx;
- struct cvmx_fpa_que_act_s cn52xxp1;
- struct cvmx_fpa_que_act_s cn56xx;
- struct cvmx_fpa_que_act_s cn56xxp1;
- struct cvmx_fpa_que_act_s cn58xx;
- struct cvmx_fpa_que_act_s cn58xxp1;
-};
-
-union cvmx_fpa_que_exp {
- uint64_t u64;
- struct cvmx_fpa_que_exp_s {
- uint64_t reserved_29_63:35;
- uint64_t exp_que:3;
- uint64_t exp_indx:26;
- } s;
- struct cvmx_fpa_que_exp_s cn30xx;
- struct cvmx_fpa_que_exp_s cn31xx;
- struct cvmx_fpa_que_exp_s cn38xx;
- struct cvmx_fpa_que_exp_s cn38xxp2;
- struct cvmx_fpa_que_exp_s cn50xx;
- struct cvmx_fpa_que_exp_s cn52xx;
- struct cvmx_fpa_que_exp_s cn52xxp1;
- struct cvmx_fpa_que_exp_s cn56xx;
- struct cvmx_fpa_que_exp_s cn56xxp1;
- struct cvmx_fpa_que_exp_s cn58xx;
- struct cvmx_fpa_que_exp_s cn58xxp1;
-};
-
-union cvmx_fpa_wart_ctl {
- uint64_t u64;
- struct cvmx_fpa_wart_ctl_s {
- uint64_t reserved_16_63:48;
- uint64_t ctl:16;
- } s;
- struct cvmx_fpa_wart_ctl_s cn30xx;
- struct cvmx_fpa_wart_ctl_s cn31xx;
- struct cvmx_fpa_wart_ctl_s cn38xx;
- struct cvmx_fpa_wart_ctl_s cn38xxp2;
- struct cvmx_fpa_wart_ctl_s cn50xx;
- struct cvmx_fpa_wart_ctl_s cn52xx;
- struct cvmx_fpa_wart_ctl_s cn52xxp1;
- struct cvmx_fpa_wart_ctl_s cn56xx;
- struct cvmx_fpa_wart_ctl_s cn56xxp1;
- struct cvmx_fpa_wart_ctl_s cn58xx;
- struct cvmx_fpa_wart_ctl_s cn58xxp1;
-};
-
-union cvmx_fpa_wart_status {
- uint64_t u64;
- struct cvmx_fpa_wart_status_s {
- uint64_t reserved_32_63:32;
- uint64_t status:32;
- } s;
- struct cvmx_fpa_wart_status_s cn30xx;
- struct cvmx_fpa_wart_status_s cn31xx;
- struct cvmx_fpa_wart_status_s cn38xx;
- struct cvmx_fpa_wart_status_s cn38xxp2;
- struct cvmx_fpa_wart_status_s cn50xx;
- struct cvmx_fpa_wart_status_s cn52xx;
- struct cvmx_fpa_wart_status_s cn52xxp1;
- struct cvmx_fpa_wart_status_s cn56xx;
- struct cvmx_fpa_wart_status_s cn56xxp1;
- struct cvmx_fpa_wart_status_s cn58xx;
- struct cvmx_fpa_wart_status_s cn58xxp1;
-};
-
-#endif
diff --git a/drivers/staging/octeon/cvmx-fpa.c b/drivers/staging/octeon/cvmx-fpa.c
deleted file mode 100644
index ad44b8bd8057..000000000000
--- a/drivers/staging/octeon/cvmx-fpa.c
+++ /dev/null
@@ -1,183 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/**
- * @file
- *
- * Support library for the hardware Free Pool Allocator.
- *
- *
- */
-
-#include "cvmx-config.h"
-#include "cvmx.h"
-#include "cvmx-fpa.h"
-#include "cvmx-ipd.h"
-
-/**
- * Current state of all the pools. Use access functions
- * instead of using it directly.
- */
-CVMX_SHARED cvmx_fpa_pool_info_t cvmx_fpa_pool_info[CVMX_FPA_NUM_POOLS];
-
-/**
- * Setup a FPA pool to control a new block of memory. The
- * buffer pointer must be a physical address.
- *
- * @pool: Pool to initialize
- * 0 <= pool < 8
- * @name: Constant character string to name this pool.
- * String is not copied.
- * @buffer: Pointer to the block of memory to use. This must be
- * accessible by all processors and external hardware.
- * @block_size: Size for each block controlled by the FPA
- * @num_blocks: Number of blocks
- *
- * Returns 0 on Success,
- * -1 on failure
- */
-int cvmx_fpa_setup_pool(uint64_t pool, const char *name, void *buffer,
- uint64_t block_size, uint64_t num_blocks)
-{
- char *ptr;
- if (!buffer) {
- cvmx_dprintf
- ("ERROR: cvmx_fpa_setup_pool: NULL buffer pointer!\n");
- return -1;
- }
- if (pool >= CVMX_FPA_NUM_POOLS) {
- cvmx_dprintf("ERROR: cvmx_fpa_setup_pool: Illegal pool!\n");
- return -1;
- }
-
- if (block_size < CVMX_FPA_MIN_BLOCK_SIZE) {
- cvmx_dprintf
- ("ERROR: cvmx_fpa_setup_pool: Block size too small.\n");
- return -1;
- }
-
- if (((unsigned long)buffer & (CVMX_FPA_ALIGNMENT - 1)) != 0) {
- cvmx_dprintf
- ("ERROR: cvmx_fpa_setup_pool: Buffer not aligned properly.\n");
- return -1;
- }
-
- cvmx_fpa_pool_info[pool].name = name;
- cvmx_fpa_pool_info[pool].size = block_size;
- cvmx_fpa_pool_info[pool].starting_element_count = num_blocks;
- cvmx_fpa_pool_info[pool].base = buffer;
-
- ptr = (char *)buffer;
- while (num_blocks--) {
- cvmx_fpa_free(ptr, pool, 0);
- ptr += block_size;
- }
- return 0;
-}
-
-/**
- * Shutdown a Memory pool and validate that it had all of
- * the buffers originally placed in it.
- *
- * @pool: Pool to shutdown
- * Returns Zero on success
- * - Positive is count of missing buffers
- * - Negative is too many buffers or corrupted pointers
- */
-uint64_t cvmx_fpa_shutdown_pool(uint64_t pool)
-{
- uint64_t errors = 0;
- uint64_t count = 0;
- uint64_t base = cvmx_ptr_to_phys(cvmx_fpa_pool_info[pool].base);
- uint64_t finish =
- base +
- cvmx_fpa_pool_info[pool].size *
- cvmx_fpa_pool_info[pool].starting_element_count;
- void *ptr;
- uint64_t address;
-
- count = 0;
- do {
- ptr = cvmx_fpa_alloc(pool);
- if (ptr)
- address = cvmx_ptr_to_phys(ptr);
- else
- address = 0;
- if (address) {
- if ((address >= base) && (address < finish) &&
- (((address -
- base) % cvmx_fpa_pool_info[pool].size) == 0)) {
- count++;
- } else {
- cvmx_dprintf
- ("ERROR: cvmx_fpa_shutdown_pool: Illegal address 0x%llx in pool %s(%d)\n",
- (unsigned long long)address,
- cvmx_fpa_pool_info[pool].name, (int)pool);
- errors++;
- }
- }
- } while (address);
-
-#ifdef CVMX_ENABLE_PKO_FUNCTIONS
- if (pool == 0)
- cvmx_ipd_free_ptr();
-#endif
-
- if (errors) {
- cvmx_dprintf
- ("ERROR: cvmx_fpa_shutdown_pool: Pool %s(%d) started at 0x%llx, ended at 0x%llx, with a step of 0x%llx\n",
- cvmx_fpa_pool_info[pool].name, (int)pool,
- (unsigned long long)base, (unsigned long long)finish,
- (unsigned long long)cvmx_fpa_pool_info[pool].size);
- return -errors;
- } else
- return 0;
-}
-
-uint64_t cvmx_fpa_get_block_size(uint64_t pool)
-{
- switch (pool) {
- case 0:
- return CVMX_FPA_POOL_0_SIZE;
- case 1:
- return CVMX_FPA_POOL_1_SIZE;
- case 2:
- return CVMX_FPA_POOL_2_SIZE;
- case 3:
- return CVMX_FPA_POOL_3_SIZE;
- case 4:
- return CVMX_FPA_POOL_4_SIZE;
- case 5:
- return CVMX_FPA_POOL_5_SIZE;
- case 6:
- return CVMX_FPA_POOL_6_SIZE;
- case 7:
- return CVMX_FPA_POOL_7_SIZE;
- default:
- return 0;
- }
-}
diff --git a/drivers/staging/octeon/cvmx-fpa.h b/drivers/staging/octeon/cvmx-fpa.h
deleted file mode 100644
index 1f04f9658736..000000000000
--- a/drivers/staging/octeon/cvmx-fpa.h
+++ /dev/null
@@ -1,299 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/**
- * @file
- *
- * Interface to the hardware Free Pool Allocator.
- *
- *
- */
-
-#ifndef __CVMX_FPA_H__
-#define __CVMX_FPA_H__
-
-#include "cvmx-address.h"
-#include "cvmx-fpa-defs.h"
-
-#define CVMX_FPA_NUM_POOLS 8
-#define CVMX_FPA_MIN_BLOCK_SIZE 128
-#define CVMX_FPA_ALIGNMENT 128
-
-/**
- * Structure describing the data format used for stores to the FPA.
- */
-typedef union {
- uint64_t u64;
- struct {
- /*
- * the (64-bit word) location in scratchpad to write
- * to (if len != 0)
- */
- uint64_t scraddr:8;
- /* the number of words in the response (0 => no response) */
- uint64_t len:8;
- /* the ID of the device on the non-coherent bus */
- uint64_t did:8;
- /*
- * the address that will appear in the first tick on
- * the NCB bus.
- */
- uint64_t addr:40;
- } s;
-} cvmx_fpa_iobdma_data_t;
-
-/**
- * Structure describing the current state of a FPA pool.
- */
-typedef struct {
- /* Name it was created under */
- const char *name;
- /* Size of each block */
- uint64_t size;
- /* The base memory address of whole block */
- void *base;
- /* The number of elements in the pool at creation */
- uint64_t starting_element_count;
-} cvmx_fpa_pool_info_t;
-
-/**
- * Current state of all the pools. Use access functions
- * instead of using it directly.
- */
-extern cvmx_fpa_pool_info_t cvmx_fpa_pool_info[CVMX_FPA_NUM_POOLS];
-
-/* CSR typedefs have been moved to cvmx-csr-*.h */
-
-/**
- * Return the name of the pool
- *
- * @pool: Pool to get the name of
- * Returns The name
- */
-static inline const char *cvmx_fpa_get_name(uint64_t pool)
-{
- return cvmx_fpa_pool_info[pool].name;
-}
-
-/**
- * Return the base of the pool
- *
- * @pool: Pool to get the base of
- * Returns The base
- */
-static inline void *cvmx_fpa_get_base(uint64_t pool)
-{
- return cvmx_fpa_pool_info[pool].base;
-}
-
-/**
- * Check if a pointer belongs to an FPA pool. Return non-zero
- * if the supplied pointer is inside the memory controlled by
- * an FPA pool.
- *
- * @pool: Pool to check
- * @ptr: Pointer to check
- * Returns Non-zero if pointer is in the pool. Zero if not
- */
-static inline int cvmx_fpa_is_member(uint64_t pool, void *ptr)
-{
- return ((ptr >= cvmx_fpa_pool_info[pool].base) &&
- ((char *)ptr <
- ((char *)(cvmx_fpa_pool_info[pool].base)) +
- cvmx_fpa_pool_info[pool].size *
- cvmx_fpa_pool_info[pool].starting_element_count));
-}
-
-/**
- * Enable the FPA for use. Must be performed after any CSR
- * configuration but before any other FPA functions.
- */
-static inline void cvmx_fpa_enable(void)
-{
- union cvmx_fpa_ctl_status status;
-
- status.u64 = cvmx_read_csr(CVMX_FPA_CTL_STATUS);
- if (status.s.enb) {
- cvmx_dprintf
- ("Warning: Enabling FPA when FPA already enabled.\n");
- }
-
- /*
- * Do runtime check as we allow pass1 compiled code to run on
- * pass2 chips.
- */
- if (cvmx_octeon_is_pass1()) {
- union cvmx_fpa_fpfx_marks marks;
- int i;
- for (i = 1; i < 8; i++) {
- marks.u64 =
- cvmx_read_csr(CVMX_FPA_FPF1_MARKS + (i - 1) * 8ull);
- marks.s.fpf_wr = 0xe0;
- cvmx_write_csr(CVMX_FPA_FPF1_MARKS + (i - 1) * 8ull,
- marks.u64);
- }
-
- /* Enforce a 10 cycle delay between config and enable */
- cvmx_wait(10);
- }
-
- /* FIXME: CVMX_FPA_CTL_STATUS read is unmodelled */
- status.u64 = 0;
- status.s.enb = 1;
- cvmx_write_csr(CVMX_FPA_CTL_STATUS, status.u64);
-}
-
-/**
- * Get a new block from the FPA
- *
- * @pool: Pool to get the block from
- * Returns Pointer to the block or NULL on failure
- */
-static inline void *cvmx_fpa_alloc(uint64_t pool)
-{
- uint64_t address =
- cvmx_read_csr(CVMX_ADDR_DID(CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool)));
- if (address)
- return cvmx_phys_to_ptr(address);
- else
- return NULL;
-}
-
-/**
- * Asynchronously get a new block from the FPA
- *
- * @scr_addr: Local scratch address to put response in. This is a byte address,
- * but must be 8 byte aligned.
- * @pool: Pool to get the block from
- */
-static inline void cvmx_fpa_async_alloc(uint64_t scr_addr, uint64_t pool)
-{
- cvmx_fpa_iobdma_data_t data;
-
- /*
- * Hardware only uses 64 bit aligned locations, so convert
- * from byte address to 64-bit index
- */
- data.s.scraddr = scr_addr >> 3;
- data.s.len = 1;
- data.s.did = CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool);
- data.s.addr = 0;
- cvmx_send_single(data.u64);
-}
-
-/**
- * Free a block allocated with a FPA pool. Does NOT provide memory
- * ordering in cases where the memory block was modified by the core.
- *
- * @ptr: Block to free
- * @pool: Pool to put it in
- * @num_cache_lines:
- * Cache lines to invalidate
- */
-static inline void cvmx_fpa_free_nosync(void *ptr, uint64_t pool,
- uint64_t num_cache_lines)
-{
- cvmx_addr_t newptr;
- newptr.u64 = cvmx_ptr_to_phys(ptr);
- newptr.sfilldidspace.didspace =
- CVMX_ADDR_DIDSPACE(CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool));
- /* Prevent GCC from reordering around free */
- barrier();
- /* value written is number of cache lines not written back */
- cvmx_write_io(newptr.u64, num_cache_lines);
-}
-
-/**
- * Free a block allocated with a FPA pool. Provides required memory
- * ordering in cases where memory block was modified by core.
- *
- * @ptr: Block to free
- * @pool: Pool to put it in
- * @num_cache_lines:
- * Cache lines to invalidate
- */
-static inline void cvmx_fpa_free(void *ptr, uint64_t pool,
- uint64_t num_cache_lines)
-{
- cvmx_addr_t newptr;
- newptr.u64 = cvmx_ptr_to_phys(ptr);
- newptr.sfilldidspace.didspace =
- CVMX_ADDR_DIDSPACE(CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool));
- /*
- * Make sure that any previous writes to memory go out before
- * we free this buffer. This also serves as a barrier to
- * prevent GCC from reordering operations to after the
- * free.
- */
- CVMX_SYNCWS;
- /* value written is number of cache lines not written back */
- cvmx_write_io(newptr.u64, num_cache_lines);
-}
-
-/**
- * Setup a FPA pool to control a new block of memory.
- * This can only be called once per pool. Make sure proper
- * locking enforces this.
- *
- * @pool: Pool to initialize
- * 0 <= pool < 8
- * @name: Constant character string to name this pool.
- * String is not copied.
- * @buffer: Pointer to the block of memory to use. This must be
- * accessible by all processors and external hardware.
- * @block_size: Size for each block controlled by the FPA
- * @num_blocks: Number of blocks
- *
- * Returns 0 on Success,
- * -1 on failure
- */
-extern int cvmx_fpa_setup_pool(uint64_t pool, const char *name, void *buffer,
- uint64_t block_size, uint64_t num_blocks);
-
-/**
- * Shutdown a Memory pool and validate that it had all of
- * the buffers originally placed in it. This should only be
- * called by one processor after all hardware has finished
- * using the pool.
- *
- * @pool: Pool to shutdown
- * Returns Zero on success
- * - Positive is count of missing buffers
- * - Negative is too many buffers or corrupted pointers
- */
-extern uint64_t cvmx_fpa_shutdown_pool(uint64_t pool);
-
-/**
- * Get the size of blocks controlled by the pool
- * This is resolved to a constant at compile time.
- *
- * @pool: Pool to access
- * Returns Size of the block in bytes
- */
-uint64_t cvmx_fpa_get_block_size(uint64_t pool);
-
-#endif /* __CVMX_FPA_H__ */
diff --git a/drivers/staging/octeon/cvmx-gmxx-defs.h b/drivers/staging/octeon/cvmx-gmxx-defs.h
deleted file mode 100644
index 946a43a73fd7..000000000000
--- a/drivers/staging/octeon/cvmx-gmxx-defs.h
+++ /dev/null
@@ -1,2529 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-#ifndef __CVMX_GMXX_DEFS_H__
-#define __CVMX_GMXX_DEFS_H__
-
-#define CVMX_GMXX_BAD_REG(block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000518ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_BIST(block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000400ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_CLK_EN(block_id) \
- CVMX_ADD_IO_SEG(0x00011800080007F0ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_HG2_CONTROL(block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000550ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_INF_MODE(block_id) \
- CVMX_ADD_IO_SEG(0x00011800080007F8ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_NXA_ADR(block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000510ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_PRTX_CBFC_CTL(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000580ull + (((offset) & 0) * 8) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_PRTX_CFG(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000010ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RXX_ADR_CAM0(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000180ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RXX_ADR_CAM1(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000188ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RXX_ADR_CAM2(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000190ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RXX_ADR_CAM3(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000198ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RXX_ADR_CAM4(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800080001A0ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RXX_ADR_CAM5(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800080001A8ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RXX_ADR_CAM_EN(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000108ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RXX_ADR_CTL(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000100ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RXX_DECISION(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000040ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RXX_FRM_CHK(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000020ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RXX_FRM_CTL(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000018ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RXX_FRM_MAX(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000030ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RXX_FRM_MIN(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000028ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RXX_IFG(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000058ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RXX_INT_EN(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000008ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RXX_INT_REG(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000000ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RXX_JABBER(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000038ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RXX_PAUSE_DROP_TIME(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000068ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RXX_RX_INBND(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000060ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RXX_STATS_CTL(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000050ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RXX_STATS_OCTS(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000088ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RXX_STATS_OCTS_CTL(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000098ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RXX_STATS_OCTS_DMAC(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800080000A8ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RXX_STATS_OCTS_DRP(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800080000B8ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RXX_STATS_PKTS(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000080ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RXX_STATS_PKTS_BAD(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800080000C0ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RXX_STATS_PKTS_CTL(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000090ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RXX_STATS_PKTS_DMAC(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800080000A0ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RXX_STATS_PKTS_DRP(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800080000B0ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RXX_UDD_SKP(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000048ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RX_BP_DROPX(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000420ull + (((offset) & 3) * 8) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RX_BP_OFFX(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000460ull + (((offset) & 3) * 8) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RX_BP_ONX(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000440ull + (((offset) & 3) * 8) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RX_HG2_STATUS(block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000548ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RX_PASS_EN(block_id) \
- CVMX_ADD_IO_SEG(0x00011800080005F8ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RX_PASS_MAPX(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000600ull + (((offset) & 15) * 8) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RX_PRTS(block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000410ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RX_PRT_INFO(block_id) \
- CVMX_ADD_IO_SEG(0x00011800080004E8ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RX_TX_STATUS(block_id) \
- CVMX_ADD_IO_SEG(0x00011800080007E8ull + (((block_id) & 0) * 0x8000000ull))
-#define CVMX_GMXX_RX_XAUI_BAD_COL(block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000538ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RX_XAUI_CTL(block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000530ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_SMACX(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000230ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_STAT_BP(block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000520ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TXX_APPEND(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000218ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TXX_BURST(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000228ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TXX_CBFC_XOFF(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800080005A0ull + (((offset) & 0) * 8) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TXX_CBFC_XON(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800080005C0ull + (((offset) & 0) * 8) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TXX_CLK(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000208ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TXX_CTL(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000270ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TXX_MIN_PKT(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000240ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TXX_PAUSE_PKT_INTERVAL(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000248ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TXX_PAUSE_PKT_TIME(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000238ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TXX_PAUSE_TOGO(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000258ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TXX_PAUSE_ZERO(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000260ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TXX_SGMII_CTL(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000300ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TXX_SLOT(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000220ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TXX_SOFT_PAUSE(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000250ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TXX_STAT0(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000280ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TXX_STAT1(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000288ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TXX_STAT2(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000290ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TXX_STAT3(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000298ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TXX_STAT4(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800080002A0ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TXX_STAT5(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800080002A8ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TXX_STAT6(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800080002B0ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TXX_STAT7(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800080002B8ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TXX_STAT8(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800080002C0ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TXX_STAT9(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800080002C8ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TXX_STATS_CTL(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000268ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TXX_THRESH(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000210ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TX_BP(block_id) \
- CVMX_ADD_IO_SEG(0x00011800080004D0ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TX_CLK_MSKX(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000780ull + (((offset) & 1) * 8) + (((block_id) & 0) * 0x0ull))
-#define CVMX_GMXX_TX_COL_ATTEMPT(block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000498ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TX_CORRUPT(block_id) \
- CVMX_ADD_IO_SEG(0x00011800080004D8ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TX_HG2_REG1(block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000558ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TX_HG2_REG2(block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000560ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TX_IFG(block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000488ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TX_INT_EN(block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000508ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TX_INT_REG(block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000500ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TX_JAM(block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000490ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TX_LFSR(block_id) \
- CVMX_ADD_IO_SEG(0x00011800080004F8ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TX_OVR_BP(block_id) \
- CVMX_ADD_IO_SEG(0x00011800080004C8ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TX_PAUSE_PKT_DMAC(block_id) \
- CVMX_ADD_IO_SEG(0x00011800080004A0ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TX_PAUSE_PKT_TYPE(block_id) \
- CVMX_ADD_IO_SEG(0x00011800080004A8ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TX_PRTS(block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000480ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TX_SPI_CTL(block_id) \
- CVMX_ADD_IO_SEG(0x00011800080004C0ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TX_SPI_DRAIN(block_id) \
- CVMX_ADD_IO_SEG(0x00011800080004E0ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TX_SPI_MAX(block_id) \
- CVMX_ADD_IO_SEG(0x00011800080004B0ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TX_SPI_ROUNDX(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000680ull + (((offset) & 31) * 8) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TX_SPI_THRESH(block_id) \
- CVMX_ADD_IO_SEG(0x00011800080004B8ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TX_XAUI_CTL(block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000528ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_XAUI_EXT_LOOPBACK(block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000540ull + (((block_id) & 1) * 0x8000000ull))
-
-union cvmx_gmxx_bad_reg {
- uint64_t u64;
- struct cvmx_gmxx_bad_reg_s {
- uint64_t reserved_31_63:33;
- uint64_t inb_nxa:4;
- uint64_t statovr:1;
- uint64_t loststat:4;
- uint64_t reserved_18_21:4;
- uint64_t out_ovr:16;
- uint64_t ncb_ovr:1;
- uint64_t out_col:1;
- } s;
- struct cvmx_gmxx_bad_reg_cn30xx {
- uint64_t reserved_31_63:33;
- uint64_t inb_nxa:4;
- uint64_t statovr:1;
- uint64_t reserved_25_25:1;
- uint64_t loststat:3;
- uint64_t reserved_5_21:17;
- uint64_t out_ovr:3;
- uint64_t reserved_0_1:2;
- } cn30xx;
- struct cvmx_gmxx_bad_reg_cn30xx cn31xx;
- struct cvmx_gmxx_bad_reg_s cn38xx;
- struct cvmx_gmxx_bad_reg_s cn38xxp2;
- struct cvmx_gmxx_bad_reg_cn30xx cn50xx;
- struct cvmx_gmxx_bad_reg_cn52xx {
- uint64_t reserved_31_63:33;
- uint64_t inb_nxa:4;
- uint64_t statovr:1;
- uint64_t loststat:4;
- uint64_t reserved_6_21:16;
- uint64_t out_ovr:4;
- uint64_t reserved_0_1:2;
- } cn52xx;
- struct cvmx_gmxx_bad_reg_cn52xx cn52xxp1;
- struct cvmx_gmxx_bad_reg_cn52xx cn56xx;
- struct cvmx_gmxx_bad_reg_cn52xx cn56xxp1;
- struct cvmx_gmxx_bad_reg_s cn58xx;
- struct cvmx_gmxx_bad_reg_s cn58xxp1;
-};
-
-union cvmx_gmxx_bist {
- uint64_t u64;
- struct cvmx_gmxx_bist_s {
- uint64_t reserved_17_63:47;
- uint64_t status:17;
- } s;
- struct cvmx_gmxx_bist_cn30xx {
- uint64_t reserved_10_63:54;
- uint64_t status:10;
- } cn30xx;
- struct cvmx_gmxx_bist_cn30xx cn31xx;
- struct cvmx_gmxx_bist_cn30xx cn38xx;
- struct cvmx_gmxx_bist_cn30xx cn38xxp2;
- struct cvmx_gmxx_bist_cn50xx {
- uint64_t reserved_12_63:52;
- uint64_t status:12;
- } cn50xx;
- struct cvmx_gmxx_bist_cn52xx {
- uint64_t reserved_16_63:48;
- uint64_t status:16;
- } cn52xx;
- struct cvmx_gmxx_bist_cn52xx cn52xxp1;
- struct cvmx_gmxx_bist_cn52xx cn56xx;
- struct cvmx_gmxx_bist_cn52xx cn56xxp1;
- struct cvmx_gmxx_bist_s cn58xx;
- struct cvmx_gmxx_bist_s cn58xxp1;
-};
-
-union cvmx_gmxx_clk_en {
- uint64_t u64;
- struct cvmx_gmxx_clk_en_s {
- uint64_t reserved_1_63:63;
- uint64_t clk_en:1;
- } s;
- struct cvmx_gmxx_clk_en_s cn52xx;
- struct cvmx_gmxx_clk_en_s cn52xxp1;
- struct cvmx_gmxx_clk_en_s cn56xx;
- struct cvmx_gmxx_clk_en_s cn56xxp1;
-};
-
-union cvmx_gmxx_hg2_control {
- uint64_t u64;
- struct cvmx_gmxx_hg2_control_s {
- uint64_t reserved_19_63:45;
- uint64_t hg2tx_en:1;
- uint64_t hg2rx_en:1;
- uint64_t phys_en:1;
- uint64_t logl_en:16;
- } s;
- struct cvmx_gmxx_hg2_control_s cn52xx;
- struct cvmx_gmxx_hg2_control_s cn52xxp1;
- struct cvmx_gmxx_hg2_control_s cn56xx;
-};
-
-union cvmx_gmxx_inf_mode {
- uint64_t u64;
- struct cvmx_gmxx_inf_mode_s {
- uint64_t reserved_10_63:54;
- uint64_t speed:2;
- uint64_t reserved_6_7:2;
- uint64_t mode:2;
- uint64_t reserved_3_3:1;
- uint64_t p0mii:1;
- uint64_t en:1;
- uint64_t type:1;
- } s;
- struct cvmx_gmxx_inf_mode_cn30xx {
- uint64_t reserved_3_63:61;
- uint64_t p0mii:1;
- uint64_t en:1;
- uint64_t type:1;
- } cn30xx;
- struct cvmx_gmxx_inf_mode_cn31xx {
- uint64_t reserved_2_63:62;
- uint64_t en:1;
- uint64_t type:1;
- } cn31xx;
- struct cvmx_gmxx_inf_mode_cn31xx cn38xx;
- struct cvmx_gmxx_inf_mode_cn31xx cn38xxp2;
- struct cvmx_gmxx_inf_mode_cn30xx cn50xx;
- struct cvmx_gmxx_inf_mode_cn52xx {
- uint64_t reserved_10_63:54;
- uint64_t speed:2;
- uint64_t reserved_6_7:2;
- uint64_t mode:2;
- uint64_t reserved_2_3:2;
- uint64_t en:1;
- uint64_t type:1;
- } cn52xx;
- struct cvmx_gmxx_inf_mode_cn52xx cn52xxp1;
- struct cvmx_gmxx_inf_mode_cn52xx cn56xx;
- struct cvmx_gmxx_inf_mode_cn52xx cn56xxp1;
- struct cvmx_gmxx_inf_mode_cn31xx cn58xx;
- struct cvmx_gmxx_inf_mode_cn31xx cn58xxp1;
-};
-
-union cvmx_gmxx_nxa_adr {
- uint64_t u64;
- struct cvmx_gmxx_nxa_adr_s {
- uint64_t reserved_6_63:58;
- uint64_t prt:6;
- } s;
- struct cvmx_gmxx_nxa_adr_s cn30xx;
- struct cvmx_gmxx_nxa_adr_s cn31xx;
- struct cvmx_gmxx_nxa_adr_s cn38xx;
- struct cvmx_gmxx_nxa_adr_s cn38xxp2;
- struct cvmx_gmxx_nxa_adr_s cn50xx;
- struct cvmx_gmxx_nxa_adr_s cn52xx;
- struct cvmx_gmxx_nxa_adr_s cn52xxp1;
- struct cvmx_gmxx_nxa_adr_s cn56xx;
- struct cvmx_gmxx_nxa_adr_s cn56xxp1;
- struct cvmx_gmxx_nxa_adr_s cn58xx;
- struct cvmx_gmxx_nxa_adr_s cn58xxp1;
-};
-
-union cvmx_gmxx_prtx_cbfc_ctl {
- uint64_t u64;
- struct cvmx_gmxx_prtx_cbfc_ctl_s {
- uint64_t phys_en:16;
- uint64_t logl_en:16;
- uint64_t phys_bp:16;
- uint64_t reserved_4_15:12;
- uint64_t bck_en:1;
- uint64_t drp_en:1;
- uint64_t tx_en:1;
- uint64_t rx_en:1;
- } s;
- struct cvmx_gmxx_prtx_cbfc_ctl_s cn52xx;
- struct cvmx_gmxx_prtx_cbfc_ctl_s cn56xx;
-};
-
-union cvmx_gmxx_prtx_cfg {
- uint64_t u64;
- struct cvmx_gmxx_prtx_cfg_s {
- uint64_t reserved_14_63:50;
- uint64_t tx_idle:1;
- uint64_t rx_idle:1;
- uint64_t reserved_9_11:3;
- uint64_t speed_msb:1;
- uint64_t reserved_4_7:4;
- uint64_t slottime:1;
- uint64_t duplex:1;
- uint64_t speed:1;
- uint64_t en:1;
- } s;
- struct cvmx_gmxx_prtx_cfg_cn30xx {
- uint64_t reserved_4_63:60;
- uint64_t slottime:1;
- uint64_t duplex:1;
- uint64_t speed:1;
- uint64_t en:1;
- } cn30xx;
- struct cvmx_gmxx_prtx_cfg_cn30xx cn31xx;
- struct cvmx_gmxx_prtx_cfg_cn30xx cn38xx;
- struct cvmx_gmxx_prtx_cfg_cn30xx cn38xxp2;
- struct cvmx_gmxx_prtx_cfg_cn30xx cn50xx;
- struct cvmx_gmxx_prtx_cfg_s cn52xx;
- struct cvmx_gmxx_prtx_cfg_s cn52xxp1;
- struct cvmx_gmxx_prtx_cfg_s cn56xx;
- struct cvmx_gmxx_prtx_cfg_s cn56xxp1;
- struct cvmx_gmxx_prtx_cfg_cn30xx cn58xx;
- struct cvmx_gmxx_prtx_cfg_cn30xx cn58xxp1;
-};
-
-union cvmx_gmxx_rxx_adr_cam0 {
- uint64_t u64;
- struct cvmx_gmxx_rxx_adr_cam0_s {
- uint64_t adr:64;
- } s;
- struct cvmx_gmxx_rxx_adr_cam0_s cn30xx;
- struct cvmx_gmxx_rxx_adr_cam0_s cn31xx;
- struct cvmx_gmxx_rxx_adr_cam0_s cn38xx;
- struct cvmx_gmxx_rxx_adr_cam0_s cn38xxp2;
- struct cvmx_gmxx_rxx_adr_cam0_s cn50xx;
- struct cvmx_gmxx_rxx_adr_cam0_s cn52xx;
- struct cvmx_gmxx_rxx_adr_cam0_s cn52xxp1;
- struct cvmx_gmxx_rxx_adr_cam0_s cn56xx;
- struct cvmx_gmxx_rxx_adr_cam0_s cn56xxp1;
- struct cvmx_gmxx_rxx_adr_cam0_s cn58xx;
- struct cvmx_gmxx_rxx_adr_cam0_s cn58xxp1;
-};
-
-union cvmx_gmxx_rxx_adr_cam1 {
- uint64_t u64;
- struct cvmx_gmxx_rxx_adr_cam1_s {
- uint64_t adr:64;
- } s;
- struct cvmx_gmxx_rxx_adr_cam1_s cn30xx;
- struct cvmx_gmxx_rxx_adr_cam1_s cn31xx;
- struct cvmx_gmxx_rxx_adr_cam1_s cn38xx;
- struct cvmx_gmxx_rxx_adr_cam1_s cn38xxp2;
- struct cvmx_gmxx_rxx_adr_cam1_s cn50xx;
- struct cvmx_gmxx_rxx_adr_cam1_s cn52xx;
- struct cvmx_gmxx_rxx_adr_cam1_s cn52xxp1;
- struct cvmx_gmxx_rxx_adr_cam1_s cn56xx;
- struct cvmx_gmxx_rxx_adr_cam1_s cn56xxp1;
- struct cvmx_gmxx_rxx_adr_cam1_s cn58xx;
- struct cvmx_gmxx_rxx_adr_cam1_s cn58xxp1;
-};
-
-union cvmx_gmxx_rxx_adr_cam2 {
- uint64_t u64;
- struct cvmx_gmxx_rxx_adr_cam2_s {
- uint64_t adr:64;
- } s;
- struct cvmx_gmxx_rxx_adr_cam2_s cn30xx;
- struct cvmx_gmxx_rxx_adr_cam2_s cn31xx;
- struct cvmx_gmxx_rxx_adr_cam2_s cn38xx;
- struct cvmx_gmxx_rxx_adr_cam2_s cn38xxp2;
- struct cvmx_gmxx_rxx_adr_cam2_s cn50xx;
- struct cvmx_gmxx_rxx_adr_cam2_s cn52xx;
- struct cvmx_gmxx_rxx_adr_cam2_s cn52xxp1;
- struct cvmx_gmxx_rxx_adr_cam2_s cn56xx;
- struct cvmx_gmxx_rxx_adr_cam2_s cn56xxp1;
- struct cvmx_gmxx_rxx_adr_cam2_s cn58xx;
- struct cvmx_gmxx_rxx_adr_cam2_s cn58xxp1;
-};
-
-union cvmx_gmxx_rxx_adr_cam3 {
- uint64_t u64;
- struct cvmx_gmxx_rxx_adr_cam3_s {
- uint64_t adr:64;
- } s;
- struct cvmx_gmxx_rxx_adr_cam3_s cn30xx;
- struct cvmx_gmxx_rxx_adr_cam3_s cn31xx;
- struct cvmx_gmxx_rxx_adr_cam3_s cn38xx;
- struct cvmx_gmxx_rxx_adr_cam3_s cn38xxp2;
- struct cvmx_gmxx_rxx_adr_cam3_s cn50xx;
- struct cvmx_gmxx_rxx_adr_cam3_s cn52xx;
- struct cvmx_gmxx_rxx_adr_cam3_s cn52xxp1;
- struct cvmx_gmxx_rxx_adr_cam3_s cn56xx;
- struct cvmx_gmxx_rxx_adr_cam3_s cn56xxp1;
- struct cvmx_gmxx_rxx_adr_cam3_s cn58xx;
- struct cvmx_gmxx_rxx_adr_cam3_s cn58xxp1;
-};
-
-union cvmx_gmxx_rxx_adr_cam4 {
- uint64_t u64;
- struct cvmx_gmxx_rxx_adr_cam4_s {
- uint64_t adr:64;
- } s;
- struct cvmx_gmxx_rxx_adr_cam4_s cn30xx;
- struct cvmx_gmxx_rxx_adr_cam4_s cn31xx;
- struct cvmx_gmxx_rxx_adr_cam4_s cn38xx;
- struct cvmx_gmxx_rxx_adr_cam4_s cn38xxp2;
- struct cvmx_gmxx_rxx_adr_cam4_s cn50xx;
- struct cvmx_gmxx_rxx_adr_cam4_s cn52xx;
- struct cvmx_gmxx_rxx_adr_cam4_s cn52xxp1;
- struct cvmx_gmxx_rxx_adr_cam4_s cn56xx;
- struct cvmx_gmxx_rxx_adr_cam4_s cn56xxp1;
- struct cvmx_gmxx_rxx_adr_cam4_s cn58xx;
- struct cvmx_gmxx_rxx_adr_cam4_s cn58xxp1;
-};
-
-union cvmx_gmxx_rxx_adr_cam5 {
- uint64_t u64;
- struct cvmx_gmxx_rxx_adr_cam5_s {
- uint64_t adr:64;
- } s;
- struct cvmx_gmxx_rxx_adr_cam5_s cn30xx;
- struct cvmx_gmxx_rxx_adr_cam5_s cn31xx;
- struct cvmx_gmxx_rxx_adr_cam5_s cn38xx;
- struct cvmx_gmxx_rxx_adr_cam5_s cn38xxp2;
- struct cvmx_gmxx_rxx_adr_cam5_s cn50xx;
- struct cvmx_gmxx_rxx_adr_cam5_s cn52xx;
- struct cvmx_gmxx_rxx_adr_cam5_s cn52xxp1;
- struct cvmx_gmxx_rxx_adr_cam5_s cn56xx;
- struct cvmx_gmxx_rxx_adr_cam5_s cn56xxp1;
- struct cvmx_gmxx_rxx_adr_cam5_s cn58xx;
- struct cvmx_gmxx_rxx_adr_cam5_s cn58xxp1;
-};
-
-union cvmx_gmxx_rxx_adr_cam_en {
- uint64_t u64;
- struct cvmx_gmxx_rxx_adr_cam_en_s {
- uint64_t reserved_8_63:56;
- uint64_t en:8;
- } s;
- struct cvmx_gmxx_rxx_adr_cam_en_s cn30xx;
- struct cvmx_gmxx_rxx_adr_cam_en_s cn31xx;
- struct cvmx_gmxx_rxx_adr_cam_en_s cn38xx;
- struct cvmx_gmxx_rxx_adr_cam_en_s cn38xxp2;
- struct cvmx_gmxx_rxx_adr_cam_en_s cn50xx;
- struct cvmx_gmxx_rxx_adr_cam_en_s cn52xx;
- struct cvmx_gmxx_rxx_adr_cam_en_s cn52xxp1;
- struct cvmx_gmxx_rxx_adr_cam_en_s cn56xx;
- struct cvmx_gmxx_rxx_adr_cam_en_s cn56xxp1;
- struct cvmx_gmxx_rxx_adr_cam_en_s cn58xx;
- struct cvmx_gmxx_rxx_adr_cam_en_s cn58xxp1;
-};
-
-union cvmx_gmxx_rxx_adr_ctl {
- uint64_t u64;
- struct cvmx_gmxx_rxx_adr_ctl_s {
- uint64_t reserved_4_63:60;
- uint64_t cam_mode:1;
- uint64_t mcst:2;
- uint64_t bcst:1;
- } s;
- struct cvmx_gmxx_rxx_adr_ctl_s cn30xx;
- struct cvmx_gmxx_rxx_adr_ctl_s cn31xx;
- struct cvmx_gmxx_rxx_adr_ctl_s cn38xx;
- struct cvmx_gmxx_rxx_adr_ctl_s cn38xxp2;
- struct cvmx_gmxx_rxx_adr_ctl_s cn50xx;
- struct cvmx_gmxx_rxx_adr_ctl_s cn52xx;
- struct cvmx_gmxx_rxx_adr_ctl_s cn52xxp1;
- struct cvmx_gmxx_rxx_adr_ctl_s cn56xx;
- struct cvmx_gmxx_rxx_adr_ctl_s cn56xxp1;
- struct cvmx_gmxx_rxx_adr_ctl_s cn58xx;
- struct cvmx_gmxx_rxx_adr_ctl_s cn58xxp1;
-};
-
-union cvmx_gmxx_rxx_decision {
- uint64_t u64;
- struct cvmx_gmxx_rxx_decision_s {
- uint64_t reserved_5_63:59;
- uint64_t cnt:5;
- } s;
- struct cvmx_gmxx_rxx_decision_s cn30xx;
- struct cvmx_gmxx_rxx_decision_s cn31xx;
- struct cvmx_gmxx_rxx_decision_s cn38xx;
- struct cvmx_gmxx_rxx_decision_s cn38xxp2;
- struct cvmx_gmxx_rxx_decision_s cn50xx;
- struct cvmx_gmxx_rxx_decision_s cn52xx;
- struct cvmx_gmxx_rxx_decision_s cn52xxp1;
- struct cvmx_gmxx_rxx_decision_s cn56xx;
- struct cvmx_gmxx_rxx_decision_s cn56xxp1;
- struct cvmx_gmxx_rxx_decision_s cn58xx;
- struct cvmx_gmxx_rxx_decision_s cn58xxp1;
-};
-
-union cvmx_gmxx_rxx_frm_chk {
- uint64_t u64;
- struct cvmx_gmxx_rxx_frm_chk_s {
- uint64_t reserved_10_63:54;
- uint64_t niberr:1;
- uint64_t skperr:1;
- uint64_t rcverr:1;
- uint64_t lenerr:1;
- uint64_t alnerr:1;
- uint64_t fcserr:1;
- uint64_t jabber:1;
- uint64_t maxerr:1;
- uint64_t carext:1;
- uint64_t minerr:1;
- } s;
- struct cvmx_gmxx_rxx_frm_chk_s cn30xx;
- struct cvmx_gmxx_rxx_frm_chk_s cn31xx;
- struct cvmx_gmxx_rxx_frm_chk_s cn38xx;
- struct cvmx_gmxx_rxx_frm_chk_s cn38xxp2;
- struct cvmx_gmxx_rxx_frm_chk_cn50xx {
- uint64_t reserved_10_63:54;
- uint64_t niberr:1;
- uint64_t skperr:1;
- uint64_t rcverr:1;
- uint64_t reserved_6_6:1;
- uint64_t alnerr:1;
- uint64_t fcserr:1;
- uint64_t jabber:1;
- uint64_t reserved_2_2:1;
- uint64_t carext:1;
- uint64_t reserved_0_0:1;
- } cn50xx;
- struct cvmx_gmxx_rxx_frm_chk_cn52xx {
- uint64_t reserved_9_63:55;
- uint64_t skperr:1;
- uint64_t rcverr:1;
- uint64_t reserved_5_6:2;
- uint64_t fcserr:1;
- uint64_t jabber:1;
- uint64_t reserved_2_2:1;
- uint64_t carext:1;
- uint64_t reserved_0_0:1;
- } cn52xx;
- struct cvmx_gmxx_rxx_frm_chk_cn52xx cn52xxp1;
- struct cvmx_gmxx_rxx_frm_chk_cn52xx cn56xx;
- struct cvmx_gmxx_rxx_frm_chk_cn52xx cn56xxp1;
- struct cvmx_gmxx_rxx_frm_chk_s cn58xx;
- struct cvmx_gmxx_rxx_frm_chk_s cn58xxp1;
-};
-
-union cvmx_gmxx_rxx_frm_ctl {
- uint64_t u64;
- struct cvmx_gmxx_rxx_frm_ctl_s {
- uint64_t reserved_11_63:53;
- uint64_t null_dis:1;
- uint64_t pre_align:1;
- uint64_t pad_len:1;
- uint64_t vlan_len:1;
- uint64_t pre_free:1;
- uint64_t ctl_smac:1;
- uint64_t ctl_mcst:1;
- uint64_t ctl_bck:1;
- uint64_t ctl_drp:1;
- uint64_t pre_strp:1;
- uint64_t pre_chk:1;
- } s;
- struct cvmx_gmxx_rxx_frm_ctl_cn30xx {
- uint64_t reserved_9_63:55;
- uint64_t pad_len:1;
- uint64_t vlan_len:1;
- uint64_t pre_free:1;
- uint64_t ctl_smac:1;
- uint64_t ctl_mcst:1;
- uint64_t ctl_bck:1;
- uint64_t ctl_drp:1;
- uint64_t pre_strp:1;
- uint64_t pre_chk:1;
- } cn30xx;
- struct cvmx_gmxx_rxx_frm_ctl_cn31xx {
- uint64_t reserved_8_63:56;
- uint64_t vlan_len:1;
- uint64_t pre_free:1;
- uint64_t ctl_smac:1;
- uint64_t ctl_mcst:1;
- uint64_t ctl_bck:1;
- uint64_t ctl_drp:1;
- uint64_t pre_strp:1;
- uint64_t pre_chk:1;
- } cn31xx;
- struct cvmx_gmxx_rxx_frm_ctl_cn30xx cn38xx;
- struct cvmx_gmxx_rxx_frm_ctl_cn31xx cn38xxp2;
- struct cvmx_gmxx_rxx_frm_ctl_cn50xx {
- uint64_t reserved_11_63:53;
- uint64_t null_dis:1;
- uint64_t pre_align:1;
- uint64_t reserved_7_8:2;
- uint64_t pre_free:1;
- uint64_t ctl_smac:1;
- uint64_t ctl_mcst:1;
- uint64_t ctl_bck:1;
- uint64_t ctl_drp:1;
- uint64_t pre_strp:1;
- uint64_t pre_chk:1;
- } cn50xx;
- struct cvmx_gmxx_rxx_frm_ctl_cn50xx cn52xx;
- struct cvmx_gmxx_rxx_frm_ctl_cn50xx cn52xxp1;
- struct cvmx_gmxx_rxx_frm_ctl_cn50xx cn56xx;
- struct cvmx_gmxx_rxx_frm_ctl_cn56xxp1 {
- uint64_t reserved_10_63:54;
- uint64_t pre_align:1;
- uint64_t reserved_7_8:2;
- uint64_t pre_free:1;
- uint64_t ctl_smac:1;
- uint64_t ctl_mcst:1;
- uint64_t ctl_bck:1;
- uint64_t ctl_drp:1;
- uint64_t pre_strp:1;
- uint64_t pre_chk:1;
- } cn56xxp1;
- struct cvmx_gmxx_rxx_frm_ctl_s cn58xx;
- struct cvmx_gmxx_rxx_frm_ctl_cn30xx cn58xxp1;
-};
-
-union cvmx_gmxx_rxx_frm_max {
- uint64_t u64;
- struct cvmx_gmxx_rxx_frm_max_s {
- uint64_t reserved_16_63:48;
- uint64_t len:16;
- } s;
- struct cvmx_gmxx_rxx_frm_max_s cn30xx;
- struct cvmx_gmxx_rxx_frm_max_s cn31xx;
- struct cvmx_gmxx_rxx_frm_max_s cn38xx;
- struct cvmx_gmxx_rxx_frm_max_s cn38xxp2;
- struct cvmx_gmxx_rxx_frm_max_s cn58xx;
- struct cvmx_gmxx_rxx_frm_max_s cn58xxp1;
-};
-
-union cvmx_gmxx_rxx_frm_min {
- uint64_t u64;
- struct cvmx_gmxx_rxx_frm_min_s {
- uint64_t reserved_16_63:48;
- uint64_t len:16;
- } s;
- struct cvmx_gmxx_rxx_frm_min_s cn30xx;
- struct cvmx_gmxx_rxx_frm_min_s cn31xx;
- struct cvmx_gmxx_rxx_frm_min_s cn38xx;
- struct cvmx_gmxx_rxx_frm_min_s cn38xxp2;
- struct cvmx_gmxx_rxx_frm_min_s cn58xx;
- struct cvmx_gmxx_rxx_frm_min_s cn58xxp1;
-};
-
-union cvmx_gmxx_rxx_ifg {
- uint64_t u64;
- struct cvmx_gmxx_rxx_ifg_s {
- uint64_t reserved_4_63:60;
- uint64_t ifg:4;
- } s;
- struct cvmx_gmxx_rxx_ifg_s cn30xx;
- struct cvmx_gmxx_rxx_ifg_s cn31xx;
- struct cvmx_gmxx_rxx_ifg_s cn38xx;
- struct cvmx_gmxx_rxx_ifg_s cn38xxp2;
- struct cvmx_gmxx_rxx_ifg_s cn50xx;
- struct cvmx_gmxx_rxx_ifg_s cn52xx;
- struct cvmx_gmxx_rxx_ifg_s cn52xxp1;
- struct cvmx_gmxx_rxx_ifg_s cn56xx;
- struct cvmx_gmxx_rxx_ifg_s cn56xxp1;
- struct cvmx_gmxx_rxx_ifg_s cn58xx;
- struct cvmx_gmxx_rxx_ifg_s cn58xxp1;
-};
-
-union cvmx_gmxx_rxx_int_en {
- uint64_t u64;
- struct cvmx_gmxx_rxx_int_en_s {
- uint64_t reserved_29_63:35;
- uint64_t hg2cc:1;
- uint64_t hg2fld:1;
- uint64_t undat:1;
- uint64_t uneop:1;
- uint64_t unsop:1;
- uint64_t bad_term:1;
- uint64_t bad_seq:1;
- uint64_t rem_fault:1;
- uint64_t loc_fault:1;
- uint64_t pause_drp:1;
- uint64_t phy_dupx:1;
- uint64_t phy_spd:1;
- uint64_t phy_link:1;
- uint64_t ifgerr:1;
- uint64_t coldet:1;
- uint64_t falerr:1;
- uint64_t rsverr:1;
- uint64_t pcterr:1;
- uint64_t ovrerr:1;
- uint64_t niberr:1;
- uint64_t skperr:1;
- uint64_t rcverr:1;
- uint64_t lenerr:1;
- uint64_t alnerr:1;
- uint64_t fcserr:1;
- uint64_t jabber:1;
- uint64_t maxerr:1;
- uint64_t carext:1;
- uint64_t minerr:1;
- } s;
- struct cvmx_gmxx_rxx_int_en_cn30xx {
- uint64_t reserved_19_63:45;
- uint64_t phy_dupx:1;
- uint64_t phy_spd:1;
- uint64_t phy_link:1;
- uint64_t ifgerr:1;
- uint64_t coldet:1;
- uint64_t falerr:1;
- uint64_t rsverr:1;
- uint64_t pcterr:1;
- uint64_t ovrerr:1;
- uint64_t niberr:1;
- uint64_t skperr:1;
- uint64_t rcverr:1;
- uint64_t lenerr:1;
- uint64_t alnerr:1;
- uint64_t fcserr:1;
- uint64_t jabber:1;
- uint64_t maxerr:1;
- uint64_t carext:1;
- uint64_t minerr:1;
- } cn30xx;
- struct cvmx_gmxx_rxx_int_en_cn30xx cn31xx;
- struct cvmx_gmxx_rxx_int_en_cn30xx cn38xx;
- struct cvmx_gmxx_rxx_int_en_cn30xx cn38xxp2;
- struct cvmx_gmxx_rxx_int_en_cn50xx {
- uint64_t reserved_20_63:44;
- uint64_t pause_drp:1;
- uint64_t phy_dupx:1;
- uint64_t phy_spd:1;
- uint64_t phy_link:1;
- uint64_t ifgerr:1;
- uint64_t coldet:1;
- uint64_t falerr:1;
- uint64_t rsverr:1;
- uint64_t pcterr:1;
- uint64_t ovrerr:1;
- uint64_t niberr:1;
- uint64_t skperr:1;
- uint64_t rcverr:1;
- uint64_t reserved_6_6:1;
- uint64_t alnerr:1;
- uint64_t fcserr:1;
- uint64_t jabber:1;
- uint64_t reserved_2_2:1;
- uint64_t carext:1;
- uint64_t reserved_0_0:1;
- } cn50xx;
- struct cvmx_gmxx_rxx_int_en_cn52xx {
- uint64_t reserved_29_63:35;
- uint64_t hg2cc:1;
- uint64_t hg2fld:1;
- uint64_t undat:1;
- uint64_t uneop:1;
- uint64_t unsop:1;
- uint64_t bad_term:1;
- uint64_t bad_seq:1;
- uint64_t rem_fault:1;
- uint64_t loc_fault:1;
- uint64_t pause_drp:1;
- uint64_t reserved_16_18:3;
- uint64_t ifgerr:1;
- uint64_t coldet:1;
- uint64_t falerr:1;
- uint64_t rsverr:1;
- uint64_t pcterr:1;
- uint64_t ovrerr:1;
- uint64_t reserved_9_9:1;
- uint64_t skperr:1;
- uint64_t rcverr:1;
- uint64_t reserved_5_6:2;
- uint64_t fcserr:1;
- uint64_t jabber:1;
- uint64_t reserved_2_2:1;
- uint64_t carext:1;
- uint64_t reserved_0_0:1;
- } cn52xx;
- struct cvmx_gmxx_rxx_int_en_cn52xx cn52xxp1;
- struct cvmx_gmxx_rxx_int_en_cn52xx cn56xx;
- struct cvmx_gmxx_rxx_int_en_cn56xxp1 {
- uint64_t reserved_27_63:37;
- uint64_t undat:1;
- uint64_t uneop:1;
- uint64_t unsop:1;
- uint64_t bad_term:1;
- uint64_t bad_seq:1;
- uint64_t rem_fault:1;
- uint64_t loc_fault:1;
- uint64_t pause_drp:1;
- uint64_t reserved_16_18:3;
- uint64_t ifgerr:1;
- uint64_t coldet:1;
- uint64_t falerr:1;
- uint64_t rsverr:1;
- uint64_t pcterr:1;
- uint64_t ovrerr:1;
- uint64_t reserved_9_9:1;
- uint64_t skperr:1;
- uint64_t rcverr:1;
- uint64_t reserved_5_6:2;
- uint64_t fcserr:1;
- uint64_t jabber:1;
- uint64_t reserved_2_2:1;
- uint64_t carext:1;
- uint64_t reserved_0_0:1;
- } cn56xxp1;
- struct cvmx_gmxx_rxx_int_en_cn58xx {
- uint64_t reserved_20_63:44;
- uint64_t pause_drp:1;
- uint64_t phy_dupx:1;
- uint64_t phy_spd:1;
- uint64_t phy_link:1;
- uint64_t ifgerr:1;
- uint64_t coldet:1;
- uint64_t falerr:1;
- uint64_t rsverr:1;
- uint64_t pcterr:1;
- uint64_t ovrerr:1;
- uint64_t niberr:1;
- uint64_t skperr:1;
- uint64_t rcverr:1;
- uint64_t lenerr:1;
- uint64_t alnerr:1;
- uint64_t fcserr:1;
- uint64_t jabber:1;
- uint64_t maxerr:1;
- uint64_t carext:1;
- uint64_t minerr:1;
- } cn58xx;
- struct cvmx_gmxx_rxx_int_en_cn58xx cn58xxp1;
-};
-
-union cvmx_gmxx_rxx_int_reg {
- uint64_t u64;
- struct cvmx_gmxx_rxx_int_reg_s {
- uint64_t reserved_29_63:35;
- uint64_t hg2cc:1;
- uint64_t hg2fld:1;
- uint64_t undat:1;
- uint64_t uneop:1;
- uint64_t unsop:1;
- uint64_t bad_term:1;
- uint64_t bad_seq:1;
- uint64_t rem_fault:1;
- uint64_t loc_fault:1;
- uint64_t pause_drp:1;
- uint64_t phy_dupx:1;
- uint64_t phy_spd:1;
- uint64_t phy_link:1;
- uint64_t ifgerr:1;
- uint64_t coldet:1;
- uint64_t falerr:1;
- uint64_t rsverr:1;
- uint64_t pcterr:1;
- uint64_t ovrerr:1;
- uint64_t niberr:1;
- uint64_t skperr:1;
- uint64_t rcverr:1;
- uint64_t lenerr:1;
- uint64_t alnerr:1;
- uint64_t fcserr:1;
- uint64_t jabber:1;
- uint64_t maxerr:1;
- uint64_t carext:1;
- uint64_t minerr:1;
- } s;
- struct cvmx_gmxx_rxx_int_reg_cn30xx {
- uint64_t reserved_19_63:45;
- uint64_t phy_dupx:1;
- uint64_t phy_spd:1;
- uint64_t phy_link:1;
- uint64_t ifgerr:1;
- uint64_t coldet:1;
- uint64_t falerr:1;
- uint64_t rsverr:1;
- uint64_t pcterr:1;
- uint64_t ovrerr:1;
- uint64_t niberr:1;
- uint64_t skperr:1;
- uint64_t rcverr:1;
- uint64_t lenerr:1;
- uint64_t alnerr:1;
- uint64_t fcserr:1;
- uint64_t jabber:1;
- uint64_t maxerr:1;
- uint64_t carext:1;
- uint64_t minerr:1;
- } cn30xx;
- struct cvmx_gmxx_rxx_int_reg_cn30xx cn31xx;
- struct cvmx_gmxx_rxx_int_reg_cn30xx cn38xx;
- struct cvmx_gmxx_rxx_int_reg_cn30xx cn38xxp2;
- struct cvmx_gmxx_rxx_int_reg_cn50xx {
- uint64_t reserved_20_63:44;
- uint64_t pause_drp:1;
- uint64_t phy_dupx:1;
- uint64_t phy_spd:1;
- uint64_t phy_link:1;
- uint64_t ifgerr:1;
- uint64_t coldet:1;
- uint64_t falerr:1;
- uint64_t rsverr:1;
- uint64_t pcterr:1;
- uint64_t ovrerr:1;
- uint64_t niberr:1;
- uint64_t skperr:1;
- uint64_t rcverr:1;
- uint64_t reserved_6_6:1;
- uint64_t alnerr:1;
- uint64_t fcserr:1;
- uint64_t jabber:1;
- uint64_t reserved_2_2:1;
- uint64_t carext:1;
- uint64_t reserved_0_0:1;
- } cn50xx;
- struct cvmx_gmxx_rxx_int_reg_cn52xx {
- uint64_t reserved_29_63:35;
- uint64_t hg2cc:1;
- uint64_t hg2fld:1;
- uint64_t undat:1;
- uint64_t uneop:1;
- uint64_t unsop:1;
- uint64_t bad_term:1;
- uint64_t bad_seq:1;
- uint64_t rem_fault:1;
- uint64_t loc_fault:1;
- uint64_t pause_drp:1;
- uint64_t reserved_16_18:3;
- uint64_t ifgerr:1;
- uint64_t coldet:1;
- uint64_t falerr:1;
- uint64_t rsverr:1;
- uint64_t pcterr:1;
- uint64_t ovrerr:1;
- uint64_t reserved_9_9:1;
- uint64_t skperr:1;
- uint64_t rcverr:1;
- uint64_t reserved_5_6:2;
- uint64_t fcserr:1;
- uint64_t jabber:1;
- uint64_t reserved_2_2:1;
- uint64_t carext:1;
- uint64_t reserved_0_0:1;
- } cn52xx;
- struct cvmx_gmxx_rxx_int_reg_cn52xx cn52xxp1;
- struct cvmx_gmxx_rxx_int_reg_cn52xx cn56xx;
- struct cvmx_gmxx_rxx_int_reg_cn56xxp1 {
- uint64_t reserved_27_63:37;
- uint64_t undat:1;
- uint64_t uneop:1;
- uint64_t unsop:1;
- uint64_t bad_term:1;
- uint64_t bad_seq:1;
- uint64_t rem_fault:1;
- uint64_t loc_fault:1;
- uint64_t pause_drp:1;
- uint64_t reserved_16_18:3;
- uint64_t ifgerr:1;
- uint64_t coldet:1;
- uint64_t falerr:1;
- uint64_t rsverr:1;
- uint64_t pcterr:1;
- uint64_t ovrerr:1;
- uint64_t reserved_9_9:1;
- uint64_t skperr:1;
- uint64_t rcverr:1;
- uint64_t reserved_5_6:2;
- uint64_t fcserr:1;
- uint64_t jabber:1;
- uint64_t reserved_2_2:1;
- uint64_t carext:1;
- uint64_t reserved_0_0:1;
- } cn56xxp1;
- struct cvmx_gmxx_rxx_int_reg_cn58xx {
- uint64_t reserved_20_63:44;
- uint64_t pause_drp:1;
- uint64_t phy_dupx:1;
- uint64_t phy_spd:1;
- uint64_t phy_link:1;
- uint64_t ifgerr:1;
- uint64_t coldet:1;
- uint64_t falerr:1;
- uint64_t rsverr:1;
- uint64_t pcterr:1;
- uint64_t ovrerr:1;
- uint64_t niberr:1;
- uint64_t skperr:1;
- uint64_t rcverr:1;
- uint64_t lenerr:1;
- uint64_t alnerr:1;
- uint64_t fcserr:1;
- uint64_t jabber:1;
- uint64_t maxerr:1;
- uint64_t carext:1;
- uint64_t minerr:1;
- } cn58xx;
- struct cvmx_gmxx_rxx_int_reg_cn58xx cn58xxp1;
-};
-
-union cvmx_gmxx_rxx_jabber {
- uint64_t u64;
- struct cvmx_gmxx_rxx_jabber_s {
- uint64_t reserved_16_63:48;
- uint64_t cnt:16;
- } s;
- struct cvmx_gmxx_rxx_jabber_s cn30xx;
- struct cvmx_gmxx_rxx_jabber_s cn31xx;
- struct cvmx_gmxx_rxx_jabber_s cn38xx;
- struct cvmx_gmxx_rxx_jabber_s cn38xxp2;
- struct cvmx_gmxx_rxx_jabber_s cn50xx;
- struct cvmx_gmxx_rxx_jabber_s cn52xx;
- struct cvmx_gmxx_rxx_jabber_s cn52xxp1;
- struct cvmx_gmxx_rxx_jabber_s cn56xx;
- struct cvmx_gmxx_rxx_jabber_s cn56xxp1;
- struct cvmx_gmxx_rxx_jabber_s cn58xx;
- struct cvmx_gmxx_rxx_jabber_s cn58xxp1;
-};
-
-union cvmx_gmxx_rxx_pause_drop_time {
- uint64_t u64;
- struct cvmx_gmxx_rxx_pause_drop_time_s {
- uint64_t reserved_16_63:48;
- uint64_t status:16;
- } s;
- struct cvmx_gmxx_rxx_pause_drop_time_s cn50xx;
- struct cvmx_gmxx_rxx_pause_drop_time_s cn52xx;
- struct cvmx_gmxx_rxx_pause_drop_time_s cn52xxp1;
- struct cvmx_gmxx_rxx_pause_drop_time_s cn56xx;
- struct cvmx_gmxx_rxx_pause_drop_time_s cn56xxp1;
- struct cvmx_gmxx_rxx_pause_drop_time_s cn58xx;
- struct cvmx_gmxx_rxx_pause_drop_time_s cn58xxp1;
-};
-
-union cvmx_gmxx_rxx_rx_inbnd {
- uint64_t u64;
- struct cvmx_gmxx_rxx_rx_inbnd_s {
- uint64_t reserved_4_63:60;
- uint64_t duplex:1;
- uint64_t speed:2;
- uint64_t status:1;
- } s;
- struct cvmx_gmxx_rxx_rx_inbnd_s cn30xx;
- struct cvmx_gmxx_rxx_rx_inbnd_s cn31xx;
- struct cvmx_gmxx_rxx_rx_inbnd_s cn38xx;
- struct cvmx_gmxx_rxx_rx_inbnd_s cn38xxp2;
- struct cvmx_gmxx_rxx_rx_inbnd_s cn50xx;
- struct cvmx_gmxx_rxx_rx_inbnd_s cn58xx;
- struct cvmx_gmxx_rxx_rx_inbnd_s cn58xxp1;
-};
-
-union cvmx_gmxx_rxx_stats_ctl {
- uint64_t u64;
- struct cvmx_gmxx_rxx_stats_ctl_s {
- uint64_t reserved_1_63:63;
- uint64_t rd_clr:1;
- } s;
- struct cvmx_gmxx_rxx_stats_ctl_s cn30xx;
- struct cvmx_gmxx_rxx_stats_ctl_s cn31xx;
- struct cvmx_gmxx_rxx_stats_ctl_s cn38xx;
- struct cvmx_gmxx_rxx_stats_ctl_s cn38xxp2;
- struct cvmx_gmxx_rxx_stats_ctl_s cn50xx;
- struct cvmx_gmxx_rxx_stats_ctl_s cn52xx;
- struct cvmx_gmxx_rxx_stats_ctl_s cn52xxp1;
- struct cvmx_gmxx_rxx_stats_ctl_s cn56xx;
- struct cvmx_gmxx_rxx_stats_ctl_s cn56xxp1;
- struct cvmx_gmxx_rxx_stats_ctl_s cn58xx;
- struct cvmx_gmxx_rxx_stats_ctl_s cn58xxp1;
-};
-
-union cvmx_gmxx_rxx_stats_octs {
- uint64_t u64;
- struct cvmx_gmxx_rxx_stats_octs_s {
- uint64_t reserved_48_63:16;
- uint64_t cnt:48;
- } s;
- struct cvmx_gmxx_rxx_stats_octs_s cn30xx;
- struct cvmx_gmxx_rxx_stats_octs_s cn31xx;
- struct cvmx_gmxx_rxx_stats_octs_s cn38xx;
- struct cvmx_gmxx_rxx_stats_octs_s cn38xxp2;
- struct cvmx_gmxx_rxx_stats_octs_s cn50xx;
- struct cvmx_gmxx_rxx_stats_octs_s cn52xx;
- struct cvmx_gmxx_rxx_stats_octs_s cn52xxp1;
- struct cvmx_gmxx_rxx_stats_octs_s cn56xx;
- struct cvmx_gmxx_rxx_stats_octs_s cn56xxp1;
- struct cvmx_gmxx_rxx_stats_octs_s cn58xx;
- struct cvmx_gmxx_rxx_stats_octs_s cn58xxp1;
-};
-
-union cvmx_gmxx_rxx_stats_octs_ctl {
- uint64_t u64;
- struct cvmx_gmxx_rxx_stats_octs_ctl_s {
- uint64_t reserved_48_63:16;
- uint64_t cnt:48;
- } s;
- struct cvmx_gmxx_rxx_stats_octs_ctl_s cn30xx;
- struct cvmx_gmxx_rxx_stats_octs_ctl_s cn31xx;
- struct cvmx_gmxx_rxx_stats_octs_ctl_s cn38xx;
- struct cvmx_gmxx_rxx_stats_octs_ctl_s cn38xxp2;
- struct cvmx_gmxx_rxx_stats_octs_ctl_s cn50xx;
- struct cvmx_gmxx_rxx_stats_octs_ctl_s cn52xx;
- struct cvmx_gmxx_rxx_stats_octs_ctl_s cn52xxp1;
- struct cvmx_gmxx_rxx_stats_octs_ctl_s cn56xx;
- struct cvmx_gmxx_rxx_stats_octs_ctl_s cn56xxp1;
- struct cvmx_gmxx_rxx_stats_octs_ctl_s cn58xx;
- struct cvmx_gmxx_rxx_stats_octs_ctl_s cn58xxp1;
-};
-
-union cvmx_gmxx_rxx_stats_octs_dmac {
- uint64_t u64;
- struct cvmx_gmxx_rxx_stats_octs_dmac_s {
- uint64_t reserved_48_63:16;
- uint64_t cnt:48;
- } s;
- struct cvmx_gmxx_rxx_stats_octs_dmac_s cn30xx;
- struct cvmx_gmxx_rxx_stats_octs_dmac_s cn31xx;
- struct cvmx_gmxx_rxx_stats_octs_dmac_s cn38xx;
- struct cvmx_gmxx_rxx_stats_octs_dmac_s cn38xxp2;
- struct cvmx_gmxx_rxx_stats_octs_dmac_s cn50xx;
- struct cvmx_gmxx_rxx_stats_octs_dmac_s cn52xx;
- struct cvmx_gmxx_rxx_stats_octs_dmac_s cn52xxp1;
- struct cvmx_gmxx_rxx_stats_octs_dmac_s cn56xx;
- struct cvmx_gmxx_rxx_stats_octs_dmac_s cn56xxp1;
- struct cvmx_gmxx_rxx_stats_octs_dmac_s cn58xx;
- struct cvmx_gmxx_rxx_stats_octs_dmac_s cn58xxp1;
-};
-
-union cvmx_gmxx_rxx_stats_octs_drp {
- uint64_t u64;
- struct cvmx_gmxx_rxx_stats_octs_drp_s {
- uint64_t reserved_48_63:16;
- uint64_t cnt:48;
- } s;
- struct cvmx_gmxx_rxx_stats_octs_drp_s cn30xx;
- struct cvmx_gmxx_rxx_stats_octs_drp_s cn31xx;
- struct cvmx_gmxx_rxx_stats_octs_drp_s cn38xx;
- struct cvmx_gmxx_rxx_stats_octs_drp_s cn38xxp2;
- struct cvmx_gmxx_rxx_stats_octs_drp_s cn50xx;
- struct cvmx_gmxx_rxx_stats_octs_drp_s cn52xx;
- struct cvmx_gmxx_rxx_stats_octs_drp_s cn52xxp1;
- struct cvmx_gmxx_rxx_stats_octs_drp_s cn56xx;
- struct cvmx_gmxx_rxx_stats_octs_drp_s cn56xxp1;
- struct cvmx_gmxx_rxx_stats_octs_drp_s cn58xx;
- struct cvmx_gmxx_rxx_stats_octs_drp_s cn58xxp1;
-};
-
-union cvmx_gmxx_rxx_stats_pkts {
- uint64_t u64;
- struct cvmx_gmxx_rxx_stats_pkts_s {
- uint64_t reserved_32_63:32;
- uint64_t cnt:32;
- } s;
- struct cvmx_gmxx_rxx_stats_pkts_s cn30xx;
- struct cvmx_gmxx_rxx_stats_pkts_s cn31xx;
- struct cvmx_gmxx_rxx_stats_pkts_s cn38xx;
- struct cvmx_gmxx_rxx_stats_pkts_s cn38xxp2;
- struct cvmx_gmxx_rxx_stats_pkts_s cn50xx;
- struct cvmx_gmxx_rxx_stats_pkts_s cn52xx;
- struct cvmx_gmxx_rxx_stats_pkts_s cn52xxp1;
- struct cvmx_gmxx_rxx_stats_pkts_s cn56xx;
- struct cvmx_gmxx_rxx_stats_pkts_s cn56xxp1;
- struct cvmx_gmxx_rxx_stats_pkts_s cn58xx;
- struct cvmx_gmxx_rxx_stats_pkts_s cn58xxp1;
-};
-
-union cvmx_gmxx_rxx_stats_pkts_bad {
- uint64_t u64;
- struct cvmx_gmxx_rxx_stats_pkts_bad_s {
- uint64_t reserved_32_63:32;
- uint64_t cnt:32;
- } s;
- struct cvmx_gmxx_rxx_stats_pkts_bad_s cn30xx;
- struct cvmx_gmxx_rxx_stats_pkts_bad_s cn31xx;
- struct cvmx_gmxx_rxx_stats_pkts_bad_s cn38xx;
- struct cvmx_gmxx_rxx_stats_pkts_bad_s cn38xxp2;
- struct cvmx_gmxx_rxx_stats_pkts_bad_s cn50xx;
- struct cvmx_gmxx_rxx_stats_pkts_bad_s cn52xx;
- struct cvmx_gmxx_rxx_stats_pkts_bad_s cn52xxp1;
- struct cvmx_gmxx_rxx_stats_pkts_bad_s cn56xx;
- struct cvmx_gmxx_rxx_stats_pkts_bad_s cn56xxp1;
- struct cvmx_gmxx_rxx_stats_pkts_bad_s cn58xx;
- struct cvmx_gmxx_rxx_stats_pkts_bad_s cn58xxp1;
-};
-
-union cvmx_gmxx_rxx_stats_pkts_ctl {
- uint64_t u64;
- struct cvmx_gmxx_rxx_stats_pkts_ctl_s {
- uint64_t reserved_32_63:32;
- uint64_t cnt:32;
- } s;
- struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn30xx;
- struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn31xx;
- struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn38xx;
- struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn38xxp2;
- struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn50xx;
- struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn52xx;
- struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn52xxp1;
- struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn56xx;
- struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn56xxp1;
- struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn58xx;
- struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn58xxp1;
-};
-
-union cvmx_gmxx_rxx_stats_pkts_dmac {
- uint64_t u64;
- struct cvmx_gmxx_rxx_stats_pkts_dmac_s {
- uint64_t reserved_32_63:32;
- uint64_t cnt:32;
- } s;
- struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn30xx;
- struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn31xx;
- struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn38xx;
- struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn38xxp2;
- struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn50xx;
- struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn52xx;
- struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn52xxp1;
- struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn56xx;
- struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn56xxp1;
- struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn58xx;
- struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn58xxp1;
-};
-
-union cvmx_gmxx_rxx_stats_pkts_drp {
- uint64_t u64;
- struct cvmx_gmxx_rxx_stats_pkts_drp_s {
- uint64_t reserved_32_63:32;
- uint64_t cnt:32;
- } s;
- struct cvmx_gmxx_rxx_stats_pkts_drp_s cn30xx;
- struct cvmx_gmxx_rxx_stats_pkts_drp_s cn31xx;
- struct cvmx_gmxx_rxx_stats_pkts_drp_s cn38xx;
- struct cvmx_gmxx_rxx_stats_pkts_drp_s cn38xxp2;
- struct cvmx_gmxx_rxx_stats_pkts_drp_s cn50xx;
- struct cvmx_gmxx_rxx_stats_pkts_drp_s cn52xx;
- struct cvmx_gmxx_rxx_stats_pkts_drp_s cn52xxp1;
- struct cvmx_gmxx_rxx_stats_pkts_drp_s cn56xx;
- struct cvmx_gmxx_rxx_stats_pkts_drp_s cn56xxp1;
- struct cvmx_gmxx_rxx_stats_pkts_drp_s cn58xx;
- struct cvmx_gmxx_rxx_stats_pkts_drp_s cn58xxp1;
-};
-
-union cvmx_gmxx_rxx_udd_skp {
- uint64_t u64;
- struct cvmx_gmxx_rxx_udd_skp_s {
- uint64_t reserved_9_63:55;
- uint64_t fcssel:1;
- uint64_t reserved_7_7:1;
- uint64_t len:7;
- } s;
- struct cvmx_gmxx_rxx_udd_skp_s cn30xx;
- struct cvmx_gmxx_rxx_udd_skp_s cn31xx;
- struct cvmx_gmxx_rxx_udd_skp_s cn38xx;
- struct cvmx_gmxx_rxx_udd_skp_s cn38xxp2;
- struct cvmx_gmxx_rxx_udd_skp_s cn50xx;
- struct cvmx_gmxx_rxx_udd_skp_s cn52xx;
- struct cvmx_gmxx_rxx_udd_skp_s cn52xxp1;
- struct cvmx_gmxx_rxx_udd_skp_s cn56xx;
- struct cvmx_gmxx_rxx_udd_skp_s cn56xxp1;
- struct cvmx_gmxx_rxx_udd_skp_s cn58xx;
- struct cvmx_gmxx_rxx_udd_skp_s cn58xxp1;
-};
-
-union cvmx_gmxx_rx_bp_dropx {
- uint64_t u64;
- struct cvmx_gmxx_rx_bp_dropx_s {
- uint64_t reserved_6_63:58;
- uint64_t mark:6;
- } s;
- struct cvmx_gmxx_rx_bp_dropx_s cn30xx;
- struct cvmx_gmxx_rx_bp_dropx_s cn31xx;
- struct cvmx_gmxx_rx_bp_dropx_s cn38xx;
- struct cvmx_gmxx_rx_bp_dropx_s cn38xxp2;
- struct cvmx_gmxx_rx_bp_dropx_s cn50xx;
- struct cvmx_gmxx_rx_bp_dropx_s cn52xx;
- struct cvmx_gmxx_rx_bp_dropx_s cn52xxp1;
- struct cvmx_gmxx_rx_bp_dropx_s cn56xx;
- struct cvmx_gmxx_rx_bp_dropx_s cn56xxp1;
- struct cvmx_gmxx_rx_bp_dropx_s cn58xx;
- struct cvmx_gmxx_rx_bp_dropx_s cn58xxp1;
-};
-
-union cvmx_gmxx_rx_bp_offx {
- uint64_t u64;
- struct cvmx_gmxx_rx_bp_offx_s {
- uint64_t reserved_6_63:58;
- uint64_t mark:6;
- } s;
- struct cvmx_gmxx_rx_bp_offx_s cn30xx;
- struct cvmx_gmxx_rx_bp_offx_s cn31xx;
- struct cvmx_gmxx_rx_bp_offx_s cn38xx;
- struct cvmx_gmxx_rx_bp_offx_s cn38xxp2;
- struct cvmx_gmxx_rx_bp_offx_s cn50xx;
- struct cvmx_gmxx_rx_bp_offx_s cn52xx;
- struct cvmx_gmxx_rx_bp_offx_s cn52xxp1;
- struct cvmx_gmxx_rx_bp_offx_s cn56xx;
- struct cvmx_gmxx_rx_bp_offx_s cn56xxp1;
- struct cvmx_gmxx_rx_bp_offx_s cn58xx;
- struct cvmx_gmxx_rx_bp_offx_s cn58xxp1;
-};
-
-union cvmx_gmxx_rx_bp_onx {
- uint64_t u64;
- struct cvmx_gmxx_rx_bp_onx_s {
- uint64_t reserved_9_63:55;
- uint64_t mark:9;
- } s;
- struct cvmx_gmxx_rx_bp_onx_s cn30xx;
- struct cvmx_gmxx_rx_bp_onx_s cn31xx;
- struct cvmx_gmxx_rx_bp_onx_s cn38xx;
- struct cvmx_gmxx_rx_bp_onx_s cn38xxp2;
- struct cvmx_gmxx_rx_bp_onx_s cn50xx;
- struct cvmx_gmxx_rx_bp_onx_s cn52xx;
- struct cvmx_gmxx_rx_bp_onx_s cn52xxp1;
- struct cvmx_gmxx_rx_bp_onx_s cn56xx;
- struct cvmx_gmxx_rx_bp_onx_s cn56xxp1;
- struct cvmx_gmxx_rx_bp_onx_s cn58xx;
- struct cvmx_gmxx_rx_bp_onx_s cn58xxp1;
-};
-
-union cvmx_gmxx_rx_hg2_status {
- uint64_t u64;
- struct cvmx_gmxx_rx_hg2_status_s {
- uint64_t reserved_48_63:16;
- uint64_t phtim2go:16;
- uint64_t xof:16;
- uint64_t lgtim2go:16;
- } s;
- struct cvmx_gmxx_rx_hg2_status_s cn52xx;
- struct cvmx_gmxx_rx_hg2_status_s cn52xxp1;
- struct cvmx_gmxx_rx_hg2_status_s cn56xx;
-};
-
-union cvmx_gmxx_rx_pass_en {
- uint64_t u64;
- struct cvmx_gmxx_rx_pass_en_s {
- uint64_t reserved_16_63:48;
- uint64_t en:16;
- } s;
- struct cvmx_gmxx_rx_pass_en_s cn38xx;
- struct cvmx_gmxx_rx_pass_en_s cn38xxp2;
- struct cvmx_gmxx_rx_pass_en_s cn58xx;
- struct cvmx_gmxx_rx_pass_en_s cn58xxp1;
-};
-
-union cvmx_gmxx_rx_pass_mapx {
- uint64_t u64;
- struct cvmx_gmxx_rx_pass_mapx_s {
- uint64_t reserved_4_63:60;
- uint64_t dprt:4;
- } s;
- struct cvmx_gmxx_rx_pass_mapx_s cn38xx;
- struct cvmx_gmxx_rx_pass_mapx_s cn38xxp2;
- struct cvmx_gmxx_rx_pass_mapx_s cn58xx;
- struct cvmx_gmxx_rx_pass_mapx_s cn58xxp1;
-};
-
-union cvmx_gmxx_rx_prt_info {
- uint64_t u64;
- struct cvmx_gmxx_rx_prt_info_s {
- uint64_t reserved_32_63:32;
- uint64_t drop:16;
- uint64_t commit:16;
- } s;
- struct cvmx_gmxx_rx_prt_info_cn30xx {
- uint64_t reserved_19_63:45;
- uint64_t drop:3;
- uint64_t reserved_3_15:13;
- uint64_t commit:3;
- } cn30xx;
- struct cvmx_gmxx_rx_prt_info_cn30xx cn31xx;
- struct cvmx_gmxx_rx_prt_info_s cn38xx;
- struct cvmx_gmxx_rx_prt_info_cn30xx cn50xx;
- struct cvmx_gmxx_rx_prt_info_cn52xx {
- uint64_t reserved_20_63:44;
- uint64_t drop:4;
- uint64_t reserved_4_15:12;
- uint64_t commit:4;
- } cn52xx;
- struct cvmx_gmxx_rx_prt_info_cn52xx cn52xxp1;
- struct cvmx_gmxx_rx_prt_info_cn52xx cn56xx;
- struct cvmx_gmxx_rx_prt_info_cn52xx cn56xxp1;
- struct cvmx_gmxx_rx_prt_info_s cn58xx;
- struct cvmx_gmxx_rx_prt_info_s cn58xxp1;
-};
-
-union cvmx_gmxx_rx_prts {
- uint64_t u64;
- struct cvmx_gmxx_rx_prts_s {
- uint64_t reserved_3_63:61;
- uint64_t prts:3;
- } s;
- struct cvmx_gmxx_rx_prts_s cn30xx;
- struct cvmx_gmxx_rx_prts_s cn31xx;
- struct cvmx_gmxx_rx_prts_s cn38xx;
- struct cvmx_gmxx_rx_prts_s cn38xxp2;
- struct cvmx_gmxx_rx_prts_s cn50xx;
- struct cvmx_gmxx_rx_prts_s cn52xx;
- struct cvmx_gmxx_rx_prts_s cn52xxp1;
- struct cvmx_gmxx_rx_prts_s cn56xx;
- struct cvmx_gmxx_rx_prts_s cn56xxp1;
- struct cvmx_gmxx_rx_prts_s cn58xx;
- struct cvmx_gmxx_rx_prts_s cn58xxp1;
-};
-
-union cvmx_gmxx_rx_tx_status {
- uint64_t u64;
- struct cvmx_gmxx_rx_tx_status_s {
- uint64_t reserved_7_63:57;
- uint64_t tx:3;
- uint64_t reserved_3_3:1;
- uint64_t rx:3;
- } s;
- struct cvmx_gmxx_rx_tx_status_s cn30xx;
- struct cvmx_gmxx_rx_tx_status_s cn31xx;
- struct cvmx_gmxx_rx_tx_status_s cn50xx;
-};
-
-union cvmx_gmxx_rx_xaui_bad_col {
- uint64_t u64;
- struct cvmx_gmxx_rx_xaui_bad_col_s {
- uint64_t reserved_40_63:24;
- uint64_t val:1;
- uint64_t state:3;
- uint64_t lane_rxc:4;
- uint64_t lane_rxd:32;
- } s;
- struct cvmx_gmxx_rx_xaui_bad_col_s cn52xx;
- struct cvmx_gmxx_rx_xaui_bad_col_s cn52xxp1;
- struct cvmx_gmxx_rx_xaui_bad_col_s cn56xx;
- struct cvmx_gmxx_rx_xaui_bad_col_s cn56xxp1;
-};
-
-union cvmx_gmxx_rx_xaui_ctl {
- uint64_t u64;
- struct cvmx_gmxx_rx_xaui_ctl_s {
- uint64_t reserved_2_63:62;
- uint64_t status:2;
- } s;
- struct cvmx_gmxx_rx_xaui_ctl_s cn52xx;
- struct cvmx_gmxx_rx_xaui_ctl_s cn52xxp1;
- struct cvmx_gmxx_rx_xaui_ctl_s cn56xx;
- struct cvmx_gmxx_rx_xaui_ctl_s cn56xxp1;
-};
-
-union cvmx_gmxx_smacx {
- uint64_t u64;
- struct cvmx_gmxx_smacx_s {
- uint64_t reserved_48_63:16;
- uint64_t smac:48;
- } s;
- struct cvmx_gmxx_smacx_s cn30xx;
- struct cvmx_gmxx_smacx_s cn31xx;
- struct cvmx_gmxx_smacx_s cn38xx;
- struct cvmx_gmxx_smacx_s cn38xxp2;
- struct cvmx_gmxx_smacx_s cn50xx;
- struct cvmx_gmxx_smacx_s cn52xx;
- struct cvmx_gmxx_smacx_s cn52xxp1;
- struct cvmx_gmxx_smacx_s cn56xx;
- struct cvmx_gmxx_smacx_s cn56xxp1;
- struct cvmx_gmxx_smacx_s cn58xx;
- struct cvmx_gmxx_smacx_s cn58xxp1;
-};
-
-union cvmx_gmxx_stat_bp {
- uint64_t u64;
- struct cvmx_gmxx_stat_bp_s {
- uint64_t reserved_17_63:47;
- uint64_t bp:1;
- uint64_t cnt:16;
- } s;
- struct cvmx_gmxx_stat_bp_s cn30xx;
- struct cvmx_gmxx_stat_bp_s cn31xx;
- struct cvmx_gmxx_stat_bp_s cn38xx;
- struct cvmx_gmxx_stat_bp_s cn38xxp2;
- struct cvmx_gmxx_stat_bp_s cn50xx;
- struct cvmx_gmxx_stat_bp_s cn52xx;
- struct cvmx_gmxx_stat_bp_s cn52xxp1;
- struct cvmx_gmxx_stat_bp_s cn56xx;
- struct cvmx_gmxx_stat_bp_s cn56xxp1;
- struct cvmx_gmxx_stat_bp_s cn58xx;
- struct cvmx_gmxx_stat_bp_s cn58xxp1;
-};
-
-union cvmx_gmxx_txx_append {
- uint64_t u64;
- struct cvmx_gmxx_txx_append_s {
- uint64_t reserved_4_63:60;
- uint64_t force_fcs:1;
- uint64_t fcs:1;
- uint64_t pad:1;
- uint64_t preamble:1;
- } s;
- struct cvmx_gmxx_txx_append_s cn30xx;
- struct cvmx_gmxx_txx_append_s cn31xx;
- struct cvmx_gmxx_txx_append_s cn38xx;
- struct cvmx_gmxx_txx_append_s cn38xxp2;
- struct cvmx_gmxx_txx_append_s cn50xx;
- struct cvmx_gmxx_txx_append_s cn52xx;
- struct cvmx_gmxx_txx_append_s cn52xxp1;
- struct cvmx_gmxx_txx_append_s cn56xx;
- struct cvmx_gmxx_txx_append_s cn56xxp1;
- struct cvmx_gmxx_txx_append_s cn58xx;
- struct cvmx_gmxx_txx_append_s cn58xxp1;
-};
-
-union cvmx_gmxx_txx_burst {
- uint64_t u64;
- struct cvmx_gmxx_txx_burst_s {
- uint64_t reserved_16_63:48;
- uint64_t burst:16;
- } s;
- struct cvmx_gmxx_txx_burst_s cn30xx;
- struct cvmx_gmxx_txx_burst_s cn31xx;
- struct cvmx_gmxx_txx_burst_s cn38xx;
- struct cvmx_gmxx_txx_burst_s cn38xxp2;
- struct cvmx_gmxx_txx_burst_s cn50xx;
- struct cvmx_gmxx_txx_burst_s cn52xx;
- struct cvmx_gmxx_txx_burst_s cn52xxp1;
- struct cvmx_gmxx_txx_burst_s cn56xx;
- struct cvmx_gmxx_txx_burst_s cn56xxp1;
- struct cvmx_gmxx_txx_burst_s cn58xx;
- struct cvmx_gmxx_txx_burst_s cn58xxp1;
-};
-
-union cvmx_gmxx_txx_cbfc_xoff {
- uint64_t u64;
- struct cvmx_gmxx_txx_cbfc_xoff_s {
- uint64_t reserved_16_63:48;
- uint64_t xoff:16;
- } s;
- struct cvmx_gmxx_txx_cbfc_xoff_s cn52xx;
- struct cvmx_gmxx_txx_cbfc_xoff_s cn56xx;
-};
-
-union cvmx_gmxx_txx_cbfc_xon {
- uint64_t u64;
- struct cvmx_gmxx_txx_cbfc_xon_s {
- uint64_t reserved_16_63:48;
- uint64_t xon:16;
- } s;
- struct cvmx_gmxx_txx_cbfc_xon_s cn52xx;
- struct cvmx_gmxx_txx_cbfc_xon_s cn56xx;
-};
-
-union cvmx_gmxx_txx_clk {
- uint64_t u64;
- struct cvmx_gmxx_txx_clk_s {
- uint64_t reserved_6_63:58;
- uint64_t clk_cnt:6;
- } s;
- struct cvmx_gmxx_txx_clk_s cn30xx;
- struct cvmx_gmxx_txx_clk_s cn31xx;
- struct cvmx_gmxx_txx_clk_s cn38xx;
- struct cvmx_gmxx_txx_clk_s cn38xxp2;
- struct cvmx_gmxx_txx_clk_s cn50xx;
- struct cvmx_gmxx_txx_clk_s cn58xx;
- struct cvmx_gmxx_txx_clk_s cn58xxp1;
-};
-
-union cvmx_gmxx_txx_ctl {
- uint64_t u64;
- struct cvmx_gmxx_txx_ctl_s {
- uint64_t reserved_2_63:62;
- uint64_t xsdef_en:1;
- uint64_t xscol_en:1;
- } s;
- struct cvmx_gmxx_txx_ctl_s cn30xx;
- struct cvmx_gmxx_txx_ctl_s cn31xx;
- struct cvmx_gmxx_txx_ctl_s cn38xx;
- struct cvmx_gmxx_txx_ctl_s cn38xxp2;
- struct cvmx_gmxx_txx_ctl_s cn50xx;
- struct cvmx_gmxx_txx_ctl_s cn52xx;
- struct cvmx_gmxx_txx_ctl_s cn52xxp1;
- struct cvmx_gmxx_txx_ctl_s cn56xx;
- struct cvmx_gmxx_txx_ctl_s cn56xxp1;
- struct cvmx_gmxx_txx_ctl_s cn58xx;
- struct cvmx_gmxx_txx_ctl_s cn58xxp1;
-};
-
-union cvmx_gmxx_txx_min_pkt {
- uint64_t u64;
- struct cvmx_gmxx_txx_min_pkt_s {
- uint64_t reserved_8_63:56;
- uint64_t min_size:8;
- } s;
- struct cvmx_gmxx_txx_min_pkt_s cn30xx;
- struct cvmx_gmxx_txx_min_pkt_s cn31xx;
- struct cvmx_gmxx_txx_min_pkt_s cn38xx;
- struct cvmx_gmxx_txx_min_pkt_s cn38xxp2;
- struct cvmx_gmxx_txx_min_pkt_s cn50xx;
- struct cvmx_gmxx_txx_min_pkt_s cn52xx;
- struct cvmx_gmxx_txx_min_pkt_s cn52xxp1;
- struct cvmx_gmxx_txx_min_pkt_s cn56xx;
- struct cvmx_gmxx_txx_min_pkt_s cn56xxp1;
- struct cvmx_gmxx_txx_min_pkt_s cn58xx;
- struct cvmx_gmxx_txx_min_pkt_s cn58xxp1;
-};
-
-union cvmx_gmxx_txx_pause_pkt_interval {
- uint64_t u64;
- struct cvmx_gmxx_txx_pause_pkt_interval_s {
- uint64_t reserved_16_63:48;
- uint64_t interval:16;
- } s;
- struct cvmx_gmxx_txx_pause_pkt_interval_s cn30xx;
- struct cvmx_gmxx_txx_pause_pkt_interval_s cn31xx;
- struct cvmx_gmxx_txx_pause_pkt_interval_s cn38xx;
- struct cvmx_gmxx_txx_pause_pkt_interval_s cn38xxp2;
- struct cvmx_gmxx_txx_pause_pkt_interval_s cn50xx;
- struct cvmx_gmxx_txx_pause_pkt_interval_s cn52xx;
- struct cvmx_gmxx_txx_pause_pkt_interval_s cn52xxp1;
- struct cvmx_gmxx_txx_pause_pkt_interval_s cn56xx;
- struct cvmx_gmxx_txx_pause_pkt_interval_s cn56xxp1;
- struct cvmx_gmxx_txx_pause_pkt_interval_s cn58xx;
- struct cvmx_gmxx_txx_pause_pkt_interval_s cn58xxp1;
-};
-
-union cvmx_gmxx_txx_pause_pkt_time {
- uint64_t u64;
- struct cvmx_gmxx_txx_pause_pkt_time_s {
- uint64_t reserved_16_63:48;
- uint64_t time:16;
- } s;
- struct cvmx_gmxx_txx_pause_pkt_time_s cn30xx;
- struct cvmx_gmxx_txx_pause_pkt_time_s cn31xx;
- struct cvmx_gmxx_txx_pause_pkt_time_s cn38xx;
- struct cvmx_gmxx_txx_pause_pkt_time_s cn38xxp2;
- struct cvmx_gmxx_txx_pause_pkt_time_s cn50xx;
- struct cvmx_gmxx_txx_pause_pkt_time_s cn52xx;
- struct cvmx_gmxx_txx_pause_pkt_time_s cn52xxp1;
- struct cvmx_gmxx_txx_pause_pkt_time_s cn56xx;
- struct cvmx_gmxx_txx_pause_pkt_time_s cn56xxp1;
- struct cvmx_gmxx_txx_pause_pkt_time_s cn58xx;
- struct cvmx_gmxx_txx_pause_pkt_time_s cn58xxp1;
-};
-
-union cvmx_gmxx_txx_pause_togo {
- uint64_t u64;
- struct cvmx_gmxx_txx_pause_togo_s {
- uint64_t reserved_32_63:32;
- uint64_t msg_time:16;
- uint64_t time:16;
- } s;
- struct cvmx_gmxx_txx_pause_togo_cn30xx {
- uint64_t reserved_16_63:48;
- uint64_t time:16;
- } cn30xx;
- struct cvmx_gmxx_txx_pause_togo_cn30xx cn31xx;
- struct cvmx_gmxx_txx_pause_togo_cn30xx cn38xx;
- struct cvmx_gmxx_txx_pause_togo_cn30xx cn38xxp2;
- struct cvmx_gmxx_txx_pause_togo_cn30xx cn50xx;
- struct cvmx_gmxx_txx_pause_togo_s cn52xx;
- struct cvmx_gmxx_txx_pause_togo_s cn52xxp1;
- struct cvmx_gmxx_txx_pause_togo_s cn56xx;
- struct cvmx_gmxx_txx_pause_togo_cn30xx cn56xxp1;
- struct cvmx_gmxx_txx_pause_togo_cn30xx cn58xx;
- struct cvmx_gmxx_txx_pause_togo_cn30xx cn58xxp1;
-};
-
-union cvmx_gmxx_txx_pause_zero {
- uint64_t u64;
- struct cvmx_gmxx_txx_pause_zero_s {
- uint64_t reserved_1_63:63;
- uint64_t send:1;
- } s;
- struct cvmx_gmxx_txx_pause_zero_s cn30xx;
- struct cvmx_gmxx_txx_pause_zero_s cn31xx;
- struct cvmx_gmxx_txx_pause_zero_s cn38xx;
- struct cvmx_gmxx_txx_pause_zero_s cn38xxp2;
- struct cvmx_gmxx_txx_pause_zero_s cn50xx;
- struct cvmx_gmxx_txx_pause_zero_s cn52xx;
- struct cvmx_gmxx_txx_pause_zero_s cn52xxp1;
- struct cvmx_gmxx_txx_pause_zero_s cn56xx;
- struct cvmx_gmxx_txx_pause_zero_s cn56xxp1;
- struct cvmx_gmxx_txx_pause_zero_s cn58xx;
- struct cvmx_gmxx_txx_pause_zero_s cn58xxp1;
-};
-
-union cvmx_gmxx_txx_sgmii_ctl {
- uint64_t u64;
- struct cvmx_gmxx_txx_sgmii_ctl_s {
- uint64_t reserved_1_63:63;
- uint64_t align:1;
- } s;
- struct cvmx_gmxx_txx_sgmii_ctl_s cn52xx;
- struct cvmx_gmxx_txx_sgmii_ctl_s cn52xxp1;
- struct cvmx_gmxx_txx_sgmii_ctl_s cn56xx;
- struct cvmx_gmxx_txx_sgmii_ctl_s cn56xxp1;
-};
-
-union cvmx_gmxx_txx_slot {
- uint64_t u64;
- struct cvmx_gmxx_txx_slot_s {
- uint64_t reserved_10_63:54;
- uint64_t slot:10;
- } s;
- struct cvmx_gmxx_txx_slot_s cn30xx;
- struct cvmx_gmxx_txx_slot_s cn31xx;
- struct cvmx_gmxx_txx_slot_s cn38xx;
- struct cvmx_gmxx_txx_slot_s cn38xxp2;
- struct cvmx_gmxx_txx_slot_s cn50xx;
- struct cvmx_gmxx_txx_slot_s cn52xx;
- struct cvmx_gmxx_txx_slot_s cn52xxp1;
- struct cvmx_gmxx_txx_slot_s cn56xx;
- struct cvmx_gmxx_txx_slot_s cn56xxp1;
- struct cvmx_gmxx_txx_slot_s cn58xx;
- struct cvmx_gmxx_txx_slot_s cn58xxp1;
-};
-
-union cvmx_gmxx_txx_soft_pause {
- uint64_t u64;
- struct cvmx_gmxx_txx_soft_pause_s {
- uint64_t reserved_16_63:48;
- uint64_t time:16;
- } s;
- struct cvmx_gmxx_txx_soft_pause_s cn30xx;
- struct cvmx_gmxx_txx_soft_pause_s cn31xx;
- struct cvmx_gmxx_txx_soft_pause_s cn38xx;
- struct cvmx_gmxx_txx_soft_pause_s cn38xxp2;
- struct cvmx_gmxx_txx_soft_pause_s cn50xx;
- struct cvmx_gmxx_txx_soft_pause_s cn52xx;
- struct cvmx_gmxx_txx_soft_pause_s cn52xxp1;
- struct cvmx_gmxx_txx_soft_pause_s cn56xx;
- struct cvmx_gmxx_txx_soft_pause_s cn56xxp1;
- struct cvmx_gmxx_txx_soft_pause_s cn58xx;
- struct cvmx_gmxx_txx_soft_pause_s cn58xxp1;
-};
-
-union cvmx_gmxx_txx_stat0 {
- uint64_t u64;
- struct cvmx_gmxx_txx_stat0_s {
- uint64_t xsdef:32;
- uint64_t xscol:32;
- } s;
- struct cvmx_gmxx_txx_stat0_s cn30xx;
- struct cvmx_gmxx_txx_stat0_s cn31xx;
- struct cvmx_gmxx_txx_stat0_s cn38xx;
- struct cvmx_gmxx_txx_stat0_s cn38xxp2;
- struct cvmx_gmxx_txx_stat0_s cn50xx;
- struct cvmx_gmxx_txx_stat0_s cn52xx;
- struct cvmx_gmxx_txx_stat0_s cn52xxp1;
- struct cvmx_gmxx_txx_stat0_s cn56xx;
- struct cvmx_gmxx_txx_stat0_s cn56xxp1;
- struct cvmx_gmxx_txx_stat0_s cn58xx;
- struct cvmx_gmxx_txx_stat0_s cn58xxp1;
-};
-
-union cvmx_gmxx_txx_stat1 {
- uint64_t u64;
- struct cvmx_gmxx_txx_stat1_s {
- uint64_t scol:32;
- uint64_t mcol:32;
- } s;
- struct cvmx_gmxx_txx_stat1_s cn30xx;
- struct cvmx_gmxx_txx_stat1_s cn31xx;
- struct cvmx_gmxx_txx_stat1_s cn38xx;
- struct cvmx_gmxx_txx_stat1_s cn38xxp2;
- struct cvmx_gmxx_txx_stat1_s cn50xx;
- struct cvmx_gmxx_txx_stat1_s cn52xx;
- struct cvmx_gmxx_txx_stat1_s cn52xxp1;
- struct cvmx_gmxx_txx_stat1_s cn56xx;
- struct cvmx_gmxx_txx_stat1_s cn56xxp1;
- struct cvmx_gmxx_txx_stat1_s cn58xx;
- struct cvmx_gmxx_txx_stat1_s cn58xxp1;
-};
-
-union cvmx_gmxx_txx_stat2 {
- uint64_t u64;
- struct cvmx_gmxx_txx_stat2_s {
- uint64_t reserved_48_63:16;
- uint64_t octs:48;
- } s;
- struct cvmx_gmxx_txx_stat2_s cn30xx;
- struct cvmx_gmxx_txx_stat2_s cn31xx;
- struct cvmx_gmxx_txx_stat2_s cn38xx;
- struct cvmx_gmxx_txx_stat2_s cn38xxp2;
- struct cvmx_gmxx_txx_stat2_s cn50xx;
- struct cvmx_gmxx_txx_stat2_s cn52xx;
- struct cvmx_gmxx_txx_stat2_s cn52xxp1;
- struct cvmx_gmxx_txx_stat2_s cn56xx;
- struct cvmx_gmxx_txx_stat2_s cn56xxp1;
- struct cvmx_gmxx_txx_stat2_s cn58xx;
- struct cvmx_gmxx_txx_stat2_s cn58xxp1;
-};
-
-union cvmx_gmxx_txx_stat3 {
- uint64_t u64;
- struct cvmx_gmxx_txx_stat3_s {
- uint64_t reserved_32_63:32;
- uint64_t pkts:32;
- } s;
- struct cvmx_gmxx_txx_stat3_s cn30xx;
- struct cvmx_gmxx_txx_stat3_s cn31xx;
- struct cvmx_gmxx_txx_stat3_s cn38xx;
- struct cvmx_gmxx_txx_stat3_s cn38xxp2;
- struct cvmx_gmxx_txx_stat3_s cn50xx;
- struct cvmx_gmxx_txx_stat3_s cn52xx;
- struct cvmx_gmxx_txx_stat3_s cn52xxp1;
- struct cvmx_gmxx_txx_stat3_s cn56xx;
- struct cvmx_gmxx_txx_stat3_s cn56xxp1;
- struct cvmx_gmxx_txx_stat3_s cn58xx;
- struct cvmx_gmxx_txx_stat3_s cn58xxp1;
-};
-
-union cvmx_gmxx_txx_stat4 {
- uint64_t u64;
- struct cvmx_gmxx_txx_stat4_s {
- uint64_t hist1:32;
- uint64_t hist0:32;
- } s;
- struct cvmx_gmxx_txx_stat4_s cn30xx;
- struct cvmx_gmxx_txx_stat4_s cn31xx;
- struct cvmx_gmxx_txx_stat4_s cn38xx;
- struct cvmx_gmxx_txx_stat4_s cn38xxp2;
- struct cvmx_gmxx_txx_stat4_s cn50xx;
- struct cvmx_gmxx_txx_stat4_s cn52xx;
- struct cvmx_gmxx_txx_stat4_s cn52xxp1;
- struct cvmx_gmxx_txx_stat4_s cn56xx;
- struct cvmx_gmxx_txx_stat4_s cn56xxp1;
- struct cvmx_gmxx_txx_stat4_s cn58xx;
- struct cvmx_gmxx_txx_stat4_s cn58xxp1;
-};
-
-union cvmx_gmxx_txx_stat5 {
- uint64_t u64;
- struct cvmx_gmxx_txx_stat5_s {
- uint64_t hist3:32;
- uint64_t hist2:32;
- } s;
- struct cvmx_gmxx_txx_stat5_s cn30xx;
- struct cvmx_gmxx_txx_stat5_s cn31xx;
- struct cvmx_gmxx_txx_stat5_s cn38xx;
- struct cvmx_gmxx_txx_stat5_s cn38xxp2;
- struct cvmx_gmxx_txx_stat5_s cn50xx;
- struct cvmx_gmxx_txx_stat5_s cn52xx;
- struct cvmx_gmxx_txx_stat5_s cn52xxp1;
- struct cvmx_gmxx_txx_stat5_s cn56xx;
- struct cvmx_gmxx_txx_stat5_s cn56xxp1;
- struct cvmx_gmxx_txx_stat5_s cn58xx;
- struct cvmx_gmxx_txx_stat5_s cn58xxp1;
-};
-
-union cvmx_gmxx_txx_stat6 {
- uint64_t u64;
- struct cvmx_gmxx_txx_stat6_s {
- uint64_t hist5:32;
- uint64_t hist4:32;
- } s;
- struct cvmx_gmxx_txx_stat6_s cn30xx;
- struct cvmx_gmxx_txx_stat6_s cn31xx;
- struct cvmx_gmxx_txx_stat6_s cn38xx;
- struct cvmx_gmxx_txx_stat6_s cn38xxp2;
- struct cvmx_gmxx_txx_stat6_s cn50xx;
- struct cvmx_gmxx_txx_stat6_s cn52xx;
- struct cvmx_gmxx_txx_stat6_s cn52xxp1;
- struct cvmx_gmxx_txx_stat6_s cn56xx;
- struct cvmx_gmxx_txx_stat6_s cn56xxp1;
- struct cvmx_gmxx_txx_stat6_s cn58xx;
- struct cvmx_gmxx_txx_stat6_s cn58xxp1;
-};
-
-union cvmx_gmxx_txx_stat7 {
- uint64_t u64;
- struct cvmx_gmxx_txx_stat7_s {
- uint64_t hist7:32;
- uint64_t hist6:32;
- } s;
- struct cvmx_gmxx_txx_stat7_s cn30xx;
- struct cvmx_gmxx_txx_stat7_s cn31xx;
- struct cvmx_gmxx_txx_stat7_s cn38xx;
- struct cvmx_gmxx_txx_stat7_s cn38xxp2;
- struct cvmx_gmxx_txx_stat7_s cn50xx;
- struct cvmx_gmxx_txx_stat7_s cn52xx;
- struct cvmx_gmxx_txx_stat7_s cn52xxp1;
- struct cvmx_gmxx_txx_stat7_s cn56xx;
- struct cvmx_gmxx_txx_stat7_s cn56xxp1;
- struct cvmx_gmxx_txx_stat7_s cn58xx;
- struct cvmx_gmxx_txx_stat7_s cn58xxp1;
-};
-
-union cvmx_gmxx_txx_stat8 {
- uint64_t u64;
- struct cvmx_gmxx_txx_stat8_s {
- uint64_t mcst:32;
- uint64_t bcst:32;
- } s;
- struct cvmx_gmxx_txx_stat8_s cn30xx;
- struct cvmx_gmxx_txx_stat8_s cn31xx;
- struct cvmx_gmxx_txx_stat8_s cn38xx;
- struct cvmx_gmxx_txx_stat8_s cn38xxp2;
- struct cvmx_gmxx_txx_stat8_s cn50xx;
- struct cvmx_gmxx_txx_stat8_s cn52xx;
- struct cvmx_gmxx_txx_stat8_s cn52xxp1;
- struct cvmx_gmxx_txx_stat8_s cn56xx;
- struct cvmx_gmxx_txx_stat8_s cn56xxp1;
- struct cvmx_gmxx_txx_stat8_s cn58xx;
- struct cvmx_gmxx_txx_stat8_s cn58xxp1;
-};
-
-union cvmx_gmxx_txx_stat9 {
- uint64_t u64;
- struct cvmx_gmxx_txx_stat9_s {
- uint64_t undflw:32;
- uint64_t ctl:32;
- } s;
- struct cvmx_gmxx_txx_stat9_s cn30xx;
- struct cvmx_gmxx_txx_stat9_s cn31xx;
- struct cvmx_gmxx_txx_stat9_s cn38xx;
- struct cvmx_gmxx_txx_stat9_s cn38xxp2;
- struct cvmx_gmxx_txx_stat9_s cn50xx;
- struct cvmx_gmxx_txx_stat9_s cn52xx;
- struct cvmx_gmxx_txx_stat9_s cn52xxp1;
- struct cvmx_gmxx_txx_stat9_s cn56xx;
- struct cvmx_gmxx_txx_stat9_s cn56xxp1;
- struct cvmx_gmxx_txx_stat9_s cn58xx;
- struct cvmx_gmxx_txx_stat9_s cn58xxp1;
-};
-
-union cvmx_gmxx_txx_stats_ctl {
- uint64_t u64;
- struct cvmx_gmxx_txx_stats_ctl_s {
- uint64_t reserved_1_63:63;
- uint64_t rd_clr:1;
- } s;
- struct cvmx_gmxx_txx_stats_ctl_s cn30xx;
- struct cvmx_gmxx_txx_stats_ctl_s cn31xx;
- struct cvmx_gmxx_txx_stats_ctl_s cn38xx;
- struct cvmx_gmxx_txx_stats_ctl_s cn38xxp2;
- struct cvmx_gmxx_txx_stats_ctl_s cn50xx;
- struct cvmx_gmxx_txx_stats_ctl_s cn52xx;
- struct cvmx_gmxx_txx_stats_ctl_s cn52xxp1;
- struct cvmx_gmxx_txx_stats_ctl_s cn56xx;
- struct cvmx_gmxx_txx_stats_ctl_s cn56xxp1;
- struct cvmx_gmxx_txx_stats_ctl_s cn58xx;
- struct cvmx_gmxx_txx_stats_ctl_s cn58xxp1;
-};
-
-union cvmx_gmxx_txx_thresh {
- uint64_t u64;
- struct cvmx_gmxx_txx_thresh_s {
- uint64_t reserved_9_63:55;
- uint64_t cnt:9;
- } s;
- struct cvmx_gmxx_txx_thresh_cn30xx {
- uint64_t reserved_7_63:57;
- uint64_t cnt:7;
- } cn30xx;
- struct cvmx_gmxx_txx_thresh_cn30xx cn31xx;
- struct cvmx_gmxx_txx_thresh_s cn38xx;
- struct cvmx_gmxx_txx_thresh_s cn38xxp2;
- struct cvmx_gmxx_txx_thresh_cn30xx cn50xx;
- struct cvmx_gmxx_txx_thresh_s cn52xx;
- struct cvmx_gmxx_txx_thresh_s cn52xxp1;
- struct cvmx_gmxx_txx_thresh_s cn56xx;
- struct cvmx_gmxx_txx_thresh_s cn56xxp1;
- struct cvmx_gmxx_txx_thresh_s cn58xx;
- struct cvmx_gmxx_txx_thresh_s cn58xxp1;
-};
-
-union cvmx_gmxx_tx_bp {
- uint64_t u64;
- struct cvmx_gmxx_tx_bp_s {
- uint64_t reserved_4_63:60;
- uint64_t bp:4;
- } s;
- struct cvmx_gmxx_tx_bp_cn30xx {
- uint64_t reserved_3_63:61;
- uint64_t bp:3;
- } cn30xx;
- struct cvmx_gmxx_tx_bp_cn30xx cn31xx;
- struct cvmx_gmxx_tx_bp_s cn38xx;
- struct cvmx_gmxx_tx_bp_s cn38xxp2;
- struct cvmx_gmxx_tx_bp_cn30xx cn50xx;
- struct cvmx_gmxx_tx_bp_s cn52xx;
- struct cvmx_gmxx_tx_bp_s cn52xxp1;
- struct cvmx_gmxx_tx_bp_s cn56xx;
- struct cvmx_gmxx_tx_bp_s cn56xxp1;
- struct cvmx_gmxx_tx_bp_s cn58xx;
- struct cvmx_gmxx_tx_bp_s cn58xxp1;
-};
-
-union cvmx_gmxx_tx_clk_mskx {
- uint64_t u64;
- struct cvmx_gmxx_tx_clk_mskx_s {
- uint64_t reserved_1_63:63;
- uint64_t msk:1;
- } s;
- struct cvmx_gmxx_tx_clk_mskx_s cn30xx;
- struct cvmx_gmxx_tx_clk_mskx_s cn50xx;
-};
-
-union cvmx_gmxx_tx_col_attempt {
- uint64_t u64;
- struct cvmx_gmxx_tx_col_attempt_s {
- uint64_t reserved_5_63:59;
- uint64_t limit:5;
- } s;
- struct cvmx_gmxx_tx_col_attempt_s cn30xx;
- struct cvmx_gmxx_tx_col_attempt_s cn31xx;
- struct cvmx_gmxx_tx_col_attempt_s cn38xx;
- struct cvmx_gmxx_tx_col_attempt_s cn38xxp2;
- struct cvmx_gmxx_tx_col_attempt_s cn50xx;
- struct cvmx_gmxx_tx_col_attempt_s cn52xx;
- struct cvmx_gmxx_tx_col_attempt_s cn52xxp1;
- struct cvmx_gmxx_tx_col_attempt_s cn56xx;
- struct cvmx_gmxx_tx_col_attempt_s cn56xxp1;
- struct cvmx_gmxx_tx_col_attempt_s cn58xx;
- struct cvmx_gmxx_tx_col_attempt_s cn58xxp1;
-};
-
-union cvmx_gmxx_tx_corrupt {
- uint64_t u64;
- struct cvmx_gmxx_tx_corrupt_s {
- uint64_t reserved_4_63:60;
- uint64_t corrupt:4;
- } s;
- struct cvmx_gmxx_tx_corrupt_cn30xx {
- uint64_t reserved_3_63:61;
- uint64_t corrupt:3;
- } cn30xx;
- struct cvmx_gmxx_tx_corrupt_cn30xx cn31xx;
- struct cvmx_gmxx_tx_corrupt_s cn38xx;
- struct cvmx_gmxx_tx_corrupt_s cn38xxp2;
- struct cvmx_gmxx_tx_corrupt_cn30xx cn50xx;
- struct cvmx_gmxx_tx_corrupt_s cn52xx;
- struct cvmx_gmxx_tx_corrupt_s cn52xxp1;
- struct cvmx_gmxx_tx_corrupt_s cn56xx;
- struct cvmx_gmxx_tx_corrupt_s cn56xxp1;
- struct cvmx_gmxx_tx_corrupt_s cn58xx;
- struct cvmx_gmxx_tx_corrupt_s cn58xxp1;
-};
-
-union cvmx_gmxx_tx_hg2_reg1 {
- uint64_t u64;
- struct cvmx_gmxx_tx_hg2_reg1_s {
- uint64_t reserved_16_63:48;
- uint64_t tx_xof:16;
- } s;
- struct cvmx_gmxx_tx_hg2_reg1_s cn52xx;
- struct cvmx_gmxx_tx_hg2_reg1_s cn52xxp1;
- struct cvmx_gmxx_tx_hg2_reg1_s cn56xx;
-};
-
-union cvmx_gmxx_tx_hg2_reg2 {
- uint64_t u64;
- struct cvmx_gmxx_tx_hg2_reg2_s {
- uint64_t reserved_16_63:48;
- uint64_t tx_xon:16;
- } s;
- struct cvmx_gmxx_tx_hg2_reg2_s cn52xx;
- struct cvmx_gmxx_tx_hg2_reg2_s cn52xxp1;
- struct cvmx_gmxx_tx_hg2_reg2_s cn56xx;
-};
-
-union cvmx_gmxx_tx_ifg {
- uint64_t u64;
- struct cvmx_gmxx_tx_ifg_s {
- uint64_t reserved_8_63:56;
- uint64_t ifg2:4;
- uint64_t ifg1:4;
- } s;
- struct cvmx_gmxx_tx_ifg_s cn30xx;
- struct cvmx_gmxx_tx_ifg_s cn31xx;
- struct cvmx_gmxx_tx_ifg_s cn38xx;
- struct cvmx_gmxx_tx_ifg_s cn38xxp2;
- struct cvmx_gmxx_tx_ifg_s cn50xx;
- struct cvmx_gmxx_tx_ifg_s cn52xx;
- struct cvmx_gmxx_tx_ifg_s cn52xxp1;
- struct cvmx_gmxx_tx_ifg_s cn56xx;
- struct cvmx_gmxx_tx_ifg_s cn56xxp1;
- struct cvmx_gmxx_tx_ifg_s cn58xx;
- struct cvmx_gmxx_tx_ifg_s cn58xxp1;
-};
-
-union cvmx_gmxx_tx_int_en {
- uint64_t u64;
- struct cvmx_gmxx_tx_int_en_s {
- uint64_t reserved_20_63:44;
- uint64_t late_col:4;
- uint64_t xsdef:4;
- uint64_t xscol:4;
- uint64_t reserved_6_7:2;
- uint64_t undflw:4;
- uint64_t ncb_nxa:1;
- uint64_t pko_nxa:1;
- } s;
- struct cvmx_gmxx_tx_int_en_cn30xx {
- uint64_t reserved_19_63:45;
- uint64_t late_col:3;
- uint64_t reserved_15_15:1;
- uint64_t xsdef:3;
- uint64_t reserved_11_11:1;
- uint64_t xscol:3;
- uint64_t reserved_5_7:3;
- uint64_t undflw:3;
- uint64_t reserved_1_1:1;
- uint64_t pko_nxa:1;
- } cn30xx;
- struct cvmx_gmxx_tx_int_en_cn31xx {
- uint64_t reserved_15_63:49;
- uint64_t xsdef:3;
- uint64_t reserved_11_11:1;
- uint64_t xscol:3;
- uint64_t reserved_5_7:3;
- uint64_t undflw:3;
- uint64_t reserved_1_1:1;
- uint64_t pko_nxa:1;
- } cn31xx;
- struct cvmx_gmxx_tx_int_en_s cn38xx;
- struct cvmx_gmxx_tx_int_en_cn38xxp2 {
- uint64_t reserved_16_63:48;
- uint64_t xsdef:4;
- uint64_t xscol:4;
- uint64_t reserved_6_7:2;
- uint64_t undflw:4;
- uint64_t ncb_nxa:1;
- uint64_t pko_nxa:1;
- } cn38xxp2;
- struct cvmx_gmxx_tx_int_en_cn30xx cn50xx;
- struct cvmx_gmxx_tx_int_en_cn52xx {
- uint64_t reserved_20_63:44;
- uint64_t late_col:4;
- uint64_t xsdef:4;
- uint64_t xscol:4;
- uint64_t reserved_6_7:2;
- uint64_t undflw:4;
- uint64_t reserved_1_1:1;
- uint64_t pko_nxa:1;
- } cn52xx;
- struct cvmx_gmxx_tx_int_en_cn52xx cn52xxp1;
- struct cvmx_gmxx_tx_int_en_cn52xx cn56xx;
- struct cvmx_gmxx_tx_int_en_cn52xx cn56xxp1;
- struct cvmx_gmxx_tx_int_en_s cn58xx;
- struct cvmx_gmxx_tx_int_en_s cn58xxp1;
-};
-
-union cvmx_gmxx_tx_int_reg {
- uint64_t u64;
- struct cvmx_gmxx_tx_int_reg_s {
- uint64_t reserved_20_63:44;
- uint64_t late_col:4;
- uint64_t xsdef:4;
- uint64_t xscol:4;
- uint64_t reserved_6_7:2;
- uint64_t undflw:4;
- uint64_t ncb_nxa:1;
- uint64_t pko_nxa:1;
- } s;
- struct cvmx_gmxx_tx_int_reg_cn30xx {
- uint64_t reserved_19_63:45;
- uint64_t late_col:3;
- uint64_t reserved_15_15:1;
- uint64_t xsdef:3;
- uint64_t reserved_11_11:1;
- uint64_t xscol:3;
- uint64_t reserved_5_7:3;
- uint64_t undflw:3;
- uint64_t reserved_1_1:1;
- uint64_t pko_nxa:1;
- } cn30xx;
- struct cvmx_gmxx_tx_int_reg_cn31xx {
- uint64_t reserved_15_63:49;
- uint64_t xsdef:3;
- uint64_t reserved_11_11:1;
- uint64_t xscol:3;
- uint64_t reserved_5_7:3;
- uint64_t undflw:3;
- uint64_t reserved_1_1:1;
- uint64_t pko_nxa:1;
- } cn31xx;
- struct cvmx_gmxx_tx_int_reg_s cn38xx;
- struct cvmx_gmxx_tx_int_reg_cn38xxp2 {
- uint64_t reserved_16_63:48;
- uint64_t xsdef:4;
- uint64_t xscol:4;
- uint64_t reserved_6_7:2;
- uint64_t undflw:4;
- uint64_t ncb_nxa:1;
- uint64_t pko_nxa:1;
- } cn38xxp2;
- struct cvmx_gmxx_tx_int_reg_cn30xx cn50xx;
- struct cvmx_gmxx_tx_int_reg_cn52xx {
- uint64_t reserved_20_63:44;
- uint64_t late_col:4;
- uint64_t xsdef:4;
- uint64_t xscol:4;
- uint64_t reserved_6_7:2;
- uint64_t undflw:4;
- uint64_t reserved_1_1:1;
- uint64_t pko_nxa:1;
- } cn52xx;
- struct cvmx_gmxx_tx_int_reg_cn52xx cn52xxp1;
- struct cvmx_gmxx_tx_int_reg_cn52xx cn56xx;
- struct cvmx_gmxx_tx_int_reg_cn52xx cn56xxp1;
- struct cvmx_gmxx_tx_int_reg_s cn58xx;
- struct cvmx_gmxx_tx_int_reg_s cn58xxp1;
-};
-
-union cvmx_gmxx_tx_jam {
- uint64_t u64;
- struct cvmx_gmxx_tx_jam_s {
- uint64_t reserved_8_63:56;
- uint64_t jam:8;
- } s;
- struct cvmx_gmxx_tx_jam_s cn30xx;
- struct cvmx_gmxx_tx_jam_s cn31xx;
- struct cvmx_gmxx_tx_jam_s cn38xx;
- struct cvmx_gmxx_tx_jam_s cn38xxp2;
- struct cvmx_gmxx_tx_jam_s cn50xx;
- struct cvmx_gmxx_tx_jam_s cn52xx;
- struct cvmx_gmxx_tx_jam_s cn52xxp1;
- struct cvmx_gmxx_tx_jam_s cn56xx;
- struct cvmx_gmxx_tx_jam_s cn56xxp1;
- struct cvmx_gmxx_tx_jam_s cn58xx;
- struct cvmx_gmxx_tx_jam_s cn58xxp1;
-};
-
-union cvmx_gmxx_tx_lfsr {
- uint64_t u64;
- struct cvmx_gmxx_tx_lfsr_s {
- uint64_t reserved_16_63:48;
- uint64_t lfsr:16;
- } s;
- struct cvmx_gmxx_tx_lfsr_s cn30xx;
- struct cvmx_gmxx_tx_lfsr_s cn31xx;
- struct cvmx_gmxx_tx_lfsr_s cn38xx;
- struct cvmx_gmxx_tx_lfsr_s cn38xxp2;
- struct cvmx_gmxx_tx_lfsr_s cn50xx;
- struct cvmx_gmxx_tx_lfsr_s cn52xx;
- struct cvmx_gmxx_tx_lfsr_s cn52xxp1;
- struct cvmx_gmxx_tx_lfsr_s cn56xx;
- struct cvmx_gmxx_tx_lfsr_s cn56xxp1;
- struct cvmx_gmxx_tx_lfsr_s cn58xx;
- struct cvmx_gmxx_tx_lfsr_s cn58xxp1;
-};
-
-union cvmx_gmxx_tx_ovr_bp {
- uint64_t u64;
- struct cvmx_gmxx_tx_ovr_bp_s {
- uint64_t reserved_48_63:16;
- uint64_t tx_prt_bp:16;
- uint64_t reserved_12_31:20;
- uint64_t en:4;
- uint64_t bp:4;
- uint64_t ign_full:4;
- } s;
- struct cvmx_gmxx_tx_ovr_bp_cn30xx {
- uint64_t reserved_11_63:53;
- uint64_t en:3;
- uint64_t reserved_7_7:1;
- uint64_t bp:3;
- uint64_t reserved_3_3:1;
- uint64_t ign_full:3;
- } cn30xx;
- struct cvmx_gmxx_tx_ovr_bp_cn30xx cn31xx;
- struct cvmx_gmxx_tx_ovr_bp_cn38xx {
- uint64_t reserved_12_63:52;
- uint64_t en:4;
- uint64_t bp:4;
- uint64_t ign_full:4;
- } cn38xx;
- struct cvmx_gmxx_tx_ovr_bp_cn38xx cn38xxp2;
- struct cvmx_gmxx_tx_ovr_bp_cn30xx cn50xx;
- struct cvmx_gmxx_tx_ovr_bp_s cn52xx;
- struct cvmx_gmxx_tx_ovr_bp_s cn52xxp1;
- struct cvmx_gmxx_tx_ovr_bp_s cn56xx;
- struct cvmx_gmxx_tx_ovr_bp_s cn56xxp1;
- struct cvmx_gmxx_tx_ovr_bp_cn38xx cn58xx;
- struct cvmx_gmxx_tx_ovr_bp_cn38xx cn58xxp1;
-};
-
-union cvmx_gmxx_tx_pause_pkt_dmac {
- uint64_t u64;
- struct cvmx_gmxx_tx_pause_pkt_dmac_s {
- uint64_t reserved_48_63:16;
- uint64_t dmac:48;
- } s;
- struct cvmx_gmxx_tx_pause_pkt_dmac_s cn30xx;
- struct cvmx_gmxx_tx_pause_pkt_dmac_s cn31xx;
- struct cvmx_gmxx_tx_pause_pkt_dmac_s cn38xx;
- struct cvmx_gmxx_tx_pause_pkt_dmac_s cn38xxp2;
- struct cvmx_gmxx_tx_pause_pkt_dmac_s cn50xx;
- struct cvmx_gmxx_tx_pause_pkt_dmac_s cn52xx;
- struct cvmx_gmxx_tx_pause_pkt_dmac_s cn52xxp1;
- struct cvmx_gmxx_tx_pause_pkt_dmac_s cn56xx;
- struct cvmx_gmxx_tx_pause_pkt_dmac_s cn56xxp1;
- struct cvmx_gmxx_tx_pause_pkt_dmac_s cn58xx;
- struct cvmx_gmxx_tx_pause_pkt_dmac_s cn58xxp1;
-};
-
-union cvmx_gmxx_tx_pause_pkt_type {
- uint64_t u64;
- struct cvmx_gmxx_tx_pause_pkt_type_s {
- uint64_t reserved_16_63:48;
- uint64_t type:16;
- } s;
- struct cvmx_gmxx_tx_pause_pkt_type_s cn30xx;
- struct cvmx_gmxx_tx_pause_pkt_type_s cn31xx;
- struct cvmx_gmxx_tx_pause_pkt_type_s cn38xx;
- struct cvmx_gmxx_tx_pause_pkt_type_s cn38xxp2;
- struct cvmx_gmxx_tx_pause_pkt_type_s cn50xx;
- struct cvmx_gmxx_tx_pause_pkt_type_s cn52xx;
- struct cvmx_gmxx_tx_pause_pkt_type_s cn52xxp1;
- struct cvmx_gmxx_tx_pause_pkt_type_s cn56xx;
- struct cvmx_gmxx_tx_pause_pkt_type_s cn56xxp1;
- struct cvmx_gmxx_tx_pause_pkt_type_s cn58xx;
- struct cvmx_gmxx_tx_pause_pkt_type_s cn58xxp1;
-};
-
-union cvmx_gmxx_tx_prts {
- uint64_t u64;
- struct cvmx_gmxx_tx_prts_s {
- uint64_t reserved_5_63:59;
- uint64_t prts:5;
- } s;
- struct cvmx_gmxx_tx_prts_s cn30xx;
- struct cvmx_gmxx_tx_prts_s cn31xx;
- struct cvmx_gmxx_tx_prts_s cn38xx;
- struct cvmx_gmxx_tx_prts_s cn38xxp2;
- struct cvmx_gmxx_tx_prts_s cn50xx;
- struct cvmx_gmxx_tx_prts_s cn52xx;
- struct cvmx_gmxx_tx_prts_s cn52xxp1;
- struct cvmx_gmxx_tx_prts_s cn56xx;
- struct cvmx_gmxx_tx_prts_s cn56xxp1;
- struct cvmx_gmxx_tx_prts_s cn58xx;
- struct cvmx_gmxx_tx_prts_s cn58xxp1;
-};
-
-union cvmx_gmxx_tx_spi_ctl {
- uint64_t u64;
- struct cvmx_gmxx_tx_spi_ctl_s {
- uint64_t reserved_2_63:62;
- uint64_t tpa_clr:1;
- uint64_t cont_pkt:1;
- } s;
- struct cvmx_gmxx_tx_spi_ctl_s cn38xx;
- struct cvmx_gmxx_tx_spi_ctl_s cn38xxp2;
- struct cvmx_gmxx_tx_spi_ctl_s cn58xx;
- struct cvmx_gmxx_tx_spi_ctl_s cn58xxp1;
-};
-
-union cvmx_gmxx_tx_spi_drain {
- uint64_t u64;
- struct cvmx_gmxx_tx_spi_drain_s {
- uint64_t reserved_16_63:48;
- uint64_t drain:16;
- } s;
- struct cvmx_gmxx_tx_spi_drain_s cn38xx;
- struct cvmx_gmxx_tx_spi_drain_s cn58xx;
- struct cvmx_gmxx_tx_spi_drain_s cn58xxp1;
-};
-
-union cvmx_gmxx_tx_spi_max {
- uint64_t u64;
- struct cvmx_gmxx_tx_spi_max_s {
- uint64_t reserved_23_63:41;
- uint64_t slice:7;
- uint64_t max2:8;
- uint64_t max1:8;
- } s;
- struct cvmx_gmxx_tx_spi_max_cn38xx {
- uint64_t reserved_16_63:48;
- uint64_t max2:8;
- uint64_t max1:8;
- } cn38xx;
- struct cvmx_gmxx_tx_spi_max_cn38xx cn38xxp2;
- struct cvmx_gmxx_tx_spi_max_s cn58xx;
- struct cvmx_gmxx_tx_spi_max_s cn58xxp1;
-};
-
-union cvmx_gmxx_tx_spi_roundx {
- uint64_t u64;
- struct cvmx_gmxx_tx_spi_roundx_s {
- uint64_t reserved_16_63:48;
- uint64_t round:16;
- } s;
- struct cvmx_gmxx_tx_spi_roundx_s cn58xx;
- struct cvmx_gmxx_tx_spi_roundx_s cn58xxp1;
-};
-
-union cvmx_gmxx_tx_spi_thresh {
- uint64_t u64;
- struct cvmx_gmxx_tx_spi_thresh_s {
- uint64_t reserved_6_63:58;
- uint64_t thresh:6;
- } s;
- struct cvmx_gmxx_tx_spi_thresh_s cn38xx;
- struct cvmx_gmxx_tx_spi_thresh_s cn38xxp2;
- struct cvmx_gmxx_tx_spi_thresh_s cn58xx;
- struct cvmx_gmxx_tx_spi_thresh_s cn58xxp1;
-};
-
-union cvmx_gmxx_tx_xaui_ctl {
- uint64_t u64;
- struct cvmx_gmxx_tx_xaui_ctl_s {
- uint64_t reserved_11_63:53;
- uint64_t hg_pause_hgi:2;
- uint64_t hg_en:1;
- uint64_t reserved_7_7:1;
- uint64_t ls_byp:1;
- uint64_t ls:2;
- uint64_t reserved_2_3:2;
- uint64_t uni_en:1;
- uint64_t dic_en:1;
- } s;
- struct cvmx_gmxx_tx_xaui_ctl_s cn52xx;
- struct cvmx_gmxx_tx_xaui_ctl_s cn52xxp1;
- struct cvmx_gmxx_tx_xaui_ctl_s cn56xx;
- struct cvmx_gmxx_tx_xaui_ctl_s cn56xxp1;
-};
-
-union cvmx_gmxx_xaui_ext_loopback {
- uint64_t u64;
- struct cvmx_gmxx_xaui_ext_loopback_s {
- uint64_t reserved_5_63:59;
- uint64_t en:1;
- uint64_t thresh:4;
- } s;
- struct cvmx_gmxx_xaui_ext_loopback_s cn52xx;
- struct cvmx_gmxx_xaui_ext_loopback_s cn52xxp1;
- struct cvmx_gmxx_xaui_ext_loopback_s cn56xx;
- struct cvmx_gmxx_xaui_ext_loopback_s cn56xxp1;
-};
-
-#endif
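Every union removed above follows the same layout: the raw 64-bit CSR value is exposed as u64, the field breakdown common to all chips as s, and the per-chip variants as the cn30xx/cn52xx/... members. A minimal sketch of the usual read-modify-write access pattern, assuming the cvmx_read_csr()/cvmx_write_csr() accessors and the CVMX_GMXX_RXX_FRM_MAX() address macro provided elsewhere in the OCTEON SDK (the 1536-byte limit is only an example value):

	/* Sketch: raise the maximum accepted frame length on one GMX port. */
	union cvmx_gmxx_rxx_frm_max frm_max;
	int interface = cvmx_helper_get_interface_num(ipd_port);
	int index = cvmx_helper_get_interface_index_num(ipd_port);

	frm_max.u64 = cvmx_read_csr(CVMX_GMXX_RXX_FRM_MAX(index, interface));
	frm_max.s.len = 1536;	/* new maximum frame size, in bytes */
	cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(index, interface), frm_max.u64);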
diff --git a/drivers/staging/octeon/cvmx-helper-board.c b/drivers/staging/octeon/cvmx-helper-board.c
deleted file mode 100644
index 57d35dc63ddb..000000000000
--- a/drivers/staging/octeon/cvmx-helper-board.c
+++ /dev/null
@@ -1,695 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/*
- *
- * Helper functions to abstract board specific data about
- * network ports from the rest of the cvmx-helper files.
- */
-
-#include <asm/octeon/octeon.h>
-#include <asm/octeon/cvmx-bootinfo.h>
-
-#include "cvmx-config.h"
-
-#include "cvmx-mdio.h"
-
-#include "cvmx-helper.h"
-#include "cvmx-helper-util.h"
-#include "cvmx-helper-board.h"
-
-#include "cvmx-gmxx-defs.h"
-#include "cvmx-asxx-defs.h"
-
-/**
- * cvmx_override_board_link_get(int ipd_port) is a function
- * pointer. It is meant to allow customization of the process of
- * talking to a PHY to determine link speed. It is called every
- * time a PHY must be polled for link status. Users should set
- * this pointer to a function before calling any cvmx-helper
- * operations.
- */
-cvmx_helper_link_info_t(*cvmx_override_board_link_get) (int ipd_port) =
- NULL;
-
-/**
- * Return the MII PHY address associated with the given IPD
- * port. A result of -1 means there isn't an MII-capable PHY
- * connected to this port. On chips supporting multiple MII
- * busses the bus number is encoded in bits <15:8>.
- *
- * This function must be modified for every new Octeon board.
- * Internally it uses switch statements based on the cvmx_sysinfo
- * data to determine board types and revisions. It relies on the
- * fact that every Octeon board receives a unique board type
- * enumeration from the bootloader.
- *
- * @ipd_port: Octeon IPD port to get the MII address for.
- *
- * Returns MII PHY address and bus number or -1.
- */
-int cvmx_helper_board_get_mii_address(int ipd_port)
-{
- switch (cvmx_sysinfo_get()->board_type) {
- case CVMX_BOARD_TYPE_SIM:
- /* Simulator doesn't have MII */
- return -1;
- case CVMX_BOARD_TYPE_EBT3000:
- case CVMX_BOARD_TYPE_EBT5800:
- case CVMX_BOARD_TYPE_THUNDER:
- case CVMX_BOARD_TYPE_NICPRO2:
- /* Interface 0 is SPI4, interface 1 is RGMII */
- if ((ipd_port >= 16) && (ipd_port < 20))
- return ipd_port - 16;
- else
- return -1;
- case CVMX_BOARD_TYPE_KODAMA:
- case CVMX_BOARD_TYPE_EBH3100:
- case CVMX_BOARD_TYPE_HIKARI:
- case CVMX_BOARD_TYPE_CN3010_EVB_HS5:
- case CVMX_BOARD_TYPE_CN3005_EVB_HS5:
- case CVMX_BOARD_TYPE_CN3020_EVB_HS5:
- /*
- * Port 0 is WAN connected to a PHY, Port 1 is GMII
- * connected to a switch
- */
- if (ipd_port == 0)
- return 4;
- else if (ipd_port == 1)
- return 9;
- else
- return -1;
- case CVMX_BOARD_TYPE_NAC38:
-		/* Board has 8 RGMII ports; PHYs are 0-7 */
- if ((ipd_port >= 0) && (ipd_port < 4))
- return ipd_port;
- else if ((ipd_port >= 16) && (ipd_port < 20))
- return ipd_port - 16 + 4;
- else
- return -1;
- case CVMX_BOARD_TYPE_EBH3000:
- /* Board has dual SPI4 and no PHYs */
- return -1;
- case CVMX_BOARD_TYPE_EBH5200:
- case CVMX_BOARD_TYPE_EBH5201:
- case CVMX_BOARD_TYPE_EBT5200:
- /*
- * Board has 4 SGMII ports. The PHYs start right after the MII
-		 * ports: MII0 = 0, MII1 = 1, SGMII = 2-5.
- */
- if ((ipd_port >= 0) && (ipd_port < 4))
- return ipd_port + 2;
- else
- return -1;
- case CVMX_BOARD_TYPE_EBH5600:
- case CVMX_BOARD_TYPE_EBH5601:
- case CVMX_BOARD_TYPE_EBH5610:
- /*
- * Board has 8 SGMII ports. 4 connect out, two connect
- * to a switch, and 2 loop to each other
- */
- if ((ipd_port >= 0) && (ipd_port < 4))
- return ipd_port + 1;
- else
- return -1;
- case CVMX_BOARD_TYPE_CUST_NB5:
- if (ipd_port == 2)
- return 4;
- else
- return -1;
- case CVMX_BOARD_TYPE_NIC_XLE_4G:
-		/* Board has 4 SGMII ports, connected to QLM3 (interface 1) */
- if ((ipd_port >= 16) && (ipd_port < 20))
- return ipd_port - 16 + 1;
- else
- return -1;
- case CVMX_BOARD_TYPE_BBGW_REF:
- /*
- * No PHYs are connected to Octeon, everything is
- * through switch.
- */
- return -1;
-
- case CVMX_BOARD_TYPE_CUST_WSX16:
- if (ipd_port >= 0 && ipd_port <= 3)
- return ipd_port;
- else if (ipd_port >= 16 && ipd_port <= 19)
- return ipd_port - 16 + 4;
- else
- return -1;
- }
-
- /* Some unknown board. Somebody forgot to update this function... */
- cvmx_dprintf
- ("cvmx_helper_board_get_mii_address: Unknown board type %d\n",
- cvmx_sysinfo_get()->board_type);
- return -1;
-}
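
Callers undo the encoding described above (bus number in bits <15:8>, PHY address in the low byte) before touching the MDIO bus, exactly as __cvmx_helper_board_link_get() does further down. A hedged fragment of that decoding; the read of IEEE basic-status register 1 is included purely to show how the decoded pair is used:

	int phy_addr = cvmx_helper_board_get_mii_address(ipd_port);

	if (phy_addr != -1) {
		int bus = phy_addr >> 8;	/* MII bus number, bits <15:8> */
		int addr = phy_addr & 0xff;	/* PHY address on that bus */
		/* Register 1 is the IEEE basic status register; bit 2
		 * reports link status. */
		int bmsr = cvmx_mdio_read(bus, addr, 1);

		if (bmsr & (1 << 2))
			cvmx_dprintf("port %d: PHY %d on bus %d link up\n",
				     ipd_port, addr, bus);
	}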
-
-/**
- * This function is the board-specific method of determining an
- * Ethernet port's link speed. Most Octeon boards have Marvell PHYs
- * and are handled by the fall-through case. This function must be
- * updated for boards that don't have the normal Marvell PHYs.
- *
- * This function must be modified for every new Octeon board.
- * Internally it uses switch statements based on the cvmx_sysinfo
- * data to determine board types and revisions. It relies on the
- * fact that every Octeon board receives a unique board type
- * enumeration from the bootloader.
- *
- * @ipd_port: IPD input port associated with the port we want to get link
- * status for.
- *
- * Returns the port's link status. If the link isn't fully resolved, this must
- * return zero.
- */
-cvmx_helper_link_info_t __cvmx_helper_board_link_get(int ipd_port)
-{
- cvmx_helper_link_info_t result;
- int phy_addr;
- int is_broadcom_phy = 0;
-
- /* Give the user a chance to override the processing of this function */
- if (cvmx_override_board_link_get)
- return cvmx_override_board_link_get(ipd_port);
-
- /* Unless we fix it later, all links are defaulted to down */
- result.u64 = 0;
-
- /*
- * This switch statement should handle all ports that either don't use
-	 * Marvell PHYs or don't support in-band status.
- */
- switch (cvmx_sysinfo_get()->board_type) {
- case CVMX_BOARD_TYPE_SIM:
- /* The simulator gives you a simulated 1Gbps full duplex link */
- result.s.link_up = 1;
- result.s.full_duplex = 1;
- result.s.speed = 1000;
- return result;
- case CVMX_BOARD_TYPE_EBH3100:
- case CVMX_BOARD_TYPE_CN3010_EVB_HS5:
- case CVMX_BOARD_TYPE_CN3005_EVB_HS5:
- case CVMX_BOARD_TYPE_CN3020_EVB_HS5:
- /* Port 1 on these boards is always Gigabit */
- if (ipd_port == 1) {
- result.s.link_up = 1;
- result.s.full_duplex = 1;
- result.s.speed = 1000;
- return result;
- }
- /* Fall through to the generic code below */
- break;
- case CVMX_BOARD_TYPE_CUST_NB5:
- /* Port 1 on these boards is always Gigabit */
- if (ipd_port == 1) {
- result.s.link_up = 1;
- result.s.full_duplex = 1;
- result.s.speed = 1000;
- return result;
- } else /* The other port uses a broadcom PHY */
- is_broadcom_phy = 1;
- break;
- case CVMX_BOARD_TYPE_BBGW_REF:
-		/* Ports 0 and 1 connect to a switch; port 2 is not hooked up */
- if (ipd_port == 2) {
- /* Port 2 is not hooked up */
- result.u64 = 0;
- return result;
- } else {
- /* Ports 0 and 1 connect to the switch */
- result.s.link_up = 1;
- result.s.full_duplex = 1;
- result.s.speed = 1000;
- return result;
- }
- break;
- }
-
- phy_addr = cvmx_helper_board_get_mii_address(ipd_port);
- if (phy_addr != -1) {
- if (is_broadcom_phy) {
- /*
- * Below we are going to read SMI/MDIO
- * register 0x19 which works on Broadcom
- * parts
- */
- int phy_status =
- cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
- 0x19);
- switch ((phy_status >> 8) & 0x7) {
- case 0:
- result.u64 = 0;
- break;
- case 1:
- result.s.link_up = 1;
- result.s.full_duplex = 0;
- result.s.speed = 10;
- break;
- case 2:
- result.s.link_up = 1;
- result.s.full_duplex = 1;
- result.s.speed = 10;
- break;
- case 3:
- result.s.link_up = 1;
- result.s.full_duplex = 0;
- result.s.speed = 100;
- break;
- case 4:
- result.s.link_up = 1;
- result.s.full_duplex = 1;
- result.s.speed = 100;
- break;
- case 5:
- result.s.link_up = 1;
- result.s.full_duplex = 1;
- result.s.speed = 100;
- break;
- case 6:
- result.s.link_up = 1;
- result.s.full_duplex = 0;
- result.s.speed = 1000;
- break;
- case 7:
- result.s.link_up = 1;
- result.s.full_duplex = 1;
- result.s.speed = 1000;
- break;
- }
- } else {
- /*
- * This code assumes we are using a Marvell
- * Gigabit PHY. All the speed information can
- * be read from register 17 in one
- * go. Somebody using a different PHY will
- * need to handle it above in the board
- * specific area.
- */
- int phy_status =
- cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff, 17);
-
- /*
- * If the resolve bit 11 isn't set, see if
- * autoneg is turned off (bit 12, reg 0). The
- * resolve bit doesn't get set properly when
- * autoneg is off, so force it.
- */
- if ((phy_status & (1 << 11)) == 0) {
- int auto_status =
- cvmx_mdio_read(phy_addr >> 8,
- phy_addr & 0xff, 0);
- if ((auto_status & (1 << 12)) == 0)
- phy_status |= 1 << 11;
- }
-
- /*
- * Only return a link if the PHY has finished
- * auto negotiation and set the resolved bit
- * (bit 11)
- */
- if (phy_status & (1 << 11)) {
- result.s.link_up = 1;
- result.s.full_duplex = ((phy_status >> 13) & 1);
- switch ((phy_status >> 14) & 3) {
- case 0: /* 10 Mbps */
- result.s.speed = 10;
- break;
- case 1: /* 100 Mbps */
- result.s.speed = 100;
- break;
- case 2: /* 1 Gbps */
- result.s.speed = 1000;
- break;
- case 3: /* Illegal */
- result.u64 = 0;
- break;
- }
- }
- }
- } else if (OCTEON_IS_MODEL(OCTEON_CN3XXX)
- || OCTEON_IS_MODEL(OCTEON_CN58XX)
- || OCTEON_IS_MODEL(OCTEON_CN50XX)) {
- /*
- * We don't have a PHY address, so attempt to use
- * in-band status. It is really important that boards
- * not supporting in-band status never get
- * here. Reading broken in-band status tends to do bad
- * things
- */
- union cvmx_gmxx_rxx_rx_inbnd inband_status;
- int interface = cvmx_helper_get_interface_num(ipd_port);
- int index = cvmx_helper_get_interface_index_num(ipd_port);
- inband_status.u64 =
- cvmx_read_csr(CVMX_GMXX_RXX_RX_INBND(index, interface));
-
- result.s.link_up = inband_status.s.status;
- result.s.full_duplex = inband_status.s.duplex;
- switch (inband_status.s.speed) {
- case 0: /* 10 Mbps */
- result.s.speed = 10;
- break;
- case 1: /* 100 Mbps */
- result.s.speed = 100;
- break;
- case 2: /* 1 Gbps */
- result.s.speed = 1000;
- break;
- case 3: /* Illegal */
- result.u64 = 0;
- break;
- }
- } else {
- /*
- * We don't have a PHY address and we don't have
- * in-band status. There is no way to determine the
- * link speed. Return down assuming this port isn't
- * wired
- */
- result.u64 = 0;
- }
-
- /* If link is down, return all fields as zero. */
- if (!result.s.link_up)
- result.u64 = 0;
-
- return result;
-}
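A caller normally inspects the returned cvmx_helper_link_info_t through its bit-field view; the following is only an illustrative sketch (the calling context is assumed, not part of this file):

    cvmx_helper_link_info_t li = __cvmx_helper_board_link_get(ipd_port);
    if (li.s.link_up)
        cvmx_dprintf("Port %d: %d Mbps, %s duplex\n", ipd_port,
                     (int)li.s.speed,
                     li.s.full_duplex ? "full" : "half");
    else
        cvmx_dprintf("Port %d: link down\n", ipd_port);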
-
-/**
- * This function is a board specific method of changing the PHY
- * speed, duplex, and auto-negotiation. This programs the PHY and
- * not Octeon. This can be used to force Octeon's links to
- * specific settings.
- *
- * @phy_addr: The address of the PHY to program
- * @link_flags:
- * Flags controlling auto-negotiation. Bit 0 enables/disables auto-negotiation.
- * @link_info: Link speed to program. If the speed is zero and auto-negotiation
- * is enabled, all possible negotiation speeds are advertised.
- *
- * Returns Zero on success, negative on failure
- */
-int cvmx_helper_board_link_set_phy(int phy_addr,
- cvmx_helper_board_set_phy_link_flags_types_t
- link_flags,
- cvmx_helper_link_info_t link_info)
-{
-
- /* Set the flow control settings based on link_flags */
- if ((link_flags & set_phy_link_flags_flow_control_mask) !=
- set_phy_link_flags_flow_control_dont_touch) {
- cvmx_mdio_phy_reg_autoneg_adver_t reg_autoneg_adver;
- reg_autoneg_adver.u16 =
- cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
- CVMX_MDIO_PHY_REG_AUTONEG_ADVER);
- reg_autoneg_adver.s.asymmetric_pause =
- (link_flags & set_phy_link_flags_flow_control_mask) ==
- set_phy_link_flags_flow_control_enable;
- reg_autoneg_adver.s.pause =
- (link_flags & set_phy_link_flags_flow_control_mask) ==
- set_phy_link_flags_flow_control_enable;
- cvmx_mdio_write(phy_addr >> 8, phy_addr & 0xff,
- CVMX_MDIO_PHY_REG_AUTONEG_ADVER,
- reg_autoneg_adver.u16);
- }
-
- /* If speed isn't set and autoneg is on, advertise all supported modes */
- if ((link_flags & set_phy_link_flags_autoneg)
- && (link_info.s.speed == 0)) {
- cvmx_mdio_phy_reg_control_t reg_control;
- cvmx_mdio_phy_reg_status_t reg_status;
- cvmx_mdio_phy_reg_autoneg_adver_t reg_autoneg_adver;
- cvmx_mdio_phy_reg_extended_status_t reg_extended_status;
- cvmx_mdio_phy_reg_control_1000_t reg_control_1000;
-
- reg_status.u16 =
- cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
- CVMX_MDIO_PHY_REG_STATUS);
- reg_autoneg_adver.u16 =
- cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
- CVMX_MDIO_PHY_REG_AUTONEG_ADVER);
- reg_autoneg_adver.s.advert_100base_t4 =
- reg_status.s.capable_100base_t4;
- reg_autoneg_adver.s.advert_10base_tx_full =
- reg_status.s.capable_10_full;
- reg_autoneg_adver.s.advert_10base_tx_half =
- reg_status.s.capable_10_half;
- reg_autoneg_adver.s.advert_100base_tx_full =
- reg_status.s.capable_100base_x_full;
- reg_autoneg_adver.s.advert_100base_tx_half =
- reg_status.s.capable_100base_x_half;
- cvmx_mdio_write(phy_addr >> 8, phy_addr & 0xff,
- CVMX_MDIO_PHY_REG_AUTONEG_ADVER,
- reg_autoneg_adver.u16);
- if (reg_status.s.capable_extended_status) {
- reg_extended_status.u16 =
- cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
- CVMX_MDIO_PHY_REG_EXTENDED_STATUS);
- reg_control_1000.u16 =
- cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
- CVMX_MDIO_PHY_REG_CONTROL_1000);
- reg_control_1000.s.advert_1000base_t_full =
- reg_extended_status.s.capable_1000base_t_full;
- reg_control_1000.s.advert_1000base_t_half =
- reg_extended_status.s.capable_1000base_t_half;
- cvmx_mdio_write(phy_addr >> 8, phy_addr & 0xff,
- CVMX_MDIO_PHY_REG_CONTROL_1000,
- reg_control_1000.u16);
- }
- reg_control.u16 =
- cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
- CVMX_MDIO_PHY_REG_CONTROL);
- reg_control.s.autoneg_enable = 1;
- reg_control.s.restart_autoneg = 1;
- cvmx_mdio_write(phy_addr >> 8, phy_addr & 0xff,
- CVMX_MDIO_PHY_REG_CONTROL, reg_control.u16);
- } else if ((link_flags & set_phy_link_flags_autoneg)) {
- cvmx_mdio_phy_reg_control_t reg_control;
- cvmx_mdio_phy_reg_status_t reg_status;
- cvmx_mdio_phy_reg_autoneg_adver_t reg_autoneg_adver;
- cvmx_mdio_phy_reg_extended_status_t reg_extended_status;
- cvmx_mdio_phy_reg_control_1000_t reg_control_1000;
-
- reg_status.u16 =
- cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
- CVMX_MDIO_PHY_REG_STATUS);
- reg_autoneg_adver.u16 =
- cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
- CVMX_MDIO_PHY_REG_AUTONEG_ADVER);
- reg_autoneg_adver.s.advert_100base_t4 = 0;
- reg_autoneg_adver.s.advert_10base_tx_full = 0;
- reg_autoneg_adver.s.advert_10base_tx_half = 0;
- reg_autoneg_adver.s.advert_100base_tx_full = 0;
- reg_autoneg_adver.s.advert_100base_tx_half = 0;
- if (reg_status.s.capable_extended_status) {
- reg_extended_status.u16 =
- cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
- CVMX_MDIO_PHY_REG_EXTENDED_STATUS);
- reg_control_1000.u16 =
- cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
- CVMX_MDIO_PHY_REG_CONTROL_1000);
- reg_control_1000.s.advert_1000base_t_full = 0;
- reg_control_1000.s.advert_1000base_t_half = 0;
- }
- switch (link_info.s.speed) {
- case 10:
- reg_autoneg_adver.s.advert_10base_tx_full =
- link_info.s.full_duplex;
- reg_autoneg_adver.s.advert_10base_tx_half =
- !link_info.s.full_duplex;
- break;
- case 100:
- reg_autoneg_adver.s.advert_100base_tx_full =
- link_info.s.full_duplex;
- reg_autoneg_adver.s.advert_100base_tx_half =
- !link_info.s.full_duplex;
- break;
- case 1000:
- reg_control_1000.s.advert_1000base_t_full =
- link_info.s.full_duplex;
- reg_control_1000.s.advert_1000base_t_half =
- !link_info.s.full_duplex;
- break;
- }
- cvmx_mdio_write(phy_addr >> 8, phy_addr & 0xff,
- CVMX_MDIO_PHY_REG_AUTONEG_ADVER,
- reg_autoneg_adver.u16);
- if (reg_status.s.capable_extended_status)
- cvmx_mdio_write(phy_addr >> 8, phy_addr & 0xff,
- CVMX_MDIO_PHY_REG_CONTROL_1000,
- reg_control_1000.u16);
- reg_control.u16 =
- cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
- CVMX_MDIO_PHY_REG_CONTROL);
- reg_control.s.autoneg_enable = 1;
- reg_control.s.restart_autoneg = 1;
- cvmx_mdio_write(phy_addr >> 8, phy_addr & 0xff,
- CVMX_MDIO_PHY_REG_CONTROL, reg_control.u16);
- } else {
- cvmx_mdio_phy_reg_control_t reg_control;
- reg_control.u16 =
- cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
- CVMX_MDIO_PHY_REG_CONTROL);
- reg_control.s.autoneg_enable = 0;
- reg_control.s.restart_autoneg = 1;
- reg_control.s.duplex = link_info.s.full_duplex;
- if (link_info.s.speed == 1000) {
- reg_control.s.speed_msb = 1;
- reg_control.s.speed_lsb = 0;
- } else if (link_info.s.speed == 100) {
- reg_control.s.speed_msb = 0;
- reg_control.s.speed_lsb = 1;
- } else if (link_info.s.speed == 10) {
- reg_control.s.speed_msb = 0;
- reg_control.s.speed_lsb = 0;
- }
- cvmx_mdio_write(phy_addr >> 8, phy_addr & 0xff,
- CVMX_MDIO_PHY_REG_CONTROL, reg_control.u16);
- }
- return 0;
-}
-
-/**
- * This function is called by cvmx_helper_interface_probe() after it
- * determines the number of ports Octeon can support on a specific
- * interface. This function is the per board location to override
- * this value. It is called with the number of ports Octeon might
- * support and should return the number of actual ports on the
- * board.
- *
- * This function must be modified for every new Octeon board.
- * Internally it uses switch statements based on the cvmx_sysinfo
- * data to determine board types and revisions. It relies on the
- * fact that every Octeon board receives a unique board type
- * enumeration from the bootloader.
- *
- * @interface: Interface to probe
- * @supported_ports:
- * Number of ports Octeon supports.
- *
- * Returns Number of ports the actual board supports. Many times this will
- * simply be "supported_ports".
- */
-int __cvmx_helper_board_interface_probe(int interface, int supported_ports)
-{
- switch (cvmx_sysinfo_get()->board_type) {
- case CVMX_BOARD_TYPE_CN3005_EVB_HS5:
- if (interface == 0)
- return 2;
- break;
- case CVMX_BOARD_TYPE_BBGW_REF:
- if (interface == 0)
- return 2;
- break;
- case CVMX_BOARD_TYPE_NIC_XLE_4G:
- if (interface == 0)
- return 0;
- break;
- /* The 2nd interface on the EBH5600 is connected to the Marvell switch,
- which we don't support. Disable ports connected to it */
- case CVMX_BOARD_TYPE_EBH5600:
- if (interface == 1)
- return 0;
- break;
- }
- return supported_ports;
-}
-
-/**
- * Enable packet input/output from the hardware. This function is
- * called by cvmx_helper_packet_hardware_enable() to
- * perform board specific initialization. For most boards
- * nothing is needed.
- *
- * @interface: Interface to enable
- *
- * Returns Zero on success, negative on failure
- */
-int __cvmx_helper_board_hardware_enable(int interface)
-{
- if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_CN3005_EVB_HS5) {
- if (interface == 0) {
- /* Different config for switch port */
- cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(1, interface), 0);
- cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(1, interface), 0);
- /*
- * Boards with gigabit WAN ports need a
- * different setting that is compatible with
- * 100 Mbit settings
- */
- cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(0, interface),
- 0xc);
- cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(0, interface),
- 0xc);
- }
- } else if (cvmx_sysinfo_get()->board_type ==
- CVMX_BOARD_TYPE_CN3010_EVB_HS5) {
- /*
- * Broadcom PHYs require different ASX
- * clocks. Unfortunately many boards don't define a
- * new board ID and simply reuse the
- * CN3010_EVB_HS5 one
- */
- if (interface == 0) {
- /*
- * Some boards use a hacked up bootloader that
- * identifies them as CN3010_EVB_HS5
- * evaluation boards. This leads to all kinds
- * of configuration problems. Detect one
- * case, and print warning, while trying to do
- * the right thing.
- */
- int phy_addr = cvmx_helper_board_get_mii_address(0);
- if (phy_addr != -1) {
- int phy_identifier =
- cvmx_mdio_read(phy_addr >> 8,
- phy_addr & 0xff, 0x2);
- /* Is it a Broadcom PHY? */
- if (phy_identifier == 0x0143) {
- cvmx_dprintf("\n");
- cvmx_dprintf("ERROR:\n");
- cvmx_dprintf
- ("ERROR: Board type is CVMX_BOARD_TYPE_CN3010_EVB_HS5, but Broadcom PHY found.\n");
- cvmx_dprintf
- ("ERROR: The board type is mis-configured, and software malfunctions are likely.\n");
- cvmx_dprintf
- ("ERROR: All boards require a unique board type to identify them.\n");
- cvmx_dprintf("ERROR:\n");
- cvmx_dprintf("\n");
- cvmx_wait(1000000000);
- cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX
- (0, interface), 5);
- cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX
- (0, interface), 5);
- }
- }
- }
- }
- return 0;
-}
diff --git a/drivers/staging/octeon/cvmx-helper-board.h b/drivers/staging/octeon/cvmx-helper-board.h
deleted file mode 100644
index b465bec43553..000000000000
--- a/drivers/staging/octeon/cvmx-helper-board.h
+++ /dev/null
@@ -1,151 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/**
- *
- * Helper functions to abstract board specific data about
- * network ports from the rest of the cvmx-helper files.
- *
- */
-#ifndef __CVMX_HELPER_BOARD_H__
-#define __CVMX_HELPER_BOARD_H__
-
-#include "cvmx-helper.h"
-
-typedef enum {
- set_phy_link_flags_autoneg = 0x1,
- set_phy_link_flags_flow_control_dont_touch = 0x0 << 1,
- set_phy_link_flags_flow_control_enable = 0x1 << 1,
- set_phy_link_flags_flow_control_disable = 0x2 << 1,
- set_phy_link_flags_flow_control_mask = 0x3 << 1, /* Mask for 2 bit wide flow control field */
-} cvmx_helper_board_set_phy_link_flags_types_t;
-
-/**
- * cvmx_override_board_link_get(int ipd_port) is a function
- * pointer. It is meant to allow customization of the process of
- * talking to a PHY to determine link speed. It is called every
- * time a PHY must be polled for link status. Users should set
- * this pointer to a function before calling any cvmx-helper
- * operations.
- */
-extern cvmx_helper_link_info_t(*cvmx_override_board_link_get) (int ipd_port);
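As a sketch only (the function name and the board-init hook it is installed from are illustrative, not part of the SDK), a board whose PHYs cannot be polled could force a fixed 1 Gbps link by installing an override before any cvmx-helper polling:

    static cvmx_helper_link_info_t my_fixed_link_get(int ipd_port)
    {
        cvmx_helper_link_info_t result;
        result.u64 = 0;           /* default all fields to zero/down */
        result.s.link_up = 1;     /* then report a forced-up link */
        result.s.full_duplex = 1;
        result.s.speed = 1000;    /* Mbps */
        return result;
    }

    /* From board init code, before cvmx-helper starts polling PHYs: */
    cvmx_override_board_link_get = my_fixed_link_get;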
-
-/**
- * Return the MII PHY address associated with the given IPD
- * port. A result of -1 means there isn't a MII capable PHY
- * connected to this port. On chips supporting multiple MII
- * busses the bus number is encoded in bits <15:8>.
- *
- * This function must be modified for every new Octeon board.
- * Internally it uses switch statements based on the cvmx_sysinfo
- * data to determine board types and revisions. It relies on the
- * fact that every Octeon board receives a unique board type
- * enumeration from the bootloader.
- *
- * @ipd_port: Octeon IPD port to get the MII address for.
- *
- * Returns MII PHY address and bus number or -1.
- */
-extern int cvmx_helper_board_get_mii_address(int ipd_port);
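For illustration only (the register read here is just an example), a caller splits the return value into a bus number and a PHY address the same way cvmx-helper-board.c does when it talks to the PHY:

    int phy_addr = cvmx_helper_board_get_mii_address(ipd_port);
    if (phy_addr != -1) {
        int bus = phy_addr >> 8;     /* MII bus number from bits <15:8> */
        int addr = phy_addr & 0xff;  /* PHY address on that bus */
        int phy_id = cvmx_mdio_read(bus, addr, 0x2); /* PHY identifier reg */
        cvmx_dprintf("PHY id on port %d: 0x%x\n", ipd_port, phy_id);
    }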
-
-/**
- * This function is a board specific method of changing the PHY
- * speed, duplex, and autonegotiation. This programs the PHY and
- * not Octeon. This can be used to force Octeon's links to
- * specific settings.
- *
- * @phy_addr: The address of the PHY to program
- * @link_flags:
- * Flags to control autonegotiation. Bit 0 is autonegotiation
- * enable/disable to maintain backward compatibility.
- * @link_info: Link speed to program. If the speed is zero and autonegotiation
- * is enabled, all possible negotiation speeds are advertised.
- *
- * Returns Zero on success, negative on failure
- */
-int cvmx_helper_board_link_set_phy(int phy_addr,
- cvmx_helper_board_set_phy_link_flags_types_t
- link_flags,
- cvmx_helper_link_info_t link_info);
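A hedged example of a call site (phy_addr is assumed to come from cvmx_helper_board_get_mii_address()); with the speed left at zero and autoneg requested, all supported modes are advertised:

    cvmx_helper_link_info_t link;
    link.u64 = 0;  /* speed == 0 plus autoneg => advertise everything */
    cvmx_helper_board_link_set_phy(phy_addr,
                                   set_phy_link_flags_autoneg |
                                   set_phy_link_flags_flow_control_enable,
                                   link);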
-
-/**
- * This function is the board specific method of determining an
- * Ethernet port's link speed. Most Octeon boards have Marvell PHYs
- * and are handled by the fall through case. This function must be
- * updated for boards that don't have the normal Marvell PHYs.
- *
- * This function must be modified for every new Octeon board.
- * Internally it uses switch statements based on the cvmx_sysinfo
- * data to determine board types and revisions. It relies on the
- * fact that every Octeon board receives a unique board type
- * enumeration from the bootloader.
- *
- * @ipd_port: IPD input port associated with the port we want to get link
- * status for.
- *
- * Returns The port's link status. If the link isn't fully resolved, this must
- * return zero.
- */
-extern cvmx_helper_link_info_t __cvmx_helper_board_link_get(int ipd_port);
-
-/**
- * This function is called by cvmx_helper_interface_probe() after it
- * determines the number of ports Octeon can support on a specific
- * interface. This function is the per board location to override
- * this value. It is called with the number of ports Octeon might
- * support and should return the number of actual ports on the
- * board.
- *
- * This function must be modified for every new Octeon board.
- * Internally it uses switch statements based on the cvmx_sysinfo
- * data to determine board types and revisions. It relies on the
- * fact that every Octeon board receives a unique board type
- * enumeration from the bootloader.
- *
- * @interface: Interface to probe
- * @supported_ports:
- * Number of ports Octeon supports.
- *
- * Returns Number of ports the actual board supports. Many times this will
- * simply be "supported_ports".
- */
-extern int __cvmx_helper_board_interface_probe(int interface,
- int supported_ports);
-
-/**
- * Enable packet input/output from the hardware. This function is
- * called by cvmx_helper_packet_hardware_enable() to
- * perform board specific initialization. For most boards
- * nothing is needed.
- *
- * @interface: Interface to enable
- *
- * Returns Zero on success, negative on failure
- */
-extern int __cvmx_helper_board_hardware_enable(int interface);
-
-#endif /* __CVMX_HELPER_BOARD_H__ */
diff --git a/drivers/staging/octeon/cvmx-helper-fpa.c b/drivers/staging/octeon/cvmx-helper-fpa.c
deleted file mode 100644
index c239e5f4ab9a..000000000000
--- a/drivers/staging/octeon/cvmx-helper-fpa.c
+++ /dev/null
@@ -1,243 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/**
- * @file
- *
- * Helper functions for FPA setup.
- *
- */
-#include "executive-config.h"
-#include "cvmx-config.h"
-#include "cvmx.h"
-#include "cvmx-bootmem.h"
-#include "cvmx-fpa.h"
-#include "cvmx-helper-fpa.h"
-
-/**
- * Allocate memory for and initialize a single FPA pool.
- *
- * @pool: Pool to initialize
- * @buffer_size: Size of buffers to allocate in bytes
- * @buffers: Number of buffers to put in the pool. Zero is allowed
- * @name: String name of the pool for debugging purposes
- * Returns Zero on success, non-zero on failure
- */
-static int __cvmx_helper_initialize_fpa_pool(int pool, uint64_t buffer_size,
- uint64_t buffers, const char *name)
-{
- uint64_t current_num;
- void *memory;
- uint64_t align = CVMX_CACHE_LINE_SIZE;
-
- /*
- * Align the allocation so that power of 2 size buffers are
- * naturally aligned.
- */
- while (align < buffer_size)
- align = align << 1;
-
- if (buffers == 0)
- return 0;
-
- current_num = cvmx_read_csr(CVMX_FPA_QUEX_AVAILABLE(pool));
- if (current_num) {
- cvmx_dprintf("Fpa pool %d(%s) already has %llu buffers. "
- "Skipping setup.\n",
- pool, name, (unsigned long long)current_num);
- return 0;
- }
-
- memory = cvmx_bootmem_alloc(buffer_size * buffers, align);
- if (memory == NULL) {
- cvmx_dprintf("Out of memory initializing fpa pool %d(%s).\n",
- pool, name);
- return -1;
- }
- cvmx_fpa_setup_pool(pool, name, memory, buffer_size, buffers);
- return 0;
-}
-
-/**
- * Allocate memory and initialize the FPA pools using memory
- * from cvmx-bootmem. Specifying zero for the number of
- * buffers will cause that FPA pool to not be setup. This is
- * useful if you aren't using some of the hardware and want
- * to save memory. Use cvmx_helper_initialize_fpa instead of
- * this function directly.
- *
- * @pip_pool: Should always be CVMX_FPA_PACKET_POOL
- * @pip_size: Should always be CVMX_FPA_PACKET_POOL_SIZE
- * @pip_buffers:
- * Number of packet buffers.
- * @wqe_pool: Should always be CVMX_FPA_WQE_POOL
- * @wqe_size: Should always be CVMX_FPA_WQE_POOL_SIZE
- * @wqe_entries:
- * Number of work queue entries
- * @pko_pool: Should always be CVMX_FPA_OUTPUT_BUFFER_POOL
- * @pko_size: Should always be CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE
- * @pko_buffers:
- * PKO Command buffers. You should have at least two for
- * each PKO queue.
- * @tim_pool: Should always be CVMX_FPA_TIMER_POOL
- * @tim_size: Should always be CVMX_FPA_TIMER_POOL_SIZE
- * @tim_buffers:
- * TIM ring buffer command queues. At least two per timer bucket
- * are recommended.
- * @dfa_pool: Should always be CVMX_FPA_DFA_POOL
- * @dfa_size: Should always be CVMX_FPA_DFA_POOL_SIZE
- * @dfa_buffers:
- * DFA command buffer. A relatively small (32 for example)
- * number should work.
- * Returns Zero on success, non-zero if out of memory
- */
-static int __cvmx_helper_initialize_fpa(int pip_pool, int pip_size,
- int pip_buffers, int wqe_pool,
- int wqe_size, int wqe_entries,
- int pko_pool, int pko_size,
- int pko_buffers, int tim_pool,
- int tim_size, int tim_buffers,
- int dfa_pool, int dfa_size,
- int dfa_buffers)
-{
- int status;
-
- cvmx_fpa_enable();
-
- if ((pip_buffers > 0) && (pip_buffers <= 64))
- cvmx_dprintf
- ("Warning: %d packet buffers may not be enough for hardware"
- " prefetch. 65 or more is recommended.\n", pip_buffers);
-
- if (pip_pool >= 0) {
- status =
- __cvmx_helper_initialize_fpa_pool(pip_pool, pip_size,
- pip_buffers,
- "Packet Buffers");
- if (status)
- return status;
- }
-
- if (wqe_pool >= 0) {
- status =
- __cvmx_helper_initialize_fpa_pool(wqe_pool, wqe_size,
- wqe_entries,
- "Work Queue Entries");
- if (status)
- return status;
- }
-
- if (pko_pool >= 0) {
- status =
- __cvmx_helper_initialize_fpa_pool(pko_pool, pko_size,
- pko_buffers,
- "PKO Command Buffers");
- if (status)
- return status;
- }
-
- if (tim_pool >= 0) {
- status =
- __cvmx_helper_initialize_fpa_pool(tim_pool, tim_size,
- tim_buffers,
- "TIM Command Buffers");
- if (status)
- return status;
- }
-
- if (dfa_pool >= 0) {
- status =
- __cvmx_helper_initialize_fpa_pool(dfa_pool, dfa_size,
- dfa_buffers,
- "DFA Command Buffers");
- if (status)
- return status;
- }
-
- return 0;
-}
-
-/**
- * Allocate memory and initialize the FPA pools using memory
- * from cvmx-bootmem. The sizes of the elements in the pools are
- * controlled by the cvmx-config.h header file. Specifying
- * zero for any parameter will cause that FPA pool to not be
- * setup. This is useful if you aren't using some of the
- * hardware and want to save memory.
- *
- * @packet_buffers:
- * Number of packet buffers to allocate
- * @work_queue_entries:
- * Number of work queue entries
- * @pko_buffers:
- * PKO Command buffers. You should have at least two for
- * each PKO queue.
- * @tim_buffers:
- * TIM ring buffer command queues. At least two per timer bucket
- * are recommended.
- * @dfa_buffers:
- * DFA command buffer. A relatively small (32 for example)
- * number should work.
- * Returns Zero on success, non-zero if out of memory
- */
-int cvmx_helper_initialize_fpa(int packet_buffers, int work_queue_entries,
- int pko_buffers, int tim_buffers,
- int dfa_buffers)
-{
-#ifndef CVMX_FPA_PACKET_POOL
-#define CVMX_FPA_PACKET_POOL -1
-#define CVMX_FPA_PACKET_POOL_SIZE 0
-#endif
-#ifndef CVMX_FPA_WQE_POOL
-#define CVMX_FPA_WQE_POOL -1
-#define CVMX_FPA_WQE_POOL_SIZE 0
-#endif
-#ifndef CVMX_FPA_OUTPUT_BUFFER_POOL
-#define CVMX_FPA_OUTPUT_BUFFER_POOL -1
-#define CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE 0
-#endif
-#ifndef CVMX_FPA_TIMER_POOL
-#define CVMX_FPA_TIMER_POOL -1
-#define CVMX_FPA_TIMER_POOL_SIZE 0
-#endif
-#ifndef CVMX_FPA_DFA_POOL
-#define CVMX_FPA_DFA_POOL -1
-#define CVMX_FPA_DFA_POOL_SIZE 0
-#endif
- return __cvmx_helper_initialize_fpa(CVMX_FPA_PACKET_POOL,
- CVMX_FPA_PACKET_POOL_SIZE,
- packet_buffers, CVMX_FPA_WQE_POOL,
- CVMX_FPA_WQE_POOL_SIZE,
- work_queue_entries,
- CVMX_FPA_OUTPUT_BUFFER_POOL,
- CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE,
- pko_buffers, CVMX_FPA_TIMER_POOL,
- CVMX_FPA_TIMER_POOL_SIZE,
- tim_buffers, CVMX_FPA_DFA_POOL,
- CVMX_FPA_DFA_POOL_SIZE,
- dfa_buffers);
-}
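A usage sketch; the buffer counts below are purely illustrative and must be sized for the application (zero skips a pool, as the comments above describe):

    if (cvmx_helper_initialize_fpa(1024 /* packet buffers */,
                                   1024 /* work queue entries */,
                                   64   /* PKO command buffers */,
                                   0    /* no TIM buffers */,
                                   0    /* no DFA buffers */))
        cvmx_dprintf("FPA initialization failed\n");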
diff --git a/drivers/staging/octeon/cvmx-helper-fpa.h b/drivers/staging/octeon/cvmx-helper-fpa.h
deleted file mode 100644
index 5ff8c93198de..000000000000
--- a/drivers/staging/octeon/cvmx-helper-fpa.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/**
- * @file
- *
- * Helper functions for FPA setup.
- *
- */
-#ifndef __CVMX_HELPER_H_FPA__
-#define __CVMX_HELPER_H_FPA__
-
-/**
- * Allocate memory and initialize the FPA pools using memory
- * from cvmx-bootmem. The sizes of the elements in the pools are
- * controlled by the cvmx-config.h header file. Specifying
- * zero for any parameter will cause that FPA pool to not be
- * setup. This is useful if you aren't using some of the
- * hardware and want to save memory.
- *
- * @packet_buffers:
- * Number of packet buffers to allocate
- * @work_queue_entries:
- * Number of work queue entries
- * @pko_buffers:
- * PKO Command buffers. You should have at least two for
- * each PKO queue.
- * @tim_buffers:
- * TIM ring buffer command queues. At least two per timer bucket
- * are recommended.
- * @dfa_buffers:
- * DFA command buffer. A relatively small (32 for example)
- * number should work.
- * Returns Zero on success, non-zero if out of memory
- */
-extern int cvmx_helper_initialize_fpa(int packet_buffers,
- int work_queue_entries, int pko_buffers,
- int tim_buffers, int dfa_buffers);
-
-#endif /* __CVMX_HELPER_H_FPA__ */
diff --git a/drivers/staging/octeon/cvmx-helper-loop.c b/drivers/staging/octeon/cvmx-helper-loop.c
deleted file mode 100644
index 55a571a69529..000000000000
--- a/drivers/staging/octeon/cvmx-helper-loop.c
+++ /dev/null
@@ -1,85 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/*
- * Functions for LOOP initialization, configuration,
- * and monitoring.
- */
-#include <asm/octeon/octeon.h>
-
-#include "cvmx-config.h"
-
-#include "cvmx-helper.h"
-#include "cvmx-pip-defs.h"
-
-/**
- * Probe a LOOP interface and determine the number of ports
- * connected to it. The LOOP interface should still be down
- * after this call.
- *
- * @interface: Interface to probe
- *
- * Returns Number of ports on the interface. Zero to disable.
- */
-int __cvmx_helper_loop_probe(int interface)
-{
- union cvmx_ipd_sub_port_fcs ipd_sub_port_fcs;
- int num_ports = 4;
- int port;
-
- /* We need to disable length checking so packets < 64 bytes and jumbo
- frames don't get errors */
- for (port = 0; port < num_ports; port++) {
- union cvmx_pip_prt_cfgx port_cfg;
- int ipd_port = cvmx_helper_get_ipd_port(interface, port);
- port_cfg.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(ipd_port));
- port_cfg.s.maxerr_en = 0;
- port_cfg.s.minerr_en = 0;
- cvmx_write_csr(CVMX_PIP_PRT_CFGX(ipd_port), port_cfg.u64);
- }
-
- /* Disable FCS stripping for loopback ports */
- ipd_sub_port_fcs.u64 = cvmx_read_csr(CVMX_IPD_SUB_PORT_FCS);
- ipd_sub_port_fcs.s.port_bit2 = 0;
- cvmx_write_csr(CVMX_IPD_SUB_PORT_FCS, ipd_sub_port_fcs.u64);
- return num_ports;
-}
-
-/**
- * Bringup and enable a LOOP interface. After this call packet
- * I/O should be fully functional. This is called with IPD
- * enabled but PKO disabled.
- *
- * @interface: Interface to bring up
- *
- * Returns Zero on success, negative on failure
- */
-int __cvmx_helper_loop_enable(int interface)
-{
- /* Do nothing. */
- return 0;
-}
diff --git a/drivers/staging/octeon/cvmx-helper-loop.h b/drivers/staging/octeon/cvmx-helper-loop.h
deleted file mode 100644
index e646a6ccce75..000000000000
--- a/drivers/staging/octeon/cvmx-helper-loop.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as published by
- * the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful,
- * but AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or NONINFRINGEMENT.
- * See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/**
- * @file
- *
- * Functions for LOOP initialization, configuration,
- * and monitoring.
- *
- */
-#ifndef __CVMX_HELPER_LOOP_H__
-#define __CVMX_HELPER_LOOP_H__
-
-/**
- * Probe a LOOP interface and determine the number of ports
- * connected to it. The LOOP interface should still be down after
- * this call.
- *
- * @interface: Interface to probe
- *
- * Returns Number of ports on the interface. Zero to disable.
- */
-extern int __cvmx_helper_loop_probe(int interface);
-
-/**
- * Bringup and enable a LOOP interface. After this call packet
- * I/O should be fully functional. This is called with IPD
- * enabled but PKO disabled.
- *
- * @interface: Interface to bring up
- *
- * Returns Zero on success, negative on failure
- */
-extern int __cvmx_helper_loop_enable(int interface);
-
-#endif
diff --git a/drivers/staging/octeon/cvmx-helper-npi.c b/drivers/staging/octeon/cvmx-helper-npi.c
deleted file mode 100644
index 7388a1e72b38..000000000000
--- a/drivers/staging/octeon/cvmx-helper-npi.c
+++ /dev/null
@@ -1,113 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/*
- * Functions for NPI initialization, configuration,
- * and monitoring.
- */
-#include <asm/octeon/octeon.h>
-
-#include "cvmx-config.h"
-
-#include "cvmx-helper.h"
-
-#include "cvmx-pip-defs.h"
-
-/**
- * Probe a NPI interface and determine the number of ports
- * connected to it. The NPI interface should still be down
- * after this call.
- *
- * @interface: Interface to probe
- *
- * Returns Number of ports on the interface. Zero to disable.
- */
-int __cvmx_helper_npi_probe(int interface)
-{
-#if CVMX_PKO_QUEUES_PER_PORT_PCI > 0
- if (OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX))
- return 4;
- else if (OCTEON_IS_MODEL(OCTEON_CN56XX)
- && !OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X))
- /* The packet engines didn't exist before pass 2 */
- return 4;
- else if (OCTEON_IS_MODEL(OCTEON_CN52XX)
- && !OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X))
- /* The packet engines didn't exist before pass 2 */
- return 4;
-#if 0
- /*
- * Technically CN30XX, CN31XX, and CN50XX contain packet
- * engines, but nobody ever uses them. Since this is the case,
- * we disable them here.
- */
- else if (OCTEON_IS_MODEL(OCTEON_CN31XX)
- || OCTEON_IS_MODEL(OCTEON_CN50XX))
- return 2;
- else if (OCTEON_IS_MODEL(OCTEON_CN30XX))
- return 1;
-#endif
-#endif
- return 0;
-}
-
-/**
- * Bringup and enable a NPI interface. After this call packet
- * I/O should be fully functional. This is called with IPD
- * enabled but PKO disabled.
- *
- * @interface: Interface to bring up
- *
- * Returns Zero on success, negative on failure
- */
-int __cvmx_helper_npi_enable(int interface)
-{
- /*
- * On CN50XX, CN52XX, and CN56XX we need to disable length
- * checking so packets < 64 bytes and jumbo frames don't get
- * errors.
- */
- if (!OCTEON_IS_MODEL(OCTEON_CN3XXX) &&
- !OCTEON_IS_MODEL(OCTEON_CN58XX)) {
- int num_ports = cvmx_helper_ports_on_interface(interface);
- int port;
- for (port = 0; port < num_ports; port++) {
- union cvmx_pip_prt_cfgx port_cfg;
- int ipd_port =
- cvmx_helper_get_ipd_port(interface, port);
- port_cfg.u64 =
- cvmx_read_csr(CVMX_PIP_PRT_CFGX(ipd_port));
- port_cfg.s.maxerr_en = 0;
- port_cfg.s.minerr_en = 0;
- cvmx_write_csr(CVMX_PIP_PRT_CFGX(ipd_port),
- port_cfg.u64);
- }
- }
-
- /* Enables are controlled by the remote host, so nothing to do here */
- return 0;
-}
diff --git a/drivers/staging/octeon/cvmx-helper-npi.h b/drivers/staging/octeon/cvmx-helper-npi.h
deleted file mode 100644
index 908e7b08c214..000000000000
--- a/drivers/staging/octeon/cvmx-helper-npi.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/**
- * @file
- *
- * Functions for NPI initialization, configuration,
- * and monitoring.
- *
- */
-#ifndef __CVMX_HELPER_NPI_H__
-#define __CVMX_HELPER_NPI_H__
-
-/**
- * Probe a NPI interface and determine the number of ports
- * connected to it. The NPI interface should still be down after
- * this call.
- *
- * @interface: Interface to probe
- *
- * Returns Number of ports on the interface. Zero to disable.
- */
-extern int __cvmx_helper_npi_probe(int interface);
-
-/**
- * Bringup and enable a NPI interface. After this call packet
- * I/O should be fully functional. This is called with IPD
- * enabled but PKO disabled.
- *
- * @interface: Interface to bring up
- *
- * Returns Zero on success, negative on failure
- */
-extern int __cvmx_helper_npi_enable(int interface);
-
-#endif
diff --git a/drivers/staging/octeon/cvmx-helper-rgmii.c b/drivers/staging/octeon/cvmx-helper-rgmii.c
deleted file mode 100644
index aa2d5d7fee2b..000000000000
--- a/drivers/staging/octeon/cvmx-helper-rgmii.c
+++ /dev/null
@@ -1,525 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/*
- * Functions for RGMII/GMII/MII initialization, configuration,
- * and monitoring.
- */
-#include <asm/octeon/octeon.h>
-
-#include "cvmx-config.h"
-
-
-#include "cvmx-mdio.h"
-#include "cvmx-pko.h"
-#include "cvmx-helper.h"
-#include "cvmx-helper-board.h"
-
-#include <asm/octeon/cvmx-npi-defs.h>
-#include "cvmx-gmxx-defs.h"
-#include "cvmx-asxx-defs.h"
-#include "cvmx-dbg-defs.h"
-
-void __cvmx_interrupt_gmxx_enable(int interface);
-void __cvmx_interrupt_asxx_enable(int block);
-
-/**
- * Probe RGMII ports and determine the number present
- *
- * @interface: Interface to probe
- *
- * Returns Number of RGMII/GMII/MII ports (0-4).
- */
-int __cvmx_helper_rgmii_probe(int interface)
-{
- int num_ports = 0;
- union cvmx_gmxx_inf_mode mode;
- mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
-
- if (mode.s.type) {
- if (OCTEON_IS_MODEL(OCTEON_CN38XX)
- || OCTEON_IS_MODEL(OCTEON_CN58XX)) {
- cvmx_dprintf("ERROR: RGMII initialize called in "
- "SPI interface\n");
- } else if (OCTEON_IS_MODEL(OCTEON_CN31XX)
- || OCTEON_IS_MODEL(OCTEON_CN30XX)
- || OCTEON_IS_MODEL(OCTEON_CN50XX)) {
- /*
- * On these chips "type" says we're in
- * GMII/MII mode. This limits us to 2 ports
- */
- num_ports = 2;
- } else {
- cvmx_dprintf("ERROR: Unsupported Octeon model in %s\n",
- __func__);
- }
- } else {
- if (OCTEON_IS_MODEL(OCTEON_CN38XX)
- || OCTEON_IS_MODEL(OCTEON_CN58XX)) {
- num_ports = 4;
- } else if (OCTEON_IS_MODEL(OCTEON_CN31XX)
- || OCTEON_IS_MODEL(OCTEON_CN30XX)
- || OCTEON_IS_MODEL(OCTEON_CN50XX)) {
- num_ports = 3;
- } else {
- cvmx_dprintf("ERROR: Unsupported Octeon model in %s\n",
- __func__);
- }
- }
- return num_ports;
-}
-
-/**
- * Put an RGMII interface in loopback mode. Internal packets sent
- * out will be received back again on the same port. Externally
- * received packets will echo back out.
- *
- * @port: IPD port number to loop.
- */
-void cvmx_helper_rgmii_internal_loopback(int port)
-{
- int interface = (port >> 4) & 1;
- int index = port & 0xf;
- uint64_t tmp;
-
- union cvmx_gmxx_prtx_cfg gmx_cfg;
- gmx_cfg.u64 = 0;
- gmx_cfg.s.duplex = 1;
- gmx_cfg.s.slottime = 1;
- gmx_cfg.s.speed = 1;
- cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 1);
- cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x200);
- cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0x2000);
- cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
- tmp = cvmx_read_csr(CVMX_ASXX_PRT_LOOP(interface));
- cvmx_write_csr(CVMX_ASXX_PRT_LOOP(interface), (1 << index) | tmp);
- tmp = cvmx_read_csr(CVMX_ASXX_TX_PRT_EN(interface));
- cvmx_write_csr(CVMX_ASXX_TX_PRT_EN(interface), (1 << index) | tmp);
- tmp = cvmx_read_csr(CVMX_ASXX_RX_PRT_EN(interface));
- cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface), (1 << index) | tmp);
- gmx_cfg.s.en = 1;
- cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
-}
-
-/**
- * Workaround ASX setup errata with CN38XX pass1
- *
- * @interface: Interface to setup
- * @port: Port to setup (0..3)
- * @cpu_clock_hz:
- * Chip frequency in Hertz
- *
- * Returns Zero on success, negative on failure
- */
-static int __cvmx_helper_errata_asx_pass1(int interface, int port,
- int cpu_clock_hz)
-{
- /* Set hi water mark as per errata GMX-4 */
- if (cpu_clock_hz >= 325000000 && cpu_clock_hz < 375000000)
- cvmx_write_csr(CVMX_ASXX_TX_HI_WATERX(port, interface), 12);
- else if (cpu_clock_hz >= 375000000 && cpu_clock_hz < 437000000)
- cvmx_write_csr(CVMX_ASXX_TX_HI_WATERX(port, interface), 11);
- else if (cpu_clock_hz >= 437000000 && cpu_clock_hz < 550000000)
- cvmx_write_csr(CVMX_ASXX_TX_HI_WATERX(port, interface), 10);
- else if (cpu_clock_hz >= 550000000 && cpu_clock_hz < 687000000)
- cvmx_write_csr(CVMX_ASXX_TX_HI_WATERX(port, interface), 9);
- else
- cvmx_dprintf("Illegal clock frequency (%d). "
- "CVMX_ASXX_TX_HI_WATERX not set\n", cpu_clock_hz);
- return 0;
-}
-
-/**
- * Configure all of the ASX, GMX, and PKO registers required
- * to get RGMII to function on the supplied interface.
- *
- * @interface: PKO Interface to configure (0 or 1)
- *
- * Returns Zero on success
- */
-int __cvmx_helper_rgmii_enable(int interface)
-{
- int num_ports = cvmx_helper_ports_on_interface(interface);
- int port;
- struct cvmx_sysinfo *sys_info_ptr = cvmx_sysinfo_get();
- union cvmx_gmxx_inf_mode mode;
- union cvmx_asxx_tx_prt_en asx_tx;
- union cvmx_asxx_rx_prt_en asx_rx;
-
- mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
-
- if (mode.s.en == 0)
- return -1;
- if ((OCTEON_IS_MODEL(OCTEON_CN38XX) ||
- OCTEON_IS_MODEL(OCTEON_CN58XX)) && mode.s.type == 1)
- /* Ignore SPI interfaces */
- return -1;
-
- /* Configure the ASX registers needed to use the RGMII ports */
- asx_tx.u64 = 0;
- asx_tx.s.prt_en = cvmx_build_mask(num_ports);
- cvmx_write_csr(CVMX_ASXX_TX_PRT_EN(interface), asx_tx.u64);
-
- asx_rx.u64 = 0;
- asx_rx.s.prt_en = cvmx_build_mask(num_ports);
- cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface), asx_rx.u64);
-
- /* Configure the GMX registers needed to use the RGMII ports */
- for (port = 0; port < num_ports; port++) {
- /* Setting of CVMX_GMXX_TXX_THRESH has been moved to
- __cvmx_helper_setup_gmx() */
-
- if (cvmx_octeon_is_pass1())
- __cvmx_helper_errata_asx_pass1(interface, port,
- sys_info_ptr->
- cpu_clock_hz);
- else {
- /*
- * Configure more flexible RGMII preamble
- * checking. Pass 1 doesn't support this
- * feature.
- */
- union cvmx_gmxx_rxx_frm_ctl frm_ctl;
- frm_ctl.u64 =
- cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL
- (port, interface));
- /* New field, so must be compile time */
- frm_ctl.s.pre_free = 1;
- cvmx_write_csr(CVMX_GMXX_RXX_FRM_CTL(port, interface),
- frm_ctl.u64);
- }
-
- /*
- * Each pause frame transmitted will ask for about 10M
- * bit times before resume. If buffer space comes
- * available before that time has expired, an XON
- * pause frame (0 time) will be transmitted to restart
- * the flow.
- */
- cvmx_write_csr(CVMX_GMXX_TXX_PAUSE_PKT_TIME(port, interface),
- 20000);
- cvmx_write_csr(CVMX_GMXX_TXX_PAUSE_PKT_INTERVAL
- (port, interface), 19000);
-
- if (OCTEON_IS_MODEL(OCTEON_CN50XX)) {
- cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, interface),
- 16);
- cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, interface),
- 16);
- } else {
- cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, interface),
- 24);
- cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, interface),
- 24);
- }
- }
-
- __cvmx_helper_setup_gmx(interface, num_ports);
-
- /* enable the ports now */
- for (port = 0; port < num_ports; port++) {
- union cvmx_gmxx_prtx_cfg gmx_cfg;
- cvmx_helper_link_autoconf(cvmx_helper_get_ipd_port
- (interface, port));
- gmx_cfg.u64 =
- cvmx_read_csr(CVMX_GMXX_PRTX_CFG(port, interface));
- gmx_cfg.s.en = 1;
- cvmx_write_csr(CVMX_GMXX_PRTX_CFG(port, interface),
- gmx_cfg.u64);
- }
- __cvmx_interrupt_asxx_enable(interface);
- __cvmx_interrupt_gmxx_enable(interface);
-
- return 0;
-}
-
-/**
- * Return the link state of an IPD/PKO port as returned by
- * auto negotiation. The result of this function may not match
- * Octeon's link config if auto negotiation has changed since
- * the last call to cvmx_helper_link_set().
- *
- * @ipd_port: IPD/PKO port to query
- *
- * Returns Link state
- */
-cvmx_helper_link_info_t __cvmx_helper_rgmii_link_get(int ipd_port)
-{
- int interface = cvmx_helper_get_interface_num(ipd_port);
- int index = cvmx_helper_get_interface_index_num(ipd_port);
- union cvmx_asxx_prt_loop asxx_prt_loop;
-
- asxx_prt_loop.u64 = cvmx_read_csr(CVMX_ASXX_PRT_LOOP(interface));
- if (asxx_prt_loop.s.int_loop & (1 << index)) {
- /* Force 1Gbps full duplex on internal loopback */
- cvmx_helper_link_info_t result;
- result.u64 = 0;
- result.s.full_duplex = 1;
- result.s.link_up = 1;
- result.s.speed = 1000;
- return result;
- } else
- return __cvmx_helper_board_link_get(ipd_port);
-}
-
-/**
- * Configure an IPD/PKO port for the specified link state. This
- * function does not influence auto negotiation at the PHY level.
- * The passed link state must always match the link state returned
- * by cvmx_helper_link_get(). It is normally best to use
- * cvmx_helper_link_autoconf() instead.
- *
- * @ipd_port: IPD/PKO port to configure
- * @link_info: The new link state
- *
- * Returns Zero on success, negative on failure
- */
-int __cvmx_helper_rgmii_link_set(int ipd_port,
- cvmx_helper_link_info_t link_info)
-{
- int result = 0;
- int interface = cvmx_helper_get_interface_num(ipd_port);
- int index = cvmx_helper_get_interface_index_num(ipd_port);
- union cvmx_gmxx_prtx_cfg original_gmx_cfg;
- union cvmx_gmxx_prtx_cfg new_gmx_cfg;
- union cvmx_pko_mem_queue_qos pko_mem_queue_qos;
- union cvmx_pko_mem_queue_qos pko_mem_queue_qos_save[16];
- union cvmx_gmxx_tx_ovr_bp gmx_tx_ovr_bp;
- union cvmx_gmxx_tx_ovr_bp gmx_tx_ovr_bp_save;
- int i;
-
- /* Ignore speed sets in the simulator */
- if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM)
- return 0;
-
- /* Read the current settings so we know the current enable state */
- original_gmx_cfg.u64 =
- cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
- new_gmx_cfg = original_gmx_cfg;
-
- /* Disable the lowest level RX */
- cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface),
- cvmx_read_csr(CVMX_ASXX_RX_PRT_EN(interface)) &
- ~(1 << index));
-
- /* Disable all queues so that TX should become idle */
- for (i = 0; i < cvmx_pko_get_num_queues(ipd_port); i++) {
- int queue = cvmx_pko_get_base_queue(ipd_port) + i;
- cvmx_write_csr(CVMX_PKO_REG_READ_IDX, queue);
- pko_mem_queue_qos.u64 = cvmx_read_csr(CVMX_PKO_MEM_QUEUE_QOS);
- pko_mem_queue_qos.s.pid = ipd_port;
- pko_mem_queue_qos.s.qid = queue;
- pko_mem_queue_qos_save[i] = pko_mem_queue_qos;
- pko_mem_queue_qos.s.qos_mask = 0;
- cvmx_write_csr(CVMX_PKO_MEM_QUEUE_QOS, pko_mem_queue_qos.u64);
- }
-
- /* Disable backpressure */
- gmx_tx_ovr_bp.u64 = cvmx_read_csr(CVMX_GMXX_TX_OVR_BP(interface));
- gmx_tx_ovr_bp_save = gmx_tx_ovr_bp;
- gmx_tx_ovr_bp.s.bp &= ~(1 << index);
- gmx_tx_ovr_bp.s.en |= 1 << index;
- cvmx_write_csr(CVMX_GMXX_TX_OVR_BP(interface), gmx_tx_ovr_bp.u64);
- cvmx_read_csr(CVMX_GMXX_TX_OVR_BP(interface));
-
- /*
- * Poll the GMX state machine waiting for it to become
- * idle. Preferably we should only change speed when it is
- * idle. If it doesn't become idle we will still do the speed
- * change, but there is a slight chance that GMX will
- * lockup.
- */
- cvmx_write_csr(CVMX_NPI_DBG_SELECT,
- interface * 0x800 + index * 0x100 + 0x880);
- CVMX_WAIT_FOR_FIELD64(CVMX_DBG_DATA, union cvmx_dbg_data, data & 7,
- ==, 0, 10000);
- CVMX_WAIT_FOR_FIELD64(CVMX_DBG_DATA, union cvmx_dbg_data, data & 0xf,
- ==, 0, 10000);
-
- /* Disable the port before we make any changes */
- new_gmx_cfg.s.en = 0;
- cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), new_gmx_cfg.u64);
- cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
-
- /* Set full/half duplex */
- if (cvmx_octeon_is_pass1())
- /* Half duplex is broken for 38XX Pass 1 */
- new_gmx_cfg.s.duplex = 1;
- else if (!link_info.s.link_up)
- /* Force full duplex on down links */
- new_gmx_cfg.s.duplex = 1;
- else
- new_gmx_cfg.s.duplex = link_info.s.full_duplex;
-
- /* Set the link speed. Anything unknown is set to 1Gbps */
- if (link_info.s.speed == 10) {
- new_gmx_cfg.s.slottime = 0;
- new_gmx_cfg.s.speed = 0;
- } else if (link_info.s.speed == 100) {
- new_gmx_cfg.s.slottime = 0;
- new_gmx_cfg.s.speed = 0;
- } else {
- new_gmx_cfg.s.slottime = 1;
- new_gmx_cfg.s.speed = 1;
- }
-
- /* Adjust the clocks */
- if (link_info.s.speed == 10) {
- cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 50);
- cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x40);
- cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0);
- } else if (link_info.s.speed == 100) {
- cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 5);
- cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x40);
- cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0);
- } else {
- cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 1);
- cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x200);
- cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0x2000);
- }
-
- if (OCTEON_IS_MODEL(OCTEON_CN30XX) || OCTEON_IS_MODEL(OCTEON_CN50XX)) {
- if ((link_info.s.speed == 10) || (link_info.s.speed == 100)) {
- union cvmx_gmxx_inf_mode mode;
- mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
-
- /*
- * Port .en .type .p0mii Configuration
- * ---- --- ----- ------ -----------------------------------------
- * X 0 X X All links are disabled.
- * 0 1 X 0 Port 0 is RGMII
- * 0 1 X 1 Port 0 is MII
- * 1 1 0 X Ports 1 and 2 are configured as RGMII ports.
- * 1 1 1 X Port 1: GMII/MII; Port 2: disabled. GMII or
- * MII port is selected by GMX_PRT1_CFG[SPEED].
- */
-
- /* In MII mode, CLK_CNT = 1. */
- if (((index == 0) && (mode.s.p0mii == 1))
- || ((index != 0) && (mode.s.type == 1))) {
- cvmx_write_csr(CVMX_GMXX_TXX_CLK
- (index, interface), 1);
- }
- }
- }
-
- /* Do a read to make sure all setup stuff is complete */
- cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
-
- /* Save the new GMX setting without enabling the port */
- cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), new_gmx_cfg.u64);
-
- /* Enable the lowest level RX */
- cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface),
- cvmx_read_csr(CVMX_ASXX_RX_PRT_EN(interface)) | (1 <<
- index));
-
- /* Re-enable the TX path */
- for (i = 0; i < cvmx_pko_get_num_queues(ipd_port); i++) {
- int queue = cvmx_pko_get_base_queue(ipd_port) + i;
- cvmx_write_csr(CVMX_PKO_REG_READ_IDX, queue);
- cvmx_write_csr(CVMX_PKO_MEM_QUEUE_QOS,
- pko_mem_queue_qos_save[i].u64);
- }
-
- /* Restore backpressure */
- cvmx_write_csr(CVMX_GMXX_TX_OVR_BP(interface), gmx_tx_ovr_bp_save.u64);
-
- /* Restore the GMX enable state. Port config is complete */
- new_gmx_cfg.s.en = original_gmx_cfg.s.en;
- cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), new_gmx_cfg.u64);
-
- return result;
-}
-
-/**
- * Configure a port for internal and/or external loopback. Internal loopback
- * causes packets sent by the port to be received by Octeon. External loopback
- * causes packets received from the wire to be sent out again.
- *
- * @ipd_port: IPD/PKO port to loopback.
- * @enable_internal:
- * Non zero if you want internal loopback
- * @enable_external:
- * Non zero if you want external loopback
- *
- * Returns Zero on success, negative on failure.
- */
-int __cvmx_helper_rgmii_configure_loopback(int ipd_port, int enable_internal,
- int enable_external)
-{
- int interface = cvmx_helper_get_interface_num(ipd_port);
- int index = cvmx_helper_get_interface_index_num(ipd_port);
- int original_enable;
- union cvmx_gmxx_prtx_cfg gmx_cfg;
- union cvmx_asxx_prt_loop asxx_prt_loop;
-
- /* Read the current enable state and save it */
- gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
- original_enable = gmx_cfg.s.en;
- /* Force port to be disabled */
- gmx_cfg.s.en = 0;
- if (enable_internal) {
- /* Force speed if we're doing internal loopback */
- gmx_cfg.s.duplex = 1;
- gmx_cfg.s.slottime = 1;
- gmx_cfg.s.speed = 1;
- cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 1);
- cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x200);
- cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0x2000);
- }
- cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
-
- /* Set the loopback bits */
- asxx_prt_loop.u64 = cvmx_read_csr(CVMX_ASXX_PRT_LOOP(interface));
- if (enable_internal)
- asxx_prt_loop.s.int_loop |= 1 << index;
- else
- asxx_prt_loop.s.int_loop &= ~(1 << index);
- if (enable_external)
- asxx_prt_loop.s.ext_loop |= 1 << index;
- else
- asxx_prt_loop.s.ext_loop &= ~(1 << index);
- cvmx_write_csr(CVMX_ASXX_PRT_LOOP(interface), asxx_prt_loop.u64);
-
- /* Force enables in internal loopback */
- if (enable_internal) {
- uint64_t tmp;
- tmp = cvmx_read_csr(CVMX_ASXX_TX_PRT_EN(interface));
- cvmx_write_csr(CVMX_ASXX_TX_PRT_EN(interface),
- (1 << index) | tmp);
- tmp = cvmx_read_csr(CVMX_ASXX_RX_PRT_EN(interface));
- cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface),
- (1 << index) | tmp);
- original_enable = 1;
- }
-
- /* Restore the enable state */
- gmx_cfg.s.en = original_enable;
- cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
- return 0;
-}
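
For reference, a minimal usage sketch of the loopback helper above; the wrapper function, its name and the chosen port are illustrative assumptions, not part of the original file:

/* Hypothetical caller: run an IPD port in internal loopback for a
 * self-test, then restore normal operation. Assumes the OCTEON SDK
 * headers used by this file are available. */
static int rgmii_loopback_selftest(int ipd_port)
{
	/* Internal loopback on, external loopback off */
	if (__cvmx_helper_rgmii_configure_loopback(ipd_port, 1, 0))
		return -1;

	/* ... send and receive test packets here ... */

	/* Disable both loopback paths again */
	return __cvmx_helper_rgmii_configure_loopback(ipd_port, 0, 0);
}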
diff --git a/drivers/staging/octeon/cvmx-helper-rgmii.h b/drivers/staging/octeon/cvmx-helper-rgmii.h
deleted file mode 100644
index ea2652604a57..000000000000
--- a/drivers/staging/octeon/cvmx-helper-rgmii.h
+++ /dev/null
@@ -1,110 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/**
- * @file
- *
- * Functions for RGMII/GMII/MII initialization, configuration,
- * and monitoring.
- *
- */
-#ifndef __CVMX_HELPER_RGMII_H__
-#define __CVMX_HELPER_RGMII_H__
-
-/**
- * Probe RGMII ports and determine the number present
- *
- * @interface: Interface to probe
- *
- * Returns Number of RGMII/GMII/MII ports (0-4).
- */
-extern int __cvmx_helper_rgmii_probe(int interface);
-
-/**
- * Put an RGMII interface in loopback mode. Internal packets sent
- * out will be received back again on the same port. Externally
- * received packets will echo back out.
- *
- * @port: IPD port number to loop.
- */
-extern void cvmx_helper_rgmii_internal_loopback(int port);
-
-/**
- * Configure all of the ASX, GMX, and PKO registers required
- * to get RGMII to function on the supplied interface.
- *
- * @interface: PKO Interface to configure (0 or 1)
- *
- * Returns Zero on success
- */
-extern int __cvmx_helper_rgmii_enable(int interface);
-
-/**
- * Return the link state of an IPD/PKO port as returned by
- * auto negotiation. The result of this function may not match
- * Octeon's link config if auto negotiation has changed since
- * the last call to cvmx_helper_link_set().
- *
- * @ipd_port: IPD/PKO port to query
- *
- * Returns Link state
- */
-extern cvmx_helper_link_info_t __cvmx_helper_rgmii_link_get(int ipd_port);
-
-/**
- * Configure an IPD/PKO port for the specified link state. This
- * function does not influence auto negotiation at the PHY level.
- * The passed link state must always match the link state returned
- * by cvmx_helper_link_get(). It is normally best to use
- * cvmx_helper_link_autoconf() instead.
- *
- * @ipd_port: IPD/PKO port to configure
- * @link_info: The new link state
- *
- * Returns Zero on success, negative on failure
- */
-extern int __cvmx_helper_rgmii_link_set(int ipd_port,
- cvmx_helper_link_info_t link_info);
-
-/**
- * Configure a port for internal and/or external loopback. Internal loopback
- * causes packets sent by the port to be received by Octeon. External loopback
- * causes packets received from the wire to be sent out again.
- *
- * @ipd_port: IPD/PKO port to loopback.
- * @enable_internal:
- * Non zero if you want internal loopback
- * @enable_external:
- * Non zero if you want external loopback
- *
- * Returns Zero on success, negative on failure.
- */
-extern int __cvmx_helper_rgmii_configure_loopback(int ipd_port,
- int enable_internal,
- int enable_external);
-
-#endif
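
As a rough sketch of how the declarations above are intended to be used during bring-up (probe first, then enable); the wrapper below is an assumption for illustration only:

/* Hypothetical bring-up sketch: probe the RGMII interface and enable
 * it only if ports were detected. */
static int rgmii_bringup(int interface)
{
	int num_ports = __cvmx_helper_rgmii_probe(interface);

	if (num_ports == 0)
		return 0;	/* nothing connected; leave interface down */

	return __cvmx_helper_rgmii_enable(interface);
}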
diff --git a/drivers/staging/octeon/cvmx-helper-sgmii.c b/drivers/staging/octeon/cvmx-helper-sgmii.c
deleted file mode 100644
index 6214e3b6d975..000000000000
--- a/drivers/staging/octeon/cvmx-helper-sgmii.c
+++ /dev/null
@@ -1,550 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/*
- * Functions for SGMII initialization, configuration,
- * and monitoring.
- */
-
-#include <asm/octeon/octeon.h>
-
-#include "cvmx-config.h"
-
-#include "cvmx-mdio.h"
-#include "cvmx-helper.h"
-#include "cvmx-helper-board.h"
-
-#include "cvmx-gmxx-defs.h"
-#include "cvmx-pcsx-defs.h"
-
-void __cvmx_interrupt_gmxx_enable(int interface);
-void __cvmx_interrupt_pcsx_intx_en_reg_enable(int index, int block);
-void __cvmx_interrupt_pcsxx_int_en_reg_enable(int index);
-
-/**
- * Perform initialization required only once for an SGMII port.
- *
- * @interface: Interface to init
- * @index: Index of port on the interface
- *
- * Returns Zero on success, negative on failure
- */
-static int __cvmx_helper_sgmii_hardware_init_one_time(int interface, int index)
-{
- const uint64_t clock_mhz = cvmx_sysinfo_get()->cpu_clock_hz / 1000000;
- union cvmx_pcsx_miscx_ctl_reg pcs_misc_ctl_reg;
- union cvmx_pcsx_linkx_timer_count_reg pcsx_linkx_timer_count_reg;
- union cvmx_gmxx_prtx_cfg gmxx_prtx_cfg;
-
- /* Disable GMX */
- gmxx_prtx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
- gmxx_prtx_cfg.s.en = 0;
- cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmxx_prtx_cfg.u64);
-
- /*
- * Write PCS*_LINK*_TIMER_COUNT_REG[COUNT] with the
- * appropriate value. 1000BASE-X specifies a 10ms
- * interval. SGMII specifies a 1.6ms interval.
- */
- pcs_misc_ctl_reg.u64 =
- cvmx_read_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface));
- pcsx_linkx_timer_count_reg.u64 =
- cvmx_read_csr(CVMX_PCSX_LINKX_TIMER_COUNT_REG(index, interface));
- if (pcs_misc_ctl_reg.s.mode) {
- /* 1000BASE-X */
- pcsx_linkx_timer_count_reg.s.count =
- (10000ull * clock_mhz) >> 10;
- } else {
- /* SGMII */
- pcsx_linkx_timer_count_reg.s.count =
- (1600ull * clock_mhz) >> 10;
- }
- cvmx_write_csr(CVMX_PCSX_LINKX_TIMER_COUNT_REG(index, interface),
- pcsx_linkx_timer_count_reg.u64);
-
- /*
- * Write the advertisement register to be used as the
- * tx_Config_Reg<D15:D0> of the autonegotiation. In
- * 1000BASE-X mode, tx_Config_Reg<D15:D0> is PCS*_AN*_ADV_REG.
- * In SGMII PHY mode, tx_Config_Reg<D15:D0> is
- * PCS*_SGM*_AN_ADV_REG. In SGMII MAC mode,
- * tx_Config_Reg<D15:D0> is the fixed value 0x4001, so this
- * step can be skipped.
- */
- if (pcs_misc_ctl_reg.s.mode) {
- /* 1000BASE-X */
- union cvmx_pcsx_anx_adv_reg pcsx_anx_adv_reg;
- pcsx_anx_adv_reg.u64 =
- cvmx_read_csr(CVMX_PCSX_ANX_ADV_REG(index, interface));
- pcsx_anx_adv_reg.s.rem_flt = 0;
- pcsx_anx_adv_reg.s.pause = 3;
- pcsx_anx_adv_reg.s.hfd = 1;
- pcsx_anx_adv_reg.s.fd = 1;
- cvmx_write_csr(CVMX_PCSX_ANX_ADV_REG(index, interface),
- pcsx_anx_adv_reg.u64);
- } else {
- union cvmx_pcsx_miscx_ctl_reg pcsx_miscx_ctl_reg;
- pcsx_miscx_ctl_reg.u64 =
- cvmx_read_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface));
- if (pcsx_miscx_ctl_reg.s.mac_phy) {
- /* PHY Mode */
- union cvmx_pcsx_sgmx_an_adv_reg pcsx_sgmx_an_adv_reg;
- pcsx_sgmx_an_adv_reg.u64 =
- cvmx_read_csr(CVMX_PCSX_SGMX_AN_ADV_REG
- (index, interface));
- pcsx_sgmx_an_adv_reg.s.link = 1;
- pcsx_sgmx_an_adv_reg.s.dup = 1;
- pcsx_sgmx_an_adv_reg.s.speed = 2;
- cvmx_write_csr(CVMX_PCSX_SGMX_AN_ADV_REG
- (index, interface),
- pcsx_sgmx_an_adv_reg.u64);
- } else {
- /* MAC Mode - Nothing to do */
- }
- }
- return 0;
-}
-
-/**
- * Initialize the SERDES link for the first time or after a loss
- * of link.
- *
- * @interface: Interface to init
- * @index: Index of port on the interface
- *
- * Returns Zero on success, negative on failure
- */
-static int __cvmx_helper_sgmii_hardware_init_link(int interface, int index)
-{
- union cvmx_pcsx_mrx_control_reg control_reg;
-
- /*
- * Take PCS through a reset sequence.
- * PCS*_MR*_CONTROL_REG[PWR_DN] should be cleared to zero.
- * Write PCS*_MR*_CONTROL_REG[RESET]=1 (while not changing the
- * value of the other PCS*_MR*_CONTROL_REG bits). Read
- * PCS*_MR*_CONTROL_REG[RESET] until it changes value to
- * zero.
- */
- control_reg.u64 =
- cvmx_read_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface));
- if (cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM) {
- control_reg.s.reset = 1;
- cvmx_write_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface),
- control_reg.u64);
- if (CVMX_WAIT_FOR_FIELD64
- (CVMX_PCSX_MRX_CONTROL_REG(index, interface),
- union cvmx_pcsx_mrx_control_reg, reset, ==, 0, 10000)) {
- cvmx_dprintf("SGMII%d: Timeout waiting for port %d "
- "to finish reset\n",
- interface, index);
- return -1;
- }
- }
-
- /*
- * Write PCS*_MR*_CONTROL_REG[RST_AN]=1 to ensure a fresh
- * sgmii negotiation starts.
- */
- control_reg.s.rst_an = 1;
- control_reg.s.an_en = 1;
- control_reg.s.pwr_dn = 0;
- cvmx_write_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface),
- control_reg.u64);
-
- /*
- * Wait for PCS*_MR*_STATUS_REG[AN_CPT] to be set, indicating
- * that sgmii autonegotiation is complete. In MAC mode this
- * isn't an ethernet link, but a link between Octeon and the
- * PHY.
- */
- if ((cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM) &&
- CVMX_WAIT_FOR_FIELD64(CVMX_PCSX_MRX_STATUS_REG(index, interface),
- union cvmx_pcsx_mrx_status_reg, an_cpt, ==, 1,
- 10000)) {
- /* cvmx_dprintf("SGMII%d: Port %d link timeout\n", interface, index); */
- return -1;
- }
- return 0;
-}
-
-/**
- * Configure an SGMII link to the specified speed after the SERDES
- * link is up.
- *
- * @interface: Interface to init
- * @index: Index of port on the interface
- * @link_info: Link state to configure
- *
- * Returns Zero on success, negative on failure
- */
-static int __cvmx_helper_sgmii_hardware_init_link_speed(int interface,
- int index,
- cvmx_helper_link_info_t
- link_info)
-{
- int is_enabled;
- union cvmx_gmxx_prtx_cfg gmxx_prtx_cfg;
- union cvmx_pcsx_miscx_ctl_reg pcsx_miscx_ctl_reg;
-
- /* Disable GMX before we make any changes. Remember the enable state */
- gmxx_prtx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
- is_enabled = gmxx_prtx_cfg.s.en;
- gmxx_prtx_cfg.s.en = 0;
- cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmxx_prtx_cfg.u64);
-
- /* Wait for GMX to be idle */
- if (CVMX_WAIT_FOR_FIELD64
- (CVMX_GMXX_PRTX_CFG(index, interface), union cvmx_gmxx_prtx_cfg,
- rx_idle, ==, 1, 10000)
- || CVMX_WAIT_FOR_FIELD64(CVMX_GMXX_PRTX_CFG(index, interface),
- union cvmx_gmxx_prtx_cfg, tx_idle, ==, 1,
- 10000)) {
- cvmx_dprintf
- ("SGMII%d: Timeout waiting for port %d to be idle\n",
- interface, index);
- return -1;
- }
-
- /* Read GMX CFG again to make sure the disable completed */
- gmxx_prtx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
-
- /*
- * Get the misc control for PCS. We will need to set the
- * duplication amount.
- */
- pcsx_miscx_ctl_reg.u64 =
- cvmx_read_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface));
-
- /*
- * Use GMXENO to force the link down if the status we get says
- * it should be down.
- */
- pcsx_miscx_ctl_reg.s.gmxeno = !link_info.s.link_up;
-
- /* Only change the duplex setting if the link is up */
- if (link_info.s.link_up)
- gmxx_prtx_cfg.s.duplex = link_info.s.full_duplex;
-
- /* Do speed based setting for GMX */
- switch (link_info.s.speed) {
- case 10:
- gmxx_prtx_cfg.s.speed = 0;
- gmxx_prtx_cfg.s.speed_msb = 1;
- gmxx_prtx_cfg.s.slottime = 0;
- /* Setting from GMX-603 */
- pcsx_miscx_ctl_reg.s.samp_pt = 25;
- cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 64);
- cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0);
- break;
- case 100:
- gmxx_prtx_cfg.s.speed = 0;
- gmxx_prtx_cfg.s.speed_msb = 0;
- gmxx_prtx_cfg.s.slottime = 0;
- pcsx_miscx_ctl_reg.s.samp_pt = 0x5;
- cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 64);
- cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0);
- break;
- case 1000:
- gmxx_prtx_cfg.s.speed = 1;
- gmxx_prtx_cfg.s.speed_msb = 0;
- gmxx_prtx_cfg.s.slottime = 1;
- pcsx_miscx_ctl_reg.s.samp_pt = 1;
- cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 512);
- cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 8192);
- break;
- default:
- break;
- }
-
- /* Write the new misc control for PCS */
- cvmx_write_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface),
- pcsx_miscx_ctl_reg.u64);
-
- /* Write the new GMX settings with the port still disabled */
- cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmxx_prtx_cfg.u64);
-
- /* Read GMX CFG again to make sure the config completed */
- gmxx_prtx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
-
- /* Restore the enabled / disabled state */
- gmxx_prtx_cfg.s.en = is_enabled;
- cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmxx_prtx_cfg.u64);
-
- return 0;
-}
-
-/**
- * Bring up the SGMII interface to be ready for packet I/O but
- * leave I/O disabled using the GMX override. This function
- * follows the bringup documented in 10.6.3 of the manual.
- *
- * @interface: Interface to bringup
- * @num_ports: Number of ports on the interface
- *
- * Returns Zero on success, negative on failure
- */
-static int __cvmx_helper_sgmii_hardware_init(int interface, int num_ports)
-{
- int index;
-
- __cvmx_helper_setup_gmx(interface, num_ports);
-
- for (index = 0; index < num_ports; index++) {
- int ipd_port = cvmx_helper_get_ipd_port(interface, index);
- __cvmx_helper_sgmii_hardware_init_one_time(interface, index);
- __cvmx_helper_sgmii_link_set(ipd_port,
- __cvmx_helper_sgmii_link_get
- (ipd_port));
-
- }
-
- return 0;
-}
-
-/**
- * Probe an SGMII interface and determine the number of ports
- * connected to it. The SGMII interface should still be down after
- * this call.
- *
- * @interface: Interface to probe
- *
- * Returns Number of ports on the interface. Zero to disable.
- */
-int __cvmx_helper_sgmii_probe(int interface)
-{
- union cvmx_gmxx_inf_mode mode;
-
- /*
- * Due to errata GMX-700 on CN56XXp1.x and CN52XXp1.x, the
- * interface needs to be enabled before IPD otherwise per port
- * backpressure may not work properly
- */
- mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
- mode.s.en = 1;
- cvmx_write_csr(CVMX_GMXX_INF_MODE(interface), mode.u64);
- return 4;
-}
-
-/**
- * Bring up and enable an SGMII interface. After this call packet
- * I/O should be fully functional. This is called with IPD
- * enabled but PKO disabled.
- *
- * @interface: Interface to bring up
- *
- * Returns Zero on success, negative on failure
- */
-int __cvmx_helper_sgmii_enable(int interface)
-{
- int num_ports = cvmx_helper_ports_on_interface(interface);
- int index;
-
- __cvmx_helper_sgmii_hardware_init(interface, num_ports);
-
- for (index = 0; index < num_ports; index++) {
- union cvmx_gmxx_prtx_cfg gmxx_prtx_cfg;
- gmxx_prtx_cfg.u64 =
- cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
- gmxx_prtx_cfg.s.en = 1;
- cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
- gmxx_prtx_cfg.u64);
- __cvmx_interrupt_pcsx_intx_en_reg_enable(index, interface);
- }
- __cvmx_interrupt_pcsxx_int_en_reg_enable(interface);
- __cvmx_interrupt_gmxx_enable(interface);
- return 0;
-}
-
-/**
- * Return the link state of an IPD/PKO port as returned by
- * auto negotiation. The result of this function may not match
- * Octeon's link config if auto negotiation has changed since
- * the last call to cvmx_helper_link_set().
- *
- * @ipd_port: IPD/PKO port to query
- *
- * Returns Link state
- */
-cvmx_helper_link_info_t __cvmx_helper_sgmii_link_get(int ipd_port)
-{
- cvmx_helper_link_info_t result;
- union cvmx_pcsx_miscx_ctl_reg pcs_misc_ctl_reg;
- int interface = cvmx_helper_get_interface_num(ipd_port);
- int index = cvmx_helper_get_interface_index_num(ipd_port);
- union cvmx_pcsx_mrx_control_reg pcsx_mrx_control_reg;
-
- result.u64 = 0;
-
- if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM) {
- /* The simulator gives you a simulated 1Gbps full duplex link */
- result.s.link_up = 1;
- result.s.full_duplex = 1;
- result.s.speed = 1000;
- return result;
- }
-
- pcsx_mrx_control_reg.u64 =
- cvmx_read_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface));
- if (pcsx_mrx_control_reg.s.loopbck1) {
- /* Force 1Gbps full duplex link for internal loopback */
- result.s.link_up = 1;
- result.s.full_duplex = 1;
- result.s.speed = 1000;
- return result;
- }
-
- pcs_misc_ctl_reg.u64 =
- cvmx_read_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface));
- if (pcs_misc_ctl_reg.s.mode) {
- /* 1000BASE-X */
- /* FIXME */
- } else {
- union cvmx_pcsx_miscx_ctl_reg pcsx_miscx_ctl_reg;
- pcsx_miscx_ctl_reg.u64 =
- cvmx_read_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface));
- if (pcsx_miscx_ctl_reg.s.mac_phy) {
- /* PHY Mode */
- union cvmx_pcsx_mrx_status_reg pcsx_mrx_status_reg;
- union cvmx_pcsx_anx_results_reg pcsx_anx_results_reg;
-
- /*
-			 * Don't bother continuing if the SERDES low
- * level link is down
- */
- pcsx_mrx_status_reg.u64 =
- cvmx_read_csr(CVMX_PCSX_MRX_STATUS_REG
- (index, interface));
- if (pcsx_mrx_status_reg.s.lnk_st == 0) {
- if (__cvmx_helper_sgmii_hardware_init_link
- (interface, index) != 0)
- return result;
- }
-
- /* Read the autoneg results */
- pcsx_anx_results_reg.u64 =
- cvmx_read_csr(CVMX_PCSX_ANX_RESULTS_REG
- (index, interface));
- if (pcsx_anx_results_reg.s.an_cpt) {
- /*
- * Auto negotiation is complete. Set
- * status accordingly.
- */
- result.s.full_duplex =
- pcsx_anx_results_reg.s.dup;
- result.s.link_up =
- pcsx_anx_results_reg.s.link_ok;
- switch (pcsx_anx_results_reg.s.spd) {
- case 0:
- result.s.speed = 10;
- break;
- case 1:
- result.s.speed = 100;
- break;
- case 2:
- result.s.speed = 1000;
- break;
- default:
- result.s.speed = 0;
- result.s.link_up = 0;
- break;
- }
- } else {
- /*
- * Auto negotiation isn't
- * complete. Return link down.
- */
- result.s.speed = 0;
- result.s.link_up = 0;
- }
- } else { /* MAC Mode */
-
- result = __cvmx_helper_board_link_get(ipd_port);
- }
- }
- return result;
-}
-
-/**
- * Configure an IPD/PKO port for the specified link state. This
- * function does not influence auto negotiation at the PHY level.
- * The passed link state must always match the link state returned
- * by cvmx_helper_link_get(). It is normally best to use
- * cvmx_helper_link_autoconf() instead.
- *
- * @ipd_port: IPD/PKO port to configure
- * @link_info: The new link state
- *
- * Returns Zero on success, negative on failure
- */
-int __cvmx_helper_sgmii_link_set(int ipd_port,
- cvmx_helper_link_info_t link_info)
-{
- int interface = cvmx_helper_get_interface_num(ipd_port);
- int index = cvmx_helper_get_interface_index_num(ipd_port);
- __cvmx_helper_sgmii_hardware_init_link(interface, index);
- return __cvmx_helper_sgmii_hardware_init_link_speed(interface, index,
- link_info);
-}
-
-/**
- * Configure a port for internal and/or external loopback. Internal
- * loopback causes packets sent by the port to be received by
- * Octeon. External loopback causes packets received from the wire to
- * be sent out again.
- *
- * @ipd_port: IPD/PKO port to loopback.
- * @enable_internal:
- * Non zero if you want internal loopback
- * @enable_external:
- * Non zero if you want external loopback
- *
- * Returns Zero on success, negative on failure.
- */
-int __cvmx_helper_sgmii_configure_loopback(int ipd_port, int enable_internal,
- int enable_external)
-{
- int interface = cvmx_helper_get_interface_num(ipd_port);
- int index = cvmx_helper_get_interface_index_num(ipd_port);
- union cvmx_pcsx_mrx_control_reg pcsx_mrx_control_reg;
- union cvmx_pcsx_miscx_ctl_reg pcsx_miscx_ctl_reg;
-
- pcsx_mrx_control_reg.u64 =
- cvmx_read_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface));
- pcsx_mrx_control_reg.s.loopbck1 = enable_internal;
- cvmx_write_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface),
- pcsx_mrx_control_reg.u64);
-
- pcsx_miscx_ctl_reg.u64 =
- cvmx_read_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface));
- pcsx_miscx_ctl_reg.s.loopbck2 = enable_external;
- cvmx_write_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface),
- pcsx_miscx_ctl_reg.u64);
-
- __cvmx_helper_sgmii_hardware_init_link(interface, index);
- return 0;
-}
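
The link_get/link_set pair above is normally used together, reading the autonegotiated state and then programming the hardware to match; a hedged sketch of that pattern (the wrapper itself is not SDK code):

/* Hypothetical sketch: refresh one SGMII port from its autonegotiation
 * result. Returns zero on success, negative on failure. */
static int sgmii_refresh_link(int ipd_port)
{
	cvmx_helper_link_info_t link_info;

	/* Read the state negotiated with the link partner... */
	link_info = __cvmx_helper_sgmii_link_get(ipd_port);

	/* ...then program GMX/PCS to match it. */
	return __cvmx_helper_sgmii_link_set(ipd_port, link_info);
}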
diff --git a/drivers/staging/octeon/cvmx-helper-sgmii.h b/drivers/staging/octeon/cvmx-helper-sgmii.h
deleted file mode 100644
index 19b48d60857f..000000000000
--- a/drivers/staging/octeon/cvmx-helper-sgmii.h
+++ /dev/null
@@ -1,104 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/**
- * @file
- *
- * Functions for SGMII initialization, configuration,
- * and monitoring.
- *
- */
-#ifndef __CVMX_HELPER_SGMII_H__
-#define __CVMX_HELPER_SGMII_H__
-
-/**
- * Probe an SGMII interface and determine the number of ports
- * connected to it. The SGMII interface should still be down after
- * this call.
- *
- * @interface: Interface to probe
- *
- * Returns Number of ports on the interface. Zero to disable.
- */
-extern int __cvmx_helper_sgmii_probe(int interface);
-
-/**
- * Bring up and enable an SGMII interface. After this call packet
- * I/O should be fully functional. This is called with IPD
- * enabled but PKO disabled.
- *
- * @interface: Interface to bring up
- *
- * Returns Zero on success, negative on failure
- */
-extern int __cvmx_helper_sgmii_enable(int interface);
-
-/**
- * Return the link state of an IPD/PKO port as returned by
- * auto negotiation. The result of this function may not match
- * Octeon's link config if auto negotiation has changed since
- * the last call to cvmx_helper_link_set().
- *
- * @ipd_port: IPD/PKO port to query
- *
- * Returns Link state
- */
-extern cvmx_helper_link_info_t __cvmx_helper_sgmii_link_get(int ipd_port);
-
-/**
- * Configure an IPD/PKO port for the specified link state. This
- * function does not influence auto negotiation at the PHY level.
- * The passed link state must always match the link state returned
- * by cvmx_helper_link_get(). It is normally best to use
- * cvmx_helper_link_autoconf() instead.
- *
- * @ipd_port: IPD/PKO port to configure
- * @link_info: The new link state
- *
- * Returns Zero on success, negative on failure
- */
-extern int __cvmx_helper_sgmii_link_set(int ipd_port,
- cvmx_helper_link_info_t link_info);
-
-/**
- * Configure a port for internal and/or external loopback. Internal loopback
- * causes packets sent by the port to be received by Octeon. External loopback
- * causes packets received from the wire to be sent out again.
- *
- * @ipd_port: IPD/PKO port to loopback.
- * @enable_internal:
- * Non zero if you want internal loopback
- * @enable_external:
- * Non zero if you want external loopback
- *
- * Returns Zero on success, negative on failure.
- */
-extern int __cvmx_helper_sgmii_configure_loopback(int ipd_port,
- int enable_internal,
- int enable_external);
-
-#endif
diff --git a/drivers/staging/octeon/cvmx-helper-spi.c b/drivers/staging/octeon/cvmx-helper-spi.c
deleted file mode 100644
index 8ba6c832471e..000000000000
--- a/drivers/staging/octeon/cvmx-helper-spi.c
+++ /dev/null
@@ -1,195 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-void __cvmx_interrupt_gmxx_enable(int interface);
-void __cvmx_interrupt_spxx_int_msk_enable(int index);
-void __cvmx_interrupt_stxx_int_msk_enable(int index);
-
-/*
- * Functions for SPI initialization, configuration,
- * and monitoring.
- */
-#include <asm/octeon/octeon.h>
-
-#include "cvmx-config.h"
-#include "cvmx-spi.h"
-#include "cvmx-helper.h"
-
-#include "cvmx-pip-defs.h"
-#include "cvmx-pko-defs.h"
-
-/*
- * CVMX_HELPER_SPI_TIMEOUT is used to determine how long the SPI
- * initialization routines wait for SPI training. You can override the
- * value using executive-config.h if necessary.
- */
-#ifndef CVMX_HELPER_SPI_TIMEOUT
-#define CVMX_HELPER_SPI_TIMEOUT 10
-#endif
-
-/**
- * Probe an SPI interface and determine the number of ports
- * connected to it. The SPI interface should still be down after
- * this call.
- *
- * @interface: Interface to probe
- *
- * Returns Number of ports on the interface. Zero to disable.
- */
-int __cvmx_helper_spi_probe(int interface)
-{
- int num_ports = 0;
-
- if ((cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM) &&
- cvmx_spi4000_is_present(interface)) {
- num_ports = 10;
- } else {
- union cvmx_pko_reg_crc_enable enable;
- num_ports = 16;
- /*
- * Unlike the SPI4000, most SPI devices don't
- * automatically put on the L2 CRC. For everything
-		 * except the SPI4000, have PKO append the L2 CRC
- * to the packet.
- */
- enable.u64 = cvmx_read_csr(CVMX_PKO_REG_CRC_ENABLE);
- enable.s.enable |= 0xffff << (interface * 16);
- cvmx_write_csr(CVMX_PKO_REG_CRC_ENABLE, enable.u64);
- }
- __cvmx_helper_setup_gmx(interface, num_ports);
- return num_ports;
-}
-
-/**
- * Bring up and enable an SPI interface. After this call packet I/O
- * should be fully functional. This is called with IPD enabled but
- * PKO disabled.
- *
- * @interface: Interface to bring up
- *
- * Returns Zero on success, negative on failure
- */
-int __cvmx_helper_spi_enable(int interface)
-{
- /*
- * Normally the ethernet L2 CRC is checked and stripped in the
-	 * GMX block. When you are using SPI, this isn't the case and
- * IPD needs to check the L2 CRC.
- */
- int num_ports = cvmx_helper_ports_on_interface(interface);
- int ipd_port;
- for (ipd_port = interface * 16; ipd_port < interface * 16 + num_ports;
- ipd_port++) {
- union cvmx_pip_prt_cfgx port_config;
- port_config.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(ipd_port));
- port_config.s.crc_en = 1;
- cvmx_write_csr(CVMX_PIP_PRT_CFGX(ipd_port), port_config.u64);
- }
-
- if (cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM) {
- cvmx_spi_start_interface(interface, CVMX_SPI_MODE_DUPLEX,
- CVMX_HELPER_SPI_TIMEOUT, num_ports);
- if (cvmx_spi4000_is_present(interface))
- cvmx_spi4000_initialize(interface);
- }
- __cvmx_interrupt_spxx_int_msk_enable(interface);
- __cvmx_interrupt_stxx_int_msk_enable(interface);
- __cvmx_interrupt_gmxx_enable(interface);
- return 0;
-}
-
-/**
- * Return the link state of an IPD/PKO port as returned by
- * auto negotiation. The result of this function may not match
- * Octeon's link config if auto negotiation has changed since
- * the last call to cvmx_helper_link_set().
- *
- * @ipd_port: IPD/PKO port to query
- *
- * Returns Link state
- */
-cvmx_helper_link_info_t __cvmx_helper_spi_link_get(int ipd_port)
-{
- cvmx_helper_link_info_t result;
- int interface = cvmx_helper_get_interface_num(ipd_port);
- int index = cvmx_helper_get_interface_index_num(ipd_port);
- result.u64 = 0;
-
- if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM) {
- /* The simulator gives you a simulated full duplex link */
- result.s.link_up = 1;
- result.s.full_duplex = 1;
- result.s.speed = 10000;
- } else if (cvmx_spi4000_is_present(interface)) {
- union cvmx_gmxx_rxx_rx_inbnd inband =
- cvmx_spi4000_check_speed(interface, index);
- result.s.link_up = inband.s.status;
- result.s.full_duplex = inband.s.duplex;
- switch (inband.s.speed) {
- case 0: /* 10 Mbps */
- result.s.speed = 10;
- break;
- case 1: /* 100 Mbps */
- result.s.speed = 100;
- break;
- case 2: /* 1 Gbps */
- result.s.speed = 1000;
- break;
- case 3: /* Illegal */
- result.s.speed = 0;
- result.s.link_up = 0;
- break;
- }
- } else {
-		/* For generic SPI we can't determine the link, so just return some
- sane results */
- result.s.link_up = 1;
- result.s.full_duplex = 1;
- result.s.speed = 10000;
- }
- return result;
-}
-
-/**
- * Configure an IPD/PKO port for the specified link state. This
- * function does not influence auto negotiation at the PHY level.
- * The passed link state must always match the link state returned
- * by cvmx_helper_link_get(). It is normally best to use
- * cvmx_helper_link_autoconf() instead.
- *
- * @ipd_port: IPD/PKO port to configure
- * @link_info: The new link state
- *
- * Returns Zero on success, negative on failure
- */
-int __cvmx_helper_spi_link_set(int ipd_port, cvmx_helper_link_info_t link_info)
-{
- /* Nothing to do. If we have a SPI4000 then the setup was already performed
- by cvmx_spi4000_check_speed(). If not then there isn't any link
- info */
- return 0;
-}
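
A small sketch of querying the SPI link state reported by the helper above; the printing wrapper is an illustrative assumption, not part of the original file:

/* Hypothetical sketch: report one SPI port's link state. Speed is in
 * Mbps, matching the switch statement in the helper above. */
static void spi_report_link(int ipd_port)
{
	cvmx_helper_link_info_t link = __cvmx_helper_spi_link_get(ipd_port);

	cvmx_dprintf("SPI port %d: %s, %d Mbps, %s duplex\n",
		     ipd_port,
		     link.s.link_up ? "up" : "down",
		     (int)link.s.speed,
		     link.s.full_duplex ? "full" : "half");
}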
diff --git a/drivers/staging/octeon/cvmx-helper-spi.h b/drivers/staging/octeon/cvmx-helper-spi.h
deleted file mode 100644
index 69bac036d10e..000000000000
--- a/drivers/staging/octeon/cvmx-helper-spi.h
+++ /dev/null
@@ -1,84 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/*
- * Functions for SPI initialization, configuration,
- * and monitoring.
- */
-#ifndef __CVMX_HELPER_SPI_H__
-#define __CVMX_HELPER_SPI_H__
-
-/**
- * Probe an SPI interface and determine the number of ports
- * connected to it. The SPI interface should still be down after
- * this call.
- *
- * @interface: Interface to probe
- *
- * Returns Number of ports on the interface. Zero to disable.
- */
-extern int __cvmx_helper_spi_probe(int interface);
-
-/**
- * Bring up and enable an SPI interface. After this call packet I/O
- * should be fully functional. This is called with IPD enabled but
- * PKO disabled.
- *
- * @interface: Interface to bring up
- *
- * Returns Zero on success, negative on failure
- */
-extern int __cvmx_helper_spi_enable(int interface);
-
-/**
- * Return the link state of an IPD/PKO port as returned by
- * auto negotiation. The result of this function may not match
- * Octeon's link config if auto negotiation has changed since
- * the last call to cvmx_helper_link_set().
- *
- * @ipd_port: IPD/PKO port to query
- *
- * Returns Link state
- */
-extern cvmx_helper_link_info_t __cvmx_helper_spi_link_get(int ipd_port);
-
-/**
- * Configure an IPD/PKO port for the specified link state. This
- * function does not influence auto negotiation at the PHY level.
- * The passed link state must always match the link state returned
- * by cvmx_helper_link_get(). It is normally best to use
- * cvmx_helper_link_autoconf() instead.
- *
- * @ipd_port: IPD/PKO port to configure
- * @link_info: The new link state
- *
- * Returns Zero on success, negative on failure
- */
-extern int __cvmx_helper_spi_link_set(int ipd_port,
- cvmx_helper_link_info_t link_info);
-
-#endif
diff --git a/drivers/staging/octeon/cvmx-helper-util.c b/drivers/staging/octeon/cvmx-helper-util.c
deleted file mode 100644
index 131182bf5abb..000000000000
--- a/drivers/staging/octeon/cvmx-helper-util.c
+++ /dev/null
@@ -1,433 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/*
- * Small helper utilities.
- */
-#include <linux/kernel.h>
-
-#include <asm/octeon/octeon.h>
-
-#include "cvmx-config.h"
-
-#include "cvmx-fpa.h"
-#include "cvmx-pip.h"
-#include "cvmx-pko.h"
-#include "cvmx-ipd.h"
-#include "cvmx-spi.h"
-
-#include "cvmx-helper.h"
-#include "cvmx-helper-util.h"
-
-#include <asm/octeon/cvmx-ipd-defs.h>
-
-/**
- * Convert an interface mode into a human-readable string
- *
- * @mode: Mode to convert
- *
- * Returns String
- */
-const char *cvmx_helper_interface_mode_to_string(cvmx_helper_interface_mode_t
- mode)
-{
- switch (mode) {
- case CVMX_HELPER_INTERFACE_MODE_DISABLED:
- return "DISABLED";
- case CVMX_HELPER_INTERFACE_MODE_RGMII:
- return "RGMII";
- case CVMX_HELPER_INTERFACE_MODE_GMII:
- return "GMII";
- case CVMX_HELPER_INTERFACE_MODE_SPI:
- return "SPI";
- case CVMX_HELPER_INTERFACE_MODE_PCIE:
- return "PCIE";
- case CVMX_HELPER_INTERFACE_MODE_XAUI:
- return "XAUI";
- case CVMX_HELPER_INTERFACE_MODE_SGMII:
- return "SGMII";
- case CVMX_HELPER_INTERFACE_MODE_PICMG:
- return "PICMG";
- case CVMX_HELPER_INTERFACE_MODE_NPI:
- return "NPI";
- case CVMX_HELPER_INTERFACE_MODE_LOOP:
- return "LOOP";
- }
- return "UNKNOWN";
-}
-
-/**
- * Debug routine to dump the packet structure to the console
- *
- * @work: Work queue entry containing the packet to dump
- * Returns Zero
- */
-int cvmx_helper_dump_packet(cvmx_wqe_t *work)
-{
- uint64_t count;
- uint64_t remaining_bytes;
- union cvmx_buf_ptr buffer_ptr;
- uint64_t start_of_buffer;
- uint8_t *data_address;
- uint8_t *end_of_data;
-
- cvmx_dprintf("Packet Length: %u\n", work->len);
- cvmx_dprintf(" Input Port: %u\n", work->ipprt);
- cvmx_dprintf(" QoS: %u\n", work->qos);
- cvmx_dprintf(" Buffers: %u\n", work->word2.s.bufs);
-
- if (work->word2.s.bufs == 0) {
- union cvmx_ipd_wqe_fpa_queue wqe_pool;
- wqe_pool.u64 = cvmx_read_csr(CVMX_IPD_WQE_FPA_QUEUE);
- buffer_ptr.u64 = 0;
- buffer_ptr.s.pool = wqe_pool.s.wqe_pool;
- buffer_ptr.s.size = 128;
- buffer_ptr.s.addr = cvmx_ptr_to_phys(work->packet_data);
- if (likely(!work->word2.s.not_IP)) {
- union cvmx_pip_ip_offset pip_ip_offset;
- pip_ip_offset.u64 = cvmx_read_csr(CVMX_PIP_IP_OFFSET);
- buffer_ptr.s.addr +=
- (pip_ip_offset.s.offset << 3) -
- work->word2.s.ip_offset;
- buffer_ptr.s.addr += (work->word2.s.is_v6 ^ 1) << 2;
- } else {
- /*
- * WARNING: This code assumes that the packet
- * is not RAW. If it was, we would use
- * PIP_GBL_CFG[RAW_SHF] instead of
- * PIP_GBL_CFG[NIP_SHF].
- */
- union cvmx_pip_gbl_cfg pip_gbl_cfg;
- pip_gbl_cfg.u64 = cvmx_read_csr(CVMX_PIP_GBL_CFG);
- buffer_ptr.s.addr += pip_gbl_cfg.s.nip_shf;
- }
- } else
- buffer_ptr = work->packet_ptr;
- remaining_bytes = work->len;
-
- while (remaining_bytes) {
- start_of_buffer =
- ((buffer_ptr.s.addr >> 7) - buffer_ptr.s.back) << 7;
- cvmx_dprintf(" Buffer Start:%llx\n",
- (unsigned long long)start_of_buffer);
- cvmx_dprintf(" Buffer I : %u\n", buffer_ptr.s.i);
- cvmx_dprintf(" Buffer Back: %u\n", buffer_ptr.s.back);
- cvmx_dprintf(" Buffer Pool: %u\n", buffer_ptr.s.pool);
- cvmx_dprintf(" Buffer Data: %llx\n",
- (unsigned long long)buffer_ptr.s.addr);
- cvmx_dprintf(" Buffer Size: %u\n", buffer_ptr.s.size);
-
- cvmx_dprintf("\t\t");
- data_address = (uint8_t *) cvmx_phys_to_ptr(buffer_ptr.s.addr);
- end_of_data = data_address + buffer_ptr.s.size;
- count = 0;
- while (data_address < end_of_data) {
- if (remaining_bytes == 0)
- break;
- else
- remaining_bytes--;
- cvmx_dprintf("%02x", (unsigned int)*data_address);
- data_address++;
- if (remaining_bytes && (count == 7)) {
- cvmx_dprintf("\n\t\t");
- count = 0;
- } else
- count++;
- }
- cvmx_dprintf("\n");
-
- if (remaining_bytes)
- buffer_ptr = *(union cvmx_buf_ptr *)
- cvmx_phys_to_ptr(buffer_ptr.s.addr - 8);
- }
- return 0;
-}
-
-/**
- * Setup Random Early Drop on a specific input queue
- *
- * @queue: Input queue to setup RED on (0-7)
- * @pass_thresh:
- * Packets will begin slowly dropping when there are less than
- * this many packet buffers free in FPA 0.
- * @drop_thresh:
- *      All incoming packets will be dropped when there are less
- * than this many free packet buffers in FPA 0.
- * Returns Zero on success. Negative on failure
- */
-int cvmx_helper_setup_red_queue(int queue, int pass_thresh, int drop_thresh)
-{
- union cvmx_ipd_qosx_red_marks red_marks;
- union cvmx_ipd_red_quex_param red_param;
-
- /* Set RED to begin dropping packets when there are pass_thresh buffers
- left. It will linearly drop more packets until reaching drop_thresh
- buffers */
- red_marks.u64 = 0;
- red_marks.s.drop = drop_thresh;
- red_marks.s.pass = pass_thresh;
- cvmx_write_csr(CVMX_IPD_QOSX_RED_MARKS(queue), red_marks.u64);
-
- /* Use the actual queue 0 counter, not the average */
- red_param.u64 = 0;
- red_param.s.prb_con =
- (255ul << 24) / (red_marks.s.pass - red_marks.s.drop);
- red_param.s.avg_con = 1;
- red_param.s.new_con = 255;
- red_param.s.use_pcnt = 1;
- cvmx_write_csr(CVMX_IPD_RED_QUEX_PARAM(queue), red_param.u64);
- return 0;
-}
-
-/**
- * Setup Random Early Drop to automatically begin dropping packets.
- *
- * @pass_thresh:
- * Packets will begin slowly dropping when there are less than
- * this many packet buffers free in FPA 0.
- * @drop_thresh:
- *      All incoming packets will be dropped when there are less
- * than this many free packet buffers in FPA 0.
- * Returns Zero on success. Negative on failure
- */
-int cvmx_helper_setup_red(int pass_thresh, int drop_thresh)
-{
- union cvmx_ipd_portx_bp_page_cnt page_cnt;
- union cvmx_ipd_bp_prt_red_end ipd_bp_prt_red_end;
- union cvmx_ipd_red_port_enable red_port_enable;
- int queue;
- int interface;
- int port;
-
- /* Disable backpressure based on queued buffers. It needs SW support */
- page_cnt.u64 = 0;
- page_cnt.s.bp_enb = 0;
- page_cnt.s.page_cnt = 100;
- for (interface = 0; interface < 2; interface++) {
- for (port = cvmx_helper_get_first_ipd_port(interface);
- port < cvmx_helper_get_last_ipd_port(interface); port++)
- cvmx_write_csr(CVMX_IPD_PORTX_BP_PAGE_CNT(port),
- page_cnt.u64);
- }
-
- for (queue = 0; queue < 8; queue++)
- cvmx_helper_setup_red_queue(queue, pass_thresh, drop_thresh);
-
-	/* Shut off the dropping based on the per-port page count. SW isn't
- decrementing it right now */
- ipd_bp_prt_red_end.u64 = 0;
- ipd_bp_prt_red_end.s.prt_enb = 0;
- cvmx_write_csr(CVMX_IPD_BP_PRT_RED_END, ipd_bp_prt_red_end.u64);
-
- red_port_enable.u64 = 0;
- red_port_enable.s.prt_enb = 0xfffffffffull;
- red_port_enable.s.avg_dly = 10000;
- red_port_enable.s.prb_dly = 10000;
- cvmx_write_csr(CVMX_IPD_RED_PORT_ENABLE, red_port_enable.u64);
-
- return 0;
-}
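
The two RED helpers above are normally driven by a single call at initialization time; below is a hedged sketch of such a call site, with made-up threshold values:

/* Hypothetical sketch: start dropping packets gently once fewer than
 * 256 FPA pool-0 buffers remain, and drop everything below 128. The
 * thresholds are example values only. */
static int enable_red_example(void)
{
	return cvmx_helper_setup_red(256, 128);
}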
-
-/**
- * Setup the common GMX settings that determine the number of
- * ports. These settings apply to almost all configurations of all
- * chips.
- *
- * @interface: Interface to configure
- * @num_ports: Number of ports on the interface
- *
- * Returns Zero on success, negative on failure
- */
-int __cvmx_helper_setup_gmx(int interface, int num_ports)
-{
- union cvmx_gmxx_tx_prts gmx_tx_prts;
- union cvmx_gmxx_rx_prts gmx_rx_prts;
- union cvmx_pko_reg_gmx_port_mode pko_mode;
- union cvmx_gmxx_txx_thresh gmx_tx_thresh;
- int index;
-
- /* Tell GMX the number of TX ports on this interface */
- gmx_tx_prts.u64 = cvmx_read_csr(CVMX_GMXX_TX_PRTS(interface));
- gmx_tx_prts.s.prts = num_ports;
- cvmx_write_csr(CVMX_GMXX_TX_PRTS(interface), gmx_tx_prts.u64);
-
- /* Tell GMX the number of RX ports on this interface. This only
- ** applies to *GMII and XAUI ports */
- if (cvmx_helper_interface_get_mode(interface) ==
- CVMX_HELPER_INTERFACE_MODE_RGMII
- || cvmx_helper_interface_get_mode(interface) ==
- CVMX_HELPER_INTERFACE_MODE_SGMII
- || cvmx_helper_interface_get_mode(interface) ==
- CVMX_HELPER_INTERFACE_MODE_GMII
- || cvmx_helper_interface_get_mode(interface) ==
- CVMX_HELPER_INTERFACE_MODE_XAUI) {
- if (num_ports > 4) {
- cvmx_dprintf("__cvmx_helper_setup_gmx: Illegal "
- "num_ports\n");
- return -1;
- }
-
- gmx_rx_prts.u64 = cvmx_read_csr(CVMX_GMXX_RX_PRTS(interface));
- gmx_rx_prts.s.prts = num_ports;
- cvmx_write_csr(CVMX_GMXX_RX_PRTS(interface), gmx_rx_prts.u64);
- }
-
- /* Skip setting CVMX_PKO_REG_GMX_PORT_MODE on 30XX, 31XX, and 50XX */
- if (!OCTEON_IS_MODEL(OCTEON_CN30XX) && !OCTEON_IS_MODEL(OCTEON_CN31XX)
- && !OCTEON_IS_MODEL(OCTEON_CN50XX)) {
- /* Tell PKO the number of ports on this interface */
- pko_mode.u64 = cvmx_read_csr(CVMX_PKO_REG_GMX_PORT_MODE);
- if (interface == 0) {
- if (num_ports == 1)
- pko_mode.s.mode0 = 4;
- else if (num_ports == 2)
- pko_mode.s.mode0 = 3;
- else if (num_ports <= 4)
- pko_mode.s.mode0 = 2;
- else if (num_ports <= 8)
- pko_mode.s.mode0 = 1;
- else
- pko_mode.s.mode0 = 0;
- } else {
- if (num_ports == 1)
- pko_mode.s.mode1 = 4;
- else if (num_ports == 2)
- pko_mode.s.mode1 = 3;
- else if (num_ports <= 4)
- pko_mode.s.mode1 = 2;
- else if (num_ports <= 8)
- pko_mode.s.mode1 = 1;
- else
- pko_mode.s.mode1 = 0;
- }
- cvmx_write_csr(CVMX_PKO_REG_GMX_PORT_MODE, pko_mode.u64);
- }
-
- /*
- * Set GMX to buffer as much data as possible before starting
-	 * transmit. This reduces the chances that we have a TX underrun
-	 * due to memory contention. Any packet that fits entirely
-	 * in the GMX FIFO can never have an underrun regardless of
- * memory load.
- */
- gmx_tx_thresh.u64 = cvmx_read_csr(CVMX_GMXX_TXX_THRESH(0, interface));
- if (OCTEON_IS_MODEL(OCTEON_CN30XX) || OCTEON_IS_MODEL(OCTEON_CN31XX)
- || OCTEON_IS_MODEL(OCTEON_CN50XX)) {
- /* These chips have a fixed max threshold of 0x40 */
- gmx_tx_thresh.s.cnt = 0x40;
- } else {
- /* Choose the max value for the number of ports */
- if (num_ports <= 1)
- gmx_tx_thresh.s.cnt = 0x100 / 1;
- else if (num_ports == 2)
- gmx_tx_thresh.s.cnt = 0x100 / 2;
- else
- gmx_tx_thresh.s.cnt = 0x100 / 4;
- }
- /*
- * SPI and XAUI can have lots of ports but the GMX hardware
- * only ever has a max of 4.
- */
- if (num_ports > 4)
- num_ports = 4;
- for (index = 0; index < num_ports; index++)
- cvmx_write_csr(CVMX_GMXX_TXX_THRESH(index, interface),
- gmx_tx_thresh.u64);
-
- return 0;
-}
-
-/**
- * Returns the IPD/PKO port number for a port on the given
- * interface.
- *
- * @interface: Interface to use
- * @port: Port on the interface
- *
- * Returns IPD/PKO port number
- */
-int cvmx_helper_get_ipd_port(int interface, int port)
-{
- switch (interface) {
- case 0:
- return port;
- case 1:
- return port + 16;
- case 2:
- return port + 32;
- case 3:
- return port + 36;
- }
- return -1;
-}
-
-/**
- * Returns the interface number for an IPD/PKO port number.
- *
- * @ipd_port: IPD/PKO port number
- *
- * Returns Interface number
- */
-int cvmx_helper_get_interface_num(int ipd_port)
-{
- if (ipd_port < 16)
- return 0;
- else if (ipd_port < 32)
- return 1;
- else if (ipd_port < 36)
- return 2;
- else if (ipd_port < 40)
- return 3;
- else
- cvmx_dprintf("cvmx_helper_get_interface_num: Illegal IPD "
- "port number\n");
-
- return -1;
-}
-
-/**
- * Returns the interface index number for an IPD/PKO port
- * number.
- *
- * @ipd_port: IPD/PKO port number
- *
- * Returns Interface index number
- */
-int cvmx_helper_get_interface_index_num(int ipd_port)
-{
- if (ipd_port < 32)
- return ipd_port & 15;
- else if (ipd_port < 36)
- return ipd_port & 3;
- else if (ipd_port < 40)
- return ipd_port & 3;
- else
- cvmx_dprintf("cvmx_helper_get_interface_index_num: "
- "Illegal IPD port number\n");
-
- return -1;
-}
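
The port-mapping helpers above are inverses of each other for valid port numbers; the sketch below (an illustrative assumption, not original code) makes the relationship explicit:

/* Hypothetical sketch: IPD ports 0-15 map to interface 0, 16-31 to
 * interface 1, 32-35 to interface 2 and 36-39 to interface 3, so a
 * round trip through the helpers recovers the original numbers. */
static int ipd_port_roundtrip_ok(int interface, int port)
{
	int ipd_port = cvmx_helper_get_ipd_port(interface, port);

	return cvmx_helper_get_interface_num(ipd_port) == interface &&
	       cvmx_helper_get_interface_index_num(ipd_port) == port;
}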
diff --git a/drivers/staging/octeon/cvmx-helper-util.h b/drivers/staging/octeon/cvmx-helper-util.h
deleted file mode 100644
index 6a6e52fc22c1..000000000000
--- a/drivers/staging/octeon/cvmx-helper-util.h
+++ /dev/null
@@ -1,215 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/*
- *
- * Small helper utilities.
- *
- */
-
-#ifndef __CVMX_HELPER_UTIL_H__
-#define __CVMX_HELPER_UTIL_H__
-
-/**
- * Convert an interface mode into a human-readable string
- *
- * @mode: Mode to convert
- *
- * Returns String
- */
-extern const char
- *cvmx_helper_interface_mode_to_string(cvmx_helper_interface_mode_t mode);
-
-/**
- * Debug routine to dump the packet structure to the console
- *
- * @work: Work queue entry containing the packet to dump
- * Returns Zero
- */
-extern int cvmx_helper_dump_packet(cvmx_wqe_t *work);
-
-/**
- * Setup Random Early Drop on a specific input queue
- *
- * @queue: Input queue to setup RED on (0-7)
- * @pass_thresh:
- * Packets will begin slowly dropping when there are less than
- * this many packet buffers free in FPA 0.
- * @drop_thresh:
- *      All incoming packets will be dropped when there are less
- * than this many free packet buffers in FPA 0.
- * Returns Zero on success. Negative on failure
- */
-extern int cvmx_helper_setup_red_queue(int queue, int pass_thresh,
- int drop_thresh);
-
-/**
- * Setup Random Early Drop to automatically begin dropping packets.
- *
- * @pass_thresh:
- * Packets will begin slowly dropping when there are less than
- * this many packet buffers free in FPA 0.
- * @drop_thresh:
- *      All incoming packets will be dropped when there are less
- * than this many free packet buffers in FPA 0.
- * Returns Zero on success. Negative on failure
- */
-extern int cvmx_helper_setup_red(int pass_thresh, int drop_thresh);
-
-/**
- * Get the version of the CVMX libraries.
- *
- * Returns Version string. Note this buffer is allocated statically
- * and will be shared by all callers.
- */
-extern const char *cvmx_helper_get_version(void);
-
-/**
- * Setup the common GMX settings that determine the number of
- * ports. These settings apply to almost all configurations of all
- * chips.
- *
- * @interface: Interface to configure
- * @num_ports: Number of ports on the interface
- *
- * Returns Zero on success, negative on failure
- */
-extern int __cvmx_helper_setup_gmx(int interface, int num_ports);
-
-/**
- * Returns the IPD/PKO port number for a port on the given
- * interface.
- *
- * @interface: Interface to use
- * @port: Port on the interface
- *
- * Returns IPD/PKO port number
- */
-extern int cvmx_helper_get_ipd_port(int interface, int port);
-
-/**
- * Returns the IPD/PKO port number for the first port on the given
- * interface.
- *
- * @interface: Interface to use
- *
- * Returns IPD/PKO port number
- */
-static inline int cvmx_helper_get_first_ipd_port(int interface)
-{
- return cvmx_helper_get_ipd_port(interface, 0);
-}
-
-/**
- * Returns the IPD/PKO port number for the last port on the given
- * interface.
- *
- * @interface: Interface to use
- *
- * Returns IPD/PKO port number
- */
-static inline int cvmx_helper_get_last_ipd_port(int interface)
-{
- extern int cvmx_helper_ports_on_interface(int interface);
-
- return cvmx_helper_get_first_ipd_port(interface) +
- cvmx_helper_ports_on_interface(interface) - 1;
-}
-
-/**
- * Free the packet buffers contained in a work queue entry.
- * The work queue entry is not freed.
- *
- * @work: Work queue entry with packet to free
- */
-static inline void cvmx_helper_free_packet_data(cvmx_wqe_t *work)
-{
- uint64_t number_buffers;
- union cvmx_buf_ptr buffer_ptr;
- union cvmx_buf_ptr next_buffer_ptr;
- uint64_t start_of_buffer;
-
- number_buffers = work->word2.s.bufs;
- if (number_buffers == 0)
- return;
- buffer_ptr = work->packet_ptr;
-
- /*
- * Since the number of buffers is not zero, we know this is
- * not a dynamic short packet. We need to check if it is a
- * packet received with IPD_CTL_STATUS[NO_WPTR]. If this is
- * true, we need to free all buffers except for the first
- * one. The caller doesn't expect their WQE pointer to be
- * freed
- */
- start_of_buffer = ((buffer_ptr.s.addr >> 7) - buffer_ptr.s.back) << 7;
- if (cvmx_ptr_to_phys(work) == start_of_buffer) {
- next_buffer_ptr =
- *(union cvmx_buf_ptr *) cvmx_phys_to_ptr(buffer_ptr.s.addr - 8);
- buffer_ptr = next_buffer_ptr;
- number_buffers--;
- }
-
- while (number_buffers--) {
- /*
- * Remember the back pointer is in cache lines, not
- * 64bit words
- */
- start_of_buffer =
- ((buffer_ptr.s.addr >> 7) - buffer_ptr.s.back) << 7;
- /*
- * Read pointer to next buffer before we free the
- * current buffer.
- */
- next_buffer_ptr =
- *(union cvmx_buf_ptr *) cvmx_phys_to_ptr(buffer_ptr.s.addr - 8);
- cvmx_fpa_free(cvmx_phys_to_ptr(start_of_buffer),
- buffer_ptr.s.pool, 0);
- buffer_ptr = next_buffer_ptr;
- }
-}
-
-/**
- * Returns the interface number for an IPD/PKO port number.
- *
- * @ipd_port: IPD/PKO port number
- *
- * Returns Interface number
- */
-extern int cvmx_helper_get_interface_num(int ipd_port);
-
-/**
- * Returns the interface index number for an IPD/PKO port
- * number.
- *
- * @ipd_port: IPD/PKO port number
- *
- * Returns Interface index number
- */
-extern int cvmx_helper_get_interface_index_num(int ipd_port);
-
-#endif /* __CVMX_HELPER_UTIL_H__ */
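
A typical consumer of cvmx_helper_free_packet_data() above releases the packet buffers first and then returns the work queue entry to its FPA pool; the sketch below is an assumption about such a caller (CVMX_FPA_WQE_POOL comes from cvmx-config.h), not code from the original tree:

/* Hypothetical sketch: drop a received packet. The helper frees the
 * chained packet buffers; the WQE itself still has to go back to the
 * pool it was allocated from. */
static void drop_packet(cvmx_wqe_t *work)
{
	cvmx_helper_free_packet_data(work);
	cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 0);
}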
diff --git a/drivers/staging/octeon/cvmx-helper-xaui.c b/drivers/staging/octeon/cvmx-helper-xaui.c
deleted file mode 100644
index a11e6769e234..000000000000
--- a/drivers/staging/octeon/cvmx-helper-xaui.c
+++ /dev/null
@@ -1,348 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/*
- * Functions for XAUI initialization, configuration,
- * and monitoring.
- *
- */
-
-#include <asm/octeon/octeon.h>
-
-#include "cvmx-config.h"
-
-#include "cvmx-helper.h"
-
-#include "cvmx-pko-defs.h"
-#include "cvmx-gmxx-defs.h"
-#include "cvmx-pcsxx-defs.h"
-
-void __cvmx_interrupt_gmxx_enable(int interface);
-void __cvmx_interrupt_pcsx_intx_en_reg_enable(int index, int block);
-void __cvmx_interrupt_pcsxx_int_en_reg_enable(int index);
-/**
- * Probe a XAUI interface and determine the number of ports
- * connected to it. The XAUI interface should still be down
- * after this call.
- *
- * @interface: Interface to probe
- *
- * Returns Number of ports on the interface. Zero to disable.
- */
-int __cvmx_helper_xaui_probe(int interface)
-{
- int i;
- union cvmx_gmxx_hg2_control gmx_hg2_control;
- union cvmx_gmxx_inf_mode mode;
-
- /*
- * Due to errata GMX-700 on CN56XXp1.x and CN52XXp1.x, the
- * interface needs to be enabled before IPD otherwise per port
- * backpressure may not work properly.
- */
- mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
- mode.s.en = 1;
- cvmx_write_csr(CVMX_GMXX_INF_MODE(interface), mode.u64);
-
- __cvmx_helper_setup_gmx(interface, 1);
-
- /*
- * Setup PKO to support 16 ports for HiGig2 virtual
- * ports. We're pointing all of the PKO packet ports for this
- * interface to the XAUI. This allows us to use HiGig2
- * backpressure per port.
- */
- for (i = 0; i < 16; i++) {
- union cvmx_pko_mem_port_ptrs pko_mem_port_ptrs;
- pko_mem_port_ptrs.u64 = 0;
- /*
- * We set each PKO port to have equal priority in a
- * round robin fashion.
- */
- pko_mem_port_ptrs.s.static_p = 0;
- pko_mem_port_ptrs.s.qos_mask = 0xff;
- /* All PKO ports map to the same XAUI hardware port */
- pko_mem_port_ptrs.s.eid = interface * 4;
- pko_mem_port_ptrs.s.pid = interface * 16 + i;
- cvmx_write_csr(CVMX_PKO_MEM_PORT_PTRS, pko_mem_port_ptrs.u64);
- }
-
- /* If HiGig2 is enabled return 16 ports, otherwise return 1 port */
- gmx_hg2_control.u64 = cvmx_read_csr(CVMX_GMXX_HG2_CONTROL(interface));
- if (gmx_hg2_control.s.hg2tx_en)
- return 16;
- else
- return 1;
-}
-
-/**
- * Bring up and enable a XAUI interface. After this call packet
- * I/O should be fully functional. This is called with IPD
- * enabled but PKO disabled.
- *
- * @interface: Interface to bring up
- *
- * Returns Zero on success, negative on failure
- */
-int __cvmx_helper_xaui_enable(int interface)
-{
- union cvmx_gmxx_prtx_cfg gmx_cfg;
- union cvmx_pcsxx_control1_reg xauiCtl;
- union cvmx_pcsxx_misc_ctl_reg xauiMiscCtl;
- union cvmx_gmxx_tx_xaui_ctl gmxXauiTxCtl;
- union cvmx_gmxx_rxx_int_en gmx_rx_int_en;
- union cvmx_gmxx_tx_int_en gmx_tx_int_en;
- union cvmx_pcsxx_int_en_reg pcsx_int_en_reg;
-
- /* (1) Interface has already been enabled. */
-
- /* (2) Disable GMX. */
- xauiMiscCtl.u64 = cvmx_read_csr(CVMX_PCSXX_MISC_CTL_REG(interface));
- xauiMiscCtl.s.gmxeno = 1;
- cvmx_write_csr(CVMX_PCSXX_MISC_CTL_REG(interface), xauiMiscCtl.u64);
-
- /* (3) Disable GMX and PCSX interrupts. */
- gmx_rx_int_en.u64 = cvmx_read_csr(CVMX_GMXX_RXX_INT_EN(0, interface));
- cvmx_write_csr(CVMX_GMXX_RXX_INT_EN(0, interface), 0x0);
- gmx_tx_int_en.u64 = cvmx_read_csr(CVMX_GMXX_TX_INT_EN(interface));
- cvmx_write_csr(CVMX_GMXX_TX_INT_EN(interface), 0x0);
- pcsx_int_en_reg.u64 = cvmx_read_csr(CVMX_PCSXX_INT_EN_REG(interface));
- cvmx_write_csr(CVMX_PCSXX_INT_EN_REG(interface), 0x0);
-
- /* (4) Bring up the PCSX and GMX reconciliation layer. */
- /* (4)a Set polarity and lane swapping. */
- /* (4)b */
- gmxXauiTxCtl.u64 = cvmx_read_csr(CVMX_GMXX_TX_XAUI_CTL(interface));
- /* Enable better IFG packing to improve performance */
- gmxXauiTxCtl.s.dic_en = 1;
- gmxXauiTxCtl.s.uni_en = 0;
- cvmx_write_csr(CVMX_GMXX_TX_XAUI_CTL(interface), gmxXauiTxCtl.u64);
-
- /* (4)c Apply reset sequence */
- xauiCtl.u64 = cvmx_read_csr(CVMX_PCSXX_CONTROL1_REG(interface));
- xauiCtl.s.lo_pwr = 0;
- xauiCtl.s.reset = 1;
- cvmx_write_csr(CVMX_PCSXX_CONTROL1_REG(interface), xauiCtl.u64);
-
- /* Wait for PCS to come out of reset */
- if (CVMX_WAIT_FOR_FIELD64
- (CVMX_PCSXX_CONTROL1_REG(interface), union cvmx_pcsxx_control1_reg,
- reset, ==, 0, 10000))
- return -1;
- /* Wait for PCS to be aligned */
- if (CVMX_WAIT_FOR_FIELD64
- (CVMX_PCSXX_10GBX_STATUS_REG(interface),
- union cvmx_pcsxx_10gbx_status_reg, alignd, ==, 1, 10000))
- return -1;
- /* Wait for RX to be ready */
- if (CVMX_WAIT_FOR_FIELD64
- (CVMX_GMXX_RX_XAUI_CTL(interface), union cvmx_gmxx_rx_xaui_ctl,
- status, ==, 0, 10000))
- return -1;
-
- /* (6) Configure GMX */
- gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(0, interface));
- gmx_cfg.s.en = 0;
- cvmx_write_csr(CVMX_GMXX_PRTX_CFG(0, interface), gmx_cfg.u64);
-
- /* Wait for GMX RX to be idle */
- if (CVMX_WAIT_FOR_FIELD64
- (CVMX_GMXX_PRTX_CFG(0, interface), union cvmx_gmxx_prtx_cfg,
- rx_idle, ==, 1, 10000))
- return -1;
- /* Wait for GMX TX to be idle */
- if (CVMX_WAIT_FOR_FIELD64
- (CVMX_GMXX_PRTX_CFG(0, interface), union cvmx_gmxx_prtx_cfg,
- tx_idle, ==, 1, 10000))
- return -1;
-
- /* GMX configure */
- gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(0, interface));
- gmx_cfg.s.speed = 1;
- gmx_cfg.s.speed_msb = 0;
- gmx_cfg.s.slottime = 1;
- cvmx_write_csr(CVMX_GMXX_TX_PRTS(interface), 1);
- cvmx_write_csr(CVMX_GMXX_TXX_SLOT(0, interface), 512);
- cvmx_write_csr(CVMX_GMXX_TXX_BURST(0, interface), 8192);
- cvmx_write_csr(CVMX_GMXX_PRTX_CFG(0, interface), gmx_cfg.u64);
-
- /* (7) Clear out any error state */
- cvmx_write_csr(CVMX_GMXX_RXX_INT_REG(0, interface),
- cvmx_read_csr(CVMX_GMXX_RXX_INT_REG(0, interface)));
- cvmx_write_csr(CVMX_GMXX_TX_INT_REG(interface),
- cvmx_read_csr(CVMX_GMXX_TX_INT_REG(interface)));
- cvmx_write_csr(CVMX_PCSXX_INT_REG(interface),
- cvmx_read_csr(CVMX_PCSXX_INT_REG(interface)));
-
- /* Wait for receive link */
- if (CVMX_WAIT_FOR_FIELD64
- (CVMX_PCSXX_STATUS1_REG(interface), union cvmx_pcsxx_status1_reg,
- rcv_lnk, ==, 1, 10000))
- return -1;
- if (CVMX_WAIT_FOR_FIELD64
- (CVMX_PCSXX_STATUS2_REG(interface), union cvmx_pcsxx_status2_reg,
- xmtflt, ==, 0, 10000))
- return -1;
- if (CVMX_WAIT_FOR_FIELD64
- (CVMX_PCSXX_STATUS2_REG(interface), union cvmx_pcsxx_status2_reg,
- rcvflt, ==, 0, 10000))
- return -1;
-
- cvmx_write_csr(CVMX_GMXX_RXX_INT_EN(0, interface), gmx_rx_int_en.u64);
- cvmx_write_csr(CVMX_GMXX_TX_INT_EN(interface), gmx_tx_int_en.u64);
- cvmx_write_csr(CVMX_PCSXX_INT_EN_REG(interface), pcsx_int_en_reg.u64);
-
- cvmx_helper_link_autoconf(cvmx_helper_get_ipd_port(interface, 0));
-
- /* (8) Enable packet reception */
- xauiMiscCtl.s.gmxeno = 0;
- cvmx_write_csr(CVMX_PCSXX_MISC_CTL_REG(interface), xauiMiscCtl.u64);
-
- gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(0, interface));
- gmx_cfg.s.en = 1;
- cvmx_write_csr(CVMX_GMXX_PRTX_CFG(0, interface), gmx_cfg.u64);
-
- __cvmx_interrupt_pcsx_intx_en_reg_enable(0, interface);
- __cvmx_interrupt_pcsx_intx_en_reg_enable(1, interface);
- __cvmx_interrupt_pcsx_intx_en_reg_enable(2, interface);
- __cvmx_interrupt_pcsx_intx_en_reg_enable(3, interface);
- __cvmx_interrupt_pcsxx_int_en_reg_enable(interface);
- __cvmx_interrupt_gmxx_enable(interface);
-
- return 0;
-}
-
-/**
- * Return the link state of an IPD/PKO port as returned by
- * auto negotiation. The result of this function may not match
- * Octeon's link config if auto negotiation has changed since
- * the last call to cvmx_helper_link_set().
- *
- * @ipd_port: IPD/PKO port to query
- *
- * Returns Link state
- */
-cvmx_helper_link_info_t __cvmx_helper_xaui_link_get(int ipd_port)
-{
- int interface = cvmx_helper_get_interface_num(ipd_port);
- union cvmx_gmxx_tx_xaui_ctl gmxx_tx_xaui_ctl;
- union cvmx_gmxx_rx_xaui_ctl gmxx_rx_xaui_ctl;
- union cvmx_pcsxx_status1_reg pcsxx_status1_reg;
- cvmx_helper_link_info_t result;
-
- gmxx_tx_xaui_ctl.u64 = cvmx_read_csr(CVMX_GMXX_TX_XAUI_CTL(interface));
- gmxx_rx_xaui_ctl.u64 = cvmx_read_csr(CVMX_GMXX_RX_XAUI_CTL(interface));
- pcsxx_status1_reg.u64 =
- cvmx_read_csr(CVMX_PCSXX_STATUS1_REG(interface));
- result.u64 = 0;
-
- /* Only return a link if both RX and TX are happy */
- if ((gmxx_tx_xaui_ctl.s.ls == 0) && (gmxx_rx_xaui_ctl.s.status == 0) &&
- (pcsxx_status1_reg.s.rcv_lnk == 1)) {
- result.s.link_up = 1;
- result.s.full_duplex = 1;
- result.s.speed = 10000;
- } else {
- /* Disable GMX and PCSX interrupts. */
- cvmx_write_csr(CVMX_GMXX_RXX_INT_EN(0, interface), 0x0);
- cvmx_write_csr(CVMX_GMXX_TX_INT_EN(interface), 0x0);
- cvmx_write_csr(CVMX_PCSXX_INT_EN_REG(interface), 0x0);
- }
- return result;
-}
-
-/**
- * Configure an IPD/PKO port for the specified link state. This
- * function does not influence auto negotiation at the PHY level.
- * The passed link state must always match the link state returned
- * by cvmx_helper_link_get(). It is normally best to use
- * cvmx_helper_link_autoconf() instead.
- *
- * @ipd_port: IPD/PKO port to configure
- * @link_info: The new link state
- *
- * Returns Zero on success, negative on failure
- */
-int __cvmx_helper_xaui_link_set(int ipd_port, cvmx_helper_link_info_t link_info)
-{
- int interface = cvmx_helper_get_interface_num(ipd_port);
- union cvmx_gmxx_tx_xaui_ctl gmxx_tx_xaui_ctl;
- union cvmx_gmxx_rx_xaui_ctl gmxx_rx_xaui_ctl;
-
- gmxx_tx_xaui_ctl.u64 = cvmx_read_csr(CVMX_GMXX_TX_XAUI_CTL(interface));
- gmxx_rx_xaui_ctl.u64 = cvmx_read_csr(CVMX_GMXX_RX_XAUI_CTL(interface));
-
- /* If the link shouldn't be up, then just return */
- if (!link_info.s.link_up)
- return 0;
-
- /* Do nothing if both RX and TX are happy */
- if ((gmxx_tx_xaui_ctl.s.ls == 0) && (gmxx_rx_xaui_ctl.s.status == 0))
- return 0;
-
- /* Bring the link up */
- return __cvmx_helper_xaui_enable(interface);
-}
-
-/**
- * Configure a port for internal and/or external loopback. Internal loopback
- * causes packets sent by the port to be received by Octeon. External loopback
- * causes packets received from the wire to be sent out again.
- *
- * @ipd_port: IPD/PKO port to loopback.
- * @enable_internal:
- * Non zero if you want internal loopback
- * @enable_external:
- * Non zero if you want external loopback
- *
- * Returns Zero on success, negative on failure.
- */
-extern int __cvmx_helper_xaui_configure_loopback(int ipd_port,
- int enable_internal,
- int enable_external)
-{
- int interface = cvmx_helper_get_interface_num(ipd_port);
- union cvmx_pcsxx_control1_reg pcsxx_control1_reg;
- union cvmx_gmxx_xaui_ext_loopback gmxx_xaui_ext_loopback;
-
- /* Set the internal loop */
- pcsxx_control1_reg.u64 =
- cvmx_read_csr(CVMX_PCSXX_CONTROL1_REG(interface));
- pcsxx_control1_reg.s.loopbck1 = enable_internal;
- cvmx_write_csr(CVMX_PCSXX_CONTROL1_REG(interface),
- pcsxx_control1_reg.u64);
-
- /* Set the external loop */
- gmxx_xaui_ext_loopback.u64 =
- cvmx_read_csr(CVMX_GMXX_XAUI_EXT_LOOPBACK(interface));
- gmxx_xaui_ext_loopback.s.en = enable_external;
- cvmx_write_csr(CVMX_GMXX_XAUI_EXT_LOOPBACK(interface),
- gmxx_xaui_ext_loopback.u64);
-
- /* Take the link through a reset */
- return __cvmx_helper_xaui_enable(interface);
-}
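In practice a driver reaches the XAUI loopback routine above through the generic cvmx_helper_configure_loopback() wrapper in cvmx-helper.c rather than calling it directly. A hedged sketch of that usage (assumed, not taken from this tree):

/* Put the first port of a XAUI interface into internal loopback so that
 * transmitted packets are received back by Octeon itself. */
static int example_xaui_internal_loopback(int interface)
{
	int ipd_port = cvmx_helper_get_ipd_port(interface, 0);

	/* internal loopback on, external loopback off */
	return cvmx_helper_configure_loopback(ipd_port, 1, 0);
}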
diff --git a/drivers/staging/octeon/cvmx-helper-xaui.h b/drivers/staging/octeon/cvmx-helper-xaui.h
deleted file mode 100644
index 4b4db2f93cd4..000000000000
--- a/drivers/staging/octeon/cvmx-helper-xaui.h
+++ /dev/null
@@ -1,103 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/**
- * @file
- *
- * Functions for XAUI initialization, configuration,
- * and monitoring.
- *
- */
-#ifndef __CVMX_HELPER_XAUI_H__
-#define __CVMX_HELPER_XAUI_H__
-
-/**
- * Probe a XAUI interface and determine the number of ports
- * connected to it. The XAUI interface should still be down
- * after this call.
- *
- * @interface: Interface to probe
- *
- * Returns Number of ports on the interface. Zero to disable.
- */
-extern int __cvmx_helper_xaui_probe(int interface);
-
-/**
- * Bring up and enable a XAUI interface. After this call packet
- * I/O should be fully functional. This is called with IPD
- * enabled but PKO disabled.
- *
- * @interface: Interface to bring up
- *
- * Returns Zero on success, negative on failure
- */
-extern int __cvmx_helper_xaui_enable(int interface);
-
-/**
- * Return the link state of an IPD/PKO port as returned by
- * auto negotiation. The result of this function may not match
- * Octeon's link config if auto negotiation has changed since
- * the last call to cvmx_helper_link_set().
- *
- * @ipd_port: IPD/PKO port to query
- *
- * Returns Link state
- */
-extern cvmx_helper_link_info_t __cvmx_helper_xaui_link_get(int ipd_port);
-
-/**
- * Configure an IPD/PKO port for the specified link state. This
- * function does not influence auto negotiation at the PHY level.
- * The passed link state must always match the link state returned
- * by cvmx_helper_link_get(). It is normally best to use
- * cvmx_helper_link_autoconf() instead.
- *
- * @ipd_port: IPD/PKO port to configure
- * @link_info: The new link state
- *
- * Returns Zero on success, negative on failure
- */
-extern int __cvmx_helper_xaui_link_set(int ipd_port,
- cvmx_helper_link_info_t link_info);
-
-/**
- * Configure a port for internal and/or external loopback. Internal loopback
- * causes packets sent by the port to be received by Octeon. External loopback
- * causes packets received from the wire to be sent out again.
- *
- * @ipd_port: IPD/PKO port to loopback.
- * @enable_internal:
- * Non zero if you want internal loopback
- * @enable_external:
- * Non zero if you want external loopback
- *
- * Returns Zero on success, negative on failure.
- */
-extern int __cvmx_helper_xaui_configure_loopback(int ipd_port,
- int enable_internal,
- int enable_external);
-#endif
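Taken together, the declarations in this header imply a simple bring-up order: probe, enable, then poll the autonegotiated link. A sketch under the assumption that IPD is already enabled and PKO is still disabled, as __cvmx_helper_xaui_enable() requires:

/* Rough XAUI bring-up sequence suggested by the header above. */
static int example_xaui_bringup(int interface)
{
	cvmx_helper_link_info_t link;

	if (__cvmx_helper_xaui_probe(interface) <= 0)
		return -1;	/* nothing usable on this interface */

	if (__cvmx_helper_xaui_enable(interface))
		return -1;

	link = __cvmx_helper_xaui_link_get(cvmx_helper_get_ipd_port(interface, 0));
	return link.s.link_up ? 0 : -1;
}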
diff --git a/drivers/staging/octeon/cvmx-helper.c b/drivers/staging/octeon/cvmx-helper.c
deleted file mode 100644
index e9c5c836ceff..000000000000
--- a/drivers/staging/octeon/cvmx-helper.c
+++ /dev/null
@@ -1,1058 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/*
- *
- * Helper functions for common, but complicated tasks.
- *
- */
-#include <asm/octeon/octeon.h>
-
-#include "cvmx-config.h"
-
-#include "cvmx-fpa.h"
-#include "cvmx-pip.h"
-#include "cvmx-pko.h"
-#include "cvmx-ipd.h"
-#include "cvmx-spi.h"
-#include "cvmx-helper.h"
-#include "cvmx-helper-board.h"
-
-#include "cvmx-pip-defs.h"
-#include "cvmx-smix-defs.h"
-#include "cvmx-asxx-defs.h"
-
-/**
- * cvmx_override_pko_queue_priority(int ipd_port, uint64_t
- * priorities[16]) is a function pointer. It is meant to allow
- * customization of the PKO queue priorities based on the port
- * number. Users should set this pointer to a function before
- * calling any cvmx-helper operations.
- */
-void (*cvmx_override_pko_queue_priority) (int pko_port,
- uint64_t priorities[16]);
-
-/**
- * cvmx_override_ipd_port_setup(int ipd_port) is a function
- * pointer. It is meant to allow customization of the IPD port
- * setup before packet input/output comes online. It is called
- * after cvmx-helper does the default IPD configuration, but
- * before IPD is enabled. Users should set this pointer to a
- * function before calling any cvmx-helper operations.
- */
-void (*cvmx_override_ipd_port_setup) (int ipd_port);
-
-/* Port count per interface */
-static int interface_port_count[4] = { 0, 0, 0, 0 };
-
-/* Port last configured link info index by IPD/PKO port */
-static cvmx_helper_link_info_t
- port_link_info[CVMX_PIP_NUM_INPUT_PORTS];
-
-/**
- * Return the number of interfaces the chip has. Each interface
- * may have multiple ports. Most chips support two interfaces,
- * but the CNX0XX and CNX1XX are exceptions. These only support
- * one interface.
- *
- * Returns Number of interfaces on chip
- */
-int cvmx_helper_get_number_of_interfaces(void)
-{
- if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN52XX))
- return 4;
- else
- return 3;
-}
-
-/**
- * Return the number of ports on an interface. Depending on the
- * chip and configuration, this can be 1-16. A value of 0
- * specifies that the interface doesn't exist or isn't usable.
- *
- * @interface: Interface to get the port count for
- *
- * Returns Number of ports on interface. Can be Zero.
- */
-int cvmx_helper_ports_on_interface(int interface)
-{
- return interface_port_count[interface];
-}
-
-/**
- * Get the operating mode of an interface. Depending on the Octeon
- * chip and configuration, this function returns an enumeration
- * of the type of packet I/O supported by an interface.
- *
- * @interface: Interface to probe
- *
- * Returns Mode of the interface. Unknown or unsupported interfaces return
- * DISABLED.
- */
-cvmx_helper_interface_mode_t cvmx_helper_interface_get_mode(int interface)
-{
- union cvmx_gmxx_inf_mode mode;
- if (interface == 2)
- return CVMX_HELPER_INTERFACE_MODE_NPI;
-
- if (interface == 3) {
- if (OCTEON_IS_MODEL(OCTEON_CN56XX)
- || OCTEON_IS_MODEL(OCTEON_CN52XX))
- return CVMX_HELPER_INTERFACE_MODE_LOOP;
- else
- return CVMX_HELPER_INTERFACE_MODE_DISABLED;
- }
-
- if (interface == 0
- && cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_CN3005_EVB_HS5
- && cvmx_sysinfo_get()->board_rev_major == 1) {
- /*
- * Lie about interface type of CN3005 board. This
- * board has a switch on port 1 like the other
- * evaluation boards, but it is connected over RGMII
- * instead of GMII. Report GMII mode so that the
- * speed is forced to 1 Gbit full duplex. Other than
- * some initial configuration (which does not use the
- * output of this function) there is no difference in
- * setup between GMII and RGMII modes.
- */
- return CVMX_HELPER_INTERFACE_MODE_GMII;
- }
-
- /* Interface 1 is always disabled on CN31XX and CN30XX */
- if ((interface == 1)
- && (OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN30XX)
- || OCTEON_IS_MODEL(OCTEON_CN50XX)
- || OCTEON_IS_MODEL(OCTEON_CN52XX)))
- return CVMX_HELPER_INTERFACE_MODE_DISABLED;
-
- mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
-
- if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN52XX)) {
- switch (mode.cn56xx.mode) {
- case 0:
- return CVMX_HELPER_INTERFACE_MODE_DISABLED;
- case 1:
- return CVMX_HELPER_INTERFACE_MODE_XAUI;
- case 2:
- return CVMX_HELPER_INTERFACE_MODE_SGMII;
- case 3:
- return CVMX_HELPER_INTERFACE_MODE_PICMG;
- default:
- return CVMX_HELPER_INTERFACE_MODE_DISABLED;
- }
- } else {
- if (!mode.s.en)
- return CVMX_HELPER_INTERFACE_MODE_DISABLED;
-
- if (mode.s.type) {
- if (OCTEON_IS_MODEL(OCTEON_CN38XX)
- || OCTEON_IS_MODEL(OCTEON_CN58XX))
- return CVMX_HELPER_INTERFACE_MODE_SPI;
- else
- return CVMX_HELPER_INTERFACE_MODE_GMII;
- } else
- return CVMX_HELPER_INTERFACE_MODE_RGMII;
- }
-}
-
-/**
- * Configure the IPD/PIP tagging and QoS options for a specific
- * port. This function determines the POW work queue entry
- * contents for a port. The setup performed here is controlled by
- * the defines in executive-config.h.
- *
- * @ipd_port: Port to configure. This follows the IPD numbering, not the
- * per interface numbering
- *
- * Returns Zero on success, negative on failure
- */
-static int __cvmx_helper_port_setup_ipd(int ipd_port)
-{
- union cvmx_pip_prt_cfgx port_config;
- union cvmx_pip_prt_tagx tag_config;
-
- port_config.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(ipd_port));
- tag_config.u64 = cvmx_read_csr(CVMX_PIP_PRT_TAGX(ipd_port));
-
- /* Have each port go to a different POW queue */
- port_config.s.qos = ipd_port & 0x7;
-
- /* Process the headers and place the IP header in the work queue */
- port_config.s.mode = CVMX_HELPER_INPUT_PORT_SKIP_MODE;
-
- tag_config.s.ip6_src_flag = CVMX_HELPER_INPUT_TAG_IPV6_SRC_IP;
- tag_config.s.ip6_dst_flag = CVMX_HELPER_INPUT_TAG_IPV6_DST_IP;
- tag_config.s.ip6_sprt_flag = CVMX_HELPER_INPUT_TAG_IPV6_SRC_PORT;
- tag_config.s.ip6_dprt_flag = CVMX_HELPER_INPUT_TAG_IPV6_DST_PORT;
- tag_config.s.ip6_nxth_flag = CVMX_HELPER_INPUT_TAG_IPV6_NEXT_HEADER;
- tag_config.s.ip4_src_flag = CVMX_HELPER_INPUT_TAG_IPV4_SRC_IP;
- tag_config.s.ip4_dst_flag = CVMX_HELPER_INPUT_TAG_IPV4_DST_IP;
- tag_config.s.ip4_sprt_flag = CVMX_HELPER_INPUT_TAG_IPV4_SRC_PORT;
- tag_config.s.ip4_dprt_flag = CVMX_HELPER_INPUT_TAG_IPV4_DST_PORT;
- tag_config.s.ip4_pctl_flag = CVMX_HELPER_INPUT_TAG_IPV4_PROTOCOL;
- tag_config.s.inc_prt_flag = CVMX_HELPER_INPUT_TAG_INPUT_PORT;
- tag_config.s.tcp6_tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
- tag_config.s.tcp4_tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
- tag_config.s.ip6_tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
- tag_config.s.ip4_tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
- tag_config.s.non_tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
- /* Put all packets in group 0. Other groups can be used by the app */
- tag_config.s.grp = 0;
-
- cvmx_pip_config_port(ipd_port, port_config, tag_config);
-
- /* Give the user a chance to override our setting for each port */
- if (cvmx_override_ipd_port_setup)
- cvmx_override_ipd_port_setup(ipd_port);
-
- return 0;
-}
-
-/**
- * This function probes an interface to determine the actual
- * number of hardware ports connected to it. It doesn't set up the
- * ports or enable them. The main goal here is to set the global
- * interface_port_count[interface] correctly. Hardware setup of the
- * ports will be performed later.
- *
- * @interface: Interface to probe
- *
- * Returns Zero on success, negative on failure
- */
-int cvmx_helper_interface_probe(int interface)
-{
- /* At this stage in the game we don't want packets to be moving yet.
- The following probe calls should perform hardware setup
- needed to determine port counts. Receive must still be disabled */
- switch (cvmx_helper_interface_get_mode(interface)) {
- /* These types don't support ports to IPD/PKO */
- case CVMX_HELPER_INTERFACE_MODE_DISABLED:
- case CVMX_HELPER_INTERFACE_MODE_PCIE:
- interface_port_count[interface] = 0;
- break;
- /* XAUI is a single high speed port */
- case CVMX_HELPER_INTERFACE_MODE_XAUI:
- interface_port_count[interface] =
- __cvmx_helper_xaui_probe(interface);
- break;
- /*
- * RGMII/GMII/MII are all treated about the same. Most
- * functions refer to these ports as RGMII.
- */
- case CVMX_HELPER_INTERFACE_MODE_RGMII:
- case CVMX_HELPER_INTERFACE_MODE_GMII:
- interface_port_count[interface] =
- __cvmx_helper_rgmii_probe(interface);
- break;
- /*
- * SPI4 can have 1-16 ports depending on the device at
- * the other end.
- */
- case CVMX_HELPER_INTERFACE_MODE_SPI:
- interface_port_count[interface] =
- __cvmx_helper_spi_probe(interface);
- break;
- /*
- * SGMII can have 1-4 ports depending on how many are
- * hooked up.
- */
- case CVMX_HELPER_INTERFACE_MODE_SGMII:
- case CVMX_HELPER_INTERFACE_MODE_PICMG:
- interface_port_count[interface] =
- __cvmx_helper_sgmii_probe(interface);
- break;
- /* PCI target Network Packet Interface */
- case CVMX_HELPER_INTERFACE_MODE_NPI:
- interface_port_count[interface] =
- __cvmx_helper_npi_probe(interface);
- break;
- /*
- * Special loopback only ports. These are not the same
- * as other ports in loopback mode.
- */
- case CVMX_HELPER_INTERFACE_MODE_LOOP:
- interface_port_count[interface] =
- __cvmx_helper_loop_probe(interface);
- break;
- }
-
- interface_port_count[interface] =
- __cvmx_helper_board_interface_probe(interface,
- interface_port_count
- [interface]);
-
- /* Make sure all global variables propagate to other cores */
- CVMX_SYNCWS;
-
- return 0;
-}
-
-/**
- * Setup the IPD/PIP for the ports on an interface. Packet
- * classification and tagging are set for every port on the
- * interface. The number of ports on the interface must already
- * have been probed.
- *
- * @interface: Interface to setup IPD/PIP for
- *
- * Returns Zero on success, negative on failure
- */
-static int __cvmx_helper_interface_setup_ipd(int interface)
-{
- int ipd_port = cvmx_helper_get_ipd_port(interface, 0);
- int num_ports = interface_port_count[interface];
-
- while (num_ports--) {
- __cvmx_helper_port_setup_ipd(ipd_port);
- ipd_port++;
- }
- return 0;
-}
-
-/**
- * Setup global setting for IPD/PIP not related to a specific
- * interface or port. This must be called before IPD is enabled.
- *
- * Returns Zero on success, negative on failure.
- */
-static int __cvmx_helper_global_setup_ipd(void)
-{
- /* Setup the global packet input options */
- cvmx_ipd_config(CVMX_FPA_PACKET_POOL_SIZE / 8,
- CVMX_HELPER_FIRST_MBUFF_SKIP / 8,
- CVMX_HELPER_NOT_FIRST_MBUFF_SKIP / 8,
- /* The +8 is to account for the next ptr */
- (CVMX_HELPER_FIRST_MBUFF_SKIP + 8) / 128,
- /* The +8 is to account for the next ptr */
- (CVMX_HELPER_NOT_FIRST_MBUFF_SKIP + 8) / 128,
- CVMX_FPA_WQE_POOL,
- CVMX_IPD_OPC_MODE_STT,
- CVMX_HELPER_ENABLE_BACK_PRESSURE);
- return 0;
-}
-
-/**
- * Setup the PKO for the ports on an interface. The number of
- * queues per port and the priority of each PKO output queue
- * is set here. PKO must be disabled when this function is called.
- *
- * @interface: Interface to setup PKO for
- *
- * Returns Zero on success, negative on failure
- */
-static int __cvmx_helper_interface_setup_pko(int interface)
-{
- /*
- * Each packet output queue has an associated priority. The
- * higher the priority, the more often it can send a packet. A
- * priority of 8 means it can send in all 8 rounds of
- * contention. We're going to make each queue one less than
- * the last. The vector of priorities has been extended to
- * support CN5xxx CPUs, where up to 16 queues can be
- * associated to a port. To keep backward compatibility we
- * don't change the initial 8 priorities and replicate them in
- * the second half. With per-core PKO queues (PKO lockless
- * operation) all queues have the same priority.
- */
- uint64_t priorities[16] =
- { 8, 7, 6, 5, 4, 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, 1 };
-
- /*
- * Setup the IPD/PIP and PKO for the ports discovered
- * above. Here packet classification, tagging and output
- * priorities are set.
- */
- int ipd_port = cvmx_helper_get_ipd_port(interface, 0);
- int num_ports = interface_port_count[interface];
- while (num_ports--) {
- /*
- * Give the user a chance to override the per queue
- * priorities.
- */
- if (cvmx_override_pko_queue_priority)
- cvmx_override_pko_queue_priority(ipd_port, priorities);
-
- cvmx_pko_config_port(ipd_port,
- cvmx_pko_get_base_queue_per_core(ipd_port,
- 0),
- cvmx_pko_get_num_queues(ipd_port),
- priorities);
- ipd_port++;
- }
- return 0;
-}
-
-/**
- * Setup global setting for PKO not related to a specific
- * interface or port. This must be called before PKO is enabled.
- *
- * Returns Zero on success, negative on failure.
- */
-static int __cvmx_helper_global_setup_pko(void)
-{
- /*
- * Disable tagwait FAU timeout. This needs to be done before
- * anyone might start packet output using tags.
- */
- union cvmx_iob_fau_timeout fau_to;
- fau_to.u64 = 0;
- fau_to.s.tout_val = 0xfff;
- fau_to.s.tout_enb = 0;
- cvmx_write_csr(CVMX_IOB_FAU_TIMEOUT, fau_to.u64);
- return 0;
-}
-
-/**
- * Setup global backpressure setting.
- *
- * Returns Zero on success, negative on failure
- */
-static int __cvmx_helper_global_setup_backpressure(void)
-{
-#if CVMX_HELPER_DISABLE_RGMII_BACKPRESSURE
- /* Disable backpressure if configured to do so */
- /* Disable backpressure (pause frame) generation */
- int num_interfaces = cvmx_helper_get_number_of_interfaces();
- int interface;
- for (interface = 0; interface < num_interfaces; interface++) {
- switch (cvmx_helper_interface_get_mode(interface)) {
- case CVMX_HELPER_INTERFACE_MODE_DISABLED:
- case CVMX_HELPER_INTERFACE_MODE_PCIE:
- case CVMX_HELPER_INTERFACE_MODE_NPI:
- case CVMX_HELPER_INTERFACE_MODE_LOOP:
- case CVMX_HELPER_INTERFACE_MODE_XAUI:
- break;
- case CVMX_HELPER_INTERFACE_MODE_RGMII:
- case CVMX_HELPER_INTERFACE_MODE_GMII:
- case CVMX_HELPER_INTERFACE_MODE_SPI:
- case CVMX_HELPER_INTERFACE_MODE_SGMII:
- case CVMX_HELPER_INTERFACE_MODE_PICMG:
- cvmx_gmx_set_backpressure_override(interface, 0xf);
- break;
- }
- }
-#endif
-
- return 0;
-}
-
-/**
- * Enable packet input/output from the hardware. This function is
- * called after all internal setup is complete and IPD is enabled.
- * After this function completes, packets will be accepted from the
- * hardware ports. PKO should still be disabled to make sure packets
- * aren't sent out of partially set up hardware.
- *
- * @interface: Interface to enable
- *
- * Returns Zero on success, negative on failure
- */
-static int __cvmx_helper_packet_hardware_enable(int interface)
-{
- int result = 0;
- switch (cvmx_helper_interface_get_mode(interface)) {
- /* These types don't support ports to IPD/PKO */
- case CVMX_HELPER_INTERFACE_MODE_DISABLED:
- case CVMX_HELPER_INTERFACE_MODE_PCIE:
- /* Nothing to do */
- break;
- /* XAUI is a single high speed port */
- case CVMX_HELPER_INTERFACE_MODE_XAUI:
- result = __cvmx_helper_xaui_enable(interface);
- break;
- /*
- * RGMII/GMII/MII are all treated about the same. Most
- * functions refer to these ports as RGMII
- */
- case CVMX_HELPER_INTERFACE_MODE_RGMII:
- case CVMX_HELPER_INTERFACE_MODE_GMII:
- result = __cvmx_helper_rgmii_enable(interface);
- break;
- /*
- * SPI4 can have 1-16 ports depending on the device at
- * the other end
- */
- case CVMX_HELPER_INTERFACE_MODE_SPI:
- result = __cvmx_helper_spi_enable(interface);
- break;
- /*
- * SGMII can have 1-4 ports depending on how many are
- * hooked up
- */
- case CVMX_HELPER_INTERFACE_MODE_SGMII:
- case CVMX_HELPER_INTERFACE_MODE_PICMG:
- result = __cvmx_helper_sgmii_enable(interface);
- break;
- /* PCI target Network Packet Interface */
- case CVMX_HELPER_INTERFACE_MODE_NPI:
- result = __cvmx_helper_npi_enable(interface);
- break;
- /*
- * Special loopback only ports. These are not the same
- * as other ports in loopback mode
- */
- case CVMX_HELPER_INTERFACE_MODE_LOOP:
- result = __cvmx_helper_loop_enable(interface);
- break;
- }
- result |= __cvmx_helper_board_hardware_enable(interface);
- return result;
-}
-
-/**
- * Function to adjust internal IPD pointer alignments
- *
- * Returns 0 on success
- * !0 on failure
- */
-int __cvmx_helper_errata_fix_ipd_ptr_alignment(void)
-{
-#define FIX_IPD_FIRST_BUFF_PAYLOAD_BYTES \
- (CVMX_FPA_PACKET_POOL_SIZE-8-CVMX_HELPER_FIRST_MBUFF_SKIP)
-#define FIX_IPD_NON_FIRST_BUFF_PAYLOAD_BYTES \
- (CVMX_FPA_PACKET_POOL_SIZE-8-CVMX_HELPER_NOT_FIRST_MBUFF_SKIP)
-#define FIX_IPD_OUTPORT 0
- /* Ports 0-15 are interface 0, 16-31 are interface 1 */
-#define INTERFACE(port) (port >> 4)
-#define INDEX(port) (port & 0xf)
- uint64_t *p64;
- cvmx_pko_command_word0_t pko_command;
- union cvmx_buf_ptr g_buffer, pkt_buffer;
- cvmx_wqe_t *work;
- int size, num_segs = 0, wqe_pcnt, pkt_pcnt;
- union cvmx_gmxx_prtx_cfg gmx_cfg;
- int retry_cnt;
- int retry_loop_cnt;
- int mtu;
- int i;
- cvmx_helper_link_info_t link_info;
-
- /* Save values for restore at end */
- uint64_t prtx_cfg =
- cvmx_read_csr(CVMX_GMXX_PRTX_CFG
- (INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT)));
- uint64_t tx_ptr_en =
- cvmx_read_csr(CVMX_ASXX_TX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)));
- uint64_t rx_ptr_en =
- cvmx_read_csr(CVMX_ASXX_RX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)));
- uint64_t rxx_jabber =
- cvmx_read_csr(CVMX_GMXX_RXX_JABBER
- (INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT)));
- uint64_t frame_max =
- cvmx_read_csr(CVMX_GMXX_RXX_FRM_MAX
- (INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT)));
-
- /* Configure port to gig FDX as required for loopback mode */
- cvmx_helper_rgmii_internal_loopback(FIX_IPD_OUTPORT);
-
- /*
- * Disable reception on all ports so if traffic is present it
- * will not interfere.
- */
- cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)), 0);
-
- cvmx_wait(100000000ull);
-
- for (retry_loop_cnt = 0; retry_loop_cnt < 10; retry_loop_cnt++) {
- retry_cnt = 100000;
- wqe_pcnt = cvmx_read_csr(CVMX_IPD_PTR_COUNT);
- pkt_pcnt = (wqe_pcnt >> 7) & 0x7f;
- wqe_pcnt &= 0x7f;
-
- num_segs = (2 + pkt_pcnt - wqe_pcnt) & 3;
-
- if (num_segs == 0)
- goto fix_ipd_exit;
-
- num_segs += 1;
-
- size =
- FIX_IPD_FIRST_BUFF_PAYLOAD_BYTES +
- ((num_segs - 1) * FIX_IPD_NON_FIRST_BUFF_PAYLOAD_BYTES) -
- (FIX_IPD_NON_FIRST_BUFF_PAYLOAD_BYTES / 2);
-
- cvmx_write_csr(CVMX_ASXX_PRT_LOOP(INTERFACE(FIX_IPD_OUTPORT)),
- 1 << INDEX(FIX_IPD_OUTPORT));
- CVMX_SYNC;
-
- g_buffer.u64 = 0;
- g_buffer.s.addr =
- cvmx_ptr_to_phys(cvmx_fpa_alloc(CVMX_FPA_WQE_POOL));
- if (g_buffer.s.addr == 0) {
- cvmx_dprintf("WARNING: FIX_IPD_PTR_ALIGNMENT "
- "buffer allocation failure.\n");
- goto fix_ipd_exit;
- }
-
- g_buffer.s.pool = CVMX_FPA_WQE_POOL;
- g_buffer.s.size = num_segs;
-
- pkt_buffer.u64 = 0;
- pkt_buffer.s.addr =
- cvmx_ptr_to_phys(cvmx_fpa_alloc(CVMX_FPA_PACKET_POOL));
- if (pkt_buffer.s.addr == 0) {
- cvmx_dprintf("WARNING: FIX_IPD_PTR_ALIGNMENT "
- "buffer allocation failure.\n");
- goto fix_ipd_exit;
- }
- pkt_buffer.s.i = 1;
- pkt_buffer.s.pool = CVMX_FPA_PACKET_POOL;
- pkt_buffer.s.size = FIX_IPD_FIRST_BUFF_PAYLOAD_BYTES;
-
- p64 = (uint64_t *) cvmx_phys_to_ptr(pkt_buffer.s.addr);
- p64[0] = 0xffffffffffff0000ull;
- p64[1] = 0x08004510ull;
- p64[2] = ((uint64_t) (size - 14) << 48) | 0x5ae740004000ull;
- p64[3] = 0x3a5fc0a81073c0a8ull;
-
- for (i = 0; i < num_segs; i++) {
- if (i > 0)
- pkt_buffer.s.size =
- FIX_IPD_NON_FIRST_BUFF_PAYLOAD_BYTES;
-
- if (i == (num_segs - 1))
- pkt_buffer.s.i = 0;
-
- *(uint64_t *) cvmx_phys_to_ptr(g_buffer.s.addr +
- 8 * i) = pkt_buffer.u64;
- }
-
- /* Build the PKO command */
- pko_command.u64 = 0;
- pko_command.s.segs = num_segs;
- pko_command.s.total_bytes = size;
- pko_command.s.dontfree = 0;
- pko_command.s.gather = 1;
-
- gmx_cfg.u64 =
- cvmx_read_csr(CVMX_GMXX_PRTX_CFG
- (INDEX(FIX_IPD_OUTPORT),
- INTERFACE(FIX_IPD_OUTPORT)));
- gmx_cfg.s.en = 1;
- cvmx_write_csr(CVMX_GMXX_PRTX_CFG
- (INDEX(FIX_IPD_OUTPORT),
- INTERFACE(FIX_IPD_OUTPORT)), gmx_cfg.u64);
- cvmx_write_csr(CVMX_ASXX_TX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)),
- 1 << INDEX(FIX_IPD_OUTPORT));
- cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)),
- 1 << INDEX(FIX_IPD_OUTPORT));
-
- mtu =
- cvmx_read_csr(CVMX_GMXX_RXX_JABBER
- (INDEX(FIX_IPD_OUTPORT),
- INTERFACE(FIX_IPD_OUTPORT)));
- cvmx_write_csr(CVMX_GMXX_RXX_JABBER
- (INDEX(FIX_IPD_OUTPORT),
- INTERFACE(FIX_IPD_OUTPORT)), 65392 - 14 - 4);
- cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX
- (INDEX(FIX_IPD_OUTPORT),
- INTERFACE(FIX_IPD_OUTPORT)), 65392 - 14 - 4);
-
- cvmx_pko_send_packet_prepare(FIX_IPD_OUTPORT,
- cvmx_pko_get_base_queue
- (FIX_IPD_OUTPORT),
- CVMX_PKO_LOCK_CMD_QUEUE);
- cvmx_pko_send_packet_finish(FIX_IPD_OUTPORT,
- cvmx_pko_get_base_queue
- (FIX_IPD_OUTPORT), pko_command,
- g_buffer, CVMX_PKO_LOCK_CMD_QUEUE);
-
- CVMX_SYNC;
-
- do {
- work = cvmx_pow_work_request_sync(CVMX_POW_WAIT);
- retry_cnt--;
- } while ((work == NULL) && (retry_cnt > 0));
-
- if (!retry_cnt)
- cvmx_dprintf("WARNING: FIX_IPD_PTR_ALIGNMENT "
- "get_work() timeout occurred.\n");
-
- /* Free packet */
- if (work)
- cvmx_helper_free_packet_data(work);
- }
-
-fix_ipd_exit:
-
- /* Return CSR configs to saved values */
- cvmx_write_csr(CVMX_GMXX_PRTX_CFG
- (INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT)),
- prtx_cfg);
- cvmx_write_csr(CVMX_ASXX_TX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)),
- tx_ptr_en);
- cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)),
- rx_ptr_en);
- cvmx_write_csr(CVMX_GMXX_RXX_JABBER
- (INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT)),
- rxx_jabber);
- cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX
- (INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT)),
- frame_max);
- cvmx_write_csr(CVMX_ASXX_PRT_LOOP(INTERFACE(FIX_IPD_OUTPORT)), 0);
- /* Set link to down so autonegotiation will set it up again */
- link_info.u64 = 0;
- cvmx_helper_link_set(FIX_IPD_OUTPORT, link_info);
-
- /*
- * Bring the link back up as autonegotiation is not done in
- * user applications.
- */
- cvmx_helper_link_autoconf(FIX_IPD_OUTPORT);
-
- CVMX_SYNC;
- if (num_segs)
- cvmx_dprintf("WARNING: FIX_IPD_PTR_ALIGNMENT failed.\n");
-
- return !!num_segs;
-
-}
-
-/**
- * Called after all internal packet IO paths are setup. This
- * function enables IPD/PIP and begins packet input and output.
- *
- * Returns Zero on success, negative on failure
- */
-int cvmx_helper_ipd_and_packet_input_enable(void)
-{
- int num_interfaces;
- int interface;
-
- /* Enable IPD */
- cvmx_ipd_enable();
-
- /*
- * Time to enable hardware ports packet input and output. Note
- * that at this point IPD/PIP must be fully functional and PKO
- * must be disabled
- */
- num_interfaces = cvmx_helper_get_number_of_interfaces();
- for (interface = 0; interface < num_interfaces; interface++) {
- if (cvmx_helper_ports_on_interface(interface) > 0)
- __cvmx_helper_packet_hardware_enable(interface);
- }
-
- /* Finally enable PKO now that the entire path is up and running */
- cvmx_pko_enable();
-
- if ((OCTEON_IS_MODEL(OCTEON_CN31XX_PASS1)
- || OCTEON_IS_MODEL(OCTEON_CN30XX_PASS1))
- && (cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM))
- __cvmx_helper_errata_fix_ipd_ptr_alignment();
- return 0;
-}
-
-/**
- * Initialize the PIP, IPD, and PKO hardware to support
- * simple priority based queues for the ethernet ports. Each
- * port is configured with a number of priority queues based
- * on CVMX_PKO_QUEUES_PER_PORT_* where each queue is lower
- * priority than the previous.
- *
- * Returns Zero on success, non-zero on failure
- */
-int cvmx_helper_initialize_packet_io_global(void)
-{
- int result = 0;
- int interface;
- union cvmx_l2c_cfg l2c_cfg;
- union cvmx_smix_en smix_en;
- const int num_interfaces = cvmx_helper_get_number_of_interfaces();
-
- /*
- * CN52XX pass 1: Due to a bug in 2nd order CDR, it needs to
- * be disabled.
- */
- if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_0))
- __cvmx_helper_errata_qlm_disable_2nd_order_cdr(1);
-
- /*
- * Tell L2 to give the IOB statically higher priority compared
- * to the cores. This avoids conditions where IO blocks might
- * be starved under very high L2 loads.
- */
- l2c_cfg.u64 = cvmx_read_csr(CVMX_L2C_CFG);
- l2c_cfg.s.lrf_arb_mode = 0;
- l2c_cfg.s.rfb_arb_mode = 0;
- cvmx_write_csr(CVMX_L2C_CFG, l2c_cfg.u64);
-
- /* Make sure SMI/MDIO is enabled so we can query PHYs */
- smix_en.u64 = cvmx_read_csr(CVMX_SMIX_EN(0));
- if (!smix_en.s.en) {
- smix_en.s.en = 1;
- cvmx_write_csr(CVMX_SMIX_EN(0), smix_en.u64);
- }
-
- /* Newer chips actually have two SMI/MDIO interfaces */
- if (!OCTEON_IS_MODEL(OCTEON_CN3XXX) &&
- !OCTEON_IS_MODEL(OCTEON_CN58XX) &&
- !OCTEON_IS_MODEL(OCTEON_CN50XX)) {
- smix_en.u64 = cvmx_read_csr(CVMX_SMIX_EN(1));
- if (!smix_en.s.en) {
- smix_en.s.en = 1;
- cvmx_write_csr(CVMX_SMIX_EN(1), smix_en.u64);
- }
- }
-
- cvmx_pko_initialize_global();
- for (interface = 0; interface < num_interfaces; interface++) {
- result |= cvmx_helper_interface_probe(interface);
- if (cvmx_helper_ports_on_interface(interface) > 0)
- cvmx_dprintf("Interface %d has %d ports (%s)\n",
- interface,
- cvmx_helper_ports_on_interface(interface),
- cvmx_helper_interface_mode_to_string
- (cvmx_helper_interface_get_mode
- (interface)));
- result |= __cvmx_helper_interface_setup_ipd(interface);
- result |= __cvmx_helper_interface_setup_pko(interface);
- }
-
- result |= __cvmx_helper_global_setup_ipd();
- result |= __cvmx_helper_global_setup_pko();
-
- /* Enable any flow control and backpressure */
- result |= __cvmx_helper_global_setup_backpressure();
-
-#if CVMX_HELPER_ENABLE_IPD
- result |= cvmx_helper_ipd_and_packet_input_enable();
-#endif
- return result;
-}
-
-/**
- * Does core local initialization for packet io
- *
- * Returns Zero on success, non-zero on failure
- */
-int cvmx_helper_initialize_packet_io_local(void)
-{
- return cvmx_pko_initialize_local();
-}
-
-/**
- * Auto configure an IPD/PKO port link state and speed. This
- * function basically does the equivalent of:
- * cvmx_helper_link_set(ipd_port, cvmx_helper_link_get(ipd_port));
- *
- * @ipd_port: IPD/PKO port to auto configure
- *
- * Returns Link state after configure
- */
-cvmx_helper_link_info_t cvmx_helper_link_autoconf(int ipd_port)
-{
- cvmx_helper_link_info_t link_info;
- int interface = cvmx_helper_get_interface_num(ipd_port);
- int index = cvmx_helper_get_interface_index_num(ipd_port);
-
- if (index >= cvmx_helper_ports_on_interface(interface)) {
- link_info.u64 = 0;
- return link_info;
- }
-
- link_info = cvmx_helper_link_get(ipd_port);
- if (link_info.u64 == port_link_info[ipd_port].u64)
- return link_info;
-
- /* If we fail to set the link speed, port_link_info will not change */
- cvmx_helper_link_set(ipd_port, link_info);
-
- /*
- * port_link_info should be the current value, which will be
- * different than expected if cvmx_helper_link_set() failed.
- */
- return port_link_info[ipd_port];
-}
-
-/**
- * Return the link state of an IPD/PKO port as returned by
- * auto negotiation. The result of this function may not match
- * Octeon's link config if auto negotiation has changed since
- * the last call to cvmx_helper_link_set().
- *
- * @ipd_port: IPD/PKO port to query
- *
- * Returns Link state
- */
-cvmx_helper_link_info_t cvmx_helper_link_get(int ipd_port)
-{
- cvmx_helper_link_info_t result;
- int interface = cvmx_helper_get_interface_num(ipd_port);
- int index = cvmx_helper_get_interface_index_num(ipd_port);
-
- /* The default result will be a down link unless the code below
- changes it */
- result.u64 = 0;
-
- if (index >= cvmx_helper_ports_on_interface(interface))
- return result;
-
- switch (cvmx_helper_interface_get_mode(interface)) {
- case CVMX_HELPER_INTERFACE_MODE_DISABLED:
- case CVMX_HELPER_INTERFACE_MODE_PCIE:
- /* Network links are not supported */
- break;
- case CVMX_HELPER_INTERFACE_MODE_XAUI:
- result = __cvmx_helper_xaui_link_get(ipd_port);
- break;
- case CVMX_HELPER_INTERFACE_MODE_GMII:
- if (index == 0)
- result = __cvmx_helper_rgmii_link_get(ipd_port);
- else {
- result.s.full_duplex = 1;
- result.s.link_up = 1;
- result.s.speed = 1000;
- }
- break;
- case CVMX_HELPER_INTERFACE_MODE_RGMII:
- result = __cvmx_helper_rgmii_link_get(ipd_port);
- break;
- case CVMX_HELPER_INTERFACE_MODE_SPI:
- result = __cvmx_helper_spi_link_get(ipd_port);
- break;
- case CVMX_HELPER_INTERFACE_MODE_SGMII:
- case CVMX_HELPER_INTERFACE_MODE_PICMG:
- result = __cvmx_helper_sgmii_link_get(ipd_port);
- break;
- case CVMX_HELPER_INTERFACE_MODE_NPI:
- case CVMX_HELPER_INTERFACE_MODE_LOOP:
- /* Network links are not supported */
- break;
- }
- return result;
-}
-
-/**
- * Configure an IPD/PKO port for the specified link state. This
- * function does not influence auto negotiation at the PHY level.
- * The passed link state must always match the link state returned
- * by cvmx_helper_link_get(). It is normally best to use
- * cvmx_helper_link_autoconf() instead.
- *
- * @ipd_port: IPD/PKO port to configure
- * @link_info: The new link state
- *
- * Returns Zero on success, negative on failure
- */
-int cvmx_helper_link_set(int ipd_port, cvmx_helper_link_info_t link_info)
-{
- int result = -1;
- int interface = cvmx_helper_get_interface_num(ipd_port);
- int index = cvmx_helper_get_interface_index_num(ipd_port);
-
- if (index >= cvmx_helper_ports_on_interface(interface))
- return -1;
-
- switch (cvmx_helper_interface_get_mode(interface)) {
- case CVMX_HELPER_INTERFACE_MODE_DISABLED:
- case CVMX_HELPER_INTERFACE_MODE_PCIE:
- break;
- case CVMX_HELPER_INTERFACE_MODE_XAUI:
- result = __cvmx_helper_xaui_link_set(ipd_port, link_info);
- break;
- /*
- * RGMII/GMII/MII are all treated about the same. Most
- * functions refer to these ports as RGMII.
- */
- case CVMX_HELPER_INTERFACE_MODE_RGMII:
- case CVMX_HELPER_INTERFACE_MODE_GMII:
- result = __cvmx_helper_rgmii_link_set(ipd_port, link_info);
- break;
- case CVMX_HELPER_INTERFACE_MODE_SPI:
- result = __cvmx_helper_spi_link_set(ipd_port, link_info);
- break;
- case CVMX_HELPER_INTERFACE_MODE_SGMII:
- case CVMX_HELPER_INTERFACE_MODE_PICMG:
- result = __cvmx_helper_sgmii_link_set(ipd_port, link_info);
- break;
- case CVMX_HELPER_INTERFACE_MODE_NPI:
- case CVMX_HELPER_INTERFACE_MODE_LOOP:
- break;
- }
- /* Set the port_link_info here so that the link status is updated
- no matter how cvmx_helper_link_set is called. We don't change
- the value if link_set failed */
- if (result == 0)
- port_link_info[ipd_port].u64 = link_info.u64;
- return result;
-}
-
-/**
- * Configure a port for internal and/or external loopback. Internal loopback
- * causes packets sent by the port to be received by Octeon. External loopback
- * causes packets received from the wire to be sent out again.
- *
- * @ipd_port: IPD/PKO port to loopback.
- * @enable_internal:
- * Non zero if you want internal loopback
- * @enable_external:
- * Non zero if you want external loopback
- *
- * Returns Zero on success, negative on failure.
- */
-int cvmx_helper_configure_loopback(int ipd_port, int enable_internal,
- int enable_external)
-{
- int result = -1;
- int interface = cvmx_helper_get_interface_num(ipd_port);
- int index = cvmx_helper_get_interface_index_num(ipd_port);
-
- if (index >= cvmx_helper_ports_on_interface(interface))
- return -1;
-
- switch (cvmx_helper_interface_get_mode(interface)) {
- case CVMX_HELPER_INTERFACE_MODE_DISABLED:
- case CVMX_HELPER_INTERFACE_MODE_PCIE:
- case CVMX_HELPER_INTERFACE_MODE_SPI:
- case CVMX_HELPER_INTERFACE_MODE_NPI:
- case CVMX_HELPER_INTERFACE_MODE_LOOP:
- break;
- case CVMX_HELPER_INTERFACE_MODE_XAUI:
- result =
- __cvmx_helper_xaui_configure_loopback(ipd_port,
- enable_internal,
- enable_external);
- break;
- case CVMX_HELPER_INTERFACE_MODE_RGMII:
- case CVMX_HELPER_INTERFACE_MODE_GMII:
- result =
- __cvmx_helper_rgmii_configure_loopback(ipd_port,
- enable_internal,
- enable_external);
- break;
- case CVMX_HELPER_INTERFACE_MODE_SGMII:
- case CVMX_HELPER_INTERFACE_MODE_PICMG:
- result =
- __cvmx_helper_sgmii_configure_loopback(ipd_port,
- enable_internal,
- enable_external);
- break;
- }
- return result;
-}
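The functions above split packet I/O setup into a one-time global phase and a per-core phase. A hedged sketch of the usual boot order (the SMP dispatch helper is a placeholder, not part of the cvmx-helper API):

/* One-time packet I/O setup on the boot core, then per-core PKO setup.
 * app_run_on_each_core() is a hypothetical placeholder for whatever
 * mechanism the application uses to run code on every core. */
static int example_packet_io_init(void)
{
	if (cvmx_helper_initialize_packet_io_global())
		return -1;

	/* Every core that will queue packets to PKO needs local state. */
	app_run_on_each_core(cvmx_helper_initialize_packet_io_local);

	return 0;
}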
diff --git a/drivers/staging/octeon/cvmx-helper.h b/drivers/staging/octeon/cvmx-helper.h
deleted file mode 100644
index 51916f3cc40c..000000000000
--- a/drivers/staging/octeon/cvmx-helper.h
+++ /dev/null
@@ -1,227 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/*
- *
- * Helper functions for common, but complicated tasks.
- *
- */
-
-#ifndef __CVMX_HELPER_H__
-#define __CVMX_HELPER_H__
-
-#include "cvmx-config.h"
-#include "cvmx-fpa.h"
-#include "cvmx-wqe.h"
-
-typedef enum {
- CVMX_HELPER_INTERFACE_MODE_DISABLED,
- CVMX_HELPER_INTERFACE_MODE_RGMII,
- CVMX_HELPER_INTERFACE_MODE_GMII,
- CVMX_HELPER_INTERFACE_MODE_SPI,
- CVMX_HELPER_INTERFACE_MODE_PCIE,
- CVMX_HELPER_INTERFACE_MODE_XAUI,
- CVMX_HELPER_INTERFACE_MODE_SGMII,
- CVMX_HELPER_INTERFACE_MODE_PICMG,
- CVMX_HELPER_INTERFACE_MODE_NPI,
- CVMX_HELPER_INTERFACE_MODE_LOOP,
-} cvmx_helper_interface_mode_t;
-
-typedef union {
- uint64_t u64;
- struct {
- uint64_t reserved_20_63:44;
- uint64_t link_up:1; /**< Is the physical link up? */
- uint64_t full_duplex:1; /**< 1 if the link is full duplex */
- uint64_t speed:18; /**< Speed of the link in Mbps */
- } s;
-} cvmx_helper_link_info_t;
-
-#include "cvmx-helper-fpa.h"
-
-#include <asm/octeon/cvmx-helper-errata.h>
-#include "cvmx-helper-loop.h"
-#include "cvmx-helper-npi.h"
-#include "cvmx-helper-rgmii.h"
-#include "cvmx-helper-sgmii.h"
-#include "cvmx-helper-spi.h"
-#include "cvmx-helper-util.h"
-#include "cvmx-helper-xaui.h"
-
-/**
- * cvmx_override_pko_queue_priority(int ipd_port, uint64_t
- * priorities[16]) is a function pointer. It is meant to allow
- * customization of the PKO queue priorities based on the port
- * number. Users should set this pointer to a function before
- * calling any cvmx-helper operations.
- */
-extern void (*cvmx_override_pko_queue_priority) (int pko_port,
- uint64_t priorities[16]);
-
-/**
- * cvmx_override_ipd_port_setup(int ipd_port) is a function
- * pointer. It is meant to allow customization of the IPD port
- * setup before packet input/output comes online. It is called
- * after cvmx-helper does the default IPD configuration, but
- * before IPD is enabled. Users should set this pointer to a
- * function before calling any cvmx-helper operations.
- */
-extern void (*cvmx_override_ipd_port_setup) (int ipd_port);
-
-/**
- * This function enables the IPD and also enables the packet interfaces.
- * The packet interfaces (RGMII and SPI) must be enabled after the
- * IPD. This should be called by the user program after any additional
- * IPD configuration changes are made if CVMX_HELPER_ENABLE_IPD
- * is not set in the executive-config.h file.
- *
- * Returns 0 on success
- * -1 on failure
- */
-extern int cvmx_helper_ipd_and_packet_input_enable(void);
-
-/**
- * Initialize the PIP, IPD, and PKO hardware to support
- * simple priority based queues for the ethernet ports. Each
- * port is configured with a number of priority queues based
- * on CVMX_PKO_QUEUES_PER_PORT_* where each queue is lower
- * priority than the previous.
- *
- * Returns Zero on success, non-zero on failure
- */
-extern int cvmx_helper_initialize_packet_io_global(void);
-
-/**
- * Does core local initialization for packet io
- *
- * Returns Zero on success, non-zero on failure
- */
-extern int cvmx_helper_initialize_packet_io_local(void);
-
-/**
- * Returns the number of ports on the given interface.
- * The interface must be initialized before the port count
- * can be returned.
- *
- * @interface: Which interface to return port count for.
- *
- * Returns Port count for interface
- * -1 for uninitialized interface
- */
-extern int cvmx_helper_ports_on_interface(int interface);
-
-/**
- * Return the number of interfaces the chip has. Each interface
- * may have multiple ports. Most chips support two interfaces,
- * but the CNX0XX and CNX1XX are exceptions. These only support
- * one interface.
- *
- * Returns Number of interfaces on chip
- */
-extern int cvmx_helper_get_number_of_interfaces(void);
-
-/**
- * Get the operating mode of an interface. Depending on the Octeon
- * chip and configuration, this function returns an enumeration
- * of the type of packet I/O supported by an interface.
- *
- * @interface: Interface to probe
- *
- * Returns Mode of the interface. Unknown or unsupported interfaces return
- * DISABLED.
- */
-extern cvmx_helper_interface_mode_t cvmx_helper_interface_get_mode(int
- interface);
-
-/**
- * Auto configure an IPD/PKO port link state and speed. This
- * function basically does the equivalent of:
- * cvmx_helper_link_set(ipd_port, cvmx_helper_link_get(ipd_port));
- *
- * @ipd_port: IPD/PKO port to auto configure
- *
- * Returns Link state after configure
- */
-extern cvmx_helper_link_info_t cvmx_helper_link_autoconf(int ipd_port);
-
-/**
- * Return the link state of an IPD/PKO port as returned by
- * auto negotiation. The result of this function may not match
- * Octeon's link config if auto negotiation has changed since
- * the last call to cvmx_helper_link_set().
- *
- * @ipd_port: IPD/PKO port to query
- *
- * Returns Link state
- */
-extern cvmx_helper_link_info_t cvmx_helper_link_get(int ipd_port);
-
-/**
- * Configure an IPD/PKO port for the specified link state. This
- * function does not influence auto negotiation at the PHY level.
- * The passed link state must always match the link state returned
- * by cvmx_helper_link_get(). It is normally best to use
- * cvmx_helper_link_autoconf() instead.
- *
- * @ipd_port: IPD/PKO port to configure
- * @link_info: The new link state
- *
- * Returns Zero on success, negative on failure
- */
-extern int cvmx_helper_link_set(int ipd_port,
- cvmx_helper_link_info_t link_info);
-
-/**
- * This function probes an interface to determine the actual
- * number of hardware ports connected to it. It doesn't set up the
- * ports or enable them. The main goal here is to set the global
- * interface_port_count[interface] correctly. Hardware setup of the
- * ports will be performed later.
- *
- * @interface: Interface to probe
- *
- * Returns Zero on success, negative on failure
- */
-extern int cvmx_helper_interface_probe(int interface);
-
-/**
- * Configure a port for internal and/or external loopback. Internal loopback
- * causes packets sent by the port to be received by Octeon. External loopback
- * causes packets received from the wire to be sent out again.
- *
- * @ipd_port: IPD/PKO port to loopback.
- * @enable_internal:
- * Non zero if you want internal loopback
- * @enable_external:
- * Non zero if you want external loopback
- *
- * Returns Zero on success, negative on failure.
- */
-extern int cvmx_helper_configure_loopback(int ipd_port, int enable_internal,
- int enable_external);
-
-#endif /* __CVMX_HELPER_H__ */
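A common pattern with the link API declared above is a periodic poll that resynchronises the hardware with autonegotiation via cvmx_helper_link_autoconf(). A minimal sketch (assumed usage; cvmx_helper_get_ipd_port() comes from the util helpers pulled in by this header):

/* Walk every port on every interface and refresh its link state. The
 * calling context (timer, poll loop, workqueue) is up to the application. */
static void example_poll_all_links(void)
{
	int num_interfaces = cvmx_helper_get_number_of_interfaces();
	int interface;

	for (interface = 0; interface < num_interfaces; interface++) {
		int ports = cvmx_helper_ports_on_interface(interface);
		int index;

		for (index = 0; index < ports; index++)
			cvmx_helper_link_autoconf(
				cvmx_helper_get_ipd_port(interface, index));
	}
}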
diff --git a/drivers/staging/octeon/cvmx-interrupt-decodes.c b/drivers/staging/octeon/cvmx-interrupt-decodes.c
deleted file mode 100644
index a3337e382ee9..000000000000
--- a/drivers/staging/octeon/cvmx-interrupt-decodes.c
+++ /dev/null
@@ -1,371 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2009 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/*
- *
- * Automatically generated functions useful for enabling
- * and decoding RSL_INT_BLOCKS interrupts.
- *
- */
-
-#include <asm/octeon/octeon.h>
-
-#include "cvmx-gmxx-defs.h"
-#include "cvmx-pcsx-defs.h"
-#include "cvmx-pcsxx-defs.h"
-#include "cvmx-spxx-defs.h"
-#include "cvmx-stxx-defs.h"
-
-#ifndef PRINT_ERROR
-#define PRINT_ERROR(format, ...)
-#endif
-
-
-/**
- * __cvmx_interrupt_gmxx_rxx_int_en_enable enables all interrupt bits in cvmx_gmxx_rxx_int_en_t
- */
-void __cvmx_interrupt_gmxx_rxx_int_en_enable(int index, int block)
-{
- union cvmx_gmxx_rxx_int_en gmx_rx_int_en;
- cvmx_write_csr(CVMX_GMXX_RXX_INT_REG(index, block),
- cvmx_read_csr(CVMX_GMXX_RXX_INT_REG(index, block)));
- gmx_rx_int_en.u64 = 0;
- if (OCTEON_IS_MODEL(OCTEON_CN56XX)) {
- /* Skipping gmx_rx_int_en.s.reserved_29_63 */
- gmx_rx_int_en.s.hg2cc = 1;
- gmx_rx_int_en.s.hg2fld = 1;
- gmx_rx_int_en.s.undat = 1;
- gmx_rx_int_en.s.uneop = 1;
- gmx_rx_int_en.s.unsop = 1;
- gmx_rx_int_en.s.bad_term = 1;
- gmx_rx_int_en.s.bad_seq = 1;
- gmx_rx_int_en.s.rem_fault = 1;
- gmx_rx_int_en.s.loc_fault = 1;
- gmx_rx_int_en.s.pause_drp = 1;
- /* Skipping gmx_rx_int_en.s.reserved_16_18 */
- /*gmx_rx_int_en.s.ifgerr = 1; */
- /*gmx_rx_int_en.s.coldet = 1; // Collision detect */
- /*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
- /*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
- /*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
- gmx_rx_int_en.s.ovrerr = 1;
- /* Skipping gmx_rx_int_en.s.reserved_9_9 */
- gmx_rx_int_en.s.skperr = 1;
- gmx_rx_int_en.s.rcverr = 1;
- /* Skipping gmx_rx_int_en.s.reserved_5_6 */
- /*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */
- gmx_rx_int_en.s.jabber = 1;
- /* Skipping gmx_rx_int_en.s.reserved_2_2 */
- gmx_rx_int_en.s.carext = 1;
- /* Skipping gmx_rx_int_en.s.reserved_0_0 */
- }
- if (OCTEON_IS_MODEL(OCTEON_CN30XX)) {
- /* Skipping gmx_rx_int_en.s.reserved_19_63 */
- /*gmx_rx_int_en.s.phy_dupx = 1; */
- /*gmx_rx_int_en.s.phy_spd = 1; */
- /*gmx_rx_int_en.s.phy_link = 1; */
- /*gmx_rx_int_en.s.ifgerr = 1; */
- /*gmx_rx_int_en.s.coldet = 1; // Collsion detect */
- /*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
- /*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
- /*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
- gmx_rx_int_en.s.ovrerr = 1;
- gmx_rx_int_en.s.niberr = 1;
- gmx_rx_int_en.s.skperr = 1;
- gmx_rx_int_en.s.rcverr = 1;
- /*gmx_rx_int_en.s.lenerr = 1; // Length errors are handled when we get work */
- gmx_rx_int_en.s.alnerr = 1;
- /*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */
- gmx_rx_int_en.s.jabber = 1;
- gmx_rx_int_en.s.maxerr = 1;
- gmx_rx_int_en.s.carext = 1;
- gmx_rx_int_en.s.minerr = 1;
- }
- if (OCTEON_IS_MODEL(OCTEON_CN50XX)) {
- /* Skipping gmx_rx_int_en.s.reserved_20_63 */
- gmx_rx_int_en.s.pause_drp = 1;
- /*gmx_rx_int_en.s.phy_dupx = 1; */
- /*gmx_rx_int_en.s.phy_spd = 1; */
- /*gmx_rx_int_en.s.phy_link = 1; */
- /*gmx_rx_int_en.s.ifgerr = 1; */
- /*gmx_rx_int_en.s.coldet = 1; // Collsion detect */
- /*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
- /*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
- /*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
- gmx_rx_int_en.s.ovrerr = 1;
- gmx_rx_int_en.s.niberr = 1;
- gmx_rx_int_en.s.skperr = 1;
- gmx_rx_int_en.s.rcverr = 1;
- /* Skipping gmx_rx_int_en.s.reserved_6_6 */
- gmx_rx_int_en.s.alnerr = 1;
- /*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */
- gmx_rx_int_en.s.jabber = 1;
- /* Skipping gmx_rx_int_en.s.reserved_2_2 */
- gmx_rx_int_en.s.carext = 1;
- /* Skipping gmx_rx_int_en.s.reserved_0_0 */
- }
- if (OCTEON_IS_MODEL(OCTEON_CN38XX)) {
- /* Skipping gmx_rx_int_en.s.reserved_19_63 */
- /*gmx_rx_int_en.s.phy_dupx = 1; */
- /*gmx_rx_int_en.s.phy_spd = 1; */
- /*gmx_rx_int_en.s.phy_link = 1; */
- /*gmx_rx_int_en.s.ifgerr = 1; */
- /*gmx_rx_int_en.s.coldet = 1; // Collsion detect */
- /*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
- /*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
- /*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
- gmx_rx_int_en.s.ovrerr = 1;
- gmx_rx_int_en.s.niberr = 1;
- gmx_rx_int_en.s.skperr = 1;
- gmx_rx_int_en.s.rcverr = 1;
- /*gmx_rx_int_en.s.lenerr = 1; // Length errors are handled when we get work */
- gmx_rx_int_en.s.alnerr = 1;
- /*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */
- gmx_rx_int_en.s.jabber = 1;
- gmx_rx_int_en.s.maxerr = 1;
- gmx_rx_int_en.s.carext = 1;
- gmx_rx_int_en.s.minerr = 1;
- }
- if (OCTEON_IS_MODEL(OCTEON_CN31XX)) {
- /* Skipping gmx_rx_int_en.s.reserved_19_63 */
- /*gmx_rx_int_en.s.phy_dupx = 1; */
- /*gmx_rx_int_en.s.phy_spd = 1; */
- /*gmx_rx_int_en.s.phy_link = 1; */
- /*gmx_rx_int_en.s.ifgerr = 1; */
- /*gmx_rx_int_en.s.coldet = 1; // Collsion detect */
- /*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
- /*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
- /*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
- gmx_rx_int_en.s.ovrerr = 1;
- gmx_rx_int_en.s.niberr = 1;
- gmx_rx_int_en.s.skperr = 1;
- gmx_rx_int_en.s.rcverr = 1;
- /*gmx_rx_int_en.s.lenerr = 1; // Length errors are handled when we get work */
- gmx_rx_int_en.s.alnerr = 1;
- /*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */
- gmx_rx_int_en.s.jabber = 1;
- gmx_rx_int_en.s.maxerr = 1;
- gmx_rx_int_en.s.carext = 1;
- gmx_rx_int_en.s.minerr = 1;
- }
- if (OCTEON_IS_MODEL(OCTEON_CN58XX)) {
- /* Skipping gmx_rx_int_en.s.reserved_20_63 */
- gmx_rx_int_en.s.pause_drp = 1;
- /*gmx_rx_int_en.s.phy_dupx = 1; */
- /*gmx_rx_int_en.s.phy_spd = 1; */
- /*gmx_rx_int_en.s.phy_link = 1; */
- /*gmx_rx_int_en.s.ifgerr = 1; */
- /*gmx_rx_int_en.s.coldet = 1; // Collsion detect */
- /*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
- /*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
- /*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
- gmx_rx_int_en.s.ovrerr = 1;
- gmx_rx_int_en.s.niberr = 1;
- gmx_rx_int_en.s.skperr = 1;
- gmx_rx_int_en.s.rcverr = 1;
- /*gmx_rx_int_en.s.lenerr = 1; // Length errors are handled when we get work */
- gmx_rx_int_en.s.alnerr = 1;
- /*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */
- gmx_rx_int_en.s.jabber = 1;
- gmx_rx_int_en.s.maxerr = 1;
- gmx_rx_int_en.s.carext = 1;
- gmx_rx_int_en.s.minerr = 1;
- }
- if (OCTEON_IS_MODEL(OCTEON_CN52XX)) {
- /* Skipping gmx_rx_int_en.s.reserved_29_63 */
- gmx_rx_int_en.s.hg2cc = 1;
- gmx_rx_int_en.s.hg2fld = 1;
- gmx_rx_int_en.s.undat = 1;
- gmx_rx_int_en.s.uneop = 1;
- gmx_rx_int_en.s.unsop = 1;
- gmx_rx_int_en.s.bad_term = 1;
- gmx_rx_int_en.s.bad_seq = 0;
- gmx_rx_int_en.s.rem_fault = 1;
- gmx_rx_int_en.s.loc_fault = 0;
- gmx_rx_int_en.s.pause_drp = 1;
- /* Skipping gmx_rx_int_en.s.reserved_16_18 */
- /*gmx_rx_int_en.s.ifgerr = 1; */
- /*gmx_rx_int_en.s.coldet = 1; // Collsion detect */
- /*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
- /*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
- /*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
- gmx_rx_int_en.s.ovrerr = 1;
- /* Skipping gmx_rx_int_en.s.reserved_9_9 */
- gmx_rx_int_en.s.skperr = 1;
- gmx_rx_int_en.s.rcverr = 1;
- /* Skipping gmx_rx_int_en.s.reserved_5_6 */
- /*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */
- gmx_rx_int_en.s.jabber = 1;
- /* Skipping gmx_rx_int_en.s.reserved_2_2 */
- gmx_rx_int_en.s.carext = 1;
- /* Skipping gmx_rx_int_en.s.reserved_0_0 */
- }
- cvmx_write_csr(CVMX_GMXX_RXX_INT_EN(index, block), gmx_rx_int_en.u64);
-}
-/**
- * __cvmx_interrupt_pcsx_intx_en_reg_enable enables all interrupt bits in cvmx_pcsx_intx_en_reg_t
- */
-void __cvmx_interrupt_pcsx_intx_en_reg_enable(int index, int block)
-{
- union cvmx_pcsx_intx_en_reg pcs_int_en_reg;
- cvmx_write_csr(CVMX_PCSX_INTX_REG(index, block),
- cvmx_read_csr(CVMX_PCSX_INTX_REG(index, block)));
- pcs_int_en_reg.u64 = 0;
- if (OCTEON_IS_MODEL(OCTEON_CN56XX)) {
- /* Skipping pcs_int_en_reg.s.reserved_12_63 */
- /*pcs_int_en_reg.s.dup = 1; // This happens during normal operation */
- pcs_int_en_reg.s.sync_bad_en = 1;
- pcs_int_en_reg.s.an_bad_en = 1;
- pcs_int_en_reg.s.rxlock_en = 1;
- pcs_int_en_reg.s.rxbad_en = 1;
- /*pcs_int_en_reg.s.rxerr_en = 1; // This happens during normal operation */
- pcs_int_en_reg.s.txbad_en = 1;
- pcs_int_en_reg.s.txfifo_en = 1;
- pcs_int_en_reg.s.txfifu_en = 1;
- pcs_int_en_reg.s.an_err_en = 1;
- /*pcs_int_en_reg.s.xmit_en = 1; // This happens during normal operation */
- /*pcs_int_en_reg.s.lnkspd_en = 1; // This happens during normal operation */
- }
- if (OCTEON_IS_MODEL(OCTEON_CN52XX)) {
- /* Skipping pcs_int_en_reg.s.reserved_12_63 */
- /*pcs_int_en_reg.s.dup = 1; // This happens during normal operation */
- pcs_int_en_reg.s.sync_bad_en = 1;
- pcs_int_en_reg.s.an_bad_en = 1;
- pcs_int_en_reg.s.rxlock_en = 1;
- pcs_int_en_reg.s.rxbad_en = 1;
- /*pcs_int_en_reg.s.rxerr_en = 1; // This happens during normal operation */
- pcs_int_en_reg.s.txbad_en = 1;
- pcs_int_en_reg.s.txfifo_en = 1;
- pcs_int_en_reg.s.txfifu_en = 1;
- pcs_int_en_reg.s.an_err_en = 1;
- /*pcs_int_en_reg.s.xmit_en = 1; // This happens during normal operation */
- /*pcs_int_en_reg.s.lnkspd_en = 1; // This happens during normal operation */
- }
- cvmx_write_csr(CVMX_PCSX_INTX_EN_REG(index, block), pcs_int_en_reg.u64);
-}
-/**
- * __cvmx_interrupt_pcsxx_int_en_reg_enable enables all interrupt bits in cvmx_pcsxx_int_en_reg_t
- */
-void __cvmx_interrupt_pcsxx_int_en_reg_enable(int index)
-{
- union cvmx_pcsxx_int_en_reg pcsx_int_en_reg;
- cvmx_write_csr(CVMX_PCSXX_INT_REG(index),
- cvmx_read_csr(CVMX_PCSXX_INT_REG(index)));
- pcsx_int_en_reg.u64 = 0;
- if (OCTEON_IS_MODEL(OCTEON_CN56XX)) {
- /* Skipping pcsx_int_en_reg.s.reserved_6_63 */
- pcsx_int_en_reg.s.algnlos_en = 1;
- pcsx_int_en_reg.s.synlos_en = 1;
- pcsx_int_en_reg.s.bitlckls_en = 1;
- pcsx_int_en_reg.s.rxsynbad_en = 1;
- pcsx_int_en_reg.s.rxbad_en = 1;
- pcsx_int_en_reg.s.txflt_en = 1;
- }
- if (OCTEON_IS_MODEL(OCTEON_CN52XX)) {
- /* Skipping pcsx_int_en_reg.s.reserved_6_63 */
- pcsx_int_en_reg.s.algnlos_en = 1;
- pcsx_int_en_reg.s.synlos_en = 1;
- pcsx_int_en_reg.s.bitlckls_en = 0; /* Happens if XAUI module is not installed */
- pcsx_int_en_reg.s.rxsynbad_en = 1;
- pcsx_int_en_reg.s.rxbad_en = 1;
- pcsx_int_en_reg.s.txflt_en = 1;
- }
- cvmx_write_csr(CVMX_PCSXX_INT_EN_REG(index), pcsx_int_en_reg.u64);
-}
-
-/**
- * __cvmx_interrupt_spxx_int_msk_enable enables all interrupt bits in cvmx_spxx_int_msk_t
- */
-void __cvmx_interrupt_spxx_int_msk_enable(int index)
-{
- union cvmx_spxx_int_msk spx_int_msk;
- cvmx_write_csr(CVMX_SPXX_INT_REG(index),
- cvmx_read_csr(CVMX_SPXX_INT_REG(index)));
- spx_int_msk.u64 = 0;
- if (OCTEON_IS_MODEL(OCTEON_CN38XX)) {
- /* Skipping spx_int_msk.s.reserved_12_63 */
- spx_int_msk.s.calerr = 1;
- spx_int_msk.s.syncerr = 1;
- spx_int_msk.s.diperr = 1;
- spx_int_msk.s.tpaovr = 1;
- spx_int_msk.s.rsverr = 1;
- spx_int_msk.s.drwnng = 1;
- spx_int_msk.s.clserr = 1;
- spx_int_msk.s.spiovr = 1;
- /* Skipping spx_int_msk.s.reserved_2_3 */
- spx_int_msk.s.abnorm = 1;
- spx_int_msk.s.prtnxa = 1;
- }
- if (OCTEON_IS_MODEL(OCTEON_CN58XX)) {
- /* Skipping spx_int_msk.s.reserved_12_63 */
- spx_int_msk.s.calerr = 1;
- spx_int_msk.s.syncerr = 1;
- spx_int_msk.s.diperr = 1;
- spx_int_msk.s.tpaovr = 1;
- spx_int_msk.s.rsverr = 1;
- spx_int_msk.s.drwnng = 1;
- spx_int_msk.s.clserr = 1;
- spx_int_msk.s.spiovr = 1;
- /* Skipping spx_int_msk.s.reserved_2_3 */
- spx_int_msk.s.abnorm = 1;
- spx_int_msk.s.prtnxa = 1;
- }
- cvmx_write_csr(CVMX_SPXX_INT_MSK(index), spx_int_msk.u64);
-}
-/**
- * __cvmx_interrupt_stxx_int_msk_enable enables all interrupt bits in cvmx_stxx_int_msk_t
- */
-void __cvmx_interrupt_stxx_int_msk_enable(int index)
-{
- union cvmx_stxx_int_msk stx_int_msk;
- cvmx_write_csr(CVMX_STXX_INT_REG(index),
- cvmx_read_csr(CVMX_STXX_INT_REG(index)));
- stx_int_msk.u64 = 0;
- if (OCTEON_IS_MODEL(OCTEON_CN38XX)) {
- /* Skipping stx_int_msk.s.reserved_8_63 */
- stx_int_msk.s.frmerr = 1;
- stx_int_msk.s.unxfrm = 1;
- stx_int_msk.s.nosync = 1;
- stx_int_msk.s.diperr = 1;
- stx_int_msk.s.datovr = 1;
- stx_int_msk.s.ovrbst = 1;
- stx_int_msk.s.calpar1 = 1;
- stx_int_msk.s.calpar0 = 1;
- }
- if (OCTEON_IS_MODEL(OCTEON_CN58XX)) {
- /* Skipping stx_int_msk.s.reserved_8_63 */
- stx_int_msk.s.frmerr = 1;
- stx_int_msk.s.unxfrm = 1;
- stx_int_msk.s.nosync = 1;
- stx_int_msk.s.diperr = 1;
- stx_int_msk.s.datovr = 1;
- stx_int_msk.s.ovrbst = 1;
- stx_int_msk.s.calpar1 = 1;
- stx_int_msk.s.calpar0 = 1;
- }
- cvmx_write_csr(CVMX_STXX_INT_MSK(index), stx_int_msk.u64);
-}
diff --git a/drivers/staging/octeon/cvmx-interrupt-rsl.c b/drivers/staging/octeon/cvmx-interrupt-rsl.c
deleted file mode 100644
index df50048cfbc0..000000000000
--- a/drivers/staging/octeon/cvmx-interrupt-rsl.c
+++ /dev/null
@@ -1,140 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/*
- * Utility functions to decode Octeon's RSL_INT_BLOCKS
- * interrupts into error messages.
- */
-
-#include <asm/octeon/octeon.h>
-
-#include "cvmx-asxx-defs.h"
-#include "cvmx-gmxx-defs.h"
-
-#ifndef PRINT_ERROR
-#define PRINT_ERROR(format, ...)
-#endif
-
-void __cvmx_interrupt_gmxx_rxx_int_en_enable(int index, int block);
-
-/**
- * Enable ASX error interrupts that exist on CN3XXX, CN50XX, and
- * CN58XX.
- *
- * @block: Interface to enable 0-1
- */
-void __cvmx_interrupt_asxx_enable(int block)
-{
- int mask;
- union cvmx_asxx_int_en csr;
- /*
- * CN38XX and CN58XX have two interfaces with 4 ports per
- * interface. All other chips have a max of 3 ports on
- * interface 0
- */
- if (OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX))
- mask = 0xf; /* Set enables for 4 ports */
- else
- mask = 0x7; /* Set enables for 3 ports */
-
- /* Enable interface interrupts */
- csr.u64 = cvmx_read_csr(CVMX_ASXX_INT_EN(block));
- csr.s.txpsh = mask;
- csr.s.txpop = mask;
- csr.s.ovrflw = mask;
- cvmx_write_csr(CVMX_ASXX_INT_EN(block), csr.u64);
-}
-/**
- * Enable GMX error reporting for the supplied interface
- *
- * @interface: Interface to enable
- */
-void __cvmx_interrupt_gmxx_enable(int interface)
-{
- union cvmx_gmxx_inf_mode mode;
- union cvmx_gmxx_tx_int_en gmx_tx_int_en;
- int num_ports;
- int index;
-
- mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
-
- if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN52XX)) {
- if (mode.s.en) {
- switch (mode.cn56xx.mode) {
- case 1: /* XAUI */
- num_ports = 1;
- break;
- case 2: /* SGMII */
- case 3: /* PICMG */
- num_ports = 4;
- break;
- default: /* Disabled */
- num_ports = 0;
- break;
- }
- } else
- num_ports = 0;
- } else {
- if (mode.s.en) {
- if (OCTEON_IS_MODEL(OCTEON_CN38XX)
- || OCTEON_IS_MODEL(OCTEON_CN58XX)) {
- /*
- * SPI on CN38XX and CN58XX report all
- * errors through port 0. RGMII needs
- * to check all 4 ports
- */
- if (mode.s.type)
- num_ports = 1;
- else
- num_ports = 4;
- } else {
- /*
- * CN30XX, CN31XX, and CN50XX have two
- * or three ports. GMII and MII has 2,
- * RGMII has three
- */
- if (mode.s.type)
- num_ports = 2;
- else
- num_ports = 3;
- }
- } else
- num_ports = 0;
- }
-
- gmx_tx_int_en.u64 = 0;
- if (num_ports) {
- if (OCTEON_IS_MODEL(OCTEON_CN38XX)
- || OCTEON_IS_MODEL(OCTEON_CN58XX))
- gmx_tx_int_en.s.ncb_nxa = 1;
- gmx_tx_int_en.s.pko_nxa = 1;
- }
- gmx_tx_int_en.s.undflw = (1 << num_ports) - 1;
- cvmx_write_csr(CVMX_GMXX_TX_INT_EN(interface), gmx_tx_int_en.u64);
- for (index = 0; index < num_ports; index++)
- __cvmx_interrupt_gmxx_rxx_int_en_enable(index, interface);
-}
diff --git a/drivers/staging/octeon/cvmx-ipd.h b/drivers/staging/octeon/cvmx-ipd.h
deleted file mode 100644
index 115a552c5c7f..000000000000
--- a/drivers/staging/octeon/cvmx-ipd.h
+++ /dev/null
@@ -1,338 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/**
- *
- * Interface to the hardware Input Packet Data unit.
- */
-
-#ifndef __CVMX_IPD_H__
-#define __CVMX_IPD_H__
-
-#include <asm/octeon/octeon-feature.h>
-
-#include <asm/octeon/cvmx-ipd-defs.h>
-
-enum cvmx_ipd_mode {
- CVMX_IPD_OPC_MODE_STT = 0LL, /* All blocks DRAM, not cached in L2 */
- CVMX_IPD_OPC_MODE_STF = 1LL, /* All bloccks into L2 */
- CVMX_IPD_OPC_MODE_STF1_STT = 2LL, /* 1st block L2, rest DRAM */
- CVMX_IPD_OPC_MODE_STF2_STT = 3LL /* 1st, 2nd blocks L2, rest DRAM */
-};
-
-#ifndef CVMX_ENABLE_LEN_M8_FIX
-#define CVMX_ENABLE_LEN_M8_FIX 0
-#endif
-
-/* CSR typedefs have been moved to cvmx-csr-*.h */
-typedef union cvmx_ipd_1st_mbuff_skip cvmx_ipd_mbuff_first_skip_t;
-typedef union cvmx_ipd_1st_next_ptr_back cvmx_ipd_first_next_ptr_back_t;
-
-typedef cvmx_ipd_mbuff_first_skip_t cvmx_ipd_mbuff_not_first_skip_t;
-typedef cvmx_ipd_first_next_ptr_back_t cvmx_ipd_second_next_ptr_back_t;
-
-/**
- * Configure IPD
- *
- * @mbuff_size: Packets buffer size in 8 byte words
- * @first_mbuff_skip:
- * Number of 8 byte words to skip in the first buffer
- * @not_first_mbuff_skip:
- * Number of 8 byte words to skip in each following buffer
- * @first_back: Must be same as first_mbuff_skip / 128
- * @second_back:
- * Must be same as not_first_mbuff_skip / 128
- * @wqe_fpa_pool:
- * FPA pool to get work entries from
- * @cache_mode:
- * @back_pres_enable_flag:
- * Enable or disable port back pressure
- */
-static inline void cvmx_ipd_config(uint64_t mbuff_size,
- uint64_t first_mbuff_skip,
- uint64_t not_first_mbuff_skip,
- uint64_t first_back,
- uint64_t second_back,
- uint64_t wqe_fpa_pool,
- enum cvmx_ipd_mode cache_mode,
- uint64_t back_pres_enable_flag)
-{
- cvmx_ipd_mbuff_first_skip_t first_skip;
- cvmx_ipd_mbuff_not_first_skip_t not_first_skip;
- union cvmx_ipd_packet_mbuff_size size;
- cvmx_ipd_first_next_ptr_back_t first_back_struct;
- cvmx_ipd_second_next_ptr_back_t second_back_struct;
- union cvmx_ipd_wqe_fpa_queue wqe_pool;
- union cvmx_ipd_ctl_status ipd_ctl_reg;
-
- first_skip.u64 = 0;
- first_skip.s.skip_sz = first_mbuff_skip;
- cvmx_write_csr(CVMX_IPD_1ST_MBUFF_SKIP, first_skip.u64);
-
- not_first_skip.u64 = 0;
- not_first_skip.s.skip_sz = not_first_mbuff_skip;
- cvmx_write_csr(CVMX_IPD_NOT_1ST_MBUFF_SKIP, not_first_skip.u64);
-
- size.u64 = 0;
- size.s.mb_size = mbuff_size;
- cvmx_write_csr(CVMX_IPD_PACKET_MBUFF_SIZE, size.u64);
-
- first_back_struct.u64 = 0;
- first_back_struct.s.back = first_back;
- cvmx_write_csr(CVMX_IPD_1st_NEXT_PTR_BACK, first_back_struct.u64);
-
- second_back_struct.u64 = 0;
- second_back_struct.s.back = second_back;
- cvmx_write_csr(CVMX_IPD_2nd_NEXT_PTR_BACK, second_back_struct.u64);
-
- wqe_pool.u64 = 0;
- wqe_pool.s.wqe_pool = wqe_fpa_pool;
- cvmx_write_csr(CVMX_IPD_WQE_FPA_QUEUE, wqe_pool.u64);
-
- ipd_ctl_reg.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
- ipd_ctl_reg.s.opc_mode = cache_mode;
- ipd_ctl_reg.s.pbp_en = back_pres_enable_flag;
- cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_ctl_reg.u64);
-
- /* Note: the example RED code that used to be here has been moved to
- cvmx_helper_setup_red */
-}
-
-/**
- * Enable IPD
- */
-static inline void cvmx_ipd_enable(void)
-{
- union cvmx_ipd_ctl_status ipd_reg;
- ipd_reg.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
- if (ipd_reg.s.ipd_en) {
- cvmx_dprintf
- ("Warning: Enabling IPD when IPD already enabled.\n");
- }
- ipd_reg.s.ipd_en = 1;
-#if CVMX_ENABLE_LEN_M8_FIX
- if (!OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2))
- ipd_reg.s.len_m8 = TRUE;
-#endif
- cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_reg.u64);
-}
-
-/**
- * Disable IPD
- */
-static inline void cvmx_ipd_disable(void)
-{
- union cvmx_ipd_ctl_status ipd_reg;
- ipd_reg.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
- ipd_reg.s.ipd_en = 0;
- cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_reg.u64);
-}
-
-/**
- * Supportive function for cvmx_fpa_shutdown_pool.
- */
-static inline void cvmx_ipd_free_ptr(void)
-{
- /* Only CN38XXp{1,2} cannot read pointer out of the IPD */
- if (!OCTEON_IS_MODEL(OCTEON_CN38XX_PASS1)
- && !OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2)) {
- int no_wptr = 0;
- union cvmx_ipd_ptr_count ipd_ptr_count;
- ipd_ptr_count.u64 = cvmx_read_csr(CVMX_IPD_PTR_COUNT);
-
- /* Handle Work Queue Entry in cn56xx and cn52xx */
- if (octeon_has_feature(OCTEON_FEATURE_NO_WPTR)) {
- union cvmx_ipd_ctl_status ipd_ctl_status;
- ipd_ctl_status.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
- if (ipd_ctl_status.s.no_wptr)
- no_wptr = 1;
- }
-
- /* Free the prefetched WQE */
- if (ipd_ptr_count.s.wqev_cnt) {
- union cvmx_ipd_wqe_ptr_valid ipd_wqe_ptr_valid;
- ipd_wqe_ptr_valid.u64 =
- cvmx_read_csr(CVMX_IPD_WQE_PTR_VALID);
- if (no_wptr)
- cvmx_fpa_free(cvmx_phys_to_ptr
- ((uint64_t) ipd_wqe_ptr_valid.s.
- ptr << 7), CVMX_FPA_PACKET_POOL,
- 0);
- else
- cvmx_fpa_free(cvmx_phys_to_ptr
- ((uint64_t) ipd_wqe_ptr_valid.s.
- ptr << 7), CVMX_FPA_WQE_POOL, 0);
- }
-
- /* Free all WQE in the fifo */
- if (ipd_ptr_count.s.wqe_pcnt) {
- int i;
- union cvmx_ipd_pwp_ptr_fifo_ctl ipd_pwp_ptr_fifo_ctl;
- ipd_pwp_ptr_fifo_ctl.u64 =
- cvmx_read_csr(CVMX_IPD_PWP_PTR_FIFO_CTL);
- for (i = 0; i < ipd_ptr_count.s.wqe_pcnt; i++) {
- ipd_pwp_ptr_fifo_ctl.s.cena = 0;
- ipd_pwp_ptr_fifo_ctl.s.raddr =
- ipd_pwp_ptr_fifo_ctl.s.max_cnts +
- (ipd_pwp_ptr_fifo_ctl.s.wraddr +
- i) % ipd_pwp_ptr_fifo_ctl.s.max_cnts;
- cvmx_write_csr(CVMX_IPD_PWP_PTR_FIFO_CTL,
- ipd_pwp_ptr_fifo_ctl.u64);
- ipd_pwp_ptr_fifo_ctl.u64 =
- cvmx_read_csr(CVMX_IPD_PWP_PTR_FIFO_CTL);
- if (no_wptr)
- cvmx_fpa_free(cvmx_phys_to_ptr
- ((uint64_t)
- ipd_pwp_ptr_fifo_ctl.s.
- ptr << 7),
- CVMX_FPA_PACKET_POOL, 0);
- else
- cvmx_fpa_free(cvmx_phys_to_ptr
- ((uint64_t)
- ipd_pwp_ptr_fifo_ctl.s.
- ptr << 7),
- CVMX_FPA_WQE_POOL, 0);
- }
- ipd_pwp_ptr_fifo_ctl.s.cena = 1;
- cvmx_write_csr(CVMX_IPD_PWP_PTR_FIFO_CTL,
- ipd_pwp_ptr_fifo_ctl.u64);
- }
-
- /* Free the prefetched packet */
- if (ipd_ptr_count.s.pktv_cnt) {
- union cvmx_ipd_pkt_ptr_valid ipd_pkt_ptr_valid;
- ipd_pkt_ptr_valid.u64 =
- cvmx_read_csr(CVMX_IPD_PKT_PTR_VALID);
- cvmx_fpa_free(cvmx_phys_to_ptr
- (ipd_pkt_ptr_valid.s.ptr << 7),
- CVMX_FPA_PACKET_POOL, 0);
- }
-
- /* Free the per port prefetched packets */
- if (1) {
- int i;
- union cvmx_ipd_prc_port_ptr_fifo_ctl
- ipd_prc_port_ptr_fifo_ctl;
- ipd_prc_port_ptr_fifo_ctl.u64 =
- cvmx_read_csr(CVMX_IPD_PRC_PORT_PTR_FIFO_CTL);
-
- for (i = 0; i < ipd_prc_port_ptr_fifo_ctl.s.max_pkt;
- i++) {
- ipd_prc_port_ptr_fifo_ctl.s.cena = 0;
- ipd_prc_port_ptr_fifo_ctl.s.raddr =
- i % ipd_prc_port_ptr_fifo_ctl.s.max_pkt;
- cvmx_write_csr(CVMX_IPD_PRC_PORT_PTR_FIFO_CTL,
- ipd_prc_port_ptr_fifo_ctl.u64);
- ipd_prc_port_ptr_fifo_ctl.u64 =
- cvmx_read_csr
- (CVMX_IPD_PRC_PORT_PTR_FIFO_CTL);
- cvmx_fpa_free(cvmx_phys_to_ptr
- ((uint64_t)
- ipd_prc_port_ptr_fifo_ctl.s.
- ptr << 7), CVMX_FPA_PACKET_POOL,
- 0);
- }
- ipd_prc_port_ptr_fifo_ctl.s.cena = 1;
- cvmx_write_csr(CVMX_IPD_PRC_PORT_PTR_FIFO_CTL,
- ipd_prc_port_ptr_fifo_ctl.u64);
- }
-
- /* Free all packets in the holding fifo */
- if (ipd_ptr_count.s.pfif_cnt) {
- int i;
- union cvmx_ipd_prc_hold_ptr_fifo_ctl
- ipd_prc_hold_ptr_fifo_ctl;
-
- ipd_prc_hold_ptr_fifo_ctl.u64 =
- cvmx_read_csr(CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL);
-
- for (i = 0; i < ipd_ptr_count.s.pfif_cnt; i++) {
- ipd_prc_hold_ptr_fifo_ctl.s.cena = 0;
- ipd_prc_hold_ptr_fifo_ctl.s.raddr =
- (ipd_prc_hold_ptr_fifo_ctl.s.praddr +
- i) % ipd_prc_hold_ptr_fifo_ctl.s.max_pkt;
- cvmx_write_csr(CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL,
- ipd_prc_hold_ptr_fifo_ctl.u64);
- ipd_prc_hold_ptr_fifo_ctl.u64 =
- cvmx_read_csr
- (CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL);
- cvmx_fpa_free(cvmx_phys_to_ptr
- ((uint64_t)
- ipd_prc_hold_ptr_fifo_ctl.s.
- ptr << 7), CVMX_FPA_PACKET_POOL,
- 0);
- }
- ipd_prc_hold_ptr_fifo_ctl.s.cena = 1;
- cvmx_write_csr(CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL,
- ipd_prc_hold_ptr_fifo_ctl.u64);
- }
-
- /* Free all packets in the fifo */
- if (ipd_ptr_count.s.pkt_pcnt) {
- int i;
- union cvmx_ipd_pwp_ptr_fifo_ctl ipd_pwp_ptr_fifo_ctl;
- ipd_pwp_ptr_fifo_ctl.u64 =
- cvmx_read_csr(CVMX_IPD_PWP_PTR_FIFO_CTL);
-
- for (i = 0; i < ipd_ptr_count.s.pkt_pcnt; i++) {
- ipd_pwp_ptr_fifo_ctl.s.cena = 0;
- ipd_pwp_ptr_fifo_ctl.s.raddr =
- (ipd_pwp_ptr_fifo_ctl.s.praddr +
- i) % ipd_pwp_ptr_fifo_ctl.s.max_cnts;
- cvmx_write_csr(CVMX_IPD_PWP_PTR_FIFO_CTL,
- ipd_pwp_ptr_fifo_ctl.u64);
- ipd_pwp_ptr_fifo_ctl.u64 =
- cvmx_read_csr(CVMX_IPD_PWP_PTR_FIFO_CTL);
- cvmx_fpa_free(cvmx_phys_to_ptr
- ((uint64_t) ipd_pwp_ptr_fifo_ctl.
- s.ptr << 7),
- CVMX_FPA_PACKET_POOL, 0);
- }
- ipd_pwp_ptr_fifo_ctl.s.cena = 1;
- cvmx_write_csr(CVMX_IPD_PWP_PTR_FIFO_CTL,
- ipd_pwp_ptr_fifo_ctl.u64);
- }
-
- /* Reset the IPD to get all buffers out of it */
- {
- union cvmx_ipd_ctl_status ipd_ctl_status;
- ipd_ctl_status.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
- ipd_ctl_status.s.reset = 1;
- cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_ctl_status.u64);
- }
-
- /* Reset the PIP */
- {
- union cvmx_pip_sft_rst pip_sft_rst;
- pip_sft_rst.u64 = cvmx_read_csr(CVMX_PIP_SFT_RST);
- pip_sft_rst.s.rst = 1;
- cvmx_write_csr(CVMX_PIP_SFT_RST, pip_sft_rst.u64);
- }
- }
-}
-
-#endif /* __CVMX_IPD_H__ */
diff --git a/drivers/staging/octeon/cvmx-mdio.h b/drivers/staging/octeon/cvmx-mdio.h
deleted file mode 100644
index d88ab8d8e37d..000000000000
--- a/drivers/staging/octeon/cvmx-mdio.h
+++ /dev/null
@@ -1,506 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/*
- *
- * Interface to the SMI/MDIO hardware, including support for both IEEE 802.3
- * clause 22 and clause 45 operations.
- *
- */
-
-#ifndef __CVMX_MIO_H__
-#define __CVMX_MIO_H__
-
-#include "cvmx-smix-defs.h"
-
-/**
- * PHY register 0 from the 802.3 spec
- */
-#define CVMX_MDIO_PHY_REG_CONTROL 0
-typedef union {
- uint16_t u16;
- struct {
- uint16_t reset:1;
- uint16_t loopback:1;
- uint16_t speed_lsb:1;
- uint16_t autoneg_enable:1;
- uint16_t power_down:1;
- uint16_t isolate:1;
- uint16_t restart_autoneg:1;
- uint16_t duplex:1;
- uint16_t collision_test:1;
- uint16_t speed_msb:1;
- uint16_t unidirectional_enable:1;
- uint16_t reserved_0_4:5;
- } s;
-} cvmx_mdio_phy_reg_control_t;
-
-/**
- * PHY register 1 from the 802.3 spec
- */
-#define CVMX_MDIO_PHY_REG_STATUS 1
-typedef union {
- uint16_t u16;
- struct {
- uint16_t capable_100base_t4:1;
- uint16_t capable_100base_x_full:1;
- uint16_t capable_100base_x_half:1;
- uint16_t capable_10_full:1;
- uint16_t capable_10_half:1;
- uint16_t capable_100base_t2_full:1;
- uint16_t capable_100base_t2_half:1;
- uint16_t capable_extended_status:1;
- uint16_t capable_unidirectional:1;
- uint16_t capable_mf_preamble_suppression:1;
- uint16_t autoneg_complete:1;
- uint16_t remote_fault:1;
- uint16_t capable_autoneg:1;
- uint16_t link_status:1;
- uint16_t jabber_detect:1;
- uint16_t capable_extended_registers:1;
-
- } s;
-} cvmx_mdio_phy_reg_status_t;
-
-/**
- * PHY register 2 from the 802.3 spec
- */
-#define CVMX_MDIO_PHY_REG_ID1 2
-typedef union {
- uint16_t u16;
- struct {
- uint16_t oui_bits_3_18;
- } s;
-} cvmx_mdio_phy_reg_id1_t;
-
-/**
- * PHY register 3 from the 802.3 spec
- */
-#define CVMX_MDIO_PHY_REG_ID2 3
-typedef union {
- uint16_t u16;
- struct {
- uint16_t oui_bits_19_24:6;
- uint16_t model:6;
- uint16_t revision:4;
- } s;
-} cvmx_mdio_phy_reg_id2_t;
-
-/**
- * PHY register 4 from the 802.3 spec
- */
-#define CVMX_MDIO_PHY_REG_AUTONEG_ADVER 4
-typedef union {
- uint16_t u16;
- struct {
- uint16_t next_page:1;
- uint16_t reserved_14:1;
- uint16_t remote_fault:1;
- uint16_t reserved_12:1;
- uint16_t asymmetric_pause:1;
- uint16_t pause:1;
- uint16_t advert_100base_t4:1;
- uint16_t advert_100base_tx_full:1;
- uint16_t advert_100base_tx_half:1;
- uint16_t advert_10base_tx_full:1;
- uint16_t advert_10base_tx_half:1;
- uint16_t selector:5;
- } s;
-} cvmx_mdio_phy_reg_autoneg_adver_t;
-
-/**
- * PHY register 5 from the 802.3 spec
- */
-#define CVMX_MDIO_PHY_REG_LINK_PARTNER_ABILITY 5
-typedef union {
- uint16_t u16;
- struct {
- uint16_t next_page:1;
- uint16_t ack:1;
- uint16_t remote_fault:1;
- uint16_t reserved_12:1;
- uint16_t asymmetric_pause:1;
- uint16_t pause:1;
- uint16_t advert_100base_t4:1;
- uint16_t advert_100base_tx_full:1;
- uint16_t advert_100base_tx_half:1;
- uint16_t advert_10base_tx_full:1;
- uint16_t advert_10base_tx_half:1;
- uint16_t selector:5;
- } s;
-} cvmx_mdio_phy_reg_link_partner_ability_t;
-
-/**
- * PHY register 6 from the 802.3 spec
- */
-#define CVMX_MDIO_PHY_REG_AUTONEG_EXPANSION 6
-typedef union {
- uint16_t u16;
- struct {
- uint16_t reserved_5_15:11;
- uint16_t parallel_detection_fault:1;
- uint16_t link_partner_next_page_capable:1;
- uint16_t local_next_page_capable:1;
- uint16_t page_received:1;
- uint16_t link_partner_autoneg_capable:1;
-
- } s;
-} cvmx_mdio_phy_reg_autoneg_expansion_t;
-
-/**
- * PHY register 9 from the 802.3 spec
- */
-#define CVMX_MDIO_PHY_REG_CONTROL_1000 9
-typedef union {
- uint16_t u16;
- struct {
- uint16_t test_mode:3;
- uint16_t manual_master_slave:1;
- uint16_t master:1;
- uint16_t port_type:1;
- uint16_t advert_1000base_t_full:1;
- uint16_t advert_1000base_t_half:1;
- uint16_t reserved_0_7:8;
- } s;
-} cvmx_mdio_phy_reg_control_1000_t;
-
-/**
- * PHY register 10 from the 802.3 spec
- */
-#define CVMX_MDIO_PHY_REG_STATUS_1000 10
-typedef union {
- uint16_t u16;
- struct {
- uint16_t master_slave_fault:1;
- uint16_t is_master:1;
- uint16_t local_receiver_ok:1;
- uint16_t remote_receiver_ok:1;
- uint16_t remote_capable_1000base_t_full:1;
- uint16_t remote_capable_1000base_t_half:1;
- uint16_t reserved_8_9:2;
- uint16_t idle_error_count:8;
- } s;
-} cvmx_mdio_phy_reg_status_1000_t;
-
-/**
- * PHY register 15 from the 802.3 spec
- */
-#define CVMX_MDIO_PHY_REG_EXTENDED_STATUS 15
-typedef union {
- uint16_t u16;
- struct {
- uint16_t capable_1000base_x_full:1;
- uint16_t capable_1000base_x_half:1;
- uint16_t capable_1000base_t_full:1;
- uint16_t capable_1000base_t_half:1;
- uint16_t reserved_0_11:12;
- } s;
-} cvmx_mdio_phy_reg_extended_status_t;
-
-/**
- * PHY register 13 from the 802.3 spec
- */
-#define CVMX_MDIO_PHY_REG_MMD_CONTROL 13
-typedef union {
- uint16_t u16;
- struct {
- uint16_t function:2;
- uint16_t reserved_5_13:9;
- uint16_t devad:5;
- } s;
-} cvmx_mdio_phy_reg_mmd_control_t;
-
-/**
- * PHY register 14 from the 802.3 spec
- */
-#define CVMX_MDIO_PHY_REG_MMD_ADDRESS_DATA 14
-typedef union {
- uint16_t u16;
- struct {
- uint16_t address_data:16;
- } s;
-} cvmx_mdio_phy_reg_mmd_address_data_t;
-
-/* Operating request encodings. */
-#define MDIO_CLAUSE_22_WRITE 0
-#define MDIO_CLAUSE_22_READ 1
-
-#define MDIO_CLAUSE_45_ADDRESS 0
-#define MDIO_CLAUSE_45_WRITE 1
-#define MDIO_CLAUSE_45_READ_INC 2
-#define MDIO_CLAUSE_45_READ 3
-
-/* MMD identifiers, mostly for accessing devices within XENPAK modules. */
-#define CVMX_MMD_DEVICE_PMA_PMD 1
-#define CVMX_MMD_DEVICE_WIS 2
-#define CVMX_MMD_DEVICE_PCS 3
-#define CVMX_MMD_DEVICE_PHY_XS 4
-#define CVMX_MMD_DEVICE_DTS_XS 5
-#define CVMX_MMD_DEVICE_TC 6
-#define CVMX_MMD_DEVICE_CL22_EXT 29
-#define CVMX_MMD_DEVICE_VENDOR_1 30
-#define CVMX_MMD_DEVICE_VENDOR_2 31
-
-/* Helper function to put MDIO interface into clause 45 mode */
-static inline void __cvmx_mdio_set_clause45_mode(int bus_id)
-{
- union cvmx_smix_clk smi_clk;
- /* Put bus into clause 45 mode */
- smi_clk.u64 = cvmx_read_csr(CVMX_SMIX_CLK(bus_id));
- smi_clk.s.mode = 1;
- smi_clk.s.preamble = 1;
- cvmx_write_csr(CVMX_SMIX_CLK(bus_id), smi_clk.u64);
-}
-
-/* Helper function to put MDIO interface into clause 22 mode */
-static inline void __cvmx_mdio_set_clause22_mode(int bus_id)
-{
- union cvmx_smix_clk smi_clk;
- /* Put bus into clause 22 mode */
- smi_clk.u64 = cvmx_read_csr(CVMX_SMIX_CLK(bus_id));
- smi_clk.s.mode = 0;
- cvmx_write_csr(CVMX_SMIX_CLK(bus_id), smi_clk.u64);
-}
-
-/**
- * Perform an MII read. This function is used to read PHY
- * registers controlling auto negotiation.
- *
- * @bus_id: MDIO bus number. Zero on most chips, but some chips (ex CN56XX)
- * support multiple busses.
- * @phy_id: The MII phy id
- * @location: Register location to read
- *
- * Returns Result from the read or -1 on failure
- */
-static inline int cvmx_mdio_read(int bus_id, int phy_id, int location)
-{
- union cvmx_smix_cmd smi_cmd;
- union cvmx_smix_rd_dat smi_rd;
- int timeout = 1000;
-
- if (octeon_has_feature(OCTEON_FEATURE_MDIO_CLAUSE_45))
- __cvmx_mdio_set_clause22_mode(bus_id);
-
- smi_cmd.u64 = 0;
- smi_cmd.s.phy_op = MDIO_CLAUSE_22_READ;
- smi_cmd.s.phy_adr = phy_id;
- smi_cmd.s.reg_adr = location;
- cvmx_write_csr(CVMX_SMIX_CMD(bus_id), smi_cmd.u64);
-
- do {
- cvmx_wait(1000);
- smi_rd.u64 = cvmx_read_csr(CVMX_SMIX_RD_DAT(bus_id));
- } while (smi_rd.s.pending && timeout--);
-
- if (smi_rd.s.val)
- return smi_rd.s.dat;
- else
- return -1;
-}
-
-/**
- * Perform an MII write. This function is used to write PHY
- * registers controlling auto negotiation.
- *
- * @bus_id: MDIO bus number. Zero on most chips, but some chips (ex CN56XX)
- * support multiple busses.
- * @phy_id: The MII phy id
- * @location: Register location to write
- * @val: Value to write
- *
- * Returns -1 on error
- * 0 on success
- */
-static inline int cvmx_mdio_write(int bus_id, int phy_id, int location, int val)
-{
- union cvmx_smix_cmd smi_cmd;
- union cvmx_smix_wr_dat smi_wr;
- int timeout = 1000;
-
- if (octeon_has_feature(OCTEON_FEATURE_MDIO_CLAUSE_45))
- __cvmx_mdio_set_clause22_mode(bus_id);
-
- smi_wr.u64 = 0;
- smi_wr.s.dat = val;
- cvmx_write_csr(CVMX_SMIX_WR_DAT(bus_id), smi_wr.u64);
-
- smi_cmd.u64 = 0;
- smi_cmd.s.phy_op = MDIO_CLAUSE_22_WRITE;
- smi_cmd.s.phy_adr = phy_id;
- smi_cmd.s.reg_adr = location;
- cvmx_write_csr(CVMX_SMIX_CMD(bus_id), smi_cmd.u64);
-
- do {
- cvmx_wait(1000);
- smi_wr.u64 = cvmx_read_csr(CVMX_SMIX_WR_DAT(bus_id));
- } while (smi_wr.s.pending && --timeout);
- if (timeout <= 0)
- return -1;
-
- return 0;
-}
-
-/**
- * Perform an IEEE 802.3 clause 45 MII read. This function is used to
- * read PHY registers controlling auto negotiation.
- *
- * @bus_id: MDIO bus number. Zero on most chips, but some chips (ex CN56XX)
- * support multiple busses.
- * @phy_id: The MII phy id
- * @device: MDIO Managable Device (MMD) id
- * @location: Register location to read
- *
- * Returns Result from the read or -1 on failure
- */
-
-static inline int cvmx_mdio_45_read(int bus_id, int phy_id, int device,
- int location)
-{
- union cvmx_smix_cmd smi_cmd;
- union cvmx_smix_rd_dat smi_rd;
- union cvmx_smix_wr_dat smi_wr;
- int timeout = 1000;
-
- if (!octeon_has_feature(OCTEON_FEATURE_MDIO_CLAUSE_45))
- return -1;
-
- __cvmx_mdio_set_clause45_mode(bus_id);
-
- smi_wr.u64 = 0;
- smi_wr.s.dat = location;
- cvmx_write_csr(CVMX_SMIX_WR_DAT(bus_id), smi_wr.u64);
-
- smi_cmd.u64 = 0;
- smi_cmd.s.phy_op = MDIO_CLAUSE_45_ADDRESS;
- smi_cmd.s.phy_adr = phy_id;
- smi_cmd.s.reg_adr = device;
- cvmx_write_csr(CVMX_SMIX_CMD(bus_id), smi_cmd.u64);
-
- do {
- cvmx_wait(1000);
- smi_wr.u64 = cvmx_read_csr(CVMX_SMIX_WR_DAT(bus_id));
- } while (smi_wr.s.pending && --timeout);
- if (timeout <= 0) {
- cvmx_dprintf("cvmx_mdio_45_read: bus_id %d phy_id %2d "
- "device %2d register %2d TIME OUT(address)\n",
- bus_id, phy_id, device, location);
- return -1;
- }
-
- smi_cmd.u64 = 0;
- smi_cmd.s.phy_op = MDIO_CLAUSE_45_READ;
- smi_cmd.s.phy_adr = phy_id;
- smi_cmd.s.reg_adr = device;
- cvmx_write_csr(CVMX_SMIX_CMD(bus_id), smi_cmd.u64);
-
- do {
- cvmx_wait(1000);
- smi_rd.u64 = cvmx_read_csr(CVMX_SMIX_RD_DAT(bus_id));
- } while (smi_rd.s.pending && --timeout);
-
- if (timeout <= 0) {
- cvmx_dprintf("cvmx_mdio_45_read: bus_id %d phy_id %2d "
- "device %2d register %2d TIME OUT(data)\n",
- bus_id, phy_id, device, location);
- return -1;
- }
-
- if (smi_rd.s.val)
- return smi_rd.s.dat;
- else {
- cvmx_dprintf("cvmx_mdio_45_read: bus_id %d phy_id %2d "
- "device %2d register %2d INVALID READ\n",
- bus_id, phy_id, device, location);
- return -1;
- }
-}
-
-/**
- * Perform an IEEE 802.3 clause 45 MII write. This function is used to
- * write PHY registers controlling auto negotiation.
- *
- * @bus_id: MDIO bus number. Zero on most chips, but some chips (ex CN56XX)
- * support multiple busses.
- * @phy_id: The MII phy id
- * @device: MDIO Managable Device (MMD) id
- * @location: Register location to write
- * @val: Value to write
- *
- * Returns -1 on error
- * 0 on success
- */
-static inline int cvmx_mdio_45_write(int bus_id, int phy_id, int device,
- int location, int val)
-{
- union cvmx_smix_cmd smi_cmd;
- union cvmx_smix_wr_dat smi_wr;
- int timeout = 1000;
-
- if (!octeon_has_feature(OCTEON_FEATURE_MDIO_CLAUSE_45))
- return -1;
-
- __cvmx_mdio_set_clause45_mode(bus_id);
-
- smi_wr.u64 = 0;
- smi_wr.s.dat = location;
- cvmx_write_csr(CVMX_SMIX_WR_DAT(bus_id), smi_wr.u64);
-
- smi_cmd.u64 = 0;
- smi_cmd.s.phy_op = MDIO_CLAUSE_45_ADDRESS;
- smi_cmd.s.phy_adr = phy_id;
- smi_cmd.s.reg_adr = device;
- cvmx_write_csr(CVMX_SMIX_CMD(bus_id), smi_cmd.u64);
-
- do {
- cvmx_wait(1000);
- smi_wr.u64 = cvmx_read_csr(CVMX_SMIX_WR_DAT(bus_id));
- } while (smi_wr.s.pending && --timeout);
- if (timeout <= 0)
- return -1;
-
- smi_wr.u64 = 0;
- smi_wr.s.dat = val;
- cvmx_write_csr(CVMX_SMIX_WR_DAT(bus_id), smi_wr.u64);
-
- smi_cmd.u64 = 0;
- smi_cmd.s.phy_op = MDIO_CLAUSE_45_WRITE;
- smi_cmd.s.phy_adr = phy_id;
- smi_cmd.s.reg_adr = device;
- cvmx_write_csr(CVMX_SMIX_CMD(bus_id), smi_cmd.u64);
-
- do {
- cvmx_wait(1000);
- smi_wr.u64 = cvmx_read_csr(CVMX_SMIX_WR_DAT(bus_id));
- } while (smi_wr.s.pending && --timeout);
- if (timeout <= 0)
- return -1;
-
- return 0;
-}
-
-#endif
diff --git a/drivers/staging/octeon/cvmx-packet.h b/drivers/staging/octeon/cvmx-packet.h
deleted file mode 100644
index 62ffe78a8c81..000000000000
--- a/drivers/staging/octeon/cvmx-packet.h
+++ /dev/null
@@ -1,65 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/**
- *
- * Packet buffer defines.
- */
-
-#ifndef __CVMX_PACKET_H__
-#define __CVMX_PACKET_H__
-
-/**
- * This structure defines a buffer pointer on Octeon
- */
-union cvmx_buf_ptr {
- void *ptr;
- uint64_t u64;
- struct {
- /*
- * if set, invert the "free" pick of the overall
- * packet. HW always sets this bit to 0 on inbound
- * packet
- */
- uint64_t i:1;
- /*
- * Indicates the amount to back up to get to the
- * buffer start in cache lines. In most cases this is
- * less than one complete cache line, so the value is
- * zero.
- */
- uint64_t back:4;
- /* The pool that the buffer came from / goes to */
- uint64_t pool:3;
- /* The size of the segment pointed to by addr (in bytes) */
- uint64_t size:16;
- /* Pointer to the first byte of the data, NOT buffer */
- uint64_t addr:40;
- } s;
-};
-
-#endif /* __CVMX_PACKET_H__ */
diff --git a/drivers/staging/octeon/cvmx-pcsx-defs.h b/drivers/staging/octeon/cvmx-pcsx-defs.h
deleted file mode 100644
index d45952df5f5b..000000000000
--- a/drivers/staging/octeon/cvmx-pcsx-defs.h
+++ /dev/null
@@ -1,370 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-#ifndef __CVMX_PCSX_DEFS_H__
-#define __CVMX_PCSX_DEFS_H__
-
-#define CVMX_PCSX_ANX_ADV_REG(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0001010ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PCSX_ANX_EXT_ST_REG(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0001028ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PCSX_ANX_LP_ABIL_REG(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0001018ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PCSX_ANX_RESULTS_REG(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0001020ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PCSX_INTX_EN_REG(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0001088ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PCSX_INTX_REG(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0001080ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PCSX_LINKX_TIMER_COUNT_REG(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0001040ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PCSX_LOG_ANLX_REG(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0001090ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PCSX_MISCX_CTL_REG(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0001078ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PCSX_MRX_CONTROL_REG(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0001000ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PCSX_MRX_STATUS_REG(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0001008ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PCSX_RXX_STATES_REG(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0001058ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PCSX_RXX_SYNC_REG(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0001050ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PCSX_SGMX_AN_ADV_REG(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0001068ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PCSX_SGMX_LP_ADV_REG(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0001070ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PCSX_TXX_STATES_REG(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0001060ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PCSX_TX_RXX_POLARITY_REG(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0001048ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull))
-
-union cvmx_pcsx_anx_adv_reg {
- uint64_t u64;
- struct cvmx_pcsx_anx_adv_reg_s {
- uint64_t reserved_16_63:48;
- uint64_t np:1;
- uint64_t reserved_14_14:1;
- uint64_t rem_flt:2;
- uint64_t reserved_9_11:3;
- uint64_t pause:2;
- uint64_t hfd:1;
- uint64_t fd:1;
- uint64_t reserved_0_4:5;
- } s;
- struct cvmx_pcsx_anx_adv_reg_s cn52xx;
- struct cvmx_pcsx_anx_adv_reg_s cn52xxp1;
- struct cvmx_pcsx_anx_adv_reg_s cn56xx;
- struct cvmx_pcsx_anx_adv_reg_s cn56xxp1;
-};
-
-union cvmx_pcsx_anx_ext_st_reg {
- uint64_t u64;
- struct cvmx_pcsx_anx_ext_st_reg_s {
- uint64_t reserved_16_63:48;
- uint64_t thou_xfd:1;
- uint64_t thou_xhd:1;
- uint64_t thou_tfd:1;
- uint64_t thou_thd:1;
- uint64_t reserved_0_11:12;
- } s;
- struct cvmx_pcsx_anx_ext_st_reg_s cn52xx;
- struct cvmx_pcsx_anx_ext_st_reg_s cn52xxp1;
- struct cvmx_pcsx_anx_ext_st_reg_s cn56xx;
- struct cvmx_pcsx_anx_ext_st_reg_s cn56xxp1;
-};
-
-union cvmx_pcsx_anx_lp_abil_reg {
- uint64_t u64;
- struct cvmx_pcsx_anx_lp_abil_reg_s {
- uint64_t reserved_16_63:48;
- uint64_t np:1;
- uint64_t ack:1;
- uint64_t rem_flt:2;
- uint64_t reserved_9_11:3;
- uint64_t pause:2;
- uint64_t hfd:1;
- uint64_t fd:1;
- uint64_t reserved_0_4:5;
- } s;
- struct cvmx_pcsx_anx_lp_abil_reg_s cn52xx;
- struct cvmx_pcsx_anx_lp_abil_reg_s cn52xxp1;
- struct cvmx_pcsx_anx_lp_abil_reg_s cn56xx;
- struct cvmx_pcsx_anx_lp_abil_reg_s cn56xxp1;
-};
-
-union cvmx_pcsx_anx_results_reg {
- uint64_t u64;
- struct cvmx_pcsx_anx_results_reg_s {
- uint64_t reserved_7_63:57;
- uint64_t pause:2;
- uint64_t spd:2;
- uint64_t an_cpt:1;
- uint64_t dup:1;
- uint64_t link_ok:1;
- } s;
- struct cvmx_pcsx_anx_results_reg_s cn52xx;
- struct cvmx_pcsx_anx_results_reg_s cn52xxp1;
- struct cvmx_pcsx_anx_results_reg_s cn56xx;
- struct cvmx_pcsx_anx_results_reg_s cn56xxp1;
-};
-
-union cvmx_pcsx_intx_en_reg {
- uint64_t u64;
- struct cvmx_pcsx_intx_en_reg_s {
- uint64_t reserved_12_63:52;
- uint64_t dup:1;
- uint64_t sync_bad_en:1;
- uint64_t an_bad_en:1;
- uint64_t rxlock_en:1;
- uint64_t rxbad_en:1;
- uint64_t rxerr_en:1;
- uint64_t txbad_en:1;
- uint64_t txfifo_en:1;
- uint64_t txfifu_en:1;
- uint64_t an_err_en:1;
- uint64_t xmit_en:1;
- uint64_t lnkspd_en:1;
- } s;
- struct cvmx_pcsx_intx_en_reg_s cn52xx;
- struct cvmx_pcsx_intx_en_reg_s cn52xxp1;
- struct cvmx_pcsx_intx_en_reg_s cn56xx;
- struct cvmx_pcsx_intx_en_reg_s cn56xxp1;
-};
-
-union cvmx_pcsx_intx_reg {
- uint64_t u64;
- struct cvmx_pcsx_intx_reg_s {
- uint64_t reserved_12_63:52;
- uint64_t dup:1;
- uint64_t sync_bad:1;
- uint64_t an_bad:1;
- uint64_t rxlock:1;
- uint64_t rxbad:1;
- uint64_t rxerr:1;
- uint64_t txbad:1;
- uint64_t txfifo:1;
- uint64_t txfifu:1;
- uint64_t an_err:1;
- uint64_t xmit:1;
- uint64_t lnkspd:1;
- } s;
- struct cvmx_pcsx_intx_reg_s cn52xx;
- struct cvmx_pcsx_intx_reg_s cn52xxp1;
- struct cvmx_pcsx_intx_reg_s cn56xx;
- struct cvmx_pcsx_intx_reg_s cn56xxp1;
-};
-
-union cvmx_pcsx_linkx_timer_count_reg {
- uint64_t u64;
- struct cvmx_pcsx_linkx_timer_count_reg_s {
- uint64_t reserved_16_63:48;
- uint64_t count:16;
- } s;
- struct cvmx_pcsx_linkx_timer_count_reg_s cn52xx;
- struct cvmx_pcsx_linkx_timer_count_reg_s cn52xxp1;
- struct cvmx_pcsx_linkx_timer_count_reg_s cn56xx;
- struct cvmx_pcsx_linkx_timer_count_reg_s cn56xxp1;
-};
-
-union cvmx_pcsx_log_anlx_reg {
- uint64_t u64;
- struct cvmx_pcsx_log_anlx_reg_s {
- uint64_t reserved_4_63:60;
- uint64_t lafifovfl:1;
- uint64_t la_en:1;
- uint64_t pkt_sz:2;
- } s;
- struct cvmx_pcsx_log_anlx_reg_s cn52xx;
- struct cvmx_pcsx_log_anlx_reg_s cn52xxp1;
- struct cvmx_pcsx_log_anlx_reg_s cn56xx;
- struct cvmx_pcsx_log_anlx_reg_s cn56xxp1;
-};
-
-union cvmx_pcsx_miscx_ctl_reg {
- uint64_t u64;
- struct cvmx_pcsx_miscx_ctl_reg_s {
- uint64_t reserved_13_63:51;
- uint64_t sgmii:1;
- uint64_t gmxeno:1;
- uint64_t loopbck2:1;
- uint64_t mac_phy:1;
- uint64_t mode:1;
- uint64_t an_ovrd:1;
- uint64_t samp_pt:7;
- } s;
- struct cvmx_pcsx_miscx_ctl_reg_s cn52xx;
- struct cvmx_pcsx_miscx_ctl_reg_s cn52xxp1;
- struct cvmx_pcsx_miscx_ctl_reg_s cn56xx;
- struct cvmx_pcsx_miscx_ctl_reg_s cn56xxp1;
-};
-
-union cvmx_pcsx_mrx_control_reg {
- uint64_t u64;
- struct cvmx_pcsx_mrx_control_reg_s {
- uint64_t reserved_16_63:48;
- uint64_t reset:1;
- uint64_t loopbck1:1;
- uint64_t spdlsb:1;
- uint64_t an_en:1;
- uint64_t pwr_dn:1;
- uint64_t reserved_10_10:1;
- uint64_t rst_an:1;
- uint64_t dup:1;
- uint64_t coltst:1;
- uint64_t spdmsb:1;
- uint64_t uni:1;
- uint64_t reserved_0_4:5;
- } s;
- struct cvmx_pcsx_mrx_control_reg_s cn52xx;
- struct cvmx_pcsx_mrx_control_reg_s cn52xxp1;
- struct cvmx_pcsx_mrx_control_reg_s cn56xx;
- struct cvmx_pcsx_mrx_control_reg_s cn56xxp1;
-};
-
-union cvmx_pcsx_mrx_status_reg {
- uint64_t u64;
- struct cvmx_pcsx_mrx_status_reg_s {
- uint64_t reserved_16_63:48;
- uint64_t hun_t4:1;
- uint64_t hun_xfd:1;
- uint64_t hun_xhd:1;
- uint64_t ten_fd:1;
- uint64_t ten_hd:1;
- uint64_t hun_t2fd:1;
- uint64_t hun_t2hd:1;
- uint64_t ext_st:1;
- uint64_t reserved_7_7:1;
- uint64_t prb_sup:1;
- uint64_t an_cpt:1;
- uint64_t rm_flt:1;
- uint64_t an_abil:1;
- uint64_t lnk_st:1;
- uint64_t reserved_1_1:1;
- uint64_t extnd:1;
- } s;
- struct cvmx_pcsx_mrx_status_reg_s cn52xx;
- struct cvmx_pcsx_mrx_status_reg_s cn52xxp1;
- struct cvmx_pcsx_mrx_status_reg_s cn56xx;
- struct cvmx_pcsx_mrx_status_reg_s cn56xxp1;
-};
-
-union cvmx_pcsx_rxx_states_reg {
- uint64_t u64;
- struct cvmx_pcsx_rxx_states_reg_s {
- uint64_t reserved_16_63:48;
- uint64_t rx_bad:1;
- uint64_t rx_st:5;
- uint64_t sync_bad:1;
- uint64_t sync:4;
- uint64_t an_bad:1;
- uint64_t an_st:4;
- } s;
- struct cvmx_pcsx_rxx_states_reg_s cn52xx;
- struct cvmx_pcsx_rxx_states_reg_s cn52xxp1;
- struct cvmx_pcsx_rxx_states_reg_s cn56xx;
- struct cvmx_pcsx_rxx_states_reg_s cn56xxp1;
-};
-
-union cvmx_pcsx_rxx_sync_reg {
- uint64_t u64;
- struct cvmx_pcsx_rxx_sync_reg_s {
- uint64_t reserved_2_63:62;
- uint64_t sync:1;
- uint64_t bit_lock:1;
- } s;
- struct cvmx_pcsx_rxx_sync_reg_s cn52xx;
- struct cvmx_pcsx_rxx_sync_reg_s cn52xxp1;
- struct cvmx_pcsx_rxx_sync_reg_s cn56xx;
- struct cvmx_pcsx_rxx_sync_reg_s cn56xxp1;
-};
-
-union cvmx_pcsx_sgmx_an_adv_reg {
- uint64_t u64;
- struct cvmx_pcsx_sgmx_an_adv_reg_s {
- uint64_t reserved_16_63:48;
- uint64_t link:1;
- uint64_t ack:1;
- uint64_t reserved_13_13:1;
- uint64_t dup:1;
- uint64_t speed:2;
- uint64_t reserved_1_9:9;
- uint64_t one:1;
- } s;
- struct cvmx_pcsx_sgmx_an_adv_reg_s cn52xx;
- struct cvmx_pcsx_sgmx_an_adv_reg_s cn52xxp1;
- struct cvmx_pcsx_sgmx_an_adv_reg_s cn56xx;
- struct cvmx_pcsx_sgmx_an_adv_reg_s cn56xxp1;
-};
-
-union cvmx_pcsx_sgmx_lp_adv_reg {
- uint64_t u64;
- struct cvmx_pcsx_sgmx_lp_adv_reg_s {
- uint64_t reserved_16_63:48;
- uint64_t link:1;
- uint64_t reserved_13_14:2;
- uint64_t dup:1;
- uint64_t speed:2;
- uint64_t reserved_1_9:9;
- uint64_t one:1;
- } s;
- struct cvmx_pcsx_sgmx_lp_adv_reg_s cn52xx;
- struct cvmx_pcsx_sgmx_lp_adv_reg_s cn52xxp1;
- struct cvmx_pcsx_sgmx_lp_adv_reg_s cn56xx;
- struct cvmx_pcsx_sgmx_lp_adv_reg_s cn56xxp1;
-};
-
-union cvmx_pcsx_txx_states_reg {
- uint64_t u64;
- struct cvmx_pcsx_txx_states_reg_s {
- uint64_t reserved_7_63:57;
- uint64_t xmit:2;
- uint64_t tx_bad:1;
- uint64_t ord_st:4;
- } s;
- struct cvmx_pcsx_txx_states_reg_s cn52xx;
- struct cvmx_pcsx_txx_states_reg_s cn52xxp1;
- struct cvmx_pcsx_txx_states_reg_s cn56xx;
- struct cvmx_pcsx_txx_states_reg_s cn56xxp1;
-};
-
-union cvmx_pcsx_tx_rxx_polarity_reg {
- uint64_t u64;
- struct cvmx_pcsx_tx_rxx_polarity_reg_s {
- uint64_t reserved_4_63:60;
- uint64_t rxovrd:1;
- uint64_t autorxpl:1;
- uint64_t rxplrt:1;
- uint64_t txplrt:1;
- } s;
- struct cvmx_pcsx_tx_rxx_polarity_reg_s cn52xx;
- struct cvmx_pcsx_tx_rxx_polarity_reg_s cn52xxp1;
- struct cvmx_pcsx_tx_rxx_polarity_reg_s cn56xx;
- struct cvmx_pcsx_tx_rxx_polarity_reg_s cn56xxp1;
-};
-
-#endif
diff --git a/drivers/staging/octeon/cvmx-pcsxx-defs.h b/drivers/staging/octeon/cvmx-pcsxx-defs.h
deleted file mode 100644
index 55d120fe8aed..000000000000
--- a/drivers/staging/octeon/cvmx-pcsxx-defs.h
+++ /dev/null
@@ -1,316 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-#ifndef __CVMX_PCSXX_DEFS_H__
-#define __CVMX_PCSXX_DEFS_H__
-
-#define CVMX_PCSXX_10GBX_STATUS_REG(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000828ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PCSXX_BIST_STATUS_REG(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000870ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PCSXX_BIT_LOCK_STATUS_REG(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000850ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PCSXX_CONTROL1_REG(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000800ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PCSXX_CONTROL2_REG(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000818ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PCSXX_INT_EN_REG(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000860ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PCSXX_INT_REG(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000858ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PCSXX_LOG_ANL_REG(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000868ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PCSXX_MISC_CTL_REG(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000848ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PCSXX_RX_SYNC_STATES_REG(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000838ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PCSXX_SPD_ABIL_REG(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000810ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PCSXX_STATUS1_REG(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000808ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PCSXX_STATUS2_REG(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000820ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PCSXX_TX_RX_POLARITY_REG(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000840ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PCSXX_TX_RX_STATES_REG(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000830ull + (((block_id) & 1) * 0x8000000ull))
-
-union cvmx_pcsxx_10gbx_status_reg {
- uint64_t u64;
- struct cvmx_pcsxx_10gbx_status_reg_s {
- uint64_t reserved_13_63:51;
- uint64_t alignd:1;
- uint64_t pattst:1;
- uint64_t reserved_4_10:7;
- uint64_t l3sync:1;
- uint64_t l2sync:1;
- uint64_t l1sync:1;
- uint64_t l0sync:1;
- } s;
- struct cvmx_pcsxx_10gbx_status_reg_s cn52xx;
- struct cvmx_pcsxx_10gbx_status_reg_s cn52xxp1;
- struct cvmx_pcsxx_10gbx_status_reg_s cn56xx;
- struct cvmx_pcsxx_10gbx_status_reg_s cn56xxp1;
-};
-
-union cvmx_pcsxx_bist_status_reg {
- uint64_t u64;
- struct cvmx_pcsxx_bist_status_reg_s {
- uint64_t reserved_1_63:63;
- uint64_t bist_status:1;
- } s;
- struct cvmx_pcsxx_bist_status_reg_s cn52xx;
- struct cvmx_pcsxx_bist_status_reg_s cn52xxp1;
- struct cvmx_pcsxx_bist_status_reg_s cn56xx;
- struct cvmx_pcsxx_bist_status_reg_s cn56xxp1;
-};
-
-union cvmx_pcsxx_bit_lock_status_reg {
- uint64_t u64;
- struct cvmx_pcsxx_bit_lock_status_reg_s {
- uint64_t reserved_4_63:60;
- uint64_t bitlck3:1;
- uint64_t bitlck2:1;
- uint64_t bitlck1:1;
- uint64_t bitlck0:1;
- } s;
- struct cvmx_pcsxx_bit_lock_status_reg_s cn52xx;
- struct cvmx_pcsxx_bit_lock_status_reg_s cn52xxp1;
- struct cvmx_pcsxx_bit_lock_status_reg_s cn56xx;
- struct cvmx_pcsxx_bit_lock_status_reg_s cn56xxp1;
-};
-
-union cvmx_pcsxx_control1_reg {
- uint64_t u64;
- struct cvmx_pcsxx_control1_reg_s {
- uint64_t reserved_16_63:48;
- uint64_t reset:1;
- uint64_t loopbck1:1;
- uint64_t spdsel1:1;
- uint64_t reserved_12_12:1;
- uint64_t lo_pwr:1;
- uint64_t reserved_7_10:4;
- uint64_t spdsel0:1;
- uint64_t spd:4;
- uint64_t reserved_0_1:2;
- } s;
- struct cvmx_pcsxx_control1_reg_s cn52xx;
- struct cvmx_pcsxx_control1_reg_s cn52xxp1;
- struct cvmx_pcsxx_control1_reg_s cn56xx;
- struct cvmx_pcsxx_control1_reg_s cn56xxp1;
-};
-
-union cvmx_pcsxx_control2_reg {
- uint64_t u64;
- struct cvmx_pcsxx_control2_reg_s {
- uint64_t reserved_2_63:62;
- uint64_t type:2;
- } s;
- struct cvmx_pcsxx_control2_reg_s cn52xx;
- struct cvmx_pcsxx_control2_reg_s cn52xxp1;
- struct cvmx_pcsxx_control2_reg_s cn56xx;
- struct cvmx_pcsxx_control2_reg_s cn56xxp1;
-};
-
-union cvmx_pcsxx_int_en_reg {
- uint64_t u64;
- struct cvmx_pcsxx_int_en_reg_s {
- uint64_t reserved_6_63:58;
- uint64_t algnlos_en:1;
- uint64_t synlos_en:1;
- uint64_t bitlckls_en:1;
- uint64_t rxsynbad_en:1;
- uint64_t rxbad_en:1;
- uint64_t txflt_en:1;
- } s;
- struct cvmx_pcsxx_int_en_reg_s cn52xx;
- struct cvmx_pcsxx_int_en_reg_s cn52xxp1;
- struct cvmx_pcsxx_int_en_reg_s cn56xx;
- struct cvmx_pcsxx_int_en_reg_s cn56xxp1;
-};
-
-union cvmx_pcsxx_int_reg {
- uint64_t u64;
- struct cvmx_pcsxx_int_reg_s {
- uint64_t reserved_6_63:58;
- uint64_t algnlos:1;
- uint64_t synlos:1;
- uint64_t bitlckls:1;
- uint64_t rxsynbad:1;
- uint64_t rxbad:1;
- uint64_t txflt:1;
- } s;
- struct cvmx_pcsxx_int_reg_s cn52xx;
- struct cvmx_pcsxx_int_reg_s cn52xxp1;
- struct cvmx_pcsxx_int_reg_s cn56xx;
- struct cvmx_pcsxx_int_reg_s cn56xxp1;
-};
-
-union cvmx_pcsxx_log_anl_reg {
- uint64_t u64;
- struct cvmx_pcsxx_log_anl_reg_s {
- uint64_t reserved_7_63:57;
- uint64_t enc_mode:1;
- uint64_t drop_ln:2;
- uint64_t lafifovfl:1;
- uint64_t la_en:1;
- uint64_t pkt_sz:2;
- } s;
- struct cvmx_pcsxx_log_anl_reg_s cn52xx;
- struct cvmx_pcsxx_log_anl_reg_s cn52xxp1;
- struct cvmx_pcsxx_log_anl_reg_s cn56xx;
- struct cvmx_pcsxx_log_anl_reg_s cn56xxp1;
-};
-
-union cvmx_pcsxx_misc_ctl_reg {
- uint64_t u64;
- struct cvmx_pcsxx_misc_ctl_reg_s {
- uint64_t reserved_4_63:60;
- uint64_t tx_swap:1;
- uint64_t rx_swap:1;
- uint64_t xaui:1;
- uint64_t gmxeno:1;
- } s;
- struct cvmx_pcsxx_misc_ctl_reg_s cn52xx;
- struct cvmx_pcsxx_misc_ctl_reg_s cn52xxp1;
- struct cvmx_pcsxx_misc_ctl_reg_s cn56xx;
- struct cvmx_pcsxx_misc_ctl_reg_s cn56xxp1;
-};
-
-union cvmx_pcsxx_rx_sync_states_reg {
- uint64_t u64;
- struct cvmx_pcsxx_rx_sync_states_reg_s {
- uint64_t reserved_16_63:48;
- uint64_t sync3st:4;
- uint64_t sync2st:4;
- uint64_t sync1st:4;
- uint64_t sync0st:4;
- } s;
- struct cvmx_pcsxx_rx_sync_states_reg_s cn52xx;
- struct cvmx_pcsxx_rx_sync_states_reg_s cn52xxp1;
- struct cvmx_pcsxx_rx_sync_states_reg_s cn56xx;
- struct cvmx_pcsxx_rx_sync_states_reg_s cn56xxp1;
-};
-
-union cvmx_pcsxx_spd_abil_reg {
- uint64_t u64;
- struct cvmx_pcsxx_spd_abil_reg_s {
- uint64_t reserved_2_63:62;
- uint64_t tenpasst:1;
- uint64_t tengb:1;
- } s;
- struct cvmx_pcsxx_spd_abil_reg_s cn52xx;
- struct cvmx_pcsxx_spd_abil_reg_s cn52xxp1;
- struct cvmx_pcsxx_spd_abil_reg_s cn56xx;
- struct cvmx_pcsxx_spd_abil_reg_s cn56xxp1;
-};
-
-union cvmx_pcsxx_status1_reg {
- uint64_t u64;
- struct cvmx_pcsxx_status1_reg_s {
- uint64_t reserved_8_63:56;
- uint64_t flt:1;
- uint64_t reserved_3_6:4;
- uint64_t rcv_lnk:1;
- uint64_t lpable:1;
- uint64_t reserved_0_0:1;
- } s;
- struct cvmx_pcsxx_status1_reg_s cn52xx;
- struct cvmx_pcsxx_status1_reg_s cn52xxp1;
- struct cvmx_pcsxx_status1_reg_s cn56xx;
- struct cvmx_pcsxx_status1_reg_s cn56xxp1;
-};
-
-union cvmx_pcsxx_status2_reg {
- uint64_t u64;
- struct cvmx_pcsxx_status2_reg_s {
- uint64_t reserved_16_63:48;
- uint64_t dev:2;
- uint64_t reserved_12_13:2;
- uint64_t xmtflt:1;
- uint64_t rcvflt:1;
- uint64_t reserved_3_9:7;
- uint64_t tengb_w:1;
- uint64_t tengb_x:1;
- uint64_t tengb_r:1;
- } s;
- struct cvmx_pcsxx_status2_reg_s cn52xx;
- struct cvmx_pcsxx_status2_reg_s cn52xxp1;
- struct cvmx_pcsxx_status2_reg_s cn56xx;
- struct cvmx_pcsxx_status2_reg_s cn56xxp1;
-};
-
-union cvmx_pcsxx_tx_rx_polarity_reg {
- uint64_t u64;
- struct cvmx_pcsxx_tx_rx_polarity_reg_s {
- uint64_t reserved_10_63:54;
- uint64_t xor_rxplrt:4;
- uint64_t xor_txplrt:4;
- uint64_t rxplrt:1;
- uint64_t txplrt:1;
- } s;
- struct cvmx_pcsxx_tx_rx_polarity_reg_s cn52xx;
- struct cvmx_pcsxx_tx_rx_polarity_reg_cn52xxp1 {
- uint64_t reserved_2_63:62;
- uint64_t rxplrt:1;
- uint64_t txplrt:1;
- } cn52xxp1;
- struct cvmx_pcsxx_tx_rx_polarity_reg_s cn56xx;
- struct cvmx_pcsxx_tx_rx_polarity_reg_cn52xxp1 cn56xxp1;
-};
-
-union cvmx_pcsxx_tx_rx_states_reg {
- uint64_t u64;
- struct cvmx_pcsxx_tx_rx_states_reg_s {
- uint64_t reserved_14_63:50;
- uint64_t term_err:1;
- uint64_t syn3bad:1;
- uint64_t syn2bad:1;
- uint64_t syn1bad:1;
- uint64_t syn0bad:1;
- uint64_t rxbad:1;
- uint64_t algn_st:3;
- uint64_t rx_st:2;
- uint64_t tx_st:3;
- } s;
- struct cvmx_pcsxx_tx_rx_states_reg_s cn52xx;
- struct cvmx_pcsxx_tx_rx_states_reg_cn52xxp1 {
- uint64_t reserved_13_63:51;
- uint64_t syn3bad:1;
- uint64_t syn2bad:1;
- uint64_t syn1bad:1;
- uint64_t syn0bad:1;
- uint64_t rxbad:1;
- uint64_t algn_st:3;
- uint64_t rx_st:2;
- uint64_t tx_st:3;
- } cn52xxp1;
- struct cvmx_pcsxx_tx_rx_states_reg_s cn56xx;
- struct cvmx_pcsxx_tx_rx_states_reg_cn52xxp1 cn56xxp1;
-};
-
-#endif
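The register unions removed above all follow the same OCTEON CSR idiom: read the raw 64-bit CSR into the union's u64 member, then decode individual bitfields through the common "s" view (or a chip-specific view). A minimal sketch, assuming the cvmx_read_csr() accessor used elsewhere in this staging tree together with the CVMX_PCSXX_STATUS1_REG() macro and union from the file just removed:

	union cvmx_pcsxx_status1_reg status1;
	int link_up;

	/* Read the XAUI PCS status CSR for block 0; rcv_lnk is the link-up bit. */
	status1.u64 = cvmx_read_csr(CVMX_PCSXX_STATUS1_REG(0));
	link_up = status1.s.rcv_lnk;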
diff --git a/drivers/staging/octeon/cvmx-pip-defs.h b/drivers/staging/octeon/cvmx-pip-defs.h
deleted file mode 100644
index 5a369100ca68..000000000000
--- a/drivers/staging/octeon/cvmx-pip-defs.h
+++ /dev/null
@@ -1,1267 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-#ifndef __CVMX_PIP_DEFS_H__
-#define __CVMX_PIP_DEFS_H__
-
-/*
- * Enumeration representing the amount of packet processing
- * and validation performed by the input hardware.
- */
-enum cvmx_pip_port_parse_mode {
- /*
- * Packet input doesn't perform any processing of the input
- * packet.
- */
- CVMX_PIP_PORT_CFG_MODE_NONE = 0ull,
- /*
- * Full packet processing is performed with pointer starting
- * at the L2 (ethernet MAC) header.
- */
- CVMX_PIP_PORT_CFG_MODE_SKIPL2 = 1ull,
- /*
-	 * Input packets are assumed to be IP. Results from non-IP
-	 * packets are undefined. Pointers reference the beginning of
- * the IP header.
- */
- CVMX_PIP_PORT_CFG_MODE_SKIPIP = 2ull
-};
-
-#define CVMX_PIP_BCK_PRS \
- CVMX_ADD_IO_SEG(0x00011800A0000038ull)
-#define CVMX_PIP_BIST_STATUS \
- CVMX_ADD_IO_SEG(0x00011800A0000000ull)
-#define CVMX_PIP_CRC_CTLX(offset) \
- CVMX_ADD_IO_SEG(0x00011800A0000040ull + (((offset) & 1) * 8))
-#define CVMX_PIP_CRC_IVX(offset) \
- CVMX_ADD_IO_SEG(0x00011800A0000050ull + (((offset) & 1) * 8))
-#define CVMX_PIP_DEC_IPSECX(offset) \
- CVMX_ADD_IO_SEG(0x00011800A0000080ull + (((offset) & 3) * 8))
-#define CVMX_PIP_DSA_SRC_GRP \
- CVMX_ADD_IO_SEG(0x00011800A0000190ull)
-#define CVMX_PIP_DSA_VID_GRP \
- CVMX_ADD_IO_SEG(0x00011800A0000198ull)
-#define CVMX_PIP_FRM_LEN_CHKX(offset) \
- CVMX_ADD_IO_SEG(0x00011800A0000180ull + (((offset) & 1) * 8))
-#define CVMX_PIP_GBL_CFG \
- CVMX_ADD_IO_SEG(0x00011800A0000028ull)
-#define CVMX_PIP_GBL_CTL \
- CVMX_ADD_IO_SEG(0x00011800A0000020ull)
-#define CVMX_PIP_HG_PRI_QOS \
- CVMX_ADD_IO_SEG(0x00011800A00001A0ull)
-#define CVMX_PIP_INT_EN \
- CVMX_ADD_IO_SEG(0x00011800A0000010ull)
-#define CVMX_PIP_INT_REG \
- CVMX_ADD_IO_SEG(0x00011800A0000008ull)
-#define CVMX_PIP_IP_OFFSET \
- CVMX_ADD_IO_SEG(0x00011800A0000060ull)
-#define CVMX_PIP_PRT_CFGX(offset) \
- CVMX_ADD_IO_SEG(0x00011800A0000200ull + (((offset) & 63) * 8))
-#define CVMX_PIP_PRT_TAGX(offset) \
- CVMX_ADD_IO_SEG(0x00011800A0000400ull + (((offset) & 63) * 8))
-#define CVMX_PIP_QOS_DIFFX(offset) \
- CVMX_ADD_IO_SEG(0x00011800A0000600ull + (((offset) & 63) * 8))
-#define CVMX_PIP_QOS_VLANX(offset) \
- CVMX_ADD_IO_SEG(0x00011800A00000C0ull + (((offset) & 7) * 8))
-#define CVMX_PIP_QOS_WATCHX(offset) \
- CVMX_ADD_IO_SEG(0x00011800A0000100ull + (((offset) & 7) * 8))
-#define CVMX_PIP_RAW_WORD \
- CVMX_ADD_IO_SEG(0x00011800A00000B0ull)
-#define CVMX_PIP_SFT_RST \
- CVMX_ADD_IO_SEG(0x00011800A0000030ull)
-#define CVMX_PIP_STAT0_PRTX(offset) \
- CVMX_ADD_IO_SEG(0x00011800A0000800ull + (((offset) & 63) * 80))
-#define CVMX_PIP_STAT1_PRTX(offset) \
- CVMX_ADD_IO_SEG(0x00011800A0000808ull + (((offset) & 63) * 80))
-#define CVMX_PIP_STAT2_PRTX(offset) \
- CVMX_ADD_IO_SEG(0x00011800A0000810ull + (((offset) & 63) * 80))
-#define CVMX_PIP_STAT3_PRTX(offset) \
- CVMX_ADD_IO_SEG(0x00011800A0000818ull + (((offset) & 63) * 80))
-#define CVMX_PIP_STAT4_PRTX(offset) \
- CVMX_ADD_IO_SEG(0x00011800A0000820ull + (((offset) & 63) * 80))
-#define CVMX_PIP_STAT5_PRTX(offset) \
- CVMX_ADD_IO_SEG(0x00011800A0000828ull + (((offset) & 63) * 80))
-#define CVMX_PIP_STAT6_PRTX(offset) \
- CVMX_ADD_IO_SEG(0x00011800A0000830ull + (((offset) & 63) * 80))
-#define CVMX_PIP_STAT7_PRTX(offset) \
- CVMX_ADD_IO_SEG(0x00011800A0000838ull + (((offset) & 63) * 80))
-#define CVMX_PIP_STAT8_PRTX(offset) \
- CVMX_ADD_IO_SEG(0x00011800A0000840ull + (((offset) & 63) * 80))
-#define CVMX_PIP_STAT9_PRTX(offset) \
- CVMX_ADD_IO_SEG(0x00011800A0000848ull + (((offset) & 63) * 80))
-#define CVMX_PIP_STAT_CTL \
- CVMX_ADD_IO_SEG(0x00011800A0000018ull)
-#define CVMX_PIP_STAT_INB_ERRSX(offset) \
- CVMX_ADD_IO_SEG(0x00011800A0001A10ull + (((offset) & 63) * 32))
-#define CVMX_PIP_STAT_INB_OCTSX(offset) \
- CVMX_ADD_IO_SEG(0x00011800A0001A08ull + (((offset) & 63) * 32))
-#define CVMX_PIP_STAT_INB_PKTSX(offset) \
- CVMX_ADD_IO_SEG(0x00011800A0001A00ull + (((offset) & 63) * 32))
-#define CVMX_PIP_TAG_INCX(offset) \
- CVMX_ADD_IO_SEG(0x00011800A0001800ull + (((offset) & 63) * 8))
-#define CVMX_PIP_TAG_MASK \
- CVMX_ADD_IO_SEG(0x00011800A0000070ull)
-#define CVMX_PIP_TAG_SECRET \
- CVMX_ADD_IO_SEG(0x00011800A0000068ull)
-#define CVMX_PIP_TODO_ENTRY \
- CVMX_ADD_IO_SEG(0x00011800A0000078ull)
-
-union cvmx_pip_bck_prs {
- uint64_t u64;
- struct cvmx_pip_bck_prs_s {
- uint64_t bckprs:1;
- uint64_t reserved_13_62:50;
- uint64_t hiwater:5;
- uint64_t reserved_5_7:3;
- uint64_t lowater:5;
- } s;
- struct cvmx_pip_bck_prs_s cn38xx;
- struct cvmx_pip_bck_prs_s cn38xxp2;
- struct cvmx_pip_bck_prs_s cn56xx;
- struct cvmx_pip_bck_prs_s cn56xxp1;
- struct cvmx_pip_bck_prs_s cn58xx;
- struct cvmx_pip_bck_prs_s cn58xxp1;
-};
-
-union cvmx_pip_bist_status {
- uint64_t u64;
- struct cvmx_pip_bist_status_s {
- uint64_t reserved_18_63:46;
- uint64_t bist:18;
- } s;
- struct cvmx_pip_bist_status_s cn30xx;
- struct cvmx_pip_bist_status_s cn31xx;
- struct cvmx_pip_bist_status_s cn38xx;
- struct cvmx_pip_bist_status_s cn38xxp2;
- struct cvmx_pip_bist_status_cn50xx {
- uint64_t reserved_17_63:47;
- uint64_t bist:17;
- } cn50xx;
- struct cvmx_pip_bist_status_s cn52xx;
- struct cvmx_pip_bist_status_s cn52xxp1;
- struct cvmx_pip_bist_status_s cn56xx;
- struct cvmx_pip_bist_status_s cn56xxp1;
- struct cvmx_pip_bist_status_s cn58xx;
- struct cvmx_pip_bist_status_s cn58xxp1;
-};
-
-union cvmx_pip_crc_ctlx {
- uint64_t u64;
- struct cvmx_pip_crc_ctlx_s {
- uint64_t reserved_2_63:62;
- uint64_t invres:1;
- uint64_t reflect:1;
- } s;
- struct cvmx_pip_crc_ctlx_s cn38xx;
- struct cvmx_pip_crc_ctlx_s cn38xxp2;
- struct cvmx_pip_crc_ctlx_s cn58xx;
- struct cvmx_pip_crc_ctlx_s cn58xxp1;
-};
-
-union cvmx_pip_crc_ivx {
- uint64_t u64;
- struct cvmx_pip_crc_ivx_s {
- uint64_t reserved_32_63:32;
- uint64_t iv:32;
- } s;
- struct cvmx_pip_crc_ivx_s cn38xx;
- struct cvmx_pip_crc_ivx_s cn38xxp2;
- struct cvmx_pip_crc_ivx_s cn58xx;
- struct cvmx_pip_crc_ivx_s cn58xxp1;
-};
-
-union cvmx_pip_dec_ipsecx {
- uint64_t u64;
- struct cvmx_pip_dec_ipsecx_s {
- uint64_t reserved_18_63:46;
- uint64_t tcp:1;
- uint64_t udp:1;
- uint64_t dprt:16;
- } s;
- struct cvmx_pip_dec_ipsecx_s cn30xx;
- struct cvmx_pip_dec_ipsecx_s cn31xx;
- struct cvmx_pip_dec_ipsecx_s cn38xx;
- struct cvmx_pip_dec_ipsecx_s cn38xxp2;
- struct cvmx_pip_dec_ipsecx_s cn50xx;
- struct cvmx_pip_dec_ipsecx_s cn52xx;
- struct cvmx_pip_dec_ipsecx_s cn52xxp1;
- struct cvmx_pip_dec_ipsecx_s cn56xx;
- struct cvmx_pip_dec_ipsecx_s cn56xxp1;
- struct cvmx_pip_dec_ipsecx_s cn58xx;
- struct cvmx_pip_dec_ipsecx_s cn58xxp1;
-};
-
-union cvmx_pip_dsa_src_grp {
- uint64_t u64;
- struct cvmx_pip_dsa_src_grp_s {
- uint64_t map15:4;
- uint64_t map14:4;
- uint64_t map13:4;
- uint64_t map12:4;
- uint64_t map11:4;
- uint64_t map10:4;
- uint64_t map9:4;
- uint64_t map8:4;
- uint64_t map7:4;
- uint64_t map6:4;
- uint64_t map5:4;
- uint64_t map4:4;
- uint64_t map3:4;
- uint64_t map2:4;
- uint64_t map1:4;
- uint64_t map0:4;
- } s;
- struct cvmx_pip_dsa_src_grp_s cn52xx;
- struct cvmx_pip_dsa_src_grp_s cn52xxp1;
- struct cvmx_pip_dsa_src_grp_s cn56xx;
-};
-
-union cvmx_pip_dsa_vid_grp {
- uint64_t u64;
- struct cvmx_pip_dsa_vid_grp_s {
- uint64_t map15:4;
- uint64_t map14:4;
- uint64_t map13:4;
- uint64_t map12:4;
- uint64_t map11:4;
- uint64_t map10:4;
- uint64_t map9:4;
- uint64_t map8:4;
- uint64_t map7:4;
- uint64_t map6:4;
- uint64_t map5:4;
- uint64_t map4:4;
- uint64_t map3:4;
- uint64_t map2:4;
- uint64_t map1:4;
- uint64_t map0:4;
- } s;
- struct cvmx_pip_dsa_vid_grp_s cn52xx;
- struct cvmx_pip_dsa_vid_grp_s cn52xxp1;
- struct cvmx_pip_dsa_vid_grp_s cn56xx;
-};
-
-union cvmx_pip_frm_len_chkx {
- uint64_t u64;
- struct cvmx_pip_frm_len_chkx_s {
- uint64_t reserved_32_63:32;
- uint64_t maxlen:16;
- uint64_t minlen:16;
- } s;
- struct cvmx_pip_frm_len_chkx_s cn50xx;
- struct cvmx_pip_frm_len_chkx_s cn52xx;
- struct cvmx_pip_frm_len_chkx_s cn52xxp1;
- struct cvmx_pip_frm_len_chkx_s cn56xx;
- struct cvmx_pip_frm_len_chkx_s cn56xxp1;
-};
-
-union cvmx_pip_gbl_cfg {
- uint64_t u64;
- struct cvmx_pip_gbl_cfg_s {
- uint64_t reserved_19_63:45;
- uint64_t tag_syn:1;
- uint64_t ip6_udp:1;
- uint64_t max_l2:1;
- uint64_t reserved_11_15:5;
- uint64_t raw_shf:3;
- uint64_t reserved_3_7:5;
- uint64_t nip_shf:3;
- } s;
- struct cvmx_pip_gbl_cfg_s cn30xx;
- struct cvmx_pip_gbl_cfg_s cn31xx;
- struct cvmx_pip_gbl_cfg_s cn38xx;
- struct cvmx_pip_gbl_cfg_s cn38xxp2;
- struct cvmx_pip_gbl_cfg_s cn50xx;
- struct cvmx_pip_gbl_cfg_s cn52xx;
- struct cvmx_pip_gbl_cfg_s cn52xxp1;
- struct cvmx_pip_gbl_cfg_s cn56xx;
- struct cvmx_pip_gbl_cfg_s cn56xxp1;
- struct cvmx_pip_gbl_cfg_s cn58xx;
- struct cvmx_pip_gbl_cfg_s cn58xxp1;
-};
-
-union cvmx_pip_gbl_ctl {
- uint64_t u64;
- struct cvmx_pip_gbl_ctl_s {
- uint64_t reserved_27_63:37;
- uint64_t dsa_grp_tvid:1;
- uint64_t dsa_grp_scmd:1;
- uint64_t dsa_grp_sid:1;
- uint64_t reserved_21_23:3;
- uint64_t ring_en:1;
- uint64_t reserved_17_19:3;
- uint64_t ignrs:1;
- uint64_t vs_wqe:1;
- uint64_t vs_qos:1;
- uint64_t l2_mal:1;
- uint64_t tcp_flag:1;
- uint64_t l4_len:1;
- uint64_t l4_chk:1;
- uint64_t l4_prt:1;
- uint64_t l4_mal:1;
- uint64_t reserved_6_7:2;
- uint64_t ip6_eext:2;
- uint64_t ip4_opts:1;
- uint64_t ip_hop:1;
- uint64_t ip_mal:1;
- uint64_t ip_chk:1;
- } s;
- struct cvmx_pip_gbl_ctl_cn30xx {
- uint64_t reserved_17_63:47;
- uint64_t ignrs:1;
- uint64_t vs_wqe:1;
- uint64_t vs_qos:1;
- uint64_t l2_mal:1;
- uint64_t tcp_flag:1;
- uint64_t l4_len:1;
- uint64_t l4_chk:1;
- uint64_t l4_prt:1;
- uint64_t l4_mal:1;
- uint64_t reserved_6_7:2;
- uint64_t ip6_eext:2;
- uint64_t ip4_opts:1;
- uint64_t ip_hop:1;
- uint64_t ip_mal:1;
- uint64_t ip_chk:1;
- } cn30xx;
- struct cvmx_pip_gbl_ctl_cn30xx cn31xx;
- struct cvmx_pip_gbl_ctl_cn30xx cn38xx;
- struct cvmx_pip_gbl_ctl_cn30xx cn38xxp2;
- struct cvmx_pip_gbl_ctl_cn30xx cn50xx;
- struct cvmx_pip_gbl_ctl_s cn52xx;
- struct cvmx_pip_gbl_ctl_s cn52xxp1;
- struct cvmx_pip_gbl_ctl_s cn56xx;
- struct cvmx_pip_gbl_ctl_cn56xxp1 {
- uint64_t reserved_21_63:43;
- uint64_t ring_en:1;
- uint64_t reserved_17_19:3;
- uint64_t ignrs:1;
- uint64_t vs_wqe:1;
- uint64_t vs_qos:1;
- uint64_t l2_mal:1;
- uint64_t tcp_flag:1;
- uint64_t l4_len:1;
- uint64_t l4_chk:1;
- uint64_t l4_prt:1;
- uint64_t l4_mal:1;
- uint64_t reserved_6_7:2;
- uint64_t ip6_eext:2;
- uint64_t ip4_opts:1;
- uint64_t ip_hop:1;
- uint64_t ip_mal:1;
- uint64_t ip_chk:1;
- } cn56xxp1;
- struct cvmx_pip_gbl_ctl_cn30xx cn58xx;
- struct cvmx_pip_gbl_ctl_cn30xx cn58xxp1;
-};
-
-union cvmx_pip_hg_pri_qos {
- uint64_t u64;
- struct cvmx_pip_hg_pri_qos_s {
- uint64_t reserved_11_63:53;
- uint64_t qos:3;
- uint64_t reserved_6_7:2;
- uint64_t pri:6;
- } s;
- struct cvmx_pip_hg_pri_qos_s cn52xx;
- struct cvmx_pip_hg_pri_qos_s cn52xxp1;
- struct cvmx_pip_hg_pri_qos_s cn56xx;
-};
-
-union cvmx_pip_int_en {
- uint64_t u64;
- struct cvmx_pip_int_en_s {
- uint64_t reserved_13_63:51;
- uint64_t punyerr:1;
- uint64_t lenerr:1;
- uint64_t maxerr:1;
- uint64_t minerr:1;
- uint64_t beperr:1;
- uint64_t feperr:1;
- uint64_t todoovr:1;
- uint64_t skprunt:1;
- uint64_t badtag:1;
- uint64_t prtnxa:1;
- uint64_t bckprs:1;
- uint64_t crcerr:1;
- uint64_t pktdrp:1;
- } s;
- struct cvmx_pip_int_en_cn30xx {
- uint64_t reserved_9_63:55;
- uint64_t beperr:1;
- uint64_t feperr:1;
- uint64_t todoovr:1;
- uint64_t skprunt:1;
- uint64_t badtag:1;
- uint64_t prtnxa:1;
- uint64_t bckprs:1;
- uint64_t crcerr:1;
- uint64_t pktdrp:1;
- } cn30xx;
- struct cvmx_pip_int_en_cn30xx cn31xx;
- struct cvmx_pip_int_en_cn30xx cn38xx;
- struct cvmx_pip_int_en_cn30xx cn38xxp2;
- struct cvmx_pip_int_en_cn50xx {
- uint64_t reserved_12_63:52;
- uint64_t lenerr:1;
- uint64_t maxerr:1;
- uint64_t minerr:1;
- uint64_t beperr:1;
- uint64_t feperr:1;
- uint64_t todoovr:1;
- uint64_t skprunt:1;
- uint64_t badtag:1;
- uint64_t prtnxa:1;
- uint64_t bckprs:1;
- uint64_t reserved_1_1:1;
- uint64_t pktdrp:1;
- } cn50xx;
- struct cvmx_pip_int_en_cn52xx {
- uint64_t reserved_13_63:51;
- uint64_t punyerr:1;
- uint64_t lenerr:1;
- uint64_t maxerr:1;
- uint64_t minerr:1;
- uint64_t beperr:1;
- uint64_t feperr:1;
- uint64_t todoovr:1;
- uint64_t skprunt:1;
- uint64_t badtag:1;
- uint64_t prtnxa:1;
- uint64_t bckprs:1;
- uint64_t reserved_1_1:1;
- uint64_t pktdrp:1;
- } cn52xx;
- struct cvmx_pip_int_en_cn52xx cn52xxp1;
- struct cvmx_pip_int_en_s cn56xx;
- struct cvmx_pip_int_en_cn56xxp1 {
- uint64_t reserved_12_63:52;
- uint64_t lenerr:1;
- uint64_t maxerr:1;
- uint64_t minerr:1;
- uint64_t beperr:1;
- uint64_t feperr:1;
- uint64_t todoovr:1;
- uint64_t skprunt:1;
- uint64_t badtag:1;
- uint64_t prtnxa:1;
- uint64_t bckprs:1;
- uint64_t crcerr:1;
- uint64_t pktdrp:1;
- } cn56xxp1;
- struct cvmx_pip_int_en_cn58xx {
- uint64_t reserved_13_63:51;
- uint64_t punyerr:1;
- uint64_t reserved_9_11:3;
- uint64_t beperr:1;
- uint64_t feperr:1;
- uint64_t todoovr:1;
- uint64_t skprunt:1;
- uint64_t badtag:1;
- uint64_t prtnxa:1;
- uint64_t bckprs:1;
- uint64_t crcerr:1;
- uint64_t pktdrp:1;
- } cn58xx;
- struct cvmx_pip_int_en_cn30xx cn58xxp1;
-};
-
-union cvmx_pip_int_reg {
- uint64_t u64;
- struct cvmx_pip_int_reg_s {
- uint64_t reserved_13_63:51;
- uint64_t punyerr:1;
- uint64_t lenerr:1;
- uint64_t maxerr:1;
- uint64_t minerr:1;
- uint64_t beperr:1;
- uint64_t feperr:1;
- uint64_t todoovr:1;
- uint64_t skprunt:1;
- uint64_t badtag:1;
- uint64_t prtnxa:1;
- uint64_t bckprs:1;
- uint64_t crcerr:1;
- uint64_t pktdrp:1;
- } s;
- struct cvmx_pip_int_reg_cn30xx {
- uint64_t reserved_9_63:55;
- uint64_t beperr:1;
- uint64_t feperr:1;
- uint64_t todoovr:1;
- uint64_t skprunt:1;
- uint64_t badtag:1;
- uint64_t prtnxa:1;
- uint64_t bckprs:1;
- uint64_t crcerr:1;
- uint64_t pktdrp:1;
- } cn30xx;
- struct cvmx_pip_int_reg_cn30xx cn31xx;
- struct cvmx_pip_int_reg_cn30xx cn38xx;
- struct cvmx_pip_int_reg_cn30xx cn38xxp2;
- struct cvmx_pip_int_reg_cn50xx {
- uint64_t reserved_12_63:52;
- uint64_t lenerr:1;
- uint64_t maxerr:1;
- uint64_t minerr:1;
- uint64_t beperr:1;
- uint64_t feperr:1;
- uint64_t todoovr:1;
- uint64_t skprunt:1;
- uint64_t badtag:1;
- uint64_t prtnxa:1;
- uint64_t bckprs:1;
- uint64_t reserved_1_1:1;
- uint64_t pktdrp:1;
- } cn50xx;
- struct cvmx_pip_int_reg_cn52xx {
- uint64_t reserved_13_63:51;
- uint64_t punyerr:1;
- uint64_t lenerr:1;
- uint64_t maxerr:1;
- uint64_t minerr:1;
- uint64_t beperr:1;
- uint64_t feperr:1;
- uint64_t todoovr:1;
- uint64_t skprunt:1;
- uint64_t badtag:1;
- uint64_t prtnxa:1;
- uint64_t bckprs:1;
- uint64_t reserved_1_1:1;
- uint64_t pktdrp:1;
- } cn52xx;
- struct cvmx_pip_int_reg_cn52xx cn52xxp1;
- struct cvmx_pip_int_reg_s cn56xx;
- struct cvmx_pip_int_reg_cn56xxp1 {
- uint64_t reserved_12_63:52;
- uint64_t lenerr:1;
- uint64_t maxerr:1;
- uint64_t minerr:1;
- uint64_t beperr:1;
- uint64_t feperr:1;
- uint64_t todoovr:1;
- uint64_t skprunt:1;
- uint64_t badtag:1;
- uint64_t prtnxa:1;
- uint64_t bckprs:1;
- uint64_t crcerr:1;
- uint64_t pktdrp:1;
- } cn56xxp1;
- struct cvmx_pip_int_reg_cn58xx {
- uint64_t reserved_13_63:51;
- uint64_t punyerr:1;
- uint64_t reserved_9_11:3;
- uint64_t beperr:1;
- uint64_t feperr:1;
- uint64_t todoovr:1;
- uint64_t skprunt:1;
- uint64_t badtag:1;
- uint64_t prtnxa:1;
- uint64_t bckprs:1;
- uint64_t crcerr:1;
- uint64_t pktdrp:1;
- } cn58xx;
- struct cvmx_pip_int_reg_cn30xx cn58xxp1;
-};
-
-union cvmx_pip_ip_offset {
- uint64_t u64;
- struct cvmx_pip_ip_offset_s {
- uint64_t reserved_3_63:61;
- uint64_t offset:3;
- } s;
- struct cvmx_pip_ip_offset_s cn30xx;
- struct cvmx_pip_ip_offset_s cn31xx;
- struct cvmx_pip_ip_offset_s cn38xx;
- struct cvmx_pip_ip_offset_s cn38xxp2;
- struct cvmx_pip_ip_offset_s cn50xx;
- struct cvmx_pip_ip_offset_s cn52xx;
- struct cvmx_pip_ip_offset_s cn52xxp1;
- struct cvmx_pip_ip_offset_s cn56xx;
- struct cvmx_pip_ip_offset_s cn56xxp1;
- struct cvmx_pip_ip_offset_s cn58xx;
- struct cvmx_pip_ip_offset_s cn58xxp1;
-};
-
-union cvmx_pip_prt_cfgx {
- uint64_t u64;
- struct cvmx_pip_prt_cfgx_s {
- uint64_t reserved_53_63:11;
- uint64_t pad_len:1;
- uint64_t vlan_len:1;
- uint64_t lenerr_en:1;
- uint64_t maxerr_en:1;
- uint64_t minerr_en:1;
- uint64_t grp_wat_47:4;
- uint64_t qos_wat_47:4;
- uint64_t reserved_37_39:3;
- uint64_t rawdrp:1;
- uint64_t tag_inc:2;
- uint64_t dyn_rs:1;
- uint64_t inst_hdr:1;
- uint64_t grp_wat:4;
- uint64_t hg_qos:1;
- uint64_t qos:3;
- uint64_t qos_wat:4;
- uint64_t qos_vsel:1;
- uint64_t qos_vod:1;
- uint64_t qos_diff:1;
- uint64_t qos_vlan:1;
- uint64_t reserved_13_15:3;
- uint64_t crc_en:1;
- uint64_t higig_en:1;
- uint64_t dsa_en:1;
- uint64_t mode:2;
- uint64_t reserved_7_7:1;
- uint64_t skip:7;
- } s;
- struct cvmx_pip_prt_cfgx_cn30xx {
- uint64_t reserved_37_63:27;
- uint64_t rawdrp:1;
- uint64_t tag_inc:2;
- uint64_t dyn_rs:1;
- uint64_t inst_hdr:1;
- uint64_t grp_wat:4;
- uint64_t reserved_27_27:1;
- uint64_t qos:3;
- uint64_t qos_wat:4;
- uint64_t reserved_18_19:2;
- uint64_t qos_diff:1;
- uint64_t qos_vlan:1;
- uint64_t reserved_10_15:6;
- uint64_t mode:2;
- uint64_t reserved_7_7:1;
- uint64_t skip:7;
- } cn30xx;
- struct cvmx_pip_prt_cfgx_cn30xx cn31xx;
- struct cvmx_pip_prt_cfgx_cn38xx {
- uint64_t reserved_37_63:27;
- uint64_t rawdrp:1;
- uint64_t tag_inc:2;
- uint64_t dyn_rs:1;
- uint64_t inst_hdr:1;
- uint64_t grp_wat:4;
- uint64_t reserved_27_27:1;
- uint64_t qos:3;
- uint64_t qos_wat:4;
- uint64_t reserved_18_19:2;
- uint64_t qos_diff:1;
- uint64_t qos_vlan:1;
- uint64_t reserved_13_15:3;
- uint64_t crc_en:1;
- uint64_t reserved_10_11:2;
- uint64_t mode:2;
- uint64_t reserved_7_7:1;
- uint64_t skip:7;
- } cn38xx;
- struct cvmx_pip_prt_cfgx_cn38xx cn38xxp2;
- struct cvmx_pip_prt_cfgx_cn50xx {
- uint64_t reserved_53_63:11;
- uint64_t pad_len:1;
- uint64_t vlan_len:1;
- uint64_t lenerr_en:1;
- uint64_t maxerr_en:1;
- uint64_t minerr_en:1;
- uint64_t grp_wat_47:4;
- uint64_t qos_wat_47:4;
- uint64_t reserved_37_39:3;
- uint64_t rawdrp:1;
- uint64_t tag_inc:2;
- uint64_t dyn_rs:1;
- uint64_t inst_hdr:1;
- uint64_t grp_wat:4;
- uint64_t reserved_27_27:1;
- uint64_t qos:3;
- uint64_t qos_wat:4;
- uint64_t reserved_19_19:1;
- uint64_t qos_vod:1;
- uint64_t qos_diff:1;
- uint64_t qos_vlan:1;
- uint64_t reserved_13_15:3;
- uint64_t crc_en:1;
- uint64_t reserved_10_11:2;
- uint64_t mode:2;
- uint64_t reserved_7_7:1;
- uint64_t skip:7;
- } cn50xx;
- struct cvmx_pip_prt_cfgx_s cn52xx;
- struct cvmx_pip_prt_cfgx_s cn52xxp1;
- struct cvmx_pip_prt_cfgx_s cn56xx;
- struct cvmx_pip_prt_cfgx_cn50xx cn56xxp1;
- struct cvmx_pip_prt_cfgx_cn58xx {
- uint64_t reserved_37_63:27;
- uint64_t rawdrp:1;
- uint64_t tag_inc:2;
- uint64_t dyn_rs:1;
- uint64_t inst_hdr:1;
- uint64_t grp_wat:4;
- uint64_t reserved_27_27:1;
- uint64_t qos:3;
- uint64_t qos_wat:4;
- uint64_t reserved_19_19:1;
- uint64_t qos_vod:1;
- uint64_t qos_diff:1;
- uint64_t qos_vlan:1;
- uint64_t reserved_13_15:3;
- uint64_t crc_en:1;
- uint64_t reserved_10_11:2;
- uint64_t mode:2;
- uint64_t reserved_7_7:1;
- uint64_t skip:7;
- } cn58xx;
- struct cvmx_pip_prt_cfgx_cn58xx cn58xxp1;
-};
-
-union cvmx_pip_prt_tagx {
- uint64_t u64;
- struct cvmx_pip_prt_tagx_s {
- uint64_t reserved_40_63:24;
- uint64_t grptagbase:4;
- uint64_t grptagmask:4;
- uint64_t grptag:1;
- uint64_t grptag_mskip:1;
- uint64_t tag_mode:2;
- uint64_t inc_vs:2;
- uint64_t inc_vlan:1;
- uint64_t inc_prt_flag:1;
- uint64_t ip6_dprt_flag:1;
- uint64_t ip4_dprt_flag:1;
- uint64_t ip6_sprt_flag:1;
- uint64_t ip4_sprt_flag:1;
- uint64_t ip6_nxth_flag:1;
- uint64_t ip4_pctl_flag:1;
- uint64_t ip6_dst_flag:1;
- uint64_t ip4_dst_flag:1;
- uint64_t ip6_src_flag:1;
- uint64_t ip4_src_flag:1;
- uint64_t tcp6_tag_type:2;
- uint64_t tcp4_tag_type:2;
- uint64_t ip6_tag_type:2;
- uint64_t ip4_tag_type:2;
- uint64_t non_tag_type:2;
- uint64_t grp:4;
- } s;
- struct cvmx_pip_prt_tagx_cn30xx {
- uint64_t reserved_40_63:24;
- uint64_t grptagbase:4;
- uint64_t grptagmask:4;
- uint64_t grptag:1;
- uint64_t reserved_30_30:1;
- uint64_t tag_mode:2;
- uint64_t inc_vs:2;
- uint64_t inc_vlan:1;
- uint64_t inc_prt_flag:1;
- uint64_t ip6_dprt_flag:1;
- uint64_t ip4_dprt_flag:1;
- uint64_t ip6_sprt_flag:1;
- uint64_t ip4_sprt_flag:1;
- uint64_t ip6_nxth_flag:1;
- uint64_t ip4_pctl_flag:1;
- uint64_t ip6_dst_flag:1;
- uint64_t ip4_dst_flag:1;
- uint64_t ip6_src_flag:1;
- uint64_t ip4_src_flag:1;
- uint64_t tcp6_tag_type:2;
- uint64_t tcp4_tag_type:2;
- uint64_t ip6_tag_type:2;
- uint64_t ip4_tag_type:2;
- uint64_t non_tag_type:2;
- uint64_t grp:4;
- } cn30xx;
- struct cvmx_pip_prt_tagx_cn30xx cn31xx;
- struct cvmx_pip_prt_tagx_cn30xx cn38xx;
- struct cvmx_pip_prt_tagx_cn30xx cn38xxp2;
- struct cvmx_pip_prt_tagx_s cn50xx;
- struct cvmx_pip_prt_tagx_s cn52xx;
- struct cvmx_pip_prt_tagx_s cn52xxp1;
- struct cvmx_pip_prt_tagx_s cn56xx;
- struct cvmx_pip_prt_tagx_s cn56xxp1;
- struct cvmx_pip_prt_tagx_cn30xx cn58xx;
- struct cvmx_pip_prt_tagx_cn30xx cn58xxp1;
-};
-
-union cvmx_pip_qos_diffx {
- uint64_t u64;
- struct cvmx_pip_qos_diffx_s {
- uint64_t reserved_3_63:61;
- uint64_t qos:3;
- } s;
- struct cvmx_pip_qos_diffx_s cn30xx;
- struct cvmx_pip_qos_diffx_s cn31xx;
- struct cvmx_pip_qos_diffx_s cn38xx;
- struct cvmx_pip_qos_diffx_s cn38xxp2;
- struct cvmx_pip_qos_diffx_s cn50xx;
- struct cvmx_pip_qos_diffx_s cn52xx;
- struct cvmx_pip_qos_diffx_s cn52xxp1;
- struct cvmx_pip_qos_diffx_s cn56xx;
- struct cvmx_pip_qos_diffx_s cn56xxp1;
- struct cvmx_pip_qos_diffx_s cn58xx;
- struct cvmx_pip_qos_diffx_s cn58xxp1;
-};
-
-union cvmx_pip_qos_vlanx {
- uint64_t u64;
- struct cvmx_pip_qos_vlanx_s {
- uint64_t reserved_7_63:57;
- uint64_t qos1:3;
- uint64_t reserved_3_3:1;
- uint64_t qos:3;
- } s;
- struct cvmx_pip_qos_vlanx_cn30xx {
- uint64_t reserved_3_63:61;
- uint64_t qos:3;
- } cn30xx;
- struct cvmx_pip_qos_vlanx_cn30xx cn31xx;
- struct cvmx_pip_qos_vlanx_cn30xx cn38xx;
- struct cvmx_pip_qos_vlanx_cn30xx cn38xxp2;
- struct cvmx_pip_qos_vlanx_cn30xx cn50xx;
- struct cvmx_pip_qos_vlanx_s cn52xx;
- struct cvmx_pip_qos_vlanx_s cn52xxp1;
- struct cvmx_pip_qos_vlanx_s cn56xx;
- struct cvmx_pip_qos_vlanx_cn30xx cn56xxp1;
- struct cvmx_pip_qos_vlanx_cn30xx cn58xx;
- struct cvmx_pip_qos_vlanx_cn30xx cn58xxp1;
-};
-
-union cvmx_pip_qos_watchx {
- uint64_t u64;
- struct cvmx_pip_qos_watchx_s {
- uint64_t reserved_48_63:16;
- uint64_t mask:16;
- uint64_t reserved_28_31:4;
- uint64_t grp:4;
- uint64_t reserved_23_23:1;
- uint64_t qos:3;
- uint64_t reserved_19_19:1;
- uint64_t match_type:3;
- uint64_t match_value:16;
- } s;
- struct cvmx_pip_qos_watchx_cn30xx {
- uint64_t reserved_48_63:16;
- uint64_t mask:16;
- uint64_t reserved_28_31:4;
- uint64_t grp:4;
- uint64_t reserved_23_23:1;
- uint64_t qos:3;
- uint64_t reserved_18_19:2;
- uint64_t match_type:2;
- uint64_t match_value:16;
- } cn30xx;
- struct cvmx_pip_qos_watchx_cn30xx cn31xx;
- struct cvmx_pip_qos_watchx_cn30xx cn38xx;
- struct cvmx_pip_qos_watchx_cn30xx cn38xxp2;
- struct cvmx_pip_qos_watchx_s cn50xx;
- struct cvmx_pip_qos_watchx_s cn52xx;
- struct cvmx_pip_qos_watchx_s cn52xxp1;
- struct cvmx_pip_qos_watchx_s cn56xx;
- struct cvmx_pip_qos_watchx_s cn56xxp1;
- struct cvmx_pip_qos_watchx_cn30xx cn58xx;
- struct cvmx_pip_qos_watchx_cn30xx cn58xxp1;
-};
-
-union cvmx_pip_raw_word {
- uint64_t u64;
- struct cvmx_pip_raw_word_s {
- uint64_t reserved_56_63:8;
- uint64_t word:56;
- } s;
- struct cvmx_pip_raw_word_s cn30xx;
- struct cvmx_pip_raw_word_s cn31xx;
- struct cvmx_pip_raw_word_s cn38xx;
- struct cvmx_pip_raw_word_s cn38xxp2;
- struct cvmx_pip_raw_word_s cn50xx;
- struct cvmx_pip_raw_word_s cn52xx;
- struct cvmx_pip_raw_word_s cn52xxp1;
- struct cvmx_pip_raw_word_s cn56xx;
- struct cvmx_pip_raw_word_s cn56xxp1;
- struct cvmx_pip_raw_word_s cn58xx;
- struct cvmx_pip_raw_word_s cn58xxp1;
-};
-
-union cvmx_pip_sft_rst {
- uint64_t u64;
- struct cvmx_pip_sft_rst_s {
- uint64_t reserved_1_63:63;
- uint64_t rst:1;
- } s;
- struct cvmx_pip_sft_rst_s cn30xx;
- struct cvmx_pip_sft_rst_s cn31xx;
- struct cvmx_pip_sft_rst_s cn38xx;
- struct cvmx_pip_sft_rst_s cn50xx;
- struct cvmx_pip_sft_rst_s cn52xx;
- struct cvmx_pip_sft_rst_s cn52xxp1;
- struct cvmx_pip_sft_rst_s cn56xx;
- struct cvmx_pip_sft_rst_s cn56xxp1;
- struct cvmx_pip_sft_rst_s cn58xx;
- struct cvmx_pip_sft_rst_s cn58xxp1;
-};
-
-union cvmx_pip_stat0_prtx {
- uint64_t u64;
- struct cvmx_pip_stat0_prtx_s {
- uint64_t drp_pkts:32;
- uint64_t drp_octs:32;
- } s;
- struct cvmx_pip_stat0_prtx_s cn30xx;
- struct cvmx_pip_stat0_prtx_s cn31xx;
- struct cvmx_pip_stat0_prtx_s cn38xx;
- struct cvmx_pip_stat0_prtx_s cn38xxp2;
- struct cvmx_pip_stat0_prtx_s cn50xx;
- struct cvmx_pip_stat0_prtx_s cn52xx;
- struct cvmx_pip_stat0_prtx_s cn52xxp1;
- struct cvmx_pip_stat0_prtx_s cn56xx;
- struct cvmx_pip_stat0_prtx_s cn56xxp1;
- struct cvmx_pip_stat0_prtx_s cn58xx;
- struct cvmx_pip_stat0_prtx_s cn58xxp1;
-};
-
-union cvmx_pip_stat1_prtx {
- uint64_t u64;
- struct cvmx_pip_stat1_prtx_s {
- uint64_t reserved_48_63:16;
- uint64_t octs:48;
- } s;
- struct cvmx_pip_stat1_prtx_s cn30xx;
- struct cvmx_pip_stat1_prtx_s cn31xx;
- struct cvmx_pip_stat1_prtx_s cn38xx;
- struct cvmx_pip_stat1_prtx_s cn38xxp2;
- struct cvmx_pip_stat1_prtx_s cn50xx;
- struct cvmx_pip_stat1_prtx_s cn52xx;
- struct cvmx_pip_stat1_prtx_s cn52xxp1;
- struct cvmx_pip_stat1_prtx_s cn56xx;
- struct cvmx_pip_stat1_prtx_s cn56xxp1;
- struct cvmx_pip_stat1_prtx_s cn58xx;
- struct cvmx_pip_stat1_prtx_s cn58xxp1;
-};
-
-union cvmx_pip_stat2_prtx {
- uint64_t u64;
- struct cvmx_pip_stat2_prtx_s {
- uint64_t pkts:32;
- uint64_t raw:32;
- } s;
- struct cvmx_pip_stat2_prtx_s cn30xx;
- struct cvmx_pip_stat2_prtx_s cn31xx;
- struct cvmx_pip_stat2_prtx_s cn38xx;
- struct cvmx_pip_stat2_prtx_s cn38xxp2;
- struct cvmx_pip_stat2_prtx_s cn50xx;
- struct cvmx_pip_stat2_prtx_s cn52xx;
- struct cvmx_pip_stat2_prtx_s cn52xxp1;
- struct cvmx_pip_stat2_prtx_s cn56xx;
- struct cvmx_pip_stat2_prtx_s cn56xxp1;
- struct cvmx_pip_stat2_prtx_s cn58xx;
- struct cvmx_pip_stat2_prtx_s cn58xxp1;
-};
-
-union cvmx_pip_stat3_prtx {
- uint64_t u64;
- struct cvmx_pip_stat3_prtx_s {
- uint64_t bcst:32;
- uint64_t mcst:32;
- } s;
- struct cvmx_pip_stat3_prtx_s cn30xx;
- struct cvmx_pip_stat3_prtx_s cn31xx;
- struct cvmx_pip_stat3_prtx_s cn38xx;
- struct cvmx_pip_stat3_prtx_s cn38xxp2;
- struct cvmx_pip_stat3_prtx_s cn50xx;
- struct cvmx_pip_stat3_prtx_s cn52xx;
- struct cvmx_pip_stat3_prtx_s cn52xxp1;
- struct cvmx_pip_stat3_prtx_s cn56xx;
- struct cvmx_pip_stat3_prtx_s cn56xxp1;
- struct cvmx_pip_stat3_prtx_s cn58xx;
- struct cvmx_pip_stat3_prtx_s cn58xxp1;
-};
-
-union cvmx_pip_stat4_prtx {
- uint64_t u64;
- struct cvmx_pip_stat4_prtx_s {
- uint64_t h65to127:32;
- uint64_t h64:32;
- } s;
- struct cvmx_pip_stat4_prtx_s cn30xx;
- struct cvmx_pip_stat4_prtx_s cn31xx;
- struct cvmx_pip_stat4_prtx_s cn38xx;
- struct cvmx_pip_stat4_prtx_s cn38xxp2;
- struct cvmx_pip_stat4_prtx_s cn50xx;
- struct cvmx_pip_stat4_prtx_s cn52xx;
- struct cvmx_pip_stat4_prtx_s cn52xxp1;
- struct cvmx_pip_stat4_prtx_s cn56xx;
- struct cvmx_pip_stat4_prtx_s cn56xxp1;
- struct cvmx_pip_stat4_prtx_s cn58xx;
- struct cvmx_pip_stat4_prtx_s cn58xxp1;
-};
-
-union cvmx_pip_stat5_prtx {
- uint64_t u64;
- struct cvmx_pip_stat5_prtx_s {
- uint64_t h256to511:32;
- uint64_t h128to255:32;
- } s;
- struct cvmx_pip_stat5_prtx_s cn30xx;
- struct cvmx_pip_stat5_prtx_s cn31xx;
- struct cvmx_pip_stat5_prtx_s cn38xx;
- struct cvmx_pip_stat5_prtx_s cn38xxp2;
- struct cvmx_pip_stat5_prtx_s cn50xx;
- struct cvmx_pip_stat5_prtx_s cn52xx;
- struct cvmx_pip_stat5_prtx_s cn52xxp1;
- struct cvmx_pip_stat5_prtx_s cn56xx;
- struct cvmx_pip_stat5_prtx_s cn56xxp1;
- struct cvmx_pip_stat5_prtx_s cn58xx;
- struct cvmx_pip_stat5_prtx_s cn58xxp1;
-};
-
-union cvmx_pip_stat6_prtx {
- uint64_t u64;
- struct cvmx_pip_stat6_prtx_s {
- uint64_t h1024to1518:32;
- uint64_t h512to1023:32;
- } s;
- struct cvmx_pip_stat6_prtx_s cn30xx;
- struct cvmx_pip_stat6_prtx_s cn31xx;
- struct cvmx_pip_stat6_prtx_s cn38xx;
- struct cvmx_pip_stat6_prtx_s cn38xxp2;
- struct cvmx_pip_stat6_prtx_s cn50xx;
- struct cvmx_pip_stat6_prtx_s cn52xx;
- struct cvmx_pip_stat6_prtx_s cn52xxp1;
- struct cvmx_pip_stat6_prtx_s cn56xx;
- struct cvmx_pip_stat6_prtx_s cn56xxp1;
- struct cvmx_pip_stat6_prtx_s cn58xx;
- struct cvmx_pip_stat6_prtx_s cn58xxp1;
-};
-
-union cvmx_pip_stat7_prtx {
- uint64_t u64;
- struct cvmx_pip_stat7_prtx_s {
- uint64_t fcs:32;
- uint64_t h1519:32;
- } s;
- struct cvmx_pip_stat7_prtx_s cn30xx;
- struct cvmx_pip_stat7_prtx_s cn31xx;
- struct cvmx_pip_stat7_prtx_s cn38xx;
- struct cvmx_pip_stat7_prtx_s cn38xxp2;
- struct cvmx_pip_stat7_prtx_s cn50xx;
- struct cvmx_pip_stat7_prtx_s cn52xx;
- struct cvmx_pip_stat7_prtx_s cn52xxp1;
- struct cvmx_pip_stat7_prtx_s cn56xx;
- struct cvmx_pip_stat7_prtx_s cn56xxp1;
- struct cvmx_pip_stat7_prtx_s cn58xx;
- struct cvmx_pip_stat7_prtx_s cn58xxp1;
-};
-
-union cvmx_pip_stat8_prtx {
- uint64_t u64;
- struct cvmx_pip_stat8_prtx_s {
- uint64_t frag:32;
- uint64_t undersz:32;
- } s;
- struct cvmx_pip_stat8_prtx_s cn30xx;
- struct cvmx_pip_stat8_prtx_s cn31xx;
- struct cvmx_pip_stat8_prtx_s cn38xx;
- struct cvmx_pip_stat8_prtx_s cn38xxp2;
- struct cvmx_pip_stat8_prtx_s cn50xx;
- struct cvmx_pip_stat8_prtx_s cn52xx;
- struct cvmx_pip_stat8_prtx_s cn52xxp1;
- struct cvmx_pip_stat8_prtx_s cn56xx;
- struct cvmx_pip_stat8_prtx_s cn56xxp1;
- struct cvmx_pip_stat8_prtx_s cn58xx;
- struct cvmx_pip_stat8_prtx_s cn58xxp1;
-};
-
-union cvmx_pip_stat9_prtx {
- uint64_t u64;
- struct cvmx_pip_stat9_prtx_s {
- uint64_t jabber:32;
- uint64_t oversz:32;
- } s;
- struct cvmx_pip_stat9_prtx_s cn30xx;
- struct cvmx_pip_stat9_prtx_s cn31xx;
- struct cvmx_pip_stat9_prtx_s cn38xx;
- struct cvmx_pip_stat9_prtx_s cn38xxp2;
- struct cvmx_pip_stat9_prtx_s cn50xx;
- struct cvmx_pip_stat9_prtx_s cn52xx;
- struct cvmx_pip_stat9_prtx_s cn52xxp1;
- struct cvmx_pip_stat9_prtx_s cn56xx;
- struct cvmx_pip_stat9_prtx_s cn56xxp1;
- struct cvmx_pip_stat9_prtx_s cn58xx;
- struct cvmx_pip_stat9_prtx_s cn58xxp1;
-};
-
-union cvmx_pip_stat_ctl {
- uint64_t u64;
- struct cvmx_pip_stat_ctl_s {
- uint64_t reserved_1_63:63;
- uint64_t rdclr:1;
- } s;
- struct cvmx_pip_stat_ctl_s cn30xx;
- struct cvmx_pip_stat_ctl_s cn31xx;
- struct cvmx_pip_stat_ctl_s cn38xx;
- struct cvmx_pip_stat_ctl_s cn38xxp2;
- struct cvmx_pip_stat_ctl_s cn50xx;
- struct cvmx_pip_stat_ctl_s cn52xx;
- struct cvmx_pip_stat_ctl_s cn52xxp1;
- struct cvmx_pip_stat_ctl_s cn56xx;
- struct cvmx_pip_stat_ctl_s cn56xxp1;
- struct cvmx_pip_stat_ctl_s cn58xx;
- struct cvmx_pip_stat_ctl_s cn58xxp1;
-};
-
-union cvmx_pip_stat_inb_errsx {
- uint64_t u64;
- struct cvmx_pip_stat_inb_errsx_s {
- uint64_t reserved_16_63:48;
- uint64_t errs:16;
- } s;
- struct cvmx_pip_stat_inb_errsx_s cn30xx;
- struct cvmx_pip_stat_inb_errsx_s cn31xx;
- struct cvmx_pip_stat_inb_errsx_s cn38xx;
- struct cvmx_pip_stat_inb_errsx_s cn38xxp2;
- struct cvmx_pip_stat_inb_errsx_s cn50xx;
- struct cvmx_pip_stat_inb_errsx_s cn52xx;
- struct cvmx_pip_stat_inb_errsx_s cn52xxp1;
- struct cvmx_pip_stat_inb_errsx_s cn56xx;
- struct cvmx_pip_stat_inb_errsx_s cn56xxp1;
- struct cvmx_pip_stat_inb_errsx_s cn58xx;
- struct cvmx_pip_stat_inb_errsx_s cn58xxp1;
-};
-
-union cvmx_pip_stat_inb_octsx {
- uint64_t u64;
- struct cvmx_pip_stat_inb_octsx_s {
- uint64_t reserved_48_63:16;
- uint64_t octs:48;
- } s;
- struct cvmx_pip_stat_inb_octsx_s cn30xx;
- struct cvmx_pip_stat_inb_octsx_s cn31xx;
- struct cvmx_pip_stat_inb_octsx_s cn38xx;
- struct cvmx_pip_stat_inb_octsx_s cn38xxp2;
- struct cvmx_pip_stat_inb_octsx_s cn50xx;
- struct cvmx_pip_stat_inb_octsx_s cn52xx;
- struct cvmx_pip_stat_inb_octsx_s cn52xxp1;
- struct cvmx_pip_stat_inb_octsx_s cn56xx;
- struct cvmx_pip_stat_inb_octsx_s cn56xxp1;
- struct cvmx_pip_stat_inb_octsx_s cn58xx;
- struct cvmx_pip_stat_inb_octsx_s cn58xxp1;
-};
-
-union cvmx_pip_stat_inb_pktsx {
- uint64_t u64;
- struct cvmx_pip_stat_inb_pktsx_s {
- uint64_t reserved_32_63:32;
- uint64_t pkts:32;
- } s;
- struct cvmx_pip_stat_inb_pktsx_s cn30xx;
- struct cvmx_pip_stat_inb_pktsx_s cn31xx;
- struct cvmx_pip_stat_inb_pktsx_s cn38xx;
- struct cvmx_pip_stat_inb_pktsx_s cn38xxp2;
- struct cvmx_pip_stat_inb_pktsx_s cn50xx;
- struct cvmx_pip_stat_inb_pktsx_s cn52xx;
- struct cvmx_pip_stat_inb_pktsx_s cn52xxp1;
- struct cvmx_pip_stat_inb_pktsx_s cn56xx;
- struct cvmx_pip_stat_inb_pktsx_s cn56xxp1;
- struct cvmx_pip_stat_inb_pktsx_s cn58xx;
- struct cvmx_pip_stat_inb_pktsx_s cn58xxp1;
-};
-
-union cvmx_pip_tag_incx {
- uint64_t u64;
- struct cvmx_pip_tag_incx_s {
- uint64_t reserved_8_63:56;
- uint64_t en:8;
- } s;
- struct cvmx_pip_tag_incx_s cn30xx;
- struct cvmx_pip_tag_incx_s cn31xx;
- struct cvmx_pip_tag_incx_s cn38xx;
- struct cvmx_pip_tag_incx_s cn38xxp2;
- struct cvmx_pip_tag_incx_s cn50xx;
- struct cvmx_pip_tag_incx_s cn52xx;
- struct cvmx_pip_tag_incx_s cn52xxp1;
- struct cvmx_pip_tag_incx_s cn56xx;
- struct cvmx_pip_tag_incx_s cn56xxp1;
- struct cvmx_pip_tag_incx_s cn58xx;
- struct cvmx_pip_tag_incx_s cn58xxp1;
-};
-
-union cvmx_pip_tag_mask {
- uint64_t u64;
- struct cvmx_pip_tag_mask_s {
- uint64_t reserved_16_63:48;
- uint64_t mask:16;
- } s;
- struct cvmx_pip_tag_mask_s cn30xx;
- struct cvmx_pip_tag_mask_s cn31xx;
- struct cvmx_pip_tag_mask_s cn38xx;
- struct cvmx_pip_tag_mask_s cn38xxp2;
- struct cvmx_pip_tag_mask_s cn50xx;
- struct cvmx_pip_tag_mask_s cn52xx;
- struct cvmx_pip_tag_mask_s cn52xxp1;
- struct cvmx_pip_tag_mask_s cn56xx;
- struct cvmx_pip_tag_mask_s cn56xxp1;
- struct cvmx_pip_tag_mask_s cn58xx;
- struct cvmx_pip_tag_mask_s cn58xxp1;
-};
-
-union cvmx_pip_tag_secret {
- uint64_t u64;
- struct cvmx_pip_tag_secret_s {
- uint64_t reserved_32_63:32;
- uint64_t dst:16;
- uint64_t src:16;
- } s;
- struct cvmx_pip_tag_secret_s cn30xx;
- struct cvmx_pip_tag_secret_s cn31xx;
- struct cvmx_pip_tag_secret_s cn38xx;
- struct cvmx_pip_tag_secret_s cn38xxp2;
- struct cvmx_pip_tag_secret_s cn50xx;
- struct cvmx_pip_tag_secret_s cn52xx;
- struct cvmx_pip_tag_secret_s cn52xxp1;
- struct cvmx_pip_tag_secret_s cn56xx;
- struct cvmx_pip_tag_secret_s cn56xxp1;
- struct cvmx_pip_tag_secret_s cn58xx;
- struct cvmx_pip_tag_secret_s cn58xxp1;
-};
-
-union cvmx_pip_todo_entry {
- uint64_t u64;
- struct cvmx_pip_todo_entry_s {
- uint64_t val:1;
- uint64_t reserved_62_62:1;
- uint64_t entry:62;
- } s;
- struct cvmx_pip_todo_entry_s cn30xx;
- struct cvmx_pip_todo_entry_s cn31xx;
- struct cvmx_pip_todo_entry_s cn38xx;
- struct cvmx_pip_todo_entry_s cn38xxp2;
- struct cvmx_pip_todo_entry_s cn50xx;
- struct cvmx_pip_todo_entry_s cn52xx;
- struct cvmx_pip_todo_entry_s cn52xxp1;
- struct cvmx_pip_todo_entry_s cn56xx;
- struct cvmx_pip_todo_entry_s cn56xxp1;
- struct cvmx_pip_todo_entry_s cn58xx;
- struct cvmx_pip_todo_entry_s cn58xxp1;
-};
-
-#endif
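The PIP register layouts above are driven the same way. A hedged sketch of selecting a parse mode for one input port (illustrative only; it assumes the cvmx_read_csr()/cvmx_write_csr() helpers used by cvmx-pip.h below, plus the CVMX_PIP_PRT_CFGX() macro and parse-mode enum removed here):

	union cvmx_pip_prt_cfgx prt_cfg;

	/* Read-modify-write port 0's config so parsing starts at the L2 (MAC) header. */
	prt_cfg.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(0));
	prt_cfg.s.mode = CVMX_PIP_PORT_CFG_MODE_SKIPL2;
	cvmx_write_csr(CVMX_PIP_PRT_CFGX(0), prt_cfg.u64);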
diff --git a/drivers/staging/octeon/cvmx-pip.h b/drivers/staging/octeon/cvmx-pip.h
deleted file mode 100644
index 78dbce8f2c5e..000000000000
--- a/drivers/staging/octeon/cvmx-pip.h
+++ /dev/null
@@ -1,524 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/*
- * Interface to the hardware Packet Input Processing unit.
- *
- */
-
-#ifndef __CVMX_PIP_H__
-#define __CVMX_PIP_H__
-
-#include "cvmx-wqe.h"
-#include "cvmx-fpa.h"
-#include "cvmx-pip-defs.h"
-
-#define CVMX_PIP_NUM_INPUT_PORTS 40
-#define CVMX_PIP_NUM_WATCHERS 4
-
-/*
- * Encodes the different error and exception codes
- */
-typedef enum {
- CVMX_PIP_L4_NO_ERR = 0ull,
- /*
- * 1 = TCP (UDP) packet not long enough to cover TCP (UDP)
- * header
- */
- CVMX_PIP_L4_MAL_ERR = 1ull,
- /* 2 = TCP/UDP checksum failure */
- CVMX_PIP_CHK_ERR = 2ull,
- /*
- * 3 = TCP/UDP length check (TCP/UDP length does not match IP
- * length).
- */
- CVMX_PIP_L4_LENGTH_ERR = 3ull,
- /* 4 = illegal TCP/UDP port (either source or dest port is zero) */
- CVMX_PIP_BAD_PRT_ERR = 4ull,
- /* 8 = TCP flags = FIN only */
- CVMX_PIP_TCP_FLG8_ERR = 8ull,
- /* 9 = TCP flags = 0 */
- CVMX_PIP_TCP_FLG9_ERR = 9ull,
- /* 10 = TCP flags = FIN+RST+* */
- CVMX_PIP_TCP_FLG10_ERR = 10ull,
- /* 11 = TCP flags = SYN+URG+* */
- CVMX_PIP_TCP_FLG11_ERR = 11ull,
- /* 12 = TCP flags = SYN+RST+* */
- CVMX_PIP_TCP_FLG12_ERR = 12ull,
- /* 13 = TCP flags = SYN+FIN+* */
- CVMX_PIP_TCP_FLG13_ERR = 13ull
-} cvmx_pip_l4_err_t;
-
-typedef enum {
-
- CVMX_PIP_IP_NO_ERR = 0ull,
- /* 1 = not IPv4 or IPv6 */
- CVMX_PIP_NOT_IP = 1ull,
- /* 2 = IPv4 header checksum violation */
- CVMX_PIP_IPV4_HDR_CHK = 2ull,
- /* 3 = malformed (packet not long enough to cover IP hdr) */
- CVMX_PIP_IP_MAL_HDR = 3ull,
- /* 4 = malformed (packet not long enough to cover len in IP hdr) */
- CVMX_PIP_IP_MAL_PKT = 4ull,
- /* 5 = TTL / hop count equal zero */
- CVMX_PIP_TTL_HOP = 5ull,
- /* 6 = IPv4 options / IPv6 early extension headers */
- CVMX_PIP_OPTS = 6ull
-} cvmx_pip_ip_exc_t;
-
-/**
- * NOTES
- * late collision (data received before collision)
- * late collisions cannot be detected by the receiver
- * they would appear as JAM bits which would appear as bad FCS
- * or carrier extend error which is CVMX_PIP_EXTEND_ERR
- */
-typedef enum {
- /* No error */
- CVMX_PIP_RX_NO_ERR = 0ull,
- /* RGM+SPI 1 = partially received packet (buffering/bandwidth
- * not adequate) */
- CVMX_PIP_PARTIAL_ERR = 1ull,
- /* RGM+SPI 2 = receive packet too large and truncated */
- CVMX_PIP_JABBER_ERR = 2ull,
- /*
- * RGM 3 = max frame error (pkt len > max frame len) (with FCS
- * error)
- */
- CVMX_PIP_OVER_FCS_ERR = 3ull,
- /* RGM+SPI 4 = max frame error (pkt len > max frame len) */
- CVMX_PIP_OVER_ERR = 4ull,
- /*
- * RGM 5 = nibble error (data not byte multiple - 100M and 10M
- * only)
- */
- CVMX_PIP_ALIGN_ERR = 5ull,
- /*
- * RGM 6 = min frame error (pkt len < min frame len) (with FCS
- * error)
- */
- CVMX_PIP_UNDER_FCS_ERR = 6ull,
- /* RGM 7 = FCS error */
- CVMX_PIP_GMX_FCS_ERR = 7ull,
- /* RGM+SPI 8 = min frame error (pkt len < min frame len) */
- CVMX_PIP_UNDER_ERR = 8ull,
- /* RGM 9 = Frame carrier extend error */
- CVMX_PIP_EXTEND_ERR = 9ull,
- /*
- * RGM 10 = length mismatch (len did not match len in L2
- * length/type)
- */
- CVMX_PIP_LENGTH_ERR = 10ull,
- /* RGM 11 = Frame error (some or all data bits marked err) */
- CVMX_PIP_DAT_ERR = 11ull,
- /* SPI 11 = DIP4 error */
- CVMX_PIP_DIP_ERR = 11ull,
- /*
- * RGM 12 = packet was not large enough to pass the skipper -
- * no inspection could occur.
- */
- CVMX_PIP_SKIP_ERR = 12ull,
- /*
- * RGM 13 = studder error (data not repeated - 100M and 10M
- * only)
- */
- CVMX_PIP_NIBBLE_ERR = 13ull,
- /* RGM+SPI 16 = FCS error */
- CVMX_PIP_PIP_FCS = 16L,
- /*
- * RGM+SPI+PCI 17 = packet was not large enough to pass the
- * skipper - no inspection could occur.
- */
- CVMX_PIP_PIP_SKIP_ERR = 17L,
- /*
- * RGM+SPI+PCI 18 = malformed l2 (packet not long enough to
- * cover L2 hdr).
- */
- CVMX_PIP_PIP_L2_MAL_HDR = 18L
- /*
- * NOTES: xx = late collision (data received before collision)
- * late collisions cannot be detected by the receiver
- * they would appear as JAM bits which would appear as
- * bad FCS or carrier extend error which is
- * CVMX_PIP_EXTEND_ERR
- */
-} cvmx_pip_rcv_err_t;
-
-/**
- * This defines the err_code field errors in the work Q entry
- */
-typedef union {
- cvmx_pip_l4_err_t l4_err;
- cvmx_pip_ip_exc_t ip_exc;
- cvmx_pip_rcv_err_t rcv_err;
-} cvmx_pip_err_t;
-
-/**
- * Status statistics for a port
- */
-typedef struct {
- /* Inbound octets marked to be dropped by the IPD */
- uint32_t dropped_octets;
- /* Inbound packets marked to be dropped by the IPD */
- uint32_t dropped_packets;
- /* RAW PCI Packets received by PIP per port */
- uint32_t pci_raw_packets;
- /* Number of octets processed by PIP */
- uint32_t octets;
- /* Number of packets processed by PIP */
- uint32_t packets;
- /*
-	 * Number of identified L2 multicast packets. Does not
- * include broadcast packets. Only includes packets whose
- * parse mode is SKIP_TO_L2
- */
- uint32_t multicast_packets;
- /*
-	 * Number of identified L2 broadcast packets. Does not
- * include multicast packets. Only includes packets whose
- * parse mode is SKIP_TO_L2
- */
- uint32_t broadcast_packets;
- /* Number of 64B packets */
- uint32_t len_64_packets;
- /* Number of 65-127B packets */
- uint32_t len_65_127_packets;
- /* Number of 128-255B packets */
- uint32_t len_128_255_packets;
- /* Number of 256-511B packets */
- uint32_t len_256_511_packets;
- /* Number of 512-1023B packets */
- uint32_t len_512_1023_packets;
- /* Number of 1024-1518B packets */
- uint32_t len_1024_1518_packets;
- /* Number of 1519-max packets */
- uint32_t len_1519_max_packets;
- /* Number of packets with FCS or Align opcode errors */
- uint32_t fcs_align_err_packets;
- /* Number of packets with length < min */
- uint32_t runt_packets;
- /* Number of packets with length < min and FCS error */
- uint32_t runt_crc_packets;
- /* Number of packets with length > max */
- uint32_t oversize_packets;
- /* Number of packets with length > max and FCS error */
- uint32_t oversize_crc_packets;
- /* Number of packets without GMX/SPX/PCI errors received by PIP */
- uint32_t inb_packets;
- /*
- * Total number of octets from all packets received by PIP,
- * including CRC
- */
- uint64_t inb_octets;
- /* Number of packets with GMX/SPX/PCI errors received by PIP */
- uint16_t inb_errors;
-} cvmx_pip_port_status_t;
-
-/**
- * Definition of the PIP custom header that can be prepended
- * to a packet by external hardware.
- */
-typedef union {
- uint64_t u64;
- struct {
- /*
- * Documented as R - Set if the Packet is RAWFULL. If
- * set, this header must be the full 8 bytes.
- */
- uint64_t rawfull:1;
- /* Must be zero */
- uint64_t reserved0:5;
- /* PIP parse mode for this packet */
- uint64_t parse_mode:2;
- /* Must be zero */
- uint64_t reserved1:1;
- /*
- * Skip amount, including this header, to the
- * beginning of the packet
- */
- uint64_t skip_len:7;
- /* Must be zero */
- uint64_t reserved2:6;
- /* POW input queue for this packet */
- uint64_t qos:3;
- /* POW input group for this packet */
- uint64_t grp:4;
- /*
- * Flag to store this packet in the work queue entry,
- * if possible
- */
- uint64_t rs:1;
- /* POW input tag type */
- uint64_t tag_type:2;
- /* POW input tag */
- uint64_t tag:32;
- } s;
-} cvmx_pip_pkt_inst_hdr_t;
-
-/* CSR typedefs have been moved to cvmx-csr-*.h */
-
-/**
- * Configure an ethernet input port
- *
- * @port_num: Port number to configure
- * @port_cfg: Port hardware configuration
- * @port_tag_cfg:
- * Port POW tagging configuration
- */
-static inline void cvmx_pip_config_port(uint64_t port_num,
- union cvmx_pip_prt_cfgx port_cfg,
- union cvmx_pip_prt_tagx port_tag_cfg)
-{
- cvmx_write_csr(CVMX_PIP_PRT_CFGX(port_num), port_cfg.u64);
- cvmx_write_csr(CVMX_PIP_PRT_TAGX(port_num), port_tag_cfg.u64);
-}
-#if 0
-/**
- * @deprecated This function is a thin wrapper around the Pass1 version
- * of the CVMX_PIP_QOS_WATCHX CSR; Pass2 has added a field for
- * setting the group that is incompatible with this function,
- * the preferred upgrade path is to use the CSR directly.
- *
- * Configure the global QoS packet watchers. Each watcher is
- * capable of matching a field in a packet to determine the
- * QoS queue for scheduling.
- *
- * @watcher: Watcher number to configure (0 - 3).
- * @match_type: Watcher match type
- * @match_value:
- * Value the watcher will match against
- * @qos: QoS queue for packets matching this watcher
- */
-static inline void cvmx_pip_config_watcher(uint64_t watcher,
- cvmx_pip_qos_watch_types match_type,
- uint64_t match_value, uint64_t qos)
-{
- cvmx_pip_port_watcher_cfg_t watcher_config;
-
- watcher_config.u64 = 0;
- watcher_config.s.match_type = match_type;
- watcher_config.s.match_value = match_value;
- watcher_config.s.qos = qos;
-
- cvmx_write_csr(CVMX_PIP_QOS_WATCHX(watcher), watcher_config.u64);
-}
-#endif
-/**
- * Configure the VLAN priority to QoS queue mapping.
- *
- * @vlan_priority:
- * VLAN priority (0-7)
- * @qos: QoS queue for packets matching this watcher
- */
-static inline void cvmx_pip_config_vlan_qos(uint64_t vlan_priority,
- uint64_t qos)
-{
- union cvmx_pip_qos_vlanx pip_qos_vlanx;
- pip_qos_vlanx.u64 = 0;
- pip_qos_vlanx.s.qos = qos;
- cvmx_write_csr(CVMX_PIP_QOS_VLANX(vlan_priority), pip_qos_vlanx.u64);
-}
-
-/**
- * Configure the Diffserv to QoS queue mapping.
- *
- * @diffserv: Diffserv field value (0-63)
- * @qos: QoS queue for packets matching this watcher
- */
-static inline void cvmx_pip_config_diffserv_qos(uint64_t diffserv, uint64_t qos)
-{
- union cvmx_pip_qos_diffx pip_qos_diffx;
- pip_qos_diffx.u64 = 0;
- pip_qos_diffx.s.qos = qos;
- cvmx_write_csr(CVMX_PIP_QOS_DIFFX(diffserv), pip_qos_diffx.u64);
-}
-
-/**
- * Get the status counters for a port.
- *
- * @port_num: Port number to get statistics for.
- * @clear: Set to 1 to clear the counters after they are read
- * @status: Where to put the results.
- */
-static inline void cvmx_pip_get_port_status(uint64_t port_num, uint64_t clear,
- cvmx_pip_port_status_t *status)
-{
- union cvmx_pip_stat_ctl pip_stat_ctl;
- union cvmx_pip_stat0_prtx stat0;
- union cvmx_pip_stat1_prtx stat1;
- union cvmx_pip_stat2_prtx stat2;
- union cvmx_pip_stat3_prtx stat3;
- union cvmx_pip_stat4_prtx stat4;
- union cvmx_pip_stat5_prtx stat5;
- union cvmx_pip_stat6_prtx stat6;
- union cvmx_pip_stat7_prtx stat7;
- union cvmx_pip_stat8_prtx stat8;
- union cvmx_pip_stat9_prtx stat9;
- union cvmx_pip_stat_inb_pktsx pip_stat_inb_pktsx;
- union cvmx_pip_stat_inb_octsx pip_stat_inb_octsx;
- union cvmx_pip_stat_inb_errsx pip_stat_inb_errsx;
-
- pip_stat_ctl.u64 = 0;
- pip_stat_ctl.s.rdclr = clear;
- cvmx_write_csr(CVMX_PIP_STAT_CTL, pip_stat_ctl.u64);
-
- stat0.u64 = cvmx_read_csr(CVMX_PIP_STAT0_PRTX(port_num));
- stat1.u64 = cvmx_read_csr(CVMX_PIP_STAT1_PRTX(port_num));
- stat2.u64 = cvmx_read_csr(CVMX_PIP_STAT2_PRTX(port_num));
- stat3.u64 = cvmx_read_csr(CVMX_PIP_STAT3_PRTX(port_num));
- stat4.u64 = cvmx_read_csr(CVMX_PIP_STAT4_PRTX(port_num));
- stat5.u64 = cvmx_read_csr(CVMX_PIP_STAT5_PRTX(port_num));
- stat6.u64 = cvmx_read_csr(CVMX_PIP_STAT6_PRTX(port_num));
- stat7.u64 = cvmx_read_csr(CVMX_PIP_STAT7_PRTX(port_num));
- stat8.u64 = cvmx_read_csr(CVMX_PIP_STAT8_PRTX(port_num));
- stat9.u64 = cvmx_read_csr(CVMX_PIP_STAT9_PRTX(port_num));
- pip_stat_inb_pktsx.u64 =
- cvmx_read_csr(CVMX_PIP_STAT_INB_PKTSX(port_num));
- pip_stat_inb_octsx.u64 =
- cvmx_read_csr(CVMX_PIP_STAT_INB_OCTSX(port_num));
- pip_stat_inb_errsx.u64 =
- cvmx_read_csr(CVMX_PIP_STAT_INB_ERRSX(port_num));
-
- status->dropped_octets = stat0.s.drp_octs;
- status->dropped_packets = stat0.s.drp_pkts;
- status->octets = stat1.s.octs;
- status->pci_raw_packets = stat2.s.raw;
- status->packets = stat2.s.pkts;
- status->multicast_packets = stat3.s.mcst;
- status->broadcast_packets = stat3.s.bcst;
- status->len_64_packets = stat4.s.h64;
- status->len_65_127_packets = stat4.s.h65to127;
- status->len_128_255_packets = stat5.s.h128to255;
- status->len_256_511_packets = stat5.s.h256to511;
- status->len_512_1023_packets = stat6.s.h512to1023;
- status->len_1024_1518_packets = stat6.s.h1024to1518;
- status->len_1519_max_packets = stat7.s.h1519;
- status->fcs_align_err_packets = stat7.s.fcs;
- status->runt_packets = stat8.s.undersz;
- status->runt_crc_packets = stat8.s.frag;
- status->oversize_packets = stat9.s.oversz;
- status->oversize_crc_packets = stat9.s.jabber;
- status->inb_packets = pip_stat_inb_pktsx.s.pkts;
- status->inb_octets = pip_stat_inb_octsx.s.octs;
- status->inb_errors = pip_stat_inb_errsx.s.errs;
-
- if (cvmx_octeon_is_pass1()) {
- /*
- * Kludge to fix Octeon Pass 1 errata - Drop counts
- * don't work.
- */
- if (status->inb_packets > status->packets)
- status->dropped_packets =
- status->inb_packets - status->packets;
- else
- status->dropped_packets = 0;
- if (status->inb_octets - status->inb_packets * 4 >
- status->octets)
- status->dropped_octets =
- status->inb_octets - status->inb_packets * 4 -
- status->octets;
- else
- status->dropped_octets = 0;
- }
-}
-
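/*
 * A minimal usage sketch for the statistics helper above: read and
 * clear the counters for one IPD port and report its drop counts.
 * The port number 0 is only an illustrative assumption.
 */
static void example_dump_port_drops(void)
{
	cvmx_pip_port_status_t status;

	/* Read the counters for port 0 and clear them afterwards */
	cvmx_pip_get_port_status(0, 1, &status);
	cvmx_dprintf("port 0: %llu packets, %llu dropped\n",
		     (unsigned long long)status.packets,
		     (unsigned long long)status.dropped_packets);
}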
-/**
- * Configure the hardware CRC engine
- *
- * @interface: Interface to configure (0 or 1)
- * @invert_result:
- * Invert the result of the CRC
- * @reflect: Reflect
- * @initialization_vector:
- * CRC initialization vector
- */
-static inline void cvmx_pip_config_crc(uint64_t interface,
- uint64_t invert_result, uint64_t reflect,
- uint32_t initialization_vector)
-{
- if (OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)) {
- union cvmx_pip_crc_ctlx config;
- union cvmx_pip_crc_ivx pip_crc_ivx;
-
- config.u64 = 0;
- config.s.invres = invert_result;
- config.s.reflect = reflect;
- cvmx_write_csr(CVMX_PIP_CRC_CTLX(interface), config.u64);
-
- pip_crc_ivx.u64 = 0;
- pip_crc_ivx.s.iv = initialization_vector;
- cvmx_write_csr(CVMX_PIP_CRC_IVX(interface), pip_crc_ivx.u64);
- }
-}
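/*
 * Sketch of a CRC engine setup using the helper above with common
 * CRC-32 conventions (reflected data, inverted result, all-ones
 * initialization vector). Interface 0 is an illustrative assumption.
 */
static inline void example_config_crc32(void)
{
	cvmx_pip_config_crc(0 /* interface */, 1 /* invert_result */,
			    1 /* reflect */, 0xffffffff);
}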
-
-/**
- * Clear all bits in a tag mask. This should be called on
- * startup before any calls to cvmx_pip_tag_mask_set. Each bit
- * set in the final mask represents a byte used in the packet for
- * tag generation.
- *
- * @mask_index: Which tag mask to clear (0..3)
- */
-static inline void cvmx_pip_tag_mask_clear(uint64_t mask_index)
-{
- uint64_t index;
- union cvmx_pip_tag_incx pip_tag_incx;
- pip_tag_incx.u64 = 0;
- pip_tag_incx.s.en = 0;
- for (index = mask_index * 16; index < (mask_index + 1) * 16; index++)
- cvmx_write_csr(CVMX_PIP_TAG_INCX(index), pip_tag_incx.u64);
-}
-
-/**
- * Sets a range of bits in the tag mask. The tag mask is used
- * when the cvmx_pip_port_tag_cfg_t tag_mode is non zero.
- * There are four separate masks that can be configured.
- *
- * @mask_index: Which tag mask to modify (0..3)
- * @offset: Offset into the bitmask to set bits at. Use the GCC macro
- * offsetof() to determine the offsets into packet headers.
- * For example, offsetof(ethhdr, protocol) returns the offset
- * of the ethernet protocol field. The bitmask selects which
- * bytes to include in the tag, with bit offset X selecting
- * byte at offset X from the beginning of the packet data.
- * @len: Number of bytes to include. Usually this is the sizeof()
- * the field.
- */
-static inline void cvmx_pip_tag_mask_set(uint64_t mask_index, uint64_t offset,
- uint64_t len)
-{
- while (len--) {
- union cvmx_pip_tag_incx pip_tag_incx;
- uint64_t index = mask_index * 16 + offset / 8;
- pip_tag_incx.u64 = cvmx_read_csr(CVMX_PIP_TAG_INCX(index));
- pip_tag_incx.s.en |= 0x80 >> (offset & 0x7);
- cvmx_write_csr(CVMX_PIP_TAG_INCX(index), pip_tag_incx.u64);
- offset++;
- }
-}
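/*
 * Sketch showing how the two tag-mask helpers above are meant to be
 * used together: clear mask 0, then include the 2-byte EtherType
 * field (offset 12 in the frame) in tag generation. The mask index
 * and offsets are illustrative assumptions.
 */
static inline void example_tag_on_ethertype(void)
{
	cvmx_pip_tag_mask_clear(0);
	/* Bytes 12-13 of an Ethernet frame hold the EtherType */
	cvmx_pip_tag_mask_set(0, 12, 2);
}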
-
-#endif /* __CVMX_PIP_H__ */
diff --git a/drivers/staging/octeon/cvmx-pko-defs.h b/drivers/staging/octeon/cvmx-pko-defs.h
deleted file mode 100644
index 50e779cf1ad8..000000000000
--- a/drivers/staging/octeon/cvmx-pko-defs.h
+++ /dev/null
@@ -1,1133 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-#ifndef __CVMX_PKO_DEFS_H__
-#define __CVMX_PKO_DEFS_H__
-
-#define CVMX_PKO_MEM_COUNT0 \
- CVMX_ADD_IO_SEG(0x0001180050001080ull)
-#define CVMX_PKO_MEM_COUNT1 \
- CVMX_ADD_IO_SEG(0x0001180050001088ull)
-#define CVMX_PKO_MEM_DEBUG0 \
- CVMX_ADD_IO_SEG(0x0001180050001100ull)
-#define CVMX_PKO_MEM_DEBUG1 \
- CVMX_ADD_IO_SEG(0x0001180050001108ull)
-#define CVMX_PKO_MEM_DEBUG10 \
- CVMX_ADD_IO_SEG(0x0001180050001150ull)
-#define CVMX_PKO_MEM_DEBUG11 \
- CVMX_ADD_IO_SEG(0x0001180050001158ull)
-#define CVMX_PKO_MEM_DEBUG12 \
- CVMX_ADD_IO_SEG(0x0001180050001160ull)
-#define CVMX_PKO_MEM_DEBUG13 \
- CVMX_ADD_IO_SEG(0x0001180050001168ull)
-#define CVMX_PKO_MEM_DEBUG14 \
- CVMX_ADD_IO_SEG(0x0001180050001170ull)
-#define CVMX_PKO_MEM_DEBUG2 \
- CVMX_ADD_IO_SEG(0x0001180050001110ull)
-#define CVMX_PKO_MEM_DEBUG3 \
- CVMX_ADD_IO_SEG(0x0001180050001118ull)
-#define CVMX_PKO_MEM_DEBUG4 \
- CVMX_ADD_IO_SEG(0x0001180050001120ull)
-#define CVMX_PKO_MEM_DEBUG5 \
- CVMX_ADD_IO_SEG(0x0001180050001128ull)
-#define CVMX_PKO_MEM_DEBUG6 \
- CVMX_ADD_IO_SEG(0x0001180050001130ull)
-#define CVMX_PKO_MEM_DEBUG7 \
- CVMX_ADD_IO_SEG(0x0001180050001138ull)
-#define CVMX_PKO_MEM_DEBUG8 \
- CVMX_ADD_IO_SEG(0x0001180050001140ull)
-#define CVMX_PKO_MEM_DEBUG9 \
- CVMX_ADD_IO_SEG(0x0001180050001148ull)
-#define CVMX_PKO_MEM_PORT_PTRS \
- CVMX_ADD_IO_SEG(0x0001180050001010ull)
-#define CVMX_PKO_MEM_PORT_QOS \
- CVMX_ADD_IO_SEG(0x0001180050001018ull)
-#define CVMX_PKO_MEM_PORT_RATE0 \
- CVMX_ADD_IO_SEG(0x0001180050001020ull)
-#define CVMX_PKO_MEM_PORT_RATE1 \
- CVMX_ADD_IO_SEG(0x0001180050001028ull)
-#define CVMX_PKO_MEM_QUEUE_PTRS \
- CVMX_ADD_IO_SEG(0x0001180050001000ull)
-#define CVMX_PKO_MEM_QUEUE_QOS \
- CVMX_ADD_IO_SEG(0x0001180050001008ull)
-#define CVMX_PKO_REG_BIST_RESULT \
- CVMX_ADD_IO_SEG(0x0001180050000080ull)
-#define CVMX_PKO_REG_CMD_BUF \
- CVMX_ADD_IO_SEG(0x0001180050000010ull)
-#define CVMX_PKO_REG_CRC_CTLX(offset) \
- CVMX_ADD_IO_SEG(0x0001180050000028ull + (((offset) & 1) * 8))
-#define CVMX_PKO_REG_CRC_ENABLE \
- CVMX_ADD_IO_SEG(0x0001180050000020ull)
-#define CVMX_PKO_REG_CRC_IVX(offset) \
- CVMX_ADD_IO_SEG(0x0001180050000038ull + (((offset) & 1) * 8))
-#define CVMX_PKO_REG_DEBUG0 \
- CVMX_ADD_IO_SEG(0x0001180050000098ull)
-#define CVMX_PKO_REG_DEBUG1 \
- CVMX_ADD_IO_SEG(0x00011800500000A0ull)
-#define CVMX_PKO_REG_DEBUG2 \
- CVMX_ADD_IO_SEG(0x00011800500000A8ull)
-#define CVMX_PKO_REG_DEBUG3 \
- CVMX_ADD_IO_SEG(0x00011800500000B0ull)
-#define CVMX_PKO_REG_ENGINE_INFLIGHT \
- CVMX_ADD_IO_SEG(0x0001180050000050ull)
-#define CVMX_PKO_REG_ENGINE_THRESH \
- CVMX_ADD_IO_SEG(0x0001180050000058ull)
-#define CVMX_PKO_REG_ERROR \
- CVMX_ADD_IO_SEG(0x0001180050000088ull)
-#define CVMX_PKO_REG_FLAGS \
- CVMX_ADD_IO_SEG(0x0001180050000000ull)
-#define CVMX_PKO_REG_GMX_PORT_MODE \
- CVMX_ADD_IO_SEG(0x0001180050000018ull)
-#define CVMX_PKO_REG_INT_MASK \
- CVMX_ADD_IO_SEG(0x0001180050000090ull)
-#define CVMX_PKO_REG_QUEUE_MODE \
- CVMX_ADD_IO_SEG(0x0001180050000048ull)
-#define CVMX_PKO_REG_QUEUE_PTRS1 \
- CVMX_ADD_IO_SEG(0x0001180050000100ull)
-#define CVMX_PKO_REG_READ_IDX \
- CVMX_ADD_IO_SEG(0x0001180050000008ull)
-
-union cvmx_pko_mem_count0 {
- uint64_t u64;
- struct cvmx_pko_mem_count0_s {
- uint64_t reserved_32_63:32;
- uint64_t count:32;
- } s;
- struct cvmx_pko_mem_count0_s cn30xx;
- struct cvmx_pko_mem_count0_s cn31xx;
- struct cvmx_pko_mem_count0_s cn38xx;
- struct cvmx_pko_mem_count0_s cn38xxp2;
- struct cvmx_pko_mem_count0_s cn50xx;
- struct cvmx_pko_mem_count0_s cn52xx;
- struct cvmx_pko_mem_count0_s cn52xxp1;
- struct cvmx_pko_mem_count0_s cn56xx;
- struct cvmx_pko_mem_count0_s cn56xxp1;
- struct cvmx_pko_mem_count0_s cn58xx;
- struct cvmx_pko_mem_count0_s cn58xxp1;
-};
-
-union cvmx_pko_mem_count1 {
- uint64_t u64;
- struct cvmx_pko_mem_count1_s {
- uint64_t reserved_48_63:16;
- uint64_t count:48;
- } s;
- struct cvmx_pko_mem_count1_s cn30xx;
- struct cvmx_pko_mem_count1_s cn31xx;
- struct cvmx_pko_mem_count1_s cn38xx;
- struct cvmx_pko_mem_count1_s cn38xxp2;
- struct cvmx_pko_mem_count1_s cn50xx;
- struct cvmx_pko_mem_count1_s cn52xx;
- struct cvmx_pko_mem_count1_s cn52xxp1;
- struct cvmx_pko_mem_count1_s cn56xx;
- struct cvmx_pko_mem_count1_s cn56xxp1;
- struct cvmx_pko_mem_count1_s cn58xx;
- struct cvmx_pko_mem_count1_s cn58xxp1;
-};
-
-union cvmx_pko_mem_debug0 {
- uint64_t u64;
- struct cvmx_pko_mem_debug0_s {
- uint64_t fau:28;
- uint64_t cmd:14;
- uint64_t segs:6;
- uint64_t size:16;
- } s;
- struct cvmx_pko_mem_debug0_s cn30xx;
- struct cvmx_pko_mem_debug0_s cn31xx;
- struct cvmx_pko_mem_debug0_s cn38xx;
- struct cvmx_pko_mem_debug0_s cn38xxp2;
- struct cvmx_pko_mem_debug0_s cn50xx;
- struct cvmx_pko_mem_debug0_s cn52xx;
- struct cvmx_pko_mem_debug0_s cn52xxp1;
- struct cvmx_pko_mem_debug0_s cn56xx;
- struct cvmx_pko_mem_debug0_s cn56xxp1;
- struct cvmx_pko_mem_debug0_s cn58xx;
- struct cvmx_pko_mem_debug0_s cn58xxp1;
-};
-
-union cvmx_pko_mem_debug1 {
- uint64_t u64;
- struct cvmx_pko_mem_debug1_s {
- uint64_t i:1;
- uint64_t back:4;
- uint64_t pool:3;
- uint64_t size:16;
- uint64_t ptr:40;
- } s;
- struct cvmx_pko_mem_debug1_s cn30xx;
- struct cvmx_pko_mem_debug1_s cn31xx;
- struct cvmx_pko_mem_debug1_s cn38xx;
- struct cvmx_pko_mem_debug1_s cn38xxp2;
- struct cvmx_pko_mem_debug1_s cn50xx;
- struct cvmx_pko_mem_debug1_s cn52xx;
- struct cvmx_pko_mem_debug1_s cn52xxp1;
- struct cvmx_pko_mem_debug1_s cn56xx;
- struct cvmx_pko_mem_debug1_s cn56xxp1;
- struct cvmx_pko_mem_debug1_s cn58xx;
- struct cvmx_pko_mem_debug1_s cn58xxp1;
-};
-
-union cvmx_pko_mem_debug10 {
- uint64_t u64;
- struct cvmx_pko_mem_debug10_s {
- uint64_t reserved_0_63:64;
- } s;
- struct cvmx_pko_mem_debug10_cn30xx {
- uint64_t fau:28;
- uint64_t cmd:14;
- uint64_t segs:6;
- uint64_t size:16;
- } cn30xx;
- struct cvmx_pko_mem_debug10_cn30xx cn31xx;
- struct cvmx_pko_mem_debug10_cn30xx cn38xx;
- struct cvmx_pko_mem_debug10_cn30xx cn38xxp2;
- struct cvmx_pko_mem_debug10_cn50xx {
- uint64_t reserved_49_63:15;
- uint64_t ptrs1:17;
- uint64_t reserved_17_31:15;
- uint64_t ptrs2:17;
- } cn50xx;
- struct cvmx_pko_mem_debug10_cn50xx cn52xx;
- struct cvmx_pko_mem_debug10_cn50xx cn52xxp1;
- struct cvmx_pko_mem_debug10_cn50xx cn56xx;
- struct cvmx_pko_mem_debug10_cn50xx cn56xxp1;
- struct cvmx_pko_mem_debug10_cn50xx cn58xx;
- struct cvmx_pko_mem_debug10_cn50xx cn58xxp1;
-};
-
-union cvmx_pko_mem_debug11 {
- uint64_t u64;
- struct cvmx_pko_mem_debug11_s {
- uint64_t i:1;
- uint64_t back:4;
- uint64_t pool:3;
- uint64_t size:16;
- uint64_t reserved_0_39:40;
- } s;
- struct cvmx_pko_mem_debug11_cn30xx {
- uint64_t i:1;
- uint64_t back:4;
- uint64_t pool:3;
- uint64_t size:16;
- uint64_t ptr:40;
- } cn30xx;
- struct cvmx_pko_mem_debug11_cn30xx cn31xx;
- struct cvmx_pko_mem_debug11_cn30xx cn38xx;
- struct cvmx_pko_mem_debug11_cn30xx cn38xxp2;
- struct cvmx_pko_mem_debug11_cn50xx {
- uint64_t reserved_23_63:41;
- uint64_t maj:1;
- uint64_t uid:3;
- uint64_t sop:1;
- uint64_t len:1;
- uint64_t chk:1;
- uint64_t cnt:13;
- uint64_t mod:3;
- } cn50xx;
- struct cvmx_pko_mem_debug11_cn50xx cn52xx;
- struct cvmx_pko_mem_debug11_cn50xx cn52xxp1;
- struct cvmx_pko_mem_debug11_cn50xx cn56xx;
- struct cvmx_pko_mem_debug11_cn50xx cn56xxp1;
- struct cvmx_pko_mem_debug11_cn50xx cn58xx;
- struct cvmx_pko_mem_debug11_cn50xx cn58xxp1;
-};
-
-union cvmx_pko_mem_debug12 {
- uint64_t u64;
- struct cvmx_pko_mem_debug12_s {
- uint64_t reserved_0_63:64;
- } s;
- struct cvmx_pko_mem_debug12_cn30xx {
- uint64_t data:64;
- } cn30xx;
- struct cvmx_pko_mem_debug12_cn30xx cn31xx;
- struct cvmx_pko_mem_debug12_cn30xx cn38xx;
- struct cvmx_pko_mem_debug12_cn30xx cn38xxp2;
- struct cvmx_pko_mem_debug12_cn50xx {
- uint64_t fau:28;
- uint64_t cmd:14;
- uint64_t segs:6;
- uint64_t size:16;
- } cn50xx;
- struct cvmx_pko_mem_debug12_cn50xx cn52xx;
- struct cvmx_pko_mem_debug12_cn50xx cn52xxp1;
- struct cvmx_pko_mem_debug12_cn50xx cn56xx;
- struct cvmx_pko_mem_debug12_cn50xx cn56xxp1;
- struct cvmx_pko_mem_debug12_cn50xx cn58xx;
- struct cvmx_pko_mem_debug12_cn50xx cn58xxp1;
-};
-
-union cvmx_pko_mem_debug13 {
- uint64_t u64;
- struct cvmx_pko_mem_debug13_s {
- uint64_t i:1;
- uint64_t back:4;
- uint64_t pool:3;
- uint64_t reserved_0_55:56;
- } s;
- struct cvmx_pko_mem_debug13_cn30xx {
- uint64_t reserved_51_63:13;
- uint64_t widx:17;
- uint64_t ridx2:17;
- uint64_t widx2:17;
- } cn30xx;
- struct cvmx_pko_mem_debug13_cn30xx cn31xx;
- struct cvmx_pko_mem_debug13_cn30xx cn38xx;
- struct cvmx_pko_mem_debug13_cn30xx cn38xxp2;
- struct cvmx_pko_mem_debug13_cn50xx {
- uint64_t i:1;
- uint64_t back:4;
- uint64_t pool:3;
- uint64_t size:16;
- uint64_t ptr:40;
- } cn50xx;
- struct cvmx_pko_mem_debug13_cn50xx cn52xx;
- struct cvmx_pko_mem_debug13_cn50xx cn52xxp1;
- struct cvmx_pko_mem_debug13_cn50xx cn56xx;
- struct cvmx_pko_mem_debug13_cn50xx cn56xxp1;
- struct cvmx_pko_mem_debug13_cn50xx cn58xx;
- struct cvmx_pko_mem_debug13_cn50xx cn58xxp1;
-};
-
-union cvmx_pko_mem_debug14 {
- uint64_t u64;
- struct cvmx_pko_mem_debug14_s {
- uint64_t reserved_0_63:64;
- } s;
- struct cvmx_pko_mem_debug14_cn30xx {
- uint64_t reserved_17_63:47;
- uint64_t ridx:17;
- } cn30xx;
- struct cvmx_pko_mem_debug14_cn30xx cn31xx;
- struct cvmx_pko_mem_debug14_cn30xx cn38xx;
- struct cvmx_pko_mem_debug14_cn30xx cn38xxp2;
- struct cvmx_pko_mem_debug14_cn52xx {
- uint64_t data:64;
- } cn52xx;
- struct cvmx_pko_mem_debug14_cn52xx cn52xxp1;
- struct cvmx_pko_mem_debug14_cn52xx cn56xx;
- struct cvmx_pko_mem_debug14_cn52xx cn56xxp1;
-};
-
-union cvmx_pko_mem_debug2 {
- uint64_t u64;
- struct cvmx_pko_mem_debug2_s {
- uint64_t i:1;
- uint64_t back:4;
- uint64_t pool:3;
- uint64_t size:16;
- uint64_t ptr:40;
- } s;
- struct cvmx_pko_mem_debug2_s cn30xx;
- struct cvmx_pko_mem_debug2_s cn31xx;
- struct cvmx_pko_mem_debug2_s cn38xx;
- struct cvmx_pko_mem_debug2_s cn38xxp2;
- struct cvmx_pko_mem_debug2_s cn50xx;
- struct cvmx_pko_mem_debug2_s cn52xx;
- struct cvmx_pko_mem_debug2_s cn52xxp1;
- struct cvmx_pko_mem_debug2_s cn56xx;
- struct cvmx_pko_mem_debug2_s cn56xxp1;
- struct cvmx_pko_mem_debug2_s cn58xx;
- struct cvmx_pko_mem_debug2_s cn58xxp1;
-};
-
-union cvmx_pko_mem_debug3 {
- uint64_t u64;
- struct cvmx_pko_mem_debug3_s {
- uint64_t reserved_0_63:64;
- } s;
- struct cvmx_pko_mem_debug3_cn30xx {
- uint64_t i:1;
- uint64_t back:4;
- uint64_t pool:3;
- uint64_t size:16;
- uint64_t ptr:40;
- } cn30xx;
- struct cvmx_pko_mem_debug3_cn30xx cn31xx;
- struct cvmx_pko_mem_debug3_cn30xx cn38xx;
- struct cvmx_pko_mem_debug3_cn30xx cn38xxp2;
- struct cvmx_pko_mem_debug3_cn50xx {
- uint64_t data:64;
- } cn50xx;
- struct cvmx_pko_mem_debug3_cn50xx cn52xx;
- struct cvmx_pko_mem_debug3_cn50xx cn52xxp1;
- struct cvmx_pko_mem_debug3_cn50xx cn56xx;
- struct cvmx_pko_mem_debug3_cn50xx cn56xxp1;
- struct cvmx_pko_mem_debug3_cn50xx cn58xx;
- struct cvmx_pko_mem_debug3_cn50xx cn58xxp1;
-};
-
-union cvmx_pko_mem_debug4 {
- uint64_t u64;
- struct cvmx_pko_mem_debug4_s {
- uint64_t reserved_0_63:64;
- } s;
- struct cvmx_pko_mem_debug4_cn30xx {
- uint64_t data:64;
- } cn30xx;
- struct cvmx_pko_mem_debug4_cn30xx cn31xx;
- struct cvmx_pko_mem_debug4_cn30xx cn38xx;
- struct cvmx_pko_mem_debug4_cn30xx cn38xxp2;
- struct cvmx_pko_mem_debug4_cn50xx {
- uint64_t cmnd_segs:3;
- uint64_t cmnd_siz:16;
- uint64_t cmnd_off:6;
- uint64_t uid:3;
- uint64_t dread_sop:1;
- uint64_t init_dwrite:1;
- uint64_t chk_once:1;
- uint64_t chk_mode:1;
- uint64_t active:1;
- uint64_t static_p:1;
- uint64_t qos:3;
- uint64_t qcb_ridx:5;
- uint64_t qid_off_max:4;
- uint64_t qid_off:4;
- uint64_t qid_base:8;
- uint64_t wait:1;
- uint64_t minor:2;
- uint64_t major:3;
- } cn50xx;
- struct cvmx_pko_mem_debug4_cn52xx {
- uint64_t curr_siz:8;
- uint64_t curr_off:16;
- uint64_t cmnd_segs:6;
- uint64_t cmnd_siz:16;
- uint64_t cmnd_off:6;
- uint64_t uid:2;
- uint64_t dread_sop:1;
- uint64_t init_dwrite:1;
- uint64_t chk_once:1;
- uint64_t chk_mode:1;
- uint64_t wait:1;
- uint64_t minor:2;
- uint64_t major:3;
- } cn52xx;
- struct cvmx_pko_mem_debug4_cn52xx cn52xxp1;
- struct cvmx_pko_mem_debug4_cn52xx cn56xx;
- struct cvmx_pko_mem_debug4_cn52xx cn56xxp1;
- struct cvmx_pko_mem_debug4_cn50xx cn58xx;
- struct cvmx_pko_mem_debug4_cn50xx cn58xxp1;
-};
-
-union cvmx_pko_mem_debug5 {
- uint64_t u64;
- struct cvmx_pko_mem_debug5_s {
- uint64_t reserved_0_63:64;
- } s;
- struct cvmx_pko_mem_debug5_cn30xx {
- uint64_t dwri_mod:1;
- uint64_t dwri_sop:1;
- uint64_t dwri_len:1;
- uint64_t dwri_cnt:13;
- uint64_t cmnd_siz:16;
- uint64_t uid:1;
- uint64_t xfer_wor:1;
- uint64_t xfer_dwr:1;
- uint64_t cbuf_fre:1;
- uint64_t reserved_27_27:1;
- uint64_t chk_mode:1;
- uint64_t active:1;
- uint64_t qos:3;
- uint64_t qcb_ridx:5;
- uint64_t qid_off:3;
- uint64_t qid_base:7;
- uint64_t wait:1;
- uint64_t minor:2;
- uint64_t major:4;
- } cn30xx;
- struct cvmx_pko_mem_debug5_cn30xx cn31xx;
- struct cvmx_pko_mem_debug5_cn30xx cn38xx;
- struct cvmx_pko_mem_debug5_cn30xx cn38xxp2;
- struct cvmx_pko_mem_debug5_cn50xx {
- uint64_t curr_ptr:29;
- uint64_t curr_siz:16;
- uint64_t curr_off:16;
- uint64_t cmnd_segs:3;
- } cn50xx;
- struct cvmx_pko_mem_debug5_cn52xx {
- uint64_t reserved_54_63:10;
- uint64_t nxt_inflt:6;
- uint64_t curr_ptr:40;
- uint64_t curr_siz:8;
- } cn52xx;
- struct cvmx_pko_mem_debug5_cn52xx cn52xxp1;
- struct cvmx_pko_mem_debug5_cn52xx cn56xx;
- struct cvmx_pko_mem_debug5_cn52xx cn56xxp1;
- struct cvmx_pko_mem_debug5_cn50xx cn58xx;
- struct cvmx_pko_mem_debug5_cn50xx cn58xxp1;
-};
-
-union cvmx_pko_mem_debug6 {
- uint64_t u64;
- struct cvmx_pko_mem_debug6_s {
- uint64_t reserved_37_63:27;
- uint64_t qid_offres:4;
- uint64_t qid_offths:4;
- uint64_t preempter:1;
- uint64_t preemptee:1;
- uint64_t preempted:1;
- uint64_t active:1;
- uint64_t statc:1;
- uint64_t qos:3;
- uint64_t qcb_ridx:5;
- uint64_t qid_offmax:4;
- uint64_t reserved_0_11:12;
- } s;
- struct cvmx_pko_mem_debug6_cn30xx {
- uint64_t reserved_11_63:53;
- uint64_t qid_offm:3;
- uint64_t static_p:1;
- uint64_t work_min:3;
- uint64_t dwri_chk:1;
- uint64_t dwri_uid:1;
- uint64_t dwri_mod:2;
- } cn30xx;
- struct cvmx_pko_mem_debug6_cn30xx cn31xx;
- struct cvmx_pko_mem_debug6_cn30xx cn38xx;
- struct cvmx_pko_mem_debug6_cn30xx cn38xxp2;
- struct cvmx_pko_mem_debug6_cn50xx {
- uint64_t reserved_11_63:53;
- uint64_t curr_ptr:11;
- } cn50xx;
- struct cvmx_pko_mem_debug6_cn52xx {
- uint64_t reserved_37_63:27;
- uint64_t qid_offres:4;
- uint64_t qid_offths:4;
- uint64_t preempter:1;
- uint64_t preemptee:1;
- uint64_t preempted:1;
- uint64_t active:1;
- uint64_t statc:1;
- uint64_t qos:3;
- uint64_t qcb_ridx:5;
- uint64_t qid_offmax:4;
- uint64_t qid_off:4;
- uint64_t qid_base:8;
- } cn52xx;
- struct cvmx_pko_mem_debug6_cn52xx cn52xxp1;
- struct cvmx_pko_mem_debug6_cn52xx cn56xx;
- struct cvmx_pko_mem_debug6_cn52xx cn56xxp1;
- struct cvmx_pko_mem_debug6_cn50xx cn58xx;
- struct cvmx_pko_mem_debug6_cn50xx cn58xxp1;
-};
-
-union cvmx_pko_mem_debug7 {
- uint64_t u64;
- struct cvmx_pko_mem_debug7_s {
- uint64_t qos:5;
- uint64_t tail:1;
- uint64_t reserved_0_57:58;
- } s;
- struct cvmx_pko_mem_debug7_cn30xx {
- uint64_t reserved_58_63:6;
- uint64_t dwb:9;
- uint64_t start:33;
- uint64_t size:16;
- } cn30xx;
- struct cvmx_pko_mem_debug7_cn30xx cn31xx;
- struct cvmx_pko_mem_debug7_cn30xx cn38xx;
- struct cvmx_pko_mem_debug7_cn30xx cn38xxp2;
- struct cvmx_pko_mem_debug7_cn50xx {
- uint64_t qos:5;
- uint64_t tail:1;
- uint64_t buf_siz:13;
- uint64_t buf_ptr:33;
- uint64_t qcb_widx:6;
- uint64_t qcb_ridx:6;
- } cn50xx;
- struct cvmx_pko_mem_debug7_cn50xx cn52xx;
- struct cvmx_pko_mem_debug7_cn50xx cn52xxp1;
- struct cvmx_pko_mem_debug7_cn50xx cn56xx;
- struct cvmx_pko_mem_debug7_cn50xx cn56xxp1;
- struct cvmx_pko_mem_debug7_cn50xx cn58xx;
- struct cvmx_pko_mem_debug7_cn50xx cn58xxp1;
-};
-
-union cvmx_pko_mem_debug8 {
- uint64_t u64;
- struct cvmx_pko_mem_debug8_s {
- uint64_t reserved_59_63:5;
- uint64_t tail:1;
- uint64_t buf_siz:13;
- uint64_t reserved_0_44:45;
- } s;
- struct cvmx_pko_mem_debug8_cn30xx {
- uint64_t qos:5;
- uint64_t tail:1;
- uint64_t buf_siz:13;
- uint64_t buf_ptr:33;
- uint64_t qcb_widx:6;
- uint64_t qcb_ridx:6;
- } cn30xx;
- struct cvmx_pko_mem_debug8_cn30xx cn31xx;
- struct cvmx_pko_mem_debug8_cn30xx cn38xx;
- struct cvmx_pko_mem_debug8_cn30xx cn38xxp2;
- struct cvmx_pko_mem_debug8_cn50xx {
- uint64_t reserved_28_63:36;
- uint64_t doorbell:20;
- uint64_t reserved_6_7:2;
- uint64_t static_p:1;
- uint64_t s_tail:1;
- uint64_t static_q:1;
- uint64_t qos:3;
- } cn50xx;
- struct cvmx_pko_mem_debug8_cn52xx {
- uint64_t reserved_29_63:35;
- uint64_t preempter:1;
- uint64_t doorbell:20;
- uint64_t reserved_7_7:1;
- uint64_t preemptee:1;
- uint64_t static_p:1;
- uint64_t s_tail:1;
- uint64_t static_q:1;
- uint64_t qos:3;
- } cn52xx;
- struct cvmx_pko_mem_debug8_cn52xx cn52xxp1;
- struct cvmx_pko_mem_debug8_cn52xx cn56xx;
- struct cvmx_pko_mem_debug8_cn52xx cn56xxp1;
- struct cvmx_pko_mem_debug8_cn50xx cn58xx;
- struct cvmx_pko_mem_debug8_cn50xx cn58xxp1;
-};
-
-union cvmx_pko_mem_debug9 {
- uint64_t u64;
- struct cvmx_pko_mem_debug9_s {
- uint64_t reserved_49_63:15;
- uint64_t ptrs0:17;
- uint64_t reserved_0_31:32;
- } s;
- struct cvmx_pko_mem_debug9_cn30xx {
- uint64_t reserved_28_63:36;
- uint64_t doorbell:20;
- uint64_t reserved_5_7:3;
- uint64_t s_tail:1;
- uint64_t static_q:1;
- uint64_t qos:3;
- } cn30xx;
- struct cvmx_pko_mem_debug9_cn30xx cn31xx;
- struct cvmx_pko_mem_debug9_cn38xx {
- uint64_t reserved_28_63:36;
- uint64_t doorbell:20;
- uint64_t reserved_6_7:2;
- uint64_t static_p:1;
- uint64_t s_tail:1;
- uint64_t static_q:1;
- uint64_t qos:3;
- } cn38xx;
- struct cvmx_pko_mem_debug9_cn38xx cn38xxp2;
- struct cvmx_pko_mem_debug9_cn50xx {
- uint64_t reserved_49_63:15;
- uint64_t ptrs0:17;
- uint64_t reserved_17_31:15;
- uint64_t ptrs3:17;
- } cn50xx;
- struct cvmx_pko_mem_debug9_cn50xx cn52xx;
- struct cvmx_pko_mem_debug9_cn50xx cn52xxp1;
- struct cvmx_pko_mem_debug9_cn50xx cn56xx;
- struct cvmx_pko_mem_debug9_cn50xx cn56xxp1;
- struct cvmx_pko_mem_debug9_cn50xx cn58xx;
- struct cvmx_pko_mem_debug9_cn50xx cn58xxp1;
-};
-
-union cvmx_pko_mem_port_ptrs {
- uint64_t u64;
- struct cvmx_pko_mem_port_ptrs_s {
- uint64_t reserved_62_63:2;
- uint64_t static_p:1;
- uint64_t qos_mask:8;
- uint64_t reserved_16_52:37;
- uint64_t bp_port:6;
- uint64_t eid:4;
- uint64_t pid:6;
- } s;
- struct cvmx_pko_mem_port_ptrs_s cn52xx;
- struct cvmx_pko_mem_port_ptrs_s cn52xxp1;
- struct cvmx_pko_mem_port_ptrs_s cn56xx;
- struct cvmx_pko_mem_port_ptrs_s cn56xxp1;
-};
-
-union cvmx_pko_mem_port_qos {
- uint64_t u64;
- struct cvmx_pko_mem_port_qos_s {
- uint64_t reserved_61_63:3;
- uint64_t qos_mask:8;
- uint64_t reserved_10_52:43;
- uint64_t eid:4;
- uint64_t pid:6;
- } s;
- struct cvmx_pko_mem_port_qos_s cn52xx;
- struct cvmx_pko_mem_port_qos_s cn52xxp1;
- struct cvmx_pko_mem_port_qos_s cn56xx;
- struct cvmx_pko_mem_port_qos_s cn56xxp1;
-};
-
-union cvmx_pko_mem_port_rate0 {
- uint64_t u64;
- struct cvmx_pko_mem_port_rate0_s {
- uint64_t reserved_51_63:13;
- uint64_t rate_word:19;
- uint64_t rate_pkt:24;
- uint64_t reserved_6_7:2;
- uint64_t pid:6;
- } s;
- struct cvmx_pko_mem_port_rate0_s cn52xx;
- struct cvmx_pko_mem_port_rate0_s cn52xxp1;
- struct cvmx_pko_mem_port_rate0_s cn56xx;
- struct cvmx_pko_mem_port_rate0_s cn56xxp1;
-};
-
-union cvmx_pko_mem_port_rate1 {
- uint64_t u64;
- struct cvmx_pko_mem_port_rate1_s {
- uint64_t reserved_32_63:32;
- uint64_t rate_lim:24;
- uint64_t reserved_6_7:2;
- uint64_t pid:6;
- } s;
- struct cvmx_pko_mem_port_rate1_s cn52xx;
- struct cvmx_pko_mem_port_rate1_s cn52xxp1;
- struct cvmx_pko_mem_port_rate1_s cn56xx;
- struct cvmx_pko_mem_port_rate1_s cn56xxp1;
-};
-
-union cvmx_pko_mem_queue_ptrs {
- uint64_t u64;
- struct cvmx_pko_mem_queue_ptrs_s {
- uint64_t s_tail:1;
- uint64_t static_p:1;
- uint64_t static_q:1;
- uint64_t qos_mask:8;
- uint64_t buf_ptr:36;
- uint64_t tail:1;
- uint64_t index:3;
- uint64_t port:6;
- uint64_t queue:7;
- } s;
- struct cvmx_pko_mem_queue_ptrs_s cn30xx;
- struct cvmx_pko_mem_queue_ptrs_s cn31xx;
- struct cvmx_pko_mem_queue_ptrs_s cn38xx;
- struct cvmx_pko_mem_queue_ptrs_s cn38xxp2;
- struct cvmx_pko_mem_queue_ptrs_s cn50xx;
- struct cvmx_pko_mem_queue_ptrs_s cn52xx;
- struct cvmx_pko_mem_queue_ptrs_s cn52xxp1;
- struct cvmx_pko_mem_queue_ptrs_s cn56xx;
- struct cvmx_pko_mem_queue_ptrs_s cn56xxp1;
- struct cvmx_pko_mem_queue_ptrs_s cn58xx;
- struct cvmx_pko_mem_queue_ptrs_s cn58xxp1;
-};
-
-union cvmx_pko_mem_queue_qos {
- uint64_t u64;
- struct cvmx_pko_mem_queue_qos_s {
- uint64_t reserved_61_63:3;
- uint64_t qos_mask:8;
- uint64_t reserved_13_52:40;
- uint64_t pid:6;
- uint64_t qid:7;
- } s;
- struct cvmx_pko_mem_queue_qos_s cn30xx;
- struct cvmx_pko_mem_queue_qos_s cn31xx;
- struct cvmx_pko_mem_queue_qos_s cn38xx;
- struct cvmx_pko_mem_queue_qos_s cn38xxp2;
- struct cvmx_pko_mem_queue_qos_s cn50xx;
- struct cvmx_pko_mem_queue_qos_s cn52xx;
- struct cvmx_pko_mem_queue_qos_s cn52xxp1;
- struct cvmx_pko_mem_queue_qos_s cn56xx;
- struct cvmx_pko_mem_queue_qos_s cn56xxp1;
- struct cvmx_pko_mem_queue_qos_s cn58xx;
- struct cvmx_pko_mem_queue_qos_s cn58xxp1;
-};
-
-union cvmx_pko_reg_bist_result {
- uint64_t u64;
- struct cvmx_pko_reg_bist_result_s {
- uint64_t reserved_0_63:64;
- } s;
- struct cvmx_pko_reg_bist_result_cn30xx {
- uint64_t reserved_27_63:37;
- uint64_t psb2:5;
- uint64_t count:1;
- uint64_t rif:1;
- uint64_t wif:1;
- uint64_t ncb:1;
- uint64_t out:1;
- uint64_t crc:1;
- uint64_t chk:1;
- uint64_t qsb:2;
- uint64_t qcb:2;
- uint64_t pdb:4;
- uint64_t psb:7;
- } cn30xx;
- struct cvmx_pko_reg_bist_result_cn30xx cn31xx;
- struct cvmx_pko_reg_bist_result_cn30xx cn38xx;
- struct cvmx_pko_reg_bist_result_cn30xx cn38xxp2;
- struct cvmx_pko_reg_bist_result_cn50xx {
- uint64_t reserved_33_63:31;
- uint64_t csr:1;
- uint64_t iob:1;
- uint64_t out_crc:1;
- uint64_t out_ctl:3;
- uint64_t out_sta:1;
- uint64_t out_wif:1;
- uint64_t prt_chk:3;
- uint64_t prt_nxt:1;
- uint64_t prt_psb:6;
- uint64_t ncb_inb:2;
- uint64_t prt_qcb:2;
- uint64_t prt_qsb:3;
- uint64_t dat_dat:4;
- uint64_t dat_ptr:4;
- } cn50xx;
- struct cvmx_pko_reg_bist_result_cn52xx {
- uint64_t reserved_35_63:29;
- uint64_t csr:1;
- uint64_t iob:1;
- uint64_t out_dat:1;
- uint64_t out_ctl:3;
- uint64_t out_sta:1;
- uint64_t out_wif:1;
- uint64_t prt_chk:3;
- uint64_t prt_nxt:1;
- uint64_t prt_psb:8;
- uint64_t ncb_inb:2;
- uint64_t prt_qcb:2;
- uint64_t prt_qsb:3;
- uint64_t prt_ctl:2;
- uint64_t dat_dat:2;
- uint64_t dat_ptr:4;
- } cn52xx;
- struct cvmx_pko_reg_bist_result_cn52xx cn52xxp1;
- struct cvmx_pko_reg_bist_result_cn52xx cn56xx;
- struct cvmx_pko_reg_bist_result_cn52xx cn56xxp1;
- struct cvmx_pko_reg_bist_result_cn50xx cn58xx;
- struct cvmx_pko_reg_bist_result_cn50xx cn58xxp1;
-};
-
-union cvmx_pko_reg_cmd_buf {
- uint64_t u64;
- struct cvmx_pko_reg_cmd_buf_s {
- uint64_t reserved_23_63:41;
- uint64_t pool:3;
- uint64_t reserved_13_19:7;
- uint64_t size:13;
- } s;
- struct cvmx_pko_reg_cmd_buf_s cn30xx;
- struct cvmx_pko_reg_cmd_buf_s cn31xx;
- struct cvmx_pko_reg_cmd_buf_s cn38xx;
- struct cvmx_pko_reg_cmd_buf_s cn38xxp2;
- struct cvmx_pko_reg_cmd_buf_s cn50xx;
- struct cvmx_pko_reg_cmd_buf_s cn52xx;
- struct cvmx_pko_reg_cmd_buf_s cn52xxp1;
- struct cvmx_pko_reg_cmd_buf_s cn56xx;
- struct cvmx_pko_reg_cmd_buf_s cn56xxp1;
- struct cvmx_pko_reg_cmd_buf_s cn58xx;
- struct cvmx_pko_reg_cmd_buf_s cn58xxp1;
-};
-
-union cvmx_pko_reg_crc_ctlx {
- uint64_t u64;
- struct cvmx_pko_reg_crc_ctlx_s {
- uint64_t reserved_2_63:62;
- uint64_t invres:1;
- uint64_t refin:1;
- } s;
- struct cvmx_pko_reg_crc_ctlx_s cn38xx;
- struct cvmx_pko_reg_crc_ctlx_s cn38xxp2;
- struct cvmx_pko_reg_crc_ctlx_s cn58xx;
- struct cvmx_pko_reg_crc_ctlx_s cn58xxp1;
-};
-
-union cvmx_pko_reg_crc_enable {
- uint64_t u64;
- struct cvmx_pko_reg_crc_enable_s {
- uint64_t reserved_32_63:32;
- uint64_t enable:32;
- } s;
- struct cvmx_pko_reg_crc_enable_s cn38xx;
- struct cvmx_pko_reg_crc_enable_s cn38xxp2;
- struct cvmx_pko_reg_crc_enable_s cn58xx;
- struct cvmx_pko_reg_crc_enable_s cn58xxp1;
-};
-
-union cvmx_pko_reg_crc_ivx {
- uint64_t u64;
- struct cvmx_pko_reg_crc_ivx_s {
- uint64_t reserved_32_63:32;
- uint64_t iv:32;
- } s;
- struct cvmx_pko_reg_crc_ivx_s cn38xx;
- struct cvmx_pko_reg_crc_ivx_s cn38xxp2;
- struct cvmx_pko_reg_crc_ivx_s cn58xx;
- struct cvmx_pko_reg_crc_ivx_s cn58xxp1;
-};
-
-union cvmx_pko_reg_debug0 {
- uint64_t u64;
- struct cvmx_pko_reg_debug0_s {
- uint64_t asserts:64;
- } s;
- struct cvmx_pko_reg_debug0_cn30xx {
- uint64_t reserved_17_63:47;
- uint64_t asserts:17;
- } cn30xx;
- struct cvmx_pko_reg_debug0_cn30xx cn31xx;
- struct cvmx_pko_reg_debug0_cn30xx cn38xx;
- struct cvmx_pko_reg_debug0_cn30xx cn38xxp2;
- struct cvmx_pko_reg_debug0_s cn50xx;
- struct cvmx_pko_reg_debug0_s cn52xx;
- struct cvmx_pko_reg_debug0_s cn52xxp1;
- struct cvmx_pko_reg_debug0_s cn56xx;
- struct cvmx_pko_reg_debug0_s cn56xxp1;
- struct cvmx_pko_reg_debug0_s cn58xx;
- struct cvmx_pko_reg_debug0_s cn58xxp1;
-};
-
-union cvmx_pko_reg_debug1 {
- uint64_t u64;
- struct cvmx_pko_reg_debug1_s {
- uint64_t asserts:64;
- } s;
- struct cvmx_pko_reg_debug1_s cn50xx;
- struct cvmx_pko_reg_debug1_s cn52xx;
- struct cvmx_pko_reg_debug1_s cn52xxp1;
- struct cvmx_pko_reg_debug1_s cn56xx;
- struct cvmx_pko_reg_debug1_s cn56xxp1;
- struct cvmx_pko_reg_debug1_s cn58xx;
- struct cvmx_pko_reg_debug1_s cn58xxp1;
-};
-
-union cvmx_pko_reg_debug2 {
- uint64_t u64;
- struct cvmx_pko_reg_debug2_s {
- uint64_t asserts:64;
- } s;
- struct cvmx_pko_reg_debug2_s cn50xx;
- struct cvmx_pko_reg_debug2_s cn52xx;
- struct cvmx_pko_reg_debug2_s cn52xxp1;
- struct cvmx_pko_reg_debug2_s cn56xx;
- struct cvmx_pko_reg_debug2_s cn56xxp1;
- struct cvmx_pko_reg_debug2_s cn58xx;
- struct cvmx_pko_reg_debug2_s cn58xxp1;
-};
-
-union cvmx_pko_reg_debug3 {
- uint64_t u64;
- struct cvmx_pko_reg_debug3_s {
- uint64_t asserts:64;
- } s;
- struct cvmx_pko_reg_debug3_s cn50xx;
- struct cvmx_pko_reg_debug3_s cn52xx;
- struct cvmx_pko_reg_debug3_s cn52xxp1;
- struct cvmx_pko_reg_debug3_s cn56xx;
- struct cvmx_pko_reg_debug3_s cn56xxp1;
- struct cvmx_pko_reg_debug3_s cn58xx;
- struct cvmx_pko_reg_debug3_s cn58xxp1;
-};
-
-union cvmx_pko_reg_engine_inflight {
- uint64_t u64;
- struct cvmx_pko_reg_engine_inflight_s {
- uint64_t reserved_40_63:24;
- uint64_t engine9:4;
- uint64_t engine8:4;
- uint64_t engine7:4;
- uint64_t engine6:4;
- uint64_t engine5:4;
- uint64_t engine4:4;
- uint64_t engine3:4;
- uint64_t engine2:4;
- uint64_t engine1:4;
- uint64_t engine0:4;
- } s;
- struct cvmx_pko_reg_engine_inflight_s cn52xx;
- struct cvmx_pko_reg_engine_inflight_s cn52xxp1;
- struct cvmx_pko_reg_engine_inflight_s cn56xx;
- struct cvmx_pko_reg_engine_inflight_s cn56xxp1;
-};
-
-union cvmx_pko_reg_engine_thresh {
- uint64_t u64;
- struct cvmx_pko_reg_engine_thresh_s {
- uint64_t reserved_10_63:54;
- uint64_t mask:10;
- } s;
- struct cvmx_pko_reg_engine_thresh_s cn52xx;
- struct cvmx_pko_reg_engine_thresh_s cn52xxp1;
- struct cvmx_pko_reg_engine_thresh_s cn56xx;
- struct cvmx_pko_reg_engine_thresh_s cn56xxp1;
-};
-
-union cvmx_pko_reg_error {
- uint64_t u64;
- struct cvmx_pko_reg_error_s {
- uint64_t reserved_3_63:61;
- uint64_t currzero:1;
- uint64_t doorbell:1;
- uint64_t parity:1;
- } s;
- struct cvmx_pko_reg_error_cn30xx {
- uint64_t reserved_2_63:62;
- uint64_t doorbell:1;
- uint64_t parity:1;
- } cn30xx;
- struct cvmx_pko_reg_error_cn30xx cn31xx;
- struct cvmx_pko_reg_error_cn30xx cn38xx;
- struct cvmx_pko_reg_error_cn30xx cn38xxp2;
- struct cvmx_pko_reg_error_s cn50xx;
- struct cvmx_pko_reg_error_s cn52xx;
- struct cvmx_pko_reg_error_s cn52xxp1;
- struct cvmx_pko_reg_error_s cn56xx;
- struct cvmx_pko_reg_error_s cn56xxp1;
- struct cvmx_pko_reg_error_s cn58xx;
- struct cvmx_pko_reg_error_s cn58xxp1;
-};
-
-union cvmx_pko_reg_flags {
- uint64_t u64;
- struct cvmx_pko_reg_flags_s {
- uint64_t reserved_4_63:60;
- uint64_t reset:1;
- uint64_t store_be:1;
- uint64_t ena_dwb:1;
- uint64_t ena_pko:1;
- } s;
- struct cvmx_pko_reg_flags_s cn30xx;
- struct cvmx_pko_reg_flags_s cn31xx;
- struct cvmx_pko_reg_flags_s cn38xx;
- struct cvmx_pko_reg_flags_s cn38xxp2;
- struct cvmx_pko_reg_flags_s cn50xx;
- struct cvmx_pko_reg_flags_s cn52xx;
- struct cvmx_pko_reg_flags_s cn52xxp1;
- struct cvmx_pko_reg_flags_s cn56xx;
- struct cvmx_pko_reg_flags_s cn56xxp1;
- struct cvmx_pko_reg_flags_s cn58xx;
- struct cvmx_pko_reg_flags_s cn58xxp1;
-};
-
-union cvmx_pko_reg_gmx_port_mode {
- uint64_t u64;
- struct cvmx_pko_reg_gmx_port_mode_s {
- uint64_t reserved_6_63:58;
- uint64_t mode1:3;
- uint64_t mode0:3;
- } s;
- struct cvmx_pko_reg_gmx_port_mode_s cn30xx;
- struct cvmx_pko_reg_gmx_port_mode_s cn31xx;
- struct cvmx_pko_reg_gmx_port_mode_s cn38xx;
- struct cvmx_pko_reg_gmx_port_mode_s cn38xxp2;
- struct cvmx_pko_reg_gmx_port_mode_s cn50xx;
- struct cvmx_pko_reg_gmx_port_mode_s cn52xx;
- struct cvmx_pko_reg_gmx_port_mode_s cn52xxp1;
- struct cvmx_pko_reg_gmx_port_mode_s cn56xx;
- struct cvmx_pko_reg_gmx_port_mode_s cn56xxp1;
- struct cvmx_pko_reg_gmx_port_mode_s cn58xx;
- struct cvmx_pko_reg_gmx_port_mode_s cn58xxp1;
-};
-
-union cvmx_pko_reg_int_mask {
- uint64_t u64;
- struct cvmx_pko_reg_int_mask_s {
- uint64_t reserved_3_63:61;
- uint64_t currzero:1;
- uint64_t doorbell:1;
- uint64_t parity:1;
- } s;
- struct cvmx_pko_reg_int_mask_cn30xx {
- uint64_t reserved_2_63:62;
- uint64_t doorbell:1;
- uint64_t parity:1;
- } cn30xx;
- struct cvmx_pko_reg_int_mask_cn30xx cn31xx;
- struct cvmx_pko_reg_int_mask_cn30xx cn38xx;
- struct cvmx_pko_reg_int_mask_cn30xx cn38xxp2;
- struct cvmx_pko_reg_int_mask_s cn50xx;
- struct cvmx_pko_reg_int_mask_s cn52xx;
- struct cvmx_pko_reg_int_mask_s cn52xxp1;
- struct cvmx_pko_reg_int_mask_s cn56xx;
- struct cvmx_pko_reg_int_mask_s cn56xxp1;
- struct cvmx_pko_reg_int_mask_s cn58xx;
- struct cvmx_pko_reg_int_mask_s cn58xxp1;
-};
-
-union cvmx_pko_reg_queue_mode {
- uint64_t u64;
- struct cvmx_pko_reg_queue_mode_s {
- uint64_t reserved_2_63:62;
- uint64_t mode:2;
- } s;
- struct cvmx_pko_reg_queue_mode_s cn30xx;
- struct cvmx_pko_reg_queue_mode_s cn31xx;
- struct cvmx_pko_reg_queue_mode_s cn38xx;
- struct cvmx_pko_reg_queue_mode_s cn38xxp2;
- struct cvmx_pko_reg_queue_mode_s cn50xx;
- struct cvmx_pko_reg_queue_mode_s cn52xx;
- struct cvmx_pko_reg_queue_mode_s cn52xxp1;
- struct cvmx_pko_reg_queue_mode_s cn56xx;
- struct cvmx_pko_reg_queue_mode_s cn56xxp1;
- struct cvmx_pko_reg_queue_mode_s cn58xx;
- struct cvmx_pko_reg_queue_mode_s cn58xxp1;
-};
-
-union cvmx_pko_reg_queue_ptrs1 {
- uint64_t u64;
- struct cvmx_pko_reg_queue_ptrs1_s {
- uint64_t reserved_2_63:62;
- uint64_t idx3:1;
- uint64_t qid7:1;
- } s;
- struct cvmx_pko_reg_queue_ptrs1_s cn50xx;
- struct cvmx_pko_reg_queue_ptrs1_s cn52xx;
- struct cvmx_pko_reg_queue_ptrs1_s cn52xxp1;
- struct cvmx_pko_reg_queue_ptrs1_s cn56xx;
- struct cvmx_pko_reg_queue_ptrs1_s cn56xxp1;
- struct cvmx_pko_reg_queue_ptrs1_s cn58xx;
- struct cvmx_pko_reg_queue_ptrs1_s cn58xxp1;
-};
-
-union cvmx_pko_reg_read_idx {
- uint64_t u64;
- struct cvmx_pko_reg_read_idx_s {
- uint64_t reserved_16_63:48;
- uint64_t inc:8;
- uint64_t index:8;
- } s;
- struct cvmx_pko_reg_read_idx_s cn30xx;
- struct cvmx_pko_reg_read_idx_s cn31xx;
- struct cvmx_pko_reg_read_idx_s cn38xx;
- struct cvmx_pko_reg_read_idx_s cn38xxp2;
- struct cvmx_pko_reg_read_idx_s cn50xx;
- struct cvmx_pko_reg_read_idx_s cn52xx;
- struct cvmx_pko_reg_read_idx_s cn52xxp1;
- struct cvmx_pko_reg_read_idx_s cn56xx;
- struct cvmx_pko_reg_read_idx_s cn56xxp1;
- struct cvmx_pko_reg_read_idx_s cn58xx;
- struct cvmx_pko_reg_read_idx_s cn58xxp1;
-};
-
-#endif
diff --git a/drivers/staging/octeon/cvmx-pko.c b/drivers/staging/octeon/cvmx-pko.c
deleted file mode 100644
index 50a2c9bd5a55..000000000000
--- a/drivers/staging/octeon/cvmx-pko.c
+++ /dev/null
@@ -1,506 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/*
- * Support library for the hardware Packet Output unit.
- */
-
-#include <asm/octeon/octeon.h>
-
-#include "cvmx-config.h"
-#include "cvmx-pko.h"
-#include "cvmx-helper.h"
-
-/**
- * Internal state of packet output
- */
-
-/**
- * Call before any other calls to initialize the packet
- * output system. This does chip global config, and should only be
- * done by one core.
- */
-
-void cvmx_pko_initialize_global(void)
-{
- int i;
- uint64_t priority = 8;
- union cvmx_pko_reg_cmd_buf config;
-
- /*
- * Set the size of the PKO command buffers to an odd number of
- * 64bit words. This allows the normal two word send to stay
- * aligned and never span a command word buffer.
- */
- config.u64 = 0;
- config.s.pool = CVMX_FPA_OUTPUT_BUFFER_POOL;
- config.s.size = CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE / 8 - 1;
-
- cvmx_write_csr(CVMX_PKO_REG_CMD_BUF, config.u64);
-
- for (i = 0; i < CVMX_PKO_MAX_OUTPUT_QUEUES; i++)
- cvmx_pko_config_port(CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID, i, 1,
- &priority);
-
- /*
- * If we aren't using all of the queues optimize PKO's
- * internal memory.
- */
- if (OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)
- || OCTEON_IS_MODEL(OCTEON_CN56XX)
- || OCTEON_IS_MODEL(OCTEON_CN52XX)) {
- int num_interfaces = cvmx_helper_get_number_of_interfaces();
- int last_port =
- cvmx_helper_get_last_ipd_port(num_interfaces - 1);
- int max_queues =
- cvmx_pko_get_base_queue(last_port) +
- cvmx_pko_get_num_queues(last_port);
- if (OCTEON_IS_MODEL(OCTEON_CN38XX)) {
- if (max_queues <= 32)
- cvmx_write_csr(CVMX_PKO_REG_QUEUE_MODE, 2);
- else if (max_queues <= 64)
- cvmx_write_csr(CVMX_PKO_REG_QUEUE_MODE, 1);
- } else {
- if (max_queues <= 64)
- cvmx_write_csr(CVMX_PKO_REG_QUEUE_MODE, 2);
- else if (max_queues <= 128)
- cvmx_write_csr(CVMX_PKO_REG_QUEUE_MODE, 1);
- }
- }
-}
-
-/**
- * This function does per-core initialization required by the PKO routines.
- * This must be called on all cores that will do packet output, and must
- * be called after the FPA has been initialized and filled with pages.
- *
- * Returns 0 on success
- * !0 on failure
- */
-int cvmx_pko_initialize_local(void)
-{
- /* Nothing to do */
- return 0;
-}
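/*
 * Sketch of the bring-up order implied by the comments above: the
 * global configuration runs on a single core, every transmitting core
 * performs the (currently empty) local init, and PKO is then enabled.
 * The is_first_core flag is an illustrative assumption.
 */
static void example_pko_bringup(int is_first_core)
{
	if (is_first_core)
		cvmx_pko_initialize_global();

	if (cvmx_pko_initialize_local() != 0)
		cvmx_dprintf("ERROR: PKO local initialization failed\n");

	if (is_first_core)
		cvmx_pko_enable();
}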
-
-/**
- * Enables the packet output hardware. It must already be
- * configured.
- */
-void cvmx_pko_enable(void)
-{
- union cvmx_pko_reg_flags flags;
-
- flags.u64 = cvmx_read_csr(CVMX_PKO_REG_FLAGS);
- if (flags.s.ena_pko)
- cvmx_dprintf
- ("Warning: Enabling PKO when PKO already enabled.\n");
-
- flags.s.ena_dwb = 1;
- flags.s.ena_pko = 1;
- /*
- * Always enable big endian for 3-word commands. Does nothing
- * for 2-word commands.
- */
- flags.s.store_be = 1;
- cvmx_write_csr(CVMX_PKO_REG_FLAGS, flags.u64);
-}
-
-/**
- * Disables the packet output. Does not affect any configuration.
- */
-void cvmx_pko_disable(void)
-{
- union cvmx_pko_reg_flags pko_reg_flags;
- pko_reg_flags.u64 = cvmx_read_csr(CVMX_PKO_REG_FLAGS);
- pko_reg_flags.s.ena_pko = 0;
- cvmx_write_csr(CVMX_PKO_REG_FLAGS, pko_reg_flags.u64);
-}
-
-
-/**
- * Reset the packet output.
- */
-static void __cvmx_pko_reset(void)
-{
- union cvmx_pko_reg_flags pko_reg_flags;
- pko_reg_flags.u64 = cvmx_read_csr(CVMX_PKO_REG_FLAGS);
- pko_reg_flags.s.reset = 1;
- cvmx_write_csr(CVMX_PKO_REG_FLAGS, pko_reg_flags.u64);
-}
-
-/**
- * Shutdown and free resources required by packet output.
- */
-void cvmx_pko_shutdown(void)
-{
- union cvmx_pko_mem_queue_ptrs config;
- int queue;
-
- cvmx_pko_disable();
-
- for (queue = 0; queue < CVMX_PKO_MAX_OUTPUT_QUEUES; queue++) {
- config.u64 = 0;
- config.s.tail = 1;
- config.s.index = 0;
- config.s.port = CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID;
- config.s.queue = queue & 0x7f;
- config.s.qos_mask = 0;
- config.s.buf_ptr = 0;
- if (!OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
- union cvmx_pko_reg_queue_ptrs1 config1;
- config1.u64 = 0;
- config1.s.qid7 = queue >> 7;
- cvmx_write_csr(CVMX_PKO_REG_QUEUE_PTRS1, config1.u64);
- }
- cvmx_write_csr(CVMX_PKO_MEM_QUEUE_PTRS, config.u64);
- cvmx_cmd_queue_shutdown(CVMX_CMD_QUEUE_PKO(queue));
- }
- __cvmx_pko_reset();
-}
-
-/**
- * Configure an output port and the associated queues for use.
- *
- * @port: Port to configure.
- * @base_queue: First queue number to associate with this port.
- * @num_queues: Number of queues to associate with this port
- * @priority: Array of priority levels for each queue. Values are
- * allowed to be 0-8. A value of 8 gets 8 times the traffic
- * of a value of 1. A value of 0 indicates that the queue
- * does not participate in any rounds. These priorities can
- * be changed on the fly while the PKO is enabled. A priority
- * of 9 indicates that static priority should be used. If
- * static priority is used, all queues with static priority must be
- * contiguous starting at the base_queue, and lower numbered
- * queues have higher priority than higher numbered queues.
- * There must be num_queues elements in the array.
- */
-cvmx_pko_status_t cvmx_pko_config_port(uint64_t port, uint64_t base_queue,
- uint64_t num_queues,
- const uint64_t priority[])
-{
- cvmx_pko_status_t result_code;
- uint64_t queue;
- union cvmx_pko_mem_queue_ptrs config;
- union cvmx_pko_reg_queue_ptrs1 config1;
- int static_priority_base = -1;
- int static_priority_end = -1;
-
- if ((port >= CVMX_PKO_NUM_OUTPUT_PORTS)
- && (port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID)) {
- cvmx_dprintf("ERROR: cvmx_pko_config_port: Invalid port %llu\n",
- (unsigned long long)port);
- return CVMX_PKO_INVALID_PORT;
- }
-
- if (base_queue + num_queues > CVMX_PKO_MAX_OUTPUT_QUEUES) {
- cvmx_dprintf
- ("ERROR: cvmx_pko_config_port: Invalid queue range %llu\n",
- (unsigned long long)(base_queue + num_queues));
- return CVMX_PKO_INVALID_QUEUE;
- }
-
- if (port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID) {
- /*
- * Validate the static queue priority setup and set
- * static_priority_base and static_priority_end
- * accordingly.
- */
- for (queue = 0; queue < num_queues; queue++) {
- /* Find first queue of static priority */
- if (static_priority_base == -1
- && priority[queue] ==
- CVMX_PKO_QUEUE_STATIC_PRIORITY)
- static_priority_base = queue;
- /* Find last queue of static priority */
- if (static_priority_base != -1
- && static_priority_end == -1
- && priority[queue] != CVMX_PKO_QUEUE_STATIC_PRIORITY
- && queue)
- static_priority_end = queue - 1;
- else if (static_priority_base != -1
- && static_priority_end == -1
- && queue == num_queues - 1)
- /* all queues are static priority */
- static_priority_end = queue;
- /*
- * Check to make sure all static priority
- * queues are contiguous. Also catches some
- * cases of static priorities not starting at
- * queue 0.
- */
- if (static_priority_end != -1
- && (int)queue > static_priority_end
- && priority[queue] ==
- CVMX_PKO_QUEUE_STATIC_PRIORITY) {
- cvmx_dprintf("ERROR: cvmx_pko_config_port: "
- "Static priority queues aren't "
- "contiguous or don't start at "
- "base queue. q: %d, eq: %d\n",
- (int)queue, static_priority_end);
- return CVMX_PKO_INVALID_PRIORITY;
- }
- }
- if (static_priority_base > 0) {
- cvmx_dprintf("ERROR: cvmx_pko_config_port: Static "
- "priority queues don't start at base "
- "queue. sq: %d\n",
- static_priority_base);
- return CVMX_PKO_INVALID_PRIORITY;
- }
-#if 0
- cvmx_dprintf("Port %d: Static priority queue base: %d, "
- "end: %d\n", port,
- static_priority_base, static_priority_end);
-#endif
- }
- /*
- * At this point, static_priority_base and static_priority_end
- * are either both -1, or are valid start/end queue
- * numbers.
- */
-
- result_code = CVMX_PKO_SUCCESS;
-
-#ifdef PKO_DEBUG
- cvmx_dprintf("num queues: %d (%lld,%lld)\n", num_queues,
- CVMX_PKO_QUEUES_PER_PORT_INTERFACE0,
- CVMX_PKO_QUEUES_PER_PORT_INTERFACE1);
-#endif
-
- for (queue = 0; queue < num_queues; queue++) {
- uint64_t *buf_ptr = NULL;
-
- config1.u64 = 0;
- config1.s.idx3 = queue >> 3;
- config1.s.qid7 = (base_queue + queue) >> 7;
-
- config.u64 = 0;
- config.s.tail = queue == (num_queues - 1);
- config.s.index = queue;
- config.s.port = port;
- config.s.queue = base_queue + queue;
-
- if (!cvmx_octeon_is_pass1()) {
- config.s.static_p = static_priority_base >= 0;
- config.s.static_q = (int)queue <= static_priority_end;
- config.s.s_tail = (int)queue == static_priority_end;
- }
- /*
- * Convert the priority into an enable bit field. Try
- * to space the bits out evenly so the packets don't
- * get grouped up.
- */
- switch ((int)priority[queue]) {
- case 0:
- config.s.qos_mask = 0x00;
- break;
- case 1:
- config.s.qos_mask = 0x01;
- break;
- case 2:
- config.s.qos_mask = 0x11;
- break;
- case 3:
- config.s.qos_mask = 0x49;
- break;
- case 4:
- config.s.qos_mask = 0x55;
- break;
- case 5:
- config.s.qos_mask = 0x57;
- break;
- case 6:
- config.s.qos_mask = 0x77;
- break;
- case 7:
- config.s.qos_mask = 0x7f;
- break;
- case 8:
- config.s.qos_mask = 0xff;
- break;
- case CVMX_PKO_QUEUE_STATIC_PRIORITY:
- /* Pass 1 will fall through to the error case */
- if (!cvmx_octeon_is_pass1()) {
- config.s.qos_mask = 0xff;
- break;
- }
- default:
- cvmx_dprintf("ERROR: cvmx_pko_config_port: Invalid "
- "priority %llu\n",
- (unsigned long long)priority[queue]);
- config.s.qos_mask = 0xff;
- result_code = CVMX_PKO_INVALID_PRIORITY;
- break;
- }
-
- if (port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID) {
- cvmx_cmd_queue_result_t cmd_res =
- cvmx_cmd_queue_initialize(CVMX_CMD_QUEUE_PKO
- (base_queue + queue),
- CVMX_PKO_MAX_QUEUE_DEPTH,
- CVMX_FPA_OUTPUT_BUFFER_POOL,
- CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE
- -
- CVMX_PKO_COMMAND_BUFFER_SIZE_ADJUST
- * 8);
- if (cmd_res != CVMX_CMD_QUEUE_SUCCESS) {
- switch (cmd_res) {
- case CVMX_CMD_QUEUE_NO_MEMORY:
- cvmx_dprintf("ERROR: "
- "cvmx_pko_config_port: "
- "Unable to allocate "
- "output buffer.\n");
- return CVMX_PKO_NO_MEMORY;
- case CVMX_CMD_QUEUE_ALREADY_SETUP:
- cvmx_dprintf
- ("ERROR: cvmx_pko_config_port: Port already setup.\n");
- return CVMX_PKO_PORT_ALREADY_SETUP;
- case CVMX_CMD_QUEUE_INVALID_PARAM:
- default:
- cvmx_dprintf
- ("ERROR: cvmx_pko_config_port: Command queue initialization failed.\n");
- return CVMX_PKO_CMD_QUEUE_INIT_ERROR;
- }
- }
-
- buf_ptr =
- (uint64_t *)
- cvmx_cmd_queue_buffer(CVMX_CMD_QUEUE_PKO
- (base_queue + queue));
- config.s.buf_ptr = cvmx_ptr_to_phys(buf_ptr);
- } else
- config.s.buf_ptr = 0;
-
- CVMX_SYNCWS;
-
- if (!OCTEON_IS_MODEL(OCTEON_CN3XXX))
- cvmx_write_csr(CVMX_PKO_REG_QUEUE_PTRS1, config1.u64);
- cvmx_write_csr(CVMX_PKO_MEM_QUEUE_PTRS, config.u64);
- }
-
- return result_code;
-}
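/*
 * Sketch of a cvmx_pko_config_port() call matching the documentation
 * above: four equal-weight queues starting at base queue 0 on port 0.
 * The port, base queue and queue count are illustrative assumptions.
 */
static cvmx_pko_status_t example_config_port0(void)
{
	static const uint64_t priority[4] = { 8, 8, 8, 8 };

	return cvmx_pko_config_port(0 /* port */, 0 /* base_queue */,
				    4 /* num_queues */, priority);
}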
-
-#ifdef PKO_DEBUG
-/**
- * Show map of ports -> queues for different cores.
- */
-void cvmx_pko_show_queue_map()
-{
- int core, port;
- int pko_output_ports = 36;
-
- cvmx_dprintf("port");
- for (port = 0; port < pko_output_ports; port++)
- cvmx_dprintf("%3d ", port);
- cvmx_dprintf("\n");
-
- for (core = 0; core < CVMX_MAX_CORES; core++) {
- cvmx_dprintf("\n%2d: ", core);
- for (port = 0; port < pko_output_ports; port++) {
- cvmx_dprintf("%3d ",
- cvmx_pko_get_base_queue_per_core(port,
- core));
- }
- }
- cvmx_dprintf("\n");
-}
-#endif
-
-/**
- * Rate limit a PKO port to a max packets/sec. This function is only
- * supported on CN51XX and higher, excluding CN58XX.
- *
- * @port: Port to rate limit
- * @packets_s: Maximum packet/sec
- * @burst: Maximum number of packets to burst in a row before rate
- * limiting cuts in.
- *
- * Returns Zero on success, negative on failure
- */
-int cvmx_pko_rate_limit_packets(int port, int packets_s, int burst)
-{
- union cvmx_pko_mem_port_rate0 pko_mem_port_rate0;
- union cvmx_pko_mem_port_rate1 pko_mem_port_rate1;
-
- pko_mem_port_rate0.u64 = 0;
- pko_mem_port_rate0.s.pid = port;
- pko_mem_port_rate0.s.rate_pkt =
- cvmx_sysinfo_get()->cpu_clock_hz / packets_s / 16;
- /* No cost per word since we are limited by packets/sec, not bits/sec */
- pko_mem_port_rate0.s.rate_word = 0;
-
- pko_mem_port_rate1.u64 = 0;
- pko_mem_port_rate1.s.pid = port;
- pko_mem_port_rate1.s.rate_lim =
- ((uint64_t) pko_mem_port_rate0.s.rate_pkt * burst) >> 8;
-
- cvmx_write_csr(CVMX_PKO_MEM_PORT_RATE0, pko_mem_port_rate0.u64);
- cvmx_write_csr(CVMX_PKO_MEM_PORT_RATE1, pko_mem_port_rate1.u64);
- return 0;
-}
-
-/**
- * Rate limit a PKO port to a max bits/sec. This function is only
- * supported on CN51XX and higher, excluding CN58XX.
- *
- * @port: Port to rate limit
- * @bits_s: PKO rate limit in bits/sec
- * @burst: Maximum number of bits to burst before rate
- * limiting cuts in.
- *
- * Returns Zero on success, negative on failure
- */
-int cvmx_pko_rate_limit_bits(int port, uint64_t bits_s, int burst)
-{
- union cvmx_pko_mem_port_rate0 pko_mem_port_rate0;
- union cvmx_pko_mem_port_rate1 pko_mem_port_rate1;
- uint64_t clock_rate = cvmx_sysinfo_get()->cpu_clock_hz;
- uint64_t tokens_per_bit = clock_rate * 16 / bits_s;
-
- pko_mem_port_rate0.u64 = 0;
- pko_mem_port_rate0.s.pid = port;
- /*
- * Each packet has a 12 bytes of interframe gap, an 8 byte
- * preamble, and a 4 byte CRC. These are not included in the
- * per word count. Multiply by 8 to covert to bits and divide
- * by 256 for limit granularity.
- */
- pko_mem_port_rate0.s.rate_pkt = (12 + 8 + 4) * 8 * tokens_per_bit / 256;
- /* Each 8 byte word has 64bits */
- pko_mem_port_rate0.s.rate_word = 64 * tokens_per_bit;
-
- pko_mem_port_rate1.u64 = 0;
- pko_mem_port_rate1.s.pid = port;
- pko_mem_port_rate1.s.rate_lim = tokens_per_bit * burst / 256;
-
- cvmx_write_csr(CVMX_PKO_MEM_PORT_RATE0, pko_mem_port_rate0.u64);
- cvmx_write_csr(CVMX_PKO_MEM_PORT_RATE1, pko_mem_port_rate1.u64);
- return 0;
-}
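/*
 * Sketch showing how the two rate limit helpers above might be
 * called, e.g. to cap port 1 at 10000 packets/sec with a 32 packet
 * burst, or at 100 Mbit/sec with a 128 kbit burst. All of the
 * numbers are illustrative assumptions.
 */
static void example_rate_limit_port1(void)
{
	cvmx_pko_rate_limit_packets(1 /* port */, 10000, 32);
	cvmx_pko_rate_limit_bits(1 /* port */, 100000000ull, 131072);
}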
diff --git a/drivers/staging/octeon/cvmx-pko.h b/drivers/staging/octeon/cvmx-pko.h
deleted file mode 100644
index de3412aada5d..000000000000
--- a/drivers/staging/octeon/cvmx-pko.h
+++ /dev/null
@@ -1,610 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/**
- *
- * Interface to the hardware Packet Output unit.
- *
- * Starting with SDK 1.7.0, the PKO output functions now support
- * two types of locking. CVMX_PKO_LOCK_ATOMIC_TAG continues to
- * function similarly to previous SDKs by using POW atomic tags
- * to preserve ordering and exclusivity. As a new option, you
- * can now pass CVMX_PKO_LOCK_CMD_QUEUE, which uses ll/sc
- * memory based locking instead. This locking has the advantage
- * of not affecting the tag state but doesn't preserve packet
- * ordering. CVMX_PKO_LOCK_CMD_QUEUE is appropriate in most
- * generic code, while CVMX_PKO_LOCK_ATOMIC_TAG should be used
- * with hand tuned fast path code.
- *
- * Some of the other SDK differences visible with command
- * queuing:
- * - PKO indexes are no longer stored in the FAU. A large
- * percentage of the FAU register block used to be tied up
- * maintaining PKO queue pointers. These are now stored in a
- * global named block.
- * - The PKO <b>use_locking</b> parameter can now have a global
- * effect. Since all applications use the same named block,
- * queue locking correctly applies across all operating
- * systems when using CVMX_PKO_LOCK_CMD_QUEUE.
- * - PKO 3 word commands are now supported. Use
- * cvmx_pko_send_packet_finish3().
- *
- */
-
-#ifndef __CVMX_PKO_H__
-#define __CVMX_PKO_H__
-
-#include "cvmx-fpa.h"
-#include "cvmx-pow.h"
-#include "cvmx-cmd-queue.h"
-#include "cvmx-pko-defs.h"
-
-/* Adjust the command buffer size by 1 word so that in the case of using only
- * two word PKO commands no command words straddle buffers. The useful values
- * for this are 0 and 1. */
-#define CVMX_PKO_COMMAND_BUFFER_SIZE_ADJUST (1)
-
-#define CVMX_PKO_MAX_OUTPUT_QUEUES_STATIC 256
-#define CVMX_PKO_MAX_OUTPUT_QUEUES ((OCTEON_IS_MODEL(OCTEON_CN31XX) || \
- OCTEON_IS_MODEL(OCTEON_CN3010) || OCTEON_IS_MODEL(OCTEON_CN3005) || \
- OCTEON_IS_MODEL(OCTEON_CN50XX)) ? 32 : \
- (OCTEON_IS_MODEL(OCTEON_CN58XX) || \
- OCTEON_IS_MODEL(OCTEON_CN56XX)) ? 256 : 128)
-#define CVMX_PKO_NUM_OUTPUT_PORTS 40
-/* use this for queues that are not used */
-#define CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID 63
-#define CVMX_PKO_QUEUE_STATIC_PRIORITY 9
-#define CVMX_PKO_ILLEGAL_QUEUE 0xFFFF
-#define CVMX_PKO_MAX_QUEUE_DEPTH 0
-
-typedef enum {
- CVMX_PKO_SUCCESS,
- CVMX_PKO_INVALID_PORT,
- CVMX_PKO_INVALID_QUEUE,
- CVMX_PKO_INVALID_PRIORITY,
- CVMX_PKO_NO_MEMORY,
- CVMX_PKO_PORT_ALREADY_SETUP,
- CVMX_PKO_CMD_QUEUE_INIT_ERROR
-} cvmx_pko_status_t;
-
-/**
- * This enumeration represents the different locking modes supported by PKO.
- */
-typedef enum {
- /*
- * PKO doesn't do any locking. It is the responsibility of the
- * application to make sure that no other core is accessing
- * the same queue at the same time
- */
- CVMX_PKO_LOCK_NONE = 0,
- /*
- * PKO performs an atomic tagswitch to ensure exclusive access
- * to the output queue. This will maintain packet ordering on
- * output.
- */
- CVMX_PKO_LOCK_ATOMIC_TAG = 1,
- /*
- * PKO uses the common command queue locks to ensure exclusive
- * access to the output queue. This is a memory based
- * ll/sc. This is the most portable locking mechanism.
- */
- CVMX_PKO_LOCK_CMD_QUEUE = 2,
-} cvmx_pko_lock_t;
-
-typedef struct {
- uint32_t packets;
- uint64_t octets;
- uint64_t doorbell;
-} cvmx_pko_port_status_t;
-
-/**
- * This structure defines the address to use on a packet enqueue
- */
-typedef union {
- uint64_t u64;
- struct {
- /* Must be CVMX_IO_SEG */
- uint64_t mem_space:2;
- /* Must be zero */
- uint64_t reserved:13;
- /* Must be one */
- uint64_t is_io:1;
- /* The ID of the device on the non-coherent bus */
- uint64_t did:8;
- /* Must be zero */
- uint64_t reserved2:4;
- /* Must be zero */
- uint64_t reserved3:18;
- /*
- * The hardware likes to have the output port in
- * addition to the output queue.
- */
- uint64_t port:6;
- /*
- * The output queue to send the packet to (0-127 are
- * legal)
- */
- uint64_t queue:9;
- /* Must be zero */
- uint64_t reserved4:3;
- } s;
-} cvmx_pko_doorbell_address_t;
-
-/**
- * Structure of the first packet output command word.
- */
-typedef union {
- uint64_t u64;
- struct {
- /*
- * The size of the reg1 operation - could be 8, 16,
- * 32, or 64 bits.
- */
- uint64_t size1:2;
- /*
- * The size of the reg0 operation - could be 8, 16,
- * 32, or 64 bits.
- */
- uint64_t size0:2;
- /*
- * If set, subtract 1, if clear, subtract packet
- * size.
- */
- uint64_t subone1:1;
- /*
- * The register, subtract will be done if reg1 is
- * non-zero.
- */
- uint64_t reg1:11;
- /* If set, subtract 1, if clear, subtract packet size */
- uint64_t subone0:1;
- /* The register, subtract will be done if reg0 is non-zero */
- uint64_t reg0:11;
- /*
- * When set, interpret segment pointer and segment
- * bytes in little endian order.
- */
- uint64_t le:1;
- /*
- * When set, packet data not allocated in L2 cache by
- * PKO.
- */
- uint64_t n2:1;
- /*
- * If set and rsp is set, word3 contains a pointer to
- * a work queue entry.
- */
- uint64_t wqp:1;
- /* If set, the hardware will send a response when done */
- uint64_t rsp:1;
- /*
- * If set, the supplied pkt_ptr is really a pointer to
- * a list of pkt_ptr's.
- */
- uint64_t gather:1;
- /*
- * If ipoffp1 is non zero, (ipoffp1-1) is the number
- * of bytes to IP header, and the hardware will
- * calculate and insert the UDP/TCP checksum.
- */
- uint64_t ipoffp1:7;
- /*
- * If set, ignore the I bit (force to zero) from all
- * pointer structures.
- */
- uint64_t ignore_i:1;
- /*
- * If clear, the hardware will attempt to free the
- * buffers containing the packet.
- */
- uint64_t dontfree:1;
- /*
- * The total number of segs in the packet, if gather
- * set, also gather list length.
- */
- uint64_t segs:6;
- /* Including L2, but no trailing CRC */
- uint64_t total_bytes:16;
- } s;
-} cvmx_pko_command_word0_t;
-
-/* CSR typedefs have been moved to cvmx-csr-*.h */
-
-/**
- * Definition of internal state for Packet output processing
- */
-typedef struct {
- /* ptr to start of buffer, offset kept in FAU reg */
- uint64_t *start_ptr;
-} cvmx_pko_state_elem_t;
-
-/**
- * Call before any other calls to initialize the packet
- * output system.
- */
-extern void cvmx_pko_initialize_global(void);
-extern int cvmx_pko_initialize_local(void);
-
-/**
- * Enables the packet output hardware. It must already be
- * configured.
- */
-extern void cvmx_pko_enable(void);
-
-/**
- * Disables the packet output. Does not affect any configuration.
- */
-extern void cvmx_pko_disable(void);
-
-/**
- * Shutdown and free resources required by packet output.
- */
-
-extern void cvmx_pko_shutdown(void);
-
-/**
- * Configure an output port and the associated queues for use.
- *
- * @port: Port to configure.
- * @base_queue: First queue number to associate with this port.
- * @num_queues: Number of queues to associate with this port.
- * @priority: Array of priority levels for each queue. Values are
- *              allowed to be 1-8. A value of 8 gets 8 times the traffic
- * of a value of 1. There must be num_queues elements in the
- * array.
- */
-extern cvmx_pko_status_t cvmx_pko_config_port(uint64_t port,
- uint64_t base_queue,
- uint64_t num_queues,
- const uint64_t priority[]);
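-
-/*
- * Editor's note: illustrative bring-up sketch, not part of the original SDK
- * header. It shows the expected call order using only functions declared in
- * this file; the port number (0) and the single priority value are
- * placeholder assumptions.
- *
- *	uint64_t prio[1] = { 1 };
- *
- *	cvmx_pko_initialize_global();
- *	if (cvmx_pko_config_port(0, cvmx_pko_get_base_queue(0), 1, prio) ==
- *	    CVMX_PKO_SUCCESS)
- *		cvmx_pko_enable();
- */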
-
-/**
- * Ring the packet output doorbell. This tells the packet
- * output hardware that "len" command words have been added
- * to its pending list. This command includes the required
- * CVMX_SYNCWS before the doorbell ring.
- *
- * @port: Port the packet is for
- * @queue: Queue the packet is for
- * @len: Length of the command in 64 bit words
- */
-static inline void cvmx_pko_doorbell(uint64_t port, uint64_t queue,
- uint64_t len)
-{
- cvmx_pko_doorbell_address_t ptr;
-
- ptr.u64 = 0;
- ptr.s.mem_space = CVMX_IO_SEG;
- ptr.s.did = CVMX_OCT_DID_PKT_SEND;
- ptr.s.is_io = 1;
- ptr.s.port = port;
- ptr.s.queue = queue;
- /*
- * Need to make sure output queue data is in DRAM before
- * doorbell write.
- */
- CVMX_SYNCWS;
- cvmx_write_io(ptr.u64, len);
-}
-
-/**
- * Prepare to send a packet. This may initiate a tag switch to
- * get exclusive access to the output queue structure, and
- * performs other prep work for the packet send operation.
- *
- * cvmx_pko_send_packet_finish() MUST be called after this function is called,
- * and must be called with the same port/queue/use_locking arguments.
- *
- * The use_locking parameter allows the caller to use three
- * possible locking modes.
- * - CVMX_PKO_LOCK_NONE
- * - PKO doesn't do any locking. It is the responsibility
- * of the application to make sure that no other core
- * is accessing the same queue at the same time.
- * - CVMX_PKO_LOCK_ATOMIC_TAG
- * - PKO performs an atomic tagswitch to ensure exclusive
- * access to the output queue. This will maintain
- * packet ordering on output.
- * - CVMX_PKO_LOCK_CMD_QUEUE
- * - PKO uses the common command queue locks to ensure
- * exclusive access to the output queue. This is a
- * memory based ll/sc. This is the most portable
- * locking mechanism.
- *
- * NOTE: If atomic locking is used, the POW entry CANNOT be
- * descheduled, as it does not contain a valid WQE pointer.
- *
- * @port: Port to send it on
- * @queue: Queue to use
- * @use_locking: CVMX_PKO_LOCK_NONE, CVMX_PKO_LOCK_ATOMIC_TAG, or
- * CVMX_PKO_LOCK_CMD_QUEUE
- */
-
-static inline void cvmx_pko_send_packet_prepare(uint64_t port, uint64_t queue,
- cvmx_pko_lock_t use_locking)
-{
- if (use_locking == CVMX_PKO_LOCK_ATOMIC_TAG) {
- /*
- * Must do a full switch here to handle all cases. We
- * use a fake WQE pointer, as the POW does not access
- * this memory. The WQE pointer and group are only
- * used if this work is descheduled, which is not
- * supported by the
- * cvmx_pko_send_packet_prepare/cvmx_pko_send_packet_finish
- * combination. Note that this is a special case in
- * which these fake values can be used - this is not a
- * general technique.
- */
- uint32_t tag =
- CVMX_TAG_SW_BITS_INTERNAL << CVMX_TAG_SW_SHIFT |
- CVMX_TAG_SUBGROUP_PKO << CVMX_TAG_SUBGROUP_SHIFT |
- (CVMX_TAG_SUBGROUP_MASK & queue);
- cvmx_pow_tag_sw_full((cvmx_wqe_t *) cvmx_phys_to_ptr(0x80), tag,
- CVMX_POW_TAG_TYPE_ATOMIC, 0);
- }
-}
-
-/**
- * Complete packet output. cvmx_pko_send_packet_prepare() must be
- * called exactly once before this, and the same parameters must be
- * passed to both cvmx_pko_send_packet_prepare() and
- * cvmx_pko_send_packet_finish().
- *
- * @port: Port to send it on
- * @queue: Queue to use
- * @pko_command:
- * PKO HW command word
- * @packet: Packet to send
- * @use_locking: CVMX_PKO_LOCK_NONE, CVMX_PKO_LOCK_ATOMIC_TAG, or
- * CVMX_PKO_LOCK_CMD_QUEUE
- *
- * Returns CVMX_PKO_SUCCESS on success, or an error code on
- * failure of the output operation.
- */
-static inline cvmx_pko_status_t cvmx_pko_send_packet_finish(
- uint64_t port,
- uint64_t queue,
- cvmx_pko_command_word0_t pko_command,
- union cvmx_buf_ptr packet,
- cvmx_pko_lock_t use_locking)
-{
- cvmx_cmd_queue_result_t result;
- if (use_locking == CVMX_PKO_LOCK_ATOMIC_TAG)
- cvmx_pow_tag_sw_wait();
- result = cvmx_cmd_queue_write2(CVMX_CMD_QUEUE_PKO(queue),
- (use_locking == CVMX_PKO_LOCK_CMD_QUEUE),
- pko_command.u64, packet.u64);
- if (likely(result == CVMX_CMD_QUEUE_SUCCESS)) {
- cvmx_pko_doorbell(port, queue, 2);
- return CVMX_PKO_SUCCESS;
- } else if ((result == CVMX_CMD_QUEUE_NO_MEMORY)
- || (result == CVMX_CMD_QUEUE_FULL)) {
- return CVMX_PKO_NO_MEMORY;
- } else {
- return CVMX_PKO_INVALID_QUEUE;
- }
-}
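-
-/*
- * Editor's note: illustrative send sequence, not part of the original SDK
- * header. "packet" is assumed to be a union cvmx_buf_ptr that already
- * describes the outgoing data, and "port", "queue" and "len" are
- * placeholders; prepare and finish must use the same locking mode.
- *
- *	cvmx_pko_command_word0_t cmd;
- *	cvmx_pko_status_t status;
- *
- *	cvmx_pko_send_packet_prepare(port, queue, CVMX_PKO_LOCK_CMD_QUEUE);
- *	cmd.u64 = 0;
- *	cmd.s.segs = 1;
- *	cmd.s.total_bytes = len;
- *	status = cvmx_pko_send_packet_finish(port, queue, cmd, packet,
- *					     CVMX_PKO_LOCK_CMD_QUEUE);
- */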
-
-/**
- * Complete packet output. cvmx_pko_send_packet_prepare() must be
- * called exactly once before this, and the same parameters must be
- * passed to both cvmx_pko_send_packet_prepare() and
- * cvmx_pko_send_packet_finish().
- *
- * @port: Port to send it on
- * @queue: Queue to use
- * @pko_command:
- * PKO HW command word
- * @packet: Packet to send
- * @addr: Physical address of a work queue entry, or a physical address
- *     to zero on completion.
- * @use_locking: CVMX_PKO_LOCK_NONE, CVMX_PKO_LOCK_ATOMIC_TAG, or
- * CVMX_PKO_LOCK_CMD_QUEUE
- *
- * Returns CVMX_PKO_SUCCESS on success, or an error code on
- * failure of the output operation.
- */
-static inline cvmx_pko_status_t cvmx_pko_send_packet_finish3(
- uint64_t port,
- uint64_t queue,
- cvmx_pko_command_word0_t pko_command,
- union cvmx_buf_ptr packet,
- uint64_t addr,
- cvmx_pko_lock_t use_locking)
-{
- cvmx_cmd_queue_result_t result;
- if (use_locking == CVMX_PKO_LOCK_ATOMIC_TAG)
- cvmx_pow_tag_sw_wait();
- result = cvmx_cmd_queue_write3(CVMX_CMD_QUEUE_PKO(queue),
- (use_locking == CVMX_PKO_LOCK_CMD_QUEUE),
- pko_command.u64, packet.u64, addr);
- if (likely(result == CVMX_CMD_QUEUE_SUCCESS)) {
- cvmx_pko_doorbell(port, queue, 3);
- return CVMX_PKO_SUCCESS;
- } else if ((result == CVMX_CMD_QUEUE_NO_MEMORY)
- || (result == CVMX_CMD_QUEUE_FULL)) {
- return CVMX_PKO_NO_MEMORY;
- } else {
- return CVMX_PKO_INVALID_QUEUE;
- }
-}
-
-/**
- * Return the pko output queue associated with a port and a specific core.
- * In normal mode (PKO lockless operation is disabled), the value returned
- * is the base queue.
- *
- * @port: Port number
- * @core: Core to get queue for
- *
- * Returns Core-specific output queue
- */
-static inline int cvmx_pko_get_base_queue_per_core(int port, int core)
-{
-#ifndef CVMX_HELPER_PKO_MAX_PORTS_INTERFACE0
-#define CVMX_HELPER_PKO_MAX_PORTS_INTERFACE0 16
-#endif
-#ifndef CVMX_HELPER_PKO_MAX_PORTS_INTERFACE1
-#define CVMX_HELPER_PKO_MAX_PORTS_INTERFACE1 16
-#endif
-
- if (port < CVMX_PKO_MAX_PORTS_INTERFACE0)
- return port * CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 + core;
- else if (port >= 16 && port < 16 + CVMX_PKO_MAX_PORTS_INTERFACE1)
- return CVMX_PKO_MAX_PORTS_INTERFACE0 *
- CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 + (port -
- 16) *
- CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 + core;
- else if ((port >= 32) && (port < 36))
- return CVMX_PKO_MAX_PORTS_INTERFACE0 *
- CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 +
- CVMX_PKO_MAX_PORTS_INTERFACE1 *
- CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 + (port -
- 32) *
- CVMX_PKO_QUEUES_PER_PORT_PCI;
- else if ((port >= 36) && (port < 40))
- return CVMX_PKO_MAX_PORTS_INTERFACE0 *
- CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 +
- CVMX_PKO_MAX_PORTS_INTERFACE1 *
- CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 +
- 4 * CVMX_PKO_QUEUES_PER_PORT_PCI + (port -
- 36) *
- CVMX_PKO_QUEUES_PER_PORT_LOOP;
- else
- /* Given the limit on the number of ports we can map to
-		 * CVMX_PKO_MAX_OUTPUT_QUEUES_STATIC queues (currently 256,
- * divided among all cores), the remaining unmapped ports
- * are assigned an illegal queue number */
- return CVMX_PKO_ILLEGAL_QUEUE;
-}
-
-/**
- * For a given port number, return the base pko output queue
- * for the port.
- *
- * @port: Port number
- * Returns Base output queue
- */
-static inline int cvmx_pko_get_base_queue(int port)
-{
- return cvmx_pko_get_base_queue_per_core(port, 0);
-}
-
-/**
- * For a given port number, return the number of pko output queues.
- *
- * @port: Port number
- * Returns Number of output queues
- */
-static inline int cvmx_pko_get_num_queues(int port)
-{
- if (port < 16)
- return CVMX_PKO_QUEUES_PER_PORT_INTERFACE0;
- else if (port < 32)
- return CVMX_PKO_QUEUES_PER_PORT_INTERFACE1;
- else if (port < 36)
- return CVMX_PKO_QUEUES_PER_PORT_PCI;
- else if (port < 40)
- return CVMX_PKO_QUEUES_PER_PORT_LOOP;
- else
- return 0;
-}
-
-/**
- * Get the status counters for a port.
- *
- * @port_num: Port number to get statistics for.
- * @clear: Set to 1 to clear the counters after they are read
- * @status: Where to put the results.
- */
-static inline void cvmx_pko_get_port_status(uint64_t port_num, uint64_t clear,
- cvmx_pko_port_status_t *status)
-{
- union cvmx_pko_reg_read_idx pko_reg_read_idx;
- union cvmx_pko_mem_count0 pko_mem_count0;
- union cvmx_pko_mem_count1 pko_mem_count1;
-
- pko_reg_read_idx.u64 = 0;
- pko_reg_read_idx.s.index = port_num;
- cvmx_write_csr(CVMX_PKO_REG_READ_IDX, pko_reg_read_idx.u64);
-
- pko_mem_count0.u64 = cvmx_read_csr(CVMX_PKO_MEM_COUNT0);
- status->packets = pko_mem_count0.s.count;
- if (clear) {
- pko_mem_count0.s.count = port_num;
- cvmx_write_csr(CVMX_PKO_MEM_COUNT0, pko_mem_count0.u64);
- }
-
- pko_mem_count1.u64 = cvmx_read_csr(CVMX_PKO_MEM_COUNT1);
- status->octets = pko_mem_count1.s.count;
- if (clear) {
- pko_mem_count1.s.count = port_num;
- cvmx_write_csr(CVMX_PKO_MEM_COUNT1, pko_mem_count1.u64);
- }
-
- if (OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
- union cvmx_pko_mem_debug9 debug9;
- pko_reg_read_idx.s.index = cvmx_pko_get_base_queue(port_num);
- cvmx_write_csr(CVMX_PKO_REG_READ_IDX, pko_reg_read_idx.u64);
- debug9.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG9);
- status->doorbell = debug9.cn38xx.doorbell;
- } else {
- union cvmx_pko_mem_debug8 debug8;
- pko_reg_read_idx.s.index = cvmx_pko_get_base_queue(port_num);
- cvmx_write_csr(CVMX_PKO_REG_READ_IDX, pko_reg_read_idx.u64);
- debug8.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG8);
- status->doorbell = debug8.cn58xx.doorbell;
- }
-}
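-
-/*
- * Editor's note: illustrative statistics read, not part of the original SDK
- * header. The port number (2) is a placeholder; passing clear=1 zeroes the
- * counters after they have been read.
- *
- *	cvmx_pko_port_status_t st;
- *
- *	cvmx_pko_get_port_status(2, 1, &st);
- *	pr_info("PKO port 2: %u packets, %llu octets\n",
- *		st.packets, (unsigned long long)st.octets);
- */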
-
-/**
- * Rate limit a PKO port to a max packets/sec. This function is only
- * supported on CN57XX, CN56XX, CN55XX, and CN54XX.
- *
- * @port: Port to rate limit
- * @packets_s: Maximum packet/sec
- * @burst: Maximum number of packets to burst in a row before rate
- * limiting cuts in.
- *
- * Returns Zero on success, negative on failure
- */
-extern int cvmx_pko_rate_limit_packets(int port, int packets_s, int burst);
-
-/**
- * Rate limit a PKO port to a max bits/sec. This function is only
- * supported on CN57XX, CN56XX, CN55XX, and CN54XX.
- *
- * @port: Port to rate limit
- * @bits_s: PKO rate limit in bits/sec
- * @burst: Maximum number of bits to burst before rate
- * limiting cuts in.
- *
- * Returns Zero on success, negative on failure
- */
-extern int cvmx_pko_rate_limit_bits(int port, uint64_t bits_s, int burst);
-
-#endif /* __CVMX_PKO_H__ */
diff --git a/drivers/staging/octeon/cvmx-pow.h b/drivers/staging/octeon/cvmx-pow.h
deleted file mode 100644
index 999aefe3274c..000000000000
--- a/drivers/staging/octeon/cvmx-pow.h
+++ /dev/null
@@ -1,1982 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/**
- * Interface to the hardware Packet Order / Work unit.
- *
- * New, starting with SDK 1.7.0, cvmx-pow supports a number of
- * extended consistency checks. The define
- * CVMX_ENABLE_POW_CHECKS controls the runtime insertion of POW
- * internal state checks to find common programming errors. If
- * CVMX_ENABLE_POW_CHECKS is not defined, checks are by default
- * enabled. For example, cvmx-pow will check for the following
- * program errors or POW state inconsistency.
- * - Requesting a POW operation with an active tag switch in
- * progress.
- * - Waiting for a tag switch to complete for an excessively
- * long period. This is normally a sign of an error in locking
- * causing deadlock.
- * - Illegal tag switches from NULL_NULL.
- * - Illegal tag switches from NULL.
- * - Illegal deschedule request.
- * - WQE pointer not matching the one attached to the core by
- * the POW.
- *
- */
-
-#ifndef __CVMX_POW_H__
-#define __CVMX_POW_H__
-
-#include <asm/octeon/cvmx-pow-defs.h>
-
-#include "cvmx-scratch.h"
-#include "cvmx-wqe.h"
-
-/* Default to having all POW consistency checks turned on */
-#ifndef CVMX_ENABLE_POW_CHECKS
-#define CVMX_ENABLE_POW_CHECKS 1
-#endif
-
-enum cvmx_pow_tag_type {
- /* Tag ordering is maintained */
- CVMX_POW_TAG_TYPE_ORDERED = 0L,
- /* Tag ordering is maintained, and at most one PP has the tag */
- CVMX_POW_TAG_TYPE_ATOMIC = 1L,
- /*
- * The work queue entry from the order - NEVER tag switch from
- * NULL to NULL
- */
- CVMX_POW_TAG_TYPE_NULL = 2L,
- /* A tag switch to NULL, and there is no space reserved in POW
- * - NEVER tag switch to NULL_NULL
- * - NEVER tag switch from NULL_NULL
- * - NULL_NULL is entered at the beginning of time and on a deschedule.
- * - NULL_NULL can be exited by a new work request. A NULL_SWITCH
- * load can also switch the state to NULL
- */
- CVMX_POW_TAG_TYPE_NULL_NULL = 3L
-};
-
-/**
- * Wait flag values for pow functions.
- */
-typedef enum {
- CVMX_POW_WAIT = 1,
- CVMX_POW_NO_WAIT = 0,
-} cvmx_pow_wait_t;
-
-/**
- * POW tag operations. These are used in the data stored to the POW.
- */
-typedef enum {
- /*
- * switch the tag (only) for this PP
- * - the previous tag should be non-NULL in this case
- * - tag switch response required
- * - fields used: op, type, tag
- */
- CVMX_POW_TAG_OP_SWTAG = 0L,
- /*
- * switch the tag for this PP, with full information
- * - this should be used when the previous tag is NULL
- * - tag switch response required
- * - fields used: address, op, grp, type, tag
- */
- CVMX_POW_TAG_OP_SWTAG_FULL = 1L,
- /*
- * switch the tag (and/or group) for this PP and de-schedule
- * - OK to keep the tag the same and only change the group
- * - fields used: op, no_sched, grp, type, tag
- */
- CVMX_POW_TAG_OP_SWTAG_DESCH = 2L,
- /*
- * just de-schedule
- * - fields used: op, no_sched
- */
- CVMX_POW_TAG_OP_DESCH = 3L,
- /*
- * create an entirely new work queue entry
- * - fields used: address, op, qos, grp, type, tag
- */
- CVMX_POW_TAG_OP_ADDWQ = 4L,
- /*
- * just update the work queue pointer and grp for this PP
- * - fields used: address, op, grp
- */
- CVMX_POW_TAG_OP_UPDATE_WQP_GRP = 5L,
- /*
- * set the no_sched bit on the de-schedule list
- *
- * - does nothing if the selected entry is not on the
- * de-schedule list
- *
- * - does nothing if the stored work queue pointer does not
- * match the address field
- *
- * - fields used: address, index, op
- *
- * Before issuing a *_NSCHED operation, SW must guarantee
- * that all prior deschedules and set/clr NSCHED operations
- * are complete and all prior switches are complete. The
- * hardware provides the opsdone bit and swdone bit for SW
- * polling. After issuing a *_NSCHED operation, SW must
- * guarantee that the set/clr NSCHED is complete before any
- * subsequent operations.
- */
- CVMX_POW_TAG_OP_SET_NSCHED = 6L,
- /*
- * clears the no_sched bit on the de-schedule list
- *
- * - does nothing if the selected entry is not on the
- * de-schedule list
- *
- * - does nothing if the stored work queue pointer does not
- * match the address field
- *
- * - fields used: address, index, op
- *
- * Before issuing a *_NSCHED operation, SW must guarantee that
- * all prior deschedules and set/clr NSCHED operations are
- * complete and all prior switches are complete. The hardware
- * provides the opsdone bit and swdone bit for SW
- * polling. After issuing a *_NSCHED operation, SW must
- * guarantee that the set/clr NSCHED is complete before any
- * subsequent operations.
- */
- CVMX_POW_TAG_OP_CLR_NSCHED = 7L,
- /* do nothing */
- CVMX_POW_TAG_OP_NOP = 15L
-} cvmx_pow_tag_op_t;
-
-/**
- * This structure defines the store data on a store to POW
- */
-typedef union {
- uint64_t u64;
- struct {
- /*
- * Don't reschedule this entry. no_sched is used for
- * CVMX_POW_TAG_OP_SWTAG_DESCH and
- * CVMX_POW_TAG_OP_DESCH
- */
- uint64_t no_sched:1;
- uint64_t unused:2;
-		/* Contains index of entry for a CVMX_POW_TAG_OP_*_NSCHED */
- uint64_t index:13;
- /* The operation to perform */
- cvmx_pow_tag_op_t op:4;
- uint64_t unused2:2;
- /*
- * The QOS level for the packet. qos is only used for
- * CVMX_POW_TAG_OP_ADDWQ
- */
- uint64_t qos:3;
- /*
- * The group that the work queue entry will be
- * scheduled to grp is used for CVMX_POW_TAG_OP_ADDWQ,
- * CVMX_POW_TAG_OP_SWTAG_FULL,
- * CVMX_POW_TAG_OP_SWTAG_DESCH, and
- * CVMX_POW_TAG_OP_UPDATE_WQP_GRP
- */
- uint64_t grp:4;
- /*
- * The type of the tag. type is used for everything
- * except CVMX_POW_TAG_OP_DESCH,
- * CVMX_POW_TAG_OP_UPDATE_WQP_GRP, and
- * CVMX_POW_TAG_OP_*_NSCHED
- */
- uint64_t type:3;
- /*
- * The actual tag. tag is used for everything except
- * CVMX_POW_TAG_OP_DESCH,
- * CVMX_POW_TAG_OP_UPDATE_WQP_GRP, and
- * CVMX_POW_TAG_OP_*_NSCHED
- */
- uint64_t tag:32;
- } s;
-} cvmx_pow_tag_req_t;
-
-/**
- * This structure describes the address to load stuff from POW
- */
-typedef union {
- uint64_t u64;
-
- /**
- * Address for new work request loads (did<2:0> == 0)
- */
- struct {
- /* Mips64 address region. Should be CVMX_IO_SEG */
- uint64_t mem_region:2;
- /* Must be zero */
- uint64_t reserved_49_61:13;
- /* Must be one */
- uint64_t is_io:1;
- /* the ID of POW -- did<2:0> == 0 in this case */
- uint64_t did:8;
- /* Must be zero */
- uint64_t reserved_4_39:36;
- /*
- * If set, don't return load response until work is
- * available.
- */
- uint64_t wait:1;
- /* Must be zero */
- uint64_t reserved_0_2:3;
- } swork;
-
- /**
- * Address for loads to get POW internal status
- */
- struct {
- /* Mips64 address region. Should be CVMX_IO_SEG */
- uint64_t mem_region:2;
- /* Must be zero */
- uint64_t reserved_49_61:13;
- /* Must be one */
- uint64_t is_io:1;
- /* the ID of POW -- did<2:0> == 1 in this case */
- uint64_t did:8;
- /* Must be zero */
- uint64_t reserved_10_39:30;
- /* The core id to get status for */
- uint64_t coreid:4;
- /*
- * If set and get_cur is set, return reverse tag-list
- * pointer rather than forward tag-list pointer.
- */
- uint64_t get_rev:1;
- /*
- * If set, return current status rather than pending
- * status.
- */
- uint64_t get_cur:1;
- /*
- * If set, get the work-queue pointer rather than
- * tag/type.
- */
- uint64_t get_wqp:1;
- /* Must be zero */
- uint64_t reserved_0_2:3;
- } sstatus;
-
- /**
- * Address for memory loads to get POW internal state
- */
- struct {
- /* Mips64 address region. Should be CVMX_IO_SEG */
- uint64_t mem_region:2;
- /* Must be zero */
- uint64_t reserved_49_61:13;
- /* Must be one */
- uint64_t is_io:1;
- /* the ID of POW -- did<2:0> == 2 in this case */
- uint64_t did:8;
- /* Must be zero */
- uint64_t reserved_16_39:24;
- /* POW memory index */
- uint64_t index:11;
- /*
- * If set, return deschedule information rather than
- * the standard response for work-queue index (invalid
- * if the work-queue entry is not on the deschedule
- * list).
- */
- uint64_t get_des:1;
- /*
- * If set, get the work-queue pointer rather than
- * tag/type (no effect when get_des set).
- */
- uint64_t get_wqp:1;
- /* Must be zero */
- uint64_t reserved_0_2:3;
- } smemload;
-
- /**
- * Address for index/pointer loads
- */
- struct {
- /* Mips64 address region. Should be CVMX_IO_SEG */
- uint64_t mem_region:2;
- /* Must be zero */
- uint64_t reserved_49_61:13;
- /* Must be one */
- uint64_t is_io:1;
- /* the ID of POW -- did<2:0> == 3 in this case */
- uint64_t did:8;
- /* Must be zero */
- uint64_t reserved_9_39:31;
- /*
- * when {get_rmt ==0 AND get_des_get_tail == 0}, this
- * field selects one of eight POW internal-input
- * queues (0-7), one per QOS level; values 8-15 are
- * illegal in this case; when {get_rmt ==0 AND
- * get_des_get_tail == 1}, this field selects one of
- * 16 deschedule lists (per group); when get_rmt ==1,
- * this field selects one of 16 memory-input queue
- * lists. The two memory-input queue lists associated
- * with each QOS level are:
- *
- * - qosgrp = 0, qosgrp = 8: QOS0
- * - qosgrp = 1, qosgrp = 9: QOS1
- * - qosgrp = 2, qosgrp = 10: QOS2
- * - qosgrp = 3, qosgrp = 11: QOS3
- * - qosgrp = 4, qosgrp = 12: QOS4
- * - qosgrp = 5, qosgrp = 13: QOS5
- * - qosgrp = 6, qosgrp = 14: QOS6
- * - qosgrp = 7, qosgrp = 15: QOS7
- */
- uint64_t qosgrp:4;
- /*
- * If set and get_rmt is clear, return deschedule list
- * indexes rather than indexes for the specified qos
- * level; if set and get_rmt is set, return the tail
- * pointer rather than the head pointer for the
- * specified qos level.
- */
- uint64_t get_des_get_tail:1;
- /*
- * If set, return remote pointers rather than the
- * local indexes for the specified qos level.
- */
- uint64_t get_rmt:1;
- /* Must be zero */
- uint64_t reserved_0_2:3;
- } sindexload;
-
- /**
- * address for NULL_RD request (did<2:0> == 4) when this is read,
-	 * Address for NULL_RD request (did<2:0> == 4). When this is read,
- * hardware cannot switch from NULL_NULL to NULL if a POW entry is
- * not available - software may need to recover by finishing
- * another piece of work before a POW entry can ever become
- * available.)
- */
- struct {
- /* Mips64 address region. Should be CVMX_IO_SEG */
- uint64_t mem_region:2;
- /* Must be zero */
- uint64_t reserved_49_61:13;
- /* Must be one */
- uint64_t is_io:1;
- /* the ID of POW -- did<2:0> == 4 in this case */
- uint64_t did:8;
- /* Must be zero */
- uint64_t reserved_0_39:40;
- } snull_rd;
-} cvmx_pow_load_addr_t;
-
-/**
- * This structure defines the response to a load/SENDSINGLE to POW
- * (except CSR reads)
- */
-typedef union {
- uint64_t u64;
-
- /**
- * Response to new work request loads
- */
- struct {
- /*
- * Set when no new work queue entry was returned. *
-		 * Set when no new work queue entry was returned.
-		 * If there was de-scheduled work, the HW will
-		 * definitely return it. When this bit is set, it
-		 * could mean either:
- * - There was no work, or
- *
- * - There was no work that the HW could find. This
- * case can happen, regardless of the wait bit value
- * in the original request, when there is work in
- * the IQ's that is too deep down the list.
- */
- uint64_t no_work:1;
- /* Must be zero */
- uint64_t reserved_40_62:23;
- /* 36 in O1 -- the work queue pointer */
- uint64_t addr:40;
- } s_work;
-
- /**
- * Result for a POW Status Load (when get_cur==0 and get_wqp==0)
- */
- struct {
- uint64_t reserved_62_63:2;
- /* Set when there is a pending non-NULL SWTAG or
- * SWTAG_FULL, and the POW entry has not left the list
- * for the original tag. */
- uint64_t pend_switch:1;
- /* Set when SWTAG_FULL and pend_switch is set. */
- uint64_t pend_switch_full:1;
- /*
- * Set when there is a pending NULL SWTAG, or an
- * implicit switch to NULL.
- */
- uint64_t pend_switch_null:1;
- /* Set when there is a pending DESCHED or SWTAG_DESCHED. */
- uint64_t pend_desched:1;
- /*
- * Set when there is a pending SWTAG_DESCHED and
- * pend_desched is set.
- */
- uint64_t pend_desched_switch:1;
- /* Set when nosched is desired and pend_desched is set. */
- uint64_t pend_nosched:1;
- /* Set when there is a pending GET_WORK. */
- uint64_t pend_new_work:1;
- /*
- * When pend_new_work is set, this bit indicates that
- * the wait bit was set.
- */
- uint64_t pend_new_work_wait:1;
- /* Set when there is a pending NULL_RD. */
- uint64_t pend_null_rd:1;
- /* Set when there is a pending CLR_NSCHED. */
- uint64_t pend_nosched_clr:1;
- uint64_t reserved_51:1;
- /* This is the index when pend_nosched_clr is set. */
- uint64_t pend_index:11;
- /*
- * This is the new_grp when (pend_desched AND
- * pend_desched_switch) is set.
- */
- uint64_t pend_grp:4;
- uint64_t reserved_34_35:2;
- /*
- * This is the tag type when pend_switch or
- * (pend_desched AND pend_desched_switch) are set.
- */
- uint64_t pend_type:2;
- /*
- * - this is the tag when pend_switch or (pend_desched
- * AND pend_desched_switch) are set.
- */
- uint64_t pend_tag:32;
- } s_sstatus0;
-
- /**
- * Result for a POW Status Load (when get_cur==0 and get_wqp==1)
- */
- struct {
- uint64_t reserved_62_63:2;
- /*
- * Set when there is a pending non-NULL SWTAG or
- * SWTAG_FULL, and the POW entry has not left the list
- * for the original tag.
- */
- uint64_t pend_switch:1;
- /* Set when SWTAG_FULL and pend_switch is set. */
- uint64_t pend_switch_full:1;
- /*
- * Set when there is a pending NULL SWTAG, or an
- * implicit switch to NULL.
- */
- uint64_t pend_switch_null:1;
- /*
- * Set when there is a pending DESCHED or
- * SWTAG_DESCHED.
- */
- uint64_t pend_desched:1;
- /*
- * Set when there is a pending SWTAG_DESCHED and
- * pend_desched is set.
- */
- uint64_t pend_desched_switch:1;
- /* Set when nosched is desired and pend_desched is set. */
- uint64_t pend_nosched:1;
- /* Set when there is a pending GET_WORK. */
- uint64_t pend_new_work:1;
- /*
- * When pend_new_work is set, this bit indicates that
- * the wait bit was set.
- */
- uint64_t pend_new_work_wait:1;
- /* Set when there is a pending NULL_RD. */
- uint64_t pend_null_rd:1;
- /* Set when there is a pending CLR_NSCHED. */
- uint64_t pend_nosched_clr:1;
- uint64_t reserved_51:1;
- /* This is the index when pend_nosched_clr is set. */
- uint64_t pend_index:11;
- /*
- * This is the new_grp when (pend_desched AND
- * pend_desched_switch) is set.
- */
- uint64_t pend_grp:4;
- /* This is the wqp when pend_nosched_clr is set. */
- uint64_t pend_wqp:36;
- } s_sstatus1;
-
- /**
- * Result for a POW Status Load (when get_cur==1, get_wqp==0, and
- * get_rev==0)
- */
- struct {
- uint64_t reserved_62_63:2;
- /*
- * Points to the next POW entry in the tag list when
- * tail == 0 (and tag_type is not NULL or NULL_NULL).
- */
- uint64_t link_index:11;
- /* The POW entry attached to the core. */
- uint64_t index:11;
- /*
- * The group attached to the core (updated when new
- * tag list entered on SWTAG_FULL).
- */
- uint64_t grp:4;
- /*
- * Set when this POW entry is at the head of its tag
- * list (also set when in the NULL or NULL_NULL
- * state).
- */
- uint64_t head:1;
- /*
- * Set when this POW entry is at the tail of its tag
- * list (also set when in the NULL or NULL_NULL
- * state).
- */
- uint64_t tail:1;
- /*
- * The tag type attached to the core (updated when new
- * tag list entered on SWTAG, SWTAG_FULL, or
- * SWTAG_DESCHED).
- */
- uint64_t tag_type:2;
- /*
- * The tag attached to the core (updated when new tag
- * list entered on SWTAG, SWTAG_FULL, or
- * SWTAG_DESCHED).
- */
- uint64_t tag:32;
- } s_sstatus2;
-
- /**
- * Result for a POW Status Load (when get_cur==1, get_wqp==0, and get_rev==1)
- */
- struct {
- uint64_t reserved_62_63:2;
- /*
- * Points to the prior POW entry in the tag list when
- * head == 0 (and tag_type is not NULL or
- * NULL_NULL). This field is unpredictable when the
- * core's state is NULL or NULL_NULL.
- */
- uint64_t revlink_index:11;
- /* The POW entry attached to the core. */
- uint64_t index:11;
- /*
- * The group attached to the core (updated when new
- * tag list entered on SWTAG_FULL).
- */
- uint64_t grp:4;
- /* Set when this POW entry is at the head of its tag
- * list (also set when in the NULL or NULL_NULL
- * state).
- */
- uint64_t head:1;
- /*
- * Set when this POW entry is at the tail of its tag
- * list (also set when in the NULL or NULL_NULL
- * state).
- */
- uint64_t tail:1;
- /*
- * The tag type attached to the core (updated when new
- * tag list entered on SWTAG, SWTAG_FULL, or
- * SWTAG_DESCHED).
- */
- uint64_t tag_type:2;
- /*
- * The tag attached to the core (updated when new tag
- * list entered on SWTAG, SWTAG_FULL, or
- * SWTAG_DESCHED).
- */
- uint64_t tag:32;
- } s_sstatus3;
-
- /**
- * Result for a POW Status Load (when get_cur==1, get_wqp==1, and
- * get_rev==0)
- */
- struct {
- uint64_t reserved_62_63:2;
- /*
- * Points to the next POW entry in the tag list when
- * tail == 0 (and tag_type is not NULL or NULL_NULL).
- */
- uint64_t link_index:11;
- /* The POW entry attached to the core. */
- uint64_t index:11;
- /*
- * The group attached to the core (updated when new
- * tag list entered on SWTAG_FULL).
- */
- uint64_t grp:4;
- /*
- * The wqp attached to the core (updated when new tag
- * list entered on SWTAG_FULL).
- */
- uint64_t wqp:36;
- } s_sstatus4;
-
- /**
- * Result for a POW Status Load (when get_cur==1, get_wqp==1, and
- * get_rev==1)
- */
- struct {
- uint64_t reserved_62_63:2;
- /*
- * Points to the prior POW entry in the tag list when
- * head == 0 (and tag_type is not NULL or
- * NULL_NULL). This field is unpredictable when the
- * core's state is NULL or NULL_NULL.
- */
- uint64_t revlink_index:11;
- /* The POW entry attached to the core. */
- uint64_t index:11;
- /*
- * The group attached to the core (updated when new
- * tag list entered on SWTAG_FULL).
- */
- uint64_t grp:4;
- /*
- * The wqp attached to the core (updated when new tag
- * list entered on SWTAG_FULL).
- */
- uint64_t wqp:36;
- } s_sstatus5;
-
- /**
- * Result For POW Memory Load (get_des == 0 and get_wqp == 0)
- */
- struct {
- uint64_t reserved_51_63:13;
- /*
- * The next entry in the input, free, descheduled_head
- * list (unpredictable if entry is the tail of the
- * list).
- */
- uint64_t next_index:11;
- /* The group of the POW entry. */
- uint64_t grp:4;
- uint64_t reserved_35:1;
- /*
- * Set when this POW entry is at the tail of its tag
- * list (also set when in the NULL or NULL_NULL
- * state).
- */
- uint64_t tail:1;
- /* The tag type of the POW entry. */
- uint64_t tag_type:2;
- /* The tag of the POW entry. */
- uint64_t tag:32;
- } s_smemload0;
-
- /**
- * Result For POW Memory Load (get_des == 0 and get_wqp == 1)
- */
- struct {
- uint64_t reserved_51_63:13;
- /*
- * The next entry in the input, free, descheduled_head
- * list (unpredictable if entry is the tail of the
- * list).
- */
- uint64_t next_index:11;
- /* The group of the POW entry. */
- uint64_t grp:4;
- /* The WQP held in the POW entry. */
- uint64_t wqp:36;
- } s_smemload1;
-
- /**
- * Result For POW Memory Load (get_des == 1)
- */
- struct {
- uint64_t reserved_51_63:13;
- /*
- * The next entry in the tag list connected to the
- * descheduled head.
- */
- uint64_t fwd_index:11;
- /* The group of the POW entry. */
- uint64_t grp:4;
- /* The nosched bit for the POW entry. */
- uint64_t nosched:1;
- /* There is a pending tag switch */
- uint64_t pend_switch:1;
- /*
- * The next tag type for the new tag list when
- * pend_switch is set.
- */
- uint64_t pend_type:2;
- /*
- * The next tag for the new tag list when pend_switch
- * is set.
- */
- uint64_t pend_tag:32;
- } s_smemload2;
-
- /**
- * Result For POW Index/Pointer Load (get_rmt == 0/get_des_get_tail == 0)
- */
- struct {
- uint64_t reserved_52_63:12;
- /*
- * set when there is one or more POW entries on the
- * free list.
- */
- uint64_t free_val:1;
- /*
- * set when there is exactly one POW entry on the free
- * list.
- */
- uint64_t free_one:1;
- uint64_t reserved_49:1;
- /*
- * when free_val is set, indicates the first entry on
- * the free list.
- */
- uint64_t free_head:11;
- uint64_t reserved_37:1;
- /*
- * when free_val is set, indicates the last entry on
- * the free list.
- */
- uint64_t free_tail:11;
- /*
- * set when there is one or more POW entries on the
- * input Q list selected by qosgrp.
- */
- uint64_t loc_val:1;
- /*
- * set when there is exactly one POW entry on the
- * input Q list selected by qosgrp.
- */
- uint64_t loc_one:1;
- uint64_t reserved_23:1;
- /*
- * when loc_val is set, indicates the first entry on
- * the input Q list selected by qosgrp.
- */
- uint64_t loc_head:11;
- uint64_t reserved_11:1;
- /*
- * when loc_val is set, indicates the last entry on
- * the input Q list selected by qosgrp.
- */
- uint64_t loc_tail:11;
- } sindexload0;
-
- /**
- * Result For POW Index/Pointer Load (get_rmt == 0/get_des_get_tail == 1)
- */
- struct {
- uint64_t reserved_52_63:12;
- /*
- * set when there is one or more POW entries on the
- * nosched list.
- */
- uint64_t nosched_val:1;
- /*
- * set when there is exactly one POW entry on the
- * nosched list.
- */
- uint64_t nosched_one:1;
- uint64_t reserved_49:1;
- /*
- * when nosched_val is set, indicates the first entry
- * on the nosched list.
- */
- uint64_t nosched_head:11;
- uint64_t reserved_37:1;
- /*
- * when nosched_val is set, indicates the last entry
- * on the nosched list.
- */
- uint64_t nosched_tail:11;
- /*
- * set when there is one or more descheduled heads on
- * the descheduled list selected by qosgrp.
- */
- uint64_t des_val:1;
- /*
- * set when there is exactly one descheduled head on
- * the descheduled list selected by qosgrp.
- */
- uint64_t des_one:1;
- uint64_t reserved_23:1;
- /*
- * when des_val is set, indicates the first
- * descheduled head on the descheduled list selected
- * by qosgrp.
- */
- uint64_t des_head:11;
- uint64_t reserved_11:1;
- /*
- * when des_val is set, indicates the last descheduled
- * head on the descheduled list selected by qosgrp.
- */
- uint64_t des_tail:11;
- } sindexload1;
-
- /**
- * Result For POW Index/Pointer Load (get_rmt == 1/get_des_get_tail == 0)
- */
- struct {
- uint64_t reserved_39_63:25;
- /*
- * Set when this DRAM list is the current head
- * (i.e. is the next to be reloaded when the POW
- * hardware reloads a POW entry from DRAM). The POW
- * hardware alternates between the two DRAM lists
- * associated with a QOS level when it reloads work
- * from DRAM into the POW unit.
- */
- uint64_t rmt_is_head:1;
- /*
- * Set when the DRAM portion of the input Q list
- * selected by qosgrp contains one or more pieces of
- * work.
- */
- uint64_t rmt_val:1;
- /*
- * Set when the DRAM portion of the input Q list
- * selected by qosgrp contains exactly one piece of
- * work.
- */
- uint64_t rmt_one:1;
- /*
- * When rmt_val is set, indicates the first piece of
- * work on the DRAM input Q list selected by
- * qosgrp.
- */
- uint64_t rmt_head:36;
- } sindexload2;
-
- /**
- * Result For POW Index/Pointer Load (get_rmt ==
- * 1/get_des_get_tail == 1)
- */
- struct {
- uint64_t reserved_39_63:25;
- /*
- * set when this DRAM list is the current head
- * (i.e. is the next to be reloaded when the POW
- * hardware reloads a POW entry from DRAM). The POW
- * hardware alternates between the two DRAM lists
- * associated with a QOS level when it reloads work
- * from DRAM into the POW unit.
- */
- uint64_t rmt_is_head:1;
- /*
- * set when the DRAM portion of the input Q list
- * selected by qosgrp contains one or more pieces of
- * work.
- */
- uint64_t rmt_val:1;
- /*
- * set when the DRAM portion of the input Q list
- * selected by qosgrp contains exactly one piece of
- * work.
- */
- uint64_t rmt_one:1;
- /*
- * when rmt_val is set, indicates the last piece of
- * work on the DRAM input Q list selected by
- * qosgrp.
- */
- uint64_t rmt_tail:36;
- } sindexload3;
-
- /**
- * Response to NULL_RD request loads
- */
- struct {
- uint64_t unused:62;
-		/* of type enum cvmx_pow_tag_type. state is one of the
- * following:
- *
- * - CVMX_POW_TAG_TYPE_ORDERED
- * - CVMX_POW_TAG_TYPE_ATOMIC
- * - CVMX_POW_TAG_TYPE_NULL
- * - CVMX_POW_TAG_TYPE_NULL_NULL
- */
- uint64_t state:2;
- } s_null_rd;
-
-} cvmx_pow_tag_load_resp_t;
-
-/**
- * This structure describes the address used for stores to the POW.
- * The store address is meaningful on stores to the POW. The
- * hardware assumes that an aligned 64-bit store was used for all
- * these stores. Note the assumption that the work queue entry is
- * aligned on an 8-byte boundary (since the low-order 3 address bits
- * must be zero). Note that not all fields are used by all
- * operations.
- *
- * NOTE: The following is the behavior of the pending switch bit at the PP
- * for POW stores (i.e. when did<7:3> == 0xc)
- * - did<2:0> == 0 => pending switch bit is set
- * - did<2:0> == 1 => no effect on the pending switch bit
- * - did<2:0> == 3 => pending switch bit is cleared
- * - did<2:0> == 7 => no effect on the pending switch bit
- * - did<2:0> == others => must not be used
- * - No other loads/stores have an effect on the pending switch bit
- * - The switch bus from POW can clear the pending switch bit
- *
- * NOTE: did<2:0> == 2 is used by the HW for a special single-cycle
- * ADDWQ command that only contains the pointer. SW must never use
- * did<2:0> == 2.
- */
-typedef union {
- /**
- * Unsigned 64 bit integer representation of store address
- */
- uint64_t u64;
-
- struct {
- /* Memory region. Should be CVMX_IO_SEG in most cases */
- uint64_t mem_reg:2;
- uint64_t reserved_49_61:13; /* Must be zero */
- uint64_t is_io:1; /* Must be one */
- /* Device ID of POW. Note that different sub-dids are used. */
- uint64_t did:8;
- uint64_t reserved_36_39:4; /* Must be zero */
- /* Address field. addr<2:0> must be zero */
- uint64_t addr:36;
- } stag;
-} cvmx_pow_tag_store_addr_t;
-
-/**
- * decode of the store data when an IOBDMA SENDSINGLE is sent to POW
- */
-typedef union {
- uint64_t u64;
-
- struct {
- /*
- * the (64-bit word) location in scratchpad to write
- * to (if len != 0)
- */
- uint64_t scraddr:8;
- /* the number of words in the response (0 => no response) */
- uint64_t len:8;
- /* the ID of the device on the non-coherent bus */
- uint64_t did:8;
- uint64_t unused:36;
- /* if set, don't return load response until work is available */
- uint64_t wait:1;
- uint64_t unused2:3;
- } s;
-
-} cvmx_pow_iobdma_store_t;
-
-/* CSR typedefs have been moved to cvmx-csr-*.h */
-
-/**
- * Get the POW tag for this core. This returns the current
- * tag type, tag, group, and POW entry index associated with
- * this core. Index is only valid if the tag type isn't NULL_NULL.
- * If a tag switch is pending this routine returns the tag before
- * the tag switch, not after.
- *
- * Returns Current tag
- */
-static inline cvmx_pow_tag_req_t cvmx_pow_get_current_tag(void)
-{
- cvmx_pow_load_addr_t load_addr;
- cvmx_pow_tag_load_resp_t load_resp;
- cvmx_pow_tag_req_t result;
-
- load_addr.u64 = 0;
- load_addr.sstatus.mem_region = CVMX_IO_SEG;
- load_addr.sstatus.is_io = 1;
- load_addr.sstatus.did = CVMX_OCT_DID_TAG_TAG1;
- load_addr.sstatus.coreid = cvmx_get_core_num();
- load_addr.sstatus.get_cur = 1;
- load_resp.u64 = cvmx_read_csr(load_addr.u64);
- result.u64 = 0;
- result.s.grp = load_resp.s_sstatus2.grp;
- result.s.index = load_resp.s_sstatus2.index;
- result.s.type = load_resp.s_sstatus2.tag_type;
- result.s.tag = load_resp.s_sstatus2.tag;
- return result;
-}
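-
-/*
- * Editor's note: illustrative sketch, not part of the original SDK header.
- * Inspecting the current tag lets code decide which switch primitive is
- * appropriate; for instance, a switch away from a NULL tag must use
- * cvmx_pow_tag_sw_full() rather than cvmx_pow_tag_sw().
- *
- *	cvmx_pow_tag_req_t cur = cvmx_pow_get_current_tag();
- *
- *	if (cur.s.type == CVMX_POW_TAG_TYPE_NULL ||
- *	    cur.s.type == CVMX_POW_TAG_TYPE_NULL_NULL)
- *		pr_info("core %llu holds no real tag\n",
- *			(unsigned long long)cvmx_get_core_num());
- */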
-
-/**
- * Get the POW WQE for this core. This returns the work queue
- * entry currently associated with this core.
- *
- * Returns WQE pointer
- */
-static inline cvmx_wqe_t *cvmx_pow_get_current_wqp(void)
-{
- cvmx_pow_load_addr_t load_addr;
- cvmx_pow_tag_load_resp_t load_resp;
-
- load_addr.u64 = 0;
- load_addr.sstatus.mem_region = CVMX_IO_SEG;
- load_addr.sstatus.is_io = 1;
- load_addr.sstatus.did = CVMX_OCT_DID_TAG_TAG1;
- load_addr.sstatus.coreid = cvmx_get_core_num();
- load_addr.sstatus.get_cur = 1;
- load_addr.sstatus.get_wqp = 1;
- load_resp.u64 = cvmx_read_csr(load_addr.u64);
- return (cvmx_wqe_t *) cvmx_phys_to_ptr(load_resp.s_sstatus4.wqp);
-}
-
-#ifndef CVMX_MF_CHORD
-#define CVMX_MF_CHORD(dest) CVMX_RDHWR(dest, 30)
-#endif
-
-/**
- * Print a warning if a tag switch is pending for this core
- *
- * @function: Function name checking for a pending tag switch
- */
-static inline void __cvmx_pow_warn_if_pending_switch(const char *function)
-{
- uint64_t switch_complete;
- CVMX_MF_CHORD(switch_complete);
- if (!switch_complete)
- pr_warning("%s called with tag switch in progress\n", function);
-}
-
-/**
- * Waits for a tag switch to complete by polling the completion bit.
- * Note that switches to NULL complete immediately and do not need
- * to be waited for.
- */
-static inline void cvmx_pow_tag_sw_wait(void)
-{
- const uint64_t MAX_CYCLES = 1ull << 31;
- uint64_t switch_complete;
- uint64_t start_cycle = cvmx_get_cycle();
- while (1) {
- CVMX_MF_CHORD(switch_complete);
- if (unlikely(switch_complete))
- break;
- if (unlikely(cvmx_get_cycle() > start_cycle + MAX_CYCLES)) {
- pr_warning("Tag switch is taking a long time, "
- "possible deadlock\n");
- start_cycle = -MAX_CYCLES - 1;
- }
- }
-}
-
-/**
- * Synchronous work request. Requests work from the POW.
- * This function does NOT wait for previous tag switches to complete,
- * so the caller must ensure that there is not a pending tag switch.
- *
- * @wait: When set, call stalls until work becomes available, or times out.
- * If not set, returns immediately.
- *
- * Returns the WQE pointer from POW, or NULL if no work
- * was available.
- */
-static inline cvmx_wqe_t *cvmx_pow_work_request_sync_nocheck(cvmx_pow_wait_t
- wait)
-{
- cvmx_pow_load_addr_t ptr;
- cvmx_pow_tag_load_resp_t result;
-
- if (CVMX_ENABLE_POW_CHECKS)
- __cvmx_pow_warn_if_pending_switch(__func__);
-
- ptr.u64 = 0;
- ptr.swork.mem_region = CVMX_IO_SEG;
- ptr.swork.is_io = 1;
- ptr.swork.did = CVMX_OCT_DID_TAG_SWTAG;
- ptr.swork.wait = wait;
-
- result.u64 = cvmx_read_csr(ptr.u64);
-
- if (result.s_work.no_work)
- return NULL;
- else
- return (cvmx_wqe_t *) cvmx_phys_to_ptr(result.s_work.addr);
-}
-
-/**
- * Synchronous work request. Requests work from the POW.
- * This function waits for any previous tag switch to complete before
- * requesting the new work.
- *
- * @wait: When set, call stalls until work becomes available, or times out.
- * If not set, returns immediately.
- *
- * Returns the WQE pointer from POW, or NULL if no work
- * was available.
- */
-static inline cvmx_wqe_t *cvmx_pow_work_request_sync(cvmx_pow_wait_t wait)
-{
- if (CVMX_ENABLE_POW_CHECKS)
- __cvmx_pow_warn_if_pending_switch(__func__);
-
- /* Must not have a switch pending when requesting work */
- cvmx_pow_tag_sw_wait();
- return cvmx_pow_work_request_sync_nocheck(wait);
-
-}
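-
-/*
- * Editor's note: illustrative synchronous dispatch loop, not part of the
- * original SDK header. process_work() is a hypothetical application handler.
- *
- *	for (;;) {
- *		cvmx_wqe_t *work = cvmx_pow_work_request_sync(CVMX_POW_WAIT);
- *
- *		if (cvmx_pow_work_invalid(work))
- *			continue;
- *		process_work(work);
- *	}
- */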
-
-/**
- * Synchronous null_rd request. Requests a switch out of NULL_NULL POW state.
- * This function waits for any previous tag switch to complete before
- * requesting the null_rd.
- *
- * Returns Returns the POW state of type cvmx_pow_tag_type_t.
- */
-static inline enum cvmx_pow_tag_type cvmx_pow_work_request_null_rd(void)
-{
- cvmx_pow_load_addr_t ptr;
- cvmx_pow_tag_load_resp_t result;
-
- if (CVMX_ENABLE_POW_CHECKS)
- __cvmx_pow_warn_if_pending_switch(__func__);
-
- /* Must not have a switch pending when requesting work */
- cvmx_pow_tag_sw_wait();
-
- ptr.u64 = 0;
- ptr.snull_rd.mem_region = CVMX_IO_SEG;
- ptr.snull_rd.is_io = 1;
- ptr.snull_rd.did = CVMX_OCT_DID_TAG_NULL_RD;
-
- result.u64 = cvmx_read_csr(ptr.u64);
-
- return (enum cvmx_pow_tag_type) result.s_null_rd.state;
-}
-
-/**
- * Asynchronous work request. Work is requested from the POW unit,
- * and should later be checked with function
- * cvmx_pow_work_response_async. This function does NOT wait for
- * previous tag switches to complete, so the caller must ensure that
- * there is not a pending tag switch.
- *
- * @scr_addr: Scratch memory address that response will be returned
- * to, which is either a valid WQE, or a response with the
- * invalid bit set. Byte address, must be 8 byte aligned.
- *
- * @wait: 1 to cause response to wait for work to become available (or
- * timeout), 0 to cause response to return immediately
- */
-static inline void cvmx_pow_work_request_async_nocheck(int scr_addr,
- cvmx_pow_wait_t wait)
-{
- cvmx_pow_iobdma_store_t data;
-
- if (CVMX_ENABLE_POW_CHECKS)
- __cvmx_pow_warn_if_pending_switch(__func__);
-
- /* scr_addr must be 8 byte aligned */
- data.s.scraddr = scr_addr >> 3;
- data.s.len = 1;
- data.s.did = CVMX_OCT_DID_TAG_SWTAG;
- data.s.wait = wait;
- cvmx_send_single(data.u64);
-}
-
-/**
- * Asynchronous work request. Work is requested from the POW unit,
- * and should later be checked with function
- * cvmx_pow_work_response_async. This function waits for any previous
- * tag switch to complete before requesting the new work.
- *
- * @scr_addr: Scratch memory address that response will be returned
- * to, which is either a valid WQE, or a response with the
- * invalid bit set. Byte address, must be 8 byte aligned.
- *
- * @wait: 1 to cause response to wait for work to become available (or
- * timeout), 0 to cause response to return immediately
- */
-static inline void cvmx_pow_work_request_async(int scr_addr,
- cvmx_pow_wait_t wait)
-{
- if (CVMX_ENABLE_POW_CHECKS)
- __cvmx_pow_warn_if_pending_switch(__func__);
-
- /* Must not have a switch pending when requesting work */
- cvmx_pow_tag_sw_wait();
- cvmx_pow_work_request_async_nocheck(scr_addr, wait);
-}
-
-/**
- * Gets result of asynchronous work request. Performs a IOBDMA sync
- * to wait for the response.
- *
- * @scr_addr: Scratch memory address to get the result from. Byte address,
- * must be 8 byte aligned.
- *
- * Returns the WQE from the scratch register, or NULL if no
- * work was available.
- */
-static inline cvmx_wqe_t *cvmx_pow_work_response_async(int scr_addr)
-{
- cvmx_pow_tag_load_resp_t result;
-
- CVMX_SYNCIOBDMA;
- result.u64 = cvmx_scratch_read64(scr_addr);
-
- if (result.s_work.no_work)
- return NULL;
- else
- return (cvmx_wqe_t *) cvmx_phys_to_ptr(result.s_work.addr);
-}
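-
-/*
- * Editor's note: illustrative asynchronous request/response pair, not part
- * of the original SDK header. The scratch offset (0) is a placeholder; any
- * 8-byte-aligned scratchpad location may be used. do_unrelated_processing()
- * and process_work() are hypothetical application functions.
- *
- *	cvmx_wqe_t *work;
- *
- *	cvmx_pow_work_request_async(0, CVMX_POW_NO_WAIT);
- *	do_unrelated_processing();
- *	work = cvmx_pow_work_response_async(0);
- *	if (work)
- *		process_work(work);
- */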
-
-/**
- * Checks if a work queue entry pointer returned by a work
- * request is valid. It may be invalid due to no work
- * being available or due to a timeout.
- *
- * @wqe_ptr: pointer to a work queue entry returned by the POW
- *
- * Returns 0 if pointer is valid
- * 1 if invalid (no work was returned)
- */
-static inline uint64_t cvmx_pow_work_invalid(cvmx_wqe_t *wqe_ptr)
-{
- return wqe_ptr == NULL;
-}
-
-/**
- * Starts a tag switch to the provided tag value and tag type.
- * Completion for the tag switch must be checked for separately. This
- * function does NOT update the work queue entry in dram to match tag
- * value and type, so the application must keep track of these if they
- * are important to the application. This tag switch command must not
- * be used for switches to NULL, as the tag switch pending bit will be
- * set by the switch request, but never cleared by the hardware.
- *
- * NOTE: This should not be used when switching from a NULL tag. Use
- * cvmx_pow_tag_sw_full() instead.
- *
- * This function does no checks, so the caller must ensure that any
- * previous tag switch has completed.
- *
- * @tag: new tag value
- * @tag_type: new tag type (ordered or atomic)
- */
-static inline void cvmx_pow_tag_sw_nocheck(uint32_t tag,
- enum cvmx_pow_tag_type tag_type)
-{
- cvmx_addr_t ptr;
- cvmx_pow_tag_req_t tag_req;
-
- if (CVMX_ENABLE_POW_CHECKS) {
- cvmx_pow_tag_req_t current_tag;
- __cvmx_pow_warn_if_pending_switch(__func__);
- current_tag = cvmx_pow_get_current_tag();
- if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL)
- pr_warning("%s called with NULL_NULL tag\n",
- __func__);
- if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL)
- pr_warning("%s called with NULL tag\n", __func__);
- if ((current_tag.s.type == tag_type)
- && (current_tag.s.tag == tag))
- pr_warning("%s called to perform a tag switch to the "
- "same tag\n",
- __func__);
- if (tag_type == CVMX_POW_TAG_TYPE_NULL)
- pr_warning("%s called to perform a tag switch to "
- "NULL. Use cvmx_pow_tag_sw_null() instead\n",
- __func__);
- }
-
- /*
- * Note that WQE in DRAM is not updated here, as the POW does
- * not read from DRAM once the WQE is in flight. See hardware
- * manual for complete details. It is the application's
- * responsibility to keep track of the current tag value if
- * that is important.
- */
-
- tag_req.u64 = 0;
- tag_req.s.op = CVMX_POW_TAG_OP_SWTAG;
- tag_req.s.tag = tag;
- tag_req.s.type = tag_type;
-
- ptr.u64 = 0;
- ptr.sio.mem_region = CVMX_IO_SEG;
- ptr.sio.is_io = 1;
- ptr.sio.did = CVMX_OCT_DID_TAG_SWTAG;
-
-	/* once this store arrives at POW, it will attempt the switch;
-	   software must wait for the switch to complete separately */
- cvmx_write_io(ptr.u64, tag_req.u64);
-}
-
-/**
- * Starts a tag switch to the provided tag value and tag type.
- * Completion for the tag switch must be checked for separately. This
- * function does NOT update the work queue entry in dram to match tag
- * value and type, so the application must keep track of these if they
- * are important to the application. This tag switch command must not
- * be used for switches to NULL, as the tag switch pending bit will be
- * set by the switch request, but never cleared by the hardware.
- *
- * NOTE: This should not be used when switching from a NULL tag. Use
- * cvmx_pow_tag_sw_full() instead.
- *
- * This function waits for any previous tag switch to complete, and also
- * displays an error on tag switches to NULL.
- *
- * @tag: new tag value
- * @tag_type: new tag type (ordered or atomic)
- */
-static inline void cvmx_pow_tag_sw(uint32_t tag,
- enum cvmx_pow_tag_type tag_type)
-{
- if (CVMX_ENABLE_POW_CHECKS)
- __cvmx_pow_warn_if_pending_switch(__func__);
-
- /*
- * Note that WQE in DRAM is not updated here, as the POW does
- * not read from DRAM once the WQE is in flight. See hardware
- * manual for complete details. It is the application's
- * responsibility to keep track of the current tag value if
- * that is important.
- */
-
- /*
- * Ensure that there is not a pending tag switch, as a tag
- * switch cannot be started if a previous switch is still
- * pending.
- */
- cvmx_pow_tag_sw_wait();
- cvmx_pow_tag_sw_nocheck(tag, tag_type);
-}
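-
-/*
- * Editor's note: illustrative critical-section sketch, not part of the
- * original SDK header. Switching to an ATOMIC tag guarantees that at most
- * one core holds the tag at a time. This assumes the core already holds an
- * ORDERED or ATOMIC tag; flow_tag and update_flow_state() are placeholders.
- *
- *	cvmx_pow_tag_sw(flow_tag, CVMX_POW_TAG_TYPE_ATOMIC);
- *	cvmx_pow_tag_sw_wait();
- *	update_flow_state();
- *	cvmx_pow_tag_sw(flow_tag, CVMX_POW_TAG_TYPE_ORDERED);
- */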
-
-/**
- * Starts a tag switch to the provided tag value and tag type.
- * Completion for the tag switch must be checked for separately. This
- * function does NOT update the work queue entry in dram to match tag
- * value and type, so the application must keep track of these if they
- * are important to the application. This tag switch command must not
- * be used for switches to NULL, as the tag switch pending bit will be
- * set by the switch request, but never cleared by the hardware.
- *
- * This function must be used for tag switches from NULL.
- *
- * This function does no checks, so the caller must ensure that any
- * previous tag switch has completed.
- *
- * @wqp: pointer to work queue entry to submit. This entry is
- * updated to match the other parameters
- * @tag: tag value to be assigned to work queue entry
- * @tag_type: type of tag
- * @group: group value for the work queue entry.
- */
-static inline void cvmx_pow_tag_sw_full_nocheck(cvmx_wqe_t *wqp, uint32_t tag,
- enum cvmx_pow_tag_type tag_type,
- uint64_t group)
-{
- cvmx_addr_t ptr;
- cvmx_pow_tag_req_t tag_req;
-
- if (CVMX_ENABLE_POW_CHECKS) {
- cvmx_pow_tag_req_t current_tag;
- __cvmx_pow_warn_if_pending_switch(__func__);
- current_tag = cvmx_pow_get_current_tag();
- if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL)
- pr_warning("%s called with NULL_NULL tag\n",
- __func__);
- if ((current_tag.s.type == tag_type)
- && (current_tag.s.tag == tag))
- pr_warning("%s called to perform a tag switch to "
- "the same tag\n",
- __func__);
- if (tag_type == CVMX_POW_TAG_TYPE_NULL)
- pr_warning("%s called to perform a tag switch to "
- "NULL. Use cvmx_pow_tag_sw_null() instead\n",
- __func__);
- if (wqp != cvmx_phys_to_ptr(0x80))
- if (wqp != cvmx_pow_get_current_wqp())
- pr_warning("%s passed WQE(%p) doesn't match "
- "the address in the POW(%p)\n",
- __func__, wqp,
- cvmx_pow_get_current_wqp());
- }
-
- /*
- * Note that WQE in DRAM is not updated here, as the POW does
- * not read from DRAM once the WQE is in flight. See hardware
- * manual for complete details. It is the application's
- * responsibility to keep track of the current tag value if
- * that is important.
- */
-
- tag_req.u64 = 0;
- tag_req.s.op = CVMX_POW_TAG_OP_SWTAG_FULL;
- tag_req.s.tag = tag;
- tag_req.s.type = tag_type;
- tag_req.s.grp = group;
-
- ptr.u64 = 0;
- ptr.sio.mem_region = CVMX_IO_SEG;
- ptr.sio.is_io = 1;
- ptr.sio.did = CVMX_OCT_DID_TAG_SWTAG;
- ptr.sio.offset = CAST64(wqp);
-
- /*
-	 * once this store arrives at POW, it will attempt the switch;
- * software must wait for the switch to complete separately.
- */
- cvmx_write_io(ptr.u64, tag_req.u64);
-}
-
-/**
- * Starts a tag switch to the provided tag value and tag type.
- * Completion for the tag switch must be checked for separately. This
- * function does NOT update the work queue entry in dram to match tag
- * value and type, so the application must keep track of these if they
- * are important to the application. This tag switch command must not
- * be used for switches to NULL, as the tag switch pending bit will be
- * set by the switch request, but never cleared by the hardware.
- *
- * This function must be used for tag switches from NULL.
- *
- * This function waits for any pending tag switches to complete
- * before requesting the tag switch.
- *
- * @wqp: pointer to work queue entry to submit. This entry is updated
- * to match the other parameters
- * @tag: tag value to be assigned to work queue entry
- * @tag_type: type of tag
- * @group: group value for the work queue entry.
- */
-static inline void cvmx_pow_tag_sw_full(cvmx_wqe_t *wqp, uint32_t tag,
- enum cvmx_pow_tag_type tag_type,
- uint64_t group)
-{
- if (CVMX_ENABLE_POW_CHECKS)
- __cvmx_pow_warn_if_pending_switch(__func__);
-
- /*
- * Ensure that there is not a pending tag switch, as a tag
- * switch cannot be started if a previous switch is still
- * pending.
- */
- cvmx_pow_tag_sw_wait();
- cvmx_pow_tag_sw_full_nocheck(wqp, tag, tag_type, group);
-}
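-
-/*
- * Editor's note: illustrative sketch, not part of the original SDK header.
- * When the core currently holds a NULL tag, the full form must be used to
- * re-attach the work queue entry; wqe, new_tag and grp are placeholders.
- *
- *	cvmx_pow_tag_sw_full(wqe, new_tag, CVMX_POW_TAG_TYPE_ORDERED, grp);
- *	cvmx_pow_tag_sw_wait();
- */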
-
-/**
- * Switch to a NULL tag, which ends any ordering or
- * synchronization provided by the POW for the current
- * work queue entry. This operation completes immediately,
- * so completion should not be waited for.
- * This function does NOT wait for previous tag switches to complete,
- * so the caller must ensure that any previous tag switches have completed.
- */
-static inline void cvmx_pow_tag_sw_null_nocheck(void)
-{
- cvmx_addr_t ptr;
- cvmx_pow_tag_req_t tag_req;
-
- if (CVMX_ENABLE_POW_CHECKS) {
- cvmx_pow_tag_req_t current_tag;
- __cvmx_pow_warn_if_pending_switch(__func__);
- current_tag = cvmx_pow_get_current_tag();
- if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL)
- pr_warning("%s called with NULL_NULL tag\n",
- __func__);
- if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL)
- pr_warning("%s called when we already have a "
- "NULL tag\n",
- __func__);
- }
-
- tag_req.u64 = 0;
- tag_req.s.op = CVMX_POW_TAG_OP_SWTAG;
- tag_req.s.type = CVMX_POW_TAG_TYPE_NULL;
-
- ptr.u64 = 0;
- ptr.sio.mem_region = CVMX_IO_SEG;
- ptr.sio.is_io = 1;
- ptr.sio.did = CVMX_OCT_DID_TAG_TAG1;
-
- cvmx_write_io(ptr.u64, tag_req.u64);
-
- /* switch to NULL completes immediately */
-}
-
-/**
- * Switch to a NULL tag, which ends any ordering or
- * synchronization provided by the POW for the current
- * work queue entry. This operation completes immediately,
- * so completion should not be waited for.
- * This function waits for any pending tag switches to complete
- * before requesting the switch to NULL.
- */
-static inline void cvmx_pow_tag_sw_null(void)
-{
- if (CVMX_ENABLE_POW_CHECKS)
- __cvmx_pow_warn_if_pending_switch(__func__);
-
- /*
- * Ensure that there is not a pending tag switch, as a tag
- * switch cannot be started if a previous switch is still
- * pending.
- */
- cvmx_pow_tag_sw_wait();
- cvmx_pow_tag_sw_null_nocheck();
-
- /* switch to NULL completes immediately */
-}
-
-/**
- * Submits work to an input queue. This function updates the work
- * queue entry in DRAM to match the arguments given. Note that the
- * tag provided is for the work queue entry submitted, and is
- * unrelated to the tag that the core currently holds.
- *
- * @wqp: pointer to work queue entry to submit. This entry is
- * updated to match the other parameters
- * @tag: tag value to be assigned to work queue entry
- * @tag_type: type of tag
- * @qos: Input queue to add to.
- * @grp: group value for the work queue entry.
- */
-static inline void cvmx_pow_work_submit(cvmx_wqe_t *wqp, uint32_t tag,
- enum cvmx_pow_tag_type tag_type,
- uint64_t qos, uint64_t grp)
-{
- cvmx_addr_t ptr;
- cvmx_pow_tag_req_t tag_req;
-
- wqp->qos = qos;
- wqp->tag = tag;
- wqp->tag_type = tag_type;
- wqp->grp = grp;
-
- tag_req.u64 = 0;
- tag_req.s.op = CVMX_POW_TAG_OP_ADDWQ;
- tag_req.s.type = tag_type;
- tag_req.s.tag = tag;
- tag_req.s.qos = qos;
- tag_req.s.grp = grp;
-
- ptr.u64 = 0;
- ptr.sio.mem_region = CVMX_IO_SEG;
- ptr.sio.is_io = 1;
- ptr.sio.did = CVMX_OCT_DID_TAG_TAG1;
- ptr.sio.offset = cvmx_ptr_to_phys(wqp);
-
- /*
- * SYNC write to memory before the work submit. This is
- * necessary as POW may read values from DRAM at this time.
- */
- CVMX_SYNCWS;
- cvmx_write_io(ptr.u64, tag_req.u64);
-}
-
-/**
- * This function sets the group mask for a core. The group mask
- * indicates which groups each core will accept work from. There are
- * 16 groups.
- *
- * @core_num: core to apply mask to
- * @mask: Group mask. There are 16 groups, so only bits 0-15 are valid,
- * representing groups 0-15.
- * Each 1 bit in the mask enables the core to accept work from
- * the corresponding group.
- */
-static inline void cvmx_pow_set_group_mask(uint64_t core_num, uint64_t mask)
-{
- union cvmx_pow_pp_grp_mskx grp_msk;
-
- grp_msk.u64 = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(core_num));
- grp_msk.s.grp_msk = mask;
- cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(core_num), grp_msk.u64);
-}
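As a hedged usage sketch of the helper above (the core number is an assumption made purely for illustration), a core restricted to groups 0 and 1 would be configured like this:

/*
 * Editor's sketch (not part of the original file): accept work only from
 * groups 0 and 1 on an illustrative core.
 */
static inline void example_restrict_core_to_groups(void)
{
	uint64_t core_num = 2;			/* assumed core number */
	uint64_t mask = (1ull << 0) | (1ull << 1); /* one bit per group, groups 0-15 */

	cvmx_pow_set_group_mask(core_num, mask);
}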
-
-/**
- * This function sets POW static priorities for a core. Each input queue has
- * an associated priority value.
- *
- * @core_num: core to apply priorities to
- * @priority: Vector of 8 priorities, one per POW Input Queue (0-7).
- * Highest priority is 0 and lowest is 7. A priority value
- * of 0xF instructs POW to skip the Input Queue when
- * scheduling to this specific core.
- * NOTE: priorities should not have gaps in values, meaning
- * {0,1,1,1,1,1,1,1} is a valid configuration while
- * {0,2,2,2,2,2,2,2} is not.
- */
-static inline void cvmx_pow_set_priority(uint64_t core_num,
- const uint8_t priority[])
-{
- /* POW priorities are supported on CN5xxx and later */
- if (!OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
- union cvmx_pow_pp_grp_mskx grp_msk;
-
- grp_msk.u64 = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(core_num));
- grp_msk.s.qos0_pri = priority[0];
- grp_msk.s.qos1_pri = priority[1];
- grp_msk.s.qos2_pri = priority[2];
- grp_msk.s.qos3_pri = priority[3];
- grp_msk.s.qos4_pri = priority[4];
- grp_msk.s.qos5_pri = priority[5];
- grp_msk.s.qos6_pri = priority[6];
- grp_msk.s.qos7_pri = priority[7];
-
- /* Detect gaps between priorities and flag error */
- {
- int i;
- uint32_t prio_mask = 0;
-
- for (i = 0; i < 8; i++)
- if (priority[i] != 0xF)
- prio_mask |= 1 << priority[i];
-
- if (prio_mask ^ ((1 << cvmx_pop(prio_mask)) - 1)) {
- pr_err("POW static priorities should be "
- "contiguous (0x%llx)\n",
- (unsigned long long)prio_mask);
- return;
- }
- }
-
- cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(core_num), grp_msk.u64);
- }
-}
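A minimal sketch of a valid call that follows the no-gaps rule documented above; the core number is illustrative, and 0xF entries could be used instead to skip individual queues.

/*
 * Editor's sketch (not part of the original file): queue 0 gets the highest
 * priority, queues 1-7 share priority 1 - a contiguous, gap-free assignment.
 */
static inline void example_set_pow_priorities(void)
{
	const uint8_t priority[8] = { 0, 1, 1, 1, 1, 1, 1, 1 };

	cvmx_pow_set_priority(2 /* assumed core number */, priority);
}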
-
-/**
- * Performs a tag switch and then an immediate deschedule. This completes
- * immediately, so completion must not be waited for. This function does NOT
- * update the wqe in DRAM to match arguments.
- *
- * This function does NOT wait for any prior tag switches to complete, so the
- * calling code must do this.
- *
- * Note the following CAVEAT of the Octeon HW behavior when
- * re-scheduling DE-SCHEDULEd items whose (next) state is
- * ORDERED:
- * - If there are no switches pending at the time that the
- * HW executes the de-schedule, the HW will only re-schedule
- * the head of the FIFO associated with the given tag. This
- * means that in many respects, the HW treats this ORDERED
- * tag as an ATOMIC tag. Note that in the SWTAG_DESCH
- * case (to an ORDERED tag), the HW will do the switch
- * before the deschedule whenever it is possible to do
- * the switch immediately, so it may often look like
- * this case.
- * - If there is a pending switch to ORDERED at the time
- * the HW executes the de-schedule, the HW will perform
- * the switch at the time it re-schedules, and will be
- * able to reschedule any/all of the entries with the
- * same tag.
- * Due to this behavior, the RECOMMENDATION to software is
- * that they have a (next) state of ATOMIC when they
- * DE-SCHEDULE. If an ORDERED tag is what was really desired,
- * SW can choose to immediately switch to an ORDERED tag
- * after the work (that has an ATOMIC tag) is re-scheduled.
- * Note that since there are never any tag switches pending
- * when the HW re-schedules, this switch can be IMMEDIATE upon
- * the reception of the pointer during the re-schedule.
- *
- * @tag: New tag value
- * @tag_type: New tag type
- * @group: New group value
- * @no_sched: Control whether this work queue entry will be rescheduled.
- * - 1 : don't schedule this work
- * - 0 : allow this work to be scheduled.
- */
-static inline void cvmx_pow_tag_sw_desched_nocheck(
- uint32_t tag,
- enum cvmx_pow_tag_type tag_type,
- uint64_t group,
- uint64_t no_sched)
-{
- cvmx_addr_t ptr;
- cvmx_pow_tag_req_t tag_req;
-
- if (CVMX_ENABLE_POW_CHECKS) {
- cvmx_pow_tag_req_t current_tag;
- __cvmx_pow_warn_if_pending_switch(__func__);
- current_tag = cvmx_pow_get_current_tag();
- if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL)
- pr_warning("%s called with NULL_NULL tag\n",
- __func__);
- if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL)
- pr_warning("%s called with NULL tag. Deschedule not "
- "allowed from NULL state\n",
- __func__);
- if ((current_tag.s.type != CVMX_POW_TAG_TYPE_ATOMIC)
- && (tag_type != CVMX_POW_TAG_TYPE_ATOMIC))
- pr_warning("%s called where neither the before or "
- "after tag is ATOMIC\n",
- __func__);
- }
-
- tag_req.u64 = 0;
- tag_req.s.op = CVMX_POW_TAG_OP_SWTAG_DESCH;
- tag_req.s.tag = tag;
- tag_req.s.type = tag_type;
- tag_req.s.grp = group;
- tag_req.s.no_sched = no_sched;
-
- ptr.u64 = 0;
- ptr.sio.mem_region = CVMX_IO_SEG;
- ptr.sio.is_io = 1;
- ptr.sio.did = CVMX_OCT_DID_TAG_TAG3;
- /*
- * since TAG3 is used, this store will clear the local pending
- * switch bit.
- */
- cvmx_write_io(ptr.u64, tag_req.u64);
-}
-
-/**
- * Performs a tag switch and then an immediate deschedule. This completes
- * immediately, so completion must not be waited for. This function does NOT
- * update the wqe in DRAM to match arguments.
- *
- * This function waits for any prior tag switches to complete, so the
- * calling code may call this function with a pending tag switch.
- *
- * Note the following CAVEAT of the Octeon HW behavior when
- * re-scheduling DE-SCHEDULEd items whose (next) state is
- * ORDERED:
- * - If there are no switches pending at the time that the
- * HW executes the de-schedule, the HW will only re-schedule
- * the head of the FIFO associated with the given tag. This
- * means that in many respects, the HW treats this ORDERED
- * tag as an ATOMIC tag. Note that in the SWTAG_DESCH
- * case (to an ORDERED tag), the HW will do the switch
- * before the deschedule whenever it is possible to do
- * the switch immediately, so it may often look like
- * this case.
- * - If there is a pending switch to ORDERED at the time
- * the HW executes the de-schedule, the HW will perform
- * the switch at the time it re-schedules, and will be
- * able to reschedule any/all of the entries with the
- * same tag.
- * Due to this behavior, the RECOMMENDATION to software is
- * that they have a (next) state of ATOMIC when they
- * DE-SCHEDULE. If an ORDERED tag is what was really desired,
- * SW can choose to immediately switch to an ORDERED tag
- * after the work (that has an ATOMIC tag) is re-scheduled.
- * Note that since there are never any tag switches pending
- * when the HW re-schedules, this switch can be IMMEDIATE upon
- * the reception of the pointer during the re-schedule.
- *
- * @tag: New tag value
- * @tag_type: New tag type
- * @group: New group value
- * @no_sched: Control whether this work queue entry will be rescheduled.
- * - 1 : don't schedule this work
- * - 0 : allow this work to be scheduled.
- */
-static inline void cvmx_pow_tag_sw_desched(uint32_t tag,
- enum cvmx_pow_tag_type tag_type,
- uint64_t group, uint64_t no_sched)
-{
- if (CVMX_ENABLE_POW_CHECKS)
- __cvmx_pow_warn_if_pending_switch(__func__);
-
- /* Need to make sure any writes to the work queue entry are complete */
- CVMX_SYNCWS;
- /*
- * Ensure that there is not a pending tag switch, as a tag
- * switch cannot be started if a previous switch is still
- * pending.
- */
- cvmx_pow_tag_sw_wait();
- cvmx_pow_tag_sw_desched_nocheck(tag, tag_type, group, no_sched);
-}
-
-/**
- * Deschedules the current work queue entry.
- *
- * @no_sched: no schedule flag value to be set on the work queue
- * entry. If this is set the entry will not be
- * rescheduled.
- */
-static inline void cvmx_pow_desched(uint64_t no_sched)
-{
- cvmx_addr_t ptr;
- cvmx_pow_tag_req_t tag_req;
-
- if (CVMX_ENABLE_POW_CHECKS) {
- cvmx_pow_tag_req_t current_tag;
- __cvmx_pow_warn_if_pending_switch(__func__);
- current_tag = cvmx_pow_get_current_tag();
- if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL)
- pr_warning("%s called with NULL_NULL tag\n",
- __func__);
- if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL)
- pr_warning("%s called with NULL tag. Deschedule not "
- "expected from NULL state\n",
- __func__);
- }
-
- /* Need to make sure any writes to the work queue entry are complete */
- CVMX_SYNCWS;
-
- tag_req.u64 = 0;
- tag_req.s.op = CVMX_POW_TAG_OP_DESCH;
- tag_req.s.no_sched = no_sched;
-
- ptr.u64 = 0;
- ptr.sio.mem_region = CVMX_IO_SEG;
- ptr.sio.is_io = 1;
- ptr.sio.did = CVMX_OCT_DID_TAG_TAG3;
- /*
- * since TAG3 is used, this store will clear the local pending
- * switch bit.
- */
- cvmx_write_io(ptr.u64, tag_req.u64);
-}
-
-/****************************************************
-* Define usage of bits within the 32 bit tag values.
-*****************************************************/
-
-/*
- * Number of bits of the tag used by software. The SW bits are always
- * a contiguous block of the high bits, starting at bit 31. The hardware
- * bits are always the low bits. By default, the top 8 bits of the
- * tag are reserved for software, and the low 24 are set by the IPD
- * unit.
- */
-#define CVMX_TAG_SW_BITS (8)
-#define CVMX_TAG_SW_SHIFT (32 - CVMX_TAG_SW_BITS)
-
-/* Below is the list of values for the top 8 bits of the tag. */
-/*
- * Tag values with top byte of this value are reserved for internal
- * executive uses.
- */
-#define CVMX_TAG_SW_BITS_INTERNAL 0x1
-/* The executive divides the remaining 24 bits as follows:
- * - the upper 8 bits (bits 23 - 16 of the tag) define a subgroup
- *
- * - the lower 16 bits (bits 15 - 0 of the tag) are the value
- *   within the subgroup
- *
- * Note that this section describes the format of tags generated by
- * software - refer to the hardware documentation for a description of
- * the tag values generated by the packet input hardware. Subgroups
- * are defined here.
- */
-/* Mask for the value portion of the tag */
-#define CVMX_TAG_SUBGROUP_MASK 0xFFFF
-#define CVMX_TAG_SUBGROUP_SHIFT 16
-#define CVMX_TAG_SUBGROUP_PKO 0x1
-
-/* End of executive tag subgroup definitions */
-
-/*
- * The remaining software bit values 0x2 - 0xff are available
- * for application use.
- */
-
-/**
- * This function creates a 32 bit tag value from the two values provided.
- *
- * @sw_bits: The upper bits (number depends on configuration) are set
- * to this value. The remainder of bits are set by the
- * hw_bits parameter.
- *
- * @hw_bits: The lower bits (number depends on configuration) are set
- * to this value. The remainder of bits are set by the
- * sw_bits parameter.
- *
- * Returns 32 bit value of the combined hw and sw bits.
- */
-static inline uint32_t cvmx_pow_tag_compose(uint64_t sw_bits, uint64_t hw_bits)
-{
- return ((sw_bits & cvmx_build_mask(CVMX_TAG_SW_BITS)) <<
- CVMX_TAG_SW_SHIFT) |
- (hw_bits & cvmx_build_mask(32 - CVMX_TAG_SW_BITS));
-}
-
-/**
- * Extracts the bits allocated for software use from the tag
- *
- * @tag: 32 bit tag value
- *
- * Returns N bit software tag value, where N is configurable with the
- * CVMX_TAG_SW_BITS define
- */
-static inline uint32_t cvmx_pow_tag_get_sw_bits(uint64_t tag)
-{
- return (tag >> (32 - CVMX_TAG_SW_BITS)) &
- cvmx_build_mask(CVMX_TAG_SW_BITS);
-}
-
-/**
- *
- * Extracts the bits allocated for hardware use from the tag
- *
- * @tag: 32 bit tag value
- *
- * Returns (32 - N) bit hardware tag value, where N is configurable
- * with the CVMX_TAG_SW_BITS define
- */
-static inline uint32_t cvmx_pow_tag_get_hw_bits(uint64_t tag)
-{
- return tag & cvmx_build_mask(32 - CVMX_TAG_SW_BITS);
-}
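To make the software/hardware split concrete, here is a small round-trip sketch using the three helpers above; the constants are illustrative only.

/*
 * Editor's sketch (not part of the original file): with the default
 * CVMX_TAG_SW_BITS of 8, the sw bits occupy tag[31:24] and the hw bits
 * occupy tag[23:0].
 */
static inline void example_tag_round_trip(void)
{
	uint32_t tag = cvmx_pow_tag_compose(CVMX_TAG_SW_BITS_INTERNAL, 0x123456);
	uint32_t sw = cvmx_pow_tag_get_sw_bits(tag);	/* == CVMX_TAG_SW_BITS_INTERNAL */
	uint32_t hw = cvmx_pow_tag_get_hw_bits(tag);	/* == 0x123456 */

	(void)sw;
	(void)hw;
}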
-
-/**
- * Store the current POW internal state into the supplied
- * buffer. It is recommended that you pass a buffer of at least
- * 128KB. The format of the capture may change based on SDK
- * version and Octeon chip.
- *
- * @buffer: Buffer to store capture into
- * @buffer_size:
- * The size of the supplied buffer
- *
- * Returns Zero on success, negative on failure
- */
-extern int cvmx_pow_capture(void *buffer, int buffer_size);
-
-/**
- * Dump a POW capture to the console in a human readable format.
- *
- * @buffer: POW capture from cvmx_pow_capture()
- * @buffer_size:
- * Size of the buffer
- */
-extern void cvmx_pow_display(void *buffer, int buffer_size);
-
-/**
- * Return the number of POW entries supported by this chip
- *
- * Returns Number of POW entries
- */
-extern int cvmx_pow_get_num_entries(void);
-
-#endif /* __CVMX_POW_H__ */
diff --git a/drivers/staging/octeon/cvmx-scratch.h b/drivers/staging/octeon/cvmx-scratch.h
deleted file mode 100644
index 96b70cfd6245..000000000000
--- a/drivers/staging/octeon/cvmx-scratch.h
+++ /dev/null
@@ -1,139 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/**
- *
- * This file provides support for the processor local scratch memory.
- * Scratch memory is byte addressable - all addresses are byte addresses.
- *
- */
-
-#ifndef __CVMX_SCRATCH_H__
-#define __CVMX_SCRATCH_H__
-
-/*
- * Note: This define must be a long, not a long long in order to
- * compile without warnings for both 32bit and 64bit.
- */
-#define CVMX_SCRATCH_BASE (-32768l) /* 0xffffffffffff8000 */
-
-/**
- * Reads an 8 bit value from the processor local scratchpad memory.
- *
- * @address: byte address to read from
- *
- * Returns value read
- */
-static inline uint8_t cvmx_scratch_read8(uint64_t address)
-{
- return *CASTPTR(volatile uint8_t, CVMX_SCRATCH_BASE + address);
-}
-
-/**
- * Reads a 16 bit value from the processor local scratchpad memory.
- *
- * @address: byte address to read from
- *
- * Returns value read
- */
-static inline uint16_t cvmx_scratch_read16(uint64_t address)
-{
- return *CASTPTR(volatile uint16_t, CVMX_SCRATCH_BASE + address);
-}
-
-/**
- * Reads a 32 bit value from the processor local scratchpad memory.
- *
- * @address: byte address to read from
- *
- * Returns value read
- */
-static inline uint32_t cvmx_scratch_read32(uint64_t address)
-{
- return *CASTPTR(volatile uint32_t, CVMX_SCRATCH_BASE + address);
-}
-
-/**
- * Reads a 64 bit value from the processor local scratchpad memory.
- *
- * @address: byte address to read from
- *
- * Returns value read
- */
-static inline uint64_t cvmx_scratch_read64(uint64_t address)
-{
- return *CASTPTR(volatile uint64_t, CVMX_SCRATCH_BASE + address);
-}
-
-/**
- * Writes an 8 bit value to the processor local scratchpad memory.
- *
- * @address: byte address to write to
- * @value: value to write
- */
-static inline void cvmx_scratch_write8(uint64_t address, uint64_t value)
-{
- *CASTPTR(volatile uint8_t, CVMX_SCRATCH_BASE + address) =
- (uint8_t) value;
-}
-
-/**
- * Writes a 16 bit value to the processor local scratchpad memory.
- *
- * @address: byte address to write to
- * @value: value to write
- */
-static inline void cvmx_scratch_write16(uint64_t address, uint64_t value)
-{
- *CASTPTR(volatile uint16_t, CVMX_SCRATCH_BASE + address) =
- (uint16_t) value;
-}
-
-/**
- * Writes a 32 bit value to the processor local scratchpad memory.
- *
- * @address: byte address to write to
- * @value: value to write
- */
-static inline void cvmx_scratch_write32(uint64_t address, uint64_t value)
-{
- *CASTPTR(volatile uint32_t, CVMX_SCRATCH_BASE + address) =
- (uint32_t) value;
-}
-
-/**
- * Writes a 64 bit value to the processor local scratchpad memory.
- *
- * @address: byte address to write to
- * @value: value to write
- */
-static inline void cvmx_scratch_write64(uint64_t address, uint64_t value)
-{
- *CASTPTR(volatile uint64_t, CVMX_SCRATCH_BASE + address) = value;
-}
-
-#endif /* __CVMX_SCRATCH_H__ */
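As a hedged sketch of how the scratch accessors above compose (the byte offset is illustrative and must lie within the chip's scratchpad size):

/*
 * Editor's sketch (not part of the original file): write a 64-bit value at
 * byte offset 8 of the local scratchpad and read it back.
 */
static inline uint64_t example_scratch_round_trip(void)
{
	const uint64_t offset = 8;	/* byte address within the scratchpad */

	cvmx_scratch_write64(offset, 0x0123456789abcdefull);
	return cvmx_scratch_read64(offset);	/* == 0x0123456789abcdef */
}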
diff --git a/drivers/staging/octeon/cvmx-smix-defs.h b/drivers/staging/octeon/cvmx-smix-defs.h
deleted file mode 100644
index 9ae45fcbe3e3..000000000000
--- a/drivers/staging/octeon/cvmx-smix-defs.h
+++ /dev/null
@@ -1,178 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-#ifndef __CVMX_SMIX_DEFS_H__
-#define __CVMX_SMIX_DEFS_H__
-
-#define CVMX_SMIX_CLK(offset) \
- CVMX_ADD_IO_SEG(0x0001180000001818ull + (((offset) & 1) * 256))
-#define CVMX_SMIX_CMD(offset) \
- CVMX_ADD_IO_SEG(0x0001180000001800ull + (((offset) & 1) * 256))
-#define CVMX_SMIX_EN(offset) \
- CVMX_ADD_IO_SEG(0x0001180000001820ull + (((offset) & 1) * 256))
-#define CVMX_SMIX_RD_DAT(offset) \
- CVMX_ADD_IO_SEG(0x0001180000001810ull + (((offset) & 1) * 256))
-#define CVMX_SMIX_WR_DAT(offset) \
- CVMX_ADD_IO_SEG(0x0001180000001808ull + (((offset) & 1) * 256))
-
-union cvmx_smix_clk {
- uint64_t u64;
- struct cvmx_smix_clk_s {
- uint64_t reserved_25_63:39;
- uint64_t mode:1;
- uint64_t reserved_21_23:3;
- uint64_t sample_hi:5;
- uint64_t sample_mode:1;
- uint64_t reserved_14_14:1;
- uint64_t clk_idle:1;
- uint64_t preamble:1;
- uint64_t sample:4;
- uint64_t phase:8;
- } s;
- struct cvmx_smix_clk_cn30xx {
- uint64_t reserved_21_63:43;
- uint64_t sample_hi:5;
- uint64_t reserved_14_15:2;
- uint64_t clk_idle:1;
- uint64_t preamble:1;
- uint64_t sample:4;
- uint64_t phase:8;
- } cn30xx;
- struct cvmx_smix_clk_cn30xx cn31xx;
- struct cvmx_smix_clk_cn30xx cn38xx;
- struct cvmx_smix_clk_cn30xx cn38xxp2;
- struct cvmx_smix_clk_cn50xx {
- uint64_t reserved_25_63:39;
- uint64_t mode:1;
- uint64_t reserved_21_23:3;
- uint64_t sample_hi:5;
- uint64_t reserved_14_15:2;
- uint64_t clk_idle:1;
- uint64_t preamble:1;
- uint64_t sample:4;
- uint64_t phase:8;
- } cn50xx;
- struct cvmx_smix_clk_s cn52xx;
- struct cvmx_smix_clk_cn50xx cn52xxp1;
- struct cvmx_smix_clk_s cn56xx;
- struct cvmx_smix_clk_cn50xx cn56xxp1;
- struct cvmx_smix_clk_cn30xx cn58xx;
- struct cvmx_smix_clk_cn30xx cn58xxp1;
-};
-
-union cvmx_smix_cmd {
- uint64_t u64;
- struct cvmx_smix_cmd_s {
- uint64_t reserved_18_63:46;
- uint64_t phy_op:2;
- uint64_t reserved_13_15:3;
- uint64_t phy_adr:5;
- uint64_t reserved_5_7:3;
- uint64_t reg_adr:5;
- } s;
- struct cvmx_smix_cmd_cn30xx {
- uint64_t reserved_17_63:47;
- uint64_t phy_op:1;
- uint64_t reserved_13_15:3;
- uint64_t phy_adr:5;
- uint64_t reserved_5_7:3;
- uint64_t reg_adr:5;
- } cn30xx;
- struct cvmx_smix_cmd_cn30xx cn31xx;
- struct cvmx_smix_cmd_cn30xx cn38xx;
- struct cvmx_smix_cmd_cn30xx cn38xxp2;
- struct cvmx_smix_cmd_s cn50xx;
- struct cvmx_smix_cmd_s cn52xx;
- struct cvmx_smix_cmd_s cn52xxp1;
- struct cvmx_smix_cmd_s cn56xx;
- struct cvmx_smix_cmd_s cn56xxp1;
- struct cvmx_smix_cmd_cn30xx cn58xx;
- struct cvmx_smix_cmd_cn30xx cn58xxp1;
-};
-
-union cvmx_smix_en {
- uint64_t u64;
- struct cvmx_smix_en_s {
- uint64_t reserved_1_63:63;
- uint64_t en:1;
- } s;
- struct cvmx_smix_en_s cn30xx;
- struct cvmx_smix_en_s cn31xx;
- struct cvmx_smix_en_s cn38xx;
- struct cvmx_smix_en_s cn38xxp2;
- struct cvmx_smix_en_s cn50xx;
- struct cvmx_smix_en_s cn52xx;
- struct cvmx_smix_en_s cn52xxp1;
- struct cvmx_smix_en_s cn56xx;
- struct cvmx_smix_en_s cn56xxp1;
- struct cvmx_smix_en_s cn58xx;
- struct cvmx_smix_en_s cn58xxp1;
-};
-
-union cvmx_smix_rd_dat {
- uint64_t u64;
- struct cvmx_smix_rd_dat_s {
- uint64_t reserved_18_63:46;
- uint64_t pending:1;
- uint64_t val:1;
- uint64_t dat:16;
- } s;
- struct cvmx_smix_rd_dat_s cn30xx;
- struct cvmx_smix_rd_dat_s cn31xx;
- struct cvmx_smix_rd_dat_s cn38xx;
- struct cvmx_smix_rd_dat_s cn38xxp2;
- struct cvmx_smix_rd_dat_s cn50xx;
- struct cvmx_smix_rd_dat_s cn52xx;
- struct cvmx_smix_rd_dat_s cn52xxp1;
- struct cvmx_smix_rd_dat_s cn56xx;
- struct cvmx_smix_rd_dat_s cn56xxp1;
- struct cvmx_smix_rd_dat_s cn58xx;
- struct cvmx_smix_rd_dat_s cn58xxp1;
-};
-
-union cvmx_smix_wr_dat {
- uint64_t u64;
- struct cvmx_smix_wr_dat_s {
- uint64_t reserved_18_63:46;
- uint64_t pending:1;
- uint64_t val:1;
- uint64_t dat:16;
- } s;
- struct cvmx_smix_wr_dat_s cn30xx;
- struct cvmx_smix_wr_dat_s cn31xx;
- struct cvmx_smix_wr_dat_s cn38xx;
- struct cvmx_smix_wr_dat_s cn38xxp2;
- struct cvmx_smix_wr_dat_s cn50xx;
- struct cvmx_smix_wr_dat_s cn52xx;
- struct cvmx_smix_wr_dat_s cn52xxp1;
- struct cvmx_smix_wr_dat_s cn56xx;
- struct cvmx_smix_wr_dat_s cn56xxp1;
- struct cvmx_smix_wr_dat_s cn58xx;
- struct cvmx_smix_wr_dat_s cn58xxp1;
-};
-
-#endif
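The register layout above implies the usual SMI/MDIO flow: write a command, then poll RD_DAT until the pending bit clears and VAL indicates a good read. A hedged sketch follows; the PHY_OP encoding used for a read is an assumption and should be checked against the hardware manual.

/*
 * Editor's sketch (not part of the original file) of a register read on
 * SMI interface 0. SMI_READ_OP is an assumed encoding, not taken from this
 * header; consult the HRM for the real PHY_OP values.
 */
#define SMI_READ_OP 1	/* assumption, for illustration only */

static inline int example_smi_read(int phy_addr, int reg_addr, uint16_t *out)
{
	union cvmx_smix_cmd cmd;
	union cvmx_smix_rd_dat rd;

	cmd.u64 = 0;
	cmd.s.phy_op = SMI_READ_OP;
	cmd.s.phy_adr = phy_addr;
	cmd.s.reg_adr = reg_addr;
	cvmx_write_csr(CVMX_SMIX_CMD(0), cmd.u64);

	do {
		rd.u64 = cvmx_read_csr(CVMX_SMIX_RD_DAT(0));
	} while (rd.s.pending);

	if (!rd.s.val)
		return -1;
	*out = rd.s.dat;
	return 0;
}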
diff --git a/drivers/staging/octeon/cvmx-spi.c b/drivers/staging/octeon/cvmx-spi.c
deleted file mode 100644
index 82794d920cec..000000000000
--- a/drivers/staging/octeon/cvmx-spi.c
+++ /dev/null
@@ -1,667 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/*
- *
- * Support library for the SPI
- */
-#include <asm/octeon/octeon.h>
-
-#include "cvmx-config.h"
-
-#include "cvmx-pko.h"
-#include "cvmx-spi.h"
-
-#include "cvmx-spxx-defs.h"
-#include "cvmx-stxx-defs.h"
-#include "cvmx-srxx-defs.h"
-
-#define INVOKE_CB(function_p, args...) \
- do { \
- if (function_p) { \
- res = function_p(args); \
- if (res) \
- return res; \
- } \
- } while (0)
-
-#if CVMX_ENABLE_DEBUG_PRINTS
-static const char *modes[] =
- { "UNKNOWN", "TX Halfplex", "Rx Halfplex", "Duplex" };
-#endif
-
-/* Default callbacks, can be overridden
- * using cvmx_spi_get_callbacks/cvmx_spi_set_callbacks
- */
-static cvmx_spi_callbacks_t cvmx_spi_callbacks = {
- .reset_cb = cvmx_spi_reset_cb,
- .calendar_setup_cb = cvmx_spi_calendar_setup_cb,
- .clock_detect_cb = cvmx_spi_clock_detect_cb,
- .training_cb = cvmx_spi_training_cb,
- .calendar_sync_cb = cvmx_spi_calendar_sync_cb,
- .interface_up_cb = cvmx_spi_interface_up_cb
-};
-
-/**
- * Get current SPI4 initialization callbacks
- *
- * @callbacks: Pointer to the callbacks structure to fill
- *
- * The current callbacks are copied into the supplied structure.
- */
-void cvmx_spi_get_callbacks(cvmx_spi_callbacks_t *callbacks)
-{
- memcpy(callbacks, &cvmx_spi_callbacks, sizeof(cvmx_spi_callbacks));
-}
-
-/**
- * Set new SPI4 initialization callbacks
- *
- * @new_callbacks: Pointer to an updated callbacks structure.
- */
-void cvmx_spi_set_callbacks(cvmx_spi_callbacks_t *new_callbacks)
-{
- memcpy(&cvmx_spi_callbacks, new_callbacks, sizeof(cvmx_spi_callbacks));
-}
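A minimal sketch of the intended override pattern, assuming a caller-supplied my_reset_cb with the reset_cb signature; only the reset step is replaced and the remaining defaults are kept.

/*
 * Editor's sketch (not part of the original file): my_reset_cb is a
 * hypothetical replacement implementation provided elsewhere.
 */
extern int my_reset_cb(int interface, cvmx_spi_mode_t mode);

static void example_override_spi_reset(void)
{
	cvmx_spi_callbacks_t callbacks;

	cvmx_spi_get_callbacks(&callbacks);
	callbacks.reset_cb = my_reset_cb;
	cvmx_spi_set_callbacks(&callbacks);
}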
-
-/**
- * Initialize and start the SPI interface.
- *
- * @interface: The identifier of the packet interface to configure and
- * use as a SPI interface.
- * @mode: The operating mode for the SPI interface. The interface
- * can operate as a full duplex (both Tx and Rx data paths
- * active) or as a halfplex (either the Tx data path is
- * active or the Rx data path is active, but not both).
- * @timeout: Timeout to wait for clock synchronization in seconds
- * @num_ports: Number of SPI ports to configure
- *
- * Returns Zero on success, negative on failure.
- */
-int cvmx_spi_start_interface(int interface, cvmx_spi_mode_t mode, int timeout,
- int num_ports)
-{
- int res = -1;
-
- if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
- return res;
-
- /* Callback to perform SPI4 reset */
- INVOKE_CB(cvmx_spi_callbacks.reset_cb, interface, mode);
-
- /* Callback to perform calendar setup */
- INVOKE_CB(cvmx_spi_callbacks.calendar_setup_cb, interface, mode,
- num_ports);
-
- /* Callback to perform clock detection */
- INVOKE_CB(cvmx_spi_callbacks.clock_detect_cb, interface, mode, timeout);
-
- /* Callback to perform SPI4 link training */
- INVOKE_CB(cvmx_spi_callbacks.training_cb, interface, mode, timeout);
-
- /* Callback to perform calendar sync */
- INVOKE_CB(cvmx_spi_callbacks.calendar_sync_cb, interface, mode,
- timeout);
-
- /* Callback to handle interface coming up */
- INVOKE_CB(cvmx_spi_callbacks.interface_up_cb, interface, mode);
-
- return res;
-}
-
-/**
- * This routine restarts the SPI interface after it has lost synchronization
- * with its correspondent system.
- *
- * @interface: The identifier of the packet interface to configure and
- * use as a SPI interface.
- * @mode: The operating mode for the SPI interface. The interface
- * can operate as a full duplex (both Tx and Rx data paths
- * active) or as a halfplex (either the Tx data path is
- * active or the Rx data path is active, but not both).
- * @timeout: Timeout to wait for clock synchronization in seconds
- *
- * Returns Zero on success, negative on failure.
- */
-int cvmx_spi_restart_interface(int interface, cvmx_spi_mode_t mode, int timeout)
-{
- int res = -1;
-
- if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
- return res;
-
- cvmx_dprintf("SPI%d: Restart %s\n", interface, modes[mode]);
-
- /* Callback to perform SPI4 reset */
- INVOKE_CB(cvmx_spi_callbacks.reset_cb, interface, mode);
-
- /* NOTE: Calendar setup is not performed during restart */
- /* Refer to cvmx_spi_start_interface() for the full sequence */
-
- /* Callback to perform clock detection */
- INVOKE_CB(cvmx_spi_callbacks.clock_detect_cb, interface, mode, timeout);
-
- /* Callback to perform SPI4 link training */
- INVOKE_CB(cvmx_spi_callbacks.training_cb, interface, mode, timeout);
-
- /* Callback to perform calendar sync */
- INVOKE_CB(cvmx_spi_callbacks.calendar_sync_cb, interface, mode,
- timeout);
-
- /* Callback to handle interface coming up */
- INVOKE_CB(cvmx_spi_callbacks.interface_up_cb, interface, mode);
-
- return res;
-}
-
-/**
- * Callback to perform SPI4 reset
- *
- * @interface: The identifier of the packet interface to configure and
- * use as a SPI interface.
- * @mode: The operating mode for the SPI interface. The interface
- * can operate as a full duplex (both Tx and Rx data paths
- * active) or as a halfplex (either the Tx data path is
- * active or the Rx data path is active, but not both).
- *
- * Returns Zero on success, non-zero error code on failure (will cause
- * SPI initialization to abort)
- */
-int cvmx_spi_reset_cb(int interface, cvmx_spi_mode_t mode)
-{
- union cvmx_spxx_dbg_deskew_ctl spxx_dbg_deskew_ctl;
- union cvmx_spxx_clk_ctl spxx_clk_ctl;
- union cvmx_spxx_bist_stat spxx_bist_stat;
- union cvmx_spxx_int_msk spxx_int_msk;
- union cvmx_stxx_int_msk stxx_int_msk;
- union cvmx_spxx_trn4_ctl spxx_trn4_ctl;
- int index;
- uint64_t MS = cvmx_sysinfo_get()->cpu_clock_hz / 1000;
-
- /* Disable SPI error events while we run BIST */
- spxx_int_msk.u64 = cvmx_read_csr(CVMX_SPXX_INT_MSK(interface));
- cvmx_write_csr(CVMX_SPXX_INT_MSK(interface), 0);
- stxx_int_msk.u64 = cvmx_read_csr(CVMX_STXX_INT_MSK(interface));
- cvmx_write_csr(CVMX_STXX_INT_MSK(interface), 0);
-
- /* Run BIST in the SPI interface */
- cvmx_write_csr(CVMX_SRXX_COM_CTL(interface), 0);
- cvmx_write_csr(CVMX_STXX_COM_CTL(interface), 0);
- spxx_clk_ctl.u64 = 0;
- spxx_clk_ctl.s.runbist = 1;
- cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64);
- cvmx_wait(10 * MS);
- spxx_bist_stat.u64 = cvmx_read_csr(CVMX_SPXX_BIST_STAT(interface));
- if (spxx_bist_stat.s.stat0)
- cvmx_dprintf
- ("ERROR SPI%d: BIST failed on receive datapath FIFO\n",
- interface);
- if (spxx_bist_stat.s.stat1)
- cvmx_dprintf("ERROR SPI%d: BIST failed on RX calendar table\n",
- interface);
- if (spxx_bist_stat.s.stat2)
- cvmx_dprintf("ERROR SPI%d: BIST failed on TX calendar table\n",
- interface);
-
- /* Clear the calendar table after BIST to fix parity errors */
- for (index = 0; index < 32; index++) {
- union cvmx_srxx_spi4_calx srxx_spi4_calx;
- union cvmx_stxx_spi4_calx stxx_spi4_calx;
-
- srxx_spi4_calx.u64 = 0;
- srxx_spi4_calx.s.oddpar = 1;
- cvmx_write_csr(CVMX_SRXX_SPI4_CALX(index, interface),
- srxx_spi4_calx.u64);
-
- stxx_spi4_calx.u64 = 0;
- stxx_spi4_calx.s.oddpar = 1;
- cvmx_write_csr(CVMX_STXX_SPI4_CALX(index, interface),
- stxx_spi4_calx.u64);
- }
-
-	/* Re-enable reporting of error interrupts */
- cvmx_write_csr(CVMX_SPXX_INT_REG(interface),
- cvmx_read_csr(CVMX_SPXX_INT_REG(interface)));
- cvmx_write_csr(CVMX_SPXX_INT_MSK(interface), spxx_int_msk.u64);
- cvmx_write_csr(CVMX_STXX_INT_REG(interface),
- cvmx_read_csr(CVMX_STXX_INT_REG(interface)));
- cvmx_write_csr(CVMX_STXX_INT_MSK(interface), stxx_int_msk.u64);
-
- /* Setup the CLKDLY right in the middle */
- spxx_clk_ctl.u64 = 0;
- spxx_clk_ctl.s.seetrn = 0;
- spxx_clk_ctl.s.clkdly = 0x10;
- spxx_clk_ctl.s.runbist = 0;
- spxx_clk_ctl.s.statdrv = 0;
- /* This should always be on the opposite edge as statdrv */
- spxx_clk_ctl.s.statrcv = 1;
- spxx_clk_ctl.s.sndtrn = 0;
- spxx_clk_ctl.s.drptrn = 0;
- spxx_clk_ctl.s.rcvtrn = 0;
- spxx_clk_ctl.s.srxdlck = 0;
- cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64);
- cvmx_wait(100 * MS);
-
- /* Reset SRX0 DLL */
- spxx_clk_ctl.s.srxdlck = 1;
- cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64);
-
- /* Waiting for Inf0 Spi4 RX DLL to lock */
- cvmx_wait(100 * MS);
-
- /* Enable dynamic alignment */
- spxx_trn4_ctl.s.trntest = 0;
- spxx_trn4_ctl.s.jitter = 1;
- spxx_trn4_ctl.s.clr_boot = 1;
- spxx_trn4_ctl.s.set_boot = 0;
- if (OCTEON_IS_MODEL(OCTEON_CN58XX))
- spxx_trn4_ctl.s.maxdist = 3;
- else
- spxx_trn4_ctl.s.maxdist = 8;
- spxx_trn4_ctl.s.macro_en = 1;
- spxx_trn4_ctl.s.mux_en = 1;
- cvmx_write_csr(CVMX_SPXX_TRN4_CTL(interface), spxx_trn4_ctl.u64);
-
- spxx_dbg_deskew_ctl.u64 = 0;
- cvmx_write_csr(CVMX_SPXX_DBG_DESKEW_CTL(interface),
- spxx_dbg_deskew_ctl.u64);
-
- return 0;
-}
-
-/**
- * Callback to setup calendar and miscellaneous settings before clock detection
- *
- * @interface: The identifier of the packet interface to configure and
- * use as a SPI interface.
- * @mode: The operating mode for the SPI interface. The interface
- * can operate as a full duplex (both Tx and Rx data paths
- * active) or as a halfplex (either the Tx data path is
- * active or the Rx data path is active, but not both).
- * @num_ports: Number of ports to configure on SPI
- *
- * Returns Zero on success, non-zero error code on failure (will cause
- * SPI initialization to abort)
- */
-int cvmx_spi_calendar_setup_cb(int interface, cvmx_spi_mode_t mode,
- int num_ports)
-{
- int port;
- int index;
- if (mode & CVMX_SPI_MODE_RX_HALFPLEX) {
- union cvmx_srxx_com_ctl srxx_com_ctl;
- union cvmx_srxx_spi4_stat srxx_spi4_stat;
-
- /* SRX0 number of Ports */
- srxx_com_ctl.u64 = 0;
- srxx_com_ctl.s.prts = num_ports - 1;
- srxx_com_ctl.s.st_en = 0;
- srxx_com_ctl.s.inf_en = 0;
- cvmx_write_csr(CVMX_SRXX_COM_CTL(interface), srxx_com_ctl.u64);
-
-		/* SRX0 Calendar Table. This round robins through all ports */
- port = 0;
- index = 0;
- while (port < num_ports) {
- union cvmx_srxx_spi4_calx srxx_spi4_calx;
- srxx_spi4_calx.u64 = 0;
- srxx_spi4_calx.s.prt0 = port++;
- srxx_spi4_calx.s.prt1 = port++;
- srxx_spi4_calx.s.prt2 = port++;
- srxx_spi4_calx.s.prt3 = port++;
- srxx_spi4_calx.s.oddpar =
- ~(cvmx_dpop(srxx_spi4_calx.u64) & 1);
- cvmx_write_csr(CVMX_SRXX_SPI4_CALX(index, interface),
- srxx_spi4_calx.u64);
- index++;
- }
- srxx_spi4_stat.u64 = 0;
- srxx_spi4_stat.s.len = num_ports;
- srxx_spi4_stat.s.m = 1;
- cvmx_write_csr(CVMX_SRXX_SPI4_STAT(interface),
- srxx_spi4_stat.u64);
- }
-
- if (mode & CVMX_SPI_MODE_TX_HALFPLEX) {
- union cvmx_stxx_arb_ctl stxx_arb_ctl;
- union cvmx_gmxx_tx_spi_max gmxx_tx_spi_max;
- union cvmx_gmxx_tx_spi_thresh gmxx_tx_spi_thresh;
- union cvmx_gmxx_tx_spi_ctl gmxx_tx_spi_ctl;
- union cvmx_stxx_spi4_stat stxx_spi4_stat;
- union cvmx_stxx_spi4_dat stxx_spi4_dat;
-
- /* STX0 Config */
- stxx_arb_ctl.u64 = 0;
- stxx_arb_ctl.s.igntpa = 0;
- stxx_arb_ctl.s.mintrn = 0;
- cvmx_write_csr(CVMX_STXX_ARB_CTL(interface), stxx_arb_ctl.u64);
-
- gmxx_tx_spi_max.u64 = 0;
- gmxx_tx_spi_max.s.max1 = 8;
- gmxx_tx_spi_max.s.max2 = 4;
- gmxx_tx_spi_max.s.slice = 0;
- cvmx_write_csr(CVMX_GMXX_TX_SPI_MAX(interface),
- gmxx_tx_spi_max.u64);
-
- gmxx_tx_spi_thresh.u64 = 0;
- gmxx_tx_spi_thresh.s.thresh = 4;
- cvmx_write_csr(CVMX_GMXX_TX_SPI_THRESH(interface),
- gmxx_tx_spi_thresh.u64);
-
- gmxx_tx_spi_ctl.u64 = 0;
- gmxx_tx_spi_ctl.s.tpa_clr = 0;
- gmxx_tx_spi_ctl.s.cont_pkt = 0;
- cvmx_write_csr(CVMX_GMXX_TX_SPI_CTL(interface),
- gmxx_tx_spi_ctl.u64);
-
- /* STX0 Training Control */
- stxx_spi4_dat.u64 = 0;
- /*Minimum needed by dynamic alignment */
- stxx_spi4_dat.s.alpha = 32;
- stxx_spi4_dat.s.max_t = 0xFFFF; /*Minimum interval is 0x20 */
- cvmx_write_csr(CVMX_STXX_SPI4_DAT(interface),
- stxx_spi4_dat.u64);
-
-		/* STX0 Calendar Table. This round robins through all ports */
- port = 0;
- index = 0;
- while (port < num_ports) {
- union cvmx_stxx_spi4_calx stxx_spi4_calx;
- stxx_spi4_calx.u64 = 0;
- stxx_spi4_calx.s.prt0 = port++;
- stxx_spi4_calx.s.prt1 = port++;
- stxx_spi4_calx.s.prt2 = port++;
- stxx_spi4_calx.s.prt3 = port++;
- stxx_spi4_calx.s.oddpar =
- ~(cvmx_dpop(stxx_spi4_calx.u64) & 1);
- cvmx_write_csr(CVMX_STXX_SPI4_CALX(index, interface),
- stxx_spi4_calx.u64);
- index++;
- }
- stxx_spi4_stat.u64 = 0;
- stxx_spi4_stat.s.len = num_ports;
- stxx_spi4_stat.s.m = 1;
- cvmx_write_csr(CVMX_STXX_SPI4_STAT(interface),
- stxx_spi4_stat.u64);
- }
-
- return 0;
-}
-
-/**
- * Callback to perform clock detection
- *
- * @interface: The identifier of the packet interface to configure and
- * use as a SPI interface.
- * @mode: The operating mode for the SPI interface. The interface
- * can operate as a full duplex (both Tx and Rx data paths
- * active) or as a halfplex (either the Tx data path is
- * active or the Rx data path is active, but not both).
- * @timeout: Timeout to wait for clock synchronization in seconds
- *
- * Returns Zero on success, non-zero error code on failure (will cause
- * SPI initialization to abort)
- */
-int cvmx_spi_clock_detect_cb(int interface, cvmx_spi_mode_t mode, int timeout)
-{
- int clock_transitions;
- union cvmx_spxx_clk_stat stat;
- uint64_t timeout_time;
- uint64_t MS = cvmx_sysinfo_get()->cpu_clock_hz / 1000;
-
- /*
- * Regardless of operating mode, both Tx and Rx clocks must be
- * present for the SPI interface to operate.
- */
- cvmx_dprintf("SPI%d: Waiting to see TsClk...\n", interface);
- timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout;
- /*
- * Require 100 clock transitions in order to avoid any noise
- * in the beginning.
- */
- clock_transitions = 100;
- do {
- stat.u64 = cvmx_read_csr(CVMX_SPXX_CLK_STAT(interface));
- if (stat.s.s4clk0 && stat.s.s4clk1 && clock_transitions) {
- /*
- * We've seen a clock transition, so decrement
- * the number we still need.
- */
- clock_transitions--;
- cvmx_write_csr(CVMX_SPXX_CLK_STAT(interface), stat.u64);
- stat.s.s4clk0 = 0;
- stat.s.s4clk1 = 0;
- }
- if (cvmx_get_cycle() > timeout_time) {
- cvmx_dprintf("SPI%d: Timeout\n", interface);
- return -1;
- }
- } while (stat.s.s4clk0 == 0 || stat.s.s4clk1 == 0);
-
- cvmx_dprintf("SPI%d: Waiting to see RsClk...\n", interface);
- timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout;
- /*
- * Require 100 clock transitions in order to avoid any noise in the
- * beginning.
- */
- clock_transitions = 100;
- do {
- stat.u64 = cvmx_read_csr(CVMX_SPXX_CLK_STAT(interface));
- if (stat.s.d4clk0 && stat.s.d4clk1 && clock_transitions) {
- /*
- * We've seen a clock transition, so decrement
- * the number we still need
- */
- clock_transitions--;
- cvmx_write_csr(CVMX_SPXX_CLK_STAT(interface), stat.u64);
- stat.s.d4clk0 = 0;
- stat.s.d4clk1 = 0;
- }
- if (cvmx_get_cycle() > timeout_time) {
- cvmx_dprintf("SPI%d: Timeout\n", interface);
- return -1;
- }
- } while (stat.s.d4clk0 == 0 || stat.s.d4clk1 == 0);
-
- return 0;
-}
-
-/**
- * Callback to perform link training
- *
- * @interface: The identifier of the packet interface to configure and
- * use as a SPI interface.
- * @mode: The operating mode for the SPI interface. The interface
- * can operate as a full duplex (both Tx and Rx data paths
- * active) or as a halfplex (either the Tx data path is
- * active or the Rx data path is active, but not both).
- * @timeout: Timeout to wait for link to be trained (in seconds)
- *
- * Returns Zero on success, non-zero error code on failure (will cause
- * SPI initialization to abort)
- */
-int cvmx_spi_training_cb(int interface, cvmx_spi_mode_t mode, int timeout)
-{
- union cvmx_spxx_trn4_ctl spxx_trn4_ctl;
- union cvmx_spxx_clk_stat stat;
- uint64_t MS = cvmx_sysinfo_get()->cpu_clock_hz / 1000;
- uint64_t timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout;
- int rx_training_needed;
-
- /* SRX0 & STX0 Inf0 Links are configured - begin training */
- union cvmx_spxx_clk_ctl spxx_clk_ctl;
- spxx_clk_ctl.u64 = 0;
- spxx_clk_ctl.s.seetrn = 0;
- spxx_clk_ctl.s.clkdly = 0x10;
- spxx_clk_ctl.s.runbist = 0;
- spxx_clk_ctl.s.statdrv = 0;
- /* This should always be on the opposite edge as statdrv */
- spxx_clk_ctl.s.statrcv = 1;
- spxx_clk_ctl.s.sndtrn = 1;
- spxx_clk_ctl.s.drptrn = 1;
- spxx_clk_ctl.s.rcvtrn = 1;
- spxx_clk_ctl.s.srxdlck = 1;
- cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64);
- cvmx_wait(1000 * MS);
-
- /* SRX0 clear the boot bit */
- spxx_trn4_ctl.u64 = cvmx_read_csr(CVMX_SPXX_TRN4_CTL(interface));
- spxx_trn4_ctl.s.clr_boot = 1;
- cvmx_write_csr(CVMX_SPXX_TRN4_CTL(interface), spxx_trn4_ctl.u64);
-
- /* Wait for the training sequence to complete */
- cvmx_dprintf("SPI%d: Waiting for training\n", interface);
- cvmx_wait(1000 * MS);
- /* Wait a really long time here */
- timeout_time = cvmx_get_cycle() + 1000ull * MS * 600;
- /*
- * The HRM says we must wait for 34 + 16 * MAXDIST training sequences.
- * We'll be pessimistic and wait for a lot more.
- */
- rx_training_needed = 500;
- do {
- stat.u64 = cvmx_read_csr(CVMX_SPXX_CLK_STAT(interface));
- if (stat.s.srxtrn && rx_training_needed) {
- rx_training_needed--;
- cvmx_write_csr(CVMX_SPXX_CLK_STAT(interface), stat.u64);
- stat.s.srxtrn = 0;
- }
- if (cvmx_get_cycle() > timeout_time) {
- cvmx_dprintf("SPI%d: Timeout\n", interface);
- return -1;
- }
- } while (stat.s.srxtrn == 0);
-
- return 0;
-}
-
-/**
- * Callback to perform calendar data synchronization
- *
- * @interface: The identifier of the packet interface to configure and
- * use as a SPI interface.
- * @mode: The operating mode for the SPI interface. The interface
- * can operate as a full duplex (both Tx and Rx data paths
- * active) or as a halfplex (either the Tx data path is
- * active or the Rx data path is active, but not both).
- * @timeout: Timeout to wait for calendar data in seconds
- *
- * Returns Zero on success, non-zero error code on failure (will cause
- * SPI initialization to abort)
- */
-int cvmx_spi_calendar_sync_cb(int interface, cvmx_spi_mode_t mode, int timeout)
-{
- uint64_t MS = cvmx_sysinfo_get()->cpu_clock_hz / 1000;
- if (mode & CVMX_SPI_MODE_RX_HALFPLEX) {
- /* SRX0 interface should be good, send calendar data */
- union cvmx_srxx_com_ctl srxx_com_ctl;
- cvmx_dprintf
- ("SPI%d: Rx is synchronized, start sending calendar data\n",
- interface);
- srxx_com_ctl.u64 = cvmx_read_csr(CVMX_SRXX_COM_CTL(interface));
- srxx_com_ctl.s.inf_en = 1;
- srxx_com_ctl.s.st_en = 1;
- cvmx_write_csr(CVMX_SRXX_COM_CTL(interface), srxx_com_ctl.u64);
- }
-
- if (mode & CVMX_SPI_MODE_TX_HALFPLEX) {
- /* STX0 has achieved sync */
-		/* The correspondent board should be sending calendar data */
- /* Enable the STX0 STAT receiver. */
- union cvmx_spxx_clk_stat stat;
- uint64_t timeout_time;
- union cvmx_stxx_com_ctl stxx_com_ctl;
- stxx_com_ctl.u64 = 0;
- stxx_com_ctl.s.st_en = 1;
- cvmx_write_csr(CVMX_STXX_COM_CTL(interface), stxx_com_ctl.u64);
-
- /* Waiting for calendar sync on STX0 STAT */
- cvmx_dprintf("SPI%d: Waiting to sync on STX[%d] STAT\n",
- interface, interface);
- timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout;
- /* SPX0_CLK_STAT - SPX0_CLK_STAT[STXCAL] should be 1 (bit10) */
- do {
- stat.u64 = cvmx_read_csr(CVMX_SPXX_CLK_STAT(interface));
- if (cvmx_get_cycle() > timeout_time) {
- cvmx_dprintf("SPI%d: Timeout\n", interface);
- return -1;
- }
- } while (stat.s.stxcal == 0);
- }
-
- return 0;
-}
-
-/**
- * Callback to handle interface up
- *
- * @interface: The identifier of the packet interface to configure and
- * use as a SPI interface.
- * @mode: The operating mode for the SPI interface. The interface
- * can operate as a full duplex (both Tx and Rx data paths
- * active) or as a halfplex (either the Tx data path is
- * active or the Rx data path is active, but not both).
- *
- * Returns Zero on success, non-zero error code on failure (will cause
- * SPI initialization to abort)
- */
-int cvmx_spi_interface_up_cb(int interface, cvmx_spi_mode_t mode)
-{
- union cvmx_gmxx_rxx_frm_min gmxx_rxx_frm_min;
- union cvmx_gmxx_rxx_frm_max gmxx_rxx_frm_max;
- union cvmx_gmxx_rxx_jabber gmxx_rxx_jabber;
-
- if (mode & CVMX_SPI_MODE_RX_HALFPLEX) {
- union cvmx_srxx_com_ctl srxx_com_ctl;
- srxx_com_ctl.u64 = cvmx_read_csr(CVMX_SRXX_COM_CTL(interface));
- srxx_com_ctl.s.inf_en = 1;
- cvmx_write_csr(CVMX_SRXX_COM_CTL(interface), srxx_com_ctl.u64);
- cvmx_dprintf("SPI%d: Rx is now up\n", interface);
- }
-
- if (mode & CVMX_SPI_MODE_TX_HALFPLEX) {
- union cvmx_stxx_com_ctl stxx_com_ctl;
- stxx_com_ctl.u64 = cvmx_read_csr(CVMX_STXX_COM_CTL(interface));
- stxx_com_ctl.s.inf_en = 1;
- cvmx_write_csr(CVMX_STXX_COM_CTL(interface), stxx_com_ctl.u64);
- cvmx_dprintf("SPI%d: Tx is now up\n", interface);
- }
-
- gmxx_rxx_frm_min.u64 = 0;
- gmxx_rxx_frm_min.s.len = 64;
- cvmx_write_csr(CVMX_GMXX_RXX_FRM_MIN(0, interface),
- gmxx_rxx_frm_min.u64);
- gmxx_rxx_frm_max.u64 = 0;
- gmxx_rxx_frm_max.s.len = 64 * 1024 - 4;
- cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(0, interface),
- gmxx_rxx_frm_max.u64);
- gmxx_rxx_jabber.u64 = 0;
- gmxx_rxx_jabber.s.cnt = 64 * 1024 - 4;
- cvmx_write_csr(CVMX_GMXX_RXX_JABBER(0, interface), gmxx_rxx_jabber.u64);
-
- return 0;
-}
diff --git a/drivers/staging/octeon/cvmx-spi.h b/drivers/staging/octeon/cvmx-spi.h
deleted file mode 100644
index e814648953a5..000000000000
--- a/drivers/staging/octeon/cvmx-spi.h
+++ /dev/null
@@ -1,269 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/*
- *
- * This file contains defines for the SPI interface
- */
-#ifndef __CVMX_SPI_H__
-#define __CVMX_SPI_H__
-
-#include "cvmx-gmxx-defs.h"
-
-/* CSR typedefs have been moved to cvmx-csr-*.h */
-
-typedef enum {
- CVMX_SPI_MODE_UNKNOWN = 0,
- CVMX_SPI_MODE_TX_HALFPLEX = 1,
- CVMX_SPI_MODE_RX_HALFPLEX = 2,
- CVMX_SPI_MODE_DUPLEX = 3
-} cvmx_spi_mode_t;
-
-/** Callbacks structure to customize SPI4 initialization sequence */
-typedef struct {
- /** Called to reset SPI4 DLL */
- int (*reset_cb) (int interface, cvmx_spi_mode_t mode);
-
- /** Called to setup calendar */
- int (*calendar_setup_cb) (int interface, cvmx_spi_mode_t mode,
- int num_ports);
-
- /** Called for Tx and Rx clock detection */
- int (*clock_detect_cb) (int interface, cvmx_spi_mode_t mode,
- int timeout);
-
- /** Called to perform link training */
- int (*training_cb) (int interface, cvmx_spi_mode_t mode, int timeout);
-
- /** Called for calendar data synchronization */
- int (*calendar_sync_cb) (int interface, cvmx_spi_mode_t mode,
- int timeout);
-
- /** Called when interface is up */
- int (*interface_up_cb) (int interface, cvmx_spi_mode_t mode);
-
-} cvmx_spi_callbacks_t;
-
-/**
- * Return true if the supplied interface is configured for SPI
- *
- * @interface: Interface to check
- * Returns True if interface is SPI
- */
-static inline int cvmx_spi_is_spi_interface(int interface)
-{
- uint64_t gmxState = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
- return (gmxState & 0x2) && (gmxState & 0x1);
-}
-
-/**
- * Initialize and start the SPI interface.
- *
- * @interface: The identifier of the packet interface to configure and
- * use as a SPI interface.
- * @mode: The operating mode for the SPI interface. The interface
- * can operate as a full duplex (both Tx and Rx data paths
- * active) or as a halfplex (either the Tx data path is
- * active or the Rx data path is active, but not both).
- * @timeout: Timeout to wait for clock synchronization in seconds
- * @num_ports: Number of SPI ports to configure
- *
- * Returns Zero on success, negative on failure.
- */
-extern int cvmx_spi_start_interface(int interface, cvmx_spi_mode_t mode,
- int timeout, int num_ports);
-
-/**
- * This routine restarts the SPI interface after it has lost synchronization
- * with its correspondent system.
- *
- * @interface: The identifier of the packet interface to configure and
- * use as a SPI interface.
- * @mode: The operating mode for the SPI interface. The interface
- * can operate as a full duplex (both Tx and Rx data paths
- * active) or as a halfplex (either the Tx data path is
- * active or the Rx data path is active, but not both).
- * @timeout: Timeout to wait for clock synchronization in seconds
- * Returns Zero on success, negative on failure.
- */
-extern int cvmx_spi_restart_interface(int interface, cvmx_spi_mode_t mode,
- int timeout);
-
-/**
- * Return non-zero if the SPI interface has a SPI4000 attached
- *
- * @interface: SPI interface the SPI4000 is connected to
- *
- * Returns Non-zero if a SPI4000 is attached, zero otherwise.
- */
-static inline int cvmx_spi4000_is_present(int interface)
-{
- return 0;
-}
-
-/**
- * Initialize the SPI4000 for use
- *
- * @interface: SPI interface the SPI4000 is connected to
- */
-static inline int cvmx_spi4000_initialize(int interface)
-{
- return 0;
-}
-
-/**
- * Poll the given SPI4000 port and check its speed
- *
- * @interface: Interface the SPI4000 is on
- * @port: Port to poll (0-9)
- * Returns Status of the port. 0=down. All other values mean the port is up.
- */
-static inline union cvmx_gmxx_rxx_rx_inbnd cvmx_spi4000_check_speed(
- int interface,
- int port)
-{
- union cvmx_gmxx_rxx_rx_inbnd r;
- r.u64 = 0;
- return r;
-}
-
-/**
- * Get current SPI4 initialization callbacks
- *
- * @callbacks: Pointer to the callbacks structure to fill
- *
- * The current callbacks are copied into the supplied structure.
- */
-extern void cvmx_spi_get_callbacks(cvmx_spi_callbacks_t *callbacks);
-
-/**
- * Set new SPI4 initialization callbacks
- *
- * @new_callbacks: Pointer to an updated callbacks structure.
- */
-extern void cvmx_spi_set_callbacks(cvmx_spi_callbacks_t *new_callbacks);
-
-/**
- * Callback to perform SPI4 reset
- *
- * @interface: The identifier of the packet interface to configure and
- * use as a SPI interface.
- * @mode: The operating mode for the SPI interface. The interface
- * can operate as a full duplex (both Tx and Rx data paths
- * active) or as a halfplex (either the Tx data path is
- * active or the Rx data path is active, but not both).
- *
- * Returns Zero on success, non-zero error code on failure (will cause
- * SPI initialization to abort)
- */
-extern int cvmx_spi_reset_cb(int interface, cvmx_spi_mode_t mode);
-
-/**
- * Callback to setup calendar and miscellaneous settings before clock
- * detection
- *
- * @interface: The identifier of the packet interface to configure and
- * use as a SPI interface.
- * @mode: The operating mode for the SPI interface. The interface
- * can operate as a full duplex (both Tx and Rx data paths
- * active) or as a halfplex (either the Tx data path is
- * active or the Rx data path is active, but not both).
- * @num_ports: Number of ports to configure on SPI
- *
- * Returns Zero on success, non-zero error code on failure (will cause
- * SPI initialization to abort)
- */
-extern int cvmx_spi_calendar_setup_cb(int interface, cvmx_spi_mode_t mode,
- int num_ports);
-
-/**
- * Callback to perform clock detection
- *
- * @interface: The identifier of the packet interface to configure and
- * use as a SPI interface.
- * @mode: The operating mode for the SPI interface. The interface
- * can operate as a full duplex (both Tx and Rx data paths
- * active) or as a halfplex (either the Tx data path is
- * active or the Rx data path is active, but not both).
- * @timeout: Timeout to wait for clock synchronization in seconds
- *
- * Returns Zero on success, non-zero error code on failure (will cause
- * SPI initialization to abort)
- */
-extern int cvmx_spi_clock_detect_cb(int interface, cvmx_spi_mode_t mode,
- int timeout);
-
-/**
- * Callback to perform link training
- *
- * @interface: The identifier of the packet interface to configure and
- * use as a SPI interface.
- * @mode: The operating mode for the SPI interface. The interface
- * can operate as a full duplex (both Tx and Rx data paths
- * active) or as a halfplex (either the Tx data path is
- * active or the Rx data path is active, but not both).
- * @timeout: Timeout to wait for link to be trained (in seconds)
- *
- * Returns Zero on success, non-zero error code on failure (will cause
- * SPI initialization to abort)
- */
-extern int cvmx_spi_training_cb(int interface, cvmx_spi_mode_t mode,
- int timeout);
-
-/**
- * Callback to perform calendar data synchronization
- *
- * @interface: The identifier of the packet interface to configure and
- * use as a SPI interface.
- * @mode: The operating mode for the SPI interface. The interface
- * can operate as a full duplex (both Tx and Rx data paths
- * active) or as a halfplex (either the Tx data path is
- * active or the Rx data path is active, but not both).
- * @timeout: Timeout to wait for calendar data in seconds
- *
- * Returns Zero on success, non-zero error code on failure (will cause
- * SPI initialization to abort)
- */
-extern int cvmx_spi_calendar_sync_cb(int interface, cvmx_spi_mode_t mode,
- int timeout);
-
-/**
- * Callback to handle interface up
- *
- * @interface: The identifier of the packet interface to configure and
- * use as a SPI interface.
- * @mode: The operating mode for the SPI interface. The interface
- * can operate as a full duplex (both Tx and Rx data paths
- * active) or as a halfplex (either the Tx data path is
- * active or the Rx data path is active, but not both).
- *
- * Returns Zero on success, non-zero error code on failure (will cause
- * SPI initialization to abort)
- */
-extern int cvmx_spi_interface_up_cb(int interface, cvmx_spi_mode_t mode);
-
-#endif /* __CVMX_SPI_H__ */
diff --git a/drivers/staging/octeon/cvmx-spxx-defs.h b/drivers/staging/octeon/cvmx-spxx-defs.h
deleted file mode 100644
index b16940e32c83..000000000000
--- a/drivers/staging/octeon/cvmx-spxx-defs.h
+++ /dev/null
@@ -1,347 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-#ifndef __CVMX_SPXX_DEFS_H__
-#define __CVMX_SPXX_DEFS_H__
-
-#define CVMX_SPXX_BCKPRS_CNT(block_id) \
- CVMX_ADD_IO_SEG(0x0001180090000340ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_SPXX_BIST_STAT(block_id) \
- CVMX_ADD_IO_SEG(0x00011800900007F8ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_SPXX_CLK_CTL(block_id) \
- CVMX_ADD_IO_SEG(0x0001180090000348ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_SPXX_CLK_STAT(block_id) \
- CVMX_ADD_IO_SEG(0x0001180090000350ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_SPXX_DBG_DESKEW_CTL(block_id) \
- CVMX_ADD_IO_SEG(0x0001180090000368ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_SPXX_DBG_DESKEW_STATE(block_id) \
- CVMX_ADD_IO_SEG(0x0001180090000370ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_SPXX_DRV_CTL(block_id) \
- CVMX_ADD_IO_SEG(0x0001180090000358ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_SPXX_ERR_CTL(block_id) \
- CVMX_ADD_IO_SEG(0x0001180090000320ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_SPXX_INT_DAT(block_id) \
- CVMX_ADD_IO_SEG(0x0001180090000318ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_SPXX_INT_MSK(block_id) \
- CVMX_ADD_IO_SEG(0x0001180090000308ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_SPXX_INT_REG(block_id) \
- CVMX_ADD_IO_SEG(0x0001180090000300ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_SPXX_INT_SYNC(block_id) \
- CVMX_ADD_IO_SEG(0x0001180090000310ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_SPXX_TPA_ACC(block_id) \
- CVMX_ADD_IO_SEG(0x0001180090000338ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_SPXX_TPA_MAX(block_id) \
- CVMX_ADD_IO_SEG(0x0001180090000330ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_SPXX_TPA_SEL(block_id) \
- CVMX_ADD_IO_SEG(0x0001180090000328ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_SPXX_TRN4_CTL(block_id) \
- CVMX_ADD_IO_SEG(0x0001180090000360ull + (((block_id) & 1) * 0x8000000ull))
-
-union cvmx_spxx_bckprs_cnt {
- uint64_t u64;
- struct cvmx_spxx_bckprs_cnt_s {
- uint64_t reserved_32_63:32;
- uint64_t cnt:32;
- } s;
- struct cvmx_spxx_bckprs_cnt_s cn38xx;
- struct cvmx_spxx_bckprs_cnt_s cn38xxp2;
- struct cvmx_spxx_bckprs_cnt_s cn58xx;
- struct cvmx_spxx_bckprs_cnt_s cn58xxp1;
-};
-
-union cvmx_spxx_bist_stat {
- uint64_t u64;
- struct cvmx_spxx_bist_stat_s {
- uint64_t reserved_3_63:61;
- uint64_t stat2:1;
- uint64_t stat1:1;
- uint64_t stat0:1;
- } s;
- struct cvmx_spxx_bist_stat_s cn38xx;
- struct cvmx_spxx_bist_stat_s cn38xxp2;
- struct cvmx_spxx_bist_stat_s cn58xx;
- struct cvmx_spxx_bist_stat_s cn58xxp1;
-};
-
-union cvmx_spxx_clk_ctl {
- uint64_t u64;
- struct cvmx_spxx_clk_ctl_s {
- uint64_t reserved_17_63:47;
- uint64_t seetrn:1;
- uint64_t reserved_12_15:4;
- uint64_t clkdly:5;
- uint64_t runbist:1;
- uint64_t statdrv:1;
- uint64_t statrcv:1;
- uint64_t sndtrn:1;
- uint64_t drptrn:1;
- uint64_t rcvtrn:1;
- uint64_t srxdlck:1;
- } s;
- struct cvmx_spxx_clk_ctl_s cn38xx;
- struct cvmx_spxx_clk_ctl_s cn38xxp2;
- struct cvmx_spxx_clk_ctl_s cn58xx;
- struct cvmx_spxx_clk_ctl_s cn58xxp1;
-};
-
-union cvmx_spxx_clk_stat {
- uint64_t u64;
- struct cvmx_spxx_clk_stat_s {
- uint64_t reserved_11_63:53;
- uint64_t stxcal:1;
- uint64_t reserved_9_9:1;
- uint64_t srxtrn:1;
- uint64_t s4clk1:1;
- uint64_t s4clk0:1;
- uint64_t d4clk1:1;
- uint64_t d4clk0:1;
- uint64_t reserved_0_3:4;
- } s;
- struct cvmx_spxx_clk_stat_s cn38xx;
- struct cvmx_spxx_clk_stat_s cn38xxp2;
- struct cvmx_spxx_clk_stat_s cn58xx;
- struct cvmx_spxx_clk_stat_s cn58xxp1;
-};
-
-union cvmx_spxx_dbg_deskew_ctl {
- uint64_t u64;
- struct cvmx_spxx_dbg_deskew_ctl_s {
- uint64_t reserved_30_63:34;
- uint64_t fallnop:1;
- uint64_t fall8:1;
- uint64_t reserved_26_27:2;
- uint64_t sstep_go:1;
- uint64_t sstep:1;
- uint64_t reserved_22_23:2;
- uint64_t clrdly:1;
- uint64_t dec:1;
- uint64_t inc:1;
- uint64_t mux:1;
- uint64_t offset:5;
- uint64_t bitsel:5;
- uint64_t offdly:6;
- uint64_t dllfrc:1;
- uint64_t dlldis:1;
- } s;
- struct cvmx_spxx_dbg_deskew_ctl_s cn38xx;
- struct cvmx_spxx_dbg_deskew_ctl_s cn38xxp2;
- struct cvmx_spxx_dbg_deskew_ctl_s cn58xx;
- struct cvmx_spxx_dbg_deskew_ctl_s cn58xxp1;
-};
-
-union cvmx_spxx_dbg_deskew_state {
- uint64_t u64;
- struct cvmx_spxx_dbg_deskew_state_s {
- uint64_t reserved_9_63:55;
- uint64_t testres:1;
- uint64_t unxterm:1;
- uint64_t muxsel:2;
- uint64_t offset:5;
- } s;
- struct cvmx_spxx_dbg_deskew_state_s cn38xx;
- struct cvmx_spxx_dbg_deskew_state_s cn38xxp2;
- struct cvmx_spxx_dbg_deskew_state_s cn58xx;
- struct cvmx_spxx_dbg_deskew_state_s cn58xxp1;
-};
-
-union cvmx_spxx_drv_ctl {
- uint64_t u64;
- struct cvmx_spxx_drv_ctl_s {
- uint64_t reserved_0_63:64;
- } s;
- struct cvmx_spxx_drv_ctl_cn38xx {
- uint64_t reserved_16_63:48;
- uint64_t stx4ncmp:4;
- uint64_t stx4pcmp:4;
- uint64_t srx4cmp:8;
- } cn38xx;
- struct cvmx_spxx_drv_ctl_cn38xx cn38xxp2;
- struct cvmx_spxx_drv_ctl_cn58xx {
- uint64_t reserved_24_63:40;
- uint64_t stx4ncmp:4;
- uint64_t stx4pcmp:4;
- uint64_t reserved_10_15:6;
- uint64_t srx4cmp:10;
- } cn58xx;
- struct cvmx_spxx_drv_ctl_cn58xx cn58xxp1;
-};
-
-union cvmx_spxx_err_ctl {
- uint64_t u64;
- struct cvmx_spxx_err_ctl_s {
- uint64_t reserved_9_63:55;
- uint64_t prtnxa:1;
- uint64_t dipcls:1;
- uint64_t dippay:1;
- uint64_t reserved_4_5:2;
- uint64_t errcnt:4;
- } s;
- struct cvmx_spxx_err_ctl_s cn38xx;
- struct cvmx_spxx_err_ctl_s cn38xxp2;
- struct cvmx_spxx_err_ctl_s cn58xx;
- struct cvmx_spxx_err_ctl_s cn58xxp1;
-};
-
-union cvmx_spxx_int_dat {
- uint64_t u64;
- struct cvmx_spxx_int_dat_s {
- uint64_t reserved_32_63:32;
- uint64_t mul:1;
- uint64_t reserved_14_30:17;
- uint64_t calbnk:2;
- uint64_t rsvop:4;
- uint64_t prt:8;
- } s;
- struct cvmx_spxx_int_dat_s cn38xx;
- struct cvmx_spxx_int_dat_s cn38xxp2;
- struct cvmx_spxx_int_dat_s cn58xx;
- struct cvmx_spxx_int_dat_s cn58xxp1;
-};
-
-union cvmx_spxx_int_msk {
- uint64_t u64;
- struct cvmx_spxx_int_msk_s {
- uint64_t reserved_12_63:52;
- uint64_t calerr:1;
- uint64_t syncerr:1;
- uint64_t diperr:1;
- uint64_t tpaovr:1;
- uint64_t rsverr:1;
- uint64_t drwnng:1;
- uint64_t clserr:1;
- uint64_t spiovr:1;
- uint64_t reserved_2_3:2;
- uint64_t abnorm:1;
- uint64_t prtnxa:1;
- } s;
- struct cvmx_spxx_int_msk_s cn38xx;
- struct cvmx_spxx_int_msk_s cn38xxp2;
- struct cvmx_spxx_int_msk_s cn58xx;
- struct cvmx_spxx_int_msk_s cn58xxp1;
-};
-
-union cvmx_spxx_int_reg {
- uint64_t u64;
- struct cvmx_spxx_int_reg_s {
- uint64_t reserved_32_63:32;
- uint64_t spf:1;
- uint64_t reserved_12_30:19;
- uint64_t calerr:1;
- uint64_t syncerr:1;
- uint64_t diperr:1;
- uint64_t tpaovr:1;
- uint64_t rsverr:1;
- uint64_t drwnng:1;
- uint64_t clserr:1;
- uint64_t spiovr:1;
- uint64_t reserved_2_3:2;
- uint64_t abnorm:1;
- uint64_t prtnxa:1;
- } s;
- struct cvmx_spxx_int_reg_s cn38xx;
- struct cvmx_spxx_int_reg_s cn38xxp2;
- struct cvmx_spxx_int_reg_s cn58xx;
- struct cvmx_spxx_int_reg_s cn58xxp1;
-};
-
-union cvmx_spxx_int_sync {
- uint64_t u64;
- struct cvmx_spxx_int_sync_s {
- uint64_t reserved_12_63:52;
- uint64_t calerr:1;
- uint64_t syncerr:1;
- uint64_t diperr:1;
- uint64_t tpaovr:1;
- uint64_t rsverr:1;
- uint64_t drwnng:1;
- uint64_t clserr:1;
- uint64_t spiovr:1;
- uint64_t reserved_2_3:2;
- uint64_t abnorm:1;
- uint64_t prtnxa:1;
- } s;
- struct cvmx_spxx_int_sync_s cn38xx;
- struct cvmx_spxx_int_sync_s cn38xxp2;
- struct cvmx_spxx_int_sync_s cn58xx;
- struct cvmx_spxx_int_sync_s cn58xxp1;
-};
-
-union cvmx_spxx_tpa_acc {
- uint64_t u64;
- struct cvmx_spxx_tpa_acc_s {
- uint64_t reserved_32_63:32;
- uint64_t cnt:32;
- } s;
- struct cvmx_spxx_tpa_acc_s cn38xx;
- struct cvmx_spxx_tpa_acc_s cn38xxp2;
- struct cvmx_spxx_tpa_acc_s cn58xx;
- struct cvmx_spxx_tpa_acc_s cn58xxp1;
-};
-
-union cvmx_spxx_tpa_max {
- uint64_t u64;
- struct cvmx_spxx_tpa_max_s {
- uint64_t reserved_32_63:32;
- uint64_t max:32;
- } s;
- struct cvmx_spxx_tpa_max_s cn38xx;
- struct cvmx_spxx_tpa_max_s cn38xxp2;
- struct cvmx_spxx_tpa_max_s cn58xx;
- struct cvmx_spxx_tpa_max_s cn58xxp1;
-};
-
-union cvmx_spxx_tpa_sel {
- uint64_t u64;
- struct cvmx_spxx_tpa_sel_s {
- uint64_t reserved_4_63:60;
- uint64_t prtsel:4;
- } s;
- struct cvmx_spxx_tpa_sel_s cn38xx;
- struct cvmx_spxx_tpa_sel_s cn38xxp2;
- struct cvmx_spxx_tpa_sel_s cn58xx;
- struct cvmx_spxx_tpa_sel_s cn58xxp1;
-};
-
-union cvmx_spxx_trn4_ctl {
- uint64_t u64;
- struct cvmx_spxx_trn4_ctl_s {
- uint64_t reserved_13_63:51;
- uint64_t trntest:1;
- uint64_t jitter:3;
- uint64_t clr_boot:1;
- uint64_t set_boot:1;
- uint64_t maxdist:5;
- uint64_t macro_en:1;
- uint64_t mux_en:1;
- } s;
- struct cvmx_spxx_trn4_ctl_s cn38xx;
- struct cvmx_spxx_trn4_ctl_s cn38xxp2;
- struct cvmx_spxx_trn4_ctl_s cn58xx;
- struct cvmx_spxx_trn4_ctl_s cn58xxp1;
-};
-
-#endif
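Each register above pairs an address macro with a bit-field union, so a 64-bit CSR read can be decoded by field name rather than by hand-rolled masks. A minimal sketch, assuming the usual OCTEON CSR accessor cvmx_read_csr():

static int example_spx_sync_error(int block_id)
{
	union cvmx_spxx_int_reg reg;

	/* Read SPX0/SPX1 interrupt status and pick out the
	 * calendar-sync error bit by name (illustrative only). */
	reg.u64 = cvmx_read_csr(CVMX_SPXX_INT_REG(block_id));
	return reg.s.syncerr;
}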
diff --git a/drivers/staging/octeon/cvmx-srxx-defs.h b/drivers/staging/octeon/cvmx-srxx-defs.h
deleted file mode 100644
index d82b366c279f..000000000000
--- a/drivers/staging/octeon/cvmx-srxx-defs.h
+++ /dev/null
@@ -1,126 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-#ifndef __CVMX_SRXX_DEFS_H__
-#define __CVMX_SRXX_DEFS_H__
-
-#define CVMX_SRXX_COM_CTL(block_id) \
- CVMX_ADD_IO_SEG(0x0001180090000200ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_SRXX_IGN_RX_FULL(block_id) \
- CVMX_ADD_IO_SEG(0x0001180090000218ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_SRXX_SPI4_CALX(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180090000000ull + (((offset) & 31) * 8) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_SRXX_SPI4_STAT(block_id) \
- CVMX_ADD_IO_SEG(0x0001180090000208ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_SRXX_SW_TICK_CTL(block_id) \
- CVMX_ADD_IO_SEG(0x0001180090000220ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_SRXX_SW_TICK_DAT(block_id) \
- CVMX_ADD_IO_SEG(0x0001180090000228ull + (((block_id) & 1) * 0x8000000ull))
-
-union cvmx_srxx_com_ctl {
- uint64_t u64;
- struct cvmx_srxx_com_ctl_s {
- uint64_t reserved_8_63:56;
- uint64_t prts:4;
- uint64_t st_en:1;
- uint64_t reserved_1_2:2;
- uint64_t inf_en:1;
- } s;
- struct cvmx_srxx_com_ctl_s cn38xx;
- struct cvmx_srxx_com_ctl_s cn38xxp2;
- struct cvmx_srxx_com_ctl_s cn58xx;
- struct cvmx_srxx_com_ctl_s cn58xxp1;
-};
-
-union cvmx_srxx_ign_rx_full {
- uint64_t u64;
- struct cvmx_srxx_ign_rx_full_s {
- uint64_t reserved_16_63:48;
- uint64_t ignore:16;
- } s;
- struct cvmx_srxx_ign_rx_full_s cn38xx;
- struct cvmx_srxx_ign_rx_full_s cn38xxp2;
- struct cvmx_srxx_ign_rx_full_s cn58xx;
- struct cvmx_srxx_ign_rx_full_s cn58xxp1;
-};
-
-union cvmx_srxx_spi4_calx {
- uint64_t u64;
- struct cvmx_srxx_spi4_calx_s {
- uint64_t reserved_17_63:47;
- uint64_t oddpar:1;
- uint64_t prt3:4;
- uint64_t prt2:4;
- uint64_t prt1:4;
- uint64_t prt0:4;
- } s;
- struct cvmx_srxx_spi4_calx_s cn38xx;
- struct cvmx_srxx_spi4_calx_s cn38xxp2;
- struct cvmx_srxx_spi4_calx_s cn58xx;
- struct cvmx_srxx_spi4_calx_s cn58xxp1;
-};
-
-union cvmx_srxx_spi4_stat {
- uint64_t u64;
- struct cvmx_srxx_spi4_stat_s {
- uint64_t reserved_16_63:48;
- uint64_t m:8;
- uint64_t reserved_7_7:1;
- uint64_t len:7;
- } s;
- struct cvmx_srxx_spi4_stat_s cn38xx;
- struct cvmx_srxx_spi4_stat_s cn38xxp2;
- struct cvmx_srxx_spi4_stat_s cn58xx;
- struct cvmx_srxx_spi4_stat_s cn58xxp1;
-};
-
-union cvmx_srxx_sw_tick_ctl {
- uint64_t u64;
- struct cvmx_srxx_sw_tick_ctl_s {
- uint64_t reserved_14_63:50;
- uint64_t eop:1;
- uint64_t sop:1;
- uint64_t mod:4;
- uint64_t opc:4;
- uint64_t adr:4;
- } s;
- struct cvmx_srxx_sw_tick_ctl_s cn38xx;
- struct cvmx_srxx_sw_tick_ctl_s cn58xx;
- struct cvmx_srxx_sw_tick_ctl_s cn58xxp1;
-};
-
-union cvmx_srxx_sw_tick_dat {
- uint64_t u64;
- struct cvmx_srxx_sw_tick_dat_s {
- uint64_t dat:64;
- } s;
- struct cvmx_srxx_sw_tick_dat_s cn38xx;
- struct cvmx_srxx_sw_tick_dat_s cn58xx;
- struct cvmx_srxx_sw_tick_dat_s cn58xxp1;
-};
-
-#endif
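The receive SPI4 calendar is programmed four ports at a time through CVMX_SRXX_SPI4_CALX. A minimal sketch, assuming cvmx_write_csr() as the CSR write accessor; a real driver would also compute the oddpar bit over the entry instead of leaving it zero.

static void example_srx_calendar_entry(int block_id, int entry,
				       int prt0, int prt1, int prt2, int prt3)
{
	union cvmx_srxx_spi4_calx cal;

	cal.u64 = 0;
	cal.s.prt0 = prt0;	/* ports assigned to this calendar slot */
	cal.s.prt1 = prt1;
	cal.s.prt2 = prt2;
	cal.s.prt3 = prt3;
	cvmx_write_csr(CVMX_SRXX_SPI4_CALX(entry, block_id), cal.u64);
}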
diff --git a/drivers/staging/octeon/cvmx-stxx-defs.h b/drivers/staging/octeon/cvmx-stxx-defs.h
deleted file mode 100644
index 4f209b62cae1..000000000000
--- a/drivers/staging/octeon/cvmx-stxx-defs.h
+++ /dev/null
@@ -1,292 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-#ifndef __CVMX_STXX_DEFS_H__
-#define __CVMX_STXX_DEFS_H__
-
-#define CVMX_STXX_ARB_CTL(block_id) \
- CVMX_ADD_IO_SEG(0x0001180090000608ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_STXX_BCKPRS_CNT(block_id) \
- CVMX_ADD_IO_SEG(0x0001180090000688ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_STXX_COM_CTL(block_id) \
- CVMX_ADD_IO_SEG(0x0001180090000600ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_STXX_DIP_CNT(block_id) \
- CVMX_ADD_IO_SEG(0x0001180090000690ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_STXX_IGN_CAL(block_id) \
- CVMX_ADD_IO_SEG(0x0001180090000610ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_STXX_INT_MSK(block_id) \
- CVMX_ADD_IO_SEG(0x00011800900006A0ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_STXX_INT_REG(block_id) \
- CVMX_ADD_IO_SEG(0x0001180090000698ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_STXX_INT_SYNC(block_id) \
- CVMX_ADD_IO_SEG(0x00011800900006A8ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_STXX_MIN_BST(block_id) \
- CVMX_ADD_IO_SEG(0x0001180090000618ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_STXX_SPI4_CALX(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180090000400ull + (((offset) & 31) * 8) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_STXX_SPI4_DAT(block_id) \
- CVMX_ADD_IO_SEG(0x0001180090000628ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_STXX_SPI4_STAT(block_id) \
- CVMX_ADD_IO_SEG(0x0001180090000630ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_STXX_STAT_BYTES_HI(block_id) \
- CVMX_ADD_IO_SEG(0x0001180090000648ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_STXX_STAT_BYTES_LO(block_id) \
- CVMX_ADD_IO_SEG(0x0001180090000680ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_STXX_STAT_CTL(block_id) \
- CVMX_ADD_IO_SEG(0x0001180090000638ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_STXX_STAT_PKT_XMT(block_id) \
- CVMX_ADD_IO_SEG(0x0001180090000640ull + (((block_id) & 1) * 0x8000000ull))
-
-union cvmx_stxx_arb_ctl {
- uint64_t u64;
- struct cvmx_stxx_arb_ctl_s {
- uint64_t reserved_6_63:58;
- uint64_t mintrn:1;
- uint64_t reserved_4_4:1;
- uint64_t igntpa:1;
- uint64_t reserved_0_2:3;
- } s;
- struct cvmx_stxx_arb_ctl_s cn38xx;
- struct cvmx_stxx_arb_ctl_s cn38xxp2;
- struct cvmx_stxx_arb_ctl_s cn58xx;
- struct cvmx_stxx_arb_ctl_s cn58xxp1;
-};
-
-union cvmx_stxx_bckprs_cnt {
- uint64_t u64;
- struct cvmx_stxx_bckprs_cnt_s {
- uint64_t reserved_32_63:32;
- uint64_t cnt:32;
- } s;
- struct cvmx_stxx_bckprs_cnt_s cn38xx;
- struct cvmx_stxx_bckprs_cnt_s cn38xxp2;
- struct cvmx_stxx_bckprs_cnt_s cn58xx;
- struct cvmx_stxx_bckprs_cnt_s cn58xxp1;
-};
-
-union cvmx_stxx_com_ctl {
- uint64_t u64;
- struct cvmx_stxx_com_ctl_s {
- uint64_t reserved_4_63:60;
- uint64_t st_en:1;
- uint64_t reserved_1_2:2;
- uint64_t inf_en:1;
- } s;
- struct cvmx_stxx_com_ctl_s cn38xx;
- struct cvmx_stxx_com_ctl_s cn38xxp2;
- struct cvmx_stxx_com_ctl_s cn58xx;
- struct cvmx_stxx_com_ctl_s cn58xxp1;
-};
-
-union cvmx_stxx_dip_cnt {
- uint64_t u64;
- struct cvmx_stxx_dip_cnt_s {
- uint64_t reserved_8_63:56;
- uint64_t frmmax:4;
- uint64_t dipmax:4;
- } s;
- struct cvmx_stxx_dip_cnt_s cn38xx;
- struct cvmx_stxx_dip_cnt_s cn38xxp2;
- struct cvmx_stxx_dip_cnt_s cn58xx;
- struct cvmx_stxx_dip_cnt_s cn58xxp1;
-};
-
-union cvmx_stxx_ign_cal {
- uint64_t u64;
- struct cvmx_stxx_ign_cal_s {
- uint64_t reserved_16_63:48;
- uint64_t igntpa:16;
- } s;
- struct cvmx_stxx_ign_cal_s cn38xx;
- struct cvmx_stxx_ign_cal_s cn38xxp2;
- struct cvmx_stxx_ign_cal_s cn58xx;
- struct cvmx_stxx_ign_cal_s cn58xxp1;
-};
-
-union cvmx_stxx_int_msk {
- uint64_t u64;
- struct cvmx_stxx_int_msk_s {
- uint64_t reserved_8_63:56;
- uint64_t frmerr:1;
- uint64_t unxfrm:1;
- uint64_t nosync:1;
- uint64_t diperr:1;
- uint64_t datovr:1;
- uint64_t ovrbst:1;
- uint64_t calpar1:1;
- uint64_t calpar0:1;
- } s;
- struct cvmx_stxx_int_msk_s cn38xx;
- struct cvmx_stxx_int_msk_s cn38xxp2;
- struct cvmx_stxx_int_msk_s cn58xx;
- struct cvmx_stxx_int_msk_s cn58xxp1;
-};
-
-union cvmx_stxx_int_reg {
- uint64_t u64;
- struct cvmx_stxx_int_reg_s {
- uint64_t reserved_9_63:55;
- uint64_t syncerr:1;
- uint64_t frmerr:1;
- uint64_t unxfrm:1;
- uint64_t nosync:1;
- uint64_t diperr:1;
- uint64_t datovr:1;
- uint64_t ovrbst:1;
- uint64_t calpar1:1;
- uint64_t calpar0:1;
- } s;
- struct cvmx_stxx_int_reg_s cn38xx;
- struct cvmx_stxx_int_reg_s cn38xxp2;
- struct cvmx_stxx_int_reg_s cn58xx;
- struct cvmx_stxx_int_reg_s cn58xxp1;
-};
-
-union cvmx_stxx_int_sync {
- uint64_t u64;
- struct cvmx_stxx_int_sync_s {
- uint64_t reserved_8_63:56;
- uint64_t frmerr:1;
- uint64_t unxfrm:1;
- uint64_t nosync:1;
- uint64_t diperr:1;
- uint64_t datovr:1;
- uint64_t ovrbst:1;
- uint64_t calpar1:1;
- uint64_t calpar0:1;
- } s;
- struct cvmx_stxx_int_sync_s cn38xx;
- struct cvmx_stxx_int_sync_s cn38xxp2;
- struct cvmx_stxx_int_sync_s cn58xx;
- struct cvmx_stxx_int_sync_s cn58xxp1;
-};
-
-union cvmx_stxx_min_bst {
- uint64_t u64;
- struct cvmx_stxx_min_bst_s {
- uint64_t reserved_9_63:55;
- uint64_t minb:9;
- } s;
- struct cvmx_stxx_min_bst_s cn38xx;
- struct cvmx_stxx_min_bst_s cn38xxp2;
- struct cvmx_stxx_min_bst_s cn58xx;
- struct cvmx_stxx_min_bst_s cn58xxp1;
-};
-
-union cvmx_stxx_spi4_calx {
- uint64_t u64;
- struct cvmx_stxx_spi4_calx_s {
- uint64_t reserved_17_63:47;
- uint64_t oddpar:1;
- uint64_t prt3:4;
- uint64_t prt2:4;
- uint64_t prt1:4;
- uint64_t prt0:4;
- } s;
- struct cvmx_stxx_spi4_calx_s cn38xx;
- struct cvmx_stxx_spi4_calx_s cn38xxp2;
- struct cvmx_stxx_spi4_calx_s cn58xx;
- struct cvmx_stxx_spi4_calx_s cn58xxp1;
-};
-
-union cvmx_stxx_spi4_dat {
- uint64_t u64;
- struct cvmx_stxx_spi4_dat_s {
- uint64_t reserved_32_63:32;
- uint64_t alpha:16;
- uint64_t max_t:16;
- } s;
- struct cvmx_stxx_spi4_dat_s cn38xx;
- struct cvmx_stxx_spi4_dat_s cn38xxp2;
- struct cvmx_stxx_spi4_dat_s cn58xx;
- struct cvmx_stxx_spi4_dat_s cn58xxp1;
-};
-
-union cvmx_stxx_spi4_stat {
- uint64_t u64;
- struct cvmx_stxx_spi4_stat_s {
- uint64_t reserved_16_63:48;
- uint64_t m:8;
- uint64_t reserved_7_7:1;
- uint64_t len:7;
- } s;
- struct cvmx_stxx_spi4_stat_s cn38xx;
- struct cvmx_stxx_spi4_stat_s cn38xxp2;
- struct cvmx_stxx_spi4_stat_s cn58xx;
- struct cvmx_stxx_spi4_stat_s cn58xxp1;
-};
-
-union cvmx_stxx_stat_bytes_hi {
- uint64_t u64;
- struct cvmx_stxx_stat_bytes_hi_s {
- uint64_t reserved_32_63:32;
- uint64_t cnt:32;
- } s;
- struct cvmx_stxx_stat_bytes_hi_s cn38xx;
- struct cvmx_stxx_stat_bytes_hi_s cn38xxp2;
- struct cvmx_stxx_stat_bytes_hi_s cn58xx;
- struct cvmx_stxx_stat_bytes_hi_s cn58xxp1;
-};
-
-union cvmx_stxx_stat_bytes_lo {
- uint64_t u64;
- struct cvmx_stxx_stat_bytes_lo_s {
- uint64_t reserved_32_63:32;
- uint64_t cnt:32;
- } s;
- struct cvmx_stxx_stat_bytes_lo_s cn38xx;
- struct cvmx_stxx_stat_bytes_lo_s cn38xxp2;
- struct cvmx_stxx_stat_bytes_lo_s cn58xx;
- struct cvmx_stxx_stat_bytes_lo_s cn58xxp1;
-};
-
-union cvmx_stxx_stat_ctl {
- uint64_t u64;
- struct cvmx_stxx_stat_ctl_s {
- uint64_t reserved_5_63:59;
- uint64_t clr:1;
- uint64_t bckprs:4;
- } s;
- struct cvmx_stxx_stat_ctl_s cn38xx;
- struct cvmx_stxx_stat_ctl_s cn38xxp2;
- struct cvmx_stxx_stat_ctl_s cn58xx;
- struct cvmx_stxx_stat_ctl_s cn58xxp1;
-};
-
-union cvmx_stxx_stat_pkt_xmt {
- uint64_t u64;
- struct cvmx_stxx_stat_pkt_xmt_s {
- uint64_t reserved_32_63:32;
- uint64_t cnt:32;
- } s;
- struct cvmx_stxx_stat_pkt_xmt_s cn38xx;
- struct cvmx_stxx_stat_pkt_xmt_s cn38xxp2;
- struct cvmx_stxx_stat_pkt_xmt_s cn58xx;
- struct cvmx_stxx_stat_pkt_xmt_s cn58xxp1;
-};
-
-#endif
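The transmit byte statistics are split across a HI/LO register pair, each exposing a 32-bit cnt field. A minimal sketch that composes them into one value, assuming (as the naming suggests) that HI holds the upper half and that cvmx_read_csr() is the CSR accessor; a real driver would also guard against the LO counter rolling over between the two reads.

static uint64_t example_stx_bytes_sent(int block_id)
{
	union cvmx_stxx_stat_bytes_hi hi;
	union cvmx_stxx_stat_bytes_lo lo;

	hi.u64 = cvmx_read_csr(CVMX_STXX_STAT_BYTES_HI(block_id));
	lo.u64 = cvmx_read_csr(CVMX_STXX_STAT_BYTES_LO(block_id));
	return ((uint64_t)hi.s.cnt << 32) | lo.s.cnt;
}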
diff --git a/drivers/staging/octeon/cvmx-wqe.h b/drivers/staging/octeon/cvmx-wqe.h
deleted file mode 100644
index 653610953d28..000000000000
--- a/drivers/staging/octeon/cvmx-wqe.h
+++ /dev/null
@@ -1,397 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/**
- *
- * This header file defines the work queue entry (wqe) data structure.
- * Since this is a commonly used structure that depends on structures
- * from several hardware blocks, those definitions have been placed
- * in this file to create a single point of definition of the wqe
- * format.
- * Data structures are still named according to the block that they
- * relate to.
- *
- */
-
-#ifndef __CVMX_WQE_H__
-#define __CVMX_WQE_H__
-
-#include "cvmx-packet.h"
-
-
-#define OCT_TAG_TYPE_STRING(x) \
- (((x) == CVMX_POW_TAG_TYPE_ORDERED) ? "ORDERED" : \
- (((x) == CVMX_POW_TAG_TYPE_ATOMIC) ? "ATOMIC" : \
- (((x) == CVMX_POW_TAG_TYPE_NULL) ? "NULL" : \
- "NULL_NULL")))
-
-/**
- * HW decode / err_code in work queue entry
- */
-typedef union {
- uint64_t u64;
-
- /* Use this struct if the hardware determines that the packet is IP */
- struct {
- /* HW sets this to the number of buffers used by this packet */
- uint64_t bufs:8;
- /* HW sets to the number of L2 bytes prior to the IP */
- uint64_t ip_offset:8;
- /* set to 1 if we found DSA/VLAN in the L2 */
- uint64_t vlan_valid:1;
- /* Set to 1 if the DSA/VLAN tag is stacked */
- uint64_t vlan_stacked:1;
- uint64_t unassigned:1;
- /* HW sets to the DSA/VLAN CFI flag (valid when vlan_valid) */
- uint64_t vlan_cfi:1;
- /* HW sets to the DSA/VLAN_ID field (valid when vlan_valid) */
- uint64_t vlan_id:12;
- /* Ring Identifier (if PCIe). Requires PIP_GBL_CTL[RING_EN]=1 */
- uint64_t pr:4;
- uint64_t unassigned2:8;
- /* the packet needs to be decompressed */
- uint64_t dec_ipcomp:1;
- /* the packet is either TCP or UDP */
- uint64_t tcp_or_udp:1;
- /* the packet needs to be decrypted (ESP or AH) */
- uint64_t dec_ipsec:1;
- /* the packet is IPv6 */
- uint64_t is_v6:1;
-
- /*
- * (rcv_error, not_IP, IP_exc, is_frag, L4_error,
- * software, etc.).
- */
-
- /*
- * reserved for software use, hardware will clear on
- * packet creation.
- */
- uint64_t software:1;
- /* exceptional conditions below */
- /* the receive interface hardware detected an L4 error
- * (only applies if !is_frag) (only applies if
- * !rcv_error && !not_IP && !IP_exc && !is_frag)
- * failure indicated in err_code below, decode:
- *
- * - 1 = Malformed L4
- * - 2 = L4 Checksum Error: the L4 checksum value is incorrect.
- * - 3 = UDP Length Error: The UDP length field would
- * make the UDP data longer than what remains in
- * the IP packet (as defined by the IP header
- * length field).
- * - 4 = Bad L4 Port: either the source or destination
- * TCP/UDP port is 0.
- * - 8 = TCP FIN Only: the packet is TCP and only the
- * FIN flag set.
- * - 9 = TCP No Flags: the packet is TCP and no flags
- * are set.
- * - 10 = TCP FIN RST: the packet is TCP and both FIN
- * and RST are set.
- * - 11 = TCP SYN URG: the packet is TCP and both SYN
- * and URG are set.
- * - 12 = TCP SYN RST: the packet is TCP and both SYN
- * and RST are set.
- * - 13 = TCP SYN FIN: the packet is TCP and both SYN
- * and FIN are set.
- */
- uint64_t L4_error:1;
- /* set if the packet is a fragment */
- uint64_t is_frag:1;
- /* the receive interface hardware detected an IP error
- * / exception (only applies if !rcv_error && !not_IP)
- * failure indicated in err_code below, decode:
- *
- * - 1 = Not IP: the IP version field is neither 4 nor
- * 6.
- * - 2 = IPv4 Header Checksum Error: the IPv4 header
- * has a checksum violation.
- * - 3 = IP Malformed Header: the packet is not long
- * enough to contain the IP header.
- * - 4 = IP Malformed: the packet is not long enough
- * to contain the bytes indicated by the IP
- * header. Pad is allowed.
- * - 5 = IP TTL Hop: the IPv4 TTL field or the IPv6
- * Hop Count field is zero.
- * - 6 = IP Options
- */
- uint64_t IP_exc:1;
- /*
- * Set if the hardware determined that the packet is a
- * broadcast.
- */
- uint64_t is_bcast:1;
- /*
- * Set if the hardware determined that the packet is a
- * multi-cast.
- */
- uint64_t is_mcast:1;
- /*
- * Set if the packet may not be IP (must be zero in
- * this case).
- */
- uint64_t not_IP:1;
- /*
- * The receive interface hardware detected a receive
- * error (must be zero in this case).
- */
- uint64_t rcv_error:1;
- /* lower err_code = first-level descriptor of the
- * work */
- /* zero for packet submitted by hardware that isn't on
- * the slow path */
- /* type is cvmx_pip_err_t */
- uint64_t err_code:8;
- } s;
-
- /* use this to get at the 16 vlan bits */
- struct {
- uint64_t unused1:16;
- uint64_t vlan:16;
- uint64_t unused2:32;
- } svlan;
-
- /*
- * use this struct if the hardware could not determine that
- * the packet is ip.
- */
- struct {
- /*
- * HW sets this to the number of buffers used by this
- * packet.
- */
- uint64_t bufs:8;
- uint64_t unused:8;
- /* set to 1 if we found DSA/VLAN in the L2 */
- uint64_t vlan_valid:1;
- /* Set to 1 if the DSA/VLAN tag is stacked */
- uint64_t vlan_stacked:1;
- uint64_t unassigned:1;
- /*
- * HW sets to the DSA/VLAN CFI flag (valid when
- * vlan_valid)
- */
- uint64_t vlan_cfi:1;
- /*
- * HW sets to the DSA/VLAN_ID field (valid when
- * vlan_valid).
- */
- uint64_t vlan_id:12;
- /*
- * Ring Identifier (if PCIe). Requires
- * PIP_GBL_CTL[RING_EN]=1
- */
- uint64_t pr:4;
- uint64_t unassigned2:12;
- /*
- * reserved for software use, hardware will clear on
- * packet creation.
- */
- uint64_t software:1;
- uint64_t unassigned3:1;
- /*
- * set if the hardware determined that the packet is
- * rarp.
- */
- uint64_t is_rarp:1;
- /*
- * set if the hardware determined that the packet is
- * arp
- */
- uint64_t is_arp:1;
- /*
- * set if the hardware determined that the packet is a
- * broadcast.
- */
- uint64_t is_bcast:1;
- /*
- * set if the hardware determined that the packet is a
- * multi-cast
- */
- uint64_t is_mcast:1;
- /*
- * set if the packet may not be IP (must be one in
- * this case)
- */
- uint64_t not_IP:1;
- /* The receive interface hardware detected a receive
- * error. Failure indicated in err_code below,
- * decode:
- *
- * - 1 = partial error: a packet was partially
- * received, but internal buffering / bandwidth
- * was not adequate to receive the entire
- * packet.
- * - 2 = jabber error: the RGMII packet was too large
- * and is truncated.
- * - 3 = overrun error: the RGMII packet is longer
- * than allowed and had an FCS error.
- * - 4 = oversize error: the RGMII packet is longer
- * than allowed.
- * - 5 = alignment error: the RGMII packet is not an
- * integer number of bytes
- * and had an FCS error (100M and 10M only).
- * - 6 = fragment error: the RGMII packet is shorter
- * than allowed and had an FCS error.
- * - 7 = GMX FCS error: the RGMII packet had an FCS
- * error.
- * - 8 = undersize error: the RGMII packet is shorter
- * than allowed.
- * - 9 = extend error: the RGMII packet had an extend
- * error.
- * - 10 = length mismatch error: the RGMII packet had
- * a length that did not match the length field
- * in the L2 HDR.
- * - 11 = RGMII RX error/SPI4 DIP4 Error: the RGMII
- * packet had one or more data reception errors
- * (RXERR) or the SPI4 packet had one or more
- * DIP4 errors.
- * - 12 = RGMII skip error/SPI4 Abort Error: the RGMII
- * packet was not large enough to cover the
- * skipped bytes or the SPI4 packet was
- * terminated with an Abort EOPS.
- * - 13 = RGMII nibble error/SPI4 Port NXA Error: the
- * RGMII packet had a studder error (data not
- * repeated - 10/100M only) or the SPI4 packet
- * was sent to an NXA.
- * - 16 = FCS error: a SPI4.2 packet had an FCS error.
- * - 17 = Skip error: a packet was not large enough to
- * cover the skipped bytes.
- * - 18 = L2 header malformed: the packet is not long
- * enough to contain the L2.
- */
-
- uint64_t rcv_error:1;
- /*
- * lower err_code = first-level descriptor of the
- * work
- */
- /*
- * zero for packet submitted by hardware that isn't on
- * the slow path
- */
- /* type is cvmx_pip_err_t (union, so can't use directly) */
- uint64_t err_code:8;
- } snoip;
-
-} cvmx_pip_wqe_word2;
-
-/**
- * Work queue entry format
- *
- * must be 8-byte aligned
- */
-typedef struct {
-
- /*****************************************************************
- * WORD 0
- * HW WRITE: the following 64 bits are filled by HW when a packet arrives
- */
-
- /**
- * raw chksum result generated by the HW
- */
- uint16_t hw_chksum;
- /**
- * Field unused by hardware - available for software
- */
- uint8_t unused;
- /**
- * Next pointer used by hardware for list maintenance.
- * May be written/read by HW before the work queue
- * entry is scheduled to a PP
- * (Only 36 bits used in Octeon 1)
- */
- uint64_t next_ptr:40;
-
- /*****************************************************************
- * WORD 1
- * HW WRITE: the following 64 bits are filled by HW when a packet arrives
- */
-
- /**
- * HW sets to the total number of bytes in the packet
- */
- uint64_t len:16;
- /**
- * HW sets this to input physical port
- */
- uint64_t ipprt:6;
-
- /**
- * HW sets this to what it thought the priority of the input packet was
- */
- uint64_t qos:3;
-
- /**
- * the group that the work queue entry will be scheduled to
- */
- uint64_t grp:4;
- /**
- * the type of the tag (ORDERED, ATOMIC, NULL)
- */
- uint64_t tag_type:3;
- /**
- * the synchronization/ordering tag
- */
- uint64_t tag:32;
-
- /**
- * WORD 2 HW WRITE: the following 64-bits are filled in by
- * hardware when a packet arrives This indicates a variety of
- * status and error conditions.
- */
- cvmx_pip_wqe_word2 word2;
-
- /**
- * Pointer to the first segment of the packet.
- */
- union cvmx_buf_ptr packet_ptr;
-
- /**
- * HW WRITE: octeon will fill in a programmable amount from the
- * packet, up to (at most, but perhaps less) the amount
- * needed to fill the work queue entry to 128 bytes
- *
- * If the packet is recognized to be IP, the hardware starts
- * writing here where the IP header starts (except that the IPv4
- * header is padded for appropriate alignment). If the packet is
- * not recognized to be IP, the hardware starts writing the
- * beginning of the packet here.
- */
- uint8_t packet_data[96];
-
- /**
- * If desired, SW can make the work Q entry any length. For the
- * purposes of discussion here, assume 128B always, as this is all that
- * the hardware deals with.
- *
- */
-
-} CVMX_CACHE_LINE_ALIGNED cvmx_wqe_t;
-
-#endif /* __CVMX_WQE_H__ */
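Word 2 of the work queue entry carries the hardware classification and error bits documented above. A minimal sketch of how receive code might triage a WQE with those fields; it leans on the fact that not_IP, rcv_error and err_code occupy the same bit positions in both the s and snoip views.

static const char *example_classify_wqe(const cvmx_wqe_t *work)
{
	/* Order matters: L4_error and IP_exc are only meaningful when
	 * the packet was received cleanly and recognized as IP. */
	if (work->word2.s.rcv_error)
		return "receive error";
	if (work->word2.s.not_IP)
		return "not IP";
	if (work->word2.s.IP_exc)
		return "IP exception";
	if (work->word2.s.L4_error)
		return "L4 error";
	return "clean IP packet";
}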
diff --git a/drivers/staging/octeon/ethernet-defines.h b/drivers/staging/octeon/ethernet-defines.h
index 6a2cd50a17df..bdaec8d2ca0c 100644
--- a/drivers/staging/octeon/ethernet-defines.h
+++ b/drivers/staging/octeon/ethernet-defines.h
@@ -59,7 +59,7 @@
#ifndef __ETHERNET_DEFINES_H__
#define __ETHERNET_DEFINES_H__
-#include "cvmx-config.h"
+#include <asm/octeon/cvmx-config.h>
#define OCTEON_ETHERNET_VERSION "1.9"
diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c
index f18e3e140413..63800ba71d06 100644
--- a/drivers/staging/octeon/ethernet-mdio.c
+++ b/drivers/staging/octeon/ethernet-mdio.c
@@ -38,9 +38,9 @@
#include "ethernet-mdio.h"
#include "ethernet-util.h"
-#include "cvmx-helper-board.h"
+#include <asm/octeon/cvmx-helper-board.h>
-#include "cvmx-smix-defs.h"
+#include <asm/octeon/cvmx-smix-defs.h>
static void cvm_oct_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
diff --git a/drivers/staging/octeon/ethernet-mem.c b/drivers/staging/octeon/ethernet-mem.c
index 635bb86cdcff..78b6cb743769 100644
--- a/drivers/staging/octeon/ethernet-mem.c
+++ b/drivers/staging/octeon/ethernet-mem.c
@@ -32,7 +32,7 @@
#include "ethernet-defines.h"
-#include "cvmx-fpa.h"
+#include <asm/octeon/cvmx-fpa.h>
/**
* cvm_oct_fill_hw_skbuff - fill the supplied hardware pool with skbuffs
diff --git a/drivers/staging/octeon/ethernet-rgmii.c b/drivers/staging/octeon/ethernet-rgmii.c
index c3d73f8431ae..d8f5f694ec35 100644
--- a/drivers/staging/octeon/ethernet-rgmii.c
+++ b/drivers/staging/octeon/ethernet-rgmii.c
@@ -37,11 +37,11 @@
#include "octeon-ethernet.h"
#include "ethernet-util.h"
-#include "cvmx-helper.h"
+#include <asm/octeon/cvmx-helper.h>
#include <asm/octeon/cvmx-ipd-defs.h>
#include <asm/octeon/cvmx-npi-defs.h>
-#include "cvmx-gmxx-defs.h"
+#include <asm/octeon/cvmx-gmxx-defs.h>
DEFINE_SPINLOCK(global_register_lock);
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
index 8b307b428791..400df8cbee53 100644
--- a/drivers/staging/octeon/ethernet-rx.c
+++ b/drivers/staging/octeon/ethernet-rx.c
@@ -52,14 +52,14 @@
#include "octeon-ethernet.h"
#include "ethernet-util.h"
-#include "cvmx-helper.h"
-#include "cvmx-wqe.h"
-#include "cvmx-fau.h"
-#include "cvmx-pow.h"
-#include "cvmx-pip.h"
-#include "cvmx-scratch.h"
-
-#include "cvmx-gmxx-defs.h"
+#include <asm/octeon/cvmx-helper.h>
+#include <asm/octeon/cvmx-wqe.h>
+#include <asm/octeon/cvmx-fau.h>
+#include <asm/octeon/cvmx-pow.h>
+#include <asm/octeon/cvmx-pip.h>
+#include <asm/octeon/cvmx-scratch.h>
+
+#include <asm/octeon/cvmx-gmxx-defs.h>
struct cvm_napi_wrapper {
struct napi_struct napi;
diff --git a/drivers/staging/octeon/ethernet-rx.h b/drivers/staging/octeon/ethernet-rx.h
index a0743b85d54e..9240c85ce241 100644
--- a/drivers/staging/octeon/ethernet-rx.h
+++ b/drivers/staging/octeon/ethernet-rx.h
@@ -24,7 +24,7 @@
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
*********************************************************************/
-#include "cvmx-fau.h"
+#include <asm/octeon/cvmx-fau.h>
void cvm_oct_poll_controller(struct net_device *dev);
void cvm_oct_rx_initialize(void);
diff --git a/drivers/staging/octeon/ethernet-sgmii.c b/drivers/staging/octeon/ethernet-sgmii.c
index 5e148b512c97..d3e82430eba6 100644
--- a/drivers/staging/octeon/ethernet-sgmii.c
+++ b/drivers/staging/octeon/ethernet-sgmii.c
@@ -35,9 +35,9 @@
#include "octeon-ethernet.h"
#include "ethernet-util.h"
-#include "cvmx-helper.h"
+#include <asm/octeon/cvmx-helper.h>
-#include "cvmx-gmxx-defs.h"
+#include <asm/octeon/cvmx-gmxx-defs.h>
int cvm_oct_sgmii_open(struct net_device *dev)
{
diff --git a/drivers/staging/octeon/ethernet-spi.c b/drivers/staging/octeon/ethernet-spi.c
index d0e2d514968a..af8d62818f13 100644
--- a/drivers/staging/octeon/ethernet-spi.c
+++ b/drivers/staging/octeon/ethernet-spi.c
@@ -35,11 +35,11 @@
#include "octeon-ethernet.h"
#include "ethernet-util.h"
-#include "cvmx-spi.h"
+#include <asm/octeon/cvmx-spi.h>
#include <asm/octeon/cvmx-npi-defs.h>
-#include "cvmx-spxx-defs.h"
-#include "cvmx-stxx-defs.h"
+#include <asm/octeon/cvmx-spxx-defs.h>
+#include <asm/octeon/cvmx-stxx-defs.h>
static int number_spi_ports;
static int need_retrain[2] = { 0, 0 };
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
index 2542c3743904..56d74dc2fbd5 100644
--- a/drivers/staging/octeon/ethernet-tx.c
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -47,13 +47,13 @@
#include "ethernet-tx.h"
#include "ethernet-util.h"
-#include "cvmx-wqe.h"
-#include "cvmx-fau.h"
-#include "cvmx-pip.h"
-#include "cvmx-pko.h"
-#include "cvmx-helper.h"
+#include <asm/octeon/cvmx-wqe.h>
+#include <asm/octeon/cvmx-fau.h>
+#include <asm/octeon/cvmx-pip.h>
+#include <asm/octeon/cvmx-pko.h>
+#include <asm/octeon/cvmx-helper.h>
-#include "cvmx-gmxx-defs.h"
+#include <asm/octeon/cvmx-gmxx-defs.h>
#define CVM_OCT_SKB_CB(skb) ((u64 *)((skb)->cb))
diff --git a/drivers/staging/octeon/ethernet-xaui.c b/drivers/staging/octeon/ethernet-xaui.c
index 861a4b3fe857..419f8c34ecdf 100644
--- a/drivers/staging/octeon/ethernet-xaui.c
+++ b/drivers/staging/octeon/ethernet-xaui.c
@@ -35,9 +35,9 @@
#include "octeon-ethernet.h"
#include "ethernet-util.h"
-#include "cvmx-helper.h"
+#include <asm/octeon/cvmx-helper.h>
-#include "cvmx-gmxx-defs.h"
+#include <asm/octeon/cvmx-gmxx-defs.h>
int cvm_oct_xaui_open(struct net_device *dev)
{
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index 076f86675ce6..9112cd882154 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -44,14 +44,14 @@
#include "ethernet-mdio.h"
#include "ethernet-util.h"
-#include "cvmx-pip.h"
-#include "cvmx-pko.h"
-#include "cvmx-fau.h"
-#include "cvmx-ipd.h"
-#include "cvmx-helper.h"
-
-#include "cvmx-gmxx-defs.h"
-#include "cvmx-smix-defs.h"
+#include <asm/octeon/cvmx-pip.h>
+#include <asm/octeon/cvmx-pko.h>
+#include <asm/octeon/cvmx-fau.h>
+#include <asm/octeon/cvmx-ipd.h>
+#include <asm/octeon/cvmx-helper.h>
+
+#include <asm/octeon/cvmx-gmxx-defs.h>
+#include <asm/octeon/cvmx-smix-defs.h>
#if defined(CONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS) \
&& CONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.c b/drivers/staging/olpc_dcon/olpc_dcon.c
index af24ddfb58c9..3d9199320d86 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon.c
+++ b/drivers/staging/olpc_dcon/olpc_dcon.c
@@ -34,8 +34,8 @@
/* Module definitions */
-static int resumeline = 898;
-module_param(resumeline, int, 0444);
+static ushort resumeline = 898;
+module_param(resumeline, ushort, 0444);
/* Default off since it doesn't work on DCON ASIC in B-test OLPC board */
static int useaa = 1;
@@ -456,7 +456,7 @@ static ssize_t dcon_mono_store(struct device *dev,
unsigned long enable_mono;
int rc;
- rc = strict_strtoul(buf, 10, &enable_mono);
+ rc = kstrtoul(buf, 10, &enable_mono);
if (rc)
return rc;
@@ -472,7 +472,7 @@ static ssize_t dcon_freeze_store(struct device *dev,
unsigned long output;
int ret;
- ret = strict_strtoul(buf, 10, &output);
+ ret = kstrtoul(buf, 10, &output);
if (ret)
return ret;
@@ -498,10 +498,10 @@ static ssize_t dcon_freeze_store(struct device *dev,
static ssize_t dcon_resumeline_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
- unsigned long rl;
+ unsigned short rl;
int rc;
- rc = strict_strtoul(buf, 10, &rl);
+ rc = kstrtou16(buf, 10, &rl);
if (rc)
return rc;
@@ -517,7 +517,7 @@ static ssize_t dcon_sleep_store(struct device *dev,
unsigned long output;
int ret;
- ret = strict_strtoul(buf, 10, &output);
+ ret = kstrtoul(buf, 10, &output);
if (ret)
return ret;
@@ -755,9 +755,9 @@ static int dcon_resume(struct i2c_client *client)
irqreturn_t dcon_interrupt(int irq, void *id)
{
struct dcon_priv *dcon = id;
- int status = pdata->read_status();
+ u8 status;
- if (status == -1)
+ if (pdata->read_status(&status))
return IRQ_NONE;
switch (status & 3) {
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.h b/drivers/staging/olpc_dcon/olpc_dcon.h
index 0264c94375aa..167a41778be6 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon.h
+++ b/drivers/staging/olpc_dcon/olpc_dcon.h
@@ -84,7 +84,7 @@ struct dcon_platform_data {
int (*init)(struct dcon_priv *);
void (*bus_stabilize_wiggle)(void);
void (*set_dconload)(int);
- u8 (*read_status)(void);
+ int (*read_status)(u8 *);
};
#include <linux/interrupt.h>
diff --git a/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c b/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
index 2245213df607..cb6ce0cf92a0 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
+++ b/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
@@ -183,17 +183,15 @@ static void dcon_set_dconload_1(int val)
gpio_set_value(OLPC_GPIO_DCON_LOAD, val);
}
-static u8 dcon_read_status_xo_1(void)
+static int dcon_read_status_xo_1(u8 *status)
{
- u8 status;
-
- status = gpio_get_value(OLPC_GPIO_DCON_STAT0);
- status |= gpio_get_value(OLPC_GPIO_DCON_STAT1) << 1;
+ *status = gpio_get_value(OLPC_GPIO_DCON_STAT0);
+ *status |= gpio_get_value(OLPC_GPIO_DCON_STAT1) << 1;
/* Clear the negative edge status for GPIO7 */
cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_NEGATIVE_EDGE_STS);
- return status;
+ return 0;
}
struct dcon_platform_data dcon_pdata_xo_1 = {
diff --git a/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c b/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c
index a6a6cf2adc4d..69415eec425c 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c
+++ b/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c
@@ -167,20 +167,18 @@ static void dcon_set_dconload_xo_1_5(int val)
gpio_set_value(VX855_GPIO(1), val);
}
-static u8 dcon_read_status_xo_1_5(void)
+static int dcon_read_status_xo_1_5(u8 *status)
{
- u8 status;
-
if (!dcon_was_irq())
return -1;
/* I believe this is the same as "inb(0x44b) & 3" */
- status = gpio_get_value(VX855_GPI(10));
- status |= gpio_get_value(VX855_GPI(11)) << 1;
+ *status = gpio_get_value(VX855_GPI(10));
+ *status |= gpio_get_value(VX855_GPI(11)) << 1;
dcon_clear_irq();
- return status;
+ return 0;
}
struct dcon_platform_data dcon_pdata_xo_1_5 = {
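The read_status() conversion above changes the hook from returning the status byte directly (where -1 had to double as the error value) to returning an error code and handing the two status bits back through a pointer. A minimal caller sketch under the new convention; example_poll_dcon() and its use of the bottom two bits are illustrative, mirroring dcon_interrupt().

static bool example_poll_dcon(struct dcon_platform_data *pdata, u8 *bits)
{
	u8 status;

	/* Non-zero means no DCON interrupt was actually pending, so
	 * the caller can treat the event as spurious. */
	if (pdata->read_status(&status))
		return false;

	*bits = status & 3;	/* only the two status GPIOs matter */
	return true;
}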
diff --git a/drivers/staging/omapdrm/Kconfig b/drivers/staging/omapdrm/Kconfig
new file mode 100644
index 000000000000..81a7cba4a0c5
--- /dev/null
+++ b/drivers/staging/omapdrm/Kconfig
@@ -0,0 +1,25 @@
+
+config DRM_OMAP
+ tristate "OMAP DRM"
+ depends on DRM && !FB_OMAP2
+ depends on ARCH_OMAP2PLUS
+ select DRM_KMS_HELPER
+ select OMAP2_DSS
+ select FB_SYS_FILLRECT
+ select FB_SYS_COPYAREA
+ select FB_SYS_IMAGEBLIT
+ select FB_SYS_FOPS
+ default n
+ help
+ DRM display driver for OMAP2/3/4 based boards.
+
+config DRM_OMAP_NUM_CRTCS
+ int "Number of CRTCs"
+ range 1 10
+ default 1 if ARCH_OMAP2 || ARCH_OMAP3
+ default 2 if ARCH_OMAP4
+ depends on DRM_OMAP
+ help
+ Select the number of video overlays which can be used as framebuffers.
+ The remaining overlays are reserved for video.
+
diff --git a/drivers/staging/omapdrm/Makefile b/drivers/staging/omapdrm/Makefile
new file mode 100644
index 000000000000..592cf69020cd
--- /dev/null
+++ b/drivers/staging/omapdrm/Makefile
@@ -0,0 +1,21 @@
+#
+# Makefile for the drm device driver. This driver provides support for the
+# Direct Rendering Infrastructure (DRI)
+#
+
+ccflags-y := -Iinclude/drm -Werror
+omapdrm-y := omap_drv.o \
+ omap_debugfs.o \
+ omap_crtc.o \
+ omap_encoder.o \
+ omap_connector.o \
+ omap_fb.o \
+ omap_fbdev.o \
+ omap_gem.o \
+ omap_dmm_tiler.o \
+ tcm-sita.o
+
+# temporary:
+omapdrm-y += omap_gem_helpers.o
+
+obj-$(CONFIG_DRM_OMAP) += omapdrm.o
diff --git a/drivers/staging/omapdrm/TODO b/drivers/staging/omapdrm/TODO
new file mode 100644
index 000000000000..55b18377ac4f
--- /dev/null
+++ b/drivers/staging/omapdrm/TODO
@@ -0,0 +1,38 @@
+TODO
+. check error handling/cleanup paths
+. add drm_plane / overlay support
+. add video decode/encode support (via syslink3 + codec-engine)
+. still some rough edges with flipping.. event back to userspace should
+ really come after VSYNC interrupt
+. where should we do eviction (detach_pages())? We aren't necessarily
+ accessing the pages via a GART, so maybe we need some other threshold
+ to put a cap on the # of pages that can be pin'd. (It is mostly only
+ of interest in case you have a swap partition/file.. which a lot of
+ these devices do not.. but it doesn't hurt for the driver to do the
+ right thing anyways.)
+ . Use mm_shrinker to trigger unpinning pages. Need to figure out how
+ to handle next issue first (I think?)
+ . Note TTM already has some mm_shrinker stuff.. maybe an argument to
+ move to TTM? Or maybe something that could be factored out in common?
+. GEM/shmem backed pages can have existing mappings (kernel linear map,
+ etc..), which isn't really ideal.
+. Revisit GEM sync object infrastructure.. TTM has some framework for this
+ already. Possibly this could be refactored out and made more common?
+ There should be some way to do this with less wheel-reinvention.
+. Review DSS vs KMS mismatches. The omap_dss_device is sort of part encoder,
+ part connector. Which results in a bit of duct tape to fwd calls from
+ encoder to connector. Possibly this could be done a bit better.
+. Solve PM sequencing on resume. DMM/TILER must be reloaded before any
+ access is made from any component in the system. Which means on suspend
+ CRTC's should be disabled, and on resume the LUT should be reprogrammed
+ before CRTC's are re-enabled, to prevent DSS from trying to DMA from a
+ buffer mapped in DMM/TILER before LUT is reloaded.
+. Add debugfs information for DMM/TILER
+
+Userspace:
+. git://github.com/robclark/xf86-video-omap.git
+
+Currently tested on
+. OMAP3530 beagleboard
+. OMAP4430 pandaboard
+. OMAP4460 pandaboard
diff --git a/drivers/staging/omapdrm/omap_connector.c b/drivers/staging/omapdrm/omap_connector.c
new file mode 100644
index 000000000000..5e2856c0e0bb
--- /dev/null
+++ b/drivers/staging/omapdrm/omap_connector.c
@@ -0,0 +1,371 @@
+/*
+ * drivers/staging/omapdrm/omap_connector.c
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "omap_drv.h"
+
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+
+/*
+ * connector funcs
+ */
+
+#define to_omap_connector(x) container_of(x, struct omap_connector, base)
+
+struct omap_connector {
+ struct drm_connector base;
+ struct omap_dss_device *dssdev;
+};
+
+static inline void copy_timings_omap_to_drm(struct drm_display_mode *mode,
+ struct omap_video_timings *timings)
+{
+ mode->clock = timings->pixel_clock;
+
+ mode->hdisplay = timings->x_res;
+ mode->hsync_start = mode->hdisplay + timings->hfp;
+ mode->hsync_end = mode->hsync_start + timings->hsw;
+ mode->htotal = mode->hsync_end + timings->hbp;
+
+ mode->vdisplay = timings->y_res;
+ mode->vsync_start = mode->vdisplay + timings->vfp;
+ mode->vsync_end = mode->vsync_start + timings->vsw;
+ mode->vtotal = mode->vsync_end + timings->vbp;
+
+ /* note: whether or not it is interlaced, +/- h/vsync, etc,
+ * which should be set in the mode flags, is not exposed in
+ * the omap_video_timings struct.. but hdmi driver tracks
+ * those separately, so all we have to do to set the mode
+ * is recover these timing values, and the
+ * omap_dss_driver would do the rest.
+ */
+}
+
+static inline void copy_timings_drm_to_omap(struct omap_video_timings *timings,
+ struct drm_display_mode *mode)
+{
+ timings->pixel_clock = mode->clock;
+
+ timings->x_res = mode->hdisplay;
+ timings->hfp = mode->hsync_start - mode->hdisplay;
+ timings->hsw = mode->hsync_end - mode->hsync_start;
+ timings->hbp = mode->htotal - mode->hsync_end;
+
+ timings->y_res = mode->vdisplay;
+ timings->vfp = mode->vsync_start - mode->vdisplay;
+ timings->vsw = mode->vsync_end - mode->vsync_start;
+ timings->vbp = mode->vtotal - mode->vsync_end;
+}
+
+static void omap_connector_dpms(struct drm_connector *connector, int mode)
+{
+ struct omap_connector *omap_connector = to_omap_connector(connector);
+ struct omap_dss_device *dssdev = omap_connector->dssdev;
+ int old_dpms;
+
+ DBG("%s: %d", dssdev->name, mode);
+
+ old_dpms = connector->dpms;
+
+ /* from off to on, do from crtc to connector */
+ if (mode < old_dpms)
+ drm_helper_connector_dpms(connector, mode);
+
+ if (mode == DRM_MODE_DPMS_ON) {
+ /* store resume info for suspended displays */
+ switch (dssdev->state) {
+ case OMAP_DSS_DISPLAY_SUSPENDED:
+ dssdev->activate_after_resume = true;
+ break;
+ case OMAP_DSS_DISPLAY_DISABLED: {
+ int ret = dssdev->driver->enable(dssdev);
+ if (ret) {
+ DBG("%s: failed to enable: %d",
+ dssdev->name, ret);
+ dssdev->driver->disable(dssdev);
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ } else {
+ /* TODO */
+ }
+
+ /* from on to off, do from connector to crtc */
+ if (mode > old_dpms)
+ drm_helper_connector_dpms(connector, mode);
+}
+
+enum drm_connector_status omap_connector_detect(
+ struct drm_connector *connector, bool force)
+{
+ struct omap_connector *omap_connector = to_omap_connector(connector);
+ struct omap_dss_device *dssdev = omap_connector->dssdev;
+ struct omap_dss_driver *dssdrv = dssdev->driver;
+ enum drm_connector_status ret;
+
+ if (dssdrv->detect) {
+ if (dssdrv->detect(dssdev)) {
+ ret = connector_status_connected;
+ } else {
+ ret = connector_status_disconnected;
+ }
+ } else {
+ ret = connector_status_unknown;
+ }
+
+ VERB("%s: %d (force=%d)", omap_connector->dssdev->name, ret, force);
+
+ return ret;
+}
+
+static void omap_connector_destroy(struct drm_connector *connector)
+{
+ struct omap_connector *omap_connector = to_omap_connector(connector);
+ struct omap_dss_device *dssdev = omap_connector->dssdev;
+
+ dssdev->driver->disable(dssdev);
+
+ DBG("%s", omap_connector->dssdev->name);
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
+ kfree(omap_connector);
+
+ omap_dss_put_device(dssdev);
+}
+
+#define MAX_EDID 512
+
+static int omap_connector_get_modes(struct drm_connector *connector)
+{
+ struct omap_connector *omap_connector = to_omap_connector(connector);
+ struct omap_dss_device *dssdev = omap_connector->dssdev;
+ struct omap_dss_driver *dssdrv = dssdev->driver;
+ struct drm_device *dev = connector->dev;
+ int n = 0;
+
+ DBG("%s", omap_connector->dssdev->name);
+
+ /* if display exposes EDID, then we parse that in the normal way to
+ * build table of supported modes.. otherwise (ie. fixed resolution
+ * LCD panels) we just return a single mode corresponding to the
+ * currently configured timings:
+ */
+ if (dssdrv->read_edid) {
+ void *edid = kzalloc(MAX_EDID, GFP_KERNEL);
+
+ if ((dssdrv->read_edid(dssdev, edid, MAX_EDID) > 0) &&
+ drm_edid_is_valid(edid)) {
+ drm_mode_connector_update_edid_property(
+ connector, edid);
+ n = drm_add_edid_modes(connector, edid);
+ kfree(connector->display_info.raw_edid);
+ connector->display_info.raw_edid = edid;
+ } else {
+ drm_mode_connector_update_edid_property(
+ connector, NULL);
+ connector->display_info.raw_edid = NULL;
+ kfree(edid);
+ }
+ } else {
+ struct drm_display_mode *mode = drm_mode_create(dev);
+ struct omap_video_timings timings;
+
+ dssdrv->get_timings(dssdev, &timings);
+
+ copy_timings_omap_to_drm(mode, &timings);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_set_name(mode);
+ drm_mode_probed_add(connector, mode);
+
+ n = 1;
+ }
+
+ return n;
+}
+
+static int omap_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct omap_connector *omap_connector = to_omap_connector(connector);
+ struct omap_dss_device *dssdev = omap_connector->dssdev;
+ struct omap_dss_driver *dssdrv = dssdev->driver;
+ struct omap_video_timings timings = {0};
+ struct drm_device *dev = connector->dev;
+ struct drm_display_mode *new_mode;
+ int ret = MODE_BAD;
+
+ copy_timings_drm_to_omap(&timings, mode);
+ mode->vrefresh = drm_mode_vrefresh(mode);
+
+ if (!dssdrv->check_timings(dssdev, &timings)) {
+ /* check if vrefresh is still valid */
+ new_mode = drm_mode_duplicate(dev, mode);
+ new_mode->clock = timings.pixel_clock;
+ new_mode->vrefresh = 0;
+ if (mode->vrefresh == drm_mode_vrefresh(new_mode))
+ ret = MODE_OK;
+ drm_mode_destroy(dev, new_mode);
+ }
+
+ DBG("connector: mode %s: "
+ "%d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
+ (ret == MODE_OK) ? "valid" : "invalid",
+ mode->base.id, mode->name, mode->vrefresh, mode->clock,
+ mode->hdisplay, mode->hsync_start,
+ mode->hsync_end, mode->htotal,
+ mode->vdisplay, mode->vsync_start,
+ mode->vsync_end, mode->vtotal, mode->type, mode->flags);
+
+ return ret;
+}
+
+struct drm_encoder *omap_connector_attached_encoder(
+ struct drm_connector *connector)
+{
+ int i;
+ struct omap_connector *omap_connector = to_omap_connector(connector);
+
+ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+ struct drm_mode_object *obj;
+
+ if (connector->encoder_ids[i] == 0)
+ break;
+
+ obj = drm_mode_object_find(connector->dev,
+ connector->encoder_ids[i],
+ DRM_MODE_OBJECT_ENCODER);
+
+ if (obj) {
+ struct drm_encoder *encoder = obj_to_encoder(obj);
+ struct omap_overlay_manager *mgr =
+ omap_encoder_get_manager(encoder);
+ DBG("%s: found %s", omap_connector->dssdev->name,
+ mgr->name);
+ return encoder;
+ }
+ }
+
+ DBG("%s: no encoder", omap_connector->dssdev->name);
+
+ return NULL;
+}
+
+static const struct drm_connector_funcs omap_connector_funcs = {
+ .dpms = omap_connector_dpms,
+ .detect = omap_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = omap_connector_destroy,
+};
+
+static const struct drm_connector_helper_funcs omap_connector_helper_funcs = {
+ .get_modes = omap_connector_get_modes,
+ .mode_valid = omap_connector_mode_valid,
+ .best_encoder = omap_connector_attached_encoder,
+};
+
+/* called from encoder when mode is set, to propagate settings to the dssdev */
+void omap_connector_mode_set(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct drm_device *dev = connector->dev;
+ struct omap_connector *omap_connector = to_omap_connector(connector);
+ struct omap_dss_device *dssdev = omap_connector->dssdev;
+ struct omap_dss_driver *dssdrv = dssdev->driver;
+ struct omap_video_timings timings;
+
+ copy_timings_drm_to_omap(&timings, mode);
+
+ DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
+ omap_connector->dssdev->name,
+ mode->base.id, mode->name, mode->vrefresh, mode->clock,
+ mode->hdisplay, mode->hsync_start,
+ mode->hsync_end, mode->htotal,
+ mode->vdisplay, mode->vsync_start,
+ mode->vsync_end, mode->vtotal, mode->type, mode->flags);
+
+ if (dssdrv->check_timings(dssdev, &timings)) {
+ dev_err(dev->dev, "could not set timings\n");
+ return;
+ }
+
+ dssdrv->set_timings(dssdev, &timings);
+}
+
+/* flush an area of the framebuffer (in case of manual update display that
+ * is not automatically flushed)
+ */
+void omap_connector_flush(struct drm_connector *connector,
+ int x, int y, int w, int h)
+{
+ struct omap_connector *omap_connector = to_omap_connector(connector);
+
+ /* TODO: enable when supported in dss */
+ VERB("%s: %d,%d, %dx%d", omap_connector->dssdev->name, x, y, w, h);
+}
+
+/* initialize connector */
+struct drm_connector *omap_connector_init(struct drm_device *dev,
+ int connector_type, struct omap_dss_device *dssdev)
+{
+ struct drm_connector *connector = NULL;
+ struct omap_connector *omap_connector;
+
+ DBG("%s", dssdev->name);
+
+ omap_dss_get_device(dssdev);
+
+ omap_connector = kzalloc(sizeof(struct omap_connector), GFP_KERNEL);
+ if (!omap_connector) {
+ dev_err(dev->dev, "could not allocate connector\n");
+ goto fail;
+ }
+
+ omap_connector->dssdev = dssdev;
+ connector = &omap_connector->base;
+
+ drm_connector_init(dev, connector, &omap_connector_funcs,
+ connector_type);
+ drm_connector_helper_add(connector, &omap_connector_helper_funcs);
+
+#if 0 /* enable when dss2 supports hotplug */
+ if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_HPD)
+ connector->polled = 0;
+ else
+#endif
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
+
+ connector->interlace_allowed = 1;
+ connector->doublescan_allowed = 0;
+
+ drm_sysfs_connector_add(connector);
+
+ return connector;
+
+fail:
+ if (connector) {
+ omap_connector_destroy(connector);
+ }
+
+ return NULL;
+}
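
The connector callbacks above rely on copy_timings_drm_to_omap()/copy_timings_omap_to_drm(), defined elsewhere in the driver and not part of this hunk, to translate between DRM display modes and DSS timings. As a hedged illustration of what that translation involves, the sketch below applies the standard DRM blanking arithmetic to the same mode fields printed by the DBG lines above; the destination struct and its field names are purely illustrative stand-ins (only pixel_clock is confirmed by the mode_valid() code), so do not read them as the real omap_video_timings layout.

#include "drm_crtc.h"	/* struct drm_display_mode, as included by omap_crtc.c */

/* Illustrative stand-in only; NOT the real omap_video_timings layout. */
struct example_dss_timings {
	u16 x_res, y_res;	/* active pixels / lines */
	u32 pixel_clock;	/* kHz, matching mode->clock above */
	u16 hfp, hsw, hbp;	/* horizontal front porch, sync width, back porch */
	u16 vfp, vsw, vbp;	/* vertical front porch, sync width, back porch */
};

static void example_timings_from_drm(struct example_dss_timings *t,
		const struct drm_display_mode *mode)
{
	t->pixel_clock = mode->clock;			/* both in kHz */
	t->x_res = mode->hdisplay;
	t->y_res = mode->vdisplay;
	t->hfp = mode->hsync_start - mode->hdisplay;	/* front porch */
	t->hsw = mode->hsync_end - mode->hsync_start;	/* sync pulse width */
	t->hbp = mode->htotal - mode->hsync_end;	/* back porch */
	t->vfp = mode->vsync_start - mode->vdisplay;
	t->vsw = mode->vsync_end - mode->vsync_start;
	t->vbp = mode->vtotal - mode->vsync_end;
}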
diff --git a/drivers/staging/omapdrm/omap_crtc.c b/drivers/staging/omapdrm/omap_crtc.c
new file mode 100644
index 000000000000..cffdf5e12394
--- /dev/null
+++ b/drivers/staging/omapdrm/omap_crtc.c
@@ -0,0 +1,326 @@
+/*
+ * drivers/staging/omapdrm/omap_crtc.c
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "omap_drv.h"
+
+#include "drm_mode.h"
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+
+#define to_omap_crtc(x) container_of(x, struct omap_crtc, base)
+
+struct omap_crtc {
+ struct drm_crtc base;
+ struct omap_overlay *ovl;
+ struct omap_overlay_info info;
+ int id;
+
+ /* if there is a pending flip, this will be non-null: */
+ struct drm_pending_vblank_event *event;
+};
+
+/* push changes down to dss2 */
+static int commit(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ struct omap_overlay *ovl = omap_crtc->ovl;
+ struct omap_overlay_info *info = &omap_crtc->info;
+ int ret;
+
+ DBG("%s", omap_crtc->ovl->name);
+ DBG("%dx%d -> %dx%d (%d)", info->width, info->height, info->out_width,
+ info->out_height, info->screen_width);
+ DBG("%d,%d %08x", info->pos_x, info->pos_y, info->paddr);
+
+ /* NOTE: do we want to do this at all here, or just wait
+ * for dpms(ON) since other CRTCs may not have their mode
+ * set yet, so fb dimensions may still change..
+ */
+ ret = ovl->set_overlay_info(ovl, info);
+ if (ret) {
+ dev_err(dev->dev, "could not set overlay info\n");
+ return ret;
+ }
+
+ /* our encoder doesn't necessarily get a commit() after this, in
+ * particular in the dpms() and mode_set_base() cases, so force the
+ * manager to update:
+ *
+ * could this be in the encoder somehow?
+ */
+ if (ovl->manager) {
+ ret = ovl->manager->apply(ovl->manager);
+ if (ret) {
+ dev_err(dev->dev, "could not apply settings\n");
+ return ret;
+ }
+ }
+
+ if (info->enabled) {
+ omap_framebuffer_flush(crtc->fb, crtc->x, crtc->y,
+ crtc->fb->width, crtc->fb->height);
+ }
+
+ return 0;
+}
+
+/* update parameters that are dependent on the framebuffer dimensions and
+ * position within the fb that this crtc scans out from. This is called
+ * when framebuffer dimensions or x,y base may have changed, either due
+ * to our mode, or a change in another crtc that is scanning out of the
+ * same fb.
+ */
+static void update_scanout(struct drm_crtc *crtc)
+{
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ dma_addr_t paddr;
+ unsigned int screen_width;
+
+ omap_framebuffer_get_buffer(crtc->fb, crtc->x, crtc->y,
+ NULL, &paddr, &screen_width);
+
+ DBG("%s: %d,%d: %08x (%d)", omap_crtc->ovl->name,
+ crtc->x, crtc->y, (u32)paddr, screen_width);
+
+ omap_crtc->info.paddr = paddr;
+ omap_crtc->info.screen_width = screen_width;
+}
+
+static void omap_crtc_gamma_set(struct drm_crtc *crtc,
+ u16 *red, u16 *green, u16 *blue, uint32_t start, uint32_t size)
+{
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ DBG("%s", omap_crtc->ovl->name);
+}
+
+static void omap_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ DBG("%s", omap_crtc->ovl->name);
+ drm_crtc_cleanup(crtc);
+ kfree(omap_crtc);
+}
+
+static void omap_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+
+ DBG("%s: %d", omap_crtc->ovl->name, mode);
+
+ if (mode == DRM_MODE_DPMS_ON) {
+ update_scanout(crtc);
+ omap_crtc->info.enabled = true;
+ } else {
+ omap_crtc->info.enabled = false;
+ }
+
+ WARN_ON(commit(crtc));
+}
+
+static bool omap_crtc_mode_fixup(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ DBG("%s", omap_crtc->ovl->name);
+ return true;
+}
+
+static int omap_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+
+ DBG("%s: %d,%d: %dx%d", omap_crtc->ovl->name, x, y,
+ mode->hdisplay, mode->vdisplay);
+
+ /* just use adjusted mode */
+ mode = adjusted_mode;
+
+ omap_crtc->info.width = mode->hdisplay;
+ omap_crtc->info.height = mode->vdisplay;
+ omap_crtc->info.out_width = mode->hdisplay;
+ omap_crtc->info.out_height = mode->vdisplay;
+ omap_crtc->info.color_mode = OMAP_DSS_COLOR_RGB24U;
+ omap_crtc->info.rotation_type = OMAP_DSS_ROT_DMA;
+ omap_crtc->info.rotation = OMAP_DSS_ROT_0;
+ omap_crtc->info.global_alpha = 0xff;
+ omap_crtc->info.mirror = 0;
+ omap_crtc->info.pos_x = 0;
+ omap_crtc->info.pos_y = 0;
+#if 0 /* re-enable when these are available in DSS2 driver */
+ omap_crtc->info.zorder = 3; /* GUI in the front, video behind */
+ omap_crtc->info.min_x_decim = 1;
+ omap_crtc->info.max_x_decim = 1;
+ omap_crtc->info.min_y_decim = 1;
+ omap_crtc->info.max_y_decim = 1;
+#endif
+
+ update_scanout(crtc);
+
+ return 0;
+}
+
+static void omap_crtc_prepare(struct drm_crtc *crtc)
+{
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ struct omap_overlay *ovl = omap_crtc->ovl;
+
+ DBG("%s", omap_crtc->ovl->name);
+
+ ovl->get_overlay_info(ovl, &omap_crtc->info);
+
+ omap_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+static void omap_crtc_commit(struct drm_crtc *crtc)
+{
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ DBG("%s", omap_crtc->ovl->name);
+ omap_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+}
+
+static int omap_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+
+ DBG("%s %d,%d: fb=%p", omap_crtc->ovl->name, x, y, old_fb);
+
+ update_scanout(crtc);
+
+ return commit(crtc);
+}
+
+static void omap_crtc_load_lut(struct drm_crtc *crtc)
+{
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ DBG("%s", omap_crtc->ovl->name);
+}
+
+static void page_flip_cb(void *arg)
+{
+ struct drm_crtc *crtc = arg;
+ struct drm_device *dev = crtc->dev;
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ struct drm_pending_vblank_event *event = omap_crtc->event;
+ struct timeval now;
+ unsigned long flags;
+
+ WARN_ON(!event);
+
+ omap_crtc->event = NULL;
+
+ update_scanout(crtc);
+ WARN_ON(commit(crtc));
+
+ /* wakeup userspace */
+ /* TODO: this should happen *after* flip in vsync IRQ handler */
+ if (event) {
+ spin_lock_irqsave(&dev->event_lock, flags);
+ event->event.sequence = drm_vblank_count_and_time(
+ dev, omap_crtc->id, &now);
+ event->event.tv_sec = now.tv_sec;
+ event->event.tv_usec = now.tv_usec;
+ list_add_tail(&event->base.link,
+ &event->base.file_priv->event_list);
+ wake_up_interruptible(&event->base.file_priv->event_wait);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ }
+}
+
+static int omap_crtc_page_flip_locked(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event)
+{
+ struct drm_device *dev = crtc->dev;
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+
+ DBG("%d -> %d", crtc->fb ? crtc->fb->base.id : -1, fb->base.id);
+
+ if (omap_crtc->event) {
+ dev_err(dev->dev, "already a pending flip\n");
+ return -EINVAL;
+ }
+
+ crtc->fb = fb;
+ omap_crtc->event = event;
+
+ omap_gem_op_async(omap_framebuffer_bo(fb), OMAP_GEM_READ,
+ page_flip_cb, crtc);
+
+ return 0;
+}
+
+static const struct drm_crtc_funcs omap_crtc_funcs = {
+ .gamma_set = omap_crtc_gamma_set,
+ .set_config = drm_crtc_helper_set_config,
+ .destroy = omap_crtc_destroy,
+ .page_flip = omap_crtc_page_flip_locked,
+};
+
+static const struct drm_crtc_helper_funcs omap_crtc_helper_funcs = {
+ .dpms = omap_crtc_dpms,
+ .mode_fixup = omap_crtc_mode_fixup,
+ .mode_set = omap_crtc_mode_set,
+ .prepare = omap_crtc_prepare,
+ .commit = omap_crtc_commit,
+ .mode_set_base = omap_crtc_mode_set_base,
+ .load_lut = omap_crtc_load_lut,
+};
+
+struct omap_overlay *omap_crtc_get_overlay(struct drm_crtc *crtc)
+{
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ return omap_crtc->ovl;
+}
+
+/* initialize crtc */
+struct drm_crtc *omap_crtc_init(struct drm_device *dev,
+ struct omap_overlay *ovl, int id)
+{
+ struct drm_crtc *crtc = NULL;
+ struct omap_crtc *omap_crtc = kzalloc(sizeof(*omap_crtc), GFP_KERNEL);
+
+ DBG("%s", ovl->name);
+
+ if (!omap_crtc) {
+ dev_err(dev->dev, "could not allocate CRTC\n");
+ goto fail;
+ }
+
+ omap_crtc->ovl = ovl;
+ omap_crtc->id = id;
+ crtc = &omap_crtc->base;
+ drm_crtc_init(dev, crtc, &omap_crtc_funcs);
+ drm_crtc_helper_add(crtc, &omap_crtc_helper_funcs);
+
+ return crtc;
+
+fail:
+ if (crtc) {
+ omap_crtc_destroy(crtc);
+ }
+ return NULL;
+}
diff --git a/drivers/staging/omapdrm/omap_debugfs.c b/drivers/staging/omapdrm/omap_debugfs.c
new file mode 100644
index 000000000000..da920dfdc59c
--- /dev/null
+++ b/drivers/staging/omapdrm/omap_debugfs.c
@@ -0,0 +1,42 @@
+/*
+ * drivers/staging/omapdrm/omap_debugfs.c
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob.clark@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "omap_drv.h"
+#include "omap_dmm_tiler.h"
+
+#ifdef CONFIG_DEBUG_FS
+
+static struct drm_info_list omap_debugfs_list[] = {
+ {"tiler_map", tiler_map_show, 0},
+};
+
+int omap_debugfs_init(struct drm_minor *minor)
+{
+ return drm_debugfs_create_files(omap_debugfs_list,
+ ARRAY_SIZE(omap_debugfs_list),
+ minor->debugfs_root, minor);
+}
+
+void omap_debugfs_cleanup(struct drm_minor *minor)
+{
+ drm_debugfs_remove_files(omap_debugfs_list,
+ ARRAY_SIZE(omap_debugfs_list), minor);
+}
+
+#endif
diff --git a/drivers/staging/omapdrm/omap_dmm_priv.h b/drivers/staging/omapdrm/omap_dmm_priv.h
new file mode 100644
index 000000000000..2f529ab4b7c7
--- /dev/null
+++ b/drivers/staging/omapdrm/omap_dmm_priv.h
@@ -0,0 +1,187 @@
+/*
+ *
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Rob Clark <rob@ti.com>
+ * Andy Gross <andy.gross@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef OMAP_DMM_PRIV_H
+#define OMAP_DMM_PRIV_H
+
+#define DMM_REVISION 0x000
+#define DMM_HWINFO 0x004
+#define DMM_LISA_HWINFO 0x008
+#define DMM_DMM_SYSCONFIG 0x010
+#define DMM_LISA_LOCK 0x01C
+#define DMM_LISA_MAP__0 0x040
+#define DMM_LISA_MAP__1 0x044
+#define DMM_TILER_HWINFO 0x208
+#define DMM_TILER_OR__0 0x220
+#define DMM_TILER_OR__1 0x224
+#define DMM_PAT_HWINFO 0x408
+#define DMM_PAT_GEOMETRY 0x40C
+#define DMM_PAT_CONFIG 0x410
+#define DMM_PAT_VIEW__0 0x420
+#define DMM_PAT_VIEW__1 0x424
+#define DMM_PAT_VIEW_MAP__0 0x440
+#define DMM_PAT_VIEW_MAP_BASE 0x460
+#define DMM_PAT_IRQ_EOI 0x478
+#define DMM_PAT_IRQSTATUS_RAW 0x480
+#define DMM_PAT_IRQSTATUS 0x490
+#define DMM_PAT_IRQENABLE_SET 0x4A0
+#define DMM_PAT_IRQENABLE_CLR 0x4B0
+#define DMM_PAT_STATUS__0 0x4C0
+#define DMM_PAT_STATUS__1 0x4C4
+#define DMM_PAT_STATUS__2 0x4C8
+#define DMM_PAT_STATUS__3 0x4CC
+#define DMM_PAT_DESCR__0 0x500
+#define DMM_PAT_DESCR__1 0x510
+#define DMM_PAT_DESCR__2 0x520
+#define DMM_PAT_DESCR__3 0x530
+#define DMM_PEG_HWINFO 0x608
+#define DMM_PEG_PRIO 0x620
+#define DMM_PEG_PRIO_PAT 0x640
+
+#define DMM_IRQSTAT_DST (1<<0)
+#define DMM_IRQSTAT_LST (1<<1)
+#define DMM_IRQSTAT_ERR_INV_DSC (1<<2)
+#define DMM_IRQSTAT_ERR_INV_DATA (1<<3)
+#define DMM_IRQSTAT_ERR_UPD_AREA (1<<4)
+#define DMM_IRQSTAT_ERR_UPD_CTRL (1<<5)
+#define DMM_IRQSTAT_ERR_UPD_DATA (1<<6)
+#define DMM_IRQSTAT_ERR_LUT_MISS (1<<7)
+
+#define DMM_IRQSTAT_ERR_MASK (DMM_IRQSTAT_ERR_INV_DSC | \
+ DMM_IRQSTAT_ERR_INV_DATA | \
+ DMM_IRQSTAT_ERR_UPD_AREA | \
+ DMM_IRQSTAT_ERR_UPD_CTRL | \
+ DMM_IRQSTAT_ERR_UPD_DATA | \
+ DMM_IRQSTAT_ERR_LUT_MISS)
+
+#define DMM_PATSTATUS_READY (1<<0)
+#define DMM_PATSTATUS_VALID (1<<1)
+#define DMM_PATSTATUS_RUN (1<<2)
+#define DMM_PATSTATUS_DONE (1<<3)
+#define DMM_PATSTATUS_LINKED (1<<4)
+#define DMM_PATSTATUS_BYPASSED (1<<7)
+#define DMM_PATSTATUS_ERR_INV_DESCR (1<<10)
+#define DMM_PATSTATUS_ERR_INV_DATA (1<<11)
+#define DMM_PATSTATUS_ERR_UPD_AREA (1<<12)
+#define DMM_PATSTATUS_ERR_UPD_CTRL (1<<13)
+#define DMM_PATSTATUS_ERR_UPD_DATA (1<<14)
+#define DMM_PATSTATUS_ERR_ACCESS (1<<15)
+
+/* note: don't treat DMM_PATSTATUS_ERR_ACCESS as an error */
+#define DMM_PATSTATUS_ERR (DMM_PATSTATUS_ERR_INV_DESCR | \
+ DMM_PATSTATUS_ERR_INV_DATA | \
+ DMM_PATSTATUS_ERR_UPD_AREA | \
+ DMM_PATSTATUS_ERR_UPD_CTRL | \
+ DMM_PATSTATUS_ERR_UPD_DATA)
+
+
+
+enum {
+ PAT_STATUS,
+ PAT_DESCR
+};
+
+struct pat_ctrl {
+ u32 start:4;
+ u32 dir:4;
+ u32 lut_id:8;
+ u32 sync:12;
+ u32 ini:4;
+};
+
+struct pat {
+ uint32_t next_pa;
+ struct pat_area area;
+ struct pat_ctrl ctrl;
+ uint32_t data_pa;
+};
+
+#define DMM_FIXED_RETRY_COUNT 1000
+
+/* create refill buffer big enough to refill all slots, plus 3 descriptors..
+ * 3 descriptors is probably the worst-case for # of 2d-slices in a 1d area,
+ * but I guess you don't hit that worst case at the same time as full area
+ * refill
+ */
+#define DESCR_SIZE 128
+#define REFILL_BUFFER_SIZE ((4 * 128 * 256) + (3 * DESCR_SIZE))
+
+struct dmm;
+
+struct dmm_txn {
+ void *engine_handle;
+ struct tcm *tcm;
+
+ uint8_t *current_va;
+ dma_addr_t current_pa;
+
+ struct pat *last_pat;
+};
+
+struct refill_engine {
+ int id;
+ struct dmm *dmm;
+ struct tcm *tcm;
+
+ uint8_t *refill_va;
+ dma_addr_t refill_pa;
+
+ /* only one trans per engine for now */
+ struct dmm_txn txn;
+
+ /* offset to lut associated with container */
+ u32 *lut_offset;
+
+ wait_queue_head_t wait_for_refill;
+
+ struct list_head idle_node;
+};
+
+struct dmm {
+ struct device *dev;
+ void __iomem *base;
+ int irq;
+
+ struct page *dummy_page;
+ dma_addr_t dummy_pa;
+
+ void *refill_va;
+ dma_addr_t refill_pa;
+
+ /* refill engines */
+ struct semaphore engine_sem;
+ struct list_head idle_head;
+ struct refill_engine *engines;
+ int num_engines;
+
+ /* container information */
+ int container_width;
+ int container_height;
+ int lut_width;
+ int lut_height;
+ int num_lut;
+
+ /* array of LUT - TCM containers */
+ struct tcm **tcm;
+
+ /* LUT table storage */
+ u32 *lut;
+
+ /* allocation list and lock */
+ struct list_head alloc_head;
+ spinlock_t list_lock;
+};
+
+#endif
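
For reference, the refill buffer sizing above works out to 4 bytes per PAT data word for every slot of a full 256 x 128 LUT, plus room for three chained descriptors of DESCR_SIZE bytes each: 131072 + 384 = 131456 bytes per engine. The tiny host-side check below is only a sanity sketch of that arithmetic; the constants are mirrored locally as an assumption rather than included from this header.

#include <assert.h>
#include <stdio.h>

/* Mirrored from omap_dmm_priv.h above; assumed to stay in sync. */
#define DESCR_SIZE		128
#define REFILL_BUFFER_SIZE	((4 * 128 * 256) + (3 * DESCR_SIZE))

int main(void)
{
	unsigned int lut_slots = 256 * 128;		/* slots per LUT */
	unsigned int data_bytes = 4 * lut_slots;	/* 131072: one PAT word per slot */
	unsigned int descr_bytes = 3 * DESCR_SIZE;	/* 384: worst-case descriptor chain */

	assert(data_bytes + descr_bytes == REFILL_BUFFER_SIZE);
	printf("refill buffer: %u data + %u descriptor bytes = %u total\n",
	       data_bytes, descr_bytes, data_bytes + descr_bytes);
	return 0;
}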
diff --git a/drivers/staging/omapdrm/omap_dmm_tiler.c b/drivers/staging/omapdrm/omap_dmm_tiler.c
new file mode 100644
index 000000000000..852d9440f725
--- /dev/null
+++ b/drivers/staging/omapdrm/omap_dmm_tiler.c
@@ -0,0 +1,830 @@
+/*
+ * DMM IOMMU driver support functions for TI OMAP processors.
+ *
+ * Author: Rob Clark <rob@ti.com>
+ * Andy Gross <andy.gross@ti.com>
+ *
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h> /* platform_device() */
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/time.h>
+#include <linux/list.h>
+#include <linux/semaphore.h>
+
+#include "omap_dmm_tiler.h"
+#include "omap_dmm_priv.h"
+
+/* mappings for associating views to luts */
+static struct tcm *containers[TILFMT_NFORMATS];
+static struct dmm *omap_dmm;
+
+/* Geometry table */
+#define GEOM(xshift, yshift, bytes_per_pixel) { \
+ .x_shft = (xshift), \
+ .y_shft = (yshift), \
+ .cpp = (bytes_per_pixel), \
+ .slot_w = 1 << (SLOT_WIDTH_BITS - (xshift)), \
+ .slot_h = 1 << (SLOT_HEIGHT_BITS - (yshift)), \
+ }
+
+static const struct {
+ uint32_t x_shft; /* unused X-bits (as part of bpp) */
+ uint32_t y_shft; /* unused Y-bits (as part of bpp) */
+ uint32_t cpp; /* bytes/chars per pixel */
+ uint32_t slot_w; /* width of each slot (in pixels) */
+ uint32_t slot_h; /* height of each slot (in pixels) */
+} geom[TILFMT_NFORMATS] = {
+ [TILFMT_8BIT] = GEOM(0, 0, 1),
+ [TILFMT_16BIT] = GEOM(0, 1, 2),
+ [TILFMT_32BIT] = GEOM(1, 1, 4),
+ [TILFMT_PAGE] = GEOM(SLOT_WIDTH_BITS, SLOT_HEIGHT_BITS, 1),
+};
+
+
+/* lookup table for registers w/ per-engine instances */
+static const uint32_t reg[][4] = {
+ [PAT_STATUS] = {DMM_PAT_STATUS__0, DMM_PAT_STATUS__1,
+ DMM_PAT_STATUS__2, DMM_PAT_STATUS__3},
+ [PAT_DESCR] = {DMM_PAT_DESCR__0, DMM_PAT_DESCR__1,
+ DMM_PAT_DESCR__2, DMM_PAT_DESCR__3},
+};
+
+/* simple allocator to grab next 16 byte aligned memory from txn */
+static void *alloc_dma(struct dmm_txn *txn, size_t sz, dma_addr_t *pa)
+{
+ void *ptr;
+ struct refill_engine *engine = txn->engine_handle;
+
+ /* dmm programming requires 16 byte aligned addresses */
+ txn->current_pa = round_up(txn->current_pa, 16);
+ txn->current_va = (void *)round_up((long)txn->current_va, 16);
+
+ ptr = txn->current_va;
+ *pa = txn->current_pa;
+
+ txn->current_pa += sz;
+ txn->current_va += sz;
+
+ BUG_ON((txn->current_va - engine->refill_va) > REFILL_BUFFER_SIZE);
+
+ return ptr;
+}
+
+/* check status and spin until wait_mask comes true */
+static int wait_status(struct refill_engine *engine, uint32_t wait_mask)
+{
+ struct dmm *dmm = engine->dmm;
+ uint32_t r = 0, err, i;
+
+ i = DMM_FIXED_RETRY_COUNT;
+ while (true) {
+ r = readl(dmm->base + reg[PAT_STATUS][engine->id]);
+ err = r & DMM_PATSTATUS_ERR;
+ if (err)
+ return -EFAULT;
+
+ if ((r & wait_mask) == wait_mask)
+ break;
+
+ if (--i == 0)
+ return -ETIMEDOUT;
+
+ udelay(1);
+ }
+
+ return 0;
+}
+
+irqreturn_t omap_dmm_irq_handler(int irq, void *arg)
+{
+ struct dmm *dmm = arg;
+ uint32_t status = readl(dmm->base + DMM_PAT_IRQSTATUS);
+ int i;
+
+ /* ack IRQ */
+ writel(status, dmm->base + DMM_PAT_IRQSTATUS);
+
+ for (i = 0; i < dmm->num_engines; i++) {
+ if (status & DMM_IRQSTAT_LST)
+ wake_up_interruptible(&dmm->engines[i].wait_for_refill);
+
+ status >>= 8;
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * Get a handle for a DMM transaction
+ */
+static struct dmm_txn *dmm_txn_init(struct dmm *dmm, struct tcm *tcm)
+{
+ struct dmm_txn *txn = NULL;
+ struct refill_engine *engine = NULL;
+
+ down(&dmm->engine_sem);
+
+ /* grab an idle engine */
+ spin_lock(&dmm->list_lock);
+ if (!list_empty(&dmm->idle_head)) {
+ engine = list_entry(dmm->idle_head.next, struct refill_engine,
+ idle_node);
+ list_del(&engine->idle_node);
+ }
+ spin_unlock(&dmm->list_lock);
+
+ BUG_ON(!engine);
+
+ txn = &engine->txn;
+ engine->tcm = tcm;
+ txn->engine_handle = engine;
+ txn->last_pat = NULL;
+ txn->current_va = engine->refill_va;
+ txn->current_pa = engine->refill_pa;
+
+ return txn;
+}
+
+/**
+ * Add region to DMM transaction. If pages or pages[i] is NULL, then the
+ * corresponding slot is cleared (ie. dummy_pa is programmed)
+ */
+static int dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
+ struct page **pages, uint32_t npages, uint32_t roll)
+{
+ dma_addr_t pat_pa = 0;
+ uint32_t *data;
+ struct pat *pat;
+ struct refill_engine *engine = txn->engine_handle;
+ int columns = (1 + area->x1 - area->x0);
+ int rows = (1 + area->y1 - area->y0);
+ int i = columns*rows;
+ u32 *lut = omap_dmm->lut + (engine->tcm->lut_id * omap_dmm->lut_width *
+ omap_dmm->lut_height) +
+ (area->y0 * omap_dmm->lut_width) + area->x0;
+
+ pat = alloc_dma(txn, sizeof(struct pat), &pat_pa);
+
+ if (txn->last_pat)
+ txn->last_pat->next_pa = (uint32_t)pat_pa;
+
+ pat->area = *area;
+ pat->ctrl = (struct pat_ctrl){
+ .start = 1,
+ .lut_id = engine->tcm->lut_id,
+ };
+
+ data = alloc_dma(txn, 4*i, &pat->data_pa);
+
+ while (i--) {
+ int n = i + roll;
+ if (n >= npages)
+ n -= npages;
+ data[i] = (pages && pages[n]) ?
+ page_to_phys(pages[n]) : engine->dmm->dummy_pa;
+ }
+
+ /* fill in lut with new addresses */
+ for (i = 0; i < rows; i++, lut += omap_dmm->lut_width)
+ memcpy(lut, &data[i*columns], columns * sizeof(u32));
+
+ txn->last_pat = pat;
+
+ return 0;
+}
+
+/**
+ * Commit the DMM transaction.
+ */
+static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
+{
+ int ret = 0;
+ struct refill_engine *engine = txn->engine_handle;
+ struct dmm *dmm = engine->dmm;
+
+ if (!txn->last_pat) {
+ dev_err(engine->dmm->dev, "need at least one txn\n");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ txn->last_pat->next_pa = 0;
+
+ /* write to PAT_DESCR to clear out any pending transaction */
+ writel(0x0, dmm->base + reg[PAT_DESCR][engine->id]);
+
+ /* wait for engine ready: */
+ ret = wait_status(engine, DMM_PATSTATUS_READY);
+ if (ret) {
+ ret = -EFAULT;
+ goto cleanup;
+ }
+
+ /* kick reload */
+ writel(engine->refill_pa,
+ dmm->base + reg[PAT_DESCR][engine->id]);
+
+ if (wait) {
+ if (wait_event_interruptible_timeout(engine->wait_for_refill,
+ wait_status(engine, DMM_PATSTATUS_READY) == 0,
+ msecs_to_jiffies(1)) <= 0) {
+ dev_err(dmm->dev, "timed out waiting for done\n");
+ ret = -ETIMEDOUT;
+ }
+ }
+
+cleanup:
+ spin_lock(&dmm->list_lock);
+ list_add(&engine->idle_node, &dmm->idle_head);
+ spin_unlock(&dmm->list_lock);
+
+ up(&omap_dmm->engine_sem);
+ return ret;
+}
+
+/*
+ * DMM programming
+ */
+static int fill(struct tcm_area *area, struct page **pages,
+ uint32_t npages, uint32_t roll, bool wait)
+{
+ int ret = 0;
+ struct tcm_area slice, area_s;
+ struct dmm_txn *txn;
+
+ txn = dmm_txn_init(omap_dmm, area->tcm);
+ if (IS_ERR_OR_NULL(txn))
+ return PTR_ERR(txn);
+
+ tcm_for_each_slice(slice, *area, area_s) {
+ struct pat_area p_area = {
+ .x0 = slice.p0.x, .y0 = slice.p0.y,
+ .x1 = slice.p1.x, .y1 = slice.p1.y,
+ };
+
+ ret = dmm_txn_append(txn, &p_area, pages, npages, roll);
+ if (ret)
+ goto fail;
+
+ roll += tcm_sizeof(slice);
+ }
+
+ ret = dmm_txn_commit(txn, wait);
+
+fail:
+ return ret;
+}
+
+/*
+ * Pin/unpin
+ */
+
+/* note: slots for which pages[i] == NULL are filled w/ dummy page
+ */
+int tiler_pin(struct tiler_block *block, struct page **pages,
+ uint32_t npages, uint32_t roll, bool wait)
+{
+ int ret;
+
+ ret = fill(&block->area, pages, npages, roll, wait);
+
+ if (ret)
+ tiler_unpin(block);
+
+ return ret;
+}
+
+int tiler_unpin(struct tiler_block *block)
+{
+ return fill(&block->area, NULL, 0, 0, false);
+}
+
+/*
+ * Reserve/release
+ */
+struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, uint16_t w,
+ uint16_t h, uint16_t align)
+{
+ struct tiler_block *block = kzalloc(sizeof(*block), GFP_KERNEL);
+ u32 min_align = 128;
+ int ret;
+
+ if (!block)
+ return NULL;
+
+ BUG_ON(!validfmt(fmt));
+
+ /* convert width/height to slots */
+ w = DIV_ROUND_UP(w, geom[fmt].slot_w);
+ h = DIV_ROUND_UP(h, geom[fmt].slot_h);
+
+ /* convert alignment to slots */
+ min_align = max(min_align, (geom[fmt].slot_w * geom[fmt].cpp));
+ align = ALIGN(align, min_align);
+ align /= geom[fmt].slot_w * geom[fmt].cpp;
+
+ block->fmt = fmt;
+
+ ret = tcm_reserve_2d(containers[fmt], w, h, align, &block->area);
+ if (ret) {
+ kfree(block);
+ return NULL;
+ }
+
+ /* add to allocation list */
+ spin_lock(&omap_dmm->list_lock);
+ list_add(&block->alloc_node, &omap_dmm->alloc_head);
+ spin_unlock(&omap_dmm->list_lock);
+
+ return block;
+}
+
+struct tiler_block *tiler_reserve_1d(size_t size)
+{
+ struct tiler_block *block = kzalloc(sizeof(*block), GFP_KERNEL);
+ int num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+ if (!block)
+ return NULL;
+
+ block->fmt = TILFMT_PAGE;
+
+ if (tcm_reserve_1d(containers[TILFMT_PAGE], num_pages,
+ &block->area)) {
+ kfree(block);
+ return NULL;
+ }
+
+ spin_lock(&omap_dmm->list_lock);
+ list_add(&block->alloc_node, &omap_dmm->alloc_head);
+ spin_unlock(&omap_dmm->list_lock);
+
+ return block;
+}
+
+/* note: if you have pin'd pages, you should have already unpin'd first! */
+int tiler_release(struct tiler_block *block)
+{
+ int ret = tcm_free(&block->area);
+
+ if (block->area.tcm)
+ dev_err(omap_dmm->dev, "failed to release block\n");
+
+ spin_lock(&omap_dmm->list_lock);
+ list_del(&block->alloc_node);
+ spin_unlock(&omap_dmm->list_lock);
+
+ kfree(block);
+ return ret;
+}
+
+/*
+ * Utils
+ */
+
+/* calculate the tiler space address of a pixel in a view orientation */
+static u32 tiler_get_address(u32 orient, enum tiler_fmt fmt, u32 x, u32 y)
+{
+ u32 x_bits, y_bits, tmp, x_mask, y_mask, alignment;
+
+ x_bits = CONT_WIDTH_BITS - geom[fmt].x_shft;
+ y_bits = CONT_HEIGHT_BITS - geom[fmt].y_shft;
+ alignment = geom[fmt].x_shft + geom[fmt].y_shft;
+
+ /* validate coordinate */
+ x_mask = MASK(x_bits);
+ y_mask = MASK(y_bits);
+
+ if (x > x_mask || y > y_mask)
+ return 0;
+
+ /* account for mirroring */
+ if (orient & MASK_X_INVERT)
+ x ^= x_mask;
+ if (orient & MASK_Y_INVERT)
+ y ^= y_mask;
+
+ /* get coordinate address */
+ if (orient & MASK_XY_FLIP)
+ tmp = ((x << y_bits) + y);
+ else
+ tmp = ((y << x_bits) + x);
+
+ return TIL_ADDR((tmp << alignment), orient, fmt);
+}
+
+dma_addr_t tiler_ssptr(struct tiler_block *block)
+{
+ BUG_ON(!validfmt(block->fmt));
+
+ return TILVIEW_8BIT + tiler_get_address(0, block->fmt,
+ block->area.p0.x * geom[block->fmt].slot_w,
+ block->area.p0.y * geom[block->fmt].slot_h);
+}
+
+void tiler_align(enum tiler_fmt fmt, uint16_t *w, uint16_t *h)
+{
+ BUG_ON(!validfmt(fmt));
+ *w = round_up(*w, geom[fmt].slot_w);
+ *h = round_up(*h, geom[fmt].slot_h);
+}
+
+uint32_t tiler_stride(enum tiler_fmt fmt)
+{
+ BUG_ON(!validfmt(fmt));
+
+ return 1 << (CONT_WIDTH_BITS + geom[fmt].y_shft);
+}
+
+size_t tiler_size(enum tiler_fmt fmt, uint16_t w, uint16_t h)
+{
+ tiler_align(fmt, &w, &h);
+ return geom[fmt].cpp * w * h;
+}
+
+size_t tiler_vsize(enum tiler_fmt fmt, uint16_t w, uint16_t h)
+{
+ BUG_ON(!validfmt(fmt));
+ return round_up(geom[fmt].cpp * w, PAGE_SIZE) * h;
+}
+
+int omap_dmm_remove(void)
+{
+ struct tiler_block *block, *_block;
+ int i;
+
+ if (omap_dmm) {
+ /* free all area regions */
+ spin_lock(&omap_dmm->list_lock);
+ list_for_each_entry_safe(block, _block, &omap_dmm->alloc_head,
+ alloc_node) {
+ list_del(&block->alloc_node);
+ kfree(block);
+ }
+ spin_unlock(&omap_dmm->list_lock);
+
+ for (i = 0; i < omap_dmm->num_lut; i++)
+ if (omap_dmm->tcm && omap_dmm->tcm[i])
+ omap_dmm->tcm[i]->deinit(omap_dmm->tcm[i]);
+ kfree(omap_dmm->tcm);
+
+ kfree(omap_dmm->engines);
+ if (omap_dmm->refill_va)
+ dma_free_coherent(omap_dmm->dev,
+ REFILL_BUFFER_SIZE * omap_dmm->num_engines,
+ omap_dmm->refill_va,
+ omap_dmm->refill_pa);
+ if (omap_dmm->dummy_page)
+ __free_page(omap_dmm->dummy_page);
+
+ vfree(omap_dmm->lut);
+
+ if (omap_dmm->irq != -1)
+ free_irq(omap_dmm->irq, omap_dmm);
+
+ kfree(omap_dmm);
+ }
+
+ return 0;
+}
+
+int omap_dmm_init(struct drm_device *dev)
+{
+ int ret = -EFAULT, i;
+ struct tcm_area area = {0};
+ u32 hwinfo, pat_geom, lut_table_size;
+ struct omap_drm_platform_data *pdata = dev->dev->platform_data;
+
+ if (!pdata || !pdata->dmm_pdata) {
+ dev_err(dev->dev, "dmm platform data not present, skipping\n");
+ return ret;
+ }
+
+ omap_dmm = kzalloc(sizeof(*omap_dmm), GFP_KERNEL);
+ if (!omap_dmm) {
+ dev_err(dev->dev, "failed to allocate driver data section\n");
+ goto fail;
+ }
+
+ /* lookup hwmod data - base address and irq */
+ omap_dmm->base = pdata->dmm_pdata->base;
+ omap_dmm->irq = pdata->dmm_pdata->irq;
+ omap_dmm->dev = dev->dev;
+
+ if (!omap_dmm->base) {
+ dev_err(dev->dev, "failed to get dmm base address\n");
+ goto fail;
+ }
+
+ hwinfo = readl(omap_dmm->base + DMM_PAT_HWINFO);
+ omap_dmm->num_engines = (hwinfo >> 24) & 0x1F;
+ omap_dmm->num_lut = (hwinfo >> 16) & 0x1F;
+ omap_dmm->container_width = 256;
+ omap_dmm->container_height = 128;
+
+ /* read out actual LUT width and height */
+ pat_geom = readl(omap_dmm->base + DMM_PAT_GEOMETRY);
+ omap_dmm->lut_width = ((pat_geom >> 16) & 0xF) << 5;
+ omap_dmm->lut_height = ((pat_geom >> 24) & 0xF) << 5;
+
+ /* initialize DMM registers */
+ writel(0x88888888, omap_dmm->base + DMM_PAT_VIEW__0);
+ writel(0x88888888, omap_dmm->base + DMM_PAT_VIEW__1);
+ writel(0x80808080, omap_dmm->base + DMM_PAT_VIEW_MAP__0);
+ writel(0x80000000, omap_dmm->base + DMM_PAT_VIEW_MAP_BASE);
+ writel(0x88888888, omap_dmm->base + DMM_TILER_OR__0);
+ writel(0x88888888, omap_dmm->base + DMM_TILER_OR__1);
+
+ ret = request_irq(omap_dmm->irq, omap_dmm_irq_handler, IRQF_SHARED,
+ "omap_dmm_irq_handler", omap_dmm);
+
+ if (ret) {
+ dev_err(dev->dev, "couldn't register IRQ %d, error %d\n",
+ omap_dmm->irq, ret);
+ omap_dmm->irq = -1;
+ goto fail;
+ }
+
+ /* Enable all interrupts for each refill engine except
+ * ERR_LUT_MISS<n> (which is just advisory, and we don't care
+ * about because we want to be able to refill live scanout
+ * buffers for accelerated pan/scroll) and FILL_DSC<n> which
+ * we just generally don't care about.
+ */
+ writel(0x7e7e7e7e, omap_dmm->base + DMM_PAT_IRQENABLE_SET);
+
+ lut_table_size = omap_dmm->lut_width * omap_dmm->lut_height *
+ omap_dmm->num_lut;
+
+ omap_dmm->lut = vmalloc(lut_table_size * sizeof(*omap_dmm->lut));
+ if (!omap_dmm->lut) {
+ dev_err(dev->dev, "could not allocate lut table\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ omap_dmm->dummy_page = alloc_page(GFP_KERNEL | __GFP_DMA32);
+ if (!omap_dmm->dummy_page) {
+ dev_err(dev->dev, "could not allocate dummy page\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+ omap_dmm->dummy_pa = page_to_phys(omap_dmm->dummy_page);
+
+ /* alloc refill memory */
+ omap_dmm->refill_va = dma_alloc_coherent(dev->dev,
+ REFILL_BUFFER_SIZE * omap_dmm->num_engines,
+ &omap_dmm->refill_pa, GFP_KERNEL);
+ if (!omap_dmm->refill_va) {
+ dev_err(dev->dev, "could not allocate refill memory\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ /* alloc engines */
+ omap_dmm->engines = kzalloc(
+ omap_dmm->num_engines * sizeof(struct refill_engine),
+ GFP_KERNEL);
+ if (!omap_dmm->engines) {
+ dev_err(dev->dev, "could not allocate engines\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ sema_init(&omap_dmm->engine_sem, omap_dmm->num_engines);
+ INIT_LIST_HEAD(&omap_dmm->idle_head);
+ for (i = 0; i < omap_dmm->num_engines; i++) {
+ omap_dmm->engines[i].id = i;
+ omap_dmm->engines[i].dmm = omap_dmm;
+ omap_dmm->engines[i].refill_va = omap_dmm->refill_va +
+ (REFILL_BUFFER_SIZE * i);
+ omap_dmm->engines[i].refill_pa = omap_dmm->refill_pa +
+ (REFILL_BUFFER_SIZE * i);
+ init_waitqueue_head(&omap_dmm->engines[i].wait_for_refill);
+
+ list_add(&omap_dmm->engines[i].idle_node, &omap_dmm->idle_head);
+ }
+
+ omap_dmm->tcm = kzalloc(omap_dmm->num_lut * sizeof(*omap_dmm->tcm),
+ GFP_KERNEL);
+ if (!omap_dmm->tcm) {
+ dev_err(dev->dev, "failed to allocate lut ptrs\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ /* init containers */
+ for (i = 0; i < omap_dmm->num_lut; i++) {
+ omap_dmm->tcm[i] = sita_init(omap_dmm->container_width,
+ omap_dmm->container_height,
+ NULL);
+
+ if (!omap_dmm->tcm[i]) {
+ dev_err(dev->dev, "failed to allocate container\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ omap_dmm->tcm[i]->lut_id = i;
+ }
+
+ /* assign access mode containers to applicable tcm container */
+ /* OMAP 4 has 1 container for all 4 views */
+ containers[TILFMT_8BIT] = omap_dmm->tcm[0];
+ containers[TILFMT_16BIT] = omap_dmm->tcm[0];
+ containers[TILFMT_32BIT] = omap_dmm->tcm[0];
+ containers[TILFMT_PAGE] = omap_dmm->tcm[0];
+
+ INIT_LIST_HEAD(&omap_dmm->alloc_head);
+ spin_lock_init(&omap_dmm->list_lock);
+
+ area = (struct tcm_area) {
+ .is2d = true,
+ .tcm = NULL,
+ .p1.x = omap_dmm->container_width - 1,
+ .p1.y = omap_dmm->container_height - 1,
+ };
+
+ for (i = 0; i < lut_table_size; i++)
+ omap_dmm->lut[i] = omap_dmm->dummy_pa;
+
+ /* initialize all LUTs to dummy page entries */
+ for (i = 0; i < omap_dmm->num_lut; i++) {
+ area.tcm = omap_dmm->tcm[i];
+ if (fill(&area, NULL, 0, 0, true))
+ dev_err(omap_dmm->dev, "refill failed");
+ }
+
+ dev_info(omap_dmm->dev, "initialized all PAT entries\n");
+
+ return 0;
+
+fail:
+ omap_dmm_remove();
+ return ret;
+}
+
+/*
+ * debugfs support
+ */
+
+#ifdef CONFIG_DEBUG_FS
+
+static const char *alphabet = "abcdefghijklmnopqrstuvwxyz"
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";
+static const char *special = ".,:;'\"`~!^-+";
+
+static void fill_map(char **map, int xdiv, int ydiv, struct tcm_area *a,
+ char c, bool ovw)
+{
+ int x, y;
+ for (y = a->p0.y / ydiv; y <= a->p1.y / ydiv; y++)
+ for (x = a->p0.x / xdiv; x <= a->p1.x / xdiv; x++)
+ if (map[y][x] == ' ' || ovw)
+ map[y][x] = c;
+}
+
+static void fill_map_pt(char **map, int xdiv, int ydiv, struct tcm_pt *p,
+ char c)
+{
+ map[p->y / ydiv][p->x / xdiv] = c;
+}
+
+static char read_map_pt(char **map, int xdiv, int ydiv, struct tcm_pt *p)
+{
+ return map[p->y / ydiv][p->x / xdiv];
+}
+
+static int map_width(int xdiv, int x0, int x1)
+{
+ return (x1 / xdiv) - (x0 / xdiv) + 1;
+}
+
+static void text_map(char **map, int xdiv, char *nice, int yd, int x0, int x1)
+{
+ char *p = map[yd] + (x0 / xdiv);
+ int w = (map_width(xdiv, x0, x1) - strlen(nice)) / 2;
+ if (w >= 0) {
+ p += w;
+ while (*nice)
+ *p++ = *nice++;
+ }
+}
+
+static void map_1d_info(char **map, int xdiv, int ydiv, char *nice,
+ struct tcm_area *a)
+{
+ sprintf(nice, "%dK", tcm_sizeof(*a) * 4);
+ if (a->p0.y + 1 < a->p1.y) {
+ text_map(map, xdiv, nice, (a->p0.y + a->p1.y) / 2 / ydiv, 0,
+ 256 - 1);
+ } else if (a->p0.y < a->p1.y) {
+ if (strlen(nice) < map_width(xdiv, a->p0.x, 256 - 1))
+ text_map(map, xdiv, nice, a->p0.y / ydiv,
+ a->p0.x + xdiv, 256 - 1);
+ else if (strlen(nice) < map_width(xdiv, 0, a->p1.x))
+ text_map(map, xdiv, nice, a->p1.y / ydiv,
+ 0, a->p1.x - xdiv);
+ } else if (strlen(nice) + 1 < map_width(xdiv, a->p0.x, a->p1.x)) {
+ text_map(map, xdiv, nice, a->p0.y / ydiv, a->p0.x, a->p1.x);
+ }
+}
+
+static void map_2d_info(char **map, int xdiv, int ydiv, char *nice,
+ struct tcm_area *a)
+{
+ sprintf(nice, "(%d*%d)", tcm_awidth(*a), tcm_aheight(*a));
+ if (strlen(nice) + 1 < map_width(xdiv, a->p0.x, a->p1.x))
+ text_map(map, xdiv, nice, (a->p0.y + a->p1.y) / 2 / ydiv,
+ a->p0.x, a->p1.x);
+}
+
+int tiler_map_show(struct seq_file *s, void *arg)
+{
+ int xdiv = 2, ydiv = 1;
+ char **map = NULL, *global_map;
+ struct tiler_block *block;
+ struct tcm_area a, p;
+ int i;
+ const char *m2d = alphabet;
+ const char *a2d = special;
+ const char *m2dp = m2d, *a2dp = a2d;
+ char nice[128];
+ int h_adj = omap_dmm->lut_height / ydiv;
+ int w_adj = omap_dmm->lut_width / xdiv;
+ unsigned long flags;
+
+ map = kzalloc(h_adj * sizeof(*map), GFP_KERNEL);
+ global_map = kzalloc((w_adj + 1) * h_adj, GFP_KERNEL);
+
+ if (!map || !global_map)
+ goto error;
+
+ memset(global_map, ' ', (w_adj + 1) * h_adj);
+ for (i = 0; i < omap_dmm->lut_height; i++) {
+ map[i] = global_map + i * (w_adj + 1);
+ map[i][w_adj] = 0;
+ }
+ spin_lock_irqsave(&omap_dmm->list_lock, flags);
+
+ list_for_each_entry(block, &omap_dmm->alloc_head, alloc_node) {
+ if (block->fmt != TILFMT_PAGE) {
+ fill_map(map, xdiv, ydiv, &block->area, *m2dp, true);
+ if (!*++a2dp)
+ a2dp = a2d;
+ if (!*++m2dp)
+ m2dp = m2d;
+ map_2d_info(map, xdiv, ydiv, nice, &block->area);
+ } else {
+ bool start = read_map_pt(map, xdiv, ydiv,
+ &block->area.p0)
+ == ' ';
+ bool end = read_map_pt(map, xdiv, ydiv, &block->area.p1)
+ == ' ';
+ tcm_for_each_slice(a, block->area, p)
+ fill_map(map, xdiv, ydiv, &a, '=', true);
+ fill_map_pt(map, xdiv, ydiv, &block->area.p0,
+ start ? '<' : 'X');
+ fill_map_pt(map, xdiv, ydiv, &block->area.p1,
+ end ? '>' : 'X');
+ map_1d_info(map, xdiv, ydiv, nice, &block->area);
+ }
+ }
+
+ spin_unlock_irqrestore(&omap_dmm->list_lock, flags);
+
+ if (s) {
+ seq_printf(s, "BEGIN DMM TILER MAP\n");
+ for (i = 0; i < 128; i++)
+ seq_printf(s, "%03d:%s\n", i, map[i]);
+ seq_printf(s, "END TILER MAP\n");
+ } else {
+ dev_dbg(omap_dmm->dev, "BEGIN DMM TILER MAP\n");
+ for (i = 0; i < 128; i++)
+ dev_dbg(omap_dmm->dev, "%03d:%s\n", i, map[i]);
+ dev_dbg(omap_dmm->dev, "END TILER MAP\n");
+ }
+
+error:
+ kfree(map);
+ kfree(global_map);
+
+ return 0;
+}
+#endif
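
The reserve/pin/release calls above are the API consumed by the GEM code elsewhere in the driver (not part of this hunk). The following is a minimal, hedged sketch of how such a caller might back a 2D buffer with tiled memory; the helper names are illustrative, the pages array is assumed to come from a GEM object, the PAGE_SIZE alignment is an arbitrary choice, and error handling is trimmed.

#include "omap_dmm_tiler.h"

/* Illustrative caller only; not taken from omap_gem.c. */
static dma_addr_t example_pin_tiled(struct page **pages, uint32_t npages,
		uint16_t width, uint16_t height, struct tiler_block **out)
{
	struct tiler_block *block;
	int ret;

	/* reserve a 16bpp region in the tiler container (align is in bytes) */
	block = tiler_reserve_2d(TILFMT_16BIT, width, height, PAGE_SIZE);
	if (!block)
		return 0;

	/* program the PAT so the reserved slots point at the given pages */
	ret = tiler_pin(block, pages, npages, 0, true);
	if (ret) {
		tiler_release(block);
		return 0;
	}

	*out = block;
	return tiler_ssptr(block);	/* tiler view address usable for scanout */
}

static void example_unpin_tiled(struct tiler_block *block)
{
	tiler_unpin(block);	/* repoint the slots at the dummy page */
	tiler_release(block);	/* hand the area back to the allocator */
}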
diff --git a/drivers/staging/omapdrm/omap_dmm_tiler.h b/drivers/staging/omapdrm/omap_dmm_tiler.h
new file mode 100644
index 000000000000..f87cb657d683
--- /dev/null
+++ b/drivers/staging/omapdrm/omap_dmm_tiler.h
@@ -0,0 +1,135 @@
+/*
+ *
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Rob Clark <rob@ti.com>
+ * Andy Gross <andy.gross@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef OMAP_DMM_TILER_H
+#define OMAP_DMM_TILER_H
+
+#include "omap_drv.h"
+#include "tcm.h"
+
+enum tiler_fmt {
+ TILFMT_8BIT = 0,
+ TILFMT_16BIT,
+ TILFMT_32BIT,
+ TILFMT_PAGE,
+ TILFMT_NFORMATS
+};
+
+struct pat_area {
+ u32 x0:8;
+ u32 y0:8;
+ u32 x1:8;
+ u32 y1:8;
+};
+
+struct tiler_block {
+ struct list_head alloc_node; /* node for global block list */
+ struct tcm_area area; /* area */
+ enum tiler_fmt fmt; /* format */
+};
+
+/* bits representing the same slot in DMM-TILER hw-block */
+#define SLOT_WIDTH_BITS 6
+#define SLOT_HEIGHT_BITS 6
+
+/* bits reserved to describe coordinates in DMM-TILER hw-block */
+#define CONT_WIDTH_BITS 14
+#define CONT_HEIGHT_BITS 13
+
+/* calculated constants */
+#define TILER_PAGE (1 << (SLOT_WIDTH_BITS + SLOT_HEIGHT_BITS))
+#define TILER_WIDTH (1 << (CONT_WIDTH_BITS - SLOT_WIDTH_BITS))
+#define TILER_HEIGHT (1 << (CONT_HEIGHT_BITS - SLOT_HEIGHT_BITS))
+
+/* tiler space addressing bitfields */
+#define MASK_XY_FLIP (1 << 31)
+#define MASK_Y_INVERT (1 << 30)
+#define MASK_X_INVERT (1 << 29)
+#define SHIFT_ACC_MODE 27
+#define MASK_ACC_MODE 3
+
+#define MASK(bits) ((1 << (bits)) - 1)
+
+#define TILVIEW_8BIT 0x60000000u
+#define TILVIEW_16BIT (TILVIEW_8BIT + VIEW_SIZE)
+#define TILVIEW_32BIT (TILVIEW_16BIT + VIEW_SIZE)
+#define TILVIEW_PAGE (TILVIEW_32BIT + VIEW_SIZE)
+#define TILVIEW_END (TILVIEW_PAGE + VIEW_SIZE)
+
+/* create tsptr by adding view orientation and access mode */
+#define TIL_ADDR(x, orient, a)\
+ ((u32) (x) | (orient) | ((a) << SHIFT_ACC_MODE))
+
+/* externally accessible functions */
+int omap_dmm_init(struct drm_device *dev);
+int omap_dmm_remove(void);
+
+#ifdef CONFIG_DEBUG_FS
+int tiler_map_show(struct seq_file *s, void *arg);
+#endif
+
+/* pin/unpin */
+int tiler_pin(struct tiler_block *block, struct page **pages,
+ uint32_t npages, uint32_t roll, bool wait);
+int tiler_unpin(struct tiler_block *block);
+
+/* reserve/release */
+struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, uint16_t w, uint16_t h,
+ uint16_t align);
+struct tiler_block *tiler_reserve_1d(size_t size);
+int tiler_release(struct tiler_block *block);
+
+/* utilities */
+dma_addr_t tiler_ssptr(struct tiler_block *block);
+uint32_t tiler_stride(enum tiler_fmt fmt);
+size_t tiler_size(enum tiler_fmt fmt, uint16_t w, uint16_t h);
+size_t tiler_vsize(enum tiler_fmt fmt, uint16_t w, uint16_t h);
+void tiler_align(enum tiler_fmt fmt, uint16_t *w, uint16_t *h);
+
+
+/* GEM bo flags -> tiler fmt */
+static inline enum tiler_fmt gem2fmt(uint32_t flags)
+{
+ switch (flags & OMAP_BO_TILED) {
+ case OMAP_BO_TILED_8:
+ return TILFMT_8BIT;
+ case OMAP_BO_TILED_16:
+ return TILFMT_16BIT;
+ case OMAP_BO_TILED_32:
+ return TILFMT_32BIT;
+ default:
+ return TILFMT_PAGE;
+ }
+}
+
+static inline bool validfmt(enum tiler_fmt fmt)
+{
+ switch (fmt) {
+ case TILFMT_8BIT:
+ case TILFMT_16BIT:
+ case TILFMT_32BIT:
+ case TILFMT_PAGE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+struct omap_dmm_platform_data {
+ void __iomem *base;
+ int irq;
+};
+
+#endif
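
To make the derived constants above concrete: with SLOT_WIDTH_BITS = SLOT_HEIGHT_BITS = 6 and CONT_WIDTH_BITS = 14 / CONT_HEIGHT_BITS = 13, one slot is 64 x 64 units (TILER_PAGE = 4096 bytes) and the container is 256 slots wide by 128 slots tall, matching the 256 x 128 container programmed in omap_dmm_init(). The standalone snippet below just evaluates those values plus the per-format row stride implied by tiler_stride(); the constants are mirrored locally as an assumption rather than pulled from this header.

#include <stdio.h>

/* Mirrored from omap_dmm_tiler.h above; assumed to stay in sync. */
#define SLOT_WIDTH_BITS		6
#define SLOT_HEIGHT_BITS	6
#define CONT_WIDTH_BITS		14
#define CONT_HEIGHT_BITS	13

int main(void)
{
	unsigned int tiler_page   = 1u << (SLOT_WIDTH_BITS + SLOT_HEIGHT_BITS);
	unsigned int tiler_width  = 1u << (CONT_WIDTH_BITS - SLOT_WIDTH_BITS);
	unsigned int tiler_height = 1u << (CONT_HEIGHT_BITS - SLOT_HEIGHT_BITS);

	printf("slot: %u bytes, container: %u x %u slots\n",
	       tiler_page, tiler_width, tiler_height);

	/* tiler_stride(fmt) is 1 << (CONT_WIDTH_BITS + y_shft):
	 * 8bit has y_shft 0, 16bit/32bit have y_shft 1 (see the geom[] table).
	 */
	printf("row stride: 8bit %u bytes, 16/32bit %u bytes\n",
	       1u << CONT_WIDTH_BITS, 1u << (CONT_WIDTH_BITS + 1));
	return 0;
}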
diff --git a/drivers/staging/omapdrm/omap_drm.h b/drivers/staging/omapdrm/omap_drm.h
new file mode 100644
index 000000000000..f0ac34a8973e
--- /dev/null
+++ b/drivers/staging/omapdrm/omap_drm.h
@@ -0,0 +1,123 @@
+/*
+ * include/drm/omap_drm.h
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __OMAP_DRM_H__
+#define __OMAP_DRM_H__
+
+#include <drm/drm.h>
+
+/* Please note that modifications to all structs defined here are
+ * subject to backwards-compatibility constraints.
+ */
+
+#define OMAP_PARAM_CHIPSET_ID 1 /* ie. 0x3430, 0x4430, etc */
+
+struct drm_omap_param {
+ uint64_t param; /* in */
+ uint64_t value; /* in (set_param), out (get_param) */
+};
+
+#define OMAP_BO_SCANOUT 0x00000001 /* scanout capable (phys contiguous) */
+#define OMAP_BO_CACHE_MASK 0x00000006 /* cache type mask, see cache modes */
+#define OMAP_BO_TILED_MASK 0x00000f00 /* tiled mapping mask, see tiled modes */
+
+/* cache modes */
+#define OMAP_BO_CACHED 0x00000000 /* default */
+#define OMAP_BO_WC 0x00000002 /* write-combine */
+#define OMAP_BO_UNCACHED 0x00000004 /* strongly-ordered (uncached) */
+
+/* tiled modes */
+#define OMAP_BO_TILED_8 0x00000100
+#define OMAP_BO_TILED_16 0x00000200
+#define OMAP_BO_TILED_32 0x00000300
+#define OMAP_BO_TILED (OMAP_BO_TILED_8 | OMAP_BO_TILED_16 | OMAP_BO_TILED_32)
+
+union omap_gem_size {
+ uint32_t bytes; /* (for non-tiled formats) */
+ struct {
+ uint16_t width;
+ uint16_t height;
+ } tiled; /* (for tiled formats) */
+};
+
+struct drm_omap_gem_new {
+ union omap_gem_size size; /* in */
+ uint32_t flags; /* in */
+ uint32_t handle; /* out */
+ uint32_t __pad;
+};
+
+/* mask of operations: */
+enum omap_gem_op {
+ OMAP_GEM_READ = 0x01,
+ OMAP_GEM_WRITE = 0x02,
+};
+
+struct drm_omap_gem_cpu_prep {
+ uint32_t handle; /* buffer handle (in) */
+ uint32_t op; /* mask of omap_gem_op (in) */
+};
+
+struct drm_omap_gem_cpu_fini {
+ uint32_t handle; /* buffer handle (in) */
+ uint32_t op; /* mask of omap_gem_op (in) */
+ /* TODO maybe here we pass down info about what regions are touched
+ * by sw so we can be clever about cache ops? For now a placeholder,
+ * set to zero and we just do full buffer flush..
+ */
+ uint32_t nregions;
+ uint32_t __pad;
+};
+
+struct drm_omap_gem_info {
+ uint32_t handle; /* buffer handle (in) */
+ uint32_t pad;
+ uint64_t offset; /* mmap offset (out) */
+ /* note: in case of tiled buffers, the user virtual size can be
+ * different from the physical size (ie. how many pages are needed
+ * to back the object) which is returned in DRM_IOCTL_GEM_OPEN..
+ * This size here is the one that should be used if you want to
+ * mmap() the buffer:
+ */
+ uint32_t size; /* virtual size for mmap'ing (out) */
+ uint32_t __pad;
+};
+
+#define DRM_OMAP_GET_PARAM 0x00
+#define DRM_OMAP_SET_PARAM 0x01
+/* placeholder for plugin-api
+#define DRM_OMAP_GET_BASE 0x02
+*/
+#define DRM_OMAP_GEM_NEW 0x03
+#define DRM_OMAP_GEM_CPU_PREP 0x04
+#define DRM_OMAP_GEM_CPU_FINI 0x05
+#define DRM_OMAP_GEM_INFO 0x06
+#define DRM_OMAP_NUM_IOCTLS 0x07
+
+#define DRM_IOCTL_OMAP_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_OMAP_GET_PARAM, struct drm_omap_param)
+#define DRM_IOCTL_OMAP_SET_PARAM DRM_IOW (DRM_COMMAND_BASE + DRM_OMAP_SET_PARAM, struct drm_omap_param)
+/* placeholder for plugin-api
+#define DRM_IOCTL_OMAP_GET_BASE DRM_IOWR(DRM_COMMAND_BASE + DRM_OMAP_GET_BASE, struct drm_omap_get_base)
+*/
+#define DRM_IOCTL_OMAP_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_OMAP_GEM_NEW, struct drm_omap_gem_new)
+#define DRM_IOCTL_OMAP_GEM_CPU_PREP DRM_IOW (DRM_COMMAND_BASE + DRM_OMAP_GEM_CPU_PREP, struct drm_omap_gem_cpu_prep)
+#define DRM_IOCTL_OMAP_GEM_CPU_FINI DRM_IOW (DRM_COMMAND_BASE + DRM_OMAP_GEM_CPU_FINI, struct drm_omap_gem_cpu_fini)
+#define DRM_IOCTL_OMAP_GEM_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_OMAP_GEM_INFO, struct drm_omap_gem_info)
+
+#endif /* __OMAP_DRM_H__ */
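
The structs and ioctl numbers above form the userspace-facing API of the driver. Below is a hedged userspace sketch of allocating a tiled buffer and querying its mmap size through those ioctls; the /dev/dri/card0 path, the use of raw ioctl() rather than a libdrm wrapper, and the chosen dimensions are assumptions for illustration only.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include "omap_drm.h"	/* the header added by this patch */

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);	/* device node path assumed */
	struct drm_omap_gem_new req;
	struct drm_omap_gem_info info;

	if (fd < 0)
		return 1;

	/* tiled buffers are sized in pixels, not bytes (see union omap_gem_size) */
	memset(&req, 0, sizeof(req));
	req.size.tiled.width = 1280;
	req.size.tiled.height = 800;
	req.flags = OMAP_BO_TILED_16 | OMAP_BO_SCANOUT;

	if (ioctl(fd, DRM_IOCTL_OMAP_GEM_NEW, &req) < 0)
		return 1;

	memset(&info, 0, sizeof(info));
	info.handle = req.handle;
	if (ioctl(fd, DRM_IOCTL_OMAP_GEM_INFO, &info) < 0)
		return 1;

	/* note: for tiled buffers this virtual size can differ from the
	 * physical backing size, as described in the header comment above */
	printf("handle %u: mmap offset 0x%llx, virtual size %u bytes\n",
	       req.handle, (unsigned long long)info.offset, info.size);

	close(fd);
	return 0;
}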
diff --git a/drivers/staging/omapdrm/omap_drv.c b/drivers/staging/omapdrm/omap_drv.c
new file mode 100644
index 000000000000..602aa2dd49c8
--- /dev/null
+++ b/drivers/staging/omapdrm/omap_drv.c
@@ -0,0 +1,821 @@
+/*
+ * drivers/staging/omapdrm/omap_drv.c
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "omap_drv.h"
+
+#include "drm_crtc_helper.h"
+#include "drm_fb_helper.h"
+
+#define DRIVER_NAME MODULE_NAME
+#define DRIVER_DESC "OMAP DRM"
+#define DRIVER_DATE "20110917"
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+#define DRIVER_PATCHLEVEL 0
+
+struct drm_device *drm_device;
+
+static int num_crtc = CONFIG_DRM_OMAP_NUM_CRTCS;
+
+MODULE_PARM_DESC(num_crtc, "Number of overlays to use as CRTCs");
+module_param(num_crtc, int, 0600);
+
+/*
+ * mode config funcs
+ */
+
+/* Notes about mapping DSS and DRM entities:
+ * CRTC: overlay
+ * encoder: manager.. with some extension to allow one primary CRTC
+ * and zero or more video CRTCs to be mapped to one encoder?
+ * connector: dssdev.. manager can be attached/detached from different
+ * devices
+ */
+
+static void omap_fb_output_poll_changed(struct drm_device *dev)
+{
+ struct omap_drm_private *priv = dev->dev_private;
+ DBG("dev=%p", dev);
+ if (priv->fbdev) {
+ drm_fb_helper_hotplug_event(priv->fbdev);
+ }
+}
+
+static struct drm_mode_config_funcs omap_mode_config_funcs = {
+ .fb_create = omap_framebuffer_create,
+ .output_poll_changed = omap_fb_output_poll_changed,
+};
+
+static int get_connector_type(struct omap_dss_device *dssdev)
+{
+ switch (dssdev->type) {
+ case OMAP_DISPLAY_TYPE_HDMI:
+ return DRM_MODE_CONNECTOR_HDMIA;
+ case OMAP_DISPLAY_TYPE_DPI:
+ if (!strcmp(dssdev->name, "dvi"))
+ return DRM_MODE_CONNECTOR_DVID;
+ /* fallthrough */
+ default:
+ return DRM_MODE_CONNECTOR_Unknown;
+ }
+}
+
+#if 0 /* enable when dss2 supports hotplug */
+static int omap_drm_notifier(struct notifier_block *nb,
+ unsigned long evt, void *arg)
+{
+ switch (evt) {
+ case OMAP_DSS_SIZE_CHANGE:
+ case OMAP_DSS_HOTPLUG_CONNECT:
+ case OMAP_DSS_HOTPLUG_DISCONNECT: {
+ struct drm_device *dev = drm_device;
+ DBG("hotplug event: evt=%d, dev=%p", evt, dev);
+ if (dev) {
+ drm_sysfs_hotplug_event(dev);
+ }
+ return NOTIFY_OK;
+ }
+ default: /* don't care about other events for now */
+ return NOTIFY_DONE;
+ }
+}
+#endif
+
+static void dump_video_chains(void)
+{
+ int i;
+
+ DBG("dumping video chains: ");
+ for (i = 0; i < omap_dss_get_num_overlays(); i++) {
+ struct omap_overlay *ovl = omap_dss_get_overlay(i);
+ struct omap_overlay_manager *mgr = ovl->manager;
+ struct omap_dss_device *dssdev = mgr ? mgr->device : NULL;
+ if (dssdev) {
+ DBG("%d: %s -> %s -> %s", i, ovl->name, mgr->name,
+ dssdev->name);
+ } else if (mgr) {
+ DBG("%d: %s -> %s", i, ovl->name, mgr->name);
+ } else {
+ DBG("%d: %s", i, ovl->name);
+ }
+ }
+}
+
+/* create encoders for each manager */
+static int create_encoder(struct drm_device *dev,
+ struct omap_overlay_manager *mgr)
+{
+ struct omap_drm_private *priv = dev->dev_private;
+ struct drm_encoder *encoder = omap_encoder_init(dev, mgr);
+
+ if (!encoder) {
+ dev_err(dev->dev, "could not create encoder: %s\n",
+ mgr->name);
+ return -ENOMEM;
+ }
+
+ BUG_ON(priv->num_encoders >= ARRAY_SIZE(priv->encoders));
+
+ priv->encoders[priv->num_encoders++] = encoder;
+
+ return 0;
+}
+
+/* create connectors for each display device */
+static int create_connector(struct drm_device *dev,
+ struct omap_dss_device *dssdev)
+{
+ struct omap_drm_private *priv = dev->dev_private;
+ static struct notifier_block *notifier;
+ struct drm_connector *connector;
+ int j;
+
+ if (!dssdev->driver) {
+ dev_warn(dev->dev, "%s has no driver.. skipping it\n",
+ dssdev->name);
+ return 0;
+ }
+
+ if (!(dssdev->driver->get_timings ||
+ dssdev->driver->read_edid)) {
+ dev_warn(dev->dev, "%s driver does not support "
+ "get_timings or read_edid.. skipping it!\n",
+ dssdev->name);
+ return 0;
+ }
+
+ connector = omap_connector_init(dev,
+ get_connector_type(dssdev), dssdev);
+
+ if (!connector) {
+ dev_err(dev->dev, "could not create connector: %s\n",
+ dssdev->name);
+ return -ENOMEM;
+ }
+
+ BUG_ON(priv->num_connectors >= ARRAY_SIZE(priv->connectors));
+
+ priv->connectors[priv->num_connectors++] = connector;
+
+#if 0 /* enable when dss2 supports hotplug */
+ notifier = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
+ notifier->notifier_call = omap_drm_notifier;
+ omap_dss_add_notify(dssdev, notifier);
+#else
+ notifier = NULL;
+#endif
+
+ for (j = 0; j < priv->num_encoders; j++) {
+ struct omap_overlay_manager *mgr =
+ omap_encoder_get_manager(priv->encoders[j]);
+ if (mgr->device == dssdev) {
+ drm_mode_connector_attach_encoder(connector,
+ priv->encoders[j]);
+ }
+ }
+
+ return 0;
+}
+
+/* create up to max_overlays CRTCs mapping to overlays.. by default,
+ * connect the overlays to different managers/encoders, giving priority
+ * to encoders connected to connectors with a detected connection
+ */
+static int create_crtc(struct drm_device *dev, struct omap_overlay *ovl,
+ int *j, unsigned int connected_connectors)
+{
+ struct omap_drm_private *priv = dev->dev_private;
+ struct omap_overlay_manager *mgr = NULL;
+ struct drm_crtc *crtc;
+
+ if (ovl->manager) {
+ DBG("disconnecting %s from %s", ovl->name,
+ ovl->manager->name);
+ ovl->unset_manager(ovl);
+ }
+
+ /* find next best connector, ones with detected connection first
+ */
+ while (*j < priv->num_connectors && !mgr) {
+ if (connected_connectors & (1 << *j)) {
+ struct drm_encoder *encoder =
+ omap_connector_attached_encoder(
+ priv->connectors[*j]);
+ if (encoder) {
+ mgr = omap_encoder_get_manager(encoder);
+ }
+ }
+ (*j)++;
+ }
+
+ /* if we couldn't find another connected connector, let's start
+ * looking at the unconnected connectors:
+ *
+ * note: it might not be immediately apparent, but thanks to
+ * the !mgr check in both this loop and the one above, the only
+ * way to enter this loop is with *j == priv->num_connectors,
+ * so idx can never go negative.
+ */
+ while (*j < 2 * priv->num_connectors && !mgr) {
+ int idx = *j - priv->num_connectors;
+ if (!(connected_connectors & (1 << idx))) {
+ struct drm_encoder *encoder =
+ omap_connector_attached_encoder(
+ priv->connectors[idx]);
+ if (encoder) {
+ mgr = omap_encoder_get_manager(encoder);
+ }
+ }
+ (*j)++;
+ }
+
+ if (mgr) {
+ DBG("connecting %s to %s", ovl->name, mgr->name);
+ ovl->set_manager(ovl, mgr);
+ }
+
+ crtc = omap_crtc_init(dev, ovl, priv->num_crtcs);
+
+ if (!crtc) {
+ dev_err(dev->dev, "could not create CRTC: %s\n",
+ ovl->name);
+ return -ENOMEM;
+ }
+
+ BUG_ON(priv->num_crtcs >= ARRAY_SIZE(priv->crtcs));
+
+ priv->crtcs[priv->num_crtcs++] = crtc;
+
+ return 0;
+}
+
+static int match_dev_name(struct omap_dss_device *dssdev, void *data)
+{
+ return !strcmp(dssdev->name, data);
+}
+
+static unsigned int detect_connectors(struct drm_device *dev)
+{
+ struct omap_drm_private *priv = dev->dev_private;
+ unsigned int connected_connectors = 0;
+ int i;
+
+ for (i = 0; i < priv->num_connectors; i++) {
+ struct drm_connector *connector = priv->connectors[i];
+ if (omap_connector_detect(connector, true) ==
+ connector_status_connected) {
+ connected_connectors |= (1 << i);
+ }
+ }
+
+ return connected_connectors;
+}
+
+static int omap_modeset_init(struct drm_device *dev)
+{
+ const struct omap_drm_platform_data *pdata = dev->dev->platform_data;
+ struct omap_kms_platform_data *kms_pdata = NULL;
+ struct omap_drm_private *priv = dev->dev_private;
+ struct omap_dss_device *dssdev = NULL;
+ int i, j;
+ unsigned int connected_connectors = 0;
+
+ drm_mode_config_init(dev);
+
+ if (pdata && pdata->kms_pdata) {
+ kms_pdata = pdata->kms_pdata;
+
+ /* if platform data is provided by the board file, use it to
+ * control which overlays, managers, and devices we own.
+ */
+ for (i = 0; i < kms_pdata->mgr_cnt; i++) {
+ struct omap_overlay_manager *mgr =
+ omap_dss_get_overlay_manager(
+ kms_pdata->mgr_ids[i]);
+ create_encoder(dev, mgr);
+ }
+
+ for (i = 0; i < kms_pdata->dev_cnt; i++) {
+ struct omap_dss_device *dssdev =
+ omap_dss_find_device(
+ (void *)kms_pdata->dev_names[i],
+ match_dev_name);
+ if (!dssdev) {
+ dev_warn(dev->dev, "no such dssdev: %s\n",
+ kms_pdata->dev_names[i]);
+ continue;
+ }
+ create_connector(dev, dssdev);
+ }
+
+ connected_connectors = detect_connectors(dev);
+
+ j = 0;
+ for (i = 0; i < kms_pdata->ovl_cnt; i++) {
+ struct omap_overlay *ovl =
+ omap_dss_get_overlay(kms_pdata->ovl_ids[i]);
+ create_crtc(dev, ovl, &j, connected_connectors);
+ }
+ } else {
+ /* otherwise just grab up to CONFIG_DRM_OMAP_NUM_CRTCS and try
+ * to make educated guesses about everything else
+ */
+ int max_overlays = min(omap_dss_get_num_overlays(), num_crtc);
+
+ for (i = 0; i < omap_dss_get_num_overlay_managers(); i++) {
+ create_encoder(dev, omap_dss_get_overlay_manager(i));
+ }
+
+ for_each_dss_dev(dssdev) {
+ create_connector(dev, dssdev);
+ }
+
+ connected_connectors = detect_connectors(dev);
+
+ j = 0;
+ for (i = 0; i < max_overlays; i++) {
+ create_crtc(dev, omap_dss_get_overlay(i),
+ &j, connected_connectors);
+ }
+ }
+
+ /* for now keep the mapping of CRTCs and encoders static.. */
+ for (i = 0; i < priv->num_encoders; i++) {
+ struct drm_encoder *encoder = priv->encoders[i];
+ struct omap_overlay_manager *mgr =
+ omap_encoder_get_manager(encoder);
+
+ encoder->possible_crtcs = 0;
+
+ for (j = 0; j < priv->num_crtcs; j++) {
+ struct omap_overlay *ovl =
+ omap_crtc_get_overlay(priv->crtcs[j]);
+ if (ovl->manager == mgr) {
+ encoder->possible_crtcs |= (1 << j);
+ }
+ }
+
+ DBG("%s: possible_crtcs=%08x", mgr->name,
+ encoder->possible_crtcs);
+ }
+
+ dump_video_chains();
+
+ dev->mode_config.min_width = 256;
+ dev->mode_config.min_height = 256;
+
+ /* note: eventually will need some cpu_is_omapXYZ() type stuff here
+ * to fill in these limits properly on different OMAP generations..
+ */
+ dev->mode_config.max_width = 2048;
+ dev->mode_config.max_height = 2048;
+
+ dev->mode_config.funcs = &omap_mode_config_funcs;
+
+ return 0;
+}
+
+static void omap_modeset_free(struct drm_device *dev)
+{
+ drm_mode_config_cleanup(dev);
+}
+
+/*
+ * drm ioctl funcs
+ */
+
+
+static int ioctl_get_param(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_omap_param *args = data;
+
+ DBG("%p: param=%llu", dev, args->param);
+
+ switch (args->param) {
+ case OMAP_PARAM_CHIPSET_ID:
+ args->value = GET_OMAP_TYPE;
+ break;
+ default:
+ DBG("unknown parameter %lld", args->param);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ioctl_set_param(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_omap_param *args = data;
+
+ switch (args->param) {
+ default:
+ DBG("unknown parameter %lld", args->param);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ioctl_gem_new(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_omap_gem_new *args = data;
+ DBG("%p:%p: size=0x%08x, flags=%08x", dev, file_priv,
+ args->size.bytes, args->flags);
+ return omap_gem_new_handle(dev, file_priv, args->size,
+ args->flags, &args->handle);
+}
+
+static int ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_omap_gem_cpu_prep *args = data;
+ struct drm_gem_object *obj;
+ int ret;
+
+ VERB("%p:%p: handle=%d, op=%x", dev, file_priv, args->handle, args->op);
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (!obj) {
+ return -ENOENT;
+ }
+
+ ret = omap_gem_op_sync(obj, args->op);
+
+ if (!ret) {
+ ret = omap_gem_op_start(obj, args->op);
+ }
+
+ drm_gem_object_unreference_unlocked(obj);
+
+ return ret;
+}
+
+static int ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_omap_gem_cpu_fini *args = data;
+ struct drm_gem_object *obj;
+ int ret;
+
+ VERB("%p:%p: handle=%d", dev, file_priv, args->handle);
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (!obj) {
+ return -ENOENT;
+ }
+
+ /* XXX flushy, flushy */
+ ret = 0;
+
+ if (!ret) {
+ ret = omap_gem_op_finish(obj, args->op);
+ }
+
+ drm_gem_object_unreference_unlocked(obj);
+
+ return ret;
+}
+
+static int ioctl_gem_info(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_omap_gem_info *args = data;
+ struct drm_gem_object *obj;
+ int ret = 0;
+
+ DBG("%p:%p: handle=%d", dev, file_priv, args->handle);
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (!obj) {
+ return -ENOENT;
+ }
+
+ args->size = omap_gem_mmap_size(obj);
+ args->offset = omap_gem_mmap_offset(obj);
+
+ drm_gem_object_unreference_unlocked(obj);
+
+ return ret;
+}
+
+struct drm_ioctl_desc ioctls[DRM_COMMAND_END - DRM_COMMAND_BASE] = {
+ DRM_IOCTL_DEF_DRV(OMAP_GET_PARAM, ioctl_get_param, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(OMAP_SET_PARAM, ioctl_set_param, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(OMAP_GEM_NEW, ioctl_gem_new, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(OMAP_GEM_CPU_PREP, ioctl_gem_cpu_prep, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(OMAP_GEM_CPU_FINI, ioctl_gem_cpu_fini, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(OMAP_GEM_INFO, ioctl_gem_info, DRM_UNLOCKED|DRM_AUTH),
+};
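+
+/* Illustrative only, not part of the original patch: a userspace client
+ * could query the chipset id through OMAP_GET_PARAM roughly as below,
+ * assuming libdrm's drmCommandWriteRead() and the DRM_OMAP_GET_PARAM /
+ * struct drm_omap_param definitions from omap_drm.h:
+ *
+ *	struct drm_omap_param req = { .param = OMAP_PARAM_CHIPSET_ID };
+ *	if (!drmCommandWriteRead(fd, DRM_OMAP_GET_PARAM, &req, sizeof(req)))
+ *		printf("chipset id: 0x%llx\n", (unsigned long long)req.value);
+ */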
+
+/*
+ * drm driver funcs
+ */
+
+/**
+ * load - setup chip and create an initial config
+ * @dev: DRM device
+ * @flags: startup flags
+ *
+ * The driver load routine has to do several things:
+ * - initialize the memory manager
+ * - allocate initial config memory
+ * - setup the DRM framebuffer with the allocated memory
+ */
+static int dev_load(struct drm_device *dev, unsigned long flags)
+{
+ struct omap_drm_private *priv;
+ int ret;
+
+ DBG("load: dev=%p", dev);
+
+ drm_device = dev;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ dev_err(dev->dev, "could not allocate priv\n");
+ return -ENOMEM;
+ }
+
+ dev->dev_private = priv;
+
+ omap_gem_init(dev);
+
+ ret = omap_modeset_init(dev);
+ if (ret) {
+ dev_err(dev->dev, "omap_modeset_init failed: ret=%d\n", ret);
+ dev->dev_private = NULL;
+ kfree(priv);
+ return ret;
+ }
+
+ priv->fbdev = omap_fbdev_init(dev);
+ if (!priv->fbdev) {
+ dev_warn(dev->dev, "omap_fbdev_init failed\n");
+ /* well, limp along without an fbdev.. maybe X11 will work? */
+ }
+
+ drm_kms_helper_poll_init(dev);
+
+ ret = drm_vblank_init(dev, priv->num_crtcs);
+ if (ret) {
+ dev_warn(dev->dev, "could not init vblank\n");
+ }
+
+ return 0;
+}
+
+static int dev_unload(struct drm_device *dev)
+{
+ DBG("unload: dev=%p", dev);
+
+ drm_vblank_cleanup(dev);
+ drm_kms_helper_poll_fini(dev);
+
+ omap_fbdev_free(dev);
+ omap_modeset_free(dev);
+ omap_gem_deinit(dev);
+
+ kfree(dev->dev_private);
+ dev->dev_private = NULL;
+
+ return 0;
+}
+
+static int dev_open(struct drm_device *dev, struct drm_file *file)
+{
+ file->driver_priv = NULL;
+
+ DBG("open: dev=%p, file=%p", dev, file);
+
+ return 0;
+}
+
+static int dev_firstopen(struct drm_device *dev)
+{
+ DBG("firstopen: dev=%p", dev);
+ return 0;
+}
+
+/**
+ * lastclose - clean up after all DRM clients have exited
+ * @dev: DRM device
+ *
+ * Take care of cleaning up after all DRM clients have exited. In the
+ * mode setting case, we want to restore the kernel's initial mode (just
+ * in case the last client left us in a bad state).
+ */
+static void dev_lastclose(struct drm_device *dev)
+{
+ /* we don't support vga-switcheroo.. so just make sure the fbdev
+ * mode is active
+ */
+ struct omap_drm_private *priv = dev->dev_private;
+ int ret;
+
+ DBG("lastclose: dev=%p", dev);
+
+ ret = drm_fb_helper_restore_fbdev_mode(priv->fbdev);
+ if (ret)
+ DBG("failed to restore crtc mode");
+}
+
+static void dev_preclose(struct drm_device *dev, struct drm_file *file)
+{
+ DBG("preclose: dev=%p", dev);
+}
+
+static void dev_postclose(struct drm_device *dev, struct drm_file *file)
+{
+ DBG("postclose: dev=%p, file=%p", dev, file);
+}
+
+/**
+ * enable_vblank - enable vblank interrupt events
+ * @dev: DRM device
+ * @crtc: which irq to enable
+ *
+ * Enable vblank interrupts for @crtc. If the device doesn't have
+ * a hardware vblank counter, this routine should be a no-op, since
+ * interrupts will have to stay on to keep the count accurate.
+ *
+ * RETURNS
+ * Zero on success, appropriate errno if the given @crtc's vblank
+ * interrupt cannot be enabled.
+ */
+static int dev_enable_vblank(struct drm_device *dev, int crtc)
+{
+ DBG("enable_vblank: dev=%p, crtc=%d", dev, crtc);
+ return 0;
+}
+
+/**
+ * disable_vblank - disable vblank interrupt events
+ * @dev: DRM device
+ * @crtc: which irq to disable
+ *
+ * Disable vblank interrupts for @crtc. If the device doesn't have
+ * a hardware vblank counter, this routine should be a no-op, since
+ * interrupts will have to stay on to keep the count accurate.
+ */
+static void dev_disable_vblank(struct drm_device *dev, int crtc)
+{
+ DBG("disable_vblank: dev=%p, crtc=%d", dev, crtc);
+}
+
+static irqreturn_t dev_irq_handler(DRM_IRQ_ARGS)
+{
+ return IRQ_HANDLED;
+}
+
+static void dev_irq_preinstall(struct drm_device *dev)
+{
+ DBG("irq_preinstall: dev=%p", dev);
+}
+
+static int dev_irq_postinstall(struct drm_device *dev)
+{
+ DBG("irq_postinstall: dev=%p", dev);
+ return 0;
+}
+
+static void dev_irq_uninstall(struct drm_device *dev)
+{
+ DBG("irq_uninstall: dev=%p", dev);
+}
+
+static struct vm_operations_struct omap_gem_vm_ops = {
+ .fault = omap_gem_fault,
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+};
+
+static struct drm_driver omap_drm_driver = {
+ .driver_features =
+ DRIVER_HAVE_IRQ | DRIVER_MODESET | DRIVER_GEM,
+ .load = dev_load,
+ .unload = dev_unload,
+ .open = dev_open,
+ .firstopen = dev_firstopen,
+ .lastclose = dev_lastclose,
+ .preclose = dev_preclose,
+ .postclose = dev_postclose,
+ .get_vblank_counter = drm_vblank_count,
+ .enable_vblank = dev_enable_vblank,
+ .disable_vblank = dev_disable_vblank,
+ .irq_preinstall = dev_irq_preinstall,
+ .irq_postinstall = dev_irq_postinstall,
+ .irq_uninstall = dev_irq_uninstall,
+ .irq_handler = dev_irq_handler,
+ .reclaim_buffers = drm_core_reclaim_buffers,
+#ifdef CONFIG_DEBUG_FS
+ .debugfs_init = omap_debugfs_init,
+ .debugfs_cleanup = omap_debugfs_cleanup,
+#endif
+ .gem_init_object = omap_gem_init_object,
+ .gem_free_object = omap_gem_free_object,
+ .gem_vm_ops = &omap_gem_vm_ops,
+ .dumb_create = omap_gem_dumb_create,
+ .dumb_map_offset = omap_gem_dumb_map_offset,
+ .dumb_destroy = omap_gem_dumb_destroy,
+ .ioctls = ioctls,
+ .num_ioctls = DRM_OMAP_NUM_IOCTLS,
+ .fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .unlocked_ioctl = drm_ioctl,
+ .release = drm_release,
+ .mmap = omap_gem_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+ .read = drm_read,
+ .llseek = noop_llseek,
+ },
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL,
+};
+
+static int pdev_suspend(struct platform_device *pDevice, pm_message_t state)
+{
+ DBG("");
+ return 0;
+}
+
+static int pdev_resume(struct platform_device *device)
+{
+ DBG("");
+ return 0;
+}
+
+static void pdev_shutdown(struct platform_device *device)
+{
+ DBG("");
+}
+
+static int pdev_probe(struct platform_device *device)
+{
+ DBG("%s", device->name);
+ return drm_platform_init(&omap_drm_driver, device);
+}
+
+static int pdev_remove(struct platform_device *device)
+{
+ DBG("");
+ drm_platform_exit(&omap_drm_driver, device);
+ return 0;
+}
+
+struct platform_driver pdev = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = pdev_probe,
+ .remove = pdev_remove,
+ .suspend = pdev_suspend,
+ .resume = pdev_resume,
+ .shutdown = pdev_shutdown,
+};
+
+static int __init omap_drm_init(void)
+{
+ DBG("init");
+ return platform_driver_register(&pdev);
+}
+
+static void __exit omap_drm_fini(void)
+{
+ DBG("fini");
+ platform_driver_unregister(&pdev);
+}
+
+/* need late_initcall() so we load after dss_driver's are loaded */
+late_initcall(omap_drm_init);
+module_exit(omap_drm_fini);
+
+MODULE_AUTHOR("Rob Clark <rob@ti.com>");
+MODULE_DESCRIPTION("OMAP DRM Display Driver");
+MODULE_ALIAS("platform:" DRIVER_NAME);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/omapdrm/omap_drv.h b/drivers/staging/omapdrm/omap_drv.h
new file mode 100644
index 000000000000..76c42515ecc5
--- /dev/null
+++ b/drivers/staging/omapdrm/omap_drv.h
@@ -0,0 +1,135 @@
+/*
+ * drivers/staging/omapdrm/omap_drv.h
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __OMAP_DRV_H__
+#define __OMAP_DRV_H__
+
+#include <video/omapdss.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <drm/drmP.h>
+#include "omap_drm.h"
+#include "omap_priv.h"
+
+#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
+#define VERB(fmt, ...) do { if (0) DRM_DEBUG(fmt, ##__VA_ARGS__); } while (0) /* verbose debug */
+
+#define MODULE_NAME "omapdrm"
+
+/* max # of mapper-id's that can be assigned.. todo, come up with a better
+ * (but still inexpensive) way to store/access per-buffer mapper private
+ * data..
+ */
+#define MAX_MAPPERS 2
+
+struct omap_drm_private {
+ unsigned int num_crtcs;
+ struct drm_crtc *crtcs[8];
+ unsigned int num_encoders;
+ struct drm_encoder *encoders[8];
+ unsigned int num_connectors;
+ struct drm_connector *connectors[8];
+
+ struct drm_fb_helper *fbdev;
+
+ bool has_dmm;
+};
+
+#ifdef CONFIG_DEBUG_FS
+int omap_debugfs_init(struct drm_minor *minor);
+void omap_debugfs_cleanup(struct drm_minor *minor);
+#endif
+
+struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev);
+void omap_fbdev_free(struct drm_device *dev);
+
+struct drm_crtc *omap_crtc_init(struct drm_device *dev,
+ struct omap_overlay *ovl, int id);
+struct omap_overlay *omap_crtc_get_overlay(struct drm_crtc *crtc);
+
+struct drm_encoder *omap_encoder_init(struct drm_device *dev,
+ struct omap_overlay_manager *mgr);
+struct omap_overlay_manager *omap_encoder_get_manager(
+ struct drm_encoder *encoder);
+struct drm_encoder *omap_connector_attached_encoder(
+ struct drm_connector *connector);
+enum drm_connector_status omap_connector_detect(
+ struct drm_connector *connector, bool force);
+
+struct drm_connector *omap_connector_init(struct drm_device *dev,
+ int connector_type, struct omap_dss_device *dssdev);
+void omap_connector_mode_set(struct drm_connector *connector,
+ struct drm_display_mode *mode);
+void omap_connector_flush(struct drm_connector *connector,
+ int x, int y, int w, int h);
+
+struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
+ struct drm_file *file, struct drm_mode_fb_cmd *mode_cmd);
+struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
+ struct drm_mode_fb_cmd *mode_cmd, struct drm_gem_object *bo);
+struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb);
+int omap_framebuffer_get_buffer(struct drm_framebuffer *fb, int x, int y,
+ void **vaddr, dma_addr_t *paddr, unsigned int *screen_width);
+struct drm_connector *omap_framebuffer_get_next_connector(
+ struct drm_framebuffer *fb, struct drm_connector *from);
+void omap_framebuffer_flush(struct drm_framebuffer *fb,
+ int x, int y, int w, int h);
+
+void omap_gem_init(struct drm_device *dev);
+void omap_gem_deinit(struct drm_device *dev);
+
+struct drm_gem_object *omap_gem_new(struct drm_device *dev,
+ union omap_gem_size gsize, uint32_t flags);
+int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
+ union omap_gem_size gsize, uint32_t flags, uint32_t *handle);
+void omap_gem_free_object(struct drm_gem_object *obj);
+int omap_gem_init_object(struct drm_gem_object *obj);
+void *omap_gem_vaddr(struct drm_gem_object *obj);
+int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
+ uint32_t handle, uint64_t *offset);
+int omap_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
+ uint32_t handle);
+int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma);
+int omap_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op);
+int omap_gem_op_finish(struct drm_gem_object *obj, enum omap_gem_op op);
+int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op);
+int omap_gem_op_async(struct drm_gem_object *obj, enum omap_gem_op op,
+ void (*fxn)(void *arg), void *arg);
+int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll);
+int omap_gem_get_paddr(struct drm_gem_object *obj,
+ dma_addr_t *paddr, bool remap);
+int omap_gem_put_paddr(struct drm_gem_object *obj);
+uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj);
+size_t omap_gem_mmap_size(struct drm_gem_object *obj);
+
+static inline int align_pitch(int pitch, int width, int bpp)
+{
+ int bytespp = (bpp + 7) / 8;
+ /* in case someone tries to feed us a completely bogus stride: */
+ pitch = max(pitch, width * bytespp);
+ /* PVR needs alignment to 8 pixels.. right now that is the most
+ * restrictive stride requirement..
+ */
+ return ALIGN(pitch, 8 * bytespp);
+}
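+
+/* Worked example (illustrative numbers only): a 601 pixel wide ARGB32
+ * surface (bpp = 32) gives bytespp = 4 and a minimum pitch of
+ * 601 * 4 = 2404 bytes, which ALIGN() then rounds up to the next
+ * multiple of 8 * 4 = 32 bytes, so align_pitch(0, 601, 32) == 2432.
+ */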
+
+#endif /* __OMAP_DRV_H__ */
diff --git a/drivers/staging/omapdrm/omap_encoder.c b/drivers/staging/omapdrm/omap_encoder.c
new file mode 100644
index 000000000000..06c52cb62d2f
--- /dev/null
+++ b/drivers/staging/omapdrm/omap_encoder.c
@@ -0,0 +1,171 @@
+/*
+ * drivers/staging/omapdrm/omap_encoder.c
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "omap_drv.h"
+
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+
+/*
+ * encoder funcs
+ */
+
+#define to_omap_encoder(x) container_of(x, struct omap_encoder, base)
+
+struct omap_encoder {
+ struct drm_encoder base;
+ struct omap_overlay_manager *mgr;
+};
+
+static void omap_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
+ DBG("%s", omap_encoder->mgr->name);
+ drm_encoder_cleanup(encoder);
+ kfree(omap_encoder);
+}
+
+static void omap_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
+ DBG("%s: %d", omap_encoder->mgr->name, mode);
+}
+
+static bool omap_encoder_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
+ DBG("%s", omap_encoder->mgr->name);
+ return true;
+}
+
+static void omap_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
+ struct drm_device *dev = encoder->dev;
+ struct omap_drm_private *priv = dev->dev_private;
+ int i;
+
+ mode = adjusted_mode;
+
+ DBG("%s: set mode: %dx%d", omap_encoder->mgr->name,
+ mode->hdisplay, mode->vdisplay);
+
+ for (i = 0; i < priv->num_connectors; i++) {
+ struct drm_connector *connector = priv->connectors[i];
+ if (connector->encoder == encoder) {
+ omap_connector_mode_set(connector, mode);
+ }
+ }
+}
+
+static void omap_encoder_prepare(struct drm_encoder *encoder)
+{
+ struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
+ struct drm_encoder_helper_funcs *encoder_funcs =
+ encoder->helper_private;
+ DBG("%s", omap_encoder->mgr->name);
+ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+static void omap_encoder_commit(struct drm_encoder *encoder)
+{
+ struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
+ struct drm_encoder_helper_funcs *encoder_funcs =
+ encoder->helper_private;
+ DBG("%s", omap_encoder->mgr->name);
+ omap_encoder->mgr->apply(omap_encoder->mgr);
+ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
+}
+
+static const struct drm_encoder_funcs omap_encoder_funcs = {
+ .destroy = omap_encoder_destroy,
+};
+
+static const struct drm_encoder_helper_funcs omap_encoder_helper_funcs = {
+ .dpms = omap_encoder_dpms,
+ .mode_fixup = omap_encoder_mode_fixup,
+ .mode_set = omap_encoder_mode_set,
+ .prepare = omap_encoder_prepare,
+ .commit = omap_encoder_commit,
+};
+
+struct omap_overlay_manager *omap_encoder_get_manager(
+ struct drm_encoder *encoder)
+{
+ struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
+ return omap_encoder->mgr;
+}
+
+/* initialize encoder */
+struct drm_encoder *omap_encoder_init(struct drm_device *dev,
+ struct omap_overlay_manager *mgr)
+{
+ struct drm_encoder *encoder = NULL;
+ struct omap_encoder *omap_encoder;
+ struct omap_overlay_manager_info info;
+ int ret;
+
+ DBG("%s", mgr->name);
+
+ omap_encoder = kzalloc(sizeof(*omap_encoder), GFP_KERNEL);
+ if (!omap_encoder) {
+ dev_err(dev->dev, "could not allocate encoder\n");
+ goto fail;
+ }
+
+ omap_encoder->mgr = mgr;
+ encoder = &omap_encoder->base;
+
+ drm_encoder_init(dev, encoder, &omap_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS);
+ drm_encoder_helper_add(encoder, &omap_encoder_helper_funcs);
+
+ mgr->get_manager_info(mgr, &info);
+
+ /* TODO: fix hard-coded setup.. */
+ info.default_color = 0x00000000;
+ info.trans_key = 0x00000000;
+ info.trans_key_type = OMAP_DSS_COLOR_KEY_GFX_DST;
+ info.trans_enabled = false;
+
+ ret = mgr->set_manager_info(mgr, &info);
+ if (ret) {
+ dev_err(dev->dev, "could not set manager info\n");
+ goto fail;
+ }
+
+ ret = mgr->apply(mgr);
+ if (ret) {
+ dev_err(dev->dev, "could not apply\n");
+ goto fail;
+ }
+
+ return encoder;
+
+fail:
+ if (encoder) {
+ omap_encoder_destroy(encoder);
+ }
+
+ return NULL;
+}
diff --git a/drivers/staging/omapdrm/omap_fb.c b/drivers/staging/omapdrm/omap_fb.c
new file mode 100644
index 000000000000..0b50c5b3b564
--- /dev/null
+++ b/drivers/staging/omapdrm/omap_fb.c
@@ -0,0 +1,243 @@
+/*
+ * drivers/staging/omapdrm/omap_fb.c
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "omap_drv.h"
+
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+
+
+/*
+ * framebuffer funcs
+ */
+
+#define to_omap_framebuffer(x) container_of(x, struct omap_framebuffer, base)
+
+struct omap_framebuffer {
+ struct drm_framebuffer base;
+ struct drm_gem_object *bo;
+ int size;
+ dma_addr_t paddr;
+};
+
+static int omap_framebuffer_create_handle(struct drm_framebuffer *fb,
+ struct drm_file *file_priv,
+ unsigned int *handle)
+{
+ struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
+ return drm_gem_handle_create(file_priv, omap_fb->bo, handle);
+}
+
+static void omap_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+ struct drm_device *dev = fb->dev;
+ struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
+
+ DBG("destroy: FB ID: %d (%p)", fb->base.id, fb);
+
+ drm_framebuffer_cleanup(fb);
+
+ if (omap_fb->bo) {
+ if (omap_fb->paddr && omap_gem_put_paddr(omap_fb->bo))
+ dev_err(dev->dev, "could not unmap!\n");
+ drm_gem_object_unreference_unlocked(omap_fb->bo);
+ }
+
+ kfree(omap_fb);
+}
+
+static int omap_framebuffer_dirty(struct drm_framebuffer *fb,
+ struct drm_file *file_priv, unsigned flags, unsigned color,
+ struct drm_clip_rect *clips, unsigned num_clips)
+{
+ int i;
+
+ for (i = 0; i < num_clips; i++) {
+ omap_framebuffer_flush(fb, clips[i].x1, clips[i].y1,
+ clips[i].x2 - clips[i].x1,
+ clips[i].y2 - clips[i].y1);
+ }
+
+ return 0;
+}
+
+static const struct drm_framebuffer_funcs omap_framebuffer_funcs = {
+ .create_handle = omap_framebuffer_create_handle,
+ .destroy = omap_framebuffer_destroy,
+ .dirty = omap_framebuffer_dirty,
+};
+
+/* returns the buffer size */
+int omap_framebuffer_get_buffer(struct drm_framebuffer *fb, int x, int y,
+ void **vaddr, dma_addr_t *paddr, unsigned int *screen_width)
+{
+ struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
+ int bpp = fb->bits_per_pixel / 8;
+ unsigned long offset;
+
+ offset = (x * bpp) + (y * fb->pitch);
+
+ if (vaddr) {
+ void *bo_vaddr = omap_gem_vaddr(omap_fb->bo);
+ /* note: we can only count on having a vaddr for buffers that
+ * are allocated physically contiguously to begin with (ie.
+ * dma_alloc_coherent()). But this should be ok because it
+ * is only used by legacy fbdev
+ */
+ BUG_ON(IS_ERR_OR_NULL(bo_vaddr));
+ *vaddr = bo_vaddr + offset;
+ }
+
+ *paddr = omap_fb->paddr + offset;
+ *screen_width = fb->pitch / bpp;
+
+ return omap_fb->size - offset;
+}
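+
+/* Example of the offset math above (illustrative numbers only): for a
+ * 32 bpp framebuffer with a 2432 byte pitch, requesting x = 10, y = 20
+ * gives offset = 10 * 4 + 20 * 2432 = 48680 bytes and a screen_width of
+ * 2432 / 4 = 608 pixels.
+ */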
+
+struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb)
+{
+ struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
+ return omap_fb->bo;
+}
+
+/* iterate thru all the connectors, returning ones that are attached
+ * to the same fb..
+ */
+struct drm_connector *omap_framebuffer_get_next_connector(
+ struct drm_framebuffer *fb, struct drm_connector *from)
+{
+ struct drm_device *dev = fb->dev;
+ struct list_head *connector_list = &dev->mode_config.connector_list;
+ struct drm_connector *connector = from;
+
+ if (!from) {
+ return list_first_entry(connector_list, typeof(*from), head);
+ }
+
+ list_for_each_entry_from(connector, connector_list, head) {
+ if (connector != from) {
+ struct drm_encoder *encoder = connector->encoder;
+ struct drm_crtc *crtc = encoder ? encoder->crtc : NULL;
+ if (crtc && crtc->fb == fb) {
+ return connector;
+ }
+ }
+ }
+
+ return NULL;
+}
+
+/* flush an area of the framebuffer (in case of manual update display that
+ * is not automatically flushed)
+ */
+void omap_framebuffer_flush(struct drm_framebuffer *fb,
+ int x, int y, int w, int h)
+{
+ struct drm_connector *connector = NULL;
+
+ VERB("flush: %d,%d %dx%d, fb=%p", x, y, w, h, fb);
+
+ while ((connector = omap_framebuffer_get_next_connector(fb, connector))) {
+ /* only consider connectors that are part of a chain */
+ if (connector->encoder && connector->encoder->crtc) {
+ /* TODO: maybe this should propagate thru the crtc who
+ * could do the coordinate translation..
+ */
+ struct drm_crtc *crtc = connector->encoder->crtc;
+ int cx = max(0, x - crtc->x);
+ int cy = max(0, y - crtc->y);
+ int cw = w + (x - crtc->x) - cx;
+ int ch = h + (y - crtc->y) - cy;
+
+ omap_connector_flush(connector, cx, cy, cw, ch);
+ }
+ }
+}
+
+struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
+ struct drm_file *file, struct drm_mode_fb_cmd *mode_cmd)
+{
+ struct drm_gem_object *bo;
+ struct drm_framebuffer *fb;
+ bo = drm_gem_object_lookup(dev, file, mode_cmd->handle);
+ if (!bo) {
+ return ERR_PTR(-ENOENT);
+ }
+ fb = omap_framebuffer_init(dev, mode_cmd, bo);
+ if (!fb) {
+ return ERR_PTR(-ENOMEM);
+ }
+ return fb;
+}
+
+struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
+ struct drm_mode_fb_cmd *mode_cmd, struct drm_gem_object *bo)
+{
+ struct omap_framebuffer *omap_fb;
+ struct drm_framebuffer *fb = NULL;
+ int size, ret;
+
+ DBG("create framebuffer: dev=%p, mode_cmd=%p (%dx%d@%d)",
+ dev, mode_cmd, mode_cmd->width, mode_cmd->height,
+ mode_cmd->bpp);
+
+ /* in case someone tries to feed us a completely bogus stride: */
+ mode_cmd->pitch = align_pitch(mode_cmd->pitch,
+ mode_cmd->width, mode_cmd->bpp);
+
+ omap_fb = kzalloc(sizeof(*omap_fb), GFP_KERNEL);
+ if (!omap_fb) {
+ dev_err(dev->dev, "could not allocate fb\n");
+ goto fail;
+ }
+
+ fb = &omap_fb->base;
+ ret = drm_framebuffer_init(dev, fb, &omap_framebuffer_funcs);
+ if (ret) {
+ dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
+ goto fail;
+ }
+
+ DBG("create: FB ID: %d (%p)", fb->base.id, fb);
+
+ size = PAGE_ALIGN(mode_cmd->pitch * mode_cmd->height);
+
+ if (size > bo->size) {
+ dev_err(dev->dev, "provided buffer object is too small!\n");
+ goto fail;
+ }
+
+ omap_fb->bo = bo;
+ omap_fb->size = size;
+
+ if (omap_gem_get_paddr(bo, &omap_fb->paddr, true)) {
+ dev_err(dev->dev, "could not map (paddr)!\n");
+ goto fail;
+ }
+
+ drm_helper_mode_fill_fb_struct(fb, mode_cmd);
+
+ return fb;
+
+fail:
+ if (fb) {
+ omap_framebuffer_destroy(fb);
+ }
+ return NULL;
+}
diff --git a/drivers/staging/omapdrm/omap_fbdev.c b/drivers/staging/omapdrm/omap_fbdev.c
new file mode 100644
index 000000000000..093ae2f87b20
--- /dev/null
+++ b/drivers/staging/omapdrm/omap_fbdev.c
@@ -0,0 +1,372 @@
+/*
+ * drivers/staging/omapdrm/omap_fbdev.c
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "omap_drv.h"
+
+#include "drm_crtc.h"
+#include "drm_fb_helper.h"
+
+MODULE_PARM_DESC(ywrap, "Enable ywrap scrolling (omap44xx and later, default 'y')");
+static bool ywrap_enabled = true;
+module_param_named(ywrap, ywrap_enabled, bool, 0644);
+
+/*
+ * fbdev funcs, to implement legacy fbdev interface on top of drm driver
+ */
+
+#define to_omap_fbdev(x) container_of(x, struct omap_fbdev, base)
+
+struct omap_fbdev {
+ struct drm_fb_helper base;
+ struct drm_framebuffer *fb;
+ struct drm_gem_object *bo;
+ bool ywrap_enabled;
+};
+
+static void omap_fbdev_flush(struct fb_info *fbi, int x, int y, int w, int h);
+static struct drm_fb_helper *get_fb(struct fb_info *fbi);
+
+static ssize_t omap_fbdev_write(struct fb_info *fbi, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ ssize_t res;
+
+ res = fb_sys_write(fbi, buf, count, ppos);
+ omap_fbdev_flush(fbi, 0, 0, fbi->var.xres, fbi->var.yres);
+
+ return res;
+}
+
+static void omap_fbdev_fillrect(struct fb_info *fbi,
+ const struct fb_fillrect *rect)
+{
+ sys_fillrect(fbi, rect);
+ omap_fbdev_flush(fbi, rect->dx, rect->dy, rect->width, rect->height);
+}
+
+static void omap_fbdev_copyarea(struct fb_info *fbi,
+ const struct fb_copyarea *area)
+{
+ sys_copyarea(fbi, area);
+ omap_fbdev_flush(fbi, area->dx, area->dy, area->width, area->height);
+}
+
+static void omap_fbdev_imageblit(struct fb_info *fbi,
+ const struct fb_image *image)
+{
+ sys_imageblit(fbi, image);
+ omap_fbdev_flush(fbi, image->dx, image->dy,
+ image->width, image->height);
+}
+
+static int omap_fbdev_pan_display(struct fb_var_screeninfo *var,
+ struct fb_info *fbi)
+{
+ struct drm_fb_helper *helper = get_fb(fbi);
+ struct omap_fbdev *fbdev = to_omap_fbdev(helper);
+ int npages;
+
+ if (!helper)
+ goto fallback;
+
+ if (!fbdev->ywrap_enabled)
+ goto fallback;
+
+ /* DMM roll shifts in 4K pages: */
+ npages = fbi->fix.line_length >> PAGE_SHIFT;
+ omap_gem_roll(fbdev->bo, var->yoffset * npages);
+
+ return 0;
+
+fallback:
+ return drm_fb_helper_pan_display(var, fbi);
+}
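+
+/* Example of the DMM roll arithmetic above (illustrative numbers only):
+ * with ywrap enabled the pitch is page aligned, so a line_length of
+ * 8192 bytes with 4KiB pages gives npages = 2 pages per row, and panning
+ * to yoffset = 100 rolls the buffer by 100 * 2 = 200 pages.
+ */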
+
+static struct fb_ops omap_fb_ops = {
+ .owner = THIS_MODULE,
+
+ /* Note: to properly handle manual update displays, we wrap the
+ * basic fbdev ops which write to the framebuffer
+ */
+ .fb_read = fb_sys_read,
+ .fb_write = omap_fbdev_write,
+ .fb_fillrect = omap_fbdev_fillrect,
+ .fb_copyarea = omap_fbdev_copyarea,
+ .fb_imageblit = omap_fbdev_imageblit,
+
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_pan_display = omap_fbdev_pan_display,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_setcmap = drm_fb_helper_setcmap,
+
+ .fb_debug_enter = drm_fb_helper_debug_enter,
+ .fb_debug_leave = drm_fb_helper_debug_leave,
+};
+
+static int omap_fbdev_create(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct omap_fbdev *fbdev = to_omap_fbdev(helper);
+ struct drm_device *dev = helper->dev;
+ struct omap_drm_private *priv = dev->dev_private;
+ struct drm_framebuffer *fb = NULL;
+ union omap_gem_size gsize;
+ struct fb_info *fbi = NULL;
+ struct drm_mode_fb_cmd mode_cmd = {0};
+ dma_addr_t paddr;
+ void __iomem *vaddr;
+ int size, screen_width;
+ int ret;
+
+ /* only doing ARGB32 since this is what is needed to alpha-blend
+ * with video overlays:
+ */
+ sizes->surface_bpp = 32;
+ sizes->surface_depth = 32;
+
+ DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width,
+ sizes->surface_height, sizes->surface_bpp,
+ sizes->fb_width, sizes->fb_height);
+
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+
+ mode_cmd.bpp = sizes->surface_bpp;
+ mode_cmd.depth = sizes->surface_depth;
+
+ mode_cmd.pitch = align_pitch(
+ mode_cmd.width * ((mode_cmd.bpp + 7) / 8),
+ mode_cmd.width, mode_cmd.bpp);
+
+ fbdev->ywrap_enabled = priv->has_dmm && ywrap_enabled;
+ if (fbdev->ywrap_enabled) {
+ /* need to align pitch to page size if using DMM scrolling */
+ mode_cmd.pitch = ALIGN(mode_cmd.pitch, PAGE_SIZE);
+ }
+
+ /* allocate backing bo */
+ gsize = (union omap_gem_size){
+ .bytes = PAGE_ALIGN(mode_cmd.pitch * mode_cmd.height),
+ };
+ DBG("allocating %d bytes for fb %d", gsize.bytes, dev->primary->index);
+ fbdev->bo = omap_gem_new(dev, gsize, OMAP_BO_SCANOUT | OMAP_BO_WC);
+	if (!fbdev->bo) {
+		dev_err(dev->dev, "failed to allocate buffer object\n");
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+ fb = omap_framebuffer_init(dev, &mode_cmd, fbdev->bo);
+ if (!fb) {
+ dev_err(dev->dev, "failed to allocate fb\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ mutex_lock(&dev->struct_mutex);
+
+ fbi = framebuffer_alloc(0, dev->dev);
+ if (!fbi) {
+ dev_err(dev->dev, "failed to allocate fb info\n");
+ ret = -ENOMEM;
+ goto fail_unlock;
+ }
+
+ DBG("fbi=%p, dev=%p", fbi, dev);
+
+ fbdev->fb = fb;
+ helper->fb = fb;
+ helper->fbdev = fbi;
+
+ fbi->par = helper;
+ fbi->flags = FBINFO_DEFAULT;
+ fbi->fbops = &omap_fb_ops;
+
+ strcpy(fbi->fix.id, MODULE_NAME);
+
+ ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
+ if (ret) {
+ ret = -ENOMEM;
+ goto fail_unlock;
+ }
+
+ drm_fb_helper_fill_fix(fbi, fb->pitch, fb->depth);
+ drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
+
+ size = omap_framebuffer_get_buffer(fb, 0, 0,
+ &vaddr, &paddr, &screen_width);
+
+ dev->mode_config.fb_base = paddr;
+
+ fbi->screen_base = vaddr;
+ fbi->screen_size = size;
+ fbi->fix.smem_start = paddr;
+ fbi->fix.smem_len = size;
+
+ /* if we have DMM, then we can use it for scrolling by just
+ * shuffling pages around in DMM rather than doing sw blit.
+ */
+ if (fbdev->ywrap_enabled) {
+ DRM_INFO("Enabling DMM ywrap scrolling\n");
+ fbi->flags |= FBINFO_HWACCEL_YWRAP | FBINFO_READS_FAST;
+ fbi->fix.ywrapstep = 1;
+ }
+
+
+ DBG("par=%p, %dx%d", fbi->par, fbi->var.xres, fbi->var.yres);
+ DBG("allocated %dx%d fb", fbdev->fb->width, fbdev->fb->height);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+
+fail_unlock:
+ mutex_unlock(&dev->struct_mutex);
+fail:
+
+ if (ret) {
+ if (fbi)
+ framebuffer_release(fbi);
+ if (fb)
+ fb->funcs->destroy(fb);
+ }
+
+ return ret;
+}
+
+static void omap_crtc_fb_gamma_set(struct drm_crtc *crtc,
+ u16 red, u16 green, u16 blue, int regno)
+{
+ DBG("fbdev: set gamma");
+}
+
+static void omap_crtc_fb_gamma_get(struct drm_crtc *crtc,
+ u16 *red, u16 *green, u16 *blue, int regno)
+{
+ DBG("fbdev: get gamma");
+}
+
+static int omap_fbdev_probe(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ int new_fb = 0;
+ int ret;
+
+ if (!helper->fb) {
+ ret = omap_fbdev_create(helper, sizes);
+ if (ret)
+ return ret;
+ new_fb = 1;
+ }
+ return new_fb;
+}
+
+static struct drm_fb_helper_funcs omap_fb_helper_funcs = {
+ .gamma_set = omap_crtc_fb_gamma_set,
+ .gamma_get = omap_crtc_fb_gamma_get,
+ .fb_probe = omap_fbdev_probe,
+};
+
+static struct drm_fb_helper *get_fb(struct fb_info *fbi)
+{
+ if (!fbi || strcmp(fbi->fix.id, MODULE_NAME)) {
+ /* these are not the fb's you're looking for */
+ return NULL;
+ }
+ return fbi->par;
+}
+
+/* flush an area of the framebuffer (in case of manual update display that
+ * is not automatically flushed)
+ */
+static void omap_fbdev_flush(struct fb_info *fbi, int x, int y, int w, int h)
+{
+ struct drm_fb_helper *helper = get_fb(fbi);
+
+ if (!helper)
+ return;
+
+ VERB("flush fbdev: %d,%d %dx%d, fbi=%p", x, y, w, h, fbi);
+
+ omap_framebuffer_flush(helper->fb, x, y, w, h);
+}
+
+/* initialize fbdev helper */
+struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev)
+{
+ struct omap_drm_private *priv = dev->dev_private;
+ struct omap_fbdev *fbdev = NULL;
+ struct drm_fb_helper *helper;
+ int ret = 0;
+
+ fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
+ if (!fbdev) {
+ dev_err(dev->dev, "could not allocate fbdev\n");
+ goto fail;
+ }
+
+ helper = &fbdev->base;
+
+ helper->funcs = &omap_fb_helper_funcs;
+
+ ret = drm_fb_helper_init(dev, helper,
+ priv->num_crtcs, priv->num_connectors);
+ if (ret) {
+ dev_err(dev->dev, "could not init fbdev: ret=%d\n", ret);
+ goto fail;
+ }
+
+ drm_fb_helper_single_add_all_connectors(helper);
+ drm_fb_helper_initial_config(helper, 32);
+
+ priv->fbdev = helper;
+
+ return helper;
+
+fail:
+ kfree(fbdev);
+ return NULL;
+}
+
+void omap_fbdev_free(struct drm_device *dev)
+{
+ struct omap_drm_private *priv = dev->dev_private;
+ struct drm_fb_helper *helper = priv->fbdev;
+ struct omap_fbdev *fbdev;
+ struct fb_info *fbi;
+
+ DBG();
+
+ fbi = helper->fbdev;
+
+ unregister_framebuffer(fbi);
+ framebuffer_release(fbi);
+
+ drm_fb_helper_fini(helper);
+
+ fbdev = to_omap_fbdev(priv->fbdev);
+
+	/* this will free the backing object */
+	if (fbdev->fb)
+		fbdev->fb->funcs->destroy(fbdev->fb);
+
+	kfree(fbdev);
+
+ priv->fbdev = NULL;
+}
diff --git a/drivers/staging/omapdrm/omap_gem.c b/drivers/staging/omapdrm/omap_gem.c
new file mode 100644
index 000000000000..e0ebd1d139f6
--- /dev/null
+++ b/drivers/staging/omapdrm/omap_gem.c
@@ -0,0 +1,1231 @@
+/*
+ * drivers/staging/omapdrm/omap_gem.c
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob.clark@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#include <linux/spinlock.h>
+#include <linux/shmem_fs.h>
+
+#include "omap_drv.h"
+#include "omap_dmm_tiler.h"
+
+/* remove these once drm core helpers are merged */
+struct page ** _drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
+void _drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
+ bool dirty, bool accessed);
+int _drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size);
+
+/*
+ * GEM buffer object implementation.
+ */
+
+#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)
+
+/* note: we use upper 8 bits of flags for driver-internal flags: */
+#define OMAP_BO_DMA 0x01000000 /* actually is physically contiguous */
+#define OMAP_BO_EXT_SYNC 0x02000000 /* externally allocated sync object */
+#define OMAP_BO_EXT_MEM 0x04000000 /* externally allocated memory */
+
+
+struct omap_gem_object {
+ struct drm_gem_object base;
+
+ uint32_t flags;
+
+ /** width/height for tiled formats (rounded up to slot boundaries) */
+ uint16_t width, height;
+
+ /** roll applied when mapping to DMM */
+ uint32_t roll;
+
+ /**
+ * If buffer is allocated physically contiguous, the OMAP_BO_DMA flag
+ * is set and the paddr is valid. Also if the buffer is remapped in
+ * TILER and paddr_cnt > 0, then paddr is valid. But if you are using
+ * the physical address and OMAP_BO_DMA is not set, then you should
+ * be going thru omap_gem_{get,put}_paddr() to ensure the mapping is
+ * not removed from under your feet.
+ *
+ * Note that OMAP_BO_SCANOUT is a hint from userspace that DMA capable
+ * buffer is requested, but doesn't mean that it is. Use the
+ * OMAP_BO_DMA flag to determine if the buffer has a DMA capable
+ * physical address.
+ */
+ dma_addr_t paddr;
+
+ /**
+ * # of users of paddr
+ */
+ uint32_t paddr_cnt;
+
+ /**
+ * tiler block used when buffer is remapped in DMM/TILER.
+ */
+ struct tiler_block *block;
+
+ /**
+ * Array of backing pages, if allocated. Note that pages are never
+ * allocated for buffers originally allocated from contiguous memory
+ */
+ struct page **pages;
+
+ /** addresses corresponding to pages in above array */
+ dma_addr_t *addrs;
+
+ /**
+ * Virtual address, if mapped.
+ */
+ void *vaddr;
+
+ /**
+ * sync-object allocated on demand (if needed)
+ *
+ * Per-buffer sync-object for tracking pending and completed hw/dma
+ * read and write operations. The layout in memory is dictated by
+ * the SGX firmware, which uses this information to stall the command
+ * stream if a surface is not ready yet.
+ *
+ * Note that when buffer is used by SGX, the sync-object needs to be
+ * allocated from a special heap of sync-objects. This way many sync
+ * objects can be packed in a page, and not waste GPU virtual address
+ * space. Because of this we have to have a omap_gem_set_sync_object()
+ * API to allow replacement of the syncobj after it has (potentially)
+ * already been allocated. A bit ugly but I haven't thought of a
+ * better alternative.
+ */
+ struct {
+ uint32_t write_pending;
+ uint32_t write_complete;
+ uint32_t read_pending;
+ uint32_t read_complete;
+ } *sync;
+};
+
+/* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
+ * not necessarily pinned in TILER all the time, and (b) when they are
+ * they are not necessarily page aligned, we reserve one or more small
+ * regions in each of the 2d containers to use as a user-GART where we
+ * can create a second page-aligned mapping of parts of the buffer
+ * being accessed from userspace.
+ *
+ * Note that we could optimize slightly when we know that multiple
+ * tiler containers are backed by the same PAT.. but I'll leave that
+ * for later..
+ */
+#define NUM_USERGART_ENTRIES 2
+struct usergart_entry {
+ struct tiler_block *block; /* the reserved tiler block */
+ dma_addr_t paddr;
+ struct drm_gem_object *obj; /* the current pinned obj */
+ pgoff_t obj_pgoff; /* page offset of obj currently
+ mapped in */
+};
+static struct {
+ struct usergart_entry entry[NUM_USERGART_ENTRIES];
+ int height; /* height in rows */
+ int height_shift; /* ilog2(height in rows) */
+ int slot_shift; /* ilog2(width per slot) */
+ int stride_pfn; /* stride in pages */
+ int last; /* index of last used entry */
+} *usergart;
+
+static void evict_entry(struct drm_gem_object *obj,
+ enum tiler_fmt fmt, struct usergart_entry *entry)
+{
+ if (obj->dev->dev_mapping) {
+ size_t size = PAGE_SIZE * usergart[fmt].height;
+ loff_t off = omap_gem_mmap_offset(obj) +
+ (entry->obj_pgoff << PAGE_SHIFT);
+ unmap_mapping_range(obj->dev->dev_mapping, off, size, 1);
+ }
+
+ entry->obj = NULL;
+}
+
+/* Evict a buffer from usergart, if it is mapped there */
+static void evict(struct drm_gem_object *obj)
+{
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+
+ if (omap_obj->flags & OMAP_BO_TILED) {
+ enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
+ int i;
+
+ if (!usergart)
+ return;
+
+ for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
+ struct usergart_entry *entry = &usergart[fmt].entry[i];
+ if (entry->obj == obj)
+ evict_entry(obj, fmt, entry);
+ }
+ }
+}
+
+/* GEM objects can either be allocated from contiguous memory (in which
+ * case obj->filp==NULL), or w/ shmem backing (obj->filp!=NULL). But non
+ * contiguous buffers can be remapped in TILER/DMM if they need to be
+ * contiguous... but we don't do this all the time to reduce pressure
+ * on TILER/DMM space when we know at allocation time that the buffer
+ * will need to be scanned out.
+ */
+static inline bool is_shmem(struct drm_gem_object *obj)
+{
+ return obj->filp != NULL;
+}
+
+static int get_pages(struct drm_gem_object *obj, struct page ***pages);
+
+static DEFINE_SPINLOCK(sync_lock);
+
+/** ensure backing pages are allocated */
+static int omap_gem_attach_pages(struct drm_gem_object *obj)
+{
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ struct page **pages;
+
+ WARN_ON(omap_obj->pages);
+
+ /* TODO: __GFP_DMA32 .. but somehow GFP_HIGHMEM is coming from the
+ * mapping_gfp_mask(mapping) which conflicts w/ GFP_DMA32.. probably
+ * we actually want CMA memory for it all anyways..
+ */
+ pages = _drm_gem_get_pages(obj, GFP_KERNEL);
+ if (IS_ERR(pages)) {
+ dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
+ return PTR_ERR(pages);
+ }
+
+ /* for non-cached buffers, ensure the new pages are clean because
+ * DSS, GPU, etc. are not cache coherent:
+ */
+ if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
+ int i, npages = obj->size >> PAGE_SHIFT;
+		dma_addr_t *addrs = kmalloc(npages * sizeof(*addrs), GFP_KERNEL);
+ for (i = 0; i < npages; i++) {
+ addrs[i] = dma_map_page(obj->dev->dev, pages[i],
+ 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
+ }
+ omap_obj->addrs = addrs;
+ }
+
+ omap_obj->pages = pages;
+ return 0;
+}
+
+/** release backing pages */
+static void omap_gem_detach_pages(struct drm_gem_object *obj)
+{
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+
+ /* for non-cached buffers, ensure the new pages are clean because
+ * DSS, GPU, etc. are not cache coherent:
+ */
+ if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
+ int i, npages = obj->size >> PAGE_SHIFT;
+ for (i = 0; i < npages; i++) {
+ dma_unmap_page(obj->dev->dev, omap_obj->addrs[i],
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ }
+ kfree(omap_obj->addrs);
+ omap_obj->addrs = NULL;
+ }
+
+ _drm_gem_put_pages(obj, omap_obj->pages, true, false);
+ omap_obj->pages = NULL;
+}
+
+/** get mmap offset */
+uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
+{
+ if (!obj->map_list.map) {
+ /* Make it mmapable */
+ size_t size = omap_gem_mmap_size(obj);
+ int ret = _drm_gem_create_mmap_offset_size(obj, size);
+
+ if (ret) {
+ dev_err(obj->dev->dev, "could not allocate mmap offset");
+ return 0;
+ }
+ }
+
+ return (uint64_t)obj->map_list.hash.key << PAGE_SHIFT;
+}
+
+/** get mmap size */
+size_t omap_gem_mmap_size(struct drm_gem_object *obj)
+{
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ size_t size = obj->size;
+
+ if (omap_obj->flags & OMAP_BO_TILED) {
+ /* for tiled buffers, the virtual size has stride rounded up
+ * to 4kb.. (to hide the fact that row n+1 might start 16kb or
+ * 32kb later!). But we don't back the entire buffer with
+ * pages, only the valid picture part.. so need to adjust for
+ * this in the size used to mmap and generate mmap offset
+ */
+ size = tiler_vsize(gem2fmt(omap_obj->flags),
+ omap_obj->width, omap_obj->height);
+ }
+
+ return size;
+}
+
+
+/* Normal handling for the case of faulting in non-tiled buffers */
+static int fault_1d(struct drm_gem_object *obj,
+ struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ unsigned long pfn;
+ pgoff_t pgoff;
+
+ /* We don't use vmf->pgoff since that has the fake offset: */
+ pgoff = ((unsigned long)vmf->virtual_address -
+ vma->vm_start) >> PAGE_SHIFT;
+
+ if (omap_obj->pages) {
+ pfn = page_to_pfn(omap_obj->pages[pgoff]);
+ } else {
+ BUG_ON(!(omap_obj->flags & OMAP_BO_DMA));
+ pfn = (omap_obj->paddr >> PAGE_SHIFT) + pgoff;
+ }
+
+ VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
+ pfn, pfn << PAGE_SHIFT);
+
+ return vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
+}
+
+/* Special handling for the case of faulting in 2d tiled buffers */
+static int fault_2d(struct drm_gem_object *obj,
+ struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ struct usergart_entry *entry;
+ enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
+ struct page *pages[64]; /* XXX is this too much to have on stack? */
+ unsigned long pfn;
+ pgoff_t pgoff, base_pgoff;
+ void __user *vaddr;
+ int i, ret, slots;
+
+ if (!usergart)
+ return -EFAULT;
+
+ /* TODO: this fxn might need a bit tweaking to deal w/ tiled buffers
+ * that are wider than 4kb
+ */
+
+ /* We don't use vmf->pgoff since that has the fake offset: */
+ pgoff = ((unsigned long)vmf->virtual_address -
+ vma->vm_start) >> PAGE_SHIFT;
+
+ /* actual address we start mapping at is rounded down to previous slot
+ * boundary in the y direction:
+ */
+ base_pgoff = round_down(pgoff, usergart[fmt].height);
+ vaddr = vmf->virtual_address - ((pgoff - base_pgoff) << PAGE_SHIFT);
+ entry = &usergart[fmt].entry[usergart[fmt].last];
+
+ slots = omap_obj->width >> usergart[fmt].slot_shift;
+
+ /* evict previous buffer using this usergart entry, if any: */
+ if (entry->obj)
+ evict_entry(entry->obj, fmt, entry);
+
+ entry->obj = obj;
+ entry->obj_pgoff = base_pgoff;
+
+ /* now convert base_pgoff to phys offset from virt offset:
+ */
+ base_pgoff = (base_pgoff >> usergart[fmt].height_shift) * slots;
+
+ /* map in pages. Note the height of the slot is also equal to the
+ * number of pages that need to be mapped in to fill 4kb wide CPU page.
+ * If the height is 64, then 64 pages fill a 4kb wide by 64 row region.
+ * Beyond the valid pixel part of the buffer, we set pages[i] to NULL to
+ * get a dummy page mapped in.. if someone reads/writes it they will get
+ * random/undefined content, but at least it won't be corrupting
+ * whatever other random page used to be mapped in, or other undefined
+ * behavior.
+ */
+ memcpy(pages, &omap_obj->pages[base_pgoff],
+ sizeof(struct page *) * slots);
+ memset(pages + slots, 0,
+ sizeof(struct page *) * (usergart[fmt].height - slots));
+
+ ret = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
+ if (ret) {
+ dev_err(obj->dev->dev, "failed to pin: %d\n", ret);
+ return ret;
+ }
+
+ i = usergart[fmt].height;
+ pfn = entry->paddr >> PAGE_SHIFT;
+
+ VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
+ pfn, pfn << PAGE_SHIFT);
+
+ while (i--) {
+ vm_insert_mixed(vma, (unsigned long)vaddr, pfn);
+ pfn += usergart[fmt].stride_pfn;
+ vaddr += PAGE_SIZE;
+ }
+
+ /* simple round-robin: */
+ usergart[fmt].last = (usergart[fmt].last + 1) % NUM_USERGART_ENTRIES;
+
+ return 0;
+}
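+
+/* Worked example of the slot arithmetic in fault_2d() (illustrative
+ * numbers only): with usergart[fmt].height = 64 rows (height_shift = 6),
+ * a fault at pgoff 130 gives base_pgoff = round_down(130, 64) = 128, the
+ * faulting page sits 130 - 128 = 2 pages into that slot row, and the
+ * converted physical page offset becomes (128 >> 6) * slots = 2 * slots.
+ */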
+
+/**
+ * omap_gem_fault - pagefault handler for GEM objects
+ * @vma: the VMA of the GEM object
+ * @vmf: fault detail
+ *
+ * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
+ * does most of the work for us including the actual map/unmap calls
+ * but we need to do the actual page work.
+ *
+ * The VMA was set up by GEM. In doing so it also ensured that the
+ * vma->vm_private_data points to the GEM object that is backing this
+ * mapping.
+ */
+int omap_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct drm_gem_object *obj = vma->vm_private_data;
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ struct drm_device *dev = obj->dev;
+ struct page **pages;
+ int ret;
+
+ /* Make sure we don't parallel update on a fault, nor move or remove
+ * something from beneath our feet
+ */
+ mutex_lock(&dev->struct_mutex);
+
+ /* if a shmem backed object, make sure we have pages attached now */
+ ret = get_pages(obj, &pages);
+ if (ret) {
+ goto fail;
+ }
+
+ /* where should we do corresponding put_pages().. we are mapping
+ * the original page, rather than thru a GART, so we can't rely
+ * on eviction to trigger this. But munmap() or all mappings should
+ * probably trigger put_pages()?
+ */
+
+ if (omap_obj->flags & OMAP_BO_TILED)
+ ret = fault_2d(obj, vma, vmf);
+ else
+ ret = fault_1d(obj, vma, vmf);
+
+
+fail:
+ mutex_unlock(&dev->struct_mutex);
+ switch (ret) {
+ case 0:
+ case -ERESTARTSYS:
+ case -EINTR:
+ return VM_FAULT_NOPAGE;
+ case -ENOMEM:
+ return VM_FAULT_OOM;
+ default:
+ return VM_FAULT_SIGBUS;
+ }
+}
+
+/** We override mainly to fix up some of the vm mapping flags.. */
+int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct omap_gem_object *omap_obj;
+ int ret;
+
+ ret = drm_gem_mmap(filp, vma);
+ if (ret) {
+ DBG("mmap failed: %d", ret);
+ return ret;
+ }
+
+ /* after drm_gem_mmap(), it is safe to access the obj */
+ omap_obj = to_omap_bo(vma->vm_private_data);
+
+ vma->vm_flags &= ~VM_PFNMAP;
+ vma->vm_flags |= VM_MIXEDMAP;
+
+ if (omap_obj->flags & OMAP_BO_WC) {
+ vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+ } else if (omap_obj->flags & OMAP_BO_UNCACHED) {
+ vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
+ } else {
+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+ }
+
+ return ret;
+}
+
+/**
+ * omap_gem_dumb_create - create a dumb buffer
+ * @drm_file: our client file
+ * @dev: our device
+ * @args: the requested arguments copied from userspace
+ *
+ * Allocate a buffer suitable for use for a frame buffer of the
+ * form described by user space. Give userspace a handle by which
+ * to reference it.
+ */
+int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ union omap_gem_size gsize;
+
+ /* in case someone tries to feed us a completely bogus stride: */
+ args->pitch = align_pitch(args->pitch, args->width, args->bpp);
+ args->size = PAGE_ALIGN(args->pitch * args->height);
+
+ gsize = (union omap_gem_size){
+ .bytes = args->size,
+ };
+
+ return omap_gem_new_handle(dev, file, gsize,
+ OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
+}
+
+/**
+ * omap_gem_dumb_destroy - destroy a dumb buffer
+ * @file: client file
+ * @dev: our DRM device
+ * @handle: the object handle
+ *
+ * Destroy a handle that was created via omap_gem_dumb_create.
+ */
+int omap_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
+ uint32_t handle)
+{
+ /* No special work needed, drop the reference and see what falls out */
+ return drm_gem_handle_delete(file, handle);
+}
+
+/**
+ * omap_gem_dumb_map - buffer mapping for dumb interface
+ * @file: our drm client file
+ * @dev: drm device
+ * @handle: GEM handle to the object (from dumb_create)
+ *
+ * Do the necessary setup to allow the mapping of the frame buffer
+ * into user memory. We don't have to do much here at the moment.
+ */
+int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
+ uint32_t handle, uint64_t *offset)
+{
+ struct drm_gem_object *obj;
+ int ret = 0;
+
+ /* GEM does all our handle to object mapping */
+ obj = drm_gem_object_lookup(dev, file, handle);
+ if (obj == NULL) {
+ ret = -ENOENT;
+ goto fail;
+ }
+
+ *offset = omap_gem_mmap_offset(obj);
+
+ drm_gem_object_unreference_unlocked(obj);
+
+fail:
+ return ret;
+}
+
+/* Set scrolling position. This allows us to implement fast scrolling
+ * for console.
+ */
+int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll)
+{
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ uint32_t npages = obj->size >> PAGE_SHIFT;
+ int ret = 0;
+
+ if (roll > npages) {
+ dev_err(obj->dev->dev, "invalid roll: %d\n", roll);
+ return -EINVAL;
+ }
+
+ omap_obj->roll = roll;
+
+ if (in_atomic() || mutex_is_locked(&obj->dev->struct_mutex)) {
+ /* this can get called from fbcon in atomic context.. so
+ * just ignore it and wait for next time called from
+ * interruptible context to update the PAT.. the result
+ * may be that user sees wrap-around instead of scrolling
+ * momentarily on the screen. If we wanted to be fancier
+ * we could perhaps schedule some workqueue work at this
+ * point.
+ */
+ return 0;
+ }
+
+ mutex_lock(&obj->dev->struct_mutex);
+
+ /* if we aren't mapped yet, we don't need to do anything */
+ if (omap_obj->block) {
+ struct page **pages;
+ ret = get_pages(obj, &pages);
+ if (ret)
+ goto fail;
+ ret = tiler_pin(omap_obj->block, pages, npages, roll, true);
+ if (ret)
+ dev_err(obj->dev->dev, "could not repin: %d\n", ret);
+ }
+
+fail:
+ mutex_unlock(&obj->dev->struct_mutex);
+
+ return ret;
+}
+
+/* Get physical address for DMA.. if 'remap' is true, and the buffer is not
+ * already contiguous, remap it to pin in physically contiguous memory.. (ie.
+ * map in TILER)
+ */
+int omap_gem_get_paddr(struct drm_gem_object *obj,
+ dma_addr_t *paddr, bool remap)
+{
+ struct omap_drm_private *priv = obj->dev->dev_private;
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ int ret = 0;
+
+ mutex_lock(&obj->dev->struct_mutex);
+
+ if (remap && is_shmem(obj) && priv->has_dmm) {
+ if (omap_obj->paddr_cnt == 0) {
+ struct page **pages;
+ uint32_t npages = obj->size >> PAGE_SHIFT;
+ enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
+ struct tiler_block *block;
+
+ BUG_ON(omap_obj->block);
+
+ ret = get_pages(obj, &pages);
+ if (ret)
+ goto fail;
+
+ if (omap_obj->flags & OMAP_BO_TILED) {
+ block = tiler_reserve_2d(fmt,
+ omap_obj->width,
+ omap_obj->height, 0);
+ } else {
+ block = tiler_reserve_1d(obj->size);
+ }
+
+ if (IS_ERR(block)) {
+ ret = PTR_ERR(block);
+ dev_err(obj->dev->dev,
+ "could not remap: %d (%d)\n", ret, fmt);
+ goto fail;
+ }
+
+ /* TODO: enable async refill.. */
+ ret = tiler_pin(block, pages, npages,
+ omap_obj->roll, true);
+ if (ret) {
+ tiler_release(block);
+ dev_err(obj->dev->dev,
+ "could not pin: %d\n", ret);
+ goto fail;
+ }
+
+ omap_obj->paddr = tiler_ssptr(block);
+ omap_obj->block = block;
+
+ DBG("got paddr: %08x", omap_obj->paddr);
+ }
+
+ omap_obj->paddr_cnt++;
+
+ *paddr = omap_obj->paddr;
+ } else if (omap_obj->flags & OMAP_BO_DMA) {
+ *paddr = omap_obj->paddr;
+ } else {
+ ret = -EINVAL;
+ }
+
+fail:
+ mutex_unlock(&obj->dev->struct_mutex);
+
+ return ret;
+}
+
+/* Release physical address, when DMA is no longer being performed.. this
+ * could potentially unpin and unmap buffers from TILER
+ */
+int omap_gem_put_paddr(struct drm_gem_object *obj)
+{
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ int ret = 0;
+
+ mutex_lock(&obj->dev->struct_mutex);
+ if (omap_obj->paddr_cnt > 0) {
+ omap_obj->paddr_cnt--;
+ if (omap_obj->paddr_cnt == 0) {
+ ret = tiler_unpin(omap_obj->block);
+ if (ret) {
+ dev_err(obj->dev->dev,
+ "could not unpin pages: %d\n", ret);
+ goto fail;
+ }
+ ret = tiler_release(omap_obj->block);
+ if (ret) {
+ dev_err(obj->dev->dev,
+ "could not release unmap: %d\n", ret);
+ }
+ omap_obj->block = NULL;
+ }
+ }
+fail:
+ mutex_unlock(&obj->dev->struct_mutex);
+ return ret;
+}
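+
+/* Illustrative usage sketch (not a function in this driver): a typical
+ * scanout path pins the buffer to get a device-visible physical address,
+ * points the display hardware at it, and drops the pin once the hardware
+ * is done.  program_scanout_hw() is a hypothetical stand-in for whatever
+ * block consumes the address.
+ *
+ *    dma_addr_t paddr;
+ *    int ret = omap_gem_get_paddr(obj, &paddr, true);
+ *    if (ret)
+ *        return ret;
+ *    program_scanout_hw(paddr);
+ *    ...
+ *    omap_gem_put_paddr(obj);
+ */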
+
+/* acquire pages when needed (for example, for DMA where physically
+ * contiguous buffer is not required)
+ */
+static int get_pages(struct drm_gem_object *obj, struct page ***pages)
+{
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ int ret = 0;
+
+ if (is_shmem(obj) && !omap_obj->pages) {
+ ret = omap_gem_attach_pages(obj);
+ if (ret) {
+ dev_err(obj->dev->dev, "could not attach pages\n");
+ return ret;
+ }
+ }
+
+ /* TODO: even phys-contig.. we should have a list of pages? */
+ *pages = omap_obj->pages;
+
+ return 0;
+}
+
+int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages)
+{
+ int ret;
+ mutex_lock(&obj->dev->struct_mutex);
+ ret = get_pages(obj, pages);
+ mutex_unlock(&obj->dev->struct_mutex);
+ return ret;
+}
+
+/* release pages when DMA no longer being performed */
+int omap_gem_put_pages(struct drm_gem_object *obj)
+{
+ /* do something here if we dynamically attach/detach pages.. at
+ * least they would no longer need to be pinned if everyone has
+ * released the pages..
+ */
+ return 0;
+}
+
+/* Get kernel virtual address for CPU access.. this more or less only
+ * exists for omap_fbdev. This should be called with struct_mutex
+ * held.
+ */
+void *omap_gem_vaddr(struct drm_gem_object *obj)
+{
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
+ if (!omap_obj->vaddr) {
+ struct page **pages;
+ int ret = get_pages(obj, &pages);
+ if (ret)
+ return ERR_PTR(ret);
+ omap_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
+ VM_MAP, pgprot_writecombine(PAGE_KERNEL));
+ }
+ return omap_obj->vaddr;
+}
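+
+/* Illustrative sketch of the expected calling convention for omap_gem_vaddr():
+ * the caller (e.g. omap_fbdev) holds struct_mutex across the call and checks
+ * for an ERR_PTR return before touching the mapping.
+ *
+ *    mutex_lock(&obj->dev->struct_mutex);
+ *    vaddr = omap_gem_vaddr(obj);
+ *    if (IS_ERR(vaddr)) {
+ *        mutex_unlock(&obj->dev->struct_mutex);
+ *        return PTR_ERR(vaddr);
+ *    }
+ *    memset(vaddr, 0, obj->size);
+ *    mutex_unlock(&obj->dev->struct_mutex);
+ */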
+
+/* Buffer Synchronization:
+ */
+
+struct omap_gem_sync_waiter {
+ struct list_head list;
+ struct omap_gem_object *omap_obj;
+ enum omap_gem_op op;
+ uint32_t read_target, write_target;
+ /* notify called w/ sync_lock held */
+ void (*notify)(void *arg);
+ void *arg;
+};
+
+/* list of omap_gem_sync_waiter.. the notify fxn gets called back when
+ * the read and/or write target count is achieved which can call a user
+ * callback (ex. to kick 3d and/or 2d), wakeup blocked task (prep for
+ * cpu access), etc.
+ */
+static LIST_HEAD(waiters);
+
+static inline bool is_waiting(struct omap_gem_sync_waiter *waiter)
+{
+ struct omap_gem_object *omap_obj = waiter->omap_obj;
+ if ((waiter->op & OMAP_GEM_READ) &&
+ (omap_obj->sync->read_complete < waiter->read_target))
+ return true;
+ if ((waiter->op & OMAP_GEM_WRITE) &&
+ (omap_obj->sync->write_complete < waiter->write_target))
+ return true;
+ return false;
+}
+
+/* macro for sync debug.. */
+#define SYNCDBG 0
+#define SYNC(fmt, ...) do { if (SYNCDBG) \
+ printk(KERN_ERR "%s:%d: "fmt"\n", \
+ __func__, __LINE__, ##__VA_ARGS__); \
+ } while (0)
+
+
+static void sync_op_update(void)
+{
+ struct omap_gem_sync_waiter *waiter, *n;
+ list_for_each_entry_safe(waiter, n, &waiters, list) {
+ if (!is_waiting(waiter)) {
+ list_del(&waiter->list);
+ SYNC("notify: %p", waiter);
+ waiter->notify(waiter->arg);
+ kfree(waiter);
+ }
+ }
+}
+
+static inline int sync_op(struct drm_gem_object *obj,
+ enum omap_gem_op op, bool start)
+{
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ int ret = 0;
+
+ spin_lock(&sync_lock);
+
+ if (!omap_obj->sync) {
+ omap_obj->sync = kzalloc(sizeof(*omap_obj->sync), GFP_ATOMIC);
+ if (!omap_obj->sync) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+ }
+
+ if (start) {
+ if (op & OMAP_GEM_READ)
+ omap_obj->sync->read_pending++;
+ if (op & OMAP_GEM_WRITE)
+ omap_obj->sync->write_pending++;
+ } else {
+ if (op & OMAP_GEM_READ)
+ omap_obj->sync->read_complete++;
+ if (op & OMAP_GEM_WRITE)
+ omap_obj->sync->write_complete++;
+ sync_op_update();
+ }
+
+unlock:
+ spin_unlock(&sync_lock);
+
+ return ret;
+}
+
+/* it is a bit lame to handle updates in this sort of polling way, but
+ * in case of PVR, the GPU can directly update read/write complete
+ * values, and not really tell us which ones it updated.. this also
+ * means that sync_lock is not quite sufficient. So we'll need to
+ * do something a bit better when it comes time to add support for
+ * separate 2d hw..
+ */
+void omap_gem_op_update(void)
+{
+ spin_lock(&sync_lock);
+ sync_op_update();
+ spin_unlock(&sync_lock);
+}
+
+/* mark the start of read and/or write operation */
+int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op)
+{
+ return sync_op(obj, op, true);
+}
+
+int omap_gem_op_finish(struct drm_gem_object *obj, enum omap_gem_op op)
+{
+ return sync_op(obj, op, false);
+}
+
+static DECLARE_WAIT_QUEUE_HEAD(sync_event);
+
+static void sync_notify(void *arg)
+{
+ struct task_struct **waiter_task = arg;
+ *waiter_task = NULL;
+ wake_up_all(&sync_event);
+}
+
+int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op)
+{
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ int ret = 0;
+ if (omap_obj->sync) {
+ struct task_struct *waiter_task = current;
+ struct omap_gem_sync_waiter *waiter =
+ kzalloc(sizeof(*waiter), GFP_KERNEL);
+
+ if (!waiter) {
+ return -ENOMEM;
+ }
+
+ waiter->omap_obj = omap_obj;
+ waiter->op = op;
+ waiter->read_target = omap_obj->sync->read_pending;
+ waiter->write_target = omap_obj->sync->write_pending;
+ waiter->notify = sync_notify;
+ waiter->arg = &waiter_task;
+
+ spin_lock(&sync_lock);
+ if (is_waiting(waiter)) {
+ SYNC("waited: %p", waiter);
+ list_add_tail(&waiter->list, &waiters);
+ spin_unlock(&sync_lock);
+ ret = wait_event_interruptible(sync_event,
+ (waiter_task == NULL));
+ spin_lock(&sync_lock);
+ if (waiter_task) {
+ SYNC("interrupted: %p", waiter);
+ /* we were interrupted */
+ list_del(&waiter->list);
+ waiter_task = NULL;
+ } else {
+ /* freed in sync_op_update() */
+ waiter = NULL;
+ }
+ }
+ spin_unlock(&sync_lock);
+
+ if (waiter) {
+ kfree(waiter);
+ }
+ }
+ return ret;
+}
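+
+/* Illustrative sketch of how the sync API is intended to be paired: a device
+ * that reads from the buffer brackets the operation with op_start/op_finish,
+ * while a CPU user calls op_sync to wait for pending writers before touching
+ * the pages.  submit_to_hw() and wait_for_hw() are hypothetical placeholders.
+ *
+ *    omap_gem_op_start(obj, OMAP_GEM_READ);
+ *    submit_to_hw(obj);
+ *    ...
+ *    wait_for_hw();
+ *    omap_gem_op_finish(obj, OMAP_GEM_READ);
+ *
+ *    omap_gem_op_sync(obj, OMAP_GEM_WRITE);
+ *    ... CPU access ...
+ */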
+
+/* call fxn(arg), either synchronously or asynchronously if the op
+ * is currently blocked.. fxn() can be called from any context
+ *
+ * (TODO for now fxn is called back from whichever context calls
+ * omap_gem_op_update().. but this could be better defined later
+ * if needed)
+ *
+ * TODO more code in common w/ _sync()..
+ */
+int omap_gem_op_async(struct drm_gem_object *obj, enum omap_gem_op op,
+ void (*fxn)(void *arg), void *arg)
+{
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ if (omap_obj->sync) {
+ struct omap_gem_sync_waiter *waiter =
+ kzalloc(sizeof(*waiter), GFP_ATOMIC);
+
+ if (!waiter) {
+ return -ENOMEM;
+ }
+
+ waiter->omap_obj = omap_obj;
+ waiter->op = op;
+ waiter->read_target = omap_obj->sync->read_pending;
+ waiter->write_target = omap_obj->sync->write_pending;
+ waiter->notify = fxn;
+ waiter->arg = arg;
+
+ spin_lock(&sync_lock);
+ if (is_waiting(waiter)) {
+ SYNC("waited: %p", waiter);
+ list_add_tail(&waiter->list, &waiters);
+ spin_unlock(&sync_lock);
+ return 0;
+ }
+
+ spin_unlock(&sync_lock);
+ }
+
+ /* no waiting.. */
+ fxn(arg);
+
+ return 0;
+}
+
+/* special API so PVR can update the buffer to use a sync-object allocated
+ * from its sync-obj heap. Only used for a newly allocated (from PVR's
+ * perspective) sync-object, so we overwrite the new syncobj w/ values
+ * from the already allocated syncobj (if there is one)
+ */
+int omap_gem_set_sync_object(struct drm_gem_object *obj, void *syncobj)
+{
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ int ret = 0;
+
+ spin_lock(&sync_lock);
+
+ if ((omap_obj->flags & OMAP_BO_EXT_SYNC) && !syncobj) {
+ /* clearing a previously set syncobj */
+ syncobj = kzalloc(sizeof(*omap_obj->sync), GFP_ATOMIC);
+ if (!syncobj) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+ memcpy(syncobj, omap_obj->sync, sizeof(*omap_obj->sync));
+ omap_obj->flags &= ~OMAP_BO_EXT_SYNC;
+ omap_obj->sync = syncobj;
+ } else if (syncobj && !(omap_obj->flags & OMAP_BO_EXT_SYNC)) {
+ /* replacing an existing syncobj */
+ if (omap_obj->sync) {
+ memcpy(syncobj, omap_obj->sync, sizeof(*omap_obj->sync));
+ kfree(omap_obj->sync);
+ }
+ omap_obj->flags |= OMAP_BO_EXT_SYNC;
+ omap_obj->sync = syncobj;
+ }
+
+unlock:
+ spin_unlock(&sync_lock);
+ return ret;
+}
+
+int omap_gem_init_object(struct drm_gem_object *obj)
+{
+ return -EINVAL; /* unused */
+}
+
+/* don't call directly.. called from GEM core when it is time to actually
+ * free the object..
+ */
+void omap_gem_free_object(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+
+ evict(obj);
+
+ if (obj->map_list.map) {
+ drm_gem_free_mmap_offset(obj);
+ }
+
+ /* don't free externally allocated backing memory */
+ if (!(omap_obj->flags & OMAP_BO_EXT_MEM)) {
+ if (omap_obj->pages) {
+ omap_gem_detach_pages(obj);
+ }
+ if (!is_shmem(obj)) {
+ dma_free_writecombine(dev->dev, obj->size,
+ omap_obj->vaddr, omap_obj->paddr);
+ } else if (omap_obj->vaddr) {
+ vunmap(omap_obj->vaddr);
+ }
+ }
+
+ /* don't free externally allocated syncobj */
+ if (!(omap_obj->flags & OMAP_BO_EXT_SYNC)) {
+ kfree(omap_obj->sync);
+ }
+
+ drm_gem_object_release(obj);
+
+ kfree(obj);
+}
+
+/* convenience method to construct a GEM buffer object, and userspace handle */
+int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
+ union omap_gem_size gsize, uint32_t flags, uint32_t *handle)
+{
+ struct drm_gem_object *obj;
+ int ret;
+
+ obj = omap_gem_new(dev, gsize, flags);
+ if (!obj)
+ return -ENOMEM;
+
+ ret = drm_gem_handle_create(file, obj, handle);
+ if (ret) {
+ drm_gem_object_release(obj);
+ kfree(obj); /* TODO isn't there a dtor to call? just copying i915 */
+ return ret;
+ }
+
+ /* drop reference from allocate - handle holds it now */
+ drm_gem_object_unreference_unlocked(obj);
+
+ return 0;
+}
+
+/* GEM buffer object constructor */
+struct drm_gem_object *omap_gem_new(struct drm_device *dev,
+ union omap_gem_size gsize, uint32_t flags)
+{
+ struct omap_drm_private *priv = dev->dev_private;
+ struct omap_gem_object *omap_obj;
+ struct drm_gem_object *obj = NULL;
+ size_t size;
+ int ret;
+
+ if (flags & OMAP_BO_TILED) {
+ if (!usergart) {
+ dev_err(dev->dev, "Tiled buffers require DMM\n");
+ goto fail;
+ }
+
+ /* tiled buffers are always shmem page backed.. when they are
+ * scanned out, they are remapped into DMM/TILER
+ */
+ flags &= ~OMAP_BO_SCANOUT;
+
+ /* currently don't allow cached buffers.. there is some caching
+ * stuff that needs to be handled better
+ */
+ flags &= ~(OMAP_BO_CACHED|OMAP_BO_UNCACHED);
+ flags |= OMAP_BO_WC;
+
+ /* align dimensions to slot boundaries... */
+ tiler_align(gem2fmt(flags),
+ &gsize.tiled.width, &gsize.tiled.height);
+
+ /* ...and calculate size based on aligned dimensions */
+ size = tiler_size(gem2fmt(flags),
+ gsize.tiled.width, gsize.tiled.height);
+ } else {
+ size = PAGE_ALIGN(gsize.bytes);
+ }
+
+ omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
+ if (!omap_obj) {
+ dev_err(dev->dev, "could not allocate GEM object\n");
+ goto fail;
+ }
+
+ obj = &omap_obj->base;
+
+ if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
+ /* attempt to allocate contiguous memory if we don't
+ * have DMM for remapping discontiguous buffers
+ */
+ omap_obj->vaddr = dma_alloc_writecombine(dev->dev, size,
+ &omap_obj->paddr, GFP_KERNEL);
+ if (omap_obj->vaddr) {
+ flags |= OMAP_BO_DMA;
+ }
+ }
+
+ omap_obj->flags = flags;
+
+ if (flags & OMAP_BO_TILED) {
+ omap_obj->width = gsize.tiled.width;
+ omap_obj->height = gsize.tiled.height;
+ }
+
+ if (flags & (OMAP_BO_DMA|OMAP_BO_EXT_MEM)) {
+ ret = drm_gem_private_object_init(dev, obj, size);
+ } else {
+ ret = drm_gem_object_init(dev, obj, size);
+ }
+
+ if (ret) {
+ goto fail;
+ }
+
+ return obj;
+
+fail:
+ if (obj) {
+ omap_gem_free_object(obj);
+ }
+ return NULL;
+}
+
+/* init/cleanup.. if DMM is used, we need to set some stuff up.. */
+void omap_gem_init(struct drm_device *dev)
+{
+ struct omap_drm_private *priv = dev->dev_private;
+ const enum tiler_fmt fmts[] = {
+ TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
+ };
+ int i, j, ret;
+
+ ret = omap_dmm_init(dev);
+ if (ret) {
+ /* DMM only supported on OMAP4 and later, so this isn't fatal */
+ dev_warn(dev->dev, "omap_dmm_init failed, disabling DMM\n");
+ return;
+ }
+
+ usergart = kzalloc(3 * sizeof(*usergart), GFP_KERNEL);
+ if (!usergart) {
+ dev_warn(dev->dev, "could not allocate usergart\n");
+ return;
+ }
+
+ /* reserve 4k aligned/wide regions for userspace mappings: */
+ for (i = 0; i < ARRAY_SIZE(fmts); i++) {
+ uint16_t h = 1, w = PAGE_SIZE >> i;
+ tiler_align(fmts[i], &w, &h);
+ /* note: since each region is a single 4kb page wide and uses the
+ * minimum number of rows, the height ends up being the same as
+ * the number of pages in the region
+ */
+ usergart[i].height = h;
+ usergart[i].height_shift = ilog2(h);
+ usergart[i].stride_pfn = tiler_stride(fmts[i]) >> PAGE_SHIFT;
+ usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
+ for (j = 0; j < NUM_USERGART_ENTRIES; j++) {
+ struct usergart_entry *entry = &usergart[i].entry[j];
+ struct tiler_block *block =
+ tiler_reserve_2d(fmts[i], w, h,
+ PAGE_SIZE);
+ if (IS_ERR(block)) {
+ dev_err(dev->dev,
+ "reserve failed: %d, %d, %ld\n",
+ i, j, PTR_ERR(block));
+ return;
+ }
+ entry->paddr = tiler_ssptr(block);
+ entry->block = block;
+
+ DBG("%d:%d: %dx%d: paddr=%08x stride=%d", i, j, w, h,
+ entry->paddr,
+ usergart[i].stride_pfn << PAGE_SHIFT);
+ }
+ }
+
+ priv->has_dmm = true;
+}
+
+void omap_gem_deinit(struct drm_device *dev)
+{
+ /* I believe we can rely on there being no more outstanding GEM
+ * objects which could depend on usergart/dmm at this point.
+ */
+ omap_dmm_remove();
+ kfree(usergart);
+}
diff --git a/drivers/staging/omapdrm/omap_gem_helpers.c b/drivers/staging/omapdrm/omap_gem_helpers.c
new file mode 100644
index 000000000000..29275c7209e9
--- /dev/null
+++ b/drivers/staging/omapdrm/omap_gem_helpers.c
@@ -0,0 +1,169 @@
+/*
+ * drivers/staging/omapdrm/omap_gem_helpers.c
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob.clark@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* temporary copy of drm_gem_{get,put}_pages() until the
+ * "drm/gem: add functions to get/put pages" patch is merged..
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/shmem_fs.h>
+
+#include <drm/drmP.h>
+
+/**
+ * _drm_gem_get_pages - helper to allocate backing pages for a GEM object
+ * @obj: obj in question
+ * @gfpmask: gfp mask of requested pages
+ */
+struct page **_drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
+{
+ struct inode *inode;
+ struct address_space *mapping;
+ struct page *p, **pages;
+ int i, npages;
+
+ /* This is the shared memory object that backs the GEM resource */
+ inode = obj->filp->f_path.dentry->d_inode;
+ mapping = inode->i_mapping;
+
+ npages = obj->size >> PAGE_SHIFT;
+
+ pages = drm_malloc_ab(npages, sizeof(struct page *));
+ if (pages == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ gfpmask |= mapping_gfp_mask(mapping);
+
+ for (i = 0; i < npages; i++) {
+ p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
+ if (IS_ERR(p))
+ goto fail;
+ pages[i] = p;
+
+ /* There is a hypothetical issue w/ drivers that require
+ * buffer memory in the low 4GB.. if the pages are un-
+ * pinned, and swapped out, they can end up swapped back
+ * in above 4GB. If pages are already in memory, then
+ * shmem_read_mapping_page_gfp will ignore the gfpmask,
+ * even if the already in-memory page disobeys the mask.
+ *
+ * It is only a theoretical issue today, because none of
+ * the devices with this limitation can be populated with
+ * enough memory to trigger the issue. But this BUG_ON()
+ * is here as a reminder in case the problem with
+ * shmem_read_mapping_page_gfp() isn't solved by the time
+ * it does become a real issue.
+ *
+ * See this thread: http://lkml.org/lkml/2011/7/11/238
+ */
+ BUG_ON((gfpmask & __GFP_DMA32) &&
+ (page_to_pfn(p) >= 0x00100000UL));
+ }
+
+ return pages;
+
+fail:
+ while (i--) {
+ page_cache_release(pages[i]);
+ }
+ drm_free_large(pages);
+ return ERR_CAST(p);
+}
+
+/**
+ * _drm_gem_put_pages - helper to free backing pages for a GEM object
+ * @obj: obj in question
+ * @pages: pages to free
+ * @dirty: if true, the pages are marked dirty before release
+ * @accessed: if true, the pages are marked accessed before release
+ */
+void _drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
+ bool dirty, bool accessed)
+{
+ int i, npages;
+
+ npages = obj->size >> PAGE_SHIFT;
+
+ for (i = 0; i < npages; i++) {
+ if (dirty)
+ set_page_dirty(pages[i]);
+
+ if (accessed)
+ mark_page_accessed(pages[i]);
+
+ /* Undo the reference we took when populating the table */
+ page_cache_release(pages[i]);
+ }
+
+ drm_free_large(pages);
+}
+
+int
+_drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
+{
+ struct drm_device *dev = obj->dev;
+ struct drm_gem_mm *mm = dev->mm_private;
+ struct drm_map_list *list;
+ struct drm_local_map *map;
+ int ret = 0;
+
+ /* Set the object up for mmap'ing */
+ list = &obj->map_list;
+ list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
+ if (!list->map)
+ return -ENOMEM;
+
+ map = list->map;
+ map->type = _DRM_GEM;
+ map->size = size;
+ map->handle = obj;
+
+ /* Get a DRM GEM mmap offset allocated... */
+ list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
+ size / PAGE_SIZE, 0, 0);
+
+ if (!list->file_offset_node) {
+ DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
+ ret = -ENOSPC;
+ goto out_free_list;
+ }
+
+ list->file_offset_node = drm_mm_get_block(list->file_offset_node,
+ size / PAGE_SIZE, 0);
+ if (!list->file_offset_node) {
+ ret = -ENOMEM;
+ goto out_free_list;
+ }
+
+ list->hash.key = list->file_offset_node->start;
+ ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
+ if (ret) {
+ DRM_ERROR("failed to add to map hash\n");
+ goto out_free_mm;
+ }
+
+ return 0;
+
+out_free_mm:
+ drm_mm_put_block(list->file_offset_node);
+out_free_list:
+ kfree(list->map);
+ list->map = NULL;
+
+ return ret;
+}
diff --git a/drivers/staging/omapdrm/omap_priv.h b/drivers/staging/omapdrm/omap_priv.h
new file mode 100644
index 000000000000..c324709aa9a1
--- /dev/null
+++ b/drivers/staging/omapdrm/omap_priv.h
@@ -0,0 +1,47 @@
+/*
+ * drivers/staging/omapdrm/omap_priv.h
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __OMAP_PRIV_H__
+#define __OMAP_PRIV_H__
+
+/* Non-userspace facing APIs
+ */
+
+/* optional platform data to configure the default configuration of which
+ * pipes/overlays/CRTCs are used.. if this is not provided, then instead the
+ * first CONFIG_DRM_OMAP_NUM_CRTCS are used, and they are each connected to
+ * one manager, with priority given to managers that are connected to
+ * detected devices. This should be a good default behavior for most cases,
+ * but yet there still might be times when you wish to do something different.
+ */
+struct omap_kms_platform_data {
+ int ovl_cnt;
+ const int *ovl_ids;
+ int mgr_cnt;
+ const int *mgr_ids;
+ int dev_cnt;
+ const char **dev_names;
+};
+
+struct omap_drm_platform_data {
+ struct omap_kms_platform_data *kms_pdata;
+ struct omap_dmm_platform_data *dmm_pdata;
+};
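+
+/* Illustrative sketch of how a board file might fill in this platform data,
+ * picking two overlays on a single manager.  The identifiers are made-up
+ * example values, not a configuration shipped by any board.
+ *
+ *    static const int example_ovl_ids[] = { 0, 1 };
+ *    static const int example_mgr_ids[] = { 0 };
+ *
+ *    static struct omap_kms_platform_data example_kms_pdata = {
+ *        .ovl_cnt = ARRAY_SIZE(example_ovl_ids),
+ *        .ovl_ids = example_ovl_ids,
+ *        .mgr_cnt = ARRAY_SIZE(example_mgr_ids),
+ *        .mgr_ids = example_mgr_ids,
+ *    };
+ *
+ *    static struct omap_drm_platform_data example_pdata = {
+ *        .kms_pdata = &example_kms_pdata,
+ *    };
+ */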
+
+#endif /* __OMAP_PRIV_H__ */
diff --git a/drivers/staging/omapdrm/tcm-sita.c b/drivers/staging/omapdrm/tcm-sita.c
new file mode 100644
index 000000000000..10d5ac3dae4b
--- /dev/null
+++ b/drivers/staging/omapdrm/tcm-sita.c
@@ -0,0 +1,703 @@
+/*
+ * tcm-sita.c
+ *
+ * SImple Tiler Allocator (SiTA): 2D and 1D allocation(reservation) algorithm
+ *
+ * Authors: Ravi Ramachandra <r.ramachandra@ti.com>,
+ * Lajos Molnar <molnar@ti.com>
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ */
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "tcm-sita.h"
+
+#define ALIGN_DOWN(value, align) ((value) & ~((align) - 1))
+
+/* Individual selection criteria for different scan areas */
+static s32 CR_L2R_T2B = CR_BIAS_HORIZONTAL;
+static s32 CR_R2L_T2B = CR_DIAGONAL_BALANCE;
+
+/*********************************************
+ * TCM API - Sita Implementation
+ *********************************************/
+static s32 sita_reserve_2d(struct tcm *tcm, u16 h, u16 w, u8 align,
+ struct tcm_area *area);
+static s32 sita_reserve_1d(struct tcm *tcm, u32 slots, struct tcm_area *area);
+static s32 sita_free(struct tcm *tcm, struct tcm_area *area);
+static void sita_deinit(struct tcm *tcm);
+
+/*********************************************
+ * Main Scanner functions
+ *********************************************/
+static s32 scan_areas_and_find_fit(struct tcm *tcm, u16 w, u16 h, u16 align,
+ struct tcm_area *area);
+
+static s32 scan_l2r_t2b(struct tcm *tcm, u16 w, u16 h, u16 align,
+ struct tcm_area *field, struct tcm_area *area);
+
+static s32 scan_r2l_t2b(struct tcm *tcm, u16 w, u16 h, u16 align,
+ struct tcm_area *field, struct tcm_area *area);
+
+static s32 scan_r2l_b2t_one_dim(struct tcm *tcm, u32 num_slots,
+ struct tcm_area *field, struct tcm_area *area);
+
+/*********************************************
+ * Support Infrastructure Methods
+ *********************************************/
+static s32 is_area_free(struct tcm_area ***map, u16 x0, u16 y0, u16 w, u16 h);
+
+static s32 update_candidate(struct tcm *tcm, u16 x0, u16 y0, u16 w, u16 h,
+ struct tcm_area *field, s32 criteria,
+ struct score *best);
+
+static void get_nearness_factor(struct tcm_area *field,
+ struct tcm_area *candidate,
+ struct nearness_factor *nf);
+
+static void get_neighbor_stats(struct tcm *tcm, struct tcm_area *area,
+ struct neighbor_stats *stat);
+
+static void fill_area(struct tcm *tcm,
+ struct tcm_area *area, struct tcm_area *parent);
+
+
+/*********************************************/
+
+/*********************************************
+ * Utility Methods
+ *********************************************/
+struct tcm *sita_init(u16 width, u16 height, struct tcm_pt *attr)
+{
+ struct tcm *tcm;
+ struct sita_pvt *pvt;
+ struct tcm_area area = {0};
+ s32 i;
+
+ if (width == 0 || height == 0)
+ return NULL;
+
+ tcm = kmalloc(sizeof(*tcm), GFP_KERNEL);
+ pvt = kmalloc(sizeof(*pvt), GFP_KERNEL);
+ if (!tcm || !pvt)
+ goto error;
+
+ memset(tcm, 0, sizeof(*tcm));
+ memset(pvt, 0, sizeof(*pvt));
+
+ /* Updating the pointers to SiTA implementation APIs */
+ tcm->height = height;
+ tcm->width = width;
+ tcm->reserve_2d = sita_reserve_2d;
+ tcm->reserve_1d = sita_reserve_1d;
+ tcm->free = sita_free;
+ tcm->deinit = sita_deinit;
+ tcm->pvt = (void *)pvt;
+
+ spin_lock_init(&(pvt->lock));
+
+ /* Creating tcm map */
+ pvt->map = kmalloc(sizeof(*pvt->map) * tcm->width, GFP_KERNEL);
+ if (!pvt->map)
+ goto error;
+
+ for (i = 0; i < tcm->width; i++) {
+ pvt->map[i] =
+ kmalloc(sizeof(**pvt->map) * tcm->height,
+ GFP_KERNEL);
+ if (pvt->map[i] == NULL) {
+ while (i--)
+ kfree(pvt->map[i]);
+ kfree(pvt->map);
+ goto error;
+ }
+ }
+
+ if (attr && attr->x <= tcm->width && attr->y <= tcm->height) {
+ pvt->div_pt.x = attr->x;
+ pvt->div_pt.y = attr->y;
+
+ } else {
+ /* Defaulting to 3:1 ratio on width for 2D area split */
+ /* Defaulting to 3:1 ratio on height for 2D and 1D split */
+ pvt->div_pt.x = (tcm->width * 3) / 4;
+ pvt->div_pt.y = (tcm->height * 3) / 4;
+ }
+
+ spin_lock(&(pvt->lock));
+ assign(&area, 0, 0, width - 1, height - 1);
+ fill_area(tcm, &area, NULL);
+ spin_unlock(&(pvt->lock));
+ return tcm;
+
+error:
+ kfree(tcm);
+ kfree(pvt);
+ return NULL;
+}
+
+static void sita_deinit(struct tcm *tcm)
+{
+ struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
+ struct tcm_area area = {0};
+ s32 i;
+
+ area.p1.x = tcm->width - 1;
+ area.p1.y = tcm->height - 1;
+
+ spin_lock(&(pvt->lock));
+ fill_area(tcm, &area, NULL);
+ spin_unlock(&(pvt->lock));
+
+ for (i = 0; i < tcm->width; i++)
+ kfree(pvt->map[i]);
+ kfree(pvt->map);
+ kfree(pvt);
+}
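+
+/* Illustrative sketch of container lifetime, with made-up dimensions: a user
+ * of the allocator creates the container once, reserves and frees areas from
+ * it, and tears it down with tcm_deinit() when done.
+ *
+ *    struct tcm *tcm = sita_init(256, 128, NULL);
+ *    if (!tcm)
+ *        return -ENOMEM;
+ *    ...
+ *    tcm_deinit(tcm);
+ */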
+
+/**
+ * Reserve a 1D area in the container
+ *
+ * @param num_slots size of 1D area
+ * @param area pointer to the area that will be populated with the
+ * reserved area
+ *
+ * @return 0 on success, non-0 error value on failure.
+ */
+static s32 sita_reserve_1d(struct tcm *tcm, u32 num_slots,
+ struct tcm_area *area)
+{
+ s32 ret;
+ struct tcm_area field = {0};
+ struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
+
+ spin_lock(&(pvt->lock));
+
+ /* Scanning entire container */
+ assign(&field, tcm->width - 1, tcm->height - 1, 0, 0);
+
+ ret = scan_r2l_b2t_one_dim(tcm, num_slots, &field, area);
+ if (!ret)
+ /* update map */
+ fill_area(tcm, area, area);
+
+ spin_unlock(&(pvt->lock));
+ return ret;
+}
+
+/**
+ * Reserve a 2D area in the container
+ *
+ * @param w width
+ * @param h height
+ * @param area pointer to the area that will be populated with the reserved
+ * area
+ *
+ * @return 0 on success, non-0 error value on failure.
+ */
+static s32 sita_reserve_2d(struct tcm *tcm, u16 h, u16 w, u8 align,
+ struct tcm_area *area)
+{
+ s32 ret;
+ struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
+
+ /* not supporting more than 64 as alignment */
+ if (align > 64)
+ return -EINVAL;
+
+ /* we prefer 1, 32 and 64 as alignment */
+ align = align <= 1 ? 1 : align <= 32 ? 32 : 64;
+
+ spin_lock(&(pvt->lock));
+ ret = scan_areas_and_find_fit(tcm, w, h, align, area);
+ if (!ret)
+ /* update map */
+ fill_area(tcm, area, area);
+
+ spin_unlock(&(pvt->lock));
+ return ret;
+}
+
+/**
+ * Unreserve a previously allocated 2D or 1D area
+ * @param area area to be freed
+ * @return 0 - success
+ */
+static s32 sita_free(struct tcm *tcm, struct tcm_area *area)
+{
+ struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
+
+ spin_lock(&(pvt->lock));
+
+ /* check that this is in fact an existing area */
+ WARN_ON(pvt->map[area->p0.x][area->p0.y] != area ||
+ pvt->map[area->p1.x][area->p1.y] != area);
+
+ /* Clear the contents of the associated tiles in the map */
+ fill_area(tcm, area, NULL);
+
+ spin_unlock(&(pvt->lock));
+
+ return 0;
+}
+
+/**
+ * Note: In general the coordinates in the scan field are relative to the scan
+ * sweep direction. The scan origin (e.g. top-left corner) will always be
+ * the p0 member of the field. Therefore, for a scan from top-left p0.x <= p1.x
+ * and p0.y <= p1.y; whereas, for a scan from bottom-right p1.x <= p0.x and p1.y
+ * <= p0.y
+ */
+
+/**
+ * Raster scan horizontally right to left from top to bottom to find a place for
+ * a 2D area of given size inside a scan field.
+ *
+ * @param w width of desired area
+ * @param h height of desired area
+ * @param align desired area alignment
+ * @param area pointer to the area that will be set to the best position
+ * @param field area to scan (inclusive)
+ *
+ * @return 0 on success, non-0 error value on failure.
+ */
+static s32 scan_r2l_t2b(struct tcm *tcm, u16 w, u16 h, u16 align,
+ struct tcm_area *field, struct tcm_area *area)
+{
+ s32 x, y;
+ s16 start_x, end_x, start_y, end_y, found_x = -1;
+ struct tcm_area ***map = ((struct sita_pvt *)tcm->pvt)->map;
+ struct score best = {{0}, {0}, {0}, 0};
+
+ start_x = field->p0.x;
+ end_x = field->p1.x;
+ start_y = field->p0.y;
+ end_y = field->p1.y;
+
+ /* check scan area co-ordinates */
+ if (field->p0.x < field->p1.x ||
+ field->p1.y < field->p0.y)
+ return -EINVAL;
+
+ /* check if allocation would fit in scan area */
+ if (w > LEN(start_x, end_x) || h > LEN(end_y, start_y))
+ return -ENOSPC;
+
+ /* adjust start_x and end_y, as allocation would not fit beyond */
+ start_x = ALIGN_DOWN(start_x - w + 1, align); /* + 1 to be inclusive */
+ end_y = end_y - h + 1;
+
+ /* check if allocation would still fit in scan area */
+ if (start_x < end_x)
+ return -ENOSPC;
+
+ /* scan field top-to-bottom, right-to-left */
+ for (y = start_y; y <= end_y; y++) {
+ for (x = start_x; x >= end_x; x -= align) {
+ if (is_area_free(map, x, y, w, h)) {
+ found_x = x;
+
+ /* update best candidate */
+ if (update_candidate(tcm, x, y, w, h, field,
+ CR_R2L_T2B, &best))
+ goto done;
+
+ /* change upper x bound */
+ end_x = x + 1;
+ break;
+ } else if (map[x][y] && map[x][y]->is2d) {
+ /* step over 2D areas */
+ x = ALIGN(map[x][y]->p0.x - w + 1, align);
+ }
+ }
+
+ /* break if you find a free area shouldering the scan field */
+ if (found_x == start_x)
+ break;
+ }
+
+ if (!best.a.tcm)
+ return -ENOSPC;
+done:
+ assign(area, best.a.p0.x, best.a.p0.y, best.a.p1.x, best.a.p1.y);
+ return 0;
+}
+
+/**
+ * Raster scan horizontally left to right from top to bottom to find a place for
+ * a 2D area of given size inside a scan field.
+ *
+ * @param w width of desired area
+ * @param h height of desired area
+ * @param align desired area alignment
+ * @param area pointer to the area that will be set to the best position
+ * @param field area to scan (inclusive)
+ *
+ * @return 0 on success, non-0 error value on failure.
+ */
+static s32 scan_l2r_t2b(struct tcm *tcm, u16 w, u16 h, u16 align,
+ struct tcm_area *field, struct tcm_area *area)
+{
+ s32 x, y;
+ s16 start_x, end_x, start_y, end_y, found_x = -1;
+ struct tcm_area ***map = ((struct sita_pvt *)tcm->pvt)->map;
+ struct score best = {{0}, {0}, {0}, 0};
+
+ start_x = field->p0.x;
+ end_x = field->p1.x;
+ start_y = field->p0.y;
+ end_y = field->p1.y;
+
+ /* check scan area co-ordinates */
+ if (field->p1.x < field->p0.x ||
+ field->p1.y < field->p0.y)
+ return -EINVAL;
+
+ /* check if allocation would fit in scan area */
+ if (w > LEN(end_x, start_x) || h > LEN(end_y, start_y))
+ return -ENOSPC;
+
+ start_x = ALIGN(start_x, align);
+
+ /* check if allocation would still fit in scan area */
+ if (w > LEN(end_x, start_x))
+ return -ENOSPC;
+
+ /* adjust end_x and end_y, as allocation would not fit beyond */
+ end_x = end_x - w + 1; /* + 1 to be inclusive */
+ end_y = end_y - h + 1;
+
+ /* scan field top-to-bottom, left-to-right */
+ for (y = start_y; y <= end_y; y++) {
+ for (x = start_x; x <= end_x; x += align) {
+ if (is_area_free(map, x, y, w, h)) {
+ found_x = x;
+
+ /* update best candidate */
+ if (update_candidate(tcm, x, y, w, h, field,
+ CR_L2R_T2B, &best))
+ goto done;
+ /* change upper x bound */
+ end_x = x - 1;
+
+ break;
+ } else if (map[x][y] && map[x][y]->is2d) {
+ /* step over 2D areas */
+ x = ALIGN_DOWN(map[x][y]->p1.x, align);
+ }
+ }
+
+ /* break if you find a free area shouldering the scan field */
+ if (found_x == start_x)
+ break;
+ }
+
+ if (!best.a.tcm)
+ return -ENOSPC;
+done:
+ assign(area, best.a.p0.x, best.a.p0.y, best.a.p1.x, best.a.p1.y);
+ return 0;
+}
+
+/**
+ * Raster scan horizontally right to left from bottom to top to find a place
+ * for a 1D area of given size inside a scan field.
+ *
+ * @param num_slots size of desired area
+ * @param area pointer to the area that will be set to the best
+ * position
+ * @param field area to scan (inclusive)
+ *
+ * @return 0 on success, non-0 error value on failure.
+ */
+static s32 scan_r2l_b2t_one_dim(struct tcm *tcm, u32 num_slots,
+ struct tcm_area *field, struct tcm_area *area)
+{
+ s32 found = 0;
+ s16 x, y;
+ struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
+ struct tcm_area *p;
+
+ /* check scan area co-ordinates */
+ if (field->p0.y < field->p1.y)
+ return -EINVAL;
+
+ /**
+ * Currently we only support full width 1D scan field, which makes sense
+ * since 1D slot-ordering spans the full container width.
+ */
+ if (tcm->width != field->p0.x - field->p1.x + 1)
+ return -EINVAL;
+
+ /* check if allocation would fit in scan area */
+ if (num_slots > tcm->width * LEN(field->p0.y, field->p1.y))
+ return -ENOSPC;
+
+ x = field->p0.x;
+ y = field->p0.y;
+
+ /* find num_slots consecutive free slots to the left */
+ while (found < num_slots) {
+ if (y < 0)
+ return -ENOSPC;
+
+ /* remember bottom-right corner */
+ if (found == 0) {
+ area->p1.x = x;
+ area->p1.y = y;
+ }
+
+ /* skip busy regions */
+ p = pvt->map[x][y];
+ if (p) {
+ /* move to left of 2D areas, top left of 1D */
+ x = p->p0.x;
+ if (!p->is2d)
+ y = p->p0.y;
+
+ /* start over */
+ found = 0;
+ } else {
+ /* count consecutive free slots */
+ found++;
+ if (found == num_slots)
+ break;
+ }
+
+ /* move to the left */
+ if (x == 0)
+ y--;
+ x = (x ? : tcm->width) - 1;
+
+ }
+
+ /* set top-left corner */
+ area->p0.x = x;
+ area->p0.y = y;
+ return 0;
+}
+
+/**
+ * Find a place for a 2D area of given size inside a scan field based on its
+ * alignment needs.
+ *
+ * @param w width of desired area
+ * @param h height of desired area
+ * @param align desired area alignment
+ * @param area pointer to the area that will be set to the best position
+ *
+ * @return 0 on success, non-0 error value on failure.
+ */
+static s32 scan_areas_and_find_fit(struct tcm *tcm, u16 w, u16 h, u16 align,
+ struct tcm_area *area)
+{
+ s32 ret = 0;
+ struct tcm_area field = {0};
+ u16 boundary_x, boundary_y;
+ struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
+
+ if (align > 1) {
+ /* prefer top-left corner */
+ boundary_x = pvt->div_pt.x - 1;
+ boundary_y = pvt->div_pt.y - 1;
+
+ /* expand width and height if needed */
+ if (w > pvt->div_pt.x)
+ boundary_x = tcm->width - 1;
+ if (h > pvt->div_pt.y)
+ boundary_y = tcm->height - 1;
+
+ assign(&field, 0, 0, boundary_x, boundary_y);
+ ret = scan_l2r_t2b(tcm, w, h, align, &field, area);
+
+ /* scan whole container if failed, but do not scan 2x */
+ if (ret != 0 && (boundary_x != tcm->width - 1 ||
+ boundary_y != tcm->height - 1)) {
+ /* scan the entire container if nothing found */
+ assign(&field, 0, 0, tcm->width - 1, tcm->height - 1);
+ ret = scan_l2r_t2b(tcm, w, h, align, &field, area);
+ }
+ } else if (align == 1) {
+ /* prefer top-right corner */
+ boundary_x = pvt->div_pt.x;
+ boundary_y = pvt->div_pt.y - 1;
+
+ /* expand width and height if needed */
+ if (w > (tcm->width - pvt->div_pt.x))
+ boundary_x = 0;
+ if (h > pvt->div_pt.y)
+ boundary_y = tcm->height - 1;
+
+ assign(&field, tcm->width - 1, 0, boundary_x, boundary_y);
+ ret = scan_r2l_t2b(tcm, w, h, align, &field, area);
+
+ /* scan whole container if failed, but do not scan 2x */
+ if (ret != 0 && (boundary_x != 0 ||
+ boundary_y != tcm->height - 1)) {
+ /* scan the entire container if nothing found */
+ assign(&field, tcm->width - 1, 0, 0, tcm->height - 1);
+ ret = scan_r2l_t2b(tcm, w, h, align, &field,
+ area);
+ }
+ }
+
+ return ret;
+}
+
+/* check if an entire area is free */
+static s32 is_area_free(struct tcm_area ***map, u16 x0, u16 y0, u16 w, u16 h)
+{
+ u16 x = 0, y = 0;
+ for (y = y0; y < y0 + h; y++) {
+ for (x = x0; x < x0 + w; x++) {
+ if (map[x][y])
+ return false;
+ }
+ }
+ return true;
+}
+
+/* fills an area with a parent tcm_area */
+static void fill_area(struct tcm *tcm, struct tcm_area *area,
+ struct tcm_area *parent)
+{
+ s32 x, y;
+ struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
+ struct tcm_area a, a_;
+
+ /* set area's tcm; otherwise, enumerator considers it invalid */
+ area->tcm = tcm;
+
+ tcm_for_each_slice(a, *area, a_) {
+ for (x = a.p0.x; x <= a.p1.x; ++x)
+ for (y = a.p0.y; y <= a.p1.y; ++y)
+ pvt->map[x][y] = parent;
+
+ }
+}
+
+/**
+ * Compares a candidate area to the current best area, and if it is a better
+ * fit, it updates the best to this one.
+ *
+ * @param x0, y0, w, h top, left, width, height of candidate area
+ * @param field scan field
+ * @param criteria scan criteria
+ * @param best best candidate and its scores
+ *
+ * @return 1 (true) if the candidate area is known to be the final best, so no
+ * more searching should be performed
+ */
+static s32 update_candidate(struct tcm *tcm, u16 x0, u16 y0, u16 w, u16 h,
+ struct tcm_area *field, s32 criteria,
+ struct score *best)
+{
+ struct score me; /* score for area */
+
+ /*
+ * NOTE: For horizontal bias we always give the first found, because our
+ * scan is horizontal-raster-based and the first candidate will always
+ * have the horizontal bias.
+ */
+ bool first = criteria & CR_BIAS_HORIZONTAL;
+
+ assign(&me.a, x0, y0, x0 + w - 1, y0 + h - 1);
+
+ /* calculate score for current candidate */
+ if (!first) {
+ get_neighbor_stats(tcm, &me.a, &me.n);
+ me.neighs = me.n.edge + me.n.busy;
+ get_nearness_factor(field, &me.a, &me.f);
+ }
+
+ /* the 1st candidate is always the best */
+ if (!best->a.tcm)
+ goto better;
+
+ BUG_ON(first);
+
+ /* diagonal balance check */
+ if ((criteria & CR_DIAGONAL_BALANCE) &&
+ best->neighs <= me.neighs &&
+ (best->neighs < me.neighs ||
+ /* this implies that neighs and occupied match */
+ best->n.busy < me.n.busy ||
+ (best->n.busy == me.n.busy &&
+ /* check the nearness factor */
+ best->f.x + best->f.y > me.f.x + me.f.y)))
+ goto better;
+
+ /* not better, keep going */
+ return 0;
+
+better:
+ /* save current area as best */
+ memcpy(best, &me, sizeof(me));
+ best->a.tcm = tcm;
+ return first;
+}
+
+/**
+ * Calculate the nearness factor of an area in a search field. The nearness
+ * factor is smaller if the area is closer to the search origin.
+ */
+static void get_nearness_factor(struct tcm_area *field, struct tcm_area *area,
+ struct nearness_factor *nf)
+{
+ /**
+ * Using signed math as field coordinates may be reversed if
+ * search direction is right-to-left or bottom-to-top.
+ */
+ nf->x = (s32)(area->p0.x - field->p0.x) * 1000 /
+ (field->p1.x - field->p0.x);
+ nf->y = (s32)(area->p0.y - field->p0.y) * 1000 /
+ (field->p1.y - field->p0.y);
+}
+
+/* get neighbor statistics */
+static void get_neighbor_stats(struct tcm *tcm, struct tcm_area *area,
+ struct neighbor_stats *stat)
+{
+ s16 x = 0, y = 0;
+ struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
+
+ /* Clearing any existing values */
+ memset(stat, 0, sizeof(*stat));
+
+ /* process top & bottom edges */
+ for (x = area->p0.x; x <= area->p1.x; x++) {
+ if (area->p0.y == 0)
+ stat->edge++;
+ else if (pvt->map[x][area->p0.y - 1])
+ stat->busy++;
+
+ if (area->p1.y == tcm->height - 1)
+ stat->edge++;
+ else if (pvt->map[x][area->p1.y + 1])
+ stat->busy++;
+ }
+
+ /* process left & right edges */
+ for (y = area->p0.y; y <= area->p1.y; ++y) {
+ if (area->p0.x == 0)
+ stat->edge++;
+ else if (pvt->map[area->p0.x - 1][y])
+ stat->busy++;
+
+ if (area->p1.x == tcm->width - 1)
+ stat->edge++;
+ else if (pvt->map[area->p1.x + 1][y])
+ stat->busy++;
+ }
+}
diff --git a/drivers/staging/omapdrm/tcm-sita.h b/drivers/staging/omapdrm/tcm-sita.h
new file mode 100644
index 000000000000..0444f868671c
--- /dev/null
+++ b/drivers/staging/omapdrm/tcm-sita.h
@@ -0,0 +1,95 @@
+/*
+ * tcm-sita.h
+ *
+ * SImple Tiler Allocator (SiTA) private structures.
+ *
+ * Author: Ravi Ramachandra <r.ramachandra@ti.com>
+ *
+ * Copyright (C) 2009-2011 Texas Instruments, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TCM_SITA_H
+#define _TCM_SITA_H
+
+#include "tcm.h"
+
+/* length between two coordinates */
+#define LEN(a, b) ((a) > (b) ? (a) - (b) + 1 : (b) - (a) + 1)
+
+enum criteria {
+ CR_MAX_NEIGHS = 0x01,
+ CR_FIRST_FOUND = 0x10,
+ CR_BIAS_HORIZONTAL = 0x20,
+ CR_BIAS_VERTICAL = 0x40,
+ CR_DIAGONAL_BALANCE = 0x80
+};
+
+/* nearness to the beginning of the search field from 0 to 1000 */
+struct nearness_factor {
+ s32 x;
+ s32 y;
+};
+
+/*
+ * Statistics on immediately neighboring slots. Edge is the number of
+ * border segments that are also border segments of the scan field. Busy
+ * refers to the number of neighbors that are occupied.
+ */
+struct neighbor_stats {
+ u16 edge;
+ u16 busy;
+};
+
+/* structure to keep the score of a potential allocation */
+struct score {
+ struct nearness_factor f;
+ struct neighbor_stats n;
+ struct tcm_area a;
+ u16 neighs; /* number of busy neighbors */
+};
+
+struct sita_pvt {
+ spinlock_t lock; /* spinlock to protect access */
+ struct tcm_pt div_pt; /* divider point splitting container */
+ struct tcm_area ***map; /* pointers to the parent area for each slot */
+};
+
+/* assign coordinates to area */
+static inline
+void assign(struct tcm_area *a, u16 x0, u16 y0, u16 x1, u16 y1)
+{
+ a->p0.x = x0;
+ a->p0.y = y0;
+ a->p1.x = x1;
+ a->p1.y = y1;
+}
+
+#endif
diff --git a/drivers/staging/omapdrm/tcm.h b/drivers/staging/omapdrm/tcm.h
new file mode 100644
index 000000000000..d273e3ee0b4c
--- /dev/null
+++ b/drivers/staging/omapdrm/tcm.h
@@ -0,0 +1,326 @@
+/*
+ * tcm.h
+ *
+ * TILER container manager specification and support functions for TI
+ * TILER driver.
+ *
+ * Author: Lajos Molnar <molnar@ti.com>
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TCM_H
+#define TCM_H
+
+struct tcm;
+
+/* point */
+struct tcm_pt {
+ u16 x;
+ u16 y;
+};
+
+/* 1d or 2d area */
+struct tcm_area {
+ bool is2d; /* whether area is 1d or 2d */
+ struct tcm *tcm; /* parent */
+ struct tcm_pt p0;
+ struct tcm_pt p1;
+};
+
+struct tcm {
+ u16 width, height; /* container dimensions */
+ int lut_id; /* Lookup table identifier */
+
+ /* 'pvt' structure shall contain any tcm details (attr) along with
+ linked list of allocated areas and mutex for mutually exclusive access
+ to the list. It may also contain copies of width and height to notice
+ any changes to the publicly available width and height fields. */
+ void *pvt;
+
+ /* function table */
+ s32 (*reserve_2d)(struct tcm *tcm, u16 height, u16 width, u8 align,
+ struct tcm_area *area);
+ s32 (*reserve_1d)(struct tcm *tcm, u32 slots, struct tcm_area *area);
+ s32 (*free) (struct tcm *tcm, struct tcm_area *area);
+ void (*deinit) (struct tcm *tcm);
+};
+
+/*=============================================================================
+ BASIC TILER CONTAINER MANAGER INTERFACE
+=============================================================================*/
+
+/*
+ * NOTE:
+ *
+ * Since some basic parameter checking is done outside the TCM algorithms,
+ * TCM implementation do NOT have to check the following:
+ *
+ * area pointer is NULL
+ * width and height fits within container
+ * number of pages is more than the size of the container
+ *
+ */
+
+struct tcm *sita_init(u16 width, u16 height, struct tcm_pt *attr);
+
+
+/**
+ * Deinitialize tiler container manager.
+ *
+ * @param tcm Pointer to container manager.
+ *
+ * The call should free as much memory as possible and remain
+ * meaningful even on failure. A NULL manager is silently
+ * ignored.
+ */
+static inline void tcm_deinit(struct tcm *tcm)
+{
+ if (tcm)
+ tcm->deinit(tcm);
+}
+
+/**
+ * Reserves a 2D area in the container.
+ *
+ * @param tcm Pointer to container manager.
+ * @param height Height(in pages) of area to be reserved.
+ * @param width Width(in pages) of area to be reserved.
+ * @param align Alignment requirement for top-left corner of area. Not
+ * all values may be supported by the container manager,
+ * but it must support 0 (1), 32 and 64.
+ * 0 value is equivalent to 1.
+ * @param area Pointer to where the reserved area should be stored.
+ *
+ * @return 0 on success. Non-0 error code on failure. Also,
+ * the tcm field of the area will be set to NULL on
+ * failure. Some error codes: -ENODEV: invalid manager,
+ * -EINVAL: invalid area, -ENOMEM: not enough space for
+ * allocation.
+ */
+static inline s32 tcm_reserve_2d(struct tcm *tcm, u16 width, u16 height,
+ u16 align, struct tcm_area *area)
+{
+ /* perform rudimentary error checking */
+ s32 res = tcm == NULL ? -ENODEV :
+ (area == NULL || width == 0 || height == 0 ||
+ /* align must be a power of 2 */
+ (align & (align - 1))) ? -EINVAL :
+ (height > tcm->height || width > tcm->width) ? -ENOMEM : 0;
+
+ if (!res) {
+ area->is2d = true;
+ res = tcm->reserve_2d(tcm, height, width, align, area);
+ area->tcm = res ? NULL : tcm;
+ }
+
+ return res;
+}
+
+/**
+ * Reserves a 1D area in the container.
+ *
+ * @param tcm Pointer to container manager.
+ * @param slots Number of (contiguous) slots to reserve.
+ * @param area Pointer to where the reserved area should be stored.
+ *
+ * @return 0 on success. Non-0 error code on failure. Also,
+ * the tcm field of the area will be set to NULL on
+ * failure. Some error codes: -ENODEV: invalid manager,
+ * -EINVAL: invalid area, -ENOMEM: not enough space for
+ * allocation.
+ */
+static inline s32 tcm_reserve_1d(struct tcm *tcm, u32 slots,
+ struct tcm_area *area)
+{
+ /* perform rudimentary error checking */
+ s32 res = tcm == NULL ? -ENODEV :
+ (area == NULL || slots == 0) ? -EINVAL :
+ slots > (tcm->width * (u32) tcm->height) ? -ENOMEM : 0;
+
+ if (!res) {
+ area->is2d = false;
+ res = tcm->reserve_1d(tcm, slots, area);
+ area->tcm = res ? NULL : tcm;
+ }
+
+ return res;
+}
+
+/**
+ * Free a previously reserved area from the container.
+ *
+ * @param area Pointer to area reserved by a prior call to
+ * tcm_reserve_1d or tcm_reserve_2d call, whether
+ * it was successful or not. (Note: all fields of
+ * the structure must match.)
+ *
+ * @return 0 on success. Non-0 error code on failure. Also, the tcm
+ * field of the area is set to NULL on success to avoid subsequent
+ * freeing. This call will succeed even if supplying
+ * the area from a failed reserve call.
+ */
+static inline s32 tcm_free(struct tcm_area *area)
+{
+ s32 res = 0; /* free succeeds by default */
+
+ if (area && area->tcm) {
+ res = area->tcm->free(area->tcm, area);
+ if (res == 0)
+ area->tcm = NULL;
+ }
+
+ return res;
+}
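+
+/* Illustrative sketch of the basic reserve/free cycle, assuming a container
+ * was created earlier (e.g. by sita_init()).  The dimensions and alignment
+ * are arbitrary example values.
+ *
+ *    struct tcm_area area;
+ *
+ *    if (tcm_reserve_2d(tcm, 16, 8, 32, &area))
+ *        return -ENOMEM;
+ *    ... use area.p0 / area.p1 to program the TILER ...
+ *    tcm_free(&area);
+ */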
+
+/*=============================================================================
+ HELPER FUNCTION FOR ANY TILER CONTAINER MANAGER
+=============================================================================*/
+
+/**
+ * This method slices off the topmost 2D slice from the parent area, and stores
+ * it in the 'slice' parameter. The 'parent' parameter will get modified to
+ * contain the remaining portion of the area. If the whole parent area can
+ * fit in a 2D slice, its tcm pointer is set to NULL to mark that it is no
+ * longer a valid area.
+ *
+ * @param parent Pointer to a VALID parent area that will get modified
+ * @param slice Pointer to the slice area that will get modified
+ */
+static inline void tcm_slice(struct tcm_area *parent, struct tcm_area *slice)
+{
+ *slice = *parent;
+
+ /* check if we need to slice */
+ if (slice->tcm && !slice->is2d &&
+ slice->p0.y != slice->p1.y &&
+ (slice->p0.x || (slice->p1.x != slice->tcm->width - 1))) {
+ /* set end point of slice (start always remains) */
+ slice->p1.x = slice->tcm->width - 1;
+ slice->p1.y = (slice->p0.x) ? slice->p0.y : slice->p1.y - 1;
+ /* adjust remaining area */
+ parent->p0.x = 0;
+ parent->p0.y = slice->p1.y + 1;
+ } else {
+ /* mark this as the last slice */
+ parent->tcm = NULL;
+ }
+}
+
+/* Verify if a tcm area is logically valid */
+static inline bool tcm_area_is_valid(struct tcm_area *area)
+{
+ return area && area->tcm &&
+ /* coordinate bounds */
+ area->p1.x < area->tcm->width &&
+ area->p1.y < area->tcm->height &&
+ area->p0.y <= area->p1.y &&
+ /* 1D coordinate relationship + p0.x check */
+ ((!area->is2d &&
+ area->p0.x < area->tcm->width &&
+ area->p0.x + area->p0.y * area->tcm->width <=
+ area->p1.x + area->p1.y * area->tcm->width) ||
+ /* 2D coordinate relationship */
+ (area->is2d &&
+ area->p0.x <= area->p1.x));
+}
+
+/* see if a coordinate is within an area */
+static inline bool __tcm_is_in(struct tcm_pt *p, struct tcm_area *a)
+{
+ u16 i;
+
+ if (a->is2d) {
+ return p->x >= a->p0.x && p->x <= a->p1.x &&
+ p->y >= a->p0.y && p->y <= a->p1.y;
+ } else {
+ i = p->x + p->y * a->tcm->width;
+ return i >= a->p0.x + a->p0.y * a->tcm->width &&
+ i <= a->p1.x + a->p1.y * a->tcm->width;
+ }
+}
+
+/* calculate area width */
+static inline u16 __tcm_area_width(struct tcm_area *area)
+{
+ return area->p1.x - area->p0.x + 1;
+}
+
+/* calculate area height */
+static inline u16 __tcm_area_height(struct tcm_area *area)
+{
+ return area->p1.y - area->p0.y + 1;
+}
+
+/* calculate number of slots in an area */
+static inline u16 __tcm_sizeof(struct tcm_area *area)
+{
+ return area->is2d ?
+ __tcm_area_width(area) * __tcm_area_height(area) :
+ (area->p1.x - area->p0.x + 1) + (area->p1.y - area->p0.y) *
+ area->tcm->width;
+}
+#define tcm_sizeof(area) __tcm_sizeof(&(area))
+#define tcm_awidth(area) __tcm_area_width(&(area))
+#define tcm_aheight(area) __tcm_area_height(&(area))
+#define tcm_is_in(pt, area) __tcm_is_in(&(pt), &(area))
+
+/* limit a 1D area to the first N pages */
+static inline s32 tcm_1d_limit(struct tcm_area *a, u32 num_pg)
+{
+ if (__tcm_sizeof(a) < num_pg)
+ return -ENOMEM;
+ if (!num_pg)
+ return -EINVAL;
+
+ a->p1.x = (a->p0.x + num_pg - 1) % a->tcm->width;
+ a->p1.y = a->p0.y + ((a->p0.x + num_pg - 1) / a->tcm->width);
+ return 0;
+}
+
+/**
+ * Iterate through 2D slices of a valid area. Behaves
+ * syntactically as a for(;;) statement.
+ *
+ * @param var Name of a local variable of type 'struct
+ * tcm_area' that will get modified to
+ * contain each slice.
+ * @param area The VALID parent area, passed by value. It
+ * will not get modified
+ * throughout the loop.
+ * @param safe Name of a local 'struct tcm_area' variable
+ * used as scratch space by the iterator.
+ *
+ */
+#define tcm_for_each_slice(var, area, safe) \
+ for (safe = area, \
+ tcm_slice(&safe, &var); \
+ var.tcm; tcm_slice(&safe, &var))
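+
+/* Illustrative sketch: walking the 2D slices of a (typically 1D) area, e.g.
+ * to handle each contiguous row range separately.  'parent' is a valid area
+ * and handle_slice() is a hypothetical per-slice consumer.
+ *
+ *    struct tcm_area slice, scratch;
+ *
+ *    tcm_for_each_slice(slice, parent, scratch)
+ *        handle_slice(slice.p0, slice.p1);
+ */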
+
+#endif
diff --git a/drivers/staging/phison/phison.c b/drivers/staging/phison/phison.c
index 683657cb21f5..d77b21f1eb2d 100644
--- a/drivers/staging/phison/phison.c
+++ b/drivers/staging/phison/phison.c
@@ -70,7 +70,7 @@ static int phison_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
}
static DEFINE_PCI_DEVICE_TABLE(phison_pci_tbl) = {
- { PCI_VENDOR_ID_PHISON, PCI_DEVICE_ID_PS5000, PCI_ANY_ID, PCI_ANY_ID,
+ { PCI_DEVICE(PCI_VENDOR_ID_PHISON, PCI_DEVICE_ID_PS5000),
PCI_CLASS_STORAGE_IDE << 8, 0xffff00, 0 },
{ 0, },
};
diff --git a/drivers/staging/pohmelfs/dir.c b/drivers/staging/pohmelfs/dir.c
index 7598e77672a5..2ee4491b7136 100644
--- a/drivers/staging/pohmelfs/dir.c
+++ b/drivers/staging/pohmelfs/dir.c
@@ -590,13 +590,13 @@ out:
* during writeback for given inode.
*/
struct pohmelfs_inode *pohmelfs_create_entry_local(struct pohmelfs_sb *psb,
- struct pohmelfs_inode *parent, struct qstr *str, u64 start, int mode)
+ struct pohmelfs_inode *parent, struct qstr *str, u64 start, umode_t mode)
{
struct pohmelfs_inode *npi;
int err = -ENOMEM;
struct netfs_inode_info info;
- dprintk("%s: name: '%s', mode: %o, start: %llu.\n",
+ dprintk("%s: name: '%s', mode: %ho, start: %llu.\n",
__func__, str->name, mode, start);
info.mode = mode;
@@ -630,7 +630,8 @@ err_out_unlock:
/*
* Create local object and bind it to dentry.
*/
-static int pohmelfs_create_entry(struct inode *dir, struct dentry *dentry, u64 start, int mode)
+static int pohmelfs_create_entry(struct inode *dir, struct dentry *dentry,
+ u64 start, umode_t mode)
{
struct pohmelfs_sb *psb = POHMELFS_SB(dir->i_sb);
struct pohmelfs_inode *npi, *parent;
@@ -661,13 +662,13 @@ static int pohmelfs_create_entry(struct inode *dir, struct dentry *dentry, u64 s
/*
* VFS create and mkdir callbacks.
*/
-static int pohmelfs_create(struct inode *dir, struct dentry *dentry, int mode,
+static int pohmelfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
struct nameidata *nd)
{
return pohmelfs_create_entry(dir, dentry, 0, mode);
}
-static int pohmelfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+static int pohmelfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
int err;
diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c
index 7a1955583b7d..807e3f324113 100644
--- a/drivers/staging/pohmelfs/inode.c
+++ b/drivers/staging/pohmelfs/inode.c
@@ -830,7 +830,6 @@ const struct address_space_operations pohmelfs_aops = {
static void pohmelfs_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
- INIT_LIST_HEAD(&inode->i_dentry);
kmem_cache_free(pohmelfs_inode_cache, POHMELFS_I(inode));
}
@@ -1370,9 +1369,9 @@ static int pohmelfs_statfs(struct dentry *dentry, struct kstatfs *buf)
return 0;
}
-static int pohmelfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
+static int pohmelfs_show_options(struct seq_file *seq, struct dentry *root)
{
- struct pohmelfs_sb *psb = POHMELFS_SB(vfs->mnt_sb);
+ struct pohmelfs_sb *psb = POHMELFS_SB(root->d_sb);
seq_printf(seq, ",idx=%u", psb->idx);
seq_printf(seq, ",trans_scan_timeout=%u", jiffies_to_msecs(psb->trans_scan_timeout));
@@ -1760,11 +1759,11 @@ err_out_exit:
return err;
}
-static int pohmelfs_show_stats(struct seq_file *m, struct vfsmount *mnt)
+static int pohmelfs_show_stats(struct seq_file *m, struct dentry *root)
{
struct netfs_state *st;
struct pohmelfs_ctl *ctl;
- struct pohmelfs_sb *psb = POHMELFS_SB(mnt->mnt_sb);
+ struct pohmelfs_sb *psb = POHMELFS_SB(root->d_sb);
struct pohmelfs_config *c;
mutex_lock(&psb->state_lock);
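Both hunks above follow the 3.3 VFS change that passes the filesystem root
dentry instead of a vfsmount to ->show_options() and ->show_stats(); the
superblock is now reached via root->d_sb. A minimal sketch of the pattern for
a hypothetical filesystem:

	static int examplefs_show_options(struct seq_file *seq, struct dentry *root)
	{
		struct examplefs_sb *sbi = root->d_sb->s_fs_info;	/* was mnt->mnt_sb */

		seq_printf(seq, ",idx=%u", sbi->idx);
		return 0;
	}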
diff --git a/drivers/staging/pohmelfs/netfs.h b/drivers/staging/pohmelfs/netfs.h
index 985b6b755d5d..f26894f2a57f 100644
--- a/drivers/staging/pohmelfs/netfs.h
+++ b/drivers/staging/pohmelfs/netfs.h
@@ -776,7 +776,7 @@ struct pohmelfs_name *pohmelfs_search_hash(struct pohmelfs_inode *pi, u32 hash);
void pohmelfs_inode_del_inode(struct pohmelfs_sb *psb, struct pohmelfs_inode *pi);
struct pohmelfs_inode *pohmelfs_create_entry_local(struct pohmelfs_sb *psb,
- struct pohmelfs_inode *parent, struct qstr *str, u64 start, int mode);
+ struct pohmelfs_inode *parent, struct qstr *str, u64 start, umode_t mode);
int pohmelfs_write_create_inode(struct pohmelfs_inode *pi);
diff --git a/drivers/staging/quatech_usb2/quatech_usb2.c b/drivers/staging/quatech_usb2/quatech_usb2.c
index 02fafecd4773..897a3a99c794 100644
--- a/drivers/staging/quatech_usb2/quatech_usb2.c
+++ b/drivers/staging/quatech_usb2/quatech_usb2.c
@@ -16,7 +16,7 @@
#include <linux/usb/serial.h>
#include <linux/uaccess.h>
-static int debug;
+static bool debug;
/* Version Information */
#define DRIVER_VERSION "v2.00"
diff --git a/drivers/staging/rtl8192e/Kconfig b/drivers/staging/rtl8192e/Kconfig
index 750c347bfbe1..f87e21101857 100644
--- a/drivers/staging/rtl8192e/Kconfig
+++ b/drivers/staging/rtl8192e/Kconfig
@@ -1,9 +1,43 @@
-config RTL8192E
- tristate "RealTek RTL8192E Wireless LAN NIC driver"
- depends on PCI && WLAN
- depends on m
- select WIRELESS_EXT
- select WEXT_PRIV
- select CRYPTO
- default N
+config RTLLIB
+ tristate "Support for rtllib wireless devices"
+ depends on WLAN && m
+ default n
+ select LIB80211
---help---
+ If you have a wireless card that uses rtllib, say
+ Y. Currently the only card is the rtl8192e.
+
+ If unsure, say N.
+
+if RTLLIB
+
+config RTLLIB_CRYPTO_CCMP
+ tristate "Support for rtllib CCMP crypto"
+ depends on RTLLIB
+ default y
+ ---help---
+ CCMP crypto driver for rtllib.
+
+ If you enabled RTLLIB, you want this.
+
+config RTLLIB_CRYPTO_TKIP
+ tristate "Support for rtllib TKIP crypto"
+ depends on RTLLIB
+ default y
+ ---help---
+ TKIP crypto driver for rtllib.
+
+ If you enabled RTLLIB, you want this.
+
+config RTLLIB_CRYPTO_WEP
+ tristate "Support for rtllib WEP crypto"
+ depends on RTLLIB
+ default y
+ ---help---
+ WEP crypto driver for rtllib.
+
+ If you enabled RTLLIB, you want this.
+
+source "drivers/staging/rtl8192e/rtl8192e/Kconfig"
+
+endif
diff --git a/drivers/staging/rtl8192e/Makefile b/drivers/staging/rtl8192e/Makefile
index a66a9ad33686..cb18db74d78c 100644
--- a/drivers/staging/rtl8192e/Makefile
+++ b/drivers/staging/rtl8192e/Makefile
@@ -1,41 +1,21 @@
-ccflags-y += -DUSE_FW_SOURCE_IMG_FILE
-ccflags-y += -DCONFIG_PM_RTL
-ccflags-y += -DCONFIG_PM
-ccflags-y += -DHAVE_NET_DEVICE_OPS
-ccflags-y += -DENABLE_DOT11D
-
-r8192e_pci-objs := \
- rtl_core.o \
- rtl_eeprom.o \
- rtl_ps.o \
- rtl_wx.o \
- rtl_cam.o \
- rtl_dm.o \
- rtl_pm.o \
- rtl_pci.o \
- rtl_debug.o \
- rtl_ethtool.o \
- r8192E_dev.o \
- r8192E_phy.o \
- r8192E_firmware.o \
- r8192E_cmdpkt.o \
- r8192E_hwimg.o \
- r8190P_rtl8256.o \
+rtllib-objs := \
+ dot11d.o \
+ rtllib_module.o \
rtllib_rx.o \
- rtllib_softmac.o \
rtllib_tx.o \
rtllib_wx.o \
- rtllib_module.o \
+ rtllib_softmac.o \
rtllib_softmac_wx.o \
- rtl819x_HTProc.o \
- rtl819x_TSProc.o \
rtl819x_BAProc.o \
- dot11d.o \
- rtllib_crypt.o \
- rtllib_crypt_tkip.o \
- rtllib_crypt_ccmp.o \
- rtllib_crypt_wep.o
+ rtl819x_HTProc.o \
+ rtl819x_TSProc.o
+
+obj-$(CONFIG_RTLLIB) += rtllib.o
+
+obj-$(CONFIG_RTLLIB_CRYPTO_CCMP) += rtllib_crypt_ccmp.o
+obj-$(CONFIG_RTLLIB_CRYPTO_TKIP) += rtllib_crypt_tkip.o
+obj-$(CONFIG_RTLLIB_CRYPTO_WEP) += rtllib_crypt_wep.o
-obj-$(CONFIG_RTL8192E) += r8192e_pci.o
+obj-$(CONFIG_RTL8192E) += rtl8192e/
ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/staging/rtl8192e/dot11d.c b/drivers/staging/rtl8192e/dot11d.c
index ee0381e0a81f..f7b14f8b7b83 100644
--- a/drivers/staging/rtl8192e/dot11d.c
+++ b/drivers/staging/rtl8192e/dot11d.c
@@ -46,7 +46,7 @@ static struct channel_list ChannelPlan[] = {
56, 60, 64}, 21}
};
-void Dot11d_Init(struct rtllib_device *ieee)
+void dot11d_init(struct rtllib_device *ieee)
{
struct rt_dot11d_info *pDot11dInfo = GET_DOT11D_INFO(ieee);
pDot11dInfo->bEnabled = false;
@@ -58,6 +58,7 @@ void Dot11d_Init(struct rtllib_device *ieee)
RESET_CIE_WATCHDOG(ieee);
}
+EXPORT_SYMBOL(dot11d_init);
void Dot11d_Channelmap(u8 channel_plan, struct rtllib_device *ieee)
{
@@ -99,6 +100,7 @@ void Dot11d_Channelmap(u8 channel_plan, struct rtllib_device *ieee)
break;
}
}
+EXPORT_SYMBOL(Dot11d_Channelmap);
void Dot11d_Reset(struct rtllib_device *ieee)
diff --git a/drivers/staging/rtl8192e/dot11d.h b/drivers/staging/rtl8192e/dot11d.h
index 032f7004a7f0..71f4549a378f 100644
--- a/drivers/staging/rtl8192e/dot11d.h
+++ b/drivers/staging/rtl8192e/dot11d.h
@@ -93,7 +93,7 @@ static inline void cpMacAddr(unsigned char *des, unsigned char *src)
#define IS_DOT11D_STATE_DONE(__pIeeeDev) \
(GET_DOT11D_INFO(__pIeeeDev)->State == DOT11D_STATE_DONE)
-void Dot11d_Init(struct rtllib_device *dev);
+void dot11d_init(struct rtllib_device *dev);
void Dot11d_Channelmap(u8 channel_plan, struct rtllib_device *ieee);
void Dot11d_Reset(struct rtllib_device *dev);
void Dot11d_UpdateCountryIe(struct rtllib_device *dev, u8 *pTaddr,
diff --git a/drivers/staging/rtl8192e/rtl8192e/Kconfig b/drivers/staging/rtl8192e/rtl8192e/Kconfig
new file mode 100644
index 000000000000..50e0d91a409a
--- /dev/null
+++ b/drivers/staging/rtl8192e/rtl8192e/Kconfig
@@ -0,0 +1,9 @@
+config RTL8192E
+ tristate "RealTek RTL8192E Wireless LAN NIC driver"
+ depends on PCI && WLAN && RTLLIB
+ depends on m
+ select WIRELESS_EXT
+ select WEXT_PRIV
+ select CRYPTO
+ default n
+ ---help---
diff --git a/drivers/staging/rtl8192e/rtl8192e/Makefile b/drivers/staging/rtl8192e/rtl8192e/Makefile
new file mode 100644
index 000000000000..313a92ec6833
--- /dev/null
+++ b/drivers/staging/rtl8192e/rtl8192e/Makefile
@@ -0,0 +1,21 @@
+r8192e_pci-objs := \
+ r8192E_dev.o \
+ r8192E_phy.o \
+ r8192E_firmware.o \
+ r8192E_cmdpkt.o \
+ r8192E_hwimg.o \
+ r8190P_rtl8256.o \
+ rtl_cam.o \
+ rtl_core.o \
+ rtl_debug.o \
+ rtl_dm.o \
+ rtl_eeprom.o \
+ rtl_ethtool.o \
+ rtl_pci.o \
+ rtl_pm.o \
+ rtl_ps.o \
+ rtl_wx.o \
+
+obj-$(CONFIG_RTL8192E) += r8192e_pci.o
+
+ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/staging/rtl8192e/r8190P_def.h b/drivers/staging/rtl8192e/rtl8192e/r8190P_def.h
index b7bb71fa9ecd..b7bb71fa9ecd 100644
--- a/drivers/staging/rtl8192e/r8190P_def.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8190P_def.h
diff --git a/drivers/staging/rtl8192e/r8190P_rtl8256.c b/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c
index 0da56c80f088..0da56c80f088 100644
--- a/drivers/staging/rtl8192e/r8190P_rtl8256.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c
diff --git a/drivers/staging/rtl8192e/r8190P_rtl8256.h b/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.h
index 64e831d2f4e5..64e831d2f4e5 100644
--- a/drivers/staging/rtl8192e/r8190P_rtl8256.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.h
diff --git a/drivers/staging/rtl8192e/r8192E_cmdpkt.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c
index 58d044ea5524..58d044ea5524 100644
--- a/drivers/staging/rtl8192e/r8192E_cmdpkt.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c
diff --git a/drivers/staging/rtl8192e/r8192E_cmdpkt.h b/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.h
index 23219e17e5a1..23219e17e5a1 100644
--- a/drivers/staging/rtl8192e/r8192E_cmdpkt.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.h
diff --git a/drivers/staging/rtl8192e/r8192E_dev.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
index 808aab6fa5ef..808aab6fa5ef 100644
--- a/drivers/staging/rtl8192e/r8192E_dev.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
diff --git a/drivers/staging/rtl8192e/r8192E_dev.h b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.h
index b9b3b52f9120..b9b3b52f9120 100644
--- a/drivers/staging/rtl8192e/r8192E_dev.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.h
diff --git a/drivers/staging/rtl8192e/r8192E_firmware.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c
index 37719859bdae..37719859bdae 100644
--- a/drivers/staging/rtl8192e/r8192E_firmware.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c
diff --git a/drivers/staging/rtl8192e/r8192E_firmware.h b/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.h
index caa878833106..caa878833106 100644
--- a/drivers/staging/rtl8192e/r8192E_firmware.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.h
diff --git a/drivers/staging/rtl8192e/r8192E_hw.h b/drivers/staging/rtl8192e/rtl8192e/r8192E_hw.h
index 43c3fb859d10..43c3fb859d10 100644
--- a/drivers/staging/rtl8192e/r8192E_hw.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_hw.h
diff --git a/drivers/staging/rtl8192e/r8192E_hwimg.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_hwimg.c
index 08e7dbb6694b..08e7dbb6694b 100644
--- a/drivers/staging/rtl8192e/r8192E_hwimg.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_hwimg.c
diff --git a/drivers/staging/rtl8192e/r8192E_hwimg.h b/drivers/staging/rtl8192e/rtl8192e/r8192E_hwimg.h
index 019836bb36c2..019836bb36c2 100644
--- a/drivers/staging/rtl8192e/r8192E_hwimg.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_hwimg.h
diff --git a/drivers/staging/rtl8192e/r8192E_phy.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c
index 7fe69a3348d7..3e705efaaf22 100644
--- a/drivers/staging/rtl8192e/r8192E_phy.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c
@@ -23,7 +23,6 @@
#include "r8190P_rtl8256.h"
#include "r8192E_phy.h"
#include "rtl_dm.h"
-#include "dot11d.h"
#include "r8192E_hwimg.h"
@@ -859,7 +858,7 @@ static u8 rtl8192_phy_SwChnlStepByStep(struct net_device *dev, u8 channel,
RT_TRACE(COMP_TRACE, "====>%s()====stage:%d, step:%d, channel:%d\n",
__func__, *stage, *step, channel);
- if (!IsLegalChannel(priv->rtllib, channel)) {
+ if (!rtllib_legal_channel(priv->rtllib, channel)) {
RT_TRACE(COMP_ERR, "=============>set to illegal channel:%d\n",
channel);
return true;
diff --git a/drivers/staging/rtl8192e/r8192E_phy.h b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.h
index 7318f8857af2..7318f8857af2 100644
--- a/drivers/staging/rtl8192e/r8192E_phy.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.h
diff --git a/drivers/staging/rtl8192e/r8192E_phyreg.h b/drivers/staging/rtl8192e/rtl8192e/r8192E_phyreg.h
index 7899dd538dcd..7899dd538dcd 100644
--- a/drivers/staging/rtl8192e/r8192E_phyreg.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_phyreg.h
diff --git a/drivers/staging/rtl8192e/r819xE_phyreg.h b/drivers/staging/rtl8192e/rtl8192e/r819xE_phyreg.h
index d5de279f6644..d5de279f6644 100644
--- a/drivers/staging/rtl8192e/r819xE_phyreg.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r819xE_phyreg.h
diff --git a/drivers/staging/rtl8192e/rtl_cam.c b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c
index baf3b6342e44..baf3b6342e44 100644
--- a/drivers/staging/rtl8192e/rtl_cam.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c
diff --git a/drivers/staging/rtl8192e/rtl_cam.h b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.h
index fa607f98b172..fa607f98b172 100644
--- a/drivers/staging/rtl8192e/rtl_cam.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.h
diff --git a/drivers/staging/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
index 5ad96649f407..71adb6b3344d 100644
--- a/drivers/staging/rtl8192e/rtl_core.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
@@ -53,9 +53,7 @@
#include "rtl_wx.h"
#include "rtl_dm.h"
-#ifdef CONFIG_PM_RTL
#include "rtl_pm.h"
-#endif
int hwwep = 1;
static int channels = 0x3fff;
@@ -581,7 +579,7 @@ static void rtl8192_update_beacon(void *data)
struct rtllib_network *net = &ieee->current_network;
if (ieee->pHTInfo->bCurrentHTSupport)
- HTUpdateSelfAndPeerSetting(ieee, net);
+ HT_update_self_and_peer_setting(ieee, net);
ieee->pHTInfo->bCurrentRT2RTLongSlotTime =
net->bssht.bdRT2RTLongSlotTime;
ieee->pHTInfo->RT2RT_HT_Mode = net->bssht.RT2RT_HT_Mode;
@@ -1289,7 +1287,7 @@ static short rtl8192_get_channel_map(struct net_device *dev)
priv->ChannelPlan = COUNTRY_CODE_FCC;
}
RT_TRACE(COMP_INIT, "Channel plan is %d\n", priv->ChannelPlan);
- Dot11d_Init(priv->rtllib);
+ dot11d_init(priv->rtllib);
Dot11d_Channelmap(priv->ChannelPlan, priv->rtllib);
for (i = 1; i <= 11; i++)
(priv->rtllib->active_channel_map)[i] = 1;
@@ -1305,7 +1303,6 @@ static short rtl8192_init(struct net_device *dev)
memset(&(priv->stats), 0, sizeof(struct rt_stats));
- rtl8192_dbgp_flag_init(dev);
rtl8192_init_priv_handler(dev);
rtl8192_init_priv_constant(dev);
rtl8192_init_priv_variable(dev);
@@ -2839,7 +2836,6 @@ done:
/****************************************************************************
---------------------------- PCI_STUFF---------------------------
*****************************************************************************/
-#ifdef HAVE_NET_DEVICE_OPS
static const struct net_device_ops rtl8192_netdev_ops = {
.ndo_open = rtl8192_open,
.ndo_stop = rtl8192_close,
@@ -2851,7 +2847,6 @@ static const struct net_device_ops rtl8192_netdev_ops = {
.ndo_change_mtu = eth_change_mtu,
.ndo_start_xmit = rtllib_xmit,
};
-#endif
static int __devinit rtl8192_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
@@ -2938,17 +2933,7 @@ static int __devinit rtl8192_pci_probe(struct pci_dev *pdev,
dev->irq = pdev->irq;
priv->irq = 0;
-#ifdef HAVE_NET_DEVICE_OPS
dev->netdev_ops = &rtl8192_netdev_ops;
-#else
- dev->open = rtl8192_open;
- dev->stop = rtl8192_close;
- dev->tx_timeout = rtl8192_tx_timeout;
- dev->do_ioctl = rtl8192_ioctl;
- dev->set_multicast_list = r8192_set_multicast;
- dev->set_mac_address = r8192_set_mac_adr;
- dev->hard_start_xmit = rtllib_xmit;
-#endif
dev->wireless_handlers = (struct iw_handler_def *)
&r8192_wx_handlers_def;
@@ -2974,10 +2959,7 @@ static int __devinit rtl8192_pci_probe(struct pci_dev *pdev,
register_netdev(dev);
RT_TRACE(COMP_INIT, "dev name: %s\n", dev->name);
- err = rtl_debug_module_init(priv, dev->name);
- if (err)
- RT_TRACE(COMP_DBG, "failed to create debugfs files. Ignoring "
- "error: %d\n", err);
+
rtl8192_proc_init_one(dev);
if (priv->polling_timer_on == 0)
@@ -3015,7 +2997,6 @@ static void __devexit rtl8192_pci_disconnect(struct pci_dev *pdev)
del_timer_sync(&priv->gpio_polling_timer);
cancel_delayed_work(&priv->gpio_change_rf_wq);
priv->polling_timer_on = 0;
- rtl_debug_module_remove(priv);
rtl8192_proc_remove_one(dev);
rtl8192_down(dev, true);
deinit_hal_dm(dev);
@@ -3103,43 +3084,9 @@ bool NicIFDisableNIC(struct net_device *dev)
static int __init rtl8192_pci_module_init(void)
{
- int ret;
- int error;
-
- ret = rtllib_init();
- if (ret) {
- printk(KERN_ERR "rtllib_init() failed %d\n", ret);
- return ret;
- }
- ret = rtllib_crypto_init();
- if (ret) {
- printk(KERN_ERR "rtllib_crypto_init() failed %d\n", ret);
- return ret;
- }
- ret = rtllib_crypto_tkip_init();
- if (ret) {
- printk(KERN_ERR "rtllib_crypto_tkip_init() failed %d\n", ret);
- return ret;
- }
- ret = rtllib_crypto_ccmp_init();
- if (ret) {
- printk(KERN_ERR "rtllib_crypto_ccmp_init() failed %d\n", ret);
- return ret;
- }
- ret = rtllib_crypto_wep_init();
- if (ret) {
- printk(KERN_ERR "rtllib_crypto_wep_init() failed %d\n", ret);
- return ret;
- }
printk(KERN_INFO "\nLinux kernel driver for RTL8192E WLAN cards\n");
printk(KERN_INFO "Copyright (c) 2007-2008, Realsil Wlan Driver\n");
- error = rtl_create_debugfs_root();
- if (error) {
- RT_TRACE(COMP_DBG, "Create debugfs root fail: %d\n", error);
- goto err_out;
- }
-
rtl8192_proc_module_init();
if (0 != pci_register_driver(&rtl8192_pci_driver)) {
DMESG("No device found");
@@ -3147,9 +3094,6 @@ static int __init rtl8192_pci_module_init(void)
return -ENODEV;
}
return 0;
-err_out:
- return error;
-
}
static void __exit rtl8192_pci_module_exit(void)
@@ -3158,12 +3102,6 @@ static void __exit rtl8192_pci_module_exit(void)
RT_TRACE(COMP_DOWN, "Exiting");
rtl8192_proc_module_remove();
- rtl_remove_debugfs_root();
- rtllib_crypto_tkip_exit();
- rtllib_crypto_ccmp_exit();
- rtllib_crypto_wep_exit();
- rtllib_crypto_deinit();
- rtllib_exit();
}
void check_rfctrl_gpio_timer(unsigned long data)
diff --git a/drivers/staging/rtl8192e/rtl_core.h b/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
index f9af5153d9cf..2a2519cc284d 100644
--- a/drivers/staging/rtl8192e/rtl_core.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
@@ -44,11 +44,14 @@
#include <linux/proc_fs.h>
#include <linux/if_arp.h>
#include <linux/random.h>
-#include <linux/version.h>
#include <linux/io.h>
-#include "rtllib.h"
-#include "dot11d.h"
+/* Need this defined before including local include files */
+#define DRV_NAME "rtl819xE"
+
+#include "../rtllib.h"
+
+#include "../dot11d.h"
#include "r8192E_firmware.h"
#include "r8192E_hw.h"
@@ -56,7 +59,6 @@
#include "r8190P_def.h"
#include "r8192E_dev.h"
-#include "rtl_debug.h"
#include "rtl_eeprom.h"
#include "rtl_ps.h"
#include "rtl_pci.h"
@@ -67,8 +69,6 @@
#define DRV_AUTHOR "<wlanfae@realtek.com>"
#define DRV_VERSION "0014.0401.2010"
-#define DRV_NAME "rtl819xE"
-
#define IS_HARDWARE_TYPE_819xP(_priv) \
((((struct r8192_priv *)rtllib_priv(dev))->card_8192 == NIC_8190P) || \
(((struct r8192_priv *)rtllib_priv(dev))->card_8192 == NIC_8192E))
@@ -215,41 +215,6 @@ enum RTL819x_PHY_PARAM {
RTL819X_EFUSE_MAP = 19,
};
-enum RTL_DEBUG {
- COMP_TRACE = BIT0,
- COMP_DBG = BIT1,
- COMP_INIT = BIT2,
- COMP_RECV = BIT3,
- COMP_SEND = BIT4,
- COMP_CMD = BIT5,
- COMP_POWER = BIT6,
- COMP_EPROM = BIT7,
- COMP_SWBW = BIT8,
- COMP_SEC = BIT9,
- COMP_LPS = BIT10,
- COMP_QOS = BIT11,
- COMP_RATE = BIT12,
- COMP_RXDESC = BIT13,
- COMP_PHY = BIT14,
- COMP_DIG = BIT15,
- COMP_TXAGC = BIT16,
- COMP_HALDM = BIT17,
- COMP_POWER_TRACKING = BIT18,
- COMP_CH = BIT19,
- COMP_RF = BIT20,
- COMP_FIRMWARE = BIT21,
- COMP_HT = BIT22,
- COMP_RESET = BIT23,
- COMP_CMDPKT = BIT24,
- COMP_SCAN = BIT25,
- COMP_PS = BIT26,
- COMP_DOWN = BIT27,
- COMP_INTR = BIT28,
- COMP_LED = BIT29,
- COMP_MLME = BIT30,
- COMP_ERR = BIT31
-};
-
enum nic_t {
NIC_UNKNOWN = 0,
NIC_8192E = 1,
@@ -1121,4 +1086,10 @@ void ActUpdateChannelAccessSetting(struct net_device *dev,
enum wireless_mode WirelessMode,
struct channel_access_setting *ChnlAccessSetting);
+/* proc stuff from rtl_debug.c */
+void rtl8192_proc_init_one(struct net_device *dev);
+void rtl8192_proc_remove_one(struct net_device *dev);
+void rtl8192_proc_module_init(void);
+void rtl8192_proc_module_remove(void);
+
#endif
diff --git a/drivers/staging/rtl8192e/rtl_crypto.h b/drivers/staging/rtl8192e/rtl8192e/rtl_crypto.h
index ee57c0f4fa69..ee57c0f4fa69 100644
--- a/drivers/staging/rtl8192e/rtl_crypto.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_crypto.h
diff --git a/drivers/staging/rtl8192e/rtl_debug.c b/drivers/staging/rtl8192e/rtl8192e/rtl_debug.c
index 22bc2dd6e438..c19b14cd6f77 100644
--- a/drivers/staging/rtl8192e/rtl_debug.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_debug.c
@@ -22,91 +22,12 @@
* Contact Information:
* wlanfae <wlanfae@realtek.com>
******************************************************************************/
-#include "rtl_debug.h"
#include "rtl_core.h"
#include "r8192E_phy.h"
#include "r8192E_phyreg.h"
#include "r8190P_rtl8256.h" /* RTL8225 Radio frontend */
#include "r8192E_cmdpkt.h"
-u32 rt_global_debug_component = \
- COMP_ERR ;
-
-/*------------------Declare variable-----------------------*/
-u32 DBGP_Type[DBGP_TYPE_MAX];
-
-/*-----------------------------------------------------------------------------
- * Function: DBGP_Flag_Init
- *
- * Overview: Refresh all debug print control flag content to zero.
- *
- * Input: NONE
- *
- * Output: NONE
- *
- * Return: NONE
- *
- * Revised History:
- * When Who Remark
- * 10/20/2006 MHC Create Version 0.
- *
- *---------------------------------------------------------------------------*/
-void rtl8192_dbgp_flag_init(struct net_device *dev)
-{
- u8 i;
-
- for (i = 0; i < DBGP_TYPE_MAX; i++)
- DBGP_Type[i] = 0;
-
-
-} /* DBGP_Flag_Init */
-
-/* this is only for debugging */
-void print_buffer(u32 *buffer, int len)
-{
- int i;
- u8 *buf = (u8 *)buffer;
-
- printk(KERN_INFO "ASCII BUFFER DUMP (len: %x):\n", len);
-
- for (i = 0; i < len; i++)
- printk(KERN_INFO "%c", buf[i]);
-
- printk(KERN_INFO "\nBINARY BUFFER DUMP (len: %x):\n", len);
-
- for (i = 0; i < len; i++)
- printk(KERN_INFO "%x", buf[i]);
-
- printk(KERN_INFO "\n");
-}
-
-/* this is only for debug */
-void dump_eprom(struct net_device *dev)
-{
- int i;
-
- for (i = 0; i < 0xff; i++)
- RT_TRACE(COMP_INIT, "EEPROM addr %x : %x", i,
- eprom_read(dev, i));
-}
-
-/* this is only for debug */
-void rtl8192_dump_reg(struct net_device *dev)
-{
- int i;
- int n;
- int max = 0x5ff;
-
- RT_TRACE(COMP_INIT, "Dumping NIC register map");
-
- for (n = 0; n <= max; ) {
- printk(KERN_INFO "\nD: %2x> ", n);
- for (i = 0; i < 16 && n <= max; i++, n++)
- printk(KERN_INFO "%2x ", read_nic_byte(dev, n));
- }
- printk(KERN_INFO "\n");
-}
-
/****************************************************************************
-----------------------------PROCFS STUFF-------------------------
*****************************************************************************/
diff --git a/drivers/staging/rtl8192e/rtl_dm.c b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
index a7fa9aad6f2d..a7fa9aad6f2d 100644
--- a/drivers/staging/rtl8192e/rtl_dm.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
diff --git a/drivers/staging/rtl8192e/rtl_dm.h b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.h
index ab44a9a6927c..ab44a9a6927c 100644
--- a/drivers/staging/rtl8192e/rtl_dm.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.h
diff --git a/drivers/staging/rtl8192e/rtl_eeprom.c b/drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.c
index c1ccff4a8321..c1ccff4a8321 100644
--- a/drivers/staging/rtl8192e/rtl_eeprom.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.c
diff --git a/drivers/staging/rtl8192e/rtl_eeprom.h b/drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.h
index 9452e1683a72..9452e1683a72 100644
--- a/drivers/staging/rtl8192e/rtl_eeprom.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.h
diff --git a/drivers/staging/rtl8192e/rtl_ethtool.c b/drivers/staging/rtl8192e/rtl8192e/rtl_ethtool.c
index 36452fb7cef8..36452fb7cef8 100644
--- a/drivers/staging/rtl8192e/rtl_ethtool.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_ethtool.c
diff --git a/drivers/staging/rtl8192e/rtl_pci.c b/drivers/staging/rtl8192e/rtl8192e/rtl_pci.c
index ddadcc3e4e7c..ddadcc3e4e7c 100644
--- a/drivers/staging/rtl8192e/rtl_pci.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_pci.c
diff --git a/drivers/staging/rtl8192e/rtl_pci.h b/drivers/staging/rtl8192e/rtl8192e/rtl_pci.h
index 7ea5a47dfd2b..28c7da677a80 100644
--- a/drivers/staging/rtl8192e/rtl_pci.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_pci.h
@@ -27,7 +27,6 @@
#include <linux/types.h>
#include <linux/pci.h>
-#include "rtllib.h"
static inline void NdisRawWritePortUlong(u32 port, u32 val)
{
diff --git a/drivers/staging/rtl8192e/rtl_pm.c b/drivers/staging/rtl8192e/rtl8192e/rtl_pm.c
index 92e2fde7f5f4..8e1a5d55dce8 100644
--- a/drivers/staging/rtl8192e/rtl_pm.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_pm.c
@@ -17,7 +17,6 @@
* wlanfae <wlanfae@realtek.com>
******************************************************************************/
-#ifdef CONFIG_PM_RTL
#include "rtl_core.h"
#include "r8192E_hw.h"
#include "r8190P_rtl8256.h"
@@ -133,4 +132,3 @@ int rtl8192E_enable_wake(struct pci_dev *dev, pm_message_t state, int enable)
return -EAGAIN;
}
-#endif
diff --git a/drivers/staging/rtl8192e/rtl_pm.h b/drivers/staging/rtl8192e/rtl8192e/rtl_pm.h
index 4d7f4067cc78..e5299fc3b34a 100644
--- a/drivers/staging/rtl8192e/rtl_pm.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_pm.h
@@ -17,8 +17,6 @@
* wlanfae <wlanfae@realtek.com>
******************************************************************************/
-#ifdef CONFIG_PM_RTL
-
#ifndef R8192E_PM_H
#define R8192E_PM_H
@@ -31,5 +29,3 @@ int rtl8192E_resume(struct pci_dev *dev);
int rtl8192E_enable_wake(struct pci_dev *dev, pm_message_t state, int enable);
#endif
-
-#endif
diff --git a/drivers/staging/rtl8192e/rtl_ps.c b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c
index c9a7c563b682..c9a7c563b682 100644
--- a/drivers/staging/rtl8192e/rtl_ps.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c
diff --git a/drivers/staging/rtl8192e/rtl_ps.h b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.h
index a9c2d799c08f..df79d6c4ca03 100644
--- a/drivers/staging/rtl8192e/rtl_ps.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.h
@@ -26,7 +26,7 @@
#define _RTL_PS_H
#include <linux/types.h>
-#include "rtllib.h"
+
struct net_device;
#define RT_CHECK_FOR_HANG_PERIOD 2
diff --git a/drivers/staging/rtl8192e/rtl_wx.c b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
index 93b1edbe6bae..4e93669210af 100644
--- a/drivers/staging/rtl8192e/rtl_wx.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
@@ -19,7 +19,6 @@
#include <linux/string.h>
#include "rtl_core.h"
-#include "dot11d.h"
#define RATE_COUNT 12
static u32 rtl8192_rates[] = {
@@ -803,7 +802,7 @@ static int r8192_wx_set_enc(struct net_device *dev,
switch (wrqu->encoding.flags & IW_ENCODE_INDEX) {
case 0:
- key_idx = ieee->tx_keyidx;
+ key_idx = ieee->crypt_info.tx_keyidx;
break;
case 1:
key_idx = 0;
diff --git a/drivers/staging/rtl8192e/rtl_wx.h b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.h
index 6a51a25ec87d..6a51a25ec87d 100644
--- a/drivers/staging/rtl8192e/rtl_wx.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.h
diff --git a/drivers/staging/rtl8192e/rtl819x_BAProc.c b/drivers/staging/rtl8192e/rtl819x_BAProc.c
index 8b9d85c48be6..32fbbc9d0d92 100644
--- a/drivers/staging/rtl8192e/rtl819x_BAProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_BAProc.c
@@ -18,7 +18,6 @@
******************************************************************************/
#include "rtllib.h"
#include "rtl819x_BA.h"
-#include "rtl_core.h"
static void ActivateBAEntry(struct rtllib_device *ieee, struct ba_record *pBA,
u16 Time)
diff --git a/drivers/staging/rtl8192e/rtl819x_HTProc.c b/drivers/staging/rtl8192e/rtl819x_HTProc.c
index b1c0c566882f..8b7412980ebb 100644
--- a/drivers/staging/rtl8192e/rtl819x_HTProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_HTProc.c
@@ -943,8 +943,8 @@ void HTResetSelfAndSavePeerSetting(struct rtllib_device *ieee,
}
}
-void HTUpdateSelfAndPeerSetting(struct rtllib_device *ieee,
- struct rtllib_network *pNetwork)
+void HT_update_self_and_peer_setting(struct rtllib_device *ieee,
+ struct rtllib_network *pNetwork)
{
struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;
struct ht_info_ele *pPeerHTInfo =
@@ -955,6 +955,7 @@ void HTUpdateSelfAndPeerSetting(struct rtllib_device *ieee,
pHTInfo->CurrentOpMode = pPeerHTInfo->OptMode;
}
}
+EXPORT_SYMBOL(HT_update_self_and_peer_setting);
void HTUseDefaultSetting(struct rtllib_device *ieee)
{
diff --git a/drivers/staging/rtl8192e/rtl819x_TSProc.c b/drivers/staging/rtl8192e/rtl819x_TSProc.c
index 09a602f74329..711a096be7a7 100644
--- a/drivers/staging/rtl8192e/rtl819x_TSProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_TSProc.c
@@ -497,6 +497,7 @@ void RemovePeerTS(struct rtllib_device *ieee, u8 *Addr)
}
}
}
+EXPORT_SYMBOL(RemovePeerTS);
void RemoveAllTS(struct rtllib_device *ieee)
{
diff --git a/drivers/staging/rtl8192e/rtl_debug.h b/drivers/staging/rtl8192e/rtl_debug.h
deleted file mode 100644
index 50fb9a9b828a..000000000000
--- a/drivers/staging/rtl8192e/rtl_debug.h
+++ /dev/null
@@ -1,299 +0,0 @@
-/******************************************************************************
- * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
- *
- * Based on the r8180 driver, which is:
- * Copyright 2004-2005 Andrea Merello <andreamrl@tiscali.it>, et al.
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
-******************************************************************************/
-#ifndef _RTL_DEBUG_H
-#define _RTL_DEBUG_H
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/version.h>
-#include <linux/debugfs.h>
-
-struct r8192_priv;
-struct _tx_desc_8192se;
-struct _TX_DESC_8192CE;
-struct net_device;
-
-#define DBG_LOUD 4
-
-#define RT_ASSERT(_Exp, Fmt) \
- if (!(_Exp)) { \
- printk("Rtl819x: "); \
- printk Fmt; \
- }
-
-enum dbgp_flag {
- FQoS = 0,
- FTX = 1,
- FRX = 2,
- FSEC = 3,
- FMGNT = 4,
- FMLME = 5,
- FRESOURCE = 6,
- FBEACON = 7,
- FISR = 8,
- FPHY = 9,
- FMP = 10,
- FEEPROM = 11,
- FPWR = 12,
- FDM = 13,
- FDBGCtrl = 14,
- FC2H = 15,
- FBT = 16,
- FINIT = 17,
- FIOCTL = 18,
- DBGP_TYPE_MAX
-};
-
-#define QoS_INIT BIT0
-#define QoS_VISTA BIT1
-
-#define TX_DESC BIT0
-#define TX_DESC_TID BIT1
-
-#define RX_DATA BIT0
-#define RX_PHY_STS BIT1
-#define RX_PHY_SS BIT2
-#define RX_PHY_SQ BIT3
-#define RX_PHY_ASTS BIT4
-#define RX_ERR_LEN BIT5
-#define RX_DEFRAG BIT6
-#define RX_ERR_RATE BIT7
-
-
-
-#define MEDIA_STS BIT0
-#define LINK_STS BIT1
-
-#define OS_CHK BIT0
-
-#define BCN_SHOW BIT0
-#define BCN_PEER BIT1
-
-#define ISR_CHK BIT0
-
-#define PHY_BBR BIT0
-#define PHY_BBW BIT1
-#define PHY_RFR BIT2
-#define PHY_RFW BIT3
-#define PHY_MACR BIT4
-#define PHY_MACW BIT5
-#define PHY_ALLR BIT6
-#define PHY_ALLW BIT7
-#define PHY_TXPWR BIT8
-#define PHY_PWRDIFF BIT9
-
-#define MP_RX BIT0
-#define MP_SWICH_CH BIT1
-
-#define EEPROM_W BIT0
-#define EFUSE_PG BIT1
-#define EFUSE_READ_ALL BIT2
-
-#define LPS BIT0
-#define IPS BIT1
-#define PWRSW BIT2
-#define PWRHW BIT3
-#define PWRHAL BIT4
-
-#define WA_IOT BIT0
-#define DM_PWDB BIT1
-#define DM_Monitor BIT2
-#define DM_DIG BIT3
-#define DM_EDCA_Turbo BIT4
-
-#define DbgCtrl_Trace BIT0
-#define DbgCtrl_InbandNoise BIT1
-
-#define BT_TRACE BIT0
-#define BT_RFPoll BIT1
-
-#define C2H_Summary BIT0
-#define C2H_PacketData BIT1
-#define C2H_ContentData BIT2
-#define BT_TRACE BIT0
-#define BT_RFPoll BIT1
-
-#define INIT_EEPROM BIT0
-#define INIT_TxPower BIT1
-#define INIT_IQK BIT2
-#define INIT_RF BIT3
-
-#define IOCTL_TRACE BIT0
-#define IOCTL_BT_EVENT BIT1
-#define IOCTL_BT_EVENT_DETAIL BIT2
-#define IOCTL_BT_TX_ACLDATA BIT3
-#define IOCTL_BT_TX_ACLDATA_DETAIL BIT4
-#define IOCTL_BT_RX_ACLDATA BIT5
-#define IOCTL_BT_RX_ACLDATA_DETAIL BIT6
-#define IOCTL_BT_HCICMD BIT7
-#define IOCTL_BT_HCICMD_DETAIL BIT8
-#define IOCTL_IRP BIT9
-#define IOCTL_IRP_DETAIL BIT10
-#define IOCTL_CALLBACK_FUN BIT11
-#define IOCTL_STATE BIT12
-#define IOCTL_BT_TP BIT13
-#define IOCTL_BT_LOGO BIT14
-
-/* 2007/07/13 MH ------For DeBuG Print modeue------*/
-/*------------------------------Define structure----------------------------*/
-
-
-/*------------------------Export Marco Definition---------------------------*/
-#define DEBUG_PRINT 1
-
-#if (DEBUG_PRINT == 1)
-#define RTPRINT(dbgtype, dbgflag, printstr) \
-{ \
- if (DBGP_Type[dbgtype] & dbgflag) { \
- printk printstr; \
- } \
-}
-
-#define RTPRINT_ADDR(dbgtype, dbgflag, printstr, _Ptr) \
-{ \
- if (DBGP_Type[dbgtype] & dbgflag) { \
- int __i; \
- u8 *ptr = (u8 *)_Ptr; \
- printk printstr; \
- printk(" "); \
- for (__i = 0; __i < 6; __i++) \
- printk("%02X%s", ptr[__i], \
- (__i == 5) ? "" : "-"); \
- printk("\n"); \
- } \
-}
-
-#define RTPRINT_DATA(dbgtype, dbgflag, _TitleString, _HexData, _HexDataLen)\
-{ \
- if (DBGP_Type[dbgtype] & dbgflag) { \
- int __i; \
- u8 *ptr = (u8 *)_HexData; \
- printk(_TitleString); \
- for (__i = 0; __i < (int)_HexDataLen; __i++) { \
- printk("%02X%s", ptr[__i], (((__i + 1) \
- % 4) == 0) ? " " : " "); \
- if (((__i + 1) % 16) == 0) \
- printk("\n"); \
- } \
- printk("\n"); \
- } \
-}
-#else
-#define RTPRINT(dbgtype, dbgflag, printstr)
-#define RTPRINT_ADDR(dbgtype, dbgflag, printstr, _Ptr)
-#define RTPRINT_DATA(dbgtype, dbgflag, _TitleString, _HexData, _HexDataLen)
-#endif
-
-extern u32 DBGP_Type[DBGP_TYPE_MAX];
-
-#define RT_PRINT_DATA(_Comp, _Level, _TitleString, _HexData, _HexDataLen) \
-do {\
- if (((_Comp) & rt_global_debug_component) && \
- (_Level <= rt_global_debug_component)) { \
- int __i; \
- u8* ptr = (u8 *)_HexData; \
- printk(KERN_INFO "Rtl819x: "); \
- printk(_TitleString); \
- for (__i = 0; __i < (int)_HexDataLen; __i++) { \
- printk("%02X%s", ptr[__i], (((__i + 1) % \
- 4) == 0) ? " " : " "); \
- if (((__i + 1) % 16) == 0) \
- printk("\n"); \
- } \
- printk("\n"); \
- } \
-} while (0);
-
-#define DMESG(x, a...)
-#define DMESGW(x, a...)
-#define DMESGE(x, a...)
-extern u32 rt_global_debug_component;
-#define RT_TRACE(component, x, args...) \
-do { \
- if (rt_global_debug_component & component) \
- printk(KERN_DEBUG DRV_NAME ":" x "\n" , \
- ##args);\
-} while (0);
-
-#define assert(expr) \
- if (!(expr)) { \
- printk(KERN_INFO "Assertion failed! %s,%s,%s,line=%d\n", \
- #expr, __FILE__, __func__, __LINE__); \
- }
-#define RT_DEBUG_DATA(level, data, datalen) \
- do { \
- if ((rt_global_debug_component & (level)) == (level)) {\
- int _i; \
- u8 *_pdata = (u8 *)data; \
- printk(KERN_DEBUG DRV_NAME ": %s()\n", __func__); \
- for (_i = 0; _i < (int)(datalen); _i++) { \
- printk(KERN_INFO "%2x ", _pdata[_i]); \
- if ((_i+1) % 16 == 0) \
- printk("\n"); \
- } \
- printk(KERN_INFO "\n"); \
- } \
- } while (0)
-
-struct rtl_fs_debug {
- const char *name;
- struct dentry *dir_drv;
- struct dentry *debug_register;
- u32 hw_type;
- u32 hw_offset;
- bool hw_holding;
-};
-
-void print_buffer(u32 *buffer, int len);
-void dump_eprom(struct net_device *dev);
-void rtl8192_dump_reg(struct net_device *dev);
-
-/* debugfs stuff */
-static inline int rtl_debug_module_init(struct r8192_priv *priv,
- const char *name)
-{
- return 0;
-}
-
-static inline void rtl_debug_module_remove(struct r8192_priv *priv)
-{
-}
-
-static inline int rtl_create_debugfs_root(void)
-{
- return 0;
-}
-
-static inline void rtl_remove_debugfs_root(void)
-{
-}
-
-/* proc stuff */
-void rtl8192_proc_init_one(struct net_device *dev);
-void rtl8192_proc_remove_one(struct net_device *dev);
-void rtl8192_proc_module_init(void);
-void rtl8192_proc_module_remove(void);
-void rtl8192_dbgp_flag_init(struct net_device *dev);
-
-#endif
diff --git a/drivers/staging/rtl8192e/rtllib.h b/drivers/staging/rtl8192e/rtllib.h
index de25975ccee4..e26aec86a5c8 100644
--- a/drivers/staging/rtl8192e/rtllib.h
+++ b/drivers/staging/rtl8192e/rtllib.h
@@ -25,7 +25,6 @@
#define RTLLIB_H
#include <linux/if_ether.h> /* ETH_ALEN */
#include <linux/kernel.h> /* ARRAY_SIZE */
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
@@ -36,12 +35,14 @@
#include <linux/delay.h>
#include <linux/wireless.h>
+#include "rtllib_debug.h"
#include "rtl819x_HT.h"
#include "rtl819x_BA.h"
#include "rtl819x_TS.h"
#include <linux/netdevice.h>
#include <linux/if_arp.h> /* ARPHRD_ETHER */
+#include <net/lib80211.h>
#define MAX_PRECMD_CNT 16
#define MAX_RFDEPENDCMD_CNT 16
@@ -870,69 +871,6 @@ enum _REG_PREAMBLE_MODE {
#define WLAN_ERP_USE_PROTECTION (1<<1)
#define WLAN_ERP_BARKER_PREAMBLE (1<<2)
-/* Status codes */
-enum rtllib_statuscode {
- WLAN_STATUS_SUCCESS = 0,
- WLAN_STATUS_UNSPECIFIED_FAILURE = 1,
- WLAN_STATUS_CAPS_UNSUPPORTED = 10,
- WLAN_STATUS_REASSOC_NO_ASSOC = 11,
- WLAN_STATUS_ASSOC_DENIED_UNSPEC = 12,
- WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG = 13,
- WLAN_STATUS_UNKNOWN_AUTH_TRANSACTION = 14,
- WLAN_STATUS_CHALLENGE_FAIL = 15,
- WLAN_STATUS_AUTH_TIMEOUT = 16,
- WLAN_STATUS_AP_UNABLE_TO_HANDLE_NEW_STA = 17,
- WLAN_STATUS_ASSOC_DENIED_RATES = 18,
- /* 802.11b */
- WLAN_STATUS_ASSOC_DENIED_NOSHORTPREAMBLE = 19,
- WLAN_STATUS_ASSOC_DENIED_NOPBCC = 20,
- WLAN_STATUS_ASSOC_DENIED_NOAGILITY = 21,
- /* 802.11h */
- WLAN_STATUS_ASSOC_DENIED_NOSPECTRUM = 22,
- WLAN_STATUS_ASSOC_REJECTED_BAD_POWER = 23,
- WLAN_STATUS_ASSOC_REJECTED_BAD_SUPP_CHAN = 24,
- /* 802.11g */
- WLAN_STATUS_ASSOC_DENIED_NOSHORTTIME = 25,
- WLAN_STATUS_ASSOC_DENIED_NODSSSOFDM = 26,
- /* 802.11i */
- WLAN_STATUS_INVALID_IE = 40,
- WLAN_STATUS_INVALID_GROUP_CIPHER = 41,
- WLAN_STATUS_INVALID_PAIRWISE_CIPHER = 42,
- WLAN_STATUS_INVALID_AKMP = 43,
- WLAN_STATUS_UNSUPP_RSN_VERSION = 44,
- WLAN_STATUS_INVALID_RSN_IE_CAP = 45,
- WLAN_STATUS_CIPHER_SUITE_REJECTED = 46,
-};
-
-/* Reason codes */
-enum rtllib_reasoncode {
- WLAN_REASON_UNSPECIFIED = 1,
- WLAN_REASON_PREV_AUTH_NOT_VALID = 2,
- WLAN_REASON_DEAUTH_LEAVING = 3,
- WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY = 4,
- WLAN_REASON_DISASSOC_AP_BUSY = 5,
- WLAN_REASON_CLASS2_FRAME_FROM_NONAUTH_STA = 6,
- WLAN_REASON_CLASS3_FRAME_FROM_NONASSOC_STA = 7,
- WLAN_REASON_DISASSOC_STA_HAS_LEFT = 8,
- WLAN_REASON_STA_REQ_ASSOC_WITHOUT_AUTH = 9,
- /* 802.11h */
- WLAN_REASON_DISASSOC_BAD_POWER = 10,
- WLAN_REASON_DISASSOC_BAD_SUPP_CHAN = 11,
- /* 802.11i */
- WLAN_REASON_INVALID_IE = 13,
- WLAN_REASON_MIC_FAILURE = 14,
- WLAN_REASON_4WAY_HANDSHAKE_TIMEOUT = 15,
- WLAN_REASON_GROUP_KEY_HANDSHAKE_TIMEOUT = 16,
- WLAN_REASON_IE_DIFFERENT = 17,
- WLAN_REASON_INVALID_GROUP_CIPHER = 18,
- WLAN_REASON_INVALID_PAIRWISE_CIPHER = 19,
- WLAN_REASON_INVALID_AKMP = 20,
- WLAN_REASON_UNSUPP_RSN_VERSION = 21,
- WLAN_REASON_INVALID_RSN_IE_CAP = 22,
- WLAN_REASON_IEEE8021X_FAILED = 23,
- WLAN_REASON_CIPHER_SUITE_REJECTED = 24,
-};
-
#define RTLLIB_STATMASK_SIGNAL (1<<0)
#define RTLLIB_STATMASK_RSSI (1<<1)
#define RTLLIB_STATMASK_NOISE (1<<2)
@@ -1122,8 +1060,6 @@ struct rtllib_stats {
struct rtllib_device;
-#include "rtllib_crypt.h"
-
#define SEC_KEY_1 (1<<0)
#define SEC_KEY_2 (1<<1)
#define SEC_KEY_3 (1<<2)
@@ -1146,7 +1082,6 @@ struct rtllib_device;
#define SEC_ALG_TKIP 2
#define SEC_ALG_CCMP 4
-#define WEP_KEYS 4
#define WEP_KEY_LEN 13
#define SCM_KEY_LEN 32
#define SCM_TEMPORAL_KEY_LENGTH 16
@@ -1158,8 +1093,8 @@ struct rtllib_security {
auth_algo:4,
unicast_uses_group:1,
encrypt:1;
- u8 key_sizes[WEP_KEYS];
- u8 keys[WEP_KEYS][SCM_KEY_LEN];
+ u8 key_sizes[NUM_WEP_KEYS];
+ u8 keys[NUM_WEP_KEYS][SCM_KEY_LEN];
u8 level;
u16 flags;
} __packed;
@@ -2251,14 +2186,10 @@ struct rtllib_device {
u8 ap_mac_addr[6];
u16 pairwise_key_type;
u16 group_key_type;
- struct list_head crypt_deinit_list;
- struct rtllib_crypt_data *crypt[WEP_KEYS];
- int tx_keyidx; /* default TX key index (crypt[tx_keyidx]) */
- struct sw_cam_table swcamtable[TOTAL_CAM_ENTRY];
- struct timer_list crypt_deinit_timer;
- int crypt_quiesced;
+ struct lib80211_crypt_info crypt_info;
+ struct sw_cam_table swcamtable[TOTAL_CAM_ENTRY];
int bcrx_sta_key; /* use individual keys to override default keys even
* with RX of broad/multicast frames */
@@ -2774,7 +2705,7 @@ extern void rtllib_rx_mgt(struct rtllib_device *ieee,
struct rtllib_rx_stats *stats);
extern void rtllib_rx_probe_rq(struct rtllib_device *ieee,
struct sk_buff *skb);
-extern int IsLegalChannel(struct rtllib_device *rtllib, u8 channel);
+extern int rtllib_legal_channel(struct rtllib_device *rtllib, u8 channel);
/* rtllib_wx.c */
extern int rtllib_wx_get_scan(struct rtllib_device *ieee,
@@ -2804,7 +2735,7 @@ extern int rtllib_wx_set_gen_ie(struct rtllib_device *ieee, u8 *ie, size_t len);
/* rtllib_softmac.c */
extern short rtllib_is_54g(struct rtllib_network *net);
-extern short rtllib_is_shortslot(struct rtllib_network net);
+extern short rtllib_is_shortslot(const struct rtllib_network *net);
extern int rtllib_rx_frame_softmac(struct rtllib_device *ieee,
struct sk_buff *skb,
struct rtllib_rx_stats *rx_stats, u16 type,
@@ -2971,8 +2902,8 @@ extern void HTInitializeHTInfo(struct rtllib_device *ieee);
extern void HTInitializeBssDesc(struct bss_ht *pBssHT);
extern void HTResetSelfAndSavePeerSetting(struct rtllib_device *ieee,
struct rtllib_network *pNetwork);
-extern void HTUpdateSelfAndPeerSetting(struct rtllib_device *ieee,
- struct rtllib_network *pNetwork);
+extern void HT_update_self_and_peer_setting(struct rtllib_device *ieee,
+ struct rtllib_network *pNetwork);
extern u8 HTGetHighestMCSRate(struct rtllib_device *ieee, u8 *pMCSRateSet,
u8 *pMCSFilter);
extern u8 MCS_FILTER_ALL[];
@@ -3052,21 +2983,6 @@ static inline const char *escape_essid(const char *essid, u8 essid_len)
(HTMcsToDataRate(_ieee, (u8)_MGN_RATE)))
/* fun with the built-in rtllib stack... */
-int rtllib_init(void);
-void rtllib_exit(void);
-int rtllib_crypto_init(void);
-void rtllib_crypto_deinit(void);
-int rtllib_crypto_tkip_init(void);
-void rtllib_crypto_tkip_exit(void);
-int rtllib_crypto_ccmp_init(void);
-void rtllib_crypto_ccmp_exit(void);
-int rtllib_crypto_wep_init(void);
-void rtllib_crypto_wep_exit(void);
-
-void rtllib_MgntDisconnectIBSS(struct rtllib_device *rtllib);
-void rtllib_MlmeDisassociateRequest(struct rtllib_device *rtllib, u8 *asSta,
- u8 asRsn);
-void rtllib_MgntDisconnectAP(struct rtllib_device *rtllib, u8 asRsn);
bool rtllib_MgntDisconnect(struct rtllib_device *rtllib, u8 asRsn);
@@ -3133,12 +3049,5 @@ extern void rtllib_TURBO_Info(struct rtllib_device *ieee, u8 **tag_p);
#define MUTEX_LOCK_PRIV(pmutex) mutex_lock(pmutex)
#define MUTEX_UNLOCK_PRIV(pmutex) mutex_unlock(pmutex)
#endif
-static inline void dump_buf(u8 *buf, u32 len)
-{
- u32 i;
- printk(KERN_INFO "-----------------Len %d----------------\n", len);
- for (i = 0; i < len; i++)
- printk("%2.2x-", *(buf+i));
- printk("\n");
-}
+
#endif /* RTLLIB_H */
diff --git a/drivers/staging/rtl8192e/rtllib_crypt.c b/drivers/staging/rtl8192e/rtllib_crypt.c
index acda37b81848..86152d0e6b5d 100644
--- a/drivers/staging/rtl8192e/rtllib_crypt.c
+++ b/drivers/staging/rtl8192e/rtllib_crypt.c
@@ -11,7 +11,6 @@
*
*/
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
@@ -22,7 +21,7 @@
struct rtllib_crypto_alg {
struct list_head list;
- struct rtllib_crypto_ops *ops;
+ struct lib80211_crypto_ops *ops;
};
@@ -33,15 +32,15 @@ struct rtllib_crypto {
static struct rtllib_crypto *hcrypt;
-void rtllib_crypt_deinit_entries(struct rtllib_device *ieee,
+void rtllib_crypt_deinit_entries(struct lib80211_crypt_info *info,
int force)
{
struct list_head *ptr, *n;
- struct rtllib_crypt_data *entry;
+ struct lib80211_crypt_data *entry;
- for (ptr = ieee->crypt_deinit_list.next, n = ptr->next;
- ptr != &ieee->crypt_deinit_list; ptr = n, n = ptr->next) {
- entry = list_entry(ptr, struct rtllib_crypt_data, list);
+ for (ptr = info->crypt_deinit_list.next, n = ptr->next;
+ ptr != &info->crypt_deinit_list; ptr = n, n = ptr->next) {
+ entry = list_entry(ptr, struct lib80211_crypt_data, list);
if (atomic_read(&entry->refcnt) != 0 && !force)
continue;
@@ -53,28 +52,30 @@ void rtllib_crypt_deinit_entries(struct rtllib_device *ieee,
kfree(entry);
}
}
+EXPORT_SYMBOL(rtllib_crypt_deinit_entries);
void rtllib_crypt_deinit_handler(unsigned long data)
{
- struct rtllib_device *ieee = (struct rtllib_device *)data;
+ struct lib80211_crypt_info *info = (struct lib80211_crypt_info *)data;
unsigned long flags;
- spin_lock_irqsave(&ieee->lock, flags);
- rtllib_crypt_deinit_entries(ieee, 0);
- if (!list_empty(&ieee->crypt_deinit_list)) {
+ spin_lock_irqsave(info->lock, flags);
+ rtllib_crypt_deinit_entries(info, 0);
+ if (!list_empty(&info->crypt_deinit_list)) {
printk(KERN_DEBUG "%s: entries remaining in delayed crypt "
- "deletion list\n", ieee->dev->name);
- ieee->crypt_deinit_timer.expires = jiffies + HZ;
- add_timer(&ieee->crypt_deinit_timer);
+ "deletion list\n", info->name);
+ info->crypt_deinit_timer.expires = jiffies + HZ;
+ add_timer(&info->crypt_deinit_timer);
}
- spin_unlock_irqrestore(&ieee->lock, flags);
+ spin_unlock_irqrestore(info->lock, flags);
}
+EXPORT_SYMBOL(rtllib_crypt_deinit_handler);
-void rtllib_crypt_delayed_deinit(struct rtllib_device *ieee,
- struct rtllib_crypt_data **crypt)
+void rtllib_crypt_delayed_deinit(struct lib80211_crypt_info *info,
+ struct lib80211_crypt_data **crypt)
{
- struct rtllib_crypt_data *tmp;
+ struct lib80211_crypt_data *tmp;
unsigned long flags;
if (*crypt == NULL)
@@ -87,16 +88,17 @@ void rtllib_crypt_delayed_deinit(struct rtllib_device *ieee,
* decrypt operations. Use a list of delayed deinits to avoid needing
* locking. */
- spin_lock_irqsave(&ieee->lock, flags);
- list_add(&tmp->list, &ieee->crypt_deinit_list);
- if (!timer_pending(&ieee->crypt_deinit_timer)) {
- ieee->crypt_deinit_timer.expires = jiffies + HZ;
- add_timer(&ieee->crypt_deinit_timer);
+ spin_lock_irqsave(info->lock, flags);
+ list_add(&tmp->list, &info->crypt_deinit_list);
+ if (!timer_pending(&info->crypt_deinit_timer)) {
+ info->crypt_deinit_timer.expires = jiffies + HZ;
+ add_timer(&info->crypt_deinit_timer);
}
- spin_unlock_irqrestore(&ieee->lock, flags);
+ spin_unlock_irqrestore(info->lock, flags);
}
+EXPORT_SYMBOL(rtllib_crypt_delayed_deinit);
-int rtllib_register_crypto_ops(struct rtllib_crypto_ops *ops)
+int rtllib_register_crypto_ops(struct lib80211_crypto_ops *ops)
{
unsigned long flags;
struct rtllib_crypto_alg *alg;
@@ -104,11 +106,10 @@ int rtllib_register_crypto_ops(struct rtllib_crypto_ops *ops)
if (hcrypt == NULL)
return -1;
- alg = kmalloc(sizeof(*alg), GFP_KERNEL);
+ alg = kzalloc(sizeof(*alg), GFP_KERNEL);
if (alg == NULL)
return -ENOMEM;
- memset(alg, 0, sizeof(*alg));
alg->ops = ops;
spin_lock_irqsave(&hcrypt->lock, flags);
@@ -120,8 +121,9 @@ int rtllib_register_crypto_ops(struct rtllib_crypto_ops *ops)
return 0;
}
+EXPORT_SYMBOL(rtllib_register_crypto_ops);
-int rtllib_unregister_crypto_ops(struct rtllib_crypto_ops *ops)
+int rtllib_unregister_crypto_ops(struct lib80211_crypto_ops *ops)
{
unsigned long flags;
struct list_head *ptr;
@@ -150,9 +152,10 @@ int rtllib_unregister_crypto_ops(struct rtllib_crypto_ops *ops)
return del_alg ? 0 : -1;
}
+EXPORT_SYMBOL(rtllib_unregister_crypto_ops);
-struct rtllib_crypto_ops *rtllib_get_crypto_ops(const char *name)
+struct lib80211_crypto_ops *rtllib_get_crypto_ops(const char *name)
{
unsigned long flags;
struct list_head *ptr;
@@ -177,12 +180,13 @@ struct rtllib_crypto_ops *rtllib_get_crypto_ops(const char *name)
else
return NULL;
}
+EXPORT_SYMBOL(rtllib_get_crypto_ops);
static void * rtllib_crypt_null_init(int keyidx) { return (void *) 1; }
static void rtllib_crypt_null_deinit(void *priv) {}
-static struct rtllib_crypto_ops rtllib_crypt_null = {
+static struct lib80211_crypto_ops rtllib_crypt_null = {
.name = "NULL",
.init = rtllib_crypt_null_init,
.deinit = rtllib_crypt_null_deinit,
@@ -192,8 +196,10 @@ static struct rtllib_crypto_ops rtllib_crypt_null = {
.decrypt_msdu = NULL,
.set_key = NULL,
.get_key = NULL,
- .extra_prefix_len = 0,
- .extra_postfix_len = 0,
+ .extra_mpdu_prefix_len = 0,
+ .extra_mpdu_postfix_len = 0,
+ .extra_msdu_prefix_len = 0,
+ .extra_msdu_postfix_len = 0,
.owner = THIS_MODULE,
};
@@ -202,15 +208,14 @@ int __init rtllib_crypto_init(void)
{
int ret = -ENOMEM;
- hcrypt = kmalloc(sizeof(*hcrypt), GFP_KERNEL);
+ hcrypt = kzalloc(sizeof(*hcrypt), GFP_KERNEL);
if (!hcrypt)
goto out;
- memset(hcrypt, 0, sizeof(*hcrypt));
INIT_LIST_HEAD(&hcrypt->algs);
spin_lock_init(&hcrypt->lock);
- ret = rtllib_register_crypto_ops(&rtllib_crypt_null);
+ ret = lib80211_register_crypto_ops(&rtllib_crypt_null);
if (ret < 0) {
kfree(hcrypt);
hcrypt = NULL;
@@ -239,3 +244,8 @@ void __exit rtllib_crypto_deinit(void)
kfree(hcrypt);
}
+
+module_init(rtllib_crypto_init);
+module_exit(rtllib_crypto_deinit);
+
+MODULE_LICENSE("GPL");
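The conversion above moves the delayed-deinit bookkeeping into the shared
lib80211 structures. A hedged sketch of how a device might initialize its
lib80211_crypt_info and resolve one of the renamed ops (illustrative only;
error handling trimmed, helper name hypothetical):

	#include <linux/kmod.h>
	#include <net/lib80211.h>
	#include "rtllib.h"

	static int example_init_crypt(struct rtllib_device *ieee)
	{
		int err;

		/* ties the crypt_deinit list and timer to this device's lock */
		err = lib80211_crypt_info_init(&ieee->crypt_info, ieee->dev->name,
					       &ieee->lock);
		if (err)
			return err;

		/* the crypto modules now register under the "R-" names */
		if (lib80211_get_crypto_ops("R-CCMP") == NULL)
			request_module("rtllib_crypt_ccmp");

		return 0;
	}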
diff --git a/drivers/staging/rtl8192e/rtllib_crypt.h b/drivers/staging/rtl8192e/rtllib_crypt.h
index 49b90b73ed9c..e177c9287b44 100644
--- a/drivers/staging/rtl8192e/rtllib_crypt.h
+++ b/drivers/staging/rtl8192e/rtllib_crypt.h
@@ -25,61 +25,11 @@
#include <linux/skbuff.h>
-struct rtllib_crypto_ops {
- const char *name;
-
- /* init new crypto context (e.g., allocate private data space,
- * select IV, etc.); returns NULL on failure or pointer to allocated
- * private data on success */
- void * (*init)(int keyidx);
-
- /* deinitialize crypto context and free allocated private data */
- void (*deinit)(void *priv);
-
- /* encrypt/decrypt return < 0 on error or >= 0 on success. The return
- * value from decrypt_mpdu is passed as the keyidx value for
- * decrypt_msdu. skb must have enough head and tail room for the
- * encryption; if not, error will be returned; these functions are
- * called for all MPDUs (i.e., fragments).
- */
- int (*encrypt_mpdu)(struct sk_buff *skb, int hdr_len, void *priv);
- int (*decrypt_mpdu)(struct sk_buff *skb, int hdr_len, void *priv);
-
- /* These functions are called for full MSDUs, i.e. full frames.
- * These can be NULL if full MSDU operations are not needed. */
- int (*encrypt_msdu)(struct sk_buff *skb, int hdr_len, void *priv);
- int (*decrypt_msdu)(struct sk_buff *skb, int keyidx, int hdr_len,
- void *priv, struct rtllib_device* ieee);
-
- int (*set_key)(void *key, int len, u8 *seq, void *priv);
- int (*get_key)(void *key, int len, u8 *seq, void *priv);
-
- /* procfs handler for printing out key information and possible
- * statistics */
- char * (*print_stats)(char *p, void *priv);
-
- /* maximum number of bytes added by encryption; encrypt buf is
- * allocated with extra_prefix_len bytes, copy of in_buf, and
- * extra_postfix_len; encrypt need not use all this space, but
- * the result must start at the beginning of the struct buffer and
- * correct length must be returned */
- int extra_prefix_len, extra_postfix_len;
-
- struct module *owner;
-};
-
-struct rtllib_crypt_data {
- struct list_head list; /* delayed deletion list */
- struct rtllib_crypto_ops *ops;
- void *priv;
- atomic_t refcnt;
-};
-
-int rtllib_register_crypto_ops(struct rtllib_crypto_ops *ops);
-int rtllib_unregister_crypto_ops(struct rtllib_crypto_ops *ops);
-struct rtllib_crypto_ops *rtllib_get_crypto_ops(const char *name);
-void rtllib_crypt_deinit_entries(struct rtllib_device *, int);
-void rtllib_crypt_deinit_handler(unsigned long);
-void rtllib_crypt_delayed_deinit(struct rtllib_device *ieee,
- struct rtllib_crypt_data **crypt);
+int rtllib_register_crypto_ops(struct lib80211_crypto_ops *ops);
+int rtllib_unregister_crypto_ops(struct lib80211_crypto_ops *ops);
+struct lib80211_crypto_ops *rtllib_get_crypto_ops(const char *name);
+void rtllib_crypt_deinit_entries(struct lib80211_crypt_info *info, int force);
+void rtllib_crypt_deinit_handler(unsigned long data);
+void rtllib_crypt_delayed_deinit(struct lib80211_crypt_info *info,
+ struct lib80211_crypt_data **crypt);
#endif
diff --git a/drivers/staging/rtl8192e/rtllib_crypt_ccmp.c b/drivers/staging/rtl8192e/rtllib_crypt_ccmp.c
index 6196b9aa3a09..4217b88e6fc3 100644
--- a/drivers/staging/rtl8192e/rtllib_crypt_ccmp.c
+++ b/drivers/staging/rtl8192e/rtllib_crypt_ccmp.c
@@ -9,7 +9,6 @@
* more details.
*/
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
@@ -63,10 +62,9 @@ static void *rtllib_ccmp_init(int key_idx)
{
struct rtllib_ccmp_data *priv;
- priv = kmalloc(sizeof(*priv), GFP_ATOMIC);
+ priv = kzalloc(sizeof(*priv), GFP_ATOMIC);
if (priv == NULL)
goto fail;
- memset(priv, 0, sizeof(*priv));
priv->key_idx = key_idx;
priv->tfm = (void *)crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
@@ -429,13 +427,8 @@ static char *rtllib_ccmp_print_stats(char *p, void *priv)
return p;
}
-void rtllib_ccmp_null(void)
-{
- return;
-}
-
-static struct rtllib_crypto_ops rtllib_crypt_ccmp = {
- .name = "CCMP",
+static struct lib80211_crypto_ops rtllib_crypt_ccmp = {
+ .name = "R-CCMP",
.init = rtllib_ccmp_init,
.deinit = rtllib_ccmp_deinit,
.encrypt_mpdu = rtllib_ccmp_encrypt,
@@ -445,19 +438,24 @@ static struct rtllib_crypto_ops rtllib_crypt_ccmp = {
.set_key = rtllib_ccmp_set_key,
.get_key = rtllib_ccmp_get_key,
.print_stats = rtllib_ccmp_print_stats,
- .extra_prefix_len = CCMP_HDR_LEN,
- .extra_postfix_len = CCMP_MIC_LEN,
+ .extra_mpdu_prefix_len = CCMP_HDR_LEN,
+ .extra_mpdu_postfix_len = CCMP_MIC_LEN,
.owner = THIS_MODULE,
};
int __init rtllib_crypto_ccmp_init(void)
{
- return rtllib_register_crypto_ops(&rtllib_crypt_ccmp);
+ return lib80211_register_crypto_ops(&rtllib_crypt_ccmp);
}
void __exit rtllib_crypto_ccmp_exit(void)
{
- rtllib_unregister_crypto_ops(&rtllib_crypt_ccmp);
+ lib80211_unregister_crypto_ops(&rtllib_crypt_ccmp);
}
+
+module_init(rtllib_crypto_ccmp_init);
+module_exit(rtllib_crypto_ccmp_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/rtl8192e/rtllib_crypt_tkip.c b/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
index 6a0c87886422..800925053fb0 100644
--- a/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
+++ b/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
@@ -9,7 +9,6 @@
* more details.
*/
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
@@ -60,10 +59,9 @@ static void *rtllib_tkip_init(int key_idx)
{
struct rtllib_tkip_data *priv;
- priv = kmalloc(sizeof(*priv), GFP_ATOMIC);
+ priv = kzalloc(sizeof(*priv), GFP_ATOMIC);
if (priv == NULL)
goto fail;
- memset(priv, 0, sizeof(*priv));
priv->key_idx = key_idx;
priv->tx_tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0,
CRYPTO_ALG_ASYNC);
@@ -598,8 +596,7 @@ static void rtllib_michael_mic_failure(struct net_device *dev,
}
static int rtllib_michael_mic_verify(struct sk_buff *skb, int keyidx,
- int hdr_len, void *priv,
- struct rtllib_device *ieee)
+ int hdr_len, void *priv)
{
struct rtllib_tkip_data *tkey = priv;
u8 mic[8];
@@ -618,23 +615,20 @@ static int rtllib_michael_mic_verify(struct sk_buff *skb, int keyidx,
skb->data + hdr_len, skb->len - 8 - hdr_len, mic))
return -1;
- if ((memcmp(mic, skb->data + skb->len - 8, 8) != 0) ||
- (ieee->force_mic_error)) {
+ if (memcmp(mic, skb->data + skb->len - 8, 8) != 0) {
struct rtllib_hdr_4addr *hdr;
hdr = (struct rtllib_hdr_4addr *) skb->data;
printk(KERN_DEBUG "%s: Michael MIC verification failed for "
"MSDU from %pM keyidx=%d\n",
skb->dev ? skb->dev->name : "N/A", hdr->addr2,
keyidx);
- printk(KERN_DEBUG "%d, force_mic_error = %d\n",
- (memcmp(mic, skb->data + skb->len - 8, 8) != 0),\
- ieee->force_mic_error);
+ printk(KERN_DEBUG "%d\n",
+ memcmp(mic, skb->data + skb->len - 8, 8) != 0);
if (skb->dev) {
printk(KERN_INFO "skb->dev != NULL\n");
rtllib_michael_mic_failure(skb->dev, hdr, keyidx);
}
tkey->dot11RSNAStatsTKIPLocalMICFailures++;
- ieee->force_mic_error = false;
return -1;
}
@@ -740,9 +734,8 @@ static char *rtllib_tkip_print_stats(char *p, void *priv)
return p;
}
-
-static struct rtllib_crypto_ops rtllib_crypt_tkip = {
- .name = "TKIP",
+static struct lib80211_crypto_ops rtllib_crypt_tkip = {
+ .name = "R-TKIP",
.init = rtllib_tkip_init,
.deinit = rtllib_tkip_deinit,
.encrypt_mpdu = rtllib_tkip_encrypt,
@@ -752,24 +745,25 @@ static struct rtllib_crypto_ops rtllib_crypt_tkip = {
.set_key = rtllib_tkip_set_key,
.get_key = rtllib_tkip_get_key,
.print_stats = rtllib_tkip_print_stats,
- .extra_prefix_len = 4 + 4, /* IV + ExtIV */
- .extra_postfix_len = 8 + 4, /* MIC + ICV */
+ .extra_mpdu_prefix_len = 4 + 4, /* IV + ExtIV */
+ .extra_mpdu_postfix_len = 4, /* ICV */
+ .extra_msdu_postfix_len = 8, /* MIC */
.owner = THIS_MODULE,
};
int __init rtllib_crypto_tkip_init(void)
{
- return rtllib_register_crypto_ops(&rtllib_crypt_tkip);
+ return lib80211_register_crypto_ops(&rtllib_crypt_tkip);
}
void __exit rtllib_crypto_tkip_exit(void)
{
- rtllib_unregister_crypto_ops(&rtllib_crypt_tkip);
+ lib80211_unregister_crypto_ops(&rtllib_crypt_tkip);
}
-void rtllib_tkip_null(void)
-{
- return;
-}
+module_init(rtllib_crypto_tkip_init);
+module_exit(rtllib_crypto_tkip_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/rtl8192e/rtllib_crypt_wep.c b/drivers/staging/rtl8192e/rtllib_crypt_wep.c
index c59bf10fe780..8cdf38913a33 100644
--- a/drivers/staging/rtl8192e/rtllib_crypt_wep.c
+++ b/drivers/staging/rtl8192e/rtllib_crypt_wep.c
@@ -9,7 +9,6 @@
* more details.
*/
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
@@ -38,10 +37,9 @@ static void *prism2_wep_init(int keyidx)
{
struct prism2_wep_data *priv;
- priv = kmalloc(sizeof(*priv), GFP_ATOMIC);
+ priv = kzalloc(sizeof(*priv), GFP_ATOMIC);
if (priv == NULL)
goto fail;
- memset(priv, 0, sizeof(*priv));
priv->key_idx = keyidx;
priv->tx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
@@ -257,9 +255,8 @@ static char *prism2_wep_print_stats(char *p, void *priv)
return p;
}
-
-static struct rtllib_crypto_ops rtllib_crypt_wep = {
- .name = "WEP",
+static struct lib80211_crypto_ops rtllib_crypt_wep = {
+ .name = "R-WEP",
.init = prism2_wep_init,
.deinit = prism2_wep_deinit,
.encrypt_mpdu = prism2_wep_encrypt,
@@ -269,24 +266,24 @@ static struct rtllib_crypto_ops rtllib_crypt_wep = {
.set_key = prism2_wep_set_key,
.get_key = prism2_wep_get_key,
.print_stats = prism2_wep_print_stats,
- .extra_prefix_len = 4, /* IV */
- .extra_postfix_len = 4, /* ICV */
+ .extra_mpdu_prefix_len = 4, /* IV */
+ .extra_mpdu_postfix_len = 4, /* ICV */
.owner = THIS_MODULE,
};
int __init rtllib_crypto_wep_init(void)
{
- return rtllib_register_crypto_ops(&rtllib_crypt_wep);
+ return lib80211_register_crypto_ops(&rtllib_crypt_wep);
}
void __exit rtllib_crypto_wep_exit(void)
{
- rtllib_unregister_crypto_ops(&rtllib_crypt_wep);
+ lib80211_unregister_crypto_ops(&rtllib_crypt_wep);
}
-void rtllib_wep_null(void)
-{
- return;
-}
+module_init(rtllib_crypto_wep_init);
+module_exit(rtllib_crypto_wep_exit);
+
+MODULE_LICENSE("GPL");
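The three files above (rtllib_crypt_ccmp.c, rtllib_crypt_tkip.c, rtllib_crypt_wep.c) now register with the shared lib80211 registry instead of a driver-private ops table; the "R-" name prefix presumably keeps them from colliding with lib80211's own "WEP"/"TKIP"/"CCMP" registrations. A minimal sketch of the pattern they now follow, using hypothetical "example" names that are not part of this patch and assuming the standard <net/lib80211.h> interface:

#include <linux/module.h>
#include <linux/slab.h>
#include <net/lib80211.h>

static void *example_crypt_init(int keyidx)
{
	/* per-key private state; kzalloc so counters start out zeroed */
	return kzalloc(sizeof(int), GFP_ATOMIC);
}

static void example_crypt_deinit(void *priv)
{
	kfree(priv);
}

static struct lib80211_crypto_ops example_crypt_ops = {
	.name                   = "R-EXAMPLE",
	.init                   = example_crypt_init,
	.deinit                 = example_crypt_deinit,
	.extra_mpdu_prefix_len  = 4,	/* e.g. IV */
	.extra_mpdu_postfix_len = 4,	/* e.g. ICV */
	.owner                  = THIS_MODULE,
};

static int __init example_crypt_mod_init(void)
{
	return lib80211_register_crypto_ops(&example_crypt_ops);
}

static void __exit example_crypt_mod_exit(void)
{
	lib80211_unregister_crypto_ops(&example_crypt_ops);
}

module_init(example_crypt_mod_init);
module_exit(example_crypt_mod_exit);
MODULE_LICENSE("GPL");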
diff --git a/drivers/staging/rtl8192e/rtllib_debug.h b/drivers/staging/rtl8192e/rtllib_debug.h
new file mode 100644
index 000000000000..2bfc1155f505
--- /dev/null
+++ b/drivers/staging/rtl8192e/rtllib_debug.h
@@ -0,0 +1,86 @@
+/******************************************************************************
+ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * Based on the r8180 driver, which is:
+ * Copyright 2004-2005 Andrea Merello <andreamrl@tiscali.it>, et al.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+******************************************************************************/
+#ifndef _RTL_DEBUG_H
+#define _RTL_DEBUG_H
+
+/* Allow files to override DRV_NAME */
+#ifndef DRV_NAME
+#define DRV_NAME "rtllib_92e"
+#endif
+
+#define DMESG(x, a...)
+
+extern u32 rt_global_debug_component;
+
+/* These are the defines for rt_global_debug_component */
+enum RTL_DEBUG {
+ COMP_TRACE = (1 << 0),
+ COMP_DBG = (1 << 1),
+ COMP_INIT = (1 << 2),
+ COMP_RECV = (1 << 3),
+ COMP_SEND = (1 << 4),
+ COMP_CMD = (1 << 5),
+ COMP_POWER = (1 << 6),
+ COMP_EPROM = (1 << 7),
+ COMP_SWBW = (1 << 8),
+ COMP_SEC = (1 << 9),
+ COMP_LPS = (1 << 10),
+ COMP_QOS = (1 << 11),
+ COMP_RATE = (1 << 12),
+ COMP_RXDESC = (1 << 13),
+ COMP_PHY = (1 << 14),
+ COMP_DIG = (1 << 15),
+ COMP_TXAGC = (1 << 16),
+ COMP_HALDM = (1 << 17),
+ COMP_POWER_TRACKING = (1 << 18),
+ COMP_CH = (1 << 19),
+ COMP_RF = (1 << 20),
+ COMP_FIRMWARE = (1 << 21),
+ COMP_HT = (1 << 22),
+ COMP_RESET = (1 << 23),
+ COMP_CMDPKT = (1 << 24),
+ COMP_SCAN = (1 << 25),
+ COMP_PS = (1 << 26),
+ COMP_DOWN = (1 << 27),
+ COMP_INTR = (1 << 28),
+ COMP_LED = (1 << 29),
+ COMP_MLME = (1 << 30),
+ COMP_ERR = (1 << 31)
+};
+
+#define RT_TRACE(component, x, args...) \
+do { \
+ if (rt_global_debug_component & component) \
+ printk(KERN_DEBUG DRV_NAME ":" x "\n" , \
+ ##args);\
+} while (0);
+
+#define assert(expr) \
+ if (!(expr)) { \
+ printk(KERN_INFO "Assertion failed! %s,%s,%s,line=%d\n", \
+ #expr, __FILE__, __func__, __LINE__); \
+ }
+
+#endif
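For reference, the new header is consumed like this (hypothetical call site; the COMP_INIT bit must be set in rt_global_debug_component, which rtllib_module.c below initializes to COMP_ERR only):

#define DRV_NAME "my_driver"		/* optional override, must precede the include */
#include "rtllib_debug.h"

static int my_probe(void)
{
	/* printed only when the COMP_INIT bit is set in the global mask */
	RT_TRACE(COMP_INIT, "probing, debug mask = 0x%08x",
		 rt_global_debug_component);

	assert(rt_global_debug_component != 0);
	return 0;
}

Note that the RT_TRACE() expansion ends in "} while (0);", so the invocation already terminates its own statement; using it as the sole branch of an unbraced if/else will not compile cleanly.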
diff --git a/drivers/staging/rtl8192e/rtllib_module.c b/drivers/staging/rtl8192e/rtllib_module.c
index c36a140a4568..f9dae958a5d4 100644
--- a/drivers/staging/rtl8192e/rtllib_module.c
+++ b/drivers/staging/rtl8192e/rtllib_module.c
@@ -45,7 +45,6 @@
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/types.h>
-#include <linux/version.h>
#include <linux/wireless.h>
#include <linux/etherdevice.h>
#include <linux/uaccess.h>
@@ -54,7 +53,9 @@
#include "rtllib.h"
-#define DRV_NAME "rtllib_92e"
+u32 rt_global_debug_component = COMP_ERR;
+EXPORT_SYMBOL(rt_global_debug_component);
+
void _setup_timer(struct timer_list *ptimer, void *fun, unsigned long data)
{
@@ -135,10 +136,6 @@ struct net_device *alloc_rtllib(int sizeof_priv)
ieee->host_decrypt = 1;
ieee->ieee802_1x = 1; /* Default to supporting 802.1x */
- INIT_LIST_HEAD(&ieee->crypt_deinit_list);
- _setup_timer(&ieee->crypt_deinit_timer,
- rtllib_crypt_deinit_handler,
- (unsigned long) ieee);
ieee->rtllib_ap_sec_type = rtllib_ap_sec_type;
spin_lock_init(&ieee->lock);
@@ -148,6 +145,9 @@ struct net_device *alloc_rtllib(int sizeof_priv)
atomic_set(&(ieee->atm_chnlop), 0);
atomic_set(&(ieee->atm_swbw), 0);
+ /* SAM FIXME */
+ lib80211_crypt_info_init(&ieee->crypt_info, "RTLLIB", &ieee->lock);
+
ieee->bHalfNMode = false;
ieee->wpa_enabled = 0;
ieee->tkip_countermeasures = 0;
@@ -177,10 +177,6 @@ struct net_device *alloc_rtllib(int sizeof_priv)
ieee->last_packet_time[i] = 0;
}
- rtllib_tkip_null();
- rtllib_wep_null();
- rtllib_ccmp_null();
-
return dev;
failed:
@@ -188,32 +184,23 @@ struct net_device *alloc_rtllib(int sizeof_priv)
free_netdev(dev);
return NULL;
}
+EXPORT_SYMBOL(alloc_rtllib);
void free_rtllib(struct net_device *dev)
{
struct rtllib_device *ieee = (struct rtllib_device *)
netdev_priv_rsl(dev);
- int i;
kfree(ieee->pHTInfo);
ieee->pHTInfo = NULL;
rtllib_softmac_free(ieee);
- del_timer_sync(&ieee->crypt_deinit_timer);
- rtllib_crypt_deinit_entries(ieee, 1);
-
- for (i = 0; i < WEP_KEYS; i++) {
- struct rtllib_crypt_data *crypt = ieee->crypt[i];
- if (crypt) {
- if (crypt->ops)
- crypt->ops->deinit(crypt->priv);
- kfree(crypt);
- ieee->crypt[i] = NULL;
- }
- }
+
+ lib80211_crypt_info_free(&ieee->crypt_info);
rtllib_networks_free(ieee);
free_netdev(dev);
}
+EXPORT_SYMBOL(free_rtllib);
u32 rtllib_debug_level;
static int debug = \
@@ -287,3 +274,8 @@ void __exit rtllib_exit(void)
rtllib_proc = NULL;
}
}
+
+module_init(rtllib_init);
+module_exit(rtllib_exit);
+
+MODULE_LICENSE("GPL");
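The private crypt_deinit list, its timer and the open-coded per-key teardown loop are gone; their job is now handled by the lib80211 crypt_info helpers. A minimal sketch of the life cycle alloc_rtllib()/free_rtllib() now rely on (the "my_dev" structure is illustrative, not taken from the patch):

#include <linux/spinlock.h>
#include <net/lib80211.h>

struct my_dev {
	spinlock_t lock;
	struct lib80211_crypt_info crypt_info;
};

static int my_dev_crypt_setup(struct my_dev *dev)
{
	spin_lock_init(&dev->lock);
	/* sets up the delayed-deinit list and timer for this device */
	return lib80211_crypt_info_init(&dev->crypt_info, "MYDEV", &dev->lock);
}

static void my_dev_drop_key(struct my_dev *dev, int idx)
{
	/* queue the old algorithm instance for deferred teardown */
	lib80211_crypt_delayed_deinit(&dev->crypt_info,
				      &dev->crypt_info.crypt[idx]);
}

static void my_dev_crypt_teardown(struct my_dev *dev)
{
	/* flushes anything still queued and frees every crypt[] slot */
	lib80211_crypt_info_free(&dev->crypt_info);
}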
diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c
index 8d0af5ed8ecf..6c5061f12bad 100644
--- a/drivers/staging/rtl8192e/rtllib_rx.c
+++ b/drivers/staging/rtl8192e/rtllib_rx.c
@@ -36,7 +36,6 @@
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/types.h>
-#include <linux/version.h>
#include <linux/wireless.h>
#include <linux/etherdevice.h>
#include <linux/uaccess.h>
@@ -281,7 +280,7 @@ static int rtllib_is_eapol_frame(struct rtllib_device *ieee,
/* Called only as a tasklet (software IRQ), by rtllib_rx */
static inline int
rtllib_rx_frame_decrypt(struct rtllib_device *ieee, struct sk_buff *skb,
- struct rtllib_crypt_data *crypt)
+ struct lib80211_crypt_data *crypt)
{
struct rtllib_hdr_4addr *hdr;
int res, hdrlen;
@@ -322,7 +321,7 @@ rtllib_rx_frame_decrypt(struct rtllib_device *ieee, struct sk_buff *skb,
/* Called only as a tasklet (software IRQ), by rtllib_rx */
static inline int
rtllib_rx_frame_decrypt_msdu(struct rtllib_device *ieee, struct sk_buff *skb,
- int keyidx, struct rtllib_crypt_data *crypt)
+ int keyidx, struct lib80211_crypt_data *crypt)
{
struct rtllib_hdr_4addr *hdr;
int res, hdrlen;
@@ -341,7 +340,7 @@ rtllib_rx_frame_decrypt_msdu(struct rtllib_device *ieee, struct sk_buff *skb,
hdrlen = rtllib_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
atomic_inc(&crypt->refcnt);
- res = crypt->ops->decrypt_msdu(skb, keyidx, hdrlen, crypt->priv, ieee);
+ res = crypt->ops->decrypt_msdu(skb, keyidx, hdrlen, crypt->priv);
atomic_dec(&crypt->refcnt);
if (res < 0) {
printk(KERN_DEBUG "%s: MSDU decryption/MIC verification failed"
@@ -1010,7 +1009,7 @@ static int rtllib_rx_data_filter(struct rtllib_device *ieee, u16 fc,
}
static int rtllib_rx_get_crypt(struct rtllib_device *ieee, struct sk_buff *skb,
- struct rtllib_crypt_data **crypt, size_t hdrlen)
+ struct lib80211_crypt_data **crypt, size_t hdrlen)
{
struct rtllib_hdr_4addr *hdr = (struct rtllib_hdr_4addr *)skb->data;
u16 fc = le16_to_cpu(hdr->frame_ctl);
@@ -1020,7 +1019,7 @@ static int rtllib_rx_get_crypt(struct rtllib_device *ieee, struct sk_buff *skb,
if (skb->len >= hdrlen + 3)
idx = skb->data[hdrlen + 3] >> 6;
- *crypt = ieee->crypt[idx];
+ *crypt = ieee->crypt_info.crypt[idx];
	/* allow NULL decrypt to indicate a station-specific override

* for default encryption */
if (*crypt && ((*crypt)->ops == NULL ||
@@ -1045,7 +1044,7 @@ static int rtllib_rx_get_crypt(struct rtllib_device *ieee, struct sk_buff *skb,
static int rtllib_rx_decrypt(struct rtllib_device *ieee, struct sk_buff *skb,
struct rtllib_rx_stats *rx_stats,
- struct rtllib_crypt_data *crypt, size_t hdrlen)
+ struct lib80211_crypt_data *crypt, size_t hdrlen)
{
struct rtllib_hdr_4addr *hdr;
int keyidx = 0;
@@ -1253,7 +1252,7 @@ static int rtllib_rx_InfraAdhoc(struct rtllib_device *ieee, struct sk_buff *skb,
{
struct net_device *dev = ieee->dev;
struct rtllib_hdr_4addr *hdr = (struct rtllib_hdr_4addr *)skb->data;
- struct rtllib_crypt_data *crypt = NULL;
+ struct lib80211_crypt_data *crypt = NULL;
struct rtllib_rxb *rxb = NULL;
struct rx_ts_record *pTS = NULL;
u16 fc, sc, SeqNum = 0;
@@ -1497,6 +1496,7 @@ int rtllib_rx(struct rtllib_device *ieee, struct sk_buff *skb,
ieee->stats.rx_dropped++;
return 0;
}
+EXPORT_SYMBOL(rtllib_rx);
static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 };
@@ -2492,7 +2492,7 @@ static int IsPassiveChannel(struct rtllib_device *rtllib, u8 channel)
return 0;
}
-int IsLegalChannel(struct rtllib_device *rtllib, u8 channel)
+int rtllib_legal_channel(struct rtllib_device *rtllib, u8 channel)
{
if (MAX_CHANNEL_NUMBER < channel) {
printk(KERN_INFO "%s(): Invalid Channel\n", __func__);
@@ -2503,6 +2503,7 @@ int IsLegalChannel(struct rtllib_device *rtllib, u8 channel)
return 0;
}
+EXPORT_SYMBOL(rtllib_legal_channel);
static inline void rtllib_process_probe_response(
struct rtllib_device *ieee,
@@ -2553,7 +2554,7 @@ static inline void rtllib_process_probe_response(
}
- if (!IsLegalChannel(ieee, network->channel))
+ if (!rtllib_legal_channel(ieee, network->channel))
goto free_network;
if (WLAN_FC_GET_STYPE(beacon->header.frame_ctl) ==
diff --git a/drivers/staging/rtl8192e/rtllib_softmac.c b/drivers/staging/rtl8192e/rtllib_softmac.c
index b5086850f0de..1637f1110991 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac.c
@@ -15,11 +15,9 @@
#include "rtllib.h"
-#include "rtl_core.h"
#include <linux/random.h>
#include <linux/delay.h>
-#include <linux/version.h>
#include <linux/uaccess.h>
#include "dot11d.h"
@@ -28,9 +26,9 @@ short rtllib_is_54g(struct rtllib_network *net)
return (net->rates_ex_len > 0) || (net->rates_len > 4);
}
-short rtllib_is_shortslot(struct rtllib_network net)
+short rtllib_is_shortslot(const struct rtllib_network *net)
{
- return net.capability & WLAN_CAPABILITY_SHORT_SLOT_TIME;
+ return net->capability & WLAN_CAPABILITY_SHORT_SLOT_TIME;
}
 /* returns the total length needed for placing the RATE MFIE
@@ -468,6 +466,7 @@ void rtllib_EnableIntelPromiscuousMode(struct net_device *dev,
ieee->bNetPromiscuousMode = true;
}
+EXPORT_SYMBOL(rtllib_EnableIntelPromiscuousMode);
/*
@@ -490,6 +489,7 @@ void rtllib_DisableIntelPromiscuousMode(struct net_device *dev,
ieee->bNetPromiscuousMode = false;
}
+EXPORT_SYMBOL(rtllib_DisableIntelPromiscuousMode);
static void rtllib_send_probe(struct rtllib_device *ieee, u8 is_mesh)
{
@@ -685,6 +685,7 @@ void rtllib_stop_send_beacons(struct rtllib_device *ieee)
if (ieee->softmac_features & IEEE_SOFTMAC_BEACONS)
rtllib_beacons_stop(ieee);
}
+EXPORT_SYMBOL(rtllib_stop_send_beacons);
void rtllib_start_send_beacons(struct rtllib_device *ieee)
@@ -694,6 +695,7 @@ void rtllib_start_send_beacons(struct rtllib_device *ieee)
if (ieee->softmac_features & IEEE_SOFTMAC_BEACONS)
rtllib_beacons_start(ieee);
}
+EXPORT_SYMBOL(rtllib_start_send_beacons);
static void rtllib_softmac_stop_scan(struct rtllib_device *ieee)
@@ -719,6 +721,7 @@ void rtllib_stop_scan(struct rtllib_device *ieee)
ieee->rtllib_stop_hw_scan(ieee->dev);
}
}
+EXPORT_SYMBOL(rtllib_stop_scan);
void rtllib_stop_scan_syncro(struct rtllib_device *ieee)
{
@@ -729,6 +732,7 @@ void rtllib_stop_scan_syncro(struct rtllib_device *ieee)
ieee->rtllib_stop_hw_scan(ieee->dev);
}
}
+EXPORT_SYMBOL(rtllib_stop_scan_syncro);
bool rtllib_act_scanning(struct rtllib_device *ieee, bool sync_scan)
{
@@ -741,6 +745,7 @@ bool rtllib_act_scanning(struct rtllib_device *ieee, bool sync_scan)
return test_bit(STATUS_SCANNING, &ieee->status);
}
}
+EXPORT_SYMBOL(rtllib_act_scanning);
/* called with ieee->lock held */
static void rtllib_start_scan(struct rtllib_device *ieee)
@@ -781,6 +786,7 @@ void rtllib_start_scan_syncro(struct rtllib_device *ieee, u8 is_mesh)
ieee->rtllib_start_hw_scan(ieee->dev);
}
}
+EXPORT_SYMBOL(rtllib_start_scan_syncro);
inline struct sk_buff *rtllib_authentication_req(struct rtllib_network *beacon,
struct rtllib_device *ieee, int challengelen, u8 *daddr)
@@ -830,7 +836,7 @@ static struct sk_buff *rtllib_probe_resp(struct rtllib_device *ieee, u8 *dest)
struct sk_buff *skb = NULL;
int encrypt;
int atim_len, erp_len;
- struct rtllib_crypt_data *crypt;
+ struct lib80211_crypt_data *crypt;
char *ssid = ieee->current_network.ssid;
int ssid_len = ieee->current_network.ssid_len;
@@ -865,9 +871,9 @@ static struct sk_buff *rtllib_probe_resp(struct rtllib_device *ieee, u8 *dest)
} else
erp_len = 0;
- crypt = ieee->crypt[ieee->tx_keyidx];
+ crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];
encrypt = ieee->host_encrypt && crypt && crypt->ops &&
- ((0 == strcmp(crypt->ops->name, "WEP") || wpa_ie_len));
+ ((0 == strcmp(crypt->ops->name, "R-WEP") || wpa_ie_len));
if (ieee->pHTInfo->bCurrentHTSupport) {
tmp_ht_cap_buf = (u8 *) &(ieee->pHTInfo->SelfHTCap);
tmp_ht_cap_len = sizeof(ieee->pHTInfo->SelfHTCap);
@@ -917,7 +923,7 @@ static struct sk_buff *rtllib_probe_resp(struct rtllib_device *ieee, u8 *dest)
cpu_to_le16((beacon_buf->capability |=
WLAN_CAPABILITY_SHORT_SLOT_TIME));
- crypt = ieee->crypt[ieee->tx_keyidx];
+ crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];
if (encrypt)
beacon_buf->capability |= cpu_to_le16(WLAN_CAPABILITY_PRIVACY);
@@ -976,7 +982,7 @@ static struct sk_buff *rtllib_assoc_resp(struct rtllib_device *ieee, u8 *dest)
struct sk_buff *skb;
u8 *tag;
- struct rtllib_crypt_data *crypt;
+ struct lib80211_crypt_data *crypt;
struct rtllib_assoc_response_frame *assoc;
short encrypt;
@@ -1007,7 +1013,7 @@ static struct sk_buff *rtllib_assoc_resp(struct rtllib_device *ieee, u8 *dest)
cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT_TIME);
if (ieee->host_encrypt)
- crypt = ieee->crypt[ieee->tx_keyidx];
+ crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];
else
crypt = NULL;
@@ -1172,7 +1178,7 @@ inline struct sk_buff *rtllib_association_req(struct rtllib_network *beacon,
unsigned int ckip_ie_len = 0;
unsigned int ccxrm_ie_len = 0;
unsigned int cxvernum_ie_len = 0;
- struct rtllib_crypt_data *crypt;
+ struct lib80211_crypt_data *crypt;
int encrypt;
int PMKCacheIdx;
@@ -1185,10 +1191,10 @@ inline struct sk_buff *rtllib_association_req(struct rtllib_network *beacon,
unsigned int turbo_info_len = beacon->Turbo_Enable ? 9 : 0;
int len = 0;
- crypt = ieee->crypt[ieee->tx_keyidx];
+ crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];
if (crypt != NULL)
encrypt = ieee->host_encrypt && crypt && crypt->ops &&
- ((0 == strcmp(crypt->ops->name, "WEP") ||
+ ((0 == strcmp(crypt->ops->name, "R-WEP") ||
wpa_ie_len));
else
encrypt = 0;
@@ -1956,6 +1962,7 @@ void rtllib_sta_ps_send_null_frame(struct rtllib_device *ieee, short pwr)
if (buf)
softmac_ps_mgmt_xmit(buf, ieee);
}
+EXPORT_SYMBOL(rtllib_sta_ps_send_null_frame);
void rtllib_sta_ps_send_pspoll_frame(struct rtllib_device *ieee)
{
@@ -2168,6 +2175,7 @@ void rtllib_ps_tx_ack(struct rtllib_device *ieee, short success)
}
spin_unlock_irqrestore(&ieee->lock, flags);
}
+EXPORT_SYMBOL(rtllib_ps_tx_ack);
static void rtllib_process_action(struct rtllib_device *ieee, struct sk_buff *skb)
{
@@ -2540,6 +2548,7 @@ void rtllib_reset_queue(struct rtllib_device *ieee)
spin_unlock_irqrestore(&ieee->lock, flags);
}
+EXPORT_SYMBOL(rtllib_reset_queue);
void rtllib_wake_queue(struct rtllib_device *ieee)
{
@@ -2928,6 +2937,7 @@ struct sk_buff *rtllib_get_beacon(struct rtllib_device *ieee)
return skb;
}
+EXPORT_SYMBOL(rtllib_get_beacon);
void rtllib_softmac_stop_protocol(struct rtllib_device *ieee, u8 mesh_flag,
u8 shutdown)
@@ -2937,6 +2947,7 @@ void rtllib_softmac_stop_protocol(struct rtllib_device *ieee, u8 mesh_flag,
rtllib_stop_protocol(ieee, shutdown);
up(&ieee->wx_sem);
}
+EXPORT_SYMBOL(rtllib_softmac_stop_protocol);
void rtllib_stop_protocol(struct rtllib_device *ieee, u8 shutdown)
@@ -2985,6 +2996,7 @@ void rtllib_softmac_start_protocol(struct rtllib_device *ieee, u8 mesh_flag)
rtllib_start_protocol(ieee);
up(&ieee->wx_sem);
}
+EXPORT_SYMBOL(rtllib_softmac_start_protocol);
void rtllib_start_protocol(struct rtllib_device *ieee)
{
@@ -3048,10 +3060,9 @@ void rtllib_softmac_init(struct rtllib_device *ieee)
ieee->state = RTLLIB_NOLINK;
for (i = 0; i < 5; i++)
ieee->seq_ctrl[i] = 0;
- ieee->pDot11dInfo = kmalloc(sizeof(struct rt_dot11d_info), GFP_ATOMIC);
+ ieee->pDot11dInfo = kzalloc(sizeof(struct rt_dot11d_info), GFP_ATOMIC);
if (!ieee->pDot11dInfo)
RTLLIB_DEBUG(RTLLIB_DL_ERR, "can't alloc memory for DOT11D\n");
- memset(ieee->pDot11dInfo, 0, sizeof(struct rt_dot11d_info));
ieee->LinkDetectInfo.SlotIndex = 0;
ieee->LinkDetectInfo.SlotNum = 2;
ieee->LinkDetectInfo.NumRecvBcnInPeriod = 0;
@@ -3207,11 +3218,11 @@ static int rtllib_wpa_set_wpa_ie(struct rtllib_device *ieee,
return -EINVAL;
if (param->u.wpa_ie.len) {
- buf = kmalloc(param->u.wpa_ie.len, GFP_KERNEL);
+ buf = kmemdup(param->u.wpa_ie.data, param->u.wpa_ie.len,
+ GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
- memcpy(buf, param->u.wpa_ie.data, param->u.wpa_ie.len);
kfree(ieee->wpa_ie);
ieee->wpa_ie = buf;
ieee->wpa_ie_len = param->u.wpa_ie.len;
@@ -3334,8 +3345,8 @@ static int rtllib_wpa_set_encryption(struct rtllib_device *ieee,
u8 is_mesh)
{
int ret = 0;
- struct rtllib_crypto_ops *ops;
- struct rtllib_crypt_data **crypt;
+ struct lib80211_crypto_ops *ops;
+ struct lib80211_crypt_data **crypt;
struct rtllib_security sec = {
.flags = 0,
@@ -3354,9 +3365,9 @@ static int rtllib_wpa_set_encryption(struct rtllib_device *ieee,
if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) {
- if (param->u.crypt.idx >= WEP_KEYS)
+ if (param->u.crypt.idx >= NUM_WEP_KEYS)
return -EINVAL;
- crypt = &ieee->crypt[param->u.crypt.idx];
+ crypt = &ieee->crypt_info.crypt[param->u.crypt.idx];
} else {
return -EINVAL;
}
@@ -3366,7 +3377,7 @@ static int rtllib_wpa_set_encryption(struct rtllib_device *ieee,
sec.enabled = 0;
sec.level = SEC_LEVEL_0;
sec.flags |= SEC_ENABLED | SEC_LEVEL;
- rtllib_crypt_delayed_deinit(ieee, crypt);
+ lib80211_crypt_delayed_deinit(&ieee->crypt_info, crypt);
}
goto done;
}
@@ -3375,19 +3386,19 @@ static int rtllib_wpa_set_encryption(struct rtllib_device *ieee,
/* IPW HW cannot build TKIP MIC, host decryption still needed. */
if (!(ieee->host_encrypt || ieee->host_decrypt) &&
- strcmp(param->u.crypt.alg, "TKIP"))
+ strcmp(param->u.crypt.alg, "R-TKIP"))
goto skip_host_crypt;
- ops = rtllib_get_crypto_ops(param->u.crypt.alg);
- if (ops == NULL && strcmp(param->u.crypt.alg, "WEP") == 0) {
+ ops = lib80211_get_crypto_ops(param->u.crypt.alg);
+ if (ops == NULL && strcmp(param->u.crypt.alg, "R-WEP") == 0) {
request_module("rtllib_crypt_wep");
- ops = rtllib_get_crypto_ops(param->u.crypt.alg);
- } else if (ops == NULL && strcmp(param->u.crypt.alg, "TKIP") == 0) {
+ ops = lib80211_get_crypto_ops(param->u.crypt.alg);
+ } else if (ops == NULL && strcmp(param->u.crypt.alg, "R-TKIP") == 0) {
request_module("rtllib_crypt_tkip");
- ops = rtllib_get_crypto_ops(param->u.crypt.alg);
- } else if (ops == NULL && strcmp(param->u.crypt.alg, "CCMP") == 0) {
+ ops = lib80211_get_crypto_ops(param->u.crypt.alg);
+ } else if (ops == NULL && strcmp(param->u.crypt.alg, "R-CCMP") == 0) {
request_module("rtllib_crypt_ccmp");
- ops = rtllib_get_crypto_ops(param->u.crypt.alg);
+ ops = lib80211_get_crypto_ops(param->u.crypt.alg);
}
if (ops == NULL) {
printk(KERN_INFO "unknown crypto alg '%s'\n",
@@ -3397,17 +3408,17 @@ static int rtllib_wpa_set_encryption(struct rtllib_device *ieee,
goto done;
}
if (*crypt == NULL || (*crypt)->ops != ops) {
- struct rtllib_crypt_data *new_crypt;
+ struct lib80211_crypt_data *new_crypt;
- rtllib_crypt_delayed_deinit(ieee, crypt);
+ lib80211_crypt_delayed_deinit(&ieee->crypt_info, crypt);
- new_crypt = (struct rtllib_crypt_data *)
+ new_crypt = (struct lib80211_crypt_data *)
kmalloc(sizeof(*new_crypt), GFP_KERNEL);
if (new_crypt == NULL) {
ret = -ENOMEM;
goto done;
}
- memset(new_crypt, 0, sizeof(struct rtllib_crypt_data));
+ memset(new_crypt, 0, sizeof(struct lib80211_crypt_data));
new_crypt->ops = ops;
if (new_crypt->ops)
new_crypt->priv =
@@ -3435,7 +3446,7 @@ static int rtllib_wpa_set_encryption(struct rtllib_device *ieee,
skip_host_crypt:
if (param->u.crypt.set_tx) {
- ieee->tx_keyidx = param->u.crypt.idx;
+ ieee->crypt_info.tx_keyidx = param->u.crypt.idx;
sec.active_key = param->u.crypt.idx;
sec.flags |= SEC_ACTIVE_KEY;
} else
@@ -3448,13 +3459,13 @@ static int rtllib_wpa_set_encryption(struct rtllib_device *ieee,
sec.key_sizes[param->u.crypt.idx] = param->u.crypt.key_len;
sec.flags |= (1 << param->u.crypt.idx);
- if (strcmp(param->u.crypt.alg, "WEP") == 0) {
+ if (strcmp(param->u.crypt.alg, "R-WEP") == 0) {
sec.flags |= SEC_LEVEL;
sec.level = SEC_LEVEL_1;
- } else if (strcmp(param->u.crypt.alg, "TKIP") == 0) {
+ } else if (strcmp(param->u.crypt.alg, "R-TKIP") == 0) {
sec.flags |= SEC_LEVEL;
sec.level = SEC_LEVEL_2;
- } else if (strcmp(param->u.crypt.alg, "CCMP") == 0) {
+ } else if (strcmp(param->u.crypt.alg, "R-CCMP") == 0) {
sec.flags |= SEC_LEVEL;
sec.level = SEC_LEVEL_3;
}
@@ -3551,13 +3562,13 @@ u8 rtllib_ap_sec_type(struct rtllib_device *ieee)
static u8 ccmp_ie[4] = {0x00, 0x50, 0xf2, 0x04};
static u8 ccmp_rsn_ie[4] = {0x00, 0x0f, 0xac, 0x04};
int wpa_ie_len = ieee->wpa_ie_len;
- struct rtllib_crypt_data *crypt;
+ struct lib80211_crypt_data *crypt;
int encrypt;
- crypt = ieee->crypt[ieee->tx_keyidx];
+ crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];
encrypt = (ieee->current_network.capability & WLAN_CAPABILITY_PRIVACY)
|| (ieee->host_encrypt && crypt && crypt->ops &&
- (0 == strcmp(crypt->ops->name, "WEP")));
+ (0 == strcmp(crypt->ops->name, "R-WEP")));
/* simply judge */
if (encrypt && (wpa_ie_len == 0)) {
@@ -3634,6 +3645,7 @@ out:
return ret;
}
+EXPORT_SYMBOL(rtllib_wpa_supplicant_ioctl);
void rtllib_MgntDisconnectIBSS(struct rtllib_device *rtllib)
{
@@ -3719,6 +3731,7 @@ bool rtllib_MgntDisconnect(struct rtllib_device *rtllib, u8 asRsn)
return true;
}
+EXPORT_SYMBOL(rtllib_MgntDisconnect);
void notify_wx_assoc_event(struct rtllib_device *ieee)
{
@@ -3739,3 +3752,4 @@ void notify_wx_assoc_event(struct rtllib_device *ieee)
}
wireless_send_event(ieee->dev, SIOCGIWAP, &wrqu, NULL);
}
+EXPORT_SYMBOL(notify_wx_assoc_event);
diff --git a/drivers/staging/rtl8192e/rtllib_softmac_wx.c b/drivers/staging/rtl8192e/rtllib_softmac_wx.c
index 22988fbd444b..1523bc7a2105 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac_wx.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac_wx.c
@@ -15,7 +15,6 @@
#include "rtllib.h"
-#include "rtl_core.h"
#include "dot11d.h"
/* FIXME: add A freqs */
@@ -25,6 +24,7 @@ const long rtllib_wlan_frequencies[] = {
2452, 2457, 2462, 2467,
2472, 2484
};
+EXPORT_SYMBOL(rtllib_wlan_frequencies);
int rtllib_wx_set_freq(struct rtllib_device *ieee, struct iw_request_info *a,
@@ -82,6 +82,7 @@ out:
up(&ieee->wx_sem);
return ret;
}
+EXPORT_SYMBOL(rtllib_wx_set_freq);
int rtllib_wx_get_freq(struct rtllib_device *ieee,
@@ -97,6 +98,7 @@ int rtllib_wx_get_freq(struct rtllib_device *ieee,
fwrq->e = 1;
return 0;
}
+EXPORT_SYMBOL(rtllib_wx_get_freq);
int rtllib_wx_get_wap(struct rtllib_device *ieee,
struct iw_request_info *info,
@@ -125,6 +127,7 @@ int rtllib_wx_get_wap(struct rtllib_device *ieee,
return 0;
}
+EXPORT_SYMBOL(rtllib_wx_get_wap);
int rtllib_wx_set_wap(struct rtllib_device *ieee,
@@ -184,6 +187,7 @@ out:
up(&ieee->wx_sem);
return ret;
}
+EXPORT_SYMBOL(rtllib_wx_set_wap);
int rtllib_wx_get_essid(struct rtllib_device *ieee, struct iw_request_info *a,
union iwreq_data *wrqu, char *b)
@@ -220,6 +224,7 @@ out:
return ret;
}
+EXPORT_SYMBOL(rtllib_wx_get_essid);
int rtllib_wx_set_rate(struct rtllib_device *ieee,
struct iw_request_info *info,
@@ -231,6 +236,7 @@ int rtllib_wx_set_rate(struct rtllib_device *ieee,
ieee->rate = target_rate/100000;
return 0;
}
+EXPORT_SYMBOL(rtllib_wx_set_rate);
int rtllib_wx_get_rate(struct rtllib_device *ieee,
struct iw_request_info *info,
@@ -243,6 +249,7 @@ int rtllib_wx_get_rate(struct rtllib_device *ieee,
return 0;
}
+EXPORT_SYMBOL(rtllib_wx_get_rate);
int rtllib_wx_set_rts(struct rtllib_device *ieee,
@@ -259,6 +266,7 @@ int rtllib_wx_set_rts(struct rtllib_device *ieee,
}
return 0;
}
+EXPORT_SYMBOL(rtllib_wx_set_rts);
int rtllib_wx_get_rts(struct rtllib_device *ieee,
struct iw_request_info *info,
@@ -269,6 +277,7 @@ int rtllib_wx_get_rts(struct rtllib_device *ieee,
wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD);
return 0;
}
+EXPORT_SYMBOL(rtllib_wx_get_rts);
int rtllib_wx_set_mode(struct rtllib_device *ieee, struct iw_request_info *a,
union iwreq_data *wrqu, char *b)
@@ -314,6 +323,7 @@ out:
up(&ieee->wx_sem);
return set_mode_status;
}
+EXPORT_SYMBOL(rtllib_wx_set_mode);
void rtllib_wx_sync_scan_wq(void *data)
{
@@ -428,6 +438,7 @@ out:
up(&ieee->wx_sem);
return ret;
}
+EXPORT_SYMBOL(rtllib_wx_set_scan);
int rtllib_wx_set_essid(struct rtllib_device *ieee,
struct iw_request_info *a,
@@ -490,6 +501,7 @@ out:
up(&ieee->wx_sem);
return ret;
}
+EXPORT_SYMBOL(rtllib_wx_set_essid);
int rtllib_wx_get_mode(struct rtllib_device *ieee, struct iw_request_info *a,
union iwreq_data *wrqu, char *b)
@@ -497,6 +509,7 @@ int rtllib_wx_get_mode(struct rtllib_device *ieee, struct iw_request_info *a,
wrqu->mode = ieee->iw_mode;
return 0;
}
+EXPORT_SYMBOL(rtllib_wx_get_mode);
int rtllib_wx_set_rawtx(struct rtllib_device *ieee,
struct iw_request_info *info,
@@ -533,6 +546,7 @@ int rtllib_wx_set_rawtx(struct rtllib_device *ieee,
return 0;
}
+EXPORT_SYMBOL(rtllib_wx_set_rawtx);
int rtllib_wx_get_name(struct rtllib_device *ieee,
struct iw_request_info *info,
@@ -548,6 +562,7 @@ int rtllib_wx_get_name(struct rtllib_device *ieee,
strcat(wrqu->name, "n");
return 0;
}
+EXPORT_SYMBOL(rtllib_wx_get_name);
/* this is mostly stolen from hostap */
@@ -605,6 +620,7 @@ exit:
return ret;
}
+EXPORT_SYMBOL(rtllib_wx_set_power);
/* this is stolen from hostap */
int rtllib_wx_get_power(struct rtllib_device *ieee,
@@ -643,3 +659,4 @@ exit:
return ret;
}
+EXPORT_SYMBOL(rtllib_wx_get_power);
diff --git a/drivers/staging/rtl8192e/rtllib_tx.c b/drivers/staging/rtl8192e/rtllib_tx.c
index 44e8006bc1af..f451bfc27a86 100644
--- a/drivers/staging/rtl8192e/rtllib_tx.c
+++ b/drivers/staging/rtl8192e/rtllib_tx.c
@@ -46,7 +46,6 @@
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/types.h>
-#include <linux/version.h>
#include <linux/wireless.h>
#include <linux/etherdevice.h>
#include <linux/uaccess.h>
@@ -180,10 +179,10 @@ inline int rtllib_put_snap(u8 *data, u16 h_proto)
int rtllib_encrypt_fragment(struct rtllib_device *ieee, struct sk_buff *frag,
int hdr_len)
{
- struct rtllib_crypt_data *crypt = NULL;
+ struct lib80211_crypt_data *crypt = NULL;
int res;
- crypt = ieee->crypt[ieee->tx_keyidx];
+ crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];
if (!(crypt && crypt->ops)) {
printk(KERN_INFO "=========>%s(), crypt is null\n", __func__);
@@ -569,7 +568,7 @@ int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
};
u8 dest[ETH_ALEN], src[ETH_ALEN];
int qos_actived = ieee->current_network.qos_data.active;
- struct rtllib_crypt_data *crypt = NULL;
+ struct lib80211_crypt_data *crypt = NULL;
struct cb_desc *tcb_desc;
u8 bIsMulticast = false;
u8 IsAmsdu = false;
@@ -646,7 +645,7 @@ int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
}
skb->priority = rtllib_classify(skb, IsAmsdu);
- crypt = ieee->crypt[ieee->tx_keyidx];
+ crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];
encrypt = !(ether_type == ETH_P_PAE && ieee->ieee802_1x) &&
ieee->host_encrypt && crypt && crypt->ops;
if (!encrypt && ieee->ieee802_1x &&
@@ -742,8 +741,10 @@ int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
	/* Each fragment may need to have room for encryption
* pre/postfix */
if (encrypt) {
- bytes_per_frag -= crypt->ops->extra_prefix_len +
- crypt->ops->extra_postfix_len;
+ bytes_per_frag -= crypt->ops->extra_mpdu_prefix_len +
+ crypt->ops->extra_mpdu_postfix_len +
+ crypt->ops->extra_msdu_prefix_len +
+ crypt->ops->extra_msdu_postfix_len;
}
/* Number of fragments is the total bytes_per_frag /
* payload_per_fragment */
@@ -791,7 +792,8 @@ int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
else
tcb_desc->bHwSec = 0;
skb_reserve(skb_frag,
- crypt->ops->extra_prefix_len);
+ crypt->ops->extra_mpdu_prefix_len +
+ crypt->ops->extra_msdu_prefix_len);
} else {
tcb_desc->bHwSec = 0;
}
@@ -965,3 +967,4 @@ int rtllib_xmit(struct sk_buff *skb, struct net_device *dev)
memset(skb->cb, 0, sizeof(skb->cb));
return rtllib_xmit_inter(skb, dev);
}
+EXPORT_SYMBOL(rtllib_xmit);
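With the overhead split into per-MPDU and per-MSDU fields, rtllib_xmit_inter() now subtracts all four quantities from each fragment's payload budget and reserves both prefixes at the head of each fragment. A worked example using the TKIP values set earlier in this patch (extra_msdu_prefix_len is zero simply because the static ops initializer never names it):

/* TKIP, from rtllib_crypt_tkip.c above:
 *   extra_mpdu_prefix_len  = 4 + 4 = 8    IV + ExtIV
 *   extra_mpdu_postfix_len = 4            ICV
 *   extra_msdu_prefix_len  = 0            (unset)
 *   extra_msdu_postfix_len = 8            MIC
 *
 * bytes_per_frag -= 8 + 4 + 0 + 8;   20 bytes less payload per fragment
 * skb_reserve(skb_frag, 8 + 0);      MPDU + MSDU prefix reserved up front
 */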
diff --git a/drivers/staging/rtl8192e/rtllib_wx.c b/drivers/staging/rtl8192e/rtllib_wx.c
index 8cea4a60e1b3..c27ff7edbaf2 100644
--- a/drivers/staging/rtl8192e/rtllib_wx.c
+++ b/drivers/staging/rtl8192e/rtllib_wx.c
@@ -30,7 +30,6 @@
******************************************************************************/
#include <linux/wireless.h>
-#include <linux/version.h>
#include <linux/kmod.h>
#include <linux/module.h>
@@ -295,6 +294,7 @@ int rtllib_wx_get_scan(struct rtllib_device *ieee,
return err;
}
+EXPORT_SYMBOL(rtllib_wx_get_scan);
int rtllib_wx_set_encode(struct rtllib_device *ieee,
struct iw_request_info *info,
@@ -306,44 +306,44 @@ int rtllib_wx_set_encode(struct rtllib_device *ieee,
.flags = 0
};
int i, key, key_provided, len;
- struct rtllib_crypt_data **crypt;
+ struct lib80211_crypt_data **crypt;
RTLLIB_DEBUG_WX("SET_ENCODE\n");
key = erq->flags & IW_ENCODE_INDEX;
if (key) {
- if (key > WEP_KEYS)
+ if (key > NUM_WEP_KEYS)
return -EINVAL;
key--;
key_provided = 1;
} else {
key_provided = 0;
- key = ieee->tx_keyidx;
+ key = ieee->crypt_info.tx_keyidx;
}
RTLLIB_DEBUG_WX("Key: %d [%s]\n", key, key_provided ?
"provided" : "default");
- crypt = &ieee->crypt[key];
+ crypt = &ieee->crypt_info.crypt[key];
if (erq->flags & IW_ENCODE_DISABLED) {
if (key_provided && *crypt) {
RTLLIB_DEBUG_WX("Disabling encryption on key %d.\n",
key);
- rtllib_crypt_delayed_deinit(ieee, crypt);
+ lib80211_crypt_delayed_deinit(&ieee->crypt_info, crypt);
} else
RTLLIB_DEBUG_WX("Disabling encryption.\n");
/* Check all the keys to see if any are still configured,
* and if no key index was provided, de-init them all */
- for (i = 0; i < WEP_KEYS; i++) {
- if (ieee->crypt[i] != NULL) {
+ for (i = 0; i < NUM_WEP_KEYS; i++) {
+ if (ieee->crypt_info.crypt[i] != NULL) {
if (key_provided)
break;
- rtllib_crypt_delayed_deinit(ieee,
- &ieee->crypt[i]);
+ lib80211_crypt_delayed_deinit(&ieee->crypt_info,
+ &ieee->crypt_info.crypt[i]);
}
}
- if (i == WEP_KEYS) {
+ if (i == NUM_WEP_KEYS) {
sec.enabled = 0;
sec.level = SEC_LEVEL_0;
sec.flags |= SEC_ENABLED | SEC_LEVEL;
@@ -358,25 +358,24 @@ int rtllib_wx_set_encode(struct rtllib_device *ieee,
sec.flags |= SEC_ENABLED;
if (*crypt != NULL && (*crypt)->ops != NULL &&
- strcmp((*crypt)->ops->name, "WEP") != 0) {
+ strcmp((*crypt)->ops->name, "R-WEP") != 0) {
/* changing to use WEP; deinit previously used algorithm
* on this key */
- rtllib_crypt_delayed_deinit(ieee, crypt);
+ lib80211_crypt_delayed_deinit(&ieee->crypt_info, crypt);
}
if (*crypt == NULL) {
- struct rtllib_crypt_data *new_crypt;
+ struct lib80211_crypt_data *new_crypt;
/* take WEP into use */
- new_crypt = kmalloc(sizeof(struct rtllib_crypt_data),
+ new_crypt = kzalloc(sizeof(struct lib80211_crypt_data),
GFP_KERNEL);
if (new_crypt == NULL)
return -ENOMEM;
- memset(new_crypt, 0, sizeof(struct rtllib_crypt_data));
- new_crypt->ops = rtllib_get_crypto_ops("WEP");
+ new_crypt->ops = lib80211_get_crypto_ops("R-WEP");
if (!new_crypt->ops) {
request_module("rtllib_crypt_wep");
- new_crypt->ops = rtllib_get_crypto_ops("WEP");
+ new_crypt->ops = lib80211_get_crypto_ops("R-WEP");
}
if (new_crypt->ops)
@@ -412,7 +411,7 @@ int rtllib_wx_set_encode(struct rtllib_device *ieee,
 * explicitly set */
if (key == sec.active_key)
sec.flags |= SEC_ACTIVE_KEY;
- ieee->tx_keyidx = key;
+ ieee->crypt_info.tx_keyidx = key;
} else {
len = (*crypt)->ops->get_key(sec.keys[key], WEP_KEY_LEN,
@@ -435,7 +434,7 @@ int rtllib_wx_set_encode(struct rtllib_device *ieee,
if (key_provided) {
RTLLIB_DEBUG_WX(
"Setting key %d to default Tx key.\n", key);
- ieee->tx_keyidx = key;
+ ieee->crypt_info.tx_keyidx = key;
sec.active_key = key;
sec.flags |= SEC_ACTIVE_KEY;
}
@@ -470,6 +469,7 @@ int rtllib_wx_set_encode(struct rtllib_device *ieee,
}
return 0;
}
+EXPORT_SYMBOL(rtllib_wx_set_encode);
int rtllib_wx_get_encode(struct rtllib_device *ieee,
struct iw_request_info *info,
@@ -477,7 +477,7 @@ int rtllib_wx_get_encode(struct rtllib_device *ieee,
{
struct iw_point *erq = &(wrqu->encoding);
int len, key;
- struct rtllib_crypt_data *crypt;
+ struct lib80211_crypt_data *crypt;
RTLLIB_DEBUG_WX("GET_ENCODE\n");
@@ -486,13 +486,13 @@ int rtllib_wx_get_encode(struct rtllib_device *ieee,
key = erq->flags & IW_ENCODE_INDEX;
if (key) {
- if (key > WEP_KEYS)
+ if (key > NUM_WEP_KEYS)
return -EINVAL;
key--;
} else {
- key = ieee->tx_keyidx;
+ key = ieee->crypt_info.tx_keyidx;
}
- crypt = ieee->crypt[key];
+ crypt = ieee->crypt_info.crypt[key];
erq->flags = key + 1;
@@ -513,6 +513,7 @@ int rtllib_wx_get_encode(struct rtllib_device *ieee,
return 0;
}
+EXPORT_SYMBOL(rtllib_wx_get_encode);
int rtllib_wx_set_encode_ext(struct rtllib_device *ieee,
struct iw_request_info *info,
@@ -525,29 +526,29 @@ int rtllib_wx_set_encode_ext(struct rtllib_device *ieee,
int i, idx;
int group_key = 0;
const char *alg, *module;
- struct rtllib_crypto_ops *ops;
- struct rtllib_crypt_data **crypt;
+ struct lib80211_crypto_ops *ops;
+ struct lib80211_crypt_data **crypt;
struct rtllib_security sec = {
.flags = 0,
};
idx = encoding->flags & IW_ENCODE_INDEX;
if (idx) {
- if (idx < 1 || idx > WEP_KEYS)
+ if (idx < 1 || idx > NUM_WEP_KEYS)
return -EINVAL;
idx--;
} else{
- idx = ieee->tx_keyidx;
+ idx = ieee->crypt_info.tx_keyidx;
}
if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY) {
- crypt = &ieee->crypt[idx];
+ crypt = &ieee->crypt_info.crypt[idx];
group_key = 1;
} else {
/* some Cisco APs use idx>0 for unicast in dynamic WEP */
if (idx != 0 && ext->alg != IW_ENCODE_ALG_WEP)
return -EINVAL;
if (ieee->iw_mode == IW_MODE_INFRA)
- crypt = &ieee->crypt[idx];
+ crypt = &ieee->crypt_info.crypt[idx];
else
return -EINVAL;
}
@@ -556,13 +557,13 @@ int rtllib_wx_set_encode_ext(struct rtllib_device *ieee,
if ((encoding->flags & IW_ENCODE_DISABLED) ||
ext->alg == IW_ENCODE_ALG_NONE) {
if (*crypt)
- rtllib_crypt_delayed_deinit(ieee, crypt);
+ lib80211_crypt_delayed_deinit(&ieee->crypt_info, crypt);
- for (i = 0; i < WEP_KEYS; i++) {
- if (ieee->crypt[i] != NULL)
+ for (i = 0; i < NUM_WEP_KEYS; i++) {
+ if (ieee->crypt_info.crypt[i] != NULL)
break;
}
- if (i == WEP_KEYS) {
+ if (i == NUM_WEP_KEYS) {
sec.enabled = 0;
sec.level = SEC_LEVEL_0;
sec.flags |= SEC_LEVEL;
@@ -573,15 +574,15 @@ int rtllib_wx_set_encode_ext(struct rtllib_device *ieee,
sec.enabled = 1;
switch (ext->alg) {
case IW_ENCODE_ALG_WEP:
- alg = "WEP";
+ alg = "R-WEP";
module = "rtllib_crypt_wep";
break;
case IW_ENCODE_ALG_TKIP:
- alg = "TKIP";
+ alg = "R-TKIP";
module = "rtllib_crypt_tkip";
break;
case IW_ENCODE_ALG_CCMP:
- alg = "CCMP";
+ alg = "R-CCMP";
module = "rtllib_crypt_ccmp";
break;
default:
@@ -592,14 +593,14 @@ int rtllib_wx_set_encode_ext(struct rtllib_device *ieee,
}
printk(KERN_INFO "alg name:%s\n", alg);
- ops = rtllib_get_crypto_ops(alg);
+ ops = lib80211_get_crypto_ops(alg);
if (ops == NULL) {
char tempbuf[100];
memset(tempbuf, 0x00, 100);
sprintf(tempbuf, "%s", module);
request_module("%s", tempbuf);
- ops = rtllib_get_crypto_ops(alg);
+ ops = lib80211_get_crypto_ops(alg);
}
if (ops == NULL) {
RTLLIB_DEBUG_WX("%s: unknown crypto alg %d\n",
@@ -610,9 +611,9 @@ int rtllib_wx_set_encode_ext(struct rtllib_device *ieee,
}
if (*crypt == NULL || (*crypt)->ops != ops) {
- struct rtllib_crypt_data *new_crypt;
+ struct lib80211_crypt_data *new_crypt;
- rtllib_crypt_delayed_deinit(ieee, crypt);
+ lib80211_crypt_delayed_deinit(&ieee->crypt_info, crypt);
new_crypt = kzalloc(sizeof(*new_crypt), GFP_KERNEL);
if (new_crypt == NULL) {
@@ -641,7 +642,7 @@ int rtllib_wx_set_encode_ext(struct rtllib_device *ieee,
goto done;
}
if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
- ieee->tx_keyidx = idx;
+ ieee->crypt_info.tx_keyidx = idx;
sec.active_key = idx;
sec.flags |= SEC_ACTIVE_KEY;
}
@@ -674,6 +675,7 @@ done:
}
return ret;
}
+EXPORT_SYMBOL(rtllib_wx_set_encode_ext);
int rtllib_wx_get_encode_ext(struct rtllib_device *ieee,
struct iw_request_info *info,
@@ -681,7 +683,7 @@ int rtllib_wx_get_encode_ext(struct rtllib_device *ieee,
{
struct iw_point *encoding = &wrqu->encoding;
struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
- struct rtllib_crypt_data *crypt;
+ struct lib80211_crypt_data *crypt;
int idx, max_key_len;
max_key_len = encoding->length - sizeof(*ext);
@@ -690,18 +692,18 @@ int rtllib_wx_get_encode_ext(struct rtllib_device *ieee,
idx = encoding->flags & IW_ENCODE_INDEX;
if (idx) {
- if (idx < 1 || idx > WEP_KEYS)
+ if (idx < 1 || idx > NUM_WEP_KEYS)
return -EINVAL;
idx--;
} else {
- idx = ieee->tx_keyidx;
+ idx = ieee->crypt_info.tx_keyidx;
}
if (!(ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY) &&
(ext->alg != IW_ENCODE_ALG_WEP))
if (idx != 0 || (ieee->iw_mode != IW_MODE_INFRA))
return -EINVAL;
- crypt = ieee->crypt[idx];
+ crypt = ieee->crypt_info.crypt[idx];
encoding->flags = idx + 1;
memset(ext, 0, sizeof(*ext));
@@ -711,11 +713,11 @@ int rtllib_wx_get_encode_ext(struct rtllib_device *ieee,
ext->key_len = 0;
encoding->flags |= IW_ENCODE_DISABLED;
} else {
- if (strcmp(crypt->ops->name, "WEP") == 0)
+ if (strcmp(crypt->ops->name, "R-WEP") == 0)
ext->alg = IW_ENCODE_ALG_WEP;
- else if (strcmp(crypt->ops->name, "TKIP"))
+ else if (strcmp(crypt->ops->name, "R-TKIP"))
ext->alg = IW_ENCODE_ALG_TKIP;
- else if (strcmp(crypt->ops->name, "CCMP"))
+ else if (strcmp(crypt->ops->name, "R-CCMP"))
ext->alg = IW_ENCODE_ALG_CCMP;
else
return -EINVAL;
@@ -778,6 +780,7 @@ int rtllib_wx_set_mlme(struct rtllib_device *ieee,
return 0;
}
+EXPORT_SYMBOL(rtllib_wx_set_mlme);
int rtllib_wx_set_auth(struct rtllib_device *ieee,
struct iw_request_info *info,
@@ -830,6 +833,7 @@ int rtllib_wx_set_auth(struct rtllib_device *ieee,
}
return 0;
}
+EXPORT_SYMBOL(rtllib_wx_set_auth);
int rtllib_wx_set_gen_ie(struct rtllib_device *ieee, u8 *ie, size_t len)
{
@@ -846,10 +850,9 @@ int rtllib_wx_set_gen_ie(struct rtllib_device *ieee, u8 *ie, size_t len)
ieee->wps_ie_len = (len < MAX_WZC_IE_LEN) ? (len) :
(MAX_WZC_IE_LEN);
- buf = kmalloc(ieee->wps_ie_len, GFP_KERNEL);
+ buf = kmemdup(ie, ieee->wps_ie_len, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
- memcpy(buf, ie, ieee->wps_ie_len);
ieee->wps_ie = buf;
return 0;
}
@@ -860,10 +863,9 @@ int rtllib_wx_set_gen_ie(struct rtllib_device *ieee, u8 *ie, size_t len)
if (len) {
if (len != ie[1]+2)
return -EINVAL;
- buf = kmalloc(len, GFP_KERNEL);
+ buf = kmemdup(ie, len, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
- memcpy(buf, ie, len);
kfree(ieee->wpa_ie);
ieee->wpa_ie = buf;
ieee->wpa_ie_len = len;
@@ -874,3 +876,4 @@ int rtllib_wx_set_gen_ie(struct rtllib_device *ieee, u8 *ie, size_t len)
}
return 0;
}
+EXPORT_SYMBOL(rtllib_wx_set_gen_ie);
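The same kmalloc()+memcpy() to kmemdup() substitution made in the two hunks just above also appears in rtllib_softmac.c earlier and in speakup's keymap_store() further down; kmemdup() simply folds the copy into the allocation. Sketch with hypothetical buf/src/len names:

/* before */
buf = kmalloc(len, GFP_KERNEL);
if (buf == NULL)
	return -ENOMEM;
memcpy(buf, src, len);

/* after -- identical behaviour, one call */
buf = kmemdup(src, len, GFP_KERNEL);
if (buf == NULL)
	return -ENOMEM;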
diff --git a/drivers/staging/rtl8192u/ieee80211/api.c b/drivers/staging/rtl8192u/ieee80211/api.c
deleted file mode 100644
index 5f46e50e586e..000000000000
--- a/drivers/staging/rtl8192u/ieee80211/api.c
+++ /dev/null
@@ -1,244 +0,0 @@
-/*
- * Scatterlist Cryptographic API.
- *
- * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
- * Copyright (c) 2002 David S. Miller (davem@redhat.com)
- *
- * Portions derived from Cryptoapi, by Alexander Kjeldaas <astor@fast.no>
- * and Nettle, by Niels Möller.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- */
-#include "kmap_types.h"
-
-#include <linux/init.h>
-#include <linux/module.h>
-//#include <linux/crypto.h>
-#include "rtl_crypto.h"
-#include <linux/errno.h>
-#include <linux/rwsem.h>
-#include <linux/slab.h>
-#include "internal.h"
-
-LIST_HEAD(crypto_alg_list);
-DECLARE_RWSEM(crypto_alg_sem);
-
-static inline int crypto_alg_get(struct crypto_alg *alg)
-{
- return try_inc_mod_count(alg->cra_module);
-}
-
-static inline void crypto_alg_put(struct crypto_alg *alg)
-{
- if (alg->cra_module)
- __MOD_DEC_USE_COUNT(alg->cra_module);
-}
-
-struct crypto_alg *crypto_alg_lookup(const char *name)
-{
- struct crypto_alg *q, *alg = NULL;
-
- if (!name)
- return NULL;
-
- down_read(&crypto_alg_sem);
-
- list_for_each_entry(q, &crypto_alg_list, cra_list) {
- if (!(strcmp(q->cra_name, name))) {
- if (crypto_alg_get(q))
- alg = q;
- break;
- }
- }
-
- up_read(&crypto_alg_sem);
- return alg;
-}
-
-static int crypto_init_flags(struct crypto_tfm *tfm, u32 flags)
-{
- tfm->crt_flags = 0;
-
- switch (crypto_tfm_alg_type(tfm)) {
- case CRYPTO_ALG_TYPE_CIPHER:
- return crypto_init_cipher_flags(tfm, flags);
-
- case CRYPTO_ALG_TYPE_DIGEST:
- return crypto_init_digest_flags(tfm, flags);
-
- case CRYPTO_ALG_TYPE_COMPRESS:
- return crypto_init_compress_flags(tfm, flags);
-
- default:
- break;
- }
-
- BUG();
- return -EINVAL;
-}
-
-static int crypto_init_ops(struct crypto_tfm *tfm)
-{
- switch (crypto_tfm_alg_type(tfm)) {
- case CRYPTO_ALG_TYPE_CIPHER:
- return crypto_init_cipher_ops(tfm);
-
- case CRYPTO_ALG_TYPE_DIGEST:
- return crypto_init_digest_ops(tfm);
-
- case CRYPTO_ALG_TYPE_COMPRESS:
- return crypto_init_compress_ops(tfm);
-
- default:
- break;
- }
-
- BUG();
- return -EINVAL;
-}
-
-static void crypto_exit_ops(struct crypto_tfm *tfm)
-{
- switch (crypto_tfm_alg_type(tfm)) {
- case CRYPTO_ALG_TYPE_CIPHER:
- crypto_exit_cipher_ops(tfm);
- break;
-
- case CRYPTO_ALG_TYPE_DIGEST:
- crypto_exit_digest_ops(tfm);
- break;
-
- case CRYPTO_ALG_TYPE_COMPRESS:
- crypto_exit_compress_ops(tfm);
- break;
-
- default:
- BUG();
-
- }
-}
-
-struct crypto_tfm *crypto_alloc_tfm(const char *name, u32 flags)
-{
- struct crypto_tfm *tfm = NULL;
- struct crypto_alg *alg;
-
- alg = crypto_alg_mod_lookup(name);
- if (alg == NULL)
- goto out;
-
- tfm = kzalloc(sizeof(*tfm) + alg->cra_ctxsize, GFP_KERNEL);
- if (tfm == NULL)
- goto out_put;
-
- tfm->__crt_alg = alg;
-
- if (crypto_init_flags(tfm, flags))
- goto out_free_tfm;
-
- if (crypto_init_ops(tfm)) {
- crypto_exit_ops(tfm);
- goto out_free_tfm;
- }
-
- goto out;
-
-out_free_tfm:
- kfree(tfm);
- tfm = NULL;
-out_put:
- crypto_alg_put(alg);
-out:
- return tfm;
-}
-
-void crypto_free_tfm(struct crypto_tfm *tfm)
-{
- struct crypto_alg *alg = tfm->__crt_alg;
- int size = sizeof(*tfm) + alg->cra_ctxsize;
-
- crypto_exit_ops(tfm);
- crypto_alg_put(alg);
- memset(tfm, 0, size);
- kfree(tfm);
-}
-
-int crypto_register_alg(struct crypto_alg *alg)
-{
- int ret = 0;
- struct crypto_alg *q;
-
- down_write(&crypto_alg_sem);
-
- list_for_each_entry(q, &crypto_alg_list, cra_list) {
- if (!(strcmp(q->cra_name, alg->cra_name))) {
- ret = -EEXIST;
- goto out;
- }
- }
-
- list_add_tail(&alg->cra_list, &crypto_alg_list);
-out:
- up_write(&crypto_alg_sem);
- return ret;
-}
-
-int crypto_unregister_alg(struct crypto_alg *alg)
-{
- int ret = -ENOENT;
- struct crypto_alg *q;
-
- BUG_ON(!alg->cra_module);
-
- down_write(&crypto_alg_sem);
- list_for_each_entry(q, &crypto_alg_list, cra_list) {
- if (alg == q) {
- list_del(&alg->cra_list);
- ret = 0;
- goto out;
- }
- }
-out:
- up_write(&crypto_alg_sem);
- return ret;
-}
-
-int crypto_alg_available(const char *name, u32 flags)
-{
- int ret = 0;
- struct crypto_alg *alg = crypto_alg_mod_lookup(name);
-
- if (alg) {
- crypto_alg_put(alg);
- ret = 1;
- }
-
- return ret;
-}
-
-static int __init init_crypto(void)
-{
- printk(KERN_INFO "Initializing Cryptographic API\n");
- crypto_init_proc();
- return 0;
-}
-
-__initcall(init_crypto);
-
-/*
-EXPORT_SYMBOL_GPL(crypto_register_alg);
-EXPORT_SYMBOL_GPL(crypto_unregister_alg);
-EXPORT_SYMBOL_GPL(crypto_alloc_tfm);
-EXPORT_SYMBOL_GPL(crypto_free_tfm);
-EXPORT_SYMBOL_GPL(crypto_alg_available);
-*/
-
-EXPORT_SYMBOL_NOVERS(crypto_register_alg);
-EXPORT_SYMBOL_NOVERS(crypto_unregister_alg);
-EXPORT_SYMBOL_NOVERS(crypto_alloc_tfm);
-EXPORT_SYMBOL_NOVERS(crypto_free_tfm);
-EXPORT_SYMBOL_NOVERS(crypto_alg_available);
diff --git a/drivers/staging/rtl8712/rtl871x_mlme.c b/drivers/staging/rtl8712/rtl871x_mlme.c
index ef8eb6c7ee41..4277d0304b7a 100644
--- a/drivers/staging/rtl8712/rtl871x_mlme.c
+++ b/drivers/staging/rtl8712/rtl871x_mlme.c
@@ -551,7 +551,7 @@ void r8712_survey_event_callback(struct _adapter *adapter, u8 *pbuf)
ibss_wlan = r8712_find_network(
&pmlmepriv->scanned_queue,
pnetwork->MacAddress);
- if (!ibss_wlan) {
+ if (ibss_wlan) {
memcpy(ibss_wlan->network.IEs,
pnetwork->IEs, 8);
goto exit;
diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
index fb2e89c3056c..5385da2e9cdb 100644
--- a/drivers/staging/rtl8712/usb_intf.c
+++ b/drivers/staging/rtl8712/usb_intf.c
@@ -89,6 +89,7 @@ static struct usb_device_id rtl871x_usb_id_tbl[] = {
{USB_DEVICE(0x0DF6, 0x0045)},
{USB_DEVICE(0x0DF6, 0x0059)}, /* 11n mode disable */
{USB_DEVICE(0x0DF6, 0x004B)},
+ {USB_DEVICE(0x0DF6, 0x005D)},
{USB_DEVICE(0x0DF6, 0x0063)},
/* Sweex */
{USB_DEVICE(0x177F, 0x0154)},
diff --git a/drivers/staging/rts5139/rts51x.h b/drivers/staging/rts5139/rts51x.h
index 9415d5c05502..b2c58390bfc5 100644
--- a/drivers/staging/rts5139/rts51x.h
+++ b/drivers/staging/rts5139/rts51x.h
@@ -34,7 +34,6 @@
#include <linux/mutex.h>
#include <linux/cdrom.h>
#include <linux/kernel.h>
-#include <linux/version.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
diff --git a/drivers/staging/rts5139/rts51x_transport.h b/drivers/staging/rts5139/rts51x_transport.h
index f7aa87f7f1a9..8464c4836d5b 100644
--- a/drivers/staging/rts5139/rts51x_transport.h
+++ b/drivers/staging/rts5139/rts51x_transport.h
@@ -28,7 +28,6 @@
#define __RTS51X_TRANSPORT_H
#include <linux/kernel.h>
-#include <linux/version.h>
#include "rts51x.h"
#include "rts51x_chip.h"
diff --git a/drivers/staging/rts_pstor/rtsx.c b/drivers/staging/rts_pstor/rtsx.c
index 480b0ed2e4de..a7feb3e328a0 100644
--- a/drivers/staging/rts_pstor/rtsx.c
+++ b/drivers/staging/rts_pstor/rtsx.c
@@ -466,8 +466,6 @@ static int rtsx_control_thread(void *__dev)
struct rtsx_chip *chip = dev->chip;
struct Scsi_Host *host = rtsx_to_host(dev);
- current->flags |= PF_NOFREEZE;
-
for (;;) {
if (wait_for_completion_interruptible(&dev->cmnd_ready))
break;
@@ -1021,6 +1019,7 @@ static int __devinit rtsx_probe(struct pci_dev *pci,
th = kthread_create(rtsx_scan_thread, dev, "rtsx-scan");
if (IS_ERR(th)) {
printk(KERN_ERR "Unable to start the device-scanning thread\n");
+ complete(&dev->scanning_done);
quiesce_and_remove_host(dev);
err = PTR_ERR(th);
goto errout;
diff --git a/drivers/staging/sep/sep_driver.c b/drivers/staging/sep/sep_driver.c
index 8ac3faea2d2f..6b3d156d4140 100644
--- a/drivers/staging/sep/sep_driver.c
+++ b/drivers/staging/sep/sep_driver.c
@@ -1235,7 +1235,7 @@ static void sep_build_lli_table(struct sep_device *sep,
/* Counter of lli array entry */
u32 array_counter;
- /* Init currrent table data size and lli array entry counter */
+ /* Init current table data size and lli array entry counter */
curr_table_data_size = 0;
array_counter = 0;
*num_table_entries_ptr = 1;
@@ -2120,6 +2120,8 @@ static int sep_prepare_input_output_dma_table_in_dcb(struct sep_device *sep,
}
}
if (tail_size) {
+ if (tail_size > sizeof(dcb_table_ptr->tail_data))
+ return -EINVAL;
if (is_kva == true) {
memcpy(dcb_table_ptr->tail_data,
(void *)(app_in_address + data_in_size -
diff --git a/drivers/staging/serial/68360serial.c b/drivers/staging/serial/68360serial.c
index 0a3e8787ed50..daf0b1d0dc28 100644
--- a/drivers/staging/serial/68360serial.c
+++ b/drivers/staging/serial/68360serial.c
@@ -2771,8 +2771,8 @@ static int __init rs_360_init(void)
*/
/* cpm_install_handler(IRQ_MACHSPEC | state->irq, rs_360_interrupt, info); */
/*request_irq(IRQ_MACHSPEC | state->irq, rs_360_interrupt, */
- request_irq(state->irq, rs_360_interrupt,
- IRQ_FLG_LOCK, "ttyS", (void *)info);
+ request_irq(state->irq, rs_360_interrupt, 0, "ttyS",
+ (void *)info);
/* Set up the baud rate generator.
*/
diff --git a/drivers/staging/serqt_usb2/serqt_usb2.c b/drivers/staging/serqt_usb2/serqt_usb2.c
index c44e41af2880..1c5780f1571b 100644
--- a/drivers/staging/serqt_usb2/serqt_usb2.c
+++ b/drivers/staging/serqt_usb2/serqt_usb2.c
@@ -16,7 +16,7 @@
#include <linux/usb/serial.h>
#include <linux/uaccess.h>
-static int debug;
+static bool debug;
/* Version Information */
#define DRIVER_VERSION "v2.14"
diff --git a/drivers/staging/sm7xx/smtcfb.c b/drivers/staging/sm7xx/smtcfb.c
index 39dbf339a4fc..ae0035f327e7 100644
--- a/drivers/staging/sm7xx/smtcfb.c
+++ b/drivers/staging/sm7xx/smtcfb.c
@@ -1024,9 +1024,9 @@ failed_free:
/* Jason (08/11/2009) PCI_DRV wrapper essential structs */
static DEFINE_PCI_DEVICE_TABLE(smtcfb_pci_table) = {
- {0x126f, 0x710, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x126f, 0x712, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x126f, 0x720, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ { PCI_DEVICE(0x126f, 0x710), },
+ { PCI_DEVICE(0x126f, 0x712), },
+ { PCI_DEVICE(0x126f, 0x720), },
{0,}
};
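PCI_DEVICE() fills in the vendor and device IDs and wildcards the subsystem IDs, so each converted entry above is equivalent to the open-coded initializer it replaces (class, class_mask and driver_data stay zero either way):

/* { 0x126f, 0x710, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }   is the same as */
{ PCI_DEVICE(0x126f, 0x710), },
/* which expands to:
 * .vendor = 0x126f, .device = 0x710,
 * .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
 */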
diff --git a/drivers/staging/speakup/kobjects.c b/drivers/staging/speakup/kobjects.c
index 07a7f5432597..2093896c546b 100644
--- a/drivers/staging/speakup/kobjects.c
+++ b/drivers/staging/speakup/kobjects.c
@@ -265,12 +265,11 @@ static ssize_t keymap_store(struct kobject *kobj, struct kobj_attribute *attr,
unsigned long flags;
spk_lock(flags);
- in_buff = kmalloc(count + 1, GFP_ATOMIC);
+ in_buff = kmemdup(buf, count + 1, GFP_ATOMIC);
if (!in_buff) {
spk_unlock(flags);
return -ENOMEM;
}
- memcpy(in_buff, buf, count + 1);
if (strchr("dDrR", *in_buff)) {
set_key_info(key_defaults, key_buf);
pr_info("keymap set to default values\n");
diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c
index 8be560458977..c7b03f0ef2dd 100644
--- a/drivers/staging/speakup/main.c
+++ b/drivers/staging/speakup/main.c
@@ -2268,8 +2268,6 @@ static int __init speakup_init(void)
set_mask_bits(0, i, 2);
set_key_info(key_defaults, key_buf);
- if (quiet_boot)
- spk_shut_up |= 0x01;
/* From here on out, initializations can fail. */
err = speakup_add_virtual_keyboard();
@@ -2292,6 +2290,9 @@ static int __init speakup_init(void)
goto error_kobjects;
}
+ if (quiet_boot)
+ spk_shut_up |= 0x01;
+
err = speakup_kobj_init();
if (err)
goto error_kobjects;
diff --git a/drivers/staging/speakup/speakup.h b/drivers/staging/speakup/speakup.h
index 412b87947f66..e66579e6147a 100644
--- a/drivers/staging/speakup/speakup.h
+++ b/drivers/staging/speakup/speakup.h
@@ -116,7 +116,7 @@ extern int bleep_time, bell_pos;
extern int spell_delay, key_echo;
extern short punc_mask;
extern short pitch_shift, synth_flags;
-extern int quiet_boot;
+extern bool quiet_boot;
extern char *synth_name;
extern struct bleep unprocessed_sound;
diff --git a/drivers/staging/speakup/synth.c b/drivers/staging/speakup/synth.c
index c241074a4b5e..2222d6919ef5 100644
--- a/drivers/staging/speakup/synth.c
+++ b/drivers/staging/speakup/synth.c
@@ -22,7 +22,7 @@ static struct spk_synth *synths[MAXSYNTHS];
struct spk_synth *synth;
char pitch_buff[32] = "";
static int module_status;
-int quiet_boot;
+bool quiet_boot;
struct speakup_info_t speakup_info = {
.spinlock = __SPIN_LOCK_UNLOCKED(speakup_info.spinlock),
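The int-to-bool flips here (serqt_usb2's "debug" above, speakup's "quiet_boot") line the variables up with their module_param(..., bool, ...) declarations elsewhere in those drivers; as of this kernel a bool parameter is expected to be backed by a real C bool. Illustrative declaration only, with a hypothetical description string and permissions:

#include <linux/module.h>

static bool quiet_boot;
module_param(quiet_boot, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(quiet_boot, "Keep the synthesizer quiet during early boot");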
diff --git a/drivers/staging/spectra/Kconfig b/drivers/staging/spectra/Kconfig
deleted file mode 100644
index 4fc206484830..000000000000
--- a/drivers/staging/spectra/Kconfig
+++ /dev/null
@@ -1,41 +0,0 @@
-
-menuconfig SPECTRA
- tristate "Denali Spectra Flash Translation Layer"
- depends on BLOCK
- depends on X86_MRST
- default n
- ---help---
- Enable the FTL pseudo-filesystem used with the NAND Flash
- controller on Intel Moorestown Platform to pretend to be a disk.
-
-choice
- prompt "Compile for"
- depends on SPECTRA
- default SPECTRA_MRST_HW
-
-config SPECTRA_MRST_HW
- bool "Moorestown hardware mode"
- help
- Driver communicates with the Moorestown hardware's register interface.
- in DMA mode.
-
-config SPECTRA_MTD
- bool "Linux MTD mode"
- depends on MTD
- help
- Driver communicates with the kernel MTD subsystem instead of its own
- built-in hardware driver.
-
-config SPECTRA_EMU
- bool "RAM emulator testing"
- help
- Driver emulates Flash on a RAM buffer and / or disk file. Useful to test the behavior of FTL layer.
-
-endchoice
-
-config SPECTRA_MRST_HW_DMA
- bool
- default n
- depends on SPECTRA_MRST_HW
- help
- Use DMA for native hardware interface.
diff --git a/drivers/staging/spectra/Makefile b/drivers/staging/spectra/Makefile
deleted file mode 100644
index f777dfba05a5..000000000000
--- a/drivers/staging/spectra/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-#
-# Makefile of Intel Moorestown NAND controller driver
-#
-
-obj-$(CONFIG_SPECTRA) += spectra.o
-spectra-y := ffsport.o flash.o lld.o
-spectra-$(CONFIG_SPECTRA_MRST_HW) += lld_nand.o
-spectra-$(CONFIG_SPECTRA_MRST_HW_DMA) += lld_cdma.o
-spectra-$(CONFIG_SPECTRA_EMU) += lld_emu.o
-spectra-$(CONFIG_SPECTRA_MTD) += lld_mtd.o
-
diff --git a/drivers/staging/spectra/README b/drivers/staging/spectra/README
deleted file mode 100644
index ecba559b899c..000000000000
--- a/drivers/staging/spectra/README
+++ /dev/null
@@ -1,29 +0,0 @@
-This is a driver for NAND controller of Intel Moorestown platform.
-
-This driver is a standalone linux block device driver, it acts as if it's a normal hard disk.
-It includes three layer:
- block layer interface - file ffsport.c
- Flash Translation Layer (FTL) - file flash.c (implement the NAND flash Translation Layer, includs address mapping, garbage collection, wear-leveling and so on)
- Low level layer - file lld_nand.c/lld_cdma.c/lld_emu.c (which implements actual controller hardware registers access)
-
-This driver can be build as modules or build-in.
-
-Dependency:
-This driver has dependency on IA Firmware of Intel Moorestown platform.
-It need the IA Firmware to create the block table for the first time.
-And to validate this driver code without IA Firmware, you can change the
-macro AUTO_FORMAT_FLASH from 0 to 1 in file spectraswconfig.h. Thus the
-driver will erase the whole nand flash and create a new block table.
-
-TODO:
- - Enable Command DMA feature support
- - lower the memory footprint
- - Remove most of the unnecessary global variables
- - Change all the upcase variable / functions name to lowercase
- - Some other misc bugs
-
-Please send patches to:
- Greg Kroah-Hartman <gregkh@suse.de>
-
-And Cc to: Gao Yunpeng <yunpeng.gao@intel.com>
-
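The README above describes a three-layer architecture: block interface (ffsport.c), flash translation layer (flash.c) and low-level hardware access (lld_nand.c and friends). As a rough orientation aid only, the stubbed user-space sketch below mimics that call flow; the function names mirror the driver, but the bodies, the 128 KiB block size and the 2 KiB page size are assumptions, not the driver's real logic.

/* Hypothetical, stubbed sketch of the layering described in the README:
 * ffsport.c (block interface) -> flash.c (FTL) -> lld_nand.c (hardware). */
#include <stdio.h>
#include <stdint.h>

static int NAND_Read_Page_Main(uint8_t *buf, uint32_t block, uint16_t page)
{
	/* lld_nand.c would talk to the controller registers here */
	(void)buf;
	printf("LLD: read block %u page %u\n", block, page);
	return 0;
}

static int GLOB_FTL_Page_Read(uint8_t *buf, uint64_t logical_addr)
{
	/* flash.c would map the logical address through the block table */
	uint32_t block = (uint32_t)(logical_addr >> 17);           /* assumed 128 KiB blocks */
	uint16_t page  = (uint16_t)((logical_addr >> 11) & 0x3f);  /* assumed 2 KiB pages    */
	return NAND_Read_Page_Main(buf, block, page);
}

int main(void)
{
	uint8_t page_buf[2048];
	/* ffsport.c would be driven by block-layer requests instead of main() */
	return GLOB_FTL_Page_Read(page_buf, 0x20000);
}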
diff --git a/drivers/staging/spectra/ffsdefs.h b/drivers/staging/spectra/ffsdefs.h
deleted file mode 100644
index a9e9cd233d2a..000000000000
--- a/drivers/staging/spectra/ffsdefs.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * NAND Flash Controller Device Driver
- * Copyright (c) 2009, Intel Corporation and its suppliers.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- */
-
-#ifndef _FFSDEFS_
-#define _FFSDEFS_
-
-#define CLEAR 0 /*use this to clear a field instead of "fail"*/
-#define SET 1 /*use this to set a field instead of "pass"*/
-#define FAIL 1 /*failed flag*/
-#define PASS 0 /*success flag*/
-#define ERR -1 /*error flag*/
-
-#define ERASE_CMD 10
-#define WRITE_MAIN_CMD 11
-#define READ_MAIN_CMD 12
-#define WRITE_SPARE_CMD 13
-#define READ_SPARE_CMD 14
-#define WRITE_MAIN_SPARE_CMD 15
-#define READ_MAIN_SPARE_CMD 16
-#define MEMCOPY_CMD 17
-#define DUMMY_CMD 99
-
-#define EVENT_PASS 0x00
-#define EVENT_CORRECTABLE_DATA_ERROR_FIXED 0x01
-#define EVENT_UNCORRECTABLE_DATA_ERROR 0x02
-#define EVENT_TIME_OUT 0x03
-#define EVENT_PROGRAM_FAILURE 0x04
-#define EVENT_ERASE_FAILURE 0x05
-#define EVENT_MEMCOPY_FAILURE 0x06
-#define EVENT_FAIL 0x07
-
-#define EVENT_NONE 0x22
-#define EVENT_DMA_CMD_COMP 0x77
-#define EVENT_ECC_TRANSACTION_DONE 0x88
-#define EVENT_DMA_CMD_FAIL 0x99
-
-#define CMD_PASS 0
-#define CMD_FAIL 1
-#define CMD_ABORT 2
-#define CMD_NOT_DONE 3
-
-#endif /* _FFSDEFS_ */
diff --git a/drivers/staging/spectra/ffsport.c b/drivers/staging/spectra/ffsport.c
deleted file mode 100644
index 86d556d6cf98..000000000000
--- a/drivers/staging/spectra/ffsport.c
+++ /dev/null
@@ -1,834 +0,0 @@
-/*
- * NAND Flash Controller Device Driver
- * Copyright (c) 2009, Intel Corporation and its suppliers.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- */
-
-#include "ffsport.h"
-#include "flash.h"
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/blkdev.h>
-#include <linux/wait.h>
-#include <linux/mutex.h>
-#include <linux/kthread.h>
-#include <linux/log2.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/async.h>
-
-/**** Helper functions used for Div, Remainder operation on u64 ****/
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: GLOB_Calc_Used_Bits
-* Inputs: Power of 2 number
-* Outputs: Number of Used Bits
-* 0, if the argument is 0
-* Description: Calculate the number of bits used by a given power of 2 number
-* The number can be up to 32 bits
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-int GLOB_Calc_Used_Bits(u32 n)
-{
- int tot_bits = 0;
-
- if (n >= 1 << 16) {
- n >>= 16;
- tot_bits += 16;
- }
-
- if (n >= 1 << 8) {
- n >>= 8;
- tot_bits += 8;
- }
-
- if (n >= 1 << 4) {
- n >>= 4;
- tot_bits += 4;
- }
-
- if (n >= 1 << 2) {
- n >>= 2;
- tot_bits += 2;
- }
-
- if (n >= 1 << 1)
- tot_bits += 1;
-
- return ((n == 0) ? (0) : tot_bits);
-}
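GLOB_Calc_Used_Bits() is a hand-rolled floor(log2(n)) for the power-of-two sizes used elsewhere in the driver. The stand-alone check below (an addition for illustration, not part of the driver) shows the same cascade of shifts agreeing with a compiler builtin for power-of-two inputs:

/* Stand-alone check that the shift cascade equals floor(log2(n))
 * for power-of-two inputs, the only values the driver passes in. */
#include <assert.h>
#include <stdint.h>

static int calc_used_bits(uint32_t n)
{
	int bits = 0;

	if (n >= 1U << 16) { n >>= 16; bits += 16; }
	if (n >= 1U << 8)  { n >>= 8;  bits += 8;  }
	if (n >= 1U << 4)  { n >>= 4;  bits += 4;  }
	if (n >= 1U << 2)  { n >>= 2;  bits += 2;  }
	if (n >= 1U << 1)  bits += 1;

	return n == 0 ? 0 : bits;
}

int main(void)
{
	for (int k = 0; k < 32; k++) {
		uint32_t n = 1U << k;
		/* __builtin_ctz(n) equals log2(n) when n is a power of two */
		assert(calc_used_bits(n) == __builtin_ctz(n));
	}
	return 0;
}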
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: GLOB_u64_Div
-* Inputs: Number of u64
-* A power of 2 number as Division
-* Outputs: Quotient of the Divisor operation
-* Description: It divides the address by the divisor using a bit shift operation
-* (essentially without explicitly using "/").
-* The divisor is a power of 2 and the dividend is a u64
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u64 GLOB_u64_Div(u64 addr, u32 divisor)
-{
- return (u64)(addr >> GLOB_Calc_Used_Bits(divisor));
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: GLOB_u64_Remainder
-* Inputs: Number of u64
-* Divisor Type (1 -PageAddress, 2- BlockAddress)
-* Outputs: Remainder of the Division operation
-* Description: It calculates the remainder of a u64 number divided by a
-* power-of-2 divisor, using bit shifting and multiplication
-* (essentially without explicitly using "/").
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u64 GLOB_u64_Remainder(u64 addr, u32 divisor_type)
-{
- u64 result = 0;
-
- if (divisor_type == 1) { /* Remainder -- Page */
- result = (addr >> DeviceInfo.nBitsInPageDataSize);
- result = result * DeviceInfo.wPageDataSize;
- } else if (divisor_type == 2) { /* Remainder -- Block */
- result = (addr >> DeviceInfo.nBitsInBlockDataSize);
- result = result * DeviceInfo.wBlockDataSize;
- }
-
- result = addr - result;
-
- return result;
-}
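Because the divisors here (page size, block size) are always powers of two, GLOB_u64_Div() and GLOB_u64_Remainder() are a shift and a mask in disguise. A minimal sketch of the equivalent arithmetic, with an assumed 2048-byte page size:

/* Division and remainder by a power-of-two size reduce to shift and mask.
 * The 2048-byte page is an assumption for the example, not read from hardware. */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	const uint32_t page_size = 2048;          /* assumed */
	const unsigned shift = 11;                /* log2(2048) */
	const uint64_t addr = 0x123456789ULL;

	uint64_t page_index = addr >> shift;             /* GLOB_u64_Div()       */
	uint64_t offset     = addr & (page_size - 1);    /* GLOB_u64_Remainder() */

	/* Same result as the shift-multiply-subtract done in the driver */
	assert(offset == addr - (addr >> shift) * page_size);
	assert(page_index * page_size + offset == addr);
	return 0;
}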
-
-#define NUM_DEVICES 1
-#define PARTITIONS 8
-
-#define GLOB_SBD_NAME "nd"
-#define GLOB_SBD_IRQ_NUM (29)
-
-#define GLOB_SBD_IOCTL_GC (0x7701)
-#define GLOB_SBD_IOCTL_WL (0x7702)
-#define GLOB_SBD_IOCTL_FORMAT (0x7703)
-#define GLOB_SBD_IOCTL_ERASE_FLASH (0x7704)
-#define GLOB_SBD_IOCTL_FLUSH_CACHE (0x7705)
-#define GLOB_SBD_IOCTL_COPY_BLK_TABLE (0x7706)
-#define GLOB_SBD_IOCTL_COPY_WEAR_LEVELING_TABLE (0x7707)
-#define GLOB_SBD_IOCTL_GET_NAND_INFO (0x7708)
-#define GLOB_SBD_IOCTL_WRITE_DATA (0x7709)
-#define GLOB_SBD_IOCTL_READ_DATA (0x770A)
-
-static int reserved_mb = 0;
-module_param(reserved_mb, int, 0);
-MODULE_PARM_DESC(reserved_mb, "Reserved space for OS image, in MiB (default 25 MiB)");
-
-int nand_debug_level;
-module_param(nand_debug_level, int, 0644);
-MODULE_PARM_DESC(nand_debug_level, "debug level value: 1-3");
-
-MODULE_LICENSE("GPL");
-
-struct spectra_nand_dev {
- struct pci_dev *dev;
- u64 size;
- u16 users;
- spinlock_t qlock;
- void __iomem *ioaddr; /* Mapped address */
- struct request_queue *queue;
- struct task_struct *thread;
- struct gendisk *gd;
- u8 *tmp_buf;
-};
-
-
-static int GLOB_SBD_majornum;
-
-static char *GLOB_version = GLOB_VERSION;
-
-static struct spectra_nand_dev nand_device[NUM_DEVICES];
-
-static struct mutex spectra_lock;
-
-static int res_blks_os = 1;
-
-struct spectra_indentfy_dev_tag IdentifyDeviceData;
-
-static int force_flush_cache(void)
-{
- nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (ERR == GLOB_FTL_Flush_Cache()) {
- printk(KERN_ERR "Fail to Flush FTL Cache!\n");
- return -EFAULT;
- }
-#if CMD_DMA
- if (glob_ftl_execute_cmds())
- return -EIO;
- else
- return 0;
-#endif
- return 0;
-}
-
-struct ioctl_rw_page_info {
- u8 *data;
- unsigned int page;
-};
-
-static int ioctl_read_page_data(unsigned long arg)
-{
- u8 *buf;
- struct ioctl_rw_page_info info;
- int result = PASS;
-
- if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
- return -EFAULT;
-
- buf = kmalloc(IdentifyDeviceData.PageDataSize, GFP_ATOMIC);
- if (!buf) {
- printk(KERN_ERR "ioctl_read_page_data: "
- "failed to allocate memory\n");
- return -ENOMEM;
- }
-
- mutex_lock(&spectra_lock);
- result = GLOB_FTL_Page_Read(buf,
- (u64)info.page * IdentifyDeviceData.PageDataSize);
- mutex_unlock(&spectra_lock);
-
- if (copy_to_user((void __user *)info.data, buf,
- IdentifyDeviceData.PageDataSize)) {
- printk(KERN_ERR "ioctl_read_page_data: "
- "failed to copy user data\n");
- kfree(buf);
- return -EFAULT;
- }
-
- kfree(buf);
- return result;
-}
-
-static int ioctl_write_page_data(unsigned long arg)
-{
- u8 *buf;
- struct ioctl_rw_page_info info;
- int result = PASS;
-
- if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
- return -EFAULT;
-
- buf = memdup_user((void __user *)info.data,
- IdentifyDeviceData.PageDataSize);
- if (IS_ERR(buf)) {
- printk(KERN_ERR "ioctl_write_page_data: "
- "failed to copy user data\n");
- return PTR_ERR(buf);
- }
-
- mutex_lock(&spectra_lock);
- result = GLOB_FTL_Page_Write(buf,
- (u64)info.page * IdentifyDeviceData.PageDataSize);
- mutex_unlock(&spectra_lock);
-
- kfree(buf);
- return result;
-}
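The two page-I/O ioctls above take a struct ioctl_rw_page_info holding a user buffer pointer and a page index. A user-space caller might look like the sketch below; the /dev/nda node name (GLOB_SBD_NAME plus the disk letter) and the 2048-byte buffer are assumptions, and the real page size should be queried first via GLOB_SBD_IOCTL_GET_NAND_INFO.

/* Hypothetical user-space caller of the page-read ioctl. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#define GLOB_SBD_IOCTL_READ_DATA 0x770A	/* matches the driver's definition */

struct ioctl_rw_page_info {
	unsigned char *data;
	unsigned int page;
};

int main(void)
{
	unsigned char buf[2048];	/* assumed page size */
	struct ioctl_rw_page_info info = { .data = buf, .page = 0 };
	int fd = open("/dev/nda", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, GLOB_SBD_IOCTL_READ_DATA, &info) < 0)
		perror("ioctl");
	close(fd);
	return 0;
}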
-
-/* Return how many blocks should be reserved for bad block replacement */
-static int get_res_blk_num_bad_blk(void)
-{
- return IdentifyDeviceData.wDataBlockNum / 10;
-}
-
-/* Return how many blocks should be reserved for OS image */
-static int get_res_blk_num_os(void)
-{
- u32 res_blks, blk_size;
-
- blk_size = IdentifyDeviceData.PageDataSize *
- IdentifyDeviceData.PagesPerBlock;
-
- res_blks = (reserved_mb * 1024 * 1024) / blk_size;
-
- if ((res_blks < 1) || (res_blks >= IdentifyDeviceData.wDataBlockNum))
- res_blks = 1; /* Reserved 1 block for block table */
-
- return res_blks;
-}
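The reservation logic converts the reserved_mb module parameter into whole blocks and falls back to a single block when the result is out of range. A worked example of that arithmetic, with assumed geometry (the page size, pages per block and data-block count below are illustrative values only):

/* Worked example of the reserved-block computation above. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t page_data_size  = 2048;   /* assumed */
	uint32_t pages_per_block = 64;     /* assumed */
	uint32_t data_block_num  = 2048;   /* assumed */
	uint32_t reserved_mb     = 25;     /* module parameter */

	uint32_t blk_size = page_data_size * pages_per_block;       /* 128 KiB   */
	uint32_t res_blks = (reserved_mb * 1024 * 1024) / blk_size; /* 200 blocks */

	if (res_blks < 1 || res_blks >= data_block_num)
		res_blks = 1;	/* keep at least the block-table block */

	printf("blocks reserved for the OS image: %u\n", res_blks);
	return 0;
}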
-
-/* Transfer a full request. */
-static int do_transfer(struct spectra_nand_dev *tr, struct request *req)
-{
- u64 start_addr, addr;
- u32 logical_start_sect, hd_start_sect;
- u32 nsect, hd_sects;
- u32 rsect, tsect = 0;
- char *buf;
- u32 ratio = IdentifyDeviceData.PageDataSize >> 9;
-
- start_addr = (u64)(blk_rq_pos(req)) << 9;
- /* Add a big enough offset to prevent the OS Image from
- * being accessed or damaged by file system */
- start_addr += IdentifyDeviceData.PageDataSize *
- IdentifyDeviceData.PagesPerBlock *
- res_blks_os;
-
- if (req->cmd_type & REQ_FLUSH) {
- if (force_flush_cache()) /* Fail to flush cache */
- return -EIO;
- else
- return 0;
- }
-
- if (req->cmd_type != REQ_TYPE_FS)
- return -EIO;
-
- if (blk_rq_pos(req) + blk_rq_cur_sectors(req) > get_capacity(tr->gd)) {
- printk(KERN_ERR "Spectra error: request over the NAND "
- "capacity!sector %d, current_nr_sectors %d, "
- "while capacity is %d\n",
- (int)blk_rq_pos(req),
- blk_rq_cur_sectors(req),
- (int)get_capacity(tr->gd));
- return -EIO;
- }
-
- logical_start_sect = start_addr >> 9;
- hd_start_sect = logical_start_sect / ratio;
- rsect = logical_start_sect - hd_start_sect * ratio;
-
- addr = (u64)hd_start_sect * ratio * 512;
- buf = req->buffer;
- nsect = blk_rq_cur_sectors(req);
-
- if (rsect)
- tsect = (ratio - rsect) < nsect ? (ratio - rsect) : nsect;
-
- switch (rq_data_dir(req)) {
- case READ:
- /* Read the first NAND page */
- if (rsect) {
- if (GLOB_FTL_Page_Read(tr->tmp_buf, addr)) {
- printk(KERN_ERR "Error in %s, Line %d\n",
- __FILE__, __LINE__);
- return -EIO;
- }
- memcpy(buf, tr->tmp_buf + (rsect << 9), tsect << 9);
- addr += IdentifyDeviceData.PageDataSize;
- buf += tsect << 9;
- nsect -= tsect;
- }
-
- /* Read the other NAND pages */
- for (hd_sects = nsect / ratio; hd_sects > 0; hd_sects--) {
- if (GLOB_FTL_Page_Read(buf, addr)) {
- printk(KERN_ERR "Error in %s, Line %d\n",
- __FILE__, __LINE__);
- return -EIO;
- }
- addr += IdentifyDeviceData.PageDataSize;
- buf += IdentifyDeviceData.PageDataSize;
- }
-
- /* Read the last NAND pages */
- if (nsect % ratio) {
- if (GLOB_FTL_Page_Read(tr->tmp_buf, addr)) {
- printk(KERN_ERR "Error in %s, Line %d\n",
- __FILE__, __LINE__);
- return -EIO;
- }
- memcpy(buf, tr->tmp_buf, (nsect % ratio) << 9);
- }
-#if CMD_DMA
- if (glob_ftl_execute_cmds())
- return -EIO;
- else
- return 0;
-#endif
- return 0;
-
- case WRITE:
- /* Write the first NAND page */
- if (rsect) {
- if (GLOB_FTL_Page_Read(tr->tmp_buf, addr)) {
- printk(KERN_ERR "Error in %s, Line %d\n",
- __FILE__, __LINE__);
- return -EIO;
- }
- memcpy(tr->tmp_buf + (rsect << 9), buf, tsect << 9);
- if (GLOB_FTL_Page_Write(tr->tmp_buf, addr)) {
- printk(KERN_ERR "Error in %s, Line %d\n",
- __FILE__, __LINE__);
- return -EIO;
- }
- addr += IdentifyDeviceData.PageDataSize;
- buf += tsect << 9;
- nsect -= tsect;
- }
-
- /* Write the other NAND pages */
- for (hd_sects = nsect / ratio; hd_sects > 0; hd_sects--) {
- if (GLOB_FTL_Page_Write(buf, addr)) {
- printk(KERN_ERR "Error in %s, Line %d\n",
- __FILE__, __LINE__);
- return -EIO;
- }
- addr += IdentifyDeviceData.PageDataSize;
- buf += IdentifyDeviceData.PageDataSize;
- }
-
- /* Write the last NAND pages */
- if (nsect % ratio) {
- if (GLOB_FTL_Page_Read(tr->tmp_buf, addr)) {
- printk(KERN_ERR "Error in %s, Line %d\n",
- __FILE__, __LINE__);
- return -EIO;
- }
- memcpy(tr->tmp_buf, buf, (nsect % ratio) << 9);
- if (GLOB_FTL_Page_Write(tr->tmp_buf, addr)) {
- printk(KERN_ERR "Error in %s, Line %d\n",
- __FILE__, __LINE__);
- return -EIO;
- }
- }
-#if CMD_DMA
- if (glob_ftl_execute_cmds())
- return -EIO;
- else
- return 0;
-#endif
- return 0;
-
- default:
- printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req));
- return -EIO;
- }
-}
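do_transfer() has to bridge 512-byte block-layer sectors onto NAND pages that are `ratio` sectors long: an unaligned head chunk (rsect/tsect), a run of whole pages, then a tail. A stand-alone sketch of just that splitting arithmetic, assuming 2048-byte pages (ratio = 4):

/* Splits a 512-byte-sector request into head / whole-page / tail pieces,
 * mirroring the arithmetic in do_transfer(). ratio = PageDataSize >> 9. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint32_t ratio = 2048 >> 9;	/* 4 sectors per page (assumed) */
	uint32_t start_sect = 6;		/* example request */
	uint32_t nsect = 11;

	uint32_t hd_start_sect = start_sect / ratio;          /* first page     */
	uint32_t rsect = start_sect - hd_start_sect * ratio;  /* offset in page */
	uint32_t tsect = 0;

	if (rsect)	/* head piece, up to the next page boundary */
		tsect = (ratio - rsect) < nsect ? (ratio - rsect) : nsect;

	uint32_t body_pages = (nsect - tsect) / ratio;  /* full pages            */
	uint32_t tail_sects = (nsect - tsect) % ratio;  /* partial trailing page */

	printf("head %u sectors, %u full pages, tail %u sectors\n",
	       tsect, body_pages, tail_sects);
	return 0;
}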
-
-/* This function is copied from drivers/mtd/mtd_blkdevs.c */
-static int spectra_trans_thread(void *arg)
-{
- struct spectra_nand_dev *tr = arg;
- struct request_queue *rq = tr->queue;
- struct request *req = NULL;
-
- /* we might get involved when memory gets low, so use PF_MEMALLOC */
- current->flags |= PF_MEMALLOC;
-
- spin_lock_irq(rq->queue_lock);
- while (!kthread_should_stop()) {
- int res;
-
- if (!req) {
- req = blk_fetch_request(rq);
- if (!req) {
- set_current_state(TASK_INTERRUPTIBLE);
- spin_unlock_irq(rq->queue_lock);
- schedule();
- spin_lock_irq(rq->queue_lock);
- continue;
- }
- }
-
- spin_unlock_irq(rq->queue_lock);
-
- mutex_lock(&spectra_lock);
- res = do_transfer(tr, req);
- mutex_unlock(&spectra_lock);
-
- spin_lock_irq(rq->queue_lock);
-
- if (!__blk_end_request_cur(req, res))
- req = NULL;
- }
-
- if (req)
- __blk_end_request_all(req, -EIO);
-
- spin_unlock_irq(rq->queue_lock);
-
- return 0;
-}
-
-
-/* Request function that "handles clustering". */
-static void GLOB_SBD_request(struct request_queue *rq)
-{
- struct spectra_nand_dev *pdev = rq->queuedata;
- wake_up_process(pdev->thread);
-}
-
-static int GLOB_SBD_open(struct block_device *bdev, fmode_t mode)
-
-{
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
- return 0;
-}
-
-static int GLOB_SBD_release(struct gendisk *disk, fmode_t mode)
-{
- int ret;
-
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- mutex_lock(&spectra_lock);
- ret = force_flush_cache();
- mutex_unlock(&spectra_lock);
-
- return 0;
-}
-
-static int GLOB_SBD_getgeo(struct block_device *bdev, struct hd_geometry *geo)
-{
- geo->heads = 4;
- geo->sectors = 16;
- geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
-
- nand_dbg_print(NAND_DBG_DEBUG,
- "heads: %d, sectors: %d, cylinders: %d\n",
- geo->heads, geo->sectors, geo->cylinders);
-
- return 0;
-}
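getgeo fabricates a fixed 4-head, 16-sector geometry, so the reported cylinder count is simply the capacity divided by 64 sectors. A small arithmetic sketch with an assumed 1 GiB capacity:

/* CHS numbers the driver would report for an assumed 1 GiB device. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t capacity_sects = (1ULL << 30) >> 9;	/* 1 GiB in 512-byte sectors */
	unsigned heads = 4, sectors = 16;
	uint64_t cylinders = capacity_sects / (heads * sectors);

	printf("heads %u, sectors %u, cylinders %llu\n",
	       heads, sectors, (unsigned long long)cylinders);
	return 0;
}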
-
-int GLOB_SBD_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg)
-{
- int ret;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- switch (cmd) {
- case GLOB_SBD_IOCTL_GC:
- nand_dbg_print(NAND_DBG_DEBUG,
- "Spectra IOCTL: Garbage Collection "
- "being performed\n");
- if (PASS != GLOB_FTL_Garbage_Collection())
- return -EFAULT;
- return 0;
-
- case GLOB_SBD_IOCTL_WL:
- nand_dbg_print(NAND_DBG_DEBUG,
- "Spectra IOCTL: Static Wear Leveling "
- "being performed\n");
- if (PASS != GLOB_FTL_Wear_Leveling())
- return -EFAULT;
- return 0;
-
- case GLOB_SBD_IOCTL_FORMAT:
- nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: Flash format "
- "being performed\n");
- if (PASS != GLOB_FTL_Flash_Format())
- return -EFAULT;
- return 0;
-
- case GLOB_SBD_IOCTL_FLUSH_CACHE:
- nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: Cache flush "
- "being performed\n");
- mutex_lock(&spectra_lock);
- ret = force_flush_cache();
- mutex_unlock(&spectra_lock);
- return ret;
-
- case GLOB_SBD_IOCTL_COPY_BLK_TABLE:
- nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
- "Copy block table\n");
- if (copy_to_user((void __user *)arg,
- get_blk_table_start_addr(),
- get_blk_table_len()))
- return -EFAULT;
- return 0;
-
- case GLOB_SBD_IOCTL_COPY_WEAR_LEVELING_TABLE:
- nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
- "Copy wear leveling table\n");
- if (copy_to_user((void __user *)arg,
- get_wear_leveling_table_start_addr(),
- get_wear_leveling_table_len()))
- return -EFAULT;
- return 0;
-
- case GLOB_SBD_IOCTL_GET_NAND_INFO:
- nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
- "Get NAND info\n");
- if (copy_to_user((void __user *)arg, &IdentifyDeviceData,
- sizeof(IdentifyDeviceData)))
- return -EFAULT;
- return 0;
-
- case GLOB_SBD_IOCTL_WRITE_DATA:
- nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
- "Write one page data\n");
- return ioctl_write_page_data(arg);
-
- case GLOB_SBD_IOCTL_READ_DATA:
- nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
- "Read one page data\n");
- return ioctl_read_page_data(arg);
- }
-
- return -ENOTTY;
-}
-
-static DEFINE_MUTEX(ffsport_mutex);
-
-int GLOB_SBD_unlocked_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg)
-{
- int ret;
-
- mutex_lock(&ffsport_mutex);
- ret = GLOB_SBD_ioctl(bdev, mode, cmd, arg);
- mutex_unlock(&ffsport_mutex);
-
- return ret;
-}
-
-static struct block_device_operations GLOB_SBD_ops = {
- .owner = THIS_MODULE,
- .open = GLOB_SBD_open,
- .release = GLOB_SBD_release,
- .ioctl = GLOB_SBD_unlocked_ioctl,
- .getgeo = GLOB_SBD_getgeo,
-};
-
-static int SBD_setup_device(struct spectra_nand_dev *dev, int which)
-{
- int res_blks;
- u32 sects;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- memset(dev, 0, sizeof(struct spectra_nand_dev));
-
- nand_dbg_print(NAND_DBG_WARN, "Reserved %d blocks "
- "for OS image, %d blocks for bad block replacement.\n",
- get_res_blk_num_os(),
- get_res_blk_num_bad_blk());
-
- res_blks = get_res_blk_num_bad_blk() + get_res_blk_num_os();
-
- dev->size = (u64)IdentifyDeviceData.PageDataSize *
- IdentifyDeviceData.PagesPerBlock *
- (IdentifyDeviceData.wDataBlockNum - res_blks);
-
- res_blks_os = get_res_blk_num_os();
-
- spin_lock_init(&dev->qlock);
-
- dev->tmp_buf = kmalloc(IdentifyDeviceData.PageDataSize, GFP_ATOMIC);
- if (!dev->tmp_buf) {
- printk(KERN_ERR "Failed to kmalloc memory in %s Line %d, exit.\n",
- __FILE__, __LINE__);
- goto out_vfree;
- }
-
- dev->queue = blk_init_queue(GLOB_SBD_request, &dev->qlock);
- if (dev->queue == NULL) {
- printk(KERN_ERR
- "Spectra: Request queue could not be initialized."
- " Aborting\n ");
- goto out_vfree;
- }
- dev->queue->queuedata = dev;
-
- /* As the Linux block layer doesn't support >4KB hardware sectors, */
- /* we force-report a 512 byte hardware sector size to the kernel */
- blk_queue_logical_block_size(dev->queue, 512);
-
- blk_queue_flush(dev->queue, REQ_FLUSH);
-
- dev->thread = kthread_run(spectra_trans_thread, dev, "nand_thd");
- if (IS_ERR(dev->thread)) {
- blk_cleanup_queue(dev->queue);
- unregister_blkdev(GLOB_SBD_majornum, GLOB_SBD_NAME);
- return PTR_ERR(dev->thread);
- }
-
- dev->gd = alloc_disk(PARTITIONS);
- if (!dev->gd) {
- printk(KERN_ERR
- "Spectra: Could not allocate disk. Aborting \n ");
- goto out_vfree;
- }
- dev->gd->major = GLOB_SBD_majornum;
- dev->gd->first_minor = which * PARTITIONS;
- dev->gd->fops = &GLOB_SBD_ops;
- dev->gd->queue = dev->queue;
- dev->gd->private_data = dev;
- snprintf(dev->gd->disk_name, 32, "%s%c", GLOB_SBD_NAME, which + 'a');
-
- sects = dev->size >> 9;
- nand_dbg_print(NAND_DBG_WARN, "Capacity sects: %d\n", sects);
- set_capacity(dev->gd, sects);
-
- add_disk(dev->gd);
-
- return 0;
-out_vfree:
- return -ENOMEM;
-}
-
-/*
-static ssize_t show_nand_block_num(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%d\n",
- (int)IdentifyDeviceData.wDataBlockNum);
-}
-
-static ssize_t show_nand_pages_per_block(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%d\n",
- (int)IdentifyDeviceData.PagesPerBlock);
-}
-
-static ssize_t show_nand_page_size(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%d\n",
- (int)IdentifyDeviceData.PageDataSize);
-}
-
-static DEVICE_ATTR(nand_block_num, 0444, show_nand_block_num, NULL);
-static DEVICE_ATTR(nand_pages_per_block, 0444, show_nand_pages_per_block, NULL);
-static DEVICE_ATTR(nand_page_size, 0444, show_nand_page_size, NULL);
-
-static void create_sysfs_entry(struct device *dev)
-{
- if (device_create_file(dev, &dev_attr_nand_block_num))
- printk(KERN_ERR "Spectra: "
- "failed to create sysfs entry nand_block_num.\n");
- if (device_create_file(dev, &dev_attr_nand_pages_per_block))
- printk(KERN_ERR "Spectra: "
- "failed to create sysfs entry nand_pages_per_block.\n");
- if (device_create_file(dev, &dev_attr_nand_page_size))
- printk(KERN_ERR "Spectra: "
- "failed to create sysfs entry nand_page_size.\n");
-}
-*/
-
-static void register_spectra_ftl_async(void *unused, async_cookie_t cookie)
-{
- int i;
-
- /* create_sysfs_entry(&dev->dev); */
-
- if (PASS != GLOB_FTL_IdentifyDevice(&IdentifyDeviceData)) {
- printk(KERN_ERR "Spectra: Unable to Read Flash Device. "
- "Aborting\n");
- return;
- } else {
- nand_dbg_print(NAND_DBG_WARN, "In GLOB_SBD_init: "
- "Num blocks=%d, pagesperblock=%d, "
- "pagedatasize=%d, ECCBytesPerSector=%d\n",
- (int)IdentifyDeviceData.NumBlocks,
- (int)IdentifyDeviceData.PagesPerBlock,
- (int)IdentifyDeviceData.PageDataSize,
- (int)IdentifyDeviceData.wECCBytesPerSector);
- }
-
- printk(KERN_ALERT "Spectra: searching block table, please wait ...\n");
- if (GLOB_FTL_Init() != PASS) {
- printk(KERN_ERR "Spectra: Unable to Initialize FTL Layer. "
- "Aborting\n");
- goto out_ftl_flash_register;
- }
- printk(KERN_ALERT "Spectra: block table has been found.\n");
-
- GLOB_SBD_majornum = register_blkdev(0, GLOB_SBD_NAME);
- if (GLOB_SBD_majornum <= 0) {
- printk(KERN_ERR "Unable to get the major %d for Spectra",
- GLOB_SBD_majornum);
- goto out_ftl_flash_register;
- }
-
- for (i = 0; i < NUM_DEVICES; i++)
- if (SBD_setup_device(&nand_device[i], i) == -ENOMEM)
- goto out_blk_register;
-
- nand_dbg_print(NAND_DBG_DEBUG,
- "Spectra: module loaded with major number %d\n",
- GLOB_SBD_majornum);
-
- return;
-
-out_blk_register:
- unregister_blkdev(GLOB_SBD_majornum, GLOB_SBD_NAME);
-out_ftl_flash_register:
- GLOB_FTL_Cache_Release();
- printk(KERN_ERR "Spectra: Module load failed.\n");
-}
-
-int register_spectra_ftl()
-{
- async_schedule(register_spectra_ftl_async, NULL);
- return 0;
-}
-EXPORT_SYMBOL_GPL(register_spectra_ftl);
-
-static int GLOB_SBD_init(void)
-{
- /* Set debug output level (0~3) here. 3 is most verbose */
- printk(KERN_ALERT "Spectra: %s\n", GLOB_version);
-
- mutex_init(&spectra_lock);
-
- if (PASS != GLOB_FTL_Flash_Init()) {
- printk(KERN_ERR "Spectra: Unable to Initialize Flash Device. "
- "Aborting\n");
- return -ENODEV;
- }
- return 0;
-}
-
-static void __exit GLOB_SBD_exit(void)
-{
- int i;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- for (i = 0; i < NUM_DEVICES; i++) {
- struct spectra_nand_dev *dev = &nand_device[i];
- if (dev->gd) {
- del_gendisk(dev->gd);
- put_disk(dev->gd);
- }
- if (dev->queue)
- blk_cleanup_queue(dev->queue);
- kfree(dev->tmp_buf);
- }
-
- unregister_blkdev(GLOB_SBD_majornum, GLOB_SBD_NAME);
-
- mutex_lock(&spectra_lock);
- force_flush_cache();
- mutex_unlock(&spectra_lock);
-
- GLOB_FTL_Cache_Release();
-
- GLOB_FTL_Flash_Release();
-
- nand_dbg_print(NAND_DBG_DEBUG,
- "Spectra FTL module (major number %d) unloaded.\n",
- GLOB_SBD_majornum);
-}
-
-module_init(GLOB_SBD_init);
-module_exit(GLOB_SBD_exit);
diff --git a/drivers/staging/spectra/ffsport.h b/drivers/staging/spectra/ffsport.h
deleted file mode 100644
index 85c0750612f6..000000000000
--- a/drivers/staging/spectra/ffsport.h
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * NAND Flash Controller Device Driver
- * Copyright (c) 2009, Intel Corporation and its suppliers.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- */
-
-#ifndef _FFSPORT_
-#define _FFSPORT_
-
-#include "ffsdefs.h"
-
-#if defined __GNUC__
-#define PACKED
-#define PACKED_GNU __attribute__ ((packed))
-#define UNALIGNED
-#endif
-
-#include <linux/semaphore.h>
-#include <linux/string.h> /* for strcpy(), stricmp(), etc */
-#include <linux/mm.h> /* for kmalloc(), kfree() */
-#include <linux/vmalloc.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/init.h>
-
-#include <linux/kernel.h> /* printk() */
-#include <linux/fs.h> /* everything... */
-#include <linux/errno.h> /* error codes */
-#include <linux/types.h> /* size_t */
-#include <linux/genhd.h>
-#include <linux/blkdev.h>
-#include <linux/hdreg.h>
-#include <linux/pci.h>
-#include "flash.h"
-
-#define VERBOSE 1
-
-#define NAND_DBG_WARN 1
-#define NAND_DBG_DEBUG 2
-#define NAND_DBG_TRACE 3
-
-extern int nand_debug_level;
-
-#ifdef VERBOSE
-#define nand_dbg_print(level, args...) \
- do { \
- if (level <= nand_debug_level) \
- printk(KERN_ALERT args); \
- } while (0)
-#else
-#define nand_dbg_print(level, args...)
-#endif
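nand_dbg_print() compiles to nothing unless VERBOSE is defined and otherwise filters on the runtime nand_debug_level module parameter. A user-space analogue of the same pattern (printf instead of printk), shown only to illustrate the idiom:

/* User-space analogue of the nand_dbg_print() level filter. */
#include <stdio.h>

#define DBG_WARN  1
#define DBG_DEBUG 2
#define DBG_TRACE 3

static int debug_level = DBG_DEBUG;	/* would be the module parameter */

#define dbg_print(level, ...)					\
	do {							\
		if ((level) <= debug_level)			\
			fprintf(stderr, __VA_ARGS__);		\
	} while (0)

int main(void)
{
	dbg_print(DBG_WARN,  "printed: level %d <= %d\n", DBG_WARN, debug_level);
	dbg_print(DBG_TRACE, "suppressed: level %d > %d\n", DBG_TRACE, debug_level);
	return 0;
}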
-
-#ifdef SUPPORT_BIG_ENDIAN
-#define INVERTUINT16(w) ((u16)(((u16)(w)) << 8) | \
- (u16)((u16)(w) >> 8))
-
-#define INVERTUINT32(dw) (((u32)(dw) << 24) | \
- (((u32)(dw) << 8) & 0x00ff0000) | \
- (((u32)(dw) >> 8) & 0x0000ff00) | \
- ((u32)(dw) >> 24))
-#else
-#define INVERTUINT16(w) w
-#define INVERTUINT32(dw) dw
-#endif
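The INVERTUINT16/32 macros only swap bytes when SUPPORT_BIG_ENDIAN is defined; they are plain 16/32-bit byte swaps. A quick stand-alone check against the GCC/Clang byte-swap builtins (the macros are copied from the header, the main() is only a test harness):

/* Verify the INVERTUINT16/32 expressions against the compiler builtins. */
#include <assert.h>
#include <stdint.h>

#define INVERTUINT16(w)  ((uint16_t)(((uint16_t)(w)) << 8) | \
			  (uint16_t)((uint16_t)(w) >> 8))

#define INVERTUINT32(dw) (((uint32_t)(dw) << 24) | \
			  (((uint32_t)(dw) << 8) & 0x00ff0000) | \
			  (((uint32_t)(dw) >> 8) & 0x0000ff00) | \
			  ((uint32_t)(dw) >> 24))

int main(void)
{
	assert(INVERTUINT16(0x1234) == __builtin_bswap16(0x1234));
	assert(INVERTUINT32(0x12345678U) == __builtin_bswap32(0x12345678U));
	return 0;
}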
-
-extern int GLOB_Calc_Used_Bits(u32 n);
-extern u64 GLOB_u64_Div(u64 addr, u32 divisor);
-extern u64 GLOB_u64_Remainder(u64 addr, u32 divisor_type);
-extern int register_spectra_ftl(void);
-
-#endif /* _FFSPORT_ */
diff --git a/drivers/staging/spectra/flash.c b/drivers/staging/spectra/flash.c
deleted file mode 100644
index aead358e5c2a..000000000000
--- a/drivers/staging/spectra/flash.c
+++ /dev/null
@@ -1,4305 +0,0 @@
-/*
- * NAND Flash Controller Device Driver
- * Copyright (c) 2009, Intel Corporation and its suppliers.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- */
-
-#include <linux/fs.h>
-#include <linux/slab.h>
-
-#include "flash.h"
-#include "ffsdefs.h"
-#include "lld.h"
-#include "lld_nand.h"
-#if CMD_DMA
-#include "lld_cdma.h"
-#endif
-
-#define BLK_FROM_ADDR(addr) ((u32)(addr >> DeviceInfo.nBitsInBlockDataSize))
-#define PAGE_FROM_ADDR(addr, Block) ((u16)((addr - (u64)Block * \
- DeviceInfo.wBlockDataSize) >> DeviceInfo.nBitsInPageDataSize))
-
-#define IS_SPARE_BLOCK(blk) (BAD_BLOCK != (pbt[blk] &\
- BAD_BLOCK) && SPARE_BLOCK == (pbt[blk] & SPARE_BLOCK))
-
-#define IS_DATA_BLOCK(blk) (0 == (pbt[blk] & BAD_BLOCK))
-
-#define IS_DISCARDED_BLOCK(blk) (BAD_BLOCK != (pbt[blk] &\
- BAD_BLOCK) && DISCARD_BLOCK == (pbt[blk] & DISCARD_BLOCK))
-
-#define IS_BAD_BLOCK(blk) (BAD_BLOCK == (pbt[blk] & BAD_BLOCK))
-
-#if DEBUG_BNDRY
-void debug_boundary_lineno_error(int chnl, int limit, int no,
- int lineno, char *filename)
-{
- if (chnl >= limit)
- printk(KERN_ERR "Boundary Check Fail value %d >= limit %d, "
- "at %s:%d. Other info:%d. Aborting...\n",
- chnl, limit, filename, lineno, no);
-}
-/* static int globalmemsize; */
-#endif
-
-static u16 FTL_Cache_If_Hit(u64 dwPageAddr);
-static int FTL_Cache_Read(u64 dwPageAddr);
-static void FTL_Cache_Read_Page(u8 *pData, u64 dwPageAddr,
- u16 cache_blk);
-static void FTL_Cache_Write_Page(u8 *pData, u64 dwPageAddr,
- u8 cache_blk, u16 flag);
-static int FTL_Cache_Write(void);
-static void FTL_Calculate_LRU(void);
-static u32 FTL_Get_Block_Index(u32 wBlockNum);
-
-static int FTL_Search_Block_Table_IN_Block(u32 BT_Block,
- u8 BT_Tag, u16 *Page);
-static int FTL_Read_Block_Table(void);
-static int FTL_Write_Block_Table(int wForce);
-static int FTL_Write_Block_Table_Data(void);
-static int FTL_Check_Block_Table(int wOldTable);
-static int FTL_Static_Wear_Leveling(void);
-static u32 FTL_Replace_Block_Table(void);
-static int FTL_Write_IN_Progress_Block_Table_Page(void);
-
-static u32 FTL_Get_Page_Num(u64 length);
-static u64 FTL_Get_Physical_Block_Addr(u64 blk_addr);
-
-static u32 FTL_Replace_OneBlock(u32 wBlockNum,
- u32 wReplaceNum);
-static u32 FTL_Replace_LWBlock(u32 wBlockNum,
- int *pGarbageCollect);
-static u32 FTL_Replace_MWBlock(void);
-static int FTL_Replace_Block(u64 blk_addr);
-static int FTL_Adjust_Relative_Erase_Count(u32 Index_of_MAX);
-
-struct device_info_tag DeviceInfo;
-struct flash_cache_tag Cache;
-static struct spectra_l2_cache_info cache_l2;
-
-static u8 *cache_l2_page_buf;
-static u8 *cache_l2_blk_buf;
-
-u8 *g_pBlockTable;
-u8 *g_pWearCounter;
-u16 *g_pReadCounter;
-u32 *g_pBTBlocks;
-static u16 g_wBlockTableOffset;
-static u32 g_wBlockTableIndex;
-static u8 g_cBlockTableStatus;
-
-static u8 *g_pTempBuf;
-static u8 *flag_check_blk_table;
-static u8 *tmp_buf_search_bt_in_block;
-static u8 *spare_buf_search_bt_in_block;
-static u8 *spare_buf_bt_search_bt_in_block;
-static u8 *tmp_buf1_read_blk_table;
-static u8 *tmp_buf2_read_blk_table;
-static u8 *flags_static_wear_leveling;
-static u8 *tmp_buf_write_blk_table_data;
-static u8 *tmp_buf_read_disturbance;
-
-u8 *buf_read_page_main_spare;
-u8 *buf_write_page_main_spare;
-u8 *buf_read_page_spare;
-u8 *buf_get_bad_block;
-
-#if (RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE && CMD_DMA)
-struct flash_cache_delta_list_tag int_cache[MAX_CHANS + MAX_DESCS];
-struct flash_cache_tag cache_start_copy;
-#endif
-
-int g_wNumFreeBlocks;
-u8 g_SBDCmdIndex;
-
-static u8 *g_pIPF;
-static u8 bt_flag = FIRST_BT_ID;
-static u8 bt_block_changed;
-
-static u16 cache_block_to_write;
-static u8 last_erased = FIRST_BT_ID;
-
-static u8 GC_Called;
-static u8 BT_GC_Called;
-
-#if CMD_DMA
-#define COPY_BACK_BUF_NUM 10
-
-static u8 ftl_cmd_cnt; /* Init value is 0 */
-u8 *g_pBTDelta;
-u8 *g_pBTDelta_Free;
-u8 *g_pBTStartingCopy;
-u8 *g_pWearCounterCopy;
-u16 *g_pReadCounterCopy;
-u8 *g_pBlockTableCopies;
-u8 *g_pNextBlockTable;
-static u8 *cp_back_buf_copies[COPY_BACK_BUF_NUM];
-static int cp_back_buf_idx;
-
-static u8 *g_temp_buf;
-
-#pragma pack(push, 1)
-#pragma pack(1)
-struct BTableChangesDelta {
- u8 ftl_cmd_cnt;
- u8 ValidFields;
- u16 g_wBlockTableOffset;
- u32 g_wBlockTableIndex;
- u32 BT_Index;
- u32 BT_Entry_Value;
- u32 WC_Index;
- u8 WC_Entry_Value;
- u32 RC_Index;
- u16 RC_Entry_Value;
-};
-
-#pragma pack(pop)
-
-struct BTableChangesDelta *p_BTableChangesDelta;
-#endif
-
-
-#define MARK_BLOCK_AS_BAD(blocknode) (blocknode |= BAD_BLOCK)
-#define MARK_BLK_AS_DISCARD(blk) (blk = (blk & ~SPARE_BLOCK) | DISCARD_BLOCK)
-
-#define FTL_Get_LBAPBA_Table_Mem_Size_Bytes() (DeviceInfo.wDataBlockNum *\
- sizeof(u32))
-#define FTL_Get_WearCounter_Table_Mem_Size_Bytes() (DeviceInfo.wDataBlockNum *\
- sizeof(u8))
-#define FTL_Get_ReadCounter_Table_Mem_Size_Bytes() (DeviceInfo.wDataBlockNum *\
- sizeof(u16))
-#if SUPPORT_LARGE_BLOCKNUM
-#define FTL_Get_LBAPBA_Table_Flash_Size_Bytes() (DeviceInfo.wDataBlockNum *\
- sizeof(u8) * 3)
-#else
-#define FTL_Get_LBAPBA_Table_Flash_Size_Bytes() (DeviceInfo.wDataBlockNum *\
- sizeof(u16))
-#endif
-#define FTL_Get_WearCounter_Table_Flash_Size_Bytes \
- FTL_Get_WearCounter_Table_Mem_Size_Bytes
-#define FTL_Get_ReadCounter_Table_Flash_Size_Bytes \
- FTL_Get_ReadCounter_Table_Mem_Size_Bytes
-
-static u32 FTL_Get_Block_Table_Flash_Size_Bytes(void)
-{
- u32 byte_num;
-
- if (DeviceInfo.MLCDevice) {
- byte_num = FTL_Get_LBAPBA_Table_Flash_Size_Bytes() +
- DeviceInfo.wDataBlockNum * sizeof(u8) +
- DeviceInfo.wDataBlockNum * sizeof(u16);
- } else {
- byte_num = FTL_Get_LBAPBA_Table_Flash_Size_Bytes() +
- DeviceInfo.wDataBlockNum * sizeof(u8);
- }
-
- byte_num += 4 * sizeof(u8);
-
- return byte_num;
-}
-
-static u16 FTL_Get_Block_Table_Flash_Size_Pages(void)
-{
- return (u16)FTL_Get_Page_Num(FTL_Get_Block_Table_Flash_Size_Bytes());
-}
-
-static int FTL_Copy_Block_Table_To_Flash(u8 *flashBuf, u32 sizeToTx,
- u32 sizeTxed)
-{
- u32 wBytesCopied, blk_tbl_size, wBytes;
- u32 *pbt = (u32 *)g_pBlockTable;
-
- blk_tbl_size = FTL_Get_LBAPBA_Table_Flash_Size_Bytes();
- for (wBytes = 0;
- (wBytes < sizeToTx) && ((wBytes + sizeTxed) < blk_tbl_size);
- wBytes++) {
-#if SUPPORT_LARGE_BLOCKNUM
- flashBuf[wBytes] = (u8)(pbt[(wBytes + sizeTxed) / 3]
- >> (((wBytes + sizeTxed) % 3) ?
- ((((wBytes + sizeTxed) % 3) == 2) ? 0 : 8) : 16)) & 0xFF;
-#else
- flashBuf[wBytes] = (u8)(pbt[(wBytes + sizeTxed) / 2]
- >> (((wBytes + sizeTxed) % 2) ? 0 : 8)) & 0xFF;
-#endif
- }
-
- sizeTxed = (sizeTxed > blk_tbl_size) ? (sizeTxed - blk_tbl_size) : 0;
- blk_tbl_size = FTL_Get_WearCounter_Table_Flash_Size_Bytes();
- wBytesCopied = wBytes;
- wBytes = ((blk_tbl_size - sizeTxed) > (sizeToTx - wBytesCopied)) ?
- (sizeToTx - wBytesCopied) : (blk_tbl_size - sizeTxed);
- memcpy(flashBuf + wBytesCopied, g_pWearCounter + sizeTxed, wBytes);
-
- sizeTxed = (sizeTxed > blk_tbl_size) ? (sizeTxed - blk_tbl_size) : 0;
-
- if (DeviceInfo.MLCDevice) {
- blk_tbl_size = FTL_Get_ReadCounter_Table_Flash_Size_Bytes();
- wBytesCopied += wBytes;
- for (wBytes = 0; ((wBytes + wBytesCopied) < sizeToTx) &&
- ((wBytes + sizeTxed) < blk_tbl_size); wBytes++)
- flashBuf[wBytes + wBytesCopied] =
- (g_pReadCounter[(wBytes + sizeTxed) / 2] >>
- (((wBytes + sizeTxed) % 2) ? 0 : 8)) & 0xFF;
- }
-
- return wBytesCopied + wBytes;
-}
-
-static int FTL_Copy_Block_Table_From_Flash(u8 *flashBuf,
- u32 sizeToTx, u32 sizeTxed)
-{
- u32 wBytesCopied, blk_tbl_size, wBytes;
- u32 *pbt = (u32 *)g_pBlockTable;
-
- blk_tbl_size = FTL_Get_LBAPBA_Table_Flash_Size_Bytes();
- for (wBytes = 0; (wBytes < sizeToTx) &&
- ((wBytes + sizeTxed) < blk_tbl_size); wBytes++) {
-#if SUPPORT_LARGE_BLOCKNUM
- if (!((wBytes + sizeTxed) % 3))
- pbt[(wBytes + sizeTxed) / 3] = 0;
- pbt[(wBytes + sizeTxed) / 3] |=
- (flashBuf[wBytes] << (((wBytes + sizeTxed) % 3) ?
- ((((wBytes + sizeTxed) % 3) == 2) ? 0 : 8) : 16));
-#else
- if (!((wBytes + sizeTxed) % 2))
- pbt[(wBytes + sizeTxed) / 2] = 0;
- pbt[(wBytes + sizeTxed) / 2] |=
- (flashBuf[wBytes] << (((wBytes + sizeTxed) % 2) ?
- 0 : 8));
-#endif
- }
-
- sizeTxed = (sizeTxed > blk_tbl_size) ? (sizeTxed - blk_tbl_size) : 0;
- blk_tbl_size = FTL_Get_WearCounter_Table_Flash_Size_Bytes();
- wBytesCopied = wBytes;
- wBytes = ((blk_tbl_size - sizeTxed) > (sizeToTx - wBytesCopied)) ?
- (sizeToTx - wBytesCopied) : (blk_tbl_size - sizeTxed);
- memcpy(g_pWearCounter + sizeTxed, flashBuf + wBytesCopied, wBytes);
- sizeTxed = (sizeTxed > blk_tbl_size) ? (sizeTxed - blk_tbl_size) : 0;
-
- if (DeviceInfo.MLCDevice) {
- wBytesCopied += wBytes;
- blk_tbl_size = FTL_Get_ReadCounter_Table_Flash_Size_Bytes();
- for (wBytes = 0; ((wBytes + wBytesCopied) < sizeToTx) &&
- ((wBytes + sizeTxed) < blk_tbl_size); wBytes++) {
- if (((wBytes + sizeTxed) % 2))
- g_pReadCounter[(wBytes + sizeTxed) / 2] = 0;
- g_pReadCounter[(wBytes + sizeTxed) / 2] |=
- (flashBuf[wBytes] <<
- (((wBytes + sizeTxed) % 2) ? 0 : 8));
- }
- }
-
- return wBytesCopied+wBytes;
-}
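With SUPPORT_LARGE_BLOCKNUM the two copy routines above pack each block-table entry into three bytes on flash, high byte first, and rebuild the 32-bit value on the way back. A stripped-down sketch of that 24-bit pack/unpack (simplified to whole entries, without the partial-transfer bookkeeping the driver needs):

/* 24-bit pack/unpack of block-table entries: high byte first, three bytes each. */
#include <assert.h>
#include <stdint.h>

static void pack24(const uint32_t *tbl, uint8_t *flash, unsigned entries)
{
	for (unsigned i = 0; i < entries; i++) {
		flash[3 * i]     = (tbl[i] >> 16) & 0xff;
		flash[3 * i + 1] = (tbl[i] >> 8) & 0xff;
		flash[3 * i + 2] = tbl[i] & 0xff;
	}
}

static void unpack24(uint32_t *tbl, const uint8_t *flash, unsigned entries)
{
	for (unsigned i = 0; i < entries; i++)
		tbl[i] = ((uint32_t)flash[3 * i] << 16) |
			 ((uint32_t)flash[3 * i + 1] << 8) |
			 flash[3 * i + 2];
}

int main(void)
{
	uint32_t in[3] = { 0x000001, 0x00abcd, 0x123456 }, out[3];
	uint8_t flash[9];

	pack24(in, flash, 3);
	unpack24(out, flash, 3);
	for (int i = 0; i < 3; i++)
		assert(in[i] == out[i]);
	return 0;
}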
-
-static int FTL_Insert_Block_Table_Signature(u8 *buf, u8 tag)
-{
- int i;
-
- for (i = 0; i < BTSIG_BYTES; i++)
- buf[BTSIG_OFFSET + i] =
- ((tag + (i * BTSIG_DELTA) - FIRST_BT_ID) %
- (1 + LAST_BT_ID-FIRST_BT_ID)) + FIRST_BT_ID;
-
- return PASS;
-}
-
-static int FTL_Extract_Block_Table_Tag(u8 *buf, u8 **tagarray)
-{
- static u8 tag[BTSIG_BYTES >> 1];
- int i, j, k, tagi, tagtemp, status;
-
- *tagarray = (u8 *)tag;
- tagi = 0;
-
- for (i = 0; i < (BTSIG_BYTES - 1); i++) {
- for (j = i + 1; (j < BTSIG_BYTES) &&
- (tagi < (BTSIG_BYTES >> 1)); j++) {
- tagtemp = buf[BTSIG_OFFSET + j] -
- buf[BTSIG_OFFSET + i];
- if (tagtemp && !(tagtemp % BTSIG_DELTA)) {
- tagtemp = (buf[BTSIG_OFFSET + i] +
- (1 + LAST_BT_ID - FIRST_BT_ID) -
- (i * BTSIG_DELTA)) %
- (1 + LAST_BT_ID - FIRST_BT_ID);
- status = FAIL;
- for (k = 0; k < tagi; k++) {
- if (tagtemp == tag[k])
- status = PASS;
- }
-
- if (status == FAIL) {
- tag[tagi++] = tagtemp;
- i = (j == (i + 1)) ? i + 1 : i;
- j = (j == (i + 1)) ? i + 1 : i;
- }
- }
- }
- }
-
- return tagi;
-}
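The signature scheme stamps the block-table tag redundantly across BTSIG_BYTES spare bytes, each offset by BTSIG_DELTA, so the extractor can still recover the tag when some bytes are corrupted. The sketch below round-trips a tag through a simplified insert/decode pair that votes over the bytes rather than doing the pairwise search the driver uses; the constants (5 signature bytes, delta 5, IDs 1-4) are assumptions, since the real values live in a header outside this hunk.

/* Simplified round-trip of the block-table signature scheme. */
#include <assert.h>
#include <stdint.h>

#define BTSIG_BYTES 5
#define BTSIG_DELTA 5
#define FIRST_BT_ID 1
#define LAST_BT_ID  4
#define ID_RANGE    (1 + LAST_BT_ID - FIRST_BT_ID)

static void insert_sig(uint8_t *sig, uint8_t tag)
{
	for (int i = 0; i < BTSIG_BYTES; i++)
		sig[i] = (tag + i * BTSIG_DELTA - FIRST_BT_ID) % ID_RANGE + FIRST_BT_ID;
}

/* Vote over the signature bytes so a single corrupted byte is tolerated. */
static uint8_t extract_sig(const uint8_t *sig)
{
	int best_tag = FIRST_BT_ID, best_votes = -1;

	for (int tag = FIRST_BT_ID; tag <= LAST_BT_ID; tag++) {
		int votes = 0;
		for (int i = 0; i < BTSIG_BYTES; i++)
			if (sig[i] == (tag + i * BTSIG_DELTA - FIRST_BT_ID) % ID_RANGE + FIRST_BT_ID)
				votes++;
		if (votes > best_votes) {
			best_votes = votes;
			best_tag = tag;
		}
	}
	return (uint8_t)best_tag;
}

int main(void)
{
	uint8_t sig[BTSIG_BYTES];

	insert_sig(sig, 3);
	sig[2] = 0xff;			/* simulate one corrupted spare byte */
	assert(extract_sig(sig) == 3);
	return 0;
}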
-
-
-static int FTL_Execute_SPL_Recovery(void)
-{
- u32 j, block, blks;
- u32 *pbt = (u32 *)g_pBlockTable;
- int ret;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- blks = DeviceInfo.wSpectraEndBlock - DeviceInfo.wSpectraStartBlock;
- for (j = 0; j <= blks; j++) {
- block = (pbt[j]);
- if (((block & BAD_BLOCK) != BAD_BLOCK) &&
- ((block & SPARE_BLOCK) == SPARE_BLOCK)) {
- ret = GLOB_LLD_Erase_Block(block & ~BAD_BLOCK);
- if (FAIL == ret) {
- nand_dbg_print(NAND_DBG_WARN,
- "NAND Program fail in %s, Line %d, "
- "Function: %s, new Bad Block %d "
- "generated!\n",
- __FILE__, __LINE__, __func__,
- (int)(block & ~BAD_BLOCK));
- MARK_BLOCK_AS_BAD(pbt[j]);
- }
- }
- }
-
- return PASS;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: GLOB_FTL_IdentifyDevice
-* Inputs: pointer to identify data structure
-* Outputs: PASS / FAIL
-* Description: the identify data structure is filled in with
-* information for the block driver.
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-int GLOB_FTL_IdentifyDevice(struct spectra_indentfy_dev_tag *dev_data)
-{
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- dev_data->NumBlocks = DeviceInfo.wTotalBlocks;
- dev_data->PagesPerBlock = DeviceInfo.wPagesPerBlock;
- dev_data->PageDataSize = DeviceInfo.wPageDataSize;
- dev_data->wECCBytesPerSector = DeviceInfo.wECCBytesPerSector;
- dev_data->wDataBlockNum = DeviceInfo.wDataBlockNum;
-
- return PASS;
-}
-
-/* ..... */
-static int allocate_memory(void)
-{
- u32 block_table_size, page_size, block_size, mem_size;
- u32 total_bytes = 0;
- int i;
-#if CMD_DMA
- int j;
-#endif
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- page_size = DeviceInfo.wPageSize;
- block_size = DeviceInfo.wPagesPerBlock * DeviceInfo.wPageDataSize;
-
- block_table_size = DeviceInfo.wDataBlockNum *
- (sizeof(u32) + sizeof(u8) + sizeof(u16));
- block_table_size += (DeviceInfo.wPageDataSize -
- (block_table_size % DeviceInfo.wPageDataSize)) %
- DeviceInfo.wPageDataSize;
-
- /* Malloc memory for block tables */
- g_pBlockTable = kzalloc(block_table_size, GFP_ATOMIC);
- if (!g_pBlockTable)
- goto block_table_fail;
- total_bytes += block_table_size;
-
- g_pWearCounter = (u8 *)(g_pBlockTable +
- DeviceInfo.wDataBlockNum * sizeof(u32));
-
- if (DeviceInfo.MLCDevice)
- g_pReadCounter = (u16 *)(g_pBlockTable +
- DeviceInfo.wDataBlockNum *
- (sizeof(u32) + sizeof(u8)));
-
- /* Malloc memory and init for cache items */
- for (i = 0; i < CACHE_ITEM_NUM; i++) {
- Cache.array[i].address = NAND_CACHE_INIT_ADDR;
- Cache.array[i].use_cnt = 0;
- Cache.array[i].changed = CLEAR;
- Cache.array[i].buf = kzalloc(Cache.cache_item_size,
- GFP_ATOMIC);
- if (!Cache.array[i].buf)
- goto cache_item_fail;
- total_bytes += Cache.cache_item_size;
- }
-
- /* Malloc memory for IPF */
- g_pIPF = kzalloc(page_size, GFP_ATOMIC);
- if (!g_pIPF)
- goto ipf_fail;
- total_bytes += page_size;
-
- /* Malloc memory for data merging during Level2 Cache flush */
- cache_l2_page_buf = kmalloc(page_size, GFP_ATOMIC);
- if (!cache_l2_page_buf)
- goto cache_l2_page_buf_fail;
- memset(cache_l2_page_buf, 0xff, page_size);
- total_bytes += page_size;
-
- cache_l2_blk_buf = kmalloc(block_size, GFP_ATOMIC);
- if (!cache_l2_blk_buf)
- goto cache_l2_blk_buf_fail;
- memset(cache_l2_blk_buf, 0xff, block_size);
- total_bytes += block_size;
-
- /* Malloc memory for temp buffer */
- g_pTempBuf = kzalloc(Cache.cache_item_size, GFP_ATOMIC);
- if (!g_pTempBuf)
- goto Temp_buf_fail;
- total_bytes += Cache.cache_item_size;
-
- /* Malloc memory for block table blocks */
- mem_size = (1 + LAST_BT_ID - FIRST_BT_ID) * sizeof(u32);
- g_pBTBlocks = kmalloc(mem_size, GFP_ATOMIC);
- if (!g_pBTBlocks)
- goto bt_blocks_fail;
- memset(g_pBTBlocks, 0xff, mem_size);
- total_bytes += mem_size;
-
- /* Malloc memory for function FTL_Check_Block_Table */
- flag_check_blk_table = kmalloc(DeviceInfo.wDataBlockNum, GFP_ATOMIC);
- if (!flag_check_blk_table)
- goto flag_check_blk_table_fail;
- total_bytes += DeviceInfo.wDataBlockNum;
-
- /* Malloc memory for function FTL_Search_Block_Table_IN_Block */
- tmp_buf_search_bt_in_block = kmalloc(page_size, GFP_ATOMIC);
- if (!tmp_buf_search_bt_in_block)
- goto tmp_buf_search_bt_in_block_fail;
- memset(tmp_buf_search_bt_in_block, 0xff, page_size);
- total_bytes += page_size;
-
- mem_size = DeviceInfo.wPageSize - DeviceInfo.wPageDataSize;
- spare_buf_search_bt_in_block = kmalloc(mem_size, GFP_ATOMIC);
- if (!spare_buf_search_bt_in_block)
- goto spare_buf_search_bt_in_block_fail;
- memset(spare_buf_search_bt_in_block, 0xff, mem_size);
- total_bytes += mem_size;
-
- spare_buf_bt_search_bt_in_block = kmalloc(mem_size, GFP_ATOMIC);
- if (!spare_buf_bt_search_bt_in_block)
- goto spare_buf_bt_search_bt_in_block_fail;
- memset(spare_buf_bt_search_bt_in_block, 0xff, mem_size);
- total_bytes += mem_size;
-
- /* Malloc memory for function FTL_Read_Block_Table */
- tmp_buf1_read_blk_table = kmalloc(page_size, GFP_ATOMIC);
- if (!tmp_buf1_read_blk_table)
- goto tmp_buf1_read_blk_table_fail;
- memset(tmp_buf1_read_blk_table, 0xff, page_size);
- total_bytes += page_size;
-
- tmp_buf2_read_blk_table = kmalloc(page_size, GFP_ATOMIC);
- if (!tmp_buf2_read_blk_table)
- goto tmp_buf2_read_blk_table_fail;
- memset(tmp_buf2_read_blk_table, 0xff, page_size);
- total_bytes += page_size;
-
- /* Malloc memory for function FTL_Static_Wear_Leveling */
- flags_static_wear_leveling = kmalloc(DeviceInfo.wDataBlockNum,
- GFP_ATOMIC);
- if (!flags_static_wear_leveling)
- goto flags_static_wear_leveling_fail;
- total_bytes += DeviceInfo.wDataBlockNum;
-
- /* Malloc memory for function FTL_Write_Block_Table_Data */
- if (FTL_Get_Block_Table_Flash_Size_Pages() > 3)
- mem_size = FTL_Get_Block_Table_Flash_Size_Bytes() -
- 2 * DeviceInfo.wPageSize;
- else
- mem_size = DeviceInfo.wPageSize;
- tmp_buf_write_blk_table_data = kmalloc(mem_size, GFP_ATOMIC);
- if (!tmp_buf_write_blk_table_data)
- goto tmp_buf_write_blk_table_data_fail;
- memset(tmp_buf_write_blk_table_data, 0xff, mem_size);
- total_bytes += mem_size;
-
- /* Malloc memory for function FTL_Read_Disturbance */
- tmp_buf_read_disturbance = kmalloc(block_size, GFP_ATOMIC);
- if (!tmp_buf_read_disturbance)
- goto tmp_buf_read_disturbance_fail;
- memset(tmp_buf_read_disturbance, 0xff, block_size);
- total_bytes += block_size;
-
- /* Alloc mem for function NAND_Read_Page_Main_Spare of lld_nand.c */
- buf_read_page_main_spare = kmalloc(DeviceInfo.wPageSize, GFP_ATOMIC);
- if (!buf_read_page_main_spare)
- goto buf_read_page_main_spare_fail;
- total_bytes += DeviceInfo.wPageSize;
-
- /* Alloc mem for function NAND_Write_Page_Main_Spare of lld_nand.c */
- buf_write_page_main_spare = kmalloc(DeviceInfo.wPageSize, GFP_ATOMIC);
- if (!buf_write_page_main_spare)
- goto buf_write_page_main_spare_fail;
- total_bytes += DeviceInfo.wPageSize;
-
- /* Alloc mem for function NAND_Read_Page_Spare of lld_nand.c */
- buf_read_page_spare = kmalloc(DeviceInfo.wPageSpareSize, GFP_ATOMIC);
- if (!buf_read_page_spare)
- goto buf_read_page_spare_fail;
- memset(buf_read_page_spare, 0xff, DeviceInfo.wPageSpareSize);
- total_bytes += DeviceInfo.wPageSpareSize;
-
- /* Alloc mem for function NAND_Get_Bad_Block of lld_nand.c */
- buf_get_bad_block = kmalloc(DeviceInfo.wPageSpareSize, GFP_ATOMIC);
- if (!buf_get_bad_block)
- goto buf_get_bad_block_fail;
- memset(buf_get_bad_block, 0xff, DeviceInfo.wPageSpareSize);
- total_bytes += DeviceInfo.wPageSpareSize;
-
-#if CMD_DMA
- g_temp_buf = kmalloc(block_size, GFP_ATOMIC);
- if (!g_temp_buf)
- goto temp_buf_fail;
- memset(g_temp_buf, 0xff, block_size);
- total_bytes += block_size;
-
- /* Malloc memory for copy of block table used in CDMA mode */
- g_pBTStartingCopy = kzalloc(block_table_size, GFP_ATOMIC);
- if (!g_pBTStartingCopy)
- goto bt_starting_copy;
- total_bytes += block_table_size;
-
- g_pWearCounterCopy = (u8 *)(g_pBTStartingCopy +
- DeviceInfo.wDataBlockNum * sizeof(u32));
-
- if (DeviceInfo.MLCDevice)
- g_pReadCounterCopy = (u16 *)(g_pBTStartingCopy +
- DeviceInfo.wDataBlockNum *
- (sizeof(u32) + sizeof(u8)));
-
- /* Malloc memory for block table copies */
- mem_size = 5 * DeviceInfo.wDataBlockNum * sizeof(u32) +
- 5 * DeviceInfo.wDataBlockNum * sizeof(u8);
- if (DeviceInfo.MLCDevice)
- mem_size += 5 * DeviceInfo.wDataBlockNum * sizeof(u16);
- g_pBlockTableCopies = kzalloc(mem_size, GFP_ATOMIC);
- if (!g_pBlockTableCopies)
- goto blk_table_copies_fail;
- total_bytes += mem_size;
- g_pNextBlockTable = g_pBlockTableCopies;
-
- /* Malloc memory for Block Table Delta */
- mem_size = MAX_DESCS * sizeof(struct BTableChangesDelta);
- g_pBTDelta = kzalloc(mem_size, GFP_ATOMIC);
- if (!g_pBTDelta)
- goto bt_delta_fail;
- total_bytes += mem_size;
- g_pBTDelta_Free = g_pBTDelta;
-
- /* Malloc memory for Copy Back Buffers */
- for (j = 0; j < COPY_BACK_BUF_NUM; j++) {
- cp_back_buf_copies[j] = kzalloc(block_size, GFP_ATOMIC);
- if (!cp_back_buf_copies[j])
- goto cp_back_buf_copies_fail;
- total_bytes += block_size;
- }
- cp_back_buf_idx = 0;
-
- /* Malloc memory for pending commands list */
- mem_size = sizeof(struct pending_cmd) * MAX_DESCS;
- info.pcmds = kzalloc(mem_size, GFP_KERNEL);
- if (!info.pcmds)
- goto pending_cmds_buf_fail;
- total_bytes += mem_size;
-
- /* Malloc memory for CDMA descripter table */
- mem_size = sizeof(struct cdma_descriptor) * MAX_DESCS;
- info.cdma_desc_buf = kzalloc(mem_size, GFP_KERNEL);
- if (!info.cdma_desc_buf)
- goto cdma_desc_buf_fail;
- total_bytes += mem_size;
-
- /* Malloc memory for Memcpy descripter table */
- mem_size = sizeof(struct memcpy_descriptor) * MAX_DESCS;
- info.memcp_desc_buf = kzalloc(mem_size, GFP_KERNEL);
- if (!info.memcp_desc_buf)
- goto memcp_desc_buf_fail;
- total_bytes += mem_size;
-#endif
-
- nand_dbg_print(NAND_DBG_WARN,
- "Total memory allocated in FTL layer: %d\n", total_bytes);
-
- return PASS;
-
-#if CMD_DMA
-memcp_desc_buf_fail:
- kfree(info.cdma_desc_buf);
-cdma_desc_buf_fail:
- kfree(info.pcmds);
-pending_cmds_buf_fail:
-cp_back_buf_copies_fail:
- j--;
- for (; j >= 0; j--)
- kfree(cp_back_buf_copies[j]);
- kfree(g_pBTDelta);
-bt_delta_fail:
- kfree(g_pBlockTableCopies);
-blk_table_copies_fail:
- kfree(g_pBTStartingCopy);
-bt_starting_copy:
- kfree(g_temp_buf);
-temp_buf_fail:
- kfree(buf_get_bad_block);
-#endif
-
-buf_get_bad_block_fail:
- kfree(buf_read_page_spare);
-buf_read_page_spare_fail:
- kfree(buf_write_page_main_spare);
-buf_write_page_main_spare_fail:
- kfree(buf_read_page_main_spare);
-buf_read_page_main_spare_fail:
- kfree(tmp_buf_read_disturbance);
-tmp_buf_read_disturbance_fail:
- kfree(tmp_buf_write_blk_table_data);
-tmp_buf_write_blk_table_data_fail:
- kfree(flags_static_wear_leveling);
-flags_static_wear_leveling_fail:
- kfree(tmp_buf2_read_blk_table);
-tmp_buf2_read_blk_table_fail:
- kfree(tmp_buf1_read_blk_table);
-tmp_buf1_read_blk_table_fail:
- kfree(spare_buf_bt_search_bt_in_block);
-spare_buf_bt_search_bt_in_block_fail:
- kfree(spare_buf_search_bt_in_block);
-spare_buf_search_bt_in_block_fail:
- kfree(tmp_buf_search_bt_in_block);
-tmp_buf_search_bt_in_block_fail:
- kfree(flag_check_blk_table);
-flag_check_blk_table_fail:
- kfree(g_pBTBlocks);
-bt_blocks_fail:
- kfree(g_pTempBuf);
-Temp_buf_fail:
- kfree(cache_l2_blk_buf);
-cache_l2_blk_buf_fail:
- kfree(cache_l2_page_buf);
-cache_l2_page_buf_fail:
- kfree(g_pIPF);
-ipf_fail:
-cache_item_fail:
- i--;
- for (; i >= 0; i--)
- kfree(Cache.array[i].buf);
- kfree(g_pBlockTable);
-block_table_fail:
- printk(KERN_ERR "Failed to kmalloc memory in %s Line %d.\n",
- __FILE__, __LINE__);
-
- return -ENOMEM;
-}
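allocate_memory() is a long chain of kmalloc/kzalloc calls whose failure paths unwind in reverse order through labelled gotos. The same idiom, reduced to a minimal stand-alone form (user-space malloc instead of kmalloc, three allocations instead of dozens):

/* Minimal form of the goto-unwind allocation idiom used above:
 * each failure label frees everything allocated before it, in reverse order. */
#include <stdlib.h>

struct ctx {
	void *table;
	void *cache;
	void *temp;
};

static int ctx_alloc(struct ctx *c, size_t sz)
{
	c->table = malloc(sz);
	if (!c->table)
		goto table_fail;

	c->cache = malloc(sz);
	if (!c->cache)
		goto cache_fail;

	c->temp = malloc(sz);
	if (!c->temp)
		goto temp_fail;

	return 0;

temp_fail:
	free(c->cache);
cache_fail:
	free(c->table);
table_fail:
	return -1;
}

int main(void)
{
	struct ctx c;

	if (ctx_alloc(&c, 4096))
		return 1;
	free(c.temp);
	free(c.cache);
	free(c.table);
	return 0;
}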
-
-/* .... */
-static int free_memory(void)
-{
- int i;
-
-#if CMD_DMA
- kfree(info.memcp_desc_buf);
- kfree(info.cdma_desc_buf);
- kfree(info.pcmds);
- for (i = COPY_BACK_BUF_NUM - 1; i >= 0; i--)
- kfree(cp_back_buf_copies[i]);
- kfree(g_pBTDelta);
- kfree(g_pBlockTableCopies);
- kfree(g_pBTStartingCopy);
- kfree(g_temp_buf);
- kfree(buf_get_bad_block);
-#endif
- kfree(buf_read_page_spare);
- kfree(buf_write_page_main_spare);
- kfree(buf_read_page_main_spare);
- kfree(tmp_buf_read_disturbance);
- kfree(tmp_buf_write_blk_table_data);
- kfree(flags_static_wear_leveling);
- kfree(tmp_buf2_read_blk_table);
- kfree(tmp_buf1_read_blk_table);
- kfree(spare_buf_bt_search_bt_in_block);
- kfree(spare_buf_search_bt_in_block);
- kfree(tmp_buf_search_bt_in_block);
- kfree(flag_check_blk_table);
- kfree(g_pBTBlocks);
- kfree(g_pTempBuf);
- kfree(g_pIPF);
- for (i = CACHE_ITEM_NUM - 1; i >= 0; i--)
- kfree(Cache.array[i].buf);
- kfree(g_pBlockTable);
-
- return 0;
-}
-
-static void dump_cache_l2_table(void)
-{
- struct list_head *p;
- struct spectra_l2_cache_list *pnd;
- int n;
-
- n = 0;
- list_for_each(p, &cache_l2.table.list) {
- pnd = list_entry(p, struct spectra_l2_cache_list, list);
- nand_dbg_print(NAND_DBG_WARN, "dump_cache_l2_table node: %d, logical_blk_num: %d\n", n, pnd->logical_blk_num);
-/*
- for (i = 0; i < DeviceInfo.wPagesPerBlock; i++) {
- if (pnd->pages_array[i] != MAX_U32_VALUE)
- nand_dbg_print(NAND_DBG_WARN, " pages_array[%d]: 0x%x\n", i, pnd->pages_array[i]);
- }
-*/
- n++;
- }
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: GLOB_FTL_Init
-* Inputs: none
-* Outputs: PASS=0 / FAIL=1
-* Description: allocates the memory for cache array,
-* important data structures
-* clears the cache array
-* reads the block table from flash into array
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-int GLOB_FTL_Init(void)
-{
- int i;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- Cache.pages_per_item = 1;
- Cache.cache_item_size = 1 * DeviceInfo.wPageDataSize;
-
- if (allocate_memory() != PASS)
- return FAIL;
-
-#if CMD_DMA
-#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
- memcpy((void *)&cache_start_copy, (void *)&Cache,
- sizeof(struct flash_cache_tag));
- memset((void *)&int_cache, -1,
- sizeof(struct flash_cache_delta_list_tag) *
- (MAX_CHANS + MAX_DESCS));
-#endif
- ftl_cmd_cnt = 0;
-#endif
-
- if (FTL_Read_Block_Table() != PASS)
- return FAIL;
-
- /* Init the Level2 Cache data structure */
- for (i = 0; i < BLK_NUM_FOR_L2_CACHE; i++)
- cache_l2.blk_array[i] = MAX_U32_VALUE;
- cache_l2.cur_blk_idx = 0;
- cache_l2.cur_page_num = 0;
- INIT_LIST_HEAD(&cache_l2.table.list);
- cache_l2.table.logical_blk_num = MAX_U32_VALUE;
-
- dump_cache_l2_table();
-
- return 0;
-}
-
-
-#if CMD_DMA
-#if 0
-static void save_blk_table_changes(u16 idx)
-{
- u8 ftl_cmd;
- u32 *pbt = (u32 *)g_pBTStartingCopy;
-
-#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
- u16 id;
- u8 cache_blks;
-
- id = idx - MAX_CHANS;
- if (int_cache[id].item != -1) {
- cache_blks = int_cache[id].item;
- cache_start_copy.array[cache_blks].address =
- int_cache[id].cache.address;
- cache_start_copy.array[cache_blks].changed =
- int_cache[id].cache.changed;
- }
-#endif
-
- ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
-
- while (ftl_cmd <= PendingCMD[idx].Tag) {
- if (p_BTableChangesDelta->ValidFields == 0x01) {
- g_wBlockTableOffset =
- p_BTableChangesDelta->g_wBlockTableOffset;
- } else if (p_BTableChangesDelta->ValidFields == 0x0C) {
- pbt[p_BTableChangesDelta->BT_Index] =
- p_BTableChangesDelta->BT_Entry_Value;
- debug_boundary_error(((
- p_BTableChangesDelta->BT_Index)),
- DeviceInfo.wDataBlockNum, 0);
- } else if (p_BTableChangesDelta->ValidFields == 0x03) {
- g_wBlockTableOffset =
- p_BTableChangesDelta->g_wBlockTableOffset;
- g_wBlockTableIndex =
- p_BTableChangesDelta->g_wBlockTableIndex;
- } else if (p_BTableChangesDelta->ValidFields == 0x30) {
- g_pWearCounterCopy[p_BTableChangesDelta->WC_Index] =
- p_BTableChangesDelta->WC_Entry_Value;
- } else if ((DeviceInfo.MLCDevice) &&
- (p_BTableChangesDelta->ValidFields == 0xC0)) {
- g_pReadCounterCopy[p_BTableChangesDelta->RC_Index] =
- p_BTableChangesDelta->RC_Entry_Value;
- nand_dbg_print(NAND_DBG_DEBUG,
- "In event status setting read counter "
- "GLOB_ftl_cmd_cnt %u Count %u Index %u\n",
- ftl_cmd,
- p_BTableChangesDelta->RC_Entry_Value,
- (unsigned int)p_BTableChangesDelta->RC_Index);
- } else {
- nand_dbg_print(NAND_DBG_DEBUG,
- "This should never occur \n");
- }
- p_BTableChangesDelta += 1;
- ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
- }
-}
-
-static void discard_cmds(u16 n)
-{
- u32 *pbt = (u32 *)g_pBTStartingCopy;
- u8 ftl_cmd;
- unsigned long k;
-#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
- u8 cache_blks;
- u16 id;
-#endif
-
- if ((PendingCMD[n].CMD == WRITE_MAIN_CMD) ||
- (PendingCMD[n].CMD == WRITE_MAIN_SPARE_CMD)) {
- for (k = 0; k < DeviceInfo.wDataBlockNum; k++) {
- if (PendingCMD[n].Block == (pbt[k] & (~BAD_BLOCK)))
- MARK_BLK_AS_DISCARD(pbt[k]);
- }
- }
-
- ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
- while (ftl_cmd <= PendingCMD[n].Tag) {
- p_BTableChangesDelta += 1;
- ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
- }
-
-#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
- id = n - MAX_CHANS;
-
- if (int_cache[id].item != -1) {
- cache_blks = int_cache[id].item;
- if (PendingCMD[n].CMD == MEMCOPY_CMD) {
- if ((cache_start_copy.array[cache_blks].buf <=
- PendingCMD[n].DataDestAddr) &&
- ((cache_start_copy.array[cache_blks].buf +
- Cache.cache_item_size) >
- PendingCMD[n].DataDestAddr)) {
- cache_start_copy.array[cache_blks].address =
- NAND_CACHE_INIT_ADDR;
- cache_start_copy.array[cache_blks].use_cnt =
- 0;
- cache_start_copy.array[cache_blks].changed =
- CLEAR;
- }
- } else {
- cache_start_copy.array[cache_blks].address =
- int_cache[id].cache.address;
- cache_start_copy.array[cache_blks].changed =
- int_cache[id].cache.changed;
- }
- }
-#endif
-}
-
-static void process_cmd_pass(int *first_failed_cmd, u16 idx)
-{
- if (0 == *first_failed_cmd)
- save_blk_table_changes(idx);
- else
- discard_cmds(idx);
-}
-
-static void process_cmd_fail_abort(int *first_failed_cmd,
- u16 idx, int event)
-{
- u32 *pbt = (u32 *)g_pBTStartingCopy;
- u8 ftl_cmd;
- unsigned long i;
- int erase_fail, program_fail;
-#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
- u8 cache_blks;
- u16 id;
-#endif
-
- if (0 == *first_failed_cmd)
- *first_failed_cmd = PendingCMD[idx].SBDCmdIndex;
-
- nand_dbg_print(NAND_DBG_DEBUG, "Uncorrectable error has occurred "
- "while executing %u Command %u accesing Block %u\n",
- (unsigned int)p_BTableChangesDelta->ftl_cmd_cnt,
- PendingCMD[idx].CMD,
- (unsigned int)PendingCMD[idx].Block);
-
- ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
- while (ftl_cmd <= PendingCMD[idx].Tag) {
- p_BTableChangesDelta += 1;
- ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
- }
-
-#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
- id = idx - MAX_CHANS;
-
- if (int_cache[id].item != -1) {
- cache_blks = int_cache[id].item;
- if ((PendingCMD[idx].CMD == WRITE_MAIN_CMD)) {
- cache_start_copy.array[cache_blks].address =
- int_cache[id].cache.address;
- cache_start_copy.array[cache_blks].changed = SET;
- } else if ((PendingCMD[idx].CMD == READ_MAIN_CMD)) {
- cache_start_copy.array[cache_blks].address =
- NAND_CACHE_INIT_ADDR;
- cache_start_copy.array[cache_blks].use_cnt = 0;
- cache_start_copy.array[cache_blks].changed =
- CLEAR;
- } else if (PendingCMD[idx].CMD == ERASE_CMD) {
- /* ? */
- } else if (PendingCMD[idx].CMD == MEMCOPY_CMD) {
- /* ? */
- }
- }
-#endif
-
- erase_fail = (event == EVENT_ERASE_FAILURE) &&
- (PendingCMD[idx].CMD == ERASE_CMD);
-
- program_fail = (event == EVENT_PROGRAM_FAILURE) &&
- ((PendingCMD[idx].CMD == WRITE_MAIN_CMD) ||
- (PendingCMD[idx].CMD == WRITE_MAIN_SPARE_CMD));
-
- if (erase_fail || program_fail) {
- for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
- if (PendingCMD[idx].Block ==
- (pbt[i] & (~BAD_BLOCK)))
- MARK_BLOCK_AS_BAD(pbt[i]);
- }
- }
-}
-
-static void process_cmd(int *first_failed_cmd, u16 idx, int event)
-{
- u8 ftl_cmd;
- int cmd_match = 0;
-
- if (p_BTableChangesDelta->ftl_cmd_cnt == PendingCMD[idx].Tag)
- cmd_match = 1;
-
- if (PendingCMD[idx].Status == CMD_PASS) {
- process_cmd_pass(first_failed_cmd, idx);
- } else if ((PendingCMD[idx].Status == CMD_FAIL) ||
- (PendingCMD[idx].Status == CMD_ABORT)) {
- process_cmd_fail_abort(first_failed_cmd, idx, event);
- } else if ((PendingCMD[idx].Status == CMD_NOT_DONE) &&
- PendingCMD[idx].Tag) {
- nand_dbg_print(NAND_DBG_DEBUG,
- " Command no. %hu is not executed\n",
- (unsigned int)PendingCMD[idx].Tag);
- ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
- while (ftl_cmd <= PendingCMD[idx].Tag) {
- p_BTableChangesDelta += 1;
- ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
- }
- }
-}
-#endif
-
-static void process_cmd(int *first_failed_cmd, u16 idx, int event)
-{
- printk(KERN_ERR "temporary workaround function. "
- "Should not be called! \n");
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: GLOB_FTL_Event_Status
-* Inputs: none
-* Outputs: Event Code
-* Description: It is called by SBD after hardware interrupt signalling
-* completion of commands chain
-* It does the following things:
-* get event status from LLD
-* analyze command chain status
-* determine last command executed
-* analyze results
-* rebuild the block table in case of uncorrectable error
-* return event code
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-int GLOB_FTL_Event_Status(int *first_failed_cmd)
-{
- int event_code = PASS;
- u16 i_P;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- *first_failed_cmd = 0;
-
- event_code = GLOB_LLD_Event_Status();
-
- switch (event_code) {
- case EVENT_PASS:
- nand_dbg_print(NAND_DBG_DEBUG, "Handling EVENT_PASS\n");
- break;
- case EVENT_UNCORRECTABLE_DATA_ERROR:
- nand_dbg_print(NAND_DBG_DEBUG, "Handling Uncorrectable ECC!\n");
- break;
- case EVENT_PROGRAM_FAILURE:
- case EVENT_ERASE_FAILURE:
- nand_dbg_print(NAND_DBG_WARN, "Handling Ugly case. "
- "Event code: 0x%x\n", event_code);
- p_BTableChangesDelta =
- (struct BTableChangesDelta *)g_pBTDelta;
- for (i_P = MAX_CHANS; i_P < (ftl_cmd_cnt + MAX_CHANS);
- i_P++)
- process_cmd(first_failed_cmd, i_P, event_code);
- memcpy(g_pBlockTable, g_pBTStartingCopy,
- DeviceInfo.wDataBlockNum * sizeof(u32));
- memcpy(g_pWearCounter, g_pWearCounterCopy,
- DeviceInfo.wDataBlockNum * sizeof(u8));
- if (DeviceInfo.MLCDevice)
- memcpy(g_pReadCounter, g_pReadCounterCopy,
- DeviceInfo.wDataBlockNum * sizeof(u16));
-
-#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
- memcpy((void *)&Cache, (void *)&cache_start_copy,
- sizeof(struct flash_cache_tag));
- memset((void *)&int_cache, -1,
- sizeof(struct flash_cache_delta_list_tag) *
- (MAX_DESCS + MAX_CHANS));
-#endif
- break;
- default:
- nand_dbg_print(NAND_DBG_WARN,
- "Handling unexpected event code - 0x%x\n",
- event_code);
- event_code = ERR;
- break;
- }
-
- memcpy(g_pBTStartingCopy, g_pBlockTable,
- DeviceInfo.wDataBlockNum * sizeof(u32));
- memcpy(g_pWearCounterCopy, g_pWearCounter,
- DeviceInfo.wDataBlockNum * sizeof(u8));
- if (DeviceInfo.MLCDevice)
- memcpy(g_pReadCounterCopy, g_pReadCounter,
- DeviceInfo.wDataBlockNum * sizeof(u16));
-
- g_pBTDelta_Free = g_pBTDelta;
- ftl_cmd_cnt = 0;
- g_pNextBlockTable = g_pBlockTableCopies;
- cp_back_buf_idx = 0;
-
-#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
- memcpy((void *)&cache_start_copy, (void *)&Cache,
- sizeof(struct flash_cache_tag));
- memset((void *)&int_cache, -1,
- sizeof(struct flash_cache_delta_list_tag) *
- (MAX_DESCS + MAX_CHANS));
-#endif
-
- return event_code;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: glob_ftl_execute_cmds
-* Inputs: none
-* Outputs: none
-* Description: pass thru to LLD
-***************************************************************/
-u16 glob_ftl_execute_cmds(void)
-{
- nand_dbg_print(NAND_DBG_TRACE,
- "glob_ftl_execute_cmds: ftl_cmd_cnt %u\n",
- (unsigned int)ftl_cmd_cnt);
- g_SBDCmdIndex = 0;
- return glob_lld_execute_cmds();
-}
-
-#endif
-
-#if !CMD_DMA
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: GLOB_FTL_Read Immediate
-* Inputs: pointer to data
-* address of data
-* Outputs: PASS / FAIL
-* Description: Reads one page of data into RAM directly from flash without
-* using or disturbing the cache. It is assumed this function is called
-* with CMD-DMA disabled.
-*****************************************************************/
-int GLOB_FTL_Read_Immediate(u8 *read_data, u64 addr)
-{
- int wResult = FAIL;
- u32 Block;
- u16 Page;
- u32 phy_blk;
- u32 *pbt = (u32 *)g_pBlockTable;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- Block = BLK_FROM_ADDR(addr);
- Page = PAGE_FROM_ADDR(addr, Block);
-
- if (!IS_SPARE_BLOCK(Block))
- return FAIL;
-
- phy_blk = pbt[Block];
- wResult = GLOB_LLD_Read_Page_Main(read_data, phy_blk, Page, 1);
-
- if (DeviceInfo.MLCDevice) {
- g_pReadCounter[phy_blk - DeviceInfo.wSpectraStartBlock]++;
- if (g_pReadCounter[phy_blk - DeviceInfo.wSpectraStartBlock]
- >= MAX_READ_COUNTER)
- FTL_Read_Disturbance(phy_blk);
- if (g_cBlockTableStatus != IN_PROGRESS_BLOCK_TABLE) {
- g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
- FTL_Write_IN_Progress_Block_Table_Page();
- }
- }
-
- return wResult;
-}
-#endif
-
-#ifdef SUPPORT_BIG_ENDIAN
-/*********************************************************************
-* Function: FTL_Invert_Block_Table
-* Inputs: none
-* Outputs: none
-* Description: Re-format the block table in ram based on BIG_ENDIAN and
-* LARGE_BLOCKNUM if necessary
-**********************************************************************/
-static void FTL_Invert_Block_Table(void)
-{
- u32 i;
- u32 *pbt = (u32 *)g_pBlockTable;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
-#ifdef SUPPORT_LARGE_BLOCKNUM
- for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
- pbt[i] = INVERTUINT32(pbt[i]);
- g_pWearCounter[i] = INVERTUINT32(g_pWearCounter[i]);
- }
-#else
- for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
- pbt[i] = INVERTUINT16(pbt[i]);
- g_pWearCounter[i] = INVERTUINT16(g_pWearCounter[i]);
- }
-#endif
-}
-#endif
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: GLOB_FTL_Flash_Init
-* Inputs: none
-* Outputs: PASS=0 / FAIL=0x01 (based on read ID)
-* Description: The flash controller is initialized
-* The flash device is reset
-* Perform a flash READ ID command to confirm that a
-* valid device is attached and active.
-* The DeviceInfo structure gets filled in
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-int GLOB_FTL_Flash_Init(void)
-{
- int status = FAIL;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- g_SBDCmdIndex = 0;
-
- status = GLOB_LLD_Flash_Init();
-
- return status;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Inputs: none
-* Outputs: PASS=0 / FAIL=0x01 (based on read ID)
-* Description: The flash controller is released
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-int GLOB_FTL_Flash_Release(void)
-{
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- return GLOB_LLD_Flash_Release();
-}
-
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: GLOB_FTL_Cache_Release
-* Inputs: none
-* Outputs: none
-* Description: Release all memory allocated in GLOB_FTL_Init
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-void GLOB_FTL_Cache_Release(void)
-{
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- free_memory();
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Cache_If_Hit
-* Inputs: Page Address
-* Outputs: Block number/UNHIT BLOCK
-* Description: Determines if the addressed page is in cache
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static u16 FTL_Cache_If_Hit(u64 page_addr)
-{
- u16 item;
- u64 addr;
- int i;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- item = UNHIT_CACHE_ITEM;
- for (i = 0; i < CACHE_ITEM_NUM; i++) {
- addr = Cache.array[i].address;
- if ((page_addr >= addr) &&
- (page_addr < (addr + Cache.cache_item_size))) {
- item = i;
- break;
- }
- }
-
- return item;
-}
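
/*
 * Editorial aside (not part of the removed driver): the hit test above boils
 * down to a half-open range check. A minimal standalone sketch, with
 * illustrative names:
 */
#include <stdint.h>

/* A logical address hits a cache item when it falls inside
 * [item_addr, item_addr + item_size). */
static int addr_hits_cache_item(uint64_t page_addr, uint64_t item_addr,
				uint64_t item_size)
{
	return page_addr >= item_addr && page_addr < item_addr + item_size;
}
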
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Calculate_LRU
-* Inputs: None
-* Outputs: None
-* Description: Calculate the least recently used block in the cache and
-* record its index in the LRU field.
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static void FTL_Calculate_LRU(void)
-{
- u16 i, bCurrentLRU, bTempCount;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- bCurrentLRU = 0;
- bTempCount = MAX_WORD_VALUE;
-
- for (i = 0; i < CACHE_ITEM_NUM; i++) {
- if (Cache.array[i].use_cnt < bTempCount) {
- bCurrentLRU = i;
- bTempCount = Cache.array[i].use_cnt;
- }
- }
-
- Cache.LRU = bCurrentLRU;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Cache_Read_Page
-* Inputs: pointer to read buffer, logical address and cache item number
-* Outputs: None
-* Description: Read the page from the cache item given by the cache item number
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static void FTL_Cache_Read_Page(u8 *data_buf, u64 logic_addr, u16 cache_item)
-{
- u8 *start_addr;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- start_addr = Cache.array[cache_item].buf;
- start_addr += (u32)(((logic_addr - Cache.array[cache_item].address) >>
- DeviceInfo.nBitsInPageDataSize) * DeviceInfo.wPageDataSize);
-
-#if CMD_DMA
- GLOB_LLD_MemCopy_CMD(data_buf, start_addr,
- DeviceInfo.wPageDataSize, 0);
- ftl_cmd_cnt++;
-#else
- memcpy(data_buf, start_addr, DeviceInfo.wPageDataSize);
-#endif
-
- if (Cache.array[cache_item].use_cnt < MAX_WORD_VALUE)
- Cache.array[cache_item].use_cnt++;
-}
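
/*
 * Editorial aside (not part of the removed driver): a sketch of the offset
 * arithmetic above. The page's position inside a cache item buffer is the
 * page index relative to the item's start address, scaled by the page data
 * size. The names and the power-of-two page-size assumption are illustrative.
 */
#include <stddef.h>
#include <stdint.h>

static uint8_t *page_ptr_in_cache_item(uint8_t *item_buf, uint64_t item_addr,
					uint64_t logic_addr,
					unsigned page_shift, uint32_t page_size)
{
	uint64_t page_idx = (logic_addr - item_addr) >> page_shift;

	return item_buf + (size_t)page_idx * page_size;
}
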
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Cache_Read_All
-* Inputs: pointer to read buffer,block address
-* Outputs: PASS=0 / FAIL =1
-* Description: Read all pages of a cache item from flash into the buffer
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static int FTL_Cache_Read_All(u8 *pData, u64 phy_addr)
-{
- int wResult = PASS;
- u32 Block;
- u32 lba;
- u16 Page;
- u16 PageCount;
- u32 *pbt = (u32 *)g_pBlockTable;
- u32 i;
-
- Block = BLK_FROM_ADDR(phy_addr);
- Page = PAGE_FROM_ADDR(phy_addr, Block);
- PageCount = Cache.pages_per_item;
-
- nand_dbg_print(NAND_DBG_DEBUG,
- "%s, Line %d, Function: %s, Block: 0x%x\n",
- __FILE__, __LINE__, __func__, Block);
-
- lba = 0xffffffff;
- for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
- if ((pbt[i] & (~BAD_BLOCK)) == Block) {
- lba = i;
- if (IS_SPARE_BLOCK(i) || IS_BAD_BLOCK(i) ||
- IS_DISCARDED_BLOCK(i)) {
- /* Add by yunpeng -2008.12.3 */
-#if CMD_DMA
- GLOB_LLD_MemCopy_CMD(pData, g_temp_buf,
- PageCount * DeviceInfo.wPageDataSize, 0);
- ftl_cmd_cnt++;
-#else
- memset(pData, 0xFF,
- PageCount * DeviceInfo.wPageDataSize);
-#endif
- return wResult;
- } else {
- continue; /* break ?? */
- }
- }
- }
-
- if (0xffffffff == lba)
- printk(KERN_ERR "FTL_Cache_Read_All: Block is not found in BT\n");
-
-#if CMD_DMA
- wResult = GLOB_LLD_Read_Page_Main_cdma(pData, Block, Page,
- PageCount, LLD_CMD_FLAG_MODE_CDMA);
- if (DeviceInfo.MLCDevice) {
- g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock]++;
- nand_dbg_print(NAND_DBG_DEBUG,
- "Read Counter modified in ftl_cmd_cnt %u"
- " Block %u Counter%u\n",
- ftl_cmd_cnt, (unsigned int)Block,
- g_pReadCounter[Block -
- DeviceInfo.wSpectraStartBlock]);
-
- p_BTableChangesDelta =
- (struct BTableChangesDelta *)g_pBTDelta_Free;
- g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
- p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
- p_BTableChangesDelta->RC_Index =
- Block - DeviceInfo.wSpectraStartBlock;
- p_BTableChangesDelta->RC_Entry_Value =
- g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock];
- p_BTableChangesDelta->ValidFields = 0xC0;
-
- ftl_cmd_cnt++;
-
- if (g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock] >=
- MAX_READ_COUNTER)
- FTL_Read_Disturbance(Block);
- if (g_cBlockTableStatus != IN_PROGRESS_BLOCK_TABLE) {
- g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
- FTL_Write_IN_Progress_Block_Table_Page();
- }
- } else {
- ftl_cmd_cnt++;
- }
-#else
- wResult = GLOB_LLD_Read_Page_Main(pData, Block, Page, PageCount);
- if (wResult == FAIL)
- return wResult;
-
- if (DeviceInfo.MLCDevice) {
- g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock]++;
- if (g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock] >=
- MAX_READ_COUNTER)
- FTL_Read_Disturbance(Block);
- if (g_cBlockTableStatus != IN_PROGRESS_BLOCK_TABLE) {
- g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
- FTL_Write_IN_Progress_Block_Table_Page();
- }
- }
-#endif
- return wResult;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Cache_Write_All
-* Inputs: pointer to cache in sys memory
-* address of free block in flash
-* Outputs: PASS=0 / FAIL=1
-* Description: writes all the pages of the block in cache to flash
-*
-* NOTE: need to make sure this works OK when the cache is limited
-* to a partial block. This is where copy-back would be
-* activated. This would require knowing which pages in the
-* cached block are clean/dirty. Right now we only know if
-* the whole block is clean/dirty.
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static int FTL_Cache_Write_All(u8 *pData, u64 blk_addr)
-{
- u16 wResult = PASS;
- u32 Block;
- u16 Page;
- u16 PageCount;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- nand_dbg_print(NAND_DBG_DEBUG, "This block %d going to be written "
- "on %d\n", cache_block_to_write,
- (u32)(blk_addr >> DeviceInfo.nBitsInBlockDataSize));
-
- Block = BLK_FROM_ADDR(blk_addr);
- Page = PAGE_FROM_ADDR(blk_addr, Block);
- PageCount = Cache.pages_per_item;
-
-#if CMD_DMA
- if (FAIL == GLOB_LLD_Write_Page_Main_cdma(pData,
- Block, Page, PageCount)) {
- nand_dbg_print(NAND_DBG_WARN,
- "NAND Program fail in %s, Line %d, "
- "Function: %s, new Bad Block %d generated! "
- "Need Bad Block replacing.\n",
- __FILE__, __LINE__, __func__, Block);
- wResult = FAIL;
- }
- ftl_cmd_cnt++;
-#else
- if (FAIL == GLOB_LLD_Write_Page_Main(pData, Block, Page, PageCount)) {
- nand_dbg_print(NAND_DBG_WARN, "NAND Program fail in %s,"
- " Line %d, Function %s, new Bad Block %d generated!"
- "Need Bad Block replacing.\n",
- __FILE__, __LINE__, __func__, Block);
- wResult = FAIL;
- }
-#endif
- return wResult;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Copy_Block
-* Inputs: source block address
-* Destination block address
-* Outputs: PASS=0 / FAIL=1
-* Description: used only for static wear leveling to move the block
-* containing static data to new blocks (more worn)
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-int FTL_Copy_Block(u64 old_blk_addr, u64 blk_addr)
-{
- int i, r1, r2, wResult = PASS;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- for (i = 0; i < DeviceInfo.wPagesPerBlock; i += Cache.pages_per_item) {
- r1 = FTL_Cache_Read_All(g_pTempBuf, old_blk_addr +
- i * DeviceInfo.wPageDataSize);
- r2 = FTL_Cache_Write_All(g_pTempBuf, blk_addr +
- i * DeviceInfo.wPageDataSize);
- if ((ERR == r1) || (FAIL == r2)) {
- wResult = FAIL;
- break;
- }
- }
-
- return wResult;
-}
-
-/* Search the block table to find the least worn block and return it */
-static u32 find_least_worn_blk_for_l2_cache(void)
-{
- int i;
- u32 *pbt = (u32 *)g_pBlockTable;
- u8 least_wear_cnt = MAX_BYTE_VALUE;
- u32 least_wear_blk_idx = MAX_U32_VALUE;
- u32 phy_idx;
-
- for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
- if (IS_SPARE_BLOCK(i)) {
- phy_idx = (u32)((~BAD_BLOCK) & pbt[i]);
- if (phy_idx > DeviceInfo.wSpectraEndBlock)
- printk(KERN_ERR "find_least_worn_blk_for_l2_cache: "
- "Too big phy block num (%d)\n", phy_idx);
- if (g_pWearCounter[phy_idx -DeviceInfo.wSpectraStartBlock] < least_wear_cnt) {
- least_wear_cnt = g_pWearCounter[phy_idx - DeviceInfo.wSpectraStartBlock];
- least_wear_blk_idx = i;
- }
- }
- }
-
- nand_dbg_print(NAND_DBG_WARN,
- "find_least_worn_blk_for_l2_cache: "
- "find block %d with least worn counter (%d)\n",
- least_wear_blk_idx, least_wear_cnt);
-
- return least_wear_blk_idx;
-}
-
-
-
-/* Get blocks for Level2 Cache */
-static int get_l2_cache_blks(void)
-{
- int n;
- u32 blk;
- u32 *pbt = (u32 *)g_pBlockTable;
-
- for (n = 0; n < BLK_NUM_FOR_L2_CACHE; n++) {
- blk = find_least_worn_blk_for_l2_cache();
- if (blk >= DeviceInfo.wDataBlockNum) {
- nand_dbg_print(NAND_DBG_WARN,
- "find_least_worn_blk_for_l2_cache: "
- "No enough free NAND blocks (n: %d) for L2 Cache!\n", n);
- return FAIL;
- }
- /* Tag the free block as discard in block table */
- pbt[blk] = (pbt[blk] & (~BAD_BLOCK)) | DISCARD_BLOCK;
- /* Add the free block to the L2 Cache block array */
- cache_l2.blk_array[n] = pbt[blk] & (~BAD_BLOCK);
- }
-
- return PASS;
-}
-
-static int erase_l2_cache_blocks(void)
-{
- int i, ret = PASS;
- u32 pblk, lblk = BAD_BLOCK;
- u64 addr;
- u32 *pbt = (u32 *)g_pBlockTable;
-
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- for (i = 0; i < BLK_NUM_FOR_L2_CACHE; i++) {
- pblk = cache_l2.blk_array[i];
-
- /* If the L2 cache block is invalid, then just skip it */
- if (MAX_U32_VALUE == pblk)
- continue;
-
- BUG_ON(pblk > DeviceInfo.wSpectraEndBlock);
-
- addr = (u64)pblk << DeviceInfo.nBitsInBlockDataSize;
- if (PASS == GLOB_FTL_Block_Erase(addr)) {
- /* Get logical block number of the erased block */
- lblk = FTL_Get_Block_Index(pblk);
- BUG_ON(BAD_BLOCK == lblk);
- /* Tag it as free in the block table */
- pbt[lblk] &= (u32)(~DISCARD_BLOCK);
- pbt[lblk] |= (u32)(SPARE_BLOCK);
- } else {
- MARK_BLOCK_AS_BAD(pbt[lblk]);
- ret = ERR;
- }
- }
-
- return ret;
-}
-
-/*
- * Merge the valid data pages in the L2 cache blocks back into NAND.
-*/
-static int flush_l2_cache(void)
-{
- struct list_head *p;
- struct spectra_l2_cache_list *pnd, *tmp_pnd;
- u32 *pbt = (u32 *)g_pBlockTable;
- u32 phy_blk, l2_blk;
- u64 addr;
- u16 l2_page;
- int i, ret = PASS;
-
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (list_empty(&cache_l2.table.list)) /* No data to flush */
- return ret;
-
- //dump_cache_l2_table();
-
- if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
- g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
- FTL_Write_IN_Progress_Block_Table_Page();
- }
-
- list_for_each(p, &cache_l2.table.list) {
- pnd = list_entry(p, struct spectra_l2_cache_list, list);
- if (IS_SPARE_BLOCK(pnd->logical_blk_num) ||
- IS_BAD_BLOCK(pnd->logical_blk_num) ||
- IS_DISCARDED_BLOCK(pnd->logical_blk_num)) {
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d\n", __FILE__, __LINE__);
- memset(cache_l2_blk_buf, 0xff, DeviceInfo.wPagesPerBlock * DeviceInfo.wPageDataSize);
- } else {
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d\n", __FILE__, __LINE__);
- phy_blk = pbt[pnd->logical_blk_num] & (~BAD_BLOCK);
- ret = GLOB_LLD_Read_Page_Main(cache_l2_blk_buf,
- phy_blk, 0, DeviceInfo.wPagesPerBlock);
- if (ret == FAIL) {
- printk(KERN_ERR "Read NAND page fail in %s, Line %d\n", __FILE__, __LINE__);
- }
- }
-
- for (i = 0; i < DeviceInfo.wPagesPerBlock; i++) {
- if (pnd->pages_array[i] != MAX_U32_VALUE) {
- l2_blk = cache_l2.blk_array[(pnd->pages_array[i] >> 16) & 0xffff];
- l2_page = pnd->pages_array[i] & 0xffff;
- ret = GLOB_LLD_Read_Page_Main(cache_l2_page_buf, l2_blk, l2_page, 1);
- if (ret == FAIL) {
- printk(KERN_ERR "Read NAND page fail in %s, Line %d\n", __FILE__, __LINE__);
- }
- memcpy(cache_l2_blk_buf + i * DeviceInfo.wPageDataSize, cache_l2_page_buf, DeviceInfo.wPageDataSize);
- }
- }
-
- /* Find a free block and tag the original block as discarded */
- addr = (u64)pnd->logical_blk_num << DeviceInfo.nBitsInBlockDataSize;
- ret = FTL_Replace_Block(addr);
- if (ret == FAIL) {
- printk(KERN_ERR "FTL_Replace_Block fail in %s, Line %d\n", __FILE__, __LINE__);
- }
-
- /* Write back the updated data into NAND */
- phy_blk = pbt[pnd->logical_blk_num] & (~BAD_BLOCK);
- if (FAIL == GLOB_LLD_Write_Page_Main(cache_l2_blk_buf, phy_blk, 0, DeviceInfo.wPagesPerBlock)) {
- nand_dbg_print(NAND_DBG_WARN,
- "Program NAND block %d fail in %s, Line %d\n",
- phy_blk, __FILE__, __LINE__);
-			/*
-			 * This may not really be a bad block, so just tag it as
-			 * discarded. Then it has a chance to be erased during
-			 * garbage collection. If it really is bad, the erase will
-			 * fail and it will be marked as bad then. Otherwise it
-			 * will be marked as free and can be used again.
-			 */
- MARK_BLK_AS_DISCARD(pbt[pnd->logical_blk_num]);
- /* Find another free block and write it again */
- FTL_Replace_Block(addr);
- phy_blk = pbt[pnd->logical_blk_num] & (~BAD_BLOCK);
- if (FAIL == GLOB_LLD_Write_Page_Main(cache_l2_blk_buf, phy_blk, 0, DeviceInfo.wPagesPerBlock)) {
- printk(KERN_ERR "Failed to write back block %d when flush L2 cache."
- "Some data will be lost!\n", phy_blk);
- MARK_BLOCK_AS_BAD(pbt[pnd->logical_blk_num]);
- }
- } else {
- /* tag the new free block as used block */
- pbt[pnd->logical_blk_num] &= (~SPARE_BLOCK);
- }
- }
-
- /* Destroy the L2 Cache table and free the memory of all nodes */
- list_for_each_entry_safe(pnd, tmp_pnd, &cache_l2.table.list, list) {
- list_del(&pnd->list);
- kfree(pnd);
- }
-
- /* Erase discard L2 cache blocks */
- if (erase_l2_cache_blocks() != PASS)
- nand_dbg_print(NAND_DBG_WARN,
- " Erase L2 cache blocks error in %s, Line %d\n",
- __FILE__, __LINE__);
-
- /* Init the Level2 Cache data structure */
- for (i = 0; i < BLK_NUM_FOR_L2_CACHE; i++)
- cache_l2.blk_array[i] = MAX_U32_VALUE;
- cache_l2.cur_blk_idx = 0;
- cache_l2.cur_page_num = 0;
- INIT_LIST_HEAD(&cache_l2.table.list);
- cache_l2.table.logical_blk_num = MAX_U32_VALUE;
-
- return ret;
-}
-
-/*
- * Write back a changed victim cache item to the Level2 Cache
- * and update the L2 Cache table to map the change.
- * If the L2 Cache is full, then start flushing it.
-*/
-static int write_back_to_l2_cache(u8 *buf, u64 logical_addr)
-{
- u32 logical_blk_num;
- u16 logical_page_num;
- struct list_head *p;
- struct spectra_l2_cache_list *pnd, *pnd_new;
- u32 node_size;
- int i, found;
-
- nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- /*
-	 * If the Level2 Cache table is empty, it means either:
-	 * 1. This is the first time the function is called after FTL_init,
-	 * or
-	 * 2. The Level2 Cache has just been flushed.
-	 *
-	 * So 'steal' some free blocks from NAND for L2 Cache use,
-	 * by just marking them as discarded in the block table.
- */
- if (list_empty(&cache_l2.table.list)) {
- BUG_ON(cache_l2.cur_blk_idx != 0);
-		BUG_ON(cache_l2.cur_page_num != 0);
- BUG_ON(cache_l2.table.logical_blk_num != MAX_U32_VALUE);
- if (FAIL == get_l2_cache_blks()) {
- GLOB_FTL_Garbage_Collection();
- if (FAIL == get_l2_cache_blks()) {
- printk(KERN_ALERT "Fail to get L2 cache blks!\n");
- return FAIL;
- }
- }
- }
-
- logical_blk_num = BLK_FROM_ADDR(logical_addr);
- logical_page_num = PAGE_FROM_ADDR(logical_addr, logical_blk_num);
- BUG_ON(logical_blk_num == MAX_U32_VALUE);
-
- /* Write the cache item data into the current position of L2 Cache */
-#if CMD_DMA
- /*
- * TODO
- */
-#else
- if (FAIL == GLOB_LLD_Write_Page_Main(buf,
- cache_l2.blk_array[cache_l2.cur_blk_idx],
- cache_l2.cur_page_num, 1)) {
- nand_dbg_print(NAND_DBG_WARN, "NAND Program fail in "
- "%s, Line %d, new Bad Block %d generated!\n",
- __FILE__, __LINE__,
- cache_l2.blk_array[cache_l2.cur_blk_idx]);
-
- /* TODO: tag the current block as bad and try again */
-
- return FAIL;
- }
-#endif
-
- /*
- * Update the L2 Cache table.
- *
-	 * First search the table to see whether the logical block
-	 * has been mapped. If not, then kmalloc a new node for the
-	 * logical block, fill in the data, and insert it into the list.
- * Otherwise, just update the mapped node directly.
- */
- found = 0;
- list_for_each(p, &cache_l2.table.list) {
- pnd = list_entry(p, struct spectra_l2_cache_list, list);
- if (pnd->logical_blk_num == logical_blk_num) {
- pnd->pages_array[logical_page_num] =
- (cache_l2.cur_blk_idx << 16) |
- cache_l2.cur_page_num;
- found = 1;
- break;
- }
- }
- if (!found) { /* Create new node for the logical block here */
-
- /* The logical pages to physical pages map array is
- * located at the end of struct spectra_l2_cache_list.
- */
- node_size = sizeof(struct spectra_l2_cache_list) +
- sizeof(u32) * DeviceInfo.wPagesPerBlock;
- pnd_new = kmalloc(node_size, GFP_ATOMIC);
- if (!pnd_new) {
- printk(KERN_ERR "Failed to kmalloc in %s Line %d\n",
- __FILE__, __LINE__);
- /*
- * TODO: Need to flush all the L2 cache into NAND ASAP
- * since no memory available here
- */
- }
- pnd_new->logical_blk_num = logical_blk_num;
- for (i = 0; i < DeviceInfo.wPagesPerBlock; i++)
- pnd_new->pages_array[i] = MAX_U32_VALUE;
- pnd_new->pages_array[logical_page_num] =
- (cache_l2.cur_blk_idx << 16) | cache_l2.cur_page_num;
- list_add(&pnd_new->list, &cache_l2.table.list);
- }
-
- /* Increasing the current position pointer of the L2 Cache */
- cache_l2.cur_page_num++;
- if (cache_l2.cur_page_num >= DeviceInfo.wPagesPerBlock) {
- cache_l2.cur_blk_idx++;
- if (cache_l2.cur_blk_idx >= BLK_NUM_FOR_L2_CACHE) {
- /* The L2 Cache is full. Need to flush it now */
- nand_dbg_print(NAND_DBG_WARN,
- "L2 Cache is full, will start to flush it\n");
- flush_l2_cache();
- } else {
- cache_l2.cur_page_num = 0;
- }
- }
-
- return PASS;
-}
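
/*
 * Editorial aside (not part of the removed driver): pages_array[] above packs
 * an L2 cache location into one 32-bit entry - the index into the L2 block
 * array in the high 16 bits and the page within that block in the low 16
 * bits, with all-ones meaning "not mapped". A sketch with illustrative names:
 */
#include <stdint.h>

#define L2_MAP_INVALID 0xFFFFFFFFu	/* stand-in for MAX_U32_VALUE */

static inline uint32_t l2_map_pack(uint16_t blk_idx, uint16_t page)
{
	return ((uint32_t)blk_idx << 16) | page;
}

static inline uint16_t l2_map_blk_idx(uint32_t entry)
{
	return (entry >> 16) & 0xFFFF;
}

static inline uint16_t l2_map_page(uint32_t entry)
{
	return entry & 0xFFFF;
}
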
-
-/*
- * Search the Level2 Cache table to find the cache item.
- * If found, read the data from the NAND page of the L2 Cache;
- * otherwise, return FAIL.
- */
-static int search_l2_cache(u8 *buf, u64 logical_addr)
-{
- u32 logical_blk_num;
- u16 logical_page_num;
- struct list_head *p;
- struct spectra_l2_cache_list *pnd;
- u32 tmp = MAX_U32_VALUE;
- u32 phy_blk;
- u16 phy_page;
- int ret = FAIL;
-
- logical_blk_num = BLK_FROM_ADDR(logical_addr);
- logical_page_num = PAGE_FROM_ADDR(logical_addr, logical_blk_num);
-
- list_for_each(p, &cache_l2.table.list) {
- pnd = list_entry(p, struct spectra_l2_cache_list, list);
- if (pnd->logical_blk_num == logical_blk_num) {
- tmp = pnd->pages_array[logical_page_num];
- break;
- }
- }
-
- if (tmp != MAX_U32_VALUE) { /* Found valid map */
- phy_blk = cache_l2.blk_array[(tmp >> 16) & 0xFFFF];
- phy_page = tmp & 0xFFFF;
-#if CMD_DMA
- /* TODO */
-#else
- ret = GLOB_LLD_Read_Page_Main(buf, phy_blk, phy_page, 1);
-#endif
- }
-
- return ret;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Cache_Write_Page
-* Inputs: Pointer to buffer, page address, cache block number
-* Outputs: PASS=0 / FAIL=1
-* Description: It writes the data in Cache Block
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static void FTL_Cache_Write_Page(u8 *pData, u64 page_addr,
- u8 cache_blk, u16 flag)
-{
- u8 *pDest;
- u64 addr;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- addr = Cache.array[cache_blk].address;
- pDest = Cache.array[cache_blk].buf;
-
- pDest += (unsigned long)(page_addr - addr);
- Cache.array[cache_blk].changed = SET;
-#if CMD_DMA
-#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
- int_cache[ftl_cmd_cnt].item = cache_blk;
- int_cache[ftl_cmd_cnt].cache.address =
- Cache.array[cache_blk].address;
- int_cache[ftl_cmd_cnt].cache.changed =
- Cache.array[cache_blk].changed;
-#endif
- GLOB_LLD_MemCopy_CMD(pDest, pData, DeviceInfo.wPageDataSize, flag);
- ftl_cmd_cnt++;
-#else
- memcpy(pDest, pData, DeviceInfo.wPageDataSize);
-#endif
- if (Cache.array[cache_blk].use_cnt < MAX_WORD_VALUE)
- Cache.array[cache_blk].use_cnt++;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Cache_Write
-* Inputs: none
-* Outputs: PASS=0 / FAIL=1
-* Description: It writes least frequently used Cache block to flash if it
-* has been changed
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static int FTL_Cache_Write(void)
-{
- int i, bResult = PASS;
- u16 bNO, least_count = 0xFFFF;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- FTL_Calculate_LRU();
-
- bNO = Cache.LRU;
- nand_dbg_print(NAND_DBG_DEBUG, "FTL_Cache_Write: "
- "Least used cache block is %d\n", bNO);
-
- if (Cache.array[bNO].changed != SET)
- return bResult;
-
- nand_dbg_print(NAND_DBG_DEBUG, "FTL_Cache_Write: Cache"
- " Block %d containing logical block %d is dirty\n",
- bNO,
- (u32)(Cache.array[bNO].address >>
- DeviceInfo.nBitsInBlockDataSize));
-#if CMD_DMA
-#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
- int_cache[ftl_cmd_cnt].item = bNO;
- int_cache[ftl_cmd_cnt].cache.address =
- Cache.array[bNO].address;
- int_cache[ftl_cmd_cnt].cache.changed = CLEAR;
-#endif
-#endif
- bResult = write_back_to_l2_cache(Cache.array[bNO].buf,
- Cache.array[bNO].address);
- if (bResult != ERR)
- Cache.array[bNO].changed = CLEAR;
-
- least_count = Cache.array[bNO].use_cnt;
-
- for (i = 0; i < CACHE_ITEM_NUM; i++) {
- if (i == bNO)
- continue;
- if (Cache.array[i].use_cnt > 0)
- Cache.array[i].use_cnt -= least_count;
- }
-
- return bResult;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Cache_Read
-* Inputs: Page address
-* Outputs: PASS=0 / FAIL=1
-* Description: It reads the block from device in Cache Block
-* Set the LRU count to 1
-* Mark the Cache Block as clean
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static int FTL_Cache_Read(u64 logical_addr)
-{
- u64 item_addr, phy_addr;
- u16 num;
- int ret;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- num = Cache.LRU; /* The LRU cache item will be overwritten */
-
- item_addr = (u64)GLOB_u64_Div(logical_addr, Cache.cache_item_size) *
- Cache.cache_item_size;
- Cache.array[num].address = item_addr;
- Cache.array[num].use_cnt = 1;
- Cache.array[num].changed = CLEAR;
-
-#if CMD_DMA
-#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
- int_cache[ftl_cmd_cnt].item = num;
- int_cache[ftl_cmd_cnt].cache.address =
- Cache.array[num].address;
- int_cache[ftl_cmd_cnt].cache.changed =
- Cache.array[num].changed;
-#endif
-#endif
- /*
- * Search in L2 Cache. If hit, fill data into L1 Cache item buffer,
- * Otherwise, read it from NAND
- */
- ret = search_l2_cache(Cache.array[num].buf, logical_addr);
- if (PASS == ret) /* Hit in L2 Cache */
- return ret;
-
- /* Compute the physical start address of NAND device according to */
- /* the logical start address of the cache item (LRU cache item) */
- phy_addr = FTL_Get_Physical_Block_Addr(item_addr) +
- GLOB_u64_Remainder(item_addr, 2);
-
- return FTL_Cache_Read_All(Cache.array[num].buf, phy_addr);
-}
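
/*
 * Editorial aside (not part of the removed driver): the item_addr computation
 * above rounds the requested logical address down to a cache-item boundary.
 * GLOB_u64_Div is the driver's 64-bit division helper; plain division is used
 * here for illustration, and the names are made up.
 */
#include <stdint.h>

static uint64_t cache_item_start_addr(uint64_t logical_addr, uint64_t item_size)
{
	return (logical_addr / item_size) * item_size;
}
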
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Check_Block_Table
-* Inputs: ?
-* Outputs: PASS=0 / FAIL=1
-* Description: It checks the correctness of each block table entry
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static int FTL_Check_Block_Table(int wOldTable)
-{
- u32 i;
- int wResult = PASS;
- u32 blk_idx;
- u32 *pbt = (u32 *)g_pBlockTable;
- u8 *pFlag = flag_check_blk_table;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (NULL != pFlag) {
- memset(pFlag, FAIL, DeviceInfo.wDataBlockNum);
- for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
- blk_idx = (u32)(pbt[i] & (~BAD_BLOCK));
-
- /*
- * 20081006/KBV - Changed to pFlag[i] reference
- * to avoid buffer overflow
- */
-
- /*
- * 2008-10-20 Yunpeng Note: This change avoid
- * buffer overflow, but changed function of
-			 * the code, so it should be re-written later
- */
- if ((blk_idx > DeviceInfo.wSpectraEndBlock) ||
- PASS == pFlag[i]) {
- wResult = FAIL;
- break;
- } else {
- pFlag[i] = PASS;
- }
- }
- }
-
- return wResult;
-}
-
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Write_Block_Table
-* Inputs: force flag
-* Outputs: 0=Block Table was updated. No write done. 1=Block write needs to
-* happen. -1 Error
-* Description: It writes the block table
-* The block table is always mapped to LBA 0, which in turn
-* is mapped to any physical block
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static int FTL_Write_Block_Table(int wForce)
-{
- u32 *pbt = (u32 *)g_pBlockTable;
- int wSuccess = PASS;
- u32 wTempBlockTableIndex;
- u16 bt_pages, new_bt_offset;
- u8 blockchangeoccured = 0;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();
-
- if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus)
- return 0;
-
- if (PASS == wForce) {
- g_wBlockTableOffset =
- (u16)(DeviceInfo.wPagesPerBlock - bt_pages);
-#if CMD_DMA
- p_BTableChangesDelta =
- (struct BTableChangesDelta *)g_pBTDelta_Free;
- g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
-
- p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
- p_BTableChangesDelta->g_wBlockTableOffset =
- g_wBlockTableOffset;
- p_BTableChangesDelta->ValidFields = 0x01;
-#endif
- }
-
- nand_dbg_print(NAND_DBG_DEBUG,
- "Inside FTL_Write_Block_Table: block %d Page:%d\n",
- g_wBlockTableIndex, g_wBlockTableOffset);
-
- do {
- new_bt_offset = g_wBlockTableOffset + bt_pages + 1;
- if ((0 == (new_bt_offset % DeviceInfo.wPagesPerBlock)) ||
- (new_bt_offset > DeviceInfo.wPagesPerBlock) ||
- (FAIL == wSuccess)) {
- wTempBlockTableIndex = FTL_Replace_Block_Table();
- if (BAD_BLOCK == wTempBlockTableIndex)
- return ERR;
- if (!blockchangeoccured) {
- bt_block_changed = 1;
- blockchangeoccured = 1;
- }
-
- g_wBlockTableIndex = wTempBlockTableIndex;
- g_wBlockTableOffset = 0;
- pbt[BLOCK_TABLE_INDEX] = g_wBlockTableIndex;
-#if CMD_DMA
- p_BTableChangesDelta =
- (struct BTableChangesDelta *)g_pBTDelta_Free;
- g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
-
- p_BTableChangesDelta->ftl_cmd_cnt =
- ftl_cmd_cnt;
- p_BTableChangesDelta->g_wBlockTableOffset =
- g_wBlockTableOffset;
- p_BTableChangesDelta->g_wBlockTableIndex =
- g_wBlockTableIndex;
- p_BTableChangesDelta->ValidFields = 0x03;
-
- p_BTableChangesDelta =
- (struct BTableChangesDelta *)g_pBTDelta_Free;
- g_pBTDelta_Free +=
- sizeof(struct BTableChangesDelta);
-
- p_BTableChangesDelta->ftl_cmd_cnt =
- ftl_cmd_cnt;
- p_BTableChangesDelta->BT_Index =
- BLOCK_TABLE_INDEX;
- p_BTableChangesDelta->BT_Entry_Value =
- pbt[BLOCK_TABLE_INDEX];
- p_BTableChangesDelta->ValidFields = 0x0C;
-#endif
- }
-
- wSuccess = FTL_Write_Block_Table_Data();
- if (FAIL == wSuccess)
- MARK_BLOCK_AS_BAD(pbt[BLOCK_TABLE_INDEX]);
- } while (FAIL == wSuccess);
-
- g_cBlockTableStatus = CURRENT_BLOCK_TABLE;
-
- return 1;
-}
-
-static int force_format_nand(void)
-{
- u32 i;
-
-	/* Force erase the whole unprotected physical partition of NAND */
-	printk(KERN_ALERT "Start to force erase whole NAND device ...\n");
-	printk(KERN_ALERT "From physical block %d to %d\n",
- DeviceInfo.wSpectraStartBlock, DeviceInfo.wSpectraEndBlock);
- for (i = DeviceInfo.wSpectraStartBlock; i <= DeviceInfo.wSpectraEndBlock; i++) {
- if (GLOB_LLD_Erase_Block(i))
- printk(KERN_ERR "Failed to force erase NAND block %d\n", i);
- }
- printk(KERN_ALERT "Force Erase ends. Please reboot the system ...\n");
-	while (1);
-
- return PASS;
-}
-
-int GLOB_FTL_Flash_Format(void)
-{
- //return FTL_Format_Flash(1);
- return force_format_nand();
-
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Search_Block_Table_IN_Block
-* Inputs: Block Number
-* Pointer to page
-* Outputs: PASS / FAIL
-* Page containing the block table
-* Description: It searches the block table in the block
-* passed as an argument.
-*
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static int FTL_Search_Block_Table_IN_Block(u32 BT_Block,
- u8 BT_Tag, u16 *Page)
-{
- u16 i, j, k;
- u16 Result = PASS;
- u16 Last_IPF = 0;
- u8 BT_Found = 0;
- u8 *tagarray;
- u8 *tempbuf = tmp_buf_search_bt_in_block;
- u8 *pSpareBuf = spare_buf_search_bt_in_block;
- u8 *pSpareBufBTLastPage = spare_buf_bt_search_bt_in_block;
- u8 bt_flag_last_page = 0xFF;
- u8 search_in_previous_pages = 0;
- u16 bt_pages;
-
- nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- nand_dbg_print(NAND_DBG_DEBUG,
- "Searching block table in %u block\n",
- (unsigned int)BT_Block);
-
- bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();
-
- for (i = bt_pages; i < DeviceInfo.wPagesPerBlock;
- i += (bt_pages + 1)) {
- nand_dbg_print(NAND_DBG_DEBUG,
- "Searching last IPF: %d\n", i);
- Result = GLOB_LLD_Read_Page_Main_Polling(tempbuf,
- BT_Block, i, 1);
-
- if (0 == memcmp(tempbuf, g_pIPF, DeviceInfo.wPageDataSize)) {
- if ((i + bt_pages + 1) < DeviceInfo.wPagesPerBlock) {
- continue;
- } else {
- search_in_previous_pages = 1;
- Last_IPF = i;
- }
- }
-
- if (!search_in_previous_pages) {
- if (i != bt_pages) {
- i -= (bt_pages + 1);
- Last_IPF = i;
- }
- }
-
- if (0 == Last_IPF)
- break;
-
- if (!search_in_previous_pages) {
- i = i + 1;
- nand_dbg_print(NAND_DBG_DEBUG,
- "Reading the spare area of Block %u Page %u",
- (unsigned int)BT_Block, i);
- Result = GLOB_LLD_Read_Page_Spare(pSpareBuf,
- BT_Block, i, 1);
- nand_dbg_print(NAND_DBG_DEBUG,
- "Reading the spare area of Block %u Page %u",
- (unsigned int)BT_Block, i + bt_pages - 1);
- Result = GLOB_LLD_Read_Page_Spare(pSpareBufBTLastPage,
- BT_Block, i + bt_pages - 1, 1);
-
- k = 0;
- j = FTL_Extract_Block_Table_Tag(pSpareBuf, &tagarray);
- if (j) {
- for (; k < j; k++) {
- if (tagarray[k] == BT_Tag)
- break;
- }
- }
-
- if (k < j)
- bt_flag = tagarray[k];
- else
- Result = FAIL;
-
- if (Result == PASS) {
- k = 0;
- j = FTL_Extract_Block_Table_Tag(
- pSpareBufBTLastPage, &tagarray);
- if (j) {
- for (; k < j; k++) {
- if (tagarray[k] == BT_Tag)
- break;
- }
- }
-
- if (k < j)
- bt_flag_last_page = tagarray[k];
- else
- Result = FAIL;
-
- if (Result == PASS) {
- if (bt_flag == bt_flag_last_page) {
- nand_dbg_print(NAND_DBG_DEBUG,
- "Block table is found"
- " in page after IPF "
- "at block %d "
- "page %d\n",
- (int)BT_Block, i);
- BT_Found = 1;
- *Page = i;
- g_cBlockTableStatus =
- CURRENT_BLOCK_TABLE;
- break;
- } else {
- Result = FAIL;
- }
- }
- }
- }
-
- if (search_in_previous_pages)
- i = i - bt_pages;
- else
- i = i - (bt_pages + 1);
-
- Result = PASS;
-
- nand_dbg_print(NAND_DBG_DEBUG,
- "Reading the spare area of Block %d Page %d",
- (int)BT_Block, i);
-
- Result = GLOB_LLD_Read_Page_Spare(pSpareBuf, BT_Block, i, 1);
- nand_dbg_print(NAND_DBG_DEBUG,
- "Reading the spare area of Block %u Page %u",
- (unsigned int)BT_Block, i + bt_pages - 1);
-
- Result = GLOB_LLD_Read_Page_Spare(pSpareBufBTLastPage,
- BT_Block, i + bt_pages - 1, 1);
-
- k = 0;
- j = FTL_Extract_Block_Table_Tag(pSpareBuf, &tagarray);
- if (j) {
- for (; k < j; k++) {
- if (tagarray[k] == BT_Tag)
- break;
- }
- }
-
- if (k < j)
- bt_flag = tagarray[k];
- else
- Result = FAIL;
-
- if (Result == PASS) {
- k = 0;
- j = FTL_Extract_Block_Table_Tag(pSpareBufBTLastPage,
- &tagarray);
- if (j) {
- for (; k < j; k++) {
- if (tagarray[k] == BT_Tag)
- break;
- }
- }
-
- if (k < j) {
- bt_flag_last_page = tagarray[k];
- } else {
- Result = FAIL;
- break;
- }
-
- if (Result == PASS) {
- if (bt_flag == bt_flag_last_page) {
- nand_dbg_print(NAND_DBG_DEBUG,
- "Block table is found "
- "in page prior to IPF "
- "at block %u page %d\n",
- (unsigned int)BT_Block, i);
- BT_Found = 1;
- *Page = i;
- g_cBlockTableStatus =
- IN_PROGRESS_BLOCK_TABLE;
- break;
- } else {
- Result = FAIL;
- break;
- }
- }
- }
- }
-
- if (Result == FAIL) {
- if ((Last_IPF > bt_pages) && (i < Last_IPF) && (!BT_Found)) {
- BT_Found = 1;
- *Page = i - (bt_pages + 1);
- }
- if ((Last_IPF == bt_pages) && (i < Last_IPF) && (!BT_Found))
- goto func_return;
- }
-
- if (Last_IPF == 0) {
- i = 0;
- Result = PASS;
- nand_dbg_print(NAND_DBG_DEBUG, "Reading the spare area of "
- "Block %u Page %u", (unsigned int)BT_Block, i);
-
- Result = GLOB_LLD_Read_Page_Spare(pSpareBuf, BT_Block, i, 1);
- nand_dbg_print(NAND_DBG_DEBUG,
- "Reading the spare area of Block %u Page %u",
- (unsigned int)BT_Block, i + bt_pages - 1);
- Result = GLOB_LLD_Read_Page_Spare(pSpareBufBTLastPage,
- BT_Block, i + bt_pages - 1, 1);
-
- k = 0;
- j = FTL_Extract_Block_Table_Tag(pSpareBuf, &tagarray);
- if (j) {
- for (; k < j; k++) {
- if (tagarray[k] == BT_Tag)
- break;
- }
- }
-
- if (k < j)
- bt_flag = tagarray[k];
- else
- Result = FAIL;
-
- if (Result == PASS) {
- k = 0;
- j = FTL_Extract_Block_Table_Tag(pSpareBufBTLastPage,
- &tagarray);
- if (j) {
- for (; k < j; k++) {
- if (tagarray[k] == BT_Tag)
- break;
- }
- }
-
- if (k < j)
- bt_flag_last_page = tagarray[k];
- else
- Result = FAIL;
-
- if (Result == PASS) {
- if (bt_flag == bt_flag_last_page) {
- nand_dbg_print(NAND_DBG_DEBUG,
- "Block table is found "
- "in page after IPF at "
- "block %u page %u\n",
- (unsigned int)BT_Block,
- (unsigned int)i);
- BT_Found = 1;
- *Page = i;
- g_cBlockTableStatus =
- CURRENT_BLOCK_TABLE;
- goto func_return;
- } else {
- Result = FAIL;
- }
- }
- }
-
- if (Result == FAIL)
- goto func_return;
- }
-func_return:
- return Result;
-}
-
-u8 *get_blk_table_start_addr(void)
-{
- return g_pBlockTable;
-}
-
-unsigned long get_blk_table_len(void)
-{
- return DeviceInfo.wDataBlockNum * sizeof(u32);
-}
-
-u8 *get_wear_leveling_table_start_addr(void)
-{
- return g_pWearCounter;
-}
-
-unsigned long get_wear_leveling_table_len(void)
-{
- return DeviceInfo.wDataBlockNum * sizeof(u8);
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Read_Block_Table
-* Inputs: none
-* Outputs: PASS / FAIL
-* Description: read the flash spare area and find a block containing the
-* most recent block table (having largest block_table_counter).
-* Find the last written Block table in this block.
-* Check the correctness of Block Table
-* If CDMA is enabled, this function is called in
-* polling mode.
-* We don't need to store changes in Block table in this
-* function as it is called only at initialization
-*
-* Note: Currently this function is called at initialization
-* before any read/erase/write command issued to flash so,
-* there is no need to wait for CDMA list to complete as of now
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static int FTL_Read_Block_Table(void)
-{
- u16 i = 0;
- int k, j;
- u8 *tempBuf, *tagarray;
- int wResult = FAIL;
- int status = FAIL;
- u8 block_table_found = 0;
- int search_result;
- u32 Block;
- u16 Page = 0;
- u16 PageCount;
- u16 bt_pages;
- int wBytesCopied = 0, tempvar;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- tempBuf = tmp_buf1_read_blk_table;
- bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();
-
- for (j = DeviceInfo.wSpectraStartBlock;
- j <= (int)DeviceInfo.wSpectraEndBlock;
- j++) {
- status = GLOB_LLD_Read_Page_Spare(tempBuf, j, 0, 1);
- k = 0;
- i = FTL_Extract_Block_Table_Tag(tempBuf, &tagarray);
- if (i) {
- status = GLOB_LLD_Read_Page_Main_Polling(tempBuf,
- j, 0, 1);
- for (; k < i; k++) {
- if (tagarray[k] == tempBuf[3])
- break;
- }
- }
-
- if (k < i)
- k = tagarray[k];
- else
- continue;
-
- nand_dbg_print(NAND_DBG_DEBUG,
- "Block table is contained in Block %d %d\n",
- (unsigned int)j, (unsigned int)k);
-
- if (g_pBTBlocks[k-FIRST_BT_ID] == BTBLOCK_INVAL) {
- g_pBTBlocks[k-FIRST_BT_ID] = j;
- block_table_found = 1;
- } else {
- printk(KERN_ERR "FTL_Read_Block_Table -"
- "This should never happens. "
- "Two block table have same counter %u!\n", k);
- }
- }
-
- if (block_table_found) {
- if (g_pBTBlocks[FIRST_BT_ID - FIRST_BT_ID] != BTBLOCK_INVAL &&
- g_pBTBlocks[LAST_BT_ID - FIRST_BT_ID] != BTBLOCK_INVAL) {
- j = LAST_BT_ID;
- while ((j > FIRST_BT_ID) &&
- (g_pBTBlocks[j - FIRST_BT_ID] != BTBLOCK_INVAL))
- j--;
- if (j == FIRST_BT_ID) {
- j = LAST_BT_ID;
- last_erased = LAST_BT_ID;
- } else {
- last_erased = (u8)j + 1;
- while ((j > FIRST_BT_ID) && (BTBLOCK_INVAL ==
- g_pBTBlocks[j - FIRST_BT_ID]))
- j--;
- }
- } else {
- j = FIRST_BT_ID;
- while (g_pBTBlocks[j - FIRST_BT_ID] == BTBLOCK_INVAL)
- j++;
- last_erased = (u8)j;
- while ((j < LAST_BT_ID) && (BTBLOCK_INVAL !=
- g_pBTBlocks[j - FIRST_BT_ID]))
- j++;
- if (g_pBTBlocks[j-FIRST_BT_ID] == BTBLOCK_INVAL)
- j--;
- }
-
- if (last_erased > j)
- j += (1 + LAST_BT_ID - FIRST_BT_ID);
-
- for (; (j >= last_erased) && (FAIL == wResult); j--) {
- i = (j - FIRST_BT_ID) %
- (1 + LAST_BT_ID - FIRST_BT_ID);
- search_result =
- FTL_Search_Block_Table_IN_Block(g_pBTBlocks[i],
- i + FIRST_BT_ID, &Page);
- if (g_cBlockTableStatus == IN_PROGRESS_BLOCK_TABLE)
- block_table_found = 0;
-
- while ((search_result == PASS) && (FAIL == wResult)) {
- nand_dbg_print(NAND_DBG_DEBUG,
- "FTL_Read_Block_Table:"
- "Block: %u Page: %u "
- "contains block table\n",
- (unsigned int)g_pBTBlocks[i],
- (unsigned int)Page);
-
- tempBuf = tmp_buf2_read_blk_table;
-
- for (k = 0; k < bt_pages; k++) {
- Block = g_pBTBlocks[i];
- PageCount = 1;
-
- status =
- GLOB_LLD_Read_Page_Main_Polling(
- tempBuf, Block, Page, PageCount);
-
- tempvar = k ? 0 : 4;
-
- wBytesCopied +=
- FTL_Copy_Block_Table_From_Flash(
- tempBuf + tempvar,
- DeviceInfo.wPageDataSize - tempvar,
- wBytesCopied);
-
- Page++;
- }
-
- wResult = FTL_Check_Block_Table(FAIL);
- if (FAIL == wResult) {
- block_table_found = 0;
- if (Page > bt_pages)
- Page -= ((bt_pages<<1) + 1);
- else
- search_result = FAIL;
- }
- }
- }
- }
-
- if (PASS == wResult) {
- if (!block_table_found)
- FTL_Execute_SPL_Recovery();
-
- if (g_cBlockTableStatus == IN_PROGRESS_BLOCK_TABLE)
- g_wBlockTableOffset = (u16)Page + 1;
- else
- g_wBlockTableOffset = (u16)Page - bt_pages;
-
- g_wBlockTableIndex = (u32)g_pBTBlocks[i];
-
-#if CMD_DMA
- if (DeviceInfo.MLCDevice)
- memcpy(g_pBTStartingCopy, g_pBlockTable,
- DeviceInfo.wDataBlockNum * sizeof(u32)
- + DeviceInfo.wDataBlockNum * sizeof(u8)
- + DeviceInfo.wDataBlockNum * sizeof(u16));
- else
- memcpy(g_pBTStartingCopy, g_pBlockTable,
- DeviceInfo.wDataBlockNum * sizeof(u32)
- + DeviceInfo.wDataBlockNum * sizeof(u8));
-#endif
- }
-
- if (FAIL == wResult)
- printk(KERN_ERR "Yunpeng - "
- "Can not find valid spectra block table!\n");
-
-#if AUTO_FORMAT_FLASH
- if (FAIL == wResult) {
- nand_dbg_print(NAND_DBG_DEBUG, "doing auto-format\n");
- wResult = FTL_Format_Flash(0);
- }
-#endif
-
- return wResult;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Get_Page_Num
-* Inputs: Size in bytes
-* Outputs: Size in pages
-* Description: It calculates the pages required for the length passed
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static u32 FTL_Get_Page_Num(u64 length)
-{
- return (u32)((length >> DeviceInfo.nBitsInPageDataSize) +
- (GLOB_u64_Remainder(length , 1) > 0 ? 1 : 0));
-}
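
/*
 * Editorial aside (not part of the removed driver): FTL_Get_Page_Num above is
 * a ceiling division by the page data size, which is a power of two
 * (1 << DeviceInfo.nBitsInPageDataSize). An equivalent standalone sketch with
 * illustrative names:
 */
#include <stdint.h>

static uint32_t pages_for_length(uint64_t length, unsigned page_shift)
{
	uint64_t page_size = 1ULL << page_shift;

	/* Same result as (length >> page_shift) + (remainder ? 1 : 0) */
	return (uint32_t)((length + page_size - 1) >> page_shift);
}
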
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Get_Physical_Block_Addr
-* Inputs: Block Address (byte format)
-* Outputs: Physical address of the block.
-* Description: It translates LBA to PBA by returning address stored
-* at the LBA location in the block table
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static u64 FTL_Get_Physical_Block_Addr(u64 logical_addr)
-{
- u32 *pbt;
- u64 physical_addr;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- pbt = (u32 *)g_pBlockTable;
- physical_addr = (u64) DeviceInfo.wBlockDataSize *
- (pbt[BLK_FROM_ADDR(logical_addr)] & (~BAD_BLOCK));
-
- return physical_addr;
-}
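
/*
 * Editorial aside (not part of the removed driver): a sketch of the
 * translation above. The block table is indexed by logical block number and
 * stores the physical block number plus status bits; masking the status bits
 * and multiplying by the block data size yields the physical byte address.
 * BAD_BLOCK_BITS and the other names here are illustrative stand-ins.
 */
#include <stdint.h>

#define BAD_BLOCK_BITS 0xC0000000u	/* illustrative status-bit mask */

static uint64_t logical_to_physical_addr(uint64_t logical_addr,
					 const uint32_t *block_table,
					 uint64_t block_data_size,
					 unsigned block_shift)
{
	uint32_t lba = (uint32_t)(logical_addr >> block_shift);
	uint32_t pba = block_table[lba] & ~BAD_BLOCK_BITS;

	return (uint64_t)pba * block_data_size;
}
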
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Get_Block_Index
-* Inputs: Physical Block no.
-* Outputs: Logical block no. /BAD_BLOCK
-* Description: It returns the logical block no. for the PBA passed
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static u32 FTL_Get_Block_Index(u32 wBlockNum)
-{
- u32 *pbt = (u32 *)g_pBlockTable;
- u32 i;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- for (i = 0; i < DeviceInfo.wDataBlockNum; i++)
- if (wBlockNum == (pbt[i] & (~BAD_BLOCK)))
- return i;
-
- return BAD_BLOCK;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: GLOB_FTL_Wear_Leveling
-* Inputs: none
-* Outputs: PASS=0
-* Description: This is static wear leveling (done by explicit call)
-* do complete static wear leveling
-* do complete garbage collection
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-int GLOB_FTL_Wear_Leveling(void)
-{
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- FTL_Static_Wear_Leveling();
- GLOB_FTL_Garbage_Collection();
-
- return PASS;
-}
-
-static void find_least_most_worn(u8 *chg,
- u32 *least_idx, u8 *least_cnt,
- u32 *most_idx, u8 *most_cnt)
-{
- u32 *pbt = (u32 *)g_pBlockTable;
- u32 idx;
- u8 cnt;
- int i;
-
- for (i = BLOCK_TABLE_INDEX + 1; i < DeviceInfo.wDataBlockNum; i++) {
- if (IS_BAD_BLOCK(i) || PASS == chg[i])
- continue;
-
- idx = (u32) ((~BAD_BLOCK) & pbt[i]);
- cnt = g_pWearCounter[idx - DeviceInfo.wSpectraStartBlock];
-
- if (IS_SPARE_BLOCK(i)) {
- if (cnt > *most_cnt) {
- *most_cnt = cnt;
- *most_idx = idx;
- }
- }
-
- if (IS_DATA_BLOCK(i)) {
- if (cnt < *least_cnt) {
- *least_cnt = cnt;
- *least_idx = idx;
- }
- }
-
- if (PASS == chg[*most_idx] || PASS == chg[*least_idx]) {
- debug_boundary_error(*most_idx,
- DeviceInfo.wDataBlockNum, 0);
- debug_boundary_error(*least_idx,
- DeviceInfo.wDataBlockNum, 0);
- continue;
- }
- }
-}
-
-static int move_blks_for_wear_leveling(u8 *chg,
- u32 *least_idx, u32 *rep_blk_num, int *result)
-{
- u32 *pbt = (u32 *)g_pBlockTable;
- u32 rep_blk;
- int j, ret_cp_blk, ret_erase;
- int ret = PASS;
-
- chg[*least_idx] = PASS;
- debug_boundary_error(*least_idx, DeviceInfo.wDataBlockNum, 0);
-
- rep_blk = FTL_Replace_MWBlock();
- if (rep_blk != BAD_BLOCK) {
- nand_dbg_print(NAND_DBG_DEBUG,
- "More than two spare blocks exist so do it\n");
- nand_dbg_print(NAND_DBG_DEBUG, "Block Replaced is %d\n",
- rep_blk);
-
- chg[rep_blk] = PASS;
-
- if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
- g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
- FTL_Write_IN_Progress_Block_Table_Page();
- }
-
- for (j = 0; j < RETRY_TIMES; j++) {
- ret_cp_blk = FTL_Copy_Block((u64)(*least_idx) *
- DeviceInfo.wBlockDataSize,
- (u64)rep_blk * DeviceInfo.wBlockDataSize);
- if (FAIL == ret_cp_blk) {
- ret_erase = GLOB_FTL_Block_Erase((u64)rep_blk
- * DeviceInfo.wBlockDataSize);
- if (FAIL == ret_erase)
- MARK_BLOCK_AS_BAD(pbt[rep_blk]);
- } else {
- nand_dbg_print(NAND_DBG_DEBUG,
- "FTL_Copy_Block == OK\n");
- break;
- }
- }
-
- if (j < RETRY_TIMES) {
- u32 tmp;
- u32 old_idx = FTL_Get_Block_Index(*least_idx);
- u32 rep_idx = FTL_Get_Block_Index(rep_blk);
- tmp = (u32)(DISCARD_BLOCK | pbt[old_idx]);
- pbt[old_idx] = (u32)((~SPARE_BLOCK) &
- pbt[rep_idx]);
- pbt[rep_idx] = tmp;
-#if CMD_DMA
- p_BTableChangesDelta = (struct BTableChangesDelta *)
- g_pBTDelta_Free;
- g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
- p_BTableChangesDelta->ftl_cmd_cnt =
- ftl_cmd_cnt;
- p_BTableChangesDelta->BT_Index = old_idx;
- p_BTableChangesDelta->BT_Entry_Value = pbt[old_idx];
- p_BTableChangesDelta->ValidFields = 0x0C;
-
- p_BTableChangesDelta = (struct BTableChangesDelta *)
- g_pBTDelta_Free;
- g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
-
- p_BTableChangesDelta->ftl_cmd_cnt =
- ftl_cmd_cnt;
- p_BTableChangesDelta->BT_Index = rep_idx;
- p_BTableChangesDelta->BT_Entry_Value = pbt[rep_idx];
- p_BTableChangesDelta->ValidFields = 0x0C;
-#endif
- } else {
- pbt[FTL_Get_Block_Index(rep_blk)] |= BAD_BLOCK;
-#if CMD_DMA
- p_BTableChangesDelta = (struct BTableChangesDelta *)
- g_pBTDelta_Free;
- g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
-
- p_BTableChangesDelta->ftl_cmd_cnt =
- ftl_cmd_cnt;
- p_BTableChangesDelta->BT_Index =
- FTL_Get_Block_Index(rep_blk);
- p_BTableChangesDelta->BT_Entry_Value =
- pbt[FTL_Get_Block_Index(rep_blk)];
- p_BTableChangesDelta->ValidFields = 0x0C;
-#endif
- *result = FAIL;
- ret = FAIL;
- }
-
- if (((*rep_blk_num)++) > WEAR_LEVELING_BLOCK_NUM)
- ret = FAIL;
- } else {
- printk(KERN_ERR "Less than 3 spare blocks exist so quit\n");
- ret = FAIL;
- }
-
- return ret;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Static_Wear_Leveling
-* Inputs: none
-* Outputs: PASS=0 / FAIL=1
-* Description: This is static wear leveling (done by explicit call)
-* search for the most & least worn blocks
-* if difference > GATE:
-* update the block table with the exchange
-* mark block table in flash as IN_PROGRESS
-* copy flash block
-* the caller should handle GC clean up after calling this function
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-int FTL_Static_Wear_Leveling(void)
-{
- u8 most_worn_cnt;
- u8 least_worn_cnt;
- u32 most_worn_idx;
- u32 least_worn_idx;
- int result = PASS;
- int go_on = PASS;
- u32 replaced_blks = 0;
- u8 *chang_flag = flags_static_wear_leveling;
-
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (!chang_flag)
- return FAIL;
-
- memset(chang_flag, FAIL, DeviceInfo.wDataBlockNum);
- while (go_on == PASS) {
- nand_dbg_print(NAND_DBG_DEBUG,
- "starting static wear leveling\n");
- most_worn_cnt = 0;
- least_worn_cnt = 0xFF;
- least_worn_idx = BLOCK_TABLE_INDEX;
- most_worn_idx = BLOCK_TABLE_INDEX;
-
- find_least_most_worn(chang_flag, &least_worn_idx,
- &least_worn_cnt, &most_worn_idx, &most_worn_cnt);
-
- nand_dbg_print(NAND_DBG_DEBUG,
- "Used and least worn is block %u, whos count is %u\n",
- (unsigned int)least_worn_idx,
- (unsigned int)least_worn_cnt);
-
- nand_dbg_print(NAND_DBG_DEBUG,
- "Free and most worn is block %u, whos count is %u\n",
- (unsigned int)most_worn_idx,
- (unsigned int)most_worn_cnt);
-
- if ((most_worn_cnt > least_worn_cnt) &&
- (most_worn_cnt - least_worn_cnt > WEAR_LEVELING_GATE))
- go_on = move_blks_for_wear_leveling(chang_flag,
- &least_worn_idx, &replaced_blks, &result);
- else
- go_on = FAIL;
- }
-
- return result;
-}
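
/*
 * Editorial aside (not part of the removed driver): the loop above keeps
 * swapping blocks while the spread between the most-worn spare block and the
 * least-worn data block exceeds the wear-leveling gate. The trigger condition
 * in isolation, with illustrative names:
 */
static int wear_gap_exceeds_gate(unsigned char most_worn_cnt,
				 unsigned char least_worn_cnt,
				 unsigned char gate)
{
	return most_worn_cnt > least_worn_cnt &&
	       most_worn_cnt - least_worn_cnt > gate;
}
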
-
-#if CMD_DMA
-static int do_garbage_collection(u32 discard_cnt)
-{
- u32 *pbt = (u32 *)g_pBlockTable;
- u32 pba;
- u8 bt_block_erased = 0;
- int i, cnt, ret = FAIL;
- u64 addr;
-
- i = 0;
- while ((i < DeviceInfo.wDataBlockNum) && (discard_cnt > 0) &&
- ((ftl_cmd_cnt + 28) < 256)) {
- if (((pbt[i] & BAD_BLOCK) != BAD_BLOCK) &&
- (pbt[i] & DISCARD_BLOCK)) {
- if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
- g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
- FTL_Write_IN_Progress_Block_Table_Page();
- }
-
- addr = FTL_Get_Physical_Block_Addr((u64)i *
- DeviceInfo.wBlockDataSize);
- pba = BLK_FROM_ADDR(addr);
-
- for (cnt = FIRST_BT_ID; cnt <= LAST_BT_ID; cnt++) {
- if (pba == g_pBTBlocks[cnt - FIRST_BT_ID]) {
- nand_dbg_print(NAND_DBG_DEBUG,
- "GC will erase BT block %u\n",
- (unsigned int)pba);
- discard_cnt--;
- i++;
- bt_block_erased = 1;
- break;
- }
- }
-
- if (bt_block_erased) {
- bt_block_erased = 0;
- continue;
- }
-
- addr = FTL_Get_Physical_Block_Addr((u64)i *
- DeviceInfo.wBlockDataSize);
-
- if (PASS == GLOB_FTL_Block_Erase(addr)) {
- pbt[i] &= (u32)(~DISCARD_BLOCK);
- pbt[i] |= (u32)(SPARE_BLOCK);
- p_BTableChangesDelta =
- (struct BTableChangesDelta *)
- g_pBTDelta_Free;
- g_pBTDelta_Free +=
- sizeof(struct BTableChangesDelta);
- p_BTableChangesDelta->ftl_cmd_cnt =
- ftl_cmd_cnt - 1;
- p_BTableChangesDelta->BT_Index = i;
- p_BTableChangesDelta->BT_Entry_Value = pbt[i];
- p_BTableChangesDelta->ValidFields = 0x0C;
- discard_cnt--;
- ret = PASS;
- } else {
- MARK_BLOCK_AS_BAD(pbt[i]);
- }
- }
-
- i++;
- }
-
- return ret;
-}
-
-#else
-static int do_garbage_collection(u32 discard_cnt)
-{
- u32 *pbt = (u32 *)g_pBlockTable;
- u32 pba;
- u8 bt_block_erased = 0;
- int i, cnt, ret = FAIL;
- u64 addr;
-
- i = 0;
- while ((i < DeviceInfo.wDataBlockNum) && (discard_cnt > 0)) {
- if (((pbt[i] & BAD_BLOCK) != BAD_BLOCK) &&
- (pbt[i] & DISCARD_BLOCK)) {
- if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
- g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
- FTL_Write_IN_Progress_Block_Table_Page();
- }
-
- addr = FTL_Get_Physical_Block_Addr((u64)i *
- DeviceInfo.wBlockDataSize);
- pba = BLK_FROM_ADDR(addr);
-
- for (cnt = FIRST_BT_ID; cnt <= LAST_BT_ID; cnt++) {
- if (pba == g_pBTBlocks[cnt - FIRST_BT_ID]) {
- nand_dbg_print(NAND_DBG_DEBUG,
- "GC will erase BT block %d\n",
- pba);
- discard_cnt--;
- i++;
- bt_block_erased = 1;
- break;
- }
- }
-
- if (bt_block_erased) {
- bt_block_erased = 0;
- continue;
- }
-
- /* If the discard block is L2 cache block, then just skip it */
- for (cnt = 0; cnt < BLK_NUM_FOR_L2_CACHE; cnt++) {
- if (cache_l2.blk_array[cnt] == pba) {
- nand_dbg_print(NAND_DBG_DEBUG,
- "GC will erase L2 cache blk %d\n",
- pba);
- break;
- }
- }
- if (cnt < BLK_NUM_FOR_L2_CACHE) { /* Skip it */
- discard_cnt--;
- i++;
- continue;
- }
-
- addr = FTL_Get_Physical_Block_Addr((u64)i *
- DeviceInfo.wBlockDataSize);
-
- if (PASS == GLOB_FTL_Block_Erase(addr)) {
- pbt[i] &= (u32)(~DISCARD_BLOCK);
- pbt[i] |= (u32)(SPARE_BLOCK);
- discard_cnt--;
- ret = PASS;
- } else {
- MARK_BLOCK_AS_BAD(pbt[i]);
- }
- }
-
- i++;
- }
-
- return ret;
-}
-#endif
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: GLOB_FTL_Garbage_Collection
-* Inputs: none
-* Outputs: PASS / FAIL
-* Description: search the block table for all discarded blocks to erase
-* for each discarded block:
-* set the flash block to IN_PROGRESS
-* erase the block
-* update the block table
-* write the block table to flash
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-int GLOB_FTL_Garbage_Collection(void)
-{
- u32 i;
- u32 wDiscard = 0;
- int wResult = FAIL;
- u32 *pbt = (u32 *)g_pBlockTable;
-
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (GC_Called) {
- printk(KERN_ALERT "GLOB_FTL_Garbage_Collection() "
- "has been re-entered! Exit.\n");
- return PASS;
- }
-
- GC_Called = 1;
-
- GLOB_FTL_BT_Garbage_Collection();
-
- for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
- if (IS_DISCARDED_BLOCK(i))
- wDiscard++;
- }
-
- if (wDiscard <= 0) {
- GC_Called = 0;
- return wResult;
- }
-
- nand_dbg_print(NAND_DBG_DEBUG,
- "Found %d discarded blocks\n", wDiscard);
-
- FTL_Write_Block_Table(FAIL);
-
- wResult = do_garbage_collection(wDiscard);
-
- FTL_Write_Block_Table(FAIL);
-
- GC_Called = 0;
-
- return wResult;
-}
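
A small stand-alone sketch of how garbage collection flips a block-table entry's status bits once a discarded block has been erased, using the DISCARD_BLOCK / SPARE_BLOCK / BAD_BLOCK values defined in flash.h; the entry value itself is made up for the example:

/* Illustrative sketch only -- not part of the driver. */
#include <stdio.h>
#include <stdint.h>

#define DISCARD_BLOCK	0x800000	/* values as defined in flash.h */
#define SPARE_BLOCK	0x400000
#define BAD_BLOCK	0xC00000

int main(void)
{
	/* A made-up entry: discarded block whose physical index is 0x123 */
	uint32_t entry = 0x000123 | DISCARD_BLOCK;

	if (((entry & BAD_BLOCK) != BAD_BLOCK) && (entry & DISCARD_BLOCK)) {
		entry &= ~(uint32_t)DISCARD_BLOCK;	/* erase succeeded: drop DISCARD */
		entry |= SPARE_BLOCK;			/* return the block to the spare pool */
	}

	printf("entry = 0x%06x\n", (unsigned)entry);	/* prints 0x400123 */
	return 0;
}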
-
-
-#if CMD_DMA
-static int do_bt_garbage_collection(void)
-{
- u32 pba, lba;
- u32 *pbt = (u32 *)g_pBlockTable;
- u32 *pBTBlocksNode = (u32 *)g_pBTBlocks;
- u64 addr;
- int i, ret = FAIL;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (BT_GC_Called)
- return PASS;
-
- BT_GC_Called = 1;
-
- for (i = last_erased; (i <= LAST_BT_ID) &&
- (g_pBTBlocks[((i + 2) % (1 + LAST_BT_ID - FIRST_BT_ID)) +
- FIRST_BT_ID - FIRST_BT_ID] != BTBLOCK_INVAL) &&
- ((ftl_cmd_cnt + 28)) < 256; i++) {
- pba = pBTBlocksNode[i - FIRST_BT_ID];
- lba = FTL_Get_Block_Index(pba);
- nand_dbg_print(NAND_DBG_DEBUG,
- "do_bt_garbage_collection: pba %d, lba %d\n",
- pba, lba);
- nand_dbg_print(NAND_DBG_DEBUG,
- "Block Table Entry: %d", pbt[lba]);
-
- if (((pbt[lba] & BAD_BLOCK) != BAD_BLOCK) &&
- (pbt[lba] & DISCARD_BLOCK)) {
- nand_dbg_print(NAND_DBG_DEBUG,
- "do_bt_garbage_collection_cdma: "
- "Erasing Block tables present in block %d\n",
- pba);
- addr = FTL_Get_Physical_Block_Addr((u64)lba *
- DeviceInfo.wBlockDataSize);
- if (PASS == GLOB_FTL_Block_Erase(addr)) {
- pbt[lba] &= (u32)(~DISCARD_BLOCK);
- pbt[lba] |= (u32)(SPARE_BLOCK);
-
- p_BTableChangesDelta =
- (struct BTableChangesDelta *)
- g_pBTDelta_Free;
- g_pBTDelta_Free +=
- sizeof(struct BTableChangesDelta);
-
- p_BTableChangesDelta->ftl_cmd_cnt =
- ftl_cmd_cnt - 1;
- p_BTableChangesDelta->BT_Index = lba;
- p_BTableChangesDelta->BT_Entry_Value =
- pbt[lba];
-
- p_BTableChangesDelta->ValidFields = 0x0C;
-
- ret = PASS;
- pBTBlocksNode[last_erased - FIRST_BT_ID] =
- BTBLOCK_INVAL;
- nand_dbg_print(NAND_DBG_DEBUG,
- "resetting bt entry at index %d "
- "value %d\n", i,
- pBTBlocksNode[i - FIRST_BT_ID]);
- if (last_erased == LAST_BT_ID)
- last_erased = FIRST_BT_ID;
- else
- last_erased++;
- } else {
- MARK_BLOCK_AS_BAD(pbt[lba]);
- }
- }
- }
-
- BT_GC_Called = 0;
-
- return ret;
-}
-
-#else
-static int do_bt_garbage_collection(void)
-{
- u32 pba, lba;
- u32 *pbt = (u32 *)g_pBlockTable;
- u32 *pBTBlocksNode = (u32 *)g_pBTBlocks;
- u64 addr;
- int i, ret = FAIL;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (BT_GC_Called)
- return PASS;
-
- BT_GC_Called = 1;
-
- for (i = last_erased; (i <= LAST_BT_ID) &&
- (g_pBTBlocks[((i + 2) % (1 + LAST_BT_ID - FIRST_BT_ID)) +
- FIRST_BT_ID - FIRST_BT_ID] != BTBLOCK_INVAL); i++) {
- pba = pBTBlocksNode[i - FIRST_BT_ID];
- lba = FTL_Get_Block_Index(pba);
- nand_dbg_print(NAND_DBG_DEBUG,
- "do_bt_garbage_collection_cdma: pba %d, lba %d\n",
- pba, lba);
- nand_dbg_print(NAND_DBG_DEBUG,
- "Block Table Entry: %d", pbt[lba]);
-
- if (((pbt[lba] & BAD_BLOCK) != BAD_BLOCK) &&
- (pbt[lba] & DISCARD_BLOCK)) {
- nand_dbg_print(NAND_DBG_DEBUG,
- "do_bt_garbage_collection: "
- "Erasing Block tables present in block %d\n",
- pba);
- addr = FTL_Get_Physical_Block_Addr((u64)lba *
- DeviceInfo.wBlockDataSize);
- if (PASS == GLOB_FTL_Block_Erase(addr)) {
- pbt[lba] &= (u32)(~DISCARD_BLOCK);
- pbt[lba] |= (u32)(SPARE_BLOCK);
- ret = PASS;
- pBTBlocksNode[last_erased - FIRST_BT_ID] =
- BTBLOCK_INVAL;
- nand_dbg_print(NAND_DBG_DEBUG,
- "resetting bt entry at index %d "
- "value %d\n", i,
- pBTBlocksNode[i - FIRST_BT_ID]);
- if (last_erased == LAST_BT_ID)
- last_erased = FIRST_BT_ID;
- else
- last_erased++;
- } else {
- MARK_BLOCK_AS_BAD(pbt[lba]);
- }
- }
- }
-
- BT_GC_Called = 0;
-
- return ret;
-}
-
-#endif
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: GLOB_FTL_BT_Garbage_Collection
-* Inputs: none
-* Outputs: PASS / FAIL
-* Description: Erases discarded blocks containing Block table
-*
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-int GLOB_FTL_BT_Garbage_Collection(void)
-{
- return do_bt_garbage_collection();
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Replace_OneBlock
-* Inputs: Block number 1
-* Block number 2
-* Outputs: Replaced Block Number
-* Description: Interchange the block table entries at blk and rep_blk
-*
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static u32 FTL_Replace_OneBlock(u32 blk, u32 rep_blk)
-{
- u32 tmp_blk;
- u32 replace_node = BAD_BLOCK;
- u32 *pbt = (u32 *)g_pBlockTable;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (rep_blk != BAD_BLOCK) {
- if (IS_BAD_BLOCK(blk))
- tmp_blk = pbt[blk];
- else
- tmp_blk = DISCARD_BLOCK | (~SPARE_BLOCK & pbt[blk]);
-
- replace_node = (u32) ((~SPARE_BLOCK) & pbt[rep_blk]);
- pbt[blk] = replace_node;
- pbt[rep_blk] = tmp_blk;
-
-#if CMD_DMA
- p_BTableChangesDelta =
- (struct BTableChangesDelta *)g_pBTDelta_Free;
- g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
-
- p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
- p_BTableChangesDelta->BT_Index = blk;
- p_BTableChangesDelta->BT_Entry_Value = pbt[blk];
-
- p_BTableChangesDelta->ValidFields = 0x0C;
-
- p_BTableChangesDelta =
- (struct BTableChangesDelta *)g_pBTDelta_Free;
- g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
-
- p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
- p_BTableChangesDelta->BT_Index = rep_blk;
- p_BTableChangesDelta->BT_Entry_Value = pbt[rep_blk];
- p_BTableChangesDelta->ValidFields = 0x0C;
-#endif
- }
-
- return replace_node;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Write_Block_Table_Data
-* Inputs: Block table size in pages
-* Outputs: PASS=0 / FAIL=1
-* Description: Write block table data in flash
-* If first page and last page
-* Write data+BT flag
-* else
-* Write data
-* BT flag is a counter. Its value is incremented for each block table
-* write to a new block
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static int FTL_Write_Block_Table_Data(void)
-{
- u64 dwBlockTableAddr, pTempAddr;
- u32 Block;
- u16 Page, PageCount;
- u8 *tempBuf = tmp_buf_write_blk_table_data;
- int wBytesCopied;
- u16 bt_pages;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- dwBlockTableAddr =
- (u64)((u64)g_wBlockTableIndex * DeviceInfo.wBlockDataSize +
- (u64)g_wBlockTableOffset * DeviceInfo.wPageDataSize);
- pTempAddr = dwBlockTableAddr;
-
- bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();
-
- nand_dbg_print(NAND_DBG_DEBUG, "FTL_Write_Block_Table_Data: "
- "page= %d BlockTableIndex= %d "
- "BlockTableOffset=%d\n", bt_pages,
- g_wBlockTableIndex, g_wBlockTableOffset);
-
- Block = BLK_FROM_ADDR(pTempAddr);
- Page = PAGE_FROM_ADDR(pTempAddr, Block);
- PageCount = 1;
-
- if (bt_block_changed) {
- if (bt_flag == LAST_BT_ID) {
- bt_flag = FIRST_BT_ID;
- g_pBTBlocks[bt_flag - FIRST_BT_ID] = Block;
- } else if (bt_flag < LAST_BT_ID) {
- bt_flag++;
- g_pBTBlocks[bt_flag - FIRST_BT_ID] = Block;
- }
-
- if ((bt_flag > (LAST_BT_ID-4)) &&
- g_pBTBlocks[FIRST_BT_ID - FIRST_BT_ID] !=
- BTBLOCK_INVAL) {
- bt_block_changed = 0;
- GLOB_FTL_BT_Garbage_Collection();
- }
-
- bt_block_changed = 0;
- nand_dbg_print(NAND_DBG_DEBUG,
- "Block Table Counter is %u Block %u\n",
- bt_flag, (unsigned int)Block);
- }
-
- memset(tempBuf, 0, 3);
- tempBuf[3] = bt_flag;
- wBytesCopied = FTL_Copy_Block_Table_To_Flash(tempBuf + 4,
- DeviceInfo.wPageDataSize - 4, 0);
- memset(&tempBuf[wBytesCopied + 4], 0xff,
- DeviceInfo.wPageSize - (wBytesCopied + 4));
- FTL_Insert_Block_Table_Signature(&tempBuf[DeviceInfo.wPageDataSize],
- bt_flag);
-
-#if CMD_DMA
- memcpy(g_pNextBlockTable, tempBuf,
- DeviceInfo.wPageSize * sizeof(u8));
- nand_dbg_print(NAND_DBG_DEBUG, "Writing First Page of Block Table "
- "Block %u Page %u\n", (unsigned int)Block, Page);
- if (FAIL == GLOB_LLD_Write_Page_Main_Spare_cdma(g_pNextBlockTable,
- Block, Page, 1,
- LLD_CMD_FLAG_MODE_CDMA | LLD_CMD_FLAG_ORDER_BEFORE_REST)) {
- nand_dbg_print(NAND_DBG_WARN, "NAND Program fail in "
- "%s, Line %d, Function: %s, "
- "new Bad Block %d generated!\n",
- __FILE__, __LINE__, __func__, Block);
- goto func_return;
- }
-
- ftl_cmd_cnt++;
- g_pNextBlockTable += ((DeviceInfo.wPageSize * sizeof(u8)));
-#else
- if (FAIL == GLOB_LLD_Write_Page_Main_Spare(tempBuf, Block, Page, 1)) {
- nand_dbg_print(NAND_DBG_WARN,
- "NAND Program fail in %s, Line %d, Function: %s, "
- "new Bad Block %d generated!\n",
- __FILE__, __LINE__, __func__, Block);
- goto func_return;
- }
-#endif
-
- if (bt_pages > 1) {
- PageCount = bt_pages - 1;
- if (PageCount > 1) {
- wBytesCopied += FTL_Copy_Block_Table_To_Flash(tempBuf,
- DeviceInfo.wPageDataSize * (PageCount - 1),
- wBytesCopied);
-
-#if CMD_DMA
- memcpy(g_pNextBlockTable, tempBuf,
- (PageCount - 1) * DeviceInfo.wPageDataSize);
- if (FAIL == GLOB_LLD_Write_Page_Main_cdma(
- g_pNextBlockTable, Block, Page + 1,
- PageCount - 1)) {
- nand_dbg_print(NAND_DBG_WARN,
- "NAND Program fail in %s, Line %d, "
- "Function: %s, "
- "new Bad Block %d generated!\n",
- __FILE__, __LINE__, __func__,
- (int)Block);
- goto func_return;
- }
-
- ftl_cmd_cnt++;
- g_pNextBlockTable += (PageCount - 1) *
- DeviceInfo.wPageDataSize * sizeof(u8);
-#else
- if (FAIL == GLOB_LLD_Write_Page_Main(tempBuf,
- Block, Page + 1, PageCount - 1)) {
- nand_dbg_print(NAND_DBG_WARN,
- "NAND Program fail in %s, Line %d, "
- "Function: %s, "
- "new Bad Block %d generated!\n",
- __FILE__, __LINE__, __func__,
- (int)Block);
- goto func_return;
- }
-#endif
- }
-
- wBytesCopied = FTL_Copy_Block_Table_To_Flash(tempBuf,
- DeviceInfo.wPageDataSize, wBytesCopied);
- memset(&tempBuf[wBytesCopied], 0xff,
- DeviceInfo.wPageSize-wBytesCopied);
- FTL_Insert_Block_Table_Signature(
- &tempBuf[DeviceInfo.wPageDataSize], bt_flag);
-#if CMD_DMA
- memcpy(g_pNextBlockTable, tempBuf,
- DeviceInfo.wPageSize * sizeof(u8));
- nand_dbg_print(NAND_DBG_DEBUG,
- "Writing the last Page of Block Table "
- "Block %u Page %u\n",
- (unsigned int)Block, Page + bt_pages - 1);
- if (FAIL == GLOB_LLD_Write_Page_Main_Spare_cdma(
- g_pNextBlockTable, Block, Page + bt_pages - 1, 1,
- LLD_CMD_FLAG_MODE_CDMA |
- LLD_CMD_FLAG_ORDER_BEFORE_REST)) {
- nand_dbg_print(NAND_DBG_WARN,
- "NAND Program fail in %s, Line %d, "
- "Function: %s, new Bad Block %d generated!\n",
- __FILE__, __LINE__, __func__, Block);
- goto func_return;
- }
- ftl_cmd_cnt++;
-#else
- if (FAIL == GLOB_LLD_Write_Page_Main_Spare(tempBuf,
- Block, Page+bt_pages - 1, 1)) {
- nand_dbg_print(NAND_DBG_WARN,
- "NAND Program fail in %s, Line %d, "
- "Function: %s, "
- "new Bad Block %d generated!\n",
- __FILE__, __LINE__, __func__, Block);
- goto func_return;
- }
-#endif
- }
-
- nand_dbg_print(NAND_DBG_DEBUG, "FTL_Write_Block_Table_Data: done\n");
-
-func_return:
- return PASS;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Replace_Block_Table
-* Inputs: None
-* Outputs: PASS=0 / FAIL=1
-* Description: Get a new block to write block table
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static u32 FTL_Replace_Block_Table(void)
-{
- u32 blk;
- int gc;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- blk = FTL_Replace_LWBlock(BLOCK_TABLE_INDEX, &gc);
-
- if ((BAD_BLOCK == blk) && (PASS == gc)) {
- GLOB_FTL_Garbage_Collection();
- blk = FTL_Replace_LWBlock(BLOCK_TABLE_INDEX, &gc);
- }
- if (BAD_BLOCK == blk)
- printk(KERN_ERR "%s, %s: There is no spare block. "
- "It should never happen\n",
- __FILE__, __func__);
-
- nand_dbg_print(NAND_DBG_DEBUG, "New Block table Block is %d\n", blk);
-
- return blk;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Replace_LWBlock
-* Inputs: Block number
-* Pointer to Garbage Collect flag
-* Outputs:
-* Description: Determine the least worn block by traversing
-* the block table
-* Set garbage collection to be called if the number of spare
-* blocks is less than the Free Block Gate count
-* Change the block table entry to map the least worn block to the
-* current operation
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static u32 FTL_Replace_LWBlock(u32 wBlockNum, int *pGarbageCollect)
-{
- u32 i;
- u32 *pbt = (u32 *)g_pBlockTable;
- u8 wLeastWornCounter = 0xFF;
- u32 wLeastWornIndex = BAD_BLOCK;
- u32 wSpareBlockNum = 0;
- u32 wDiscardBlockNum = 0;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (IS_SPARE_BLOCK(wBlockNum)) {
- *pGarbageCollect = FAIL;
- pbt[wBlockNum] = (u32)(pbt[wBlockNum] & (~SPARE_BLOCK));
-#if CMD_DMA
- p_BTableChangesDelta =
- (struct BTableChangesDelta *)g_pBTDelta_Free;
- g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
- p_BTableChangesDelta->ftl_cmd_cnt =
- ftl_cmd_cnt;
- p_BTableChangesDelta->BT_Index = (u32)(wBlockNum);
- p_BTableChangesDelta->BT_Entry_Value = pbt[wBlockNum];
- p_BTableChangesDelta->ValidFields = 0x0C;
-#endif
- return pbt[wBlockNum];
- }
-
- for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
- if (IS_DISCARDED_BLOCK(i))
- wDiscardBlockNum++;
-
- if (IS_SPARE_BLOCK(i)) {
- u32 wPhysicalIndex = (u32)((~BAD_BLOCK) & pbt[i]);
- if (wPhysicalIndex > DeviceInfo.wSpectraEndBlock)
- printk(KERN_ERR "FTL_Replace_LWBlock: "
- "This should never occur!\n");
- if (g_pWearCounter[wPhysicalIndex -
- DeviceInfo.wSpectraStartBlock] <
- wLeastWornCounter) {
- wLeastWornCounter =
- g_pWearCounter[wPhysicalIndex -
- DeviceInfo.wSpectraStartBlock];
- wLeastWornIndex = i;
- }
- wSpareBlockNum++;
- }
- }
-
- nand_dbg_print(NAND_DBG_WARN,
- "FTL_Replace_LWBlock: Least Worn Counter %d\n",
- (int)wLeastWornCounter);
-
- if ((wDiscardBlockNum >= NUM_FREE_BLOCKS_GATE) ||
- (wSpareBlockNum <= NUM_FREE_BLOCKS_GATE))
- *pGarbageCollect = PASS;
- else
- *pGarbageCollect = FAIL;
-
- nand_dbg_print(NAND_DBG_DEBUG,
- "FTL_Replace_LWBlock: Discarded Blocks %u Spare"
- " Blocks %u\n",
- (unsigned int)wDiscardBlockNum,
- (unsigned int)wSpareBlockNum);
-
- return FTL_Replace_OneBlock(wBlockNum, wLeastWornIndex);
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Replace_MWBlock
-* Inputs: None
-* Outputs: most worn spare block number, or BAD_BLOCK
-* Description: Find the most worn spare block.
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static u32 FTL_Replace_MWBlock(void)
-{
- u32 i;
- u32 *pbt = (u32 *)g_pBlockTable;
- u8 wMostWornCounter = 0;
- u32 wMostWornIndex = BAD_BLOCK;
- u32 wSpareBlockNum = 0;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
- if (IS_SPARE_BLOCK(i)) {
- u32 wPhysicalIndex = (u32)((~SPARE_BLOCK) & pbt[i]);
- if (g_pWearCounter[wPhysicalIndex -
- DeviceInfo.wSpectraStartBlock] >
- wMostWornCounter) {
- wMostWornCounter =
- g_pWearCounter[wPhysicalIndex -
- DeviceInfo.wSpectraStartBlock];
- wMostWornIndex = wPhysicalIndex;
- }
- wSpareBlockNum++;
- }
- }
-
- if (wSpareBlockNum <= 2)
- return BAD_BLOCK;
-
- return wMostWornIndex;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Replace_Block
-* Inputs: Block Address
-* Outputs: PASS=0 / FAIL=1
-* Description: If block specified by blk_addr parameter is not free,
-* replace it with the least worn block.
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static int FTL_Replace_Block(u64 blk_addr)
-{
- u32 current_blk = BLK_FROM_ADDR(blk_addr);
- u32 *pbt = (u32 *)g_pBlockTable;
- int wResult = PASS;
- int GarbageCollect = FAIL;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (IS_SPARE_BLOCK(current_blk)) {
- pbt[current_blk] = (~SPARE_BLOCK) & pbt[current_blk];
-#if CMD_DMA
- p_BTableChangesDelta =
- (struct BTableChangesDelta *)g_pBTDelta_Free;
- g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
- p_BTableChangesDelta->ftl_cmd_cnt =
- ftl_cmd_cnt;
- p_BTableChangesDelta->BT_Index = current_blk;
- p_BTableChangesDelta->BT_Entry_Value = pbt[current_blk];
- p_BTableChangesDelta->ValidFields = 0x0C ;
-#endif
- return wResult;
- }
-
- FTL_Replace_LWBlock(current_blk, &GarbageCollect);
-
- if (PASS == GarbageCollect)
- wResult = GLOB_FTL_Garbage_Collection();
-
- return wResult;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: GLOB_FTL_Is_BadBlock
-* Inputs: block number to test
-* Outputs: PASS (block is BAD) / FAIL (block is not bad)
-* Description: test if this block number is flagged as bad
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-int GLOB_FTL_Is_BadBlock(u32 wBlockNum)
-{
- u32 *pbt = (u32 *)g_pBlockTable;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (wBlockNum >= DeviceInfo.wSpectraStartBlock
- && BAD_BLOCK == (pbt[wBlockNum] & BAD_BLOCK))
- return PASS;
- else
- return FAIL;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: GLOB_FTL_Flush_Cache
-* Inputs: none
-* Outputs: PASS=0 / FAIL=1
-* Description: flush all the cache blocks to flash
-* if a cache block is not dirty, don't do anything with it
-* else, write the block and update the block table
-* Note: This function should be called at shutdown/power down
-* to write important data to the device
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-int GLOB_FTL_Flush_Cache(void)
-{
- int i, ret;
-
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- for (i = 0; i < CACHE_ITEM_NUM; i++) {
- if (SET == Cache.array[i].changed) {
-#if CMD_DMA
-#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
- int_cache[ftl_cmd_cnt].item = i;
- int_cache[ftl_cmd_cnt].cache.address =
- Cache.array[i].address;
- int_cache[ftl_cmd_cnt].cache.changed = CLEAR;
-#endif
-#endif
- ret = write_back_to_l2_cache(Cache.array[i].buf, Cache.array[i].address);
- if (PASS == ret) {
- Cache.array[i].changed = CLEAR;
- } else {
- printk(KERN_ALERT "Failed when write back to L2 cache!\n");
- /* TODO - How to handle this? */
- }
- }
- }
-
- flush_l2_cache();
-
- return FTL_Write_Block_Table(FAIL);
-}
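
A minimal model of the write-back loop above: only items whose dirty flag is set are flushed, and the flag is cleared on success. CACHE_ITEM_NUM and the addresses are assumed values, and write_back() is a hypothetical stand-in for write_back_to_l2_cache():

/* Illustrative sketch only -- not part of the driver. */
#include <stdio.h>

#define CACHE_ITEM_NUM 4	/* assumed; configuration-dependent in the driver */

struct cache_item {
	unsigned long long address;
	int changed;		/* dirty flag, SET/CLEAR in the driver */
};

/* Hypothetical stand-in for write_back_to_l2_cache(); returns 0 on success */
static int write_back(const struct cache_item *item)
{
	printf("writing back item at address %llu\n", item->address);
	return 0;
}

int main(void)
{
	struct cache_item cache[CACHE_ITEM_NUM] = {
		{ 0, 1 }, { 4096, 0 }, { 8192, 1 }, { 12288, 0 },
	};
	int i;

	for (i = 0; i < CACHE_ITEM_NUM; i++) {
		if (!cache[i].changed)
			continue;		/* clean items are skipped */
		if (write_back(&cache[i]) == 0)
			cache[i].changed = 0;	/* clear the dirty flag on success */
	}
	return 0;
}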
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: GLOB_FTL_Page_Read
-* Inputs: pointer to data
-* logical address of data (u64 is LBA * Bytes/Page)
-* Outputs: PASS=0 / FAIL=1
-* Description: reads a page of data into RAM from the cache
-* if the data is not already in cache, read from flash to cache
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-int GLOB_FTL_Page_Read(u8 *data, u64 logical_addr)
-{
- u16 cache_item;
- int res = PASS;
-
- nand_dbg_print(NAND_DBG_DEBUG, "GLOB_FTL_Page_Read - "
- "page_addr: %llu\n", logical_addr);
-
- cache_item = FTL_Cache_If_Hit(logical_addr);
-
- if (UNHIT_CACHE_ITEM == cache_item) {
- nand_dbg_print(NAND_DBG_DEBUG,
- "GLOB_FTL_Page_Read: Cache not hit\n");
- res = FTL_Cache_Write();
- if (ERR == FTL_Cache_Read(logical_addr))
- res = ERR;
- cache_item = Cache.LRU;
- }
-
- FTL_Cache_Read_Page(data, logical_addr, cache_item);
-
- return res;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: GLOB_FTL_Page_Write
-* Inputs: pointer to data
-* address of data (u64 is LBA * Bytes/Page)
-* Outputs: PASS=0 / FAIL=1
-* Description: writes a page of data from RAM to the cache
-* if the data is not already in cache, write back the
-* least recently used block and read the addressed block
-* from flash to cache
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-int GLOB_FTL_Page_Write(u8 *pData, u64 dwPageAddr)
-{
- u16 cache_blk;
- u32 *pbt = (u32 *)g_pBlockTable;
- int wResult = PASS;
-
- nand_dbg_print(NAND_DBG_TRACE, "GLOB_FTL_Page_Write - "
- "dwPageAddr: %llu\n", dwPageAddr);
-
- cache_blk = FTL_Cache_If_Hit(dwPageAddr);
-
- if (UNHIT_CACHE_ITEM == cache_blk) {
- wResult = FTL_Cache_Write();
- if (IS_BAD_BLOCK(BLK_FROM_ADDR(dwPageAddr))) {
- wResult = FTL_Replace_Block(dwPageAddr);
- pbt[BLK_FROM_ADDR(dwPageAddr)] |= SPARE_BLOCK;
- if (wResult == FAIL)
- return FAIL;
- }
- if (ERR == FTL_Cache_Read(dwPageAddr))
- wResult = ERR;
- cache_blk = Cache.LRU;
- FTL_Cache_Write_Page(pData, dwPageAddr, cache_blk, 0);
- } else {
-#if CMD_DMA
- FTL_Cache_Write_Page(pData, dwPageAddr, cache_blk,
- LLD_CMD_FLAG_ORDER_BEFORE_REST);
-#else
- FTL_Cache_Write_Page(pData, dwPageAddr, cache_blk, 0);
-#endif
- }
-
- return wResult;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: GLOB_FTL_Block_Erase
-* Inputs: address of block to erase (now in byte format, should change to
-* block format)
-* Outputs: PASS=0 / FAIL=1
-* Description: erases the specified block
-* increments the erase count
-* If erase count reaches its upper limit,call function to
-* do the adjustment as per the relative erase count values
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-int GLOB_FTL_Block_Erase(u64 blk_addr)
-{
- int status;
- u32 BlkIdx;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- BlkIdx = (u32)(blk_addr >> DeviceInfo.nBitsInBlockDataSize);
-
- if (BlkIdx < DeviceInfo.wSpectraStartBlock) {
- printk(KERN_ERR "GLOB_FTL_Block_Erase: "
- "This should never occur\n");
- return FAIL;
- }
-
-#if CMD_DMA
- status = GLOB_LLD_Erase_Block_cdma(BlkIdx, LLD_CMD_FLAG_MODE_CDMA);
- if (status == FAIL)
- nand_dbg_print(NAND_DBG_WARN,
- "NAND Program fail in %s, Line %d, "
- "Function: %s, new Bad Block %d generated!\n",
- __FILE__, __LINE__, __func__, BlkIdx);
-#else
- status = GLOB_LLD_Erase_Block(BlkIdx);
- if (status == FAIL) {
- nand_dbg_print(NAND_DBG_WARN,
- "NAND Program fail in %s, Line %d, "
- "Function: %s, new Bad Block %d generated!\n",
- __FILE__, __LINE__, __func__, BlkIdx);
- return status;
- }
-#endif
-
- if (DeviceInfo.MLCDevice) {
- g_pReadCounter[BlkIdx - DeviceInfo.wSpectraStartBlock] = 0;
- if (g_cBlockTableStatus != IN_PROGRESS_BLOCK_TABLE) {
- g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
- FTL_Write_IN_Progress_Block_Table_Page();
- }
- }
-
- g_pWearCounter[BlkIdx - DeviceInfo.wSpectraStartBlock]++;
-
-#if CMD_DMA
- p_BTableChangesDelta =
- (struct BTableChangesDelta *)g_pBTDelta_Free;
- g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
- p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
- p_BTableChangesDelta->WC_Index =
- BlkIdx - DeviceInfo.wSpectraStartBlock;
- p_BTableChangesDelta->WC_Entry_Value =
- g_pWearCounter[BlkIdx - DeviceInfo.wSpectraStartBlock];
- p_BTableChangesDelta->ValidFields = 0x30;
-
- if (DeviceInfo.MLCDevice) {
- p_BTableChangesDelta =
- (struct BTableChangesDelta *)g_pBTDelta_Free;
- g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
- p_BTableChangesDelta->ftl_cmd_cnt =
- ftl_cmd_cnt;
- p_BTableChangesDelta->RC_Index =
- BlkIdx - DeviceInfo.wSpectraStartBlock;
- p_BTableChangesDelta->RC_Entry_Value =
- g_pReadCounter[BlkIdx -
- DeviceInfo.wSpectraStartBlock];
- p_BTableChangesDelta->ValidFields = 0xC0;
- }
-
- ftl_cmd_cnt++;
-#endif
-
- if (g_pWearCounter[BlkIdx - DeviceInfo.wSpectraStartBlock] == 0xFE)
- FTL_Adjust_Relative_Erase_Count(BlkIdx);
-
- return status;
-}
-
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Adjust_Relative_Erase_Count
-* Inputs: index to block that was just incremented and is at the max
-* Outputs: PASS=0 / FAIL=1
-* Description: If any erase count is at MAX, adjust the erase count of every
-* block by subtracting the least worn
-* counter from the counter value of every entry in the wear table
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static int FTL_Adjust_Relative_Erase_Count(u32 Index_of_MAX)
-{
- u8 wLeastWornCounter = MAX_BYTE_VALUE;
- u8 wWearCounter;
- u32 i, wWearIndex;
- u32 *pbt = (u32 *)g_pBlockTable;
- int wResult = PASS;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
- if (IS_BAD_BLOCK(i))
- continue;
- wWearIndex = (u32)(pbt[i] & (~BAD_BLOCK));
-
- if (wWearIndex < DeviceInfo.wSpectraStartBlock)
- printk(KERN_ERR "FTL_Adjust_Relative_Erase_Count: "
- "This should never occur\n");
- wWearCounter = g_pWearCounter[wWearIndex -
- DeviceInfo.wSpectraStartBlock];
- if (wWearCounter < wLeastWornCounter)
- wLeastWornCounter = wWearCounter;
- }
-
- if (wLeastWornCounter == 0) {
- nand_dbg_print(NAND_DBG_WARN,
- "Adjusting Wear Levelling Counters: Special Case\n");
- g_pWearCounter[Index_of_MAX -
- DeviceInfo.wSpectraStartBlock]--;
-#if CMD_DMA
- p_BTableChangesDelta =
- (struct BTableChangesDelta *)g_pBTDelta_Free;
- g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
- p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
- p_BTableChangesDelta->WC_Index =
- Index_of_MAX - DeviceInfo.wSpectraStartBlock;
- p_BTableChangesDelta->WC_Entry_Value =
- g_pWearCounter[Index_of_MAX -
- DeviceInfo.wSpectraStartBlock];
- p_BTableChangesDelta->ValidFields = 0x30;
-#endif
- FTL_Static_Wear_Leveling();
- } else {
- for (i = 0; i < DeviceInfo.wDataBlockNum; i++)
- if (!IS_BAD_BLOCK(i)) {
- wWearIndex = (u32)(pbt[i] & (~BAD_BLOCK));
- g_pWearCounter[wWearIndex -
- DeviceInfo.wSpectraStartBlock] =
- (u8)(g_pWearCounter
- [wWearIndex -
- DeviceInfo.wSpectraStartBlock] -
- wLeastWornCounter);
-#if CMD_DMA
- p_BTableChangesDelta =
- (struct BTableChangesDelta *)g_pBTDelta_Free;
- g_pBTDelta_Free +=
- sizeof(struct BTableChangesDelta);
-
- p_BTableChangesDelta->ftl_cmd_cnt =
- ftl_cmd_cnt;
- p_BTableChangesDelta->WC_Index = wWearIndex -
- DeviceInfo.wSpectraStartBlock;
- p_BTableChangesDelta->WC_Entry_Value =
- g_pWearCounter[wWearIndex -
- DeviceInfo.wSpectraStartBlock];
- p_BTableChangesDelta->ValidFields = 0x30;
-#endif
- }
- }
-
- return wResult;
-}
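
A compact sketch of the rebasing step FTL_Adjust_Relative_Erase_Count() performs when no counter is zero: subtract the least worn count from every entry so the counters stay within their 8-bit range. wear[] and its contents are made-up stand-ins for g_pWearCounter:

/* Illustrative sketch only -- not part of the driver. */
#include <stdio.h>

int main(void)
{
	/* Made-up wear counts; stand-ins for g_pWearCounter entries */
	unsigned char wear[4] = { 254, 200, 180, 190 };
	unsigned char least = 0xFF;
	int i;

	for (i = 0; i < 4; i++)		/* find the least worn counter */
		if (wear[i] < least)
			least = wear[i];

	if (least > 0) {		/* normal case: rebase every counter */
		for (i = 0; i < 4; i++)
			wear[i] -= least;
	}
	/* else: special case in the driver, which falls back to
	 * FTL_Static_Wear_Leveling() instead */

	for (i = 0; i < 4; i++)
		printf("%d ", wear[i]);	/* prints: 74 20 0 10 */
	printf("\n");
	return 0;
}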
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Write_IN_Progress_Block_Table_Page
-* Inputs: None
-* Outputs: PASS / FAIL / ERR
-* Description: Write the in-progress flag page to the page next to
-* the block table
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static int FTL_Write_IN_Progress_Block_Table_Page(void)
-{
- int wResult = PASS;
- u16 bt_pages;
- u16 dwIPFPageAddr;
-#if CMD_DMA
-#else
- u32 *pbt = (u32 *)g_pBlockTable;
- u32 wTempBlockTableIndex;
-#endif
-
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();
-
- dwIPFPageAddr = g_wBlockTableOffset + bt_pages;
-
- nand_dbg_print(NAND_DBG_DEBUG, "Writing IPF at "
- "Block %d Page %d\n",
- g_wBlockTableIndex, dwIPFPageAddr);
-
-#if CMD_DMA
- wResult = GLOB_LLD_Write_Page_Main_Spare_cdma(g_pIPF,
- g_wBlockTableIndex, dwIPFPageAddr, 1,
- LLD_CMD_FLAG_MODE_CDMA | LLD_CMD_FLAG_ORDER_BEFORE_REST);
- if (wResult == FAIL) {
- nand_dbg_print(NAND_DBG_WARN,
- "NAND Program fail in %s, Line %d, "
- "Function: %s, new Bad Block %d generated!\n",
- __FILE__, __LINE__, __func__,
- g_wBlockTableIndex);
- }
- g_wBlockTableOffset = dwIPFPageAddr + 1;
- p_BTableChangesDelta = (struct BTableChangesDelta *)g_pBTDelta_Free;
- g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
- p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
- p_BTableChangesDelta->g_wBlockTableOffset = g_wBlockTableOffset;
- p_BTableChangesDelta->ValidFields = 0x01;
- ftl_cmd_cnt++;
-#else
- wResult = GLOB_LLD_Write_Page_Main_Spare(g_pIPF,
- g_wBlockTableIndex, dwIPFPageAddr, 1);
- if (wResult == FAIL) {
- nand_dbg_print(NAND_DBG_WARN,
- "NAND Program fail in %s, Line %d, "
- "Function: %s, new Bad Block %d generated!\n",
- __FILE__, __LINE__, __func__,
- (int)g_wBlockTableIndex);
- MARK_BLOCK_AS_BAD(pbt[BLOCK_TABLE_INDEX]);
- wTempBlockTableIndex = FTL_Replace_Block_Table();
- bt_block_changed = 1;
- if (BAD_BLOCK == wTempBlockTableIndex)
- return ERR;
- g_wBlockTableIndex = wTempBlockTableIndex;
- g_wBlockTableOffset = 0;
- /* Block table tag is '00', which means it is a used block */
- pbt[BLOCK_TABLE_INDEX] = g_wBlockTableIndex;
- return FAIL;
- }
- g_wBlockTableOffset = dwIPFPageAddr + 1;
-#endif
- return wResult;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Read_Disturbance
-* Inputs: block address
-* Outputs: PASS=0 / FAIL=1
-* Description: used to handle read disturbance. Data in a block that
-* reaches its read limit is moved to a new block
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-int FTL_Read_Disturbance(u32 blk_addr)
-{
- int wResult = FAIL;
- u32 *pbt = (u32 *) g_pBlockTable;
- u32 dwOldBlockAddr = blk_addr;
- u32 wBlockNum;
- u32 i;
- u32 wLeastReadCounter = 0xFFFF;
- u32 wLeastReadIndex = BAD_BLOCK;
- u32 wSpareBlockNum = 0;
- u32 wTempNode;
- u32 wReplacedNode;
- u8 *g_pTempBuf;
-
- nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
-#if CMD_DMA
- if (cp_back_buf_idx >= COPY_BACK_BUF_NUM) {
- printk(KERN_ERR "cp_back_buf_copies overflow! Exit. "
- "Maybe too many pending commands in your CDMA chain.\n");
- return FAIL;
- }
- g_pTempBuf = cp_back_buf_copies[cp_back_buf_idx];
- cp_back_buf_idx++;
-#else
- g_pTempBuf = tmp_buf_read_disturbance;
-#endif
-
- wBlockNum = FTL_Get_Block_Index(blk_addr);
-
- do {
- /* This is a bug. Here 'i' should be the logical block number
- * and start from 1 (0 is reserved for the block table).
- * Have fixed it. - Yunpeng 2008.12.19
- */
- for (i = 1; i < DeviceInfo.wDataBlockNum; i++) {
- if (IS_SPARE_BLOCK(i)) {
- u32 wPhysicalIndex =
- (u32)((~SPARE_BLOCK) & pbt[i]);
- if (g_pReadCounter[wPhysicalIndex -
- DeviceInfo.wSpectraStartBlock] <
- wLeastReadCounter) {
- wLeastReadCounter =
- g_pReadCounter[wPhysicalIndex -
- DeviceInfo.wSpectraStartBlock];
- wLeastReadIndex = i;
- }
- wSpareBlockNum++;
- }
- }
-
- if (wSpareBlockNum <= NUM_FREE_BLOCKS_GATE) {
- wResult = GLOB_FTL_Garbage_Collection();
- if (PASS == wResult)
- continue;
- else
- break;
- } else {
- wTempNode = (u32)(DISCARD_BLOCK | pbt[wBlockNum]);
- wReplacedNode = (u32)((~SPARE_BLOCK) &
- pbt[wLeastReadIndex]);
-#if CMD_DMA
- pbt[wBlockNum] = wReplacedNode;
- pbt[wLeastReadIndex] = wTempNode;
- p_BTableChangesDelta =
- (struct BTableChangesDelta *)g_pBTDelta_Free;
- g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
-
- p_BTableChangesDelta->ftl_cmd_cnt =
- ftl_cmd_cnt;
- p_BTableChangesDelta->BT_Index = wBlockNum;
- p_BTableChangesDelta->BT_Entry_Value = pbt[wBlockNum];
- p_BTableChangesDelta->ValidFields = 0x0C;
-
- p_BTableChangesDelta =
- (struct BTableChangesDelta *)g_pBTDelta_Free;
- g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
-
- p_BTableChangesDelta->ftl_cmd_cnt =
- ftl_cmd_cnt;
- p_BTableChangesDelta->BT_Index = wLeastReadIndex;
- p_BTableChangesDelta->BT_Entry_Value =
- pbt[wLeastReadIndex];
- p_BTableChangesDelta->ValidFields = 0x0C;
-
- wResult = GLOB_LLD_Read_Page_Main_cdma(g_pTempBuf,
- dwOldBlockAddr, 0, DeviceInfo.wPagesPerBlock,
- LLD_CMD_FLAG_MODE_CDMA);
- if (wResult == FAIL)
- return wResult;
-
- ftl_cmd_cnt++;
-
- if (wResult != FAIL) {
- if (FAIL == GLOB_LLD_Write_Page_Main_cdma(
- g_pTempBuf, pbt[wBlockNum], 0,
- DeviceInfo.wPagesPerBlock)) {
- nand_dbg_print(NAND_DBG_WARN,
- "NAND Program fail in "
- "%s, Line %d, Function: %s, "
- "new Bad Block %d "
- "generated!\n",
- __FILE__, __LINE__, __func__,
- (int)pbt[wBlockNum]);
- wResult = FAIL;
- MARK_BLOCK_AS_BAD(pbt[wBlockNum]);
- }
- ftl_cmd_cnt++;
- }
-#else
- wResult = GLOB_LLD_Read_Page_Main(g_pTempBuf,
- dwOldBlockAddr, 0, DeviceInfo.wPagesPerBlock);
- if (wResult == FAIL)
- return wResult;
-
- if (wResult != FAIL) {
- /* This is a bug. At this time, pbt[wBlockNum]
- is still the physical address of the
- discarded block, and should not be written to.
- Have fixed it as below.
- -- Yunpeng 2008.12.19
- */
- wResult = GLOB_LLD_Write_Page_Main(g_pTempBuf,
- wReplacedNode, 0,
- DeviceInfo.wPagesPerBlock);
- if (wResult == FAIL) {
- nand_dbg_print(NAND_DBG_WARN,
- "NAND Program fail in "
- "%s, Line %d, Function: %s, "
- "new Bad Block %d "
- "generated!\n",
- __FILE__, __LINE__, __func__,
- (int)wReplacedNode);
- MARK_BLOCK_AS_BAD(wReplacedNode);
- } else {
- pbt[wBlockNum] = wReplacedNode;
- pbt[wLeastReadIndex] = wTempNode;
- }
- }
-
- if ((wResult == PASS) && (g_cBlockTableStatus !=
- IN_PROGRESS_BLOCK_TABLE)) {
- g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
- FTL_Write_IN_Progress_Block_Table_Page();
- }
-#endif
- }
- } while (wResult != PASS);
-
-#if CMD_DMA
- /* ... */
-#endif
-
- return wResult;
-}
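
A simplified sketch of the policy FTL_Read_Disturbance() implements: once a block's read counter passes MAX_READ_COUNTER (0x2710 in flash.h), its data is relocated and counting restarts. relocate_block() and the counts are hypothetical; the driver actually copies the data into a least-read spare block:

/* Illustrative sketch only -- not part of the driver. */
#include <stdio.h>

#define MAX_READ_COUNTER 0x2710		/* 10000 reads, as defined in flash.h */

/* Hypothetical helper; the driver picks a least-read spare block instead */
static void relocate_block(unsigned int blk)
{
	printf("relocating data of block %u to a fresh block\n", blk);
}

int main(void)
{
	unsigned int blk = 42;
	unsigned int read_cnt = 9999;	/* made-up current count for this block */

	if (++read_cnt >= MAX_READ_COUNTER) {
		relocate_block(blk);	/* limit reached: move the data */
		read_cnt = 0;		/* counting restarts for the new copy */
	}
	printf("read_cnt = %u\n", read_cnt);
	return 0;
}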
-
diff --git a/drivers/staging/spectra/flash.h b/drivers/staging/spectra/flash.h
deleted file mode 100644
index e59cf4ede551..000000000000
--- a/drivers/staging/spectra/flash.h
+++ /dev/null
@@ -1,198 +0,0 @@
-/*
- * NAND Flash Controller Device Driver
- * Copyright (c) 2009, Intel Corporation and its suppliers.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- */
-
-#ifndef _FLASH_INTERFACE_
-#define _FLASH_INTERFACE_
-
-#include "ffsport.h"
-#include "spectraswconfig.h"
-
-#define MAX_BYTE_VALUE 0xFF
-#define MAX_WORD_VALUE 0xFFFF
-#define MAX_U32_VALUE 0xFFFFFFFF
-
-#define MAX_BLOCKNODE_VALUE 0xFFFFFF
-#define DISCARD_BLOCK 0x800000
-#define SPARE_BLOCK 0x400000
-#define BAD_BLOCK 0xC00000
-
-#define UNHIT_CACHE_ITEM 0xFFFF
-
-#define NAND_CACHE_INIT_ADDR 0xffffffffffffffffULL
-
-#define IN_PROGRESS_BLOCK_TABLE 0x00
-#define CURRENT_BLOCK_TABLE 0x01
-
-#define BTSIG_OFFSET (0)
-#define BTSIG_BYTES (5)
-#define BTSIG_DELTA (3)
-
-#define MAX_READ_COUNTER 0x2710
-
-#define FIRST_BT_ID (1)
-#define LAST_BT_ID (254)
-#define BTBLOCK_INVAL (u32)(0xFFFFFFFF)
-
-struct device_info_tag {
- u16 wDeviceMaker;
- u16 wDeviceID;
- u32 wDeviceType;
- u32 wSpectraStartBlock;
- u32 wSpectraEndBlock;
- u32 wTotalBlocks;
- u16 wPagesPerBlock;
- u16 wPageSize;
- u16 wPageDataSize;
- u16 wPageSpareSize;
- u16 wNumPageSpareFlag;
- u16 wECCBytesPerSector;
- u32 wBlockSize;
- u32 wBlockDataSize;
- u32 wDataBlockNum;
- u8 bPlaneNum;
- u16 wDeviceMainAreaSize;
- u16 wDeviceSpareAreaSize;
- u16 wDevicesConnected;
- u16 wDeviceWidth;
- u16 wHWRevision;
- u16 wHWFeatures;
-
- u16 wONFIDevFeatures;
- u16 wONFIOptCommands;
- u16 wONFITimingMode;
- u16 wONFIPgmCacheTimingMode;
-
- u16 MLCDevice;
- u16 wSpareSkipBytes;
-
- u8 nBitsInPageNumber;
- u8 nBitsInPageDataSize;
- u8 nBitsInBlockDataSize;
-};
-
-extern struct device_info_tag DeviceInfo;
-
-/* Cache item format */
-struct flash_cache_item_tag {
- u64 address;
- u16 use_cnt;
- u16 changed;
- u8 *buf;
-};
-
-struct flash_cache_tag {
- u32 cache_item_size; /* Size in bytes of each cache item */
- u16 pages_per_item; /* How many NAND pages in each cache item */
- u16 LRU; /* No. of the least recently used cache item */
- struct flash_cache_item_tag array[CACHE_ITEM_NUM];
-};
-
-/*
- *Data structure for each list node of the management table
- * used for the Level 2 Cache. Each node maps one logical NAND block.
- */
-struct spectra_l2_cache_list {
- struct list_head list;
- u32 logical_blk_num; /* Logical block number */
- u32 pages_array[]; /* Page map array of this logical block.
- * Array index is the logical page number,
- * and for every item of this array:
- * the high 16 bits are the index of the L2 cache block,
- * the low 16 bits are the physical page number
- * within that L2 cache block.
- * This array is kmalloc'ed at run time.
- */
-};
-
-struct spectra_l2_cache_info {
- u32 blk_array[BLK_NUM_FOR_L2_CACHE];
- u16 cur_blk_idx; /* Index (in blk_array) of the physical block currently in use */
- u16 cur_page_num; /* Current page number within the block in use */
- struct spectra_l2_cache_list table; /* First node of the table */
-};
-
-#define RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE 1
-
-#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
-struct flash_cache_mod_item_tag {
- u64 address;
- u8 changed;
-};
-
-struct flash_cache_delta_list_tag {
- u8 item; /* used cache item */
- struct flash_cache_mod_item_tag cache;
-};
-#endif
-
-extern struct flash_cache_tag Cache;
-
-extern u8 *buf_read_page_main_spare;
-extern u8 *buf_write_page_main_spare;
-extern u8 *buf_read_page_spare;
-extern u8 *buf_get_bad_block;
-extern u8 *cdma_desc_buf;
-extern u8 *memcp_desc_buf;
-
-/* structure used for the IdentifyDevice function */
-struct spectra_indentfy_dev_tag {
- u32 NumBlocks;
- u16 PagesPerBlock;
- u16 PageDataSize;
- u16 wECCBytesPerSector;
- u32 wDataBlockNum;
-};
-
-int GLOB_FTL_Flash_Init(void);
-int GLOB_FTL_Flash_Release(void);
-/*void GLOB_FTL_Erase_Flash(void);*/
-int GLOB_FTL_Block_Erase(u64 block_addr);
-int GLOB_FTL_Is_BadBlock(u32 block_num);
-int GLOB_FTL_IdentifyDevice(struct spectra_indentfy_dev_tag *dev_data);
-int GLOB_FTL_Event_Status(int *);
-u16 glob_ftl_execute_cmds(void);
-
-/*int FTL_Read_Disturbance(ADDRESSTYPE dwBlockAddr);*/
-int FTL_Read_Disturbance(u32 dwBlockAddr);
-
-/*Flash r/w based on cache*/
-int GLOB_FTL_Page_Read(u8 *read_data, u64 page_addr);
-int GLOB_FTL_Page_Write(u8 *write_data, u64 page_addr);
-int GLOB_FTL_Wear_Leveling(void);
-int GLOB_FTL_Flash_Format(void);
-int GLOB_FTL_Init(void);
-int GLOB_FTL_Flush_Cache(void);
-int GLOB_FTL_Garbage_Collection(void);
-int GLOB_FTL_BT_Garbage_Collection(void);
-void GLOB_FTL_Cache_Release(void);
-u8 *get_blk_table_start_addr(void);
-u8 *get_wear_leveling_table_start_addr(void);
-unsigned long get_blk_table_len(void);
-unsigned long get_wear_leveling_table_len(void);
-
-#if DEBUG_BNDRY
-void debug_boundary_lineno_error(int chnl, int limit, int no, int lineno,
- char *filename);
-#define debug_boundary_error(chnl, limit, no) debug_boundary_lineno_error(chnl,\
- limit, no, __LINE__, __FILE__)
-#else
-#define debug_boundary_error(chnl, limit, no) ;
-#endif
-
-#endif /*_FLASH_INTERFACE_*/
diff --git a/drivers/staging/spectra/lld.c b/drivers/staging/spectra/lld.c
deleted file mode 100644
index 5c3b9762dc3e..000000000000
--- a/drivers/staging/spectra/lld.c
+++ /dev/null
@@ -1,339 +0,0 @@
-/*
- * NAND Flash Controller Device Driver
- * Copyright (c) 2009, Intel Corporation and its suppliers.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- */
-
-#include "spectraswconfig.h"
-#include "ffsport.h"
-#include "ffsdefs.h"
-#include "lld.h"
-#include "lld_nand.h"
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-#if FLASH_EMU /* vector all the LLD calls to the LLD_EMU code */
-#include "lld_emu.h"
-#include "lld_cdma.h"
-
-/* common functions: */
-u16 GLOB_LLD_Flash_Reset(void)
-{
- return emu_Flash_Reset();
-}
-
-u16 GLOB_LLD_Read_Device_ID(void)
-{
- return emu_Read_Device_ID();
-}
-
-int GLOB_LLD_Flash_Release(void)
-{
- return emu_Flash_Release();
-}
-
-u16 GLOB_LLD_Flash_Init(void)
-{
- return emu_Flash_Init();
-}
-
-u16 GLOB_LLD_Erase_Block(u32 block_add)
-{
- return emu_Erase_Block(block_add);
-}
-
-u16 GLOB_LLD_Write_Page_Main(u8 *write_data, u32 block, u16 Page,
- u16 PageCount)
-{
- return emu_Write_Page_Main(write_data, block, Page, PageCount);
-}
-
-u16 GLOB_LLD_Read_Page_Main(u8 *read_data, u32 block, u16 Page,
- u16 PageCount)
-{
- return emu_Read_Page_Main(read_data, block, Page, PageCount);
-}
-
-u16 GLOB_LLD_Read_Page_Main_Polling(u8 *read_data,
- u32 block, u16 page, u16 page_count)
-{
- return emu_Read_Page_Main(read_data, block, page, page_count);
-}
-
-u16 GLOB_LLD_Write_Page_Main_Spare(u8 *write_data, u32 block,
- u16 Page, u16 PageCount)
-{
- return emu_Write_Page_Main_Spare(write_data, block, Page, PageCount);
-}
-
-u16 GLOB_LLD_Read_Page_Main_Spare(u8 *read_data, u32 block,
- u16 Page, u16 PageCount)
-{
- return emu_Read_Page_Main_Spare(read_data, block, Page, PageCount);
-}
-
-u16 GLOB_LLD_Write_Page_Spare(u8 *write_data, u32 block, u16 Page,
- u16 PageCount)
-{
- return emu_Write_Page_Spare(write_data, block, Page, PageCount);
-}
-
-u16 GLOB_LLD_Read_Page_Spare(u8 *read_data, u32 block, u16 Page,
- u16 PageCount)
-{
- return emu_Read_Page_Spare(read_data, block, Page, PageCount);
-}
-
-u16 GLOB_LLD_Get_Bad_Block(u32 block)
-{
- return emu_Get_Bad_Block(block);
-}
-
-#endif /* FLASH_EMU */
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-#if FLASH_MTD /* vector all the LLD calls to the LLD_MTD code */
-#include "lld_mtd.h"
-#include "lld_cdma.h"
-
-/* common functions: */
-u16 GLOB_LLD_Flash_Reset(void)
-{
- return mtd_Flash_Reset();
-}
-
-u16 GLOB_LLD_Read_Device_ID(void)
-{
- return mtd_Read_Device_ID();
-}
-
-int GLOB_LLD_Flash_Release(void)
-{
- return mtd_Flash_Release();
-}
-
-u16 GLOB_LLD_Flash_Init(void)
-{
- return mtd_Flash_Init();
-}
-
-u16 GLOB_LLD_Erase_Block(u32 block_add)
-{
- return mtd_Erase_Block(block_add);
-}
-
-u16 GLOB_LLD_Write_Page_Main(u8 *write_data, u32 block, u16 Page,
- u16 PageCount)
-{
- return mtd_Write_Page_Main(write_data, block, Page, PageCount);
-}
-
-u16 GLOB_LLD_Read_Page_Main(u8 *read_data, u32 block, u16 Page,
- u16 PageCount)
-{
- return mtd_Read_Page_Main(read_data, block, Page, PageCount);
-}
-
-u16 GLOB_LLD_Read_Page_Main_Polling(u8 *read_data,
- u32 block, u16 page, u16 page_count)
-{
- return mtd_Read_Page_Main(read_data, block, page, page_count);
-}
-
-u16 GLOB_LLD_Write_Page_Main_Spare(u8 *write_data, u32 block,
- u16 Page, u16 PageCount)
-{
- return mtd_Write_Page_Main_Spare(write_data, block, Page, PageCount);
-}
-
-u16 GLOB_LLD_Read_Page_Main_Spare(u8 *read_data, u32 block,
- u16 Page, u16 PageCount)
-{
- return mtd_Read_Page_Main_Spare(read_data, block, Page, PageCount);
-}
-
-u16 GLOB_LLD_Write_Page_Spare(u8 *write_data, u32 block, u16 Page,
- u16 PageCount)
-{
- return mtd_Write_Page_Spare(write_data, block, Page, PageCount);
-}
-
-u16 GLOB_LLD_Read_Page_Spare(u8 *read_data, u32 block, u16 Page,
- u16 PageCount)
-{
- return mtd_Read_Page_Spare(read_data, block, Page, PageCount);
-}
-
-u16 GLOB_LLD_Get_Bad_Block(u32 block)
-{
- return mtd_Get_Bad_Block(block);
-}
-
-#endif /* FLASH_MTD */
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-#if FLASH_NAND /* vector all the LLD calls to the NAND controller code */
-#include "lld_nand.h"
-#include "lld_cdma.h"
-#include "flash.h"
-
-/* common functions for LLD_NAND */
-void GLOB_LLD_ECC_Control(int enable)
-{
- NAND_ECC_Ctrl(enable);
-}
-
-/* common functions for LLD_NAND */
-u16 GLOB_LLD_Flash_Reset(void)
-{
- return NAND_Flash_Reset();
-}
-
-u16 GLOB_LLD_Read_Device_ID(void)
-{
- return NAND_Read_Device_ID();
-}
-
-u16 GLOB_LLD_UnlockArrayAll(void)
-{
- return NAND_UnlockArrayAll();
-}
-
-u16 GLOB_LLD_Flash_Init(void)
-{
- return NAND_Flash_Init();
-}
-
-int GLOB_LLD_Flash_Release(void)
-{
- return nand_release_spectra();
-}
-
-u16 GLOB_LLD_Erase_Block(u32 block_add)
-{
- return NAND_Erase_Block(block_add);
-}
-
-
-u16 GLOB_LLD_Write_Page_Main(u8 *write_data, u32 block, u16 Page,
- u16 PageCount)
-{
- return NAND_Write_Page_Main(write_data, block, Page, PageCount);
-}
-
-u16 GLOB_LLD_Read_Page_Main(u8 *read_data, u32 block, u16 page,
- u16 page_count)
-{
- if (page_count == 1) /* Using polling to improve read speed */
- return NAND_Read_Page_Main_Polling(read_data, block, page, 1);
- else
- return NAND_Read_Page_Main(read_data, block, page, page_count);
-}
-
-u16 GLOB_LLD_Read_Page_Main_Polling(u8 *read_data,
- u32 block, u16 page, u16 page_count)
-{
- return NAND_Read_Page_Main_Polling(read_data,
- block, page, page_count);
-}
-
-u16 GLOB_LLD_Write_Page_Main_Spare(u8 *write_data, u32 block,
- u16 Page, u16 PageCount)
-{
- return NAND_Write_Page_Main_Spare(write_data, block, Page, PageCount);
-}
-
-u16 GLOB_LLD_Write_Page_Spare(u8 *write_data, u32 block, u16 Page,
- u16 PageCount)
-{
- return NAND_Write_Page_Spare(write_data, block, Page, PageCount);
-}
-
-u16 GLOB_LLD_Read_Page_Main_Spare(u8 *read_data, u32 block,
- u16 page, u16 page_count)
-{
- return NAND_Read_Page_Main_Spare(read_data, block, page, page_count);
-}
-
-u16 GLOB_LLD_Read_Page_Spare(u8 *read_data, u32 block, u16 Page,
- u16 PageCount)
-{
- return NAND_Read_Page_Spare(read_data, block, Page, PageCount);
-}
-
-u16 GLOB_LLD_Get_Bad_Block(u32 block)
-{
- return NAND_Get_Bad_Block(block);
-}
-
-#if CMD_DMA
-u16 GLOB_LLD_Event_Status(void)
-{
- return CDMA_Event_Status();
-}
-
-u16 glob_lld_execute_cmds(void)
-{
- return CDMA_Execute_CMDs();
-}
-
-u16 GLOB_LLD_MemCopy_CMD(u8 *dest, u8 *src,
- u32 ByteCount, u16 flag)
-{
- /* Replace the hardware memcopy with software memcpy function */
- if (CDMA_Execute_CMDs())
- return FAIL;
- memcpy(dest, src, ByteCount);
- return PASS;
-
- /* return CDMA_MemCopy_CMD(dest, src, ByteCount, flag); */
-}
-
-u16 GLOB_LLD_Erase_Block_cdma(u32 block, u16 flags)
-{
- return CDMA_Data_CMD(ERASE_CMD, 0, block, 0, 0, flags);
-}
-
-u16 GLOB_LLD_Write_Page_Main_cdma(u8 *data, u32 block, u16 page, u16 count)
-{
- return CDMA_Data_CMD(WRITE_MAIN_CMD, data, block, page, count, 0);
-}
-
-u16 GLOB_LLD_Read_Page_Main_cdma(u8 *data, u32 block, u16 page,
- u16 count, u16 flags)
-{
- return CDMA_Data_CMD(READ_MAIN_CMD, data, block, page, count, flags);
-}
-
-u16 GLOB_LLD_Write_Page_Main_Spare_cdma(u8 *data, u32 block, u16 page,
- u16 count, u16 flags)
-{
- return CDMA_Data_CMD(WRITE_MAIN_SPARE_CMD,
- data, block, page, count, flags);
-}
-
-u16 GLOB_LLD_Read_Page_Main_Spare_cdma(u8 *data,
- u32 block, u16 page, u16 count)
-{
- return CDMA_Data_CMD(READ_MAIN_SPARE_CMD, data, block, page, count,
- LLD_CMD_FLAG_MODE_CDMA);
-}
-
-#endif /* CMD_DMA */
-#endif /* FLASH_NAND */
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-
-/* end of LLD.c */
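
A stand-alone sketch of the compile-time dispatch pattern lld.c uses: exactly one backend (emulator, MTD, or NAND controller) is selected with a preprocessor switch, and each GLOB_LLD_* wrapper vectors to it. The backend function name and the _sketch suffix here are hypothetical:

/* Illustrative sketch only -- not part of the driver. */
#include <stdio.h>

#define FLASH_EMU 1	/* exactly one backend is chosen at build time */

#if FLASH_EMU
static unsigned short backend_erase_block(unsigned int block)
{
	printf("emu backend: erase block %u\n", block);
	return 0;	/* PASS */
}
#else
static unsigned short backend_erase_block(unsigned int block)
{
	printf("hardware backend: erase block %u\n", block);
	return 0;
}
#endif

/* Thin wrapper, in the spirit of GLOB_LLD_Erase_Block() */
static unsigned short lld_erase_block_sketch(unsigned int block)
{
	return backend_erase_block(block);
}

int main(void)
{
	return (int)lld_erase_block_sketch(7);
}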
diff --git a/drivers/staging/spectra/lld.h b/drivers/staging/spectra/lld.h
deleted file mode 100644
index d3738e0e1fea..000000000000
--- a/drivers/staging/spectra/lld.h
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * NAND Flash Controller Device Driver
- * Copyright (c) 2009, Intel Corporation and its suppliers.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- */
-
-
-
-#ifndef _LLD_
-#define _LLD_
-
-#include "ffsport.h"
-#include "spectraswconfig.h"
-#include "flash.h"
-
-#define GOOD_BLOCK 0
-#define DEFECTIVE_BLOCK 1
-#define READ_ERROR 2
-
-#define CLK_X 5
-#define CLK_MULTI 4
-
-/* Typedefs */
-
-/* prototypes: API for LLD */
-/* Currently, Write_Page_Main,
- * MemCopy and
- * Read_Page_Main_Spare
- * do not take a flags argument because they were not implemented with one.
- * The flag is not being added now, to keep changes to a minimum.
- * Currently it is not required (it is only needed for Write_Page_Main_Spare).
- * Later on, these NEED to be changed.
- */
-
-extern void GLOB_LLD_ECC_Control(int enable);
-
-extern u16 GLOB_LLD_Flash_Reset(void);
-
-extern u16 GLOB_LLD_Read_Device_ID(void);
-
-extern u16 GLOB_LLD_UnlockArrayAll(void);
-
-extern u16 GLOB_LLD_Flash_Init(void);
-
-extern int GLOB_LLD_Flash_Release(void);
-
-extern u16 GLOB_LLD_Erase_Block(u32 block_add);
-
-extern u16 GLOB_LLD_Write_Page_Main(u8 *write_data,
- u32 block, u16 Page, u16 PageCount);
-
-extern u16 GLOB_LLD_Read_Page_Main(u8 *read_data,
- u32 block, u16 page, u16 page_count);
-
-extern u16 GLOB_LLD_Read_Page_Main_Polling(u8 *read_data,
- u32 block, u16 page, u16 page_count);
-
-extern u16 GLOB_LLD_Write_Page_Main_Spare(u8 *write_data,
- u32 block, u16 Page, u16 PageCount);
-
-extern u16 GLOB_LLD_Write_Page_Spare(u8 *write_data,
- u32 block, u16 Page, u16 PageCount);
-
-extern u16 GLOB_LLD_Read_Page_Main_Spare(u8 *read_data,
- u32 block, u16 page, u16 page_count);
-
-extern u16 GLOB_LLD_Read_Page_Spare(u8 *read_data,
- u32 block, u16 Page, u16 PageCount);
-
-extern u16 GLOB_LLD_Get_Bad_Block(u32 block);
-
-extern u16 GLOB_LLD_Event_Status(void);
-
-extern u16 GLOB_LLD_MemCopy_CMD(u8 *dest, u8 *src, u32 ByteCount, u16 flag);
-
-extern u16 glob_lld_execute_cmds(void);
-
-extern u16 GLOB_LLD_Erase_Block_cdma(u32 block, u16 flags);
-
-extern u16 GLOB_LLD_Write_Page_Main_cdma(u8 *data,
- u32 block, u16 page, u16 count);
-
-extern u16 GLOB_LLD_Read_Page_Main_cdma(u8 *data,
- u32 block, u16 page, u16 count, u16 flags);
-
-extern u16 GLOB_LLD_Write_Page_Main_Spare_cdma(u8 *data,
- u32 block, u16 page, u16 count, u16 flags);
-
-extern u16 GLOB_LLD_Read_Page_Main_Spare_cdma(u8 *data,
- u32 block, u16 page, u16 count);
-
-#define LLD_CMD_FLAG_ORDER_BEFORE_REST (0x1)
-#define LLD_CMD_FLAG_MODE_CDMA (0x8)
-
-
-#endif /*_LLD_ */
-
-
diff --git a/drivers/staging/spectra/lld_cdma.c b/drivers/staging/spectra/lld_cdma.c
deleted file mode 100644
index c6e76103d43c..000000000000
--- a/drivers/staging/spectra/lld_cdma.c
+++ /dev/null
@@ -1,910 +0,0 @@
-/*
- * NAND Flash Controller Device Driver
- * Copyright (c) 2009, Intel Corporation and its suppliers.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- */
-
-#include <linux/fs.h>
-#include <linux/slab.h>
-
-#include "spectraswconfig.h"
-#include "lld.h"
-#include "lld_nand.h"
-#include "lld_cdma.h"
-#include "lld_emu.h"
-#include "flash.h"
-#include "nand_regs.h"
-
-#define MAX_PENDING_CMDS 4
-#define MODE_02 (0x2 << 26)
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: CDMA_Data_CMD
-* Inputs: cmd code (aligned for hw)
-* data: pointer to source or destination
-* block: block address
-* page: page address
-* num: num pages to transfer
-* Outputs: PASS
-* Description: This function takes the parameters and puts them
-* into the "pending commands" array.
-* It does not parse or validate the parameters.
-* The array index is the same as the tag.
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 CDMA_Data_CMD(u8 cmd, u8 *data, u32 block, u16 page, u16 num, u16 flags)
-{
- u8 bank;
-
- nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (0 == cmd)
- nand_dbg_print(NAND_DBG_DEBUG,
- "%s, Line %d, Illegal cmd (0)\n", __FILE__, __LINE__);
-
- /* If a command of another bank comes, then first execute */
- /* pending commands of the current bank, then set the new */
- /* bank as current bank */
- bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
- if (bank != info.flash_bank) {
- nand_dbg_print(NAND_DBG_WARN,
- "Will access new bank. old bank: %d, new bank: %d\n",
- info.flash_bank, bank);
- if (CDMA_Execute_CMDs()) {
- printk(KERN_ERR "CDMA_Execute_CMDs fail!\n");
- return FAIL;
- }
- info.flash_bank = bank;
- }
-
- info.pcmds[info.pcmds_num].CMD = cmd;
- info.pcmds[info.pcmds_num].DataAddr = data;
- info.pcmds[info.pcmds_num].Block = block;
- info.pcmds[info.pcmds_num].Page = page;
- info.pcmds[info.pcmds_num].PageCount = num;
- info.pcmds[info.pcmds_num].DataDestAddr = 0;
- info.pcmds[info.pcmds_num].DataSrcAddr = 0;
- info.pcmds[info.pcmds_num].MemCopyByteCnt = 0;
- info.pcmds[info.pcmds_num].Flags = flags;
- info.pcmds[info.pcmds_num].Status = 0xB0B;
-
- switch (cmd) {
- case WRITE_MAIN_SPARE_CMD:
- Conv_Main_Spare_Data_Log2Phy_Format(data, num);
- break;
- case WRITE_SPARE_CMD:
- Conv_Spare_Data_Log2Phy_Format(data);
- break;
- default:
- break;
- }
-
- info.pcmds_num++;
-
- if (info.pcmds_num >= MAX_PENDING_CMDS) {
- if (CDMA_Execute_CMDs()) {
- printk(KERN_ERR "CDMA_Execute_CMDs fail!\n");
- return FAIL;
- }
- }
-
- return PASS;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: CDMA_MemCopy_CMD
-* Inputs: dest: pointer to destination
-* src: pointer to source
-* count: num bytes to transfer
-* Outputs: PASS
-* Description: This function takes the parameters and puts them
-* into the "pending commands" array.
-* It does not parse or validate the parameters.
-* The array index is the same as the tag.
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 CDMA_MemCopy_CMD(u8 *dest, u8 *src, u32 byte_cnt, u16 flags)
-{
- nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- info.pcmds[info.pcmds_num].CMD = MEMCOPY_CMD;
- info.pcmds[info.pcmds_num].DataAddr = 0;
- info.pcmds[info.pcmds_num].Block = 0;
- info.pcmds[info.pcmds_num].Page = 0;
- info.pcmds[info.pcmds_num].PageCount = 0;
- info.pcmds[info.pcmds_num].DataDestAddr = dest;
- info.pcmds[info.pcmds_num].DataSrcAddr = src;
- info.pcmds[info.pcmds_num].MemCopyByteCnt = byte_cnt;
- info.pcmds[info.pcmds_num].Flags = flags;
- info.pcmds[info.pcmds_num].Status = 0xB0B;
-
- info.pcmds_num++;
-
- if (info.pcmds_num >= MAX_PENDING_CMDS) {
- if (CDMA_Execute_CMDs()) {
- printk(KERN_ERR "CDMA_Execute_CMDs fail!\n");
- return FAIL;
- }
- }
-
- return PASS;
-}
-
-#if 0
-/* Prints the PendingCMDs array */
-void print_pending_cmds(void)
-{
- u16 i;
-
- nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- for (i = 0; i < info.pcmds_num; i++) {
- nand_dbg_print(NAND_DBG_DEBUG, "\ni: %d\n", i);
- switch (info.pcmds[i].CMD) {
- case ERASE_CMD:
- nand_dbg_print(NAND_DBG_DEBUG,
- "Erase Command (0x%x)\n",
- info.pcmds[i].CMD);
- break;
- case WRITE_MAIN_CMD:
- nand_dbg_print(NAND_DBG_DEBUG,
- "Write Main Command (0x%x)\n",
- info.pcmds[i].CMD);
- break;
- case WRITE_MAIN_SPARE_CMD:
- nand_dbg_print(NAND_DBG_DEBUG,
- "Write Main Spare Command (0x%x)\n",
- info.pcmds[i].CMD);
- break;
- case READ_MAIN_SPARE_CMD:
- nand_dbg_print(NAND_DBG_DEBUG,
- "Read Main Spare Command (0x%x)\n",
- info.pcmds[i].CMD);
- break;
- case READ_MAIN_CMD:
- nand_dbg_print(NAND_DBG_DEBUG,
- "Read Main Command (0x%x)\n",
- info.pcmds[i].CMD);
- break;
- case MEMCOPY_CMD:
- nand_dbg_print(NAND_DBG_DEBUG,
- "Memcopy Command (0x%x)\n",
- info.pcmds[i].CMD);
- break;
- case DUMMY_CMD:
- nand_dbg_print(NAND_DBG_DEBUG,
- "Dummy Command (0x%x)\n",
- info.pcmds[i].CMD);
- break;
- default:
- nand_dbg_print(NAND_DBG_DEBUG,
- "Illegal Command (0x%x)\n",
- info.pcmds[i].CMD);
- break;
- }
-
- nand_dbg_print(NAND_DBG_DEBUG, "DataAddr: 0x%x\n",
- (u32)info.pcmds[i].DataAddr);
- nand_dbg_print(NAND_DBG_DEBUG, "Block: %d\n",
- info.pcmds[i].Block);
- nand_dbg_print(NAND_DBG_DEBUG, "Page: %d\n",
- info.pcmds[i].Page);
- nand_dbg_print(NAND_DBG_DEBUG, "PageCount: %d\n",
- info.pcmds[i].PageCount);
- nand_dbg_print(NAND_DBG_DEBUG, "DataDestAddr: 0x%x\n",
- (u32)info.pcmds[i].DataDestAddr);
- nand_dbg_print(NAND_DBG_DEBUG, "DataSrcAddr: 0x%x\n",
- (u32)info.pcmds[i].DataSrcAddr);
- nand_dbg_print(NAND_DBG_DEBUG, "MemCopyByteCnt: %d\n",
- info.pcmds[i].MemCopyByteCnt);
- nand_dbg_print(NAND_DBG_DEBUG, "Flags: 0x%x\n",
- info.pcmds[i].Flags);
- nand_dbg_print(NAND_DBG_DEBUG, "Status: 0x%x\n",
- info.pcmds[i].Status);
- }
-}
-
-/* Print the CDMA descriptors */
-void print_cdma_descriptors(void)
-{
- struct cdma_descriptor *pc;
- int i;
-
- pc = (struct cdma_descriptor *)info.cdma_desc_buf;
-
- nand_dbg_print(NAND_DBG_DEBUG, "\nWill dump cdma descriptors:\n");
-
- for (i = 0; i < info.cdma_num; i++) {
- nand_dbg_print(NAND_DBG_DEBUG, "\ni: %d\n", i);
- nand_dbg_print(NAND_DBG_DEBUG,
- "NxtPointerHi: 0x%x, NxtPointerLo: 0x%x\n",
- pc[i].NxtPointerHi, pc[i].NxtPointerLo);
- nand_dbg_print(NAND_DBG_DEBUG,
- "FlashPointerHi: 0x%x, FlashPointerLo: 0x%x\n",
- pc[i].FlashPointerHi, pc[i].FlashPointerLo);
- nand_dbg_print(NAND_DBG_DEBUG, "CommandType: 0x%x\n",
- pc[i].CommandType);
- nand_dbg_print(NAND_DBG_DEBUG,
- "MemAddrHi: 0x%x, MemAddrLo: 0x%x\n",
- pc[i].MemAddrHi, pc[i].MemAddrLo);
- nand_dbg_print(NAND_DBG_DEBUG, "CommandFlags: 0x%x\n",
- pc[i].CommandFlags);
- nand_dbg_print(NAND_DBG_DEBUG, "Channel: %d, Status: 0x%x\n",
- pc[i].Channel, pc[i].Status);
- nand_dbg_print(NAND_DBG_DEBUG,
- "MemCopyPointerHi: 0x%x, MemCopyPointerLo: 0x%x\n",
- pc[i].MemCopyPointerHi, pc[i].MemCopyPointerLo);
- nand_dbg_print(NAND_DBG_DEBUG,
- "Reserved12: 0x%x, Reserved13: 0x%x, "
- "Reserved14: 0x%x, pcmd: %d\n",
- pc[i].Reserved12, pc[i].Reserved13,
- pc[i].Reserved14, pc[i].pcmd);
- }
-}
-
-/* Print the Memory copy descriptors */
-static void print_memcp_descriptors(void)
-{
- struct memcpy_descriptor *pm;
- int i;
-
- pm = (struct memcpy_descriptor *)info.memcp_desc_buf;
-
- nand_dbg_print(NAND_DBG_DEBUG, "\nWill dump mem_cpy descriptors:\n");
-
- for (i = 0; i < info.cdma_num; i++) {
- nand_dbg_print(NAND_DBG_DEBUG, "\ni: %d\n", i);
- nand_dbg_print(NAND_DBG_DEBUG,
- "NxtPointerHi: 0x%x, NxtPointerLo: 0x%x\n",
- pm[i].NxtPointerHi, pm[i].NxtPointerLo);
- nand_dbg_print(NAND_DBG_DEBUG,
- "SrcAddrHi: 0x%x, SrcAddrLo: 0x%x\n",
- pm[i].SrcAddrHi, pm[i].SrcAddrLo);
- nand_dbg_print(NAND_DBG_DEBUG,
- "DestAddrHi: 0x%x, DestAddrLo: 0x%x\n",
- pm[i].DestAddrHi, pm[i].DestAddrLo);
- nand_dbg_print(NAND_DBG_DEBUG, "XferSize: %d\n",
- pm[i].XferSize);
- nand_dbg_print(NAND_DBG_DEBUG, "MemCopyFlags: 0x%x\n",
- pm[i].MemCopyFlags);
- nand_dbg_print(NAND_DBG_DEBUG, "MemCopyStatus: %d\n",
- pm[i].MemCopyStatus);
- nand_dbg_print(NAND_DBG_DEBUG, "reserved9: 0x%x\n",
- pm[i].reserved9);
- nand_dbg_print(NAND_DBG_DEBUG, "reserved10: 0x%x\n",
- pm[i].reserved10);
- nand_dbg_print(NAND_DBG_DEBUG, "reserved11: 0x%x\n",
- pm[i].reserved11);
- nand_dbg_print(NAND_DBG_DEBUG, "reserved12: 0x%x\n",
- pm[i].reserved12);
- nand_dbg_print(NAND_DBG_DEBUG, "reserved13: 0x%x\n",
- pm[i].reserved13);
- nand_dbg_print(NAND_DBG_DEBUG, "reserved14: 0x%x\n",
- pm[i].reserved14);
- nand_dbg_print(NAND_DBG_DEBUG, "reserved15: 0x%x\n",
- pm[i].reserved15);
- }
-}
-#endif
-
-/* Reset one cdma_descriptor entry to all zeros */
-static void reset_cdma_desc(int i)
-{
- struct cdma_descriptor *ptr;
-
- BUG_ON(i >= MAX_DESCS);
-
- ptr = (struct cdma_descriptor *)info.cdma_desc_buf;
-
- ptr[i].NxtPointerHi = 0;
- ptr[i].NxtPointerLo = 0;
- ptr[i].FlashPointerHi = 0;
- ptr[i].FlashPointerLo = 0;
- ptr[i].CommandType = 0;
- ptr[i].MemAddrHi = 0;
- ptr[i].MemAddrLo = 0;
- ptr[i].CommandFlags = 0;
- ptr[i].Channel = 0;
- ptr[i].Status = 0;
- ptr[i].MemCopyPointerHi = 0;
- ptr[i].MemCopyPointerLo = 0;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: CDMA_UpdateEventStatus
-* Inputs: none
-* Outputs: none
-* Description: This function updates the event status of all the channels
-* when an error condition is reported.
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-void CDMA_UpdateEventStatus(void)
-{
- int i, j, active_chan;
- struct cdma_descriptor *ptr;
-
- nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- ptr = (struct cdma_descriptor *)info.cdma_desc_buf;
-
- for (j = 0; j < info.cdma_num; j++) {
- /* Check for the descriptor with failure */
- if ((ptr[j].Status & CMD_DMA_DESC_FAIL))
- break;
-
- }
-
- /* All the previous cmds' statuses for this channel must be good */
- for (i = 0; i < j; i++) {
- if (ptr[i].pcmd != 0xff)
- info.pcmds[ptr[i].pcmd].Status = CMD_PASS;
- }
-
- /* Abort the channel with a type 0 reset command. It resets the */
- /* selected channel after the descriptor completes the flash */
- /* operation and the status has been updated for the descriptor. */
- /* The Memory Copy and Sync associated with this descriptor will */
- /* not be executed */
- active_chan = ioread32(FlashReg + CHNL_ACTIVE);
- if ((active_chan & (1 << info.flash_bank)) == (1 << info.flash_bank)) {
- iowrite32(MODE_02 | (0 << 4), FlashMem); /* Type 0 reset */
- iowrite32((0xF << 4) | info.flash_bank, FlashMem + 0x10);
- } else { /* Should not reach here */
- printk(KERN_ERR "Error! Used bank is not set in"
- " reg CHNL_ACTIVE\n");
- }
-}
-
-static void cdma_trans(u16 chan)
-{
- u32 addr;
-
- addr = info.cdma_desc;
-
- iowrite32(MODE_10 | (chan << 24), FlashMem);
- iowrite32((1 << 7) | chan, FlashMem + 0x10);
-
- iowrite32(MODE_10 | (chan << 24) | ((0x0FFFF & (addr >> 16)) << 8),
- FlashMem);
- iowrite32((1 << 7) | (1 << 4) | 0, FlashMem + 0x10);
-
- iowrite32(MODE_10 | (chan << 24) | ((0x0FFFF & addr) << 8), FlashMem);
- iowrite32((1 << 7) | (1 << 5) | 0, FlashMem + 0x10);
-
- iowrite32(MODE_10 | (chan << 24), FlashMem);
- iowrite32((1 << 7) | (1 << 5) | (1 << 4) | 0, FlashMem + 0x10);
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: CDMA_Execute_CMDs (for use with CMD_DMA)
-* Inputs: none (operates on the pending commands stored in info.pcmds)
-* Outputs: PASS/FAIL
-* Description: Build the CDMA chain(s) by making one CMD-DMA descriptor
-* for each pending command, start the CDMA engine, and return.
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 CDMA_Execute_CMDs(void)
-{
- int i, ret;
- u64 flash_add;
- u32 ptr;
- dma_addr_t map_addr, next_ptr;
- u16 status = PASS;
- u16 tmp_c;
- struct cdma_descriptor *pc;
- struct memcpy_descriptor *pm;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- /* No pending cmds to execute, just exit */
- if (0 == info.pcmds_num) {
- nand_dbg_print(NAND_DBG_TRACE,
- "No pending cmds to execute. Just exit.\n");
- return PASS;
- }
-
- for (i = 0; i < MAX_DESCS; i++)
- reset_cdma_desc(i);
-
- pc = (struct cdma_descriptor *)info.cdma_desc_buf;
- pm = (struct memcpy_descriptor *)info.memcp_desc_buf;
-
- info.cdma_desc = virt_to_bus(info.cdma_desc_buf);
- info.memcp_desc = virt_to_bus(info.memcp_desc_buf);
- next_ptr = info.cdma_desc;
- info.cdma_num = 0;
-
- for (i = 0; i < info.pcmds_num; i++) {
- if (info.pcmds[i].Block >= DeviceInfo.wTotalBlocks) {
- info.pcmds[i].Status = CMD_NOT_DONE;
- continue;
- }
-
- next_ptr += sizeof(struct cdma_descriptor);
- pc[info.cdma_num].NxtPointerHi = next_ptr >> 16;
- pc[info.cdma_num].NxtPointerLo = next_ptr & 0xffff;
-
- /* Use the Block offset within a bank */
- tmp_c = info.pcmds[i].Block /
- (DeviceInfo.wTotalBlocks / totalUsedBanks);
- flash_add = (u64)(info.pcmds[i].Block - tmp_c *
- (DeviceInfo.wTotalBlocks / totalUsedBanks)) *
- DeviceInfo.wBlockDataSize +
- (u64)(info.pcmds[i].Page) *
- DeviceInfo.wPageDataSize;
-
- ptr = MODE_10 | (info.flash_bank << 24) |
- (u32)GLOB_u64_Div(flash_add,
- DeviceInfo.wPageDataSize);
- pc[info.cdma_num].FlashPointerHi = ptr >> 16;
- pc[info.cdma_num].FlashPointerLo = ptr & 0xffff;
-
- if ((info.pcmds[i].CMD == WRITE_MAIN_SPARE_CMD) ||
- (info.pcmds[i].CMD == READ_MAIN_SPARE_CMD)) {
- /* Descriptor to set Main+Spare Access Mode */
- pc[info.cdma_num].CommandType = 0x43;
- pc[info.cdma_num].CommandFlags =
- (0 << 10) | (1 << 9) | (0 << 8) | 0x40;
- pc[info.cdma_num].MemAddrHi = 0;
- pc[info.cdma_num].MemAddrLo = 0;
- pc[info.cdma_num].Channel = 0;
- pc[info.cdma_num].Status = 0;
- pc[info.cdma_num].pcmd = i;
-
- info.cdma_num++;
- BUG_ON(info.cdma_num >= MAX_DESCS);
-
- reset_cdma_desc(info.cdma_num);
- next_ptr += sizeof(struct cdma_descriptor);
- pc[info.cdma_num].NxtPointerHi = next_ptr >> 16;
- pc[info.cdma_num].NxtPointerLo = next_ptr & 0xffff;
- pc[info.cdma_num].FlashPointerHi = ptr >> 16;
- pc[info.cdma_num].FlashPointerLo = ptr & 0xffff;
- }
-
- switch (info.pcmds[i].CMD) {
- case ERASE_CMD:
- pc[info.cdma_num].CommandType = 1;
- pc[info.cdma_num].CommandFlags =
- (0 << 10) | (1 << 9) | (0 << 8) | 0x40;
- pc[info.cdma_num].MemAddrHi = 0;
- pc[info.cdma_num].MemAddrLo = 0;
- break;
-
- case WRITE_MAIN_CMD:
- pc[info.cdma_num].CommandType =
- 0x2100 | info.pcmds[i].PageCount;
- pc[info.cdma_num].CommandFlags =
- (0 << 10) | (1 << 9) | (0 << 8) | 0x40;
- map_addr = virt_to_bus(info.pcmds[i].DataAddr);
- pc[info.cdma_num].MemAddrHi = map_addr >> 16;
- pc[info.cdma_num].MemAddrLo = map_addr & 0xffff;
- break;
-
- case READ_MAIN_CMD:
- pc[info.cdma_num].CommandType =
- 0x2000 | info.pcmds[i].PageCount;
- pc[info.cdma_num].CommandFlags =
- (0 << 10) | (1 << 9) | (0 << 8) | 0x40;
- map_addr = virt_to_bus(info.pcmds[i].DataAddr);
- pc[info.cdma_num].MemAddrHi = map_addr >> 16;
- pc[info.cdma_num].MemAddrLo = map_addr & 0xffff;
- break;
-
- case WRITE_MAIN_SPARE_CMD:
- pc[info.cdma_num].CommandType =
- 0x2100 | info.pcmds[i].PageCount;
- pc[info.cdma_num].CommandFlags =
- (0 << 10) | (1 << 9) | (0 << 8) | 0x40;
- map_addr = virt_to_bus(info.pcmds[i].DataAddr);
- pc[info.cdma_num].MemAddrHi = map_addr >> 16;
- pc[info.cdma_num].MemAddrLo = map_addr & 0xffff;
- break;
-
- case READ_MAIN_SPARE_CMD:
- pc[info.cdma_num].CommandType =
- 0x2000 | info.pcmds[i].PageCount;
- pc[info.cdma_num].CommandFlags =
- (0 << 10) | (1 << 9) | (0 << 8) | 0x40;
- map_addr = virt_to_bus(info.pcmds[i].DataAddr);
- pc[info.cdma_num].MemAddrHi = map_addr >> 16;
- pc[info.cdma_num].MemAddrLo = map_addr & 0xffff;
- break;
-
- case MEMCOPY_CMD:
- pc[info.cdma_num].CommandType = 0xFFFF; /* NOP cmd */
- /* Set bit 11 to let the CDMA engine continue to */
- /* execute only after it has finished processing */
- /* the memcopy descriptor. */
- /* Also set bits 10 and 9 to 1 */
- pc[info.cdma_num].CommandFlags = 0x0E40;
- map_addr = info.memcp_desc + info.cdma_num *
- sizeof(struct memcpy_descriptor);
- pc[info.cdma_num].MemCopyPointerHi = map_addr >> 16;
- pc[info.cdma_num].MemCopyPointerLo = map_addr & 0xffff;
-
- pm[info.cdma_num].NxtPointerHi = 0;
- pm[info.cdma_num].NxtPointerLo = 0;
-
- map_addr = virt_to_bus(info.pcmds[i].DataSrcAddr);
- pm[info.cdma_num].SrcAddrHi = map_addr >> 16;
- pm[info.cdma_num].SrcAddrLo = map_addr & 0xffff;
- map_addr = virt_to_bus(info.pcmds[i].DataDestAddr);
- pm[info.cdma_num].DestAddrHi = map_addr >> 16;
- pm[info.cdma_num].DestAddrLo = map_addr & 0xffff;
-
- pm[info.cdma_num].XferSize =
- info.pcmds[i].MemCopyByteCnt;
- pm[info.cdma_num].MemCopyFlags =
- (0 << 15 | 0 << 14 | 27 << 8 | 0x40);
- pm[info.cdma_num].MemCopyStatus = 0;
- break;
-
- case DUMMY_CMD:
- default:
- pc[info.cdma_num].CommandType = 0XFFFF;
- pc[info.cdma_num].CommandFlags =
- (0 << 10) | (1 << 9) | (0 << 8) | 0x40;
- pc[info.cdma_num].MemAddrHi = 0;
- pc[info.cdma_num].MemAddrLo = 0;
- break;
- }
-
- pc[info.cdma_num].Channel = 0;
- pc[info.cdma_num].Status = 0;
- pc[info.cdma_num].pcmd = i;
-
- info.cdma_num++;
- BUG_ON(info.cdma_num >= MAX_DESCS);
-
- if ((info.pcmds[i].CMD == WRITE_MAIN_SPARE_CMD) ||
- (info.pcmds[i].CMD == READ_MAIN_SPARE_CMD)) {
- /* Descriptor to set back Main Area Access Mode */
- reset_cdma_desc(info.cdma_num);
- next_ptr += sizeof(struct cdma_descriptor);
- pc[info.cdma_num].NxtPointerHi = next_ptr >> 16;
- pc[info.cdma_num].NxtPointerLo = next_ptr & 0xffff;
-
- pc[info.cdma_num].FlashPointerHi = ptr >> 16;
- pc[info.cdma_num].FlashPointerLo = ptr & 0xffff;
-
- pc[info.cdma_num].CommandType = 0x42;
- pc[info.cdma_num].CommandFlags =
- (0 << 10) | (1 << 9) | (0 << 8) | 0x40;
- pc[info.cdma_num].MemAddrHi = 0;
- pc[info.cdma_num].MemAddrLo = 0;
-
- pc[info.cdma_num].Channel = 0;
- pc[info.cdma_num].Status = 0;
- pc[info.cdma_num].pcmd = i;
-
- info.cdma_num++;
- BUG_ON(info.cdma_num >= MAX_DESCS);
- }
- }
-
- /* Add a dummy descriptor at the end of the CDMA chain */
- reset_cdma_desc(info.cdma_num);
- ptr = MODE_10 | (info.flash_bank << 24);
- pc[info.cdma_num].FlashPointerHi = ptr >> 16;
- pc[info.cdma_num].FlashPointerLo = ptr & 0xffff;
- pc[info.cdma_num].CommandType = 0xFFFF; /* NOP command */
- /* Set Command Flags for the last CDMA descriptor: */
- /* set Continue bit (bit 9) to 0 and Interrupt bit (bit 8) to 1 */
- pc[info.cdma_num].CommandFlags =
- (0 << 10) | (0 << 9) | (1 << 8) | 0x40;
- pc[info.cdma_num].pcmd = 0xff; /* Set it to an illegal value */
- info.cdma_num++;
- BUG_ON(info.cdma_num >= MAX_DESCS);
-
- iowrite32(1, FlashReg + GLOBAL_INT_ENABLE); /* Enable Interrupt */
-
- iowrite32(1, FlashReg + DMA_ENABLE);
- /* Wait for DMA to be enabled before issuing the next command */
- while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
- ;
- cdma_trans(info.flash_bank);
-
- ret = wait_for_completion_timeout(&info.complete, 50 * HZ);
- if (!ret)
- printk(KERN_ERR "Wait for completion timeout "
- "in %s, Line %d\n", __FILE__, __LINE__);
- status = info.ret;
-
- info.pcmds_num = 0; /* Clear the pending cmds number to 0 */
-
- return status;
-}
-
-int is_cdma_interrupt(void)
-{
- u32 ints_b0, ints_b1, ints_b2, ints_b3, ints_cdma;
- u32 int_en_mask;
- u32 cdma_int_en_mask;
-
- nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- /* Set the global Enable masks for only those interrupts
- * that are supported */
- cdma_int_en_mask = (DMA_INTR__DESC_COMP_CHANNEL0 |
- DMA_INTR__DESC_COMP_CHANNEL1 |
- DMA_INTR__DESC_COMP_CHANNEL2 |
- DMA_INTR__DESC_COMP_CHANNEL3 |
- DMA_INTR__MEMCOPY_DESC_COMP);
-
- int_en_mask = (INTR_STATUS0__ECC_ERR |
- INTR_STATUS0__PROGRAM_FAIL |
- INTR_STATUS0__ERASE_FAIL);
-
- ints_b0 = ioread32(FlashReg + INTR_STATUS0) & int_en_mask;
- ints_b1 = ioread32(FlashReg + INTR_STATUS1) & int_en_mask;
- ints_b2 = ioread32(FlashReg + INTR_STATUS2) & int_en_mask;
- ints_b3 = ioread32(FlashReg + INTR_STATUS3) & int_en_mask;
- ints_cdma = ioread32(FlashReg + DMA_INTR) & cdma_int_en_mask;
-
- nand_dbg_print(NAND_DBG_WARN, "ints_bank0 to ints_bank3: "
- "0x%x, 0x%x, 0x%x, 0x%x, ints_cdma: 0x%x\n",
- ints_b0, ints_b1, ints_b2, ints_b3, ints_cdma);
-
- if (ints_b0 || ints_b1 || ints_b2 || ints_b3 || ints_cdma) {
- return 1;
- } else {
- iowrite32(ints_b0, FlashReg + INTR_STATUS0);
- iowrite32(ints_b1, FlashReg + INTR_STATUS1);
- iowrite32(ints_b2, FlashReg + INTR_STATUS2);
- iowrite32(ints_b3, FlashReg + INTR_STATUS3);
- nand_dbg_print(NAND_DBG_DEBUG,
- "Not a NAND controller interrupt! Ignore it.\n");
- return 0;
- }
-}
-
-static void update_event_status(void)
-{
- int i;
- struct cdma_descriptor *ptr;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- ptr = (struct cdma_descriptor *)info.cdma_desc_buf;
-
- for (i = 0; i < info.cdma_num; i++) {
- if (ptr[i].pcmd != 0xff)
- info.pcmds[ptr[i].pcmd].Status = CMD_PASS;
- if ((ptr[i].CommandType == 0x41) ||
- (ptr[i].CommandType == 0x42) ||
- (ptr[i].CommandType == 0x43))
- continue;
-
- switch (info.pcmds[ptr[i].pcmd].CMD) {
- case READ_MAIN_SPARE_CMD:
- Conv_Main_Spare_Data_Phy2Log_Format(
- info.pcmds[ptr[i].pcmd].DataAddr,
- info.pcmds[ptr[i].pcmd].PageCount);
- break;
- case READ_SPARE_CMD:
- Conv_Spare_Data_Phy2Log_Format(
- info.pcmds[ptr[i].pcmd].DataAddr);
- break;
- }
- }
-}
-
-static u16 do_ecc_for_desc(u32 ch, u8 *buf, u16 page)
-{
- u16 event = EVENT_NONE;
- u16 err_byte;
- u16 err_page = 0;
- u8 err_sector;
- u8 err_device;
- u16 ecc_correction_info;
- u16 err_address;
- u32 eccSectorSize;
- u8 *err_pos;
-
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- eccSectorSize = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);
-
- do {
- if (0 == ch)
- err_page = ioread32(FlashReg + ERR_PAGE_ADDR0);
- else if (1 == ch)
- err_page = ioread32(FlashReg + ERR_PAGE_ADDR1);
- else if (2 == ch)
- err_page = ioread32(FlashReg + ERR_PAGE_ADDR2);
- else if (3 == ch)
- err_page = ioread32(FlashReg + ERR_PAGE_ADDR3);
-
- err_address = ioread32(FlashReg + ECC_ERROR_ADDRESS);
- err_byte = err_address & ECC_ERROR_ADDRESS__OFFSET;
- err_sector = ((err_address &
- ECC_ERROR_ADDRESS__SECTOR_NR) >> 12);
-
- ecc_correction_info = ioread32(FlashReg + ERR_CORRECTION_INFO);
- err_device = ((ecc_correction_info &
- ERR_CORRECTION_INFO__DEVICE_NR) >> 8);
-
- if (ecc_correction_info & ERR_CORRECTION_INFO__ERROR_TYPE) {
- event = EVENT_UNCORRECTABLE_DATA_ERROR;
- } else {
- event = EVENT_CORRECTABLE_DATA_ERROR_FIXED;
- if (err_byte < ECC_SECTOR_SIZE) {
- err_pos = buf +
- (err_page - page) *
- DeviceInfo.wPageDataSize +
- err_sector * eccSectorSize +
- err_byte *
- DeviceInfo.wDevicesConnected +
- err_device;
- *err_pos ^= ecc_correction_info &
- ERR_CORRECTION_INFO__BYTEMASK;
- }
- }
- } while (!(ecc_correction_info & ERR_CORRECTION_INFO__LAST_ERR_INFO));
-
- return event;
-}
-
-static u16 process_ecc_int(u32 c, u16 *p_desc_num)
-{
- struct cdma_descriptor *ptr;
- u16 j;
- int event = EVENT_PASS;
-
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (c != info.flash_bank)
- printk(KERN_ERR "Error!info.flash_bank is %d, while c is %d\n",
- info.flash_bank, c);
-
- ptr = (struct cdma_descriptor *)info.cdma_desc_buf;
-
- for (j = 0; j < info.cdma_num; j++)
- if ((ptr[j].Status & CMD_DMA_DESC_COMP) != CMD_DMA_DESC_COMP)
- break;
-
- *p_desc_num = j; /* Pass back the descriptor number found here */
-
- if (j >= info.cdma_num) {
- printk(KERN_ERR "Can not find the correct descriptor number "
- "when ecc interrupt triggered!"
- "info.cdma_num: %d, j: %d\n", info.cdma_num, j);
- return EVENT_UNCORRECTABLE_DATA_ERROR;
- }
-
- event = do_ecc_for_desc(c, info.pcmds[ptr[j].pcmd].DataAddr,
- info.pcmds[ptr[j].pcmd].Page);
-
- if (EVENT_UNCORRECTABLE_DATA_ERROR == event) {
- printk(KERN_ERR "Uncorrectable ECC error!"
- "info.cdma_num: %d, j: %d, "
- "pending cmd CMD: 0x%x, "
- "Block: 0x%x, Page: 0x%x, PageCount: 0x%x\n",
- info.cdma_num, j,
- info.pcmds[ptr[j].pcmd].CMD,
- info.pcmds[ptr[j].pcmd].Block,
- info.pcmds[ptr[j].pcmd].Page,
- info.pcmds[ptr[j].pcmd].PageCount);
-
- if (ptr[j].pcmd != 0xff)
- info.pcmds[ptr[j].pcmd].Status = CMD_FAIL;
- CDMA_UpdateEventStatus();
- }
-
- return event;
-}
-
-static void process_prog_erase_fail_int(u16 desc_num)
-{
- struct cdma_descriptor *ptr;
-
- nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- ptr = (struct cdma_descriptor *)info.cdma_desc_buf;
-
- if (ptr[desc_num].pcmd != 0xFF)
- info.pcmds[ptr[desc_num].pcmd].Status = CMD_FAIL;
-
- CDMA_UpdateEventStatus();
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: CDMA_Event_Status (for use with CMD_DMA)
-* Inputs: none
-* Outputs: Event_Status code
-* Description: This function is called after an interrupt has happened
-* It reads the HW status register and ...tbd
-* It returns the appropriate event status
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 CDMA_Event_Status(void)
-{
- u32 ints_addr[4] = {INTR_STATUS0, INTR_STATUS1,
- INTR_STATUS2, INTR_STATUS3};
- u32 dma_intr_bit[4] = {DMA_INTR__DESC_COMP_CHANNEL0,
- DMA_INTR__DESC_COMP_CHANNEL1,
- DMA_INTR__DESC_COMP_CHANNEL2,
- DMA_INTR__DESC_COMP_CHANNEL3};
- u32 cdma_int_status, int_status;
- u32 ecc_enable = 0;
- u16 event = EVENT_PASS;
- u16 cur_desc = 0;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- ecc_enable = ioread32(FlashReg + ECC_ENABLE);
-
- while (1) {
- int_status = ioread32(FlashReg + ints_addr[info.flash_bank]);
- if (ecc_enable && (int_status & INTR_STATUS0__ECC_ERR)) {
- event = process_ecc_int(info.flash_bank, &cur_desc);
- iowrite32(INTR_STATUS0__ECC_ERR,
- FlashReg + ints_addr[info.flash_bank]);
- if (EVENT_UNCORRECTABLE_DATA_ERROR == event) {
- nand_dbg_print(NAND_DBG_WARN,
- "ints_bank0 to ints_bank3: "
- "0x%x, 0x%x, 0x%x, 0x%x, "
- "ints_cdma: 0x%x\n",
- ioread32(FlashReg + INTR_STATUS0),
- ioread32(FlashReg + INTR_STATUS1),
- ioread32(FlashReg + INTR_STATUS2),
- ioread32(FlashReg + INTR_STATUS3),
- ioread32(FlashReg + DMA_INTR));
- break;
- }
- } else if (int_status & INTR_STATUS0__PROGRAM_FAIL) {
- printk(KERN_ERR "NAND program fail interrupt!\n");
- process_prog_erase_fail_int(cur_desc);
- event = EVENT_PROGRAM_FAILURE;
- break;
- } else if (int_status & INTR_STATUS0__ERASE_FAIL) {
- printk(KERN_ERR "NAND erase fail interrupt!\n");
- process_prog_erase_fail_int(cur_desc);
- event = EVENT_ERASE_FAILURE;
- break;
- } else {
- cdma_int_status = ioread32(FlashReg + DMA_INTR);
- if (cdma_int_status & dma_intr_bit[info.flash_bank]) {
- iowrite32(dma_intr_bit[info.flash_bank],
- FlashReg + DMA_INTR);
- update_event_status();
- event = EVENT_PASS;
- break;
- }
- }
- }
-
- int_status = ioread32(FlashReg + ints_addr[info.flash_bank]);
- iowrite32(int_status, FlashReg + ints_addr[info.flash_bank]);
- cdma_int_status = ioread32(FlashReg + DMA_INTR);
- iowrite32(cdma_int_status, FlashReg + DMA_INTR);
-
- iowrite32(0, FlashReg + DMA_ENABLE);
- while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
- ;
-
- return event;
-}
-
-
-
diff --git a/drivers/staging/spectra/lld_cdma.h b/drivers/staging/spectra/lld_cdma.h
deleted file mode 100644
index 854ea066f0c4..000000000000
--- a/drivers/staging/spectra/lld_cdma.h
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * NAND Flash Controller Device Driver
- * Copyright (c) 2009, Intel Corporation and its suppliers.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- */
-
-/* header for LLD_CDMA.c module */
-
-#ifndef _LLD_CDMA_
-#define _LLD_CDMA_
-
-#include "flash.h"
-
-#define DEBUG_SYNC 1
-
-/*/////////// CDMA specific MACRO definition */
-#define MAX_DESCS (255)
-#define MAX_CHANS (4)
-#define MAX_SYNC_POINTS (16)
-#define MAX_DESC_PER_CHAN (MAX_DESCS * 3 + MAX_SYNC_POINTS + 2)
-
-#define CHANNEL_SYNC_MASK (0x000F)
-#define CHANNEL_DMA_MASK (0x00F0)
-#define CHANNEL_ID_MASK (0x0300)
-#define CHANNEL_CONT_MASK (0x4000)
-#define CHANNEL_INTR_MASK (0x8000)
-
-#define CHANNEL_SYNC_OFFSET (0)
-#define CHANNEL_DMA_OFFSET (4)
-#define CHANNEL_ID_OFFSET (8)
-#define CHANNEL_CONT_OFFSET (14)
-#define CHANNEL_INTR_OFFSET (15)
-
-u16 CDMA_Data_CMD(u8 cmd, u8 *data, u32 block, u16 page, u16 num, u16 flags);
-u16 CDMA_MemCopy_CMD(u8 *dest, u8 *src, u32 byte_cnt, u16 flags);
-u16 CDMA_Execute_CMDs(void);
-void print_pending_cmds(void);
-void print_cdma_descriptors(void);
-
-extern u8 g_SBDCmdIndex;
-extern struct mrst_nand_info info;
-
-
-/*/////////// prototypes: APIs for LLD_CDMA */
-int is_cdma_interrupt(void);
-u16 CDMA_Event_Status(void);
-
-/* CMD-DMA Descriptor Struct. These are defined by the CMD_DMA HW */
-struct cdma_descriptor {
- u32 NxtPointerHi;
- u32 NxtPointerLo;
- u32 FlashPointerHi;
- u32 FlashPointerLo;
- u32 CommandType;
- u32 MemAddrHi;
- u32 MemAddrLo;
- u32 CommandFlags;
- u32 Channel;
- u32 Status;
- u32 MemCopyPointerHi;
- u32 MemCopyPointerLo;
- u32 Reserved12;
- u32 Reserved13;
- u32 Reserved14;
- u32 pcmd; /* pending cmd num related to this descriptor */
-};
-
-/* This struct holds one MemCopy descriptor as defined by the HW */
-struct memcpy_descriptor {
- u32 NxtPointerHi;
- u32 NxtPointerLo;
- u32 SrcAddrHi;
- u32 SrcAddrLo;
- u32 DestAddrHi;
- u32 DestAddrLo;
- u32 XferSize;
- u32 MemCopyFlags;
- u32 MemCopyStatus;
- u32 reserved9;
- u32 reserved10;
- u32 reserved11;
- u32 reserved12;
- u32 reserved13;
- u32 reserved14;
- u32 reserved15;
-};
-
-/* Pending CMD table entries (includes MemCopy parameters) */
-struct pending_cmd {
- u8 CMD;
- u8 *DataAddr;
- u32 Block;
- u16 Page;
- u16 PageCount;
- u8 *DataDestAddr;
- u8 *DataSrcAddr;
- u32 MemCopyByteCnt;
- u16 Flags;
- u16 Status;
-};
-
-#if DEBUG_SYNC
-extern u32 debug_sync_cnt;
-#endif
-
-/* Definitions for CMD DMA descriptor chain fields */
-#define CMD_DMA_DESC_COMP 0x8000
-#define CMD_DMA_DESC_FAIL 0x4000
-
-#endif /*_LLD_CDMA_*/
diff --git a/drivers/staging/spectra/lld_emu.c b/drivers/staging/spectra/lld_emu.c
deleted file mode 100644
index 095f2f0c2e5b..000000000000
--- a/drivers/staging/spectra/lld_emu.c
+++ /dev/null
@@ -1,776 +0,0 @@
-/*
- * NAND Flash Controller Device Driver
- * Copyright (c) 2009, Intel Corporation and its suppliers.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- */
-
-#include <linux/fs.h>
-#include <linux/slab.h>
-#include "flash.h"
-#include "ffsdefs.h"
-#include "lld_emu.h"
-#include "lld.h"
-#if CMD_DMA
-#include "lld_cdma.h"
-#if FLASH_EMU
-u32 totalUsedBanks;
-u32 valid_banks[MAX_CHANS];
-#endif
-#endif
-
-#define GLOB_LLD_PAGES 64
-#define GLOB_LLD_PAGE_SIZE (512+16)
-#define GLOB_LLD_PAGE_DATA_SIZE 512
-#define GLOB_LLD_BLOCKS 2048
-
-#if FLASH_EMU /* This is for entire module */
-
-static u8 *flash_memory[GLOB_LLD_BLOCKS * GLOB_LLD_PAGES];
-
-/* Read the nand emu file and fill its contents into flash_memory */
-int emu_load_file_to_mem(void)
-{
- mm_segment_t fs;
- struct file *nef_filp = NULL;
- struct inode *inode = NULL;
- loff_t nef_size = 0;
- loff_t tmp_file_offset, file_offset;
- ssize_t nread;
- int i, rc = -EINVAL;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- fs = get_fs();
- set_fs(get_ds());
-
- nef_filp = filp_open("/root/nand_emu_file", O_RDWR | O_LARGEFILE, 0);
- if (IS_ERR(nef_filp)) {
- printk(KERN_ERR "filp_open error: "
- "Unable to open nand emu file!\n");
- return PTR_ERR(nef_filp);
- }
-
- if (nef_filp->f_path.dentry) {
- inode = nef_filp->f_path.dentry->d_inode;
- } else {
- printk(KERN_ERR "Can not get valid inode!\n");
- goto out;
- }
-
- nef_size = i_size_read(inode->i_mapping->host);
- if (nef_size <= 0) {
- printk(KERN_ERR "Invalid nand emu file size: "
- "0x%llx\n", nef_size);
- goto out;
- } else {
- nand_dbg_print(NAND_DBG_DEBUG, "nand emu file size: %lld\n",
- nef_size);
- }
-
- file_offset = 0;
- for (i = 0; i < GLOB_LLD_BLOCKS * GLOB_LLD_PAGES; i++) {
- tmp_file_offset = file_offset;
- nread = vfs_read(nef_filp,
- (char __user *)flash_memory[i],
- GLOB_LLD_PAGE_SIZE, &tmp_file_offset);
- if (nread < GLOB_LLD_PAGE_SIZE) {
- printk(KERN_ERR "%s, Line %d - "
- "nand emu file partial read: "
- "%d bytes\n", __FILE__, __LINE__, (int)nread);
- goto out;
- }
- file_offset += GLOB_LLD_PAGE_SIZE;
- }
- rc = 0;
-
-out:
- filp_close(nef_filp, current->files);
- set_fs(fs);
- return rc;
-}
-
-/* Write contents of flash_memory to nand emu file */
-int emu_write_mem_to_file(void)
-{
- mm_segment_t fs;
- struct file *nef_filp = NULL;
- struct inode *inode = NULL;
- loff_t nef_size = 0;
- loff_t tmp_file_offset, file_offset;
- ssize_t nwritten;
- int i, rc = -EINVAL;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- fs = get_fs();
- set_fs(get_ds());
-
- nef_filp = filp_open("/root/nand_emu_file", O_RDWR | O_LARGEFILE, 0);
- if (IS_ERR(nef_filp)) {
- printk(KERN_ERR "filp_open error: "
- "Unable to open nand emu file!\n");
- return PTR_ERR(nef_filp);
- }
-
- if (nef_filp->f_path.dentry) {
- inode = nef_filp->f_path.dentry->d_inode;
- } else {
- printk(KERN_ERR "Invalid " "nef_filp->f_path.dentry value!\n");
- goto out;
- }
-
- nef_size = i_size_read(inode->i_mapping->host);
- if (nef_size <= 0) {
- printk(KERN_ERR "Invalid "
- "nand emu file size: 0x%llx\n", nef_size);
- goto out;
- } else {
- nand_dbg_print(NAND_DBG_DEBUG, "nand emu file size: "
- "%lld\n", nef_size);
- }
-
- file_offset = 0;
- for (i = 0; i < GLOB_LLD_BLOCKS * GLOB_LLD_PAGES; i++) {
- tmp_file_offset = file_offset;
- nwritten = vfs_write(nef_filp,
- (char __user *)flash_memory[i],
- GLOB_LLD_PAGE_SIZE, &tmp_file_offset);
- if (nwritten < GLOB_LLD_PAGE_SIZE) {
- printk(KERN_ERR "%s, Line %d - "
- "nand emu file partial write: "
- "%d bytes\n", __FILE__, __LINE__, (int)nwritten);
- goto out;
- }
- file_offset += GLOB_LLD_PAGE_SIZE;
- }
- rc = 0;
-
-out:
- filp_close(nef_filp, current->files);
- set_fs(fs);
- return rc;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: emu_Flash_Init
-* Inputs: none
-* Outputs: PASS=0 (notice 0=ok here)
-* Description: Creates & initializes the flash RAM array.
-*
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 emu_Flash_Init(void)
-{
- int i;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- flash_memory[0] = vmalloc(GLOB_LLD_PAGE_SIZE * GLOB_LLD_BLOCKS *
- GLOB_LLD_PAGES * sizeof(u8));
- if (!flash_memory[0]) {
- printk(KERN_ERR "Fail to allocate memory "
- "for nand emulator!\n");
- return ERR;
- }
-
- memset((char *)(flash_memory[0]), 0xFF,
- GLOB_LLD_PAGE_SIZE * GLOB_LLD_BLOCKS * GLOB_LLD_PAGES *
- sizeof(u8));
-
- for (i = 1; i < GLOB_LLD_BLOCKS * GLOB_LLD_PAGES; i++)
- flash_memory[i] = flash_memory[i - 1] + GLOB_LLD_PAGE_SIZE;
-
- emu_load_file_to_mem(); /* Load nand emu file to mem */
-
- return PASS;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: emu_Flash_Release
-* Inputs: none
-* Outputs: PASS=0 (notice 0=ok here)
-* Description: Releases the flash.
-*
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-int emu_Flash_Release(void)
-{
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- emu_write_mem_to_file(); /* Write back mem to nand emu file */
-
- vfree(flash_memory[0]);
- return PASS;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: emu_Read_Device_ID
-* Inputs: none
-* Outputs: PASS=0 FAIL=1
-* Description: Reads the info from the controller registers.
-* Sets up DeviceInfo structure with device parameters
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-
-u16 emu_Read_Device_ID(void)
-{
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- DeviceInfo.wDeviceMaker = 0;
- DeviceInfo.wDeviceType = 8;
- DeviceInfo.wSpectraStartBlock = 36;
- DeviceInfo.wSpectraEndBlock = GLOB_LLD_BLOCKS - 1;
- DeviceInfo.wTotalBlocks = GLOB_LLD_BLOCKS;
- DeviceInfo.wPagesPerBlock = GLOB_LLD_PAGES;
- DeviceInfo.wPageSize = GLOB_LLD_PAGE_SIZE;
- DeviceInfo.wPageDataSize = GLOB_LLD_PAGE_DATA_SIZE;
- DeviceInfo.wPageSpareSize = GLOB_LLD_PAGE_SIZE -
- GLOB_LLD_PAGE_DATA_SIZE;
- DeviceInfo.wBlockSize = DeviceInfo.wPageSize * GLOB_LLD_PAGES;
- DeviceInfo.wBlockDataSize = DeviceInfo.wPageDataSize * GLOB_LLD_PAGES;
- DeviceInfo.wDataBlockNum = (u32) (DeviceInfo.wSpectraEndBlock -
- DeviceInfo.wSpectraStartBlock
- + 1);
- DeviceInfo.MLCDevice = 1; /* Emulate MLC device */
- DeviceInfo.nBitsInPageNumber =
- (u8)GLOB_Calc_Used_Bits(DeviceInfo.wPagesPerBlock);
- DeviceInfo.nBitsInPageDataSize =
- (u8)GLOB_Calc_Used_Bits(DeviceInfo.wPageDataSize);
- DeviceInfo.nBitsInBlockDataSize =
- (u8)GLOB_Calc_Used_Bits(DeviceInfo.wBlockDataSize);
-
-#if CMD_DMA
- totalUsedBanks = 4;
- valid_banks[0] = 1;
- valid_banks[1] = 1;
- valid_banks[2] = 1;
- valid_banks[3] = 1;
-#endif
-
- return PASS;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: emu_Flash_Reset
-* Inputs: none
-* Outputs: PASS=0 (notice 0=ok here)
-* Description: Reset the flash
-*
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 emu_Flash_Reset(void)
-{
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- return PASS;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: emu_Erase_Block
-* Inputs: Address
-* Outputs: PASS=0 (notice 0=ok here)
-* Description: Erase a block
-*
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 emu_Erase_Block(u32 block_add)
-{
- int i;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (block_add >= DeviceInfo.wTotalBlocks) {
- printk(KERN_ERR "emu_Erase_Block error! "
- "Too big block address: %d\n", block_add);
- return FAIL;
- }
-
- nand_dbg_print(NAND_DBG_DEBUG, "Erasing block %d\n",
- (int)block_add);
-
- for (i = block_add * GLOB_LLD_PAGES;
- i < ((block_add + 1) * GLOB_LLD_PAGES); i++) {
- if (flash_memory[i]) {
- memset((u8 *)(flash_memory[i]), 0xFF,
- DeviceInfo.wPageSize * sizeof(u8));
- }
- }
-
- return PASS;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: emu_Write_Page_Main
-* Inputs: Write buffer address pointer
-* Block number
-* Page number
-* Number of pages to process
-* Outputs: PASS=0 (notice 0=ok here)
-* Description: Write the data in the buffer to main area of flash
-*
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 emu_Write_Page_Main(u8 *write_data, u32 Block,
- u16 Page, u16 PageCount)
-{
- int i;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (Block >= DeviceInfo.wTotalBlocks)
- return FAIL;
-
- if (Page + PageCount > DeviceInfo.wPagesPerBlock)
- return FAIL;
-
- nand_dbg_print(NAND_DBG_DEBUG, "emu_Write_Page_Main: "
- "lba %u Page %u PageCount %u\n",
- (unsigned int)Block,
- (unsigned int)Page, (unsigned int)PageCount);
-
- for (i = 0; i < PageCount; i++) {
- if (NULL == flash_memory[Block * GLOB_LLD_PAGES + Page]) {
- printk(KERN_ERR "Run out of memory\n");
- return FAIL;
- }
- memcpy((u8 *) (flash_memory[Block * GLOB_LLD_PAGES + Page]),
- write_data, DeviceInfo.wPageDataSize);
- write_data += DeviceInfo.wPageDataSize;
- Page++;
- }
-
- return PASS;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: emu_Read_Page_Main
-* Inputs: Read buffer address pointer
-* Block number
-* Page number
-* Number of pages to process
-* Outputs: PASS=0 (notice 0=ok here)
-* Description: Read the data from the flash main area to the buffer
-*
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 emu_Read_Page_Main(u8 *read_data, u32 Block,
- u16 Page, u16 PageCount)
-{
- int i;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (Block >= DeviceInfo.wTotalBlocks)
- return FAIL;
-
- if (Page + PageCount > DeviceInfo.wPagesPerBlock)
- return FAIL;
-
- nand_dbg_print(NAND_DBG_DEBUG, "emu_Read_Page_Main: "
- "lba %u Page %u PageCount %u\n",
- (unsigned int)Block,
- (unsigned int)Page, (unsigned int)PageCount);
-
- for (i = 0; i < PageCount; i++) {
- if (NULL == flash_memory[Block * GLOB_LLD_PAGES + Page]) {
- memset(read_data, 0xFF, DeviceInfo.wPageDataSize);
- } else {
- memcpy(read_data,
- (u8 *) (flash_memory[Block * GLOB_LLD_PAGES
- + Page]),
- DeviceInfo.wPageDataSize);
- }
- read_data += DeviceInfo.wPageDataSize;
- Page++;
- }
-
- return PASS;
-}
-
-#ifndef ELDORA
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: emu_Read_Page_Main_Spare
-* Inputs: Read buffer
-* Address
-* Buffer size
-* Outputs: PASS=0 (notice 0=ok here)
-* Description: Read from flash main+spare area
-*
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 emu_Read_Page_Main_Spare(u8 *read_data, u32 Block,
- u16 Page, u16 PageCount)
-{
- int i;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (Block >= DeviceInfo.wTotalBlocks) {
- printk(KERN_ERR "Read Page Main+Spare "
- "Error: Block Address too big\n");
- return FAIL;
- }
-
- if (Page + PageCount > DeviceInfo.wPagesPerBlock) {
- printk(KERN_ERR "Read Page Main+Spare "
- "Error: Page number too big\n");
- return FAIL;
- }
-
- nand_dbg_print(NAND_DBG_DEBUG, "Read Page Main + Spare - "
- "No. of pages %u block %u start page %u\n",
- (unsigned int)PageCount,
- (unsigned int)Block, (unsigned int)Page);
-
- for (i = 0; i < PageCount; i++) {
- if (NULL == flash_memory[Block * GLOB_LLD_PAGES + Page]) {
- memset(read_data, 0xFF, DeviceInfo.wPageSize);
- } else {
- memcpy(read_data, (u8 *) (flash_memory[Block *
- GLOB_LLD_PAGES
- + Page]),
- DeviceInfo.wPageSize);
- }
-
- read_data += DeviceInfo.wPageSize;
- Page++;
- }
-
- return PASS;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: emu_Write_Page_Main_Spare
-* Inputs: Write buffer
-* address
-* buffer length
-* Outputs: PASS=0 (notice 0=ok here)
-* Description: Write the buffer to main+spare area of flash
-*
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 emu_Write_Page_Main_Spare(u8 *write_data, u32 Block,
- u16 Page, u16 page_count)
-{
- u16 i;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (Block >= DeviceInfo.wTotalBlocks) {
- printk(KERN_ERR "Write Page Main + Spare "
- "Error: Block Address too big\n");
- return FAIL;
- }
-
- if (Page + page_count > DeviceInfo.wPagesPerBlock) {
- printk(KERN_ERR "Write Page Main + Spare "
- "Error: Page number too big\n");
- return FAIL;
- }
-
- nand_dbg_print(NAND_DBG_DEBUG, "Write Page Main+Spare - "
- "No. of pages %u block %u start page %u\n",
- (unsigned int)page_count,
- (unsigned int)Block, (unsigned int)Page);
-
- for (i = 0; i < page_count; i++) {
- if (NULL == flash_memory[Block * GLOB_LLD_PAGES + Page]) {
- printk(KERN_ERR "Run out of memory!\n");
- return FAIL;
- }
- memcpy((u8 *) (flash_memory[Block * GLOB_LLD_PAGES + Page]),
- write_data, DeviceInfo.wPageSize);
- write_data += DeviceInfo.wPageSize;
- Page++;
- }
-
- return PASS;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: emu_Write_Page_Spare
-* Inputs: Write buffer
-* Address
-* buffer size
-* Outputs: PASS=0 (notice 0=ok here)
-* Description: Write the buffer in the spare area
-*
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 emu_Write_Page_Spare(u8 *write_data, u32 Block,
- u16 Page, u16 PageCount)
-{
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (Block >= DeviceInfo.wTotalBlocks) {
- printk(KERN_ERR "Read Page Spare Error: "
- "Block Address too big\n");
- return FAIL;
- }
-
- if (Page + PageCount > DeviceInfo.wPagesPerBlock) {
- printk(KERN_ERR "Read Page Spare Error: "
- "Page number too big\n");
- return FAIL;
- }
-
- nand_dbg_print(NAND_DBG_DEBUG, "Write Page Spare- "
- "block %u page %u\n",
- (unsigned int)Block, (unsigned int)Page);
-
- if (NULL == flash_memory[Block * GLOB_LLD_PAGES + Page]) {
- printk(KERN_ERR "Run out of memory!\n");
- return FAIL;
- }
-
- memcpy((u8 *) (flash_memory[Block * GLOB_LLD_PAGES + Page] +
- DeviceInfo.wPageDataSize), write_data,
- (DeviceInfo.wPageSize - DeviceInfo.wPageDataSize));
-
- return PASS;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: emu_Read_Page_Spare
-* Inputs: Write Buffer
-* Address
-* Buffer size
-* Outputs: PASS=0 (notice 0=ok here)
-* Description: Read data from the spare area
-*
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 emu_Read_Page_Spare(u8 *write_data, u32 Block,
- u16 Page, u16 PageCount)
-{
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (Block >= DeviceInfo.wTotalBlocks) {
- printk(KERN_ERR "Read Page Spare "
- "Error: Block Address too big\n");
- return FAIL;
- }
-
- if (Page + PageCount > DeviceInfo.wPagesPerBlock) {
- printk(KERN_ERR "Read Page Spare "
- "Error: Page number too big\n");
- return FAIL;
- }
-
- nand_dbg_print(NAND_DBG_DEBUG, "Read Page Spare- "
- "block %u page %u\n",
- (unsigned int)Block, (unsigned int)Page);
-
- if (NULL == flash_memory[Block * GLOB_LLD_PAGES + Page]) {
- memset(write_data, 0xFF,
- (DeviceInfo.wPageSize - DeviceInfo.wPageDataSize));
- } else {
- memcpy(write_data,
- (u8 *) (flash_memory[Block * GLOB_LLD_PAGES + Page]
- + DeviceInfo.wPageDataSize),
- (DeviceInfo.wPageSize - DeviceInfo.wPageDataSize));
- }
-
- return PASS;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: emu_Enable_Disable_Interrupts
-* Inputs: enable or disable
-* Outputs: none
-* Description: NOP
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-void emu_Enable_Disable_Interrupts(u16 INT_ENABLE)
-{
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-}
-
-u16 emu_Get_Bad_Block(u32 block)
-{
- return 0;
-}
-
-#if CMD_DMA
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Support for CDMA functions
-************************************
-* emu_CDMA_Flash_Init
-* CDMA_process_data command (use LLD_CDMA)
-* CDMA_MemCopy_CMD (use LLD_CDMA)
-* emu_CDMA_Execute_CMDs (execute all pending commands)
-* emu_CDMA_Event_Status
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 emu_CDMA_Flash_Init(void)
-{
- u16 i;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- for (i = 0; i < MAX_DESCS + MAX_CHANS; i++) {
- PendingCMD[i].CMD = 0;
- PendingCMD[i].Tag = 0;
- PendingCMD[i].DataAddr = 0;
- PendingCMD[i].Block = 0;
- PendingCMD[i].Page = 0;
- PendingCMD[i].PageCount = 0;
- PendingCMD[i].DataDestAddr = 0;
- PendingCMD[i].DataSrcAddr = 0;
- PendingCMD[i].MemCopyByteCnt = 0;
- PendingCMD[i].ChanSync[0] = 0;
- PendingCMD[i].ChanSync[1] = 0;
- PendingCMD[i].ChanSync[2] = 0;
- PendingCMD[i].ChanSync[3] = 0;
- PendingCMD[i].ChanSync[4] = 0;
- PendingCMD[i].Status = 3;
- }
-
- return PASS;
-}
-
-static void emu_isr(int irq, void *dev_id)
-{
- /* TODO: ... */
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: CDMA_Execute_CMDs
-* Inputs: tag_count: the number of pending cmds to do
-* Outputs: PASS/FAIL
-* Description: execute each command in the pending CMD array
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 emu_CDMA_Execute_CMDs(u16 tag_count)
-{
- u16 i, j;
- u8 CMD; /* cmd parameter */
- u8 *data;
- u32 block;
- u16 page;
- u16 count;
- u16 status = PASS;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- nand_dbg_print(NAND_DBG_TRACE, "At start of Execute CMDs: "
- "Tag Count %u\n", tag_count);
-
- for (i = 0; i < totalUsedBanks; i++) {
- PendingCMD[i].CMD = DUMMY_CMD;
- PendingCMD[i].Tag = 0xFF;
- PendingCMD[i].Block =
- (DeviceInfo.wTotalBlocks / totalUsedBanks) * i;
-
- for (j = 0; j <= MAX_CHANS; j++)
- PendingCMD[i].ChanSync[j] = 0;
- }
-
- CDMA_Execute_CMDs(tag_count);
-
- print_pending_cmds(tag_count);
-
-#if DEBUG_SYNC
- }
- debug_sync_cnt++;
-#endif
-
- for (i = MAX_CHANS;
- i < tag_count + MAX_CHANS; i++) {
- CMD = PendingCMD[i].CMD;
- data = PendingCMD[i].DataAddr;
- block = PendingCMD[i].Block;
- page = PendingCMD[i].Page;
- count = PendingCMD[i].PageCount;
-
- switch (CMD) {
- case ERASE_CMD:
- emu_Erase_Block(block);
- PendingCMD[i].Status = PASS;
- break;
- case WRITE_MAIN_CMD:
- emu_Write_Page_Main(data, block, page, count);
- PendingCMD[i].Status = PASS;
- break;
- case WRITE_MAIN_SPARE_CMD:
- emu_Write_Page_Main_Spare(data, block, page, count);
- PendingCMD[i].Status = PASS;
- break;
- case READ_MAIN_CMD:
- emu_Read_Page_Main(data, block, page, count);
- PendingCMD[i].Status = PASS;
- break;
- case MEMCOPY_CMD:
- memcpy(PendingCMD[i].DataDestAddr,
- PendingCMD[i].DataSrcAddr,
- PendingCMD[i].MemCopyByteCnt);
- case DUMMY_CMD:
- PendingCMD[i].Status = PASS;
- break;
- default:
- PendingCMD[i].Status = FAIL;
- break;
- }
- }
-
- /*
- * Temporary code to reset the PendingCMD array for basic testing.
- * It should be done at the end of the event status function.
- */
- for (i = tag_count + MAX_CHANS; i < MAX_DESCS; i++) {
- PendingCMD[i].CMD = 0;
- PendingCMD[i].Tag = 0;
- PendingCMD[i].DataAddr = 0;
- PendingCMD[i].Block = 0;
- PendingCMD[i].Page = 0;
- PendingCMD[i].PageCount = 0;
- PendingCMD[i].DataDestAddr = 0;
- PendingCMD[i].DataSrcAddr = 0;
- PendingCMD[i].MemCopyByteCnt = 0;
- PendingCMD[i].ChanSync[0] = 0;
- PendingCMD[i].ChanSync[1] = 0;
- PendingCMD[i].ChanSync[2] = 0;
- PendingCMD[i].ChanSync[3] = 0;
- PendingCMD[i].ChanSync[4] = 0;
- PendingCMD[i].Status = CMD_NOT_DONE;
- }
-
- nand_dbg_print(NAND_DBG_TRACE, "At end of Execute CMDs.\n");
-
- emu_isr(0, 0); /* This is a null ISR for now; it needs to be filled in later */
-
- return status;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: emu_Event_Status
-* Inputs: none
-* Outputs: Event_Status code
-* Description: This function can also be used to force errors
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 emu_CDMA_Event_Status(void)
-{
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- return EVENT_PASS;
-}
-
-#endif /* CMD_DMA */
-#endif /* !ELDORA */
-#endif /* FLASH_EMU */
diff --git a/drivers/staging/spectra/lld_emu.h b/drivers/staging/spectra/lld_emu.h
deleted file mode 100644
index 63f84c38d3c1..000000000000
--- a/drivers/staging/spectra/lld_emu.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * NAND Flash Controller Device Driver
- * Copyright (c) 2009, Intel Corporation and its suppliers.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- */
-
-#ifndef _LLD_EMU_
-#define _LLD_EMU_
-
-#include "ffsport.h"
-#include "ffsdefs.h"
-
-/* prototypes: emulator API functions */
-extern u16 emu_Flash_Reset(void);
-extern u16 emu_Flash_Init(void);
-extern int emu_Flash_Release(void);
-extern u16 emu_Read_Device_ID(void);
-extern u16 emu_Erase_Block(u32 block_addr);
-extern u16 emu_Write_Page_Main(u8 *write_data, u32 Block,
- u16 Page, u16 PageCount);
-extern u16 emu_Read_Page_Main(u8 *read_data, u32 Block, u16 Page,
- u16 PageCount);
-extern u16 emu_Event_Status(void);
-extern void emu_Enable_Disable_Interrupts(u16 INT_ENABLE);
-extern u16 emu_Write_Page_Main_Spare(u8 *write_data, u32 Block,
- u16 Page, u16 PageCount);
-extern u16 emu_Write_Page_Spare(u8 *write_data, u32 Block,
- u16 Page, u16 PageCount);
-extern u16 emu_Read_Page_Main_Spare(u8 *read_data, u32 Block,
- u16 Page, u16 PageCount);
-extern u16 emu_Read_Page_Spare(u8 *read_data, u32 Block, u16 Page,
- u16 PageCount);
-extern u16 emu_Get_Bad_Block(u32 block);
-
-u16 emu_CDMA_Flash_Init(void);
-u16 emu_CDMA_Execute_CMDs(u16 tag_count);
-u16 emu_CDMA_Event_Status(void);
-#endif /*_LLD_EMU_*/
diff --git a/drivers/staging/spectra/lld_mtd.c b/drivers/staging/spectra/lld_mtd.c
deleted file mode 100644
index a9c309a167c2..000000000000
--- a/drivers/staging/spectra/lld_mtd.c
+++ /dev/null
@@ -1,683 +0,0 @@
-/*
- * NAND Flash Controller Device Driver
- * Copyright (c) 2009, Intel Corporation and its suppliers.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- */
-
-#include <linux/fs.h>
-#include <linux/slab.h>
-#include <linux/mtd/mtd.h>
-#include "flash.h"
-#include "ffsdefs.h"
-#include "lld_emu.h"
-#include "lld.h"
-#if CMD_DMA
-#include "lld_cdma.h"
-u32 totalUsedBanks;
-u32 valid_banks[MAX_CHANS];
-#endif
-
-#define GLOB_LLD_PAGES 64
-#define GLOB_LLD_PAGE_SIZE (512+16)
-#define GLOB_LLD_PAGE_DATA_SIZE 512
-#define GLOB_LLD_BLOCKS 2048
-
-static struct mtd_info *spectra_mtd;
-static int mtddev = -1;
-module_param(mtddev, int, 0);
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: mtd_Flash_Init
-* Inputs: none
-* Outputs: PASS=0 (notice 0=ok here)
-* Description: Creates & initializes the flash RAM array.
-*
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 mtd_Flash_Init(void)
-{
- if (mtddev == -1) {
- printk(KERN_ERR "No MTD device specified. Give mtddev parameter\n");
- return FAIL;
- }
-
- spectra_mtd = get_mtd_device(NULL, mtddev);
- if (!spectra_mtd) {
- printk(KERN_ERR "Failed to obtain MTD device #%d\n", mtddev);
- return FAIL;
- }
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- return PASS;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: mtd_Flash_Release
-* Inputs: none
-* Outputs: PASS=0 (notice 0=ok here)
-* Description: Releases the flash.
-*
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-int mtd_Flash_Release(void)
-{
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
- if (!spectra_mtd)
- return PASS;
-
- put_mtd_device(spectra_mtd);
- spectra_mtd = NULL;
-
- return PASS;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: mtd_Read_Device_ID
-* Inputs: none
-* Outputs: PASS=0 FAIL=1
-* Description: Reads the info from the controller registers.
-* Sets up DeviceInfo structure with device parameters
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-
-u16 mtd_Read_Device_ID(void)
-{
- uint64_t tmp;
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (!spectra_mtd)
- return FAIL;
-
- DeviceInfo.wDeviceMaker = 0;
- DeviceInfo.wDeviceType = 8;
- DeviceInfo.wSpectraStartBlock = SPECTRA_START_BLOCK;
- tmp = spectra_mtd->size;
- do_div(tmp, spectra_mtd->erasesize);
- DeviceInfo.wTotalBlocks = tmp;
- DeviceInfo.wSpectraEndBlock = DeviceInfo.wTotalBlocks - 1;
- DeviceInfo.wPagesPerBlock = spectra_mtd->erasesize / spectra_mtd->writesize;
- DeviceInfo.wPageSize = spectra_mtd->writesize + spectra_mtd->oobsize;
- DeviceInfo.wPageDataSize = spectra_mtd->writesize;
- DeviceInfo.wPageSpareSize = spectra_mtd->oobsize;
- DeviceInfo.wBlockSize = DeviceInfo.wPageSize * DeviceInfo.wPagesPerBlock;
- DeviceInfo.wBlockDataSize = DeviceInfo.wPageDataSize * DeviceInfo.wPagesPerBlock;
- DeviceInfo.wDataBlockNum = (u32) (DeviceInfo.wSpectraEndBlock -
- DeviceInfo.wSpectraStartBlock
- + 1);
- DeviceInfo.MLCDevice = 0;//spectra_mtd->celltype & NAND_CI_CELLTYPE_MSK;
- DeviceInfo.nBitsInPageNumber =
- (u8)GLOB_Calc_Used_Bits(DeviceInfo.wPagesPerBlock);
- DeviceInfo.nBitsInPageDataSize =
- (u8)GLOB_Calc_Used_Bits(DeviceInfo.wPageDataSize);
- DeviceInfo.nBitsInBlockDataSize =
- (u8)GLOB_Calc_Used_Bits(DeviceInfo.wBlockDataSize);
-
-#if CMD_DMA
- totalUsedBanks = 4;
- valid_banks[0] = 1;
- valid_banks[1] = 1;
- valid_banks[2] = 1;
- valid_banks[3] = 1;
-#endif
-
- return PASS;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: mtd_Flash_Reset
-* Inputs: none
-* Outputs: PASS=0 (notice 0=ok here)
-* Description: Reset the flash
-*
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 mtd_Flash_Reset(void)
-{
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- return PASS;
-}
-
-void erase_callback(struct erase_info *e)
-{
- complete((void *)e->priv);
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: mtd_Erase_Block
-* Inputs: Address
-* Outputs: PASS=0 (notice 0=ok here)
-* Description: Erase a block
-*
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 mtd_Erase_Block(u32 block_add)
-{
- struct erase_info erase;
- DECLARE_COMPLETION_ONSTACK(comp);
- int ret;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (block_add >= DeviceInfo.wTotalBlocks) {
- printk(KERN_ERR "mtd_Erase_Block error! "
- "Too big block address: %d\n", block_add);
- return FAIL;
- }
-
- nand_dbg_print(NAND_DBG_DEBUG, "Erasing block %d\n",
- (int)block_add);
-
- erase.mtd = spectra_mtd;
- erase.callback = erase_callback;
- erase.addr = block_add * spectra_mtd->erasesize;
- erase.len = spectra_mtd->erasesize;
- erase.priv = (unsigned long)&comp;
-
- ret = spectra_mtd->erase(spectra_mtd, &erase);
- if (!ret) {
- wait_for_completion(&comp);
- if (erase.state != MTD_ERASE_DONE)
- ret = -EIO;
- }
- if (ret) {
- printk(KERN_WARNING "mtd_Erase_Block error! "
- "erase of region [0x%llx, 0x%llx] failed\n",
- erase.addr, erase.len);
- return FAIL;
- }
-
- return PASS;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: mtd_Write_Page_Main
-* Inputs: Write buffer address pointer
-* Block number
-* Page number
-* Number of pages to process
-* Outputs: PASS=0 (notice 0=ok here)
-* Description: Write the data in the buffer to main area of flash
-*
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 mtd_Write_Page_Main(u8 *write_data, u32 Block,
- u16 Page, u16 PageCount)
-{
- size_t retlen;
- int ret = 0;
-
- if (Block >= DeviceInfo.wTotalBlocks)
- return FAIL;
-
- if (Page + PageCount > DeviceInfo.wPagesPerBlock)
- return FAIL;
-
- nand_dbg_print(NAND_DBG_DEBUG, "mtd_Write_Page_Main: "
- "lba %u Page %u PageCount %u\n",
- (unsigned int)Block,
- (unsigned int)Page, (unsigned int)PageCount);
-
-
- while (PageCount) {
- ret = spectra_mtd->write(spectra_mtd,
- (Block * spectra_mtd->erasesize) + (Page * spectra_mtd->writesize),
- DeviceInfo.wPageDataSize, &retlen, write_data);
- if (ret) {
- printk(KERN_ERR "%s failed %d\n", __func__, ret);
- return FAIL;
- }
- write_data += DeviceInfo.wPageDataSize;
- Page++;
- PageCount--;
- }
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- return PASS;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: mtd_Read_Page_Main
-* Inputs: Read buffer address pointer
-* Block number
-* Page number
-* Number of pages to process
-* Outputs: PASS=0 (notice 0=ok here)
-* Description: Read the data from the flash main area to the buffer
-*
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 mtd_Read_Page_Main(u8 *read_data, u32 Block,
- u16 Page, u16 PageCount)
-{
- size_t retlen;
- int ret = 0;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (Block >= DeviceInfo.wTotalBlocks)
- return FAIL;
-
- if (Page + PageCount > DeviceInfo.wPagesPerBlock)
- return FAIL;
-
- nand_dbg_print(NAND_DBG_DEBUG, "mtd_Read_Page_Main: "
- "lba %u Page %u PageCount %u\n",
- (unsigned int)Block,
- (unsigned int)Page, (unsigned int)PageCount);
-
-
- while (PageCount) {
- ret = spectra_mtd->read(spectra_mtd,
- (Block * spectra_mtd->erasesize) + (Page * spectra_mtd->writesize),
- DeviceInfo.wPageDataSize, &retlen, read_data);
- if (ret) {
- printk(KERN_ERR "%s failed %d\n", __func__, ret);
- return FAIL;
- }
- read_data += DeviceInfo.wPageDataSize;
- Page++;
- PageCount--;
- }
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- return PASS;
-}
-
-#ifndef ELDORA
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: mtd_Read_Page_Main_Spare
-* Inputs: Write Buffer
-* Address
-* Buffer size
-* Outputs: PASS=0 (notice 0=ok here)
-* Description: Read from flash main+spare area
-*
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 mtd_Read_Page_Main_Spare(u8 *read_data, u32 Block,
- u16 Page, u16 PageCount)
-{
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (Block >= DeviceInfo.wTotalBlocks) {
- printk(KERN_ERR "Read Page Main+Spare "
- "Error: Block Address too big\n");
- return FAIL;
- }
-
- if (Page + PageCount > DeviceInfo.wPagesPerBlock) {
- printk(KERN_ERR "Read Page Main+Spare "
- "Error: Page number %d+%d too big in block %d\n",
- Page, PageCount, Block);
- return FAIL;
- }
-
- nand_dbg_print(NAND_DBG_DEBUG, "Read Page Main + Spare - "
- "No. of pages %u block %u start page %u\n",
- (unsigned int)PageCount,
- (unsigned int)Block, (unsigned int)Page);
-
-
- while (PageCount) {
- struct mtd_oob_ops ops;
- int ret;
-
- ops.mode = MTD_OPS_AUTO_OOB;
- ops.datbuf = read_data;
- ops.len = DeviceInfo.wPageDataSize;
- ops.oobbuf = read_data + DeviceInfo.wPageDataSize + BTSIG_OFFSET;
- ops.ooblen = BTSIG_BYTES;
- ops.ooboffs = 0;
-
- ret = spectra_mtd->read_oob(spectra_mtd,
- (Block * spectra_mtd->erasesize) + (Page * spectra_mtd->writesize),
- &ops);
- if (ret) {
- printk(KERN_ERR "%s failed %d\n", __func__, ret);
- return FAIL;
- }
- read_data += DeviceInfo.wPageSize;
- Page++;
- PageCount--;
- }
-
- return PASS;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: mtd_Write_Page_Main_Spare
-* Inputs: Write buffer
-* address
-* buffer length
-* Outputs: PASS=0 (notice 0=ok here)
-* Description: Write the buffer to main+spare area of flash
-*
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 mtd_Write_Page_Main_Spare(u8 *write_data, u32 Block,
- u16 Page, u16 page_count)
-{
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (Block >= DeviceInfo.wTotalBlocks) {
- printk(KERN_ERR "Write Page Main + Spare "
- "Error: Block Address too big\n");
- return FAIL;
- }
-
- if (Page + page_count > DeviceInfo.wPagesPerBlock) {
- printk(KERN_ERR "Write Page Main + Spare "
- "Error: Page number %d+%d too big in block %d\n",
- Page, page_count, Block);
- WARN_ON(1);
- return FAIL;
- }
-
- nand_dbg_print(NAND_DBG_DEBUG, "Write Page Main+Spare - "
- "No. of pages %u block %u start page %u\n",
- (unsigned int)page_count,
- (unsigned int)Block, (unsigned int)Page);
-
- while (page_count) {
- struct mtd_oob_ops ops;
- int ret;
-
- ops.mode = MTD_OPS_AUTO_OOB;
- ops.datbuf = write_data;
- ops.len = DeviceInfo.wPageDataSize;
- ops.oobbuf = write_data + DeviceInfo.wPageDataSize + BTSIG_OFFSET;
- ops.ooblen = BTSIG_BYTES;
- ops.ooboffs = 0;
-
- ret = spectra_mtd->write_oob(spectra_mtd,
- (Block * spectra_mtd->erasesize) + (Page * spectra_mtd->writesize),
- &ops);
- if (ret) {
- printk(KERN_ERR "%s failed %d\n", __func__, ret);
- return FAIL;
- }
- write_data += DeviceInfo.wPageSize;
- Page++;
- page_count--;
- }
-
- return PASS;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: mtd_Write_Page_Spare
-* Inputs: Write buffer
-* Address
-* buffer size
-* Outputs: PASS=0 (notice 0=ok here)
-* Description: Write the buffer in the spare area
-*
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 mtd_Write_Page_Spare(u8 *write_data, u32 Block,
- u16 Page, u16 PageCount)
-{
- WARN_ON(1);
- return FAIL;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: mtd_Read_Page_Spare
-* Inputs: Write Buffer
-* Address
-* Buffer size
-* Outputs: PASS=0 (notice 0=ok here)
-* Description: Read data from the spare area
-*
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 mtd_Read_Page_Spare(u8 *read_data, u32 Block,
- u16 Page, u16 PageCount)
-{
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (Block >= DeviceInfo.wTotalBlocks) {
- printk(KERN_ERR "Read Page Spare "
- "Error: Block Address too big\n");
- return FAIL;
- }
-
- if (Page + PageCount > DeviceInfo.wPagesPerBlock) {
- printk(KERN_ERR "Read Page Spare "
- "Error: Page number too big\n");
- return FAIL;
- }
-
- nand_dbg_print(NAND_DBG_DEBUG, "Read Page Spare- "
- "block %u page %u (%u pages)\n",
- (unsigned int)Block, (unsigned int)Page, PageCount);
-
- while (PageCount) {
- struct mtd_oob_ops ops;
- int ret;
-
- ops.mode = MTD_OPS_AUTO_OOB;
- ops.datbuf = NULL;
- ops.len = 0;
- ops.oobbuf = read_data;
- ops.ooblen = BTSIG_BYTES;
- ops.ooboffs = 0;
-
- ret = spectra_mtd->read_oob(spectra_mtd,
- (Block * spectra_mtd->erasesize) + (Page * spectra_mtd->writesize),
- &ops);
- if (ret) {
- printk(KERN_ERR "%s failed %d\n", __func__, ret);
- return FAIL;
- }
-
- read_data += DeviceInfo.wPageSize;
- Page++;
- PageCount--;
- }
-
- return PASS;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: mtd_Enable_Disable_Interrupts
-* Inputs: enable or disable
-* Outputs: none
-* Description: NOP
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-void mtd_Enable_Disable_Interrupts(u16 INT_ENABLE)
-{
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-}
-
-u16 mtd_Get_Bad_Block(u32 block)
-{
- return 0;
-}
-
-#if CMD_DMA
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Support for CDMA functions
-************************************
-* mtd_CDMA_Flash_Init
-* CDMA_process_data command (uses LLD_CDMA)
-* CDMA_MemCopy_CMD (uses LLD_CDMA)
-* mtd_CDMA_Execute_CMDs (executes all pending commands)
-* mtd_CDMA_Event_Status
-u16 mtd_CDMA_Flash_Init(void)
-{
- u16 i;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- for (i = 0; i < MAX_DESCS + MAX_CHANS; i++) {
- PendingCMD[i].CMD = 0;
- PendingCMD[i].Tag = 0;
- PendingCMD[i].DataAddr = 0;
- PendingCMD[i].Block = 0;
- PendingCMD[i].Page = 0;
- PendingCMD[i].PageCount = 0;
- PendingCMD[i].DataDestAddr = 0;
- PendingCMD[i].DataSrcAddr = 0;
- PendingCMD[i].MemCopyByteCnt = 0;
- PendingCMD[i].ChanSync[0] = 0;
- PendingCMD[i].ChanSync[1] = 0;
- PendingCMD[i].ChanSync[2] = 0;
- PendingCMD[i].ChanSync[3] = 0;
- PendingCMD[i].ChanSync[4] = 0;
- PendingCMD[i].Status = 3;
- }
-
- return PASS;
-}
-
-static void mtd_isr(int irq, void *dev_id)
-{
- /* TODO: ... */
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: mtd_CDMA_Execute_CMDs
-* Inputs: tag_count: the number of pending cmds to do
-* Outputs: PASS/FAIL
-* Description: execute each command in the pending CMD array
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 mtd_CDMA_Execute_CMDs(u16 tag_count)
-{
- u16 i, j;
- u8 CMD; /* cmd parameter */
- u8 *data;
- u32 block;
- u16 page;
- u16 count;
- u16 status = PASS;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- nand_dbg_print(NAND_DBG_TRACE, "At start of Execute CMDs: "
- "Tag Count %u\n", tag_count);
-
- for (i = 0; i < totalUsedBanks; i++) {
- PendingCMD[i].CMD = DUMMY_CMD;
- PendingCMD[i].Tag = 0xFF;
- PendingCMD[i].Block =
- (DeviceInfo.wTotalBlocks / totalUsedBanks) * i;
-
- for (j = 0; j <= MAX_CHANS; j++)
- PendingCMD[i].ChanSync[j] = 0;
- }
-
- CDMA_Execute_CMDs(tag_count);
-
-#ifdef VERBOSE
- print_pending_cmds(tag_count);
-#endif
-#if DEBUG_SYNC
- }
- debug_sync_cnt++;
-#endif
-
- for (i = MAX_CHANS;
- i < tag_count + MAX_CHANS; i++) {
- CMD = PendingCMD[i].CMD;
- data = PendingCMD[i].DataAddr;
- block = PendingCMD[i].Block;
- page = PendingCMD[i].Page;
- count = PendingCMD[i].PageCount;
-
- switch (CMD) {
- case ERASE_CMD:
- mtd_Erase_Block(block);
- PendingCMD[i].Status = PASS;
- break;
- case WRITE_MAIN_CMD:
- mtd_Write_Page_Main(data, block, page, count);
- PendingCMD[i].Status = PASS;
- break;
- case WRITE_MAIN_SPARE_CMD:
- mtd_Write_Page_Main_Spare(data, block, page, count);
- PendingCMD[i].Status = PASS;
- break;
- case READ_MAIN_CMD:
- mtd_Read_Page_Main(data, block, page, count);
- PendingCMD[i].Status = PASS;
- break;
- case MEMCOPY_CMD:
- memcpy(PendingCMD[i].DataDestAddr,
- PendingCMD[i].DataSrcAddr,
- PendingCMD[i].MemCopyByteCnt);
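-			/* fall through: MEMCOPY_CMD reuses DUMMY_CMD's PASS status */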
- case DUMMY_CMD:
- PendingCMD[i].Status = PASS;
- break;
- default:
- PendingCMD[i].Status = FAIL;
- break;
- }
- }
-
- /*
-	 * Temporary code to reset the PendingCMD array for basic testing.
-	 * It should really be done at the end of the event status function.
- */
- for (i = tag_count + MAX_CHANS; i < MAX_DESCS; i++) {
- PendingCMD[i].CMD = 0;
- PendingCMD[i].Tag = 0;
- PendingCMD[i].DataAddr = 0;
- PendingCMD[i].Block = 0;
- PendingCMD[i].Page = 0;
- PendingCMD[i].PageCount = 0;
- PendingCMD[i].DataDestAddr = 0;
- PendingCMD[i].DataSrcAddr = 0;
- PendingCMD[i].MemCopyByteCnt = 0;
- PendingCMD[i].ChanSync[0] = 0;
- PendingCMD[i].ChanSync[1] = 0;
- PendingCMD[i].ChanSync[2] = 0;
- PendingCMD[i].ChanSync[3] = 0;
- PendingCMD[i].ChanSync[4] = 0;
- PendingCMD[i].Status = CMD_NOT_DONE;
- }
-
- nand_dbg_print(NAND_DBG_TRACE, "At end of Execute CMDs.\n");
-
-	mtd_isr(0, NULL); /* Null ISR for now; needs to be filled in later */
-
- return status;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: mtd_CDMA_Event_Status
-* Inputs: none
-* Outputs: Event_Status code
-* Description: This function can also be used to force errors
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 mtd_CDMA_Event_Status(void)
-{
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- return EVENT_PASS;
-}
-
-#endif /* CMD_DMA */
-#endif /* !ELDORA */
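For reference, the block/page to byte-offset conversion that every mtd_* wrapper in the file above relies on, as a minimal sketch under the same assumptions (the helper name is illustrative, not part of the driver):

	#include <linux/mtd/mtd.h>

	/* Spectra addresses flash as (block, page); the MTD layer takes a
	 * byte offset. Each wrapper above uses this same formula; casting
	 * to loff_t avoids 32-bit overflow on large devices. */
	static loff_t spectra_to_mtd_offset(struct mtd_info *mtd,
					    u32 block, u16 page)
	{
		return (loff_t)block * mtd->erasesize +
		       (loff_t)page * mtd->writesize;
	}
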
diff --git a/drivers/staging/spectra/lld_mtd.h b/drivers/staging/spectra/lld_mtd.h
deleted file mode 100644
index 4e81ee87b53d..000000000000
--- a/drivers/staging/spectra/lld_mtd.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * NAND Flash Controller Device Driver
- * Copyright (c) 2009, Intel Corporation and its suppliers.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- */
-
-#ifndef _LLD_MTD_
-#define _LLD_MTD_
-
-#include "ffsport.h"
-#include "ffsdefs.h"
-
-/* prototypes: MTD API functions */
-extern u16 mtd_Flash_Reset(void);
-extern u16 mtd_Flash_Init(void);
-extern int mtd_Flash_Release(void);
-extern u16 mtd_Read_Device_ID(void);
-extern u16 mtd_Erase_Block(u32 block_addr);
-extern u16 mtd_Write_Page_Main(u8 *write_data, u32 Block,
- u16 Page, u16 PageCount);
-extern u16 mtd_Read_Page_Main(u8 *read_data, u32 Block, u16 Page,
- u16 PageCount);
-extern u16 mtd_Event_Status(void);
-extern void mtd_Enable_Disable_Interrupts(u16 INT_ENABLE);
-extern u16 mtd_Write_Page_Main_Spare(u8 *write_data, u32 Block,
- u16 Page, u16 PageCount);
-extern u16 mtd_Write_Page_Spare(u8 *write_data, u32 Block,
- u16 Page, u16 PageCount);
-extern u16 mtd_Read_Page_Main_Spare(u8 *read_data, u32 Block,
- u16 Page, u16 PageCount);
-extern u16 mtd_Read_Page_Spare(u8 *read_data, u32 Block, u16 Page,
- u16 PageCount);
-extern u16 mtd_Get_Bad_Block(u32 block);
-
-u16 mtd_CDMA_Flash_Init(void);
-u16 mtd_CDMA_Execute_CMDs(u16 tag_count);
-u16 mtd_CDMA_Event_Status(void);
-#endif /*_LLD_MTD_*/
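A brief usage sketch of the API declared above, assuming PASS == 0 as the per-function comments state; the function below is illustrative only, not part of the driver:

	/* Illustrative call sequence; error handling abbreviated. */
	static int spectra_rw_one_block(u32 block, u8 *buf)
	{
		if (mtd_Erase_Block(block) != PASS)
			return -EIO;
		if (mtd_Write_Page_Main(buf, block, 0, 1) != PASS)
			return -EIO;
		if (mtd_Read_Page_Main(buf, block, 0, 1) != PASS)
			return -EIO;
		return 0;
	}
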
diff --git a/drivers/staging/spectra/lld_nand.c b/drivers/staging/spectra/lld_nand.c
deleted file mode 100644
index 60a14ff26c7f..000000000000
--- a/drivers/staging/spectra/lld_nand.c
+++ /dev/null
@@ -1,2619 +0,0 @@
-/*
- * NAND Flash Controller Device Driver
- * Copyright (c) 2009, Intel Corporation and its suppliers.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- */
-
-#include "lld.h"
-#include "lld_nand.h"
-#include "lld_cdma.h"
-
-#include "spectraswconfig.h"
-#include "flash.h"
-#include "ffsdefs.h"
-
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/wait.h>
-#include <linux/mutex.h>
-
-#include "nand_regs.h"
-
-#define SPECTRA_NAND_NAME "nd"
-
-#define CEIL_DIV(X, Y) (((X)%(Y)) ? ((X)/(Y)+1) : ((X)/(Y)))
-#define MAX_PAGES_PER_RW 128
-
-#define INT_IDLE_STATE 0
-#define INT_READ_PAGE_MAIN 0x01
-#define INT_WRITE_PAGE_MAIN 0x02
-#define INT_PIPELINE_READ_AHEAD 0x04
-#define INT_PIPELINE_WRITE_AHEAD 0x08
-#define INT_MULTI_PLANE_READ 0x10
-#define INT_MULTI_PLANE_WRITE 0x11
-
-static u32 enable_ecc;
-
-struct mrst_nand_info info;
-
-int totalUsedBanks;
-u32 GLOB_valid_banks[LLD_MAX_FLASH_BANKS];
-
-void __iomem *FlashReg;
-void __iomem *FlashMem;
-
-u16 conf_parameters[] = {
- 0x0000,
- 0x0000,
- 0x01F4,
- 0x01F4,
- 0x01F4,
- 0x01F4,
- 0x0000,
- 0x0000,
- 0x0001,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0040,
- 0x0001,
- 0x000A,
- 0x000A,
- 0x000A,
- 0x0000,
- 0x0000,
- 0x0005,
- 0x0012,
- 0x000C
-};
-
-u16 NAND_Get_Bad_Block(u32 block)
-{
- u32 status = PASS;
- u32 flag_bytes = 0;
- u32 skip_bytes = DeviceInfo.wSpareSkipBytes;
- u32 page, i;
- u8 *pReadSpareBuf = buf_get_bad_block;
-
- if (enable_ecc)
- flag_bytes = DeviceInfo.wNumPageSpareFlag;
-
- for (page = 0; page < 2; page++) {
- status = NAND_Read_Page_Spare(pReadSpareBuf, block, page, 1);
- if (status != PASS)
- return READ_ERROR;
- for (i = flag_bytes; i < (flag_bytes + skip_bytes); i++)
- if (pReadSpareBuf[i] != 0xff)
- return DEFECTIVE_BLOCK;
- }
-
- for (page = 1; page < 3; page++) {
- status = NAND_Read_Page_Spare(pReadSpareBuf, block,
- DeviceInfo.wPagesPerBlock - page , 1);
- if (status != PASS)
- return READ_ERROR;
- for (i = flag_bytes; i < (flag_bytes + skip_bytes); i++)
- if (pReadSpareBuf[i] != 0xff)
- return DEFECTIVE_BLOCK;
- }
-
- return GOOD_BLOCK;
-}
-
-
-u16 NAND_Flash_Reset(void)
-{
- u32 i;
- u32 intr_status_rst_comp[4] = {INTR_STATUS0__RST_COMP,
- INTR_STATUS1__RST_COMP,
- INTR_STATUS2__RST_COMP,
- INTR_STATUS3__RST_COMP};
- u32 intr_status_time_out[4] = {INTR_STATUS0__TIME_OUT,
- INTR_STATUS1__TIME_OUT,
- INTR_STATUS2__TIME_OUT,
- INTR_STATUS3__TIME_OUT};
- u32 intr_status[4] = {INTR_STATUS0, INTR_STATUS1,
- INTR_STATUS2, INTR_STATUS3};
- u32 device_reset_banks[4] = {DEVICE_RESET__BANK0,
- DEVICE_RESET__BANK1,
- DEVICE_RESET__BANK2,
- DEVICE_RESET__BANK3};
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- for (i = 0 ; i < LLD_MAX_FLASH_BANKS; i++)
- iowrite32(intr_status_rst_comp[i] | intr_status_time_out[i],
- FlashReg + intr_status[i]);
-
- for (i = 0 ; i < LLD_MAX_FLASH_BANKS; i++) {
- iowrite32(device_reset_banks[i], FlashReg + DEVICE_RESET);
- while (!(ioread32(FlashReg + intr_status[i]) &
- (intr_status_rst_comp[i] | intr_status_time_out[i])))
- ;
- if (ioread32(FlashReg + intr_status[i]) &
- intr_status_time_out[i])
- nand_dbg_print(NAND_DBG_WARN,
- "NAND Reset operation timed out on bank %d\n", i);
- }
-
- for (i = 0; i < LLD_MAX_FLASH_BANKS; i++)
- iowrite32(intr_status_rst_comp[i] | intr_status_time_out[i],
- FlashReg + intr_status[i]);
-
- return PASS;
-}
-
-static void NAND_ONFi_Timing_Mode(u16 mode)
-{
- u16 Trea[6] = {40, 30, 25, 20, 20, 16};
- u16 Trp[6] = {50, 25, 17, 15, 12, 10};
- u16 Treh[6] = {30, 15, 15, 10, 10, 7};
- u16 Trc[6] = {100, 50, 35, 30, 25, 20};
- u16 Trhoh[6] = {0, 15, 15, 15, 15, 15};
- u16 Trloh[6] = {0, 0, 0, 0, 5, 5};
- u16 Tcea[6] = {100, 45, 30, 25, 25, 25};
- u16 Tadl[6] = {200, 100, 100, 100, 70, 70};
- u16 Trhw[6] = {200, 100, 100, 100, 100, 100};
- u16 Trhz[6] = {200, 100, 100, 100, 100, 100};
- u16 Twhr[6] = {120, 80, 80, 60, 60, 60};
- u16 Tcs[6] = {70, 35, 25, 25, 20, 15};
-
- u16 TclsRising = 1;
- u16 data_invalid_rhoh, data_invalid_rloh, data_invalid;
- u16 dv_window = 0;
- u16 en_lo, en_hi;
- u16 acc_clks;
- u16 addr_2_data, re_2_we, re_2_re, we_2_re, cs_cnt;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- en_lo = CEIL_DIV(Trp[mode], CLK_X);
- en_hi = CEIL_DIV(Treh[mode], CLK_X);
-
-#if ONFI_BLOOM_TIME
- if ((en_hi * CLK_X) < (Treh[mode] + 2))
- en_hi++;
-#endif
-
- if ((en_lo + en_hi) * CLK_X < Trc[mode])
- en_lo += CEIL_DIV((Trc[mode] - (en_lo + en_hi) * CLK_X), CLK_X);
-
- if ((en_lo + en_hi) < CLK_MULTI)
- en_lo += CLK_MULTI - en_lo - en_hi;
-
- while (dv_window < 8) {
- data_invalid_rhoh = en_lo * CLK_X + Trhoh[mode];
-
- data_invalid_rloh = (en_lo + en_hi) * CLK_X + Trloh[mode];
-
- data_invalid =
- data_invalid_rhoh <
- data_invalid_rloh ? data_invalid_rhoh : data_invalid_rloh;
-
- dv_window = data_invalid - Trea[mode];
-
- if (dv_window < 8)
- en_lo++;
- }
-
- acc_clks = CEIL_DIV(Trea[mode], CLK_X);
-
- while (((acc_clks * CLK_X) - Trea[mode]) < 3)
- acc_clks++;
-
- if ((data_invalid - acc_clks * CLK_X) < 2)
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d: Warning!\n",
- __FILE__, __LINE__);
-
- addr_2_data = CEIL_DIV(Tadl[mode], CLK_X);
- re_2_we = CEIL_DIV(Trhw[mode], CLK_X);
- re_2_re = CEIL_DIV(Trhz[mode], CLK_X);
- we_2_re = CEIL_DIV(Twhr[mode], CLK_X);
- cs_cnt = CEIL_DIV((Tcs[mode] - Trp[mode]), CLK_X);
- if (!TclsRising)
- cs_cnt = CEIL_DIV(Tcs[mode], CLK_X);
- if (cs_cnt == 0)
- cs_cnt = 1;
-
- if (Tcea[mode]) {
- while (((cs_cnt * CLK_X) + Trea[mode]) < Tcea[mode])
- cs_cnt++;
- }
-
-#if MODE5_WORKAROUND
- if (mode == 5)
- acc_clks = 5;
-#endif
-
- /* Sighting 3462430: Temporary hack for MT29F128G08CJABAWP:B */
- if ((ioread32(FlashReg + MANUFACTURER_ID) == 0) &&
- (ioread32(FlashReg + DEVICE_ID) == 0x88))
- acc_clks = 6;
-
- iowrite32(acc_clks, FlashReg + ACC_CLKS);
- iowrite32(re_2_we, FlashReg + RE_2_WE);
- iowrite32(re_2_re, FlashReg + RE_2_RE);
- iowrite32(we_2_re, FlashReg + WE_2_RE);
- iowrite32(addr_2_data, FlashReg + ADDR_2_DATA);
- iowrite32(en_lo, FlashReg + RDWR_EN_LO_CNT);
- iowrite32(en_hi, FlashReg + RDWR_EN_HI_CNT);
- iowrite32(cs_cnt, FlashReg + CS_SETUP_CNT);
-}
-
-static void index_addr(u32 address, u32 data)
-{
- iowrite32(address, FlashMem);
- iowrite32(data, FlashMem + 0x10);
-}
-
-static void index_addr_read_data(u32 address, u32 *pdata)
-{
- iowrite32(address, FlashMem);
- *pdata = ioread32(FlashMem + 0x10);
-}
-
-static void set_ecc_config(void)
-{
-#if SUPPORT_8BITECC
- if ((ioread32(FlashReg + DEVICE_MAIN_AREA_SIZE) < 4096) ||
- (ioread32(FlashReg + DEVICE_SPARE_AREA_SIZE) <= 128))
- iowrite32(8, FlashReg + ECC_CORRECTION);
-#endif
-
- if ((ioread32(FlashReg + ECC_CORRECTION) & ECC_CORRECTION__VALUE)
- == 1) {
- DeviceInfo.wECCBytesPerSector = 4;
- DeviceInfo.wECCBytesPerSector *= DeviceInfo.wDevicesConnected;
- DeviceInfo.wNumPageSpareFlag =
- DeviceInfo.wPageSpareSize -
- DeviceInfo.wPageDataSize /
- (ECC_SECTOR_SIZE * DeviceInfo.wDevicesConnected) *
- DeviceInfo.wECCBytesPerSector
- - DeviceInfo.wSpareSkipBytes;
- } else {
- DeviceInfo.wECCBytesPerSector =
- (ioread32(FlashReg + ECC_CORRECTION) &
- ECC_CORRECTION__VALUE) * 13 / 8;
- if ((DeviceInfo.wECCBytesPerSector) % 2 == 0)
- DeviceInfo.wECCBytesPerSector += 2;
- else
- DeviceInfo.wECCBytesPerSector += 1;
-
- DeviceInfo.wECCBytesPerSector *= DeviceInfo.wDevicesConnected;
- DeviceInfo.wNumPageSpareFlag = DeviceInfo.wPageSpareSize -
- DeviceInfo.wPageDataSize /
- (ECC_SECTOR_SIZE * DeviceInfo.wDevicesConnected) *
- DeviceInfo.wECCBytesPerSector
- - DeviceInfo.wSpareSkipBytes;
- }
-}
-
-static u16 get_onfi_nand_para(void)
-{
- int i;
- u16 blks_lun_l, blks_lun_h, n_of_luns;
- u32 blockperlun, id;
-
- iowrite32(DEVICE_RESET__BANK0, FlashReg + DEVICE_RESET);
-
- while (!((ioread32(FlashReg + INTR_STATUS0) &
- INTR_STATUS0__RST_COMP) |
- (ioread32(FlashReg + INTR_STATUS0) &
- INTR_STATUS0__TIME_OUT)))
- ;
-
- if (ioread32(FlashReg + INTR_STATUS0) & INTR_STATUS0__RST_COMP) {
- iowrite32(DEVICE_RESET__BANK1, FlashReg + DEVICE_RESET);
- while (!((ioread32(FlashReg + INTR_STATUS1) &
- INTR_STATUS1__RST_COMP) |
- (ioread32(FlashReg + INTR_STATUS1) &
- INTR_STATUS1__TIME_OUT)))
- ;
-
- if (ioread32(FlashReg + INTR_STATUS1) &
- INTR_STATUS1__RST_COMP) {
- iowrite32(DEVICE_RESET__BANK2,
- FlashReg + DEVICE_RESET);
- while (!((ioread32(FlashReg + INTR_STATUS2) &
- INTR_STATUS2__RST_COMP) |
- (ioread32(FlashReg + INTR_STATUS2) &
- INTR_STATUS2__TIME_OUT)))
- ;
-
- if (ioread32(FlashReg + INTR_STATUS2) &
- INTR_STATUS2__RST_COMP) {
- iowrite32(DEVICE_RESET__BANK3,
- FlashReg + DEVICE_RESET);
- while (!((ioread32(FlashReg + INTR_STATUS3) &
- INTR_STATUS3__RST_COMP) |
- (ioread32(FlashReg + INTR_STATUS3) &
- INTR_STATUS3__TIME_OUT)))
- ;
- } else {
-				printk(KERN_ERR "Timed out resetting bank 2!\n");
- }
- } else {
-			printk(KERN_ERR "Timed out resetting bank 1!\n");
- }
- }
-
- iowrite32(INTR_STATUS0__TIME_OUT, FlashReg + INTR_STATUS0);
- iowrite32(INTR_STATUS1__TIME_OUT, FlashReg + INTR_STATUS1);
- iowrite32(INTR_STATUS2__TIME_OUT, FlashReg + INTR_STATUS2);
- iowrite32(INTR_STATUS3__TIME_OUT, FlashReg + INTR_STATUS3);
-
- DeviceInfo.wONFIDevFeatures =
- ioread32(FlashReg + ONFI_DEVICE_FEATURES);
- DeviceInfo.wONFIOptCommands =
- ioread32(FlashReg + ONFI_OPTIONAL_COMMANDS);
- DeviceInfo.wONFITimingMode =
- ioread32(FlashReg + ONFI_TIMING_MODE);
- DeviceInfo.wONFIPgmCacheTimingMode =
- ioread32(FlashReg + ONFI_PGM_CACHE_TIMING_MODE);
-
- n_of_luns = ioread32(FlashReg + ONFI_DEVICE_NO_OF_LUNS) &
- ONFI_DEVICE_NO_OF_LUNS__NO_OF_LUNS;
- blks_lun_l = ioread32(FlashReg + ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L);
- blks_lun_h = ioread32(FlashReg + ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U);
-
- blockperlun = (blks_lun_h << 16) | blks_lun_l;
-
- DeviceInfo.wTotalBlocks = n_of_luns * blockperlun;
-
- if (!(ioread32(FlashReg + ONFI_TIMING_MODE) &
- ONFI_TIMING_MODE__VALUE))
- return FAIL;
-
- for (i = 5; i > 0; i--) {
- if (ioread32(FlashReg + ONFI_TIMING_MODE) & (0x01 << i))
- break;
- }
-
- NAND_ONFi_Timing_Mode(i);
-
- index_addr(MODE_11 | 0, 0x90);
- index_addr(MODE_11 | 1, 0);
-
- for (i = 0; i < 3; i++)
- index_addr_read_data(MODE_11 | 2, &id);
-
- nand_dbg_print(NAND_DBG_DEBUG, "3rd ID: 0x%x\n", id);
-
- DeviceInfo.MLCDevice = id & 0x0C;
-
-	/*
-	 * All the ONFI devices we know of support the page-cache read/write
-	 * feature, so the pipeline_rw_ahead feature could be enabled here
-	 * (left disabled for now):
-	 * iowrite32(1, FlashReg + CACHE_WRITE_ENABLE);
-	 * iowrite32(1, FlashReg + CACHE_READ_ENABLE);
-	 */
-
- return PASS;
-}
-
-static void get_samsung_nand_para(void)
-{
- u8 no_of_planes;
- u32 blk_size;
- u64 plane_size, capacity;
- u32 id_bytes[5];
- int i;
-
- index_addr((u32)(MODE_11 | 0), 0x90);
- index_addr((u32)(MODE_11 | 1), 0);
- for (i = 0; i < 5; i++)
- index_addr_read_data((u32)(MODE_11 | 2), &id_bytes[i]);
-
- nand_dbg_print(NAND_DBG_DEBUG,
- "ID bytes: 0x%x, 0x%x, 0x%x, 0x%x, 0x%x\n",
- id_bytes[0], id_bytes[1], id_bytes[2],
- id_bytes[3], id_bytes[4]);
-
- if ((id_bytes[1] & 0xff) == 0xd3) { /* Samsung K9WAG08U1A */
- /* Set timing register values according to datasheet */
- iowrite32(5, FlashReg + ACC_CLKS);
- iowrite32(20, FlashReg + RE_2_WE);
- iowrite32(12, FlashReg + WE_2_RE);
- iowrite32(14, FlashReg + ADDR_2_DATA);
- iowrite32(3, FlashReg + RDWR_EN_LO_CNT);
- iowrite32(2, FlashReg + RDWR_EN_HI_CNT);
- iowrite32(2, FlashReg + CS_SETUP_CNT);
- }
-
- no_of_planes = 1 << ((id_bytes[4] & 0x0c) >> 2);
- plane_size = (u64)64 << ((id_bytes[4] & 0x70) >> 4);
- blk_size = 64 << ((ioread32(FlashReg + DEVICE_PARAM_1) & 0x30) >> 4);
- capacity = (u64)128 * plane_size * no_of_planes;
-
- DeviceInfo.wTotalBlocks = (u32)GLOB_u64_Div(capacity, blk_size);
-}
-
-static void get_toshiba_nand_para(void)
-{
- void __iomem *scratch_reg;
- u32 tmp;
-
-	/* Workaround for a controller bug that reports a wrong spare area
-	 * size for some kinds of Toshiba NAND devices */
- if ((ioread32(FlashReg + DEVICE_MAIN_AREA_SIZE) == 4096) &&
- (ioread32(FlashReg + DEVICE_SPARE_AREA_SIZE) == 64)) {
- iowrite32(216, FlashReg + DEVICE_SPARE_AREA_SIZE);
- tmp = ioread32(FlashReg + DEVICES_CONNECTED) *
- ioread32(FlashReg + DEVICE_SPARE_AREA_SIZE);
- iowrite32(tmp, FlashReg + LOGICAL_PAGE_SPARE_SIZE);
-#if SUPPORT_15BITECC
- iowrite32(15, FlashReg + ECC_CORRECTION);
-#elif SUPPORT_8BITECC
- iowrite32(8, FlashReg + ECC_CORRECTION);
-#endif
- }
-
-	/* Toshiba NAND cannot report its total block count, so the user
-	 * must provide the correct block count in a scratch register
-	 * before the Linux NAND driver is loaded. If no valid value is
-	 * found in the scratch register, a default block count is used. */
- scratch_reg = ioremap_nocache(SCRATCH_REG_ADDR, SCRATCH_REG_SIZE);
- if (!scratch_reg) {
- printk(KERN_ERR "Spectra: ioremap failed in %s, Line %d",
- __FILE__, __LINE__);
- DeviceInfo.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
- } else {
- nand_dbg_print(NAND_DBG_WARN,
- "Spectra: ioremap reg address: 0x%p\n", scratch_reg);
- DeviceInfo.wTotalBlocks = 1 << ioread8(scratch_reg);
- if (DeviceInfo.wTotalBlocks < 512)
- DeviceInfo.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
- iounmap(scratch_reg);
- }
-}
-
-static void get_hynix_nand_para(void)
-{
- void __iomem *scratch_reg;
- u32 main_size, spare_size;
-
- switch (DeviceInfo.wDeviceID) {
- case 0xD5: /* Hynix H27UAG8T2A, H27UBG8U5A or H27UCG8VFA */
- case 0xD7: /* Hynix H27UDG8VEM, H27UCG8UDM or H27UCG8V5A */
- iowrite32(128, FlashReg + PAGES_PER_BLOCK);
- iowrite32(4096, FlashReg + DEVICE_MAIN_AREA_SIZE);
- iowrite32(224, FlashReg + DEVICE_SPARE_AREA_SIZE);
- main_size = 4096 * ioread32(FlashReg + DEVICES_CONNECTED);
- spare_size = 224 * ioread32(FlashReg + DEVICES_CONNECTED);
- iowrite32(main_size, FlashReg + LOGICAL_PAGE_DATA_SIZE);
- iowrite32(spare_size, FlashReg + LOGICAL_PAGE_SPARE_SIZE);
- iowrite32(0, FlashReg + DEVICE_WIDTH);
-#if SUPPORT_15BITECC
- iowrite32(15, FlashReg + ECC_CORRECTION);
-#elif SUPPORT_8BITECC
- iowrite32(8, FlashReg + ECC_CORRECTION);
-#endif
- DeviceInfo.MLCDevice = 1;
- break;
- default:
- nand_dbg_print(NAND_DBG_WARN,
-			"Spectra: Unknown Hynix NAND (Device ID: 0x%x). "
-			"Will use default parameter values instead.\n",
- DeviceInfo.wDeviceID);
- }
-
- scratch_reg = ioremap_nocache(SCRATCH_REG_ADDR, SCRATCH_REG_SIZE);
- if (!scratch_reg) {
- printk(KERN_ERR "Spectra: ioremap failed in %s, Line %d",
- __FILE__, __LINE__);
- DeviceInfo.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
- } else {
- nand_dbg_print(NAND_DBG_WARN,
- "Spectra: ioremap reg address: 0x%p\n", scratch_reg);
- DeviceInfo.wTotalBlocks = 1 << ioread8(scratch_reg);
- if (DeviceInfo.wTotalBlocks < 512)
- DeviceInfo.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
- iounmap(scratch_reg);
- }
-}
-
-static void find_valid_banks(void)
-{
- u32 id[LLD_MAX_FLASH_BANKS];
- int i;
-
- totalUsedBanks = 0;
- for (i = 0; i < LLD_MAX_FLASH_BANKS; i++) {
- index_addr((u32)(MODE_11 | (i << 24) | 0), 0x90);
- index_addr((u32)(MODE_11 | (i << 24) | 1), 0);
- index_addr_read_data((u32)(MODE_11 | (i << 24) | 2), &id[i]);
-
- nand_dbg_print(NAND_DBG_DEBUG,
- "Return 1st ID for bank[%d]: %x\n", i, id[i]);
-
- if (i == 0) {
- if (id[i] & 0x0ff)
- GLOB_valid_banks[i] = 1;
- } else {
- if ((id[i] & 0x0ff) == (id[0] & 0x0ff))
- GLOB_valid_banks[i] = 1;
- }
-
- totalUsedBanks += GLOB_valid_banks[i];
- }
-
- nand_dbg_print(NAND_DBG_DEBUG,
- "totalUsedBanks: %d\n", totalUsedBanks);
-}
-
-static void detect_partition_feature(void)
-{
- if (ioread32(FlashReg + FEATURES) & FEATURES__PARTITION) {
- if ((ioread32(FlashReg + PERM_SRC_ID_1) &
- PERM_SRC_ID_1__SRCID) == SPECTRA_PARTITION_ID) {
- DeviceInfo.wSpectraStartBlock =
- ((ioread32(FlashReg + MIN_MAX_BANK_1) &
- MIN_MAX_BANK_1__MIN_VALUE) *
- DeviceInfo.wTotalBlocks)
- +
- (ioread32(FlashReg + MIN_BLK_ADDR_1) &
- MIN_BLK_ADDR_1__VALUE);
-
- DeviceInfo.wSpectraEndBlock =
- (((ioread32(FlashReg + MIN_MAX_BANK_1) &
- MIN_MAX_BANK_1__MAX_VALUE) >> 2) *
- DeviceInfo.wTotalBlocks)
- +
- (ioread32(FlashReg + MAX_BLK_ADDR_1) &
- MAX_BLK_ADDR_1__VALUE);
-
- DeviceInfo.wTotalBlocks *= totalUsedBanks;
-
- if (DeviceInfo.wSpectraEndBlock >=
- DeviceInfo.wTotalBlocks) {
- DeviceInfo.wSpectraEndBlock =
- DeviceInfo.wTotalBlocks - 1;
- }
-
- DeviceInfo.wDataBlockNum =
- DeviceInfo.wSpectraEndBlock -
- DeviceInfo.wSpectraStartBlock + 1;
- } else {
- DeviceInfo.wTotalBlocks *= totalUsedBanks;
- DeviceInfo.wSpectraStartBlock = SPECTRA_START_BLOCK;
- DeviceInfo.wSpectraEndBlock =
- DeviceInfo.wTotalBlocks - 1;
- DeviceInfo.wDataBlockNum =
- DeviceInfo.wSpectraEndBlock -
- DeviceInfo.wSpectraStartBlock + 1;
- }
- } else {
- DeviceInfo.wTotalBlocks *= totalUsedBanks;
- DeviceInfo.wSpectraStartBlock = SPECTRA_START_BLOCK;
- DeviceInfo.wSpectraEndBlock = DeviceInfo.wTotalBlocks - 1;
- DeviceInfo.wDataBlockNum =
- DeviceInfo.wSpectraEndBlock -
- DeviceInfo.wSpectraStartBlock + 1;
- }
-}
-
-static void dump_device_info(void)
-{
- nand_dbg_print(NAND_DBG_DEBUG, "DeviceInfo:\n");
- nand_dbg_print(NAND_DBG_DEBUG, "DeviceMaker: 0x%x\n",
- DeviceInfo.wDeviceMaker);
- nand_dbg_print(NAND_DBG_DEBUG, "DeviceID: 0x%x\n",
- DeviceInfo.wDeviceID);
- nand_dbg_print(NAND_DBG_DEBUG, "DeviceType: 0x%x\n",
- DeviceInfo.wDeviceType);
- nand_dbg_print(NAND_DBG_DEBUG, "SpectraStartBlock: %d\n",
- DeviceInfo.wSpectraStartBlock);
- nand_dbg_print(NAND_DBG_DEBUG, "SpectraEndBlock: %d\n",
- DeviceInfo.wSpectraEndBlock);
- nand_dbg_print(NAND_DBG_DEBUG, "TotalBlocks: %d\n",
- DeviceInfo.wTotalBlocks);
- nand_dbg_print(NAND_DBG_DEBUG, "PagesPerBlock: %d\n",
- DeviceInfo.wPagesPerBlock);
- nand_dbg_print(NAND_DBG_DEBUG, "PageSize: %d\n",
- DeviceInfo.wPageSize);
- nand_dbg_print(NAND_DBG_DEBUG, "PageDataSize: %d\n",
- DeviceInfo.wPageDataSize);
- nand_dbg_print(NAND_DBG_DEBUG, "PageSpareSize: %d\n",
- DeviceInfo.wPageSpareSize);
- nand_dbg_print(NAND_DBG_DEBUG, "NumPageSpareFlag: %d\n",
- DeviceInfo.wNumPageSpareFlag);
- nand_dbg_print(NAND_DBG_DEBUG, "ECCBytesPerSector: %d\n",
- DeviceInfo.wECCBytesPerSector);
- nand_dbg_print(NAND_DBG_DEBUG, "BlockSize: %d\n",
- DeviceInfo.wBlockSize);
- nand_dbg_print(NAND_DBG_DEBUG, "BlockDataSize: %d\n",
- DeviceInfo.wBlockDataSize);
- nand_dbg_print(NAND_DBG_DEBUG, "DataBlockNum: %d\n",
- DeviceInfo.wDataBlockNum);
- nand_dbg_print(NAND_DBG_DEBUG, "PlaneNum: %d\n",
- DeviceInfo.bPlaneNum);
- nand_dbg_print(NAND_DBG_DEBUG, "DeviceMainAreaSize: %d\n",
- DeviceInfo.wDeviceMainAreaSize);
- nand_dbg_print(NAND_DBG_DEBUG, "DeviceSpareAreaSize: %d\n",
- DeviceInfo.wDeviceSpareAreaSize);
- nand_dbg_print(NAND_DBG_DEBUG, "DevicesConnected: %d\n",
- DeviceInfo.wDevicesConnected);
- nand_dbg_print(NAND_DBG_DEBUG, "DeviceWidth: %d\n",
- DeviceInfo.wDeviceWidth);
- nand_dbg_print(NAND_DBG_DEBUG, "HWRevision: 0x%x\n",
- DeviceInfo.wHWRevision);
- nand_dbg_print(NAND_DBG_DEBUG, "HWFeatures: 0x%x\n",
- DeviceInfo.wHWFeatures);
- nand_dbg_print(NAND_DBG_DEBUG, "ONFIDevFeatures: 0x%x\n",
- DeviceInfo.wONFIDevFeatures);
- nand_dbg_print(NAND_DBG_DEBUG, "ONFIOptCommands: 0x%x\n",
- DeviceInfo.wONFIOptCommands);
- nand_dbg_print(NAND_DBG_DEBUG, "ONFITimingMode: 0x%x\n",
- DeviceInfo.wONFITimingMode);
- nand_dbg_print(NAND_DBG_DEBUG, "ONFIPgmCacheTimingMode: 0x%x\n",
- DeviceInfo.wONFIPgmCacheTimingMode);
- nand_dbg_print(NAND_DBG_DEBUG, "MLCDevice: %s\n",
- DeviceInfo.MLCDevice ? "Yes" : "No");
- nand_dbg_print(NAND_DBG_DEBUG, "SpareSkipBytes: %d\n",
- DeviceInfo.wSpareSkipBytes);
- nand_dbg_print(NAND_DBG_DEBUG, "BitsInPageNumber: %d\n",
- DeviceInfo.nBitsInPageNumber);
- nand_dbg_print(NAND_DBG_DEBUG, "BitsInPageDataSize: %d\n",
- DeviceInfo.nBitsInPageDataSize);
- nand_dbg_print(NAND_DBG_DEBUG, "BitsInBlockDataSize: %d\n",
- DeviceInfo.nBitsInBlockDataSize);
-}
-
-u16 NAND_Read_Device_ID(void)
-{
- u16 status = PASS;
- u8 no_of_planes;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- iowrite32(0x02, FlashReg + SPARE_AREA_SKIP_BYTES);
- iowrite32(0xffff, FlashReg + SPARE_AREA_MARKER);
- DeviceInfo.wDeviceMaker = ioread32(FlashReg + MANUFACTURER_ID);
- DeviceInfo.wDeviceID = ioread32(FlashReg + DEVICE_ID);
- DeviceInfo.MLCDevice = ioread32(FlashReg + DEVICE_PARAM_0) & 0x0c;
-
- if (ioread32(FlashReg + ONFI_DEVICE_NO_OF_LUNS) &
- ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE) { /* ONFI 1.0 NAND */
- if (FAIL == get_onfi_nand_para())
- return FAIL;
- } else if (DeviceInfo.wDeviceMaker == 0xEC) { /* Samsung NAND */
- get_samsung_nand_para();
- } else if (DeviceInfo.wDeviceMaker == 0x98) { /* Toshiba NAND */
- get_toshiba_nand_para();
- } else if (DeviceInfo.wDeviceMaker == 0xAD) { /* Hynix NAND */
- get_hynix_nand_para();
- } else {
- DeviceInfo.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
- }
-
-	nand_dbg_print(NAND_DBG_DEBUG, "Dump timing register values: "
-			"acc_clks: %d, re_2_we: %d, we_2_re: %d, "
-			"addr_2_data: %d, rdwr_en_lo_cnt: %d, "
- "rdwr_en_hi_cnt: %d, cs_setup_cnt: %d\n",
- ioread32(FlashReg + ACC_CLKS),
- ioread32(FlashReg + RE_2_WE),
- ioread32(FlashReg + WE_2_RE),
- ioread32(FlashReg + ADDR_2_DATA),
- ioread32(FlashReg + RDWR_EN_LO_CNT),
- ioread32(FlashReg + RDWR_EN_HI_CNT),
- ioread32(FlashReg + CS_SETUP_CNT));
-
- DeviceInfo.wHWRevision = ioread32(FlashReg + REVISION);
- DeviceInfo.wHWFeatures = ioread32(FlashReg + FEATURES);
-
- DeviceInfo.wDeviceMainAreaSize =
- ioread32(FlashReg + DEVICE_MAIN_AREA_SIZE);
- DeviceInfo.wDeviceSpareAreaSize =
- ioread32(FlashReg + DEVICE_SPARE_AREA_SIZE);
-
- DeviceInfo.wPageDataSize =
- ioread32(FlashReg + LOGICAL_PAGE_DATA_SIZE);
-
-	/* Note: When using a Micron 4K NAND device, the controller reports
-	 * the page spare size as 216 bytes, while Micron's spec says it is
-	 * 218 bytes. Forcing it to 218 bytes makes the controller misbehave,
-	 * so just leave it as reported. Keep in mind that this quirk may
-	 * cause other problems in the future. - Yunpeng 2008-10-10
-	 */
- DeviceInfo.wPageSpareSize =
- ioread32(FlashReg + LOGICAL_PAGE_SPARE_SIZE);
-
- DeviceInfo.wPagesPerBlock = ioread32(FlashReg + PAGES_PER_BLOCK);
-
- DeviceInfo.wPageSize =
- DeviceInfo.wPageDataSize + DeviceInfo.wPageSpareSize;
- DeviceInfo.wBlockSize =
- DeviceInfo.wPageSize * DeviceInfo.wPagesPerBlock;
- DeviceInfo.wBlockDataSize =
- DeviceInfo.wPagesPerBlock * DeviceInfo.wPageDataSize;
-
- DeviceInfo.wDeviceWidth = ioread32(FlashReg + DEVICE_WIDTH);
- DeviceInfo.wDeviceType =
- ((ioread32(FlashReg + DEVICE_WIDTH) > 0) ? 16 : 8);
-
- DeviceInfo.wDevicesConnected = ioread32(FlashReg + DEVICES_CONNECTED);
-
- DeviceInfo.wSpareSkipBytes =
- ioread32(FlashReg + SPARE_AREA_SKIP_BYTES) *
- DeviceInfo.wDevicesConnected;
-
- DeviceInfo.nBitsInPageNumber =
- (u8)GLOB_Calc_Used_Bits(DeviceInfo.wPagesPerBlock);
- DeviceInfo.nBitsInPageDataSize =
- (u8)GLOB_Calc_Used_Bits(DeviceInfo.wPageDataSize);
- DeviceInfo.nBitsInBlockDataSize =
- (u8)GLOB_Calc_Used_Bits(DeviceInfo.wBlockDataSize);
-
- set_ecc_config();
-
- no_of_planes = ioread32(FlashReg + NUMBER_OF_PLANES) &
- NUMBER_OF_PLANES__VALUE;
-
- switch (no_of_planes) {
- case 0:
- case 1:
- case 3:
- case 7:
- DeviceInfo.bPlaneNum = no_of_planes + 1;
- break;
- default:
- status = FAIL;
- break;
- }
-
- find_valid_banks();
-
- detect_partition_feature();
-
- dump_device_info();
-
- return status;
-}
-
-u16 NAND_UnlockArrayAll(void)
-{
- u64 start_addr, end_addr;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- start_addr = 0;
- end_addr = ((u64)DeviceInfo.wBlockSize *
- (DeviceInfo.wTotalBlocks - 1)) >>
- DeviceInfo.nBitsInPageDataSize;
-
- index_addr((u32)(MODE_10 | (u32)start_addr), 0x10);
- index_addr((u32)(MODE_10 | (u32)end_addr), 0x11);
-
- return PASS;
-}
-
-void NAND_LLD_Enable_Disable_Interrupts(u16 INT_ENABLE)
-{
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (INT_ENABLE)
- iowrite32(1, FlashReg + GLOBAL_INT_ENABLE);
- else
- iowrite32(0, FlashReg + GLOBAL_INT_ENABLE);
-}
-
-u16 NAND_Erase_Block(u32 block)
-{
- u16 status = PASS;
- u64 flash_add;
- u16 flash_bank;
- u32 intr_status = 0;
- u32 intr_status_addresses[4] = {INTR_STATUS0,
- INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
- * DeviceInfo.wBlockDataSize;
-
- flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
-
- if (block >= DeviceInfo.wTotalBlocks)
- status = FAIL;
-
- if (status == PASS) {
- intr_status = intr_status_addresses[flash_bank];
-
- iowrite32(INTR_STATUS0__ERASE_COMP | INTR_STATUS0__ERASE_FAIL,
- FlashReg + intr_status);
-
- index_addr((u32)(MODE_10 | (flash_bank << 24) |
- (flash_add >> DeviceInfo.nBitsInPageDataSize)), 1);
-
- while (!(ioread32(FlashReg + intr_status) &
- (INTR_STATUS0__ERASE_COMP | INTR_STATUS0__ERASE_FAIL)))
- ;
-
- if (ioread32(FlashReg + intr_status) &
- INTR_STATUS0__ERASE_FAIL)
- status = FAIL;
-
- iowrite32(INTR_STATUS0__ERASE_COMP | INTR_STATUS0__ERASE_FAIL,
- FlashReg + intr_status);
- }
-
- return status;
-}
-
-static u32 Boundary_Check_Block_Page(u32 block, u16 page,
- u16 page_count)
-{
- u32 status = PASS;
-
- if (block >= DeviceInfo.wTotalBlocks)
- status = FAIL;
-
- if (page + page_count > DeviceInfo.wPagesPerBlock)
- status = FAIL;
-
- return status;
-}
-
-u16 NAND_Read_Page_Spare(u8 *read_data, u32 block, u16 page,
- u16 page_count)
-{
- u32 status = PASS;
- u32 i;
- u64 flash_add;
- u32 PageSpareSize = DeviceInfo.wPageSpareSize;
- u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
- u32 flash_bank;
- u32 intr_status = 0;
- u32 intr_status_addresses[4] = {INTR_STATUS0,
- INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
- u8 *page_spare = buf_read_page_spare;
-
- if (block >= DeviceInfo.wTotalBlocks) {
- printk(KERN_ERR "block too big: %d\n", (int)block);
- status = FAIL;
- }
-
- if (page >= DeviceInfo.wPagesPerBlock) {
- printk(KERN_ERR "page too big: %d\n", page);
- status = FAIL;
- }
-
- if (page_count > 1) {
- printk(KERN_ERR "page count too big: %d\n", page_count);
- status = FAIL;
- }
-
- flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
- * DeviceInfo.wBlockDataSize +
- (u64)page * DeviceInfo.wPageDataSize;
-
- flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
-
- if (status == PASS) {
- intr_status = intr_status_addresses[flash_bank];
- iowrite32(ioread32(FlashReg + intr_status),
- FlashReg + intr_status);
-
- index_addr((u32)(MODE_10 | (flash_bank << 24) |
- (flash_add >> DeviceInfo.nBitsInPageDataSize)),
- 0x41);
- index_addr((u32)(MODE_10 | (flash_bank << 24) |
- (flash_add >> DeviceInfo.nBitsInPageDataSize)),
- 0x2000 | page_count);
- while (!(ioread32(FlashReg + intr_status) &
- INTR_STATUS0__LOAD_COMP))
- ;
-
- iowrite32((u32)(MODE_01 | (flash_bank << 24) |
- (flash_add >> DeviceInfo.nBitsInPageDataSize)),
- FlashMem);
-
- for (i = 0; i < (PageSpareSize / 4); i++)
- *((u32 *)page_spare + i) =
- ioread32(FlashMem + 0x10);
-
- if (enable_ecc) {
- for (i = 0; i < spareFlagBytes; i++)
- read_data[i] =
- page_spare[PageSpareSize -
- spareFlagBytes + i];
- for (i = 0; i < (PageSpareSize - spareFlagBytes); i++)
- read_data[spareFlagBytes + i] =
- page_spare[i];
- } else {
- for (i = 0; i < PageSpareSize; i++)
- read_data[i] = page_spare[i];
- }
-
- index_addr((u32)(MODE_10 | (flash_bank << 24) |
- (flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);
- }
-
- return status;
-}
-
-/* Unused function. Should be removed later. */
-u16 NAND_Write_Page_Spare(u8 *write_data, u32 block, u16 page,
- u16 page_count)
-{
- printk(KERN_ERR
- "Error! This function (NAND_Write_Page_Spare) should never"
- " be called!\n");
- return ERR;
-}
-
-/* op value: 0 - DDMA read; 1 - DDMA write */
-static void ddma_trans(u8 *data, u64 flash_add,
- u32 flash_bank, int op, u32 numPages)
-{
- u32 data_addr;
-
- /* Map virtual address to bus address for DDMA */
- data_addr = virt_to_bus(data);
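-	/*
-	 * The four MODE_10 transactions below appear to program the DDMA
-	 * descriptor: (1) transfer direction and page count, (2) the upper
-	 * 16 bits of the buffer bus address, (3) the lower 16 bits of the
-	 * buffer bus address, and (4) the final command that kicks off the
-	 * burst transfer.
-	 */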
-
- index_addr((u32)(MODE_10 | (flash_bank << 24) |
- (flash_add >> DeviceInfo.nBitsInPageDataSize)),
- (u16)(2 << 12) | (op << 8) | numPages);
-
- index_addr((u32)(MODE_10 | (flash_bank << 24) |
- ((u16)(0x0FFFF & (data_addr >> 16)) << 8)),
- (u16)(2 << 12) | (2 << 8) | 0);
-
- index_addr((u32)(MODE_10 | (flash_bank << 24) |
- ((u16)(0x0FFFF & data_addr) << 8)),
- (u16)(2 << 12) | (3 << 8) | 0);
-
- index_addr((u32)(MODE_10 | (flash_bank << 24) |
- (1 << 16) | (0x40 << 8)),
- (u16)(2 << 12) | (4 << 8) | 0);
-}
-
-/* If data in buf are all 0xff, then return 1; otherwise return 0 */
-static int check_all_1(u8 *buf)
-{
- int i, j, cnt;
-
- for (i = 0; i < DeviceInfo.wPageDataSize; i++) {
- if (buf[i] != 0xff) {
- cnt = 0;
- nand_dbg_print(NAND_DBG_WARN,
- "the first non-0xff data byte is: %d\n", i);
- for (j = i; j < DeviceInfo.wPageDataSize; j++) {
- nand_dbg_print(NAND_DBG_WARN, "0x%x ", buf[j]);
- cnt++;
- if (cnt > 8)
- break;
- }
- nand_dbg_print(NAND_DBG_WARN, "\n");
- return 0;
- }
- }
-
- return 1;
-}
-
-static int do_ecc_new(unsigned long bank, u8 *buf,
- u32 block, u16 page)
-{
- int status = PASS;
- u16 err_page = 0;
- u16 err_byte;
- u8 err_sect;
- u8 err_dev;
- u16 err_fix_info;
- u16 err_addr;
- u32 ecc_sect_size;
- u8 *err_pos;
- u32 err_page_addr[4] = {ERR_PAGE_ADDR0,
- ERR_PAGE_ADDR1, ERR_PAGE_ADDR2, ERR_PAGE_ADDR3};
-
- ecc_sect_size = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);
-
- do {
- err_page = ioread32(FlashReg + err_page_addr[bank]);
- err_addr = ioread32(FlashReg + ECC_ERROR_ADDRESS);
- err_byte = err_addr & ECC_ERROR_ADDRESS__OFFSET;
- err_sect = ((err_addr & ECC_ERROR_ADDRESS__SECTOR_NR) >> 12);
- err_fix_info = ioread32(FlashReg + ERR_CORRECTION_INFO);
- err_dev = ((err_fix_info & ERR_CORRECTION_INFO__DEVICE_NR)
- >> 8);
- if (err_fix_info & ERR_CORRECTION_INFO__ERROR_TYPE) {
- nand_dbg_print(NAND_DBG_WARN,
- "%s, Line %d Uncorrectable ECC error "
- "when read block %d page %d."
- "PTN_INTR register: 0x%x "
- "err_page: %d, err_sect: %d, err_byte: %d, "
- "err_dev: %d, ecc_sect_size: %d, "
- "err_fix_info: 0x%x\n",
- __FILE__, __LINE__, block, page,
- ioread32(FlashReg + PTN_INTR),
- err_page, err_sect, err_byte, err_dev,
- ecc_sect_size, (u32)err_fix_info);
-
- if (check_all_1(buf))
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d"
- "All 0xff!\n",
- __FILE__, __LINE__);
- else
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d"
- "Not all 0xff!\n",
- __FILE__, __LINE__);
- status = FAIL;
- } else {
- nand_dbg_print(NAND_DBG_WARN,
- "%s, Line %d Found ECC error "
- "when read block %d page %d."
- "err_page: %d, err_sect: %d, err_byte: %d, "
- "err_dev: %d, ecc_sect_size: %d, "
- "err_fix_info: 0x%x\n",
- __FILE__, __LINE__, block, page,
- err_page, err_sect, err_byte, err_dev,
- ecc_sect_size, (u32)err_fix_info);
- if (err_byte < ECC_SECTOR_SIZE) {
- err_pos = buf +
- (err_page - page) *
- DeviceInfo.wPageDataSize +
- err_sect * ecc_sect_size +
- err_byte *
- DeviceInfo.wDevicesConnected +
- err_dev;
-
- *err_pos ^= err_fix_info &
- ERR_CORRECTION_INFO__BYTEMASK;
- }
- }
- } while (!(err_fix_info & ERR_CORRECTION_INFO__LAST_ERR_INFO));
-
- return status;
-}
-
-u16 NAND_Read_Page_Main_Polling(u8 *read_data,
- u32 block, u16 page, u16 page_count)
-{
- u32 status = PASS;
- u64 flash_add;
- u32 intr_status = 0;
- u32 flash_bank;
- u32 intr_status_addresses[4] = {INTR_STATUS0,
- INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
- u8 *read_data_l;
-
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- status = Boundary_Check_Block_Page(block, page, page_count);
- if (status != PASS)
- return status;
-
- flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
- * DeviceInfo.wBlockDataSize +
- (u64)page * DeviceInfo.wPageDataSize;
- flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
-
- iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
-
- intr_status = intr_status_addresses[flash_bank];
- iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
-
- if (page_count > 1) {
- read_data_l = read_data;
- while (page_count > MAX_PAGES_PER_RW) {
- if (ioread32(FlashReg + MULTIPLANE_OPERATION))
- status = NAND_Multiplane_Read(read_data_l,
- block, page, MAX_PAGES_PER_RW);
- else
- status = NAND_Pipeline_Read_Ahead_Polling(
- read_data_l, block, page,
- MAX_PAGES_PER_RW);
-
- if (status == FAIL)
- return status;
-
- read_data_l += DeviceInfo.wPageDataSize *
- MAX_PAGES_PER_RW;
- page_count -= MAX_PAGES_PER_RW;
- page += MAX_PAGES_PER_RW;
- }
- if (ioread32(FlashReg + MULTIPLANE_OPERATION))
- status = NAND_Multiplane_Read(read_data_l,
- block, page, page_count);
- else
- status = NAND_Pipeline_Read_Ahead_Polling(
- read_data_l, block, page, page_count);
-
- return status;
- }
-
- iowrite32(1, FlashReg + DMA_ENABLE);
- while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
- ;
-
- iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
- iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
-
- ddma_trans(read_data, flash_add, flash_bank, 0, 1);
-
- if (enable_ecc) {
- while (!(ioread32(FlashReg + intr_status) &
- (INTR_STATUS0__ECC_TRANSACTION_DONE |
- INTR_STATUS0__ECC_ERR)))
- ;
-
- if (ioread32(FlashReg + intr_status) &
- INTR_STATUS0__ECC_ERR) {
- iowrite32(INTR_STATUS0__ECC_ERR,
- FlashReg + intr_status);
- status = do_ecc_new(flash_bank, read_data,
- block, page);
- }
-
-		if ((ioread32(FlashReg + intr_status) &
-			(INTR_STATUS0__ECC_TRANSACTION_DONE |
-			INTR_STATUS0__ECC_ERR)) ==
-			(INTR_STATUS0__ECC_TRANSACTION_DONE |
-			INTR_STATUS0__ECC_ERR))
-			iowrite32(INTR_STATUS0__ECC_TRANSACTION_DONE |
-				INTR_STATUS0__ECC_ERR,
-				FlashReg + intr_status);
- else if (ioread32(FlashReg + intr_status) &
- INTR_STATUS0__ECC_TRANSACTION_DONE)
- iowrite32(INTR_STATUS0__ECC_TRANSACTION_DONE,
- FlashReg + intr_status);
- else if (ioread32(FlashReg + intr_status) &
- INTR_STATUS0__ECC_ERR)
- iowrite32(INTR_STATUS0__ECC_ERR,
- FlashReg + intr_status);
- } else {
- while (!(ioread32(FlashReg + intr_status) &
- INTR_STATUS0__DMA_CMD_COMP))
- ;
- iowrite32(INTR_STATUS0__DMA_CMD_COMP, FlashReg + intr_status);
- }
-
- iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
-
- iowrite32(0, FlashReg + DMA_ENABLE);
- while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
- ;
-
- return status;
-}
-
-u16 NAND_Pipeline_Read_Ahead_Polling(u8 *read_data,
- u32 block, u16 page, u16 page_count)
-{
- u32 status = PASS;
- u32 NumPages = page_count;
- u64 flash_add;
- u32 flash_bank;
- u32 intr_status = 0;
- u32 intr_status_addresses[4] = {INTR_STATUS0,
- INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
- u32 ecc_done_OR_dma_comp;
-
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- status = Boundary_Check_Block_Page(block, page, page_count);
-
- if (page_count < 2)
- status = FAIL;
-
- flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
- *DeviceInfo.wBlockDataSize +
- (u64)page * DeviceInfo.wPageDataSize;
-
- flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
-
- if (status == PASS) {
- intr_status = intr_status_addresses[flash_bank];
- iowrite32(ioread32(FlashReg + intr_status),
- FlashReg + intr_status);
-
- iowrite32(1, FlashReg + DMA_ENABLE);
- while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
- ;
-
- iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
-
- index_addr((u32)(MODE_10 | (flash_bank << 24) |
- (flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);
- ddma_trans(read_data, flash_add, flash_bank, 0, NumPages);
-
- ecc_done_OR_dma_comp = 0;
- while (1) {
- if (enable_ecc) {
- while (!ioread32(FlashReg + intr_status))
- ;
-
- if (ioread32(FlashReg + intr_status) &
- INTR_STATUS0__ECC_ERR) {
- iowrite32(INTR_STATUS0__ECC_ERR,
- FlashReg + intr_status);
- status = do_ecc_new(flash_bank,
- read_data, block, page);
- } else if (ioread32(FlashReg + intr_status) &
- INTR_STATUS0__DMA_CMD_COMP) {
- iowrite32(INTR_STATUS0__DMA_CMD_COMP,
- FlashReg + intr_status);
-
- if (1 == ecc_done_OR_dma_comp)
- break;
-
- ecc_done_OR_dma_comp = 1;
- } else if (ioread32(FlashReg + intr_status) &
- INTR_STATUS0__ECC_TRANSACTION_DONE) {
- iowrite32(
- INTR_STATUS0__ECC_TRANSACTION_DONE,
- FlashReg + intr_status);
-
- if (1 == ecc_done_OR_dma_comp)
- break;
-
- ecc_done_OR_dma_comp = 1;
- }
- } else {
- while (!(ioread32(FlashReg + intr_status) &
- INTR_STATUS0__DMA_CMD_COMP))
- ;
-
- iowrite32(INTR_STATUS0__DMA_CMD_COMP,
- FlashReg + intr_status);
- break;
- }
-
- iowrite32((~INTR_STATUS0__ECC_ERR) &
- (~INTR_STATUS0__ECC_TRANSACTION_DONE) &
- (~INTR_STATUS0__DMA_CMD_COMP),
- FlashReg + intr_status);
-
- }
-
- iowrite32(ioread32(FlashReg + intr_status),
- FlashReg + intr_status);
-
- iowrite32(0, FlashReg + DMA_ENABLE);
-
- while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
- ;
- }
- return status;
-}
-
-u16 NAND_Read_Page_Main(u8 *read_data, u32 block, u16 page,
- u16 page_count)
-{
- u32 status = PASS;
- u64 flash_add;
- u32 intr_status = 0;
- u32 flash_bank;
- u32 intr_status_addresses[4] = {INTR_STATUS0,
- INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
- int ret;
- u8 *read_data_l;
-
- nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- status = Boundary_Check_Block_Page(block, page, page_count);
- if (status != PASS)
- return status;
-
- flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
- * DeviceInfo.wBlockDataSize +
- (u64)page * DeviceInfo.wPageDataSize;
- flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
-
- iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
-
- intr_status = intr_status_addresses[flash_bank];
- iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
-
- if (page_count > 1) {
- read_data_l = read_data;
- while (page_count > MAX_PAGES_PER_RW) {
- if (ioread32(FlashReg + MULTIPLANE_OPERATION))
- status = NAND_Multiplane_Read(read_data_l,
- block, page, MAX_PAGES_PER_RW);
- else
- status = NAND_Pipeline_Read_Ahead(
- read_data_l, block, page,
- MAX_PAGES_PER_RW);
-
- if (status == FAIL)
- return status;
-
- read_data_l += DeviceInfo.wPageDataSize *
- MAX_PAGES_PER_RW;
- page_count -= MAX_PAGES_PER_RW;
- page += MAX_PAGES_PER_RW;
- }
- if (ioread32(FlashReg + MULTIPLANE_OPERATION))
- status = NAND_Multiplane_Read(read_data_l,
- block, page, page_count);
- else
- status = NAND_Pipeline_Read_Ahead(
- read_data_l, block, page, page_count);
-
- return status;
- }
-
- iowrite32(1, FlashReg + DMA_ENABLE);
- while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
- ;
-
- iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
- iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
-
- /* Fill the mrst_nand_info structure */
- info.state = INT_READ_PAGE_MAIN;
- info.read_data = read_data;
- info.flash_bank = flash_bank;
- info.block = block;
- info.page = page;
- info.ret = PASS;
-
- ddma_trans(read_data, flash_add, flash_bank, 0, 1);
-
- iowrite32(1, FlashReg + GLOBAL_INT_ENABLE); /* Enable Interrupt */
-
- ret = wait_for_completion_timeout(&info.complete, 10 * HZ);
- if (!ret) {
- printk(KERN_ERR "Wait for completion timeout "
- "in %s, Line %d\n", __FILE__, __LINE__);
- status = ERR;
- } else {
- status = info.ret;
- }
-
- iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
-
- iowrite32(0, FlashReg + DMA_ENABLE);
- while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
- ;
-
- return status;
-}
-
-void Conv_Spare_Data_Log2Phy_Format(u8 *data)
-{
- int i;
- const u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
- const u32 PageSpareSize = DeviceInfo.wPageSpareSize;
-
- if (enable_ecc) {
- for (i = spareFlagBytes - 1; i >= 0; i--)
- data[PageSpareSize - spareFlagBytes + i] = data[i];
- }
-}
-
-void Conv_Spare_Data_Phy2Log_Format(u8 *data)
-{
- int i;
- const u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
- const u32 PageSpareSize = DeviceInfo.wPageSpareSize;
-
- if (enable_ecc) {
- for (i = 0; i < spareFlagBytes; i++)
- data[i] = data[PageSpareSize - spareFlagBytes + i];
- }
-}
-
-
-void Conv_Main_Spare_Data_Log2Phy_Format(u8 *data, u16 page_count)
-{
- const u32 PageSize = DeviceInfo.wPageSize;
- const u32 PageDataSize = DeviceInfo.wPageDataSize;
- const u32 eccBytes = DeviceInfo.wECCBytesPerSector;
- const u32 spareSkipBytes = DeviceInfo.wSpareSkipBytes;
- const u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
- u32 eccSectorSize;
- u32 page_offset;
- int i, j;
-
- eccSectorSize = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);
- if (enable_ecc) {
- while (page_count > 0) {
- page_offset = (page_count - 1) * PageSize;
- j = (DeviceInfo.wPageDataSize / eccSectorSize);
- for (i = spareFlagBytes - 1; i >= 0; i--)
- data[page_offset +
- (eccSectorSize + eccBytes) * j + i] =
- data[page_offset + PageDataSize + i];
- for (j--; j >= 1; j--) {
- for (i = eccSectorSize - 1; i >= 0; i--)
- data[page_offset +
- (eccSectorSize + eccBytes) * j + i] =
- data[page_offset +
- eccSectorSize * j + i];
- }
- for (i = (PageSize - spareSkipBytes) - 1;
- i >= PageDataSize; i--)
- data[page_offset + i + spareSkipBytes] =
- data[page_offset + i];
- page_count--;
- }
- }
-}
-
-void Conv_Main_Spare_Data_Phy2Log_Format(u8 *data, u16 page_count)
-{
- const u32 PageSize = DeviceInfo.wPageSize;
- const u32 PageDataSize = DeviceInfo.wPageDataSize;
- const u32 eccBytes = DeviceInfo.wECCBytesPerSector;
- const u32 spareSkipBytes = DeviceInfo.wSpareSkipBytes;
- const u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
- u32 eccSectorSize;
- u32 page_offset;
- int i, j;
-
- eccSectorSize = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);
- if (enable_ecc) {
- while (page_count > 0) {
- page_offset = (page_count - 1) * PageSize;
- for (i = PageDataSize;
- i < PageSize - spareSkipBytes;
- i++)
- data[page_offset + i] =
- data[page_offset + i +
- spareSkipBytes];
- for (j = 1;
- j < DeviceInfo.wPageDataSize / eccSectorSize;
- j++) {
- for (i = 0; i < eccSectorSize; i++)
- data[page_offset +
- eccSectorSize * j + i] =
- data[page_offset +
- (eccSectorSize + eccBytes) * j
- + i];
- }
- for (i = 0; i < spareFlagBytes; i++)
- data[page_offset + PageDataSize + i] =
- data[page_offset +
- (eccSectorSize + eccBytes) * j + i];
- page_count--;
- }
- }
-}
-
-/* Un-tested function */
-u16 NAND_Multiplane_Read(u8 *read_data, u32 block, u16 page,
- u16 page_count)
-{
- u32 status = PASS;
- u32 NumPages = page_count;
- u64 flash_add;
- u32 flash_bank;
- u32 intr_status = 0;
- u32 intr_status_addresses[4] = {INTR_STATUS0,
- INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
- u32 ecc_done_OR_dma_comp;
-
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- status = Boundary_Check_Block_Page(block, page, page_count);
-
- flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
- * DeviceInfo.wBlockDataSize +
- (u64)page * DeviceInfo.wPageDataSize;
-
- flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
-
- if (status == PASS) {
- intr_status = intr_status_addresses[flash_bank];
- iowrite32(ioread32(FlashReg + intr_status),
- FlashReg + intr_status);
-
- iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
- iowrite32(0x01, FlashReg + MULTIPLANE_OPERATION);
-
- iowrite32(1, FlashReg + DMA_ENABLE);
- while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
- ;
- index_addr((u32)(MODE_10 | (flash_bank << 24) |
- (flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);
- ddma_trans(read_data, flash_add, flash_bank, 0, NumPages);
-
- ecc_done_OR_dma_comp = 0;
- while (1) {
- if (enable_ecc) {
- while (!ioread32(FlashReg + intr_status))
- ;
-
- if (ioread32(FlashReg + intr_status) &
- INTR_STATUS0__ECC_ERR) {
- iowrite32(INTR_STATUS0__ECC_ERR,
- FlashReg + intr_status);
- status = do_ecc_new(flash_bank,
- read_data, block, page);
- } else if (ioread32(FlashReg + intr_status) &
- INTR_STATUS0__DMA_CMD_COMP) {
- iowrite32(INTR_STATUS0__DMA_CMD_COMP,
- FlashReg + intr_status);
-
- if (1 == ecc_done_OR_dma_comp)
- break;
-
- ecc_done_OR_dma_comp = 1;
- } else if (ioread32(FlashReg + intr_status) &
- INTR_STATUS0__ECC_TRANSACTION_DONE) {
- iowrite32(
- INTR_STATUS0__ECC_TRANSACTION_DONE,
- FlashReg + intr_status);
-
- if (1 == ecc_done_OR_dma_comp)
- break;
-
- ecc_done_OR_dma_comp = 1;
- }
- } else {
- while (!(ioread32(FlashReg + intr_status) &
- INTR_STATUS0__DMA_CMD_COMP))
- ;
- iowrite32(INTR_STATUS0__DMA_CMD_COMP,
- FlashReg + intr_status);
- break;
- }
-
- iowrite32((~INTR_STATUS0__ECC_ERR) &
- (~INTR_STATUS0__ECC_TRANSACTION_DONE) &
- (~INTR_STATUS0__DMA_CMD_COMP),
- FlashReg + intr_status);
-
- }
-
- iowrite32(ioread32(FlashReg + intr_status),
- FlashReg + intr_status);
-
- iowrite32(0, FlashReg + DMA_ENABLE);
-
- while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
- ;
-
- iowrite32(0, FlashReg + MULTIPLANE_OPERATION);
- }
-
- return status;
-}
-
-u16 NAND_Pipeline_Read_Ahead(u8 *read_data, u32 block,
- u16 page, u16 page_count)
-{
- u32 status = PASS;
- u32 NumPages = page_count;
- u64 flash_add;
- u32 flash_bank;
- u32 intr_status = 0;
- u32 intr_status_addresses[4] = {INTR_STATUS0,
- INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
- int ret;
-
- nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- status = Boundary_Check_Block_Page(block, page, page_count);
-
- if (page_count < 2)
- status = FAIL;
-
- if (status != PASS)
- return status;
-
- flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
- *DeviceInfo.wBlockDataSize +
- (u64)page * DeviceInfo.wPageDataSize;
-
- flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
-
- intr_status = intr_status_addresses[flash_bank];
- iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
-
- iowrite32(1, FlashReg + DMA_ENABLE);
- while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
- ;
-
- iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
-
- /* Fill the mrst_nand_info structure */
- info.state = INT_PIPELINE_READ_AHEAD;
- info.read_data = read_data;
- info.flash_bank = flash_bank;
- info.block = block;
- info.page = page;
- info.ret = PASS;
-
- index_addr((u32)(MODE_10 | (flash_bank << 24) |
- (flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);
-
- ddma_trans(read_data, flash_add, flash_bank, 0, NumPages);
-
- iowrite32(1, FlashReg + GLOBAL_INT_ENABLE); /* Enable Interrupt */
-
- ret = wait_for_completion_timeout(&info.complete, 10 * HZ);
- if (!ret) {
- printk(KERN_ERR "Wait for completion timeout "
- "in %s, Line %d\n", __FILE__, __LINE__);
- status = ERR;
- } else {
- status = info.ret;
- }
-
- iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
-
- iowrite32(0, FlashReg + DMA_ENABLE);
-
- while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
- ;
-
- return status;
-}
-
-
-u16 NAND_Write_Page_Main(u8 *write_data, u32 block, u16 page,
- u16 page_count)
-{
- u32 status = PASS;
- u64 flash_add;
- u32 intr_status = 0;
- u32 flash_bank;
- u32 intr_status_addresses[4] = {INTR_STATUS0,
- INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
- int ret;
- u8 *write_data_l;
-
- nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- status = Boundary_Check_Block_Page(block, page, page_count);
- if (status != PASS)
- return status;
-
- flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
- * DeviceInfo.wBlockDataSize +
- (u64)page * DeviceInfo.wPageDataSize;
-
- flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
-
- intr_status = intr_status_addresses[flash_bank];
-
- iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
-
- iowrite32(INTR_STATUS0__PROGRAM_COMP |
- INTR_STATUS0__PROGRAM_FAIL, FlashReg + intr_status);
-
- if (page_count > 1) {
- write_data_l = write_data;
- while (page_count > MAX_PAGES_PER_RW) {
- if (ioread32(FlashReg + MULTIPLANE_OPERATION))
- status = NAND_Multiplane_Write(write_data_l,
- block, page, MAX_PAGES_PER_RW);
- else
- status = NAND_Pipeline_Write_Ahead(
- write_data_l, block, page,
- MAX_PAGES_PER_RW);
- if (status == FAIL)
- return status;
-
- write_data_l += DeviceInfo.wPageDataSize *
- MAX_PAGES_PER_RW;
- page_count -= MAX_PAGES_PER_RW;
- page += MAX_PAGES_PER_RW;
- }
- if (ioread32(FlashReg + MULTIPLANE_OPERATION))
- status = NAND_Multiplane_Write(write_data_l,
- block, page, page_count);
- else
- status = NAND_Pipeline_Write_Ahead(write_data_l,
- block, page, page_count);
-
- return status;
- }
-
- iowrite32(1, FlashReg + DMA_ENABLE);
- while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
- ;
-
- iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
-
- iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
-
- /* Fill the mrst_nand_info structure */
- info.state = INT_WRITE_PAGE_MAIN;
- info.write_data = write_data;
- info.flash_bank = flash_bank;
- info.block = block;
- info.page = page;
- info.ret = PASS;
-
- ddma_trans(write_data, flash_add, flash_bank, 1, 1);
-
- iowrite32(1, FlashReg + GLOBAL_INT_ENABLE); /* Enable interrupt */
-
- ret = wait_for_completion_timeout(&info.complete, 10 * HZ);
- if (!ret) {
- printk(KERN_ERR "Wait for completion timeout "
- "in %s, Line %d\n", __FILE__, __LINE__);
- status = ERR;
- } else {
- status = info.ret;
- }
-
- iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
-
- iowrite32(0, FlashReg + DMA_ENABLE);
- while (ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG)
- ;
-
- return status;
-}
-
-void NAND_ECC_Ctrl(int enable)
-{
- if (enable) {
- nand_dbg_print(NAND_DBG_WARN,
- "Will enable ECC in %s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
- iowrite32(1, FlashReg + ECC_ENABLE);
- enable_ecc = 1;
- } else {
- nand_dbg_print(NAND_DBG_WARN,
- "Will disable ECC in %s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
- iowrite32(0, FlashReg + ECC_ENABLE);
- enable_ecc = 0;
- }
-}
-
-u16 NAND_Write_Page_Main_Spare(u8 *write_data, u32 block,
- u16 page, u16 page_count)
-{
- u32 status = PASS;
- u32 i, j, page_num = 0;
- u32 PageSize = DeviceInfo.wPageSize;
- u32 PageDataSize = DeviceInfo.wPageDataSize;
- u32 eccBytes = DeviceInfo.wECCBytesPerSector;
- u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
- u32 spareSkipBytes = DeviceInfo.wSpareSkipBytes;
- u64 flash_add;
- u32 eccSectorSize;
- u32 flash_bank;
- u32 intr_status = 0;
- u32 intr_status_addresses[4] = {INTR_STATUS0,
- INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
- u8 *page_main_spare = buf_write_page_main_spare;
-
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- eccSectorSize = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);
-
- status = Boundary_Check_Block_Page(block, page, page_count);
-
- flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
-
- if (status == PASS) {
- intr_status = intr_status_addresses[flash_bank];
-
- iowrite32(1, FlashReg + TRANSFER_SPARE_REG);
-
- while ((status != FAIL) && (page_count > 0)) {
- flash_add = (u64)(block %
- (DeviceInfo.wTotalBlocks / totalUsedBanks)) *
- DeviceInfo.wBlockDataSize +
- (u64)page * DeviceInfo.wPageDataSize;
-
- iowrite32(ioread32(FlashReg + intr_status),
- FlashReg + intr_status);
-
- iowrite32((u32)(MODE_01 | (flash_bank << 24) |
- (flash_add >>
- DeviceInfo.nBitsInPageDataSize)),
- FlashMem);
-
- if (enable_ecc) {
- for (j = 0;
- j <
- DeviceInfo.wPageDataSize / eccSectorSize;
- j++) {
- for (i = 0; i < eccSectorSize; i++)
- page_main_spare[(eccSectorSize +
- eccBytes) * j +
- i] =
- write_data[eccSectorSize *
- j + i];
-
- for (i = 0; i < eccBytes; i++)
- page_main_spare[(eccSectorSize +
- eccBytes) * j +
- eccSectorSize +
- i] =
- write_data[PageDataSize +
- spareFlagBytes +
- eccBytes * j +
- i];
- }
-
- for (i = 0; i < spareFlagBytes; i++)
- page_main_spare[(eccSectorSize +
- eccBytes) * j + i] =
- write_data[PageDataSize + i];
-
- for (i = PageSize - 1; i >= PageDataSize +
- spareSkipBytes; i--)
- page_main_spare[i] = page_main_spare[i -
- spareSkipBytes];
-
- for (i = PageDataSize; i < PageDataSize +
- spareSkipBytes; i++)
- page_main_spare[i] = 0xff;
-
- for (i = 0; i < PageSize / 4; i++)
- iowrite32(
- *((u32 *)page_main_spare + i),
- FlashMem + 0x10);
- } else {
-
- for (i = 0; i < PageSize / 4; i++)
- iowrite32(*((u32 *)write_data + i),
- FlashMem + 0x10);
- }
-
- while (!(ioread32(FlashReg + intr_status) &
- (INTR_STATUS0__PROGRAM_COMP |
- INTR_STATUS0__PROGRAM_FAIL)))
- ;
-
- if (ioread32(FlashReg + intr_status) &
- INTR_STATUS0__PROGRAM_FAIL)
- status = FAIL;
-
- iowrite32(ioread32(FlashReg + intr_status),
- FlashReg + intr_status);
-
- page_num++;
- page_count--;
- write_data += PageSize;
- }
-
- iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
- }
-
- return status;
-}
-
-u16 NAND_Read_Page_Main_Spare(u8 *read_data, u32 block, u16 page,
- u16 page_count)
-{
- u32 status = PASS;
- u32 i, j;
- u64 flash_add = 0;
- u32 PageSize = DeviceInfo.wPageSize;
- u32 PageDataSize = DeviceInfo.wPageDataSize;
- u32 PageSpareSize = DeviceInfo.wPageSpareSize;
- u32 eccBytes = DeviceInfo.wECCBytesPerSector;
- u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
- u32 spareSkipBytes = DeviceInfo.wSpareSkipBytes;
- u32 eccSectorSize;
- u32 flash_bank;
- u32 intr_status = 0;
- u8 *read_data_l = read_data;
- u32 intr_status_addresses[4] = {INTR_STATUS0,
- INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
- u8 *page_main_spare = buf_read_page_main_spare;
-
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- eccSectorSize = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);
-
- status = Boundary_Check_Block_Page(block, page, page_count);
-
- flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
-
- if (status == PASS) {
- intr_status = intr_status_addresses[flash_bank];
-
- iowrite32(1, FlashReg + TRANSFER_SPARE_REG);
-
- iowrite32(ioread32(FlashReg + intr_status),
- FlashReg + intr_status);
-
- while ((status != FAIL) && (page_count > 0)) {
- flash_add = (u64)(block %
- (DeviceInfo.wTotalBlocks / totalUsedBanks))
- * DeviceInfo.wBlockDataSize +
- (u64)page * DeviceInfo.wPageDataSize;
-
- index_addr((u32)(MODE_10 | (flash_bank << 24) |
- (flash_add >> DeviceInfo.nBitsInPageDataSize)),
- 0x43);
- index_addr((u32)(MODE_10 | (flash_bank << 24) |
- (flash_add >> DeviceInfo.nBitsInPageDataSize)),
- 0x2000 | page_count);
-
- while (!(ioread32(FlashReg + intr_status) &
- INTR_STATUS0__LOAD_COMP))
- ;
-
- iowrite32((u32)(MODE_01 | (flash_bank << 24) |
- (flash_add >>
- DeviceInfo.nBitsInPageDataSize)),
- FlashMem);
-
- for (i = 0; i < PageSize / 4; i++)
- *(((u32 *)page_main_spare) + i) =
- ioread32(FlashMem + 0x10);
-
- if (enable_ecc) {
- for (i = PageDataSize; i < PageSize -
- spareSkipBytes; i++)
- page_main_spare[i] = page_main_spare[i +
- spareSkipBytes];
-
- for (j = 0;
- j < DeviceInfo.wPageDataSize / eccSectorSize;
- j++) {
-
- for (i = 0; i < eccSectorSize; i++)
- read_data_l[eccSectorSize * j +
- i] =
- page_main_spare[
- (eccSectorSize +
- eccBytes) * j + i];
-
- for (i = 0; i < eccBytes; i++)
- read_data_l[PageDataSize +
- spareFlagBytes +
- eccBytes * j + i] =
- page_main_spare[
- (eccSectorSize +
- eccBytes) * j +
- eccSectorSize + i];
- }
-
- for (i = 0; i < spareFlagBytes; i++)
- read_data_l[PageDataSize + i] =
- page_main_spare[(eccSectorSize +
- eccBytes) * j + i];
- } else {
- for (i = 0; i < (PageDataSize + PageSpareSize);
- i++)
- read_data_l[i] = page_main_spare[i];
-
- }
-
- if (enable_ecc) {
- while (!(ioread32(FlashReg + intr_status) &
- (INTR_STATUS0__ECC_TRANSACTION_DONE |
- INTR_STATUS0__ECC_ERR)))
- ;
-
- if (ioread32(FlashReg + intr_status) &
- INTR_STATUS0__ECC_ERR) {
- iowrite32(INTR_STATUS0__ECC_ERR,
- FlashReg + intr_status);
- status = do_ecc_new(flash_bank,
- read_data, block, page);
- }
-
- if (ioread32(FlashReg + intr_status) &
- INTR_STATUS0__ECC_TRANSACTION_DONE &
- INTR_STATUS0__ECC_ERR) {
- iowrite32(INTR_STATUS0__ECC_ERR |
- INTR_STATUS0__ECC_TRANSACTION_DONE,
- FlashReg + intr_status);
- } else if (ioread32(FlashReg + intr_status) &
- INTR_STATUS0__ECC_TRANSACTION_DONE) {
- iowrite32(
- INTR_STATUS0__ECC_TRANSACTION_DONE,
- FlashReg + intr_status);
- } else if (ioread32(FlashReg + intr_status) &
- INTR_STATUS0__ECC_ERR) {
- iowrite32(INTR_STATUS0__ECC_ERR,
- FlashReg + intr_status);
- }
- }
-
- page++;
- page_count--;
- read_data_l += PageSize;
- }
- }
-
- iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
-
- index_addr((u32)(MODE_10 | (flash_bank << 24) |
- (flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);
-
- return status;
-}
-
-u16 NAND_Pipeline_Write_Ahead(u8 *write_data, u32 block,
- u16 page, u16 page_count)
-{
- u16 status = PASS;
- u32 NumPages = page_count;
- u64 flash_add;
- u32 flash_bank;
- u32 intr_status = 0;
- u32 intr_status_addresses[4] = {INTR_STATUS0,
- INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
- int ret;
-
- nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- status = Boundary_Check_Block_Page(block, page, page_count);
-
- if (page_count < 2)
- status = FAIL;
-
- if (status != PASS)
- return status;
-
- flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
- * DeviceInfo.wBlockDataSize +
- (u64)page * DeviceInfo.wPageDataSize;
-
- flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
-
- intr_status = intr_status_addresses[flash_bank];
- iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
-
- iowrite32(1, FlashReg + DMA_ENABLE);
- while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
- ;
-
- iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
-
- /* Fill the mrst_nand_info structure */
- info.state = INT_PIPELINE_WRITE_AHEAD;
- info.write_data = write_data;
- info.flash_bank = flash_bank;
- info.block = block;
- info.page = page;
- info.ret = PASS;
-
- index_addr((u32)(MODE_10 | (flash_bank << 24) |
- (flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);
-
- ddma_trans(write_data, flash_add, flash_bank, 1, NumPages);
-
- iowrite32(1, FlashReg + GLOBAL_INT_ENABLE); /* Enable interrupt */
-
- ret = wait_for_completion_timeout(&info.complete, 10 * HZ);
- if (!ret) {
- printk(KERN_ERR "Wait for completion timeout "
- "in %s, Line %d\n", __FILE__, __LINE__);
- status = ERR;
- } else {
- status = info.ret;
- }
-
- iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
-
- iowrite32(0, FlashReg + DMA_ENABLE);
- while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
- ;
-
- return status;
-}
-
-/* Un-tested function */
-u16 NAND_Multiplane_Write(u8 *write_data, u32 block, u16 page,
- u16 page_count)
-{
- u16 status = PASS;
- u32 NumPages = page_count;
- u64 flash_add;
- u32 flash_bank;
- u32 intr_status = 0;
- u32 intr_status_addresses[4] = {INTR_STATUS0,
- INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
- u16 status2 = PASS;
- u32 t;
-
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- status = Boundary_Check_Block_Page(block, page, page_count);
- if (status != PASS)
- return status;
-
- flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
- * DeviceInfo.wBlockDataSize +
- (u64)page * DeviceInfo.wPageDataSize;
-
- flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
-
- intr_status = intr_status_addresses[flash_bank];
- iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
-
- iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
- iowrite32(0x01, FlashReg + MULTIPLANE_OPERATION);
-
- iowrite32(1, FlashReg + DMA_ENABLE);
- while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
- ;
-
- iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
-
- index_addr((u32)(MODE_10 | (flash_bank << 24) |
- (flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);
-
- ddma_trans(write_data, flash_add, flash_bank, 1, NumPages);
-
- while (1) {
- while (!ioread32(FlashReg + intr_status))
- ;
-
- if (ioread32(FlashReg + intr_status) &
- INTR_STATUS0__DMA_CMD_COMP) {
- iowrite32(INTR_STATUS0__DMA_CMD_COMP,
- FlashReg + intr_status);
- status = PASS;
- if (status2 == FAIL)
- status = FAIL;
- break;
- } else if (ioread32(FlashReg + intr_status) &
- INTR_STATUS0__PROGRAM_FAIL) {
- status2 = FAIL;
- status = FAIL;
- t = ioread32(FlashReg + intr_status) &
- INTR_STATUS0__PROGRAM_FAIL;
- iowrite32(t, FlashReg + intr_status);
- } else {
- iowrite32((~INTR_STATUS0__PROGRAM_FAIL) &
- (~INTR_STATUS0__DMA_CMD_COMP),
- FlashReg + intr_status);
- }
- }
-
- iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
-
- iowrite32(0, FlashReg + DMA_ENABLE);
-
- while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
- ;
-
- iowrite32(0, FlashReg + MULTIPLANE_OPERATION);
-
- return status;
-}
-
-
-#if CMD_DMA
-static irqreturn_t cdma_isr(int irq, void *dev_id)
-{
- struct mrst_nand_info *dev = dev_id;
- int first_failed_cmd;
-
- nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (!is_cdma_interrupt())
- return IRQ_NONE;
-
- /* Disable controller interrupts */
- iowrite32(0, FlashReg + GLOBAL_INT_ENABLE);
- GLOB_FTL_Event_Status(&first_failed_cmd);
- complete(&dev->complete);
-
- return IRQ_HANDLED;
-}
-#else
-static void handle_nand_int_read(struct mrst_nand_info *dev)
-{
- u32 intr_status_addresses[4] = {INTR_STATUS0,
- INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
- u32 intr_status;
- u32 ecc_done_OR_dma_comp = 0;
-
- nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- dev->ret = PASS;
- intr_status = intr_status_addresses[dev->flash_bank];
-
- while (1) {
- if (enable_ecc) {
- if (ioread32(FlashReg + intr_status) &
- INTR_STATUS0__ECC_ERR) {
- iowrite32(INTR_STATUS0__ECC_ERR,
- FlashReg + intr_status);
- dev->ret = do_ecc_new(dev->flash_bank,
- dev->read_data,
- dev->block, dev->page);
- } else if (ioread32(FlashReg + intr_status) &
- INTR_STATUS0__DMA_CMD_COMP) {
- iowrite32(INTR_STATUS0__DMA_CMD_COMP,
- FlashReg + intr_status);
- if (1 == ecc_done_OR_dma_comp)
- break;
- ecc_done_OR_dma_comp = 1;
- } else if (ioread32(FlashReg + intr_status) &
- INTR_STATUS0__ECC_TRANSACTION_DONE) {
- iowrite32(INTR_STATUS0__ECC_TRANSACTION_DONE,
- FlashReg + intr_status);
- if (1 == ecc_done_OR_dma_comp)
- break;
- ecc_done_OR_dma_comp = 1;
- }
- } else {
- if (ioread32(FlashReg + intr_status) &
- INTR_STATUS0__DMA_CMD_COMP) {
- iowrite32(INTR_STATUS0__DMA_CMD_COMP,
- FlashReg + intr_status);
- break;
- } else {
- printk(KERN_ERR "Illegal INTS "
- "(offset addr 0x%x) value: 0x%x\n",
- intr_status,
- ioread32(FlashReg + intr_status));
- }
- }
-
- iowrite32((~INTR_STATUS0__ECC_ERR) &
- (~INTR_STATUS0__ECC_TRANSACTION_DONE) &
- (~INTR_STATUS0__DMA_CMD_COMP),
- FlashReg + intr_status);
- }
-}
-
-static void handle_nand_int_write(struct mrst_nand_info *dev)
-{
- u32 intr_status;
- u32 intr[4] = {INTR_STATUS0, INTR_STATUS1,
- INTR_STATUS2, INTR_STATUS3};
- int status = PASS;
-
- nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- dev->ret = PASS;
- intr_status = intr[dev->flash_bank];
-
- while (1) {
- while (!ioread32(FlashReg + intr_status))
- ;
-
- if (ioread32(FlashReg + intr_status) &
- INTR_STATUS0__DMA_CMD_COMP) {
- iowrite32(INTR_STATUS0__DMA_CMD_COMP,
- FlashReg + intr_status);
- if (FAIL == status)
- dev->ret = FAIL;
- break;
- } else if (ioread32(FlashReg + intr_status) &
- INTR_STATUS0__PROGRAM_FAIL) {
- status = FAIL;
- iowrite32(INTR_STATUS0__PROGRAM_FAIL,
- FlashReg + intr_status);
- } else {
- iowrite32((~INTR_STATUS0__PROGRAM_FAIL) &
- (~INTR_STATUS0__DMA_CMD_COMP),
- FlashReg + intr_status);
- }
- }
-}
-
-static irqreturn_t ddma_isr(int irq, void *dev_id)
-{
- struct mrst_nand_info *dev = dev_id;
- u32 int_mask, ints0, ints1, ints2, ints3, ints_offset;
- u32 intr[4] = {INTR_STATUS0, INTR_STATUS1,
- INTR_STATUS2, INTR_STATUS3};
-
- int_mask = INTR_STATUS0__DMA_CMD_COMP |
- INTR_STATUS0__ECC_TRANSACTION_DONE |
- INTR_STATUS0__ECC_ERR |
- INTR_STATUS0__PROGRAM_FAIL |
- INTR_STATUS0__ERASE_FAIL;
-
- ints0 = ioread32(FlashReg + INTR_STATUS0);
- ints1 = ioread32(FlashReg + INTR_STATUS1);
- ints2 = ioread32(FlashReg + INTR_STATUS2);
- ints3 = ioread32(FlashReg + INTR_STATUS3);
-
- ints_offset = intr[dev->flash_bank];
-
- nand_dbg_print(NAND_DBG_DEBUG,
- "INTR0: 0x%x, INTR1: 0x%x, INTR2: 0x%x, INTR3: 0x%x, "
- "DMA_INTR: 0x%x, "
- "dev->state: 0x%x, dev->flash_bank: %d\n",
- ints0, ints1, ints2, ints3,
- ioread32(FlashReg + DMA_INTR),
- dev->state, dev->flash_bank);
-
- if (!(ioread32(FlashReg + ints_offset) & int_mask)) {
- iowrite32(ints0, FlashReg + INTR_STATUS0);
- iowrite32(ints1, FlashReg + INTR_STATUS1);
- iowrite32(ints2, FlashReg + INTR_STATUS2);
- iowrite32(ints3, FlashReg + INTR_STATUS3);
- nand_dbg_print(NAND_DBG_WARN,
- "ddma_isr: Invalid interrupt for NAND controller. "
- "Ignore it\n");
- return IRQ_NONE;
- }
-
- switch (dev->state) {
- case INT_READ_PAGE_MAIN:
- case INT_PIPELINE_READ_AHEAD:
- /* Disable controller interrupts */
- iowrite32(0, FlashReg + GLOBAL_INT_ENABLE);
- handle_nand_int_read(dev);
- break;
- case INT_WRITE_PAGE_MAIN:
- case INT_PIPELINE_WRITE_AHEAD:
- iowrite32(0, FlashReg + GLOBAL_INT_ENABLE);
- handle_nand_int_write(dev);
- break;
- default:
- printk(KERN_ERR "ddma_isr - Illegal state: 0x%x\n",
- dev->state);
- return IRQ_NONE;
- }
-
- dev->state = INT_IDLE_STATE;
- complete(&dev->complete);
- return IRQ_HANDLED;
-}
-#endif
-
-static const struct pci_device_id nand_pci_ids[] = {
- {
- .vendor = 0x8086,
- .device = 0x0809,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- },
- { /* end: all zeroes */ }
-};
-
-static int nand_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
-{
- int ret = -ENODEV;
- unsigned long csr_base;
- unsigned long csr_len;
- struct mrst_nand_info *pndev = &info;
- u32 int_mask;
-
- ret = pci_enable_device(dev);
- if (ret) {
- printk(KERN_ERR "Spectra: pci_enable_device failed.\n");
- return ret;
- }
-
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- FlashReg = ioremap_nocache(GLOB_HWCTL_REG_BASE,
- GLOB_HWCTL_REG_SIZE);
- if (!FlashReg) {
- printk(KERN_ERR "Spectra: ioremap_nocache failed!");
- goto failed_disable;
- }
- nand_dbg_print(NAND_DBG_WARN,
- "Spectra: Remapped reg base address: "
- "0x%p, len: %d\n",
- FlashReg, GLOB_HWCTL_REG_SIZE);
-
- FlashMem = ioremap_nocache(GLOB_HWCTL_MEM_BASE,
- GLOB_HWCTL_MEM_SIZE);
- if (!FlashMem) {
- printk(KERN_ERR "Spectra: ioremap_nocache failed!");
- iounmap(FlashReg);
- goto failed_disable;
- }
- nand_dbg_print(NAND_DBG_WARN,
- "Spectra: Remapped flash base address: "
- "0x%p, len: %d\n",
- (void *)FlashMem, GLOB_HWCTL_MEM_SIZE);
-
- nand_dbg_print(NAND_DBG_DEBUG, "Dump timing register values:"
- "acc_clks: %d, re_2_we: %d, we_2_re: %d,"
- "addr_2_data: %d, rdwr_en_lo_cnt: %d, "
- "rdwr_en_hi_cnt: %d, cs_setup_cnt: %d\n",
- ioread32(FlashReg + ACC_CLKS),
- ioread32(FlashReg + RE_2_WE),
- ioread32(FlashReg + WE_2_RE),
- ioread32(FlashReg + ADDR_2_DATA),
- ioread32(FlashReg + RDWR_EN_LO_CNT),
- ioread32(FlashReg + RDWR_EN_HI_CNT),
- ioread32(FlashReg + CS_SETUP_CNT));
-
- NAND_Flash_Reset();
-
- iowrite32(0, FlashReg + GLOBAL_INT_ENABLE);
-
-#if CMD_DMA
- info.pcmds_num = 0;
- info.flash_bank = 0;
- info.cdma_num = 0;
- int_mask = (DMA_INTR__DESC_COMP_CHANNEL0 |
- DMA_INTR__DESC_COMP_CHANNEL1 |
- DMA_INTR__DESC_COMP_CHANNEL2 |
- DMA_INTR__DESC_COMP_CHANNEL3 |
- DMA_INTR__MEMCOPY_DESC_COMP);
- iowrite32(int_mask, FlashReg + DMA_INTR_EN);
- iowrite32(0xFFFF, FlashReg + DMA_INTR);
-
- int_mask = (INTR_STATUS0__ECC_ERR |
- INTR_STATUS0__PROGRAM_FAIL |
- INTR_STATUS0__ERASE_FAIL);
-#else
- int_mask = INTR_STATUS0__DMA_CMD_COMP |
- INTR_STATUS0__ECC_TRANSACTION_DONE |
- INTR_STATUS0__ECC_ERR |
- INTR_STATUS0__PROGRAM_FAIL |
- INTR_STATUS0__ERASE_FAIL;
-#endif
- iowrite32(int_mask, FlashReg + INTR_EN0);
- iowrite32(int_mask, FlashReg + INTR_EN1);
- iowrite32(int_mask, FlashReg + INTR_EN2);
- iowrite32(int_mask, FlashReg + INTR_EN3);
-
- /* Clear all status bits */
- iowrite32(0xFFFF, FlashReg + INTR_STATUS0);
- iowrite32(0xFFFF, FlashReg + INTR_STATUS1);
- iowrite32(0xFFFF, FlashReg + INTR_STATUS2);
- iowrite32(0xFFFF, FlashReg + INTR_STATUS3);
-
- iowrite32(0x0F, FlashReg + RB_PIN_ENABLED);
- iowrite32(CHIP_EN_DONT_CARE__FLAG, FlashReg + CHIP_ENABLE_DONT_CARE);
-
- /* Should set value for these registers when init */
- iowrite32(0, FlashReg + TWO_ROW_ADDR_CYCLES);
- iowrite32(1, FlashReg + ECC_ENABLE);
- enable_ecc = 1;
-
- pci_set_master(dev);
- pndev->dev = dev;
-
- csr_base = pci_resource_start(dev, 0);
- if (!csr_base) {
- printk(KERN_ERR "Spectra: pci_resource_start failed!\n");
- ret = -ENODEV;
- goto failed_req_csr;
- }
-
- csr_len = pci_resource_len(dev, 0);
- if (!csr_len) {
- printk(KERN_ERR "Spectra: pci_resource_len failed!\n");
- ret = -ENODEV;
- goto failed_req_csr;
- }
-
- ret = pci_request_regions(dev, SPECTRA_NAND_NAME);
- if (ret) {
- printk(KERN_ERR "Spectra: Unable to request "
- "memory region\n");
- goto failed_req_csr;
- }
-
- pndev->ioaddr = ioremap_nocache(csr_base, csr_len);
- if (!pndev->ioaddr) {
- printk(KERN_ERR "Spectra: Unable to remap memory region\n");
- ret = -ENOMEM;
- goto failed_remap_csr;
- }
- nand_dbg_print(NAND_DBG_DEBUG, "Spectra: CSR 0x%08lx -> 0x%p (0x%lx)\n",
- csr_base, pndev->ioaddr, csr_len);
-
- init_completion(&pndev->complete);
- nand_dbg_print(NAND_DBG_DEBUG, "Spectra: IRQ %d\n", dev->irq);
-
-#if CMD_DMA
- if (request_irq(dev->irq, cdma_isr, IRQF_SHARED,
- SPECTRA_NAND_NAME, &info)) {
- printk(KERN_ERR "Spectra: Unable to allocate IRQ\n");
- ret = -ENODEV;
- iounmap(pndev->ioaddr);
- goto failed_remap_csr;
- }
-#else
- if (request_irq(dev->irq, ddma_isr, IRQF_SHARED,
- SPECTRA_NAND_NAME, &info)) {
- printk(KERN_ERR "Spectra: Unable to allocate IRQ\n");
- ret = -ENODEV;
- iounmap(pndev->ioaddr);
- goto failed_remap_csr;
- }
-#endif
-
- pci_set_drvdata(dev, pndev);
-
- ret = GLOB_LLD_Read_Device_ID();
- if (ret) {
- iounmap(pndev->ioaddr);
- goto failed_remap_csr;
- }
-
- ret = register_spectra_ftl();
- if (ret) {
- iounmap(pndev->ioaddr);
- goto failed_remap_csr;
- }
-
- return 0;
-
-failed_remap_csr:
- pci_release_regions(dev);
-failed_req_csr:
- iounmap(FlashMem);
- iounmap(FlashReg);
-failed_disable:
- pci_disable_device(dev);
-
- return ret;
-}
-
-static void nand_pci_remove(struct pci_dev *dev)
-{
- struct mrst_nand_info *pndev = pci_get_drvdata(dev);
-
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
-#if CMD_DMA
- free_irq(dev->irq, pndev);
-#endif
- iounmap(pndev->ioaddr);
- pci_release_regions(dev);
- pci_disable_device(dev);
-}
-
-MODULE_DEVICE_TABLE(pci, nand_pci_ids);
-
-static struct pci_driver nand_pci_driver = {
- .name = SPECTRA_NAND_NAME,
- .id_table = nand_pci_ids,
- .probe = nand_pci_probe,
- .remove = nand_pci_remove,
-};
-
-int NAND_Flash_Init(void)
-{
- int retval;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- retval = pci_register_driver(&nand_pci_driver);
- if (retval)
- return -ENOMEM;
-
- return PASS;
-}
-
-/* Free memory */
-int nand_release_spectra(void)
-{
- pci_unregister_driver(&nand_pci_driver);
- iounmap(FlashMem);
- iounmap(FlashReg);
-
- return 0;
-}
-
-
-
diff --git a/drivers/staging/spectra/lld_nand.h b/drivers/staging/spectra/lld_nand.h
deleted file mode 100644
index d08388287da8..000000000000
--- a/drivers/staging/spectra/lld_nand.h
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * NAND Flash Controller Device Driver
- * Copyright (c) 2009, Intel Corporation and its suppliers.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- */
-
-#ifndef _LLD_NAND_
-#define _LLD_NAND_
-
-#ifdef ELDORA
-#include "defs.h"
-#else
-#include "flash.h"
-#include "ffsport.h"
-#endif
-
-#define MODE_00 0x00000000
-#define MODE_01 0x04000000
-#define MODE_10 0x08000000
-#define MODE_11 0x0C000000
-
-
-#define DATA_TRANSFER_MODE 0
-#define PROTECTION_PER_BLOCK 1
-#define LOAD_WAIT_COUNT 2
-#define PROGRAM_WAIT_COUNT 3
-#define ERASE_WAIT_COUNT 4
-#define INT_MONITOR_CYCLE_COUNT 5
-#define READ_BUSY_PIN_ENABLED 6
-#define MULTIPLANE_OPERATION_SUPPORT 7
-#define PRE_FETCH_MODE 8
-#define CE_DONT_CARE_SUPPORT 9
-#define COPYBACK_SUPPORT 10
-#define CACHE_WRITE_SUPPORT 11
-#define CACHE_READ_SUPPORT 12
-#define NUM_PAGES_IN_BLOCK 13
-#define ECC_ENABLE_SELECT 14
-#define WRITE_ENABLE_2_READ_ENABLE 15
-#define ADDRESS_2_DATA 16
-#define READ_ENABLE_2_WRITE_ENABLE 17
-#define TWO_ROW_ADDRESS_CYCLES 18
-#define MULTIPLANE_ADDRESS_RESTRICT 19
-#define ACC_CLOCKS 20
-#define READ_WRITE_ENABLE_LOW_COUNT 21
-#define READ_WRITE_ENABLE_HIGH_COUNT 22
-
-#define ECC_SECTOR_SIZE 512
-#define LLD_MAX_FLASH_BANKS 4
-
-struct mrst_nand_info {
- struct pci_dev *dev;
- u32 state;
- u32 flash_bank;
- u8 *read_data;
- u8 *write_data;
- u32 block;
- u16 page;
- u32 use_dma;
- void __iomem *ioaddr; /* Mapped io reg base address */
- int ret;
- u32 pcmds_num;
- struct pending_cmd *pcmds;
- int cdma_num; /* CDMA descriptor number in this chan */
- u8 *cdma_desc_buf; /* CDMA descriptor table */
- u8 *memcp_desc_buf; /* Memory copy descriptor table */
- dma_addr_t cdma_desc; /* Mapped CDMA descriptor table */
- dma_addr_t memcp_desc; /* Mapped memory copy descriptor table */
- struct completion complete;
-};
-
-int NAND_Flash_Init(void);
-int nand_release_spectra(void);
-u16 NAND_Flash_Reset(void);
-u16 NAND_Read_Device_ID(void);
-u16 NAND_Erase_Block(u32 flash_add);
-u16 NAND_Write_Page_Main(u8 *write_data, u32 block, u16 page,
- u16 page_count);
-u16 NAND_Read_Page_Main(u8 *read_data, u32 block, u16 page,
- u16 page_count);
-u16 NAND_UnlockArrayAll(void);
-u16 NAND_Write_Page_Main_Spare(u8 *write_data, u32 block,
- u16 page, u16 page_count);
-u16 NAND_Write_Page_Spare(u8 *read_data, u32 block, u16 page,
- u16 page_count);
-u16 NAND_Read_Page_Main_Spare(u8 *read_data, u32 block, u16 page,
- u16 page_count);
-u16 NAND_Read_Page_Spare(u8 *read_data, u32 block, u16 page,
- u16 page_count);
-void NAND_LLD_Enable_Disable_Interrupts(u16 INT_ENABLE);
-u16 NAND_Get_Bad_Block(u32 block);
-u16 NAND_Pipeline_Read_Ahead(u8 *read_data, u32 block, u16 page,
- u16 page_count);
-u16 NAND_Pipeline_Write_Ahead(u8 *write_data, u32 block,
- u16 page, u16 page_count);
-u16 NAND_Multiplane_Read(u8 *read_data, u32 block, u16 page,
- u16 page_count);
-u16 NAND_Multiplane_Write(u8 *write_data, u32 block, u16 page,
- u16 page_count);
-void NAND_ECC_Ctrl(int enable);
-u16 NAND_Read_Page_Main_Polling(u8 *read_data,
- u32 block, u16 page, u16 page_count);
-u16 NAND_Pipeline_Read_Ahead_Polling(u8 *read_data,
- u32 block, u16 page, u16 page_count);
-void Conv_Spare_Data_Log2Phy_Format(u8 *data);
-void Conv_Spare_Data_Phy2Log_Format(u8 *data);
-void Conv_Main_Spare_Data_Log2Phy_Format(u8 *data, u16 page_count);
-void Conv_Main_Spare_Data_Phy2Log_Format(u8 *data, u16 page_count);
-
-extern void __iomem *FlashReg;
-extern void __iomem *FlashMem;
-
-extern int totalUsedBanks;
-extern u32 GLOB_valid_banks[LLD_MAX_FLASH_BANKS];
-
-#endif /*_LLD_NAND_*/
-
-
-
diff --git a/drivers/staging/spectra/nand_regs.h b/drivers/staging/spectra/nand_regs.h
deleted file mode 100644
index e192e4ae8c1e..000000000000
--- a/drivers/staging/spectra/nand_regs.h
+++ /dev/null
@@ -1,619 +0,0 @@
-/*
- * NAND Flash Controller Device Driver
- * Copyright (c) 2009, Intel Corporation and its suppliers.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- */
-
-#define DEVICE_RESET 0x0
-#define DEVICE_RESET__BANK0 0x0001
-#define DEVICE_RESET__BANK1 0x0002
-#define DEVICE_RESET__BANK2 0x0004
-#define DEVICE_RESET__BANK3 0x0008
-
-#define TRANSFER_SPARE_REG 0x10
-#define TRANSFER_SPARE_REG__FLAG 0x0001
-
-#define LOAD_WAIT_CNT 0x20
-#define LOAD_WAIT_CNT__VALUE 0xffff
-
-#define PROGRAM_WAIT_CNT 0x30
-#define PROGRAM_WAIT_CNT__VALUE 0xffff
-
-#define ERASE_WAIT_CNT 0x40
-#define ERASE_WAIT_CNT__VALUE 0xffff
-
-#define INT_MON_CYCCNT 0x50
-#define INT_MON_CYCCNT__VALUE 0xffff
-
-#define RB_PIN_ENABLED 0x60
-#define RB_PIN_ENABLED__BANK0 0x0001
-#define RB_PIN_ENABLED__BANK1 0x0002
-#define RB_PIN_ENABLED__BANK2 0x0004
-#define RB_PIN_ENABLED__BANK3 0x0008
-
-#define MULTIPLANE_OPERATION 0x70
-#define MULTIPLANE_OPERATION__FLAG 0x0001
-
-#define MULTIPLANE_READ_ENABLE 0x80
-#define MULTIPLANE_READ_ENABLE__FLAG 0x0001
-
-#define COPYBACK_DISABLE 0x90
-#define COPYBACK_DISABLE__FLAG 0x0001
-
-#define CACHE_WRITE_ENABLE 0xa0
-#define CACHE_WRITE_ENABLE__FLAG 0x0001
-
-#define CACHE_READ_ENABLE 0xb0
-#define CACHE_READ_ENABLE__FLAG 0x0001
-
-#define PREFETCH_MODE 0xc0
-#define PREFETCH_MODE__PREFETCH_EN 0x0001
-#define PREFETCH_MODE__PREFETCH_BURST_LENGTH 0xfff0
-
-#define CHIP_ENABLE_DONT_CARE 0xd0
-#define CHIP_EN_DONT_CARE__FLAG 0x01
-
-#define ECC_ENABLE 0xe0
-#define ECC_ENABLE__FLAG 0x0001
-
-#define GLOBAL_INT_ENABLE 0xf0
-#define GLOBAL_INT_EN_FLAG 0x01
-
-#define WE_2_RE 0x100
-#define WE_2_RE__VALUE 0x003f
-
-#define ADDR_2_DATA 0x110
-#define ADDR_2_DATA__VALUE 0x003f
-
-#define RE_2_WE 0x120
-#define RE_2_WE__VALUE 0x003f
-
-#define ACC_CLKS 0x130
-#define ACC_CLKS__VALUE 0x000f
-
-#define NUMBER_OF_PLANES 0x140
-#define NUMBER_OF_PLANES__VALUE 0x0007
-
-#define PAGES_PER_BLOCK 0x150
-#define PAGES_PER_BLOCK__VALUE 0xffff
-
-#define DEVICE_WIDTH 0x160
-#define DEVICE_WIDTH__VALUE 0x0003
-
-#define DEVICE_MAIN_AREA_SIZE 0x170
-#define DEVICE_MAIN_AREA_SIZE__VALUE 0xffff
-
-#define DEVICE_SPARE_AREA_SIZE 0x180
-#define DEVICE_SPARE_AREA_SIZE__VALUE 0xffff
-
-#define TWO_ROW_ADDR_CYCLES 0x190
-#define TWO_ROW_ADDR_CYCLES__FLAG 0x0001
-
-#define MULTIPLANE_ADDR_RESTRICT 0x1a0
-#define MULTIPLANE_ADDR_RESTRICT__FLAG 0x0001
-
-#define ECC_CORRECTION 0x1b0
-#define ECC_CORRECTION__VALUE 0x001f
-
-#define READ_MODE 0x1c0
-#define READ_MODE__VALUE 0x000f
-
-#define WRITE_MODE 0x1d0
-#define WRITE_MODE__VALUE 0x000f
-
-#define COPYBACK_MODE 0x1e0
-#define COPYBACK_MODE__VALUE 0x000f
-
-#define RDWR_EN_LO_CNT 0x1f0
-#define RDWR_EN_LO_CNT__VALUE 0x001f
-
-#define RDWR_EN_HI_CNT 0x200
-#define RDWR_EN_HI_CNT__VALUE 0x001f
-
-#define MAX_RD_DELAY 0x210
-#define MAX_RD_DELAY__VALUE 0x000f
-
-#define CS_SETUP_CNT 0x220
-#define CS_SETUP_CNT__VALUE 0x001f
-
-#define SPARE_AREA_SKIP_BYTES 0x230
-#define SPARE_AREA_SKIP_BYTES__VALUE 0x003f
-
-#define SPARE_AREA_MARKER 0x240
-#define SPARE_AREA_MARKER__VALUE 0xffff
-
-#define DEVICES_CONNECTED 0x250
-#define DEVICES_CONNECTED__VALUE 0x0007
-
-#define DIE_MASK 0x260
-#define DIE_MASK__VALUE 0x00ff
-
-#define FIRST_BLOCK_OF_NEXT_PLANE 0x270
-#define FIRST_BLOCK_OF_NEXT_PLANE__VALUE 0xffff
-
-#define WRITE_PROTECT 0x280
-#define WRITE_PROTECT__FLAG 0x0001
-
-#define RE_2_RE 0x290
-#define RE_2_RE__VALUE 0x003f
-
-#define MANUFACTURER_ID 0x300
-#define MANUFACTURER_ID__VALUE 0x00ff
-
-#define DEVICE_ID 0x310
-#define DEVICE_ID__VALUE 0x00ff
-
-#define DEVICE_PARAM_0 0x320
-#define DEVICE_PARAM_0__VALUE 0x00ff
-
-#define DEVICE_PARAM_1 0x330
-#define DEVICE_PARAM_1__VALUE 0x00ff
-
-#define DEVICE_PARAM_2 0x340
-#define DEVICE_PARAM_2__VALUE 0x00ff
-
-#define LOGICAL_PAGE_DATA_SIZE 0x350
-#define LOGICAL_PAGE_DATA_SIZE__VALUE 0xffff
-
-#define LOGICAL_PAGE_SPARE_SIZE 0x360
-#define LOGICAL_PAGE_SPARE_SIZE__VALUE 0xffff
-
-#define REVISION 0x370
-#define REVISION__VALUE 0xffff
-
-#define ONFI_DEVICE_FEATURES 0x380
-#define ONFI_DEVICE_FEATURES__VALUE 0x003f
-
-#define ONFI_OPTIONAL_COMMANDS 0x390
-#define ONFI_OPTIONAL_COMMANDS__VALUE 0x003f
-
-#define ONFI_TIMING_MODE 0x3a0
-#define ONFI_TIMING_MODE__VALUE 0x003f
-
-#define ONFI_PGM_CACHE_TIMING_MODE 0x3b0
-#define ONFI_PGM_CACHE_TIMING_MODE__VALUE 0x003f
-
-#define ONFI_DEVICE_NO_OF_LUNS 0x3c0
-#define ONFI_DEVICE_NO_OF_LUNS__NO_OF_LUNS 0x00ff
-#define ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE 0x0100
-
-#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L 0x3d0
-#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L__VALUE 0xffff
-
-#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U 0x3e0
-#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U__VALUE 0xffff
-
-#define FEATURES 0x3f0
-#define FEATURES__N_BANKS 0x0003
-#define FEATURES__ECC_MAX_ERR 0x003c
-#define FEATURES__DMA 0x0040
-#define FEATURES__CMD_DMA 0x0080
-#define FEATURES__PARTITION 0x0100
-#define FEATURES__XDMA_SIDEBAND 0x0200
-#define FEATURES__GPREG 0x0400
-#define FEATURES__INDEX_ADDR 0x0800
-
-#define TRANSFER_MODE 0x400
-#define TRANSFER_MODE__VALUE 0x0003
-
-#define INTR_STATUS0 0x410
-#define INTR_STATUS0__ECC_TRANSACTION_DONE 0x0001
-#define INTR_STATUS0__ECC_ERR 0x0002
-#define INTR_STATUS0__DMA_CMD_COMP 0x0004
-#define INTR_STATUS0__TIME_OUT 0x0008
-#define INTR_STATUS0__PROGRAM_FAIL 0x0010
-#define INTR_STATUS0__ERASE_FAIL 0x0020
-#define INTR_STATUS0__LOAD_COMP 0x0040
-#define INTR_STATUS0__PROGRAM_COMP 0x0080
-#define INTR_STATUS0__ERASE_COMP 0x0100
-#define INTR_STATUS0__PIPE_CPYBCK_CMD_COMP 0x0200
-#define INTR_STATUS0__LOCKED_BLK 0x0400
-#define INTR_STATUS0__UNSUP_CMD 0x0800
-#define INTR_STATUS0__INT_ACT 0x1000
-#define INTR_STATUS0__RST_COMP 0x2000
-#define INTR_STATUS0__PIPE_CMD_ERR 0x4000
-#define INTR_STATUS0__PAGE_XFER_INC 0x8000
-
-#define INTR_EN0 0x420
-#define INTR_EN0__ECC_TRANSACTION_DONE 0x0001
-#define INTR_EN0__ECC_ERR 0x0002
-#define INTR_EN0__DMA_CMD_COMP 0x0004
-#define INTR_EN0__TIME_OUT 0x0008
-#define INTR_EN0__PROGRAM_FAIL 0x0010
-#define INTR_EN0__ERASE_FAIL 0x0020
-#define INTR_EN0__LOAD_COMP 0x0040
-#define INTR_EN0__PROGRAM_COMP 0x0080
-#define INTR_EN0__ERASE_COMP 0x0100
-#define INTR_EN0__PIPE_CPYBCK_CMD_COMP 0x0200
-#define INTR_EN0__LOCKED_BLK 0x0400
-#define INTR_EN0__UNSUP_CMD 0x0800
-#define INTR_EN0__INT_ACT 0x1000
-#define INTR_EN0__RST_COMP 0x2000
-#define INTR_EN0__PIPE_CMD_ERR 0x4000
-#define INTR_EN0__PAGE_XFER_INC 0x8000
-
-#define PAGE_CNT0 0x430
-#define PAGE_CNT0__VALUE 0x00ff
-
-#define ERR_PAGE_ADDR0 0x440
-#define ERR_PAGE_ADDR0__VALUE 0xffff
-
-#define ERR_BLOCK_ADDR0 0x450
-#define ERR_BLOCK_ADDR0__VALUE 0xffff
-
-#define INTR_STATUS1 0x460
-#define INTR_STATUS1__ECC_TRANSACTION_DONE 0x0001
-#define INTR_STATUS1__ECC_ERR 0x0002
-#define INTR_STATUS1__DMA_CMD_COMP 0x0004
-#define INTR_STATUS1__TIME_OUT 0x0008
-#define INTR_STATUS1__PROGRAM_FAIL 0x0010
-#define INTR_STATUS1__ERASE_FAIL 0x0020
-#define INTR_STATUS1__LOAD_COMP 0x0040
-#define INTR_STATUS1__PROGRAM_COMP 0x0080
-#define INTR_STATUS1__ERASE_COMP 0x0100
-#define INTR_STATUS1__PIPE_CPYBCK_CMD_COMP 0x0200
-#define INTR_STATUS1__LOCKED_BLK 0x0400
-#define INTR_STATUS1__UNSUP_CMD 0x0800
-#define INTR_STATUS1__INT_ACT 0x1000
-#define INTR_STATUS1__RST_COMP 0x2000
-#define INTR_STATUS1__PIPE_CMD_ERR 0x4000
-#define INTR_STATUS1__PAGE_XFER_INC 0x8000
-
-#define INTR_EN1 0x470
-#define INTR_EN1__ECC_TRANSACTION_DONE 0x0001
-#define INTR_EN1__ECC_ERR 0x0002
-#define INTR_EN1__DMA_CMD_COMP 0x0004
-#define INTR_EN1__TIME_OUT 0x0008
-#define INTR_EN1__PROGRAM_FAIL 0x0010
-#define INTR_EN1__ERASE_FAIL 0x0020
-#define INTR_EN1__LOAD_COMP 0x0040
-#define INTR_EN1__PROGRAM_COMP 0x0080
-#define INTR_EN1__ERASE_COMP 0x0100
-#define INTR_EN1__PIPE_CPYBCK_CMD_COMP 0x0200
-#define INTR_EN1__LOCKED_BLK 0x0400
-#define INTR_EN1__UNSUP_CMD 0x0800
-#define INTR_EN1__INT_ACT 0x1000
-#define INTR_EN1__RST_COMP 0x2000
-#define INTR_EN1__PIPE_CMD_ERR 0x4000
-#define INTR_EN1__PAGE_XFER_INC 0x8000
-
-#define PAGE_CNT1 0x480
-#define PAGE_CNT1__VALUE 0x00ff
-
-#define ERR_PAGE_ADDR1 0x490
-#define ERR_PAGE_ADDR1__VALUE 0xffff
-
-#define ERR_BLOCK_ADDR1 0x4a0
-#define ERR_BLOCK_ADDR1__VALUE 0xffff
-
-#define INTR_STATUS2 0x4b0
-#define INTR_STATUS2__ECC_TRANSACTION_DONE 0x0001
-#define INTR_STATUS2__ECC_ERR 0x0002
-#define INTR_STATUS2__DMA_CMD_COMP 0x0004
-#define INTR_STATUS2__TIME_OUT 0x0008
-#define INTR_STATUS2__PROGRAM_FAIL 0x0010
-#define INTR_STATUS2__ERASE_FAIL 0x0020
-#define INTR_STATUS2__LOAD_COMP 0x0040
-#define INTR_STATUS2__PROGRAM_COMP 0x0080
-#define INTR_STATUS2__ERASE_COMP 0x0100
-#define INTR_STATUS2__PIPE_CPYBCK_CMD_COMP 0x0200
-#define INTR_STATUS2__LOCKED_BLK 0x0400
-#define INTR_STATUS2__UNSUP_CMD 0x0800
-#define INTR_STATUS2__INT_ACT 0x1000
-#define INTR_STATUS2__RST_COMP 0x2000
-#define INTR_STATUS2__PIPE_CMD_ERR 0x4000
-#define INTR_STATUS2__PAGE_XFER_INC 0x8000
-
-#define INTR_EN2 0x4c0
-#define INTR_EN2__ECC_TRANSACTION_DONE 0x0001
-#define INTR_EN2__ECC_ERR 0x0002
-#define INTR_EN2__DMA_CMD_COMP 0x0004
-#define INTR_EN2__TIME_OUT 0x0008
-#define INTR_EN2__PROGRAM_FAIL 0x0010
-#define INTR_EN2__ERASE_FAIL 0x0020
-#define INTR_EN2__LOAD_COMP 0x0040
-#define INTR_EN2__PROGRAM_COMP 0x0080
-#define INTR_EN2__ERASE_COMP 0x0100
-#define INTR_EN2__PIPE_CPYBCK_CMD_COMP 0x0200
-#define INTR_EN2__LOCKED_BLK 0x0400
-#define INTR_EN2__UNSUP_CMD 0x0800
-#define INTR_EN2__INT_ACT 0x1000
-#define INTR_EN2__RST_COMP 0x2000
-#define INTR_EN2__PIPE_CMD_ERR 0x4000
-#define INTR_EN2__PAGE_XFER_INC 0x8000
-
-#define PAGE_CNT2 0x4d0
-#define PAGE_CNT2__VALUE 0x00ff
-
-#define ERR_PAGE_ADDR2 0x4e0
-#define ERR_PAGE_ADDR2__VALUE 0xffff
-
-#define ERR_BLOCK_ADDR2 0x4f0
-#define ERR_BLOCK_ADDR2__VALUE 0xffff
-
-#define INTR_STATUS3 0x500
-#define INTR_STATUS3__ECC_TRANSACTION_DONE 0x0001
-#define INTR_STATUS3__ECC_ERR 0x0002
-#define INTR_STATUS3__DMA_CMD_COMP 0x0004
-#define INTR_STATUS3__TIME_OUT 0x0008
-#define INTR_STATUS3__PROGRAM_FAIL 0x0010
-#define INTR_STATUS3__ERASE_FAIL 0x0020
-#define INTR_STATUS3__LOAD_COMP 0x0040
-#define INTR_STATUS3__PROGRAM_COMP 0x0080
-#define INTR_STATUS3__ERASE_COMP 0x0100
-#define INTR_STATUS3__PIPE_CPYBCK_CMD_COMP 0x0200
-#define INTR_STATUS3__LOCKED_BLK 0x0400
-#define INTR_STATUS3__UNSUP_CMD 0x0800
-#define INTR_STATUS3__INT_ACT 0x1000
-#define INTR_STATUS3__RST_COMP 0x2000
-#define INTR_STATUS3__PIPE_CMD_ERR 0x4000
-#define INTR_STATUS3__PAGE_XFER_INC 0x8000
-
-#define INTR_EN3 0x510
-#define INTR_EN3__ECC_TRANSACTION_DONE 0x0001
-#define INTR_EN3__ECC_ERR 0x0002
-#define INTR_EN3__DMA_CMD_COMP 0x0004
-#define INTR_EN3__TIME_OUT 0x0008
-#define INTR_EN3__PROGRAM_FAIL 0x0010
-#define INTR_EN3__ERASE_FAIL 0x0020
-#define INTR_EN3__LOAD_COMP 0x0040
-#define INTR_EN3__PROGRAM_COMP 0x0080
-#define INTR_EN3__ERASE_COMP 0x0100
-#define INTR_EN3__PIPE_CPYBCK_CMD_COMP 0x0200
-#define INTR_EN3__LOCKED_BLK 0x0400
-#define INTR_EN3__UNSUP_CMD 0x0800
-#define INTR_EN3__INT_ACT 0x1000
-#define INTR_EN3__RST_COMP 0x2000
-#define INTR_EN3__PIPE_CMD_ERR 0x4000
-#define INTR_EN3__PAGE_XFER_INC 0x8000
-
-#define PAGE_CNT3 0x520
-#define PAGE_CNT3__VALUE 0x00ff
-
-#define ERR_PAGE_ADDR3 0x530
-#define ERR_PAGE_ADDR3__VALUE 0xffff
-
-#define ERR_BLOCK_ADDR3 0x540
-#define ERR_BLOCK_ADDR3__VALUE 0xffff
-
-#define DATA_INTR 0x550
-#define DATA_INTR__WRITE_SPACE_AV 0x0001
-#define DATA_INTR__READ_DATA_AV 0x0002
-
-#define DATA_INTR_EN 0x560
-#define DATA_INTR_EN__WRITE_SPACE_AV 0x0001
-#define DATA_INTR_EN__READ_DATA_AV 0x0002
-
-#define GPREG_0 0x570
-#define GPREG_0__VALUE 0xffff
-
-#define GPREG_1 0x580
-#define GPREG_1__VALUE 0xffff
-
-#define GPREG_2 0x590
-#define GPREG_2__VALUE 0xffff
-
-#define GPREG_3 0x5a0
-#define GPREG_3__VALUE 0xffff
-
-#define ECC_THRESHOLD 0x600
-#define ECC_THRESHOLD__VALUE 0x03ff
-
-#define ECC_ERROR_BLOCK_ADDRESS 0x610
-#define ECC_ERROR_BLOCK_ADDRESS__VALUE 0xffff
-
-#define ECC_ERROR_PAGE_ADDRESS 0x620
-#define ECC_ERROR_PAGE_ADDRESS__VALUE 0x0fff
-#define ECC_ERROR_PAGE_ADDRESS__BANK 0xf000
-
-#define ECC_ERROR_ADDRESS 0x630
-#define ECC_ERROR_ADDRESS__OFFSET 0x0fff
-#define ECC_ERROR_ADDRESS__SECTOR_NR 0xf000
-
-#define ERR_CORRECTION_INFO 0x640
-#define ERR_CORRECTION_INFO__BYTEMASK 0x00ff
-#define ERR_CORRECTION_INFO__DEVICE_NR 0x0f00
-#define ERR_CORRECTION_INFO__ERROR_TYPE 0x4000
-#define ERR_CORRECTION_INFO__LAST_ERR_INFO 0x8000
-
-#define DMA_ENABLE 0x700
-#define DMA_ENABLE__FLAG 0x0001
-
-#define IGNORE_ECC_DONE 0x710
-#define IGNORE_ECC_DONE__FLAG 0x0001
-
-#define DMA_INTR 0x720
-#define DMA_INTR__TARGET_ERROR 0x0001
-#define DMA_INTR__DESC_COMP_CHANNEL0 0x0002
-#define DMA_INTR__DESC_COMP_CHANNEL1 0x0004
-#define DMA_INTR__DESC_COMP_CHANNEL2 0x0008
-#define DMA_INTR__DESC_COMP_CHANNEL3 0x0010
-#define DMA_INTR__MEMCOPY_DESC_COMP 0x0020
-
-#define DMA_INTR_EN 0x730
-#define DMA_INTR_EN__TARGET_ERROR 0x0001
-#define DMA_INTR_EN__DESC_COMP_CHANNEL0 0x0002
-#define DMA_INTR_EN__DESC_COMP_CHANNEL1 0x0004
-#define DMA_INTR_EN__DESC_COMP_CHANNEL2 0x0008
-#define DMA_INTR_EN__DESC_COMP_CHANNEL3 0x0010
-#define DMA_INTR_EN__MEMCOPY_DESC_COMP 0x0020
-
-#define TARGET_ERR_ADDR_LO 0x740
-#define TARGET_ERR_ADDR_LO__VALUE 0xffff
-
-#define TARGET_ERR_ADDR_HI 0x750
-#define TARGET_ERR_ADDR_HI__VALUE 0xffff
-
-#define CHNL_ACTIVE 0x760
-#define CHNL_ACTIVE__CHANNEL0 0x0001
-#define CHNL_ACTIVE__CHANNEL1 0x0002
-#define CHNL_ACTIVE__CHANNEL2 0x0004
-#define CHNL_ACTIVE__CHANNEL3 0x0008
-
-#define ACTIVE_SRC_ID 0x800
-#define ACTIVE_SRC_ID__VALUE 0x00ff
-
-#define PTN_INTR 0x810
-#define PTN_INTR__CONFIG_ERROR 0x0001
-#define PTN_INTR__ACCESS_ERROR_BANK0 0x0002
-#define PTN_INTR__ACCESS_ERROR_BANK1 0x0004
-#define PTN_INTR__ACCESS_ERROR_BANK2 0x0008
-#define PTN_INTR__ACCESS_ERROR_BANK3 0x0010
-#define PTN_INTR__REG_ACCESS_ERROR 0x0020
-
-#define PTN_INTR_EN 0x820
-#define PTN_INTR_EN__CONFIG_ERROR 0x0001
-#define PTN_INTR_EN__ACCESS_ERROR_BANK0 0x0002
-#define PTN_INTR_EN__ACCESS_ERROR_BANK1 0x0004
-#define PTN_INTR_EN__ACCESS_ERROR_BANK2 0x0008
-#define PTN_INTR_EN__ACCESS_ERROR_BANK3 0x0010
-#define PTN_INTR_EN__REG_ACCESS_ERROR 0x0020
-
-#define PERM_SRC_ID_0 0x830
-#define PERM_SRC_ID_0__SRCID 0x00ff
-#define PERM_SRC_ID_0__DIRECT_ACCESS_ACTIVE 0x0800
-#define PERM_SRC_ID_0__WRITE_ACTIVE 0x2000
-#define PERM_SRC_ID_0__READ_ACTIVE 0x4000
-#define PERM_SRC_ID_0__PARTITION_VALID 0x8000
-
-#define MIN_BLK_ADDR_0 0x840
-#define MIN_BLK_ADDR_0__VALUE 0xffff
-
-#define MAX_BLK_ADDR_0 0x850
-#define MAX_BLK_ADDR_0__VALUE 0xffff
-
-#define MIN_MAX_BANK_0 0x860
-#define MIN_MAX_BANK_0__MIN_VALUE 0x0003
-#define MIN_MAX_BANK_0__MAX_VALUE 0x000c
-
-#define PERM_SRC_ID_1 0x870
-#define PERM_SRC_ID_1__SRCID 0x00ff
-#define PERM_SRC_ID_1__DIRECT_ACCESS_ACTIVE 0x0800
-#define PERM_SRC_ID_1__WRITE_ACTIVE 0x2000
-#define PERM_SRC_ID_1__READ_ACTIVE 0x4000
-#define PERM_SRC_ID_1__PARTITION_VALID 0x8000
-
-#define MIN_BLK_ADDR_1 0x880
-#define MIN_BLK_ADDR_1__VALUE 0xffff
-
-#define MAX_BLK_ADDR_1 0x890
-#define MAX_BLK_ADDR_1__VALUE 0xffff
-
-#define MIN_MAX_BANK_1 0x8a0
-#define MIN_MAX_BANK_1__MIN_VALUE 0x0003
-#define MIN_MAX_BANK_1__MAX_VALUE 0x000c
-
-#define PERM_SRC_ID_2 0x8b0
-#define PERM_SRC_ID_2__SRCID 0x00ff
-#define PERM_SRC_ID_2__DIRECT_ACCESS_ACTIVE 0x0800
-#define PERM_SRC_ID_2__WRITE_ACTIVE 0x2000
-#define PERM_SRC_ID_2__READ_ACTIVE 0x4000
-#define PERM_SRC_ID_2__PARTITION_VALID 0x8000
-
-#define MIN_BLK_ADDR_2 0x8c0
-#define MIN_BLK_ADDR_2__VALUE 0xffff
-
-#define MAX_BLK_ADDR_2 0x8d0
-#define MAX_BLK_ADDR_2__VALUE 0xffff
-
-#define MIN_MAX_BANK_2 0x8e0
-#define MIN_MAX_BANK_2__MIN_VALUE 0x0003
-#define MIN_MAX_BANK_2__MAX_VALUE 0x000c
-
-#define PERM_SRC_ID_3 0x8f0
-#define PERM_SRC_ID_3__SRCID 0x00ff
-#define PERM_SRC_ID_3__DIRECT_ACCESS_ACTIVE 0x0800
-#define PERM_SRC_ID_3__WRITE_ACTIVE 0x2000
-#define PERM_SRC_ID_3__READ_ACTIVE 0x4000
-#define PERM_SRC_ID_3__PARTITION_VALID 0x8000
-
-#define MIN_BLK_ADDR_3 0x900
-#define MIN_BLK_ADDR_3__VALUE 0xffff
-
-#define MAX_BLK_ADDR_3 0x910
-#define MAX_BLK_ADDR_3__VALUE 0xffff
-
-#define MIN_MAX_BANK_3 0x920
-#define MIN_MAX_BANK_3__MIN_VALUE 0x0003
-#define MIN_MAX_BANK_3__MAX_VALUE 0x000c
-
-#define PERM_SRC_ID_4 0x930
-#define PERM_SRC_ID_4__SRCID 0x00ff
-#define PERM_SRC_ID_4__DIRECT_ACCESS_ACTIVE 0x0800
-#define PERM_SRC_ID_4__WRITE_ACTIVE 0x2000
-#define PERM_SRC_ID_4__READ_ACTIVE 0x4000
-#define PERM_SRC_ID_4__PARTITION_VALID 0x8000
-
-#define MIN_BLK_ADDR_4 0x940
-#define MIN_BLK_ADDR_4__VALUE 0xffff
-
-#define MAX_BLK_ADDR_4 0x950
-#define MAX_BLK_ADDR_4__VALUE 0xffff
-
-#define MIN_MAX_BANK_4 0x960
-#define MIN_MAX_BANK_4__MIN_VALUE 0x0003
-#define MIN_MAX_BANK_4__MAX_VALUE 0x000c
-
-#define PERM_SRC_ID_5 0x970
-#define PERM_SRC_ID_5__SRCID 0x00ff
-#define PERM_SRC_ID_5__DIRECT_ACCESS_ACTIVE 0x0800
-#define PERM_SRC_ID_5__WRITE_ACTIVE 0x2000
-#define PERM_SRC_ID_5__READ_ACTIVE 0x4000
-#define PERM_SRC_ID_5__PARTITION_VALID 0x8000
-
-#define MIN_BLK_ADDR_5 0x980
-#define MIN_BLK_ADDR_5__VALUE 0xffff
-
-#define MAX_BLK_ADDR_5 0x990
-#define MAX_BLK_ADDR_5__VALUE 0xffff
-
-#define MIN_MAX_BANK_5 0x9a0
-#define MIN_MAX_BANK_5__MIN_VALUE 0x0003
-#define MIN_MAX_BANK_5__MAX_VALUE 0x000c
-
-#define PERM_SRC_ID_6 0x9b0
-#define PERM_SRC_ID_6__SRCID 0x00ff
-#define PERM_SRC_ID_6__DIRECT_ACCESS_ACTIVE 0x0800
-#define PERM_SRC_ID_6__WRITE_ACTIVE 0x2000
-#define PERM_SRC_ID_6__READ_ACTIVE 0x4000
-#define PERM_SRC_ID_6__PARTITION_VALID 0x8000
-
-#define MIN_BLK_ADDR_6 0x9c0
-#define MIN_BLK_ADDR_6__VALUE 0xffff
-
-#define MAX_BLK_ADDR_6 0x9d0
-#define MAX_BLK_ADDR_6__VALUE 0xffff
-
-#define MIN_MAX_BANK_6 0x9e0
-#define MIN_MAX_BANK_6__MIN_VALUE 0x0003
-#define MIN_MAX_BANK_6__MAX_VALUE 0x000c
-
-#define PERM_SRC_ID_7 0x9f0
-#define PERM_SRC_ID_7__SRCID 0x00ff
-#define PERM_SRC_ID_7__DIRECT_ACCESS_ACTIVE 0x0800
-#define PERM_SRC_ID_7__WRITE_ACTIVE 0x2000
-#define PERM_SRC_ID_7__READ_ACTIVE 0x4000
-#define PERM_SRC_ID_7__PARTITION_VALID 0x8000
-
-#define MIN_BLK_ADDR_7 0xa00
-#define MIN_BLK_ADDR_7__VALUE 0xffff
-
-#define MAX_BLK_ADDR_7 0xa10
-#define MAX_BLK_ADDR_7__VALUE 0xffff
-
-#define MIN_MAX_BANK_7 0xa20
-#define MIN_MAX_BANK_7__MIN_VALUE 0x0003
-#define MIN_MAX_BANK_7__MAX_VALUE 0x000c
diff --git a/drivers/staging/spectra/spectraswconfig.h b/drivers/staging/spectra/spectraswconfig.h
deleted file mode 100644
index 17259469e955..000000000000
--- a/drivers/staging/spectra/spectraswconfig.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * NAND Flash Controller Device Driver
- * Copyright (c) 2009, Intel Corporation and its suppliers.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- */
-
-#ifndef _SPECTRASWCONFIG_
-#define _SPECTRASWCONFIG_
-
-/* NAND driver version */
-#define GLOB_VERSION "driver version 20100311"
-
-
-/***** Common Parameters *****/
-#define RETRY_TIMES 3
-
-#define READ_BADBLOCK_INFO 1
-#define READBACK_VERIFY 0
-#define AUTO_FORMAT_FLASH 0
-
-/***** Cache Parameters *****/
-#define CACHE_ITEM_NUM 128
-#define BLK_NUM_FOR_L2_CACHE 16
-
-/***** Block Table Parameters *****/
-#define BLOCK_TABLE_INDEX 0
-
-/***** Wear Leveling Parameters *****/
-#define WEAR_LEVELING_GATE 0x10
-#define WEAR_LEVELING_BLOCK_NUM 10
-
-#define DEBUG_BNDRY 0
-
-/***** Product Feature Support *****/
-#define FLASH_EMU defined(CONFIG_SPECTRA_EMU)
-#define FLASH_NAND defined(CONFIG_SPECTRA_MRST_HW)
-#define FLASH_MTD defined(CONFIG_SPECTRA_MTD)
-#define CMD_DMA defined(CONFIG_SPECTRA_MRST_HW_DMA)
-
-#define SPECTRA_PARTITION_ID 0
-
-/* Enable this macro if the number of flash blocks is larger than 16K. */
-#define SUPPORT_LARGE_BLOCKNUM 1
-
-/**** Block Table and Reserved Block Parameters *****/
-#define SPECTRA_START_BLOCK 3
-//#define NUM_FREE_BLOCKS_GATE 30
-#define NUM_FREE_BLOCKS_GATE 60
-
-/**** Hardware Parameters ****/
-#define GLOB_HWCTL_REG_BASE 0xFFA40000
-#define GLOB_HWCTL_REG_SIZE 4096
-
-#define GLOB_HWCTL_MEM_BASE 0xFFA48000
-#define GLOB_HWCTL_MEM_SIZE 4096
-
-/* KBV - Updated to LNW scratch register address */
-#define SCRATCH_REG_ADDR 0xFF108018
-#define SCRATCH_REG_SIZE 64
-
-#define GLOB_HWCTL_DEFAULT_BLKS 2048
-
-#define SUPPORT_15BITECC 1
-#define SUPPORT_8BITECC 1
-
-#define ONFI_BLOOM_TIME 0
-#define MODE5_WORKAROUND 1
-
-#endif /*_SPECTRASWCONFIG_*/
diff --git a/drivers/staging/tidspbridge/Kconfig b/drivers/staging/tidspbridge/Kconfig
index 93de4f2e8bf8..21a559ecbbb1 100644
--- a/drivers/staging/tidspbridge/Kconfig
+++ b/drivers/staging/tidspbridge/Kconfig
@@ -78,7 +78,7 @@ config TIDSPBRIDGE_NTFY_PWRERR
bool "Notify power errors"
depends on TIDSPBRIDGE
help
- Enable notifications to registered clients on the event of power errror
+ Enable notifications to registered clients on the event of power error
trying to suspend bridge driver. Say Y, to signal this event as a fatal
error, this will require a bridge restart to recover.
diff --git a/drivers/staging/tidspbridge/core/dsp-clock.c b/drivers/staging/tidspbridge/core/dsp-clock.c
index 3d1279c424a8..7eb56178fb64 100644
--- a/drivers/staging/tidspbridge/core/dsp-clock.c
+++ b/drivers/staging/tidspbridge/core/dsp-clock.c
@@ -54,6 +54,7 @@
/* Bridge GPT id (1 - 4), DM Timer id (5 - 8) */
#define DMT_ID(id) ((id) + 4)
+#define DM_TIMER_CLOCKS 4
/* Bridge MCBSP id (6 - 10), OMAP Mcbsp id (0 - 4) */
#define MCBSP_ID(id) ((id) - 6)
@@ -114,8 +115,13 @@ static s8 get_clk_type(u8 id)
*/
void dsp_clk_exit(void)
{
+ int i;
+
dsp_clock_disable_all(dsp_clocks);
+ for (i = 0; i < DM_TIMER_CLOCKS; i++)
+ omap_dm_timer_free(timer[i]);
+
clk_put(iva2_clk);
clk_put(ssi.sst_fck);
clk_put(ssi.ssr_fck);
@@ -130,9 +136,13 @@ void dsp_clk_exit(void)
void dsp_clk_init(void)
{
static struct platform_device dspbridge_device;
+ int i, id;
dspbridge_device.dev.bus = &platform_bus_type;
+ for (i = 0, id = 5; i < DM_TIMER_CLOCKS; i++, id++)
+ timer[i] = omap_dm_timer_request_specific(id);
+
iva2_clk = clk_get(&dspbridge_device.dev, "iva2_ck");
if (IS_ERR(iva2_clk))
dev_err(bridge, "failed to get iva2 clock %p\n", iva2_clk);
@@ -204,8 +214,7 @@ int dsp_clk_enable(enum dsp_clk_id clk_id)
clk_enable(iva2_clk);
break;
case GPT_CLK:
- timer[clk_id - 1] =
- omap_dm_timer_request_specific(DMT_ID(clk_id));
+ status = omap_dm_timer_start(timer[clk_id - 1]);
break;
#ifdef CONFIG_OMAP_MCBSP
case MCBSP_CLK:
@@ -281,7 +290,7 @@ int dsp_clk_disable(enum dsp_clk_id clk_id)
clk_disable(iva2_clk);
break;
case GPT_CLK:
- omap_dm_timer_free(timer[clk_id - 1]);
+ status = omap_dm_timer_stop(timer[clk_id - 1]);
break;
#ifdef CONFIG_OMAP_MCBSP
case MCBSP_CLK:
diff --git a/drivers/staging/tidspbridge/rmgr/dbdcd.c b/drivers/staging/tidspbridge/rmgr/dbdcd.c
index a7e407e25187..fda240214cd6 100644
--- a/drivers/staging/tidspbridge/rmgr/dbdcd.c
+++ b/drivers/staging/tidspbridge/rmgr/dbdcd.c
@@ -285,7 +285,7 @@ int dcd_enumerate_object(s32 index, enum dsp_dcdobjtype obj_type,
enum_refs = 0;
/*
- * TODO: Revisit, this is not an errror case but code
+ * TODO: Revisit, this is not an error case but code
* expects non-zero value.
*/
status = ENODATA;
diff --git a/drivers/staging/tidspbridge/rmgr/drv_interface.c b/drivers/staging/tidspbridge/rmgr/drv_interface.c
index c43c7e3421c8..76cfc6edecd9 100644
--- a/drivers/staging/tidspbridge/rmgr/drv_interface.c
+++ b/drivers/staging/tidspbridge/rmgr/drv_interface.c
@@ -24,11 +24,7 @@
#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
-
-#ifdef MODULE
#include <linux/module.h>
-#endif
-
#include <linux/device.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
diff --git a/drivers/staging/usbip/stub_dev.c b/drivers/staging/usbip/stub_dev.c
index 55c0b510889c..03420e25d9c6 100644
--- a/drivers/staging/usbip/stub_dev.c
+++ b/drivers/staging/usbip/stub_dev.c
@@ -109,11 +109,6 @@ static ssize_t store_sockfd(struct device *dev, struct device_attribute *attr,
spin_unlock(&sdev->ud.lock);
return -EINVAL;
}
-#if 0
- setnodelay(socket);
- setkeepalive(socket);
- setreuse(socket);
-#endif
sdev->ud.tcp_socket = socket;
spin_unlock(&sdev->ud.lock);
diff --git a/drivers/staging/usbip/stub_rx.c b/drivers/staging/usbip/stub_rx.c
index 6b4e3e182de8..27ac363d1cfa 100644
--- a/drivers/staging/usbip/stub_rx.c
+++ b/drivers/staging/usbip/stub_rx.c
@@ -564,7 +564,7 @@ static void stub_rx_pdu(struct usbip_device *ud)
memset(&pdu, 0, sizeof(pdu));
/* 1. receive a pdu header */
- ret = usbip_xmit(0, ud->tcp_socket, (char *) &pdu, sizeof(pdu), 0);
+ ret = usbip_recv(ud->tcp_socket, &pdu, sizeof(pdu));
if (ret != sizeof(pdu)) {
dev_err(dev, "recv a header, %d\n", ret);
usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
diff --git a/drivers/staging/usbip/usbip_common.c b/drivers/staging/usbip/usbip_common.c
index 3b7a847f4657..d93e7f1f7973 100644
--- a/drivers/staging/usbip/usbip_common.c
+++ b/drivers/staging/usbip/usbip_common.c
@@ -334,9 +334,8 @@ void usbip_dump_header(struct usbip_header *pdu)
}
EXPORT_SYMBOL_GPL(usbip_dump_header);
-/* Send/receive messages over TCP/IP. I refer drivers/block/nbd.c */
-int usbip_xmit(int send, struct socket *sock, char *buf, int size,
- int msg_flags)
+/* Receive data over TCP/IP. */
+int usbip_recv(struct socket *sock, void *buf, int size)
{
int result;
struct msghdr msg;
@@ -355,19 +354,6 @@ int usbip_xmit(int send, struct socket *sock, char *buf, int size,
return -EINVAL;
}
- if (usbip_dbg_flag_xmit) {
- if (send) {
- if (!in_interrupt())
- pr_debug("%-10s:", current->comm);
- else
- pr_debug("interrupt :");
-
- pr_debug("sending... , sock %p, buf %p, size %d, "
- "msg_flags %d\n", sock, buf, size, msg_flags);
- usbip_dump_buffer(buf, size);
- }
- }
-
do {
sock->sk->sk_allocation = GFP_NOIO;
iov.iov_base = buf;
@@ -377,42 +363,30 @@ int usbip_xmit(int send, struct socket *sock, char *buf, int size,
msg.msg_control = NULL;
msg.msg_controllen = 0;
msg.msg_namelen = 0;
- msg.msg_flags = msg_flags | MSG_NOSIGNAL;
-
- if (send)
- result = kernel_sendmsg(sock, &msg, &iov, 1, size);
- else
- result = kernel_recvmsg(sock, &msg, &iov, 1, size,
- MSG_WAITALL);
+ msg.msg_flags = MSG_NOSIGNAL;
+ result = kernel_recvmsg(sock, &msg, &iov, 1, size, MSG_WAITALL);
if (result <= 0) {
- pr_debug("%s sock %p buf %p size %u ret %d total %d\n",
- send ? "send" : "receive", sock, buf, size,
- result, total);
+ pr_debug("receive sock %p buf %p size %u ret %d total %d\n",
+ sock, buf, size, result, total);
goto err;
}
size -= result;
buf += result;
total += result;
-
} while (size > 0);
if (usbip_dbg_flag_xmit) {
- if (!send) {
- if (!in_interrupt())
- pr_debug("%-10s:", current->comm);
- else
- pr_debug("interrupt :");
-
- pr_debug("receiving....\n");
- usbip_dump_buffer(bp, osize);
- pr_debug("received, osize %d ret %d size %d total %d\n",
- osize, result, size, total);
- }
+ if (!in_interrupt())
+ pr_debug("%-10s:", current->comm);
+ else
+ pr_debug("interrupt :");
- if (send)
- pr_debug("send, total %d\n", total);
+ pr_debug("receiving....\n");
+ usbip_dump_buffer(bp, osize);
+ pr_debug("received, osize %d ret %d size %d total %d\n",
+ osize, result, size, total);
}
return total;
@@ -420,7 +394,7 @@ int usbip_xmit(int send, struct socket *sock, char *buf, int size,
err:
return result;
}
-EXPORT_SYMBOL_GPL(usbip_xmit);
+EXPORT_SYMBOL_GPL(usbip_recv);
struct socket *sockfd_to_socket(unsigned int sockfd)
{
@@ -712,7 +686,7 @@ int usbip_recv_iso(struct usbip_device *ud, struct urb *urb)
if (!buff)
return -ENOMEM;
- ret = usbip_xmit(0, ud->tcp_socket, buff, size, 0);
+ ret = usbip_recv(ud->tcp_socket, buff, size);
if (ret != size) {
dev_err(&urb->dev->dev, "recv iso_frame_descriptor, %d\n",
ret);
@@ -823,8 +797,7 @@ int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb)
if (!(size > 0))
return 0;
- ret = usbip_xmit(0, ud->tcp_socket, (char *)urb->transfer_buffer,
- size, 0);
+ ret = usbip_recv(ud->tcp_socket, urb->transfer_buffer, size);
if (ret != size) {
dev_err(&urb->dev->dev, "recv xbuf, %d\n", ret);
if (ud->side == USBIP_STUB) {
diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
index be216175ae87..b8f8c48b8a72 100644
--- a/drivers/staging/usbip/usbip_common.h
+++ b/drivers/staging/usbip/usbip_common.h
@@ -292,21 +292,11 @@ struct usbip_device {
} eh_ops;
};
-#if 0
-int usbip_sendmsg(struct socket *, struct msghdr *, int);
-int set_sockaddr(struct socket *socket, struct sockaddr_storage *ss);
-int setnodelay(struct socket *);
-int setquickack(struct socket *);
-int setkeepalive(struct socket *socket);
-void setreuse(struct socket *);
-#endif
-
/* usbip_common.c */
void usbip_dump_urb(struct urb *purb);
void usbip_dump_header(struct usbip_header *pdu);
-int usbip_xmit(int send, struct socket *sock, char *buf, int size,
- int msg_flags);
+int usbip_recv(struct socket *sock, void *buf, int size);
struct socket *sockfd_to_socket(unsigned int sockfd);
void usbip_pack_pdu(struct usbip_header *pdu, struct urb *urb, int cmd,
@@ -337,9 +327,4 @@ static inline int interface_to_devnum(struct usb_interface *interface)
return udev->devnum;
}
-static inline int interface_to_infnum(struct usb_interface *interface)
-{
- return interface->cur_altsetting->desc.bInterfaceNumber;
-}
-
#endif /* __USBIP_COMMON_H */
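
For reference, the calling pattern for the slimmed-down receive helper, as it appears in the stub_rx.c and vhci_rx.c hunks of this diff (ud is a struct usbip_device with an attached tcp_socket; a short read is treated as a TCP error):

	struct usbip_header pdu;
	int ret;

	memset(&pdu, 0, sizeof(pdu));
	/* usbip_recv() loops internally until 'size' bytes arrive or recv fails */
	ret = usbip_recv(ud->tcp_socket, &pdu, sizeof(pdu));
	if (ret != sizeof(pdu))
		usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
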
diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
index 09c44abb89e8..3f511b47563d 100644
--- a/drivers/staging/usbip/vhci_rx.c
+++ b/drivers/staging/usbip/vhci_rx.c
@@ -68,6 +68,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
{
struct usbip_device *ud = &vdev->ud;
struct urb *urb;
+ unsigned long flags;
spin_lock(&vdev->priv_lock);
urb = pickup_urb_and_free_priv(vdev, pdu->base.seqnum);
@@ -101,9 +102,9 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
usbip_dbg_vhci_rx("now giveback urb %p\n", urb);
- spin_lock(&the_controller->lock);
+ spin_lock_irqsave(&the_controller->lock, flags);
usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb);
- spin_unlock(&the_controller->lock);
+ spin_unlock_irqrestore(&the_controller->lock, flags);
usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb, urb->status);
@@ -141,6 +142,7 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
{
struct vhci_unlink *unlink;
struct urb *urb;
+ unsigned long flags;
usbip_dump_header(pdu);
@@ -170,9 +172,9 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
urb->status = pdu->u.ret_unlink.status;
pr_info("urb->status %d\n", urb->status);
- spin_lock(&the_controller->lock);
+ spin_lock_irqsave(&the_controller->lock, flags);
usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb);
- spin_unlock(&the_controller->lock);
+ spin_unlock_irqrestore(&the_controller->lock, flags);
usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb,
urb->status);
@@ -204,7 +206,7 @@ static void vhci_rx_pdu(struct usbip_device *ud)
memset(&pdu, 0, sizeof(pdu));
/* 1. receive a pdu header */
- ret = usbip_xmit(0, ud->tcp_socket, (char *) &pdu, sizeof(pdu), 0);
+ ret = usbip_recv(ud->tcp_socket, &pdu, sizeof(pdu));
if (ret < 0) {
if (ret == -ECONNRESET)
pr_info("connection reset by peer\n");
diff --git a/drivers/staging/vme/TODO b/drivers/staging/vme/TODO
index 82c222b4a146..79f00333e7ef 100644
--- a/drivers/staging/vme/TODO
+++ b/drivers/staging/vme/TODO
@@ -1,70 +1,5 @@
TODO
====
-API
-===
-
-Master window broadcast select mask
------------------------------------
-
-API currently provides no method to set or get Broadcast Select mask. Suggest
-somthing like:
-
- int vme_master_bmsk_set (struct vme_resource *res, int mask);
- int vme_master_bmsk_get (struct vme_resource *res, int *mask);
-
-
-Interrupt Generation
---------------------
-
-Add optional timeout when waiting for an IACK.
-
-
-CR/CSR Buffer
--------------
-
-The VME API provides no functions to access the buffer mapped into the CR/CSR
-space.
-
-
-Mailboxes
----------
-
-Whilst not part of the VME specification, they are provided by a number of
-chips. They are currently not supported at all by the API.
-
-
-Core
-====
-
-- Improve generic sanity checks (Such as does an offset and size fit within a
- window and parameter checking).
-
-Bridge Support
-==============
-
-Tempe (tsi148)
---------------
-
-- 2eSST Broadcast mode.
-- Mailboxes unsupported.
-- Improve error detection.
-- Control of prefetch size, threshold.
-- Arbiter control
-- Requestor control
-
-Universe II (ca91c142)
-----------------------
-
-- Mailboxes unsupported.
-- Error Detection.
-- Control of prefetch size, threshold.
-- Arbiter control
-- Requestor control
-- Slot detection
-
-Universe I (ca91x042)
----------------------
-
-Currently completely unsupported.
+- Add one or more device drivers which use the VME framework.
diff --git a/drivers/staging/vme/bridges/vme_ca91cx42.c b/drivers/staging/vme/bridges/vme_ca91cx42.c
index 0e4feac138eb..515b8b8e32a8 100644
--- a/drivers/staging/vme/bridges/vme_ca91cx42.c
+++ b/drivers/staging/vme/bridges/vme_ca91cx42.c
@@ -338,7 +338,7 @@ static int ca91cx42_irq_generate(struct vme_bridge *ca91cx42_bridge, int level,
static int ca91cx42_slave_set(struct vme_slave_resource *image, int enabled,
unsigned long long vme_base, unsigned long long size,
- dma_addr_t pci_base, vme_address_t aspace, vme_cycle_t cycle)
+ dma_addr_t pci_base, u32 aspace, u32 cycle)
{
unsigned int i, addr = 0, granularity;
unsigned int temp_ctl = 0;
@@ -444,7 +444,7 @@ static int ca91cx42_slave_set(struct vme_slave_resource *image, int enabled,
static int ca91cx42_slave_get(struct vme_slave_resource *image, int *enabled,
unsigned long long *vme_base, unsigned long long *size,
- dma_addr_t *pci_base, vme_address_t *aspace, vme_cycle_t *cycle)
+ dma_addr_t *pci_base, u32 *aspace, u32 *cycle)
{
unsigned int i, granularity = 0, ctl = 0;
unsigned long long vme_bound, pci_offset;
@@ -595,8 +595,8 @@ static void ca91cx42_free_resource(struct vme_master_resource *image)
static int ca91cx42_master_set(struct vme_master_resource *image, int enabled,
- unsigned long long vme_base, unsigned long long size,
- vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
+ unsigned long long vme_base, unsigned long long size, u32 aspace,
+ u32 cycle, u32 dwidth)
{
int retval = 0;
unsigned int i, granularity = 0;
@@ -753,7 +753,7 @@ err_window:
static int __ca91cx42_master_get(struct vme_master_resource *image,
int *enabled, unsigned long long *vme_base, unsigned long long *size,
- vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
+ u32 *aspace, u32 *cycle, u32 *dwidth)
{
unsigned int i, ctl;
unsigned long long pci_base, pci_bound, vme_offset;
@@ -839,8 +839,8 @@ static int __ca91cx42_master_get(struct vme_master_resource *image,
}
static int ca91cx42_master_get(struct vme_master_resource *image, int *enabled,
- unsigned long long *vme_base, unsigned long long *size,
- vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
+ unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
+ u32 *cycle, u32 *dwidth)
{
int retval;
@@ -876,13 +876,13 @@ static ssize_t ca91cx42_master_read(struct vme_master_resource *image,
* maximal configured data cycle is used and splits it
* automatically for non-aligned addresses.
*/
- if ((int)addr & 0x1) {
+ if ((uintptr_t)addr & 0x1) {
*(u8 *)buf = ioread8(addr);
done += 1;
if (done == count)
goto out;
}
- if ((int)addr & 0x2) {
+ if ((uintptr_t)addr & 0x2) {
if ((count - done) < 2) {
*(u8 *)(buf + done) = ioread8(addr + done);
done += 1;
@@ -930,13 +930,13 @@ static ssize_t ca91cx42_master_write(struct vme_master_resource *image,
/* Here we apply for the same strategy we do in master_read
* function in order to assure D16 cycle when required.
*/
- if ((int)addr & 0x1) {
+ if ((uintptr_t)addr & 0x1) {
iowrite8(*(u8 *)buf, addr);
done += 1;
if (done == count)
goto out;
}
- if ((int)addr & 0x2) {
+ if ((uintptr_t)addr & 0x2) {
if ((count - done) < 2) {
iowrite8(*(u8 *)(buf + done), addr + done);
done += 1;
@@ -973,7 +973,8 @@ static unsigned int ca91cx42_master_rmw(struct vme_master_resource *image,
unsigned int mask, unsigned int compare, unsigned int swap,
loff_t offset)
{
- u32 pci_addr, result;
+ u32 result;
+ uintptr_t pci_addr;
int i;
struct ca91cx42_driver *bridge;
struct device *dev;
@@ -990,7 +991,7 @@ static unsigned int ca91cx42_master_rmw(struct vme_master_resource *image,
/* Lock image */
spin_lock(&image->lock);
- pci_addr = (u32)image->kern_base + offset;
+ pci_addr = (uintptr_t)image->kern_base + offset;
/* Address must be 4-byte aligned */
if (pci_addr & 0x3) {
@@ -1291,7 +1292,7 @@ static int ca91cx42_dma_list_empty(struct vme_dma_list *list)
* callback is attached and disabled when the last callback is removed.
*/
static int ca91cx42_lm_set(struct vme_lm_resource *lm,
- unsigned long long lm_base, vme_address_t aspace, vme_cycle_t cycle)
+ unsigned long long lm_base, u32 aspace, u32 cycle)
{
u32 temp_base, lm_ctl = 0;
int i;
@@ -1359,7 +1360,7 @@ static int ca91cx42_lm_set(struct vme_lm_resource *lm,
* or disabled.
*/
static int ca91cx42_lm_get(struct vme_lm_resource *lm,
- unsigned long long *lm_base, vme_address_t *aspace, vme_cycle_t *cycle)
+ unsigned long long *lm_base, u32 *aspace, u32 *cycle)
{
u32 lm_ctl, enabled = 0;
struct ca91cx42_driver *bridge;
diff --git a/drivers/staging/vme/bridges/vme_tsi148.c b/drivers/staging/vme/bridges/vme_tsi148.c
index 6c1167c2bea9..f50582169b24 100644
--- a/drivers/staging/vme/bridges/vme_tsi148.c
+++ b/drivers/staging/vme/bridges/vme_tsi148.c
@@ -41,7 +41,7 @@ static void __exit tsi148_exit(void);
/* Module parameter */
-static int err_chk;
+static bool err_chk;
static int geoid;
static const char driver_name[] = "vme_tsi148";
@@ -483,7 +483,7 @@ static int tsi148_irq_generate(struct vme_bridge *tsi148_bridge, int level,
* Find the first error in this address range
*/
static struct vme_bus_error *tsi148_find_error(struct vme_bridge *tsi148_bridge,
- vme_address_t aspace, unsigned long long address, size_t count)
+ u32 aspace, unsigned long long address, size_t count)
{
struct list_head *err_pos;
struct vme_bus_error *vme_err, *valid = NULL;
@@ -517,7 +517,7 @@ static struct vme_bus_error *tsi148_find_error(struct vme_bridge *tsi148_bridge,
* Clear errors in the provided address range.
*/
static void tsi148_clear_errors(struct vme_bridge *tsi148_bridge,
- vme_address_t aspace, unsigned long long address, size_t count)
+ u32 aspace, unsigned long long address, size_t count)
{
struct list_head *err_pos, *temp;
struct vme_bus_error *vme_err;
@@ -551,7 +551,7 @@ static void tsi148_clear_errors(struct vme_bridge *tsi148_bridge,
*/
static int tsi148_slave_set(struct vme_slave_resource *image, int enabled,
unsigned long long vme_base, unsigned long long size,
- dma_addr_t pci_base, vme_address_t aspace, vme_cycle_t cycle)
+ dma_addr_t pci_base, u32 aspace, u32 cycle)
{
unsigned int i, addr = 0, granularity = 0;
unsigned int temp_ctl = 0;
@@ -701,7 +701,7 @@ static int tsi148_slave_set(struct vme_slave_resource *image, int enabled,
*/
static int tsi148_slave_get(struct vme_slave_resource *image, int *enabled,
unsigned long long *vme_base, unsigned long long *size,
- dma_addr_t *pci_base, vme_address_t *aspace, vme_cycle_t *cycle)
+ dma_addr_t *pci_base, u32 *aspace, u32 *cycle)
{
unsigned int i, granularity = 0, ctl = 0;
unsigned int vme_base_low, vme_base_high;
@@ -893,8 +893,8 @@ static void tsi148_free_resource(struct vme_master_resource *image)
* Set the attributes of an outbound window.
*/
static int tsi148_master_set(struct vme_master_resource *image, int enabled,
- unsigned long long vme_base, unsigned long long size,
- vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
+ unsigned long long vme_base, unsigned long long size, u32 aspace,
+ u32 cycle, u32 dwidth)
{
int retval = 0;
unsigned int i;
@@ -1129,8 +1129,8 @@ err_window:
* XXX Not parsing prefetch information.
*/
static int __tsi148_master_get(struct vme_master_resource *image, int *enabled,
- unsigned long long *vme_base, unsigned long long *size,
- vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
+ unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
+ u32 *cycle, u32 *dwidth)
{
unsigned int i, ctl;
unsigned int pci_base_low, pci_base_high;
@@ -1239,8 +1239,8 @@ static int __tsi148_master_get(struct vme_master_resource *image, int *enabled,
static int tsi148_master_get(struct vme_master_resource *image, int *enabled,
- unsigned long long *vme_base, unsigned long long *size,
- vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
+ unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
+ u32 *cycle, u32 *dwidth)
{
int retval;
@@ -1259,9 +1259,7 @@ static ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf,
{
int retval, enabled;
unsigned long long vme_base, size;
- vme_address_t aspace;
- vme_cycle_t cycle;
- vme_width_t dwidth;
+ u32 aspace, cycle, dwidth;
struct vme_bus_error *vme_err = NULL;
struct vme_bridge *tsi148_bridge;
@@ -1301,9 +1299,7 @@ static ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf,
{
int retval = 0, enabled;
unsigned long long vme_base, size;
- vme_address_t aspace;
- vme_cycle_t cycle;
- vme_width_t dwidth;
+ u32 aspace, cycle, dwidth;
struct vme_bus_error *vme_err = NULL;
struct vme_bridge *tsi148_bridge;
@@ -1420,7 +1416,7 @@ static unsigned int tsi148_master_rmw(struct vme_master_resource *image,
}
static int tsi148_dma_set_vme_src_attributes(struct device *dev, u32 *attr,
- vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
+ u32 aspace, u32 cycle, u32 dwidth)
{
/* Setup 2eSST speeds */
switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
@@ -1514,7 +1510,7 @@ static int tsi148_dma_set_vme_src_attributes(struct device *dev, u32 *attr,
}
static int tsi148_dma_set_vme_dest_attributes(struct device *dev, u32 *attr,
- vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
+ u32 aspace, u32 cycle, u32 dwidth)
{
/* Setup 2eSST speeds */
switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
@@ -1886,7 +1882,7 @@ static int tsi148_dma_list_empty(struct vme_dma_list *list)
* callback is attached and disabled when the last callback is removed.
*/
static int tsi148_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
- vme_address_t aspace, vme_cycle_t cycle)
+ u32 aspace, u32 cycle)
{
u32 lm_base_high, lm_base_low, lm_ctl = 0;
int i;
@@ -1953,7 +1949,7 @@ static int tsi148_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
* or disabled.
*/
static int tsi148_lm_get(struct vme_lm_resource *lm,
- unsigned long long *lm_base, vme_address_t *aspace, vme_cycle_t *cycle)
+ unsigned long long *lm_base, u32 *aspace, u32 *cycle)
{
u32 lm_base_high, lm_base_low, lm_ctl, enabled = 0;
struct tsi148_driver *bridge;
diff --git a/drivers/staging/vme/devices/Kconfig b/drivers/staging/vme/devices/Kconfig
index ca5ba89e2d8c..55ec30cb1fa2 100644
--- a/drivers/staging/vme/devices/Kconfig
+++ b/drivers/staging/vme/devices/Kconfig
@@ -6,3 +6,16 @@ config VME_USER
If you say Y here you want to be able to access a limited number of
VME windows in a manner at least semi-compatible with the interface
provided with the original driver at http://vmelinux.org/.
+
+config VME_PIO2
+ tristate "GE PIO2 VME"
+ depends on GPIOLIB
+ help
+ Say Y here to include support for the GE PIO2. The PIO2 is a 6U VME
+ slave card, implementing 32 solid-state relay switched IO lines, in
+ 4 groups of 8. Each bank of IO lines is built to function as input,
+ output or both depending on the variant of the card.
+
+ To compile this driver as a module, choose M here. The module will
+ be called vme_pio2. If unsure, say N.
+
diff --git a/drivers/staging/vme/devices/Makefile b/drivers/staging/vme/devices/Makefile
index 459742a75283..172512cb5dbf 100644
--- a/drivers/staging/vme/devices/Makefile
+++ b/drivers/staging/vme/devices/Makefile
@@ -3,3 +3,6 @@
#
obj-$(CONFIG_VME_USER) += vme_user.o
+
+vme_pio2-objs := vme_pio2_cntr.o vme_pio2_gpio.o vme_pio2_core.o
+obj-$(CONFIG_VME_PIO2) += vme_pio2.o
diff --git a/drivers/staging/vme/devices/vme_pio2.h b/drivers/staging/vme/devices/vme_pio2.h
new file mode 100644
index 000000000000..3c5931364535
--- /dev/null
+++ b/drivers/staging/vme/devices/vme_pio2.h
@@ -0,0 +1,249 @@
+#ifndef _VME_PIO2_H_
+#define _VME_PIO2_H_
+
+#define PIO2_CARDS_MAX 32
+
+#define PIO2_VARIANT_LENGTH 5
+
+#define PIO2_NUM_CHANNELS 32
+#define PIO2_NUM_IRQS 11
+#define PIO2_NUM_CNTRS 6
+
+#define PIO2_REGS_SIZE 0x40
+
+#define PIO2_REGS_DATA0 0x0
+#define PIO2_REGS_DATA1 0x1
+#define PIO2_REGS_DATA2 0x2
+#define PIO2_REGS_DATA3 0x3
+
+static const int PIO2_REGS_DATA[4] = { PIO2_REGS_DATA0, PIO2_REGS_DATA1,
+ PIO2_REGS_DATA2, PIO2_REGS_DATA3 };
+
+#define PIO2_REGS_INT_STAT0 0x8
+#define PIO2_REGS_INT_STAT1 0x9
+#define PIO2_REGS_INT_STAT2 0xa
+#define PIO2_REGS_INT_STAT3 0xb
+
+static const int PIO2_REGS_INT_STAT[4] = { PIO2_REGS_INT_STAT0,
+ PIO2_REGS_INT_STAT1,
+ PIO2_REGS_INT_STAT2,
+ PIO2_REGS_INT_STAT3 };
+
+#define PIO2_REGS_INT_STAT_CNTR 0xc
+#define PIO2_REGS_INT_MASK0 0x10
+#define PIO2_REGS_INT_MASK1 0x11
+#define PIO2_REGS_INT_MASK2 0x12
+#define PIO2_REGS_INT_MASK3 0x13
+#define PIO2_REGS_INT_MASK4 0x14
+#define PIO2_REGS_INT_MASK5 0x15
+#define PIO2_REGS_INT_MASK6 0x16
+#define PIO2_REGS_INT_MASK7 0x17
+
+static const int PIO2_REGS_INT_MASK[8] = { PIO2_REGS_INT_MASK0,
+ PIO2_REGS_INT_MASK1,
+ PIO2_REGS_INT_MASK2,
+ PIO2_REGS_INT_MASK3,
+ PIO2_REGS_INT_MASK4,
+ PIO2_REGS_INT_MASK5,
+ PIO2_REGS_INT_MASK6,
+ PIO2_REGS_INT_MASK7 };
+
+
+
+#define PIO2_REGS_CTRL 0x18
+#define PIO2_REGS_VME_VECTOR 0x19
+#define PIO2_REGS_CNTR0 0x20
+#define PIO2_REGS_CNTR1 0x22
+#define PIO2_REGS_CNTR2 0x24
+#define PIO2_REGS_CTRL_WRD0 0x26
+#define PIO2_REGS_CNTR3 0x28
+#define PIO2_REGS_CNTR4 0x2a
+#define PIO2_REGS_CNTR5 0x2c
+#define PIO2_REGS_CTRL_WRD1 0x2e
+
+#define PIO2_REGS_ID 0x30
+
+
+/* PIO2_REGS_DATAx (0x0 - 0x3) */
+
+static const int PIO2_CHANNEL_BANK[32] = { 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 2, 2, 2, 2, 2, 2, 2, 2,
+ 3, 3, 3, 3, 3, 3, 3, 3 };
+
+#define PIO2_CHANNEL0_BIT (1 << 0)
+#define PIO2_CHANNEL1_BIT (1 << 1)
+#define PIO2_CHANNEL2_BIT (1 << 2)
+#define PIO2_CHANNEL3_BIT (1 << 3)
+#define PIO2_CHANNEL4_BIT (1 << 4)
+#define PIO2_CHANNEL5_BIT (1 << 5)
+#define PIO2_CHANNEL6_BIT (1 << 6)
+#define PIO2_CHANNEL7_BIT (1 << 7)
+#define PIO2_CHANNEL8_BIT (1 << 0)
+#define PIO2_CHANNEL9_BIT (1 << 1)
+#define PIO2_CHANNEL10_BIT (1 << 2)
+#define PIO2_CHANNEL11_BIT (1 << 3)
+#define PIO2_CHANNEL12_BIT (1 << 4)
+#define PIO2_CHANNEL13_BIT (1 << 5)
+#define PIO2_CHANNEL14_BIT (1 << 6)
+#define PIO2_CHANNEL15_BIT (1 << 7)
+#define PIO2_CHANNEL16_BIT (1 << 0)
+#define PIO2_CHANNEL17_BIT (1 << 1)
+#define PIO2_CHANNEL18_BIT (1 << 2)
+#define PIO2_CHANNEL19_BIT (1 << 3)
+#define PIO2_CHANNEL20_BIT (1 << 4)
+#define PIO2_CHANNEL21_BIT (1 << 5)
+#define PIO2_CHANNEL22_BIT (1 << 6)
+#define PIO2_CHANNEL23_BIT (1 << 7)
+#define PIO2_CHANNEL24_BIT (1 << 0)
+#define PIO2_CHANNEL25_BIT (1 << 1)
+#define PIO2_CHANNEL26_BIT (1 << 2)
+#define PIO2_CHANNEL27_BIT (1 << 3)
+#define PIO2_CHANNEL28_BIT (1 << 4)
+#define PIO2_CHANNEL29_BIT (1 << 5)
+#define PIO2_CHANNEL30_BIT (1 << 6)
+#define PIO2_CHANNEL31_BIT (1 << 7)
+
+static const int PIO2_CHANNEL_BIT[32] = { PIO2_CHANNEL0_BIT, PIO2_CHANNEL1_BIT,
+ PIO2_CHANNEL2_BIT, PIO2_CHANNEL3_BIT,
+ PIO2_CHANNEL4_BIT, PIO2_CHANNEL5_BIT,
+ PIO2_CHANNEL6_BIT, PIO2_CHANNEL7_BIT,
+ PIO2_CHANNEL8_BIT, PIO2_CHANNEL9_BIT,
+ PIO2_CHANNEL10_BIT, PIO2_CHANNEL11_BIT,
+ PIO2_CHANNEL12_BIT, PIO2_CHANNEL13_BIT,
+ PIO2_CHANNEL14_BIT, PIO2_CHANNEL15_BIT,
+ PIO2_CHANNEL16_BIT, PIO2_CHANNEL17_BIT,
+ PIO2_CHANNEL18_BIT, PIO2_CHANNEL19_BIT,
+ PIO2_CHANNEL20_BIT, PIO2_CHANNEL21_BIT,
+ PIO2_CHANNEL22_BIT, PIO2_CHANNEL23_BIT,
+ PIO2_CHANNEL24_BIT, PIO2_CHANNEL25_BIT,
+ PIO2_CHANNEL26_BIT, PIO2_CHANNEL27_BIT,
+ PIO2_CHANNEL28_BIT, PIO2_CHANNEL29_BIT,
+ PIO2_CHANNEL30_BIT, PIO2_CHANNEL31_BIT
+ };
+
+/* PIO2_REGS_INT_STAT_CNTR (0xc) */
+#define PIO2_COUNTER0 (1 << 0)
+#define PIO2_COUNTER1 (1 << 1)
+#define PIO2_COUNTER2 (1 << 2)
+#define PIO2_COUNTER3 (1 << 3)
+#define PIO2_COUNTER4 (1 << 4)
+#define PIO2_COUNTER5 (1 << 5)
+
+static const int PIO2_COUNTER[6] = { PIO2_COUNTER0, PIO2_COUNTER1,
+ PIO2_COUNTER2, PIO2_COUNTER3,
+ PIO2_COUNTER4, PIO2_COUNTER5 };
+
+/* PIO2_REGS_CTRL (0x18) */
+#define PIO2_VME_INT_MASK 0x7
+#define PIO2_LED (1 << 6)
+#define PIO2_LOOP (1 << 7)
+
+/* PIO2_REGS_VME_VECTOR (0x19) */
+#define PIO2_VME_VECTOR_SPUR 0x0
+#define PIO2_VME_VECTOR_BANK0 0x1
+#define PIO2_VME_VECTOR_BANK1 0x2
+#define PIO2_VME_VECTOR_BANK2 0x3
+#define PIO2_VME_VECTOR_BANK3 0x4
+#define PIO2_VME_VECTOR_CNTR0 0x5
+#define PIO2_VME_VECTOR_CNTR1 0x6
+#define PIO2_VME_VECTOR_CNTR2 0x7
+#define PIO2_VME_VECTOR_CNTR3 0x8
+#define PIO2_VME_VECTOR_CNTR4 0x9
+#define PIO2_VME_VECTOR_CNTR5 0xa
+
+#define PIO2_VME_VECTOR_MASK 0xf0
+
+static const int PIO2_VECTOR_BANK[4] = { PIO2_VME_VECTOR_BANK0,
+ PIO2_VME_VECTOR_BANK1,
+ PIO2_VME_VECTOR_BANK2,
+ PIO2_VME_VECTOR_BANK3 };
+
+static const int PIO2_VECTOR_CNTR[6] = { PIO2_VME_VECTOR_CNTR0,
+ PIO2_VME_VECTOR_CNTR1,
+ PIO2_VME_VECTOR_CNTR2,
+ PIO2_VME_VECTOR_CNTR3,
+ PIO2_VME_VECTOR_CNTR4,
+ PIO2_VME_VECTOR_CNTR5 };
+
+/* PIO2_REGS_CNTRx (0x20 - 0x24 & 0x28 - 0x2c) */
+
+static const int PIO2_CNTR_DATA[6] = { PIO2_REGS_CNTR0, PIO2_REGS_CNTR1,
+ PIO2_REGS_CNTR2, PIO2_REGS_CNTR3,
+ PIO2_REGS_CNTR4, PIO2_REGS_CNTR5 };
+
+/* PIO2_REGS_CTRL_WRDx (0x26 & 0x2e) */
+
+static const int PIO2_CNTR_CTRL[6] = { PIO2_REGS_CTRL_WRD0,
+ PIO2_REGS_CTRL_WRD0,
+ PIO2_REGS_CTRL_WRD0,
+ PIO2_REGS_CTRL_WRD1,
+ PIO2_REGS_CTRL_WRD1,
+ PIO2_REGS_CTRL_WRD1 };
+
+#define PIO2_CNTR_SC_DEV0 0
+#define PIO2_CNTR_SC_DEV1 (1 << 6)
+#define PIO2_CNTR_SC_DEV2 (2 << 6)
+#define PIO2_CNTR_SC_RDBACK (3 << 6)
+
+static const int PIO2_CNTR_SC_DEV[6] = { PIO2_CNTR_SC_DEV0, PIO2_CNTR_SC_DEV1,
+ PIO2_CNTR_SC_DEV2, PIO2_CNTR_SC_DEV0,
+ PIO2_CNTR_SC_DEV1, PIO2_CNTR_SC_DEV2 };
+
+#define PIO2_CNTR_RW_LATCH 0
+#define PIO2_CNTR_RW_LSB (1 << 4)
+#define PIO2_CNTR_RW_MSB (2 << 4)
+#define PIO2_CNTR_RW_BOTH (3 << 4)
+
+#define PIO2_CNTR_MODE0 0
+#define PIO2_CNTR_MODE1 (1 << 1)
+#define PIO2_CNTR_MODE2 (2 << 1)
+#define PIO2_CNTR_MODE3 (3 << 1)
+#define PIO2_CNTR_MODE4 (4 << 1)
+#define PIO2_CNTR_MODE5 (5 << 1)
+
+#define PIO2_CNTR_BCD 1
+
+
+
+enum pio2_bank_config { NOFIT, INPUT, OUTPUT, BOTH };
+enum pio2_int_config { NONE = 0, LOW2HIGH = 1, HIGH2LOW = 2, EITHER = 4 };
+
+/* Bank configuration structure */
+struct pio2_io_bank {
+ enum pio2_bank_config config;
+ u8 value;
+ enum pio2_int_config irq[8];
+};
+
+/* Counter configuration structure */
+struct pio2_cntr {
+ int mode;
+ int count;
+};
+
+struct pio2_card {
+ int id;
+ int bus;
+ long base;
+ int irq_vector;
+ int irq_level;
+ char variant[6];
+ int led;
+
+ struct vme_dev *vdev;
+ struct vme_resource *window;
+
+ struct gpio_chip gc;
+ struct pio2_io_bank bank[4];
+
+ struct pio2_cntr cntr[6];
+};
+
+int pio2_cntr_reset(struct pio2_card *);
+
+int pio2_gpio_reset(struct pio2_card *);
+int __init pio2_gpio_init(struct pio2_card *);
+void __exit pio2_gpio_exit(struct pio2_card *);
+
+#endif /* _VME_PIO2_H_ */
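
The lookup tables above let the rest of the driver turn a flat channel number (0-31) into a data register and a bit within it. Below is a minimal sketch of that translation (the helper name is hypothetical), assuming a card whose VME master window has already been configured as in pio2_probe(), and ignoring the input inversion that the GPIO code applies to banks built as both input and output:

	static int pio2_sketch_read_channel(struct pio2_card *card, int channel)
	{
		u8 reg;
		int bank = PIO2_CHANNEL_BANK[channel];	/* e.g. channel 13 -> bank 1 */
		int retval;

		retval = vme_master_read(card->window, &reg, 1, PIO2_REGS_DATA[bank]);
		if (retval < 0)
			return retval;

		/* channel 13 corresponds to bit 5 of PIO2_REGS_DATA1 */
		return !!(reg & PIO2_CHANNEL_BIT[channel]);
	}
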
diff --git a/drivers/staging/vme/devices/vme_pio2_cntr.c b/drivers/staging/vme/devices/vme_pio2_cntr.c
new file mode 100644
index 000000000000..08e0d59806ca
--- /dev/null
+++ b/drivers/staging/vme/devices/vme_pio2_cntr.c
@@ -0,0 +1,71 @@
+/*
+ * GE PIO2 Counter Driver
+ *
+ * Author: Martyn Welch <martyn.welch@ge.com>
+ * Copyright 2009 GE Intelligent Platforms Embedded Systems, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * The PIO-2 has 6 counters; currently this code just disables the interrupts
+ * and leaves them alone.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/gpio.h>
+
+#include "../vme.h"
+#include "vme_pio2.h"
+
+static int pio2_cntr_irq_set(struct pio2_card *card, int id)
+{
+ int retval;
+ u8 data;
+
+ data = PIO2_CNTR_SC_DEV[id] | PIO2_CNTR_RW_BOTH | card->cntr[id].mode;
+ retval = vme_master_write(card->window, &data, 1, PIO2_CNTR_CTRL[id]);
+ if (retval < 0)
+ return retval;
+
+ data = card->cntr[id].count & 0xFF;
+ retval = vme_master_write(card->window, &data, 1, PIO2_CNTR_DATA[id]);
+ if (retval < 0)
+ return retval;
+
+ data = (card->cntr[id].count >> 8) & 0xFF;
+ retval = vme_master_write(card->window, &data, 1, PIO2_CNTR_DATA[id]);
+ if (retval < 0)
+ return retval;
+
+ return 0;
+}
+
+int pio2_cntr_reset(struct pio2_card *card)
+{
+ int i, retval = 0;
+ u8 reg;
+
+ /* Clear down all timers */
+ for (i = 0; i < 6; i++) {
+ card->cntr[i].mode = PIO2_CNTR_MODE5;
+ card->cntr[i].count = 0;
+ retval = pio2_cntr_irq_set(card, i);
+ if (retval < 0)
+ return retval;
+ }
+
+ /* Ensure all counter interrupts are cleared */
+ do {
+ retval = vme_master_read(card->window, &reg, 1,
+ PIO2_REGS_INT_STAT_CNTR);
+ if (retval < 0)
+ return retval;
+ } while (reg != 0);
+
+ return retval;
+}
+
diff --git a/drivers/staging/vme/devices/vme_pio2_core.c b/drivers/staging/vme/devices/vme_pio2_core.c
new file mode 100644
index 000000000000..9fedc442a779
--- /dev/null
+++ b/drivers/staging/vme/devices/vme_pio2_core.c
@@ -0,0 +1,524 @@
+/*
+ * GE PIO2 6U VME I/O Driver
+ *
+ * Author: Martyn Welch <martyn.welch@ge.com>
+ * Copyright 2009 GE Intelligent Platforms Embedded Systems, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/ctype.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+
+#include "../vme.h"
+#include "vme_pio2.h"
+
+
+static const char driver_name[] = "pio2";
+
+static int bus[PIO2_CARDS_MAX];
+static int bus_num;
+static long base[PIO2_CARDS_MAX];
+static int base_num;
+static int vector[PIO2_CARDS_MAX];
+static int vector_num;
+static int level[PIO2_CARDS_MAX];
+static int level_num;
+static const char *variant[PIO2_CARDS_MAX];
+static int variant_num;
+
+static int loopback;
+
+static int pio2_match(struct vme_dev *);
+static int __devinit pio2_probe(struct vme_dev *);
+static int __devexit pio2_remove(struct vme_dev *);
+
+static int pio2_get_led(struct pio2_card *card)
+{
+ /* Can't read hardware, state saved in structure */
+ return card->led;
+}
+
+static int pio2_set_led(struct pio2_card *card, int state)
+{
+ u8 reg;
+ int retval;
+
+ reg = card->irq_level;
+
+ /* Register state inverse of led state */
+ if (!state)
+ reg |= PIO2_LED;
+
+ if (loopback)
+ reg |= PIO2_LOOP;
+
+ retval = vme_master_write(card->window, &reg, 1, PIO2_REGS_CTRL);
+ if (retval < 0)
+ return retval;
+
+ card->led = state ? 1 : 0;
+
+ return 0;
+}
+
+static void pio2_int(int level, int vector, void *ptr)
+{
+ int vec, i, channel, retval;
+ u8 reg;
+ struct pio2_card *card = ptr;
+
+ vec = vector & ~PIO2_VME_VECTOR_MASK;
+
+ switch (vec) {
+ case 0:
+ dev_warn(&card->vdev->dev, "Spurious Interrupt\n");
+ break;
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ /* Channels 0 to 7 */
+ retval = vme_master_read(card->window, &reg, 1,
+ PIO2_REGS_INT_STAT[vec - 1]);
+ if (retval < 0) {
+ dev_err(&card->vdev->dev,
+ "Unable to read IRQ status register\n");
+ return;
+ }
+ for (i = 0; i < 8; i++) {
+ channel = ((vec - 1) * 8) + i;
+ if (reg & PIO2_CHANNEL_BIT[channel])
+ dev_info(&card->vdev->dev,
+ "Interrupt on I/O channel %d\n",
+ channel);
+ }
+ break;
+ case 5:
+ case 6:
+ case 7:
+ case 8:
+ case 9:
+ case 10:
+ /* Counters are dealt with by their own handler */
+ dev_err(&card->vdev->dev,
+ "Counter interrupt\n");
+ break;
+ }
+}
+
+
+/*
+ * We return whether this has been successful - this is used in the probe to
+ * ensure we have a valid card.
+ */
+static int pio2_reset_card(struct pio2_card *card)
+{
+ int retval = 0;
+ u8 data = 0;
+
+ /* Clear main register*/
+ retval = vme_master_write(card->window, &data, 1, PIO2_REGS_CTRL);
+ if (retval < 0)
+ return retval;
+
+ /* Clear VME vector */
+ retval = vme_master_write(card->window, &data, 1, PIO2_REGS_VME_VECTOR);
+ if (retval < 0)
+ return retval;
+
+ /* Reset GPIO */
+ retval = pio2_gpio_reset(card);
+ if (retval < 0)
+ return retval;
+
+ /* Reset counters */
+ retval = pio2_cntr_reset(card);
+ if (retval < 0)
+ return retval;
+
+ return 0;
+}
+
+static struct vme_driver pio2_driver = {
+ .name = driver_name,
+ .match = pio2_match,
+ .probe = pio2_probe,
+ .remove = __devexit_p(pio2_remove),
+};
+
+
+static int __init pio2_init(void)
+{
+ int retval = 0;
+
+ if (bus_num == 0) {
+ printk(KERN_ERR "%s: No cards, skipping registration\n",
+ driver_name);
+ goto err_nocard;
+ }
+
+ if (bus_num > PIO2_CARDS_MAX) {
+ printk(KERN_ERR
+ "%s: Driver only able to handle %d PIO2 Cards\n",
+ driver_name, PIO2_CARDS_MAX);
+ bus_num = PIO2_CARDS_MAX;
+ }
+
+ /* Register the PIO2 driver */
+ retval = vme_register_driver(&pio2_driver, bus_num);
+ if (retval != 0)
+ goto err_reg;
+
+ return retval;
+
+err_reg:
+err_nocard:
+ return retval;
+}
+
+static int pio2_match(struct vme_dev *vdev)
+{
+
+ if (vdev->num >= bus_num) {
+ dev_err(&vdev->dev,
+ "The enumeration of the VMEbus to which the board is connected must be specified");
+ return 0;
+ }
+
+ if (vdev->num >= base_num) {
+ dev_err(&vdev->dev,
+ "The VME address for the card's registers must be specified");
+ return 0;
+ }
+
+ if (vdev->num >= vector_num) {
+ dev_err(&vdev->dev,
+ "The IRQ vector used by the card must be specified");
+ return 0;
+ }
+
+ if (vdev->num >= level_num) {
+ dev_err(&vdev->dev,
+ "The IRQ level used by the card must be specified");
+ return 0;
+ }
+
+ if (vdev->num >= variant_num) {
+ dev_err(&vdev->dev, "The variant of the card must be specified");
+ return 0;
+ }
+
+ return 1;
+}
+
+static int __devinit pio2_probe(struct vme_dev *vdev)
+{
+ struct pio2_card *card;
+ int retval;
+ int i;
+ u8 reg;
+ int vec;
+
+ card = kzalloc(sizeof(struct pio2_card), GFP_KERNEL);
+ if (card == NULL) {
+ dev_err(&vdev->dev, "Unable to allocate card structure\n");
+ retval = -ENOMEM;
+ goto err_struct;
+ }
+
+ card->id = vdev->num;
+ card->bus = bus[card->id];
+ card->base = base[card->id];
+ card->irq_vector = vector[card->id];
+ card->irq_level = level[card->id] & PIO2_VME_INT_MASK;
+ strncpy(card->variant, variant[card->id], PIO2_VARIANT_LENGTH);
+ card->vdev = vdev;
+
+ for (i = 0; i < PIO2_VARIANT_LENGTH; i++) {
+
+ if (isdigit(card->variant[i]) == 0) {
+ dev_err(&card->vdev->dev, "Variant invalid\n");
+ retval = -EINVAL;
+ goto err_variant;
+ }
+ }
+
+ /*
+ * Bottom 4 bits of VME interrupt vector used to determine source,
+ * provided vector should only use upper 4 bits.
+ */
+ if (card->irq_vector & ~PIO2_VME_VECTOR_MASK) {
+ dev_err(&card->vdev->dev,
+ "Invalid VME IRQ Vector, vector must not use lower 4 bits\n");
+ retval = -EINVAL;
+ goto err_vector;
+ }
+
+ /*
+ * There is no way to determine the build variant or whether each bank
+ * is input, output or both at run time. The inputs are also inverted
+ * if configured as both.
+ *
+ * We pass in the board variant and use that to determine the
+ * configuration of the banks.
+ */
+ for (i = 1; i < PIO2_VARIANT_LENGTH; i++) {
+ switch (card->variant[i]) {
+ case '0':
+ card->bank[i-1].config = NOFIT;
+ break;
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ card->bank[i-1].config = INPUT;
+ break;
+ case '5':
+ card->bank[i-1].config = OUTPUT;
+ break;
+ case '6':
+ case '7':
+ case '8':
+ case '9':
+ card->bank[i-1].config = BOTH;
+ break;
+ }
+ }
+
+ /* Get a master window and position over regs */
+ card->window = vme_master_request(vdev, VME_A24, VME_SCT, VME_D16);
+ if (card->window == NULL) {
+ dev_err(&card->vdev->dev,
+ "Unable to assign VME master resource\n");
+ retval = -EIO;
+ goto err_window;
+ }
+
+ retval = vme_master_set(card->window, 1, card->base, 0x10000, VME_A24,
+ (VME_SCT | VME_USER | VME_DATA), VME_D16);
+ if (retval) {
+ dev_err(&card->vdev->dev,
+ "Unable to configure VME master resource\n");
+ goto err_set;
+ }
+
+ /*
+ * There is also no obvious register which we can probe to determine
+ * whether the provided base is valid. If we can read the "ID Register"
+ * offset and the reset function doesn't error, assume we have a valid
+ * location.
+ */
+ retval = vme_master_read(card->window, &reg, 1, PIO2_REGS_ID);
+ if (retval < 0) {
+ dev_err(&card->vdev->dev, "Unable to read from device\n");
+ goto err_read;
+ }
+
+ dev_dbg(&card->vdev->dev, "ID Register:%x\n", reg);
+
+ /*
+ * Ensure all the I/O is cleared. We can't read back the states, so
+ * this is the only method we have to ensure that the I/O is in a known
+ * state.
+ */
+ retval = pio2_reset_card(card);
+ if (retval) {
+ dev_err(&card->vdev->dev,
+ "Failed to reset card, is location valid?");
+ retval = -ENODEV;
+ goto err_reset;
+ }
+
+ /* Configure VME Interrupts */
+ reg = card->irq_level;
+ if (pio2_get_led(card))
+ reg |= PIO2_LED;
+ if (loopback)
+ reg |= PIO2_LOOP;
+ retval = vme_master_write(card->window, &reg, 1, PIO2_REGS_CTRL);
+ if (retval < 0)
+ return retval;
+
+ /* Set VME vector */
+ retval = vme_master_write(card->window, &card->irq_vector, 1,
+ PIO2_REGS_VME_VECTOR);
+ if (retval < 0)
+ return retval;
+
+ /* Attach spurious interrupt handler. */
+ vec = card->irq_vector | PIO2_VME_VECTOR_SPUR;
+
+ retval = vme_irq_request(vdev, card->irq_level, vec,
+ &pio2_int, (void *)card);
+ if (retval < 0) {
+ dev_err(&card->vdev->dev,
+ "Unable to attach VME interrupt vector 0x%x, level 0x%x\n",
+ vec, card->irq_level);
+ goto err_irq;
+ }
+
+ /* Attach GPIO interrupt handlers. */
+ for (i = 0; i < 4; i++) {
+ vec = card->irq_vector | PIO2_VECTOR_BANK[i];
+
+ retval = vme_irq_request(vdev, card->irq_level, vec,
+ &pio2_int, (void *)card);
+ if (retval < 0) {
+ dev_err(&card->vdev->dev,
+ "Unable to attach VME interrupt vector 0x%x, level 0x%x\n",
+ vec, card->irq_level);
+ goto err_gpio_irq;
+ }
+ }
+
+ /* Attach counter interrupt handlers. */
+ for (i = 0; i < 6; i++) {
+ vec = card->irq_vector | PIO2_VECTOR_CNTR[i];
+
+ retval = vme_irq_request(vdev, card->irq_level, vec,
+ &pio2_int, (void *)card);
+ if (retval < 0) {
+ dev_err(&card->vdev->dev,
+ "Unable to attach VME interrupt vector 0x%x, level 0x%x\n",
+ vec, card->irq_level);
+ goto err_cntr_irq;
+ }
+ }
+
+ /* Register IO */
+ retval = pio2_gpio_init(card);
+ if (retval < 0) {
+ dev_err(&card->vdev->dev,
+ "Unable to register with GPIO framework\n");
+ goto err_gpio;
+ }
+
+ /* Set LED - This also sets interrupt level */
+ retval = pio2_set_led(card, 0);
+ if (retval < 0) {
+ dev_err(&card->vdev->dev, "Unable to set LED\n");
+ goto err_led;
+ }
+
+ dev_set_drvdata(&card->vdev->dev, card);
+
+ dev_info(&card->vdev->dev,
+ "PIO2 (variant %s) configured at 0x%lx\n", card->variant,
+ card->base);
+
+ return 0;
+
+err_led:
+ pio2_gpio_exit(card);
+err_gpio:
+ i = 6;
+err_cntr_irq:
+ while (i > 0) {
+ i--;
+ vec = card->irq_vector | PIO2_VECTOR_CNTR[i];
+ vme_irq_free(vdev, card->irq_level, vec);
+ }
+
+ i = 4;
+err_gpio_irq:
+ while (i > 0) {
+ i--;
+ vec = card->irq_vector | PIO2_VECTOR_BANK[i];
+ vme_irq_free(vdev, card->irq_level, vec);
+ }
+
+ vec = (card->irq_vector & PIO2_VME_VECTOR_MASK) | PIO2_VME_VECTOR_SPUR;
+ vme_irq_free(vdev, card->irq_level, vec);
+err_irq:
+ pio2_reset_card(card);
+err_reset:
+err_read:
+ vme_master_set(card->window, 0, 0, 0, VME_A16, 0, VME_D16);
+err_set:
+ vme_master_free(card->window);
+err_window:
+err_vector:
+err_variant:
+ kfree(card);
+err_struct:
+ return retval;
+}
+
+static int __devexit pio2_remove(struct vme_dev *vdev)
+{
+ int vec;
+ int i;
+
+ struct pio2_card *card = dev_get_drvdata(&vdev->dev);
+
+ pio2_gpio_exit(card);
+
+ for (i = 0; i < 6; i++) {
+ vec = card->irq_vector | PIO2_VECTOR_CNTR[i];
+ vme_irq_free(vdev, card->irq_level, vec);
+ }
+
+ for (i = 0; i < 4; i++) {
+ vec = card->irq_vector | PIO2_VECTOR_BANK[i];
+ vme_irq_free(vdev, card->irq_level, vec);
+ }
+
+ vec = (card->irq_vector & PIO2_VME_VECTOR_MASK) | PIO2_VME_VECTOR_SPUR;
+ vme_irq_free(vdev, card->irq_level, vec);
+
+ pio2_reset_card(card);
+
+ vme_master_set(card->window, 0, 0, 0, VME_A16, 0, VME_D16);
+
+ vme_master_free(card->window);
+
+ kfree(card);
+
+ return 0;
+}
+
+static void __exit pio2_exit(void)
+{
+ vme_unregister_driver(&pio2_driver);
+}
+
+
+/* These are required for each board */
+MODULE_PARM_DESC(bus, "Enumeration of VMEbus to which the board is connected");
+module_param_array(bus, int, &bus_num, S_IRUGO);
+
+MODULE_PARM_DESC(base, "Base VME address for PIO2 Registers");
+module_param_array(base, long, &base_num, S_IRUGO);
+
+MODULE_PARM_DESC(vector, "VME IRQ Vector (Lower 4 bits masked)");
+module_param_array(vector, int, &vector_num, S_IRUGO);
+
+MODULE_PARM_DESC(level, "VME IRQ Level");
+module_param_array(level, int, &level_num, S_IRUGO);
+
+MODULE_PARM_DESC(variant, "Last 4 characters of PIO2 board variant");
+module_param_array(variant, charp, &variant_num, S_IRUGO);
+
+/* This is for debugging */
+MODULE_PARM_DESC(loopback, "Enable loopback mode on all cards");
+module_param(loopback, bool, S_IRUGO);
+
+MODULE_DESCRIPTION("GE PIO2 6U VME I/O Driver");
+MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com>");
+MODULE_LICENSE("GPL");
+
+module_init(pio2_init);
+module_exit(pio2_exit);
+
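
A worked example of the variant decoding in pio2_probe() above, using a hypothetical board string rather than any catalogued variant: with variant=21550, characters 1-4 of the five-character string select the configuration of banks 0-3, so bank 0 becomes INPUT ('1'), banks 1 and 2 become OUTPUT ('5'), and bank 3 is NOFIT ('0'). Such a card could be described on the module command line with something like "vme_pio2 bus=0 base=0xc00000 vector=0xf0 level=3 variant=21550", where every value other than the decoding rule itself is illustrative; note the vector keeps its lower four bits clear, as required by the probe check above.
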
diff --git a/drivers/staging/vme/devices/vme_pio2_gpio.c b/drivers/staging/vme/devices/vme_pio2_gpio.c
new file mode 100644
index 000000000000..dc837deb99dd
--- /dev/null
+++ b/drivers/staging/vme/devices/vme_pio2_gpio.c
@@ -0,0 +1,232 @@
+/*
+ * GE PIO2 GPIO Driver
+ *
+ * Author: Martyn Welch <martyn.welch@ge.com>
+ * Copyright 2009 GE Intelligent Platforms Embedded Systems, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/ctype.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+
+#include "../vme.h"
+#include "vme_pio2.h"
+
+static const char driver_name[] = "pio2_gpio";
+
+static struct pio2_card *gpio_to_pio2_card(struct gpio_chip *chip)
+{
+ return container_of(chip, struct pio2_card, gc);
+}
+
+static int pio2_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ u8 reg;
+ int retval;
+ struct pio2_card *card = gpio_to_pio2_card(chip);
+
+ if ((card->bank[PIO2_CHANNEL_BANK[offset]].config == OUTPUT) |
+ (card->bank[PIO2_CHANNEL_BANK[offset]].config == NOFIT)) {
+
+ dev_err(&card->vdev->dev, "Channel not available as input\n");
+ return 0;
+ }
+
+ retval = vme_master_read(card->window, &reg, 1,
+ PIO2_REGS_DATA[PIO2_CHANNEL_BANK[offset]]);
+ if (retval < 0) {
+ dev_err(&card->vdev->dev, "Unable to read from GPIO\n");
+ return 0;
+ }
+
+ /*
+ * Remember, input on channels configured as both input and output
+ * are inverted!
+ */
+ if (reg & PIO2_CHANNEL_BIT[offset]) {
+ if (card->bank[PIO2_CHANNEL_BANK[offset]].config != BOTH)
+ return 0;
+ else
+ return 1;
+ } else {
+ if (card->bank[PIO2_CHANNEL_BANK[offset]].config != BOTH)
+ return 1;
+ else
+ return 0;
+ }
+}
+
+static void pio2_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ u8 reg;
+ int retval;
+ struct pio2_card *card = gpio_to_pio2_card(chip);
+
+ if ((card->bank[PIO2_CHANNEL_BANK[offset]].config == INPUT) |
+ (card->bank[PIO2_CHANNEL_BANK[offset]].config == NOFIT)) {
+
+ dev_err(&card->vdev->dev, "Channel not available as output\n");
+ return;
+ }
+
+ if (value)
+ reg = card->bank[PIO2_CHANNEL_BANK[offset]].value |
+ PIO2_CHANNEL_BIT[offset];
+ else
+ reg = card->bank[PIO2_CHANNEL_BANK[offset]].value &
+ ~PIO2_CHANNEL_BIT[offset];
+
+ retval = vme_master_write(card->window, &reg, 1,
+ PIO2_REGS_DATA[PIO2_CHANNEL_BANK[offset]]);
+ if (retval < 0) {
+ dev_err(&card->vdev->dev, "Unable to write to GPIO\n");
+ return;
+ }
+
+ card->bank[PIO2_CHANNEL_BANK[offset]].value = reg;
+}
+
+/* Directionality configured at board build - send appropriate response */
+static int pio2_gpio_dir_in(struct gpio_chip *chip, unsigned offset)
+{
+ int data;
+ struct pio2_card *card = gpio_to_pio2_card(chip);
+
+ if ((card->bank[PIO2_CHANNEL_BANK[offset]].config == OUTPUT) |
+ (card->bank[PIO2_CHANNEL_BANK[offset]].config == NOFIT)) {
+ dev_err(&card->vdev->dev,
+ "Channel directionality not configurable at runtime\n");
+
+ data = -EINVAL;
+ } else {
+ data = 0;
+ }
+
+ return data;
+}
+
+/* Directionality configured at board build - send appropriate response */
+static int pio2_gpio_dir_out(struct gpio_chip *chip, unsigned offset, int value)
+{
+ int data;
+ struct pio2_card *card = gpio_to_pio2_card(chip);
+
+ if ((card->bank[PIO2_CHANNEL_BANK[offset]].config == INPUT) |
+ (card->bank[PIO2_CHANNEL_BANK[offset]].config == NOFIT)) {
+ dev_err(&card->vdev->dev,
+ "Channel directionality not configurable at runtime\n");
+
+ data = -EINVAL;
+ } else {
+ data = 0;
+ }
+
+ return data;
+}
+
+/*
+ * We return whether this has been successful - this is used in the probe to
+ * ensure we have a valid card.
+ */
+int pio2_gpio_reset(struct pio2_card *card)
+{
+ int retval = 0;
+ int i, j;
+
+ u8 data = 0;
+
+ /* Zero output registers */
+ for (i = 0; i < 4; i++) {
+ retval = vme_master_write(card->window, &data, 1,
+ PIO2_REGS_DATA[i]);
+ if (retval < 0)
+ return retval;
+ card->bank[i].value = 0;
+ }
+
+ /* Set input interrupt masks */
+ for (i = 0; i < 4; i++) {
+ retval = vme_master_write(card->window, &data, 1,
+ PIO2_REGS_INT_MASK[i * 2]);
+ if (retval < 0)
+ return retval;
+
+ retval = vme_master_write(card->window, &data, 1,
+ PIO2_REGS_INT_MASK[(i * 2) + 1]);
+ if (retval < 0)
+ return retval;
+
+ for (j = 0; j < 8; j++)
+ card->bank[i].irq[j] = NONE;
+ }
+
+ /* Ensure all I/O interrupts are cleared */
+ for (i = 0; i < 4; i++) {
+ do {
+ retval = vme_master_read(card->window, &data, 1,
+ PIO2_REGS_INT_STAT[i]);
+ if (retval < 0)
+ return retval;
+ } while (data != 0);
+ }
+
+ return 0;
+}
+
+int __init pio2_gpio_init(struct pio2_card *card)
+{
+ int retval = 0;
+ char *label;
+
+ label = kmalloc(PIO2_NUM_CHANNELS, GFP_KERNEL);
+ if (label == NULL) {
+ dev_err(&card->vdev->dev, "Unable to allocate GPIO label\n");
+ return -ENOMEM;
+ }
+
+ sprintf(label, "%s@%s", driver_name, dev_name(&card->vdev->dev));
+ card->gc.label = label;
+
+ card->gc.ngpio = PIO2_NUM_CHANNELS;
+ /* Dynamic allocation of base */
+ card->gc.base = -1;
+ /* Setup pointers to chip functions */
+ card->gc.direction_input = pio2_gpio_dir_in;
+ card->gc.direction_output = pio2_gpio_dir_out;
+ card->gc.get = pio2_gpio_get;
+ card->gc.set = pio2_gpio_set;
+
+ /* This function adds a memory mapped GPIO chip */
+ retval = gpiochip_add(&(card->gc));
+ if (retval) {
+ dev_err(&card->vdev->dev, "Unable to register GPIO\n");
+ kfree(card->gc.label);
+ }
+
+ return retval;
+};
+
+void __exit pio2_gpio_exit(struct pio2_card *card)
+{
+ const char *label = card->gc.label;
+
+ if (gpiochip_remove(&(card->gc)))
+ dev_err(&card->vdev->dev, "Failed to remove GPIO");
+
+ kfree(label);
+}
+
diff --git a/drivers/staging/vme/devices/vme_user.h b/drivers/staging/vme/devices/vme_user.h
index d85a1e9dbe3a..7d24cd6343e4 100644
--- a/drivers/staging/vme/devices/vme_user.h
+++ b/drivers/staging/vme/devices/vme_user.h
@@ -10,9 +10,9 @@ struct vme_master {
int enable; /* State of Window */
unsigned long long vme_addr; /* Starting Address on the VMEbus */
unsigned long long size; /* Window Size */
- vme_address_t aspace; /* Address Space */
- vme_cycle_t cycle; /* Cycle properties */
- vme_width_t dwidth; /* Maximum Data Width */
+ u32 aspace; /* Address Space */
+ u32 cycle; /* Cycle properties */
+ u32 dwidth; /* Maximum Data Width */
#if 0
char prefetchEnable; /* Prefetch Read Enable State */
int prefetchSize; /* Prefetch Read Size (Cache Lines) */
@@ -34,8 +34,8 @@ struct vme_slave {
int enable; /* State of Window */
unsigned long long vme_addr; /* Starting Address on the VMEbus */
unsigned long long size; /* Window Size */
- vme_address_t aspace; /* Address Space */
- vme_cycle_t cycle; /* Cycle properties */
+ u32 aspace; /* Address Space */
+ u32 cycle; /* Cycle properties */
#if 0
char wrPostEnable; /* Write Post State */
char rmwLock; /* Lock PCI during RMW Cycles */
diff --git a/drivers/staging/vme/vme.c b/drivers/staging/vme/vme.c
index b04b4688f705..70722ae52321 100644
--- a/drivers/staging/vme/vme.c
+++ b/drivers/staging/vme/vme.c
@@ -153,9 +153,7 @@ size_t vme_get_size(struct vme_resource *resource)
int enabled, retval;
unsigned long long base, size;
dma_addr_t buf_base;
- vme_address_t aspace;
- vme_cycle_t cycle;
- vme_width_t dwidth;
+ u32 aspace, cycle, dwidth;
switch (resource->type) {
case VME_MASTER:
@@ -181,7 +179,7 @@ size_t vme_get_size(struct vme_resource *resource)
}
EXPORT_SYMBOL(vme_get_size);
-static int vme_check_window(vme_address_t aspace, unsigned long long vme_base,
+static int vme_check_window(u32 aspace, unsigned long long vme_base,
unsigned long long size)
{
int retval = 0;
@@ -232,8 +230,8 @@ static int vme_check_window(vme_address_t aspace, unsigned long long vme_base,
* Request a slave image with specific attributes, return some unique
* identifier.
*/
-struct vme_resource *vme_slave_request(struct vme_dev *vdev,
- vme_address_t address, vme_cycle_t cycle)
+struct vme_resource *vme_slave_request(struct vme_dev *vdev, u32 address,
+ u32 cycle)
{
struct vme_bridge *bridge;
struct list_head *slave_pos = NULL;
@@ -298,7 +296,7 @@ EXPORT_SYMBOL(vme_slave_request);
int vme_slave_set(struct vme_resource *resource, int enabled,
unsigned long long vme_base, unsigned long long size,
- dma_addr_t buf_base, vme_address_t aspace, vme_cycle_t cycle)
+ dma_addr_t buf_base, u32 aspace, u32 cycle)
{
struct vme_bridge *bridge = find_bridge(resource);
struct vme_slave_resource *image;
@@ -333,7 +331,7 @@ EXPORT_SYMBOL(vme_slave_set);
int vme_slave_get(struct vme_resource *resource, int *enabled,
unsigned long long *vme_base, unsigned long long *size,
- dma_addr_t *buf_base, vme_address_t *aspace, vme_cycle_t *cycle)
+ dma_addr_t *buf_base, u32 *aspace, u32 *cycle)
{
struct vme_bridge *bridge = find_bridge(resource);
struct vme_slave_resource *image;
@@ -388,8 +386,8 @@ EXPORT_SYMBOL(vme_slave_free);
* Request a master image with specific attributes, return some unique
* identifier.
*/
-struct vme_resource *vme_master_request(struct vme_dev *vdev,
- vme_address_t address, vme_cycle_t cycle, vme_width_t dwidth)
+struct vme_resource *vme_master_request(struct vme_dev *vdev, u32 address,
+ u32 cycle, u32 dwidth)
{
struct vme_bridge *bridge;
struct list_head *master_pos = NULL;
@@ -456,8 +454,8 @@ err_bus:
EXPORT_SYMBOL(vme_master_request);
int vme_master_set(struct vme_resource *resource, int enabled,
- unsigned long long vme_base, unsigned long long size,
- vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
+ unsigned long long vme_base, unsigned long long size, u32 aspace,
+ u32 cycle, u32 dwidth)
{
struct vme_bridge *bridge = find_bridge(resource);
struct vme_master_resource *image;
@@ -492,8 +490,8 @@ int vme_master_set(struct vme_resource *resource, int enabled,
EXPORT_SYMBOL(vme_master_set);
int vme_master_get(struct vme_resource *resource, int *enabled,
- unsigned long long *vme_base, unsigned long long *size,
- vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
+ unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
+ u32 *cycle, u32 *dwidth)
{
struct vme_bridge *bridge = find_bridge(resource);
struct vme_master_resource *image;
@@ -646,8 +644,7 @@ EXPORT_SYMBOL(vme_master_free);
* Request a DMA controller with specific attributes, return some unique
* identifier.
*/
-struct vme_resource *vme_dma_request(struct vme_dev *vdev,
- vme_dma_route_t route)
+struct vme_resource *vme_dma_request(struct vme_dev *vdev, u32 route)
{
struct vme_bridge *bridge;
struct list_head *dma_pos = NULL;
@@ -743,8 +740,7 @@ EXPORT_SYMBOL(vme_new_dma_list);
/*
* Create "Pattern" type attributes
*/
-struct vme_dma_attr *vme_dma_pattern_attribute(u32 pattern,
- vme_pattern_t type)
+struct vme_dma_attr *vme_dma_pattern_attribute(u32 pattern, u32 type)
{
struct vme_dma_attr *attributes;
struct vme_dma_pattern *pattern_attr;
@@ -822,7 +818,7 @@ EXPORT_SYMBOL(vme_dma_pci_attribute);
* Create "VME" type attributes
*/
struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long address,
- vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
+ u32 aspace, u32 cycle, u32 dwidth)
{
struct vme_dma_attr *attributes;
struct vme_dma_vme *vme_attr;
@@ -1173,7 +1169,7 @@ int vme_lm_count(struct vme_resource *resource)
EXPORT_SYMBOL(vme_lm_count);
int vme_lm_set(struct vme_resource *resource, unsigned long long lm_base,
- vme_address_t aspace, vme_cycle_t cycle)
+ u32 aspace, u32 cycle)
{
struct vme_bridge *bridge = find_bridge(resource);
struct vme_lm_resource *lm;
@@ -1195,7 +1191,7 @@ int vme_lm_set(struct vme_resource *resource, unsigned long long lm_base,
EXPORT_SYMBOL(vme_lm_set);
int vme_lm_get(struct vme_resource *resource, unsigned long long *lm_base,
- vme_address_t *aspace, vme_cycle_t *cycle)
+ u32 *aspace, u32 *cycle)
{
struct vme_bridge *bridge = find_bridge(resource);
struct vme_lm_resource *lm;
@@ -1307,7 +1303,12 @@ EXPORT_SYMBOL(vme_slot_get);
/* - Bridge Registration --------------------------------------------------- */
-static int vme_add_bus(struct vme_bridge *bridge)
+static void vme_dev_release(struct device *dev)
+{
+ kfree(dev_to_vme_dev(dev));
+}
+
+int vme_register_bridge(struct vme_bridge *bridge)
{
int i;
int ret = -1;
@@ -1327,8 +1328,9 @@ static int vme_add_bus(struct vme_bridge *bridge)
return ret;
}
+EXPORT_SYMBOL(vme_register_bridge);
-static void vme_remove_bus(struct vme_bridge *bridge)
+void vme_unregister_bridge(struct vme_bridge *bridge)
{
struct vme_dev *vdev;
struct vme_dev *tmp;
@@ -1343,22 +1345,6 @@ static void vme_remove_bus(struct vme_bridge *bridge)
list_del(&bridge->bus_list);
mutex_unlock(&vme_buses_lock);
}
-
-static void vme_dev_release(struct device *dev)
-{
- kfree(dev_to_vme_dev(dev));
-}
-
-int vme_register_bridge(struct vme_bridge *bridge)
-{
- return vme_add_bus(bridge);
-}
-EXPORT_SYMBOL(vme_register_bridge);
-
-void vme_unregister_bridge(struct vme_bridge *bridge)
-{
- vme_remove_bus(bridge);
-}
EXPORT_SYMBOL(vme_unregister_bridge);
/* - Driver Registration --------------------------------------------------- */
@@ -1421,10 +1407,7 @@ static int __vme_register_driver(struct vme_driver *drv, unsigned int ndevs)
* and if the bridge is removed, it will have to go through
* vme_unregister_bridge() to do it (which calls remove() on
* the bridge which in turn tries to acquire vme_buses_lock and
- * will have to wait). The probe() called after device
- * registration in __vme_register_driver below will also fail
- * as the bridge is being removed (since the probe() calls
- * vme_bridge_get()).
+ * will have to wait).
*/
err = __vme_register_driver_bus(drv, bridge, ndevs);
if (err)
diff --git a/drivers/staging/vme/vme.h b/drivers/staging/vme/vme.h
index e3828badca61..9d38ceed60e2 100644
--- a/drivers/staging/vme/vme.h
+++ b/drivers/staging/vme/vme.h
@@ -10,7 +10,6 @@ enum vme_resource_type {
};
/* VME Address Spaces */
-typedef u32 vme_address_t;
#define VME_A16 0x1
#define VME_A24 0x2
#define VME_A32 0x4
@@ -29,7 +28,6 @@ typedef u32 vme_address_t;
/* VME Cycle Types */
-typedef u32 vme_cycle_t;
#define VME_SCT 0x1
#define VME_BLT 0x2
#define VME_MBLT 0x4
@@ -47,28 +45,23 @@ typedef u32 vme_cycle_t;
#define VME_DATA 0x8000
/* VME Data Widths */
-typedef u32 vme_width_t;
#define VME_D8 0x1
#define VME_D16 0x2
#define VME_D32 0x4
#define VME_D64 0x8
/* Arbitration Scheduling Modes */
-typedef u32 vme_arbitration_t;
#define VME_R_ROBIN_MODE 0x1
#define VME_PRIORITY_MODE 0x2
-typedef u32 vme_dma_t;
#define VME_DMA_PATTERN (1<<0)
#define VME_DMA_PCI (1<<1)
#define VME_DMA_VME (1<<2)
-typedef u32 vme_pattern_t;
#define VME_DMA_PATTERN_BYTE (1<<0)
#define VME_DMA_PATTERN_WORD (1<<1)
#define VME_DMA_PATTERN_INCREMENT (1<<2)
-typedef u32 vme_dma_route_t;
#define VME_DMA_VME_TO_MEM (1<<0)
#define VME_DMA_MEM_TO_VME (1<<1)
#define VME_DMA_VME_TO_VME (1<<2)
@@ -77,7 +70,7 @@ typedef u32 vme_dma_route_t;
#define VME_DMA_PATTERN_TO_MEM (1<<5)
struct vme_dma_attr {
- vme_dma_t type;
+ u32 type;
void *private;
};
@@ -97,7 +90,7 @@ extern struct bus_type vme_bus_type;
/**
* Structure representing a VME device
- * @id: The ID of the device (currently the bus and slot number)
+ * @num: The device number
* @bridge: Pointer to the bridge device this device is on
* @dev: Internal device structure
* @drv_list: List of devices (per driver)
@@ -128,32 +121,29 @@ void vme_free_consistent(struct vme_resource *, size_t, void *,
size_t vme_get_size(struct vme_resource *);
-struct vme_resource *vme_slave_request(struct vme_dev *, vme_address_t,
- vme_cycle_t);
+struct vme_resource *vme_slave_request(struct vme_dev *, u32, u32);
int vme_slave_set(struct vme_resource *, int, unsigned long long,
- unsigned long long, dma_addr_t, vme_address_t, vme_cycle_t);
+ unsigned long long, dma_addr_t, u32, u32);
int vme_slave_get(struct vme_resource *, int *, unsigned long long *,
- unsigned long long *, dma_addr_t *, vme_address_t *, vme_cycle_t *);
+ unsigned long long *, dma_addr_t *, u32 *, u32 *);
void vme_slave_free(struct vme_resource *);
-struct vme_resource *vme_master_request(struct vme_dev *, vme_address_t,
- vme_cycle_t, vme_width_t);
+struct vme_resource *vme_master_request(struct vme_dev *, u32, u32, u32);
int vme_master_set(struct vme_resource *, int, unsigned long long,
- unsigned long long, vme_address_t, vme_cycle_t, vme_width_t);
+ unsigned long long, u32, u32, u32);
int vme_master_get(struct vme_resource *, int *, unsigned long long *,
- unsigned long long *, vme_address_t *, vme_cycle_t *, vme_width_t *);
+ unsigned long long *, u32 *, u32 *, u32 *);
ssize_t vme_master_read(struct vme_resource *, void *, size_t, loff_t);
ssize_t vme_master_write(struct vme_resource *, void *, size_t, loff_t);
unsigned int vme_master_rmw(struct vme_resource *, unsigned int, unsigned int,
unsigned int, loff_t);
void vme_master_free(struct vme_resource *);
-struct vme_resource *vme_dma_request(struct vme_dev *, vme_dma_route_t);
+struct vme_resource *vme_dma_request(struct vme_dev *, u32);
struct vme_dma_list *vme_new_dma_list(struct vme_resource *);
-struct vme_dma_attr *vme_dma_pattern_attribute(u32, vme_pattern_t);
+struct vme_dma_attr *vme_dma_pattern_attribute(u32, u32);
struct vme_dma_attr *vme_dma_pci_attribute(dma_addr_t);
-struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long, vme_address_t,
- vme_cycle_t, vme_width_t);
+struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long, u32, u32, u32);
void vme_dma_free_attribute(struct vme_dma_attr *);
int vme_dma_list_add(struct vme_dma_list *, struct vme_dma_attr *,
struct vme_dma_attr *, size_t);
@@ -168,10 +158,8 @@ int vme_irq_generate(struct vme_dev *, int, int);
struct vme_resource * vme_lm_request(struct vme_dev *);
int vme_lm_count(struct vme_resource *);
-int vme_lm_set(struct vme_resource *, unsigned long long, vme_address_t,
- vme_cycle_t);
-int vme_lm_get(struct vme_resource *, unsigned long long *, vme_address_t *,
- vme_cycle_t *);
+int vme_lm_set(struct vme_resource *, unsigned long long, u32, u32);
+int vme_lm_get(struct vme_resource *, unsigned long long *, u32 *, u32 *);
int vme_lm_attach(struct vme_resource *, int, void (*callback)(int));
int vme_lm_detach(struct vme_resource *, int);
void vme_lm_free(struct vme_resource *);
diff --git a/drivers/staging/vme/vme_api.txt b/drivers/staging/vme/vme_api.txt
index e8ff2151a487..856efa35f6e3 100644
--- a/drivers/staging/vme/vme_api.txt
+++ b/drivers/staging/vme/vme_api.txt
@@ -86,26 +86,26 @@ driver allows a resource to be assigned based on the required attributes of the
driver in question:
struct vme_resource * vme_master_request(struct vme_dev *dev,
- vme_address_t aspace, vme_cycle_t cycle, vme_width_t width);
-
- struct vme_resource * vme_slave_request(struct vme_dev *dev,
- vme_address_t aspace, vme_cycle_t cycle);
-
- struct vme_resource *vme_dma_request(struct vme_dev *dev,
- vme_dma_route_t route);
-
-For slave windows these attributes are split into those of type 'vme_address_t'
-and 'vme_cycle_t'. Master windows add a further set of attributes
-'vme_cycle_t'. These attributes are defined as bitmasks and as such any
-combination of the attributes can be requested for a single window, the core
-will assign a window that meets the requirements, returning a pointer of type
-vme_resource that should be used to identify the allocated resource when it is
-used. For DMA controllers, the request function requires the potential
-direction of any transfers to be provided in the route attributes. This is
-typically VME-to-MEM and/or MEM-to-VME, though some hardware can support
-VME-to-VME and MEM-to-MEM transfers as well as test pattern generation. If an
-unallocated window fitting the requirements can not be found a NULL pointer
-will be returned.
+ u32 aspace, u32 cycle, u32 width);
+
+ struct vme_resource * vme_slave_request(struct vme_dev *dev, u32 aspace,
+ u32 cycle);
+
+ struct vme_resource *vme_dma_request(struct vme_dev *dev, u32 route);
+
+For slave windows these attributes are split into the VME address spaces that
+need to be accessed in 'aspace' and VME bus cycle types required in 'cycle'.
+Master windows add a further set of attributes in 'width' specifying the
+required data transfer widths. These attributes are defined as bitmasks and as
+such any combination of the attributes can be requested for a single window;
+the core will assign a window that meets the requirements, returning a pointer
+of type vme_resource that should be used to identify the allocated resource
+when it is used. For DMA controllers, the request function requires the
+potential direction of any transfers to be provided in the route attributes.
+This is typically VME-to-MEM and/or MEM-to-VME, though some hardware can
+support VME-to-VME and MEM-to-MEM transfers as well as test pattern generation.
+If an unallocated window fitting the requirements cannot be found, a NULL
+pointer will be returned.
Functions are also provided to free window allocations once they are no longer
required. These functions should be passed the pointer to the resource provided
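For illustration only (not part of this patch): with the typedefs gone, a driver
simply passes the plain u32 attribute masks. A minimal sketch, assuming a probe
routine that already holds a struct vme_dev *vdev:

	struct vme_resource *res;

	/* A32 address space, single-cycle transfers, 32-bit data width */
	res = vme_master_request(vdev, VME_A32, VME_SCT, VME_D32);
	if (res == NULL)
		return -ENODEV;	/* no window matched; error code illustrative */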
@@ -133,12 +133,12 @@ Once a master window has been assigned the following functions can be used to
configure it and retrieve the current settings:
int vme_master_set (struct vme_resource *res, int enabled,
- unsigned long long base, unsigned long long size,
- vme_address_t aspace, vme_cycle_t cycle, vme_width_t width);
+ unsigned long long base, unsigned long long size, u32 aspace,
+ u32 cycle, u32 width);
int vme_master_get (struct vme_resource *res, int *enabled,
- unsigned long long *base, unsigned long long *size,
- vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *width);
+ unsigned long long *base, unsigned long long *size, u32 *aspace,
+ u32 *cycle, u32 *width);
The address spaces, transfer widths and cycle types are the same as described
under resource management, however some of the options are mutually exclusive.
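A hedged example of the updated setter in use (base address and size are
hypothetical, not taken from the patch):

	/* Enable the window and map 64 KiB onto VME A32 address 0x10000000 */
	if (vme_master_set(res, 1, 0x10000000ULL, 0x10000ULL,
			VME_A32, VME_SCT, VME_D32))
		dev_err(&vdev->dev, "failed to configure master window\n");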
@@ -189,11 +189,11 @@ configure it and retrieve the current settings:
int vme_slave_set (struct vme_resource *res, int enabled,
unsigned long long base, unsigned long long size,
- dma_addr_t mem, vme_address_t aspace, vme_cycle_t cycle);
+ dma_addr_t mem, u32 aspace, u32 cycle);
int vme_slave_get (struct vme_resource *res, int *enabled,
unsigned long long *base, unsigned long long *size,
- dma_addr_t *mem, vme_address_t *aspace, vme_cycle_t *cycle);
+ dma_addr_t *mem, u32 *aspace, u32 *cycle);
The address spaces, transfer widths and cycle types are the same as described
under resource management, however some of the options are mutually exclusive.
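For the slave direction the PCI-side buffer is passed as a dma_addr_t; a minimal
sketch with hypothetical values (buffer allocation and error handling omitted):

	/* Expose a 64 KiB buffer at VME A24 address 0x200000, single cycles */
	if (vme_slave_set(res, 1, 0x200000ULL, 0x10000ULL, buf_dma,
			VME_A24, VME_SCT))
		dev_err(&vdev->dev, "failed to configure slave window\n");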
@@ -273,8 +273,7 @@ and pattern sources and destinations (where appropriate):
Pattern source:
- struct vme_dma_attr *vme_dma_pattern_attribute(u32 pattern,
- vme_pattern_t type);
+ struct vme_dma_attr *vme_dma_pattern_attribute(u32 pattern, u32 type);
PCI source or destination:
@@ -283,7 +282,7 @@ PCI source or destination:
VME source or destination:
struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long base,
- vme_address_t aspace, vme_cycle_t cycle, vme_width_t width);
+ u32 aspace, u32 cycle, u32 width);
The following function should be used to free an attribute:
@@ -366,10 +365,10 @@ Once a bank of location monitors has been allocated, the following functions
are provided to configure the location and mode of the location monitor:
int vme_lm_set(struct vme_resource *res, unsigned long long base,
- vme_address_t aspace, vme_cycle_t cycle);
+ u32 aspace, u32 cycle);
int vme_lm_get(struct vme_resource *res, unsigned long long *base,
- vme_address_t *aspace, vme_cycle_t *cycle);
+ u32 *aspace, u32 *cycle);
Location Monitor Use
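As a side note (not part of the patch), a location monitor configured through the
new signatures might look as follows; lm_hit() and the monitored address are
purely illustrative:

	/* lm_hit() is an assumed void (*)(int) callback defined elsewhere */
	lm_res = vme_lm_request(vdev);
	if (lm_res && vme_lm_set(lm_res, 0xffe0ULL, VME_A16, VME_SCT) == 0)
		vme_lm_attach(lm_res, 0, lm_hit);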
diff --git a/drivers/staging/vme/vme_bridge.h b/drivers/staging/vme/vme_bridge.h
index c2deda2c38df..934949abd745 100644
--- a/drivers/staging/vme/vme_bridge.h
+++ b/drivers/staging/vme/vme_bridge.h
@@ -15,9 +15,9 @@ struct vme_master_resource {
spinlock_t lock;
int locked;
int number;
- vme_address_t address_attr;
- vme_cycle_t cycle_attr;
- vme_width_t width_attr;
+ u32 address_attr;
+ u32 cycle_attr;
+ u32 width_attr;
struct resource bus_resource;
void __iomem *kern_base;
};
@@ -28,13 +28,13 @@ struct vme_slave_resource {
struct mutex mtx;
int locked;
int number;
- vme_address_t address_attr;
- vme_cycle_t cycle_attr;
+ u32 address_attr;
+ u32 cycle_attr;
};
struct vme_dma_pattern {
u32 pattern;
- vme_pattern_t type;
+ u32 type;
};
struct vme_dma_pci {
@@ -43,9 +43,9 @@ struct vme_dma_pci {
struct vme_dma_vme {
unsigned long long address;
- vme_address_t aspace;
- vme_cycle_t cycle;
- vme_width_t dwidth;
+ u32 aspace;
+ u32 cycle;
+ u32 dwidth;
};
struct vme_dma_list {
@@ -63,7 +63,7 @@ struct vme_dma_resource {
int number;
struct list_head pending;
struct list_head running;
- vme_dma_route_t route_attr;
+ u32 route_attr;
};
struct vme_lm_resource {
@@ -122,17 +122,16 @@ struct vme_bridge {
/* Slave Functions */
int (*slave_get) (struct vme_slave_resource *, int *,
unsigned long long *, unsigned long long *, dma_addr_t *,
- vme_address_t *, vme_cycle_t *);
+ u32 *, u32 *);
int (*slave_set) (struct vme_slave_resource *, int, unsigned long long,
- unsigned long long, dma_addr_t, vme_address_t, vme_cycle_t);
+ unsigned long long, dma_addr_t, u32, u32);
/* Master Functions */
int (*master_get) (struct vme_master_resource *, int *,
- unsigned long long *, unsigned long long *, vme_address_t *,
- vme_cycle_t *, vme_width_t *);
+ unsigned long long *, unsigned long long *, u32 *, u32 *,
+ u32 *);
int (*master_set) (struct vme_master_resource *, int,
- unsigned long long, unsigned long long, vme_address_t,
- vme_cycle_t, vme_width_t);
+ unsigned long long, unsigned long long, u32, u32, u32);
ssize_t (*master_read) (struct vme_master_resource *, void *, size_t,
loff_t);
ssize_t (*master_write) (struct vme_master_resource *, void *, size_t,
@@ -151,10 +150,9 @@ struct vme_bridge {
int (*irq_generate) (struct vme_bridge *, int, int);
/* Location monitor functions */
- int (*lm_set) (struct vme_lm_resource *, unsigned long long,
- vme_address_t, vme_cycle_t);
- int (*lm_get) (struct vme_lm_resource *, unsigned long long *,
- vme_address_t *, vme_cycle_t *);
+ int (*lm_set) (struct vme_lm_resource *, unsigned long long, u32, u32);
+ int (*lm_get) (struct vme_lm_resource *, unsigned long long *, u32 *,
+ u32 *);
int (*lm_attach) (struct vme_lm_resource *, int, void (*callback)(int));
int (*lm_detach) (struct vme_lm_resource *, int);
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index d8dd7846447d..3e8283c2dc73 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -3153,11 +3153,7 @@ static int device_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) {
break;
case SIOCGIWNWID: //0x8b03 support
- #ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
- rc = iwctl_giwnwid(dev, NULL, &(wrq->u.nwid), NULL);
- #else
- rc = -EOPNOTSUPP;
- #endif
+ rc = -EOPNOTSUPP;
break;
// Set frequency/channel
diff --git a/drivers/staging/vt6655/ioctl.c b/drivers/staging/vt6655/ioctl.c
index 432a20993c6e..7fd5cc5a55f6 100644
--- a/drivers/staging/vt6655/ioctl.c
+++ b/drivers/staging/vt6655/ioctl.c
@@ -300,6 +300,10 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq)
result = -EFAULT;
break;
}
+ if (sList.uItem > (ULONG_MAX - sizeof(SBSSIDList)) / sizeof(SBSSIDItem)) {
+ result = -EINVAL;
+ break;
+ }
pList = (PSBSSIDList)kmalloc(sizeof(SBSSIDList) + (sList.uItem * sizeof(SBSSIDItem)), (int)GFP_ATOMIC);
if (pList == NULL) {
result = -ENOMEM;
@@ -571,6 +575,10 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq)
result = -EFAULT;
break;
}
+ if (sNodeList.uItem > (ULONG_MAX - sizeof(SNodeList)) / sizeof(SNodeItem)) {
+ result = -EINVAL;
+ break;
+ }
pNodeList = (PSNodeList)kmalloc(sizeof(SNodeList) + (sNodeList.uItem * sizeof(SNodeItem)), (int)GFP_ATOMIC);
if (pNodeList == NULL) {
result = -ENOMEM;
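Both added checks are instances of the usual guard against a user-controlled
element count overflowing the kmalloc() size computation; in generic form
(structure names hypothetical):

	if (count > (ULONG_MAX - sizeof(struct list_hdr)) / sizeof(struct item))
		return -EINVAL;	/* hdr + count * item would wrap around */
	buf = kmalloc(sizeof(struct list_hdr) + count * sizeof(struct item),
			GFP_ATOMIC);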
diff --git a/drivers/staging/vt6655/iwctl.c b/drivers/staging/vt6655/iwctl.c
index 5e425d1476b8..87288db21785 100644
--- a/drivers/staging/vt6655/iwctl.c
+++ b/drivers/staging/vt6655/iwctl.c
@@ -151,18 +151,6 @@ int iwctl_giwname(struct net_device *dev,
return 0;
}
-int iwctl_giwnwid(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq,
- char *extra)
-{
- //wrq->value = 0x100;
- //wrq->disabled = 0;
- //wrq->fixed = 1;
- //return 0;
- return -EOPNOTSUPP;
-}
-
/*
* Wireless Handler : set scan
*/
diff --git a/drivers/staging/vt6655/iwctl.h b/drivers/staging/vt6655/iwctl.h
index 3096de0ba1bd..d224f913a624 100644
--- a/drivers/staging/vt6655/iwctl.h
+++ b/drivers/staging/vt6655/iwctl.h
@@ -79,11 +79,6 @@ int iwctl_giwname(struct net_device *dev,
char *wrq,
char *extra);
-int iwctl_giwnwid(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq,
- char *extra) ;
-
int iwctl_giwsens(struct net_device *dev,
struct iw_request_info *info,
struct iw_param *wrq,
diff --git a/drivers/staging/vt6656/80211mgr.c b/drivers/staging/vt6656/80211mgr.c
index fceec4999c36..39f98423dc02 100644
--- a/drivers/staging/vt6656/80211mgr.c
+++ b/drivers/staging/vt6656/80211mgr.c
@@ -224,8 +224,6 @@ vMgrDecodeBeacon(
}
pItem = (PWLAN_IE)(((PBYTE)pItem) + 2 + pItem->len);
}
-
- return;
}
@@ -248,8 +246,6 @@ vMgrEncodeIBSSATIM(
{
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
pFrame->len = WLAN_HDR_ADDR3_LEN;
-
- return;
}
@@ -270,8 +266,6 @@ vMgrDecodeIBSSATIM(
)
{
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
-
- return;
}
@@ -298,8 +292,6 @@ vMgrEncodeDisassociation(
pFrame->pwReason = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_DISASSOC_OFF_REASON);
pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_DISASSOC_OFF_REASON + sizeof(*(pFrame->pwReason));
-
- return;
}
@@ -324,8 +316,6 @@ vMgrDecodeDisassociation(
/* Fixed Fields */
pFrame->pwReason = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_DISASSOC_OFF_REASON);
-
- return;
}
/*+
@@ -352,7 +342,6 @@ vMgrEncodeAssocRequest(
pFrame->pwListenInterval = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_ASSOCREQ_OFF_LISTEN_INT);
pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_ASSOCREQ_OFF_LISTEN_INT + sizeof(*(pFrame->pwListenInterval));
- return;
}
@@ -418,7 +407,6 @@ vMgrDecodeAssocRequest(
}
pItem = (PWLAN_IE)(((PBYTE)pItem) + 2 + pItem->len);
}
- return;
}
/*+
@@ -448,8 +436,6 @@ vMgrEncodeAssocResponse(
+ WLAN_ASSOCRESP_OFF_AID);
pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_ASSOCRESP_OFF_AID
+ sizeof(*(pFrame->pwAid));
-
- return;
}
@@ -491,10 +477,8 @@ vMgrDecodeAssocResponse(
if ((((PBYTE)pItem) < (pFrame->pBuf + pFrame->len)) && (pItem->byElementID == WLAN_EID_EXTSUPP_RATES)) {
pFrame->pExtSuppRates = (PWLAN_IE_SUPP_RATES)pItem;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pFrame->pExtSuppRates=[%p].\n", pItem);
- } else {
+ } else
pFrame->pExtSuppRates = NULL;
- }
- return;
}
@@ -524,8 +508,6 @@ vMgrEncodeReassocRequest(
pFrame->pAddrCurrAP = (PIEEE_ADDR)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_REASSOCREQ_OFF_CURR_AP);
pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_REASSOCREQ_OFF_CURR_AP + sizeof(*(pFrame->pAddrCurrAP));
-
- return;
}
@@ -578,10 +560,9 @@ vMgrDecodeReassocRequest(
pFrame->pRSN = (PWLAN_IE_RSN)pItem;
break;
case WLAN_EID_RSN_WPA:
- if (pFrame->pRSNWPA == NULL) {
+ if (pFrame->pRSNWPA == NULL)
if (WPAb_Is_RSN((PWLAN_IE_RSN_EXT)pItem) == TRUE)
pFrame->pRSNWPA = (PWLAN_IE_RSN_EXT)pItem;
- }
break;
case WLAN_EID_EXTSUPP_RATES:
@@ -595,7 +576,6 @@ vMgrDecodeReassocRequest(
}
pItem = (PWLAN_IE)(((PBYTE)pItem) + 2 + pItem->len);
}
- return;
}
@@ -619,7 +599,6 @@ vMgrEncodeProbeRequest(
{
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
pFrame->len = WLAN_HDR_ADDR3_LEN;
- return;
}
/*+
@@ -670,7 +649,6 @@ vMgrDecodeProbeRequest(
pItem = (PWLAN_IE)(((PBYTE)pItem) + 2 + pItem->len);
}
- return;
}
@@ -703,8 +681,6 @@ vMgrEncodeProbeResponse(
pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_PROBERESP_OFF_CAP_INFO +
sizeof(*(pFrame->pwCapInfo));
-
- return;
}
@@ -818,7 +794,6 @@ vMgrDecodeProbeResponse(
pItem = (PWLAN_IE)(((PBYTE)pItem) + 2 + pItem->len);
}
- return;
}
@@ -848,7 +823,6 @@ vMgrEncodeAuthen(
pFrame->pwStatus = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_AUTHEN_OFF_STATUS);
pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_AUTHEN_OFF_STATUS + sizeof(*(pFrame->pwStatus));
- return;
}
@@ -886,7 +860,6 @@ vMgrDecodeAuthen(
if ((((PBYTE)pItem) < (pFrame->pBuf + pFrame->len)) && (pItem->byElementID == WLAN_EID_CHALLENGE))
pFrame->pChallenge = (PWLAN_IE_CHALLENGE)pItem;
- return;
}
@@ -912,7 +885,6 @@ vMgrEncodeDeauthen(
pFrame->pwReason = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_DEAUTHEN_OFF_REASON);
pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_DEAUTHEN_OFF_REASON + sizeof(*(pFrame->pwReason));
- return;
}
@@ -937,7 +909,6 @@ vMgrDecodeDeauthen(
/* Fixed Fields */
pFrame->pwReason = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_DEAUTHEN_OFF_REASON);
- return;
}
@@ -968,7 +939,6 @@ vMgrEncodeReassocResponse(
+ WLAN_REASSOCRESP_OFF_AID);
pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_REASSOCRESP_OFF_AID + sizeof(*(pFrame->pwAid));
- return;
}
@@ -1010,5 +980,4 @@ vMgrDecodeReassocResponse(
if ((((PBYTE)pItem) < (pFrame->pBuf + pFrame->len)) && (pItem->byElementID == WLAN_EID_EXTSUPP_RATES))
pFrame->pExtSuppRates = (PWLAN_IE_SUPP_RATES)pItem;
- return;
}
diff --git a/drivers/staging/vt6656/baseband.c b/drivers/staging/vt6656/baseband.c
index 0d11147f91c1..06f27f624db4 100644
--- a/drivers/staging/vt6656/baseband.c
+++ b/drivers/staging/vt6656/baseband.c
@@ -932,30 +932,8 @@ BBvCaculateParameter (
void
BBvSetAntennaMode (PSDevice pDevice, BYTE byAntennaMode)
{
- //{{ RobertYu: 20041124, ABG Mode, VC1/VC2 define, make the ANT_A, ANT_B inverted
- /*if ( (pDevice->byRFType == RF_MAXIM2829) ||
- (pDevice->byRFType == RF_UW2452) ||
- (pDevice->byRFType == RF_AIROHA7230) ) { // RobertYu: 20041210, 20050104
-
- switch (byAntennaMode) {
- case ANT_TXA:
- byAntennaMode = ANT_TXB;
- break;
- case ANT_TXB:
- byAntennaMode = ANT_TXA;
- break;
- case ANT_RXA:
- byAntennaMode = ANT_RXB;
- break;
- case ANT_RXB:
- byAntennaMode = ANT_RXA;
- break;
- }
- }*/
-
switch (byAntennaMode) {
case ANT_TXA:
- break;
case ANT_TXB:
break;
case ANT_RXA:
@@ -1249,8 +1227,7 @@ void BBvLoopbackOff (PSDevice pDevice)
// Set the CR33 Bit2 to disable internal Loopback.
ControlvReadByte (pDevice, MESSAGE_REQUEST_BBREG, 0x21, &byData);//CR33
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0x21, (BYTE)(byData & 0xFE));//CR33
- }
- else { // OFDM
+ } else { /* OFDM */
ControlvReadByte (pDevice, MESSAGE_REQUEST_BBREG, 0x9A, &byData);//CR154
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0x9A, (BYTE)(byData & 0xFE));//CR154
}
@@ -1277,19 +1254,16 @@ BBvSetShortSlotTime (PSDevice pDevice)
{
BYTE byBBVGA=0;
- if (pDevice->bShortSlotTime) {
+ if (pDevice->bShortSlotTime)
pDevice->byBBRxConf &= 0xDF;//1101 1111
- } else {
+ else
pDevice->byBBRxConf |= 0x20;//0010 0000
- }
ControlvReadByte (pDevice, MESSAGE_REQUEST_BBREG, 0xE7, &byBBVGA);
- if (byBBVGA == pDevice->abyBBVGA[0]) {
+ if (byBBVGA == pDevice->abyBBVGA[0])
pDevice->byBBRxConf |= 0x20;//0010 0000
- }
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0x0A, pDevice->byBBRxConf);
-
}
@@ -1299,13 +1273,11 @@ void BBvSetVGAGainOffset(PSDevice pDevice, BYTE byData)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xE7, byData);
// patch for 3253B0 Baseband with Cardbus module
- if (byData == pDevice->abyBBVGA[0]) {
- pDevice->byBBRxConf |= 0x20;//0010 0000
- } else if (pDevice->bShortSlotTime) {
- pDevice->byBBRxConf &= 0xDF;//1101 1111
- } else {
- pDevice->byBBRxConf |= 0x20;//0010 0000
- }
+ if (pDevice->bShortSlotTime)
+ pDevice->byBBRxConf &= 0xDF; /* 1101 1111 */
+ else
+ pDevice->byBBRxConf |= 0x20; /* 0010 0000 */
+
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0x0A, pDevice->byBBRxConf);//CR10
}
@@ -1365,15 +1337,14 @@ static unsigned long s_ulGetLowSQ3(PSDevice pDevice)
unsigned long ulMaxPacket;
ulMaxPacket = pDevice->aulPktNum[RATE_54M];
- if ( pDevice->aulPktNum[RATE_54M] != 0 ) {
+ if (pDevice->aulPktNum[RATE_54M] != 0)
ulSQ3 = pDevice->aulSQ3Val[RATE_54M] / pDevice->aulPktNum[RATE_54M];
- }
- for ( ii=RATE_48M;ii>=RATE_6M;ii-- ) {
- if ( pDevice->aulPktNum[ii] > ulMaxPacket ) {
+
+ for (ii = RATE_48M; ii >= RATE_6M; ii--)
+ if (pDevice->aulPktNum[ii] > ulMaxPacket) {
ulMaxPacket = pDevice->aulPktNum[ii];
ulSQ3 = pDevice->aulSQ3Val[ii] / pDevice->aulPktNum[ii];
}
- }
return ulSQ3;
}
@@ -1392,7 +1363,7 @@ static unsigned long s_ulGetRatio(PSDevice pDevice)
ulRatio = (ulPacketNum * 1000 / pDevice->uDiversityCnt);
ulRatio += TOP_RATE_54M;
}
- for ( ii=RATE_48M;ii>=RATE_1M;ii-- ) {
+ for (ii = RATE_48M; ii >= RATE_1M; ii--)
if ( pDevice->aulPktNum[ii] > ulMaxPacket ) {
ulPacketNum = 0;
for ( jj=RATE_54M;jj>=ii;jj--)
@@ -1402,8 +1373,6 @@ static unsigned long s_ulGetRatio(PSDevice pDevice)
ulMaxPacket = pDevice->aulPktNum[ii];
}
- }
-
return ulRatio;
}
@@ -1589,7 +1558,6 @@ void TimerSQ3CallBack(void *hDeviceContext)
spin_unlock_irq(&pDevice->lock);
- return;
}
@@ -1637,7 +1605,6 @@ void TimerSQ3Tmax3CallBack(void *hDeviceContext)
add_timer(&pDevice->TimerSQ3Tmax1);
spin_unlock_irq(&pDevice->lock);
- return;
}
void
diff --git a/drivers/staging/vt6656/bssdb.c b/drivers/staging/vt6656/bssdb.c
index af006df4c8e9..32c67ed8435a 100644
--- a/drivers/staging/vt6656/bssdb.c
+++ b/drivers/staging/vt6656/bssdb.c
@@ -216,26 +216,6 @@ PKnownBSS BSSpSearchBSSList(void *hDeviceContext,
continue;
}
}
-/*
- if (pMgmt->eAuthenMode < WMAC_AUTH_WPA) {
- if (pCurrBSS->bWPAValid == TRUE) {
- // WPA AP will reject connection of station without WPA enable.
- continue;
- }
- } else if ((pMgmt->eAuthenMode == WMAC_AUTH_WPA) ||
- (pMgmt->eAuthenMode == WMAC_AUTH_WPAPSK)) {
- if (pCurrBSS->bWPAValid == FALSE) {
- // station with WPA enable can't join NonWPA AP.
- continue;
- }
- } else if ((pMgmt->eAuthenMode == WMAC_AUTH_WPA2) ||
- (pMgmt->eAuthenMode == WMAC_AUTH_WPA2PSK)) {
- if (pCurrBSS->bWPA2Valid == FALSE) {
- // station with WPA2 enable can't join NonWPA2 AP.
- continue;
- }
- }
-*/
pMgmt->pSameBSS[jj].uChannel = pCurrBSS->uChannel;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"BSSpSearchBSSList pSelect1[%02X %02X %02X-%02X %02X %02X]\n",*pCurrBSS->abyBSSID,*(pCurrBSS->abyBSSID+1),*(pCurrBSS->abyBSSID+2),*(pCurrBSS->abyBSSID+3),*(pCurrBSS->abyBSSID+4),*(pCurrBSS->abyBSSID+5));
@@ -300,18 +280,11 @@ void BSSvClearBSSList(void *hDeviceContext, BOOL bKeepCurrBSSID)
continue;
}
}
-/*
- if ((pMgmt->sBSSList[ii].bActive) && (pMgmt->sBSSList[ii].uClearCount < BSS_CLEAR_COUNT)) {
- pMgmt->sBSSList[ii].uClearCount ++;
- continue;
- }
-*/
- pMgmt->sBSSList[ii].bActive = FALSE;
+
+ pMgmt->sBSSList[ii].bActive = FALSE;
memset(&pMgmt->sBSSList[ii], 0, sizeof(KnownBSS));
}
BSSvClearAnyBSSJoinRecord(pDevice);
-
- return;
}
@@ -524,46 +497,6 @@ BOOL BSSbInsertToBSSList(void *hDeviceContext,
pBSSList->ldBmAverage[ii] = 0;
}
-/*
- if ((pIE_Country != NULL) &&
- (pMgmt->b11hEnable == TRUE)) {
- CARDvSetCountryInfo(pMgmt->pAdapter,
- pBSSList->eNetworkTypeInUse,
- pIE_Country);
- }
-
- if ((bParsingQuiet == TRUE) && (pIE_Quiet != NULL)) {
- if ((((PWLAN_IE_QUIET)pIE_Quiet)->len == 8) &&
- (((PWLAN_IE_QUIET)pIE_Quiet)->byQuietCount != 0)) {
- // valid EID
- if (pQuiet == NULL) {
- pQuiet = (PWLAN_IE_QUIET)pIE_Quiet;
- CARDbSetQuiet( pMgmt->pAdapter,
- TRUE,
- pQuiet->byQuietCount,
- pQuiet->byQuietPeriod,
- *((PWORD)pQuiet->abyQuietDuration),
- *((PWORD)pQuiet->abyQuietOffset)
- );
- } else {
- pQuiet = (PWLAN_IE_QUIET)pIE_Quiet;
- CARDbSetQuiet( pMgmt->pAdapter,
- FALSE,
- pQuiet->byQuietCount,
- pQuiet->byQuietPeriod,
- *((PWORD)pQuiet->abyQuietDuration),
- *((PWORD)pQuiet->abyQuietOffset)
- );
- }
- }
- }
-
- if ((bParsingQuiet == TRUE) &&
- (pQuiet != NULL)) {
- CARDbStartQuiet(pMgmt->pAdapter);
- }
-*/
-
pBSSList->uIELength = uIELength;
if (pBSSList->uIELength > WLAN_BEACON_FR_MAXLEN)
pBSSList->uIELength = WLAN_BEACON_FR_MAXLEN;
@@ -609,8 +542,6 @@ BOOL BSSbUpdateToBSSList(void *hDeviceContext,
PSRxMgmtPacket pRxPacket = (PSRxMgmtPacket)pRxPacketContext;
signed long ldBm, ldBmSum;
BOOL bParsingQuiet = FALSE;
- // BYTE abyTmpSSID[WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1];
-
if (pBSSList == NULL)
return FALSE;
@@ -622,7 +553,6 @@ BOOL BSSbUpdateToBSSList(void *hDeviceContext,
pBSSList->wCapInfo = cpu_to_le16(wCapInfo);
pBSSList->uClearCount = 0;
pBSSList->uChannel = byCurrChannel;
-// DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"BSSbUpdateToBSSList: pBSSList->uChannel: %d\n", pBSSList->uChannel);
if (pSSID->len > WLAN_SSID_MAXLEN)
pSSID->len = WLAN_SSID_MAXLEN;
@@ -809,7 +739,6 @@ void BSSvCreateOneNode(void *hDeviceContext, unsigned int *puNodeIndex)
pMgmt->sNodeDBTable[*puNodeIndex].byAuthSequence = 0;
pMgmt->sNodeDBTable[*puNodeIndex].wEnQueueCnt = 0;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Create node index = %d\n", ii);
- return;
};
@@ -840,8 +769,6 @@ void BSSvRemoveOneNode(void *hDeviceContext, unsigned int uNodeIndex)
memset(&pMgmt->sNodeDBTable[uNodeIndex], 0, sizeof(KnownNodeDB));
// clear tx bit map
pMgmt->abyPSTxMap[pMgmt->sNodeDBTable[uNodeIndex].wAID >> 3] &= ~byMask[pMgmt->sNodeDBTable[uNodeIndex].wAID & 7];
-
- return;
};
/*+
*
@@ -1054,10 +981,6 @@ if((pMgmt->eCurrState!=WMAC_STATE_ASSOC) &&
// Rate fallback check
if (!pDevice->bFixRate) {
-/*
- if ((pMgmt->eCurrMode == WMAC_MODE_ESS_STA) && (ii == 0))
- RATEvTxRateFallBack(pDevice, &(pMgmt->sNodeDBTable[ii]));
-*/
if (ii > 0) {
// ii = 0 for multicast node (AP & Adhoc)
RATEvTxRateFallBack((void *)pDevice,
@@ -1152,7 +1075,6 @@ if((pMgmt->eCurrState!=WMAC_STATE_ASSOC) &&
(pMgmt->eCurrMode == WMAC_MODE_ESS_STA)) {
if (pMgmt->sNodeDBTable[0].bActive) { // Assoc with BSS
- // DBG_PRT(MSG_LEVEL_INFO, KERN_INFO "Callback inactive Count = [%d]\n", pMgmt->sNodeDBTable[0].uInActiveCount);
if (pDevice->bUpdateBBVGA) {
/* s_vCheckSensitivity((void *) pDevice); */
@@ -1194,7 +1116,6 @@ if((pMgmt->eCurrState!=WMAC_STATE_ASSOC) &&
pDevice->skb = dev_alloc_skb((int)pDevice->rx_buf_sz);
}
#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
- // if(pDevice->bWPASuppWextEnabled == TRUE)
{
union iwreq_data wrqu;
memset(&wrqu, 0, sizeof (wrqu));
@@ -1223,7 +1144,6 @@ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "bIsRoaming %d, !\n", pDevice->bIsRoaming );
pDevice->uIsroamingTime = 0;
pDevice->bRoaming = FALSE;
-// if ((pDevice->bWPADEVUp) && (pDevice->skb != NULL)) {
wpahdr = (viawget_wpa_header *)pDevice->skb->data;
wpahdr->type = VIAWGET_CCKM_ROAM_MSG;
wpahdr->resp_ie_len = 0;
@@ -1237,7 +1157,6 @@ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "bIsRoaming %d, !\n", pDevice->bIsRoaming );
netif_rx(pDevice->skb);
pDevice->skb = dev_alloc_skb((int)pDevice->rx_buf_sz);
-// }
}
else if ((pDevice->bRoaming == FALSE)&&(pDevice->bIsRoaming == TRUE)) {
pDevice->uIsroamingTime++;
@@ -1315,7 +1234,6 @@ else {
pMgmt->sTimerSecondCallback.expires = RUN_AT(HZ);
add_timer(&pMgmt->sTimerSecondCallback);
- return;
}
/*+
@@ -1364,7 +1282,6 @@ void BSSvUpdateNodeTxCounter(void *hDeviceContext,
// Only Unicast using support rates
if (wFIFOCtl & FIFOCTL_NEEDACK) {
- //DBG_PRN_GRP21(("Device %08X, wRate %04X, byTSR %02X\n", hDeviceContext, wRate, byTSR));
if (pMgmt->eCurrMode == WMAC_MODE_ESS_STA) {
pMgmt->sNodeDBTable[0].uTxAttempts += 1;
if ( !(byTSR & (TSR_TMO | TSR_RETRYTMO))) {
@@ -1475,10 +1392,6 @@ void BSSvUpdateNodeTxCounter(void *hDeviceContext,
}
}
}
-
- return;
-
-
}
/*+
@@ -1519,8 +1432,6 @@ void BSSvClearNodeDBTable(void *hDeviceContext,
memset(&pMgmt->sNodeDBTable[ii], 0, sizeof(KnownNodeDB));
}
}
-
- return;
};
void s_vCheckSensitivity(void *hDeviceContext)
@@ -1584,7 +1495,6 @@ RxOkRatio = (RxCnt < 6) ? 2000:((pDevice->scStatistic.RxOkCnt * 2000) / RxCnt);
//decide link quality
if(pDevice->bLinkPass !=TRUE)
{
- // printk("s_uCalculateLinkQual-->Link disconnect and Poor quality**\n");
pDevice->scStatistic.LinkQuality = 0;
pDevice->scStatistic.SignalStren = 0;
}
@@ -1608,7 +1518,6 @@ else
pDevice->scStatistic.TxFailCount = 0;
pDevice->scStatistic.TxNoRetryOkCount = 0;
pDevice->scStatistic.TxRetryOkCount = 0;
- return;
}
void BSSvClearAnyBSSJoinRecord(void *hDeviceContext)
@@ -1617,10 +1526,8 @@ void BSSvClearAnyBSSJoinRecord(void *hDeviceContext)
PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
unsigned int ii;
- for (ii = 0; ii < MAX_BSS_NUM; ii++) {
+ for (ii = 0; ii < MAX_BSS_NUM; ii++)
pMgmt->sBSSList[ii].bSelected = FALSE;
- }
- return;
}
void s_vCheckPreEDThreshold(void *hDeviceContext)
@@ -1637,6 +1544,5 @@ void s_vCheckPreEDThreshold(void *hDeviceContext)
BBvUpdatePreEDThreshold(pDevice, FALSE);
}
}
- return;
}
diff --git a/drivers/staging/vt6656/card.c b/drivers/staging/vt6656/card.c
index a49053bd7c65..9d09e9fd8e18 100644
--- a/drivers/staging/vt6656/card.c
+++ b/drivers/staging/vt6656/card.c
@@ -91,15 +91,10 @@ const WORD cwRXBCNTSFOff[MAX_RATE] =
* uConnectionChannel - Channel to be set
* Out:
* none
- *
- * Return Value: TRUE if succeeded; FALSE if failed.
- *
*/
-BOOL CARDbSetMediaChannel(void *pDeviceHandler, unsigned int uConnectionChannel)
+void CARDbSetMediaChannel(void *pDeviceHandler, unsigned int uConnectionChannel)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
-BOOL bResult = TRUE;
-
if (pDevice->byBBType == BB_TYPE_11A) { // 15 ~ 38
if ((uConnectionChannel < (CB_MAX_CHANNEL_24G+1)) || (uConnectionChannel > CB_MAX_CHANNEL))
@@ -140,7 +135,6 @@ BOOL bResult = TRUE;
RFbRawSetPower(pDevice, pDevice->abyCCKPwrTbl[uConnectionChannel-1], RATE_1M);
}
ControlvWriteByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_CHANNEL,(BYTE)(uConnectionChannel|0x80));
- return(bResult);
}
/*
@@ -607,7 +601,7 @@ BYTE ii;
* Return Value: TRUE if succeeded; FALSE if failed.
*
*/
-BOOL CARDbAddBasicRate(void *pDeviceHandler, WORD wRateIdx)
+void CARDbAddBasicRate(void *pDeviceHandler, WORD wRateIdx)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
WORD wRate = (WORD)(1<<wRateIdx);
@@ -616,8 +610,6 @@ WORD wRate = (WORD)(1<<wRateIdx);
//Determines the highest basic rate.
CARDvUpdateBasicTopRate(pDevice);
-
- return(TRUE);
}
BOOL CARDbIsOFDMinBasicRate(void *pDeviceHandler)
@@ -1090,7 +1082,7 @@ CARDbChannelSwitch (
if (byCount == 0) {
pDevice->sMgmtObj.uCurrChannel = byNewChannel;
- bResult = CARDbSetMediaChannel(pDevice, byNewChannel);
+ CARDbSetMediaChannel(pDevice, byNewChannel);
return bResult;
}
diff --git a/drivers/staging/vt6656/card.h b/drivers/staging/vt6656/card.h
index 6c91343d0d14..9cf71a3d8801 100644
--- a/drivers/staging/vt6656/card.h
+++ b/drivers/staging/vt6656/card.h
@@ -60,12 +60,12 @@ typedef enum _CARD_OP_MODE {
/*--------------------- Export Functions --------------------------*/
-BOOL CARDbSetMediaChannel(void *pDeviceHandler,
+void CARDbSetMediaChannel(void *pDeviceHandler,
unsigned int uConnectionChannel);
void CARDvSetRSPINF(void *pDeviceHandler, BYTE byBBType);
void vUpdateIFS(void *pDeviceHandler);
void CARDvUpdateBasicTopRate(void *pDeviceHandler);
-BOOL CARDbAddBasicRate(void *pDeviceHandler, WORD wRateIdx);
+void CARDbAddBasicRate(void *pDeviceHandler, WORD wRateIdx);
BOOL CARDbIsOFDMinBasicRate(void *pDeviceHandler);
void CARDvAdjustTSF(void *pDeviceHandler, BYTE byRxRate,
QWORD qwBSSTimestamp, QWORD qwLocalTSF);
diff --git a/drivers/staging/vt6656/int.c b/drivers/staging/vt6656/int.c
index c95833ac58e0..0a114231145f 100644
--- a/drivers/staging/vt6656/int.c
+++ b/drivers/staging/vt6656/int.c
@@ -92,9 +92,8 @@ void INTvWorkItem(void *Context)
spin_unlock_irq(&pDevice->lock);
}
-int INTnsProcessData(PSDevice pDevice)
+void INTnsProcessData(PSDevice pDevice)
{
- int status = STATUS_SUCCESS;
PSINTData pINTData;
PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
struct net_device_stats *pStats = &pDevice->stats;
@@ -218,6 +217,4 @@ int INTnsProcessData(PSDevice pDevice)
pDevice->scStatistic.ullTxBroadcastBytes;
pStats->tx_errors = pDevice->scStatistic.dwTsrErr;
pStats->tx_dropped = pDevice->scStatistic.dwTsrErr;
-
- return status;
}
diff --git a/drivers/staging/vt6656/int.h b/drivers/staging/vt6656/int.h
index 3176c8d08d6d..a5d96b968176 100644
--- a/drivers/staging/vt6656/int.h
+++ b/drivers/staging/vt6656/int.h
@@ -68,6 +68,6 @@ SINTData, *PSINTData;
/*--------------------- Export Functions --------------------------*/
void INTvWorkItem(void *Context);
-int INTnsProcessData(PSDevice pDevice);
+void INTnsProcessData(PSDevice pDevice);
#endif /* __INT_H__ */
diff --git a/drivers/staging/vt6656/ioctl.c b/drivers/staging/vt6656/ioctl.c
index 49390026dea3..1463d76895f0 100644
--- a/drivers/staging/vt6656/ioctl.c
+++ b/drivers/staging/vt6656/ioctl.c
@@ -295,6 +295,10 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq)
result = -EFAULT;
break;
}
+ if (sList.uItem > (ULONG_MAX - sizeof(SBSSIDList)) / sizeof(SBSSIDItem)) {
+ result = -EINVAL;
+ break;
+ }
pList = (PSBSSIDList)kmalloc(sizeof(SBSSIDList) + (sList.uItem * sizeof(SBSSIDItem)), (int)GFP_ATOMIC);
if (pList == NULL) {
result = -ENOMEM;
@@ -557,6 +561,10 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq)
result = -EFAULT;
break;
}
+ if (sNodeList.uItem > (ULONG_MAX - sizeof(SNodeList)) / sizeof(SNodeItem)) {
+ result = -EINVAL;
+ break;
+ }
pNodeList = (PSNodeList)kmalloc(sizeof(SNodeList) + (sNodeList.uItem * sizeof(SNodeItem)), (int)GFP_ATOMIC);
if (pNodeList == NULL) {
result = -ENOMEM;
diff --git a/drivers/staging/vt6656/iwctl.c b/drivers/staging/vt6656/iwctl.c
index 2121205a912b..ecfda5272fa1 100644
--- a/drivers/staging/vt6656/iwctl.c
+++ b/drivers/staging/vt6656/iwctl.c
@@ -128,17 +128,6 @@ int iwctl_giwname(struct net_device *dev,
return 0;
}
-int iwctl_giwnwid(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq,
- char *extra)
-{
- //wrq->value = 0x100;
- //wrq->disabled = 0;
- //wrq->fixed = 1;
- //return 0;
- return -EOPNOTSUPP;
-}
/*
* Wireless Handler : set scan
*/
@@ -1939,7 +1928,6 @@ static const iw_handler iwctl_handler[] =
(iw_handler) iwctl_commit, // SIOCSIWCOMMIT
(iw_handler) iwctl_giwname, // SIOCGIWNAME
(iw_handler) NULL, // SIOCSIWNWID
- (iw_handler) NULL, // SIOCGIWNWID
(iw_handler) iwctl_siwfreq, // SIOCSIWFREQ
(iw_handler) iwctl_giwfreq, // SIOCGIWFREQ
(iw_handler) iwctl_siwmode, // SIOCSIWMODE
diff --git a/drivers/staging/vt6656/iwctl.h b/drivers/staging/vt6656/iwctl.h
index cc48954783fc..10a240e65012 100644
--- a/drivers/staging/vt6656/iwctl.h
+++ b/drivers/staging/vt6656/iwctl.h
@@ -77,11 +77,6 @@ int iwctl_giwname(struct net_device *dev,
char *wrq,
char *extra);
-int iwctl_giwnwid(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq,
- char *extra) ;
-
int iwctl_giwsens(struct net_device *dev,
struct iw_request_info *info,
struct iw_param *wrq,
diff --git a/drivers/staging/vt6656/mac.c b/drivers/staging/vt6656/mac.c
index 26c19d1408c4..af4a29d14775 100644
--- a/drivers/staging/vt6656/mac.c
+++ b/drivers/staging/vt6656/mac.c
@@ -133,10 +133,9 @@ void MACvWriteMultiAddr(PSDevice pDevice, unsigned int uByteIdx, BYTE byData)
* Out:
* none
*
- * Return Value: TRUE if success; otherwise FALSE
*
*/
-BOOL MACbShutdown (PSDevice pDevice)
+void MACbShutdown(PSDevice pDevice)
{
CONTROLnsRequestOutAsyn(pDevice,
MESSAGE_TYPE_MACSHUTDOWN,
@@ -145,7 +144,6 @@ BOOL MACbShutdown (PSDevice pDevice)
0,
NULL
);
- return TRUE;
}
void MACvSetBBType(PSDevice pDevice,BYTE byType)
diff --git a/drivers/staging/vt6656/mac.h b/drivers/staging/vt6656/mac.h
index 491ff5ecd04b..147ac50218d3 100644
--- a/drivers/staging/vt6656/mac.h
+++ b/drivers/staging/vt6656/mac.h
@@ -422,7 +422,7 @@
void MACvSetMultiAddrByHash(PSDevice pDevice, BYTE byHashIdx);
void MACvWriteMultiAddr(PSDevice pDevice, unsigned int uByteIdx, BYTE byData);
-BOOL MACbShutdown(PSDevice pDevice);
+void MACbShutdown(PSDevice pDevice);
void MACvSetBBType(PSDevice pDevice, BYTE byType);
void MACvSetMISCFifo(PSDevice pDevice, WORD wOffset, DWORD dwData);
void MACvDisableKeyEntry(PSDevice pDevice, unsigned int uEntryIdx);
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index 541f9aa8ef6d..6a708f447651 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -900,7 +900,7 @@ static BOOL device_alloc_bufs(PSDevice pDevice) {
}
// allocate rcb mem
- pDevice->pRCBMem = kmalloc((sizeof(RCB) * pDevice->cbRD), GFP_KERNEL);
+ pDevice->pRCBMem = kzalloc((sizeof(RCB) * pDevice->cbRD), GFP_KERNEL);
if (pDevice->pRCBMem == NULL) {
DBG_PRT(MSG_LEVEL_ERR,KERN_ERR "%s : alloc rx usb context failed\n", pDevice->dev->name);
goto free_tx;
@@ -912,7 +912,6 @@ static BOOL device_alloc_bufs(PSDevice pDevice) {
pDevice->FirstRecvMngList = NULL;
pDevice->LastRecvMngList = NULL;
pDevice->NumRecvFreeList = 0;
- memset(pDevice->pRCBMem, 0, (sizeof(RCB) * pDevice->cbRD));
pRCB = (PRCB) pDevice->pRCBMem;
for (ii = 0; ii < pDevice->cbRD; ii++) {
@@ -1618,15 +1617,8 @@ static int device_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) {
break;
case SIOCSIWNWID:
- rc = -EOPNOTSUPP;
- break;
-
case SIOCGIWNWID: //0x8b03 support
- #ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
- rc = iwctl_giwnwid(dev, NULL, &(wrq->u.nwid), NULL);
- #else
- rc = -EOPNOTSUPP;
- #endif
+ rc = -EOPNOTSUPP;
break;
// Set frequency/channel
diff --git a/drivers/staging/wlags49_h2/debug.h b/drivers/staging/wlags49_h2/debug.h
index 8d5dddf08055..811698f1070c 100644
--- a/drivers/staging/wlags49_h2/debug.h
+++ b/drivers/staging/wlags49_h2/debug.h
@@ -43,7 +43,7 @@
*
* Disclaimer
*
- * THIS SOFTWARE IS PROVIDED AS IS AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/dhfcfg.h b/drivers/staging/wlags49_h2/dhfcfg.h
index 75c279f268ae..147f4c83c00c 100644
--- a/drivers/staging/wlags49_h2/dhfcfg.h
+++ b/drivers/staging/wlags49_h2/dhfcfg.h
@@ -43,7 +43,7 @@
*
* Disclaimer
*
- * THIS SOFTWARE IS PROVIDED AS IS AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/hcf.c b/drivers/staging/wlags49_h2/hcf.c
index 7dc176a95aab..b008773323b3 100644
--- a/drivers/staging/wlags49_h2/hcf.c
+++ b/drivers/staging/wlags49_h2/hcf.c
@@ -32,9 +32,9 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
- * COPYRIGHT 1994 - 1995 by AT&T. All Rights Reserved
- * COPYRIGHT 1996 - 2000 by Lucent Technologies. All Rights Reserved
- * COPYRIGHT 2001 - 2004 by Agere Systems Inc. All Rights Reserved
+ * COPYRIGHT © 1994 - 1995 by AT&T. All Rights Reserved
+ * COPYRIGHT © 1996 - 2000 by Lucent Technologies. All Rights Reserved
+ * COPYRIGHT © 2001 - 2004 by Agere Systems Inc. All Rights Reserved
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
diff --git a/drivers/staging/wlags49_h2/hcf.h b/drivers/staging/wlags49_h2/hcf.h
index 000994731166..95527b5cf863 100644
--- a/drivers/staging/wlags49_h2/hcf.h
+++ b/drivers/staging/wlags49_h2/hcf.h
@@ -40,9 +40,9 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
-* COPYRIGHT 1994 - 1995 by AT&T. All Rights Reserved
-* COPYRIGHT 1996 - 2000 by Lucent Technologies. All Rights Reserved
-* COPYRIGHT 2001 - 2004 by Agere Systems Inc. All Rights Reserved
+* COPYRIGHT © 1994 - 1995 by AT&T. All Rights Reserved
+* COPYRIGHT © 1996 - 2000 by Lucent Technologies. All Rights Reserved
+* COPYRIGHT © 2001 - 2004 by Agere Systems Inc. All Rights Reserved
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
diff --git a/drivers/staging/wlags49_h2/hcfcfg.h b/drivers/staging/wlags49_h2/hcfcfg.h
index 7545bc554112..ef60da8c3ebc 100644
--- a/drivers/staging/wlags49_h2/hcfcfg.h
+++ b/drivers/staging/wlags49_h2/hcfcfg.h
@@ -64,9 +64,9 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
-* COPYRIGHT 1994 - 1995 by AT&T. All Rights Reserved
-* COPYRIGHT 1996 - 2000 by Lucent Technologies. All Rights Reserved
-* COPYRIGHT 2001 - 2004 by Agere Systems Inc. All Rights Reserved
+* COPYRIGHT © 1994 - 1995 by AT&T. All Rights Reserved
+* COPYRIGHT © 1996 - 2000 by Lucent Technologies. All Rights Reserved
+* COPYRIGHT © 2001 - 2004 by Agere Systems Inc. All Rights Reserved
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
diff --git a/drivers/staging/wlags49_h2/hcfdef.h b/drivers/staging/wlags49_h2/hcfdef.h
index a62b53a22891..30744e194a23 100644
--- a/drivers/staging/wlags49_h2/hcfdef.h
+++ b/drivers/staging/wlags49_h2/hcfdef.h
@@ -33,9 +33,9 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
- * COPYRIGHT 1994 - 1995 by AT&T. All Rights Reserved
- * COPYRIGHT 1996 - 2000 by Lucent Technologies. All Rights Reserved
- * COPYRIGHT 2001 - 2004 by Agere Systems Inc. All Rights Reserved
+ * COPYRIGHT © 1994 - 1995 by AT&T. All Rights Reserved
+ * COPYRIGHT © 1996 - 2000 by Lucent Technologies. All Rights Reserved
+ * COPYRIGHT © 2001 - 2004 by Agere Systems Inc. All Rights Reserved
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
diff --git a/drivers/staging/wlags49_h2/mdd.h b/drivers/staging/wlags49_h2/mdd.h
index b02e3ea9e479..5f951efb9c07 100644
--- a/drivers/staging/wlags49_h2/mdd.h
+++ b/drivers/staging/wlags49_h2/mdd.h
@@ -33,9 +33,9 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
-* COPYRIGHT 1994 - 1995 by AT&T. All Rights Reserved
-* COPYRIGHT 1996 - 2000 by Lucent Technologies. All Rights Reserved
-* COPYRIGHT 2001 - 2004 by Agere Systems Inc. All Rights Reserved
+* COPYRIGHT © 1994 - 1995 by AT&T. All Rights Reserved
+* COPYRIGHT © 1996 - 2000 by Lucent Technologies. All Rights Reserved
+* COPYRIGHT © 2001 - 2004 by Agere Systems Inc. All Rights Reserved
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
diff --git a/drivers/staging/wlags49_h2/mmd.c b/drivers/staging/wlags49_h2/mmd.c
index de138c481a9e..c8f52107e6ca 100644
--- a/drivers/staging/wlags49_h2/mmd.c
+++ b/drivers/staging/wlags49_h2/mmd.c
@@ -35,7 +35,7 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
-* COPYRIGHT 2001 - 2004 by Agere Systems Inc. All Rights Reserved
+* COPYRIGHT © 2001 - 2004 by Agere Systems Inc. All Rights Reserved
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
diff --git a/drivers/staging/wlags49_h2/mmd.h b/drivers/staging/wlags49_h2/mmd.h
index 06890c1b30a4..914952513005 100644
--- a/drivers/staging/wlags49_h2/mmd.h
+++ b/drivers/staging/wlags49_h2/mmd.h
@@ -33,7 +33,7 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
-* COPYRIGHT 2001 - 2004 by Agere Systems Inc. All Rights Reserved
+* COPYRIGHT © 2001 - 2004 by Agere Systems Inc. All Rights Reserved
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
diff --git a/drivers/staging/wlags49_h2/wl_cs.h b/drivers/staging/wlags49_h2/wl_cs.h
index 21f17be4f02a..a7ab579759de 100644
--- a/drivers/staging/wlags49_h2/wl_cs.h
+++ b/drivers/staging/wlags49_h2/wl_cs.h
@@ -22,7 +22,7 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
- * Copyright 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
@@ -43,7 +43,7 @@
*
* Disclaimer
*
- * THIS SOFTWARE IS PROVIDED AS IS AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/wl_enc.c b/drivers/staging/wlags49_h2/wl_enc.c
index 26cf5486edd6..4c6f776cc4da 100644
--- a/drivers/staging/wlags49_h2/wl_enc.c
+++ b/drivers/staging/wlags49_h2/wl_enc.c
@@ -23,7 +23,7 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
- * Copyright 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
@@ -44,7 +44,7 @@
*
* Disclaimer
*
- * THIS SOFTWARE IS PROVIDED AS IS AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/wl_enc.h b/drivers/staging/wlags49_h2/wl_enc.h
index b4f54d81f311..46629f3b112b 100644
--- a/drivers/staging/wlags49_h2/wl_enc.h
+++ b/drivers/staging/wlags49_h2/wl_enc.h
@@ -22,7 +22,7 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
- * Copyright 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
@@ -43,7 +43,7 @@
*
* Disclaimer
*
- * THIS SOFTWARE IS PROVIDED AS IS AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/wl_if.h b/drivers/staging/wlags49_h2/wl_if.h
index ed2b4135a10e..6a26130f5a3a 100644
--- a/drivers/staging/wlags49_h2/wl_if.h
+++ b/drivers/staging/wlags49_h2/wl_if.h
@@ -23,7 +23,7 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
- * Copyright 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
@@ -44,7 +44,7 @@
*
* Disclaimer
*
- * THIS SOFTWARE IS PROVIDED AS IS AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/wl_internal.h b/drivers/staging/wlags49_h2/wl_internal.h
index 575340834051..553601f48873 100644
--- a/drivers/staging/wlags49_h2/wl_internal.h
+++ b/drivers/staging/wlags49_h2/wl_internal.h
@@ -22,7 +22,7 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
- * Copyright 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
@@ -43,7 +43,7 @@
*
* Disclaimer
*
- * THIS SOFTWARE IS PROVIDED AS IS AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/wl_main.c b/drivers/staging/wlags49_h2/wl_main.c
index 483eee1bf63a..dab603e0f452 100644
--- a/drivers/staging/wlags49_h2/wl_main.c
+++ b/drivers/staging/wlags49_h2/wl_main.c
@@ -23,7 +23,7 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
- * Copyright 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
@@ -44,7 +44,7 @@
*
* Disclaimer
*
- * THIS SOFTWARE IS PROVIDED AS IS AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/wl_main.h b/drivers/staging/wlags49_h2/wl_main.h
index d593ae535fb8..3b5acdf4e329 100644
--- a/drivers/staging/wlags49_h2/wl_main.h
+++ b/drivers/staging/wlags49_h2/wl_main.h
@@ -22,7 +22,7 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
- * Copyright 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
@@ -43,7 +43,7 @@
*
* Disclaimer
*
- * THIS SOFTWARE IS PROVIDED AS IS AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/wl_netdev.c b/drivers/staging/wlags49_h2/wl_netdev.c
index 5a2b334f206e..9c16f5478a75 100644
--- a/drivers/staging/wlags49_h2/wl_netdev.c
+++ b/drivers/staging/wlags49_h2/wl_netdev.c
@@ -23,7 +23,7 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
- * Copyright 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
@@ -44,7 +44,7 @@
*
* Disclaimer
*
- * THIS SOFTWARE IS PROVIDED AS IS AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/wl_netdev.h b/drivers/staging/wlags49_h2/wl_netdev.h
index 632ab2e6302c..61f040f26d97 100644
--- a/drivers/staging/wlags49_h2/wl_netdev.h
+++ b/drivers/staging/wlags49_h2/wl_netdev.h
@@ -23,7 +23,7 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
- * Copyright 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
@@ -44,7 +44,7 @@
*
* Disclaimer
*
- * THIS SOFTWARE IS PROVIDED AS IS AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/wl_pci.c b/drivers/staging/wlags49_h2/wl_pci.c
index 28ae9dd1b44e..2bd9b84ace8e 100644
--- a/drivers/staging/wlags49_h2/wl_pci.c
+++ b/drivers/staging/wlags49_h2/wl_pci.c
@@ -23,7 +23,7 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
- * Copyright 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
@@ -44,7 +44,7 @@
*
* Disclaimer
*
- * THIS SOFTWARE IS PROVIDED AS IS AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
@@ -112,17 +112,10 @@ extern dbg_info_t *DbgInfo;
#endif // DBG
/* define the PCI device Table Cardname and id tables */
-enum hermes_pci_versions {
- CH_Agere_Systems_Mini_PCI_V1 = 0,
-};
-
static struct pci_device_id wl_pci_tbl[] __devinitdata = {
- { PCI_VENDOR_ID_WL_LKM, PCI_DEVICE_ID_WL_LKM_0,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Agere_Systems_Mini_PCI_V1 },
- { PCI_VENDOR_ID_WL_LKM, PCI_DEVICE_ID_WL_LKM_1,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Agere_Systems_Mini_PCI_V1 },
- { PCI_VENDOR_ID_WL_LKM, PCI_DEVICE_ID_WL_LKM_2,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Agere_Systems_Mini_PCI_V1 },
+ { PCI_DEVICE(PCI_VENDOR_ID_WL_LKM, PCI_DEVICE_ID_WL_LKM_0), },
+ { PCI_DEVICE(PCI_VENDOR_ID_WL_LKM, PCI_DEVICE_ID_WL_LKM_1), },
+ { PCI_DEVICE(PCI_VENDOR_ID_WL_LKM, PCI_DEVICE_ID_WL_LKM_2), },
{ } /* Terminating entry */
};
diff --git a/drivers/staging/wlags49_h2/wl_pci.h b/drivers/staging/wlags49_h2/wl_pci.h
index cea04c44ec47..86831f1b4de2 100644
--- a/drivers/staging/wlags49_h2/wl_pci.h
+++ b/drivers/staging/wlags49_h2/wl_pci.h
@@ -22,7 +22,7 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
- * Copyright 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
@@ -43,7 +43,7 @@
*
* Disclaimer
*
- * THIS SOFTWARE IS PROVIDED AS IS AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/wl_priv.c b/drivers/staging/wlags49_h2/wl_priv.c
index 260d4f0d47b4..f30e5ee4bca3 100644
--- a/drivers/staging/wlags49_h2/wl_priv.c
+++ b/drivers/staging/wlags49_h2/wl_priv.c
@@ -22,7 +22,7 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
- * Copyright 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
@@ -43,7 +43,7 @@
*
* Disclaimer
*
- * THIS SOFTWARE IS PROVIDED AS IS AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/wl_priv.h b/drivers/staging/wlags49_h2/wl_priv.h
index 9b0254497aa7..b647bfd90098 100644
--- a/drivers/staging/wlags49_h2/wl_priv.h
+++ b/drivers/staging/wlags49_h2/wl_priv.h
@@ -22,7 +22,7 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
- * Copyright 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
@@ -43,7 +43,7 @@
*
* Disclaimer
*
- * THIS SOFTWARE IS PROVIDED AS IS AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/wl_profile.c b/drivers/staging/wlags49_h2/wl_profile.c
index a459e48c7bf0..b8c96cf18de5 100644
--- a/drivers/staging/wlags49_h2/wl_profile.c
+++ b/drivers/staging/wlags49_h2/wl_profile.c
@@ -23,7 +23,7 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
- * Copyright 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
@@ -44,7 +44,7 @@
*
* Disclaimer
*
- * THIS SOFTWARE IS PROVIDED AS IS AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/wl_profile.h b/drivers/staging/wlags49_h2/wl_profile.h
index 81db8e8c6ba8..f81df51d2216 100644
--- a/drivers/staging/wlags49_h2/wl_profile.h
+++ b/drivers/staging/wlags49_h2/wl_profile.h
@@ -22,7 +22,7 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
- * Copyright 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
@@ -43,7 +43,7 @@
*
* Disclaimer
*
- * THIS SOFTWARE IS PROVIDED AS IS AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/wl_util.c b/drivers/staging/wlags49_h2/wl_util.c
index 3b6f5a59b2bb..b748a3ff7954 100644
--- a/drivers/staging/wlags49_h2/wl_util.c
+++ b/drivers/staging/wlags49_h2/wl_util.c
@@ -22,7 +22,7 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
- * Copyright 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
@@ -43,7 +43,7 @@
*
* Disclaimer
*
- * THIS SOFTWARE IS PROVIDED AS IS AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/wl_util.h b/drivers/staging/wlags49_h2/wl_util.h
index 2661bcd6b0ec..946b1b64c46f 100644
--- a/drivers/staging/wlags49_h2/wl_util.h
+++ b/drivers/staging/wlags49_h2/wl_util.h
@@ -23,7 +23,7 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
- * Copyright 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
@@ -44,7 +44,7 @@
*
* Disclaimer
*
- * THIS SOFTWARE IS PROVIDED AS IS AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/wl_version.h b/drivers/staging/wlags49_h2/wl_version.h
index fd37040afd01..3deacfac9d25 100644
--- a/drivers/staging/wlags49_h2/wl_version.h
+++ b/drivers/staging/wlags49_h2/wl_version.h
@@ -23,7 +23,7 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
- * Copyright 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
@@ -44,7 +44,7 @@
*
* Disclaimer
*
- * THIS SOFTWARE IS PROVIDED AS IS AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/wl_wext.c b/drivers/staging/wlags49_h2/wl_wext.c
index 8ac5e1081aa0..7ff0a108da13 100644
--- a/drivers/staging/wlags49_h2/wl_wext.c
+++ b/drivers/staging/wlags49_h2/wl_wext.c
@@ -18,7 +18,7 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
- * Copyright 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
@@ -39,7 +39,7 @@
*
* Disclaimer
*
- * THIS SOFTWARE IS PROVIDED AS IS AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/wl_wext.h b/drivers/staging/wlags49_h2/wl_wext.h
index a713058c8027..029da52c4c49 100644
--- a/drivers/staging/wlags49_h2/wl_wext.h
+++ b/drivers/staging/wlags49_h2/wl_wext.h
@@ -22,7 +22,7 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
- * Copyright 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
@@ -43,7 +43,7 @@
*
* Disclaimer
*
- * THIS SOFTWARE IS PROVIDED AS IS AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/xgifb/Makefile b/drivers/staging/xgifb/Makefile
index 3c8c7de9eadd..55e519905346 100644
--- a/drivers/staging/xgifb/Makefile
+++ b/drivers/staging/xgifb/Makefile
@@ -1,4 +1,4 @@
obj-$(CONFIG_FB_XGI) += xgifb.o
-xgifb-y := XGI_main_26.o vb_init.o vb_setmode.o vb_util.o vb_ext.o
+xgifb-y := XGI_main_26.o vb_init.o vb_setmode.o vb_util.o
diff --git a/drivers/staging/xgifb/XGI_main.h b/drivers/staging/xgifb/XGI_main.h
index 71aebe3e84d8..35f7b2a485e1 100644
--- a/drivers/staging/xgifb/XGI_main.h
+++ b/drivers/staging/xgifb/XGI_main.h
@@ -32,14 +32,10 @@
#endif
static DEFINE_PCI_DEVICE_TABLE(xgifb_pci_table) = {
- {PCI_VENDOR_ID_XG, PCI_DEVICE_ID_XG_20, PCI_ANY_ID, PCI_ANY_ID,
- 0, 0, 0},
- {PCI_VENDOR_ID_XG, PCI_DEVICE_ID_XG_27, PCI_ANY_ID, PCI_ANY_ID,
- 0, 0, 1},
- {PCI_VENDOR_ID_XG, PCI_DEVICE_ID_XG_40, PCI_ANY_ID, PCI_ANY_ID,
- 0, 0, 2},
- {PCI_VENDOR_ID_XG, PCI_DEVICE_ID_XG_42, PCI_ANY_ID, PCI_ANY_ID,
- 0, 0, 3},
+ {PCI_DEVICE(PCI_VENDOR_ID_XG, PCI_DEVICE_ID_XG_20)},
+ {PCI_DEVICE(PCI_VENDOR_ID_XG, PCI_DEVICE_ID_XG_27)},
+ {PCI_DEVICE(PCI_VENDOR_ID_XG, PCI_DEVICE_ID_XG_40)},
+ {PCI_DEVICE(PCI_VENDOR_ID_XG, PCI_DEVICE_ID_XG_42)},
{0}
};
@@ -128,7 +124,6 @@ MODULE_DEVICE_TABLE(pci, xgifb_pci_table);
/* display status */
static int XGIfb_crt1off;
static int XGIfb_forcecrt1 = -1;
-static int XGIfb_userom ;
/* global flags */
static int XGIfb_tvmode;
diff --git a/drivers/staging/xgifb/XGI_main_26.c b/drivers/staging/xgifb/XGI_main_26.c
index 277e408c39c5..2502c49c9c5b 100644
--- a/drivers/staging/xgifb/XGI_main_26.c
+++ b/drivers/staging/xgifb/XGI_main_26.c
@@ -21,7 +21,6 @@
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/pci.h>
-#include <linux/vmalloc.h>
#include <linux/vt_kern.h>
#include <linux/capability.h>
#include <linux/fs.h>
@@ -46,8 +45,7 @@
#define GPIOG_EN (1<<6)
#define GPIOG_READ (1<<1)
-#define XGIFB_ROM_SIZE 65536
-
+static char *forcecrt2type;
static char *mode;
static int vesa = -1;
static unsigned int refresh_rate;
@@ -159,7 +157,6 @@ static int XGIfb_mode_rate_to_dclock(struct vb_device_info *XGI_Pr,
/* unsigned long temp = 0; */
int Clock;
- XGI_Pr->ROMAddr = HwDeviceExtension->pjVirtualRomBase;
InitTo330Pointer(HwDeviceExtension->jChipType, XGI_Pr);
RefreshRateTableIndex = XGI_GetRatePtrCRT2(HwDeviceExtension, ModeNo,
@@ -199,7 +196,6 @@ static int XGIfb_mode_rate_to_ddata(struct vb_device_info *XGI_Pr,
unsigned char sr_data, cr_data, cr_data2;
unsigned long cr_data3;
int A, B, C, D, E, F, temp, j;
- XGI_Pr->ROMAddr = HwDeviceExtension->pjVirtualRomBase;
InitTo330Pointer(HwDeviceExtension->jChipType, XGI_Pr);
RefreshRateTableIndex = XGI_GetRatePtrCRT2(HwDeviceExtension, ModeNo,
ModeIdIndex, XGI_Pr);
@@ -387,7 +383,7 @@ static void XGIRegInit(struct vb_device_info *XGI_Pr, unsigned long BaseAddr)
/* ------------------ Internal helper routines ----------------- */
-static int XGIfb_GetXG21DefaultLVDSModeIdx(void)
+static int XGIfb_GetXG21DefaultLVDSModeIdx(struct xgifb_video_info *xgifb_info)
{
int found_mode = 0;
@@ -396,11 +392,11 @@ static int XGIfb_GetXG21DefaultLVDSModeIdx(void)
found_mode = 0;
while ((XGIbios_mode[XGIfb_mode_idx].mode_no != 0)
&& (XGIbios_mode[XGIfb_mode_idx].xres
- <= XGI21_LCDCapList[0].LVDSHDE)) {
+ <= xgifb_info->lvds_data.LVDSHDE)) {
if ((XGIbios_mode[XGIfb_mode_idx].xres
- == XGI21_LCDCapList[0].LVDSHDE)
+ == xgifb_info->lvds_data.LVDSHDE)
&& (XGIbios_mode[XGIfb_mode_idx].yres
- == XGI21_LCDCapList[0].LVDSVDE)
+ == xgifb_info->lvds_data.LVDSVDE)
&& (XGIbios_mode[XGIfb_mode_idx].bpp == 8)) {
found_mode = 1;
break;
@@ -456,51 +452,6 @@ invalid:
printk(KERN_INFO "XGIfb: Invalid VESA mode 0x%x'\n", vesamode);
}
-static int XGIfb_GetXG21LVDSData(struct xgifb_video_info *xgifb_info)
-{
- u8 tmp;
- void __iomem *data = xgifb_info->mmio_vbase + 0x20000;
- int i, j, k;
-
- tmp = xgifb_reg_get(XGISR, 0x1e);
- xgifb_reg_set(XGISR, 0x1e, tmp | 4);
-
- if ((readb(data) == 0x55) &&
- (readb(data + 1) == 0xAA) &&
- (readb(data + 0x65) & 0x1)) {
- i = readw(data + 0x316);
- j = readb(data + i - 1);
- if (j == 0xff)
- j = 1;
-
- k = 0;
- do {
- XGI21_LCDCapList[k].LVDS_Capability = readw(data + i);
- XGI21_LCDCapList[k].LVDSHT = readw(data + i + 2);
- XGI21_LCDCapList[k].LVDSVT = readw(data + i + 4);
- XGI21_LCDCapList[k].LVDSHDE = readw(data + i + 6);
- XGI21_LCDCapList[k].LVDSVDE = readw(data + i + 8);
- XGI21_LCDCapList[k].LVDSHFP = readw(data + i + 10);
- XGI21_LCDCapList[k].LVDSVFP = readw(data + i + 12);
- XGI21_LCDCapList[k].LVDSHSYNC = readw(data + i + 14);
- XGI21_LCDCapList[k].LVDSVSYNC = readw(data + i + 16);
- XGI21_LCDCapList[k].VCLKData1 = readb(data + i + 18);
- XGI21_LCDCapList[k].VCLKData2 = readb(data + i + 19);
- XGI21_LCDCapList[k].PSC_S1 = readb(data + i + 20);
- XGI21_LCDCapList[k].PSC_S2 = readb(data + i + 21);
- XGI21_LCDCapList[k].PSC_S3 = readb(data + i + 22);
- XGI21_LCDCapList[k].PSC_S4 = readb(data + i + 23);
- XGI21_LCDCapList[k].PSC_S5 = readb(data + i + 24);
- i += 25;
- j--;
- k++;
- } while ((j > 0) && (k < (sizeof(XGI21_LCDCapList)
- / sizeof(struct XGI21_LVDSCapStruct))));
- return 1;
- }
- return 0;
-}
-
static int XGIfb_validate_mode(struct xgifb_video_info *xgifb_info, int myindex)
{
u16 xres, yres;
@@ -508,8 +459,8 @@ static int XGIfb_validate_mode(struct xgifb_video_info *xgifb_info, int myindex)
if (xgifb_info->chip == XG21) {
if (xgifb_info->display2 == XGIFB_DISP_LCD) {
- xres = XGI21_LCDCapList[0].LVDSHDE;
- yres = XGI21_LCDCapList[0].LVDSVDE;
+ xres = xgifb_info->lvds_data.LVDSHDE;
+ yres = xgifb_info->lvds_data.LVDSVDE;
if (XGIbios_mode[myindex].xres > xres)
return -1;
if (XGIbios_mode[myindex].yres > yres)
@@ -1223,7 +1174,7 @@ static int XGIfb_do_set_var(struct fb_var_screeninfo *var, int isactive,
if (isactive) {
XGIfb_pre_setmode(xgifb_info);
- if (XGISetModeNew(hw_info,
+ if (XGISetModeNew(xgifb_info, hw_info,
XGIbios_mode[xgifb_info->mode_idx].mode_no)
== 0) {
printk(KERN_ERR "XGIfb: Setting mode[0x%x] failed\n",
@@ -1794,17 +1745,16 @@ static void XGIfb_detect_VB(struct xgifb_video_info *xgifb_info)
XGIfb_crt1off = 0;
}
- if (XGIfb_crt2type != -1)
- /* TW: Override with option */
- xgifb_info->display2 = XGIfb_crt2type;
- else if (cr32 & XGI_VB_TV)
- xgifb_info->display2 = XGIFB_DISP_TV;
- else if (cr32 & XGI_VB_LCD)
- xgifb_info->display2 = XGIFB_DISP_LCD;
- else if (cr32 & XGI_VB_CRT2)
- xgifb_info->display2 = XGIFB_DISP_CRT;
- else
- xgifb_info->display2 = XGIFB_DISP_NONE;
+ if (!xgifb_info->display2_force) {
+ if (cr32 & XGI_VB_TV)
+ xgifb_info->display2 = XGIFB_DISP_TV;
+ else if (cr32 & XGI_VB_LCD)
+ xgifb_info->display2 = XGIFB_DISP_LCD;
+ else if (cr32 & XGI_VB_CRT2)
+ xgifb_info->display2 = XGIFB_DISP_CRT;
+ else
+ xgifb_info->display2 = XGIFB_DISP_NONE;
+ }
if (XGIfb_tvplug != -1)
/* PR/TW: Override with option */
@@ -1925,8 +1875,6 @@ static int __init XGIfb_setup(char *options)
XGIfb_crt2type = XGIFB_DISP_LCD;
} else if (!strncmp(this_opt, "noypan", 6)) {
XGIfb_ypan = 0;
- } else if (!strncmp(this_opt, "userom:", 7)) {
- XGIfb_userom = xgifb_optval(this_opt, 7);
} else {
mode = this_opt;
}
@@ -1934,35 +1882,12 @@ static int __init XGIfb_setup(char *options)
return 0;
}
-static unsigned char *xgifb_copy_rom(struct pci_dev *dev)
-{
- void __iomem *rom_address;
- unsigned char *rom_copy;
- size_t rom_size;
-
- rom_address = pci_map_rom(dev, &rom_size);
- if (rom_address == NULL)
- return NULL;
-
- rom_copy = vzalloc(XGIFB_ROM_SIZE);
- if (rom_copy == NULL)
- goto done;
-
- rom_size = min_t(size_t, rom_size, XGIFB_ROM_SIZE);
- memcpy_fromio(rom_copy, rom_address, rom_size);
-
-done:
- pci_unmap_rom(dev, rom_address);
- return rom_copy;
-}
-
static int __devinit xgifb_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
u8 reg, reg1;
u8 CR48, CR38;
int ret;
- bool xgi21_drvlcdcaplist = false;
struct fb_info *fb_info;
struct xgifb_video_info *xgifb_info;
struct xgi_hw_device_info *hw_info;
@@ -2001,6 +1926,11 @@ static int __devinit xgifb_probe(struct pci_dev *pdev,
goto error;
}
+ if (XGIfb_crt2type != -1) {
+ xgifb_info->display2 = XGIfb_crt2type;
+ xgifb_info->display2_force = true;
+ }
+
XGIRegInit(&xgifb_info->dev_info, (unsigned long)hw_info->pjIOAddress);
xgifb_reg_set(XGISR, IND_XGI_PASSWORD, XGI_PASSWORD);
@@ -2041,18 +1971,6 @@ static int __devinit xgifb_probe(struct pci_dev *pdev,
printk("XGIfb:chipid = %x\n", xgifb_info->chip);
hw_info->jChipType = xgifb_info->chip;
- if ((xgifb_info->chip == XG21) || (XGIfb_userom)) {
- hw_info->pjVirtualRomBase = xgifb_copy_rom(pdev);
- if (hw_info->pjVirtualRomBase)
- printk(KERN_INFO "XGIfb: Video ROM found and mapped to %p\n",
- hw_info->pjVirtualRomBase);
- else
- printk(KERN_INFO "XGIfb: Video ROM not found\n");
- } else {
- hw_info->pjVirtualRomBase = NULL;
- printk(KERN_INFO "XGIfb: Video ROM usage disabled\n");
- }
-
if (XGIfb_get_dram_size(xgifb_info)) {
printk(KERN_INFO "XGIfb: Fatal error: Unable to determine RAM size.\n");
ret = -ENODEV;
@@ -2117,8 +2035,6 @@ static int __devinit xgifb_probe(struct pci_dev *pdev,
CR38 = xgifb_reg_get(XGICR, 0x38);
if ((CR38&0xE0) == 0xC0) {
xgifb_info->display2 = XGIFB_DISP_LCD;
- if (!XGIfb_GetXG21LVDSData(xgifb_info))
- xgi21_drvlcdcaplist = true;
} else if ((CR38&0xE0) == 0x60) {
xgifb_info->hasVB = HASVB_CHRONTEL;
} else {
@@ -2193,6 +2109,8 @@ static int __devinit xgifb_probe(struct pci_dev *pdev,
if (xgifb_info->hasVB != HASVB_NONE)
XGIfb_detect_VB(xgifb_info);
+ else if (xgifb_info->chip != XG21)
+ xgifb_info->display2 = XGIFB_DISP_NONE;
if (xgifb_info->display2 == XGIFB_DISP_LCD) {
if (!enable_dstn) {
@@ -2254,7 +2172,7 @@ static int __devinit xgifb_probe(struct pci_dev *pdev,
if (xgifb_info->display2 == XGIFB_DISP_LCD &&
xgifb_info->chip == XG21)
xgifb_info->mode_idx =
- XGIfb_GetXG21DefaultLVDSModeIdx();
+ XGIfb_GetXG21DefaultLVDSModeIdx(xgifb_info);
else
xgifb_info->mode_idx = DEFAULT_MODE;
}
@@ -2264,21 +2182,6 @@ static int __devinit xgifb_probe(struct pci_dev *pdev,
goto error_1;
}
- if (xgi21_drvlcdcaplist) {
- int m;
-
- for (m = 0; m < ARRAY_SIZE(XGI21_LCDCapList); m++)
- if ((XGI21_LCDCapList[m].LVDSHDE ==
- XGIbios_mode[xgifb_info->mode_idx].xres) &&
- (XGI21_LCDCapList[m].LVDSVDE ==
- XGIbios_mode[xgifb_info->mode_idx].yres)) {
- xgifb_reg_set(xgifb_info->dev_info.P3d4,
- 0x36,
- m);
- break;
- }
- }
-
/* yilin set default refresh rate */
xgifb_info->refresh_rate = refresh_rate;
if (xgifb_info->refresh_rate == 0)
@@ -2418,7 +2321,6 @@ error_1:
error_0:
release_mem_region(xgifb_info->video_base, xgifb_info->video_size);
error:
- vfree(hw_info->pjVirtualRomBase);
framebuffer_release(fb_info);
return ret;
}
@@ -2442,7 +2344,6 @@ static void __devexit xgifb_remove(struct pci_dev *pdev)
iounmap(xgifb_info->video_vbase);
release_mem_region(xgifb_info->mmio_base, xgifb_info->mmio_size);
release_mem_region(xgifb_info->video_base, xgifb_info->video_size);
- vfree(xgifb_info->hw_info.pjVirtualRomBase);
framebuffer_release(fb_info);
pci_set_drvdata(pdev, NULL);
}
@@ -2458,6 +2359,8 @@ static int __init xgifb_init(void)
{
char *option = NULL;
+ if (forcecrt2type != NULL)
+ XGIfb_search_crt2type(forcecrt2type);
if (fb_get_options("xgifb", &option))
return -ENODEV;
XGIfb_setup(option);
@@ -2480,6 +2383,11 @@ MODULE_AUTHOR("XGITECH , Others");
module_param(mode, charp, 0);
module_param(vesa, int, 0);
module_param(filter, int, 0);
+module_param(forcecrt2type, charp, 0);
+
+MODULE_PARM_DESC(forcecrt2type,
+ "\nForce the second display output type. Possible values are NONE,\n"
+ "LCD, TV, VGA, SVIDEO or COMPOSITE.\n");
MODULE_PARM_DESC(mode,
"\nSelects the desired default display mode in the format XxYxDepth,\n"
diff --git a/drivers/staging/xgifb/XGIfb.h b/drivers/staging/xgifb/XGIfb.h
index 7611846a7039..2c866bb65a00 100644
--- a/drivers/staging/xgifb/XGIfb.h
+++ b/drivers/staging/xgifb/XGIfb.h
@@ -86,10 +86,13 @@ struct xgifb_video_info {
unsigned int refresh_rate;
enum xgifb_display_type display2; /* the second display output type */
+ bool display2_force;
unsigned char hasVB;
unsigned char TV_type;
unsigned char TV_plug;
+ struct XGI21_LVDSCapStruct lvds_data;
+
enum XGI_CHIP_TYPE chip;
unsigned char revision_id;
diff --git a/drivers/staging/xgifb/vb_ext.c b/drivers/staging/xgifb/vb_ext.c
deleted file mode 100644
index b1a25730b7ca..000000000000
--- a/drivers/staging/xgifb/vb_ext.c
+++ /dev/null
@@ -1,444 +0,0 @@
-#include <linux/io.h>
-#include <linux/types.h>
-#include "XGIfb.h"
-
-#include "vb_def.h"
-#include "vgatypes.h"
-#include "vb_struct.h"
-#include "vb_util.h"
-#include "vb_setmode.h"
-#include "vb_ext.h"
-
-/**************************************************************
- *********************** Dynamic Sense ************************
- *************************************************************/
-
-static unsigned char XGINew_Is301B(struct vb_device_info *pVBInfo)
-{
- unsigned short flag;
-
- flag = xgifb_reg_get(pVBInfo->Part4Port, 0x01);
-
- if (flag > 0x0B0)
- return 0; /* 301b */
- else
- return 1;
-}
-
-static unsigned char XGINew_Sense(unsigned short tempbx,
- unsigned short tempcx,
- struct vb_device_info *pVBInfo)
-{
- unsigned short temp, i, tempch;
-
- temp = tempbx & 0xFF;
- xgifb_reg_set(pVBInfo->Part4Port, 0x11, temp);
- temp = (tempbx & 0xFF00) >> 8;
- temp |= (tempcx & 0x00FF);
- xgifb_reg_and_or(pVBInfo->Part4Port, 0x10, ~0x1F, temp);
-
- for (i = 0; i < 10; i++)
- XGI_LongWait(pVBInfo);
-
- tempch = (tempcx & 0x7F00) >> 8;
- temp = xgifb_reg_get(pVBInfo->Part4Port, 0x03);
- temp = temp ^ (0x0E);
- temp &= tempch;
-
- if (temp > 0)
- return 1;
- else
- return 0;
-}
-
-static unsigned char
-XGINew_GetLCDDDCInfo(struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo)
-{
- unsigned short temp;
-
- /* add lcd sense */
- if (HwDeviceExtension->ulCRT2LCDType == LCD_UNKNOWN) {
- return 0;
- } else {
- temp = (unsigned short) HwDeviceExtension->ulCRT2LCDType;
- switch (HwDeviceExtension->ulCRT2LCDType) {
- case LCD_INVALID:
- case LCD_800x600:
- case LCD_1024x768:
- case LCD_1280x1024:
- break;
-
- case LCD_640x480:
- case LCD_1024x600:
- case LCD_1152x864:
- case LCD_1280x960:
- case LCD_1152x768:
- temp = 0;
- break;
-
- case LCD_1400x1050:
- case LCD_1280x768:
- case LCD_1600x1200:
- break;
-
- case LCD_1920x1440:
- case LCD_2048x1536:
- temp = 0;
- break;
-
- default:
- break;
- }
- xgifb_reg_and_or(pVBInfo->P3d4, 0x36, 0xF0, temp);
- return 1;
- }
-}
-
-static unsigned char XGINew_GetPanelID(struct vb_device_info *pVBInfo)
-{
- unsigned short PanelTypeTable[16] = { SyncNN | PanelRGB18Bit
- | Panel800x600 | _PanelType00, SyncNN | PanelRGB18Bit
- | Panel1024x768 | _PanelType01, SyncNN | PanelRGB18Bit
- | Panel800x600 | _PanelType02, SyncNN | PanelRGB18Bit
- | Panel640x480 | _PanelType03, SyncNN | PanelRGB18Bit
- | Panel1024x768 | _PanelType04, SyncNN | PanelRGB18Bit
- | Panel1024x768 | _PanelType05, SyncNN | PanelRGB18Bit
- | Panel1024x768 | _PanelType06, SyncNN | PanelRGB24Bit
- | Panel1024x768 | _PanelType07, SyncNN | PanelRGB18Bit
- | Panel800x600 | _PanelType08, SyncNN | PanelRGB18Bit
- | Panel1024x768 | _PanelType09, SyncNN | PanelRGB18Bit
- | Panel800x600 | _PanelType0A, SyncNN | PanelRGB18Bit
- | Panel1024x768 | _PanelType0B, SyncNN | PanelRGB18Bit
- | Panel1024x768 | _PanelType0C, SyncNN | PanelRGB24Bit
- | Panel1024x768 | _PanelType0D, SyncNN | PanelRGB18Bit
- | Panel1024x768 | _PanelType0E, SyncNN | PanelRGB18Bit
- | Panel1024x768 | _PanelType0F };
- unsigned short tempax, tempbx, temp;
- /* unsigned short return_flag; */
-
- tempax = xgifb_reg_get(pVBInfo->P3c4, 0x1A);
- tempbx = tempax & 0x1E;
-
- if (tempax == 0)
- return 0;
- else {
- /*
- if (!(tempax & 0x10)) {
- if (pVBInfo->IF_DEF_LVDS == 1) {
- tempbx = 0;
- temp = xgifb_reg_get(pVBInfo->P3c4, 0x38);
- if (temp & 0x40)
- tempbx |= 0x08;
- if (temp & 0x20)
- tempbx |= 0x02;
- if (temp & 0x01)
- tempbx |= 0x01;
-
- temp = xgifb_reg_get(pVBInfo->P3c4, 0x39);
- if (temp & 0x80)
- tempbx |= 0x04;
- } else {
- return(0);
- }
- }
- */
-
- tempbx = tempbx >> 1;
- temp = tempbx & 0x00F;
- xgifb_reg_set(pVBInfo->P3d4, 0x36, temp);
- tempbx--;
- tempbx = PanelTypeTable[tempbx];
-
- temp = (tempbx & 0xFF00) >> 8;
- xgifb_reg_and_or(pVBInfo->P3d4, 0x37, ~(LCDSyncBit
- | LCDRGB18Bit), temp);
- return 1;
- }
-}
-
-static unsigned char
-XGINew_BridgeIsEnable(struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo)
-{
- unsigned short flag;
-
- if (XGI_BridgeIsOn(pVBInfo) == 0) {
- flag = xgifb_reg_get(pVBInfo->Part1Port, 0x0);
-
- if (flag & 0x050)
- return 1;
- else
- return 0;
-
- }
- return 0;
-}
-
-static unsigned char
-XGINew_SenseHiTV(struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo)
-{
- unsigned short tempbx, tempcx, temp, i, tempch;
-
- tempbx = *pVBInfo->pYCSenseData2;
-
- tempcx = 0x0604;
-
- temp = tempbx & 0xFF;
- xgifb_reg_set(pVBInfo->Part4Port, 0x11, temp);
- temp = (tempbx & 0xFF00) >> 8;
- temp |= (tempcx & 0x00FF);
- xgifb_reg_and_or(pVBInfo->Part4Port, 0x10, ~0x1F, temp);
-
- for (i = 0; i < 10; i++)
- XGI_LongWait(pVBInfo);
-
- tempch = (tempcx & 0xFF00) >> 8;
- temp = xgifb_reg_get(pVBInfo->Part4Port, 0x03);
- temp = temp ^ (0x0E);
- temp &= tempch;
-
- if (temp != tempch)
- return 0;
-
- tempbx = *pVBInfo->pVideoSenseData2;
-
- tempcx = 0x0804;
- temp = tempbx & 0xFF;
- xgifb_reg_set(pVBInfo->Part4Port, 0x11, temp);
- temp = (tempbx & 0xFF00) >> 8;
- temp |= (tempcx & 0x00FF);
- xgifb_reg_and_or(pVBInfo->Part4Port, 0x10, ~0x1F, temp);
-
- for (i = 0; i < 10; i++)
- XGI_LongWait(pVBInfo);
-
- tempch = (tempcx & 0xFF00) >> 8;
- temp = xgifb_reg_get(pVBInfo->Part4Port, 0x03);
- temp = temp ^ (0x0E);
- temp &= tempch;
-
- if (temp != tempch) {
- return 0;
- } else {
- tempbx = 0x3FF;
- tempcx = 0x0804;
- temp = tempbx & 0xFF;
- xgifb_reg_set(pVBInfo->Part4Port, 0x11, temp);
- temp = (tempbx & 0xFF00) >> 8;
- temp |= (tempcx & 0x00FF);
- xgifb_reg_and_or(pVBInfo->Part4Port, 0x10, ~0x1F, temp);
-
- for (i = 0; i < 10; i++)
- XGI_LongWait(pVBInfo);
-
- tempch = (tempcx & 0xFF00) >> 8;
- temp = xgifb_reg_get(pVBInfo->Part4Port, 0x03);
- temp = temp ^ (0x0E);
- temp &= tempch;
-
- if (temp != tempch)
- return 1;
- else
- return 0;
- }
-}
-
-void XGI_GetSenseStatus(struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo)
-{
- unsigned short tempax = 0, tempbx, tempcx, temp,
- P2reg0 = 0, SenseModeNo = 0,
- OutputSelect = *pVBInfo->pOutputSelect,
- ModeIdIndex, i;
- pVBInfo->BaseAddr = (unsigned long) HwDeviceExtension->pjIOAddress;
-
- if (pVBInfo->IF_DEF_LVDS == 1) {
- /* ynlai 02/27/2002 */
- tempax = xgifb_reg_get(pVBInfo->P3c4, 0x1A);
- tempbx = xgifb_reg_get(pVBInfo->P3c4, 0x1B);
- tempax = ((tempax & 0xFE) >> 1) | (tempbx << 8);
- if (tempax == 0x00) { /* Get Panel id from DDC */
- temp = XGINew_GetLCDDDCInfo(HwDeviceExtension, pVBInfo);
- if (temp == 1) { /* LCD connect */
- /* set CR39 bit0="1" */
- xgifb_reg_and_or(pVBInfo->P3d4,
- 0x39, 0xFF, 0x01);
- /* clean CR37 bit4="0" */
- xgifb_reg_and_or(pVBInfo->P3d4,
- 0x37, 0xEF, 0x00);
- temp = LCDSense;
- } else { /* LCD don't connect */
- temp = 0;
- }
- } else {
- XGINew_GetPanelID(pVBInfo);
- temp = LCDSense;
- }
-
- tempbx = ~(LCDSense | AVIDEOSense | SVIDEOSense);
- xgifb_reg_and_or(pVBInfo->P3d4, 0x32, tempbx, temp);
- } else { /* for 301 */
- if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) { /* for HiVision */
- tempax = xgifb_reg_get(pVBInfo->P3c4, 0x38);
- temp = tempax & 0x01;
- tempax = xgifb_reg_get(pVBInfo->P3c4, 0x3A);
- temp = temp | (tempax & 0x02);
- xgifb_reg_and_or(pVBInfo->P3d4, 0x32, 0xA0, temp);
- } else {
- if (XGI_BridgeIsOn(pVBInfo)) {
- P2reg0 = xgifb_reg_get(pVBInfo->Part2Port,
- 0x00);
- if (!XGINew_BridgeIsEnable(HwDeviceExtension,
- pVBInfo)) {
- SenseModeNo = 0x2e;
- /* xgifb_reg_set(pVBInfo->P3d4, 0x30, 0x41);
- * XGISetModeNew(HwDeviceExtension, 0x2e);
- * // ynlai InitMode */
-
- temp = XGI_SearchModeID(SenseModeNo,
- &ModeIdIndex,
- pVBInfo);
- XGI_GetVGAType(HwDeviceExtension,
- pVBInfo);
- XGI_GetVBType(pVBInfo);
- pVBInfo->SetFlag = 0x00;
- pVBInfo->ModeType = ModeVGA;
- pVBInfo->VBInfo = SetCRT2ToRAMDAC |
- LoadDACFlag |
- SetInSlaveMode;
- XGI_GetLCDInfo(0x2e,
- ModeIdIndex,
- pVBInfo);
- XGI_GetTVInfo(0x2e,
- ModeIdIndex,
- pVBInfo);
- XGI_EnableBridge(HwDeviceExtension,
- pVBInfo);
- XGI_SetCRT2Group301(SenseModeNo,
- HwDeviceExtension,
- pVBInfo);
- XGI_SetCRT2ModeRegs(0x2e,
- HwDeviceExtension,
- pVBInfo);
- /* XGI_DisableBridge(HwDeviceExtension,
- * pVBInfo ) ; */
- /* Display Off 0212 */
- xgifb_reg_and_or(pVBInfo->P3c4,
- 0x01,
- 0xDF,
- 0x20);
- for (i = 0; i < 20; i++)
- XGI_LongWait(pVBInfo);
- }
- xgifb_reg_set(pVBInfo->Part2Port, 0x00, 0x1c);
- tempax = 0;
- tempbx = *pVBInfo->pRGBSenseData;
-
- if (!(XGINew_Is301B(pVBInfo)))
- tempbx = *pVBInfo->pRGBSenseData2;
-
- tempcx = 0x0E08;
- if (XGINew_Sense(tempbx, tempcx, pVBInfo)) {
- if (XGINew_Sense(tempbx,
- tempcx,
- pVBInfo))
- tempax |= Monitor2Sense;
- }
-
- if (pVBInfo->VBType & VB_XGI301C)
- xgifb_reg_or(pVBInfo->Part4Port,
- 0x0d,
- 0x04);
-
- /* add by kuku for Multi-adapter sense HiTV */
- if (XGINew_SenseHiTV(HwDeviceExtension,
- pVBInfo)) {
- tempax |= HiTVSense;
- if ((pVBInfo->VBType & VB_XGI301C))
- tempax ^= (HiTVSense |
- YPbPrSense);
- }
-
- /* start */
- if (!(tempax & (HiTVSense | YPbPrSense))) {
- tempbx = *pVBInfo->pYCSenseData;
- if (!(XGINew_Is301B(pVBInfo)))
- tempbx = *pVBInfo->pYCSenseData2;
- tempcx = 0x0604;
- if (XGINew_Sense(tempbx,
- tempcx,
- pVBInfo)) {
- if (XGINew_Sense(tempbx,
- tempcx,
- pVBInfo))
- tempax |= SVIDEOSense;
- }
-
- if (OutputSelect & BoardTVType) {
- tempbx = *pVBInfo->pVideoSenseData;
-
- if (!(XGINew_Is301B(pVBInfo)))
- tempbx = *pVBInfo->pVideoSenseData2;
-
- tempcx = 0x0804;
- if (XGINew_Sense(tempbx,
- tempcx,
- pVBInfo)) {
- if (XGINew_Sense(tempbx,
- tempcx,
- pVBInfo))
- tempax |= AVIDEOSense;
- }
- } else {
- if (!(tempax & SVIDEOSense)) {
- tempbx = *pVBInfo->pVideoSenseData;
-
- if (!(XGINew_Is301B(pVBInfo)))
- tempbx = *pVBInfo->pVideoSenseData2;
-
- tempcx = 0x0804;
- if (XGINew_Sense(tempbx,
- tempcx,
- pVBInfo)) {
- if (XGINew_Sense(tempbx, tempcx, pVBInfo))
- tempax |= AVIDEOSense;
- }
- }
- }
- }
- } /* end */
- if (!(tempax & Monitor2Sense)) {
- if (XGINew_SenseLCD(HwDeviceExtension, pVBInfo))
- tempax |= LCDSense;
- }
- tempbx = 0;
- tempcx = 0;
- XGINew_Sense(tempbx, tempcx, pVBInfo);
-
- xgifb_reg_and_or(pVBInfo->P3d4, 0x32, ~0xDF, tempax);
- xgifb_reg_set(pVBInfo->Part2Port, 0x00, P2reg0);
-
- if (!(P2reg0 & 0x20)) {
- pVBInfo->VBInfo = DisableCRT2Display;
- /* XGI_SetCRT2Group301(SenseModeNo,
- * HwDeviceExtension,
- * pVBInfo); */
- }
- }
- }
- XGI_DisableBridge(HwDeviceExtension, pVBInfo); /* shampoo 0226 */
-
-}
-
-unsigned short XGINew_SenseLCD(struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo)
-{
- /* unsigned short SoftSetting ; */
- unsigned short temp;
-
- temp = XGINew_GetLCDDDCInfo(HwDeviceExtension, pVBInfo);
-
- return temp;
-}
diff --git a/drivers/staging/xgifb/vb_ext.h b/drivers/staging/xgifb/vb_ext.h
deleted file mode 100644
index 0b1f55b4242f..000000000000
--- a/drivers/staging/xgifb/vb_ext.h
+++ /dev/null
@@ -1,9 +0,0 @@
-#ifndef _VBEXT_
-#define _VBEXT_
-
-extern void XGI_GetSenseStatus(struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo);
-extern unsigned short XGINew_SenseLCD(struct xgi_hw_device_info *,
- struct vb_device_info *pVBInfo);
-
-#endif
diff --git a/drivers/staging/xgifb/vb_init.c b/drivers/staging/xgifb/vb_init.c
index 9e890a17fbc2..4ccd988ffd7c 100644
--- a/drivers/staging/xgifb/vb_init.c
+++ b/drivers/staging/xgifb/vb_init.c
@@ -1,6 +1,7 @@
#include <linux/types.h>
#include <linux/delay.h> /* udelay */
#include <linux/pci.h>
+#include <linux/vmalloc.h>
#include "vgatypes.h"
#include "XGIfb.h"
@@ -10,7 +11,6 @@
#include "vb_util.h"
#include "vb_setmode.h"
#include "vb_init.h"
-#include "vb_ext.h"
#include <linux/io.h>
@@ -35,6 +35,8 @@ static const unsigned short XGINew_DDRDRAM_TYPE20[12][5] = {
{ 2, 12, 9, 8, 0x35},
{ 2, 12, 8, 4, 0x31} };
+#define XGIFB_ROM_SIZE 65536
+
static unsigned char
XGINew_GetXG20DRAMType(struct xgi_hw_device_info *HwDeviceExtension,
struct vb_device_info *pVBInfo)
@@ -1068,20 +1070,20 @@ static int XGINew_DDRSizing340(struct xgi_hw_device_info *HwDeviceExtension,
return 0;
}
-static void XGINew_SetDRAMSize_340(struct xgi_hw_device_info *HwDeviceExtension,
+static void XGINew_SetDRAMSize_340(struct xgifb_video_info *xgifb_info,
+ struct xgi_hw_device_info *HwDeviceExtension,
struct vb_device_info *pVBInfo)
{
unsigned short data;
- pVBInfo->ROMAddr = HwDeviceExtension->pjVirtualRomBase;
pVBInfo->FBAddr = HwDeviceExtension->pjVideoMemoryAddress;
- XGISetModeNew(HwDeviceExtension, 0x2e);
+ XGISetModeNew(xgifb_info, HwDeviceExtension, 0x2e);
data = xgifb_reg_get(pVBInfo->P3c4, 0x21);
/* disable read cache */
xgifb_reg_set(pVBInfo->P3c4, 0x21, (unsigned short) (data & 0xDF));
- XGI_DisplayOff(HwDeviceExtension, pVBInfo);
+ XGI_DisplayOff(xgifb_info, HwDeviceExtension, pVBInfo);
/* data = xgifb_reg_get(pVBInfo->P3c4, 0x1); */
/* data |= 0x20 ; */
@@ -1092,118 +1094,100 @@ static void XGINew_SetDRAMSize_340(struct xgi_hw_device_info *HwDeviceExtension,
xgifb_reg_set(pVBInfo->P3c4, 0x21, (unsigned short) (data | 0x20));
}
-static void ReadVBIOSTablData(unsigned char ChipType,
+static u8 *xgifb_copy_rom(struct pci_dev *dev, size_t *rom_size)
+{
+ void __iomem *rom_address;
+ u8 *rom_copy;
+
+ rom_address = pci_map_rom(dev, rom_size);
+ if (rom_address == NULL)
+ return NULL;
+
+ rom_copy = vzalloc(XGIFB_ROM_SIZE);
+ if (rom_copy == NULL)
+ goto done;
+
+ *rom_size = min_t(size_t, *rom_size, XGIFB_ROM_SIZE);
+ memcpy_fromio(rom_copy, rom_address, *rom_size);
+
+done:
+ pci_unmap_rom(dev, rom_address);
+ return rom_copy;
+}
+
+static void xgifb_read_vbios(struct pci_dev *pdev,
struct vb_device_info *pVBInfo)
{
- volatile unsigned char *pVideoMemory =
- (unsigned char *) pVBInfo->ROMAddr;
+ struct xgifb_video_info *xgifb_info = pci_get_drvdata(pdev);
+ u8 *vbios;
unsigned long i;
- unsigned char j, k;
- /* Volari customize data area end */
-
- if (ChipType == XG21) {
- pVBInfo->IF_DEF_LVDS = 0;
- if (pVideoMemory[0x65] & 0x1) {
- pVBInfo->IF_DEF_LVDS = 1;
- i = pVideoMemory[0x316] | (pVideoMemory[0x317] << 8);
- j = pVideoMemory[i - 1];
- if (j != 0xff) {
- k = 0;
- do {
- pVBInfo->XG21_LVDSCapList[k].
- LVDS_Capability
- = pVideoMemory[i] |
- (pVideoMemory[i + 1] << 8);
- pVBInfo->XG21_LVDSCapList[k].LVDSHT
- = pVideoMemory[i + 2] |
- (pVideoMemory[i + 3] << 8);
- pVBInfo->XG21_LVDSCapList[k].LVDSVT
- = pVideoMemory[i + 4] |
- (pVideoMemory[i + 5] << 8);
- pVBInfo->XG21_LVDSCapList[k].LVDSHDE
- = pVideoMemory[i + 6] |
- (pVideoMemory[i + 7] << 8);
- pVBInfo->XG21_LVDSCapList[k].LVDSVDE
- = pVideoMemory[i + 8] |
- (pVideoMemory[i + 9] << 8);
- pVBInfo->XG21_LVDSCapList[k].LVDSHFP
- = pVideoMemory[i + 10] |
- (pVideoMemory[i + 11] << 8);
- pVBInfo->XG21_LVDSCapList[k].LVDSVFP
- = pVideoMemory[i + 12] |
- (pVideoMemory[i + 13] << 8);
- pVBInfo->XG21_LVDSCapList[k].LVDSHSYNC
- = pVideoMemory[i + 14] |
- (pVideoMemory[i + 15] << 8);
- pVBInfo->XG21_LVDSCapList[k].LVDSVSYNC
- = pVideoMemory[i + 16] |
- (pVideoMemory[i + 17] << 8);
- pVBInfo->XG21_LVDSCapList[k].VCLKData1
- = pVideoMemory[i + 18];
- pVBInfo->XG21_LVDSCapList[k].VCLKData2
- = pVideoMemory[i + 19];
- pVBInfo->XG21_LVDSCapList[k].PSC_S1
- = pVideoMemory[i + 20];
- pVBInfo->XG21_LVDSCapList[k].PSC_S2
- = pVideoMemory[i + 21];
- pVBInfo->XG21_LVDSCapList[k].PSC_S3
- = pVideoMemory[i + 22];
- pVBInfo->XG21_LVDSCapList[k].PSC_S4
- = pVideoMemory[i + 23];
- pVBInfo->XG21_LVDSCapList[k].PSC_S5
- = pVideoMemory[i + 24];
- i += 25;
- j--;
- k++;
- } while ((j > 0) &&
- (k < (sizeof(XGI21_LCDCapList) /
- sizeof(struct
- XGI21_LVDSCapStruct))));
- } else {
- pVBInfo->XG21_LVDSCapList[0].LVDS_Capability
- = pVideoMemory[i] |
- (pVideoMemory[i + 1] << 8);
- pVBInfo->XG21_LVDSCapList[0].LVDSHT
- = pVideoMemory[i + 2] |
- (pVideoMemory[i + 3] << 8);
- pVBInfo->XG21_LVDSCapList[0].LVDSVT
- = pVideoMemory[i + 4] |
- (pVideoMemory[i + 5] << 8);
- pVBInfo->XG21_LVDSCapList[0].LVDSHDE
- = pVideoMemory[i + 6] |
- (pVideoMemory[i + 7] << 8);
- pVBInfo->XG21_LVDSCapList[0].LVDSVDE
- = pVideoMemory[i + 8] |
- (pVideoMemory[i + 9] << 8);
- pVBInfo->XG21_LVDSCapList[0].LVDSHFP
- = pVideoMemory[i + 10] |
- (pVideoMemory[i + 11] << 8);
- pVBInfo->XG21_LVDSCapList[0].LVDSVFP
- = pVideoMemory[i + 12] |
- (pVideoMemory[i + 13] << 8);
- pVBInfo->XG21_LVDSCapList[0].LVDSHSYNC
- = pVideoMemory[i + 14] |
- (pVideoMemory[i + 15] << 8);
- pVBInfo->XG21_LVDSCapList[0].LVDSVSYNC
- = pVideoMemory[i + 16] |
- (pVideoMemory[i + 17] << 8);
- pVBInfo->XG21_LVDSCapList[0].VCLKData1
- = pVideoMemory[i + 18];
- pVBInfo->XG21_LVDSCapList[0].VCLKData2
- = pVideoMemory[i + 19];
- pVBInfo->XG21_LVDSCapList[0].PSC_S1
- = pVideoMemory[i + 20];
- pVBInfo->XG21_LVDSCapList[0].PSC_S2
- = pVideoMemory[i + 21];
- pVBInfo->XG21_LVDSCapList[0].PSC_S3
- = pVideoMemory[i + 22];
- pVBInfo->XG21_LVDSCapList[0].PSC_S4
- = pVideoMemory[i + 23];
- pVBInfo->XG21_LVDSCapList[0].PSC_S5
- = pVideoMemory[i + 24];
- }
- }
+ unsigned char j;
+ struct XGI21_LVDSCapStruct *lvds;
+ size_t vbios_size;
+ int entry;
+
+ if (xgifb_info->chip != XG21)
+ return;
+ pVBInfo->IF_DEF_LVDS = 0;
+ vbios = xgifb_copy_rom(pdev, &vbios_size);
+ if (vbios == NULL) {
+ dev_err(&pdev->dev, "video BIOS not available\n");
+ return;
+ }
+ if (vbios_size <= 0x65)
+ goto error;
+ /*
+ * The user can ignore the LVDS bit in the BIOS and force the display
+ * type.
+ */
+ if (!(vbios[0x65] & 0x1) &&
+ (!xgifb_info->display2_force ||
+ xgifb_info->display2 != XGIFB_DISP_LCD)) {
+ vfree(vbios);
+ return;
}
+ if (vbios_size <= 0x317)
+ goto error;
+ i = vbios[0x316] | (vbios[0x317] << 8);
+ if (vbios_size <= i - 1)
+ goto error;
+ j = vbios[i - 1];
+ if (j == 0)
+ goto error;
+ if (j == 0xff)
+ j = 1;
+ /*
+ * Read the LVDS table index scratch register set by the BIOS.
+ */
+ entry = xgifb_reg_get(xgifb_info->dev_info.P3d4, 0x36);
+ if (entry >= j)
+ entry = 0;
+ i += entry * 25;
+ lvds = &xgifb_info->lvds_data;
+ if (vbios_size <= i + 24)
+ goto error;
+ lvds->LVDS_Capability = vbios[i] | (vbios[i + 1] << 8);
+ lvds->LVDSHT = vbios[i + 2] | (vbios[i + 3] << 8);
+ lvds->LVDSVT = vbios[i + 4] | (vbios[i + 5] << 8);
+ lvds->LVDSHDE = vbios[i + 6] | (vbios[i + 7] << 8);
+ lvds->LVDSVDE = vbios[i + 8] | (vbios[i + 9] << 8);
+ lvds->LVDSHFP = vbios[i + 10] | (vbios[i + 11] << 8);
+ lvds->LVDSVFP = vbios[i + 12] | (vbios[i + 13] << 8);
+ lvds->LVDSHSYNC = vbios[i + 14] | (vbios[i + 15] << 8);
+ lvds->LVDSVSYNC = vbios[i + 16] | (vbios[i + 17] << 8);
+ lvds->VCLKData1 = vbios[i + 18];
+ lvds->VCLKData2 = vbios[i + 19];
+ lvds->PSC_S1 = vbios[i + 20];
+ lvds->PSC_S2 = vbios[i + 21];
+ lvds->PSC_S3 = vbios[i + 22];
+ lvds->PSC_S4 = vbios[i + 23];
+ lvds->PSC_S5 = vbios[i + 24];
+ vfree(vbios);
+ pVBInfo->IF_DEF_LVDS = 1;
+ return;
+error:
+ dev_err(&pdev->dev, "video BIOS corrupted\n");
+ vfree(vbios);
}
static void XGINew_ChkSenseStatus(struct xgi_hw_device_info *HwDeviceExtension,
@@ -1336,18 +1320,57 @@ static void XGINew_SetModeScratch(struct xgi_hw_device_info *HwDeviceExtension,
}
+static unsigned short XGINew_SenseLCD(struct xgi_hw_device_info
+ *HwDeviceExtension,
+ struct vb_device_info *pVBInfo)
+{
+ unsigned short temp;
+
+ /* add lcd sense */
+ if (HwDeviceExtension->ulCRT2LCDType == LCD_UNKNOWN) {
+ return 0;
+ } else {
+ temp = (unsigned short) HwDeviceExtension->ulCRT2LCDType;
+ switch (HwDeviceExtension->ulCRT2LCDType) {
+ case LCD_INVALID:
+ case LCD_800x600:
+ case LCD_1024x768:
+ case LCD_1280x1024:
+ break;
+
+ case LCD_640x480:
+ case LCD_1024x600:
+ case LCD_1152x864:
+ case LCD_1280x960:
+ case LCD_1152x768:
+ temp = 0;
+ break;
+
+ case LCD_1400x1050:
+ case LCD_1280x768:
+ case LCD_1600x1200:
+ break;
+
+ case LCD_1920x1440:
+ case LCD_2048x1536:
+ temp = 0;
+ break;
+
+ default:
+ break;
+ }
+ xgifb_reg_and_or(pVBInfo->P3d4, 0x36, 0xF0, temp);
+ return 1;
+ }
+}
+
static void XGINew_GetXG21Sense(struct xgi_hw_device_info *HwDeviceExtension,
struct vb_device_info *pVBInfo)
{
unsigned char Temp;
- volatile unsigned char *pVideoMemory =
- (unsigned char *) pVBInfo->ROMAddr;
-
- pVBInfo->IF_DEF_LVDS = 0;
#if 1
- if ((pVideoMemory[0x65] & 0x01)) { /* For XG21 LVDS */
- pVBInfo->IF_DEF_LVDS = 1;
+ if (pVBInfo->IF_DEF_LVDS) { /* For XG21 LVDS */
xgifb_reg_or(pVBInfo->P3d4, 0x32, LCDSense);
/* LVDS on chip */
xgifb_reg_and_or(pVBInfo->P3d4, 0x38, ~0xE0, 0xC0);
@@ -1393,7 +1416,6 @@ static void XGINew_GetXG27Sense(struct xgi_hw_device_info *HwDeviceExtension,
xgifb_reg_set(pVBInfo->P3d4, 0x4A, bCR4A);
if (Temp <= 0x02) {
- pVBInfo->IF_DEF_LVDS = 1;
/* LVDS setting */
xgifb_reg_and_or(pVBInfo->P3d4, 0x38, ~0xE0, 0xC0);
xgifb_reg_set(pVBInfo->P3d4, 0x30, 0x21);
@@ -1451,24 +1473,15 @@ unsigned char XGIInitNew(struct pci_dev *pdev)
struct vb_device_info *pVBInfo = &VBINF;
unsigned char i, temp = 0, temp1;
/* VBIOSVersion[5]; */
- volatile unsigned char *pVideoMemory;
/* unsigned long j, k; */
- pVBInfo->ROMAddr = HwDeviceExtension->pjVirtualRomBase;
-
pVBInfo->FBAddr = HwDeviceExtension->pjVideoMemoryAddress;
pVBInfo->BaseAddr = (unsigned long) HwDeviceExtension->pjIOAddress;
- pVideoMemory = (unsigned char *) pVBInfo->ROMAddr;
-
/* Newdebugcode(0x99); */
-
- /* if (pVBInfo->ROMAddr == 0) */
- /* return(0); */
-
if (pVBInfo->FBAddr == NULL) {
printk("\n pVBInfo->FBAddr == 0 ");
return 0;
@@ -1516,8 +1529,7 @@ unsigned char XGIInitNew(struct pci_dev *pdev)
InitTo330Pointer(HwDeviceExtension->jChipType, pVBInfo);
- /* ReadVBIOSData */
- ReadVBIOSTablData(HwDeviceExtension->jChipType, pVBInfo);
+ xgifb_read_vbios(pdev, pVBInfo);
/* 1.Openkey */
xgifb_reg_set(pVBInfo->P3c4, 0x05, 0x86);
@@ -1774,7 +1786,7 @@ unsigned char XGIInitNew(struct pci_dev *pdev)
pVBInfo);
printk("20");
- XGINew_SetDRAMSize_340(HwDeviceExtension, pVBInfo);
+ XGINew_SetDRAMSize_340(xgifb_info, HwDeviceExtension, pVBInfo);
printk("21");
printk("22");
diff --git a/drivers/staging/xgifb/vb_setmode.c b/drivers/staging/xgifb/vb_setmode.c
index 81c0cc41bb42..67a316c3c108 100644
--- a/drivers/staging/xgifb/vb_setmode.c
+++ b/drivers/staging/xgifb/vb_setmode.c
@@ -67,11 +67,6 @@ void InitTo330Pointer(unsigned char ChipType, struct vb_device_info *pVBInfo)
pVBInfo->XGINEWUB_CRT1Table
= (struct XGI_CRT1TableStruct *) XGI_CRT1Table;
- /* add for new UNIVGABIOS */
- /* XGINew_UBLCDDataTable =
- * (struct XGI_LCDDataTablStruct *) XGI_LCDDataTable; */
- /* XGINew_UBTVDataTable = (XGI_TVDataTablStruct *) XGI_TVDataTable; */
-
pVBInfo->MCLKData = (struct XGI_MCLKDataStruct *) XGI340New_MCLKData;
pVBInfo->ECLKData = (struct XGI_ECLKDataStruct *) XGI340_ECLKData;
pVBInfo->VCLKData = (struct XGI_VCLKDataStruct *) XGI_VCLKData;
@@ -148,9 +143,6 @@ void InitTo330Pointer(unsigned char ChipType, struct vb_device_info *pVBInfo)
else
pVBInfo->LCDCapList = XGI_LCDCapList;
- if ((ChipType == XG21) || (ChipType == XG27))
- pVBInfo->XG21_LVDSCapList = XGI21_LCDCapList;
-
pVBInfo->XGI_TVDelayList = XGI301TVDelayList;
pVBInfo->XGI_TVDelayList2 = XGI301TVDelayList2;
@@ -236,28 +228,6 @@ static void XGI_SetSeqRegs(unsigned short ModeNo,
}
}
-static void XGI_SetMiscRegs(unsigned short StandTableIndex,
- struct vb_device_info *pVBInfo)
-{
- unsigned char Miscdata;
-
- /* Get Misc from file */
- Miscdata = pVBInfo->StandTable[StandTableIndex].MISC;
- /*
- if (pVBInfo->VBType & (VB_XGI301B |
- VB_XGI302B |
- VB_XGI301LV |
- VB_XGI302LV |
- VB_XGI301C)) {
- if (pVBInfo->VBInfo & SetCRT2ToLCDA) {
- Miscdata |= 0x0C;
- }
- }
- */
-
- outb(Miscdata, pVBInfo->P3c2); /* Set Misc(3c2) */
-}
-
static void XGI_SetCRTCRegs(struct xgi_hw_device_info *HwDeviceExtension,
unsigned short StandTableIndex,
struct vb_device_info *pVBInfo)
@@ -274,16 +244,6 @@ static void XGI_SetCRTCRegs(struct xgi_hw_device_info *HwDeviceExtension,
CRTCdata = pVBInfo->StandTable[StandTableIndex].CRTC[i];
xgifb_reg_set(pVBInfo->P3d4, i, CRTCdata); /* Set CRTC(3d4) */
}
- /*
- if ((HwDeviceExtension->jChipType == XGI_630) &&
- (HwDeviceExtension->jChipRevision == 0x30)) {
- if (pVBInfo->VBInfo & SetInSlaveMode) {
- if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToTV)) {
- xgifb_reg_set(pVBInfo->P3d4, 0x18, 0xFE);
- }
- }
- }
- */
}
static void XGI_SetATTRegs(unsigned short ModeNo,
@@ -530,10 +490,6 @@ static void XGI_SetCRT1Timing_H(struct vb_device_info *pVBInfo,
unsigned char data, data1, pushax;
unsigned short i, j;
- /* xgifb_reg_set(pVBInfo->P3d4, 0x51, 0); */
- /* xgifb_reg_set(pVBInfo->P3d4, 0x56, 0); */
- /* xgifb_reg_and_or(pVBInfo->P3d4, 0x11, 0x7f, 0x00); */
-
/* unlock cr0-7 */
data = (unsigned char) xgifb_reg_get(pVBInfo->P3d4, 0x11);
data &= 0x7F;
@@ -595,10 +551,6 @@ static void XGI_SetCRT1Timing_V(unsigned short ModeIdIndex,
unsigned char data;
unsigned short i, j;
- /* xgifb_reg_set(pVBInfo->P3d4, 0x51, 0); */
- /* xgifb_reg_set(pVBInfo->P3d4, 0x56, 0); */
- /* xgifb_reg_and_or(pVBInfo->P3d4, 0x11, 0x7f, 0x00); */
-
for (i = 0x00; i <= 0x01; i++) {
data = pVBInfo->TimingV[0].data[i];
xgifb_reg_set(pVBInfo->P3d4, (unsigned short) (i + 6), data);
@@ -976,6 +928,20 @@ static void XGI_SetXG27CRTC(unsigned short ModeNo,
}
}
+static void XGI_SetXG27FPBits(struct vb_device_info *pVBInfo)
+{
+ unsigned char temp;
+
+ /* D[1:0] 01: 18bit, 00: dual 12, 10: single 24 */
+ temp = xgifb_reg_get(pVBInfo->P3d4, 0x37);
+ temp = (temp & 3) << 6;
+ /* SR06[7]0: dual 12/1: single 24 [6] 18bit Dither <= 0 h/w recommend */
+ xgifb_reg_and_or(pVBInfo->P3c4, 0x06, ~0xc0, temp & 0x80);
+ /* SR09[7] enable FP output, SR09[6] 1: single 18bits, 0: 24bits */
+ xgifb_reg_and_or(pVBInfo->P3c4, 0x09, ~0xc0, temp | 0x80);
+
+}
+
static void xgifb_set_lcd(int chip_id,
struct vb_device_info *pVBInfo,
unsigned short RefreshRateTableIndex,
@@ -1088,6 +1054,20 @@ static void XGI_UpdateXG21CRTC(unsigned short ModeNo,
}
}
+static unsigned short XGI_GetResInfo(unsigned short ModeNo,
+ unsigned short ModeIdIndex, struct vb_device_info *pVBInfo)
+{
+ unsigned short resindex;
+
+ if (ModeNo <= 0x13)
+ /* si+St_ResInfo */
+ resindex = pVBInfo->SModeIDTable[ModeIdIndex].St_ResInfo;
+ else
+ /* si+Ext_ResInfo */
+ resindex = pVBInfo->EModeIDTable[ModeIdIndex].Ext_RESINFO;
+ return resindex;
+}
+
static void XGI_SetCRT1DE(struct xgi_hw_device_info *HwDeviceExtension,
unsigned short ModeNo, unsigned short ModeIdIndex,
unsigned short RefreshRateTableIndex,
@@ -1127,9 +1107,6 @@ static void XGI_SetCRT1DE(struct xgi_hw_device_info *HwDeviceExtension,
tempcx = 8;
- /* if (!(modeflag & Charx8Dot)) */
- /* tempcx = 9; */
-
tempax /= tempcx;
tempax -= 1;
tempbx -= 1;
@@ -1163,20 +1140,6 @@ static void XGI_SetCRT1DE(struct xgi_hw_device_info *HwDeviceExtension,
xgifb_reg_set(pVBInfo->P3d4, 0x11, temp);
}
-unsigned short XGI_GetResInfo(unsigned short ModeNo,
- unsigned short ModeIdIndex, struct vb_device_info *pVBInfo)
-{
- unsigned short resindex;
-
- if (ModeNo <= 0x13)
- /* si+St_ResInfo */
- resindex = pVBInfo->SModeIDTable[ModeIdIndex].St_ResInfo;
- else
- /* si+Ext_ResInfo */
- resindex = pVBInfo->EModeIDTable[ModeIdIndex].Ext_RESINFO;
- return resindex;
-}
-
static void XGI_SetCRT1Offset(unsigned short ModeNo,
unsigned short ModeIdIndex,
unsigned short RefreshRateTableIndex,
@@ -1308,77 +1271,55 @@ static unsigned short XGI_GetVCLK2Ptr(unsigned short ModeNo,
VCLKIndex = LCDXlat2VCLK[CRT2Index];
else
VCLKIndex = LCDXlat1VCLK[CRT2Index];
- } else { /* for TV */
- if (pVBInfo->VBInfo & SetCRT2ToTV) {
- if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) {
- if (pVBInfo->SetFlag & RPLLDIV2XO) {
- VCLKIndex = HiTVVCLKDIV2;
- VCLKIndex += 25;
- } else {
- VCLKIndex = HiTVVCLK;
- VCLKIndex += 25;
- }
-
- if (pVBInfo->SetFlag & TVSimuMode) {
- if (modeflag & Charx8Dot) {
- VCLKIndex =
- HiTVSimuVCLK;
- VCLKIndex += 25;
- } else {
- VCLKIndex =
- HiTVTextVCLK;
- VCLKIndex += 25;
- }
- }
+ } else if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) {
+ if (pVBInfo->SetFlag & RPLLDIV2XO) {
+ VCLKIndex = HiTVVCLKDIV2;
+ VCLKIndex += 25;
+ } else {
+ VCLKIndex = HiTVVCLK;
+ VCLKIndex += 25;
+ }
- /* 301lv */
- if (pVBInfo->VBType & VB_XGI301LV) {
- if (!(pVBInfo->VBExtInfo ==
- VB_YPbPr1080i)) {
- VCLKIndex =
- YPbPr750pVCLK;
- if (!(pVBInfo->VBExtInfo
- ==
- VB_YPbPr750p)) {
- VCLKIndex =
- YPbPr525pVCLK;
- if (!(pVBInfo->VBExtInfo
- == VB_YPbPr525p)) {
- VCLKIndex
- = YPbPr525iVCLK_2;
- if (!(pVBInfo->SetFlag
- & RPLLDIV2XO))
- VCLKIndex
- = YPbPr525iVCLK;
- }
- }
- }
- }
+ if (pVBInfo->SetFlag & TVSimuMode) {
+ if (modeflag & Charx8Dot) {
+ VCLKIndex = HiTVSimuVCLK;
+ VCLKIndex += 25;
} else {
- if (pVBInfo->VBInfo & SetCRT2ToTV) {
- if (pVBInfo->SetFlag &
- RPLLDIV2XO) {
- VCLKIndex = TVVCLKDIV2;
- VCLKIndex += 25;
- } else {
- VCLKIndex = TVVCLK;
- VCLKIndex += 25;
- }
- }
+ VCLKIndex = HiTVTextVCLK;
+ VCLKIndex += 25;
}
- } else { /* for CRT2 */
- /* Port 3cch */
- VCLKIndex = (unsigned char) inb(
- (pVBInfo->P3ca + 0x02));
- VCLKIndex = ((VCLKIndex >> 2) & 0x03);
- if (ModeNo > 0x13) {
- /* di+Ext_CRTVCLK */
- VCLKIndex =
- pVBInfo->RefIndex[
+ }
+
+ /* 301lv */
+ if ((pVBInfo->VBType & VB_XGI301LV) &&
+ !(pVBInfo->VBExtInfo == VB_YPbPr1080i)) {
+ if (pVBInfo->VBExtInfo == VB_YPbPr750p)
+ VCLKIndex = YPbPr750pVCLK;
+ else if (pVBInfo->VBExtInfo == VB_YPbPr525p)
+ VCLKIndex = YPbPr525pVCLK;
+ else if (pVBInfo->SetFlag & RPLLDIV2XO)
+ VCLKIndex = YPbPr525iVCLK_2;
+ else
+ VCLKIndex = YPbPr525iVCLK;
+ }
+ } else if (pVBInfo->VBInfo & SetCRT2ToTV) {
+ if (pVBInfo->SetFlag & RPLLDIV2XO) {
+ VCLKIndex = TVVCLKDIV2;
+ VCLKIndex += 25;
+ } else {
+ VCLKIndex = TVVCLK;
+ VCLKIndex += 25;
+ }
+ } else { /* for CRT2 */
+ /* Port 3cch */
+ VCLKIndex = (unsigned char) inb((pVBInfo->P3ca + 0x02));
+ VCLKIndex = ((VCLKIndex >> 2) & 0x03);
+ if (ModeNo > 0x13) {
+ /* di+Ext_CRTVCLK */
+ VCLKIndex = pVBInfo->RefIndex[
RefreshRateTableIndex].
Ext_CRTVCLK;
- VCLKIndex &= IndexMask;
- }
+ VCLKIndex &= IndexMask;
}
}
} else { /* LVDS */
@@ -1397,7 +1338,6 @@ static unsigned short XGI_GetVCLK2Ptr(unsigned short ModeNo,
else
VCLKIndex = LVDSXlat3VCLK[VCLKIndex];
}
- /* VCLKIndex = VCLKIndex&IndexMask; */
return VCLKIndex;
}
@@ -1461,6 +1401,19 @@ static void XGI_SetCRT1VCLK(unsigned short ModeNo,
}
}
+static void XGI_SetXG21FPBits(struct vb_device_info *pVBInfo)
+{
+ unsigned char temp;
+
+ temp = xgifb_reg_get(pVBInfo->P3d4, 0x37); /* D[0] 1: 18bit */
+ temp = (temp & 1) << 6;
+ /* SR06[6] 18bit Dither */
+ xgifb_reg_and_or(pVBInfo->P3c4, 0x06, ~0x40, temp);
+ /* SR09[7] enable FP output, SR09[6] 1: single 18bits, 0: dual 12bits */
+ xgifb_reg_and_or(pVBInfo->P3c4, 0x09, ~0xc0, temp | 0x80);
+
+}
+
static void XGI_SetCRT1FIFO(unsigned short ModeNo,
struct xgi_hw_device_info *HwDeviceExtension,
struct vb_device_info *pVBInfo)
@@ -1532,16 +1485,6 @@ static void XGI_SetVCLKState(struct xgi_hw_device_info *HwDeviceExtension,
xgifb_reg_set(pVBInfo->P3c4, 0x1F, data);
}
- /* Jong for Adavantech LCD ripple issue
- if ((VCLK >= 0) && (VCLK < 135))
- data2 = 0x03;
- else if ((VCLK >= 135) && (VCLK < 160))
- data2 = 0x02;
- else if ((VCLK >= 160) && (VCLK < 260))
- data2 = 0x01;
- else if (VCLK > 260)
- data2 = 0x00;
- */
data2 = 0x00;
xgifb_reg_and_or(pVBInfo->P3c4, 0x07, 0xFC, data2);
@@ -1591,7 +1534,6 @@ static void XGI_SetCRT1ModeRegs(struct xgi_hw_device_info *HwDeviceExtension,
data2 |= 0x20;
xgifb_reg_and_or(pVBInfo->P3c4, 0x06, ~0x3F, data2);
- /* xgifb_reg_set(pVBInfo->P3c4,0x06,data2); */
resindex = XGI_GetResInfo(ModeNo, ModeIdIndex, pVBInfo);
if (ModeNo <= 0x13)
xres = pVBInfo->StResInfo[resindex].HTotal;
@@ -1636,11 +1578,6 @@ static void XGI_SetCRT1ModeRegs(struct xgi_hw_device_info *HwDeviceExtension,
XGI_SetVCLKState(HwDeviceExtension, ModeNo, RefreshRateTableIndex,
pVBInfo);
- /* if (modeflag&HalfDCLK) //030305 fix lowresolution bug */
- /* if (XGINew_IF_DEF_NEW_LOWRES) */
- /* XGI_VesaLowResolution(ModeNo, ModeIdIndex);
- * //030305 fix lowresolution bug */
-
data = xgifb_reg_get(pVBInfo->P3d4, 0x31);
if (HwDeviceExtension->jChipType == XG27) {
@@ -1803,11 +1740,6 @@ static void XGI_GetLVDSResInfo(unsigned short ModeNo,
/* si+Ext_ResInfo */
modeflag = pVBInfo->EModeIDTable[ModeIdIndex].Ext_RESINFO;
- /* if (ModeNo > 0x13) */
- /* modeflag = pVBInfo->EModeIDTable[ModeIdIndex].Ext_ModeFlag; */
- /* else */
- /* modeflag = pVBInfo->SModeIDTable[ModeIdIndex].St_ModeFlag; */
-
if (ModeNo <= 0x13)
/* si+St_ResInfo */
resindex = pVBInfo->SModeIDTable[ModeIdIndex].St_ResInfo;
@@ -1815,8 +1747,6 @@ static void XGI_GetLVDSResInfo(unsigned short ModeNo,
/* si+Ext_ResInfo */
resindex = pVBInfo->EModeIDTable[ModeIdIndex].Ext_RESINFO;
- /* resindex = XGI_GetResInfo(ModeNo, ModeIdIndex, pVBInfo); */
-
if (ModeNo <= 0x13) {
xres = pVBInfo->StResInfo[resindex].HTotal;
yres = pVBInfo->StResInfo[resindex].VTotal;
@@ -1831,13 +1761,10 @@ static void XGI_GetLVDSResInfo(unsigned short ModeNo,
if (modeflag & DoubleScanMode)
yres = yres << 1;
}
- /* if (modeflag & Charx8Dot) */
- /* { */
if (xres == 720)
xres = 640;
- /* } */
pVBInfo->VGAHDE = xres;
pVBInfo->HDE = xres;
pVBInfo->VGAVDE = yres;
@@ -1890,7 +1817,7 @@ static void *XGI_GetLcdPtr(unsigned short BX, unsigned short ModeNo,
tempal = (tempal & 0x0f);
}
- tempcx = LCDLenList[tempbx]; /* mov cl,byte ptr cs:LCDLenList[bx] */
+ tempcx = LCDLenList[tempbx];
if (pVBInfo->LCDInfo & EnableScalingLCD) { /* ScaleLCD */
if ((tempbx == 5) || (tempbx) == 7)
@@ -1898,9 +1825,6 @@ static void *XGI_GetLcdPtr(unsigned short BX, unsigned short ModeNo,
else if ((tempbx == 3) || (tempbx == 8))
tempcx = LVDSDesDataLen2;
}
- /* mov di, word ptr cs:LCDDataList[bx] */
- /* tempdi = pVideoMemory[LCDDataList + tempbx * 2] |
- (pVideoMemory[LCDDataList + tempbx * 2 + 1] << 8); */
switch (tempbx) {
case 0:
@@ -2321,10 +2245,10 @@ static void *XGI_GetTVPtr(unsigned short BX, unsigned short ModeNo,
switch (tempbx) {
case 0:
- tempdi = NULL; /*EPLCHTVCRT1Ptr_H;*/
+ tempdi = NULL;
break;
case 1:
- tempdi = NULL; /*EPLCHTVCRT1Ptr_V;*/
+ tempdi = NULL;
break;
case 2:
case 6:
@@ -2363,9 +2287,7 @@ static void *XGI_GetTVPtr(unsigned short BX, unsigned short ModeNo,
}
/* 07/05/22 */
- if (table == 0x00) {
- } else if (table == 0x01) {
- } else if (table == 0x04) {
+ if (table == 0x04) {
switch (tempdi[i].DATAPTR) {
case 0:
return &XGI_ExtPALData[tempal];
@@ -2429,7 +2351,6 @@ static void *XGI_GetTVPtr(unsigned short BX, unsigned short ModeNo,
default:
break;
}
- } else if (table == 0x06) {
}
return NULL;
}
@@ -2741,7 +2662,6 @@ static void XGI_SetLVDSRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
else
tempbx = LCDPtr->LCDVRS;
- /* tempbx = tempbx >> 4; */
tempcx = push1;
if (pVBInfo->LCDInfo & EnableScalingLCD)
@@ -2881,7 +2801,6 @@ static void XGI_GetLCDVCLKPtr(unsigned char *di_0, unsigned char *di_1,
unsigned short index;
if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) {
- /* index = XGI_GetLCDCapPtr(pVBInfo); */
index = XGI_GetLCDCapPtr1(pVBInfo);
if (pVBInfo->VBInfo & SetCRT2ToLCD) { /* LCDB */
@@ -3105,19 +3024,6 @@ static void XGI_UpdateModeInfo(struct xgi_hw_device_info *HwDeviceExtension,
}
}
-void XGI_GetVGAType(struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo)
-{
- /*
- if ( HwDeviceExtension->jChipType >= XG20 ) {
- pVBInfo->Set_VGAType = XG20;
- } else {
- pVBInfo->Set_VGAType = VGA_XGI340;
- }
- */
- pVBInfo->Set_VGAType = HwDeviceExtension->jChipType;
-}
-
void XGI_GetVBType(struct vb_device_info *pVBInfo)
{
unsigned short flag, tempbx, tempah;
@@ -3160,7 +3066,7 @@ void XGI_GetVBType(struct vb_device_info *pVBInfo)
}
}
-void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
+static void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
struct xgi_hw_device_info *HwDeviceExtension,
struct vb_device_info *pVBInfo)
{
@@ -3193,14 +3099,9 @@ void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
if (pVBInfo->IF_DEF_LCDA == 1) {
- if ((pVBInfo->Set_VGAType >= XG20)
- || (pVBInfo->Set_VGAType >= XG40)) {
+ if ((HwDeviceExtension->jChipType >= XG20) ||
+ (HwDeviceExtension->jChipType >= XG40)) {
if (pVBInfo->IF_DEF_LVDS == 0) {
- /* if ((pVBInfo->VBType & VB_XGI302B)
- || (pVBInfo->VBType & VB_XGI301LV)
- || (pVBInfo->VBType & VB_XGI302LV)
- || (pVBInfo->VBType & VB_XGI301C))
- */
if (pVBInfo->VBType &
(VB_XGI302B |
VB_XGI301LV |
@@ -3225,7 +3126,7 @@ void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
((pVBInfo->VBType & VB_XGI301LV) ||
(pVBInfo->VBType & VB_XGI302LV) ||
(pVBInfo->VBType & VB_XGI301C)))) {
- if (temp & SetYPbPr) { /* temp = CR38 */
+ if (temp & SetYPbPr) {
if (pVBInfo->IF_DEF_HiVision == 1) {
/* shampoo add for new
* scratch */
@@ -3242,8 +3143,6 @@ void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
SetCRT2ToYPbPr;
}
}
-
- /* tempbx |= SetCRT2ToYPbPr; */
}
}
}
@@ -3368,7 +3267,7 @@ void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
pVBInfo->VBInfo = tempbx;
}
-void XGI_GetTVInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
+static void XGI_GetTVInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
struct vb_device_info *pVBInfo)
{
unsigned short temp, tempbx = 0, resinfo = 0, modeflag, index1;
@@ -3404,17 +3303,6 @@ void XGI_GetTVInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
tempbx &= (SetCHTVOverScan |
SetNTSCJ |
SetPALTV);
- /*
- if (pVBInfo->IF_DEF_LVDS == 0) {
- //PAL-M/PAL-N Info
- index1 = xgifb_reg_get(pVBInfo->P3d4, 0x38);
- //00:PAL, 01:PAL-M, 10:PAL-N
- temp2 = (index1 & 0xC0) >> 5;
- tempbx |= temp2;
- if (temp2 & 0x02) //PAL-M
- tempbx &= (~SetPALTV);
- }
- */
}
if (pVBInfo->IF_DEF_LVDS == 0) {
@@ -3476,8 +3364,8 @@ void XGI_GetTVInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
pVBInfo->TVInfo = tempbx;
}
-unsigned char XGI_GetLCDInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
- struct vb_device_info *pVBInfo)
+static unsigned char XGI_GetLCDInfo(unsigned short ModeNo,
+ unsigned short ModeIdIndex, struct vb_device_info *pVBInfo)
{
unsigned short temp, tempax, tempbx, modeflag, resinfo = 0, LCDIdIndex;
@@ -3553,15 +3441,8 @@ unsigned char XGI_GetLCDInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
tempbx |= SetLCDtoNonExpanding;
}
- /*
- if (tempax & LCDBToA) {
- tempbx |= SetLCDBToA;
- }
- */
-
if (pVBInfo->IF_DEF_ExpLink == 1) {
if (modeflag & HalfDCLK) {
- /* if (!(pVBInfo->LCDInfo&LCDNonExpanding)) */
if (!(tempbx & SetLCDtoNonExpanding)) {
tempbx |= EnableLVDSDDA;
} else {
@@ -3604,25 +3485,6 @@ unsigned char XGI_GetLCDInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
}
}
- /*
- if (pVBInfo->IF_DEF_LVDS == 0) {
- if (tempax & (LockLCDBToA | StLCDBToA)) {
- if (pVBInfo->VBInfo & SetInSlaveMode) {
- if (!((!(tempax & LockLCDBToA)) &&
- (ModeNo > 0x13))) {
- pVBInfo->VBInfo &=
- ~(SetSimuScanMode |
- SetInSlaveMode |
- SetCRT2ToLCD);
- pVBInfo->VBInfo |=
- SetCRT2ToLCDA |
- SetCRT2ToDualEdge;
- }
- }
- }
- }
- */
-
return 1;
}
@@ -3632,10 +3494,6 @@ unsigned char XGI_SearchModeID(unsigned short ModeNo,
if (ModeNo <= 5)
ModeNo |= 1;
if (ModeNo <= 0x13) {
- /* for (*ModeIdIndex=0;
- *ModeIdIndex < sizeof(pVBInfo->SModeIDTable)
- / sizeof(struct XGI_StStruct);
- (*ModeIdIndex)++) */
for (*ModeIdIndex = 0;; (*ModeIdIndex)++) {
if (pVBInfo->SModeIDTable[*ModeIdIndex].St_ModeID ==
ModeNo)
@@ -3651,10 +3509,6 @@ unsigned char XGI_SearchModeID(unsigned short ModeNo,
(*ModeIdIndex) += 2; /* 400 lines */
/* else 350 lines */
} else {
- /* for (*ModeIdIndex=0;
- *ModeIdIndex < sizeof(pVBInfo->EModeIDTable)
- / sizeof(struct XGI_ExtStruct);
- (*ModeIdIndex)++) */
for (*ModeIdIndex = 0;; (*ModeIdIndex)++) {
if (pVBInfo->EModeIDTable[*ModeIdIndex].Ext_ModeID ==
ModeNo)
@@ -3675,7 +3529,6 @@ static unsigned char XG21GPIODataTransfer(unsigned char ujDate)
for (i = 0; i < 8; i++) {
ujRet = ujRet << 1;
- /* ujRet |= GETBITS(ujDate >> i, 0:0); */
ujRet |= (ujDate >> i) & 1;
}
@@ -3726,7 +3579,101 @@ static unsigned char XGI_XG27GetPSCValue(struct vb_device_info *pVBInfo)
return temp;
}
-void XGI_DisplayOn(struct xgi_hw_device_info *pXGIHWDE,
+/*----------------------------------------------------------------------------*/
+/* input */
+/* bl[5] : 1;LVDS signal on */
+/* bl[1] : 1;LVDS backlight on */
+/* bl[0] : 1:LVDS VDD on */
+/* bh: 100000b : clear bit 5, to set bit5 */
+/* 000010b : clear bit 1, to set bit1 */
+/* 000001b : clear bit 0, to set bit0 */
+/*----------------------------------------------------------------------------*/
+static void XGI_XG21BLSignalVDD(unsigned short tempbh, unsigned short tempbl,
+ struct vb_device_info *pVBInfo)
+{
+ unsigned char CR4A, temp;
+
+ CR4A = xgifb_reg_get(pVBInfo->P3d4, 0x4A);
+ tempbh &= 0x23;
+ tempbl &= 0x23;
+ xgifb_reg_and(pVBInfo->P3d4, 0x4A, ~tempbh); /* enable GPIO write */
+
+ if (tempbh & 0x20) {
+ temp = (tempbl >> 4) & 0x02;
+
+ /* CR B4[1] */
+ xgifb_reg_and_or(pVBInfo->P3d4, 0xB4, ~0x02, temp);
+
+ }
+
+ temp = xgifb_reg_get(pVBInfo->P3d4, 0x48);
+
+ temp = XG21GPIODataTransfer(temp);
+ temp &= ~tempbh;
+ temp |= tempbl;
+ xgifb_reg_set(pVBInfo->P3d4, 0x48, temp);
+}
+
+static void XGI_XG27BLSignalVDD(unsigned short tempbh, unsigned short tempbl,
+ struct vb_device_info *pVBInfo)
+{
+ unsigned char CR4A, temp;
+ unsigned short tempbh0, tempbl0;
+
+ tempbh0 = tempbh;
+ tempbl0 = tempbl;
+ tempbh0 &= 0x20;
+ tempbl0 &= 0x20;
+ tempbh0 >>= 3;
+ tempbl0 >>= 3;
+
+ if (tempbh & 0x20) {
+ temp = (tempbl >> 4) & 0x02;
+
+ /* CR B4[1] */
+ xgifb_reg_and_or(pVBInfo->P3d4, 0xB4, ~0x02, temp);
+
+ }
+ xgifb_reg_and_or(pVBInfo->P3d4, 0xB4, ~tempbh0, tempbl0);
+
+ CR4A = xgifb_reg_get(pVBInfo->P3d4, 0x4A);
+ tempbh &= 0x03;
+ tempbl &= 0x03;
+ tempbh <<= 2;
+ tempbl <<= 2; /* GPIOC,GPIOD */
+ xgifb_reg_and(pVBInfo->P3d4, 0x4A, ~tempbh); /* enable GPIO write */
+ xgifb_reg_and_or(pVBInfo->P3d4, 0x48, ~tempbh, tempbl);
+}
+
+/* --------------------------------------------------------------------- */
+/* Function : XGI_XG21SetPanelDelay */
+/* Input : */
+/* Output : */
+/* Description : */
+/* I/P : bl : 1 ; T1 : the duration between CPL on and signal on */
+/* : bl : 2 ; T2 : the duration between signal on and Vdd on */
+/* : bl : 3 ; T3 : the duration between CPL off and signal off */
+/* : bl : 4 ; T4 : the duration between signal off and Vdd off */
+/* --------------------------------------------------------------------- */
+static void XGI_XG21SetPanelDelay(struct xgifb_video_info *xgifb_info,
+ unsigned short tempbl,
+ struct vb_device_info *pVBInfo)
+{
+ if (tempbl == 1)
+ mdelay(xgifb_info->lvds_data.PSC_S1);
+
+ if (tempbl == 2)
+ mdelay(xgifb_info->lvds_data.PSC_S2);
+
+ if (tempbl == 3)
+ mdelay(xgifb_info->lvds_data.PSC_S3);
+
+ if (tempbl == 4)
+ mdelay(xgifb_info->lvds_data.PSC_S4);
+}
+
+static void XGI_DisplayOn(struct xgifb_video_info *xgifb_info,
+ struct xgi_hw_device_info *pXGIHWDE,
struct vb_device_info *pVBInfo)
{
@@ -3736,12 +3683,12 @@ void XGI_DisplayOn(struct xgi_hw_device_info *pXGIHWDE,
if (!(XGI_XG21GetPSCValue(pVBInfo) & 0x1)) {
/* LVDS VDD on */
XGI_XG21BLSignalVDD(0x01, 0x01, pVBInfo);
- XGI_XG21SetPanelDelay(2, pVBInfo);
+ XGI_XG21SetPanelDelay(xgifb_info, 2, pVBInfo);
}
if (!(XGI_XG21GetPSCValue(pVBInfo) & 0x20))
/* LVDS signal on */
XGI_XG21BLSignalVDD(0x20, 0x20, pVBInfo);
- XGI_XG21SetPanelDelay(3, pVBInfo);
+ XGI_XG21SetPanelDelay(xgifb_info, 3, pVBInfo);
/* LVDS backlight on */
XGI_XG21BLSignalVDD(0x02, 0x02, pVBInfo);
} else {
@@ -3756,12 +3703,12 @@ void XGI_DisplayOn(struct xgi_hw_device_info *pXGIHWDE,
if (!(XGI_XG27GetPSCValue(pVBInfo) & 0x1)) {
/* LVDS VDD on */
XGI_XG27BLSignalVDD(0x01, 0x01, pVBInfo);
- XGI_XG21SetPanelDelay(2, pVBInfo);
+ XGI_XG21SetPanelDelay(xgifb_info, 2, pVBInfo);
}
if (!(XGI_XG27GetPSCValue(pVBInfo) & 0x20))
/* LVDS signal on */
XGI_XG27BLSignalVDD(0x20, 0x20, pVBInfo);
- XGI_XG21SetPanelDelay(3, pVBInfo);
+ XGI_XG21SetPanelDelay(xgifb_info, 3, pVBInfo);
/* LVDS backlight on */
XGI_XG27BLSignalVDD(0x02, 0x02, pVBInfo);
} else {
@@ -3772,7 +3719,8 @@ void XGI_DisplayOn(struct xgi_hw_device_info *pXGIHWDE,
}
}
-void XGI_DisplayOff(struct xgi_hw_device_info *pXGIHWDE,
+void XGI_DisplayOff(struct xgifb_video_info *xgifb_info,
+ struct xgi_hw_device_info *pXGIHWDE,
struct vb_device_info *pVBInfo)
{
@@ -3780,7 +3728,7 @@ void XGI_DisplayOff(struct xgi_hw_device_info *pXGIHWDE,
if (pVBInfo->IF_DEF_LVDS == 1) {
/* LVDS backlight off */
XGI_XG21BLSignalVDD(0x02, 0x00, pVBInfo);
- XGI_XG21SetPanelDelay(3, pVBInfo);
+ XGI_XG21SetPanelDelay(xgifb_info, 3, pVBInfo);
} else {
/* DVO/DVI signal off */
XGI_XG21BLSignalVDD(0x20, 0x00, pVBInfo);
@@ -3791,7 +3739,7 @@ void XGI_DisplayOff(struct xgi_hw_device_info *pXGIHWDE,
if ((XGI_XG27GetPSCValue(pVBInfo) & 0x2)) {
/* LVDS backlight off */
XGI_XG27BLSignalVDD(0x02, 0x00, pVBInfo);
- XGI_XG21SetPanelDelay(3, pVBInfo);
+ XGI_XG21SetPanelDelay(xgifb_info, 3, pVBInfo);
}
if (pVBInfo->IF_DEF_LVDS == 0)
@@ -3838,26 +3786,17 @@ static void XGI_GetCRT2ResInfo(unsigned short ModeNo,
if (ModeNo <= 0x13) {
xres = pVBInfo->StResInfo[resindex].HTotal;
yres = pVBInfo->StResInfo[resindex].VTotal;
- /* si+St_ResInfo */
- /* modeflag = pVBInfo->SModeIDTable[ModeIdIndex].St_ModeFlag;*/
} else {
xres = pVBInfo->ModeResInfo[resindex].HTotal; /* xres->ax */
yres = pVBInfo->ModeResInfo[resindex].VTotal; /* yres->bx */
/* si+St_ModeFlag */
modeflag = pVBInfo->EModeIDTable[ModeIdIndex].Ext_ModeFlag;
- /*
- if (pVBInfo->IF_DEF_FSTN) {
- xres *= 2;
- yres *= 2;
- } else {
- */
if (modeflag & HalfDCLK)
xres *= 2;
if (modeflag & DoubleScanMode)
yres *= 2;
- /* } */
}
if (pVBInfo->VBInfo & SetCRT2ToLCD) {
@@ -4028,8 +3967,6 @@ static void XGI_GetCRT2Data(unsigned short ModeNo, unsigned short ModeIdIndex,
tempbx = 775;
else if (pVBInfo->VGAVDE == 600)
tempbx = 775;
- /* else if (pVBInfo->VGAVDE==350) tempbx=560; */
- /* else if (pVBInfo->VGAVDE==400) tempbx=640; */
else
tempbx = 768;
} else
@@ -4294,7 +4231,6 @@ static void XGI_PreSetGroup1(unsigned short ModeNo, unsigned short ModeIdIndex,
XGI_SetCRT2Offset(ModeNo, ModeIdIndex, RefreshRateTableIndex,
HwDeviceExtension, pVBInfo);
XGI_SetCRT2FIFO(pVBInfo);
- /* XGI_SetCRT2Sync(ModeNo,RefreshRateTableIndex); */
for (tempcx = 4; tempcx < 7; tempcx++)
xgifb_reg_set(pVBInfo->Part1Port, tempcx, 0x0);
@@ -4497,9 +4433,6 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
temp = 0xFF; /* set MAX HT */
xgifb_reg_set(pVBInfo->Part1Port, 0x03, temp);
- /* if (modeflag & Charx8Dot) */
- /* tempcx = 0x08; */
- /* else */
tempcx = 0x08;
if (pVBInfo->VBType & (VB_XGI301LV | VB_XGI302LV | VB_XGI301C))
@@ -4565,7 +4498,6 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
}
}
} else {
- /* tempcx = tempbx & 0x00FF ; */
tempbx = (tempbx & 0xFF00) >> 8;
tempcx = (tempcx + tempbx) >> 1;
temp = (tempcx & 0x00FF) + 2;
@@ -4579,36 +4511,23 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
temp -= 6;
}
}
- } else {
- if (!(modeflag & HalfDCLK)) {
- temp -= 4;
- if (pVBInfo->LCDResInfo != Panel1280x960) {
- if (pVBInfo->VGAHDE >= 800) {
- temp -= 7;
- if (pVBInfo->ModeType ==
- ModeEGA) {
- if (pVBInfo->VGAVDE ==
- 1024) {
- temp += 15;
- if (pVBInfo->LCDResInfo != Panel1280x1024) {
- temp +=
- 7;
- }
- }
- }
-
- if (pVBInfo->VGAHDE >= 1280) {
- if (pVBInfo->LCDResInfo
- != Panel1280x960) {
- if (pVBInfo->LCDInfo
- & LCDNonExpanding) {
- temp
- += 28;
- }
- }
- }
- }
+ } else if (!(modeflag & HalfDCLK)) {
+ temp -= 4;
+ if (pVBInfo->LCDResInfo != Panel1280x960 &&
+ pVBInfo->VGAHDE >= 800) {
+ temp -= 7;
+ if (pVBInfo->ModeType == ModeEGA &&
+ pVBInfo->VGAVDE == 1024) {
+ temp += 15;
+ if (pVBInfo->LCDResInfo !=
+ Panel1280x1024)
+ temp += 7;
}
+
+ if (pVBInfo->VGAHDE >= 1280 &&
+ pVBInfo->LCDResInfo != Panel1280x960 &&
+ (pVBInfo->LCDInfo & LCDNonExpanding))
+ temp += 28;
}
}
}
@@ -5297,7 +5216,6 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
tempax--;
xgifb_reg_and(pVBInfo->Part2Port, 0x01, tempax);
- /* if ( !( pVBInfo->VBType & VB_XGI301C ) ) */
xgifb_reg_and(pVBInfo->Part2Port, 0x00, 0xEF);
}
@@ -5436,7 +5354,6 @@ static void XGI_SetLCDRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
tempax = pVBInfo->VT;
tempbx = pVBInfo->LCDVRS;
- /* if (SetLCD_Info & EnableScalingLCD) */
tempcx += tempbx;
if (tempcx >= tempax)
tempcx -= tempax;
@@ -5478,12 +5395,10 @@ static void XGI_SetLCDRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
temp = (tempcx & 0xFF00) >> 8;
xgifb_reg_set(pVBInfo->Part2Port, 0x25, temp);
- /* getlcdsync() */
XGI_GetLCDSync(&tempax, &tempbx, pVBInfo);
tempcx = tempax;
tempax = pVBInfo->HT;
tempbx = pVBInfo->LCDHRS;
- /* if ( SetLCD_Info & EnableScalingLCD) */
if (XGI_IsLCDDualLink(pVBInfo)) {
tempax = tempax >> 1;
tempbx = tempbx >> 1;
@@ -5801,9 +5716,6 @@ static void XGI_SetGroup4(unsigned short ModeNo, unsigned short ModeIdIndex,
if (XGI_IsLCDDualLink(pVBInfo))
tempax = tempax >> 1;
- /* if((pVBInfo->VBInfo&(SetCRT2ToLCD)) ||
- ((pVBInfo->TVInfo&SetYPbPrMode525p) ||
- (pVBInfo->TVInfo&SetYPbPrMode750p))) { */
if (pVBInfo->VBInfo & SetCRT2ToLCD) {
if (tempax > 800)
tempax -= 800;
@@ -5817,33 +5729,6 @@ static void XGI_SetGroup4(unsigned short ModeNo, unsigned short ModeIdIndex,
}
tempax -= 1;
- /*
- if (pVBInfo->VBInfo & (SetCRT2ToTV | SetCRT2ToHiVisionTV)) {
- if (pVBInfo->VBType & VB_XGI301LV) {
- if (!(pVBInfo->TVInfo &
- (SetYPbPrMode525p |
- SetYPbPrMode750p |
- SetYPbPrMode1080i))) {
- if (pVBInfo->VGAHDE > 800) {
- if (pVBInfo->VGAHDE == 1024)
- tempax =(tempax * 25 /
- 32) - 1;
- else
- tempax = (tempax * 20 /
- 32) - 1;
- }
- }
- } else {
- if (pVBInfo->VGAHDE > 800) {
- if (pVBInfo->VGAHDE == 1024)
- tempax = (tempax * 25 / 32) - 1;
- else
- tempax = (tempax * 20 / 32) - 1;
- }
- }
- }
- */
-
temp = (tempax & 0xFF00) >> 8;
temp = ((temp & 0x0003) << 4);
xgifb_reg_set(pVBInfo->Part4Port, 0x1E, temp);
@@ -5902,7 +5787,6 @@ static void XGI_SetGroup5(unsigned short ModeNo, unsigned short ModeIdIndex,
if (!(pVBInfo->VBInfo & (SetInSlaveMode | LoadDACFlag
| CRT2DisplayFlag))) {
XGINew_EnableCRT2(pVBInfo);
- /* LoadDAC2(pVBInfo->Part5Port, ModeNo, ModeIdIndex); */
}
}
return;
@@ -5921,118 +5805,11 @@ static void XGI_DisableGatingCRT(struct xgi_hw_device_info *HwDeviceExtension,
xgifb_reg_and_or(pVBInfo->P3d4, 0x63, 0xBF, 0x00);
}
-/*----------------------------------------------------------------------------*/
-/* input */
-/* bl[5] : 1;LVDS signal on */
-/* bl[1] : 1;LVDS backlight on */
-/* bl[0] : 1:LVDS VDD on */
-/* bh: 100000b : clear bit 5, to set bit5 */
-/* 000010b : clear bit 1, to set bit1 */
-/* 000001b : clear bit 0, to set bit0 */
-/*----------------------------------------------------------------------------*/
-void XGI_XG21BLSignalVDD(unsigned short tempbh, unsigned short tempbl,
- struct vb_device_info *pVBInfo)
-{
- unsigned char CR4A, temp;
-
- CR4A = xgifb_reg_get(pVBInfo->P3d4, 0x4A);
- tempbh &= 0x23;
- tempbl &= 0x23;
- xgifb_reg_and(pVBInfo->P3d4, 0x4A, ~tempbh); /* enable GPIO write */
-
- if (tempbh & 0x20) {
- temp = (tempbl >> 4) & 0x02;
-
- /* CR B4[1] */
- xgifb_reg_and_or(pVBInfo->P3d4, 0xB4, ~0x02, temp);
-
- }
-
- temp = xgifb_reg_get(pVBInfo->P3d4, 0x48);
-
- temp = XG21GPIODataTransfer(temp);
- temp &= ~tempbh;
- temp |= tempbl;
- xgifb_reg_set(pVBInfo->P3d4, 0x48, temp);
-}
-
-void XGI_XG27BLSignalVDD(unsigned short tempbh, unsigned short tempbl,
- struct vb_device_info *pVBInfo)
-{
- unsigned char CR4A, temp;
- unsigned short tempbh0, tempbl0;
-
- tempbh0 = tempbh;
- tempbl0 = tempbl;
- tempbh0 &= 0x20;
- tempbl0 &= 0x20;
- tempbh0 >>= 3;
- tempbl0 >>= 3;
-
- if (tempbh & 0x20) {
- temp = (tempbl >> 4) & 0x02;
-
- /* CR B4[1] */
- xgifb_reg_and_or(pVBInfo->P3d4, 0xB4, ~0x02, temp);
-
- }
- xgifb_reg_and_or(pVBInfo->P3d4, 0xB4, ~tempbh0, tempbl0);
-
- CR4A = xgifb_reg_get(pVBInfo->P3d4, 0x4A);
- tempbh &= 0x03;
- tempbl &= 0x03;
- tempbh <<= 2;
- tempbl <<= 2; /* GPIOC,GPIOD */
- xgifb_reg_and(pVBInfo->P3d4, 0x4A, ~tempbh); /* enable GPIO write */
- xgifb_reg_and_or(pVBInfo->P3d4, 0x48, ~tempbh, tempbl);
-}
-
-/* --------------------------------------------------------------------- */
-unsigned short XGI_GetLVDSOEMTableIndex(struct vb_device_info *pVBInfo)
-{
- unsigned short index;
-
- index = xgifb_reg_get(pVBInfo->P3d4, 0x36);
- if (index < sizeof(XGI21_LCDCapList)
- / sizeof(struct XGI21_LVDSCapStruct))
- return index;
- return 0;
-}
-
-/* --------------------------------------------------------------------- */
-/* Function : XGI_XG21SetPanelDelay */
-/* Input : */
-/* Output : */
-/* Description : */
-/* I/P : bl : 1 ; T1 : the duration between CPL on and signal on */
-/* : bl : 2 ; T2 : the duration signal on and Vdd on */
-/* : bl : 3 ; T3 : the duration between CPL off and signal off */
-/* : bl : 4 ; T4 : the duration signal off and Vdd off */
-/* --------------------------------------------------------------------- */
-void XGI_XG21SetPanelDelay(unsigned short tempbl,
+static unsigned char XGI_XG21CheckLVDSMode(struct xgifb_video_info *xgifb_info,
+ unsigned short ModeNo, unsigned short ModeIdIndex,
struct vb_device_info *pVBInfo)
{
- unsigned short index;
-
- index = XGI_GetLVDSOEMTableIndex(pVBInfo);
- if (tempbl == 1)
- mdelay(pVBInfo->XG21_LVDSCapList[index].PSC_S1);
-
- if (tempbl == 2)
- mdelay(pVBInfo->XG21_LVDSCapList[index].PSC_S2);
-
- if (tempbl == 3)
- mdelay(pVBInfo->XG21_LVDSCapList[index].PSC_S3);
-
- if (tempbl == 4)
- mdelay(pVBInfo->XG21_LVDSCapList[index].PSC_S4);
-}
-
-unsigned char XGI_XG21CheckLVDSMode(unsigned short ModeNo,
- unsigned short ModeIdIndex, struct vb_device_info *pVBInfo)
-{
- unsigned short xres, yres, colordepth, modeflag, resindex,
- lvdstableindex;
+ unsigned short xres, yres, colordepth, modeflag, resindex;
resindex = XGI_GetResInfo(ModeNo, ModeIdIndex, pVBInfo);
if (ModeNo <= 0x13) {
@@ -6061,18 +5838,15 @@ unsigned char XGI_XG21CheckLVDSMode(unsigned short ModeNo,
}
- lvdstableindex = XGI_GetLVDSOEMTableIndex(pVBInfo);
- if (xres > (pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDSHDE))
+ if (xres > xgifb_info->lvds_data.LVDSHDE)
return 0;
- if (yres > (pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDSVDE))
+ if (yres > xgifb_info->lvds_data.LVDSVDE)
return 0;
if (ModeNo > 0x13) {
- if ((xres != (pVBInfo->XG21_LVDSCapList[lvdstableindex].
- LVDSHDE)) ||
- (yres != (pVBInfo->XG21_LVDSCapList[lvdstableindex].
- LVDSVDE))) {
+ if (xres != xgifb_info->lvds_data.LVDSHDE ||
+ yres != xgifb_info->lvds_data.LVDSVDE) {
colordepth = XGI_GetColorDepth(ModeNo,
ModeIdIndex,
pVBInfo);
@@ -6084,55 +5858,26 @@ unsigned char XGI_XG21CheckLVDSMode(unsigned short ModeNo,
return 1;
}
-void XGI_SetXG21FPBits(struct vb_device_info *pVBInfo)
-{
- unsigned char temp;
-
- temp = xgifb_reg_get(pVBInfo->P3d4, 0x37); /* D[0] 1: 18bit */
- temp = (temp & 1) << 6;
- /* SR06[6] 18bit Dither */
- xgifb_reg_and_or(pVBInfo->P3c4, 0x06, ~0x40, temp);
- /* SR09[7] enable FP output, SR09[6] 1: sigle 18bits, 0: dual 12bits */
- xgifb_reg_and_or(pVBInfo->P3c4, 0x09, ~0xc0, temp | 0x80);
-
-}
-
-void XGI_SetXG27FPBits(struct vb_device_info *pVBInfo)
-{
- unsigned char temp;
-
- /* D[1:0] 01: 18bit, 00: dual 12, 10: single 24 */
- temp = xgifb_reg_get(pVBInfo->P3d4, 0x37);
- temp = (temp & 3) << 6;
- /* SR06[7]0: dual 12/1: single 24 [6] 18bit Dither <= 0 h/w recommend */
- xgifb_reg_and_or(pVBInfo->P3c4, 0x06, ~0xc0, temp & 0x80);
- /* SR09[7] enable FP output, SR09[6] 1: sigle 18bits, 0: 24bits */
- xgifb_reg_and_or(pVBInfo->P3c4, 0x09, ~0xc0, temp | 0x80);
-
-}
-
-static void xgifb_set_lvds(int chip_id,
+static void xgifb_set_lvds(struct xgifb_video_info *xgifb_info,
+ int chip_id,
unsigned short ModeNo,
unsigned short ModeIdIndex,
struct vb_device_info *pVBInfo)
{
unsigned char temp, Miscdata;
- unsigned short xres, yres, modeflag, resindex, lvdstableindex;
+ unsigned short xres, yres, modeflag, resindex;
unsigned short LVDSHT, LVDSHBS, LVDSHRS, LVDSHRE, LVDSHBE;
unsigned short LVDSVT, LVDSVBS, LVDSVRS, LVDSVRE, LVDSVBE;
unsigned short value;
- lvdstableindex = XGI_GetLVDSOEMTableIndex(pVBInfo);
- temp = (unsigned char) ((pVBInfo->XG21_LVDSCapList[lvdstableindex].
- LVDS_Capability &
+ temp = (unsigned char) ((xgifb_info->lvds_data.LVDS_Capability &
(LCDPolarity << 8)) >> 8);
temp &= LCDPolarity;
Miscdata = (unsigned char) inb(pVBInfo->P3cc);
outb((Miscdata & 0x3F) | temp, pVBInfo->P3c2);
- temp = (unsigned char) (pVBInfo->XG21_LVDSCapList[lvdstableindex].
- LVDS_Capability & LCDPolarity);
+ temp = xgifb_info->lvds_data.LVDS_Capability & LCDPolarity;
/* SR35[7] FP VSync polarity */
xgifb_reg_and_or(pVBInfo->P3c4, 0x35, ~0x80, temp & 0x80);
/* SR30[5] FP HSync polarity */
@@ -6159,48 +5904,43 @@ static void xgifb_set_lvds(int chip_id,
if (!(modeflag & Charx8Dot))
xres = xres * 8 / 9;
- LVDSHT = pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDSHT;
+ LVDSHT = xgifb_info->lvds_data.LVDSHT;
- LVDSHBS = xres + (pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDSHDE
- - xres) / 2;
+ LVDSHBS = xres + (xgifb_info->lvds_data.LVDSHDE - xres) / 2;
if ((ModeNo <= 0x13) && (modeflag & HalfDCLK))
LVDSHBS -= xres / 4;
if (LVDSHBS > LVDSHT)
LVDSHBS -= LVDSHT;
- LVDSHRS = LVDSHBS + pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDSHFP;
+ LVDSHRS = LVDSHBS + xgifb_info->lvds_data.LVDSHFP;
if (LVDSHRS > LVDSHT)
LVDSHRS -= LVDSHT;
- LVDSHRE = LVDSHRS + pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDSHSYNC;
+ LVDSHRE = LVDSHRS + xgifb_info->lvds_data.LVDSHSYNC;
if (LVDSHRE > LVDSHT)
LVDSHRE -= LVDSHT;
- LVDSHBE = LVDSHBS + LVDSHT
- - pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDSHDE;
+ LVDSHBE = LVDSHBS + LVDSHT - xgifb_info->lvds_data.LVDSHDE;
- LVDSVT = pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDSVT;
+ LVDSVT = xgifb_info->lvds_data.LVDSVT;
- LVDSVBS = yres + (pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDSVDE
- - yres) / 2;
+ LVDSVBS = yres + (xgifb_info->lvds_data.LVDSVDE - yres) / 2;
if ((ModeNo > 0x13) && (modeflag & DoubleScanMode))
LVDSVBS += yres / 2;
if (LVDSVBS > LVDSVT)
LVDSVBS -= LVDSVT;
- LVDSVRS = LVDSVBS + pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDSVFP;
+ LVDSVRS = LVDSVBS + xgifb_info->lvds_data.LVDSVFP;
if (LVDSVRS > LVDSVT)
LVDSVRS -= LVDSVT;
- LVDSVRE = LVDSVRS + pVBInfo->XG21_LVDSCapList[lvdstableindex].
- LVDSVSYNC;
+ LVDSVRE = LVDSVRS + xgifb_info->lvds_data.LVDSVSYNC;
if (LVDSVRE > LVDSVT)
LVDSVRE -= LVDSVT;
- LVDSVBE = LVDSVBS + LVDSVT
- - pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDSVDE;
+ LVDSVBE = LVDSVBS + LVDSVT - xgifb_info->lvds_data.LVDSVDE;
temp = (unsigned char) xgifb_reg_get(pVBInfo->P3d4, 0x11);
xgifb_reg_set(pVBInfo->P3d4, 0x11, temp & 0x7f); /* Unlock CRTC */
@@ -6300,13 +6040,9 @@ static void xgifb_set_lvds(int chip_id,
xgifb_reg_and_or(pVBInfo->P3c4, 0x31, ~0x30, value);
xgifb_reg_set(pVBInfo->P3c4,
- 0x2B,
- pVBInfo->XG21_LVDSCapList[lvdstableindex].
- VCLKData1);
+ 0x2B, xgifb_info->lvds_data.VCLKData1);
xgifb_reg_set(pVBInfo->P3c4,
- 0x2C,
- pVBInfo->XG21_LVDSCapList[lvdstableindex].
- VCLKData2);
+ 0x2C, xgifb_info->lvds_data.VCLKData2);
value += 0x10;
}
@@ -6398,7 +6134,8 @@ static unsigned char XGI_EnableChISLCD(struct vb_device_info *pVBInfo)
return 0;
}
-void XGI_DisableBridge(struct xgi_hw_device_info *HwDeviceExtension,
+static void XGI_DisableBridge(struct xgifb_video_info *xgifb_info,
+ struct xgi_hw_device_info *HwDeviceExtension,
struct vb_device_info *pVBInfo)
{
unsigned short tempah = 0;
@@ -6442,7 +6179,7 @@ void XGI_DisableBridge(struct xgi_hw_device_info *HwDeviceExtension,
| SetSimuScanMode))) {
if (pVBInfo->SetFlag & GatingCRT)
XGI_EnableGatingCRT(HwDeviceExtension, pVBInfo);
- XGI_DisplayOff(HwDeviceExtension, pVBInfo);
+ XGI_DisplayOff(xgifb_info, HwDeviceExtension, pVBInfo);
}
if (pVBInfo->VBInfo & SetCRT2ToLCDA) {
@@ -6464,7 +6201,6 @@ void XGI_DisableBridge(struct xgi_hw_device_info *HwDeviceExtension,
((!(pVBInfo->VBInfo & SetCRT2ToLCDA)) &&
(pVBInfo->VBInfo &
(SetCRT2ToRAMDAC | SetCRT2ToLCD | SetCRT2ToTV))))
- /* BScreenOff=1 */
xgifb_reg_or(pVBInfo->Part1Port, 0x00, 0x80);
if ((pVBInfo->SetFlag & DisableChB) ||
@@ -6484,7 +6220,6 @@ void XGI_DisableBridge(struct xgi_hw_device_info *HwDeviceExtension,
}
} else { /* {301} */
if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToTV)) {
- /* BScreenOff=1 */
xgifb_reg_or(pVBInfo->Part1Port, 0x00, 0x80);
/* Disable CRT2 */
xgifb_reg_and(pVBInfo->Part1Port, 0x1E, 0xDF);
@@ -6494,7 +6229,7 @@ void XGI_DisableBridge(struct xgi_hw_device_info *HwDeviceExtension,
if (pVBInfo->VBInfo & (DisableCRT2Display | SetCRT2ToLCDA
| SetSimuScanMode))
- XGI_DisplayOff(HwDeviceExtension, pVBInfo);
+ XGI_DisplayOff(xgifb_info, HwDeviceExtension, pVBInfo);
}
}
@@ -6610,17 +6345,6 @@ static void XGI_SetDelayComp(struct vb_device_info *pVBInfo)
if (pVBInfo->VBInfo & SetCRT2ToDualEdge)
tempbl = tempbl >> 4;
- /*
- if (pVBInfo->VBInfo & SetCRT2ToRAMDAC)
- tempbl = CRT2Delay1; // Get CRT2 Delay
- if (pVBInfo->VBType &
- (VB_XGI301B |
- VB_XGI302B |
- VB_XGI301LV |
- VB_XGI302LV |
- VB_XGI301C))
- tempbl = CRT2Delay2;
- */
if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) {
/* Get LCD Delay */
index = XGI_GetLCDCapPtr(pVBInfo);
@@ -6680,23 +6404,6 @@ static void XGI_SetLCDCap_A(unsigned short tempcx,
(unsigned short) (0x30 | (tempcx & 0x00C0)));
xgifb_reg_and_or(pVBInfo->Part1Port, 0x1A, 0x7F, 0x00);
}
-
- /*
- if (tempcx & EnableLCD24bpp) { // 24bits
- xgifb_reg_and_or(pVBInfo->Part1Port,
- 0x19,
- 0x0F,
- (unsigned short)(0x30 | (tempcx&0x00C0)));
- xgifb_reg_and_or(pVBInfo->Part1Port, 0x1A, 0x7F, 0x00);
- } else {
- xgifb_reg_and_or(pVBInfo->Part1Port,
- 0x19,
- 0x0F,
- // Enable Dither
- (unsigned short)(0x20 | (tempcx&0x00C0)));
- xgifb_reg_and_or(pVBInfo->Part1Port, 0x1A, 0x7F, 0x80);
- }
- */
}
/* --------------------------------------------------------------------- */
@@ -6718,6 +6425,25 @@ static void XGI_SetLCDCap_B(unsigned short tempcx,
| 0x18)); /* Enable Dither */
}
+static void XGI_LongWait(struct vb_device_info *pVBInfo)
+{
+ unsigned short i;
+
+ i = xgifb_reg_get(pVBInfo->P3c4, 0x1F);
+
+ if (!(i & 0xC0)) {
+ for (i = 0; i < 0xFFFF; i++) {
+ if (!(inb(pVBInfo->P3da) & 0x08))
+ break;
+ }
+
+ for (i = 0; i < 0xFFFF; i++) {
+ if ((inb(pVBInfo->P3da) & 0x08))
+ break;
+ }
+ }
+}
+
static void SetSpectrum(struct vb_device_info *pVBInfo)
{
unsigned short index;
@@ -6940,14 +6666,12 @@ static void XGI_OEM310Setting(unsigned short ModeNo,
unsigned short ModeIdIndex,
struct vb_device_info *pVBInfo)
{
- /* GetPart1IO(); */
XGI_SetDelayComp(pVBInfo);
if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA))
XGI_SetLCDCap(pVBInfo);
if (pVBInfo->VBInfo & SetCRT2ToTV) {
- /* GetPart2IO() */
XGI_SetPhaseIncr(pVBInfo);
XGI_SetYFilter(ModeNo, ModeIdIndex, pVBInfo);
XGI_SetAntiFlicker(ModeNo, ModeIdIndex, pVBInfo);
@@ -6963,7 +6687,7 @@ static void XGI_OEM310Setting(unsigned short ModeNo,
/* Output : */
/* Description : Origin code for crt2group */
/* --------------------------------------------------------------------- */
-void XGI_SetCRT2ModeRegs(unsigned short ModeNo,
+static void XGI_SetCRT2ModeRegs(unsigned short ModeNo,
struct xgi_hw_device_info *HwDeviceExtension,
struct vb_device_info *pVBInfo)
{
@@ -6972,8 +6696,6 @@ void XGI_SetCRT2ModeRegs(unsigned short ModeNo,
unsigned char tempah;
- /* // fix write part1 index 0 BTDRAM bit Bug
- * xgifb_reg_set(pVBInfo->Part1Port, 0x03, 0x00); */
tempah = 0;
if (!(pVBInfo->VBInfo & DisableCRT2Display)) {
tempah = xgifb_reg_get(pVBInfo->Part1Port, 0x00);
@@ -6999,32 +6721,6 @@ void XGI_SetCRT2ModeRegs(unsigned short ModeNo,
}
}
- /* 0210 shampoo
- if (pVBInfo->VBInfo & DisableCRT2Display) {
- tempah = 0;
- }
-
- xgifb_reg_set(pVBInfo->Part1Port, 0x00, tempah);
- if (pVBInfo->VBInfo & (SetCRT2ToRAMDAC | SetCRT2ToTV | SetCRT2ToLCD)) {
- tempcl = pVBInfo->ModeType;
- if (ModeNo > 0x13) {
- tempcl -= ModeVGA;
- if ((tempcl > 0) || (tempcl == 0)) {
- tempah=(0x008>>tempcl) ;
- if (tempah == 0)
- tempah = 1;
- tempah |= 0x040;
- }
- } else {
- tempah = 0x040;
- }
-
- if (pVBInfo->VBInfo & SetInSlaveMode) {
- tempah = (tempah ^ 0x050);
- }
- }
- */
-
xgifb_reg_set(pVBInfo->Part1Port, 0x00, tempah);
tempah = 0x08;
tempbl = 0xf0;
@@ -7093,14 +6789,11 @@ void XGI_SetCRT2ModeRegs(unsigned short ModeNo,
tempah |= 0x080;
if (pVBInfo->VBInfo & SetCRT2ToTV) {
- /* if (!(pVBInfo->TVInfo &
- (SetYPbPrMode525p | SetYPbPrMode750p))) { */
tempah |= 0x020;
if (ModeNo > 0x13) {
if (pVBInfo->VBInfo & DriverMode)
tempah = tempah ^ 0x20;
}
- /* } */
}
xgifb_reg_and_or(pVBInfo->Part4Port, 0x0D, ~0x0BF, tempah);
@@ -7110,12 +6803,8 @@ void XGI_SetCRT2ModeRegs(unsigned short ModeNo,
tempah |= 0x40;
if (pVBInfo->VBInfo & SetCRT2ToTV) {
- /* if ((!(pVBInfo->VBInfo & SetCRT2ToHiVisionTV)) &&
- (!(pVBInfo->TVInfo &
- (SetYPbPrMode525p | SetYPbPrMode750p)))) { */
if (pVBInfo->TVInfo & RPLLDIV2XO)
tempah |= 0x40;
- /* } */
}
if ((pVBInfo->LCDResInfo == Panel1280x1024)
@@ -7219,57 +6908,6 @@ unsigned char XGI_BridgeIsOn(struct vb_device_info *pVBInfo)
}
}
-void XGI_LongWait(struct vb_device_info *pVBInfo)
-{
- unsigned short i;
-
- i = xgifb_reg_get(pVBInfo->P3c4, 0x1F);
-
- if (!(i & 0xC0)) {
- for (i = 0; i < 0xFFFF; i++) {
- if (!(inb(pVBInfo->P3da) & 0x08))
- break;
- }
-
- for (i = 0; i < 0xFFFF; i++) {
- if ((inb(pVBInfo->P3da) & 0x08))
- break;
- }
- }
-}
-
-static void XGI_VBLongWait(struct vb_device_info *pVBInfo)
-{
- unsigned short tempal, temp, i, j;
- return;
- if (!(pVBInfo->VBInfo & SetCRT2ToTV)) {
- temp = 0;
- for (i = 0; i < 3; i++) {
- for (j = 0; j < 100; j++) {
- tempal = inb(pVBInfo->P3da);
- if (temp & 0x01) { /* VBWaitMode2 */
- if ((tempal & 0x08))
- continue;
-
- if (!(tempal & 0x08))
- break;
-
- } else { /* VBWaitMode1 */
- if (!(tempal & 0x08))
- continue;
-
- if ((tempal & 0x08))
- break;
- }
- }
- temp = temp ^ 0x01;
- }
- } else {
- XGI_LongWait(pVBInfo);
- }
- return;
-}
-
unsigned short XGI_GetRatePtrCRT2(struct xgi_hw_device_info *pXGIHWDE,
unsigned short ModeNo, unsigned short ModeIdIndex,
struct vb_device_info *pVBInfo)
@@ -7322,12 +6960,6 @@ unsigned short XGI_GetRatePtrCRT2(struct xgi_hw_device_info *pXGIHWDE,
RefreshRateTableIndex = pVBInfo->EModeIDTable[ModeIdIndex].REFindex;
ModeNo = pVBInfo->RefIndex[RefreshRateTableIndex].ModeID;
if (pXGIHWDE->jChipType >= XG20) { /* for XG20, XG21, XG27 */
- /*
- if (pVBInfo->RefIndex[RefreshRateTableIndex].Ext_InfoFlag &
- XG2xNotSupport) {
- index++;
- }
- */
if ((pVBInfo->RefIndex[RefreshRateTableIndex].XRes == 800) &&
(pVBInfo->RefIndex[RefreshRateTableIndex].YRes == 600)) {
index++;
@@ -7371,7 +7003,7 @@ unsigned short XGI_GetRatePtrCRT2(struct xgi_hw_device_info *pXGIHWDE,
temp = XGI_AjustCRT2Rate(ModeNo, ModeIdIndex,
RefreshRateTableIndex, &i, pVBInfo);
}
- return RefreshRateTableIndex + i; /* return (0x01 | (temp1<<1)); */
+ return RefreshRateTableIndex + i;
}
static void XGI_SetLCDAGroup(unsigned short ModeNo, unsigned short ModeIdIndex,
@@ -7379,9 +7011,6 @@ static void XGI_SetLCDAGroup(unsigned short ModeNo, unsigned short ModeIdIndex,
struct vb_device_info *pVBInfo)
{
unsigned short RefreshRateTableIndex;
- /* unsigned short temp ; */
-
- /* pVBInfo->SelectCRT2Rate = 0; */
pVBInfo->SetFlag |= ProgrammingCRT2;
RefreshRateTableIndex = XGI_GetRatePtrCRT2(HwDeviceExtension, ModeNo,
@@ -7394,7 +7023,7 @@ static void XGI_SetLCDAGroup(unsigned short ModeNo, unsigned short ModeIdIndex,
XGI_SetCRT2ECLK(ModeNo, ModeIdIndex, RefreshRateTableIndex, pVBInfo);
}
-unsigned char XGI_SetCRT2Group301(unsigned short ModeNo,
+static unsigned char XGI_SetCRT2Group301(unsigned short ModeNo,
struct xgi_hw_device_info *HwDeviceExtension,
struct vb_device_info *pVBInfo)
{
@@ -7499,10 +7128,6 @@ void XGI_SenseCRT1(struct vb_device_info *pVBInfo)
outb((unsigned char) DAC_TEST_PARMS[2], (pVBInfo->P3c8 + 1));
}
- XGI_VBLongWait(pVBInfo);
- XGI_VBLongWait(pVBInfo);
- XGI_VBLongWait(pVBInfo);
-
mdelay(1);
XGI_WaitDisply(pVBInfo);
@@ -7532,7 +7157,8 @@ void XGI_SenseCRT1(struct vb_device_info *pVBInfo)
xgifb_reg_set(pVBInfo->P3c4, 0x1F, (unsigned char) SR1F);
}
-void XGI_EnableBridge(struct xgi_hw_device_info *HwDeviceExtension,
+static void XGI_EnableBridge(struct xgifb_video_info *xgifb_info,
+ struct xgi_hw_device_info *HwDeviceExtension,
struct vb_device_info *pVBInfo)
{
unsigned short tempah;
@@ -7544,7 +7170,6 @@ void XGI_EnableBridge(struct xgi_hw_device_info *HwDeviceExtension,
/* Power on */
xgifb_reg_set(pVBInfo->Part1Port, 0x1E, 0x20);
} else {
- /* SetCRT2ToLCDA ) */
if (pVBInfo->VBInfo & SetCRT2ToDualEdge) {
/* Power on */
xgifb_reg_set(pVBInfo->Part1Port,
@@ -7572,10 +7197,8 @@ void XGI_EnableBridge(struct xgi_hw_device_info *HwDeviceExtension,
pVBInfo->Part1Port, 0x2E);
if (!(tempah & 0x80))
- /* BVBDOENABLE = 1 */
xgifb_reg_or(pVBInfo->Part1Port,
0x2E, 0x80);
- /* BScreenOFF = 0 */
xgifb_reg_and(pVBInfo->Part1Port, 0x00, 0x7F);
}
}
@@ -7638,12 +7261,11 @@ void XGI_EnableBridge(struct xgi_hw_device_info *HwDeviceExtension,
xgifb_reg_or(pVBInfo->Part4Port, 0x1F, tempah);
if (!(pVBInfo->SetFlag & DisableChA)) {
- XGI_VBLongWait(pVBInfo);
if (!(pVBInfo->SetFlag & GatingCRT)) {
XGI_DisableGatingCRT(HwDeviceExtension,
pVBInfo);
- XGI_DisplayOn(HwDeviceExtension, pVBInfo);
- XGI_VBLongWait(pVBInfo);
+ XGI_DisplayOn(xgifb_info, HwDeviceExtension,
+ pVBInfo);
}
}
} /* 301 */
@@ -7656,15 +7278,15 @@ void XGI_EnableBridge(struct xgi_hw_device_info *HwDeviceExtension,
tempah = (unsigned char) xgifb_reg_get(pVBInfo->Part1Port,
0x2E);
if (!(tempah & 0x80))
- /* BVBDOENABLE = 1 */
xgifb_reg_or(pVBInfo->Part1Port, 0x2E, 0x80);
xgifb_reg_and(pVBInfo->Part1Port, 0x00, 0x7F);
- XGI_DisplayOn(HwDeviceExtension, pVBInfo);
+ XGI_DisplayOn(xgifb_info, HwDeviceExtension, pVBInfo);
} /* End of VB */
}
-static void XGI_SetCRT1Group(struct xgi_hw_device_info *HwDeviceExtension,
+static void XGI_SetCRT1Group(struct xgifb_video_info *xgifb_info,
+ struct xgi_hw_device_info *HwDeviceExtension,
unsigned short ModeNo, unsigned short ModeIdIndex,
struct vb_device_info *pVBInfo)
{
@@ -7672,18 +7294,14 @@ static void XGI_SetCRT1Group(struct xgi_hw_device_info *HwDeviceExtension,
unsigned short XGINew_P3cc = pVBInfo->P3cc;
- /* XGINew_CRT1Mode = ModeNo; // SaveModeID */
StandTableIndex = XGI_GetModePtr(ModeNo, ModeIdIndex, pVBInfo);
- /* XGI_SetBIOSData(ModeNo, ModeIdIndex); */
- /* XGI_ClearBankRegs(ModeNo, ModeIdIndex); */
XGI_SetSeqRegs(ModeNo, StandTableIndex, ModeIdIndex, pVBInfo);
- XGI_SetMiscRegs(StandTableIndex, pVBInfo);
+ outb(pVBInfo->StandTable[StandTableIndex].MISC, pVBInfo->P3c2);
XGI_SetCRTCRegs(HwDeviceExtension, StandTableIndex, pVBInfo);
XGI_SetATTRegs(ModeNo, StandTableIndex, ModeIdIndex, pVBInfo);
XGI_SetGRCRegs(StandTableIndex, pVBInfo);
XGI_ClearExt1Regs(pVBInfo);
- /* if (pVBInfo->IF_DEF_ExpLink) */
if (HwDeviceExtension->jChipType == XG27) {
if (pVBInfo->IF_DEF_LVDS == 0)
XGI_SetDefaultVCLK(pVBInfo);
@@ -7735,11 +7353,6 @@ static void XGI_SetCRT1Group(struct xgi_hw_device_info *HwDeviceExtension,
temp = xgifb_reg_get(pVBInfo->P3d4, 0x38);
if (temp & 0xA0) {
- /* Enable write GPIOF */
- /* xgifb_reg_and(pVBInfo->P3d4, 0x4A, ~0x20); */
- /* P. DWN */
- /* xgifb_reg_and(pVBInfo->P3d4, 0x48, ~0x20); */
- /* XG21 CRT1 Timing */
if (HwDeviceExtension->jChipType == XG27)
XGI_SetXG27CRTC(ModeNo, ModeIdIndex,
RefreshRateTableIndex, pVBInfo);
@@ -7754,10 +7367,9 @@ static void XGI_SetCRT1Group(struct xgi_hw_device_info *HwDeviceExtension,
pVBInfo, RefreshRateTableIndex, ModeNo);
if (pVBInfo->IF_DEF_LVDS == 1)
- xgifb_set_lvds(HwDeviceExtension->jChipType,
+ xgifb_set_lvds(xgifb_info,
+ HwDeviceExtension->jChipType,
ModeNo, ModeIdIndex, pVBInfo);
- /* P. ON */
- /* xgifb_reg_or(pVBInfo->P3d4, 0x48, 0x20); */
}
}
@@ -7765,22 +7377,16 @@ static void XGI_SetCRT1Group(struct xgi_hw_device_info *HwDeviceExtension,
XGI_SetCRT1FIFO(ModeNo, HwDeviceExtension, pVBInfo);
XGI_SetCRT1ModeRegs(HwDeviceExtension, ModeNo, ModeIdIndex,
RefreshRateTableIndex, pVBInfo);
-
- /* XGI_LoadCharacter(); //dif ifdef TVFont */
-
XGI_LoadDAC(ModeNo, ModeIdIndex, pVBInfo);
- /* XGI_ClearBuffer(HwDeviceExtension, ModeNo, pVBInfo); */
}
-unsigned char XGISetModeNew(struct xgi_hw_device_info *HwDeviceExtension,
+unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info,
+ struct xgi_hw_device_info *HwDeviceExtension,
unsigned short ModeNo)
{
unsigned short ModeIdIndex;
- /* unsigned char *pVBInfo->FBAddr =
- HwDeviceExtension->pjVideoMemoryAddress; */
struct vb_device_info VBINF;
struct vb_device_info *pVBInfo = &VBINF;
- pVBInfo->ROMAddr = HwDeviceExtension->pjVirtualRomBase;
pVBInfo->BaseAddr = (unsigned long) HwDeviceExtension->pjIOAddress;
pVBInfo->IF_DEF_LVDS = 0;
pVBInfo->IF_DEF_LCDA = 1;
@@ -7831,14 +7437,8 @@ unsigned char XGISetModeNew(struct xgi_hw_device_info *HwDeviceExtension,
XGI_GetVBType(pVBInfo);
InitTo330Pointer(HwDeviceExtension->jChipType, pVBInfo);
- if (ModeNo & 0x80) {
+ if (ModeNo & 0x80)
ModeNo = ModeNo & 0x7F;
- /* XGINew_flag_clearbuffer = 0; */
- }
- /* else {
- XGINew_flag_clearbuffer = 1;
- }
- */
xgifb_reg_set(pVBInfo->P3c4, 0x05, 0x86);
if (HwDeviceExtension->jChipType < XG20) /* kuku 2004/06/25 1.Openkey */
@@ -7846,16 +7446,14 @@ unsigned char XGISetModeNew(struct xgi_hw_device_info *HwDeviceExtension,
XGI_SearchModeID(ModeNo, &ModeIdIndex, pVBInfo);
- XGI_GetVGAType(HwDeviceExtension, pVBInfo);
-
if (HwDeviceExtension->jChipType < XG20) { /* kuku 2004/06/25 */
XGI_GetVBInfo(ModeNo, ModeIdIndex, HwDeviceExtension, pVBInfo);
XGI_GetTVInfo(ModeNo, ModeIdIndex, pVBInfo);
XGI_GetLCDInfo(ModeNo, ModeIdIndex, pVBInfo);
- XGI_DisableBridge(HwDeviceExtension, pVBInfo);
+ XGI_DisableBridge(xgifb_info, HwDeviceExtension, pVBInfo);
if (pVBInfo->VBInfo & (SetSimuScanMode | SetCRT2ToLCDA)) {
- XGI_SetCRT1Group(HwDeviceExtension, ModeNo,
+ XGI_SetCRT1Group(xgifb_info, HwDeviceExtension, ModeNo,
ModeIdIndex, pVBInfo);
if (pVBInfo->VBInfo & SetCRT2ToLCDA) {
@@ -7864,7 +7462,8 @@ unsigned char XGISetModeNew(struct xgi_hw_device_info *HwDeviceExtension,
}
} else {
if (!(pVBInfo->VBInfo & SwitchToCRT2)) {
- XGI_SetCRT1Group(HwDeviceExtension, ModeNo,
+ XGI_SetCRT1Group(xgifb_info,
+ HwDeviceExtension, ModeNo,
ModeIdIndex, pVBInfo);
if (pVBInfo->VBInfo & SetCRT2ToLCDA) {
XGI_SetLCDAGroup(ModeNo, ModeIdIndex,
@@ -7894,11 +7493,11 @@ unsigned char XGISetModeNew(struct xgi_hw_device_info *HwDeviceExtension,
XGI_SetCRT2ModeRegs(ModeNo, HwDeviceExtension, pVBInfo);
XGI_OEM310Setting(ModeNo, ModeIdIndex, pVBInfo); /*0212*/
XGI_CloseCRTC(HwDeviceExtension, pVBInfo);
- XGI_EnableBridge(HwDeviceExtension, pVBInfo);
+ XGI_EnableBridge(xgifb_info, HwDeviceExtension, pVBInfo);
} /* !XG20 */
else {
if (pVBInfo->IF_DEF_LVDS == 1)
- if (!XGI_XG21CheckLVDSMode(ModeNo,
+ if (!XGI_XG21CheckLVDSMode(xgifb_info, ModeNo,
ModeIdIndex,
pVBInfo))
return 0;
@@ -7914,39 +7513,13 @@ unsigned char XGISetModeNew(struct xgi_hw_device_info *HwDeviceExtension,
pVBInfo->SetFlag = 0;
pVBInfo->VBInfo = DisableCRT2Display;
- XGI_DisplayOff(HwDeviceExtension, pVBInfo);
+ XGI_DisplayOff(xgifb_info, HwDeviceExtension, pVBInfo);
- XGI_SetCRT1Group(HwDeviceExtension, ModeNo, ModeIdIndex,
- pVBInfo);
+ XGI_SetCRT1Group(xgifb_info, HwDeviceExtension, ModeNo,
+ ModeIdIndex, pVBInfo);
- XGI_DisplayOn(HwDeviceExtension, pVBInfo);
- /*
- if (HwDeviceExtension->jChipType == XG21)
- xgifb_reg_and_or(pVBInfo->P3c4, 0x09, ~0x80, 0x80);
- */
- }
-
- /*
- if (ModeNo <= 0x13) {
- modeflag = pVBInfo->SModeIDTable[ModeIdIndex].St_ModeFlag;
- } else {
- modeflag = pVBInfo->EModeIDTable[ModeIdIndex].Ext_ModeFlag;
+ XGI_DisplayOn(xgifb_info, HwDeviceExtension, pVBInfo);
}
- pVBInfo->ModeType = modeflag&ModeInfoFlag;
- pVBInfo->SetFlag = 0x00;
- pVBInfo->VBInfo = DisableCRT2Display;
- temp = XGINew_CheckMemorySize(HwDeviceExtension,
- ModeNo,
- ModeIdIndex,
- pVBInfo);
-
- if (temp == 0)
- return (0);
-
- XGI_DisplayOff(HwDeviceExtension, pVBInfo) ;
- XGI_SetCRT1Group(HwDeviceExtension, ModeNo, ModeIdIndex, pVBInfo);
- XGI_DisplayOn(HwDeviceExtension, pVBInfo);
- */
XGI_UpdateModeInfo(HwDeviceExtension, pVBInfo);
diff --git a/drivers/staging/xgifb/vb_setmode.h b/drivers/staging/xgifb/vb_setmode.h
index 1bd8667ff5cf..552482858c1c 100644
--- a/drivers/staging/xgifb/vb_setmode.h
+++ b/drivers/staging/xgifb/vb_setmode.h
@@ -6,66 +6,22 @@ extern void XGI_UnLockCRT2(struct xgi_hw_device_info *HwDeviceExtension,
struct vb_device_info *);
extern void XGI_LockCRT2(struct xgi_hw_device_info *HwDeviceExtension,
struct vb_device_info *);
-extern void XGI_LongWait(struct vb_device_info *);
-extern void XGI_SetCRT2ModeRegs(unsigned short ModeNo,
- struct xgi_hw_device_info *,
- struct vb_device_info *);
-extern void XGI_DisableBridge(struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *);
-extern void XGI_EnableBridge(struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *);
-extern void XGI_DisplayOff(struct xgi_hw_device_info *,
+extern void XGI_DisplayOff(struct xgifb_video_info *,
+ struct xgi_hw_device_info *,
struct vb_device_info *);
-extern void XGI_DisplayOn(struct xgi_hw_device_info *,
- struct vb_device_info *);
extern void XGI_GetVBType(struct vb_device_info *);
extern void XGI_SenseCRT1(struct vb_device_info *);
-extern void XGI_GetVGAType(struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *);
-extern void XGI_GetVBInfo(unsigned short ModeNo,
- unsigned short ModeIdIndex,
- struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *);
-extern void XGI_GetTVInfo(unsigned short ModeNo,
- unsigned short ModeIdIndex,
- struct vb_device_info *);
-extern unsigned short XGI_GetResInfo(unsigned short ModeNo,
- unsigned short ModeIdIndex,
- struct vb_device_info *pVBInfo);
-
-extern unsigned char XGISetModeNew(struct xgi_hw_device_info *HwDeviceExtension,
+extern unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info,
+ struct xgi_hw_device_info *HwDeviceExtension,
unsigned short ModeNo) ;
extern unsigned char XGI_SearchModeID(unsigned short ModeNo,
unsigned short *ModeIdIndex,
struct vb_device_info *);
-extern unsigned char XGI_GetLCDInfo(unsigned short ModeNo,
- unsigned short ModeIdIndex,
- struct vb_device_info *);
extern unsigned char XGI_BridgeIsOn(struct vb_device_info *);
-
-extern unsigned char
-XGI_SetCRT2Group301(unsigned short ModeNo,
- struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *);
extern unsigned short XGI_GetRatePtrCRT2(struct xgi_hw_device_info *pXGIHWDE,
unsigned short ModeNo,
unsigned short ModeIdIndex,
struct vb_device_info *);
-extern void XGI_SetXG21FPBits(struct vb_device_info *pVBInfo);
-extern void XGI_SetXG27FPBits(struct vb_device_info *pVBInfo);
-extern void XGI_XG21BLSignalVDD(unsigned short tempbh,
- unsigned short tempbl,
- struct vb_device_info *pVBInfo);
-extern void XGI_XG27BLSignalVDD(unsigned short tempbh,
- unsigned short tempbl,
- struct vb_device_info *pVBInfo);
-extern void XGI_XG21SetPanelDelay(unsigned short tempbl,
- struct vb_device_info *pVBInfo);
-extern unsigned char XGI_XG21CheckLVDSMode(unsigned short ModeNo,
- unsigned short ModeIdIndex,
- struct vb_device_info *pVBInfo);
-extern unsigned short XGI_GetLVDSOEMTableIndex(struct vb_device_info *pVBInfo);
-
#endif
diff --git a/drivers/staging/xgifb/vb_struct.h b/drivers/staging/xgifb/vb_struct.h
index f9ade6f9f7ee..6556a0d6ff82 100644
--- a/drivers/staging/xgifb/vb_struct.h
+++ b/drivers/staging/xgifb/vb_struct.h
@@ -293,13 +293,12 @@ struct vb_device_info {
unsigned short IF_DEF_ExpLink;
unsigned short IF_DEF_HiVision;
unsigned short LCDResInfo, LCDTypeInfo, VBType;/*301b*/
- unsigned short VBInfo, TVInfo, LCDInfo, Set_VGAType;
+ unsigned short VBInfo, TVInfo, LCDInfo;
unsigned short VBExtInfo;/*301lv*/
unsigned short SetFlag;
unsigned short NewFlickerMode;
unsigned short SelectCRT2Rate;
- unsigned char *ROMAddr;
void __iomem *FBAddr;
unsigned long BaseAddr;
unsigned long RelIO;
@@ -376,7 +375,6 @@ struct vb_device_info {
unsigned char *pXGINew_CR97 ;
struct XGI330_LCDCapStruct *LCDCapList;
- struct XGI21_LVDSCapStruct *XG21_LVDSCapList;
struct XGI_TimingHStruct *TimingH;
struct XGI_TimingVStruct *TimingV;
diff --git a/drivers/staging/xgifb/vb_table.h b/drivers/staging/xgifb/vb_table.h
index b81ac7726d1c..e7946f1c1143 100644
--- a/drivers/staging/xgifb/vb_table.h
+++ b/drivers/staging/xgifb/vb_table.h
@@ -2569,33 +2569,6 @@ static struct XGI330_LCDCapStruct XGI_LCDCapList[] = {
0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x28, 0x10}
};
-struct XGI21_LVDSCapStruct XGI21_LCDCapList[] = {
- {DisableLCD24bpp + LCDPolarity,
- 2160, 1250, 1600, 1200, 64, 1, 192, 3,
- 0x70, 0x24, 0x20, 0x04, 0x0A, 0x02, 0xC8
- },
- {DisableLCD24bpp + LCDPolarity,
- 1688, 1066, 1280, 1024, 48, 1, 112, 3,
- 0x70, 0x44, 0x20, 0x04, 0x0A, 0x02, 0xC8
- },
- {DisableLCD24bpp + LCDPolarity + (LCDPolarity << 8),
- 1344, 806, 1024, 768, 24, 3, 136, 6,
- 0x6C, 0x65, 0x20, 0x04, 0x0A, 0x02, 0xC8
- },
- {DisableLCD24bpp + LCDPolarity,
- 1056, 628, 800, 600, 40, 1, 128, 4,
- 0x42, 0xE2, 0x20, 0x14, 0x0A, 0x02, 0x00
- },
- {DisableLCD24bpp + LCDPolarity,
- 928, 525, 800, 480, 40, 13, 48, 3,
- 0x52, 0xC5, 0x20, 0x14, 0x0A, 0x02, 0x00
- },
- {DisableLCD24bpp + LCDPolarity + (LCDPolarity << 8),
- 800, 525, 640, 480, 16, 10, 96, 2,
- 0x1B, 0xE1, 0x20, 0x04, 0x0A, 0x02, 0xC8
- }
-};
-
static struct XGI_Ext2Struct XGI330_RefIndex[] = {
{Support32Bpp + SupportAllCRT2 + SyncPN, RES320x200, VCLK25_175,
0x00, 0x10, 0x59, 320, 200},/* 00 */
diff --git a/drivers/staging/xgifb/vgatypes.h b/drivers/staging/xgifb/vgatypes.h
index 9b939b75c309..9e166bbb00c4 100644
--- a/drivers/staging/xgifb/vgatypes.h
+++ b/drivers/staging/xgifb/vgatypes.h
@@ -51,8 +51,6 @@ struct xgi_hw_device_info {
unsigned long ulExternalChip; /* NO VB or other video bridge*/
/* if ujVBChipID = VB_CHIP_UNKNOWN, */
- unsigned char *pjVirtualRomBase; /* ROM image */
-
void __iomem *pjVideoMemoryAddress;/* base virtual memory address */
/* of Linear VGA memory */
diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c
index 56c1f9c80dc1..642840c612ac 100644
--- a/drivers/staging/zcache/zcache-main.c
+++ b/drivers/staging/zcache/zcache-main.c
@@ -787,7 +787,7 @@ static ssize_t zv_max_zsize_store(struct kobject *kobj,
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- err = strict_strtoul(buf, 10, &val);
+ err = kstrtoul(buf, 10, &val);
if (err || (val == 0) || (val > (PAGE_SIZE / 8) * 7))
return -EINVAL;
zv_max_zsize = val;
@@ -819,7 +819,7 @@ static ssize_t zv_max_mean_zsize_store(struct kobject *kobj,
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- err = strict_strtoul(buf, 10, &val);
+ err = kstrtoul(buf, 10, &val);
if (err || (val == 0) || (val > (PAGE_SIZE / 8) * 7))
return -EINVAL;
zv_max_mean_zsize = val;
@@ -853,7 +853,7 @@ static ssize_t zv_page_count_policy_percent_store(struct kobject *kobj,
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- err = strict_strtoul(buf, 10, &val);
+ err = kstrtoul(buf, 10, &val);
if (err || (val == 0) || (val > 150))
return -EINVAL;
zv_page_count_policy_percent = val;
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index 09de99fbb7e0..2a2a92d389e6 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -653,7 +653,8 @@ int zram_init_device(struct zram *zram)
goto fail_no_table;
}
- zram->compress_buffer = (void *)__get_free_pages(__GFP_ZERO, 1);
+ zram->compress_buffer =
+ (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
if (!zram->compress_buffer) {
pr_err("Error allocating compressor buffer space\n");
ret = -ENOMEM;
diff --git a/drivers/staging/zram/zram_sysfs.c b/drivers/staging/zram/zram_sysfs.c
index 0ea8ed296a9d..d521122826f6 100644
--- a/drivers/staging/zram/zram_sysfs.c
+++ b/drivers/staging/zram/zram_sysfs.c
@@ -58,7 +58,7 @@ static ssize_t disksize_store(struct device *dev,
u64 disksize;
struct zram *zram = dev_to_zram(dev);
- ret = strict_strtoull(buf, 10, &disksize);
+ ret = kstrtoull(buf, 10, &disksize);
if (ret)
return ret;
@@ -88,7 +88,7 @@ static ssize_t reset_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
int ret;
- unsigned long do_reset;
+ unsigned short do_reset;
struct zram *zram;
struct block_device *bdev;
@@ -99,7 +99,7 @@ static ssize_t reset_store(struct device *dev,
if (bdev->bd_holders)
return -EBUSY;
- ret = strict_strtoul(buf, 10, &do_reset);
+ ret = kstrtou16(buf, 10, &do_reset);
if (ret)
return ret;
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 0fd96c10271d..ac44af165b27 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -27,8 +27,7 @@
#include <scsi/scsi_device.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
-#include <target/target_core_tmr.h>
-#include <target/target_core_transport.h>
+#include <target/target_core_fabric.h>
#include "iscsi_target_core.h"
#include "iscsi_target_parameters.h"
@@ -284,8 +283,8 @@ static struct iscsi_np *iscsit_get_np(
sock_in6 = (struct sockaddr_in6 *)sockaddr;
sock_in6_e = (struct sockaddr_in6 *)&np->np_sockaddr;
- if (!memcmp((void *)&sock_in6->sin6_addr.in6_u,
- (void *)&sock_in6_e->sin6_addr.in6_u,
+ if (!memcmp(&sock_in6->sin6_addr.in6_u,
+ &sock_in6_e->sin6_addr.in6_u,
sizeof(struct in6_addr)))
ip_match = 1;
@@ -614,13 +613,12 @@ int iscsit_add_reject(
hdr = (struct iscsi_reject *) cmd->pdu;
hdr->reason = reason;
- cmd->buf_ptr = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
+ cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
if (!cmd->buf_ptr) {
pr_err("Unable to allocate memory for cmd->buf_ptr\n");
iscsit_release_cmd(cmd);
return -1;
}
- memcpy(cmd->buf_ptr, buf, ISCSI_HDR_LEN);
spin_lock_bh(&conn->cmd_lock);
list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
@@ -661,13 +659,12 @@ int iscsit_add_reject_from_cmd(
hdr = (struct iscsi_reject *) cmd->pdu;
hdr->reason = reason;
- cmd->buf_ptr = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
+ cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
if (!cmd->buf_ptr) {
pr_err("Unable to allocate memory for cmd->buf_ptr\n");
iscsit_release_cmd(cmd);
return -1;
}
- memcpy(cmd->buf_ptr, buf, ISCSI_HDR_LEN);
if (add_to_conn) {
spin_lock_bh(&conn->cmd_lock);
@@ -1017,11 +1014,6 @@ done:
" non-existent or non-exported iSCSI LUN:"
" 0x%016Lx\n", get_unaligned_le64(&hdr->lun));
}
- if (ret == PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES)
- return iscsit_add_reject_from_cmd(
- ISCSI_REASON_BOOKMARK_NO_RESOURCES,
- 1, 1, buf, cmd);
-
send_check_condition = 1;
goto attach_cmd;
}
@@ -1044,6 +1036,8 @@ done:
*/
send_check_condition = 1;
} else {
+ cmd->data_length = cmd->se_cmd.data_length;
+
if (iscsit_decide_list_to_build(cmd, payload_length) < 0)
return iscsit_add_reject_from_cmd(
ISCSI_REASON_BOOKMARK_NO_RESOURCES,
@@ -1123,7 +1117,7 @@ attach_cmd:
* the backend memory allocation.
*/
ret = transport_generic_new_cmd(&cmd->se_cmd);
- if ((ret < 0) || (cmd->se_cmd.se_cmd_flags & SCF_SE_CMD_FAILED)) {
+ if (ret < 0) {
immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
dump_immediate_data = 1;
goto after_immediate_data;
@@ -1230,7 +1224,7 @@ static void iscsit_do_crypto_hash_buf(
crypto_hash_init(hash);
- sg_init_one(&sg, (u8 *)buf, payload_length);
+ sg_init_one(&sg, buf, payload_length);
crypto_hash_update(hash, &sg, payload_length);
if (padding) {
@@ -1341,7 +1335,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
spin_lock_irqsave(&se_cmd->t_state_lock, flags);
if (!(se_cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) ||
- (se_cmd->se_cmd_flags & SCF_SE_CMD_FAILED))
+ (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION))
dump_unsolicited_data = 1;
spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
@@ -1608,7 +1602,7 @@ static int iscsit_handle_nop_out(
/*
* Attach ping data to struct iscsi_cmd->buf_ptr.
*/
- cmd->buf_ptr = (void *)ping_data;
+ cmd->buf_ptr = ping_data;
cmd->buf_ptr_size = payload_length;
pr_debug("Got %u bytes of NOPOUT ping"
@@ -2513,10 +2507,10 @@ static int iscsit_send_data_in(
if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
hdr->flags |= ISCSI_FLAG_DATA_OVERFLOW;
- hdr->residual_count = cpu_to_be32(cmd->residual_count);
+ hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
} else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
hdr->flags |= ISCSI_FLAG_DATA_UNDERFLOW;
- hdr->residual_count = cpu_to_be32(cmd->residual_count);
+ hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
}
}
hton24(hdr->dlength, datain.length);
@@ -3018,10 +3012,10 @@ static int iscsit_send_status(
hdr->flags |= ISCSI_FLAG_CMD_FINAL;
if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
hdr->flags |= ISCSI_FLAG_CMD_OVERFLOW;
- hdr->residual_count = cpu_to_be32(cmd->residual_count);
+ hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
} else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
hdr->flags |= ISCSI_FLAG_CMD_UNDERFLOW;
- hdr->residual_count = cpu_to_be32(cmd->residual_count);
+ hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
}
hdr->response = cmd->iscsi_response;
hdr->cmd_status = cmd->se_cmd.scsi_status;
@@ -3133,6 +3127,7 @@ static int iscsit_send_task_mgt_rsp(
hdr = (struct iscsi_tm_rsp *) cmd->pdu;
memset(hdr, 0, ISCSI_HDR_LEN);
hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
+ hdr->flags = ISCSI_FLAG_CMD_FINAL;
hdr->response = iscsit_convert_tcm_tmr_rsp(se_tmr);
hdr->itt = cpu_to_be32(cmd->init_task_tag);
cmd->stat_sn = conn->stat_sn++;
@@ -3201,7 +3196,7 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
end_of_buf = 1;
goto eob;
}
- memcpy((void *)payload + payload_len, buf, len);
+ memcpy(payload + payload_len, buf, len);
payload_len += len;
spin_lock(&tiqn->tiqn_tpg_lock);
@@ -3233,7 +3228,7 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
end_of_buf = 1;
goto eob;
}
- memcpy((void *)payload + payload_len, buf, len);
+ memcpy(payload + payload_len, buf, len);
payload_len += len;
}
spin_unlock(&tpg->tpg_np_lock);
@@ -3490,7 +3485,7 @@ int iscsi_target_tx_thread(void *arg)
struct iscsi_conn *conn;
struct iscsi_queue_req *qr = NULL;
struct se_cmd *se_cmd;
- struct iscsi_thread_set *ts = (struct iscsi_thread_set *)arg;
+ struct iscsi_thread_set *ts = arg;
/*
* Allow ourselves to be interrupted by SIGINT so that a
* connection recovery / failure event can be triggered externally.
@@ -3779,7 +3774,7 @@ int iscsi_target_rx_thread(void *arg)
u8 buffer[ISCSI_HDR_LEN], opcode;
u32 checksum = 0, digest = 0;
struct iscsi_conn *conn = NULL;
- struct iscsi_thread_set *ts = (struct iscsi_thread_set *)arg;
+ struct iscsi_thread_set *ts = arg;
struct kvec iov;
/*
* Allow ourselves to be interrupted by SIGINT so that a
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index beb39469e7f1..db0cf7c8adde 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -30,9 +30,11 @@
static int chap_string_to_hex(unsigned char *dst, unsigned char *src, int len)
{
- int j = DIV_ROUND_UP(len, 2);
+ int j = DIV_ROUND_UP(len, 2), rc;
- hex2bin(dst, src, j);
+ rc = hex2bin(dst, src, j);
+ if (rc < 0)
+ pr_debug("CHAP string contains non hex digit symbols\n");
dst[j] = '\0';
return j;
@@ -80,7 +82,7 @@ static void chap_gen_challenge(
unsigned int *c_len)
{
unsigned char challenge_asciihex[CHAP_CHALLENGE_LENGTH * 2 + 1];
- struct iscsi_chap *chap = (struct iscsi_chap *) conn->auth_protocol;
+ struct iscsi_chap *chap = conn->auth_protocol;
memset(challenge_asciihex, 0, CHAP_CHALLENGE_LENGTH * 2 + 1);
@@ -118,7 +120,7 @@ static struct iscsi_chap *chap_server_open(
if (!conn->auth_protocol)
return NULL;
- chap = (struct iscsi_chap *) conn->auth_protocol;
+ chap = conn->auth_protocol;
/*
* We only support MD5 MDA presently.
*/
@@ -163,14 +165,15 @@ static int chap_server_compute_md5(
unsigned int *nr_out_len)
{
char *endptr;
- unsigned char id, digest[MD5_SIGNATURE_SIZE];
+ unsigned long id;
+ unsigned char digest[MD5_SIGNATURE_SIZE];
unsigned char type, response[MD5_SIGNATURE_SIZE * 2 + 2];
unsigned char identifier[10], *challenge = NULL;
unsigned char *challenge_binhex = NULL;
unsigned char client_digest[MD5_SIGNATURE_SIZE];
unsigned char server_digest[MD5_SIGNATURE_SIZE];
unsigned char chap_n[MAX_CHAP_N_SIZE], chap_r[MAX_RESPONSE_LENGTH];
- struct iscsi_chap *chap = (struct iscsi_chap *) conn->auth_protocol;
+ struct iscsi_chap *chap = conn->auth_protocol;
struct crypto_hash *tfm;
struct hash_desc desc;
struct scatterlist sg;
@@ -244,7 +247,7 @@ static int chap_server_compute_md5(
goto out;
}
- sg_init_one(&sg, (void *)&chap->id, 1);
+ sg_init_one(&sg, &chap->id, 1);
ret = crypto_hash_update(&desc, &sg, 1);
if (ret < 0) {
pr_err("crypto_hash_update() failed for id\n");
@@ -252,7 +255,7 @@ static int chap_server_compute_md5(
goto out;
}
- sg_init_one(&sg, (void *)&auth->password, strlen(auth->password));
+ sg_init_one(&sg, &auth->password, strlen(auth->password));
ret = crypto_hash_update(&desc, &sg, strlen(auth->password));
if (ret < 0) {
pr_err("crypto_hash_update() failed for password\n");
@@ -260,7 +263,7 @@ static int chap_server_compute_md5(
goto out;
}
- sg_init_one(&sg, (void *)chap->challenge, CHAP_CHALLENGE_LENGTH);
+ sg_init_one(&sg, chap->challenge, CHAP_CHALLENGE_LENGTH);
ret = crypto_hash_update(&desc, &sg, CHAP_CHALLENGE_LENGTH);
if (ret < 0) {
pr_err("crypto_hash_update() failed for challenge\n");
@@ -303,14 +306,17 @@ static int chap_server_compute_md5(
}
if (type == HEX)
- id = (unsigned char)simple_strtoul((char *)&identifier[2],
- &endptr, 0);
+ id = simple_strtoul(&identifier[2], &endptr, 0);
else
- id = (unsigned char)simple_strtoul(identifier, &endptr, 0);
+ id = simple_strtoul(identifier, &endptr, 0);
+ if (id > 255) {
+ pr_err("chap identifier: %lu greater than 255\n", id);
+ goto out;
+ }
/*
* RFC 1994 says Identifier is no more than octet (8 bits).
*/
- pr_debug("[server] Got CHAP_I=%d\n", id);
+ pr_debug("[server] Got CHAP_I=%lu\n", id);
/*
* Get CHAP_C.
*/
@@ -349,7 +355,7 @@ static int chap_server_compute_md5(
goto out;
}
- sg_init_one(&sg, (void *)&id, 1);
+ sg_init_one(&sg, &id, 1);
ret = crypto_hash_update(&desc, &sg, 1);
if (ret < 0) {
pr_err("crypto_hash_update() failed for id\n");
@@ -357,7 +363,7 @@ static int chap_server_compute_md5(
goto out;
}
- sg_init_one(&sg, (void *)auth->password_mutual,
+ sg_init_one(&sg, auth->password_mutual,
strlen(auth->password_mutual));
ret = crypto_hash_update(&desc, &sg, strlen(auth->password_mutual));
if (ret < 0) {
@@ -369,7 +375,7 @@ static int chap_server_compute_md5(
/*
* Convert received challenge to binary hex.
*/
- sg_init_one(&sg, (void *)challenge_binhex, challenge_len);
+ sg_init_one(&sg, challenge_binhex, challenge_len);
ret = crypto_hash_update(&desc, &sg, challenge_len);
if (ret < 0) {
pr_err("crypto_hash_update() failed for ma challenge\n");
@@ -412,7 +418,7 @@ static int chap_got_response(
char *nr_out_ptr,
unsigned int *nr_out_len)
{
- struct iscsi_chap *chap = (struct iscsi_chap *) conn->auth_protocol;
+ struct iscsi_chap *chap = conn->auth_protocol;
switch (chap->digest_type) {
case CHAP_DIGEST_MD5:
@@ -435,7 +441,7 @@ u32 chap_main_loop(
int *in_len,
int *out_len)
{
- struct iscsi_chap *chap = (struct iscsi_chap *) conn->auth_protocol;
+ struct iscsi_chap *chap = conn->auth_protocol;
if (!chap) {
chap = chap_server_open(conn, auth, in_text, out_text, out_len);
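The iscsi_target_auth.c hunks above add two defensive checks to the CHAP code: the hex2bin() return value is now inspected, and the parsed CHAP_I identifier is rejected when it does not fit in one octet, as RFC 1994 requires. A minimal userspace sketch of the identifier check — the function name is hypothetical and strtoul() stands in for the kernel's simple_strtoul():

#include <errno.h>
#include <stdlib.h>

/* Hypothetical sketch of the CHAP_I bounds check: RFC 1994 defines the
 * Identifier as a single octet, so anything above 255 is a protocol
 * error and must be rejected before it is used. */
static int parse_chap_identifier(const char *str, unsigned char *out_id)
{
	char *end;
	unsigned long id = strtoul(str, &end, 0);  /* base 0: "0x.." or decimal */

	if (end == str)          /* no digits at all */
		return -EINVAL;
	if (id > 255)            /* wider than one octet */
		return -EINVAL;

	*out_id = (unsigned char)id;
	return 0;
}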
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index db327845e46b..3468caab47a2 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -22,12 +22,8 @@
#include <linux/configfs.h>
#include <linux/export.h>
#include <target/target_core_base.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_fabric.h>
#include <target/target_core_fabric_configfs.h>
-#include <target/target_core_fabric_lib.h>
-#include <target/target_core_device.h>
-#include <target/target_core_tpg.h>
#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>
@@ -56,8 +52,7 @@ struct iscsi_portal_group *lio_get_tpg_from_tpg_item(
{
struct se_portal_group *se_tpg = container_of(to_config_group(item),
struct se_portal_group, tpg_group);
- struct iscsi_portal_group *tpg =
- (struct iscsi_portal_group *)se_tpg->se_tpg_fabric_ptr;
+ struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
int ret;
if (!tpg) {
@@ -1225,7 +1220,7 @@ struct se_portal_group *lio_target_tiqn_addtpg(
ret = core_tpg_register(
&lio_target_fabric_configfs->tf_ops,
- wwn, &tpg->tpg_se_tpg, (void *)tpg,
+ wwn, &tpg->tpg_se_tpg, tpg,
TRANSPORT_TPG_TYPE_NORMAL);
if (ret < 0)
return NULL;
diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
index 3723d90d5ae5..f1a02dad05a0 100644
--- a/drivers/target/iscsi/iscsi_target_core.h
+++ b/drivers/target/iscsi/iscsi_target_core.h
@@ -398,7 +398,6 @@ struct iscsi_cmd {
u32 pdu_send_order;
/* Current struct iscsi_pdu in struct iscsi_cmd->pdu_list */
u32 pdu_start;
- u32 residual_count;
/* Next struct iscsi_seq to send in struct iscsi_cmd->seq_list */
u32 seq_send_order;
/* Number of struct iscsi_seq in struct iscsi_cmd->seq_list */
@@ -535,7 +534,6 @@ struct iscsi_conn {
atomic_t connection_exit;
atomic_t connection_recovery;
atomic_t connection_reinstatement;
- atomic_t connection_wait;
atomic_t connection_wait_rcfr;
atomic_t sleep_on_conn_wait_comp;
atomic_t transport_failed;
@@ -643,7 +641,6 @@ struct iscsi_session {
atomic_t session_reinstatement;
atomic_t session_stop_active;
atomic_t sleep_on_sess_wait_comp;
- atomic_t transport_wait_cmds;
/* connection list */
struct list_head sess_conn_list;
struct list_head cr_active_list;
diff --git a/drivers/target/iscsi/iscsi_target_device.c b/drivers/target/iscsi/iscsi_target_device.c
index a19fa5eea88e..f63ea35bc4ae 100644
--- a/drivers/target/iscsi/iscsi_target_device.c
+++ b/drivers/target/iscsi/iscsi_target_device.c
@@ -21,8 +21,7 @@
#include <scsi/scsi_device.h>
#include <target/target_core_base.h>
-#include <target/target_core_device.h>
-#include <target/target_core_transport.h>
+#include <target/target_core_fabric.h>
#include "iscsi_target_core.h"
#include "iscsi_target_device.h"
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
index b7ffc3cd40cc..478451167b62 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.c
+++ b/drivers/target/iscsi/iscsi_target_erl0.c
@@ -21,7 +21,7 @@
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
-#include <target/target_core_transport.h>
+#include <target/target_core_fabric.h>
#include "iscsi_target_core.h"
#include "iscsi_target_seq_pdu_list.h"
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index c4c68da3e500..255c0d67e898 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.c
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -21,7 +21,7 @@
#include <linux/list.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
-#include <target/target_core_transport.h>
+#include <target/target_core_fabric.h>
#include "iscsi_target_core.h"
#include "iscsi_target_seq_pdu_list.h"
@@ -938,8 +938,7 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
* handle the SCF_SCSI_RESERVATION_CONFLICT case here as well.
*/
if (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION) {
- if (se_cmd->se_cmd_flags &
- SCF_SCSI_RESERVATION_CONFLICT) {
+ if (se_cmd->scsi_sense_reason == TCM_RESERVATION_CONFLICT) {
cmd->i_state = ISTATE_SEND_STATUS;
spin_unlock_bh(&cmd->istate_lock);
iscsit_add_cmd_to_response_queue(cmd, cmd->conn,
diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c
index 0b8404c30125..1af1f21af21f 100644
--- a/drivers/target/iscsi/iscsi_target_erl2.c
+++ b/drivers/target/iscsi/iscsi_target_erl2.c
@@ -21,7 +21,7 @@
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
-#include <target/target_core_transport.h>
+#include <target/target_core_fabric.h>
#include "iscsi_target_core.h"
#include "iscsi_target_datain_values.h"
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index daad362a93ce..373b0cc6abd8 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -23,7 +23,7 @@
#include <linux/crypto.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
-#include <target/target_core_transport.h>
+#include <target/target_core_fabric.h>
#include "iscsi_target_core.h"
#include "iscsi_target_tq.h"
@@ -143,7 +143,7 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
list_for_each_entry_safe(se_sess, se_sess_tmp, &se_tpg->tpg_sess_list,
sess_list) {
- sess_p = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+ sess_p = se_sess->fabric_sess_ptr;
spin_lock(&sess_p->conn_lock);
if (atomic_read(&sess_p->session_fall_back_to_erl0) ||
atomic_read(&sess_p->session_logout) ||
@@ -151,9 +151,9 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
spin_unlock(&sess_p->conn_lock);
continue;
}
- if (!memcmp((void *)sess_p->isid, (void *)conn->sess->isid, 6) &&
- (!strcmp((void *)sess_p->sess_ops->InitiatorName,
- (void *)initiatorname_param->value) &&
+ if (!memcmp(sess_p->isid, conn->sess->isid, 6) &&
+ (!strcmp(sess_p->sess_ops->InitiatorName,
+ initiatorname_param->value) &&
(sess_p->sess_ops->SessionType == sessiontype))) {
atomic_set(&sess_p->session_reinstatement, 1);
spin_unlock(&sess_p->conn_lock);
@@ -224,12 +224,12 @@ static int iscsi_login_zero_tsih_s1(
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
pr_err("Could not allocate memory for session\n");
- return -1;
+ return -ENOMEM;
}
iscsi_login_set_conn_values(sess, conn, pdu->cid);
sess->init_task_tag = pdu->itt;
- memcpy((void *)&sess->isid, (void *)pdu->isid, 6);
+ memcpy(&sess->isid, pdu->isid, 6);
sess->exp_cmd_sn = pdu->cmdsn;
INIT_LIST_HEAD(&sess->sess_conn_list);
INIT_LIST_HEAD(&sess->sess_ooo_cmdsn_list);
@@ -250,7 +250,8 @@ static int iscsi_login_zero_tsih_s1(
pr_err("idr_pre_get() for sess_idr failed\n");
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
- return -1;
+ kfree(sess);
+ return -ENOMEM;
}
spin_lock(&sess_idr_lock);
idr_get_new(&sess_idr, NULL, &sess->session_index);
@@ -270,14 +271,16 @@ static int iscsi_login_zero_tsih_s1(
ISCSI_LOGIN_STATUS_NO_RESOURCES);
pr_err("Unable to allocate memory for"
" struct iscsi_sess_ops.\n");
- return -1;
+ kfree(sess);
+ return -ENOMEM;
}
sess->se_sess = transport_init_session();
- if (!sess->se_sess) {
+ if (IS_ERR(sess->se_sess)) {
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
- return -1;
+ kfree(sess);
+ return -ENOMEM;
}
return 0;
@@ -437,8 +440,7 @@ static int iscsi_login_non_zero_tsih_s2(
atomic_read(&sess_p->session_logout) ||
(sess_p->time2retain_timer_flags & ISCSI_TF_EXPIRED))
continue;
- if (!memcmp((const void *)sess_p->isid,
- (const void *)pdu->isid, 6) &&
+ if (!memcmp(sess_p->isid, pdu->isid, 6) &&
(sess_p->tsih == pdu->tsih)) {
iscsit_inc_session_usage_count(sess_p);
iscsit_stop_time2retain_timer(sess_p);
@@ -651,7 +653,7 @@ static int iscsi_post_login_handler(
spin_lock_bh(&se_tpg->session_lock);
__transport_register_session(&sess->tpg->tpg_se_tpg,
- se_sess->se_node_acl, se_sess, (void *)sess);
+ se_sess->se_node_acl, se_sess, sess);
pr_debug("Moving to TARG_SESS_STATE_LOGGED_IN.\n");
sess->session_state = TARG_SESS_STATE_LOGGED_IN;
@@ -808,7 +810,7 @@ int iscsi_target_setup_login_socket(
* Setup the np->np_sockaddr from the passed sockaddr setup
* in iscsi_target_configfs.c code..
*/
- memcpy((void *)&np->np_sockaddr, (void *)sockaddr,
+ memcpy(&np->np_sockaddr, sockaddr,
sizeof(struct __kernel_sockaddr_storage));
if (sockaddr->ss_family == AF_INET6)
@@ -818,6 +820,7 @@ int iscsi_target_setup_login_socket(
/*
* Set SO_REUSEADDR, and disable Nagel Algorithm with TCP_NODELAY.
*/
+ /* FIXME: Someone please explain why this is endian-safe */
opt = 1;
if (np->np_network_transport == ISCSI_TCP) {
ret = kernel_setsockopt(sock, IPPROTO_TCP, TCP_NODELAY,
@@ -829,6 +832,7 @@ int iscsi_target_setup_login_socket(
}
}
+ /* FIXME: Someone please explain why this is endian-safe */
ret = kernel_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
(char *)&opt, sizeof(opt));
if (ret < 0) {
@@ -1203,7 +1207,7 @@ out:
int iscsi_target_login_thread(void *arg)
{
- struct iscsi_np *np = (struct iscsi_np *)arg;
+ struct iscsi_np *np = arg;
int ret;
allow_signal(SIGINT);
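The iscsi_login_zero_tsih_s1() hunks above change the failure returns from a bare -1 to -ENOMEM and, more importantly, free the partially initialized session before bailing out, so failures after the first allocation no longer leak it. A minimal userspace sketch of that unwind pattern — struct and field names are illustrative, not the driver's:

#include <errno.h>
#include <stdlib.h>

struct session { void *ops; void *se_sess; };

/* Illustrative only: allocate a session plus two sub-objects and undo the
 * earlier allocations on any later failure instead of leaking them. */
static int session_create(struct session **out)
{
	struct session *sess = calloc(1, sizeof(*sess));

	if (!sess)
		return -ENOMEM;

	sess->ops = calloc(1, 64);
	if (!sess->ops)
		goto free_sess;

	sess->se_sess = calloc(1, 64);
	if (!sess->se_sess)
		goto free_ops;

	*out = sess;
	return 0;

free_ops:
	free(sess->ops);
free_sess:
	free(sess);
	return -ENOMEM;
}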
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 426cd4bf6a9a..e89fa7457254 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -21,7 +21,7 @@
#include <linux/ctype.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
-#include <target/target_core_tpg.h>
+#include <target/target_core_fabric.h>
#include "iscsi_target_core.h"
#include "iscsi_target_parameters.h"
@@ -732,7 +732,7 @@ static void iscsi_initiatorname_tolower(
u32 iqn_size = strlen(param_buf), i;
for (i = 0; i < iqn_size; i++) {
- c = (char *)&param_buf[i];
+ c = &param_buf[i];
if (!isupper(*c))
continue;
@@ -981,14 +981,13 @@ struct iscsi_login *iscsi_target_init_negotiation(
return NULL;
}
- login->req = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
+ login->req = kmemdup(login_pdu, ISCSI_HDR_LEN, GFP_KERNEL);
if (!login->req) {
pr_err("Unable to allocate memory for Login Request.\n");
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
goto out;
}
- memcpy(login->req, login_pdu, ISCSI_HDR_LEN);
login->req_buf = kzalloc(MAX_KEY_VALUE_PAIRS, GFP_KERNEL);
if (!login->req_buf) {
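The iscsi_target_init_negotiation() hunk above folds a kzalloc() plus memcpy() of the login PDU into a single kmemdup() call. A userspace stand-in for that helper, for illustration only:

#include <stdlib.h>
#include <string.h>

/* Userspace equivalent of kmemdup(): allocate a buffer and copy 'len'
 * bytes of 'src' into it in one step; returns NULL on allocation failure. */
static void *memdup(const void *src, size_t len)
{
	void *dst = malloc(len);

	if (dst)
		memcpy(dst, src, len);
	return dst;
}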
diff --git a/drivers/target/iscsi/iscsi_target_nodeattrib.c b/drivers/target/iscsi/iscsi_target_nodeattrib.c
index aeafbe0cd7d1..b3c699c4fe8c 100644
--- a/drivers/target/iscsi/iscsi_target_nodeattrib.c
+++ b/drivers/target/iscsi/iscsi_target_nodeattrib.c
@@ -19,7 +19,6 @@
******************************************************************************/
#include <target/target_core_base.h>
-#include <target/target_core_transport.h>
#include "iscsi_target_core.h"
#include "iscsi_target_device.h"
@@ -135,7 +134,7 @@ extern int iscsit_na_nopin_timeout(
spin_lock_bh(&se_nacl->nacl_sess_lock);
se_sess = se_nacl->nacl_sess;
if (se_sess) {
- sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+ sess = se_sess->fabric_sess_ptr;
spin_lock(&sess->conn_lock);
list_for_each_entry(conn, &sess->sess_conn_list,
diff --git a/drivers/target/iscsi/iscsi_target_stat.c b/drivers/target/iscsi/iscsi_target_stat.c
index f1db83077e0a..421d6947dc64 100644
--- a/drivers/target/iscsi/iscsi_target_stat.c
+++ b/drivers/target/iscsi/iscsi_target_stat.c
@@ -23,7 +23,6 @@
#include <linux/export.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
-#include <target/target_core_transport.h>
#include <target/configfs_macros.h>
#include "iscsi_target_core.h"
@@ -746,7 +745,7 @@ static ssize_t iscsi_stat_sess_show_attr_node(
spin_lock_bh(&se_nacl->nacl_sess_lock);
se_sess = se_nacl->nacl_sess;
if (se_sess) {
- sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+ sess = se_sess->fabric_sess_ptr;
if (sess)
ret = snprintf(page, PAGE_SIZE, "%u\n",
sess->sess_ops->SessionType ? 0 : ISCSI_NODE_INDEX);
@@ -770,7 +769,7 @@ static ssize_t iscsi_stat_sess_show_attr_indx(
spin_lock_bh(&se_nacl->nacl_sess_lock);
se_sess = se_nacl->nacl_sess;
if (se_sess) {
- sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+ sess = se_sess->fabric_sess_ptr;
if (sess)
ret = snprintf(page, PAGE_SIZE, "%u\n",
sess->session_index);
@@ -794,7 +793,7 @@ static ssize_t iscsi_stat_sess_show_attr_cmd_pdus(
spin_lock_bh(&se_nacl->nacl_sess_lock);
se_sess = se_nacl->nacl_sess;
if (se_sess) {
- sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+ sess = se_sess->fabric_sess_ptr;
if (sess)
ret = snprintf(page, PAGE_SIZE, "%u\n", sess->cmd_pdus);
}
@@ -817,7 +816,7 @@ static ssize_t iscsi_stat_sess_show_attr_rsp_pdus(
spin_lock_bh(&se_nacl->nacl_sess_lock);
se_sess = se_nacl->nacl_sess;
if (se_sess) {
- sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+ sess = se_sess->fabric_sess_ptr;
if (sess)
ret = snprintf(page, PAGE_SIZE, "%u\n", sess->rsp_pdus);
}
@@ -840,7 +839,7 @@ static ssize_t iscsi_stat_sess_show_attr_txdata_octs(
spin_lock_bh(&se_nacl->nacl_sess_lock);
se_sess = se_nacl->nacl_sess;
if (se_sess) {
- sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+ sess = se_sess->fabric_sess_ptr;
if (sess)
ret = snprintf(page, PAGE_SIZE, "%llu\n",
(unsigned long long)sess->tx_data_octets);
@@ -864,7 +863,7 @@ static ssize_t iscsi_stat_sess_show_attr_rxdata_octs(
spin_lock_bh(&se_nacl->nacl_sess_lock);
se_sess = se_nacl->nacl_sess;
if (se_sess) {
- sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+ sess = se_sess->fabric_sess_ptr;
if (sess)
ret = snprintf(page, PAGE_SIZE, "%llu\n",
(unsigned long long)sess->rx_data_octets);
@@ -888,7 +887,7 @@ static ssize_t iscsi_stat_sess_show_attr_conn_digest_errors(
spin_lock_bh(&se_nacl->nacl_sess_lock);
se_sess = se_nacl->nacl_sess;
if (se_sess) {
- sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+ sess = se_sess->fabric_sess_ptr;
if (sess)
ret = snprintf(page, PAGE_SIZE, "%u\n",
sess->conn_digest_errors);
@@ -912,7 +911,7 @@ static ssize_t iscsi_stat_sess_show_attr_conn_timeout_errors(
spin_lock_bh(&se_nacl->nacl_sess_lock);
se_sess = se_nacl->nacl_sess;
if (se_sess) {
- sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+ sess = se_sess->fabric_sess_ptr;
if (sess)
ret = snprintf(page, PAGE_SIZE, "%u\n",
sess->conn_timeout_errors);
diff --git a/drivers/target/iscsi/iscsi_target_tmr.c b/drivers/target/iscsi/iscsi_target_tmr.c
index 490207eacde9..255ed35da815 100644
--- a/drivers/target/iscsi/iscsi_target_tmr.c
+++ b/drivers/target/iscsi/iscsi_target_tmr.c
@@ -21,7 +21,7 @@
#include <asm/unaligned.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
-#include <target/target_core_transport.h>
+#include <target/target_core_fabric.h>
#include "iscsi_target_core.h"
#include "iscsi_target_seq_pdu_list.h"
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index d4cf2cd25c44..879d8d0fa3fe 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -19,10 +19,8 @@
******************************************************************************/
#include <target/target_core_base.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>
-#include <target/target_core_tpg.h>
#include "iscsi_target_core.h"
#include "iscsi_target_erl0.h"
@@ -72,7 +70,7 @@ int iscsit_load_discovery_tpg(void)
ret = core_tpg_register(
&lio_target_fabric_configfs->tf_ops,
- NULL, &tpg->tpg_se_tpg, (void *)tpg,
+ NULL, &tpg->tpg_se_tpg, tpg,
TRANSPORT_TPG_TYPE_DISCOVERY);
if (ret < 0) {
kfree(tpg);
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 02348f727bd4..a05ca1c4f01c 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -22,9 +22,7 @@
#include <scsi/scsi_tcq.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_tmr.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>
#include "iscsi_target_core.h"
@@ -289,7 +287,7 @@ struct iscsi_cmd *iscsit_allocate_se_cmd_for_tmr(
}
se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd,
- (void *)cmd->tmr_req, tcm_function,
+ cmd->tmr_req, tcm_function,
GFP_KERNEL);
if (!se_cmd->se_tmr_req)
goto out;
@@ -1066,7 +1064,7 @@ static void iscsit_handle_nopin_response_timeout(unsigned long data)
if (tiqn) {
spin_lock_bh(&tiqn->sess_err_stats.lock);
strcpy(tiqn->sess_err_stats.last_sess_fail_rem_name,
- (void *)conn->sess->sess_ops->InitiatorName);
+ conn->sess->sess_ops->InitiatorName);
tiqn->sess_err_stats.last_sess_failure_type =
ISCSI_SESS_ERR_CXN_TIMEOUT;
tiqn->sess_err_stats.cxn_timeout_errors++;
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 3df1c9b8ae6b..c47ff7f59e57 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -33,14 +33,9 @@
#include <scsi/scsi_cmnd.h>
#include <target/target_core_base.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_fabric.h>
#include <target/target_core_fabric_configfs.h>
-#include <target/target_core_fabric_lib.h>
#include <target/target_core_configfs.h>
-#include <target/target_core_device.h>
-#include <target/target_core_tpg.h>
-#include <target/target_core_tmr.h>
#include "tcm_loop.h"
@@ -113,11 +108,9 @@ static struct se_cmd *tcm_loop_allocate_core_cmd(
scsi_bufflen(sc), sc->sc_data_direction, sam_task_attr,
&tl_cmd->tl_sense_buf[0]);
- /*
- * Signal BIDI usage with T_TASK(cmd)->t_tasks_bidi
- */
if (scsi_bidi_cmnd(sc))
- se_cmd->t_tasks_bidi = 1;
+ se_cmd->se_cmd_flags |= SCF_BIDI;
+
/*
* Locate the struct se_lun pointer and attach it to struct se_cmd
*/
@@ -148,27 +141,13 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
* Allocate the necessary tasks to complete the received CDB+data
*/
ret = transport_generic_allocate_tasks(se_cmd, sc->cmnd);
- if (ret == -ENOMEM) {
- /* Out of Resources */
- return PYX_TRANSPORT_LU_COMM_FAILURE;
- } else if (ret == -EINVAL) {
- /*
- * Handle case for SAM_STAT_RESERVATION_CONFLICT
- */
- if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
- /*
- * Otherwise, return SAM_STAT_CHECK_CONDITION and return
- * sense data.
- */
- return PYX_TRANSPORT_USE_SENSE_REASON;
- }
-
+ if (ret != 0)
+ return ret;
/*
* For BIDI commands, pass in the extra READ buffer
* to transport_generic_map_mem_to_cmd() below..
*/
- if (se_cmd->t_tasks_bidi) {
+ if (se_cmd->se_cmd_flags & SCF_BIDI) {
struct scsi_data_buffer *sdb = scsi_in(sc);
sgl_bidi = sdb->table.sgl;
@@ -194,12 +173,8 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
}
/* Tell the core about our preallocated memory */
- ret = transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc),
+ return transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc),
scsi_sg_count(sc), sgl_bidi, sgl_bidi_count);
- if (ret < 0)
- return PYX_TRANSPORT_LU_COMM_FAILURE;
-
- return 0;
}
/*
@@ -441,11 +416,11 @@ static struct scsi_host_template tcm_loop_driver_template = {
.queuecommand = tcm_loop_queuecommand,
.change_queue_depth = tcm_loop_change_queue_depth,
.eh_device_reset_handler = tcm_loop_device_reset,
- .can_queue = TL_SCSI_CAN_QUEUE,
+ .can_queue = 1024,
.this_id = -1,
- .sg_tablesize = TL_SCSI_SG_TABLESIZE,
- .cmd_per_lun = TL_SCSI_CMD_PER_LUN,
- .max_sectors = TL_SCSI_MAX_SECTORS,
+ .sg_tablesize = 256,
+ .cmd_per_lun = 1024,
+ .max_sectors = 0xFFFF,
.use_clustering = DISABLE_CLUSTERING,
.slave_alloc = tcm_loop_slave_alloc,
.slave_configure = tcm_loop_slave_configure,
@@ -584,8 +559,7 @@ static char *tcm_loop_get_fabric_name(void)
static u8 tcm_loop_get_fabric_proto_ident(struct se_portal_group *se_tpg)
{
- struct tcm_loop_tpg *tl_tpg =
- (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr;
+ struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
/*
* tl_proto_id is set at tcm_loop_configfs.c:tcm_loop_make_scsi_hba()
@@ -612,8 +586,7 @@ static u8 tcm_loop_get_fabric_proto_ident(struct se_portal_group *se_tpg)
static char *tcm_loop_get_endpoint_wwn(struct se_portal_group *se_tpg)
{
- struct tcm_loop_tpg *tl_tpg =
- (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr;
+ struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
/*
* Return the passed NAA identifier for the SAS Target Port
*/
@@ -622,8 +595,7 @@ static char *tcm_loop_get_endpoint_wwn(struct se_portal_group *se_tpg)
static u16 tcm_loop_get_tag(struct se_portal_group *se_tpg)
{
- struct tcm_loop_tpg *tl_tpg =
- (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr;
+ struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
/*
* This Tag is used when forming SCSI Name identifier in EVPD=1 0x83
* to represent the SCSI Target Port.
@@ -643,8 +615,7 @@ static u32 tcm_loop_get_pr_transport_id(
int *format_code,
unsigned char *buf)
{
- struct tcm_loop_tpg *tl_tpg =
- (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr;
+ struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
switch (tl_hba->tl_proto_id) {
@@ -673,8 +644,7 @@ static u32 tcm_loop_get_pr_transport_id_len(
struct t10_pr_registration *pr_reg,
int *format_code)
{
- struct tcm_loop_tpg *tl_tpg =
- (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr;
+ struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
switch (tl_hba->tl_proto_id) {
@@ -707,8 +677,7 @@ static char *tcm_loop_parse_pr_out_transport_id(
u32 *out_tid_len,
char **port_nexus_ptr)
{
- struct tcm_loop_tpg *tl_tpg =
- (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr;
+ struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
switch (tl_hba->tl_proto_id) {
@@ -1360,17 +1329,16 @@ void tcm_loop_drop_scsi_hba(
{
struct tcm_loop_hba *tl_hba = container_of(wwn,
struct tcm_loop_hba, tl_hba_wwn);
- int host_no = tl_hba->sh->host_no;
+
+ pr_debug("TCM_Loop_ConfigFS: Deallocating emulated Target"
+ " SAS Address: %s at Linux/SCSI Host ID: %d\n",
+ tl_hba->tl_wwn_address, tl_hba->sh->host_no);
/*
* Call device_unregister() on the original tl_hba->dev.
* tcm_loop_fabric_scsi.c:tcm_loop_release_adapter() will
* release *tl_hba;
*/
device_unregister(&tl_hba->dev);
-
- pr_debug("TCM_Loop_ConfigFS: Deallocated emulated Target"
- " SAS Address: %s at Linux/SCSI Host ID: %d\n",
- config_item_name(&wwn->wwn_group.cg_item), host_no);
}
/* Start items for tcm_loop_cit */
diff --git a/drivers/target/loopback/tcm_loop.h b/drivers/target/loopback/tcm_loop.h
index 6b76c7a22bb0..15a036441471 100644
--- a/drivers/target/loopback/tcm_loop.h
+++ b/drivers/target/loopback/tcm_loop.h
@@ -1,16 +1,7 @@
#define TCM_LOOP_VERSION "v2.1-rc1"
#define TL_WWN_ADDR_LEN 256
#define TL_TPGS_PER_HBA 32
-/*
- * Defaults for struct scsi_host_template tcm_loop_driver_template
- *
- * We use large can_queue and cmd_per_lun here and let TCM enforce
- * the underlying se_device_t->queue_depth.
- */
-#define TL_SCSI_CAN_QUEUE 1024
-#define TL_SCSI_CMD_PER_LUN 1024
-#define TL_SCSI_MAX_SECTORS 1024
-#define TL_SCSI_SG_TABLESIZE 256
+
/*
* Used in tcm_loop_driver_probe() for struct Scsi_Host->max_cmd_len
*/
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 88f2ad43ec8b..1b1edd14f4bf 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -32,13 +32,12 @@
#include <scsi/scsi_cmnd.h>
#include <target/target_core_base.h>
-#include <target/target_core_device.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>
+#include "target_core_internal.h"
#include "target_core_alua.h"
-#include "target_core_hba.h"
#include "target_core_ua.h"
static int core_alua_check_transition(int state, int *primary);
@@ -191,9 +190,10 @@ int target_emulate_set_target_port_groups(struct se_task *task)
int alua_access_state, primary = 0, rc;
u16 tg_pt_id, rtpi;
- if (!l_port)
- return PYX_TRANSPORT_LU_COMM_FAILURE;
-
+ if (!l_port) {
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
+ }
buf = transport_kmap_first_data_page(cmd);
/*
@@ -203,7 +203,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem;
if (!l_tg_pt_gp_mem) {
pr_err("Unable to access l_port->sep_alua_tg_pt_gp_mem\n");
- rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+ rc = -EINVAL;
goto out;
}
spin_lock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
@@ -211,7 +212,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
if (!l_tg_pt_gp) {
spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
pr_err("Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n");
- rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+ rc = -EINVAL;
goto out;
}
rc = (l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA);
@@ -220,7 +222,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
if (!rc) {
pr_debug("Unable to process SET_TARGET_PORT_GROUPS"
" while TPGS_EXPLICT_ALUA is disabled\n");
- rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+ rc = -EINVAL;
goto out;
}
@@ -245,7 +248,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
* REQUEST, and the additional sense code set to INVALID
* FIELD IN PARAMETER LIST.
*/
- rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ rc = -EINVAL;
goto out;
}
rc = -1;
@@ -298,7 +302,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
* throw an exception with ASCQ: INVALID_PARAMETER_LIST
*/
if (rc != 0) {
- rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ rc = -EINVAL;
goto out;
}
} else {
@@ -335,7 +340,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
* INVALID_PARAMETER_LIST
*/
if (rc != 0) {
- rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ rc = -EINVAL;
goto out;
}
}
@@ -1184,7 +1190,6 @@ void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
* struct t10_alua_lu_gp.
*/
spin_lock(&lu_gps_lock);
- atomic_set(&lu_gp->lu_gp_shutdown, 1);
list_del(&lu_gp->lu_gp_node);
alua_lu_gps_count--;
spin_unlock(&lu_gps_lock);
@@ -1438,7 +1443,6 @@ struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
tg_pt_gp_mem->tg_pt = port;
port->sep_alua_tg_pt_gp_mem = tg_pt_gp_mem;
- atomic_set(&port->sep_tg_pt_gp_active, 1);
return tg_pt_gp_mem;
}
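The target_core_alua.c hunks above (and the target_core_cdb.c ones that follow) drop the private PYX_TRANSPORT_* return codes in favour of the usual kernel convention: record the SCSI sense reason in the command and return a plain negative errno. A minimal sketch of that split between transport status and SCSI status — the struct and enum names here are illustrative:

#include <errno.h>

enum sense_reason { SENSE_NONE, SENSE_INVALID_PARAMETER_LIST };

struct cmd { enum sense_reason sense_reason; };

/* Illustrative only: "how the call failed" travels in the errno-style
 * return value, while "what to report to the initiator" is stored as a
 * sense reason in the command itself. */
static int check_parameter_list(struct cmd *cmd, int valid)
{
	if (!valid) {
		cmd->sense_reason = SENSE_INVALID_PARAMETER_LIST;
		return -EINVAL;
	}
	return 0;
}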
diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
index 683ba02b8247..2f2235edefff 100644
--- a/drivers/target/target_core_cdb.c
+++ b/drivers/target/target_core_cdb.c
@@ -29,10 +29,11 @@
#include <scsi/scsi.h>
#include <target/target_core_base.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
+
+#include "target_core_internal.h"
#include "target_core_ua.h"
-#include "target_core_cdb.h"
static void
target_fill_alua_data(struct se_port *port, unsigned char *buf)
@@ -94,6 +95,18 @@ target_emulate_inquiry_std(struct se_cmd *cmd)
buf[2] = dev->transport->get_device_rev(dev);
/*
+ * NORMACA and HISUP = 0, RESPONSE DATA FORMAT = 2
+ *
+ * SPC4 says:
+ * A RESPONSE DATA FORMAT field set to 2h indicates that the
+ * standard INQUIRY data is in the format defined in this
+ * standard. Response data format values less than 2h are
+ * obsolete. Response data format values greater than 2h are
+ * reserved.
+ */
+ buf[3] = 2;
+
+ /*
* Enable SCCS and TPGS fields for Emulated ALUA
*/
if (dev->se_sub_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED)
@@ -115,11 +128,9 @@ target_emulate_inquiry_std(struct se_cmd *cmd)
goto out;
}
- snprintf((unsigned char *)&buf[8], 8, "LIO-ORG");
- snprintf((unsigned char *)&buf[16], 16, "%s",
- &dev->se_sub_dev->t10_wwn.model[0]);
- snprintf((unsigned char *)&buf[32], 4, "%s",
- &dev->se_sub_dev->t10_wwn.revision[0]);
+ snprintf(&buf[8], 8, "LIO-ORG");
+ snprintf(&buf[16], 16, "%s", dev->se_sub_dev->t10_wwn.model);
+ snprintf(&buf[32], 4, "%s", dev->se_sub_dev->t10_wwn.revision);
buf[4] = 31; /* Set additional length to 31 */
out:
@@ -138,8 +149,7 @@ target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
SDF_EMULATED_VPD_UNIT_SERIAL) {
u32 unit_serial_len;
- unit_serial_len =
- strlen(&dev->se_sub_dev->t10_wwn.unit_serial[0]);
+ unit_serial_len = strlen(dev->se_sub_dev->t10_wwn.unit_serial);
unit_serial_len++; /* For NULL Terminator */
if (((len + 4) + unit_serial_len) > cmd->data_length) {
@@ -148,8 +158,8 @@ target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
buf[3] = (len & 0xff);
return 0;
}
- len += sprintf((unsigned char *)&buf[4], "%s",
- &dev->se_sub_dev->t10_wwn.unit_serial[0]);
+ len += sprintf(&buf[4], "%s",
+ dev->se_sub_dev->t10_wwn.unit_serial);
len++; /* Extra Byte for NULL Terminator */
buf[3] = len;
}
@@ -279,14 +289,13 @@ check_t10_vend_desc:
len += (prod_len + unit_serial_len);
goto check_port;
}
- id_len += sprintf((unsigned char *)&buf[off+12],
- "%s:%s", prod,
+ id_len += sprintf(&buf[off+12], "%s:%s", prod,
&dev->se_sub_dev->t10_wwn.unit_serial[0]);
}
buf[off] = 0x2; /* ASCII */
buf[off+1] = 0x1; /* T10 Vendor ID */
buf[off+2] = 0x0;
- memcpy((unsigned char *)&buf[off+4], "LIO-ORG", 8);
+ memcpy(&buf[off+4], "LIO-ORG", 8);
/* Extra Byte for NULL Terminator */
id_len++;
/* Identifier Length */
@@ -478,7 +487,7 @@ target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
if (cmd->data_length < 60)
return 0;
- buf[2] = 0x3c;
+ buf[3] = 0x3c;
/* Set HEADSUP, ORDSUP, SIMPSUP */
buf[5] = 0x07;
@@ -703,6 +712,7 @@ int target_emulate_inquiry(struct se_task *task)
if (cmd->data_length < 4) {
pr_err("SCSI Inquiry payload length: %u"
" too small for EVPD=1\n", cmd->data_length);
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
return -EINVAL;
}
@@ -719,6 +729,7 @@ int target_emulate_inquiry(struct se_task *task)
}
pr_err("Unknown VPD Code: 0x%02x\n", cdb[2]);
+ cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
ret = -EINVAL;
out_unmap:
@@ -969,7 +980,8 @@ int target_emulate_modesense(struct se_task *task)
default:
pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n",
cdb[2] & 0x3f, cdb[3]);
- return PYX_TRANSPORT_UNKNOWN_MODE_PAGE;
+ cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE;
+ return -EINVAL;
}
offset += length;
@@ -1027,7 +1039,8 @@ int target_emulate_request_sense(struct se_task *task)
if (cdb[1] & 0x01) {
pr_err("REQUEST_SENSE description emulation not"
" supported\n");
- return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -ENOSYS;
}
buf = transport_kmap_first_data_page(cmd);
@@ -1100,7 +1113,8 @@ int target_emulate_unmap(struct se_task *task)
if (!dev->transport->do_discard) {
pr_err("UNMAP emulation not supported for: %s\n",
dev->transport->name);
- return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+ return -ENOSYS;
}
/* First UNMAP block descriptor starts at 8 byte offset */
@@ -1157,7 +1171,8 @@ int target_emulate_write_same(struct se_task *task)
if (!dev->transport->do_discard) {
pr_err("WRITE_SAME emulation not supported"
" for: %s\n", dev->transport->name);
- return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+ return -ENOSYS;
}
if (cmd->t_task_cdb[0] == WRITE_SAME)
@@ -1193,11 +1208,13 @@ int target_emulate_write_same(struct se_task *task)
int target_emulate_synchronize_cache(struct se_task *task)
{
struct se_device *dev = task->task_se_cmd->se_dev;
+ struct se_cmd *cmd = task->task_se_cmd;
if (!dev->transport->do_sync_cache) {
pr_err("SYNCHRONIZE_CACHE emulation not supported"
" for: %s\n", dev->transport->name);
- return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+ return -ENOSYS;
}
dev->transport->do_sync_cache(task);
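Two of the target_core_cdb.c hunks above touch the standard INQUIRY payload: byte 3 is now set to 2 (RESPONSE DATA FORMAT, the only non-obsolete value per SPC-4) and byte 4 keeps the ADDITIONAL LENGTH of 31 expected for a 36-byte response. A standalone sketch of that 36-byte layout, with placeholder vendor, product, and revision strings:

#include <string.h>

/* Fill a minimal 36-byte standard INQUIRY response.  Byte offsets follow
 * SPC-4: [3] RESPONSE DATA FORMAT = 2, [4] ADDITIONAL LENGTH = 36 - 5 = 31,
 * [8..15] T10 vendor id, [16..31] product id, [32..35] revision. */
static void fill_std_inquiry(unsigned char buf[36])
{
	memset(buf, 0, 36);
	buf[2] = 0x05;                          /* version: SPC-3, as an example */
	buf[3] = 0x02;                          /* response data format */
	buf[4] = 31;                            /* additional length */
	memcpy(&buf[8],  "VENDOR  ", 8);        /* space-padded ASCII fields */
	memcpy(&buf[16], "PRODUCT         ", 16);
	memcpy(&buf[32], "0001", 4);
}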
diff --git a/drivers/target/target_core_cdb.h b/drivers/target/target_core_cdb.h
deleted file mode 100644
index ad6b1e393001..000000000000
--- a/drivers/target/target_core_cdb.h
+++ /dev/null
@@ -1,14 +0,0 @@
-#ifndef TARGET_CORE_CDB_H
-#define TARGET_CORE_CDB_H
-
-int target_emulate_inquiry(struct se_task *task);
-int target_emulate_readcapacity(struct se_task *task);
-int target_emulate_readcapacity_16(struct se_task *task);
-int target_emulate_modesense(struct se_task *task);
-int target_emulate_request_sense(struct se_task *task);
-int target_emulate_unmap(struct se_task *task);
-int target_emulate_write_same(struct se_task *task);
-int target_emulate_synchronize_cache(struct se_task *task);
-int target_emulate_noop(struct se_task *task);
-
-#endif /* TARGET_CORE_CDB_H */
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index e0c1e8a8dd4e..0955bb8979fb 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -39,18 +39,16 @@
#include <linux/spinlock.h>
#include <target/target_core_base.h>
-#include <target/target_core_device.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
#include <target/target_core_fabric_configfs.h>
#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>
+#include "target_core_internal.h"
#include "target_core_alua.h"
-#include "target_core_hba.h"
#include "target_core_pr.h"
#include "target_core_rd.h"
-#include "target_core_stat.h"
extern struct t10_alua_lu_gp *default_lu_gp;
@@ -67,9 +65,6 @@ static struct config_group target_core_hbagroup;
static struct config_group alua_group;
static struct config_group alua_lu_gps_group;
-static DEFINE_SPINLOCK(se_device_lock);
-static LIST_HEAD(se_dev_list);
-
static inline struct se_hba *
item_to_hba(struct config_item *item)
{
@@ -1455,7 +1450,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
return -ENOMEM;
orig = opts;
- while ((ptr = strsep(&opts, ",")) != NULL) {
+ while ((ptr = strsep(&opts, ",\n")) != NULL) {
if (!*ptr)
continue;
@@ -1634,7 +1629,7 @@ static struct config_item_type target_core_dev_pr_cit = {
static ssize_t target_core_show_dev_info(void *p, char *page)
{
- struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
+ struct se_subsystem_dev *se_dev = p;
struct se_hba *hba = se_dev->se_dev_hba;
struct se_subsystem_api *t = hba->transport;
int bl = 0;
@@ -1662,7 +1657,7 @@ static ssize_t target_core_store_dev_control(
const char *page,
size_t count)
{
- struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
+ struct se_subsystem_dev *se_dev = p;
struct se_hba *hba = se_dev->se_dev_hba;
struct se_subsystem_api *t = hba->transport;
@@ -1685,7 +1680,7 @@ static struct target_core_configfs_attribute target_core_attr_dev_control = {
static ssize_t target_core_show_dev_alias(void *p, char *page)
{
- struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
+ struct se_subsystem_dev *se_dev = p;
if (!(se_dev->su_dev_flags & SDF_USING_ALIAS))
return 0;
@@ -1698,7 +1693,7 @@ static ssize_t target_core_store_dev_alias(
const char *page,
size_t count)
{
- struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
+ struct se_subsystem_dev *se_dev = p;
struct se_hba *hba = se_dev->se_dev_hba;
ssize_t read_bytes;
@@ -1713,6 +1708,9 @@ static ssize_t target_core_store_dev_alias(
read_bytes = snprintf(&se_dev->se_dev_alias[0], SE_DEV_ALIAS_LEN,
"%s", page);
+ if (se_dev->se_dev_alias[read_bytes - 1] == '\n')
+ se_dev->se_dev_alias[read_bytes - 1] = '\0';
+
pr_debug("Target_Core_ConfigFS: %s/%s set alias: %s\n",
config_item_name(&hba->hba_group.cg_item),
config_item_name(&se_dev->se_dev_group.cg_item),
@@ -1731,7 +1729,7 @@ static struct target_core_configfs_attribute target_core_attr_dev_alias = {
static ssize_t target_core_show_dev_udev_path(void *p, char *page)
{
- struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
+ struct se_subsystem_dev *se_dev = p;
if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH))
return 0;
@@ -1744,7 +1742,7 @@ static ssize_t target_core_store_dev_udev_path(
const char *page,
size_t count)
{
- struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
+ struct se_subsystem_dev *se_dev = p;
struct se_hba *hba = se_dev->se_dev_hba;
ssize_t read_bytes;
@@ -1759,6 +1757,9 @@ static ssize_t target_core_store_dev_udev_path(
read_bytes = snprintf(&se_dev->se_dev_udev_path[0], SE_UDEV_PATH_LEN,
"%s", page);
+ if (se_dev->se_dev_udev_path[read_bytes - 1] == '\n')
+ se_dev->se_dev_udev_path[read_bytes - 1] = '\0';
+
pr_debug("Target_Core_ConfigFS: %s/%s set udev_path: %s\n",
config_item_name(&hba->hba_group.cg_item),
config_item_name(&se_dev->se_dev_group.cg_item),
@@ -1780,7 +1781,7 @@ static ssize_t target_core_store_dev_enable(
const char *page,
size_t count)
{
- struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
+ struct se_subsystem_dev *se_dev = p;
struct se_device *dev;
struct se_hba *hba = se_dev->se_dev_hba;
struct se_subsystem_api *t = hba->transport;
@@ -1825,7 +1826,7 @@ static struct target_core_configfs_attribute target_core_attr_dev_enable = {
static ssize_t target_core_show_alua_lu_gp(void *p, char *page)
{
struct se_device *dev;
- struct se_subsystem_dev *su_dev = (struct se_subsystem_dev *)p;
+ struct se_subsystem_dev *su_dev = p;
struct config_item *lu_ci;
struct t10_alua_lu_gp *lu_gp;
struct t10_alua_lu_gp_member *lu_gp_mem;
@@ -1863,7 +1864,7 @@ static ssize_t target_core_store_alua_lu_gp(
size_t count)
{
struct se_device *dev;
- struct se_subsystem_dev *su_dev = (struct se_subsystem_dev *)p;
+ struct se_subsystem_dev *su_dev = p;
struct se_hba *hba = su_dev->se_dev_hba;
struct t10_alua_lu_gp *lu_gp = NULL, *lu_gp_new = NULL;
struct t10_alua_lu_gp_member *lu_gp_mem;
@@ -2741,7 +2742,6 @@ static struct config_group *target_core_make_subdev(
" struct se_subsystem_dev\n");
goto unlock;
}
- INIT_LIST_HEAD(&se_dev->se_dev_node);
INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
@@ -2777,9 +2777,6 @@ static struct config_group *target_core_make_subdev(
" from allocate_virtdevice()\n");
goto out;
}
- spin_lock(&se_device_lock);
- list_add_tail(&se_dev->se_dev_node, &se_dev_list);
- spin_unlock(&se_device_lock);
config_group_init_type_name(&se_dev->se_dev_group, name,
&target_core_dev_cit);
@@ -2874,10 +2871,6 @@ static void target_core_drop_subdev(
mutex_lock(&hba->hba_access_mutex);
t = hba->transport;
- spin_lock(&se_device_lock);
- list_del(&se_dev->se_dev_node);
- spin_unlock(&se_device_lock);
-
dev_stat_grp = &se_dev->dev_stat_grps.stat_group;
for (i = 0; dev_stat_grp->default_groups[i]; i++) {
df_item = &dev_stat_grp->default_groups[i]->cg_item;
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index ba5edec2c5f8..0c5992f0d946 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -42,13 +42,11 @@
#include <scsi/scsi_device.h>
#include <target/target_core_base.h>
-#include <target/target_core_device.h>
-#include <target/target_core_tpg.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
+#include "target_core_internal.h"
#include "target_core_alua.h"
-#include "target_core_hba.h"
#include "target_core_pr.h"
#include "target_core_ua.h"
@@ -104,7 +102,6 @@ int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
se_cmd->se_lun = deve->se_lun;
se_cmd->pr_res_key = deve->pr_res_key;
se_cmd->orig_fe_lun = unpacked_lun;
- se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
}
spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
@@ -137,7 +134,6 @@ int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
se_lun = &se_sess->se_tpg->tpg_virt_lun0;
se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
se_cmd->orig_fe_lun = 0;
- se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
}
/*
@@ -200,7 +196,6 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
se_lun = deve->se_lun;
se_cmd->pr_res_key = deve->pr_res_key;
se_cmd->orig_fe_lun = unpacked_lun;
- se_cmd->se_orig_obj_ptr = se_cmd->se_dev;
}
spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
@@ -708,7 +703,7 @@ done:
se_task->task_scsi_status = GOOD;
transport_complete_task(se_task, 1);
- return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+ return 0;
}
/* se_release_device_for_hba():
@@ -957,8 +952,12 @@ int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
return -EINVAL;
}
- pr_err("dpo_emulated not supported\n");
- return -EINVAL;
+ if (flag) {
+ pr_err("dpo_emulated not supported\n");
+ return -EINVAL;
+ }
+
+ return 0;
}
int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
@@ -968,7 +967,7 @@ int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
return -EINVAL;
}
- if (dev->transport->fua_write_emulated == 0) {
+ if (flag && dev->transport->fua_write_emulated == 0) {
pr_err("fua_write_emulated not supported\n");
return -EINVAL;
}
@@ -985,8 +984,12 @@ int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
return -EINVAL;
}
- pr_err("ua read emulated not supported\n");
- return -EINVAL;
+ if (flag) {
+ pr_err("ua read emulated not supported\n");
+ return -EINVAL;
+ }
+
+ return 0;
}
int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
@@ -995,7 +998,7 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
pr_err("Illegal value %d\n", flag);
return -EINVAL;
}
- if (dev->transport->write_cache_emulated == 0) {
+ if (flag && dev->transport->write_cache_emulated == 0) {
pr_err("write_cache_emulated not supported\n");
return -EINVAL;
}
@@ -1056,7 +1059,7 @@ int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
* We expect this value to be non-zero when generic Block Layer
* Discard supported is detected iblock_create_virtdevice().
*/
- if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
+ if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
pr_err("Generic Block Discard not supported\n");
return -ENOSYS;
}
@@ -1077,7 +1080,7 @@ int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
* We expect this value to be non-zero when generic Block Layer
* Discard supported is detected iblock_create_virtdevice().
*/
- if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
+ if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
pr_err("Generic Block Discard not supported\n");
return -ENOSYS;
}
@@ -1129,8 +1132,6 @@ int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
*/
int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
{
- u32 orig_queue_depth = dev->queue_depth;
-
if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
pr_err("dev[%p]: Unable to change SE Device TCQ while"
" dev_export_obj: %d count exists\n", dev,
@@ -1164,11 +1165,6 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
}
dev->se_sub_dev->se_dev_attrib.queue_depth = dev->queue_depth = queue_depth;
- if (queue_depth > orig_queue_depth)
- atomic_add(queue_depth - orig_queue_depth, &dev->depth_left);
- else if (queue_depth < orig_queue_depth)
- atomic_sub(orig_queue_depth - queue_depth, &dev->depth_left);
-
pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n",
dev, queue_depth);
return 0;
@@ -1587,7 +1583,6 @@ int core_dev_setup_virtual_lun0(void)
ret = -ENOMEM;
goto out;
}
- INIT_LIST_HEAD(&se_dev->se_dev_node);
INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
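Several of the se_dev_set_emulate_*() hunks above add a "flag &&" guard so that writing 0 to an attribute the backend cannot emulate succeeds as a no-op, while writing 1 still fails. A small sketch of that setter pattern — the struct and field names are illustrative:

#include <errno.h>

struct dev { int backend_supports_fua_write; int emulate_fua_write; };

/* Disabling an unsupported feature is harmless and should succeed;
 * only enabling it has to be rejected. */
static int set_emulate_fua_write(struct dev *dev, int flag)
{
	if (flag != 0 && flag != 1)
		return -EINVAL;
	if (flag && !dev->backend_supports_fua_write)
		return -EINVAL;
	dev->emulate_fua_write = flag;
	return 0;
}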
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index 09b6f8729f91..4f77cce22646 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -36,18 +36,14 @@
#include <linux/configfs.h>
#include <target/target_core_base.h>
-#include <target/target_core_device.h>
-#include <target/target_core_tpg.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_fabric.h>
#include <target/target_core_fabric_configfs.h>
#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>
+#include "target_core_internal.h"
#include "target_core_alua.h"
-#include "target_core_hba.h"
#include "target_core_pr.h"
-#include "target_core_stat.h"
#define TF_CIT_SETUP(_name, _item_ops, _group_ops, _attrs) \
static void target_fabric_setup_##_name##_cit(struct target_fabric_configfs *tf) \
diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c
index ec4249be617e..283a36e464e6 100644
--- a/drivers/target/target_core_fabric_lib.c
+++ b/drivers/target/target_core_fabric_lib.c
@@ -34,13 +34,10 @@
#include <scsi/scsi_cmnd.h>
#include <target/target_core_base.h>
-#include <target/target_core_device.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_lib.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>
-#include "target_core_hba.h"
+#include "target_core_internal.h"
#include "target_core_pr.h"
/*
@@ -402,7 +399,7 @@ char *iscsi_parse_pr_out_transport_id(
add_len = ((buf[2] >> 8) & 0xff);
add_len |= (buf[3] & 0xff);
- tid_len = strlen((char *)&buf[4]);
+ tid_len = strlen(&buf[4]);
tid_len += 4; /* Add four bytes for iSCSI Transport ID header */
tid_len += 1; /* Add one byte for NULL terminator */
padding = ((-tid_len) & 3);
@@ -423,11 +420,11 @@ char *iscsi_parse_pr_out_transport_id(
* format.
*/
if (format_code == 0x40) {
- p = strstr((char *)&buf[4], ",i,0x");
+ p = strstr(&buf[4], ",i,0x");
if (!p) {
pr_err("Unable to locate \",i,0x\" seperator"
" for Initiator port identifier: %s\n",
- (char *)&buf[4]);
+ &buf[4]);
return NULL;
}
*p = '\0'; /* Terminate iSCSI Name */
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 67cd6fe05bfa..7ed58e2df791 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -37,8 +37,7 @@
#include <scsi/scsi_host.h>
#include <target/target_core_base.h>
-#include <target/target_core_device.h>
-#include <target/target_core_transport.h>
+#include <target/target_core_backend.h>
#include "target_core_file.h"
@@ -86,7 +85,7 @@ static void fd_detach_hba(struct se_hba *hba)
static void *fd_allocate_virtdevice(struct se_hba *hba, const char *name)
{
struct fd_dev *fd_dev;
- struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr;
+ struct fd_host *fd_host = hba->hba_ptr;
fd_dev = kzalloc(sizeof(struct fd_dev), GFP_KERNEL);
if (!fd_dev) {
@@ -114,8 +113,8 @@ static struct se_device *fd_create_virtdevice(
struct se_device *dev;
struct se_dev_limits dev_limits;
struct queue_limits *limits;
- struct fd_dev *fd_dev = (struct fd_dev *) p;
- struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr;
+ struct fd_dev *fd_dev = p;
+ struct fd_host *fd_host = hba->hba_ptr;
mm_segment_t old_fs;
struct file *file;
struct inode *inode = NULL;
@@ -240,7 +239,7 @@ fail:
*/
static void fd_free_device(void *p)
{
- struct fd_dev *fd_dev = (struct fd_dev *) p;
+ struct fd_dev *fd_dev = p;
if (fd_dev->fd_file) {
filp_close(fd_dev->fd_file, NULL);
@@ -289,9 +288,9 @@ static int fd_do_readv(struct se_task *task)
return -ENOMEM;
}
- for (i = 0; i < task->task_sg_nents; i++) {
- iov[i].iov_len = sg[i].length;
- iov[i].iov_base = sg_virt(&sg[i]);
+ for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
+ iov[i].iov_len = sg->length;
+ iov[i].iov_base = sg_virt(sg);
}
old_fs = get_fs();
@@ -342,9 +341,9 @@ static int fd_do_writev(struct se_task *task)
return -ENOMEM;
}
- for (i = 0; i < task->task_sg_nents; i++) {
- iov[i].iov_len = sg[i].length;
- iov[i].iov_base = sg_virt(&sg[i]);
+ for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
+ iov[i].iov_len = sg->length;
+ iov[i].iov_base = sg_virt(sg);
}
old_fs = get_fs();
@@ -438,7 +437,7 @@ static int fd_do_task(struct se_task *task)
if (ret > 0 &&
dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 &&
dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
- cmd->t_tasks_fua) {
+ (cmd->se_cmd_flags & SCF_FUA)) {
/*
* We might need to be a bit smarter here
* and return some sense data to let the initiator
@@ -449,13 +448,15 @@ static int fd_do_task(struct se_task *task)
}
- if (ret < 0)
+ if (ret < 0) {
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
return ret;
+ }
if (ret) {
task->task_scsi_status = GOOD;
transport_complete_task(task, 1);
}
- return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+ return 0;
}
/* fd_free_task(): (Part of se_subsystem_api_t template)
@@ -496,7 +497,7 @@ static ssize_t fd_set_configfs_dev_params(
orig = opts;
- while ((ptr = strsep(&opts, ",")) != NULL) {
+ while ((ptr = strsep(&opts, ",\n")) != NULL) {
if (!*ptr)
continue;
@@ -557,7 +558,7 @@ out:
static ssize_t fd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev)
{
- struct fd_dev *fd_dev = (struct fd_dev *) se_dev->se_dev_su_ptr;
+ struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
pr_err("Missing fd_dev_name=\n");
diff --git a/drivers/target/target_core_hba.c b/drivers/target/target_core_hba.c
index c68019d6c406..3dd1bd4b6f71 100644
--- a/drivers/target/target_core_hba.c
+++ b/drivers/target/target_core_hba.c
@@ -37,11 +37,10 @@
#include <net/tcp.h>
#include <target/target_core_base.h>
-#include <target/target_core_device.h>
-#include <target/target_core_tpg.h>
-#include <target/target_core_transport.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
-#include "target_core_hba.h"
+#include "target_core_internal.h"
static LIST_HEAD(subsystem_list);
static DEFINE_MUTEX(subsystem_mutex);
diff --git a/drivers/target/target_core_hba.h b/drivers/target/target_core_hba.h
deleted file mode 100644
index bb0fea5f730c..000000000000
--- a/drivers/target/target_core_hba.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef TARGET_CORE_HBA_H
-#define TARGET_CORE_HBA_H
-
-extern struct se_hba *core_alloc_hba(const char *, u32, u32);
-extern int core_delete_hba(struct se_hba *);
-
-#endif /* TARGET_CORE_HBA_H */
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 7698efe29262..cc8e6b58ef20 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -42,8 +42,7 @@
#include <scsi/scsi_host.h>
#include <target/target_core_base.h>
-#include <target/target_core_device.h>
-#include <target/target_core_transport.h>
+#include <target/target_core_backend.h>
#include "target_core_iblock.h"
@@ -391,7 +390,7 @@ static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
orig = opts;
- while ((ptr = strsep(&opts, ",")) != NULL) {
+ while ((ptr = strsep(&opts, ",\n")) != NULL) {
if (!*ptr)
continue;
@@ -465,7 +464,7 @@ static ssize_t iblock_show_configfs_dev_params(
if (bd) {
bl += sprintf(b + bl, "Major: %d Minor: %d %s\n",
MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ?
- "" : (bd->bd_holder == (struct iblock_dev *)ibd) ?
+ "" : (bd->bd_holder == ibd) ?
"CLAIMED: IBLOCK" : "CLAIMED: OS");
} else {
bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
@@ -531,7 +530,7 @@ static int iblock_do_task(struct se_task *task)
*/
if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 ||
(dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
- task->task_se_cmd->t_tasks_fua))
+ (cmd->se_cmd_flags & SCF_FUA)))
rw = WRITE_FUA;
else
rw = WRITE;
@@ -554,12 +553,15 @@ static int iblock_do_task(struct se_task *task)
else {
pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
" %u\n", dev->se_sub_dev->se_dev_attrib.block_size);
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -ENOSYS;
}
bio = iblock_get_bio(task, block_lba, sg_num);
- if (!bio)
- return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+ if (!bio) {
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -ENOMEM;
+ }
bio_list_init(&list);
bio_list_add(&list, bio);
@@ -588,12 +590,13 @@ static int iblock_do_task(struct se_task *task)
submit_bio(rw, bio);
blk_finish_plug(&plug);
- return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+ return 0;
fail:
while ((bio = bio_list_pop(&list)))
bio_put(bio);
- return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -ENOMEM;
}
static u32 iblock_get_device_rev(struct se_device *dev)
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
new file mode 100644
index 000000000000..26f135e94f6e
--- /dev/null
+++ b/drivers/target/target_core_internal.h
@@ -0,0 +1,123 @@
+#ifndef TARGET_CORE_INTERNAL_H
+#define TARGET_CORE_INTERNAL_H
+
+/* target_core_alua.c */
+extern struct t10_alua_lu_gp *default_lu_gp;
+
+/* target_core_cdb.c */
+int target_emulate_inquiry(struct se_task *task);
+int target_emulate_readcapacity(struct se_task *task);
+int target_emulate_readcapacity_16(struct se_task *task);
+int target_emulate_modesense(struct se_task *task);
+int target_emulate_request_sense(struct se_task *task);
+int target_emulate_unmap(struct se_task *task);
+int target_emulate_write_same(struct se_task *task);
+int target_emulate_synchronize_cache(struct se_task *task);
+int target_emulate_noop(struct se_task *task);
+
+/* target_core_device.c */
+struct se_dev_entry *core_get_se_deve_from_rtpi(struct se_node_acl *, u16);
+int core_free_device_list_for_node(struct se_node_acl *,
+ struct se_portal_group *);
+void core_dec_lacl_count(struct se_node_acl *, struct se_cmd *);
+void core_update_device_list_access(u32, u32, struct se_node_acl *);
+int core_update_device_list_for_node(struct se_lun *, struct se_lun_acl *,
+ u32, u32, struct se_node_acl *, struct se_portal_group *, int);
+void core_clear_lun_from_tpg(struct se_lun *, struct se_portal_group *);
+int core_dev_export(struct se_device *, struct se_portal_group *,
+ struct se_lun *);
+void core_dev_unexport(struct se_device *, struct se_portal_group *,
+ struct se_lun *);
+int target_report_luns(struct se_task *);
+void se_release_device_for_hba(struct se_device *);
+void se_release_vpd_for_dev(struct se_device *);
+int se_free_virtual_device(struct se_device *, struct se_hba *);
+int se_dev_check_online(struct se_device *);
+int se_dev_check_shutdown(struct se_device *);
+void se_dev_set_default_attribs(struct se_device *, struct se_dev_limits *);
+int se_dev_set_task_timeout(struct se_device *, u32);
+int se_dev_set_max_unmap_lba_count(struct se_device *, u32);
+int se_dev_set_max_unmap_block_desc_count(struct se_device *, u32);
+int se_dev_set_unmap_granularity(struct se_device *, u32);
+int se_dev_set_unmap_granularity_alignment(struct se_device *, u32);
+int se_dev_set_emulate_dpo(struct se_device *, int);
+int se_dev_set_emulate_fua_write(struct se_device *, int);
+int se_dev_set_emulate_fua_read(struct se_device *, int);
+int se_dev_set_emulate_write_cache(struct se_device *, int);
+int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *, int);
+int se_dev_set_emulate_tas(struct se_device *, int);
+int se_dev_set_emulate_tpu(struct se_device *, int);
+int se_dev_set_emulate_tpws(struct se_device *, int);
+int se_dev_set_enforce_pr_isids(struct se_device *, int);
+int se_dev_set_is_nonrot(struct se_device *, int);
+int se_dev_set_emulate_rest_reord(struct se_device *dev, int);
+int se_dev_set_queue_depth(struct se_device *, u32);
+int se_dev_set_max_sectors(struct se_device *, u32);
+int se_dev_set_optimal_sectors(struct se_device *, u32);
+int se_dev_set_block_size(struct se_device *, u32);
+struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_hba *,
+ struct se_device *, u32);
+int core_dev_del_lun(struct se_portal_group *, u32);
+struct se_lun *core_get_lun_from_tpg(struct se_portal_group *, u32);
+struct se_lun_acl *core_dev_init_initiator_node_lun_acl(struct se_portal_group *,
+ u32, char *, int *);
+int core_dev_add_initiator_node_lun_acl(struct se_portal_group *,
+ struct se_lun_acl *, u32, u32);
+int core_dev_del_initiator_node_lun_acl(struct se_portal_group *,
+ struct se_lun *, struct se_lun_acl *);
+void core_dev_free_initiator_node_lun_acl(struct se_portal_group *,
+ struct se_lun_acl *lacl);
+int core_dev_setup_virtual_lun0(void);
+void core_dev_release_virtual_lun0(void);
+
+/* target_core_hba.c */
+struct se_hba *core_alloc_hba(const char *, u32, u32);
+int core_delete_hba(struct se_hba *);
+
+/* target_core_tmr.c */
+int core_tmr_lun_reset(struct se_device *, struct se_tmr_req *,
+ struct list_head *, struct se_cmd *);
+
+/* target_core_tpg.c */
+extern struct se_device *g_lun0_dev;
+
+struct se_node_acl *__core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
+ const char *);
+struct se_node_acl *core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
+ unsigned char *);
+void core_tpg_add_node_to_devs(struct se_node_acl *, struct se_portal_group *);
+void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *);
+struct se_lun *core_tpg_pre_addlun(struct se_portal_group *, u32);
+int core_tpg_post_addlun(struct se_portal_group *, struct se_lun *,
+ u32, void *);
+struct se_lun *core_tpg_pre_dellun(struct se_portal_group *, u32, int *);
+int core_tpg_post_dellun(struct se_portal_group *, struct se_lun *);
+
+/* target_core_transport.c */
+extern struct kmem_cache *se_tmr_req_cache;
+
+int init_se_kmem_caches(void);
+void release_se_kmem_caches(void);
+u32 scsi_get_new_index(scsi_index_t);
+void transport_subsystem_check_init(void);
+void transport_cmd_finish_abort(struct se_cmd *, int);
+void __transport_remove_task_from_execute_queue(struct se_task *,
+ struct se_device *);
+unsigned char *transport_dump_cmd_direction(struct se_cmd *);
+void transport_dump_dev_state(struct se_device *, char *, int *);
+void transport_dump_dev_info(struct se_device *, struct se_lun *,
+ unsigned long long, char *, int *);
+void transport_dump_vpd_proto_id(struct t10_vpd *, unsigned char *, int);
+int transport_dump_vpd_assoc(struct t10_vpd *, unsigned char *, int);
+int transport_dump_vpd_ident_type(struct t10_vpd *, unsigned char *, int);
+int transport_dump_vpd_ident(struct t10_vpd *, unsigned char *, int);
+bool target_stop_task(struct se_task *task, unsigned long *flags);
+int transport_clear_lun_from_sessions(struct se_lun *);
+void transport_send_task_abort(struct se_cmd *);
+
+/* target_core_stat.c */
+void target_stat_setup_dev_default_groups(struct se_subsystem_dev *);
+void target_stat_setup_port_default_groups(struct se_lun *);
+void target_stat_setup_mappedlun_default_groups(struct se_lun_acl *);
+
+#endif /* TARGET_CORE_INTERNAL_H */
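The new target_core_internal.h collects the prototypes that were previously spread across per-topic private headers (target_core_hba.h and target_core_stat.h are deleted in this same series), so each core file carries one private include alongside the public target/ headers. Roughly, the include block of a target-core source file now looks like the sketch below; the exact set of public headers varies per file:

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"	/* replaces target_core_hba.h, target_core_stat.h, ... */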
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 5a4ebfc3a54f..429ad7291664 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -33,14 +33,11 @@
#include <asm/unaligned.h>
#include <target/target_core_base.h>
-#include <target/target_core_device.h>
-#include <target/target_core_tmr.h>
-#include <target/target_core_tpg.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>
-#include "target_core_hba.h"
+#include "target_core_internal.h"
#include "target_core_pr.h"
#include "target_core_ua.h"
@@ -191,7 +188,7 @@ static int target_check_scsi2_reservation_conflict(struct se_cmd *cmd, int *ret)
pr_err("Received legacy SPC-2 RESERVE/RELEASE"
" while active SPC-3 registrations exist,"
" returning RESERVATION_CONFLICT\n");
- *ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
return true;
}
@@ -252,7 +249,8 @@ int target_scsi2_reservation_reserve(struct se_task *task)
(cmd->t_task_cdb[1] & 0x02)) {
pr_err("LongIO and Obselete Bits set, returning"
" ILLEGAL_REQUEST\n");
- ret = PYX_TRANSPORT_ILLEGAL_REQUEST;
+ cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+ ret = -EINVAL;
goto out;
}
/*
@@ -277,7 +275,8 @@ int target_scsi2_reservation_reserve(struct se_task *task)
" from %s \n", cmd->se_lun->unpacked_lun,
cmd->se_deve->mapped_lun,
sess->se_node_acl->initiatorname);
- ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ ret = -EINVAL;
goto out_unlock;
}
@@ -1510,7 +1509,8 @@ static int core_scsi3_decode_spec_i_port(
tidh_new = kzalloc(sizeof(struct pr_transport_id_holder), GFP_KERNEL);
if (!tidh_new) {
pr_err("Unable to allocate tidh_new\n");
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
INIT_LIST_HEAD(&tidh_new->dest_list);
tidh_new->dest_tpg = tpg;
@@ -1522,7 +1522,8 @@ static int core_scsi3_decode_spec_i_port(
sa_res_key, all_tg_pt, aptpl);
if (!local_pr_reg) {
kfree(tidh_new);
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -ENOMEM;
}
tidh_new->dest_pr_reg = local_pr_reg;
/*
@@ -1548,7 +1549,8 @@ static int core_scsi3_decode_spec_i_port(
pr_err("SPC-3 PR: Illegal tpdl: %u + 28 byte header"
" does not equal CDB data_length: %u\n", tpdl,
cmd->data_length);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
/*
@@ -1598,7 +1600,9 @@ static int core_scsi3_decode_spec_i_port(
" for tmp_tpg\n");
atomic_dec(&tmp_tpg->tpg_pr_ref_count);
smp_mb__after_atomic_dec();
- ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ ret = -EINVAL;
goto out;
}
/*
@@ -1628,7 +1632,9 @@ static int core_scsi3_decode_spec_i_port(
atomic_dec(&dest_node_acl->acl_pr_ref_count);
smp_mb__after_atomic_dec();
core_scsi3_tpg_undepend_item(tmp_tpg);
- ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ ret = -EINVAL;
goto out;
}
@@ -1646,7 +1652,8 @@ static int core_scsi3_decode_spec_i_port(
if (!dest_tpg) {
pr_err("SPC-3 PR SPEC_I_PT: Unable to locate"
" dest_tpg\n");
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
#if 0
@@ -1660,7 +1667,8 @@ static int core_scsi3_decode_spec_i_port(
" %u for Transport ID: %s\n", tid_len, ptr);
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
/*
@@ -1678,7 +1686,8 @@ static int core_scsi3_decode_spec_i_port(
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
@@ -1690,7 +1699,9 @@ static int core_scsi3_decode_spec_i_port(
smp_mb__after_atomic_dec();
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
- ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ ret = -EINVAL;
goto out;
}
#if 0
@@ -1727,7 +1738,9 @@ static int core_scsi3_decode_spec_i_port(
core_scsi3_lunacl_undepend_item(dest_se_deve);
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
- ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ ret = -ENOMEM;
goto out;
}
INIT_LIST_HEAD(&tidh_new->dest_list);
@@ -1759,7 +1772,8 @@ static int core_scsi3_decode_spec_i_port(
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
kfree(tidh_new);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
tidh_new->dest_pr_reg = dest_pr_reg;
@@ -2098,7 +2112,8 @@ static int core_scsi3_emulate_pro_register(
if (!se_sess || !se_lun) {
pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
se_tpg = se_sess->se_tpg;
se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
@@ -2117,13 +2132,14 @@ static int core_scsi3_emulate_pro_register(
if (res_key) {
pr_warn("SPC-3 PR: Reservation Key non-zero"
" for SA REGISTER, returning CONFLICT\n");
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
/*
* Do nothing but return GOOD status.
*/
if (!sa_res_key)
- return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+ return 0;
if (!spec_i_pt) {
/*
@@ -2138,7 +2154,8 @@ static int core_scsi3_emulate_pro_register(
if (ret != 0) {
pr_err("Unable to allocate"
" struct t10_pr_registration\n");
- return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ return -EINVAL;
}
} else {
/*
@@ -2197,14 +2214,16 @@ static int core_scsi3_emulate_pro_register(
" 0x%016Lx\n", res_key,
pr_reg->pr_res_key);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
}
if (spec_i_pt) {
pr_err("SPC-3 PR UNREGISTER: SPEC_I_PT"
" set while sa_res_key=0\n");
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ return -EINVAL;
}
/*
* An existing ALL_TG_PT=1 registration being released
@@ -2215,7 +2234,8 @@ static int core_scsi3_emulate_pro_register(
" registration exists, but ALL_TG_PT=1 bit not"
" present in received PROUT\n");
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -EINVAL;
}
/*
* Allocate APTPL metadata buffer used for UNREGISTER ops
@@ -2227,7 +2247,9 @@ static int core_scsi3_emulate_pro_register(
pr_err("Unable to allocate"
" pr_aptpl_buf\n");
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
}
/*
@@ -2241,7 +2263,8 @@ static int core_scsi3_emulate_pro_register(
if (pr_holder < 0) {
kfree(pr_aptpl_buf);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
spin_lock(&pr_tmpl->registration_lock);
@@ -2405,7 +2428,8 @@ static int core_scsi3_pro_reserve(
if (!se_sess || !se_lun) {
pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
se_tpg = se_sess->se_tpg;
se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
@@ -2417,7 +2441,8 @@ static int core_scsi3_pro_reserve(
if (!pr_reg) {
pr_err("SPC-3 PR: Unable to locate"
" PR_REGISTERED *pr_reg for RESERVE\n");
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
/*
* From spc4r17 Section 5.7.9: Reserving:
@@ -2433,7 +2458,8 @@ static int core_scsi3_pro_reserve(
" does not match existing SA REGISTER res_key:"
" 0x%016Lx\n", res_key, pr_reg->pr_res_key);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
/*
* From spc4r17 Section 5.7.9: Reserving:
@@ -2448,7 +2474,8 @@ static int core_scsi3_pro_reserve(
if (scope != PR_SCOPE_LU_SCOPE) {
pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ return -EINVAL;
}
/*
* See if we have an existing PR reservation holder pointer at
@@ -2480,7 +2507,8 @@ static int core_scsi3_pro_reserve(
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
/*
* From spc4r17 Section 5.7.9: Reserving:
@@ -2503,7 +2531,8 @@ static int core_scsi3_pro_reserve(
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
/*
* From spc4r17 Section 5.7.9: Reserving:
@@ -2517,7 +2546,7 @@ static int core_scsi3_pro_reserve(
*/
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+ return 0;
}
/*
* Otherwise, our *pr_reg becomes the PR reservation holder for said
@@ -2574,7 +2603,8 @@ static int core_scsi3_emulate_pro_reserve(
default:
pr_err("SPC-3 PR: Unknown Service Action RESERVE Type:"
" 0x%02x\n", type);
- return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -EINVAL;
}
return ret;
@@ -2630,7 +2660,8 @@ static int core_scsi3_emulate_pro_release(
if (!se_sess || !se_lun) {
pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
/*
* Locate the existing *pr_reg via struct se_node_acl pointers
@@ -2639,7 +2670,8 @@ static int core_scsi3_emulate_pro_release(
if (!pr_reg) {
pr_err("SPC-3 PR: Unable to locate"
" PR_REGISTERED *pr_reg for RELEASE\n");
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
/*
* From spc4r17 Section 5.7.11.2 Releasing:
@@ -2661,7 +2693,7 @@ static int core_scsi3_emulate_pro_release(
*/
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+ return 0;
}
if ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
(pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG))
@@ -2675,7 +2707,7 @@ static int core_scsi3_emulate_pro_release(
*/
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+ return 0;
}
/*
* From spc4r17 Section 5.7.11.2 Releasing:
@@ -2697,7 +2729,8 @@ static int core_scsi3_emulate_pro_release(
" 0x%016Lx\n", res_key, pr_reg->pr_res_key);
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
/*
* From spc4r17 Section 5.7.11.2 Releasing and above:
@@ -2719,7 +2752,8 @@ static int core_scsi3_emulate_pro_release(
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
/*
* In response to a persistent reservation release request from the
@@ -2802,7 +2836,8 @@ static int core_scsi3_emulate_pro_clear(
if (!pr_reg_n) {
pr_err("SPC-3 PR: Unable to locate"
" PR_REGISTERED *pr_reg for CLEAR\n");
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
/*
* From spc4r17 section 5.7.11.6, Clearing:
@@ -2821,7 +2856,8 @@ static int core_scsi3_emulate_pro_clear(
" existing SA REGISTER res_key:"
" 0x%016Lx\n", res_key, pr_reg_n->pr_res_key);
core_scsi3_put_pr_reg(pr_reg_n);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
/*
* a) Release the persistent reservation, if any;
@@ -2945,21 +2981,6 @@ static void core_scsi3_release_preempt_and_abort(
}
}
-int core_scsi3_check_cdb_abort_and_preempt(
- struct list_head *preempt_and_abort_list,
- struct se_cmd *cmd)
-{
- struct t10_pr_registration *pr_reg, *pr_reg_tmp;
-
- list_for_each_entry_safe(pr_reg, pr_reg_tmp, preempt_and_abort_list,
- pr_reg_abort_list) {
- if (pr_reg->pr_res_key == cmd->pr_res_key)
- return 0;
- }
-
- return 1;
-}
-
static int core_scsi3_pro_preempt(
struct se_cmd *cmd,
int type,
@@ -2979,8 +3000,10 @@ static int core_scsi3_pro_preempt(
int all_reg = 0, calling_it_nexus = 0, released_regs = 0;
int prh_type = 0, prh_scope = 0, ret;
- if (!se_sess)
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ if (!se_sess) {
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
+ }
se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
@@ -2989,16 +3012,19 @@ static int core_scsi3_pro_preempt(
pr_err("SPC-3 PR: Unable to locate"
" PR_REGISTERED *pr_reg for PREEMPT%s\n",
(abort) ? "_AND_ABORT" : "");
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
if (pr_reg_n->pr_res_key != res_key) {
core_scsi3_put_pr_reg(pr_reg_n);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
if (scope != PR_SCOPE_LU_SCOPE) {
pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
core_scsi3_put_pr_reg(pr_reg_n);
- return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ return -EINVAL;
}
INIT_LIST_HEAD(&preempt_and_abort_list);
@@ -3012,7 +3038,8 @@ static int core_scsi3_pro_preempt(
if (!all_reg && !sa_res_key) {
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg_n);
- return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ return -EINVAL;
}
/*
* From spc4r17, section 5.7.11.4.4 Removing Registrations:
@@ -3106,7 +3133,8 @@ static int core_scsi3_pro_preempt(
if (!released_regs) {
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg_n);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
/*
* For an existing all registrants type reservation
@@ -3297,7 +3325,8 @@ static int core_scsi3_emulate_pro_preempt(
default:
pr_err("SPC-3 PR: Unknown Service Action PREEMPT%s"
" Type: 0x%02x\n", (abort) ? "_AND_ABORT" : "", type);
- return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -EINVAL;
}
return ret;
@@ -3331,7 +3360,8 @@ static int core_scsi3_emulate_pro_register_and_move(
if (!se_sess || !se_lun) {
pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
memset(dest_iport, 0, 64);
memset(i_buf, 0, PR_REG_ISID_ID_LEN);
@@ -3349,7 +3379,8 @@ static int core_scsi3_emulate_pro_register_and_move(
if (!pr_reg) {
pr_err("SPC-3 PR: Unable to locate PR_REGISTERED"
" *pr_reg for REGISTER_AND_MOVE\n");
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
/*
* The provided reservation key much match the existing reservation key
@@ -3360,7 +3391,8 @@ static int core_scsi3_emulate_pro_register_and_move(
" res_key: 0x%016Lx does not match existing SA REGISTER"
" res_key: 0x%016Lx\n", res_key, pr_reg->pr_res_key);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
/*
* The service active reservation key needs to be non zero
@@ -3369,7 +3401,8 @@ static int core_scsi3_emulate_pro_register_and_move(
pr_warn("SPC-3 PR REGISTER_AND_MOVE: Received zero"
" sa_res_key\n");
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ return -EINVAL;
}
/*
@@ -3392,7 +3425,8 @@ static int core_scsi3_emulate_pro_register_and_move(
" does not equal CDB data_length: %u\n", tid_len,
cmd->data_length);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ return -EINVAL;
}
spin_lock(&dev->se_port_lock);
@@ -3417,7 +3451,8 @@ static int core_scsi3_emulate_pro_register_and_move(
atomic_dec(&dest_se_tpg->tpg_pr_ref_count);
smp_mb__after_atomic_dec();
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
spin_lock(&dev->se_port_lock);
@@ -3430,7 +3465,8 @@ static int core_scsi3_emulate_pro_register_and_move(
" fabric ops from Relative Target Port Identifier:"
" %hu\n", rtpi);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ return -EINVAL;
}
buf = transport_kmap_first_data_page(cmd);
@@ -3445,14 +3481,16 @@ static int core_scsi3_emulate_pro_register_and_move(
" from fabric: %s\n", proto_ident,
dest_tf_ops->get_fabric_proto_ident(dest_se_tpg),
dest_tf_ops->get_fabric_name());
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
if (dest_tf_ops->tpg_parse_pr_out_transport_id == NULL) {
pr_err("SPC-3 PR REGISTER_AND_MOVE: Fabric does not"
" containg a valid tpg_parse_pr_out_transport_id"
" function pointer\n");
- ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ ret = -EINVAL;
goto out;
}
initiator_str = dest_tf_ops->tpg_parse_pr_out_transport_id(dest_se_tpg,
@@ -3460,7 +3498,8 @@ static int core_scsi3_emulate_pro_register_and_move(
if (!initiator_str) {
pr_err("SPC-3 PR REGISTER_AND_MOVE: Unable to locate"
" initiator_str from Transport ID\n");
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
@@ -3489,7 +3528,8 @@ static int core_scsi3_emulate_pro_register_and_move(
pr_err("SPC-3 PR REGISTER_AND_MOVE: TransportID: %s"
" matches: %s on received I_T Nexus\n", initiator_str,
pr_reg_nacl->initiatorname);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
if (!strcmp(iport_ptr, pr_reg->pr_reg_isid)) {
@@ -3497,7 +3537,8 @@ static int core_scsi3_emulate_pro_register_and_move(
" matches: %s %s on received I_T Nexus\n",
initiator_str, iport_ptr, pr_reg_nacl->initiatorname,
pr_reg->pr_reg_isid);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
after_iport_check:
@@ -3517,7 +3558,8 @@ after_iport_check:
pr_err("Unable to locate %s dest_node_acl for"
" TransportID%s\n", dest_tf_ops->get_fabric_name(),
initiator_str);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
ret = core_scsi3_nodeacl_depend_item(dest_node_acl);
@@ -3527,7 +3569,8 @@ after_iport_check:
atomic_dec(&dest_node_acl->acl_pr_ref_count);
smp_mb__after_atomic_dec();
dest_node_acl = NULL;
- ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
#if 0
@@ -3543,7 +3586,8 @@ after_iport_check:
if (!dest_se_deve) {
pr_err("Unable to locate %s dest_se_deve from RTPI:"
" %hu\n", dest_tf_ops->get_fabric_name(), rtpi);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
@@ -3553,7 +3597,8 @@ after_iport_check:
atomic_dec(&dest_se_deve->pr_ref_count);
smp_mb__after_atomic_dec();
dest_se_deve = NULL;
- ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ ret = -EINVAL;
goto out;
}
#if 0
@@ -3572,7 +3617,8 @@ after_iport_check:
pr_warn("SPC-3 PR REGISTER_AND_MOVE: No reservation"
" currently held\n");
spin_unlock(&dev->dev_reservation_lock);
- ret = PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ ret = -EINVAL;
goto out;
}
/*
@@ -3585,7 +3631,8 @@ after_iport_check:
pr_warn("SPC-3 PR REGISTER_AND_MOVE: Calling I_T"
" Nexus is not reservation holder\n");
spin_unlock(&dev->dev_reservation_lock);
- ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ ret = -EINVAL;
goto out;
}
/*
@@ -3603,7 +3650,8 @@ after_iport_check:
" reservation for type: %s\n",
core_scsi3_pr_dump_type(pr_res_holder->pr_res_type));
spin_unlock(&dev->dev_reservation_lock);
- ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ ret = -EINVAL;
goto out;
}
pr_res_nacl = pr_res_holder->pr_reg_nacl;
@@ -3640,7 +3688,8 @@ after_iport_check:
sa_res_key, 0, aptpl, 2, 1);
if (ret != 0) {
spin_unlock(&dev->dev_reservation_lock);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
@@ -3771,7 +3820,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
pr_err("Received PERSISTENT_RESERVE CDB while legacy"
" SPC-2 reservation is held, returning"
" RESERVATION_CONFLICT\n");
- ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ ret = -EINVAL;
goto out;
}
@@ -3779,13 +3829,16 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
* FIXME: A NULL struct se_session pointer means an this is not coming from
* a $FABRIC_MOD's nexus, but from internal passthrough ops.
*/
- if (!cmd->se_sess)
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ if (!cmd->se_sess) {
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
+ }
if (cmd->data_length < 24) {
pr_warn("SPC-PR: Received PR OUT parameter list"
" length too small: %u\n", cmd->data_length);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
/*
@@ -3820,7 +3873,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
* SPEC_I_PT=1 is only valid for Service action: REGISTER
*/
if (spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER)) {
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
@@ -3837,7 +3891,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
(cmd->data_length != 24)) {
pr_warn("SPC-PR: Received PR OUT illegal parameter"
" list length: %u\n", cmd->data_length);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
/*
@@ -3878,7 +3933,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
default:
pr_err("Unknown PERSISTENT_RESERVE_OUT service"
" action: 0x%02x\n", cdb[1] & 0x1f);
- ret = PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ ret = -EINVAL;
break;
}
@@ -3906,7 +3962,8 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
if (cmd->data_length < 8) {
pr_err("PRIN SA READ_KEYS SCSI Data Length: %u"
" too small\n", cmd->data_length);
- return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -EINVAL;
}
buf = transport_kmap_first_data_page(cmd);
@@ -3965,7 +4022,8 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
if (cmd->data_length < 8) {
pr_err("PRIN SA READ_RESERVATIONS SCSI Data Length: %u"
" too small\n", cmd->data_length);
- return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -EINVAL;
}
buf = transport_kmap_first_data_page(cmd);
@@ -4047,7 +4105,8 @@ static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
if (cmd->data_length < 6) {
pr_err("PRIN SA REPORT_CAPABILITIES SCSI Data Length:"
" %u too small\n", cmd->data_length);
- return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -EINVAL;
}
buf = transport_kmap_first_data_page(cmd);
@@ -4108,7 +4167,8 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
if (cmd->data_length < 8) {
pr_err("PRIN SA READ_FULL_STATUS SCSI Data Length: %u"
" too small\n", cmd->data_length);
- return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -EINVAL;
}
buf = transport_kmap_first_data_page(cmd);
@@ -4255,7 +4315,8 @@ int target_scsi3_emulate_pr_in(struct se_task *task)
pr_err("Received PERSISTENT_RESERVE CDB while legacy"
" SPC-2 reservation is held, returning"
" RESERVATION_CONFLICT\n");
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
switch (cmd->t_task_cdb[1] & 0x1f) {
@@ -4274,7 +4335,8 @@ int target_scsi3_emulate_pr_in(struct se_task *task)
default:
pr_err("Unknown PERSISTENT_RESERVE_IN service"
" action: 0x%02x\n", cmd->t_task_cdb[1] & 0x1f);
- ret = PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ ret = -EINVAL;
break;
}
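Every failure path in the persistent-reservations emulation above now stores a TCM_* reason in cmd->scsi_sense_reason and returns a negative errno, leaving the caller to turn that reason into CHECK CONDITION sense data. A hedged sketch of the consuming side; the helper name mirrors the kernel's existing transport_send_check_condition_and_sense(), but the exact call shown here is an assumption of this illustration:

#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

/* Illustrative consumer of the (negative errno, scsi_sense_reason) pair. */
static void example_complete_emulation(struct se_cmd *cmd, int ret)
{
	if (ret >= 0)
		return;		/* GOOD status or handed off to the transport */

	/* e.g. TCM_RESERVATION_CONFLICT or TCM_INVALID_PARAMETER_LIST */
	transport_send_check_condition_and_sense(cmd,
			cmd->scsi_sense_reason, 0);
}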
diff --git a/drivers/target/target_core_pr.h b/drivers/target/target_core_pr.h
index b97f6940dd05..7a233feb7e99 100644
--- a/drivers/target/target_core_pr.h
+++ b/drivers/target/target_core_pr.h
@@ -60,8 +60,6 @@ extern void core_scsi3_free_pr_reg_from_nacl(struct se_device *,
struct se_node_acl *);
extern void core_scsi3_free_all_registrations(struct se_device *);
extern unsigned char *core_scsi3_pr_dump_type(int);
-extern int core_scsi3_check_cdb_abort_and_preempt(struct list_head *,
- struct se_cmd *);
extern int target_scsi3_emulate_pr_in(struct se_task *task);
extern int target_scsi3_emulate_pr_out(struct se_task *task);
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index ed32e1efe429..d35467d42e12 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -44,8 +44,7 @@
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
-#include <target/target_core_device.h>
-#include <target/target_core_transport.h>
+#include <target/target_core_backend.h>
#include "target_core_pscsi.h"
@@ -105,7 +104,7 @@ static void pscsi_detach_hba(struct se_hba *hba)
static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag)
{
- struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)hba->hba_ptr;
+ struct pscsi_hba_virt *phv = hba->hba_ptr;
struct Scsi_Host *sh = phv->phv_lld_host;
/*
* Release the struct Scsi_Host
@@ -351,7 +350,6 @@ static struct se_device *pscsi_add_device_to_list(
* scsi_device_put() and the pdv->pdv_sd cleared.
*/
pdv->pdv_sd = sd;
-
dev = transport_add_device_to_core_hba(hba, &pscsi_template,
se_dev, dev_flags, pdv,
&dev_limits, NULL, NULL);
@@ -406,7 +404,7 @@ static struct se_device *pscsi_create_type_disk(
__releases(sh->host_lock)
{
struct se_device *dev;
- struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr;
+ struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr;
struct Scsi_Host *sh = sd->host;
struct block_device *bd;
u32 dev_flags = 0;
@@ -454,7 +452,7 @@ static struct se_device *pscsi_create_type_rom(
__releases(sh->host_lock)
{
struct se_device *dev;
- struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr;
+ struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr;
struct Scsi_Host *sh = sd->host;
u32 dev_flags = 0;
@@ -489,7 +487,7 @@ static struct se_device *pscsi_create_type_other(
__releases(sh->host_lock)
{
struct se_device *dev;
- struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr;
+ struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr;
struct Scsi_Host *sh = sd->host;
u32 dev_flags = 0;
@@ -510,10 +508,10 @@ static struct se_device *pscsi_create_virtdevice(
struct se_subsystem_dev *se_dev,
void *p)
{
- struct pscsi_dev_virt *pdv = (struct pscsi_dev_virt *)p;
+ struct pscsi_dev_virt *pdv = p;
struct se_device *dev;
struct scsi_device *sd;
- struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)hba->hba_ptr;
+ struct pscsi_hba_virt *phv = hba->hba_ptr;
struct Scsi_Host *sh = phv->phv_lld_host;
int legacy_mode_enable = 0;
@@ -818,7 +816,7 @@ static ssize_t pscsi_set_configfs_dev_params(struct se_hba *hba,
orig = opts;
- while ((ptr = strsep(&opts, ",")) != NULL) {
+ while ((ptr = strsep(&opts, ",\n")) != NULL) {
if (!*ptr)
continue;
@@ -963,6 +961,7 @@ static inline struct bio *pscsi_get_bio(int sg_num)
static int pscsi_map_sg(struct se_task *task, struct scatterlist *task_sg,
struct bio **hbio)
{
+ struct se_cmd *cmd = task->task_se_cmd;
struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr;
u32 task_sg_num = task->task_sg_nents;
struct bio *bio = NULL, *tbio = NULL;
@@ -971,7 +970,7 @@ static int pscsi_map_sg(struct se_task *task, struct scatterlist *task_sg,
u32 data_len = task->task_size, i, len, bytes, off;
int nr_pages = (task->task_size + task_sg[0].offset +
PAGE_SIZE - 1) >> PAGE_SHIFT;
- int nr_vecs = 0, rc, ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+ int nr_vecs = 0, rc;
int rw = (task->task_data_direction == DMA_TO_DEVICE);
*hbio = NULL;
@@ -1058,11 +1057,13 @@ fail:
bio->bi_next = NULL;
bio_endio(bio, 0); /* XXX: should be error */
}
- return ret;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -ENOMEM;
}
static int pscsi_do_task(struct se_task *task)
{
+ struct se_cmd *cmd = task->task_se_cmd;
struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr;
struct pscsi_plugin_task *pt = PSCSI_TASK(task);
struct request *req;
@@ -1078,7 +1079,9 @@ static int pscsi_do_task(struct se_task *task)
if (!req || IS_ERR(req)) {
pr_err("PSCSI: blk_get_request() failed: %ld\n",
req ? IS_ERR(req) : -ENOMEM);
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -ENODEV;
}
} else {
BUG_ON(!task->task_size);
@@ -1087,8 +1090,11 @@ static int pscsi_do_task(struct se_task *task)
* Setup the main struct request for the task->task_sg[] payload
*/
ret = pscsi_map_sg(task, task->task_sg, &hbio);
- if (ret < 0)
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ if (ret < 0) {
+ cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return ret;
+ }
req = blk_make_request(pdv->pdv_sd->request_queue, hbio,
GFP_KERNEL);
@@ -1115,7 +1121,7 @@ static int pscsi_do_task(struct se_task *task)
(task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG),
pscsi_req_done);
- return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+ return 0;
fail:
while (hbio) {
@@ -1124,7 +1130,8 @@ fail:
bio->bi_next = NULL;
bio_endio(bio, 0); /* XXX: should be error */
}
- return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -ENOMEM;
}
/* pscsi_get_sense_buffer():
@@ -1135,7 +1142,7 @@ static unsigned char *pscsi_get_sense_buffer(struct se_task *task)
{
struct pscsi_plugin_task *pt = PSCSI_TASK(task);
- return (unsigned char *)&pt->pscsi_sense[0];
+ return pt->pscsi_sense;
}
/* pscsi_get_device_rev():
@@ -1198,9 +1205,8 @@ static inline void pscsi_process_SAM_status(
" 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0],
pt->pscsi_result);
task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
- task->task_error_status = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
- task->task_se_cmd->transport_error_status =
- PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ task->task_se_cmd->scsi_sense_reason =
+ TCM_UNSUPPORTED_SCSI_OPCODE;
transport_complete_task(task, 0);
break;
}
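Besides dropping the redundant casts from void * (hba->hba_ptr and friends), pscsi switches its configfs parameter parser, like iblock and rd in this same diff, from strsep(&opts, ",") to strsep(&opts, ",\n"), so the newline that terminates a configfs write no longer sticks to the last token. A small stand-alone demonstration using the C library strsep(); the parameter string is only an example:

#include <stdio.h>
#include <string.h>

/* With only "," as the delimiter set, the trailing '\n' from a configfs
 * write would stay attached to the last value; adding '\n' splits it off
 * as an empty token that the !*ptr check skips. */
int main(void)
{
	char buf[] = "scsi_host_id=2,scsi_lun_id=0\n";
	char *opts = buf, *ptr;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;	/* empty token after the final '\n' */
		printf("token: '%s'\n", ptr);
	}
	return 0;
}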
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 5158d3846f19..8b68f7b82631 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -37,9 +37,7 @@
#include <scsi/scsi_host.h>
#include <target/target_core_base.h>
-#include <target/target_core_device.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_backend.h>
#include "target_core_rd.h"
@@ -343,235 +341,74 @@ static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
return NULL;
}
-/* rd_MEMCPY_read():
- *
- *
- */
-static int rd_MEMCPY_read(struct rd_request *req)
+static int rd_MEMCPY(struct rd_request *req, u32 read_rd)
{
struct se_task *task = &req->rd_task;
struct rd_dev *dev = req->rd_task.task_se_cmd->se_dev->dev_ptr;
struct rd_dev_sg_table *table;
- struct scatterlist *sg_d, *sg_s;
- void *dst, *src;
- u32 i = 0, j = 0, dst_offset = 0, src_offset = 0;
- u32 length, page_end = 0, table_sg_end;
+ struct scatterlist *rd_sg;
+ struct sg_mapping_iter m;
u32 rd_offset = req->rd_offset;
+ u32 src_len;
table = rd_get_sg_table(dev, req->rd_page);
if (!table)
return -EINVAL;
- table_sg_end = (table->page_end_offset - req->rd_page);
- sg_d = task->task_sg;
- sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
+ rd_sg = &table->sg_table[req->rd_page - table->page_start_offset];
- pr_debug("RD[%u]: Read LBA: %llu, Size: %u Page: %u, Offset:"
- " %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
- req->rd_page, req->rd_offset);
-
- src_offset = rd_offset;
+ pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n",
+ dev->rd_dev_id, read_rd ? "Read" : "Write",
+ task->task_lba, req->rd_size, req->rd_page,
+ rd_offset);
+ src_len = PAGE_SIZE - rd_offset;
+ sg_miter_start(&m, task->task_sg, task->task_sg_nents,
+ read_rd ? SG_MITER_TO_SG : SG_MITER_FROM_SG);
while (req->rd_size) {
- if ((sg_d[i].length - dst_offset) <
- (sg_s[j].length - src_offset)) {
- length = (sg_d[i].length - dst_offset);
-
- pr_debug("Step 1 - sg_d[%d]: %p length: %d"
- " offset: %u sg_s[%d].length: %u\n", i,
- &sg_d[i], sg_d[i].length, sg_d[i].offset, j,
- sg_s[j].length);
- pr_debug("Step 1 - length: %u dst_offset: %u"
- " src_offset: %u\n", length, dst_offset,
- src_offset);
-
- if (length > req->rd_size)
- length = req->rd_size;
-
- dst = sg_virt(&sg_d[i++]) + dst_offset;
- BUG_ON(!dst);
-
- src = sg_virt(&sg_s[j]) + src_offset;
- BUG_ON(!src);
-
- dst_offset = 0;
- src_offset = length;
- page_end = 0;
- } else {
- length = (sg_s[j].length - src_offset);
-
- pr_debug("Step 2 - sg_d[%d]: %p length: %d"
- " offset: %u sg_s[%d].length: %u\n", i,
- &sg_d[i], sg_d[i].length, sg_d[i].offset,
- j, sg_s[j].length);
- pr_debug("Step 2 - length: %u dst_offset: %u"
- " src_offset: %u\n", length, dst_offset,
- src_offset);
-
- if (length > req->rd_size)
- length = req->rd_size;
-
- dst = sg_virt(&sg_d[i]) + dst_offset;
- BUG_ON(!dst);
-
- if (sg_d[i].length == length) {
- i++;
- dst_offset = 0;
- } else
- dst_offset = length;
-
- src = sg_virt(&sg_s[j++]) + src_offset;
- BUG_ON(!src);
-
- src_offset = 0;
- page_end = 1;
- }
+ u32 len;
+ void *rd_addr;
- memcpy(dst, src, length);
+ sg_miter_next(&m);
+ len = min((u32)m.length, src_len);
+ m.consumed = len;
- pr_debug("page: %u, remaining size: %u, length: %u,"
- " i: %u, j: %u\n", req->rd_page,
- (req->rd_size - length), length, i, j);
+ rd_addr = sg_virt(rd_sg) + rd_offset;
- req->rd_size -= length;
- if (!req->rd_size)
- return 0;
+ if (read_rd)
+ memcpy(m.addr, rd_addr, len);
+ else
+ memcpy(rd_addr, m.addr, len);
- if (!page_end)
+ req->rd_size -= len;
+ if (!req->rd_size)
continue;
- if (++req->rd_page <= table->page_end_offset) {
- pr_debug("page: %u in same page table\n",
- req->rd_page);
+ src_len -= len;
+ if (src_len) {
+ rd_offset += len;
continue;
}
- pr_debug("getting new page table for page: %u\n",
- req->rd_page);
-
- table = rd_get_sg_table(dev, req->rd_page);
- if (!table)
- return -EINVAL;
-
- sg_s = &table->sg_table[j = 0];
- }
-
- return 0;
-}
-
-/* rd_MEMCPY_write():
- *
- *
- */
-static int rd_MEMCPY_write(struct rd_request *req)
-{
- struct se_task *task = &req->rd_task;
- struct rd_dev *dev = req->rd_task.task_se_cmd->se_dev->dev_ptr;
- struct rd_dev_sg_table *table;
- struct scatterlist *sg_d, *sg_s;
- void *dst, *src;
- u32 i = 0, j = 0, dst_offset = 0, src_offset = 0;
- u32 length, page_end = 0, table_sg_end;
- u32 rd_offset = req->rd_offset;
-
- table = rd_get_sg_table(dev, req->rd_page);
- if (!table)
- return -EINVAL;
-
- table_sg_end = (table->page_end_offset - req->rd_page);
- sg_d = &table->sg_table[req->rd_page - table->page_start_offset];
- sg_s = task->task_sg;
-
- pr_debug("RD[%d] Write LBA: %llu, Size: %u, Page: %u,"
- " Offset: %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
- req->rd_page, req->rd_offset);
-
- dst_offset = rd_offset;
-
- while (req->rd_size) {
- if ((sg_s[i].length - src_offset) <
- (sg_d[j].length - dst_offset)) {
- length = (sg_s[i].length - src_offset);
-
- pr_debug("Step 1 - sg_s[%d]: %p length: %d"
- " offset: %d sg_d[%d].length: %u\n", i,
- &sg_s[i], sg_s[i].length, sg_s[i].offset,
- j, sg_d[j].length);
- pr_debug("Step 1 - length: %u src_offset: %u"
- " dst_offset: %u\n", length, src_offset,
- dst_offset);
-
- if (length > req->rd_size)
- length = req->rd_size;
-
- src = sg_virt(&sg_s[i++]) + src_offset;
- BUG_ON(!src);
-
- dst = sg_virt(&sg_d[j]) + dst_offset;
- BUG_ON(!dst);
-
- src_offset = 0;
- dst_offset = length;
- page_end = 0;
- } else {
- length = (sg_d[j].length - dst_offset);
-
- pr_debug("Step 2 - sg_s[%d]: %p length: %d"
- " offset: %d sg_d[%d].length: %u\n", i,
- &sg_s[i], sg_s[i].length, sg_s[i].offset,
- j, sg_d[j].length);
- pr_debug("Step 2 - length: %u src_offset: %u"
- " dst_offset: %u\n", length, src_offset,
- dst_offset);
-
- if (length > req->rd_size)
- length = req->rd_size;
-
- src = sg_virt(&sg_s[i]) + src_offset;
- BUG_ON(!src);
-
- if (sg_s[i].length == length) {
- i++;
- src_offset = 0;
- } else
- src_offset = length;
-
- dst = sg_virt(&sg_d[j++]) + dst_offset;
- BUG_ON(!dst);
-
- dst_offset = 0;
- page_end = 1;
- }
-
- memcpy(dst, src, length);
-
- pr_debug("page: %u, remaining size: %u, length: %u,"
- " i: %u, j: %u\n", req->rd_page,
- (req->rd_size - length), length, i, j);
-
- req->rd_size -= length;
- if (!req->rd_size)
- return 0;
-
- if (!page_end)
- continue;
-
- if (++req->rd_page <= table->page_end_offset) {
- pr_debug("page: %u in same page table\n",
- req->rd_page);
+ /* rd page completed, next one please */
+ req->rd_page++;
+ rd_offset = 0;
+ src_len = PAGE_SIZE;
+ if (req->rd_page <= table->page_end_offset) {
+ rd_sg++;
continue;
}
- pr_debug("getting new page table for page: %u\n",
- req->rd_page);
-
table = rd_get_sg_table(dev, req->rd_page);
- if (!table)
+ if (!table) {
+ sg_miter_stop(&m);
return -EINVAL;
+ }
- sg_d = &table->sg_table[j = 0];
+ /* since we increment, the first sg entry is correct */
+ rd_sg = table->sg_table;
}
-
+ sg_miter_stop(&m);
return 0;
}
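The rewrite above folds rd_MEMCPY_read() and rd_MEMCPY_write() into a single rd_MEMCPY() driven by the scatterlist mapping iterator, which hands back one mapped chunk at a time instead of the old dual-index bookkeeping over sg_d[]/sg_s[]. A reduced sketch of the sg_miter pattern the new loop relies on; the function and its flat-buffer argument are illustrative, only the iterator calls and the TO_SG/FROM_SG direction flags follow the diff:

#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/types.h>

/* Copy 'size' bytes between a flat buffer and a scatterlist; 'to_sg'
 * plays the role of the read_rd flag in rd_MEMCPY() above. */
static void example_sg_copy(struct scatterlist *sgl, unsigned int nents,
			    void *buf, size_t size, bool to_sg)
{
	struct sg_mapping_iter m;
	size_t done = 0;

	sg_miter_start(&m, sgl, nents, to_sg ? SG_MITER_TO_SG : SG_MITER_FROM_SG);
	while (done < size && sg_miter_next(&m)) {
		size_t len = min(m.length, size - done);

		if (to_sg)
			memcpy(m.addr, buf + done, len);
		else
			memcpy(buf + done, m.addr, len);

		m.consumed = len;	/* report how much of this chunk was used */
		done += len;
	}
	sg_miter_stop(&m);
}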
@@ -583,28 +420,21 @@ static int rd_MEMCPY_do_task(struct se_task *task)
{
struct se_device *dev = task->task_se_cmd->se_dev;
struct rd_request *req = RD_REQ(task);
- unsigned long long lba;
+ u64 tmp;
int ret;
- req->rd_page = (task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size) / PAGE_SIZE;
- lba = task->task_lba;
- req->rd_offset = (do_div(lba,
- (PAGE_SIZE / dev->se_sub_dev->se_dev_attrib.block_size))) *
- dev->se_sub_dev->se_dev_attrib.block_size;
+ tmp = task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
+ req->rd_offset = do_div(tmp, PAGE_SIZE);
+ req->rd_page = tmp;
req->rd_size = task->task_size;
- if (task->task_data_direction == DMA_FROM_DEVICE)
- ret = rd_MEMCPY_read(req);
- else
- ret = rd_MEMCPY_write(req);
-
+ ret = rd_MEMCPY(req, task->task_data_direction == DMA_FROM_DEVICE);
if (ret != 0)
return ret;
task->task_scsi_status = GOOD;
transport_complete_task(task, 1);
-
- return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+ return 0;
}
/* rd_free_task(): (Part of se_subsystem_api_t template)
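rd_MEMCPY_do_task() now computes the starting page and offset in one step: multiply the LBA by the block size to get a byte offset, then let do_div() split it into a PAGE_SIZE quotient (rd_page) and remainder (rd_offset). For example, with 512-byte blocks and 4 KiB pages, LBA 9 gives 9 * 512 = 4608, so rd_page = 1 and rd_offset = 512. A stand-alone check of that arithmetic, using a userspace stand-in for do_div():

#include <stdio.h>
#include <stdint.h>

/* Userspace stand-in for the kernel's do_div(): divide the 64-bit value
 * in place and return the remainder, as the code above expects. */
static uint32_t example_do_div(uint64_t *n, uint32_t base)
{
	uint32_t rem = (uint32_t)(*n % base);

	*n /= base;
	return rem;
}

int main(void)
{
	uint64_t tmp = 9ULL * 512;				/* task_lba * block_size */
	uint32_t rd_offset = example_do_div(&tmp, 4096);	/* PAGE_SIZE */

	printf("rd_page=%llu rd_offset=%u\n",			/* expect 1 and 512 */
	       (unsigned long long)tmp, rd_offset);
	return 0;
}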
@@ -642,7 +472,7 @@ static ssize_t rd_set_configfs_dev_params(
orig = opts;
- while ((ptr = strsep(&opts, ",")) != NULL) {
+ while ((ptr = strsep(&opts, ",\n")) != NULL) {
if (!*ptr)
continue;
diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c
index 874152aed94a..f8c2d2cc3431 100644
--- a/drivers/target/target_core_stat.c
+++ b/drivers/target/target_core_stat.c
@@ -43,12 +43,12 @@
#include <scsi/scsi_host.h>
#include <target/target_core_base.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>
-#include "target_core_hba.h"
+#include "target_core_internal.h"
#ifndef INITIAL_JIFFIES
#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))
@@ -1755,8 +1755,7 @@ static ssize_t target_stat_scsi_att_intr_port_show_attr_port_ident(
/* scsiAttIntrPortName+scsiAttIntrPortIdentifier */
memset(buf, 0, 64);
if (tpg->se_tpg_tfo->sess_get_initiator_sid != NULL)
- tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
- (unsigned char *)&buf[0], 64);
+ tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess, buf, 64);
ret = snprintf(page, PAGE_SIZE, "%s+i+%s\n", nacl->initiatorname, buf);
spin_unlock_irq(&nacl->nacl_sess_lock);
diff --git a/drivers/target/target_core_stat.h b/drivers/target/target_core_stat.h
deleted file mode 100644
index 86c252f9ea47..000000000000
--- a/drivers/target/target_core_stat.h
+++ /dev/null
@@ -1,8 +0,0 @@
-#ifndef TARGET_CORE_STAT_H
-#define TARGET_CORE_STAT_H
-
-extern void target_stat_setup_dev_default_groups(struct se_subsystem_dev *);
-extern void target_stat_setup_port_default_groups(struct se_lun *);
-extern void target_stat_setup_mappedlun_default_groups(struct se_lun_acl *);
-
-#endif /*** TARGET_CORE_STAT_H ***/
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 217e29df6297..dcb0618c9388 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -32,12 +32,11 @@
#include <scsi/scsi_cmnd.h>
#include <target/target_core_base.h>
-#include <target/target_core_device.h>
-#include <target/target_core_tmr.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>
+#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
@@ -101,6 +100,21 @@ static void core_tmr_handle_tas_abort(
transport_cmd_finish_abort(cmd, 0);
}
+static int target_check_cdb_and_preempt(struct list_head *list,
+ struct se_cmd *cmd)
+{
+ struct t10_pr_registration *reg;
+
+ if (!list)
+ return 0;
+ list_for_each_entry(reg, list, pr_reg_abort_list) {
+ if (reg->pr_res_key == cmd->pr_res_key)
+ return 0;
+ }
+
+ return 1;
+}
+
static void core_tmr_drain_tmr_list(
struct se_device *dev,
struct se_tmr_req *tmr,
@@ -132,9 +146,7 @@ static void core_tmr_drain_tmr_list(
* parameter (eg: for PROUT PREEMPT_AND_ABORT service action
* skip non regisration key matching TMRs.
*/
- if (preempt_and_abort_list &&
- (core_scsi3_check_cdb_abort_and_preempt(
- preempt_and_abort_list, cmd) != 0))
+ if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
continue;
spin_lock(&cmd->t_state_lock);
@@ -211,9 +223,7 @@ static void core_tmr_drain_task_list(
* For PREEMPT_AND_ABORT usage, only process commands
* with a matching reservation key.
*/
- if (preempt_and_abort_list &&
- (core_scsi3_check_cdb_abort_and_preempt(
- preempt_and_abort_list, cmd) != 0))
+ if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
continue;
/*
* Not aborting PROUT PREEMPT_AND_ABORT CDB..
@@ -222,7 +232,7 @@ static void core_tmr_drain_task_list(
continue;
list_move_tail(&task->t_state_list, &drain_task_list);
- atomic_set(&task->task_state_active, 0);
+ task->t_state_active = false;
/*
* Remove from task execute list before processing drain_task_list
*/
@@ -321,9 +331,7 @@ static void core_tmr_drain_cmd_list(
* For PREEMPT_AND_ABORT usage, only process commands
* with a matching reservation key.
*/
- if (preempt_and_abort_list &&
- (core_scsi3_check_cdb_abort_and_preempt(
- preempt_and_abort_list, cmd) != 0))
+ if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
continue;
/*
* Not aborting PROUT PREEMPT_AND_ABORT CDB..
@@ -345,10 +353,6 @@ static void core_tmr_drain_cmd_list(
" %d t_fe_count: %d\n", (preempt_and_abort_list) ?
"Preempt" : "", cmd, cmd->t_state,
atomic_read(&cmd->t_fe_count));
- /*
- * Signal that the command has failed via cmd->se_cmd_flags,
- */
- transport_new_cmd_failure(cmd);
core_tmr_handle_tas_abort(tmr_nacl, cmd, tas,
atomic_read(&cmd->t_fe_count));
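Two simplifications run through the TMR code above: the NULL check on the preempt_and_abort list moves inside the new target_check_cdb_and_preempt() helper, shrinking the three drain loops to a one-line test, and task->task_state_active becomes the plain bool t_state_active because every access already happens under dev->execute_task_lock. A sketch of the second point; the surrounding function is illustrative, while the lock and field names come from the diff:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <target/target_core_base.h>

/* A flag that is only read and written under an existing spinlock does
 * not need atomic_t; the plain bool makes the lock the documented
 * synchronization point.  Illustrative function only. */
static void example_mark_task_inactive(struct se_device *dev,
				       struct se_task *task)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	if (task->t_state_active) {
		list_del(&task->t_state_list);
		task->t_state_active = false;	/* was atomic_set(&task->task_state_active, 0) */
	}
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}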
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 8ddd133025b9..b7668029bb31 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -39,13 +39,10 @@
#include <scsi/scsi_cmnd.h>
#include <target/target_core_base.h>
-#include <target/target_core_device.h>
-#include <target/target_core_tpg.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
-#include "target_core_hba.h"
-#include "target_core_stat.h"
+#include "target_core_internal.h"
extern struct se_device *g_lun0_dev;
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 3400ae6e93f8..d3ddd1361949 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -45,23 +45,18 @@
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
-#include <target/target_core_device.h>
-#include <target/target_core_tmr.h>
-#include <target/target_core_tpg.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>
+#include "target_core_internal.h"
#include "target_core_alua.h"
-#include "target_core_cdb.h"
-#include "target_core_hba.h"
#include "target_core_pr.h"
#include "target_core_ua.h"
static int sub_api_initialized;
static struct workqueue_struct *target_completion_wq;
-static struct kmem_cache *se_cmd_cache;
static struct kmem_cache *se_sess_cache;
struct kmem_cache *se_tmr_req_cache;
struct kmem_cache *se_ua_cache;
@@ -73,7 +68,7 @@ struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
static int transport_generic_write_pending(struct se_cmd *);
static int transport_processing_thread(void *param);
-static int __transport_execute_tasks(struct se_device *dev);
+static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *);
static void transport_complete_task_attr(struct se_cmd *cmd);
static void transport_handle_queue_full(struct se_cmd *cmd,
struct se_device *dev);
@@ -82,24 +77,18 @@ static int transport_generic_get_mem(struct se_cmd *cmd);
static void transport_put_cmd(struct se_cmd *cmd);
static void transport_remove_cmd_from_queue(struct se_cmd *cmd);
static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
-static void transport_generic_request_failure(struct se_cmd *, int, int);
+static void transport_generic_request_failure(struct se_cmd *);
static void target_complete_ok_work(struct work_struct *work);
int init_se_kmem_caches(void)
{
- se_cmd_cache = kmem_cache_create("se_cmd_cache",
- sizeof(struct se_cmd), __alignof__(struct se_cmd), 0, NULL);
- if (!se_cmd_cache) {
- pr_err("kmem_cache_create for struct se_cmd failed\n");
- goto out;
- }
se_tmr_req_cache = kmem_cache_create("se_tmr_cache",
sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req),
0, NULL);
if (!se_tmr_req_cache) {
pr_err("kmem_cache_create() for struct se_tmr_req"
" failed\n");
- goto out_free_cmd_cache;
+ goto out;
}
se_sess_cache = kmem_cache_create("se_sess_cache",
sizeof(struct se_session), __alignof__(struct se_session),
@@ -182,8 +171,6 @@ out_free_sess_cache:
kmem_cache_destroy(se_sess_cache);
out_free_tmr_req_cache:
kmem_cache_destroy(se_tmr_req_cache);
-out_free_cmd_cache:
- kmem_cache_destroy(se_cmd_cache);
out:
return -ENOMEM;
}
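With se_cmd_cache gone, init_se_kmem_caches() allocates one slab fewer and loses the out_free_cmd_cache label, but it keeps the usual goto-unwind shape: each allocation that fails jumps to a label that destroys only the caches created before it. A compact sketch of that shape; the cache names and sizes are placeholders:

#include <linux/errno.h>
#include <linux/slab.h>

static struct kmem_cache *example_cache_a;
static struct kmem_cache *example_cache_b;

/* Goto-unwind pattern as kept by init_se_kmem_caches(). */
static int example_init_caches(void)
{
	example_cache_a = kmem_cache_create("example_cache_a", 64, 8, 0, NULL);
	if (!example_cache_a)
		goto out;

	example_cache_b = kmem_cache_create("example_cache_b", 128, 8, 0, NULL);
	if (!example_cache_b)
		goto out_free_a;

	return 0;

out_free_a:
	kmem_cache_destroy(example_cache_a);
out:
	return -ENOMEM;
}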
@@ -191,7 +178,6 @@ out:
void release_se_kmem_caches(void)
{
destroy_workqueue(target_completion_wq);
- kmem_cache_destroy(se_cmd_cache);
kmem_cache_destroy(se_tmr_req_cache);
kmem_cache_destroy(se_sess_cache);
kmem_cache_destroy(se_ua_cache);
@@ -222,14 +208,13 @@ u32 scsi_get_new_index(scsi_index_t type)
return new_index;
}
-void transport_init_queue_obj(struct se_queue_obj *qobj)
+static void transport_init_queue_obj(struct se_queue_obj *qobj)
{
atomic_set(&qobj->queue_cnt, 0);
INIT_LIST_HEAD(&qobj->qobj_list);
init_waitqueue_head(&qobj->thread_wq);
spin_lock_init(&qobj->cmd_queue_lock);
}
-EXPORT_SYMBOL(transport_init_queue_obj);
void transport_subsystem_check_init(void)
{
@@ -436,18 +421,18 @@ static void transport_all_task_dev_remove_state(struct se_cmd *cmd)
if (task->task_flags & TF_ACTIVE)
continue;
- if (!atomic_read(&task->task_state_active))
- continue;
-
spin_lock_irqsave(&dev->execute_task_lock, flags);
- list_del(&task->t_state_list);
- pr_debug("Removed ITT: 0x%08x dev: %p task[%p]\n",
- cmd->se_tfo->get_task_tag(cmd), dev, task);
- spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+ if (task->t_state_active) {
+ pr_debug("Removed ITT: 0x%08x dev: %p task[%p]\n",
+ cmd->se_tfo->get_task_tag(cmd), dev, task);
- atomic_set(&task->task_state_active, 0);
- atomic_dec(&cmd->t_task_cdbs_ex_left);
+ list_del(&task->t_state_list);
+ atomic_dec(&cmd->t_task_cdbs_ex_left);
+ task->t_state_active = false;
+ }
+ spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}
+
}
/* transport_cmd_check_stop():
@@ -680,9 +665,9 @@ void transport_complete_sync_cache(struct se_cmd *cmd, int good)
task->task_scsi_status = GOOD;
} else {
task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
- task->task_error_status = PYX_TRANSPORT_ILLEGAL_REQUEST;
- task->task_se_cmd->transport_error_status =
- PYX_TRANSPORT_ILLEGAL_REQUEST;
+ task->task_se_cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
}
transport_complete_task(task, good);
@@ -693,7 +678,7 @@ static void target_complete_failure_work(struct work_struct *work)
{
struct se_cmd *cmd = container_of(work, struct se_cmd, work);
- transport_generic_request_failure(cmd, 1, 1);
+ transport_generic_request_failure(cmd);
}
/* transport_complete_task():
@@ -706,12 +691,6 @@ void transport_complete_task(struct se_task *task, int success)
struct se_cmd *cmd = task->task_se_cmd;
struct se_device *dev = cmd->se_dev;
unsigned long flags;
-#if 0
- pr_debug("task: %p CDB: 0x%02x obj_ptr: %p\n", task,
- cmd->t_task_cdb[0], dev);
-#endif
- if (dev)
- atomic_inc(&dev->depth_left);
spin_lock_irqsave(&cmd->t_state_lock, flags);
task->task_flags &= ~TF_ACTIVE;
@@ -724,7 +703,7 @@ void transport_complete_task(struct se_task *task, int success)
if (dev && dev->transport->transport_complete) {
if (dev->transport->transport_complete(task) != 0) {
cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
- task->task_sense = 1;
+ task->task_flags |= TF_HAS_SENSE;
success = 1;
}
}
@@ -753,12 +732,7 @@ void transport_complete_task(struct se_task *task, int success)
}
if (cmd->t_tasks_failed) {
- if (!task->task_error_status) {
- task->task_error_status =
- PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
- cmd->transport_error_status =
- PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
- }
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
INIT_WORK(&cmd->work, target_complete_failure_work);
} else {
atomic_set(&cmd->t_transport_complete, 1);
@@ -833,7 +807,7 @@ static void __transport_add_task_to_execute_queue(
head_of_queue = transport_add_task_check_sam_attr(task, task_prev, dev);
atomic_inc(&dev->execute_tasks);
- if (atomic_read(&task->task_state_active))
+ if (task->t_state_active)
return;
/*
* Determine if this task needs to go to HEAD_OF_QUEUE for the
@@ -847,7 +821,7 @@ static void __transport_add_task_to_execute_queue(
else
list_add_tail(&task->t_state_list, &dev->state_task_list);
- atomic_set(&task->task_state_active, 1);
+ task->t_state_active = true;
pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
task->task_se_cmd->se_tfo->get_task_tag(task->task_se_cmd),
@@ -862,29 +836,26 @@ static void transport_add_tasks_to_state_queue(struct se_cmd *cmd)
spin_lock_irqsave(&cmd->t_state_lock, flags);
list_for_each_entry(task, &cmd->t_task_list, t_list) {
- if (atomic_read(&task->task_state_active))
- continue;
-
spin_lock(&dev->execute_task_lock);
- list_add_tail(&task->t_state_list, &dev->state_task_list);
- atomic_set(&task->task_state_active, 1);
-
- pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
- task->task_se_cmd->se_tfo->get_task_tag(
- task->task_se_cmd), task, dev);
-
+ if (!task->t_state_active) {
+ list_add_tail(&task->t_state_list,
+ &dev->state_task_list);
+ task->t_state_active = true;
+
+ pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
+ task->task_se_cmd->se_tfo->get_task_tag(
+ task->task_se_cmd), task, dev);
+ }
spin_unlock(&dev->execute_task_lock);
}
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}
-static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
+static void __transport_add_tasks_from_cmd(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
struct se_task *task, *task_prev = NULL;
- unsigned long flags;
- spin_lock_irqsave(&dev->execute_task_lock, flags);
list_for_each_entry(task, &cmd->t_task_list, t_list) {
if (!list_empty(&task->t_execute_list))
continue;
@@ -895,6 +866,15 @@ static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
__transport_add_task_to_execute_queue(task, task_prev, dev);
task_prev = task;
}
+}
+
+static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
+{
+ unsigned long flags;
+ struct se_device *dev = cmd->se_dev;
+
+ spin_lock_irqsave(&dev->execute_task_lock, flags);
+ __transport_add_tasks_from_cmd(cmd);
spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}
@@ -905,7 +885,7 @@ void __transport_remove_task_from_execute_queue(struct se_task *task,
atomic_dec(&dev->execute_tasks);
}
-void transport_remove_task_from_execute_queue(
+static void transport_remove_task_from_execute_queue(
struct se_task *task,
struct se_device *dev)
{
@@ -992,9 +972,8 @@ void transport_dump_dev_state(
break;
}
- *bl += sprintf(b + *bl, " Execute/Left/Max Queue Depth: %d/%d/%d",
- atomic_read(&dev->execute_tasks), atomic_read(&dev->depth_left),
- dev->queue_depth);
+ *bl += sprintf(b + *bl, " Execute/Max Queue Depth: %d/%d",
+ atomic_read(&dev->execute_tasks), dev->queue_depth);
*bl += sprintf(b + *bl, " SectorSize: %u MaxSectors: %u\n",
dev->se_sub_dev->se_dev_attrib.block_size, dev->se_sub_dev->se_dev_attrib.max_sectors);
*bl += sprintf(b + *bl, " ");
@@ -1335,29 +1314,20 @@ struct se_device *transport_add_device_to_core_hba(
dev->se_hba = hba;
dev->se_sub_dev = se_dev;
dev->transport = transport;
- atomic_set(&dev->active_cmds, 0);
INIT_LIST_HEAD(&dev->dev_list);
INIT_LIST_HEAD(&dev->dev_sep_list);
INIT_LIST_HEAD(&dev->dev_tmr_list);
INIT_LIST_HEAD(&dev->execute_task_list);
INIT_LIST_HEAD(&dev->delayed_cmd_list);
- INIT_LIST_HEAD(&dev->ordered_cmd_list);
INIT_LIST_HEAD(&dev->state_task_list);
INIT_LIST_HEAD(&dev->qf_cmd_list);
spin_lock_init(&dev->execute_task_lock);
spin_lock_init(&dev->delayed_cmd_lock);
- spin_lock_init(&dev->ordered_cmd_lock);
- spin_lock_init(&dev->state_task_lock);
- spin_lock_init(&dev->dev_alua_lock);
spin_lock_init(&dev->dev_reservation_lock);
spin_lock_init(&dev->dev_status_lock);
- spin_lock_init(&dev->dev_status_thr_lock);
spin_lock_init(&dev->se_port_lock);
spin_lock_init(&dev->se_tmr_lock);
spin_lock_init(&dev->qf_cmd_lock);
-
- dev->queue_depth = dev_limits->queue_depth;
- atomic_set(&dev->depth_left, dev->queue_depth);
atomic_set(&dev->dev_ordered_id, 0);
se_dev_set_default_attribs(dev, dev_limits);
@@ -1507,7 +1477,6 @@ void transport_init_se_cmd(
{
INIT_LIST_HEAD(&cmd->se_lun_node);
INIT_LIST_HEAD(&cmd->se_delayed_node);
- INIT_LIST_HEAD(&cmd->se_ordered_node);
INIT_LIST_HEAD(&cmd->se_qf_node);
INIT_LIST_HEAD(&cmd->se_queue_node);
INIT_LIST_HEAD(&cmd->se_cmd_list);
@@ -1573,6 +1542,8 @@ int transport_generic_allocate_tasks(
pr_err("Received SCSI CDB with command_size: %d that"
" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
return -EINVAL;
}
/*
@@ -1588,6 +1559,9 @@ int transport_generic_allocate_tasks(
" %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
scsi_command_size(cdb),
(unsigned long)sizeof(cmd->__t_task_cdb));
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
return -ENOMEM;
}
} else
@@ -1658,15 +1632,87 @@ int transport_handle_cdb_direct(
* and call transport_generic_request_failure() if necessary..
*/
ret = transport_generic_new_cmd(cmd);
- if (ret < 0) {
- cmd->transport_error_status = ret;
- transport_generic_request_failure(cmd, 0,
- (cmd->data_direction != DMA_TO_DEVICE));
- }
+ if (ret < 0)
+ transport_generic_request_failure(cmd);
+
return 0;
}
EXPORT_SYMBOL(transport_handle_cdb_direct);
+/**
+ * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd
+ *
+ * @se_cmd: command descriptor to submit
+ * @se_sess: associated se_sess for endpoint
+ * @cdb: pointer to SCSI CDB
+ * @sense: pointer to SCSI sense buffer
+ * @unpacked_lun: unpacked LUN to reference for struct se_lun
+ * @data_length: fabric expected data transfer length
+ * @task_attr: SAM task attribute
+ * @data_dir: DMA data direction
+ * @flags: flags for command submission from target_sc_flags_tables
+ *
+ * This may only be called from process context, and also currently
+ * assumes internal allocation of fabric payload buffer by target-core.
+ **/
+int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
+ unsigned char *cdb, unsigned char *sense, u32 unpacked_lun,
+ u32 data_length, int task_attr, int data_dir, int flags)
+{
+ struct se_portal_group *se_tpg;
+ int rc;
+
+ se_tpg = se_sess->se_tpg;
+ BUG_ON(!se_tpg);
+ BUG_ON(se_cmd->se_tfo || se_cmd->se_sess);
+ BUG_ON(in_interrupt());
+ /*
+ * Initialize se_cmd for target operation. From this point
+ * exceptions are handled by sending exception status via
+ * target_core_fabric_ops->queue_status() callback
+ */
+ transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
+ data_length, data_dir, task_attr, sense);
+ /*
+ * Obtain struct se_cmd->cmd_kref reference and add new cmd to
+ * se_sess->sess_cmd_list. A second kref_get here is necessary
+ * for fabrics using TARGET_SCF_ACK_KREF that expect a second
+ * kref_put() to happen during fabric packet acknowledgement.
+ */
+ target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
+ /*
+ * Signal bidirectional data payloads to target-core
+ */
+ if (flags & TARGET_SCF_BIDI_OP)
+ se_cmd->se_cmd_flags |= SCF_BIDI;
+ /*
+ * Locate se_lun pointer and attach it to struct se_cmd
+ */
+ if (transport_lookup_cmd_lun(se_cmd, unpacked_lun) < 0)
+ goto out_check_cond;
+ /*
+ * Sanitize CDBs via transport_generic_cmd_sequencer() and
+ * allocate the necessary tasks to complete the received CDB+data
+ */
+ rc = transport_generic_allocate_tasks(se_cmd, cdb);
+ if (rc != 0)
+ goto out_check_cond;
+ /*
+ * Dispatch se_cmd descriptor to se_lun->lun_se_dev backend
+ * for immediate execution of READs, otherwise wait for
+ * transport_generic_handle_data() to be called for WRITEs
+ * when fabric has filled the incoming buffer.
+ */
+ transport_handle_cdb_direct(se_cmd);
+ return 0;
+
+out_check_cond:
+ transport_send_check_condition_and_sense(se_cmd,
+ se_cmd->scsi_sense_reason, 0);
+ return 0;
+}
+EXPORT_SYMBOL(target_submit_cmd);
+
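As an aside for reviewers, here is a minimal sketch of how a fabric frontend might call the new target_submit_cmd() documented above. The my_cmd container and its fields are hypothetical; only the target_submit_cmd() signature comes from this hunk (the tcm_fc ft_send_work() conversion further down in this diff is the real in-tree user).

static void my_fabric_submit(struct my_cmd *cmd)
{
	/* LUN lookup, CDB sequencing and CHECK_CONDITION on failure are
	 * all handled inside target_submit_cmd(); process context only. */
	target_submit_cmd(&cmd->se_cmd, cmd->sess->se_sess, cmd->cdb,
			  &cmd->sense_buf[0], cmd->unpacked_lun,
			  cmd->data_len, MSG_SIMPLE_TAG, cmd->data_dir, 0);
}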
/*
* Used by fabric module frontends defining a TFO->new_cmd_map() caller
* to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to
@@ -1798,20 +1844,16 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
/*
* Handle SAM-esque emulation for generic transport request failures.
*/
-static void transport_generic_request_failure(
- struct se_cmd *cmd,
- int complete,
- int sc)
+static void transport_generic_request_failure(struct se_cmd *cmd)
{
int ret = 0;
pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
" CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
cmd->t_task_cdb[0]);
- pr_debug("-----[ i_state: %d t_state: %d transport_error_status: %d\n",
+ pr_debug("-----[ i_state: %d t_state: %d scsi_sense_reason: %d\n",
cmd->se_tfo->get_cmd_state(cmd),
- cmd->t_state,
- cmd->transport_error_status);
+ cmd->t_state, cmd->scsi_sense_reason);
pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d"
" t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --"
" t_transport_active: %d t_transport_stop: %d"
@@ -1829,46 +1871,19 @@ static void transport_generic_request_failure(
if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
transport_complete_task_attr(cmd);
- if (complete) {
- cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE;
- }
-
- switch (cmd->transport_error_status) {
- case PYX_TRANSPORT_UNKNOWN_SAM_OPCODE:
- cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
- break;
- case PYX_TRANSPORT_REQ_TOO_MANY_SECTORS:
- cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY;
- break;
- case PYX_TRANSPORT_INVALID_CDB_FIELD:
- cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- break;
- case PYX_TRANSPORT_INVALID_PARAMETER_LIST:
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- break;
- case PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES:
- if (!sc)
- transport_new_cmd_failure(cmd);
- /*
- * Currently for PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES,
- * we force this session to fall back to session
- * recovery.
- */
- cmd->se_tfo->fall_back_to_erl0(cmd->se_sess);
- cmd->se_tfo->stop_session(cmd->se_sess, 0, 0);
-
- goto check_stop;
- case PYX_TRANSPORT_LU_COMM_FAILURE:
- case PYX_TRANSPORT_ILLEGAL_REQUEST:
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- break;
- case PYX_TRANSPORT_UNKNOWN_MODE_PAGE:
- cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE;
- break;
- case PYX_TRANSPORT_WRITE_PROTECTED:
- cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
+ switch (cmd->scsi_sense_reason) {
+ case TCM_NON_EXISTENT_LUN:
+ case TCM_UNSUPPORTED_SCSI_OPCODE:
+ case TCM_INVALID_CDB_FIELD:
+ case TCM_INVALID_PARAMETER_LIST:
+ case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
+ case TCM_UNKNOWN_MODE_PAGE:
+ case TCM_WRITE_PROTECTED:
+ case TCM_CHECK_CONDITION_ABORT_CMD:
+ case TCM_CHECK_CONDITION_UNIT_ATTENTION:
+ case TCM_CHECK_CONDITION_NOT_READY:
break;
- case PYX_TRANSPORT_RESERVATION_CONFLICT:
+ case TCM_RESERVATION_CONFLICT:
/*
* No SENSE Data payload for this case, set SCSI Status
* and queue the response to $FABRIC_MOD.
@@ -1893,15 +1908,9 @@ static void transport_generic_request_failure(
if (ret == -EAGAIN || ret == -ENOMEM)
goto queue_full;
goto check_stop;
- case PYX_TRANSPORT_USE_SENSE_REASON:
- /*
- * struct se_cmd->scsi_sense_reason already set
- */
- break;
default:
pr_err("Unknown transport error for CDB 0x%02x: %d\n",
- cmd->t_task_cdb[0],
- cmd->transport_error_status);
+ cmd->t_task_cdb[0], cmd->scsi_sense_reason);
cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
break;
}
@@ -1912,14 +1921,10 @@ static void transport_generic_request_failure(
 * transport_send_check_condition_and_sense() after handling
 * possible unsolicited write data payloads.
*/
- if (!sc && !cmd->se_tfo->new_cmd_map)
- transport_new_cmd_failure(cmd);
- else {
- ret = transport_send_check_condition_and_sense(cmd,
- cmd->scsi_sense_reason, 0);
- if (ret == -EAGAIN || ret == -ENOMEM)
- goto queue_full;
- }
+ ret = transport_send_check_condition_and_sense(cmd,
+ cmd->scsi_sense_reason, 0);
+ if (ret == -EAGAIN || ret == -ENOMEM)
+ goto queue_full;
check_stop:
transport_lun_remove_cmd(cmd);
@@ -1974,18 +1979,6 @@ static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd)
spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
}
-static inline int transport_tcq_window_closed(struct se_device *dev)
-{
- if (dev->dev_tcq_window_closed++ <
- PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD) {
- msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT);
- } else
- msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG);
-
- wake_up_interruptible(&dev->dev_queue_obj.thread_wq);
- return 0;
-}
-
/*
* Called from Fabric Module context from transport_execute_tasks()
*
@@ -2002,19 +1995,12 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd)
* to allow the passed struct se_cmd list of tasks to the front of the list.
*/
if (cmd->sam_task_attr == MSG_HEAD_TAG) {
- atomic_inc(&cmd->se_dev->dev_hoq_count);
- smp_mb__after_atomic_inc();
pr_debug("Added HEAD_OF_QUEUE for CDB:"
" 0x%02x, se_ordered_id: %u\n",
cmd->t_task_cdb[0],
cmd->se_ordered_id);
return 1;
} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
- spin_lock(&cmd->se_dev->ordered_cmd_lock);
- list_add_tail(&cmd->se_ordered_node,
- &cmd->se_dev->ordered_cmd_list);
- spin_unlock(&cmd->se_dev->ordered_cmd_lock);
-
atomic_inc(&cmd->se_dev->dev_ordered_sync);
smp_mb__after_atomic_inc();
@@ -2075,13 +2061,7 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd)
static int transport_execute_tasks(struct se_cmd *cmd)
{
int add_tasks;
-
- if (se_dev_check_online(cmd->se_orig_obj_ptr) != 0) {
- cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE;
- transport_generic_request_failure(cmd, 0, 1);
- return 0;
- }
-
+ struct se_device *se_dev = cmd->se_dev;
/*
* Call transport_cmd_check_stop() to see if a fabric exception
* has occurred that prevents execution.
@@ -2095,19 +2075,16 @@ static int transport_execute_tasks(struct se_cmd *cmd)
if (!add_tasks)
goto execute_tasks;
/*
- * This calls transport_add_tasks_from_cmd() to handle
- * HEAD_OF_QUEUE ordering for SAM Task Attribute emulation
- * (if enabled) in __transport_add_task_to_execute_queue() and
- * transport_add_task_check_sam_attr().
+ * __transport_execute_tasks() -> __transport_add_tasks_from_cmd()
+ * adds associated se_tasks while holding dev->execute_task_lock
+ * before I/O dispatch to avoid a double spinlock access.
*/
- transport_add_tasks_from_cmd(cmd);
+ __transport_execute_tasks(se_dev, cmd);
+ return 0;
}
- /*
- * Kick the execution queue for the cmd associated struct se_device
- * storage object.
- */
+
execute_tasks:
- __transport_execute_tasks(cmd->se_dev);
+ __transport_execute_tasks(se_dev, NULL);
return 0;
}
@@ -2117,24 +2094,18 @@ execute_tasks:
*
* Called from transport_processing_thread()
*/
-static int __transport_execute_tasks(struct se_device *dev)
+static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *new_cmd)
{
int error;
struct se_cmd *cmd = NULL;
struct se_task *task = NULL;
unsigned long flags;
- /*
- * Check if there is enough room in the device and HBA queue to send
- * struct se_tasks to the selected transport.
- */
check_depth:
- if (!atomic_read(&dev->depth_left))
- return transport_tcq_window_closed(dev);
-
- dev->dev_tcq_window_closed = 0;
-
spin_lock_irq(&dev->execute_task_lock);
+ if (new_cmd != NULL)
+ __transport_add_tasks_from_cmd(new_cmd);
+
if (list_empty(&dev->execute_task_list)) {
spin_unlock_irq(&dev->execute_task_lock);
return 0;
@@ -2144,10 +2115,7 @@ check_depth:
__transport_remove_task_from_execute_queue(task, dev);
spin_unlock_irq(&dev->execute_task_lock);
- atomic_dec(&dev->depth_left);
-
cmd = task->task_se_cmd;
-
spin_lock_irqsave(&cmd->t_state_lock, flags);
task->task_flags |= (TF_ACTIVE | TF_SENT);
atomic_inc(&cmd->t_task_cdbs_sent);
@@ -2163,34 +2131,20 @@ check_depth:
else
error = dev->transport->do_task(task);
if (error != 0) {
- cmd->transport_error_status = error;
spin_lock_irqsave(&cmd->t_state_lock, flags);
task->task_flags &= ~TF_ACTIVE;
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
atomic_set(&cmd->t_transport_sent, 0);
transport_stop_tasks_for_cmd(cmd);
- atomic_inc(&dev->depth_left);
- transport_generic_request_failure(cmd, 0, 1);
+ transport_generic_request_failure(cmd);
}
+ new_cmd = NULL;
goto check_depth;
return 0;
}
-void transport_new_cmd_failure(struct se_cmd *se_cmd)
-{
- unsigned long flags;
- /*
- * Any unsolicited data will get dumped for failed command inside of
- * the fabric plugin
- */
- spin_lock_irqsave(&se_cmd->t_state_lock, flags);
- se_cmd->se_cmd_flags |= SCF_SE_CMD_FAILED;
- se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
-}
-
static inline u32 transport_get_sectors_6(
unsigned char *cdb,
struct se_cmd *cmd,
@@ -2213,10 +2167,15 @@ static inline u32 transport_get_sectors_6(
/*
* Everything else assume TYPE_DISK Sector CDB location.
- * Use 8-bit sector value.
+ * Use 8-bit sector value. SBC-3 says:
+ *
+ * A TRANSFER LENGTH field set to zero specifies that 256
+ * logical blocks shall be written. Any other value
+ * specifies the number of logical blocks that shall be
+ * written.
*/
type_disk:
- return (u32)cdb[4];
+ return cdb[4] ? : 256;
}
static inline u32 transport_get_sectors_10(
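A quick illustration of the SBC-3 rule the new return statement encodes; the helper below is hypothetical and only restates the ?: shorthand above.

static inline u32 sbc_6byte_transfer_length(unsigned char len_byte)
{
	/* SBC-3: a 6-byte CDB TRANSFER LENGTH of 0 means 256 blocks */
	return len_byte ? len_byte : 256;
}
/* sbc_6byte_transfer_length(0) == 256, sbc_6byte_transfer_length(16) == 16 */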
@@ -2421,7 +2380,7 @@ static int transport_get_sense_data(struct se_cmd *cmd)
list_for_each_entry_safe(task, task_tmp,
&cmd->t_task_list, t_list) {
- if (!task->task_sense)
+ if (!(task->task_flags & TF_HAS_SENSE))
continue;
if (!dev->transport->get_sense_buffer) {
@@ -2460,27 +2419,6 @@ static int transport_get_sense_data(struct se_cmd *cmd)
return -1;
}
-static int
-transport_handle_reservation_conflict(struct se_cmd *cmd)
-{
- cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
- cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
- /*
- * For UA Interlock Code 11b, a RESERVATION CONFLICT will
- * establish a UNIT ATTENTION with PREVIOUS RESERVATION
- * CONFLICT STATUS.
- *
- * See spc4r17, section 7.4.6 Control Mode Page, Table 349
- */
- if (cmd->se_sess &&
- cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2)
- core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
- cmd->orig_fe_lun, 0x2C,
- ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
- return -EINVAL;
-}
-
static inline long long transport_dev_end_lba(struct se_device *dev)
{
return dev->transport->get_blocks(dev) + 1;
@@ -2595,8 +2533,12 @@ static int transport_generic_cmd_sequencer(
*/
if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type) != 0) {
if (su_dev->t10_pr.pr_ops.t10_seq_non_holder(
- cmd, cdb, pr_reg_type) != 0)
- return transport_handle_reservation_conflict(cmd);
+ cmd, cdb, pr_reg_type) != 0) {
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EBUSY;
+ }
/*
* This means the CDB is allowed for the SCSI Initiator port
* when said port is *NOT* holding the legacy SPC-2 or
@@ -2658,7 +2600,8 @@ static int transport_generic_cmd_sequencer(
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
cmd->t_task_lba = transport_lba_32(cdb);
- cmd->t_tasks_fua = (cdb[1] & 0x8);
+ if (cdb[1] & 0x8)
+ cmd->se_cmd_flags |= SCF_FUA;
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
break;
case WRITE_12:
@@ -2667,7 +2610,8 @@ static int transport_generic_cmd_sequencer(
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
cmd->t_task_lba = transport_lba_32(cdb);
- cmd->t_tasks_fua = (cdb[1] & 0x8);
+ if (cdb[1] & 0x8)
+ cmd->se_cmd_flags |= SCF_FUA;
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
break;
case WRITE_16:
@@ -2676,12 +2620,13 @@ static int transport_generic_cmd_sequencer(
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
cmd->t_task_lba = transport_lba_64(cdb);
- cmd->t_tasks_fua = (cdb[1] & 0x8);
+ if (cdb[1] & 0x8)
+ cmd->se_cmd_flags |= SCF_FUA;
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
break;
case XDWRITEREAD_10:
if ((cmd->data_direction != DMA_TO_DEVICE) ||
- !(cmd->t_tasks_bidi))
+ !(cmd->se_cmd_flags & SCF_BIDI))
goto out_invalid_cdb_field;
sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
if (sector_ret)
@@ -2700,7 +2645,8 @@ static int transport_generic_cmd_sequencer(
* Setup BIDI XOR callback to be run after I/O completion.
*/
cmd->transport_complete_callback = &transport_xor_callback;
- cmd->t_tasks_fua = (cdb[1] & 0x8);
+ if (cdb[1] & 0x8)
+ cmd->se_cmd_flags |= SCF_FUA;
break;
case VARIABLE_LENGTH_CMD:
service_action = get_unaligned_be16(&cdb[8]);
@@ -2728,7 +2674,8 @@ static int transport_generic_cmd_sequencer(
* completion.
*/
cmd->transport_complete_callback = &transport_xor_callback;
- cmd->t_tasks_fua = (cdb[10] & 0x8);
+ if (cdb[1] & 0x8)
+ cmd->se_cmd_flags |= SCF_FUA;
break;
case WRITE_SAME_32:
sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
@@ -3171,18 +3118,13 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
" SIMPLE: %u\n", dev->dev_cur_ordered_id,
cmd->se_ordered_id);
} else if (cmd->sam_task_attr == MSG_HEAD_TAG) {
- atomic_dec(&dev->dev_hoq_count);
- smp_mb__after_atomic_dec();
dev->dev_cur_ordered_id++;
pr_debug("Incremented dev_cur_ordered_id: %u for"
" HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
cmd->se_ordered_id);
} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
- spin_lock(&dev->ordered_cmd_lock);
- list_del(&cmd->se_ordered_node);
atomic_dec(&dev->dev_ordered_sync);
smp_mb__after_atomic_dec();
- spin_unlock(&dev->ordered_cmd_lock);
dev->dev_cur_ordered_id++;
pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
@@ -3433,6 +3375,32 @@ static inline void transport_free_pages(struct se_cmd *cmd)
}
/**
+ * transport_release_cmd - free a command
+ * @cmd: command to free
+ *
+ * This routine unconditionally frees a command, and reference counting
+ * or list removal must be done in the caller.
+ */
+static void transport_release_cmd(struct se_cmd *cmd)
+{
+ BUG_ON(!cmd->se_tfo);
+
+ if (cmd->se_tmr_req)
+ core_tmr_release_req(cmd->se_tmr_req);
+ if (cmd->t_task_cdb != cmd->__t_task_cdb)
+ kfree(cmd->t_task_cdb);
+ /*
+ * If this cmd has been setup with target_get_sess_cmd(), drop
+ * the kref and call ->release_cmd() in kref callback.
+ */
+ if (cmd->check_release != 0) {
+ target_put_sess_cmd(cmd->se_sess, cmd);
+ return;
+ }
+ cmd->se_tfo->release_cmd(cmd);
+}
+
+/**
* transport_put_cmd - release a reference to a command
* @cmd: command to release
*
@@ -3495,6 +3463,18 @@ int transport_generic_map_mem_to_cmd(
if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
(cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) {
+ /*
+ * Reject SCSI data overflow with map_mem_to_cmd() as incoming
+ * scatterlists already have been set to follow what the fabric
+ * passes for the original expected data transfer length.
+ */
+ if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
+ pr_warn("Rejecting SCSI DATA overflow for fabric using"
+ " SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -EINVAL;
+ }
cmd->t_data_sg = sgl;
cmd->t_data_nents = sgl_count;
@@ -3813,7 +3793,7 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
cmd->data_length) {
ret = transport_generic_get_mem(cmd);
if (ret < 0)
- return ret;
+ goto out_fail;
}
/*
@@ -3842,8 +3822,15 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
task_cdbs = transport_allocate_control_task(cmd);
}
- if (task_cdbs <= 0)
+ if (task_cdbs < 0)
goto out_fail;
+ else if (!task_cdbs && (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) {
+ cmd->t_state = TRANSPORT_COMPLETE;
+ atomic_set(&cmd->t_transport_active, 1);
+ INIT_WORK(&cmd->work, target_complete_ok_work);
+ queue_work(target_completion_wq, &cmd->work);
+ return 0;
+ }
if (set_counts) {
atomic_inc(&cmd->t_fe_count);
@@ -3929,7 +3916,7 @@ static int transport_generic_write_pending(struct se_cmd *cmd)
else if (ret < 0)
return ret;
- return PYX_TRANSPORT_WRITE_PENDING;
+ return 1;
queue_full:
pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);
@@ -3938,33 +3925,6 @@ queue_full:
return 0;
}
-/**
- * transport_release_cmd - free a command
- * @cmd: command to free
- *
- * This routine unconditionally frees a command, and reference counting
- * or list removal must be done in the caller.
- */
-void transport_release_cmd(struct se_cmd *cmd)
-{
- BUG_ON(!cmd->se_tfo);
-
- if (cmd->se_tmr_req)
- core_tmr_release_req(cmd->se_tmr_req);
- if (cmd->t_task_cdb != cmd->__t_task_cdb)
- kfree(cmd->t_task_cdb);
- /*
- * Check if target_wait_for_sess_cmds() is expecting to
- * release se_cmd directly here..
- */
- if (cmd->check_release != 0 && cmd->se_tfo->check_release_cmd)
- if (cmd->se_tfo->check_release_cmd(cmd) != 0)
- return;
-
- cmd->se_tfo->release_cmd(cmd);
-}
-EXPORT_SYMBOL(transport_release_cmd);
-
void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
{
if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
@@ -3991,11 +3951,22 @@ EXPORT_SYMBOL(transport_generic_free_cmd);
/* target_get_sess_cmd - Add command to active ->sess_cmd_list
* @se_sess: session to reference
* @se_cmd: command descriptor to add
+ * @ack_kref: Signal that fabric will perform an ack target_put_sess_cmd()
*/
-void target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
+void target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
+ bool ack_kref)
{
unsigned long flags;
+ kref_init(&se_cmd->cmd_kref);
+ /*
+ * Add a second kref if the fabric caller is expecting to handle
+ * fabric acknowledgement that requires two target_put_sess_cmd()
+ * invocations before se_cmd descriptor release.
+ */
+ if (ack_kref == true)
+ kref_get(&se_cmd->cmd_kref);
+
spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
se_cmd->check_release = 1;
@@ -4003,30 +3974,36 @@ void target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
}
EXPORT_SYMBOL(target_get_sess_cmd);
-/* target_put_sess_cmd - Check for active I/O shutdown or list delete
- * @se_sess: session to reference
- * @se_cmd: command descriptor to drop
- */
-int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
+static void target_release_cmd_kref(struct kref *kref)
{
+ struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
+ struct se_session *se_sess = se_cmd->se_sess;
unsigned long flags;
spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
if (list_empty(&se_cmd->se_cmd_list)) {
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
WARN_ON(1);
- return 0;
+ return;
}
-
if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
complete(&se_cmd->cmd_wait_comp);
- return 1;
+ return;
}
list_del(&se_cmd->se_cmd_list);
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
- return 0;
+ se_cmd->se_tfo->release_cmd(se_cmd);
+}
+
+/* target_put_sess_cmd - Check for active I/O shutdown via kref_put
+ * @se_sess: session to reference
+ * @se_cmd: command descriptor to drop
+ */
+int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
+{
+ return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref);
}
EXPORT_SYMBOL(target_put_sess_cmd);
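A rough sketch of the two-reference lifecycle the comments above describe (callers hypothetical; in practice the first put usually arrives via transport_generic_free_cmd() -> transport_release_cmd(), shown earlier in this diff):

	/* submission path: kref_init() plus an extra kref_get() for the ACK */
	target_get_sess_cmd(se_sess, se_cmd, true);

	/* response has been queued to the fabric: drop reference one */
	target_put_sess_cmd(se_sess, se_cmd);

	/* initiator acknowledges the packet: the final put runs
	 * target_release_cmd_kref() and ->release_cmd() */
	target_put_sess_cmd(se_sess, se_cmd);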
@@ -4242,7 +4219,7 @@ check_cond:
static int transport_clear_lun_thread(void *p)
{
- struct se_lun *lun = (struct se_lun *)p;
+ struct se_lun *lun = p;
__transport_clear_lun_from_sessions(lun);
complete(&lun->lun_shutdown_comp);
@@ -4421,6 +4398,7 @@ int transport_send_check_condition_and_sense(
case TCM_NON_EXISTENT_LUN:
/* CURRENT ERROR */
buffer[offset] = 0x70;
+ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
/* ILLEGAL REQUEST */
buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
/* LOGICAL UNIT NOT SUPPORTED */
@@ -4430,6 +4408,7 @@ int transport_send_check_condition_and_sense(
case TCM_SECTOR_COUNT_TOO_MANY:
/* CURRENT ERROR */
buffer[offset] = 0x70;
+ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
/* ILLEGAL REQUEST */
buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
/* INVALID COMMAND OPERATION CODE */
@@ -4438,6 +4417,7 @@ int transport_send_check_condition_and_sense(
case TCM_UNKNOWN_MODE_PAGE:
/* CURRENT ERROR */
buffer[offset] = 0x70;
+ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
/* ILLEGAL REQUEST */
buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
/* INVALID FIELD IN CDB */
@@ -4446,6 +4426,7 @@ int transport_send_check_condition_and_sense(
case TCM_CHECK_CONDITION_ABORT_CMD:
/* CURRENT ERROR */
buffer[offset] = 0x70;
+ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
/* ABORTED COMMAND */
buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
/* BUS DEVICE RESET FUNCTION OCCURRED */
@@ -4455,6 +4436,7 @@ int transport_send_check_condition_and_sense(
case TCM_INCORRECT_AMOUNT_OF_DATA:
/* CURRENT ERROR */
buffer[offset] = 0x70;
+ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
/* ABORTED COMMAND */
buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
/* WRITE ERROR */
@@ -4465,6 +4447,7 @@ int transport_send_check_condition_and_sense(
case TCM_INVALID_CDB_FIELD:
/* CURRENT ERROR */
buffer[offset] = 0x70;
+ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
/* ABORTED COMMAND */
buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
/* INVALID FIELD IN CDB */
@@ -4473,6 +4456,7 @@ int transport_send_check_condition_and_sense(
case TCM_INVALID_PARAMETER_LIST:
/* CURRENT ERROR */
buffer[offset] = 0x70;
+ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
/* ABORTED COMMAND */
buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
/* INVALID FIELD IN PARAMETER LIST */
@@ -4481,6 +4465,7 @@ int transport_send_check_condition_and_sense(
case TCM_UNEXPECTED_UNSOLICITED_DATA:
/* CURRENT ERROR */
buffer[offset] = 0x70;
+ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
/* ABORTED COMMAND */
buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
/* WRITE ERROR */
@@ -4491,6 +4476,7 @@ int transport_send_check_condition_and_sense(
case TCM_SERVICE_CRC_ERROR:
/* CURRENT ERROR */
buffer[offset] = 0x70;
+ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
/* ABORTED COMMAND */
buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
/* PROTOCOL SERVICE CRC ERROR */
@@ -4501,6 +4487,7 @@ int transport_send_check_condition_and_sense(
case TCM_SNACK_REJECTED:
/* CURRENT ERROR */
buffer[offset] = 0x70;
+ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
/* ABORTED COMMAND */
buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
/* READ ERROR */
@@ -4511,6 +4498,7 @@ int transport_send_check_condition_and_sense(
case TCM_WRITE_PROTECTED:
/* CURRENT ERROR */
buffer[offset] = 0x70;
+ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
/* DATA PROTECT */
buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT;
/* WRITE PROTECTED */
@@ -4519,6 +4507,7 @@ int transport_send_check_condition_and_sense(
case TCM_CHECK_CONDITION_UNIT_ATTENTION:
/* CURRENT ERROR */
buffer[offset] = 0x70;
+ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
/* UNIT ATTENTION */
buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
@@ -4528,6 +4517,7 @@ int transport_send_check_condition_and_sense(
case TCM_CHECK_CONDITION_NOT_READY:
/* CURRENT ERROR */
buffer[offset] = 0x70;
+ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
/* Not Ready */
buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY;
transport_get_sense_codes(cmd, &asc, &ascq);
@@ -4538,6 +4528,7 @@ int transport_send_check_condition_and_sense(
default:
/* CURRENT ERROR */
buffer[offset] = 0x70;
+ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
/* ILLEGAL REQUEST */
buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
/* LOGICAL UNIT COMMUNICATION FAILURE */
@@ -4602,9 +4593,6 @@ void transport_send_task_abort(struct se_cmd *cmd)
if (cmd->se_tfo->write_pending_status(cmd) != 0) {
atomic_inc(&cmd->t_transport_aborted);
smp_mb__after_atomic_inc();
- cmd->scsi_status = SAM_STAT_TASK_ABORTED;
- transport_new_cmd_failure(cmd);
- return;
}
}
cmd->scsi_status = SAM_STAT_TASK_ABORTED;
@@ -4616,11 +4604,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
cmd->se_tfo->queue_status(cmd);
}
-/* transport_generic_do_tmr():
- *
- *
- */
-int transport_generic_do_tmr(struct se_cmd *cmd)
+static int transport_generic_do_tmr(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
struct se_tmr_req *tmr = cmd->se_tmr_req;
@@ -4668,9 +4652,7 @@ static int transport_processing_thread(void *param)
{
int ret;
struct se_cmd *cmd;
- struct se_device *dev = (struct se_device *) param;
-
- set_user_nice(current, -20);
+ struct se_device *dev = param;
while (!kthread_should_stop()) {
ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq,
@@ -4680,8 +4662,6 @@ static int transport_processing_thread(void *param)
goto out;
get_cmd:
- __transport_execute_tasks(dev);
-
cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj);
if (!cmd)
continue;
@@ -4698,18 +4678,13 @@ get_cmd:
}
ret = cmd->se_tfo->new_cmd_map(cmd);
if (ret < 0) {
- cmd->transport_error_status = ret;
- transport_generic_request_failure(cmd,
- 0, (cmd->data_direction !=
- DMA_TO_DEVICE));
+ transport_generic_request_failure(cmd);
break;
}
ret = transport_generic_new_cmd(cmd);
if (ret < 0) {
- cmd->transport_error_status = ret;
- transport_generic_request_failure(cmd,
- 0, (cmd->data_direction !=
- DMA_TO_DEVICE));
+ transport_generic_request_failure(cmd);
+ break;
}
break;
case TRANSPORT_PROCESS_WRITE:
diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c
index 50a480db7a66..3e12f6bcfa10 100644
--- a/drivers/target/target_core_ua.c
+++ b/drivers/target/target_core_ua.c
@@ -30,13 +30,11 @@
#include <scsi/scsi_cmnd.h>
#include <target/target_core_base.h>
-#include <target/target_core_device.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>
+#include "target_core_internal.h"
#include "target_core_alua.h"
-#include "target_core_hba.h"
#include "target_core_pr.h"
#include "target_core_ua.h"
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 4fac37c4c615..addc18f727ea 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -39,12 +39,8 @@
#include <scsi/fc_encode.h>
#include <target/target_core_base.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
-#include <target/target_core_device.h>
-#include <target/target_core_tpg.h>
+#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>
-#include <target/target_core_tmr.h>
#include <target/configfs_macros.h>
#include "tcm_fc.h"
@@ -200,7 +196,7 @@ int ft_write_pending(struct se_cmd *se_cmd)
lport = ep->lp;
fp = fc_frame_alloc(lport, sizeof(*txrdy));
if (!fp)
- return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+ return -ENOMEM; /* Signal QUEUE_FULL */
txrdy = fc_frame_payload_get(fp, sizeof(*txrdy));
memset(txrdy, 0, sizeof(*txrdy));
@@ -367,6 +363,11 @@ static void ft_send_tm(struct ft_cmd *cmd)
struct ft_sess *sess;
u8 tm_func;
+ transport_init_se_cmd(&cmd->se_cmd, &ft_configfs->tf_ops,
+ cmd->sess->se_sess, 0, DMA_NONE, 0,
+ &cmd->ft_sense_buffer[0]);
+ target_get_sess_cmd(cmd->sess->se_sess, &cmd->se_cmd, false);
+
fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp));
switch (fcp->fc_tm_flags) {
@@ -420,7 +421,6 @@ static void ft_send_tm(struct ft_cmd *cmd)
sess = cmd->sess;
transport_send_check_condition_and_sense(&cmd->se_cmd,
cmd->se_cmd.scsi_sense_reason, 0);
- transport_generic_free_cmd(&cmd->se_cmd, 0);
ft_sess_put(sess);
return;
}
@@ -536,7 +536,6 @@ static void ft_send_work(struct work_struct *work)
{
struct ft_cmd *cmd = container_of(work, struct ft_cmd, work);
struct fc_frame_header *fh = fc_frame_header_get(cmd->req_frame);
- struct se_cmd *se_cmd;
struct fcp_cmnd *fcp;
int data_dir = 0;
u32 data_len;
@@ -591,15 +590,6 @@ static void ft_send_work(struct work_struct *work)
data_len = ntohl(fcp->fc_dl);
cmd->cdb = fcp->fc_cdb;
}
-
- se_cmd = &cmd->se_cmd;
- /*
- * Initialize struct se_cmd descriptor from target_core_mod
- * infrastructure
- */
- transport_init_se_cmd(se_cmd, &ft_configfs->tf_ops, cmd->sess->se_sess,
- data_len, data_dir, task_attr,
- &cmd->ft_sense_buffer[0]);
/*
* Check for FCP task management flags
*/
@@ -607,39 +597,20 @@ static void ft_send_work(struct work_struct *work)
ft_send_tm(cmd);
return;
}
-
fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd);
-
cmd->lun = scsilun_to_int((struct scsi_lun *)fcp->fc_lun);
- ret = transport_lookup_cmd_lun(&cmd->se_cmd, cmd->lun);
+ /*
+ * Use a single se_cmd->cmd_kref as we expect to release se_cmd
+ * directly from ft_check_stop_free callback in response path.
+ */
+ ret = target_submit_cmd(&cmd->se_cmd, cmd->sess->se_sess, cmd->cdb,
+ &cmd->ft_sense_buffer[0], cmd->lun, data_len,
+ task_attr, data_dir, 0);
+ pr_debug("r_ctl %x alloc target_submit_cmd %d\n", fh->fh_r_ctl, ret);
if (ret < 0) {
ft_dump_cmd(cmd, __func__);
- transport_send_check_condition_and_sense(&cmd->se_cmd,
- cmd->se_cmd.scsi_sense_reason, 0);
- return;
- }
-
- ret = transport_generic_allocate_tasks(se_cmd, cmd->cdb);
-
- pr_debug("r_ctl %x alloc task ret %d\n", fh->fh_r_ctl, ret);
- ft_dump_cmd(cmd, __func__);
-
- if (ret == -ENOMEM) {
- transport_send_check_condition_and_sense(se_cmd,
- TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
- transport_generic_free_cmd(se_cmd, 0);
- return;
- }
- if (ret == -EINVAL) {
- if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
- ft_queue_status(se_cmd);
- else
- transport_send_check_condition_and_sense(se_cmd,
- se_cmd->scsi_sense_reason, 0);
- transport_generic_free_cmd(se_cmd, 0);
return;
}
- transport_handle_cdb_direct(se_cmd);
return;
err:
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index 5f770412ca40..73852fbc857b 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -41,12 +41,8 @@
#include <scsi/libfc.h>
#include <target/target_core_base.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_fabric.h>
#include <target/target_core_fabric_configfs.h>
-#include <target/target_core_fabric_lib.h>
-#include <target/target_core_device.h>
-#include <target/target_core_tpg.h>
#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>
@@ -436,8 +432,7 @@ static void ft_del_lport(struct se_wwn *wwn)
struct ft_lport_acl *lacl = container_of(wwn,
struct ft_lport_acl, fc_lport_wwn);
- pr_debug("del lport %s\n",
- config_item_name(&wwn->wwn_group.cg_item));
+ pr_debug("del lport %s\n", lacl->name);
mutex_lock(&ft_lport_lock);
list_del(&lacl->list);
mutex_unlock(&ft_lport_lock);
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
index 1369b1cb103d..d8cabc21036d 100644
--- a/drivers/target/tcm_fc/tfc_io.c
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -48,10 +48,7 @@
#include <scsi/fc_encode.h>
#include <target/target_core_base.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
-#include <target/target_core_device.h>
-#include <target/target_core_tpg.h>
+#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index 326921385aff..4c0507cf808c 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -40,10 +40,7 @@
#include <scsi/libfc.h>
#include <target/target_core_base.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
-#include <target/target_core_device.h>
-#include <target/target_core_tpg.h>
+#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>
diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
index cea56033b34c..a09ce3ef5d74 100644
--- a/drivers/tty/n_hdlc.c
+++ b/drivers/tty/n_hdlc.c
@@ -417,7 +417,7 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty)
__FILE__,__LINE__,tbuf,tbuf->count);
/* Send the next block of data to device */
- tty->flags |= (1 << TTY_DO_WRITE_WAKEUP);
+ set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
actual = tty->ops->write(tty, tbuf->buf, tbuf->count);
/* rollback was possible and has been done */
@@ -459,7 +459,7 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty)
}
if (!tbuf)
- tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP);
+ clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
/* Clear the re-entry flag */
spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock, flags);
@@ -491,7 +491,7 @@ static void n_hdlc_tty_wakeup(struct tty_struct *tty)
return;
if (tty != n_hdlc->tty) {
- tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP);
+ clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
return;
}
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 39d6ab6551e0..d2256d08ee7e 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -61,7 +61,7 @@
* controlling the space in the read buffer.
*/
#define TTY_THRESHOLD_THROTTLE 128 /* now based on remaining room */
-#define TTY_THRESHOLD_UNTHROTTLE 128
+#define TTY_THRESHOLD_UNTHROTTLE 128
/*
* Special byte codes used in the echo buffer to represent operations
@@ -405,7 +405,7 @@ static ssize_t process_output_block(struct tty_struct *tty,
const unsigned char *buf, unsigned int nr)
{
int space;
- int i;
+ int i;
const unsigned char *cp;
mutex_lock(&tty->output_lock);
@@ -1607,7 +1607,7 @@ static inline int input_available_p(struct tty_struct *tty, int amt)
}
/**
- * copy_from_read_buf - copy read data directly
+ * copy_from_read_buf - copy read data directly
* @tty: terminal device
* @b: user data
* @nr: size of data
@@ -1909,7 +1909,7 @@ do_it_again:
if (nr)
clear_bit(TTY_PUSH, &tty->flags);
} else if (test_and_clear_bit(TTY_PUSH, &tty->flags))
- goto do_it_again;
+ goto do_it_again;
n_tty_set_room(tty);
return retval;
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index e18604b3fc7d..d8653ab6f498 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -446,19 +446,8 @@ static inline void legacy_pty_init(void) { }
int pty_limit = NR_UNIX98_PTY_DEFAULT;
static int pty_limit_min;
static int pty_limit_max = NR_UNIX98_PTY_MAX;
-static int tty_count;
static int pty_count;
-static inline void pty_inc_count(void)
-{
- pty_count = (++tty_count) / 2;
-}
-
-static inline void pty_dec_count(void)
-{
- pty_count = (--tty_count) / 2;
-}
-
static struct cdev ptmx_cdev;
static struct ctl_table pty_table[] = {
@@ -600,8 +589,7 @@ static int pty_unix98_install(struct tty_driver *driver, struct tty_struct *tty)
*/
tty_driver_kref_get(driver);
tty->count++;
- pty_inc_count(); /* tty */
- pty_inc_count(); /* tty->link */
+ pty_count++;
return 0;
err_free_mem:
deinitialize_tty_struct(o_tty);
@@ -613,15 +601,19 @@ err_free_tty:
return -ENOMEM;
}
-static void pty_unix98_remove(struct tty_driver *driver, struct tty_struct *tty)
+static void ptm_unix98_remove(struct tty_driver *driver, struct tty_struct *tty)
+{
+ pty_count--;
+}
+
+static void pts_unix98_remove(struct tty_driver *driver, struct tty_struct *tty)
{
- pty_dec_count();
}
static const struct tty_operations ptm_unix98_ops = {
.lookup = ptm_unix98_lookup,
.install = pty_unix98_install,
- .remove = pty_unix98_remove,
+ .remove = ptm_unix98_remove,
.open = pty_open,
.close = pty_close,
.write = pty_write,
@@ -638,7 +630,7 @@ static const struct tty_operations ptm_unix98_ops = {
static const struct tty_operations pty_unix98_ops = {
.lookup = pts_unix98_lookup,
.install = pty_unix98_install,
- .remove = pty_unix98_remove,
+ .remove = pts_unix98_remove,
.open = pty_open,
.close = pty_close,
.write = pty_write,
diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
index 6a1241c7f841..de88aa5566e5 100644
--- a/drivers/tty/rocket.c
+++ b/drivers/tty/rocket.c
@@ -118,7 +118,7 @@ static unsigned long board2;
static unsigned long board3;
static unsigned long board4;
static unsigned long controller;
-static int support_low_speed;
+static bool support_low_speed;
static unsigned long modem1;
static unsigned long modem2;
static unsigned long modem3;
diff --git a/drivers/tty/serial/8250.c b/drivers/tty/serial/8250.c
index eeadf1b8e093..9f50c4e3c2be 100644
--- a/drivers/tty/serial/8250.c
+++ b/drivers/tty/serial/8250.c
@@ -129,32 +129,6 @@ static unsigned long probe_rsa[PORT_RSA_MAX];
static unsigned int probe_rsa_count;
#endif /* CONFIG_SERIAL_8250_RSA */
-struct uart_8250_port {
- struct uart_port port;
- struct timer_list timer; /* "no irq" timer */
- struct list_head list; /* ports on this IRQ */
- unsigned short capabilities; /* port capabilities */
- unsigned short bugs; /* port bugs */
- unsigned int tx_loadsz; /* transmit fifo load size */
- unsigned char acr;
- unsigned char ier;
- unsigned char lcr;
- unsigned char mcr;
- unsigned char mcr_mask; /* mask of user bits */
- unsigned char mcr_force; /* mask of forced bits */
- unsigned char cur_iotype; /* Running I/O type */
-
- /*
- * Some bits in registers are cleared on a read, so they must
- * be saved whenever the register is read but the bits will not
- * be immediately processed.
- */
-#define LSR_SAVE_FLAGS UART_LSR_BRK_ERROR_BITS
- unsigned char lsr_saved_flags;
-#define MSR_SAVE_FLAGS UART_MSR_ANY_DELTA
- unsigned char msr_saved_flags;
-};
-
struct irq_info {
struct hlist_node node;
int irq;
@@ -1326,8 +1300,6 @@ static void serial8250_stop_tx(struct uart_port *port)
}
}
-static void transmit_chars(struct uart_8250_port *up);
-
static void serial8250_start_tx(struct uart_port *port)
{
struct uart_8250_port *up =
@@ -1344,7 +1316,7 @@ static void serial8250_start_tx(struct uart_port *port)
if ((up->port.type == PORT_RM9000) ?
(lsr & UART_LSR_THRE) :
(lsr & UART_LSR_TEMT))
- transmit_chars(up);
+ serial8250_tx_chars(up);
}
}
@@ -1401,11 +1373,16 @@ static void clear_rx_fifo(struct uart_8250_port *up)
} while (1);
}
-static void
-receive_chars(struct uart_8250_port *up, unsigned int *status)
+/*
+ * serial8250_rx_chars: processes according to the passed in LSR
+ * value, and returns the remaining LSR bits not handled
+ * by this Rx routine.
+ */
+unsigned char
+serial8250_rx_chars(struct uart_8250_port *up, unsigned char lsr)
{
struct tty_struct *tty = up->port.state->port.tty;
- unsigned char ch, lsr = *status;
+ unsigned char ch;
int max_count = 256;
char flag;
@@ -1481,10 +1458,11 @@ ignore_char:
spin_unlock(&up->port.lock);
tty_flip_buffer_push(tty);
spin_lock(&up->port.lock);
- *status = lsr;
+ return lsr;
}
+EXPORT_SYMBOL_GPL(serial8250_rx_chars);
-static void transmit_chars(struct uart_8250_port *up)
+void serial8250_tx_chars(struct uart_8250_port *up)
{
struct circ_buf *xmit = &up->port.state->xmit;
int count;
@@ -1521,8 +1499,9 @@ static void transmit_chars(struct uart_8250_port *up)
if (uart_circ_empty(xmit))
__stop_tx(up);
}
+EXPORT_SYMBOL_GPL(serial8250_tx_chars);
-static unsigned int check_modem_status(struct uart_8250_port *up)
+unsigned int serial8250_modem_status(struct uart_8250_port *up)
{
unsigned int status = serial_in(up, UART_MSR);
@@ -1544,14 +1523,20 @@ static unsigned int check_modem_status(struct uart_8250_port *up)
return status;
}
+EXPORT_SYMBOL_GPL(serial8250_modem_status);
/*
* This handles the interrupt from one port.
*/
-static void serial8250_handle_port(struct uart_8250_port *up)
+int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
{
- unsigned int status;
+ unsigned char status;
unsigned long flags;
+ struct uart_8250_port *up =
+ container_of(port, struct uart_8250_port, port);
+
+ if (iir & UART_IIR_NO_INT)
+ return 0;
spin_lock_irqsave(&up->port.lock, flags);
@@ -1560,25 +1545,13 @@ static void serial8250_handle_port(struct uart_8250_port *up)
DEBUG_INTR("status = %x...", status);
if (status & (UART_LSR_DR | UART_LSR_BI))
- receive_chars(up, &status);
- check_modem_status(up);
+ status = serial8250_rx_chars(up, status);
+ serial8250_modem_status(up);
if (status & UART_LSR_THRE)
- transmit_chars(up);
+ serial8250_tx_chars(up);
spin_unlock_irqrestore(&up->port.lock, flags);
-}
-
-int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
-{
- struct uart_8250_port *up =
- container_of(port, struct uart_8250_port, port);
-
- if (!(iir & UART_IIR_NO_INT)) {
- serial8250_handle_port(up);
- return 1;
- }
-
- return 0;
+ return 1;
}
EXPORT_SYMBOL_GPL(serial8250_handle_irq);
@@ -1619,11 +1592,13 @@ static irqreturn_t serial8250_interrupt(int irq, void *dev_id)
do {
struct uart_8250_port *up;
struct uart_port *port;
+ bool skip;
up = list_entry(l, struct uart_8250_port, list);
port = &up->port;
+ skip = pass_counter && up->port.flags & UPF_IIR_ONCE;
- if (port->handle_irq(port)) {
+ if (!skip && port->handle_irq(port)) {
handled = 1;
end = NULL;
} else if (end == NULL)
@@ -1758,11 +1733,8 @@ static void serial_unlink_irq_chain(struct uart_8250_port *up)
static void serial8250_timeout(unsigned long data)
{
struct uart_8250_port *up = (struct uart_8250_port *)data;
- unsigned int iir;
- iir = serial_in(up, UART_IIR);
- if (!(iir & UART_IIR_NO_INT))
- serial8250_handle_port(up);
+ up->port.handle_irq(&up->port);
mod_timer(&up->timer, jiffies + uart_poll_timeout(&up->port));
}
@@ -1801,7 +1773,7 @@ static void serial8250_backup_timeout(unsigned long data)
}
if (!(iir & UART_IIR_NO_INT))
- transmit_chars(up);
+ serial8250_tx_chars(up);
if (is_real_interrupt(up->port.irq))
serial_out(up, UART_IER, ier);
@@ -1835,7 +1807,7 @@ static unsigned int serial8250_get_mctrl(struct uart_port *port)
unsigned int status;
unsigned int ret;
- status = check_modem_status(up);
+ status = serial8250_modem_status(up);
ret = 0;
if (status & UART_MSR_DCD)
@@ -2000,7 +1972,7 @@ static int serial8250_startup(struct uart_port *port)
serial_outp(up, UART_IER, 0);
serial_outp(up, UART_LCR, 0);
serial_icr_write(up, UART_CSR, 0); /* Reset the UART */
- serial_outp(up, UART_LCR, 0xBF);
+ serial_outp(up, UART_LCR, UART_LCR_CONF_MODE_B);
serial_outp(up, UART_EFR, UART_EFR_ECB);
serial_outp(up, UART_LCR, 0);
}
@@ -2848,7 +2820,7 @@ serial8250_console_write(struct console *co, const char *s, unsigned int count)
local_irq_save(flags);
if (up->port.sysrq) {
- /* serial8250_handle_port() already took the lock */
+ /* serial8250_handle_irq() already took the lock */
locked = 0;
} else if (oops_in_progress) {
locked = spin_trylock(&up->port.lock);
@@ -2882,7 +2854,7 @@ serial8250_console_write(struct console *co, const char *s, unsigned int count)
* while processing with interrupts off.
*/
if (up->msr_saved_flags)
- check_modem_status(up);
+ serial8250_modem_status(up);
if (locked)
spin_unlock(&up->port.lock);
diff --git a/drivers/tty/serial/8250.h b/drivers/tty/serial/8250.h
index 6edf4a6a22d4..ae027be57e25 100644
--- a/drivers/tty/serial/8250.h
+++ b/drivers/tty/serial/8250.h
@@ -13,6 +13,32 @@
#include <linux/serial_8250.h>
+struct uart_8250_port {
+ struct uart_port port;
+ struct timer_list timer; /* "no irq" timer */
+ struct list_head list; /* ports on this IRQ */
+ unsigned short capabilities; /* port capabilities */
+ unsigned short bugs; /* port bugs */
+ unsigned int tx_loadsz; /* transmit fifo load size */
+ unsigned char acr;
+ unsigned char ier;
+ unsigned char lcr;
+ unsigned char mcr;
+ unsigned char mcr_mask; /* mask of user bits */
+ unsigned char mcr_force; /* mask of forced bits */
+ unsigned char cur_iotype; /* Running I/O type */
+
+ /*
+ * Some bits in registers are cleared on a read, so they must
+ * be saved whenever the register is read but the bits will not
+ * be immediately processed.
+ */
+#define LSR_SAVE_FLAGS UART_LSR_BRK_ERROR_BITS
+ unsigned char lsr_saved_flags;
+#define MSR_SAVE_FLAGS UART_MSR_ANY_DELTA
+ unsigned char msr_saved_flags;
+};
+
struct old_serial_port {
unsigned int uart;
unsigned int baud_base;
diff --git a/drivers/tty/serial/8250_dw.c b/drivers/tty/serial/8250_dw.c
index bf1fba640c2d..f574eef3075f 100644
--- a/drivers/tty/serial/8250_dw.c
+++ b/drivers/tty/serial/8250_dw.c
@@ -177,17 +177,7 @@ static struct platform_driver dw8250_platform_driver = {
.remove = __devexit_p(dw8250_remove),
};
-static int __init dw8250_init(void)
-{
- return platform_driver_register(&dw8250_platform_driver);
-}
-module_init(dw8250_init);
-
-static void __exit dw8250_exit(void)
-{
- platform_driver_unregister(&dw8250_platform_driver);
-}
-module_exit(dw8250_exit);
+module_platform_driver(dw8250_platform_driver);
MODULE_AUTHOR("Jamie Iles");
MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/8250_fsl.c b/drivers/tty/serial/8250_fsl.c
new file mode 100644
index 000000000000..f4d3c47b88e8
--- /dev/null
+++ b/drivers/tty/serial/8250_fsl.c
@@ -0,0 +1,63 @@
+#include <linux/serial_reg.h>
+#include <linux/serial_8250.h>
+
+#include "8250.h"
+
+/*
+ * Freescale 16550 UART "driver", Copyright (C) 2011 Paul Gortmaker.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This isn't a full driver; it just provides an alternate IRQ
+ * handler to deal with an erratum. Everything else is just
+ * using the bog standard 8250 support.
+ *
+ * We follow code flow of serial8250_default_handle_irq() but add
+ * a check for a break and insert a dummy read on the Rx for the
+ * immediately following IRQ event.
+ *
+ * We re-use the already existing "bug handling" lsr_saved_flags
+ * field to carry the "what we just did" information from the one
+ * IRQ event to the next one.
+ */
+
+int fsl8250_handle_irq(struct uart_port *port)
+{
+ unsigned char lsr, orig_lsr;
+ unsigned long flags;
+ unsigned int iir;
+ struct uart_8250_port *up =
+ container_of(port, struct uart_8250_port, port);
+
+ spin_lock_irqsave(&up->port.lock, flags);
+
+ iir = port->serial_in(port, UART_IIR);
+ if (iir & UART_IIR_NO_INT) {
+ spin_unlock_irqrestore(&up->port.lock, flags);
+ return 0;
+ }
+
+ /* This is the WAR; if last event was BRK, then read and return */
+ if (unlikely(up->lsr_saved_flags & UART_LSR_BI)) {
+ up->lsr_saved_flags &= ~UART_LSR_BI;
+ port->serial_in(port, UART_RX);
+ spin_unlock_irqrestore(&up->port.lock, flags);
+ return 1;
+ }
+
+ lsr = orig_lsr = up->port.serial_in(&up->port, UART_LSR);
+
+ if (lsr & (UART_LSR_DR | UART_LSR_BI))
+ lsr = serial8250_rx_chars(up, lsr);
+
+ serial8250_modem_status(up);
+
+ if (lsr & UART_LSR_THRE)
+ serial8250_tx_chars(up);
+
+ up->lsr_saved_flags = orig_lsr;
+ spin_unlock_irqrestore(&up->port.lock, flags);
+ return 1;
+}
diff --git a/drivers/tty/serial/8250_pci.c b/drivers/tty/serial/8250_pci.c
index 825937a5f210..da2b0b0a183f 100644
--- a/drivers/tty/serial/8250_pci.c
+++ b/drivers/tty/serial/8250_pci.c
@@ -1092,6 +1092,14 @@ static int skip_tx_en_setup(struct serial_private *priv,
return pci_default_setup(priv, board, port, idx);
}
+static int kt_serial_setup(struct serial_private *priv,
+ const struct pciserial_board *board,
+ struct uart_port *port, int idx)
+{
+ port->flags |= UPF_IIR_ONCE;
+ return skip_tx_en_setup(priv, board, port, idx);
+}
+
static int pci_eg20t_init(struct pci_dev *dev)
{
#if defined(CONFIG_SERIAL_PCH_UART) || defined(CONFIG_SERIAL_PCH_UART_MODULE)
@@ -1110,7 +1118,18 @@ pci_xr17c154_setup(struct serial_private *priv,
return pci_default_setup(priv, board, port, idx);
}
-/* This should be in linux/pci_ids.h */
+static int try_enable_msi(struct pci_dev *dev)
+{
+ /* use msi if available, but fallback to legacy otherwise */
+ pci_enable_msi(dev);
+ return 0;
+}
+
+static void disable_msi(struct pci_dev *dev)
+{
+ pci_disable_msi(dev);
+}
+
#define PCI_VENDOR_ID_SBSMODULARIO 0x124B
#define PCI_SUBVENDOR_ID_SBSMODULARIO 0x124B
#define PCI_DEVICE_ID_OCTPRO 0x0001
@@ -1133,9 +1152,14 @@ pci_xr17c154_setup(struct serial_private *priv,
#define PCI_DEVICE_ID_TITAN_800E 0xA014
#define PCI_DEVICE_ID_TITAN_200EI 0xA016
#define PCI_DEVICE_ID_TITAN_200EISI 0xA017
+#define PCI_DEVICE_ID_TITAN_400V3 0xA310
+#define PCI_DEVICE_ID_TITAN_410V3 0xA312
+#define PCI_DEVICE_ID_TITAN_800V3 0xA314
+#define PCI_DEVICE_ID_TITAN_800V3B 0xA315
#define PCI_DEVICE_ID_OXSEMI_16PCI958 0x9538
#define PCIE_DEVICE_ID_NEO_2_OX_IBM 0x00F6
#define PCI_DEVICE_ID_PLX_CRONYX_OMEGA 0xc001
+#define PCI_DEVICE_ID_INTEL_PATSBURG_KT 0x1d3d
/* Unknown vendors/cards - this should not be in linux/pci_ids.h */
#define PCI_SUBDEVICE_ID_UNKNOWN_0x1584 0x1584
@@ -1220,6 +1244,15 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
.subdevice = PCI_ANY_ID,
.setup = ce4100_serial_setup,
},
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_PATSBURG_KT,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .init = try_enable_msi,
+ .setup = kt_serial_setup,
+ .exit = disable_msi,
+ },
/*
* ITE
*/
@@ -3414,6 +3447,18 @@ static struct pci_device_id serial_pci_tbl[] = {
{ PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_200EISI,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_oxsemi_2_4000000 },
+ { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_400V3,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_4_921600 },
+ { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_410V3,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_4_921600 },
+ { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_800V3,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_4_921600 },
+ { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_800V3B,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_4_921600 },
{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_1S_10x_550,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 925a1e547a83..aca2386c5ef1 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -97,6 +97,11 @@ config SERIAL_8250_PNP
This builds standard PNP serial support. You may be able to
disable this feature if you only need legacy serial support.
+config SERIAL_8250_FSL
+ bool
+ depends on SERIAL_8250_CONSOLE && PPC_UDBG_16550
+ default PPC
+
config SERIAL_8250_HP300
tristate
depends on SERIAL_8250 && HP300
@@ -457,7 +462,7 @@ config SERIAL_SAMSUNG
config SERIAL_SAMSUNG_UARTS_4
bool
depends on ARM && PLAT_SAMSUNG
- default y if CPU_S3C2443
+ default y if !(CPU_S3C2410 || SERIAL_S3C2412 || CPU_S3C2440 || CPU_S3C2442)
help
Internal node for the common case of 4 Samsung compatible UARTs
@@ -465,7 +470,7 @@ config SERIAL_SAMSUNG_UARTS
int
depends on ARM && PLAT_SAMSUNG
default 6 if ARCH_S5P6450
- default 4 if SERIAL_SAMSUNG_UARTS_4
+ default 4 if SERIAL_SAMSUNG_UARTS_4 || CPU_S3C2416
default 3
help
Select the number of available UART ports for the Samsung S3C
@@ -495,46 +500,27 @@ config SERIAL_SAMSUNG_CONSOLE
your boot loader about how to pass options to the kernel at
boot time.)
-config SERIAL_S3C2410
- tristate "Samsung S3C2410 Serial port support"
- depends on SERIAL_SAMSUNG && CPU_S3C2410
- default y if CPU_S3C2410
- help
- Serial port support for the Samsung S3C2410 SoC
-
-config SERIAL_S3C2412
- tristate "Samsung S3C2412/S3C2413 Serial port support"
- depends on SERIAL_SAMSUNG && CPU_S3C2412
- default y if CPU_S3C2412
- help
- Serial port support for the Samsung S3C2412 and S3C2413 SoC
-
-config SERIAL_S3C2440
- tristate "Samsung S3C2440/S3C2442/S3C2416 Serial port support"
- depends on SERIAL_SAMSUNG && (CPU_S3C2440 || CPU_S3C2442 || CPU_S3C2416)
- default y if CPU_S3C2440
- default y if CPU_S3C2442
- select SERIAL_SAMSUNG_UARTS_4 if CPU_S3C2416
- help
- Serial port support for the Samsung S3C2440, S3C2416 and S3C2442 SoC
-
-config SERIAL_S3C6400
- tristate "Samsung S3C6400/S3C6410/S5P6440/S5P6450/S5PC100 Serial port support"
- depends on SERIAL_SAMSUNG && (CPU_S3C6400 || CPU_S3C6410 || CPU_S5P6440 || CPU_S5P6450 || CPU_S5PC100)
- select SERIAL_SAMSUNG_UARTS_4
- default y
- help
- Serial port support for the Samsung S3C6400, S3C6410, S5P6440, S5P6450
- and S5PC100 SoCs
-
-config SERIAL_S5PV210
- tristate "Samsung S5PV210 Serial port support"
- depends on SERIAL_SAMSUNG && (CPU_S5PV210 || CPU_EXYNOS4210 || SOC_EXYNOS4212)
- select SERIAL_SAMSUNG_UARTS_4 if (CPU_S5PV210 || CPU_EXYNOS4210 || SOC_EXYNOS4212)
- default y
- help
- Serial port support for Samsung's S5P Family of SoC's
-
+config SERIAL_SIRFSOC
+ tristate "SiRF SoC Platform Serial port support"
+ depends on ARM && ARCH_PRIMA2
+ select SERIAL_CORE
+ help
+ Support for the on-chip UART on the CSR SiRFprimaII series,
+ providing /dev/ttySiRF0, 1 and 2 (note, some machines may not
+ provide all of these ports, depending on how the serial port
+ pins are configured).
+
+config SERIAL_SIRFSOC_CONSOLE
+ bool "Support for console on SiRF SoC serial port"
+ depends on SERIAL_SIRFSOC=y
+ select SERIAL_CORE_CONSOLE
+ help
+ Even if you say Y here, the currently visible virtual console
+ (/dev/tty0) will still be used as the system console by default, but
+ you can alter that using a kernel command line option such as
+ "console=ttySiRFx". (Try "man bootparam" or see the documentation of
+ your boot loader about how to pass options to the kernel at
+ boot time.)
config SERIAL_MAX3100
tristate "MAX3100 support"
@@ -808,7 +794,7 @@ config BFIN_UART3_CTSRTS
config SERIAL_IMX
bool "IMX serial port support"
- depends on ARM && (ARCH_IMX || ARCH_MXC)
+ depends on ARCH_MXC
select SERIAL_CORE
select RATIONAL
help
@@ -1324,7 +1310,7 @@ config SERIAL_OF_PLATFORM
config SERIAL_OMAP
tristate "OMAP serial port support"
- depends on ARCH_OMAP2 || ARCH_OMAP3 || ARCH_OMAP4
+ depends on ARCH_OMAP2PLUS
select SERIAL_CORE
help
If you have a machine based on a Texas Instruments OMAP CPU you
@@ -1575,6 +1561,15 @@ config SERIAL_PCH_UART
ML7213/ML7223/ML7831 is a companion chip for the Intel Atom E6xx series.
ML7213/ML7223/ML7831 is completely compatible with the Intel EG20T PCH.
+config SERIAL_PCH_UART_CONSOLE
+ bool "Support for console on Intel EG20T PCH UART/OKI SEMICONDUCTOR ML7213 IOH"
+ depends on SERIAL_PCH_UART=y
+ select SERIAL_CORE_CONSOLE
+ help
+ Say Y here if you wish to use the PCH UART as the system console
+ (the system console is the device which receives all kernel messages and
+ warnings and which allows logins in single user mode).
+
config SERIAL_MSM_SMD
bool "Enable tty device interface for some SMD ports"
default n
@@ -1610,4 +1605,27 @@ config SERIAL_XILINX_PS_UART_CONSOLE
help
Enable a Xilinx PS UART port to be the system console.
+config SERIAL_AR933X
+ bool "AR933X serial port support"
+ depends on SOC_AR933X
+ select SERIAL_CORE
+ help
+ If you have an Atheros AR933X SoC-based board and want to use the
+ built-in UART of the SoC, say Y to this option.
+
+config SERIAL_AR933X_CONSOLE
+ bool "Console on AR933X serial port"
+ depends on SERIAL_AR933X=y
+ select SERIAL_CORE_CONSOLE
+ help
+ Enable a built-in UART port of the AR933X to be the system console.
+
+config SERIAL_AR933X_NR_UARTS
+ int "Maximum number of AR933X serial ports"
+ depends on SERIAL_AR933X
+ default "2"
+ help
+ Set this to the number of serial ports you want the driver
+ to support.
+
endmenu
diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile
index e10cf5b54b6d..f5b01f2ce525 100644
--- a/drivers/tty/serial/Makefile
+++ b/drivers/tty/serial/Makefile
@@ -28,6 +28,7 @@ obj-$(CONFIG_SERIAL_8250_BOCA) += 8250_boca.o
obj-$(CONFIG_SERIAL_8250_EXAR_ST16C554) += 8250_exar_st16c554.o
obj-$(CONFIG_SERIAL_8250_HUB6) += 8250_hub6.o
obj-$(CONFIG_SERIAL_8250_MCA) += 8250_mca.o
+obj-$(CONFIG_SERIAL_8250_FSL) += 8250_fsl.o
obj-$(CONFIG_SERIAL_8250_DW) += 8250_dw.o
obj-$(CONFIG_SERIAL_AMBA_PL010) += amba-pl010.o
obj-$(CONFIG_SERIAL_AMBA_PL011) += amba-pl011.o
@@ -39,11 +40,6 @@ obj-$(CONFIG_SERIAL_BCM63XX) += bcm63xx_uart.o
obj-$(CONFIG_SERIAL_BFIN) += bfin_uart.o
obj-$(CONFIG_SERIAL_BFIN_SPORT) += bfin_sport_uart.o
obj-$(CONFIG_SERIAL_SAMSUNG) += samsung.o
-obj-$(CONFIG_SERIAL_S3C2410) += s3c2410.o
-obj-$(CONFIG_SERIAL_S3C2412) += s3c2412.o
-obj-$(CONFIG_SERIAL_S3C2440) += s3c2440.o
-obj-$(CONFIG_SERIAL_S3C6400) += s3c6400.o
-obj-$(CONFIG_SERIAL_S5PV210) += s5pv210.o
obj-$(CONFIG_SERIAL_MAX3100) += max3100.o
obj-$(CONFIG_SERIAL_MAX3107) += max3107.o
obj-$(CONFIG_SERIAL_MAX3107_AAVA) += max3107-aava.o
@@ -94,3 +90,5 @@ obj-$(CONFIG_SERIAL_MSM_SMD) += msm_smd_tty.o
obj-$(CONFIG_SERIAL_MXS_AUART) += mxs-auart.o
obj-$(CONFIG_SERIAL_LANTIQ) += lantiq.o
obj-$(CONFIG_SERIAL_XILINX_PS_UART) += xilinx_uartps.o
+obj-$(CONFIG_SERIAL_SIRFSOC) += sirfsoc_uart.o
+obj-$(CONFIG_SERIAL_AR933X) += ar933x_uart.o
diff --git a/drivers/tty/serial/amba-pl010.c b/drivers/tty/serial/amba-pl010.c
index efdf92c3a352..0d91a540bf11 100644
--- a/drivers/tty/serial/amba-pl010.c
+++ b/drivers/tty/serial/amba-pl010.c
@@ -795,6 +795,8 @@ static struct amba_id pl010_ids[] = {
{ 0, 0 },
};
+MODULE_DEVICE_TABLE(amba, pl010_ids);
+
static struct amba_driver pl010_driver = {
.drv = {
.name = "uart-pl010",
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 00233af1acc4..9ae024025ff3 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -268,7 +268,7 @@ static void pl011_dma_probe_initcall(struct uart_amba_port *uap)
struct dma_slave_config tx_conf = {
.dst_addr = uap->port.mapbase + UART01x_DR,
.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
- .direction = DMA_TO_DEVICE,
+ .direction = DMA_MEM_TO_DEV,
.dst_maxburst = uap->fifosize >> 1,
};
struct dma_chan *chan;
@@ -301,7 +301,7 @@ static void pl011_dma_probe_initcall(struct uart_amba_port *uap)
struct dma_slave_config rx_conf = {
.src_addr = uap->port.mapbase + UART01x_DR,
.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
- .direction = DMA_FROM_DEVICE,
+ .direction = DMA_DEV_TO_MEM,
.src_maxburst = uap->fifosize >> 1,
};
@@ -480,7 +480,7 @@ static int pl011_dma_tx_refill(struct uart_amba_port *uap)
return -EBUSY;
}
- desc = dma_dev->device_prep_slave_sg(chan, &dmatx->sg, 1, DMA_TO_DEVICE,
+ desc = dma_dev->device_prep_slave_sg(chan, &dmatx->sg, 1, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc) {
dma_unmap_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE);
@@ -676,7 +676,7 @@ static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
&uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
dma_dev = rxchan->device;
desc = rxchan->device->device_prep_slave_sg(rxchan, &sgbuf->sg, 1,
- DMA_FROM_DEVICE,
+ DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
/*
* If the DMA engine is busy and cannot prepare a
@@ -1994,6 +1994,8 @@ static struct amba_id pl011_ids[] = {
{ 0, 0 },
};
+MODULE_DEVICE_TABLE(amba, pl011_ids);
+
static struct amba_driver pl011_driver = {
.drv = {
.name = "uart-pl011",
diff --git a/drivers/tty/serial/apbuart.c b/drivers/tty/serial/apbuart.c
index 77554fd68d1f..7162f70d9260 100644
--- a/drivers/tty/serial/apbuart.c
+++ b/drivers/tty/serial/apbuart.c
@@ -577,7 +577,7 @@ static int __devinit apbuart_probe(struct platform_device *op)
return 0;
}
-static struct of_device_id __initdata apbuart_match[] = {
+static struct of_device_id apbuart_match[] = {
{
.name = "GAISLER_APBUART",
},
@@ -597,7 +597,7 @@ static struct platform_driver grlib_apbuart_of_driver = {
};
-static int grlib_apbuart_configure(void)
+static int __init grlib_apbuart_configure(void)
{
struct device_node *np;
int line = 0;
diff --git a/drivers/tty/serial/ar933x_uart.c b/drivers/tty/serial/ar933x_uart.c
new file mode 100644
index 000000000000..e4f60e2b87f3
--- /dev/null
+++ b/drivers/tty/serial/ar933x_uart.c
@@ -0,0 +1,688 @@
+/*
+ * Atheros AR933X SoC built-in UART driver
+ *
+ * Copyright (C) 2011 Gabor Juhos <juhosg@openwrt.org>
+ *
+ * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/sysrq.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial_core.h>
+#include <linux/serial.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+
+#include <asm/mach-ath79/ar933x_uart.h>
+#include <asm/mach-ath79/ar933x_uart_platform.h>
+
+#define DRIVER_NAME "ar933x-uart"
+
+#define AR933X_DUMMY_STATUS_RD 0x01
+
+static struct uart_driver ar933x_uart_driver;
+
+struct ar933x_uart_port {
+ struct uart_port port;
+ unsigned int ier; /* shadow Interrupt Enable Register */
+};
+
+static inline unsigned int ar933x_uart_read(struct ar933x_uart_port *up,
+ int offset)
+{
+ return readl(up->port.membase + offset);
+}
+
+static inline void ar933x_uart_write(struct ar933x_uart_port *up,
+ int offset, unsigned int value)
+{
+ writel(value, up->port.membase + offset);
+}
+
+static inline void ar933x_uart_rmw(struct ar933x_uart_port *up,
+ unsigned int offset,
+ unsigned int mask,
+ unsigned int val)
+{
+ unsigned int t;
+
+ t = ar933x_uart_read(up, offset);
+ t &= ~mask;
+ t |= val;
+ ar933x_uart_write(up, offset, t);
+}
+
+static inline void ar933x_uart_rmw_set(struct ar933x_uart_port *up,
+ unsigned int offset,
+ unsigned int val)
+{
+ ar933x_uart_rmw(up, offset, 0, val);
+}
+
+static inline void ar933x_uart_rmw_clear(struct ar933x_uart_port *up,
+ unsigned int offset,
+ unsigned int val)
+{
+ ar933x_uart_rmw(up, offset, val, 0);
+}
+
+static inline void ar933x_uart_start_tx_interrupt(struct ar933x_uart_port *up)
+{
+ up->ier |= AR933X_UART_INT_TX_EMPTY;
+ ar933x_uart_write(up, AR933X_UART_INT_EN_REG, up->ier);
+}
+
+static inline void ar933x_uart_stop_tx_interrupt(struct ar933x_uart_port *up)
+{
+ up->ier &= ~AR933X_UART_INT_TX_EMPTY;
+ ar933x_uart_write(up, AR933X_UART_INT_EN_REG, up->ier);
+}
+
+static inline void ar933x_uart_putc(struct ar933x_uart_port *up, int ch)
+{
+ unsigned int rdata;
+
+ rdata = ch & AR933X_UART_DATA_TX_RX_MASK;
+ rdata |= AR933X_UART_DATA_TX_CSR;
+ ar933x_uart_write(up, AR933X_UART_DATA_REG, rdata);
+}
+
+static unsigned int ar933x_uart_tx_empty(struct uart_port *port)
+{
+ struct ar933x_uart_port *up = (struct ar933x_uart_port *) port;
+ unsigned long flags;
+ unsigned int rdata;
+
+ spin_lock_irqsave(&up->port.lock, flags);
+ rdata = ar933x_uart_read(up, AR933X_UART_DATA_REG);
+ spin_unlock_irqrestore(&up->port.lock, flags);
+
+ return (rdata & AR933X_UART_DATA_TX_CSR) ? 0 : TIOCSER_TEMT;
+}
+
+static unsigned int ar933x_uart_get_mctrl(struct uart_port *port)
+{
+ return TIOCM_CAR;
+}
+
+static void ar933x_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+}
+
+static void ar933x_uart_start_tx(struct uart_port *port)
+{
+ struct ar933x_uart_port *up = (struct ar933x_uart_port *) port;
+
+ ar933x_uart_start_tx_interrupt(up);
+}
+
+static void ar933x_uart_stop_tx(struct uart_port *port)
+{
+ struct ar933x_uart_port *up = (struct ar933x_uart_port *) port;
+
+ ar933x_uart_stop_tx_interrupt(up);
+}
+
+static void ar933x_uart_stop_rx(struct uart_port *port)
+{
+ struct ar933x_uart_port *up = (struct ar933x_uart_port *) port;
+
+ up->ier &= ~AR933X_UART_INT_RX_VALID;
+ ar933x_uart_write(up, AR933X_UART_INT_EN_REG, up->ier);
+}
+
+static void ar933x_uart_break_ctl(struct uart_port *port, int break_state)
+{
+ struct ar933x_uart_port *up = (struct ar933x_uart_port *) port;
+ unsigned long flags;
+
+ spin_lock_irqsave(&up->port.lock, flags);
+ if (break_state == -1)
+ ar933x_uart_rmw_set(up, AR933X_UART_CS_REG,
+ AR933X_UART_CS_TX_BREAK);
+ else
+ ar933x_uart_rmw_clear(up, AR933X_UART_CS_REG,
+ AR933X_UART_CS_TX_BREAK);
+ spin_unlock_irqrestore(&up->port.lock, flags);
+}
+
+static void ar933x_uart_enable_ms(struct uart_port *port)
+{
+}
+
+static void ar933x_uart_set_termios(struct uart_port *port,
+ struct ktermios *new,
+ struct ktermios *old)
+{
+ struct ar933x_uart_port *up = (struct ar933x_uart_port *) port;
+ unsigned int cs;
+ unsigned long flags;
+ unsigned int baud, scale;
+
+ /* Only CS8 is supported */
+ new->c_cflag &= ~CSIZE;
+ new->c_cflag |= CS8;
+
+ /* Only one stop bit is supported */
+ new->c_cflag &= ~CSTOPB;
+
+ cs = 0;
+ if (new->c_cflag & PARENB) {
+ if (!(new->c_cflag & PARODD))
+ cs |= AR933X_UART_CS_PARITY_EVEN;
+ else
+ cs |= AR933X_UART_CS_PARITY_ODD;
+ } else {
+ cs |= AR933X_UART_CS_PARITY_NONE;
+ }
+
+ /* Mark/space parity is not supported */
+ new->c_cflag &= ~CMSPAR;
+
+ baud = uart_get_baud_rate(port, new, old, 0, port->uartclk / 16);
+ scale = (port->uartclk / (16 * baud)) - 1;
+
+ /*
+ * Ok, we're now changing the port state. Do it with
+ * interrupts disabled.
+ */
+ spin_lock_irqsave(&up->port.lock, flags);
+
+ /* Update the per-port timeout. */
+ uart_update_timeout(port, new->c_cflag, baud);
+
+ up->port.ignore_status_mask = 0;
+
+ /* ignore all characters if CREAD is not set */
+ if ((new->c_cflag & CREAD) == 0)
+ up->port.ignore_status_mask |= AR933X_DUMMY_STATUS_RD;
+
+ ar933x_uart_write(up, AR933X_UART_CLOCK_REG,
+ scale << AR933X_UART_CLOCK_SCALE_S | 8192);
+
+ /* setup configuration register */
+ ar933x_uart_rmw(up, AR933X_UART_CS_REG, AR933X_UART_CS_PARITY_M, cs);
+
+ /* enable host interrupt */
+ ar933x_uart_rmw_set(up, AR933X_UART_CS_REG,
+ AR933X_UART_CS_HOST_INT_EN);
+
+ spin_unlock_irqrestore(&up->port.lock, flags);
+
+ if (tty_termios_baud_rate(new))
+ tty_termios_encode_baud_rate(new, baud, baud);
+}
+
+static void ar933x_uart_rx_chars(struct ar933x_uart_port *up)
+{
+ struct tty_struct *tty;
+ int max_count = 256;
+
+ tty = tty_port_tty_get(&up->port.state->port);
+ do {
+ unsigned int rdata;
+ unsigned char ch;
+
+ rdata = ar933x_uart_read(up, AR933X_UART_DATA_REG);
+ if ((rdata & AR933X_UART_DATA_RX_CSR) == 0)
+ break;
+
+ /* remove the character from the FIFO */
+ ar933x_uart_write(up, AR933X_UART_DATA_REG,
+ AR933X_UART_DATA_RX_CSR);
+
+ if (!tty) {
+ /* discard the data if no tty available */
+ continue;
+ }
+
+ up->port.icount.rx++;
+ ch = rdata & AR933X_UART_DATA_TX_RX_MASK;
+
+ if (uart_handle_sysrq_char(&up->port, ch))
+ continue;
+
+ if ((up->port.ignore_status_mask & AR933X_DUMMY_STATUS_RD) == 0)
+ tty_insert_flip_char(tty, ch, TTY_NORMAL);
+ } while (max_count-- > 0);
+
+ if (tty) {
+ tty_flip_buffer_push(tty);
+ tty_kref_put(tty);
+ }
+}
+
+static void ar933x_uart_tx_chars(struct ar933x_uart_port *up)
+{
+ struct circ_buf *xmit = &up->port.state->xmit;
+ int count;
+
+ if (uart_tx_stopped(&up->port))
+ return;
+
+ count = up->port.fifosize;
+ do {
+ unsigned int rdata;
+
+ rdata = ar933x_uart_read(up, AR933X_UART_DATA_REG);
+ if ((rdata & AR933X_UART_DATA_TX_CSR) == 0)
+ break;
+
+ if (up->port.x_char) {
+ ar933x_uart_putc(up, up->port.x_char);
+ up->port.icount.tx++;
+ up->port.x_char = 0;
+ continue;
+ }
+
+ if (uart_circ_empty(xmit))
+ break;
+
+ ar933x_uart_putc(up, xmit->buf[xmit->tail]);
+
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ up->port.icount.tx++;
+ } while (--count > 0);
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&up->port);
+
+ if (!uart_circ_empty(xmit))
+ ar933x_uart_start_tx_interrupt(up);
+}
+
+static irqreturn_t ar933x_uart_interrupt(int irq, void *dev_id)
+{
+ struct ar933x_uart_port *up = dev_id;
+ unsigned int status;
+
+ status = ar933x_uart_read(up, AR933X_UART_CS_REG);
+ if ((status & AR933X_UART_CS_HOST_INT) == 0)
+ return IRQ_NONE;
+
+ spin_lock(&up->port.lock);
+
+ status = ar933x_uart_read(up, AR933X_UART_INT_REG);
+ status &= ar933x_uart_read(up, AR933X_UART_INT_EN_REG);
+
+ if (status & AR933X_UART_INT_RX_VALID) {
+ ar933x_uart_write(up, AR933X_UART_INT_REG,
+ AR933X_UART_INT_RX_VALID);
+ ar933x_uart_rx_chars(up);
+ }
+
+ if (status & AR933X_UART_INT_TX_EMPTY) {
+ ar933x_uart_write(up, AR933X_UART_INT_REG,
+ AR933X_UART_INT_TX_EMPTY);
+ ar933x_uart_stop_tx_interrupt(up);
+ ar933x_uart_tx_chars(up);
+ }
+
+ spin_unlock(&up->port.lock);
+
+ return IRQ_HANDLED;
+}
+
+static int ar933x_uart_startup(struct uart_port *port)
+{
+ struct ar933x_uart_port *up = (struct ar933x_uart_port *) port;
+ unsigned long flags;
+ int ret;
+
+ ret = request_irq(up->port.irq, ar933x_uart_interrupt,
+ up->port.irqflags, dev_name(up->port.dev), up);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&up->port.lock, flags);
+
+ /* Enable HOST interrupts */
+ ar933x_uart_rmw_set(up, AR933X_UART_CS_REG,
+ AR933X_UART_CS_HOST_INT_EN);
+
+ /* Enable RX interrupts */
+ up->ier = AR933X_UART_INT_RX_VALID;
+ ar933x_uart_write(up, AR933X_UART_INT_EN_REG, up->ier);
+
+ spin_unlock_irqrestore(&up->port.lock, flags);
+
+ return 0;
+}
+
+static void ar933x_uart_shutdown(struct uart_port *port)
+{
+ struct ar933x_uart_port *up = (struct ar933x_uart_port *) port;
+
+ /* Disable all interrupts */
+ up->ier = 0;
+ ar933x_uart_write(up, AR933X_UART_INT_EN_REG, up->ier);
+
+ /* Disable break condition */
+ ar933x_uart_rmw_clear(up, AR933X_UART_CS_REG,
+ AR933X_UART_CS_TX_BREAK);
+
+ free_irq(up->port.irq, up);
+}
+
+static const char *ar933x_uart_type(struct uart_port *port)
+{
+ return (port->type == PORT_AR933X) ? "AR933X UART" : NULL;
+}
+
+static void ar933x_uart_release_port(struct uart_port *port)
+{
+ /* Nothing to release ... */
+}
+
+static int ar933x_uart_request_port(struct uart_port *port)
+{
+ /* UARTs always present */
+ return 0;
+}
+
+static void ar933x_uart_config_port(struct uart_port *port, int flags)
+{
+ if (flags & UART_CONFIG_TYPE)
+ port->type = PORT_AR933X;
+}
+
+static int ar933x_uart_verify_port(struct uart_port *port,
+ struct serial_struct *ser)
+{
+ if (ser->type != PORT_UNKNOWN &&
+ ser->type != PORT_AR933X)
+ return -EINVAL;
+
+ if (ser->irq < 0 || ser->irq >= NR_IRQS)
+ return -EINVAL;
+
+ if (ser->baud_base < 28800)
+ return -EINVAL;
+
+ return 0;
+}
+
+static struct uart_ops ar933x_uart_ops = {
+ .tx_empty = ar933x_uart_tx_empty,
+ .set_mctrl = ar933x_uart_set_mctrl,
+ .get_mctrl = ar933x_uart_get_mctrl,
+ .stop_tx = ar933x_uart_stop_tx,
+ .start_tx = ar933x_uart_start_tx,
+ .stop_rx = ar933x_uart_stop_rx,
+ .enable_ms = ar933x_uart_enable_ms,
+ .break_ctl = ar933x_uart_break_ctl,
+ .startup = ar933x_uart_startup,
+ .shutdown = ar933x_uart_shutdown,
+ .set_termios = ar933x_uart_set_termios,
+ .type = ar933x_uart_type,
+ .release_port = ar933x_uart_release_port,
+ .request_port = ar933x_uart_request_port,
+ .config_port = ar933x_uart_config_port,
+ .verify_port = ar933x_uart_verify_port,
+};
+
+#ifdef CONFIG_SERIAL_AR933X_CONSOLE
+
+static struct ar933x_uart_port *
+ar933x_console_ports[CONFIG_SERIAL_AR933X_NR_UARTS];
+
+static void ar933x_uart_wait_xmitr(struct ar933x_uart_port *up)
+{
+ unsigned int status;
+ unsigned int timeout = 60000;
+
+ /* Wait up to 60ms for the character(s) to be sent. */
+ do {
+ status = ar933x_uart_read(up, AR933X_UART_DATA_REG);
+ if (--timeout == 0)
+ break;
+ udelay(1);
+ } while ((status & AR933X_UART_DATA_TX_CSR) == 0);
+}
+
+static void ar933x_uart_console_putchar(struct uart_port *port, int ch)
+{
+ struct ar933x_uart_port *up = (struct ar933x_uart_port *) port;
+
+ ar933x_uart_wait_xmitr(up);
+ ar933x_uart_putc(up, ch);
+}
+
+static void ar933x_uart_console_write(struct console *co, const char *s,
+ unsigned int count)
+{
+ struct ar933x_uart_port *up = ar933x_console_ports[co->index];
+ unsigned long flags;
+ unsigned int int_en;
+ int locked = 1;
+
+ local_irq_save(flags);
+
+ if (up->port.sysrq)
+ locked = 0;
+ else if (oops_in_progress)
+ locked = spin_trylock(&up->port.lock);
+ else
+ spin_lock(&up->port.lock);
+
+ /*
+ * First save the IER then disable the interrupts
+ */
+ int_en = ar933x_uart_read(up, AR933X_UART_INT_EN_REG);
+ ar933x_uart_write(up, AR933X_UART_INT_EN_REG, 0);
+
+ uart_console_write(&up->port, s, count, ar933x_uart_console_putchar);
+
+ /*
+ * Finally, wait for transmitter to become empty
+ * and restore the IER
+ */
+ ar933x_uart_wait_xmitr(up);
+ ar933x_uart_write(up, AR933X_UART_INT_EN_REG, int_en);
+
+ ar933x_uart_write(up, AR933X_UART_INT_REG, AR933X_UART_INT_ALLINTS);
+
+ if (locked)
+ spin_unlock(&up->port.lock);
+
+ local_irq_restore(flags);
+}
+
+static int ar933x_uart_console_setup(struct console *co, char *options)
+{
+ struct ar933x_uart_port *up;
+ int baud = 115200;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+
+ if (co->index < 0 || co->index >= CONFIG_SERIAL_AR933X_NR_UARTS)
+ return -EINVAL;
+
+ up = ar933x_console_ports[co->index];
+ if (!up)
+ return -ENODEV;
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+ return uart_set_options(&up->port, co, baud, parity, bits, flow);
+}
+
+static struct console ar933x_uart_console = {
+ .name = "ttyATH",
+ .write = ar933x_uart_console_write,
+ .device = uart_console_device,
+ .setup = ar933x_uart_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &ar933x_uart_driver,
+};
+
+static void ar933x_uart_add_console_port(struct ar933x_uart_port *up)
+{
+ ar933x_console_ports[up->port.line] = up;
+}
+
+#define AR933X_SERIAL_CONSOLE (&ar933x_uart_console)
+
+#else
+
+static inline void ar933x_uart_add_console_port(struct ar933x_uart_port *up) {}
+
+#define AR933X_SERIAL_CONSOLE NULL
+
+#endif /* CONFIG_SERIAL_AR933X_CONSOLE */
+
+static struct uart_driver ar933x_uart_driver = {
+ .owner = THIS_MODULE,
+ .driver_name = DRIVER_NAME,
+ .dev_name = "ttyATH",
+ .nr = CONFIG_SERIAL_AR933X_NR_UARTS,
+ .cons = AR933X_SERIAL_CONSOLE,
+};
+
+static int __devinit ar933x_uart_probe(struct platform_device *pdev)
+{
+ struct ar933x_uart_platform_data *pdata;
+ struct ar933x_uart_port *up;
+ struct uart_port *port;
+ struct resource *mem_res;
+ struct resource *irq_res;
+ int id;
+ int ret;
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata)
+ return -EINVAL;
+
+ id = pdev->id;
+ if (id == -1)
+ id = 0;
+
+ if (id > CONFIG_SERIAL_AR933X_NR_UARTS)
+ return -EINVAL;
+
+ mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem_res) {
+ dev_err(&pdev->dev, "no MEM resource\n");
+ return -EINVAL;
+ }
+
+ irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!irq_res) {
+ dev_err(&pdev->dev, "no IRQ resource\n");
+ return -EINVAL;
+ }
+
+ up = kzalloc(sizeof(struct ar933x_uart_port), GFP_KERNEL);
+ if (!up)
+ return -ENOMEM;
+
+ port = &up->port;
+ port->mapbase = mem_res->start;
+
+ port->membase = ioremap(mem_res->start, AR933X_UART_REGS_SIZE);
+ if (!port->membase) {
+ ret = -ENOMEM;
+ goto err_free_up;
+ }
+
+ port->line = id;
+ port->irq = irq_res->start;
+ port->dev = &pdev->dev;
+ port->type = PORT_AR933X;
+ port->iotype = UPIO_MEM32;
+ port->uartclk = pdata->uartclk;
+
+ port->regshift = 2;
+ port->fifosize = AR933X_UART_FIFO_SIZE;
+ port->ops = &ar933x_uart_ops;
+
+ ar933x_uart_add_console_port(up);
+
+ ret = uart_add_one_port(&ar933x_uart_driver, &up->port);
+ if (ret)
+ goto err_unmap;
+
+ platform_set_drvdata(pdev, up);
+ return 0;
+
+err_unmap:
+ iounmap(up->port.membase);
+err_free_up:
+ kfree(up);
+ return ret;
+}
+
+static int __devexit ar933x_uart_remove(struct platform_device *pdev)
+{
+ struct ar933x_uart_port *up;
+
+ up = platform_get_drvdata(pdev);
+ platform_set_drvdata(pdev, NULL);
+
+ if (up) {
+ uart_remove_one_port(&ar933x_uart_driver, &up->port);
+ iounmap(up->port.membase);
+ kfree(up);
+ }
+
+ return 0;
+}
+
+static struct platform_driver ar933x_uart_platform_driver = {
+ .probe = ar933x_uart_probe,
+ .remove = __devexit_p(ar933x_uart_remove),
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init ar933x_uart_init(void)
+{
+ int ret;
+
+ ar933x_uart_driver.nr = CONFIG_SERIAL_AR933X_NR_UARTS;
+ ret = uart_register_driver(&ar933x_uart_driver);
+ if (ret)
+ goto err_out;
+
+ ret = platform_driver_register(&ar933x_uart_platform_driver);
+ if (ret)
+ goto err_unregister_uart_driver;
+
+ return 0;
+
+err_unregister_uart_driver:
+ uart_unregister_driver(&ar933x_uart_driver);
+err_out:
+ return ret;
+}
+
+static void __exit ar933x_uart_exit(void)
+{
+ platform_driver_unregister(&ar933x_uart_platform_driver);
+ uart_unregister_driver(&ar933x_uart_driver);
+}
+
+module_init(ar933x_uart_init);
+module_exit(ar933x_uart_exit);
+
+MODULE_DESCRIPTION("Atheros AR933X UART driver");
+MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" DRIVER_NAME);
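
For context, ar933x_uart_probe() above expects a platform device named "ar933x-uart" carrying one MEM resource, one IRQ resource and ar933x_uart_platform_data. A hedged sketch of how SoC setup code might register such a device; the register base, IRQ number and clock rate below are placeholders, not values taken from this patch:

#include <linux/platform_device.h>
#include <linux/ioport.h>

#include <asm/mach-ath79/ar933x_uart.h>
#include <asm/mach-ath79/ar933x_uart_platform.h>

static struct resource example_ar933x_uart_resources[] = {
	{
		/* placeholder UART register base */
		.start	= 0x18020000,
		.end	= 0x18020000 + AR933X_UART_REGS_SIZE - 1,
		.flags	= IORESOURCE_MEM,
	},
	{
		/* placeholder IRQ line */
		.start	= 3,
		.end	= 3,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct ar933x_uart_platform_data example_ar933x_uart_data = {
	/* placeholder reference clock, consumed as pdata->uartclk in probe */
	.uartclk	= 25000000,
};

static struct platform_device example_ar933x_uart_device = {
	.name		= "ar933x-uart",	/* must match DRIVER_NAME above */
	.id		= -1,
	.resource	= example_ar933x_uart_resources,
	.num_resources	= ARRAY_SIZE(example_ar933x_uart_resources),
	.dev		= {
		.platform_data	= &example_ar933x_uart_data,
	},
};

Board init code would then call platform_device_register(&example_ar933x_uart_device) once; the probe above maps the registers, requests the IRQ at open time and adds the port to the ar933x uart_driver.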
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 4c823f341d98..10605ecc99ab 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -212,8 +212,9 @@ void atmel_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
unsigned int mode;
+ unsigned long flags;
- spin_lock(&port->lock);
+ spin_lock_irqsave(&port->lock, flags);
/* Disable interrupts */
UART_PUT_IDR(port, atmel_port->tx_done_mask);
@@ -244,7 +245,7 @@ void atmel_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf)
/* Enable interrupts */
UART_PUT_IER(port, atmel_port->tx_done_mask);
- spin_unlock(&port->lock);
+ spin_unlock_irqrestore(&port->lock, flags);
}
@@ -1256,12 +1257,7 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
static void atmel_set_ldisc(struct uart_port *port, int new)
{
- int line = port->line;
-
- if (line >= port->state->port.tty->driver->num)
- return;
-
- if (port->state->port.tty->ldisc->ops->num == N_PPS) {
+ if (new == N_PPS) {
port->flags |= UPF_HARDPPS_CD;
atmel_enable_ms(port);
} else {
diff --git a/drivers/tty/serial/bfin_sport_uart.c b/drivers/tty/serial/bfin_sport_uart.c
index ee101c0d358f..7fbc3a08f10d 100644
--- a/drivers/tty/serial/bfin_sport_uart.c
+++ b/drivers/tty/serial/bfin_sport_uart.c
@@ -299,8 +299,13 @@ static int sport_startup(struct uart_port *port)
dev_info(port->dev, "Unable to attach BlackFin UART over SPORT CTS interrupt. So, disable it.\n");
}
}
- if (up->rts_pin >= 0)
- gpio_direction_output(up->rts_pin, 0);
+ if (up->rts_pin >= 0) {
+ if (gpio_request(up->rts_pin, DRV_NAME)) {
+ dev_info(port->dev, "fail to request RTS PIN at GPIO_%d\n", up->rts_pin);
+ up->rts_pin = -1;
+ } else
+ gpio_direction_output(up->rts_pin, 0);
+ }
#endif
return 0;
@@ -445,6 +450,8 @@ static void sport_shutdown(struct uart_port *port)
#ifdef CONFIG_SERIAL_BFIN_SPORT_CTSRTS
if (up->cts_pin >= 0)
free_irq(gpio_to_irq(up->cts_pin), up);
+ if (up->rts_pin >= 0)
+ gpio_free(up->rts_pin);
#endif
}
@@ -803,17 +810,16 @@ static int __devinit sport_uart_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
if (res == NULL)
sport->cts_pin = -1;
- else
+ else {
sport->cts_pin = res->start;
+ sport->port.flags |= ASYNC_CTS_FLOW;
+ }
res = platform_get_resource(pdev, IORESOURCE_IO, 1);
if (res == NULL)
sport->rts_pin = -1;
else
sport->rts_pin = res->start;
-
- if (sport->rts_pin >= 0)
- gpio_request(sport->rts_pin, DRV_NAME);
#endif
}
@@ -853,10 +859,6 @@ static int __devexit sport_uart_remove(struct platform_device *pdev)
if (sport) {
uart_remove_one_port(&sport_uart_reg, &sport->port);
-#ifdef CONFIG_SERIAL_BFIN_CTSRTS
- if (sport->rts_pin >= 0)
- gpio_free(sport->rts_pin);
-#endif
iounmap(sport->port.membase);
peripheral_free_list(
(unsigned short *)pdev->dev.platform_data);
diff --git a/drivers/tty/serial/bfin_sport_uart.h b/drivers/tty/serial/bfin_sport_uart.h
index 6d06ce1d5675..e4510ea135ce 100644
--- a/drivers/tty/serial/bfin_sport_uart.h
+++ b/drivers/tty/serial/bfin_sport_uart.h
@@ -45,11 +45,12 @@
#define SPORT_GET_RX32(sport) \
({ \
unsigned int __ret; \
+ unsigned long flags; \
if (ANOMALY_05000473) \
- local_irq_disable(); \
+ local_irq_save(flags); \
__ret = bfin_read32((sport)->port.membase + OFFSET_RX); \
if (ANOMALY_05000473) \
- local_irq_enable(); \
+ local_irq_restore(flags); \
__ret; \
})
#define SPORT_GET_RCR1(sport) bfin_read16(((sport)->port.membase + OFFSET_RCR1))
diff --git a/drivers/tty/serial/bfin_uart.c b/drivers/tty/serial/bfin_uart.c
index 66afb98b77b5..26953bfa6922 100644
--- a/drivers/tty/serial/bfin_uart.c
+++ b/drivers/tty/serial/bfin_uart.c
@@ -116,15 +116,22 @@ static void bfin_serial_set_mctrl(struct uart_port *port, unsigned int mctrl)
static irqreturn_t bfin_serial_mctrl_cts_int(int irq, void *dev_id)
{
struct bfin_serial_port *uart = dev_id;
- unsigned int status;
-
- status = bfin_serial_get_mctrl(&uart->port);
- uart_handle_cts_change(&uart->port, status & TIOCM_CTS);
+ unsigned int status = bfin_serial_get_mctrl(&uart->port);
#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
- uart->scts = 1;
+ struct tty_struct *tty = uart->port.state->port.tty;
+
UART_CLEAR_SCTS(uart);
- UART_CLEAR_IER(uart, EDSSI);
+ if (tty->hw_stopped) {
+ if (status) {
+ tty->hw_stopped = 0;
+ uart_write_wakeup(&uart->port);
+ }
+ } else {
+ if (!status)
+ tty->hw_stopped = 1;
+ }
#endif
+ uart_handle_cts_change(&uart->port, status & TIOCM_CTS);
return IRQ_HANDLED;
}
@@ -175,13 +182,6 @@ static void bfin_serial_start_tx(struct uart_port *port)
struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
struct tty_struct *tty = uart->port.state->port.tty;
-#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
- if (uart->scts && !(bfin_serial_get_mctrl(&uart->port) & TIOCM_CTS)) {
- uart->scts = 0;
- uart_handle_cts_change(&uart->port, uart->scts);
- }
-#endif
-
/*
* To avoid losing the RX interrupt, we reset the IR function
* before sending data.
@@ -380,12 +380,6 @@ static irqreturn_t bfin_serial_tx_int(int irq, void *dev_id)
{
struct bfin_serial_port *uart = dev_id;
-#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
- if (uart->scts && !(bfin_serial_get_mctrl(&uart->port) & TIOCM_CTS)) {
- uart->scts = 0;
- uart_handle_cts_change(&uart->port, uart->scts);
- }
-#endif
spin_lock(&uart->port.lock);
if (UART_GET_LSR(uart) & THRE)
bfin_serial_tx_chars(uart);
@@ -531,13 +525,6 @@ static irqreturn_t bfin_serial_dma_tx_int(int irq, void *dev_id)
struct bfin_serial_port *uart = dev_id;
struct circ_buf *xmit = &uart->port.state->xmit;
-#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
- if (uart->scts && !(bfin_serial_get_mctrl(&uart->port)&TIOCM_CTS)) {
- uart->scts = 0;
- uart_handle_cts_change(&uart->port, uart->scts);
- }
-#endif
-
spin_lock(&uart->port.lock);
if (!(get_dma_curr_irqstat(uart->tx_dma_channel)&DMA_RUN)) {
disable_dma(uart->tx_dma_channel);
@@ -739,20 +726,26 @@ static int bfin_serial_startup(struct uart_port *port)
pr_info("Unable to attach BlackFin UART CTS interrupt. So, disable it.\n");
}
}
- if (uart->rts_pin >= 0)
- gpio_direction_output(uart->rts_pin, 0);
+ if (uart->rts_pin >= 0) {
+ if (gpio_request(uart->rts_pin, DRIVER_NAME)) {
+ pr_info("fail to request RTS PIN at GPIO_%d\n", uart->rts_pin);
+ uart->rts_pin = -1;
+ } else
+ gpio_direction_output(uart->rts_pin, 0);
+ }
#endif
#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
- if (uart->cts_pin >= 0 && request_irq(uart->status_irq,
- bfin_serial_mctrl_cts_int,
- 0, "BFIN_UART_MODEM_STATUS", uart)) {
- uart->cts_pin = -1;
- pr_info("Unable to attach BlackFin UART Modem Status interrupt.\n");
- }
+ if (uart->cts_pin >= 0) {
+ if (request_irq(uart->status_irq, bfin_serial_mctrl_cts_int,
+ IRQF_DISABLED, "BFIN_UART_MODEM_STATUS", uart)) {
+ uart->cts_pin = -1;
+ dev_info(port->dev, "Unable to attach BlackFin UART Modem Status interrupt.\n");
+ }
- /* CTS RTS PINs are negative assertive. */
- UART_PUT_MCR(uart, ACTS);
- UART_SET_IER(uart, EDSSI);
+ /* CTS RTS PINs are negative assertive. */
+ UART_PUT_MCR(uart, ACTS);
+ UART_SET_IER(uart, EDSSI);
+ }
#endif
UART_SET_IER(uart, ERBFI);
@@ -792,6 +785,8 @@ static void bfin_serial_shutdown(struct uart_port *port)
#ifdef CONFIG_SERIAL_BFIN_CTSRTS
if (uart->cts_pin >= 0)
free_irq(gpio_to_irq(uart->cts_pin), uart);
+ if (uart->rts_pin >= 0)
+ gpio_free(uart->rts_pin);
#endif
#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
if (uart->cts_pin >= 0)
@@ -1370,18 +1365,18 @@ static int bfin_serial_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
if (res == NULL)
uart->cts_pin = -1;
- else
+ else {
uart->cts_pin = res->start;
+#ifdef CONFIG_SERIAL_BFIN_CTSRTS
+ uart->port.flags |= ASYNC_CTS_FLOW;
+#endif
+ }
res = platform_get_resource(pdev, IORESOURCE_IO, 1);
if (res == NULL)
uart->rts_pin = -1;
else
uart->rts_pin = res->start;
-# if defined(CONFIG_SERIAL_BFIN_CTSRTS)
- if (uart->rts_pin >= 0)
- gpio_request(uart->rts_pin, DRIVER_NAME);
-# endif
#endif
}
@@ -1421,10 +1416,6 @@ static int __devexit bfin_serial_remove(struct platform_device *pdev)
if (uart) {
uart_remove_one_port(&bfin_serial_reg, &uart->port);
-#ifdef CONFIG_SERIAL_BFIN_CTSRTS
- if (uart->rts_pin >= 0)
- gpio_free(uart->rts_pin);
-#endif
iounmap(uart->port.membase);
peripheral_free_list(
(unsigned short *)pdev->dev.platform_data);
diff --git a/drivers/tty/serial/ifx6x60.c b/drivers/tty/serial/ifx6x60.c
index 426434e5eb7c..7e925e20cbaa 100644
--- a/drivers/tty/serial/ifx6x60.c
+++ b/drivers/tty/serial/ifx6x60.c
@@ -1334,7 +1334,6 @@ MODULE_DEVICE_TABLE(spi, ifx_id_table);
static const struct spi_driver ifx_spi_driver = {
.driver = {
.name = DRVNAME,
- .bus = &spi_bus_type,
.pm = &ifx_spi_pm,
.owner = THIS_MODULE},
.probe = ifx_spi_spi_probe,
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 163fc9021f5a..0b7fed746b27 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -102,6 +102,7 @@
#define UCR2_STPB (1<<6) /* Stop */
#define UCR2_WS (1<<5) /* Word size */
#define UCR2_RTSEN (1<<4) /* Request to send interrupt enable */
+#define UCR2_ATEN (1<<3) /* Aging Timer Enable */
#define UCR2_TXEN (1<<2) /* Transmitter enabled */
#define UCR2_RXEN (1<<1) /* Receiver enabled */
#define UCR2_SRST (1<<0) /* SW reset */
@@ -207,6 +208,12 @@ struct imx_port {
struct imx_uart_data *devdata;
};
+struct imx_port_ucrs {
+ unsigned int ucr1;
+ unsigned int ucr2;
+ unsigned int ucr3;
+};
+
#ifdef CONFIG_IRDA
#define USE_IRDA(sport) ((sport)->use_irda)
#else
@@ -260,6 +267,27 @@ static inline int is_imx21_uart(struct imx_port *sport)
}
/*
+ * Save and restore functions for UCR1, UCR2 and UCR3 registers
+ */
+static void imx_port_ucrs_save(struct uart_port *port,
+ struct imx_port_ucrs *ucr)
+{
+ /* save control registers */
+ ucr->ucr1 = readl(port->membase + UCR1);
+ ucr->ucr2 = readl(port->membase + UCR2);
+ ucr->ucr3 = readl(port->membase + UCR3);
+}
+
+static void imx_port_ucrs_restore(struct uart_port *port,
+ struct imx_port_ucrs *ucr)
+{
+ /* restore control registers */
+ writel(ucr->ucr1, port->membase + UCR1);
+ writel(ucr->ucr2, port->membase + UCR2);
+ writel(ucr->ucr3, port->membase + UCR3);
+}
+
+/*
* Handle any change of modem status signal since we were last called.
*/
static void imx_mctrl_check(struct imx_port *sport)
@@ -566,6 +594,9 @@ static irqreturn_t imx_int(int irq, void *dev_id)
if (sts & USR1_RTSD)
imx_rtsint(irq, dev_id);
+ if (sts & USR1_AWAKE)
+ writel(USR1_AWAKE, sport->port.membase + USR1);
+
return IRQ_HANDLED;
}
@@ -901,6 +932,8 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios,
ucr2 |= UCR2_PROE;
}
+ del_timer_sync(&sport->timer);
+
/*
* Ask the core to calculate the divisor for us.
*/
@@ -931,8 +964,6 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios,
sport->port.ignore_status_mask |= URXD_OVRRUN;
}
- del_timer_sync(&sport->timer);
-
/*
* Update the per-port timeout.
*/
@@ -1079,6 +1110,70 @@ imx_verify_port(struct uart_port *port, struct serial_struct *ser)
return ret;
}
+#if defined(CONFIG_CONSOLE_POLL)
+static int imx_poll_get_char(struct uart_port *port)
+{
+ struct imx_port_ucrs old_ucr;
+ unsigned int status;
+ unsigned char c;
+
+ /* save control registers */
+ imx_port_ucrs_save(port, &old_ucr);
+
+ /* disable interrupts */
+ writel(UCR1_UARTEN, port->membase + UCR1);
+ writel(old_ucr.ucr2 & ~(UCR2_ATEN | UCR2_RTSEN | UCR2_ESCI),
+ port->membase + UCR2);
+ writel(old_ucr.ucr3 & ~(UCR3_DCD | UCR3_RI | UCR3_DTREN),
+ port->membase + UCR3);
+
+ /* poll */
+ do {
+ status = readl(port->membase + USR2);
+ } while (~status & USR2_RDR);
+
+ /* read */
+ c = readl(port->membase + URXD0);
+
+ /* restore control registers */
+ imx_port_ucrs_restore(port, &old_ucr);
+
+ return c;
+}
+
+static void imx_poll_put_char(struct uart_port *port, unsigned char c)
+{
+ struct imx_port_ucrs old_ucr;
+ unsigned int status;
+
+ /* save control registers */
+ imx_port_ucrs_save(port, &old_ucr);
+
+ /* disable interrupts */
+ writel(UCR1_UARTEN, port->membase + UCR1);
+ writel(old_ucr.ucr2 & ~(UCR2_ATEN | UCR2_RTSEN | UCR2_ESCI),
+ port->membase + UCR2);
+ writel(old_ucr.ucr3 & ~(UCR3_DCD | UCR3_RI | UCR3_DTREN),
+ port->membase + UCR3);
+
+ /* drain */
+ do {
+ status = readl(port->membase + USR1);
+ } while (~status & USR1_TRDY);
+
+ /* write */
+ writel(c, port->membase + URTX0);
+
+ /* flush */
+ do {
+ status = readl(port->membase + USR2);
+ } while (~status & USR2_TXDC);
+
+ /* restore control registers */
+ imx_port_ucrs_restore(port, &old_ucr);
+}
+#endif
+
static struct uart_ops imx_pops = {
.tx_empty = imx_tx_empty,
.set_mctrl = imx_set_mctrl,
@@ -1096,6 +1191,10 @@ static struct uart_ops imx_pops = {
.request_port = imx_request_port,
.config_port = imx_config_port,
.verify_port = imx_verify_port,
+#if defined(CONFIG_CONSOLE_POLL)
+ .poll_get_char = imx_poll_get_char,
+ .poll_put_char = imx_poll_put_char,
+#endif
};
static struct imx_port *imx_ports[UART_NR];
@@ -1118,13 +1217,14 @@ static void
imx_console_write(struct console *co, const char *s, unsigned int count)
{
struct imx_port *sport = imx_ports[co->index];
- unsigned int old_ucr1, old_ucr2, ucr1;
+ struct imx_port_ucrs old_ucr;
+ unsigned int ucr1;
/*
- * First, save UCR1/2 and then disable interrupts
+ * First, save UCR1/2/3 and then disable interrupts
*/
- ucr1 = old_ucr1 = readl(sport->port.membase + UCR1);
- old_ucr2 = readl(sport->port.membase + UCR2);
+ imx_port_ucrs_save(&sport->port, &old_ucr);
+ ucr1 = old_ucr.ucr1;
if (is_imx1_uart(sport))
ucr1 |= IMX1_UCR1_UARTCLKEN;
@@ -1133,18 +1233,17 @@ imx_console_write(struct console *co, const char *s, unsigned int count)
writel(ucr1, sport->port.membase + UCR1);
- writel(old_ucr2 | UCR2_TXEN, sport->port.membase + UCR2);
+ writel(old_ucr.ucr2 | UCR2_TXEN, sport->port.membase + UCR2);
uart_console_write(&sport->port, s, count, imx_console_putchar);
/*
* Finally, wait for transmitter to become empty
- * and restore UCR1/2
+ * and restore UCR1/2/3
*/
while (!(readl(sport->port.membase + USR2) & USR2_TXDC));
- writel(old_ucr1, sport->port.membase + UCR1);
- writel(old_ucr2, sport->port.membase + UCR2);
+ imx_port_ucrs_restore(&sport->port, &old_ucr);
}
/*
@@ -1269,6 +1368,12 @@ static struct uart_driver imx_reg = {
static int serial_imx_suspend(struct platform_device *dev, pm_message_t state)
{
struct imx_port *sport = platform_get_drvdata(dev);
+ unsigned int val;
+
+ /* enable wakeup from i.MX UART */
+ val = readl(sport->port.membase + UCR3);
+ val |= UCR3_AWAKEN;
+ writel(val, sport->port.membase + UCR3);
if (sport)
uart_suspend_port(&imx_reg, &sport->port);
@@ -1279,6 +1384,12 @@ static int serial_imx_suspend(struct platform_device *dev, pm_message_t state)
static int serial_imx_resume(struct platform_device *dev)
{
struct imx_port *sport = platform_get_drvdata(dev);
+ unsigned int val;
+
+ /* disable wakeup from i.MX UART */
+ val = readl(sport->port.membase + UCR3);
+ val &= ~UCR3_AWAKEN;
+ writel(val, sport->port.membase + UCR3);
if (sport)
uart_resume_port(&imx_reg, &sport->port);
@@ -1287,6 +1398,10 @@ static int serial_imx_resume(struct platform_device *dev)
}
#ifdef CONFIG_OF
+/*
+ * This function returns 1 if pdev isn't a device instantiated by dt, 0 if it
+ * could successfully get all information from dt, or a negative errno.
+ */
static int serial_imx_probe_dt(struct imx_port *sport,
struct platform_device *pdev)
{
@@ -1296,12 +1411,13 @@ static int serial_imx_probe_dt(struct imx_port *sport,
int ret;
if (!np)
- return -ENODEV;
+ /* no device tree device */
+ return 1;
ret = of_alias_get_id(np, "serial");
if (ret < 0) {
dev_err(&pdev->dev, "failed to get alias id, errno %d\n", ret);
- return -ENODEV;
+ return ret;
}
sport->port.line = ret;
@@ -1319,7 +1435,7 @@ static int serial_imx_probe_dt(struct imx_port *sport,
static inline int serial_imx_probe_dt(struct imx_port *sport,
struct platform_device *pdev)
{
- return -ENODEV;
+ return 1;
}
#endif
@@ -1354,8 +1470,10 @@ static int serial_imx_probe(struct platform_device *pdev)
return -ENOMEM;
ret = serial_imx_probe_dt(sport, pdev);
- if (ret == -ENODEV)
+ if (ret > 0)
serial_imx_probe_pdata(sport, pdev);
+ else if (ret < 0)
+ goto free;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
@@ -1476,7 +1594,7 @@ static int __init imx_serial_init(void)
if (ret != 0)
uart_unregister_driver(&imx_reg);
- return 0;
+ return ret;
}
static void __exit imx_serial_exit(void)
diff --git a/drivers/tty/serial/m32r_sio.c b/drivers/tty/serial/m32r_sio.c
index 08018934e013..94a6792bf97b 100644
--- a/drivers/tty/serial/m32r_sio.c
+++ b/drivers/tty/serial/m32r_sio.c
@@ -1000,11 +1000,8 @@ static void __init m32r_sio_register_ports(struct uart_driver *drv)
init_timer(&up->timer);
up->timer.function = m32r_sio_timeout;
- /*
- * ALPHA_KLUDGE_MCR needs to be killed.
- */
- up->mcr_mask = ~ALPHA_KLUDGE_MCR;
- up->mcr_force = ALPHA_KLUDGE_MCR;
+ up->mcr_mask = ~0;
+ up->mcr_force = 0;
uart_add_one_port(drv, &up->port);
}
diff --git a/drivers/tty/serial/max3100.c b/drivers/tty/serial/max3100.c
index 8a6cc8c30b5a..b4902b99cfd2 100644
--- a/drivers/tty/serial/max3100.c
+++ b/drivers/tty/serial/max3100.c
@@ -901,7 +901,6 @@ static int max3100_resume(struct spi_device *spi)
static struct spi_driver max3100_driver = {
.driver = {
.name = "max3100",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
diff --git a/drivers/tty/serial/max3107-aava.c b/drivers/tty/serial/max3107-aava.c
index 90c40f22ec70..aae772a71de6 100644
--- a/drivers/tty/serial/max3107-aava.c
+++ b/drivers/tty/serial/max3107-aava.c
@@ -315,7 +315,6 @@ static int __devinit max3107_probe_aava(struct spi_device *spi)
static struct spi_driver max3107_driver = {
.driver = {
.name = "aava-max3107",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = max3107_probe_aava,
diff --git a/drivers/tty/serial/max3107.c b/drivers/tty/serial/max3107.c
index 7827000db4f5..17c7ba805d98 100644
--- a/drivers/tty/serial/max3107.c
+++ b/drivers/tty/serial/max3107.c
@@ -1181,7 +1181,6 @@ static int max3107_probe_generic(struct spi_device *spi)
static struct spi_driver max3107_driver = {
.driver = {
.name = "max3107",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = max3107_probe_generic,
diff --git a/drivers/tty/serial/mfd.c b/drivers/tty/serial/mfd.c
index e272d3919c67..a9234ba8f8d5 100644
--- a/drivers/tty/serial/mfd.c
+++ b/drivers/tty/serial/mfd.c
@@ -1154,7 +1154,6 @@ serial_hsu_console_setup(struct console *co, char *options)
int bits = 8;
int parity = 'n';
int flow = 'n';
- int ret;
if (co->index == -1 || co->index >= serial_hsu_reg.nr)
co->index = 0;
@@ -1165,9 +1164,7 @@ serial_hsu_console_setup(struct console *co, char *options)
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
- ret = uart_set_options(&up->port, co, baud, parity, bits, flow);
-
- return ret;
+ return uart_set_options(&up->port, co, baud, parity, bits, flow);
}
static struct console serial_hsu_console = {
@@ -1176,9 +1173,13 @@ static struct console serial_hsu_console = {
.device = uart_console_device,
.setup = serial_hsu_console_setup,
.flags = CON_PRINTBUFFER,
- .index = 2,
+ .index = -1,
.data = &serial_hsu_reg,
};
+
+#define SERIAL_HSU_CONSOLE (&serial_hsu_console)
+#else
+#define SERIAL_HSU_CONSOLE NULL
#endif
struct uart_ops serial_hsu_pops = {
@@ -1208,6 +1209,7 @@ static struct uart_driver serial_hsu_reg = {
.major = TTY_MAJOR,
.minor = 128,
.nr = 3,
+ .cons = SERIAL_HSU_CONSOLE,
};
#ifdef CONFIG_PM
@@ -1342,12 +1344,6 @@ static int serial_hsu_probe(struct pci_dev *pdev,
}
uart_add_one_port(&serial_hsu_reg, &uport->port);
-#ifdef CONFIG_SERIAL_MFD_HSU_CONSOLE
- if (index == 2) {
- register_console(&serial_hsu_console);
- uport->port.cons = &serial_hsu_console;
- }
-#endif
pci_set_drvdata(pdev, uport);
}
diff --git a/drivers/tty/serial/mrst_max3110.c b/drivers/tty/serial/mrst_max3110.c
index 4c309e869903..df2a2240a3ae 100644
--- a/drivers/tty/serial/mrst_max3110.c
+++ b/drivers/tty/serial/mrst_max3110.c
@@ -876,7 +876,6 @@ static int __devexit serial_m3110_remove(struct spi_device *dev)
static struct spi_driver uart_max3110_driver = {
.driver = {
.name = "spi_max3111",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = serial_m3110_probe,
diff --git a/drivers/tty/serial/msm_serial_hs.c b/drivers/tty/serial/msm_serial_hs.c
index 60c6eb850265..5e85e1e14c44 100644
--- a/drivers/tty/serial/msm_serial_hs.c
+++ b/drivers/tty/serial/msm_serial_hs.c
@@ -422,9 +422,9 @@ static int __devexit msm_hs_remove(struct platform_device *pdev)
msm_uport->rx.rbuffer);
dma_pool_destroy(msm_uport->rx.pool);
- dma_unmap_single(dev, msm_uport->rx.cmdptr_dmaaddr, sizeof(u32 *),
+ dma_unmap_single(dev, msm_uport->rx.cmdptr_dmaaddr, sizeof(u32),
DMA_TO_DEVICE);
- dma_unmap_single(dev, msm_uport->tx.mapped_cmd_ptr_ptr, sizeof(u32 *),
+ dma_unmap_single(dev, msm_uport->tx.mapped_cmd_ptr_ptr, sizeof(u32),
DMA_TO_DEVICE);
dma_unmap_single(dev, msm_uport->tx.mapped_cmd_ptr, sizeof(dmov_box),
DMA_TO_DEVICE);
@@ -812,7 +812,7 @@ static void msm_hs_submit_tx_locked(struct uart_port *uport)
*tx->command_ptr_ptr = CMD_PTR_LP | DMOV_CMD_ADDR(tx->mapped_cmd_ptr);
dma_sync_single_for_device(uport->dev, tx->mapped_cmd_ptr_ptr,
- sizeof(u32 *), DMA_TO_DEVICE);
+ sizeof(u32), DMA_TO_DEVICE);
/* Save tx_count to use in Callback */
tx->tx_count = tx_count;
@@ -1087,12 +1087,10 @@ static void msm_hs_config_port(struct uart_port *uport, int cfg_flags)
}
/* Handle CTS changes (Called from interrupt handler) */
-static void msm_hs_handle_delta_cts(struct uart_port *uport)
+static void msm_hs_handle_delta_cts_locked(struct uart_port *uport)
{
- unsigned long flags;
struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
- spin_lock_irqsave(&uport->lock, flags);
clk_enable(msm_uport->clk);
/* clear interrupt */
@@ -1100,7 +1098,6 @@ static void msm_hs_handle_delta_cts(struct uart_port *uport)
uport->icount.cts++;
clk_disable(msm_uport->clk);
- spin_unlock_irqrestore(&uport->lock, flags);
/* clear the IOCTL TIOCMIWAIT if called */
wake_up_interruptible(&uport->state->port.delta_msr_wait);
@@ -1248,7 +1245,7 @@ static irqreturn_t msm_hs_isr(int irq, void *dev)
/* Change in CTS interrupt */
if (isr_status & UARTDM_ISR_DELTA_CTS_BMSK)
- msm_hs_handle_delta_cts(uport);
+ msm_hs_handle_delta_cts_locked(uport);
spin_unlock_irqrestore(&uport->lock, flags);
@@ -1537,7 +1534,7 @@ static int __devinit uartdm_init_port(struct uart_port *uport)
if (!tx->command_ptr)
return -ENOMEM;
- tx->command_ptr_ptr = kmalloc(sizeof(u32 *), GFP_KERNEL | __GFP_DMA);
+ tx->command_ptr_ptr = kmalloc(sizeof(u32), GFP_KERNEL | __GFP_DMA);
if (!tx->command_ptr_ptr) {
ret = -ENOMEM;
goto err_tx_command_ptr_ptr;
@@ -1547,7 +1544,7 @@ static int __devinit uartdm_init_port(struct uart_port *uport)
sizeof(dmov_box), DMA_TO_DEVICE);
tx->mapped_cmd_ptr_ptr = dma_map_single(uport->dev,
tx->command_ptr_ptr,
- sizeof(u32 *), DMA_TO_DEVICE);
+ sizeof(u32), DMA_TO_DEVICE);
tx->xfer.cmdptr = DMOV_CMD_ADDR(tx->mapped_cmd_ptr_ptr);
init_waitqueue_head(&rx->wait);
@@ -1575,7 +1572,7 @@ static int __devinit uartdm_init_port(struct uart_port *uport)
goto err_rx_command_ptr;
}
- rx->command_ptr_ptr = kmalloc(sizeof(u32 *), GFP_KERNEL | __GFP_DMA);
+ rx->command_ptr_ptr = kmalloc(sizeof(u32), GFP_KERNEL | __GFP_DMA);
if (!rx->command_ptr_ptr) {
pr_err("%s(): cannot allocate rx->command_ptr_ptr", __func__);
ret = -ENOMEM;
@@ -1593,7 +1590,7 @@ static int __devinit uartdm_init_port(struct uart_port *uport)
*rx->command_ptr_ptr = CMD_PTR_LP | DMOV_CMD_ADDR(rx->mapped_cmd_ptr);
rx->cmdptr_dmaaddr = dma_map_single(uport->dev, rx->command_ptr_ptr,
- sizeof(u32 *), DMA_TO_DEVICE);
+ sizeof(u32), DMA_TO_DEVICE);
rx->xfer.cmdptr = DMOV_CMD_ADDR(rx->cmdptr_dmaaddr);
INIT_WORK(&rx->tty_work, msm_hs_tty_flip_buffer_work);
@@ -1609,7 +1606,7 @@ err_dma_pool_alloc:
dma_pool_destroy(msm_uport->rx.pool);
err_dma_pool_create:
dma_unmap_single(uport->dev, msm_uport->tx.mapped_cmd_ptr_ptr,
- sizeof(u32 *), DMA_TO_DEVICE);
+ sizeof(u32), DMA_TO_DEVICE);
dma_unmap_single(uport->dev, msm_uport->tx.mapped_cmd_ptr,
sizeof(dmov_box), DMA_TO_DEVICE);
kfree(msm_uport->tx.command_ptr_ptr);
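
The sizeof(u32 *) to sizeof(u32) conversions above make each DMA mapping length match the object that was actually allocated, rather than the size of a pointer to it. A minimal sketch of the corrected pattern (the device pointer and error handling are illustrative only, not taken from this driver):

#include <linux/dma-mapping.h>
#include <linux/slab.h>

/* Allocate a single u32 command pointer and map exactly that many bytes.
 * sizeof(u32 *) would instead map a pointer-sized length, which diverges
 * from the allocation on 64-bit builds. */
static int example_map_cmdptr(struct device *dev, u32 **cmdptr,
			      dma_addr_t *dma)
{
	*cmdptr = kmalloc(sizeof(u32), GFP_KERNEL | __GFP_DMA);
	if (!*cmdptr)
		return -ENOMEM;

	*dma = dma_map_single(dev, *cmdptr, sizeof(u32), DMA_TO_DEVICE);
	return 0;
}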
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index 7e02c9c344fe..55fd362b9879 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -145,11 +145,12 @@ static inline void mxs_auart_tx_chars(struct mxs_auart_port *s)
writel(xmit->buf[xmit->tail],
s->port.membase + AUART_DATA);
xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
- if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
- uart_write_wakeup(&s->port);
} else
break;
}
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&s->port);
+
if (uart_circ_empty(&(s->port.state->xmit)))
writel(AUART_INTR_TXIEN,
s->port.membase + AUART_INTR_CLR);
@@ -424,7 +425,7 @@ static int mxs_auart_startup(struct uart_port *u)
{
struct mxs_auart_port *s = to_auart_port(u);
- clk_enable(s->clk);
+ clk_prepare_enable(s->clk);
writel(AUART_CTRL0_CLKGATE, u->membase + AUART_CTRL0_CLR);
@@ -453,7 +454,7 @@ static void mxs_auart_shutdown(struct uart_port *u)
writel(AUART_INTR_RXIEN | AUART_INTR_RTIEN | AUART_INTR_CTSMIEN,
u->membase + AUART_INTR_CLR);
- clk_disable(s->clk);
+ clk_disable_unprepare(s->clk);
}
static unsigned int mxs_auart_tx_empty(struct uart_port *u)
@@ -634,7 +635,7 @@ auart_console_setup(struct console *co, char *options)
if (!s)
return -ENODEV;
- clk_enable(s->clk);
+ clk_prepare_enable(s->clk);
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
@@ -643,7 +644,7 @@ auart_console_setup(struct console *co, char *options)
ret = uart_set_options(&s->port, co, baud, parity, bits, flow);
- clk_disable(s->clk);
+ clk_disable_unprepare(s->clk);
return ret;
}
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index 5e713d3ef1f4..d192dcbb82f5 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -37,17 +37,24 @@
#include <linux/clk.h>
#include <linux/serial_core.h>
#include <linux/irq.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
#include <plat/dma.h>
#include <plat/dmtimer.h>
#include <plat/omap-serial.h>
+#define DEFAULT_CLK_SPEED 48000000 /* 48 MHz */
+
static struct uart_omap_port *ui[OMAP_MAX_HSUART_PORTS];
/* Forward declaration of functions */
static void uart_tx_dma_callback(int lch, u16 ch_status, void *data);
-static void serial_omap_rx_timeout(unsigned long uart_no);
+static void serial_omap_rxdma_poll(unsigned long uart_no);
static int serial_omap_start_rxdma(struct uart_omap_port *up);
+static void serial_omap_mdr1_errataset(struct uart_omap_port *up, u8 mdr1);
+
+static struct workqueue_struct *serial_omap_uart_wq;
static inline unsigned int serial_in(struct uart_omap_port *up, int offset)
{
@@ -102,6 +109,8 @@ static void serial_omap_stop_rxdma(struct uart_omap_port *up)
omap_free_dma(up->uart_dma.rx_dma_channel);
up->uart_dma.rx_dma_channel = OMAP_UART_DMA_CH_FREE;
up->uart_dma.rx_dma_used = false;
+ pm_runtime_mark_last_busy(&up->pdev->dev);
+ pm_runtime_put_autosuspend(&up->pdev->dev);
}
}
@@ -109,9 +118,12 @@ static void serial_omap_enable_ms(struct uart_port *port)
{
struct uart_omap_port *up = (struct uart_omap_port *)port;
- dev_dbg(up->port.dev, "serial_omap_enable_ms+%d\n", up->pdev->id);
+ dev_dbg(up->port.dev, "serial_omap_enable_ms+%d\n", up->port.line);
+
+ pm_runtime_get_sync(&up->pdev->dev);
up->ier |= UART_IER_MSI;
serial_out(up, UART_IER, up->ier);
+ pm_runtime_put(&up->pdev->dev);
}
static void serial_omap_stop_tx(struct uart_port *port)
@@ -129,30 +141,40 @@ static void serial_omap_stop_tx(struct uart_port *port)
omap_stop_dma(up->uart_dma.tx_dma_channel);
omap_free_dma(up->uart_dma.tx_dma_channel);
up->uart_dma.tx_dma_channel = OMAP_UART_DMA_CH_FREE;
+ pm_runtime_mark_last_busy(&up->pdev->dev);
+ pm_runtime_put_autosuspend(&up->pdev->dev);
}
+ pm_runtime_get_sync(&up->pdev->dev);
if (up->ier & UART_IER_THRI) {
up->ier &= ~UART_IER_THRI;
serial_out(up, UART_IER, up->ier);
}
+
+ pm_runtime_mark_last_busy(&up->pdev->dev);
+ pm_runtime_put_autosuspend(&up->pdev->dev);
}
static void serial_omap_stop_rx(struct uart_port *port)
{
struct uart_omap_port *up = (struct uart_omap_port *)port;
+ pm_runtime_get_sync(&up->pdev->dev);
if (up->use_dma)
serial_omap_stop_rxdma(up);
up->ier &= ~UART_IER_RLSI;
up->port.read_status_mask &= ~UART_LSR_DR;
serial_out(up, UART_IER, up->ier);
+ pm_runtime_mark_last_busy(&up->pdev->dev);
+ pm_runtime_put_autosuspend(&up->pdev->dev);
}
-static inline void receive_chars(struct uart_omap_port *up, int *status)
+static inline void receive_chars(struct uart_omap_port *up,
+ unsigned int *status)
{
struct tty_struct *tty = up->port.state->port.tty;
- unsigned int flag;
- unsigned char ch, lsr = *status;
+ unsigned int flag, lsr = *status;
+ unsigned char ch = 0;
int max_count = 256;
do {
@@ -262,7 +284,10 @@ static void serial_omap_start_tx(struct uart_port *port)
int ret = 0;
if (!up->use_dma) {
+ pm_runtime_get_sync(&up->pdev->dev);
serial_omap_enable_ier_thri(up);
+ pm_runtime_mark_last_busy(&up->pdev->dev);
+ pm_runtime_put_autosuspend(&up->pdev->dev);
return;
}
@@ -272,6 +297,7 @@ static void serial_omap_start_tx(struct uart_port *port)
xmit = &up->port.state->xmit;
if (up->uart_dma.tx_dma_channel == OMAP_UART_DMA_CH_FREE) {
+ pm_runtime_get_sync(&up->pdev->dev);
ret = omap_request_dma(up->uart_dma.uart_dma_tx,
"UART Tx DMA",
(void *)uart_tx_dma_callback, up,
@@ -354,9 +380,13 @@ static inline irqreturn_t serial_omap_irq(int irq, void *dev_id)
unsigned int iir, lsr;
unsigned long flags;
+ pm_runtime_get_sync(&up->pdev->dev);
iir = serial_in(up, UART_IIR);
- if (iir & UART_IIR_NO_INT)
+ if (iir & UART_IIR_NO_INT) {
+ pm_runtime_mark_last_busy(&up->pdev->dev);
+ pm_runtime_put_autosuspend(&up->pdev->dev);
return IRQ_NONE;
+ }
spin_lock_irqsave(&up->port.lock, flags);
lsr = serial_in(up, UART_LSR);
@@ -378,6 +408,9 @@ static inline irqreturn_t serial_omap_irq(int irq, void *dev_id)
transmit_chars(up);
spin_unlock_irqrestore(&up->port.lock, flags);
+ pm_runtime_mark_last_busy(&up->pdev->dev);
+ pm_runtime_put_autosuspend(&up->pdev->dev);
+
up->port_activity = jiffies;
return IRQ_HANDLED;
}
@@ -388,22 +421,26 @@ static unsigned int serial_omap_tx_empty(struct uart_port *port)
unsigned long flags = 0;
unsigned int ret = 0;
- dev_dbg(up->port.dev, "serial_omap_tx_empty+%d\n", up->pdev->id);
+ pm_runtime_get_sync(&up->pdev->dev);
+ dev_dbg(up->port.dev, "serial_omap_tx_empty+%d\n", up->port.line);
spin_lock_irqsave(&up->port.lock, flags);
ret = serial_in(up, UART_LSR) & UART_LSR_TEMT ? TIOCSER_TEMT : 0;
spin_unlock_irqrestore(&up->port.lock, flags);
-
+ pm_runtime_put(&up->pdev->dev);
return ret;
}
static unsigned int serial_omap_get_mctrl(struct uart_port *port)
{
struct uart_omap_port *up = (struct uart_omap_port *)port;
- unsigned char status;
+ unsigned int status;
unsigned int ret = 0;
+ pm_runtime_get_sync(&up->pdev->dev);
status = check_modem_status(up);
- dev_dbg(up->port.dev, "serial_omap_get_mctrl+%d\n", up->pdev->id);
+ pm_runtime_put(&up->pdev->dev);
+
+ dev_dbg(up->port.dev, "serial_omap_get_mctrl+%d\n", up->port.line);
if (status & UART_MSR_DCD)
ret |= TIOCM_CAR;
@@ -421,7 +458,7 @@ static void serial_omap_set_mctrl(struct uart_port *port, unsigned int mctrl)
struct uart_omap_port *up = (struct uart_omap_port *)port;
unsigned char mcr = 0;
- dev_dbg(up->port.dev, "serial_omap_set_mctrl+%d\n", up->pdev->id);
+ dev_dbg(up->port.dev, "serial_omap_set_mctrl+%d\n", up->port.line);
if (mctrl & TIOCM_RTS)
mcr |= UART_MCR_RTS;
if (mctrl & TIOCM_DTR)
@@ -433,8 +470,11 @@ static void serial_omap_set_mctrl(struct uart_port *port, unsigned int mctrl)
if (mctrl & TIOCM_LOOP)
mcr |= UART_MCR_LOOP;
- mcr |= up->mcr;
- serial_out(up, UART_MCR, mcr);
+ pm_runtime_get_sync(&up->pdev->dev);
+ up->mcr = serial_in(up, UART_MCR);
+ up->mcr |= mcr;
+ serial_out(up, UART_MCR, up->mcr);
+ pm_runtime_put(&up->pdev->dev);
}
static void serial_omap_break_ctl(struct uart_port *port, int break_state)
@@ -442,7 +482,8 @@ static void serial_omap_break_ctl(struct uart_port *port, int break_state)
struct uart_omap_port *up = (struct uart_omap_port *)port;
unsigned long flags = 0;
- dev_dbg(up->port.dev, "serial_omap_break_ctl+%d\n", up->pdev->id);
+ dev_dbg(up->port.dev, "serial_omap_break_ctl+%d\n", up->port.line);
+ pm_runtime_get_sync(&up->pdev->dev);
spin_lock_irqsave(&up->port.lock, flags);
if (break_state == -1)
up->lcr |= UART_LCR_SBC;
@@ -450,6 +491,7 @@ static void serial_omap_break_ctl(struct uart_port *port, int break_state)
up->lcr &= ~UART_LCR_SBC;
serial_out(up, UART_LCR, up->lcr);
spin_unlock_irqrestore(&up->port.lock, flags);
+ pm_runtime_put(&up->pdev->dev);
}
static int serial_omap_startup(struct uart_port *port)
@@ -466,8 +508,9 @@ static int serial_omap_startup(struct uart_port *port)
if (retval)
return retval;
- dev_dbg(up->port.dev, "serial_omap_startup+%d\n", up->pdev->id);
+ dev_dbg(up->port.dev, "serial_omap_startup+%d\n", up->port.line);
+ pm_runtime_get_sync(&up->pdev->dev);
/*
* Clear the FIFO buffers and disable them.
* (they will be reenabled in set_termios())
@@ -505,8 +548,8 @@ static int serial_omap_startup(struct uart_port *port)
(dma_addr_t *)&(up->uart_dma.tx_buf_dma_phys),
0);
init_timer(&(up->uart_dma.rx_timer));
- up->uart_dma.rx_timer.function = serial_omap_rx_timeout;
- up->uart_dma.rx_timer.data = up->pdev->id;
+ up->uart_dma.rx_timer.function = serial_omap_rxdma_poll;
+ up->uart_dma.rx_timer.data = up->port.line;
/* Currently the buffer size is 4KB. Can increase it */
up->uart_dma.rx_buf = dma_alloc_coherent(NULL,
up->uart_dma.rx_buf_size,
@@ -523,6 +566,8 @@ static int serial_omap_startup(struct uart_port *port)
/* Enable module level wake up */
serial_out(up, UART_OMAP_WER, OMAP_UART_WER_MOD_WKUP);
+ pm_runtime_mark_last_busy(&up->pdev->dev);
+ pm_runtime_put_autosuspend(&up->pdev->dev);
up->port_activity = jiffies;
return 0;
}
@@ -532,7 +577,9 @@ static void serial_omap_shutdown(struct uart_port *port)
struct uart_omap_port *up = (struct uart_omap_port *)port;
unsigned long flags = 0;
- dev_dbg(up->port.dev, "serial_omap_shutdown+%d\n", up->pdev->id);
+ dev_dbg(up->port.dev, "serial_omap_shutdown+%d\n", up->port.line);
+
+ pm_runtime_get_sync(&up->pdev->dev);
/*
* Disable interrupts from this port
*/
@@ -566,6 +613,8 @@ static void serial_omap_shutdown(struct uart_port *port)
up->uart_dma.rx_buf_dma_phys);
up->uart_dma.rx_buf = NULL;
}
+
+ pm_runtime_put(&up->pdev->dev);
free_irq(up->port.irq, up);
}
@@ -573,8 +622,6 @@ static inline void
serial_omap_configure_xonxoff
(struct uart_omap_port *up, struct ktermios *termios)
{
- unsigned char efr = 0;
-
up->lcr = serial_in(up, UART_LCR);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
up->efr = serial_in(up, UART_EFR);
@@ -584,8 +631,7 @@ serial_omap_configure_xonxoff
serial_out(up, UART_XOFF1, termios->c_cc[VSTOP]);
/* clear SW control mode bits */
- efr = up->efr;
- efr &= OMAP_UART_SW_CLR;
+ up->efr &= OMAP_UART_SW_CLR;
/*
* IXON Flag:
@@ -593,7 +639,7 @@ serial_omap_configure_xonxoff
* Transmit XON1, XOFF1
*/
if (termios->c_iflag & IXON)
- efr |= OMAP_UART_SW_TX;
+ up->efr |= OMAP_UART_SW_TX;
/*
* IXOFF Flag:
@@ -601,7 +647,7 @@ serial_omap_configure_xonxoff
* Receiver compares XON1, XOFF1.
*/
if (termios->c_iflag & IXOFF)
- efr |= OMAP_UART_SW_RX;
+ up->efr |= OMAP_UART_SW_RX;
serial_out(up, UART_EFR, up->efr | UART_EFR_ECB);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
@@ -624,13 +670,21 @@ serial_omap_configure_xonxoff
* load the new software flow control mode IXON or IXOFF
* and restore the UARTi.EFR_REG[4] ENHANCED_EN value.
*/
- serial_out(up, UART_EFR, efr | UART_EFR_SCD);
+ serial_out(up, UART_EFR, up->efr | UART_EFR_SCD);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
serial_out(up, UART_MCR, up->mcr & ~UART_MCR_TCRTLR);
serial_out(up, UART_LCR, up->lcr);
}
+static void serial_omap_uart_qos_work(struct work_struct *work)
+{
+ struct uart_omap_port *up = container_of(work, struct uart_omap_port,
+ qos_work);
+
+ pm_qos_update_request(&up->pm_qos_request, up->latency);
+}
+
static void
serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
struct ktermios *old)
@@ -671,6 +725,16 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/13);
quot = serial_omap_get_divisor(port, baud);
+ /* calculate wakeup latency constraint */
+ up->calc_latency = (1000000 * up->port.fifosize) /
+ (1000 * baud / 8);
+ up->latency = up->calc_latency;
+ schedule_work(&up->qos_work);
+
+ up->dll = quot & 0xff;
+ up->dlh = quot >> 8;
+ up->mdr1 = UART_OMAP_MDR1_DISABLE;
+
up->fcr = UART_FCR_R_TRIG_01 | UART_FCR_T_TRIG_01 |
UART_FCR_ENABLE_FIFO;
if (up->use_dma)
@@ -680,6 +744,7 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
* Ok, we're now changing the port state. Do it with
* interrupts disabled.
*/
+ pm_runtime_get_sync(&up->pdev->dev);
spin_lock_irqsave(&up->port.lock, flags);
/*
@@ -723,6 +788,8 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
up->ier |= UART_IER_MSI;
serial_out(up, UART_IER, up->ier);
serial_out(up, UART_LCR, cval); /* reset DLAB */
+ up->lcr = cval;
+ up->scr = OMAP_UART_SCR_TX_EMPTY;
/* FIFOs and DMA Settings */
@@ -749,17 +816,22 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
if (up->use_dma) {
serial_out(up, UART_TI752_TLR, 0);
- serial_out(up, UART_OMAP_SCR,
- (UART_FCR_TRIGGER_4 | UART_FCR_TRIGGER_8));
+ up->scr |= (UART_FCR_TRIGGER_4 | UART_FCR_TRIGGER_8);
}
+ serial_out(up, UART_OMAP_SCR, up->scr);
+
serial_out(up, UART_EFR, up->efr);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
serial_out(up, UART_MCR, up->mcr);
/* Protocol, Baud Rate, and Interrupt Settings */
- serial_out(up, UART_OMAP_MDR1, UART_OMAP_MDR1_DISABLE);
+ if (up->errata & UART_ERRATA_i202_MDR1_ACCESS)
+ serial_omap_mdr1_errataset(up, up->mdr1);
+ else
+ serial_out(up, UART_OMAP_MDR1, up->mdr1);
+
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
up->efr = serial_in(up, UART_EFR);
@@ -769,8 +841,8 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
serial_out(up, UART_IER, 0);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
- serial_out(up, UART_DLL, quot & 0xff); /* LS of divisor */
- serial_out(up, UART_DLM, quot >> 8); /* MS of divisor */
+ serial_out(up, UART_DLL, up->dll); /* LS of divisor */
+ serial_out(up, UART_DLM, up->dlh); /* MS of divisor */
serial_out(up, UART_LCR, 0);
serial_out(up, UART_IER, up->ier);
@@ -780,9 +852,14 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
serial_out(up, UART_LCR, cval);
if (baud > 230400 && baud != 3000000)
- serial_out(up, UART_OMAP_MDR1, UART_OMAP_MDR1_13X_MODE);
+ up->mdr1 = UART_OMAP_MDR1_13X_MODE;
else
- serial_out(up, UART_OMAP_MDR1, UART_OMAP_MDR1_16X_MODE);
+ up->mdr1 = UART_OMAP_MDR1_16X_MODE;
+
+ if (up->errata & UART_ERRATA_i202_MDR1_ACCESS)
+ serial_omap_mdr1_errataset(up, up->mdr1);
+ else
+ serial_out(up, UART_OMAP_MDR1, up->mdr1);
/* Hardware Flow Control Configuration */
@@ -809,7 +886,8 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
serial_omap_configure_xonxoff(up, termios);
spin_unlock_irqrestore(&up->port.lock, flags);
- dev_dbg(up->port.dev, "serial_omap_set_termios+%d\n", up->pdev->id);
+ pm_runtime_put(&up->pdev->dev);
+ dev_dbg(up->port.dev, "serial_omap_set_termios+%d\n", up->port.line);
}
static void
@@ -819,7 +897,9 @@ serial_omap_pm(struct uart_port *port, unsigned int state,
struct uart_omap_port *up = (struct uart_omap_port *)port;
unsigned char efr;
- dev_dbg(up->port.dev, "serial_omap_pm+%d\n", up->pdev->id);
+ dev_dbg(up->port.dev, "serial_omap_pm+%d\n", up->port.line);
+
+ pm_runtime_get_sync(&up->pdev->dev);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
efr = serial_in(up, UART_EFR);
serial_out(up, UART_EFR, efr | UART_EFR_ECB);
@@ -829,6 +909,15 @@ serial_omap_pm(struct uart_port *port, unsigned int state,
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
serial_out(up, UART_EFR, efr);
serial_out(up, UART_LCR, 0);
+
+ if (!device_may_wakeup(&up->pdev->dev)) {
+ if (!state)
+ pm_runtime_forbid(&up->pdev->dev);
+ else
+ pm_runtime_allow(&up->pdev->dev);
+ }
+
+ pm_runtime_put(&up->pdev->dev);
}
static void serial_omap_release_port(struct uart_port *port)
@@ -847,7 +936,7 @@ static void serial_omap_config_port(struct uart_port *port, int flags)
struct uart_omap_port *up = (struct uart_omap_port *)port;
dev_dbg(up->port.dev, "serial_omap_config_port+%d\n",
- up->pdev->id);
+ up->port.line);
up->port.type = PORT_OMAP;
}
@@ -864,7 +953,7 @@ serial_omap_type(struct uart_port *port)
{
struct uart_omap_port *up = (struct uart_omap_port *)port;
- dev_dbg(up->port.dev, "serial_omap_type+%d\n", up->pdev->id);
+ dev_dbg(up->port.dev, "serial_omap_type+%d\n", up->port.line);
return up->name;
}
@@ -906,19 +995,26 @@ static inline void wait_for_xmitr(struct uart_omap_port *up)
static void serial_omap_poll_put_char(struct uart_port *port, unsigned char ch)
{
struct uart_omap_port *up = (struct uart_omap_port *)port;
+
+ pm_runtime_get_sync(&up->pdev->dev);
wait_for_xmitr(up);
serial_out(up, UART_TX, ch);
+ pm_runtime_put(&up->pdev->dev);
}
static int serial_omap_poll_get_char(struct uart_port *port)
{
struct uart_omap_port *up = (struct uart_omap_port *)port;
- unsigned int status = serial_in(up, UART_LSR);
+ unsigned int status;
+ pm_runtime_get_sync(&up->pdev->dev);
+ status = serial_in(up, UART_LSR);
if (!(status & UART_LSR_DR))
return NO_POLL_CHAR;
- return serial_in(up, UART_RX);
+ status = serial_in(up, UART_RX);
+ pm_runtime_put(&up->pdev->dev);
+ return status;
}
#endif /* CONFIG_CONSOLE_POLL */
@@ -946,6 +1042,8 @@ serial_omap_console_write(struct console *co, const char *s,
unsigned int ier;
int locked = 1;
+ pm_runtime_get_sync(&up->pdev->dev);
+
local_irq_save(flags);
if (up->port.sysrq)
locked = 0;
@@ -978,6 +1076,8 @@ serial_omap_console_write(struct console *co, const char *s,
if (up->msr_saved_flags)
check_modem_status(up);
+ pm_runtime_mark_last_busy(&up->pdev->dev);
+ pm_runtime_put_autosuspend(&up->pdev->dev);
if (locked)
spin_unlock(&up->port.lock);
local_irq_restore(flags);
@@ -1014,7 +1114,7 @@ static struct console serial_omap_console = {
static void serial_omap_add_console_port(struct uart_omap_port *up)
{
- serial_omap_console_ports[up->pdev->id] = up;
+ serial_omap_console_ports[up->port.line] = up;
}
#define OMAP_CONSOLE (&serial_omap_console)
@@ -1060,26 +1160,30 @@ static struct uart_driver serial_omap_reg = {
.cons = OMAP_CONSOLE,
};
-static int
-serial_omap_suspend(struct platform_device *pdev, pm_message_t state)
+#ifdef CONFIG_SUSPEND
+static int serial_omap_suspend(struct device *dev)
{
- struct uart_omap_port *up = platform_get_drvdata(pdev);
+ struct uart_omap_port *up = dev_get_drvdata(dev);
- if (up)
+ if (up) {
uart_suspend_port(&serial_omap_reg, &up->port);
+ flush_work_sync(&up->qos_work);
+ }
+
return 0;
}
-static int serial_omap_resume(struct platform_device *dev)
+static int serial_omap_resume(struct device *dev)
{
- struct uart_omap_port *up = platform_get_drvdata(dev);
+ struct uart_omap_port *up = dev_get_drvdata(dev);
if (up)
uart_resume_port(&serial_omap_reg, &up->port);
return 0;
}
+#endif
-static void serial_omap_rx_timeout(unsigned long uart_no)
+static void serial_omap_rxdma_poll(unsigned long uart_no)
{
struct uart_omap_port *up = ui[uart_no];
unsigned int curr_dma_pos, curr_transmitted_size;
@@ -1089,9 +1193,9 @@ static void serial_omap_rx_timeout(unsigned long uart_no)
if ((curr_dma_pos == up->uart_dma.prev_rx_dma_pos) ||
(curr_dma_pos == 0)) {
if (jiffies_to_msecs(jiffies - up->port_activity) <
- RX_TIMEOUT) {
+ up->uart_dma.rx_timeout) {
mod_timer(&up->uart_dma.rx_timer, jiffies +
- usecs_to_jiffies(up->uart_dma.rx_timeout));
+ usecs_to_jiffies(up->uart_dma.rx_poll_rate));
} else {
serial_omap_stop_rxdma(up);
up->ier |= (UART_IER_RDI | UART_IER_RLSI);
@@ -1120,7 +1224,7 @@ static void serial_omap_rx_timeout(unsigned long uart_no)
}
} else {
mod_timer(&up->uart_dma.rx_timer, jiffies +
- usecs_to_jiffies(up->uart_dma.rx_timeout));
+ usecs_to_jiffies(up->uart_dma.rx_poll_rate));
}
up->port_activity = jiffies;
}
@@ -1135,6 +1239,7 @@ static int serial_omap_start_rxdma(struct uart_omap_port *up)
int ret = 0;
if (up->uart_dma.rx_dma_channel == -1) {
+ pm_runtime_get_sync(&up->pdev->dev);
ret = omap_request_dma(up->uart_dma.uart_dma_rx,
"UART Rx DMA",
(void *)uart_rx_dma_callback, up,
@@ -1158,7 +1263,7 @@ static int serial_omap_start_rxdma(struct uart_omap_port *up)
/* FIXME: Cache maintenance needed here? */
omap_start_dma(up->uart_dma.rx_dma_channel);
mod_timer(&up->uart_dma.rx_timer, jiffies +
- usecs_to_jiffies(up->uart_dma.rx_timeout));
+ usecs_to_jiffies(up->uart_dma.rx_poll_rate));
up->uart_dma.rx_dma_used = true;
return ret;
}
@@ -1221,6 +1326,19 @@ static void uart_tx_dma_callback(int lch, u16 ch_status, void *data)
return;
}
+static struct omap_uart_port_info *of_get_uart_port_info(struct device *dev)
+{
+ struct omap_uart_port_info *omap_up_info;
+
+ omap_up_info = devm_kzalloc(dev, sizeof(*omap_up_info), GFP_KERNEL);
+ if (!omap_up_info)
+ return NULL; /* out of memory */
+
+ of_property_read_u32(dev->of_node, "clock-frequency",
+ &omap_up_info->uartclk);
+ return omap_up_info;
+}
+
static int serial_omap_probe(struct platform_device *pdev)
{
struct uart_omap_port *up;
@@ -1228,6 +1346,9 @@ static int serial_omap_probe(struct platform_device *pdev)
struct omap_uart_port_info *omap_up_info = pdev->dev.platform_data;
int ret = -ENOSPC;
+ if (pdev->dev.of_node)
+ omap_up_info = of_get_uart_port_info(&pdev->dev);
+
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!mem) {
dev_err(&pdev->dev, "no mem resource?\n");
@@ -1263,7 +1384,6 @@ static int serial_omap_probe(struct platform_device *pdev)
ret = -ENOMEM;
goto do_release_region;
}
- sprintf(up->name, "OMAP UART%d", pdev->id);
up->pdev = pdev;
up->port.dev = &pdev->dev;
up->port.type = PORT_OMAP;
@@ -1273,34 +1393,74 @@ static int serial_omap_probe(struct platform_device *pdev)
up->port.regshift = 2;
up->port.fifosize = 64;
up->port.ops = &serial_omap_pops;
- up->port.line = pdev->id;
- up->port.membase = omap_up_info->membase;
- up->port.mapbase = omap_up_info->mapbase;
+ if (pdev->dev.of_node)
+ up->port.line = of_alias_get_id(pdev->dev.of_node, "serial");
+ else
+ up->port.line = pdev->id;
+
+ if (up->port.line < 0) {
+ dev_err(&pdev->dev, "failed to get alias/pdev id, errno %d\n",
+ up->port.line);
+ ret = -ENODEV;
+ goto err;
+ }
+
+ sprintf(up->name, "OMAP UART%d", up->port.line);
+ up->port.mapbase = mem->start;
+ up->port.membase = ioremap(mem->start, resource_size(mem));
+ if (!up->port.membase) {
+ dev_err(&pdev->dev, "can't ioremap UART\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+
up->port.flags = omap_up_info->flags;
- up->port.irqflags = omap_up_info->irqflags;
up->port.uartclk = omap_up_info->uartclk;
+ if (!up->port.uartclk) {
+ up->port.uartclk = DEFAULT_CLK_SPEED;
+ dev_warn(&pdev->dev, "No clock speed specified: using default: "
+ "%d\n", DEFAULT_CLK_SPEED);
+ }
up->uart_dma.uart_base = mem->start;
+ up->errata = omap_up_info->errata;
if (omap_up_info->dma_enabled) {
up->uart_dma.uart_dma_tx = dma_tx->start;
up->uart_dma.uart_dma_rx = dma_rx->start;
up->use_dma = 1;
- up->uart_dma.rx_buf_size = 4096;
- up->uart_dma.rx_timeout = 2;
+ up->uart_dma.rx_buf_size = omap_up_info->dma_rx_buf_size;
+ up->uart_dma.rx_timeout = omap_up_info->dma_rx_timeout;
+ up->uart_dma.rx_poll_rate = omap_up_info->dma_rx_poll_rate;
spin_lock_init(&(up->uart_dma.tx_lock));
spin_lock_init(&(up->uart_dma.rx_lock));
up->uart_dma.tx_dma_channel = OMAP_UART_DMA_CH_FREE;
up->uart_dma.rx_dma_channel = OMAP_UART_DMA_CH_FREE;
}
- ui[pdev->id] = up;
+ up->latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
+ up->calc_latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
+ pm_qos_add_request(&up->pm_qos_request,
+ PM_QOS_CPU_DMA_LATENCY, up->latency);
+ serial_omap_uart_wq = create_singlethread_workqueue(up->name);
+ INIT_WORK(&up->qos_work, serial_omap_uart_qos_work);
+
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev,
+ omap_up_info->autosuspend_timeout);
+
+ pm_runtime_irq_safe(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
+
+ ui[up->port.line] = up;
serial_omap_add_console_port(up);
ret = uart_add_one_port(&serial_omap_reg, &up->port);
if (ret != 0)
goto do_release_region;
+ pm_runtime_put(&pdev->dev);
platform_set_drvdata(pdev, up);
return 0;
err:
@@ -1315,22 +1475,168 @@ static int serial_omap_remove(struct platform_device *dev)
{
struct uart_omap_port *up = platform_get_drvdata(dev);
- platform_set_drvdata(dev, NULL);
if (up) {
+ pm_runtime_disable(&up->pdev->dev);
uart_remove_one_port(&serial_omap_reg, &up->port);
+ pm_qos_remove_request(&up->pm_qos_request);
+
kfree(up);
}
+
+ platform_set_drvdata(dev, NULL);
return 0;
}
+/*
+ * Work around for Errata i202 (OMAP 2430, 3430, 3630, 4430 and 4460):
+ * accessing any UART register right after an MDR1 write can cause the
+ * UART to corrupt data.
+ *
+ * The required delay is 5 L4 clock cycles + 5 UART functional clock
+ * cycles (roughly 0.2 us at 48 MHz); to be safe, ten times that is
+ * used.
+ */
+static void serial_omap_mdr1_errataset(struct uart_omap_port *up, u8 mdr1)
+{
+ u8 timeout = 255;
+
+ serial_out(up, UART_OMAP_MDR1, mdr1);
+ udelay(2);
+ serial_out(up, UART_FCR, up->fcr | UART_FCR_CLEAR_XMIT |
+ UART_FCR_CLEAR_RCVR);
+ /*
+ * Wait for FIFO to empty: when empty, RX_FIFO_E bit is 0 and
+ * TX_FIFO_E bit is 1.
+ */
+ while (UART_LSR_THRE != (serial_in(up, UART_LSR) &
+ (UART_LSR_THRE | UART_LSR_DR))) {
+ timeout--;
+ if (!timeout) {
+ /* Should *never* happen; warn and carry on */
+ dev_crit(&up->pdev->dev, "Errata i202: timed out %x\n",
+ serial_in(up, UART_LSR));
+ break;
+ }
+ udelay(1);
+ }
+}
+
+static void serial_omap_restore_context(struct uart_omap_port *up)
+{
+ if (up->errata & UART_ERRATA_i202_MDR1_ACCESS)
+ serial_omap_mdr1_errataset(up, UART_OMAP_MDR1_DISABLE);
+ else
+ serial_out(up, UART_OMAP_MDR1, UART_OMAP_MDR1_DISABLE);
+
+ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); /* Config B mode */
+ serial_out(up, UART_EFR, UART_EFR_ECB);
+ serial_out(up, UART_LCR, 0x0); /* Operational mode */
+ serial_out(up, UART_IER, 0x0);
+ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); /* Config B mode */
+ serial_out(up, UART_DLL, up->dll);
+ serial_out(up, UART_DLM, up->dlh);
+ serial_out(up, UART_LCR, 0x0); /* Operational mode */
+ serial_out(up, UART_IER, up->ier);
+ serial_out(up, UART_FCR, up->fcr);
+ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
+ serial_out(up, UART_MCR, up->mcr);
+ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); /* Config B mode */
+ serial_out(up, UART_OMAP_SCR, up->scr);
+ serial_out(up, UART_EFR, up->efr);
+ serial_out(up, UART_LCR, up->lcr);
+ if (up->errata & UART_ERRATA_i202_MDR1_ACCESS)
+ serial_omap_mdr1_errataset(up, up->mdr1);
+ else
+ serial_out(up, UART_OMAP_MDR1, up->mdr1);
+}
+
+#ifdef CONFIG_PM_RUNTIME
+static int serial_omap_runtime_suspend(struct device *dev)
+{
+ struct uart_omap_port *up = dev_get_drvdata(dev);
+ struct omap_uart_port_info *pdata = dev->platform_data;
+
+ if (!up)
+ return -EINVAL;
+
+ if (!pdata || !pdata->enable_wakeup)
+ return 0;
+
+ if (pdata->get_context_loss_count)
+ up->context_loss_cnt = pdata->get_context_loss_count(dev);
+
+ if (device_may_wakeup(dev)) {
+ if (!up->wakeups_enabled) {
+ pdata->enable_wakeup(up->pdev, true);
+ up->wakeups_enabled = true;
+ }
+ } else {
+ if (up->wakeups_enabled) {
+ pdata->enable_wakeup(up->pdev, false);
+ up->wakeups_enabled = false;
+ }
+ }
+
+ /* Errata i291 */
+ if (up->use_dma && pdata->set_forceidle &&
+ (up->errata & UART_ERRATA_i291_DMA_FORCEIDLE))
+ pdata->set_forceidle(up->pdev);
+
+ up->latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
+ schedule_work(&up->qos_work);
+
+ return 0;
+}
+
+static int serial_omap_runtime_resume(struct device *dev)
+{
+ struct uart_omap_port *up = dev_get_drvdata(dev);
+ struct omap_uart_port_info *pdata = dev->platform_data;
+
+ if (up) {
+ if (pdata->get_context_loss_count) {
+ u32 loss_cnt = pdata->get_context_loss_count(dev);
+
+ if (up->context_loss_cnt != loss_cnt)
+ serial_omap_restore_context(up);
+ }
+
+ /* Errata i291 */
+ if (up->use_dma && pdata->set_noidle &&
+ (up->errata & UART_ERRATA_i291_DMA_FORCEIDLE))
+ pdata->set_noidle(up->pdev);
+
+ up->latency = up->calc_latency;
+ schedule_work(&up->qos_work);
+ }
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops serial_omap_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(serial_omap_suspend, serial_omap_resume)
+ SET_RUNTIME_PM_OPS(serial_omap_runtime_suspend,
+ serial_omap_runtime_resume, NULL)
+};
+
+#if defined(CONFIG_OF)
+static const struct of_device_id omap_serial_of_match[] = {
+ { .compatible = "ti,omap2-uart" },
+ { .compatible = "ti,omap3-uart" },
+ { .compatible = "ti,omap4-uart" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, omap_serial_of_match);
+#endif
+
static struct platform_driver serial_omap_driver = {
.probe = serial_omap_probe,
.remove = serial_omap_remove,
-
- .suspend = serial_omap_suspend,
- .resume = serial_omap_resume,
.driver = {
.name = DRIVER_NAME,
+ .pm = &serial_omap_dev_pm_ops,
+ .of_match_table = of_match_ptr(omap_serial_of_match),
},
};
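The two magic numbers introduced above in omap-serial.c - the udelay(2) in the errata i202 workaround and the wakeup-latency value fed to PM QoS - both fall out of short calculations. The sketch below is a standalone illustration of that arithmetic, not part of the patch; the 48 MHz functional clock, 64-byte FIFO and 115200 baud are assumed example values.

#include <stdio.h>

int main(void)
{
	const unsigned int fclk = 48000000;   /* DEFAULT_CLK_SPEED, 48 MHz */
	const unsigned int fifosize = 64;     /* up->port.fifosize on OMAP */
	const unsigned int baud = 115200;     /* example baud rate */

	/*
	 * Errata i202: 5 L4 + 5 functional clock cycles; at 48 MHz that
	 * is about 10 / 48e6 s ~= 0.21 us.  Ten times that is ~2.1 us,
	 * which is why serial_omap_mdr1_errataset() does udelay(2) after
	 * writing MDR1.
	 */
	double delay_us = 10.0 * 1e6 / fclk * 10.0;

	/*
	 * Wakeup latency constraint from serial_omap_set_termios(), with
	 * the same integer arithmetic the driver uses:
	 * (1000000 * 64) / (1000 * 115200 / 8) = 64000000 / 14400000 = 4.
	 */
	unsigned int latency = (1000000 * fifosize) / (1000 * baud / 8);

	printf("errata i202 delay ~= %.2f us\n", delay_us);
	printf("calc_latency       = %u\n", latency);
	return 0;
}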
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index d6aba8c087e4..17ae65762d1a 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -25,6 +25,9 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/dmi.h>
+#include <linux/console.h>
+#include <linux/nmi.h>
+#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/pch_dma.h>
@@ -198,6 +201,10 @@ enum {
#define PCI_VENDOR_ID_ROHM 0x10DB
+#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
+
+#define DEFAULT_BAUD_RATE 1843200 /* 1.8432MHz */
+
struct pch_uart_buffer {
unsigned char *buf;
int size;
@@ -276,6 +283,9 @@ static struct pch_uart_driver_data drv_dat[] = {
[pch_ml7831_uart1] = {PCH_UART_2LINE, 1},
};
+#ifdef CONFIG_SERIAL_PCH_UART_CONSOLE
+static struct eg20t_port *pch_uart_ports[PCH_UART_NR];
+#endif
static unsigned int default_baud = 9600;
static const int trigger_level_256[4] = { 1, 64, 128, 224 };
static const int trigger_level_64[4] = { 1, 16, 32, 56 };
@@ -754,7 +764,7 @@ static int dma_handle_rx(struct eg20t_port *priv)
sg_dma_address(sg) = priv->rx_buf_dma;
desc = priv->chan_rx->device->device_prep_slave_sg(priv->chan_rx,
- sg, 1, DMA_FROM_DEVICE,
+ sg, 1, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc)
@@ -913,7 +923,7 @@ static unsigned int dma_handle_tx(struct eg20t_port *priv)
}
desc = priv->chan_tx->device->device_prep_slave_sg(priv->chan_tx,
- priv->sg_tx_p, nent, DMA_TO_DEVICE,
+ priv->sg_tx_p, nent, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc) {
dev_err(priv->port.dev, "%s:device_prep_slave_sg Failed\n",
@@ -1385,6 +1395,143 @@ static struct uart_ops pch_uart_ops = {
.verify_port = pch_uart_verify_port
};
+#ifdef CONFIG_SERIAL_PCH_UART_CONSOLE
+
+/*
+ * Wait for transmitter & holding register to empty
+ */
+static void wait_for_xmitr(struct eg20t_port *up, int bits)
+{
+ unsigned int status, tmout = 10000;
+
+ /* Wait up to 10ms for the character(s) to be sent. */
+ for (;;) {
+ status = ioread8(up->membase + UART_LSR);
+
+ if ((status & bits) == bits)
+ break;
+ if (--tmout == 0)
+ break;
+ udelay(1);
+ }
+
+ /* Wait up to 1s for flow control if necessary */
+ if (up->port.flags & UPF_CONS_FLOW) {
+ unsigned int tmout;
+ for (tmout = 1000000; tmout; tmout--) {
+ unsigned int msr = ioread8(up->membase + UART_MSR);
+ if (msr & UART_MSR_CTS)
+ break;
+ udelay(1);
+ touch_nmi_watchdog();
+ }
+ }
+}
+
+static void pch_console_putchar(struct uart_port *port, int ch)
+{
+ struct eg20t_port *priv =
+ container_of(port, struct eg20t_port, port);
+
+ wait_for_xmitr(priv, UART_LSR_THRE);
+ iowrite8(ch, priv->membase + PCH_UART_THR);
+}
+
+/*
+ * Print a string to the serial port trying not to disturb
+ * any possible real use of the port...
+ *
+ * The console_lock must be held when we get here.
+ */
+static void
+pch_console_write(struct console *co, const char *s, unsigned int count)
+{
+ struct eg20t_port *priv;
+
+ unsigned long flags;
+ u8 ier;
+ int locked = 1;
+
+ priv = pch_uart_ports[co->index];
+
+ touch_nmi_watchdog();
+
+ local_irq_save(flags);
+ if (priv->port.sysrq) {
+ /* serial8250_handle_port() already took the lock */
+ locked = 0;
+ } else if (oops_in_progress) {
+ locked = spin_trylock(&priv->port.lock);
+ } else
+ spin_lock(&priv->port.lock);
+
+ /*
+ * First save the IER then disable the interrupts
+ */
+ ier = ioread8(priv->membase + UART_IER);
+
+ pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_ALL_INT);
+
+ uart_console_write(&priv->port, s, count, pch_console_putchar);
+
+ /*
+ * Finally, wait for transmitter to become empty
+ * and restore the IER
+ */
+ wait_for_xmitr(priv, BOTH_EMPTY);
+ iowrite8(ier, priv->membase + UART_IER);
+
+ if (locked)
+ spin_unlock(&priv->port.lock);
+ local_irq_restore(flags);
+}
+
+static int __init pch_console_setup(struct console *co, char *options)
+{
+ struct uart_port *port;
+ int baud = 9600;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+
+ /*
+ * Check whether an invalid uart number has been specified, and
+ * if so, search for the first available port that does have
+ * console support.
+ */
+ if (co->index >= PCH_UART_NR)
+ co->index = 0;
+ port = &pch_uart_ports[co->index]->port;
+
+ if (!port || (!port->iobase && !port->membase))
+ return -ENODEV;
+
+ /* setup uartclock */
+ port->uartclk = DEFAULT_BAUD_RATE;
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+ return uart_set_options(port, co, baud, parity, bits, flow);
+}
+
+static struct uart_driver pch_uart_driver;
+
+static struct console pch_console = {
+ .name = PCH_UART_DRIVER_DEVICE,
+ .write = pch_console_write,
+ .device = uart_console_device,
+ .setup = pch_console_setup,
+ .flags = CON_PRINTBUFFER | CON_ANYTIME,
+ .index = -1,
+ .data = &pch_uart_driver,
+};
+
+#define PCH_CONSOLE (&pch_console)
+#else
+#define PCH_CONSOLE NULL
+#endif
+
static struct uart_driver pch_uart_driver = {
.owner = THIS_MODULE,
.driver_name = KBUILD_MODNAME,
@@ -1392,6 +1539,7 @@ static struct uart_driver pch_uart_driver = {
.major = 0,
.minor = 0,
.nr = PCH_UART_NR,
+ .cons = PCH_CONSOLE,
};
static struct eg20t_port *pch_uart_init_port(struct pci_dev *pdev,
@@ -1418,7 +1566,7 @@ static struct eg20t_port *pch_uart_init_port(struct pci_dev *pdev,
if (!rxbuf)
goto init_port_free_txbuf;
- base_baud = 1843200; /* 1.8432MHz */
+ base_baud = DEFAULT_BAUD_RATE;
/* quirk for CM-iTC board */
board_name = dmi_get_system_info(DMI_BOARD_NAME);
@@ -1468,6 +1616,9 @@ static struct eg20t_port *pch_uart_init_port(struct pci_dev *pdev,
pci_set_drvdata(pdev, priv);
pch_uart_hal_request(pdev, fifosize, base_baud);
+#ifdef CONFIG_SERIAL_PCH_UART_CONSOLE
+ pch_uart_ports[board->line_no] = priv;
+#endif
ret = uart_add_one_port(&pch_uart_driver, &priv->port);
if (ret < 0)
goto init_port_hal_free;
@@ -1475,6 +1626,9 @@ static struct eg20t_port *pch_uart_init_port(struct pci_dev *pdev,
return priv;
init_port_hal_free:
+#ifdef CONFIG_SERIAL_PCH_UART_CONSOLE
+ pch_uart_ports[board->line_no] = NULL;
+#endif
free_page((unsigned long)rxbuf);
init_port_free_txbuf:
kfree(priv);
@@ -1497,6 +1651,10 @@ static void pch_uart_pci_remove(struct pci_dev *pdev)
priv = (struct eg20t_port *)pci_get_drvdata(pdev);
pci_disable_msi(pdev);
+
+#ifdef CONFIG_SERIAL_PCH_UART_CONSOLE
+ pch_uart_ports[priv->port.line] = NULL;
+#endif
pch_uart_exit_port(priv);
pci_disable_device(pdev);
kfree(priv);
diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c
index 5acd24a27d08..e9c2dfe471a2 100644
--- a/drivers/tty/serial/pmac_zilog.c
+++ b/drivers/tty/serial/pmac_zilog.c
@@ -99,6 +99,9 @@ MODULE_LICENSE("GPL");
#define PMACZILOG_NAME "ttyPZ"
#endif
+#define pmz_debug(fmt, arg...) pr_debug("ttyPZ%d: " fmt, uap->port.line, ## arg)
+#define pmz_error(fmt, arg...) pr_err("ttyPZ%d: " fmt, uap->port.line, ## arg)
+#define pmz_info(fmt, arg...) pr_info("ttyPZ%d: " fmt, uap->port.line, ## arg)
/*
* For the sake of early serial console, we can do a pre-probe
@@ -106,7 +109,6 @@ MODULE_LICENSE("GPL");
*/
static struct uart_pmac_port pmz_ports[MAX_ZS_PORTS];
static int pmz_ports_count;
-static DEFINE_MUTEX(pmz_irq_mutex);
static struct uart_driver pmz_uart_reg = {
.owner = THIS_MODULE,
@@ -126,9 +128,6 @@ static void pmz_load_zsregs(struct uart_pmac_port *uap, u8 *regs)
{
int i;
- if (ZS_IS_ASLEEP(uap))
- return;
-
/* Let pending transmits finish. */
for (i = 0; i < 1000; i++) {
unsigned char stat = read_zsreg(uap, R1);
@@ -216,32 +215,24 @@ static void pmz_maybe_update_regs(struct uart_pmac_port *uap)
}
}
+static void pmz_interrupt_control(struct uart_pmac_port *uap, int enable)
+{
+ if (enable) {
+ uap->curregs[1] |= INT_ALL_Rx | TxINT_ENAB;
+ if (!ZS_IS_EXTCLK(uap))
+ uap->curregs[1] |= EXT_INT_ENAB;
+ } else {
+ uap->curregs[1] &= ~(EXT_INT_ENAB | TxINT_ENAB | RxINT_MASK);
+ }
+ write_zsreg(uap, R1, uap->curregs[1]);
+}
+
static struct tty_struct *pmz_receive_chars(struct uart_pmac_port *uap)
{
struct tty_struct *tty = NULL;
unsigned char ch, r1, drop, error, flag;
int loops = 0;
- /* The interrupt can be enabled when the port isn't open, typically
- * that happens when using one port is open and the other closed (stale
- * interrupt) or when one port is used as a console.
- */
- if (!ZS_IS_OPEN(uap)) {
- pmz_debug("pmz: draining input\n");
- /* Port is closed, drain input data */
- for (;;) {
- if ((++loops) > 1000)
- goto flood;
- (void)read_zsreg(uap, R1);
- write_zsreg(uap, R0, ERR_RES);
- (void)read_zsdata(uap);
- ch = read_zsreg(uap, R0);
- if (!(ch & Rx_CH_AV))
- break;
- }
- return NULL;
- }
-
/* Sanity check, make sure the old bug is no longer happening */
if (uap->port.state == NULL || uap->port.state->port.tty == NULL) {
WARN_ON(1);
@@ -339,9 +330,7 @@ static struct tty_struct *pmz_receive_chars(struct uart_pmac_port *uap)
return tty;
flood:
- uap->curregs[R1] &= ~(EXT_INT_ENAB | TxINT_ENAB | RxINT_MASK);
- write_zsreg(uap, R1, uap->curregs[R1]);
- zssync(uap);
+ pmz_interrupt_control(uap, 0);
pmz_error("pmz: rx irq flood !\n");
return tty;
}
@@ -383,8 +372,6 @@ static void pmz_transmit_chars(struct uart_pmac_port *uap)
{
struct circ_buf *xmit;
- if (ZS_IS_ASLEEP(uap))
- return;
if (ZS_IS_CONS(uap)) {
unsigned char status = read_zsreg(uap, R0);
@@ -481,6 +468,10 @@ static irqreturn_t pmz_interrupt(int irq, void *dev_id)
/* Channel A */
tty = NULL;
if (r3 & (CHAEXT | CHATxIP | CHARxIP)) {
+ if (!ZS_IS_OPEN(uap_a)) {
+ pmz_debug("ChanA interrupt while open !\n");
+ goto skip_a;
+ }
write_zsreg(uap_a, R0, RES_H_IUS);
zssync(uap_a);
if (r3 & CHAEXT)
@@ -491,16 +482,21 @@ static irqreturn_t pmz_interrupt(int irq, void *dev_id)
pmz_transmit_chars(uap_a);
rc = IRQ_HANDLED;
}
+ skip_a:
spin_unlock(&uap_a->port.lock);
if (tty != NULL)
tty_flip_buffer_push(tty);
- if (uap_b->node == NULL)
+ if (!uap_b)
goto out;
spin_lock(&uap_b->port.lock);
tty = NULL;
if (r3 & (CHBEXT | CHBTxIP | CHBRxIP)) {
+ if (!ZS_IS_OPEN(uap_b)) {
+ pmz_debug("ChanB interrupt while not open!\n");
+ goto skip_b;
+ }
write_zsreg(uap_b, R0, RES_H_IUS);
zssync(uap_b);
if (r3 & CHBEXT)
@@ -511,14 +507,12 @@ static irqreturn_t pmz_interrupt(int irq, void *dev_id)
pmz_transmit_chars(uap_b);
rc = IRQ_HANDLED;
}
+ skip_b:
spin_unlock(&uap_b->port.lock);
if (tty != NULL)
tty_flip_buffer_push(tty);
out:
-#ifdef DEBUG_HARD
- pmz_debug("irq done.\n");
-#endif
return rc;
}
@@ -543,12 +537,8 @@ static inline u8 pmz_peek_status(struct uart_pmac_port *uap)
*/
static unsigned int pmz_tx_empty(struct uart_port *port)
{
- struct uart_pmac_port *uap = to_pmz(port);
unsigned char status;
- if (ZS_IS_ASLEEP(uap) || uap->node == NULL)
- return TIOCSER_TEMT;
-
status = pmz_peek_status(to_pmz(port));
if (status & Tx_BUF_EMP)
return TIOCSER_TEMT;
@@ -570,8 +560,7 @@ static void pmz_set_mctrl(struct uart_port *port, unsigned int mctrl)
if (ZS_IS_IRDA(uap))
return;
/* We get called during boot with a port not up yet */
- if (ZS_IS_ASLEEP(uap) ||
- !(ZS_IS_OPEN(uap) || ZS_IS_CONS(uap)))
+ if (!(ZS_IS_OPEN(uap) || ZS_IS_CONS(uap)))
return;
set_bits = clear_bits = 0;
@@ -590,8 +579,7 @@ static void pmz_set_mctrl(struct uart_port *port, unsigned int mctrl)
/* NOTE: Not subject to 'transmitter active' rule. */
uap->curregs[R5] |= set_bits;
uap->curregs[R5] &= ~clear_bits;
- if (ZS_IS_ASLEEP(uap))
- return;
+
write_zsreg(uap, R5, uap->curregs[R5]);
pmz_debug("pmz_set_mctrl: set bits: %x, clear bits: %x -> %x\n",
set_bits, clear_bits, uap->curregs[R5]);
@@ -609,9 +597,6 @@ static unsigned int pmz_get_mctrl(struct uart_port *port)
unsigned char status;
unsigned int ret;
- if (ZS_IS_ASLEEP(uap) || uap->node == NULL)
- return 0;
-
status = read_zsreg(uap, R0);
ret = 0;
@@ -649,9 +634,6 @@ static void pmz_start_tx(struct uart_port *port)
uap->flags |= PMACZILOG_FLAG_TX_ACTIVE;
uap->flags &= ~PMACZILOG_FLAG_TX_STOPPED;
- if (ZS_IS_ASLEEP(uap) || uap->node == NULL)
- return;
-
status = read_zsreg(uap, R0);
/* TX busy? Just wait for the TX done interrupt. */
@@ -690,9 +672,6 @@ static void pmz_stop_rx(struct uart_port *port)
{
struct uart_pmac_port *uap = to_pmz(port);
- if (ZS_IS_ASLEEP(uap) || uap->node == NULL)
- return;
-
pmz_debug("pmz: stop_rx()()\n");
/* Disable all RX interrupts. */
@@ -711,14 +690,12 @@ static void pmz_enable_ms(struct uart_port *port)
struct uart_pmac_port *uap = to_pmz(port);
unsigned char new_reg;
- if (ZS_IS_IRDA(uap) || uap->node == NULL)
+ if (ZS_IS_IRDA(uap))
return;
new_reg = uap->curregs[R15] | (DCDIE | SYNCIE | CTSIE);
if (new_reg != uap->curregs[R15]) {
uap->curregs[R15] = new_reg;
- if (ZS_IS_ASLEEP(uap))
- return;
/* NOTE: Not subject to 'transmitter active' rule. */
write_zsreg(uap, R15, uap->curregs[R15]);
}
@@ -734,8 +711,6 @@ static void pmz_break_ctl(struct uart_port *port, int break_state)
unsigned char set_bits, clear_bits, new_reg;
unsigned long flags;
- if (uap->node == NULL)
- return;
set_bits = clear_bits = 0;
if (break_state)
@@ -748,12 +723,6 @@ static void pmz_break_ctl(struct uart_port *port, int break_state)
new_reg = (uap->curregs[R5] | set_bits) & ~clear_bits;
if (new_reg != uap->curregs[R5]) {
uap->curregs[R5] = new_reg;
-
- /* NOTE: Not subject to 'transmitter active' rule. */
- if (ZS_IS_ASLEEP(uap)) {
- spin_unlock_irqrestore(&port->lock, flags);
- return;
- }
write_zsreg(uap, R5, uap->curregs[R5]);
}
@@ -927,14 +896,21 @@ static int __pmz_startup(struct uart_pmac_port *uap)
static void pmz_irda_reset(struct uart_pmac_port *uap)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&uap->port.lock, flags);
uap->curregs[R5] |= DTR;
write_zsreg(uap, R5, uap->curregs[R5]);
zssync(uap);
- mdelay(110);
+ spin_unlock_irqrestore(&uap->port.lock, flags);
+ msleep(110);
+
+ spin_lock_irqsave(&uap->port.lock, flags);
uap->curregs[R5] &= ~DTR;
write_zsreg(uap, R5, uap->curregs[R5]);
zssync(uap);
- mdelay(10);
+ spin_unlock_irqrestore(&uap->port.lock, flags);
+ msleep(10);
}
/*
@@ -949,13 +925,6 @@ static int pmz_startup(struct uart_port *port)
pmz_debug("pmz: startup()\n");
- if (ZS_IS_ASLEEP(uap))
- return -EAGAIN;
- if (uap->node == NULL)
- return -ENODEV;
-
- mutex_lock(&pmz_irq_mutex);
-
uap->flags |= PMACZILOG_FLAG_IS_OPEN;
/* A console is never powered down. Else, power up and
@@ -966,18 +935,14 @@ static int pmz_startup(struct uart_port *port)
pwr_delay = __pmz_startup(uap);
spin_unlock_irqrestore(&port->lock, flags);
}
-
- pmz_get_port_A(uap)->flags |= PMACZILOG_FLAG_IS_IRQ_ON;
+ sprintf(uap->irq_name, PMACZILOG_NAME"%d", uap->port.line);
if (request_irq(uap->port.irq, pmz_interrupt, IRQF_SHARED,
- "SCC", uap)) {
+ uap->irq_name, uap)) {
pmz_error("Unable to register zs interrupt handler.\n");
pmz_set_scc_power(uap, 0);
- mutex_unlock(&pmz_irq_mutex);
return -ENXIO;
}
- mutex_unlock(&pmz_irq_mutex);
-
/* Right now, we deal with delay by blocking here, I'll be
* smarter later on
*/
@@ -990,12 +955,9 @@ static int pmz_startup(struct uart_port *port)
if (ZS_IS_IRDA(uap))
pmz_irda_reset(uap);
- /* Enable interrupts emission from the chip */
+ /* Enable interrupt requests for the channel */
spin_lock_irqsave(&port->lock, flags);
- uap->curregs[R1] |= INT_ALL_Rx | TxINT_ENAB;
- if (!ZS_IS_EXTCLK(uap))
- uap->curregs[R1] |= EXT_INT_ENAB;
- write_zsreg(uap, R1, uap->curregs[R1]);
+ pmz_interrupt_control(uap, 1);
spin_unlock_irqrestore(&port->lock, flags);
pmz_debug("pmz: startup() done.\n");
@@ -1010,49 +972,35 @@ static void pmz_shutdown(struct uart_port *port)
pmz_debug("pmz: shutdown()\n");
- if (uap->node == NULL)
- return;
-
- mutex_lock(&pmz_irq_mutex);
-
- /* Release interrupt handler */
- free_irq(uap->port.irq, uap);
-
spin_lock_irqsave(&port->lock, flags);
- uap->flags &= ~PMACZILOG_FLAG_IS_OPEN;
+ /* Disable interrupt requests for the channel */
+ pmz_interrupt_control(uap, 0);
- if (!ZS_IS_OPEN(uap->mate))
- pmz_get_port_A(uap)->flags &= ~PMACZILOG_FLAG_IS_IRQ_ON;
+ if (!ZS_IS_CONS(uap)) {
+ /* Disable receiver and transmitter */
+ uap->curregs[R3] &= ~RxENABLE;
+ uap->curregs[R5] &= ~TxENABLE;
- /* Disable interrupts */
- if (!ZS_IS_ASLEEP(uap)) {
- uap->curregs[R1] &= ~(EXT_INT_ENAB | TxINT_ENAB | RxINT_MASK);
- write_zsreg(uap, R1, uap->curregs[R1]);
- zssync(uap);
+ /* Disable break assertion */
+ uap->curregs[R5] &= ~SND_BRK;
+ pmz_maybe_update_regs(uap);
}
- if (ZS_IS_CONS(uap) || ZS_IS_ASLEEP(uap)) {
- spin_unlock_irqrestore(&port->lock, flags);
- mutex_unlock(&pmz_irq_mutex);
- return;
- }
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ /* Release interrupt handler */
+ free_irq(uap->port.irq, uap);
- /* Disable receiver and transmitter. */
- uap->curregs[R3] &= ~RxENABLE;
- uap->curregs[R5] &= ~TxENABLE;
+ spin_lock_irqsave(&port->lock, flags);
- /* Disable all interrupts and BRK assertion. */
- uap->curregs[R5] &= ~SND_BRK;
- pmz_maybe_update_regs(uap);
+ uap->flags &= ~PMACZILOG_FLAG_IS_OPEN;
- /* Shut the chip down */
- pmz_set_scc_power(uap, 0);
+ if (!ZS_IS_CONS(uap))
+ pmz_set_scc_power(uap, 0); /* Shut the chip down */
spin_unlock_irqrestore(&port->lock, flags);
- mutex_unlock(&pmz_irq_mutex);
-
pmz_debug("pmz: shutdown() done.\n");
}
@@ -1300,9 +1248,6 @@ static void __pmz_set_termios(struct uart_port *port, struct ktermios *termios,
pmz_debug("pmz: set_termios()\n");
- if (ZS_IS_ASLEEP(uap))
- return;
-
memcpy(&uap->termios_cache, termios, sizeof(struct ktermios));
/* XXX Check which revs of machines actually allow 1 and 4Mb speeds
@@ -1352,19 +1297,15 @@ static void pmz_set_termios(struct uart_port *port, struct ktermios *termios,
spin_lock_irqsave(&port->lock, flags);
/* Disable IRQs on the port */
- uap->curregs[R1] &= ~(EXT_INT_ENAB | TxINT_ENAB | RxINT_MASK);
- write_zsreg(uap, R1, uap->curregs[R1]);
+ pmz_interrupt_control(uap, 0);
/* Setup new port configuration */
__pmz_set_termios(port, termios, old);
/* Re-enable IRQs on the port */
- if (ZS_IS_OPEN(uap)) {
- uap->curregs[R1] |= INT_ALL_Rx | TxINT_ENAB;
- if (!ZS_IS_EXTCLK(uap))
- uap->curregs[R1] |= EXT_INT_ENAB;
- write_zsreg(uap, R1, uap->curregs[R1]);
- }
+ if (ZS_IS_OPEN(uap))
+ pmz_interrupt_control(uap, 1);
+
spin_unlock_irqrestore(&port->lock, flags);
}
@@ -1604,25 +1545,34 @@ static void pmz_dispose_port(struct uart_pmac_port *uap)
*/
static int pmz_attach(struct macio_dev *mdev, const struct of_device_id *match)
{
+ struct uart_pmac_port *uap;
int i;
/* Iterate the pmz_ports array to find a matching entry
*/
for (i = 0; i < MAX_ZS_PORTS; i++)
- if (pmz_ports[i].node == mdev->ofdev.dev.of_node) {
- struct uart_pmac_port *uap = &pmz_ports[i];
-
- uap->dev = mdev;
- dev_set_drvdata(&mdev->ofdev.dev, uap);
- if (macio_request_resources(uap->dev, "pmac_zilog"))
- printk(KERN_WARNING "%s: Failed to request resource"
- ", port still active\n",
- uap->node->name);
- else
- uap->flags |= PMACZILOG_FLAG_RSRC_REQUESTED;
- return 0;
- }
- return -ENODEV;
+ if (pmz_ports[i].node == mdev->ofdev.dev.of_node)
+ break;
+ if (i >= MAX_ZS_PORTS)
+ return -ENODEV;
+
+
+ uap = &pmz_ports[i];
+ uap->dev = mdev;
+ uap->port.dev = &mdev->ofdev.dev;
+ dev_set_drvdata(&mdev->ofdev.dev, uap);
+
+ /* We still activate the port even when failing to request resources
+ * to work around bugs in ancient Apple device-trees
+ */
+ if (macio_request_resources(uap->dev, "pmac_zilog"))
+ printk(KERN_WARNING "%s: Failed to request resource"
+ ", port still active\n",
+ uap->node->name);
+ else
+ uap->flags |= PMACZILOG_FLAG_RSRC_REQUESTED;
+
+ return uart_add_one_port(&pmz_uart_reg, &uap->port);
}
/*
@@ -1636,12 +1586,15 @@ static int pmz_detach(struct macio_dev *mdev)
if (!uap)
return -ENODEV;
+ uart_remove_one_port(&pmz_uart_reg, &uap->port);
+
if (uap->flags & PMACZILOG_FLAG_RSRC_REQUESTED) {
macio_release_resources(uap->dev);
uap->flags &= ~PMACZILOG_FLAG_RSRC_REQUESTED;
}
dev_set_drvdata(&mdev->ofdev.dev, NULL);
uap->dev = NULL;
+ uap->port.dev = NULL;
return 0;
}
@@ -1650,59 +1603,13 @@ static int pmz_detach(struct macio_dev *mdev)
static int pmz_suspend(struct macio_dev *mdev, pm_message_t pm_state)
{
struct uart_pmac_port *uap = dev_get_drvdata(&mdev->ofdev.dev);
- struct uart_state *state;
- unsigned long flags;
if (uap == NULL) {
printk("HRM... pmz_suspend with NULL uap\n");
return 0;
}
- if (pm_state.event == mdev->ofdev.dev.power.power_state.event)
- return 0;
-
- pmz_debug("suspend, switching to state %d\n", pm_state.event);
-
- state = pmz_uart_reg.state + uap->port.line;
-
- mutex_lock(&pmz_irq_mutex);
- mutex_lock(&state->port.mutex);
-
- spin_lock_irqsave(&uap->port.lock, flags);
-
- if (ZS_IS_OPEN(uap) || ZS_IS_CONS(uap)) {
- /* Disable receiver and transmitter. */
- uap->curregs[R3] &= ~RxENABLE;
- uap->curregs[R5] &= ~TxENABLE;
-
- /* Disable all interrupts and BRK assertion. */
- uap->curregs[R1] &= ~(EXT_INT_ENAB | TxINT_ENAB | RxINT_MASK);
- uap->curregs[R5] &= ~SND_BRK;
- pmz_load_zsregs(uap, uap->curregs);
- uap->flags |= PMACZILOG_FLAG_IS_ASLEEP;
- mb();
- }
-
- spin_unlock_irqrestore(&uap->port.lock, flags);
-
- if (ZS_IS_OPEN(uap) || ZS_IS_OPEN(uap->mate))
- if (ZS_IS_ASLEEP(uap->mate) && ZS_IS_IRQ_ON(pmz_get_port_A(uap))) {
- pmz_get_port_A(uap)->flags &= ~PMACZILOG_FLAG_IS_IRQ_ON;
- disable_irq(uap->port.irq);
- }
-
- if (ZS_IS_CONS(uap))
- uap->port.cons->flags &= ~CON_ENABLED;
-
- /* Shut the chip down */
- pmz_set_scc_power(uap, 0);
-
- mutex_unlock(&state->port.mutex);
- mutex_unlock(&pmz_irq_mutex);
-
- pmz_debug("suspend, switching complete\n");
-
- mdev->ofdev.dev.power.power_state = pm_state;
+ uart_suspend_port(&pmz_uart_reg, &uap->port);
return 0;
}
@@ -1711,76 +1618,20 @@ static int pmz_suspend(struct macio_dev *mdev, pm_message_t pm_state)
static int pmz_resume(struct macio_dev *mdev)
{
struct uart_pmac_port *uap = dev_get_drvdata(&mdev->ofdev.dev);
- struct uart_state *state;
- unsigned long flags;
- int pwr_delay = 0;
if (uap == NULL)
return 0;
- if (mdev->ofdev.dev.power.power_state.event == PM_EVENT_ON)
- return 0;
-
- pmz_debug("resume, switching to state 0\n");
-
- state = pmz_uart_reg.state + uap->port.line;
-
- mutex_lock(&pmz_irq_mutex);
- mutex_lock(&state->port.mutex);
-
- spin_lock_irqsave(&uap->port.lock, flags);
- if (!ZS_IS_OPEN(uap) && !ZS_IS_CONS(uap)) {
- spin_unlock_irqrestore(&uap->port.lock, flags);
- goto bail;
- }
- pwr_delay = __pmz_startup(uap);
-
- /* Take care of config that may have changed while asleep */
- __pmz_set_termios(&uap->port, &uap->termios_cache, NULL);
-
- if (ZS_IS_OPEN(uap)) {
- /* Enable interrupts */
- uap->curregs[R1] |= INT_ALL_Rx | TxINT_ENAB;
- if (!ZS_IS_EXTCLK(uap))
- uap->curregs[R1] |= EXT_INT_ENAB;
- write_zsreg(uap, R1, uap->curregs[R1]);
- }
-
- spin_unlock_irqrestore(&uap->port.lock, flags);
-
- if (ZS_IS_CONS(uap))
- uap->port.cons->flags |= CON_ENABLED;
-
- /* Re-enable IRQ on the controller */
- if (ZS_IS_OPEN(uap) && !ZS_IS_IRQ_ON(pmz_get_port_A(uap))) {
- pmz_get_port_A(uap)->flags |= PMACZILOG_FLAG_IS_IRQ_ON;
- enable_irq(uap->port.irq);
- }
-
- bail:
- mutex_unlock(&state->port.mutex);
- mutex_unlock(&pmz_irq_mutex);
-
- /* Right now, we deal with delay by blocking here, I'll be
- * smarter later on
- */
- if (pwr_delay != 0) {
- pmz_debug("pmz: delaying %d ms\n", pwr_delay);
- msleep(pwr_delay);
- }
-
- pmz_debug("resume, switching complete\n");
-
- mdev->ofdev.dev.power.power_state.event = PM_EVENT_ON;
+ uart_resume_port(&pmz_uart_reg, &uap->port);
return 0;
}
/*
* Probe all ports in the system and build the ports array, we register
- * with the serial layer at this point, the macio-type probing is only
- * used later to "attach" to the sysfs tree so we get power management
- * events
+ * with the serial layer later, so we get a proper struct device which
+ * allows the tty to attach properly. This is later than it used to be
+ * but the tty layer really wants it that way.
*/
static int __init pmz_probe(void)
{
@@ -1816,8 +1667,10 @@ static int __init pmz_probe(void)
/*
* Fill basic fields in the port structures
*/
- pmz_ports[count].mate = &pmz_ports[count+1];
- pmz_ports[count+1].mate = &pmz_ports[count];
+ if (node_b != NULL) {
+ pmz_ports[count].mate = &pmz_ports[count+1];
+ pmz_ports[count+1].mate = &pmz_ports[count];
+ }
pmz_ports[count].flags = PMACZILOG_FLAG_IS_CHANNEL_A;
pmz_ports[count].node = node_a;
pmz_ports[count+1].node = node_b;
@@ -1855,8 +1708,8 @@ static int __init pmz_init_port(struct uart_pmac_port *uap)
struct resource *r_ports;
int irq;
- r_ports = platform_get_resource(uap->node, IORESOURCE_MEM, 0);
- irq = platform_get_irq(uap->node, 0);
+ r_ports = platform_get_resource(uap->pdev, IORESOURCE_MEM, 0);
+ irq = platform_get_irq(uap->pdev, 0);
if (!r_ports || !irq)
return -ENODEV;
@@ -1885,19 +1738,19 @@ static int __init pmz_probe(void)
pmz_ports_count = 0;
- pmz_ports[0].mate = &pmz_ports[1];
pmz_ports[0].port.line = 0;
pmz_ports[0].flags = PMACZILOG_FLAG_IS_CHANNEL_A;
- pmz_ports[0].node = &scc_a_pdev;
+ pmz_ports[0].pdev = &scc_a_pdev;
err = pmz_init_port(&pmz_ports[0]);
if (err)
return err;
pmz_ports_count++;
+ pmz_ports[0].mate = &pmz_ports[1];
pmz_ports[1].mate = &pmz_ports[0];
pmz_ports[1].port.line = 1;
pmz_ports[1].flags = 0;
- pmz_ports[1].node = &scc_b_pdev;
+ pmz_ports[1].pdev = &scc_b_pdev;
err = pmz_init_port(&pmz_ports[1]);
if (err)
return err;
@@ -1913,16 +1766,35 @@ static void pmz_dispose_port(struct uart_pmac_port *uap)
static int __init pmz_attach(struct platform_device *pdev)
{
+ struct uart_pmac_port *uap;
int i;
+ /* Iterate the pmz_ports array to find a matching entry */
for (i = 0; i < pmz_ports_count; i++)
- if (pmz_ports[i].node == pdev)
- return 0;
- return -ENODEV;
+ if (pmz_ports[i].pdev == pdev)
+ break;
+ if (i >= pmz_ports_count)
+ return -ENODEV;
+
+ uap = &pmz_ports[i];
+ uap->port.dev = &pdev->dev;
+ platform_set_drvdata(pdev, uap);
+
+ return uart_add_one_port(&pmz_uart_reg, &uap->port);
}
static int __exit pmz_detach(struct platform_device *pdev)
{
+ struct uart_pmac_port *uap = platform_get_drvdata(pdev);
+
+ if (!uap)
+ return -ENODEV;
+
+ uart_remove_one_port(&pmz_uart_reg, &uap->port);
+
+ platform_set_drvdata(pdev, NULL);
+ uap->port.dev = NULL;
+
return 0;
}
@@ -1954,38 +1826,13 @@ static struct console pmz_console = {
*/
static int __init pmz_register(void)
{
- int i, rc;
-
pmz_uart_reg.nr = pmz_ports_count;
pmz_uart_reg.cons = PMACZILOG_CONSOLE;
/*
* Register this driver with the serial core
*/
- rc = uart_register_driver(&pmz_uart_reg);
- if (rc)
- return rc;
-
- /*
- * Register each port with the serial core
- */
- for (i = 0; i < pmz_ports_count; i++) {
- struct uart_pmac_port *uport = &pmz_ports[i];
- /* NULL node may happen on wallstreet */
- if (uport->node != NULL)
- rc = uart_add_one_port(&pmz_uart_reg, &uport->port);
- if (rc)
- goto err_out;
- }
-
- return 0;
-err_out:
- while (i-- > 0) {
- struct uart_pmac_port *uport = &pmz_ports[i];
- uart_remove_one_port(&pmz_uart_reg, &uport->port);
- }
- uart_unregister_driver(&pmz_uart_reg);
- return rc;
+ return uart_register_driver(&pmz_uart_reg);
}
#ifdef CONFIG_PPC_PMAC
@@ -2084,10 +1931,13 @@ static void __exit exit_pmz(void)
for (i = 0; i < pmz_ports_count; i++) {
struct uart_pmac_port *uport = &pmz_ports[i];
- if (uport->node != NULL) {
- uart_remove_one_port(&pmz_uart_reg, &uport->port);
+#ifdef CONFIG_PPC_PMAC
+ if (uport->node != NULL)
pmz_dispose_port(uport);
- }
+#else
+ if (uport->pdev != NULL)
+ pmz_dispose_port(uport);
+#endif
}
/* Unregister UART driver */
uart_unregister_driver(&pmz_uart_reg);
@@ -2114,8 +1964,6 @@ static void pmz_console_write(struct console *con, const char *s, unsigned int c
struct uart_pmac_port *uap = &pmz_ports[con->index];
unsigned long flags;
- if (ZS_IS_ASLEEP(uap))
- return;
spin_lock_irqsave(&uap->port.lock, flags);
/* Turn of interrupts and enable the transmitter. */
@@ -2160,8 +2008,13 @@ static int __init pmz_console_setup(struct console *co, char *options)
if (co->index >= pmz_ports_count)
co->index = 0;
uap = &pmz_ports[co->index];
+#ifdef CONFIG_PPC_PMAC
if (uap->node == NULL)
return -ENODEV;
+#else
+ if (uap->pdev == NULL)
+ return -ENODEV;
+#endif
port = &uap->port;
/*
diff --git a/drivers/tty/serial/pmac_zilog.h b/drivers/tty/serial/pmac_zilog.h
index cbc34fbb1b20..3483242ee3e0 100644
--- a/drivers/tty/serial/pmac_zilog.h
+++ b/drivers/tty/serial/pmac_zilog.h
@@ -1,16 +1,6 @@
#ifndef __PMAC_ZILOG_H__
#define __PMAC_ZILOG_H__
-#ifdef CONFIG_PPC_PMAC
-#define pmz_debug(fmt, arg...) dev_dbg(&uap->dev->ofdev.dev, fmt, ## arg)
-#define pmz_error(fmt, arg...) dev_err(&uap->dev->ofdev.dev, fmt, ## arg)
-#define pmz_info(fmt, arg...) dev_info(&uap->dev->ofdev.dev, fmt, ## arg)
-#else
-#define pmz_debug(fmt, arg...) dev_dbg(&uap->node->dev, fmt, ## arg)
-#define pmz_error(fmt, arg...) dev_err(&uap->node->dev, fmt, ## arg)
-#define pmz_info(fmt, arg...) dev_info(&uap->node->dev, fmt, ## arg)
-#endif
-
/*
* At most 2 ESCCs with 2 ports each
*/
@@ -35,7 +25,7 @@ struct uart_pmac_port {
*/
struct device_node *node;
#else
- struct platform_device *node;
+ struct platform_device *pdev;
#endif
/* Port type as obtained from device tree (IRDA, modem, ...) */
@@ -50,14 +40,11 @@ struct uart_pmac_port {
#define PMACZILOG_FLAG_REGS_HELD 0x00000010
#define PMACZILOG_FLAG_TX_STOPPED 0x00000020
#define PMACZILOG_FLAG_TX_ACTIVE 0x00000040
-#define PMACZILOG_FLAG_ENABLED 0x00000080
#define PMACZILOG_FLAG_IS_IRDA 0x00000100
#define PMACZILOG_FLAG_IS_INTMODEM 0x00000200
#define PMACZILOG_FLAG_HAS_DMA 0x00000400
#define PMACZILOG_FLAG_RSRC_REQUESTED 0x00000800
-#define PMACZILOG_FLAG_IS_ASLEEP 0x00001000
#define PMACZILOG_FLAG_IS_OPEN 0x00002000
-#define PMACZILOG_FLAG_IS_IRQ_ON 0x00004000
#define PMACZILOG_FLAG_IS_EXTCLK 0x00008000
#define PMACZILOG_FLAG_BREAK 0x00010000
@@ -74,6 +61,8 @@ struct uart_pmac_port {
volatile struct dbdma_regs __iomem *rx_dma_regs;
#endif
+ unsigned char irq_name[8];
+
struct ktermios termios_cache;
};
@@ -388,9 +377,7 @@ static inline void zssync(struct uart_pmac_port *port)
#define ZS_IS_IRDA(UP) ((UP)->flags & PMACZILOG_FLAG_IS_IRDA)
#define ZS_IS_INTMODEM(UP) ((UP)->flags & PMACZILOG_FLAG_IS_INTMODEM)
#define ZS_HAS_DMA(UP) ((UP)->flags & PMACZILOG_FLAG_HAS_DMA)
-#define ZS_IS_ASLEEP(UP) ((UP)->flags & PMACZILOG_FLAG_IS_ASLEEP)
#define ZS_IS_OPEN(UP) ((UP)->flags & PMACZILOG_FLAG_IS_OPEN)
-#define ZS_IS_IRQ_ON(UP) ((UP)->flags & PMACZILOG_FLAG_IS_IRQ_ON)
#define ZS_IS_EXTCLK(UP) ((UP)->flags & PMACZILOG_FLAG_IS_EXTCLK)
#endif /* __PMAC_ZILOG_H__ */
diff --git a/drivers/tty/serial/s3c2410.c b/drivers/tty/serial/s3c2410.c
deleted file mode 100644
index b1d7e7c1849d..000000000000
--- a/drivers/tty/serial/s3c2410.c
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * Driver for Samsung S3C2410 SoC onboard UARTs.
- *
- * Ben Dooks, Copyright (c) 2003-2008 Simtec Electronics
- * http://armlinux.simtec.co.uk/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/module.h>
-#include <linux/ioport.h>
-#include <linux/io.h>
-#include <linux/platform_device.h>
-#include <linux/init.h>
-#include <linux/serial_core.h>
-#include <linux/serial.h>
-
-#include <asm/irq.h>
-#include <mach/hardware.h>
-
-#include <plat/regs-serial.h>
-#include <mach/regs-gpio.h>
-
-#include "samsung.h"
-
-static int s3c2410_serial_setsource(struct uart_port *port,
- struct s3c24xx_uart_clksrc *clk)
-{
- unsigned long ucon = rd_regl(port, S3C2410_UCON);
-
- if (strcmp(clk->name, "uclk") == 0)
- ucon |= S3C2410_UCON_UCLK;
- else
- ucon &= ~S3C2410_UCON_UCLK;
-
- wr_regl(port, S3C2410_UCON, ucon);
- return 0;
-}
-
-static int s3c2410_serial_getsource(struct uart_port *port,
- struct s3c24xx_uart_clksrc *clk)
-{
- unsigned long ucon = rd_regl(port, S3C2410_UCON);
-
- clk->divisor = 1;
- clk->name = (ucon & S3C2410_UCON_UCLK) ? "uclk" : "pclk";
-
- return 0;
-}
-
-static int s3c2410_serial_resetport(struct uart_port *port,
- struct s3c2410_uartcfg *cfg)
-{
- dbg("s3c2410_serial_resetport: port=%p (%08lx), cfg=%p\n",
- port, port->mapbase, cfg);
-
- wr_regl(port, S3C2410_UCON, cfg->ucon);
- wr_regl(port, S3C2410_ULCON, cfg->ulcon);
-
- /* reset both fifos */
-
- wr_regl(port, S3C2410_UFCON, cfg->ufcon | S3C2410_UFCON_RESETBOTH);
- wr_regl(port, S3C2410_UFCON, cfg->ufcon);
-
- return 0;
-}
-
-static struct s3c24xx_uart_info s3c2410_uart_inf = {
- .name = "Samsung S3C2410 UART",
- .type = PORT_S3C2410,
- .fifosize = 16,
- .rx_fifomask = S3C2410_UFSTAT_RXMASK,
- .rx_fifoshift = S3C2410_UFSTAT_RXSHIFT,
- .rx_fifofull = S3C2410_UFSTAT_RXFULL,
- .tx_fifofull = S3C2410_UFSTAT_TXFULL,
- .tx_fifomask = S3C2410_UFSTAT_TXMASK,
- .tx_fifoshift = S3C2410_UFSTAT_TXSHIFT,
- .get_clksrc = s3c2410_serial_getsource,
- .set_clksrc = s3c2410_serial_setsource,
- .reset_port = s3c2410_serial_resetport,
-};
-
-static int s3c2410_serial_probe(struct platform_device *dev)
-{
- return s3c24xx_serial_probe(dev, &s3c2410_uart_inf);
-}
-
-static struct platform_driver s3c2410_serial_driver = {
- .probe = s3c2410_serial_probe,
- .remove = __devexit_p(s3c24xx_serial_remove),
- .driver = {
- .name = "s3c2410-uart",
- .owner = THIS_MODULE,
- },
-};
-
-static int __init s3c2410_serial_init(void)
-{
- return s3c24xx_serial_init(&s3c2410_serial_driver, &s3c2410_uart_inf);
-}
-
-static void __exit s3c2410_serial_exit(void)
-{
- platform_driver_unregister(&s3c2410_serial_driver);
-}
-
-module_init(s3c2410_serial_init);
-module_exit(s3c2410_serial_exit);
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
-MODULE_DESCRIPTION("Samsung S3C2410 SoC Serial port driver");
-MODULE_ALIAS("platform:s3c2410-uart");
diff --git a/drivers/tty/serial/s3c2412.c b/drivers/tty/serial/s3c2412.c
deleted file mode 100644
index 2234bf9ced45..000000000000
--- a/drivers/tty/serial/s3c2412.c
+++ /dev/null
@@ -1,149 +0,0 @@
-/*
- * Driver for Samsung S3C2412 and S3C2413 SoC onboard UARTs.
- *
- * Ben Dooks, Copyright (c) 2003-2008 Simtec Electronics
- * http://armlinux.simtec.co.uk/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/module.h>
-#include <linux/ioport.h>
-#include <linux/io.h>
-#include <linux/platform_device.h>
-#include <linux/init.h>
-#include <linux/serial_core.h>
-#include <linux/serial.h>
-
-#include <asm/irq.h>
-#include <mach/hardware.h>
-
-#include <plat/regs-serial.h>
-#include <mach/regs-gpio.h>
-
-#include "samsung.h"
-
-static int s3c2412_serial_setsource(struct uart_port *port,
- struct s3c24xx_uart_clksrc *clk)
-{
- unsigned long ucon = rd_regl(port, S3C2410_UCON);
-
- ucon &= ~S3C2412_UCON_CLKMASK;
-
- if (strcmp(clk->name, "uclk") == 0)
- ucon |= S3C2440_UCON_UCLK;
- else if (strcmp(clk->name, "pclk") == 0)
- ucon |= S3C2440_UCON_PCLK;
- else if (strcmp(clk->name, "usysclk") == 0)
- ucon |= S3C2412_UCON_USYSCLK;
- else {
- printk(KERN_ERR "unknown clock source %s\n", clk->name);
- return -EINVAL;
- }
-
- wr_regl(port, S3C2410_UCON, ucon);
- return 0;
-}
-
-
-static int s3c2412_serial_getsource(struct uart_port *port,
- struct s3c24xx_uart_clksrc *clk)
-{
- unsigned long ucon = rd_regl(port, S3C2410_UCON);
-
- switch (ucon & S3C2412_UCON_CLKMASK) {
- case S3C2412_UCON_UCLK:
- clk->divisor = 1;
- clk->name = "uclk";
- break;
-
- case S3C2412_UCON_PCLK:
- case S3C2412_UCON_PCLK2:
- clk->divisor = 1;
- clk->name = "pclk";
- break;
-
- case S3C2412_UCON_USYSCLK:
- clk->divisor = 1;
- clk->name = "usysclk";
- break;
- }
-
- return 0;
-}
-
-static int s3c2412_serial_resetport(struct uart_port *port,
- struct s3c2410_uartcfg *cfg)
-{
- unsigned long ucon = rd_regl(port, S3C2410_UCON);
-
- dbg("%s: port=%p (%08lx), cfg=%p\n",
- __func__, port, port->mapbase, cfg);
-
- /* ensure we don't change the clock settings... */
-
- ucon &= S3C2412_UCON_CLKMASK;
-
- wr_regl(port, S3C2410_UCON, ucon | cfg->ucon);
- wr_regl(port, S3C2410_ULCON, cfg->ulcon);
-
- /* reset both fifos */
-
- wr_regl(port, S3C2410_UFCON, cfg->ufcon | S3C2410_UFCON_RESETBOTH);
- wr_regl(port, S3C2410_UFCON, cfg->ufcon);
-
- return 0;
-}
-
-static struct s3c24xx_uart_info s3c2412_uart_inf = {
- .name = "Samsung S3C2412 UART",
- .type = PORT_S3C2412,
- .fifosize = 64,
- .has_divslot = 1,
- .rx_fifomask = S3C2440_UFSTAT_RXMASK,
- .rx_fifoshift = S3C2440_UFSTAT_RXSHIFT,
- .rx_fifofull = S3C2440_UFSTAT_RXFULL,
- .tx_fifofull = S3C2440_UFSTAT_TXFULL,
- .tx_fifomask = S3C2440_UFSTAT_TXMASK,
- .tx_fifoshift = S3C2440_UFSTAT_TXSHIFT,
- .get_clksrc = s3c2412_serial_getsource,
- .set_clksrc = s3c2412_serial_setsource,
- .reset_port = s3c2412_serial_resetport,
-};
-
-/* device management */
-
-static int s3c2412_serial_probe(struct platform_device *dev)
-{
- dbg("s3c2440_serial_probe: dev=%p\n", dev);
- return s3c24xx_serial_probe(dev, &s3c2412_uart_inf);
-}
-
-static struct platform_driver s3c2412_serial_driver = {
- .probe = s3c2412_serial_probe,
- .remove = __devexit_p(s3c24xx_serial_remove),
- .driver = {
- .name = "s3c2412-uart",
- .owner = THIS_MODULE,
- },
-};
-
-static inline int s3c2412_serial_init(void)
-{
- return s3c24xx_serial_init(&s3c2412_serial_driver, &s3c2412_uart_inf);
-}
-
-static inline void s3c2412_serial_exit(void)
-{
- platform_driver_unregister(&s3c2412_serial_driver);
-}
-
-module_init(s3c2412_serial_init);
-module_exit(s3c2412_serial_exit);
-
-MODULE_DESCRIPTION("Samsung S3C2412,S3C2413 SoC Serial port driver");
-MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:s3c2412-uart");
diff --git a/drivers/tty/serial/s3c2440.c b/drivers/tty/serial/s3c2440.c
deleted file mode 100644
index 1d0c324b813f..000000000000
--- a/drivers/tty/serial/s3c2440.c
+++ /dev/null
@@ -1,178 +0,0 @@
-/*
- * Driver for Samsung S3C2440 and S3C2442 SoC onboard UARTs.
- *
- * Ben Dooks, Copyright (c) 2003-2008 Simtec Electronics
- * http://armlinux.simtec.co.uk/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/module.h>
-#include <linux/ioport.h>
-#include <linux/io.h>
-#include <linux/platform_device.h>
-#include <linux/init.h>
-#include <linux/serial_core.h>
-#include <linux/serial.h>
-
-#include <asm/irq.h>
-#include <mach/hardware.h>
-
-#include <plat/regs-serial.h>
-#include <mach/regs-gpio.h>
-
-#include "samsung.h"
-
-
-static int s3c2440_serial_setsource(struct uart_port *port,
- struct s3c24xx_uart_clksrc *clk)
-{
- unsigned long ucon = rd_regl(port, S3C2410_UCON);
-
- /* todo - proper fclk<>nonfclk switch. */
-
- ucon &= ~S3C2440_UCON_CLKMASK;
-
- if (strcmp(clk->name, "uclk") == 0)
- ucon |= S3C2440_UCON_UCLK;
- else if (strcmp(clk->name, "pclk") == 0)
- ucon |= S3C2440_UCON_PCLK;
- else if (strcmp(clk->name, "fclk") == 0)
- ucon |= S3C2440_UCON_FCLK;
- else {
- printk(KERN_ERR "unknown clock source %s\n", clk->name);
- return -EINVAL;
- }
-
- wr_regl(port, S3C2410_UCON, ucon);
- return 0;
-}
-
-
-static int s3c2440_serial_getsource(struct uart_port *port,
- struct s3c24xx_uart_clksrc *clk)
-{
- unsigned long ucon = rd_regl(port, S3C2410_UCON);
- unsigned long ucon0, ucon1, ucon2;
-
- switch (ucon & S3C2440_UCON_CLKMASK) {
- case S3C2440_UCON_UCLK:
- clk->divisor = 1;
- clk->name = "uclk";
- break;
-
- case S3C2440_UCON_PCLK:
- case S3C2440_UCON_PCLK2:
- clk->divisor = 1;
- clk->name = "pclk";
- break;
-
- case S3C2440_UCON_FCLK:
- /* the fun of calculating the uart divisors on
- * the s3c2440 */
-
- ucon0 = __raw_readl(S3C24XX_VA_UART0 + S3C2410_UCON);
- ucon1 = __raw_readl(S3C24XX_VA_UART1 + S3C2410_UCON);
- ucon2 = __raw_readl(S3C24XX_VA_UART2 + S3C2410_UCON);
-
- printk("ucons: %08lx, %08lx, %08lx\n", ucon0, ucon1, ucon2);
-
- ucon0 &= S3C2440_UCON0_DIVMASK;
- ucon1 &= S3C2440_UCON1_DIVMASK;
- ucon2 &= S3C2440_UCON2_DIVMASK;
-
- if (ucon0 != 0) {
- clk->divisor = ucon0 >> S3C2440_UCON_DIVSHIFT;
- clk->divisor += 6;
- } else if (ucon1 != 0) {
- clk->divisor = ucon1 >> S3C2440_UCON_DIVSHIFT;
- clk->divisor += 21;
- } else if (ucon2 != 0) {
- clk->divisor = ucon2 >> S3C2440_UCON_DIVSHIFT;
- clk->divisor += 36;
- } else {
- /* manual calims 44, seems to be 9 */
- clk->divisor = 9;
- }
-
- clk->name = "fclk";
- break;
- }
-
- return 0;
-}
-
-static int s3c2440_serial_resetport(struct uart_port *port,
- struct s3c2410_uartcfg *cfg)
-{
- unsigned long ucon = rd_regl(port, S3C2410_UCON);
-
- dbg("s3c2440_serial_resetport: port=%p (%08lx), cfg=%p\n",
- port, port->mapbase, cfg);
-
- /* ensure we don't change the clock settings... */
-
- ucon &= (S3C2440_UCON0_DIVMASK | (3<<10));
-
- wr_regl(port, S3C2410_UCON, ucon | cfg->ucon);
- wr_regl(port, S3C2410_ULCON, cfg->ulcon);
-
- /* reset both fifos */
-
- wr_regl(port, S3C2410_UFCON, cfg->ufcon | S3C2410_UFCON_RESETBOTH);
- wr_regl(port, S3C2410_UFCON, cfg->ufcon);
-
- return 0;
-}
-
-static struct s3c24xx_uart_info s3c2440_uart_inf = {
- .name = "Samsung S3C2440 UART",
- .type = PORT_S3C2440,
- .fifosize = 64,
- .rx_fifomask = S3C2440_UFSTAT_RXMASK,
- .rx_fifoshift = S3C2440_UFSTAT_RXSHIFT,
- .rx_fifofull = S3C2440_UFSTAT_RXFULL,
- .tx_fifofull = S3C2440_UFSTAT_TXFULL,
- .tx_fifomask = S3C2440_UFSTAT_TXMASK,
- .tx_fifoshift = S3C2440_UFSTAT_TXSHIFT,
- .get_clksrc = s3c2440_serial_getsource,
- .set_clksrc = s3c2440_serial_setsource,
- .reset_port = s3c2440_serial_resetport,
-};
-
-/* device management */
-
-static int s3c2440_serial_probe(struct platform_device *dev)
-{
- dbg("s3c2440_serial_probe: dev=%p\n", dev);
- return s3c24xx_serial_probe(dev, &s3c2440_uart_inf);
-}
-
-static struct platform_driver s3c2440_serial_driver = {
- .probe = s3c2440_serial_probe,
- .remove = __devexit_p(s3c24xx_serial_remove),
- .driver = {
- .name = "s3c2440-uart",
- .owner = THIS_MODULE,
- },
-};
-
-static int __init s3c2440_serial_init(void)
-{
- return s3c24xx_serial_init(&s3c2440_serial_driver, &s3c2440_uart_inf);
-}
-
-static void __exit s3c2440_serial_exit(void)
-{
- platform_driver_unregister(&s3c2440_serial_driver);
-}
-
-module_init(s3c2440_serial_init);
-module_exit(s3c2440_serial_exit);
-
-MODULE_DESCRIPTION("Samsung S3C2440,S3C2442 SoC Serial port driver");
-MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:s3c2440-uart");
diff --git a/drivers/tty/serial/s3c6400.c b/drivers/tty/serial/s3c6400.c
deleted file mode 100644
index e2f6913d84d5..000000000000
--- a/drivers/tty/serial/s3c6400.c
+++ /dev/null
@@ -1,149 +0,0 @@
-/*
- * Driver for Samsung S3C6400 and S3C6410 SoC onboard UARTs.
- *
- * Copyright 2008 Openmoko, Inc.
- * Copyright 2008 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- * http://armlinux.simtec.co.uk/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/module.h>
-#include <linux/ioport.h>
-#include <linux/io.h>
-#include <linux/platform_device.h>
-#include <linux/init.h>
-#include <linux/serial_core.h>
-#include <linux/serial.h>
-
-#include <asm/irq.h>
-#include <mach/hardware.h>
-
-#include <plat/regs-serial.h>
-
-#include "samsung.h"
-
-static int s3c6400_serial_setsource(struct uart_port *port,
- struct s3c24xx_uart_clksrc *clk)
-{
- unsigned long ucon = rd_regl(port, S3C2410_UCON);
-
- if (strcmp(clk->name, "uclk0") == 0) {
- ucon &= ~S3C6400_UCON_CLKMASK;
- ucon |= S3C6400_UCON_UCLK0;
- } else if (strcmp(clk->name, "uclk1") == 0)
- ucon |= S3C6400_UCON_UCLK1;
- else if (strcmp(clk->name, "pclk") == 0) {
- /* See notes about transitioning from UCLK to PCLK */
- ucon &= ~S3C6400_UCON_UCLK0;
- } else {
- printk(KERN_ERR "unknown clock source %s\n", clk->name);
- return -EINVAL;
- }
-
- wr_regl(port, S3C2410_UCON, ucon);
- return 0;
-}
-
-
-static int s3c6400_serial_getsource(struct uart_port *port,
- struct s3c24xx_uart_clksrc *clk)
-{
- u32 ucon = rd_regl(port, S3C2410_UCON);
-
- clk->divisor = 1;
-
- switch (ucon & S3C6400_UCON_CLKMASK) {
- case S3C6400_UCON_UCLK0:
- clk->name = "uclk0";
- break;
-
- case S3C6400_UCON_UCLK1:
- clk->name = "uclk1";
- break;
-
- case S3C6400_UCON_PCLK:
- case S3C6400_UCON_PCLK2:
- clk->name = "pclk";
- break;
- }
-
- return 0;
-}
-
-static int s3c6400_serial_resetport(struct uart_port *port,
- struct s3c2410_uartcfg *cfg)
-{
- unsigned long ucon = rd_regl(port, S3C2410_UCON);
-
- dbg("s3c6400_serial_resetport: port=%p (%08lx), cfg=%p\n",
- port, port->mapbase, cfg);
-
- /* ensure we don't change the clock settings... */
-
- ucon &= S3C6400_UCON_CLKMASK;
-
- wr_regl(port, S3C2410_UCON, ucon | cfg->ucon);
- wr_regl(port, S3C2410_ULCON, cfg->ulcon);
-
- /* reset both fifos */
-
- wr_regl(port, S3C2410_UFCON, cfg->ufcon | S3C2410_UFCON_RESETBOTH);
- wr_regl(port, S3C2410_UFCON, cfg->ufcon);
-
- return 0;
-}
-
-static struct s3c24xx_uart_info s3c6400_uart_inf = {
- .name = "Samsung S3C6400 UART",
- .type = PORT_S3C6400,
- .fifosize = 64,
- .has_divslot = 1,
- .rx_fifomask = S3C2440_UFSTAT_RXMASK,
- .rx_fifoshift = S3C2440_UFSTAT_RXSHIFT,
- .rx_fifofull = S3C2440_UFSTAT_RXFULL,
- .tx_fifofull = S3C2440_UFSTAT_TXFULL,
- .tx_fifomask = S3C2440_UFSTAT_TXMASK,
- .tx_fifoshift = S3C2440_UFSTAT_TXSHIFT,
- .get_clksrc = s3c6400_serial_getsource,
- .set_clksrc = s3c6400_serial_setsource,
- .reset_port = s3c6400_serial_resetport,
-};
-
-/* device management */
-
-static int s3c6400_serial_probe(struct platform_device *dev)
-{
- dbg("s3c6400_serial_probe: dev=%p\n", dev);
- return s3c24xx_serial_probe(dev, &s3c6400_uart_inf);
-}
-
-static struct platform_driver s3c6400_serial_driver = {
- .probe = s3c6400_serial_probe,
- .remove = __devexit_p(s3c24xx_serial_remove),
- .driver = {
- .name = "s3c6400-uart",
- .owner = THIS_MODULE,
- },
-};
-
-static int __init s3c6400_serial_init(void)
-{
- return s3c24xx_serial_init(&s3c6400_serial_driver, &s3c6400_uart_inf);
-}
-
-static void __exit s3c6400_serial_exit(void)
-{
- platform_driver_unregister(&s3c6400_serial_driver);
-}
-
-module_init(s3c6400_serial_init);
-module_exit(s3c6400_serial_exit);
-
-MODULE_DESCRIPTION("Samsung S3C6400,S3C6410 SoC Serial port driver");
-MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:s3c6400-uart");
diff --git a/drivers/tty/serial/s5pv210.c b/drivers/tty/serial/s5pv210.c
deleted file mode 100644
index 8b0b888a1b76..000000000000
--- a/drivers/tty/serial/s5pv210.c
+++ /dev/null
@@ -1,158 +0,0 @@
-/*
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * Based on drivers/serial/s3c6400.c
- *
- * Driver for Samsung S5PV210 SoC UARTs.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/module.h>
-#include <linux/ioport.h>
-#include <linux/io.h>
-#include <linux/platform_device.h>
-#include <linux/init.h>
-#include <linux/serial_core.h>
-#include <linux/serial.h>
-#include <linux/delay.h>
-
-#include <asm/irq.h>
-#include <mach/hardware.h>
-#include <plat/regs-serial.h>
-#include "samsung.h"
-
-static int s5pv210_serial_setsource(struct uart_port *port,
- struct s3c24xx_uart_clksrc *clk)
-{
- struct s3c2410_uartcfg *cfg = port->dev->platform_data;
- unsigned long ucon = rd_regl(port, S3C2410_UCON);
-
- if (cfg->flags & NO_NEED_CHECK_CLKSRC)
- return 0;
-
- if (strcmp(clk->name, "pclk") == 0)
- ucon &= ~S5PV210_UCON_CLKMASK;
- else if (strcmp(clk->name, "uclk1") == 0)
- ucon |= S5PV210_UCON_CLKMASK;
- else {
- printk(KERN_ERR "unknown clock source %s\n", clk->name);
- return -EINVAL;
- }
-
- wr_regl(port, S3C2410_UCON, ucon);
- return 0;
-}
-
-
-static int s5pv210_serial_getsource(struct uart_port *port,
- struct s3c24xx_uart_clksrc *clk)
-{
- struct s3c2410_uartcfg *cfg = port->dev->platform_data;
- u32 ucon = rd_regl(port, S3C2410_UCON);
-
- clk->divisor = 1;
-
- if (cfg->flags & NO_NEED_CHECK_CLKSRC)
- return 0;
-
- switch (ucon & S5PV210_UCON_CLKMASK) {
- case S5PV210_UCON_PCLK:
- clk->name = "pclk";
- break;
- case S5PV210_UCON_UCLK:
- clk->name = "uclk1";
- break;
- }
-
- return 0;
-}
-
-static int s5pv210_serial_resetport(struct uart_port *port,
- struct s3c2410_uartcfg *cfg)
-{
- unsigned long ucon = rd_regl(port, S3C2410_UCON);
-
- ucon &= S5PV210_UCON_CLKMASK;
- wr_regl(port, S3C2410_UCON, ucon | cfg->ucon);
- wr_regl(port, S3C2410_ULCON, cfg->ulcon);
-
- /* reset both fifos */
- wr_regl(port, S3C2410_UFCON, cfg->ufcon | S3C2410_UFCON_RESETBOTH);
- wr_regl(port, S3C2410_UFCON, cfg->ufcon);
-
- /* It is need to delay When reset FIFO register */
- udelay(1);
-
- return 0;
-}
-
-#define S5PV210_UART_DEFAULT_INFO(fifo_size) \
- .name = "Samsung S5PV210 UART0", \
- .type = PORT_S3C6400, \
- .fifosize = fifo_size, \
- .has_divslot = 1, \
- .rx_fifomask = S5PV210_UFSTAT_RXMASK, \
- .rx_fifoshift = S5PV210_UFSTAT_RXSHIFT, \
- .rx_fifofull = S5PV210_UFSTAT_RXFULL, \
- .tx_fifofull = S5PV210_UFSTAT_TXFULL, \
- .tx_fifomask = S5PV210_UFSTAT_TXMASK, \
- .tx_fifoshift = S5PV210_UFSTAT_TXSHIFT, \
- .get_clksrc = s5pv210_serial_getsource, \
- .set_clksrc = s5pv210_serial_setsource, \
- .reset_port = s5pv210_serial_resetport
-
-static struct s3c24xx_uart_info s5p_port_fifo256 = {
- S5PV210_UART_DEFAULT_INFO(256),
-};
-
-static struct s3c24xx_uart_info s5p_port_fifo64 = {
- S5PV210_UART_DEFAULT_INFO(64),
-};
-
-static struct s3c24xx_uart_info s5p_port_fifo16 = {
- S5PV210_UART_DEFAULT_INFO(16),
-};
-
-static struct s3c24xx_uart_info *s5p_uart_inf[] = {
- [0] = &s5p_port_fifo256,
- [1] = &s5p_port_fifo64,
- [2] = &s5p_port_fifo16,
- [3] = &s5p_port_fifo16,
-};
-
-/* device management */
-static int s5p_serial_probe(struct platform_device *pdev)
-{
- return s3c24xx_serial_probe(pdev, s5p_uart_inf[pdev->id]);
-}
-
-static struct platform_driver s5p_serial_driver = {
- .probe = s5p_serial_probe,
- .remove = __devexit_p(s3c24xx_serial_remove),
- .driver = {
- .name = "s5pv210-uart",
- .owner = THIS_MODULE,
- },
-};
-
-static int __init s5p_serial_init(void)
-{
- return s3c24xx_serial_init(&s5p_serial_driver, *s5p_uart_inf);
-}
-
-static void __exit s5p_serial_exit(void)
-{
- platform_driver_unregister(&s5p_serial_driver);
-}
-
-module_init(s5p_serial_init);
-module_exit(s5p_serial_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:s5pv210-uart");
-MODULE_DESCRIPTION("Samsung S5PV210 UART Driver support");
-MODULE_AUTHOR("Thomas Abraham <thomas.ab@samsung.com>");
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index b31f1c3a2c4c..f96f37b5fec6 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -42,6 +42,7 @@
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/cpufreq.h>
+#include <linux/of.h>
#include <asm/irq.h>
@@ -49,6 +50,7 @@
#include <mach/map.h>
#include <plat/regs-serial.h>
+#include <plat/clock.h>
#include "samsung.h"
@@ -190,10 +192,13 @@ static inline struct s3c24xx_uart_info *s3c24xx_port_to_info(struct uart_port *p
static inline struct s3c2410_uartcfg *s3c24xx_port_to_cfg(struct uart_port *port)
{
+ struct s3c24xx_uart_port *ourport;
+
if (port->dev == NULL)
return NULL;
- return (struct s3c2410_uartcfg *)port->dev->platform_data;
+ ourport = container_of(port, struct s3c24xx_uart_port, port);
+ return ourport->cfg;
}
static int s3c24xx_serial_rx_fifocnt(struct s3c24xx_uart_port *ourport,
@@ -202,7 +207,7 @@ static int s3c24xx_serial_rx_fifocnt(struct s3c24xx_uart_port *ourport,
struct s3c24xx_uart_info *info = ourport->info;
if (ufstat & info->rx_fifofull)
- return info->fifosize;
+ return ourport->port.fifosize;
return (ufstat & info->rx_fifomask) >> info->rx_fifoshift;
}
@@ -555,154 +560,98 @@ static void s3c24xx_serial_pm(struct uart_port *port, unsigned int level,
*
*/
+#define MAX_CLK_NAME_LENGTH 15
-#define MAX_CLKS (8)
-
-static struct s3c24xx_uart_clksrc tmp_clksrc = {
- .name = "pclk",
- .min_baud = 0,
- .max_baud = 0,
- .divisor = 1,
-};
-
-static inline int
-s3c24xx_serial_getsource(struct uart_port *port, struct s3c24xx_uart_clksrc *c)
+static inline int s3c24xx_serial_getsource(struct uart_port *port)
{
struct s3c24xx_uart_info *info = s3c24xx_port_to_info(port);
+ unsigned int ucon;
- return (info->get_clksrc)(port, c);
-}
-
-static inline int
-s3c24xx_serial_setsource(struct uart_port *port, struct s3c24xx_uart_clksrc *c)
-{
- struct s3c24xx_uart_info *info = s3c24xx_port_to_info(port);
+ if (info->num_clks == 1)
+ return 0;
- return (info->set_clksrc)(port, c);
+ ucon = rd_regl(port, S3C2410_UCON);
+ ucon &= info->clksel_mask;
+ return ucon >> info->clksel_shift;
}
-struct baud_calc {
- struct s3c24xx_uart_clksrc *clksrc;
- unsigned int calc;
- unsigned int divslot;
- unsigned int quot;
- struct clk *src;
-};
-
-static int s3c24xx_serial_calcbaud(struct baud_calc *calc,
- struct uart_port *port,
- struct s3c24xx_uart_clksrc *clksrc,
- unsigned int baud)
+static void s3c24xx_serial_setsource(struct uart_port *port,
+ unsigned int clk_sel)
{
- struct s3c24xx_uart_port *ourport = to_ourport(port);
- unsigned long rate;
-
- calc->src = clk_get(port->dev, clksrc->name);
- if (calc->src == NULL || IS_ERR(calc->src))
- return 0;
-
- rate = clk_get_rate(calc->src);
- rate /= clksrc->divisor;
+ struct s3c24xx_uart_info *info = s3c24xx_port_to_info(port);
+ unsigned int ucon;
- calc->clksrc = clksrc;
+ if (info->num_clks == 1)
+ return;
- if (ourport->info->has_divslot) {
- unsigned long div = rate / baud;
-
- /* The UDIVSLOT register on the newer UARTs allows us to
- * get a divisor adjustment of 1/16th on the baud clock.
- *
- * We don't keep the UDIVSLOT value (the 16ths we calculated
- * by not multiplying the baud by 16) as it is easy enough
- * to recalculate.
- */
-
- calc->quot = div / 16;
- calc->calc = rate / div;
- } else {
- calc->quot = (rate + (8 * baud)) / (16 * baud);
- calc->calc = (rate / (calc->quot * 16));
- }
+ ucon = rd_regl(port, S3C2410_UCON);
+ if ((ucon & info->clksel_mask) >> info->clksel_shift == clk_sel)
+ return;
- calc->quot--;
- return 1;
+ ucon &= ~info->clksel_mask;
+ ucon |= clk_sel << info->clksel_shift;
+ wr_regl(port, S3C2410_UCON, ucon);
}
-static unsigned int s3c24xx_serial_getclk(struct uart_port *port,
- struct s3c24xx_uart_clksrc **clksrc,
- struct clk **clk,
- unsigned int baud)
+static unsigned int s3c24xx_serial_getclk(struct s3c24xx_uart_port *ourport,
+ unsigned int req_baud, struct clk **best_clk,
+ unsigned int *clk_num)
{
- struct s3c2410_uartcfg *cfg = s3c24xx_port_to_cfg(port);
- struct s3c24xx_uart_clksrc *clkp;
- struct baud_calc res[MAX_CLKS];
- struct baud_calc *resptr, *best, *sptr;
- int i;
-
- clkp = cfg->clocks;
- best = NULL;
-
- if (cfg->clocks_size < 2) {
- if (cfg->clocks_size == 0)
- clkp = &tmp_clksrc;
-
- /* check to see if we're sourcing fclk, and if so we're
- * going to have to update the clock source
- */
-
- if (strcmp(clkp->name, "fclk") == 0) {
- struct s3c24xx_uart_clksrc src;
-
- s3c24xx_serial_getsource(port, &src);
-
- /* check that the port already using fclk, and if
- * not, then re-select fclk
+ struct s3c24xx_uart_info *info = ourport->info;
+ struct clk *clk;
+ unsigned long rate;
+ unsigned int cnt, baud, quot, clk_sel, best_quot = 0;
+ char clkname[MAX_CLK_NAME_LENGTH];
+ int calc_deviation, deviation = (1 << 30) - 1;
+
+ *best_clk = NULL;
+ clk_sel = (ourport->cfg->clk_sel) ? ourport->cfg->clk_sel :
+ ourport->info->def_clk_sel;
+ for (cnt = 0; cnt < info->num_clks; cnt++) {
+ if (!(clk_sel & (1 << cnt)))
+ continue;
+
+ sprintf(clkname, "clk_uart_baud%d", cnt);
+ clk = clk_get(ourport->port.dev, clkname);
+ if (IS_ERR_OR_NULL(clk))
+ continue;
+
+ rate = clk_get_rate(clk);
+ if (!rate)
+ continue;
+
+ if (ourport->info->has_divslot) {
+ unsigned long div = rate / req_baud;
+
+ /* The UDIVSLOT register on the newer UARTs allows us to
+ * get a divisor adjustment of 1/16th on the baud clock.
+ *
+ * We don't keep the UDIVSLOT value (the 16ths we
+ * calculated by not multiplying the baud by 16) as it
+ * is easy enough to recalculate.
*/
- if (strcmp(src.name, clkp->name) == 0) {
- s3c24xx_serial_setsource(port, clkp);
- s3c24xx_serial_getsource(port, &src);
- }
-
- clkp->divisor = src.divisor;
- }
-
- s3c24xx_serial_calcbaud(res, port, clkp, baud);
- best = res;
- resptr = best + 1;
- } else {
- resptr = res;
-
- for (i = 0; i < cfg->clocks_size; i++, clkp++) {
- if (s3c24xx_serial_calcbaud(resptr, port, clkp, baud))
- resptr++;
+ quot = div / 16;
+ baud = rate / div;
+ } else {
+ quot = (rate + (8 * req_baud)) / (16 * req_baud);
+ baud = rate / (quot * 16);
}
- }
-
- /* ok, we now need to select the best clock we found */
-
- if (!best) {
- unsigned int deviation = (1<<30)|((1<<30)-1);
- int calc_deviation;
+ quot--;
- for (sptr = res; sptr < resptr; sptr++) {
- calc_deviation = baud - sptr->calc;
- if (calc_deviation < 0)
- calc_deviation = -calc_deviation;
+ calc_deviation = req_baud - baud;
+ if (calc_deviation < 0)
+ calc_deviation = -calc_deviation;
- if (calc_deviation < deviation) {
- best = sptr;
- deviation = calc_deviation;
- }
+ if (calc_deviation < deviation) {
+ *best_clk = clk;
+ best_quot = quot;
+ *clk_num = cnt;
+ deviation = calc_deviation;
}
}
- /* store results to pass back */
-
- *clksrc = best->clksrc;
- *clk = best->src;
-
- return best->quot;
+ return best_quot;
}
/* udivslot_table[]
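To make the new s3c24xx_serial_getclk() loop above concrete, here is a worked example with made-up figures (a 66 MHz "clk_uart_baud0" source and a 115200 baud request on a port with UDIVSLOT support; none of these numbers come from the patch):

	unsigned long rate     = 66000000;	/* illustrative clock rate */
	unsigned int  req_baud = 115200;

	unsigned long div  = rate / req_baud;	/* 572 */
	unsigned int  quot = div / 16;		/* 35 */
	unsigned int  baud = rate / div;	/* 115384, about 0.16% high */

	quot--;					/* 34 is written to UBRDIV; the hardware adds one */

The loop repeats this for every clock permitted by clk_sel and keeps the source whose achievable baud rate deviates least from the request, returning that quotient.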
@@ -735,10 +684,9 @@ static void s3c24xx_serial_set_termios(struct uart_port *port,
{
struct s3c2410_uartcfg *cfg = s3c24xx_port_to_cfg(port);
struct s3c24xx_uart_port *ourport = to_ourport(port);
- struct s3c24xx_uart_clksrc *clksrc = NULL;
struct clk *clk = NULL;
unsigned long flags;
- unsigned int baud, quot;
+ unsigned int baud, quot, clk_sel = 0;
unsigned int ulcon;
unsigned int umcon;
unsigned int udivslot = 0;
@@ -754,17 +702,16 @@ static void s3c24xx_serial_set_termios(struct uart_port *port,
*/
baud = uart_get_baud_rate(port, termios, old, 0, 115200*8);
-
+ quot = s3c24xx_serial_getclk(ourport, baud, &clk, &clk_sel);
if (baud == 38400 && (port->flags & UPF_SPD_MASK) == UPF_SPD_CUST)
quot = port->custom_divisor;
- else
- quot = s3c24xx_serial_getclk(port, &clksrc, &clk, baud);
+ if (!clk)
+ return;
/* check to see if we need to change clock source */
- if (ourport->clksrc != clksrc || ourport->baudclk != clk) {
- dbg("selecting clock %p\n", clk);
- s3c24xx_serial_setsource(port, clksrc);
+ if (ourport->baudclk != clk) {
+ s3c24xx_serial_setsource(port, clk_sel);
if (ourport->baudclk != NULL && !IS_ERR(ourport->baudclk)) {
clk_disable(ourport->baudclk);
@@ -773,7 +720,6 @@ static void s3c24xx_serial_set_termios(struct uart_port *port,
clk_enable(clk);
- ourport->clksrc = clksrc;
ourport->baudclk = clk;
ourport->baudclk_rate = clk ? clk_get_rate(clk) : 0;
}
@@ -1020,16 +966,29 @@ static struct s3c24xx_uart_port s3c24xx_serial_ports[CONFIG_SERIAL_SAMSUNG_UARTS
/* s3c24xx_serial_resetport
*
- * wrapper to call the specific reset for this port (reset the fifos
- * and the settings)
+ * reset the fifos and the other settings.
*/
-static inline int s3c24xx_serial_resetport(struct uart_port *port,
- struct s3c2410_uartcfg *cfg)
+static void s3c24xx_serial_resetport(struct uart_port *port,
+ struct s3c2410_uartcfg *cfg)
{
struct s3c24xx_uart_info *info = s3c24xx_port_to_info(port);
+ unsigned long ucon = rd_regl(port, S3C2410_UCON);
+ unsigned int ucon_mask;
+
+ ucon_mask = info->clksel_mask;
+ if (info->type == PORT_S3C2440)
+ ucon_mask |= S3C2440_UCON0_DIVMASK;
- return (info->reset_port)(port, cfg);
+ ucon &= ucon_mask;
+ wr_regl(port, S3C2410_UCON, ucon | cfg->ucon);
+
+ /* reset both fifos */
+ wr_regl(port, S3C2410_UFCON, cfg->ufcon | S3C2410_UFCON_RESETBOTH);
+ wr_regl(port, S3C2410_UFCON, cfg->ufcon);
+
+ /* some delay is required after fifo reset */
+ udelay(1);
}
@@ -1121,11 +1080,10 @@ static inline void s3c24xx_serial_cpufreq_deregister(struct s3c24xx_uart_port *p
*/
static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
- struct s3c24xx_uart_info *info,
struct platform_device *platdev)
{
struct uart_port *port = &ourport->port;
- struct s3c2410_uartcfg *cfg;
+ struct s3c2410_uartcfg *cfg = ourport->cfg;
struct resource *res;
int ret;
@@ -1134,30 +1092,16 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
if (platdev == NULL)
return -ENODEV;
- cfg = s3c24xx_dev_to_cfg(&platdev->dev);
-
if (port->mapbase != 0)
return 0;
- if (cfg->hwport > CONFIG_SERIAL_SAMSUNG_UARTS) {
- printk(KERN_ERR "%s: port %d bigger than %d\n", __func__,
- cfg->hwport, CONFIG_SERIAL_SAMSUNG_UARTS);
- return -ERANGE;
- }
-
/* setup info for port */
port->dev = &platdev->dev;
- ourport->info = info;
/* Startup sequence is different for s3c64xx and higher SoC's */
if (s3c24xx_serial_has_interrupt_mask(port))
s3c24xx_serial_ops.startup = s3c64xx_serial_startup;
- /* copy the info in from provided structure */
- ourport->port.fifosize = info->fifosize;
-
- dbg("s3c24xx_serial_init_port: %p (hw %d)...\n", port, cfg->hwport);
-
port->uartclk = 1;
if (cfg->uart_flags & UPF_CONS_FLOW) {
@@ -1215,43 +1159,74 @@ static ssize_t s3c24xx_serial_show_clksrc(struct device *dev,
struct uart_port *port = s3c24xx_dev_to_port(dev);
struct s3c24xx_uart_port *ourport = to_ourport(port);
- return snprintf(buf, PAGE_SIZE, "* %s\n", ourport->clksrc->name);
+ return snprintf(buf, PAGE_SIZE, "* %s\n", ourport->baudclk->name);
}
static DEVICE_ATTR(clock_source, S_IRUGO, s3c24xx_serial_show_clksrc, NULL);
+
/* Device driver serial port probe */
+static const struct of_device_id s3c24xx_uart_dt_match[];
static int probe_index;
-int s3c24xx_serial_probe(struct platform_device *dev,
- struct s3c24xx_uart_info *info)
+static inline struct s3c24xx_serial_drv_data *s3c24xx_get_driver_data(
+ struct platform_device *pdev)
+{
+#ifdef CONFIG_OF
+ if (pdev->dev.of_node) {
+ const struct of_device_id *match;
+ match = of_match_node(s3c24xx_uart_dt_match, pdev->dev.of_node);
+ return (struct s3c24xx_serial_drv_data *)match->data;
+ }
+#endif
+ return (struct s3c24xx_serial_drv_data *)
+ platform_get_device_id(pdev)->driver_data;
+}
+
+static int s3c24xx_serial_probe(struct platform_device *pdev)
{
struct s3c24xx_uart_port *ourport;
int ret;
- dbg("s3c24xx_serial_probe(%p, %p) %d\n", dev, info, probe_index);
+ dbg("s3c24xx_serial_probe(%p) %d\n", pdev, probe_index);
ourport = &s3c24xx_serial_ports[probe_index];
+
+ ourport->drv_data = s3c24xx_get_driver_data(pdev);
+ if (!ourport->drv_data) {
+ dev_err(&pdev->dev, "could not find driver data\n");
+ return -ENODEV;
+ }
+
+ ourport->info = ourport->drv_data->info;
+ ourport->cfg = (pdev->dev.platform_data) ?
+ (struct s3c2410_uartcfg *)pdev->dev.platform_data :
+ ourport->drv_data->def_cfg;
+
+ ourport->port.fifosize = (ourport->info->fifosize) ?
+ ourport->info->fifosize :
+ ourport->drv_data->fifosize[probe_index];
+
probe_index++;
dbg("%s: initialising port %p...\n", __func__, ourport);
- ret = s3c24xx_serial_init_port(ourport, info, dev);
+ ret = s3c24xx_serial_init_port(ourport, pdev);
if (ret < 0)
goto probe_err;
dbg("%s: adding port\n", __func__);
uart_add_one_port(&s3c24xx_uart_drv, &ourport->port);
- platform_set_drvdata(dev, &ourport->port);
+ platform_set_drvdata(pdev, &ourport->port);
- ret = device_create_file(&dev->dev, &dev_attr_clock_source);
+ ret = device_create_file(&pdev->dev, &dev_attr_clock_source);
if (ret < 0)
- printk(KERN_ERR "%s: failed to add clksrc attr.\n", __func__);
+ dev_err(&pdev->dev, "failed to add clock source attr.\n");
ret = s3c24xx_serial_cpufreq_register(ourport);
if (ret < 0)
- dev_err(&dev->dev, "failed to add cpufreq notifier\n");
+ dev_err(&pdev->dev, "failed to add cpufreq notifier\n");
return 0;
@@ -1259,9 +1234,7 @@ int s3c24xx_serial_probe(struct platform_device *dev,
return ret;
}
-EXPORT_SYMBOL_GPL(s3c24xx_serial_probe);
-
-int __devexit s3c24xx_serial_remove(struct platform_device *dev)
+static int __devexit s3c24xx_serial_remove(struct platform_device *dev)
{
struct uart_port *port = s3c24xx_dev_to_port(&dev->dev);
@@ -1274,8 +1247,6 @@ int __devexit s3c24xx_serial_remove(struct platform_device *dev)
return 0;
}
-EXPORT_SYMBOL_GPL(s3c24xx_serial_remove);
-
/* UART power management code */
#ifdef CONFIG_PM_SLEEP
static int s3c24xx_serial_suspend(struct device *dev)
@@ -1315,41 +1286,6 @@ static const struct dev_pm_ops s3c24xx_serial_pm_ops = {
#define SERIAL_SAMSUNG_PM_OPS NULL
#endif /* CONFIG_PM_SLEEP */
-int s3c24xx_serial_init(struct platform_driver *drv,
- struct s3c24xx_uart_info *info)
-{
- dbg("s3c24xx_serial_init(%p,%p)\n", drv, info);
-
- drv->driver.pm = SERIAL_SAMSUNG_PM_OPS;
-
- return platform_driver_register(drv);
-}
-
-EXPORT_SYMBOL_GPL(s3c24xx_serial_init);
-
-/* module initialisation code */
-
-static int __init s3c24xx_serial_modinit(void)
-{
- int ret;
-
- ret = uart_register_driver(&s3c24xx_uart_drv);
- if (ret < 0) {
- printk(KERN_ERR "failed to register UART driver\n");
- return -1;
- }
-
- return 0;
-}
-
-static void __exit s3c24xx_serial_modexit(void)
-{
- uart_unregister_driver(&s3c24xx_uart_drv);
-}
-
-module_init(s3c24xx_serial_modinit);
-module_exit(s3c24xx_serial_modexit);
-
/* Console code */
#ifdef CONFIG_SERIAL_SAMSUNG_CONSOLE
@@ -1395,12 +1331,13 @@ static void __init
s3c24xx_serial_get_options(struct uart_port *port, int *baud,
int *parity, int *bits)
{
- struct s3c24xx_uart_clksrc clksrc;
struct clk *clk;
unsigned int ulcon;
unsigned int ucon;
unsigned int ubrdiv;
unsigned long rate;
+ unsigned int clk_sel;
+ char clk_name[MAX_CLK_NAME_LENGTH];
ulcon = rd_regl(port, S3C2410_ULCON);
ucon = rd_regl(port, S3C2410_UCON);
@@ -1445,44 +1382,21 @@ s3c24xx_serial_get_options(struct uart_port *port, int *baud,
/* now calculate the baud rate */
- s3c24xx_serial_getsource(port, &clksrc);
+ clk_sel = s3c24xx_serial_getsource(port);
+ sprintf(clk_name, "clk_uart_baud%d", clk_sel);
- clk = clk_get(port->dev, clksrc.name);
+ clk = clk_get(port->dev, clk_name);
if (!IS_ERR(clk) && clk != NULL)
- rate = clk_get_rate(clk) / clksrc.divisor;
+ rate = clk_get_rate(clk);
else
rate = 1;
-
*baud = rate / (16 * (ubrdiv + 1));
dbg("calculated baud %d\n", *baud);
}
}
-/* s3c24xx_serial_init_ports
- *
- * initialise the serial ports from the machine provided initialisation
- * data.
-*/
-
-static int s3c24xx_serial_init_ports(struct s3c24xx_uart_info **info)
-{
- struct s3c24xx_uart_port *ptr = s3c24xx_serial_ports;
- struct platform_device **platdev_ptr;
- int i;
-
- dbg("s3c24xx_serial_init_ports: initialising ports...\n");
-
- platdev_ptr = s3c24xx_uart_devs;
-
- for (i = 0; i < CONFIG_SERIAL_SAMSUNG_UARTS; i++, ptr++, platdev_ptr++) {
- s3c24xx_serial_init_port(ptr, info[i], *platdev_ptr);
- }
-
- return 0;
-}
-
static int __init
s3c24xx_serial_console_setup(struct console *co, char *options)
{
@@ -1526,11 +1440,6 @@ s3c24xx_serial_console_setup(struct console *co, char *options)
return uart_set_options(port, co, baud, parity, bits, flow);
}
-/* s3c24xx_serial_initconsole
- *
- * initialise the console from one of the uart drivers
-*/
-
static struct console s3c24xx_serial_console = {
.name = S3C24XX_SERIAL_NAME,
.device = uart_console_device,
@@ -1540,34 +1449,250 @@ static struct console s3c24xx_serial_console = {
.setup = s3c24xx_serial_console_setup,
.data = &s3c24xx_uart_drv,
};
+#endif /* CONFIG_SERIAL_SAMSUNG_CONSOLE */
-int s3c24xx_serial_initconsole(struct platform_driver *drv,
- struct s3c24xx_uart_info **info)
+#ifdef CONFIG_CPU_S3C2410
+static struct s3c24xx_serial_drv_data s3c2410_serial_drv_data = {
+ .info = &(struct s3c24xx_uart_info) {
+ .name = "Samsung S3C2410 UART",
+ .type = PORT_S3C2410,
+ .fifosize = 16,
+ .rx_fifomask = S3C2410_UFSTAT_RXMASK,
+ .rx_fifoshift = S3C2410_UFSTAT_RXSHIFT,
+ .rx_fifofull = S3C2410_UFSTAT_RXFULL,
+ .tx_fifofull = S3C2410_UFSTAT_TXFULL,
+ .tx_fifomask = S3C2410_UFSTAT_TXMASK,
+ .tx_fifoshift = S3C2410_UFSTAT_TXSHIFT,
+ .def_clk_sel = S3C2410_UCON_CLKSEL0,
+ .num_clks = 2,
+ .clksel_mask = S3C2410_UCON_CLKMASK,
+ .clksel_shift = S3C2410_UCON_CLKSHIFT,
+ },
+ .def_cfg = &(struct s3c2410_uartcfg) {
+ .ucon = S3C2410_UCON_DEFAULT,
+ .ufcon = S3C2410_UFCON_DEFAULT,
+ },
+};
+#define S3C2410_SERIAL_DRV_DATA ((kernel_ulong_t)&s3c2410_serial_drv_data)
+#else
+#define S3C2410_SERIAL_DRV_DATA (kernel_ulong_t)NULL
+#endif
-{
- struct platform_device *dev = s3c24xx_uart_devs[0];
+#ifdef CONFIG_CPU_S3C2412
+static struct s3c24xx_serial_drv_data s3c2412_serial_drv_data = {
+ .info = &(struct s3c24xx_uart_info) {
+ .name = "Samsung S3C2412 UART",
+ .type = PORT_S3C2412,
+ .fifosize = 64,
+ .has_divslot = 1,
+ .rx_fifomask = S3C2440_UFSTAT_RXMASK,
+ .rx_fifoshift = S3C2440_UFSTAT_RXSHIFT,
+ .rx_fifofull = S3C2440_UFSTAT_RXFULL,
+ .tx_fifofull = S3C2440_UFSTAT_TXFULL,
+ .tx_fifomask = S3C2440_UFSTAT_TXMASK,
+ .tx_fifoshift = S3C2440_UFSTAT_TXSHIFT,
+ .def_clk_sel = S3C2410_UCON_CLKSEL2,
+ .num_clks = 4,
+ .clksel_mask = S3C2412_UCON_CLKMASK,
+ .clksel_shift = S3C2412_UCON_CLKSHIFT,
+ },
+ .def_cfg = &(struct s3c2410_uartcfg) {
+ .ucon = S3C2410_UCON_DEFAULT,
+ .ufcon = S3C2410_UFCON_DEFAULT,
+ },
+};
+#define S3C2412_SERIAL_DRV_DATA ((kernel_ulong_t)&s3c2412_serial_drv_data)
+#else
+#define S3C2412_SERIAL_DRV_DATA (kernel_ulong_t)NULL
+#endif
- dbg("s3c24xx_serial_initconsole\n");
+#if defined(CONFIG_CPU_S3C2440) || defined(CONFIG_CPU_S3C2416) || \
+ defined(CONFIG_CPU_S3C2443)
+static struct s3c24xx_serial_drv_data s3c2440_serial_drv_data = {
+ .info = &(struct s3c24xx_uart_info) {
+ .name = "Samsung S3C2440 UART",
+ .type = PORT_S3C2440,
+ .fifosize = 64,
+ .has_divslot = 1,
+ .rx_fifomask = S3C2440_UFSTAT_RXMASK,
+ .rx_fifoshift = S3C2440_UFSTAT_RXSHIFT,
+ .rx_fifofull = S3C2440_UFSTAT_RXFULL,
+ .tx_fifofull = S3C2440_UFSTAT_TXFULL,
+ .tx_fifomask = S3C2440_UFSTAT_TXMASK,
+ .tx_fifoshift = S3C2440_UFSTAT_TXSHIFT,
+ .def_clk_sel = S3C2410_UCON_CLKSEL2,
+ .num_clks = 4,
+ .clksel_mask = S3C2412_UCON_CLKMASK,
+ .clksel_shift = S3C2412_UCON_CLKSHIFT,
+ },
+ .def_cfg = &(struct s3c2410_uartcfg) {
+ .ucon = S3C2410_UCON_DEFAULT,
+ .ufcon = S3C2410_UFCON_DEFAULT,
+ },
+};
+#define S3C2440_SERIAL_DRV_DATA ((kernel_ulong_t)&s3c2440_serial_drv_data)
+#else
+#define S3C2440_SERIAL_DRV_DATA (kernel_ulong_t)NULL
+#endif
- /* select driver based on the cpu */
+#if defined(CONFIG_CPU_S3C6400) || defined(CONFIG_CPU_S3C6410) || \
+ defined(CONFIG_CPU_S5P6440) || defined(CONFIG_CPU_S5P6450) || \
+ defined(CONFIG_CPU_S5PC100)
+static struct s3c24xx_serial_drv_data s3c6400_serial_drv_data = {
+ .info = &(struct s3c24xx_uart_info) {
+ .name = "Samsung S3C6400 UART",
+ .type = PORT_S3C6400,
+ .fifosize = 64,
+ .has_divslot = 1,
+ .rx_fifomask = S3C2440_UFSTAT_RXMASK,
+ .rx_fifoshift = S3C2440_UFSTAT_RXSHIFT,
+ .rx_fifofull = S3C2440_UFSTAT_RXFULL,
+ .tx_fifofull = S3C2440_UFSTAT_TXFULL,
+ .tx_fifomask = S3C2440_UFSTAT_TXMASK,
+ .tx_fifoshift = S3C2440_UFSTAT_TXSHIFT,
+ .def_clk_sel = S3C2410_UCON_CLKSEL2,
+ .num_clks = 4,
+ .clksel_mask = S3C6400_UCON_CLKMASK,
+ .clksel_shift = S3C6400_UCON_CLKSHIFT,
+ },
+ .def_cfg = &(struct s3c2410_uartcfg) {
+ .ucon = S3C2410_UCON_DEFAULT,
+ .ufcon = S3C2410_UFCON_DEFAULT,
+ },
+};
+#define S3C6400_SERIAL_DRV_DATA ((kernel_ulong_t)&s3c6400_serial_drv_data)
+#else
+#define S3C6400_SERIAL_DRV_DATA (kernel_ulong_t)NULL
+#endif
- if (dev == NULL) {
- printk(KERN_ERR "s3c24xx: no devices for console init\n");
- return 0;
- }
+#ifdef CONFIG_CPU_S5PV210
+static struct s3c24xx_serial_drv_data s5pv210_serial_drv_data = {
+ .info = &(struct s3c24xx_uart_info) {
+ .name = "Samsung S5PV210 UART",
+ .type = PORT_S3C6400,
+ .has_divslot = 1,
+ .rx_fifomask = S5PV210_UFSTAT_RXMASK,
+ .rx_fifoshift = S5PV210_UFSTAT_RXSHIFT,
+ .rx_fifofull = S5PV210_UFSTAT_RXFULL,
+ .tx_fifofull = S5PV210_UFSTAT_TXFULL,
+ .tx_fifomask = S5PV210_UFSTAT_TXMASK,
+ .tx_fifoshift = S5PV210_UFSTAT_TXSHIFT,
+ .def_clk_sel = S3C2410_UCON_CLKSEL0,
+ .num_clks = 2,
+ .clksel_mask = S5PV210_UCON_CLKMASK,
+ .clksel_shift = S5PV210_UCON_CLKSHIFT,
+ },
+ .def_cfg = &(struct s3c2410_uartcfg) {
+ .ucon = S5PV210_UCON_DEFAULT,
+ .ufcon = S5PV210_UFCON_DEFAULT,
+ },
+ .fifosize = { 256, 64, 16, 16 },
+};
+#define S5PV210_SERIAL_DRV_DATA ((kernel_ulong_t)&s5pv210_serial_drv_data)
+#else
+#define S5PV210_SERIAL_DRV_DATA (kernel_ulong_t)NULL
+#endif
- if (strcmp(dev->name, drv->driver.name) != 0)
- return 0;
+#ifdef CONFIG_CPU_EXYNOS4210
+static struct s3c24xx_serial_drv_data exynos4210_serial_drv_data = {
+ .info = &(struct s3c24xx_uart_info) {
+ .name = "Samsung Exynos4 UART",
+ .type = PORT_S3C6400,
+ .has_divslot = 1,
+ .rx_fifomask = S5PV210_UFSTAT_RXMASK,
+ .rx_fifoshift = S5PV210_UFSTAT_RXSHIFT,
+ .rx_fifofull = S5PV210_UFSTAT_RXFULL,
+ .tx_fifofull = S5PV210_UFSTAT_TXFULL,
+ .tx_fifomask = S5PV210_UFSTAT_TXMASK,
+ .tx_fifoshift = S5PV210_UFSTAT_TXSHIFT,
+ .def_clk_sel = S3C2410_UCON_CLKSEL0,
+ .num_clks = 1,
+ .clksel_mask = 0,
+ .clksel_shift = 0,
+ },
+ .def_cfg = &(struct s3c2410_uartcfg) {
+ .ucon = S5PV210_UCON_DEFAULT,
+ .ufcon = S5PV210_UFCON_DEFAULT,
+ .has_fracval = 1,
+ },
+ .fifosize = { 256, 64, 16, 16 },
+};
+#define EXYNOS4210_SERIAL_DRV_DATA ((kernel_ulong_t)&exynos4210_serial_drv_data)
+#else
+#define EXYNOS4210_SERIAL_DRV_DATA (kernel_ulong_t)NULL
+#endif
- s3c24xx_serial_console.data = &s3c24xx_uart_drv;
- s3c24xx_serial_init_ports(info);
+static struct platform_device_id s3c24xx_serial_driver_ids[] = {
+ {
+ .name = "s3c2410-uart",
+ .driver_data = S3C2410_SERIAL_DRV_DATA,
+ }, {
+ .name = "s3c2412-uart",
+ .driver_data = S3C2412_SERIAL_DRV_DATA,
+ }, {
+ .name = "s3c2440-uart",
+ .driver_data = S3C2440_SERIAL_DRV_DATA,
+ }, {
+ .name = "s3c6400-uart",
+ .driver_data = S3C6400_SERIAL_DRV_DATA,
+ }, {
+ .name = "s5pv210-uart",
+ .driver_data = S5PV210_SERIAL_DRV_DATA,
+ }, {
+ .name = "exynos4210-uart",
+ .driver_data = EXYNOS4210_SERIAL_DRV_DATA,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(platform, s3c24xx_serial_driver_ids);
- register_console(&s3c24xx_serial_console);
- return 0;
+#ifdef CONFIG_OF
+static const struct of_device_id s3c24xx_uart_dt_match[] = {
+ { .compatible = "samsung,exynos4210-uart",
+ .data = (void *)EXYNOS4210_SERIAL_DRV_DATA },
+ {},
+};
+MODULE_DEVICE_TABLE(of, s3c24xx_uart_dt_match);
+#else
+#define s3c24xx_uart_dt_match NULL
+#endif
+
+static struct platform_driver samsung_serial_driver = {
+ .probe = s3c24xx_serial_probe,
+ .remove = __devexit_p(s3c24xx_serial_remove),
+ .id_table = s3c24xx_serial_driver_ids,
+ .driver = {
+ .name = "samsung-uart",
+ .owner = THIS_MODULE,
+ .pm = SERIAL_SAMSUNG_PM_OPS,
+ .of_match_table = s3c24xx_uart_dt_match,
+ },
+};
+
+/* module initialisation code */
+
+static int __init s3c24xx_serial_modinit(void)
+{
+ int ret;
+
+ ret = uart_register_driver(&s3c24xx_uart_drv);
+ if (ret < 0) {
+ printk(KERN_ERR "failed to register UART driver\n");
+ return -1;
+ }
+
+ return platform_driver_register(&samsung_serial_driver);
}
-#endif /* CONFIG_SERIAL_SAMSUNG_CONSOLE */
+static void __exit s3c24xx_serial_modexit(void)
+{
+ uart_unregister_driver(&s3c24xx_uart_drv);
+}
+
+module_init(s3c24xx_serial_modinit);
+module_exit(s3c24xx_serial_modexit);
+MODULE_ALIAS("platform:samsung-uart");
MODULE_DESCRIPTION("Samsung SoC Serial port driver");
MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/tty/serial/samsung.h b/drivers/tty/serial/samsung.h
index 8e87b788e5c6..1a4bca3e4179 100644
--- a/drivers/tty/serial/samsung.h
+++ b/drivers/tty/serial/samsung.h
@@ -19,20 +19,25 @@ struct s3c24xx_uart_info {
unsigned long tx_fifomask;
unsigned long tx_fifoshift;
unsigned long tx_fifofull;
+ unsigned int def_clk_sel;
+ unsigned long num_clks;
+ unsigned long clksel_mask;
+ unsigned long clksel_shift;
/* uart port features */
unsigned int has_divslot:1;
- /* clock source control */
-
- int (*get_clksrc)(struct uart_port *, struct s3c24xx_uart_clksrc *clk);
- int (*set_clksrc)(struct uart_port *, struct s3c24xx_uart_clksrc *clk);
-
/* uart controls */
int (*reset_port)(struct uart_port *, struct s3c2410_uartcfg *);
};
+struct s3c24xx_serial_drv_data {
+ struct s3c24xx_uart_info *info;
+ struct s3c2410_uartcfg *def_cfg;
+ unsigned int fifosize[CONFIG_SERIAL_SAMSUNG_UARTS];
+};
+
struct s3c24xx_uart_port {
unsigned char rx_claimed;
unsigned char tx_claimed;
@@ -43,10 +48,13 @@ struct s3c24xx_uart_port {
unsigned int tx_irq;
struct s3c24xx_uart_info *info;
- struct s3c24xx_uart_clksrc *clksrc;
struct clk *clk;
struct clk *baudclk;
struct uart_port port;
+ struct s3c24xx_serial_drv_data *drv_data;
+
+ /* reference to platform data */
+ struct s3c2410_uartcfg *cfg;
#ifdef CONFIG_CPU_FREQ
struct notifier_block freq_transition;
@@ -56,7 +64,6 @@ struct s3c24xx_uart_port {
/* conversion functions */
#define s3c24xx_dev_to_port(__dev) (struct uart_port *)dev_get_drvdata(__dev)
-#define s3c24xx_dev_to_cfg(__dev) (struct s3c2410_uartcfg *)((__dev)->platform_data)
/* register access controls */
@@ -69,17 +76,6 @@ struct s3c24xx_uart_port {
#define wr_regb(port, reg, val) __raw_writeb(val, portaddr(port, reg))
#define wr_regl(port, reg, val) __raw_writel(val, portaddr(port, reg))
-extern int s3c24xx_serial_probe(struct platform_device *dev,
- struct s3c24xx_uart_info *uart);
-
-extern int __devexit s3c24xx_serial_remove(struct platform_device *dev);
-
-extern int s3c24xx_serial_initconsole(struct platform_driver *drv,
- struct s3c24xx_uart_info **uart);
-
-extern int s3c24xx_serial_init(struct platform_driver *drv,
- struct s3c24xx_uart_info *info);
-
#ifdef CONFIG_SERIAL_SAMSUNG_DEBUG
extern void printascii(const char *);
diff --git a/drivers/tty/serial/sc26xx.c b/drivers/tty/serial/sc26xx.c
index 75038ad2b242..e0b4b0a30a5a 100644
--- a/drivers/tty/serial/sc26xx.c
+++ b/drivers/tty/serial/sc26xx.c
@@ -736,19 +736,7 @@ static struct platform_driver sc26xx_driver = {
},
};
-static int __init sc26xx_init(void)
-{
- return platform_driver_register(&sc26xx_driver);
-}
-
-static void __exit sc26xx_exit(void)
-{
- platform_driver_unregister(&sc26xx_driver);
-}
-
-module_init(sc26xx_init);
-module_exit(sc26xx_exit);
-
+module_platform_driver(sc26xx_driver);
MODULE_AUTHOR("Thomas Bogendörfer");
MODULE_DESCRIPTION("SC681/SC2692 serial driver");
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 0406d7ff505e..c7bf31a6a7e7 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -22,6 +22,7 @@
*/
#include <linux/module.h>
#include <linux/tty.h>
+#include <linux/tty_flip.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/console.h>
@@ -60,6 +61,8 @@ static void uart_change_speed(struct tty_struct *tty, struct uart_state *state,
static void uart_wait_until_sent(struct tty_struct *tty, int timeout);
static void uart_change_pm(struct uart_state *state, int pm_state);
+static void uart_port_shutdown(struct tty_port *port);
+
/*
* This routine is used by the interrupt handler to schedule processing in
* the software interrupt portion of the driver.
@@ -128,25 +131,16 @@ uart_update_mctrl(struct uart_port *port, unsigned int set, unsigned int clear)
* Startup the port. This will be called once per open. All calls
* will be serialised by the per-port mutex.
*/
-static int uart_startup(struct tty_struct *tty, struct uart_state *state, int init_hw)
+static int uart_port_startup(struct tty_struct *tty, struct uart_state *state,
+ int init_hw)
{
struct uart_port *uport = state->uart_port;
struct tty_port *port = &state->port;
unsigned long page;
int retval = 0;
- if (port->flags & ASYNC_INITIALIZED)
- return 0;
-
- /*
- * Set the TTY IO error marker - we will only clear this
- * once we have successfully opened the port. Also set
- * up the tty->alt_speed kludge
- */
- set_bit(TTY_IO_ERROR, &tty->flags);
-
if (uport->type == PORT_UNKNOWN)
- return 0;
+ return 1;
/*
* Initialise and allocate the transmit and temporary
@@ -188,10 +182,6 @@ static int uart_startup(struct tty_struct *tty, struct uart_state *state, int in
tty->hw_stopped = 1;
spin_unlock_irq(&uport->lock);
}
-
- set_bit(ASYNCB_INITIALIZED, &port->flags);
-
- clear_bit(TTY_IO_ERROR, &tty->flags);
}
/*
@@ -200,6 +190,31 @@ static int uart_startup(struct tty_struct *tty, struct uart_state *state, int in
* now.
*/
if (retval && capable(CAP_SYS_ADMIN))
+ return 1;
+
+ return retval;
+}
+
+static int uart_startup(struct tty_struct *tty, struct uart_state *state,
+ int init_hw)
+{
+ struct tty_port *port = &state->port;
+ int retval;
+
+ if (port->flags & ASYNC_INITIALIZED)
+ return 0;
+
+ /*
+ * Set the TTY IO error marker - we will only clear this
+ * once we have successfully opened the port.
+ */
+ set_bit(TTY_IO_ERROR, &tty->flags);
+
+ retval = uart_port_startup(tty, state, init_hw);
+ if (!retval) {
+ set_bit(ASYNCB_INITIALIZED, &port->flags);
+ clear_bit(TTY_IO_ERROR, &tty->flags);
+ } else if (retval > 0)
retval = 0;
return retval;
@@ -228,24 +243,7 @@ static void uart_shutdown(struct tty_struct *tty, struct uart_state *state)
if (!tty || (tty->termios->c_cflag & HUPCL))
uart_clear_mctrl(uport, TIOCM_DTR | TIOCM_RTS);
- /*
- * clear delta_msr_wait queue to avoid mem leaks: we may free
- * the irq here so the queue might never be woken up. Note
- * that we won't end up waiting on delta_msr_wait again since
- * any outstanding file descriptors should be pointing at
- * hung_up_tty_fops now.
- */
- wake_up_interruptible(&port->delta_msr_wait);
-
- /*
- * Free the IRQ and disable the port.
- */
- uport->ops->shutdown(uport);
-
- /*
- * Ensure that the IRQ handler isn't running on another CPU.
- */
- synchronize_irq(uport->irq);
+ uart_port_shutdown(port);
}
/*
@@ -423,7 +421,7 @@ uart_get_divisor(struct uart_port *port, unsigned int baud)
if (baud == 38400 && (port->flags & UPF_SPD_MASK) == UPF_SPD_CUST)
quot = port->custom_divisor;
else
- quot = (port->uartclk + (8 * baud)) / (16 * baud);
+ quot = DIV_ROUND_CLOSEST(port->uartclk, 16 * baud);
return quot;
}
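The uart_get_divisor() change is a readability cleanup rather than a behavioural one: adding 8 * baud before dividing by 16 * baud is exactly the round-to-nearest division that DIV_ROUND_CLOSEST() performs. For example, with a 1.8432 MHz UART clock and 38400 baud:

	(1843200 + 8 * 38400) / (16 * 38400)	/* 2150400 / 614400 -> 3 */
	DIV_ROUND_CLOSEST(1843200, 16 * 38400)	/* (1843200 + 307200) / 614400 -> 3 */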
@@ -658,10 +656,10 @@ static int uart_get_info(struct uart_state *state,
tmp.flags = uport->flags;
tmp.xmit_fifo_size = uport->fifosize;
tmp.baud_base = uport->uartclk / 16;
- tmp.close_delay = port->close_delay / 10;
+ tmp.close_delay = jiffies_to_msecs(port->close_delay) / 10;
tmp.closing_wait = port->closing_wait == ASYNC_CLOSING_WAIT_NONE ?
ASYNC_CLOSING_WAIT_NONE :
- port->closing_wait / 10;
+ jiffies_to_msecs(port->closing_wait) / 10;
tmp.custom_divisor = uport->custom_divisor;
tmp.hub6 = uport->hub6;
tmp.io_type = uport->iotype;
@@ -695,9 +693,10 @@ static int uart_set_info(struct tty_struct *tty, struct uart_state *state,
new_port += (unsigned long) new_serial.port_high << HIGH_BITS_OFFSET;
new_serial.irq = irq_canonicalize(new_serial.irq);
- close_delay = new_serial.close_delay * 10;
+ close_delay = msecs_to_jiffies(new_serial.close_delay * 10);
closing_wait = new_serial.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
- ASYNC_CLOSING_WAIT_NONE : new_serial.closing_wait * 10;
+ ASYNC_CLOSING_WAIT_NONE :
+ msecs_to_jiffies(new_serial.closing_wait * 10);
/*
* This semaphore protects port->count. It is also
@@ -1265,47 +1264,8 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
pr_debug("uart_close(%d) called\n", uport->line);
- spin_lock_irqsave(&port->lock, flags);
-
- if (tty_hung_up_p(filp)) {
- spin_unlock_irqrestore(&port->lock, flags);
+ if (tty_port_close_start(port, tty, filp) == 0)
return;
- }
-
- if ((tty->count == 1) && (port->count != 1)) {
- /*
- * Uh, oh. tty->count is 1, which means that the tty
- * structure will be freed. port->count should always
- * be one in these conditions. If it's greater than
- * one, we've got real problems, since it means the
- * serial port won't be shutdown.
- */
- printk(KERN_ERR "uart_close: bad serial port count; tty->count is 1, "
- "port->count is %d\n", port->count);
- port->count = 1;
- }
- if (--port->count < 0) {
- printk(KERN_ERR "uart_close: bad serial port count for %s: %d\n",
- tty->name, port->count);
- port->count = 0;
- }
- if (port->count) {
- spin_unlock_irqrestore(&port->lock, flags);
- return;
- }
-
- /*
- * Now we wait for the transmit buffer to clear; and we notify
- * the line discipline to only process XON/XOFF characters by
- * setting tty->closing.
- */
- set_bit(ASYNCB_CLOSING, &port->flags);
- tty->closing = 1;
- spin_unlock_irqrestore(&port->lock, flags);
-
- if (port->closing_wait != ASYNC_CLOSING_WAIT_NONE)
- tty_wait_until_sent_from_close(tty,
- msecs_to_jiffies(port->closing_wait));
/*
* At this point, we stop accepting input. To do this, we
@@ -1337,7 +1297,8 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
if (port->blocked_open) {
spin_unlock_irqrestore(&port->lock, flags);
if (port->close_delay)
- msleep_interruptible(port->close_delay);
+ msleep_interruptible(
+ jiffies_to_msecs(port->close_delay));
spin_lock_irqsave(&port->lock, flags);
} else if (!uart_console(uport)) {
spin_unlock_irqrestore(&port->lock, flags);
@@ -1441,6 +1402,36 @@ static void uart_hangup(struct tty_struct *tty)
mutex_unlock(&port->mutex);
}
+static int uart_port_activate(struct tty_port *port, struct tty_struct *tty)
+{
+ return 0;
+}
+
+static void uart_port_shutdown(struct tty_port *port)
+{
+ struct uart_state *state = container_of(port, struct uart_state, port);
+ struct uart_port *uport = state->uart_port;
+
+ /*
+ * clear delta_msr_wait queue to avoid mem leaks: we may free
+ * the irq here so the queue might never be woken up. Note
+ * that we won't end up waiting on delta_msr_wait again since
+ * any outstanding file descriptors should be pointing at
+ * hung_up_tty_fops now.
+ */
+ wake_up_interruptible(&port->delta_msr_wait);
+
+ /*
+ * Free the IRQ and disable the port.
+ */
+ uport->ops->shutdown(uport);
+
+ /*
+ * Ensure that the IRQ handler isn't running on another CPU.
+ */
+ synchronize_irq(uport->irq);
+}
+
static int uart_carrier_raised(struct tty_port *port)
{
struct uart_state *state = container_of(port, struct uart_state, port);
@@ -1466,33 +1457,6 @@ static void uart_dtr_rts(struct tty_port *port, int onoff)
uart_clear_mctrl(uport, TIOCM_DTR | TIOCM_RTS);
}
-static struct uart_state *uart_get(struct uart_driver *drv, int line)
-{
- struct uart_state *state;
- struct tty_port *port;
- int ret = 0;
-
- state = drv->state + line;
- port = &state->port;
- if (mutex_lock_interruptible(&port->mutex)) {
- ret = -ERESTARTSYS;
- goto err;
- }
-
- port->count++;
- if (!state->uart_port || state->uart_port->flags & UPF_DEAD) {
- ret = -ENXIO;
- goto err_unlock;
- }
- return state;
-
- err_unlock:
- port->count--;
- mutex_unlock(&port->mutex);
- err:
- return ERR_PTR(ret);
-}
-
/*
* calls to uart_open are serialised by the BKL in
* fs/char_dev.c:chrdev_open()
@@ -1506,26 +1470,29 @@ static struct uart_state *uart_get(struct uart_driver *drv, int line)
static int uart_open(struct tty_struct *tty, struct file *filp)
{
struct uart_driver *drv = (struct uart_driver *)tty->driver->driver_state;
- struct uart_state *state;
- struct tty_port *port;
int retval, line = tty->index;
+ struct uart_state *state = drv->state + line;
+ struct tty_port *port = &state->port;
pr_debug("uart_open(%d) called\n", line);
/*
- * We take the semaphore inside uart_get to guarantee that we won't
- * be re-entered while allocating the state structure, or while we
- * request any IRQs that the driver may need. This also has the nice
- * side-effect that it delays the action of uart_hangup, so we can
- * guarantee that state->port.tty will always contain something
- * reasonable.
+ * We take the semaphore here to guarantee that we won't be re-entered
+ * while allocating the state structure, or while we request any IRQs
+ * that the driver may need. This also has the nice side-effect that
+ * it delays the action of uart_hangup, so we can guarantee that
+ * state->port.tty will always contain something reasonable.
*/
- state = uart_get(drv, line);
- if (IS_ERR(state)) {
- retval = PTR_ERR(state);
- goto fail;
+ if (mutex_lock_interruptible(&port->mutex)) {
+ retval = -ERESTARTSYS;
+ goto end;
+ }
+
+ port->count++;
+ if (!state->uart_port || state->uart_port->flags & UPF_DEAD) {
+ retval = -ENXIO;
+ goto err_dec_count;
}
- port = &state->port;
/*
* Once we set tty->driver_data here, we are guaranteed that
@@ -1535,7 +1502,6 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
tty->driver_data = state;
state->uart_port->state = state;
tty->low_latency = (state->uart_port->flags & UPF_LOW_LATENCY) ? 1 : 0;
- tty->alt_speed = 0;
tty_port_tty_set(port, tty);
/*
@@ -1543,9 +1509,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
*/
if (tty_hung_up_p(filp)) {
retval = -EAGAIN;
- port->count--;
- mutex_unlock(&port->mutex);
- goto fail;
+ goto err_dec_count;
}
/*
@@ -1566,8 +1530,12 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
if (retval == 0)
retval = tty_port_block_til_ready(port, tty, filp);
-fail:
+end:
return retval;
+err_dec_count:
+ port->count--;
+ mutex_unlock(&port->mutex);
+ goto end;
}
static const char *uart_type(struct uart_port *port)
@@ -1858,6 +1826,14 @@ uart_set_options(struct uart_port *port, struct console *co,
EXPORT_SYMBOL_GPL(uart_set_options);
#endif /* CONFIG_SERIAL_CORE_CONSOLE */
+/**
+ * uart_change_pm - set power state of the port
+ *
+ * @state: port descriptor
+ * @pm_state: new state
+ *
+ * Locking: port->mutex has to be held
+ */
static void uart_change_pm(struct uart_state *state, int pm_state)
{
struct uart_port *port = state->uart_port;
@@ -2214,6 +2190,8 @@ static const struct tty_operations uart_ops = {
};
static const struct tty_port_operations uart_port_ops = {
+ .activate = uart_port_activate,
+ .shutdown = uart_port_shutdown,
.carrier_raised = uart_carrier_raised,
.dtr_rts = uart_dtr_rts,
};
@@ -2275,8 +2253,8 @@ int uart_register_driver(struct uart_driver *drv)
tty_port_init(port);
port->ops = &uart_port_ops;
- port->close_delay = 500; /* .5 seconds */
- port->closing_wait = 30000; /* 30 seconds */
+ port->close_delay = HZ / 2; /* .5 seconds */
+ port->closing_wait = 30 * HZ;/* 30 seconds */
}
retval = tty_register_driver(normal);
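A subtlety in the jiffies-related hunks above: close_delay and closing_wait are now stored in jiffies internally, while the TIOCSSERIAL/TIOCGSERIAL interface still speaks in centiseconds, hence the paired * 10 and / 10 around msecs_to_jiffies()/jiffies_to_msecs(). For example, at HZ=100 (illustrative value):

	/* userspace sets close_delay = 50 centiseconds (0.5 s) */
	port->close_delay = msecs_to_jiffies(50 * 10);		/* 500 ms -> 50 jiffies */

	/* reading it back */
	tmp.close_delay = jiffies_to_msecs(port->close_delay) / 10;	/* -> 50 again */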
@@ -2467,6 +2445,99 @@ int uart_match_port(struct uart_port *port1, struct uart_port *port2)
}
EXPORT_SYMBOL(uart_match_port);
+/**
+ * uart_handle_dcd_change - handle a change of carrier detect state
+ * @uport: uart_port structure for the open port
+ * @status: new carrier detect status, nonzero if active
+ */
+void uart_handle_dcd_change(struct uart_port *uport, unsigned int status)
+{
+ struct uart_state *state = uport->state;
+ struct tty_port *port = &state->port;
+ struct tty_ldisc *ld = tty_ldisc_ref(port->tty);
+ struct pps_event_time ts;
+
+ if (ld && ld->ops->dcd_change)
+ pps_get_ts(&ts);
+
+ uport->icount.dcd++;
+#ifdef CONFIG_HARD_PPS
+ if ((uport->flags & UPF_HARDPPS_CD) && status)
+ hardpps();
+#endif
+
+ if (port->flags & ASYNC_CHECK_CD) {
+ if (status)
+ wake_up_interruptible(&port->open_wait);
+ else if (port->tty)
+ tty_hangup(port->tty);
+ }
+
+ if (ld && ld->ops->dcd_change)
+ ld->ops->dcd_change(port->tty, status, &ts);
+ if (ld)
+ tty_ldisc_deref(ld);
+}
+EXPORT_SYMBOL_GPL(uart_handle_dcd_change);
+
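As a hedged sketch of how a port driver would feed this helper from its modem-status interrupt (not part of the patch; my_read_msr() and the MY_MSR_* masks are invented for illustration):

        static void my_modem_status_isr(struct uart_port *port)
        {
                unsigned int msr = my_read_msr(port);   /* hypothetical status read */

                if (msr & MY_MSR_DELTA_DCD)
                        uart_handle_dcd_change(port, msr & MY_MSR_DCD);
        }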
+/**
+ * uart_handle_cts_change - handle a change of clear-to-send state
+ * @uport: uart_port structure for the open port
+ * @status: new clear to send status, nonzero if active
+ */
+void uart_handle_cts_change(struct uart_port *uport, unsigned int status)
+{
+ struct tty_port *port = &uport->state->port;
+ struct tty_struct *tty = port->tty;
+
+ uport->icount.cts++;
+
+ if (port->flags & ASYNC_CTS_FLOW) {
+ if (tty->hw_stopped) {
+ if (status) {
+ tty->hw_stopped = 0;
+ uport->ops->start_tx(uport);
+ uart_write_wakeup(uport);
+ }
+ } else {
+ if (!status) {
+ tty->hw_stopped = 1;
+ uport->ops->stop_tx(uport);
+ }
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(uart_handle_cts_change);
+
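Likewise, a minimal call site for the CTS helper, reusing the invented MY_MSR_* names from the sketch above; nothing here is mandated by the patch:

        if (msr & MY_MSR_DELTA_CTS)
                uart_handle_cts_change(port, msr & MY_MSR_CTS);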
+/**
+ * uart_insert_char - push a char to the uart layer
+ *
+ * The user is responsible for calling tty_flip_buffer_push() when done
+ * with the insertion.
+ *
+ * @port: corresponding port
+ * @status: state of the serial port RX buffer (LSR for 8250)
+ * @overrun: mask of overrun bits in @status
+ * @ch: character to push
+ * @flag: flag for the character (see TTY_NORMAL and friends)
+ */
+void uart_insert_char(struct uart_port *port, unsigned int status,
+ unsigned int overrun, unsigned int ch, unsigned int flag)
+{
+ struct tty_struct *tty = port->state->port.tty;
+
+ if ((status & port->ignore_status_mask & ~overrun) == 0)
+ tty_insert_flip_char(tty, ch, flag);
+
+ /*
+ * Overrun is special. Since it's reported immediately,
+ * it doesn't affect the current character.
+ */
+ if (status & ~port->ignore_status_mask & overrun)
+ tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+}
+EXPORT_SYMBOL_GPL(uart_insert_char);
+
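A minimal RX-path sketch showing the calling convention the kerneldoc above describes; everything prefixed my_/MY_ is hypothetical, and the final tty_flip_buffer_push() is the caller's responsibility noted in that comment:

        static void my_uart_rx_chars(struct uart_port *port)
        {
                struct tty_struct *tty = port->state->port.tty;
                unsigned int lsr;

                while ((lsr = my_read_reg(port, MY_LSR)) & MY_LSR_DR) {
                        unsigned int ch = my_read_reg(port, MY_RX);

                        port->icount.rx++;
                        uart_insert_char(port, lsr, MY_LSR_OE, ch, TTY_NORMAL);
                }

                /* the caller, not uart_insert_char(), pushes to the ldisc */
                if (tty)
                        tty_flip_buffer_push(tty);
        }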
EXPORT_SYMBOL(uart_write_wakeup);
EXPORT_SYMBOL(uart_register_driver);
EXPORT_SYMBOL(uart_unregister_driver);
diff --git a/drivers/tty/serial/serial_cs.c b/drivers/tty/serial/serial_cs.c
index eef736ff810a..86090605a84e 100644
--- a/drivers/tty/serial/serial_cs.c
+++ b/drivers/tty/serial/serial_cs.c
@@ -317,7 +317,7 @@ static int serial_probe(struct pcmcia_device *link)
info->p_dev = link;
link->priv = info;
- link->config_flags |= CONF_ENABLE_IRQ;
+ link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO;
if (do_sound)
link->config_flags |= CONF_ENABLE_SPKR;
@@ -445,7 +445,7 @@ static int simple_config(struct pcmcia_device *link)
/* First pass: look for a config entry that looks normal.
* Two tries: without IO aliases, then with aliases */
- link->config_flags |= CONF_AUTO_SET_VPP | CONF_AUTO_SET_IO;
+ link->config_flags |= CONF_AUTO_SET_VPP;
for (try = 0; try < 4; try++)
if (!pcmcia_loop_config(link, simple_config_check, &try))
goto found_port;
@@ -501,7 +501,8 @@ static int multi_config_check_notpicky(struct pcmcia_device *p_dev,
{
int *base2 = priv_data;
- if (!p_dev->resource[0]->end || !p_dev->resource[1]->end)
+ if (!p_dev->resource[0]->end || !p_dev->resource[1]->end ||
+ p_dev->resource[0]->start + 8 != p_dev->resource[1]->start)
return -ENODEV;
p_dev->resource[0]->end = p_dev->resource[1]->end = 8;
@@ -520,7 +521,6 @@ static int multi_config(struct pcmcia_device *link)
struct serial_info *info = link->priv;
int i, base2 = 0;
- link->config_flags |= CONF_AUTO_SET_IO;
/* First, look for a generic full-sized window */
if (!pcmcia_loop_config(link, multi_config_check, &info->multi))
base2 = link->resource[0]->start + 8;
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index aff9d612dff0..75085795528e 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -50,6 +50,7 @@
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
+#include <linux/gpio.h>
#ifdef CONFIG_SUPERH
#include <asm/sh_bios.h>
@@ -73,6 +74,7 @@ struct sci_port {
struct clk *fclk;
char *irqstr[SCIx_NR_IRQS];
+ char *gpiostr[SCIx_NR_FNS];
struct dma_chan *chan_tx;
struct dma_chan *chan_rx;
@@ -474,8 +476,15 @@ static void sci_init_pins(struct uart_port *port, unsigned int cflag)
if (!reg->size)
return;
- if (!(cflag & CRTSCTS))
- sci_out(port, SCSPTR, 0x0080); /* Set RTS = 1 */
+ if ((s->cfg->capabilities & SCIx_HAVE_RTSCTS) &&
+ ((!(cflag & CRTSCTS)))) {
+ unsigned short status;
+
+ status = sci_in(port, SCSPTR);
+ status &= ~SCSPTR_CTSIO;
+ status |= SCSPTR_RTSIO;
+ sci_out(port, SCSPTR, status); /* Set RTS = 1 */
+ }
}
static int sci_txfill(struct uart_port *port)
@@ -621,6 +630,7 @@ static void sci_receive_chars(struct uart_port *port)
} else {
for (i = 0; i < count; i++) {
char c = sci_in(port, SCxRDR);
+
status = sci_in(port, SCxSR);
#if defined(CONFIG_CPU_SH3)
/* Skip "chars" during break */
@@ -649,9 +659,11 @@ static void sci_receive_chars(struct uart_port *port)
/* Store data and status */
if (status & SCxSR_FER(port)) {
flag = TTY_FRAME;
+ port->icount.frame++;
dev_notice(port->dev, "frame error\n");
} else if (status & SCxSR_PER(port)) {
flag = TTY_PARITY;
+ port->icount.parity++;
dev_notice(port->dev, "parity error\n");
} else
flag = TTY_NORMAL;
@@ -723,6 +735,8 @@ static int sci_handle_errors(struct uart_port *port)
*/
if (s->cfg->overrun_bit != SCIx_NOT_SUPPORTED) {
if (status & (1 << s->cfg->overrun_bit)) {
+ port->icount.overrun++;
+
/* overrun error */
if (tty_insert_flip_char(tty, 0, TTY_OVERRUN))
copied++;
@@ -737,6 +751,8 @@ static int sci_handle_errors(struct uart_port *port)
struct sci_port *sci_port = to_sci_port(port);
if (!sci_port->break_flag) {
+ port->icount.brk++;
+
sci_port->break_flag = 1;
sci_schedule_break_timer(sci_port);
@@ -752,6 +768,8 @@ static int sci_handle_errors(struct uart_port *port)
} else {
/* frame error */
+ port->icount.frame++;
+
if (tty_insert_flip_char(tty, 0, TTY_FRAME))
copied++;
@@ -761,6 +779,8 @@ static int sci_handle_errors(struct uart_port *port)
if (status & SCxSR_PER(port)) {
/* parity error */
+ port->icount.parity++;
+
if (tty_insert_flip_char(tty, 0, TTY_PARITY))
copied++;
@@ -787,6 +807,8 @@ static int sci_handle_fifo_overrun(struct uart_port *port)
if ((sci_in(port, SCLSR) & (1 << s->cfg->overrun_bit))) {
sci_out(port, SCLSR, 0);
+ port->icount.overrun++;
+
tty_insert_flip_char(tty, 0, TTY_OVERRUN);
tty_flip_buffer_push(tty);
@@ -812,6 +834,9 @@ static int sci_handle_breaks(struct uart_port *port)
/* Debounce break */
s->break_flag = 1;
#endif
+
+ port->icount.brk++;
+
/* Notify of BREAK */
if (tty_insert_flip_char(tty, 0, TTY_BREAK))
copied++;
@@ -1082,6 +1107,67 @@ static void sci_free_irq(struct sci_port *port)
}
}
+static const char *sci_gpio_names[SCIx_NR_FNS] = {
+ "sck", "rxd", "txd", "cts", "rts",
+};
+
+static const char *sci_gpio_str(unsigned int index)
+{
+ return sci_gpio_names[index];
+}
+
+static void __devinit sci_init_gpios(struct sci_port *port)
+{
+ struct uart_port *up = &port->port;
+ int i;
+
+ if (!port->cfg)
+ return;
+
+ for (i = 0; i < SCIx_NR_FNS; i++) {
+ const char *desc;
+ int ret;
+
+ if (!port->cfg->gpios[i])
+ continue;
+
+ desc = sci_gpio_str(i);
+
+ port->gpiostr[i] = kasprintf(GFP_KERNEL, "%s:%s",
+ dev_name(up->dev), desc);
+
+ /*
+ * If we've failed the allocation, we can still continue
+ * on with a NULL string.
+ */
+ if (!port->gpiostr[i])
+ dev_notice(up->dev, "%s string allocation failure\n",
+ desc);
+
+ ret = gpio_request(port->cfg->gpios[i], port->gpiostr[i]);
+ if (unlikely(ret != 0)) {
+ dev_notice(up->dev, "failed %s gpio request\n", desc);
+
+ /*
+ * If we can't get the GPIO for whatever reason,
+ * no point in keeping the verbose string around.
+ */
+ kfree(port->gpiostr[i]);
+ }
+ }
+}
+
+static void sci_free_gpios(struct sci_port *port)
+{
+ int i;
+
+ for (i = 0; i < SCIx_NR_FNS; i++)
+ if (port->cfg->gpios[i]) {
+ gpio_free(port->cfg->gpios[i]);
+ kfree(port->gpiostr[i]);
+ }
+}
+
static unsigned int sci_tx_empty(struct uart_port *port)
{
unsigned short status = sci_in(port, SCxSR);
@@ -1090,19 +1176,39 @@ static unsigned int sci_tx_empty(struct uart_port *port)
return (status & SCxSR_TEND(port)) && !in_tx_fifo ? TIOCSER_TEMT : 0;
}
+/*
+ * Modem control is a bit of a mixed bag for SCI(F) ports. Generally
+ * CTS/RTS is supported in hardware by at least one port and controlled
+ * via SCSPTR (SCxPCR for SCIFA/B parts), or external pins (presently
+ * handled via the ->init_pins() op, which is a bit of a one-way street,
+ * lacking any ability to defer pin control -- this will later be
+ * converted over to the GPIO framework).
+ *
+ * Other modes (such as loopback) are supported generically on certain
+ * port types, but not others. For these it's sufficient to test for the
+ * existence of the support register and simply ignore the port type.
+ */
static void sci_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
- /* This routine is used for seting signals of: DTR, DCD, CTS/RTS */
- /* We use SCIF's hardware for CTS/RTS, so don't need any for that. */
- /* If you have signals for DTR and DCD, please implement here. */
+ if (mctrl & TIOCM_LOOP) {
+ struct plat_sci_reg *reg;
+
+ /*
+ * Standard loopback mode for SCFCR ports.
+ */
+ reg = sci_getreg(port, SCFCR);
+ if (reg->size)
+ sci_out(port, SCFCR, sci_in(port, SCFCR) | 1);
+ }
}
static unsigned int sci_get_mctrl(struct uart_port *port)
{
- /* This routine is used for getting signals of: DTR, DCD, DSR, RI,
- and CTS/RTS */
-
- return TIOCM_DTR | TIOCM_RTS | TIOCM_CTS | TIOCM_DSR;
+ /*
+ * CTS/RTS is handled in hardware when supported, while nothing
+ * else is wired up. Keep it simple and simply assert DSR/CAR.
+ */
+ return TIOCM_DSR | TIOCM_CAR;
}
#ifdef CONFIG_SERIAL_SH_SCI_DMA
@@ -1233,7 +1339,7 @@ static void sci_submit_rx(struct sci_port *s)
struct dma_async_tx_descriptor *desc;
desc = chan->device->device_prep_slave_sg(chan,
- sg, 1, DMA_FROM_DEVICE, DMA_PREP_INTERRUPT);
+ sg, 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
if (desc) {
s->desc_rx[i] = desc;
@@ -1348,7 +1454,7 @@ static void work_fn_tx(struct work_struct *work)
BUG_ON(!sg_dma_len(sg));
desc = chan->device->device_prep_slave_sg(chan,
- sg, s->sg_len_tx, DMA_TO_DEVICE,
+ sg, s->sg_len_tx, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc) {
/* switch to PIO */
@@ -1449,12 +1555,17 @@ static void sci_stop_rx(struct uart_port *port)
static void sci_enable_ms(struct uart_port *port)
{
- /* Nothing here yet .. */
+ /*
+ * Not supported by hardware, always a nop.
+ */
}
static void sci_break_ctl(struct uart_port *port, int break_state)
{
- /* Nothing here yet .. */
+ /*
+ * Not supported by hardware. Most parts couple break and rx
+ * interrupts together, with break detection always enabled.
+ */
}
#ifdef CONFIG_SERIAL_SH_SCI_DMA
@@ -1652,6 +1763,7 @@ static unsigned int sci_scbrr_calc(unsigned int algo_id, unsigned int bps,
static void sci_reset(struct uart_port *port)
{
+ struct plat_sci_reg *reg;
unsigned int status;
do {
@@ -1660,7 +1772,8 @@ static void sci_reset(struct uart_port *port)
sci_out(port, SCSCR, 0x00); /* TE=0, RE=0, CKE1=0 */
- if (port->type != PORT_SCI)
+ reg = sci_getreg(port, SCFCR);
+ if (reg->size)
sci_out(port, SCFCR, SCFCR_RFRST | SCFCR_TFRST);
}
@@ -1668,9 +1781,9 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
struct ktermios *old)
{
struct sci_port *s = to_sci_port(port);
+ struct plat_sci_reg *reg;
unsigned int baud, smr_val, max_baud;
int t = -1;
- u16 scfcr = 0;
/*
* earlyprintk comes here early on with port->uartclk set to zero.
@@ -1720,7 +1833,27 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
}
sci_init_pins(port, termios->c_cflag);
- sci_out(port, SCFCR, scfcr | ((termios->c_cflag & CRTSCTS) ? SCFCR_MCE : 0));
+
+ reg = sci_getreg(port, SCFCR);
+ if (reg->size) {
+ unsigned short ctrl = sci_in(port, SCFCR);
+
+ if (s->cfg->capabilities & SCIx_HAVE_RTSCTS) {
+ if (termios->c_cflag & CRTSCTS)
+ ctrl |= SCFCR_MCE;
+ else
+ ctrl &= ~SCFCR_MCE;
+ }
+
+ /*
+ * As we've done a sci_reset() above, ensure we don't
+ * interfere with the FIFOs while toggling MCE. As the
+ * reset values could still be set, simply mask them out.
+ */
+ ctrl &= ~(SCFCR_RFRST | SCFCR_TFRST);
+
+ sci_out(port, SCFCR, ctrl);
+ }
sci_out(port, SCSCR, s->cfg->scscr);
@@ -1892,6 +2025,8 @@ static int __devinit sci_init_single(struct platform_device *dev,
struct uart_port *port = &sci_port->port;
int ret;
+ sci_port->cfg = p;
+
port->ops = &sci_uart_ops;
port->iotype = UPIO_MEM;
port->line = index;
@@ -1937,6 +2072,8 @@ static int __devinit sci_init_single(struct platform_device *dev,
port->dev = &dev->dev;
+ sci_init_gpios(sci_port);
+
pm_runtime_irq_safe(&dev->dev);
pm_runtime_enable(&dev->dev);
}
@@ -1971,8 +2108,6 @@ static int __devinit sci_init_single(struct platform_device *dev,
p->error_mask |= (1 << p->overrun_bit);
}
- sci_port->cfg = p;
-
port->mapbase = p->mapbase;
port->type = p->type;
port->flags = p->flags;
@@ -2113,9 +2248,16 @@ static int sci_runtime_suspend(struct device *dev)
struct uart_port *port = &sci_port->port;
if (uart_console(port)) {
+ struct plat_sci_reg *reg;
+
sci_port->saved_smr = sci_in(port, SCSMR);
sci_port->saved_brr = sci_in(port, SCBRR);
- sci_port->saved_fcr = sci_in(port, SCFCR);
+
+ reg = sci_getreg(port, SCFCR);
+ if (reg->size)
+ sci_port->saved_fcr = sci_in(port, SCFCR);
+ else
+ sci_port->saved_fcr = 0;
}
return 0;
}
@@ -2129,7 +2271,10 @@ static int sci_runtime_resume(struct device *dev)
sci_reset(port);
sci_out(port, SCSMR, sci_port->saved_smr);
sci_out(port, SCBRR, sci_port->saved_brr);
- sci_out(port, SCFCR, sci_port->saved_fcr);
+
+ if (sci_port->saved_fcr)
+ sci_out(port, SCFCR, sci_port->saved_fcr);
+
sci_out(port, SCSCR, sci_port->cfg->scscr);
}
return 0;
@@ -2169,6 +2314,8 @@ static int sci_remove(struct platform_device *dev)
cpufreq_unregister_notifier(&port->freq_transition,
CPUFREQ_TRANSITION_NOTIFIER);
+ sci_free_gpios(port);
+
uart_remove_one_port(&sci_uart_driver, &port->port);
clk_put(port->iclk);
diff --git a/drivers/tty/serial/sh-sci.h b/drivers/tty/serial/sh-sci.h
index e9bed038aa1f..a1a2d364f92b 100644
--- a/drivers/tty/serial/sh-sci.h
+++ b/drivers/tty/serial/sh-sci.h
@@ -17,7 +17,9 @@
defined(CONFIG_ARCH_SH73A0) || \
defined(CONFIG_ARCH_SH7367) || \
defined(CONFIG_ARCH_SH7377) || \
- defined(CONFIG_ARCH_SH7372)
+ defined(CONFIG_ARCH_SH7372) || \
+ defined(CONFIG_ARCH_R8A7740)
+
# define SCxSR_RDxF_CLEAR(port) (sci_in(port, SCxSR) & 0xfffc)
# define SCxSR_ERROR_CLEAR(port) (sci_in(port, SCxSR) & 0xfd73)
# define SCxSR_TDxE_CLEAR(port) (sci_in(port, SCxSR) & 0xffdf)
diff --git a/drivers/tty/serial/sirfsoc_uart.c b/drivers/tty/serial/sirfsoc_uart.c
new file mode 100644
index 000000000000..a60523fee11b
--- /dev/null
+++ b/drivers/tty/serial/sirfsoc_uart.c
@@ -0,0 +1,783 @@
+/*
+ * Driver for CSR SiRFprimaII onboard UARTs.
+ *
+ * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
+ *
+ * Licensed under GPLv2 or later.
+ */
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/sysrq.h>
+#include <linux/console.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial_core.h>
+#include <linux/serial.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <asm/irq.h>
+#include <asm/mach/irq.h>
+#include <linux/pinctrl/pinmux.h>
+
+#include "sirfsoc_uart.h"
+
+static unsigned int
+sirfsoc_uart_pio_tx_chars(struct sirfsoc_uart_port *sirfport, int count);
+static unsigned int
+sirfsoc_uart_pio_rx_chars(struct uart_port *port, unsigned int max_rx_count);
+static struct uart_driver sirfsoc_uart_drv;
+
+static const struct sirfsoc_baudrate_to_regv baudrate_to_regv[] = {
+ {4000000, 2359296},
+ {3500000, 1310721},
+ {3000000, 1572865},
+ {2500000, 1245186},
+ {2000000, 1572866},
+ {1500000, 1245188},
+ {1152000, 1638404},
+ {1000000, 1572869},
+ {921600, 1114120},
+ {576000, 1245196},
+ {500000, 1245198},
+ {460800, 1572876},
+ {230400, 1310750},
+ {115200, 1310781},
+ {57600, 1310843},
+ {38400, 1114328},
+ {19200, 1114545},
+ {9600, 1114979},
+};
+
+static struct sirfsoc_uart_port sirfsoc_uart_ports[SIRFSOC_UART_NR] = {
+ [0] = {
+ .port = {
+ .iotype = UPIO_MEM,
+ .flags = UPF_BOOT_AUTOCONF,
+ .line = 0,
+ },
+ },
+ [1] = {
+ .port = {
+ .iotype = UPIO_MEM,
+ .flags = UPF_BOOT_AUTOCONF,
+ .line = 1,
+ },
+ },
+ [2] = {
+ .port = {
+ .iotype = UPIO_MEM,
+ .flags = UPF_BOOT_AUTOCONF,
+ .line = 2,
+ },
+ },
+};
+
+static inline struct sirfsoc_uart_port *to_sirfport(struct uart_port *port)
+{
+ return container_of(port, struct sirfsoc_uart_port, port);
+}
+
+static inline unsigned int sirfsoc_uart_tx_empty(struct uart_port *port)
+{
+ unsigned long reg;
+ reg = rd_regl(port, SIRFUART_TX_FIFO_STATUS);
+ if (reg & SIRFUART_FIFOEMPTY_MASK(port))
+ return TIOCSER_TEMT;
+ else
+ return 0;
+}
+
+static unsigned int sirfsoc_uart_get_mctrl(struct uart_port *port)
+{
+ struct sirfsoc_uart_port *sirfport = to_sirfport(port);
+ if (!(sirfport->ms_enabled)) {
+ goto cts_asserted;
+ } else if (sirfport->hw_flow_ctrl) {
+ if (!(rd_regl(port, SIRFUART_AFC_CTRL) &
+ SIRFUART_CTS_IN_STATUS))
+ goto cts_asserted;
+ else
+ goto cts_deasserted;
+ }
+cts_deasserted:
+ return TIOCM_CAR | TIOCM_DSR;
+cts_asserted:
+ return TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;
+}
+
+static void sirfsoc_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ struct sirfsoc_uart_port *sirfport = to_sirfport(port);
+ unsigned int assert = mctrl & TIOCM_RTS;
+ unsigned int val = assert ? SIRFUART_AFC_CTRL_RX_THD : 0x0;
+ unsigned int current_val;
+ if (sirfport->hw_flow_ctrl) {
+ current_val = rd_regl(port, SIRFUART_AFC_CTRL) & ~0xFF;
+ val |= current_val;
+ wr_regl(port, SIRFUART_AFC_CTRL, val);
+ }
+}
+
+static void sirfsoc_uart_stop_tx(struct uart_port *port)
+{
+ unsigned int regv;
+ regv = rd_regl(port, SIRFUART_INT_EN);
+ wr_regl(port, SIRFUART_INT_EN, regv & ~SIRFUART_TX_INT_EN);
+}
+
+void sirfsoc_uart_start_tx(struct uart_port *port)
+{
+ struct sirfsoc_uart_port *sirfport = to_sirfport(port);
+ unsigned long regv;
+ sirfsoc_uart_pio_tx_chars(sirfport, 1);
+ wr_regl(port, SIRFUART_TX_FIFO_OP, SIRFUART_TX_FIFO_START);
+ regv = rd_regl(port, SIRFUART_INT_EN);
+ wr_regl(port, SIRFUART_INT_EN, regv | SIRFUART_TX_INT_EN);
+}
+
+static void sirfsoc_uart_stop_rx(struct uart_port *port)
+{
+ unsigned long regv;
+ wr_regl(port, SIRFUART_RX_FIFO_OP, 0);
+ regv = rd_regl(port, SIRFUART_INT_EN);
+ wr_regl(port, SIRFUART_INT_EN, regv & ~SIRFUART_RX_IO_INT_EN);
+}
+
+static void sirfsoc_uart_disable_ms(struct uart_port *port)
+{
+ struct sirfsoc_uart_port *sirfport = to_sirfport(port);
+ unsigned long reg;
+ sirfport->ms_enabled = 0;
+ if (!sirfport->hw_flow_ctrl)
+ return;
+ reg = rd_regl(port, SIRFUART_AFC_CTRL);
+ wr_regl(port, SIRFUART_AFC_CTRL, reg & ~0x3FF);
+ reg = rd_regl(port, SIRFUART_INT_EN);
+ wr_regl(port, SIRFUART_INT_EN, reg & ~SIRFUART_CTS_INT_EN);
+}
+
+static void sirfsoc_uart_enable_ms(struct uart_port *port)
+{
+ struct sirfsoc_uart_port *sirfport = to_sirfport(port);
+ unsigned long reg;
+ unsigned long flg;
+ if (!sirfport->hw_flow_ctrl)
+ return;
+ flg = SIRFUART_AFC_RX_EN | SIRFUART_AFC_TX_EN;
+ reg = rd_regl(port, SIRFUART_AFC_CTRL);
+ wr_regl(port, SIRFUART_AFC_CTRL, reg | flg);
+ reg = rd_regl(port, SIRFUART_INT_EN);
+ wr_regl(port, SIRFUART_INT_EN, reg | SIRFUART_CTS_INT_EN);
+ uart_handle_cts_change(port,
+ !(rd_regl(port, SIRFUART_AFC_CTRL) & SIRFUART_CTS_IN_STATUS));
+ sirfport->ms_enabled = 1;
+}
+
+static void sirfsoc_uart_break_ctl(struct uart_port *port, int break_state)
+{
+ unsigned long ulcon = rd_regl(port, SIRFUART_LINE_CTRL);
+ if (break_state)
+ ulcon |= SIRFUART_SET_BREAK;
+ else
+ ulcon &= ~SIRFUART_SET_BREAK;
+ wr_regl(port, SIRFUART_LINE_CTRL, ulcon);
+}
+
+static unsigned int
+sirfsoc_uart_pio_rx_chars(struct uart_port *port, unsigned int max_rx_count)
+{
+ unsigned int ch, rx_count = 0;
+ struct tty_struct *tty;
+
+ tty = tty_port_tty_get(&port->state->port);
+ if (!tty)
+ return -ENODEV;
+
+ while (!(rd_regl(port, SIRFUART_RX_FIFO_STATUS) &
+ SIRFUART_FIFOEMPTY_MASK(port))) {
+ ch = rd_regl(port, SIRFUART_RX_FIFO_DATA) | SIRFUART_DUMMY_READ;
+ if (unlikely(uart_handle_sysrq_char(port, ch)))
+ continue;
+ uart_insert_char(port, 0, 0, ch, TTY_NORMAL);
+ rx_count++;
+ if (rx_count >= max_rx_count)
+ break;
+ }
+
+ port->icount.rx += rx_count;
+ tty_flip_buffer_push(tty);
+ tty_kref_put(tty);
+
+ return rx_count;
+}
+
+static unsigned int
+sirfsoc_uart_pio_tx_chars(struct sirfsoc_uart_port *sirfport, int count)
+{
+ struct uart_port *port = &sirfport->port;
+ struct circ_buf *xmit = &port->state->xmit;
+ unsigned int num_tx = 0;
+ while (!uart_circ_empty(xmit) &&
+ !(rd_regl(port, SIRFUART_TX_FIFO_STATUS) &
+ SIRFUART_FIFOFULL_MASK(port)) &&
+ count--) {
+ wr_regl(port, SIRFUART_TX_FIFO_DATA, xmit->buf[xmit->tail]);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ port->icount.tx++;
+ num_tx++;
+ }
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+ return num_tx;
+}
+
+static irqreturn_t sirfsoc_uart_isr(int irq, void *dev_id)
+{
+ unsigned long intr_status;
+ unsigned long cts_status;
+ unsigned long flag = TTY_NORMAL;
+ struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)dev_id;
+ struct uart_port *port = &sirfport->port;
+ struct uart_state *state = port->state;
+ struct circ_buf *xmit = &port->state->xmit;
+ intr_status = rd_regl(port, SIRFUART_INT_STATUS);
+ wr_regl(port, SIRFUART_INT_STATUS, intr_status);
+ intr_status &= rd_regl(port, SIRFUART_INT_EN);
+ if (unlikely(intr_status & (SIRFUART_ERR_INT_STAT))) {
+ if (intr_status & SIRFUART_RXD_BREAK) {
+ if (uart_handle_break(port))
+ goto recv_char;
+ uart_insert_char(port, intr_status,
+ SIRFUART_RX_OFLOW, 0, TTY_BREAK);
+ return IRQ_HANDLED;
+ }
+ if (intr_status & SIRFUART_RX_OFLOW)
+ port->icount.overrun++;
+ if (intr_status & SIRFUART_FRM_ERR) {
+ port->icount.frame++;
+ flag = TTY_FRAME;
+ }
+ if (intr_status & SIRFUART_PARITY_ERR)
+ flag = TTY_PARITY;
+ wr_regl(port, SIRFUART_RX_FIFO_OP, SIRFUART_RX_FIFO_RESET);
+ wr_regl(port, SIRFUART_RX_FIFO_OP, 0);
+ wr_regl(port, SIRFUART_RX_FIFO_OP, SIRFUART_RX_FIFO_START);
+ intr_status &= port->read_status_mask;
+ uart_insert_char(port, intr_status,
+ SIRFUART_RX_OFLOW_INT, 0, flag);
+ }
+recv_char:
+ if (intr_status & SIRFUART_CTS_INT_EN) {
+ cts_status = !(rd_regl(port, SIRFUART_AFC_CTRL) &
+ SIRFUART_CTS_IN_STATUS);
+ if (cts_status != 0) {
+ uart_handle_cts_change(port, 1);
+ } else {
+ uart_handle_cts_change(port, 0);
+ wake_up_interruptible(&state->port.delta_msr_wait);
+ }
+ }
+ if (intr_status & SIRFUART_RX_IO_INT_EN)
+ sirfsoc_uart_pio_rx_chars(port, SIRFSOC_UART_IO_RX_MAX_CNT);
+ if (intr_status & SIRFUART_TX_INT_EN) {
+ if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
+ return IRQ_HANDLED;
+ } else {
+ sirfsoc_uart_pio_tx_chars(sirfport,
+ SIRFSOC_UART_IO_TX_REASONABLE_CNT);
+ if ((uart_circ_empty(xmit)) &&
+ (rd_regl(port, SIRFUART_TX_FIFO_STATUS) &
+ SIRFUART_FIFOEMPTY_MASK(port)))
+ sirfsoc_uart_stop_tx(port);
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+static void sirfsoc_uart_start_rx(struct uart_port *port)
+{
+ unsigned long regv;
+ regv = rd_regl(port, SIRFUART_INT_EN);
+ wr_regl(port, SIRFUART_INT_EN, regv | SIRFUART_RX_IO_INT_EN);
+ wr_regl(port, SIRFUART_RX_FIFO_OP, SIRFUART_RX_FIFO_RESET);
+ wr_regl(port, SIRFUART_RX_FIFO_OP, 0);
+ wr_regl(port, SIRFUART_RX_FIFO_OP, SIRFUART_RX_FIFO_START);
+}
+
+static unsigned int
+sirfsoc_calc_sample_div(unsigned long baud_rate,
+ unsigned long ioclk_rate, unsigned long *setted_baud)
+{
+ unsigned long min_delta = ~0UL;
+ unsigned short sample_div;
+ unsigned int regv = 0;
+ unsigned long ioclk_div;
+ unsigned long baud_tmp;
+ int temp_delta;
+
+ for (sample_div = SIRF_MIN_SAMPLE_DIV;
+ sample_div <= SIRF_MAX_SAMPLE_DIV; sample_div++) {
+ ioclk_div = (ioclk_rate / (baud_rate * (sample_div + 1))) - 1;
+ if (ioclk_div > SIRF_IOCLK_DIV_MAX)
+ continue;
+ baud_tmp = ioclk_rate / ((ioclk_div + 1) * (sample_div + 1));
+ temp_delta = baud_tmp - baud_rate;
+ temp_delta = (temp_delta > 0) ? temp_delta : -temp_delta;
+ if (temp_delta < min_delta) {
+ regv = regv & (~SIRF_IOCLK_DIV_MASK);
+ regv = regv | ioclk_div;
+ regv = regv & (~SIRF_SAMPLE_DIV_MASK);
+ regv = regv | (sample_div << SIRF_SAMPLE_DIV_SHIFT);
+ min_delta = temp_delta;
+ *setted_baud = baud_tmp;
+ }
+ }
+ return regv;
+}
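As a worked example (illustrative arithmetic, not part of the patch): with the 150 MHz ioclk that sirfsoc_uart_set_termios() hard-codes below and a requested rate of 115200, the search works out to sample_div = 20 and ioclk_div = 61, i.e.

        150000000 / ((61 + 1) * (20 + 1)) = 115207 baud,

about 7 baud (0.006%) above the request. This agrees with the fast-path table entry {115200, 1310781} earlier in the file, since 1310781 == 0x14003d encodes sample_div 0x14 in bits 16-21 and ioclk_div 0x3d in the low 16 bits.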
+
+static void sirfsoc_uart_set_termios(struct uart_port *port,
+ struct ktermios *termios,
+ struct ktermios *old)
+{
+ struct sirfsoc_uart_port *sirfport = to_sirfport(port);
+ unsigned long ioclk_rate;
+ unsigned long config_reg = 0;
+ unsigned long baud_rate;
+ unsigned long setted_baud;
+ unsigned long flags;
+ unsigned long ic;
+ unsigned int clk_div_reg = 0;
+ unsigned long temp_reg_val;
+ unsigned long rx_time_out;
+ int threshold_div;
+ int temp;
+
+ ioclk_rate = 150000000;
+ switch (termios->c_cflag & CSIZE) {
+ default:
+ case CS8:
+ config_reg |= SIRFUART_DATA_BIT_LEN_8;
+ break;
+ case CS7:
+ config_reg |= SIRFUART_DATA_BIT_LEN_7;
+ break;
+ case CS6:
+ config_reg |= SIRFUART_DATA_BIT_LEN_6;
+ break;
+ case CS5:
+ config_reg |= SIRFUART_DATA_BIT_LEN_5;
+ break;
+ }
+ if (termios->c_cflag & CSTOPB)
+ config_reg |= SIRFUART_STOP_BIT_LEN_2;
+ baud_rate = uart_get_baud_rate(port, termios, old, 0, 4000000);
+ spin_lock_irqsave(&port->lock, flags);
+ port->read_status_mask = SIRFUART_RX_OFLOW_INT;
+ port->ignore_status_mask = 0;
+ /* read flags */
+ if (termios->c_iflag & INPCK)
+ port->read_status_mask |=
+ SIRFUART_FRM_ERR_INT | SIRFUART_PARITY_ERR_INT;
+ if (termios->c_iflag & (BRKINT | PARMRK))
+ port->read_status_mask |= SIRFUART_RXD_BREAK_INT;
+ /* ignore flags */
+ if (termios->c_iflag & IGNPAR)
+ port->ignore_status_mask |=
+ SIRFUART_FRM_ERR_INT | SIRFUART_PARITY_ERR_INT;
+ if ((termios->c_cflag & CREAD) == 0)
+ port->ignore_status_mask |= SIRFUART_DUMMY_READ;
+ /* enable parity if PARENB is set */
+ if (termios->c_cflag & PARENB) {
+ if (termios->c_cflag & CMSPAR) {
+ if (termios->c_cflag & PARODD)
+ config_reg |= SIRFUART_STICK_BIT_MARK;
+ else
+ config_reg |= SIRFUART_STICK_BIT_SPACE;
+ } else if (termios->c_cflag & PARODD) {
+ config_reg |= SIRFUART_STICK_BIT_ODD;
+ } else {
+ config_reg |= SIRFUART_STICK_BIT_EVEN;
+ }
+ }
+ /* Hardware Flow Control Settings */
+ if (UART_ENABLE_MS(port, termios->c_cflag)) {
+ if (!sirfport->ms_enabled)
+ sirfsoc_uart_enable_ms(port);
+ } else {
+ if (sirfport->ms_enabled)
+ sirfsoc_uart_disable_ms(port);
+ }
+
+ /* common rate: fast calculation */
+ for (ic = 0; ic < SIRF_BAUD_RATE_SUPPORT_NR; ic++)
+ if (baud_rate == baudrate_to_regv[ic].baud_rate)
+ clk_div_reg = baudrate_to_regv[ic].reg_val;
+ setted_baud = baud_rate;
+ /* arbitrary rate setting */
+ if (unlikely(clk_div_reg == 0))
+ clk_div_reg = sirfsoc_calc_sample_div(baud_rate, ioclk_rate,
+ &setted_baud);
+ wr_regl(port, SIRFUART_DIVISOR, clk_div_reg);
+
+ if (tty_termios_baud_rate(termios))
+ tty_termios_encode_baud_rate(termios, setted_baud, setted_baud);
+
+ /* set receive timeout */
+ rx_time_out = SIRFSOC_UART_RX_TIMEOUT(baud_rate, 20000);
+ rx_time_out = (rx_time_out > 0xFFFF) ? 0xFFFF : rx_time_out;
+ config_reg |= SIRFUART_RECV_TIMEOUT(rx_time_out);
+ temp_reg_val = rd_regl(port, SIRFUART_TX_FIFO_OP);
+ wr_regl(port, SIRFUART_RX_FIFO_OP, 0);
+ wr_regl(port, SIRFUART_TX_FIFO_OP,
+ temp_reg_val & ~SIRFUART_TX_FIFO_START);
+ wr_regl(port, SIRFUART_TX_DMA_IO_CTRL, SIRFUART_TX_MODE_IO);
+ wr_regl(port, SIRFUART_RX_DMA_IO_CTRL, SIRFUART_RX_MODE_IO);
+ wr_regl(port, SIRFUART_LINE_CTRL, config_reg);
+
+ /* Reset Rx/Tx FIFO Threshold level for proper baudrate */
+ if (baud_rate < 1000000)
+ threshold_div = 1;
+ else
+ threshold_div = 2;
+ temp = port->line == 1 ? 16 : 64;
+ wr_regl(port, SIRFUART_TX_FIFO_CTRL, temp / threshold_div);
+ wr_regl(port, SIRFUART_RX_FIFO_CTRL, temp / threshold_div);
+ temp_reg_val |= SIRFUART_TX_FIFO_START;
+ wr_regl(port, SIRFUART_TX_FIFO_OP, temp_reg_val);
+ uart_update_timeout(port, termios->c_cflag, baud_rate);
+ sirfsoc_uart_start_rx(port);
+ wr_regl(port, SIRFUART_TX_RX_EN, SIRFUART_TX_EN | SIRFUART_RX_EN);
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void startup_uart_controller(struct uart_port *port)
+{
+ unsigned long temp_regv;
+ int temp;
+ temp_regv = rd_regl(port, SIRFUART_TX_DMA_IO_CTRL);
+ wr_regl(port, SIRFUART_TX_DMA_IO_CTRL, temp_regv | SIRFUART_TX_MODE_IO);
+ temp_regv = rd_regl(port, SIRFUART_RX_DMA_IO_CTRL);
+ wr_regl(port, SIRFUART_RX_DMA_IO_CTRL, temp_regv | SIRFUART_RX_MODE_IO);
+ wr_regl(port, SIRFUART_TX_DMA_IO_LEN, 0);
+ wr_regl(port, SIRFUART_RX_DMA_IO_LEN, 0);
+ wr_regl(port, SIRFUART_TX_RX_EN, SIRFUART_RX_EN | SIRFUART_TX_EN);
+ wr_regl(port, SIRFUART_TX_FIFO_OP, SIRFUART_TX_FIFO_RESET);
+ wr_regl(port, SIRFUART_TX_FIFO_OP, 0);
+ wr_regl(port, SIRFUART_RX_FIFO_OP, SIRFUART_RX_FIFO_RESET);
+ wr_regl(port, SIRFUART_RX_FIFO_OP, 0);
+ temp = port->line == 1 ? 16 : 64;
+ wr_regl(port, SIRFUART_TX_FIFO_CTRL, temp);
+ wr_regl(port, SIRFUART_RX_FIFO_CTRL, temp);
+}
+
+static int sirfsoc_uart_startup(struct uart_port *port)
+{
+ struct sirfsoc_uart_port *sirfport = to_sirfport(port);
+ unsigned int index = port->line;
+ int ret;
+ set_irq_flags(port->irq, IRQF_VALID | IRQF_NOAUTOEN);
+ ret = request_irq(port->irq,
+ sirfsoc_uart_isr,
+ 0,
+ SIRFUART_PORT_NAME,
+ sirfport);
+ if (ret != 0) {
+ dev_err(port->dev, "UART%d request IRQ line (%d) failed.\n",
+ index, port->irq);
+ goto irq_err;
+ }
+ startup_uart_controller(port);
+ enable_irq(port->irq);
+irq_err:
+ return ret;
+}
+
+static void sirfsoc_uart_shutdown(struct uart_port *port)
+{
+ struct sirfsoc_uart_port *sirfport = to_sirfport(port);
+ wr_regl(port, SIRFUART_INT_EN, 0);
+ free_irq(port->irq, sirfport);
+ if (sirfport->ms_enabled) {
+ sirfsoc_uart_disable_ms(port);
+ sirfport->ms_enabled = 0;
+ }
+}
+
+static const char *sirfsoc_uart_type(struct uart_port *port)
+{
+ return port->type == SIRFSOC_PORT_TYPE ? SIRFUART_PORT_NAME : NULL;
+}
+
+static int sirfsoc_uart_request_port(struct uart_port *port)
+{
+ void *ret;
+ ret = request_mem_region(port->mapbase,
+ SIRFUART_MAP_SIZE, SIRFUART_PORT_NAME);
+ return ret ? 0 : -EBUSY;
+}
+
+static void sirfsoc_uart_release_port(struct uart_port *port)
+{
+ release_mem_region(port->mapbase, SIRFUART_MAP_SIZE);
+}
+
+static void sirfsoc_uart_config_port(struct uart_port *port, int flags)
+{
+ if (flags & UART_CONFIG_TYPE) {
+ port->type = SIRFSOC_PORT_TYPE;
+ sirfsoc_uart_request_port(port);
+ }
+}
+
+static struct uart_ops sirfsoc_uart_ops = {
+ .tx_empty = sirfsoc_uart_tx_empty,
+ .get_mctrl = sirfsoc_uart_get_mctrl,
+ .set_mctrl = sirfsoc_uart_set_mctrl,
+ .stop_tx = sirfsoc_uart_stop_tx,
+ .start_tx = sirfsoc_uart_start_tx,
+ .stop_rx = sirfsoc_uart_stop_rx,
+ .enable_ms = sirfsoc_uart_enable_ms,
+ .break_ctl = sirfsoc_uart_break_ctl,
+ .startup = sirfsoc_uart_startup,
+ .shutdown = sirfsoc_uart_shutdown,
+ .set_termios = sirfsoc_uart_set_termios,
+ .type = sirfsoc_uart_type,
+ .release_port = sirfsoc_uart_release_port,
+ .request_port = sirfsoc_uart_request_port,
+ .config_port = sirfsoc_uart_config_port,
+};
+
+#ifdef CONFIG_SERIAL_SIRFSOC_CONSOLE
+static int __init sirfsoc_uart_console_setup(struct console *co, char *options)
+{
+ unsigned int baud = 115200;
+ unsigned int bits = 8;
+ unsigned int parity = 'n';
+ unsigned int flow = 'n';
+ struct uart_port *port = &sirfsoc_uart_ports[co->index].port;
+
+ if (co->index < 0 || co->index >= SIRFSOC_UART_NR)
+ return -EINVAL;
+
+ if (!port->mapbase)
+ return -ENODEV;
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+ port->cons = co;
+ return uart_set_options(port, co, baud, parity, bits, flow);
+}
+
+static void sirfsoc_uart_console_putchar(struct uart_port *port, int ch)
+{
+ while (rd_regl(port,
+ SIRFUART_TX_FIFO_STATUS) & SIRFUART_FIFOFULL_MASK(port))
+ cpu_relax();
+ wr_regb(port, SIRFUART_TX_FIFO_DATA, ch);
+}
+
+static void sirfsoc_uart_console_write(struct console *co, const char *s,
+ unsigned int count)
+{
+ struct uart_port *port = &sirfsoc_uart_ports[co->index].port;
+ uart_console_write(port, s, count, sirfsoc_uart_console_putchar);
+}
+
+static struct console sirfsoc_uart_console = {
+ .name = SIRFSOC_UART_NAME,
+ .device = uart_console_device,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .write = sirfsoc_uart_console_write,
+ .setup = sirfsoc_uart_console_setup,
+ .data = &sirfsoc_uart_drv,
+};
+
+static int __init sirfsoc_uart_console_init(void)
+{
+ register_console(&sirfsoc_uart_console);
+ return 0;
+}
+console_initcall(sirfsoc_uart_console_init);
+#endif
+
+static struct uart_driver sirfsoc_uart_drv = {
+ .owner = THIS_MODULE,
+ .driver_name = SIRFUART_PORT_NAME,
+ .nr = SIRFSOC_UART_NR,
+ .dev_name = SIRFSOC_UART_NAME,
+ .major = SIRFSOC_UART_MAJOR,
+ .minor = SIRFSOC_UART_MINOR,
+#ifdef CONFIG_SERIAL_SIRFSOC_CONSOLE
+ .cons = &sirfsoc_uart_console,
+#else
+ .cons = NULL,
+#endif
+};
+
+int sirfsoc_uart_probe(struct platform_device *pdev)
+{
+ struct sirfsoc_uart_port *sirfport;
+ struct uart_port *port;
+ struct resource *res;
+ int ret;
+
+ if (of_property_read_u32(pdev->dev.of_node, "cell-index", &pdev->id)) {
+ dev_err(&pdev->dev,
+ "Unable to find cell-index in uart node.\n");
+ ret = -EFAULT;
+ goto err;
+ }
+
+ sirfport = &sirfsoc_uart_ports[pdev->id];
+ port = &sirfport->port;
+ port->dev = &pdev->dev;
+ port->private_data = sirfport;
+
+ if (of_find_property(pdev->dev.of_node, "hw_flow_ctrl", NULL))
+ sirfport->hw_flow_ctrl = 1;
+
+ if (of_property_read_u32(pdev->dev.of_node,
+ "fifosize",
+ &port->fifosize)) {
+ dev_err(&pdev->dev,
+ "Unable to find fifosize in uart node.\n");
+ ret = -EFAULT;
+ goto err;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "Insufficient resources.\n");
+ ret = -EFAULT;
+ goto err;
+ }
+ port->mapbase = res->start;
+ port->membase = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!port->membase) {
+ dev_err(&pdev->dev, "Cannot remap resource.\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "Insufficient resources.\n");
+ ret = -EFAULT;
+ goto irq_err;
+ }
+ port->irq = res->start;
+
+ if (sirfport->hw_flow_ctrl) {
+ sirfport->pmx = pinmux_get(&pdev->dev, NULL);
+ ret = IS_ERR(sirfport->pmx);
+ if (ret)
+ goto pmx_err;
+
+ pinmux_enable(sirfport->pmx);
+ }
+
+ port->ops = &sirfsoc_uart_ops;
+ spin_lock_init(&port->lock);
+
+ platform_set_drvdata(pdev, sirfport);
+ ret = uart_add_one_port(&sirfsoc_uart_drv, port);
+ if (ret != 0) {
+ dev_err(&pdev->dev, "Cannot add UART port(%d).\n", pdev->id);
+ goto port_err;
+ }
+
+ return 0;
+
+port_err:
+ platform_set_drvdata(pdev, NULL);
+ if (sirfport->hw_flow_ctrl) {
+ pinmux_disable(sirfport->pmx);
+ pinmux_put(sirfport->pmx);
+ }
+pmx_err:
+irq_err:
+ devm_iounmap(&pdev->dev, port->membase);
+err:
+ return ret;
+}
+
+static int sirfsoc_uart_remove(struct platform_device *pdev)
+{
+ struct sirfsoc_uart_port *sirfport = platform_get_drvdata(pdev);
+ struct uart_port *port = &sirfport->port;
+ platform_set_drvdata(pdev, NULL);
+ if (sirfport->hw_flow_ctrl) {
+ pinmux_disable(sirfport->pmx);
+ pinmux_put(sirfport->pmx);
+ }
+ devm_iounmap(&pdev->dev, port->membase);
+ uart_remove_one_port(&sirfsoc_uart_drv, port);
+ return 0;
+}
+
+static int
+sirfsoc_uart_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct sirfsoc_uart_port *sirfport = platform_get_drvdata(pdev);
+ struct uart_port *port = &sirfport->port;
+ uart_suspend_port(&sirfsoc_uart_drv, port);
+ return 0;
+}
+
+static int sirfsoc_uart_resume(struct platform_device *pdev)
+{
+ struct sirfsoc_uart_port *sirfport = platform_get_drvdata(pdev);
+ struct uart_port *port = &sirfport->port;
+ uart_resume_port(&sirfsoc_uart_drv, port);
+ return 0;
+}
+
+static struct of_device_id sirfsoc_uart_ids[] __devinitdata = {
+ { .compatible = "sirf,prima2-uart", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sirfsoc_uart_ids);
+
+static struct platform_driver sirfsoc_uart_driver = {
+ .probe = sirfsoc_uart_probe,
+ .remove = __devexit_p(sirfsoc_uart_remove),
+ .suspend = sirfsoc_uart_suspend,
+ .resume = sirfsoc_uart_resume,
+ .driver = {
+ .name = SIRFUART_PORT_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = sirfsoc_uart_ids,
+ },
+};
+
+static int __init sirfsoc_uart_init(void)
+{
+ int ret = 0;
+
+ ret = uart_register_driver(&sirfsoc_uart_drv);
+ if (ret)
+ goto out;
+
+ ret = platform_driver_register(&sirfsoc_uart_driver);
+ if (ret)
+ uart_unregister_driver(&sirfsoc_uart_drv);
+out:
+ return ret;
+}
+module_init(sirfsoc_uart_init);
+
+static void __exit sirfsoc_uart_exit(void)
+{
+ platform_driver_unregister(&sirfsoc_uart_driver);
+ uart_unregister_driver(&sirfsoc_uart_drv);
+}
+module_exit(sirfsoc_uart_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Bin Shi <Bin.Shi@csr.com>, Rong Wang<Rong.Wang@csr.com>");
+MODULE_DESCRIPTION("CSR SiRFprimaII Uart Driver");
diff --git a/drivers/tty/serial/sirfsoc_uart.h b/drivers/tty/serial/sirfsoc_uart.h
new file mode 100644
index 000000000000..fc64260fa93c
--- /dev/null
+++ b/drivers/tty/serial/sirfsoc_uart.h
@@ -0,0 +1,185 @@
+/*
+ * Drivers for CSR SiRFprimaII onboard UARTs.
+ *
+ * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
+ *
+ * Licensed under GPLv2 or later.
+ */
+#include <linux/bitops.h>
+
+/* UART Register Offset Define */
+#define SIRFUART_LINE_CTRL 0x0040
+#define SIRFUART_TX_RX_EN 0x004c
+#define SIRFUART_DIVISOR 0x0050
+#define SIRFUART_INT_EN 0x0054
+#define SIRFUART_INT_STATUS 0x0058
+#define SIRFUART_TX_DMA_IO_CTRL 0x0100
+#define SIRFUART_TX_DMA_IO_LEN 0x0104
+#define SIRFUART_TX_FIFO_CTRL 0x0108
+#define SIRFUART_TX_FIFO_LEVEL_CHK 0x010C
+#define SIRFUART_TX_FIFO_OP 0x0110
+#define SIRFUART_TX_FIFO_STATUS 0x0114
+#define SIRFUART_TX_FIFO_DATA 0x0118
+#define SIRFUART_RX_DMA_IO_CTRL 0x0120
+#define SIRFUART_RX_DMA_IO_LEN 0x0124
+#define SIRFUART_RX_FIFO_CTRL 0x0128
+#define SIRFUART_RX_FIFO_LEVEL_CHK 0x012C
+#define SIRFUART_RX_FIFO_OP 0x0130
+#define SIRFUART_RX_FIFO_STATUS 0x0134
+#define SIRFUART_RX_FIFO_DATA 0x0138
+#define SIRFUART_AFC_CTRL 0x0140
+#define SIRFUART_SWH_DMA_IO 0x0148
+
+/* UART Line Control Register */
+#define SIRFUART_DATA_BIT_LEN_MASK 0x3
+#define SIRFUART_DATA_BIT_LEN_5 BIT(0)
+#define SIRFUART_DATA_BIT_LEN_6 1
+#define SIRFUART_DATA_BIT_LEN_7 2
+#define SIRFUART_DATA_BIT_LEN_8 3
+#define SIRFUART_STOP_BIT_LEN_1 0
+#define SIRFUART_STOP_BIT_LEN_2 BIT(2)
+#define SIRFUART_PARITY_EN BIT(3)
+#define SIRFUART_EVEN_BIT BIT(4)
+#define SIRFUART_STICK_BIT_MASK (7 << 3)
+#define SIRFUART_STICK_BIT_NONE (0 << 3)
+#define SIRFUART_STICK_BIT_EVEN BIT(3)
+#define SIRFUART_STICK_BIT_ODD (3 << 3)
+#define SIRFUART_STICK_BIT_MARK (5 << 3)
+#define SIRFUART_STICK_BIT_SPACE (7 << 3)
+#define SIRFUART_SET_BREAK BIT(6)
+#define SIRFUART_LOOP_BACK BIT(7)
+#define SIRFUART_PARITY_MASK (7 << 3)
+#define SIRFUART_DUMMY_READ BIT(16)
+
+#define SIRFSOC_UART_RX_TIMEOUT(br, to) (((br) * (((to) + 999) / 1000)) / 1000)
+#define SIRFUART_RECV_TIMEOUT_MASK (0xFFFF << 16)
+#define SIRFUART_RECV_TIMEOUT(x) (((x) & 0xFFFF) << 16)
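As a quick sanity check of the timeout macro above (the microsecond reading of the second argument is inferred, not stated in the patch): SIRFSOC_UART_RX_TIMEOUT(115200, 20000) = (115200 * ((20000 + 999) / 1000)) / 1000 = (115200 * 20) / 1000 = 2304, so the 20000 passed from sirfsoc_uart_set_termios() corresponds to roughly a 20 ms idle window, programmed as 2304 bit periods at 115200 baud.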
+
+/* UART Auto Flow Control */
+#define SIRFUART_AFC_RX_THD_MASK 0x000000FF
+#define SIRFUART_AFC_RX_EN BIT(8)
+#define SIRFUART_AFC_TX_EN BIT(9)
+#define SIRFUART_CTS_CTRL BIT(10)
+#define SIRFUART_RTS_CTRL BIT(11)
+#define SIRFUART_CTS_IN_STATUS BIT(12)
+#define SIRFUART_RTS_OUT_STATUS BIT(13)
+
+/* UART Interrupt Enable Register */
+#define SIRFUART_RX_DONE_INT BIT(0)
+#define SIRFUART_TX_DONE_INT BIT(1)
+#define SIRFUART_RX_OFLOW_INT BIT(2)
+#define SIRFUART_TX_ALLOUT_INT BIT(3)
+#define SIRFUART_RX_IO_DMA_INT BIT(4)
+#define SIRFUART_TX_IO_DMA_INT BIT(5)
+#define SIRFUART_RXFIFO_FULL_INT BIT(6)
+#define SIRFUART_TXFIFO_EMPTY_INT BIT(7)
+#define SIRFUART_RXFIFO_THD_INT BIT(8)
+#define SIRFUART_TXFIFO_THD_INT BIT(9)
+#define SIRFUART_FRM_ERR_INT BIT(10)
+#define SIRFUART_RXD_BREAK_INT BIT(11)
+#define SIRFUART_RX_TIMEOUT_INT BIT(12)
+#define SIRFUART_PARITY_ERR_INT BIT(13)
+#define SIRFUART_CTS_INT_EN BIT(14)
+#define SIRFUART_RTS_INT_EN BIT(15)
+
+/* UART Interrupt Status Register */
+#define SIRFUART_RX_DONE BIT(0)
+#define SIRFUART_TX_DONE BIT(1)
+#define SIRFUART_RX_OFLOW BIT(2)
+#define SIRFUART_TX_ALL_EMPTY BIT(3)
+#define SIRFUART_DMA_IO_RX_DONE BIT(4)
+#define SIRFUART_DMA_IO_TX_DONE BIT(5)
+#define SIRFUART_RXFIFO_FULL BIT(6)
+#define SIRFUART_TXFIFO_EMPTY BIT(7)
+#define SIRFUART_RXFIFO_THD_REACH BIT(8)
+#define SIRFUART_TXFIFO_THD_REACH BIT(9)
+#define SIRFUART_FRM_ERR BIT(10)
+#define SIRFUART_RXD_BREAK BIT(11)
+#define SIRFUART_RX_TIMEOUT BIT(12)
+#define SIRFUART_PARITY_ERR BIT(13)
+#define SIRFUART_CTS_CHANGE BIT(14)
+#define SIRFUART_RTS_CHANGE BIT(15)
+#define SIRFUART_PLUG_IN BIT(16)
+
+#define SIRFUART_ERR_INT_STAT \
+ (SIRFUART_RX_OFLOW | \
+ SIRFUART_FRM_ERR | \
+ SIRFUART_RXD_BREAK | \
+ SIRFUART_PARITY_ERR)
+#define SIRFUART_ERR_INT_EN \
+ (SIRFUART_RX_OFLOW_INT | \
+ SIRFUART_FRM_ERR_INT | \
+ SIRFUART_RXD_BREAK_INT | \
+ SIRFUART_PARITY_ERR_INT)
+#define SIRFUART_TX_INT_EN SIRFUART_TXFIFO_EMPTY_INT
+#define SIRFUART_RX_IO_INT_EN \
+ (SIRFUART_RX_TIMEOUT_INT | \
+ SIRFUART_RXFIFO_THD_INT | \
+ SIRFUART_RXFIFO_FULL_INT | \
+ SIRFUART_ERR_INT_EN)
+
+/* UART FIFO Register */
+#define SIRFUART_TX_FIFO_STOP 0x0
+#define SIRFUART_TX_FIFO_RESET 0x1
+#define SIRFUART_TX_FIFO_START 0x2
+#define SIRFUART_RX_FIFO_STOP 0x0
+#define SIRFUART_RX_FIFO_RESET 0x1
+#define SIRFUART_RX_FIFO_START 0x2
+#define SIRFUART_TX_MODE_DMA 0
+#define SIRFUART_TX_MODE_IO 1
+#define SIRFUART_RX_MODE_DMA 0
+#define SIRFUART_RX_MODE_IO 1
+
+#define SIRFUART_RX_EN 0x1
+#define SIRFUART_TX_EN 0x2
+
+/* Generic Definitions */
+#define SIRFSOC_UART_NAME "ttySiRF"
+#define SIRFSOC_UART_MAJOR 0
+#define SIRFSOC_UART_MINOR 0
+#define SIRFUART_PORT_NAME "sirfsoc-uart"
+#define SIRFUART_MAP_SIZE 0x200
+#define SIRFSOC_UART_NR 3
+#define SIRFSOC_PORT_TYPE 0xa5
+
+/* Baud Rate Calculation */
+#define SIRF_MIN_SAMPLE_DIV 0xf
+#define SIRF_MAX_SAMPLE_DIV 0x3f
+#define SIRF_IOCLK_DIV_MAX 0xffff
+#define SIRF_SAMPLE_DIV_SHIFT 16
+#define SIRF_IOCLK_DIV_MASK 0xffff
+#define SIRF_SAMPLE_DIV_MASK 0x3f0000
+#define SIRF_BAUD_RATE_SUPPORT_NR 18
+
+/* For Fast Baud Rate Calculation */
+struct sirfsoc_baudrate_to_regv {
+ unsigned int baud_rate;
+ unsigned int reg_val;
+};
+
+struct sirfsoc_uart_port {
+ unsigned char hw_flow_ctrl;
+ unsigned char ms_enabled;
+
+ struct uart_port port;
+ struct pinmux *pmx;
+};
+
+/* Hardware Flow Control */
+#define SIRFUART_AFC_CTRL_RX_THD 0x70
+
+/* Register Access Control */
+#define portaddr(port, reg) ((port)->membase + (reg))
+#define rd_regb(port, reg) (__raw_readb(portaddr(port, reg)))
+#define rd_regl(port, reg) (__raw_readl(portaddr(port, reg)))
+#define wr_regb(port, reg, val) __raw_writeb(val, portaddr(port, reg))
+#define wr_regl(port, reg, val) __raw_writel(val, portaddr(port, reg))
+
+/* UART Port Mask */
+#define SIRFUART_FIFOLEVEL_MASK(port) ((port->line == 1) ? (0x1f) : (0x7f))
+#define SIRFUART_FIFOFULL_MASK(port) ((port->line == 1) ? (0x20) : (0x80))
+#define SIRFUART_FIFOEMPTY_MASK(port) ((port->line == 1) ? (0x40) : (0x100))
+
+/* I/O Mode */
+#define SIRFSOC_UART_IO_RX_MAX_CNT 256
+#define SIRFSOC_UART_IO_TX_REASONABLE_CNT 6
diff --git a/drivers/tty/serial/timbuart.c b/drivers/tty/serial/timbuart.c
index e76c8b747fb8..70f97491d8f2 100644
--- a/drivers/tty/serial/timbuart.c
+++ b/drivers/tty/serial/timbuart.c
@@ -513,20 +513,7 @@ static struct platform_driver timbuart_platform_driver = {
.remove = __devexit_p(timbuart_remove),
};
-/*--------------------------------------------------------------------------*/
-
-static int __init timbuart_init(void)
-{
- return platform_driver_register(&timbuart_platform_driver);
-}
-
-static void __exit timbuart_exit(void)
-{
- platform_driver_unregister(&timbuart_platform_driver);
-}
-
-module_init(timbuart_init);
-module_exit(timbuart_exit);
+module_platform_driver(timbuart_platform_driver);
MODULE_DESCRIPTION("Timberdale UART driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/tty/serial/ucc_uart.c b/drivers/tty/serial/ucc_uart.c
index cea8918b8233..2ebe606a2db1 100644
--- a/drivers/tty/serial/ucc_uart.c
+++ b/drivers/tty/serial/ucc_uart.c
@@ -963,6 +963,9 @@ static void qe_uart_set_termios(struct uart_port *port,
/* Do we really need a spinlock here? */
spin_lock_irqsave(&port->lock, flags);
+ /* Update the per-port timeout. */
+ uart_update_timeout(port, termios->c_cflag, baud);
+
out_be16(&uccp->upsmr, upsmr);
if (soft_uart) {
out_be16(&uccup->supsmr, supsmr);
diff --git a/drivers/tty/serial/vr41xx_siu.c b/drivers/tty/serial/vr41xx_siu.c
index 3beb6ab4fa68..83148e79ca13 100644
--- a/drivers/tty/serial/vr41xx_siu.c
+++ b/drivers/tty/serial/vr41xx_siu.c
@@ -961,18 +961,7 @@ static struct platform_driver siu_device_driver = {
},
};
-static int __init vr41xx_siu_init(void)
-{
- return platform_driver_register(&siu_device_driver);
-}
-
-static void __exit vr41xx_siu_exit(void)
-{
- platform_driver_unregister(&siu_device_driver);
-}
-
-module_init(vr41xx_siu_init);
-module_exit(vr41xx_siu_exit);
+module_platform_driver(siu_device_driver);
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:SIU");
diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
index e67fb20490d2..ff8017f87914 100644
--- a/drivers/tty/synclink.c
+++ b/drivers/tty/synclink.c
@@ -850,7 +850,7 @@ static int mgsl_device_count;
* .text section address and breakpoint on module load.
* This is useful for use with gdb and add-symbol-file command.
*/
-static int break_on_load;
+static bool break_on_load;
/*
* Driver major number, defaults to zero to get auto
diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
index 0f6b796c95c5..a7efe538df00 100644
--- a/drivers/tty/synclinkmp.c
+++ b/drivers/tty/synclinkmp.c
@@ -456,7 +456,7 @@ static int synclinkmp_device_count = 0;
* .text section address and breakpoint on module load.
* This is useful for use with gdb and add-symbol-file command.
*/
-static int break_on_load = 0;
+static bool break_on_load = 0;
/*
* Driver major number, defaults to zero to get auto
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index 43db715f1502..7867b7c4538e 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -32,7 +32,6 @@
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/writeback.h>
-#include <linux/buffer_head.h> /* for fsync_bdev() */
#include <linux/swap.h>
#include <linux/spinlock.h>
#include <linux/vt_kern.h>
@@ -41,6 +40,7 @@
#include <linux/oom.h>
#include <linux/slab.h>
#include <linux/input.h>
+#include <linux/uaccess.h>
#include <asm/ptrace.h>
#include <asm/irq_regs.h>
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 05085beb83db..e41b9bbc107d 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -790,19 +790,24 @@ static void session_clear_tty(struct pid *session)
void disassociate_ctty(int on_exit)
{
struct tty_struct *tty;
- struct pid *tty_pgrp = NULL;
if (!current->signal->leader)
return;
tty = get_current_tty();
if (tty) {
- tty_pgrp = get_pid(tty->pgrp);
+ struct pid *tty_pgrp = get_pid(tty->pgrp);
if (on_exit) {
if (tty->driver->type != TTY_DRIVER_TYPE_PTY)
tty_vhangup(tty);
}
tty_kref_put(tty);
+ if (tty_pgrp) {
+ kill_pgrp(tty_pgrp, SIGHUP, on_exit);
+ if (!on_exit)
+ kill_pgrp(tty_pgrp, SIGCONT, on_exit);
+ put_pid(tty_pgrp);
+ }
} else if (on_exit) {
struct pid *old_pgrp;
spin_lock_irq(&current->sighand->siglock);
@@ -816,12 +821,6 @@ void disassociate_ctty(int on_exit)
}
return;
}
- if (tty_pgrp) {
- kill_pgrp(tty_pgrp, SIGHUP, on_exit);
- if (!on_exit)
- kill_pgrp(tty_pgrp, SIGCONT, on_exit);
- put_pid(tty_pgrp);
- }
spin_lock_irq(&current->sighand->siglock);
put_pid(current->signal->tty_old_pgrp);
@@ -1558,6 +1557,59 @@ static void release_tty(struct tty_struct *tty, int idx)
}
/**
+ * tty_release_checks - check a tty before real release
+ * @tty: tty to check
+ * @o_tty: link of @tty (if any)
+ * @idx: index of the tty
+ *
+ * Performs some paranoid checking before true release of the @tty.
+ * This is a no-op unless TTY_PARANOIA_CHECK is defined.
+ */
+static int tty_release_checks(struct tty_struct *tty, struct tty_struct *o_tty,
+ int idx)
+{
+#ifdef TTY_PARANOIA_CHECK
+ if (idx < 0 || idx >= tty->driver->num) {
+ printk(KERN_DEBUG "%s: bad idx when trying to free (%s)\n",
+ __func__, tty->name);
+ return -1;
+ }
+
+ /* not much to check for devpts */
+ if (tty->driver->flags & TTY_DRIVER_DEVPTS_MEM)
+ return 0;
+
+ if (tty != tty->driver->ttys[idx]) {
+ printk(KERN_DEBUG "%s: driver.table[%d] not tty for (%s)\n",
+ __func__, idx, tty->name);
+ return -1;
+ }
+ if (tty->termios != tty->driver->termios[idx]) {
+ printk(KERN_DEBUG "%s: driver.termios[%d] not termios for (%s)\n",
+ __func__, idx, tty->name);
+ return -1;
+ }
+ if (tty->driver->other) {
+ if (o_tty != tty->driver->other->ttys[idx]) {
+ printk(KERN_DEBUG "%s: other->table[%d] not o_tty for (%s)\n",
+ __func__, idx, tty->name);
+ return -1;
+ }
+ if (o_tty->termios != tty->driver->other->termios[idx]) {
+ printk(KERN_DEBUG "%s: other->termios[%d] not o_termios for (%s)\n",
+ __func__, idx, tty->name);
+ return -1;
+ }
+ if (o_tty->link != tty) {
+ printk(KERN_DEBUG "%s: bad pty pointers\n", __func__);
+ return -1;
+ }
+ }
+#endif
+ return 0;
+}
+
+/**
* tty_release - vfs callback for close
* @inode: inode of tty
* @filp: file pointer for handle to tty
@@ -1585,11 +1637,11 @@ int tty_release(struct inode *inode, struct file *filp)
int idx;
char buf[64];
- if (tty_paranoia_check(tty, inode, "tty_release_dev"))
+ if (tty_paranoia_check(tty, inode, __func__))
return 0;
tty_lock();
- check_tty_count(tty, "tty_release_dev");
+ check_tty_count(tty, __func__);
__tty_fasync(-1, filp, 0);
@@ -1599,59 +1651,16 @@ int tty_release(struct inode *inode, struct file *filp)
devpts = (tty->driver->flags & TTY_DRIVER_DEVPTS_MEM) != 0;
o_tty = tty->link;
-#ifdef TTY_PARANOIA_CHECK
- if (idx < 0 || idx >= tty->driver->num) {
- printk(KERN_DEBUG "tty_release_dev: bad idx when trying to "
- "free (%s)\n", tty->name);
+ if (tty_release_checks(tty, o_tty, idx)) {
tty_unlock();
return 0;
}
- if (!devpts) {
- if (tty != tty->driver->ttys[idx]) {
- tty_unlock();
- printk(KERN_DEBUG "tty_release_dev: driver.table[%d] not tty "
- "for (%s)\n", idx, tty->name);
- return 0;
- }
- if (tty->termios != tty->driver->termios[idx]) {
- tty_unlock();
- printk(KERN_DEBUG "tty_release_dev: driver.termios[%d] not termios "
- "for (%s)\n",
- idx, tty->name);
- return 0;
- }
- }
-#endif
#ifdef TTY_DEBUG_HANGUP
- printk(KERN_DEBUG "tty_release_dev of %s (tty count=%d)...",
- tty_name(tty, buf), tty->count);
+ printk(KERN_DEBUG "%s: %s (tty count=%d)...\n", __func__,
+ tty_name(tty, buf), tty->count);
#endif
-#ifdef TTY_PARANOIA_CHECK
- if (tty->driver->other &&
- !(tty->driver->flags & TTY_DRIVER_DEVPTS_MEM)) {
- if (o_tty != tty->driver->other->ttys[idx]) {
- tty_unlock();
- printk(KERN_DEBUG "tty_release_dev: other->table[%d] "
- "not o_tty for (%s)\n",
- idx, tty->name);
- return 0 ;
- }
- if (o_tty->termios != tty->driver->other->termios[idx]) {
- tty_unlock();
- printk(KERN_DEBUG "tty_release_dev: other->termios[%d] "
- "not o_termios for (%s)\n",
- idx, tty->name);
- return 0;
- }
- if (o_tty->link != tty) {
- tty_unlock();
- printk(KERN_DEBUG "tty_release_dev: bad pty pointers\n");
- return 0;
- }
- }
-#endif
if (tty->ops->close)
tty->ops->close(tty, filp);
@@ -1707,8 +1716,8 @@ int tty_release(struct inode *inode, struct file *filp)
if (!do_sleep)
break;
- printk(KERN_WARNING "tty_release_dev: %s: read/write wait queue "
- "active!\n", tty_name(tty, buf));
+ printk(KERN_WARNING "%s: %s: read/write wait queue active!\n",
+ __func__, tty_name(tty, buf));
tty_unlock();
mutex_unlock(&tty_mutex);
schedule();
@@ -1721,15 +1730,14 @@ int tty_release(struct inode *inode, struct file *filp)
*/
if (pty_master) {
if (--o_tty->count < 0) {
- printk(KERN_WARNING "tty_release_dev: bad pty slave count "
- "(%d) for %s\n",
- o_tty->count, tty_name(o_tty, buf));
+ printk(KERN_WARNING "%s: bad pty slave count (%d) for %s\n",
+ __func__, o_tty->count, tty_name(o_tty, buf));
o_tty->count = 0;
}
}
if (--tty->count < 0) {
- printk(KERN_WARNING "tty_release_dev: bad tty->count (%d) for %s\n",
- tty->count, tty_name(tty, buf));
+ printk(KERN_WARNING "%s: bad tty->count (%d) for %s\n",
+ __func__, tty->count, tty_name(tty, buf));
tty->count = 0;
}
@@ -1778,7 +1786,7 @@ int tty_release(struct inode *inode, struct file *filp)
}
#ifdef TTY_DEBUG_HANGUP
- printk(KERN_DEBUG "freeing tty structure...");
+ printk(KERN_DEBUG "%s: freeing tty structure...\n", __func__);
#endif
/*
* Ask the line discipline code to release its structures
@@ -1798,6 +1806,83 @@ int tty_release(struct inode *inode, struct file *filp)
}
/**
+ * tty_open_current_tty - get tty of current task for open
+ * @device: device number
+ * @filp: file pointer to tty
+ * @return: tty of the current task iff @device is /dev/tty
+ *
+ * We cannot return driver and index like for the other nodes because
+ * devpts will not work then. It expects inodes to be from devpts FS.
+ */
+static struct tty_struct *tty_open_current_tty(dev_t device, struct file *filp)
+{
+ struct tty_struct *tty;
+
+ if (device != MKDEV(TTYAUX_MAJOR, 0))
+ return NULL;
+
+ tty = get_current_tty();
+ if (!tty)
+ return ERR_PTR(-ENXIO);
+
+ filp->f_flags |= O_NONBLOCK; /* Don't let /dev/tty block */
+ /* noctty = 1; */
+ tty_kref_put(tty);
+ /* FIXME: we put a reference and return a TTY! */
+ return tty;
+}
+
+/**
+ * tty_lookup_driver - lookup a tty driver for a given device file
+ * @device: device number
+ * @filp: file pointer to tty
+ * @noctty: set if the device should not become a controlling tty
+ * @index: index for the device in the @return driver
+ * @return: driver for this inode (with increased refcount)
+ *
+ * If @return is not erroneous, the caller is responsible for dropping the
+ * reference with tty_driver_kref_put().
+ *
+ * Locking: tty_mutex protects get_tty_driver
+ */
+static struct tty_driver *tty_lookup_driver(dev_t device, struct file *filp,
+ int *noctty, int *index)
+{
+ struct tty_driver *driver;
+
+ switch (device) {
+#ifdef CONFIG_VT
+ case MKDEV(TTY_MAJOR, 0): {
+ extern struct tty_driver *console_driver;
+ driver = tty_driver_kref_get(console_driver);
+ *index = fg_console;
+ *noctty = 1;
+ break;
+ }
+#endif
+ case MKDEV(TTYAUX_MAJOR, 1): {
+ struct tty_driver *console_driver = console_device(index);
+ if (console_driver) {
+ driver = tty_driver_kref_get(console_driver);
+ if (driver) {
+ /* Don't let /dev/console block */
+ filp->f_flags |= O_NONBLOCK;
+ *noctty = 1;
+ break;
+ }
+ }
+ return ERR_PTR(-ENODEV);
+ }
+ default:
+ driver = get_tty_driver(device, index);
+ if (!driver)
+ return ERR_PTR(-ENODEV);
+ break;
+ }
+ return driver;
+}
+
+/**
* tty_open - open a tty device
* @inode: inode of device file
* @filp: file pointer to tty
@@ -1813,16 +1898,16 @@ int tty_release(struct inode *inode, struct file *filp)
* The termios state of a pty is reset on first open so that
* settings don't persist across reuse.
*
- * Locking: tty_mutex protects tty, get_tty_driver and tty_init_dev work.
+ * Locking: tty_mutex protects tty, tty_lookup_driver and tty_init_dev.
* tty->count should protect the rest.
* ->siglock protects ->signal/->sighand
*/
static int tty_open(struct inode *inode, struct file *filp)
{
- struct tty_struct *tty = NULL;
+ struct tty_struct *tty;
int noctty, retval;
- struct tty_driver *driver;
+ struct tty_driver *driver = NULL;
int index;
dev_t device = inode->i_rdev;
unsigned saved_flags = filp->f_flags;
@@ -1841,66 +1926,22 @@ retry_open:
mutex_lock(&tty_mutex);
tty_lock();
- if (device == MKDEV(TTYAUX_MAJOR, 0)) {
- tty = get_current_tty();
- if (!tty) {
- tty_unlock();
- mutex_unlock(&tty_mutex);
- tty_free_file(filp);
- return -ENXIO;
- }
- driver = tty_driver_kref_get(tty->driver);
- index = tty->index;
- filp->f_flags |= O_NONBLOCK; /* Don't let /dev/tty block */
- /* noctty = 1; */
- /* FIXME: Should we take a driver reference ? */
- tty_kref_put(tty);
- goto got_driver;
- }
-#ifdef CONFIG_VT
- if (device == MKDEV(TTY_MAJOR, 0)) {
- extern struct tty_driver *console_driver;
- driver = tty_driver_kref_get(console_driver);
- index = fg_console;
- noctty = 1;
- goto got_driver;
- }
-#endif
- if (device == MKDEV(TTYAUX_MAJOR, 1)) {
- struct tty_driver *console_driver = console_device(&index);
- if (console_driver) {
- driver = tty_driver_kref_get(console_driver);
- if (driver) {
- /* Don't let /dev/console block */
- filp->f_flags |= O_NONBLOCK;
- noctty = 1;
- goto got_driver;
- }
+ tty = tty_open_current_tty(device, filp);
+ if (IS_ERR(tty)) {
+ retval = PTR_ERR(tty);
+ goto err_unlock;
+ } else if (!tty) {
+ driver = tty_lookup_driver(device, filp, &noctty, &index);
+ if (IS_ERR(driver)) {
+ retval = PTR_ERR(driver);
+ goto err_unlock;
}
- tty_unlock();
- mutex_unlock(&tty_mutex);
- tty_free_file(filp);
- return -ENODEV;
- }
- driver = get_tty_driver(device, &index);
- if (!driver) {
- tty_unlock();
- mutex_unlock(&tty_mutex);
- tty_free_file(filp);
- return -ENODEV;
- }
-got_driver:
- if (!tty) {
/* check whether we're reopening an existing tty */
tty = tty_driver_lookup_tty(driver, inode, index);
-
if (IS_ERR(tty)) {
- tty_unlock();
- mutex_unlock(&tty_mutex);
- tty_driver_kref_put(driver);
- tty_free_file(filp);
- return PTR_ERR(tty);
+ retval = PTR_ERR(tty);
+ goto err_unlock;
}
}
@@ -1912,21 +1953,22 @@ got_driver:
tty = tty_init_dev(driver, index, 0);
mutex_unlock(&tty_mutex);
- tty_driver_kref_put(driver);
+ if (driver)
+ tty_driver_kref_put(driver);
if (IS_ERR(tty)) {
tty_unlock();
- tty_free_file(filp);
- return PTR_ERR(tty);
+ retval = PTR_ERR(tty);
+ goto err_file;
}
tty_add_file(tty, filp);
- check_tty_count(tty, "tty_open");
+ check_tty_count(tty, __func__);
if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
tty->driver->subtype == PTY_TYPE_MASTER)
noctty = 1;
#ifdef TTY_DEBUG_HANGUP
- printk(KERN_DEBUG "opening %s...", tty->name);
+ printk(KERN_DEBUG "%s: opening %s...\n", __func__, tty->name);
#endif
if (tty->ops->open)
retval = tty->ops->open(tty, filp);
@@ -1940,8 +1982,8 @@ got_driver:
if (retval) {
#ifdef TTY_DEBUG_HANGUP
- printk(KERN_DEBUG "error %d in opening %s...", retval,
- tty->name);
+ printk(KERN_DEBUG "%s: error %d in opening %s...\n", __func__,
+ retval, tty->name);
#endif
tty_unlock(); /* need to call tty_release without BTM */
tty_release(inode, filp);
@@ -1976,6 +2018,15 @@ got_driver:
tty_unlock();
mutex_unlock(&tty_mutex);
return 0;
+err_unlock:
+ tty_unlock();
+ mutex_unlock(&tty_mutex);
+ /* after locks to avoid deadlock */
+ if (!IS_ERR_OR_NULL(driver))
+ tty_driver_kref_put(driver);
+err_file:
+ tty_free_file(filp);
+ return retval;
}
@@ -3267,7 +3318,7 @@ void __init console_init(void)
}
}
-static char *tty_devnode(struct device *dev, mode_t *mode)
+static char *tty_devnode(struct device *dev, umode_t *mode)
{
if (!mode)
return NULL;
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 8e0924f55446..24b95db75d84 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -1,19 +1,11 @@
#include <linux/types.h>
-#include <linux/major.h>
#include <linux/errno.h>
-#include <linux/signal.h>
-#include <linux/fcntl.h>
+#include <linux/kmod.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
-#include <linux/tty_flip.h>
-#include <linux/devpts_fs.h>
#include <linux/file.h>
-#include <linux/console.h>
-#include <linux/timer.h>
-#include <linux/ctype.h>
-#include <linux/kd.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/slab.h>
@@ -24,18 +16,8 @@
#include <linux/device.h>
#include <linux/wait.h>
#include <linux/bitops.h>
-#include <linux/delay.h>
#include <linux/seq_file.h>
-
#include <linux/uaccess.h>
-#include <asm/system.h>
-
-#include <linux/kbd_kern.h>
-#include <linux/vt_kern.h>
-#include <linux/selection.h>
-
-#include <linux/kmod.h>
-#include <linux/nsproxy.h>
#include <linux/ratelimit.h>
/*
@@ -558,8 +540,6 @@ static int tty_ldisc_wait_idle(struct tty_struct *tty, long timeout)
long ret;
ret = wait_event_timeout(tty_ldisc_idle,
atomic_read(&tty->ldisc->users) == 1, timeout);
- if (ret < 0)
- return ret;
return ret > 0 ? 0 : -EBUSY;
}
diff --git a/drivers/tty/vt/consolemap.c b/drivers/tty/vt/consolemap.c
index 45d3e80156d4..a0f3d6c4d39d 100644
--- a/drivers/tty/vt/consolemap.c
+++ b/drivers/tty/vt/consolemap.c
@@ -584,7 +584,7 @@ int con_set_default_unimap(struct vc_data *vc)
return 0;
dflt->refcount++;
*vc->vc_uni_pagedir_loc = (unsigned long)dflt;
- if (p && --p->refcount) {
+ if (p && !--p->refcount) {
con_release_unimap(p);
kfree(p);
}
diff --git a/drivers/uio/uio_pci_generic.c b/drivers/uio/uio_pci_generic.c
index 02bd47bdee1c..0bd08ef2b394 100644
--- a/drivers/uio/uio_pci_generic.c
+++ b/drivers/uio/uio_pci_generic.c
@@ -45,77 +45,12 @@ to_uio_pci_generic_dev(struct uio_info *info)
static irqreturn_t irqhandler(int irq, struct uio_info *info)
{
struct uio_pci_generic_dev *gdev = to_uio_pci_generic_dev(info);
- struct pci_dev *pdev = gdev->pdev;
- irqreturn_t ret = IRQ_NONE;
- u32 cmd_status_dword;
- u16 origcmd, newcmd, status;
-
- /* We do a single dword read to retrieve both command and status.
- * Document assumptions that make this possible. */
- BUILD_BUG_ON(PCI_COMMAND % 4);
- BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
-
- pci_block_user_cfg_access(pdev);
-
- /* Read both command and status registers in a single 32-bit operation.
- * Note: we could cache the value for command and move the status read
- * out of the lock if there was a way to get notified of user changes
- * to command register through sysfs. Should be good for shared irqs. */
- pci_read_config_dword(pdev, PCI_COMMAND, &cmd_status_dword);
- origcmd = cmd_status_dword;
- status = cmd_status_dword >> 16;
-
- /* Check interrupt status register to see whether our device
- * triggered the interrupt. */
- if (!(status & PCI_STATUS_INTERRUPT))
- goto done;
-
- /* We triggered the interrupt, disable it. */
- newcmd = origcmd | PCI_COMMAND_INTX_DISABLE;
- if (newcmd != origcmd)
- pci_write_config_word(pdev, PCI_COMMAND, newcmd);
- /* UIO core will signal the user process. */
- ret = IRQ_HANDLED;
-done:
-
- pci_unblock_user_cfg_access(pdev);
- return ret;
-}
+ if (!pci_check_and_mask_intx(gdev->pdev))
+ return IRQ_NONE;
-/* Verify that the device supports Interrupt Disable bit in command register,
- * per PCI 2.3, by flipping this bit and reading it back: this bit was readonly
- * in PCI 2.2. */
-static int __devinit verify_pci_2_3(struct pci_dev *pdev)
-{
- u16 orig, new;
- int err = 0;
-
- pci_block_user_cfg_access(pdev);
- pci_read_config_word(pdev, PCI_COMMAND, &orig);
- pci_write_config_word(pdev, PCI_COMMAND,
- orig ^ PCI_COMMAND_INTX_DISABLE);
- pci_read_config_word(pdev, PCI_COMMAND, &new);
- /* There's no way to protect against
- * hardware bugs or detect them reliably, but as long as we know
- * what the value should be, let's go ahead and check it. */
- if ((new ^ orig) & ~PCI_COMMAND_INTX_DISABLE) {
- err = -EBUSY;
- dev_err(&pdev->dev, "Command changed from 0x%x to 0x%x: "
- "driver or HW bug?\n", orig, new);
- goto err;
- }
- if (!((new ^ orig) & PCI_COMMAND_INTX_DISABLE)) {
- dev_warn(&pdev->dev, "Device does not support "
- "disabling interrupts: unable to bind.\n");
- err = -ENODEV;
- goto err;
- }
- /* Now restore the original value. */
- pci_write_config_word(pdev, PCI_COMMAND, orig);
-err:
- pci_unblock_user_cfg_access(pdev);
- return err;
+ /* UIO core will signal the user process. */
+ return IRQ_HANDLED;
}
static int __devinit probe(struct pci_dev *pdev,
@@ -138,9 +73,10 @@ static int __devinit probe(struct pci_dev *pdev,
return -ENODEV;
}
- err = verify_pci_2_3(pdev);
- if (err)
+ if (!pci_intx_mask_supported(pdev)) {
+ err = -ENODEV;
goto err_verify;
+ }
gdev = kzalloc(sizeof(struct uio_pci_generic_dev), GFP_KERNEL);
if (!gdev) {
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index 791f11bed606..75823a1abeb6 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -48,6 +48,7 @@ config USB_ARCH_HAS_OHCI
default y if ARCH_DAVINCI_DA8XX
default y if ARCH_CNS3XXX
default y if PLAT_SPEAR
+ default y if ARCH_EXYNOS
# PPC:
default y if STB03xxx
default y if PPC_MPC52xx
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index 75eca7645227..53a7bc07dd8d 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -6,6 +6,8 @@
obj-$(CONFIG_USB) += core/
+obj-$(CONFIG_USB_OTG_UTILS) += otg/
+
obj-$(CONFIG_USB_DWC3) += dwc3/
obj-$(CONFIG_USB_MON) += mon/
@@ -51,7 +53,6 @@ obj-$(CONFIG_USB_SPEEDTOUCH) += atm/
obj-$(CONFIG_USB_MUSB_HDRC) += musb/
obj-$(CONFIG_USB_RENESAS_USBHS) += renesas_usbhs/
-obj-$(CONFIG_USB_OTG_UTILS) += otg/
obj-$(CONFIG_USB_GADGET) += gadget/
obj-$(CONFIG_USB_COMMON) += usb-common.o
diff --git a/drivers/usb/atm/speedtch.c b/drivers/usb/atm/speedtch.c
index b42092e1f164..98dd9e49b684 100644
--- a/drivers/usb/atm/speedtch.c
+++ b/drivers/usb/atm/speedtch.c
@@ -73,9 +73,9 @@ static const char speedtch_driver_name[] = "speedtch";
#define DEFAULT_SW_BUFFERING 0
static unsigned int altsetting = 0; /* zero means: use the default */
-static int dl_512_first = DEFAULT_DL_512_FIRST;
-static int enable_isoc = DEFAULT_ENABLE_ISOC;
-static int sw_buffering = DEFAULT_SW_BUFFERING;
+static bool dl_512_first = DEFAULT_DL_512_FIRST;
+static bool enable_isoc = DEFAULT_ENABLE_ISOC;
+static bool sw_buffering = DEFAULT_SW_BUFFERING;
#define DEFAULT_B_MAX_DSL 8128
#define DEFAULT_MODEM_MODE 11
diff --git a/drivers/usb/atm/ueagle-atm.c b/drivers/usb/atm/ueagle-atm.c
index 00f171a7a8a0..01ea5d7421d4 100644
--- a/drivers/usb/atm/ueagle-atm.c
+++ b/drivers/usb/atm/ueagle-atm.c
@@ -542,7 +542,7 @@ static int modem_index;
static unsigned int debug;
static unsigned int altsetting[NB_MODEM] = {
[0 ... (NB_MODEM - 1)] = FASTEST_ISO_INTF};
-static int sync_wait[NB_MODEM];
+static bool sync_wait[NB_MODEM];
static char *cmv_file[NB_MODEM];
static int annex[NB_MODEM];
diff --git a/drivers/usb/c67x00/c67x00-drv.c b/drivers/usb/c67x00/c67x00-drv.c
index 57ae44cd0b88..6f3b6e267398 100644
--- a/drivers/usb/c67x00/c67x00-drv.c
+++ b/drivers/usb/c67x00/c67x00-drv.c
@@ -225,21 +225,10 @@ static struct platform_driver c67x00_driver = {
.name = "c67x00",
},
};
-MODULE_ALIAS("platform:c67x00");
-
-static int __init c67x00_init(void)
-{
- return platform_driver_register(&c67x00_driver);
-}
-static void __exit c67x00_exit(void)
-{
- platform_driver_unregister(&c67x00_driver);
-}
-
-module_init(c67x00_init);
-module_exit(c67x00_exit);
+module_platform_driver(c67x00_driver);
MODULE_AUTHOR("Peter Korsgaard, Jan Veldeman, Grant Likely");
MODULE_DESCRIPTION("Cypress C67X00 USB Controller Driver");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:c67x00");
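module_platform_driver() replaces the init/exit boilerplate deleted above; roughly (a sketch of the expansion, not the exact macro definition), it generates:

/* Rough equivalent of module_platform_driver(c67x00_driver); sketch only. */
static int __init c67x00_driver_init(void)
{
	return platform_driver_register(&c67x00_driver);
}
module_init(c67x00_driver_init);

static void __exit c67x00_driver_exit(void)
{
	platform_driver_unregister(&c67x00_driver);
}
module_exit(c67x00_driver_exit);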
diff --git a/drivers/usb/c67x00/c67x00-hcd.c b/drivers/usb/c67x00/c67x00-hcd.c
index d3e1356d091e..75e47b860a53 100644
--- a/drivers/usb/c67x00/c67x00-hcd.c
+++ b/drivers/usb/c67x00/c67x00-hcd.c
@@ -271,7 +271,6 @@ static void c67x00_hcd_irq(struct c67x00_sie *sie, u16 int_status, u16 msg)
if (int_status & SOFEOP_FLG(sie->sie_num)) {
c67x00_ll_usb_clear_status(sie, SOF_EOP_IRQ_FLG);
c67x00_sched_kick(c67x00);
- set_bit(HCD_FLAG_SAW_IRQ, &hcd->flags);
}
}
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index e8c564a53346..9543b19d410c 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -58,12 +58,62 @@ static struct usb_driver acm_driver;
static struct tty_driver *acm_tty_driver;
static struct acm *acm_table[ACM_TTY_MINORS];
-static DEFINE_MUTEX(open_mutex);
+static DEFINE_MUTEX(acm_table_lock);
-#define ACM_READY(acm) (acm && acm->dev && acm->port.count)
+/*
+ * acm_table accessors
+ */
-static const struct tty_port_operations acm_port_ops = {
-};
+/*
+ * Look up an ACM structure by index. If found and not disconnected, take a
+ * tty_port reference on it and return it (the mutex is not held on return).
+ */
+static struct acm *acm_get_by_index(unsigned index)
+{
+ struct acm *acm;
+
+ mutex_lock(&acm_table_lock);
+ acm = acm_table[index];
+ if (acm) {
+ mutex_lock(&acm->mutex);
+ if (acm->disconnected) {
+ mutex_unlock(&acm->mutex);
+ acm = NULL;
+ } else {
+ tty_port_get(&acm->port);
+ mutex_unlock(&acm->mutex);
+ }
+ }
+ mutex_unlock(&acm_table_lock);
+ return acm;
+}
+
+/*
+ * Try to find an available minor number and if found, associate it with 'acm'.
+ */
+static int acm_alloc_minor(struct acm *acm)
+{
+ int minor;
+
+ mutex_lock(&acm_table_lock);
+ for (minor = 0; minor < ACM_TTY_MINORS; minor++) {
+ if (!acm_table[minor]) {
+ acm_table[minor] = acm;
+ break;
+ }
+ }
+ mutex_unlock(&acm_table_lock);
+
+ return minor;
+}
+
+/* Release the minor number associated with 'acm'. */
+static void acm_release_minor(struct acm *acm)
+{
+ mutex_lock(&acm_table_lock);
+ acm_table[acm->minor] = NULL;
+ mutex_unlock(&acm_table_lock);
+}
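These accessors define the minor-number lifecycle used by the rest of this patch. A hedged sketch of the lookup-and-hold pattern that acm_tty_install() relies on (the helper below is illustrative, not part of the patch):

static int example_use_acm(unsigned int index)
{
	struct acm *acm = acm_get_by_index(index);	/* NULL if slot free or disconnected */

	if (!acm)
		return -ENODEV;
	/* ... acm is safe to use here; the tty_port reference keeps it alive ... */
	tty_port_put(&acm->port);	/* balances the reference taken by acm_get_by_index() */
	return 0;
}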
/*
* Functions for ACM control messages.
@@ -267,9 +317,6 @@ static void acm_ctrl_irq(struct urb *urb)
goto exit;
}
- if (!ACM_READY(acm))
- goto exit;
-
usb_mark_last_busy(acm->dev);
data = (unsigned char *)(dr + 1);
@@ -429,8 +476,7 @@ static void acm_write_bulk(struct urb *urb)
spin_lock_irqsave(&acm->write_lock, flags);
acm_write_done(acm, wb);
spin_unlock_irqrestore(&acm->write_lock, flags);
- if (ACM_READY(acm))
- schedule_work(&acm->work);
+ schedule_work(&acm->work);
}
static void acm_softint(struct work_struct *work)
@@ -440,8 +486,6 @@ static void acm_softint(struct work_struct *work)
dev_vdbg(&acm->data->dev, "%s\n", __func__);
- if (!ACM_READY(acm))
- return;
tty = tty_port_tty_get(&acm->port);
if (!tty)
return;
@@ -453,93 +497,122 @@ static void acm_softint(struct work_struct *work)
* TTY handlers
*/
-static int acm_tty_open(struct tty_struct *tty, struct file *filp)
+static int acm_tty_install(struct tty_driver *driver, struct tty_struct *tty)
{
struct acm *acm;
- int rv = -ENODEV;
-
- mutex_lock(&open_mutex);
+ int retval;
- acm = acm_table[tty->index];
- if (!acm || !acm->dev)
- goto out;
- else
- rv = 0;
+ dev_dbg(tty->dev, "%s\n", __func__);
- dev_dbg(&acm->control->dev, "%s\n", __func__);
+ acm = acm_get_by_index(tty->index);
+ if (!acm)
+ return -ENODEV;
- set_bit(TTY_NO_WRITE_SPLIT, &tty->flags);
+ retval = tty_init_termios(tty);
+ if (retval)
+ goto error_init_termios;
tty->driver_data = acm;
- tty_port_tty_set(&acm->port, tty);
- if (usb_autopm_get_interface(acm->control) < 0)
- goto early_bail;
- else
- acm->control->needs_remote_wakeup = 1;
+ /* Final install (we use the default method) */
+ tty_driver_kref_get(driver);
+ tty->count++;
+ driver->ttys[tty->index] = tty;
+
+ return 0;
+
+error_init_termios:
+ tty_port_put(&acm->port);
+ return retval;
+}
+
+static int acm_tty_open(struct tty_struct *tty, struct file *filp)
+{
+ struct acm *acm = tty->driver_data;
+
+ dev_dbg(tty->dev, "%s\n", __func__);
+
+ return tty_port_open(&acm->port, tty, filp);
+}
+
+static int acm_port_activate(struct tty_port *port, struct tty_struct *tty)
+{
+ struct acm *acm = container_of(port, struct acm, port);
+ int retval = -ENODEV;
+
+ dev_dbg(&acm->control->dev, "%s\n", __func__);
mutex_lock(&acm->mutex);
- if (acm->port.count++) {
- mutex_unlock(&acm->mutex);
- usb_autopm_put_interface(acm->control);
- goto out;
- }
+ if (acm->disconnected)
+ goto disconnected;
+
+ retval = usb_autopm_get_interface(acm->control);
+ if (retval)
+ goto error_get_interface;
+
+ /*
+ * FIXME: Why do we need this? Allocating 64K of physically contiguous
+ * memory is really nasty...
+ */
+ set_bit(TTY_NO_WRITE_SPLIT, &tty->flags);
+ acm->control->needs_remote_wakeup = 1;
acm->ctrlurb->dev = acm->dev;
if (usb_submit_urb(acm->ctrlurb, GFP_KERNEL)) {
dev_err(&acm->control->dev,
"%s - usb_submit_urb(ctrl irq) failed\n", __func__);
- goto bail_out;
+ goto error_submit_urb;
}
- if (0 > acm_set_control(acm, acm->ctrlout = ACM_CTRL_DTR | ACM_CTRL_RTS) &&
+ acm->ctrlout = ACM_CTRL_DTR | ACM_CTRL_RTS;
+ if (acm_set_control(acm, acm->ctrlout) < 0 &&
(acm->ctrl_caps & USB_CDC_CAP_LINE))
- goto bail_out;
+ goto error_set_control;
usb_autopm_put_interface(acm->control);
if (acm_submit_read_urbs(acm, GFP_KERNEL))
- goto bail_out;
-
- set_bit(ASYNCB_INITIALIZED, &acm->port.flags);
- rv = tty_port_block_til_ready(&acm->port, tty, filp);
+ goto error_submit_read_urbs;
mutex_unlock(&acm->mutex);
-out:
- mutex_unlock(&open_mutex);
- return rv;
-bail_out:
- acm->port.count--;
- mutex_unlock(&acm->mutex);
+ return 0;
+
+error_submit_read_urbs:
+ acm->ctrlout = 0;
+ acm_set_control(acm, acm->ctrlout);
+error_set_control:
+ usb_kill_urb(acm->ctrlurb);
+error_submit_urb:
usb_autopm_put_interface(acm->control);
-early_bail:
- mutex_unlock(&open_mutex);
- tty_port_tty_set(&acm->port, NULL);
- return -EIO;
+error_get_interface:
+disconnected:
+ mutex_unlock(&acm->mutex);
+ return retval;
}
-static void acm_tty_unregister(struct acm *acm)
+static void acm_port_destruct(struct tty_port *port)
{
- int i;
+ struct acm *acm = container_of(port, struct acm, port);
+
+ dev_dbg(&acm->control->dev, "%s\n", __func__);
tty_unregister_device(acm_tty_driver, acm->minor);
+ acm_release_minor(acm);
usb_put_intf(acm->control);
- acm_table[acm->minor] = NULL;
- usb_free_urb(acm->ctrlurb);
- for (i = 0; i < ACM_NW; i++)
- usb_free_urb(acm->wb[i].urb);
- for (i = 0; i < acm->rx_buflimit; i++)
- usb_free_urb(acm->read_urbs[i]);
kfree(acm->country_codes);
kfree(acm);
}
-static void acm_port_down(struct acm *acm)
+static void acm_port_shutdown(struct tty_port *port)
{
+ struct acm *acm = container_of(port, struct acm, port);
int i;
- if (acm->dev) {
+ dev_dbg(&acm->control->dev, "%s\n", __func__);
+
+ mutex_lock(&acm->mutex);
+ if (!acm->disconnected) {
usb_autopm_get_interface(acm->control);
acm_set_control(acm, acm->ctrlout = 0);
usb_kill_urb(acm->ctrlurb);
@@ -550,40 +623,28 @@ static void acm_port_down(struct acm *acm)
acm->control->needs_remote_wakeup = 0;
usb_autopm_put_interface(acm->control);
}
+ mutex_unlock(&acm->mutex);
+}
+
+static void acm_tty_cleanup(struct tty_struct *tty)
+{
+ struct acm *acm = tty->driver_data;
+ dev_dbg(&acm->control->dev, "%s\n", __func__);
+ tty_port_put(&acm->port);
}
static void acm_tty_hangup(struct tty_struct *tty)
{
struct acm *acm = tty->driver_data;
+ dev_dbg(&acm->control->dev, "%s\n", __func__);
tty_port_hangup(&acm->port);
- mutex_lock(&open_mutex);
- acm_port_down(acm);
- mutex_unlock(&open_mutex);
}
static void acm_tty_close(struct tty_struct *tty, struct file *filp)
{
struct acm *acm = tty->driver_data;
-
- /* Perform the closing process and see if we need to do the hardware
- shutdown */
- if (!acm)
- return;
-
- mutex_lock(&open_mutex);
- if (tty_port_close_start(&acm->port, tty, filp) == 0) {
- if (!acm->dev) {
- tty_port_tty_set(&acm->port, NULL);
- acm_tty_unregister(acm);
- tty->driver_data = NULL;
- }
- mutex_unlock(&open_mutex);
- return;
- }
- acm_port_down(acm);
- tty_port_close_end(&acm->port, tty);
- tty_port_tty_set(&acm->port, NULL);
- mutex_unlock(&open_mutex);
+ dev_dbg(&acm->control->dev, "%s\n", __func__);
+ tty_port_close(&acm->port, tty, filp);
}
static int acm_tty_write(struct tty_struct *tty,
@@ -595,8 +656,6 @@ static int acm_tty_write(struct tty_struct *tty,
int wbn;
struct acm_wb *wb;
- if (!ACM_READY(acm))
- return -EINVAL;
if (!count)
return 0;
@@ -625,8 +684,6 @@ static int acm_tty_write(struct tty_struct *tty,
static int acm_tty_write_room(struct tty_struct *tty)
{
struct acm *acm = tty->driver_data;
- if (!ACM_READY(acm))
- return -EINVAL;
/*
* Do not let the line discipline know that we have a reserve,
* or it might get too enthusiastic.
@@ -637,7 +694,11 @@ static int acm_tty_write_room(struct tty_struct *tty)
static int acm_tty_chars_in_buffer(struct tty_struct *tty)
{
struct acm *acm = tty->driver_data;
- if (!ACM_READY(acm))
+ /*
+ * if the device was unplugged then any remaining characters fell out
+ * of the connector ;)
+ */
+ if (acm->disconnected)
return 0;
/*
* This is inaccurate (overcounts), but it works.
@@ -649,9 +710,6 @@ static void acm_tty_throttle(struct tty_struct *tty)
{
struct acm *acm = tty->driver_data;
- if (!ACM_READY(acm))
- return;
-
spin_lock_irq(&acm->read_lock);
acm->throttle_req = 1;
spin_unlock_irq(&acm->read_lock);
@@ -662,9 +720,6 @@ static void acm_tty_unthrottle(struct tty_struct *tty)
struct acm *acm = tty->driver_data;
unsigned int was_throttled;
- if (!ACM_READY(acm))
- return;
-
spin_lock_irq(&acm->read_lock);
was_throttled = acm->throttled;
acm->throttled = 0;
@@ -679,8 +734,7 @@ static int acm_tty_break_ctl(struct tty_struct *tty, int state)
{
struct acm *acm = tty->driver_data;
int retval;
- if (!ACM_READY(acm))
- return -EINVAL;
+
retval = acm_send_break(acm, state ? 0xffff : 0);
if (retval < 0)
dev_dbg(&acm->control->dev, "%s - send break failed\n",
@@ -692,9 +746,6 @@ static int acm_tty_tiocmget(struct tty_struct *tty)
{
struct acm *acm = tty->driver_data;
- if (!ACM_READY(acm))
- return -EINVAL;
-
return (acm->ctrlout & ACM_CTRL_DTR ? TIOCM_DTR : 0) |
(acm->ctrlout & ACM_CTRL_RTS ? TIOCM_RTS : 0) |
(acm->ctrlin & ACM_CTRL_DSR ? TIOCM_DSR : 0) |
@@ -709,9 +760,6 @@ static int acm_tty_tiocmset(struct tty_struct *tty,
struct acm *acm = tty->driver_data;
unsigned int newctrl;
- if (!ACM_READY(acm))
- return -EINVAL;
-
newctrl = acm->ctrlout;
set = (set & TIOCM_DTR ? ACM_CTRL_DTR : 0) |
(set & TIOCM_RTS ? ACM_CTRL_RTS : 0);
@@ -728,11 +776,6 @@ static int acm_tty_tiocmset(struct tty_struct *tty,
static int acm_tty_ioctl(struct tty_struct *tty,
unsigned int cmd, unsigned long arg)
{
- struct acm *acm = tty->driver_data;
-
- if (!ACM_READY(acm))
- return -EINVAL;
-
return -ENOIOCTLCMD;
}
@@ -756,9 +799,6 @@ static void acm_tty_set_termios(struct tty_struct *tty,
struct usb_cdc_line_coding newline;
int newctrl = acm->ctrlout;
- if (!ACM_READY(acm))
- return;
-
newline.dwDTERate = cpu_to_le32(tty_get_baud_rate(tty));
newline.bCharFormat = termios->c_cflag & CSTOPB ? 2 : 0;
newline.bParityType = termios->c_cflag & PARENB ?
@@ -788,6 +828,12 @@ static void acm_tty_set_termios(struct tty_struct *tty,
}
}
+static const struct tty_port_operations acm_port_ops = {
+ .shutdown = acm_port_shutdown,
+ .activate = acm_port_activate,
+ .destruct = acm_port_destruct,
+};
+
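For orientation, the tty core drives these operations roughly as follows (paraphrased tty_port behaviour, not something introduced by this patch):

/*
 * Rough mapping (sketch):
 *   tty_port_open(&acm->port, tty, filp)   invokes .activate on the first open
 *   tty_port_close(&acm->port, tty, filp)  invokes .shutdown on the last close
 *   tty_port_put(&acm->port)               invokes .destruct when the kref hits zero
 */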
/*
* USB probe and disconnect routines.
*/
@@ -1047,12 +1093,6 @@ skip_normal_probe:
}
made_compressed_probe:
dev_dbg(&intf->dev, "interfaces are valid\n");
- for (minor = 0; minor < ACM_TTY_MINORS && acm_table[minor]; minor++);
-
- if (minor == ACM_TTY_MINORS) {
- dev_err(&intf->dev, "no more free acm devices\n");
- return -ENODEV;
- }
acm = kzalloc(sizeof(struct acm), GFP_KERNEL);
if (acm == NULL) {
@@ -1060,6 +1100,13 @@ made_compressed_probe:
goto alloc_fail;
}
+ minor = acm_alloc_minor(acm);
+ if (minor == ACM_TTY_MINORS) {
+ dev_err(&intf->dev, "no more free acm devices\n");
+ kfree(acm);
+ return -ENODEV;
+ }
+
ctrlsize = usb_endpoint_maxp(epctrl);
readsize = usb_endpoint_maxp(epread) *
(quirks == SINGLE_RX_URB ? 1 : 2);
@@ -1183,6 +1230,8 @@ made_compressed_probe:
i = device_create_file(&intf->dev, &dev_attr_wCountryCodes);
if (i < 0) {
kfree(acm->country_codes);
+ acm->country_codes = NULL;
+ acm->country_code_size = 0;
goto skip_countries;
}
@@ -1191,6 +1240,8 @@ made_compressed_probe:
if (i < 0) {
device_remove_file(&intf->dev, &dev_attr_wCountryCodes);
kfree(acm->country_codes);
+ acm->country_codes = NULL;
+ acm->country_code_size = 0;
goto skip_countries;
}
}
@@ -1218,8 +1269,6 @@ skip_countries:
usb_get_intf(control_interface);
tty_register_device(acm_tty_driver, minor, &control_interface->dev);
- acm_table[minor] = acm;
-
return 0;
alloc_fail7:
for (i = 0; i < ACM_NW; i++)
@@ -1234,6 +1283,7 @@ alloc_fail5:
alloc_fail4:
usb_free_coherent(usb_dev, ctrlsize, acm->ctrl_buffer, acm->ctrl_dma);
alloc_fail2:
+ acm_release_minor(acm);
kfree(acm);
alloc_fail:
return -ENOMEM;
@@ -1259,12 +1309,16 @@ static void acm_disconnect(struct usb_interface *intf)
struct acm *acm = usb_get_intfdata(intf);
struct usb_device *usb_dev = interface_to_usbdev(intf);
struct tty_struct *tty;
+ int i;
+
+ dev_dbg(&intf->dev, "%s\n", __func__);
/* sibling interface is already cleaning up */
if (!acm)
return;
- mutex_lock(&open_mutex);
+ mutex_lock(&acm->mutex);
+ acm->disconnected = true;
if (acm->country_codes) {
device_remove_file(&acm->control->dev,
&dev_attr_wCountryCodes);
@@ -1272,33 +1326,32 @@ static void acm_disconnect(struct usb_interface *intf)
&dev_attr_iCountryCodeRelDate);
}
device_remove_file(&acm->control->dev, &dev_attr_bmCapabilities);
- acm->dev = NULL;
usb_set_intfdata(acm->control, NULL);
usb_set_intfdata(acm->data, NULL);
+ mutex_unlock(&acm->mutex);
+
+ tty = tty_port_tty_get(&acm->port);
+ if (tty) {
+ tty_vhangup(tty);
+ tty_kref_put(tty);
+ }
stop_data_traffic(acm);
+ usb_free_urb(acm->ctrlurb);
+ for (i = 0; i < ACM_NW; i++)
+ usb_free_urb(acm->wb[i].urb);
+ for (i = 0; i < acm->rx_buflimit; i++)
+ usb_free_urb(acm->read_urbs[i]);
acm_write_buffers_free(acm);
- usb_free_coherent(usb_dev, acm->ctrlsize, acm->ctrl_buffer,
- acm->ctrl_dma);
+ usb_free_coherent(usb_dev, acm->ctrlsize, acm->ctrl_buffer, acm->ctrl_dma);
acm_read_buffers_free(acm);
if (!acm->combined_interfaces)
usb_driver_release_interface(&acm_driver, intf == acm->control ?
acm->data : acm->control);
- if (acm->port.count == 0) {
- acm_tty_unregister(acm);
- mutex_unlock(&open_mutex);
- return;
- }
-
- mutex_unlock(&open_mutex);
- tty = tty_port_tty_get(&acm->port);
- if (tty) {
- tty_hangup(tty);
- tty_kref_put(tty);
- }
+ tty_port_put(&acm->port);
}
#ifdef CONFIG_PM
@@ -1325,16 +1378,10 @@ static int acm_suspend(struct usb_interface *intf, pm_message_t message)
if (cnt)
return 0;
- /*
- we treat opened interfaces differently,
- we must guard against open
- */
- mutex_lock(&acm->mutex);
- if (acm->port.count)
+ if (test_bit(ASYNCB_INITIALIZED, &acm->port.flags))
stop_data_traffic(acm);
- mutex_unlock(&acm->mutex);
return 0;
}
@@ -1353,8 +1400,7 @@ static int acm_resume(struct usb_interface *intf)
if (cnt)
return 0;
- mutex_lock(&acm->mutex);
- if (acm->port.count) {
+ if (test_bit(ASYNCB_INITIALIZED, &acm->port.flags)) {
rv = usb_submit_urb(acm->ctrlurb, GFP_NOIO);
spin_lock_irq(&acm->write_lock);
@@ -1378,7 +1424,6 @@ static int acm_resume(struct usb_interface *intf)
}
err_out:
- mutex_unlock(&acm->mutex);
return rv;
}
@@ -1387,15 +1432,14 @@ static int acm_reset_resume(struct usb_interface *intf)
struct acm *acm = usb_get_intfdata(intf);
struct tty_struct *tty;
- mutex_lock(&acm->mutex);
- if (acm->port.count) {
+ if (test_bit(ASYNCB_INITIALIZED, &acm->port.flags)) {
tty = tty_port_tty_get(&acm->port);
if (tty) {
tty_hangup(tty);
tty_kref_put(tty);
}
}
- mutex_unlock(&acm->mutex);
+
return acm_resume(intf);
}
@@ -1458,6 +1502,16 @@ static const struct usb_device_id acm_ids[] = {
},
{ USB_DEVICE(0x22b8, 0x6425), /* Motorola MOTOMAGX phones */
},
+ /* Motorola H24 HSPA module: */
+ { USB_DEVICE(0x22b8, 0x2d91) }, /* modem */
+ { USB_DEVICE(0x22b8, 0x2d92) }, /* modem + diagnostics */
+ { USB_DEVICE(0x22b8, 0x2d93) }, /* modem + AT port */
+ { USB_DEVICE(0x22b8, 0x2d95) }, /* modem + AT port + diagnostics */
+ { USB_DEVICE(0x22b8, 0x2d96) }, /* modem + NMEA */
+ { USB_DEVICE(0x22b8, 0x2d97) }, /* modem + diagnostics + NMEA */
+ { USB_DEVICE(0x22b8, 0x2d99) }, /* modem + AT port + NMEA */
+ { USB_DEVICE(0x22b8, 0x2d9a) }, /* modem + AT port + diagnostics + NMEA */
+
{ USB_DEVICE(0x0572, 0x1329), /* Hummingbird huc56s (Conexant) */
.driver_info = NO_UNION_NORMAL, /* union descriptor misplaced on
data interface instead of
@@ -1594,8 +1648,10 @@ static struct usb_driver acm_driver = {
*/
static const struct tty_operations acm_ops = {
+ .install = acm_tty_install,
.open = acm_tty_open,
.close = acm_tty_close,
+ .cleanup = acm_tty_cleanup,
.hangup = acm_tty_hangup,
.write = acm_tty_write,
.write_room = acm_tty_write_room,
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
index ca7937f26e27..35ef887b7417 100644
--- a/drivers/usb/class/cdc-acm.h
+++ b/drivers/usb/class/cdc-acm.h
@@ -101,6 +101,7 @@ struct acm {
int transmitting;
spinlock_t write_lock;
struct mutex mutex;
+ bool disconnected;
struct usb_cdc_line_coding line; /* bits, stop, parity */
struct work_struct work; /* work queue entry for line discipline waking up */
unsigned int ctrlin; /* input control lines (DCD, DSR, RI, break, overruns) */
diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
index 81ef2e207a8d..a68c1a63dc65 100644
--- a/drivers/usb/class/usblp.c
+++ b/drivers/usb/class/usblp.c
@@ -1045,7 +1045,7 @@ static const struct file_operations usblp_fops = {
.llseek = noop_llseek,
};
-static char *usblp_devnode(struct device *dev, mode_t *mode)
+static char *usblp_devnode(struct device *dev, umode_t *mode)
{
return kasprintf(GFP_KERNEL, "usb/%s", dev_name(dev));
}
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index e3beaf229ee3..8df4b76465ac 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -86,13 +86,14 @@ struct async {
void __user *userbuffer;
void __user *userurb;
struct urb *urb;
+ unsigned int mem_usage;
int status;
u32 secid;
u8 bulk_addr;
u8 bulk_status;
};
-static int usbfs_snoop;
+static bool usbfs_snoop;
module_param(usbfs_snoop, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(usbfs_snoop, "true to log all usbfs traffic");
@@ -108,8 +109,44 @@ enum snoop_when {
#define USB_DEVICE_DEV MKDEV(USB_DEVICE_MAJOR, 0)
-#define MAX_USBFS_BUFFER_SIZE 16384
+/* Limit on the total amount of memory we can allocate for transfers */
+static unsigned usbfs_memory_mb = 16;
+module_param(usbfs_memory_mb, uint, 0644);
+MODULE_PARM_DESC(usbfs_memory_mb,
+ "maximum MB allowed for usbfs buffers (0 = no limit)");
+/* Hard limit, necessary to avoid arithmetic overflow */
+#define USBFS_XFER_MAX (UINT_MAX / 2 - 1000000)
+
+static atomic_t usbfs_memory_usage; /* Total memory currently allocated */
+
+/* Check whether it's okay to allocate more memory for a transfer */
+static int usbfs_increase_memory_usage(unsigned amount)
+{
+ unsigned lim;
+
+ /*
+ * Convert usbfs_memory_mb to bytes, avoiding overflows.
+ * 0 means use the hard limit (effectively unlimited).
+ */
+ lim = ACCESS_ONCE(usbfs_memory_mb);
+ if (lim == 0 || lim > (USBFS_XFER_MAX >> 20))
+ lim = USBFS_XFER_MAX;
+ else
+ lim <<= 20;
+
+ atomic_add(amount, &usbfs_memory_usage);
+ if (atomic_read(&usbfs_memory_usage) <= lim)
+ return 0;
+ atomic_sub(amount, &usbfs_memory_usage);
+ return -ENOMEM;
+}
+
+/* Memory for a transfer is being deallocated */
+static void usbfs_decrease_memory_usage(unsigned amount)
+{
+ atomic_sub(amount, &usbfs_memory_usage);
+}
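The intended usage pattern, which proc_control() and proc_bulk() below follow, is to reserve before allocating and to release on every exit path; a condensed sketch (illustrative helper, not from the patch):

static int example_reserve_and_alloc(size_t len, void **bufp)
{
	int ret;

	ret = usbfs_increase_memory_usage(len + sizeof(struct urb));
	if (ret)
		return ret;

	*bufp = kmalloc(len, GFP_KERNEL);
	if (!*bufp) {
		usbfs_decrease_memory_usage(len + sizeof(struct urb));
		return -ENOMEM;
	}
	/* caller must kfree(*bufp) and call usbfs_decrease_memory_usage() when done */
	return 0;
}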
static int connected(struct dev_state *ps)
{
@@ -249,10 +286,12 @@ static struct async *alloc_async(unsigned int numisoframes)
static void free_async(struct async *as)
{
put_pid(as->pid);
- put_cred(as->cred);
+ if (as->cred)
+ put_cred(as->cred);
kfree(as->urb->transfer_buffer);
kfree(as->urb->setup_packet);
usb_free_urb(as->urb);
+ usbfs_decrease_memory_usage(as->mem_usage);
kfree(as);
}
@@ -792,9 +831,15 @@ static int proc_control(struct dev_state *ps, void __user *arg)
wLength = ctrl.wLength; /* To suppress 64k PAGE_SIZE warning */
if (wLength > PAGE_SIZE)
return -EINVAL;
+ ret = usbfs_increase_memory_usage(PAGE_SIZE + sizeof(struct urb) +
+ sizeof(struct usb_ctrlrequest));
+ if (ret)
+ return ret;
tbuf = (unsigned char *)__get_free_page(GFP_KERNEL);
- if (!tbuf)
- return -ENOMEM;
+ if (!tbuf) {
+ ret = -ENOMEM;
+ goto done;
+ }
tmo = ctrl.timeout;
snoop(&dev->dev, "control urb: bRequestType=%02x "
"bRequest=%02x wValue=%04x "
@@ -806,8 +851,8 @@ static int proc_control(struct dev_state *ps, void __user *arg)
if (ctrl.bRequestType & 0x80) {
if (ctrl.wLength && !access_ok(VERIFY_WRITE, ctrl.data,
ctrl.wLength)) {
- free_page((unsigned long)tbuf);
- return -EINVAL;
+ ret = -EINVAL;
+ goto done;
}
pipe = usb_rcvctrlpipe(dev, 0);
snoop_urb(dev, NULL, pipe, ctrl.wLength, tmo, SUBMIT, NULL, 0);
@@ -821,15 +866,15 @@ static int proc_control(struct dev_state *ps, void __user *arg)
tbuf, max(i, 0));
if ((i > 0) && ctrl.wLength) {
if (copy_to_user(ctrl.data, tbuf, i)) {
- free_page((unsigned long)tbuf);
- return -EFAULT;
+ ret = -EFAULT;
+ goto done;
}
}
} else {
if (ctrl.wLength) {
if (copy_from_user(tbuf, ctrl.data, ctrl.wLength)) {
- free_page((unsigned long)tbuf);
- return -EFAULT;
+ ret = -EFAULT;
+ goto done;
}
}
pipe = usb_sndctrlpipe(dev, 0);
@@ -843,14 +888,18 @@ static int proc_control(struct dev_state *ps, void __user *arg)
usb_lock_device(dev);
snoop_urb(dev, NULL, pipe, max(i, 0), min(i, 0), COMPLETE, NULL, 0);
}
- free_page((unsigned long)tbuf);
if (i < 0 && i != -EPIPE) {
dev_printk(KERN_DEBUG, &dev->dev, "usbfs: USBDEVFS_CONTROL "
"failed cmd %s rqt %u rq %u len %u ret %d\n",
current->comm, ctrl.bRequestType, ctrl.bRequest,
ctrl.wLength, i);
}
- return i;
+ ret = i;
+ done:
+ free_page((unsigned long) tbuf);
+ usbfs_decrease_memory_usage(PAGE_SIZE + sizeof(struct urb) +
+ sizeof(struct usb_ctrlrequest));
+ return ret;
}
static int proc_bulk(struct dev_state *ps, void __user *arg)
@@ -877,15 +926,20 @@ static int proc_bulk(struct dev_state *ps, void __user *arg)
if (!usb_maxpacket(dev, pipe, !(bulk.ep & USB_DIR_IN)))
return -EINVAL;
len1 = bulk.len;
- if (len1 > MAX_USBFS_BUFFER_SIZE)
+ if (len1 >= USBFS_XFER_MAX)
return -EINVAL;
- if (!(tbuf = kmalloc(len1, GFP_KERNEL)))
- return -ENOMEM;
+ ret = usbfs_increase_memory_usage(len1 + sizeof(struct urb));
+ if (ret)
+ return ret;
+ if (!(tbuf = kmalloc(len1, GFP_KERNEL))) {
+ ret = -ENOMEM;
+ goto done;
+ }
tmo = bulk.timeout;
if (bulk.ep & 0x80) {
if (len1 && !access_ok(VERIFY_WRITE, bulk.data, len1)) {
- kfree(tbuf);
- return -EINVAL;
+ ret = -EINVAL;
+ goto done;
}
snoop_urb(dev, NULL, pipe, len1, tmo, SUBMIT, NULL, 0);
@@ -896,15 +950,15 @@ static int proc_bulk(struct dev_state *ps, void __user *arg)
if (!i && len2) {
if (copy_to_user(bulk.data, tbuf, len2)) {
- kfree(tbuf);
- return -EFAULT;
+ ret = -EFAULT;
+ goto done;
}
}
} else {
if (len1) {
if (copy_from_user(tbuf, bulk.data, len1)) {
- kfree(tbuf);
- return -EFAULT;
+ ret = -EFAULT;
+ goto done;
}
}
snoop_urb(dev, NULL, pipe, len1, tmo, SUBMIT, tbuf, len1);
@@ -914,10 +968,11 @@ static int proc_bulk(struct dev_state *ps, void __user *arg)
usb_lock_device(dev);
snoop_urb(dev, NULL, pipe, len2, i, COMPLETE, NULL, 0);
}
+ ret = (i < 0 ? i : len2);
+ done:
kfree(tbuf);
- if (i < 0)
- return i;
- return len2;
+ usbfs_decrease_memory_usage(len1 + sizeof(struct urb));
+ return ret;
}
static int proc_resetep(struct dev_state *ps, void __user *arg)
@@ -1062,7 +1117,7 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
{
struct usbdevfs_iso_packet_desc *isopkt = NULL;
struct usb_host_endpoint *ep;
- struct async *as;
+ struct async *as = NULL;
struct usb_ctrlrequest *dr = NULL;
unsigned int u, totlen, isofrmlen;
int ret, ifnum = -1;
@@ -1095,32 +1150,30 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
}
if (!ep)
return -ENOENT;
+
+ u = 0;
switch(uurb->type) {
case USBDEVFS_URB_TYPE_CONTROL:
if (!usb_endpoint_xfer_control(&ep->desc))
return -EINVAL;
- /* min 8 byte setup packet,
- * max 8 byte setup plus an arbitrary data stage */
- if (uurb->buffer_length < 8 ||
- uurb->buffer_length > (8 + MAX_USBFS_BUFFER_SIZE))
+ /* min 8 byte setup packet */
+ if (uurb->buffer_length < 8)
return -EINVAL;
dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL);
if (!dr)
return -ENOMEM;
if (copy_from_user(dr, uurb->buffer, 8)) {
- kfree(dr);
- return -EFAULT;
+ ret = -EFAULT;
+ goto error;
}
if (uurb->buffer_length < (le16_to_cpup(&dr->wLength) + 8)) {
- kfree(dr);
- return -EINVAL;
+ ret = -EINVAL;
+ goto error;
}
ret = check_ctrlrecip(ps, dr->bRequestType, dr->bRequest,
le16_to_cpup(&dr->wIndex));
- if (ret) {
- kfree(dr);
- return ret;
- }
+ if (ret)
+ goto error;
uurb->number_of_packets = 0;
uurb->buffer_length = le16_to_cpup(&dr->wLength);
uurb->buffer += 8;
@@ -1138,6 +1191,7 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
__le16_to_cpup(&dr->wValue),
__le16_to_cpup(&dr->wIndex),
__le16_to_cpup(&dr->wLength));
+ u = sizeof(struct usb_ctrlrequest);
break;
case USBDEVFS_URB_TYPE_BULK:
@@ -1151,8 +1205,6 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
goto interrupt_urb;
}
uurb->number_of_packets = 0;
- if (uurb->buffer_length > MAX_USBFS_BUFFER_SIZE)
- return -EINVAL;
break;
case USBDEVFS_URB_TYPE_INTERRUPT:
@@ -1160,8 +1212,6 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
return -EINVAL;
interrupt_urb:
uurb->number_of_packets = 0;
- if (uurb->buffer_length > MAX_USBFS_BUFFER_SIZE)
- return -EINVAL;
break;
case USBDEVFS_URB_TYPE_ISO:
@@ -1176,50 +1226,53 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
if (!(isopkt = kmalloc(isofrmlen, GFP_KERNEL)))
return -ENOMEM;
if (copy_from_user(isopkt, iso_frame_desc, isofrmlen)) {
- kfree(isopkt);
- return -EFAULT;
+ ret = -EFAULT;
+ goto error;
}
for (totlen = u = 0; u < uurb->number_of_packets; u++) {
/* arbitrary limit,
* sufficient for USB 2.0 high-bandwidth iso */
if (isopkt[u].length > 8192) {
- kfree(isopkt);
- return -EINVAL;
+ ret = -EINVAL;
+ goto error;
}
totlen += isopkt[u].length;
}
- /* 3072 * 64 microframes */
- if (totlen > 196608) {
- kfree(isopkt);
- return -EINVAL;
- }
+ u *= sizeof(struct usb_iso_packet_descriptor);
uurb->buffer_length = totlen;
break;
default:
return -EINVAL;
}
+
+ if (uurb->buffer_length >= USBFS_XFER_MAX) {
+ ret = -EINVAL;
+ goto error;
+ }
if (uurb->buffer_length > 0 &&
!access_ok(is_in ? VERIFY_WRITE : VERIFY_READ,
uurb->buffer, uurb->buffer_length)) {
- kfree(isopkt);
- kfree(dr);
- return -EFAULT;
+ ret = -EFAULT;
+ goto error;
}
as = alloc_async(uurb->number_of_packets);
if (!as) {
- kfree(isopkt);
- kfree(dr);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto error;
}
+ u += sizeof(struct async) + sizeof(struct urb) + uurb->buffer_length;
+ ret = usbfs_increase_memory_usage(u);
+ if (ret)
+ goto error;
+ as->mem_usage = u;
+
if (uurb->buffer_length > 0) {
as->urb->transfer_buffer = kmalloc(uurb->buffer_length,
GFP_KERNEL);
if (!as->urb->transfer_buffer) {
- kfree(isopkt);
- kfree(dr);
- free_async(as);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto error;
}
/* Isochronous input data may end up being discontiguous
* if some of the packets are short. Clear the buffer so
@@ -1253,6 +1306,7 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
as->urb->transfer_buffer_length = uurb->buffer_length;
as->urb->setup_packet = (unsigned char *)dr;
+ dr = NULL;
as->urb->start_frame = uurb->start_frame;
as->urb->number_of_packets = uurb->number_of_packets;
if (uurb->type == USBDEVFS_URB_TYPE_ISO ||
@@ -1268,6 +1322,7 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
totlen += isopkt[u].length;
}
kfree(isopkt);
+ isopkt = NULL;
as->ps = ps;
as->userurb = arg;
if (is_in && uurb->buffer_length > 0)
@@ -1282,8 +1337,8 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
if (!is_in && uurb->buffer_length > 0) {
if (copy_from_user(as->urb->transfer_buffer, uurb->buffer,
uurb->buffer_length)) {
- free_async(as);
- return -EFAULT;
+ ret = -EFAULT;
+ goto error;
}
}
snoop_urb(ps->dev, as->userurb, as->urb->pipe,
@@ -1329,10 +1384,16 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
snoop_urb(ps->dev, as->userurb, as->urb->pipe,
0, ret, COMPLETE, NULL, 0);
async_removepending(as);
- free_async(as);
- return ret;
+ goto error;
}
return 0;
+
+ error:
+ kfree(isopkt);
+ kfree(dr);
+ if (as)
+ free_async(as);
+ return ret;
}
static int proc_submiturb(struct dev_state *ps, void __user *arg)
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 45887a0ff873..d40ff9568813 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -45,10 +45,12 @@ ssize_t usb_store_new_id(struct usb_dynids *dynids,
struct usb_dynid *dynid;
u32 idVendor = 0;
u32 idProduct = 0;
+ unsigned int bInterfaceClass = 0;
int fields = 0;
int retval = 0;
- fields = sscanf(buf, "%x %x", &idVendor, &idProduct);
+ fields = sscanf(buf, "%x %x %x", &idVendor, &idProduct,
+ &bInterfaceClass);
if (fields < 2)
return -EINVAL;
@@ -60,6 +62,10 @@ ssize_t usb_store_new_id(struct usb_dynids *dynids,
dynid->id.idVendor = idVendor;
dynid->id.idProduct = idProduct;
dynid->id.match_flags = USB_DEVICE_ID_MATCH_DEVICE;
+ if (fields == 3) {
+ dynid->id.bInterfaceClass = (u8)bInterfaceClass;
+ dynid->id.match_flags |= USB_DEVICE_ID_MATCH_INT_CLASS;
+ }
spin_lock(&dynids->lock);
list_add_tail(&dynid->node, &dynids->list);
@@ -1073,17 +1079,10 @@ static int usb_suspend_interface(struct usb_device *udev,
goto done;
driver = to_usb_driver(intf->dev.driver);
- if (driver->suspend) {
- status = driver->suspend(intf, msg);
- if (status && !PMSG_IS_AUTO(msg))
- dev_err(&intf->dev, "%s error %d\n",
- "suspend", status);
- } else {
- /* Later we will unbind the driver and reprobe */
- intf->needs_binding = 1;
- dev_warn(&intf->dev, "no %s for driver %s?\n",
- "suspend", driver->name);
- }
+ /* at this time we know the driver supports suspend */
+ status = driver->suspend(intf, msg);
+ if (status && !PMSG_IS_AUTO(msg))
+ dev_err(&intf->dev, "suspend error %d\n", status);
done:
dev_vdbg(&intf->dev, "%s: status %d\n", __func__, status);
@@ -1132,16 +1131,9 @@ static int usb_resume_interface(struct usb_device *udev,
"reset_resume", driver->name);
}
} else {
- if (driver->resume) {
- status = driver->resume(intf);
- if (status)
- dev_err(&intf->dev, "%s error %d\n",
- "resume", status);
- } else {
- intf->needs_binding = 1;
- dev_warn(&intf->dev, "no %s for driver %s?\n",
- "resume", driver->name);
- }
+ status = driver->resume(intf);
+ if (status)
+ dev_err(&intf->dev, "resume error %d\n", status);
}
done:
diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c
index 99458c843d60..d95760de9e8b 100644
--- a/drivers/usb/core/file.c
+++ b/drivers/usb/core/file.c
@@ -66,7 +66,7 @@ static struct usb_class {
struct class *class;
} *usb_class;
-static char *usb_devnode(struct device *dev, mode_t *mode)
+static char *usb_devnode(struct device *dev, umode_t *mode)
{
struct usb_class_driver *drv;
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index a004db35f6d0..d136b8f4c8a7 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -453,10 +453,6 @@ static int resume_common(struct device *dev, int event)
pci_set_master(pci_dev);
- clear_bit(HCD_FLAG_SAW_IRQ, &hcd->flags);
- if (hcd->shared_hcd)
- clear_bit(HCD_FLAG_SAW_IRQ, &hcd->shared_hcd->flags);
-
if (hcd->driver->pci_resume && !HCD_DEAD(hcd)) {
if (event != PM_EVENT_AUTO_RESUME)
wait_for_companions(pci_dev, hcd);
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 13222d352a61..eb19cba34ac9 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -658,7 +658,7 @@ error:
len > offsetof(struct usb_device_descriptor,
bDeviceProtocol))
((struct usb_device_descriptor *) ubuf)->
- bDeviceProtocol = 1;
+ bDeviceProtocol = USB_HUB_PR_HS_SINGLE_TT;
}
/* any errors get returned through the urb completion */
@@ -1168,20 +1168,6 @@ int usb_hcd_check_unlink_urb(struct usb_hcd *hcd, struct urb *urb,
if (urb->unlinked)
return -EBUSY;
urb->unlinked = status;
-
- /* IRQ setup can easily be broken so that USB controllers
- * never get completion IRQs ... maybe even the ones we need to
- * finish unlinking the initial failed usb_set_address()
- * or device descriptor fetch.
- */
- if (!HCD_SAW_IRQ(hcd) && !is_root_hub(urb->dev)) {
- dev_warn(hcd->self.controller, "Unlink after no-IRQ? "
- "Controller is probably using the wrong IRQ.\n");
- set_bit(HCD_FLAG_SAW_IRQ, &hcd->flags);
- if (hcd->shared_hcd)
- set_bit(HCD_FLAG_SAW_IRQ, &hcd->shared_hcd->flags);
- }
-
return 0;
}
EXPORT_SYMBOL_GPL(usb_hcd_check_unlink_urb);
@@ -1412,11 +1398,10 @@ int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
ret = -EAGAIN;
else
urb->transfer_flags |= URB_DMA_MAP_SG;
- if (n != urb->num_sgs) {
- urb->num_sgs = n;
+ urb->num_mapped_sgs = n;
+ if (n != urb->num_sgs)
urb->transfer_flags |=
URB_DMA_SG_COMBINED;
- }
} else if (urb->sg) {
struct scatterlist *sg = urb->sg;
urb->transfer_dma = dma_map_page(
@@ -2148,16 +2133,12 @@ irqreturn_t usb_hcd_irq (int irq, void *__hcd)
*/
local_irq_save(flags);
- if (unlikely(HCD_DEAD(hcd) || !HCD_HW_ACCESSIBLE(hcd))) {
+ if (unlikely(HCD_DEAD(hcd) || !HCD_HW_ACCESSIBLE(hcd)))
rc = IRQ_NONE;
- } else if (hcd->driver->irq(hcd) == IRQ_NONE) {
+ else if (hcd->driver->irq(hcd) == IRQ_NONE)
rc = IRQ_NONE;
- } else {
- set_bit(HCD_FLAG_SAW_IRQ, &hcd->flags);
- if (hcd->shared_hcd)
- set_bit(HCD_FLAG_SAW_IRQ, &hcd->shared_hcd->flags);
+ else
rc = IRQ_HANDLED;
- }
local_irq_restore(flags);
return rc;
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 79781461eec9..a0613d8f9be7 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -84,7 +84,7 @@ struct usb_hub {
static inline int hub_is_superspeed(struct usb_device *hdev)
{
- return (hdev->descriptor.bDeviceProtocol == 3);
+ return (hdev->descriptor.bDeviceProtocol == USB_HUB_PR_SS);
}
/* Protect struct usb_device->state and ->children members
@@ -102,7 +102,7 @@ static DECLARE_WAIT_QUEUE_HEAD(khubd_wait);
static struct task_struct *khubd_task;
/* cycle leds on hubs that aren't blinking for attention */
-static int blinkenlights = 0;
+static bool blinkenlights = 0;
module_param (blinkenlights, bool, S_IRUGO);
MODULE_PARM_DESC (blinkenlights, "true to cycle leds on hubs");
@@ -131,12 +131,12 @@ MODULE_PARM_DESC(initial_descriptor_timeout,
* otherwise the new scheme is used. If that fails and "use_both_schemes"
* is set, then the driver will make another attempt, using the other scheme.
*/
-static int old_scheme_first = 0;
+static bool old_scheme_first = 0;
module_param(old_scheme_first, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(old_scheme_first,
"start with the old device initialization scheme");
-static int use_both_schemes = 1;
+static bool use_both_schemes = 1;
module_param(use_both_schemes, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(use_both_schemes,
"try the other device initialization scheme if the "
@@ -1041,58 +1041,58 @@ static int hub_configure(struct usb_hub *hub,
dev_dbg(hub_dev, "standalone hub\n");
switch (wHubCharacteristics & HUB_CHAR_LPSM) {
- case 0x00:
- dev_dbg(hub_dev, "ganged power switching\n");
- break;
- case 0x01:
- dev_dbg(hub_dev, "individual port power switching\n");
- break;
- case 0x02:
- case 0x03:
- dev_dbg(hub_dev, "no power switching (usb 1.0)\n");
- break;
+ case HUB_CHAR_COMMON_LPSM:
+ dev_dbg(hub_dev, "ganged power switching\n");
+ break;
+ case HUB_CHAR_INDV_PORT_LPSM:
+ dev_dbg(hub_dev, "individual port power switching\n");
+ break;
+ case HUB_CHAR_NO_LPSM:
+ case HUB_CHAR_LPSM:
+ dev_dbg(hub_dev, "no power switching (usb 1.0)\n");
+ break;
}
switch (wHubCharacteristics & HUB_CHAR_OCPM) {
- case 0x00:
- dev_dbg(hub_dev, "global over-current protection\n");
- break;
- case 0x08:
- dev_dbg(hub_dev, "individual port over-current protection\n");
- break;
- case 0x10:
- case 0x18:
- dev_dbg(hub_dev, "no over-current protection\n");
- break;
+ case HUB_CHAR_COMMON_OCPM:
+ dev_dbg(hub_dev, "global over-current protection\n");
+ break;
+ case HUB_CHAR_INDV_PORT_OCPM:
+ dev_dbg(hub_dev, "individual port over-current protection\n");
+ break;
+ case HUB_CHAR_NO_OCPM:
+ case HUB_CHAR_OCPM:
+ dev_dbg(hub_dev, "no over-current protection\n");
+ break;
}
spin_lock_init (&hub->tt.lock);
INIT_LIST_HEAD (&hub->tt.clear_list);
INIT_WORK(&hub->tt.clear_work, hub_tt_work);
switch (hdev->descriptor.bDeviceProtocol) {
- case 0:
- break;
- case 1:
- dev_dbg(hub_dev, "Single TT\n");
- hub->tt.hub = hdev;
- break;
- case 2:
- ret = usb_set_interface(hdev, 0, 1);
- if (ret == 0) {
- dev_dbg(hub_dev, "TT per port\n");
- hub->tt.multi = 1;
- } else
- dev_err(hub_dev, "Using single TT (err %d)\n",
- ret);
- hub->tt.hub = hdev;
- break;
- case 3:
- /* USB 3.0 hubs don't have a TT */
- break;
- default:
- dev_dbg(hub_dev, "Unrecognized hub protocol %d\n",
- hdev->descriptor.bDeviceProtocol);
- break;
+ case USB_HUB_PR_FS:
+ break;
+ case USB_HUB_PR_HS_SINGLE_TT:
+ dev_dbg(hub_dev, "Single TT\n");
+ hub->tt.hub = hdev;
+ break;
+ case USB_HUB_PR_HS_MULTI_TT:
+ ret = usb_set_interface(hdev, 0, 1);
+ if (ret == 0) {
+ dev_dbg(hub_dev, "TT per port\n");
+ hub->tt.multi = 1;
+ } else
+ dev_err(hub_dev, "Using single TT (err %d)\n",
+ ret);
+ hub->tt.hub = hdev;
+ break;
+ case USB_HUB_PR_SS:
+ /* USB 3.0 hubs don't have a TT */
+ break;
+ default:
+ dev_dbg(hub_dev, "Unrecognized hub protocol %d\n",
+ hdev->descriptor.bDeviceProtocol);
+ break;
}
/* Note 8 FS bit times == (8 bits / 12000000 bps) ~= 666ns */
@@ -1360,7 +1360,6 @@ descriptor_error:
return -ENODEV;
}
-/* No BKL needed */
static int
hub_ioctl(struct usb_interface *intf, unsigned int code, void *user_data)
{
@@ -2027,7 +2026,7 @@ static unsigned hub_is_wusb(struct usb_hub *hub)
#define SET_ADDRESS_TRIES 2
#define GET_DESCRIPTOR_TRIES 2
#define SET_CONFIG_TRIES (2 * (use_both_schemes + 1))
-#define USE_NEW_SCHEME(i) ((i) / 2 == old_scheme_first)
+#define USE_NEW_SCHEME(i) ((i) / 2 == (int)old_scheme_first)
#define HUB_ROOT_RESET_TIME 50 /* times are in msec */
#define HUB_SHORT_RESET_TIME 10
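A worked example of the retry selection above (sketch, not part of the patch): SET_CONFIG_TRIES is 2 * (use_both_schemes + 1), i.e. four attempts when both schemes are enabled.

/*
 * With old_scheme_first == false (the default):
 *   i = 0, 1  ->  USE_NEW_SCHEME(i) == (i / 2 == 0) == true   (new scheme first)
 *   i = 2, 3  ->  USE_NEW_SCHEME(i) == (i / 2 == 1) == false  (then the old one)
 * Setting old_scheme_first == true flips the order. The (int) cast keeps the
 * comparison against an int counter explicit now that the parameter is a bool.
 */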
diff --git a/drivers/usb/core/inode.c b/drivers/usb/core/inode.c
index 2278dad886e2..9e186f3da839 100644
--- a/drivers/usb/core/inode.c
+++ b/drivers/usb/core/inode.c
@@ -65,7 +65,7 @@ static umode_t devmode = USBFS_DEFAULT_DEVMODE;
static umode_t busmode = USBFS_DEFAULT_BUSMODE;
static umode_t listmode = USBFS_DEFAULT_LISTMODE;
-static int usbfs_show_options(struct seq_file *seq, struct vfsmount *mnt)
+static int usbfs_show_options(struct seq_file *seq, struct dentry *root)
{
if (devuid != 0)
seq_printf(seq, ",devuid=%u", devuid);
@@ -264,21 +264,19 @@ static int remount(struct super_block *sb, int *flags, char *data)
return -EINVAL;
}
- if (usbfs_mount && usbfs_mount->mnt_sb)
+ if (usbfs_mount)
update_sb(usbfs_mount->mnt_sb);
return 0;
}
-static struct inode *usbfs_get_inode (struct super_block *sb, int mode, dev_t dev)
+static struct inode *usbfs_get_inode (struct super_block *sb, umode_t mode, dev_t dev)
{
struct inode *inode = new_inode(sb);
if (inode) {
inode->i_ino = get_next_ino();
- inode->i_mode = mode;
- inode->i_uid = current_fsuid();
- inode->i_gid = current_fsgid();
+ inode_init_owner(inode, NULL, mode);
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
switch (mode & S_IFMT) {
default:
@@ -300,7 +298,7 @@ static struct inode *usbfs_get_inode (struct super_block *sb, int mode, dev_t de
}
/* SMP-safe */
-static int usbfs_mknod (struct inode *dir, struct dentry *dentry, int mode,
+static int usbfs_mknod (struct inode *dir, struct dentry *dentry, umode_t mode,
dev_t dev)
{
struct inode *inode = usbfs_get_inode(dir->i_sb, mode, dev);
@@ -317,7 +315,7 @@ static int usbfs_mknod (struct inode *dir, struct dentry *dentry, int mode,
return error;
}
-static int usbfs_mkdir (struct inode *dir, struct dentry *dentry, int mode)
+static int usbfs_mkdir (struct inode *dir, struct dentry *dentry, umode_t mode)
{
int res;
@@ -328,7 +326,7 @@ static int usbfs_mkdir (struct inode *dir, struct dentry *dentry, int mode)
return res;
}
-static int usbfs_create (struct inode *dir, struct dentry *dentry, int mode)
+static int usbfs_create (struct inode *dir, struct dentry *dentry, umode_t mode)
{
mode = (mode & S_IALLUGO) | S_IFREG;
return usbfs_mknod (dir, dentry, mode, 0);
@@ -489,7 +487,7 @@ static int usbfs_fill_super(struct super_block *sb, void *data, int silent)
*
* This function handles both regular files and directories.
*/
-static int fs_create_by_name (const char *name, mode_t mode,
+static int fs_create_by_name (const char *name, umode_t mode,
struct dentry *parent, struct dentry **dentry)
{
int error = 0;
@@ -500,9 +498,8 @@ static int fs_create_by_name (const char *name, mode_t mode,
* have around.
*/
if (!parent ) {
- if (usbfs_mount && usbfs_mount->mnt_sb) {
- parent = usbfs_mount->mnt_sb->s_root;
- }
+ if (usbfs_mount)
+ parent = usbfs_mount->mnt_root;
}
if (!parent) {
@@ -514,7 +511,7 @@ static int fs_create_by_name (const char *name, mode_t mode,
mutex_lock(&parent->d_inode->i_mutex);
*dentry = lookup_one_len(name, parent, strlen(name));
if (!IS_ERR(*dentry)) {
- if ((mode & S_IFMT) == S_IFDIR)
+ if (S_ISDIR(mode))
error = usbfs_mkdir (parent->d_inode, *dentry, mode);
else
error = usbfs_create (parent->d_inode, *dentry, mode);
@@ -525,7 +522,7 @@ static int fs_create_by_name (const char *name, mode_t mode,
return error;
}
-static struct dentry *fs_create_file (const char *name, mode_t mode,
+static struct dentry *fs_create_file (const char *name, umode_t mode,
struct dentry *parent, void *data,
const struct file_operations *fops,
uid_t uid, gid_t gid)
@@ -608,7 +605,7 @@ static int create_special_files (void)
ignore_mount = 0;
- parent = usbfs_mount->mnt_sb->s_root;
+ parent = usbfs_mount->mnt_root;
devices_usbfs_dentry = fs_create_file ("devices",
listmode | S_IFREG, parent,
NULL, &usbfs_devices_fops,
@@ -662,7 +659,7 @@ static void usbfs_add_bus(struct usb_bus *bus)
sprintf (name, "%03d", bus->busnum);
- parent = usbfs_mount->mnt_sb->s_root;
+ parent = usbfs_mount->mnt_root;
bus->usbfs_dentry = fs_create_file (name, busmode | S_IFDIR, parent,
bus, NULL, busuid, busgid);
if (bus->usbfs_dentry == NULL) {
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index ecf12e15a7ef..4c65eb6a867a 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -117,9 +117,12 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x06a3, 0x0006), .driver_info =
USB_QUIRK_CONFIG_INTF_STRINGS },
- /* Guillemot Webcam Hercules Dualpix Exchange*/
+ /* Guillemot Webcam Hercules Dualpix Exchange (2nd ID) */
{ USB_DEVICE(0x06f8, 0x0804), .driver_info = USB_QUIRK_RESET_RESUME },
+ /* Guillemot Webcam Hercules Dualpix Exchange*/
+ { USB_DEVICE(0x06f8, 0x3005), .driver_info = USB_QUIRK_RESET_RESUME },
+
/* M-Systems Flash Disk Pioneers */
{ USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME },
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index 662c0cf3a3e1..9e491ca2e5c4 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -642,7 +642,7 @@ static struct attribute *dev_string_attrs[] = {
NULL
};
-static mode_t dev_string_attrs_are_visible(struct kobject *kobj,
+static umode_t dev_string_attrs_are_visible(struct kobject *kobj,
struct attribute *a, int n)
{
struct device *dev = container_of(kobj, struct device, kobj);
@@ -877,7 +877,7 @@ static struct attribute *intf_assoc_attrs[] = {
NULL,
};
-static mode_t intf_assoc_attrs_are_visible(struct kobject *kobj,
+static umode_t intf_assoc_attrs_are_visible(struct kobject *kobj,
struct attribute *a, int n)
{
struct device *dev = container_of(kobj, struct device, kobj);
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 73cd90012ec5..8ca9f994a280 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -47,7 +47,7 @@
const char *usbcore_name = "usbcore";
-static int nousb; /* Disable USB when built into kernel image */
+static bool nousb; /* Disable USB when built into kernel image */
#ifdef CONFIG_USB_SUSPEND
static int usb_autosuspend_delay = 2; /* Default delay value,
@@ -326,7 +326,7 @@ static const struct dev_pm_ops usb_device_pm_ops = {
#endif /* CONFIG_PM */
-static char *usb_devnode(struct device *dev, mode_t *mode)
+static char *usb_devnode(struct device *dev, umode_t *mode)
{
struct usb_device *usb_dev;
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index 3888778582c4..45e8479c377d 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -132,20 +132,6 @@ static inline int is_usb_device_driver(struct device_driver *drv)
for_devices;
}
-/* translate USB error codes to codes user space understands */
-static inline int usb_translate_errors(int error_code)
-{
- switch (error_code) {
- case 0:
- case -ENOMEM:
- case -ENODEV:
- return error_code;
- default:
- return -EIO;
- }
-}
-
-
/* for labeling diagnostics */
extern const char *usbcore_name;
diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig
index 3c1d67d324fd..d8f741f9e56e 100644
--- a/drivers/usb/dwc3/Kconfig
+++ b/drivers/usb/dwc3/Kconfig
@@ -1,7 +1,10 @@
config USB_DWC3
tristate "DesignWare USB3 DRD Core Support"
- depends on (USB || USB_GADGET)
+ depends on (USB && USB_GADGET)
select USB_OTG_UTILS
+ select USB_GADGET_DUALSPEED
+ select USB_GADGET_SUPERSPEED
+ select USB_XHCI_PLATFORM
help
Say Y or M here if your system has a Dual Role SuperSpeed
USB controller based on the DesignWare USB3 IP Core.
diff --git a/drivers/usb/dwc3/Makefile b/drivers/usb/dwc3/Makefile
index 593d1dbc465b..900ae74357f1 100644
--- a/drivers/usb/dwc3/Makefile
+++ b/drivers/usb/dwc3/Makefile
@@ -4,10 +4,8 @@ ccflags-$(CONFIG_USB_DWC3_VERBOSE) += -DVERBOSE_DEBUG
obj-$(CONFIG_USB_DWC3) += dwc3.o
dwc3-y := core.o
-
-ifneq ($(CONFIG_USB_GADGET_DWC3),)
- dwc3-y += gadget.o ep0.o
-endif
+dwc3-y += host.o
+dwc3-y += gadget.o ep0.o
ifneq ($(CONFIG_DEBUG_FS),)
dwc3-y += debugfs.o
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 717ebc9ff941..7c9df630dbe4 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -59,6 +59,60 @@
#include "debug.h"
+static char *maximum_speed = "super";
+module_param(maximum_speed, charp, 0);
+MODULE_PARM_DESC(maximum_speed, "Maximum supported speed.");
+
+/* -------------------------------------------------------------------------- */
+
+#define DWC3_DEVS_POSSIBLE 32
+
+static DECLARE_BITMAP(dwc3_devs, DWC3_DEVS_POSSIBLE);
+
+int dwc3_get_device_id(void)
+{
+ int id;
+
+again:
+ id = find_first_zero_bit(dwc3_devs, DWC3_DEVS_POSSIBLE);
+ if (id < DWC3_DEVS_POSSIBLE) {
+ int old;
+
+ old = test_and_set_bit(id, dwc3_devs);
+ if (old)
+ goto again;
+ } else {
+ pr_err("dwc3: no space for new device\n");
+ id = -ENOMEM;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dwc3_get_device_id);
+
+void dwc3_put_device_id(int id)
+{
+ int ret;
+
+ if (id < 0)
+ return;
+
+ ret = test_bit(id, dwc3_devs);
+ WARN(!ret, "dwc3: ID %d not in use\n", id);
+ clear_bit(id, dwc3_devs);
+}
+EXPORT_SYMBOL_GPL(dwc3_put_device_id);
+
+void dwc3_set_mode(struct dwc3 *dwc, u32 mode)
+{
+ u32 reg;
+
+ reg = dwc3_readl(dwc->regs, DWC3_GCTL);
+ reg &= ~(DWC3_GCTL_PRTCAPDIR(DWC3_GCTL_PRTCAP_OTG));
+ reg |= DWC3_GCTL_PRTCAPDIR(mode);
+ dwc3_writel(dwc->regs, DWC3_GCTL, reg);
+}
+
/**
* dwc3_core_soft_reset - Issues core soft reset and PHY reset
* @dwc: pointer to our context structure
@@ -150,7 +204,7 @@ static void dwc3_free_event_buffers(struct dwc3 *dwc)
struct dwc3_event_buffer *evt;
int i;
- for (i = 0; i < DWC3_EVENT_BUFFERS_NUM; i++) {
+ for (i = 0; i < dwc->num_event_buffers; i++) {
evt = dwc->ev_buffs[i];
if (evt) {
dwc3_free_one_event_buffer(dwc, evt);
@@ -162,17 +216,25 @@ static void dwc3_free_event_buffers(struct dwc3 *dwc)
/**
* dwc3_alloc_event_buffers - Allocates @num event buffers of size @length
 * @dwc: Pointer to our controller context structure
- * @num: number of event buffers to allocate
* @length: size of event buffer
*
 * Returns 0 on success otherwise negative errno. In the error case, dwc
* may contain some buffers allocated but not all which were requested.
*/
-static int __devinit dwc3_alloc_event_buffers(struct dwc3 *dwc, unsigned num,
- unsigned length)
+static int __devinit dwc3_alloc_event_buffers(struct dwc3 *dwc, unsigned length)
{
+ int num;
int i;
+ num = DWC3_NUM_INT(dwc->hwparams.hwparams1);
+ dwc->num_event_buffers = num;
+
+ dwc->ev_buffs = kzalloc(sizeof(*dwc->ev_buffs) * num, GFP_KERNEL);
+ if (!dwc->ev_buffs) {
+ dev_err(dwc->dev, "can't allocate event buffers array\n");
+ return -ENOMEM;
+ }
+
for (i = 0; i < num; i++) {
struct dwc3_event_buffer *evt;
@@ -198,7 +260,7 @@ static int __devinit dwc3_event_buffers_setup(struct dwc3 *dwc)
struct dwc3_event_buffer *evt;
int n;
- for (n = 0; n < DWC3_EVENT_BUFFERS_NUM; n++) {
+ for (n = 0; n < dwc->num_event_buffers; n++) {
evt = dwc->ev_buffs[n];
dev_dbg(dwc->dev, "Event buf %p dma %08llx length %d\n",
evt->buf, (unsigned long long) evt->dma,
@@ -221,7 +283,7 @@ static void dwc3_event_buffers_cleanup(struct dwc3 *dwc)
struct dwc3_event_buffer *evt;
int n;
- for (n = 0; n < DWC3_EVENT_BUFFERS_NUM; n++) {
+ for (n = 0; n < dwc->num_event_buffers; n++) {
evt = dwc->ev_buffs[n];
dwc3_writel(dwc->regs, DWC3_GEVNTADRLO(n), 0);
dwc3_writel(dwc->regs, DWC3_GEVNTADRHI(n), 0);
@@ -264,7 +326,7 @@ static int __devinit dwc3_core_init(struct dwc3 *dwc)
ret = -ENODEV;
goto err0;
}
- dwc->revision = reg & DWC3_GSNPSREV_MASK;
+ dwc->revision = reg;
dwc3_core_soft_reset(dwc);
@@ -285,8 +347,32 @@ static int __devinit dwc3_core_init(struct dwc3 *dwc)
cpu_relax();
} while (true);
- ret = dwc3_alloc_event_buffers(dwc, DWC3_EVENT_BUFFERS_NUM,
- DWC3_EVENT_BUFFERS_SIZE);
+ dwc3_cache_hwparams(dwc);
+
+ reg = dwc3_readl(dwc->regs, DWC3_GCTL);
+ reg &= ~DWC3_GCTL_SCALEDOWN(3);
+ reg &= ~DWC3_GCTL_DISSCRAMBLE;
+
+ switch (DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1)) {
+ case DWC3_GHWPARAMS1_EN_PWROPT_CLK:
+ reg &= ~DWC3_GCTL_DSBLCLKGTNG;
+ break;
+ default:
+ dev_dbg(dwc->dev, "No power optimization available\n");
+ }
+
+ /*
+	 * WORKAROUND: DWC3 revisions <1.90a have a bug where
+	 * the device fails to connect at SuperSpeed and falls
+	 * back to high-speed mode, which causes the device to
+	 * enter a Connect/Disconnect loop
+ */
+ if (dwc->revision < DWC3_REVISION_190A)
+ reg |= DWC3_GCTL_U2RSTECN;
+
+ dwc3_writel(dwc->regs, DWC3_GCTL, reg);
+
+ ret = dwc3_alloc_event_buffers(dwc, DWC3_EVENT_BUFFERS_SIZE);
if (ret) {
dev_err(dwc->dev, "failed to allocate event buffers\n");
ret = -ENOMEM;
@@ -299,8 +385,6 @@ static int __devinit dwc3_core_init(struct dwc3 *dwc)
goto err1;
}
- dwc3_cache_hwparams(dwc);
-
return 0;
err1:
@@ -320,15 +404,17 @@ static void dwc3_core_exit(struct dwc3 *dwc)
static int __devinit dwc3_probe(struct platform_device *pdev)
{
- const struct platform_device_id *id = platform_get_device_id(pdev);
struct resource *res;
struct dwc3 *dwc;
- void __iomem *regs;
- unsigned int features = id->driver_data;
+
int ret = -ENOMEM;
int irq;
+
+ void __iomem *regs;
void *mem;
+ u8 mode;
+
mem = kzalloc(sizeof(*dwc) + DWC3_ALIGN_MASK, GFP_KERNEL);
if (!mem) {
dev_err(&pdev->dev, "not enough memory\n");
@@ -343,6 +429,8 @@ static int __devinit dwc3_probe(struct platform_device *pdev)
goto err1;
}
+ dwc->res = res;
+
res = request_mem_region(res->start, resource_size(res),
dev_name(&pdev->dev));
if (!res) {
@@ -370,6 +458,17 @@ static int __devinit dwc3_probe(struct platform_device *pdev)
dwc->dev = &pdev->dev;
dwc->irq = irq;
+ if (!strncmp("super", maximum_speed, 5))
+ dwc->maximum_speed = DWC3_DCFG_SUPERSPEED;
+ else if (!strncmp("high", maximum_speed, 4))
+ dwc->maximum_speed = DWC3_DCFG_HIGHSPEED;
+ else if (!strncmp("full", maximum_speed, 4))
+ dwc->maximum_speed = DWC3_DCFG_FULLSPEED1;
+ else if (!strncmp("low", maximum_speed, 3))
+ dwc->maximum_speed = DWC3_DCFG_LOWSPEED;
+ else
+ dwc->maximum_speed = DWC3_DCFG_SUPERSPEED;
+
pm_runtime_enable(&pdev->dev);
pm_runtime_get_sync(&pdev->dev);
pm_runtime_forbid(&pdev->dev);
@@ -380,13 +479,44 @@ static int __devinit dwc3_probe(struct platform_device *pdev)
goto err3;
}
- if (features & DWC3_HAS_PERIPHERAL) {
+ mode = DWC3_MODE(dwc->hwparams.hwparams0);
+
+ switch (mode) {
+ case DWC3_MODE_DEVICE:
+ dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_DEVICE);
ret = dwc3_gadget_init(dwc);
if (ret) {
- dev_err(&pdev->dev, "failed to initialized gadget\n");
+ dev_err(&pdev->dev, "failed to initialize gadget\n");
goto err4;
}
+ break;
+ case DWC3_MODE_HOST:
+ dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_HOST);
+ ret = dwc3_host_init(dwc);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to initialize host\n");
+ goto err4;
+ }
+ break;
+ case DWC3_MODE_DRD:
+ dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_OTG);
+ ret = dwc3_host_init(dwc);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to initialize host\n");
+ goto err4;
+ }
+
+ ret = dwc3_gadget_init(dwc);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to initialize gadget\n");
+ goto err4;
+ }
+ break;
+ default:
+ dev_err(&pdev->dev, "Unsupported mode of operation %d\n", mode);
+ goto err4;
}
+ dwc->mode = mode;
ret = dwc3_debugfs_init(dwc);
if (ret) {
@@ -399,8 +529,21 @@ static int __devinit dwc3_probe(struct platform_device *pdev)
return 0;
err5:
- if (features & DWC3_HAS_PERIPHERAL)
+ switch (mode) {
+ case DWC3_MODE_DEVICE:
+ dwc3_gadget_exit(dwc);
+ break;
+ case DWC3_MODE_HOST:
+ dwc3_host_exit(dwc);
+ break;
+ case DWC3_MODE_DRD:
+ dwc3_host_exit(dwc);
dwc3_gadget_exit(dwc);
+ break;
+ default:
+ /* do nothing */
+ break;
+ }
err4:
dwc3_core_exit(dwc);
@@ -420,10 +563,8 @@ err0:
static int __devexit dwc3_remove(struct platform_device *pdev)
{
- const struct platform_device_id *id = platform_get_device_id(pdev);
struct dwc3 *dwc = platform_get_drvdata(pdev);
struct resource *res;
- unsigned int features = id->driver_data;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -432,8 +573,21 @@ static int __devexit dwc3_remove(struct platform_device *pdev)
dwc3_debugfs_exit(dwc);
- if (features & DWC3_HAS_PERIPHERAL)
+ switch (dwc->mode) {
+ case DWC3_MODE_DEVICE:
+ dwc3_gadget_exit(dwc);
+ break;
+ case DWC3_MODE_HOST:
+ dwc3_host_exit(dwc);
+ break;
+ case DWC3_MODE_DRD:
+ dwc3_host_exit(dwc);
dwc3_gadget_exit(dwc);
+ break;
+ default:
+ /* do nothing */
+ break;
+ }
dwc3_core_exit(dwc);
release_mem_region(res->start, resource_size(res));
@@ -443,30 +597,15 @@ static int __devexit dwc3_remove(struct platform_device *pdev)
return 0;
}
-static const struct platform_device_id dwc3_id_table[] __devinitconst = {
- {
- .name = "dwc3-omap",
- .driver_data = (DWC3_HAS_PERIPHERAL
- | DWC3_HAS_XHCI
- | DWC3_HAS_OTG),
- },
- {
- .name = "dwc3-pci",
- .driver_data = DWC3_HAS_PERIPHERAL,
- },
- { }, /* Terminating Entry */
-};
-MODULE_DEVICE_TABLE(platform, dwc3_id_table);
-
static struct platform_driver dwc3_driver = {
.probe = dwc3_probe,
.remove = __devexit_p(dwc3_remove),
.driver = {
.name = "dwc3",
},
- .id_table = dwc3_id_table,
};
+MODULE_ALIAS("platform:dwc3");
MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("DesignWare USB3 DRD Controller Driver");
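
The new instance-id allocator in core.c is built on a static DECLARE_BITMAP plus test_and_set_bit(), so the OMAP and PCI glue layers can hand out unique platform-device ids without holding a lock. A rough standalone sketch of that pattern with made-up names (note this sketch hands the claimed id back to the caller, whereas the hunk's dwc3_get_device_id() ends with return 0):

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/errno.h>

#define EXAMPLE_DEVS_POSSIBLE	32

static DECLARE_BITMAP(example_devs, EXAMPLE_DEVS_POSSIBLE);

/* find a free slot, claim it atomically, retry if we raced with someone */
static int example_get_device_id(void)
{
	int id;

again:
	id = find_first_zero_bit(example_devs, EXAMPLE_DEVS_POSSIBLE);
	if (id >= EXAMPLE_DEVS_POSSIBLE)
		return -ENOMEM;		/* no free instance numbers left */

	if (test_and_set_bit(id, example_devs))
		goto again;		/* lost the race, look again */

	return id;
}

static void example_put_device_id(int id)
{
	if (id < 0)
		return;

	clear_bit(id, example_devs);
}

A glue driver would pair example_get_device_id() with example_put_device_id() around platform_device_alloc()/unregister(), mirroring what dwc3-omap.c and dwc3-pci.c do further down.
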
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 29a8e1679e12..9e57f8e9bf17 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -41,6 +41,7 @@
#include <linux/device.h>
#include <linux/spinlock.h>
+#include <linux/ioport.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
@@ -52,7 +53,6 @@
/* Global constants */
#define DWC3_ENDPOINTS_NUM 32
-#define DWC3_EVENT_BUFFERS_NUM 2
#define DWC3_EVENT_BUFFERS_SIZE PAGE_SIZE
#define DWC3_EVENT_TYPE_MASK 0xfe
@@ -153,6 +153,7 @@
#define DWC3_GCTL_CLK_PIPEHALF (2)
#define DWC3_GCTL_CLK_MASK (3)
+#define DWC3_GCTL_PRTCAP(n) (((n) & (3 << 12)) >> 12)
#define DWC3_GCTL_PRTCAPDIR(n) (n << 12)
#define DWC3_GCTL_PRTCAP_HOST 1
#define DWC3_GCTL_PRTCAP_DEVICE 2
@@ -347,6 +348,7 @@ struct dwc3_ep {
u32 free_slot;
u32 busy_slot;
const struct usb_endpoint_descriptor *desc;
+ const struct usb_ss_ep_comp_descriptor *comp_desc;
struct dwc3 *dwc;
unsigned flags;
@@ -536,6 +538,31 @@ struct dwc3_hwparams {
u32 hwparams8;
};
+/* HWPARAMS0 */
+#define DWC3_MODE(n) ((n) & 0x7)
+
+#define DWC3_MODE_DEVICE 0
+#define DWC3_MODE_HOST 1
+#define DWC3_MODE_DRD 2
+#define DWC3_MODE_HUB 3
+
+/* HWPARAMS1 */
+#define DWC3_NUM_INT(n) (((n) & (0x3f << 15)) >> 15)
+
+struct dwc3_request {
+ struct usb_request request;
+ struct list_head list;
+ struct dwc3_ep *dep;
+
+ u8 epnum;
+ struct dwc3_trb_hw *trb;
+ dma_addr_t trb_dma;
+
+ unsigned direction:1;
+ unsigned mapped:1;
+ unsigned queued:1;
+};
+
/**
* struct dwc3 - representation of our controller
* @ctrl_req: usb control request which is used for ep0
@@ -549,19 +576,24 @@ struct dwc3_hwparams {
* @ep0_bounce_addr: dma address of ep0_bounce
* @lock: for synchronizing
* @dev: pointer to our struct device
+ * @xhci: pointer to our xHCI child
* @event_buffer_list: a list of event buffers
* @gadget: device side representation of the peripheral controller
* @gadget_driver: pointer to the gadget driver
* @regs: base address for our registers
* @regs_size: address space size
* @irq: IRQ number
+ * @num_event_buffers: calculated number of event buffers
+ * @u1u2: only used on revisions <1.83a for workaround
+ * @maximum_speed: maximum speed requested (mainly for testing purposes)
* @revision: revision register contents
+ * @mode: mode of operation
* @is_selfpowered: true when we are selfpowered
* @three_stage_setup: set if we perform a three phase setup
- * @ep0_status_pending: ep0 status response without a req is pending
* @ep0_bounced: true when we used bounce buffer
* @ep0_expect_in: true when we expect a DATA IN transfer
* @start_config_issued: true when StartConfig command has been issued
+ * @setup_packet_pending: true when there's a Setup Packet in FIFO. Workaround
* @ep0_next_event: hold the next expected event
* @ep0state: state of endpoint zero
* @link_state: link state
@@ -579,12 +611,15 @@ struct dwc3 {
dma_addr_t ep0_trb_addr;
dma_addr_t setup_buf_addr;
dma_addr_t ep0_bounce_addr;
- struct usb_request ep0_usb_req;
+ struct dwc3_request ep0_usb_req;
/* device lock */
spinlock_t lock;
struct device *dev;
- struct dwc3_event_buffer *ev_buffs[DWC3_EVENT_BUFFERS_NUM];
+ struct platform_device *xhci;
+ struct resource *res;
+
+ struct dwc3_event_buffer **ev_buffs;
struct dwc3_ep *eps[DWC3_ENDPOINTS_NUM];
struct usb_gadget gadget;
@@ -595,7 +630,11 @@ struct dwc3 {
int irq;
+ u32 num_event_buffers;
+ u32 u1u2;
+ u32 maximum_speed;
u32 revision;
+ u32 mode;
#define DWC3_REVISION_173A 0x5533173a
#define DWC3_REVISION_175A 0x5533175a
@@ -607,10 +646,11 @@ struct dwc3 {
unsigned is_selfpowered:1;
unsigned three_stage_setup:1;
- unsigned ep0_status_pending:1;
unsigned ep0_bounced:1;
unsigned ep0_expect_in:1;
unsigned start_config_issued:1;
+ unsigned setup_packet_pending:1;
+ unsigned delayed_status:1;
enum dwc3_ep0_next ep0_next_event;
enum dwc3_ep0_state ep0state;
@@ -765,4 +805,16 @@ union dwc3_event {
#define DWC3_HAS_XHCI BIT(1)
#define DWC3_HAS_OTG BIT(3)
+/* prototypes */
+void dwc3_set_mode(struct dwc3 *dwc, u32 mode);
+
+int dwc3_host_init(struct dwc3 *dwc);
+void dwc3_host_exit(struct dwc3 *dwc);
+
+int dwc3_gadget_init(struct dwc3 *dwc);
+void dwc3_gadget_exit(struct dwc3 *dwc);
+
+extern int dwc3_get_device_id(void);
+extern void dwc3_put_device_id(int id);
+
#endif /* __DRIVERS_USB_DWC3_CORE_H */
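
The HWPARAMS helpers added to core.h are plain mask-and-shift field extractors. A small hedged illustration of how macros of that shape decode the two registers cached by dwc3_cache_hwparams(), with made-up example values:

#include <linux/kernel.h>

#define EX_MODE(n)	((n) & 0x7)			/* HWPARAMS0 bits [2:0]   */
#define EX_NUM_INT(n)	(((n) & (0x3f << 15)) >> 15)	/* HWPARAMS1 bits [20:15] */

static void example_decode_hwparams(u32 hwparams0, u32 hwparams1)
{
	/* e.g. hwparams0 ending in 2 decodes to DRD, hwparams1 = 0x00020000 to 4 */
	pr_info("mode %u, %u interrupts/event buffers\n",
			EX_MODE(hwparams0), EX_NUM_INT(hwparams1));
}
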
diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c
index fcfa91517ea1..433c97c15fc5 100644
--- a/drivers/usb/dwc3/debugfs.c
+++ b/drivers/usb/dwc3/debugfs.c
@@ -44,12 +44,12 @@
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/delay.h>
-
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "core.h"
#include "gadget.h"
#include "io.h"
+#include "debug.h"
#define dump_register(nm) \
{ \
@@ -395,6 +395,75 @@ static const struct file_operations dwc3_regdump_fops = {
.release = single_release,
};
+static int dwc3_mode_show(struct seq_file *s, void *unused)
+{
+ struct dwc3 *dwc = s->private;
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ reg = dwc3_readl(dwc->regs, DWC3_GCTL);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ switch (DWC3_GCTL_PRTCAP(reg)) {
+ case DWC3_GCTL_PRTCAP_HOST:
+ seq_printf(s, "host\n");
+ break;
+ case DWC3_GCTL_PRTCAP_DEVICE:
+ seq_printf(s, "device\n");
+ break;
+ case DWC3_GCTL_PRTCAP_OTG:
+ seq_printf(s, "OTG\n");
+ break;
+ default:
+ seq_printf(s, "UNKNOWN %08x\n", DWC3_GCTL_PRTCAP(reg));
+ }
+
+ return 0;
+}
+
+static int dwc3_mode_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, dwc3_mode_show, inode->i_private);
+}
+
+static ssize_t dwc3_mode_write(struct file *file,
+ const char __user *ubuf, size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct dwc3 *dwc = s->private;
+ unsigned long flags;
+ u32 mode = 0;
+ char buf[32];
+
+ if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
+ return -EFAULT;
+
+ if (!strncmp(buf, "host", 4))
+ mode |= DWC3_GCTL_PRTCAP_HOST;
+
+ if (!strncmp(buf, "device", 6))
+ mode |= DWC3_GCTL_PRTCAP_DEVICE;
+
+ if (!strncmp(buf, "otg", 3))
+ mode |= DWC3_GCTL_PRTCAP_OTG;
+
+ if (mode) {
+ spin_lock_irqsave(&dwc->lock, flags);
+ dwc3_set_mode(dwc, mode);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ }
+ return count;
+}
+
+static const struct file_operations dwc3_mode_fops = {
+ .open = dwc3_mode_open,
+ .write = dwc3_mode_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
int __devinit dwc3_debugfs_init(struct dwc3 *dwc)
{
struct dentry *root;
@@ -402,7 +471,7 @@ int __devinit dwc3_debugfs_init(struct dwc3 *dwc)
int ret;
root = debugfs_create_dir(dev_name(dwc->dev), NULL);
- if (IS_ERR(root)){
+ if (IS_ERR(root)) {
ret = PTR_ERR(root);
goto err0;
}
@@ -415,6 +484,14 @@ int __devinit dwc3_debugfs_init(struct dwc3 *dwc)
ret = PTR_ERR(file);
goto err1;
}
+
+ file = debugfs_create_file("mode", S_IRUGO | S_IWUSR, root,
+ dwc, &dwc3_mode_fops);
+ if (IS_ERR(file)) {
+ ret = PTR_ERR(file);
+ goto err1;
+ }
+
return 0;
err1:
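
The new "mode" debugfs file above follows the usual seq_file pattern: single_open() for reads plus a small write handler that parses the user buffer. A self-contained hedged sketch of that pattern, with a trivial on/off state standing in for the GCTL register:

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>

static int example_state;

static int example_show(struct seq_file *s, void *unused)
{
	seq_printf(s, "%d\n", example_state);
	return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
	/* inode->i_private carries whatever was passed to debugfs_create_file() */
	return single_open(file, example_show, inode->i_private);
}

static ssize_t example_write(struct file *file, const char __user *ubuf,
		size_t count, loff_t *ppos)
{
	char buf[32] = { };

	if (copy_from_user(buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
		return -EFAULT;

	if (!strncmp(buf, "on", 2))
		example_state = 1;
	else if (!strncmp(buf, "off", 3))
		example_state = 0;

	return count;
}

static const struct file_operations example_fops = {
	.open		= example_open,
	.write		= example_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

Hooking it up is then a single debugfs_create_file("state", S_IRUGO | S_IWUSR, parent, NULL, &example_fops) call, much as the hunk does for "mode".
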
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
index 062552b5fc8a..3274ac8f1200 100644
--- a/drivers/usb/dwc3/dwc3-omap.c
+++ b/drivers/usb/dwc3/dwc3-omap.c
@@ -48,6 +48,7 @@
#include <linux/io.h>
#include <linux/module.h>
+#include "core.h"
#include "io.h"
/*
@@ -200,6 +201,7 @@ static int __devinit dwc3_omap_probe(struct platform_device *pdev)
struct dwc3_omap *omap;
struct resource *res;
+ int devid;
int ret = -ENOMEM;
int irq;
@@ -236,16 +238,20 @@ static int __devinit dwc3_omap_probe(struct platform_device *pdev)
goto err1;
}
- dwc3 = platform_device_alloc("dwc3-omap", -1);
+ devid = dwc3_get_device_id();
+ if (devid < 0)
+ goto err2;
+
+ dwc3 = platform_device_alloc("dwc3", devid);
if (!dwc3) {
dev_err(&pdev->dev, "couldn't allocate dwc3 device\n");
- goto err2;
+ goto err3;
}
context = kzalloc(resource_size(res), GFP_KERNEL);
if (!context) {
dev_err(&pdev->dev, "couldn't allocate dwc3 context memory\n");
- goto err3;
+ goto err4;
}
spin_lock_init(&omap->lock);
@@ -299,7 +305,7 @@ static int __devinit dwc3_omap_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev, "failed to request IRQ #%d --> %d\n",
omap->irq, ret);
- goto err4;
+ goto err5;
}
/* enable all IRQs */
@@ -322,26 +328,29 @@ static int __devinit dwc3_omap_probe(struct platform_device *pdev)
pdev->num_resources);
if (ret) {
dev_err(&pdev->dev, "couldn't add resources to dwc3 device\n");
- goto err5;
+ goto err6;
}
ret = platform_device_add(dwc3);
if (ret) {
dev_err(&pdev->dev, "failed to register dwc3 device\n");
- goto err5;
+ goto err6;
}
return 0;
-err5:
+err6:
free_irq(omap->irq, omap);
-err4:
+err5:
kfree(omap->context);
-err3:
+err4:
platform_device_put(dwc3);
+err3:
+ dwc3_put_device_id(devid);
+
err2:
iounmap(base);
@@ -358,6 +367,7 @@ static int __devexit dwc3_omap_remove(struct platform_device *pdev)
platform_device_unregister(omap->dwc3);
+ dwc3_put_device_id(omap->dwc3->id);
free_irq(omap->irq, omap);
iounmap(omap->base);
@@ -384,18 +394,9 @@ static struct platform_driver dwc3_omap_driver = {
},
};
+module_platform_driver(dwc3_omap_driver);
+
+MODULE_ALIAS("platform:omap-dwc3");
MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("DesignWare USB3 OMAP Glue Layer");
-
-static int __devinit dwc3_omap_init(void)
-{
- return platform_driver_register(&dwc3_omap_driver);
-}
-module_init(dwc3_omap_init);
-
-static void __exit dwc3_omap_exit(void)
-{
- platform_driver_unregister(&dwc3_omap_driver);
-}
-module_exit(dwc3_omap_exit);
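
The omap glue also drops its hand-rolled module init/exit functions in favour of the module_platform_driver() helper, which expands to exactly that register/unregister boilerplate. A minimal hedged sketch of the idiom for a made-up driver:

#include <linux/module.h>
#include <linux/platform_device.h>

static int __devinit example_probe(struct platform_device *pdev)
{
	return 0;	/* claim the device, allocate per-instance state here */
}

static int __devexit example_remove(struct platform_device *pdev)
{
	return 0;	/* undo whatever probe() set up */
}

static struct platform_driver example_driver = {
	.probe	= example_probe,
	.remove	= __devexit_p(example_remove),
	.driver	= {
		.name = "example",
	},
};

/* expands to the usual module_init()/module_exit() register/unregister pair */
module_platform_driver(example_driver);

MODULE_ALIAS("platform:example");
MODULE_LICENSE("GPL");
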
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index f77c00042685..64e1f7c67b08 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -42,52 +42,17 @@
#include <linux/pci.h>
#include <linux/platform_device.h>
+#include "core.h"
+
/* FIXME define these in <linux/pci_ids.h> */
#define PCI_VENDOR_ID_SYNOPSYS 0x16c3
#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3 0xabcd
-#define DWC3_PCI_DEVS_POSSIBLE 32
-
struct dwc3_pci {
struct device *dev;
struct platform_device *dwc3;
};
-static DECLARE_BITMAP(dwc3_pci_devs, DWC3_PCI_DEVS_POSSIBLE);
-
-static int dwc3_pci_get_device_id(struct dwc3_pci *glue)
-{
- int id;
-
-again:
- id = find_first_zero_bit(dwc3_pci_devs, DWC3_PCI_DEVS_POSSIBLE);
- if (id < DWC3_PCI_DEVS_POSSIBLE) {
- int old;
-
- old = test_and_set_bit(id, dwc3_pci_devs);
- if (old)
- goto again;
- } else {
- dev_err(glue->dev, "no space for new device\n");
- id = -ENOMEM;
- }
-
- return 0;
-}
-
-static void dwc3_pci_put_device_id(struct dwc3_pci *glue, int id)
-{
- int ret;
-
- if (id < 0)
- return;
-
- ret = test_bit(id, dwc3_pci_devs);
- WARN(!ret, "Device: %s\nID %d not in use\n",
- dev_driver_string(glue->dev), id);
- clear_bit(id, dwc3_pci_devs);
-}
-
static int __devinit dwc3_pci_probe(struct pci_dev *pci,
const struct pci_device_id *id)
{
@@ -114,11 +79,11 @@ static int __devinit dwc3_pci_probe(struct pci_dev *pci,
pci_set_power_state(pci, PCI_D0);
pci_set_master(pci);
- devid = dwc3_pci_get_device_id(glue);
+ devid = dwc3_get_device_id();
if (devid < 0)
goto err2;
- dwc3 = platform_device_alloc("dwc3-pci", devid);
+ dwc3 = platform_device_alloc("dwc3", devid);
if (!dwc3) {
dev_err(&pci->dev, "couldn't allocate dwc3 device\n");
goto err3;
@@ -163,13 +128,13 @@ err4:
platform_device_put(dwc3);
err3:
- dwc3_pci_put_device_id(glue, devid);
+ dwc3_put_device_id(devid);
err2:
pci_disable_device(pci);
err1:
- kfree(pci);
+ kfree(glue);
err0:
return ret;
@@ -179,7 +144,7 @@ static void __devexit dwc3_pci_remove(struct pci_dev *pci)
{
struct dwc3_pci *glue = pci_get_drvdata(pci);
- dwc3_pci_put_device_id(glue, glue->dwc3->id);
+ dwc3_put_device_id(glue->dwc3->id);
platform_device_unregister(glue->dwc3);
pci_set_drvdata(pci, NULL);
pci_disable_device(pci);
@@ -196,7 +161,7 @@ static DEFINE_PCI_DEVICE_TABLE(dwc3_pci_id_table) = {
MODULE_DEVICE_TABLE(pci, dwc3_pci_id_table);
static struct pci_driver dwc3_pci_driver = {
- .name = "pci-dwc3",
+ .name = "dwc3-pci",
.id_table = dwc3_pci_id_table,
.probe = dwc3_pci_probe,
.remove = __devexit_p(dwc3_pci_remove),
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 69a4e43ddf59..2f51de57593a 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -48,13 +48,13 @@
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
+#include <linux/usb/composite.h>
#include "core.h"
#include "gadget.h"
#include "io.h"
-static void dwc3_ep0_inspect_setup(struct dwc3 *dwc,
- const struct dwc3_event_depevt *event);
+static void dwc3_ep0_do_control_status(struct dwc3 *dwc, u32 epnum);
static const char *dwc3_ep0_state_string(enum dwc3_ep0_state state)
{
@@ -125,6 +125,8 @@ static int dwc3_ep0_start_trans(struct dwc3 *dwc, u8 epnum, dma_addr_t buf_dma,
static int __dwc3_gadget_ep0_queue(struct dwc3_ep *dep,
struct dwc3_request *req)
{
+ struct dwc3 *dwc = dep->dwc;
+ u32 type;
int ret = 0;
req->request.actual = 0;
@@ -143,9 +145,7 @@ static int __dwc3_gadget_ep0_queue(struct dwc3_ep *dep,
* IRQ we were waiting for is long gone.
*/
if (dep->flags & DWC3_EP_PENDING_REQUEST) {
- struct dwc3 *dwc = dep->dwc;
unsigned direction;
- u32 type;
direction = !!(dep->flags & DWC3_EP0_DIR_IN);
@@ -165,6 +165,13 @@ static int __dwc3_gadget_ep0_queue(struct dwc3_ep *dep,
req->request.dma, req->request.length, type);
dep->flags &= ~(DWC3_EP_PENDING_REQUEST |
DWC3_EP0_DIR_IN);
+ } else if (dwc->delayed_status) {
+ dwc->delayed_status = false;
+
+ if (dwc->ep0state == EP0_STATUS_PHASE)
+ dwc3_ep0_do_control_status(dwc, 1);
+ else
+ dev_dbg(dwc->dev, "too early for delayed status\n");
}
return ret;
@@ -190,9 +197,7 @@ int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request,
}
/* we share one TRB for ep0/1 */
- if (!list_empty(&dwc->eps[0]->request_list) ||
- !list_empty(&dwc->eps[1]->request_list) ||
- dwc->ep0_status_pending) {
+ if (!list_empty(&dep->request_list)) {
ret = -EBUSY;
goto out;
}
@@ -214,8 +219,9 @@ static void dwc3_ep0_stall_and_restart(struct dwc3 *dwc)
struct dwc3_ep *dep = dwc->eps[0];
/* stall is always issued on EP0 */
- __dwc3_gadget_ep_set_halt(dwc->eps[0], 1);
- dwc->eps[0]->flags = DWC3_EP_ENABLED;
+ __dwc3_gadget_ep_set_halt(dep, 1);
+ dep->flags = DWC3_EP_ENABLED;
+ dwc->delayed_status = false;
if (!list_empty(&dep->request_list)) {
struct dwc3_request *req;
@@ -254,17 +260,14 @@ static struct dwc3_ep *dwc3_wIndex_to_dep(struct dwc3 *dwc, __le16 wIndex_le)
return NULL;
}
-static void dwc3_ep0_send_status_response(struct dwc3 *dwc)
+static void dwc3_ep0_status_cmpl(struct usb_ep *ep, struct usb_request *req)
{
- dwc3_ep0_start_trans(dwc, 1, dwc->setup_buf_addr,
- dwc->ep0_usb_req.length,
- DWC3_TRBCTL_CONTROL_DATA);
}
-
/*
* ch 9.4.5
*/
-static int dwc3_ep0_handle_status(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
+static int dwc3_ep0_handle_status(struct dwc3 *dwc,
+ struct usb_ctrlrequest *ctrl)
{
struct dwc3_ep *dep;
u32 recip;
@@ -291,7 +294,7 @@ static int dwc3_ep0_handle_status(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl
case USB_RECIP_ENDPOINT:
dep = dwc3_wIndex_to_dep(dwc, ctrl->wIndex);
if (!dep)
- return -EINVAL;
+ return -EINVAL;
if (dep->flags & DWC3_EP_STALL)
usb_status = 1 << USB_ENDPOINT_HALT;
@@ -302,10 +305,14 @@ static int dwc3_ep0_handle_status(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl
response_pkt = (__le16 *) dwc->setup_buf;
*response_pkt = cpu_to_le16(usb_status);
- dwc->ep0_usb_req.length = sizeof(*response_pkt);
- dwc->ep0_status_pending = 1;
- return 0;
+ dep = dwc->eps[0];
+ dwc->ep0_usb_req.dep = dep;
+ dwc->ep0_usb_req.request.length = sizeof(*response_pkt);
+ dwc->ep0_usb_req.request.dma = dwc->setup_buf_addr;
+ dwc->ep0_usb_req.request.complete = dwc3_ep0_status_cmpl;
+
+ return __dwc3_gadget_ep0_queue(dep, &dwc->ep0_usb_req);
}
static int dwc3_ep0_handle_feature(struct dwc3 *dwc,
@@ -396,8 +403,7 @@ static int dwc3_ep0_handle_feature(struct dwc3 *dwc,
case USB_RECIP_ENDPOINT:
switch (wValue) {
case USB_ENDPOINT_HALT:
-
- dep = dwc3_wIndex_to_dep(dwc, ctrl->wIndex);
+ dep = dwc3_wIndex_to_dep(dwc, wIndex);
if (!dep)
return -EINVAL;
ret = __dwc3_gadget_ep_set_halt(dep, set);
@@ -422,8 +428,15 @@ static int dwc3_ep0_set_address(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
u32 reg;
addr = le16_to_cpu(ctrl->wValue);
- if (addr > 127)
+ if (addr > 127) {
+ dev_dbg(dwc->dev, "invalid device address %d\n", addr);
return -EINVAL;
+ }
+
+ if (dwc->dev_state == DWC3_CONFIGURED_STATE) {
+ dev_dbg(dwc->dev, "trying to set address when configured\n");
+ return -EINVAL;
+ }
reg = dwc3_readl(dwc->regs, DWC3_DCFG);
reg &= ~(DWC3_DCFG_DEVADDR_MASK);
@@ -473,8 +486,10 @@ static int dwc3_ep0_set_config(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
if (!cfg)
dwc->dev_state = DWC3_ADDRESS_STATE;
break;
+ default:
+ ret = -EINVAL;
}
- return 0;
+ return ret;
}
static int dwc3_ep0_std_request(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
@@ -537,6 +552,9 @@ static void dwc3_ep0_inspect_setup(struct dwc3 *dwc,
else
ret = dwc3_ep0_delegate_req(dwc, ctrl);
+ if (ret == USB_GADGET_DELAYED_STATUS)
+ dwc->delayed_status = true;
+
if (ret >= 0)
return;
@@ -550,27 +568,21 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc,
struct dwc3_request *r = NULL;
struct usb_request *ur;
struct dwc3_trb trb;
- struct dwc3_ep *dep;
+ struct dwc3_ep *ep0;
u32 transferred;
u8 epnum;
epnum = event->endpoint_number;
- dep = dwc->eps[epnum];
+ ep0 = dwc->eps[0];
dwc->ep0_next_event = DWC3_EP0_NRDY_STATUS;
- if (!dwc->ep0_status_pending) {
- r = next_request(&dwc->eps[0]->request_list);
- ur = &r->request;
- } else {
- ur = &dwc->ep0_usb_req;
- dwc->ep0_status_pending = 0;
- }
+ r = next_request(&ep0->request_list);
+ ur = &r->request;
dwc3_trb_to_nat(dwc->ep0_trb, &trb);
if (dwc->ep0_bounced) {
- struct dwc3_ep *ep0 = dwc->eps[0];
transferred = min_t(u32, ur->length,
ep0->endpoint.maxpacket - trb.length);
@@ -591,7 +603,7 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc,
* seems to be case when req.length > maxpacket. Could it be?
*/
if (r)
- dwc3_gadget_giveback(dep, r, 0);
+ dwc3_gadget_giveback(ep0, r, 0);
}
}
@@ -619,6 +631,7 @@ static void dwc3_ep0_xfer_complete(struct dwc3 *dwc,
struct dwc3_ep *dep = dwc->eps[event->endpoint_number];
dep->flags &= ~DWC3_EP_BUSY;
+ dwc->setup_packet_pending = false;
switch (dwc->ep0state) {
case EP0_SETUP_PHASE:
@@ -643,7 +656,6 @@ static void dwc3_ep0_xfer_complete(struct dwc3 *dwc,
static void dwc3_ep0_do_control_setup(struct dwc3 *dwc,
const struct dwc3_event_depevt *event)
{
- dwc->ep0state = EP0_SETUP_PHASE;
dwc3_ep0_out_start(dwc);
}
@@ -655,12 +667,6 @@ static void dwc3_ep0_do_control_data(struct dwc3 *dwc,
int ret;
dep = dwc->eps[0];
- dwc->ep0state = EP0_DATA_PHASE;
-
- if (dwc->ep0_status_pending) {
- dwc3_ep0_send_status_response(dwc);
- return;
- }
if (list_empty(&dep->request_list)) {
dev_vdbg(dwc->dev, "pending request for EP0 Data phase\n");
@@ -674,7 +680,6 @@ static void dwc3_ep0_do_control_data(struct dwc3 *dwc,
req = next_request(&dep->request_list);
req->direction = !!event->endpoint_number;
- dwc->ep0state = EP0_DATA_PHASE;
if (req->request.length == 0) {
ret = dwc3_ep0_start_trans(dwc, event->endpoint_number,
dwc->ctrl_req_addr, 0,
@@ -706,35 +711,79 @@ static void dwc3_ep0_do_control_data(struct dwc3 *dwc,
WARN_ON(ret < 0);
}
-static void dwc3_ep0_do_control_status(struct dwc3 *dwc,
- const struct dwc3_event_depevt *event)
+static int dwc3_ep0_start_control_status(struct dwc3_ep *dep)
{
+ struct dwc3 *dwc = dep->dwc;
u32 type;
- int ret;
-
- dwc->ep0state = EP0_STATUS_PHASE;
type = dwc->three_stage_setup ? DWC3_TRBCTL_CONTROL_STATUS3
: DWC3_TRBCTL_CONTROL_STATUS2;
- ret = dwc3_ep0_start_trans(dwc, event->endpoint_number,
+ return dwc3_ep0_start_trans(dwc, dep->number,
dwc->ctrl_req_addr, 0, type);
+}
- WARN_ON(ret < 0);
+static void dwc3_ep0_do_control_status(struct dwc3 *dwc, u32 epnum)
+{
+ struct dwc3_ep *dep = dwc->eps[epnum];
+
+ WARN_ON(dwc3_ep0_start_control_status(dep));
}
static void dwc3_ep0_xfernotready(struct dwc3 *dwc,
const struct dwc3_event_depevt *event)
{
+ dwc->setup_packet_pending = true;
+
+ /*
+	 * This part is very tricky: if we have just handled
+ * XferNotReady(Setup) and we're now expecting a
+ * XferComplete but, instead, we receive another
+ * XferNotReady(Setup), we should STALL and restart
+ * the state machine.
+ *
+ * In all other cases, we just continue waiting
+ * for the XferComplete event.
+ *
+ * We are a little bit unsafe here because we're
+ * not trying to ensure that last event was, indeed,
+ * XferNotReady(Setup).
+ *
+ * Still, we don't expect any condition where that
+ * should happen and, even if it does, it would be
+ * another error condition.
+ */
+ if (dwc->ep0_next_event == DWC3_EP0_COMPLETE) {
+ switch (event->status) {
+ case DEPEVT_STATUS_CONTROL_SETUP:
+ dev_vdbg(dwc->dev, "Unexpected XferNotReady(Setup)\n");
+ dwc3_ep0_stall_and_restart(dwc);
+ break;
+ case DEPEVT_STATUS_CONTROL_DATA:
+ /* FALLTHROUGH */
+ case DEPEVT_STATUS_CONTROL_STATUS:
+ /* FALLTHROUGH */
+ default:
+ dev_vdbg(dwc->dev, "waiting for XferComplete\n");
+ }
+
+ return;
+ }
+
switch (event->status) {
case DEPEVT_STATUS_CONTROL_SETUP:
dev_vdbg(dwc->dev, "Control Setup\n");
+
+ dwc->ep0state = EP0_SETUP_PHASE;
+
dwc3_ep0_do_control_setup(dwc, event);
break;
case DEPEVT_STATUS_CONTROL_DATA:
dev_vdbg(dwc->dev, "Control Data\n");
+ dwc->ep0state = EP0_DATA_PHASE;
+
if (dwc->ep0_next_event != DWC3_EP0_NRDY_DATA) {
dev_vdbg(dwc->dev, "Expected %d got %d\n",
dwc->ep0_next_event,
@@ -764,6 +813,8 @@ static void dwc3_ep0_xfernotready(struct dwc3 *dwc,
case DEPEVT_STATUS_CONTROL_STATUS:
dev_vdbg(dwc->dev, "Control Status\n");
+ dwc->ep0state = EP0_STATUS_PHASE;
+
if (dwc->ep0_next_event != DWC3_EP0_NRDY_STATUS) {
dev_vdbg(dwc->dev, "Expected %d got %d\n",
dwc->ep0_next_event,
@@ -772,12 +823,19 @@ static void dwc3_ep0_xfernotready(struct dwc3 *dwc,
dwc3_ep0_stall_and_restart(dwc);
return;
}
- dwc3_ep0_do_control_status(dwc, event);
+
+ if (dwc->delayed_status) {
+ WARN_ON_ONCE(event->endpoint_number != 1);
+ dev_vdbg(dwc->dev, "Mass Storage delayed status\n");
+ return;
+ }
+
+ dwc3_ep0_do_control_status(dwc, event->endpoint_number);
}
}
void dwc3_ep0_interrupt(struct dwc3 *dwc,
- const const struct dwc3_event_depevt *event)
+ const struct dwc3_event_depevt *event)
{
u8 epnum = event->endpoint_number;
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 25dbd8614e72..a696bde53222 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -65,6 +65,22 @@ void dwc3_map_buffer_to_dma(struct dwc3_request *req)
return;
}
+ if (req->request.num_sgs) {
+ int mapped;
+
+ mapped = dma_map_sg(dwc->dev, req->request.sg,
+ req->request.num_sgs,
+ req->direction ? DMA_TO_DEVICE
+ : DMA_FROM_DEVICE);
+ if (mapped < 0) {
+ dev_err(dwc->dev, "failed to map SGs\n");
+ return;
+ }
+
+ req->request.num_mapped_sgs = mapped;
+ return;
+ }
+
if (req->request.dma == DMA_ADDR_INVALID) {
req->request.dma = dma_map_single(dwc->dev, req->request.buf,
req->request.length, req->direction
@@ -82,6 +98,17 @@ void dwc3_unmap_buffer_from_dma(struct dwc3_request *req)
return;
}
+ if (req->request.num_mapped_sgs) {
+ req->request.dma = DMA_ADDR_INVALID;
+ dma_unmap_sg(dwc->dev, req->request.sg,
+ req->request.num_sgs,
+ req->direction ? DMA_TO_DEVICE
+ : DMA_FROM_DEVICE);
+
+ req->request.num_mapped_sgs = 0;
+ return;
+ }
+
if (req->mapped) {
dma_unmap_single(dwc->dev, req->request.dma,
req->request.length, req->direction
@@ -97,7 +124,11 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
struct dwc3 *dwc = dep->dwc;
if (req->queued) {
- dep->busy_slot++;
+ if (req->request.num_mapped_sgs)
+ dep->busy_slot += req->request.num_mapped_sgs;
+ else
+ dep->busy_slot++;
+
/*
* Skip LINK TRB. We can't use req->trb and check for
* DWC3_TRBCTL_LINK_TRB because it points the TRB we just
@@ -108,6 +139,7 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
dep->busy_slot++;
}
list_del(&req->list);
+ req->trb = NULL;
if (req->request.status == -EINPROGRESS)
req->request.status = status;
@@ -251,7 +283,8 @@ static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
}
static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
- const struct usb_endpoint_descriptor *desc)
+ const struct usb_endpoint_descriptor *desc,
+ const struct usb_ss_ep_comp_descriptor *comp_desc)
{
struct dwc3_gadget_ep_cmd_params params;
@@ -264,7 +297,7 @@ static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN
| DWC3_DEPCFG_XFER_NOT_READY_EN;
- if (usb_endpoint_xfer_bulk(desc) && dep->endpoint.max_streams) {
+ if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
| DWC3_DEPCFG_STREAM_EVENT_EN;
dep->stream_capable = true;
@@ -317,7 +350,8 @@ static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
* Caller should take care of locking
*/
static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
- const struct usb_endpoint_descriptor *desc)
+ const struct usb_endpoint_descriptor *desc,
+ const struct usb_ss_ep_comp_descriptor *comp_desc)
{
struct dwc3 *dwc = dep->dwc;
u32 reg;
@@ -329,7 +363,7 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
return ret;
}
- ret = dwc3_gadget_set_ep_config(dwc, dep, desc);
+ ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc);
if (ret)
return ret;
@@ -343,6 +377,7 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
return ret;
dep->desc = desc;
+ dep->comp_desc = comp_desc;
dep->type = usb_endpoint_type(desc);
dep->flags |= DWC3_EP_ENABLED;
@@ -405,6 +440,7 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
dep->stream_capable = false;
dep->desc = NULL;
+ dep->comp_desc = NULL;
dep->type = 0;
dep->flags = 0;
@@ -473,7 +509,7 @@ static int dwc3_gadget_ep_enable(struct usb_ep *ep,
dev_vdbg(dwc->dev, "Enabling %s\n", dep->name);
spin_lock_irqsave(&dwc->lock, flags);
- ret = __dwc3_gadget_ep_enable(dep, desc);
+ ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc);
spin_unlock_irqrestore(&dwc->lock, flags);
return ret;
@@ -539,6 +575,85 @@ static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
kfree(req);
}
+/**
+ * dwc3_prepare_one_trb - setup one TRB from one request
+ * @dep: endpoint for which this request is prepared
+ * @req: dwc3_request pointer
+ */
+static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
+ struct dwc3_request *req, dma_addr_t dma,
+ unsigned length, unsigned last, unsigned chain)
+{
+ struct dwc3 *dwc = dep->dwc;
+ struct dwc3_trb_hw *trb_hw;
+ struct dwc3_trb trb;
+
+ unsigned int cur_slot;
+
+ dev_vdbg(dwc->dev, "%s: req %p dma %08llx length %d%s%s\n",
+ dep->name, req, (unsigned long long) dma,
+ length, last ? " last" : "",
+ chain ? " chain" : "");
+
+ trb_hw = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK];
+ cur_slot = dep->free_slot;
+ dep->free_slot++;
+
+ /* Skip the LINK-TRB on ISOC */
+ if (((cur_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
+ usb_endpoint_xfer_isoc(dep->desc))
+ return;
+
+ memset(&trb, 0, sizeof(trb));
+ if (!req->trb) {
+ dwc3_gadget_move_request_queued(req);
+ req->trb = trb_hw;
+ req->trb_dma = dwc3_trb_dma_offset(dep, trb_hw);
+ }
+
+ if (usb_endpoint_xfer_isoc(dep->desc)) {
+ trb.isp_imi = true;
+ trb.csp = true;
+ } else {
+ trb.chn = chain;
+ trb.lst = last;
+ }
+
+ if (usb_endpoint_xfer_bulk(dep->desc) && dep->stream_capable)
+ trb.sid_sofn = req->request.stream_id;
+
+ switch (usb_endpoint_type(dep->desc)) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ trb.trbctl = DWC3_TRBCTL_CONTROL_SETUP;
+ break;
+
+ case USB_ENDPOINT_XFER_ISOC:
+ trb.trbctl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
+
+ /* IOC every DWC3_TRB_NUM / 4 so we can refill */
+ if (!(cur_slot % (DWC3_TRB_NUM / 4)))
+ trb.ioc = last;
+ break;
+
+ case USB_ENDPOINT_XFER_BULK:
+ case USB_ENDPOINT_XFER_INT:
+ trb.trbctl = DWC3_TRBCTL_NORMAL;
+ break;
+ default:
+ /*
+ * This is only possible with faulty memory because we
+ * checked it already :)
+ */
+ BUG();
+ }
+
+ trb.length = length;
+ trb.bplh = dma;
+ trb.hwo = true;
+
+ dwc3_trb_to_hw(&trb, trb_hw);
+}
+
/*
* dwc3_prepare_trbs - setup TRBs from requests
* @dep: endpoint for which requests are being prepared
@@ -548,18 +663,17 @@ static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
 * transfers. The function returns once there are no more TRBs available or
 * it runs out of requests.
*/
-static struct dwc3_request *dwc3_prepare_trbs(struct dwc3_ep *dep,
- bool starting)
+static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
{
- struct dwc3_request *req, *n, *ret = NULL;
- struct dwc3_trb_hw *trb_hw;
- struct dwc3_trb trb;
+ struct dwc3_request *req, *n;
u32 trbs_left;
+ unsigned int last_one = 0;
BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);
/* the first request must not be queued */
trbs_left = (dep->busy_slot - dep->free_slot) & DWC3_TRB_MASK;
+
/*
* if busy & slot are equal than it is either full or empty. If we are
* starting to proceed requests then we are empty. Otherwise we ar
@@ -567,7 +681,7 @@ static struct dwc3_request *dwc3_prepare_trbs(struct dwc3_ep *dep,
*/
if (!trbs_left) {
if (!starting)
- return NULL;
+ return;
trbs_left = DWC3_TRB_NUM;
/*
* In case we start from scratch, we queue the ISOC requests
@@ -591,94 +705,62 @@ static struct dwc3_request *dwc3_prepare_trbs(struct dwc3_ep *dep,
/* The last TRB is a link TRB, not used for xfer */
if ((trbs_left <= 1) && usb_endpoint_xfer_isoc(dep->desc))
- return NULL;
+ return;
list_for_each_entry_safe(req, n, &dep->request_list, list) {
- unsigned int last_one = 0;
- unsigned int cur_slot;
+ unsigned length;
+ dma_addr_t dma;
- trb_hw = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK];
- cur_slot = dep->free_slot;
- dep->free_slot++;
+ if (req->request.num_mapped_sgs > 0) {
+ struct usb_request *request = &req->request;
+ struct scatterlist *sg = request->sg;
+ struct scatterlist *s;
+ int i;
- /* Skip the LINK-TRB on ISOC */
- if (((cur_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
- usb_endpoint_xfer_isoc(dep->desc))
- continue;
+ for_each_sg(sg, s, request->num_mapped_sgs, i) {
+ unsigned chain = true;
- dwc3_gadget_move_request_queued(req);
- memset(&trb, 0, sizeof(trb));
- trbs_left--;
+ length = sg_dma_len(s);
+ dma = sg_dma_address(s);
- /* Is our TRB pool empty? */
- if (!trbs_left)
- last_one = 1;
- /* Is this the last request? */
- if (list_empty(&dep->request_list))
- last_one = 1;
+ if (i == (request->num_mapped_sgs - 1)
+ || sg_is_last(s)) {
+ last_one = true;
+ chain = false;
+ }
- /*
- * FIXME we shouldn't need to set LST bit always but we are
- * facing some weird problem with the Hardware where it doesn't
- * complete even though it has been previously started.
- *
- * While we're debugging the problem, as a workaround to
- * multiple TRBs handling, use only one TRB at a time.
- */
- last_one = 1;
+ trbs_left--;
+ if (!trbs_left)
+ last_one = true;
- req->trb = trb_hw;
- if (!ret)
- ret = req;
+ if (last_one)
+ chain = false;
- trb.bplh = req->request.dma;
+ dwc3_prepare_one_trb(dep, req, dma, length,
+ last_one, chain);
- if (usb_endpoint_xfer_isoc(dep->desc)) {
- trb.isp_imi = true;
- trb.csp = true;
+ if (last_one)
+ break;
+ }
} else {
- trb.lst = last_one;
- }
+ dma = req->request.dma;
+ length = req->request.length;
+ trbs_left--;
- if (usb_endpoint_xfer_bulk(dep->desc) && dep->stream_capable)
- trb.sid_sofn = req->request.stream_id;
-
- switch (usb_endpoint_type(dep->desc)) {
- case USB_ENDPOINT_XFER_CONTROL:
- trb.trbctl = DWC3_TRBCTL_CONTROL_SETUP;
- break;
+ if (!trbs_left)
+ last_one = 1;
- case USB_ENDPOINT_XFER_ISOC:
- trb.trbctl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
+ /* Is this the last request? */
+ if (list_is_last(&req->list, &dep->request_list))
+ last_one = 1;
- /* IOC every DWC3_TRB_NUM / 4 so we can refill */
- if (!(cur_slot % (DWC3_TRB_NUM / 4)))
- trb.ioc = last_one;
- break;
+ dwc3_prepare_one_trb(dep, req, dma, length,
+ last_one, false);
- case USB_ENDPOINT_XFER_BULK:
- case USB_ENDPOINT_XFER_INT:
- trb.trbctl = DWC3_TRBCTL_NORMAL;
- break;
- default:
- /*
- * This is only possible with faulty memory because we
- * checked it already :)
- */
- BUG();
+ if (last_one)
+ break;
}
-
- trb.length = req->request.length;
- trb.hwo = true;
-
- dwc3_trb_to_hw(&trb, trb_hw);
- req->trb_dma = dwc3_trb_dma_offset(dep, trb_hw);
-
- if (last_one)
- break;
}
-
- return ret;
}
static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param,
@@ -707,11 +789,13 @@ static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param,
/* req points to the first request which will be sent */
req = next_request(&dep->req_queued);
} else {
+ dwc3_prepare_trbs(dep, start_new);
+
/*
* req points to the first request where HWO changed
* from 0 to 1
*/
- req = dwc3_prepare_trbs(dep, start_new);
+ req = next_request(&dep->req_queued);
}
if (!req) {
dep->flags |= DWC3_EP_PENDING_REQUEST;
@@ -745,8 +829,9 @@ static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param,
dep->flags |= DWC3_EP_BUSY;
dep->res_trans_idx = dwc3_gadget_ep_get_transfer_index(dwc,
dep->number);
- if (!dep->res_trans_idx)
- printk_once(KERN_ERR "%s() res_trans_idx is invalid\n", __func__);
+
+ WARN_ON_ONCE(!dep->res_trans_idx);
+
return 0;
}
@@ -1155,35 +1240,9 @@ static int dwc3_gadget_start(struct usb_gadget *g,
dwc->gadget_driver = driver;
dwc->gadget.dev.driver = &driver->driver;
- reg = dwc3_readl(dwc->regs, DWC3_GCTL);
-
- reg &= ~DWC3_GCTL_SCALEDOWN(3);
- reg &= ~DWC3_GCTL_PRTCAPDIR(DWC3_GCTL_PRTCAP_OTG);
- reg &= ~DWC3_GCTL_DISSCRAMBLE;
- reg |= DWC3_GCTL_PRTCAPDIR(DWC3_GCTL_PRTCAP_DEVICE);
-
- switch (DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams0)) {
- case DWC3_GHWPARAMS1_EN_PWROPT_CLK:
- reg &= ~DWC3_GCTL_DSBLCLKGTNG;
- break;
- default:
- dev_dbg(dwc->dev, "No power optimization available\n");
- }
-
- /*
- * WORKAROUND: DWC3 revisions <1.90a have a bug
- * when The device fails to connect at SuperSpeed
- * and falls back to high-speed mode which causes
- * the device to enter in a Connect/Disconnect loop
- */
- if (dwc->revision < DWC3_REVISION_190A)
- reg |= DWC3_GCTL_U2RSTECN;
-
- dwc3_writel(dwc->regs, DWC3_GCTL, reg);
-
reg = dwc3_readl(dwc->regs, DWC3_DCFG);
reg &= ~(DWC3_DCFG_SPEED_MASK);
- reg |= DWC3_DCFG_SUPERSPEED;
+ reg |= dwc->maximum_speed;
dwc3_writel(dwc->regs, DWC3_DCFG, reg);
dwc->start_config_issued = false;
@@ -1192,14 +1251,14 @@ static int dwc3_gadget_start(struct usb_gadget *g,
dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
dep = dwc->eps[0];
- ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc);
+ ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL);
if (ret) {
dev_err(dwc->dev, "failed to enable %s\n", dep->name);
goto err0;
}
dep = dwc->eps[1];
- ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc);
+ ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL);
if (ret) {
dev_err(dwc->dev, "failed to enable %s\n", dep->name);
goto err1;
@@ -1290,11 +1349,10 @@ static int __devinit dwc3_gadget_init_endpoints(struct dwc3 *dwc)
&dwc->gadget.ep_list);
ret = dwc3_alloc_trb_pool(dep);
- if (ret) {
- dev_err(dwc->dev, "%s: failed to allocate TRB pool\n", dep->name);
+ if (ret)
return ret;
- }
}
+
INIT_LIST_HEAD(&dep->request_list);
INIT_LIST_HEAD(&dep->req_queued);
}
@@ -1334,8 +1392,10 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
do {
req = next_request(&dep->req_queued);
- if (!req)
- break;
+ if (!req) {
+ WARN_ON_ONCE(1);
+ return 1;
+ }
dwc3_trb_to_nat(req->trb, &trb);
@@ -1400,6 +1460,31 @@ static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc,
dep->flags &= ~DWC3_EP_BUSY;
dep->res_trans_idx = 0;
}
+
+ /*
+ * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround.
+ * See dwc3_gadget_linksts_change_interrupt() for 1st half.
+ */
+ if (dwc->revision < DWC3_REVISION_183A) {
+ u32 reg;
+ int i;
+
+ for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
+ struct dwc3_ep *dep = dwc->eps[i];
+
+ if (!(dep->flags & DWC3_EP_ENABLED))
+ continue;
+
+ if (!list_empty(&dep->req_queued))
+ return;
+ }
+
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ reg |= dwc->u1u2;
+ dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+
+ dwc->u1u2 = 0;
+ }
}
static void dwc3_gadget_start_isoc(struct dwc3 *dwc,
@@ -1639,6 +1724,7 @@ static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
dwc->start_config_issued = false;
dwc->gadget.speed = USB_SPEED_UNKNOWN;
+ dwc->setup_packet_pending = false;
}
static void dwc3_gadget_usb3_phy_power(struct dwc3 *dwc, int on)
@@ -1675,6 +1761,40 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
dev_vdbg(dwc->dev, "%s\n", __func__);
+ /*
+ * WORKAROUND: DWC3 revisions <1.88a have an issue which
+ * would cause a missing Disconnect Event if there's a
+ * pending Setup Packet in the FIFO.
+ *
+ * There's no suggested workaround on the official Bug
+ * report, which states that "unless the driver/application
+ * is doing any special handling of a disconnect event,
+ * there is no functional issue".
+ *
+ * Unfortunately, it turns out that we _do_ some special
+ * handling of a disconnect event, namely complete all
+ * pending transfers, notify gadget driver of the
+ * disconnection, and so on.
+ *
+ * Our suggested workaround is to follow the Disconnect
+ * Event steps here, instead, based on a setup_packet_pending
+ * flag. Such flag gets set whenever we have a XferNotReady
+ * event on EP0 and gets cleared on XferComplete for the
+ * same endpoint.
+ *
+ * Refers to:
+ *
+ * STAR#9000466709: RTL: Device : Disconnect event not
+ * generated if setup packet pending in FIFO
+ */
+ if (dwc->revision < DWC3_REVISION_188A) {
+ if (dwc->setup_packet_pending)
+ dwc3_gadget_disconnect_interrupt(dwc);
+ }
+
+ /* after reset -> Default State */
+ dwc->dev_state = DWC3_DEFAULT_STATE;
+
/* Enable PHYs */
dwc3_gadget_usb2_phy_power(dwc, true);
dwc3_gadget_usb3_phy_power(dwc, true);
@@ -1755,6 +1875,22 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
switch (speed) {
case DWC3_DCFG_SUPERSPEED:
+ /*
+ * WORKAROUND: DWC3 revisions <1.90a have an issue which
+ * would cause a missing USB3 Reset event.
+ *
+ * In such situations, we should force a USB3 Reset
+ * event by calling our dwc3_gadget_reset_interrupt()
+ * routine.
+ *
+ * Refers to:
+ *
+ * STAR#9000483510: RTL: SS : USB3 reset event may
+ * not be generated always when the link enters poll
+ */
+ if (dwc->revision < DWC3_REVISION_190A)
+ dwc3_gadget_reset_interrupt(dwc);
+
dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
dwc->gadget.ep0->maxpacket = 512;
dwc->gadget.speed = USB_SPEED_SUPER;
@@ -1781,14 +1917,14 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
dwc3_gadget_disable_phy(dwc, dwc->gadget.speed);
dep = dwc->eps[0];
- ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc);
+ ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL);
if (ret) {
dev_err(dwc->dev, "failed to enable %s\n", dep->name);
return;
}
dep = dwc->eps[1];
- ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc);
+ ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL);
if (ret) {
dev_err(dwc->dev, "failed to enable %s\n", dep->name);
return;
@@ -1818,8 +1954,55 @@ static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
unsigned int evtinfo)
{
- /* The fith bit says SuperSpeed yes or no. */
- dwc->link_state = evtinfo & DWC3_LINK_STATE_MASK;
+ enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
+
+ /*
+ * WORKAROUND: DWC3 Revisions <1.83a have an issue which, depending
+ * on the link partner, the USB session might do multiple entry/exit
+ * of low power states before a transfer takes place.
+ *
+ * Due to this problem, we might experience lower throughput. The
+ * suggested workaround is to disable DCTL[12:9] bits if we're
+ * transitioning from U1/U2 to U0 and enable those bits again
+ * after a transfer completes and there are no pending transfers
+ * on any of the enabled endpoints.
+ *
+ * This is the first half of that workaround.
+ *
+ * Refers to:
+ *
+ * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us
+ * core send LGO_Ux entering U0
+ */
+ if (dwc->revision < DWC3_REVISION_183A) {
+ if (next == DWC3_LINK_STATE_U0) {
+ u32 u1u2;
+ u32 reg;
+
+ switch (dwc->link_state) {
+ case DWC3_LINK_STATE_U1:
+ case DWC3_LINK_STATE_U2:
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ u1u2 = reg & (DWC3_DCTL_INITU2ENA
+ | DWC3_DCTL_ACCEPTU2ENA
+ | DWC3_DCTL_INITU1ENA
+ | DWC3_DCTL_ACCEPTU1ENA);
+
+ if (!dwc->u1u2)
+ dwc->u1u2 = reg & u1u2;
+
+ reg &= ~u1u2;
+
+ dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+ break;
+ default:
+ /* do nothing */
+ break;
+ }
+ }
+ }
+
+ dwc->link_state = next;
dev_vdbg(dwc->dev, "%s link %d\n", __func__, dwc->link_state);
}
@@ -1925,7 +2108,7 @@ static irqreturn_t dwc3_interrupt(int irq, void *_dwc)
spin_lock(&dwc->lock);
- for (i = 0; i < DWC3_EVENT_BUFFERS_NUM; i++) {
+ for (i = 0; i < dwc->num_event_buffers; i++) {
irqreturn_t status;
status = dwc3_process_event_buf(dwc, i);
@@ -1986,9 +2169,10 @@ int __devinit dwc3_gadget_init(struct dwc3 *dwc)
dev_set_name(&dwc->gadget.dev, "gadget");
dwc->gadget.ops = &dwc3_gadget_ops;
- dwc->gadget.is_dualspeed = true;
+ dwc->gadget.max_speed = USB_SPEED_SUPER;
dwc->gadget.speed = USB_SPEED_UNKNOWN;
dwc->gadget.dev.parent = dwc->dev;
+ dwc->gadget.sg_supported = true;
dma_set_coherent_mask(&dwc->gadget.dev, dwc->dev->coherent_dma_mask);
@@ -2076,7 +2260,6 @@ err0:
void dwc3_gadget_exit(struct dwc3 *dwc)
{
int irq;
- int i;
usb_del_gadget_udc(&dwc->gadget);
irq = platform_get_irq(to_platform_device(dwc->dev), 0);
@@ -2084,9 +2267,6 @@ void dwc3_gadget_exit(struct dwc3 *dwc)
dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
free_irq(irq, dwc);
- for (i = 0; i < ARRAY_SIZE(dwc->eps); i++)
- __dwc3_gadget_ep_disable(dwc->eps[i]);
-
dwc3_gadget_free_endpoints(dwc);
dma_free_coherent(dwc->dev, 512, dwc->ep0_bounce,
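
The largest gadget.c change teaches the UDC about scatter-gather requests: the buffer is mapped with dma_map_sg() and every mapped segment becomes its own TRB through the new dwc3_prepare_one_trb(). A rough standalone sketch of that mapping-plus-iteration shape, with a hypothetical queue_one_segment() helper standing in for the TRB setup:

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/device.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

/* hypothetical helper standing in for dwc3_prepare_one_trb() */
static void queue_one_segment(dma_addr_t dma, unsigned int len, bool last)
{
	pr_debug("segment %08llx, %u bytes%s\n",
			(unsigned long long) dma, len, last ? " (last)" : "");
}

static int example_map_and_queue(struct device *dev, struct scatterlist *sgl,
		int num_sgs, bool to_device)
{
	struct scatterlist *s;
	int mapped, i;

	mapped = dma_map_sg(dev, sgl, num_sgs,
			to_device ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
	if (mapped <= 0)
		return -ENOMEM;		/* mapping failed, nothing was queued */

	for_each_sg(sgl, s, mapped, i) {
		bool last = (i == mapped - 1) || sg_is_last(s);

		/* one hardware descriptor (TRB) per mapped segment */
		queue_one_segment(sg_dma_address(s), sg_dma_len(s), last);

		if (last)
			break;
	}

	return 0;
}

On completion, dma_unmap_sg() undoes the mapping, as dwc3_unmap_buffer_from_dma() does in the hunk above.
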
diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h
index 71145a449d99..d97f467d41cc 100644
--- a/drivers/usb/dwc3/gadget.h
+++ b/drivers/usb/dwc3/gadget.h
@@ -79,19 +79,6 @@ struct dwc3_gadget_ep_cmd_params {
/* -------------------------------------------------------------------------- */
-struct dwc3_request {
- struct usb_request request;
- struct list_head list;
- struct dwc3_ep *dep;
-
- u8 epnum;
- struct dwc3_trb_hw *trb;
- dma_addr_t trb_dma;
-
- unsigned direction:1;
- unsigned mapped:1;
- unsigned queued:1;
-};
#define to_dwc3_request(r) (container_of(r, struct dwc3_request, request))
static inline struct dwc3_request *next_request(struct list_head *list)
@@ -110,23 +97,11 @@ static inline void dwc3_gadget_move_request_queued(struct dwc3_request *req)
list_move_tail(&req->list, &dep->req_queued);
}
-#if defined(CONFIG_USB_GADGET_DWC3) || defined(CONFIG_USB_GADGET_DWC3_MODULE)
-int dwc3_gadget_init(struct dwc3 *dwc);
-void dwc3_gadget_exit(struct dwc3 *dwc);
-#else
-static inline int dwc3_gadget_init(struct dwc3 *dwc) { return 0; }
-static inline void dwc3_gadget_exit(struct dwc3 *dwc) { }
-static inline int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
- unsigned cmd, struct dwc3_gadget_ep_cmd_params *params)
-{
- return 0;
-}
-#endif
-
void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
int status);
-void dwc3_ep0_interrupt(struct dwc3 *dwc, const struct dwc3_event_depevt *event);
+void dwc3_ep0_interrupt(struct dwc3 *dwc,
+ const struct dwc3_event_depevt *event);
void dwc3_ep0_out_start(struct dwc3 *dwc);
int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request,
gfp_t gfp_flags);
diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
new file mode 100644
index 000000000000..7cfe211b6c37
--- /dev/null
+++ b/drivers/usb/dwc3/host.c
@@ -0,0 +1,102 @@
+/**
+ * host.c - DesignWare USB3 DRD Controller Host Glue
+ *
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Authors: Felipe Balbi <balbi@ti.com>,
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/platform_device.h>
+
+#include "core.h"
+
+static struct resource generic_resources[] = {
+ {
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+int dwc3_host_init(struct dwc3 *dwc)
+{
+ struct platform_device *xhci;
+ int ret;
+
+ xhci = platform_device_alloc("xhci", -1);
+ if (!xhci) {
+ dev_err(dwc->dev, "couldn't allocate xHCI device\n");
+ ret = -ENOMEM;
+ goto err0;
+ }
+
+ dma_set_coherent_mask(&xhci->dev, dwc->dev->coherent_dma_mask);
+
+ xhci->dev.parent = dwc->dev;
+ xhci->dev.dma_mask = dwc->dev->dma_mask;
+ xhci->dev.dma_parms = dwc->dev->dma_parms;
+
+ dwc->xhci = xhci;
+
+ /* setup resources */
+ generic_resources[0].start = dwc->irq;
+
+ generic_resources[1].start = dwc->res->start;
+ generic_resources[1].end = dwc->res->start + 0x7fff;
+
+ ret = platform_device_add_resources(xhci, generic_resources,
+ ARRAY_SIZE(generic_resources));
+ if (ret) {
+ dev_err(dwc->dev, "couldn't add resources to xHCI device\n");
+ goto err1;
+ }
+
+ ret = platform_device_add(xhci);
+ if (ret) {
+ dev_err(dwc->dev, "failed to register xHCI device\n");
+ goto err1;
+ }
+
+ return 0;
+
+err1:
+ platform_device_put(xhci);
+
+err0:
+ return ret;
+}
+
+void dwc3_host_exit(struct dwc3 *dwc)
+{
+ platform_device_unregister(dwc->xhci);
+}
diff --git a/drivers/usb/dwc3/io.h b/drivers/usb/dwc3/io.h
index bc957db1ea4b..071d561f3e68 100644
--- a/drivers/usb/dwc3/io.h
+++ b/drivers/usb/dwc3/io.h
@@ -39,7 +39,7 @@
#ifndef __DRIVERS_USB_DWC3_IO_H
#define __DRIVERS_USB_DWC3_IO_H
-#include <asm/io.h>
+#include <linux/io.h>
static inline u32 dwc3_readl(void __iomem *base, u32 offset)
{
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 23a447373c51..7ecb68a67411 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -15,6 +15,7 @@
menuconfig USB_GADGET
tristate "USB Gadget Support"
+ select NLS
help
USB is a master/slave protocol, organized with one master
host (such as a PC) controlling up to 127 peripheral devices.
@@ -124,7 +125,6 @@ config USB_GADGET_STORAGE_NUM_BUFFERS
#
choice
prompt "USB Peripheral Controller"
- depends on USB_GADGET
help
A USB device uses a controller to talk to its host.
Systems should have only one such upstream link.
@@ -234,7 +234,6 @@ config USB_R8A66597
config USB_RENESAS_USBHS_UDC
tristate 'Renesas USBHS controller'
- depends on SUPERH || ARCH_SHMOBILE
depends on USB_RENESAS_USBHS
select USB_GADGET_DUALSPEED
help
@@ -264,7 +263,6 @@ config USB_PXA27X
config USB_S3C_HSOTG
tristate "S3C HS/OtG USB Device controller"
depends on S3C_DEV_USB_HSOTG
- select USB_GADGET_S3C_HSOTG_PIO
select USB_GADGET_DUALSPEED
help
The Samsung S3C64XX USB2.0 high-speed gadget controller
@@ -310,25 +308,13 @@ config USB_S3C_HSUDC
This driver has been tested on S3C2416 and S3C2450 processors.
-config USB_PXA_U2O
- tristate "PXA9xx Processor USB2.0 controller"
- depends on ARCH_MMP
+config USB_MV_UDC
+ tristate "Marvell USB2.0 Device Controller"
select USB_GADGET_DUALSPEED
help
- PXA9xx Processor series include a high speed USB2.0 device
- controller, which support high speed and full speed USB peripheral.
-
-config USB_GADGET_DWC3
- tristate "DesignWare USB3.0 (DRD) Controller"
- depends on USB_DWC3
- select USB_GADGET_DUALSPEED
- select USB_GADGET_SUPERSPEED
- help
- DesignWare USB3.0 controller is a SuperSpeed USB3.0 Controller
- which can be configured for peripheral-only, host-only, hub-only
- and Dual-Role operation. This Controller was first integrated into
- the OMAP5 series of processors. More information about the OMAP5
- version of this controller, refer to http://www.ti.com/omap5.
+ Marvell SoCs (including PXA and MMP series) include a high speed
+ USB2.0 OTG controller, which can be configured as a high speed or
+ full speed USB peripheral.
#
# Controllers available in both integrated and discrete versions
@@ -544,12 +530,10 @@ endchoice
# Selected by UDC drivers that support high-speed operation.
config USB_GADGET_DUALSPEED
bool
- depends on USB_GADGET
# Selected by UDC drivers that support super-speed operation
config USB_GADGET_SUPERSPEED
bool
- depends on USB_GADGET
depends on USB_GADGET_DUALSPEED
#
@@ -557,7 +541,6 @@ config USB_GADGET_SUPERSPEED
#
choice
tristate "USB Gadget Drivers"
- depends on USB_GADGET
default USB_ETH
help
A Linux "Gadget Driver" talks to the USB Peripheral Controller
diff --git a/drivers/usb/gadget/Makefile b/drivers/usb/gadget/Makefile
index b54ac6190890..b7f6eefc3927 100644
--- a/drivers/usb/gadget/Makefile
+++ b/drivers/usb/gadget/Makefile
@@ -27,7 +27,7 @@ obj-$(CONFIG_USB_S3C_HSOTG) += s3c-hsotg.o
obj-$(CONFIG_USB_S3C_HSUDC) += s3c-hsudc.o
obj-$(CONFIG_USB_LANGWELL) += langwell_udc.o
obj-$(CONFIG_USB_EG20T) += pch_udc.o
-obj-$(CONFIG_USB_PXA_U2O) += mv_udc.o
+obj-$(CONFIG_USB_MV_UDC) += mv_udc.o
mv_udc-y := mv_udc_core.o
obj-$(CONFIG_USB_CI13XXX_MSM) += ci13xxx_msm.o
obj-$(CONFIG_USB_FUSB300) += fusb300_udc.o
diff --git a/drivers/usb/gadget/amd5536udc.c b/drivers/usb/gadget/amd5536udc.c
index 4730016d7cd4..c16ff55a74e8 100644
--- a/drivers/usb/gadget/amd5536udc.c
+++ b/drivers/usb/gadget/amd5536udc.c
@@ -152,15 +152,15 @@ static const char *ep_string[] = {
};
/* DMA usage flag */
-static int use_dma = 1;
+static bool use_dma = 1;
/* packet per buffer dma */
-static int use_dma_ppb = 1;
+static bool use_dma_ppb = 1;
/* with per descr. update */
-static int use_dma_ppb_du;
+static bool use_dma_ppb_du;
/* buffer fill mode */
static int use_dma_bufferfill_mode;
/* full speed only mode */
-static int use_fullspeed;
+static bool use_fullspeed;
/* tx buffer size for high speed */
static unsigned long hs_tx_buf = UDC_EPIN_BUFF_SIZE;
@@ -1959,7 +1959,7 @@ static int amd5536_start(struct usb_gadget_driver *driver,
u32 tmp;
if (!driver || !bind || !driver->setup
- || driver->speed != USB_SPEED_HIGH)
+ || driver->max_speed < USB_SPEED_HIGH)
return -EINVAL;
if (!dev)
return -ENODEV;
@@ -3349,7 +3349,7 @@ static int udc_probe(struct udc *dev)
dev_set_name(&dev->gadget.dev, "gadget");
dev->gadget.dev.release = gadget_release;
dev->gadget.name = name;
- dev->gadget.is_dualspeed = 1;
+ dev->gadget.max_speed = USB_SPEED_HIGH;
/* init registers, interrupts, ... */
startup_registers(dev);
diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c
index 8efe0fa9228d..143a7256b598 100644
--- a/drivers/usb/gadget/at91_udc.c
+++ b/drivers/usb/gadget/at91_udc.c
@@ -1633,7 +1633,7 @@ static int at91_start(struct usb_gadget_driver *driver,
unsigned long flags;
if (!driver
- || driver->speed < USB_SPEED_FULL
+ || driver->max_speed < USB_SPEED_FULL
|| !bind
|| !driver->setup) {
DBG("bad parameter.\n");
@@ -1748,7 +1748,7 @@ static int __init at91udc_probe(struct platform_device *pdev)
/* rm9200 needs manual D+ pullup; off by default */
if (cpu_is_at91rm9200()) {
- if (udc->board.pullup_pin <= 0) {
+ if (gpio_is_valid(udc->board.pullup_pin)) {
DBG("no D+ pullup?\n");
retval = -ENODEV;
goto fail0;
@@ -1815,7 +1815,7 @@ static int __init at91udc_probe(struct platform_device *pdev)
DBG("request irq %d failed\n", udc->udp_irq);
goto fail1;
}
- if (udc->board.vbus_pin > 0) {
+ if (gpio_is_valid(udc->board.vbus_pin)) {
retval = gpio_request(udc->board.vbus_pin, "udc_vbus");
if (retval < 0) {
DBG("request vbus pin failed\n");
@@ -1859,10 +1859,10 @@ static int __init at91udc_probe(struct platform_device *pdev)
INFO("%s version %s\n", driver_name, DRIVER_VERSION);
return 0;
fail4:
- if (udc->board.vbus_pin > 0 && !udc->board.vbus_polled)
+ if (gpio_is_valid(udc->board.vbus_pin) && !udc->board.vbus_polled)
free_irq(udc->board.vbus_pin, udc);
fail3:
- if (udc->board.vbus_pin > 0)
+ if (gpio_is_valid(udc->board.vbus_pin))
gpio_free(udc->board.vbus_pin);
fail2:
free_irq(udc->udp_irq, udc);
@@ -1897,7 +1897,7 @@ static int __exit at91udc_remove(struct platform_device *pdev)
device_init_wakeup(&pdev->dev, 0);
remove_debug_file(udc);
- if (udc->board.vbus_pin > 0) {
+ if (gpio_is_valid(udc->board.vbus_pin)) {
free_irq(udc->board.vbus_pin, udc);
gpio_free(udc->board.vbus_pin);
}
@@ -1941,7 +1941,7 @@ static int at91udc_suspend(struct platform_device *pdev, pm_message_t mesg)
enable_irq_wake(udc->udp_irq);
udc->active_suspend = wake;
- if (udc->board.vbus_pin > 0 && !udc->board.vbus_polled && wake)
+ if (gpio_is_valid(udc->board.vbus_pin) && !udc->board.vbus_polled && wake)
enable_irq_wake(udc->board.vbus_pin);
return 0;
}
@@ -1951,7 +1951,7 @@ static int at91udc_resume(struct platform_device *pdev)
struct at91_udc *udc = platform_get_drvdata(pdev);
unsigned long flags;
- if (udc->board.vbus_pin > 0 && !udc->board.vbus_polled &&
+ if (gpio_is_valid(udc->board.vbus_pin) && !udc->board.vbus_polled &&
udc->active_suspend)
disable_irq_wake(udc->board.vbus_pin);
diff --git a/drivers/usb/gadget/atmel_usba_udc.c b/drivers/usb/gadget/atmel_usba_udc.c
index 271a9d873608..e2fb6d583bd9 100644
--- a/drivers/usb/gadget/atmel_usba_udc.c
+++ b/drivers/usb/gadget/atmel_usba_udc.c
@@ -1038,7 +1038,7 @@ static struct usba_udc the_udc = {
.gadget = {
.ops = &usba_udc_ops,
.ep_list = LIST_HEAD_INIT(the_udc.gadget.ep_list),
- .is_dualspeed = 1,
+ .max_speed = USB_SPEED_HIGH,
.name = "atmel_usba_udc",
.dev = {
.init_name = "gadget",
diff --git a/drivers/usb/gadget/ci13xxx_udc.c b/drivers/usb/gadget/ci13xxx_udc.c
index 9a0c3979ff43..27e313718422 100644
--- a/drivers/usb/gadget/ci13xxx_udc.c
+++ b/drivers/usb/gadget/ci13xxx_udc.c
@@ -182,6 +182,16 @@ static inline int hw_ep_bit(int num, int dir)
return num + (dir ? 16 : 0);
}
+static int ep_to_bit(int n)
+{
+ int fill = 16 - hw_ep_max / 2;
+
+ if (n >= hw_ep_max / 2)
+ n += fill;
+
+ return n;
+}
+
/**
* hw_aread: reads from register bitfield
* @addr: address relative to bus map
@@ -440,12 +450,13 @@ static int hw_ep_get_halt(int num, int dir)
/**
* hw_test_and_clear_setup_status: test & clear setup status (execute without
* interruption)
- * @n: bit number (endpoint)
+ * @n: endpoint number
*
* This function returns setup status
*/
static int hw_test_and_clear_setup_status(int n)
{
+ n = ep_to_bit(n);
return hw_ctest_and_clear(CAP_ENDPTSETUPSTAT, BIT(n));
}
@@ -641,12 +652,13 @@ static int hw_register_write(u16 addr, u32 data)
/**
* hw_test_and_clear_complete: test & clear complete status (execute without
* interruption)
- * @n: bit number (endpoint)
+ * @n: endpoint number
*
* This function returns complete status
*/
static int hw_test_and_clear_complete(int n)
{
+ n = ep_to_bit(n);
return hw_ctest_and_clear(CAP_ENDPTCOMPLETE, BIT(n));
}
@@ -754,8 +766,11 @@ static ssize_t show_device(struct device *dev, struct device_attribute *attr,
n += scnprintf(buf + n, PAGE_SIZE - n, "speed = %d\n",
gadget->speed);
+ n += scnprintf(buf + n, PAGE_SIZE - n, "max_speed = %d\n",
+ gadget->max_speed);
+ /* TODO: Scheduled for removal in 3.8. */
n += scnprintf(buf + n, PAGE_SIZE - n, "is_dualspeed = %d\n",
- gadget->is_dualspeed);
+ gadget_is_dualspeed(gadget));
n += scnprintf(buf + n, PAGE_SIZE - n, "is_otg = %d\n",
gadget->is_otg);
n += scnprintf(buf + n, PAGE_SIZE - n, "is_a_peripheral = %d\n",
@@ -798,7 +813,7 @@ static ssize_t show_driver(struct device *dev, struct device_attribute *attr,
n += scnprintf(buf + n, PAGE_SIZE - n, "function = %s\n",
(driver->function ? driver->function : ""));
n += scnprintf(buf + n, PAGE_SIZE - n, "max speed = %d\n",
- driver->speed);
+ driver->max_speed);
return n;
}
@@ -2563,9 +2578,7 @@ static int ci13xxx_start(struct usb_gadget_driver *driver,
if (driver == NULL ||
bind == NULL ||
driver->setup == NULL ||
- driver->disconnect == NULL ||
- driver->suspend == NULL ||
- driver->resume == NULL)
+ driver->disconnect == NULL)
return -EINVAL;
else if (udc == NULL)
return -ENODEV;
@@ -2693,8 +2706,6 @@ static int ci13xxx_stop(struct usb_gadget_driver *driver)
driver->unbind == NULL ||
driver->setup == NULL ||
driver->disconnect == NULL ||
- driver->suspend == NULL ||
- driver->resume == NULL ||
driver != udc->driver)
return -EINVAL;
@@ -2793,7 +2804,7 @@ static irqreturn_t udc_irq(void)
isr_statistics.pci++;
udc->gadget.speed = hw_port_is_high_speed() ?
USB_SPEED_HIGH : USB_SPEED_FULL;
- if (udc->suspended) {
+ if (udc->suspended && udc->driver->resume) {
spin_unlock(udc->lock);
udc->driver->resume(&udc->gadget);
spin_lock(udc->lock);
@@ -2807,7 +2818,8 @@ static irqreturn_t udc_irq(void)
isr_tr_complete_handler(udc);
}
if (USBi_SLI & intr) {
- if (udc->gadget.speed != USB_SPEED_UNKNOWN) {
+ if (udc->gadget.speed != USB_SPEED_UNKNOWN &&
+ udc->driver->suspend) {
udc->suspended = 1;
spin_unlock(udc->lock);
udc->driver->suspend(&udc->gadget);
@@ -2871,7 +2883,7 @@ static int udc_probe(struct ci13xxx_udc_driver *driver, struct device *dev,
udc->gadget.ops = &usb_gadget_ops;
udc->gadget.speed = USB_SPEED_UNKNOWN;
- udc->gadget.is_dualspeed = 1;
+ udc->gadget.max_speed = USB_SPEED_HIGH;
udc->gadget.is_otg = 0;
udc->gadget.name = driver->name;
diff --git a/drivers/usb/gadget/ci13xxx_udc.h b/drivers/usb/gadget/ci13xxx_udc.h
index 23707775cb43..f4871e1fac59 100644
--- a/drivers/usb/gadget/ci13xxx_udc.h
+++ b/drivers/usb/gadget/ci13xxx_udc.h
@@ -127,7 +127,7 @@ struct ci13xxx {
struct ci13xxx_ep ci13xxx_ep[ENDPT_MAX]; /* extended endpts */
u32 ep0_dir; /* ep0 direction */
#define ep0out ci13xxx_ep[0]
-#define ep0in ci13xxx_ep[16]
+#define ep0in ci13xxx_ep[hw_ep_max / 2]
u8 remote_wakeup; /* Is remote wakeup feature
enabled by the host? */
u8 suspended; /* suspended by the host */
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index f71b0787983f..a95de6a4a134 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -1535,9 +1535,9 @@ composite_resume(struct usb_gadget *gadget)
static struct usb_gadget_driver composite_driver = {
#ifdef CONFIG_USB_GADGET_SUPERSPEED
- .speed = USB_SPEED_SUPER,
+ .max_speed = USB_SPEED_SUPER,
#else
- .speed = USB_SPEED_HIGH,
+ .max_speed = USB_SPEED_HIGH,
#endif
.unbind = composite_unbind,
@@ -1584,8 +1584,8 @@ int usb_composite_probe(struct usb_composite_driver *driver,
driver->iProduct = driver->name;
composite_driver.function = (char *) driver->name;
composite_driver.driver.name = driver->name;
- composite_driver.speed = min((u8)composite_driver.speed,
- (u8)driver->max_speed);
+ composite_driver.max_speed =
+ min_t(u8, composite_driver.max_speed, driver->max_speed);
composite = driver;
composite_gadget_bind = bind;
diff --git a/drivers/usb/gadget/dbgp.c b/drivers/usb/gadget/dbgp.c
index 6256420089f3..19d7bb0df75a 100644
--- a/drivers/usb/gadget/dbgp.c
+++ b/drivers/usb/gadget/dbgp.c
@@ -404,7 +404,7 @@ fail:
static struct usb_gadget_driver dbgp_driver = {
.function = "dbgp",
- .speed = USB_SPEED_HIGH,
+ .max_speed = USB_SPEED_HIGH,
.unbind = dbgp_unbind,
.setup = dbgp_setup,
.disconnect = dbgp_disconnect,
diff --git a/drivers/usb/gadget/dummy_hcd.c b/drivers/usb/gadget/dummy_hcd.c
index ab8f1b488d54..db815c2da7ed 100644
--- a/drivers/usb/gadget/dummy_hcd.c
+++ b/drivers/usb/gadget/dummy_hcd.c
@@ -823,19 +823,18 @@ static int dummy_pullup (struct usb_gadget *_gadget, int value)
if (value && dum->driver) {
if (mod_data.is_super_speed)
- dum->gadget.speed = dum->driver->speed;
+ dum->gadget.speed = dum->driver->max_speed;
else if (mod_data.is_high_speed)
dum->gadget.speed = min_t(u8, USB_SPEED_HIGH,
- dum->driver->speed);
+ dum->driver->max_speed);
else
dum->gadget.speed = USB_SPEED_FULL;
dummy_udc_udpate_ep0(dum);
- if (dum->gadget.speed < dum->driver->speed)
+ if (dum->gadget.speed < dum->driver->max_speed)
dev_dbg(udc_dev(dum), "This device can perform faster"
- " if you connect it to a %s port...\n",
- (dum->driver->speed == USB_SPEED_SUPER ?
- "SuperSpeed" : "HighSpeed"));
+ " if you connect it to a %s port...\n",
+ usb_speed_string(dum->driver->max_speed));
}
dum_hcd = gadget_to_dummy_hcd(_gadget);
@@ -898,7 +897,7 @@ static int dummy_udc_start(struct usb_gadget *g,
struct dummy_hcd *dum_hcd = gadget_to_dummy_hcd(g);
struct dummy *dum = dum_hcd->dum;
- if (driver->speed == USB_SPEED_UNKNOWN)
+ if (driver->max_speed == USB_SPEED_UNKNOWN)
return -EINVAL;
/*
@@ -977,7 +976,7 @@ static int dummy_udc_probe (struct platform_device *pdev)
dum->gadget.name = gadget_name;
dum->gadget.ops = &dummy_ops;
- dum->gadget.is_dualspeed = 1;
+ dum->gadget.max_speed = USB_SPEED_SUPER;
dev_set_name(&dum->gadget.dev, "gadget");
dum->gadget.dev.parent = &pdev->dev;
diff --git a/drivers/usb/gadget/epautoconf.c b/drivers/usb/gadget/epautoconf.c
index 596a0b464e61..753aa0683ac1 100644
--- a/drivers/usb/gadget/epautoconf.c
+++ b/drivers/usb/gadget/epautoconf.c
@@ -130,9 +130,6 @@ ep_matches (
num_req_streams = ep_comp->bmAttributes & 0x1f;
if (num_req_streams > ep->max_streams)
return 0;
- /* Update the ep_comp descriptor if needed */
- if (num_req_streams != ep->max_streams)
- ep_comp->bmAttributes = ep->max_streams;
}
}
@@ -152,7 +149,7 @@ ep_matches (
switch (type) {
case USB_ENDPOINT_XFER_INT:
/* INT: limit 64 bytes full speed, 1024 high/super speed */
- if (!gadget->is_dualspeed && max > 64)
+ if (!gadget_is_dualspeed(gadget) && max > 64)
return 0;
/* FALLTHROUGH */
@@ -160,12 +157,12 @@ ep_matches (
/* ISO: limit 1023 bytes full speed, 1024 high/super speed */
if (ep->maxpacket < max)
return 0;
- if (!gadget->is_dualspeed && max > 1023)
+ if (!gadget_is_dualspeed(gadget) && max > 1023)
return 0;
/* BOTH: "high bandwidth" works only at high speed */
if ((desc->wMaxPacketSize & cpu_to_le16(3<<11))) {
- if (!gadget->is_dualspeed)
+ if (!gadget_is_dualspeed(gadget))
return 0;
/* configure your hardware with enough buffering!! */
}
diff --git a/drivers/usb/gadget/ether.c b/drivers/usb/gadget/ether.c
index 0cd764d59351..a28f6ffcd0f3 100644
--- a/drivers/usb/gadget/ether.c
+++ b/drivers/usb/gadget/ether.c
@@ -250,9 +250,9 @@ static struct usb_configuration rndis_config_driver = {
/*-------------------------------------------------------------------------*/
#ifdef CONFIG_USB_ETH_EEM
-static int use_eem = 1;
+static bool use_eem = 1;
#else
-static int use_eem;
+static bool use_eem;
#endif
module_param(use_eem, bool, 0);
MODULE_PARM_DESC(use_eem, "use CDC EEM mode");
diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c
index acb38004eec0..f63dc6c150d2 100644
--- a/drivers/usb/gadget/f_fs.c
+++ b/drivers/usb/gadget/f_fs.c
@@ -1037,7 +1037,6 @@ static int ffs_sb_fill(struct super_block *sb, void *_data, int silent)
{
struct ffs_sb_fill_data *data = _data;
struct inode *inode;
- struct dentry *d;
struct ffs_data *ffs;
ENTER();
@@ -1045,7 +1044,7 @@ static int ffs_sb_fill(struct super_block *sb, void *_data, int silent)
/* Initialise data */
ffs = ffs_data_new();
if (unlikely(!ffs))
- goto enomem0;
+ goto Enomem;
ffs->sb = sb;
ffs->dev_name = data->dev_name;
@@ -1065,26 +1064,21 @@ static int ffs_sb_fill(struct super_block *sb, void *_data, int silent)
&simple_dir_inode_operations,
&data->perms);
if (unlikely(!inode))
- goto enomem1;
- d = d_alloc_root(inode);
- if (unlikely(!d))
- goto enomem2;
- sb->s_root = d;
+ goto Enomem;
+ sb->s_root = d_alloc_root(inode);
+ if (unlikely(!sb->s_root)) {
+ iput(inode);
+ goto Enomem;
+ }
/* EP0 file */
if (unlikely(!ffs_sb_create_file(sb, "ep0", ffs,
&ffs_ep0_operations, NULL)))
- goto enomem3;
+ goto Enomem;
return 0;
-enomem3:
- dput(d);
-enomem2:
- iput(inode);
-enomem1:
- ffs_data_put(ffs);
-enomem0:
+Enomem:
return -ENOMEM;
}
@@ -1196,14 +1190,11 @@ ffs_fs_mount(struct file_system_type *t, int flags,
static void
ffs_fs_kill_sb(struct super_block *sb)
{
- void *ptr;
-
ENTER();
kill_litter_super(sb);
- ptr = xchg(&sb->s_fs_info, NULL);
- if (ptr)
- ffs_data_put(ptr);
+ if (sb->s_fs_info)
+ ffs_data_put(sb->s_fs_info);
}
static struct file_system_type ffs_fs_type = {
@@ -1408,7 +1399,7 @@ static int ffs_epfiles_create(struct ffs_data *ffs)
ENTER();
count = ffs->eps_count;
- epfiles = kzalloc(count * sizeof *epfiles, GFP_KERNEL);
+ epfiles = kcalloc(count, sizeof(*epfiles), GFP_KERNEL);
if (!epfiles)
return -ENOMEM;
diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c
index c39d58860fa0..6353eca1e852 100644
--- a/drivers/usb/gadget/f_mass_storage.c
+++ b/drivers/usb/gadget/f_mass_storage.c
@@ -1873,17 +1873,14 @@ static int check_command(struct fsg_common *common, int cmnd_size,
common->lun, lun);
/* Check the LUN */
- if (common->lun < common->nluns) {
- curlun = &common->luns[common->lun];
- common->curlun = curlun;
+ curlun = common->curlun;
+ if (curlun) {
if (common->cmnd[0] != REQUEST_SENSE) {
curlun->sense_data = SS_NO_SENSE;
curlun->sense_data_info = 0;
curlun->info_valid = 0;
}
} else {
- common->curlun = NULL;
- curlun = NULL;
common->bad_lun_okay = 0;
/*
@@ -1929,6 +1926,17 @@ static int check_command(struct fsg_common *common, int cmnd_size,
return 0;
}
+/* Wrapper around check_command() that handles data sizes given in blocks */
+static int check_command_size_in_blocks(struct fsg_common *common,
+ int cmnd_size, enum data_direction data_dir,
+ unsigned int mask, int needs_medium, const char *name)
+{
+ if (common->curlun)
+ common->data_size_from_cmnd <<= common->curlun->blkbits;
+ return check_command(common, cmnd_size, data_dir,
+ mask, needs_medium, name);
+}
+
static int do_scsi_command(struct fsg_common *common)
{
struct fsg_buffhd *bh;
@@ -2011,9 +2019,9 @@ static int do_scsi_command(struct fsg_common *common)
case READ_6:
i = common->cmnd[4];
- common->data_size_from_cmnd = (i == 0 ? 256 : i) <<
- common->curlun->blkbits;
- reply = check_command(common, 6, DATA_DIR_TO_HOST,
+ common->data_size_from_cmnd = (i == 0) ? 256 : i;
+ reply = check_command_size_in_blocks(common, 6,
+ DATA_DIR_TO_HOST,
(7<<1) | (1<<4), 1,
"READ(6)");
if (reply == 0)
@@ -2022,9 +2030,9 @@ static int do_scsi_command(struct fsg_common *common)
case READ_10:
common->data_size_from_cmnd =
- get_unaligned_be16(&common->cmnd[7]) <<
- common->curlun->blkbits;
- reply = check_command(common, 10, DATA_DIR_TO_HOST,
+ get_unaligned_be16(&common->cmnd[7]);
+ reply = check_command_size_in_blocks(common, 10,
+ DATA_DIR_TO_HOST,
(1<<1) | (0xf<<2) | (3<<7), 1,
"READ(10)");
if (reply == 0)
@@ -2033,9 +2041,9 @@ static int do_scsi_command(struct fsg_common *common)
case READ_12:
common->data_size_from_cmnd =
- get_unaligned_be32(&common->cmnd[6]) <<
- common->curlun->blkbits;
- reply = check_command(common, 12, DATA_DIR_TO_HOST,
+ get_unaligned_be32(&common->cmnd[6]);
+ reply = check_command_size_in_blocks(common, 12,
+ DATA_DIR_TO_HOST,
(1<<1) | (0xf<<2) | (0xf<<6), 1,
"READ(12)");
if (reply == 0)
@@ -2134,9 +2142,9 @@ static int do_scsi_command(struct fsg_common *common)
case WRITE_6:
i = common->cmnd[4];
- common->data_size_from_cmnd = (i == 0 ? 256 : i) <<
- common->curlun->blkbits;
- reply = check_command(common, 6, DATA_DIR_FROM_HOST,
+ common->data_size_from_cmnd = (i == 0) ? 256 : i;
+ reply = check_command_size_in_blocks(common, 6,
+ DATA_DIR_FROM_HOST,
(7<<1) | (1<<4), 1,
"WRITE(6)");
if (reply == 0)
@@ -2145,9 +2153,9 @@ static int do_scsi_command(struct fsg_common *common)
case WRITE_10:
common->data_size_from_cmnd =
- get_unaligned_be16(&common->cmnd[7]) <<
- common->curlun->blkbits;
- reply = check_command(common, 10, DATA_DIR_FROM_HOST,
+ get_unaligned_be16(&common->cmnd[7]);
+ reply = check_command_size_in_blocks(common, 10,
+ DATA_DIR_FROM_HOST,
(1<<1) | (0xf<<2) | (3<<7), 1,
"WRITE(10)");
if (reply == 0)
@@ -2156,9 +2164,9 @@ static int do_scsi_command(struct fsg_common *common)
case WRITE_12:
common->data_size_from_cmnd =
- get_unaligned_be32(&common->cmnd[6]) <<
- common->curlun->blkbits;
- reply = check_command(common, 12, DATA_DIR_FROM_HOST,
+ get_unaligned_be32(&common->cmnd[6]);
+ reply = check_command_size_in_blocks(common, 12,
+ DATA_DIR_FROM_HOST,
(1<<1) | (0xf<<2) | (0xf<<6), 1,
"WRITE(12)");
if (reply == 0)
@@ -2273,6 +2281,10 @@ static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh)
if (common->data_size == 0)
common->data_dir = DATA_DIR_NONE;
common->lun = cbw->Lun;
+ if (common->lun >= 0 && common->lun < common->nluns)
+ common->curlun = &common->luns[common->lun];
+ else
+ common->curlun = NULL;
common->tag = cbw->Tag;
return 0;
}
@@ -2763,7 +2775,7 @@ static struct fsg_common *fsg_common_init(struct fsg_common *common,
* Create the LUNs, open their backing files, and register the
* LUN devices in sysfs.
*/
- curlun = kzalloc(nluns * sizeof *curlun, GFP_KERNEL);
+ curlun = kcalloc(nluns, sizeof(*curlun), GFP_KERNEL);
if (unlikely(!curlun)) {
rc = -ENOMEM;
goto error_release;
@@ -2975,6 +2987,7 @@ static void fsg_unbind(struct usb_configuration *c, struct usb_function *f)
fsg_common_put(common);
usb_free_descriptors(fsg->function.descriptors);
usb_free_descriptors(fsg->function.hs_descriptors);
+ usb_free_descriptors(fsg->function.ss_descriptors);
kfree(fsg);
}
diff --git a/drivers/usb/gadget/f_phonet.c b/drivers/usb/gadget/f_phonet.c
index 16a509ae517b..7cdcb63b21ff 100644
--- a/drivers/usb/gadget/f_phonet.c
+++ b/drivers/usb/gadget/f_phonet.c
@@ -298,11 +298,10 @@ static void pn_net_setup(struct net_device *dev)
static int
pn_rx_submit(struct f_phonet *fp, struct usb_request *req, gfp_t gfp_flags)
{
- struct net_device *dev = fp->dev;
struct page *page;
int err;
- page = __netdev_alloc_page(dev, gfp_flags);
+ page = alloc_page(gfp_flags);
if (!page)
return -ENOMEM;
@@ -312,7 +311,7 @@ pn_rx_submit(struct f_phonet *fp, struct usb_request *req, gfp_t gfp_flags)
err = usb_ep_queue(fp->out_ep, req, gfp_flags);
if (unlikely(err))
- netdev_free_page(dev, page);
+ put_page(page);
return err;
}
@@ -374,9 +373,9 @@ static void pn_rx_complete(struct usb_ep *ep, struct usb_request *req)
}
if (page)
- netdev_free_page(dev, page);
+ put_page(page);
if (req)
- pn_rx_submit(fp, req, GFP_ATOMIC);
+ pn_rx_submit(fp, req, GFP_ATOMIC | __GFP_COLD);
}
/*-------------------------------------------------------------------------*/
@@ -436,7 +435,7 @@ static int pn_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
netif_carrier_on(dev);
for (i = 0; i < phonet_rxq_size; i++)
- pn_rx_submit(fp, fp->out_reqv[i], GFP_ATOMIC);
+ pn_rx_submit(fp, fp->out_reqv[i], GFP_ATOMIC | __GFP_COLD);
}
spin_unlock(&port->lock);
return 0;
diff --git a/drivers/usb/gadget/f_serial.c b/drivers/usb/gadget/f_serial.c
index 91fdf790ed20..cf33a8d0fd5d 100644
--- a/drivers/usb/gadget/f_serial.c
+++ b/drivers/usb/gadget/f_serial.c
@@ -131,8 +131,8 @@ static int gser_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
}
if (!gser->port.in->desc || !gser->port.out->desc) {
DBG(cdev, "activate generic ttyGS%d\n", gser->port_num);
- if (!config_ep_by_speed(cdev->gadget, f, gser->port.in) ||
- !config_ep_by_speed(cdev->gadget, f, gser->port.out)) {
+ if (config_ep_by_speed(cdev->gadget, f, gser->port.in) ||
+ config_ep_by_speed(cdev->gadget, f, gser->port.out)) {
gser->port.in->desc = NULL;
gser->port.out->desc = NULL;
return -EINVAL;
diff --git a/drivers/usb/gadget/file_storage.c b/drivers/usb/gadget/file_storage.c
index 11b5196284ae..47766f0e7caa 100644
--- a/drivers/usb/gadget/file_storage.c
+++ b/drivers/usb/gadget/file_storage.c
@@ -303,16 +303,16 @@ MODULE_LICENSE("Dual BSD/GPL");
static struct {
char *file[FSG_MAX_LUNS];
char *serial;
- int ro[FSG_MAX_LUNS];
- int nofua[FSG_MAX_LUNS];
+ bool ro[FSG_MAX_LUNS];
+ bool nofua[FSG_MAX_LUNS];
unsigned int num_filenames;
unsigned int num_ros;
unsigned int num_nofuas;
unsigned int nluns;
- int removable;
- int can_stall;
- int cdrom;
+ bool removable;
+ bool can_stall;
+ bool cdrom;
char *transport_parm;
char *protocol_parm;
@@ -2297,19 +2297,17 @@ static int check_command(struct fsg_dev *fsg, int cmnd_size,
DBG(fsg, "using LUN %d from CBW, "
"not LUN %d from CDB\n",
fsg->lun, lun);
- } else
- fsg->lun = lun; // Use LUN from the command
+ }
/* Check the LUN */
- if (fsg->lun < fsg->nluns) {
- fsg->curlun = curlun = &fsg->luns[fsg->lun];
+ curlun = fsg->curlun;
+ if (curlun) {
if (fsg->cmnd[0] != REQUEST_SENSE) {
curlun->sense_data = SS_NO_SENSE;
curlun->sense_data_info = 0;
curlun->info_valid = 0;
}
} else {
- fsg->curlun = curlun = NULL;
fsg->bad_lun_okay = 0;
/* INQUIRY and REQUEST SENSE commands are explicitly allowed
@@ -2351,6 +2349,16 @@ static int check_command(struct fsg_dev *fsg, int cmnd_size,
return 0;
}
+/* Wrapper around check_command() that handles data sizes given in blocks */
+static int check_command_size_in_blocks(struct fsg_dev *fsg, int cmnd_size,
+ enum data_direction data_dir, unsigned int mask,
+ int needs_medium, const char *name)
+{
+ if (fsg->curlun)
+ fsg->data_size_from_cmnd <<= fsg->curlun->blkbits;
+ return check_command(fsg, cmnd_size, data_dir,
+ mask, needs_medium, name);
+}
static int do_scsi_command(struct fsg_dev *fsg)
{
@@ -2425,26 +2433,27 @@ static int do_scsi_command(struct fsg_dev *fsg)
case READ_6:
i = fsg->cmnd[4];
- fsg->data_size_from_cmnd = (i == 0 ? 256 : i) << fsg->curlun->blkbits;
- if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
+ fsg->data_size_from_cmnd = (i == 0) ? 256 : i;
+ if ((reply = check_command_size_in_blocks(fsg, 6,
+ DATA_DIR_TO_HOST,
(7<<1) | (1<<4), 1,
"READ(6)")) == 0)
reply = do_read(fsg);
break;
case READ_10:
- fsg->data_size_from_cmnd =
- get_unaligned_be16(&fsg->cmnd[7]) << fsg->curlun->blkbits;
- if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
+ fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
+ if ((reply = check_command_size_in_blocks(fsg, 10,
+ DATA_DIR_TO_HOST,
(1<<1) | (0xf<<2) | (3<<7), 1,
"READ(10)")) == 0)
reply = do_read(fsg);
break;
case READ_12:
- fsg->data_size_from_cmnd =
- get_unaligned_be32(&fsg->cmnd[6]) << fsg->curlun->blkbits;
- if ((reply = check_command(fsg, 12, DATA_DIR_TO_HOST,
+ fsg->data_size_from_cmnd = get_unaligned_be32(&fsg->cmnd[6]);
+ if ((reply = check_command_size_in_blocks(fsg, 12,
+ DATA_DIR_TO_HOST,
(1<<1) | (0xf<<2) | (0xf<<6), 1,
"READ(12)")) == 0)
reply = do_read(fsg);
@@ -2529,26 +2538,27 @@ static int do_scsi_command(struct fsg_dev *fsg)
case WRITE_6:
i = fsg->cmnd[4];
- fsg->data_size_from_cmnd = (i == 0 ? 256 : i) << fsg->curlun->blkbits;
- if ((reply = check_command(fsg, 6, DATA_DIR_FROM_HOST,
+ fsg->data_size_from_cmnd = (i == 0) ? 256 : i;
+ if ((reply = check_command_size_in_blocks(fsg, 6,
+ DATA_DIR_FROM_HOST,
(7<<1) | (1<<4), 1,
"WRITE(6)")) == 0)
reply = do_write(fsg);
break;
case WRITE_10:
- fsg->data_size_from_cmnd =
- get_unaligned_be16(&fsg->cmnd[7]) << fsg->curlun->blkbits;
- if ((reply = check_command(fsg, 10, DATA_DIR_FROM_HOST,
+ fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
+ if ((reply = check_command_size_in_blocks(fsg, 10,
+ DATA_DIR_FROM_HOST,
(1<<1) | (0xf<<2) | (3<<7), 1,
"WRITE(10)")) == 0)
reply = do_write(fsg);
break;
case WRITE_12:
- fsg->data_size_from_cmnd =
- get_unaligned_be32(&fsg->cmnd[6]) << fsg->curlun->blkbits;
- if ((reply = check_command(fsg, 12, DATA_DIR_FROM_HOST,
+ fsg->data_size_from_cmnd = get_unaligned_be32(&fsg->cmnd[6]);
+ if ((reply = check_command_size_in_blocks(fsg, 12,
+ DATA_DIR_FROM_HOST,
(1<<1) | (0xf<<2) | (0xf<<6), 1,
"WRITE(12)")) == 0)
reply = do_write(fsg);
@@ -2715,7 +2725,17 @@ static int get_next_command(struct fsg_dev *fsg)
memcpy(fsg->cmnd, fsg->cbbuf_cmnd, fsg->cmnd_size);
fsg->cbbuf_cmnd_size = 0;
spin_unlock_irq(&fsg->lock);
+
+ /* Use LUN from the command */
+ fsg->lun = fsg->cmnd[1] >> 5;
}
+
+ /* Update current lun */
+ if (fsg->lun >= 0 && fsg->lun < fsg->nluns)
+ fsg->curlun = &fsg->luns[fsg->lun];
+ else
+ fsg->curlun = NULL;
+
return rc;
}
@@ -3584,7 +3604,7 @@ static void fsg_resume(struct usb_gadget *gadget)
/*-------------------------------------------------------------------------*/
static struct usb_gadget_driver fsg_driver = {
- .speed = USB_SPEED_SUPER,
+ .max_speed = USB_SPEED_SUPER,
.function = (char *) fsg_string_product,
.unbind = fsg_unbind,
.disconnect = fsg_disconnect,
diff --git a/drivers/usb/gadget/fsl_mxc_udc.c b/drivers/usb/gadget/fsl_mxc_udc.c
index 43a49ecc1f36..dcbc0a2e48dd 100644
--- a/drivers/usb/gadget/fsl_mxc_udc.c
+++ b/drivers/usb/gadget/fsl_mxc_udc.c
@@ -16,6 +16,7 @@
#include <linux/err.h>
#include <linux/fsl_devices.h>
#include <linux/platform_device.h>
+#include <linux/io.h>
#include <mach/hardware.h>
@@ -88,7 +89,6 @@ eenahb:
void fsl_udc_clk_finalize(struct platform_device *pdev)
{
struct fsl_usb2_platform_data *pdata = pdev->dev.platform_data;
-#if defined(CONFIG_SOC_IMX35)
if (cpu_is_mx35()) {
unsigned int v;
@@ -101,7 +101,6 @@ void fsl_udc_clk_finalize(struct platform_device *pdev)
USBPHYCTRL_OTGBASE_OFFSET));
}
}
-#endif
/* ULPI transceivers don't need usbpll */
if (pdata->phy_mode == FSL_USB2_PHY_ULPI) {
diff --git a/drivers/usb/gadget/fsl_qe_udc.c b/drivers/usb/gadget/fsl_qe_udc.c
index 2a03e4de11c1..b95697c03d07 100644
--- a/drivers/usb/gadget/fsl_qe_udc.c
+++ b/drivers/usb/gadget/fsl_qe_udc.c
@@ -2336,8 +2336,7 @@ static int fsl_qe_start(struct usb_gadget_driver *driver,
if (!udc_controller)
return -ENODEV;
- if (!driver || (driver->speed != USB_SPEED_FULL
- && driver->speed != USB_SPEED_HIGH)
+ if (!driver || driver->max_speed < USB_SPEED_FULL
|| !bind || !driver->disconnect || !driver->setup)
return -EINVAL;
@@ -2351,7 +2350,7 @@ static int fsl_qe_start(struct usb_gadget_driver *driver,
/* hook up the driver */
udc_controller->driver = driver;
udc_controller->gadget.dev.driver = &driver->driver;
- udc_controller->gadget.speed = (enum usb_device_speed)(driver->speed);
+ udc_controller->gadget.speed = driver->max_speed;
spin_unlock_irqrestore(&udc_controller->lock, flags);
retval = bind(&udc_controller->gadget);
@@ -2815,20 +2814,7 @@ static struct platform_driver udc_driver = {
#endif
};
-static int __init qe_udc_init(void)
-{
- printk(KERN_INFO "%s: %s, %s\n", driver_name, driver_desc,
- DRIVER_VERSION);
- return platform_driver_register(&udc_driver);
-}
-
-static void __exit qe_udc_exit(void)
-{
- platform_driver_unregister(&udc_driver);
-}
-
-module_init(qe_udc_init);
-module_exit(qe_udc_exit);
+module_platform_driver(udc_driver);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR(DRIVER_AUTHOR);
diff --git a/drivers/usb/gadget/fsl_udc_core.c b/drivers/usb/gadget/fsl_udc_core.c
index b3b3d83b7c33..d7ea6c076ce9 100644
--- a/drivers/usb/gadget/fsl_udc_core.c
+++ b/drivers/usb/gadget/fsl_udc_core.c
@@ -696,12 +696,31 @@ static void fsl_free_request(struct usb_ep *_ep, struct usb_request *_req)
kfree(req);
}
-/*-------------------------------------------------------------------------*/
+/* Actually add a dTD chain to an empty dQH and let go */
+static void fsl_prime_ep(struct fsl_ep *ep, struct ep_td_struct *td)
+{
+ struct ep_queue_head *qh = get_qh_by_ep(ep);
+
+ /* Write dQH next pointer and terminate bit to 0 */
+ qh->next_dtd_ptr = cpu_to_hc32(td->td_dma
+ & EP_QUEUE_HEAD_NEXT_POINTER_MASK);
+
+ /* Clear active and halt bit */
+ qh->size_ioc_int_sts &= cpu_to_hc32(~(EP_QUEUE_HEAD_STATUS_ACTIVE
+ | EP_QUEUE_HEAD_STATUS_HALT));
+
+ /* Ensure that updates to the QH will occur before priming. */
+ wmb();
+
+ /* Prime endpoint by writing correct bit to ENDPTPRIME */
+ fsl_writel(ep_is_in(ep) ? (1 << (ep_index(ep) + 16))
+ : (1 << (ep_index(ep))), &dr_regs->endpointprime);
+}
+
+/* Add dTD chain to the dQH of an EP */
static void fsl_queue_td(struct fsl_ep *ep, struct fsl_req *req)
{
- int i = ep_index(ep) * 2 + ep_is_in(ep);
u32 temp, bitmask, tmp_stat;
- struct ep_queue_head *dQH = &ep->udc->ep_qh[i];
/* VDBG("QH addr Register 0x%8x", dr_regs->endpointlistaddr);
VDBG("ep_qh[%d] addr is 0x%8x", i, (u32)&(ep->udc->ep_qh[i])); */
@@ -719,7 +738,7 @@ static void fsl_queue_td(struct fsl_ep *ep, struct fsl_req *req)
cpu_to_hc32(req->head->td_dma & DTD_ADDR_MASK);
/* Read prime bit, if 1 goto done */
if (fsl_readl(&dr_regs->endpointprime) & bitmask)
- goto out;
+ return;
do {
/* Set ATDTW bit in USBCMD */
@@ -736,28 +755,10 @@ static void fsl_queue_td(struct fsl_ep *ep, struct fsl_req *req)
fsl_writel(temp & ~USB_CMD_ATDTW, &dr_regs->usbcmd);
if (tmp_stat)
- goto out;
+ return;
}
- /* Write dQH next pointer and terminate bit to 0 */
- temp = req->head->td_dma & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
- dQH->next_dtd_ptr = cpu_to_hc32(temp);
-
- /* Clear active and halt bit */
- temp = cpu_to_hc32(~(EP_QUEUE_HEAD_STATUS_ACTIVE
- | EP_QUEUE_HEAD_STATUS_HALT));
- dQH->size_ioc_int_sts &= temp;
-
- /* Ensure that updates to the QH will occur before priming. */
- wmb();
-
- /* Prime endpoint by writing 1 to ENDPTPRIME */
- temp = ep_is_in(ep)
- ? (1 << (ep_index(ep) + 16))
- : (1 << (ep_index(ep)));
- fsl_writel(temp, &dr_regs->endpointprime);
-out:
- return;
+ fsl_prime_ep(ep, req->head);
}
/* Fill in the dTD structure
@@ -877,7 +878,7 @@ fsl_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
VDBG("%s, bad ep", __func__);
return -EINVAL;
}
- if (ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
+ if (usb_endpoint_xfer_isoc(ep->desc)) {
if (req->req.length > ep->ep.maxpacket)
return -EMSGSIZE;
}
@@ -973,25 +974,20 @@ static int fsl_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
/* The request isn't the last request in this ep queue */
if (req->queue.next != &ep->queue) {
- struct ep_queue_head *qh;
struct fsl_req *next_req;
- qh = ep->qh;
next_req = list_entry(req->queue.next, struct fsl_req,
queue);
- /* Point the QH to the first TD of next request */
- fsl_writel((u32) next_req->head, &qh->curr_dtd_ptr);
+ /* prime with dTD of next request */
+ fsl_prime_ep(ep, next_req->head);
}
-
- /* The request hasn't been processed, patch up the TD chain */
+ /* The request hasn't been processed, patch up the TD chain */
} else {
struct fsl_req *prev_req;
prev_req = list_entry(req->queue.prev, struct fsl_req, queue);
- fsl_writel(fsl_readl(&req->tail->next_td_ptr),
- &prev_req->tail->next_td_ptr);
-
+ prev_req->tail->next_td_ptr = req->tail->next_td_ptr;
}
done(ep, req, -ECONNRESET);
@@ -1032,7 +1028,7 @@ static int fsl_ep_set_halt(struct usb_ep *_ep, int value)
goto out;
}
- if (ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
+ if (usb_endpoint_xfer_isoc(ep->desc)) {
status = -EOPNOTSUPP;
goto out;
}
@@ -1068,7 +1064,7 @@ static int fsl_ep_fifo_status(struct usb_ep *_ep)
struct fsl_udc *udc;
int size = 0;
u32 bitmask;
- struct ep_queue_head *d_qh;
+ struct ep_queue_head *qh;
ep = container_of(_ep, struct fsl_ep, ep);
if (!_ep || (!ep->desc && ep_index(ep) != 0))
@@ -1079,13 +1075,13 @@ static int fsl_ep_fifo_status(struct usb_ep *_ep)
if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
return -ESHUTDOWN;
- d_qh = &ep->udc->ep_qh[ep_index(ep) * 2 + ep_is_in(ep)];
+ qh = get_qh_by_ep(ep);
bitmask = (ep_is_in(ep)) ? (1 << (ep_index(ep) + 16)) :
(1 << (ep_index(ep)));
if (fsl_readl(&dr_regs->endptstatus) & bitmask)
- size = (d_qh->size_ioc_int_sts & DTD_PACKET_SIZE)
+ size = (qh->size_ioc_int_sts & DTD_PACKET_SIZE)
>> DTD_LENGTH_BIT_POS;
pr_debug("%s %u\n", __func__, size);
@@ -1938,8 +1934,7 @@ static int fsl_start(struct usb_gadget_driver *driver,
if (!udc_controller)
return -ENODEV;
- if (!driver || (driver->speed != USB_SPEED_FULL
- && driver->speed != USB_SPEED_HIGH)
+ if (!driver || driver->max_speed < USB_SPEED_FULL
|| !bind || !driver->disconnect || !driver->setup)
return -EINVAL;
@@ -2530,7 +2525,7 @@ static int __init fsl_udc_probe(struct platform_device *pdev)
/* Setup gadget structure */
udc_controller->gadget.ops = &fsl_gadget_ops;
- udc_controller->gadget.is_dualspeed = 1;
+ udc_controller->gadget.max_speed = USB_SPEED_HIGH;
udc_controller->gadget.ep0 = &udc_controller->eps[0].ep;
INIT_LIST_HEAD(&udc_controller->gadget.ep_list);
udc_controller->gadget.speed = USB_SPEED_UNKNOWN;
diff --git a/drivers/usb/gadget/fsl_usb2_udc.h b/drivers/usb/gadget/fsl_usb2_udc.h
index 1d51be83fda8..f781f5dec417 100644
--- a/drivers/usb/gadget/fsl_usb2_udc.h
+++ b/drivers/usb/gadget/fsl_usb2_udc.h
@@ -569,6 +569,16 @@ static void dump_msg(const char *label, const u8 * buf, unsigned int length)
* 2 + ((windex & USB_DIR_IN) ? 1 : 0))
#define get_pipe_by_ep(EP) (ep_index(EP) * 2 + ep_is_in(EP))
+static inline struct ep_queue_head *get_qh_by_ep(struct fsl_ep *ep)
+{
+ /* we only have one ep0 structure but two queue heads */
+ if (ep_index(ep) != 0)
+ return ep->qh;
+ else
+ return &ep->udc->ep_qh[(ep->udc->ep0_dir ==
+ USB_DIR_IN) ? 1 : 0];
+}
+
struct platform_device;
#ifdef CONFIG_ARCH_MXC
int fsl_udc_clk_init(struct platform_device *pdev);
diff --git a/drivers/usb/gadget/fusb300_udc.c b/drivers/usb/gadget/fusb300_udc.c
index 74da206c8406..5831cb4a0b35 100644
--- a/drivers/usb/gadget/fusb300_udc.c
+++ b/drivers/usb/gadget/fusb300_udc.c
@@ -1317,7 +1317,7 @@ static int fusb300_udc_start(struct usb_gadget_driver *driver,
int retval;
if (!driver
- || driver->speed < USB_SPEED_FULL
+ || driver->max_speed < USB_SPEED_FULL
|| !bind
|| !driver->setup)
return -EINVAL;
@@ -1463,7 +1463,7 @@ static int __init fusb300_probe(struct platform_device *pdev)
dev_set_name(&fusb300->gadget.dev, "gadget");
- fusb300->gadget.is_dualspeed = 1;
+ fusb300->gadget.max_speed = USB_SPEED_HIGH;
fusb300->gadget.dev.parent = &pdev->dev;
fusb300->gadget.dev.dma_mask = pdev->dev.dma_mask;
fusb300->gadget.dev.release = pdev->dev.release;
diff --git a/drivers/usb/gadget/goku_udc.c b/drivers/usb/gadget/goku_udc.c
index 7f87805cddc4..5af70fcce139 100644
--- a/drivers/usb/gadget/goku_udc.c
+++ b/drivers/usb/gadget/goku_udc.c
@@ -1357,7 +1357,7 @@ static int goku_start(struct usb_gadget_driver *driver,
int retval;
if (!driver
- || driver->speed < USB_SPEED_FULL
+ || driver->max_speed < USB_SPEED_FULL
|| !bind
|| !driver->disconnect
|| !driver->setup)
@@ -1796,6 +1796,7 @@ static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id)
spin_lock_init(&dev->lock);
dev->pdev = pdev;
dev->gadget.ops = &goku_ops;
+ dev->gadget.max_speed = USB_SPEED_FULL;
/* the "gadget" abstracts/virtualizes the controller */
dev_set_name(&dev->gadget.dev, "gadget");
diff --git a/drivers/usb/gadget/imx_udc.c b/drivers/usb/gadget/imx_udc.c
index 2d978c0e7ced..8d1c75abd73d 100644
--- a/drivers/usb/gadget/imx_udc.c
+++ b/drivers/usb/gadget/imx_udc.c
@@ -1336,7 +1336,7 @@ static int imx_udc_start(struct usb_gadget_driver *driver,
int retval;
if (!driver
- || driver->speed < USB_SPEED_FULL
+ || driver->max_speed < USB_SPEED_FULL
|| !bind
|| !driver->disconnect
|| !driver->setup)
diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
index 6ccae2707e59..ae04266dba1b 100644
--- a/drivers/usb/gadget/inode.c
+++ b/drivers/usb/gadget/inode.c
@@ -1766,9 +1766,9 @@ gadgetfs_suspend (struct usb_gadget *gadget)
static struct usb_gadget_driver gadgetfs_driver = {
#ifdef CONFIG_USB_GADGET_DUALSPEED
- .speed = USB_SPEED_HIGH,
+ .max_speed = USB_SPEED_HIGH,
#else
- .speed = USB_SPEED_FULL,
+ .max_speed = USB_SPEED_FULL,
#endif
.function = (char *) driver_desc,
.unbind = gadgetfs_unbind,
@@ -1792,7 +1792,7 @@ static int gadgetfs_probe (struct usb_gadget *gadget)
}
static struct usb_gadget_driver probe_driver = {
- .speed = USB_SPEED_HIGH,
+ .max_speed = USB_SPEED_HIGH,
.unbind = gadgetfs_nop,
.setup = (void *)gadgetfs_nop,
.disconnect = gadgetfs_nop,
@@ -2035,7 +2035,6 @@ static int
gadgetfs_fill_super (struct super_block *sb, void *opts, int silent)
{
struct inode *inode;
- struct dentry *d;
struct dev_data *dev;
if (the_device)
@@ -2058,24 +2057,27 @@ gadgetfs_fill_super (struct super_block *sb, void *opts, int silent)
NULL, &simple_dir_operations,
S_IFDIR | S_IRUGO | S_IXUGO);
if (!inode)
- goto enomem0;
+ goto Enomem;
inode->i_op = &simple_dir_inode_operations;
- if (!(d = d_alloc_root (inode)))
- goto enomem1;
- sb->s_root = d;
+ if (!(sb->s_root = d_alloc_root (inode))) {
+ iput(inode);
+ goto Enomem;
+ }
/* the ep0 file is named after the controller we expect;
* user mode code can use it for sanity checks, like we do.
*/
dev = dev_new ();
if (!dev)
- goto enomem2;
+ goto Enomem;
dev->sb = sb;
if (!gadgetfs_create_file (sb, CHIP,
dev, &dev_init_operations,
- &dev->dentry))
- goto enomem3;
+ &dev->dentry)) {
+ put_dev(dev);
+ goto Enomem;
+ }
/* other endpoint files are available after hardware setup,
* from binding to a controller.
@@ -2083,13 +2085,7 @@ gadgetfs_fill_super (struct super_block *sb, void *opts, int silent)
the_device = dev;
return 0;
-enomem3:
- put_dev (dev);
-enomem2:
- dput (d);
-enomem1:
- iput (inode);
-enomem0:
+Enomem:
return -ENOMEM;
}
diff --git a/drivers/usb/gadget/langwell_udc.c b/drivers/usb/gadget/langwell_udc.c
index c9fa3bf5b377..fa0fcc11263f 100644
--- a/drivers/usb/gadget/langwell_udc.c
+++ b/drivers/usb/gadget/langwell_udc.c
@@ -3267,7 +3267,7 @@ static int langwell_udc_probe(struct pci_dev *pdev,
dev->gadget.ep0 = &dev->ep[0].ep; /* gadget ep0 */
INIT_LIST_HEAD(&dev->gadget.ep_list); /* ep_list */
dev->gadget.speed = USB_SPEED_UNKNOWN; /* speed */
- dev->gadget.is_dualspeed = 1; /* support dual speed */
+ dev->gadget.max_speed = USB_SPEED_HIGH; /* support dual speed */
#ifdef OTG_TRANSCEIVER
dev->gadget.is_otg = 1; /* support otg mode */
#endif
diff --git a/drivers/usb/gadget/m66592-udc.c b/drivers/usb/gadget/m66592-udc.c
index 91d0af2a24a8..3608b3bd5732 100644
--- a/drivers/usb/gadget/m66592-udc.c
+++ b/drivers/usb/gadget/m66592-udc.c
@@ -1472,7 +1472,7 @@ static int m66592_start(struct usb_gadget_driver *driver,
int retval;
if (!driver
- || driver->speed != USB_SPEED_HIGH
+ || driver->max_speed < USB_SPEED_HIGH
|| !bind
|| !driver->setup)
return -EINVAL;
@@ -1653,7 +1653,7 @@ static int __init m66592_probe(struct platform_device *pdev)
m66592->gadget.ops = &m66592_gadget_ops;
device_initialize(&m66592->gadget.dev);
dev_set_name(&m66592->gadget.dev, "gadget");
- m66592->gadget.is_dualspeed = 1;
+ m66592->gadget.max_speed = USB_SPEED_HIGH;
m66592->gadget.dev.parent = &pdev->dev;
m66592->gadget.dev.dma_mask = pdev->dev.dma_mask;
m66592->gadget.dev.release = pdev->dev.release;
diff --git a/drivers/usb/gadget/mv_udc.h b/drivers/usb/gadget/mv_udc.h
index daa75c12f336..34aadfae723d 100644
--- a/drivers/usb/gadget/mv_udc.h
+++ b/drivers/usb/gadget/mv_udc.h
@@ -180,7 +180,7 @@ struct mv_udc {
struct mv_cap_regs __iomem *cap_regs;
struct mv_op_regs __iomem *op_regs;
- unsigned int phy_regs;
+ void __iomem *phy_regs;
unsigned int max_eps;
struct mv_dqh *ep_dqh;
size_t ep_dqh_size;
@@ -211,11 +211,14 @@ struct mv_udc {
softconnected:1,
force_fs:1,
clock_gating:1,
- active:1;
+ active:1,
+ stopped:1; /* stop bit is set */
struct work_struct vbus_work;
struct workqueue_struct *qwork;
+ struct otg_transceiver *transceiver;
+
struct mv_usb_platform_data *pdata;
/* some SoCs have multiple clock sources for USB */
diff --git a/drivers/usb/gadget/mv_udc_core.c b/drivers/usb/gadget/mv_udc_core.c
index 892412103dd8..f97e737d26f7 100644
--- a/drivers/usb/gadget/mv_udc_core.c
+++ b/drivers/usb/gadget/mv_udc_core.c
@@ -276,11 +276,12 @@ static void done(struct mv_ep *ep, struct mv_req *req, int status)
static int queue_dtd(struct mv_ep *ep, struct mv_req *req)
{
- u32 tmp, epstatus, bit_pos, direction;
struct mv_udc *udc;
struct mv_dqh *dqh;
+ u32 bit_pos, direction;
+ u32 usbcmd, epstatus;
unsigned int loops;
- int readsafe, retval = 0;
+ int retval = 0;
udc = ep->udc;
direction = ep_dir(ep);
@@ -293,30 +294,18 @@ static int queue_dtd(struct mv_ep *ep, struct mv_req *req)
lastreq = list_entry(ep->queue.prev, struct mv_req, queue);
lastreq->tail->dtd_next =
req->head->td_dma & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
- if (readl(&udc->op_regs->epprime) & bit_pos) {
- loops = LOOPS(PRIME_TIMEOUT);
- while (readl(&udc->op_regs->epprime) & bit_pos) {
- if (loops == 0) {
- retval = -ETIME;
- goto done;
- }
- udelay(LOOPS_USEC);
- loops--;
- }
- if (readl(&udc->op_regs->epstatus) & bit_pos)
- goto done;
- }
- readsafe = 0;
+
+ wmb();
+
+ if (readl(&udc->op_regs->epprime) & bit_pos)
+ goto done;
+
loops = LOOPS(READSAFE_TIMEOUT);
- while (readsafe == 0) {
- if (loops == 0) {
- retval = -ETIME;
- goto done;
- }
+ while (1) {
/* start with setting the semaphores */
- tmp = readl(&udc->op_regs->usbcmd);
- tmp |= USBCMD_ATDTW_TRIPWIRE_SET;
- writel(tmp, &udc->op_regs->usbcmd);
+ usbcmd = readl(&udc->op_regs->usbcmd);
+ usbcmd |= USBCMD_ATDTW_TRIPWIRE_SET;
+ writel(usbcmd, &udc->op_regs->usbcmd);
/* read the endpoint status */
epstatus = readl(&udc->op_regs->epstatus) & bit_pos;
@@ -329,98 +318,46 @@ static int queue_dtd(struct mv_ep *ep, struct mv_req *req)
* primed.
*/
if (readl(&udc->op_regs->usbcmd)
- & USBCMD_ATDTW_TRIPWIRE_SET) {
- readsafe = 1;
- }
+ & USBCMD_ATDTW_TRIPWIRE_SET)
+ break;
+
loops--;
+ if (loops == 0) {
+ dev_err(&udc->dev->dev,
+ "Timeout for ATDTW_TRIPWIRE...\n");
+ retval = -ETIME;
+ goto done;
+ }
udelay(LOOPS_USEC);
}
/* Clear the semaphore */
- tmp = readl(&udc->op_regs->usbcmd);
- tmp &= USBCMD_ATDTW_TRIPWIRE_CLEAR;
- writel(tmp, &udc->op_regs->usbcmd);
-
- /* If endpoint is not active, we activate it now. */
- if (!epstatus) {
- if (direction == EP_DIR_IN) {
- struct mv_dtd *curr_dtd = dma_to_virt(
- &udc->dev->dev, dqh->curr_dtd_ptr);
-
- loops = LOOPS(DTD_TIMEOUT);
- while (curr_dtd->size_ioc_sts
- & DTD_STATUS_ACTIVE) {
- if (loops == 0) {
- retval = -ETIME;
- goto done;
- }
- loops--;
- udelay(LOOPS_USEC);
- }
- }
- /* No other transfers on the queue */
+ usbcmd = readl(&udc->op_regs->usbcmd);
+ usbcmd &= USBCMD_ATDTW_TRIPWIRE_CLEAR;
+ writel(usbcmd, &udc->op_regs->usbcmd);
- /* Write dQH next pointer and terminate bit to 0 */
- dqh->next_dtd_ptr = req->head->td_dma
- & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
- dqh->size_ioc_int_sts = 0;
+ if (epstatus)
+ goto done;
+ }
- /*
- * Ensure that updates to the QH will
- * occur before priming.
- */
- wmb();
+ /* Write dQH next pointer and terminate bit to 0 */
+ dqh->next_dtd_ptr = req->head->td_dma
+ & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
- /* Prime the Endpoint */
- writel(bit_pos, &udc->op_regs->epprime);
- }
- } else {
- /* Write dQH next pointer and terminate bit to 0 */
- dqh->next_dtd_ptr = req->head->td_dma
- & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
- dqh->size_ioc_int_sts = 0;
+ /* clear active and halt bit, in case set from a previous error */
+ dqh->size_ioc_int_sts &= ~(DTD_STATUS_ACTIVE | DTD_STATUS_HALTED);
- /* Ensure that updates to the QH will occur before priming. */
- wmb();
+ /* Ensure that updates to the QH will occur before priming. */
+ wmb();
- /* Prime the Endpoint */
- writel(bit_pos, &udc->op_regs->epprime);
+ /* Prime the Endpoint */
+ writel(bit_pos, &udc->op_regs->epprime);
- if (direction == EP_DIR_IN) {
- /* FIXME add status check after prime the IN ep */
- int prime_again;
- u32 curr_dtd_ptr = dqh->curr_dtd_ptr;
-
- loops = LOOPS(DTD_TIMEOUT);
- prime_again = 0;
- while ((curr_dtd_ptr != req->head->td_dma)) {
- curr_dtd_ptr = dqh->curr_dtd_ptr;
- if (loops == 0) {
- dev_err(&udc->dev->dev,
- "failed to prime %s\n",
- ep->name);
- retval = -ETIME;
- goto done;
- }
- loops--;
- udelay(LOOPS_USEC);
-
- if (loops == (LOOPS(DTD_TIMEOUT) >> 2)) {
- if (prime_again)
- goto done;
- dev_info(&udc->dev->dev,
- "prime again\n");
- writel(bit_pos,
- &udc->op_regs->epprime);
- prime_again = 1;
- }
- }
- }
- }
done:
return retval;
}
+
static struct mv_dtd *build_dtd(struct mv_req *req, unsigned *length,
dma_addr_t *dma, int *is_last)
{
@@ -841,6 +778,27 @@ mv_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
return 0;
}
+static void mv_prime_ep(struct mv_ep *ep, struct mv_req *req)
+{
+ struct mv_dqh *dqh = ep->dqh;
+ u32 bit_pos;
+
+ /* Write dQH next pointer and terminate bit to 0 */
+ dqh->next_dtd_ptr = req->head->td_dma
+ & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
+
+ /* clear active and halt bit, in case set from a previous error */
+ dqh->size_ioc_int_sts &= ~(DTD_STATUS_ACTIVE | DTD_STATUS_HALTED);
+
+ /* Ensure that updates to the QH will occur before priming. */
+ wmb();
+
+ bit_pos = 1 << (((ep_dir(ep) == EP_DIR_OUT) ? 0 : 16) + ep->ep_num);
+
+ /* Prime the Endpoint */
+ writel(bit_pos, &ep->udc->op_regs->epprime);
+}
+
/* dequeues (cancels, unlinks) an I/O request from an endpoint */
static int mv_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
@@ -883,15 +841,13 @@ static int mv_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
/* The request isn't the last request in this ep queue */
if (req->queue.next != &ep->queue) {
- struct mv_dqh *qh;
struct mv_req *next_req;
- qh = ep->dqh;
- next_req = list_entry(req->queue.next, struct mv_req,
- queue);
+ next_req = list_entry(req->queue.next,
+ struct mv_req, queue);
/* Point the QH to the first TD of next request */
- writel((u32) next_req->head, &qh->curr_dtd_ptr);
+ mv_prime_ep(ep, next_req);
} else {
struct mv_dqh *qh;
@@ -1056,6 +1012,8 @@ static void udc_stop(struct mv_udc *udc)
USBINTR_PORT_CHANGE_DETECT_EN | USBINTR_RESET_EN);
writel(tmp, &udc->op_regs->usbintr);
+ udc->stopped = 1;
+
/* Reset the Run the bit in the command register to stop VUSB */
tmp = readl(&udc->op_regs->usbcmd);
tmp &= ~USBCMD_RUN_STOP;
@@ -1072,6 +1030,8 @@ static void udc_start(struct mv_udc *udc)
/* Enable interrupts */
writel(usbintr, &udc->op_regs->usbintr);
+ udc->stopped = 0;
+
/* Set the Run bit in the command register */
writel(USBCMD_RUN_STOP, &udc->op_regs->usbcmd);
}
@@ -1134,11 +1094,11 @@ static int udc_reset(struct mv_udc *udc)
return 0;
}
-static int mv_udc_enable(struct mv_udc *udc)
+static int mv_udc_enable_internal(struct mv_udc *udc)
{
int retval;
- if (udc->clock_gating == 0 || udc->active)
+ if (udc->active)
return 0;
dev_dbg(&udc->dev->dev, "enable udc\n");
@@ -1157,9 +1117,17 @@ static int mv_udc_enable(struct mv_udc *udc)
return 0;
}
-static void mv_udc_disable(struct mv_udc *udc)
+static int mv_udc_enable(struct mv_udc *udc)
{
- if (udc->clock_gating && udc->active) {
+ if (udc->clock_gating)
+ return mv_udc_enable_internal(udc);
+
+ return 0;
+}
+
+static void mv_udc_disable_internal(struct mv_udc *udc)
+{
+ if (udc->active) {
dev_dbg(&udc->dev->dev, "disable udc\n");
if (udc->pdata->phy_deinit)
udc->pdata->phy_deinit(udc->phy_regs);
@@ -1168,6 +1136,12 @@ static void mv_udc_disable(struct mv_udc *udc)
}
}
+static void mv_udc_disable(struct mv_udc *udc)
+{
+ if (udc->clock_gating)
+ mv_udc_disable_internal(udc);
+}
+
static int mv_udc_get_frame(struct usb_gadget *gadget)
{
struct mv_udc *udc;
@@ -1178,7 +1152,7 @@ static int mv_udc_get_frame(struct usb_gadget *gadget)
udc = container_of(gadget, struct mv_udc, gadget);
- retval = readl(udc->op_regs->frindex) & USB_FRINDEX_MASKS;
+ retval = readl(&udc->op_regs->frindex) & USB_FRINDEX_MASKS;
return retval;
}
@@ -1212,10 +1186,11 @@ static int mv_udc_vbus_session(struct usb_gadget *gadget, int is_active)
udc = container_of(gadget, struct mv_udc, gadget);
spin_lock_irqsave(&udc->lock, flags);
+ udc->vbus_active = (is_active != 0);
+
dev_dbg(&udc->dev->dev, "%s: softconnect %d, vbus_active %d\n",
__func__, udc->softconnect, udc->vbus_active);
- udc->vbus_active = (is_active != 0);
if (udc->driver && udc->softconnect && udc->vbus_active) {
retval = mv_udc_enable(udc);
if (retval == 0) {
@@ -1244,10 +1219,11 @@ static int mv_udc_pullup(struct usb_gadget *gadget, int is_on)
udc = container_of(gadget, struct mv_udc, gadget);
spin_lock_irqsave(&udc->lock, flags);
+ udc->softconnect = (is_on != 0);
+
dev_dbg(&udc->dev->dev, "%s: softconnect %d, vbus_active %d\n",
__func__, udc->softconnect, udc->vbus_active);
- udc->softconnect = (is_on != 0);
if (udc->driver && udc->softconnect && udc->vbus_active) {
retval = mv_udc_enable(udc);
if (retval == 0) {
@@ -1407,6 +1383,20 @@ static int mv_udc_start(struct usb_gadget_driver *driver,
return retval;
}
+ if (udc->transceiver) {
+ retval = otg_set_peripheral(udc->transceiver, &udc->gadget);
+ if (retval) {
+ dev_err(&udc->dev->dev,
+ "unable to register peripheral to otg\n");
+ if (driver->unbind) {
+ driver->unbind(&udc->gadget);
+ udc->gadget.dev.driver = NULL;
+ udc->driver = NULL;
+ }
+ return retval;
+ }
+ }
+
/* pullup is always on */
mv_udc_pullup(&udc->gadget, 1);
@@ -2026,6 +2016,10 @@ static irqreturn_t mv_udc_irq(int irq, void *dev)
struct mv_udc *udc = (struct mv_udc *)dev;
u32 status, intr;
+ /* Disable ISR when stopped bit is set */
+ if (udc->stopped)
+ return IRQ_NONE;
+
spin_lock(&udc->lock);
status = readl(&udc->op_regs->usbsts);
@@ -2109,7 +2103,12 @@ static int __devexit mv_udc_remove(struct platform_device *dev)
destroy_workqueue(udc->qwork);
}
- if (udc->pdata && udc->pdata->vbus && udc->clock_gating)
+ /*
+ * If the transceiver has been initialized,
+ * the vbus irq will not be requested in the udc driver.
+ */
+ if (udc->pdata && udc->pdata->vbus
+ && udc->clock_gating && udc->transceiver == NULL)
free_irq(udc->pdata->vbus->irq, &dev->dev);
/* free memory allocated in probe */
@@ -2129,11 +2128,9 @@ static int __devexit mv_udc_remove(struct platform_device *dev)
if (udc->cap_regs)
iounmap(udc->cap_regs);
- udc->cap_regs = NULL;
if (udc->phy_regs)
- iounmap((void *)udc->phy_regs);
- udc->phy_regs = 0;
+ iounmap(udc->phy_regs);
if (udc->status_req) {
kfree(udc->status_req->req.buf);
@@ -2182,6 +2179,11 @@ static int __devinit mv_udc_probe(struct platform_device *dev)
udc->dev = dev;
+#ifdef CONFIG_USB_OTG_UTILS
+ if (pdata->mode == MV_USB_MODE_OTG)
+ udc->transceiver = otg_get_transceiver();
+#endif
+
udc->clknum = pdata->clknum;
for (clk_i = 0; clk_i < udc->clknum; clk_i++) {
udc->clk[clk_i] = clk_get(&dev->dev, pdata->clkname[clk_i]);
@@ -2213,24 +2215,20 @@ static int __devinit mv_udc_probe(struct platform_device *dev)
goto err_iounmap_capreg;
}
- udc->phy_regs = (unsigned int)ioremap(r->start, resource_size(r));
- if (udc->phy_regs == 0) {
+ udc->phy_regs = ioremap(r->start, resource_size(r));
+ if (udc->phy_regs == NULL) {
dev_err(&dev->dev, "failed to map phy I/O memory\n");
retval = -EBUSY;
goto err_iounmap_capreg;
}
/* we will access the controller registers, so enable the clk */
- udc_clock_enable(udc);
- if (pdata->phy_init) {
- retval = pdata->phy_init(udc->phy_regs);
- if (retval) {
- dev_err(&dev->dev, "phy init error %d\n", retval);
- goto err_iounmap_phyreg;
- }
- }
+ retval = mv_udc_enable_internal(udc);
+ if (retval)
+ goto err_iounmap_phyreg;
- udc->op_regs = (struct mv_op_regs __iomem *)((u32)udc->cap_regs
+ udc->op_regs =
+ (struct mv_op_regs __iomem *)((unsigned long)udc->cap_regs
+ (readl(&udc->cap_regs->caplength_hciversion)
& CAPLENGTH_MASK));
udc->max_eps = readl(&udc->cap_regs->dccparams) & DCCPARAMS_DEN_MASK;
@@ -2312,7 +2310,7 @@ static int __devinit mv_udc_probe(struct platform_device *dev)
udc->gadget.ep0 = &udc->eps[0].ep; /* gadget ep0 */
INIT_LIST_HEAD(&udc->gadget.ep_list); /* ep_list */
udc->gadget.speed = USB_SPEED_UNKNOWN; /* speed */
- udc->gadget.is_dualspeed = 1; /* support dual speed */
+ udc->gadget.max_speed = USB_SPEED_HIGH; /* support dual speed */
/* the "gadget" abstracts/virtualizes the controller */
dev_set_name(&udc->gadget.dev, "gadget");
@@ -2328,7 +2326,9 @@ static int __devinit mv_udc_probe(struct platform_device *dev)
eps_init(udc);
/* VBUS detect: we can disable/enable clock on demand.*/
- if (pdata->vbus) {
+ if (udc->transceiver)
+ udc->clock_gating = 1;
+ else if (pdata->vbus) {
udc->clock_gating = 1;
retval = request_threaded_irq(pdata->vbus->irq, NULL,
mv_udc_vbus_irq, IRQF_ONESHOT, "vbus", udc);
@@ -2354,11 +2354,9 @@ static int __devinit mv_udc_probe(struct platform_device *dev)
* If not, it means that VBUS detection is not supported, we
* have to enable vbus active all the time to let controller work.
*/
- if (udc->clock_gating) {
- if (udc->pdata->phy_deinit)
- udc->pdata->phy_deinit(udc->phy_regs);
- udc_clock_disable(udc);
- } else
+ if (udc->clock_gating)
+ mv_udc_disable_internal(udc);
+ else
udc->vbus_active = 1;
retval = usb_add_gadget_udc(&dev->dev, &udc->gadget);
@@ -2371,7 +2369,8 @@ static int __devinit mv_udc_probe(struct platform_device *dev)
return 0;
err_unregister:
- if (udc->pdata && udc->pdata->vbus && udc->clock_gating)
+ if (udc->pdata && udc->pdata->vbus
+ && udc->clock_gating && udc->transceiver == NULL)
free_irq(pdata->vbus->irq, &dev->dev);
device_unregister(&udc->gadget.dev);
err_free_irq:
@@ -2387,11 +2386,9 @@ err_free_dma:
dma_free_coherent(&dev->dev, udc->ep_dqh_size,
udc->ep_dqh, udc->ep_dqh_dma);
err_disable_clock:
- if (udc->pdata->phy_deinit)
- udc->pdata->phy_deinit(udc->phy_regs);
- udc_clock_disable(udc);
+ mv_udc_disable_internal(udc);
err_iounmap_phyreg:
- iounmap((void *)udc->phy_regs);
+ iounmap(udc->phy_regs);
err_iounmap_capreg:
iounmap(udc->cap_regs);
err_put_clk:
@@ -2407,7 +2404,30 @@ static int mv_udc_suspend(struct device *_dev)
{
struct mv_udc *udc = the_controller;
- udc_stop(udc);
+ /* if OTG is enabled, the following will be done in the OTG driver */
+ if (udc->transceiver)
+ return 0;
+
+ if (udc->pdata->vbus && udc->pdata->vbus->poll)
+ if (udc->pdata->vbus->poll() == VBUS_HIGH) {
+ dev_info(&udc->dev->dev, "USB cable is connected!\n");
+ return -EAGAIN;
+ }
+
+ /*
+ * Only when the cable is unplugged can the udc suspend,
+ * so do not care about clock_gating == 1.
+ */
+ if (!udc->clock_gating) {
+ udc_stop(udc);
+
+ spin_lock_irq(&udc->lock);
+ /* stop all usb activities */
+ stop_activity(udc, udc->driver);
+ spin_unlock_irq(&udc->lock);
+
+ mv_udc_disable_internal(udc);
+ }
return 0;
}
@@ -2417,20 +2437,22 @@ static int mv_udc_resume(struct device *_dev)
struct mv_udc *udc = the_controller;
int retval;
- if (udc->pdata->phy_init) {
- retval = udc->pdata->phy_init(udc->phy_regs);
- if (retval) {
- dev_err(&udc->dev->dev,
- "init phy error %d when resume back\n",
- retval);
+ /* if OTG is enabled, the following will be done in the OTG driver */
+ if (udc->transceiver)
+ return 0;
+
+ if (!udc->clock_gating) {
+ retval = mv_udc_enable_internal(udc);
+ if (retval)
return retval;
+
+ if (udc->driver && udc->softconnect) {
+ udc_reset(udc);
+ ep0_reset(udc);
+ udc_start(udc);
}
}
- udc_reset(udc);
- ep0_reset(udc);
- udc_start(udc);
-
return 0;
}
@@ -2457,30 +2479,16 @@ static struct platform_driver udc_driver = {
.shutdown = mv_udc_shutdown,
.driver = {
.owner = THIS_MODULE,
- .name = "pxa-u2o",
+ .name = "mv-udc",
#ifdef CONFIG_PM
.pm = &mv_udc_pm_ops,
#endif
},
};
-MODULE_ALIAS("platform:pxa-u2o");
+module_platform_driver(udc_driver);
+MODULE_ALIAS("platform:mv-udc");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("Chao Xie <chao.xie@marvell.com>");
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");
-
-
-static int __init init(void)
-{
- return platform_driver_register(&udc_driver);
-}
-module_init(init);
-
-
-static void __exit cleanup(void)
-{
- platform_driver_unregister(&udc_driver);
-}
-module_exit(cleanup);
-
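For reference, module_platform_driver() stamps out essentially the same registration boilerplate that the deleted init()/cleanup() pair open-coded. A minimal sketch of what the macro expands to for udc_driver (generated names follow the standard macro; shown only for illustration):

static int __init udc_driver_init(void)
{
	/* registers the driver with the platform bus, as init() did */
	return platform_driver_register(&udc_driver);
}
module_init(udc_driver_init);

static void __exit udc_driver_exit(void)
{
	platform_driver_unregister(&udc_driver);
}
module_exit(udc_driver_exit);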
diff --git a/drivers/usb/gadget/net2272.c b/drivers/usb/gadget/net2272.c
index d1b76368472f..7322d293213e 100644
--- a/drivers/usb/gadget/net2272.c
+++ b/drivers/usb/gadget/net2272.c
@@ -69,7 +69,7 @@ static const char * const ep_name[] = {
*
* If use_dma is disabled, pio will be used instead.
*/
-static int use_dma = 0;
+static bool use_dma = 0;
module_param(use_dma, bool, 0644);
/*
@@ -1459,7 +1459,7 @@ static int net2272_start(struct usb_gadget *_gadget,
unsigned i;
if (!driver || !driver->unbind || !driver->setup ||
- driver->speed != USB_SPEED_HIGH)
+ driver->max_speed != USB_SPEED_HIGH)
return -EINVAL;
dev = container_of(_gadget, struct net2272, gadget);
@@ -2235,7 +2235,7 @@ net2272_probe_init(struct device *dev, unsigned int irq)
ret->irq = irq;
ret->dev = dev;
ret->gadget.ops = &net2272_ops;
- ret->gadget.is_dualspeed = 1;
+ ret->gadget.max_speed = USB_SPEED_HIGH;
/* the "gadget" abstracts/virtualizes the controller */
dev_set_name(&ret->gadget.dev, "gadget");
diff --git a/drivers/usb/gadget/net2280.c b/drivers/usb/gadget/net2280.c
index 7f1bc9a73cda..cdedd1336745 100644
--- a/drivers/usb/gadget/net2280.c
+++ b/drivers/usb/gadget/net2280.c
@@ -90,8 +90,8 @@ static const char *const ep_name [] = {
* Some gadget drivers work better with the dma support here than others.
* These two parameters let you use PIO or more aggressive DMA.
*/
-static int use_dma = 1;
-static int use_dma_chaining = 0;
+static bool use_dma = 1;
+static bool use_dma_chaining = 0;
/* "modprobe net2280 use_dma=n" etc */
module_param (use_dma, bool, S_IRUGO);
@@ -112,7 +112,7 @@ module_param (fifo_mode, ushort, 0644);
* USB suspend requests will be ignored. This is acceptable for
* self-powered devices
*/
-static int enable_suspend = 0;
+static bool enable_suspend = 0;
/* "modprobe net2280 enable_suspend=1" etc */
module_param (enable_suspend, bool, S_IRUGO);
@@ -1881,7 +1881,7 @@ static int net2280_start(struct usb_gadget *_gadget,
* (dev->usb->xcvrdiag & FORCE_FULL_SPEED_MODE)
* "must not be used in normal operation"
*/
- if (!driver || driver->speed != USB_SPEED_HIGH
+ if (!driver || driver->max_speed < USB_SPEED_HIGH
|| !driver->setup)
return -EINVAL;
@@ -2698,7 +2698,7 @@ static int net2280_probe (struct pci_dev *pdev, const struct pci_device_id *id)
spin_lock_init (&dev->lock);
dev->pdev = pdev;
dev->gadget.ops = &net2280_ops;
- dev->gadget.is_dualspeed = 1;
+ dev->gadget.max_speed = USB_SPEED_HIGH;
/* the "gadget" abstracts/virtualizes the controller */
dev_set_name(&dev->gadget.dev, "gadget");
diff --git a/drivers/usb/gadget/omap_udc.c b/drivers/usb/gadget/omap_udc.c
index 788989a10223..576cd8578b45 100644
--- a/drivers/usb/gadget/omap_udc.c
+++ b/drivers/usb/gadget/omap_udc.c
@@ -98,7 +98,7 @@ module_param (fifo_mode, uint, 0);
MODULE_PARM_DESC (fifo_mode, "endpoint configuration");
#ifdef USE_DMA
-static unsigned use_dma = 1;
+static bool use_dma = 1;
/* "modprobe omap_udc use_dma=y", or else as a kernel
* boot parameter "omap_udc:use_dma=y"
@@ -2110,7 +2110,7 @@ static int omap_udc_start(struct usb_gadget_driver *driver,
return -ENODEV;
if (!driver
// FIXME if otg, check: driver->is_otg
- || driver->speed < USB_SPEED_FULL
+ || driver->max_speed < USB_SPEED_FULL
|| !bind || !driver->setup)
return -EINVAL;
@@ -2676,6 +2676,7 @@ omap_udc_setup(struct platform_device *odev, struct otg_transceiver *xceiv)
INIT_LIST_HEAD(&udc->gadget.ep_list);
INIT_LIST_HEAD(&udc->iso);
udc->gadget.speed = USB_SPEED_UNKNOWN;
+ udc->gadget.max_speed = USB_SPEED_FULL;
udc->gadget.name = driver_name;
device_initialize(&udc->gadget.dev);
diff --git a/drivers/usb/gadget/pch_udc.c b/drivers/usb/gadget/pch_udc.c
index 5048a0c07640..a3fcaae4bc2a 100644
--- a/drivers/usb/gadget/pch_udc.c
+++ b/drivers/usb/gadget/pch_udc.c
@@ -359,7 +359,7 @@ struct pch_udc_dev {
static const char ep0_string[] = "ep0in";
static DEFINE_SPINLOCK(udc_stall_spinlock); /* stall spin lock */
struct pch_udc_dev *pch_udc; /* pointer to device object */
-static int speed_fs;
+static bool speed_fs;
module_param_named(speed_fs, speed_fs, bool, S_IRUGO);
MODULE_PARM_DESC(speed_fs, "true for Full speed operation");
@@ -2693,7 +2693,7 @@ static int pch_udc_start(struct usb_gadget_driver *driver,
struct pch_udc_dev *dev = pch_udc;
int retval;
- if (!driver || (driver->speed == USB_SPEED_UNKNOWN) || !bind ||
+ if (!driver || (driver->max_speed == USB_SPEED_UNKNOWN) || !bind ||
!driver->setup || !driver->unbind || !driver->disconnect) {
dev_err(&dev->pdev->dev,
"%s: invalid driver parameter\n", __func__);
@@ -2941,7 +2941,7 @@ static int pch_udc_probe(struct pci_dev *pdev,
dev->gadget.dev.dma_mask = pdev->dev.dma_mask;
dev->gadget.dev.release = gadget_release;
dev->gadget.name = KBUILD_MODNAME;
- dev->gadget.is_dualspeed = 1;
+ dev->gadget.max_speed = USB_SPEED_HIGH;
retval = device_register(&dev->gadget.dev);
if (retval)
diff --git a/drivers/usb/gadget/printer.c b/drivers/usb/gadget/printer.c
index 65a8834f274b..d83134b0f78a 100644
--- a/drivers/usb/gadget/printer.c
+++ b/drivers/usb/gadget/printer.c
@@ -1141,7 +1141,7 @@ printer_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
break;
#ifdef CONFIG_USB_GADGET_DUALSPEED
case USB_DT_DEVICE_QUALIFIER:
- if (!gadget->is_dualspeed)
+ if (!gadget_is_dualspeed(gadget))
break;
/*
* assumes ep0 uses the same value for both
@@ -1155,7 +1155,7 @@ printer_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
break;
case USB_DT_OTHER_SPEED_CONFIG:
- if (!gadget->is_dualspeed)
+ if (!gadget_is_dualspeed(gadget))
break;
/* FALLTHROUGH */
#endif /* CONFIG_USB_GADGET_DUALSPEED */
@@ -1535,7 +1535,7 @@ fail:
/*-------------------------------------------------------------------------*/
static struct usb_gadget_driver printer_driver = {
- .speed = DEVSPEED,
+ .max_speed = DEVSPEED,
.function = (char *) driver_desc,
.unbind = printer_unbind,
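The recurring change across these gadget drivers is the retirement of driver->speed and gadget->is_dualspeed in favour of max_speed fields. A condensed sketch of the new convention (example names are hypothetical, not from this patch):

/* sketch: what the converted drivers now do */
static struct usb_gadget_driver example_gadget_driver = {
	.max_speed	= USB_SPEED_HIGH,	/* was: .speed */
	/* .function, .setup, .disconnect, ... unchanged */
};

static void example_udc_init_speed(struct usb_gadget *gadget)
{
	/* was: gadget->is_dualspeed = 1; */
	gadget->max_speed = USB_SPEED_HIGH;
}

UDC start() hooks then validate driver->max_speed against what the controller supports, e.g. rejecting anything below USB_SPEED_FULL.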
diff --git a/drivers/usb/gadget/pxa25x_udc.c b/drivers/usb/gadget/pxa25x_udc.c
index c090a7e3ecf8..dd470635f4f7 100644
--- a/drivers/usb/gadget/pxa25x_udc.c
+++ b/drivers/usb/gadget/pxa25x_udc.c
@@ -1264,7 +1264,7 @@ static int pxa25x_start(struct usb_gadget_driver *driver,
int retval;
if (!driver
- || driver->speed < USB_SPEED_FULL
+ || driver->max_speed < USB_SPEED_FULL
|| !bind
|| !driver->disconnect
|| !driver->setup)
diff --git a/drivers/usb/gadget/pxa27x_udc.c b/drivers/usb/gadget/pxa27x_udc.c
index 18b6b091f2a6..f4c44eb806c3 100644
--- a/drivers/usb/gadget/pxa27x_udc.c
+++ b/drivers/usb/gadget/pxa27x_udc.c
@@ -1807,7 +1807,7 @@ static int pxa27x_udc_start(struct usb_gadget_driver *driver,
struct pxa_udc *udc = the_controller;
int retval;
- if (!driver || driver->speed < USB_SPEED_FULL || !bind
+ if (!driver || driver->max_speed < USB_SPEED_FULL || !bind
|| !driver->disconnect || !driver->setup)
return -EINVAL;
if (!udc)
diff --git a/drivers/usb/gadget/r8a66597-udc.c b/drivers/usb/gadget/r8a66597-udc.c
index 24f84b210ce1..f5b8d215e1d5 100644
--- a/drivers/usb/gadget/r8a66597-udc.c
+++ b/drivers/usb/gadget/r8a66597-udc.c
@@ -1746,7 +1746,7 @@ static int r8a66597_start(struct usb_gadget *gadget,
struct r8a66597 *r8a66597 = gadget_to_r8a66597(gadget);
if (!driver
- || driver->speed != USB_SPEED_HIGH
+ || driver->max_speed < USB_SPEED_HIGH
|| !driver->setup)
return -EINVAL;
if (!r8a66597)
@@ -1911,7 +1911,7 @@ static int __init r8a66597_probe(struct platform_device *pdev)
r8a66597->gadget.ops = &r8a66597_gadget_ops;
dev_set_name(&r8a66597->gadget.dev, "gadget");
- r8a66597->gadget.is_dualspeed = 1;
+ r8a66597->gadget.max_speed = USB_SPEED_HIGH;
r8a66597->gadget.dev.parent = &pdev->dev;
r8a66597->gadget.dev.dma_mask = pdev->dev.dma_mask;
r8a66597->gadget.dev.release = pdev->dev.release;
diff --git a/drivers/usb/gadget/s3c-hsotg.c b/drivers/usb/gadget/s3c-hsotg.c
index a552453dc946..69295ba9d99a 100644
--- a/drivers/usb/gadget/s3c-hsotg.c
+++ b/drivers/usb/gadget/s3c-hsotg.c
@@ -2586,10 +2586,8 @@ static int s3c_hsotg_start(struct usb_gadget_driver *driver,
return -EINVAL;
}
- if (driver->speed != USB_SPEED_HIGH &&
- driver->speed != USB_SPEED_FULL) {
+ if (driver->max_speed < USB_SPEED_FULL)
dev_err(hsotg->dev, "%s: bad speed\n", __func__);
- }
if (!bind || !driver->setup) {
dev_err(hsotg->dev, "%s: missing entry points\n", __func__);
@@ -3364,7 +3362,7 @@ static int __devinit s3c_hsotg_probe(struct platform_device *pdev)
dev_set_name(&hsotg->gadget.dev, "gadget");
- hsotg->gadget.is_dualspeed = 1;
+ hsotg->gadget.max_speed = USB_SPEED_HIGH;
hsotg->gadget.ops = &s3c_hsotg_gadget_ops;
hsotg->gadget.name = dev_name(dev);
@@ -3469,18 +3467,7 @@ static struct platform_driver s3c_hsotg_driver = {
.resume = s3c_hsotg_resume,
};
-static int __init s3c_hsotg_modinit(void)
-{
- return platform_driver_register(&s3c_hsotg_driver);
-}
-
-static void __exit s3c_hsotg_modexit(void)
-{
- platform_driver_unregister(&s3c_hsotg_driver);
-}
-
-module_init(s3c_hsotg_modinit);
-module_exit(s3c_hsotg_modexit);
+module_platform_driver(s3c_hsotg_driver);
MODULE_DESCRIPTION("Samsung S3C USB High-speed/OtG device");
MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
diff --git a/drivers/usb/gadget/s3c-hsudc.c b/drivers/usb/gadget/s3c-hsudc.c
index 8d54f893cefe..df8661d266cb 100644
--- a/drivers/usb/gadget/s3c-hsudc.c
+++ b/drivers/usb/gadget/s3c-hsudc.c
@@ -28,9 +28,10 @@
#include <linux/usb/gadget.h>
#include <linux/usb/otg.h>
#include <linux/prefetch.h>
+#include <linux/platform_data/s3c-hsudc.h>
+#include <linux/regulator/consumer.h>
#include <mach/regs-s3c2443-clock.h>
-#include <plat/udc.h>
#define S3C_HSUDC_REG(x) (x)
@@ -87,6 +88,12 @@
#define DATA_STATE_XMIT (1)
#define DATA_STATE_RECV (2)
+static const char * const s3c_hsudc_supply_names[] = {
+ "vdda", /* analog phy supply, 3.3V */
+ "vddi", /* digital phy supply, 1.2V */
+ "vddosc", /* oscillator supply, 1.8V - 3.3V */
+};
+
/**
* struct s3c_hsudc_ep - Endpoint representation used by driver.
* @ep: USB gadget layer representation of device endpoint.
@@ -139,6 +146,7 @@ struct s3c_hsudc {
struct device *dev;
struct s3c24xx_hsudc_platdata *pd;
struct otg_transceiver *transceiver;
+ struct regulator_bulk_data supplies[ARRAY_SIZE(s3c_hsudc_supply_names)];
spinlock_t lock;
void __iomem *regs;
struct resource *mem_rsrc;
@@ -153,7 +161,6 @@ struct s3c_hsudc {
#define ep_index(_ep) ((_ep)->bEndpointAddress & \
USB_ENDPOINT_NUMBER_MASK)
-static struct s3c_hsudc *the_controller;
static const char driver_name[] = "s3c-udc";
static const char ep0name[] = "ep0-control";
@@ -282,8 +289,7 @@ static void s3c_hsudc_nuke_ep(struct s3c_hsudc_ep *hsep, int status)
* All the endpoints are stopped and any pending transfer requests on the
* endpoints are terminated.
*/
-static void s3c_hsudc_stop_activity(struct s3c_hsudc *hsudc,
- struct usb_gadget_driver *driver)
+static void s3c_hsudc_stop_activity(struct s3c_hsudc *hsudc)
{
struct s3c_hsudc_ep *hsep;
int epnum;
@@ -295,10 +301,6 @@ static void s3c_hsudc_stop_activity(struct s3c_hsudc *hsudc,
hsep->stopped = 1;
s3c_hsudc_nuke_ep(hsep, -ESHUTDOWN);
}
-
- spin_unlock(&hsudc->lock);
- driver->disconnect(&hsudc->gadget);
- spin_lock(&hsudc->lock);
}
/**
@@ -1135,17 +1137,15 @@ static irqreturn_t s3c_hsudc_irq(int irq, void *_dev)
return IRQ_HANDLED;
}
-static int s3c_hsudc_start(struct usb_gadget_driver *driver,
- int (*bind)(struct usb_gadget *))
+static int s3c_hsudc_start(struct usb_gadget *gadget,
+ struct usb_gadget_driver *driver)
{
- struct s3c_hsudc *hsudc = the_controller;
+ struct s3c_hsudc *hsudc = to_hsudc(gadget);
int ret;
if (!driver
- || (driver->speed != USB_SPEED_FULL &&
- driver->speed != USB_SPEED_HIGH)
- || !bind
- || !driver->unbind || !driver->disconnect || !driver->setup)
+ || driver->max_speed < USB_SPEED_FULL
+ || !driver->setup)
return -EINVAL;
if (!hsudc)
@@ -1156,21 +1156,12 @@ static int s3c_hsudc_start(struct usb_gadget_driver *driver,
hsudc->driver = driver;
hsudc->gadget.dev.driver = &driver->driver;
- hsudc->gadget.speed = USB_SPEED_UNKNOWN;
- ret = device_add(&hsudc->gadget.dev);
- if (ret) {
- dev_err(hsudc->dev, "failed to probe gadget device");
- return ret;
- }
- ret = bind(&hsudc->gadget);
- if (ret) {
- dev_err(hsudc->dev, "%s: bind failed\n", hsudc->gadget.name);
- device_del(&hsudc->gadget.dev);
-
- hsudc->driver = NULL;
- hsudc->gadget.dev.driver = NULL;
- return ret;
+ ret = regulator_bulk_enable(ARRAY_SIZE(hsudc->supplies),
+ hsudc->supplies);
+ if (ret != 0) {
+ dev_err(hsudc->dev, "failed to enable supplies: %d\n", ret);
+ goto err_supplies;
}
/* connect to bus through transceiver */
@@ -1179,13 +1170,7 @@ static int s3c_hsudc_start(struct usb_gadget_driver *driver,
if (ret) {
dev_err(hsudc->dev, "%s: can't bind to transceiver\n",
hsudc->gadget.name);
- driver->unbind(&hsudc->gadget);
-
- device_del(&hsudc->gadget.dev);
-
- hsudc->driver = NULL;
- hsudc->gadget.dev.driver = NULL;
- return ret;
+ goto err_otg;
}
}
@@ -1198,34 +1183,43 @@ static int s3c_hsudc_start(struct usb_gadget_driver *driver,
hsudc->pd->gpio_init();
return 0;
+err_otg:
+ regulator_bulk_disable(ARRAY_SIZE(hsudc->supplies), hsudc->supplies);
+err_supplies:
+ hsudc->driver = NULL;
+ hsudc->gadget.dev.driver = NULL;
+ return ret;
}
-static int s3c_hsudc_stop(struct usb_gadget_driver *driver)
+static int s3c_hsudc_stop(struct usb_gadget *gadget,
+ struct usb_gadget_driver *driver)
{
- struct s3c_hsudc *hsudc = the_controller;
+ struct s3c_hsudc *hsudc = to_hsudc(gadget);
unsigned long flags;
if (!hsudc)
return -ENODEV;
- if (!driver || driver != hsudc->driver || !driver->unbind)
+ if (!driver || driver != hsudc->driver)
return -EINVAL;
spin_lock_irqsave(&hsudc->lock, flags);
- hsudc->driver = 0;
+ hsudc->driver = NULL;
+ hsudc->gadget.dev.driver = NULL;
+ hsudc->gadget.speed = USB_SPEED_UNKNOWN;
s3c_hsudc_uninit_phy();
if (hsudc->pd->gpio_uninit)
hsudc->pd->gpio_uninit();
- s3c_hsudc_stop_activity(hsudc, driver);
+ s3c_hsudc_stop_activity(hsudc);
spin_unlock_irqrestore(&hsudc->lock, flags);
if (hsudc->transceiver)
(void) otg_set_peripheral(hsudc->transceiver, NULL);
- driver->unbind(&hsudc->gadget);
- device_del(&hsudc->gadget.dev);
disable_irq(hsudc->irq);
+ regulator_bulk_disable(ARRAY_SIZE(hsudc->supplies), hsudc->supplies);
+
dev_info(hsudc->dev, "unregistered gadget driver '%s'\n",
driver->driver.name);
return 0;
@@ -1243,7 +1237,7 @@ static int s3c_hsudc_gadget_getframe(struct usb_gadget *gadget)
static int s3c_hsudc_vbus_draw(struct usb_gadget *gadget, unsigned mA)
{
- struct s3c_hsudc *hsudc = the_controller;
+ struct s3c_hsudc *hsudc = to_hsudc(gadget);
if (!hsudc)
return -ENODEV;
@@ -1256,18 +1250,18 @@ static int s3c_hsudc_vbus_draw(struct usb_gadget *gadget, unsigned mA)
static struct usb_gadget_ops s3c_hsudc_gadget_ops = {
.get_frame = s3c_hsudc_gadget_getframe,
- .start = s3c_hsudc_start,
- .stop = s3c_hsudc_stop,
+ .udc_start = s3c_hsudc_start,
+ .udc_stop = s3c_hsudc_stop,
.vbus_draw = s3c_hsudc_vbus_draw,
};
-static int s3c_hsudc_probe(struct platform_device *pdev)
+static int __devinit s3c_hsudc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct resource *res;
struct s3c_hsudc *hsudc;
struct s3c24xx_hsudc_platdata *pd = pdev->dev.platform_data;
- int ret;
+ int ret, i;
hsudc = kzalloc(sizeof(struct s3c_hsudc) +
sizeof(struct s3c_hsudc_ep) * pd->epnum,
@@ -1277,13 +1271,22 @@ static int s3c_hsudc_probe(struct platform_device *pdev)
return -ENOMEM;
}
- the_controller = hsudc;
platform_set_drvdata(pdev, dev);
hsudc->dev = dev;
hsudc->pd = pdev->dev.platform_data;
hsudc->transceiver = otg_get_transceiver();
+ for (i = 0; i < ARRAY_SIZE(hsudc->supplies); i++)
+ hsudc->supplies[i].supply = s3c_hsudc_supply_names[i];
+
+ ret = regulator_bulk_get(dev, ARRAY_SIZE(hsudc->supplies),
+ hsudc->supplies);
+ if (ret != 0) {
+ dev_err(dev, "failed to request supplies: %d\n", ret);
+ goto err_supplies;
+ }
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(dev, "unable to obtain driver resource data\n");
@@ -1308,10 +1311,9 @@ static int s3c_hsudc_probe(struct platform_device *pdev)
spin_lock_init(&hsudc->lock);
- device_initialize(&hsudc->gadget.dev);
dev_set_name(&hsudc->gadget.dev, "gadget");
- hsudc->gadget.is_dualspeed = 1;
+ hsudc->gadget.max_speed = USB_SPEED_HIGH;
hsudc->gadget.ops = &s3c_hsudc_gadget_ops;
hsudc->gadget.name = dev_name(dev);
hsudc->gadget.dev.parent = dev;
@@ -1320,6 +1322,7 @@ static int s3c_hsudc_probe(struct platform_device *pdev)
hsudc->gadget.is_otg = 0;
hsudc->gadget.is_a_peripheral = 0;
+ hsudc->gadget.speed = USB_SPEED_UNKNOWN;
s3c_hsudc_setup_ep(hsudc);
@@ -1349,12 +1352,20 @@ static int s3c_hsudc_probe(struct platform_device *pdev)
disable_irq(hsudc->irq);
local_irq_enable();
+ ret = device_register(&hsudc->gadget.dev);
+ if (ret) {
+ put_device(&hsudc->gadget.dev);
+ goto err_add_device;
+ }
+
ret = usb_add_gadget_udc(&pdev->dev, &hsudc->gadget);
if (ret)
goto err_add_udc;
return 0;
err_add_udc:
+ device_unregister(&hsudc->gadget.dev);
+err_add_device:
clk_disable(hsudc->uclk);
clk_put(hsudc->uclk);
err_clk:
@@ -1363,10 +1374,13 @@ err_irq:
iounmap(hsudc->regs);
err_remap:
- release_resource(hsudc->mem_rsrc);
- kfree(hsudc->mem_rsrc);
-
+ release_mem_region(res->start, resource_size(res));
err_res:
+ if (hsudc->transceiver)
+ otg_put_transceiver(hsudc->transceiver);
+
+ regulator_bulk_free(ARRAY_SIZE(hsudc->supplies), hsudc->supplies);
+err_supplies:
kfree(hsudc);
return ret;
}
@@ -1378,21 +1392,10 @@ static struct platform_driver s3c_hsudc_driver = {
},
.probe = s3c_hsudc_probe,
};
-MODULE_ALIAS("platform:s3c-hsudc");
-
-static int __init s3c_hsudc_modinit(void)
-{
- return platform_driver_register(&s3c_hsudc_driver);
-}
-static void __exit s3c_hsudc_modexit(void)
-{
- platform_driver_unregister(&s3c_hsudc_driver);
-}
-
-module_init(s3c_hsudc_modinit);
-module_exit(s3c_hsudc_modexit);
+module_platform_driver(s3c_hsudc_driver);
MODULE_DESCRIPTION("Samsung S3C24XX USB high-speed controller driver");
MODULE_AUTHOR("Thomas Abraham <thomas.ab@samsung.com>");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:s3c-hsudc");
diff --git a/drivers/usb/gadget/s3c2410_udc.c b/drivers/usb/gadget/s3c2410_udc.c
index b8643771fa80..3f87cb9344bb 100644
--- a/drivers/usb/gadget/s3c2410_udc.c
+++ b/drivers/usb/gadget/s3c2410_udc.c
@@ -3,7 +3,7 @@
*
* Samsung S3C24xx series on-chip full speed USB device controllers
*
- * Copyright (C) 2004-2007 Herbert Ptzl - Arnaud Patard
+ * Copyright (C) 2004-2007 Herbert Pötzl - Arnaud Patard
* Additional cleanups by Ben Dooks <ben-linux@fluff.org>
*
* This program is free software; you can redistribute it and/or modify
@@ -51,7 +51,7 @@
#define DRIVER_DESC "S3C2410 USB Device Controller Gadget"
#define DRIVER_VERSION "29 Apr 2007"
-#define DRIVER_AUTHOR "Herbert Ptzl <herbert@13thfloor.at>, " \
+#define DRIVER_AUTHOR "Herbert Pötzl <herbert@13thfloor.at>, " \
"Arnaud Patard <arnaud.patard@rtp-net.org>"
static const char gadget_name[] = "s3c2410_udc";
@@ -1683,9 +1683,9 @@ static int s3c2410_udc_start(struct usb_gadget_driver *driver,
if (udc->driver)
return -EBUSY;
- if (!bind || !driver->setup || driver->speed < USB_SPEED_FULL) {
+ if (!bind || !driver->setup || driver->max_speed < USB_SPEED_FULL) {
printk(KERN_ERR "Invalid driver: bind %p setup %p speed %d\n",
- bind, driver->setup, driver->speed);
+ bind, driver->setup, driver->max_speed);
return -EINVAL;
}
#if defined(MODULE)
diff --git a/drivers/usb/gadget/s3c2410_udc.h b/drivers/usb/gadget/s3c2410_udc.h
index a48f619cb1cc..1653bae08b80 100644
--- a/drivers/usb/gadget/s3c2410_udc.h
+++ b/drivers/usb/gadget/s3c2410_udc.h
@@ -2,7 +2,7 @@
* linux/drivers/usb/gadget/s3c2410_udc.h
* Samsung on-chip full speed USB device controllers
*
- * Copyright (C) 2004-2007 Herbert Ptzl - Arnaud Patard
+ * Copyright (C) 2004-2007 Herbert Pötzl - Arnaud Patard
* Additional cleanups by Ben Dooks <ben-linux@fluff.org>
*
* This program is free software; you can redistribute it and/or modify
diff --git a/drivers/usb/gadget/serial.c b/drivers/usb/gadget/serial.c
index ed1b816e58d8..ad9e5b2df642 100644
--- a/drivers/usb/gadget/serial.c
+++ b/drivers/usb/gadget/serial.c
@@ -123,11 +123,11 @@ MODULE_AUTHOR("Al Borchers");
MODULE_AUTHOR("David Brownell");
MODULE_LICENSE("GPL");
-static int use_acm = true;
+static bool use_acm = true;
module_param(use_acm, bool, 0);
MODULE_PARM_DESC(use_acm, "Use CDC ACM, default=yes");
-static int use_obex = false;
+static bool use_obex = false;
module_param(use_obex, bool, 0);
MODULE_PARM_DESC(use_obex, "Use CDC OBEX, default=no");
diff --git a/drivers/usb/gadget/udc-core.c b/drivers/usb/gadget/udc-core.c
index 6939e17f4580..0b0d12ccc487 100644
--- a/drivers/usb/gadget/udc-core.c
+++ b/drivers/usb/gadget/udc-core.c
@@ -371,14 +371,28 @@ static ssize_t usb_udc_softconn_store(struct device *dev,
}
static DEVICE_ATTR(soft_connect, S_IWUSR, NULL, usb_udc_softconn_store);
-static ssize_t usb_udc_speed_show(struct device *dev,
+#define USB_UDC_SPEED_ATTR(name, param) \
+ssize_t usb_udc_##param##_show(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct usb_udc *udc = container_of(dev, struct usb_udc, dev); \
+ return snprintf(buf, PAGE_SIZE, "%s\n", \
+ usb_speed_string(udc->gadget->param)); \
+} \
+static DEVICE_ATTR(name, S_IRUSR, usb_udc_##param##_show, NULL)
+
+static USB_UDC_SPEED_ATTR(current_speed, speed);
+static USB_UDC_SPEED_ATTR(maximum_speed, max_speed);
+
+/* TODO: Scheduled for removal in 3.8. */
+static ssize_t usb_udc_is_dualspeed_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct usb_udc *udc = container_of(dev, struct usb_udc, dev);
- return snprintf(buf, PAGE_SIZE, "%s\n",
- usb_speed_string(udc->gadget->speed));
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ gadget_is_dualspeed(udc->gadget));
}
-static DEVICE_ATTR(speed, S_IRUGO, usb_udc_speed_show, NULL);
+static DEVICE_ATTR(is_dualspeed, S_IRUSR, usb_udc_is_dualspeed_show, NULL);
#define USB_UDC_ATTR(name) \
ssize_t usb_udc_##name##_show(struct device *dev, \
@@ -391,7 +405,6 @@ ssize_t usb_udc_##name##_show(struct device *dev, \
} \
static DEVICE_ATTR(name, S_IRUGO, usb_udc_##name##_show, NULL)
-static USB_UDC_ATTR(is_dualspeed);
static USB_UDC_ATTR(is_otg);
static USB_UDC_ATTR(is_a_peripheral);
static USB_UDC_ATTR(b_hnp_enable);
@@ -401,7 +414,8 @@ static USB_UDC_ATTR(a_alt_hnp_support);
static struct attribute *usb_udc_attrs[] = {
&dev_attr_srp.attr,
&dev_attr_soft_connect.attr,
- &dev_attr_speed.attr,
+ &dev_attr_current_speed.attr,
+ &dev_attr_maximum_speed.attr,
&dev_attr_is_dualspeed.attr,
&dev_attr_is_otg.attr,
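For clarity, the USB_UDC_SPEED_ATTR(name, param) helper above generates one read-only sysfs attribute per gadget speed field; expanding it by hand for the maximum_speed case gives roughly:

ssize_t usb_udc_max_speed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct usb_udc *udc = container_of(dev, struct usb_udc, dev);

	return snprintf(buf, PAGE_SIZE, "%s\n",
			usb_speed_string(udc->gadget->max_speed));
}
static DEVICE_ATTR(maximum_speed, S_IRUSR, usb_udc_max_speed_show, NULL);

Userspace then reads the udc class device's maximum_speed attribute and gets a human-readable speed string.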
diff --git a/drivers/usb/gadget/usbstring.c b/drivers/usb/gadget/usbstring.c
index 58c4d37d312a..4d25b9009edf 100644
--- a/drivers/usb/gadget/usbstring.c
+++ b/drivers/usb/gadget/usbstring.c
@@ -13,82 +13,17 @@
#include <linux/string.h>
#include <linux/device.h>
#include <linux/init.h>
+#include <linux/nls.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
-#include <asm/unaligned.h>
-
-
-static int utf8_to_utf16le(const char *s, __le16 *cp, unsigned len)
-{
- int count = 0;
- u8 c;
- u16 uchar;
-
- /* this insists on correct encodings, though not minimal ones.
- * BUT it currently rejects legit 4-byte UTF-8 code points,
- * which need surrogate pairs. (Unicode 3.1 can use them.)
- */
- while (len != 0 && (c = (u8) *s++) != 0) {
- if (unlikely(c & 0x80)) {
- // 2-byte sequence:
- // 00000yyyyyxxxxxx = 110yyyyy 10xxxxxx
- if ((c & 0xe0) == 0xc0) {
- uchar = (c & 0x1f) << 6;
-
- c = (u8) *s++;
- if ((c & 0xc0) != 0x80)
- goto fail;
- c &= 0x3f;
- uchar |= c;
-
- // 3-byte sequence (most CJKV characters):
- // zzzzyyyyyyxxxxxx = 1110zzzz 10yyyyyy 10xxxxxx
- } else if ((c & 0xf0) == 0xe0) {
- uchar = (c & 0x0f) << 12;
-
- c = (u8) *s++;
- if ((c & 0xc0) != 0x80)
- goto fail;
- c &= 0x3f;
- uchar |= c << 6;
-
- c = (u8) *s++;
- if ((c & 0xc0) != 0x80)
- goto fail;
- c &= 0x3f;
- uchar |= c;
-
- /* no bogus surrogates */
- if (0xd800 <= uchar && uchar <= 0xdfff)
- goto fail;
-
- // 4-byte sequence (surrogate pairs, currently rare):
- // 11101110wwwwzzzzyy + 110111yyyyxxxxxx
- // = 11110uuu 10uuzzzz 10yyyyyy 10xxxxxx
- // (uuuuu = wwww + 1)
- // FIXME accept the surrogate code points (only)
-
- } else
- goto fail;
- } else
- uchar = c;
- put_unaligned_le16(uchar, cp++);
- count++;
- len--;
- }
- return count;
-fail:
- return -1;
-}
-
/**
* usb_gadget_get_string - fill out a string descriptor
* @table: of c strings encoded using UTF-8
* @id: string id, from low byte of wValue in get string descriptor
- * @buf: at least 256 bytes
+ * @buf: at least 256 bytes, must be 16-bit aligned
*
* Finds the UTF-8 string matching the ID, and converts it into a
* string descriptor in utf16-le.
@@ -125,8 +60,8 @@ usb_gadget_get_string (struct usb_gadget_strings *table, int id, u8 *buf)
/* string descriptors have length, tag, then UTF16-LE text */
len = min ((size_t) 126, strlen (s->s));
- memset (buf + 2, 0, 2 * len); /* zero all the bytes */
- len = utf8_to_utf16le(s->s, (__le16 *)&buf[2], len);
+ len = utf8s_to_utf16s(s->s, len, UTF16_LITTLE_ENDIAN,
+ (wchar_t *) &buf[2], 126);
if (len < 0)
return -EINVAL;
buf [0] = (len + 1) * 2;
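The hand-rolled UTF-8 to UTF-16LE converter above is replaced by the generic NLS helper. A minimal sketch of how utf8s_to_utf16s() is used for a string-descriptor payload (the wrapper name is hypothetical):

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/nls.h>

/* convert a UTF-8 table entry into the UTF-16LE payload at buf[2..] */
static int example_fill_string_payload(const char *utf8, u8 *buf)
{
	size_t len = min((size_t) 126, strlen(utf8));

	/* returns the number of 16-bit code units written, or < 0 */
	return utf8s_to_utf16s(utf8, len, UTF16_LITTLE_ENDIAN,
			(wchar_t *) &buf[2], 126);
}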
diff --git a/drivers/usb/gadget/zero.c b/drivers/usb/gadget/zero.c
index 20697cc132d1..31d34832907e 100644
--- a/drivers/usb/gadget/zero.c
+++ b/drivers/usb/gadget/zero.c
@@ -81,7 +81,7 @@ module_param(buflen, uint, 0);
* work better with hosts where config changes are problematic or
* controllers (like original superh) that only support one config.
*/
-static int loopdefault = 0;
+static bool loopdefault = 0;
module_param(loopdefault, bool, S_IRUGO|S_IWUSR);
/*-------------------------------------------------------------------------*/
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 060e0e2b1ae6..91413cac97be 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -194,6 +194,15 @@ config USB_EHCI_S5P
help
Enable support for the S5P SOC's on-chip EHCI controller.
+config USB_EHCI_MV
+ bool "EHCI support for Marvell on-chip controller"
+ depends on USB_EHCI_HCD
+ select USB_EHCI_ROOT_HUB_TT
+ ---help---
+ Enables support for the Marvell (including PXA and MMP series) on-chip
+ USB SPH and OTG controllers. SPH is a single-port host and can only
+ act as an EHCI host. OTG is a controller that can switch to host mode.
+
config USB_W90X900_EHCI
bool "W90X900(W90P910) EHCI support"
depends on USB_EHCI_HCD && ARCH_W90X900
@@ -210,7 +219,7 @@ config USB_CNS3XXX_EHCI
config USB_EHCI_ATH79
bool "EHCI support for AR7XXX/AR9XXX SoCs"
- depends on USB_EHCI_HCD && (SOC_AR71XX || SOC_AR724X || SOC_AR913X)
+ depends on USB_EHCI_HCD && (SOC_AR71XX || SOC_AR724X || SOC_AR913X || SOC_AR933X)
select USB_EHCI_ROOT_HUB_TT
default y
---help---
@@ -371,6 +380,12 @@ config USB_OHCI_SH
Enables support for the on-chip OHCI controller on the SuperH.
If you use the PCI OHCI controller, this option is not necessary.
+config USB_OHCI_EXYNOS
+ boolean "OHCI support for Samsung EXYNOS SoC Series"
+ depends on USB_OHCI_HCD && ARCH_EXYNOS
+ help
+ Enable support for the Samsung Exynos SOC's on-chip OHCI controller.
+
config USB_CNS3XXX_OHCI
bool "Cavium CNS3XXX OHCI Module"
depends on USB_OHCI_HCD && ARCH_CNS3XXX
diff --git a/drivers/usb/host/alchemy-common.c b/drivers/usb/host/alchemy-common.c
index b4192c964d0d..936af8359fb2 100644
--- a/drivers/usb/host/alchemy-common.c
+++ b/drivers/usb/host/alchemy-common.c
@@ -52,9 +52,263 @@
USBCFG_EBE | USBCFG_EME | USBCFG_OBE | \
USBCFG_OME)
+/* Au1300 USB config registers */
+#define USB_DWC_CTRL1 0x00
+#define USB_DWC_CTRL2 0x04
+#define USB_VBUS_TIMER 0x10
+#define USB_SBUS_CTRL 0x14
+#define USB_MSR_ERR 0x18
+#define USB_DWC_CTRL3 0x1C
+#define USB_DWC_CTRL4 0x20
+#define USB_OTG_STATUS 0x28
+#define USB_DWC_CTRL5 0x2C
+#define USB_DWC_CTRL6 0x30
+#define USB_DWC_CTRL7 0x34
+#define USB_PHY_STATUS 0xC0
+#define USB_INT_STATUS 0xC4
+#define USB_INT_ENABLE 0xC8
+
+#define USB_DWC_CTRL1_OTGD 0x04 /* set to DISable OTG */
+#define USB_DWC_CTRL1_HSTRS 0x02 /* set to ENable EHCI */
+#define USB_DWC_CTRL1_DCRS 0x01 /* set to ENable UDC */
+
+#define USB_DWC_CTRL2_PHY1RS 0x04 /* set to enable PHY1 */
+#define USB_DWC_CTRL2_PHY0RS 0x02 /* set to enable PHY0 */
+#define USB_DWC_CTRL2_PHYRS 0x01 /* set to enable PHY */
+
+#define USB_DWC_CTRL3_OHCI1_CKEN (1 << 19)
+#define USB_DWC_CTRL3_OHCI0_CKEN (1 << 18)
+#define USB_DWC_CTRL3_EHCI0_CKEN (1 << 17)
+#define USB_DWC_CTRL3_OTG0_CKEN (1 << 16)
+
+#define USB_SBUS_CTRL_SBCA 0x04 /* coherent access */
+
+#define USB_INTEN_FORCE 0x20
+#define USB_INTEN_PHY 0x10
+#define USB_INTEN_UDC 0x08
+#define USB_INTEN_EHCI 0x04
+#define USB_INTEN_OHCI1 0x02
+#define USB_INTEN_OHCI0 0x01
static DEFINE_SPINLOCK(alchemy_usb_lock);
+static inline void __au1300_usb_phyctl(void __iomem *base, int enable)
+{
+ unsigned long r, s;
+
+ r = __raw_readl(base + USB_DWC_CTRL2);
+ s = __raw_readl(base + USB_DWC_CTRL3);
+
+ s &= USB_DWC_CTRL3_OHCI1_CKEN | USB_DWC_CTRL3_OHCI0_CKEN |
+ USB_DWC_CTRL3_EHCI0_CKEN | USB_DWC_CTRL3_OTG0_CKEN;
+
+ if (enable) {
+ /* simply enable all PHYs */
+ r |= USB_DWC_CTRL2_PHY1RS | USB_DWC_CTRL2_PHY0RS |
+ USB_DWC_CTRL2_PHYRS;
+ __raw_writel(r, base + USB_DWC_CTRL2);
+ wmb();
+ } else if (!s) {
+ /* no USB block active, so disable all PHYs */
+ r &= ~(USB_DWC_CTRL2_PHY1RS | USB_DWC_CTRL2_PHY0RS |
+ USB_DWC_CTRL2_PHYRS);
+ __raw_writel(r, base + USB_DWC_CTRL2);
+ wmb();
+ }
+}
+
+static inline void __au1300_ohci_control(void __iomem *base, int enable, int id)
+{
+ unsigned long r;
+
+ if (enable) {
+ __raw_writel(1, base + USB_DWC_CTRL7); /* start OHCI clock */
+ wmb();
+
+ r = __raw_readl(base + USB_DWC_CTRL3); /* enable OHCI block */
+ r |= (id == 0) ? USB_DWC_CTRL3_OHCI0_CKEN
+ : USB_DWC_CTRL3_OHCI1_CKEN;
+ __raw_writel(r, base + USB_DWC_CTRL3);
+ wmb();
+
+ __au1300_usb_phyctl(base, enable); /* power up the PHYs */
+
+ r = __raw_readl(base + USB_INT_ENABLE);
+ r |= (id == 0) ? USB_INTEN_OHCI0 : USB_INTEN_OHCI1;
+ __raw_writel(r, base + USB_INT_ENABLE);
+ wmb();
+
+ /* reset the OHCI start clock bit */
+ __raw_writel(0, base + USB_DWC_CTRL7);
+ wmb();
+ } else {
+ r = __raw_readl(base + USB_INT_ENABLE);
+ r &= ~((id == 0) ? USB_INTEN_OHCI0 : USB_INTEN_OHCI1);
+ __raw_writel(r, base + USB_INT_ENABLE);
+ wmb();
+
+ r = __raw_readl(base + USB_DWC_CTRL3);
+ r &= ~((id == 0) ? USB_DWC_CTRL3_OHCI0_CKEN
+ : USB_DWC_CTRL3_OHCI1_CKEN);
+ __raw_writel(r, base + USB_DWC_CTRL3);
+ wmb();
+
+ __au1300_usb_phyctl(base, enable);
+ }
+}
+
+static inline void __au1300_ehci_control(void __iomem *base, int enable)
+{
+ unsigned long r;
+
+ if (enable) {
+ r = __raw_readl(base + USB_DWC_CTRL3);
+ r |= USB_DWC_CTRL3_EHCI0_CKEN;
+ __raw_writel(r, base + USB_DWC_CTRL3);
+ wmb();
+
+ r = __raw_readl(base + USB_DWC_CTRL1);
+ r |= USB_DWC_CTRL1_HSTRS;
+ __raw_writel(r, base + USB_DWC_CTRL1);
+ wmb();
+
+ __au1300_usb_phyctl(base, enable);
+
+ r = __raw_readl(base + USB_INT_ENABLE);
+ r |= USB_INTEN_EHCI;
+ __raw_writel(r, base + USB_INT_ENABLE);
+ wmb();
+ } else {
+ r = __raw_readl(base + USB_INT_ENABLE);
+ r &= ~USB_INTEN_EHCI;
+ __raw_writel(r, base + USB_INT_ENABLE);
+ wmb();
+
+ r = __raw_readl(base + USB_DWC_CTRL1);
+ r &= ~USB_DWC_CTRL1_HSTRS;
+ __raw_writel(r, base + USB_DWC_CTRL1);
+ wmb();
+
+ r = __raw_readl(base + USB_DWC_CTRL3);
+ r &= ~USB_DWC_CTRL3_EHCI0_CKEN;
+ __raw_writel(r, base + USB_DWC_CTRL3);
+ wmb();
+
+ __au1300_usb_phyctl(base, enable);
+ }
+}
+
+static inline void __au1300_udc_control(void __iomem *base, int enable)
+{
+ unsigned long r;
+
+ if (enable) {
+ r = __raw_readl(base + USB_DWC_CTRL1);
+ r |= USB_DWC_CTRL1_DCRS;
+ __raw_writel(r, base + USB_DWC_CTRL1);
+ wmb();
+
+ __au1300_usb_phyctl(base, enable);
+
+ r = __raw_readl(base + USB_INT_ENABLE);
+ r |= USB_INTEN_UDC;
+ __raw_writel(r, base + USB_INT_ENABLE);
+ wmb();
+ } else {
+ r = __raw_readl(base + USB_INT_ENABLE);
+ r &= ~USB_INTEN_UDC;
+ __raw_writel(r, base + USB_INT_ENABLE);
+ wmb();
+
+ r = __raw_readl(base + USB_DWC_CTRL1);
+ r &= ~USB_DWC_CTRL1_DCRS;
+ __raw_writel(r, base + USB_DWC_CTRL1);
+ wmb();
+
+ __au1300_usb_phyctl(base, enable);
+ }
+}
+
+static inline void __au1300_otg_control(void __iomem *base, int enable)
+{
+ unsigned long r;
+ if (enable) {
+ r = __raw_readl(base + USB_DWC_CTRL3);
+ r |= USB_DWC_CTRL3_OTG0_CKEN;
+ __raw_writel(r, base + USB_DWC_CTRL3);
+ wmb();
+
+ r = __raw_readl(base + USB_DWC_CTRL1);
+ r &= ~USB_DWC_CTRL1_OTGD;
+ __raw_writel(r, base + USB_DWC_CTRL1);
+ wmb();
+
+ __au1300_usb_phyctl(base, enable);
+ } else {
+ r = __raw_readl(base + USB_DWC_CTRL1);
+ r |= USB_DWC_CTRL1_OTGD;
+ __raw_writel(r, base + USB_DWC_CTRL1);
+ wmb();
+
+ r = __raw_readl(base + USB_DWC_CTRL3);
+ r &= ~USB_DWC_CTRL3_OTG0_CKEN;
+ __raw_writel(r, base + USB_DWC_CTRL3);
+ wmb();
+
+ __au1300_usb_phyctl(base, enable);
+ }
+}
+
+static inline int au1300_usb_control(int block, int enable)
+{
+ void __iomem *base =
+ (void __iomem *)KSEG1ADDR(AU1300_USB_CTL_PHYS_ADDR);
+ int ret = 0;
+
+ switch (block) {
+ case ALCHEMY_USB_OHCI0:
+ __au1300_ohci_control(base, enable, 0);
+ break;
+ case ALCHEMY_USB_OHCI1:
+ __au1300_ohci_control(base, enable, 1);
+ break;
+ case ALCHEMY_USB_EHCI0:
+ __au1300_ehci_control(base, enable);
+ break;
+ case ALCHEMY_USB_UDC0:
+ __au1300_udc_control(base, enable);
+ break;
+ case ALCHEMY_USB_OTG0:
+ __au1300_otg_control(base, enable);
+ break;
+ default:
+ ret = -ENODEV;
+ }
+ return ret;
+}
+
+static inline void au1300_usb_init(void)
+{
+ void __iomem *base =
+ (void __iomem *)KSEG1ADDR(AU1300_USB_CTL_PHYS_ADDR);
+
+ /* set some sane defaults. Note: we don't fiddle with DWC_CTRL4
+ * here at all: Port 2 routing (EHCI or UDC) must be set either
+ * by boot firmware or platform init code; I can't autodetect
+ * a sane setting.
+ */
+ __raw_writel(0, base + USB_INT_ENABLE); /* disable all USB irqs */
+ wmb();
+ __raw_writel(0, base + USB_DWC_CTRL3); /* disable all clocks */
+ wmb();
+ __raw_writel(~0, base + USB_MSR_ERR); /* clear all errors */
+ wmb();
+ __raw_writel(~0, base + USB_INT_STATUS); /* clear int status */
+ wmb();
+ /* set coherent access bit */
+ __raw_writel(USB_SBUS_CTRL_SBCA, base + USB_SBUS_CTRL);
+ wmb();
+}
static inline void __au1200_ohci_control(void __iomem *base, int enable)
{
@@ -233,6 +487,9 @@ int alchemy_usb_control(int block, int enable)
case ALCHEMY_CPU_AU1200:
ret = au1200_usb_control(block, enable);
break;
+ case ALCHEMY_CPU_AU1300:
+ ret = au1300_usb_control(block, enable);
+ break;
default:
ret = -ENODEV;
}
@@ -281,6 +538,20 @@ static void au1200_usb_pm(int susp)
}
}
+static void au1300_usb_pm(int susp)
+{
+ void __iomem *base =
+ (void __iomem *)KSEG1ADDR(AU1300_USB_CTL_PHYS_ADDR);
+ /* remember Port2 routing */
+ if (susp) {
+ alchemy_usb_pmdata[0] = __raw_readl(base + USB_DWC_CTRL4);
+ } else {
+ au1300_usb_init();
+ __raw_writel(alchemy_usb_pmdata[0], base + USB_DWC_CTRL4);
+ wmb();
+ }
+}
+
static void alchemy_usb_pm(int susp)
{
switch (alchemy_get_cputype()) {
@@ -295,6 +566,9 @@ static void alchemy_usb_pm(int susp)
case ALCHEMY_CPU_AU1200:
au1200_usb_pm(susp);
break;
+ case ALCHEMY_CPU_AU1300:
+ au1300_usb_pm(susp);
+ break;
}
}
@@ -328,6 +602,9 @@ static int __init alchemy_usb_init(void)
case ALCHEMY_CPU_AU1200:
au1200_usb_init();
break;
+ case ALCHEMY_CPU_AU1300:
+ au1300_usb_init();
+ break;
}
register_syscore_ops(&alchemy_usb_pm_ops);
diff --git a/drivers/usb/host/ehci-ath79.c b/drivers/usb/host/ehci-ath79.c
index afb6743cf094..f1424f9bc363 100644
--- a/drivers/usb/host/ehci-ath79.c
+++ b/drivers/usb/host/ehci-ath79.c
@@ -33,6 +33,10 @@ static const struct platform_device_id ehci_ath79_id_table[] = {
.driver_data = EHCI_ATH79_IP_V2,
},
{
+ .name = "ar933x-ehci",
+ .driver_data = EHCI_ATH79_IP_V2,
+ },
+ {
/* terminating entry */
},
};
diff --git a/drivers/usb/host/ehci-au1xxx.c b/drivers/usb/host/ehci-au1xxx.c
index 18bafa99fe57..bf7441afed16 100644
--- a/drivers/usb/host/ehci-au1xxx.c
+++ b/drivers/usb/host/ehci-au1xxx.c
@@ -23,6 +23,7 @@ static int au1xxx_ehci_setup(struct usb_hcd *hcd)
int ret = ehci_init(hcd);
ehci->need_io_watchdog = 0;
+ ehci_reset(ehci);
return ret;
}
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 3ff9f82f7263..a007a9fe0f87 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -48,6 +48,10 @@
#include <asm/system.h>
#include <asm/unaligned.h>
+#if defined(CONFIG_PPC_PS3)
+#include <asm/firmware.h>
+#endif
+
/*-------------------------------------------------------------------------*/
/*
@@ -108,7 +112,7 @@ module_param (park, uint, S_IRUGO);
MODULE_PARM_DESC (park, "park setting; 1-3 back-to-back async packets");
/* for flakey hardware, ignore overcurrent indicators */
-static int ignore_oc = 0;
+static bool ignore_oc = 0;
module_param (ignore_oc, bool, S_IRUGO);
MODULE_PARM_DESC (ignore_oc, "ignore bogus hardware overcurrent indications");
@@ -230,12 +234,58 @@ static int ehci_halt (struct ehci_hcd *ehci)
STS_HALT, STS_HALT, 16 * 125);
}
+#if defined(CONFIG_USB_SUSPEND) && defined(CONFIG_PPC_PS3)
+
+/*
+ * The EHCI controller of the Cell Super Companion Chip used in the
+ * PS3 will stop the root hub after all root hub ports are suspended.
+ * When in this condition, handshake will return -ETIMEDOUT. The
+ * STS_HLT bit will not be set, so inspection of the frame index is
+ * used here to test for the condition. If the condition is found,
+ * return success to allow the USB suspend to complete.
+ */
+
+static int handshake_for_broken_root_hub(struct ehci_hcd *ehci,
+ void __iomem *ptr, u32 mask, u32 done,
+ int usec)
+{
+ unsigned int old_index;
+ int error;
+
+ if (!firmware_has_feature(FW_FEATURE_PS3_LV1))
+ return -ETIMEDOUT;
+
+ old_index = ehci_read_frame_index(ehci);
+
+ error = handshake(ehci, ptr, mask, done, usec);
+
+ if (error == -ETIMEDOUT && ehci_read_frame_index(ehci) == old_index)
+ return 0;
+
+ return error;
+}
+
+#else
+
+static int handshake_for_broken_root_hub(struct ehci_hcd *ehci,
+ void __iomem *ptr, u32 mask, u32 done,
+ int usec)
+{
+ return -ETIMEDOUT;
+}
+
+#endif
+
static int handshake_on_error_set_halt(struct ehci_hcd *ehci, void __iomem *ptr,
u32 mask, u32 done, int usec)
{
int error;
error = handshake(ehci, ptr, mask, done, usec);
+ if (error == -ETIMEDOUT)
+ error = handshake_for_broken_root_hub(ehci, ptr, mask, done,
+ usec);
+
if (error) {
ehci_halt(ehci);
ehci->rh_state = EHCI_RH_HALTED;
@@ -620,6 +670,7 @@ static int ehci_init(struct usb_hcd *hcd)
hw = ehci->async->hw;
hw->hw_next = QH_NEXT(ehci, ehci->async->qh_dma);
hw->hw_info1 = cpu_to_hc32(ehci, QH_HEAD);
+ hw->hw_info1 |= cpu_to_hc32(ehci, (1 << 7)); /* I = 1 */
hw->hw_token = cpu_to_hc32(ehci, QTD_STS_HALT);
hw->hw_qtd_next = EHCI_LIST_END(ehci);
ehci->async->qh_state = QH_STATE_LINKED;
@@ -677,22 +728,13 @@ static int ehci_init(struct usb_hcd *hcd)
static int ehci_run (struct usb_hcd *hcd)
{
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
- int retval;
u32 temp;
u32 hcc_params;
hcd->uses_new_polling = 1;
/* EHCI spec section 4.1 */
- /*
- * TDI driver does the ehci_reset in their reset callback.
- * Don't reset here, because configuration settings will
- * vanish.
- */
- if (!ehci_is_TDI(ehci) && (retval = ehci_reset(ehci)) != 0) {
- ehci_mem_cleanup(ehci);
- return retval;
- }
+
ehci_writel(ehci, ehci->periodic_dma, &ehci->regs->frame_list);
ehci_writel(ehci, (u32)ehci->async->qh_dma, &ehci->regs->async_next);
@@ -1324,11 +1366,16 @@ MODULE_LICENSE ("GPL");
#define PLATFORM_DRIVER ehci_pxa168_driver
#endif
-#ifdef CONFIG_NLM_XLR
+#ifdef CONFIG_CPU_XLR
#include "ehci-xls.c"
#define PLATFORM_DRIVER ehci_xls_driver
#endif
+#ifdef CONFIG_USB_EHCI_MV
+#include "ehci-mv.c"
+#define PLATFORM_DRIVER ehci_mv_driver
+#endif
+
#if !defined(PCI_DRIVER) && !defined(PLATFORM_DRIVER) && \
!defined(PS3_SYSTEM_BUS_DRIVER) && !defined(OF_PLATFORM_DRIVER) && \
!defined(XILINX_OF_PLATFORM_DRIVER)
diff --git a/drivers/usb/host/ehci-mv.c b/drivers/usb/host/ehci-mv.c
new file mode 100644
index 000000000000..52a604fb9321
--- /dev/null
+++ b/drivers/usb/host/ehci-mv.c
@@ -0,0 +1,391 @@
+/*
+ * Copyright (C) 2011 Marvell International Ltd. All rights reserved.
+ * Author: Chao Xie <chao.xie@marvell.com>
+ * Neil Zhang <zhangwm@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/usb/otg.h>
+#include <linux/platform_data/mv_usb.h>
+
+#define CAPLENGTH_MASK (0xff)
+
+struct ehci_hcd_mv {
+ struct usb_hcd *hcd;
+
+ /* Which mode is this ehci running in: OTG or Host? */
+ int mode;
+
+ void __iomem *phy_regs;
+ void __iomem *cap_regs;
+ void __iomem *op_regs;
+
+ struct otg_transceiver *otg;
+
+ struct mv_usb_platform_data *pdata;
+
+ /* clock source and total clock number */
+ unsigned int clknum;
+ struct clk *clk[0];
+};
+
+static void ehci_clock_enable(struct ehci_hcd_mv *ehci_mv)
+{
+ unsigned int i;
+
+ for (i = 0; i < ehci_mv->clknum; i++)
+ clk_enable(ehci_mv->clk[i]);
+}
+
+static void ehci_clock_disable(struct ehci_hcd_mv *ehci_mv)
+{
+ unsigned int i;
+
+ for (i = 0; i < ehci_mv->clknum; i++)
+ clk_disable(ehci_mv->clk[i]);
+}
+
+static int mv_ehci_enable(struct ehci_hcd_mv *ehci_mv)
+{
+ int retval;
+
+ ehci_clock_enable(ehci_mv);
+ if (ehci_mv->pdata->phy_init) {
+ retval = ehci_mv->pdata->phy_init(ehci_mv->phy_regs);
+ if (retval)
+ return retval;
+ }
+
+ return 0;
+}
+
+static void mv_ehci_disable(struct ehci_hcd_mv *ehci_mv)
+{
+ if (ehci_mv->pdata->phy_deinit)
+ ehci_mv->pdata->phy_deinit(ehci_mv->phy_regs);
+ ehci_clock_disable(ehci_mv);
+}
+
+static int mv_ehci_reset(struct usb_hcd *hcd)
+{
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ struct device *dev = hcd->self.controller;
+ struct ehci_hcd_mv *ehci_mv = dev_get_drvdata(dev);
+ int retval;
+
+ if (ehci_mv == NULL) {
+ dev_err(dev, "Can not find private ehci data\n");
+ return -ENODEV;
+ }
+
+ /*
+ * data structure init
+ */
+ retval = ehci_init(hcd);
+ if (retval) {
+ dev_err(dev, "ehci_init failed %d\n", retval);
+ return retval;
+ }
+
+ hcd->has_tt = 1;
+ ehci->sbrn = 0x20;
+
+ retval = ehci_reset(ehci);
+ if (retval) {
+ dev_err(dev, "ehci_reset failed %d\n", retval);
+ return retval;
+ }
+
+ return 0;
+}
+
+static const struct hc_driver mv_ehci_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "Marvell EHCI",
+ .hcd_priv_size = sizeof(struct ehci_hcd),
+
+ /*
+ * generic hardware linkage
+ */
+ .irq = ehci_irq,
+ .flags = HCD_MEMORY | HCD_USB2,
+
+ /*
+ * basic lifecycle operations
+ */
+ .reset = mv_ehci_reset,
+ .start = ehci_run,
+ .stop = ehci_stop,
+ .shutdown = ehci_shutdown,
+
+ /*
+ * managing i/o requests and associated device resources
+ */
+ .urb_enqueue = ehci_urb_enqueue,
+ .urb_dequeue = ehci_urb_dequeue,
+ .endpoint_disable = ehci_endpoint_disable,
+ .endpoint_reset = ehci_endpoint_reset,
+ .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
+
+ /*
+ * scheduling support
+ */
+ .get_frame_number = ehci_get_frame,
+
+ /*
+ * root hub support
+ */
+ .hub_status_data = ehci_hub_status_data,
+ .hub_control = ehci_hub_control,
+ .bus_suspend = ehci_bus_suspend,
+ .bus_resume = ehci_bus_resume,
+};
+
+static int mv_ehci_probe(struct platform_device *pdev)
+{
+ struct mv_usb_platform_data *pdata = pdev->dev.platform_data;
+ struct usb_hcd *hcd;
+ struct ehci_hcd *ehci;
+ struct ehci_hcd_mv *ehci_mv;
+ struct resource *r;
+ int clk_i, retval = -ENODEV;
+ u32 offset;
+ size_t size;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "missing platform_data\n");
+ return -ENODEV;
+ }
+
+ if (usb_disabled())
+ return -ENODEV;
+
+ hcd = usb_create_hcd(&mv_ehci_hc_driver, &pdev->dev, "mv ehci");
+ if (!hcd)
+ return -ENOMEM;
+
+ size = sizeof(*ehci_mv) + sizeof(struct clk *) * pdata->clknum;
+ ehci_mv = kzalloc(size, GFP_KERNEL);
+ if (ehci_mv == NULL) {
+ dev_err(&pdev->dev, "cannot allocate ehci_hcd_mv\n");
+ retval = -ENOMEM;
+ goto err_put_hcd;
+ }
+
+ platform_set_drvdata(pdev, ehci_mv);
+ ehci_mv->pdata = pdata;
+ ehci_mv->hcd = hcd;
+
+ ehci_mv->clknum = pdata->clknum;
+ for (clk_i = 0; clk_i < ehci_mv->clknum; clk_i++) {
+ ehci_mv->clk[clk_i] =
+ clk_get(&pdev->dev, pdata->clkname[clk_i]);
+ if (IS_ERR(ehci_mv->clk[clk_i])) {
+ dev_err(&pdev->dev, "error get clck \"%s\"\n",
+ pdata->clkname[clk_i]);
+ retval = PTR_ERR(ehci_mv->clk[clk_i]);
+ goto err_put_clk;
+ }
+ }
+
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phyregs");
+ if (r == NULL) {
+ dev_err(&pdev->dev, "no phy I/O memory resource defined\n");
+ retval = -ENODEV;
+ goto err_put_clk;
+ }
+
+ ehci_mv->phy_regs = ioremap(r->start, resource_size(r));
+ if (ehci_mv->phy_regs == NULL) {
+ dev_err(&pdev->dev, "failed to map phy I/O memory\n");
+ retval = -EFAULT;
+ goto err_put_clk;
+ }
+
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "capregs");
+ if (!r) {
+ dev_err(&pdev->dev, "no I/O memory resource defined\n");
+ retval = -ENODEV;
+ goto err_iounmap_phyreg;
+ }
+
+ ehci_mv->cap_regs = ioremap(r->start, resource_size(r));
+ if (ehci_mv->cap_regs == NULL) {
+ dev_err(&pdev->dev, "failed to map I/O memory\n");
+ retval = -EFAULT;
+ goto err_iounmap_phyreg;
+ }
+
+ retval = mv_ehci_enable(ehci_mv);
+ if (retval) {
+ dev_err(&pdev->dev, "init phy error %d\n", retval);
+ goto err_iounmap_capreg;
+ }
+
+ offset = readl(ehci_mv->cap_regs) & CAPLENGTH_MASK;
+ ehci_mv->op_regs =
+ (void __iomem *) ((unsigned long) ehci_mv->cap_regs + offset);
+
+ hcd->rsrc_start = r->start;
+ hcd->rsrc_len = r->end - r->start + 1;
+ hcd->regs = ehci_mv->op_regs;
+
+ hcd->irq = platform_get_irq(pdev, 0);
+ if (!hcd->irq) {
+ dev_err(&pdev->dev, "Cannot get irq.");
+ retval = -ENODEV;
+ goto err_disable_clk;
+ }
+
+ ehci = hcd_to_ehci(hcd);
+ ehci->caps = (struct ehci_caps *) ehci_mv->cap_regs;
+ ehci->regs = (struct ehci_regs *) ehci_mv->op_regs;
+ ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
+
+ ehci_mv->mode = pdata->mode;
+ if (ehci_mv->mode == MV_USB_MODE_OTG) {
+#ifdef CONFIG_USB_OTG_UTILS
+ ehci_mv->otg = otg_get_transceiver();
+ if (!ehci_mv->otg) {
+ dev_err(&pdev->dev,
+ "unable to find transceiver\n");
+ retval = -ENODEV;
+ goto err_disable_clk;
+ }
+
+ retval = otg_set_host(ehci_mv->otg, &hcd->self);
+ if (retval < 0) {
+ dev_err(&pdev->dev,
+ "unable to register with transceiver\n");
+ retval = -ENODEV;
+ goto err_put_transceiver;
+ }
+ /* otg will enable clock before use as host */
+ mv_ehci_disable(ehci_mv);
+#else
+ dev_info(&pdev->dev, "MV_USB_MODE_OTG "
+ "must have CONFIG_USB_OTG_UTILS enabled\n");
+ goto err_disable_clk;
+#endif
+ } else {
+ if (pdata->set_vbus)
+ pdata->set_vbus(1);
+
+ retval = usb_add_hcd(hcd, hcd->irq, IRQF_SHARED);
+ if (retval) {
+ dev_err(&pdev->dev,
+ "failed to add hcd with err %d\n", retval);
+ goto err_set_vbus;
+ }
+ }
+
+ if (pdata->private_init)
+ pdata->private_init(ehci_mv->op_regs, ehci_mv->phy_regs);
+
+ dev_info(&pdev->dev,
+ "successful find EHCI device with regs 0x%p irq %d"
+ " working in %s mode\n", hcd->regs, hcd->irq,
+ ehci_mv->mode == MV_USB_MODE_OTG ? "OTG" : "Host");
+
+ return 0;
+
+err_set_vbus:
+ if (pdata->set_vbus)
+ pdata->set_vbus(0);
+#ifdef CONFIG_USB_OTG_UTILS
+err_put_transceiver:
+ if (ehci_mv->otg)
+ otg_put_transceiver(ehci_mv->otg);
+#endif
+err_disable_clk:
+ mv_ehci_disable(ehci_mv);
+err_iounmap_capreg:
+ iounmap(ehci_mv->cap_regs);
+err_iounmap_phyreg:
+ iounmap(ehci_mv->phy_regs);
+err_put_clk:
+ for (clk_i--; clk_i >= 0; clk_i--)
+ clk_put(ehci_mv->clk[clk_i]);
+ platform_set_drvdata(pdev, NULL);
+ kfree(ehci_mv);
+err_put_hcd:
+ usb_put_hcd(hcd);
+
+ return retval;
+}
+
+static int mv_ehci_remove(struct platform_device *pdev)
+{
+ struct ehci_hcd_mv *ehci_mv = platform_get_drvdata(pdev);
+ struct usb_hcd *hcd = ehci_mv->hcd;
+ int clk_i;
+
+ if (hcd->rh_registered)
+ usb_remove_hcd(hcd);
+
+ if (ehci_mv->otg) {
+ otg_set_host(ehci_mv->otg, NULL);
+ otg_put_transceiver(ehci_mv->otg);
+ }
+
+ if (ehci_mv->mode == MV_USB_MODE_HOST) {
+ if (ehci_mv->pdata->set_vbus)
+ ehci_mv->pdata->set_vbus(0);
+
+ mv_ehci_disable(ehci_mv);
+ }
+
+ iounmap(ehci_mv->cap_regs);
+ iounmap(ehci_mv->phy_regs);
+
+ for (clk_i = 0; clk_i < ehci_mv->clknum; clk_i++)
+ clk_put(ehci_mv->clk[clk_i]);
+
+ platform_set_drvdata(pdev, NULL);
+
+ kfree(ehci_mv);
+ usb_put_hcd(hcd);
+
+ return 0;
+}
+
+MODULE_ALIAS("mv-ehci");
+
+static const struct platform_device_id ehci_id_table[] = {
+ {"pxa-u2oehci", PXA_U2OEHCI},
+ {"pxa-sph", PXA_SPH},
+ {"mmp3-hsic", MMP3_HSIC},
+ {"mmp3-fsic", MMP3_FSIC},
+ {},
+};
+
+static void mv_ehci_shutdown(struct platform_device *pdev)
+{
+ struct ehci_hcd_mv *ehci_mv = platform_get_drvdata(pdev);
+ struct usb_hcd *hcd = ehci_mv->hcd;
+
+ if (!hcd->rh_registered)
+ return;
+
+ if (hcd->driver->shutdown)
+ hcd->driver->shutdown(hcd);
+}
+
+static struct platform_driver ehci_mv_driver = {
+ .probe = mv_ehci_probe,
+ .remove = mv_ehci_remove,
+ .shutdown = mv_ehci_shutdown,
+ .driver = {
+ .name = "mv-ehci",
+ .bus = &platform_bus_type,
+ },
+ .id_table = ehci_id_table,
+};
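The new glue driver matches on the platform device ids above and pulls its configuration from mv_usb_platform_data plus two named memory resources ("capregs" and "phyregs"). A hedged board-code sketch follows; all addresses, irq numbers and clock names are placeholders, not taken from this patch:

static char *example_sph_clock_names[] = { "U2OCLK" };

static struct mv_usb_platform_data example_sph_pdata = {
	.clknum		= 1,
	.clkname	= example_sph_clock_names,
	.mode		= MV_USB_MODE_HOST,
	/* .phy_init / .phy_deinit / .set_vbus are board specific */
};

static struct resource example_sph_resources[] = {
	{ .name = "capregs", .start = 0xd4208100, .end = 0xd42081ff,
	  .flags = IORESOURCE_MEM },
	{ .name = "phyregs", .start = 0xd4207000, .end = 0xd42070ff,
	  .flags = IORESOURCE_MEM },
	{ .start = 44, .end = 44, .flags = IORESOURCE_IRQ },
};

static struct platform_device example_sph_device = {
	.name		= "pxa-sph",
	.id		= -1,
	.num_resources	= ARRAY_SIZE(example_sph_resources),
	.resource	= example_sph_resources,
	.dev		= {
		.platform_data = &example_sph_pdata,
	},
};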
diff --git a/drivers/usb/host/ehci-octeon.c b/drivers/usb/host/ehci-octeon.c
index ba1f51361134..c0104882c72d 100644
--- a/drivers/usb/host/ehci-octeon.c
+++ b/drivers/usb/host/ehci-octeon.c
@@ -155,6 +155,8 @@ static int ehci_octeon_drv_probe(struct platform_device *pdev)
/* cache this readonly data; minimize chip reads */
ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
+ ehci_reset(ehci);
+
ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (ret) {
dev_dbg(&pdev->dev, "failed to add hcd with err %d\n", ret);
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index e39b0297bad1..bba9850f32f0 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -41,6 +41,7 @@
#include <linux/usb/ulpi.h>
#include <plat/usb.h>
#include <linux/regulator/consumer.h>
+#include <linux/pm_runtime.h>
/* EHCI Register Set */
#define EHCI_INSNREG04 (0xA0)
@@ -190,11 +191,8 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
}
}
- ret = omap_usbhs_enable(dev);
- if (ret) {
- dev_err(dev, "failed to start usbhs with err %d\n", ret);
- goto err_enable;
- }
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
/*
* An undocumented "feature" in the OMAP3 EHCI controller,
@@ -228,6 +226,8 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
/* cache this readonly data; minimize chip reads */
omap_ehci->hcs_params = readl(&omap_ehci->caps->hcs_params);
+ ehci_reset(omap_ehci);
+
ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (ret) {
dev_err(dev, "failed to add hcd with err %d\n", ret);
@@ -240,11 +240,8 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
return 0;
err_add_hcd:
- omap_usbhs_disable(dev);
-
-err_enable:
disable_put_regulator(pdata);
- usb_put_hcd(hcd);
+ pm_runtime_put_sync(dev);
err_io:
iounmap(regs);
@@ -266,10 +263,12 @@ static int ehci_hcd_omap_remove(struct platform_device *pdev)
struct usb_hcd *hcd = dev_get_drvdata(dev);
usb_remove_hcd(hcd);
- omap_usbhs_disable(dev);
disable_put_regulator(dev->platform_data);
iounmap(hcd->regs);
usb_put_hcd(hcd);
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+
return 0;
}
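The ehci-omap change swaps the explicit omap_usbhs_enable()/omap_usbhs_disable() calls for runtime PM on the parent usbhs core. A reduced sketch of the pairing now expected in probe and remove (function names hypothetical, error handling omitted):

#include <linux/pm_runtime.h>

static int example_probe(struct platform_device *pdev)
{
	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);	/* powers the host block up */

	/* ... ioremap, usb_create_hcd(), usb_add_hcd() ... */
	return 0;
}

static int example_remove(struct platform_device *pdev)
{
	/* ... usb_remove_hcd(), iounmap(), usb_put_hcd() ... */

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return 0;
}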
diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c
index a68a2a5c4b83..6c6a5a3b4ea7 100644
--- a/drivers/usb/host/ehci-orion.c
+++ b/drivers/usb/host/ehci-orion.c
@@ -172,7 +172,7 @@ static const struct hc_driver ehci_orion_hc_driver = {
static void __init
ehci_orion_conf_mbus_windows(struct usb_hcd *hcd,
- struct mbus_dram_target_info *dram)
+ const struct mbus_dram_target_info *dram)
{
int i;
@@ -182,7 +182,7 @@ ehci_orion_conf_mbus_windows(struct usb_hcd *hcd,
}
for (i = 0; i < dram->num_cs; i++) {
- struct mbus_dram_window *cs = dram->cs + i;
+ const struct mbus_dram_window *cs = dram->cs + i;
wrl(USB_WINDOW_CTRL(i), ((cs->size - 1) & 0xffff0000) |
(cs->mbus_attr << 8) |
@@ -194,6 +194,7 @@ ehci_orion_conf_mbus_windows(struct usb_hcd *hcd,
static int __devinit ehci_orion_drv_probe(struct platform_device *pdev)
{
struct orion_ehci_data *pd = pdev->dev.platform_data;
+ const struct mbus_dram_target_info *dram;
struct resource *res;
struct usb_hcd *hcd;
struct ehci_hcd *ehci;
@@ -259,8 +260,9 @@ static int __devinit ehci_orion_drv_probe(struct platform_device *pdev)
/*
* (Re-)program MBUS remapping windows if we are asked to.
*/
- if (pd != NULL && pd->dram != NULL)
- ehci_orion_conf_mbus_windows(hcd, pd->dram);
+ dram = mv_mbus_dram_info();
+ if (dram)
+ ehci_orion_conf_mbus_windows(hcd, dram);
/*
* setup Orion USB controller.
diff --git a/drivers/usb/host/ehci-ps3.c b/drivers/usb/host/ehci-ps3.c
index 2dc32da75cfc..a20e496eb479 100644
--- a/drivers/usb/host/ehci-ps3.c
+++ b/drivers/usb/host/ehci-ps3.c
@@ -21,6 +21,34 @@
#include <asm/firmware.h>
#include <asm/ps3.h>
+static void ps3_ehci_setup_insnreg(struct ehci_hcd *ehci)
+{
+ /* PS3 HC internal setup register offsets. */
+
+ enum ps3_ehci_hc_insnreg {
+ ps3_ehci_hc_insnreg01 = 0x084,
+ ps3_ehci_hc_insnreg02 = 0x088,
+ ps3_ehci_hc_insnreg03 = 0x08c,
+ };
+
+ /* PS3 EHCI HC errata fix 316 - The PS3 EHCI HC will reset its
+ * internal INSNREGXX setup regs back to the chip default values
+ * on Host Controller Reset (CMD_RESET) or Light Host Controller
+ * Reset (CMD_LRESET). The work-around for this is for the HC
+ * driver to re-initialise these regs whenever the HC is reset.
+ */
+
+ /* Set burst transfer counts to 256 out, 32 in. */
+
+ writel_be(0x01000020, (void __iomem *)ehci->regs +
+ ps3_ehci_hc_insnreg01);
+
+ /* Enable burst transfer counts. */
+
+ writel_be(0x00000001, (void __iomem *)ehci->regs +
+ ps3_ehci_hc_insnreg03);
+}
+
static int ps3_ehci_hc_reset(struct usb_hcd *hcd)
{
int result;
@@ -49,6 +77,8 @@ static int ps3_ehci_hc_reset(struct usb_hcd *hcd)
ehci_reset(ehci);
+ ps3_ehci_setup_insnreg(ehci);
+
return result;
}
diff --git a/drivers/usb/host/ehci-pxa168.c b/drivers/usb/host/ehci-pxa168.c
index ac0c16e8f539..8d0e7a22e711 100644
--- a/drivers/usb/host/ehci-pxa168.c
+++ b/drivers/usb/host/ehci-pxa168.c
@@ -299,7 +299,7 @@ static int __devinit ehci_pxa168_drv_probe(struct platform_device *pdev)
ehci = hcd_to_ehci(hcd);
ehci->caps = hcd->regs + 0x100;
ehci->regs = hcd->regs + 0x100 +
- HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase));
+ HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
hcd->has_tt = 1;
ehci->sbrn = 0x20;
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index 4e4066c35a09..36ca5077cdf7 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -373,6 +373,17 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
retry_xacterr:
if ((token & QTD_STS_ACTIVE) == 0) {
+ /* Report Data Buffer Error: non-fatal but useful */
+ if (token & QTD_STS_DBE)
+ ehci_dbg(ehci,
+ "detected DataBufferErr for urb %p ep%d%s len %d, qtd %p [qh %p]\n",
+ urb,
+ usb_endpoint_num(&urb->ep->desc),
+ usb_endpoint_dir_in(&urb->ep->desc) ? "in" : "out",
+ urb->transfer_buffer_length,
+ qtd,
+ qh);
+
/* on STALL, error, and short reads this urb must
* complete and all its qtds must be recycled.
*/
@@ -647,7 +658,7 @@ qh_urb_transaction (
/*
* data transfer stage: buffer setup
*/
- i = urb->num_sgs;
+ i = urb->num_mapped_sgs;
if (len > 0 && i > 0) {
sg = urb->sg;
buf = sg_dma_address(sg);
diff --git a/drivers/usb/host/ehci-s5p.c b/drivers/usb/host/ehci-s5p.c
index 024b65c4990d..293f7412992e 100644
--- a/drivers/usb/host/ehci-s5p.c
+++ b/drivers/usb/host/ehci-s5p.c
@@ -14,8 +14,6 @@
#include <linux/clk.h>
#include <linux/platform_device.h>
-#include <mach/regs-pmu.h>
-#include <plat/cpu.h>
#include <plat/ehci.h>
#include <plat/usb-phy.h>
@@ -136,6 +134,8 @@ static int __devinit s5p_ehci_probe(struct platform_device *pdev)
/* cache this readonly data; minimize chip reads */
ehci->hcs_params = readl(&ehci->caps->hcs_params);
+ ehci_reset(ehci);
+
err = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (err) {
dev_err(&pdev->dev, "Failed to add USB HCD\n");
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index 56a32033adb3..a60679cbbf85 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -1475,6 +1475,7 @@ iso_stream_schedule (
* jump until after the queue is primed.
*/
else {
+ int done = 0;
start = SCHEDULE_SLOP + (now & ~0x07);
/* NOTE: assumes URB_ISO_ASAP, to limit complexity/bugs */
@@ -1492,18 +1493,18 @@ iso_stream_schedule (
if (stream->highspeed) {
if (itd_slot_ok(ehci, mod, start,
stream->usecs, period))
- break;
+ done = 1;
} else {
if ((start % 8) >= 6)
continue;
if (sitd_slot_ok(ehci, mod, stream,
start, sched, period))
- break;
+ done = 1;
}
- } while (start > next);
+ } while (start > next && !done);
/* no room in the schedule */
- if (start == next) {
+ if (!done) {
ehci_dbg(ehci, "iso resched full %p (now %d max %d)\n",
urb, now, now + mod);
status = -ENOSPC;
diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c
index db9d1b4bfbdc..dbc7fe8ca9e7 100644
--- a/drivers/usb/host/ehci-tegra.c
+++ b/drivers/usb/host/ehci-tegra.c
@@ -21,7 +21,12 @@
#include <linux/platform_data/tegra_usb.h>
#include <linux/irq.h>
#include <linux/usb/otg.h>
+#include <linux/gpio.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+
#include <mach/usb_phy.h>
+#include <mach/iomap.h>
#define TEGRA_USB_DMA_ALIGN 32
@@ -574,6 +579,35 @@ static const struct hc_driver tegra_ehci_hc_driver = {
.port_handed_over = ehci_port_handed_over,
};
+static int setup_vbus_gpio(struct platform_device *pdev)
+{
+ int err = 0;
+ int gpio;
+
+ if (!pdev->dev.of_node)
+ return 0;
+
+ gpio = of_get_named_gpio(pdev->dev.of_node, "nvidia,vbus-gpio", 0);
+ if (!gpio_is_valid(gpio))
+ return 0;
+
+ err = gpio_request(gpio, "vbus_gpio");
+ if (err) {
+ dev_err(&pdev->dev, "can't request vbus gpio %d", gpio);
+ return err;
+ }
+ err = gpio_direction_output(gpio, 1);
+ if (err) {
+ dev_err(&pdev->dev, "can't enable vbus\n");
+ return err;
+ }
+ gpio_set_value(gpio, 1);
+
+ return err;
+}
+
+static u64 tegra_ehci_dma_mask = DMA_BIT_MASK(32);
+
static int tegra_ehci_probe(struct platform_device *pdev)
{
struct resource *res;
@@ -590,6 +624,15 @@ static int tegra_ehci_probe(struct platform_device *pdev)
return -EINVAL;
}
+ /* Right now device-tree probed devices don't get dma_mask set.
+ * Since shared usb code relies on it, set it here for now.
+ * Once we have dma capability bindings this can go away.
+ */
+ if (!pdev->dev.dma_mask)
+ pdev->dev.dma_mask = &tegra_ehci_dma_mask;
+
+ setup_vbus_gpio(pdev);
+
tegra = kzalloc(sizeof(struct tegra_ehci_hcd), GFP_KERNEL);
if (!tegra)
return -ENOMEM;
@@ -640,6 +683,28 @@ static int tegra_ehci_probe(struct platform_device *pdev)
goto fail_io;
}
+ /* This is pretty ugly and needs to be fixed when we do only
+ * device-tree probing. Old code relies on the platform_device
+ * numbering that we lack for device-tree-instantiated devices.
+ */
+ if (instance < 0) {
+ switch (res->start) {
+ case TEGRA_USB_BASE:
+ instance = 0;
+ break;
+ case TEGRA_USB2_BASE:
+ instance = 1;
+ break;
+ case TEGRA_USB3_BASE:
+ instance = 2;
+ break;
+ default:
+ err = -ENODEV;
+ dev_err(&pdev->dev, "unknown usb instance\n");
+ goto fail_phy;
+ }
+ }
+
tegra->phy = tegra_usb_phy_open(instance, hcd->regs, pdata->phy_config,
TEGRA_USB_PHY_MODE_HOST);
if (IS_ERR(tegra->phy)) {
@@ -773,6 +838,11 @@ static void tegra_ehci_hcd_shutdown(struct platform_device *pdev)
hcd->driver->shutdown(hcd);
}
+static struct of_device_id tegra_ehci_of_match[] __devinitdata = {
+ { .compatible = "nvidia,tegra20-ehci", },
+ { },
+};
+
static struct platform_driver tegra_ehci_driver = {
.probe = tegra_ehci_probe,
.remove = tegra_ehci_remove,
@@ -783,5 +853,6 @@ static struct platform_driver tegra_ehci_driver = {
.shutdown = tegra_ehci_hcd_shutdown,
.driver = {
.name = "tegra-ehci",
+ .of_match_table = tegra_ehci_of_match,
}
};
diff --git a/drivers/usb/host/ehci-vt8500.c b/drivers/usb/host/ehci-vt8500.c
index 54d1ab8aec49..c1eda73916cd 100644
--- a/drivers/usb/host/ehci-vt8500.c
+++ b/drivers/usb/host/ehci-vt8500.c
@@ -132,6 +132,8 @@ static int vt8500_ehci_drv_probe(struct platform_device *pdev)
ehci_port_power(ehci, 1);
+ ehci_reset(ehci);
+
ret = usb_add_hcd(hcd, pdev->resource[1].start,
IRQF_SHARED);
if (ret == 0) {
diff --git a/drivers/usb/host/ehci-w90x900.c b/drivers/usb/host/ehci-w90x900.c
index d661cf7de140..3d2e26cbb34c 100644
--- a/drivers/usb/host/ehci-w90x900.c
+++ b/drivers/usb/host/ehci-w90x900.c
@@ -78,6 +78,8 @@ static int __devinit usb_w90x900_probe(const struct hc_driver *driver,
if (irq < 0)
goto err4;
+ ehci_reset(ehci);
+
retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (retval != 0)
goto err4;
diff --git a/drivers/usb/host/ehci-xilinx-of.c b/drivers/usb/host/ehci-xilinx-of.c
index 32793ce3d9e9..9c2cc4633894 100644
--- a/drivers/usb/host/ehci-xilinx-of.c
+++ b/drivers/usb/host/ehci-xilinx-of.c
@@ -183,7 +183,7 @@ static int __devinit ehci_hcd_xilinx_of_probe(struct platform_device *op)
}
irq = irq_of_parse_and_map(dn, 0);
- if (irq == NO_IRQ) {
+ if (!irq) {
printk(KERN_ERR "%s: irq_of_parse_and_map failed\n", __FILE__);
rv = -EBUSY;
goto err_irq;
diff --git a/drivers/usb/host/ehci-xls.c b/drivers/usb/host/ehci-xls.c
index b4fb511d24bc..72f08196f8cd 100644
--- a/drivers/usb/host/ehci-xls.c
+++ b/drivers/usb/host/ehci-xls.c
@@ -69,7 +69,7 @@ int ehci_xls_probe_internal(const struct hc_driver *driver,
}
hcd->rsrc_start = res->start;
- hcd->rsrc_len = res->end - res->start + 1;
+ hcd->rsrc_len = resource_size(res);
if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len,
driver->description)) {
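
For reference (not part of the hunk above): resource_size() is the generic accessor from include/linux/ioport.h, roughly equivalent to the open-coded arithmetic it replaces, and avoids the usual off-by-one mistakes:

static inline resource_size_t resource_size(const struct resource *res)
{
	return res->end - res->start + 1;
}

The same conversion appears again below in ohci-xls.c.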
diff --git a/drivers/usb/host/fhci-hcd.c b/drivers/usb/host/fhci-hcd.c
index 4ed6d19f2a54..d2623747b489 100644
--- a/drivers/usb/host/fhci-hcd.c
+++ b/drivers/usb/host/fhci-hcd.c
@@ -824,17 +824,7 @@ static struct platform_driver of_fhci_driver = {
.remove = __devexit_p(of_fhci_remove),
};
-static int __init fhci_module_init(void)
-{
- return platform_driver_register(&of_fhci_driver);
-}
-module_init(fhci_module_init);
-
-static void __exit fhci_module_exit(void)
-{
- platform_driver_unregister(&of_fhci_driver);
-}
-module_exit(fhci_module_exit);
+module_platform_driver(of_fhci_driver);
MODULE_DESCRIPTION("USB Freescale Host Controller Interface Driver");
MODULE_AUTHOR("Shlomi Gridish <gridish@freescale.com>, "
diff --git a/drivers/usb/host/fsl-mph-dr-of.c b/drivers/usb/host/fsl-mph-dr-of.c
index 9037035ad1e4..7916e56a725e 100644
--- a/drivers/usb/host/fsl-mph-dr-of.c
+++ b/drivers/usb/host/fsl-mph-dr-of.c
@@ -297,17 +297,7 @@ static struct platform_driver fsl_usb2_mph_dr_driver = {
.remove = __devexit_p(fsl_usb2_mph_dr_of_remove),
};
-static int __init fsl_usb2_mph_dr_init(void)
-{
- return platform_driver_register(&fsl_usb2_mph_dr_driver);
-}
-module_init(fsl_usb2_mph_dr_init);
-
-static void __exit fsl_usb2_mph_dr_exit(void)
-{
- platform_driver_unregister(&fsl_usb2_mph_dr_driver);
-}
-module_exit(fsl_usb2_mph_dr_exit);
+module_platform_driver(fsl_usb2_mph_dr_driver);
MODULE_DESCRIPTION("FSL MPH DR OF devices driver");
MODULE_AUTHOR("Anatolij Gustschin <agust@denx.de>");
diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c
index 056fb2d37c88..104730dabd2d 100644
--- a/drivers/usb/host/hwa-hc.c
+++ b/drivers/usb/host/hwa-hc.c
@@ -481,7 +481,7 @@ static int __hwahc_op_set_ptk(struct wusbhc *wusbhc, u8 port_idx, u32 tkid,
encryption_value = 0;
}
- /* Set the encryption type for commmunicating with the device */
+ /* Set the encryption type for communicating with the device */
result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
USB_REQ_SET_ENCRYPTION,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
@@ -776,7 +776,6 @@ static int hwahc_probe(struct usb_interface *usb_iface,
goto error_alloc;
}
usb_hcd->wireless = 1;
- set_bit(HCD_FLAG_SAW_IRQ, &usb_hcd->flags);
wusbhc = usb_hcd_to_wusbhc(usb_hcd);
hwahc = container_of(wusbhc, struct hwahc, wusbhc);
hwahc_init(hwahc);
diff --git a/drivers/usb/host/imx21-hcd.c b/drivers/usb/host/imx21-hcd.c
index 2ee18cfa1efe..ff471c1c165e 100644
--- a/drivers/usb/host/imx21-hcd.c
+++ b/drivers/usb/host/imx21-hcd.c
@@ -473,7 +473,7 @@ static void free_epdmem(struct imx21 *imx21, struct usb_host_endpoint *ep)
/* End handling */
/* =========================================== */
-/* Endpoint now idle - release it's ETD(s) or asssign to queued request */
+/* Endpoint now idle - release its ETD(s) or assign to queued request */
static void ep_idle(struct imx21 *imx21, struct ep_priv *ep_priv)
{
int i;
@@ -1924,18 +1924,7 @@ static struct platform_driver imx21_hcd_driver = {
.resume = NULL,
};
-static int __init imx21_hcd_init(void)
-{
- return platform_driver_register(&imx21_hcd_driver);
-}
-
-static void __exit imx21_hcd_cleanup(void)
-{
- platform_driver_unregister(&imx21_hcd_driver);
-}
-
-module_init(imx21_hcd_init);
-module_exit(imx21_hcd_cleanup);
+module_platform_driver(imx21_hcd_driver);
MODULE_DESCRIPTION("i.MX21 USB Host controller");
MODULE_AUTHOR("Martin Fuzzey");
diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c
index 27dfab80ed8f..fc72d44bf787 100644
--- a/drivers/usb/host/isp1760-hcd.c
+++ b/drivers/usb/host/isp1760-hcd.c
@@ -32,6 +32,13 @@ static struct kmem_cache *qtd_cachep;
static struct kmem_cache *qh_cachep;
static struct kmem_cache *urb_listitem_cachep;
+enum queue_head_types {
+ QH_CONTROL,
+ QH_BULK,
+ QH_INTERRUPT,
+ QH_END
+};
+
struct isp1760_hcd {
u32 hcs_params;
spinlock_t lock;
@@ -40,7 +47,7 @@ struct isp1760_hcd {
struct slotinfo int_slots[32];
int int_done_map;
struct memory_chunk memory_pool[BLOCKS];
- struct list_head controlqhs, bulkqhs, interruptqhs;
+ struct list_head qh_list[QH_END];
/* periodic schedule support */
#define DEFAULT_I_TDPS 1024
@@ -406,12 +413,12 @@ static int priv_init(struct usb_hcd *hcd)
{
struct isp1760_hcd *priv = hcd_to_priv(hcd);
u32 hcc_params;
+ int i;
spin_lock_init(&priv->lock);
- INIT_LIST_HEAD(&priv->interruptqhs);
- INIT_LIST_HEAD(&priv->controlqhs);
- INIT_LIST_HEAD(&priv->bulkqhs);
+ for (i = 0; i < QH_END; i++)
+ INIT_LIST_HEAD(&priv->qh_list[i]);
/*
* hw default: 1K periodic list heads, one per frame.
@@ -930,9 +937,9 @@ void schedule_ptds(struct usb_hcd *hcd)
struct isp1760_hcd *priv;
struct isp1760_qh *qh, *qh_next;
struct list_head *ep_queue;
- struct usb_host_endpoint *ep;
LIST_HEAD(urb_list);
struct urb_listitem *urb_listitem, *urb_listitem_next;
+ int i;
if (!hcd) {
WARN_ON(1);
@@ -944,28 +951,13 @@ void schedule_ptds(struct usb_hcd *hcd)
/*
* check finished/retired xfers, transfer payloads, call urb_done()
*/
- ep_queue = &priv->interruptqhs;
- while (ep_queue) {
+ for (i = 0; i < QH_END; i++) {
+ ep_queue = &priv->qh_list[i];
list_for_each_entry_safe(qh, qh_next, ep_queue, qh_list) {
- ep = list_entry(qh->qtd_list.next, struct isp1760_qtd,
- qtd_list)->urb->ep;
collect_qtds(hcd, qh, &urb_list);
- if (list_empty(&qh->qtd_list)) {
+ if (list_empty(&qh->qtd_list))
list_del(&qh->qh_list);
- if (ep->hcpriv == NULL) {
- /* Endpoint has been disabled, so we
- can free the associated queue head. */
- qh_free(qh);
- }
- }
}
-
- if (ep_queue == &priv->interruptqhs)
- ep_queue = &priv->controlqhs;
- else if (ep_queue == &priv->controlqhs)
- ep_queue = &priv->bulkqhs;
- else
- ep_queue = NULL;
}
list_for_each_entry_safe(urb_listitem, urb_listitem_next, &urb_list,
@@ -998,17 +990,10 @@ void schedule_ptds(struct usb_hcd *hcd)
*
* I'm sure this scheme could be improved upon!
*/
- ep_queue = &priv->controlqhs;
- while (ep_queue) {
+ for (i = 0; i < QH_END; i++) {
+ ep_queue = &priv->qh_list[i];
list_for_each_entry_safe(qh, qh_next, ep_queue, qh_list)
enqueue_qtds(hcd, qh);
-
- if (ep_queue == &priv->controlqhs)
- ep_queue = &priv->interruptqhs;
- else if (ep_queue == &priv->interruptqhs)
- ep_queue = &priv->bulkqhs;
- else
- ep_queue = NULL;
}
}
@@ -1543,16 +1528,16 @@ static int isp1760_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
switch (usb_pipetype(urb->pipe)) {
case PIPE_CONTROL:
- ep_queue = &priv->controlqhs;
+ ep_queue = &priv->qh_list[QH_CONTROL];
break;
case PIPE_BULK:
- ep_queue = &priv->bulkqhs;
+ ep_queue = &priv->qh_list[QH_BULK];
break;
case PIPE_INTERRUPT:
if (urb->interval < 0)
return -EINVAL;
/* FIXME: Check bandwidth */
- ep_queue = &priv->interruptqhs;
+ ep_queue = &priv->qh_list[QH_INTERRUPT];
break;
case PIPE_ISOCHRONOUS:
dev_err(hcd->self.controller, "%s: isochronous USB packets "
@@ -1714,8 +1699,8 @@ static void isp1760_endpoint_disable(struct usb_hcd *hcd,
{
struct isp1760_hcd *priv = hcd_to_priv(hcd);
unsigned long spinflags;
- struct isp1760_qh *qh;
- struct isp1760_qtd *qtd;
+ struct isp1760_qh *qh, *qh_iter;
+ int i;
spin_lock_irqsave(&priv->lock, spinflags);
@@ -1723,14 +1708,17 @@ static void isp1760_endpoint_disable(struct usb_hcd *hcd,
if (!qh)
goto out;
- list_for_each_entry(qtd, &qh->qtd_list, qtd_list)
- if (qtd->status != QTD_RETIRE) {
- dequeue_urb_from_qtd(hcd, qh, qtd);
- qtd->urb->status = -ECONNRESET;
- }
+ WARN_ON(!list_empty(&qh->qtd_list));
+ for (i = 0; i < QH_END; i++)
+ list_for_each_entry(qh_iter, &priv->qh_list[i], qh_list)
+ if (qh_iter == qh) {
+ list_del(&qh_iter->qh_list);
+ i = QH_END;
+ break;
+ }
+ qh_free(qh);
ep->hcpriv = NULL;
- /* Cannot free qh here since it will be parsed by schedule_ptds() */
schedule_ptds(hcd);
diff --git a/drivers/usb/host/isp1760-if.c b/drivers/usb/host/isp1760-if.c
index a7dc1e1d45f2..4592dc17a9f9 100644
--- a/drivers/usb/host/isp1760-if.c
+++ b/drivers/usb/host/isp1760-if.c
@@ -18,7 +18,7 @@
#include "isp1760-hcd.h"
-#ifdef CONFIG_OF
+#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_platform.h>
@@ -31,7 +31,7 @@
#include <linux/pci.h>
#endif
-#ifdef CONFIG_OF
+#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
struct isp1760 {
struct usb_hcd *hcd;
int rst_gpio;
@@ -47,23 +47,27 @@ static int of_isp1760_probe(struct platform_device *dev)
int virq;
resource_size_t res_len;
int ret;
- const unsigned int *prop;
unsigned int devflags = 0;
enum of_gpio_flags gpio_flags;
+ u32 bus_width = 0;
drvdata = kzalloc(sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
ret = of_address_to_resource(dp, 0, &memory);
- if (ret)
- return -ENXIO;
+ if (ret) {
+ ret = -ENXIO;
+ goto free_data;
+ }
res_len = resource_size(&memory);
res = request_mem_region(memory.start, res_len, dev_name(&dev->dev));
- if (!res)
- return -EBUSY;
+ if (!res) {
+ ret = -EBUSY;
+ goto free_data;
+ }
if (of_irq_map_one(dp, 0, &oirq)) {
ret = -ENODEV;
@@ -77,8 +81,8 @@ static int of_isp1760_probe(struct platform_device *dev)
devflags |= ISP1760_FLAG_ISP1761;
/* Some systems wire up only 16 of the 32 data lines */
- prop = of_get_property(dp, "bus-width", NULL);
- if (prop && *prop == 16)
+ of_property_read_u32(dp, "bus-width", &bus_width);
+ if (bus_width == 16)
devflags |= ISP1760_FLAG_BUS_WIDTH_16;
if (of_get_property(dp, "port1-otg", NULL) != NULL)
@@ -125,6 +129,7 @@ free_gpio:
gpio_free(drvdata->rst_gpio);
release_reg:
release_mem_region(memory.start, res_len);
+free_data:
kfree(drvdata);
return ret;
}
@@ -437,7 +442,7 @@ static int __init isp1760_init(void)
ret = platform_driver_register(&isp1760_plat_driver);
if (!ret)
any_ret = 0;
-#ifdef CONFIG_OF
+#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
ret = platform_driver_register(&isp1760_of_driver);
if (!ret)
any_ret = 0;
@@ -457,7 +462,7 @@ module_init(isp1760_init);
static void __exit isp1760_exit(void)
{
platform_driver_unregister(&isp1760_plat_driver);
-#ifdef CONFIG_OF
+#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
platform_driver_unregister(&isp1760_of_driver);
#endif
#ifdef CONFIG_PCI
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index 95a9fec38e89..5df0b0e3392b 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -223,7 +223,7 @@ static void ohci_at91_usb_set_power(struct at91_usbh_data *pdata, int port, int
if (port < 0 || port >= 2)
return;
- if (pdata->vbus_pin[port] <= 0)
+ if (!gpio_is_valid(pdata->vbus_pin[port]))
return;
gpio_set_value(pdata->vbus_pin[port], !pdata->vbus_pin_inverted ^ enable);
@@ -234,7 +234,7 @@ static int ohci_at91_usb_get_power(struct at91_usbh_data *pdata, int port)
if (port < 0 || port >= 2)
return -EINVAL;
- if (pdata->vbus_pin[port] <= 0)
+ if (!gpio_is_valid(pdata->vbus_pin[port]))
return -EINVAL;
return gpio_get_value(pdata->vbus_pin[port]) ^ !pdata->vbus_pin_inverted;
@@ -465,7 +465,7 @@ static int ohci_hcd_at91_drv_probe(struct platform_device *pdev)
if (pdata) {
for (i = 0; i < ARRAY_SIZE(pdata->vbus_pin); i++) {
- if (pdata->vbus_pin[i] <= 0)
+ if (!gpio_is_valid(pdata->vbus_pin[i]))
continue;
gpio_request(pdata->vbus_pin[i], "ohci_vbus");
ohci_at91_usb_set_power(pdata, i, 1);
@@ -474,7 +474,7 @@ static int ohci_hcd_at91_drv_probe(struct platform_device *pdev)
for (i = 0; i < ARRAY_SIZE(pdata->overcurrent_pin); i++) {
int ret;
- if (pdata->overcurrent_pin[i] <= 0)
+ if (!gpio_is_valid(pdata->overcurrent_pin[i]))
continue;
gpio_request(pdata->overcurrent_pin[i], "ohci_overcurrent");
@@ -499,14 +499,14 @@ static int ohci_hcd_at91_drv_remove(struct platform_device *pdev)
if (pdata) {
for (i = 0; i < ARRAY_SIZE(pdata->vbus_pin); i++) {
- if (pdata->vbus_pin[i] <= 0)
+ if (!gpio_is_valid(pdata->vbus_pin[i]))
continue;
ohci_at91_usb_set_power(pdata, i, 0);
gpio_free(pdata->vbus_pin[i]);
}
for (i = 0; i < ARRAY_SIZE(pdata->overcurrent_pin); i++) {
- if (pdata->overcurrent_pin[i] <= 0)
+ if (!gpio_is_valid(pdata->overcurrent_pin[i]))
continue;
free_irq(gpio_to_irq(pdata->overcurrent_pin[i]), pdev);
gpio_free(pdata->overcurrent_pin[i]);
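
A note on the gpio_is_valid() conversions above (not part of the patch): the old "pdata->vbus_pin[port] <= 0" test rejects GPIO 0, which is a legal GPIO number on some SoCs, and only works because the platform data happened to use zero or negative values for "no pin". gpio_is_valid() rejects only negative and out-of-range numbers; a sketch of its asm-generic/gpio.h definition in this era, for reference:

static inline bool gpio_is_valid(int number)
{
	/* "no GPIO" is conventionally expressed as a negative number */
	return number >= 0 && number < ARCH_NR_GPIOS;
}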
diff --git a/drivers/usb/host/ohci-au1xxx.c b/drivers/usb/host/ohci-au1xxx.c
index 9b66df8278f3..4ea63b2cac42 100644
--- a/drivers/usb/host/ohci-au1xxx.c
+++ b/drivers/usb/host/ohci-au1xxx.c
@@ -89,7 +89,7 @@ static const struct hc_driver ohci_au1xxx_hc_driver = {
static int ohci_hcd_au1xxx_drv_probe(struct platform_device *pdev)
{
- int ret;
+ int ret, unit;
struct usb_hcd *hcd;
if (usb_disabled())
@@ -120,7 +120,9 @@ static int ohci_hcd_au1xxx_drv_probe(struct platform_device *pdev)
goto err2;
}
- if (alchemy_usb_control(ALCHEMY_USB_OHCI0, 1)) {
+ unit = (hcd->rsrc_start == AU1300_USB_OHCI1_PHYS_ADDR) ?
+ ALCHEMY_USB_OHCI1 : ALCHEMY_USB_OHCI0;
+ if (alchemy_usb_control(unit, 1)) {
printk(KERN_INFO "%s: controller init failed!\n", pdev->name);
ret = -ENODEV;
goto err3;
@@ -135,7 +137,7 @@ static int ohci_hcd_au1xxx_drv_probe(struct platform_device *pdev)
return ret;
}
- alchemy_usb_control(ALCHEMY_USB_OHCI0, 0);
+ alchemy_usb_control(unit, 0);
err3:
iounmap(hcd->regs);
err2:
@@ -148,9 +150,12 @@ err1:
static int ohci_hcd_au1xxx_drv_remove(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ int unit;
+ unit = (hcd->rsrc_start == AU1300_USB_OHCI1_PHYS_ADDR) ?
+ ALCHEMY_USB_OHCI1 : ALCHEMY_USB_OHCI0;
usb_remove_hcd(hcd);
- alchemy_usb_control(ALCHEMY_USB_OHCI0, 0);
+ alchemy_usb_control(unit, 0);
iounmap(hcd->regs);
release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
usb_put_hcd(hcd);
@@ -173,12 +178,9 @@ static int ohci_hcd_au1xxx_drv_suspend(struct device *dev)
* mark HW unaccessible, bail out if RH has been resumed. Use
* the spinlock to properly synchronize with possible pending
* RH suspend or resume activity.
- *
- * This is still racy as hcd->state is manipulated outside of
- * any locks =P But that will be a different fix.
*/
spin_lock_irqsave(&ohci->lock, flags);
- if (hcd->state != HC_STATE_SUSPENDED) {
+ if (ohci->rh_state != OHCI_RH_SUSPENDED) {
rc = -EINVAL;
goto bail;
}
diff --git a/drivers/usb/host/ohci-dbg.c b/drivers/usb/host/ohci-dbg.c
index d7d34492934a..5179fcd73d8a 100644
--- a/drivers/usb/host/ohci-dbg.c
+++ b/drivers/usb/host/ohci-dbg.c
@@ -127,6 +127,19 @@ static char *hcfs2string (int state)
return "?";
}
+static const char *rh_state_string(struct ohci_hcd *ohci)
+{
+ switch (ohci->rh_state) {
+ case OHCI_RH_HALTED:
+ return "halted";
+ case OHCI_RH_SUSPENDED:
+ return "suspended";
+ case OHCI_RH_RUNNING:
+ return "running";
+ }
+ return "?";
+}
+
// dump control and status registers
static void
ohci_dump_status (struct ohci_hcd *controller, char **next, unsigned *size)
@@ -136,9 +149,10 @@ ohci_dump_status (struct ohci_hcd *controller, char **next, unsigned *size)
temp = ohci_readl (controller, &regs->revision) & 0xff;
ohci_dbg_sw (controller, next, size,
- "OHCI %d.%d, %s legacy support registers\n",
+ "OHCI %d.%d, %s legacy support registers, rh state %s\n",
0x03 & (temp >> 4), (temp & 0x0f),
- (temp & 0x0100) ? "with" : "NO");
+ (temp & 0x0100) ? "with" : "NO",
+ rh_state_string(controller));
temp = ohci_readl (controller, &regs->control);
ohci_dbg_sw (controller, next, size,
diff --git a/drivers/usb/host/ohci-ep93xx.c b/drivers/usb/host/ohci-ep93xx.c
index dc45d489d00e..3d63574d2c7e 100644
--- a/drivers/usb/host/ohci-ep93xx.c
+++ b/drivers/usb/host/ohci-ep93xx.c
@@ -179,8 +179,6 @@ static int ohci_hcd_ep93xx_drv_suspend(struct platform_device *pdev, pm_message_
ohci->next_statechange = jiffies;
ep93xx_stop_hc(&pdev->dev);
- hcd->state = HC_STATE_SUSPENDED;
-
return 0;
}
diff --git a/drivers/usb/host/ohci-exynos.c b/drivers/usb/host/ohci-exynos.c
new file mode 100644
index 000000000000..55aa35aa3d7b
--- /dev/null
+++ b/drivers/usb/host/ohci-exynos.c
@@ -0,0 +1,274 @@
+/*
+ * SAMSUNG EXYNOS USB HOST OHCI Controller
+ *
+ * Copyright (C) 2011 Samsung Electronics Co.Ltd
+ * Author: Jingoo Han <jg1.han@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <mach/ohci.h>
+#include <plat/usb-phy.h>
+
+struct exynos_ohci_hcd {
+ struct device *dev;
+ struct usb_hcd *hcd;
+ struct clk *clk;
+};
+
+static int ohci_exynos_start(struct usb_hcd *hcd)
+{
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+ int ret;
+
+ ohci_dbg(ohci, "ohci_exynos_start, ohci:%p", ohci);
+
+ ret = ohci_init(ohci);
+ if (ret < 0)
+ return ret;
+
+ ret = ohci_run(ohci);
+ if (ret < 0) {
+ err("can't start %s", hcd->self.bus_name);
+ ohci_stop(hcd);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct hc_driver exynos_ohci_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "EXYNOS OHCI Host Controller",
+ .hcd_priv_size = sizeof(struct ohci_hcd),
+
+ .irq = ohci_irq,
+ .flags = HCD_MEMORY|HCD_USB11,
+
+ .start = ohci_exynos_start,
+ .stop = ohci_stop,
+ .shutdown = ohci_shutdown,
+
+ .get_frame_number = ohci_get_frame,
+
+ .urb_enqueue = ohci_urb_enqueue,
+ .urb_dequeue = ohci_urb_dequeue,
+ .endpoint_disable = ohci_endpoint_disable,
+
+ .hub_status_data = ohci_hub_status_data,
+ .hub_control = ohci_hub_control,
+#ifdef CONFIG_PM
+ .bus_suspend = ohci_bus_suspend,
+ .bus_resume = ohci_bus_resume,
+#endif
+ .start_port_reset = ohci_start_port_reset,
+};
+
+static int __devinit exynos_ohci_probe(struct platform_device *pdev)
+{
+ struct exynos4_ohci_platdata *pdata;
+ struct exynos_ohci_hcd *exynos_ohci;
+ struct usb_hcd *hcd;
+ struct ohci_hcd *ohci;
+ struct resource *res;
+ int irq;
+ int err;
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ dev_err(&pdev->dev, "No platform data defined\n");
+ return -EINVAL;
+ }
+
+ exynos_ohci = kzalloc(sizeof(struct exynos_ohci_hcd), GFP_KERNEL);
+ if (!exynos_ohci)
+ return -ENOMEM;
+
+ exynos_ohci->dev = &pdev->dev;
+
+ hcd = usb_create_hcd(&exynos_ohci_hc_driver, &pdev->dev,
+ dev_name(&pdev->dev));
+ if (!hcd) {
+ dev_err(&pdev->dev, "Unable to create HCD\n");
+ err = -ENOMEM;
+ goto fail_hcd;
+ }
+
+ exynos_ohci->hcd = hcd;
+ exynos_ohci->clk = clk_get(&pdev->dev, "usbhost");
+
+ if (IS_ERR(exynos_ohci->clk)) {
+ dev_err(&pdev->dev, "Failed to get usbhost clock\n");
+ err = PTR_ERR(exynos_ohci->clk);
+ goto fail_clk;
+ }
+
+ err = clk_enable(exynos_ohci->clk);
+ if (err)
+ goto fail_clken;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to get I/O memory\n");
+ err = -ENXIO;
+ goto fail_io;
+ }
+
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = resource_size(res);
+ hcd->regs = ioremap(res->start, resource_size(res));
+ if (!hcd->regs) {
+ dev_err(&pdev->dev, "Failed to remap I/O memory\n");
+ err = -ENOMEM;
+ goto fail_io;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (!irq) {
+ dev_err(&pdev->dev, "Failed to get IRQ\n");
+ err = -ENODEV;
+ goto fail;
+ }
+
+ if (pdata->phy_init)
+ pdata->phy_init(pdev, S5P_USB_PHY_HOST);
+
+ ohci = hcd_to_ohci(hcd);
+ ohci_hcd_init(ohci);
+
+ err = usb_add_hcd(hcd, irq, IRQF_SHARED);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to add USB HCD\n");
+ goto fail;
+ }
+
+ platform_set_drvdata(pdev, exynos_ohci);
+
+ return 0;
+
+fail:
+ iounmap(hcd->regs);
+fail_io:
+ clk_disable(exynos_ohci->clk);
+fail_clken:
+ clk_put(exynos_ohci->clk);
+fail_clk:
+ usb_put_hcd(hcd);
+fail_hcd:
+ kfree(exynos_ohci);
+ return err;
+}
+
+static int __devexit exynos_ohci_remove(struct platform_device *pdev)
+{
+ struct exynos4_ohci_platdata *pdata = pdev->dev.platform_data;
+ struct exynos_ohci_hcd *exynos_ohci = platform_get_drvdata(pdev);
+ struct usb_hcd *hcd = exynos_ohci->hcd;
+
+ usb_remove_hcd(hcd);
+
+ if (pdata && pdata->phy_exit)
+ pdata->phy_exit(pdev, S5P_USB_PHY_HOST);
+
+ iounmap(hcd->regs);
+
+ clk_disable(exynos_ohci->clk);
+ clk_put(exynos_ohci->clk);
+
+ usb_put_hcd(hcd);
+ kfree(exynos_ohci);
+
+ return 0;
+}
+
+static void exynos_ohci_shutdown(struct platform_device *pdev)
+{
+ struct exynos_ohci_hcd *exynos_ohci = platform_get_drvdata(pdev);
+ struct usb_hcd *hcd = exynos_ohci->hcd;
+
+ if (hcd->driver->shutdown)
+ hcd->driver->shutdown(hcd);
+}
+
+#ifdef CONFIG_PM
+static int exynos_ohci_suspend(struct device *dev)
+{
+ struct exynos_ohci_hcd *exynos_ohci = dev_get_drvdata(dev);
+ struct usb_hcd *hcd = exynos_ohci->hcd;
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+ struct platform_device *pdev = to_platform_device(dev);
+ struct exynos4_ohci_platdata *pdata = pdev->dev.platform_data;
+ unsigned long flags;
+ int rc = 0;
+
+ /*
+ * Root hub was already suspended. Disable irq emission and
+ * mark HW unaccessible, bail out if RH has been resumed. Use
+ * the spinlock to properly synchronize with possible pending
+ * RH suspend or resume activity.
+ *
+ * This is still racy as hcd->state is manipulated outside of
+ * any locks =P But that will be a different fix.
+ */
+ spin_lock_irqsave(&ohci->lock, flags);
+ if (hcd->state != HC_STATE_SUSPENDED && hcd->state != HC_STATE_HALT) {
+ rc = -EINVAL;
+ goto fail;
+ }
+
+ clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+
+ if (pdata && pdata->phy_exit)
+ pdata->phy_exit(pdev, S5P_USB_PHY_HOST);
+fail:
+ spin_unlock_irqrestore(&ohci->lock, flags);
+
+ return rc;
+}
+
+static int exynos_ohci_resume(struct device *dev)
+{
+ struct exynos_ohci_hcd *exynos_ohci = dev_get_drvdata(dev);
+ struct usb_hcd *hcd = exynos_ohci->hcd;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct exynos4_ohci_platdata *pdata = pdev->dev.platform_data;
+
+ if (pdata && pdata->phy_init)
+ pdata->phy_init(pdev, S5P_USB_PHY_HOST);
+
+ /* Mark hardware accessible again as we are out of D3 state by now */
+ set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+
+ ohci_finish_controller_resume(hcd);
+
+ return 0;
+}
+#else
+#define exynos_ohci_suspend NULL
+#define exynos_ohci_resume NULL
+#endif
+
+static const struct dev_pm_ops exynos_ohci_pm_ops = {
+ .suspend = exynos_ohci_suspend,
+ .resume = exynos_ohci_resume,
+};
+
+static struct platform_driver exynos_ohci_driver = {
+ .probe = exynos_ohci_probe,
+ .remove = __devexit_p(exynos_ohci_remove),
+ .shutdown = exynos_ohci_shutdown,
+ .driver = {
+ .name = "exynos-ohci",
+ .owner = THIS_MODULE,
+ .pm = &exynos_ohci_pm_ops,
+ }
+};
+
+MODULE_ALIAS("platform:exynos-ohci");
+MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>");
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index b2639191549e..34b9edd86651 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -115,13 +115,13 @@ static inline void sb800_prefetch(struct ohci_hcd *ohci, int on)
/* Some boards misreport power switching/overcurrent */
-static int distrust_firmware = 1;
+static bool distrust_firmware = 1;
module_param (distrust_firmware, bool, 0);
MODULE_PARM_DESC (distrust_firmware,
"true to distrust firmware power/overcurrent setup");
/* Some boards leave IR set wrongly, since they fail BIOS/SMM handshakes */
-static int no_handshake = 0;
+static bool no_handshake = 0;
module_param (no_handshake, bool, 0);
MODULE_PARM_DESC (no_handshake, "true (not default) disables BIOS handshake");
@@ -209,7 +209,7 @@ static int ohci_urb_enqueue (
retval = -ENODEV;
goto fail;
}
- if (!HC_IS_RUNNING(hcd->state)) {
+ if (ohci->rh_state != OHCI_RH_RUNNING) {
retval = -ENODEV;
goto fail;
}
@@ -274,7 +274,7 @@ static int ohci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
rc = usb_hcd_check_unlink_urb(hcd, urb, status);
if (rc) {
; /* Do nothing */
- } else if (HC_IS_RUNNING(hcd->state)) {
+ } else if (ohci->rh_state == OHCI_RH_RUNNING) {
urb_priv_t *urb_priv;
/* Unless an IRQ completed the unlink while it was being
@@ -321,7 +321,7 @@ ohci_endpoint_disable (struct usb_hcd *hcd, struct usb_host_endpoint *ep)
rescan:
spin_lock_irqsave (&ohci->lock, flags);
- if (!HC_IS_RUNNING (hcd->state)) {
+ if (ohci->rh_state != OHCI_RH_RUNNING) {
sanitize:
ed->state = ED_IDLE;
if (quirk_zfmicro(ohci) && ed->type == PIPE_INTERRUPT)
@@ -377,6 +377,7 @@ static void ohci_usb_reset (struct ohci_hcd *ohci)
ohci->hc_control = ohci_readl (ohci, &ohci->regs->control);
ohci->hc_control &= OHCI_CTRL_RWC;
ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
+ ohci->rh_state = OHCI_RH_HALTED;
}
/* ohci_shutdown forcibly disables IRQs and DMA, helping kexec and
@@ -500,7 +501,7 @@ static int ohci_init (struct ohci_hcd *ohci)
if (distrust_firmware)
ohci->flags |= OHCI_QUIRK_HUB_POWER;
- disable (ohci);
+ ohci->rh_state = OHCI_RH_HALTED;
ohci->regs = hcd->regs;
/* REVISIT this BIOS handshake is now moved into PCI "quirks", and
@@ -575,7 +576,7 @@ static int ohci_run (struct ohci_hcd *ohci)
int first = ohci->fminterval == 0;
struct usb_hcd *hcd = ohci_to_hcd(ohci);
- disable (ohci);
+ ohci->rh_state = OHCI_RH_HALTED;
/* boot firmware should have set this up (5.1.1.3.1) */
if (first) {
@@ -688,7 +689,7 @@ retry:
ohci->hc_control &= OHCI_CTRL_RWC;
ohci->hc_control |= OHCI_CONTROL_INIT | OHCI_USB_OPER;
ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
- hcd->state = HC_STATE_RUNNING;
+ ohci->rh_state = OHCI_RH_RUNNING;
/* wake on ConnectStatusChange, matching external hubs */
ohci_writel (ohci, RH_HS_DRWE, &ohci->regs->roothub.status);
@@ -725,7 +726,6 @@ retry:
// POTPGT delay is bits 24-31, in 2 ms units.
mdelay ((val >> 23) & 0x1fe);
- hcd->state = HC_STATE_RUNNING;
if (quirk_zfmicro(ohci)) {
/* Create timer to watch for bad queue state on ZF Micro */
@@ -761,7 +761,7 @@ static irqreturn_t ohci_irq (struct usb_hcd *hcd)
* of dead, unclocked, or unplugged (CardBus...) devices
*/
if (ints == ~(u32)0) {
- disable (ohci);
+ ohci->rh_state = OHCI_RH_HALTED;
ohci_dbg (ohci, "device removed!\n");
usb_hc_died(hcd);
return IRQ_HANDLED;
@@ -771,7 +771,7 @@ static irqreturn_t ohci_irq (struct usb_hcd *hcd)
ints &= ohci_readl(ohci, &regs->intrenable);
/* interrupt for some other device? */
- if (ints == 0 || unlikely(hcd->state == HC_STATE_HALT))
+ if (ints == 0 || unlikely(ohci->rh_state == OHCI_RH_HALTED))
return IRQ_NOTMINE;
if (ints & OHCI_INTR_UE) {
@@ -786,8 +786,8 @@ static irqreturn_t ohci_irq (struct usb_hcd *hcd)
schedule_work (&ohci->nec_work);
} else {
- disable (ohci);
ohci_err (ohci, "OHCI Unrecoverable Error, disabled\n");
+ ohci->rh_state = OHCI_RH_HALTED;
usb_hc_died(hcd);
}
@@ -871,11 +871,11 @@ static irqreturn_t ohci_irq (struct usb_hcd *hcd)
if ((ints & OHCI_INTR_SF) != 0
&& !ohci->ed_rm_list
&& !ohci->ed_to_check
- && HC_IS_RUNNING(hcd->state))
+ && ohci->rh_state == OHCI_RH_RUNNING)
ohci_writel (ohci, OHCI_INTR_SF, &regs->intrdisable);
spin_unlock (&ohci->lock);
- if (HC_IS_RUNNING(hcd->state)) {
+ if (ohci->rh_state == OHCI_RH_RUNNING) {
ohci_writel (ohci, ints, &regs->intrstatus);
ohci_writel (ohci, OHCI_INTR_MIE, &regs->intrenable);
// flush those writes
@@ -929,7 +929,7 @@ static int ohci_restart (struct ohci_hcd *ohci)
struct urb_priv *priv;
spin_lock_irq(&ohci->lock);
- disable (ohci);
+ ohci->rh_state = OHCI_RH_HALTED;
/* Recycle any "live" eds/tds (and urbs). */
if (!list_empty (&ohci->pending))
@@ -1005,6 +1005,11 @@ MODULE_LICENSE ("GPL");
#define PLATFORM_DRIVER ohci_hcd_s3c2410_driver
#endif
+#ifdef CONFIG_USB_OHCI_EXYNOS
+#include "ohci-exynos.c"
+#define PLATFORM_DRIVER exynos_ohci_driver
+#endif
+
#ifdef CONFIG_USB_OHCI_HCD_OMAP1
#include "ohci-omap.c"
#define OMAP1_PLATFORM_DRIVER ohci_hcd_omap_driver
@@ -1111,7 +1116,7 @@ MODULE_LICENSE ("GPL");
#define PLATFORM_DRIVER ohci_hcd_ath79_driver
#endif
-#ifdef CONFIG_NLM_XLR
+#ifdef CONFIG_CPU_XLR
#include "ohci-xls.c"
#define PLATFORM_DRIVER ohci_xls_driver
#endif
diff --git a/drivers/usb/host/ohci-hub.c b/drivers/usb/host/ohci-hub.c
index 2f00040fc408..836772dfabd3 100644
--- a/drivers/usb/host/ohci-hub.c
+++ b/drivers/usb/host/ohci-hub.c
@@ -111,6 +111,7 @@ __acquires(ohci->lock)
if (!autostop) {
ohci->next_statechange = jiffies + msecs_to_jiffies (5);
ohci->autostop = 0;
+ ohci->rh_state = OHCI_RH_SUSPENDED;
}
done:
@@ -140,7 +141,7 @@ __acquires(ohci->lock)
if (ohci->hc_control & (OHCI_CTRL_IR | OHCI_SCHED_ENABLES)) {
/* this can happen after resuming a swsusp snapshot */
- if (hcd->state == HC_STATE_RESUMING) {
+ if (ohci->rh_state != OHCI_RH_RUNNING) {
ohci_dbg (ohci, "BIOS/SMM active, control %03x\n",
ohci->hc_control);
status = -EBUSY;
@@ -274,6 +275,7 @@ skip_resume:
(void) ohci_readl (ohci, &ohci->regs->control);
}
+ ohci->rh_state = OHCI_RH_RUNNING;
return 0;
}
@@ -336,11 +338,8 @@ static void ohci_finish_controller_resume(struct usb_hcd *hcd)
/* If needed, reinitialize and suspend the root hub */
if (need_reinit) {
spin_lock_irq(&ohci->lock);
- hcd->state = HC_STATE_RESUMING;
ohci_rh_resume(ohci);
- hcd->state = HC_STATE_QUIESCING;
ohci_rh_suspend(ohci, 0);
- hcd->state = HC_STATE_SUSPENDED;
spin_unlock_irq(&ohci->lock);
}
diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c
index e4b8782cc6e2..db3968656d21 100644
--- a/drivers/usb/host/ohci-omap.c
+++ b/drivers/usb/host/ohci-omap.c
@@ -516,7 +516,6 @@ static int ohci_omap_suspend(struct platform_device *dev, pm_message_t message)
ohci->next_statechange = jiffies;
omap_ohci_clock_power(0);
- ohci_to_hcd(ohci)->state = HC_STATE_SUSPENDED;
return 0;
}
diff --git a/drivers/usb/host/ohci-omap3.c b/drivers/usb/host/ohci-omap3.c
index 516ebc4d6cc2..1b8133b6e451 100644
--- a/drivers/usb/host/ohci-omap3.c
+++ b/drivers/usb/host/ohci-omap3.c
@@ -31,6 +31,7 @@
#include <linux/platform_device.h>
#include <plat/usb.h>
+#include <linux/pm_runtime.h>
/*-------------------------------------------------------------------------*/
@@ -134,7 +135,7 @@ static int __devinit ohci_hcd_omap3_probe(struct platform_device *pdev)
int irq;
if (usb_disabled())
- goto err_end;
+ return -ENODEV;
if (!dev->parent) {
dev_err(dev, "Missing parent device\n");
@@ -172,11 +173,8 @@ static int __devinit ohci_hcd_omap3_probe(struct platform_device *pdev)
hcd->rsrc_len = resource_size(res);
hcd->regs = regs;
- ret = omap_usbhs_enable(dev);
- if (ret) {
- dev_dbg(dev, "failed to start ohci\n");
- goto err_end;
- }
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
ohci_hcd_init(hcd_to_ohci(hcd));
@@ -189,9 +187,7 @@ static int __devinit ohci_hcd_omap3_probe(struct platform_device *pdev)
return 0;
err_add_hcd:
- omap_usbhs_disable(dev);
-
-err_end:
+ pm_runtime_put_sync(dev);
usb_put_hcd(hcd);
err_io:
@@ -220,9 +216,9 @@ static int __devexit ohci_hcd_omap3_remove(struct platform_device *pdev)
iounmap(hcd->regs);
usb_remove_hcd(hcd);
- omap_usbhs_disable(dev);
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
usb_put_hcd(hcd);
-
return 0;
}
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
index bc01b064585a..6109810cc2d3 100644
--- a/drivers/usb/host/ohci-pci.c
+++ b/drivers/usb/host/ohci-pci.c
@@ -308,12 +308,9 @@ static int ohci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
* mark HW unaccessible, bail out if RH has been resumed. Use
* the spinlock to properly synchronize with possible pending
* RH suspend or resume activity.
- *
- * This is still racy as hcd->state is manipulated outside of
- * any locks =P But that will be a different fix.
*/
spin_lock_irqsave (&ohci->lock, flags);
- if (hcd->state != HC_STATE_SUSPENDED) {
+ if (ohci->rh_state != OHCI_RH_SUSPENDED) {
rc = -EINVAL;
goto bail;
}
diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c
index 29dfefe1c726..6313e4439f37 100644
--- a/drivers/usb/host/ohci-pxa27x.c
+++ b/drivers/usb/host/ohci-pxa27x.c
@@ -502,8 +502,6 @@ static int ohci_hcd_pxa27x_drv_suspend(struct device *dev)
ohci->ohci.next_statechange = jiffies;
pxa27x_stop_hc(ohci, dev);
- hcd->state = HC_STATE_SUSPENDED;
-
return 0;
}
diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c
index 15dc51ded61a..c5a1ea9145fa 100644
--- a/drivers/usb/host/ohci-q.c
+++ b/drivers/usb/host/ohci-q.c
@@ -912,7 +912,7 @@ rescan_all:
/* only take off EDs that the HC isn't using, accounting for
* frame counter wraps and EDs with partially retired TDs
*/
- if (likely (HC_IS_RUNNING(ohci_to_hcd(ohci)->state))) {
+ if (likely(ohci->rh_state == OHCI_RH_RUNNING)) {
if (tick_before (tick, ed->tick)) {
skip_ed:
last = &ed->ed_next;
@@ -1012,7 +1012,7 @@ rescan_this:
/* but if there's work queued, reschedule */
if (!list_empty (&ed->td_list)) {
- if (HC_IS_RUNNING(ohci_to_hcd(ohci)->state))
+ if (ohci->rh_state == OHCI_RH_RUNNING)
ed_schedule (ohci, ed);
}
@@ -1021,9 +1021,7 @@ rescan_this:
}
/* maybe reenable control and bulk lists */
- if (HC_IS_RUNNING(ohci_to_hcd(ohci)->state)
- && ohci_to_hcd(ohci)->state != HC_STATE_QUIESCING
- && !ohci->ed_rm_list) {
+ if (ohci->rh_state == OHCI_RH_RUNNING && !ohci->ed_rm_list) {
u32 command = 0, control = 0;
if (ohci->ed_controltail) {
diff --git a/drivers/usb/host/ohci-s3c2410.c b/drivers/usb/host/ohci-s3c2410.c
index a1877c47601e..56dcf069246d 100644
--- a/drivers/usb/host/ohci-s3c2410.c
+++ b/drivers/usb/host/ohci-s3c2410.c
@@ -486,15 +486,66 @@ static int __devexit ohci_hcd_s3c2410_drv_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM
+static int ohci_hcd_s3c2410_drv_suspend(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+ struct platform_device *pdev = to_platform_device(dev);
+ unsigned long flags;
+ int rc = 0;
+
+ /*
+ * Root hub was already suspended. Disable irq emission and
+ * mark HW unaccessible, bail out if RH has been resumed. Use
+ * the spinlock to properly synchronize with possible pending
+ * RH suspend or resume activity.
+ */
+ spin_lock_irqsave(&ohci->lock, flags);
+ if (ohci->rh_state != OHCI_RH_SUSPENDED) {
+ rc = -EINVAL;
+ goto bail;
+ }
+
+ clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+
+ s3c2410_stop_hc(pdev);
+bail:
+ spin_unlock_irqrestore(&ohci->lock, flags);
+
+ return rc;
+}
+
+static int ohci_hcd_s3c2410_drv_resume(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct platform_device *pdev = to_platform_device(dev);
+
+ s3c2410_start_hc(pdev, hcd);
+
+ set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+ ohci_finish_controller_resume(hcd);
+
+ return 0;
+}
+#else
+#define ohci_hcd_s3c2410_drv_suspend NULL
+#define ohci_hcd_s3c2410_drv_resume NULL
+#endif
+
+static const struct dev_pm_ops ohci_hcd_s3c2410_pm_ops = {
+ .suspend = ohci_hcd_s3c2410_drv_suspend,
+ .resume = ohci_hcd_s3c2410_drv_resume,
+};
+
static struct platform_driver ohci_hcd_s3c2410_driver = {
.probe = ohci_hcd_s3c2410_drv_probe,
.remove = __devexit_p(ohci_hcd_s3c2410_drv_remove),
.shutdown = usb_hcd_platform_shutdown,
- /*.suspend = ohci_hcd_s3c2410_drv_suspend, */
- /*.resume = ohci_hcd_s3c2410_drv_resume, */
.driver = {
.owner = THIS_MODULE,
.name = "s3c2410-ohci",
+ .pm = &ohci_hcd_s3c2410_pm_ops,
},
};
diff --git a/drivers/usb/host/ohci-sh.c b/drivers/usb/host/ohci-sh.c
index afc4eb6bb9d0..84686d90805b 100644
--- a/drivers/usb/host/ohci-sh.c
+++ b/drivers/usb/host/ohci-sh.c
@@ -29,7 +29,6 @@ static int ohci_sh_start(struct usb_hcd *hcd)
ohci_hcd_init(ohci);
ohci_init(ohci);
ohci_run(ohci);
- hcd->state = HC_STATE_RUNNING;
return 0;
}
diff --git a/drivers/usb/host/ohci-sm501.c b/drivers/usb/host/ohci-sm501.c
index 968cea2b6d4e..5596ac2ba1ca 100644
--- a/drivers/usb/host/ohci-sm501.c
+++ b/drivers/usb/host/ohci-sm501.c
@@ -224,7 +224,6 @@ static int ohci_sm501_suspend(struct platform_device *pdev, pm_message_t msg)
ohci->next_statechange = jiffies;
sm501_unit_power(dev->parent, SM501_GATE_USB_HOST, 0);
- ohci_to_hcd(ohci)->state = HC_STATE_SUSPENDED;
return 0;
}
diff --git a/drivers/usb/host/ohci-spear.c b/drivers/usb/host/ohci-spear.c
index 69874654f3b5..95c16489e883 100644
--- a/drivers/usb/host/ohci-spear.c
+++ b/drivers/usb/host/ohci-spear.c
@@ -203,7 +203,6 @@ static int spear_ohci_hcd_drv_suspend(struct platform_device *dev,
ohci->next_statechange = jiffies;
spear_stop_ohci(ohci_p);
- ohci_to_hcd(ohci)->state = HC_STATE_SUSPENDED;
return 0;
}
diff --git a/drivers/usb/host/ohci-tmio.c b/drivers/usb/host/ohci-tmio.c
index 06331d931171..120bfe6ede38 100644
--- a/drivers/usb/host/ohci-tmio.c
+++ b/drivers/usb/host/ohci-tmio.c
@@ -318,9 +318,6 @@ static int ohci_hcd_tmio_drv_suspend(struct platform_device *dev, pm_message_t s
if (ret)
return ret;
}
-
- hcd->state = HC_STATE_SUSPENDED;
-
return 0;
}
diff --git a/drivers/usb/host/ohci-xls.c b/drivers/usb/host/ohci-xls.c
index a3a9c6f45b91..a2247867af86 100644
--- a/drivers/usb/host/ohci-xls.c
+++ b/drivers/usb/host/ohci-xls.c
@@ -40,7 +40,7 @@ static int ohci_xls_probe_internal(const struct hc_driver *driver,
goto err1;
}
hcd->rsrc_start = res->start;
- hcd->rsrc_len = res->end - res->start + 1;
+ hcd->rsrc_len = resource_size(res);
if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len,
driver->description)) {
diff --git a/drivers/usb/host/ohci.h b/drivers/usb/host/ohci.h
index 0795b934d00c..8ff6f7ea96fd 100644
--- a/drivers/usb/host/ohci.h
+++ b/drivers/usb/host/ohci.h
@@ -344,6 +344,12 @@ typedef struct urb_priv {
* a subset of what the full implementation needs. (Linus)
*/
+enum ohci_rh_state {
+ OHCI_RH_HALTED,
+ OHCI_RH_SUSPENDED,
+ OHCI_RH_RUNNING
+};
+
struct ohci_hcd {
spinlock_t lock;
@@ -384,6 +390,7 @@ struct ohci_hcd {
/*
* driver state
*/
+ enum ohci_rh_state rh_state;
int num_ports;
int load [NUM_INTS];
u32 hc_control; /* copy of hc control reg */
@@ -679,11 +686,6 @@ static inline u16 ohci_hwPSW(const struct ohci_hcd *ohci,
/*-------------------------------------------------------------------------*/
-static inline void disable (struct ohci_hcd *ohci)
-{
- ohci_to_hcd(ohci)->state = HC_STATE_HALT;
-}
-
#define FI 0x2edf /* 12000 bits per frame (-1) */
#define FSMP(fi) (0x7fff & ((6 * ((fi) - 210)) / 7))
#define FIT (1 << 31)
@@ -707,7 +709,7 @@ static inline void periodic_reinit (struct ohci_hcd *ohci)
#define read_roothub(hc, register, mask) ({ \
u32 temp = ohci_readl (hc, &hc->regs->roothub.register); \
if (temp == -1) \
- disable (hc); \
+ hc->rh_state = OHCI_RH_HALTED; \
else if (hc->flags & OHCI_QUIRK_AMD756) \
while (temp & mask) \
temp = ohci_readl (hc, &hc->regs->roothub.register); \
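
The rh_state field added above replaces the OHCI driver's reliance on hcd->state (HC_IS_RUNNING(), HC_STATE_SUSPENDED and the removed disable() helper), as the ohci-hcd.c, ohci-hub.c, ohci-q.c and bus-glue hunks earlier in this diff show. A minimal sketch of the resulting idiom; the helper name is hypothetical, the patch itself open-codes the comparison:

static inline bool ohci_rh_running(const struct ohci_hcd *ohci)
{
	return ohci->rh_state == OHCI_RH_RUNNING;
}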
diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
index dcd889803f0f..015c7c62ed49 100644
--- a/drivers/usb/host/oxu210hp-hcd.c
+++ b/drivers/usb/host/oxu210hp-hcd.c
@@ -233,7 +233,7 @@ module_param(park, uint, S_IRUGO);
MODULE_PARM_DESC(park, "park setting; 1-3 back-to-back async packets");
/* For flakey hardware, ignore overcurrent indicators */
-static int ignore_oc;
+static bool ignore_oc;
module_param(ignore_oc, bool, S_IRUGO);
MODULE_PARM_DESC(ignore_oc, "ignore bogus hardware overcurrent indications");
@@ -3951,24 +3951,7 @@ static struct platform_driver oxu_driver = {
}
};
-static int __init oxu_module_init(void)
-{
- int retval = 0;
-
- retval = platform_driver_register(&oxu_driver);
- if (retval < 0)
- return retval;
-
- return retval;
-}
-
-static void __exit oxu_module_cleanup(void)
-{
- platform_driver_unregister(&oxu_driver);
-}
-
-module_init(oxu_module_init);
-module_exit(oxu_module_cleanup);
+module_platform_driver(oxu_driver);
MODULE_DESCRIPTION("Oxford OXU210HP HCD driver - ver. " DRIVER_VERSION);
MODULE_AUTHOR("Rodolfo Giometti <giometti@linux.it>");
diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c
index 533d12cca371..16dd6a6abf00 100644
--- a/drivers/usb/host/u132-hcd.c
+++ b/drivers/usb/host/u132-hcd.c
@@ -74,7 +74,7 @@ MODULE_LICENSE("GPL");
#define INT_MODULE_PARM(n, v) static int n = v;module_param(n, int, 0444)
INT_MODULE_PARM(testing, 0);
/* Some boards misreport power switching/overcurrent*/
-static int distrust_firmware = 1;
+static bool distrust_firmware = 1;
module_param(distrust_firmware, bool, 0);
MODULE_PARM_DESC(distrust_firmware, "true to distrust firmware power/overcurren"
"t setup");
diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
index c8ae199cfbb8..6b5eb1017e2c 100644
--- a/drivers/usb/host/uhci-hcd.c
+++ b/drivers/usb/host/uhci-hcd.c
@@ -59,7 +59,7 @@
#define DRIVER_DESC "USB Universal Host Controller Interface driver"
/* for flakey hardware, ignore overcurrent indicators */
-static int ignore_oc;
+static bool ignore_oc;
module_param(ignore_oc, bool, S_IRUGO);
MODULE_PARM_DESC(ignore_oc, "ignore hardware overcurrent indications");
diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c
index f6ca80ee4cec..d2c6f5ac4626 100644
--- a/drivers/usb/host/uhci-q.c
+++ b/drivers/usb/host/uhci-q.c
@@ -943,7 +943,7 @@ static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb,
if (usb_pipein(urb->pipe))
status |= TD_CTRL_SPD;
- i = urb->num_sgs;
+ i = urb->num_mapped_sgs;
if (len > 0 && i > 0) {
sg = urb->sg;
data = sg_dma_address(sg);
diff --git a/drivers/usb/host/whci/qset.c b/drivers/usb/host/whci/qset.c
index d6e175428618..76083ae92138 100644
--- a/drivers/usb/host/whci/qset.c
+++ b/drivers/usb/host/whci/qset.c
@@ -124,7 +124,7 @@ void qset_clear(struct whc *whc, struct whc_qset *qset)
{
qset->td_start = qset->td_end = qset->ntds = 0;
- qset->qh.link = cpu_to_le32(QH_LINK_NTDS(8) | QH_LINK_T);
+ qset->qh.link = cpu_to_le64(QH_LINK_NTDS(8) | QH_LINK_T);
qset->qh.status = qset->qh.status & QH_STATUS_SEQ_MASK;
qset->qh.err_count = 0;
qset->qh.scratch[0] = 0;
@@ -443,7 +443,7 @@ static int qset_add_urb_sg(struct whc *whc, struct whc_qset *qset, struct urb *u
remaining = urb->transfer_buffer_length;
- for_each_sg(urb->sg, sg, urb->num_sgs, i) {
+ for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) {
dma_addr_t dma_addr;
size_t dma_remaining;
dma_addr_t sp, ep;
@@ -561,7 +561,7 @@ static int qset_add_urb_sg_linearize(struct whc *whc, struct whc_qset *qset,
remaining = urb->transfer_buffer_length;
- for_each_sg(urb->sg, sg, urb->num_sgs, i) {
+ for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) {
size_t len;
size_t sg_remaining;
void *orig;
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 430e88fd3f6c..35e257f79c7b 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -57,17 +57,15 @@ static void xhci_common_hub_descriptor(struct xhci_hcd *xhci,
desc->bHubContrCurrent = 0;
desc->bNbrPorts = ports;
- /* Ugh, these should be #defines, FIXME */
- /* Using table 11-13 in USB 2.0 spec. */
temp = 0;
- /* Bits 1:0 - support port power switching, or power always on */
+ /* Bits 1:0 - support per-port power switching, or power always on */
if (HCC_PPC(xhci->hcc_params))
- temp |= 0x0001;
+ temp |= HUB_CHAR_INDV_PORT_LPSM;
else
- temp |= 0x0002;
+ temp |= HUB_CHAR_NO_LPSM;
/* Bit 2 - root hubs are not part of a compound device */
/* Bits 4:3 - individual port over current protection */
- temp |= 0x0008;
+ temp |= HUB_CHAR_INDV_PORT_OCPM;
/* Bits 6:5 - no TTs in root ports */
/* Bit 7 - no port indicators */
desc->wHubCharacteristics = cpu_to_le16(temp);
@@ -86,9 +84,9 @@ static void xhci_usb2_hub_descriptor(struct usb_hcd *hcd, struct xhci_hcd *xhci,
ports = xhci->num_usb2_ports;
xhci_common_hub_descriptor(xhci, desc, ports);
- desc->bDescriptorType = 0x29;
+ desc->bDescriptorType = USB_DT_HUB;
temp = 1 + (ports / 8);
- desc->bDescLength = 7 + 2 * temp;
+ desc->bDescLength = USB_DT_HUB_NONVAR_SIZE + 2 * temp;
/* The Device Removable bits are reported on a byte granularity.
* If the port doesn't exist within that byte, the bit is set to 0.
@@ -137,8 +135,8 @@ static void xhci_usb3_hub_descriptor(struct usb_hcd *hcd, struct xhci_hcd *xhci,
ports = xhci->num_usb3_ports;
xhci_common_hub_descriptor(xhci, desc, ports);
- desc->bDescriptorType = 0x2a;
- desc->bDescLength = 12;
+ desc->bDescriptorType = USB_DT_SS_HUB;
+ desc->bDescLength = USB_DT_SS_HUB_SIZE;
/* header decode latency should be zero for roothubs,
* see section 4.23.5.2.
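
A worked sizing example for the USB 2.0 root-hub descriptor above (illustrative, not from the patch): with temp = 1 + ports / 8 bytes per port bitmap, a 10-port root hub gives temp = 1 + 10 / 8 = 2, so bDescLength = USB_DT_HUB_NONVAR_SIZE + 2 * temp = 7 + 4 = 11 -- seven fixed header bytes plus two bytes each for the DeviceRemovable and PortPwrCtrlMask bitmaps.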
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 0e4b25fa3bcd..36cbe2226a44 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -42,15 +42,12 @@ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, gfp_t flag
seg = kzalloc(sizeof *seg, flags);
if (!seg)
return NULL;
- xhci_dbg(xhci, "Allocating priv segment structure at %p\n", seg);
seg->trbs = dma_pool_alloc(xhci->segment_pool, flags, &dma);
if (!seg->trbs) {
kfree(seg);
return NULL;
}
- xhci_dbg(xhci, "// Allocating segment at %p (virtual) 0x%llx (DMA)\n",
- seg->trbs, (unsigned long long)dma);
memset(seg->trbs, 0, SEGMENT_SIZE);
seg->dma = dma;
@@ -62,12 +59,9 @@ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, gfp_t flag
static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
{
if (seg->trbs) {
- xhci_dbg(xhci, "Freeing DMA segment at %p (virtual) 0x%llx (DMA)\n",
- seg->trbs, (unsigned long long)seg->dma);
dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
seg->trbs = NULL;
}
- xhci_dbg(xhci, "Freeing priv segment structure at %p\n", seg);
kfree(seg);
}
@@ -101,9 +95,6 @@ static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
val |= TRB_CHAIN;
prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
}
- xhci_dbg(xhci, "Linking segment 0x%llx to segment 0x%llx (DMA)\n",
- (unsigned long long)prev->dma,
- (unsigned long long)next->dma);
}
/* XXX: Do we need the hcd structure in all these functions? */
@@ -117,7 +108,6 @@ void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
if (ring->first_seg) {
first_seg = ring->first_seg;
seg = first_seg->next;
- xhci_dbg(xhci, "Freeing ring at %p\n", ring);
while (seg != first_seg) {
struct xhci_segment *next = seg->next;
xhci_segment_free(xhci, seg);
@@ -160,7 +150,6 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
struct xhci_segment *prev;
ring = kzalloc(sizeof *(ring), flags);
- xhci_dbg(xhci, "Allocating ring at %p\n", ring);
if (!ring)
return NULL;
@@ -191,9 +180,6 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
/* See section 4.9.2.1 and 6.4.4.1 */
prev->trbs[TRBS_PER_SEGMENT-1].link.control |=
cpu_to_le32(LINK_TOGGLE);
- xhci_dbg(xhci, "Wrote link toggle flag to"
- " segment %p (virtual), 0x%llx (DMA)\n",
- prev, (unsigned long long)prev->dma);
}
xhci_initialize_ring_info(ring);
return ring;
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 9f1d4b15d818..b90e1386418b 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -155,10 +155,6 @@ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer
while (last_trb(xhci, ring, ring->deq_seg, next)) {
if (consumer && last_trb_on_last_seg(xhci, ring, ring->deq_seg, next)) {
ring->cycle_state = (ring->cycle_state ? 0 : 1);
- if (!in_interrupt())
- xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n",
- ring,
- (unsigned int) ring->cycle_state);
}
ring->deq_seg = ring->deq_seg->next;
ring->dequeue = ring->deq_seg->trbs;
@@ -231,10 +227,6 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
/* Toggle the cycle bit after the last ring segment. */
if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
ring->cycle_state = (ring->cycle_state ? 0 : 1);
- if (!in_interrupt())
- xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n",
- ring,
- (unsigned int) ring->cycle_state);
}
}
ring->enq_seg = ring->enq_seg->next;
@@ -560,12 +552,9 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
cpu_to_le32(TRB_CYCLE);
cur_trb->generic.field[3] |= cpu_to_le32(
TRB_TYPE(TRB_TR_NOOP));
- xhci_dbg(xhci, "Cancel TRB %p (0x%llx dma) "
- "in seg %p (0x%llx dma)\n",
- cur_trb,
- (unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
- cur_seg,
- (unsigned long long)cur_seg->dma);
+ xhci_dbg(xhci, "TRB to noop at offset 0x%llx\n",
+ (unsigned long long)
+ xhci_trb_virt_to_dma(cur_seg, cur_trb));
}
if (cur_trb == cur_td->last_trb)
break;
@@ -705,9 +694,9 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
*/
list_for_each(entry, &ep->cancelled_td_list) {
cur_td = list_entry(entry, struct xhci_td, cancelled_td_list);
- xhci_dbg(xhci, "Cancelling TD starting at %p, 0x%llx (dma).\n",
- cur_td->first_trb,
- (unsigned long long)xhci_trb_virt_to_dma(cur_td->start_seg, cur_td->first_trb));
+ xhci_dbg(xhci, "Removing canceled TD starting at 0x%llx (dma).\n",
+ (unsigned long long)xhci_trb_virt_to_dma(
+ cur_td->start_seg, cur_td->first_trb));
ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
if (!ep_ring) {
/* This shouldn't happen unless a driver is mucking
@@ -1627,7 +1616,6 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
- xhci_debug_trb(xhci, xhci->event_ring->dequeue);
switch (trb_comp_code) {
case COMP_SUCCESS:
if (event_trb == ep_ring->dequeue) {
@@ -1643,7 +1631,6 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
}
break;
case COMP_SHORT_TX:
- xhci_warn(xhci, "WARN: short transfer on control ep\n");
if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
*status = -EREMOTEIO;
else
@@ -1946,6 +1933,16 @@ static int handle_tx_event(struct xhci_hcd *xhci,
xdev = xhci->devs[slot_id];
if (!xdev) {
xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n");
+ xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
+ (unsigned long long) xhci_trb_virt_to_dma(
+ xhci->event_ring->deq_seg,
+ xhci->event_ring->dequeue),
+ lower_32_bits(le64_to_cpu(event->buffer)),
+ upper_32_bits(le64_to_cpu(event->buffer)),
+ le32_to_cpu(event->transfer_len),
+ le32_to_cpu(event->flags));
+ xhci_dbg(xhci, "Event ring:\n");
+ xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
return -ENODEV;
}
@@ -1959,6 +1956,16 @@ static int handle_tx_event(struct xhci_hcd *xhci,
EP_STATE_DISABLED) {
xhci_err(xhci, "ERROR Transfer event for disabled endpoint "
"or incorrect stream ring\n");
+ xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
+ (unsigned long long) xhci_trb_virt_to_dma(
+ xhci->event_ring->deq_seg,
+ xhci->event_ring->dequeue),
+ lower_32_bits(le64_to_cpu(event->buffer)),
+ upper_32_bits(le64_to_cpu(event->buffer)),
+ le32_to_cpu(event->transfer_len),
+ le32_to_cpu(event->flags));
+ xhci_dbg(xhci, "Event ring:\n");
+ xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
return -ENODEV;
}
@@ -1985,7 +1992,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
xhci_dbg(xhci, "Stopped on No-op or Link TRB\n");
break;
case COMP_STALL:
- xhci_warn(xhci, "WARN: Stalled endpoint\n");
+ xhci_dbg(xhci, "Stalled endpoint\n");
ep->ep_state |= EP_HALTED;
status = -EPIPE;
break;
@@ -1995,11 +2002,11 @@ static int handle_tx_event(struct xhci_hcd *xhci,
break;
case COMP_SPLIT_ERR:
case COMP_TX_ERR:
- xhci_warn(xhci, "WARN: transfer error on endpoint\n");
+ xhci_dbg(xhci, "Transfer error on endpoint\n");
status = -EPROTO;
break;
case COMP_BABBLE:
- xhci_warn(xhci, "WARN: babble error on endpoint\n");
+ xhci_dbg(xhci, "Babble error on endpoint\n");
status = -EOVERFLOW;
break;
case COMP_DB_ERR:
@@ -2390,17 +2397,7 @@ hw_died:
irqreturn_t xhci_msi_irq(int irq, struct usb_hcd *hcd)
{
- irqreturn_t ret;
- struct xhci_hcd *xhci;
-
- xhci = hcd_to_xhci(hcd);
- set_bit(HCD_FLAG_SAW_IRQ, &hcd->flags);
- if (xhci->shared_hcd)
- set_bit(HCD_FLAG_SAW_IRQ, &xhci->shared_hcd->flags);
-
- ret = xhci_irq(hcd);
-
- return ret;
+ return xhci_irq(hcd);
}
/**** Endpoint Ring Operations ****/
@@ -2488,11 +2485,6 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
/* Toggle the cycle bit after the last ring segment. */
if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
ring->cycle_state = (ring->cycle_state ? 0 : 1);
- if (!in_interrupt()) {
- xhci_dbg(xhci, "queue_trb: Toggle cycle "
- "state for ring %p = %i\n",
- ring, (unsigned int)ring->cycle_state);
- }
}
ring->enq_seg = ring->enq_seg->next;
ring->enqueue = ring->enq_seg->trbs;
@@ -2561,13 +2553,11 @@ static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
struct scatterlist *sg;
sg = NULL;
- num_sgs = urb->num_sgs;
+ num_sgs = urb->num_mapped_sgs;
temp = urb->transfer_buffer_length;
- xhci_dbg(xhci, "count sg list trbs: \n");
num_trbs = 0;
for_each_sg(urb->sg, sg, num_sgs, i) {
- unsigned int previous_total_trbs = num_trbs;
unsigned int len = sg_dma_len(sg);
/* Scatter gather list entries may cross 64KB boundaries */
@@ -2582,22 +2572,11 @@ static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
num_trbs++;
running_total += TRB_MAX_BUFF_SIZE;
}
- xhci_dbg(xhci, " sg #%d: dma = %#llx, len = %#x (%d), num_trbs = %d\n",
- i, (unsigned long long)sg_dma_address(sg),
- len, len, num_trbs - previous_total_trbs);
-
len = min_t(int, len, temp);
temp -= len;
if (temp == 0)
break;
}
- xhci_dbg(xhci, "\n");
- if (!in_interrupt())
- xhci_dbg(xhci, "ep %#x - urb len = %d, sglist used, "
- "num_trbs = %d\n",
- urb->ep->desc.bEndpointAddress,
- urb->transfer_buffer_length,
- num_trbs);
return num_trbs;
}
@@ -2745,7 +2724,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
return -EINVAL;
num_trbs = count_sg_trbs_needed(xhci, urb);
- num_sgs = urb->num_sgs;
+ num_sgs = urb->num_mapped_sgs;
total_packet_count = roundup(urb->transfer_buffer_length,
usb_endpoint_maxp(&urb->ep->desc));
@@ -2783,8 +2762,6 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
if (trb_buff_len > urb->transfer_buffer_length)
trb_buff_len = urb->transfer_buffer_length;
- xhci_dbg(xhci, "First length to xfer from 1st sglist entry = %u\n",
- trb_buff_len);
first_trb = true;
/* Queue the first TRB, even if it's zero-length */
@@ -2816,11 +2793,6 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
if (usb_urb_dir_in(urb))
field |= TRB_ISP;
- xhci_dbg(xhci, " sg entry: dma = %#x, len = %#x (%d), "
- "64KB boundary at %#x, end dma = %#x\n",
- (unsigned int) addr, trb_buff_len, trb_buff_len,
- (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
- (unsigned int) addr + trb_buff_len);
if (TRB_MAX_BUFF_SIZE -
(addr & (TRB_MAX_BUFF_SIZE - 1)) < trb_buff_len) {
xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n");
@@ -2926,15 +2898,6 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
}
/* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */
- if (!in_interrupt())
- xhci_dbg(xhci, "ep %#x - urb len = %#x (%d), "
- "addr = %#llx, num_trbs = %d\n",
- urb->ep->desc.bEndpointAddress,
- urb->transfer_buffer_length,
- urb->transfer_buffer_length,
- (unsigned long long)urb->transfer_dma,
- num_trbs);
-
ret = prepare_transfer(xhci, xhci->devs[slot_id],
ep_index, urb->stream_id,
num_trbs, urb, 0, false, mem_flags);
@@ -3055,9 +3018,6 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
if (!urb->setup_packet)
return -EINVAL;
- if (!in_interrupt())
- xhci_dbg(xhci, "Queueing ctrl tx for slot id %d, ep %d\n",
- slot_id, ep_index);
/* 1 TRB for setup, 1 for status */
num_trbs = 2;
/*
@@ -3249,15 +3209,6 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
return -EINVAL;
}
- if (!in_interrupt())
- xhci_dbg(xhci, "ep %#x - urb len = %#x (%d),"
- " addr = %#llx, num_tds = %d\n",
- urb->ep->desc.bEndpointAddress,
- urb->transfer_buffer_length,
- urb->transfer_buffer_length,
- (unsigned long long)urb->transfer_dma,
- num_tds);
-
start_addr = (u64) urb->transfer_dma;
start_trb = &ep_ring->enqueue->generic;
start_cycle = ep_ring->cycle_state;
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index aa94c0195791..6bbe3c3a7111 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -200,14 +200,14 @@ static int xhci_setup_msi(struct xhci_hcd *xhci)
ret = pci_enable_msi(pdev);
if (ret) {
- xhci_err(xhci, "failed to allocate MSI entry\n");
+ xhci_dbg(xhci, "failed to allocate MSI entry\n");
return ret;
}
ret = request_irq(pdev->irq, (irq_handler_t)xhci_msi_irq,
0, "xhci_hcd", xhci_to_hcd(xhci));
if (ret) {
- xhci_err(xhci, "disable MSI interrupt\n");
+ xhci_dbg(xhci, "disable MSI interrupt\n");
pci_disable_msi(pdev);
}
@@ -270,7 +270,7 @@ static int xhci_setup_msix(struct xhci_hcd *xhci)
ret = pci_enable_msix(pdev, xhci->msix_entries, xhci->msix_count);
if (ret) {
- xhci_err(xhci, "Failed to enable MSI-X\n");
+ xhci_dbg(xhci, "Failed to enable MSI-X\n");
goto free_entries;
}
@@ -286,7 +286,7 @@ static int xhci_setup_msix(struct xhci_hcd *xhci)
return ret;
disable_msix:
- xhci_err(xhci, "disable MSI-X interrupt\n");
+ xhci_dbg(xhci, "disable MSI-X interrupt\n");
xhci_free_irq(xhci);
pci_disable_msix(pdev);
free_entries:
@@ -711,7 +711,10 @@ static void xhci_clear_command_ring(struct xhci_hcd *xhci)
ring = xhci->cmd_ring;
seg = ring->deq_seg;
do {
- memset(seg->trbs, 0, SEGMENT_SIZE);
+ memset(seg->trbs, 0,
+ sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
+ seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
+ cpu_to_le32(~TRB_CYCLE);
seg = seg->next;
} while (seg != ring->deq_seg);
@@ -1330,9 +1333,6 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
goto done;
}
- xhci_dbg(xhci, "Cancel URB %p\n", urb);
- xhci_dbg(xhci, "Event ring:\n");
- xhci_debug_ring(xhci, xhci->event_ring);
ep_index = xhci_get_endpoint_index(&urb->ep->desc);
ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
@@ -1341,12 +1341,18 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
goto done;
}
- xhci_dbg(xhci, "Endpoint ring:\n");
- xhci_debug_ring(xhci, ep_ring);
-
urb_priv = urb->hcpriv;
-
- for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
+ i = urb_priv->td_cnt;
+ if (i < urb_priv->length)
+ xhci_dbg(xhci, "Cancel URB %p, dev %s, ep 0x%x, "
+ "starting at offset 0x%llx\n",
+ urb, urb->dev->devpath,
+ urb->ep->desc.bEndpointAddress,
+ (unsigned long long) xhci_trb_virt_to_dma(
+ urb_priv->td[i]->start_seg,
+ urb_priv->td[i]->first_trb));
+
+ for (; i < urb_priv->length; i++) {
td = urb_priv->td[i];
list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
}
@@ -1617,6 +1623,7 @@ static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
/* FIXME: can we allocate more resources for the HC? */
break;
case COMP_BW_ERR:
+ case COMP_2ND_BW_ERR:
dev_warn(&udev->dev, "Not enough bandwidth "
"for new device state.\n");
ret = -ENOSPC;
@@ -2793,8 +2800,7 @@ static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
if (ret < 0)
return ret;
- max_streams = USB_SS_MAX_STREAMS(
- eps[i]->ss_ep_comp.bmAttributes);
+ max_streams = usb_ss_max_streams(&eps[i]->ss_ep_comp);
if (max_streams < (*num_streams - 1)) {
xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
eps[i]->desc.bEndpointAddress,
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 3c8fbd2772ea..fb99c8379142 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1033,7 +1033,6 @@ struct xhci_transfer_event {
/* Invalid Stream ID Error */
#define COMP_STRID_ERR 34
/* Secondary Bandwidth Error - may be returned by a Configure Endpoint cmd */
-/* FIXME - check for this */
#define COMP_2ND_BW_ERR 35
/* Split Transaction Error */
#define COMP_SPLIT_ERR 36
@@ -1356,7 +1355,7 @@ static inline unsigned int hcd_index(struct usb_hcd *hcd)
return 1;
}
-/* There is one ehci_hci structure per controller */
+/* There is one xhci_hcd structure per controller */
struct xhci_hcd {
struct usb_hcd *main_hcd;
struct usb_hcd *shared_hcd;
diff --git a/drivers/usb/misc/ftdi-elan.c b/drivers/usb/misc/ftdi-elan.c
index 2dbe600fbc11..a4a3c7cd4a11 100644
--- a/drivers/usb/misc/ftdi-elan.c
+++ b/drivers/usb/misc/ftdi-elan.c
@@ -53,7 +53,7 @@ MODULE_AUTHOR("Tony Olech");
MODULE_DESCRIPTION("FTDI ELAN driver");
MODULE_LICENSE("GPL");
#define INT_MODULE_PARM(n, v) static int n = v;module_param(n, int, 0444)
-static int distrust_firmware = 1;
+static bool distrust_firmware = 1;
module_param(distrust_firmware, bool, 0);
MODULE_PARM_DESC(distrust_firmware, "true to distrust firmware power/overcurren"
"t setup");
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index 7676b5b7e171..4fd0dc835ae5 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -62,7 +62,7 @@ MODULE_LICENSE("GPL");
/* Module parameters */
static DEFINE_MUTEX(iowarrior_mutex);
-static int debug = 0;
+static bool debug = 0;
module_param(debug, bool, 0644);
MODULE_PARM_DESC(debug, "debug=1 enables debugging messages");
@@ -734,7 +734,7 @@ static const struct file_operations iowarrior_fops = {
.llseek = noop_llseek,
};
-static char *iowarrior_devnode(struct device *dev, mode_t *mode)
+static char *iowarrior_devnode(struct device *dev, umode_t *mode)
{
return kasprintf(GFP_KERNEL, "usb/%s", dev_name(dev));
}
diff --git a/drivers/usb/misc/isight_firmware.c b/drivers/usb/misc/isight_firmware.c
index 1dc7e9581cc6..1c61830e96f9 100644
--- a/drivers/usb/misc/isight_firmware.c
+++ b/drivers/usb/misc/isight_firmware.c
@@ -55,8 +55,9 @@ static int isight_firmware_load(struct usb_interface *intf,
ptr = firmware->data;
+ buf[0] = 0x01;
if (usb_control_msg
- (dev, usb_sndctrlpipe(dev, 0), 0xa0, 0x40, 0xe600, 0, "\1", 1,
+ (dev, usb_sndctrlpipe(dev, 0), 0xa0, 0x40, 0xe600, 0, buf, 1,
300) != 1) {
printk(KERN_ERR
"Failed to initialise isight firmware loader\n");
@@ -100,8 +101,9 @@ static int isight_firmware_load(struct usb_interface *intf,
}
}
+ buf[0] = 0x00;
if (usb_control_msg
- (dev, usb_sndctrlpipe(dev, 0), 0xa0, 0x40, 0xe600, 0, "\0", 1,
+ (dev, usb_sndctrlpipe(dev, 0), 0xa0, 0x40, 0xe600, 0, buf, 1,
300) != 1) {
printk(KERN_ERR "isight firmware loading completion failed\n");
ret = -ENODEV;
diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c
index 16937da31cd8..575222042767 100644
--- a/drivers/usb/misc/legousbtower.c
+++ b/drivers/usb/misc/legousbtower.c
@@ -269,7 +269,7 @@ static const struct file_operations tower_fops = {
.llseek = tower_llseek,
};
-static char *legousbtower_devnode(struct device *dev, mode_t *mode)
+static char *legousbtower_devnode(struct device *dev, umode_t *mode)
{
return kasprintf(GFP_KERNEL, "usb/%s", dev_name(dev));
}
diff --git a/drivers/usb/misc/usbled.c b/drivers/usb/misc/usbled.c
index 4af56fbc3c06..12d03e7ad636 100644
--- a/drivers/usb/misc/usbled.c
+++ b/drivers/usb/misc/usbled.c
@@ -31,6 +31,8 @@ static const struct usb_device_id id_table[] = {
.driver_info = DELCOM_VISUAL_SIGNAL_INDICATOR },
{ USB_DEVICE(0x1d34, 0x0004),
.driver_info = DREAM_CHEEKY_WEBMAIL_NOTIFIER },
+ { USB_DEVICE(0x1d34, 0x000a),
+ .driver_info = DREAM_CHEEKY_WEBMAIL_NOTIFIER },
{ },
};
MODULE_DEVICE_TABLE(usb, id_table);
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index bd6d00802eab..959145baf3cf 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -1765,7 +1765,6 @@ static int test_unaligned_bulk(
* off just killing the userspace task and waiting for it to exit.
*/
-/* No BKL needed */
static int
usbtest_ioctl(struct usb_interface *intf, unsigned int code, void *buf)
{
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index 07a03460a598..f70cab3beeec 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -5,14 +5,13 @@
# (M)HDRC = (Multipoint) Highspeed Dual-Role Controller
config USB_MUSB_HDRC
+ tristate 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)'
depends on USB && USB_GADGET
- depends on (ARM || (BF54x && !BF544) || (BF52x && !BF522 && !BF523))
select NOP_USB_XCEIV if (ARCH_DAVINCI || MACH_OMAP3EVM || BLACKFIN)
select TWL4030_USB if MACH_OMAP_3430SDP
select TWL6030_USB if MACH_OMAP_4430SDP || MACH_OMAP4_PANDA
select USB_OTG_UTILS
select USB_GADGET_DUALSPEED
- tristate 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)'
help
Say Y here if your system has a dual role high speed USB
controller based on the Mentor Graphics silicon IP. Then
@@ -31,9 +30,10 @@ config USB_MUSB_HDRC
To compile this driver as a module, choose M here; the
module will be called "musb-hdrc".
+if USB_MUSB_HDRC
+
choice
prompt "Platform Glue Layer"
- depends on USB_MUSB_HDRC
config USB_MUSB_DAVINCI
tristate "DaVinci"
@@ -45,7 +45,6 @@ config USB_MUSB_DA8XX
config USB_MUSB_TUSB6010
tristate "TUSB6010"
- depends on ARCH_OMAP
config USB_MUSB_OMAP2PLUS
tristate "OMAP2430 and onwards"
@@ -65,46 +64,54 @@ config USB_MUSB_UX500
endchoice
-config MUSB_PIO_ONLY
- bool 'Disable DMA (always use PIO)'
- depends on USB_MUSB_HDRC
- default USB_MUSB_TUSB6010 || USB_MUSB_DA8XX || USB_MUSB_AM35X
+choice
+ prompt 'MUSB DMA mode'
+ default USB_UX500_DMA if USB_MUSB_UX500
+ default USB_INVENTRA_DMA if USB_MUSB_OMAP2PLUS || USB_MUSB_BLACKFIN
+ default USB_TI_CPPI_DMA if USB_MUSB_DAVINCI
+ default USB_TUSB_OMAP_DMA if USB_MUSB_TUSB6010
+ default MUSB_PIO_ONLY if USB_MUSB_TUSB6010 || USB_MUSB_DA8XX || USB_MUSB_AM35X
help
- All data is copied between memory and FIFO by the CPU.
- DMA controllers are ignored.
-
- Do not select 'n' here unless DMA support for your SOC or board
- is unavailable (or unstable). When DMA is enabled at compile time,
- you can still disable it at run time using the "use_dma=n" module
- parameter.
+ Unfortunately, only one option can be enabled here. Ideally one
+ should be able to build all these drivers into one kernel to
+ allow using DMA on multiplatform kernels.
config USB_UX500_DMA
- bool
- depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY
- default USB_MUSB_UX500
+ bool 'ST Ericsson U8500 and U5500'
+ depends on USB_MUSB_UX500
help
Enable DMA transfers on UX500 platforms.
config USB_INVENTRA_DMA
- bool
- depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY
- default USB_MUSB_OMAP2PLUS || USB_MUSB_BLACKFIN
+ bool 'Inventra'
+ depends on USB_MUSB_OMAP2PLUS || USB_MUSB_BLACKFIN
help
Enable DMA transfers using Mentor's engine.
config USB_TI_CPPI_DMA
- bool
- depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY
- default USB_MUSB_DAVINCI
+ bool 'TI CPPI (Davinci)'
+ depends on USB_MUSB_DAVINCI
help
Enable DMA transfers when TI CPPI DMA is available.
config USB_TUSB_OMAP_DMA
- bool
- depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY
+ bool 'TUSB 6010'
depends on USB_MUSB_TUSB6010
depends on ARCH_OMAP
- default y
help
Enable DMA transfers on TUSB 6010 when OMAP DMA is available.
+config MUSB_PIO_ONLY
+ bool 'Disable DMA (always use PIO)'
+ help
+ All data is copied between memory and FIFO by the CPU.
+ DMA controllers are ignored.
+
+ Do not choose this unless DMA support for your SOC or board
+ is unavailable (or unstable). When DMA is enabled at compile time,
+ you can still disable it at run time using the "use_dma=n" module
+ parameter.
+
+endchoice
+
+endif # USB_MUSB_HDRC
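For reference, the run-time escape hatch mentioned in the MUSB_PIO_ONLY help text above is an ordinary boolean module parameter; a minimal sketch of that pattern, mirroring the use_dma declaration that the musb_core.c hunk further down converts to bool, might look like this (sketch only, not part of the patch):

#include <linux/module.h>
#include <linux/moduleparam.h>

/* Sketch only: mirrors the use_dma parameter in musb_core.c.
 * "modprobe musb_hdrc use_dma=0" falls back to PIO at run time
 * even when a DMA engine was selected at compile time. */
static bool use_dma = 1;
module_param(use_dma, bool, 0);
MODULE_PARM_DESC(use_dma, "enable/disable use of DMA");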
diff --git a/drivers/usb/musb/Makefile b/drivers/usb/musb/Makefile
index d8fd9d092dec..88bfb9dee4bf 100644
--- a/drivers/usb/musb/Makefile
+++ b/drivers/usb/musb/Makefile
@@ -24,25 +24,7 @@ obj-$(CONFIG_USB_MUSB_UX500) += ux500.o
# PIO only, or DMA (several potential schemes).
# though PIO is always there to back up DMA, and for ep0
-ifneq ($(CONFIG_MUSB_PIO_ONLY),y)
-
- ifeq ($(CONFIG_USB_INVENTRA_DMA),y)
- musb_hdrc-y += musbhsdma.o
-
- else
- ifeq ($(CONFIG_USB_TI_CPPI_DMA),y)
- musb_hdrc-y += cppi_dma.o
-
- else
- ifeq ($(CONFIG_USB_TUSB_OMAP_DMA),y)
- musb_hdrc-y += tusb6010_omap.o
-
- else
- ifeq ($(CONFIG_USB_UX500_DMA),y)
- musb_hdrc-y += ux500_dma.o
-
- endif
- endif
- endif
- endif
-endif
+musb_hdrc-$(CONFIG_USB_INVENTRA_DMA) += musbhsdma.o
+musb_hdrc-$(CONFIG_USB_TI_CPPI_DMA) += cppi_dma.o
+musb_hdrc-$(CONFIG_USB_TUSB_OMAP_DMA) += tusb6010_omap.o
+musb_hdrc-$(CONFIG_USB_UX500_DMA) += ux500_dma.o
diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c
index 318fb4e8a885..66bc376005d2 100644
--- a/drivers/usb/musb/cppi_dma.c
+++ b/drivers/usb/musb/cppi_dma.c
@@ -513,7 +513,7 @@ static inline int cppi_autoreq_update(struct cppi_channel *rx,
if (!(val & MUSB_RXCSR_H_REQPKT)) {
val |= MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_H_WZC_BITS;
musb_writew(regs, MUSB_RXCSR, val);
- /* flush writebufer */
+ /* flush writebuffer */
val = musb_readw(regs, MUSB_RXCSR);
}
}
@@ -750,7 +750,7 @@ cppi_next_tx_segment(struct musb *musb, struct cppi_channel *tx)
* So this module parameter lets the heuristic be disabled. When using
* gadgetfs, the heuristic will probably need to be disabled.
*/
-static int cppi_rx_rndis = 1;
+static bool cppi_rx_rndis = 1;
module_param(cppi_rx_rndis, bool, 0);
MODULE_PARM_DESC(cppi_rx_rndis, "enable/disable RX RNDIS heuristic");
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index c1fa12ec7a9a..56cf0243979e 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -661,7 +661,6 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
handled = IRQ_HANDLED;
musb->is_active = 1;
- set_bit(HCD_FLAG_SAW_IRQ, &hcd->flags);
musb->ep0_stage = MUSB_EP0_START;
@@ -1432,7 +1431,7 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb)
struct musb_hw_ep *hw_ep = musb->endpoints + i;
hw_ep->fifo = MUSB_FIFO_OFFSET(i) + mbase;
-#ifdef CONFIG_USB_MUSB_TUSB6010
+#if defined(CONFIG_USB_MUSB_TUSB6010) || defined (CONFIG_USB_MUSB_TUSB6010_MODULE)
hw_ep->fifo_async = musb->async + 0x400 + MUSB_FIFO_OFFSET(i);
hw_ep->fifo_sync = musb->sync + 0x400 + MUSB_FIFO_OFFSET(i);
hw_ep->fifo_sync_va =
@@ -1587,7 +1586,7 @@ irqreturn_t musb_interrupt(struct musb *musb)
EXPORT_SYMBOL_GPL(musb_interrupt);
#ifndef CONFIG_MUSB_PIO_ONLY
-static int __initdata use_dma = 1;
+static bool __initdata use_dma = 1;
/* "modprobe ... use_dma=0" etc */
module_param(use_dma, bool, 0);
@@ -1631,6 +1630,7 @@ void musb_dma_completion(struct musb *musb, u8 epnum, u8 transmit)
}
}
}
+EXPORT_SYMBOL_GPL(musb_dma_completion);
#else
#define use_dma 0
@@ -2012,8 +2012,6 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
if (status < 0)
goto fail3;
- pm_runtime_put(musb->controller);
-
status = musb_init_debugfs(musb);
if (status < 0)
goto fail4;
@@ -2158,6 +2156,7 @@ static void musb_save_context(struct musb *musb)
if (!epio)
continue;
+ musb_writeb(musb_base, MUSB_INDEX, i);
musb->context.index_regs[i].txmaxp =
musb_readw(epio, MUSB_TXMAXP);
musb->context.index_regs[i].txcsr =
@@ -2233,6 +2232,7 @@ static void musb_restore_context(struct musb *musb)
if (!epio)
continue;
+ musb_writeb(musb_base, MUSB_INDEX, i);
musb_writew(epio, MUSB_TXMAXP,
musb->context.index_regs[i].txmaxp);
musb_writew(epio, MUSB_TXCSR,
@@ -2301,18 +2301,12 @@ static int musb_suspend(struct device *dev)
*/
}
- musb_save_context(musb);
-
spin_unlock_irqrestore(&musb->lock, flags);
return 0;
}
static int musb_resume_noirq(struct device *dev)
{
- struct musb *musb = dev_to_musb(dev);
-
- musb_restore_context(musb);
-
/* for static cmos like DaVinci, register values were preserved
* unless for some reason the whole soc powered down or the USB
* module got reset through the PSC (vs just being disabled).
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index b3c065ab9dbc..3d28fb8a2dc9 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -40,7 +40,6 @@
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/timer.h>
-#include <linux/clk.h>
#include <linux/device.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
@@ -311,6 +310,7 @@ struct musb_context_registers {
u8 index, testmode;
u8 devctl, busctl, misc;
+ u32 otg_interfsel;
struct musb_csr_regs index_regs[MUSB_C_NUM_EPS];
};
@@ -327,6 +327,7 @@ struct musb {
irqreturn_t (*isr)(int, void *);
struct work_struct irq_work;
+ struct work_struct otg_notifier_work;
u16 hwvers;
/* this hub status bit is reserved by USB 2.0 and not seen by usbcore */
@@ -372,6 +373,7 @@ struct musb {
u16 int_tx;
struct otg_transceiver *xceiv;
+ u8 xceiv_event;
int nIrq;
unsigned irq_wake:1;
diff --git a/drivers/usb/musb/musb_debug.h b/drivers/usb/musb/musb_debug.h
index 742eada5002e..27ba8f799462 100644
--- a/drivers/usb/musb/musb_debug.h
+++ b/drivers/usb/musb/musb_debug.h
@@ -43,8 +43,8 @@
#define ERR(fmt, args...) yprintk(KERN_ERR, fmt, ## args)
#ifdef CONFIG_DEBUG_FS
-extern int musb_init_debugfs(struct musb *musb);
-extern void musb_exit_debugfs(struct musb *musb);
+int musb_init_debugfs(struct musb *musb);
+void musb_exit_debugfs(struct musb *musb);
#else
static inline int musb_init_debugfs(struct musb *musb)
{
diff --git a/drivers/usb/musb/musb_debugfs.c b/drivers/usb/musb/musb_debugfs.c
index 61f4ee466df7..13d9af9bf920 100644
--- a/drivers/usb/musb/musb_debugfs.c
+++ b/drivers/usb/musb/musb_debugfs.c
@@ -33,11 +33,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/sched.h>
#include <linux/init.h>
-#include <linux/list.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
@@ -46,10 +42,6 @@
#include "musb_core.h"
#include "musb_debug.h"
-#ifdef CONFIG_ARCH_DAVINCI
-#include "davinci.h"
-#endif
-
struct musb_register_map {
char *name;
unsigned offset;
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index d51043acfe1a..ac3d2eec20fe 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -40,8 +40,6 @@
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
-#include <linux/moduleparam.h>
-#include <linux/stat.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
@@ -1844,7 +1842,7 @@ int __init musb_gadget_setup(struct musb *musb)
*/
musb->g.ops = &musb_gadget_operations;
- musb->g.is_dualspeed = 1;
+ musb->g.max_speed = USB_SPEED_HIGH;
musb->g.speed = USB_SPEED_UNKNOWN;
/* this "gadget" abstracts/virtualizes the controller */
@@ -1903,7 +1901,7 @@ static int musb_gadget_start(struct usb_gadget *g,
unsigned long flags;
int retval = -EINVAL;
- if (driver->speed != USB_SPEED_HIGH)
+ if (driver->max_speed < USB_SPEED_HIGH)
goto err0;
pm_runtime_get_sync(musb->controller);
diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c
index 6a0d0467ec74..e40d7647caf1 100644
--- a/drivers/usb/musb/musb_gadget_ep0.c
+++ b/drivers/usb/musb/musb_gadget_ep0.c
@@ -37,7 +37,6 @@
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/spinlock.h>
-#include <linux/init.h>
#include <linux/device.h>
#include <linux/interrupt.h>
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 60ddba8066ea..79cb0af779fa 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -774,6 +774,10 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
if (musb->double_buffer_not_ok)
musb_writew(epio, MUSB_TXMAXP,
hw_ep->max_packet_sz_tx);
+ else if (can_bulk_split(musb, qh->type))
+ musb_writew(epio, MUSB_TXMAXP, packet_sz
+ | ((hw_ep->max_packet_sz_tx /
+ packet_sz) - 1) << 11);
else
musb_writew(epio, MUSB_TXMAXP,
qh->maxpacket |
diff --git a/drivers/usb/musb/musb_io.h b/drivers/usb/musb/musb_io.h
index 03c6ccdbb3be..e61aa95f2d2a 100644
--- a/drivers/usb/musb/musb_io.h
+++ b/drivers/usb/musb/musb_io.h
@@ -74,7 +74,7 @@ static inline void musb_writel(void __iomem *addr, unsigned offset, u32 data)
{ __raw_writel(data, addr + offset); }
-#ifdef CONFIG_USB_MUSB_TUSB6010
+#if defined(CONFIG_USB_MUSB_TUSB6010) || defined (CONFIG_USB_MUSB_TUSB6010_MODULE)
/*
* TUSB6010 doesn't allow 8-bit access; 16-bit access is the minimum.
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index ba85f273e487..c27bbbf32b52 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -29,7 +29,6 @@
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/list.h>
-#include <linux/clk.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
@@ -228,21 +227,25 @@ static int musb_otg_notifications(struct notifier_block *nb,
unsigned long event, void *unused)
{
struct musb *musb = container_of(nb, struct musb, nb);
+
+ musb->xceiv_event = event;
+ schedule_work(&musb->otg_notifier_work);
+
+ return 0;
+}
+
+static void musb_otg_notifier_work(struct work_struct *data_notifier_work)
+{
+ struct musb *musb = container_of(data_notifier_work, struct musb, otg_notifier_work);
struct device *dev = musb->controller;
struct musb_hdrc_platform_data *pdata = dev->platform_data;
struct omap_musb_board_data *data = pdata->board_data;
- switch (event) {
+ switch (musb->xceiv_event) {
case USB_EVENT_ID:
dev_dbg(musb->controller, "ID GND\n");
- if (is_otg_enabled(musb)) {
- if (musb->gadget_driver) {
- pm_runtime_get_sync(musb->controller);
- otg_init(musb->xceiv);
- omap2430_musb_set_vbus(musb, 1);
- }
- } else {
+ if (!is_otg_enabled(musb) || musb->gadget_driver) {
pm_runtime_get_sync(musb->controller);
otg_init(musb->xceiv);
omap2430_musb_set_vbus(musb, 1);
@@ -274,10 +277,7 @@ static int musb_otg_notifications(struct notifier_block *nb,
break;
default:
dev_dbg(musb->controller, "ID float\n");
- return NOTIFY_DONE;
}
-
- return NOTIFY_OK;
}
static int omap2430_musb_init(struct musb *musb)
@@ -297,6 +297,8 @@ static int omap2430_musb_init(struct musb *musb)
return -ENODEV;
}
+ INIT_WORK(&musb->otg_notifier_work, musb_otg_notifier_work);
+
status = pm_runtime_get_sync(dev);
if (status < 0) {
dev_err(dev, "pm_runtime_get_sync FAILED");
@@ -334,7 +336,6 @@ static int omap2430_musb_init(struct musb *musb)
return 0;
err1:
- pm_runtime_disable(dev);
return status;
}
@@ -350,20 +351,19 @@ static void omap2430_musb_enable(struct musb *musb)
case USB_EVENT_ID:
otg_init(musb->xceiv);
- if (data->interface_type == MUSB_INTERFACE_UTMI) {
- devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
- /* start the session */
- devctl |= MUSB_DEVCTL_SESSION;
- musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
- while (musb_readb(musb->mregs, MUSB_DEVCTL) &
- MUSB_DEVCTL_BDEVICE) {
- cpu_relax();
-
- if (time_after(jiffies, timeout)) {
- dev_err(musb->controller,
- "configured as A device timeout");
- break;
- }
+ if (data->interface_type != MUSB_INTERFACE_UTMI)
+ break;
+ devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+ /* start the session */
+ devctl |= MUSB_DEVCTL_SESSION;
+ musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+ while (musb_readb(musb->mregs, MUSB_DEVCTL) &
+ MUSB_DEVCTL_BDEVICE) {
+ cpu_relax();
+
+ if (time_after(jiffies, timeout)) {
+ dev_err(dev, "configured as A device timeout");
+ break;
}
}
break;
@@ -478,7 +478,6 @@ static int __exit omap2430_remove(struct platform_device *pdev)
platform_device_del(glue->musb);
platform_device_put(glue->musb);
pm_runtime_put(&pdev->dev);
- pm_runtime_disable(&pdev->dev);
kfree(glue);
return 0;
@@ -491,6 +490,9 @@ static int omap2430_runtime_suspend(struct device *dev)
struct omap2430_glue *glue = dev_get_drvdata(dev);
struct musb *musb = glue_to_musb(glue);
+ musb->context.otg_interfsel = musb_readl(musb->mregs,
+ OTG_INTERFSEL);
+
omap2430_low_level_exit(musb);
otg_set_suspend(musb->xceiv, 1);
@@ -503,6 +505,9 @@ static int omap2430_runtime_resume(struct device *dev)
struct musb *musb = glue_to_musb(glue);
omap2430_low_level_init(musb);
+ musb_writel(musb->mregs, OTG_INTERFSEL,
+ musb->context.otg_interfsel);
+
otg_set_suspend(musb->xceiv, 0);
return 0;
diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c
index ec1480191f78..1f405616e6cd 100644
--- a/drivers/usb/musb/tusb6010.c
+++ b/drivers/usb/musb/tusb6010.c
@@ -56,6 +56,7 @@ u8 tusb_get_revision(struct musb *musb)
return rev;
}
+EXPORT_SYMBOL_GPL(tusb_get_revision);
static int tusb_print_revision(struct musb *musb)
{
diff --git a/drivers/usb/musb/ux500_dma.c b/drivers/usb/musb/ux500_dma.c
index ef4333f4bbe0..97cb45916c43 100644
--- a/drivers/usb/musb/ux500_dma.c
+++ b/drivers/usb/musb/ux500_dma.c
@@ -37,7 +37,6 @@ struct ux500_dma_channel {
struct dma_channel channel;
struct ux500_dma_controller *controller;
struct musb_hw_ep *hw_ep;
- struct work_struct channel_work;
struct dma_chan *dma_chan;
unsigned int cur_len;
dma_cookie_t cookie;
@@ -56,31 +55,11 @@ struct ux500_dma_controller {
dma_addr_t phy_base;
};
-/* Work function invoked from DMA callback to handle tx transfers. */
-static void ux500_tx_work(struct work_struct *data)
-{
- struct ux500_dma_channel *ux500_channel = container_of(data,
- struct ux500_dma_channel, channel_work);
- struct musb_hw_ep *hw_ep = ux500_channel->hw_ep;
- struct musb *musb = hw_ep->musb;
- unsigned long flags;
-
- dev_dbg(musb->controller, "DMA tx transfer done on hw_ep=%d\n",
- hw_ep->epnum);
-
- spin_lock_irqsave(&musb->lock, flags);
- ux500_channel->channel.actual_len = ux500_channel->cur_len;
- ux500_channel->channel.status = MUSB_DMA_STATUS_FREE;
- musb_dma_completion(musb, hw_ep->epnum,
- ux500_channel->is_tx);
- spin_unlock_irqrestore(&musb->lock, flags);
-}
-
/* Work function invoked from DMA callback to handle rx transfers. */
-static void ux500_rx_work(struct work_struct *data)
+void ux500_dma_callback(void *private_data)
{
- struct ux500_dma_channel *ux500_channel = container_of(data,
- struct ux500_dma_channel, channel_work);
+ struct dma_channel *channel = private_data;
+ struct ux500_dma_channel *ux500_channel = channel->private_data;
struct musb_hw_ep *hw_ep = ux500_channel->hw_ep;
struct musb *musb = hw_ep->musb;
unsigned long flags;
@@ -94,14 +73,7 @@ static void ux500_rx_work(struct work_struct *data)
musb_dma_completion(musb, hw_ep->epnum,
ux500_channel->is_tx);
spin_unlock_irqrestore(&musb->lock, flags);
-}
-
-void ux500_dma_callback(void *private_data)
-{
- struct dma_channel *channel = (struct dma_channel *)private_data;
- struct ux500_dma_channel *ux500_channel = channel->private_data;
- schedule_work(&ux500_channel->channel_work);
}
static bool ux500_configure_channel(struct dma_channel *channel,
@@ -112,7 +84,7 @@ static bool ux500_configure_channel(struct dma_channel *channel,
struct musb_hw_ep *hw_ep = ux500_channel->hw_ep;
struct dma_chan *dma_chan = ux500_channel->dma_chan;
struct dma_async_tx_descriptor *dma_desc;
- enum dma_data_direction direction;
+ enum dma_transfer_direction direction;
struct scatterlist sg;
struct dma_slave_config slave_conf;
enum dma_slave_buswidth addr_width;
@@ -132,7 +104,7 @@ static bool ux500_configure_channel(struct dma_channel *channel,
sg_dma_address(&sg) = dma_addr;
sg_dma_len(&sg) = len;
- direction = ux500_channel->is_tx ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+ direction = ux500_channel->is_tx ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
addr_width = (len & 0x3) ? DMA_SLAVE_BUSWIDTH_1_BYTE :
DMA_SLAVE_BUSWIDTH_4_BYTES;
@@ -330,7 +302,6 @@ static int ux500_dma_controller_start(struct dma_controller *c)
void **param_array;
struct ux500_dma_channel *channel_array;
u32 ch_count;
- void (*musb_channel_work)(struct work_struct *);
dma_cap_mask_t mask;
if ((data->num_rx_channels > UX500_MUSB_DMA_NUM_RX_CHANNELS) ||
@@ -347,7 +318,6 @@ static int ux500_dma_controller_start(struct dma_controller *c)
channel_array = controller->rx_channel;
ch_count = data->num_rx_channels;
param_array = data->dma_rx_param_array;
- musb_channel_work = ux500_rx_work;
for (dir = 0; dir < 2; dir++) {
for (ch_num = 0; ch_num < ch_count; ch_num++) {
@@ -374,15 +344,12 @@ static int ux500_dma_controller_start(struct dma_controller *c)
return -EBUSY;
}
- INIT_WORK(&ux500_channel->channel_work,
- musb_channel_work);
}
/* Prepare the loop for TX channels */
channel_array = controller->tx_channel;
ch_count = data->num_tx_channels;
param_array = data->dma_tx_param_array;
- musb_channel_work = ux500_tx_work;
is_tx = 1;
}
diff --git a/drivers/usb/otg/Kconfig b/drivers/usb/otg/Kconfig
index c66481ad98d7..2a25955881fc 100644
--- a/drivers/usb/otg/Kconfig
+++ b/drivers/usb/otg/Kconfig
@@ -82,9 +82,9 @@ config NOP_USB_XCEIV
tristate "NOP USB Transceiver Driver"
select USB_OTG_UTILS
help
- this driver is to be used by all the usb transceiver which are either
- built-in with usb ip or which are autonomous and doesn't require any
- phy programming such as ISP1x04 etc.
+ This driver is to be used by all the USB transceivers which are either
+ built-in with the USB IP or which are autonomous and don't require any
+ PHY programming, such as ISP1x04 etc.
config USB_LANGWELL_OTG
tristate "Intel Langwell USB OTG dual-role support"
@@ -114,13 +114,13 @@ config USB_MSM_OTG
has an external PHY.
config AB8500_USB
- tristate "AB8500 USB Transceiver Driver"
- depends on AB8500_CORE
- select USB_OTG_UTILS
- help
- Enable this to support the USB OTG transceiver in AB8500 chip.
- This transceiver supports high and full speed devices plus,
- in host mode, low speed.
+ tristate "AB8500 USB Transceiver Driver"
+ depends on AB8500_CORE
+ select USB_OTG_UTILS
+ help
+ Enable this to support the USB OTG transceiver in AB8500 chip.
+ This transceiver supports high and full speed devices plus,
+ in host mode, low speed.
config FSL_USB2_OTG
bool "Freescale USB OTG Transceiver Driver"
@@ -130,4 +130,16 @@ config FSL_USB2_OTG
help
Enable this to support Freescale USB OTG transceiver.
+config USB_MV_OTG
+ tristate "Marvell USB OTG support"
+ depends on USB_MV_UDC
+ select USB_OTG
+ select USB_OTG_UTILS
+ help
+ Say Y here if you want to build the Marvell USB OTG transceiver
+ driver into the kernel (including PXA and MMP series). This driver
+ implements role switch between EHCI host driver and gadget driver.
+
+ To compile this driver as a module, choose M here.
+
endif # USB || OTG
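As a rough illustration of the role switch described in the USB_MV_OTG help text, a consumer of this era would look up the registered transceiver and hand it its host or gadget; a hypothetical sketch (the example_* name is not part of this patch) could be:

#include <linux/errno.h>
#include <linux/usb.h>
#include <linux/usb/otg.h>

/* Hypothetical consumer: binds a host controller to whatever
 * transceiver was registered via otg_set_transceiver(), for
 * instance the one mv_otg_probe() registers in mv_otg.c below.
 * A real user would drop the reference again with
 * otg_put_transceiver() when done. */
static int example_bind_host(struct usb_bus *host)
{
	struct otg_transceiver *xceiv = otg_get_transceiver();

	if (!xceiv)
		return -ENODEV;	/* no transceiver registered yet */

	/* ends up in mv_otg_set_host(), which records the host */
	return otg_set_host(xceiv, host);
}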
diff --git a/drivers/usb/otg/Makefile b/drivers/usb/otg/Makefile
index 566655c53331..b2c5a9598637 100644
--- a/drivers/usb/otg/Makefile
+++ b/drivers/usb/otg/Makefile
@@ -21,3 +21,4 @@ obj-$(CONFIG_USB_MSM_OTG) += msm_otg.o
obj-$(CONFIG_AB8500_USB) += ab8500-usb.o
fsl_usb2_otg-objs := fsl_otg.o otg_fsm.o
obj-$(CONFIG_FSL_USB2_OTG) += fsl_usb2_otg.o
+obj-$(CONFIG_USB_MV_OTG) += mv_otg.o
diff --git a/drivers/usb/otg/ab8500-usb.c b/drivers/usb/otg/ab8500-usb.c
index 07ccea9ada40..74fe6e62e0f7 100644
--- a/drivers/usb/otg/ab8500-usb.c
+++ b/drivers/usb/otg/ab8500-usb.c
@@ -30,7 +30,7 @@
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/mfd/abx500.h>
-#include <linux/mfd/ab8500.h>
+#include <linux/mfd/abx500/ab8500.h>
#define AB8500_MAIN_WD_CTRL_REG 0x01
#define AB8500_USB_LINE_STAT_REG 0x80
diff --git a/drivers/usb/otg/fsl_otg.c b/drivers/usb/otg/fsl_otg.c
index 0f420b25e9a9..a190850d2d3b 100644
--- a/drivers/usb/otg/fsl_otg.c
+++ b/drivers/usb/otg/fsl_otg.c
@@ -639,7 +639,7 @@ static int fsl_otg_set_power(struct otg_transceiver *otg_p, unsigned mA)
* Delayed pin detect interrupt processing.
*
* When the Mini-A cable is disconnected from the board,
- * the pin-detect interrupt happens before the disconnnect
+ * the pin-detect interrupt happens before the disconnect
* interrupts for the connected device(s). In order to
* process the disconnect interrupt(s) prior to switching
* roles, the pin-detect interrupts are delayed, and handled
@@ -1151,18 +1151,7 @@ struct platform_driver fsl_otg_driver = {
},
};
-static int __init fsl_usb_otg_init(void)
-{
- pr_info(DRIVER_INFO "\n");
- return platform_driver_register(&fsl_otg_driver);
-}
-module_init(fsl_usb_otg_init);
-
-static void __exit fsl_usb_otg_exit(void)
-{
- platform_driver_unregister(&fsl_otg_driver);
-}
-module_exit(fsl_usb_otg_exit);
+module_platform_driver(fsl_otg_driver);
MODULE_DESCRIPTION(DRIVER_INFO);
MODULE_AUTHOR(DRIVER_AUTHOR);
diff --git a/drivers/usb/otg/mv_otg.c b/drivers/usb/otg/mv_otg.c
new file mode 100644
index 000000000000..db0d4fcdc8e2
--- /dev/null
+++ b/drivers/usb/otg/mv_otg.c
@@ -0,0 +1,957 @@
+/*
+ * Copyright (C) 2011 Marvell International Ltd. All rights reserved.
+ * Author: Chao Xie <chao.xie@marvell.com>
+ * Neil Zhang <zhangwm@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+#include <linux/device.h>
+#include <linux/proc_fs.h>
+#include <linux/clk.h>
+#include <linux/workqueue.h>
+#include <linux/platform_device.h>
+
+#include <linux/usb.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/otg.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/hcd.h>
+#include <linux/platform_data/mv_usb.h>
+
+#include "mv_otg.h"
+
+#define DRIVER_DESC "Marvell USB OTG transceiver driver"
+#define DRIVER_VERSION "Jan 20, 2010"
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_LICENSE("GPL");
+
+static const char driver_name[] = "mv-otg";
+
+static char *state_string[] = {
+ "undefined",
+ "b_idle",
+ "b_srp_init",
+ "b_peripheral",
+ "b_wait_acon",
+ "b_host",
+ "a_idle",
+ "a_wait_vrise",
+ "a_wait_bcon",
+ "a_host",
+ "a_suspend",
+ "a_peripheral",
+ "a_wait_vfall",
+ "a_vbus_err"
+};
+
+static int mv_otg_set_vbus(struct otg_transceiver *otg, bool on)
+{
+ struct mv_otg *mvotg = container_of(otg, struct mv_otg, otg);
+ if (mvotg->pdata->set_vbus == NULL)
+ return -ENODEV;
+
+ return mvotg->pdata->set_vbus(on);
+}
+
+static int mv_otg_set_host(struct otg_transceiver *otg,
+ struct usb_bus *host)
+{
+ otg->host = host;
+
+ return 0;
+}
+
+static int mv_otg_set_peripheral(struct otg_transceiver *otg,
+ struct usb_gadget *gadget)
+{
+ otg->gadget = gadget;
+
+ return 0;
+}
+
+static void mv_otg_run_state_machine(struct mv_otg *mvotg,
+ unsigned long delay)
+{
+ dev_dbg(&mvotg->pdev->dev, "transceiver is updated\n");
+ if (!mvotg->qwork)
+ return;
+
+ queue_delayed_work(mvotg->qwork, &mvotg->work, delay);
+}
+
+static void mv_otg_timer_await_bcon(unsigned long data)
+{
+ struct mv_otg *mvotg = (struct mv_otg *) data;
+
+ mvotg->otg_ctrl.a_wait_bcon_timeout = 1;
+
+ dev_info(&mvotg->pdev->dev, "B Device No Response!\n");
+
+ if (spin_trylock(&mvotg->wq_lock)) {
+ mv_otg_run_state_machine(mvotg, 0);
+ spin_unlock(&mvotg->wq_lock);
+ }
+}
+
+static int mv_otg_cancel_timer(struct mv_otg *mvotg, unsigned int id)
+{
+ struct timer_list *timer;
+
+ if (id >= OTG_TIMER_NUM)
+ return -EINVAL;
+
+ timer = &mvotg->otg_ctrl.timer[id];
+
+ if (timer_pending(timer))
+ del_timer(timer);
+
+ return 0;
+}
+
+static int mv_otg_set_timer(struct mv_otg *mvotg, unsigned int id,
+ unsigned long interval,
+ void (*callback) (unsigned long))
+{
+ struct timer_list *timer;
+
+ if (id >= OTG_TIMER_NUM)
+ return -EINVAL;
+
+ timer = &mvotg->otg_ctrl.timer[id];
+ if (timer_pending(timer)) {
+ dev_err(&mvotg->pdev->dev, "Timer%d is already running\n", id);
+ return -EBUSY;
+ }
+
+ init_timer(timer);
+ timer->data = (unsigned long) mvotg;
+ timer->function = callback;
+ timer->expires = jiffies + interval;
+ add_timer(timer);
+
+ return 0;
+}
+
+static int mv_otg_reset(struct mv_otg *mvotg)
+{
+ unsigned int loops;
+ u32 tmp;
+
+ /* Stop the controller */
+ tmp = readl(&mvotg->op_regs->usbcmd);
+ tmp &= ~USBCMD_RUN_STOP;
+ writel(tmp, &mvotg->op_regs->usbcmd);
+
+ /* Reset the controller to get default values */
+ writel(USBCMD_CTRL_RESET, &mvotg->op_regs->usbcmd);
+
+ loops = 500;
+ while (readl(&mvotg->op_regs->usbcmd) & USBCMD_CTRL_RESET) {
+ if (loops == 0) {
+ dev_err(&mvotg->pdev->dev,
+ "Wait for RESET completed TIMEOUT\n");
+ return -ETIMEDOUT;
+ }
+ loops--;
+ udelay(20);
+ }
+
+ writel(0x0, &mvotg->op_regs->usbintr);
+ tmp = readl(&mvotg->op_regs->usbsts);
+ writel(tmp, &mvotg->op_regs->usbsts);
+
+ return 0;
+}
+
+static void mv_otg_init_irq(struct mv_otg *mvotg)
+{
+ u32 otgsc;
+
+ mvotg->irq_en = OTGSC_INTR_A_SESSION_VALID
+ | OTGSC_INTR_A_VBUS_VALID;
+ mvotg->irq_status = OTGSC_INTSTS_A_SESSION_VALID
+ | OTGSC_INTSTS_A_VBUS_VALID;
+
+ if (mvotg->pdata->vbus == NULL) {
+ mvotg->irq_en |= OTGSC_INTR_B_SESSION_VALID
+ | OTGSC_INTR_B_SESSION_END;
+ mvotg->irq_status |= OTGSC_INTSTS_B_SESSION_VALID
+ | OTGSC_INTSTS_B_SESSION_END;
+ }
+
+ if (mvotg->pdata->id == NULL) {
+ mvotg->irq_en |= OTGSC_INTR_USB_ID;
+ mvotg->irq_status |= OTGSC_INTSTS_USB_ID;
+ }
+
+ otgsc = readl(&mvotg->op_regs->otgsc);
+ otgsc |= mvotg->irq_en;
+ writel(otgsc, &mvotg->op_regs->otgsc);
+}
+
+static void mv_otg_start_host(struct mv_otg *mvotg, int on)
+{
+ struct otg_transceiver *otg = &mvotg->otg;
+ struct usb_hcd *hcd;
+
+ if (!otg->host)
+ return;
+
+ dev_info(&mvotg->pdev->dev, "%s host\n", on ? "start" : "stop");
+
+ hcd = bus_to_hcd(otg->host);
+
+ if (on)
+ usb_add_hcd(hcd, hcd->irq, IRQF_SHARED);
+ else
+ usb_remove_hcd(hcd);
+}
+
+static void mv_otg_start_periphrals(struct mv_otg *mvotg, int on)
+{
+ struct otg_transceiver *otg = &mvotg->otg;
+
+ if (!otg->gadget)
+ return;
+
+ dev_info(otg->dev, "gadget %s\n", on ? "on" : "off");
+
+ if (on)
+ usb_gadget_vbus_connect(otg->gadget);
+ else
+ usb_gadget_vbus_disconnect(otg->gadget);
+}
+
+static void otg_clock_enable(struct mv_otg *mvotg)
+{
+ unsigned int i;
+
+ for (i = 0; i < mvotg->clknum; i++)
+ clk_enable(mvotg->clk[i]);
+}
+
+static void otg_clock_disable(struct mv_otg *mvotg)
+{
+ unsigned int i;
+
+ for (i = 0; i < mvotg->clknum; i++)
+ clk_disable(mvotg->clk[i]);
+}
+
+static int mv_otg_enable_internal(struct mv_otg *mvotg)
+{
+ int retval = 0;
+
+ if (mvotg->active)
+ return 0;
+
+ dev_dbg(&mvotg->pdev->dev, "otg enabled\n");
+
+ otg_clock_enable(mvotg);
+ if (mvotg->pdata->phy_init) {
+ retval = mvotg->pdata->phy_init(mvotg->phy_regs);
+ if (retval) {
+ dev_err(&mvotg->pdev->dev,
+ "init phy error %d\n", retval);
+ otg_clock_disable(mvotg);
+ return retval;
+ }
+ }
+ mvotg->active = 1;
+
+ return 0;
+
+}
+
+static int mv_otg_enable(struct mv_otg *mvotg)
+{
+ if (mvotg->clock_gating)
+ return mv_otg_enable_internal(mvotg);
+
+ return 0;
+}
+
+static void mv_otg_disable_internal(struct mv_otg *mvotg)
+{
+ if (mvotg->active) {
+ dev_dbg(&mvotg->pdev->dev, "otg disabled\n");
+ if (mvotg->pdata->phy_deinit)
+ mvotg->pdata->phy_deinit(mvotg->phy_regs);
+ otg_clock_disable(mvotg);
+ mvotg->active = 0;
+ }
+}
+
+static void mv_otg_disable(struct mv_otg *mvotg)
+{
+ if (mvotg->clock_gating)
+ mv_otg_disable_internal(mvotg);
+}
+
+static void mv_otg_update_inputs(struct mv_otg *mvotg)
+{
+ struct mv_otg_ctrl *otg_ctrl = &mvotg->otg_ctrl;
+ u32 otgsc;
+
+ otgsc = readl(&mvotg->op_regs->otgsc);
+
+ if (mvotg->pdata->vbus) {
+ if (mvotg->pdata->vbus->poll() == VBUS_HIGH) {
+ otg_ctrl->b_sess_vld = 1;
+ otg_ctrl->b_sess_end = 0;
+ } else {
+ otg_ctrl->b_sess_vld = 0;
+ otg_ctrl->b_sess_end = 1;
+ }
+ } else {
+ otg_ctrl->b_sess_vld = !!(otgsc & OTGSC_STS_B_SESSION_VALID);
+ otg_ctrl->b_sess_end = !!(otgsc & OTGSC_STS_B_SESSION_END);
+ }
+
+ if (mvotg->pdata->id)
+ otg_ctrl->id = !!mvotg->pdata->id->poll();
+ else
+ otg_ctrl->id = !!(otgsc & OTGSC_STS_USB_ID);
+
+ if (mvotg->pdata->otg_force_a_bus_req && !otg_ctrl->id)
+ otg_ctrl->a_bus_req = 1;
+
+ otg_ctrl->a_sess_vld = !!(otgsc & OTGSC_STS_A_SESSION_VALID);
+ otg_ctrl->a_vbus_vld = !!(otgsc & OTGSC_STS_A_VBUS_VALID);
+
+ dev_dbg(&mvotg->pdev->dev, "%s: ", __func__);
+ dev_dbg(&mvotg->pdev->dev, "id %d\n", otg_ctrl->id);
+ dev_dbg(&mvotg->pdev->dev, "b_sess_vld %d\n", otg_ctrl->b_sess_vld);
+ dev_dbg(&mvotg->pdev->dev, "b_sess_end %d\n", otg_ctrl->b_sess_end);
+ dev_dbg(&mvotg->pdev->dev, "a_vbus_vld %d\n", otg_ctrl->a_vbus_vld);
+ dev_dbg(&mvotg->pdev->dev, "a_sess_vld %d\n", otg_ctrl->a_sess_vld);
+}
+
+static void mv_otg_update_state(struct mv_otg *mvotg)
+{
+ struct mv_otg_ctrl *otg_ctrl = &mvotg->otg_ctrl;
+ struct otg_transceiver *otg = &mvotg->otg;
+ int old_state = otg->state;
+
+ switch (old_state) {
+ case OTG_STATE_UNDEFINED:
+ otg->state = OTG_STATE_B_IDLE;
+ /* FALL THROUGH */
+ case OTG_STATE_B_IDLE:
+ if (otg_ctrl->id == 0)
+ otg->state = OTG_STATE_A_IDLE;
+ else if (otg_ctrl->b_sess_vld)
+ otg->state = OTG_STATE_B_PERIPHERAL;
+ break;
+ case OTG_STATE_B_PERIPHERAL:
+ if (!otg_ctrl->b_sess_vld || otg_ctrl->id == 0)
+ otg->state = OTG_STATE_B_IDLE;
+ break;
+ case OTG_STATE_A_IDLE:
+ if (otg_ctrl->id)
+ otg->state = OTG_STATE_B_IDLE;
+ else if (!(otg_ctrl->a_bus_drop) &&
+ (otg_ctrl->a_bus_req || otg_ctrl->a_srp_det))
+ otg->state = OTG_STATE_A_WAIT_VRISE;
+ break;
+ case OTG_STATE_A_WAIT_VRISE:
+ if (otg_ctrl->a_vbus_vld)
+ otg->state = OTG_STATE_A_WAIT_BCON;
+ break;
+ case OTG_STATE_A_WAIT_BCON:
+ if (otg_ctrl->id || otg_ctrl->a_bus_drop
+ || otg_ctrl->a_wait_bcon_timeout) {
+ mv_otg_cancel_timer(mvotg, A_WAIT_BCON_TIMER);
+ mvotg->otg_ctrl.a_wait_bcon_timeout = 0;
+ otg->state = OTG_STATE_A_WAIT_VFALL;
+ otg_ctrl->a_bus_req = 0;
+ } else if (!otg_ctrl->a_vbus_vld) {
+ mv_otg_cancel_timer(mvotg, A_WAIT_BCON_TIMER);
+ mvotg->otg_ctrl.a_wait_bcon_timeout = 0;
+ otg->state = OTG_STATE_A_VBUS_ERR;
+ } else if (otg_ctrl->b_conn) {
+ mv_otg_cancel_timer(mvotg, A_WAIT_BCON_TIMER);
+ mvotg->otg_ctrl.a_wait_bcon_timeout = 0;
+ otg->state = OTG_STATE_A_HOST;
+ }
+ break;
+ case OTG_STATE_A_HOST:
+ if (otg_ctrl->id || !otg_ctrl->b_conn
+ || otg_ctrl->a_bus_drop)
+ otg->state = OTG_STATE_A_WAIT_BCON;
+ else if (!otg_ctrl->a_vbus_vld)
+ otg->state = OTG_STATE_A_VBUS_ERR;
+ break;
+ case OTG_STATE_A_WAIT_VFALL:
+ if (otg_ctrl->id
+ || (!otg_ctrl->b_conn && otg_ctrl->a_sess_vld)
+ || otg_ctrl->a_bus_req)
+ otg->state = OTG_STATE_A_IDLE;
+ break;
+ case OTG_STATE_A_VBUS_ERR:
+ if (otg_ctrl->id || otg_ctrl->a_clr_err
+ || otg_ctrl->a_bus_drop) {
+ otg_ctrl->a_clr_err = 0;
+ otg->state = OTG_STATE_A_WAIT_VFALL;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static void mv_otg_work(struct work_struct *work)
+{
+ struct mv_otg *mvotg;
+ struct otg_transceiver *otg;
+ int old_state;
+
+ mvotg = container_of((struct delayed_work *)work, struct mv_otg, work);
+
+run:
+ /* The workqueue is single-threaded; otherwise a spin_lock would be needed here. */
+ otg = &mvotg->otg;
+ old_state = otg->state;
+
+ if (!mvotg->active)
+ return;
+
+ mv_otg_update_inputs(mvotg);
+ mv_otg_update_state(mvotg);
+
+ if (old_state != otg->state) {
+ dev_info(&mvotg->pdev->dev, "change from state %s to %s\n",
+ state_string[old_state],
+ state_string[otg->state]);
+
+ switch (otg->state) {
+ case OTG_STATE_B_IDLE:
+ mvotg->otg.default_a = 0;
+ if (old_state == OTG_STATE_B_PERIPHERAL)
+ mv_otg_start_periphrals(mvotg, 0);
+ mv_otg_reset(mvotg);
+ mv_otg_disable(mvotg);
+ break;
+ case OTG_STATE_B_PERIPHERAL:
+ mv_otg_enable(mvotg);
+ mv_otg_start_periphrals(mvotg, 1);
+ break;
+ case OTG_STATE_A_IDLE:
+ mvotg->otg.default_a = 1;
+ mv_otg_enable(mvotg);
+ if (old_state == OTG_STATE_A_WAIT_VFALL)
+ mv_otg_start_host(mvotg, 0);
+ mv_otg_reset(mvotg);
+ break;
+ case OTG_STATE_A_WAIT_VRISE:
+ mv_otg_set_vbus(&mvotg->otg, 1);
+ break;
+ case OTG_STATE_A_WAIT_BCON:
+ if (old_state != OTG_STATE_A_HOST)
+ mv_otg_start_host(mvotg, 1);
+ mv_otg_set_timer(mvotg, A_WAIT_BCON_TIMER,
+ T_A_WAIT_BCON,
+ mv_otg_timer_await_bcon);
+ /*
+ * Now we enter A_HOST directly, so set b_conn = 1
+ * here. Strictly, the host driver should notify us.
+ */
+ mvotg->otg_ctrl.b_conn = 1;
+ break;
+ case OTG_STATE_A_HOST:
+ break;
+ case OTG_STATE_A_WAIT_VFALL:
+ /*
+ * Now we have exited A_HOST, so set b_conn = 0
+ * here. Strictly, the host driver should notify us.
+ */
+ mvotg->otg_ctrl.b_conn = 0;
+ mv_otg_set_vbus(&mvotg->otg, 0);
+ break;
+ case OTG_STATE_A_VBUS_ERR:
+ break;
+ default:
+ break;
+ }
+ goto run;
+ }
+}
+
+static irqreturn_t mv_otg_irq(int irq, void *dev)
+{
+ struct mv_otg *mvotg = dev;
+ u32 otgsc;
+
+ otgsc = readl(&mvotg->op_regs->otgsc);
+ writel(otgsc, &mvotg->op_regs->otgsc);
+
+ /*
+ * if we have vbus, then the vbus detection for B-device
+ * will be done by mv_otg_inputs_irq().
+ */
+ if (mvotg->pdata->vbus)
+ if ((otgsc & OTGSC_STS_USB_ID) &&
+ !(otgsc & OTGSC_INTSTS_USB_ID))
+ return IRQ_NONE;
+
+ if ((otgsc & mvotg->irq_status) == 0)
+ return IRQ_NONE;
+
+ mv_otg_run_state_machine(mvotg, 0);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mv_otg_inputs_irq(int irq, void *dev)
+{
+ struct mv_otg *mvotg = dev;
+
+ /* The clock may be disabled at this time */
+ if (!mvotg->active) {
+ mv_otg_enable(mvotg);
+ mv_otg_init_irq(mvotg);
+ }
+
+ mv_otg_run_state_machine(mvotg, 0);
+
+ return IRQ_HANDLED;
+}
+
+static ssize_t
+get_a_bus_req(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct mv_otg *mvotg = dev_get_drvdata(dev);
+ return scnprintf(buf, PAGE_SIZE, "%d\n",
+ mvotg->otg_ctrl.a_bus_req);
+}
+
+static ssize_t
+set_a_bus_req(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct mv_otg *mvotg = dev_get_drvdata(dev);
+
+ if (count > 2)
+ return -1;
+
+ /* We will use this interface to change to A device */
+ if (mvotg->otg.state != OTG_STATE_B_IDLE
+ && mvotg->otg.state != OTG_STATE_A_IDLE)
+ return -1;
+
+ /* The clock may be disabled, and we need to set up the irq for ID detection */
+ mv_otg_enable(mvotg);
+ mv_otg_init_irq(mvotg);
+
+ if (buf[0] == '1') {
+ mvotg->otg_ctrl.a_bus_req = 1;
+ mvotg->otg_ctrl.a_bus_drop = 0;
+ dev_dbg(&mvotg->pdev->dev,
+ "User request: a_bus_req = 1\n");
+
+ if (spin_trylock(&mvotg->wq_lock)) {
+ mv_otg_run_state_machine(mvotg, 0);
+ spin_unlock(&mvotg->wq_lock);
+ }
+ }
+
+ return count;
+}
+
+static DEVICE_ATTR(a_bus_req, S_IRUGO | S_IWUSR, get_a_bus_req,
+ set_a_bus_req);
+
+static ssize_t
+set_a_clr_err(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct mv_otg *mvotg = dev_get_drvdata(dev);
+ if (!mvotg->otg.default_a)
+ return -1;
+
+ if (count > 2)
+ return -1;
+
+ if (buf[0] == '1') {
+ mvotg->otg_ctrl.a_clr_err = 1;
+ dev_dbg(&mvotg->pdev->dev,
+ "User request: a_clr_err = 1\n");
+ }
+
+ if (spin_trylock(&mvotg->wq_lock)) {
+ mv_otg_run_state_machine(mvotg, 0);
+ spin_unlock(&mvotg->wq_lock);
+ }
+
+ return count;
+}
+
+static DEVICE_ATTR(a_clr_err, S_IWUSR, NULL, set_a_clr_err);
+
+static ssize_t
+get_a_bus_drop(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct mv_otg *mvotg = dev_get_drvdata(dev);
+ return scnprintf(buf, PAGE_SIZE, "%d\n",
+ mvotg->otg_ctrl.a_bus_drop);
+}
+
+static ssize_t
+set_a_bus_drop(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct mv_otg *mvotg = dev_get_drvdata(dev);
+ if (!mvotg->otg.default_a)
+ return -1;
+
+ if (count > 2)
+ return -1;
+
+ if (buf[0] == '0') {
+ mvotg->otg_ctrl.a_bus_drop = 0;
+ dev_dbg(&mvotg->pdev->dev,
+ "User request: a_bus_drop = 0\n");
+ } else if (buf[0] == '1') {
+ mvotg->otg_ctrl.a_bus_drop = 1;
+ mvotg->otg_ctrl.a_bus_req = 0;
+ dev_dbg(&mvotg->pdev->dev,
+ "User request: a_bus_drop = 1\n");
+ dev_dbg(&mvotg->pdev->dev,
+ "User request: and a_bus_req = 0\n");
+ }
+
+ if (spin_trylock(&mvotg->wq_lock)) {
+ mv_otg_run_state_machine(mvotg, 0);
+ spin_unlock(&mvotg->wq_lock);
+ }
+
+ return count;
+}
+
+static DEVICE_ATTR(a_bus_drop, S_IRUGO | S_IWUSR,
+ get_a_bus_drop, set_a_bus_drop);
+
+static struct attribute *inputs_attrs[] = {
+ &dev_attr_a_bus_req.attr,
+ &dev_attr_a_clr_err.attr,
+ &dev_attr_a_bus_drop.attr,
+ NULL,
+};
+
+static struct attribute_group inputs_attr_group = {
+ .name = "inputs",
+ .attrs = inputs_attrs,
+};
+
+int mv_otg_remove(struct platform_device *pdev)
+{
+ struct mv_otg *mvotg = platform_get_drvdata(pdev);
+ int clk_i;
+
+ sysfs_remove_group(&mvotg->pdev->dev.kobj, &inputs_attr_group);
+
+ if (mvotg->irq)
+ free_irq(mvotg->irq, mvotg);
+
+ if (mvotg->pdata->vbus)
+ free_irq(mvotg->pdata->vbus->irq, mvotg);
+ if (mvotg->pdata->id)
+ free_irq(mvotg->pdata->id->irq, mvotg);
+
+ if (mvotg->qwork) {
+ flush_workqueue(mvotg->qwork);
+ destroy_workqueue(mvotg->qwork);
+ }
+
+ mv_otg_disable(mvotg);
+
+ if (mvotg->cap_regs)
+ iounmap(mvotg->cap_regs);
+
+ if (mvotg->phy_regs)
+ iounmap(mvotg->phy_regs);
+
+ for (clk_i = 0; clk_i <= mvotg->clknum; clk_i++)
+ clk_put(mvotg->clk[clk_i]);
+
+ otg_set_transceiver(NULL);
+ platform_set_drvdata(pdev, NULL);
+
+ kfree(mvotg);
+
+ return 0;
+}
+
+static int mv_otg_probe(struct platform_device *pdev)
+{
+ struct mv_usb_platform_data *pdata = pdev->dev.platform_data;
+ struct mv_otg *mvotg;
+ struct resource *r;
+ int retval = 0, clk_i, i;
+ size_t size;
+
+ if (pdata == NULL) {
+ dev_err(&pdev->dev, "failed to get platform data\n");
+ return -ENODEV;
+ }
+
+ size = sizeof(*mvotg) + sizeof(struct clk *) * pdata->clknum;
+ mvotg = kzalloc(size, GFP_KERNEL);
+ if (!mvotg) {
+ dev_err(&pdev->dev, "failed to allocate memory!\n");
+ return -ENOMEM;
+ }
+
+ platform_set_drvdata(pdev, mvotg);
+
+ mvotg->pdev = pdev;
+ mvotg->pdata = pdata;
+
+ mvotg->clknum = pdata->clknum;
+ for (clk_i = 0; clk_i < mvotg->clknum; clk_i++) {
+ mvotg->clk[clk_i] = clk_get(&pdev->dev, pdata->clkname[clk_i]);
+ if (IS_ERR(mvotg->clk[clk_i])) {
+ retval = PTR_ERR(mvotg->clk[clk_i]);
+ goto err_put_clk;
+ }
+ }
+
+ mvotg->qwork = create_singlethread_workqueue("mv_otg_queue");
+ if (!mvotg->qwork) {
+ dev_dbg(&pdev->dev, "cannot create workqueue for OTG\n");
+ retval = -ENOMEM;
+ goto err_put_clk;
+ }
+
+ INIT_DELAYED_WORK(&mvotg->work, mv_otg_work);
+
+ /* OTG common part */
+ mvotg->pdev = pdev;
+ mvotg->otg.dev = &pdev->dev;
+ mvotg->otg.label = driver_name;
+ mvotg->otg.set_host = mv_otg_set_host;
+ mvotg->otg.set_peripheral = mv_otg_set_peripheral;
+ mvotg->otg.set_vbus = mv_otg_set_vbus;
+ mvotg->otg.state = OTG_STATE_UNDEFINED;
+
+ for (i = 0; i < OTG_TIMER_NUM; i++)
+ init_timer(&mvotg->otg_ctrl.timer[i]);
+
+ r = platform_get_resource_byname(mvotg->pdev,
+ IORESOURCE_MEM, "phyregs");
+ if (r == NULL) {
+ dev_err(&pdev->dev, "no phy I/O memory resource defined\n");
+ retval = -ENODEV;
+ goto err_destroy_workqueue;
+ }
+
+ mvotg->phy_regs = ioremap(r->start, resource_size(r));
+ if (mvotg->phy_regs == NULL) {
+ dev_err(&pdev->dev, "failed to map phy I/O memory\n");
+ retval = -EFAULT;
+ goto err_destroy_workqueue;
+ }
+
+ r = platform_get_resource_byname(mvotg->pdev,
+ IORESOURCE_MEM, "capregs");
+ if (r == NULL) {
+ dev_err(&pdev->dev, "no I/O memory resource defined\n");
+ retval = -ENODEV;
+ goto err_unmap_phyreg;
+ }
+
+ mvotg->cap_regs = ioremap(r->start, resource_size(r));
+ if (mvotg->cap_regs == NULL) {
+ dev_err(&pdev->dev, "failed to map I/O memory\n");
+ retval = -EFAULT;
+ goto err_unmap_phyreg;
+ }
+
+ /* we will access the controller registers, so enable the udc controller */
+ retval = mv_otg_enable_internal(mvotg);
+ if (retval) {
+ dev_err(&pdev->dev, "mv otg enable error %d\n", retval);
+ goto err_unmap_capreg;
+ }
+
+ mvotg->op_regs =
+ (struct mv_otg_regs __iomem *) ((unsigned long) mvotg->cap_regs
+ + (readl(mvotg->cap_regs) & CAPLENGTH_MASK));
+
+ if (pdata->id) {
+ retval = request_threaded_irq(pdata->id->irq, NULL,
+ mv_otg_inputs_irq,
+ IRQF_ONESHOT, "id", mvotg);
+ if (retval) {
+ dev_info(&pdev->dev,
+ "Failed to request irq for ID\n");
+ pdata->id = NULL;
+ }
+ }
+
+ if (pdata->vbus) {
+ mvotg->clock_gating = 1;
+ retval = request_threaded_irq(pdata->vbus->irq, NULL,
+ mv_otg_inputs_irq,
+ IRQF_ONESHOT, "vbus", mvotg);
+ if (retval) {
+ dev_info(&pdev->dev,
+ "Failed to request irq for VBUS, "
+ "disable clock gating\n");
+ mvotg->clock_gating = 0;
+ pdata->vbus = NULL;
+ }
+ }
+
+ if (pdata->disable_otg_clock_gating)
+ mvotg->clock_gating = 0;
+
+ mv_otg_reset(mvotg);
+ mv_otg_init_irq(mvotg);
+
+ r = platform_get_resource(mvotg->pdev, IORESOURCE_IRQ, 0);
+ if (r == NULL) {
+ dev_err(&pdev->dev, "no IRQ resource defined\n");
+ retval = -ENODEV;
+ goto err_disable_clk;
+ }
+
+ mvotg->irq = r->start;
+ if (request_irq(mvotg->irq, mv_otg_irq, IRQF_SHARED,
+ driver_name, mvotg)) {
+ dev_err(&pdev->dev, "Request irq %d for OTG failed\n",
+ mvotg->irq);
+ mvotg->irq = 0;
+ retval = -ENODEV;
+ goto err_disable_clk;
+ }
+
+ retval = otg_set_transceiver(&mvotg->otg);
+ if (retval < 0) {
+ dev_err(&pdev->dev, "can't register transceiver, %d\n",
+ retval);
+ goto err_free_irq;
+ }
+
+ retval = sysfs_create_group(&pdev->dev.kobj, &inputs_attr_group);
+ if (retval < 0) {
+ dev_dbg(&pdev->dev,
+ "Can't register sysfs attr group: %d\n", retval);
+ goto err_set_transceiver;
+ }
+
+ spin_lock_init(&mvotg->wq_lock);
+ if (spin_trylock(&mvotg->wq_lock)) {
+ mv_otg_run_state_machine(mvotg, 2 * HZ);
+ spin_unlock(&mvotg->wq_lock);
+ }
+
+ dev_info(&pdev->dev,
+ "successful probe OTG device %s clock gating.\n",
+ mvotg->clock_gating ? "with" : "without");
+
+ return 0;
+
+err_set_transceiver:
+ otg_set_transceiver(NULL);
+err_free_irq:
+ free_irq(mvotg->irq, mvotg);
+err_disable_clk:
+ if (pdata->vbus)
+ free_irq(pdata->vbus->irq, mvotg);
+ if (pdata->id)
+ free_irq(pdata->id->irq, mvotg);
+ mv_otg_disable_internal(mvotg);
+err_unmap_capreg:
+ iounmap(mvotg->cap_regs);
+err_unmap_phyreg:
+ iounmap(mvotg->phy_regs);
+err_destroy_workqueue:
+ flush_workqueue(mvotg->qwork);
+ destroy_workqueue(mvotg->qwork);
+err_put_clk:
+ for (clk_i--; clk_i >= 0; clk_i--)
+ clk_put(mvotg->clk[clk_i]);
+
+ platform_set_drvdata(pdev, NULL);
+ kfree(mvotg);
+
+ return retval;
+}
+
+#ifdef CONFIG_PM
+static int mv_otg_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct mv_otg *mvotg = platform_get_drvdata(pdev);
+
+ if (mvotg->otg.state != OTG_STATE_B_IDLE) {
+ dev_info(&pdev->dev,
+ "OTG state is not B_IDLE, it is %d!\n",
+ mvotg->otg.state);
+ return -EAGAIN;
+ }
+
+ if (!mvotg->clock_gating)
+ mv_otg_disable_internal(mvotg);
+
+ return 0;
+}
+
+static int mv_otg_resume(struct platform_device *pdev)
+{
+ struct mv_otg *mvotg = platform_get_drvdata(pdev);
+ u32 otgsc;
+
+ if (!mvotg->clock_gating) {
+ mv_otg_enable_internal(mvotg);
+
+ otgsc = readl(&mvotg->op_regs->otgsc);
+ otgsc |= mvotg->irq_en;
+ writel(otgsc, &mvotg->op_regs->otgsc);
+
+ if (spin_trylock(&mvotg->wq_lock)) {
+ mv_otg_run_state_machine(mvotg, 0);
+ spin_unlock(&mvotg->wq_lock);
+ }
+ }
+ return 0;
+}
+#endif
+
+static struct platform_driver mv_otg_driver = {
+ .probe = mv_otg_probe,
+ .remove = __exit_p(mv_otg_remove),
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = driver_name,
+ },
+#ifdef CONFIG_PM
+ .suspend = mv_otg_suspend,
+ .resume = mv_otg_resume,
+#endif
+};
+
+static int __init mv_otg_init(void)
+{
+ return platform_driver_register(&mv_otg_driver);
+}
+
+static void __exit mv_otg_exit(void)
+{
+ platform_driver_unregister(&mv_otg_driver);
+}
+
+module_init(mv_otg_init);
+module_exit(mv_otg_exit);
diff --git a/drivers/usb/otg/mv_otg.h b/drivers/usb/otg/mv_otg.h
new file mode 100644
index 000000000000..be6ca1437645
--- /dev/null
+++ b/drivers/usb/otg/mv_otg.h
@@ -0,0 +1,165 @@
+/*
+ * Copyright (C) 2011 Marvell International Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __MV_USB_OTG_CONTROLLER__
+#define __MV_USB_OTG_CONTROLLER__
+
+#include <linux/types.h>
+
+/* Command Register Bit Masks */
+#define USBCMD_RUN_STOP (0x00000001)
+#define USBCMD_CTRL_RESET (0x00000002)
+
+/* otgsc Register Bit Masks */
+#define OTGSC_CTRL_VUSB_DISCHARGE 0x00000001
+#define OTGSC_CTRL_VUSB_CHARGE 0x00000002
+#define OTGSC_CTRL_OTG_TERM 0x00000008
+#define OTGSC_CTRL_DATA_PULSING 0x00000010
+#define OTGSC_STS_USB_ID 0x00000100
+#define OTGSC_STS_A_VBUS_VALID 0x00000200
+#define OTGSC_STS_A_SESSION_VALID 0x00000400
+#define OTGSC_STS_B_SESSION_VALID 0x00000800
+#define OTGSC_STS_B_SESSION_END 0x00001000
+#define OTGSC_STS_1MS_TOGGLE 0x00002000
+#define OTGSC_STS_DATA_PULSING 0x00004000
+#define OTGSC_INTSTS_USB_ID 0x00010000
+#define OTGSC_INTSTS_A_VBUS_VALID 0x00020000
+#define OTGSC_INTSTS_A_SESSION_VALID 0x00040000
+#define OTGSC_INTSTS_B_SESSION_VALID 0x00080000
+#define OTGSC_INTSTS_B_SESSION_END 0x00100000
+#define OTGSC_INTSTS_1MS 0x00200000
+#define OTGSC_INTSTS_DATA_PULSING 0x00400000
+#define OTGSC_INTR_USB_ID 0x01000000
+#define OTGSC_INTR_A_VBUS_VALID 0x02000000
+#define OTGSC_INTR_A_SESSION_VALID 0x04000000
+#define OTGSC_INTR_B_SESSION_VALID 0x08000000
+#define OTGSC_INTR_B_SESSION_END 0x10000000
+#define OTGSC_INTR_1MS_TIMER 0x20000000
+#define OTGSC_INTR_DATA_PULSING 0x40000000
+
+#define CAPLENGTH_MASK (0xff)
+
+/* Timer intervals, unit: 10ms */
+#define T_A_WAIT_VRISE 100
+#define T_A_WAIT_BCON 2000
+#define T_A_AIDL_BDIS 100
+#define T_A_BIDL_ADIS 20
+#define T_B_ASE0_BRST 400
+#define T_B_SE0_SRP 300
+#define T_B_SRP_FAIL 2000
+#define T_B_DATA_PLS 10
+#define T_B_SRP_INIT 100
+#define T_A_SRP_RSPNS 10
+#define T_A_DRV_RSM 5
+
+enum otg_function {
+ OTG_B_DEVICE = 0,
+ OTG_A_DEVICE
+};
+
+enum mv_otg_timer {
+ A_WAIT_BCON_TIMER = 0,
+ OTG_TIMER_NUM
+};
+
+/* PXA OTG state machine */
+struct mv_otg_ctrl {
+ /* internal variables */
+ u8 a_set_b_hnp_en; /* A-Device set b_hnp_en */
+ u8 b_srp_done;
+ u8 b_hnp_en;
+
+ /* OTG inputs */
+ u8 a_bus_drop;
+ u8 a_bus_req;
+ u8 a_clr_err;
+ u8 a_bus_resume;
+ u8 a_bus_suspend;
+ u8 a_conn;
+ u8 a_sess_vld;
+ u8 a_srp_det;
+ u8 a_vbus_vld;
+ u8 b_bus_req; /* B-Device Require Bus */
+ u8 b_bus_resume;
+ u8 b_bus_suspend;
+ u8 b_conn;
+ u8 b_se0_srp;
+ u8 b_sess_end;
+ u8 b_sess_vld;
+ u8 id;
+ u8 a_suspend_req;
+
+	/* Timer event */
+ u8 a_aidl_bdis_timeout;
+ u8 b_ase0_brst_timeout;
+ u8 a_bidl_adis_timeout;
+ u8 a_wait_bcon_timeout;
+
+ struct timer_list timer[OTG_TIMER_NUM];
+};
+
+#define VUSBHS_MAX_PORTS 8
+
+struct mv_otg_regs {
+ u32 usbcmd; /* Command register */
+ u32 usbsts; /* Status register */
+ u32 usbintr; /* Interrupt enable */
+ u32 frindex; /* Frame index */
+ u32 reserved1[1];
+ u32 deviceaddr; /* Device Address */
+ u32 eplistaddr; /* Endpoint List Address */
+ u32 ttctrl; /* HOST TT status and control */
+ u32 burstsize; /* Programmable Burst Size */
+ u32 txfilltuning; /* Host Transmit Pre-Buffer Packet Tuning */
+ u32 reserved[4];
+ u32 epnak; /* Endpoint NAK */
+ u32 epnaken; /* Endpoint NAK Enable */
+ u32 configflag; /* Configured Flag register */
+ u32 portsc[VUSBHS_MAX_PORTS]; /* Port Status/Control x, x = 1..8 */
+ u32 otgsc;
+ u32 usbmode; /* USB Host/Device mode */
+ u32 epsetupstat; /* Endpoint Setup Status */
+ u32 epprime; /* Endpoint Initialize */
+ u32 epflush; /* Endpoint De-initialize */
+ u32 epstatus; /* Endpoint Status */
+ u32 epcomplete; /* Endpoint Interrupt On Complete */
+ u32 epctrlx[16]; /* Endpoint Control, where x = 0.. 15 */
+ u32 mcr; /* Mux Control */
+ u32 isr; /* Interrupt Status */
+ u32 ier; /* Interrupt Enable */
+};
+
+struct mv_otg {
+ struct otg_transceiver otg;
+ struct mv_otg_ctrl otg_ctrl;
+
+ /* base address */
+ void __iomem *phy_regs;
+ void __iomem *cap_regs;
+ struct mv_otg_regs __iomem *op_regs;
+
+ struct platform_device *pdev;
+ int irq;
+ u32 irq_status;
+ u32 irq_en;
+
+ struct delayed_work work;
+ struct workqueue_struct *qwork;
+
+ spinlock_t wq_lock;
+
+ struct mv_usb_platform_data *pdata;
+
+ unsigned int active;
+ unsigned int clock_gating;
+ unsigned int clknum;
+ struct clk *clk[0];
+};
+
+#endif
diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
index 08c679c0dde5..e9a5b1d2615e 100644
--- a/drivers/usb/renesas_usbhs/common.c
+++ b/drivers/usb/renesas_usbhs/common.c
@@ -95,25 +95,15 @@ struct usbhs_priv *usbhs_pdev_to_priv(struct platform_device *pdev)
/*
* syscfg functions
*/
-void usbhs_sys_clock_ctrl(struct usbhs_priv *priv, int enable)
+static void usbhs_sys_clock_ctrl(struct usbhs_priv *priv, int enable)
{
usbhs_bset(priv, SYSCFG, SCKE, enable ? SCKE : 0);
}
-void usbhs_sys_hispeed_ctrl(struct usbhs_priv *priv, int enable)
-{
- usbhs_bset(priv, SYSCFG, HSE, enable ? HSE : 0);
-}
-
-void usbhs_sys_usb_ctrl(struct usbhs_priv *priv, int enable)
-{
- usbhs_bset(priv, SYSCFG, USBE, enable ? USBE : 0);
-}
-
void usbhs_sys_host_ctrl(struct usbhs_priv *priv, int enable)
{
- u16 mask = DCFM | DRPD | DPRPU;
- u16 val = DCFM | DRPD;
+ u16 mask = DCFM | DRPD | DPRPU | HSE | USBE;
+ u16 val = DCFM | DRPD | HSE | USBE;
int has_otg = usbhs_get_dparam(priv, has_otg);
if (has_otg)
@@ -130,8 +120,8 @@ void usbhs_sys_host_ctrl(struct usbhs_priv *priv, int enable)
void usbhs_sys_function_ctrl(struct usbhs_priv *priv, int enable)
{
- u16 mask = DCFM | DRPD | DPRPU;
- u16 val = DPRPU;
+ u16 mask = DCFM | DRPD | DPRPU | HSE | USBE;
+ u16 val = DPRPU | HSE | USBE;
/*
* if enable
@@ -142,6 +132,11 @@ void usbhs_sys_function_ctrl(struct usbhs_priv *priv, int enable)
usbhs_bset(priv, SYSCFG, mask, enable ? val : 0);
}
+void usbhs_sys_set_test_mode(struct usbhs_priv *priv, u16 mode)
+{
+ usbhs_write(priv, TESTMODE, mode);
+}
+
/*
* frame functions
*/
@@ -229,7 +224,7 @@ static void usbhsc_bus_init(struct usbhs_priv *priv)
/*
* device configuration
*/
-int usbhs_set_device_speed(struct usbhs_priv *priv, int devnum,
+int usbhs_set_device_config(struct usbhs_priv *priv, int devnum,
u16 upphub, u16 hubport, u16 speed)
{
struct device *dev = usbhs_priv_to_dev(priv);
@@ -301,18 +296,25 @@ static u32 usbhsc_default_pipe_type[] = {
*/
static void usbhsc_power_ctrl(struct usbhs_priv *priv, int enable)
{
+ struct platform_device *pdev = usbhs_priv_to_pdev(priv);
struct device *dev = usbhs_priv_to_dev(priv);
if (enable) {
/* enable PM */
pm_runtime_get_sync(dev);
+ /* enable platform power */
+ usbhs_platform_call(priv, power_ctrl, pdev, priv->base, enable);
+
/* USB on */
usbhs_sys_clock_ctrl(priv, enable);
} else {
/* USB off */
usbhs_sys_clock_ctrl(priv, enable);
+ /* disable platform power */
+ usbhs_platform_call(priv, power_ctrl, pdev, priv->base, enable);
+
/* disable PM */
pm_runtime_put_sync(dev);
}
@@ -388,7 +390,7 @@ static void usbhsc_notify_hotplug(struct work_struct *work)
usbhsc_hotplug(priv);
}
-int usbhsc_drvcllbck_notify_hotplug(struct platform_device *pdev)
+static int usbhsc_drvcllbck_notify_hotplug(struct platform_device *pdev)
{
struct usbhs_priv *priv = usbhs_pdev_to_priv(pdev);
int delay = usbhs_get_dparam(priv, detection_delay);
@@ -398,7 +400,8 @@ int usbhsc_drvcllbck_notify_hotplug(struct platform_device *pdev)
* To make sure safety context,
* use workqueue for usbhs_notify_hotplug
*/
- schedule_delayed_work(&priv->notify_hotplug_work, delay);
+ schedule_delayed_work(&priv->notify_hotplug_work,
+ msecs_to_jiffies(delay));
return 0;
}
@@ -637,18 +640,7 @@ static struct platform_driver renesas_usbhs_driver = {
.remove = __devexit_p(usbhs_remove),
};
-static int __init usbhs_init(void)
-{
- return platform_driver_register(&renesas_usbhs_driver);
-}
-
-static void __exit usbhs_exit(void)
-{
- platform_driver_unregister(&renesas_usbhs_driver);
-}
-
-module_init(usbhs_init);
-module_exit(usbhs_exit);
+module_platform_driver(renesas_usbhs_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Renesas USB driver");
diff --git a/drivers/usb/renesas_usbhs/common.h b/drivers/usb/renesas_usbhs/common.h
index 8729da5c3be6..d79b3e27db95 100644
--- a/drivers/usb/renesas_usbhs/common.h
+++ b/drivers/usb/renesas_usbhs/common.h
@@ -33,6 +33,7 @@ struct usbhs_priv;
#define SYSCFG 0x0000
#define BUSWAIT 0x0002
#define DVSTCTR 0x0008
+#define TESTMODE 0x000C
#define CFIFO 0x0014
#define CFIFOSEL 0x0020
#define CFIFOCTR 0x0022
@@ -275,19 +276,15 @@ u16 usbhs_read(struct usbhs_priv *priv, u32 reg);
void usbhs_write(struct usbhs_priv *priv, u32 reg, u16 data);
void usbhs_bset(struct usbhs_priv *priv, u32 reg, u16 mask, u16 data);
-int usbhsc_drvcllbck_notify_hotplug(struct platform_device *pdev);
-
#define usbhs_lock(p, f) spin_lock_irqsave(usbhs_priv_to_lock(p), f)
#define usbhs_unlock(p, f) spin_unlock_irqrestore(usbhs_priv_to_lock(p), f)
/*
* sysconfig
*/
-void usbhs_sys_clock_ctrl(struct usbhs_priv *priv, int enable);
-void usbhs_sys_hispeed_ctrl(struct usbhs_priv *priv, int enable);
-void usbhs_sys_usb_ctrl(struct usbhs_priv *priv, int enable);
void usbhs_sys_host_ctrl(struct usbhs_priv *priv, int enable);
void usbhs_sys_function_ctrl(struct usbhs_priv *priv, int enable);
+void usbhs_sys_set_test_mode(struct usbhs_priv *priv, u16 mode);
/*
* usb request
@@ -311,7 +308,7 @@ int usbhs_frame_get_num(struct usbhs_priv *priv);
/*
* device config
*/
-int usbhs_set_device_speed(struct usbhs_priv *priv, int devnum, u16 upphub,
+int usbhs_set_device_config(struct usbhs_priv *priv, int devnum, u16 upphub,
u16 hubport, u16 speed);
/*
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index ffdf5d15085e..72339bd6fcab 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -56,7 +56,7 @@ static struct usbhs_pkt_handle usbhsf_null_handler = {
void usbhs_pkt_push(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt,
void (*done)(struct usbhs_priv *priv,
struct usbhs_pkt *pkt),
- void *buf, int len, int zero)
+ void *buf, int len, int zero, int sequence)
{
struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
struct device *dev = usbhs_priv_to_dev(priv);
@@ -90,6 +90,7 @@ void usbhs_pkt_push(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt,
pkt->zero = zero;
pkt->actual = 0;
pkt->done = done;
+ pkt->sequence = sequence;
usbhs_unlock(priv, flags);
/******************** spin unlock ******************/
@@ -481,6 +482,9 @@ static int usbhsf_pio_try_push(struct usbhs_pkt *pkt, int *is_done)
int i, ret, len;
int is_short;
+ usbhs_pipe_data_sequence(pipe, pkt->sequence);
+ pkt->sequence = -1; /* -1 sequence will be ignored */
+
ret = usbhsf_fifo_select(pipe, fifo, 1);
if (ret < 0)
return 0;
@@ -584,6 +588,8 @@ static int usbhsf_prepare_pop(struct usbhs_pkt *pkt, int *is_done)
/*
* pipe enable to prepare packet receive
*/
+ usbhs_pipe_data_sequence(pipe, pkt->sequence);
+ pkt->sequence = -1; /* -1 sequence will be ignored */
usbhs_pipe_enable(pipe);
usbhsf_rx_irq_ctrl(pipe, 1);
@@ -641,6 +647,7 @@ static int usbhsf_pio_try_pop(struct usbhs_pkt *pkt, int *is_done)
* "Operation" - "FIFO Buffer Memory" - "FIFO Port Function"
*/
if (0 == rcv_len) {
+ pkt->zero = 1;
usbhsf_fifo_clear(pipe, fifo);
goto usbhs_fifo_read_end;
}
@@ -765,10 +772,10 @@ static void usbhsf_dma_prepare_tasklet(unsigned long data)
struct dma_async_tx_descriptor *desc;
struct dma_chan *chan = usbhsf_dma_chan_get(fifo, pkt);
struct device *dev = usbhs_priv_to_dev(priv);
- enum dma_data_direction dir;
+ enum dma_transfer_direction dir;
dma_cookie_t cookie;
- dir = usbhs_pipe_is_dir_in(pipe) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+ dir = usbhs_pipe_is_dir_in(pipe) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
sg_init_table(&sg, 1);
sg_set_page(&sg, virt_to_page(pkt->dma),
diff --git a/drivers/usb/renesas_usbhs/fifo.h b/drivers/usb/renesas_usbhs/fifo.h
index 32a7b246b28d..f68609c0f489 100644
--- a/drivers/usb/renesas_usbhs/fifo.h
+++ b/drivers/usb/renesas_usbhs/fifo.h
@@ -59,6 +59,7 @@ struct usbhs_pkt {
int trans;
int actual;
int zero;
+ int sequence;
};
struct usbhs_pkt_handle {
@@ -95,7 +96,7 @@ void usbhs_pkt_init(struct usbhs_pkt *pkt);
void usbhs_pkt_push(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt,
void (*done)(struct usbhs_priv *priv,
struct usbhs_pkt *pkt),
- void *buf, int len, int zero);
+ void *buf, int len, int zero, int sequence);
struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt);
void usbhs_pkt_start(struct usbhs_pipe *pipe);
diff --git a/drivers/usb/renesas_usbhs/mod.c b/drivers/usb/renesas_usbhs/mod.c
index 053f86d70009..1b97fb12694b 100644
--- a/drivers/usb/renesas_usbhs/mod.c
+++ b/drivers/usb/renesas_usbhs/mod.c
@@ -50,7 +50,9 @@ static int usbhsm_autonomy_irq_vbus(struct usbhs_priv *priv,
{
struct platform_device *pdev = usbhs_priv_to_pdev(priv);
- return usbhsc_drvcllbck_notify_hotplug(pdev);
+ renesas_usbhs_call_notify_hotplug(pdev);
+
+ return 0;
}
void usbhs_mod_autonomy_mode(struct usbhs_priv *priv)
@@ -349,7 +351,7 @@ void usbhs_irq_callback_update(struct usbhs_priv *priv, struct usbhs_mod *mod)
if (mod->irq_attch)
intenb1 |= ATTCHE;
- if (mod->irq_attch)
+ if (mod->irq_dtch)
intenb1 |= DTCHE;
if (mod->irq_sign)
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index d9717e0bc1ff..528691d5f3e2 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -14,6 +14,7 @@
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
+#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/module.h>
@@ -44,7 +45,6 @@ struct usbhsg_uep {
struct usbhsg_gpriv {
struct usb_gadget gadget;
struct usbhs_mod mod;
- struct list_head link;
struct usbhsg_uep *uep;
int uep_size;
@@ -114,16 +114,6 @@ struct usbhsg_recip_handle {
#define usbhsg_status_clr(gp, b) (gp->status &= ~b)
#define usbhsg_status_has(gp, b) (gp->status & b)
-/* controller */
-LIST_HEAD(the_controller_link);
-
-#define usbhsg_for_each_controller(gpriv)\
- list_for_each_entry(gpriv, &the_controller_link, link)
-#define usbhsg_controller_register(gpriv)\
- list_add_tail(&(gpriv)->link, &the_controller_link)
-#define usbhsg_controller_unregister(gpriv)\
- list_del_init(&(gpriv)->link)
-
/*
* queue push/pop
*/
@@ -164,7 +154,7 @@ static void usbhsg_queue_push(struct usbhsg_uep *uep,
req->actual = 0;
req->status = -EINPROGRESS;
usbhs_pkt_push(pipe, pkt, usbhsg_queue_done,
- req->buf, req->length, req->zero);
+ req->buf, req->length, req->zero, -1);
usbhs_pkt_start(pipe);
dev_dbg(dev, "pipe %d : queue push (%d)\n",
@@ -195,7 +185,7 @@ static int usbhsg_dma_map(struct device *dev,
}
if (dma_mapping_error(dev, pkt->dma)) {
- dev_err(dev, "dma mapping error %x\n", pkt->dma);
+ dev_err(dev, "dma mapping error %llx\n", (u64)pkt->dma);
return -EIO;
}
@@ -271,6 +261,8 @@ static int usbhsg_recip_handler_std_clear_endpoint(struct usbhs_priv *priv,
usbhsg_recip_handler_std_control_done(priv, uep, ctrl);
+ usbhs_pkt_start(pipe);
+
return 0;
}
@@ -282,6 +274,145 @@ struct usbhsg_recip_handle req_clear_feature = {
};
/*
+ * USB_TYPE_STANDARD / set feature functions
+ */
+static int usbhsg_recip_handler_std_set_device(struct usbhs_priv *priv,
+ struct usbhsg_uep *uep,
+ struct usb_ctrlrequest *ctrl)
+{
+ switch (le16_to_cpu(ctrl->wValue)) {
+ case USB_DEVICE_TEST_MODE:
+ usbhsg_recip_handler_std_control_done(priv, uep, ctrl);
+ udelay(100);
+		usbhs_sys_set_test_mode(priv, le16_to_cpu(ctrl->wIndex) >> 8);
+ break;
+ default:
+ usbhsg_recip_handler_std_control_done(priv, uep, ctrl);
+ break;
+ }
+
+ return 0;
+}
+
+static int usbhsg_recip_handler_std_set_endpoint(struct usbhs_priv *priv,
+ struct usbhsg_uep *uep,
+ struct usb_ctrlrequest *ctrl)
+{
+ struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
+
+ usbhs_pipe_stall(pipe);
+
+ usbhsg_recip_handler_std_control_done(priv, uep, ctrl);
+
+ return 0;
+}
+
+struct usbhsg_recip_handle req_set_feature = {
+ .name = "set feature",
+ .device = usbhsg_recip_handler_std_set_device,
+ .interface = usbhsg_recip_handler_std_control_done,
+ .endpoint = usbhsg_recip_handler_std_set_endpoint,
+};
+
+/*
+ * USB_TYPE_STANDARD / get status functions
+ */
+static void __usbhsg_recip_send_complete(struct usb_ep *ep,
+ struct usb_request *req)
+{
+ struct usbhsg_request *ureq = usbhsg_req_to_ureq(req);
+
+ /* free allocated recip-buffer/usb_request */
+ kfree(ureq->pkt.buf);
+ usb_ep_free_request(ep, req);
+}
+
+static void __usbhsg_recip_send_status(struct usbhsg_gpriv *gpriv,
+ unsigned short status)
+{
+ struct usbhsg_uep *dcp = usbhsg_gpriv_to_dcp(gpriv);
+ struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(dcp);
+ struct device *dev = usbhsg_gpriv_to_dev(gpriv);
+ struct usb_request *req;
+ unsigned short *buf;
+
+ /* alloc new usb_request for recip */
+ req = usb_ep_alloc_request(&dcp->ep, GFP_ATOMIC);
+ if (!req) {
+ dev_err(dev, "recip request allocation fail\n");
+ return;
+ }
+
+ /* alloc recip data buffer */
+ buf = kmalloc(sizeof(*buf), GFP_ATOMIC);
+ if (!buf) {
+ usb_ep_free_request(&dcp->ep, req);
+ dev_err(dev, "recip data allocation fail\n");
+ return;
+ }
+
+ /* recip data is status */
+ *buf = cpu_to_le16(status);
+
+ /* allocated usb_request/buffer will be freed */
+ req->complete = __usbhsg_recip_send_complete;
+ req->buf = buf;
+ req->length = sizeof(*buf);
+ req->zero = 0;
+
+ /* push packet */
+ pipe->handler = &usbhs_fifo_pio_push_handler;
+ usbhsg_queue_push(dcp, usbhsg_req_to_ureq(req));
+}
+
+static int usbhsg_recip_handler_std_get_device(struct usbhs_priv *priv,
+ struct usbhsg_uep *uep,
+ struct usb_ctrlrequest *ctrl)
+{
+ struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
+ unsigned short status = 1 << USB_DEVICE_SELF_POWERED;
+
+ __usbhsg_recip_send_status(gpriv, status);
+
+ return 0;
+}
+
+static int usbhsg_recip_handler_std_get_interface(struct usbhs_priv *priv,
+ struct usbhsg_uep *uep,
+ struct usb_ctrlrequest *ctrl)
+{
+ struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
+ unsigned short status = 0;
+
+ __usbhsg_recip_send_status(gpriv, status);
+
+ return 0;
+}
+
+static int usbhsg_recip_handler_std_get_endpoint(struct usbhs_priv *priv,
+ struct usbhsg_uep *uep,
+ struct usb_ctrlrequest *ctrl)
+{
+ struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
+ struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
+ unsigned short status = 0;
+
+ if (usbhs_pipe_is_stall(pipe))
+ status = 1 << USB_ENDPOINT_HALT;
+
+ __usbhsg_recip_send_status(gpriv, status);
+
+ return 0;
+}
+
+struct usbhsg_recip_handle req_get_status = {
+ .name = "get status",
+ .device = usbhsg_recip_handler_std_get_device,
+ .interface = usbhsg_recip_handler_std_get_interface,
+ .endpoint = usbhsg_recip_handler_std_get_endpoint,
+};
+
+/*
* USB_TYPE handler
*/
static int usbhsg_recip_run_handle(struct usbhs_priv *priv,
@@ -303,8 +434,7 @@ static int usbhsg_recip_run_handle(struct usbhs_priv *priv,
pipe = usbhsg_uep_to_pipe(uep);
if (!pipe) {
dev_err(dev, "wrong recip request\n");
- ret = -EINVAL;
- goto usbhsg_recip_run_handle_end;
+ return -EINVAL;
}
switch (recip) {
@@ -327,20 +457,10 @@ static int usbhsg_recip_run_handle(struct usbhs_priv *priv,
}
if (func) {
- unsigned long flags;
-
dev_dbg(dev, "%s (pipe %d :%s)\n", handler->name, nth, msg);
-
- /******************** spin lock ********************/
- usbhs_lock(priv, flags);
ret = func(priv, uep, ctrl);
- usbhs_unlock(priv, flags);
- /******************** spin unlock ******************/
}
-usbhsg_recip_run_handle_end:
- usbhs_pkt_start(pipe);
-
return ret;
}
@@ -412,6 +532,12 @@ static int usbhsg_irq_ctrl_stage(struct usbhs_priv *priv,
case USB_REQ_CLEAR_FEATURE:
recip_handler = &req_clear_feature;
break;
+ case USB_REQ_SET_FEATURE:
+ recip_handler = &req_set_feature;
+ break;
+ case USB_REQ_GET_STATUS:
+ recip_handler = &req_get_status;
+ break;
}
}
@@ -439,14 +565,16 @@ static int usbhsg_pipe_disable(struct usbhsg_uep *uep)
struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
struct usbhs_pkt *pkt;
- usbhs_pipe_disable(pipe);
-
while (1) {
pkt = usbhs_pkt_pop(pipe, NULL);
if (!pkt)
break;
+
+ usbhsg_queue_pop(uep, usbhsg_pkt_to_ureq(pkt), -ECONNRESET);
}
+ usbhs_pipe_disable(pipe);
+
return 0;
}
@@ -681,9 +809,7 @@ static int usbhsg_try_start(struct usbhs_priv *priv, u32 status)
* - function
* - usb module
*/
- usbhs_sys_hispeed_ctrl(priv, 1);
usbhs_sys_function_ctrl(priv, 1);
- usbhs_sys_usb_ctrl(priv, 1);
/*
* enable irq callback
@@ -731,9 +857,8 @@ static int usbhsg_try_stop(struct usbhs_priv *priv, u32 status)
gpriv->gadget.speed = USB_SPEED_UNKNOWN;
/* disable sys */
- usbhs_sys_hispeed_ctrl(priv, 0);
+ usbhs_sys_set_test_mode(priv, 0);
usbhs_sys_function_ctrl(priv, 0);
- usbhs_sys_usb_ctrl(priv, 0);
usbhsg_pipe_disable(dcp);
@@ -751,53 +876,32 @@ static int usbhsg_gadget_start(struct usb_gadget *gadget,
struct usb_gadget_driver *driver)
{
struct usbhsg_gpriv *gpriv = usbhsg_gadget_to_gpriv(gadget);
- struct usbhs_priv *priv;
- struct device *dev;
- int ret;
+ struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
if (!driver ||
!driver->setup ||
- driver->speed != USB_SPEED_HIGH)
+ driver->max_speed < USB_SPEED_FULL)
return -EINVAL;
- dev = usbhsg_gpriv_to_dev(gpriv);
- priv = usbhsg_gpriv_to_priv(gpriv);
-
/* first hook up the driver ... */
gpriv->driver = driver;
gpriv->gadget.dev.driver = &driver->driver;
- ret = device_add(&gpriv->gadget.dev);
- if (ret) {
- dev_err(dev, "device_add error %d\n", ret);
- goto add_fail;
- }
-
return usbhsg_try_start(priv, USBHSG_STATUS_REGISTERD);
-
-add_fail:
- gpriv->driver = NULL;
- gpriv->gadget.dev.driver = NULL;
-
- return ret;
}
static int usbhsg_gadget_stop(struct usb_gadget *gadget,
struct usb_gadget_driver *driver)
{
struct usbhsg_gpriv *gpriv = usbhsg_gadget_to_gpriv(gadget);
- struct usbhs_priv *priv;
- struct device *dev;
+ struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
if (!driver ||
!driver->unbind)
return -EINVAL;
- dev = usbhsg_gpriv_to_dev(gpriv);
- priv = usbhsg_gpriv_to_priv(gpriv);
-
usbhsg_try_stop(priv, USBHSG_STATUS_REGISTERD);
- device_del(&gpriv->gadget.dev);
+ gpriv->gadget.dev.driver = NULL;
gpriv->driver = NULL;
return 0;
@@ -827,6 +931,13 @@ static int usbhsg_start(struct usbhs_priv *priv)
static int usbhsg_stop(struct usbhs_priv *priv)
{
+ struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv);
+
+ /* cable disconnect */
+ if (gpriv->driver &&
+ gpriv->driver->disconnect)
+ gpriv->driver->disconnect(&gpriv->gadget);
+
return usbhsg_try_stop(priv, USBHSG_STATUS_STARTED);
}
@@ -876,12 +987,14 @@ int usbhs_mod_gadget_probe(struct usbhs_priv *priv)
/*
* init gadget
*/
- device_initialize(&gpriv->gadget.dev);
dev_set_name(&gpriv->gadget.dev, "gadget");
gpriv->gadget.dev.parent = dev;
gpriv->gadget.name = "renesas_usbhs_udc";
gpriv->gadget.ops = &usbhsg_gadget_ops;
- gpriv->gadget.is_dualspeed = 1;
+ gpriv->gadget.max_speed = USB_SPEED_HIGH;
+ ret = device_register(&gpriv->gadget.dev);
+ if (ret < 0)
+ goto err_add_udc;
INIT_LIST_HEAD(&gpriv->gadget.ep_list);
@@ -908,16 +1021,17 @@ int usbhs_mod_gadget_probe(struct usbhs_priv *priv)
}
}
- usbhsg_controller_register(gpriv);
-
ret = usb_add_gadget_udc(dev, &gpriv->gadget);
if (ret)
- goto err_add_udc;
+ goto err_register;
dev_info(dev, "gadget probed\n");
return 0;
+
+err_register:
+ device_unregister(&gpriv->gadget.dev);
err_add_udc:
kfree(gpriv->uep);
@@ -933,7 +1047,7 @@ void usbhs_mod_gadget_remove(struct usbhs_priv *priv)
usb_del_gadget_udc(&gpriv->gadget);
- usbhsg_controller_unregister(gpriv);
+ device_unregister(&gpriv->gadget.dev);
kfree(gpriv->uep);
kfree(gpriv);
diff --git a/drivers/usb/renesas_usbhs/mod_host.c b/drivers/usb/renesas_usbhs/mod_host.c
index bade761a1e52..1834cf50888c 100644
--- a/drivers/usb/renesas_usbhs/mod_host.c
+++ b/drivers/usb/renesas_usbhs/mod_host.c
@@ -45,36 +45,34 @@
*
* +--------+ pipes are reused for each uep.
* | udev 1 |-+- [uep 0 (dcp) ] --+ pipe will be switched when
- * +--------+ | | target device was changed
+ * +--------+ | | other device requested
* +- [uep 1 (bulk)] --|---+ +--------------+
* | +--------------> | pipe0 (dcp) |
- * +- [uep 2 (bulk)] --|---|---+ +--------------+
- * | | | | pipe1 (isoc) |
- * +--------+ | | | +--------------+
- * | udev 2 |-+- [uep 0 (dcp) ] --+ +-- |------> | pipe2 (bulk) |
- * +--------+ | | | | +--------------+
- * +- [uep 1 (int) ] --|-+ | +------> | pipe3 (bulk) |
- * | | | | +--------------+
- * +--------+ | +-|---|------> | pipe4 (int) |
- * | udev 3 |-+- [uep 0 (dcp) ] --+ | | +--------------+
- * +--------+ | | | | .... |
- * +- [uep 1 (bulk)] ------+ | | .... |
+ * +- [uep 2 (bulk)] -@ | +--------------+
+ * | | pipe1 (isoc) |
+ * +--------+ | +--------------+
+ * | udev 2 |-+- [uep 0 (dcp) ] -@ +----------> | pipe2 (bulk) |
+ * +--------+ | +--------------+
+ * +- [uep 1 (int) ] ----+ +------> | pipe3 (bulk) |
+ * | | +--------------+
+ * +--------+ +-----|------> | pipe4 (int) |
+ * | udev 3 |-+- [uep 0 (dcp) ] -@ | +--------------+
+ * +--------+ | | | .... |
+ * +- [uep 1 (bulk)] -@ | | .... |
* | |
* +- [uep 2 (bulk)]-----------+
+ *
+ * @ : uep requested free pipe, but all have been used.
+ *        now it is waiting for a free pipe
*/
/*
* struct
*/
-struct usbhsh_pipe_info {
- unsigned int usr_cnt; /* see usbhsh_endpoint_alloc() */
-};
-
struct usbhsh_request {
struct urb *urb;
struct usbhs_pkt pkt;
- struct list_head ureq_link; /* see hpriv :: ureq_link_xxx */
};
struct usbhsh_device {
@@ -83,11 +81,10 @@ struct usbhsh_device {
};
struct usbhsh_ep {
- struct usbhs_pipe *pipe;
+ struct usbhs_pipe *pipe; /* attached pipe */
struct usbhsh_device *udev; /* attached udev */
+ struct usb_host_endpoint *ep;
struct list_head ep_list; /* list to usbhsh_device */
-
- int maxp;
};
#define USBHSH_DEVICE_MAX 10 /* see DEVADDn / DCPMAXP / PIPEMAXP */
@@ -98,16 +95,9 @@ struct usbhsh_hpriv {
struct usbhsh_device udev[USBHSH_DEVICE_MAX];
- struct usbhsh_pipe_info *pipe_info;
- int pipe_size;
-
u32 port_stat; /* USB_PORT_STAT_xxx */
struct completion setup_ack_done;
-
- /* see usbhsh_req_alloc/free */
- struct list_head ureq_link_active;
- struct list_head ureq_link_free;
};
@@ -119,17 +109,6 @@ static const char usbhsh_hcd_name[] = "renesas_usbhs host";
#define usbhsh_priv_to_hpriv(priv) \
container_of(usbhs_mod_get(priv, USBHS_HOST), struct usbhsh_hpriv, mod)
-#define __usbhsh_for_each_hpipe(start, pos, h, i) \
- for (i = start, pos = (h)->hpipe + i; \
- i < (h)->hpipe_size; \
- i++, pos = (h)->hpipe + i)
-
-#define usbhsh_for_each_hpipe(pos, hpriv, i) \
- __usbhsh_for_each_hpipe(1, pos, hpriv, i)
-
-#define usbhsh_for_each_hpipe_with_dcp(pos, hpriv, i) \
- __usbhsh_for_each_hpipe(0, pos, hpriv, i)
-
#define __usbhsh_for_each_udev(start, pos, h, i) \
for (i = start, pos = (h)->udev + i; \
i < USBHSH_DEVICE_MAX; \
@@ -152,15 +131,20 @@ static const char usbhsh_hcd_name[] = "renesas_usbhs host";
#define usbhsh_ep_to_uep(u) ((u)->hcpriv)
#define usbhsh_uep_to_pipe(u) ((u)->pipe)
#define usbhsh_uep_to_udev(u) ((u)->udev)
+#define usbhsh_uep_to_ep(u) ((u)->ep)
+
#define usbhsh_urb_to_ureq(u) ((u)->hcpriv)
#define usbhsh_urb_to_usbv(u) ((u)->dev)
#define usbhsh_usbv_to_udev(d) dev_get_drvdata(&(d)->dev)
#define usbhsh_udev_to_usbv(h) ((h)->usbv)
+#define usbhsh_udev_is_used(h) usbhsh_udev_to_usbv(h)
-#define usbhsh_pipe_info(p) ((p)->mod_private)
+#define usbhsh_pipe_to_uep(p) ((p)->mod_private)
+#define usbhsh_device_parent(d) (usbhsh_usbv_to_udev((d)->usbv->parent))
+#define usbhsh_device_hubport(d) ((d)->usbv->portnum)
#define usbhsh_device_number(h, d) ((int)((d) - (h)->udev))
#define usbhsh_device_nth(h, d) ((h)->udev + d)
#define usbhsh_device0(h) usbhsh_device_nth(h, 0)
@@ -170,38 +154,13 @@ static const char usbhsh_hcd_name[] = "renesas_usbhs host";
#define usbhsh_port_stat_clear(h, s) ((h)->port_stat &= ~(s))
#define usbhsh_port_stat_get(h) ((h)->port_stat)
-#define usbhsh_pkt_to_req(p) \
+#define usbhsh_pkt_to_ureq(p) \
container_of((void *)p, struct usbhsh_request, pkt)
/*
* req alloc/free
*/
-static void usbhsh_req_list_init(struct usbhsh_hpriv *hpriv)
-{
- INIT_LIST_HEAD(&hpriv->ureq_link_active);
- INIT_LIST_HEAD(&hpriv->ureq_link_free);
-}
-
-static void usbhsh_req_list_quit(struct usbhsh_hpriv *hpriv)
-{
- struct usb_hcd *hcd = usbhsh_hpriv_to_hcd(hpriv);
- struct device *dev = usbhsh_hcd_to_dev(hcd);
- struct usbhsh_request *ureq, *next;
-
- /* kfree all active ureq */
- list_for_each_entry_safe(ureq, next,
- &hpriv->ureq_link_active,
- ureq_link) {
- dev_err(dev, "active ureq (%p) is force freed\n", ureq);
- kfree(ureq);
- }
-
- /* kfree all free ureq */
- list_for_each_entry_safe(ureq, next, &hpriv->ureq_link_free, ureq_link)
- kfree(ureq);
-}
-
-static struct usbhsh_request *usbhsh_req_alloc(struct usbhsh_hpriv *hpriv,
+static struct usbhsh_request *usbhsh_ureq_alloc(struct usbhsh_hpriv *hpriv,
struct urb *urb,
gfp_t mem_flags)
{
@@ -209,270 +168,460 @@ static struct usbhsh_request *usbhsh_req_alloc(struct usbhsh_hpriv *hpriv,
struct usbhs_priv *priv = usbhsh_hpriv_to_priv(hpriv);
struct device *dev = usbhs_priv_to_dev(priv);
- if (list_empty(&hpriv->ureq_link_free)) {
- /*
- * create new one if there is no free ureq
- */
- ureq = kzalloc(sizeof(struct usbhsh_request), mem_flags);
- if (ureq)
- INIT_LIST_HEAD(&ureq->ureq_link);
- } else {
- /*
- * reuse "free" ureq if exist
- */
- ureq = list_entry(hpriv->ureq_link_free.next,
- struct usbhsh_request,
- ureq_link);
- if (ureq)
- list_del_init(&ureq->ureq_link);
- }
-
+ ureq = kzalloc(sizeof(struct usbhsh_request), mem_flags);
if (!ureq) {
dev_err(dev, "ureq alloc fail\n");
return NULL;
}
usbhs_pkt_init(&ureq->pkt);
-
- /*
- * push it to "active" list
- */
- list_add_tail(&ureq->ureq_link, &hpriv->ureq_link_active);
ureq->urb = urb;
+ usbhsh_urb_to_ureq(urb) = ureq;
return ureq;
}
-static void usbhsh_req_free(struct usbhsh_hpriv *hpriv,
+static void usbhsh_ureq_free(struct usbhsh_hpriv *hpriv,
struct usbhsh_request *ureq)
{
- struct usbhs_pkt *pkt = &ureq->pkt;
+ usbhsh_urb_to_ureq(ureq->urb) = NULL;
+ ureq->urb = NULL;
- usbhs_pkt_init(pkt);
+ kfree(ureq);
+}
+/*
+ * status
+ */
+static int usbhsh_is_running(struct usbhsh_hpriv *hpriv)
+{
/*
- * removed from "active" list,
- * and push it to "free" list
+	 * we can decide whether a device is attached or not
+	 * by checking mod.irq_attch
+ * see
+ * usbhsh_irq_attch()
+ * usbhsh_irq_dtch()
*/
- ureq->urb = NULL;
- list_del_init(&ureq->ureq_link);
- list_add_tail(&ureq->ureq_link, &hpriv->ureq_link_free);
+ return (hpriv->mod.irq_attch == NULL);
}
/*
- * device control
+ * pipe control
*/
-
-static int usbhsh_device_has_endpoint(struct usbhsh_device *udev)
+static void usbhsh_endpoint_sequence_save(struct usbhsh_hpriv *hpriv,
+ struct urb *urb,
+ struct usbhs_pkt *pkt)
{
- return !list_empty(&udev->ep_list_head);
-}
+ int len = urb->actual_length;
+ int maxp = usb_endpoint_maxp(&urb->ep->desc);
+ int t = 0;
-static struct usbhsh_device *usbhsh_device_alloc(struct usbhsh_hpriv *hpriv,
- struct urb *urb)
-{
- struct usbhsh_device *udev = NULL;
- struct usb_hcd *hcd = usbhsh_hpriv_to_hcd(hpriv);
- struct device *dev = usbhsh_hcd_to_dev(hcd);
- struct usb_device *usbv = usbhsh_urb_to_usbv(urb);
- struct usbhs_priv *priv = usbhsh_hpriv_to_priv(hpriv);
- int i;
+ /* DCP is out of sequence control */
+ if (usb_pipecontrol(urb->pipe))
+ return;
/*
- * device 0
+	 * renesas_usbhs has a limited number of pipes.
+	 * So, the driver should re-use the limited pipes for each device/endpoint.
+ * DATA0/1 sequence should be saved for it.
+ * see [image of mod_host]
+ * [HARDWARE LIMITATION]
*/
- if (0 == usb_pipedevice(urb->pipe)) {
- udev = usbhsh_device0(hpriv);
- goto usbhsh_device_find;
- }
/*
- * find unused device
+ * next sequence depends on actual_length
+ *
+ * ex) actual_length = 1147, maxp = 512
+ * data0 : 512
+ * data1 : 512
+ * data0 : 123
+ * data1 is the next sequence
*/
- usbhsh_for_each_udev(udev, hpriv, i) {
- if (usbhsh_udev_to_usbv(udev))
- continue;
- goto usbhsh_device_find;
+ t = len / maxp;
+ if (len % maxp)
+ t++;
+ if (pkt->zero)
+ t++;
+ t %= 2;
+
+ if (t)
+ usb_dotoggle(urb->dev,
+ usb_pipeendpoint(urb->pipe),
+ usb_pipeout(urb->pipe));
+}
+
+static struct usbhsh_device *usbhsh_device_get(struct usbhsh_hpriv *hpriv,
+ struct urb *urb);
+
+static int usbhsh_pipe_attach(struct usbhsh_hpriv *hpriv,
+ struct urb *urb)
+{
+ struct usbhs_priv *priv = usbhsh_hpriv_to_priv(hpriv);
+ struct usbhsh_ep *uep = usbhsh_ep_to_uep(urb->ep);
+ struct usbhsh_device *udev = usbhsh_device_get(hpriv, urb);
+ struct usbhs_pipe *pipe;
+ struct usb_endpoint_descriptor *desc = &urb->ep->desc;
+ struct device *dev = usbhs_priv_to_dev(priv);
+ unsigned long flags;
+ int dir_in_req = !!usb_pipein(urb->pipe);
+ int is_dcp = usb_endpoint_xfer_control(desc);
+ int i, dir_in;
+ int ret = -EBUSY;
+
+ /******************** spin lock ********************/
+ usbhs_lock(priv, flags);
+
+ if (unlikely(usbhsh_uep_to_pipe(uep))) {
+ dev_err(dev, "uep already has pipe\n");
+ goto usbhsh_pipe_attach_done;
}
- dev_err(dev, "no free usbhsh_device\n");
+ usbhs_for_each_pipe_with_dcp(pipe, priv, i) {
- return NULL;
+ /* check pipe type */
+ if (!usbhs_pipe_type_is(pipe, usb_endpoint_type(desc)))
+ continue;
-usbhsh_device_find:
- if (usbhsh_device_has_endpoint(udev))
- dev_warn(dev, "udev have old endpoint\n");
+ /* check pipe direction if normal pipe */
+ if (!is_dcp) {
+ dir_in = !!usbhs_pipe_is_dir_in(pipe);
+ if (0 != (dir_in - dir_in_req))
+ continue;
+ }
- /* uep will be attached */
- INIT_LIST_HEAD(&udev->ep_list_head);
+ /* check pipe is free */
+ if (usbhsh_pipe_to_uep(pipe))
+ continue;
- /*
- * usbhsh_usbv_to_udev()
- * usbhsh_udev_to_usbv()
- * will be enable
- */
- dev_set_drvdata(&usbv->dev, udev);
- udev->usbv = usbv;
+ /*
+ * attach pipe to uep
+ *
+ * usbhs_pipe_config_update() should be called after
+ * usbhs_set_device_config()
+ * see
+ * DCPMAXP/PIPEMAXP
+ */
+ usbhsh_uep_to_pipe(uep) = pipe;
+ usbhsh_pipe_to_uep(pipe) = uep;
- /* set device config */
- usbhs_set_device_speed(priv,
- usbhsh_device_number(hpriv, udev),
- usbhsh_device_number(hpriv, udev),
- 0, /* FIXME no parent */
- usbv->speed);
+ usbhs_pipe_config_update(pipe,
+ usbhsh_device_number(hpriv, udev),
+ usb_endpoint_num(desc),
+ usb_endpoint_maxp(desc));
- dev_dbg(dev, "%s [%d](%p)\n", __func__,
- usbhsh_device_number(hpriv, udev), udev);
+ dev_dbg(dev, "%s [%d-%d(%s:%s)]\n", __func__,
+ usbhsh_device_number(hpriv, udev),
+ usb_endpoint_num(desc),
+ usbhs_pipe_name(pipe),
+ dir_in_req ? "in" : "out");
- return udev;
+ ret = 0;
+ break;
+ }
+
+usbhsh_pipe_attach_done:
+ usbhs_unlock(priv, flags);
+ /******************** spin unlock ******************/
+
+ return ret;
}
-static void usbhsh_device_free(struct usbhsh_hpriv *hpriv,
- struct usbhsh_device *udev)
+static void usbhsh_pipe_detach(struct usbhsh_hpriv *hpriv,
+ struct usbhsh_ep *uep)
{
- struct usb_hcd *hcd = usbhsh_hpriv_to_hcd(hpriv);
- struct device *dev = usbhsh_hcd_to_dev(hcd);
- struct usb_device *usbv = usbhsh_udev_to_usbv(udev);
+ struct usbhs_priv *priv = usbhsh_hpriv_to_priv(hpriv);
+ struct usbhs_pipe *pipe;
+ struct device *dev = usbhs_priv_to_dev(priv);
+ unsigned long flags;
- dev_dbg(dev, "%s [%d](%p)\n", __func__,
- usbhsh_device_number(hpriv, udev), udev);
+ /******************** spin lock ********************/
+ usbhs_lock(priv, flags);
- if (usbhsh_device_has_endpoint(udev))
- dev_warn(dev, "udev still have endpoint\n");
+ pipe = usbhsh_uep_to_pipe(uep);
- /*
- * usbhsh_usbv_to_udev()
- * usbhsh_udev_to_usbv()
- * will be disable
- */
- dev_set_drvdata(&usbv->dev, NULL);
- udev->usbv = NULL;
+ if (unlikely(!pipe)) {
+ dev_err(dev, "uep doens't have pipe\n");
+ } else {
+ struct usb_host_endpoint *ep = usbhsh_uep_to_ep(uep);
+ struct usbhsh_device *udev = usbhsh_uep_to_udev(uep);
+
+ /* detach pipe from uep */
+ usbhsh_uep_to_pipe(uep) = NULL;
+ usbhsh_pipe_to_uep(pipe) = NULL;
+
+ dev_dbg(dev, "%s [%d-%d(%s)]\n", __func__,
+ usbhsh_device_number(hpriv, udev),
+ usb_endpoint_num(&ep->desc),
+ usbhs_pipe_name(pipe));
+ }
+
+ usbhs_unlock(priv, flags);
+ /******************** spin unlock ******************/
}
/*
- * end-point control
+ * endpoint control
*/
-struct usbhsh_ep *usbhsh_endpoint_alloc(struct usbhsh_hpriv *hpriv,
- struct usbhsh_device *udev,
- struct usb_host_endpoint *ep,
- int dir_in_req,
- gfp_t mem_flags)
+static int usbhsh_endpoint_attach(struct usbhsh_hpriv *hpriv,
+ struct urb *urb,
+ gfp_t mem_flags)
{
struct usbhs_priv *priv = usbhsh_hpriv_to_priv(hpriv);
- struct usb_hcd *hcd = usbhsh_hpriv_to_hcd(hpriv);
+ struct usbhsh_device *udev = usbhsh_device_get(hpriv, urb);
+ struct usb_host_endpoint *ep = urb->ep;
struct usbhsh_ep *uep;
- struct usbhsh_pipe_info *info;
- struct usbhs_pipe *pipe, *best_pipe;
- struct device *dev = usbhsh_hcd_to_dev(hcd);
+ struct device *dev = usbhs_priv_to_dev(priv);
struct usb_endpoint_descriptor *desc = &ep->desc;
- int type, i, dir_in;
- unsigned int min_usr;
-
- dir_in_req = !!dir_in_req;
+ unsigned long flags;
uep = kzalloc(sizeof(struct usbhsh_ep), mem_flags);
if (!uep) {
dev_err(dev, "usbhsh_ep alloc fail\n");
- return NULL;
+ return -ENOMEM;
}
- if (usb_endpoint_xfer_control(desc)) {
- best_pipe = usbhsh_hpriv_to_dcp(hpriv);
- goto usbhsh_endpoint_alloc_find_pipe;
+ /******************** spin lock ********************/
+ usbhs_lock(priv, flags);
+
+ /*
+ * init endpoint
+ */
+ INIT_LIST_HEAD(&uep->ep_list);
+ list_add_tail(&uep->ep_list, &udev->ep_list_head);
+
+ usbhsh_uep_to_udev(uep) = udev;
+ usbhsh_uep_to_ep(uep) = ep;
+ usbhsh_ep_to_uep(ep) = uep;
+
+ usbhs_unlock(priv, flags);
+ /******************** spin unlock ******************/
+
+ dev_dbg(dev, "%s [%d-%d]\n", __func__,
+ usbhsh_device_number(hpriv, udev),
+ usb_endpoint_num(desc));
+
+ return 0;
+}
+
+static void usbhsh_endpoint_detach(struct usbhsh_hpriv *hpriv,
+ struct usb_host_endpoint *ep)
+{
+ struct usbhs_priv *priv = usbhsh_hpriv_to_priv(hpriv);
+ struct device *dev = usbhs_priv_to_dev(priv);
+ struct usbhsh_ep *uep = usbhsh_ep_to_uep(ep);
+ unsigned long flags;
+
+ if (!uep)
+ return;
+
+ dev_dbg(dev, "%s [%d-%d]\n", __func__,
+ usbhsh_device_number(hpriv, usbhsh_uep_to_udev(uep)),
+ usb_endpoint_num(&ep->desc));
+
+ if (usbhsh_uep_to_pipe(uep))
+ usbhsh_pipe_detach(hpriv, uep);
+
+ /******************** spin lock ********************/
+ usbhs_lock(priv, flags);
+
+ /* remove this endpoint from udev */
+ list_del_init(&uep->ep_list);
+
+ usbhsh_uep_to_udev(uep) = NULL;
+ usbhsh_uep_to_ep(uep) = NULL;
+ usbhsh_ep_to_uep(ep) = NULL;
+
+ usbhs_unlock(priv, flags);
+ /******************** spin unlock ******************/
+
+ kfree(uep);
+}
+
+static void usbhsh_endpoint_detach_all(struct usbhsh_hpriv *hpriv,
+ struct usbhsh_device *udev)
+{
+ struct usbhsh_ep *uep, *next;
+
+ list_for_each_entry_safe(uep, next, &udev->ep_list_head, ep_list)
+ usbhsh_endpoint_detach(hpriv, usbhsh_uep_to_ep(uep));
+}
+
+/*
+ * device control
+ */
+static int usbhsh_connected_to_rhdev(struct usb_hcd *hcd,
+ struct usbhsh_device *udev)
+{
+ struct usb_device *usbv = usbhsh_udev_to_usbv(udev);
+
+ return hcd->self.root_hub == usbv->parent;
+}
+
+static int usbhsh_device_has_endpoint(struct usbhsh_device *udev)
+{
+ return !list_empty(&udev->ep_list_head);
+}
+
+static struct usbhsh_device *usbhsh_device_get(struct usbhsh_hpriv *hpriv,
+ struct urb *urb)
+{
+ struct usb_device *usbv = usbhsh_urb_to_usbv(urb);
+ struct usbhsh_device *udev = usbhsh_usbv_to_udev(usbv);
+
+ /* usbhsh_device_attach() is still not called */
+ if (!udev)
+ return NULL;
+
+ /* if it is device0, return it */
+ if (0 == usb_pipedevice(urb->pipe))
+ return usbhsh_device0(hpriv);
+
+ /* return attached device */
+ return udev;
+}
+
+static struct usbhsh_device *usbhsh_device_attach(struct usbhsh_hpriv *hpriv,
+ struct urb *urb)
+{
+ struct usbhsh_device *udev = NULL;
+ struct usbhsh_device *udev0 = usbhsh_device0(hpriv);
+ struct usbhsh_device *pos;
+ struct usb_hcd *hcd = usbhsh_hpriv_to_hcd(hpriv);
+ struct device *dev = usbhsh_hcd_to_dev(hcd);
+ struct usb_device *usbv = usbhsh_urb_to_usbv(urb);
+ struct usbhs_priv *priv = usbhsh_hpriv_to_priv(hpriv);
+ unsigned long flags;
+ u16 upphub, hubport;
+ int i;
+
+ /*
+ * This function should be called only while urb is pointing to device0.
+ * It will attach unused usbhsh_device to urb (usbv),
+ * and initialize device0.
+ * You can use usbhsh_device_get() to get "current" udev,
+ * and usbhsh_usbv_to_udev() is for "attached" udev.
+ */
+ if (0 != usb_pipedevice(urb->pipe)) {
+ dev_err(dev, "%s fail: urb isn't pointing device0\n", __func__);
+ return NULL;
}
+ /******************** spin lock ********************/
+ usbhs_lock(priv, flags);
+
/*
- * find best pipe for endpoint
- * see
- * HARDWARE LIMITATION
+ * find unused device
*/
- type = usb_endpoint_type(desc);
- min_usr = ~0;
- best_pipe = NULL;
- usbhs_for_each_pipe(pipe, priv, i) {
- if (!usbhs_pipe_type_is(pipe, type))
+ usbhsh_for_each_udev(pos, hpriv, i) {
+ if (usbhsh_udev_is_used(pos))
continue;
+ udev = pos;
+ break;
+ }
- dir_in = !!usbhs_pipe_is_dir_in(pipe);
- if (0 != (dir_in - dir_in_req))
- continue;
+ if (udev) {
+ /*
+ * usbhsh_usbv_to_udev()
+ * usbhsh_udev_to_usbv()
+ * will be enable
+ */
+ dev_set_drvdata(&usbv->dev, udev);
+ udev->usbv = usbv;
+ }
- info = usbhsh_pipe_info(pipe);
+ usbhs_unlock(priv, flags);
+ /******************** spin unlock ******************/
- if (min_usr > info->usr_cnt) {
- min_usr = info->usr_cnt;
- best_pipe = pipe;
- }
+ if (!udev) {
+ dev_err(dev, "no free usbhsh_device\n");
+ return NULL;
}
- if (unlikely(!best_pipe)) {
- dev_err(dev, "couldn't find best pipe\n");
- kfree(uep);
- return NULL;
+ if (usbhsh_device_has_endpoint(udev)) {
+ dev_warn(dev, "udev have old endpoint\n");
+ usbhsh_endpoint_detach_all(hpriv, udev);
+ }
+
+ if (usbhsh_device_has_endpoint(udev0)) {
+ dev_warn(dev, "udev0 have old endpoint\n");
+ usbhsh_endpoint_detach_all(hpriv, udev0);
}
-usbhsh_endpoint_alloc_find_pipe:
+
+ /* uep will be attached */
+ INIT_LIST_HEAD(&udev0->ep_list_head);
+ INIT_LIST_HEAD(&udev->ep_list_head);
+
/*
- * init uep
+ * set device0 config
*/
- uep->pipe = best_pipe;
- uep->maxp = usb_endpoint_maxp(desc);
- usbhsh_uep_to_udev(uep) = udev;
- usbhsh_ep_to_uep(ep) = uep;
+ usbhs_set_device_config(priv,
+ 0, 0, 0, usbv->speed);
/*
- * update pipe user count
+ * set new device config
*/
- info = usbhsh_pipe_info(best_pipe);
- info->usr_cnt++;
+ upphub = 0;
+ hubport = 0;
+ if (!usbhsh_connected_to_rhdev(hcd, udev)) {
+ /* if udev is not connected to rhdev, it means parent is Hub */
+ struct usbhsh_device *parent = usbhsh_device_parent(udev);
- /* init this endpoint, and attach it to udev */
- INIT_LIST_HEAD(&uep->ep_list);
- list_add_tail(&uep->ep_list, &udev->ep_list_head);
+ upphub = usbhsh_device_number(hpriv, parent);
+ hubport = usbhsh_device_hubport(udev);
- /*
- * usbhs_pipe_config_update() should be called after
- * usbhs_device_config()
- * see
- * DCPMAXP/PIPEMAXP
- */
- usbhs_pipe_sequence_data0(uep->pipe);
- usbhs_pipe_config_update(uep->pipe,
- usbhsh_device_number(hpriv, udev),
- usb_endpoint_num(desc),
- uep->maxp);
+ dev_dbg(dev, "%s connecte to Hub [%d:%d](%p)\n", __func__,
+ upphub, hubport, parent);
+ }
- dev_dbg(dev, "%s [%d-%s](%p)\n", __func__,
- usbhsh_device_number(hpriv, udev),
- usbhs_pipe_name(uep->pipe), uep);
+ usbhs_set_device_config(priv,
+ usbhsh_device_number(hpriv, udev),
+ upphub, hubport, usbv->speed);
- return uep;
+ dev_dbg(dev, "%s [%d](%p)\n", __func__,
+ usbhsh_device_number(hpriv, udev), udev);
+
+ return udev;
}
-void usbhsh_endpoint_free(struct usbhsh_hpriv *hpriv,
- struct usb_host_endpoint *ep)
+static void usbhsh_device_detach(struct usbhsh_hpriv *hpriv,
+ struct usbhsh_device *udev)
{
+ struct usb_hcd *hcd = usbhsh_hpriv_to_hcd(hpriv);
struct usbhs_priv *priv = usbhsh_hpriv_to_priv(hpriv);
- struct device *dev = usbhs_priv_to_dev(priv);
- struct usbhsh_ep *uep = usbhsh_ep_to_uep(ep);
- struct usbhsh_pipe_info *info;
+ struct device *dev = usbhsh_hcd_to_dev(hcd);
+ struct usb_device *usbv = usbhsh_udev_to_usbv(udev);
+ unsigned long flags;
- if (!uep)
- return;
+ dev_dbg(dev, "%s [%d](%p)\n", __func__,
+ usbhsh_device_number(hpriv, udev), udev);
- dev_dbg(dev, "%s [%d-%s](%p)\n", __func__,
- usbhsh_device_number(hpriv, usbhsh_uep_to_udev(uep)),
- usbhs_pipe_name(uep->pipe), uep);
+ if (usbhsh_device_has_endpoint(udev)) {
+ dev_warn(dev, "udev still have endpoint\n");
+ usbhsh_endpoint_detach_all(hpriv, udev);
+ }
- info = usbhsh_pipe_info(uep->pipe);
- info->usr_cnt--;
+ /*
+ * There is nothing to do if it is device0.
+ * see
+ * usbhsh_device_attach()
+ * usbhsh_device_get()
+ */
+ if (0 == usbhsh_device_number(hpriv, udev))
+ return;
- /* remove this endpoint from udev */
- list_del_init(&uep->ep_list);
+ /******************** spin lock ********************/
+ usbhs_lock(priv, flags);
- usbhsh_uep_to_udev(uep) = NULL;
- usbhsh_ep_to_uep(ep) = NULL;
+ /*
+ * usbhsh_usbv_to_udev()
+ * usbhsh_udev_to_usbv()
+ * will be disable
+ */
+ dev_set_drvdata(&usbv->dev, NULL);
+ udev->usbv = NULL;
- kfree(uep);
+ usbhs_unlock(priv, flags);
+ /******************** spin unlock ******************/
}
/*
@@ -480,11 +629,12 @@ void usbhsh_endpoint_free(struct usbhsh_hpriv *hpriv,
*/
static void usbhsh_queue_done(struct usbhs_priv *priv, struct usbhs_pkt *pkt)
{
- struct usbhsh_request *ureq = usbhsh_pkt_to_req(pkt);
+ struct usbhsh_request *ureq = usbhsh_pkt_to_ureq(pkt);
struct usbhsh_hpriv *hpriv = usbhsh_priv_to_hpriv(priv);
struct usb_hcd *hcd = usbhsh_hpriv_to_hcd(hpriv);
struct urb *urb = ureq->urb;
struct device *dev = usbhs_priv_to_dev(priv);
+ int status = 0;
dev_dbg(dev, "%s\n", __func__);
@@ -493,29 +643,43 @@ static void usbhsh_queue_done(struct usbhs_priv *priv, struct usbhs_pkt *pkt)
return;
}
+ if (!usbhsh_is_running(hpriv))
+ status = -ESHUTDOWN;
+
urb->actual_length = pkt->actual;
- usbhsh_req_free(hpriv, ureq);
- usbhsh_urb_to_ureq(urb) = NULL;
+ usbhsh_ureq_free(hpriv, ureq);
+
+ usbhsh_endpoint_sequence_save(hpriv, urb, pkt);
+ usbhsh_pipe_detach(hpriv, usbhsh_ep_to_uep(urb->ep));
usb_hcd_unlink_urb_from_ep(hcd, urb);
- usb_hcd_giveback_urb(hcd, urb, 0);
+ usb_hcd_giveback_urb(hcd, urb, status);
}
static int usbhsh_queue_push(struct usb_hcd *hcd,
- struct usbhs_pipe *pipe,
- struct urb *urb)
+ struct urb *urb,
+ gfp_t mem_flags)
{
- struct usbhsh_request *ureq = usbhsh_urb_to_ureq(urb);
- struct usbhs_pkt *pkt = &ureq->pkt;
+ struct usbhsh_hpriv *hpriv = usbhsh_hcd_to_hpriv(hcd);
+ struct usbhsh_ep *uep = usbhsh_ep_to_uep(urb->ep);
+ struct usbhs_pipe *pipe = usbhsh_uep_to_pipe(uep);
struct device *dev = usbhsh_hcd_to_dev(hcd);
+ struct usbhsh_request *ureq;
void *buf;
- int len;
+ int len, sequence;
if (usb_pipeisoc(urb->pipe)) {
dev_err(dev, "pipe iso is not supported now\n");
return -EIO;
}
+ /* this ureq will be freed on usbhsh_queue_done() */
+ ureq = usbhsh_ureq_alloc(hpriv, urb, mem_flags);
+ if (unlikely(!ureq)) {
+ dev_err(dev, "ureq alloc fail\n");
+ return -ENOMEM;
+ }
+
if (usb_pipein(urb->pipe))
pipe->handler = &usbhs_fifo_pio_pop_handler;
else
@@ -524,25 +688,59 @@ static int usbhsh_queue_push(struct usb_hcd *hcd,
buf = (void *)(urb->transfer_buffer + urb->actual_length);
len = urb->transfer_buffer_length - urb->actual_length;
+ sequence = usb_gettoggle(urb->dev,
+ usb_pipeendpoint(urb->pipe),
+ usb_pipeout(urb->pipe));
+
dev_dbg(dev, "%s\n", __func__);
- usbhs_pkt_push(pipe, pkt, usbhsh_queue_done,
- buf, len, (urb->transfer_flags & URB_ZERO_PACKET));
+ usbhs_pkt_push(pipe, &ureq->pkt, usbhsh_queue_done,
+ buf, len, (urb->transfer_flags & URB_ZERO_PACKET),
+ sequence);
+
usbhs_pkt_start(pipe);
return 0;
}
+static void usbhsh_queue_force_pop(struct usbhs_priv *priv,
+ struct usbhs_pipe *pipe)
+{
+ struct usbhs_pkt *pkt;
+
+ while (1) {
+ pkt = usbhs_pkt_pop(pipe, NULL);
+ if (!pkt)
+ break;
+
+ /*
+		 * if all packets are gone, usbhsh_endpoint_disable()
+ * will be called.
+ * then, attached device/endpoint/pipe will be detached
+ */
+ usbhsh_queue_done(priv, pkt);
+ }
+}
+
+static void usbhsh_queue_force_pop_all(struct usbhs_priv *priv)
+{
+ struct usbhs_pipe *pos;
+ int i;
+
+ usbhs_for_each_pipe_with_dcp(pos, priv, i)
+ usbhsh_queue_force_pop(priv, pos);
+}
+
/*
* DCP setup stage
*/
static int usbhsh_is_request_address(struct urb *urb)
{
- struct usb_ctrlrequest *cmd;
+ struct usb_ctrlrequest *req;
- cmd = (struct usb_ctrlrequest *)urb->setup_packet;
+ req = (struct usb_ctrlrequest *)urb->setup_packet;
- if ((DeviceOutRequest == cmd->bRequestType << 8) &&
- (USB_REQ_SET_ADDRESS == cmd->bRequest))
+ if ((DeviceOutRequest == req->bRequestType << 8) &&
+ (USB_REQ_SET_ADDRESS == req->bRequest))
return 1;
else
return 0;
@@ -570,11 +768,15 @@ static void usbhsh_setup_stage_packet_push(struct usbhsh_hpriv *hpriv,
/*
* renesas_usbhs can not use original usb address.
* see HARDWARE LIMITATION.
- * modify usb address here.
+ * modify usb address here to use attached device.
+ * see usbhsh_device_attach()
*/
if (usbhsh_is_request_address(urb)) {
- /* FIXME */
- req.wValue = 1;
+ struct usb_device *usbv = usbhsh_urb_to_usbv(urb);
+ struct usbhsh_device *udev = usbhsh_usbv_to_udev(usbv);
+
+		/* udev is an attached device */
+ req.wValue = usbhsh_device_number(hpriv, udev);
dev_dbg(dev, "create new address - %d\n", req.wValue);
}
@@ -595,82 +797,80 @@ static void usbhsh_setup_stage_packet_push(struct usbhsh_hpriv *hpriv,
static void usbhsh_data_stage_packet_done(struct usbhs_priv *priv,
struct usbhs_pkt *pkt)
{
- struct usbhsh_request *ureq = usbhsh_pkt_to_req(pkt);
+ struct usbhsh_request *ureq = usbhsh_pkt_to_ureq(pkt);
struct usbhsh_hpriv *hpriv = usbhsh_priv_to_hpriv(priv);
- struct urb *urb = ureq->urb;
/* this ureq was connected to urb when usbhsh_urb_enqueue() */
- usbhsh_req_free(hpriv, ureq);
- usbhsh_urb_to_ureq(urb) = NULL;
+ usbhsh_ureq_free(hpriv, ureq);
}
-static void usbhsh_data_stage_packet_push(struct usbhsh_hpriv *hpriv,
- struct urb *urb,
- struct usbhs_pipe *pipe)
+static int usbhsh_data_stage_packet_push(struct usbhsh_hpriv *hpriv,
+ struct urb *urb,
+ struct usbhs_pipe *pipe,
+ gfp_t mem_flags)
+
{
struct usbhsh_request *ureq;
- struct usbhs_pkt *pkt;
- /*
- * FIXME
- *
- * data stage uses ureq which is connected to urb
- * see usbhsh_urb_enqueue() :: alloc new request.
- * it will be freed in usbhsh_data_stage_packet_done()
- */
- ureq = usbhsh_urb_to_ureq(urb);
- pkt = &ureq->pkt;
+ /* this ureq will be freed on usbhsh_data_stage_packet_done() */
+ ureq = usbhsh_ureq_alloc(hpriv, urb, mem_flags);
+ if (unlikely(!ureq))
+ return -ENOMEM;
if (usb_pipein(urb->pipe))
pipe->handler = &usbhs_dcp_data_stage_in_handler;
else
pipe->handler = &usbhs_dcp_data_stage_out_handler;
- usbhs_pkt_push(pipe, pkt,
+ usbhs_pkt_push(pipe, &ureq->pkt,
usbhsh_data_stage_packet_done,
urb->transfer_buffer,
urb->transfer_buffer_length,
- (urb->transfer_flags & URB_ZERO_PACKET));
+ (urb->transfer_flags & URB_ZERO_PACKET),
+ -1);
+
+ return 0;
}
/*
* DCP status stage
*/
-static void usbhsh_status_stage_packet_push(struct usbhsh_hpriv *hpriv,
+static int usbhsh_status_stage_packet_push(struct usbhsh_hpriv *hpriv,
struct urb *urb,
- struct usbhs_pipe *pipe)
+ struct usbhs_pipe *pipe,
+ gfp_t mem_flags)
{
struct usbhsh_request *ureq;
- struct usbhs_pkt *pkt;
- /*
- * FIXME
- *
- * status stage uses allocated ureq.
- * it will be freed on usbhsh_queue_done()
- */
- ureq = usbhsh_req_alloc(hpriv, urb, GFP_KERNEL);
- pkt = &ureq->pkt;
+ /* This ureq will be freed on usbhsh_queue_done() */
+ ureq = usbhsh_ureq_alloc(hpriv, urb, mem_flags);
+ if (unlikely(!ureq))
+ return -ENOMEM;
if (usb_pipein(urb->pipe))
pipe->handler = &usbhs_dcp_status_stage_in_handler;
else
pipe->handler = &usbhs_dcp_status_stage_out_handler;
- usbhs_pkt_push(pipe, pkt,
+ usbhs_pkt_push(pipe, &ureq->pkt,
usbhsh_queue_done,
NULL,
urb->transfer_buffer_length,
- 0);
+ 0, -1);
+
+ return 0;
}
static int usbhsh_dcp_queue_push(struct usb_hcd *hcd,
- struct usbhsh_hpriv *hpriv,
- struct usbhs_pipe *pipe,
- struct urb *urb)
+ struct urb *urb,
+ gfp_t mflags)
{
+ struct usbhsh_hpriv *hpriv = usbhsh_hcd_to_hpriv(hcd);
+ struct usbhsh_ep *uep = usbhsh_ep_to_uep(urb->ep);
+ struct usbhs_pipe *pipe = usbhsh_uep_to_pipe(uep);
struct device *dev = usbhsh_hcd_to_dev(hcd);
+ int ret;
dev_dbg(dev, "%s\n", __func__);
@@ -686,13 +886,22 @@ static int usbhsh_dcp_queue_push(struct usb_hcd *hcd,
*
* It is pushed only when urb has buffer.
*/
- if (urb->transfer_buffer_length)
- usbhsh_data_stage_packet_push(hpriv, urb, pipe);
+ if (urb->transfer_buffer_length) {
+ ret = usbhsh_data_stage_packet_push(hpriv, urb, pipe, mflags);
+ if (ret < 0) {
+ dev_err(dev, "data stage failed\n");
+ return ret;
+ }
+ }
/*
* status stage
*/
- usbhsh_status_stage_packet_push(hpriv, urb, pipe);
+ ret = usbhsh_status_stage_packet_push(hpriv, urb, pipe, mflags);
+ if (ret < 0) {
+ dev_err(dev, "status stage failed\n");
+ return ret;
+ }
/*
* start pushed packets
@@ -729,71 +938,82 @@ static int usbhsh_urb_enqueue(struct usb_hcd *hcd,
struct usbhsh_hpriv *hpriv = usbhsh_hcd_to_hpriv(hcd);
struct usbhs_priv *priv = usbhsh_hpriv_to_priv(hpriv);
struct device *dev = usbhs_priv_to_dev(priv);
- struct usb_device *usbv = usbhsh_urb_to_usbv(urb);
struct usb_host_endpoint *ep = urb->ep;
- struct usbhsh_request *ureq;
- struct usbhsh_device *udev, *new_udev = NULL;
- struct usbhs_pipe *pipe;
- struct usbhsh_ep *uep;
+ struct usbhsh_device *new_udev = NULL;
int is_dir_in = usb_pipein(urb->pipe);
-
+ int i;
int ret;
dev_dbg(dev, "%s (%s)\n", __func__, is_dir_in ? "in" : "out");
+ if (!usbhsh_is_running(hpriv)) {
+ ret = -EIO;
+ dev_err(dev, "host is not running\n");
+ goto usbhsh_urb_enqueue_error_not_linked;
+ }
+
ret = usb_hcd_link_urb_to_ep(hcd, urb);
- if (ret)
+ if (ret) {
+ dev_err(dev, "urb link failed\n");
goto usbhsh_urb_enqueue_error_not_linked;
+ }
/*
- * get udev
+ * attach udev if needed
+ * see [image of mod_host]
*/
- udev = usbhsh_usbv_to_udev(usbv);
- if (!udev) {
- new_udev = usbhsh_device_alloc(hpriv, urb);
- if (!new_udev)
+ if (!usbhsh_device_get(hpriv, urb)) {
+ new_udev = usbhsh_device_attach(hpriv, urb);
+ if (!new_udev) {
+ ret = -EIO;
+ dev_err(dev, "device attach failed\n");
goto usbhsh_urb_enqueue_error_not_linked;
-
- udev = new_udev;
+ }
}
/*
- * get uep
+ * attach endpoint if needed
+ * see [image of mod_host]
*/
- uep = usbhsh_ep_to_uep(ep);
- if (!uep) {
- uep = usbhsh_endpoint_alloc(hpriv, udev, ep,
- is_dir_in, mem_flags);
- if (!uep)
+ if (!usbhsh_ep_to_uep(ep)) {
+ ret = usbhsh_endpoint_attach(hpriv, urb, mem_flags);
+ if (ret < 0) {
+ dev_err(dev, "endpoint attach failed\n");
goto usbhsh_urb_enqueue_error_free_device;
+ }
}
- pipe = usbhsh_uep_to_pipe(uep);
/*
- * alloc new request
+ * attach pipe to endpoint
+ * see [image of mod_host]
*/
- ureq = usbhsh_req_alloc(hpriv, urb, mem_flags);
- if (unlikely(!ureq)) {
- ret = -ENOMEM;
+ for (i = 0; i < 1024; i++) {
+ ret = usbhsh_pipe_attach(hpriv, urb);
+ if (ret < 0)
+ msleep(100);
+ else
+ break;
+ }
+ if (ret < 0) {
+ dev_err(dev, "pipe attach failed\n");
goto usbhsh_urb_enqueue_error_free_endpoint;
}
- usbhsh_urb_to_ureq(urb) = ureq;
/*
* push packet
*/
if (usb_pipecontrol(urb->pipe))
- usbhsh_dcp_queue_push(hcd, hpriv, pipe, urb);
+ ret = usbhsh_dcp_queue_push(hcd, urb, mem_flags);
else
- usbhsh_queue_push(hcd, pipe, urb);
+ ret = usbhsh_queue_push(hcd, urb, mem_flags);
- return 0;
+ return ret;
usbhsh_urb_enqueue_error_free_endpoint:
- usbhsh_endpoint_free(hpriv, ep);
+ usbhsh_endpoint_detach(hpriv, ep);
usbhsh_urb_enqueue_error_free_device:
if (new_udev)
- usbhsh_device_free(hpriv, new_udev);
+ usbhsh_device_detach(hpriv, new_udev);
usbhsh_urb_enqueue_error_not_linked:
dev_dbg(dev, "%s error\n", __func__);
@@ -807,8 +1027,11 @@ static int usbhsh_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
struct usbhsh_request *ureq = usbhsh_urb_to_ureq(urb);
if (ureq) {
- usbhsh_req_free(hpriv, ureq);
- usbhsh_urb_to_ureq(urb) = NULL;
+ struct usbhs_priv *priv = usbhsh_hpriv_to_priv(hpriv);
+ struct usbhs_pkt *pkt = &ureq->pkt;
+
+ usbhs_pkt_pop(pkt->pipe, pkt);
+ usbhsh_queue_done(priv, pkt);
}
return 0;
@@ -823,7 +1046,7 @@ static void usbhsh_endpoint_disable(struct usb_hcd *hcd,
/*
	 * this function might be called many times by the same hcd/ep
- * in-endpoitn == out-endpoint if ep == dcp.
+ * in-endpoint == out-endpoint if ep == dcp.
*/
if (!uep)
return;
@@ -831,15 +1054,14 @@ static void usbhsh_endpoint_disable(struct usb_hcd *hcd,
udev = usbhsh_uep_to_udev(uep);
hpriv = usbhsh_hcd_to_hpriv(hcd);
- usbhsh_endpoint_free(hpriv, ep);
- ep->hcpriv = NULL;
+ usbhsh_endpoint_detach(hpriv, ep);
/*
* if there is no endpoint,
* free device
*/
if (!usbhsh_device_has_endpoint(udev))
- usbhsh_device_free(hpriv, udev);
+ usbhsh_device_detach(hpriv, udev);
}
static int usbhsh_hub_status_data(struct usb_hcd *hcd, char *buf)
@@ -919,6 +1141,8 @@ static int __usbhsh_hub_port_feature(struct usbhsh_hpriv *hpriv,
USB_PORT_STAT_HIGH_SPEED |
USB_PORT_STAT_LOW_SPEED);
+ usbhsh_queue_force_pop_all(priv);
+
usbhs_bus_send_reset(priv);
msleep(20);
usbhs_bus_send_sof_enable(priv);
@@ -1082,6 +1306,20 @@ static int usbhsh_irq_attch(struct usbhs_priv *priv,
usbhsh_port_stat_set(hpriv, USB_PORT_STAT_CONNECTION);
usbhsh_port_stat_set(hpriv, USB_PORT_STAT_C_CONNECTION << 16);
+ /*
+	 * the attch interrupt might fire indefinitely on some devices
+	 * (a self-powered USB hub ?), so disable it here.
+	 *
+	 * usbhsh_is_running() becomes true as a result of this.
+ * see
+ * usbhsh_is_running()
+ * usbhsh_urb_enqueue()
+ */
+ hpriv->mod.irq_attch = NULL;
+ usbhs_irq_callback_update(priv, &hpriv->mod);
+
return 0;
}
@@ -1096,6 +1334,24 @@ static int usbhsh_irq_dtch(struct usbhs_priv *priv,
usbhsh_port_stat_clear(hpriv, USB_PORT_STAT_CONNECTION);
usbhsh_port_stat_set(hpriv, USB_PORT_STAT_C_CONNECTION << 16);
+ /*
+	 * enable the attch interrupt again
+	 *
+	 * usbhsh_is_running() becomes false as a result of this.
+ * see
+ * usbhsh_is_running()
+ * usbhsh_urb_enqueue()
+ */
+ hpriv->mod.irq_attch = usbhsh_irq_attch;
+ usbhs_irq_callback_update(priv, &hpriv->mod);
+
+ /*
+ * usbhsh_queue_force_pop_all() should be called
+	 * after usbhsh_is_running() becomes false.
+ */
+ usbhsh_queue_force_pop_all(priv);
+
return 0;
}
@@ -1131,7 +1387,6 @@ static int usbhsh_irq_setup_err(struct usbhs_priv *priv,
static void usbhsh_pipe_init_for_host(struct usbhs_priv *priv)
{
struct usbhsh_hpriv *hpriv = usbhsh_priv_to_hpriv(priv);
- struct usbhsh_pipe_info *pipe_info = hpriv->pipe_info;
struct usbhs_pipe *pipe;
u32 *pipe_type = usbhs_get_dparam(priv, pipe_type);
int pipe_size = usbhs_get_dparam(priv, pipe_size);
@@ -1140,7 +1395,6 @@ static void usbhsh_pipe_init_for_host(struct usbhs_priv *priv)
/* init all pipe */
old_type = USB_ENDPOINT_XFER_CONTROL;
for (i = 0; i < pipe_size; i++) {
- pipe_info[i].usr_cnt = 0;
/*
* data "output" will be finished as soon as possible,
@@ -1174,7 +1428,7 @@ static void usbhsh_pipe_init_for_host(struct usbhs_priv *priv)
dir_in);
}
- pipe->mod_private = pipe_info + i;
+ pipe->mod_private = NULL;
}
}
@@ -1205,9 +1459,7 @@ static int usbhsh_start(struct usbhs_priv *priv)
* - host
* - usb module
*/
- usbhs_sys_hispeed_ctrl(priv, 1);
usbhs_sys_host_ctrl(priv, 1);
- usbhs_sys_usb_ctrl(priv, 1);
/*
* enable irq callback
@@ -1242,9 +1494,7 @@ static int usbhsh_stop(struct usbhs_priv *priv)
usb_remove_hcd(hcd);
/* disable sys */
- usbhs_sys_hispeed_ctrl(priv, 0);
usbhs_sys_host_ctrl(priv, 0);
- usbhs_sys_usb_ctrl(priv, 0);
dev_dbg(dev, "quit host\n");
@@ -1255,10 +1505,8 @@ int usbhs_mod_host_probe(struct usbhs_priv *priv)
{
struct usbhsh_hpriv *hpriv;
struct usb_hcd *hcd;
- struct usbhsh_pipe_info *pipe_info;
struct usbhsh_device *udev;
struct device *dev = usbhs_priv_to_dev(priv);
- int pipe_size = usbhs_get_dparam(priv, pipe_size);
int i;
/* initialize hcd */
@@ -1267,12 +1515,7 @@ int usbhs_mod_host_probe(struct usbhs_priv *priv)
dev_err(dev, "Failed to create hcd\n");
return -ENOMEM;
}
-
- pipe_info = kzalloc(sizeof(*pipe_info) * pipe_size, GFP_KERNEL);
- if (!pipe_info) {
- dev_err(dev, "Could not allocate pipe_info\n");
- goto usbhs_mod_host_probe_err;
- }
+ hcd->has_tt = 1; /* for low/full speed */
/*
* CAUTION
@@ -1293,9 +1536,6 @@ int usbhs_mod_host_probe(struct usbhs_priv *priv)
hpriv->mod.name = "host";
hpriv->mod.start = usbhsh_start;
hpriv->mod.stop = usbhsh_stop;
- hpriv->pipe_info = pipe_info;
- hpriv->pipe_size = pipe_size;
- usbhsh_req_list_init(hpriv);
usbhsh_port_stat_init(hpriv);
/* init all device */
@@ -1307,11 +1547,6 @@ int usbhs_mod_host_probe(struct usbhs_priv *priv)
dev_info(dev, "host probed\n");
return 0;
-
-usbhs_mod_host_probe_err:
- usb_put_hcd(hcd);
-
- return -ENOMEM;
}
int usbhs_mod_host_remove(struct usbhs_priv *priv)
@@ -1319,8 +1554,6 @@ int usbhs_mod_host_remove(struct usbhs_priv *priv)
struct usbhsh_hpriv *hpriv = usbhsh_priv_to_hpriv(priv);
struct usb_hcd *hcd = usbhsh_hpriv_to_hcd(hpriv);
- usbhsh_req_list_quit(hpriv);
-
usb_put_hcd(hcd);
return 0;
diff --git a/drivers/usb/renesas_usbhs/pipe.c b/drivers/usb/renesas_usbhs/pipe.c
index c74389ce2177..feb06d6d2814 100644
--- a/drivers/usb/renesas_usbhs/pipe.c
+++ b/drivers/usb/renesas_usbhs/pipe.c
@@ -257,6 +257,13 @@ void usbhs_pipe_stall(struct usbhs_pipe *pipe)
}
}
+int usbhs_pipe_is_stall(struct usbhs_pipe *pipe)
+{
+ u16 pid = usbhsp_pipectrl_get(pipe) & PID_MASK;
+
+ return (int)(pid == PID_STALL10 || pid == PID_STALL11);
+}
+
/*
* pipe setup
*/
@@ -323,8 +330,7 @@ static u16 usbhsp_setup_pipecfg(struct usbhs_pipe *pipe,
if (dir_in)
usbhsp_flags_set(pipe, IS_DIR_HOST);
- if ((is_host && !dir_in) ||
- (!is_host && dir_in))
+ if (!!is_host ^ !!dir_in)
dir |= DIR_OUT;
if (!dir)
@@ -471,10 +477,27 @@ int usbhs_pipe_is_dir_host(struct usbhs_pipe *pipe)
return usbhsp_flags_has(pipe, IS_DIR_HOST);
}
-void usbhs_pipe_data_sequence(struct usbhs_pipe *pipe, int data)
+void usbhs_pipe_data_sequence(struct usbhs_pipe *pipe, int sequence)
{
u16 mask = (SQCLR | SQSET);
- u16 val = (data) ? SQSET : SQCLR;
+ u16 val;
+
+ /*
+ * sequence
+ * 0 : data0
+ * 1 : data1
+ * -1 : no change
+ */
+ switch (sequence) {
+ case 0:
+ val = SQCLR;
+ break;
+ case 1:
+ val = SQSET;
+ break;
+ default:
+ return;
+ }
usbhsp_pipectrl_set(pipe, mask, val);
}
diff --git a/drivers/usb/renesas_usbhs/pipe.h b/drivers/usb/renesas_usbhs/pipe.h
index 6334fc644cc0..fa18b7dc2b2a 100644
--- a/drivers/usb/renesas_usbhs/pipe.h
+++ b/drivers/usb/renesas_usbhs/pipe.h
@@ -87,6 +87,7 @@ int usbhs_pipe_is_accessible(struct usbhs_pipe *pipe);
void usbhs_pipe_enable(struct usbhs_pipe *pipe);
void usbhs_pipe_disable(struct usbhs_pipe *pipe);
void usbhs_pipe_stall(struct usbhs_pipe *pipe);
+int usbhs_pipe_is_stall(struct usbhs_pipe *pipe);
void usbhs_pipe_select_fifo(struct usbhs_pipe *pipe, struct usbhs_fifo *fifo);
void usbhs_pipe_config_update(struct usbhs_pipe *pipe, u16 devsel,
u16 epnum, u16 maxp);
diff --git a/drivers/usb/serial/ChangeLog.history b/drivers/usb/serial/ChangeLog.history
deleted file mode 100644
index f13fd488ebec..000000000000
--- a/drivers/usb/serial/ChangeLog.history
+++ /dev/null
@@ -1,730 +0,0 @@
-This is the contents of some of the drivers/usb/serial/ files that had old
-changelog comments. They were quite old, and out of date, and we don't keep
-them anymore, so I've put them here, away from the source files, in case
-people still care to see them.
-
-- Greg Kroah-Hartman <greg@kroah.com> October 20, 2005
-
------------------------------------------------------------------------
-usb-serial.h Change Log comments:
-
- (03/26/2002) gkh
- removed the port->tty check from port_paranoia_check() due to serial
- consoles not having a tty device assigned to them.
-
- (12/03/2001) gkh
- removed active from the port structure.
- added documentation to the usb_serial_device_type structure
-
- (10/10/2001) gkh
- added vendor and product to serial structure. Needed to determine device
- owner when the device is disconnected.
-
- (05/30/2001) gkh
- added sem to port structure and removed port_lock
-
- (10/05/2000) gkh
- Added interrupt_in_endpointAddress and bulk_in_endpointAddress to help
- fix bug with urb->dev not being set properly, now that the usb core
- needs it.
-
- (09/11/2000) gkh
- Added usb_serial_debug_data function to help get rid of #DEBUG in the
- drivers.
-
- (08/28/2000) gkh
- Added port_lock to port structure.
-
- (08/08/2000) gkh
- Added open_count to port structure.
-
- (07/23/2000) gkh
- Added bulk_out_endpointAddress to port structure.
-
- (07/19/2000) gkh, pberger, and borchers
- Modifications to allow usb-serial drivers to be modules.
-
------------------------------------------------------------------------
-usb-serial.c Change Log comments:
-
- (12/10/2002) gkh
- Split the ports off into their own struct device, and added a
- usb-serial bus driver.
-
- (11/19/2002) gkh
- removed a few #ifdefs for the generic code and cleaned up the failure
- logic in initialization.
-
- (10/02/2002) gkh
- moved the console code to console.c and out of this file.
-
- (06/05/2002) gkh
- moved location of startup() call in serial_probe() until after all
- of the port information and endpoints are initialized. This makes
- things easier for some drivers.
-
- (04/10/2002) gkh
- added serial_read_proc function which creates a
- /proc/tty/driver/usb-serial file.
-
- (03/27/2002) gkh
- Got USB serial console code working properly and merged into the main
- version of the tree. Thanks to Randy Dunlap for the initial version
- of this code, and for pushing me to finish it up.
- The USB serial console works with any usb serial driver device.
-
- (03/21/2002) gkh
- Moved all manipulation of port->open_count into the core. Now the
- individual driver's open and close functions are called only when the
- first open() and last close() is called. Making the drivers a bit
- smaller and simpler.
- Fixed a bug if a driver didn't have the owner field set.
-
- (02/26/2002) gkh
- Moved all locking into the main serial_* functions, instead of having
- the individual drivers have to grab the port semaphore. This should
- reduce races.
- Reworked the MOD_INC logic a bit to always increment and decrement, even
- if the generic driver is being used.
-
- (10/10/2001) gkh
- usb_serial_disconnect() now sets the serial->dev pointer is to NULL to
- help prevent child drivers from accessing the device since it is now
- gone.
-
- (09/13/2001) gkh
- Moved generic driver initialize after we have registered with the USB
- core. Thanks to Randy Dunlap for pointing this problem out.
-
- (07/03/2001) gkh
- Fixed module paramater size. Thanks to John Brockmeyer for the pointer.
- Fixed vendor and product getting defined through the MODULE_PARM macro
- if the Generic driver wasn't compiled in.
- Fixed problem with generic_shutdown() not being called for drivers that
- don't have a shutdown() function.
-
- (06/06/2001) gkh
- added evil hack that is needed for the prolific pl2303 device due to the
- crazy way its endpoints are set up.
-
- (05/30/2001) gkh
- switched from using spinlock to a semaphore, which fixes lots of problems.
-
- (04/08/2001) gb
- Identify version on module load.
-
- 2001_02_05 gkh
- Fixed buffer overflows bug with the generic serial driver. Thanks to
- Todd Squires <squirest@ct0.com> for fixing this.
-
- (01/10/2001) gkh
- Fixed bug where the generic serial adaptor grabbed _any_ device that was
- offered to it.
-
- (12/12/2000) gkh
- Removed MOD_INC and MOD_DEC from poll and disconnect functions, and
- moved them to the serial_open and serial_close functions.
- Also fixed bug with there not being a MOD_DEC for the generic driver
- (thanks to Gary Brubaker for finding this.)
-
- (11/29/2000) gkh
- Small NULL pointer initialization cleanup which saves a bit of disk image
-
- (11/01/2000) Adam J. Richter
- instead of using idVendor/idProduct pairs, usb serial drivers
- now identify their hardware interest with usb_device_id tables,
- which they usually have anyhow for use with MODULE_DEVICE_TABLE.
-
- (10/05/2000) gkh
- Fixed bug with urb->dev not being set properly, now that the usb
- core needs it.
-
- (09/11/2000) gkh
- Removed DEBUG #ifdefs with call to usb_serial_debug_data
-
- (08/28/2000) gkh
- Added port_lock to port structure.
- Added locks for SMP safeness to generic driver
- Fixed the ability to open a generic device's port more than once.
-
- (07/23/2000) gkh
- Added bulk_out_endpointAddress to port structure.
-
- (07/19/2000) gkh, pberger, and borchers
- Modifications to allow usb-serial drivers to be modules.
-
- (07/03/2000) gkh
- Added more debugging to serial_ioctl call
-
- (06/25/2000) gkh
- Changed generic_write_bulk_callback to not call wake_up_interruptible
- directly, but to have port_softint do it at a safer time.
-
- (06/23/2000) gkh
- Cleaned up debugging statements in a quest to find UHCI timeout bug.
-
- (05/22/2000) gkh
- Changed the makefile, enabling the big CONFIG_USB_SERIAL_SOMTHING to be
- removed from the individual device source files.
-
- (05/03/2000) gkh
- Added the Digi Acceleport driver from Al Borchers and Peter Berger.
-
- (05/02/2000) gkh
- Changed devfs and tty register code to work properly now. This was based on
- the ACM driver changes by Vojtech Pavlik.
-
- (04/27/2000) Ryan VanderBijl
- Put calls to *_paranoia_checks into one function.
-
- (04/23/2000) gkh
- Fixed bug that Randy Dunlap found for Generic devices with no bulk out ports.
- Moved when the startup code printed out the devices that are supported.
-
- (04/19/2000) gkh
- Added driver for ZyXEL omni.net lcd plus ISDN TA
- Made startup info message specify which drivers were compiled in.
-
- (04/03/2000) gkh
- Changed the probe process to remove the module unload races.
- Changed where the tty layer gets initialized to have devfs work nicer.
- Added initial devfs support.
-
- (03/26/2000) gkh
- Split driver up into device specific pieces.
-
- (03/19/2000) gkh
- Fixed oops that could happen when device was removed while a program
- was talking to the device.
- Removed the static urbs and now all urbs are created and destroyed
- dynamically.
- Reworked the internal interface. Now everything is based on the
- usb_serial_port structure instead of the larger usb_serial structure.
- This fixes the bug that a multiport device could not have more than
- one port open at one time.
-
- (03/17/2000) gkh
- Added config option for debugging messages.
- Added patch for keyspan pda from Brian Warner.
-
- (03/06/2000) gkh
- Added the keyspan pda code from Brian Warner <warner@lothar.com>
- Moved a bunch of the port specific stuff into its own structure. This
- is in anticipation of the true multiport devices (there's a bug if you
- try to access more than one port of any multiport device right now)
-
- (02/21/2000) gkh
- Made it so that any serial devices only have to specify which functions
- they want to overload from the generic function calls (great,
- inheritance in C, in a driver, just what I wanted...)
- Added support for set_termios and ioctl function calls. No drivers take
- advantage of this yet.
- Removed the #ifdef MODULE, now there is no module specific code.
- Cleaned up a few comments in usb-serial.h that were wrong (thanks again
- to Miles Lott).
- Small fix to get_free_serial.
-
- (02/14/2000) gkh
- Removed the Belkin and Peracom functionality from the driver due to
- the lack of support from the vendor, and me not wanting people to
- accidenatly buy the device, expecting it to work with Linux.
- Added read_bulk_callback and write_bulk_callback to the type structure
- for the needs of the FTDI and WhiteHEAT driver.
- Changed all reverences to FTDI to FTDI_SIO at the request of Bill
- Ryder.
- Changed the output urb size back to the max endpoint size to make
- the ftdi_sio driver have it easier, and due to the fact that it didn't
- really increase the speed any.
-
- (02/11/2000) gkh
- Added VISOR_FUNCTION_CONSOLE to the visor startup function. This was a
- patch from Miles Lott (milos@insync.net).
- Fixed bug with not restoring the minor range that a device grabs, if
- the startup function fails (thanks Miles for finding this).
-
- (02/05/2000) gkh
- Added initial framework for the Keyspan PDA serial converter so that
- Brian Warner has a place to put his code.
- Made the ezusb specific functions generic enough that different
- devices can use them (whiteheat and keyspan_pda both need them).
- Split out a whole bunch of structure and other stuff to a separate
- usb-serial.h file.
- Made the Visor connection messages a little more understandable, now
- that Miles Lott (milos@insync.net) has gotten the Generic channel to
- work. Also made them always show up in the log file.
-
- (01/25/2000) gkh
- Added initial framework for FTDI serial converter so that Bill Ryder
- has a place to put his code.
- Added the vendor specific info from Handspring. Now we can print out
- informational debug messages as well as understand what is happening.
-
- (01/23/2000) gkh
- Fixed problem of crash when trying to open a port that didn't have a
- device assigned to it. Made the minor node finding a little smarter,
- now it looks to find a continuous space for the new device.
-
- (01/21/2000) gkh
- Fixed bug in visor_startup with patch from Miles Lott (milos@insync.net)
- Fixed get_serial_by_minor which was all messed up for multi port
- devices. Fixed multi port problem for generic devices. Now the number
- of ports is determined by the number of bulk out endpoints for the
- generic device.
-
- (01/19/2000) gkh
- Removed lots of cruft that was around from the old (pre urb) driver
- interface.
- Made the serial_table dynamic. This should save lots of memory when
- the number of minor nodes goes up to 256.
- Added initial support for devices that have more than one port.
- Added more debugging comments for the Visor, and added a needed
- set_configuration call.
-
- (01/17/2000) gkh
- Fixed the WhiteHEAT firmware (my processing tool had a bug)
- and added new debug loader firmware for it.
- Removed the put_char function as it isn't really needed.
- Added visor startup commands as found by the Win98 dump.
-
- (01/13/2000) gkh
- Fixed the vendor id for the generic driver to the one I meant it to be.
-
- (01/12/2000) gkh
- Forget the version numbering...that's pretty useless...
- Made the driver able to be compiled so that the user can select which
- converter they want to use. This allows people who only want the Visor
- support to not pay the memory size price of the WhiteHEAT.
- Fixed bug where the generic driver (idVendor=0000 and idProduct=0000)
- grabbed the root hub. Not good.
-
- version 0.4.0 (01/10/2000) gkh
- Added whiteheat.h containing the firmware for the ConnectTech WhiteHEAT
- device. Added startup function to allow firmware to be downloaded to
- a device if it needs to be.
- Added firmware download logic to the WhiteHEAT device.
- Started to add #defines to split up the different drivers for potential
- configuration option.
-
- version 0.3.1 (12/30/99) gkh
- Fixed problems with urb for bulk out.
- Added initial support for multiple sets of endpoints. This enables
- the Handspring Visor to be attached successfully. Only the first
- bulk in / bulk out endpoint pair is being used right now.
-
- version 0.3.0 (12/27/99) gkh
- Added initial support for the Handspring Visor based on a patch from
- Miles Lott (milos@sneety.insync.net)
- Cleaned up the code a bunch and converted over to using urbs only.
-
- version 0.2.3 (12/21/99) gkh
- Added initial support for the Connect Tech WhiteHEAT converter.
- Incremented the number of ports in expectation of getting the
- WhiteHEAT to work properly (4 ports per connection).
- Added notification on insertion and removal of what port the
- device is/was connected to (and what kind of device it was).
-
- version 0.2.2 (12/16/99) gkh
- Changed major number to the new allocated number. We're legal now!
-
- version 0.2.1 (12/14/99) gkh
- Fixed bug that happens when device node is opened when there isn't a
- device attached to it. Thanks to marek@webdesign.no for noticing this.
-
- version 0.2.0 (11/10/99) gkh
- Split up internals to make it easier to add different types of serial
- converters to the code.
- Added a "generic" driver that gets it's vendor and product id
- from when the module is loaded. Thanks to David E. Nelson (dnelson@jump.net)
- for the idea and sample code (from the usb scanner driver.)
- Cleared up any licensing questions by releasing it under the GNU GPL.
-
- version 0.1.2 (10/25/99) gkh
- Fixed bug in detecting device.
-
- version 0.1.1 (10/05/99) gkh
- Changed the major number to not conflict with anything else.
-
- version 0.1 (09/28/99) gkh
- Can recognize the two different devices and start up a read from
- device when asked to. Writes also work. No control signals yet, this
- all is vendor specific data (i.e. no spec), also no control for
- different baud rates or other bit settings.
- Currently we are using the same devid as the acm driver. This needs
- to change.
-
------------------------------------------------------------------------
-visor.c Change Log comments:
-
- (06/03/2003) Judd Montgomery <judd at jpilot.org>
- Added support for module parameter options for untested/unknown
- devices.
-
- (03/09/2003) gkh
- Added support for the Sony Clie NZ90V device. Thanks to Martin Brachtl
- <brachtl@redgrep.cz> for the information.
-
- (03/05/2003) gkh
- Think Treo support is now working.
-
- (04/03/2002) gkh
- Added support for the Sony OS 4.1 devices. Thanks to Hiroyuki ARAKI
- <hiro@zob.ne.jp> for the information.
-
- (03/27/2002) gkh
- Removed assumptions that port->tty was always valid (is not true
- for usb serial console devices.)
-
- (03/23/2002) gkh
- Added support for the Palm i705 device, thanks to Thomas Riemer
- <tom@netmech.com> for the information.
-
- (03/21/2002) gkh
- Added support for the Palm m130 device, thanks to Udo Eisenbarth
- <udo.eisenbarth@web.de> for the information.
-
- (02/27/2002) gkh
- Reworked the urb handling logic. We have no more pool, but dynamically
- allocate the urb and the transfer buffer on the fly. In testing this
- does not incure any measurable overhead. This also relies on the fact
- that we have proper reference counting logic for urbs.
-
- (02/21/2002) SilaS
- Added initial support for the Palm m515 devices.
-
- (02/14/2002) gkh
- Added support for the Clie S-360 device.
-
- (12/18/2001) gkh
- Added better Clie support for 3.5 devices. Thanks to Geoffrey Levand
- for the patch.
-
- (11/11/2001) gkh
- Added support for the m125 devices, and added check to prevent oopses
- for Clié devices that lie about the number of ports they have.
-
- (08/30/2001) gkh
- Added support for the Clie devices, both the 3.5 and 4.0 os versions.
- Many thanks to Daniel Burke, and Bryan Payne for helping with this.
-
- (08/23/2001) gkh
- fixed a few potential bugs pointed out by Oliver Neukum.
-
- (05/30/2001) gkh
- switched from using spinlock to a semaphore, which fixes lots of problems.
-
- (05/28/2000) gkh
- Added initial support for the Palm m500 and Palm m505 devices.
-
- (04/08/2001) gb
- Identify version on module load.
-
- (01/21/2000) gkh
- Added write_room and chars_in_buffer, as they were previously using the
- generic driver versions which is all wrong now that we are using an urb
- pool. Thanks to Wolfgang Grandegger for pointing this out to me.
- Removed count assignment in the write function, which was not needed anymore
- either. Thanks to Al Borchers for pointing this out.
-
- (12/12/2000) gkh
- Moved MOD_DEC to end of visor_close to be nicer, as the final write
- message can sleep.
-
- (11/12/2000) gkh
- Fixed bug with data being dropped on the floor by forcing tty->low_latency
- to be on. Hopefully this fixes the OHCI issue!
-
- (11/01/2000) Adam J. Richter
- usb_device_id table support
-
- (10/05/2000) gkh
- Fixed bug with urb->dev not being set properly, now that the usb
- core needs it.
-
- (09/11/2000) gkh
- Got rid of always calling kmalloc for every urb we wrote out to the
- device.
- Added visor_read_callback so we can keep track of bytes in and out for
- those people who like to know the speed of their device.
- Removed DEBUG #ifdefs with call to usb_serial_debug_data
-
- (09/06/2000) gkh
- Fixed oops in visor_exit. Need to uncomment usb_unlink_urb call _after_
- the host controller drivers set urb->dev = NULL when the urb is finished.
-
- (08/28/2000) gkh
- Added locks for SMP safeness.
-
- (08/08/2000) gkh
- Fixed endian problem in visor_startup.
- Fixed MOD_INC and MOD_DEC logic and the ability to open a port more
- than once.
-
- (07/23/2000) gkh
- Added pool of write urbs to speed up transfers to the visor.
-
- (07/19/2000) gkh
- Added module_init and module_exit functions to handle the fact that this
- driver is a loadable module now.
-
- (07/03/2000) gkh
- Added visor_set_ioctl and visor_set_termios functions (they don't do much
- of anything, but are good for debugging.)
-
- (06/25/2000) gkh
- Fixed bug in visor_unthrottle that should help with the disconnect in PPP
- bug that people have been reporting.
-
- (06/23/2000) gkh
- Cleaned up debugging statements in a quest to find UHCI timeout bug.
-
- (04/27/2000) Ryan VanderBijl
- Fixed memory leak in visor_close
-
- (03/26/2000) gkh
- Split driver up into device specific pieces.
-
------------------------------------------------------------------------
-pl2303.c Change Log comments:
-
- 2002_Mar_26 gkh
- allowed driver to work properly if there is no tty assigned to a port
- (this happens for serial console devices.)
-
- 2001_Oct_06 gkh
- Added RTS and DTR line control. Thanks to joe@bndlg.de for parts of it.
-
- 2001_Sep_19 gkh
- Added break support.
-
- 2001_Aug_30 gkh
- fixed oops in write_bulk_callback.
-
- 2001_Aug_28 gkh
- reworked buffer logic to be like other usb-serial drivers. Hopefully
- removing some reported problems.
-
- 2001_Jun_06 gkh
- finished porting to 2.4 format.
-
-
------------------------------------------------------------------------
-io_edgeport.c Change Log comments:
-
- 2003_04_03 al borchers
- - fixed a bug (that shows up with dosemu) where the tty struct is
- used in a callback after it has been freed
-
- 2.3 2002_03_08 greg kroah-hartman
- - fixed bug when multiple devices were attached at the same time.
-
- 2.2 2001_11_14 greg kroah-hartman
- - fixed bug in edge_close that kept the port from being used more
- than once.
- - fixed memory leak on device removal.
- - fixed potential double free of memory when command urb submitting
- failed.
- - other small cleanups when the device is removed
-
- 2.1 2001_07_09 greg kroah-hartman
- - added support for TIOCMBIS and TIOCMBIC.
-
- (04/08/2001) gb
- - Identify version on module load.
-
- 2.0 2001_03_05 greg kroah-hartman
- - reworked entire driver to fit properly in with the other usb-serial
- drivers. Occasional oopses still happen, but it's a good start.
-
- 1.2.3 (02/23/2001) greg kroah-hartman
- - changed device table to work properly for 2.4.x final format.
- - fixed problem with dropping data at high data rates.
-
- 1.2.2 (11/27/2000) greg kroah-hartman
- - cleaned up more NTisms.
- - Added device table for 2.4.0-test11
-
- 1.2.1 (11/08/2000) greg kroah-hartman
- - Started to clean up NTisms.
- - Fixed problem with dev field of urb for kernels >= 2.4.0-test9
-
- 1.2 (10/17/2000) David Iacovelli
- Remove all EPIC code and GPL source
- Fix RELEVANT_IFLAG macro to include flow control
- changes port configuration changes.
- Fix redefinition of SERIAL_MAGIC
- Change all timeout values to 5 seconds
- Tried to fix the UHCI multiple urb submission, but failed miserably.
- it seems to work fine with OHCI.
- ( Greg take a look at the #if 0 at end of WriteCmdUsb() we must
- find a way to work arount this UHCI bug )
-
- 1.1 (10/11/2000) David Iacovelli
- Fix XON/XOFF flow control to support both IXON and IXOFF
-
- 0.9.27 (06/30/2000) David Iacovelli
- Added transmit queue and now allocate urb for command writes.
-
- 0.9.26 (06/29/2000) David Iacovelli
- Add support for 80251 based edgeport
-
- 0.9.25 (06/27/2000) David Iacovelli
- Do not close the port if it has multiple opens.
-
- 0.9.24 (05/26/2000) David Iacovelli
- Add IOCTLs to support RXTX and JAVA POS
- and first cut at running BlackBox Demo
-
- 0.9.23 (05/24/2000) David Iacovelli
- Add IOCTLs to support RXTX and JAVA POS
-
- 0.9.22 (05/23/2000) David Iacovelli
- fixed bug in enumeration. If epconfig turns on mapping by
- path after a device is already plugged in, we now update
- the mapping correctly
-
- 0.9.21 (05/16/2000) David Iacovelli
- Added BlockUntilChaseResp() to also wait for txcredits
- Updated the way we allocate and handle write URBs
- Add debug code to dump buffers
-
- 0.9.20 (05/01/2000) David Iacovelli
- change driver to use usb/tts/
-
- 0.9.19 (05/01/2000) David Iacovelli
- Update code to compile if DEBUG is off
-
- 0.9.18 (04/28/2000) David Iacovelli
- cleanup and test tty_register with devfs
-
- 0.9.17 (04/27/2000) greg kroah-hartman
- changed tty_register around to be like the way it
- was before, but now it works properly with devfs.
-
- 0.9.16 (04/26/2000) david iacovelli
- Fixed bug in GetProductInfo()
-
- 0.9.15 (04/25/2000) david iacovelli
- Updated enumeration
-
- 0.9.14 (04/24/2000) david iacovelli
- Removed all config/status IOCTLS and
- converted to using /proc/edgeport
- still playing with devfs
-
- 0.9.13 (04/24/2000) david iacovelli
- Removed configuration based on ttyUSB0
- Added support for configuration using /prod/edgeport
- first attempt at using devfs (not working yet!)
- Added IOCTL to GetProductInfo()
- Added support for custom baud rates
- Add support for random port numbers
-
- 0.9.12 (04/18/2000) david iacovelli
- added additional configuration IOCTLs
- use ttyUSB0 for configuration
-
- 0.9.11 (04/17/2000) greg kroah-hartman
- fixed module initialization race conditions.
- made all urbs dynamically allocated.
- made driver devfs compatible. now it only registers the tty device
- when the device is actually plugged in.
-
- 0.9.10 (04/13/2000) greg kroah-hartman
- added proc interface framework.
-
- 0.9.9 (04/13/2000) david iacovelli
- added enumeration code and ioctls to configure the device
-
- 0.9.8 (04/12/2000) david iacovelli
- Change interrupt read start when device is plugged in
- and stop when device is removed
- process interrupt reads when all ports are closed
- (keep value of rxBytesAvail consistent with the edgeport)
- set the USB_BULK_QUEUE flag so that we can shove a bunch
- of urbs at once down the pipe
-
- 0.9.7 (04/10/2000) david iacovelli
- start to add enumeration code.
- generate serial number for epic devices
- add support for kdb
-
- 0.9.6 (03/30/2000) david iacovelli
- add IOCTL to get string, manufacture, and boot descriptors
-
- 0.9.5 (03/14/2000) greg kroah-hartman
- more error checking added to SerialOpen to try to fix UHCI open problem
-
- 0.9.4 (03/09/2000) greg kroah-hartman
- added more error checking to handle oops when data is hanging
- around and tty is abruptly closed.
-
- 0.9.3 (03/09/2000) david iacovelli
- Add epic support for xon/xoff chars
- play with performance
-
- 0.9.2 (03/08/2000) greg kroah-hartman
- changed most "info" calls to "dbg"
- implemented flow control properly in the termios call
-
- 0.9.1 (03/08/2000) david iacovelli
- added EPIC support
- enabled bootloader update
-
- 0.9 (03/08/2000) greg kroah-hartman
- Release to IO networks.
- Integrated changes that David made
- made getting urbs for writing SMP safe
-
- 0.8 (03/07/2000) greg kroah-hartman
- Release to IO networks.
- Fixed problems that were seen in code by David.
- Now both Edgeport/4 and Edgeport/2 works properly.
- Changed most of the functions to use port instead of serial.
-
- 0.7 (02/27/2000) greg kroah-hartman
- Milestone 3 release.
- Release to IO Networks
- ioctl for waiting on line change implemented.
- ioctl for getting statistics implemented.
- multiport support working.
- lsr and msr registers are now handled properly.
- change break now hooked up and working.
- support for all known Edgeport devices.
-
- 0.6 (02/22/2000) greg kroah-hartman
- Release to IO networks.
- CHASE is implemented correctly when port is closed.
- SerialOpen now blocks correctly until port is fully opened.
-
- 0.5 (02/20/2000) greg kroah-hartman
- Release to IO networks.
- Known problems:
- modem status register changes are not sent on to the user
- CHASE is not implemented when the port is closed.
-
- 0.4 (02/16/2000) greg kroah-hartman
- Second cut at the CeBit demo.
- Doesn't leak memory on every write to the port
- Still small leaks on startup.
- Added support for Edgeport/2 and Edgeport/8
-
- 0.3 (02/15/2000) greg kroah-hartman
- CeBit demo release.
- Force the line settings to 4800, 8, 1, e for the demo.
- Warning! This version leaks memory like crazy!
-
- 0.2 (01/30/2000) greg kroah-hartman
- Milestone 1 release.
- Device is found by USB subsystem, enumerated, firmware is downloaded
- and the descriptors are printed to the debug log, config is set, and
- green light starts to blink. Open port works, and data can be sent
- and received at the default settings of the UART. Loopback connector
- and debug log confirms this.
-
- 0.1 (01/23/2000) greg kroah-hartman
- Initial release to help IO Networks try to set up their test system.
- Edgeport4 is recognized, firmware is downloaded, config is set so
- device blinks green light every 3 sec. Port is bound, but opening,
- closing, and sending data do not work properly.
-
-
diff --git a/drivers/usb/serial/aircable.c b/drivers/usb/serial/aircable.c
index b43d07df4c44..123bf9155339 100644
--- a/drivers/usb/serial/aircable.c
+++ b/drivers/usb/serial/aircable.c
@@ -52,7 +52,7 @@
#include <linux/usb.h>
#include <linux/usb/serial.h>
-static int debug;
+static bool debug;
/* Vendor and Product ID */
#define AIRCABLE_VID 0x16CA
diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c
index 18e875b92e00..69328dcfd91a 100644
--- a/drivers/usb/serial/ark3116.c
+++ b/drivers/usb/serial/ark3116.c
@@ -37,7 +37,7 @@
#include <linux/mutex.h>
#include <linux/spinlock.h>
-static int debug;
+static bool debug;
/*
* Version information
*/
diff --git a/drivers/usb/serial/belkin_sa.c b/drivers/usb/serial/belkin_sa.c
index d6921fa1403c..29ffeb6279c7 100644
--- a/drivers/usb/serial/belkin_sa.c
+++ b/drivers/usb/serial/belkin_sa.c
@@ -20,50 +20,7 @@
* TODO:
 * -- Add true modem control line query capability. Currently we track the
* states reported by the interrupt and the states we request.
- * -- Add error reporting back to application for UART error conditions.
- * Just point me at how to implement this and I'll do it. I've put the
- * framework in, but haven't analyzed the "tty_flip" interface yet.
* -- Add support for flush commands
- * -- Add everything that is missing :)
- *
- * 27-Nov-2001 gkh
- * compressed all the differnent device entries into 1.
- *
- * 30-May-2001 gkh
- * switched from using spinlock to a semaphore, which fixes lots of
- * problems.
- *
- * 08-Apr-2001 gb
- * - Identify version on module load.
- *
- * 12-Mar-2001 gkh
- * - Added support for the GoHubs GO-COM232 device which is the same as the
- * Peracom device.
- *
- * 06-Nov-2000 gkh
- * - Added support for the old Belkin and Peracom devices.
- * - Made the port able to be opened multiple times.
- * - Added some defaults incase the line settings are things these devices
- * can't support.
- *
- * 18-Oct-2000 William Greathouse
- * Released into the wild (linux-usb-devel)
- *
- * 17-Oct-2000 William Greathouse
- * Add code to recognize firmware version and set hardware flow control
- * appropriately. Belkin states that firmware prior to 3.05 does not
- * operate correctly in hardware handshake mode. I have verified this
- * on firmware 2.05 -- for both RTS and DTR input flow control, the control
- * line is not reset. The test performed by the Belkin Win* driver is
- * to enable hardware flow control for firmware 2.06 or greater and
- * for 1.00 or prior. I am only enabling for 2.06 or greater.
- *
- * 12-Oct-2000 William Greathouse
- * First cut at supporting Belkin USB Serial Adapter F5U103
- * I did not have a copy of the original work to support this
- * adapter, so pardon any stupid mistakes. All of the information
- * I am using to write this driver was acquired by using a modified
- * UsbSnoop on Windows2000 and from examining the other USB drivers.
*/
#include <linux/kernel.h>
@@ -80,7 +37,7 @@
#include <linux/usb/serial.h>
#include "belkin_sa.h"
-static int debug;
+static bool debug;
/*
* Version Information
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index 6ae1c0688b5e..5e53cc59e652 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -70,7 +70,7 @@
#define CH341_NBREAK_BITS_REG2 0x40
-static int debug;
+static bool debug;
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x4348, 0x5523) },
@@ -335,13 +335,12 @@ static int ch341_open(struct tty_struct *tty, struct usb_serial_port *port)
goto out;
dbg("%s - submitting interrupt urb", __func__);
- port->interrupt_in_urb->dev = serial->dev;
r = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
if (r) {
dev_err(&port->dev, "%s - failed submitting interrupt urb,"
" error %d\n", __func__, r);
ch341_close(port);
- return -EPROTO;
+ goto out;
}
r = usb_serial_generic_open(tty, port);
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index fd67cc53545b..fba1147ed916 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -49,7 +49,7 @@ static void cp210x_break_ctl(struct tty_struct *, int);
static int cp210x_startup(struct usb_serial *);
static void cp210x_dtr_rts(struct usb_serial_port *p, int on);
-static int debug;
+static bool debug;
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x045B, 0x0053) }, /* Renesas RX610 RX-Stick */
@@ -92,6 +92,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x818B) }, /* AVIT Research USB to TTL */
{ USB_DEVICE(0x10C4, 0x819F) }, /* MJS USB Toslink Switcher */
{ USB_DEVICE(0x10C4, 0x81A6) }, /* ThinkOptics WavIt */
+ { USB_DEVICE(0x10C4, 0x81A9) }, /* Multiplex RC Interface */
{ USB_DEVICE(0x10C4, 0x81AC) }, /* MSD Dash Hawk */
{ USB_DEVICE(0x10C4, 0x81AD) }, /* INSYS USB Modem */
{ USB_DEVICE(0x10C4, 0x81C8) }, /* Lipowsky Industrie Elektronik GmbH, Baby-JTAG */
@@ -280,7 +281,10 @@ static int cp210x_get_config(struct usb_serial_port *port, u8 request,
dbg("%s - Unable to send config request, "
"request=0x%x size=%d result=%d\n",
__func__, request, size, result);
- return -EPROTO;
+ if (result > 0)
+ result = -EPROTO;
+
+ return result;
}
return 0;
@@ -331,7 +335,10 @@ static int cp210x_set_config(struct usb_serial_port *port, u8 request,
dbg("%s - Unable to send request, "
"request=0x%x size=%d result=%d\n",
__func__, request, size, result);
- return -EPROTO;
+ if (result > 0)
+ result = -EPROTO;
+
+ return result;
}
return 0;
@@ -395,10 +402,11 @@ static int cp210x_open(struct tty_struct *tty, struct usb_serial_port *port)
dbg("%s - port %d", __func__, port->number);
- if (cp210x_set_config_single(port, CP210X_IFC_ENABLE, UART_ENABLE)) {
- dev_err(&port->dev, "%s - Unable to enable UART\n",
- __func__);
- return -EPROTO;
+ result = cp210x_set_config_single(port, CP210X_IFC_ENABLE,
+ UART_ENABLE);
+ if (result) {
+ dev_err(&port->dev, "%s - Unable to enable UART\n", __func__);
+ return result;
}
result = usb_serial_generic_open(tty, port);
@@ -520,18 +528,13 @@ static void cp210x_get_termios_port(struct usb_serial_port *port,
cflag |= PARENB;
break;
case BITS_PARITY_MARK:
- dbg("%s - parity = MARK (not supported, disabling parity)",
- __func__);
- cflag &= ~PARENB;
- bits &= ~BITS_PARITY_MASK;
- cp210x_set_config(port, CP210X_SET_LINE_CTL, &bits, 2);
+ dbg("%s - parity = MARK", __func__);
+ cflag |= (PARENB|PARODD|CMSPAR);
break;
case BITS_PARITY_SPACE:
- dbg("%s - parity = SPACE (not supported, disabling parity)",
- __func__);
- cflag &= ~PARENB;
- bits &= ~BITS_PARITY_MASK;
- cp210x_set_config(port, CP210X_SET_LINE_CTL, &bits, 2);
+ dbg("%s - parity = SPACE", __func__);
+ cflag &= ~PARODD;
+ cflag |= (PARENB|CMSPAR);
break;
default:
dbg("%s - Unknown parity mode, disabling parity", __func__);
@@ -588,7 +591,6 @@ static void cp210x_set_termios(struct tty_struct *tty,
if (!tty)
return;
- tty->termios->c_cflag &= ~CMSPAR;
cflag = tty->termios->c_cflag;
old_cflag = old_termios->c_cflag;
baud = cp210x_quantise_baudrate(tty_get_baud_rate(tty));
@@ -643,16 +645,27 @@ static void cp210x_set_termios(struct tty_struct *tty,
"not supported by device\n");
}
- if ((cflag & (PARENB|PARODD)) != (old_cflag & (PARENB|PARODD))) {
+ if ((cflag & (PARENB|PARODD|CMSPAR)) !=
+ (old_cflag & (PARENB|PARODD|CMSPAR))) {
cp210x_get_config(port, CP210X_GET_LINE_CTL, &bits, 2);
bits &= ~BITS_PARITY_MASK;
if (cflag & PARENB) {
- if (cflag & PARODD) {
- bits |= BITS_PARITY_ODD;
- dbg("%s - parity = ODD", __func__);
+ if (cflag & CMSPAR) {
+ if (cflag & PARODD) {
+ bits |= BITS_PARITY_MARK;
+ dbg("%s - parity = MARK", __func__);
+ } else {
+ bits |= BITS_PARITY_SPACE;
+ dbg("%s - parity = SPACE", __func__);
+ }
} else {
- bits |= BITS_PARITY_EVEN;
- dbg("%s - parity = EVEN", __func__);
+ if (cflag & PARODD) {
+ bits |= BITS_PARITY_ODD;
+ dbg("%s - parity = ODD", __func__);
+ } else {
+ bits |= BITS_PARITY_EVEN;
+ dbg("%s - parity = EVEN", __func__);
+ }
}
}
if (cp210x_set_config(port, CP210X_SET_LINE_CTL, &bits, 2))
diff --git a/drivers/usb/serial/cyberjack.c b/drivers/usb/serial/cyberjack.c
index f744ab7a3b19..6bc3802a581a 100644
--- a/drivers/usb/serial/cyberjack.c
+++ b/drivers/usb/serial/cyberjack.c
@@ -43,7 +43,7 @@
#define CYBERJACK_LOCAL_BUF_SIZE 32
-static int debug;
+static bool debug;
/*
* Version Information
@@ -138,7 +138,6 @@ static int cyberjack_startup(struct usb_serial *serial)
for (i = 0; i < serial->num_ports; ++i) {
int result;
- serial->port[i]->interrupt_in_urb->dev = serial->dev;
result = usb_submit_urb(serial->port[i]->interrupt_in_urb,
GFP_KERNEL);
if (result)
@@ -208,7 +207,6 @@ static void cyberjack_close(struct usb_serial_port *port)
static int cyberjack_write(struct tty_struct *tty,
struct usb_serial_port *port, const unsigned char *buf, int count)
{
- struct usb_serial *serial = port->serial;
struct cyberjack_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
int result;
@@ -221,22 +219,18 @@ static int cyberjack_write(struct tty_struct *tty,
return 0;
}
- spin_lock_bh(&port->lock);
- if (port->write_urb_busy) {
- spin_unlock_bh(&port->lock);
+ if (!test_and_clear_bit(0, &port->write_urbs_free)) {
dbg("%s - already writing", __func__);
return 0;
}
- port->write_urb_busy = 1;
- spin_unlock_bh(&port->lock);
spin_lock_irqsave(&priv->lock, flags);
if (count+priv->wrfilled > sizeof(priv->wrbuf)) {
		/* Too much data for buffer. Reset buffer. */
priv->wrfilled = 0;
- port->write_urb_busy = 0;
spin_unlock_irqrestore(&priv->lock, flags);
+ set_bit(0, &port->write_urbs_free);
return 0;
}
@@ -265,13 +259,7 @@ static int cyberjack_write(struct tty_struct *tty,
priv->wrsent = length;
/* set up our urb */
- usb_fill_bulk_urb(port->write_urb, serial->dev,
- usb_sndbulkpipe(serial->dev, port->bulk_out_endpointAddress),
- port->write_urb->transfer_buffer, length,
- ((serial->type->write_bulk_callback) ?
- serial->type->write_bulk_callback :
- cyberjack_write_bulk_callback),
- port);
+ port->write_urb->transfer_buffer_length = length;
/* send the data out the bulk port */
result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
@@ -283,7 +271,7 @@ static int cyberjack_write(struct tty_struct *tty,
priv->wrfilled = 0;
priv->wrsent = 0;
spin_unlock_irqrestore(&priv->lock, flags);
- port->write_urb_busy = 0;
+ set_bit(0, &port->write_urbs_free);
return 0;
}
@@ -351,7 +339,6 @@ static void cyberjack_read_int_callback(struct urb *urb)
spin_unlock(&priv->lock);
if (!old_rdtodo) {
- port->read_urb->dev = port->serial->dev;
result = usb_submit_urb(port->read_urb, GFP_ATOMIC);
if (result)
dev_err(&port->dev, "%s - failed resubmitting "
@@ -362,7 +349,6 @@ static void cyberjack_read_int_callback(struct urb *urb)
}
resubmit:
- port->interrupt_in_urb->dev = port->serial->dev;
result = usb_submit_urb(port->interrupt_in_urb, GFP_ATOMIC);
if (result)
dev_err(&port->dev, "usb_submit_urb(read int) failed\n");
@@ -415,7 +401,6 @@ static void cyberjack_read_bulk_callback(struct urb *urb)
/* Continue to read if we have still urbs to do. */
if (todo /* || (urb->actual_length==port->bulk_in_endpointAddress)*/) {
- port->read_urb->dev = port->serial->dev;
result = usb_submit_urb(port->read_urb, GFP_ATOMIC);
if (result)
dev_err(&port->dev, "%s - failed resubmitting read "
@@ -432,7 +417,7 @@ static void cyberjack_write_bulk_callback(struct urb *urb)
dbg("%s - port %d", __func__, port->number);
- port->write_urb_busy = 0;
+ set_bit(0, &port->write_urbs_free);
if (status) {
dbg("%s - nonzero write bulk status received: %d",
__func__, status);
@@ -455,13 +440,7 @@ static void cyberjack_write_bulk_callback(struct urb *urb)
priv->wrsent += length;
/* set up our urb */
- usb_fill_bulk_urb(port->write_urb, port->serial->dev,
- usb_sndbulkpipe(port->serial->dev, port->bulk_out_endpointAddress),
- port->write_urb->transfer_buffer, length,
- ((port->serial->type->write_bulk_callback) ?
- port->serial->type->write_bulk_callback :
- cyberjack_write_bulk_callback),
- port);
+ port->write_urb->transfer_buffer_length = length;
/* send the data out the bulk port */
result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
index d9906eb9d16a..3bdeafa29c24 100644
--- a/drivers/usb/serial/cypress_m8.c
+++ b/drivers/usb/serial/cypress_m8.c
@@ -16,32 +16,6 @@
*
* See http://geocities.com/i0xox0i for information on this driver and the
* earthmate usb device.
- *
- * Lonnie Mendez <dignome@gmail.com>
- * 4-29-2005
- * Fixed problem where setting or retreiving the serial config would fail
- * with EPIPE. Removed CRTS toggling so the driver behaves more like
- * other usbserial adapters. Issued new interval of 1ms instead of the
- * default 10ms. As a result, transfer speed has been substantially
- * increased from avg. 850bps to avg. 3300bps. initial termios has also
- * been modified. Cleaned up code and formatting issues so it is more
- * readable. Replaced the C++ style comments.
- *
- * Lonnie Mendez <dignome@gmail.com>
- * 12-15-2004
- * Incorporated write buffering from pl2303 driver. Fixed bug with line
- * handling so both lines are raised in cypress_open. (was dropping rts)
- * Various code cleanups made as well along with other misc bug fixes.
- *
- * Lonnie Mendez <dignome@gmail.com>
- * 04-10-2004
- * Driver modified to support dynamic line settings. Various improvements
- * and features.
- *
- * Neil Whelchel
- * 10-2003
- * Driver first released.
- *
*/
/* Thanks to Neil Whelchel for writing the first cypress m8 implementation
@@ -72,10 +46,10 @@
#include "cypress_m8.h"
-static int debug;
-static int stats;
+static bool debug;
+static bool stats;
static int interval;
-static int unstable_bauds;
+static bool unstable_bauds;
/*
* Version Information
@@ -1162,8 +1136,6 @@ static void cypress_unthrottle(struct tty_struct *tty)
return;
if (actually_throttled) {
- port->interrupt_in_urb->dev = port->serial->dev;
-
result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
if (result) {
dev_err(&port->dev, "%s - failed submitting read urb, "
@@ -1352,7 +1324,6 @@ static void cypress_write_int_callback(struct urb *urb)
dbg("%s - nonzero write bulk status received: %d",
__func__, status);
port->interrupt_out_urb->transfer_buffer_length = 1;
- port->interrupt_out_urb->dev = port->serial->dev;
result = usb_submit_urb(port->interrupt_out_urb, GFP_ATOMIC);
if (!result)
return;
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index e92cbefc0f88..b23bebd721a1 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -13,222 +13,6 @@
*
* Peter Berger (pberger@brimson.com)
* Al Borchers (borchers@steinerpoint.com)
-*
-* (12/03/2001) gkh
-* switched to using port->port.count instead of private version.
-* Removed port->active
-*
-* (04/08/2001) gb
-* Identify version on module load.
-*
-* (11/01/2000) Adam J. Richter
-* usb_device_id table support
-*
-* (11/01/2000) pberger and borchers
-* -- Turned off the USB_DISABLE_SPD flag for write bulk urbs--it caused
-* USB 4 ports to hang on startup.
-* -- Serialized access to write urbs by adding the dp_write_urb_in_use
-* flag; otherwise, the driver caused SMP system hangs. Watching the
-* urb status is not sufficient.
-*
-* (10/05/2000) gkh
-* -- Fixed bug with urb->dev not being set properly, now that the usb
-* core needs it.
-*
-* (8/8/2000) pberger and borchers
-* -- Fixed close so that
-* - it can timeout while waiting for transmit idle, if needed;
-* - it ignores interrupts when flushing the port, turning
-* of modem signalling, and so on;
-* - it waits for the flush to really complete before returning.
-* -- Read_bulk_callback and write_bulk_callback check for a closed
-* port before using the tty struct or writing to the port.
-* -- The two changes above fix the oops caused by interrupted closes.
-* -- Added interruptible args to write_oob_command and set_modem_signals
-* and added a timeout arg to transmit_idle; needed for fixes to
-* close.
-* -- Added code for rx_throttle and rx_unthrottle so that input flow
-* control works.
-* -- Added code to set overrun, parity, framing, and break errors
-* (untested).
-* -- Set USB_DISABLE_SPD flag for write bulk urbs, so no 0 length
-* bulk writes are done. These hung the Digi USB device. The
-* 0 length bulk writes were a new feature of usb-uhci added in
-* the 2.4.0-test6 kernels.
-* -- Fixed mod inc race in open; do mod inc before sleeping to wait
-* for a close to finish.
-*
-* (7/31/2000) pberger
-* -- Fixed bugs with hardware handshaking:
-* - Added code to set/clear tty->hw_stopped in digi_read_oob_callback()
-* and digi_set_termios()
-* -- Added code in digi_set_termios() to
-* - add conditional in code handling transition from B0 to only
-* set RTS if RTS/CTS flow control is either not in use or if
-* the port is not currently throttled.
-* - handle turning off CRTSCTS.
-*
-* (7/30/2000) borchers
-* -- Added support for more than one Digi USB device by moving
-* globals to a private structure in the pointed to from the
-* usb_serial structure.
-* -- Moved the modem change and transmit idle wait queues into
-* the port private structure, so each port has its own queue
-* rather than sharing global queues.
-* -- Added support for break signals.
-*
-* (7/25/2000) pberger
-* -- Added USB-2 support. Note: the USB-2 supports 3 devices: two
-* serial and a parallel port. The parallel port is implemented
-* as a serial-to-parallel converter. That is, the driver actually
-* presents all three USB-2 interfaces as serial ports, but the third
-* one physically connects to a parallel device. Thus, for example,
-* one could plug a parallel printer into the USB-2's third port,
-* but from the kernel's (and userland's) point of view what's
-* actually out there is a serial device.
-*
-* (7/15/2000) borchers
-* -- Fixed race in open when a close is in progress.
-* -- Keep count of opens and dec the module use count for each
-* outstanding open when shutdown is called (on disconnect).
-* -- Fixed sanity checks in read_bulk_callback and write_bulk_callback
-* so pointers are checked before use.
-* -- Split read bulk callback into in band and out of band
-* callbacks, and no longer restart read chains if there is
-* a status error or a sanity error. This fixed the seg
-* faults and other errors we used to get on disconnect.
-* -- Port->active is once again a flag as usb-serial intended it
-* to be, not a count. Since it was only a char it would
-* have been limited to 256 simultaneous opens. Now the open
-* count is kept in the port private structure in dp_open_count.
-* -- Added code for modularization of the digi_acceleport driver.
-*
-* (6/27/2000) pberger and borchers
-* -- Zeroed out sync field in the wakeup_task before first use;
-* otherwise the uninitialized value might prevent the task from
-* being scheduled.
-* -- Initialized ret value to 0 in write_bulk_callback, otherwise
-* the uninitialized value could cause a spurious debugging message.
-*
-* (6/22/2000) pberger and borchers
-* -- Made cond_wait_... inline--apparently on SPARC the flags arg
-* to spin_lock_irqsave cannot be passed to another function
-* to call spin_unlock_irqrestore. Thanks to Pauline Middelink.
-* -- In digi_set_modem_signals the inner nested spin locks use just
-* spin_lock() rather than spin_lock_irqsave(). The old code
-* mistakenly left interrupts off. Thanks to Pauline Middelink.
-* -- copy_from_user (which can sleep) is no longer called while a
-* spinlock is held. We copy to a local buffer before getting
-* the spinlock--don't like the extra copy but the code is simpler.
-* -- Printk and dbg are no longer called while a spin lock is held.
-*
-* (6/4/2000) pberger and borchers
-* -- Replaced separate calls to spin_unlock_irqrestore and
-* interruptible_sleep_on_timeout with a new function
-* cond_wait_interruptible_timeout_irqrestore. This eliminates
-* the race condition where the wake up could happen after
-* the unlock and before the sleep.
-* -- Close now waits for output to drain.
-* -- Open waits until any close in progress is finished.
-* -- All out of band responses are now processed, not just the
-* first in a USB packet.
-* -- Fixed a bug that prevented the driver from working when the
-* first Digi port was not the first USB serial port--the driver
-* was mistakenly using the external USB serial port number to
-* try to index into its internal ports.
-* -- Fixed an SMP bug -- write_bulk_callback is called directly from
-* an interrupt, so spin_lock_irqsave/spin_unlock_irqrestore are
-* needed for locks outside write_bulk_callback that are also
-* acquired by write_bulk_callback to prevent deadlocks.
-* -- Fixed support for select() by making digi_chars_in_buffer()
-* return 256 when -EINPROGRESS is set, as the line discipline
-* code in n_tty.c expects.
-* -- Fixed an include file ordering problem that prevented debugging
-* messages from working.
-* -- Fixed an intermittent timeout problem that caused writes to
-* sometimes get stuck on some machines on some kernels. It turns
-* out in these circumstances write_chan() (in n_tty.c) was
-* asleep waiting for our wakeup call. Even though we call
-* wake_up_interruptible() in digi_write_bulk_callback(), there is
-* a race condition that could cause the wakeup to fail: if our
-* wake_up_interruptible() call occurs between the time that our
-* driver write routine finishes and write_chan() sets current->state
-* to TASK_INTERRUPTIBLE, the effect of our wakeup setting the state
-* to TASK_RUNNING will be lost and write_chan's subsequent call to
-* schedule() will never return (unless it catches a signal).
-* This race condition occurs because write_bulk_callback() (and thus
-* the wakeup) are called asynchronously from an interrupt, rather than
-* from the scheduler. We can avoid the race by calling the wakeup
-* from the scheduler queue and that's our fix: Now, at the end of
-* write_bulk_callback() we queue up a wakeup call on the scheduler
-* task queue. We still also invoke the wakeup directly since that
-* squeezes a bit more performance out of the driver, and any lost
-* race conditions will get cleaned up at the next scheduler run.
-*
-* NOTE: The problem also goes away if you comment out
-* the two code lines in write_chan() where current->state
-* is set to TASK_RUNNING just before calling driver.write() and to
-* TASK_INTERRUPTIBLE immediately afterwards. This is why the
-* problem did not show up with the 2.2 kernels -- they do not
-* include that code.
-*
-* (5/16/2000) pberger and borchers
-* -- Added timeouts to sleeps, to defend against lost wake ups.
-* -- Handle transition to/from B0 baud rate in digi_set_termios.
-*
-* (5/13/2000) pberger and borchers
-* -- All commands now sent on out of band port, using
-* digi_write_oob_command.
-* -- Get modem control signals whenever they change, support TIOCMGET/
-* SET/BIS/BIC ioctls.
-* -- digi_set_termios now supports parity, word size, stop bits, and
-* receive enable.
-* -- Cleaned up open and close, use digi_set_termios and
-* digi_write_oob_command to set port parameters.
-* -- Added digi_startup_device to start read chains on all ports.
-* -- Write buffer is only used when count==1, to be sure put_char can
-* write a char (unless the buffer is full).
-*
-* (5/10/2000) pberger and borchers
-* -- Added MOD_INC_USE_COUNT/MOD_DEC_USE_COUNT calls on open/close.
-* -- Fixed problem where the first incoming character is lost on
-* port opens after the first close on that port. Now we keep
-* the read_urb chain open until shutdown.
-* -- Added more port conditioning calls in digi_open and digi_close.
-* -- Convert port->active to a use count so that we can deal with multiple
-* opens and closes properly.
-* -- Fixed some problems with the locking code.
-*
-* (5/3/2000) pberger and borchers
-* -- First alpha version of the driver--many known limitations and bugs.
-*
-*
-* Locking and SMP
-*
-* - Each port, including the out-of-band port, has a lock used to
-* serialize all access to the port's private structure.
-* - The port lock is also used to serialize all writes and access to
-* the port's URB.
-* - The port lock is also used for the port write_wait condition
-* variable. Holding the port lock will prevent a wake up on the
-* port's write_wait; this can be used with cond_wait_... to be sure
-* the wake up is not lost in a race when dropping the lock and
-* sleeping waiting for the wakeup.
-* - digi_write() does not sleep, since it is sometimes called on
-* interrupt time.
-* - digi_write_bulk_callback() and digi_read_bulk_callback() are
-* called directly from interrupts. Hence spin_lock_irqsave()
-* and spin_unlock_irqrestore() are used in the rest of the code
-* for any locks they acquire.
-* - digi_write_bulk_callback() gets the port lock before waking up
-* processes sleeping on the port write_wait. It also schedules
-* wake ups so they happen from the scheduler, because the tty
-* system can miss wake ups from interrupts.
-* - All sleeps use a timeout of DIGI_RETRY_TIMEOUT before looping to
-* recheck the condition they are sleeping on. This is defensive,
-* in case a wake up is lost.
-* - Following Documentation/DocBook/kernel-locking.tmpl no spin locks
-* are held when calling copy_to/from_user or printk.
*/
#include <linux/kernel.h>
@@ -467,7 +251,7 @@ static int digi_read_oob_callback(struct urb *urb);
/* Statics */
-static int debug;
+static bool debug;
static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(DIGI_VENDOR_ID, DIGI_2_ID) },
@@ -654,7 +438,6 @@ static int digi_write_oob_command(struct usb_serial_port *port,
len &= ~3;
memcpy(oob_port->write_urb->transfer_buffer, buf, len);
oob_port->write_urb->transfer_buffer_length = len;
- oob_port->write_urb->dev = port->serial->dev;
ret = usb_submit_urb(oob_port->write_urb, GFP_ATOMIC);
if (ret == 0) {
oob_priv->dp_write_urb_in_use = 1;
@@ -732,7 +515,6 @@ static int digi_write_inb_command(struct usb_serial_port *port,
memcpy(data, buf, len);
port->write_urb->transfer_buffer_length = len;
}
- port->write_urb->dev = port->serial->dev;
ret = usb_submit_urb(port->write_urb, GFP_ATOMIC);
if (ret == 0) {
@@ -803,7 +585,6 @@ static int digi_set_modem_signals(struct usb_serial_port *port,
data[7] = 0;
oob_port->write_urb->transfer_buffer_length = 8;
- oob_port->write_urb->dev = port->serial->dev;
ret = usb_submit_urb(oob_port->write_urb, GFP_ATOMIC);
if (ret == 0) {
@@ -899,10 +680,8 @@ static void digi_rx_unthrottle(struct tty_struct *tty)
spin_lock_irqsave(&priv->dp_port_lock, flags);
/* restart read chain */
- if (priv->dp_throttle_restart) {
- port->read_urb->dev = port->serial->dev;
+ if (priv->dp_throttle_restart)
ret = usb_submit_urb(port->read_urb, GFP_ATOMIC);
- }
/* turn throttle off */
priv->dp_throttled = 0;
@@ -1195,7 +974,6 @@ static int digi_write(struct tty_struct *tty, struct usb_serial_port *port,
}
port->write_urb->transfer_buffer_length = data_len+2;
- port->write_urb->dev = port->serial->dev;
*data++ = DIGI_CMD_SEND_DATA;
*data++ = data_len;
@@ -1271,7 +1049,6 @@ static void digi_write_bulk_callback(struct urb *urb)
= (unsigned char)priv->dp_out_buf_len;
port->write_urb->transfer_buffer_length =
priv->dp_out_buf_len + 2;
- port->write_urb->dev = serial->dev;
memcpy(port->write_urb->transfer_buffer + 2, priv->dp_out_buf,
priv->dp_out_buf_len);
ret = usb_submit_urb(port->write_urb, GFP_ATOMIC);
@@ -1473,7 +1250,6 @@ static int digi_startup_device(struct usb_serial *serial)
/* set USB_DISABLE_SPD flag for write bulk urbs */
for (i = 0; i < serial->type->num_ports + 1; i++) {
port = serial->port[i];
- port->write_urb->dev = port->serial->dev;
ret = usb_submit_urb(port->read_urb, GFP_KERNEL);
if (ret != 0) {
dev_err(&port->dev,
@@ -1616,7 +1392,6 @@ static void digi_read_bulk_callback(struct urb *urb)
}
/* continue read */
- urb->dev = port->serial->dev;
ret = usb_submit_urb(urb, GFP_ATOMIC);
if (ret != 0 && ret != -EPERM) {
dev_err(&port->dev,
diff --git a/drivers/usb/serial/empeg.c b/drivers/usb/serial/empeg.c
index 504b5585ea45..aced6817bf95 100644
--- a/drivers/usb/serial/empeg.c
+++ b/drivers/usb/serial/empeg.c
@@ -28,7 +28,7 @@
#include <linux/usb.h>
#include <linux/usb/serial.h>
-static int debug;
+static bool debug;
/*
* Version Information
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index bd4298bb6750..01b6404df395 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -55,7 +55,7 @@
#define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>, Bill Ryder <bryder@sgi.com>, Kuba Ober <kuba@mareimbrium.org>, Andreas Mohr, Johan Hovold <jhovold@gmail.com>"
#define DRIVER_DESC "USB FTDI Serial Converters Driver"
-static int debug;
+static bool debug;
static __u16 vendor = FTDI_VID;
static __u16 product;
@@ -736,6 +736,7 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELSTER_UNICOM_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_PROPOX_JTAGCABLEII_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_PROPOX_ISPCABLEIII_PID) },
{ USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_H_PID),
@@ -2104,6 +2105,9 @@ static void ftdi_set_termios(struct tty_struct *tty,
cflag = termios->c_cflag;
+ if (old_termios == 0)
+ goto no_skip;
+
if (old_termios->c_cflag == termios->c_cflag
&& old_termios->c_ispeed == termios->c_ispeed
&& old_termios->c_ospeed == termios->c_ospeed)
@@ -2117,6 +2121,7 @@ static void ftdi_set_termios(struct tty_struct *tty,
(termios->c_cflag & (CSIZE|PARODD|PARENB|CMSPAR|CSTOPB)))
goto no_data_parity_stop_changes;
+no_skip:
/* Set number of data bits, parity, stop bits */
urb_value = 0;
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 571fa96b49c7..df1d7da933ec 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -2,7 +2,7 @@
* vendor/product IDs (VID/PID) of devices using FTDI USB serial converters.
* Please keep numerically sorted within individual areas, thanks!
*
- * Philipp Ghring - pg@futureware.at - added the Device ID of the USB relais
+ * Philipp Gühring - pg@futureware.at - added the Device ID of the USB relais
* from Rudolf Gugler
*
*/
@@ -78,7 +78,7 @@
*/
#define FTDI_ASK_RDR400_PID 0xC991 /* ASK RDR 400 series card reader */
-/* www.starting-point-systems.com Chameleon device */
+/* www.starting-point-systems.com µChameleon device */
#define FTDI_MICRO_CHAMELEON_PID 0xCAA0 /* Product Id */
/*
@@ -112,6 +112,7 @@
/* Propox devices */
#define FTDI_PROPOX_JTAGCABLEII_PID 0xD738
+#define FTDI_PROPOX_ISPCABLEIII_PID 0xD739
/* Lenz LI-USB Computer Interface. */
#define FTDI_LENZ_LIUSB_PID 0xD780
@@ -290,7 +291,7 @@
/*
* Teratronik product ids.
- * Submitted by O. Wlfelschneider.
+ * Submitted by O. Wölfelschneider.
*/
#define FTDI_TERATRONIK_VCP_PID 0xEC88 /* Teratronik device (preferring VCP driver on windows) */
#define FTDI_TERATRONIK_D2XX_PID 0xEC89 /* Teratronik device (preferring D2XX driver on windows) */
diff --git a/drivers/usb/serial/funsoft.c b/drivers/usb/serial/funsoft.c
index e21ce9ddfc63..5d4b099dcf8b 100644
--- a/drivers/usb/serial/funsoft.c
+++ b/drivers/usb/serial/funsoft.c
@@ -16,7 +16,7 @@
#include <linux/usb/serial.h>
#include <linux/uaccess.h>
-static int debug;
+static bool debug;
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x1404, 0xcddc) },
diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
index 1a49ca9c8ea5..21343378c322 100644
--- a/drivers/usb/serial/garmin_gps.c
+++ b/drivers/usb/serial/garmin_gps.c
@@ -42,7 +42,7 @@
static int initial_mode = 1;
/* debug flag */
-static int debug;
+static bool debug;
#define GARMIN_VENDOR_ID 0x091E
@@ -901,7 +901,6 @@ static int garmin_init_session(struct usb_serial_port *port)
usb_kill_urb(port->interrupt_in_urb);
dbg("%s - adding interrupt input", __func__);
- port->interrupt_in_urb->dev = serial->dev;
status = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
if (status)
dev_err(&serial->dev->dev,
@@ -1277,7 +1276,6 @@ static void garmin_read_int_callback(struct urb *urb)
unsigned long flags;
int retval;
struct usb_serial_port *port = urb->context;
- struct usb_serial *serial = port->serial;
struct garmin_data *garmin_data_p = usb_get_serial_port_data(port);
unsigned char *data = urb->transfer_buffer;
int status = urb->status;
@@ -1311,12 +1309,6 @@ static void garmin_read_int_callback(struct urb *urb)
if (0 == (garmin_data_p->flags & FLAGS_BULK_IN_ACTIVE)) {
/* bulk data available */
- usb_fill_bulk_urb(port->read_urb, serial->dev,
- usb_rcvbulkpipe(serial->dev,
- port->bulk_in_endpointAddress),
- port->read_urb->transfer_buffer,
- port->read_urb->transfer_buffer_length,
- garmin_read_bulk_callback, port);
retval = usb_submit_urb(port->read_urb, GFP_ATOMIC);
if (retval) {
dev_err(&port->dev,
@@ -1353,7 +1345,6 @@ static void garmin_read_int_callback(struct urb *urb)
garmin_read_process(garmin_data_p, data, urb->actual_length, 0);
- port->interrupt_in_urb->dev = port->serial->dev;
retval = usb_submit_urb(urb, GFP_ATOMIC);
if (retval)
dev_err(&urb->dev->dev,
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index e4db5ad2bc55..f7403576f99f 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -1,7 +1,7 @@
/*
* USB Serial Converter Generic functions
*
- * Copyright (C) 2010 Johan Hovold (jhovold@gmail.com)
+ * Copyright (C) 2010 - 2011 Johan Hovold (jhovold@gmail.com)
* Copyright (C) 1999 - 2002 Greg Kroah-Hartman (greg@kroah.com)
*
* This program is free software; you can redistribute it and/or
@@ -132,7 +132,7 @@ int usb_serial_generic_open(struct tty_struct *tty, struct usb_serial_port *port
/* if we have a bulk endpoint, start reading from it */
if (port->bulk_in_size)
- result = usb_serial_generic_submit_read_urb(port, GFP_KERNEL);
+ result = usb_serial_generic_submit_read_urbs(port, GFP_KERNEL);
return result;
}
@@ -157,8 +157,10 @@ static void generic_cleanup(struct usb_serial_port *port)
kfifo_reset_out(&port->write_fifo);
spin_unlock_irqrestore(&port->lock, flags);
}
- if (port->bulk_in_size)
- usb_kill_urb(port->read_urb);
+ if (port->bulk_in_size) {
+ for (i = 0; i < ARRAY_SIZE(port->read_urbs); ++i)
+ usb_kill_urb(port->read_urbs[i]);
+ }
}
}
@@ -308,19 +310,52 @@ int usb_serial_generic_chars_in_buffer(struct tty_struct *tty)
return chars;
}
-int usb_serial_generic_submit_read_urb(struct usb_serial_port *port,
+static int usb_serial_generic_submit_read_urb(struct usb_serial_port *port,
+ int index, gfp_t mem_flags)
+{
+ int res;
+
+ if (!test_and_clear_bit(index, &port->read_urbs_free))
+ return 0;
+
+ dbg("%s - port %d, urb %d\n", __func__, port->number, index);
+
+ res = usb_submit_urb(port->read_urbs[index], mem_flags);
+ if (res) {
+ if (res != -EPERM) {
+ dev_err(&port->dev,
+ "%s - usb_submit_urb failed: %d\n",
+ __func__, res);
+ }
+ set_bit(index, &port->read_urbs_free);
+ return res;
+ }
+
+ return 0;
+}
+
+int usb_serial_generic_submit_read_urbs(struct usb_serial_port *port,
gfp_t mem_flags)
{
- int result;
+ int res;
+ int i;
- result = usb_submit_urb(port->read_urb, mem_flags);
- if (result && result != -EPERM) {
- dev_err(&port->dev, "%s - error submitting urb: %d\n",
- __func__, result);
+ dbg("%s - port %d", __func__, port->number);
+
+ for (i = 0; i < ARRAY_SIZE(port->read_urbs); ++i) {
+ res = usb_serial_generic_submit_read_urb(port, i, mem_flags);
+ if (res)
+ goto err;
}
- return result;
+
+ return 0;
+err:
+ for (; i >= 0; --i)
+ usb_kill_urb(port->read_urbs[i]);
+
+ return res;
}
-EXPORT_SYMBOL_GPL(usb_serial_generic_submit_read_urb);
+EXPORT_SYMBOL_GPL(usb_serial_generic_submit_read_urbs);
void usb_serial_generic_process_read_urb(struct urb *urb)
{
@@ -356,14 +391,19 @@ void usb_serial_generic_read_bulk_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
unsigned char *data = urb->transfer_buffer;
- int status = urb->status;
unsigned long flags;
+ int i;
- dbg("%s - port %d", __func__, port->number);
+ for (i = 0; i < ARRAY_SIZE(port->read_urbs); ++i) {
+ if (urb == port->read_urbs[i])
+ break;
+ }
+ set_bit(i, &port->read_urbs_free);
- if (unlikely(status != 0)) {
- dbg("%s - nonzero read bulk status received: %d",
- __func__, status);
+ dbg("%s - port %d, urb %d, len %d\n", __func__, port->number, i,
+ urb->actual_length);
+ if (urb->status) {
+ dbg("%s - non-zero urb status: %d\n", __func__, urb->status);
return;
}
@@ -376,7 +416,7 @@ void usb_serial_generic_read_bulk_callback(struct urb *urb)
port->throttled = port->throttle_req;
if (!port->throttled) {
spin_unlock_irqrestore(&port->lock, flags);
- usb_serial_generic_submit_read_urb(port, GFP_ATOMIC);
+ usb_serial_generic_submit_read_urb(port, i, GFP_ATOMIC);
} else
spin_unlock_irqrestore(&port->lock, flags);
}
@@ -443,7 +483,7 @@ void usb_serial_generic_unthrottle(struct tty_struct *tty)
spin_unlock_irq(&port->lock);
if (was_throttled)
- usb_serial_generic_submit_read_urb(port, GFP_KERNEL);
+ usb_serial_generic_submit_read_urbs(port, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(usb_serial_generic_unthrottle);
@@ -509,8 +549,9 @@ int usb_serial_generic_resume(struct usb_serial *serial)
if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags))
continue;
- if (port->read_urb) {
- r = usb_submit_urb(port->read_urb, GFP_NOIO);
+ if (port->bulk_in_size) {
+ r = usb_serial_generic_submit_read_urbs(port,
+ GFP_NOIO);
if (r < 0)
c++;
}
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index 2ee807523f53..0497575e4799 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -191,7 +191,7 @@ static const struct divisor_table_entry divisor_table[] = {
};
/* local variables */
-static int debug;
+static bool debug;
static atomic_t CmdUrbs; /* Number of outstanding Command Write Urbs */
@@ -610,7 +610,6 @@ static void edge_interrupt_callback(struct urb *urb)
/* we have pending bytes on the
bulk in pipe, send a request */
- edge_serial->read_urb->dev = edge_serial->serial->dev;
result = usb_submit_urb(edge_serial->read_urb, GFP_ATOMIC);
if (result) {
dev_err(&edge_serial->serial->dev->dev, "%s - usb_submit_urb(read bulk) failed with result = %d\n", __func__, result);
@@ -711,7 +710,6 @@ static void edge_bulk_in_callback(struct urb *urb)
/* check to see if there's any more data for us to read */
if (edge_serial->rxBytesAvail > 0) {
dbg("%s - posting a read", __func__);
- edge_serial->read_urb->dev = edge_serial->serial->dev;
retval = usb_submit_urb(edge_serial->read_urb, GFP_ATOMIC);
if (retval) {
dev_err(&urb->dev->dev,
@@ -1330,7 +1328,6 @@ static void send_more_port_data(struct edgeport_serial *edge_serial,
edge_port->txCredits -= count;
edge_port->icount.tx += count;
- urb->dev = edge_serial->serial->dev;
status = usb_submit_urb(urb, GFP_ATOMIC);
if (status) {
/* something went wrong */
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index 0aac00afb5c8..65bf06aa591a 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -15,13 +15,6 @@
* For questions or problems with this driver, contact Inside Out
* Networks technical support, or Peter Berger <pberger@brimson.com>,
* or Al Borchers <alborchers@steinerpoint.com>.
- *
- * Version history:
- *
- * July 11, 2002 Removed 4 port device structure since all TI UMP
- * chips have only 2 ports
- * David Iacovelli (davidi@ionetworks.com)
- *
*/
#include <linux/kernel.h>
@@ -217,10 +210,10 @@ static unsigned char OperationalMajorVersion;
static unsigned char OperationalMinorVersion;
static unsigned short OperationalBuildNumber;
-static int debug;
+static bool debug;
static int closing_wait = EDGE_CLOSING_WAIT;
-static int ignore_cpu_rev;
+static bool ignore_cpu_rev;
static int default_uart_mode; /* RS232 */
static void edge_tty_recv(struct device *dev, struct tty_struct *tty,
@@ -1777,12 +1770,11 @@ static void edge_bulk_in_callback(struct urb *urb)
exit:
/* continue read unless stopped */
spin_lock(&edge_port->ep_lock);
- if (edge_port->ep_read_urb_state == EDGE_READ_URB_RUNNING) {
- urb->dev = edge_port->port->serial->dev;
+ if (edge_port->ep_read_urb_state == EDGE_READ_URB_RUNNING)
retval = usb_submit_urb(urb, GFP_ATOMIC);
- } else if (edge_port->ep_read_urb_state == EDGE_READ_URB_STOPPING) {
+ else if (edge_port->ep_read_urb_state == EDGE_READ_URB_STOPPING)
edge_port->ep_read_urb_state = EDGE_READ_URB_STOPPED;
- }
+
spin_unlock(&edge_port->ep_lock);
if (retval)
dev_err(&urb->dev->dev,
@@ -1959,9 +1951,7 @@ static int edge_open(struct tty_struct *tty, struct usb_serial_port *port)
status = -EINVAL;
goto release_es_lock;
}
- urb->complete = edge_interrupt_callback;
urb->context = edge_serial;
- urb->dev = dev;
status = usb_submit_urb(urb, GFP_KERNEL);
if (status) {
dev_err(&port->dev,
@@ -1987,9 +1977,7 @@ static int edge_open(struct tty_struct *tty, struct usb_serial_port *port)
goto unlink_int_urb;
}
edge_port->ep_read_urb_state = EDGE_READ_URB_RUNNING;
- urb->complete = edge_bulk_in_callback;
urb->context = edge_port;
- urb->dev = dev;
status = usb_submit_urb(urb, GFP_KERNEL);
if (status) {
dev_err(&port->dev,
@@ -2118,12 +2106,7 @@ static void edge_send(struct tty_struct *tty)
port->write_urb->transfer_buffer);
/* set up our urb */
- usb_fill_bulk_urb(port->write_urb, port->serial->dev,
- usb_sndbulkpipe(port->serial->dev,
- port->bulk_out_endpointAddress),
- port->write_urb->transfer_buffer, count,
- edge_bulk_out_callback,
- port);
+ port->write_urb->transfer_buffer_length = count;
/* send the data out the bulk port */
result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
@@ -2267,9 +2250,6 @@ static int restart_read(struct edgeport_port *edge_port)
if (edge_port->ep_read_urb_state == EDGE_READ_URB_STOPPED) {
urb = edge_port->port->read_urb;
- urb->complete = edge_bulk_in_callback;
- urb->context = edge_port;
- urb->dev = edge_port->port->serial->dev;
status = usb_submit_urb(urb, GFP_ATOMIC);
}
edge_port->ep_read_urb_state = EDGE_READ_URB_RUNNING;
diff --git a/drivers/usb/serial/ipaq.c b/drivers/usb/serial/ipaq.c
index 4735931b4c7b..06053a920dd8 100644
--- a/drivers/usb/serial/ipaq.c
+++ b/drivers/usb/serial/ipaq.c
@@ -8,40 +8,6 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
- * (12/12/2002) ganesh
- * Added support for practically all devices supported by ActiveSync
- * on Windows. Thanks to Wes Cilldhaire <billybobjoehenrybob@hotmail.com>.
- *
- * (26/11/2002) ganesh
- * Added insmod options to specify product and vendor id.
- * Use modprobe ipaq vendor=0xfoo product=0xbar
- *
- * (26/7/2002) ganesh
- * Fixed up broken error handling in ipaq_open. Retry the "kickstart"
- * packet much harder - this drastically reduces connection failures.
- *
- * (30/4/2002) ganesh
- * Added support for the Casio EM500. Completely untested. Thanks
- * to info from Nathan <wfilardo@fuse.net>
- *
- * (19/3/2002) ganesh
- * Don't submit urbs while holding spinlocks. Not strictly necessary
- * in 2.5.x.
- *
- * (8/3/2002) ganesh
- * The ipaq sometimes emits a '\0' before the CLIENT string. At this
- * point of time, the ppp ldisc is not yet attached to the tty, so
- * n_tty echoes "^@" to the ipaq, which messes up the chat. In 2.5.6-pre2
- * this causes a panic because echo_char() tries to sleep in interrupt
- * context.
- * The fix is to tell the upper layers that this is a raw device so that
- * echoing is suppressed. Thanks to Lyle Lindholm for a detailed bug
- * report.
- *
- * (25/2/2002) ganesh
- * Added support for the HP Jornada 548 and 568. Completely untested.
- * Thanks to info from Heath Robinson and Arieh Davidoff.
*/
#include <linux/kernel.h>
@@ -68,7 +34,7 @@
#define DRIVER_DESC "USB PocketPC PDA driver"
static __u16 product, vendor;
-static int debug;
+static bool debug;
static int connect_retries = KP_RETRIES;
static int initial_wait;
diff --git a/drivers/usb/serial/ipw.c b/drivers/usb/serial/ipw.c
index 5170799d6e94..6f9356f3f99e 100644
--- a/drivers/usb/serial/ipw.c
+++ b/drivers/usb/serial/ipw.c
@@ -147,7 +147,7 @@ static struct usb_driver usb_ipw_driver = {
.no_dynamic_id = 1,
};
-static int debug;
+static bool debug;
static int ipw_open(struct tty_struct *tty, struct usb_serial_port *port)
{
diff --git a/drivers/usb/serial/ir-usb.c b/drivers/usb/serial/ir-usb.c
index ccbce4066d04..84a396e83671 100644
--- a/drivers/usb/serial/ir-usb.c
+++ b/drivers/usb/serial/ir-usb.c
@@ -22,38 +22,6 @@
*
* See Documentation/usb/usb-serial.txt for more information on using this
* driver
- *
- * 2008_Jun_02 Felipe Balbi <me@felipebalbi.com>
- * Introduced common header to be used also in USB Gadget Framework.
- * Still needs some other style fixes.
- *
- * 2007_Jun_21 Alan Cox <alan@lxorguk.ukuu.org.uk>
- * Minimal cleanups for some of the driver problems and tty layer abuse.
- * Still needs fixing to allow multiple dongles.
- *
- * 2002_Mar_07 greg kh
- * moved some needed structures and #define values from the
- * net/irda/irda-usb.h file into our file, as we don't want to depend on
- * that codebase compiling correctly :)
- *
- * 2002_Jan_14 gb
- * Added module parameter to force specific number of XBOFs.
- * Added ir_xbof_change().
- * Reorganized read_bulk_callback error handling.
- * Switched from FILL_BULK_URB() to usb_fill_bulk_urb().
- *
- * 2001_Nov_08 greg kh
- * Changed the irda_usb_find_class_desc() function based on comments and
- * code from Martin Diehl.
- *
- * 2001_Nov_01 greg kh
- * Added support for more IrDA USB devices.
- * Added support for zero packet. Added buffer override parameter, so
- * users can transfer larger packets at once if they wish. Both patches
- * came from Dag Brattli <dag@obexcode.com>.
- *
- * 2001_Oct_07 greg kh
- * initial version released.
*/
#include <linux/kernel.h>
@@ -77,7 +45,7 @@
#define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>, Johan Hovold <jhovold@gmail.com>"
#define DRIVER_DESC "USB IR Dongle driver"
-static int debug;
+static bool debug;
/* if overridden by the user, then use their value for the size of the read and
* write urbs */
diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
index 6aca631a407a..3077a4436976 100644
--- a/drivers/usb/serial/iuu_phoenix.c
+++ b/drivers/usb/serial/iuu_phoenix.c
@@ -34,9 +34,9 @@
#ifdef CONFIG_USB_SERIAL_DEBUG
-static int debug = 1;
+static bool debug = 1;
#else
-static int debug;
+static bool debug;
#endif
/*
@@ -65,7 +65,7 @@ static int clockmode = 1;
static int cdmode = 1;
static int iuu_cardin;
static int iuu_cardout;
-static int xmas;
+static bool xmas;
static int vcc_default = 5;
static void read_rxcmd_callback(struct urb *urb);
@@ -1168,15 +1168,14 @@ static int iuu_open(struct tty_struct *tty, struct usb_serial_port *port)
port->write_urb->transfer_buffer, 1,
read_rxcmd_callback, port);
result = usb_submit_urb(port->write_urb, GFP_KERNEL);
-
if (result) {
dev_err(&port->dev, "%s - failed submitting read urb,"
" error %d\n", __func__, result);
iuu_close(port);
- return -EPROTO;
} else {
dbg("%s - rxcmd OK", __func__);
}
+
return result;
}
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
index a442352d7b61..4cc36c761801 100644
--- a/drivers/usb/serial/keyspan.c
+++ b/drivers/usb/serial/keyspan.c
@@ -25,73 +25,6 @@
Tip 'o the hat to IBM (and previously Linuxcare :) for supporting
staff in their work on open source projects.
-
- Change History
-
- 2003sep04 LPM (Keyspan) add support for new single port product USA19HS.
- Improve setup message handling for all devices.
-
- Wed Feb 19 22:00:00 PST 2003 (Jeffrey S. Laing <keyspan@jsl.com>)
- Merged the current (1/31/03) Keyspan code with the current (2.4.21-pre4)
- Linux source tree. The Linux tree lacked support for the 49WLC and
- others. The Keyspan patches didn't work with the current kernel.
-
- 2003jan30 LPM add support for the 49WLC and MPR
-
- Wed Apr 25 12:00:00 PST 2002 (Keyspan)
- Started with Hugh Blemings' code dated Jan 17, 2002. All adapters
- now supported (including QI and QW). Modified port open, port
- close, and send setup() logic to fix various data and endpoint
- synchronization bugs and device LED status bugs. Changed keyspan_
- write_room() to accurately return transmit buffer availability.
- Changed forwardingLength from 1 to 16 for all adapters.
-
- Fri Oct 12 16:45:00 EST 2001
- Preliminary USA-19QI and USA-28 support (both test OK for me, YMMV)
-
- Wed Apr 25 12:00:00 PST 2002 (Keyspan)
- Started with Hugh Blemings' code dated Jan 17, 2002. All adapters
- now supported (including QI and QW). Modified port open, port
- close, and send setup() logic to fix various data and endpoint
- synchronization bugs and device LED status bugs. Changed keyspan_
- write_room() to accurately return transmit buffer availability.
- Changed forwardingLength from 1 to 16 for all adapters.
-
- Fri Oct 12 16:45:00 EST 2001
- Preliminary USA-19QI and USA-28 support (both test OK for me, YMMV)
-
- Mon Oct 8 14:29:00 EST 2001 hugh
- Fixed bug that prevented multiport devices from operating correctly
- if they weren't the first unit attached.
-
- Sat Oct 6 12:31:21 EST 2001 hugh
- Added support for USA-28XA and -28XB, misc cleanups, break support
- for usa26 based models thanks to David Gibson.
-
- Thu May 31 11:56:42 PDT 2001 gkh
- switched from using spinlock to a semaphore
-
- (04/08/2001) gb
- Identify version on module load.
-
- (11/01/2000) Adam J. Richter
- usb_device_id table support.
-
- Tue Oct 10 23:15:33 EST 2000 Hugh
- Merged Paul's changes with my USA-49W mods. Work in progress
- still...
-
- Wed Jul 19 14:00:42 EST 2000 gkh
- Added module_init and module_exit functions to handle the fact that
- this driver is a loadable module now.
-
- Tue Jul 18 16:14:52 EST 2000 Hugh
- Basic character input/output for USA-19 now mostly works,
- fixed at 9600 baud for the moment.
-
- Sat Jul 8 11:11:48 EST 2000 Hugh
- First public release - nothing works except the firmware upload.
- Tested on PPC and x86 architectures, seems to behave...
*/
@@ -112,7 +45,7 @@
#include <linux/usb/serial.h>
#include "keyspan.h"
-static int debug;
+static bool debug;
/*
* Version Information
@@ -397,7 +330,6 @@ static int keyspan_write(struct tty_struct *tty,
/* send the data out the bulk port */
this_urb->transfer_buffer_length = todo + dataOffset;
- this_urb->dev = port->serial->dev;
err = usb_submit_urb(this_urb, GFP_ATOMIC);
if (err != 0)
dbg("usb_submit_urb(write bulk) failed (%d)", err);
@@ -463,7 +395,6 @@ static void usa26_indat_callback(struct urb *urb)
tty_kref_put(tty);
/* Resubmit urb so we continue receiving */
- urb->dev = port->serial->dev;
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err != 0)
dbg("%s - resubmit read urb failed. (%d)", __func__, err);
@@ -559,7 +490,6 @@ static void usa26_instat_callback(struct urb *urb)
}
/* Resubmit urb so we continue receiving */
- urb->dev = serial->dev;
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err != 0)
dbg("%s - resubmit read urb failed. (%d)", __func__, err);
@@ -609,7 +539,6 @@ static void usa28_indat_callback(struct urb *urb)
tty_kref_put(tty);
/* Resubmit urb so we continue receiving */
- urb->dev = port->serial->dev;
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err != 0)
dbg("%s - resubmit read urb failed. (%d)",
@@ -694,7 +623,6 @@ static void usa28_instat_callback(struct urb *urb)
}
/* Resubmit urb so we continue receiving */
- urb->dev = serial->dev;
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err != 0)
dbg("%s - resubmit read urb failed. (%d)", __func__, err);
@@ -789,8 +717,6 @@ static void usa49_instat_callback(struct urb *urb)
}
/* Resubmit urb so we continue receiving */
- urb->dev = serial->dev;
-
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err != 0)
dbg("%s - resubmit read urb failed. (%d)", __func__, err);
@@ -848,7 +774,6 @@ static void usa49_indat_callback(struct urb *urb)
tty_kref_put(tty);
/* Resubmit urb so we continue receiving */
- urb->dev = port->serial->dev;
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err != 0)
dbg("%s - resubmit read urb failed. (%d)", __func__, err);
@@ -919,8 +844,6 @@ static void usa49wg_indat_callback(struct urb *urb)
}
/* Resubmit urb so we continue receiving */
- urb->dev = serial->dev;
-
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err != 0)
dbg("%s - resubmit read urb failed. (%d)", __func__, err);
@@ -996,7 +919,6 @@ static void usa90_indat_callback(struct urb *urb)
}
/* Resubmit urb so we continue receiving */
- urb->dev = port->serial->dev;
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err != 0)
dbg("%s - resubmit read urb failed. (%d)", __func__, err);
@@ -1047,7 +969,6 @@ static void usa90_instat_callback(struct urb *urb)
}
/* Resubmit urb so we continue receiving */
- urb->dev = serial->dev;
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err != 0)
dbg("%s - resubmit read urb failed. (%d)", __func__, err);
@@ -1123,7 +1044,6 @@ static void usa67_instat_callback(struct urb *urb)
}
/* Resubmit urb so we continue receiving */
- urb->dev = serial->dev;
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err != 0)
dbg("%s - resubmit read urb failed. (%d)", __func__, err);
@@ -1223,7 +1143,6 @@ static int keyspan_open(struct tty_struct *tty, struct usb_serial_port *port)
urb = p_priv->in_urbs[i];
if (urb == NULL)
continue;
- urb->dev = serial->dev;
/* make sure endpoint data toggle is synchronized
with the device */
@@ -1239,7 +1158,6 @@ static int keyspan_open(struct tty_struct *tty, struct usb_serial_port *port)
urb = p_priv->out_urbs[i];
if (urb == NULL)
continue;
- urb->dev = serial->dev;
/* usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
usb_pipeout(urb->pipe), 0); */
}
@@ -1956,7 +1874,6 @@ static int keyspan_usa26_send_setup(struct usb_serial *serial,
/* send the data out the device on control endpoint */
this_urb->transfer_buffer_length = sizeof(msg);
- this_urb->dev = serial->dev;
err = usb_submit_urb(this_urb, GFP_ATOMIC);
if (err != 0)
dbg("%s - usb_submit_urb(setup) failed (%d)", __func__, err);
@@ -2084,7 +2001,6 @@ static int keyspan_usa28_send_setup(struct usb_serial *serial,
/* send the data out the device on control endpoint */
this_urb->transfer_buffer_length = sizeof(msg);
- this_urb->dev = serial->dev;
err = usb_submit_urb(this_urb, GFP_ATOMIC);
if (err != 0)
dbg("%s - usb_submit_urb(setup) failed", __func__);
@@ -2271,8 +2187,6 @@ static int keyspan_usa49_send_setup(struct usb_serial *serial,
/* send the data out the device on control endpoint */
this_urb->transfer_buffer_length = sizeof(msg);
-
- this_urb->dev = serial->dev;
}
err = usb_submit_urb(this_urb, GFP_ATOMIC);
if (err != 0)
@@ -2415,7 +2329,6 @@ static int keyspan_usa90_send_setup(struct usb_serial *serial,
/* send the data out the device on control endpoint */
this_urb->transfer_buffer_length = sizeof(msg);
- this_urb->dev = serial->dev;
err = usb_submit_urb(this_urb, GFP_ATOMIC);
if (err != 0)
dbg("%s - usb_submit_urb(setup) failed (%d)", __func__, err);
@@ -2561,7 +2474,6 @@ static int keyspan_usa67_send_setup(struct usb_serial *serial,
/* send the data out the device on control endpoint */
this_urb->transfer_buffer_length = sizeof(msg);
- this_urb->dev = serial->dev;
err = usb_submit_urb(this_urb, GFP_ATOMIC);
if (err != 0)
@@ -2650,14 +2562,12 @@ static int keyspan_startup(struct usb_serial *serial)
keyspan_setup_urbs(serial);
if (s_priv->instat_urb != NULL) {
- s_priv->instat_urb->dev = serial->dev;
err = usb_submit_urb(s_priv->instat_urb, GFP_KERNEL);
if (err != 0)
dbg("%s - submit instat urb failed %d", __func__,
err);
}
if (s_priv->indat_urb != NULL) {
- s_priv->indat_urb->dev = serial->dev;
err = usb_submit_urb(s_priv->indat_urb, GFP_KERNEL);
if (err != 0)
dbg("%s - submit indat urb failed %d", __func__,
diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
index d5c0c6ab4966..7c62a7048302 100644
--- a/drivers/usb/serial/keyspan_pda.c
+++ b/drivers/usb/serial/keyspan_pda.c
@@ -12,59 +12,6 @@
*
* See Documentation/usb/usb-serial.txt for more information on using this
* driver
- *
- * (09/07/2001) gkh
- * cleaned up the Xircom support. Added ids for Entregra device which is
- * the same as the Xircom device. Enabled the code to be compiled for
- * either Xircom or Keyspan devices.
- *
- * (08/11/2001) Cristian M. Craciunescu
- * support for Xircom PGSDB9
- *
- * (05/31/2001) gkh
- * switched from using spinlock to a semaphore, which fixes lots of
- * problems.
- *
- * (04/08/2001) gb
- * Identify version on module load.
- *
- * (11/01/2000) Adam J. Richter
- * usb_device_id table support
- *
- * (10/05/2000) gkh
- * Fixed bug with urb->dev not being set properly, now that the usb
- * core needs it.
- *
- * (08/28/2000) gkh
- * Added locks for SMP safeness.
- * Fixed MOD_INC and MOD_DEC logic and the ability to open a port more
- * than once.
- *
- * (07/20/2000) borchers
- * - keyspan_pda_write no longer sleeps if it is called on interrupt time;
- * PPP and the line discipline with stty echo on can call write on
- * interrupt time and this would cause an oops if write slept
- * - if keyspan_pda_write is in an interrupt, it will not call
- * usb_control_msg (which sleeps) to query the room in the device
- * buffer, it simply uses the current room value it has
- * - if the urb is busy or if it is throttled keyspan_pda_write just
- * returns 0, rather than sleeping to wait for this to change; the
- * write_chan code in n_tty.c will sleep if needed before calling
- * keyspan_pda_write again
- * - if the device needs to be unthrottled, write now queues up the
- * call to usb_control_msg (which sleeps) to unthrottle the device
- * - the wakeups from keyspan_pda_write_bulk_callback are queued rather
- * than done directly from the callback to avoid the race in write_chan
- * - keyspan_pda_chars_in_buffer also indicates its buffer is full if the
- * urb status is -EINPROGRESS, meaning it cannot write at the moment
- *
- * (07/19/2000) gkh
- * Added module_init and module_exit functions to handle the fact that this
- * driver is a loadable module now.
- *
- * (03/26/2000) gkh
- * Split driver up into device specific pieces.
- *
*/
@@ -84,7 +31,7 @@
#include <linux/usb.h>
#include <linux/usb/serial.h>
-static int debug;
+static bool debug;
/* make a simple define to handle if we are compiling keyspan_pda or xircom support */
#if defined(CONFIG_USB_SERIAL_KEYSPAN_PDA) || defined(CONFIG_USB_SERIAL_KEYSPAN_PDA_MODULE)
@@ -290,7 +237,6 @@ static void keyspan_pda_rx_unthrottle(struct tty_struct *tty)
struct usb_serial_port *port = tty->driver_data;
/* just restart the receive interrupt URB */
dbg("keyspan_pda_rx_unthrottle port %d", port->number);
- port->interrupt_in_urb->dev = port->serial->dev;
if (usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL))
dbg(" usb_submit_urb(read urb) failed");
}
@@ -532,11 +478,11 @@ static int keyspan_pda_write(struct tty_struct *tty,
the device is full (wait until it says there is room)
*/
spin_lock_bh(&port->lock);
- if (port->write_urb_busy || priv->tx_throttled) {
+ if (!test_bit(0, &port->write_urbs_free) || priv->tx_throttled) {
spin_unlock_bh(&port->lock);
return 0;
}
- port->write_urb_busy = 1;
+ clear_bit(0, &port->write_urbs_free);
spin_unlock_bh(&port->lock);
/* At this point the URB is in our control, nobody else can submit it
@@ -598,7 +544,6 @@ static int keyspan_pda_write(struct tty_struct *tty,
priv->tx_room -= count;
- port->write_urb->dev = port->serial->dev;
rc = usb_submit_urb(port->write_urb, GFP_ATOMIC);
if (rc) {
dbg(" usb_submit_urb(write bulk) failed");
@@ -618,7 +563,7 @@ static int keyspan_pda_write(struct tty_struct *tty,
rc = count;
exit:
if (rc < 0)
- port->write_urb_busy = 0;
+ set_bit(0, &port->write_urbs_free);
return rc;
}
@@ -628,7 +573,7 @@ static void keyspan_pda_write_bulk_callback(struct urb *urb)
struct usb_serial_port *port = urb->context;
struct keyspan_pda_private *priv;
- port->write_urb_busy = 0;
+ set_bit(0, &port->write_urbs_free);
priv = usb_get_serial_port_data(port);
/* queue up a wakeup at scheduler time */
@@ -661,7 +606,7 @@ static int keyspan_pda_chars_in_buffer(struct tty_struct *tty)
n_tty.c:normal_poll() ) that we're not writeable. */
spin_lock_irqsave(&port->lock, flags);
- if (port->write_urb_busy || priv->tx_throttled)
+ if (!test_bit(0, &port->write_urbs_free) || priv->tx_throttled)
ret = 256;
spin_unlock_irqrestore(&port->lock, flags);
return ret;
@@ -717,7 +662,6 @@ static int keyspan_pda_open(struct tty_struct *tty,
priv->tx_throttled = *room ? 0 : 1;
/*Start reading from the device*/
- port->interrupt_in_urb->dev = serial->dev;
rc = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
if (rc) {
dbg("%s - usb_submit_urb(read int) failed", __func__);
diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c
index 19373cb7c5bf..fc064e1442ca 100644
--- a/drivers/usb/serial/kl5kusb105.c
+++ b/drivers/usb/serial/kl5kusb105.c
@@ -49,7 +49,7 @@
#include <linux/usb/serial.h>
#include "kl5kusb105.h"
-static int debug;
+static bool debug;
/*
* Version Information
diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c
index ddd146300ddb..5d3beeeb5fd9 100644
--- a/drivers/usb/serial/kobil_sct.c
+++ b/drivers/usb/serial/kobil_sct.c
@@ -20,18 +20,6 @@
*
* Supported readers: USB TWIN, KAAN Standard Plus and SecOVID Reader Plus
* (Adapter K), B1 Professional and KAAN Professional (Adapter B)
- *
- * (21/05/2004) tw
- * Fix bug with P'n'P readers
- *
- * (28/05/2003) tw
- * Add support for KAAN SIM
- *
- * (12/09/2002) tw
- * Adapted to 2.5.
- *
- * (11/08/2002) tw
- * Initial version.
*/
@@ -231,9 +219,6 @@ static int kobil_open(struct tty_struct *tty, struct usb_serial_port *port)
dbg("%s - port %d", __func__, port->number);
priv = usb_get_serial_port_data(port);
- /* someone sets the dev to 0 if the close method has been called */
- port->interrupt_in_urb->dev = port->serial->dev;
-
/* allocate memory for transfer buffer */
transfer_buffer = kzalloc(transfer_buffer_length, GFP_KERNEL);
if (!transfer_buffer)
@@ -393,8 +378,6 @@ static void kobil_read_int_callback(struct urb *urb)
tty_flip_buffer_push(tty);
}
tty_kref_put(tty);
- /* someone sets the dev to 0 if the close method has been called */
- port->interrupt_in_urb->dev = port->serial->dev;
result = usb_submit_urb(port->interrupt_in_urb, GFP_ATOMIC);
dbg("%s - port %d Send read URB returns: %i",
@@ -475,17 +458,9 @@ static int kobil_write(struct tty_struct *tty, struct usb_serial_port *port,
priv->filled = 0;
priv->cur_pos = 0;
- /* someone sets the dev to 0 if the close method
- has been called */
- port->interrupt_in_urb->dev = port->serial->dev;
-
/* start reading (except TWIN and KAAN SIM) */
if (priv->device_type == KOBIL_ADAPTER_B_PRODUCT_ID ||
priv->device_type == KOBIL_ADAPTER_K_PRODUCT_ID) {
- /* someone sets the dev to 0 if the close method has
- been called */
- port->interrupt_in_urb->dev = port->serial->dev;
-
result = usb_submit_urb(port->interrupt_in_urb,
GFP_NOIO);
dbg("%s - port %d Send read URB returns: %i",
diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
index ba0d28727ccb..27fa9c8a77b0 100644
--- a/drivers/usb/serial/mct_u232.c
+++ b/drivers/usb/serial/mct_u232.c
@@ -19,50 +19,6 @@
* DTR/RTS signal handling may be incomplete or incorrect. I have mainly
* implemented what I have seen with SniffUSB or found in belkin_sa.c.
* For further TODOs check also belkin_sa.c.
- *
- * TEST STATUS:
- * Basic tests have been performed with minicom/zmodem transfers and
- * modem dialing under Linux 2.4.0-test10 (for me it works fine).
- *
- * 04-Nov-2003 Bill Marr <marr at flex dot com>
- * - Mimic Windows driver by sending 2 USB 'device request' messages
- * following normal 'baud rate change' message. This allows data to be
- * transmitted to RS-232 devices which don't assert the 'CTS' signal.
- *
- * 10-Nov-2001 Wolfgang Grandegger
- * - Fixed an endianness problem with the baud rate selection for PowerPC.
- *
- * 06-Dec-2001 Martin Hamilton <martinh@gnu.org>
- * - Added support for the Belkin F5U109 DB9 adaptor
- *
- * 30-May-2001 Greg Kroah-Hartman
- * - switched from using spinlock to a semaphore, which fixes lots of
- * problems.
- *
- * 04-May-2001 Stelian Pop
- * - Set the maximum bulk output size for Sitecom U232-P25 model to 16 bytes
- * instead of the device reported 32 (using 32 bytes causes many data
- * loss, Windows driver uses 16 too).
- *
- * 02-May-2001 Stelian Pop
- * - Fixed the baud calculation for Sitecom U232-P25 model
- *
- * 08-Apr-2001 gb
- * - Identify version on module load.
- *
- * 06-Jan-2001 Cornel Ciocirlan
- * - Added support for Sitecom U232-P25 model (Product Id 0x0230)
- * - Added support for D-Link DU-H3SP USB BAY (Product Id 0x0200)
- *
- * 29-Nov-2000 Greg Kroah-Hartman
- * - Added device id table to fit with 2.4.0-test11 structure.
- * - took out DEAL_WITH_TWO_INT_IN_ENDPOINTS #define as it's not needed
- * (lots of things will change if/when the usb-serial core changes to
- * handle these issues).
- *
- * 27-Nov-2000 Wolfgang Grandegge
- * A version for kernel 2.4.0-test10 released to the Linux community
- * (via linux-usb-devel).
*/
#include <linux/kernel.h>
@@ -89,7 +45,7 @@
#define DRIVER_AUTHOR "Wolfgang Grandegger <wolfgang@ces.ch>"
#define DRIVER_DESC "Magic Control Technology USB-RS232 converter driver"
-static int debug;
+static bool debug;
/*
* Function prototypes
@@ -526,7 +482,6 @@ static int mct_u232_open(struct tty_struct *tty, struct usb_serial_port *port)
mct_u232_msr_to_state(&priv->control_state, priv->last_msr);
spin_unlock_irqrestore(&priv->lock, flags);
- port->read_urb->dev = port->serial->dev;
retval = usb_submit_urb(port->read_urb, GFP_KERNEL);
if (retval) {
dev_err(&port->dev,
@@ -535,7 +490,6 @@ static int mct_u232_open(struct tty_struct *tty, struct usb_serial_port *port)
goto error;
}
- port->interrupt_in_urb->dev = port->serial->dev;
retval = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
if (retval) {
usb_kill_urb(port->read_urb);
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index 3524a105d042..4554ee49e635 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -71,7 +71,7 @@ struct moschip_port {
struct urb *write_urb_pool[NUM_URBS];
};
-static int debug;
+static bool debug;
static struct usb_serial_driver moschip7720_2port_driver;
@@ -939,14 +939,7 @@ static void mos7720_bulk_in_callback(struct urb *urb)
}
tty_kref_put(tty);
- if (!port->read_urb) {
- dbg("URB KILLED !!!");
- return;
- }
-
if (port->read_urb->status != -EINPROGRESS) {
- port->read_urb->dev = port->serial->dev;
-
retval = usb_submit_urb(port->read_urb, GFP_ATOMIC);
if (retval)
dbg("usb_submit_urb(read bulk) failed, retval = %d",
@@ -1014,7 +1007,6 @@ static int mos77xx_calc_num_ports(struct usb_serial *serial)
static int mos7720_open(struct tty_struct *tty, struct usb_serial_port *port)
{
struct usb_serial *serial;
- struct usb_serial_port *port0;
struct urb *urb;
struct moschip_port *mos7720_port;
int response;
@@ -1029,8 +1021,6 @@ static int mos7720_open(struct tty_struct *tty, struct usb_serial_port *port)
if (mos7720_port == NULL)
return -ENODEV;
- port0 = serial->port[0];
-
usb_clear_halt(serial->dev, port->write_urb->pipe);
usb_clear_halt(serial->dev, port->read_urb->pipe);
@@ -1735,8 +1725,6 @@ static void change_port_settings(struct tty_struct *tty,
write_mos_reg(serial, port_number, IER, 0x0c);
if (port->read_urb->status != -EINPROGRESS) {
- port->read_urb->dev = serial->dev;
-
status = usb_submit_urb(port->read_urb, GFP_ATOMIC);
if (status)
dbg("usb_submit_urb(read bulk) failed, status = %d",
@@ -1786,13 +1774,7 @@ static void mos7720_set_termios(struct tty_struct *tty,
/* change the port settings to the new ones specified */
change_port_settings(tty, mos7720_port, old_termios);
- if (!port->read_urb) {
- dbg("%s", "URB KILLED !!!!!");
- return;
- }
-
if (port->read_urb->status != -EINPROGRESS) {
- port->read_urb->dev = serial->dev;
status = usb_submit_urb(port->read_urb, GFP_ATOMIC);
if (status)
dbg("usb_submit_urb(read bulk) failed, status = %d",
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index c72abd524983..03b5e249e95e 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -263,7 +263,7 @@ struct moschip_port {
};
-static int debug;
+static bool debug;
/*
* mos7840_set_reg_sync
@@ -792,8 +792,6 @@ static void mos7840_bulk_in_callback(struct urb *urb)
}
- mos7840_port->read_urb->dev = serial->dev;
-
mos7840_port->read_urb_busy = true;
retval = usb_submit_urb(mos7840_port->read_urb, GFP_ATOMIC);
@@ -2058,7 +2056,6 @@ static void mos7840_change_port_settings(struct tty_struct *tty,
mos7840_set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data);
if (mos7840_port->read_urb_busy == false) {
- mos7840_port->read_urb->dev = serial->dev;
mos7840_port->read_urb_busy = true;
status = usb_submit_urb(mos7840_port->read_urb, GFP_ATOMIC);
if (status) {
@@ -2130,7 +2127,6 @@ static void mos7840_set_termios(struct tty_struct *tty,
}
if (mos7840_port->read_urb_busy == false) {
- mos7840_port->read_urb->dev = serial->dev;
mos7840_port->read_urb_busy = true;
status = usb_submit_urb(mos7840_port->read_urb, GFP_ATOMIC);
if (status) {
diff --git a/drivers/usb/serial/navman.c b/drivers/usb/serial/navman.c
index 1f00f243c26c..b28f1db0195f 100644
--- a/drivers/usb/serial/navman.c
+++ b/drivers/usb/serial/navman.c
@@ -21,7 +21,7 @@
#include <linux/usb.h>
#include <linux/usb/serial.h>
-static int debug;
+static bool debug;
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x0a99, 0x0001) }, /* Talon Technology device */
diff --git a/drivers/usb/serial/omninet.c b/drivers/usb/serial/omninet.c
index 60f38d5e64fc..8b8d58a2ac12 100644
--- a/drivers/usb/serial/omninet.c
+++ b/drivers/usb/serial/omninet.c
@@ -9,31 +9,6 @@
* driver
*
* Please report both successes and troubles to the author at omninet@kroah.com
- *
- * (05/30/2001) gkh
- * switched from using spinlock to a semaphore, which fixes lots of
- * problems.
- *
- * (04/08/2001) gb
- * Identify version on module load.
- *
- * (11/01/2000) Adam J. Richter
- * usb_device_id table support
- *
- * (10/05/2000) gkh
- * Fixed bug with urb->dev not being set properly, now that the usb
- * core needs it.
- *
- * (08/28/2000) gkh
- * Added locks for SMP safeness.
- * Fixed MOD_INC and MOD_DEC logic and the ability to open a port more
- * than once.
- * Fixed potential race in omninet_write_bulk_callback
- *
- * (07/19/2000) gkh
- * Added module_init and module_exit functions to handle the fact that this
- * driver is a loadable module now.
- *
*/
#include <linux/kernel.h>
@@ -44,12 +19,11 @@
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
-#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
-static int debug;
+static bool debug;
/*
* Version Information
@@ -174,12 +148,6 @@ static int omninet_open(struct tty_struct *tty, struct usb_serial_port *port)
tty_port_tty_set(&wport->port, tty);
/* Start reading from the device */
- usb_fill_bulk_urb(port->read_urb, serial->dev,
- usb_rcvbulkpipe(serial->dev,
- port->bulk_in_endpointAddress),
- port->read_urb->transfer_buffer,
- port->read_urb->transfer_buffer_length,
- omninet_read_bulk_callback, port);
result = usb_submit_urb(port->read_urb, GFP_KERNEL);
if (result)
dev_err(&port->dev,
@@ -236,11 +204,6 @@ static void omninet_read_bulk_callback(struct urb *urb)
}
/* Continue trying to always read */
- usb_fill_bulk_urb(urb, port->serial->dev,
- usb_rcvbulkpipe(port->serial->dev,
- port->bulk_in_endpointAddress),
- urb->transfer_buffer, urb->transfer_buffer_length,
- omninet_read_bulk_callback, port);
result = usb_submit_urb(urb, GFP_ATOMIC);
if (result)
dev_err(&port->dev,
@@ -267,14 +230,10 @@ static int omninet_write(struct tty_struct *tty, struct usb_serial_port *port,
return 0;
}
- spin_lock_bh(&wport->lock);
- if (wport->write_urb_busy) {
- spin_unlock_bh(&wport->lock);
+ if (!test_and_clear_bit(0, &port->write_urbs_free)) {
dbg("%s - already writing", __func__);
return 0;
}
- wport->write_urb_busy = 1;
- spin_unlock_bh(&wport->lock);
count = (count > OMNINET_BULKOUTSIZE) ? OMNINET_BULKOUTSIZE : count;
@@ -292,10 +251,9 @@ static int omninet_write(struct tty_struct *tty, struct usb_serial_port *port,
/* send the data out the bulk port, always 64 bytes */
wport->write_urb->transfer_buffer_length = 64;
- wport->write_urb->dev = serial->dev;
result = usb_submit_urb(wport->write_urb, GFP_ATOMIC);
if (result) {
- wport->write_urb_busy = 0;
+ set_bit(0, &wport->write_urbs_free);
dev_err(&port->dev,
"%s - failed submitting write urb, error %d\n",
__func__, result);
@@ -314,8 +272,7 @@ static int omninet_write_room(struct tty_struct *tty)
int room = 0; /* Default: no room */
- /* FIXME: no consistent locking for write_urb_busy */
- if (wport->write_urb_busy)
+ if (test_bit(0, &wport->write_urbs_free))
room = wport->bulk_out_size - OMNINET_HEADERLEN;
dbg("%s - returns %d", __func__, room);
@@ -332,7 +289,7 @@ static void omninet_write_bulk_callback(struct urb *urb)
dbg("%s - port %0x", __func__, port->number);
- port->write_urb_busy = 0;
+ set_bit(0, &port->write_urbs_free);
if (status) {
dbg("%s - nonzero write bulk status received: %d",
__func__, status);
diff --git a/drivers/usb/serial/opticon.c b/drivers/usb/serial/opticon.c
index c248a9147439..262ded9e076b 100644
--- a/drivers/usb/serial/opticon.c
+++ b/drivers/usb/serial/opticon.c
@@ -32,7 +32,7 @@
* examples of 1D barcode types are EAN, UPC, Code39, IATA, etc. */
#define DRIVER_DESC "Opticon USB barcode to serial driver (1D)"
-static int debug;
+static bool debug;
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x065a, 0x0009) },
@@ -384,7 +384,6 @@ static void opticon_unthrottle(struct tty_struct *tty)
priv->actually_throttled = false;
spin_unlock_irqrestore(&priv->lock, flags);
- priv->bulk_read_urb->dev = port->serial->dev;
if (was_throttled) {
result = usb_submit_urb(priv->bulk_read_urb, GFP_ATOMIC);
if (result)
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index d865878c9f97..420d9857394a 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -476,6 +476,10 @@ static void option_instat_callback(struct urb *urb);
#define VIETTEL_VENDOR_ID 0x2262
#define VIETTEL_PRODUCT_VT1000 0x0002
+/* ZD Incorporated */
+#define ZD_VENDOR_ID 0x0685
+#define ZD_PRODUCT_7000 0x7000
+
/* some devices interfaces need special handling due to a number of reasons */
enum option_blacklist_reason {
OPTION_BLACKLIST_NONE = 0,
@@ -661,6 +665,14 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x31) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x32) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x01) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x02) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x03) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x10) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x12) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x13) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x01) }, /* E398 3G Modem */
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x02) }, /* E398 3G PC UI Interface */
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x03) }, /* E398 3G Application Interface */
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) },
@@ -747,6 +759,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
+ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6280) }, /* BP3-USB & BP3-EXT HSDPA */
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6008) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) },
@@ -1169,6 +1182,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU528) },
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU526) },
{ USB_DEVICE_AND_INTERFACE_INFO(VIETTEL_VENDOR_ID, VIETTEL_PRODUCT_VT1000, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZD_VENDOR_ID, ZD_PRODUCT_7000, 0xff, 0xff, 0xff) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, option_ids);
@@ -1220,7 +1234,7 @@ static struct usb_serial_driver option_1port_device = {
#endif
};
-static int debug;
+static bool debug;
/* per port private data */
diff --git a/drivers/usb/serial/oti6858.c b/drivers/usb/serial/oti6858.c
index 4c29e6c2bda7..e287fd32682c 100644
--- a/drivers/usb/serial/oti6858.c
+++ b/drivers/usb/serial/oti6858.c
@@ -74,7 +74,7 @@ static struct usb_driver oti6858_driver = {
.no_dynamic_id = 1,
};
-static int debug;
+static bool debug;
/* requests */
#define OTI6858_REQ_GET_STATUS (USB_DIR_IN | USB_TYPE_VENDOR | 0x00)
@@ -264,7 +264,6 @@ static void setup_line(struct work_struct *work)
spin_unlock_irqrestore(&priv->lock, flags);
dbg("%s(): submitting interrupt urb", __func__);
- port->interrupt_in_urb->dev = port->serial->dev;
result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
if (result != 0) {
dev_err(&port->dev, "%s(): usb_submit_urb() failed"
@@ -321,7 +320,6 @@ static void send_data(struct work_struct *work)
priv->flags.write_urb_in_use = 0;
dbg("%s(): submitting interrupt urb", __func__);
- port->interrupt_in_urb->dev = port->serial->dev;
result = usb_submit_urb(port->interrupt_in_urb, GFP_NOIO);
if (result != 0) {
dev_err(&port->dev, "%s(): usb_submit_urb() failed"
@@ -334,7 +332,6 @@ static void send_data(struct work_struct *work)
port->write_urb->transfer_buffer,
count, &port->lock);
port->write_urb->transfer_buffer_length = count;
- port->write_urb->dev = port->serial->dev;
result = usb_submit_urb(port->write_urb, GFP_NOIO);
if (result != 0) {
dev_err(&port->dev, "%s(): usb_submit_urb() failed"
@@ -583,13 +580,12 @@ static int oti6858_open(struct tty_struct *tty, struct usb_serial_port *port)
kfree(buf);
dbg("%s(): submitting interrupt urb", __func__);
- port->interrupt_in_urb->dev = serial->dev;
result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
if (result != 0) {
dev_err(&port->dev, "%s(): usb_submit_urb() failed"
" with error %d\n", __func__, result);
oti6858_close(port);
- return -EPROTO;
+ return result;
}
/* setup termios */
@@ -837,7 +833,6 @@ static void oti6858_read_int_callback(struct urb *urb)
if (can_recv) {
int result;
- port->read_urb->dev = port->serial->dev;
result = usb_submit_urb(port->read_urb, GFP_ATOMIC);
if (result != 0) {
priv->flags.read_urb_in_use = 0;
@@ -866,7 +861,6 @@ static void oti6858_read_int_callback(struct urb *urb)
int result;
/* dbg("%s(): submitting interrupt urb", __func__); */
- urb->dev = port->serial->dev;
result = usb_submit_urb(urb, GFP_ATOMIC);
if (result != 0) {
dev_err(&urb->dev->dev,
@@ -894,18 +888,6 @@ static void oti6858_read_bulk_callback(struct urb *urb)
spin_unlock_irqrestore(&priv->lock, flags);
if (status != 0) {
- /*
- if (status == -EPROTO) {
- * PL2303 mysteriously fails with -EPROTO reschedule
- the read *
- dbg("%s - caught -EPROTO, resubmitting the urb",
- __func__);
- result = usb_submit_urb(urb, GFP_ATOMIC);
- if (result)
- dev_err(&urb->dev->dev, "%s - failed resubmitting read urb, error %d\n", __func__, result);
- return;
- }
- */
dbg("%s(): unable to handle the error, exiting", __func__);
return;
}
@@ -918,7 +900,6 @@ static void oti6858_read_bulk_callback(struct urb *urb)
tty_kref_put(tty);
/* schedule the interrupt urb */
- port->interrupt_in_urb->dev = port->serial->dev;
result = usb_submit_urb(port->interrupt_in_urb, GFP_ATOMIC);
if (result != 0 && result != -EPERM) {
dev_err(&port->dev, "%s(): usb_submit_urb() failed,"
@@ -955,7 +936,6 @@ static void oti6858_write_bulk_callback(struct urb *urb)
dbg("%s(): overflow in write", __func__);
port->write_urb->transfer_buffer_length = 1;
- port->write_urb->dev = port->serial->dev;
result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
if (result) {
dev_err(&port->dev, "%s(): usb_submit_urb() failed,"
@@ -968,7 +948,6 @@ static void oti6858_write_bulk_callback(struct urb *urb)
priv->flags.write_urb_in_use = 0;
/* schedule the interrupt urb if we are still open */
- port->interrupt_in_urb->dev = port->serial->dev;
dbg("%s(): submitting interrupt urb", __func__);
result = usb_submit_urb(port->interrupt_in_urb, GFP_ATOMIC);
if (result != 0) {
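[Note: two things recur in the oti6858 hunks above (and in the sierra, ti_usb_3410_5052 and whiteheat hunks further down): the `urb->dev = ...` assignments before usb_submit_urb() are dropped, because the device pointer is already recorded when the URB is set up with usb_fill_bulk_urb()/usb_fill_int_urb(), and error paths now return the code from the failed submission instead of a hard-coded -EPROTO. A minimal sketch of the fill-once, submit-many pattern; the helper name and the 0x81 endpoint address are illustrative, not from the patch:

	#include <linux/usb.h>

	static void example_setup_bulk_in(struct urb *urb, struct usb_device *udev,
					  void *buf, int len,
					  usb_complete_t complete_fn, void *ctx)
	{
		/* usb_fill_bulk_urb() stores udev in urb->dev, so neither the
		 * first submission nor later resubmits from the completion
		 * handler need to set it again. */
		usb_fill_bulk_urb(urb, udev, usb_rcvbulkpipe(udev, 0x81),
				  buf, len, complete_fn, ctx);
	}
]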
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index fc2d66f7f4eb..3d8cda57ce7a 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -36,7 +36,7 @@
*/
#define DRIVER_DESC "Prolific PL2303 USB to serial adaptor driver"
-static int debug;
+static bool debug;
#define PL2303_CLOSING_WAIT (30*HZ)
@@ -502,21 +502,20 @@ static int pl2303_open(struct tty_struct *tty, struct usb_serial_port *port)
if (tty)
pl2303_set_termios(tty, port, &tmp_termios);
- dbg("%s - submitting read urb", __func__);
- result = usb_serial_generic_submit_read_urb(port, GFP_KERNEL);
- if (result) {
- pl2303_close(port);
- return -EPROTO;
- }
-
dbg("%s - submitting interrupt urb", __func__);
result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
if (result) {
dev_err(&port->dev, "%s - failed submitting interrupt urb,"
" error %d\n", __func__, result);
- pl2303_close(port);
- return -EPROTO;
+ return result;
}
+
+ result = usb_serial_generic_open(tty, port);
+ if (result) {
+ usb_kill_urb(port->interrupt_in_urb);
+ return result;
+ }
+
port->port.drain_delay = 256;
return 0;
}
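[Note: the reworked pl2303_open() above also shows the cleanup ordering: the interrupt URB is started first, the generic open (which now submits the bulk-in URBs) runs second, and if the latter fails the already-submitted interrupt URB is killed before the real error code is propagated. A condensed sketch of that shape — the function name is hypothetical, the calls are the same ones used in the hunk:

	#include <linux/tty.h>
	#include <linux/usb.h>
	#include <linux/usb/serial.h>

	static int example_open(struct tty_struct *tty, struct usb_serial_port *port)
	{
		int result;

		result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
		if (result) {
			dev_err(&port->dev, "failed to submit interrupt urb: %d\n",
				result);
			return result;			/* propagate the real error */
		}

		result = usb_serial_generic_open(tty, port);
		if (result)
			usb_kill_urb(port->interrupt_in_urb);	/* undo step one */

		return result;
	}
]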
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index aa9367f5b421..1d5deee3be52 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -22,7 +22,7 @@
#define DRIVER_AUTHOR "Qualcomm Inc"
#define DRIVER_DESC "Qualcomm USB Serial driver"
-static int debug;
+static bool debug;
static const struct usb_device_id id_table[] = {
{USB_DEVICE(0x05c6, 0x9211)}, /* Acer Gobi QDL device */
diff --git a/drivers/usb/serial/safe_serial.c b/drivers/usb/serial/safe_serial.c
index a36e2313eed0..d074b3740dcb 100644
--- a/drivers/usb/serial/safe_serial.c
+++ b/drivers/usb/serial/safe_serial.c
@@ -81,9 +81,9 @@
#define CONFIG_USB_SERIAL_SAFE_PADDED 0
#endif
-static int debug;
-static int safe = 1;
-static int padded = CONFIG_USB_SERIAL_SAFE_PADDED;
+static bool debug;
+static bool safe = 1;
+static bool padded = CONFIG_USB_SERIAL_SAFE_PADDED;
#define DRIVER_VERSION "v0.1"
#define DRIVER_AUTHOR "sl@lineo.com, tbr@lineo.com, Johan Hovold <jhovold@gmail.com>"
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index b18179bda0d8..fdae0a4407cb 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -46,8 +46,8 @@
allocations > PAGE_SIZE and the number of packets in a page
is an integer 512 is the largest possible packet on EHCI */
-static int debug;
-static int nmea;
+static bool debug;
+static bool nmea;
/* Used in interface blacklisting */
struct sierra_iface_info {
@@ -681,7 +681,6 @@ static void sierra_instat_callback(struct urb *urb)
/* Resubmit urb so we continue receiving IRQ data */
if (status != -ESHUTDOWN && status != -ENOENT) {
usb_mark_last_busy(serial->dev);
- urb->dev = serial->dev;
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err && err != -EPERM)
dev_err(&port->dev, "%s: resubmit intr urb "
diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c
index 180ea6c7911c..d7f5eee18f00 100644
--- a/drivers/usb/serial/spcp8x5.c
+++ b/drivers/usb/serial/spcp8x5.c
@@ -33,7 +33,7 @@
#define DRIVER_VERSION "v0.10"
#define DRIVER_DESC "SPCP8x5 USB to serial adaptor driver"
-static int debug;
+static bool debug;
#define SPCP8x5_007_VID 0x04FC
#define SPCP8x5_007_PID 0x0201
diff --git a/drivers/usb/serial/ssu100.c b/drivers/usb/serial/ssu100.c
index 87362e48796e..7697858d8858 100644
--- a/drivers/usb/serial/ssu100.c
+++ b/drivers/usb/serial/ssu100.c
@@ -46,7 +46,7 @@
#define FULLPWRBIT 0x00000080
#define NEXT_BOARD_POWER_BIT 0x00000004
-static int debug;
+static bool debug;
/* Version Information */
#define DRIVER_VERSION "v0.1"
diff --git a/drivers/usb/serial/symbolserial.c b/drivers/usb/serial/symbolserial.c
index 7096f799b071..50651cf4fc61 100644
--- a/drivers/usb/serial/symbolserial.c
+++ b/drivers/usb/serial/symbolserial.c
@@ -20,7 +20,7 @@
#include <linux/usb/serial.h>
#include <linux/uaccess.h>
-static int debug;
+static bool debug;
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x05e0, 0x0600) },
@@ -182,7 +182,6 @@ static void symbol_unthrottle(struct tty_struct *tty)
priv->actually_throttled = false;
spin_unlock_irq(&priv->lock);
- priv->int_urb->dev = port->serial->dev;
if (was_throttled) {
result = usb_submit_urb(priv->int_urb, GFP_KERNEL);
if (result)
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index ea8445689c85..8468eb769a29 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -150,7 +150,7 @@ static int ti_download_firmware(struct ti_device *tdev);
/* Data */
/* module parameters */
-static int debug;
+static bool debug;
static int closing_wait = TI_DEFAULT_CLOSING_WAIT;
static ushort vendor_3410[TI_EXTRA_VID_PID_COUNT];
static unsigned int vendor_3410_count;
@@ -535,9 +535,7 @@ static int ti_open(struct tty_struct *tty, struct usb_serial_port *port)
status = -EINVAL;
goto release_lock;
}
- urb->complete = ti_interrupt_callback;
urb->context = tdev;
- urb->dev = dev;
status = usb_submit_urb(urb, GFP_KERNEL);
if (status) {
dev_err(&port->dev,
@@ -619,9 +617,7 @@ static int ti_open(struct tty_struct *tty, struct usb_serial_port *port)
goto unlink_int_urb;
}
tport->tp_read_urb_state = TI_READ_URB_RUNNING;
- urb->complete = ti_bulk_in_callback;
urb->context = tport;
- urb->dev = dev;
status = usb_submit_urb(urb, GFP_KERNEL);
if (status) {
dev_err(&port->dev, "%s - submit read urb failed, %d\n",
@@ -1236,12 +1232,11 @@ static void ti_bulk_in_callback(struct urb *urb)
exit:
/* continue to read unless stopping */
spin_lock(&tport->tp_lock);
- if (tport->tp_read_urb_state == TI_READ_URB_RUNNING) {
- urb->dev = port->serial->dev;
+ if (tport->tp_read_urb_state == TI_READ_URB_RUNNING)
retval = usb_submit_urb(urb, GFP_ATOMIC);
- } else if (tport->tp_read_urb_state == TI_READ_URB_STOPPING) {
+ else if (tport->tp_read_urb_state == TI_READ_URB_STOPPING)
tport->tp_read_urb_state = TI_READ_URB_STOPPED;
- }
+
spin_unlock(&tport->tp_lock);
if (retval)
dev_err(dev, "%s - resubmit read urb failed, %d\n",
@@ -1574,9 +1569,7 @@ static int ti_restart_read(struct ti_port *tport, struct tty_struct *tty)
tport->tp_read_urb_state = TI_READ_URB_RUNNING;
urb = tport->tp_port->read_urb;
spin_unlock_irqrestore(&tport->tp_lock, flags);
- urb->complete = ti_bulk_in_callback;
urb->context = tport;
- urb->dev = tport->tp_port->serial->dev;
status = usb_submit_urb(urb, GFP_KERNEL);
} else {
tport->tp_read_urb_state = TI_READ_URB_RUNNING;
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index cc274fdf2627..611b206591cb 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -50,7 +50,7 @@ static struct usb_driver usb_serial_driver = {
.disconnect = usb_serial_disconnect,
.suspend = usb_serial_suspend,
.resume = usb_serial_resume,
- .no_dynamic_id = 1,
+ .no_dynamic_id = 1,
.supports_autosuspend = 1,
};
@@ -61,7 +61,7 @@ static struct usb_driver usb_serial_driver = {
drivers depend on it.
*/
-static int debug;
+static bool debug;
/* initially all NULL */
static struct usb_serial *serial_table[SERIAL_TTY_MINORS];
static DEFINE_MUTEX(table_lock);
@@ -260,6 +260,10 @@ static int serial_activate(struct tty_port *tport, struct tty_struct *tty)
else
retval = port->serial->type->open(tty, port);
mutex_unlock(&serial->disc_mutex);
+
+ if (retval < 0)
+ retval = usb_translate_errors(retval);
+
return retval;
}
@@ -360,7 +364,8 @@ static int serial_write(struct tty_struct *tty, const unsigned char *buf,
/* pass on to the driver specific version of this function */
retval = port->serial->type->write(tty, port, buf, count);
-
+ if (retval < 0)
+ retval = usb_translate_errors(retval);
exit:
return retval;
}
@@ -562,8 +567,8 @@ static void kill_traffic(struct usb_serial_port *port)
{
int i;
- usb_kill_urb(port->read_urb);
- usb_kill_urb(port->write_urb);
+ for (i = 0; i < ARRAY_SIZE(port->read_urbs); ++i)
+ usb_kill_urb(port->read_urbs[i]);
for (i = 0; i < ARRAY_SIZE(port->write_urbs); ++i)
usb_kill_urb(port->write_urbs[i]);
/*
@@ -595,17 +600,17 @@ static void port_release(struct device *dev)
kill_traffic(port);
cancel_work_sync(&port->work);
- usb_free_urb(port->read_urb);
- usb_free_urb(port->write_urb);
usb_free_urb(port->interrupt_in_urb);
usb_free_urb(port->interrupt_out_urb);
+ for (i = 0; i < ARRAY_SIZE(port->read_urbs); ++i) {
+ usb_free_urb(port->read_urbs[i]);
+ kfree(port->bulk_in_buffers[i]);
+ }
for (i = 0; i < ARRAY_SIZE(port->write_urbs); ++i) {
usb_free_urb(port->write_urbs[i]);
kfree(port->bulk_out_buffers[i]);
}
kfifo_free(&port->write_fifo);
- kfree(port->bulk_in_buffer);
- kfree(port->bulk_out_buffer);
kfree(port->interrupt_in_buffer);
kfree(port->interrupt_out_buffer);
kfree(port);
@@ -686,16 +691,18 @@ static int serial_carrier_raised(struct tty_port *port)
{
struct usb_serial_port *p = container_of(port, struct usb_serial_port, port);
struct usb_serial_driver *drv = p->serial->type;
+
if (drv->carrier_raised)
return drv->carrier_raised(p);
/* No carrier control - don't block */
- return 1;
+ return 1;
}
static void serial_dtr_rts(struct tty_port *port, int on)
{
struct usb_serial_port *p = container_of(port, struct usb_serial_port, port);
struct usb_serial_driver *drv = p->serial->type;
+
if (drv->dtr_rts)
drv->dtr_rts(p, on);
}
@@ -724,6 +731,7 @@ int usb_serial_probe(struct usb_interface *interface,
unsigned int minor;
int buffer_size;
int i;
+ int j;
int num_interrupt_in = 0;
int num_interrupt_out = 0;
int num_bulk_in = 0;
@@ -906,38 +914,41 @@ int usb_serial_probe(struct usb_interface *interface,
for (i = 0; i < num_bulk_in; ++i) {
endpoint = bulk_in_endpoint[i];
port = serial->port[i];
- port->read_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!port->read_urb) {
- dev_err(&interface->dev, "No free urbs available\n");
- goto probe_error;
- }
buffer_size = max_t(int, serial->type->bulk_in_size,
usb_endpoint_maxp(endpoint));
port->bulk_in_size = buffer_size;
port->bulk_in_endpointAddress = endpoint->bEndpointAddress;
- port->bulk_in_buffer = kmalloc(buffer_size, GFP_KERNEL);
- if (!port->bulk_in_buffer) {
- dev_err(&interface->dev,
+
+ for (j = 0; j < ARRAY_SIZE(port->read_urbs); ++j) {
+ set_bit(j, &port->read_urbs_free);
+ port->read_urbs[j] = usb_alloc_urb(0, GFP_KERNEL);
+ if (!port->read_urbs[j]) {
+ dev_err(&interface->dev,
+ "No free urbs available\n");
+ goto probe_error;
+ }
+ port->bulk_in_buffers[j] = kmalloc(buffer_size,
+ GFP_KERNEL);
+ if (!port->bulk_in_buffers[j]) {
+ dev_err(&interface->dev,
"Couldn't allocate bulk_in_buffer\n");
- goto probe_error;
- }
- usb_fill_bulk_urb(port->read_urb, dev,
- usb_rcvbulkpipe(dev,
+ goto probe_error;
+ }
+ usb_fill_bulk_urb(port->read_urbs[j], dev,
+ usb_rcvbulkpipe(dev,
endpoint->bEndpointAddress),
- port->bulk_in_buffer, buffer_size,
- serial->type->read_bulk_callback, port);
+ port->bulk_in_buffers[j], buffer_size,
+ serial->type->read_bulk_callback,
+ port);
+ }
+
+ port->read_urb = port->read_urbs[0];
+ port->bulk_in_buffer = port->bulk_in_buffers[0];
}
for (i = 0; i < num_bulk_out; ++i) {
- int j;
-
endpoint = bulk_out_endpoint[i];
port = serial->port[i];
- port->write_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!port->write_urb) {
- dev_err(&interface->dev, "No free urbs available\n");
- goto probe_error;
- }
if (kfifo_alloc(&port->write_fifo, PAGE_SIZE, GFP_KERNEL))
goto probe_error;
buffer_size = serial->type->bulk_out_size;
@@ -945,17 +956,7 @@ int usb_serial_probe(struct usb_interface *interface,
buffer_size = usb_endpoint_maxp(endpoint);
port->bulk_out_size = buffer_size;
port->bulk_out_endpointAddress = endpoint->bEndpointAddress;
- port->bulk_out_buffer = kmalloc(buffer_size, GFP_KERNEL);
- if (!port->bulk_out_buffer) {
- dev_err(&interface->dev,
- "Couldn't allocate bulk_out_buffer\n");
- goto probe_error;
- }
- usb_fill_bulk_urb(port->write_urb, dev,
- usb_sndbulkpipe(dev,
- endpoint->bEndpointAddress),
- port->bulk_out_buffer, buffer_size,
- serial->type->write_bulk_callback, port);
+
for (j = 0; j < ARRAY_SIZE(port->write_urbs); ++j) {
set_bit(j, &port->write_urbs_free);
port->write_urbs[j] = usb_alloc_urb(0, GFP_KERNEL);
@@ -978,6 +979,9 @@ int usb_serial_probe(struct usb_interface *interface,
serial->type->write_bulk_callback,
port);
}
+
+ port->write_urb = port->write_urbs[0];
+ port->bulk_out_buffer = port->bulk_out_buffers[0];
}
if (serial->type->read_int_callback) {
@@ -1196,7 +1200,7 @@ static const struct tty_operations serial_ops = {
.open = serial_open,
.close = serial_close,
.write = serial_write,
- .hangup = serial_hangup,
+ .hangup = serial_hangup,
.write_room = serial_write_room,
.ioctl = serial_ioctl,
.set_termios = serial_set_termios,
@@ -1206,9 +1210,9 @@ static const struct tty_operations serial_ops = {
.chars_in_buffer = serial_chars_in_buffer,
.tiocmget = serial_tiocmget,
.tiocmset = serial_tiocmset,
- .get_icount = serial_get_icount,
- .cleanup = serial_cleanup,
- .install = serial_install,
+ .get_icount = serial_get_icount,
+ .cleanup = serial_cleanup,
+ .install = serial_install,
.proc_fops = &serial_proc_fops,
};
@@ -1237,7 +1241,7 @@ static int __init usb_serial_init(void)
usb_serial_tty_driver->owner = THIS_MODULE;
usb_serial_tty_driver->driver_name = "usbserial";
- usb_serial_tty_driver->name = "ttyUSB";
+ usb_serial_tty_driver->name = "ttyUSB";
usb_serial_tty_driver->major = SERIAL_TTY_MAJOR;
usb_serial_tty_driver->minor_start = 0;
usb_serial_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
@@ -1336,7 +1340,6 @@ static void fixup_generic(struct usb_serial_driver *device)
int usb_serial_register(struct usb_serial_driver *driver)
{
- /* must be called with BKL held */
int retval;
if (usb_disabled())
@@ -1374,7 +1377,6 @@ EXPORT_SYMBOL_GPL(usb_serial_register);
void usb_serial_deregister(struct usb_serial_driver *device)
{
- /* must be called with BKL held */
printk(KERN_INFO "USB Serial deregistering driver %s\n",
device->description);
mutex_lock(&table_lock);
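[Note: the usb-serial core hunks above replace the single per-port read URB and buffer with small arrays (read_urbs[], bulk_in_buffers[]) plus a read_urbs_free bitmap, mirroring what the write side already does; slot 0 is aliased back to the legacy read_urb/bulk_in_buffer fields for drivers that still use them. A hedged sketch of how a consumer of that bitmap might claim and submit one of the URBs — illustrative only, the generic-driver side is not part of this hunk:

	#include <linux/bitops.h>
	#include <linux/usb.h>
	#include <linux/usb/serial.h>

	static int example_submit_read_urb(struct usb_serial_port *port, int index,
					   gfp_t mem_flags)
	{
		int res;

		/* Atomically claim the slot; if the bit was already clear the
		 * urb is in flight and there is nothing to do. */
		if (!test_and_clear_bit(index, &port->read_urbs_free))
			return 0;

		res = usb_submit_urb(port->read_urbs[index], mem_flags);
		if (res) {
			dev_err(&port->dev, "failed to submit read urb %d: %d\n",
				index, res);
			set_bit(index, &port->read_urbs_free);	/* give the slot back */
		}
		return res;
	}
]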
diff --git a/drivers/usb/serial/usb_debug.c b/drivers/usb/serial/usb_debug.c
index 95a82148ee81..9b632e753210 100644
--- a/drivers/usb/serial/usb_debug.c
+++ b/drivers/usb/serial/usb_debug.c
@@ -40,7 +40,7 @@ static struct usb_driver debug_driver = {
.probe = usb_serial_probe,
.disconnect = usb_serial_disconnect,
.id_table = id_table,
- .no_dynamic_id = 1,
+ .no_dynamic_id = 1,
};
/* This HW really does not support a serial break, so one will be
@@ -54,19 +54,18 @@ static void usb_debug_break_ctl(struct tty_struct *tty, int break_state)
usb_serial_generic_write(tty, port, USB_DEBUG_BRK, USB_DEBUG_BRK_SIZE);
}
-static void usb_debug_read_bulk_callback(struct urb *urb)
+static void usb_debug_process_read_urb(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
if (urb->actual_length == USB_DEBUG_BRK_SIZE &&
- memcmp(urb->transfer_buffer, USB_DEBUG_BRK,
- USB_DEBUG_BRK_SIZE) == 0) {
+ memcmp(urb->transfer_buffer, USB_DEBUG_BRK,
+ USB_DEBUG_BRK_SIZE) == 0) {
usb_serial_handle_break(port);
- usb_serial_generic_submit_read_urb(port, GFP_ATOMIC);
return;
}
- usb_serial_generic_read_bulk_callback(urb);
+ usb_serial_generic_process_read_urb(urb);
}
static struct usb_serial_driver debug_device = {
@@ -79,7 +78,7 @@ static struct usb_serial_driver debug_device = {
.num_ports = 1,
.bulk_out_size = USB_DEBUG_MAX_PACKET_SIZE,
.break_ctl = usb_debug_break_ctl,
- .read_bulk_callback = usb_debug_read_bulk_callback,
+ .process_read_urb = usb_debug_process_read_urb,
};
static int __init debug_init(void)
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
index d555ca9567b8..c88657dd31c8 100644
--- a/drivers/usb/serial/usb_wwan.c
+++ b/drivers/usb/serial/usb_wwan.c
@@ -37,7 +37,7 @@
#include <linux/serial.h>
#include "usb-wwan.h"
-static int debug;
+static bool debug;
void usb_wwan_dtr_rts(struct usb_serial_port *port, int on)
{
diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
index 1c11959a7d58..210e4b10dc11 100644
--- a/drivers/usb/serial/visor.c
+++ b/drivers/usb/serial/visor.c
@@ -52,7 +52,7 @@ static int palm_os_4_probe(struct usb_serial *serial,
const struct usb_device_id *id);
/* Parameters that may be passed into the module. */
-static int debug;
+static bool debug;
static __u16 vendor;
static __u16 product;
diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
index 5b073bcc807b..7e0acf5c8e38 100644
--- a/drivers/usb/serial/whiteheat.c
+++ b/drivers/usb/serial/whiteheat.c
@@ -14,57 +14,6 @@
*
* See Documentation/usb/usb-serial.txt for more information on using this
* driver
- *
- * (10/09/2002) Stuart MacDonald (stuartm@connecttech.com)
- * Upgrade to full working driver
- *
- * (05/30/2001) gkh
- * switched from using spinlock to a semaphore, which fixes lots of
- * problems.
- *
- * (04/08/2001) gb
- * Identify version on module load.
- *
- * 2001_Mar_19 gkh
- * Fixed MOD_INC and MOD_DEC logic, the ability to open a port more
- * than once, and the got the proper usb_device_id table entries so
- * the driver works again.
- *
- * (11/01/2000) Adam J. Richter
- * usb_device_id table support
- *
- * (10/05/2000) gkh
- * Fixed bug with urb->dev not being set properly, now that the usb
- * core needs it.
- *
- * (10/03/2000) smd
- * firmware is improved to guard against crap sent to device
- * firmware now replies CMD_FAILURE on bad things
- * read_callback fix you provided for private info struct
- * command_finished now indicates success or fail
- * setup_port struct now packed to avoid gcc padding
- * firmware uses 1 based port numbering, driver now handles that
- *
- * (09/11/2000) gkh
- * Removed DEBUG #ifdefs with call to usb_serial_debug_data
- *
- * (07/19/2000) gkh
- * Added module_init and module_exit functions to handle the fact that this
- * driver is a loadable module now.
- * Fixed bug with port->minor that was found by Al Borchers
- *
- * (07/04/2000) gkh
- * Added support for port settings. Baud rate can now be changed. Line
- * signals are not transferred to and from the tty layer yet, but things
- * seem to be working well now.
- *
- * (05/04/2000) gkh
- * First cut at open and close commands. Data can flow through the ports at
- * default speeds now.
- *
- * (03/26/2000) gkh
- * Split driver up into device specific pieces.
- *
*/
#include <linux/kernel.h>
@@ -87,7 +36,7 @@
#include <linux/ihex.h>
#include "whiteheat.h" /* WhiteHEAT specific commands */
-static int debug;
+static bool debug;
#ifndef CMSPAR
#define CMSPAR 0
@@ -753,7 +702,6 @@ static void whiteheat_close(struct usb_serial_port *port)
static int whiteheat_write(struct tty_struct *tty,
struct usb_serial_port *port, const unsigned char *buf, int count)
{
- struct usb_serial *serial = port->serial;
struct whiteheat_private *info = usb_get_serial_port_data(port);
struct whiteheat_urb_wrap *wrap;
struct urb *urb;
@@ -789,7 +737,6 @@ static int whiteheat_write(struct tty_struct *tty,
usb_serial_debug_data(debug, &port->dev,
__func__, bytes, urb->transfer_buffer);
- urb->dev = serial->dev;
urb->transfer_buffer_length = bytes;
result = usb_submit_urb(urb, GFP_ATOMIC);
if (result) {
@@ -1035,7 +982,6 @@ static void command_port_read_callback(struct urb *urb)
dbg("%s - bad reply from firmware", __func__);
/* Continue trying to always read */
- command_port->read_urb->dev = command_port->serial->dev;
result = usb_submit_urb(command_port->read_urb, GFP_ATOMIC);
if (result)
dbg("%s - failed resubmitting read urb, error %d",
@@ -1141,7 +1087,6 @@ static int firm_send_command(struct usb_serial_port *port, __u8 command,
transfer_buffer[0] = command;
memcpy(&transfer_buffer[1], data, datasize);
command_port->write_urb->transfer_buffer_length = datasize + 1;
- command_port->write_urb->dev = port->serial->dev;
retval = usb_submit_urb(command_port->write_urb, GFP_NOIO);
if (retval) {
dbg("%s - submit urb failed", __func__);
@@ -1362,7 +1307,6 @@ static int start_command_port(struct usb_serial *serial)
/* Work around HCD bugs */
usb_clear_halt(serial->dev, command_port->read_urb->pipe);
- command_port->read_urb->dev = serial->dev;
retval = usb_submit_urb(command_port->read_urb, GFP_KERNEL);
if (retval) {
dev_err(&serial->dev->dev,
@@ -1410,7 +1354,6 @@ static int start_port_read(struct usb_serial_port *port)
list_del(tmp);
wrap = list_entry(tmp, struct whiteheat_urb_wrap, list);
urb = wrap->urb;
- urb->dev = port->serial->dev;
spin_unlock_irqrestore(&info->lock, flags);
retval = usb_submit_urb(urb, GFP_KERNEL);
if (retval) {
@@ -1490,7 +1433,6 @@ static void rx_data_softint(struct work_struct *work)
sent += tty_insert_flip_string(tty,
urb->transfer_buffer, urb->actual_length);
- urb->dev = port->serial->dev;
result = usb_submit_urb(urb, GFP_ATOMIC);
if (result) {
dev_err(&port->dev,
diff --git a/drivers/usb/storage/alauda.c b/drivers/usb/storage/alauda.c
index 3ca87a823342..51af2fee2efd 100644
--- a/drivers/usb/storage/alauda.c
+++ b/drivers/usb/storage/alauda.c
@@ -139,7 +139,7 @@ static int init_alauda(struct us_data *us);
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags)|(USB_US_TYPE_STOR<<24) }
-struct usb_device_id alauda_usb_ids[] = {
+static struct usb_device_id alauda_usb_ids[] = {
# include "unusual_alauda.h"
{ } /* Terminating entry */
};
diff --git a/drivers/usb/storage/cypress_atacb.c b/drivers/usb/storage/cypress_atacb.c
index c7909dfa2434..387cbd47acc9 100644
--- a/drivers/usb/storage/cypress_atacb.c
+++ b/drivers/usb/storage/cypress_atacb.c
@@ -43,7 +43,7 @@ MODULE_LICENSE("GPL");
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags)|(USB_US_TYPE_STOR<<24) }
-struct usb_device_id cypress_usb_ids[] = {
+static struct usb_device_id cypress_usb_ids[] = {
# include "unusual_cypress.h"
{ } /* Terminating entry */
};
diff --git a/drivers/usb/storage/datafab.c b/drivers/usb/storage/datafab.c
index a99be857b794..15d41f2b3d6f 100644
--- a/drivers/usb/storage/datafab.c
+++ b/drivers/usb/storage/datafab.c
@@ -88,7 +88,7 @@ static int datafab_determine_lun(struct us_data *us,
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags)|(USB_US_TYPE_STOR<<24) }
-struct usb_device_id datafab_usb_ids[] = {
+static struct usb_device_id datafab_usb_ids[] = {
# include "unusual_datafab.h"
{ } /* Terminating entry */
};
diff --git a/drivers/usb/storage/ene_ub6250.c b/drivers/usb/storage/ene_ub6250.c
index b0a1687ca942..a6ade4071a9a 100644
--- a/drivers/usb/storage/ene_ub6250.c
+++ b/drivers/usb/storage/ene_ub6250.c
@@ -42,7 +42,7 @@ MODULE_LICENSE("GPL");
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags)|(USB_US_TYPE_STOR<<24) }
-struct usb_device_id ene_ub6250_usb_ids[] = {
+static struct usb_device_id ene_ub6250_usb_ids[] = {
# include "unusual_ene_ub6250.h"
{ } /* Terminating entry */
};
@@ -607,8 +607,8 @@ static int sd_scsi_mode_sense(struct us_data *us, struct scsi_cmnd *srb)
static int sd_scsi_read_capacity(struct us_data *us, struct scsi_cmnd *srb)
{
- u32 bl_num;
- u16 bl_len;
+ u32 bl_num;
+ u32 bl_len;
unsigned int offset = 0;
unsigned char buf[8];
struct scatterlist *sg = NULL;
@@ -622,7 +622,7 @@ static int sd_scsi_read_capacity(struct us_data *us, struct scsi_cmnd *srb)
else
bl_num = (info->HC_C_SIZE + 1) * 1024 - 1;
} else {
- bl_len = 1<<(info->SD_READ_BL_LEN);
+ bl_len = 1 << (info->SD_READ_BL_LEN);
bl_num = info->SD_Block_Mult * (info->SD_C_SIZE + 1)
* (1 << (info->SD_C_SIZE_MULT + 2)) - 1;
}
@@ -777,7 +777,7 @@ static int ms_lib_free_logicalmap(struct us_data *us)
return 0;
}
-int ms_lib_alloc_logicalmap(struct us_data *us)
+static int ms_lib_alloc_logicalmap(struct us_data *us)
{
u32 i;
struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
@@ -2248,7 +2248,7 @@ static int sd_scsi_irp(struct us_data *us, struct scsi_cmnd *srb)
/*
* ms_scsi_irp()
*/
-int ms_scsi_irp(struct us_data *us, struct scsi_cmnd *srb)
+static int ms_scsi_irp(struct us_data *us, struct scsi_cmnd *srb)
{
int result;
struct ene_ub6250_info *info = (struct ene_ub6250_info *)us->extra;
diff --git a/drivers/usb/storage/freecom.c b/drivers/usb/storage/freecom.c
index 03d4a873748c..fa1615748475 100644
--- a/drivers/usb/storage/freecom.c
+++ b/drivers/usb/storage/freecom.c
@@ -119,7 +119,7 @@ static int init_freecom(struct us_data *us);
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags)|(USB_US_TYPE_STOR<<24) }
-struct usb_device_id freecom_usb_ids[] = {
+static struct usb_device_id freecom_usb_ids[] = {
# include "unusual_freecom.h"
{ } /* Terminating entry */
};
diff --git a/drivers/usb/storage/isd200.c b/drivers/usb/storage/isd200.c
index 6d6923317f10..bd5502700831 100644
--- a/drivers/usb/storage/isd200.c
+++ b/drivers/usb/storage/isd200.c
@@ -61,7 +61,7 @@
#include "scsiglue.h"
MODULE_DESCRIPTION("Driver for In-System Design, Inc. ISD200 ASIC");
-MODULE_AUTHOR("Bjrn Stenberg <bjorn@haxx.se>");
+MODULE_AUTHOR("Björn Stenberg <bjorn@haxx.se>");
MODULE_LICENSE("GPL");
static int isd200_Initialization(struct us_data *us);
@@ -76,7 +76,7 @@ static int isd200_Initialization(struct us_data *us);
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags)|(USB_US_TYPE_STOR<<24) }
-struct usb_device_id isd200_usb_ids[] = {
+static struct usb_device_id isd200_usb_ids[] = {
# include "unusual_isd200.h"
{ } /* Terminating entry */
};
diff --git a/drivers/usb/storage/jumpshot.c b/drivers/usb/storage/jumpshot.c
index 54b71650b69c..a19211b5c265 100644
--- a/drivers/usb/storage/jumpshot.c
+++ b/drivers/usb/storage/jumpshot.c
@@ -71,7 +71,7 @@ MODULE_LICENSE("GPL");
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags)|(USB_US_TYPE_STOR<<24) }
-struct usb_device_id jumpshot_usb_ids[] = {
+static struct usb_device_id jumpshot_usb_ids[] = {
# include "unusual_jumpshot.h"
{ } /* Terminating entry */
};
diff --git a/drivers/usb/storage/karma.c b/drivers/usb/storage/karma.c
index 35181e29124d..e720f8ebdf9f 100644
--- a/drivers/usb/storage/karma.c
+++ b/drivers/usb/storage/karma.c
@@ -59,7 +59,7 @@ static int rio_karma_init(struct us_data *us);
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags)|(USB_US_TYPE_STOR<<24) }
-struct usb_device_id karma_usb_ids[] = {
+static struct usb_device_id karma_usb_ids[] = {
# include "unusual_karma.h"
{ } /* Terminating entry */
};
diff --git a/drivers/usb/storage/onetouch.c b/drivers/usb/storage/onetouch.c
index 721c8c587305..d75155c38200 100644
--- a/drivers/usb/storage/onetouch.c
+++ b/drivers/usb/storage/onetouch.c
@@ -69,7 +69,7 @@ struct usb_onetouch {
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags)|(USB_US_TYPE_STOR<<24) }
-struct usb_device_id onetouch_usb_ids[] = {
+static struct usb_device_id onetouch_usb_ids[] = {
# include "unusual_onetouch.h"
{ } /* Terminating entry */
};
diff --git a/drivers/usb/storage/realtek_cr.c b/drivers/usb/storage/realtek_cr.c
index c41cd30d2c01..1f62723ef1a8 100644
--- a/drivers/usb/storage/realtek_cr.c
+++ b/drivers/usb/storage/realtek_cr.c
@@ -398,10 +398,9 @@ static int rts51x_write_mem(struct us_data *us, u16 addr, u8 *data, u16 len)
u8 cmnd[12] = { 0 };
u8 *buf;
- buf = kmalloc(len, GFP_NOIO);
+ buf = kmemdup(data, len, GFP_NOIO);
if (buf == NULL)
return USB_STOR_TRANSPORT_ERROR;
- memcpy(buf, data, len);
US_DEBUGP("%s, addr = 0x%x, len = %d\n", __func__, addr, len);
@@ -507,15 +506,14 @@ static int enable_oscillator(struct us_data *us)
static int __do_config_autodelink(struct us_data *us, u8 *data, u16 len)
{
int retval;
- u16 addr = 0xFE47;
u8 cmnd[12] = {0};
- US_DEBUGP("%s, addr = 0x%x, len = %d\n", __FUNCTION__, addr, len);
+ US_DEBUGP("%s, addr = 0xfe47, len = %d\n", __FUNCTION__, len);
cmnd[0] = 0xF0;
cmnd[1] = 0x0E;
- cmnd[2] = (u8)(addr >> 8);
- cmnd[3] = (u8)addr;
+ cmnd[2] = 0xfe;
+ cmnd[3] = 0x47;
cmnd[4] = (u8)(len >> 8);
cmnd[5] = (u8)len;
@@ -818,7 +816,7 @@ static inline int working_scsi(struct scsi_cmnd *srb)
return 1;
}
-void rts51x_invoke_transport(struct scsi_cmnd *srb, struct us_data *us)
+static void rts51x_invoke_transport(struct scsi_cmnd *srb, struct us_data *us)
{
struct rts51x_chip *chip = (struct rts51x_chip *)(us->extra);
static int card_first_show = 1;
@@ -977,7 +975,7 @@ static void realtek_cr_destructor(void *extra)
}
#ifdef CONFIG_PM
-int realtek_cr_suspend(struct usb_interface *iface, pm_message_t message)
+static int realtek_cr_suspend(struct usb_interface *iface, pm_message_t message)
{
struct us_data *us = usb_get_intfdata(iface);
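[Note: in the realtek_cr hunks above, the open-coded kmalloc() + memcpy() pair becomes a single kmemdup(), and the fixed register address is folded directly into the command block. The allocation idiom in general terms — a generic sketch, not this driver's buffer handling:

	#include <linux/slab.h>

	static u8 *example_dup_payload(const u8 *src, size_t len)
	{
		/* kmemdup() allocates and copies in one step; GFP_NOIO because
		 * this driver runs on the storage I/O path. */
		return kmemdup(src, len, GFP_NOIO);
	}
]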
diff --git a/drivers/usb/storage/sddr09.c b/drivers/usb/storage/sddr09.c
index 83ee49e737bd..425df7df2e56 100644
--- a/drivers/usb/storage/sddr09.c
+++ b/drivers/usb/storage/sddr09.c
@@ -71,7 +71,7 @@ static int usb_stor_sddr09_init(struct us_data *us);
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags)|(USB_US_TYPE_STOR<<24) }
-struct usb_device_id sddr09_usb_ids[] = {
+static struct usb_device_id sddr09_usb_ids[] = {
# include "unusual_sddr09.h"
{ } /* Terminating entry */
};
diff --git a/drivers/usb/storage/sddr55.c b/drivers/usb/storage/sddr55.c
index 8983ec2ffb5a..e4ca5fcb7cc3 100644
--- a/drivers/usb/storage/sddr55.c
+++ b/drivers/usb/storage/sddr55.c
@@ -48,7 +48,7 @@ MODULE_LICENSE("GPL");
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags)|(USB_US_TYPE_STOR<<24) }
-struct usb_device_id sddr55_usb_ids[] = {
+static struct usb_device_id sddr55_usb_ids[] = {
# include "unusual_sddr55.h"
{ } /* Terminating entry */
};
diff --git a/drivers/usb/storage/shuttle_usbat.c b/drivers/usb/storage/shuttle_usbat.c
index a4c02751af4e..1369d2590616 100644
--- a/drivers/usb/storage/shuttle_usbat.c
+++ b/drivers/usb/storage/shuttle_usbat.c
@@ -170,7 +170,7 @@ static int init_usbat_flash(struct us_data *us);
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags)|(USB_US_TYPE_STOR<<24) }
-struct usb_device_id usbat_usb_ids[] = {
+static struct usb_device_id usbat_usb_ids[] = {
# include "unusual_usbat.h"
{ } /* Terminating entry */
};
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 3041a974faf3..856ad92c40de 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1854,6 +1854,13 @@ UNUSUAL_DEV( 0x1370, 0x6828, 0x0110, 0x0110,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_IGNORE_RESIDUE ),
+/* Reported by Qinglin Ye <yestyle@gmail.com> */
+UNUSUAL_DEV( 0x13fe, 0x3600, 0x0100, 0x0100,
+ "Kingston",
+ "DT 101 G2",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_BULK_IGNORE_TAG ),
+
/* Reported by Francesco Foresti <frafore@tiscali.it> */
UNUSUAL_DEV( 0x14cd, 0x6600, 0x0201, 0x0201,
"Super Top",
@@ -1907,7 +1914,7 @@ UNUSUAL_DEV( 0x1b1c, 0x1ab5, 0x0200, 0x0200,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_INITIAL_READ10 ),
-/* Patch by Richard Schtz <r.schtz@t-online.de>
+/* Patch by Richard Schütz <r.schtz@t-online.de>
* This external hard drive enclosure uses a JMicron chip which
* needs the US_FL_IGNORE_RESIDUE flag to work properly. */
UNUSUAL_DEV( 0x1e68, 0x001b, 0x0000, 0x0000,
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index c325e69415a1..3dd7da9fd504 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -831,7 +831,8 @@ static int usb_stor_scan_thread(void * __us)
dev_dbg(dev, "device found\n");
- set_freezable_with_signal();
+ set_freezable();
+
/*
* Wait for the timeout to expire or for a disconnect
*
@@ -839,16 +840,16 @@ static int usb_stor_scan_thread(void * __us)
* fail to freeze, but we can't be non-freezable either. Nor can
* khubd freeze while waiting for scanning to complete as it may
* hold the device lock, causing a hang when suspending devices.
- * So we request a fake signal when freezing and use
- * interruptible sleep to kick us out of our wait early when
- * freezing happens.
+ * So instead of using wait_event_freezable(), explicitly test
+ * for (DONT_SCAN || freezing) in interruptible wait and proceed
+ * if any of DONT_SCAN, freezing or timeout has happened.
*/
if (delay_use > 0) {
dev_dbg(dev, "waiting for device to settle "
"before scanning\n");
wait_event_interruptible_timeout(us->delay_wait,
- test_bit(US_FLIDX_DONT_SCAN, &us->dflags),
- delay_use * HZ);
+ test_bit(US_FLIDX_DONT_SCAN, &us->dflags) ||
+ freezing(current), delay_use * HZ);
}
/* If the device is still connected, perform the scanning */
@@ -1073,6 +1074,7 @@ static struct usb_driver usb_storage_driver = {
.id_table = usb_storage_usb_ids,
.supports_autosuspend = 1,
.soft_unbind = 1,
+ .no_dynamic_id = 1,
};
static int __init usb_stor_init(void)
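[Note: the scan-thread change above swaps the old fake-signal freezer request for a plain set_freezable() plus an explicit freezing(current) test in the wait condition, as the rewritten comment explains. The general freezable-kthread idiom this relies on looks roughly like the following — a generic sketch, not this driver's loop:

	#include <linux/freezer.h>
	#include <linux/kthread.h>

	static int example_thread(void *data)
	{
		set_freezable();			/* opt in to the freezer */
		while (!kthread_should_stop()) {
			try_to_freeze();		/* park here during suspend */
			/* ... do one unit of work, then sleep on a waitqueue
			 * whose condition also tests freezing(current), as in
			 * the hunk above ... */
		}
		return 0;
	}
]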
diff --git a/drivers/usb/usb-skeleton.c b/drivers/usb/usb-skeleton.c
index 5c6c1bdbd455..8efeae24764f 100644
--- a/drivers/usb/usb-skeleton.c
+++ b/drivers/usb/usb-skeleton.c
@@ -27,6 +27,8 @@
#define USB_SKEL_VENDOR_ID 0xfff0
#define USB_SKEL_PRODUCT_ID 0xfff0
+static DEFINE_MUTEX(skel_mutex);
+
/* table of devices that work with this driver */
static const struct usb_device_id skel_table[] = {
{ USB_DEVICE(USB_SKEL_VENDOR_ID, USB_SKEL_PRODUCT_ID) },
@@ -60,7 +62,6 @@ struct usb_skel {
__u8 bulk_in_endpointAddr; /* the address of the bulk in endpoint */
__u8 bulk_out_endpointAddr; /* the address of the bulk out endpoint */
int errors; /* the last request tanked */
- int open_count; /* count the number of openers */
bool ongoing_read; /* a read is going on */
bool processed_urb; /* indicates we haven't processed the urb */
spinlock_t err_lock; /* lock for errors */
@@ -100,39 +101,37 @@ static int skel_open(struct inode *inode, struct file *file)
goto exit;
}
+ mutex_lock(&skel_mutex);
dev = usb_get_intfdata(interface);
if (!dev) {
+ mutex_unlock(&skel_mutex);
retval = -ENODEV;
goto exit;
}
/* increment our usage count for the device */
kref_get(&dev->kref);
+ mutex_unlock(&skel_mutex);
/* lock the device to allow correctly handling errors
* in resumption */
mutex_lock(&dev->io_mutex);
+ if (!dev->interface) {
+ retval = -ENODEV;
+ goto out_err;
+ }
- if (!dev->open_count++) {
- retval = usb_autopm_get_interface(interface);
- if (retval) {
- dev->open_count--;
- mutex_unlock(&dev->io_mutex);
- kref_put(&dev->kref, skel_delete);
- goto exit;
- }
- } /* else { //uncomment this block if you want exclusive open
- retval = -EBUSY;
- dev->open_count--;
- mutex_unlock(&dev->io_mutex);
- kref_put(&dev->kref, skel_delete);
- goto exit;
- } */
- /* prevent the device from being autosuspended */
+ retval = usb_autopm_get_interface(interface);
+ if (retval)
+ goto out_err;
/* save our object in the file's private structure */
file->private_data = dev;
+
+out_err:
mutex_unlock(&dev->io_mutex);
+ if (retval)
+ kref_put(&dev->kref, skel_delete);
exit:
return retval;
@@ -148,7 +147,7 @@ static int skel_release(struct inode *inode, struct file *file)
/* allow the device to be autosuspended */
mutex_lock(&dev->io_mutex);
- if (!--dev->open_count && dev->interface)
+ if (dev->interface)
usb_autopm_put_interface(dev->interface);
mutex_unlock(&dev->io_mutex);
@@ -612,7 +611,6 @@ static void skel_disconnect(struct usb_interface *interface)
int minor = interface->minor;
dev = usb_get_intfdata(interface);
- usb_set_intfdata(interface, NULL);
/* give back our minor */
usb_deregister_dev(interface, &skel_class);
@@ -624,8 +622,12 @@ static void skel_disconnect(struct usb_interface *interface)
usb_kill_anchored_urbs(&dev->submitted);
+ mutex_lock(&skel_mutex);
+ usb_set_intfdata(interface, NULL);
+
/* decrement our usage count */
kref_put(&dev->kref, skel_delete);
+ mutex_unlock(&skel_mutex);
dev_info(&interface->dev, "USB Skeleton #%d now disconnected", minor);
}
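[Note: the usb-skeleton changes drop the open_count bookkeeping in favour of taking an autopm reference on every open and dropping it on every release, while the new global skel_mutex closes the race between open() looking up the interface data and disconnect() clearing it. The core of that handoff, condensed from the hunks above:

	/* open(): look up and pin the device under skel_mutex */
	mutex_lock(&skel_mutex);
	dev = usb_get_intfdata(interface);
	if (!dev) {
		mutex_unlock(&skel_mutex);
		return -ENODEV;		/* already disconnected */
	}
	kref_get(&dev->kref);
	mutex_unlock(&skel_mutex);

	/* disconnect(): clear the pointer and drop the probe reference under
	 * the same mutex, so open() sees either a valid device or NULL,
	 * never a half-torn-down one. */
	mutex_lock(&skel_mutex);
	usb_set_intfdata(interface, NULL);
	kref_put(&dev->kref, skel_delete);
	mutex_unlock(&skel_mutex);
]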
diff --git a/drivers/usb/wusbcore/Kconfig b/drivers/usb/wusbcore/Kconfig
index eb09a0a14a80..0ead8826ec79 100644
--- a/drivers/usb/wusbcore/Kconfig
+++ b/drivers/usb/wusbcore/Kconfig
@@ -5,6 +5,7 @@ config USB_WUSB
tristate "Enable Wireless USB extensions (EXPERIMENTAL)"
depends on EXPERIMENTAL
depends on USB
+ depends on PCI
select UWB
select CRYPTO
select CRYPTO_BLKCIPHER
diff --git a/drivers/usb/wusbcore/security.c b/drivers/usb/wusbcore/security.c
index 371f61733f05..fa810a83e830 100644
--- a/drivers/usb/wusbcore/security.c
+++ b/drivers/usb/wusbcore/security.c
@@ -354,7 +354,7 @@ int wusb_dev_4way_handshake(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev,
struct wusb_keydvt_in keydvt_in;
struct wusb_keydvt_out keydvt_out;
- hs = kzalloc(3*sizeof(hs[0]), GFP_KERNEL);
+ hs = kcalloc(3, sizeof(hs[0]), GFP_KERNEL);
if (hs == NULL) {
dev_err(dev, "can't allocate handshake data\n");
goto error_kzalloc;
diff --git a/drivers/uwb/est.c b/drivers/uwb/est.c
index de81ebf51784..86ed7e61e597 100644
--- a/drivers/uwb/est.c
+++ b/drivers/uwb/est.c
@@ -184,7 +184,7 @@ int uwb_est_create(void)
uwb_est_size = 2;
uwb_est_used = 0;
- uwb_est = kzalloc(uwb_est_size * sizeof(uwb_est[0]), GFP_KERNEL);
+ uwb_est = kcalloc(uwb_est_size, sizeof(uwb_est[0]), GFP_KERNEL);
if (uwb_est == NULL)
return -ENOMEM;
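[Note: both the wusbcore and UWB hunks convert kzalloc(n * size, ...) calls to kcalloc(n, size, ...), which zeroes the allocation just the same but fails cleanly instead of wrapping if the multiplication would overflow. Generic form, with illustrative variable names:

	#include <linux/slab.h>

	u32 *table = kcalloc(nelems, sizeof(*table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;
]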
diff --git a/drivers/uwb/i1480/dfu/usb.c b/drivers/uwb/i1480/dfu/usb.c
index c8f684833d58..2bfc846ac071 100644
--- a/drivers/uwb/i1480/dfu/usb.c
+++ b/drivers/uwb/i1480/dfu/usb.c
@@ -104,7 +104,7 @@ void i1480_usb_destroy(struct i1480_usb *i1480_usb)
*
* Data buffers to USB cannot be on the stack or in vmalloc'ed areas,
* so we copy it to the local i1480 buffer before proceeding. In any
- * case, we have a max size we can send, soooo.
+ * case, we have a max size we can send.
*/
static
int i1480_usb_write(struct i1480 *i1480, u32 memory_address,
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 882a51fe7b3c..9dab1f51dd43 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -856,9 +856,9 @@ static const struct file_operations vhost_net_fops = {
};
static struct miscdevice vhost_net_misc = {
- MISC_DYNAMIC_MINOR,
- "vhost-net",
- &vhost_net_fops,
+ .minor = VHOST_NET_MINOR,
+ .name = "vhost-net",
+ .fops = &vhost_net_fops,
};
static int vhost_net_init(void)
@@ -879,3 +879,5 @@ MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Michael S. Tsirkin");
MODULE_DESCRIPTION("Host kernel accelerator for virtio net");
+MODULE_ALIAS_MISCDEV(VHOST_NET_MINOR);
+MODULE_ALIAS("devname:vhost-net");
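[Note: the vhost-net hunk switches the miscdevice definition to designated initializers and pins it to a fixed minor; MODULE_ALIAS_MISCDEV() and the devname alias let udev create /dev/vhost-net and modprobe load the module on first open. Registration itself stays the usual misc-device pattern, roughly as below — the init/exit function names are hypothetical, the miscdevice is the one defined in the hunk above:

	#include <linux/init.h>
	#include <linux/miscdevice.h>
	#include <linux/module.h>

	static int __init example_init(void)
	{
		return misc_register(&vhost_net_misc);	/* exposes /dev/vhost-net */
	}

	static void __exit example_exit(void)
	{
		misc_deregister(&vhost_net_misc);
	}
]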
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index d83e967e4e15..6ca0c407c144 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -1763,16 +1763,16 @@ config FB_AU1100
au1100fb:panel=<name>.
config FB_AU1200
- bool "Au1200 LCD Driver"
+ bool "Au1200/Au1300 LCD Driver"
depends on (FB = y) && MIPS_ALCHEMY
select FB_SYS_FILLRECT
select FB_SYS_COPYAREA
select FB_SYS_IMAGEBLIT
select FB_SYS_FOPS
help
- This is the framebuffer driver for the AMD Au1200 SOC. It can drive
- various panels and CRTs by passing in kernel cmd line option
- au1200fb:panel=<name>.
+ This is the framebuffer driver for the Au1200/Au1300 SOCs.
+ It can drive various panels and CRTs by passing in kernel cmd line
+ option au1200fb:panel=<name>.
config FB_VT8500
bool "VT8500 LCD Driver"
@@ -2413,7 +2413,6 @@ source "drivers/video/omap/Kconfig"
source "drivers/video/omap2/Kconfig"
source "drivers/video/backlight/Kconfig"
-source "drivers/video/display/Kconfig"
if VT
source "drivers/video/console/Kconfig"
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index 9b9d8fff7732..142606814d98 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -13,7 +13,7 @@ fb-objs := $(fb-y)
obj-$(CONFIG_VT) += console/
obj-$(CONFIG_LOGO) += logo/
-obj-y += backlight/ display/
+obj-y += backlight/
obj-$(CONFIG_FB_CFB_FILLRECT) += cfbfillrect.o
obj-$(CONFIG_FB_CFB_COPYAREA) += cfbcopyarea.o
diff --git a/drivers/video/amba-clcd.c b/drivers/video/amba-clcd.c
index 2cda6ba0939b..0a2cce7285be 100644
--- a/drivers/video/amba-clcd.c
+++ b/drivers/video/amba-clcd.c
@@ -621,6 +621,8 @@ static struct amba_id clcdfb_id_table[] = {
{ 0, 0 },
};
+MODULE_DEVICE_TABLE(amba, clcdfb_id_table);
+
static struct amba_driver clcd_driver = {
.drv = {
.name = "clcd-pl11x",
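[Note: the one-line amba-clcd addition exports the ID table with MODULE_DEVICE_TABLE(), which generates the module aliases userspace needs to autoload the driver when a matching PrimeCell is probed. In general the pair looks like this — the periphid/mask values below are placeholders, not taken from the patch:

	#include <linux/amba/bus.h>
	#include <linux/module.h>

	static struct amba_id example_id_table[] = {
		{ .id = 0x00041110, .mask = 0x000fffff },
		{ 0, 0 },			/* terminating entry */
	};
	MODULE_DEVICE_TABLE(amba, example_id_table);
]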
diff --git a/drivers/video/amifb.c b/drivers/video/amifb.c
index 5ea6596dd824..f23cae094f1b 100644
--- a/drivers/video/amifb.c
+++ b/drivers/video/amifb.c
@@ -152,10 +152,10 @@
- hsstrt: Start of horizontal synchronization pulse
- hsstop: End of horizontal synchronization pulse
- - htotal: Last value on the line (i.e. line length = htotal+1)
+ - htotal: Last value on the line (i.e. line length = htotal + 1)
- vsstrt: Start of vertical synchronization pulse
- vsstop: End of vertical synchronization pulse
- - vtotal: Last line value (i.e. number of lines = vtotal+1)
+ - vtotal: Last line value (i.e. number of lines = vtotal + 1)
- hcenter: Start of vertical retrace for interlace
You can specify the blanking timings independently. Currently I just set
@@ -184,7 +184,7 @@
clock):
- diwstrt_h: Horizontal start of the visible window
- - diwstop_h: Horizontal stop+1(*) of the visible window
+ - diwstop_h: Horizontal stop + 1(*) of the visible window
- diwstrt_v: Vertical start of the visible window
- diwstop_v: Vertical stop of the visible window
- ddfstrt: Horizontal start of display DMA
@@ -193,7 +193,7 @@
Sprite positioning:
- - sprstrt_h: Horizontal start-4 of sprite
+ - sprstrt_h: Horizontal start - 4 of sprite
- sprstrt_v: Vertical start of sprite
(*) Even Commodore did it wrong in the AGA monitor drivers by not adding 1.
@@ -212,21 +212,21 @@
display parameters. Here's what I found out:
- ddfstrt and ddfstop are best aligned to 64 pixels.
- - the chipset needs 64+4 horizontal pixels after the DMA start before the
- first pixel is output, so diwstrt_h = ddfstrt+64+4 if you want to
- display the first pixel on the line too. Increase diwstrt_h for virtual
- screen panning.
+ - the chipset needs 64 + 4 horizontal pixels after the DMA start before
+ the first pixel is output, so diwstrt_h = ddfstrt + 64 + 4 if you want
+ to display the first pixel on the line too. Increase diwstrt_h for
+ virtual screen panning.
- the display DMA always fetches 64 pixels at a time (fmode = 3).
- - ddfstop is ddfstrt+#pixels-64.
- - diwstop_h = diwstrt_h+xres+1. Because of the additional 1 this can be 1
- more than htotal.
+ - ddfstop is ddfstrt+#pixels - 64.
+ - diwstop_h = diwstrt_h + xres + 1. Because of the additional 1 this can
+ be 1 more than htotal.
- hscroll simply adds a delay to the display output. Smooth horizontal
- panning needs an extra 64 pixels on the left to prefetch the pixels that
- `fall off' on the left.
+ panning needs an extra 64 pixels on the left to prefetch the pixels that
+ `fall off' on the left.
- if ddfstrt < 192, the sprite DMA cycles are all stolen by the bitplane
- DMA, so it's best to make the DMA start as late as possible.
+ DMA, so it's best to make the DMA start as late as possible.
- you really don't want to make ddfstrt < 128, since this will steal DMA
- cycles from the other DMA channels (audio, floppy and Chip RAM refresh).
+ cycles from the other DMA channels (audio, floppy and Chip RAM refresh).
- I make diwstop_h and diwstop_v as large as possible.
General dependencies
@@ -234,8 +234,8 @@
- all values are SHRES pixel (35ns)
- table 1:fetchstart table 2:prefetch table 3:fetchsize
- ------------------ ---------------- -----------------
+ table 1:fetchstart table 2:prefetch table 3:fetchsize
+ ------------------ ---------------- -----------------
Pixclock # SHRES|HIRES|LORES # SHRES|HIRES|LORES # SHRES|HIRES|LORES
-------------#------+-----+------#------+-----+------#------+-----+------
Bus width 1x # 16 | 32 | 64 # 16 | 32 | 64 # 64 | 64 | 64
@@ -245,21 +245,21 @@
- chipset needs 4 pixels before the first pixel is output
- ddfstrt must be aligned to fetchstart (table 1)
- chipset needs also prefetch (table 2) to get first pixel data, so
- ddfstrt = ((diwstrt_h-4) & -fetchstart) - prefetch
+ ddfstrt = ((diwstrt_h - 4) & -fetchstart) - prefetch
- for horizontal panning decrease diwstrt_h
- the length of a fetchline must be aligned to fetchsize (table 3)
- if fetchstart is smaller than fetchsize, then ddfstrt can a little bit
- moved to optimize use of dma (useful for OCS/ECS overscan displays)
- - ddfstop is ddfstrt+ddfsize-fetchsize
+ moved to optimize use of dma (useful for OCS/ECS overscan displays)
+ - ddfstop is ddfstrt + ddfsize - fetchsize
- If C= didn't change anything for AGA, then at following positions the
- dma bus is already used:
- ddfstrt < 48 -> memory refresh
- < 96 -> disk dma
- < 160 -> audio dma
- < 192 -> sprite 0 dma
- < 416 -> sprite dma (32 per sprite)
+ dma bus is already used:
+ ddfstrt < 48 -> memory refresh
+ < 96 -> disk dma
+ < 160 -> audio dma
+ < 192 -> sprite 0 dma
+ < 416 -> sprite dma (32 per sprite)
- in accordance with the hardware reference manual a hardware stop is at
- 192, but AGA (ECS?) can go below this.
+ 192, but AGA (ECS?) can go below this.
DMA priorities
--------------
@@ -269,7 +269,7 @@
the hardware cursor:
- if you want to start display DMA too early, you lose the ability to
- do smooth horizontal panning (xpanstep 1 -> 64).
+ do smooth horizontal panning (xpanstep 1 -> 64).
- if you want to go even further, you lose the hardware cursor too.
IMHO a hardware cursor is more important for X than horizontal scrolling,
@@ -286,8 +286,8 @@
Standard VGA timings
--------------------
- xres yres left right upper lower hsync vsync
- ---- ---- ---- ----- ----- ----- ----- -----
+ xres yres left right upper lower hsync vsync
+ ---- ---- ---- ----- ----- ----- ----- -----
80x25 720 400 27 45 35 12 108 2
80x30 720 480 27 45 30 9 108 2
@@ -297,8 +297,8 @@
As a comparison, graphics/monitor.h suggests the following:
- xres yres left right upper lower hsync vsync
- ---- ---- ---- ----- ----- ----- ----- -----
+ xres yres left right upper lower hsync vsync
+ ---- ---- ---- ----- ----- ----- ----- -----
VGA 640 480 52 112 24 19 112 - 2 +
VGA70 640 400 52 112 27 21 112 - 2 -
@@ -309,10 +309,10 @@
VSYNC HSYNC Vertical size Vertical total
----- ----- ------------- --------------
- + + Reserved Reserved
- + - 400 414
- - + 350 362
- - - 480 496
+ + + Reserved Reserved
+ + - 400 414
+ - + 350 362
+ - - 480 496
Source: CL-GD542X Technical Reference Manual, Cirrus Logic, Oct 1992
@@ -326,33 +326,34 @@
-----------
- a scanline is 64 µs long, of which 52.48 µs are visible. This is about
- 736 visible 70 ns pixels per line.
+ 736 visible 70 ns pixels per line.
- we have 625 scanlines, of which 575 are visible (interlaced); after
- rounding this becomes 576.
+ rounding this becomes 576.
RETMA -> NTSC
-------------
- a scanline is 63.5 µs long, of which 53.5 µs are visible. This is about
- 736 visible 70 ns pixels per line.
+ 736 visible 70 ns pixels per line.
- we have 525 scanlines, of which 485 are visible (interlaced); after
- rounding this becomes 484.
+ rounding this becomes 484.
Thus if you want a PAL compatible display, you have to do the following:
- set the FB_SYNC_BROADCAST flag to indicate that standard broadcast
- timings are to be used.
- - make sure upper_margin+yres+lower_margin+vsync_len = 625 for an
- interlaced, 312 for a non-interlaced and 156 for a doublescanned
- display.
- - make sure left_margin+xres+right_margin+hsync_len = 1816 for a SHRES,
- 908 for a HIRES and 454 for a LORES display.
+ timings are to be used.
+ - make sure upper_margin + yres + lower_margin + vsync_len = 625 for an
+ interlaced, 312 for a non-interlaced and 156 for a doublescanned
+ display.
+ - make sure left_margin + xres + right_margin + hsync_len = 1816 for a
+ SHRES, 908 for a HIRES and 454 for a LORES display.
- the left visible part begins at 360 (SHRES; HIRES:180, LORES:90),
- left_margin+2*hsync_len must be greater or equal.
+ left_margin + 2 * hsync_len must be greater or equal.
- the upper visible part begins at 48 (interlaced; non-interlaced:24,
- doublescanned:12), upper_margin+2*vsync_len must be greater or equal.
+ doublescanned:12), upper_margin + 2 * vsync_len must be greater or
+ equal.
- ami_encode_var() calculates margins with a hsync of 5320 ns and a vsync
- of 4 scanlines
+ of 4 scanlines
The settings for a NTSC compatible display are straightforward.
@@ -361,7 +362,7 @@
anything about horizontal/vertical synchronization nor refresh rates.
- -- Geert --
+ -- Geert --
*******************************************************************************/
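[Note: as a concrete check of the PAL rule spelled out in the comment above, take the "pal-lace" entry in the mode table further down (struct fb_videomode field order: name, refresh, xres, yres, pixclock, left, right, upper, lower, hsync_len, vsync_len): vertically 80 + 512 + 29 + 4 = 625 lines, matching the interlaced requirement, and horizontally 106 + 640 + 86 + 76 = 908 pixels, matching the stated HIRES total.]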
@@ -540,45 +541,45 @@ static u_short maxfmode, chipset;
* Various macros
*/
-#define up2(v) (((v)+1) & -2)
+#define up2(v) (((v) + 1) & -2)
#define down2(v) ((v) & -2)
#define div2(v) ((v)>>1)
#define mod2(v) ((v) & 1)
-#define up4(v) (((v)+3) & -4)
+#define up4(v) (((v) + 3) & -4)
#define down4(v) ((v) & -4)
-#define mul4(v) ((v)<<2)
+#define mul4(v) ((v) << 2)
#define div4(v) ((v)>>2)
#define mod4(v) ((v) & 3)
-#define up8(v) (((v)+7) & -8)
+#define up8(v) (((v) + 7) & -8)
#define down8(v) ((v) & -8)
#define div8(v) ((v)>>3)
#define mod8(v) ((v) & 7)
-#define up16(v) (((v)+15) & -16)
+#define up16(v) (((v) + 15) & -16)
#define down16(v) ((v) & -16)
#define div16(v) ((v)>>4)
#define mod16(v) ((v) & 15)
-#define up32(v) (((v)+31) & -32)
+#define up32(v) (((v) + 31) & -32)
#define down32(v) ((v) & -32)
#define div32(v) ((v)>>5)
#define mod32(v) ((v) & 31)
-#define up64(v) (((v)+63) & -64)
+#define up64(v) (((v) + 63) & -64)
#define down64(v) ((v) & -64)
#define div64(v) ((v)>>6)
#define mod64(v) ((v) & 63)
-#define upx(x,v) (((v)+(x)-1) & -(x))
-#define downx(x,v) ((v) & -(x))
-#define modx(x,v) ((v) & ((x)-1))
+#define upx(x, v) (((v) + (x) - 1) & -(x))
+#define downx(x, v) ((v) & -(x))
+#define modx(x, v) ((v) & ((x) - 1))
/* if x1 is not a constant, this macro won't make real sense :-) */
#ifdef __mc68000__
#define DIVUL(x1, x2) ({int res; asm("divul %1,%2,%3": "=d" (res): \
- "d" (x2), "d" ((long)((x1)/0x100000000ULL)), "0" ((long)(x1))); res;})
+ "d" (x2), "d" ((long)((x1) / 0x100000000ULL)), "0" ((long)(x1))); res;})
#else
/* We know a bit about the numbers, so we can do it this way */
#define DIVUL(x1, x2) ((((long)((unsigned long long)x1 >> 8) / x2) << 8) + \
@@ -607,7 +608,7 @@ static u_short maxfmode, chipset;
#define VIDEOMEMSIZE_ECS_1M (393216) /* ECS (1MB) : max 1024*768*16 */
#define VIDEOMEMSIZE_OCS (262144) /* OCS : max ca. 800*600*16 */
-#define SPRITEMEMSIZE (64*64/4) /* max 64*64*4 */
+#define SPRITEMEMSIZE (64 * 64 / 4) /* max 64*64*4 */
#define DUMMYSPRITEMEMSIZE (8)
static u_long spritememory;
@@ -634,9 +635,9 @@ static u_long min_fstrt = 192;
* Copper Instructions
*/
-#define CMOVE(val, reg) (CUSTOM_OFS(reg)<<16 | (val))
-#define CMOVE2(val, reg) ((CUSTOM_OFS(reg)+2)<<16 | (val))
-#define CWAIT(x, y) (((y) & 0x1fe)<<23 | ((x) & 0x7f0)<<13 | 0x0001fffe)
+#define CMOVE(val, reg) (CUSTOM_OFS(reg) << 16 | (val))
+#define CMOVE2(val, reg) ((CUSTOM_OFS(reg) + 2) << 16 | (val))
+#define CWAIT(x, y) (((y) & 0x1fe) << 23 | ((x) & 0x7f0) << 13 | 0x0001fffe)
#define CEND (0xfffffffe)
@@ -709,7 +710,7 @@ static u_short *lofsprite, *shfsprite, *dummysprite;
* Current Video Mode
*/
-static struct amifb_par {
+struct amifb_par {
/* General Values */
@@ -772,15 +773,6 @@ static struct amifb_par {
/* Additional AGA Hardware Registers */
u_short fmode; /* vmode */
-} currentpar;
-
-
-static struct fb_info fb_info = {
- .fix = {
- .id = "Amiga ",
- .visual = FB_VISUAL_PSEUDOCOLOR,
- .accel = FB_ACCEL_AMIGABLITT
- }
};
@@ -820,116 +812,123 @@ static u_short is_lace = 0; /* Screen is laced */
static struct fb_videomode ami_modedb[] __initdata = {
- /*
- * AmigaOS Video Modes
- *
- * If you change these, make sure to update DEFMODE_* as well!
- */
-
- {
- /* 640x200, 15 kHz, 60 Hz (NTSC) */
- "ntsc", 60, 640, 200, TAG_HIRES, 106, 86, 44, 16, 76, 2,
- FB_SYNC_BROADCAST, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
- }, {
- /* 640x400, 15 kHz, 60 Hz interlaced (NTSC) */
- "ntsc-lace", 60, 640, 400, TAG_HIRES, 106, 86, 88, 33, 76, 4,
- FB_SYNC_BROADCAST, FB_VMODE_INTERLACED | FB_VMODE_YWRAP
- }, {
- /* 640x256, 15 kHz, 50 Hz (PAL) */
- "pal", 50, 640, 256, TAG_HIRES, 106, 86, 40, 14, 76, 2,
- FB_SYNC_BROADCAST, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
- }, {
- /* 640x512, 15 kHz, 50 Hz interlaced (PAL) */
- "pal-lace", 50, 640, 512, TAG_HIRES, 106, 86, 80, 29, 76, 4,
- FB_SYNC_BROADCAST, FB_VMODE_INTERLACED | FB_VMODE_YWRAP
- }, {
- /* 640x480, 29 kHz, 57 Hz */
- "multiscan", 57, 640, 480, TAG_SHRES, 96, 112, 29, 8, 72, 8,
- 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
- }, {
- /* 640x960, 29 kHz, 57 Hz interlaced */
- "multiscan-lace", 57, 640, 960, TAG_SHRES, 96, 112, 58, 16, 72, 16,
- 0, FB_VMODE_INTERLACED | FB_VMODE_YWRAP
- }, {
- /* 640x200, 15 kHz, 72 Hz */
- "euro36", 72, 640, 200, TAG_HIRES, 92, 124, 6, 6, 52, 5,
- 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
- }, {
- /* 640x400, 15 kHz, 72 Hz interlaced */
- "euro36-lace", 72, 640, 400, TAG_HIRES, 92, 124, 12, 12, 52, 10,
- 0, FB_VMODE_INTERLACED | FB_VMODE_YWRAP
- }, {
- /* 640x400, 29 kHz, 68 Hz */
- "euro72", 68, 640, 400, TAG_SHRES, 164, 92, 9, 9, 80, 8,
- 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
- }, {
- /* 640x800, 29 kHz, 68 Hz interlaced */
- "euro72-lace", 68, 640, 800, TAG_SHRES, 164, 92, 18, 18, 80, 16,
- 0, FB_VMODE_INTERLACED | FB_VMODE_YWRAP
- }, {
- /* 800x300, 23 kHz, 70 Hz */
- "super72", 70, 800, 300, TAG_SHRES, 212, 140, 10, 11, 80, 7,
- 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
- }, {
- /* 800x600, 23 kHz, 70 Hz interlaced */
- "super72-lace", 70, 800, 600, TAG_SHRES, 212, 140, 20, 22, 80, 14,
- 0, FB_VMODE_INTERLACED | FB_VMODE_YWRAP
- }, {
- /* 640x200, 27 kHz, 57 Hz doublescan */
- "dblntsc", 57, 640, 200, TAG_SHRES, 196, 124, 18, 17, 80, 4,
- 0, FB_VMODE_DOUBLE | FB_VMODE_YWRAP
- }, {
- /* 640x400, 27 kHz, 57 Hz */
- "dblntsc-ff", 57, 640, 400, TAG_SHRES, 196, 124, 36, 35, 80, 7,
- 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
- }, {
- /* 640x800, 27 kHz, 57 Hz interlaced */
- "dblntsc-lace", 57, 640, 800, TAG_SHRES, 196, 124, 72, 70, 80, 14,
- 0, FB_VMODE_INTERLACED | FB_VMODE_YWRAP
- }, {
- /* 640x256, 27 kHz, 47 Hz doublescan */
- "dblpal", 47, 640, 256, TAG_SHRES, 196, 124, 14, 13, 80, 4,
- 0, FB_VMODE_DOUBLE | FB_VMODE_YWRAP
- }, {
- /* 640x512, 27 kHz, 47 Hz */
- "dblpal-ff", 47, 640, 512, TAG_SHRES, 196, 124, 28, 27, 80, 7,
- 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
- }, {
- /* 640x1024, 27 kHz, 47 Hz interlaced */
- "dblpal-lace", 47, 640, 1024, TAG_SHRES, 196, 124, 56, 54, 80, 14,
- 0, FB_VMODE_INTERLACED | FB_VMODE_YWRAP
- },
-
- /*
- * VGA Video Modes
- */
-
- {
- /* 640x480, 31 kHz, 60 Hz (VGA) */
- "vga", 60, 640, 480, TAG_SHRES, 64, 96, 30, 9, 112, 2,
- 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
- }, {
- /* 640x400, 31 kHz, 70 Hz (VGA) */
- "vga70", 70, 640, 400, TAG_SHRES, 64, 96, 35, 12, 112, 2,
- FB_SYNC_VERT_HIGH_ACT | FB_SYNC_COMP_HIGH_ACT, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
- },
+ /*
+ * AmigaOS Video Modes
+ *
+ * If you change these, make sure to update DEFMODE_* as well!
+ */
+
+ {
+ /* 640x200, 15 kHz, 60 Hz (NTSC) */
+ "ntsc", 60, 640, 200, TAG_HIRES, 106, 86, 44, 16, 76, 2,
+ FB_SYNC_BROADCAST, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
+ }, {
+ /* 640x400, 15 kHz, 60 Hz interlaced (NTSC) */
+ "ntsc-lace", 60, 640, 400, TAG_HIRES, 106, 86, 88, 33, 76, 4,
+ FB_SYNC_BROADCAST, FB_VMODE_INTERLACED | FB_VMODE_YWRAP
+ }, {
+ /* 640x256, 15 kHz, 50 Hz (PAL) */
+ "pal", 50, 640, 256, TAG_HIRES, 106, 86, 40, 14, 76, 2,
+ FB_SYNC_BROADCAST, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
+ }, {
+ /* 640x512, 15 kHz, 50 Hz interlaced (PAL) */
+ "pal-lace", 50, 640, 512, TAG_HIRES, 106, 86, 80, 29, 76, 4,
+ FB_SYNC_BROADCAST, FB_VMODE_INTERLACED | FB_VMODE_YWRAP
+ }, {
+ /* 640x480, 29 kHz, 57 Hz */
+ "multiscan", 57, 640, 480, TAG_SHRES, 96, 112, 29, 8, 72, 8,
+ 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
+ }, {
+ /* 640x960, 29 kHz, 57 Hz interlaced */
+ "multiscan-lace", 57, 640, 960, TAG_SHRES, 96, 112, 58, 16, 72,
+ 16,
+ 0, FB_VMODE_INTERLACED | FB_VMODE_YWRAP
+ }, {
+ /* 640x200, 15 kHz, 72 Hz */
+ "euro36", 72, 640, 200, TAG_HIRES, 92, 124, 6, 6, 52, 5,
+ 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
+ }, {
+ /* 640x400, 15 kHz, 72 Hz interlaced */
+ "euro36-lace", 72, 640, 400, TAG_HIRES, 92, 124, 12, 12, 52,
+ 10,
+ 0, FB_VMODE_INTERLACED | FB_VMODE_YWRAP
+ }, {
+ /* 640x400, 29 kHz, 68 Hz */
+ "euro72", 68, 640, 400, TAG_SHRES, 164, 92, 9, 9, 80, 8,
+ 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
+ }, {
+ /* 640x800, 29 kHz, 68 Hz interlaced */
+ "euro72-lace", 68, 640, 800, TAG_SHRES, 164, 92, 18, 18, 80,
+ 16,
+ 0, FB_VMODE_INTERLACED | FB_VMODE_YWRAP
+ }, {
+ /* 800x300, 23 kHz, 70 Hz */
+ "super72", 70, 800, 300, TAG_SHRES, 212, 140, 10, 11, 80, 7,
+ 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
+ }, {
+ /* 800x600, 23 kHz, 70 Hz interlaced */
+ "super72-lace", 70, 800, 600, TAG_SHRES, 212, 140, 20, 22, 80,
+ 14,
+ 0, FB_VMODE_INTERLACED | FB_VMODE_YWRAP
+ }, {
+ /* 640x200, 27 kHz, 57 Hz doublescan */
+ "dblntsc", 57, 640, 200, TAG_SHRES, 196, 124, 18, 17, 80, 4,
+ 0, FB_VMODE_DOUBLE | FB_VMODE_YWRAP
+ }, {
+ /* 640x400, 27 kHz, 57 Hz */
+ "dblntsc-ff", 57, 640, 400, TAG_SHRES, 196, 124, 36, 35, 80, 7,
+ 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
+ }, {
+ /* 640x800, 27 kHz, 57 Hz interlaced */
+ "dblntsc-lace", 57, 640, 800, TAG_SHRES, 196, 124, 72, 70, 80,
+ 14,
+ 0, FB_VMODE_INTERLACED | FB_VMODE_YWRAP
+ }, {
+ /* 640x256, 27 kHz, 47 Hz doublescan */
+ "dblpal", 47, 640, 256, TAG_SHRES, 196, 124, 14, 13, 80, 4,
+ 0, FB_VMODE_DOUBLE | FB_VMODE_YWRAP
+ }, {
+ /* 640x512, 27 kHz, 47 Hz */
+ "dblpal-ff", 47, 640, 512, TAG_SHRES, 196, 124, 28, 27, 80, 7,
+ 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
+ }, {
+ /* 640x1024, 27 kHz, 47 Hz interlaced */
+ "dblpal-lace", 47, 640, 1024, TAG_SHRES, 196, 124, 56, 54, 80,
+ 14,
+ 0, FB_VMODE_INTERLACED | FB_VMODE_YWRAP
+ },
+
+ /*
+ * VGA Video Modes
+ */
+
+ {
+ /* 640x480, 31 kHz, 60 Hz (VGA) */
+ "vga", 60, 640, 480, TAG_SHRES, 64, 96, 30, 9, 112, 2,
+ 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
+ }, {
+ /* 640x400, 31 kHz, 70 Hz (VGA) */
+ "vga70", 70, 640, 400, TAG_SHRES, 64, 96, 35, 12, 112, 2,
+ FB_SYNC_VERT_HIGH_ACT | FB_SYNC_COMP_HIGH_ACT,
+ FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
+ },
#if 0
- /*
- * A2024 video modes
- * These modes don't work yet because there's no A2024 driver.
- */
-
- {
- /* 1024x800, 10 Hz */
- "a2024-10", 10, 1024, 800, TAG_HIRES, 0, 0, 0, 0, 0, 0,
- 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
- }, {
- /* 1024x800, 15 Hz */
- "a2024-15", 15, 1024, 800, TAG_HIRES, 0, 0, 0, 0, 0, 0,
- 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
- }
+ /*
+ * A2024 video modes
+ * These modes don't work yet because there's no A2024 driver.
+ */
+
+ {
+ /* 1024x800, 10 Hz */
+ "a2024-10", 10, 1024, 800, TAG_HIRES, 0, 0, 0, 0, 0, 0,
+ 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
+ }, {
+ /* 1024x800, 15 Hz */
+ "a2024-15", 15, 1024, 800, TAG_HIRES, 0, 0, 0, 0, 0, 0,
+ 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
+ }
#endif
};
@@ -953,6 +952,11 @@ static int round_down_bpp = 1; /* for mode probing */
static int amifb_ilbm = 0; /* interleaved or normal bitplanes */
static int amifb_inverse = 0;
+static u32 amifb_hfmin __initdata; /* monitor hfreq lower limit (Hz) */
+static u32 amifb_hfmax __initdata; /* monitor hfreq upper limit (Hz) */
+static u16 amifb_vfmin __initdata; /* monitor vfreq lower limit (Hz) */
+static u16 amifb_vfmax __initdata; /* monitor vfreq upper limit (Hz) */
+
/*
* Macros for the conversion from real world values to hardware register
@@ -992,19 +996,20 @@ static int amifb_inverse = 0;
/* bplcon1 (smooth scrolling) */
#define hscroll2hw(hscroll) \
- (((hscroll)<<12 & 0x3000) | ((hscroll)<<8 & 0xc300) | \
- ((hscroll)<<4 & 0x0c00) | ((hscroll)<<2 & 0x00f0) | ((hscroll)>>2 & 0x000f))
+ (((hscroll) << 12 & 0x3000) | ((hscroll) << 8 & 0xc300) | \
+ ((hscroll) << 4 & 0x0c00) | ((hscroll) << 2 & 0x00f0) | \
+ ((hscroll)>>2 & 0x000f))
/* diwstrt/diwstop/diwhigh (visible display window) */
#define diwstrt2hw(diwstrt_h, diwstrt_v) \
- (((diwstrt_v)<<7 & 0xff00) | ((diwstrt_h)>>2 & 0x00ff))
+ (((diwstrt_v) << 7 & 0xff00) | ((diwstrt_h)>>2 & 0x00ff))
#define diwstop2hw(diwstop_h, diwstop_v) \
- (((diwstop_v)<<7 & 0xff00) | ((diwstop_h)>>2 & 0x00ff))
+ (((diwstop_v) << 7 & 0xff00) | ((diwstop_h)>>2 & 0x00ff))
#define diwhigh2hw(diwstrt_h, diwstrt_v, diwstop_h, diwstop_v) \
- (((diwstop_h)<<3 & 0x2000) | ((diwstop_h)<<11 & 0x1800) | \
+ (((diwstop_h) << 3 & 0x2000) | ((diwstop_h) << 11 & 0x1800) | \
((diwstop_v)>>1 & 0x0700) | ((diwstrt_h)>>5 & 0x0020) | \
- ((diwstrt_h)<<3 & 0x0018) | ((diwstrt_v)>>9 & 0x0007))
+ ((diwstrt_h) << 3 & 0x0018) | ((diwstrt_v)>>9 & 0x0007))
/* ddfstrt/ddfstop (display DMA) */
@@ -1015,38 +1020,39 @@ static int amifb_inverse = 0;
#define hsstrt2hw(hsstrt) (div8(hsstrt))
#define hsstop2hw(hsstop) (div8(hsstop))
-#define htotal2hw(htotal) (div8(htotal)-1)
+#define htotal2hw(htotal) (div8(htotal) - 1)
#define vsstrt2hw(vsstrt) (div2(vsstrt))
#define vsstop2hw(vsstop) (div2(vsstop))
-#define vtotal2hw(vtotal) (div2(vtotal)-1)
+#define vtotal2hw(vtotal) (div2(vtotal) - 1)
#define hcenter2hw(htotal) (div8(htotal))
/* hbstrt/hbstop/vbstrt/vbstop (blanking timings) */
-#define hbstrt2hw(hbstrt) (((hbstrt)<<8 & 0x0700) | ((hbstrt)>>3 & 0x00ff))
-#define hbstop2hw(hbstop) (((hbstop)<<8 & 0x0700) | ((hbstop)>>3 & 0x00ff))
+#define hbstrt2hw(hbstrt) (((hbstrt) << 8 & 0x0700) | ((hbstrt)>>3 & 0x00ff))
+#define hbstop2hw(hbstop) (((hbstop) << 8 & 0x0700) | ((hbstop)>>3 & 0x00ff))
#define vbstrt2hw(vbstrt) (div2(vbstrt))
#define vbstop2hw(vbstop) (div2(vbstop))
/* colour */
#define rgb2hw8_high(red, green, blue) \
- (((red & 0xf0)<<4) | (green & 0xf0) | ((blue & 0xf0)>>4))
+ (((red & 0xf0) << 4) | (green & 0xf0) | ((blue & 0xf0)>>4))
#define rgb2hw8_low(red, green, blue) \
- (((red & 0x0f)<<8) | ((green & 0x0f)<<4) | (blue & 0x0f))
+ (((red & 0x0f) << 8) | ((green & 0x0f) << 4) | (blue & 0x0f))
#define rgb2hw4(red, green, blue) \
- (((red & 0xf0)<<4) | (green & 0xf0) | ((blue & 0xf0)>>4))
+ (((red & 0xf0) << 4) | (green & 0xf0) | ((blue & 0xf0)>>4))
#define rgb2hw2(red, green, blue) \
- (((red & 0xc0)<<4) | (green & 0xc0) | ((blue & 0xc0)>>4))
+ (((red & 0xc0) << 4) | (green & 0xc0) | ((blue & 0xc0)>>4))
/* sprpos/sprctl (sprite positioning) */
#define spr2hw_pos(start_v, start_h) \
- (((start_v)<<7&0xff00) | ((start_h)>>3&0x00ff))
+ (((start_v) << 7 & 0xff00) | ((start_h)>>3 & 0x00ff))
#define spr2hw_ctl(start_v, start_h, stop_v) \
- (((stop_v)<<7&0xff00) | ((start_v)>>4&0x0040) | ((stop_v)>>5&0x0020) | \
- ((start_h)<<3&0x0018) | ((start_v)>>7&0x0004) | ((stop_v)>>8&0x0002) | \
- ((start_h)>>2&0x0001))
+ (((stop_v) << 7 & 0xff00) | ((start_v)>>4 & 0x0040) | \
+ ((stop_v)>>5 & 0x0020) | ((start_h) << 3 & 0x0018) | \
+ ((start_v)>>7 & 0x0004) | ((stop_v)>>8 & 0x0002) | \
+ ((start_h)>>2 & 0x0001))
/* get current vertical position of beam */
#define get_vbpos() ((u_short)((*(u_long volatile *)&custom.vposr >> 7) & 0xffe))
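As a quick worked example of the colour-packing macros above (illustrative only, mirroring the definitions in this patch): an 8-bit-per-gun colour is split into two 12-bit register writes, high nibbles first and low nibbles second; the AGA path in amifb_setcolreg() below performs exactly these two writes, the second with BPC3_LOCT set.

/* illustration: red = 0xAB, green = 0xCD, blue = 0xEF */
#define rgb2hw8_high(red, green, blue) \
	(((red & 0xf0) << 4) | (green & 0xf0) | ((blue & 0xf0) >> 4))
#define rgb2hw8_low(red, green, blue) \
	(((red & 0x0f) << 8) | ((green & 0x0f) << 4) | (blue & 0x0f))

/* rgb2hw8_high(0xAB, 0xCD, 0xEF) == 0x0ACE   (high nibbles)
 * rgb2hw8_low(0xAB, 0xCD, 0xEF)  == 0x0BDF   (low nibbles)  */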
@@ -1055,7 +1061,7 @@ static int amifb_inverse = 0;
* Copper Initialisation List
*/
-#define COPINITSIZE (sizeof(copins)*40)
+#define COPINITSIZE (sizeof(copins) * 40)
enum {
cip_bplcon0
@@ -1066,7 +1072,7 @@ enum {
* Don't change the order, build_copper()/rebuild_copper() rely on this
*/
-#define COPLISTSIZE (sizeof(copins)*64)
+#define COPLISTSIZE (sizeof(copins) * 64)
enum {
cop_wait, cop_bplcon0,
@@ -1108,82 +1114,1199 @@ static u_short sprfetchmode[3] = {
};
+/* --------------------------- Hardware routines --------------------------- */
+
/*
- * Interface used by the world
+ * Get the video params out of `var'. If a value doesn't fit, round
+ * it up; if it's too big, return -EINVAL.
*/
-int amifb_setup(char*);
+static int ami_decode_var(struct fb_var_screeninfo *var, struct amifb_par *par,
+ const struct fb_info *info)
+{
+ u_short clk_shift, line_shift;
+ u_long maxfetchstop, fstrt, fsize, fconst, xres_n, yres_n;
+ u_int htotal, vtotal;
-static int amifb_check_var(struct fb_var_screeninfo *var,
- struct fb_info *info);
-static int amifb_set_par(struct fb_info *info);
-static int amifb_setcolreg(unsigned regno, unsigned red, unsigned green,
- unsigned blue, unsigned transp,
- struct fb_info *info);
-static int amifb_blank(int blank, struct fb_info *info);
-static int amifb_pan_display(struct fb_var_screeninfo *var,
- struct fb_info *info);
-static void amifb_fillrect(struct fb_info *info,
- const struct fb_fillrect *rect);
-static void amifb_copyarea(struct fb_info *info,
- const struct fb_copyarea *region);
-static void amifb_imageblit(struct fb_info *info,
- const struct fb_image *image);
-static int amifb_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg);
+ /*
+ * Find a matching Pixel Clock
+ */
+ for (clk_shift = TAG_SHRES; clk_shift <= TAG_LORES; clk_shift++)
+ if (var->pixclock <= pixclock[clk_shift])
+ break;
+ if (clk_shift > TAG_LORES) {
+ DPRINTK("pixclock too high\n");
+ return -EINVAL;
+ }
+ par->clk_shift = clk_shift;
/*
- * Interface to the low level console driver
+ * Check the Geometry Values
*/
-static void amifb_deinit(struct platform_device *pdev);
+ if ((par->xres = var->xres) < 64)
+ par->xres = 64;
+ if ((par->yres = var->yres) < 64)
+ par->yres = 64;
+ if ((par->vxres = var->xres_virtual) < par->xres)
+ par->vxres = par->xres;
+ if ((par->vyres = var->yres_virtual) < par->yres)
+ par->vyres = par->yres;
+
+ par->bpp = var->bits_per_pixel;
+ if (!var->nonstd) {
+ if (par->bpp < 1)
+ par->bpp = 1;
+ if (par->bpp > maxdepth[clk_shift]) {
+ if (round_down_bpp && maxdepth[clk_shift])
+ par->bpp = maxdepth[clk_shift];
+ else {
+ DPRINTK("invalid bpp\n");
+ return -EINVAL;
+ }
+ }
+ } else if (var->nonstd == FB_NONSTD_HAM) {
+ if (par->bpp < 6)
+ par->bpp = 6;
+ if (par->bpp != 6) {
+ if (par->bpp < 8)
+ par->bpp = 8;
+ if (par->bpp != 8 || !IS_AGA) {
+ DPRINTK("invalid bpp for ham mode\n");
+ return -EINVAL;
+ }
+ }
+ } else {
+ DPRINTK("unknown nonstd mode\n");
+ return -EINVAL;
+ }
/*
- * Internal routines
+	 * FB_VMODE_SMOOTH_XPAN will be cleared if one of the following
+	 * checks fails and smooth scrolling is not possible
*/
-static int flash_cursor(void);
-static irqreturn_t amifb_interrupt(int irq, void *dev_id);
-static u_long chipalloc(u_long size);
-static void chipfree(void);
+ par->vmode = var->vmode | FB_VMODE_SMOOTH_XPAN;
+ switch (par->vmode & FB_VMODE_MASK) {
+ case FB_VMODE_INTERLACED:
+ line_shift = 0;
+ break;
+ case FB_VMODE_NONINTERLACED:
+ line_shift = 1;
+ break;
+ case FB_VMODE_DOUBLE:
+ if (!IS_AGA) {
+ DPRINTK("double mode only possible with aga\n");
+ return -EINVAL;
+ }
+ line_shift = 2;
+ break;
+ default:
+ DPRINTK("unknown video mode\n");
+ return -EINVAL;
+ break;
+ }
+ par->line_shift = line_shift;
/*
- * Hardware routines
+ * Vertical and Horizontal Timings
*/
-static int ami_decode_var(struct fb_var_screeninfo *var,
- struct amifb_par *par);
-static int ami_encode_var(struct fb_var_screeninfo *var,
- struct amifb_par *par);
-static void ami_pan_var(struct fb_var_screeninfo *var);
-static int ami_update_par(void);
-static void ami_update_display(void);
-static void ami_init_display(void);
-static void ami_do_blank(void);
-static int ami_get_fix_cursorinfo(struct fb_fix_cursorinfo *fix);
-static int ami_get_var_cursorinfo(struct fb_var_cursorinfo *var, u_char __user *data);
-static int ami_set_var_cursorinfo(struct fb_var_cursorinfo *var, u_char __user *data);
-static int ami_get_cursorstate(struct fb_cursorstate *state);
-static int ami_set_cursorstate(struct fb_cursorstate *state);
-static void ami_set_sprite(void);
-static void ami_init_copper(void);
-static void ami_reinit_copper(void);
-static void ami_build_copper(void);
-static void ami_rebuild_copper(void);
+ xres_n = par->xres << clk_shift;
+ yres_n = par->yres << line_shift;
+ par->htotal = down8((var->left_margin + par->xres + var->right_margin +
+ var->hsync_len) << clk_shift);
+ par->vtotal =
+ down2(((var->upper_margin + par->yres + var->lower_margin +
+ var->vsync_len) << line_shift) + 1);
+ if (IS_AGA)
+ par->bplcon3 = sprpixmode[clk_shift];
+ else
+ par->bplcon3 = 0;
+ if (var->sync & FB_SYNC_BROADCAST) {
+ par->diwstop_h = par->htotal -
+ ((var->right_margin - var->hsync_len) << clk_shift);
+ if (IS_AGA)
+ par->diwstop_h += mod4(var->hsync_len);
+ else
+ par->diwstop_h = down4(par->diwstop_h);
+
+ par->diwstrt_h = par->diwstop_h - xres_n;
+ par->diwstop_v = par->vtotal -
+ ((var->lower_margin - var->vsync_len) << line_shift);
+ par->diwstrt_v = par->diwstop_v - yres_n;
+ if (par->diwstop_h >= par->htotal + 8) {
+ DPRINTK("invalid diwstop_h\n");
+ return -EINVAL;
+ }
+ if (par->diwstop_v > par->vtotal) {
+ DPRINTK("invalid diwstop_v\n");
+ return -EINVAL;
+ }
+
+ if (!IS_OCS) {
+ /* Initialize sync with some reasonable values for pwrsave */
+ par->hsstrt = 160;
+ par->hsstop = 320;
+ par->vsstrt = 30;
+ par->vsstop = 34;
+ } else {
+ par->hsstrt = 0;
+ par->hsstop = 0;
+ par->vsstrt = 0;
+ par->vsstop = 0;
+ }
+ if (par->vtotal > (PAL_VTOTAL + NTSC_VTOTAL) / 2) {
+ /* PAL video mode */
+ if (par->htotal != PAL_HTOTAL) {
+ DPRINTK("htotal invalid for pal\n");
+ return -EINVAL;
+ }
+ if (par->diwstrt_h < PAL_DIWSTRT_H) {
+ DPRINTK("diwstrt_h too low for pal\n");
+ return -EINVAL;
+ }
+ if (par->diwstrt_v < PAL_DIWSTRT_V) {
+ DPRINTK("diwstrt_v too low for pal\n");
+ return -EINVAL;
+ }
+ htotal = PAL_HTOTAL>>clk_shift;
+ vtotal = PAL_VTOTAL>>1;
+ if (!IS_OCS) {
+ par->beamcon0 = BMC0_PAL;
+ par->bplcon3 |= BPC3_BRDRBLNK;
+ } else if (AMIGAHW_PRESENT(AGNUS_HR_PAL) ||
+ AMIGAHW_PRESENT(AGNUS_HR_NTSC)) {
+ par->beamcon0 = BMC0_PAL;
+ par->hsstop = 1;
+ } else if (amiga_vblank != 50) {
+ DPRINTK("pal not supported by this chipset\n");
+ return -EINVAL;
+ }
+ } else {
+ /* NTSC video mode
+		 * The AGA chipset seems to have a hardware bug when BPC3_BRDRBLNK
+		 * is used with NTSC, so it is better to keep diwstop_h <= 1812
+ */
+ if (par->htotal != NTSC_HTOTAL) {
+ DPRINTK("htotal invalid for ntsc\n");
+ return -EINVAL;
+ }
+ if (par->diwstrt_h < NTSC_DIWSTRT_H) {
+ DPRINTK("diwstrt_h too low for ntsc\n");
+ return -EINVAL;
+ }
+ if (par->diwstrt_v < NTSC_DIWSTRT_V) {
+ DPRINTK("diwstrt_v too low for ntsc\n");
+ return -EINVAL;
+ }
+ htotal = NTSC_HTOTAL>>clk_shift;
+ vtotal = NTSC_VTOTAL>>1;
+ if (!IS_OCS) {
+ par->beamcon0 = 0;
+ par->bplcon3 |= BPC3_BRDRBLNK;
+ } else if (AMIGAHW_PRESENT(AGNUS_HR_PAL) ||
+ AMIGAHW_PRESENT(AGNUS_HR_NTSC)) {
+ par->beamcon0 = 0;
+ par->hsstop = 1;
+ } else if (amiga_vblank != 60) {
+ DPRINTK("ntsc not supported by this chipset\n");
+ return -EINVAL;
+ }
+ }
+ if (IS_OCS) {
+ if (par->diwstrt_h >= 1024 || par->diwstop_h < 1024 ||
+ par->diwstrt_v >= 512 || par->diwstop_v < 256) {
+ DPRINTK("invalid position for display on ocs\n");
+ return -EINVAL;
+ }
+ }
+ } else if (!IS_OCS) {
+ /* Programmable video mode */
+ par->hsstrt = var->right_margin << clk_shift;
+ par->hsstop = (var->right_margin + var->hsync_len) << clk_shift;
+ par->diwstop_h = par->htotal - mod8(par->hsstrt) + 8 - (1 << clk_shift);
+ if (!IS_AGA)
+ par->diwstop_h = down4(par->diwstop_h) - 16;
+ par->diwstrt_h = par->diwstop_h - xres_n;
+ par->hbstop = par->diwstrt_h + 4;
+ par->hbstrt = par->diwstop_h + 4;
+ if (par->hbstrt >= par->htotal + 8)
+ par->hbstrt -= par->htotal;
+ par->hcenter = par->hsstrt + (par->htotal >> 1);
+ par->vsstrt = var->lower_margin << line_shift;
+ par->vsstop = (var->lower_margin + var->vsync_len) << line_shift;
+ par->diwstop_v = par->vtotal;
+ if ((par->vmode & FB_VMODE_MASK) == FB_VMODE_INTERLACED)
+ par->diwstop_v -= 2;
+ par->diwstrt_v = par->diwstop_v - yres_n;
+ par->vbstop = par->diwstrt_v - 2;
+ par->vbstrt = par->diwstop_v - 2;
+ if (par->vtotal > 2048) {
+ DPRINTK("vtotal too high\n");
+ return -EINVAL;
+ }
+ if (par->htotal > 2048) {
+ DPRINTK("htotal too high\n");
+ return -EINVAL;
+ }
+ par->bplcon3 |= BPC3_EXTBLKEN;
+ par->beamcon0 = BMC0_HARDDIS | BMC0_VARVBEN | BMC0_LOLDIS |
+ BMC0_VARVSYEN | BMC0_VARHSYEN | BMC0_VARBEAMEN |
+ BMC0_PAL | BMC0_VARCSYEN;
+ if (var->sync & FB_SYNC_HOR_HIGH_ACT)
+ par->beamcon0 |= BMC0_HSYTRUE;
+ if (var->sync & FB_SYNC_VERT_HIGH_ACT)
+ par->beamcon0 |= BMC0_VSYTRUE;
+ if (var->sync & FB_SYNC_COMP_HIGH_ACT)
+ par->beamcon0 |= BMC0_CSYTRUE;
+ htotal = par->htotal>>clk_shift;
+ vtotal = par->vtotal>>1;
+ } else {
+ DPRINTK("only broadcast modes possible for ocs\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Checking the DMA timing
+ */
+
+ fconst = 16 << maxfmode << clk_shift;
+
+ /*
+	 * smallest window start value that does not turn off DMA cycles
+	 * other than sprite 1-7, unless you change min_fstrt
+ */
+
+
+ fsize = ((maxfmode + clk_shift <= 1) ? fconst : 64);
+ fstrt = downx(fconst, par->diwstrt_h - 4) - fsize;
+ if (fstrt < min_fstrt) {
+ DPRINTK("fetch start too low\n");
+ return -EINVAL;
+ }
+
+ /*
+ * smallest window start value where smooth scrolling is possible
+ */
+
+ fstrt = downx(fconst, par->diwstrt_h - fconst + (1 << clk_shift) - 4) -
+ fsize;
+ if (fstrt < min_fstrt)
+ par->vmode &= ~FB_VMODE_SMOOTH_XPAN;
+
+ maxfetchstop = down16(par->htotal - 80);
+
+ fstrt = downx(fconst, par->diwstrt_h - 4) - 64 - fconst;
+ fsize = upx(fconst, xres_n +
+ modx(fconst, downx(1 << clk_shift, par->diwstrt_h - 4)));
+ if (fstrt + fsize > maxfetchstop)
+ par->vmode &= ~FB_VMODE_SMOOTH_XPAN;
+
+ fsize = upx(fconst, xres_n);
+ if (fstrt + fsize > maxfetchstop) {
+ DPRINTK("fetch stop too high\n");
+ return -EINVAL;
+ }
+
+ if (maxfmode + clk_shift <= 1) {
+ fsize = up64(xres_n + fconst - 1);
+ if (min_fstrt + fsize - 64 > maxfetchstop)
+ par->vmode &= ~FB_VMODE_SMOOTH_XPAN;
+
+ fsize = up64(xres_n);
+ if (min_fstrt + fsize - 64 > maxfetchstop) {
+ DPRINTK("fetch size too high\n");
+ return -EINVAL;
+ }
+
+ fsize -= 64;
+ } else
+ fsize -= fconst;
+
+ /*
+ * Check if there is enough time to update the bitplane pointers for ywrap
+ */
+
+ if (par->htotal - fsize - 64 < par->bpp * 64)
+ par->vmode &= ~FB_VMODE_YWRAP;
+
+ /*
+ * Bitplane calculations and check the Memory Requirements
+ */
+
+ if (amifb_ilbm) {
+ par->next_plane = div8(upx(16 << maxfmode, par->vxres));
+ par->next_line = par->bpp * par->next_plane;
+ if (par->next_line * par->vyres > info->fix.smem_len) {
+ DPRINTK("too few video mem\n");
+ return -EINVAL;
+ }
+ } else {
+ par->next_line = div8(upx(16 << maxfmode, par->vxres));
+ par->next_plane = par->vyres * par->next_line;
+ if (par->next_plane * par->bpp > info->fix.smem_len) {
+ DPRINTK("too few video mem\n");
+ return -EINVAL;
+ }
+ }
+
+ /*
+ * Hardware Register Values
+ */
+
+ par->bplcon0 = BPC0_COLOR | bplpixmode[clk_shift];
+ if (!IS_OCS)
+ par->bplcon0 |= BPC0_ECSENA;
+ if (par->bpp == 8)
+ par->bplcon0 |= BPC0_BPU3;
+ else
+ par->bplcon0 |= par->bpp << 12;
+ if (var->nonstd == FB_NONSTD_HAM)
+ par->bplcon0 |= BPC0_HAM;
+ if (var->sync & FB_SYNC_EXT)
+ par->bplcon0 |= BPC0_ERSY;
+
+ if (IS_AGA)
+ par->fmode = bplfetchmode[maxfmode];
+
+ switch (par->vmode & FB_VMODE_MASK) {
+ case FB_VMODE_INTERLACED:
+ par->bplcon0 |= BPC0_LACE;
+ break;
+ case FB_VMODE_DOUBLE:
+ if (IS_AGA)
+ par->fmode |= FMODE_SSCAN2 | FMODE_BSCAN2;
+ break;
+ }
+
+ if (!((par->vmode ^ var->vmode) & FB_VMODE_YWRAP)) {
+ par->xoffset = var->xoffset;
+ par->yoffset = var->yoffset;
+ if (par->vmode & FB_VMODE_YWRAP) {
+ if (par->xoffset || par->yoffset < 0 ||
+ par->yoffset >= par->vyres)
+ par->xoffset = par->yoffset = 0;
+ } else {
+ if (par->xoffset < 0 ||
+ par->xoffset > upx(16 << maxfmode, par->vxres - par->xres) ||
+ par->yoffset < 0 || par->yoffset > par->vyres - par->yres)
+ par->xoffset = par->yoffset = 0;
+ }
+ } else
+ par->xoffset = par->yoffset = 0;
+
+ par->crsr.crsr_x = par->crsr.crsr_y = 0;
+ par->crsr.spot_x = par->crsr.spot_y = 0;
+ par->crsr.height = par->crsr.width = 0;
+
+ return 0;
+}
+
+ /*
+ * Fill the `var' structure based on the values in `par' and maybe
+ * other values read out of the hardware.
+ */
+
+static void ami_encode_var(struct fb_var_screeninfo *var,
+ struct amifb_par *par)
+{
+ u_short clk_shift, line_shift;
+
+ memset(var, 0, sizeof(struct fb_var_screeninfo));
+
+ clk_shift = par->clk_shift;
+ line_shift = par->line_shift;
+
+ var->xres = par->xres;
+ var->yres = par->yres;
+ var->xres_virtual = par->vxres;
+ var->yres_virtual = par->vyres;
+ var->xoffset = par->xoffset;
+ var->yoffset = par->yoffset;
+
+ var->bits_per_pixel = par->bpp;
+ var->grayscale = 0;
+
+ var->red.offset = 0;
+ var->red.msb_right = 0;
+ var->red.length = par->bpp;
+ if (par->bplcon0 & BPC0_HAM)
+ var->red.length -= 2;
+ var->blue = var->green = var->red;
+ var->transp.offset = 0;
+ var->transp.length = 0;
+ var->transp.msb_right = 0;
+
+ if (par->bplcon0 & BPC0_HAM)
+ var->nonstd = FB_NONSTD_HAM;
+ else
+ var->nonstd = 0;
+ var->activate = 0;
+
+ var->height = -1;
+ var->width = -1;
+
+ var->pixclock = pixclock[clk_shift];
+
+ if (IS_AGA && par->fmode & FMODE_BSCAN2)
+ var->vmode = FB_VMODE_DOUBLE;
+ else if (par->bplcon0 & BPC0_LACE)
+ var->vmode = FB_VMODE_INTERLACED;
+ else
+ var->vmode = FB_VMODE_NONINTERLACED;
+
+ if (!IS_OCS && par->beamcon0 & BMC0_VARBEAMEN) {
+ var->hsync_len = (par->hsstop - par->hsstrt)>>clk_shift;
+ var->right_margin = par->hsstrt>>clk_shift;
+ var->left_margin = (par->htotal>>clk_shift) - var->xres - var->right_margin - var->hsync_len;
+ var->vsync_len = (par->vsstop - par->vsstrt)>>line_shift;
+ var->lower_margin = par->vsstrt>>line_shift;
+ var->upper_margin = (par->vtotal>>line_shift) - var->yres - var->lower_margin - var->vsync_len;
+ var->sync = 0;
+ if (par->beamcon0 & BMC0_HSYTRUE)
+ var->sync |= FB_SYNC_HOR_HIGH_ACT;
+ if (par->beamcon0 & BMC0_VSYTRUE)
+ var->sync |= FB_SYNC_VERT_HIGH_ACT;
+ if (par->beamcon0 & BMC0_CSYTRUE)
+ var->sync |= FB_SYNC_COMP_HIGH_ACT;
+ } else {
+ var->sync = FB_SYNC_BROADCAST;
+ var->hsync_len = (152>>clk_shift) + mod4(par->diwstop_h);
+ var->right_margin = ((par->htotal - down4(par->diwstop_h))>>clk_shift) + var->hsync_len;
+ var->left_margin = (par->htotal>>clk_shift) - var->xres - var->right_margin - var->hsync_len;
+ var->vsync_len = 4>>line_shift;
+ var->lower_margin = ((par->vtotal - par->diwstop_v)>>line_shift) + var->vsync_len;
+ var->upper_margin = (((par->vtotal - 2)>>line_shift) + 1) - var->yres -
+ var->lower_margin - var->vsync_len;
+ }
+
+ if (par->bplcon0 & BPC0_ERSY)
+ var->sync |= FB_SYNC_EXT;
+ if (par->vmode & FB_VMODE_YWRAP)
+ var->vmode |= FB_VMODE_YWRAP;
+}
+
+
+ /*
+ * Update hardware
+ */
+
+static void ami_update_par(struct fb_info *info)
+{
+ struct amifb_par *par = info->par;
+ short clk_shift, vshift, fstrt, fsize, fstop, fconst, shift, move, mod;
+
+ clk_shift = par->clk_shift;
+
+ if (!(par->vmode & FB_VMODE_SMOOTH_XPAN))
+ par->xoffset = upx(16 << maxfmode, par->xoffset);
+
+ fconst = 16 << maxfmode << clk_shift;
+ vshift = modx(16 << maxfmode, par->xoffset);
+ fstrt = par->diwstrt_h - (vshift << clk_shift) - 4;
+ fsize = (par->xres + vshift) << clk_shift;
+ shift = modx(fconst, fstrt);
+ move = downx(2 << maxfmode, div8(par->xoffset));
+ if (maxfmode + clk_shift > 1) {
+ fstrt = downx(fconst, fstrt) - 64;
+ fsize = upx(fconst, fsize);
+ fstop = fstrt + fsize - fconst;
+ } else {
+ mod = fstrt = downx(fconst, fstrt) - fconst;
+ fstop = fstrt + upx(fconst, fsize) - 64;
+ fsize = up64(fsize);
+ fstrt = fstop - fsize + 64;
+ if (fstrt < min_fstrt) {
+ fstop += min_fstrt - fstrt;
+ fstrt = min_fstrt;
+ }
+ move = move - div8((mod - fstrt)>>clk_shift);
+ }
+ mod = par->next_line - div8(fsize>>clk_shift);
+ par->ddfstrt = fstrt;
+ par->ddfstop = fstop;
+ par->bplcon1 = hscroll2hw(shift);
+ par->bpl2mod = mod;
+ if (par->bplcon0 & BPC0_LACE)
+ par->bpl2mod += par->next_line;
+ if (IS_AGA && (par->fmode & FMODE_BSCAN2))
+ par->bpl1mod = -div8(fsize>>clk_shift);
+ else
+ par->bpl1mod = par->bpl2mod;
+
+ if (par->yoffset) {
+ par->bplpt0 = info->fix.smem_start +
+ par->next_line * par->yoffset + move;
+ if (par->vmode & FB_VMODE_YWRAP) {
+ if (par->yoffset > par->vyres - par->yres) {
+ par->bplpt0wrap = info->fix.smem_start + move;
+ if (par->bplcon0 & BPC0_LACE &&
+ mod2(par->diwstrt_v + par->vyres -
+ par->yoffset))
+ par->bplpt0wrap += par->next_line;
+ }
+ }
+ } else
+ par->bplpt0 = info->fix.smem_start + move;
+
+ if (par->bplcon0 & BPC0_LACE && mod2(par->diwstrt_v))
+ par->bplpt0 += par->next_line;
+}
+
+
+ /*
+ * Pan or Wrap the Display
+ *
+ * This call looks only at xoffset, yoffset and the FB_VMODE_YWRAP flag
+ * in `var'.
+ */
+
+static void ami_pan_var(struct fb_var_screeninfo *var, struct fb_info *info)
+{
+ struct amifb_par *par = info->par;
+
+ par->xoffset = var->xoffset;
+ par->yoffset = var->yoffset;
+ if (var->vmode & FB_VMODE_YWRAP)
+ par->vmode |= FB_VMODE_YWRAP;
+ else
+ par->vmode &= ~FB_VMODE_YWRAP;
+
+ do_vmode_pan = 0;
+ ami_update_par(info);
+ do_vmode_pan = 1;
+}
+
+
+static void ami_update_display(const struct amifb_par *par)
+{
+ custom.bplcon1 = par->bplcon1;
+ custom.bpl1mod = par->bpl1mod;
+ custom.bpl2mod = par->bpl2mod;
+ custom.ddfstrt = ddfstrt2hw(par->ddfstrt);
+ custom.ddfstop = ddfstop2hw(par->ddfstop);
+}
+
+ /*
+ * Change the video mode (called by VBlank interrupt)
+ */
+
+static void ami_init_display(const struct amifb_par *par)
+{
+ int i;
+
+ custom.bplcon0 = par->bplcon0 & ~BPC0_LACE;
+ custom.bplcon2 = (IS_OCS ? 0 : BPC2_KILLEHB) | BPC2_PF2P2 | BPC2_PF1P2;
+ if (!IS_OCS) {
+ custom.bplcon3 = par->bplcon3;
+ if (IS_AGA)
+ custom.bplcon4 = BPC4_ESPRM4 | BPC4_OSPRM4;
+ if (par->beamcon0 & BMC0_VARBEAMEN) {
+ custom.htotal = htotal2hw(par->htotal);
+ custom.hbstrt = hbstrt2hw(par->hbstrt);
+ custom.hbstop = hbstop2hw(par->hbstop);
+ custom.hsstrt = hsstrt2hw(par->hsstrt);
+ custom.hsstop = hsstop2hw(par->hsstop);
+ custom.hcenter = hcenter2hw(par->hcenter);
+ custom.vtotal = vtotal2hw(par->vtotal);
+ custom.vbstrt = vbstrt2hw(par->vbstrt);
+ custom.vbstop = vbstop2hw(par->vbstop);
+ custom.vsstrt = vsstrt2hw(par->vsstrt);
+ custom.vsstop = vsstop2hw(par->vsstop);
+ }
+ }
+ if (!IS_OCS || par->hsstop)
+ custom.beamcon0 = par->beamcon0;
+ if (IS_AGA)
+ custom.fmode = par->fmode;
+
+ /*
+ * The minimum period for audio depends on htotal
+ */
+
+ amiga_audio_min_period = div16(par->htotal);
+
+ is_lace = par->bplcon0 & BPC0_LACE ? 1 : 0;
+#if 1
+ if (is_lace) {
+ i = custom.vposr >> 15;
+ } else {
+ custom.vposw = custom.vposr | 0x8000;
+ i = 1;
+ }
+#else
+ i = 1;
+ custom.vposw = custom.vposr | 0x8000;
+#endif
+ custom.cop2lc = (u_short *)ZTWO_PADDR(copdisplay.list[currentcop][i]);
+}
+
+ /*
+ * (Un)Blank the screen (called by VBlank interrupt)
+ */
+
+static void ami_do_blank(const struct amifb_par *par)
+{
+#if defined(CONFIG_FB_AMIGA_AGA)
+ u_short bplcon3 = par->bplcon3;
+#endif
+ u_char red, green, blue;
+
+ if (do_blank > 0) {
+ custom.dmacon = DMAF_RASTER | DMAF_SPRITE;
+ red = green = blue = 0;
+ if (!IS_OCS && do_blank > 1) {
+ switch (do_blank) {
+ case FB_BLANK_VSYNC_SUSPEND:
+ custom.hsstrt = hsstrt2hw(par->hsstrt);
+ custom.hsstop = hsstop2hw(par->hsstop);
+ custom.vsstrt = vsstrt2hw(par->vtotal + 4);
+ custom.vsstop = vsstop2hw(par->vtotal + 4);
+ break;
+ case FB_BLANK_HSYNC_SUSPEND:
+ custom.hsstrt = hsstrt2hw(par->htotal + 16);
+ custom.hsstop = hsstop2hw(par->htotal + 16);
+ custom.vsstrt = vsstrt2hw(par->vsstrt);
+ custom.vsstop = vsstrt2hw(par->vsstop);
+ break;
+ case FB_BLANK_POWERDOWN:
+ custom.hsstrt = hsstrt2hw(par->htotal + 16);
+ custom.hsstop = hsstop2hw(par->htotal + 16);
+ custom.vsstrt = vsstrt2hw(par->vtotal + 4);
+ custom.vsstop = vsstop2hw(par->vtotal + 4);
+ break;
+ }
+ if (!(par->beamcon0 & BMC0_VARBEAMEN)) {
+ custom.htotal = htotal2hw(par->htotal);
+ custom.vtotal = vtotal2hw(par->vtotal);
+ custom.beamcon0 = BMC0_HARDDIS | BMC0_VARBEAMEN |
+ BMC0_VARVSYEN | BMC0_VARHSYEN | BMC0_VARCSYEN;
+ }
+ }
+ } else {
+ custom.dmacon = DMAF_SETCLR | DMAF_RASTER | DMAF_SPRITE;
+ red = red0;
+ green = green0;
+ blue = blue0;
+ if (!IS_OCS) {
+ custom.hsstrt = hsstrt2hw(par->hsstrt);
+ custom.hsstop = hsstop2hw(par->hsstop);
+ custom.vsstrt = vsstrt2hw(par->vsstrt);
+ custom.vsstop = vsstop2hw(par->vsstop);
+ custom.beamcon0 = par->beamcon0;
+ }
+ }
+#if defined(CONFIG_FB_AMIGA_AGA)
+ if (IS_AGA) {
+ custom.bplcon3 = bplcon3;
+ custom.color[0] = rgb2hw8_high(red, green, blue);
+ custom.bplcon3 = bplcon3 | BPC3_LOCT;
+ custom.color[0] = rgb2hw8_low(red, green, blue);
+ custom.bplcon3 = bplcon3;
+ } else
+#endif
+#if defined(CONFIG_FB_AMIGA_ECS)
+ if (par->bplcon0 & BPC0_SHRES) {
+ u_short color, mask;
+ int i;
+
+ mask = 0x3333;
+ color = rgb2hw2(red, green, blue);
+ for (i = 12; i >= 0; i -= 4)
+ custom.color[i] = ecs_palette[i] = (ecs_palette[i] & mask) | color;
+ mask <<= 2; color >>= 2;
+ for (i = 3; i >= 0; i--)
+ custom.color[i] = ecs_palette[i] = (ecs_palette[i] & mask) | color;
+ } else
+#endif
+ custom.color[0] = rgb2hw4(red, green, blue);
+ is_blanked = do_blank > 0 ? do_blank : 0;
+}
+
+static int ami_get_fix_cursorinfo(struct fb_fix_cursorinfo *fix,
+ const struct amifb_par *par)
+{
+ fix->crsr_width = fix->crsr_xsize = par->crsr.width;
+ fix->crsr_height = fix->crsr_ysize = par->crsr.height;
+ fix->crsr_color1 = 17;
+ fix->crsr_color2 = 18;
+ return 0;
+}
+
+static int ami_get_var_cursorinfo(struct fb_var_cursorinfo *var,
+ u_char __user *data,
+ const struct amifb_par *par)
+{
+ register u_short *lspr, *sspr;
+#ifdef __mc68000__
+ register u_long datawords asm ("d2");
+#else
+ register u_long datawords;
+#endif
+ register short delta;
+ register u_char color;
+ short height, width, bits, words;
+ int size, alloc;
+
+ size = par->crsr.height * par->crsr.width;
+ alloc = var->height * var->width;
+ var->height = par->crsr.height;
+ var->width = par->crsr.width;
+ var->xspot = par->crsr.spot_x;
+ var->yspot = par->crsr.spot_y;
+ if (size > var->height * var->width)
+ return -ENAMETOOLONG;
+ if (!access_ok(VERIFY_WRITE, data, size))
+ return -EFAULT;
+ delta = 1 << par->crsr.fmode;
+ lspr = lofsprite + (delta << 1);
+ if (par->bplcon0 & BPC0_LACE)
+ sspr = shfsprite + (delta << 1);
+ else
+ sspr = NULL;
+ for (height = (short)var->height - 1; height >= 0; height--) {
+ bits = 0; words = delta; datawords = 0;
+ for (width = (short)var->width - 1; width >= 0; width--) {
+ if (bits == 0) {
+ bits = 16; --words;
+#ifdef __mc68000__
+ asm volatile ("movew %1@(%3:w:2),%0 ; swap %0 ; movew %1@+,%0"
+ : "=d" (datawords), "=a" (lspr) : "1" (lspr), "d" (delta));
+#else
+ datawords = (*(lspr + delta) << 16) | (*lspr++);
+#endif
+ }
+ --bits;
+#ifdef __mc68000__
+ asm volatile (
+ "clrb %0 ; swap %1 ; lslw #1,%1 ; roxlb #1,%0 ; "
+ "swap %1 ; lslw #1,%1 ; roxlb #1,%0"
+ : "=d" (color), "=d" (datawords) : "1" (datawords));
+#else
+ color = (((datawords >> 30) & 2)
+ | ((datawords >> 15) & 1));
+ datawords <<= 1;
+#endif
+ put_user(color, data++);
+ }
+ if (bits > 0) {
+ --words; ++lspr;
+ }
+ while (--words >= 0)
+ ++lspr;
+#ifdef __mc68000__
+ asm volatile ("lea %0@(%4:w:2),%0 ; tstl %1 ; jeq 1f ; exg %0,%1\n1:"
+ : "=a" (lspr), "=a" (sspr) : "0" (lspr), "1" (sspr), "d" (delta));
+#else
+ lspr += delta;
+ if (sspr) {
+ u_short *tmp = lspr;
+ lspr = sspr;
+ sspr = tmp;
+ }
+#endif
+ }
+ return 0;
+}
+
+static int ami_set_var_cursorinfo(struct fb_var_cursorinfo *var,
+ u_char __user *data, struct amifb_par *par)
+{
+ register u_short *lspr, *sspr;
+#ifdef __mc68000__
+ register u_long datawords asm ("d2");
+#else
+ register u_long datawords;
+#endif
+ register short delta;
+ u_short fmode;
+ short height, width, bits, words;
+
+ if (!var->width)
+ return -EINVAL;
+ else if (var->width <= 16)
+ fmode = TAG_FMODE_1;
+ else if (var->width <= 32)
+ fmode = TAG_FMODE_2;
+ else if (var->width <= 64)
+ fmode = TAG_FMODE_4;
+ else
+ return -EINVAL;
+ if (fmode > maxfmode)
+ return -EINVAL;
+ if (!var->height)
+ return -EINVAL;
+ if (!access_ok(VERIFY_READ, data, var->width * var->height))
+ return -EFAULT;
+ delta = 1 << fmode;
+ lofsprite = shfsprite = (u_short *)spritememory;
+ lspr = lofsprite + (delta << 1);
+ if (par->bplcon0 & BPC0_LACE) {
+ if (((var->height + 4) << fmode << 2) > SPRITEMEMSIZE)
+ return -EINVAL;
+ memset(lspr, 0, (var->height + 4) << fmode << 2);
+ shfsprite += ((var->height + 5)&-2) << fmode;
+ sspr = shfsprite + (delta << 1);
+ } else {
+ if (((var->height + 2) << fmode << 2) > SPRITEMEMSIZE)
+ return -EINVAL;
+ memset(lspr, 0, (var->height + 2) << fmode << 2);
+ sspr = NULL;
+ }
+ for (height = (short)var->height - 1; height >= 0; height--) {
+ bits = 16; words = delta; datawords = 0;
+ for (width = (short)var->width - 1; width >= 0; width--) {
+ unsigned long tdata = 0;
+ get_user(tdata, data);
+ data++;
+#ifdef __mc68000__
+ asm volatile (
+ "lsrb #1,%2 ; roxlw #1,%0 ; swap %0 ; "
+ "lsrb #1,%2 ; roxlw #1,%0 ; swap %0"
+ : "=d" (datawords)
+ : "0" (datawords), "d" (tdata));
+#else
+ datawords = ((datawords << 1) & 0xfffefffe);
+ datawords |= tdata & 1;
+ datawords |= (tdata & 2) << (16 - 1);
+#endif
+ if (--bits == 0) {
+ bits = 16; --words;
+#ifdef __mc68000__
+ asm volatile ("swap %2 ; movew %2,%0@(%3:w:2) ; swap %2 ; movew %2,%0@+"
+ : "=a" (lspr) : "0" (lspr), "d" (datawords), "d" (delta));
+#else
+ *(lspr + delta) = (u_short) (datawords >> 16);
+ *lspr++ = (u_short) (datawords & 0xffff);
+#endif
+ }
+ }
+ if (bits < 16) {
+ --words;
+#ifdef __mc68000__
+ asm volatile (
+ "swap %2 ; lslw %4,%2 ; movew %2,%0@(%3:w:2) ; "
+ "swap %2 ; lslw %4,%2 ; movew %2,%0@+"
+ : "=a" (lspr) : "0" (lspr), "d" (datawords), "d" (delta), "d" (bits));
+#else
+ *(lspr + delta) = (u_short) (datawords >> (16 + bits));
+ *lspr++ = (u_short) ((datawords & 0x0000ffff) >> bits);
+#endif
+ }
+ while (--words >= 0) {
+#ifdef __mc68000__
+ asm volatile ("moveql #0,%%d0 ; movew %%d0,%0@(%2:w:2) ; movew %%d0,%0@+"
+ : "=a" (lspr) : "0" (lspr), "d" (delta) : "d0");
+#else
+ *(lspr + delta) = 0;
+ *lspr++ = 0;
+#endif
+ }
+#ifdef __mc68000__
+ asm volatile ("lea %0@(%4:w:2),%0 ; tstl %1 ; jeq 1f ; exg %0,%1\n1:"
+ : "=a" (lspr), "=a" (sspr) : "0" (lspr), "1" (sspr), "d" (delta));
+#else
+ lspr += delta;
+ if (sspr) {
+ u_short *tmp = lspr;
+ lspr = sspr;
+ sspr = tmp;
+ }
+#endif
+ }
+ par->crsr.height = var->height;
+ par->crsr.width = var->width;
+ par->crsr.spot_x = var->xspot;
+ par->crsr.spot_y = var->yspot;
+ par->crsr.fmode = fmode;
+ if (IS_AGA) {
+ par->fmode &= ~(FMODE_SPAGEM | FMODE_SPR32);
+ par->fmode |= sprfetchmode[fmode];
+ custom.fmode = par->fmode;
+ }
+ return 0;
+}
+
+static int ami_get_cursorstate(struct fb_cursorstate *state,
+ const struct amifb_par *par)
+{
+ state->xoffset = par->crsr.crsr_x;
+ state->yoffset = par->crsr.crsr_y;
+ state->mode = cursormode;
+ return 0;
+}
+
+static int ami_set_cursorstate(struct fb_cursorstate *state,
+ struct amifb_par *par)
+{
+ par->crsr.crsr_x = state->xoffset;
+ par->crsr.crsr_y = state->yoffset;
+ if ((cursormode = state->mode) == FB_CURSOR_OFF)
+ cursorstate = -1;
+ do_cursor = 1;
+ return 0;
+}
+
+static void ami_set_sprite(const struct amifb_par *par)
+{
+ copins *copl, *cops;
+ u_short hs, vs, ve;
+ u_long pl, ps, pt;
+ short mx, my;
+
+ cops = copdisplay.list[currentcop][0];
+ copl = copdisplay.list[currentcop][1];
+ ps = pl = ZTWO_PADDR(dummysprite);
+ mx = par->crsr.crsr_x - par->crsr.spot_x;
+ my = par->crsr.crsr_y - par->crsr.spot_y;
+ if (!(par->vmode & FB_VMODE_YWRAP)) {
+ mx -= par->xoffset;
+ my -= par->yoffset;
+ }
+ if (!is_blanked && cursorstate > 0 && par->crsr.height > 0 &&
+ mx > -(short)par->crsr.width && mx < par->xres &&
+ my > -(short)par->crsr.height && my < par->yres) {
+ pl = ZTWO_PADDR(lofsprite);
+ hs = par->diwstrt_h + (mx << par->clk_shift) - 4;
+ vs = par->diwstrt_v + (my << par->line_shift);
+ ve = vs + (par->crsr.height << par->line_shift);
+ if (par->bplcon0 & BPC0_LACE) {
+ ps = ZTWO_PADDR(shfsprite);
+ lofsprite[0] = spr2hw_pos(vs, hs);
+ shfsprite[0] = spr2hw_pos(vs + 1, hs);
+ if (mod2(vs)) {
+ lofsprite[1 << par->crsr.fmode] = spr2hw_ctl(vs, hs, ve);
+ shfsprite[1 << par->crsr.fmode] = spr2hw_ctl(vs + 1, hs, ve + 1);
+ pt = pl; pl = ps; ps = pt;
+ } else {
+ lofsprite[1 << par->crsr.fmode] = spr2hw_ctl(vs, hs, ve + 1);
+ shfsprite[1 << par->crsr.fmode] = spr2hw_ctl(vs + 1, hs, ve);
+ }
+ } else {
+ lofsprite[0] = spr2hw_pos(vs, hs) | (IS_AGA && (par->fmode & FMODE_BSCAN2) ? 0x80 : 0);
+ lofsprite[1 << par->crsr.fmode] = spr2hw_ctl(vs, hs, ve);
+ }
+ }
+ copl[cop_spr0ptrh].w[1] = highw(pl);
+ copl[cop_spr0ptrl].w[1] = loww(pl);
+ if (par->bplcon0 & BPC0_LACE) {
+ cops[cop_spr0ptrh].w[1] = highw(ps);
+ cops[cop_spr0ptrl].w[1] = loww(ps);
+ }
+}
+
+
+ /*
+ * Initialise the Copper Initialisation List
+ */
+
+static void __init ami_init_copper(void)
+{
+ copins *cop = copdisplay.init;
+ u_long p;
+ int i;
+
+ if (!IS_OCS) {
+ (cop++)->l = CMOVE(BPC0_COLOR | BPC0_SHRES | BPC0_ECSENA, bplcon0);
+ (cop++)->l = CMOVE(0x0181, diwstrt);
+ (cop++)->l = CMOVE(0x0281, diwstop);
+ (cop++)->l = CMOVE(0x0000, diwhigh);
+ } else
+ (cop++)->l = CMOVE(BPC0_COLOR, bplcon0);
+ p = ZTWO_PADDR(dummysprite);
+ for (i = 0; i < 8; i++) {
+ (cop++)->l = CMOVE(0, spr[i].pos);
+ (cop++)->l = CMOVE(highw(p), sprpt[i]);
+ (cop++)->l = CMOVE2(loww(p), sprpt[i]);
+ }
+
+ (cop++)->l = CMOVE(IF_SETCLR | IF_COPER, intreq);
+ copdisplay.wait = cop;
+ (cop++)->l = CEND;
+ (cop++)->l = CMOVE(0, copjmp2);
+ cop->l = CEND;
+
+ custom.cop1lc = (u_short *)ZTWO_PADDR(copdisplay.init);
+ custom.copjmp1 = 0;
+}
+
+static void ami_reinit_copper(const struct amifb_par *par)
+{
+ copdisplay.init[cip_bplcon0].w[1] = ~(BPC0_BPU3 | BPC0_BPU2 | BPC0_BPU1 | BPC0_BPU0) & par->bplcon0;
+ copdisplay.wait->l = CWAIT(32, par->diwstrt_v - 4);
+}
+
+
+ /*
+ * Rebuild the Copper List
+ *
+ * We only change the things that are not static
+ */
+
+static void ami_rebuild_copper(const struct amifb_par *par)
+{
+ copins *copl, *cops;
+ u_short line, h_end1, h_end2;
+ short i;
+ u_long p;
+
+ if (IS_AGA && maxfmode + par->clk_shift == 0)
+ h_end1 = par->diwstrt_h - 64;
+ else
+ h_end1 = par->htotal - 32;
+ h_end2 = par->ddfstop + 64;
+
+ ami_set_sprite(par);
+
+ copl = copdisplay.rebuild[1];
+ p = par->bplpt0;
+ if (par->vmode & FB_VMODE_YWRAP) {
+ if ((par->vyres - par->yoffset) != 1 || !mod2(par->diwstrt_v)) {
+ if (par->yoffset > par->vyres - par->yres) {
+ for (i = 0; i < (short)par->bpp; i++, p += par->next_plane) {
+ (copl++)->l = CMOVE(highw(p), bplpt[i]);
+ (copl++)->l = CMOVE2(loww(p), bplpt[i]);
+ }
+ line = par->diwstrt_v + ((par->vyres - par->yoffset) << par->line_shift) - 1;
+ while (line >= 512) {
+ (copl++)->l = CWAIT(h_end1, 510);
+ line -= 512;
+ }
+ if (line >= 510 && IS_AGA && maxfmode + par->clk_shift == 0)
+ (copl++)->l = CWAIT(h_end1, line);
+ else
+ (copl++)->l = CWAIT(h_end2, line);
+ p = par->bplpt0wrap;
+ }
+ } else
+ p = par->bplpt0wrap;
+ }
+ for (i = 0; i < (short)par->bpp; i++, p += par->next_plane) {
+ (copl++)->l = CMOVE(highw(p), bplpt[i]);
+ (copl++)->l = CMOVE2(loww(p), bplpt[i]);
+ }
+ copl->l = CEND;
+
+ if (par->bplcon0 & BPC0_LACE) {
+ cops = copdisplay.rebuild[0];
+ p = par->bplpt0;
+ if (mod2(par->diwstrt_v))
+ p -= par->next_line;
+ else
+ p += par->next_line;
+ if (par->vmode & FB_VMODE_YWRAP) {
+ if ((par->vyres - par->yoffset) != 1 || mod2(par->diwstrt_v)) {
+ if (par->yoffset > par->vyres - par->yres + 1) {
+ for (i = 0; i < (short)par->bpp; i++, p += par->next_plane) {
+ (cops++)->l = CMOVE(highw(p), bplpt[i]);
+ (cops++)->l = CMOVE2(loww(p), bplpt[i]);
+ }
+ line = par->diwstrt_v + ((par->vyres - par->yoffset) << par->line_shift) - 2;
+ while (line >= 512) {
+ (cops++)->l = CWAIT(h_end1, 510);
+ line -= 512;
+ }
+ if (line > 510 && IS_AGA && maxfmode + par->clk_shift == 0)
+ (cops++)->l = CWAIT(h_end1, line);
+ else
+ (cops++)->l = CWAIT(h_end2, line);
+ p = par->bplpt0wrap;
+ if (mod2(par->diwstrt_v + par->vyres -
+ par->yoffset))
+ p -= par->next_line;
+ else
+ p += par->next_line;
+ }
+ } else
+ p = par->bplpt0wrap - par->next_line;
+ }
+ for (i = 0; i < (short)par->bpp; i++, p += par->next_plane) {
+ (cops++)->l = CMOVE(highw(p), bplpt[i]);
+ (cops++)->l = CMOVE2(loww(p), bplpt[i]);
+ }
+ cops->l = CEND;
+ }
+}
+
+
+ /*
+ * Build the Copper List
+ */
+
+static void ami_build_copper(struct fb_info *info)
+{
+ struct amifb_par *par = info->par;
+ copins *copl, *cops;
+ u_long p;
+
+ currentcop = 1 - currentcop;
+
+ copl = copdisplay.list[currentcop][1];
+
+ (copl++)->l = CWAIT(0, 10);
+ (copl++)->l = CMOVE(par->bplcon0, bplcon0);
+ (copl++)->l = CMOVE(0, sprpt[0]);
+ (copl++)->l = CMOVE2(0, sprpt[0]);
+
+ if (par->bplcon0 & BPC0_LACE) {
+ cops = copdisplay.list[currentcop][0];
+
+ (cops++)->l = CWAIT(0, 10);
+ (cops++)->l = CMOVE(par->bplcon0, bplcon0);
+ (cops++)->l = CMOVE(0, sprpt[0]);
+ (cops++)->l = CMOVE2(0, sprpt[0]);
+
+ (copl++)->l = CMOVE(diwstrt2hw(par->diwstrt_h, par->diwstrt_v + 1), diwstrt);
+ (copl++)->l = CMOVE(diwstop2hw(par->diwstop_h, par->diwstop_v + 1), diwstop);
+ (cops++)->l = CMOVE(diwstrt2hw(par->diwstrt_h, par->diwstrt_v), diwstrt);
+ (cops++)->l = CMOVE(diwstop2hw(par->diwstop_h, par->diwstop_v), diwstop);
+ if (!IS_OCS) {
+ (copl++)->l = CMOVE(diwhigh2hw(par->diwstrt_h, par->diwstrt_v + 1,
+ par->diwstop_h, par->diwstop_v + 1), diwhigh);
+ (cops++)->l = CMOVE(diwhigh2hw(par->diwstrt_h, par->diwstrt_v,
+ par->diwstop_h, par->diwstop_v), diwhigh);
+#if 0
+ if (par->beamcon0 & BMC0_VARBEAMEN) {
+ (copl++)->l = CMOVE(vtotal2hw(par->vtotal), vtotal);
+ (copl++)->l = CMOVE(vbstrt2hw(par->vbstrt + 1), vbstrt);
+ (copl++)->l = CMOVE(vbstop2hw(par->vbstop + 1), vbstop);
+ (cops++)->l = CMOVE(vtotal2hw(par->vtotal), vtotal);
+ (cops++)->l = CMOVE(vbstrt2hw(par->vbstrt), vbstrt);
+ (cops++)->l = CMOVE(vbstop2hw(par->vbstop), vbstop);
+ }
+#endif
+ }
+ p = ZTWO_PADDR(copdisplay.list[currentcop][0]);
+ (copl++)->l = CMOVE(highw(p), cop2lc);
+ (copl++)->l = CMOVE2(loww(p), cop2lc);
+ p = ZTWO_PADDR(copdisplay.list[currentcop][1]);
+ (cops++)->l = CMOVE(highw(p), cop2lc);
+ (cops++)->l = CMOVE2(loww(p), cop2lc);
+ copdisplay.rebuild[0] = cops;
+ } else {
+ (copl++)->l = CMOVE(diwstrt2hw(par->diwstrt_h, par->diwstrt_v), diwstrt);
+ (copl++)->l = CMOVE(diwstop2hw(par->diwstop_h, par->diwstop_v), diwstop);
+ if (!IS_OCS) {
+ (copl++)->l = CMOVE(diwhigh2hw(par->diwstrt_h, par->diwstrt_v,
+ par->diwstop_h, par->diwstop_v), diwhigh);
+#if 0
+ if (par->beamcon0 & BMC0_VARBEAMEN) {
+ (copl++)->l = CMOVE(vtotal2hw(par->vtotal), vtotal);
+ (copl++)->l = CMOVE(vbstrt2hw(par->vbstrt), vbstrt);
+ (copl++)->l = CMOVE(vbstop2hw(par->vbstop), vbstop);
+ }
+#endif
+ }
+ }
+ copdisplay.rebuild[1] = copl;
+
+ ami_update_par(info);
+ ami_rebuild_copper(info->par);
+}
-static struct fb_ops amifb_ops = {
- .owner = THIS_MODULE,
- .fb_check_var = amifb_check_var,
- .fb_set_par = amifb_set_par,
- .fb_setcolreg = amifb_setcolreg,
- .fb_blank = amifb_blank,
- .fb_pan_display = amifb_pan_display,
- .fb_fillrect = amifb_fillrect,
- .fb_copyarea = amifb_copyarea,
- .fb_imageblit = amifb_imageblit,
- .fb_ioctl = amifb_ioctl,
-};
static void __init amifb_setup_mcap(char *spec)
{
@@ -1216,13 +2339,13 @@ static void __init amifb_setup_mcap(char *spec)
if (hmax <= 0 || hmax <= hmin)
return;
- fb_info.monspecs.vfmin = vmin;
- fb_info.monspecs.vfmax = vmax;
- fb_info.monspecs.hfmin = hmin;
- fb_info.monspecs.hfmax = hmax;
+ amifb_hfmin = hmin;
+ amifb_hfmax = hmax;
+ amifb_vfmin = vmin;
+ amifb_vfmax = vmax;
}
-int __init amifb_setup(char *options)
+static int __init amifb_setup(char *options)
{
char *this_opt;
@@ -1238,9 +2361,9 @@ int __init amifb_setup(char *options)
} else if (!strcmp(this_opt, "ilbm"))
amifb_ilbm = 1;
else if (!strncmp(this_opt, "monitorcap:", 11))
- amifb_setup_mcap(this_opt+11);
+ amifb_setup_mcap(this_opt + 11);
else if (!strncmp(this_opt, "fstart:", 7))
- min_fstrt = simple_strtoul(this_opt+7, NULL, 0);
+ min_fstrt = simple_strtoul(this_opt + 7, NULL, 0);
else
mode_option = this_opt;
}
@@ -1259,7 +2382,8 @@ static int amifb_check_var(struct fb_var_screeninfo *var,
struct amifb_par par;
/* Validate wanted screen parameters */
- if ((err = ami_decode_var(var, &par)))
+ err = ami_decode_var(var, &par, info);
+ if (err)
return err;
/* Encode (possibly rounded) screen parameters */
@@ -1270,16 +2394,19 @@ static int amifb_check_var(struct fb_var_screeninfo *var,
static int amifb_set_par(struct fb_info *info)
{
- struct amifb_par *par = (struct amifb_par *)info->par;
+ struct amifb_par *par = info->par;
+ int error;
do_vmode_pan = 0;
do_vmode_full = 0;
/* Decode wanted screen parameters */
- ami_decode_var(&info->var, par);
+ error = ami_decode_var(&info->var, par, info);
+ if (error)
+ return error;
/* Set new videomode */
- ami_build_copper();
+ ami_build_copper(info);
/* Set VBlank trigger */
do_vmode_full = 1;
@@ -1295,20 +2422,20 @@ static int amifb_set_par(struct fb_info *info)
info->fix.type = FB_TYPE_PLANES;
info->fix.type_aux = 0;
}
- info->fix.line_length = div8(upx(16<<maxfmode, par->vxres));
+ info->fix.line_length = div8(upx(16 << maxfmode, par->vxres));
if (par->vmode & FB_VMODE_YWRAP) {
info->fix.ywrapstep = 1;
info->fix.xpanstep = 0;
info->fix.ypanstep = 0;
info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YWRAP |
- FBINFO_READS_FAST; /* override SCROLL_REDRAW */
+ FBINFO_READS_FAST; /* override SCROLL_REDRAW */
} else {
info->fix.ywrapstep = 0;
if (par->vmode & FB_VMODE_SMOOTH_XPAN)
info->fix.xpanstep = 1;
else
- info->fix.xpanstep = 16<<maxfmode;
+ info->fix.xpanstep = 16 << maxfmode;
info->fix.ypanstep = 1;
info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN;
}
@@ -1317,6 +2444,95 @@ static int amifb_set_par(struct fb_info *info)
/*
+ * Set a single color register. The values supplied are already
+ * rounded down to the hardware's capabilities (according to the
+ * entries in the var structure). Return != 0 for invalid regno.
+ */
+
+static int amifb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
+ u_int transp, struct fb_info *info)
+{
+ const struct amifb_par *par = info->par;
+
+ if (IS_AGA) {
+ if (regno > 255)
+ return 1;
+ } else if (par->bplcon0 & BPC0_SHRES) {
+ if (regno > 3)
+ return 1;
+ } else {
+ if (regno > 31)
+ return 1;
+ }
+ red >>= 8;
+ green >>= 8;
+ blue >>= 8;
+ if (!regno) {
+ red0 = red;
+ green0 = green;
+ blue0 = blue;
+ }
+
+ /*
+ * Update the corresponding Hardware Color Register, unless it's Color
+ * Register 0 and the screen is blanked.
+ *
+ * VBlank is switched off to protect bplcon3 or ecs_palette[] from
+ * being changed by ami_do_blank() during the VBlank.
+ */
+
+ if (regno || !is_blanked) {
+#if defined(CONFIG_FB_AMIGA_AGA)
+ if (IS_AGA) {
+ u_short bplcon3 = par->bplcon3;
+ VBlankOff();
+ custom.bplcon3 = bplcon3 | (regno << 8 & 0xe000);
+ custom.color[regno & 31] = rgb2hw8_high(red, green,
+ blue);
+ custom.bplcon3 = bplcon3 | (regno << 8 & 0xe000) |
+ BPC3_LOCT;
+ custom.color[regno & 31] = rgb2hw8_low(red, green,
+ blue);
+ custom.bplcon3 = bplcon3;
+ VBlankOn();
+ } else
+#endif
+#if defined(CONFIG_FB_AMIGA_ECS)
+ if (par->bplcon0 & BPC0_SHRES) {
+ u_short color, mask;
+ int i;
+
+ mask = 0x3333;
+ color = rgb2hw2(red, green, blue);
+ VBlankOff();
+ for (i = regno + 12; i >= (int)regno; i -= 4)
+ custom.color[i] = ecs_palette[i] = (ecs_palette[i] & mask) | color;
+ mask <<= 2; color >>= 2;
+ regno = down16(regno) + mul4(mod4(regno));
+ for (i = regno + 3; i >= (int)regno; i--)
+ custom.color[i] = ecs_palette[i] = (ecs_palette[i] & mask) | color;
+ VBlankOn();
+ } else
+#endif
+ custom.color[regno] = rgb2hw4(red, green, blue);
+ }
+ return 0;
+}
+
+
+ /*
+ * Blank the display.
+ */
+
+static int amifb_blank(int blank, struct fb_info *info)
+{
+ do_blank = blank ? blank : -1;
+
+ return 0;
+}
+
+
+ /*
* Pan or Wrap the Display
*
* This call looks only at xoffset, yoffset and the FB_VMODE_YWRAP flag
@@ -1327,18 +2543,19 @@ static int amifb_pan_display(struct fb_var_screeninfo *var,
{
if (var->vmode & FB_VMODE_YWRAP) {
if (var->yoffset < 0 ||
- var->yoffset >= info->var.yres_virtual || var->xoffset)
- return -EINVAL;
+ var->yoffset >= info->var.yres_virtual || var->xoffset)
+ return -EINVAL;
} else {
/*
* TODO: There will be problems when xpan!=1, so some columns
* on the right side will never be seen
*/
- if (var->xoffset+info->var.xres > upx(16<<maxfmode, info->var.xres_virtual) ||
- var->yoffset+info->var.yres > info->var.yres_virtual)
+ if (var->xoffset + info->var.xres >
+ upx(16 << maxfmode, info->var.xres_virtual) ||
+ var->yoffset + info->var.yres > info->var.yres_virtual)
return -EINVAL;
}
- ami_pan_var(var);
+ ami_pan_var(var, info);
info->var.xoffset = var->xoffset;
info->var.yoffset = var->yoffset;
if (var->vmode & FB_VMODE_YWRAP)
@@ -1360,10 +2577,10 @@ static int amifb_pan_display(struct fb_var_screeninfo *var,
#endif
- /*
- * Compose two values, using a bitmask as decision value
- * This is equivalent to (a & mask) | (b & ~mask)
- */
+ /*
+ * Compose two values, using a bitmask as decision value
+ * This is equivalent to (a & mask) | (b & ~mask)
+ */
static inline unsigned long comp(unsigned long a, unsigned long b,
unsigned long mask)
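A quick worked example of the compose helper (illustrative): with a = 0xAAAA, b = 0x5555 and mask = 0xFF00, comp() keeps a's bits where the mask is set and b's bits elsewhere, i.e. (0xAAAA & 0xFF00) | (0x5555 & ~0xFF00) = 0xAA55.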
@@ -1379,29 +2596,29 @@ static inline unsigned long xor(unsigned long a, unsigned long b,
}
- /*
- * Unaligned forward bit copy using 32-bit or 64-bit memory accesses
- */
+ /*
+ * Unaligned forward bit copy using 32-bit or 64-bit memory accesses
+ */
static void bitcpy(unsigned long *dst, int dst_idx, const unsigned long *src,
int src_idx, u32 n)
{
unsigned long first, last;
- int shift = dst_idx-src_idx, left, right;
+ int shift = dst_idx - src_idx, left, right;
unsigned long d0, d1;
int m;
if (!n)
return;
- shift = dst_idx-src_idx;
+ shift = dst_idx - src_idx;
first = ~0UL >> dst_idx;
- last = ~(~0UL >> ((dst_idx+n) % BITS_PER_LONG));
+ last = ~(~0UL >> ((dst_idx + n) % BITS_PER_LONG));
if (!shift) {
// Same alignment for source and dest
- if (dst_idx+n <= BITS_PER_LONG) {
+ if (dst_idx + n <= BITS_PER_LONG) {
// Single word
if (last)
first &= last;
@@ -1413,7 +2630,7 @@ static void bitcpy(unsigned long *dst, int dst_idx, const unsigned long *src,
*dst = comp(*src, *dst, first);
dst++;
src++;
- n -= BITS_PER_LONG-dst_idx;
+ n -= BITS_PER_LONG - dst_idx;
}
// Main chunk
@@ -1439,17 +2656,17 @@ static void bitcpy(unsigned long *dst, int dst_idx, const unsigned long *src,
} else {
// Different alignment for source and dest
- right = shift & (BITS_PER_LONG-1);
- left = -shift & (BITS_PER_LONG-1);
+ right = shift & (BITS_PER_LONG - 1);
+ left = -shift & (BITS_PER_LONG - 1);
- if (dst_idx+n <= BITS_PER_LONG) {
+ if (dst_idx + n <= BITS_PER_LONG) {
// Single destination word
if (last)
first &= last;
if (shift > 0) {
// Single source word
*dst = comp(*src >> right, *dst, first);
- } else if (src_idx+n <= BITS_PER_LONG) {
+ } else if (src_idx + n <= BITS_PER_LONG) {
// Single source word
*dst = comp(*src << left, *dst, first);
} else {
@@ -1467,7 +2684,7 @@ static void bitcpy(unsigned long *dst, int dst_idx, const unsigned long *src,
// Single source word
*dst = comp(d0 >> right, *dst, first);
dst++;
- n -= BITS_PER_LONG-dst_idx;
+ n -= BITS_PER_LONG - dst_idx;
} else {
// 2 source words
d1 = *src++;
@@ -1475,7 +2692,7 @@ static void bitcpy(unsigned long *dst, int dst_idx, const unsigned long *src,
first);
d0 = d1;
dst++;
- n -= BITS_PER_LONG-dst_idx;
+ n -= BITS_PER_LONG - dst_idx;
}
// Main chunk
@@ -1519,40 +2736,40 @@ static void bitcpy(unsigned long *dst, int dst_idx, const unsigned long *src,
}
- /*
- * Unaligned reverse bit copy using 32-bit or 64-bit memory accesses
- */
+ /*
+ * Unaligned reverse bit copy using 32-bit or 64-bit memory accesses
+ */
static void bitcpy_rev(unsigned long *dst, int dst_idx,
const unsigned long *src, int src_idx, u32 n)
{
unsigned long first, last;
- int shift = dst_idx-src_idx, left, right;
+ int shift = dst_idx - src_idx, left, right;
unsigned long d0, d1;
int m;
if (!n)
return;
- dst += (n-1)/BITS_PER_LONG;
- src += (n-1)/BITS_PER_LONG;
- if ((n-1) % BITS_PER_LONG) {
- dst_idx += (n-1) % BITS_PER_LONG;
+ dst += (n - 1) / BITS_PER_LONG;
+ src += (n - 1) / BITS_PER_LONG;
+ if ((n - 1) % BITS_PER_LONG) {
+ dst_idx += (n - 1) % BITS_PER_LONG;
dst += dst_idx >> SHIFT_PER_LONG;
- dst_idx &= BITS_PER_LONG-1;
- src_idx += (n-1) % BITS_PER_LONG;
+ dst_idx &= BITS_PER_LONG - 1;
+ src_idx += (n - 1) % BITS_PER_LONG;
src += src_idx >> SHIFT_PER_LONG;
- src_idx &= BITS_PER_LONG-1;
+ src_idx &= BITS_PER_LONG - 1;
}
- shift = dst_idx-src_idx;
- first = ~0UL << (BITS_PER_LONG-1-dst_idx);
- last = ~(~0UL << (BITS_PER_LONG-1-((dst_idx-n) % BITS_PER_LONG)));
+ shift = dst_idx - src_idx;
+ first = ~0UL << (BITS_PER_LONG - 1 - dst_idx);
+ last = ~(~0UL << (BITS_PER_LONG - 1 - ((dst_idx - n) % BITS_PER_LONG)));
if (!shift) {
// Same alignment for source and dest
- if ((unsigned long)dst_idx+1 >= n) {
+ if ((unsigned long)dst_idx + 1 >= n) {
// Single word
if (last)
first &= last;
@@ -1564,7 +2781,7 @@ static void bitcpy_rev(unsigned long *dst, int dst_idx,
*dst = comp(*src, *dst, first);
dst--;
src--;
- n -= dst_idx+1;
+ n -= dst_idx + 1;
}
// Main chunk
@@ -1590,17 +2807,17 @@ static void bitcpy_rev(unsigned long *dst, int dst_idx,
} else {
// Different alignment for source and dest
- right = shift & (BITS_PER_LONG-1);
- left = -shift & (BITS_PER_LONG-1);
+ right = shift & (BITS_PER_LONG - 1);
+ left = -shift & (BITS_PER_LONG - 1);
- if ((unsigned long)dst_idx+1 >= n) {
+ if ((unsigned long)dst_idx + 1 >= n) {
// Single destination word
if (last)
first &= last;
if (shift < 0) {
// Single source word
*dst = comp(*src << left, *dst, first);
- } else if (1+(unsigned long)src_idx >= n) {
+ } else if (1 + (unsigned long)src_idx >= n) {
// Single source word
*dst = comp(*src >> right, *dst, first);
} else {
@@ -1618,7 +2835,7 @@ static void bitcpy_rev(unsigned long *dst, int dst_idx,
// Single source word
*dst = comp(d0 << left, *dst, first);
dst--;
- n -= dst_idx+1;
+ n -= dst_idx + 1;
} else {
// 2 source words
d1 = *src--;
@@ -1626,7 +2843,7 @@ static void bitcpy_rev(unsigned long *dst, int dst_idx,
first);
d0 = d1;
dst--;
- n -= dst_idx+1;
+ n -= dst_idx + 1;
}
// Main chunk
@@ -1670,30 +2887,30 @@ static void bitcpy_rev(unsigned long *dst, int dst_idx,
}
- /*
- * Unaligned forward inverting bit copy using 32-bit or 64-bit memory
- * accesses
- */
+ /*
+ * Unaligned forward inverting bit copy using 32-bit or 64-bit memory
+ * accesses
+ */
static void bitcpy_not(unsigned long *dst, int dst_idx,
const unsigned long *src, int src_idx, u32 n)
{
unsigned long first, last;
- int shift = dst_idx-src_idx, left, right;
+ int shift = dst_idx - src_idx, left, right;
unsigned long d0, d1;
int m;
if (!n)
return;
- shift = dst_idx-src_idx;
+ shift = dst_idx - src_idx;
first = ~0UL >> dst_idx;
- last = ~(~0UL >> ((dst_idx+n) % BITS_PER_LONG));
+ last = ~(~0UL >> ((dst_idx + n) % BITS_PER_LONG));
if (!shift) {
// Same alignment for source and dest
- if (dst_idx+n <= BITS_PER_LONG) {
+ if (dst_idx + n <= BITS_PER_LONG) {
// Single word
if (last)
first &= last;
@@ -1705,7 +2922,7 @@ static void bitcpy_not(unsigned long *dst, int dst_idx,
*dst = comp(~*src, *dst, first);
dst++;
src++;
- n -= BITS_PER_LONG-dst_idx;
+ n -= BITS_PER_LONG - dst_idx;
}
// Main chunk
@@ -1731,17 +2948,17 @@ static void bitcpy_not(unsigned long *dst, int dst_idx,
} else {
// Different alignment for source and dest
- right = shift & (BITS_PER_LONG-1);
- left = -shift & (BITS_PER_LONG-1);
+ right = shift & (BITS_PER_LONG - 1);
+ left = -shift & (BITS_PER_LONG - 1);
- if (dst_idx+n <= BITS_PER_LONG) {
+ if (dst_idx + n <= BITS_PER_LONG) {
// Single destination word
if (last)
first &= last;
if (shift > 0) {
// Single source word
*dst = comp(~*src >> right, *dst, first);
- } else if (src_idx+n <= BITS_PER_LONG) {
+ } else if (src_idx + n <= BITS_PER_LONG) {
// Single source word
*dst = comp(~*src << left, *dst, first);
} else {
@@ -1759,7 +2976,7 @@ static void bitcpy_not(unsigned long *dst, int dst_idx,
// Single source word
*dst = comp(d0 >> right, *dst, first);
dst++;
- n -= BITS_PER_LONG-dst_idx;
+ n -= BITS_PER_LONG - dst_idx;
} else {
// 2 source words
d1 = ~*src++;
@@ -1767,7 +2984,7 @@ static void bitcpy_not(unsigned long *dst, int dst_idx,
first);
d0 = d1;
dst++;
- n -= BITS_PER_LONG-dst_idx;
+ n -= BITS_PER_LONG - dst_idx;
}
// Main chunk
@@ -1811,9 +3028,9 @@ static void bitcpy_not(unsigned long *dst, int dst_idx,
}
- /*
- * Unaligned 32-bit pattern fill using 32/64-bit memory accesses
- */
+ /*
+ * Unaligned 32-bit pattern fill using 32/64-bit memory accesses
+ */
static void bitfill32(unsigned long *dst, int dst_idx, u32 pat, u32 n)
{
@@ -1828,9 +3045,9 @@ static void bitfill32(unsigned long *dst, int dst_idx, u32 pat, u32 n)
#endif
first = ~0UL >> dst_idx;
- last = ~(~0UL >> ((dst_idx+n) % BITS_PER_LONG));
+ last = ~(~0UL >> ((dst_idx + n) % BITS_PER_LONG));
- if (dst_idx+n <= BITS_PER_LONG) {
+ if (dst_idx + n <= BITS_PER_LONG) {
// Single word
if (last)
first &= last;
@@ -1841,7 +3058,7 @@ static void bitfill32(unsigned long *dst, int dst_idx, u32 pat, u32 n)
if (first) {
*dst = comp(val, *dst, first);
dst++;
- n -= BITS_PER_LONG-dst_idx;
+ n -= BITS_PER_LONG - dst_idx;
}
// Main chunk
@@ -1867,9 +3084,9 @@ static void bitfill32(unsigned long *dst, int dst_idx, u32 pat, u32 n)
}
- /*
- * Unaligned 32-bit pattern xor using 32/64-bit memory accesses
- */
+ /*
+ * Unaligned 32-bit pattern xor using 32/64-bit memory accesses
+ */
static void bitxor32(unsigned long *dst, int dst_idx, u32 pat, u32 n)
{
@@ -1884,9 +3101,9 @@ static void bitxor32(unsigned long *dst, int dst_idx, u32 pat, u32 n)
#endif
first = ~0UL >> dst_idx;
- last = ~(~0UL >> ((dst_idx+n) % BITS_PER_LONG));
+ last = ~(~0UL >> ((dst_idx + n) % BITS_PER_LONG));
- if (dst_idx+n <= BITS_PER_LONG) {
+ if (dst_idx + n <= BITS_PER_LONG) {
// Single word
if (last)
first &= last;
@@ -1897,7 +3114,7 @@ static void bitxor32(unsigned long *dst, int dst_idx, u32 pat, u32 n)
if (first) {
*dst = xor(val, *dst, first);
dst++;
- n -= BITS_PER_LONG-dst_idx;
+ n -= BITS_PER_LONG - dst_idx;
}
// Main chunk
@@ -1924,12 +3141,12 @@ static inline void fill_one_line(int bpp, unsigned long next_plane,
{
while (1) {
dst += dst_idx >> SHIFT_PER_LONG;
- dst_idx &= (BITS_PER_LONG-1);
+ dst_idx &= (BITS_PER_LONG - 1);
bitfill32(dst, dst_idx, color & 1 ? ~0 : 0, n);
if (!--bpp)
break;
color >>= 1;
- dst_idx += next_plane*8;
+ dst_idx += next_plane * 8;
}
}
@@ -1939,12 +3156,12 @@ static inline void xor_one_line(int bpp, unsigned long next_plane,
{
while (color) {
dst += dst_idx >> SHIFT_PER_LONG;
- dst_idx &= (BITS_PER_LONG-1);
+ dst_idx &= (BITS_PER_LONG - 1);
bitxor32(dst, dst_idx, color & 1 ? ~0 : 0, n);
if (!--bpp)
break;
color >>= 1;
- dst_idx += next_plane*8;
+ dst_idx += next_plane * 8;
}
}
@@ -1952,7 +3169,7 @@ static inline void xor_one_line(int bpp, unsigned long next_plane,
static void amifb_fillrect(struct fb_info *info,
const struct fb_fillrect *rect)
{
- struct amifb_par *par = (struct amifb_par *)info->par;
+ struct amifb_par *par = info->par;
int dst_idx, x2, y2;
unsigned long *dst;
u32 width, height;
@@ -1972,23 +3189,23 @@ static void amifb_fillrect(struct fb_info *info,
height = y2 - rect->dy;
dst = (unsigned long *)
- ((unsigned long)info->screen_base & ~(BYTES_PER_LONG-1));
- dst_idx = ((unsigned long)info->screen_base & (BYTES_PER_LONG-1))*8;
- dst_idx += rect->dy*par->next_line*8+rect->dx;
+ ((unsigned long)info->screen_base & ~(BYTES_PER_LONG - 1));
+ dst_idx = ((unsigned long)info->screen_base & (BYTES_PER_LONG - 1)) * 8;
+ dst_idx += rect->dy * par->next_line * 8 + rect->dx;
while (height--) {
switch (rect->rop) {
- case ROP_COPY:
+ case ROP_COPY:
fill_one_line(info->var.bits_per_pixel,
par->next_plane, dst, dst_idx, width,
rect->color);
break;
- case ROP_XOR:
+ case ROP_XOR:
xor_one_line(info->var.bits_per_pixel, par->next_plane,
dst, dst_idx, width, rect->color);
break;
}
- dst_idx += par->next_line*8;
+ dst_idx += par->next_line * 8;
}
}
@@ -1998,14 +3215,14 @@ static inline void copy_one_line(int bpp, unsigned long next_plane,
{
while (1) {
dst += dst_idx >> SHIFT_PER_LONG;
- dst_idx &= (BITS_PER_LONG-1);
+ dst_idx &= (BITS_PER_LONG - 1);
src += src_idx >> SHIFT_PER_LONG;
- src_idx &= (BITS_PER_LONG-1);
+ src_idx &= (BITS_PER_LONG - 1);
bitcpy(dst, dst_idx, src, src_idx, n);
if (!--bpp)
break;
- dst_idx += next_plane*8;
- src_idx += next_plane*8;
+ dst_idx += next_plane * 8;
+ src_idx += next_plane * 8;
}
}
@@ -2015,14 +3232,14 @@ static inline void copy_one_line_rev(int bpp, unsigned long next_plane,
{
while (1) {
dst += dst_idx >> SHIFT_PER_LONG;
- dst_idx &= (BITS_PER_LONG-1);
+ dst_idx &= (BITS_PER_LONG - 1);
src += src_idx >> SHIFT_PER_LONG;
- src_idx &= (BITS_PER_LONG-1);
+ src_idx &= (BITS_PER_LONG - 1);
bitcpy_rev(dst, dst_idx, src, src_idx, n);
if (!--bpp)
break;
- dst_idx += next_plane*8;
- src_idx += next_plane*8;
+ dst_idx += next_plane * 8;
+ src_idx += next_plane * 8;
}
}
@@ -2030,7 +3247,7 @@ static inline void copy_one_line_rev(int bpp, unsigned long next_plane,
static void amifb_copyarea(struct fb_info *info,
const struct fb_copyarea *area)
{
- struct amifb_par *par = (struct amifb_par *)info->par;
+ struct amifb_par *par = info->par;
int x2, y2;
u32 dx, dy, sx, sy, width, height;
unsigned long *dst, *src;
@@ -2065,16 +3282,16 @@ static void amifb_copyarea(struct fb_info *info,
rev_copy = 1;
}
dst = (unsigned long *)
- ((unsigned long)info->screen_base & ~(BYTES_PER_LONG-1));
+ ((unsigned long)info->screen_base & ~(BYTES_PER_LONG - 1));
src = dst;
- dst_idx = ((unsigned long)info->screen_base & (BYTES_PER_LONG-1))*8;
+ dst_idx = ((unsigned long)info->screen_base & (BYTES_PER_LONG - 1)) * 8;
src_idx = dst_idx;
- dst_idx += dy*par->next_line*8+dx;
- src_idx += sy*par->next_line*8+sx;
+ dst_idx += dy * par->next_line * 8 + dx;
+ src_idx += sy * par->next_line * 8 + sx;
if (rev_copy) {
while (height--) {
- dst_idx -= par->next_line*8;
- src_idx -= par->next_line*8;
+ dst_idx -= par->next_line * 8;
+ src_idx -= par->next_line * 8;
copy_one_line_rev(info->var.bits_per_pixel,
par->next_plane, dst, dst_idx, src,
src_idx, width);
@@ -2084,8 +3301,8 @@ static void amifb_copyarea(struct fb_info *info,
copy_one_line(info->var.bits_per_pixel,
par->next_plane, dst, dst_idx, src,
src_idx, width);
- dst_idx += par->next_line*8;
- src_idx += par->next_line*8;
+ dst_idx += par->next_line * 8;
+ src_idx += par->next_line * 8;
}
}
}
@@ -2095,34 +3312,35 @@ static inline void expand_one_line(int bpp, unsigned long next_plane,
unsigned long *dst, int dst_idx, u32 n,
const u8 *data, u32 bgcolor, u32 fgcolor)
{
- const unsigned long *src;
- int src_idx;
-
- while (1) {
- dst += dst_idx >> SHIFT_PER_LONG;
- dst_idx &= (BITS_PER_LONG-1);
- if ((bgcolor ^ fgcolor) & 1) {
- src = (unsigned long *)((unsigned long)data & ~(BYTES_PER_LONG-1));
- src_idx = ((unsigned long)data & (BYTES_PER_LONG-1))*8;
- if (fgcolor & 1)
- bitcpy(dst, dst_idx, src, src_idx, n);
- else
- bitcpy_not(dst, dst_idx, src, src_idx, n);
- /* set or clear */
- } else
- bitfill32(dst, dst_idx, fgcolor & 1 ? ~0 : 0, n);
- if (!--bpp)
- break;
- bgcolor >>= 1;
- fgcolor >>= 1;
- dst_idx += next_plane*8;
- }
+ const unsigned long *src;
+ int src_idx;
+
+ while (1) {
+ dst += dst_idx >> SHIFT_PER_LONG;
+ dst_idx &= (BITS_PER_LONG - 1);
+ if ((bgcolor ^ fgcolor) & 1) {
+ src = (unsigned long *)
+ ((unsigned long)data & ~(BYTES_PER_LONG - 1));
+ src_idx = ((unsigned long)data & (BYTES_PER_LONG - 1)) * 8;
+ if (fgcolor & 1)
+ bitcpy(dst, dst_idx, src, src_idx, n);
+ else
+ bitcpy_not(dst, dst_idx, src, src_idx, n);
+ /* set or clear */
+ } else
+ bitfill32(dst, dst_idx, fgcolor & 1 ? ~0 : 0, n);
+ if (!--bpp)
+ break;
+ bgcolor >>= 1;
+ fgcolor >>= 1;
+ dst_idx += next_plane * 8;
+ }
}
static void amifb_imageblit(struct fb_info *info, const struct fb_image *image)
{
- struct amifb_par *par = (struct amifb_par *)info->par;
+ struct amifb_par *par = info->par;
int x2, y2;
unsigned long *dst;
int dst_idx;
@@ -2145,17 +3363,17 @@ static void amifb_imageblit(struct fb_info *info, const struct fb_image *image)
if (image->depth == 1) {
dst = (unsigned long *)
- ((unsigned long)info->screen_base & ~(BYTES_PER_LONG-1));
- dst_idx = ((unsigned long)info->screen_base & (BYTES_PER_LONG-1))*8;
- dst_idx += dy*par->next_line*8+dx;
+ ((unsigned long)info->screen_base & ~(BYTES_PER_LONG - 1));
+ dst_idx = ((unsigned long)info->screen_base & (BYTES_PER_LONG - 1)) * 8;
+ dst_idx += dy * par->next_line * 8 + dx;
src = image->data;
- pitch = (image->width+7)/8;
+ pitch = (image->width + 7) / 8;
while (height--) {
expand_one_line(info->var.bits_per_pixel,
par->next_plane, dst, dst_idx, width,
src, image->bg_color,
image->fg_color);
- dst_idx += par->next_line*8;
+ dst_idx += par->next_line * 8;
src += pitch;
}
} else {
@@ -2182,45 +3400,119 @@ static int amifb_ioctl(struct fb_info *info,
int i;
switch (cmd) {
- case FBIOGET_FCURSORINFO:
- i = ami_get_fix_cursorinfo(&crsr.fix);
- if (i)
- return i;
- return copy_to_user(argp, &crsr.fix,
- sizeof(crsr.fix)) ? -EFAULT : 0;
-
- case FBIOGET_VCURSORINFO:
- i = ami_get_var_cursorinfo(&crsr.var,
- ((struct fb_var_cursorinfo __user *)arg)->data);
- if (i)
- return i;
- return copy_to_user(argp, &crsr.var,
- sizeof(crsr.var)) ? -EFAULT : 0;
-
- case FBIOPUT_VCURSORINFO:
- if (copy_from_user(&crsr.var, argp, sizeof(crsr.var)))
- return -EFAULT;
- return ami_set_var_cursorinfo(&crsr.var,
- ((struct fb_var_cursorinfo __user *)arg)->data);
-
- case FBIOGET_CURSORSTATE:
- i = ami_get_cursorstate(&crsr.state);
- if (i)
- return i;
- return copy_to_user(argp, &crsr.state,
- sizeof(crsr.state)) ? -EFAULT : 0;
-
- case FBIOPUT_CURSORSTATE:
- if (copy_from_user(&crsr.state, argp,
- sizeof(crsr.state)))
- return -EFAULT;
- return ami_set_cursorstate(&crsr.state);
+ case FBIOGET_FCURSORINFO:
+ i = ami_get_fix_cursorinfo(&crsr.fix, info->par);
+ if (i)
+ return i;
+ return copy_to_user(argp, &crsr.fix,
+ sizeof(crsr.fix)) ? -EFAULT : 0;
+
+ case FBIOGET_VCURSORINFO:
+ i = ami_get_var_cursorinfo(&crsr.var,
+ ((struct fb_var_cursorinfo __user *)arg)->data,
+ info->par);
+ if (i)
+ return i;
+ return copy_to_user(argp, &crsr.var,
+ sizeof(crsr.var)) ? -EFAULT : 0;
+
+ case FBIOPUT_VCURSORINFO:
+ if (copy_from_user(&crsr.var, argp, sizeof(crsr.var)))
+ return -EFAULT;
+ return ami_set_var_cursorinfo(&crsr.var,
+ ((struct fb_var_cursorinfo __user *)arg)->data,
+ info->par);
+
+ case FBIOGET_CURSORSTATE:
+ i = ami_get_cursorstate(&crsr.state, info->par);
+ if (i)
+ return i;
+ return copy_to_user(argp, &crsr.state,
+ sizeof(crsr.state)) ? -EFAULT : 0;
+
+ case FBIOPUT_CURSORSTATE:
+ if (copy_from_user(&crsr.state, argp, sizeof(crsr.state)))
+ return -EFAULT;
+ return ami_set_cursorstate(&crsr.state, info->par);
}
return -EINVAL;
}
/*
+ * Flash the cursor (called by VBlank interrupt)
+ */
+
+static int flash_cursor(void)
+{
+ static int cursorcount = 1;
+
+ if (cursormode == FB_CURSOR_FLASH) {
+ if (!--cursorcount) {
+ cursorstate = -cursorstate;
+ cursorcount = cursorrate;
+ if (!is_blanked)
+ return 1;
+ }
+ }
+ return 0;
+}
+
+ /*
+ * VBlank Display Interrupt
+ */
+
+static irqreturn_t amifb_interrupt(int irq, void *dev_id)
+{
+ struct amifb_par *par = dev_id;
+
+ if (do_vmode_pan || do_vmode_full)
+ ami_update_display(par);
+
+ if (do_vmode_full)
+ ami_init_display(par);
+
+ if (do_vmode_pan) {
+ flash_cursor();
+ ami_rebuild_copper(par);
+ do_cursor = do_vmode_pan = 0;
+ } else if (do_cursor) {
+ flash_cursor();
+ ami_set_sprite(par);
+ do_cursor = 0;
+ } else {
+ if (flash_cursor())
+ ami_set_sprite(par);
+ }
+
+ if (do_blank) {
+ ami_do_blank(par);
+ do_blank = 0;
+ }
+
+ if (do_vmode_full) {
+ ami_reinit_copper(par);
+ do_vmode_full = 0;
+ }
+ return IRQ_HANDLED;
+}
+
+
+static struct fb_ops amifb_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = amifb_check_var,
+ .fb_set_par = amifb_set_par,
+ .fb_setcolreg = amifb_setcolreg,
+ .fb_blank = amifb_blank,
+ .fb_pan_display = amifb_pan_display,
+ .fb_fillrect = amifb_fillrect,
+ .fb_copyarea = amifb_copyarea,
+ .fb_imageblit = amifb_imageblit,
+ .fb_ioctl = amifb_ioctl,
+};
+
+
+ /*
* Allocate, Clear and Align a Block of Chip Memory
*/
@@ -2250,6 +3542,7 @@ static inline void chipfree(void)
static int __init amifb_probe(struct platform_device *pdev)
{
+ struct fb_info *info;
int tag, i, err = 0;
u_long chipptr;
u_int defmode;
@@ -2265,71 +3558,80 @@ static int __init amifb_probe(struct platform_device *pdev)
#endif
custom.dmacon = DMAF_ALL | DMAF_MASTER;
+ info = framebuffer_alloc(sizeof(struct amifb_par), &pdev->dev);
+ if (!info) {
+ dev_err(&pdev->dev, "framebuffer_alloc failed\n");
+ return -ENOMEM;
+ }
+
+ strcpy(info->fix.id, "Amiga ");
+ info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
+ info->fix.accel = FB_ACCEL_AMIGABLITT;
+
switch (amiga_chipset) {
#ifdef CONFIG_FB_AMIGA_OCS
- case CS_OCS:
- strcat(fb_info.fix.id, "OCS");
+ case CS_OCS:
+ strcat(info->fix.id, "OCS");
default_chipset:
- chipset = TAG_OCS;
- maxdepth[TAG_SHRES] = 0; /* OCS means no SHRES */
- maxdepth[TAG_HIRES] = 4;
- maxdepth[TAG_LORES] = 6;
- maxfmode = TAG_FMODE_1;
- defmode = amiga_vblank == 50 ? DEFMODE_PAL
- : DEFMODE_NTSC;
- fb_info.fix.smem_len = VIDEOMEMSIZE_OCS;
- break;
+ chipset = TAG_OCS;
+ maxdepth[TAG_SHRES] = 0; /* OCS means no SHRES */
+ maxdepth[TAG_HIRES] = 4;
+ maxdepth[TAG_LORES] = 6;
+ maxfmode = TAG_FMODE_1;
+ defmode = amiga_vblank == 50 ? DEFMODE_PAL : DEFMODE_NTSC;
+ info->fix.smem_len = VIDEOMEMSIZE_OCS;
+ break;
#endif /* CONFIG_FB_AMIGA_OCS */
#ifdef CONFIG_FB_AMIGA_ECS
- case CS_ECS:
- strcat(fb_info.fix.id, "ECS");
- chipset = TAG_ECS;
- maxdepth[TAG_SHRES] = 2;
- maxdepth[TAG_HIRES] = 4;
- maxdepth[TAG_LORES] = 6;
- maxfmode = TAG_FMODE_1;
- if (AMIGAHW_PRESENT(AMBER_FF))
- defmode = amiga_vblank == 50 ? DEFMODE_AMBER_PAL
- : DEFMODE_AMBER_NTSC;
- else
- defmode = amiga_vblank == 50 ? DEFMODE_PAL
- : DEFMODE_NTSC;
- if (amiga_chip_avail()-CHIPRAM_SAFETY_LIMIT >
- VIDEOMEMSIZE_ECS_2M)
- fb_info.fix.smem_len = VIDEOMEMSIZE_ECS_2M;
- else
- fb_info.fix.smem_len = VIDEOMEMSIZE_ECS_1M;
- break;
+ case CS_ECS:
+ strcat(info->fix.id, "ECS");
+ chipset = TAG_ECS;
+ maxdepth[TAG_SHRES] = 2;
+ maxdepth[TAG_HIRES] = 4;
+ maxdepth[TAG_LORES] = 6;
+ maxfmode = TAG_FMODE_1;
+ if (AMIGAHW_PRESENT(AMBER_FF))
+ defmode = amiga_vblank == 50 ? DEFMODE_AMBER_PAL
+ : DEFMODE_AMBER_NTSC;
+ else
+ defmode = amiga_vblank == 50 ? DEFMODE_PAL
+ : DEFMODE_NTSC;
+ if (amiga_chip_avail() - CHIPRAM_SAFETY_LIMIT >
+ VIDEOMEMSIZE_ECS_2M)
+ info->fix.smem_len = VIDEOMEMSIZE_ECS_2M;
+ else
+ info->fix.smem_len = VIDEOMEMSIZE_ECS_1M;
+ break;
#endif /* CONFIG_FB_AMIGA_ECS */
#ifdef CONFIG_FB_AMIGA_AGA
- case CS_AGA:
- strcat(fb_info.fix.id, "AGA");
- chipset = TAG_AGA;
- maxdepth[TAG_SHRES] = 8;
- maxdepth[TAG_HIRES] = 8;
- maxdepth[TAG_LORES] = 8;
- maxfmode = TAG_FMODE_4;
- defmode = DEFMODE_AGA;
- if (amiga_chip_avail()-CHIPRAM_SAFETY_LIMIT >
- VIDEOMEMSIZE_AGA_2M)
- fb_info.fix.smem_len = VIDEOMEMSIZE_AGA_2M;
- else
- fb_info.fix.smem_len = VIDEOMEMSIZE_AGA_1M;
- break;
+ case CS_AGA:
+ strcat(info->fix.id, "AGA");
+ chipset = TAG_AGA;
+ maxdepth[TAG_SHRES] = 8;
+ maxdepth[TAG_HIRES] = 8;
+ maxdepth[TAG_LORES] = 8;
+ maxfmode = TAG_FMODE_4;
+ defmode = DEFMODE_AGA;
+ if (amiga_chip_avail() - CHIPRAM_SAFETY_LIMIT >
+ VIDEOMEMSIZE_AGA_2M)
+ info->fix.smem_len = VIDEOMEMSIZE_AGA_2M;
+ else
+ info->fix.smem_len = VIDEOMEMSIZE_AGA_1M;
+ break;
#endif /* CONFIG_FB_AMIGA_AGA */
- default:
+ default:
#ifdef CONFIG_FB_AMIGA_OCS
- printk("Unknown graphics chipset, defaulting to OCS\n");
- strcat(fb_info.fix.id, "Unknown");
- goto default_chipset;
+ printk("Unknown graphics chipset, defaulting to OCS\n");
+ strcat(info->fix.id, "Unknown");
+ goto default_chipset;
#else /* CONFIG_FB_AMIGA_OCS */
- err = -ENODEV;
- goto amifb_error;
+ err = -ENODEV;
+ goto release;
#endif /* CONFIG_FB_AMIGA_OCS */
- break;
+ break;
}
/*
@@ -2356,42 +3658,44 @@ default_chipset:
}
}
- /*
- * These monitor specs are for a typical Amiga monitor (e.g. A1960)
- */
- if (fb_info.monspecs.hfmin == 0) {
- fb_info.monspecs.hfmin = 15000;
- fb_info.monspecs.hfmax = 38000;
- fb_info.monspecs.vfmin = 49;
- fb_info.monspecs.vfmax = 90;
+ if (amifb_hfmin) {
+ info->monspecs.hfmin = amifb_hfmin;
+ info->monspecs.hfmax = amifb_hfmax;
+ info->monspecs.vfmin = amifb_vfmin;
+ info->monspecs.vfmax = amifb_vfmax;
+ } else {
+ /*
+ * These are for a typical Amiga monitor (e.g. A1960)
+ */
+ info->monspecs.hfmin = 15000;
+ info->monspecs.hfmax = 38000;
+ info->monspecs.vfmin = 49;
+ info->monspecs.vfmax = 90;
}
- fb_info.fbops = &amifb_ops;
- fb_info.par = &currentpar;
- fb_info.flags = FBINFO_DEFAULT;
- fb_info.device = &pdev->dev;
+ info->fbops = &amifb_ops;
+ info->flags = FBINFO_DEFAULT;
+ info->device = &pdev->dev;
- if (!fb_find_mode(&fb_info.var, &fb_info, mode_option, ami_modedb,
+ if (!fb_find_mode(&info->var, info, mode_option, ami_modedb,
NUM_TOTAL_MODES, &ami_modedb[defmode], 4)) {
err = -EINVAL;
- goto amifb_error;
+ goto release;
}
fb_videomode_to_modelist(ami_modedb, NUM_TOTAL_MODES,
- &fb_info.modelist);
+ &info->modelist);
round_down_bpp = 0;
- chipptr = chipalloc(fb_info.fix.smem_len+
- SPRITEMEMSIZE+
- DUMMYSPRITEMEMSIZE+
- COPINITSIZE+
- 4*COPLISTSIZE);
+ chipptr = chipalloc(info->fix.smem_len + SPRITEMEMSIZE +
+ DUMMYSPRITEMEMSIZE + COPINITSIZE +
+ 4 * COPLISTSIZE);
if (!chipptr) {
err = -ENOMEM;
- goto amifb_error;
+ goto release;
}
- assignchunk(videomemory, u_long, chipptr, fb_info.fix.smem_len);
+ assignchunk(videomemory, u_long, chipptr, info->fix.smem_len);
assignchunk(spritememory, u_long, chipptr, SPRITEMEMSIZE);
assignchunk(dummysprite, u_short *, chipptr, DUMMYSPRITEMEMSIZE);
assignchunk(copdisplay.init, copins *, chipptr, COPINITSIZE);
@@ -2403,1398 +3707,78 @@ default_chipset:
/*
* access the videomem with writethrough cache
*/
- fb_info.fix.smem_start = (u_long)ZTWO_PADDR(videomemory);
- videomemory = (u_long)ioremap_writethrough(fb_info.fix.smem_start,
- fb_info.fix.smem_len);
+ info->fix.smem_start = (u_long)ZTWO_PADDR(videomemory);
+ videomemory = (u_long)ioremap_writethrough(info->fix.smem_start,
+ info->fix.smem_len);
if (!videomemory) {
- printk("amifb: WARNING! unable to map videomem cached writethrough\n");
- fb_info.screen_base = (char *)ZTWO_VADDR(fb_info.fix.smem_start);
+ dev_warn(&pdev->dev,
+ "Unable to map videomem cached writethrough\n");
+ info->screen_base = (char *)ZTWO_VADDR(info->fix.smem_start);
} else
- fb_info.screen_base = (char *)videomemory;
+ info->screen_base = (char *)videomemory;
memset(dummysprite, 0, DUMMYSPRITEMEMSIZE);
/*
- * Enable Display DMA
- */
-
- custom.dmacon = DMAF_SETCLR | DMAF_MASTER | DMAF_RASTER | DMAF_COPPER |
- DMAF_BLITTER | DMAF_SPRITE;
-
- /*
* Make sure the Copper has something to do
*/
-
ami_init_copper();
- if (request_irq(IRQ_AMIGA_COPPER, amifb_interrupt, 0,
- "fb vertb handler", &currentpar)) {
- err = -EBUSY;
- goto amifb_error;
- }
-
- err = fb_alloc_cmap(&fb_info.cmap, 1<<fb_info.var.bits_per_pixel, 0);
- if (err)
- goto amifb_error;
-
- if (register_framebuffer(&fb_info) < 0) {
- err = -EINVAL;
- goto amifb_error;
- }
-
- printk("fb%d: %s frame buffer device, using %dK of video memory\n",
- fb_info.node, fb_info.fix.id, fb_info.fix.smem_len>>10);
-
- return 0;
-
-amifb_error:
- amifb_deinit(pdev);
- return err;
-}
-
-static void amifb_deinit(struct platform_device *pdev)
-{
- if (fb_info.cmap.len)
- fb_dealloc_cmap(&fb_info.cmap);
- fb_dealloc_cmap(&fb_info.cmap);
- chipfree();
- if (videomemory)
- iounmap((void*)videomemory);
- custom.dmacon = DMAF_ALL | DMAF_MASTER;
-}
-
-
- /*
- * Blank the display.
- */
-
-static int amifb_blank(int blank, struct fb_info *info)
-{
- do_blank = blank ? blank : -1;
-
- return 0;
-}
-
- /*
- * Flash the cursor (called by VBlank interrupt)
- */
-
-static int flash_cursor(void)
-{
- static int cursorcount = 1;
-
- if (cursormode == FB_CURSOR_FLASH) {
- if (!--cursorcount) {
- cursorstate = -cursorstate;
- cursorcount = cursorrate;
- if (!is_blanked)
- return 1;
- }
- }
- return 0;
-}
-
- /*
- * VBlank Display Interrupt
- */
-
-static irqreturn_t amifb_interrupt(int irq, void *dev_id)
-{
- if (do_vmode_pan || do_vmode_full)
- ami_update_display();
-
- if (do_vmode_full)
- ami_init_display();
-
- if (do_vmode_pan) {
- flash_cursor();
- ami_rebuild_copper();
- do_cursor = do_vmode_pan = 0;
- } else if (do_cursor) {
- flash_cursor();
- ami_set_sprite();
- do_cursor = 0;
- } else {
- if (flash_cursor())
- ami_set_sprite();
- }
-
- if (do_blank) {
- ami_do_blank();
- do_blank = 0;
- }
-
- if (do_vmode_full) {
- ami_reinit_copper();
- do_vmode_full = 0;
- }
- return IRQ_HANDLED;
-}
-
-/* --------------------------- Hardware routines --------------------------- */
-
- /*
- * Get the video params out of `var'. If a value doesn't fit, round
- * it up, if it's too big, return -EINVAL.
- */
-
-static int ami_decode_var(struct fb_var_screeninfo *var,
- struct amifb_par *par)
-{
- u_short clk_shift, line_shift;
- u_long maxfetchstop, fstrt, fsize, fconst, xres_n, yres_n;
- u_int htotal, vtotal;
-
- /*
- * Find a matching Pixel Clock
- */
-
- for (clk_shift = TAG_SHRES; clk_shift <= TAG_LORES; clk_shift++)
- if (var->pixclock <= pixclock[clk_shift])
- break;
- if (clk_shift > TAG_LORES) {
- DPRINTK("pixclock too high\n");
- return -EINVAL;
- }
- par->clk_shift = clk_shift;
-
- /*
- * Check the Geometry Values
- */
-
- if ((par->xres = var->xres) < 64)
- par->xres = 64;
- if ((par->yres = var->yres) < 64)
- par->yres = 64;
- if ((par->vxres = var->xres_virtual) < par->xres)
- par->vxres = par->xres;
- if ((par->vyres = var->yres_virtual) < par->yres)
- par->vyres = par->yres;
-
- par->bpp = var->bits_per_pixel;
- if (!var->nonstd) {
- if (par->bpp < 1)
- par->bpp = 1;
- if (par->bpp > maxdepth[clk_shift]) {
- if (round_down_bpp && maxdepth[clk_shift])
- par->bpp = maxdepth[clk_shift];
- else {
- DPRINTK("invalid bpp\n");
- return -EINVAL;
- }
- }
- } else if (var->nonstd == FB_NONSTD_HAM) {
- if (par->bpp < 6)
- par->bpp = 6;
- if (par->bpp != 6) {
- if (par->bpp < 8)
- par->bpp = 8;
- if (par->bpp != 8 || !IS_AGA) {
- DPRINTK("invalid bpp for ham mode\n");
- return -EINVAL;
- }
- }
- } else {
- DPRINTK("unknown nonstd mode\n");
- return -EINVAL;
- }
-
- /*
-	 * FB_VMODE_SMOOTH_XPAN will be cleared if one of the following
-	 * checks fails and smooth scrolling is not possible
- */
-
- par->vmode = var->vmode | FB_VMODE_SMOOTH_XPAN;
- switch (par->vmode & FB_VMODE_MASK) {
- case FB_VMODE_INTERLACED:
- line_shift = 0;
- break;
- case FB_VMODE_NONINTERLACED:
- line_shift = 1;
- break;
- case FB_VMODE_DOUBLE:
- if (!IS_AGA) {
- DPRINTK("double mode only possible with aga\n");
- return -EINVAL;
- }
- line_shift = 2;
- break;
- default:
- DPRINTK("unknown video mode\n");
- return -EINVAL;
- break;
- }
- par->line_shift = line_shift;
-
- /*
- * Vertical and Horizontal Timings
- */
-
- xres_n = par->xres<<clk_shift;
- yres_n = par->yres<<line_shift;
- par->htotal = down8((var->left_margin+par->xres+var->right_margin+var->hsync_len)<<clk_shift);
- par->vtotal = down2(((var->upper_margin+par->yres+var->lower_margin+var->vsync_len)<<line_shift)+1);
-
- if (IS_AGA)
- par->bplcon3 = sprpixmode[clk_shift];
- else
- par->bplcon3 = 0;
- if (var->sync & FB_SYNC_BROADCAST) {
- par->diwstop_h = par->htotal-((var->right_margin-var->hsync_len)<<clk_shift);
- if (IS_AGA)
- par->diwstop_h += mod4(var->hsync_len);
- else
- par->diwstop_h = down4(par->diwstop_h);
-
- par->diwstrt_h = par->diwstop_h - xres_n;
- par->diwstop_v = par->vtotal-((var->lower_margin-var->vsync_len)<<line_shift);
- par->diwstrt_v = par->diwstop_v - yres_n;
- if (par->diwstop_h >= par->htotal+8) {
- DPRINTK("invalid diwstop_h\n");
- return -EINVAL;
- }
- if (par->diwstop_v > par->vtotal) {
- DPRINTK("invalid diwstop_v\n");
- return -EINVAL;
- }
-
- if (!IS_OCS) {
- /* Initialize sync with some reasonable values for pwrsave */
- par->hsstrt = 160;
- par->hsstop = 320;
- par->vsstrt = 30;
- par->vsstop = 34;
- } else {
- par->hsstrt = 0;
- par->hsstop = 0;
- par->vsstrt = 0;
- par->vsstop = 0;
- }
- if (par->vtotal > (PAL_VTOTAL+NTSC_VTOTAL)/2) {
- /* PAL video mode */
- if (par->htotal != PAL_HTOTAL) {
- DPRINTK("htotal invalid for pal\n");
- return -EINVAL;
- }
- if (par->diwstrt_h < PAL_DIWSTRT_H) {
- DPRINTK("diwstrt_h too low for pal\n");
- return -EINVAL;
- }
- if (par->diwstrt_v < PAL_DIWSTRT_V) {
- DPRINTK("diwstrt_v too low for pal\n");
- return -EINVAL;
- }
- htotal = PAL_HTOTAL>>clk_shift;
- vtotal = PAL_VTOTAL>>1;
- if (!IS_OCS) {
- par->beamcon0 = BMC0_PAL;
- par->bplcon3 |= BPC3_BRDRBLNK;
- } else if (AMIGAHW_PRESENT(AGNUS_HR_PAL) ||
- AMIGAHW_PRESENT(AGNUS_HR_NTSC)) {
- par->beamcon0 = BMC0_PAL;
- par->hsstop = 1;
- } else if (amiga_vblank != 50) {
- DPRINTK("pal not supported by this chipset\n");
- return -EINVAL;
- }
- } else {
- /* NTSC video mode
- * In the AGA chipset seems to be hardware bug with BPC3_BRDRBLNK
-	 * The AGA chipset seems to have a hardware bug with BPC3_BRDRBLNK
-	 * when NTSC is activated, so it is better to keep diwstop_h <= 1812
- if (par->htotal != NTSC_HTOTAL) {
- DPRINTK("htotal invalid for ntsc\n");
- return -EINVAL;
- }
- if (par->diwstrt_h < NTSC_DIWSTRT_H) {
- DPRINTK("diwstrt_h too low for ntsc\n");
- return -EINVAL;
- }
- if (par->diwstrt_v < NTSC_DIWSTRT_V) {
- DPRINTK("diwstrt_v too low for ntsc\n");
- return -EINVAL;
- }
- htotal = NTSC_HTOTAL>>clk_shift;
- vtotal = NTSC_VTOTAL>>1;
- if (!IS_OCS) {
- par->beamcon0 = 0;
- par->bplcon3 |= BPC3_BRDRBLNK;
- } else if (AMIGAHW_PRESENT(AGNUS_HR_PAL) ||
- AMIGAHW_PRESENT(AGNUS_HR_NTSC)) {
- par->beamcon0 = 0;
- par->hsstop = 1;
- } else if (amiga_vblank != 60) {
- DPRINTK("ntsc not supported by this chipset\n");
- return -EINVAL;
- }
- }
- if (IS_OCS) {
- if (par->diwstrt_h >= 1024 || par->diwstop_h < 1024 ||
- par->diwstrt_v >= 512 || par->diwstop_v < 256) {
- DPRINTK("invalid position for display on ocs\n");
- return -EINVAL;
- }
- }
- } else if (!IS_OCS) {
- /* Programmable video mode */
- par->hsstrt = var->right_margin<<clk_shift;
- par->hsstop = (var->right_margin+var->hsync_len)<<clk_shift;
- par->diwstop_h = par->htotal - mod8(par->hsstrt) + 8 - (1 << clk_shift);
- if (!IS_AGA)
- par->diwstop_h = down4(par->diwstop_h) - 16;
- par->diwstrt_h = par->diwstop_h - xres_n;
- par->hbstop = par->diwstrt_h + 4;
- par->hbstrt = par->diwstop_h + 4;
- if (par->hbstrt >= par->htotal + 8)
- par->hbstrt -= par->htotal;
- par->hcenter = par->hsstrt + (par->htotal >> 1);
- par->vsstrt = var->lower_margin<<line_shift;
- par->vsstop = (var->lower_margin+var->vsync_len)<<line_shift;
- par->diwstop_v = par->vtotal;
- if ((par->vmode & FB_VMODE_MASK) == FB_VMODE_INTERLACED)
- par->diwstop_v -= 2;
- par->diwstrt_v = par->diwstop_v - yres_n;
- par->vbstop = par->diwstrt_v - 2;
- par->vbstrt = par->diwstop_v - 2;
- if (par->vtotal > 2048) {
- DPRINTK("vtotal too high\n");
- return -EINVAL;
- }
- if (par->htotal > 2048) {
- DPRINTK("htotal too high\n");
- return -EINVAL;
- }
- par->bplcon3 |= BPC3_EXTBLKEN;
- par->beamcon0 = BMC0_HARDDIS | BMC0_VARVBEN | BMC0_LOLDIS |
- BMC0_VARVSYEN | BMC0_VARHSYEN | BMC0_VARBEAMEN |
- BMC0_PAL | BMC0_VARCSYEN;
- if (var->sync & FB_SYNC_HOR_HIGH_ACT)
- par->beamcon0 |= BMC0_HSYTRUE;
- if (var->sync & FB_SYNC_VERT_HIGH_ACT)
- par->beamcon0 |= BMC0_VSYTRUE;
- if (var->sync & FB_SYNC_COMP_HIGH_ACT)
- par->beamcon0 |= BMC0_CSYTRUE;
- htotal = par->htotal>>clk_shift;
- vtotal = par->vtotal>>1;
- } else {
- DPRINTK("only broadcast modes possible for ocs\n");
- return -EINVAL;
- }
-
- /*
- * Checking the DMA timing
- */
-
- fconst = 16<<maxfmode<<clk_shift;
-
- /*
-	 * smallest window start value that does not turn off DMA cycles other
-	 * than sprite1-7, unless you change min_fstrt
- */
-
-
- fsize = ((maxfmode+clk_shift <= 1) ? fconst : 64);
- fstrt = downx(fconst, par->diwstrt_h-4) - fsize;
- if (fstrt < min_fstrt) {
- DPRINTK("fetch start too low\n");
- return -EINVAL;
- }
-
- /*
- * smallest window start value where smooth scrolling is possible
- */
-
- fstrt = downx(fconst, par->diwstrt_h-fconst+(1<<clk_shift)-4) - fsize;
- if (fstrt < min_fstrt)
- par->vmode &= ~FB_VMODE_SMOOTH_XPAN;
-
- maxfetchstop = down16(par->htotal - 80);
-
- fstrt = downx(fconst, par->diwstrt_h-4) - 64 - fconst;
- fsize = upx(fconst, xres_n + modx(fconst, downx(1<<clk_shift, par->diwstrt_h-4)));
- if (fstrt + fsize > maxfetchstop)
- par->vmode &= ~FB_VMODE_SMOOTH_XPAN;
-
- fsize = upx(fconst, xres_n);
- if (fstrt + fsize > maxfetchstop) {
- DPRINTK("fetch stop too high\n");
- return -EINVAL;
- }
-
- if (maxfmode + clk_shift <= 1) {
- fsize = up64(xres_n + fconst - 1);
- if (min_fstrt + fsize - 64 > maxfetchstop)
- par->vmode &= ~FB_VMODE_SMOOTH_XPAN;
-
- fsize = up64(xres_n);
- if (min_fstrt + fsize - 64 > maxfetchstop) {
- DPRINTK("fetch size too high\n");
- return -EINVAL;
- }
-
- fsize -= 64;
- } else
- fsize -= fconst;
-
- /*
- * Check if there is enough time to update the bitplane pointers for ywrap
- */
-
- if (par->htotal-fsize-64 < par->bpp*64)
- par->vmode &= ~FB_VMODE_YWRAP;
-
- /*
- * Bitplane calculations and check the Memory Requirements
- */
-
- if (amifb_ilbm) {
- par->next_plane = div8(upx(16<<maxfmode, par->vxres));
- par->next_line = par->bpp*par->next_plane;
- if (par->next_line * par->vyres > fb_info.fix.smem_len) {
- DPRINTK("too few video mem\n");
- return -EINVAL;
- }
- } else {
- par->next_line = div8(upx(16<<maxfmode, par->vxres));
- par->next_plane = par->vyres*par->next_line;
- if (par->next_plane * par->bpp > fb_info.fix.smem_len) {
- DPRINTK("too few video mem\n");
- return -EINVAL;
- }
- }
-
- /*
- * Hardware Register Values
- */
-
- par->bplcon0 = BPC0_COLOR | bplpixmode[clk_shift];
- if (!IS_OCS)
- par->bplcon0 |= BPC0_ECSENA;
- if (par->bpp == 8)
- par->bplcon0 |= BPC0_BPU3;
- else
- par->bplcon0 |= par->bpp<<12;
- if (var->nonstd == FB_NONSTD_HAM)
- par->bplcon0 |= BPC0_HAM;
- if (var->sync & FB_SYNC_EXT)
- par->bplcon0 |= BPC0_ERSY;
-
- if (IS_AGA)
- par->fmode = bplfetchmode[maxfmode];
-
- switch (par->vmode & FB_VMODE_MASK) {
- case FB_VMODE_INTERLACED:
- par->bplcon0 |= BPC0_LACE;
- break;
- case FB_VMODE_DOUBLE:
- if (IS_AGA)
- par->fmode |= FMODE_SSCAN2 | FMODE_BSCAN2;
- break;
- }
-
- if (!((par->vmode ^ var->vmode) & FB_VMODE_YWRAP)) {
- par->xoffset = var->xoffset;
- par->yoffset = var->yoffset;
- if (par->vmode & FB_VMODE_YWRAP) {
- if (par->xoffset || par->yoffset < 0 || par->yoffset >= par->vyres)
- par->xoffset = par->yoffset = 0;
- } else {
- if (par->xoffset < 0 || par->xoffset > upx(16<<maxfmode, par->vxres-par->xres) ||
- par->yoffset < 0 || par->yoffset > par->vyres-par->yres)
- par->xoffset = par->yoffset = 0;
- }
- } else
- par->xoffset = par->yoffset = 0;
-
- par->crsr.crsr_x = par->crsr.crsr_y = 0;
- par->crsr.spot_x = par->crsr.spot_y = 0;
- par->crsr.height = par->crsr.width = 0;
-
- return 0;
-}
-
- /*
- * Fill the `var' structure based on the values in `par' and maybe
- * other values read out of the hardware.
- */
-
-static int ami_encode_var(struct fb_var_screeninfo *var,
- struct amifb_par *par)
-{
- u_short clk_shift, line_shift;
-
- memset(var, 0, sizeof(struct fb_var_screeninfo));
-
- clk_shift = par->clk_shift;
- line_shift = par->line_shift;
-
- var->xres = par->xres;
- var->yres = par->yres;
- var->xres_virtual = par->vxres;
- var->yres_virtual = par->vyres;
- var->xoffset = par->xoffset;
- var->yoffset = par->yoffset;
-
- var->bits_per_pixel = par->bpp;
- var->grayscale = 0;
-
- var->red.offset = 0;
- var->red.msb_right = 0;
- var->red.length = par->bpp;
- if (par->bplcon0 & BPC0_HAM)
- var->red.length -= 2;
- var->blue = var->green = var->red;
- var->transp.offset = 0;
- var->transp.length = 0;
- var->transp.msb_right = 0;
-
- if (par->bplcon0 & BPC0_HAM)
- var->nonstd = FB_NONSTD_HAM;
- else
- var->nonstd = 0;
- var->activate = 0;
-
- var->height = -1;
- var->width = -1;
-
- var->pixclock = pixclock[clk_shift];
-
- if (IS_AGA && par->fmode & FMODE_BSCAN2)
- var->vmode = FB_VMODE_DOUBLE;
- else if (par->bplcon0 & BPC0_LACE)
- var->vmode = FB_VMODE_INTERLACED;
- else
- var->vmode = FB_VMODE_NONINTERLACED;
-
- if (!IS_OCS && par->beamcon0 & BMC0_VARBEAMEN) {
- var->hsync_len = (par->hsstop-par->hsstrt)>>clk_shift;
- var->right_margin = par->hsstrt>>clk_shift;
- var->left_margin = (par->htotal>>clk_shift) - var->xres - var->right_margin - var->hsync_len;
- var->vsync_len = (par->vsstop-par->vsstrt)>>line_shift;
- var->lower_margin = par->vsstrt>>line_shift;
- var->upper_margin = (par->vtotal>>line_shift) - var->yres - var->lower_margin - var->vsync_len;
- var->sync = 0;
- if (par->beamcon0 & BMC0_HSYTRUE)
- var->sync |= FB_SYNC_HOR_HIGH_ACT;
- if (par->beamcon0 & BMC0_VSYTRUE)
- var->sync |= FB_SYNC_VERT_HIGH_ACT;
- if (par->beamcon0 & BMC0_CSYTRUE)
- var->sync |= FB_SYNC_COMP_HIGH_ACT;
- } else {
- var->sync = FB_SYNC_BROADCAST;
- var->hsync_len = (152>>clk_shift) + mod4(par->diwstop_h);
- var->right_margin = ((par->htotal - down4(par->diwstop_h))>>clk_shift) + var->hsync_len;
- var->left_margin = (par->htotal>>clk_shift) - var->xres - var->right_margin - var->hsync_len;
- var->vsync_len = 4>>line_shift;
- var->lower_margin = ((par->vtotal - par->diwstop_v)>>line_shift) + var->vsync_len;
- var->upper_margin = (((par->vtotal - 2)>>line_shift) + 1) - var->yres -
- var->lower_margin - var->vsync_len;
- }
-
- if (par->bplcon0 & BPC0_ERSY)
- var->sync |= FB_SYNC_EXT;
- if (par->vmode & FB_VMODE_YWRAP)
- var->vmode |= FB_VMODE_YWRAP;
-
- return 0;
-}
-
-
/*
- * Pan or Wrap the Display
- *
- * This call looks only at xoffset, yoffset and the FB_VMODE_YWRAP flag
- * in `var'.
- */
-
-static void ami_pan_var(struct fb_var_screeninfo *var)
-{
- struct amifb_par *par = &currentpar;
-
- par->xoffset = var->xoffset;
- par->yoffset = var->yoffset;
- if (var->vmode & FB_VMODE_YWRAP)
- par->vmode |= FB_VMODE_YWRAP;
- else
- par->vmode &= ~FB_VMODE_YWRAP;
-
- do_vmode_pan = 0;
- ami_update_par();
- do_vmode_pan = 1;
-}
-
- /*
- * Update hardware
- */
-
-static int ami_update_par(void)
-{
- struct amifb_par *par = &currentpar;
- short clk_shift, vshift, fstrt, fsize, fstop, fconst, shift, move, mod;
-
- clk_shift = par->clk_shift;
-
- if (!(par->vmode & FB_VMODE_SMOOTH_XPAN))
- par->xoffset = upx(16<<maxfmode, par->xoffset);
-
- fconst = 16<<maxfmode<<clk_shift;
- vshift = modx(16<<maxfmode, par->xoffset);
- fstrt = par->diwstrt_h - (vshift<<clk_shift) - 4;
- fsize = (par->xres+vshift)<<clk_shift;
- shift = modx(fconst, fstrt);
- move = downx(2<<maxfmode, div8(par->xoffset));
- if (maxfmode + clk_shift > 1) {
- fstrt = downx(fconst, fstrt) - 64;
- fsize = upx(fconst, fsize);
- fstop = fstrt + fsize - fconst;
- } else {
- mod = fstrt = downx(fconst, fstrt) - fconst;
- fstop = fstrt + upx(fconst, fsize) - 64;
- fsize = up64(fsize);
- fstrt = fstop - fsize + 64;
- if (fstrt < min_fstrt) {
- fstop += min_fstrt - fstrt;
- fstrt = min_fstrt;
- }
- move = move - div8((mod-fstrt)>>clk_shift);
- }
- mod = par->next_line - div8(fsize>>clk_shift);
- par->ddfstrt = fstrt;
- par->ddfstop = fstop;
- par->bplcon1 = hscroll2hw(shift);
- par->bpl2mod = mod;
- if (par->bplcon0 & BPC0_LACE)
- par->bpl2mod += par->next_line;
- if (IS_AGA && (par->fmode & FMODE_BSCAN2))
- par->bpl1mod = -div8(fsize>>clk_shift);
- else
- par->bpl1mod = par->bpl2mod;
-
- if (par->yoffset) {
- par->bplpt0 = fb_info.fix.smem_start + par->next_line*par->yoffset + move;
- if (par->vmode & FB_VMODE_YWRAP) {
- if (par->yoffset > par->vyres-par->yres) {
- par->bplpt0wrap = fb_info.fix.smem_start + move;
- if (par->bplcon0 & BPC0_LACE && mod2(par->diwstrt_v+par->vyres-par->yoffset))
- par->bplpt0wrap += par->next_line;
- }
- }
- } else
- par->bplpt0 = fb_info.fix.smem_start + move;
-
- if (par->bplcon0 & BPC0_LACE && mod2(par->diwstrt_v))
- par->bplpt0 += par->next_line;
-
- return 0;
-}
-
-
- /*
- * Set a single color register. The values supplied are already
- * rounded down to the hardware's capabilities (according to the
- * entries in the var structure). Return != 0 for invalid regno.
- */
-
-static int amifb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
- u_int transp, struct fb_info *info)
-{
- if (IS_AGA) {
- if (regno > 255)
- return 1;
- } else if (currentpar.bplcon0 & BPC0_SHRES) {
- if (regno > 3)
- return 1;
- } else {
- if (regno > 31)
- return 1;
- }
- red >>= 8;
- green >>= 8;
- blue >>= 8;
- if (!regno) {
- red0 = red;
- green0 = green;
- blue0 = blue;
- }
-
- /*
- * Update the corresponding Hardware Color Register, unless it's Color
- * Register 0 and the screen is blanked.
- *
- * VBlank is switched off to protect bplcon3 or ecs_palette[] from
- * being changed by ami_do_blank() during the VBlank.
- */
-
- if (regno || !is_blanked) {
-#if defined(CONFIG_FB_AMIGA_AGA)
- if (IS_AGA) {
- u_short bplcon3 = currentpar.bplcon3;
- VBlankOff();
- custom.bplcon3 = bplcon3 | (regno<<8 & 0xe000);
- custom.color[regno&31] = rgb2hw8_high(red, green, blue);
- custom.bplcon3 = bplcon3 | (regno<<8 & 0xe000) | BPC3_LOCT;
- custom.color[regno&31] = rgb2hw8_low(red, green, blue);
- custom.bplcon3 = bplcon3;
- VBlankOn();
- } else
-#endif
-#if defined(CONFIG_FB_AMIGA_ECS)
- if (currentpar.bplcon0 & BPC0_SHRES) {
- u_short color, mask;
- int i;
-
- mask = 0x3333;
- color = rgb2hw2(red, green, blue);
- VBlankOff();
- for (i = regno+12; i >= (int)regno; i -= 4)
- custom.color[i] = ecs_palette[i] = (ecs_palette[i] & mask) | color;
- mask <<=2; color >>= 2;
- regno = down16(regno)+mul4(mod4(regno));
- for (i = regno+3; i >= (int)regno; i--)
- custom.color[i] = ecs_palette[i] = (ecs_palette[i] & mask) | color;
- VBlankOn();
- } else
-#endif
- custom.color[regno] = rgb2hw4(red, green, blue);
- }
- return 0;
-}
-
-static void ami_update_display(void)
-{
- struct amifb_par *par = &currentpar;
-
- custom.bplcon1 = par->bplcon1;
- custom.bpl1mod = par->bpl1mod;
- custom.bpl2mod = par->bpl2mod;
- custom.ddfstrt = ddfstrt2hw(par->ddfstrt);
- custom.ddfstop = ddfstop2hw(par->ddfstop);
-}
-
- /*
- * Change the video mode (called by VBlank interrupt)
- */
-
-static void ami_init_display(void)
-{
- struct amifb_par *par = &currentpar;
- int i;
-
- custom.bplcon0 = par->bplcon0 & ~BPC0_LACE;
- custom.bplcon2 = (IS_OCS ? 0 : BPC2_KILLEHB) | BPC2_PF2P2 | BPC2_PF1P2;
- if (!IS_OCS) {
- custom.bplcon3 = par->bplcon3;
- if (IS_AGA)
- custom.bplcon4 = BPC4_ESPRM4 | BPC4_OSPRM4;
- if (par->beamcon0 & BMC0_VARBEAMEN) {
- custom.htotal = htotal2hw(par->htotal);
- custom.hbstrt = hbstrt2hw(par->hbstrt);
- custom.hbstop = hbstop2hw(par->hbstop);
- custom.hsstrt = hsstrt2hw(par->hsstrt);
- custom.hsstop = hsstop2hw(par->hsstop);
- custom.hcenter = hcenter2hw(par->hcenter);
- custom.vtotal = vtotal2hw(par->vtotal);
- custom.vbstrt = vbstrt2hw(par->vbstrt);
- custom.vbstop = vbstop2hw(par->vbstop);
- custom.vsstrt = vsstrt2hw(par->vsstrt);
- custom.vsstop = vsstop2hw(par->vsstop);
- }
- }
- if (!IS_OCS || par->hsstop)
- custom.beamcon0 = par->beamcon0;
- if (IS_AGA)
- custom.fmode = par->fmode;
-
- /*
- * The minimum period for audio depends on htotal
- */
-
- amiga_audio_min_period = div16(par->htotal);
-
- is_lace = par->bplcon0 & BPC0_LACE ? 1 : 0;
-#if 1
- if (is_lace) {
- i = custom.vposr >> 15;
- } else {
- custom.vposw = custom.vposr | 0x8000;
- i = 1;
- }
-#else
- i = 1;
- custom.vposw = custom.vposr | 0x8000;
-#endif
- custom.cop2lc = (u_short *)ZTWO_PADDR(copdisplay.list[currentcop][i]);
-}
-
- /*
- * (Un)Blank the screen (called by VBlank interrupt)
+ * Enable Display DMA
*/
+ custom.dmacon = DMAF_SETCLR | DMAF_MASTER | DMAF_RASTER | DMAF_COPPER |
+ DMAF_BLITTER | DMAF_SPRITE;
-static void ami_do_blank(void)
-{
- struct amifb_par *par = &currentpar;
-#if defined(CONFIG_FB_AMIGA_AGA)
- u_short bplcon3 = par->bplcon3;
-#endif
- u_char red, green, blue;
-
- if (do_blank > 0) {
- custom.dmacon = DMAF_RASTER | DMAF_SPRITE;
- red = green = blue = 0;
- if (!IS_OCS && do_blank > 1) {
- switch (do_blank) {
- case FB_BLANK_VSYNC_SUSPEND:
- custom.hsstrt = hsstrt2hw(par->hsstrt);
- custom.hsstop = hsstop2hw(par->hsstop);
- custom.vsstrt = vsstrt2hw(par->vtotal+4);
- custom.vsstop = vsstop2hw(par->vtotal+4);
- break;
- case FB_BLANK_HSYNC_SUSPEND:
- custom.hsstrt = hsstrt2hw(par->htotal+16);
- custom.hsstop = hsstop2hw(par->htotal+16);
- custom.vsstrt = vsstrt2hw(par->vsstrt);
- custom.vsstop = vsstrt2hw(par->vsstop);
- break;
- case FB_BLANK_POWERDOWN:
- custom.hsstrt = hsstrt2hw(par->htotal+16);
- custom.hsstop = hsstop2hw(par->htotal+16);
- custom.vsstrt = vsstrt2hw(par->vtotal+4);
- custom.vsstop = vsstop2hw(par->vtotal+4);
- break;
- }
- if (!(par->beamcon0 & BMC0_VARBEAMEN)) {
- custom.htotal = htotal2hw(par->htotal);
- custom.vtotal = vtotal2hw(par->vtotal);
- custom.beamcon0 = BMC0_HARDDIS | BMC0_VARBEAMEN |
- BMC0_VARVSYEN | BMC0_VARHSYEN | BMC0_VARCSYEN;
- }
- }
- } else {
- custom.dmacon = DMAF_SETCLR | DMAF_RASTER | DMAF_SPRITE;
- red = red0;
- green = green0;
- blue = blue0;
- if (!IS_OCS) {
- custom.hsstrt = hsstrt2hw(par->hsstrt);
- custom.hsstop = hsstop2hw(par->hsstop);
- custom.vsstrt = vsstrt2hw(par->vsstrt);
- custom.vsstop = vsstop2hw(par->vsstop);
- custom.beamcon0 = par->beamcon0;
- }
- }
-#if defined(CONFIG_FB_AMIGA_AGA)
- if (IS_AGA) {
- custom.bplcon3 = bplcon3;
- custom.color[0] = rgb2hw8_high(red, green, blue);
- custom.bplcon3 = bplcon3 | BPC3_LOCT;
- custom.color[0] = rgb2hw8_low(red, green, blue);
- custom.bplcon3 = bplcon3;
- } else
-#endif
-#if defined(CONFIG_FB_AMIGA_ECS)
- if (par->bplcon0 & BPC0_SHRES) {
- u_short color, mask;
- int i;
-
- mask = 0x3333;
- color = rgb2hw2(red, green, blue);
- for (i = 12; i >= 0; i -= 4)
- custom.color[i] = ecs_palette[i] = (ecs_palette[i] & mask) | color;
- mask <<=2; color >>= 2;
- for (i = 3; i >= 0; i--)
- custom.color[i] = ecs_palette[i] = (ecs_palette[i] & mask) | color;
- } else
-#endif
- custom.color[0] = rgb2hw4(red, green, blue);
- is_blanked = do_blank > 0 ? do_blank : 0;
-}
-
-static int ami_get_fix_cursorinfo(struct fb_fix_cursorinfo *fix)
-{
- struct amifb_par *par = &currentpar;
-
- fix->crsr_width = fix->crsr_xsize = par->crsr.width;
- fix->crsr_height = fix->crsr_ysize = par->crsr.height;
- fix->crsr_color1 = 17;
- fix->crsr_color2 = 18;
- return 0;
-}
-
-static int ami_get_var_cursorinfo(struct fb_var_cursorinfo *var, u_char __user *data)
-{
- struct amifb_par *par = &currentpar;
- register u_short *lspr, *sspr;
-#ifdef __mc68000__
- register u_long datawords asm ("d2");
-#else
- register u_long datawords;
-#endif
- register short delta;
- register u_char color;
- short height, width, bits, words;
- int size, alloc;
-
- size = par->crsr.height*par->crsr.width;
- alloc = var->height*var->width;
- var->height = par->crsr.height;
- var->width = par->crsr.width;
- var->xspot = par->crsr.spot_x;
- var->yspot = par->crsr.spot_y;
- if (size > var->height*var->width)
- return -ENAMETOOLONG;
- if (!access_ok(VERIFY_WRITE, data, size))
- return -EFAULT;
- delta = 1<<par->crsr.fmode;
- lspr = lofsprite + (delta<<1);
- if (par->bplcon0 & BPC0_LACE)
- sspr = shfsprite + (delta<<1);
- else
- sspr = NULL;
- for (height = (short)var->height-1; height >= 0; height--) {
- bits = 0; words = delta; datawords = 0;
- for (width = (short)var->width-1; width >= 0; width--) {
- if (bits == 0) {
- bits = 16; --words;
-#ifdef __mc68000__
- asm volatile ("movew %1@(%3:w:2),%0 ; swap %0 ; movew %1@+,%0"
- : "=d" (datawords), "=a" (lspr) : "1" (lspr), "d" (delta));
-#else
- datawords = (*(lspr+delta) << 16) | (*lspr++);
-#endif
- }
- --bits;
-#ifdef __mc68000__
- asm volatile (
- "clrb %0 ; swap %1 ; lslw #1,%1 ; roxlb #1,%0 ; "
- "swap %1 ; lslw #1,%1 ; roxlb #1,%0"
- : "=d" (color), "=d" (datawords) : "1" (datawords));
-#else
- color = (((datawords >> 30) & 2)
- | ((datawords >> 15) & 1));
- datawords <<= 1;
-#endif
- put_user(color, data++);
- }
- if (bits > 0) {
- --words; ++lspr;
- }
- while (--words >= 0)
- ++lspr;
-#ifdef __mc68000__
- asm volatile ("lea %0@(%4:w:2),%0 ; tstl %1 ; jeq 1f ; exg %0,%1\n1:"
- : "=a" (lspr), "=a" (sspr) : "0" (lspr), "1" (sspr), "d" (delta));
-#else
- lspr += delta;
- if (sspr) {
- u_short *tmp = lspr;
- lspr = sspr;
- sspr = tmp;
- }
-#endif
- }
- return 0;
-}
-
-static int ami_set_var_cursorinfo(struct fb_var_cursorinfo *var, u_char __user *data)
-{
- struct amifb_par *par = &currentpar;
- register u_short *lspr, *sspr;
-#ifdef __mc68000__
- register u_long datawords asm ("d2");
-#else
- register u_long datawords;
-#endif
- register short delta;
- u_short fmode;
- short height, width, bits, words;
+ err = request_irq(IRQ_AMIGA_COPPER, amifb_interrupt, 0,
+ "fb vertb handler", info->par);
+ if (err)
+ goto disable_dma;
- if (!var->width)
- return -EINVAL;
- else if (var->width <= 16)
- fmode = TAG_FMODE_1;
- else if (var->width <= 32)
- fmode = TAG_FMODE_2;
- else if (var->width <= 64)
- fmode = TAG_FMODE_4;
- else
- return -EINVAL;
- if (fmode > maxfmode)
- return -EINVAL;
- if (!var->height)
- return -EINVAL;
- if (!access_ok(VERIFY_READ, data, var->width*var->height))
- return -EFAULT;
- delta = 1<<fmode;
- lofsprite = shfsprite = (u_short *)spritememory;
- lspr = lofsprite + (delta<<1);
- if (par->bplcon0 & BPC0_LACE) {
- if (((var->height+4)<<fmode<<2) > SPRITEMEMSIZE)
- return -EINVAL;
- memset(lspr, 0, (var->height+4)<<fmode<<2);
- shfsprite += ((var->height+5)&-2)<<fmode;
- sspr = shfsprite + (delta<<1);
- } else {
- if (((var->height+2)<<fmode<<2) > SPRITEMEMSIZE)
- return -EINVAL;
- memset(lspr, 0, (var->height+2)<<fmode<<2);
- sspr = NULL;
- }
- for (height = (short)var->height-1; height >= 0; height--) {
- bits = 16; words = delta; datawords = 0;
- for (width = (short)var->width-1; width >= 0; width--) {
- unsigned long tdata = 0;
- get_user(tdata, data);
- data++;
-#ifdef __mc68000__
- asm volatile (
- "lsrb #1,%2 ; roxlw #1,%0 ; swap %0 ; "
- "lsrb #1,%2 ; roxlw #1,%0 ; swap %0"
- : "=d" (datawords)
- : "0" (datawords), "d" (tdata));
-#else
- datawords = ((datawords << 1) & 0xfffefffe);
- datawords |= tdata & 1;
- datawords |= (tdata & 2) << (16-1);
-#endif
- if (--bits == 0) {
- bits = 16; --words;
-#ifdef __mc68000__
- asm volatile ("swap %2 ; movew %2,%0@(%3:w:2) ; swap %2 ; movew %2,%0@+"
- : "=a" (lspr) : "0" (lspr), "d" (datawords), "d" (delta));
-#else
- *(lspr+delta) = (u_short) (datawords >> 16);
- *lspr++ = (u_short) (datawords & 0xffff);
-#endif
- }
- }
- if (bits < 16) {
- --words;
-#ifdef __mc68000__
- asm volatile (
- "swap %2 ; lslw %4,%2 ; movew %2,%0@(%3:w:2) ; "
- "swap %2 ; lslw %4,%2 ; movew %2,%0@+"
- : "=a" (lspr) : "0" (lspr), "d" (datawords), "d" (delta), "d" (bits));
-#else
- *(lspr+delta) = (u_short) (datawords >> (16+bits));
- *lspr++ = (u_short) ((datawords & 0x0000ffff) >> bits);
-#endif
- }
- while (--words >= 0) {
-#ifdef __mc68000__
- asm volatile ("moveql #0,%%d0 ; movew %%d0,%0@(%2:w:2) ; movew %%d0,%0@+"
- : "=a" (lspr) : "0" (lspr), "d" (delta) : "d0");
-#else
- *(lspr+delta) = 0;
- *lspr++ = 0;
-#endif
- }
-#ifdef __mc68000__
- asm volatile ("lea %0@(%4:w:2),%0 ; tstl %1 ; jeq 1f ; exg %0,%1\n1:"
- : "=a" (lspr), "=a" (sspr) : "0" (lspr), "1" (sspr), "d" (delta));
-#else
- lspr += delta;
- if (sspr) {
- u_short *tmp = lspr;
- lspr = sspr;
- sspr = tmp;
- }
-#endif
- }
- par->crsr.height = var->height;
- par->crsr.width = var->width;
- par->crsr.spot_x = var->xspot;
- par->crsr.spot_y = var->yspot;
- par->crsr.fmode = fmode;
- if (IS_AGA) {
- par->fmode &= ~(FMODE_SPAGEM | FMODE_SPR32);
- par->fmode |= sprfetchmode[fmode];
- custom.fmode = par->fmode;
- }
- return 0;
-}
+ err = fb_alloc_cmap(&info->cmap, 1 << info->var.bits_per_pixel, 0);
+ if (err)
+ goto free_irq;
-static int ami_get_cursorstate(struct fb_cursorstate *state)
-{
- struct amifb_par *par = &currentpar;
+ dev_set_drvdata(&pdev->dev, info);
- state->xoffset = par->crsr.crsr_x;
- state->yoffset = par->crsr.crsr_y;
- state->mode = cursormode;
- return 0;
-}
+ err = register_framebuffer(info);
+ if (err)
+ goto unset_drvdata;
-static int ami_set_cursorstate(struct fb_cursorstate *state)
-{
- struct amifb_par *par = &currentpar;
+ printk("fb%d: %s frame buffer device, using %dK of video memory\n",
+ info->node, info->fix.id, info->fix.smem_len>>10);
- par->crsr.crsr_x = state->xoffset;
- par->crsr.crsr_y = state->yoffset;
- if ((cursormode = state->mode) == FB_CURSOR_OFF)
- cursorstate = -1;
- do_cursor = 1;
return 0;
-}
-
-static void ami_set_sprite(void)
-{
- struct amifb_par *par = &currentpar;
- copins *copl, *cops;
- u_short hs, vs, ve;
- u_long pl, ps, pt;
- short mx, my;
-
- cops = copdisplay.list[currentcop][0];
- copl = copdisplay.list[currentcop][1];
- ps = pl = ZTWO_PADDR(dummysprite);
- mx = par->crsr.crsr_x-par->crsr.spot_x;
- my = par->crsr.crsr_y-par->crsr.spot_y;
- if (!(par->vmode & FB_VMODE_YWRAP)) {
- mx -= par->xoffset;
- my -= par->yoffset;
- }
- if (!is_blanked && cursorstate > 0 && par->crsr.height > 0 &&
- mx > -(short)par->crsr.width && mx < par->xres &&
- my > -(short)par->crsr.height && my < par->yres) {
- pl = ZTWO_PADDR(lofsprite);
- hs = par->diwstrt_h + (mx<<par->clk_shift) - 4;
- vs = par->diwstrt_v + (my<<par->line_shift);
- ve = vs + (par->crsr.height<<par->line_shift);
- if (par->bplcon0 & BPC0_LACE) {
- ps = ZTWO_PADDR(shfsprite);
- lofsprite[0] = spr2hw_pos(vs, hs);
- shfsprite[0] = spr2hw_pos(vs+1, hs);
- if (mod2(vs)) {
- lofsprite[1<<par->crsr.fmode] = spr2hw_ctl(vs, hs, ve);
- shfsprite[1<<par->crsr.fmode] = spr2hw_ctl(vs+1, hs, ve+1);
- pt = pl; pl = ps; ps = pt;
- } else {
- lofsprite[1<<par->crsr.fmode] = spr2hw_ctl(vs, hs, ve+1);
- shfsprite[1<<par->crsr.fmode] = spr2hw_ctl(vs+1, hs, ve);
- }
- } else {
- lofsprite[0] = spr2hw_pos(vs, hs) | (IS_AGA && (par->fmode & FMODE_BSCAN2) ? 0x80 : 0);
- lofsprite[1<<par->crsr.fmode] = spr2hw_ctl(vs, hs, ve);
- }
- }
- copl[cop_spr0ptrh].w[1] = highw(pl);
- copl[cop_spr0ptrl].w[1] = loww(pl);
- if (par->bplcon0 & BPC0_LACE) {
- cops[cop_spr0ptrh].w[1] = highw(ps);
- cops[cop_spr0ptrl].w[1] = loww(ps);
- }
-}
-
-
- /*
- * Initialise the Copper Initialisation List
- */
-
-static void __init ami_init_copper(void)
-{
- copins *cop = copdisplay.init;
- u_long p;
- int i;
-
- if (!IS_OCS) {
- (cop++)->l = CMOVE(BPC0_COLOR | BPC0_SHRES | BPC0_ECSENA, bplcon0);
- (cop++)->l = CMOVE(0x0181, diwstrt);
- (cop++)->l = CMOVE(0x0281, diwstop);
- (cop++)->l = CMOVE(0x0000, diwhigh);
- } else
- (cop++)->l = CMOVE(BPC0_COLOR, bplcon0);
- p = ZTWO_PADDR(dummysprite);
- for (i = 0; i < 8; i++) {
- (cop++)->l = CMOVE(0, spr[i].pos);
- (cop++)->l = CMOVE(highw(p), sprpt[i]);
- (cop++)->l = CMOVE2(loww(p), sprpt[i]);
- }
-
- (cop++)->l = CMOVE(IF_SETCLR | IF_COPER, intreq);
- copdisplay.wait = cop;
- (cop++)->l = CEND;
- (cop++)->l = CMOVE(0, copjmp2);
- cop->l = CEND;
-
- custom.cop1lc = (u_short *)ZTWO_PADDR(copdisplay.init);
- custom.copjmp1 = 0;
-}
-static void ami_reinit_copper(void)
-{
- struct amifb_par *par = &currentpar;
-
- copdisplay.init[cip_bplcon0].w[1] = ~(BPC0_BPU3 | BPC0_BPU2 | BPC0_BPU1 | BPC0_BPU0) & par->bplcon0;
- copdisplay.wait->l = CWAIT(32, par->diwstrt_v-4);
+unset_drvdata:
+ dev_set_drvdata(&pdev->dev, NULL);
+ fb_dealloc_cmap(&info->cmap);
+free_irq:
+ free_irq(IRQ_AMIGA_COPPER, info->par);
+disable_dma:
+ custom.dmacon = DMAF_ALL | DMAF_MASTER;
+ if (videomemory)
+ iounmap((void *)videomemory);
+ chipfree();
+release:
+ framebuffer_release(info);
+ return err;
}
- /*
- * Build the Copper List
- */
-
-static void ami_build_copper(void)
-{
- struct amifb_par *par = &currentpar;
- copins *copl, *cops;
- u_long p;
-
- currentcop = 1 - currentcop;
-
- copl = copdisplay.list[currentcop][1];
-
- (copl++)->l = CWAIT(0, 10);
- (copl++)->l = CMOVE(par->bplcon0, bplcon0);
- (copl++)->l = CMOVE(0, sprpt[0]);
- (copl++)->l = CMOVE2(0, sprpt[0]);
-
- if (par->bplcon0 & BPC0_LACE) {
- cops = copdisplay.list[currentcop][0];
-
- (cops++)->l = CWAIT(0, 10);
- (cops++)->l = CMOVE(par->bplcon0, bplcon0);
- (cops++)->l = CMOVE(0, sprpt[0]);
- (cops++)->l = CMOVE2(0, sprpt[0]);
-
- (copl++)->l = CMOVE(diwstrt2hw(par->diwstrt_h, par->diwstrt_v+1), diwstrt);
- (copl++)->l = CMOVE(diwstop2hw(par->diwstop_h, par->diwstop_v+1), diwstop);
- (cops++)->l = CMOVE(diwstrt2hw(par->diwstrt_h, par->diwstrt_v), diwstrt);
- (cops++)->l = CMOVE(diwstop2hw(par->diwstop_h, par->diwstop_v), diwstop);
- if (!IS_OCS) {
- (copl++)->l = CMOVE(diwhigh2hw(par->diwstrt_h, par->diwstrt_v+1,
- par->diwstop_h, par->diwstop_v+1), diwhigh);
- (cops++)->l = CMOVE(diwhigh2hw(par->diwstrt_h, par->diwstrt_v,
- par->diwstop_h, par->diwstop_v), diwhigh);
-#if 0
- if (par->beamcon0 & BMC0_VARBEAMEN) {
- (copl++)->l = CMOVE(vtotal2hw(par->vtotal), vtotal);
- (copl++)->l = CMOVE(vbstrt2hw(par->vbstrt+1), vbstrt);
- (copl++)->l = CMOVE(vbstop2hw(par->vbstop+1), vbstop);
- (cops++)->l = CMOVE(vtotal2hw(par->vtotal), vtotal);
- (cops++)->l = CMOVE(vbstrt2hw(par->vbstrt), vbstrt);
- (cops++)->l = CMOVE(vbstop2hw(par->vbstop), vbstop);
- }
-#endif
- }
- p = ZTWO_PADDR(copdisplay.list[currentcop][0]);
- (copl++)->l = CMOVE(highw(p), cop2lc);
- (copl++)->l = CMOVE2(loww(p), cop2lc);
- p = ZTWO_PADDR(copdisplay.list[currentcop][1]);
- (cops++)->l = CMOVE(highw(p), cop2lc);
- (cops++)->l = CMOVE2(loww(p), cop2lc);
- copdisplay.rebuild[0] = cops;
- } else {
- (copl++)->l = CMOVE(diwstrt2hw(par->diwstrt_h, par->diwstrt_v), diwstrt);
- (copl++)->l = CMOVE(diwstop2hw(par->diwstop_h, par->diwstop_v), diwstop);
- if (!IS_OCS) {
- (copl++)->l = CMOVE(diwhigh2hw(par->diwstrt_h, par->diwstrt_v,
- par->diwstop_h, par->diwstop_v), diwhigh);
-#if 0
- if (par->beamcon0 & BMC0_VARBEAMEN) {
- (copl++)->l = CMOVE(vtotal2hw(par->vtotal), vtotal);
- (copl++)->l = CMOVE(vbstrt2hw(par->vbstrt), vbstrt);
- (copl++)->l = CMOVE(vbstop2hw(par->vbstop), vbstop);
- }
-#endif
- }
- }
- copdisplay.rebuild[1] = copl;
-
- ami_update_par();
- ami_rebuild_copper();
-}
-
- /*
- * Rebuild the Copper List
- *
- * We only change the things that are not static
- */
-
-static void ami_rebuild_copper(void)
-{
- struct amifb_par *par = &currentpar;
- copins *copl, *cops;
- u_short line, h_end1, h_end2;
- short i;
- u_long p;
-
- if (IS_AGA && maxfmode + par->clk_shift == 0)
- h_end1 = par->diwstrt_h-64;
- else
- h_end1 = par->htotal-32;
- h_end2 = par->ddfstop+64;
-
- ami_set_sprite();
-
- copl = copdisplay.rebuild[1];
- p = par->bplpt0;
- if (par->vmode & FB_VMODE_YWRAP) {
- if ((par->vyres-par->yoffset) != 1 || !mod2(par->diwstrt_v)) {
- if (par->yoffset > par->vyres-par->yres) {
- for (i = 0; i < (short)par->bpp; i++, p += par->next_plane) {
- (copl++)->l = CMOVE(highw(p), bplpt[i]);
- (copl++)->l = CMOVE2(loww(p), bplpt[i]);
- }
- line = par->diwstrt_v + ((par->vyres-par->yoffset)<<par->line_shift) - 1;
- while (line >= 512) {
- (copl++)->l = CWAIT(h_end1, 510);
- line -= 512;
- }
- if (line >= 510 && IS_AGA && maxfmode + par->clk_shift == 0)
- (copl++)->l = CWAIT(h_end1, line);
- else
- (copl++)->l = CWAIT(h_end2, line);
- p = par->bplpt0wrap;
- }
- } else p = par->bplpt0wrap;
- }
- for (i = 0; i < (short)par->bpp; i++, p += par->next_plane) {
- (copl++)->l = CMOVE(highw(p), bplpt[i]);
- (copl++)->l = CMOVE2(loww(p), bplpt[i]);
- }
- copl->l = CEND;
-
- if (par->bplcon0 & BPC0_LACE) {
- cops = copdisplay.rebuild[0];
- p = par->bplpt0;
- if (mod2(par->diwstrt_v))
- p -= par->next_line;
- else
- p += par->next_line;
- if (par->vmode & FB_VMODE_YWRAP) {
- if ((par->vyres-par->yoffset) != 1 || mod2(par->diwstrt_v)) {
- if (par->yoffset > par->vyres-par->yres+1) {
- for (i = 0; i < (short)par->bpp; i++, p += par->next_plane) {
- (cops++)->l = CMOVE(highw(p), bplpt[i]);
- (cops++)->l = CMOVE2(loww(p), bplpt[i]);
- }
- line = par->diwstrt_v + ((par->vyres-par->yoffset)<<par->line_shift) - 2;
- while (line >= 512) {
- (cops++)->l = CWAIT(h_end1, 510);
- line -= 512;
- }
- if (line > 510 && IS_AGA && maxfmode + par->clk_shift == 0)
- (cops++)->l = CWAIT(h_end1, line);
- else
- (cops++)->l = CWAIT(h_end2, line);
- p = par->bplpt0wrap;
- if (mod2(par->diwstrt_v+par->vyres-par->yoffset))
- p -= par->next_line;
- else
- p += par->next_line;
- }
- } else p = par->bplpt0wrap - par->next_line;
- }
- for (i = 0; i < (short)par->bpp; i++, p += par->next_plane) {
- (cops++)->l = CMOVE(highw(p), bplpt[i]);
- (cops++)->l = CMOVE2(loww(p), bplpt[i]);
- }
- cops->l = CEND;
- }
-}
static int __exit amifb_remove(struct platform_device *pdev)
{
- unregister_framebuffer(&fb_info);
- amifb_deinit(pdev);
+ struct fb_info *info = dev_get_drvdata(&pdev->dev);
+
+ unregister_framebuffer(info);
+ dev_set_drvdata(&pdev->dev, NULL);
+ fb_dealloc_cmap(&info->cmap);
+ free_irq(IRQ_AMIGA_COPPER, info->par);
+ custom.dmacon = DMAF_ALL | DMAF_MASTER;
+ if (videomemory)
+ iounmap((void *)videomemory);
+ chipfree();
+ framebuffer_release(info);
amifb_video_off();
return 0;
}
diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
index 63409c122ae8..0d7b20d4285d 100644
--- a/drivers/video/atmel_lcdfb.c
+++ b/drivers/video/atmel_lcdfb.c
@@ -100,8 +100,11 @@ static int atmel_bl_update_status(struct backlight_device *bl)
brightness = 0;
lcdc_writel(sinfo, ATMEL_LCDC_CONTRAST_VAL, brightness);
- lcdc_writel(sinfo, ATMEL_LCDC_CONTRAST_CTR,
+ if (contrast_ctr & ATMEL_LCDC_POL_POSITIVE)
+ lcdc_writel(sinfo, ATMEL_LCDC_CONTRAST_CTR,
brightness ? contrast_ctr : 0);
+ else
+ lcdc_writel(sinfo, ATMEL_LCDC_CONTRAST_CTR, contrast_ctr);
bl->props.fb_blank = bl->props.power = sinfo->bl_power = power;
@@ -682,14 +685,30 @@ static int atmel_lcdfb_setcolreg(unsigned int regno, unsigned int red,
case FB_VISUAL_PSEUDOCOLOR:
if (regno < 256) {
- val = ((red >> 11) & 0x001f);
- val |= ((green >> 6) & 0x03e0);
- val |= ((blue >> 1) & 0x7c00);
-
- /*
- * TODO: intensity bit. Maybe something like
- * ~(red[10] ^ green[10] ^ blue[10]) & 1
- */
+ if (cpu_is_at91sam9261() || cpu_is_at91sam9263()
+ || cpu_is_at91sam9rl()) {
+ /* old style I+BGR:555 */
+ val = ((red >> 11) & 0x001f);
+ val |= ((green >> 6) & 0x03e0);
+ val |= ((blue >> 1) & 0x7c00);
+
+ /*
+ * TODO: intensity bit. Maybe something like
+ * ~(red[10] ^ green[10] ^ blue[10]) & 1
+ */
+ } else {
+ /* new style BGR:565 / RGB:565 */
+ if (sinfo->lcd_wiring_mode ==
+ ATMEL_LCDC_WIRING_RGB) {
+ val = ((blue >> 11) & 0x001f);
+ val |= ((red >> 0) & 0xf800);
+ } else {
+ val = ((red >> 11) & 0x001f);
+ val |= ((blue >> 0) & 0xf800);
+ }
+
+ val |= ((green >> 5) & 0x07e0);
+ }
lcdc_writel(sinfo, ATMEL_LCDC_LUT(regno), val);
ret = 0;
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
index 44bdce4242ad..622f12b62a47 100644
--- a/drivers/video/aty/atyfb_base.c
+++ b/drivers/video/aty/atyfb_base.c
@@ -301,9 +301,9 @@ static struct fb_ops atyfb_ops = {
.fb_sync = atyfb_sync,
};
-static int noaccel;
+static bool noaccel;
#ifdef CONFIG_MTRR
-static int nomtrr;
+static bool nomtrr;
#endif
static int vram;
static int pll;
diff --git a/drivers/video/aty/radeon_base.c b/drivers/video/aty/radeon_base.c
index 150684882ef7..ce1506b75adf 100644
--- a/drivers/video/aty/radeon_base.c
+++ b/drivers/video/aty/radeon_base.c
@@ -263,19 +263,19 @@ static reg_val common_regs[] = {
static char *mode_option;
static char *monitor_layout;
-static int noaccel = 0;
+static bool noaccel = 0;
static int default_dynclk = -2;
-static int nomodeset = 0;
-static int ignore_edid = 0;
-static int mirror = 0;
+static bool nomodeset = 0;
+static bool ignore_edid = 0;
+static bool mirror = 0;
static int panel_yres = 0;
-static int force_dfp = 0;
-static int force_measure_pll = 0;
+static bool force_dfp = 0;
+static bool force_measure_pll = 0;
#ifdef CONFIG_MTRR
-static int nomtrr = 0;
+static bool nomtrr = 0;
#endif
-static int force_sleep;
-static int ignore_devlist;
+static bool force_sleep;
+static bool ignore_devlist;
#ifdef CONFIG_PMAC_BACKLIGHT
static int backlight = 1;
#else
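
Note: the atyfb and radeon hunks above switch several module parameters from int to bool, matching what the module parameter core now expects for bool-typed parameters. A minimal sketch of the resulting pattern (the description string is illustrative, not taken from the patch):

	/* sketch only: bool module parameter backed by a real bool */
	static bool noaccel;
	module_param(noaccel, bool, 0);
	MODULE_PARM_DESC(noaccel, "Disable hardware acceleration (illustrative text)");
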
diff --git a/drivers/video/au1100fb.c b/drivers/video/au1100fb.c
index 649cb35de4ed..de9da6774fd9 100644
--- a/drivers/video/au1100fb.c
+++ b/drivers/video/au1100fb.c
@@ -60,18 +60,6 @@
#include "au1100fb.h"
-/*
- * Sanity check. If this is a new Au1100 based board, search for
- * the PB1100 ifdefs to make sure you modify the code accordingly.
- */
-#if defined(CONFIG_MIPS_PB1100)
- #include <asm/mach-pb1x00/pb1100.h>
-#elif defined(CONFIG_MIPS_DB1100)
- #include <asm/mach-db1x00/db1x00.h>
-#else
- #error "Unknown Au1100 board, Au1100 FB driver not supported"
-#endif
-
#define DRIVER_NAME "au1100fb"
#define DRIVER_DESC "LCD controller driver for AU1100 processors"
diff --git a/drivers/video/au1200fb.c b/drivers/video/au1200fb.c
index 72005598040f..04e4479d5afd 100644
--- a/drivers/video/au1200fb.c
+++ b/drivers/video/au1200fb.c
@@ -44,6 +44,7 @@
#include <linux/slab.h>
#include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-au1x00/au1200fb.h> /* platform_data */
#include "au1200fb.h"
#define DRIVER_NAME "au1200fb"
@@ -143,6 +144,7 @@ struct au1200_lcd_iodata_t {
/* Private, per-framebuffer management information (independent of the panel itself) */
struct au1200fb_device {
struct fb_info *fb_info; /* FB driver info record */
+ struct au1200fb_platdata *pd;
int plane;
unsigned char* fb_mem; /* FrameBuffer memory map */
@@ -201,9 +203,6 @@ struct window_settings {
#define LCD_WINCTRL1_PO_16BPP LCD_WINCTRL1_PO_01
#endif
-extern int board_au1200fb_panel_init (void);
-extern int board_au1200fb_panel_shutdown (void);
-
/*
* Default window configurations
*/
@@ -334,8 +333,6 @@ struct panel_settings
uint32 mode_toyclksrc;
uint32 mode_backlight;
uint32 mode_auxpll;
- int (*device_init)(void);
- int (*device_shutdown)(void);
#define Xres min_xres
#define Yres min_yres
u32 min_xres; /* Minimum horizontal resolution */
@@ -385,8 +382,6 @@ static struct panel_settings known_lcd_panels[] =
.mode_toyclksrc = 0x00000004, /* AUXPLL directly */
.mode_backlight = 0x00000000,
.mode_auxpll = 8, /* 96MHz AUXPLL */
- .device_init = NULL,
- .device_shutdown = NULL,
320, 320,
240, 240,
},
@@ -415,8 +410,6 @@ static struct panel_settings known_lcd_panels[] =
.mode_toyclksrc = 0x00000004, /* AUXPLL directly */
.mode_backlight = 0x00000000,
.mode_auxpll = 8, /* 96MHz AUXPLL */
- .device_init = NULL,
- .device_shutdown = NULL,
640, 480,
640, 480,
},
@@ -445,8 +438,6 @@ static struct panel_settings known_lcd_panels[] =
.mode_toyclksrc = 0x00000004, /* AUXPLL directly */
.mode_backlight = 0x00000000,
.mode_auxpll = 8, /* 96MHz AUXPLL */
- .device_init = NULL,
- .device_shutdown = NULL,
800, 800,
600, 600,
},
@@ -475,8 +466,6 @@ static struct panel_settings known_lcd_panels[] =
.mode_toyclksrc = 0x00000004, /* AUXPLL directly */
.mode_backlight = 0x00000000,
.mode_auxpll = 6, /* 72MHz AUXPLL */
- .device_init = NULL,
- .device_shutdown = NULL,
1024, 1024,
768, 768,
},
@@ -505,8 +494,6 @@ static struct panel_settings known_lcd_panels[] =
.mode_toyclksrc = 0x00000004, /* AUXPLL directly */
.mode_backlight = 0x00000000,
.mode_auxpll = 10, /* 120MHz AUXPLL */
- .device_init = NULL,
- .device_shutdown = NULL,
1280, 1280,
1024, 1024,
},
@@ -535,8 +522,6 @@ static struct panel_settings known_lcd_panels[] =
.mode_toyclksrc = 0x00000004, /* AUXPLL directly */
.mode_backlight = 0x00000000,
.mode_auxpll = 8, /* 96MHz AUXPLL */
- .device_init = board_au1200fb_panel_init,
- .device_shutdown = board_au1200fb_panel_shutdown,
1024, 1024,
768, 768,
},
@@ -568,8 +553,6 @@ static struct panel_settings known_lcd_panels[] =
.mode_toyclksrc = 0x00000004, /* AUXPLL directly */
.mode_backlight = 0x00000000,
.mode_auxpll = 8, /* 96MHz AUXPLL */
- .device_init = board_au1200fb_panel_init,
- .device_shutdown = board_au1200fb_panel_shutdown,
640, 480,
640, 480,
},
@@ -601,8 +584,6 @@ static struct panel_settings known_lcd_panels[] =
.mode_toyclksrc = 0x00000004, /* AUXPLL directly */
.mode_backlight = 0x00000000,
.mode_auxpll = 8, /* 96MHz AUXPLL */
- .device_init = board_au1200fb_panel_init,
- .device_shutdown = board_au1200fb_panel_shutdown,
320, 320,
240, 240,
},
@@ -634,11 +615,43 @@ static struct panel_settings known_lcd_panels[] =
.mode_toyclksrc = 0x00000004, /* AUXPLL directly */
.mode_backlight = 0x00000000,
.mode_auxpll = 8, /* 96MHz AUXPLL */
- .device_init = board_au1200fb_panel_init,
- .device_shutdown = board_au1200fb_panel_shutdown,
856, 856,
480, 480,
},
+ [9] = {
+ .name = "DB1300_800x480",
+ .monspecs = {
+ .modedb = NULL,
+ .modedb_len = 0,
+ .hfmin = 30000,
+ .hfmax = 70000,
+ .vfmin = 60,
+ .vfmax = 60,
+ .dclkmin = 6000000,
+ .dclkmax = 28000000,
+ .input = FB_DISP_RGB,
+ },
+ .mode_screen = LCD_SCREEN_SX_N(800) |
+ LCD_SCREEN_SY_N(480),
+ .mode_horztiming = LCD_HORZTIMING_HPW_N(5) |
+ LCD_HORZTIMING_HND1_N(16) |
+ LCD_HORZTIMING_HND2_N(8),
+ .mode_verttiming = LCD_VERTTIMING_VPW_N(4) |
+ LCD_VERTTIMING_VND1_N(8) |
+ LCD_VERTTIMING_VND2_N(5),
+ .mode_clkcontrol = LCD_CLKCONTROL_PCD_N(1) |
+ LCD_CLKCONTROL_IV |
+ LCD_CLKCONTROL_IH,
+ .mode_pwmdiv = 0x00000000,
+ .mode_pwmhi = 0x00000000,
+ .mode_outmask = 0x00FFFFFF,
+ .mode_fifoctrl = 0x2f2f2f2f,
+ .mode_toyclksrc = 0x00000004, /* AUXPLL directly */
+ .mode_backlight = 0x00000000,
+ .mode_auxpll = (48/12) * 2,
+ 800, 800,
+ 480, 480,
+ },
};
#define NUM_PANELS (ARRAY_SIZE(known_lcd_panels))
@@ -764,7 +777,8 @@ static int au1200_setlocation (struct au1200fb_device *fbdev, int plane,
return 0;
}
-static void au1200_setpanel (struct panel_settings *newpanel)
+static void au1200_setpanel(struct panel_settings *newpanel,
+ struct au1200fb_platdata *pd)
{
/*
* Perform global setup/init of LCD controller
@@ -798,8 +812,8 @@ static void au1200_setpanel (struct panel_settings *newpanel)
the controller, the clock cannot be turned off before first
shutting down the controller.
*/
- if (panel->device_shutdown != NULL)
- panel->device_shutdown();
+ if (pd->panel_shutdown)
+ pd->panel_shutdown();
}
/* Newpanel == NULL indicates a shutdown operation only */
@@ -852,7 +866,8 @@ static void au1200_setpanel (struct panel_settings *newpanel)
au_sync();
/* Call init of panel */
- if (panel->device_init != NULL) panel->device_init();
+ if (pd->panel_init)
+ pd->panel_init();
/* FIX!!!! not appropriate on panel change!!! Global setup/init */
lcd->intenable = 0;
@@ -1185,6 +1200,8 @@ static int au1200fb_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
*/
static int au1200fb_fb_blank(int blank_mode, struct fb_info *fbi)
{
+ struct au1200fb_device *fbdev = fbi->par;
+
/* Short-circuit screen blanking */
if (noblanking)
return 0;
@@ -1194,13 +1211,13 @@ static int au1200fb_fb_blank(int blank_mode, struct fb_info *fbi)
case FB_BLANK_UNBLANK:
case FB_BLANK_NORMAL:
/* printk("turn on panel\n"); */
- au1200_setpanel(panel);
+ au1200_setpanel(panel, fbdev->pd);
break;
case FB_BLANK_VSYNC_SUSPEND:
case FB_BLANK_HSYNC_SUSPEND:
case FB_BLANK_POWERDOWN:
/* printk("turn off panel\n"); */
- au1200_setpanel(NULL);
+ au1200_setpanel(NULL, fbdev->pd);
break;
default:
break;
@@ -1428,6 +1445,7 @@ static void get_window(unsigned int plane,
static int au1200fb_ioctl(struct fb_info *info, unsigned int cmd,
unsigned long arg)
{
+ struct au1200fb_device *fbdev = info->par;
int plane;
int val;
@@ -1472,7 +1490,7 @@ static int au1200fb_ioctl(struct fb_info *info, unsigned int cmd,
struct panel_settings *newpanel;
panel_index = iodata.global.panel_choice;
newpanel = &known_lcd_panels[panel_index];
- au1200_setpanel(newpanel);
+ au1200_setpanel(newpanel, fbdev->pd);
}
break;
@@ -1588,22 +1606,102 @@ static int au1200fb_init_fbinfo(struct au1200fb_device *fbdev)
/*-------------------------------------------------------------------------*/
-/* AU1200 LCD controller device driver */
+static int au1200fb_setup(struct au1200fb_platdata *pd)
+{
+ char *options = NULL;
+ char *this_opt, *endptr;
+ int num_panels = ARRAY_SIZE(known_lcd_panels);
+ int panel_idx = -1;
+
+ fb_get_options(DRIVER_NAME, &options);
+
+ if (!options)
+ goto out;
+
+ while ((this_opt = strsep(&options, ",")) != NULL) {
+ /* Panel option - can be panel name,
+ * "bs" for board-switch, or number/index */
+ if (!strncmp(this_opt, "panel:", 6)) {
+ int i;
+ long int li;
+ char *endptr;
+ this_opt += 6;
+ /* First check for index, which allows
+ * to short circuit this mess */
+ li = simple_strtol(this_opt, &endptr, 0);
+ if (*endptr == '\0')
+ panel_idx = (int)li;
+ else if (strcmp(this_opt, "bs") == 0)
+ panel_idx = pd->panel_index();
+ else {
+ for (i = 0; i < num_panels; i++) {
+ if (!strcmp(this_opt,
+ known_lcd_panels[i].name)) {
+ panel_idx = i;
+ break;
+ }
+ }
+ }
+ if ((panel_idx < 0) || (panel_idx >= num_panels))
+ print_warn("Panel %s not supported!", this_opt);
+ else
+ panel_index = panel_idx;
+
+ } else if (strncmp(this_opt, "nohwcursor", 10) == 0)
+ nohwcursor = 1;
+ else if (strncmp(this_opt, "devices:", 8) == 0) {
+ this_opt += 8;
+ device_count = simple_strtol(this_opt, &endptr, 0);
+ if ((device_count < 0) ||
+ (device_count > MAX_DEVICE_COUNT))
+ device_count = MAX_DEVICE_COUNT;
+ } else if (strncmp(this_opt, "wincfg:", 7) == 0) {
+ this_opt += 7;
+ window_index = simple_strtol(this_opt, &endptr, 0);
+ if ((window_index < 0) ||
+ (window_index >= ARRAY_SIZE(windows)))
+ window_index = DEFAULT_WINDOW_INDEX;
+ } else if (strncmp(this_opt, "off", 3) == 0)
+ return 1;
+ else
+ print_warn("Unsupported option \"%s\"", this_opt);
+ }
+
+out:
+ return 0;
+}
+
+/* AU1200 LCD controller device driver */
static int __devinit au1200fb_drv_probe(struct platform_device *dev)
{
struct au1200fb_device *fbdev;
+ struct au1200fb_platdata *pd;
struct fb_info *fbi = NULL;
unsigned long page;
int bpp, plane, ret, irq;
+ print_info("" DRIVER_DESC "");
+
+ pd = dev->dev.platform_data;
+ if (!pd)
+ return -ENODEV;
+
+ /* Setup driver with options */
+ if (au1200fb_setup(pd))
+ return -ENODEV;
+
+ /* Point to the panel selected */
+ panel = &known_lcd_panels[panel_index];
+ win = &windows[window_index];
+
+ printk(DRIVER_NAME ": Panel %d %s\n", panel_index, panel->name);
+ printk(DRIVER_NAME ": Win %d %s\n", window_index, win->name);
+
/* shut gcc up */
ret = 0;
fbdev = NULL;
- /* Kickstart the panel */
- au1200_setpanel(panel);
-
for (plane = 0; plane < device_count; ++plane) {
bpp = winbpp(win->w[plane].mode_winctrl1);
if (win->w[plane].xres == 0)
@@ -1619,6 +1717,7 @@ static int __devinit au1200fb_drv_probe(struct platform_device *dev)
_au1200fb_infos[plane] = fbi;
fbdev = fbi->par;
fbdev->fb_info = fbi;
+ fbdev->pd = pd;
fbdev->plane = plane;
@@ -1680,6 +1779,11 @@ static int __devinit au1200fb_drv_probe(struct platform_device *dev)
goto failed;
}
+ platform_set_drvdata(dev, pd);
+
+ /* Kickstart the panel */
+ au1200_setpanel(panel, pd);
+
return 0;
failed:
@@ -1699,12 +1803,13 @@ failed:
static int __devexit au1200fb_drv_remove(struct platform_device *dev)
{
+ struct au1200fb_platdata *pd = platform_get_drvdata(dev);
struct au1200fb_device *fbdev;
struct fb_info *fbi;
int plane;
/* Turn off the panel */
- au1200_setpanel(NULL);
+ au1200_setpanel(NULL, pd);
for (plane = 0; plane < device_count; ++plane) {
fbi = _au1200fb_infos[plane];
@@ -1732,7 +1837,8 @@ static int __devexit au1200fb_drv_remove(struct platform_device *dev)
#ifdef CONFIG_PM
static int au1200fb_drv_suspend(struct device *dev)
{
- au1200_setpanel(NULL);
+ struct au1200fb_platdata *pd = dev_get_drvdata(dev);
+ au1200_setpanel(NULL, pd);
lcd->outmask = 0;
au_sync();
@@ -1742,11 +1848,12 @@ static int au1200fb_drv_suspend(struct device *dev)
static int au1200fb_drv_resume(struct device *dev)
{
+ struct au1200fb_platdata *pd = dev_get_drvdata(dev);
struct fb_info *fbi;
int i;
/* Kickstart the panel */
- au1200_setpanel(panel);
+ au1200_setpanel(panel, pd);
for (i = 0; i < device_count; i++) {
fbi = _au1200fb_infos[i];
@@ -1781,100 +1888,8 @@ static struct platform_driver au1200fb_driver = {
/*-------------------------------------------------------------------------*/
-/* Kernel driver */
-
-static int au1200fb_setup(void)
-{
- char *options = NULL;
- char *this_opt, *endptr;
- int num_panels = ARRAY_SIZE(known_lcd_panels);
- int panel_idx = -1;
-
- fb_get_options(DRIVER_NAME, &options);
-
- if (options) {
- while ((this_opt = strsep(&options,",")) != NULL) {
- /* Panel option - can be panel name,
- * "bs" for board-switch, or number/index */
- if (!strncmp(this_opt, "panel:", 6)) {
- int i;
- long int li;
- char *endptr;
- this_opt += 6;
- /* First check for index, which allows
- * to short circuit this mess */
- li = simple_strtol(this_opt, &endptr, 0);
- if (*endptr == '\0') {
- panel_idx = (int)li;
- }
- else if (strcmp(this_opt, "bs") == 0) {
- extern int board_au1200fb_panel(void);
- panel_idx = board_au1200fb_panel();
- }
-
- else
- for (i = 0; i < num_panels; i++) {
- if (!strcmp(this_opt, known_lcd_panels[i].name)) {
- panel_idx = i;
- break;
- }
- }
-
- if ((panel_idx < 0) || (panel_idx >= num_panels)) {
- print_warn("Panel %s not supported!", this_opt);
- }
- else
- panel_index = panel_idx;
- }
-
- else if (strncmp(this_opt, "nohwcursor", 10) == 0) {
- nohwcursor = 1;
- }
-
- else if (strncmp(this_opt, "devices:", 8) == 0) {
- this_opt += 8;
- device_count = simple_strtol(this_opt,
- &endptr, 0);
- if ((device_count < 0) ||
- (device_count > MAX_DEVICE_COUNT))
- device_count = MAX_DEVICE_COUNT;
- }
-
- else if (strncmp(this_opt, "wincfg:", 7) == 0) {
- this_opt += 7;
- window_index = simple_strtol(this_opt,
- &endptr, 0);
- if ((window_index < 0) ||
- (window_index >= ARRAY_SIZE(windows)))
- window_index = DEFAULT_WINDOW_INDEX;
- }
-
- else if (strncmp(this_opt, "off", 3) == 0)
- return 1;
- /* Unsupported option */
- else {
- print_warn("Unsupported option \"%s\"", this_opt);
- }
- }
- }
- return 0;
-}
-
static int __init au1200fb_init(void)
{
- print_info("" DRIVER_DESC "");
-
- /* Setup driver with options */
- if (au1200fb_setup())
- return -ENODEV;
-
- /* Point to the panel selected */
- panel = &known_lcd_panels[panel_index];
- win = &windows[window_index];
-
- printk(DRIVER_NAME ": Panel %d %s\n", panel_index, panel->name);
- printk(DRIVER_NAME ": Win %d %s\n", window_index, win->name);
-
return platform_driver_register(&au1200fb_driver);
}
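
Note: with the board_au1200fb_panel_init()/board_au1200fb_panel_shutdown() externs removed, au1200fb now gets its panel hooks through a struct au1200fb_platdata passed as platform data. A hedged sketch of the board-side wiring this assumes (the stub callbacks below are hypothetical, only the field names come from the patch):

	/* sketch: hypothetical board callbacks handed to the driver */
	static int example_panel_init(void)     { return 0; }
	static int example_panel_shutdown(void) { return 0; }
	static int example_panel_index(void)    { return 0; } /* board-switch readout */

	static struct au1200fb_platdata example_fb_pd = {
		.panel_index    = example_panel_index,
		.panel_init     = example_panel_init,
		.panel_shutdown = example_panel_shutdown,
	};
	/* attached by board code as dev.platform_data of the au1200fb platform
	 * device; probe() reads it back via dev->dev.platform_data */
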
diff --git a/drivers/video/backlight/88pm860x_bl.c b/drivers/video/backlight/88pm860x_bl.c
index 1105fa1ed7f4..a1376dc73d71 100644
--- a/drivers/video/backlight/88pm860x_bl.c
+++ b/drivers/video/backlight/88pm860x_bl.c
@@ -270,17 +270,7 @@ static struct platform_driver pm860x_backlight_driver = {
.remove = pm860x_backlight_remove,
};
-static int __init pm860x_backlight_init(void)
-{
- return platform_driver_register(&pm860x_backlight_driver);
-}
-module_init(pm860x_backlight_init);
-
-static void __exit pm860x_backlight_exit(void)
-{
- platform_driver_unregister(&pm860x_backlight_driver);
-}
-module_exit(pm860x_backlight_exit);
+module_platform_driver(pm860x_backlight_driver);
MODULE_DESCRIPTION("Backlight Driver for Marvell Semiconductor 88PM8606");
MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>");
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index 278aeaa92505..681b36929fe4 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -280,14 +280,6 @@ config BACKLIGHT_WM831X
If you have a backlight driven by the ISINK and DCDC of a
WM831x PMIC say y to enable the backlight driver for it.
-config BACKLIGHT_ADX
- tristate "Avionic Design Xanthos Backlight Driver"
- depends on ARCH_PXA_ADX
- default y
- help
- Say Y to enable the backlight driver on Avionic Design Xanthos-based
- boards.
-
config BACKLIGHT_ADP5520
tristate "Backlight Driver for ADP5520/ADP5501 using WLED"
depends on PMIC_ADP5520
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index fdd1fc4b2770..af5cf654ec7c 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -32,7 +32,6 @@ obj-$(CONFIG_BACKLIGHT_APPLE) += apple_bl.o
obj-$(CONFIG_BACKLIGHT_TOSA) += tosa_bl.o
obj-$(CONFIG_BACKLIGHT_SAHARA) += kb3886_bl.o
obj-$(CONFIG_BACKLIGHT_WM831X) += wm831x_bl.o
-obj-$(CONFIG_BACKLIGHT_ADX) += adx_bl.o
obj-$(CONFIG_BACKLIGHT_ADP5520) += adp5520_bl.o
obj-$(CONFIG_BACKLIGHT_ADP8860) += adp8860_bl.o
obj-$(CONFIG_BACKLIGHT_ADP8870) += adp8870_bl.o
diff --git a/drivers/video/backlight/adp5520_bl.c b/drivers/video/backlight/adp5520_bl.c
index dfb763e9147f..2e630bf1164c 100644
--- a/drivers/video/backlight/adp5520_bl.c
+++ b/drivers/video/backlight/adp5520_bl.c
@@ -384,17 +384,7 @@ static struct platform_driver adp5520_bl_driver = {
.resume = adp5520_bl_resume,
};
-static int __init adp5520_bl_init(void)
-{
- return platform_driver_register(&adp5520_bl_driver);
-}
-module_init(adp5520_bl_init);
-
-static void __exit adp5520_bl_exit(void)
-{
- platform_driver_unregister(&adp5520_bl_driver);
-}
-module_exit(adp5520_bl_exit);
+module_platform_driver(adp5520_bl_driver);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("ADP5520(01) Backlight Driver");
diff --git a/drivers/video/backlight/adx_bl.c b/drivers/video/backlight/adx_bl.c
deleted file mode 100644
index c861c41af442..000000000000
--- a/drivers/video/backlight/adx_bl.c
+++ /dev/null
@@ -1,182 +0,0 @@
-/*
- * linux/drivers/video/backlight/adx.c
- *
- * Copyright (C) 2009 Avionic Design GmbH
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Written by Thierry Reding <thierry.reding@avionic-design.de>
- */
-
-#include <linux/backlight.h>
-#include <linux/fb.h>
-#include <linux/gfp.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-
-/* register definitions */
-#define ADX_BACKLIGHT_CONTROL 0x00
-#define ADX_BACKLIGHT_CONTROL_ENABLE (1 << 0)
-#define ADX_BACKLIGHT_BRIGHTNESS 0x08
-#define ADX_BACKLIGHT_STATUS 0x10
-#define ADX_BACKLIGHT_ERROR 0x18
-
-struct adxbl {
- void __iomem *base;
-};
-
-static int adx_backlight_update_status(struct backlight_device *bldev)
-{
- struct adxbl *bl = bl_get_data(bldev);
- u32 value;
-
- value = bldev->props.brightness;
- writel(value, bl->base + ADX_BACKLIGHT_BRIGHTNESS);
-
- value = readl(bl->base + ADX_BACKLIGHT_CONTROL);
-
- if (bldev->props.state & BL_CORE_FBBLANK)
- value &= ~ADX_BACKLIGHT_CONTROL_ENABLE;
- else
- value |= ADX_BACKLIGHT_CONTROL_ENABLE;
-
- writel(value, bl->base + ADX_BACKLIGHT_CONTROL);
-
- return 0;
-}
-
-static int adx_backlight_get_brightness(struct backlight_device *bldev)
-{
- struct adxbl *bl = bl_get_data(bldev);
- u32 brightness;
-
- brightness = readl(bl->base + ADX_BACKLIGHT_BRIGHTNESS);
- return brightness & 0xff;
-}
-
-static int adx_backlight_check_fb(struct backlight_device *bldev, struct fb_info *fb)
-{
- return 1;
-}
-
-static const struct backlight_ops adx_backlight_ops = {
- .options = 0,
- .update_status = adx_backlight_update_status,
- .get_brightness = adx_backlight_get_brightness,
- .check_fb = adx_backlight_check_fb,
-};
-
-static int __devinit adx_backlight_probe(struct platform_device *pdev)
-{
- struct backlight_properties props;
- struct backlight_device *bldev;
- struct resource *res;
- struct adxbl *bl;
- int ret = 0;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- ret = -ENXIO;
- goto out;
- }
-
- res = devm_request_mem_region(&pdev->dev, res->start,
- resource_size(res), res->name);
- if (!res) {
- ret = -ENXIO;
- goto out;
- }
-
- bl = devm_kzalloc(&pdev->dev, sizeof(*bl), GFP_KERNEL);
- if (!bl) {
- ret = -ENOMEM;
- goto out;
- }
-
- bl->base = devm_ioremap_nocache(&pdev->dev, res->start,
- resource_size(res));
- if (!bl->base) {
- ret = -ENXIO;
- goto out;
- }
-
- memset(&props, 0, sizeof(struct backlight_properties));
- props.type = BACKLIGHT_RAW;
- props.max_brightness = 0xff;
- bldev = backlight_device_register(dev_name(&pdev->dev), &pdev->dev,
- bl, &adx_backlight_ops, &props);
- if (IS_ERR(bldev)) {
- ret = PTR_ERR(bldev);
- goto out;
- }
-
- bldev->props.brightness = 0xff;
- bldev->props.power = FB_BLANK_UNBLANK;
-
- platform_set_drvdata(pdev, bldev);
-
-out:
- return ret;
-}
-
-static int __devexit adx_backlight_remove(struct platform_device *pdev)
-{
- struct backlight_device *bldev;
- int ret = 0;
-
- bldev = platform_get_drvdata(pdev);
- bldev->props.power = FB_BLANK_UNBLANK;
- bldev->props.brightness = 0xff;
- backlight_update_status(bldev);
- backlight_device_unregister(bldev);
- platform_set_drvdata(pdev, NULL);
-
- return ret;
-}
-
-#ifdef CONFIG_PM
-static int adx_backlight_suspend(struct platform_device *pdev,
- pm_message_t state)
-{
- return 0;
-}
-
-static int adx_backlight_resume(struct platform_device *pdev)
-{
- return 0;
-}
-#else
-#define adx_backlight_suspend NULL
-#define adx_backlight_resume NULL
-#endif
-
-static struct platform_driver adx_backlight_driver = {
- .probe = adx_backlight_probe,
- .remove = __devexit_p(adx_backlight_remove),
- .suspend = adx_backlight_suspend,
- .resume = adx_backlight_resume,
- .driver = {
- .name = "adx-backlight",
- .owner = THIS_MODULE,
- },
-};
-
-static int __init adx_backlight_init(void)
-{
- return platform_driver_register(&adx_backlight_driver);
-}
-
-static void __exit adx_backlight_exit(void)
-{
- platform_driver_unregister(&adx_backlight_driver);
-}
-
-module_init(adx_backlight_init);
-module_exit(adx_backlight_exit);
-
-MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
-MODULE_DESCRIPTION("Avionic Design Xanthos Backlight Driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
index 7363c1b169e8..bf5b1ece7160 100644
--- a/drivers/video/backlight/backlight.c
+++ b/drivers/video/backlight/backlight.c
@@ -102,7 +102,7 @@ static void backlight_generate_event(struct backlight_device *bd,
}
static ssize_t backlight_show_power(struct device *dev,
- struct device_attribute *attr,char *buf)
+ struct device_attribute *attr, char *buf)
{
struct backlight_device *bd = to_backlight_device(dev);
@@ -116,7 +116,7 @@ static ssize_t backlight_store_power(struct device *dev,
struct backlight_device *bd = to_backlight_device(dev);
unsigned long power;
- rc = strict_strtoul(buf, 0, &power);
+ rc = kstrtoul(buf, 0, &power);
if (rc)
return rc;
@@ -150,7 +150,7 @@ static ssize_t backlight_store_brightness(struct device *dev,
struct backlight_device *bd = to_backlight_device(dev);
unsigned long brightness;
- rc = strict_strtoul(buf, 0, &brightness);
+ rc = kstrtoul(buf, 0, &brightness);
if (rc)
return rc;
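
Note: unlike simple_strtoul()/strict_strtoul(), kstrtoul() rejects trailing garbage on its own and returns 0 or a negative errno, which is why the sysfs store handlers above lose their manual end-pointer checks. A minimal sketch of the resulting pattern (handler name is hypothetical):

	/* sketch: sysfs store handler using kstrtoul() */
	static ssize_t example_store(struct device *dev, struct device_attribute *attr,
				     const char *buf, size_t count)
	{
		unsigned long val;
		int rc;

		rc = kstrtoul(buf, 0, &val);
		if (rc)
			return rc;
		/* ...apply val to the device... */
		return count;
	}
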
diff --git a/drivers/video/backlight/da903x_bl.c b/drivers/video/backlight/da903x_bl.c
index d68f14bbb687..abb4a06268f1 100644
--- a/drivers/video/backlight/da903x_bl.c
+++ b/drivers/video/backlight/da903x_bl.c
@@ -199,17 +199,7 @@ static struct platform_driver da903x_backlight_driver = {
.remove = da903x_backlight_remove,
};
-static int __init da903x_backlight_init(void)
-{
- return platform_driver_register(&da903x_backlight_driver);
-}
-module_init(da903x_backlight_init);
-
-static void __exit da903x_backlight_exit(void)
-{
- platform_driver_unregister(&da903x_backlight_driver);
-}
-module_exit(da903x_backlight_exit);
+module_platform_driver(da903x_backlight_driver);
MODULE_DESCRIPTION("Backlight Driver for Dialog Semiconductor DA9030/DA9034");
MODULE_AUTHOR("Eric Miao <eric.miao@marvell.com>"
diff --git a/drivers/video/backlight/ep93xx_bl.c b/drivers/video/backlight/ep93xx_bl.c
index c74a6f4baa12..b62b8b9063b5 100644
--- a/drivers/video/backlight/ep93xx_bl.c
+++ b/drivers/video/backlight/ep93xx_bl.c
@@ -13,7 +13,6 @@
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/module.h>
#include <linux/io.h>
#include <linux/fb.h>
#include <linux/backlight.h>
@@ -144,17 +143,7 @@ static struct platform_driver ep93xxbl_driver = {
.resume = ep93xxbl_resume,
};
-static int __init ep93xxbl_init(void)
-{
- return platform_driver_register(&ep93xxbl_driver);
-}
-module_init(ep93xxbl_init);
-
-static void __exit ep93xxbl_exit(void)
-{
- platform_driver_unregister(&ep93xxbl_driver);
-}
-module_exit(ep93xxbl_exit);
+module_platform_driver(ep93xxbl_driver);
MODULE_DESCRIPTION("EP93xx Backlight Driver");
MODULE_AUTHOR("H Hartley Sweeten <hsweeten@visionengravers.com>");
diff --git a/drivers/video/backlight/generic_bl.c b/drivers/video/backlight/generic_bl.c
index adb191466d64..9ce6170c1860 100644
--- a/drivers/video/backlight/generic_bl.c
+++ b/drivers/video/backlight/generic_bl.c
@@ -132,18 +132,7 @@ static struct platform_driver genericbl_driver = {
},
};
-static int __init genericbl_init(void)
-{
- return platform_driver_register(&genericbl_driver);
-}
-
-static void __exit genericbl_exit(void)
-{
- platform_driver_unregister(&genericbl_driver);
-}
-
-module_init(genericbl_init);
-module_exit(genericbl_exit);
+module_platform_driver(genericbl_driver);
MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>");
MODULE_DESCRIPTION("Generic Backlight Driver");
diff --git a/drivers/video/backlight/jornada720_bl.c b/drivers/video/backlight/jornada720_bl.c
index de65d80159be..2f8af5d786ab 100644
--- a/drivers/video/backlight/jornada720_bl.c
+++ b/drivers/video/backlight/jornada720_bl.c
@@ -147,19 +147,8 @@ static struct platform_driver jornada_bl_driver = {
},
};
-static int __init jornada_bl_init(void)
-{
- return platform_driver_register(&jornada_bl_driver);
-}
-
-static void __exit jornada_bl_exit(void)
-{
- platform_driver_unregister(&jornada_bl_driver);
-}
+module_platform_driver(jornada_bl_driver);
MODULE_AUTHOR("Kristoffer Ericson <kristoffer.ericson>");
MODULE_DESCRIPTION("HP Jornada 710/720/728 Backlight driver");
MODULE_LICENSE("GPL");
-
-module_init(jornada_bl_init);
-module_exit(jornada_bl_exit);
diff --git a/drivers/video/backlight/jornada720_lcd.c b/drivers/video/backlight/jornada720_lcd.c
index d2ff658b4144..22d231a17e3c 100644
--- a/drivers/video/backlight/jornada720_lcd.c
+++ b/drivers/video/backlight/jornada720_lcd.c
@@ -135,19 +135,8 @@ static struct platform_driver jornada_lcd_driver = {
},
};
-static int __init jornada_lcd_init(void)
-{
- return platform_driver_register(&jornada_lcd_driver);
-}
-
-static void __exit jornada_lcd_exit(void)
-{
- platform_driver_unregister(&jornada_lcd_driver);
-}
+module_platform_driver(jornada_lcd_driver);
MODULE_AUTHOR("Kristoffer Ericson <kristoffer.ericson@gmail.com>");
MODULE_DESCRIPTION("HP Jornada 710/720/728 LCD driver");
MODULE_LICENSE("GPL");
-
-module_init(jornada_lcd_init);
-module_exit(jornada_lcd_exit);
diff --git a/drivers/video/backlight/lcd.c b/drivers/video/backlight/lcd.c
index 71a11cadffc4..79c1b0d609a8 100644
--- a/drivers/video/backlight/lcd.c
+++ b/drivers/video/backlight/lcd.c
@@ -97,19 +97,16 @@ static ssize_t lcd_store_power(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
int rc = -ENXIO;
- char *endp;
struct lcd_device *ld = to_lcd_device(dev);
- int power = simple_strtoul(buf, &endp, 0);
- size_t size = endp - buf;
+ unsigned long power;
- if (isspace(*endp))
- size++;
- if (size != count)
- return -EINVAL;
+ rc = kstrtoul(buf, 0, &power);
+ if (rc)
+ return rc;
mutex_lock(&ld->ops_lock);
if (ld->ops && ld->ops->set_power) {
- pr_debug("lcd: set power to %d\n", power);
+ pr_debug("lcd: set power to %lu\n", power);
ld->ops->set_power(ld, power);
rc = count;
}
@@ -136,19 +133,16 @@ static ssize_t lcd_store_contrast(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
int rc = -ENXIO;
- char *endp;
struct lcd_device *ld = to_lcd_device(dev);
- int contrast = simple_strtoul(buf, &endp, 0);
- size_t size = endp - buf;
+ unsigned long contrast;
- if (isspace(*endp))
- size++;
- if (size != count)
- return -EINVAL;
+ rc = kstrtoul(buf, 0, &contrast);
+ if (rc)
+ return rc;
mutex_lock(&ld->ops_lock);
if (ld->ops && ld->ops->set_contrast) {
- pr_debug("lcd: set contrast to %d\n", contrast);
+ pr_debug("lcd: set contrast to %lu\n", contrast);
ld->ops->set_contrast(ld, contrast);
rc = count;
}
diff --git a/drivers/video/backlight/ld9040.c b/drivers/video/backlight/ld9040.c
index da9a5ce0ccb8..78dafc0c8fc5 100644
--- a/drivers/video/backlight/ld9040.c
+++ b/drivers/video/backlight/ld9040.c
@@ -31,6 +31,7 @@
#include <linux/lcd.h>
#include <linux/backlight.h>
#include <linux/module.h>
+#include <linux/regulator/consumer.h>
#include "ld9040_gamma.h"
@@ -53,8 +54,51 @@ struct ld9040 {
struct lcd_device *ld;
struct backlight_device *bd;
struct lcd_platform_data *lcd_pd;
+
+ struct mutex lock;
+ bool enabled;
+};
+
+static struct regulator_bulk_data supplies[] = {
+ { .supply = "vdd3", },
+ { .supply = "vci", },
};
+static void ld9040_regulator_enable(struct ld9040 *lcd)
+{
+ int ret = 0;
+ struct lcd_platform_data *pd = NULL;
+
+ pd = lcd->lcd_pd;
+ mutex_lock(&lcd->lock);
+ if (!lcd->enabled) {
+ ret = regulator_bulk_enable(ARRAY_SIZE(supplies), supplies);
+ if (ret)
+ goto out;
+
+ lcd->enabled = true;
+ }
+ mdelay(pd->power_on_delay);
+out:
+ mutex_unlock(&lcd->lock);
+}
+
+static void ld9040_regulator_disable(struct ld9040 *lcd)
+{
+ int ret = 0;
+
+ mutex_lock(&lcd->lock);
+ if (lcd->enabled) {
+ ret = regulator_bulk_disable(ARRAY_SIZE(supplies), supplies);
+ if (ret)
+ goto out;
+
+ lcd->enabled = false;
+ }
+out:
+ mutex_unlock(&lcd->lock);
+}
+
static const unsigned short seq_swreset[] = {
0x01, COMMAND_ONLY,
ENDDEF, 0x00
@@ -532,13 +576,8 @@ static int ld9040_power_on(struct ld9040 *lcd)
return -EFAULT;
}
- if (!pd->power_on) {
- dev_err(lcd->dev, "power_on is NULL.\n");
- return -EFAULT;
- } else {
- pd->power_on(lcd->ld, 1);
- mdelay(pd->power_on_delay);
- }
+ /* lcd power on */
+ ld9040_regulator_enable(lcd);
if (!pd->reset) {
dev_err(lcd->dev, "reset is NULL.\n");
@@ -582,11 +621,8 @@ static int ld9040_power_off(struct ld9040 *lcd)
mdelay(pd->power_off_delay);
- if (!pd->power_on) {
- dev_err(lcd->dev, "power_on is NULL.\n");
- return -EFAULT;
- } else
- pd->power_on(lcd->ld, 0);
+ /* lcd power off */
+ ld9040_regulator_disable(lcd);
return 0;
}
@@ -693,6 +729,14 @@ static int ld9040_probe(struct spi_device *spi)
goto out_free_lcd;
}
+ mutex_init(&lcd->lock);
+
+ ret = regulator_bulk_get(lcd->dev, ARRAY_SIZE(supplies), supplies);
+ if (ret) {
+ dev_err(lcd->dev, "Failed to get regulators: %d\n", ret);
+ goto out_free_lcd;
+ }
+
ld = lcd_device_register("ld9040", &spi->dev, lcd, &ld9040_lcd_ops);
if (IS_ERR(ld)) {
ret = PTR_ERR(ld);
@@ -739,6 +783,8 @@ static int ld9040_probe(struct spi_device *spi)
out_unregister_lcd:
lcd_device_unregister(lcd->ld);
out_free_lcd:
+ regulator_bulk_free(ARRAY_SIZE(supplies), supplies);
+
kfree(lcd);
return ret;
}
@@ -750,6 +796,7 @@ static int __devexit ld9040_remove(struct spi_device *spi)
ld9040_power(lcd, FB_BLANK_POWERDOWN);
backlight_device_unregister(lcd->bd);
lcd_device_unregister(lcd->ld);
+ regulator_bulk_free(ARRAY_SIZE(supplies), supplies);
kfree(lcd);
return 0;
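
Note: the ld9040 change swaps the board-supplied power_on() callback for the regulator API; regulator_bulk_get()/regulator_bulk_enable()/regulator_bulk_disable() handle a set of supplies as one unit. A minimal sketch, assuming the supply names match the board's regulator setup:

	/* sketch: supplies obtained once (e.g. at probe) with regulator_bulk_get() */
	static struct regulator_bulk_data example_supplies[] = {
		{ .supply = "vdd3" },
		{ .supply = "vci"  },
	};

	static int example_power(bool on)
	{
		if (on)
			return regulator_bulk_enable(ARRAY_SIZE(example_supplies),
						     example_supplies);
		return regulator_bulk_disable(ARRAY_SIZE(example_supplies),
					      example_supplies);
	}
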
diff --git a/drivers/video/backlight/max8925_bl.c b/drivers/video/backlight/max8925_bl.c
index 7bbc802560ea..c915e3b53886 100644
--- a/drivers/video/backlight/max8925_bl.c
+++ b/drivers/video/backlight/max8925_bl.c
@@ -188,17 +188,7 @@ static struct platform_driver max8925_backlight_driver = {
.remove = __devexit_p(max8925_backlight_remove),
};
-static int __init max8925_backlight_init(void)
-{
- return platform_driver_register(&max8925_backlight_driver);
-}
-module_init(max8925_backlight_init);
-
-static void __exit max8925_backlight_exit(void)
-{
- platform_driver_unregister(&max8925_backlight_driver);
-};
-module_exit(max8925_backlight_exit);
+module_platform_driver(max8925_backlight_driver);
MODULE_DESCRIPTION("Backlight Driver for Maxim MAX8925");
MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>");
diff --git a/drivers/video/backlight/omap1_bl.c b/drivers/video/backlight/omap1_bl.c
index 08d26a72394c..d8cde277ec83 100644
--- a/drivers/video/backlight/omap1_bl.c
+++ b/drivers/video/backlight/omap1_bl.c
@@ -195,18 +195,7 @@ static struct platform_driver omapbl_driver = {
},
};
-static int __init omapbl_init(void)
-{
- return platform_driver_register(&omapbl_driver);
-}
-
-static void __exit omapbl_exit(void)
-{
- platform_driver_unregister(&omapbl_driver);
-}
-
-module_init(omapbl_init);
-module_exit(omapbl_exit);
+module_platform_driver(omapbl_driver);
MODULE_AUTHOR("Andrzej Zaborowski <balrog@zabor.org>");
MODULE_DESCRIPTION("OMAP LCD Backlight driver");
diff --git a/drivers/video/backlight/pcf50633-backlight.c b/drivers/video/backlight/pcf50633-backlight.c
index ef5628d60563..13e88b71daec 100644
--- a/drivers/video/backlight/pcf50633-backlight.c
+++ b/drivers/video/backlight/pcf50633-backlight.c
@@ -173,17 +173,7 @@ static struct platform_driver pcf50633_bl_driver = {
},
};
-static int __init pcf50633_bl_init(void)
-{
- return platform_driver_register(&pcf50633_bl_driver);
-}
-module_init(pcf50633_bl_init);
-
-static void __exit pcf50633_bl_exit(void)
-{
- platform_driver_unregister(&pcf50633_bl_driver);
-}
-module_exit(pcf50633_bl_exit);
+module_platform_driver(pcf50633_bl_driver);
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("PCF50633 backlight driver");
diff --git a/drivers/video/backlight/platform_lcd.c b/drivers/video/backlight/platform_lcd.c
index 302330acf628..f0bf491ed087 100644
--- a/drivers/video/backlight/platform_lcd.c
+++ b/drivers/video/backlight/platform_lcd.c
@@ -85,7 +85,8 @@ static int __devinit platform_lcd_probe(struct platform_device *pdev)
return -EINVAL;
}
- plcd = kzalloc(sizeof(struct platform_lcd), GFP_KERNEL);
+ plcd = devm_kzalloc(&pdev->dev, sizeof(struct platform_lcd),
+ GFP_KERNEL);
if (!plcd) {
dev_err(dev, "no memory for state\n");
return -ENOMEM;
@@ -98,7 +99,7 @@ static int __devinit platform_lcd_probe(struct platform_device *pdev)
if (IS_ERR(plcd->lcd)) {
dev_err(dev, "cannot register lcd device\n");
err = PTR_ERR(plcd->lcd);
- goto err_mem;
+ goto err;
}
platform_set_drvdata(pdev, plcd);
@@ -106,8 +107,7 @@ static int __devinit platform_lcd_probe(struct platform_device *pdev)
return 0;
- err_mem:
- kfree(plcd);
+ err:
return err;
}
@@ -116,7 +116,6 @@ static int __devexit platform_lcd_remove(struct platform_device *pdev)
struct platform_lcd *plcd = platform_get_drvdata(pdev);
lcd_device_unregister(plcd->lcd);
- kfree(plcd);
return 0;
}
@@ -157,18 +156,7 @@ static struct platform_driver platform_lcd_driver = {
.resume = platform_lcd_resume,
};
-static int __init platform_lcd_init(void)
-{
- return platform_driver_register(&platform_lcd_driver);
-}
-
-static void __exit platform_lcd_cleanup(void)
-{
- platform_driver_unregister(&platform_lcd_driver);
-}
-
-module_init(platform_lcd_init);
-module_exit(platform_lcd_cleanup);
+module_platform_driver(platform_lcd_driver);
MODULE_AUTHOR("Ben Dooks <ben-linux@fluff.org>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index 8b5b2a4124c7..7496d04e1d3c 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -169,10 +169,9 @@ static int pwm_backlight_remove(struct platform_device *pdev)
}
#ifdef CONFIG_PM
-static int pwm_backlight_suspend(struct platform_device *pdev,
- pm_message_t state)
+static int pwm_backlight_suspend(struct device *dev)
{
- struct backlight_device *bl = platform_get_drvdata(pdev);
+ struct backlight_device *bl = dev_get_drvdata(dev);
struct pwm_bl_data *pb = dev_get_drvdata(&bl->dev);
if (pb->notify)
@@ -184,40 +183,32 @@ static int pwm_backlight_suspend(struct platform_device *pdev,
return 0;
}
-static int pwm_backlight_resume(struct platform_device *pdev)
+static int pwm_backlight_resume(struct device *dev)
{
- struct backlight_device *bl = platform_get_drvdata(pdev);
+ struct backlight_device *bl = dev_get_drvdata(dev);
backlight_update_status(bl);
return 0;
}
-#else
-#define pwm_backlight_suspend NULL
-#define pwm_backlight_resume NULL
+
+static SIMPLE_DEV_PM_OPS(pwm_backlight_pm_ops, pwm_backlight_suspend,
+ pwm_backlight_resume);
+
#endif
static struct platform_driver pwm_backlight_driver = {
.driver = {
.name = "pwm-backlight",
.owner = THIS_MODULE,
+#ifdef CONFIG_PM
+ .pm = &pwm_backlight_pm_ops,
+#endif
},
.probe = pwm_backlight_probe,
.remove = pwm_backlight_remove,
- .suspend = pwm_backlight_suspend,
- .resume = pwm_backlight_resume,
};
-static int __init pwm_backlight_init(void)
-{
- return platform_driver_register(&pwm_backlight_driver);
-}
-module_init(pwm_backlight_init);
-
-static void __exit pwm_backlight_exit(void)
-{
- platform_driver_unregister(&pwm_backlight_driver);
-}
-module_exit(pwm_backlight_exit);
+module_platform_driver(pwm_backlight_driver);
MODULE_DESCRIPTION("PWM based Backlight Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/video/backlight/wm831x_bl.c b/drivers/video/backlight/wm831x_bl.c
index fbe9e9316f3b..4e915f5eca99 100644
--- a/drivers/video/backlight/wm831x_bl.c
+++ b/drivers/video/backlight/wm831x_bl.c
@@ -236,17 +236,7 @@ static struct platform_driver wm831x_backlight_driver = {
.remove = wm831x_backlight_remove,
};
-static int __init wm831x_backlight_init(void)
-{
- return platform_driver_register(&wm831x_backlight_driver);
-}
-module_init(wm831x_backlight_init);
-
-static void __exit wm831x_backlight_exit(void)
-{
- platform_driver_unregister(&wm831x_backlight_driver);
-}
-module_exit(wm831x_backlight_exit);
+module_platform_driver(wm831x_backlight_driver);
MODULE_DESCRIPTION("Backlight Driver for WM831x PMICs");
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com");
diff --git a/drivers/video/bf54x-lq043fb.c b/drivers/video/bf54x-lq043fb.c
index 56720fb476b3..46b03f53985f 100644
--- a/drivers/video/bf54x-lq043fb.c
+++ b/drivers/video/bf54x-lq043fb.c
@@ -4,7 +4,7 @@
* Author: Michael Hennerich <hennerich@blackfin.uclinux.org>
*
* Created:
- * Description: ADSP-BF54x Framebufer driver
+ * Description: ADSP-BF54x Framebuffer driver
*
*
* Modified:
diff --git a/drivers/video/bfin-t350mcqb-fb.c b/drivers/video/bfin-t350mcqb-fb.c
index d5e126759612..7a0c05f3537e 100644
--- a/drivers/video/bfin-t350mcqb-fb.c
+++ b/drivers/video/bfin-t350mcqb-fb.c
@@ -4,7 +4,7 @@
* Author: Michael Hennerich <hennerich@blackfin.uclinux.org>
*
* Created:
- * Description: Blackfin LCD Framebufer driver
+ * Description: Blackfin LCD Framebuffer driver
*
*
* Modified:
diff --git a/drivers/video/cirrusfb.c b/drivers/video/cirrusfb.c
index 6df7c54db0a3..738c8ce7d132 100644
--- a/drivers/video/cirrusfb.c
+++ b/drivers/video/cirrusfb.c
@@ -280,52 +280,74 @@ MODULE_DEVICE_TABLE(pci, cirrusfb_pci_table);
#endif /* CONFIG_PCI */
#ifdef CONFIG_ZORRO
-static const struct zorro_device_id cirrusfb_zorro_table[] = {
+struct zorrocl {
+ enum cirrus_board type; /* Board type */
+ u32 regoffset; /* Offset of registers in first Zorro device */
+ u32 ramsize; /* Size of video RAM in first Zorro device */
+ /* If zero, use autoprobe on RAM device */
+ u32 ramoffset; /* Offset of video RAM in first Zorro device */
+ zorro_id ramid; /* Zorro ID of RAM device */
+ zorro_id ramid2; /* Zorro ID of optional second RAM device */
+};
+
+static const struct zorrocl zcl_sd64 __devinitconst = {
+ .type = BT_SD64,
+ .ramid = ZORRO_PROD_HELFRICH_SD64_RAM,
+};
+
+static const struct zorrocl zcl_piccolo __devinitconst = {
+ .type = BT_PICCOLO,
+ .ramid = ZORRO_PROD_HELFRICH_PICCOLO_RAM,
+};
+
+static const struct zorrocl zcl_picasso __devinitconst = {
+ .type = BT_PICASSO,
+ .ramid = ZORRO_PROD_VILLAGE_TRONIC_PICASSO_II_II_PLUS_RAM,
+};
+
+static const struct zorrocl zcl_spectrum __devinitconst = {
+ .type = BT_SPECTRUM,
+ .ramid = ZORRO_PROD_GVP_EGS_28_24_SPECTRUM_RAM,
+};
+
+static const struct zorrocl zcl_picasso4_z3 __devinitconst = {
+ .type = BT_PICASSO4,
+ .regoffset = 0x00600000,
+ .ramsize = 4 * MB_,
+ .ramoffset = 0x01000000, /* 0x02000000 for 64 MiB boards */
+};
+
+static const struct zorrocl zcl_picasso4_z2 __devinitconst = {
+ .type = BT_PICASSO4,
+ .regoffset = 0x10000,
+ .ramid = ZORRO_PROD_VILLAGE_TRONIC_PICASSO_IV_Z2_RAM1,
+ .ramid2 = ZORRO_PROD_VILLAGE_TRONIC_PICASSO_IV_Z2_RAM2,
+};
+
+
+static const struct zorro_device_id cirrusfb_zorro_table[] __devinitconst = {
{
- .id = ZORRO_PROD_HELFRICH_SD64_RAM,
- .driver_data = BT_SD64,
+ .id = ZORRO_PROD_HELFRICH_SD64_REG,
+ .driver_data = (unsigned long)&zcl_sd64,
}, {
- .id = ZORRO_PROD_HELFRICH_PICCOLO_RAM,
- .driver_data = BT_PICCOLO,
+ .id = ZORRO_PROD_HELFRICH_PICCOLO_REG,
+ .driver_data = (unsigned long)&zcl_piccolo,
}, {
- .id = ZORRO_PROD_VILLAGE_TRONIC_PICASSO_II_II_PLUS_RAM,
- .driver_data = BT_PICASSO,
+ .id = ZORRO_PROD_VILLAGE_TRONIC_PICASSO_II_II_PLUS_REG,
+ .driver_data = (unsigned long)&zcl_picasso,
}, {
- .id = ZORRO_PROD_GVP_EGS_28_24_SPECTRUM_RAM,
- .driver_data = BT_SPECTRUM,
+ .id = ZORRO_PROD_GVP_EGS_28_24_SPECTRUM_REG,
+ .driver_data = (unsigned long)&zcl_spectrum,
}, {
.id = ZORRO_PROD_VILLAGE_TRONIC_PICASSO_IV_Z3,
- .driver_data = BT_PICASSO4,
+ .driver_data = (unsigned long)&zcl_picasso4_z3,
+ }, {
+ .id = ZORRO_PROD_VILLAGE_TRONIC_PICASSO_IV_Z2_REG,
+ .driver_data = (unsigned long)&zcl_picasso4_z2,
},
{ 0 }
};
MODULE_DEVICE_TABLE(zorro, cirrusfb_zorro_table);
-
-static const struct {
- zorro_id id2;
- unsigned long size;
-} cirrusfb_zorro_table2[] = {
- [BT_SD64] = {
- .id2 = ZORRO_PROD_HELFRICH_SD64_REG,
- .size = 0x400000
- },
- [BT_PICCOLO] = {
- .id2 = ZORRO_PROD_HELFRICH_PICCOLO_REG,
- .size = 0x200000
- },
- [BT_PICASSO] = {
- .id2 = ZORRO_PROD_VILLAGE_TRONIC_PICASSO_II_II_PLUS_REG,
- .size = 0x200000
- },
- [BT_SPECTRUM] = {
- .id2 = ZORRO_PROD_GVP_EGS_28_24_SPECTRUM_REG,
- .size = 0x200000
- },
- [BT_PICASSO4] = {
- .id2 = 0,
- .size = 0x400000
- }
-};
#endif /* CONFIG_ZORRO */
#ifdef CIRRUSFB_DEBUG
@@ -350,7 +372,7 @@ struct cirrusfb_info {
void (*unmap)(struct fb_info *info);
};
-static int noaccel __devinitdata;
+static bool noaccel __devinitdata;
static char *mode_option __devinitdata = "640x480@60";
/****************************************************************************/
@@ -1956,16 +1978,12 @@ static void cirrusfb_zorro_unmap(struct fb_info *info)
struct cirrusfb_info *cinfo = info->par;
struct zorro_dev *zdev = to_zorro_dev(info->device);
- zorro_release_device(zdev);
-
- if (cinfo->btype == BT_PICASSO4) {
- cinfo->regbase -= 0x600000;
- iounmap((void *)cinfo->regbase);
+ if (info->fix.smem_start > 16 * MB_)
iounmap(info->screen_base);
- } else {
- if (zorro_resource_start(zdev) > 0x01000000)
- iounmap(info->screen_base);
- }
+ if (info->fix.mmio_start > 16 * MB_)
+ iounmap(cinfo->regbase);
+
+ zorro_release_device(zdev);
}
#endif /* CONFIG_ZORRO */
@@ -2222,115 +2240,116 @@ static struct pci_driver cirrusfb_pci_driver = {
static int __devinit cirrusfb_zorro_register(struct zorro_dev *z,
const struct zorro_device_id *ent)
{
- struct cirrusfb_info *cinfo;
struct fb_info *info;
+ int error;
+ const struct zorrocl *zcl;
enum cirrus_board btype;
- struct zorro_dev *z2 = NULL;
- unsigned long board_addr, board_size, size;
- int ret;
-
- btype = ent->driver_data;
- if (cirrusfb_zorro_table2[btype].id2)
- z2 = zorro_find_device(cirrusfb_zorro_table2[btype].id2, NULL);
- size = cirrusfb_zorro_table2[btype].size;
+ unsigned long regbase, ramsize, rambase;
+ struct cirrusfb_info *cinfo;
info = framebuffer_alloc(sizeof(struct cirrusfb_info), &z->dev);
if (!info) {
printk(KERN_ERR "cirrusfb: could not allocate memory\n");
- ret = -ENOMEM;
- goto err_out;
+ return -ENOMEM;
+ }
+
+ zcl = (const struct zorrocl *)ent->driver_data;
+ btype = zcl->type;
+ regbase = zorro_resource_start(z) + zcl->regoffset;
+ ramsize = zcl->ramsize;
+ if (ramsize) {
+ rambase = zorro_resource_start(z) + zcl->ramoffset;
+ if (zorro_resource_len(z) == 64 * MB_) {
+ /* Quirk for 64 MiB Picasso IV */
+ rambase += zcl->ramoffset;
+ }
+ } else {
+ struct zorro_dev *ram = zorro_find_device(zcl->ramid, NULL);
+ if (!ram || !zorro_resource_len(ram)) {
+ dev_err(info->device, "No video RAM found\n");
+ error = -ENODEV;
+ goto err_release_fb;
+ }
+ rambase = zorro_resource_start(ram);
+ ramsize = zorro_resource_len(ram);
+ if (zcl->ramid2 &&
+ (ram = zorro_find_device(zcl->ramid2, NULL))) {
+ if (zorro_resource_start(ram) != rambase + ramsize) {
+ dev_warn(info->device,
+ "Skipping non-contiguous RAM at %pR\n",
+ &ram->resource);
+ } else {
+ ramsize += zorro_resource_len(ram);
+ }
+ }
}
- dev_info(info->device, "%s board detected\n",
- cirrusfb_board_info[btype].name);
-
- cinfo = info->par;
- cinfo->btype = btype;
-
- assert(z);
- assert(btype != BT_NONE);
-
- board_addr = zorro_resource_start(z);
- board_size = zorro_resource_len(z);
- info->screen_size = size;
+ dev_info(info->device,
+ "%s board detected, REG at 0x%lx, %lu MiB RAM at 0x%lx\n",
+ cirrusfb_board_info[btype].name, regbase, ramsize / MB_,
+ rambase);
if (!zorro_request_device(z, "cirrusfb")) {
- dev_err(info->device, "cannot reserve region 0x%lx, abort\n",
- board_addr);
- ret = -EBUSY;
+ dev_err(info->device, "Cannot reserve %pR\n", &z->resource);
+ error = -EBUSY;
goto err_release_fb;
}
- ret = -EIO;
-
- if (btype == BT_PICASSO4) {
- dev_info(info->device, " REG at $%lx\n", board_addr + 0x600000);
-
- /* To be precise, for the P4 this is not the */
- /* begin of the board, but the begin of RAM. */
- /* for P4, map in its address space in 2 chunks (### TEST! ) */
- /* (note the ugly hardcoded 16M number) */
- cinfo->regbase = ioremap(board_addr, 16777216);
- if (!cinfo->regbase)
- goto err_release_region;
-
- dev_dbg(info->device, "Virtual address for board set to: $%p\n",
- cinfo->regbase);
- cinfo->regbase += 0x600000;
- info->fix.mmio_start = board_addr + 0x600000;
-
- info->fix.smem_start = board_addr + 16777216;
- info->screen_base = ioremap(info->fix.smem_start, 16777216);
- if (!info->screen_base)
- goto err_unmap_regbase;
- } else {
- dev_info(info->device, " REG at $%lx\n",
- (unsigned long) z2->resource.start);
-
- info->fix.smem_start = board_addr;
- if (board_addr > 0x01000000)
- info->screen_base = ioremap(board_addr, board_size);
- else
- info->screen_base = (caddr_t) ZTWO_VADDR(board_addr);
- if (!info->screen_base)
- goto err_release_region;
+ cinfo = info->par;
+ cinfo->btype = btype;
- /* set address for REG area of board */
- cinfo->regbase = (caddr_t) ZTWO_VADDR(z2->resource.start);
- info->fix.mmio_start = z2->resource.start;
+ info->fix.mmio_start = regbase;
+ cinfo->regbase = regbase > 16 * MB_ ? ioremap(regbase, 64 * 1024)
+ : (caddr_t)ZTWO_VADDR(regbase);
+ if (!cinfo->regbase) {
+ dev_err(info->device, "Cannot map registers\n");
+ error = -EIO;
+ goto err_release_dev;
+ }
- dev_dbg(info->device, "Virtual address for board set to: $%p\n",
- cinfo->regbase);
+ info->fix.smem_start = rambase;
+ info->screen_size = ramsize;
+ info->screen_base = rambase > 16 * MB_ ? ioremap(rambase, ramsize)
+ : (caddr_t)ZTWO_VADDR(rambase);
+ if (!info->screen_base) {
+ dev_err(info->device, "Cannot map video RAM\n");
+ error = -EIO;
+ goto err_unmap_reg;
}
+
cinfo->unmap = cirrusfb_zorro_unmap;
dev_info(info->device,
- "Cirrus Logic chipset on Zorro bus, RAM (%lu MB) at $%lx\n",
- board_size / MB_, board_addr);
-
- zorro_set_drvdata(z, info);
+ "Cirrus Logic chipset on Zorro bus, RAM (%lu MiB) at 0x%lx\n",
+ ramsize / MB_, rambase);
/* MCLK select etc. */
if (cirrusfb_board_info[btype].init_sr1f)
vga_wseq(cinfo->regbase, CL_SEQR1F,
cirrusfb_board_info[btype].sr1f);
- ret = cirrusfb_register(info);
- if (!ret)
- return 0;
+ error = cirrusfb_register(info);
+ if (error) {
+ dev_err(info->device, "Failed to register device, error %d\n",
+ error);
+ goto err_unmap_ram;
+ }
- if (btype == BT_PICASSO4 || board_addr > 0x01000000)
+ zorro_set_drvdata(z, info);
+ return 0;
+
+err_unmap_ram:
+ if (rambase > 16 * MB_)
iounmap(info->screen_base);
-err_unmap_regbase:
- if (btype == BT_PICASSO4)
- iounmap(cinfo->regbase - 0x600000);
-err_release_region:
- release_region(board_addr, board_size);
+err_unmap_reg:
+ if (regbase > 16 * MB_)
+ iounmap(cinfo->regbase);
+err_release_dev:
+ zorro_release_device(z);
err_release_fb:
framebuffer_release(info);
-err_out:
- return ret;
+ return error;
}
void __devexit cirrusfb_zorro_unregister(struct zorro_dev *z)
@@ -2338,6 +2357,7 @@ void __devexit cirrusfb_zorro_unregister(struct zorro_dev *z)
struct fb_info *info = zorro_get_drvdata(z);
cirrusfb_cleanup(info);
+ zorro_set_drvdata(z, NULL);
}
static struct zorro_driver cirrusfb_zorro_driver = {
diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
index 2209e354f531..c2d11fef114b 100644
--- a/drivers/video/console/Kconfig
+++ b/drivers/video/console/Kconfig
@@ -66,8 +66,6 @@ config SGI_NEWPORT_CONSOLE
Say Y here if you want the console on the Newport aka XL graphics
card of your Indy. Most people say Y here.
-# bool 'IODC console' CONFIG_IODC_CONSOLE
-
config DUMMY_CONSOLE
bool
depends on VGA_CONSOLE!=y || SGI_NEWPORT_CONSOLE!=y
diff --git a/drivers/video/console/newport_con.c b/drivers/video/console/newport_con.c
index 93317b5b8740..a122d9287d16 100644
--- a/drivers/video/console/newport_con.c
+++ b/drivers/video/console/newport_con.c
@@ -25,14 +25,13 @@
#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgtable.h>
+#include <asm/gio_device.h>
+
#include <video/newport.h>
#include <linux/linux_logo.h>
#include <linux/font.h>
-
-extern unsigned long sgi_gfxaddr;
-
#define FONT_DATA ((unsigned char *)font_vga_8x16.data)
/* borrowed from fbcon.c */
@@ -304,12 +303,6 @@ static const char *newport_startup(void)
{
int i;
- if (!sgi_gfxaddr)
- return NULL;
-
- if (!npregs)
- npregs = (struct newport_regs *)/* ioremap cannot fail */
- ioremap(sgi_gfxaddr, sizeof(struct newport_regs));
npregs->cset.config = NPORT_CFG_GD0;
if (newport_wait(npregs))
@@ -743,26 +736,58 @@ const struct consw newport_con = {
.con_save_screen = DUMMY
};
-#ifdef MODULE
-static int __init newport_console_init(void)
+static int newport_probe(struct gio_device *dev,
+ const struct gio_device_id *id)
{
- if (!sgi_gfxaddr)
- return 0;
+ unsigned long newport_addr;
- if (!npregs)
- npregs = (struct newport_regs *)/* ioremap cannot fail */
- ioremap(sgi_gfxaddr, sizeof(struct newport_regs));
+ if (!dev->resource.start)
+ return -EINVAL;
+
+ if (npregs)
+ return -EBUSY; /* we only support one Newport as console */
+
+ newport_addr = dev->resource.start + 0xF0000;
+ if (!request_mem_region(newport_addr, 0x10000, "Newport"))
+ return -ENODEV;
+
+ npregs = (struct newport_regs *)/* ioremap cannot fail */
+ ioremap(newport_addr, sizeof(struct newport_regs));
return take_over_console(&newport_con, 0, MAX_NR_CONSOLES - 1, 1);
}
-module_init(newport_console_init);
-static void __exit newport_console_exit(void)
+static void newport_remove(struct gio_device *dev)
{
give_up_console(&newport_con);
iounmap((void *)npregs);
}
+
+static struct gio_device_id newport_ids[] = {
+ { .id = 0x7e },
+ { .id = 0xff }
+};
+
+MODULE_ALIAS("gio:7e");
+
+static struct gio_driver newport_driver = {
+ .name = "newport",
+ .id_table = newport_ids,
+ .probe = newport_probe,
+ .remove = newport_remove,
+};
+
+int __init newport_console_init(void)
+{
+ return gio_register_driver(&newport_driver);
+}
+
+void __exit newport_console_exit(void)
+{
+ gio_unregister_driver(&newport_driver);
+}
+
+module_init(newport_console_init);
module_exit(newport_console_exit);
-#endif
MODULE_LICENSE("GPL");
diff --git a/drivers/video/controlfb.c b/drivers/video/controlfb.c
index 7b2c40abae15..0c189b32a4c5 100644
--- a/drivers/video/controlfb.c
+++ b/drivers/video/controlfb.c
@@ -420,7 +420,7 @@ static int __init init_control(struct fb_info_control *p)
/* Try to pick a video mode out of NVRAM if we have one. */
#ifdef CONFIG_NVRAM
- if (default_cmode == CMODE_NVRAM){
+ if (default_cmode == CMODE_NVRAM) {
cmode = nvram_read_byte(NV_CMODE);
if(cmode < CMODE_8 || cmode > CMODE_32)
cmode = CMODE_8;
diff --git a/drivers/video/display/Kconfig b/drivers/video/display/Kconfig
deleted file mode 100644
index f99af931d4f8..000000000000
--- a/drivers/video/display/Kconfig
+++ /dev/null
@@ -1,24 +0,0 @@
-#
-# Display drivers configuration
-#
-
-menu "Display device support"
-
-config DISPLAY_SUPPORT
- tristate "Display panel/monitor support"
- ---help---
- This framework adds support for low-level control of a display.
- This includes support for power.
-
- Enable this to be able to choose the drivers for controlling the
- physical display panel/monitor on some platforms. This not only
- covers LCD displays for PDAs but also other types of displays
- such as CRT, TVout etc.
-
- To have support for your specific display panel you will have to
- select the proper drivers which depend on this option.
-
-comment "Display hardware drivers"
- depends on DISPLAY_SUPPORT
-
-endmenu
diff --git a/drivers/video/display/Makefile b/drivers/video/display/Makefile
deleted file mode 100644
index c0ea832bf171..000000000000
--- a/drivers/video/display/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
-# Display drivers
-
-display-objs := display-sysfs.o
-
-obj-$(CONFIG_DISPLAY_SUPPORT) += display.o
-
diff --git a/drivers/video/display/display-sysfs.c b/drivers/video/display/display-sysfs.c
deleted file mode 100644
index 0c647d7af0ee..000000000000
--- a/drivers/video/display/display-sysfs.c
+++ /dev/null
@@ -1,219 +0,0 @@
-/*
- * display-sysfs.c - Display output driver sysfs interface
- *
- * Copyright (C) 2007 James Simmons <jsimmons@infradead.org>
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- */
-#include <linux/module.h>
-#include <linux/display.h>
-#include <linux/ctype.h>
-#include <linux/idr.h>
-#include <linux/err.h>
-#include <linux/kdev_t.h>
-#include <linux/slab.h>
-
-static ssize_t display_show_name(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct display_device *dsp = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%s\n", dsp->name);
-}
-
-static ssize_t display_show_type(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct display_device *dsp = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%s\n", dsp->type);
-}
-
-static ssize_t display_show_contrast(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct display_device *dsp = dev_get_drvdata(dev);
- ssize_t rc = -ENXIO;
-
- mutex_lock(&dsp->lock);
- if (likely(dsp->driver) && dsp->driver->get_contrast)
- rc = sprintf(buf, "%d\n", dsp->driver->get_contrast(dsp));
- mutex_unlock(&dsp->lock);
- return rc;
-}
-
-static ssize_t display_store_contrast(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct display_device *dsp = dev_get_drvdata(dev);
- ssize_t ret = -EINVAL, size;
- int contrast;
- char *endp;
-
- contrast = simple_strtoul(buf, &endp, 0);
- size = endp - buf;
-
- if (isspace(*endp))
- size++;
-
- if (size != count)
- return ret;
-
- mutex_lock(&dsp->lock);
- if (likely(dsp->driver && dsp->driver->set_contrast)) {
- pr_debug("display: set contrast to %d\n", contrast);
- dsp->driver->set_contrast(dsp, contrast);
- ret = count;
- }
- mutex_unlock(&dsp->lock);
- return ret;
-}
-
-static ssize_t display_show_max_contrast(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct display_device *dsp = dev_get_drvdata(dev);
- ssize_t rc = -ENXIO;
-
- mutex_lock(&dsp->lock);
- if (likely(dsp->driver))
- rc = sprintf(buf, "%d\n", dsp->driver->max_contrast);
- mutex_unlock(&dsp->lock);
- return rc;
-}
-
-static struct device_attribute display_attrs[] = {
- __ATTR(name, S_IRUGO, display_show_name, NULL),
- __ATTR(type, S_IRUGO, display_show_type, NULL),
- __ATTR(contrast, S_IRUGO | S_IWUSR, display_show_contrast, display_store_contrast),
- __ATTR(max_contrast, S_IRUGO, display_show_max_contrast, NULL),
-};
-
-static int display_suspend(struct device *dev, pm_message_t state)
-{
- struct display_device *dsp = dev_get_drvdata(dev);
-
- mutex_lock(&dsp->lock);
- if (likely(dsp->driver->suspend))
- dsp->driver->suspend(dsp, state);
- mutex_unlock(&dsp->lock);
- return 0;
-};
-
-static int display_resume(struct device *dev)
-{
- struct display_device *dsp = dev_get_drvdata(dev);
-
- mutex_lock(&dsp->lock);
- if (likely(dsp->driver->resume))
- dsp->driver->resume(dsp);
- mutex_unlock(&dsp->lock);
- return 0;
-};
-
-static struct mutex allocated_dsp_lock;
-static DEFINE_IDR(allocated_dsp);
-static struct class *display_class;
-
-struct display_device *display_device_register(struct display_driver *driver,
- struct device *parent, void *devdata)
-{
- struct display_device *new_dev = NULL;
- int ret = -EINVAL;
-
- if (unlikely(!driver))
- return ERR_PTR(ret);
-
- mutex_lock(&allocated_dsp_lock);
- ret = idr_pre_get(&allocated_dsp, GFP_KERNEL);
- mutex_unlock(&allocated_dsp_lock);
- if (!ret)
- return ERR_PTR(ret);
-
- new_dev = kzalloc(sizeof(struct display_device), GFP_KERNEL);
- if (likely(new_dev) && unlikely(driver->probe(new_dev, devdata))) {
- // Reserve the index for this display
- mutex_lock(&allocated_dsp_lock);
- ret = idr_get_new(&allocated_dsp, new_dev, &new_dev->idx);
- mutex_unlock(&allocated_dsp_lock);
-
- if (!ret) {
- new_dev->dev = device_create(display_class, parent,
- MKDEV(0, 0), new_dev,
- "display%d", new_dev->idx);
- if (!IS_ERR(new_dev->dev)) {
- new_dev->parent = parent;
- new_dev->driver = driver;
- mutex_init(&new_dev->lock);
- return new_dev;
- }
- mutex_lock(&allocated_dsp_lock);
- idr_remove(&allocated_dsp, new_dev->idx);
- mutex_unlock(&allocated_dsp_lock);
- ret = -EINVAL;
- }
- }
- kfree(new_dev);
- return ERR_PTR(ret);
-}
-EXPORT_SYMBOL(display_device_register);
-
-void display_device_unregister(struct display_device *ddev)
-{
- if (!ddev)
- return;
- // Free device
- mutex_lock(&ddev->lock);
- device_unregister(ddev->dev);
- mutex_unlock(&ddev->lock);
- // Mark device index as available
- mutex_lock(&allocated_dsp_lock);
- idr_remove(&allocated_dsp, ddev->idx);
- mutex_unlock(&allocated_dsp_lock);
- kfree(ddev);
-}
-EXPORT_SYMBOL(display_device_unregister);
-
-static int __init display_class_init(void)
-{
- display_class = class_create(THIS_MODULE, "display");
- if (IS_ERR(display_class)) {
- printk(KERN_ERR "Failed to create display class\n");
- display_class = NULL;
- return -EINVAL;
- }
- display_class->dev_attrs = display_attrs;
- display_class->suspend = display_suspend;
- display_class->resume = display_resume;
- mutex_init(&allocated_dsp_lock);
- return 0;
-}
-
-static void __exit display_class_exit(void)
-{
- class_destroy(display_class);
-}
-
-module_init(display_class_init);
-module_exit(display_class_exit);
-
-MODULE_DESCRIPTION("Display Hardware handling");
-MODULE_AUTHOR("James Simmons <jsimmons@infradead.org>");
-MODULE_LICENSE("GPL");
-
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index ad936295d8f4..ac9141b85356 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -967,6 +967,20 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
memcmp(&info->var, var, sizeof(struct fb_var_screeninfo))) {
u32 activate = var->activate;
+ /* When using FOURCC mode, make sure the red, green, blue and
+ * transp fields are set to 0.
+ */
+ if ((info->fix.capabilities & FB_CAP_FOURCC) &&
+ var->grayscale > 1) {
+ if (var->red.offset || var->green.offset ||
+ var->blue.offset || var->transp.offset ||
+ var->red.length || var->green.length ||
+ var->blue.length || var->transp.length ||
+ var->red.msb_right || var->green.msb_right ||
+ var->blue.msb_right || var->transp.msb_right)
+ return -EINVAL;
+ }
+
if (!info->fbops->fb_check_var) {
*var = info->var;
goto done;
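
The hunk above makes fb_set_var() reject FOURCC-based requests (FB_CAP_FOURCC capability, var->grayscale > 1) that still carry legacy bitfield data. A minimal sketch of what a conforming request looks like; the helper name and the NV12 fourcc are illustrative, and the formats actually accepted are driver-specific:

#include <linux/fb.h>
#include <linux/string.h>
#include <linux/videodev2.h>

/* Illustrative only: prepare a var for a FOURCC-based format request. */
static void hypo_request_fourcc(struct fb_var_screeninfo *var, u32 fourcc)
{
	var->grayscale = fourcc;	/* e.g. V4L2_PIX_FMT_NV12, if supported */

	/* On a driver that sets FB_CAP_FOURCC, fb_set_var() now rejects
	 * requests that still carry legacy bitfield data, so clear it. */
	memset(&var->red,    0, sizeof(var->red));
	memset(&var->green,  0, sizeof(var->green));
	memset(&var->blue,   0, sizeof(var->blue));
	memset(&var->transp, 0, sizeof(var->transp));
}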
diff --git a/drivers/video/fsl-diu-fb.c b/drivers/video/fsl-diu-fb.c
index a16beeb5f548..acf292bfba02 100644
--- a/drivers/video/fsl-diu-fb.c
+++ b/drivers/video/fsl-diu-fb.c
@@ -36,8 +36,7 @@
#include <linux/fsl-diu-fb.h>
#include "edid.h"
-#define FSL_AOI_NUM 6 /* 5 AOIs and one dummy AOI */
- /* 1 for plane 0, 2 for plane 1&2 each */
+#define NUM_AOIS 5 /* 1 for plane 0, 2 for planes 1 & 2 each */
/* HW cursor parameters */
#define MAX_CURS 32
@@ -49,12 +48,6 @@
#define INT_PARERR 0x08 /* Display parameters error interrupt */
#define INT_LS_BF_VS 0x10 /* Lines before vsync. interrupt */
-struct diu_addr {
- void *vaddr; /* Virtual address */
- dma_addr_t paddr; /* Physical address */
- __u32 offset;
-};
-
/*
* List of supported video modes
*
@@ -330,23 +323,6 @@ static unsigned int d_cache_line_size;
static DEFINE_SPINLOCK(diu_lock);
-struct fsl_diu_data {
- struct fb_info *fsl_diu_info[FSL_AOI_NUM - 1];
- /*FSL_AOI_NUM has one dummy AOI */
- struct device_attribute dev_attr;
- struct diu_ad *dummy_ad;
- void *dummy_aoi_virt;
- unsigned int irq;
- int fb_enabled;
- enum fsl_diu_monitor_port monitor_port;
- struct diu __iomem *diu_reg;
- spinlock_t reg_lock;
- struct diu_addr ad;
- struct diu_addr gamma;
- struct diu_addr pallete;
- struct diu_addr cursor;
-};
-
enum mfb_index {
PLANE0 = 0, /* Plane 0, only one AOI that fills the screen */
PLANE1_AOI0, /* Plane 1, first AOI */
@@ -370,6 +346,42 @@ struct mfb_info {
u8 *edid_data;
};
+/**
+ * struct fsl_diu_data - per-DIU data structure
+ * @dma_addr: DMA address of this structure
+ * @fsl_diu_info: fb_info objects, one per AOI
+ * @dev_attr: sysfs structure
+ * @irq: IRQ
+ * @monitor_port: the monitor port this DIU is connected to
+ * @diu_reg: pointer to the DIU hardware registers
+ * @reg_lock: spinlock for register access
+ * @dummy_aoi: video buffer for the 4x4 32-bit dummy AOI
+ * @dummy_ad: DIU Area Descriptor for the dummy AOI
+ * @ad[]: Area Descriptors for each real AOI
+ * @gamma: gamma color table
+ * @cursor: hardware cursor data
+ *
+ * This data structure must be allocated with 32-byte alignment, so that the
+ * internal fields can be aligned properly.
+ */
+struct fsl_diu_data {
+ dma_addr_t dma_addr;
+ struct fb_info fsl_diu_info[NUM_AOIS];
+ struct mfb_info mfb[NUM_AOIS];
+ struct device_attribute dev_attr;
+ unsigned int irq;
+ enum fsl_diu_monitor_port monitor_port;
+ struct diu __iomem *diu_reg;
+ spinlock_t reg_lock;
+ u8 dummy_aoi[4 * 4 * 4];
+ struct diu_ad dummy_ad __aligned(8);
+ struct diu_ad ad[NUM_AOIS] __aligned(8);
+ u8 gamma[256 * 3] __aligned(32);
+ u8 cursor[MAX_CURS * MAX_CURS * 2] __aligned(32);
+} __aligned(32);
+
+/* Determine the DMA address of a member of the fsl_diu_data structure */
+#define DMA_ADDR(p, f) ((p)->dma_addr + offsetof(struct fsl_diu_data, f))
static struct mfb_info mfb_template[] = {
{
@@ -449,37 +461,6 @@ static enum fsl_diu_monitor_port fsl_diu_name_to_port(const char *s)
return diu_ops.valid_monitor_port(port);
}
-/**
- * fsl_diu_alloc - allocate memory for the DIU
- * @size: number of bytes to allocate
- * @param: returned physical address of memory
- *
- * This function allocates a physically-contiguous block of memory.
- */
-static void *fsl_diu_alloc(size_t size, phys_addr_t *phys)
-{
- void *virt;
-
- virt = alloc_pages_exact(size, GFP_DMA | __GFP_ZERO);
- if (virt)
- *phys = virt_to_phys(virt);
-
- return virt;
-}
-
-/**
- * fsl_diu_free - release DIU memory
- * @virt: pointer returned by fsl_diu_alloc()
- * @size: number of bytes allocated by fsl_diu_alloc()
- *
- * This function releases memory allocated by fsl_diu_alloc().
- */
-static void fsl_diu_free(void *virt, size_t size)
-{
- if (virt && size)
- free_pages_exact(virt, size);
-}
-
/*
* Workaround for failed writing desc register of planes.
* Needed with MPC5121 DIU rev 2.0 silicon.
@@ -495,8 +476,8 @@ static void fsl_diu_enable_panel(struct fb_info *info)
{
struct mfb_info *pmfbi, *cmfbi, *mfbi = info->par;
struct diu_ad *ad = mfbi->ad;
- struct fsl_diu_data *machine_data = mfbi->parent;
- struct diu __iomem *hw = machine_data->diu_reg;
+ struct fsl_diu_data *data = mfbi->parent;
+ struct diu __iomem *hw = data->diu_reg;
switch (mfbi->index) {
case PLANE0:
@@ -504,7 +485,7 @@ static void fsl_diu_enable_panel(struct fb_info *info)
wr_reg_wa(&hw->desc[0], ad->paddr);
break;
case PLANE1_AOI0:
- cmfbi = machine_data->fsl_diu_info[2]->par;
+ cmfbi = &data->mfb[2];
if (hw->desc[1] != ad->paddr) { /* AOI0 closed */
if (cmfbi->count > 0) /* AOI1 open */
ad->next_ad =
@@ -515,7 +496,7 @@ static void fsl_diu_enable_panel(struct fb_info *info)
}
break;
case PLANE2_AOI0:
- cmfbi = machine_data->fsl_diu_info[4]->par;
+ cmfbi = &data->mfb[4];
if (hw->desc[2] != ad->paddr) { /* AOI0 closed */
if (cmfbi->count > 0) /* AOI1 open */
ad->next_ad =
@@ -526,17 +507,17 @@ static void fsl_diu_enable_panel(struct fb_info *info)
}
break;
case PLANE1_AOI1:
- pmfbi = machine_data->fsl_diu_info[1]->par;
+ pmfbi = &data->mfb[1];
ad->next_ad = 0;
- if (hw->desc[1] == machine_data->dummy_ad->paddr)
+ if (hw->desc[1] == data->dummy_ad.paddr)
wr_reg_wa(&hw->desc[1], ad->paddr);
else /* AOI0 open */
pmfbi->ad->next_ad = cpu_to_le32(ad->paddr);
break;
case PLANE2_AOI1:
- pmfbi = machine_data->fsl_diu_info[3]->par;
+ pmfbi = &data->mfb[3];
ad->next_ad = 0;
- if (hw->desc[2] == machine_data->dummy_ad->paddr)
+ if (hw->desc[2] == data->dummy_ad.paddr)
wr_reg_wa(&hw->desc[2], ad->paddr);
else /* AOI0 was open */
pmfbi->ad->next_ad = cpu_to_le32(ad->paddr);
@@ -548,52 +529,52 @@ static void fsl_diu_disable_panel(struct fb_info *info)
{
struct mfb_info *pmfbi, *cmfbi, *mfbi = info->par;
struct diu_ad *ad = mfbi->ad;
- struct fsl_diu_data *machine_data = mfbi->parent;
- struct diu __iomem *hw = machine_data->diu_reg;
+ struct fsl_diu_data *data = mfbi->parent;
+ struct diu __iomem *hw = data->diu_reg;
switch (mfbi->index) {
case PLANE0:
- if (hw->desc[0] != machine_data->dummy_ad->paddr)
- wr_reg_wa(&hw->desc[0], machine_data->dummy_ad->paddr);
+ if (hw->desc[0] != data->dummy_ad.paddr)
+ wr_reg_wa(&hw->desc[0], data->dummy_ad.paddr);
break;
case PLANE1_AOI0:
- cmfbi = machine_data->fsl_diu_info[2]->par;
+ cmfbi = &data->mfb[2];
if (cmfbi->count > 0) /* AOI1 is open */
wr_reg_wa(&hw->desc[1], cmfbi->ad->paddr);
/* move AOI1 to the first */
else /* AOI1 was closed */
- wr_reg_wa(&hw->desc[1], machine_data->dummy_ad->paddr);
+ wr_reg_wa(&hw->desc[1], data->dummy_ad.paddr);
/* close AOI 0 */
break;
case PLANE2_AOI0:
- cmfbi = machine_data->fsl_diu_info[4]->par;
+ cmfbi = &data->mfb[4];
if (cmfbi->count > 0) /* AOI1 is open */
wr_reg_wa(&hw->desc[2], cmfbi->ad->paddr);
/* move AOI1 to the first */
else /* AOI1 was closed */
- wr_reg_wa(&hw->desc[2], machine_data->dummy_ad->paddr);
+ wr_reg_wa(&hw->desc[2], data->dummy_ad.paddr);
/* close AOI 0 */
break;
case PLANE1_AOI1:
- pmfbi = machine_data->fsl_diu_info[1]->par;
+ pmfbi = &data->mfb[1];
if (hw->desc[1] != ad->paddr) {
/* AOI1 is not the first in the chain */
if (pmfbi->count > 0)
/* AOI0 is open, must be the first */
pmfbi->ad->next_ad = 0;
} else /* AOI1 is the first in the chain */
- wr_reg_wa(&hw->desc[1], machine_data->dummy_ad->paddr);
+ wr_reg_wa(&hw->desc[1], data->dummy_ad.paddr);
/* close AOI 1 */
break;
case PLANE2_AOI1:
- pmfbi = machine_data->fsl_diu_info[3]->par;
+ pmfbi = &data->mfb[3];
if (hw->desc[2] != ad->paddr) {
/* AOI1 is not the first in the chain */
if (pmfbi->count > 0)
/* AOI0 is open, must be the first */
pmfbi->ad->next_ad = 0;
} else /* AOI1 is the first in the chain */
- wr_reg_wa(&hw->desc[2], machine_data->dummy_ad->paddr);
+ wr_reg_wa(&hw->desc[2], data->dummy_ad.paddr);
/* close AOI 1 */
break;
}
@@ -602,39 +583,33 @@ static void fsl_diu_disable_panel(struct fb_info *info)
static void enable_lcdc(struct fb_info *info)
{
struct mfb_info *mfbi = info->par;
- struct fsl_diu_data *machine_data = mfbi->parent;
- struct diu __iomem *hw = machine_data->diu_reg;
+ struct fsl_diu_data *data = mfbi->parent;
+ struct diu __iomem *hw = data->diu_reg;
- if (!machine_data->fb_enabled) {
- out_be32(&hw->diu_mode, MFB_MODE1);
- machine_data->fb_enabled++;
- }
+ out_be32(&hw->diu_mode, MFB_MODE1);
}
static void disable_lcdc(struct fb_info *info)
{
struct mfb_info *mfbi = info->par;
- struct fsl_diu_data *machine_data = mfbi->parent;
- struct diu __iomem *hw = machine_data->diu_reg;
+ struct fsl_diu_data *data = mfbi->parent;
+ struct diu __iomem *hw = data->diu_reg;
- if (machine_data->fb_enabled) {
- out_be32(&hw->diu_mode, 0);
- machine_data->fb_enabled = 0;
- }
+ out_be32(&hw->diu_mode, 0);
}
static void adjust_aoi_size_position(struct fb_var_screeninfo *var,
struct fb_info *info)
{
struct mfb_info *lower_aoi_mfbi, *upper_aoi_mfbi, *mfbi = info->par;
- struct fsl_diu_data *machine_data = mfbi->parent;
+ struct fsl_diu_data *data = mfbi->parent;
int available_height, upper_aoi_bottom;
enum mfb_index index = mfbi->index;
int lower_aoi_is_open, upper_aoi_is_open;
__u32 base_plane_width, base_plane_height, upper_aoi_height;
- base_plane_width = machine_data->fsl_diu_info[0]->var.xres;
- base_plane_height = machine_data->fsl_diu_info[0]->var.yres;
+ base_plane_width = data->fsl_diu_info[0].var.xres;
+ base_plane_height = data->fsl_diu_info[0].var.yres;
if (mfbi->x_aoi_d < 0)
mfbi->x_aoi_d = 0;
@@ -649,7 +624,7 @@ static void adjust_aoi_size_position(struct fb_var_screeninfo *var,
break;
case PLANE1_AOI0:
case PLANE2_AOI0:
- lower_aoi_mfbi = machine_data->fsl_diu_info[index+1]->par;
+ lower_aoi_mfbi = data->fsl_diu_info[index+1].par;
lower_aoi_is_open = lower_aoi_mfbi->count > 0 ? 1 : 0;
if (var->xres > base_plane_width)
var->xres = base_plane_width;
@@ -667,9 +642,8 @@ static void adjust_aoi_size_position(struct fb_var_screeninfo *var,
break;
case PLANE1_AOI1:
case PLANE2_AOI1:
- upper_aoi_mfbi = machine_data->fsl_diu_info[index-1]->par;
- upper_aoi_height =
- machine_data->fsl_diu_info[index-1]->var.yres;
+ upper_aoi_mfbi = data->fsl_diu_info[index-1].par;
+ upper_aoi_height = data->fsl_diu_info[index-1].var.yres;
upper_aoi_bottom = upper_aoi_mfbi->y_aoi_d + upper_aoi_height;
upper_aoi_is_open = upper_aoi_mfbi->count > 0 ? 1 : 0;
if (var->xres > base_plane_width)
@@ -809,33 +783,33 @@ static void update_lcdc(struct fb_info *info)
{
struct fb_var_screeninfo *var = &info->var;
struct mfb_info *mfbi = info->par;
- struct fsl_diu_data *machine_data = mfbi->parent;
+ struct fsl_diu_data *data = mfbi->parent;
struct diu __iomem *hw;
int i, j;
- char __iomem *cursor_base, *gamma_table_base;
+ u8 *gamma_table_base;
u32 temp;
- hw = machine_data->diu_reg;
+ hw = data->diu_reg;
+
+ diu_ops.set_monitor_port(data->monitor_port);
+ gamma_table_base = data->gamma;
- diu_ops.set_monitor_port(machine_data->monitor_port);
- gamma_table_base = machine_data->gamma.vaddr;
- cursor_base = machine_data->cursor.vaddr;
/* Prep for DIU init - gamma table, cursor table */
for (i = 0; i <= 2; i++)
for (j = 0; j <= 255; j++)
*gamma_table_base++ = j;
- diu_ops.set_gamma_table(machine_data->monitor_port,
- machine_data->gamma.vaddr);
+ if (diu_ops.set_gamma_table)
+ diu_ops.set_gamma_table(data->monitor_port, data->gamma);
disable_lcdc(info);
/* Program DIU registers */
- out_be32(&hw->gamma, machine_data->gamma.paddr);
- out_be32(&hw->cursor, machine_data->cursor.paddr);
+ out_be32(&hw->gamma, DMA_ADDR(data, gamma));
+ out_be32(&hw->cursor, DMA_ADDR(data, cursor));
out_be32(&hw->bgnd, 0x007F7F7F); /* BGND */
out_be32(&hw->bgnd_wb, 0); /* BGND_WB */
@@ -870,16 +844,17 @@ static void update_lcdc(struct fb_info *info)
static int map_video_memory(struct fb_info *info)
{
- phys_addr_t phys;
u32 smem_len = info->fix.line_length * info->var.yres_virtual;
+ void *p;
- info->screen_base = fsl_diu_alloc(smem_len, &phys);
- if (info->screen_base == NULL) {
+ p = alloc_pages_exact(smem_len, GFP_DMA | __GFP_ZERO);
+ if (!p) {
dev_err(info->dev, "unable to allocate fb memory\n");
return -ENOMEM;
}
mutex_lock(&info->mm_lock);
- info->fix.smem_start = (unsigned long) phys;
+ info->screen_base = p;
+ info->fix.smem_start = virt_to_phys(info->screen_base);
info->fix.smem_len = smem_len;
mutex_unlock(&info->mm_lock);
info->screen_size = info->fix.smem_len;
@@ -889,12 +864,17 @@ static int map_video_memory(struct fb_info *info)
static void unmap_video_memory(struct fb_info *info)
{
- fsl_diu_free(info->screen_base, info->fix.smem_len);
+ void *p = info->screen_base;
+ size_t l = info->fix.smem_len;
+
mutex_lock(&info->mm_lock);
info->screen_base = NULL;
info->fix.smem_start = 0;
info->fix.smem_len = 0;
mutex_unlock(&info->mm_lock);
+
+ if (p)
+ free_pages_exact(p, l);
}
/*
@@ -913,6 +893,59 @@ static int fsl_diu_set_aoi(struct fb_info *info)
return 0;
}
+/**
+ * fsl_diu_get_pixel_format: return the pixel format for a given color depth
+ *
+ * The pixel format is a 32-bit value that determines which bits in each
+ * pixel are to be used for each color. This is the default function used
+ * if the platform does not define its own version.
+ */
+static u32 fsl_diu_get_pixel_format(unsigned int bits_per_pixel)
+{
+#define PF_BYTE_F 0x10000000
+#define PF_ALPHA_C_MASK 0x0E000000
+#define PF_ALPHA_C_SHIFT 25
+#define PF_BLUE_C_MASK 0x01800000
+#define PF_BLUE_C_SHIFT 23
+#define PF_GREEN_C_MASK 0x00600000
+#define PF_GREEN_C_SHIFT 21
+#define PF_RED_C_MASK 0x00180000
+#define PF_RED_C_SHIFT 19
+#define PF_PALETTE 0x00040000
+#define PF_PIXEL_S_MASK 0x00030000
+#define PF_PIXEL_S_SHIFT 16
+#define PF_COMP_3_MASK 0x0000F000
+#define PF_COMP_3_SHIFT 12
+#define PF_COMP_2_MASK 0x00000F00
+#define PF_COMP_2_SHIFT 8
+#define PF_COMP_1_MASK 0x000000F0
+#define PF_COMP_1_SHIFT 4
+#define PF_COMP_0_MASK 0x0000000F
+#define PF_COMP_0_SHIFT 0
+
+#define MAKE_PF(alpha, red, blue, green, size, c0, c1, c2, c3) \
+ cpu_to_le32(PF_BYTE_F | (alpha << PF_ALPHA_C_SHIFT) | \
+ (blue << PF_BLUE_C_SHIFT) | (green << PF_GREEN_C_SHIFT) | \
+ (red << PF_RED_C_SHIFT) | (c3 << PF_COMP_3_SHIFT) | \
+ (c2 << PF_COMP_2_SHIFT) | (c1 << PF_COMP_1_SHIFT) | \
+ (c0 << PF_COMP_0_SHIFT) | (size << PF_PIXEL_S_SHIFT))
+
+ switch (bits_per_pixel) {
+ case 32:
+ /* 0x88883316 */
+ return MAKE_PF(3, 2, 0, 1, 3, 8, 8, 8, 8);
+ case 24:
+ /* 0x88082219 */
+ return MAKE_PF(4, 0, 1, 2, 2, 0, 8, 8, 8);
+ case 16:
+ /* 0x65053118 */
+ return MAKE_PF(4, 2, 1, 0, 1, 5, 6, 5, 0);
+ default:
+ pr_err("fsl-diu: unsupported color depth %u\n", bits_per_pixel);
+ return 0;
+ }
+}
+
/*
* Using the fb_var_screeninfo in fb_info we set the resolution of this
* particular framebuffer. This function alters the fb_fix_screeninfo stored
@@ -926,11 +959,11 @@ static int fsl_diu_set_par(struct fb_info *info)
unsigned long len;
struct fb_var_screeninfo *var = &info->var;
struct mfb_info *mfbi = info->par;
- struct fsl_diu_data *machine_data = mfbi->parent;
+ struct fsl_diu_data *data = mfbi->parent;
struct diu_ad *ad = mfbi->ad;
struct diu __iomem *hw;
- hw = machine_data->diu_reg;
+ hw = data->diu_reg;
set_fix(info);
mfbi->cursor_reset = 1;
@@ -948,8 +981,12 @@ static int fsl_diu_set_par(struct fb_info *info)
}
}
- ad->pix_fmt = diu_ops.get_pixel_format(machine_data->monitor_port,
- var->bits_per_pixel);
+ if (diu_ops.get_pixel_format)
+ ad->pix_fmt = diu_ops.get_pixel_format(data->monitor_port,
+ var->bits_per_pixel);
+ else
+ ad->pix_fmt = fsl_diu_get_pixel_format(var->bits_per_pixel);
+
ad->addr = cpu_to_le32(info->fix.smem_start);
ad->src_size_g_alpha = cpu_to_le32((var->yres_virtual << 12) |
var->xres_virtual) | mfbi->g_alpha;
@@ -1208,21 +1245,6 @@ static struct fb_ops fsl_diu_ops = {
.fb_release = fsl_diu_release,
};
-static int init_fbinfo(struct fb_info *info)
-{
- struct mfb_info *mfbi = info->par;
-
- info->device = NULL;
- info->var.activate = FB_ACTIVATE_NOW;
- info->fbops = &fsl_diu_ops;
- info->flags = FBINFO_FLAG_DEFAULT;
- info->pseudo_palette = &mfbi->pseudo_palette;
-
- /* Allocate colormap */
- fb_alloc_cmap(&info->cmap, 16, 0);
- return 0;
-}
-
static int __devinit install_fb(struct fb_info *info)
{
int rc;
@@ -1232,8 +1254,15 @@ static int __devinit install_fb(struct fb_info *info)
unsigned int dbsize = ARRAY_SIZE(fsl_diu_mode_db);
int has_default_mode = 1;
- if (init_fbinfo(info))
- return -EINVAL;
+ info->var.activate = FB_ACTIVATE_NOW;
+ info->fbops = &fsl_diu_ops;
+ info->flags = FBINFO_DEFAULT | FBINFO_VIRTFB | FBINFO_PARTIAL_PAN_OK |
+ FBINFO_READS_FAST;
+ info->pseudo_palette = mfbi->pseudo_palette;
+
+ rc = fb_alloc_cmap(&info->cmap, 16, 0);
+ if (rc)
+ return rc;
if (mfbi->index == PLANE0) {
if (mfbi->edid_data) {
@@ -1359,16 +1388,16 @@ static irqreturn_t fsl_diu_isr(int irq, void *dev_id)
return IRQ_NONE;
}
-static int request_irq_local(struct fsl_diu_data *machine_data)
+static int request_irq_local(struct fsl_diu_data *data)
{
- struct diu __iomem *hw = machine_data->diu_reg;
+ struct diu __iomem *hw = data->diu_reg;
u32 ints;
int ret;
/* Read to clear the status */
in_be32(&hw->int_status);
- ret = request_irq(machine_data->irq, fsl_diu_isr, 0, "fsl-diu-fb", hw);
+ ret = request_irq(data->irq, fsl_diu_isr, 0, "fsl-diu-fb", hw);
if (!ret) {
ints = INT_PARERR | INT_LS_BF_VS;
#if !defined(CONFIG_NOT_COHERENT_CACHE)
@@ -1383,14 +1412,14 @@ static int request_irq_local(struct fsl_diu_data *machine_data)
return ret;
}
-static void free_irq_local(struct fsl_diu_data *machine_data)
+static void free_irq_local(struct fsl_diu_data *data)
{
- struct diu __iomem *hw = machine_data->diu_reg;
+ struct diu __iomem *hw = data->diu_reg;
/* Disable all LCDC interrupt */
out_be32(&hw->int_mask, 0x1f);
- free_irq(machine_data->irq, NULL);
+ free_irq(data->irq, NULL);
}
#ifdef CONFIG_PM
@@ -1400,20 +1429,20 @@ static void free_irq_local(struct fsl_diu_data *machine_data)
*/
static int fsl_diu_suspend(struct platform_device *ofdev, pm_message_t state)
{
- struct fsl_diu_data *machine_data;
+ struct fsl_diu_data *data;
- machine_data = dev_get_drvdata(&ofdev->dev);
- disable_lcdc(machine_data->fsl_diu_info[0]);
+ data = dev_get_drvdata(&ofdev->dev);
+ disable_lcdc(data->fsl_diu_info[0]);
return 0;
}
static int fsl_diu_resume(struct platform_device *ofdev)
{
- struct fsl_diu_data *machine_data;
+ struct fsl_diu_data *data;
- machine_data = dev_get_drvdata(&ofdev->dev);
- enable_lcdc(machine_data->fsl_diu_info[0]);
+ data = dev_get_drvdata(&ofdev->dev);
+ enable_lcdc(data->fsl_diu_info[0]);
return 0;
}
@@ -1423,56 +1452,24 @@ static int fsl_diu_resume(struct platform_device *ofdev)
#define fsl_diu_resume NULL
#endif /* CONFIG_PM */
-/* Align to 64-bit(8-byte), 32-byte, etc. */
-static int allocate_buf(struct device *dev, struct diu_addr *buf, u32 size,
- u32 bytes_align)
-{
- u32 offset;
- dma_addr_t mask;
-
- buf->vaddr =
- dma_alloc_coherent(dev, size + bytes_align, &buf->paddr,
- GFP_DMA | __GFP_ZERO);
- if (!buf->vaddr)
- return -ENOMEM;
-
- mask = bytes_align - 1;
- offset = buf->paddr & mask;
- if (offset) {
- buf->offset = bytes_align - offset;
- buf->paddr = buf->paddr + offset;
- } else
- buf->offset = 0;
-
- return 0;
-}
-
-static void free_buf(struct device *dev, struct diu_addr *buf, u32 size,
- u32 bytes_align)
-{
- dma_free_coherent(dev, size + bytes_align, buf->vaddr,
- buf->paddr - buf->offset);
-}
-
static ssize_t store_monitor(struct device *device,
struct device_attribute *attr, const char *buf, size_t count)
{
enum fsl_diu_monitor_port old_monitor_port;
- struct fsl_diu_data *machine_data =
+ struct fsl_diu_data *data =
container_of(attr, struct fsl_diu_data, dev_attr);
- old_monitor_port = machine_data->monitor_port;
- machine_data->monitor_port = fsl_diu_name_to_port(buf);
+ old_monitor_port = data->monitor_port;
+ data->monitor_port = fsl_diu_name_to_port(buf);
- if (old_monitor_port != machine_data->monitor_port) {
+ if (old_monitor_port != data->monitor_port) {
		/* All AOIs need to adjust the pixel format;
		 * fsl_diu_set_par only changes the pixel format here and is
		 * unlikely to fail. */
- fsl_diu_set_par(machine_data->fsl_diu_info[0]);
- fsl_diu_set_par(machine_data->fsl_diu_info[1]);
- fsl_diu_set_par(machine_data->fsl_diu_info[2]);
- fsl_diu_set_par(machine_data->fsl_diu_info[3]);
- fsl_diu_set_par(machine_data->fsl_diu_info[4]);
+ unsigned int i;
+
+		for (i = 0; i < NUM_AOIS; i++)
+ fsl_diu_set_par(&data->fsl_diu_info[i]);
}
return count;
}
@@ -1480,10 +1477,10 @@ static ssize_t store_monitor(struct device *device,
static ssize_t show_monitor(struct device *device,
struct device_attribute *attr, char *buf)
{
- struct fsl_diu_data *machine_data =
+ struct fsl_diu_data *data =
container_of(attr, struct fsl_diu_data, dev_attr);
- switch (machine_data->monitor_port) {
+ switch (data->monitor_port) {
case FSL_DIU_PORT_DVI:
return sprintf(buf, "DVI\n");
case FSL_DIU_PORT_LVDS:
@@ -1499,28 +1496,52 @@ static int __devinit fsl_diu_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct mfb_info *mfbi;
- phys_addr_t dummy_ad_addr = 0;
- int ret, i, error = 0;
- struct fsl_diu_data *machine_data;
+ struct fsl_diu_data *data;
int diu_mode;
+ dma_addr_t dma_addr; /* DMA addr of fsl_diu_data struct */
+ unsigned int i;
+ int ret;
- machine_data = kzalloc(sizeof(struct fsl_diu_data), GFP_KERNEL);
- if (!machine_data)
+ data = dma_alloc_coherent(&pdev->dev, sizeof(struct fsl_diu_data),
+ &dma_addr, GFP_DMA | __GFP_ZERO);
+ if (!data)
return -ENOMEM;
+ data->dma_addr = dma_addr;
+
+ /*
+ * dma_alloc_coherent() uses a page allocator, so the address is
+ * always page-aligned. We need the memory to be 32-byte aligned,
+ * so that's good. However, if one day the allocator changes, we
+ * need to catch that. It's not worth the effort to handle unaligned
+ * allocations now because it's highly unlikely to ever be a problem.
+ */
+ if ((unsigned long)data & 31) {
+ dev_err(&pdev->dev, "misaligned allocation");
+ ret = -ENOMEM;
+ goto error;
+ }
- spin_lock_init(&machine_data->reg_lock);
+ spin_lock_init(&data->reg_lock);
- for (i = 0; i < ARRAY_SIZE(machine_data->fsl_diu_info); i++) {
- machine_data->fsl_diu_info[i] =
- framebuffer_alloc(sizeof(struct mfb_info), &pdev->dev);
- if (!machine_data->fsl_diu_info[i]) {
- dev_err(&pdev->dev, "cannot allocate memory\n");
- ret = -ENOMEM;
- goto error2;
- }
- mfbi = machine_data->fsl_diu_info[i]->par;
+ for (i = 0; i < NUM_AOIS; i++) {
+ struct fb_info *info = &data->fsl_diu_info[i];
+
+ info->device = &pdev->dev;
+ info->par = &data->mfb[i];
+
+ /*
+ * We store the physical address of the AD in the reserved
+ * 'paddr' field of the AD itself.
+ */
+ data->ad[i].paddr = DMA_ADDR(data, ad[i]);
+
+ info->fix.smem_start = 0;
+
+ /* Initialize the AOI data structure */
+ mfbi = info->par;
memcpy(mfbi, &mfb_template[i], sizeof(struct mfb_info));
- mfbi->parent = machine_data;
+ mfbi->parent = data;
+ mfbi->ad = &data->ad[i];
if (mfbi->index == PLANE0) {
const u8 *prop;
@@ -1534,158 +1555,102 @@ static int __devinit fsl_diu_probe(struct platform_device *pdev)
}
}
- machine_data->diu_reg = of_iomap(np, 0);
- if (!machine_data->diu_reg) {
+ data->diu_reg = of_iomap(np, 0);
+ if (!data->diu_reg) {
dev_err(&pdev->dev, "cannot map DIU registers\n");
ret = -EFAULT;
- goto error2;
+ goto error;
}
- diu_mode = in_be32(&machine_data->diu_reg->diu_mode);
+ diu_mode = in_be32(&data->diu_reg->diu_mode);
if (diu_mode == MFB_MODE0)
- out_be32(&machine_data->diu_reg->diu_mode, 0); /* disable DIU */
+ out_be32(&data->diu_reg->diu_mode, 0); /* disable DIU */
/* Get the IRQ of the DIU */
- machine_data->irq = irq_of_parse_and_map(np, 0);
+ data->irq = irq_of_parse_and_map(np, 0);
- if (!machine_data->irq) {
+ if (!data->irq) {
dev_err(&pdev->dev, "could not get DIU IRQ\n");
ret = -EINVAL;
goto error;
}
- machine_data->monitor_port = monitor_port;
-
- /* Area descriptor memory pool aligns to 64-bit boundary */
- if (allocate_buf(&pdev->dev, &machine_data->ad,
- sizeof(struct diu_ad) * FSL_AOI_NUM, 8))
- return -ENOMEM;
-
- /* Get memory for Gamma Table - 32-byte aligned memory */
- if (allocate_buf(&pdev->dev, &machine_data->gamma, 768, 32)) {
- ret = -ENOMEM;
- goto error;
- }
-
- /* For performance, cursor bitmap buffer aligns to 32-byte boundary */
- if (allocate_buf(&pdev->dev, &machine_data->cursor,
- MAX_CURS * MAX_CURS * 2, 32)) {
- ret = -ENOMEM;
- goto error;
- }
-
- i = ARRAY_SIZE(machine_data->fsl_diu_info);
- machine_data->dummy_ad = (struct diu_ad *)((u32)machine_data->ad.vaddr +
- machine_data->ad.offset) + i;
- machine_data->dummy_ad->paddr = machine_data->ad.paddr +
- i * sizeof(struct diu_ad);
- machine_data->dummy_aoi_virt = fsl_diu_alloc(64, &dummy_ad_addr);
- if (!machine_data->dummy_aoi_virt) {
- ret = -ENOMEM;
- goto error;
- }
- machine_data->dummy_ad->addr = cpu_to_le32(dummy_ad_addr);
- machine_data->dummy_ad->pix_fmt = 0x88882317;
- machine_data->dummy_ad->src_size_g_alpha = cpu_to_le32((4 << 12) | 4);
- machine_data->dummy_ad->aoi_size = cpu_to_le32((4 << 16) | 2);
- machine_data->dummy_ad->offset_xyi = 0;
- machine_data->dummy_ad->offset_xyd = 0;
- machine_data->dummy_ad->next_ad = 0;
+ data->monitor_port = monitor_port;
+
+ /* Initialize the dummy Area Descriptor */
+ data->dummy_ad.addr = cpu_to_le32(DMA_ADDR(data, dummy_aoi));
+ data->dummy_ad.pix_fmt = 0x88882317;
+ data->dummy_ad.src_size_g_alpha = cpu_to_le32((4 << 12) | 4);
+ data->dummy_ad.aoi_size = cpu_to_le32((4 << 16) | 2);
+ data->dummy_ad.offset_xyi = 0;
+ data->dummy_ad.offset_xyd = 0;
+ data->dummy_ad.next_ad = 0;
+ data->dummy_ad.paddr = DMA_ADDR(data, dummy_ad);
/*
* Let DIU display splash screen if it was pre-initialized
* by the bootloader, set dummy area descriptor otherwise.
*/
if (diu_mode == MFB_MODE0)
- out_be32(&machine_data->diu_reg->desc[0],
- machine_data->dummy_ad->paddr);
-
- out_be32(&machine_data->diu_reg->desc[1], machine_data->dummy_ad->paddr);
- out_be32(&machine_data->diu_reg->desc[2], machine_data->dummy_ad->paddr);
-
- for (i = 0; i < ARRAY_SIZE(machine_data->fsl_diu_info); i++) {
- machine_data->fsl_diu_info[i]->fix.smem_start = 0;
- mfbi = machine_data->fsl_diu_info[i]->par;
- mfbi->ad = (struct diu_ad *)((u32)machine_data->ad.vaddr
- + machine_data->ad.offset) + i;
- mfbi->ad->paddr =
- machine_data->ad.paddr + i * sizeof(struct diu_ad);
- ret = install_fb(machine_data->fsl_diu_info[i]);
+ out_be32(&data->diu_reg->desc[0], data->dummy_ad.paddr);
+
+ out_be32(&data->diu_reg->desc[1], data->dummy_ad.paddr);
+ out_be32(&data->diu_reg->desc[2], data->dummy_ad.paddr);
+
+ for (i = 0; i < NUM_AOIS; i++) {
+ ret = install_fb(&data->fsl_diu_info[i]);
if (ret) {
dev_err(&pdev->dev, "could not register fb %d\n", i);
goto error;
}
}
- if (request_irq_local(machine_data)) {
+ if (request_irq_local(data)) {
dev_err(&pdev->dev, "could not claim irq\n");
goto error;
}
- sysfs_attr_init(&machine_data->dev_attr.attr);
- machine_data->dev_attr.attr.name = "monitor";
- machine_data->dev_attr.attr.mode = S_IRUGO|S_IWUSR;
- machine_data->dev_attr.show = show_monitor;
- machine_data->dev_attr.store = store_monitor;
- error = device_create_file(machine_data->fsl_diu_info[0]->dev,
- &machine_data->dev_attr);
- if (error) {
+ sysfs_attr_init(&data->dev_attr.attr);
+ data->dev_attr.attr.name = "monitor";
+ data->dev_attr.attr.mode = S_IRUGO|S_IWUSR;
+ data->dev_attr.show = show_monitor;
+ data->dev_attr.store = store_monitor;
+ ret = device_create_file(&pdev->dev, &data->dev_attr);
+ if (ret) {
dev_err(&pdev->dev, "could not create sysfs file %s\n",
- machine_data->dev_attr.attr.name);
+ data->dev_attr.attr.name);
}
- dev_set_drvdata(&pdev->dev, machine_data);
+ dev_set_drvdata(&pdev->dev, data);
return 0;
error:
- for (i = 0; i < ARRAY_SIZE(machine_data->fsl_diu_info); i++)
- uninstall_fb(machine_data->fsl_diu_info[i]);
-
- if (machine_data->ad.vaddr)
- free_buf(&pdev->dev, &machine_data->ad,
- sizeof(struct diu_ad) * FSL_AOI_NUM, 8);
- if (machine_data->gamma.vaddr)
- free_buf(&pdev->dev, &machine_data->gamma, 768, 32);
- if (machine_data->cursor.vaddr)
- free_buf(&pdev->dev, &machine_data->cursor,
- MAX_CURS * MAX_CURS * 2, 32);
- if (machine_data->dummy_aoi_virt)
- fsl_diu_free(machine_data->dummy_aoi_virt, 64);
- iounmap(machine_data->diu_reg);
-
-error2:
- for (i = 0; i < ARRAY_SIZE(machine_data->fsl_diu_info); i++)
- if (machine_data->fsl_diu_info[i])
- framebuffer_release(machine_data->fsl_diu_info[i]);
- kfree(machine_data);
+ for (i = 0; i < NUM_AOIS; i++)
+ uninstall_fb(&data->fsl_diu_info[i]);
+
+ iounmap(data->diu_reg);
+
+ dma_free_coherent(&pdev->dev, sizeof(struct fsl_diu_data), data,
+ data->dma_addr);
return ret;
}
static int fsl_diu_remove(struct platform_device *pdev)
{
- struct fsl_diu_data *machine_data;
+ struct fsl_diu_data *data;
int i;
- machine_data = dev_get_drvdata(&pdev->dev);
- disable_lcdc(machine_data->fsl_diu_info[0]);
- free_irq_local(machine_data);
- for (i = 0; i < ARRAY_SIZE(machine_data->fsl_diu_info); i++)
- uninstall_fb(machine_data->fsl_diu_info[i]);
- if (machine_data->ad.vaddr)
- free_buf(&pdev->dev, &machine_data->ad,
- sizeof(struct diu_ad) * FSL_AOI_NUM, 8);
- if (machine_data->gamma.vaddr)
- free_buf(&pdev->dev, &machine_data->gamma, 768, 32);
- if (machine_data->cursor.vaddr)
- free_buf(&pdev->dev, &machine_data->cursor,
- MAX_CURS * MAX_CURS * 2, 32);
- if (machine_data->dummy_aoi_virt)
- fsl_diu_free(machine_data->dummy_aoi_virt, 64);
- iounmap(machine_data->diu_reg);
- for (i = 0; i < ARRAY_SIZE(machine_data->fsl_diu_info); i++)
- if (machine_data->fsl_diu_info[i])
- framebuffer_release(machine_data->fsl_diu_info[i]);
- kfree(machine_data);
+ data = dev_get_drvdata(&pdev->dev);
+ disable_lcdc(&data->fsl_diu_info[0]);
+ free_irq_local(data);
+
+ for (i = 0; i < NUM_AOIS; i++)
+ uninstall_fb(&data->fsl_diu_info[i]);
+
+ iounmap(data->diu_reg);
+
+ dma_free_coherent(&pdev->dev, sizeof(struct fsl_diu_data), data,
+ data->dma_addr);
return 0;
}
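
The fsl-diu-fb rework visible above folds the driver's separate DMA buffers into one dma_alloc_coherent() allocation and derives each member's bus address with the offsetof()-based DMA_ADDR() macro. A minimal sketch of that pattern on a hypothetical device structure (names are illustrative, not the driver's):

#include <linux/dma-mapping.h>
#include <linux/kernel.h>

struct hypo_dev_data {
	dma_addr_t dma_addr;			/* bus address of this structure */
	u8 gamma[256 * 3] __aligned(32);	/* table read by the hardware */
} __aligned(32);

/* Bus address of an embedded member, computed from the base handle. */
#define HYPO_DMA_ADDR(p, f) ((p)->dma_addr + offsetof(struct hypo_dev_data, f))

static struct hypo_dev_data *hypo_alloc(struct device *dev)
{
	struct hypo_dev_data *d;
	dma_addr_t dma;

	d = dma_alloc_coherent(dev, sizeof(*d), &dma, GFP_DMA | __GFP_ZERO);
	if (!d)
		return NULL;
	d->dma_addr = dma;

	/* A register write would then use e.g. HYPO_DMA_ADDR(d, gamma). */
	return d;
}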
diff --git a/drivers/video/grvga.c b/drivers/video/grvga.c
index f37e02538203..da066c210923 100644
--- a/drivers/video/grvga.c
+++ b/drivers/video/grvga.c
@@ -70,7 +70,7 @@ static const struct fb_videomode grvga_modedb[] = {
}
};
-static struct fb_fix_screeninfo grvga_fix __initdata = {
+static struct fb_fix_screeninfo grvga_fix __devinitdata = {
.id = "AG SVGACTRL",
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_PSEUDOCOLOR,
@@ -267,7 +267,7 @@ static struct fb_ops grvga_ops = {
.fb_imageblit = cfb_imageblit
};
-static int __init grvga_parse_custom(char *options,
+static int __devinit grvga_parse_custom(char *options,
struct fb_var_screeninfo *screendata)
{
char *this_opt;
diff --git a/drivers/video/hgafb.c b/drivers/video/hgafb.c
index 4394389caf68..c645f9282650 100644
--- a/drivers/video/hgafb.c
+++ b/drivers/video/hgafb.c
@@ -133,7 +133,7 @@ static struct fb_fix_screeninfo hga_fix __devinitdata = {
/* Don't assume that tty1 will be the initial current console. */
static int release_io_port = 0;
static int release_io_ports = 0;
-static int nologo = 0;
+static bool nologo = 0;
/* -------------------------------------------------------------------------
*
diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
index 318f6fb895b2..b83f36190cae 100644
--- a/drivers/video/i810/i810_main.c
+++ b/drivers/video/i810/i810_main.c
@@ -135,8 +135,8 @@ static struct pci_driver i810fb_driver = {
static char *mode_option __devinitdata = NULL;
static int vram __devinitdata = 4;
static int bpp __devinitdata = 8;
-static int mtrr __devinitdata;
-static int accel __devinitdata;
+static bool mtrr __devinitdata;
+static bool accel __devinitdata;
static int hsync1 __devinitdata;
static int hsync2 __devinitdata;
static int vsync1 __devinitdata;
@@ -144,10 +144,10 @@ static int vsync2 __devinitdata;
static int xres __devinitdata;
static int yres;
static int vyres __devinitdata;
-static int sync __devinitdata;
-static int extvga __devinitdata;
-static int dcolor __devinitdata;
-static int ddc3 __devinitdata = 2;
+static bool sync __devinitdata;
+static bool extvga __devinitdata;
+static bool dcolor __devinitdata;
+static bool ddc3 __devinitdata;
/*------------------------------------------------------------*/
@@ -1776,7 +1776,7 @@ static void __devinit i810_init_defaults(struct i810fb_par *par,
if (sync)
par->dev_flags |= ALWAYS_SYNC;
- par->ddc_num = ddc3;
+ par->ddc_num = (ddc3 ? 3 : 2);
if (bpp < 8)
bpp = 8;
@@ -1999,7 +1999,7 @@ static int __devinit i810fb_setup(char *options)
else if (!strncmp(this_opt, "dcolor", 6))
dcolor = 1;
else if (!strncmp(this_opt, "ddc3", 4))
- ddc3 = 3;
+ ddc3 = true;
else
mode_option = this_opt;
}
diff --git a/drivers/video/intelfb/intelfbdrv.c b/drivers/video/intelfb/intelfbdrv.c
index 5ba399991050..c6afa33a4532 100644
--- a/drivers/video/intelfb/intelfbdrv.c
+++ b/drivers/video/intelfb/intelfbdrv.c
@@ -230,15 +230,15 @@ MODULE_DESCRIPTION("Framebuffer driver for Intel(R) " SUPPORTED_CHIPSETS
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DEVICE_TABLE(pci, intelfb_pci_table);
-static int accel = 1;
+static bool accel = 1;
static int vram = 4;
-static int hwcursor = 0;
-static int mtrr = 1;
-static int fixed = 0;
-static int noinit = 0;
-static int noregister = 0;
-static int probeonly = 0;
-static int idonly = 0;
+static bool hwcursor = 0;
+static bool mtrr = 1;
+static bool fixed = 0;
+static bool noinit = 0;
+static bool noregister = 0;
+static bool probeonly = 0;
+static bool idonly = 0;
static int bailearly = 0;
static int voffset = 48;
static char *mode = NULL;
@@ -263,7 +263,7 @@ module_param(probeonly, bool, 0);
MODULE_PARM_DESC(probeonly, "Do a minimal probe (debug)");
module_param(idonly, bool, 0);
MODULE_PARM_DESC(idonly, "Just identify without doing anything else (debug)");
-module_param(bailearly, bool, 0);
+module_param(bailearly, int, 0);
MODULE_PARM_DESC(bailearly, "Bail out early, depending on value (debug)");
module_param(mode, charp, S_IRUGO);
MODULE_PARM_DESC(mode,
diff --git a/drivers/video/logo/logo.c b/drivers/video/logo/logo.c
index ea7a8ccc830c..080c35b34bbb 100644
--- a/drivers/video/logo/logo.c
+++ b/drivers/video/logo/logo.c
@@ -21,7 +21,7 @@
#include <asm/bootinfo.h>
#endif
-static int nologo;
+static bool nologo;
module_param(nologo, bool, 0);
MODULE_PARM_DESC(nologo, "Disables startup logo");
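
Several hunks in this series (logo, hgafb, i810, intelfb, neofb, nvidia) retype module-parameter variables as bool, since module_param(..., bool, ...) is now expected to be backed by bool storage. A minimal sketch of the resulting pattern, with an illustrative parameter name:

#include <linux/module.h>
#include <linux/moduleparam.h>

/* bool storage is required for bool-typed parameters; an int here
 * would now trigger a warning at build time. */
static bool example_disable;
module_param(example_disable, bool, 0444);
MODULE_PARM_DESC(example_disable, "Disable the example feature");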
diff --git a/drivers/video/matrox/matroxfb_base.c b/drivers/video/matrox/matroxfb_base.c
index 44bf8d4a216b..401a56e250bd 100644
--- a/drivers/video/matrox/matroxfb_base.c
+++ b/drivers/video/matrox/matroxfb_base.c
@@ -147,7 +147,6 @@ static struct fb_var_screeninfo vesafb_defined = {
39721L,48L,16L,33L,10L,
96L,2L,~0, /* No sync info */
FB_VMODE_NONINTERLACED,
- 0, {0,0,0,0,0}
};
diff --git a/drivers/video/matrox/matroxfb_crtc2.c b/drivers/video/matrox/matroxfb_crtc2.c
index d7112c39614b..02796a4317a9 100644
--- a/drivers/video/matrox/matroxfb_crtc2.c
+++ b/drivers/video/matrox/matroxfb_crtc2.c
@@ -593,7 +593,6 @@ static struct fb_var_screeninfo matroxfb_dh_defined = {
39721L,48L,16L,33L,10L,
96L,2,0, /* no sync info */
FB_VMODE_NONINTERLACED,
- 0, {0,0,0,0,0}
};
static int matroxfb_dh_regit(const struct matrox_fb_info *minfo,
diff --git a/drivers/video/mbx/mbxfb.c b/drivers/video/mbx/mbxfb.c
index 6ce34160da78..55bf6196b7a0 100644
--- a/drivers/video/mbx/mbxfb.c
+++ b/drivers/video/mbx/mbxfb.c
@@ -1053,18 +1053,7 @@ static struct platform_driver mbxfb_driver = {
},
};
-int __devinit mbxfb_init(void)
-{
- return platform_driver_register(&mbxfb_driver);
-}
-
-static void __devexit mbxfb_exit(void)
-{
- platform_driver_unregister(&mbxfb_driver);
-}
-
-module_init(mbxfb_init);
-module_exit(mbxfb_exit);
+module_platform_driver(mbxfb_driver);
MODULE_DESCRIPTION("loadable framebuffer driver for Marathon device");
MODULE_AUTHOR("Mike Rapoport, Compulab");
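
module_platform_driver() stands in for the init/exit boilerplate deleted above. Roughly the open-coded equivalent it generates for this driver (modulo the exact generated symbol names), as a sketch:

static int __init mbxfb_driver_init(void)
{
	return platform_driver_register(&mbxfb_driver);
}
module_init(mbxfb_driver_init);

static void __exit mbxfb_driver_exit(void)
{
	platform_driver_unregister(&mbxfb_driver);
}
module_exit(mbxfb_driver_exit);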
diff --git a/drivers/video/mx3fb.c b/drivers/video/mx3fb.c
index e3406ab31305..727a5149d818 100644
--- a/drivers/video/mx3fb.c
+++ b/drivers/video/mx3fb.c
@@ -245,6 +245,7 @@ struct mx3fb_data {
uint32_t h_start_width;
uint32_t v_start_width;
+ enum disp_data_mapping disp_data_fmt;
};
struct dma_chan_request {
@@ -287,11 +288,14 @@ static void mx3fb_write_reg(struct mx3fb_data *mx3fb, u32 value, unsigned long r
__raw_writel(value, mx3fb->reg_base + reg);
}
-static const uint32_t di_mappings[] = {
- 0x1600AAAA, 0x00E05555, 0x00070000, 3, /* RGB888 */
- 0x0005000F, 0x000B000F, 0x0011000F, 1, /* RGB666 */
- 0x0011000F, 0x000B000F, 0x0005000F, 1, /* BGR666 */
- 0x0004003F, 0x000A000F, 0x000F003F, 1 /* RGB565 */
+struct di_mapping {
+ uint32_t b0, b1, b2;
+};
+
+static const struct di_mapping di_mappings[] = {
+ [IPU_DISP_DATA_MAPPING_RGB666] = { 0x0005000f, 0x000b000f, 0x0011000f },
+ [IPU_DISP_DATA_MAPPING_RGB565] = { 0x0004003f, 0x000a000f, 0x000f003f },
+ [IPU_DISP_DATA_MAPPING_RGB888] = { 0x00070000, 0x000f0000, 0x00170000 },
};
static void sdc_fb_init(struct mx3fb_info *fbi)
@@ -334,7 +338,7 @@ static void sdc_enable_channel(struct mx3fb_info *mx3_fbi)
/* This enables the channel */
if (mx3_fbi->cookie < 0) {
mx3_fbi->txd = dma_chan->device->device_prep_slave_sg(dma_chan,
- &mx3_fbi->sg[0], 1, DMA_TO_DEVICE, DMA_PREP_INTERRUPT);
+ &mx3_fbi->sg[0], 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
if (!mx3_fbi->txd) {
dev_err(mx3fb->dev, "Cannot allocate descriptor on %d\n",
dma_chan->chan_id);
@@ -425,7 +429,6 @@ static int sdc_set_window_pos(struct mx3fb_data *mx3fb, enum ipu_channel channel
* @pixel_clk: desired pixel clock frequency in Hz.
* @width: width of panel in pixels.
* @height: height of panel in pixels.
- * @pixel_fmt: pixel format of buffer as FOURCC ASCII code.
* @h_start_width: number of pixel clocks between the HSYNC signal pulse
* and the start of valid data.
* @h_sync_width: width of the HSYNC signal in units of pixel clocks.
@@ -442,7 +445,6 @@ static int sdc_set_window_pos(struct mx3fb_data *mx3fb, enum ipu_channel channel
static int sdc_init_panel(struct mx3fb_data *mx3fb, enum ipu_panel panel,
uint32_t pixel_clk,
uint16_t width, uint16_t height,
- enum pixel_fmt pixel_fmt,
uint16_t h_start_width, uint16_t h_sync_width,
uint16_t h_end_width, uint16_t v_start_width,
uint16_t v_sync_width, uint16_t v_end_width,
@@ -453,6 +455,7 @@ static int sdc_init_panel(struct mx3fb_data *mx3fb, enum ipu_panel panel,
uint32_t old_conf;
uint32_t div;
struct clk *ipu_clk;
+ const struct di_mapping *map;
dev_dbg(mx3fb->dev, "panel size = %d x %d", width, height);
@@ -540,36 +543,10 @@ static int sdc_init_panel(struct mx3fb_data *mx3fb, enum ipu_panel panel,
sig.Vsync_pol << DI_D3_VSYNC_POL_SHIFT;
mx3fb_write_reg(mx3fb, old_conf, DI_DISP_SIG_POL);
- switch (pixel_fmt) {
- case IPU_PIX_FMT_RGB24:
- mx3fb_write_reg(mx3fb, di_mappings[0], DI_DISP3_B0_MAP);
- mx3fb_write_reg(mx3fb, di_mappings[1], DI_DISP3_B1_MAP);
- mx3fb_write_reg(mx3fb, di_mappings[2], DI_DISP3_B2_MAP);
- mx3fb_write_reg(mx3fb, mx3fb_read_reg(mx3fb, DI_DISP_ACC_CC) |
- ((di_mappings[3] - 1) << 12), DI_DISP_ACC_CC);
- break;
- case IPU_PIX_FMT_RGB666:
- mx3fb_write_reg(mx3fb, di_mappings[4], DI_DISP3_B0_MAP);
- mx3fb_write_reg(mx3fb, di_mappings[5], DI_DISP3_B1_MAP);
- mx3fb_write_reg(mx3fb, di_mappings[6], DI_DISP3_B2_MAP);
- mx3fb_write_reg(mx3fb, mx3fb_read_reg(mx3fb, DI_DISP_ACC_CC) |
- ((di_mappings[7] - 1) << 12), DI_DISP_ACC_CC);
- break;
- case IPU_PIX_FMT_BGR666:
- mx3fb_write_reg(mx3fb, di_mappings[8], DI_DISP3_B0_MAP);
- mx3fb_write_reg(mx3fb, di_mappings[9], DI_DISP3_B1_MAP);
- mx3fb_write_reg(mx3fb, di_mappings[10], DI_DISP3_B2_MAP);
- mx3fb_write_reg(mx3fb, mx3fb_read_reg(mx3fb, DI_DISP_ACC_CC) |
- ((di_mappings[11] - 1) << 12), DI_DISP_ACC_CC);
- break;
- default:
- mx3fb_write_reg(mx3fb, di_mappings[12], DI_DISP3_B0_MAP);
- mx3fb_write_reg(mx3fb, di_mappings[13], DI_DISP3_B1_MAP);
- mx3fb_write_reg(mx3fb, di_mappings[14], DI_DISP3_B2_MAP);
- mx3fb_write_reg(mx3fb, mx3fb_read_reg(mx3fb, DI_DISP_ACC_CC) |
- ((di_mappings[15] - 1) << 12), DI_DISP_ACC_CC);
- break;
- }
+ map = &di_mappings[mx3fb->disp_data_fmt];
+ mx3fb_write_reg(mx3fb, map->b0, DI_DISP3_B0_MAP);
+ mx3fb_write_reg(mx3fb, map->b1, DI_DISP3_B1_MAP);
+ mx3fb_write_reg(mx3fb, map->b2, DI_DISP3_B2_MAP);
spin_unlock_irqrestore(&mx3fb->lock, lock_flags);
@@ -780,8 +757,6 @@ static int __set_par(struct fb_info *fbi, bool lock)
if (sdc_init_panel(mx3fb, mode,
(PICOS2KHZ(fbi->var.pixclock)) * 1000UL,
fbi->var.xres, fbi->var.yres,
- (fbi->var.sync & FB_SYNC_SWAP_RGB) ?
- IPU_PIX_FMT_BGR666 : IPU_PIX_FMT_RGB666,
fbi->var.left_margin,
fbi->var.hsync_len,
fbi->var.right_margin +
@@ -1117,7 +1092,7 @@ static int mx3fb_pan_display(struct fb_var_screeninfo *var,
async_tx_ack(mx3_fbi->txd);
txd = dma_chan->device->device_prep_slave_sg(dma_chan, sg +
- mx3_fbi->cur_ipu_buf, 1, DMA_TO_DEVICE, DMA_PREP_INTERRUPT);
+ mx3_fbi->cur_ipu_buf, 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
if (!txd) {
dev_err(fbi->device,
"Error preparing a DMA transaction descriptor.\n");
@@ -1349,6 +1324,12 @@ static int init_fb_chan(struct mx3fb_data *mx3fb, struct idmac_channel *ichan)
const struct fb_videomode *mode;
int ret, num_modes;
+ if (mx3fb_pdata->disp_data_fmt >= ARRAY_SIZE(di_mappings)) {
+ dev_err(dev, "Illegal display data format %d\n",
+ mx3fb_pdata->disp_data_fmt);
+ return -EINVAL;
+ }
+
ichan->client = mx3fb;
irq = ichan->eof_irq;
@@ -1402,6 +1383,8 @@ static int init_fb_chan(struct mx3fb_data *mx3fb, struct idmac_channel *ichan)
mx3fbi->mx3fb = mx3fb;
mx3fbi->blank = FB_BLANK_NORMAL;
+ mx3fb->disp_data_fmt = mx3fb_pdata->disp_data_fmt;
+
init_completion(&mx3fbi->flip_cmpl);
disable_irq(ichan->eof_irq);
dev_dbg(mx3fb->dev, "disabling irq %d\n", ichan->eof_irq);
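
After the change above, the DI byte mapping is no longer guessed from the pixel format: the board selects an enum disp_data_mapping value in platform data and init_fb_chan() range-checks it against di_mappings[]. A sketch of how a board file might set it; the platform-data structure name is assumed from the mx3fb_pdata usage in the driver:

static struct mx3fb_platform_data hypo_fb_pdata = {
	/* .name, mode list, etc. omitted */
	.disp_data_fmt = IPU_DISP_DATA_MAPPING_RGB666,
};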
diff --git a/drivers/video/mxsfb.c b/drivers/video/mxsfb.c
index d837d63c456f..4a89f889852d 100644
--- a/drivers/video/mxsfb.c
+++ b/drivers/video/mxsfb.c
@@ -328,7 +328,7 @@ static void mxsfb_enable_controller(struct fb_info *fb_info)
dev_dbg(&host->pdev->dev, "%s\n", __func__);
- clk_enable(host->clk);
+ clk_prepare_enable(host->clk);
clk_set_rate(host->clk, PICOS2KHZ(fb_info->var.pixclock) * 1000U);
/* if it was disabled, re-enable the mode again */
@@ -368,7 +368,7 @@ static void mxsfb_disable_controller(struct fb_info *fb_info)
writel(VDCTRL4_SYNC_SIGNALS_ON, host->base + LCDC_VDCTRL4 + REG_CLR);
- clk_disable(host->clk);
+ clk_disable_unprepare(host->clk);
host->enabled = 0;
}
@@ -668,7 +668,7 @@ static int __devinit mxsfb_restore_mode(struct mxsfb_info *host)
line_count = fb_info->fix.smem_len / fb_info->fix.line_length;
fb_info->fix.ypanstep = 1;
- clk_enable(host->clk);
+ clk_prepare_enable(host->clk);
host->enabled = 1;
return 0;
@@ -841,7 +841,7 @@ static int __devinit mxsfb_probe(struct platform_device *pdev)
error_register:
if (host->enabled)
- clk_disable(host->clk);
+ clk_disable_unprepare(host->clk);
fb_destroy_modelist(&fb_info->modelist);
error_init_fb:
kfree(fb_info->pseudo_palette);
@@ -902,18 +902,7 @@ static struct platform_driver mxsfb_driver = {
},
};
-static int __init mxsfb_init(void)
-{
- return platform_driver_register(&mxsfb_driver);
-}
-
-static void __exit mxsfb_exit(void)
-{
- platform_driver_unregister(&mxsfb_driver);
-}
-
-module_init(mxsfb_init);
-module_exit(mxsfb_exit);
+module_platform_driver(mxsfb_driver);
MODULE_DESCRIPTION("Freescale mxs framebuffer driver");
MODULE_AUTHOR("Sascha Hauer, Pengutronix");
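
The mxsfb hunks above switch to clk_prepare_enable()/clk_disable_unprepare(), which bundle the prepare/unprepare steps the common clock framework requires around enable/disable. A minimal sketch of the balanced pair, with hypothetical function names:

#include <linux/clk.h>

static int hypo_start(struct clk *clk, unsigned long rate_hz)
{
	int ret;

	ret = clk_prepare_enable(clk);	/* clk_prepare() + clk_enable() */
	if (ret)
		return ret;

	return clk_set_rate(clk, rate_hz);
}

static void hypo_stop(struct clk *clk)
{
	clk_disable_unprepare(clk);	/* clk_disable() + clk_unprepare() */
}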
diff --git a/drivers/video/neofb.c b/drivers/video/neofb.c
index feea7b1dc386..fb3f67391105 100644
--- a/drivers/video/neofb.c
+++ b/drivers/video/neofb.c
@@ -84,11 +84,11 @@
/* --------------------------------------------------------------------- */
-static int internal;
-static int external;
-static int libretto;
-static int nostretch;
-static int nopciburst;
+static bool internal;
+static bool external;
+static bool libretto;
+static bool nostretch;
+static bool nopciburst;
static char *mode_option __devinitdata = NULL;
#ifdef MODULE
diff --git a/drivers/video/nuc900fb.c b/drivers/video/nuc900fb.c
index d1fbbd888cf4..e10f551ade21 100644
--- a/drivers/video/nuc900fb.c
+++ b/drivers/video/nuc900fb.c
@@ -762,18 +762,7 @@ static struct platform_driver nuc900fb_driver = {
},
};
-int __devinit nuc900fb_init(void)
-{
- return platform_driver_register(&nuc900fb_driver);
-}
-
-static void __exit nuc900fb_cleanup(void)
-{
- platform_driver_unregister(&nuc900fb_driver);
-}
-
-module_init(nuc900fb_init);
-module_exit(nuc900fb_cleanup);
+module_platform_driver(nuc900fb_driver);
MODULE_DESCRIPTION("Framebuffer driver for the NUC900");
MODULE_LICENSE("GPL");
diff --git a/drivers/video/nvidia/nvidia.c b/drivers/video/nvidia/nvidia.c
index 081dc4745274..fe13ac567d54 100644
--- a/drivers/video/nvidia/nvidia.c
+++ b/drivers/video/nvidia/nvidia.c
@@ -81,7 +81,7 @@ static int vram __devinitdata = 0;
static int bpp __devinitdata = 8;
static int reverse_i2c __devinitdata;
#ifdef CONFIG_MTRR
-static int nomtrr __devinitdata = 0;
+static bool nomtrr __devinitdata = false;
#endif
#ifdef CONFIG_PMAC_BACKLIGHT
static int backlight __devinitdata = 1;
@@ -1509,7 +1509,7 @@ static int __devinit nvidiafb_setup(char *options)
backlight = simple_strtoul(this_opt+10, NULL, 0);
#ifdef CONFIG_MTRR
} else if (!strncmp(this_opt, "nomtrr", 6)) {
- nomtrr = 1;
+ nomtrr = true;
#endif
} else if (!strncmp(this_opt, "fpdither:", 9)) {
fpdither = simple_strtol(this_opt+9, NULL, 0);
@@ -1599,7 +1599,7 @@ MODULE_PARM_DESC(bpp, "pixel width in bits"
module_param(reverse_i2c, int, 0);
MODULE_PARM_DESC(reverse_i2c, "reverse port assignment of the i2c bus");
#ifdef CONFIG_MTRR
-module_param(nomtrr, bool, 0);
+module_param(nomtrr, bool, false);
MODULE_PARM_DESC(nomtrr, "Disables MTRR support (0 or 1=disabled) "
"(default=0)");
#endif
diff --git a/drivers/video/offb.c b/drivers/video/offb.c
index cb163a5397be..0c4f34311eda 100644
--- a/drivers/video/offb.c
+++ b/drivers/video/offb.c
@@ -41,13 +41,14 @@
/* Supported palette hacks */
enum {
cmap_unknown,
- cmap_m64, /* ATI Mach64 */
+ cmap_simple, /* ATI Mach64 */
cmap_r128, /* ATI Rage128 */
cmap_M3A, /* ATI Rage Mobility M3 Head A */
cmap_M3B, /* ATI Rage Mobility M3 Head B */
cmap_radeon, /* ATI Radeon */
cmap_gxt2000, /* IBM GXT2000 */
cmap_avivo, /* ATI R5xx */
+ cmap_qemu, /* qemu vga */
};
struct offb_par {
@@ -100,36 +101,32 @@ static int offb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
u_int transp, struct fb_info *info)
{
struct offb_par *par = (struct offb_par *) info->par;
- int i, depth;
- u32 *pal = info->pseudo_palette;
-
- depth = info->var.bits_per_pixel;
- if (depth == 16)
- depth = (info->var.green.length == 5) ? 15 : 16;
-
- if (regno > 255 ||
- (depth == 16 && regno > 63) ||
- (depth == 15 && regno > 31))
- return 1;
-
- if (regno < 16) {
- switch (depth) {
- case 15:
- pal[regno] = (regno << 10) | (regno << 5) | regno;
- break;
- case 16:
- pal[regno] = (regno << 11) | (regno << 5) | regno;
- break;
- case 24:
- pal[regno] = (regno << 16) | (regno << 8) | regno;
- break;
- case 32:
- i = (regno << 8) | regno;
- pal[regno] = (i << 16) | i;
- break;
+
+ if (info->fix.visual == FB_VISUAL_TRUECOLOR) {
+ u32 *pal = info->pseudo_palette;
+ u32 cr = red >> (16 - info->var.red.length);
+ u32 cg = green >> (16 - info->var.green.length);
+ u32 cb = blue >> (16 - info->var.blue.length);
+ u32 value;
+
+ if (regno >= 16)
+ return -EINVAL;
+
+ value = (cr << info->var.red.offset) |
+ (cg << info->var.green.offset) |
+ (cb << info->var.blue.offset);
+ if (info->var.transp.length > 0) {
+ u32 mask = (1 << info->var.transp.length) - 1;
+ mask <<= info->var.transp.offset;
+ value |= mask;
}
+ pal[regno] = value;
+ return 0;
}
+ if (regno > 255)
+ return -EINVAL;
+
red >>= 8;
green >>= 8;
blue >>= 8;
@@ -138,7 +135,7 @@ static int offb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
return 0;
switch (par->cmap_type) {
- case cmap_m64:
+ case cmap_simple:
writeb(regno, par->cmap_adr);
writeb(red, par->cmap_data);
writeb(green, par->cmap_data);
@@ -208,7 +205,7 @@ static int offb_blank(int blank, struct fb_info *info)
if (blank)
for (i = 0; i < 256; i++) {
switch (par->cmap_type) {
- case cmap_m64:
+ case cmap_simple:
writeb(i, par->cmap_adr);
for (j = 0; j < 3; j++)
writeb(0, par->cmap_data);
@@ -350,7 +347,7 @@ static void offb_init_palette_hacks(struct fb_info *info, struct device_node *dp
par->cmap_adr =
ioremap(base + 0x7ff000, 0x1000) + 0xcc0;
par->cmap_data = par->cmap_adr + 1;
- par->cmap_type = cmap_m64;
+ par->cmap_type = cmap_simple;
} else if (dp && (of_device_is_compatible(dp, "pci1014,b7") ||
of_device_is_compatible(dp, "pci1014,21c"))) {
par->cmap_adr = offb_map_reg(dp, 0, 0x6000, 0x1000);
@@ -371,6 +368,16 @@ static void offb_init_palette_hacks(struct fb_info *info, struct device_node *dp
par->cmap_type = cmap_avivo;
}
of_node_put(pciparent);
+ } else if (dp && of_device_is_compatible(dp, "qemu,std-vga")) {
+ const u32 io_of_addr[3] = { 0x01000000, 0x0, 0x0 };
+ u64 io_addr = of_translate_address(dp, io_of_addr);
+ if (io_addr != OF_BAD_ADDR) {
+ par->cmap_adr = ioremap(io_addr + 0x3c8, 2);
+ if (par->cmap_adr) {
+ par->cmap_type = cmap_simple;
+ par->cmap_data = par->cmap_adr + 1;
+ }
+ }
}
info->fix.visual = (par->cmap_type != cmap_unknown) ?
FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_STATIC_PSEUDOCOLOR;
@@ -381,7 +388,7 @@ static void __init offb_init_fb(const char *name, const char *full_name,
int pitch, unsigned long address,
int foreign_endian, struct device_node *dp)
{
- unsigned long res_size = pitch * height * (depth + 7) / 8;
+ unsigned long res_size = pitch * height;
struct offb_par *par = &default_par;
unsigned long res_start = address;
struct fb_fix_screeninfo *fix;
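
In the new truecolor path of offb_setcolreg() above, each 16-bit colour component is truncated to the bitfield length and shifted to its offset before being stored in the pseudo palette. A worked sketch for a hypothetical RGB565 layout (red 5 bits at offset 11, green 6 at 5, blue 5 at 0):

#include <linux/types.h>

static u32 rgb565_palette_entry(u16 red, u16 green, u16 blue)
{
	u32 cr = red   >> (16 - 5);	/* full scale 0xffff -> 0x1f */
	u32 cg = green >> (16 - 6);	/* full scale 0xffff -> 0x3f */
	u32 cb = blue  >> (16 - 5);	/* full scale 0xffff -> 0x1f */

	return (cr << 11) | (cg << 5) | cb;	/* white -> 0xffff */
}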
diff --git a/drivers/video/omap/lcd_ams_delta.c b/drivers/video/omap/lcd_ams_delta.c
index 6978ae4ef83a..0fdd6f6873bf 100644
--- a/drivers/video/omap/lcd_ams_delta.c
+++ b/drivers/video/omap/lcd_ams_delta.c
@@ -198,7 +198,7 @@ static int ams_delta_panel_resume(struct platform_device *pdev)
return 0;
}
-struct platform_driver ams_delta_panel_driver = {
+static struct platform_driver ams_delta_panel_driver = {
.probe = ams_delta_panel_probe,
.remove = ams_delta_panel_remove,
.suspend = ams_delta_panel_suspend,
@@ -209,15 +209,4 @@ struct platform_driver ams_delta_panel_driver = {
},
};
-static int __init ams_delta_panel_drv_init(void)
-{
- return platform_driver_register(&ams_delta_panel_driver);
-}
-
-static void __exit ams_delta_panel_drv_cleanup(void)
-{
- platform_driver_unregister(&ams_delta_panel_driver);
-}
-
-module_init(ams_delta_panel_drv_init);
-module_exit(ams_delta_panel_drv_cleanup);
+module_platform_driver(ams_delta_panel_driver);
diff --git a/drivers/video/omap/lcd_h3.c b/drivers/video/omap/lcd_h3.c
index 622ad839fd9d..49bdeca81e50 100644
--- a/drivers/video/omap/lcd_h3.c
+++ b/drivers/video/omap/lcd_h3.c
@@ -113,7 +113,7 @@ static int h3_panel_resume(struct platform_device *pdev)
return 0;
}
-struct platform_driver h3_panel_driver = {
+static struct platform_driver h3_panel_driver = {
.probe = h3_panel_probe,
.remove = h3_panel_remove,
.suspend = h3_panel_suspend,
@@ -124,16 +124,4 @@ struct platform_driver h3_panel_driver = {
},
};
-static int __init h3_panel_drv_init(void)
-{
- return platform_driver_register(&h3_panel_driver);
-}
-
-static void __exit h3_panel_drv_cleanup(void)
-{
- platform_driver_unregister(&h3_panel_driver);
-}
-
-module_init(h3_panel_drv_init);
-module_exit(h3_panel_drv_cleanup);
-
+module_platform_driver(h3_panel_driver);
diff --git a/drivers/video/omap/lcd_htcherald.c b/drivers/video/omap/lcd_htcherald.c
index 4802419da83b..20f477851d54 100644
--- a/drivers/video/omap/lcd_htcherald.c
+++ b/drivers/video/omap/lcd_htcherald.c
@@ -104,7 +104,7 @@ static int htcherald_panel_resume(struct platform_device *pdev)
return 0;
}
-struct platform_driver htcherald_panel_driver = {
+static struct platform_driver htcherald_panel_driver = {
.probe = htcherald_panel_probe,
.remove = htcherald_panel_remove,
.suspend = htcherald_panel_suspend,
@@ -115,16 +115,4 @@ struct platform_driver htcherald_panel_driver = {
},
};
-static int __init htcherald_panel_drv_init(void)
-{
- return platform_driver_register(&htcherald_panel_driver);
-}
-
-static void __exit htcherald_panel_drv_cleanup(void)
-{
- platform_driver_unregister(&htcherald_panel_driver);
-}
-
-module_init(htcherald_panel_drv_init);
-module_exit(htcherald_panel_drv_cleanup);
-
+module_platform_driver(htcherald_panel_driver);
diff --git a/drivers/video/omap/lcd_inn1510.c b/drivers/video/omap/lcd_inn1510.c
index 3271f1643b26..b38b1dd15ce3 100644
--- a/drivers/video/omap/lcd_inn1510.c
+++ b/drivers/video/omap/lcd_inn1510.c
@@ -98,7 +98,7 @@ static int innovator1510_panel_resume(struct platform_device *pdev)
return 0;
}
-struct platform_driver innovator1510_panel_driver = {
+static struct platform_driver innovator1510_panel_driver = {
.probe = innovator1510_panel_probe,
.remove = innovator1510_panel_remove,
.suspend = innovator1510_panel_suspend,
@@ -109,16 +109,4 @@ struct platform_driver innovator1510_panel_driver = {
},
};
-static int __init innovator1510_panel_drv_init(void)
-{
- return platform_driver_register(&innovator1510_panel_driver);
-}
-
-static void __exit innovator1510_panel_drv_cleanup(void)
-{
- platform_driver_unregister(&innovator1510_panel_driver);
-}
-
-module_init(innovator1510_panel_drv_init);
-module_exit(innovator1510_panel_drv_cleanup);
-
+module_platform_driver(innovator1510_panel_driver);
diff --git a/drivers/video/omap/lcd_inn1610.c b/drivers/video/omap/lcd_inn1610.c
index 12cc52a70f96..7e8bd8e08a98 100644
--- a/drivers/video/omap/lcd_inn1610.c
+++ b/drivers/video/omap/lcd_inn1610.c
@@ -122,7 +122,7 @@ static int innovator1610_panel_resume(struct platform_device *pdev)
return 0;
}
-struct platform_driver innovator1610_panel_driver = {
+static struct platform_driver innovator1610_panel_driver = {
.probe = innovator1610_panel_probe,
.remove = innovator1610_panel_remove,
.suspend = innovator1610_panel_suspend,
@@ -133,16 +133,4 @@ struct platform_driver innovator1610_panel_driver = {
},
};
-static int __init innovator1610_panel_drv_init(void)
-{
- return platform_driver_register(&innovator1610_panel_driver);
-}
-
-static void __exit innovator1610_panel_drv_cleanup(void)
-{
- platform_driver_unregister(&innovator1610_panel_driver);
-}
-
-module_init(innovator1610_panel_drv_init);
-module_exit(innovator1610_panel_drv_cleanup);
-
+module_platform_driver(innovator1610_panel_driver);
diff --git a/drivers/video/omap/lcd_mipid.c b/drivers/video/omap/lcd_mipid.c
index eb381db7fe51..8d546dd55e81 100644
--- a/drivers/video/omap/lcd_mipid.c
+++ b/drivers/video/omap/lcd_mipid.c
@@ -603,7 +603,6 @@ static int mipid_spi_remove(struct spi_device *spi)
static struct spi_driver mipid_spi_driver = {
.driver = {
.name = MIPID_MODULE_NAME,
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = mipid_spi_probe,
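The dropped ".bus = &spi_bus_type" initializers in this and the other SPI panel drivers below are redundant rather than functional changes: the SPI core assigns the bus type itself when the driver is registered. Roughly (a paraphrase of drivers/spi/spi.c, not a quote):

	int spi_register_driver(struct spi_driver *sdrv)
	{
		sdrv->driver.bus = &spi_bus_type;	/* set unconditionally by the core */
		/* probe/remove/shutdown shims omitted in this sketch */
		return driver_register(&sdrv->driver);
	}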
diff --git a/drivers/video/omap/lcd_osk.c b/drivers/video/omap/lcd_osk.c
index 6f8d13c41202..5914220dfa9c 100644
--- a/drivers/video/omap/lcd_osk.c
+++ b/drivers/video/omap/lcd_osk.c
@@ -116,7 +116,7 @@ static int osk_panel_resume(struct platform_device *pdev)
return 0;
}
-struct platform_driver osk_panel_driver = {
+static struct platform_driver osk_panel_driver = {
.probe = osk_panel_probe,
.remove = osk_panel_remove,
.suspend = osk_panel_suspend,
@@ -127,16 +127,4 @@ struct platform_driver osk_panel_driver = {
},
};
-static int __init osk_panel_drv_init(void)
-{
- return platform_driver_register(&osk_panel_driver);
-}
-
-static void __exit osk_panel_drv_cleanup(void)
-{
- platform_driver_unregister(&osk_panel_driver);
-}
-
-module_init(osk_panel_drv_init);
-module_exit(osk_panel_drv_cleanup);
-
+module_platform_driver(osk_panel_driver);
diff --git a/drivers/video/omap/lcd_palmte.c b/drivers/video/omap/lcd_palmte.c
index 4cb301750d02..88c31eb0cd6c 100644
--- a/drivers/video/omap/lcd_palmte.c
+++ b/drivers/video/omap/lcd_palmte.c
@@ -97,7 +97,7 @@ static int palmte_panel_resume(struct platform_device *pdev)
return 0;
}
-struct platform_driver palmte_panel_driver = {
+static struct platform_driver palmte_panel_driver = {
.probe = palmte_panel_probe,
.remove = palmte_panel_remove,
.suspend = palmte_panel_suspend,
@@ -108,16 +108,4 @@ struct platform_driver palmte_panel_driver = {
},
};
-static int __init palmte_panel_drv_init(void)
-{
- return platform_driver_register(&palmte_panel_driver);
-}
-
-static void __exit palmte_panel_drv_cleanup(void)
-{
- platform_driver_unregister(&palmte_panel_driver);
-}
-
-module_init(palmte_panel_drv_init);
-module_exit(palmte_panel_drv_cleanup);
-
+module_platform_driver(palmte_panel_driver);
diff --git a/drivers/video/omap/lcd_palmtt.c b/drivers/video/omap/lcd_palmtt.c
index b51b332e5a2b..aaf3c8ba1243 100644
--- a/drivers/video/omap/lcd_palmtt.c
+++ b/drivers/video/omap/lcd_palmtt.c
@@ -102,7 +102,7 @@ static int palmtt_panel_resume(struct platform_device *pdev)
return 0;
}
-struct platform_driver palmtt_panel_driver = {
+static struct platform_driver palmtt_panel_driver = {
.probe = palmtt_panel_probe,
.remove = palmtt_panel_remove,
.suspend = palmtt_panel_suspend,
@@ -113,15 +113,4 @@ struct platform_driver palmtt_panel_driver = {
},
};
-static int __init palmtt_panel_drv_init(void)
-{
- return platform_driver_register(&palmtt_panel_driver);
-}
-
-static void __exit palmtt_panel_drv_cleanup(void)
-{
- platform_driver_unregister(&palmtt_panel_driver);
-}
-
-module_init(palmtt_panel_drv_init);
-module_exit(palmtt_panel_drv_cleanup);
+module_platform_driver(palmtt_panel_driver);
diff --git a/drivers/video/omap/lcd_palmz71.c b/drivers/video/omap/lcd_palmz71.c
index 2334e56536bc..3b7d8aa1cf34 100644
--- a/drivers/video/omap/lcd_palmz71.c
+++ b/drivers/video/omap/lcd_palmz71.c
@@ -98,7 +98,7 @@ static int palmz71_panel_resume(struct platform_device *pdev)
return 0;
}
-struct platform_driver palmz71_panel_driver = {
+static struct platform_driver palmz71_panel_driver = {
.probe = palmz71_panel_probe,
.remove = palmz71_panel_remove,
.suspend = palmz71_panel_suspend,
@@ -109,15 +109,4 @@ struct platform_driver palmz71_panel_driver = {
},
};
-static int __init palmz71_panel_drv_init(void)
-{
- return platform_driver_register(&palmz71_panel_driver);
-}
-
-static void __exit palmz71_panel_drv_cleanup(void)
-{
- platform_driver_unregister(&palmz71_panel_driver);
-}
-
-module_init(palmz71_panel_drv_init);
-module_exit(palmz71_panel_drv_cleanup);
+module_platform_driver(palmz71_panel_driver);
diff --git a/drivers/video/omap/omapfb_main.c b/drivers/video/omap/omapfb_main.c
index 25d8e5103193..b291bfaac80e 100644
--- a/drivers/video/omap/omapfb_main.c
+++ b/drivers/video/omap/omapfb_main.c
@@ -47,9 +47,9 @@ static unsigned int def_rotate;
static unsigned int def_mirror;
#ifdef CONFIG_FB_OMAP_MANUAL_UPDATE
-static int manual_update = 1;
+static bool manual_update = 1;
#else
-static int manual_update;
+static bool manual_update;
#endif
static struct platform_device *fbdev_pdev;
diff --git a/drivers/video/omap/rfbi.c b/drivers/video/omap/rfbi.c
index 0c6981f1a4a3..2c1a3402bef0 100644
--- a/drivers/video/omap/rfbi.c
+++ b/drivers/video/omap/rfbi.c
@@ -2,7 +2,7 @@
* OMAP2 Remote Frame Buffer Interface support
*
* Copyright (C) 2005 Nokia Corporation
- * Author: Juha Yrjl <juha.yrjola@nokia.com>
+ * Author: Juha Yrjölä <juha.yrjola@nokia.com>
* Imre Deak <imre.deak@nokia.com>
*
* This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/video/omap/sossi.c b/drivers/video/omap/sossi.c
index 8fb7c708f563..f79c137753d7 100644
--- a/drivers/video/omap/sossi.c
+++ b/drivers/video/omap/sossi.c
@@ -2,7 +2,7 @@
* OMAP1 Special OptimiSed Screen Interface support
*
* Copyright (C) 2004-2005 Nokia Corporation
- * Author: Juha Yrjl <juha.yrjola@nokia.com>
+ * Author: Juha Yrjölä <juha.yrjola@nokia.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
diff --git a/drivers/video/omap2/displays/Kconfig b/drivers/video/omap2/displays/Kconfig
index 8d8e1fe1901c..74d29b552901 100644
--- a/drivers/video/omap2/displays/Kconfig
+++ b/drivers/video/omap2/displays/Kconfig
@@ -41,7 +41,7 @@ config PANEL_NEC_NL8048HL11_01B
config PANEL_PICODLP
tristate "TI PICO DLP mini-projector"
- depends on OMAP2_DSS && I2C
+ depends on OMAP2_DSS_DPI && I2C
help
A mini-projector used in TI's SDP4430 and EVM boards
For more info please visit http://www.dlp.com/projector/
diff --git a/drivers/video/omap2/displays/panel-acx565akm.c b/drivers/video/omap2/displays/panel-acx565akm.c
index dbd59b8e5b36..51a87e149e24 100644
--- a/drivers/video/omap2/displays/panel-acx565akm.c
+++ b/drivers/video/omap2/displays/panel-acx565akm.c
@@ -803,7 +803,6 @@ static int acx565akm_spi_remove(struct spi_device *spi)
static struct spi_driver acx565akm_spi_driver = {
.driver = {
.name = "acx565akm",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = acx565akm_spi_probe,
diff --git a/drivers/video/omap2/displays/panel-generic-dpi.c b/drivers/video/omap2/displays/panel-generic-dpi.c
index 519c47d2057f..28b9a6d61b0f 100644
--- a/drivers/video/omap2/displays/panel-generic-dpi.c
+++ b/drivers/video/omap2/displays/panel-generic-dpi.c
@@ -297,6 +297,72 @@ static struct panel_config generic_dpi_panels[] = {
.name = "apollon",
},
+ /* FocalTech ETM070003DH6 */
+ {
+ {
+ .x_res = 800,
+ .y_res = 480,
+
+ .pixel_clock = 28000,
+
+ .hsw = 48,
+ .hfp = 40,
+ .hbp = 40,
+
+ .vsw = 3,
+ .vfp = 13,
+ .vbp = 29,
+ },
+ .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
+ OMAP_DSS_LCD_IHS,
+ .name = "focaltech_etm070003dh6",
+ },
+
+ /* Microtips Technologies - UMSH-8173MD */
+ {
+ {
+ .x_res = 800,
+ .y_res = 480,
+
+ .pixel_clock = 34560,
+
+ .hsw = 13,
+ .hfp = 101,
+ .hbp = 101,
+
+ .vsw = 23,
+ .vfp = 1,
+ .vbp = 1,
+ },
+ .acbi = 0x0,
+ .acb = 0x0,
+ .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
+ OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IPC,
+ .power_on_delay = 0,
+ .power_off_delay = 0,
+ .name = "microtips_umsh_8173md",
+ },
+
+ /* OrtusTech COM43H4M10XTC */
+ {
+ {
+ .x_res = 480,
+ .y_res = 272,
+
+ .pixel_clock = 8000,
+
+ .hsw = 41,
+ .hfp = 8,
+ .hbp = 4,
+
+ .vsw = 10,
+ .vfp = 4,
+ .vbp = 2,
+ },
+ .config = OMAP_DSS_LCD_TFT,
+
+ .name = "ortustech_com43h4m10xtc",
+ },
};
struct panel_drv_data {
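The three new generic-DPI timing entries are selected by name from board code through the panel's platform data. A hypothetical board-file hookup (field names assumed from include/video/omap-panel-generic-dpi.h of this period; not part of the patch):

	static struct panel_generic_dpi_data board_lcd_panel = {
		.name		= "focaltech_etm070003dh6",
	};

	static struct omap_dss_device board_lcd_device = {
		.name			= "lcd",
		.driver_name		= "generic_dpi_panel",
		.type			= OMAP_DISPLAY_TYPE_DPI,
		.phy.dpi.data_lines	= 24,
		.data			= &board_lcd_panel,
	};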
diff --git a/drivers/video/omap2/displays/panel-n8x0.c b/drivers/video/omap2/displays/panel-n8x0.c
index 150e8bae35a1..dc9408dc93d1 100644
--- a/drivers/video/omap2/displays/panel-n8x0.c
+++ b/drivers/video/omap2/displays/panel-n8x0.c
@@ -708,7 +708,6 @@ static int mipid_spi_remove(struct spi_device *spi)
static struct spi_driver mipid_spi_driver = {
.driver = {
.name = "lcd_mipid",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = mipid_spi_probe,
diff --git a/drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c b/drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c
index 2ba9d0ca187c..0eb31caddca8 100644
--- a/drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c
+++ b/drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c
@@ -163,50 +163,93 @@ static void nec_8048_panel_remove(struct omap_dss_device *dssdev)
kfree(necd);
}
-static int nec_8048_panel_enable(struct omap_dss_device *dssdev)
+static int nec_8048_panel_power_on(struct omap_dss_device *dssdev)
{
- int r = 0;
+ int r;
struct nec_8048_data *necd = dev_get_drvdata(&dssdev->dev);
struct backlight_device *bl = necd->bl;
+ if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
+ return 0;
+
+ r = omapdss_dpi_display_enable(dssdev);
+ if (r)
+ goto err0;
+
if (dssdev->platform_enable) {
r = dssdev->platform_enable(dssdev);
if (r)
- return r;
+ goto err1;
}
r = nec_8048_bl_update_status(bl);
if (r < 0)
dev_err(&dssdev->dev, "failed to set lcd brightness\n");
- r = omapdss_dpi_display_enable(dssdev);
-
+ return 0;
+err1:
+ omapdss_dpi_display_disable(dssdev);
+err0:
return r;
}
-static void nec_8048_panel_disable(struct omap_dss_device *dssdev)
+static void nec_8048_panel_power_off(struct omap_dss_device *dssdev)
{
struct nec_8048_data *necd = dev_get_drvdata(&dssdev->dev);
struct backlight_device *bl = necd->bl;
- omapdss_dpi_display_disable(dssdev);
+ if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
+ return;
bl->props.brightness = 0;
nec_8048_bl_update_status(bl);
if (dssdev->platform_disable)
dssdev->platform_disable(dssdev);
+
+ omapdss_dpi_display_disable(dssdev);
+}
+
+static int nec_8048_panel_enable(struct omap_dss_device *dssdev)
+{
+ int r;
+
+ r = nec_8048_panel_power_on(dssdev);
+ if (r)
+ return r;
+
+ dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+
+ return 0;
+}
+
+static void nec_8048_panel_disable(struct omap_dss_device *dssdev)
+{
+ nec_8048_panel_power_off(dssdev);
+
+ dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
}
static int nec_8048_panel_suspend(struct omap_dss_device *dssdev)
{
- nec_8048_panel_disable(dssdev);
+ nec_8048_panel_power_off(dssdev);
+
+ dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
+
return 0;
}
static int nec_8048_panel_resume(struct omap_dss_device *dssdev)
{
- return nec_8048_panel_enable(dssdev);
+ int r;
+
+ r = nec_8048_panel_power_on(dssdev);
+ if (r)
+ return r;
+
+ dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+
+ return 0;
}
static int nec_8048_recommended_bpp(struct omap_dss_device *dssdev)
@@ -303,7 +346,6 @@ static struct spi_driver nec_8048_spi_driver = {
.resume = nec_8048_spi_resume,
.driver = {
.name = "nec_8048_spi",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
};
diff --git a/drivers/video/omap2/displays/panel-taal.c b/drivers/video/omap2/displays/panel-taal.c
index 80c3f6ab1a94..00c5c615585f 100644
--- a/drivers/video/omap2/displays/panel-taal.c
+++ b/drivers/video/omap2/displays/panel-taal.c
@@ -198,12 +198,6 @@ struct taal_data {
bool te_enabled;
atomic_t do_update;
- struct {
- u16 x;
- u16 y;
- u16 w;
- u16 h;
- } update_region;
int channel;
struct delayed_work te_timeout_work;
@@ -1188,6 +1182,10 @@ static int taal_power_on(struct omap_dss_device *dssdev)
if (r)
goto err;
+ r = dsi_enable_video_output(dssdev, td->channel);
+ if (r)
+ goto err;
+
td->enabled = 1;
if (!td->intro_printed) {
@@ -1217,6 +1215,8 @@ static void taal_power_off(struct omap_dss_device *dssdev)
struct taal_data *td = dev_get_drvdata(&dssdev->dev);
int r;
+ dsi_disable_video_output(dssdev, td->channel);
+
r = taal_dcs_write_0(td, MIPI_DCS_SET_DISPLAY_OFF);
if (!r)
r = taal_sleep_in(td);
@@ -1394,12 +1394,8 @@ static irqreturn_t taal_te_isr(int irq, void *data)
if (old) {
cancel_delayed_work(&td->te_timeout_work);
- r = omap_dsi_update(dssdev, td->channel,
- td->update_region.x,
- td->update_region.y,
- td->update_region.w,
- td->update_region.h,
- taal_framedone_cb, dssdev);
+ r = omap_dsi_update(dssdev, td->channel, taal_framedone_cb,
+ dssdev);
if (r)
goto err;
}
@@ -1444,26 +1440,20 @@ static int taal_update(struct omap_dss_device *dssdev,
goto err;
}
- r = omap_dsi_prepare_update(dssdev, &x, &y, &w, &h, true);
- if (r)
- goto err;
-
- r = taal_set_update_window(td, x, y, w, h);
+	/* XXX no need to send this every frame, but dsi breaks if not done */
+ r = taal_set_update_window(td, 0, 0,
+ td->panel_config->timings.x_res,
+ td->panel_config->timings.y_res);
if (r)
goto err;
if (td->te_enabled && panel_data->use_ext_te) {
- td->update_region.x = x;
- td->update_region.y = y;
- td->update_region.w = w;
- td->update_region.h = h;
- barrier();
schedule_delayed_work(&td->te_timeout_work,
msecs_to_jiffies(250));
atomic_set(&td->do_update, 1);
} else {
- r = omap_dsi_update(dssdev, td->channel, x, y, w, h,
- taal_framedone_cb, dssdev);
+ r = omap_dsi_update(dssdev, td->channel, taal_framedone_cb,
+ dssdev);
if (r)
goto err;
}
diff --git a/drivers/video/omap2/displays/panel-tpo-td043mtea1.c b/drivers/video/omap2/displays/panel-tpo-td043mtea1.c
index 2462b9ec6662..e6649aa89591 100644
--- a/drivers/video/omap2/displays/panel-tpo-td043mtea1.c
+++ b/drivers/video/omap2/displays/panel-tpo-td043mtea1.c
@@ -512,7 +512,6 @@ static int __devexit tpo_td043_spi_remove(struct spi_device *spi)
static struct spi_driver tpo_td043_spi_driver = {
.driver = {
.name = "tpo_td043mtea1_panel_spi",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = tpo_td043_spi_probe,
diff --git a/drivers/video/omap2/dss/Makefile b/drivers/video/omap2/dss/Makefile
index bd34ac5b2026..5c450b0f94d0 100644
--- a/drivers/video/omap2/dss/Makefile
+++ b/drivers/video/omap2/dss/Makefile
@@ -1,5 +1,6 @@
obj-$(CONFIG_OMAP2_DSS) += omapdss.o
-omapdss-y := core.o dss.o dss_features.o dispc.o display.o manager.o overlay.o
+omapdss-y := core.o dss.o dss_features.o dispc.o dispc_coefs.o display.o \
+ manager.o overlay.o apply.o
omapdss-$(CONFIG_OMAP2_DSS_DPI) += dpi.o
omapdss-$(CONFIG_OMAP2_DSS_RFBI) += rfbi.o
omapdss-$(CONFIG_OMAP2_DSS_VENC) += venc.o
diff --git a/drivers/video/omap2/dss/apply.c b/drivers/video/omap2/dss/apply.c
new file mode 100644
index 000000000000..052dc874cd3d
--- /dev/null
+++ b/drivers/video/omap2/dss/apply.c
@@ -0,0 +1,1324 @@
+/*
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define DSS_SUBSYS_NAME "APPLY"
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/jiffies.h>
+
+#include <video/omapdss.h>
+
+#include "dss.h"
+#include "dss_features.h"
+
+/*
+ * We have 4 levels of cache for the dispc settings. First two are in SW and
+ * the latter two in HW.
+ *
+ * set_info()
+ * v
+ * +--------------------+
+ * | user_info |
+ * +--------------------+
+ * v
+ * apply()
+ * v
+ * +--------------------+
+ * | info |
+ * +--------------------+
+ * v
+ * write_regs()
+ * v
+ * +--------------------+
+ * | shadow registers |
+ * +--------------------+
+ * v
+ * VFP or lcd/digit_enable
+ * v
+ * +--------------------+
+ * | registers |
+ * +--------------------+
+ */
+
+struct ovl_priv_data {
+
+ bool user_info_dirty;
+ struct omap_overlay_info user_info;
+
+ bool info_dirty;
+ struct omap_overlay_info info;
+
+ bool shadow_info_dirty;
+
+ bool extra_info_dirty;
+ bool shadow_extra_info_dirty;
+
+ bool enabled;
+ enum omap_channel channel;
+ u32 fifo_low, fifo_high;
+
+ /*
+ * True if overlay is to be enabled. Used to check and calculate configs
+ * for the overlay before it is enabled in the HW.
+ */
+ bool enabling;
+};
+
+struct mgr_priv_data {
+
+ bool user_info_dirty;
+ struct omap_overlay_manager_info user_info;
+
+ bool info_dirty;
+ struct omap_overlay_manager_info info;
+
+ bool shadow_info_dirty;
+
+ /* If true, GO bit is up and shadow registers cannot be written.
+ * Never true for manual update displays */
+ bool busy;
+
+ /* If true, dispc output is enabled */
+ bool updating;
+
+ /* If true, a display is enabled using this manager */
+ bool enabled;
+};
+
+static struct {
+ struct ovl_priv_data ovl_priv_data_array[MAX_DSS_OVERLAYS];
+ struct mgr_priv_data mgr_priv_data_array[MAX_DSS_MANAGERS];
+
+ bool irq_enabled;
+} dss_data;
+
+/* protects dss_data */
+static spinlock_t data_lock;
+/* lock for blocking functions */
+static DEFINE_MUTEX(apply_lock);
+static DECLARE_COMPLETION(extra_updated_completion);
+
+static void dss_register_vsync_isr(void);
+
+static struct ovl_priv_data *get_ovl_priv(struct omap_overlay *ovl)
+{
+ return &dss_data.ovl_priv_data_array[ovl->id];
+}
+
+static struct mgr_priv_data *get_mgr_priv(struct omap_overlay_manager *mgr)
+{
+ return &dss_data.mgr_priv_data_array[mgr->id];
+}
+
+void dss_apply_init(void)
+{
+ const int num_ovls = dss_feat_get_num_ovls();
+ int i;
+
+ spin_lock_init(&data_lock);
+
+ for (i = 0; i < num_ovls; ++i) {
+ struct ovl_priv_data *op;
+
+ op = &dss_data.ovl_priv_data_array[i];
+
+ op->info.global_alpha = 255;
+
+ switch (i) {
+ case 0:
+ op->info.zorder = 0;
+ break;
+ case 1:
+ op->info.zorder =
+ dss_has_feature(FEAT_ALPHA_FREE_ZORDER) ? 3 : 0;
+ break;
+ case 2:
+ op->info.zorder =
+ dss_has_feature(FEAT_ALPHA_FREE_ZORDER) ? 2 : 0;
+ break;
+ case 3:
+ op->info.zorder =
+ dss_has_feature(FEAT_ALPHA_FREE_ZORDER) ? 1 : 0;
+ break;
+ }
+
+ op->user_info = op->info;
+ }
+}
+
+static bool ovl_manual_update(struct omap_overlay *ovl)
+{
+ return ovl->manager->device->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE;
+}
+
+static bool mgr_manual_update(struct omap_overlay_manager *mgr)
+{
+ return mgr->device->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE;
+}
+
+static int dss_check_settings_low(struct omap_overlay_manager *mgr,
+ struct omap_dss_device *dssdev, bool applying)
+{
+ struct omap_overlay_info *oi;
+ struct omap_overlay_manager_info *mi;
+ struct omap_overlay *ovl;
+ struct omap_overlay_info *ois[MAX_DSS_OVERLAYS];
+ struct ovl_priv_data *op;
+ struct mgr_priv_data *mp;
+
+ mp = get_mgr_priv(mgr);
+
+ if (applying && mp->user_info_dirty)
+ mi = &mp->user_info;
+ else
+ mi = &mp->info;
+
+ /* collect the infos to be tested into the array */
+ list_for_each_entry(ovl, &mgr->overlays, list) {
+ op = get_ovl_priv(ovl);
+
+ if (!op->enabled && !op->enabling)
+ oi = NULL;
+ else if (applying && op->user_info_dirty)
+ oi = &op->user_info;
+ else
+ oi = &op->info;
+
+ ois[ovl->id] = oi;
+ }
+
+ return dss_mgr_check(mgr, dssdev, mi, ois);
+}
+
+/*
+ * check manager and overlay settings using overlay_info from data->info
+ */
+static int dss_check_settings(struct omap_overlay_manager *mgr,
+ struct omap_dss_device *dssdev)
+{
+ return dss_check_settings_low(mgr, dssdev, false);
+}
+
+/*
+ * check manager and overlay settings using overlay_info from ovl->info if
+ * dirty and from data->info otherwise
+ */
+static int dss_check_settings_apply(struct omap_overlay_manager *mgr,
+ struct omap_dss_device *dssdev)
+{
+ return dss_check_settings_low(mgr, dssdev, true);
+}
+
+static bool need_isr(void)
+{
+ const int num_mgrs = dss_feat_get_num_mgrs();
+ int i;
+
+ for (i = 0; i < num_mgrs; ++i) {
+ struct omap_overlay_manager *mgr;
+ struct mgr_priv_data *mp;
+ struct omap_overlay *ovl;
+
+ mgr = omap_dss_get_overlay_manager(i);
+ mp = get_mgr_priv(mgr);
+
+ if (!mp->enabled)
+ continue;
+
+ if (mgr_manual_update(mgr)) {
+ /* to catch FRAMEDONE */
+ if (mp->updating)
+ return true;
+ } else {
+ /* to catch GO bit going down */
+ if (mp->busy)
+ return true;
+
+ /* to write new values to registers */
+ if (mp->info_dirty)
+ return true;
+
+ /* to set GO bit */
+ if (mp->shadow_info_dirty)
+ return true;
+
+ list_for_each_entry(ovl, &mgr->overlays, list) {
+ struct ovl_priv_data *op;
+
+ op = get_ovl_priv(ovl);
+
+ /*
+ * NOTE: we check extra_info flags even for
+ * disabled overlays, as extra_infos need to be
+ * always written.
+ */
+
+ /* to write new values to registers */
+ if (op->extra_info_dirty)
+ return true;
+
+ /* to set GO bit */
+ if (op->shadow_extra_info_dirty)
+ return true;
+
+ if (!op->enabled)
+ continue;
+
+ /* to write new values to registers */
+ if (op->info_dirty)
+ return true;
+
+ /* to set GO bit */
+ if (op->shadow_info_dirty)
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+static bool need_go(struct omap_overlay_manager *mgr)
+{
+ struct omap_overlay *ovl;
+ struct mgr_priv_data *mp;
+ struct ovl_priv_data *op;
+
+ mp = get_mgr_priv(mgr);
+
+ if (mp->shadow_info_dirty)
+ return true;
+
+ list_for_each_entry(ovl, &mgr->overlays, list) {
+ op = get_ovl_priv(ovl);
+ if (op->shadow_info_dirty || op->shadow_extra_info_dirty)
+ return true;
+ }
+
+ return false;
+}
+
+/* returns true if an extra_info field is currently being updated */
+static bool extra_info_update_ongoing(void)
+{
+ const int num_ovls = omap_dss_get_num_overlays();
+ struct ovl_priv_data *op;
+ struct omap_overlay *ovl;
+ struct mgr_priv_data *mp;
+ int i;
+
+ for (i = 0; i < num_ovls; ++i) {
+ ovl = omap_dss_get_overlay(i);
+ op = get_ovl_priv(ovl);
+
+ if (!ovl->manager)
+ continue;
+
+ mp = get_mgr_priv(ovl->manager);
+
+ if (!mp->enabled)
+ continue;
+
+ if (!mp->updating)
+ continue;
+
+ if (op->extra_info_dirty || op->shadow_extra_info_dirty)
+ return true;
+ }
+
+ return false;
+}
+
+/* wait until no extra_info updates are pending */
+static void wait_pending_extra_info_updates(void)
+{
+ bool updating;
+ unsigned long flags;
+ unsigned long t;
+
+ spin_lock_irqsave(&data_lock, flags);
+
+ updating = extra_info_update_ongoing();
+
+ if (!updating) {
+ spin_unlock_irqrestore(&data_lock, flags);
+ return;
+ }
+
+ init_completion(&extra_updated_completion);
+
+ spin_unlock_irqrestore(&data_lock, flags);
+
+ t = msecs_to_jiffies(500);
+ wait_for_completion_timeout(&extra_updated_completion, t);
+
+ updating = extra_info_update_ongoing();
+
+ WARN_ON(updating);
+}
+
+int dss_mgr_wait_for_go(struct omap_overlay_manager *mgr)
+{
+ unsigned long timeout = msecs_to_jiffies(500);
+ struct mgr_priv_data *mp;
+ u32 irq;
+ int r;
+ int i;
+ struct omap_dss_device *dssdev = mgr->device;
+
+ if (!dssdev || dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
+ return 0;
+
+ if (mgr_manual_update(mgr))
+ return 0;
+
+ irq = dispc_mgr_get_vsync_irq(mgr->id);
+
+ mp = get_mgr_priv(mgr);
+ i = 0;
+ while (1) {
+ unsigned long flags;
+ bool shadow_dirty, dirty;
+
+ spin_lock_irqsave(&data_lock, flags);
+ dirty = mp->info_dirty;
+ shadow_dirty = mp->shadow_info_dirty;
+ spin_unlock_irqrestore(&data_lock, flags);
+
+ if (!dirty && !shadow_dirty) {
+ r = 0;
+ break;
+ }
+
+ /* 4 iterations is the worst case:
+ * 1 - initial iteration, dirty = true (between VFP and VSYNC)
+ * 2 - first VSYNC, dirty = true
+ * 3 - dirty = false, shadow_dirty = true
+ * 4 - shadow_dirty = false */
+ if (i++ == 3) {
+ DSSERR("mgr(%d)->wait_for_go() not finishing\n",
+ mgr->id);
+ r = 0;
+ break;
+ }
+
+ r = omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
+ if (r == -ERESTARTSYS)
+ break;
+
+ if (r) {
+ DSSERR("mgr(%d)->wait_for_go() timeout\n", mgr->id);
+ break;
+ }
+ }
+
+ return r;
+}
+
+int dss_mgr_wait_for_go_ovl(struct omap_overlay *ovl)
+{
+ unsigned long timeout = msecs_to_jiffies(500);
+ struct ovl_priv_data *op;
+ struct omap_dss_device *dssdev;
+ u32 irq;
+ int r;
+ int i;
+
+ if (!ovl->manager)
+ return 0;
+
+ dssdev = ovl->manager->device;
+
+ if (!dssdev || dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
+ return 0;
+
+ if (ovl_manual_update(ovl))
+ return 0;
+
+ irq = dispc_mgr_get_vsync_irq(ovl->manager->id);
+
+ op = get_ovl_priv(ovl);
+ i = 0;
+ while (1) {
+ unsigned long flags;
+ bool shadow_dirty, dirty;
+
+ spin_lock_irqsave(&data_lock, flags);
+ dirty = op->info_dirty;
+ shadow_dirty = op->shadow_info_dirty;
+ spin_unlock_irqrestore(&data_lock, flags);
+
+ if (!dirty && !shadow_dirty) {
+ r = 0;
+ break;
+ }
+
+ /* 4 iterations is the worst case:
+ * 1 - initial iteration, dirty = true (between VFP and VSYNC)
+ * 2 - first VSYNC, dirty = true
+ * 3 - dirty = false, shadow_dirty = true
+ * 4 - shadow_dirty = false */
+ if (i++ == 3) {
+ DSSERR("ovl(%d)->wait_for_go() not finishing\n",
+ ovl->id);
+ r = 0;
+ break;
+ }
+
+ r = omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
+ if (r == -ERESTARTSYS)
+ break;
+
+ if (r) {
+ DSSERR("ovl(%d)->wait_for_go() timeout\n", ovl->id);
+ break;
+ }
+ }
+
+ return r;
+}
+
+static void dss_ovl_write_regs(struct omap_overlay *ovl)
+{
+ struct ovl_priv_data *op = get_ovl_priv(ovl);
+ struct omap_overlay_info *oi;
+ bool ilace, replication;
+ struct mgr_priv_data *mp;
+ int r;
+
+ DSSDBGF("%d", ovl->id);
+
+ if (!op->enabled || !op->info_dirty)
+ return;
+
+ oi = &op->info;
+
+ replication = dss_use_replication(ovl->manager->device, oi->color_mode);
+
+ ilace = ovl->manager->device->type == OMAP_DISPLAY_TYPE_VENC;
+
+ r = dispc_ovl_setup(ovl->id, oi, ilace, replication);
+ if (r) {
+ /*
+ * We can't do much here, as this function can be called from
+ * vsync interrupt.
+ */
+ DSSERR("dispc_ovl_setup failed for ovl %d\n", ovl->id);
+
+ /* This will leave fifo configurations in a nonoptimal state */
+ op->enabled = false;
+ dispc_ovl_enable(ovl->id, false);
+ return;
+ }
+
+ mp = get_mgr_priv(ovl->manager);
+
+ op->info_dirty = false;
+ if (mp->updating)
+ op->shadow_info_dirty = true;
+}
+
+static void dss_ovl_write_regs_extra(struct omap_overlay *ovl)
+{
+ struct ovl_priv_data *op = get_ovl_priv(ovl);
+ struct mgr_priv_data *mp;
+
+ DSSDBGF("%d", ovl->id);
+
+ if (!op->extra_info_dirty)
+ return;
+
+ /* note: write also when op->enabled == false, so that the ovl gets
+ * disabled */
+
+ dispc_ovl_enable(ovl->id, op->enabled);
+ dispc_ovl_set_channel_out(ovl->id, op->channel);
+ dispc_ovl_set_fifo_threshold(ovl->id, op->fifo_low, op->fifo_high);
+
+ mp = get_mgr_priv(ovl->manager);
+
+ op->extra_info_dirty = false;
+ if (mp->updating)
+ op->shadow_extra_info_dirty = true;
+}
+
+static void dss_mgr_write_regs(struct omap_overlay_manager *mgr)
+{
+ struct mgr_priv_data *mp = get_mgr_priv(mgr);
+ struct omap_overlay *ovl;
+
+ DSSDBGF("%d", mgr->id);
+
+ if (!mp->enabled)
+ return;
+
+ WARN_ON(mp->busy);
+
+ /* Commit overlay settings */
+ list_for_each_entry(ovl, &mgr->overlays, list) {
+ dss_ovl_write_regs(ovl);
+ dss_ovl_write_regs_extra(ovl);
+ }
+
+ if (mp->info_dirty) {
+ dispc_mgr_setup(mgr->id, &mp->info);
+
+ mp->info_dirty = false;
+ if (mp->updating)
+ mp->shadow_info_dirty = true;
+ }
+}
+
+static void dss_write_regs(void)
+{
+ const int num_mgrs = omap_dss_get_num_overlay_managers();
+ int i;
+
+ for (i = 0; i < num_mgrs; ++i) {
+ struct omap_overlay_manager *mgr;
+ struct mgr_priv_data *mp;
+ int r;
+
+ mgr = omap_dss_get_overlay_manager(i);
+ mp = get_mgr_priv(mgr);
+
+ if (!mp->enabled || mgr_manual_update(mgr) || mp->busy)
+ continue;
+
+ r = dss_check_settings(mgr, mgr->device);
+ if (r) {
+ DSSERR("cannot write registers for manager %s: "
+ "illegal configuration\n", mgr->name);
+ continue;
+ }
+
+ dss_mgr_write_regs(mgr);
+ }
+}
+
+static void dss_set_go_bits(void)
+{
+ const int num_mgrs = omap_dss_get_num_overlay_managers();
+ int i;
+
+ for (i = 0; i < num_mgrs; ++i) {
+ struct omap_overlay_manager *mgr;
+ struct mgr_priv_data *mp;
+
+ mgr = omap_dss_get_overlay_manager(i);
+ mp = get_mgr_priv(mgr);
+
+ if (!mp->enabled || mgr_manual_update(mgr) || mp->busy)
+ continue;
+
+ if (!need_go(mgr))
+ continue;
+
+ mp->busy = true;
+
+ if (!dss_data.irq_enabled && need_isr())
+ dss_register_vsync_isr();
+
+ dispc_mgr_go(mgr->id);
+ }
+
+}
+
+void dss_mgr_start_update(struct omap_overlay_manager *mgr)
+{
+ struct mgr_priv_data *mp = get_mgr_priv(mgr);
+ unsigned long flags;
+ int r;
+
+ spin_lock_irqsave(&data_lock, flags);
+
+ WARN_ON(mp->updating);
+
+ r = dss_check_settings(mgr, mgr->device);
+ if (r) {
+ DSSERR("cannot start manual update: illegal configuration\n");
+ spin_unlock_irqrestore(&data_lock, flags);
+ return;
+ }
+
+ dss_mgr_write_regs(mgr);
+
+ mp->updating = true;
+
+ if (!dss_data.irq_enabled && need_isr())
+ dss_register_vsync_isr();
+
+ dispc_mgr_enable(mgr->id, true);
+
+ spin_unlock_irqrestore(&data_lock, flags);
+}
+
+static void dss_apply_irq_handler(void *data, u32 mask);
+
+static void dss_register_vsync_isr(void)
+{
+ const int num_mgrs = dss_feat_get_num_mgrs();
+ u32 mask;
+ int r, i;
+
+ mask = 0;
+ for (i = 0; i < num_mgrs; ++i)
+ mask |= dispc_mgr_get_vsync_irq(i);
+
+ for (i = 0; i < num_mgrs; ++i)
+ mask |= dispc_mgr_get_framedone_irq(i);
+
+ r = omap_dispc_register_isr(dss_apply_irq_handler, NULL, mask);
+ WARN_ON(r);
+
+ dss_data.irq_enabled = true;
+}
+
+static void dss_unregister_vsync_isr(void)
+{
+ const int num_mgrs = dss_feat_get_num_mgrs();
+ u32 mask;
+ int r, i;
+
+ mask = 0;
+ for (i = 0; i < num_mgrs; ++i)
+ mask |= dispc_mgr_get_vsync_irq(i);
+
+ for (i = 0; i < num_mgrs; ++i)
+ mask |= dispc_mgr_get_framedone_irq(i);
+
+ r = omap_dispc_unregister_isr(dss_apply_irq_handler, NULL, mask);
+ WARN_ON(r);
+
+ dss_data.irq_enabled = false;
+}
+
+static void mgr_clear_shadow_dirty(struct omap_overlay_manager *mgr)
+{
+ struct omap_overlay *ovl;
+ struct mgr_priv_data *mp;
+ struct ovl_priv_data *op;
+
+ mp = get_mgr_priv(mgr);
+ mp->shadow_info_dirty = false;
+
+ list_for_each_entry(ovl, &mgr->overlays, list) {
+ op = get_ovl_priv(ovl);
+ op->shadow_info_dirty = false;
+ op->shadow_extra_info_dirty = false;
+ }
+}
+
+static void dss_apply_irq_handler(void *data, u32 mask)
+{
+ const int num_mgrs = dss_feat_get_num_mgrs();
+ int i;
+ bool extra_updating;
+
+ spin_lock(&data_lock);
+
+ /* clear busy, updating flags, shadow_dirty flags */
+ for (i = 0; i < num_mgrs; i++) {
+ struct omap_overlay_manager *mgr;
+ struct mgr_priv_data *mp;
+ bool was_updating;
+
+ mgr = omap_dss_get_overlay_manager(i);
+ mp = get_mgr_priv(mgr);
+
+ if (!mp->enabled)
+ continue;
+
+ was_updating = mp->updating;
+ mp->updating = dispc_mgr_is_enabled(i);
+
+ if (!mgr_manual_update(mgr)) {
+ bool was_busy = mp->busy;
+ mp->busy = dispc_mgr_go_busy(i);
+
+ if (was_busy && !mp->busy)
+ mgr_clear_shadow_dirty(mgr);
+ } else {
+ if (was_updating && !mp->updating)
+ mgr_clear_shadow_dirty(mgr);
+ }
+ }
+
+ dss_write_regs();
+ dss_set_go_bits();
+
+ extra_updating = extra_info_update_ongoing();
+ if (!extra_updating)
+ complete_all(&extra_updated_completion);
+
+ if (!need_isr())
+ dss_unregister_vsync_isr();
+
+ spin_unlock(&data_lock);
+}
+
+static void omap_dss_mgr_apply_ovl(struct omap_overlay *ovl)
+{
+ struct ovl_priv_data *op;
+
+ op = get_ovl_priv(ovl);
+
+ if (!op->user_info_dirty)
+ return;
+
+ op->user_info_dirty = false;
+ op->info_dirty = true;
+ op->info = op->user_info;
+}
+
+static void omap_dss_mgr_apply_mgr(struct omap_overlay_manager *mgr)
+{
+ struct mgr_priv_data *mp;
+
+ mp = get_mgr_priv(mgr);
+
+ if (!mp->user_info_dirty)
+ return;
+
+ mp->user_info_dirty = false;
+ mp->info_dirty = true;
+ mp->info = mp->user_info;
+}
+
+int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
+{
+ unsigned long flags;
+ struct omap_overlay *ovl;
+ int r;
+
+ DSSDBG("omap_dss_mgr_apply(%s)\n", mgr->name);
+
+ spin_lock_irqsave(&data_lock, flags);
+
+ r = dss_check_settings_apply(mgr, mgr->device);
+ if (r) {
+ spin_unlock_irqrestore(&data_lock, flags);
+ DSSERR("failed to apply settings: illegal configuration.\n");
+ return r;
+ }
+
+ /* Configure overlays */
+ list_for_each_entry(ovl, &mgr->overlays, list)
+ omap_dss_mgr_apply_ovl(ovl);
+
+ /* Configure manager */
+ omap_dss_mgr_apply_mgr(mgr);
+
+ dss_write_regs();
+ dss_set_go_bits();
+
+ spin_unlock_irqrestore(&data_lock, flags);
+
+ return 0;
+}
+
+static void dss_apply_ovl_enable(struct omap_overlay *ovl, bool enable)
+{
+ struct ovl_priv_data *op;
+
+ op = get_ovl_priv(ovl);
+
+ if (op->enabled == enable)
+ return;
+
+ op->enabled = enable;
+ op->extra_info_dirty = true;
+}
+
+static void dss_apply_ovl_fifo_thresholds(struct omap_overlay *ovl,
+ u32 fifo_low, u32 fifo_high)
+{
+ struct ovl_priv_data *op = get_ovl_priv(ovl);
+
+ if (op->fifo_low == fifo_low && op->fifo_high == fifo_high)
+ return;
+
+ op->fifo_low = fifo_low;
+ op->fifo_high = fifo_high;
+ op->extra_info_dirty = true;
+}
+
+static void dss_ovl_setup_fifo(struct omap_overlay *ovl)
+{
+ struct ovl_priv_data *op = get_ovl_priv(ovl);
+ struct omap_dss_device *dssdev;
+ u32 size, burst_size;
+ u32 fifo_low, fifo_high;
+
+ if (!op->enabled && !op->enabling)
+ return;
+
+ dssdev = ovl->manager->device;
+
+ size = dispc_ovl_get_fifo_size(ovl->id);
+
+ burst_size = dispc_ovl_get_burst_size(ovl->id);
+
+ switch (dssdev->type) {
+ case OMAP_DISPLAY_TYPE_DPI:
+ case OMAP_DISPLAY_TYPE_DBI:
+ case OMAP_DISPLAY_TYPE_SDI:
+ case OMAP_DISPLAY_TYPE_VENC:
+ case OMAP_DISPLAY_TYPE_HDMI:
+ default_get_overlay_fifo_thresholds(ovl->id, size,
+ burst_size, &fifo_low, &fifo_high);
+ break;
+#ifdef CONFIG_OMAP2_DSS_DSI
+ case OMAP_DISPLAY_TYPE_DSI:
+ dsi_get_overlay_fifo_thresholds(ovl->id, size,
+ burst_size, &fifo_low, &fifo_high);
+ break;
+#endif
+ default:
+ BUG();
+ }
+
+ dss_apply_ovl_fifo_thresholds(ovl, fifo_low, fifo_high);
+}
+
+static void dss_mgr_setup_fifos(struct omap_overlay_manager *mgr)
+{
+ struct omap_overlay *ovl;
+ struct mgr_priv_data *mp;
+
+ mp = get_mgr_priv(mgr);
+
+ if (!mp->enabled)
+ return;
+
+ list_for_each_entry(ovl, &mgr->overlays, list)
+ dss_ovl_setup_fifo(ovl);
+}
+
+static void dss_setup_fifos(void)
+{
+ const int num_mgrs = omap_dss_get_num_overlay_managers();
+ struct omap_overlay_manager *mgr;
+ int i;
+
+ for (i = 0; i < num_mgrs; ++i) {
+ mgr = omap_dss_get_overlay_manager(i);
+ dss_mgr_setup_fifos(mgr);
+ }
+}
+
+int dss_mgr_enable(struct omap_overlay_manager *mgr)
+{
+ struct mgr_priv_data *mp = get_mgr_priv(mgr);
+ unsigned long flags;
+ int r;
+
+ mutex_lock(&apply_lock);
+
+ if (mp->enabled)
+ goto out;
+
+ spin_lock_irqsave(&data_lock, flags);
+
+ mp->enabled = true;
+
+ r = dss_check_settings(mgr, mgr->device);
+ if (r) {
+ DSSERR("failed to enable manager %d: check_settings failed\n",
+ mgr->id);
+ goto err;
+ }
+
+ dss_setup_fifos();
+
+ dss_write_regs();
+ dss_set_go_bits();
+
+ if (!mgr_manual_update(mgr))
+ mp->updating = true;
+
+ spin_unlock_irqrestore(&data_lock, flags);
+
+ if (!mgr_manual_update(mgr))
+ dispc_mgr_enable(mgr->id, true);
+
+out:
+ mutex_unlock(&apply_lock);
+
+ return 0;
+
+err:
+ mp->enabled = false;
+ spin_unlock_irqrestore(&data_lock, flags);
+ mutex_unlock(&apply_lock);
+ return r;
+}
+
+void dss_mgr_disable(struct omap_overlay_manager *mgr)
+{
+ struct mgr_priv_data *mp = get_mgr_priv(mgr);
+ unsigned long flags;
+
+ mutex_lock(&apply_lock);
+
+ if (!mp->enabled)
+ goto out;
+
+ if (!mgr_manual_update(mgr))
+ dispc_mgr_enable(mgr->id, false);
+
+ spin_lock_irqsave(&data_lock, flags);
+
+ mp->updating = false;
+ mp->enabled = false;
+
+ spin_unlock_irqrestore(&data_lock, flags);
+
+out:
+ mutex_unlock(&apply_lock);
+}
+
+int dss_mgr_set_info(struct omap_overlay_manager *mgr,
+ struct omap_overlay_manager_info *info)
+{
+ struct mgr_priv_data *mp = get_mgr_priv(mgr);
+ unsigned long flags;
+ int r;
+
+ r = dss_mgr_simple_check(mgr, info);
+ if (r)
+ return r;
+
+ spin_lock_irqsave(&data_lock, flags);
+
+ mp->user_info = *info;
+ mp->user_info_dirty = true;
+
+ spin_unlock_irqrestore(&data_lock, flags);
+
+ return 0;
+}
+
+void dss_mgr_get_info(struct omap_overlay_manager *mgr,
+ struct omap_overlay_manager_info *info)
+{
+ struct mgr_priv_data *mp = get_mgr_priv(mgr);
+ unsigned long flags;
+
+ spin_lock_irqsave(&data_lock, flags);
+
+ *info = mp->user_info;
+
+ spin_unlock_irqrestore(&data_lock, flags);
+}
+
+int dss_mgr_set_device(struct omap_overlay_manager *mgr,
+ struct omap_dss_device *dssdev)
+{
+ int r;
+
+ mutex_lock(&apply_lock);
+
+ if (dssdev->manager) {
+ DSSERR("display '%s' already has a manager '%s'\n",
+ dssdev->name, dssdev->manager->name);
+ r = -EINVAL;
+ goto err;
+ }
+
+ if ((mgr->supported_displays & dssdev->type) == 0) {
+ DSSERR("display '%s' does not support manager '%s'\n",
+ dssdev->name, mgr->name);
+ r = -EINVAL;
+ goto err;
+ }
+
+ dssdev->manager = mgr;
+ mgr->device = dssdev;
+
+ mutex_unlock(&apply_lock);
+
+ return 0;
+err:
+ mutex_unlock(&apply_lock);
+ return r;
+}
+
+int dss_mgr_unset_device(struct omap_overlay_manager *mgr)
+{
+ int r;
+
+ mutex_lock(&apply_lock);
+
+ if (!mgr->device) {
+ DSSERR("failed to unset display, display not set.\n");
+ r = -EINVAL;
+ goto err;
+ }
+
+ /*
+ * Don't allow currently enabled displays to have the overlay manager
+ * pulled out from underneath them
+ */
+ if (mgr->device->state != OMAP_DSS_DISPLAY_DISABLED) {
+ r = -EINVAL;
+ goto err;
+ }
+
+ mgr->device->manager = NULL;
+ mgr->device = NULL;
+
+ mutex_unlock(&apply_lock);
+
+ return 0;
+err:
+ mutex_unlock(&apply_lock);
+ return r;
+}
+
+
+int dss_ovl_set_info(struct omap_overlay *ovl,
+ struct omap_overlay_info *info)
+{
+ struct ovl_priv_data *op = get_ovl_priv(ovl);
+ unsigned long flags;
+ int r;
+
+ r = dss_ovl_simple_check(ovl, info);
+ if (r)
+ return r;
+
+ spin_lock_irqsave(&data_lock, flags);
+
+ op->user_info = *info;
+ op->user_info_dirty = true;
+
+ spin_unlock_irqrestore(&data_lock, flags);
+
+ return 0;
+}
+
+void dss_ovl_get_info(struct omap_overlay *ovl,
+ struct omap_overlay_info *info)
+{
+ struct ovl_priv_data *op = get_ovl_priv(ovl);
+ unsigned long flags;
+
+ spin_lock_irqsave(&data_lock, flags);
+
+ *info = op->user_info;
+
+ spin_unlock_irqrestore(&data_lock, flags);
+}
+
+int dss_ovl_set_manager(struct omap_overlay *ovl,
+ struct omap_overlay_manager *mgr)
+{
+ struct ovl_priv_data *op = get_ovl_priv(ovl);
+ unsigned long flags;
+ int r;
+
+ if (!mgr)
+ return -EINVAL;
+
+ mutex_lock(&apply_lock);
+
+ if (ovl->manager) {
+ DSSERR("overlay '%s' already has a manager '%s'\n",
+ ovl->name, ovl->manager->name);
+ r = -EINVAL;
+ goto err;
+ }
+
+ spin_lock_irqsave(&data_lock, flags);
+
+ if (op->enabled) {
+ spin_unlock_irqrestore(&data_lock, flags);
+ DSSERR("overlay has to be disabled to change the manager\n");
+ r = -EINVAL;
+ goto err;
+ }
+
+ op->channel = mgr->id;
+ op->extra_info_dirty = true;
+
+ ovl->manager = mgr;
+ list_add_tail(&ovl->list, &mgr->overlays);
+
+ spin_unlock_irqrestore(&data_lock, flags);
+
+ /* XXX: When there is an overlay on a DSI manual update display, and
+ * the overlay is first disabled, then moved to tv, and enabled, we
+ * seem to get SYNC_LOST_DIGIT error.
+ *
+ * Waiting doesn't seem to help, but updating the manual update display
+ * after disabling the overlay seems to fix this. This hints that the
+ * overlay is perhaps somehow tied to the LCD output until the output
+ * is updated.
+ *
+ * Userspace workaround for this is to update the LCD after disabling
+ * the overlay, but before moving the overlay to TV.
+ */
+
+ mutex_unlock(&apply_lock);
+
+ return 0;
+err:
+ mutex_unlock(&apply_lock);
+ return r;
+}
+
+int dss_ovl_unset_manager(struct omap_overlay *ovl)
+{
+ struct ovl_priv_data *op = get_ovl_priv(ovl);
+ unsigned long flags;
+ int r;
+
+ mutex_lock(&apply_lock);
+
+ if (!ovl->manager) {
+ DSSERR("failed to detach overlay: manager not set\n");
+ r = -EINVAL;
+ goto err;
+ }
+
+ spin_lock_irqsave(&data_lock, flags);
+
+ if (op->enabled) {
+ spin_unlock_irqrestore(&data_lock, flags);
+ DSSERR("overlay has to be disabled to unset the manager\n");
+ r = -EINVAL;
+ goto err;
+ }
+
+ op->channel = -1;
+
+ ovl->manager = NULL;
+ list_del(&ovl->list);
+
+ spin_unlock_irqrestore(&data_lock, flags);
+
+ mutex_unlock(&apply_lock);
+
+ return 0;
+err:
+ mutex_unlock(&apply_lock);
+ return r;
+}
+
+bool dss_ovl_is_enabled(struct omap_overlay *ovl)
+{
+ struct ovl_priv_data *op = get_ovl_priv(ovl);
+ unsigned long flags;
+ bool e;
+
+ spin_lock_irqsave(&data_lock, flags);
+
+ e = op->enabled;
+
+ spin_unlock_irqrestore(&data_lock, flags);
+
+ return e;
+}
+
+int dss_ovl_enable(struct omap_overlay *ovl)
+{
+ struct ovl_priv_data *op = get_ovl_priv(ovl);
+ unsigned long flags;
+ int r;
+
+ mutex_lock(&apply_lock);
+
+ if (op->enabled) {
+ r = 0;
+ goto err1;
+ }
+
+ if (ovl->manager == NULL || ovl->manager->device == NULL) {
+ r = -EINVAL;
+ goto err1;
+ }
+
+ spin_lock_irqsave(&data_lock, flags);
+
+ op->enabling = true;
+
+ r = dss_check_settings(ovl->manager, ovl->manager->device);
+ if (r) {
+ DSSERR("failed to enable overlay %d: check_settings failed\n",
+ ovl->id);
+ goto err2;
+ }
+
+ dss_setup_fifos();
+
+ op->enabling = false;
+ dss_apply_ovl_enable(ovl, true);
+
+ dss_write_regs();
+ dss_set_go_bits();
+
+ spin_unlock_irqrestore(&data_lock, flags);
+
+ mutex_unlock(&apply_lock);
+
+ return 0;
+err2:
+ op->enabling = false;
+ spin_unlock_irqrestore(&data_lock, flags);
+err1:
+ mutex_unlock(&apply_lock);
+ return r;
+}
+
+int dss_ovl_disable(struct omap_overlay *ovl)
+{
+ struct ovl_priv_data *op = get_ovl_priv(ovl);
+ unsigned long flags;
+ int r;
+
+ mutex_lock(&apply_lock);
+
+ if (!op->enabled) {
+ r = 0;
+ goto err;
+ }
+
+ if (ovl->manager == NULL || ovl->manager->device == NULL) {
+ r = -EINVAL;
+ goto err;
+ }
+
+ spin_lock_irqsave(&data_lock, flags);
+
+ dss_apply_ovl_enable(ovl, false);
+ dss_write_regs();
+ dss_set_go_bits();
+
+ spin_unlock_irqrestore(&data_lock, flags);
+
+ mutex_unlock(&apply_lock);
+
+ return 0;
+
+err:
+ mutex_unlock(&apply_lock);
+ return r;
+}
+
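apply.c now owns the user_info -> info -> shadow-register pipeline drawn in the comment at the top of the file: the dss_*_set_info() calls only touch user_info, while omap_dss_mgr_apply() latches it into info, writes the registers and raises the GO bit (or leaves that to the vsync/framedone ISR when the hardware is busy). A minimal sketch of the intended call order from a hypothetical caller (error handling trimmed; not part of the patch):

	static int example_update_overlay(struct omap_overlay *ovl,
					  struct omap_overlay_info *info)
	{
		int r;

		r = dss_ovl_set_info(ovl, info);	/* stages user_info only */
		if (r)
			return r;

		/* copies user_info -> info, writes regs, sets GO when possible */
		return omap_dss_mgr_apply(ovl->manager);
	}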
diff --git a/drivers/video/omap2/dss/core.c b/drivers/video/omap2/dss/core.c
index 86ec12e16c7c..8613f86fb56d 100644
--- a/drivers/video/omap2/dss/core.c
+++ b/drivers/video/omap2/dss/core.c
@@ -50,7 +50,7 @@ module_param_named(def_disp, def_disp_name, charp, 0);
MODULE_PARM_DESC(def_disp, "default display name");
#ifdef DEBUG
-unsigned int dss_debug;
+bool dss_debug;
module_param_named(debug, dss_debug, bool, 0644);
#endif
@@ -178,6 +178,8 @@ static int omap_dss_probe(struct platform_device *pdev)
dss_features_init();
+ dss_apply_init();
+
dss_init_overlay_managers(pdev);
dss_init_overlays(pdev);
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c
index 5c81533eacaa..a5ec7f37c185 100644
--- a/drivers/video/omap2/dss/dispc.c
+++ b/drivers/video/omap2/dss/dispc.c
@@ -64,22 +64,6 @@ struct omap_dispc_isr_data {
u32 mask;
};
-struct dispc_h_coef {
- s8 hc4;
- s8 hc3;
- u8 hc2;
- s8 hc1;
- s8 hc0;
-};
-
-struct dispc_v_coef {
- s8 vc22;
- s8 vc2;
- u8 vc1;
- s8 vc0;
- s8 vc00;
-};
-
enum omap_burst_size {
BURST_SIZE_X2 = 0,
BURST_SIZE_X4 = 1,
@@ -438,6 +422,34 @@ static struct omap_dss_device *dispc_mgr_get_device(enum omap_channel channel)
return mgr ? mgr->device : NULL;
}
+u32 dispc_mgr_get_vsync_irq(enum omap_channel channel)
+{
+ switch (channel) {
+ case OMAP_DSS_CHANNEL_LCD:
+ return DISPC_IRQ_VSYNC;
+ case OMAP_DSS_CHANNEL_LCD2:
+ return DISPC_IRQ_VSYNC2;
+ case OMAP_DSS_CHANNEL_DIGIT:
+ return DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN;
+ default:
+ BUG();
+ }
+}
+
+u32 dispc_mgr_get_framedone_irq(enum omap_channel channel)
+{
+ switch (channel) {
+ case OMAP_DSS_CHANNEL_LCD:
+ return DISPC_IRQ_FRAMEDONE;
+ case OMAP_DSS_CHANNEL_LCD2:
+ return DISPC_IRQ_FRAMEDONE2;
+ case OMAP_DSS_CHANNEL_DIGIT:
+ return 0;
+ default:
+ BUG();
+ }
+}
+
bool dispc_mgr_go_busy(enum omap_channel channel)
{
int bit;
@@ -533,105 +545,27 @@ static void dispc_ovl_write_firv2_reg(enum omap_plane plane, int reg, u32 value)
dispc_write_reg(DISPC_OVL_FIR_COEF_V2(plane, reg), value);
}
-static void dispc_ovl_set_scale_coef(enum omap_plane plane, int hscaleup,
- int vscaleup, int five_taps,
- enum omap_color_component color_comp)
-{
- /* Coefficients for horizontal up-sampling */
- static const struct dispc_h_coef coef_hup[8] = {
- { 0, 0, 128, 0, 0 },
- { -1, 13, 124, -8, 0 },
- { -2, 30, 112, -11, -1 },
- { -5, 51, 95, -11, -2 },
- { 0, -9, 73, 73, -9 },
- { -2, -11, 95, 51, -5 },
- { -1, -11, 112, 30, -2 },
- { 0, -8, 124, 13, -1 },
- };
-
- /* Coefficients for vertical up-sampling */
- static const struct dispc_v_coef coef_vup_3tap[8] = {
- { 0, 0, 128, 0, 0 },
- { 0, 3, 123, 2, 0 },
- { 0, 12, 111, 5, 0 },
- { 0, 32, 89, 7, 0 },
- { 0, 0, 64, 64, 0 },
- { 0, 7, 89, 32, 0 },
- { 0, 5, 111, 12, 0 },
- { 0, 2, 123, 3, 0 },
- };
-
- static const struct dispc_v_coef coef_vup_5tap[8] = {
- { 0, 0, 128, 0, 0 },
- { -1, 13, 124, -8, 0 },
- { -2, 30, 112, -11, -1 },
- { -5, 51, 95, -11, -2 },
- { 0, -9, 73, 73, -9 },
- { -2, -11, 95, 51, -5 },
- { -1, -11, 112, 30, -2 },
- { 0, -8, 124, 13, -1 },
- };
-
- /* Coefficients for horizontal down-sampling */
- static const struct dispc_h_coef coef_hdown[8] = {
- { 0, 36, 56, 36, 0 },
- { 4, 40, 55, 31, -2 },
- { 8, 44, 54, 27, -5 },
- { 12, 48, 53, 22, -7 },
- { -9, 17, 52, 51, 17 },
- { -7, 22, 53, 48, 12 },
- { -5, 27, 54, 44, 8 },
- { -2, 31, 55, 40, 4 },
- };
-
- /* Coefficients for vertical down-sampling */
- static const struct dispc_v_coef coef_vdown_3tap[8] = {
- { 0, 36, 56, 36, 0 },
- { 0, 40, 57, 31, 0 },
- { 0, 45, 56, 27, 0 },
- { 0, 50, 55, 23, 0 },
- { 0, 18, 55, 55, 0 },
- { 0, 23, 55, 50, 0 },
- { 0, 27, 56, 45, 0 },
- { 0, 31, 57, 40, 0 },
- };
-
- static const struct dispc_v_coef coef_vdown_5tap[8] = {
- { 0, 36, 56, 36, 0 },
- { 4, 40, 55, 31, -2 },
- { 8, 44, 54, 27, -5 },
- { 12, 48, 53, 22, -7 },
- { -9, 17, 52, 51, 17 },
- { -7, 22, 53, 48, 12 },
- { -5, 27, 54, 44, 8 },
- { -2, 31, 55, 40, 4 },
- };
-
- const struct dispc_h_coef *h_coef;
- const struct dispc_v_coef *v_coef;
+static void dispc_ovl_set_scale_coef(enum omap_plane plane, int fir_hinc,
+ int fir_vinc, int five_taps,
+ enum omap_color_component color_comp)
+{
+ const struct dispc_coef *h_coef, *v_coef;
int i;
- if (hscaleup)
- h_coef = coef_hup;
- else
- h_coef = coef_hdown;
-
- if (vscaleup)
- v_coef = five_taps ? coef_vup_5tap : coef_vup_3tap;
- else
- v_coef = five_taps ? coef_vdown_5tap : coef_vdown_3tap;
+ h_coef = dispc_ovl_get_scale_coef(fir_hinc, true);
+ v_coef = dispc_ovl_get_scale_coef(fir_vinc, five_taps);
for (i = 0; i < 8; i++) {
u32 h, hv;
- h = FLD_VAL(h_coef[i].hc0, 7, 0)
- | FLD_VAL(h_coef[i].hc1, 15, 8)
- | FLD_VAL(h_coef[i].hc2, 23, 16)
- | FLD_VAL(h_coef[i].hc3, 31, 24);
- hv = FLD_VAL(h_coef[i].hc4, 7, 0)
- | FLD_VAL(v_coef[i].vc0, 15, 8)
- | FLD_VAL(v_coef[i].vc1, 23, 16)
- | FLD_VAL(v_coef[i].vc2, 31, 24);
+ h = FLD_VAL(h_coef[i].hc0_vc00, 7, 0)
+ | FLD_VAL(h_coef[i].hc1_vc0, 15, 8)
+ | FLD_VAL(h_coef[i].hc2_vc1, 23, 16)
+ | FLD_VAL(h_coef[i].hc3_vc2, 31, 24);
+ hv = FLD_VAL(h_coef[i].hc4_vc22, 7, 0)
+ | FLD_VAL(v_coef[i].hc1_vc0, 15, 8)
+ | FLD_VAL(v_coef[i].hc2_vc1, 23, 16)
+ | FLD_VAL(v_coef[i].hc3_vc2, 31, 24);
if (color_comp == DISPC_COLOR_COMPONENT_RGB_Y) {
dispc_ovl_write_firh_reg(plane, i, h);
@@ -646,8 +580,8 @@ static void dispc_ovl_set_scale_coef(enum omap_plane plane, int hscaleup,
if (five_taps) {
for (i = 0; i < 8; i++) {
u32 v;
- v = FLD_VAL(v_coef[i].vc00, 7, 0)
- | FLD_VAL(v_coef[i].vc22, 15, 8);
+ v = FLD_VAL(v_coef[i].hc0_vc00, 7, 0)
+ | FLD_VAL(v_coef[i].hc4_vc22, 15, 8);
if (color_comp == DISPC_COLOR_COMPONENT_RGB_Y)
dispc_ovl_write_firv_reg(plane, i, v);
else
@@ -875,8 +809,7 @@ static void dispc_ovl_set_color_mode(enum omap_plane plane,
REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), m, 4, 1);
}
-static void dispc_ovl_set_channel_out(enum omap_plane plane,
- enum omap_channel channel)
+void dispc_ovl_set_channel_out(enum omap_plane plane, enum omap_channel channel)
{
int shift;
u32 val;
@@ -923,6 +856,39 @@ static void dispc_ovl_set_channel_out(enum omap_plane plane,
dispc_write_reg(DISPC_OVL_ATTRIBUTES(plane), val);
}
+static enum omap_channel dispc_ovl_get_channel_out(enum omap_plane plane)
+{
+ int shift;
+ u32 val;
+ enum omap_channel channel;
+
+ switch (plane) {
+ case OMAP_DSS_GFX:
+ shift = 8;
+ break;
+ case OMAP_DSS_VIDEO1:
+ case OMAP_DSS_VIDEO2:
+ case OMAP_DSS_VIDEO3:
+ shift = 16;
+ break;
+ default:
+ BUG();
+ }
+
+ val = dispc_read_reg(DISPC_OVL_ATTRIBUTES(plane));
+
+ if (dss_has_feature(FEAT_MGR_LCD2)) {
+ if (FLD_GET(val, 31, 30) == 0)
+ channel = FLD_GET(val, shift, shift);
+ else
+ channel = OMAP_DSS_CHANNEL_LCD2;
+ } else {
+ channel = FLD_GET(val, shift, shift);
+ }
+
+ return channel;
+}
+
static void dispc_ovl_set_burst_size(enum omap_plane plane,
enum omap_burst_size burst_size)
{
@@ -964,7 +930,7 @@ void dispc_enable_gamma_table(bool enable)
REG_FLD_MOD(DISPC_CONFIG, enable, 9, 9);
}
-void dispc_mgr_enable_cpr(enum omap_channel channel, bool enable)
+static void dispc_mgr_enable_cpr(enum omap_channel channel, bool enable)
{
u16 reg;
@@ -978,7 +944,7 @@ void dispc_mgr_enable_cpr(enum omap_channel channel, bool enable)
REG_FLD_MOD(reg, enable, 15, 15);
}
-void dispc_mgr_set_cpr_coef(enum omap_channel channel,
+static void dispc_mgr_set_cpr_coef(enum omap_channel channel,
struct omap_dss_cpr_coefs *coefs)
{
u32 coef_r, coef_g, coef_b;
@@ -1057,8 +1023,7 @@ u32 dispc_ovl_get_fifo_size(enum omap_plane plane)
return dispc.fifo_size[plane];
}
-static void dispc_ovl_set_fifo_threshold(enum omap_plane plane, u32 low,
- u32 high)
+void dispc_ovl_set_fifo_threshold(enum omap_plane plane, u32 low, u32 high)
{
u8 hi_start, hi_end, lo_start, lo_end;
u32 unit;
@@ -1169,17 +1134,12 @@ static void dispc_ovl_set_scale_param(enum omap_plane plane,
enum omap_color_component color_comp)
{
int fir_hinc, fir_vinc;
- int hscaleup, vscaleup;
-
- hscaleup = orig_width <= out_width;
- vscaleup = orig_height <= out_height;
-
- dispc_ovl_set_scale_coef(plane, hscaleup, vscaleup, five_taps,
- color_comp);
fir_hinc = 1024 * orig_width / out_width;
fir_vinc = 1024 * orig_height / out_height;
+ dispc_ovl_set_scale_coef(plane, fir_hinc, fir_vinc, five_taps,
+ color_comp);
dispc_ovl_set_fir(plane, fir_hinc, fir_vinc, color_comp);
}
@@ -1654,6 +1614,9 @@ static unsigned long calc_fclk_five_taps(enum omap_channel channel, u16 width,
u32 fclk = 0;
u64 tmp, pclk = dispc_mgr_pclk_rate(channel);
+ if (height <= out_height && width <= out_width)
+ return (unsigned long) pclk;
+
if (height > out_height) {
struct omap_dss_device *dssdev = dispc_mgr_get_device(channel);
unsigned int ppl = dssdev->panel.timings.x_res;
@@ -1708,7 +1671,16 @@ static unsigned long calc_fclk(enum omap_channel channel, u16 width,
else
vf = 1;
- return dispc_mgr_pclk_rate(channel) * vf * hf;
+ if (cpu_is_omap24xx()) {
+ if (vf > 1 && hf > 1)
+ return dispc_mgr_pclk_rate(channel) * 4;
+ else
+ return dispc_mgr_pclk_rate(channel) * 2;
+ } else if (cpu_is_omap34xx()) {
+ return dispc_mgr_pclk_rate(channel) * vf * hf;
+ } else {
+ return dispc_mgr_pclk_rate(channel) * hf;
+ }
}
static int dispc_ovl_calc_scaling(enum omap_plane plane,
@@ -1718,6 +1690,8 @@ static int dispc_ovl_calc_scaling(enum omap_plane plane,
{
struct omap_overlay *ovl = omap_dss_get_overlay(plane);
const int maxdownscale = dss_feat_get_param_max(FEAT_PARAM_DOWNSCALE);
+ const int maxsinglelinewidth =
+ dss_feat_get_param_max(FEAT_PARAM_LINEWIDTH);
unsigned long fclk = 0;
if (width == out_width && height == out_height)
@@ -1734,28 +1708,40 @@ static int dispc_ovl_calc_scaling(enum omap_plane plane,
out_height > height * 8)
return -EINVAL;
- /* Must use 5-tap filter? */
- *five_taps = height > out_height * 2;
-
- if (!*five_taps) {
+ if (cpu_is_omap24xx()) {
+ if (width > maxsinglelinewidth)
+ DSSERR("Cannot scale max input width exceeded");
+ *five_taps = false;
+ fclk = calc_fclk(channel, width, height, out_width,
+ out_height);
+ } else if (cpu_is_omap34xx()) {
+ if (width > (maxsinglelinewidth * 2)) {
+ DSSERR("Cannot setup scaling");
+ DSSERR("width exceeds maximum width possible");
+ return -EINVAL;
+ }
+ fclk = calc_fclk_five_taps(channel, width, height, out_width,
+ out_height, color_mode);
+ if (width > maxsinglelinewidth) {
+ if (height > out_height && height < out_height * 2)
+ *five_taps = false;
+ else {
+ DSSERR("cannot setup scaling with five taps");
+ return -EINVAL;
+ }
+ }
+ if (!*five_taps)
+ fclk = calc_fclk(channel, width, height, out_width,
+ out_height);
+ } else {
+ if (width > maxsinglelinewidth) {
+ DSSERR("Cannot scale width exceeds max line width");
+ return -EINVAL;
+ }
fclk = calc_fclk(channel, width, height, out_width,
out_height);
-
- /* Try 5-tap filter if 3-tap fclk is too high */
- if (cpu_is_omap34xx() && height > out_height &&
- fclk > dispc_fclk_rate())
- *five_taps = true;
- }
-
- if (width > (2048 >> *five_taps)) {
- DSSERR("failed to set up scaling, fclk too low\n");
- return -EINVAL;
}
- if (*five_taps)
- fclk = calc_fclk_five_taps(channel, width, height,
- out_width, out_height, color_mode);
-
DSSDBG("required fclk rate = %lu Hz\n", fclk);
DSSDBG("current fclk rate = %lu Hz\n", dispc_fclk_rate());
@@ -1771,11 +1757,10 @@ static int dispc_ovl_calc_scaling(enum omap_plane plane,
}
int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi,
- bool ilace, enum omap_channel channel, bool replication,
- u32 fifo_low, u32 fifo_high)
+ bool ilace, bool replication)
{
struct omap_overlay *ovl = omap_dss_get_overlay(plane);
- bool five_taps = false;
+ bool five_taps = true;
bool fieldmode = 0;
int r, cconv = 0;
unsigned offset0, offset1;
@@ -1783,36 +1768,43 @@ int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi,
s32 pix_inc;
u16 frame_height = oi->height;
unsigned int field_offset = 0;
+ u16 outw, outh;
+ enum omap_channel channel;
+
+ channel = dispc_ovl_get_channel_out(plane);
DSSDBG("dispc_ovl_setup %d, pa %x, pa_uv %x, sw %d, %d,%d, %dx%d -> "
- "%dx%d, cmode %x, rot %d, mir %d, ilace %d chan %d repl %d "
- "fifo_low %d fifo high %d\n", plane, oi->paddr, oi->p_uv_addr,
+ "%dx%d, cmode %x, rot %d, mir %d, ilace %d chan %d repl %d\n",
+ plane, oi->paddr, oi->p_uv_addr,
oi->screen_width, oi->pos_x, oi->pos_y, oi->width, oi->height,
oi->out_width, oi->out_height, oi->color_mode, oi->rotation,
- oi->mirror, ilace, channel, replication, fifo_low, fifo_high);
+ oi->mirror, ilace, channel, replication);
if (oi->paddr == 0)
return -EINVAL;
- if (ilace && oi->height == oi->out_height)
+ outw = oi->out_width == 0 ? oi->width : oi->out_width;
+ outh = oi->out_height == 0 ? oi->height : oi->out_height;
+
+ if (ilace && oi->height == outh)
fieldmode = 1;
if (ilace) {
if (fieldmode)
oi->height /= 2;
oi->pos_y /= 2;
- oi->out_height /= 2;
+ outh /= 2;
DSSDBG("adjusting for ilace: height %d, pos_y %d, "
"out_height %d\n",
- oi->height, oi->pos_y, oi->out_height);
+ oi->height, oi->pos_y, outh);
}
if (!dss_feat_color_mode_supported(plane, oi->color_mode))
return -EINVAL;
r = dispc_ovl_calc_scaling(plane, channel, oi->width, oi->height,
- oi->out_width, oi->out_height, oi->color_mode,
+ outw, outh, oi->color_mode,
&five_taps);
if (r)
return r;
@@ -1830,10 +1822,10 @@ int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi,
* so the integer part must be added to the base address of the
* bottom field.
*/
- if (!oi->height || oi->height == oi->out_height)
+ if (!oi->height || oi->height == outh)
field_offset = 0;
else
- field_offset = oi->height / oi->out_height / 2;
+ field_offset = oi->height / outh / 2;
}
/* Fields are independent but interleaved in memory. */
@@ -1869,7 +1861,7 @@ int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi,
dispc_ovl_set_pix_inc(plane, pix_inc);
DSSDBG("%d,%d %dx%d -> %dx%d\n", oi->pos_x, oi->pos_y, oi->width,
- oi->height, oi->out_width, oi->out_height);
+ oi->height, outw, outh);
dispc_ovl_set_pos(plane, oi->pos_x, oi->pos_y);
@@ -1877,10 +1869,10 @@ int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi,
if (ovl->caps & OMAP_DSS_OVL_CAP_SCALE) {
dispc_ovl_set_scaling(plane, oi->width, oi->height,
- oi->out_width, oi->out_height,
+ outw, outh,
ilace, five_taps, fieldmode,
oi->color_mode, oi->rotation);
- dispc_ovl_set_vid_size(plane, oi->out_width, oi->out_height);
+ dispc_ovl_set_vid_size(plane, outw, outh);
dispc_ovl_set_vid_color_conv(plane, cconv);
}
@@ -1891,10 +1883,7 @@ int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi,
dispc_ovl_set_pre_mult_alpha(plane, oi->pre_mult_alpha);
dispc_ovl_setup_global_alpha(plane, oi->global_alpha);
- dispc_ovl_set_channel_out(plane, channel);
-
dispc_ovl_enable_replication(plane, replication);
- dispc_ovl_set_fifo_threshold(plane, fifo_low, fifo_high);
return 0;
}
@@ -1916,10 +1905,14 @@ static void dispc_disable_isr(void *data, u32 mask)
static void _enable_lcd_out(enum omap_channel channel, bool enable)
{
- if (channel == OMAP_DSS_CHANNEL_LCD2)
+ if (channel == OMAP_DSS_CHANNEL_LCD2) {
REG_FLD_MOD(DISPC_CONTROL2, enable ? 1 : 0, 0, 0);
- else
+ /* flush posted write */
+ dispc_read_reg(DISPC_CONTROL2);
+ } else {
REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 0, 0);
+ dispc_read_reg(DISPC_CONTROL);
+ }
}
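The read-back added after each REG_FLD_MOD above flushes the posted write, so the enable or disable has actually reached the DISPC before the caller goes on to wait for a FRAMEDONE or VSYNC interrupt. A minimal sketch of the same write-then-read-back idiom with a made-up register block; the names and the 0x40 offset below are placeholders, not symbols from this driver:

	#include <linux/io.h>
	#include <linux/types.h>

	/* Hypothetical MMIO block, only to illustrate flushing a posted write. */
	static void enable_block(void __iomem *base, bool enable)
	{
		u32 v = readl(base + 0x40);	/* CTRL register (made-up) */

		v = enable ? (v | 0x1) : (v & ~0x1u);
		writel(v, base + 0x40);		/* this write may be posted */
		readl(base + 0x40);		/* read back to flush it */
	}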
static void dispc_mgr_enable_lcd_out(enum omap_channel channel, bool enable)
@@ -1967,6 +1960,8 @@ static void dispc_mgr_enable_lcd_out(enum omap_channel channel, bool enable)
static void _enable_digit_out(bool enable)
{
REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 1, 1);
+ /* flush posted write */
+ dispc_read_reg(DISPC_CONTROL);
}
static void dispc_mgr_enable_digit_out(bool enable)
@@ -2124,25 +2119,12 @@ void dispc_set_loadmode(enum omap_dss_load_mode mode)
}
-void dispc_mgr_set_default_color(enum omap_channel channel, u32 color)
+static void dispc_mgr_set_default_color(enum omap_channel channel, u32 color)
{
dispc_write_reg(DISPC_DEFAULT_COLOR(channel), color);
}
-u32 dispc_mgr_get_default_color(enum omap_channel channel)
-{
- u32 l;
-
- BUG_ON(channel != OMAP_DSS_CHANNEL_DIGIT &&
- channel != OMAP_DSS_CHANNEL_LCD &&
- channel != OMAP_DSS_CHANNEL_LCD2);
-
- l = dispc_read_reg(DISPC_DEFAULT_COLOR(channel));
-
- return l;
-}
-
-void dispc_mgr_set_trans_key(enum omap_channel ch,
+static void dispc_mgr_set_trans_key(enum omap_channel ch,
enum omap_dss_trans_key_type type,
u32 trans_key)
{
@@ -2156,26 +2138,7 @@ void dispc_mgr_set_trans_key(enum omap_channel ch,
dispc_write_reg(DISPC_TRANS_COLOR(ch), trans_key);
}
-void dispc_mgr_get_trans_key(enum omap_channel ch,
- enum omap_dss_trans_key_type *type,
- u32 *trans_key)
-{
- if (type) {
- if (ch == OMAP_DSS_CHANNEL_LCD)
- *type = REG_GET(DISPC_CONFIG, 11, 11);
- else if (ch == OMAP_DSS_CHANNEL_DIGIT)
- *type = REG_GET(DISPC_CONFIG, 13, 13);
- else if (ch == OMAP_DSS_CHANNEL_LCD2)
- *type = REG_GET(DISPC_CONFIG2, 11, 11);
- else
- BUG();
- }
-
- if (trans_key)
- *trans_key = dispc_read_reg(DISPC_TRANS_COLOR(ch));
-}
-
-void dispc_mgr_enable_trans_key(enum omap_channel ch, bool enable)
+static void dispc_mgr_enable_trans_key(enum omap_channel ch, bool enable)
{
if (ch == OMAP_DSS_CHANNEL_LCD)
REG_FLD_MOD(DISPC_CONFIG, enable, 10, 10);
@@ -2185,7 +2148,8 @@ void dispc_mgr_enable_trans_key(enum omap_channel ch, bool enable)
REG_FLD_MOD(DISPC_CONFIG2, enable, 10, 10);
}
-void dispc_mgr_enable_alpha_fixed_zorder(enum omap_channel ch, bool enable)
+static void dispc_mgr_enable_alpha_fixed_zorder(enum omap_channel ch,
+ bool enable)
{
if (!dss_has_feature(FEAT_ALPHA_FIXED_ZORDER))
return;
@@ -2196,40 +2160,20 @@ void dispc_mgr_enable_alpha_fixed_zorder(enum omap_channel ch, bool enable)
REG_FLD_MOD(DISPC_CONFIG, enable, 19, 19);
}
-bool dispc_mgr_alpha_fixed_zorder_enabled(enum omap_channel ch)
-{
- bool enabled;
-
- if (!dss_has_feature(FEAT_ALPHA_FIXED_ZORDER))
- return false;
-
- if (ch == OMAP_DSS_CHANNEL_LCD)
- enabled = REG_GET(DISPC_CONFIG, 18, 18);
- else if (ch == OMAP_DSS_CHANNEL_DIGIT)
- enabled = REG_GET(DISPC_CONFIG, 19, 19);
- else
- BUG();
-
- return enabled;
-}
-
-bool dispc_mgr_trans_key_enabled(enum omap_channel ch)
+void dispc_mgr_setup(enum omap_channel channel,
+ struct omap_overlay_manager_info *info)
{
- bool enabled;
-
- if (ch == OMAP_DSS_CHANNEL_LCD)
- enabled = REG_GET(DISPC_CONFIG, 10, 10);
- else if (ch == OMAP_DSS_CHANNEL_DIGIT)
- enabled = REG_GET(DISPC_CONFIG, 12, 12);
- else if (ch == OMAP_DSS_CHANNEL_LCD2)
- enabled = REG_GET(DISPC_CONFIG2, 10, 10);
- else
- BUG();
-
- return enabled;
+ dispc_mgr_set_default_color(channel, info->default_color);
+ dispc_mgr_set_trans_key(channel, info->trans_key_type, info->trans_key);
+ dispc_mgr_enable_trans_key(channel, info->trans_enabled);
+ dispc_mgr_enable_alpha_fixed_zorder(channel,
+ info->partial_alpha_enabled);
+ if (dss_has_feature(FEAT_CPR)) {
+ dispc_mgr_enable_cpr(channel, info->cpr_enable);
+ dispc_mgr_set_cpr_coef(channel, &info->cpr_coefs);
+ }
}
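dispc_mgr_setup() bundles the per-manager attributes that used to be programmed through the individual exported setters now being made static. A minimal caller sketch, assuming the apply layer has an omap_overlay_manager_info at hand; the values below are purely illustrative:

	struct omap_overlay_manager_info info = {
		.default_color		= 0x000000,
		.trans_key_type		= OMAP_DSS_COLOR_KEY_GFX_DST,
		.trans_key		= 0x00ff00,
		.trans_enabled		= false,
		.partial_alpha_enabled	= false,
		.cpr_enable		= false,
	};

	dispc_mgr_setup(OMAP_DSS_CHANNEL_LCD, &info);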
-
void dispc_mgr_set_tft_data_lines(enum omap_channel channel, u8 data_lines)
{
int code;
@@ -3184,7 +3128,8 @@ static void dispc_error_worker(struct work_struct *work)
for (i = 0; i < omap_dss_get_num_overlay_managers(); ++i) {
struct omap_overlay_manager *mgr;
mgr = omap_dss_get_overlay_manager(i);
- mgr->device->driver->disable(mgr->device);
+ if (mgr->device && mgr->device->driver)
+ mgr->device->driver->disable(mgr->device);
}
}
diff --git a/drivers/video/omap2/dss/dispc.h b/drivers/video/omap2/dss/dispc.h
index c06efc38983e..5836bd1650f9 100644
--- a/drivers/video/omap2/dss/dispc.h
+++ b/drivers/video/omap2/dss/dispc.h
@@ -97,6 +97,17 @@
#define DISPC_OVL_PRELOAD(n) (DISPC_OVL_BASE(n) + \
DISPC_PRELOAD_OFFSET(n))
+/* DISPC up/downsampling FIR filter coefficient structure */
+struct dispc_coef {
+ s8 hc4_vc22;
+ s8 hc3_vc2;
+ u8 hc2_vc1;
+ s8 hc1_vc0;
+ s8 hc0_vc00;
+};
+
+const struct dispc_coef *dispc_ovl_get_scale_coef(int inc, int five_taps);
+
/* DISPC manager/channel specific registers */
static inline u16 DISPC_DEFAULT_COLOR(enum omap_channel channel)
{
diff --git a/drivers/video/omap2/dss/dispc_coefs.c b/drivers/video/omap2/dss/dispc_coefs.c
new file mode 100644
index 000000000000..069bccbb3f12
--- /dev/null
+++ b/drivers/video/omap2/dss/dispc_coefs.c
@@ -0,0 +1,326 @@
+/*
+ * linux/drivers/video/omap2/dss/dispc_coefs.c
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Chandrabhanu Mahapatra <cmahapatra@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+#include <video/omapdss.h>
+#include "dispc.h"
+
+#define ARRAY_LEN(array) (sizeof(array) / sizeof(array[0]))
+
+static const struct dispc_coef coef3_M8[8] = {
+ { 0, 0, 128, 0, 0 },
+ { 0, -4, 123, 9, 0 },
+ { 0, -4, 108, 24, 0 },
+ { 0, -2, 87, 43, 0 },
+ { 0, 64, 64, 0, 0 },
+ { 0, 43, 87, -2, 0 },
+ { 0, 24, 108, -4, 0 },
+ { 0, 9, 123, -4, 0 },
+};
+
+static const struct dispc_coef coef3_M9[8] = {
+ { 0, 6, 116, 6, 0 },
+ { 0, 0, 112, 16, 0 },
+ { 0, -2, 100, 30, 0 },
+ { 0, -2, 83, 47, 0 },
+ { 0, 64, 64, 0, 0 },
+ { 0, 47, 83, -2, 0 },
+ { 0, 30, 100, -2, 0 },
+ { 0, 16, 112, 0, 0 },
+};
+
+static const struct dispc_coef coef3_M10[8] = {
+ { 0, 10, 108, 10, 0 },
+ { 0, 3, 104, 21, 0 },
+ { 0, 0, 94, 34, 0 },
+ { 0, -1, 80, 49, 0 },
+ { 0, 64, 64, 0, 0 },
+ { 0, 49, 80, -1, 0 },
+ { 0, 34, 94, 0, 0 },
+ { 0, 21, 104, 3, 0 },
+};
+
+static const struct dispc_coef coef3_M11[8] = {
+ { 0, 14, 100, 14, 0 },
+ { 0, 6, 98, 24, 0 },
+ { 0, 2, 90, 36, 0 },
+ { 0, 0, 78, 50, 0 },
+ { 0, 64, 64, 0, 0 },
+ { 0, 50, 78, 0, 0 },
+ { 0, 36, 90, 2, 0 },
+ { 0, 24, 98, 6, 0 },
+};
+
+static const struct dispc_coef coef3_M12[8] = {
+ { 0, 16, 96, 16, 0 },
+ { 0, 9, 93, 26, 0 },
+ { 0, 4, 86, 38, 0 },
+ { 0, 1, 76, 51, 0 },
+ { 0, 64, 64, 0, 0 },
+ { 0, 51, 76, 1, 0 },
+ { 0, 38, 86, 4, 0 },
+ { 0, 26, 93, 9, 0 },
+};
+
+static const struct dispc_coef coef3_M13[8] = {
+ { 0, 18, 92, 18, 0 },
+ { 0, 10, 90, 28, 0 },
+ { 0, 5, 83, 40, 0 },
+ { 0, 1, 75, 52, 0 },
+ { 0, 64, 64, 0, 0 },
+ { 0, 52, 75, 1, 0 },
+ { 0, 40, 83, 5, 0 },
+ { 0, 28, 90, 10, 0 },
+};
+
+static const struct dispc_coef coef3_M14[8] = {
+ { 0, 20, 88, 20, 0 },
+ { 0, 12, 86, 30, 0 },
+ { 0, 6, 81, 41, 0 },
+ { 0, 2, 74, 52, 0 },
+ { 0, 64, 64, 0, 0 },
+ { 0, 52, 74, 2, 0 },
+ { 0, 41, 81, 6, 0 },
+ { 0, 30, 86, 12, 0 },
+};
+
+static const struct dispc_coef coef3_M16[8] = {
+ { 0, 22, 84, 22, 0 },
+ { 0, 14, 82, 32, 0 },
+ { 0, 8, 78, 42, 0 },
+ { 0, 3, 72, 53, 0 },
+ { 0, 64, 64, 0, 0 },
+ { 0, 53, 72, 3, 0 },
+ { 0, 42, 78, 8, 0 },
+ { 0, 32, 82, 14, 0 },
+};
+
+static const struct dispc_coef coef3_M19[8] = {
+ { 0, 24, 80, 24, 0 },
+ { 0, 16, 79, 33, 0 },
+ { 0, 9, 76, 43, 0 },
+ { 0, 4, 70, 54, 0 },
+ { 0, 64, 64, 0, 0 },
+ { 0, 54, 70, 4, 0 },
+ { 0, 43, 76, 9, 0 },
+ { 0, 33, 79, 16, 0 },
+};
+
+static const struct dispc_coef coef3_M22[8] = {
+ { 0, 25, 78, 25, 0 },
+ { 0, 17, 77, 34, 0 },
+ { 0, 10, 74, 44, 0 },
+ { 0, 5, 69, 54, 0 },
+ { 0, 64, 64, 0, 0 },
+ { 0, 54, 69, 5, 0 },
+ { 0, 44, 74, 10, 0 },
+ { 0, 34, 77, 17, 0 },
+};
+
+static const struct dispc_coef coef3_M26[8] = {
+ { 0, 26, 76, 26, 0 },
+ { 0, 19, 74, 35, 0 },
+ { 0, 11, 72, 45, 0 },
+ { 0, 5, 69, 54, 0 },
+ { 0, 64, 64, 0, 0 },
+ { 0, 54, 69, 5, 0 },
+ { 0, 45, 72, 11, 0 },
+ { 0, 35, 74, 19, 0 },
+};
+
+static const struct dispc_coef coef3_M32[8] = {
+ { 0, 27, 74, 27, 0 },
+ { 0, 19, 73, 36, 0 },
+ { 0, 12, 71, 45, 0 },
+ { 0, 6, 68, 54, 0 },
+ { 0, 64, 64, 0, 0 },
+ { 0, 54, 68, 6, 0 },
+ { 0, 45, 71, 12, 0 },
+ { 0, 36, 73, 19, 0 },
+};
+
+static const struct dispc_coef coef5_M8[8] = {
+ { 0, 0, 128, 0, 0 },
+ { -2, 14, 125, -10, 1 },
+ { -6, 33, 114, -15, 2 },
+ { -10, 55, 98, -16, 1 },
+ { 0, -14, 78, 78, -14 },
+ { 1, -16, 98, 55, -10 },
+ { 2, -15, 114, 33, -6 },
+ { 1, -10, 125, 14, -2 },
+};
+
+static const struct dispc_coef coef5_M9[8] = {
+ { -3, 10, 114, 10, -3 },
+ { -6, 24, 111, 0, -1 },
+ { -8, 40, 103, -7, 0 },
+ { -11, 58, 91, -11, 1 },
+ { 0, -12, 76, 76, -12 },
+ { 1, -11, 91, 58, -11 },
+ { 0, -7, 103, 40, -8 },
+ { -1, 0, 111, 24, -6 },
+};
+
+static const struct dispc_coef coef5_M10[8] = {
+ { -4, 18, 100, 18, -4 },
+ { -6, 30, 99, 8, -3 },
+ { -8, 44, 93, 0, -1 },
+ { -9, 58, 84, -5, 0 },
+ { 0, -8, 72, 72, -8 },
+ { 0, -5, 84, 58, -9 },
+ { -1, 0, 93, 44, -8 },
+ { -3, 8, 99, 30, -6 },
+};
+
+static const struct dispc_coef coef5_M11[8] = {
+ { -5, 23, 92, 23, -5 },
+ { -6, 34, 90, 13, -3 },
+ { -6, 45, 85, 6, -2 },
+ { -6, 57, 78, 0, -1 },
+ { 0, -4, 68, 68, -4 },
+ { -1, 0, 78, 57, -6 },
+ { -2, 6, 85, 45, -6 },
+ { -3, 13, 90, 34, -6 },
+};
+
+static const struct dispc_coef coef5_M12[8] = {
+ { -4, 26, 84, 26, -4 },
+ { -5, 36, 82, 18, -3 },
+ { -4, 46, 78, 10, -2 },
+ { -3, 55, 72, 5, -1 },
+ { 0, 0, 64, 64, 0 },
+ { -1, 5, 72, 55, -3 },
+ { -2, 10, 78, 46, -4 },
+ { -3, 18, 82, 36, -5 },
+};
+
+static const struct dispc_coef coef5_M13[8] = {
+ { -3, 28, 78, 28, -3 },
+ { -3, 37, 76, 21, -3 },
+ { -2, 45, 73, 14, -2 },
+ { 0, 53, 68, 8, -1 },
+ { 0, 3, 61, 61, 3 },
+ { -1, 8, 68, 53, 0 },
+ { -2, 14, 73, 45, -2 },
+ { -3, 21, 76, 37, -3 },
+};
+
+static const struct dispc_coef coef5_M14[8] = {
+ { -2, 30, 72, 30, -2 },
+ { -1, 37, 71, 23, -2 },
+ { 0, 45, 69, 16, -2 },
+ { 3, 52, 64, 10, -1 },
+ { 0, 6, 58, 58, 6 },
+ { -1, 10, 64, 52, 3 },
+ { -2, 16, 69, 45, 0 },
+ { -2, 23, 71, 37, -1 },
+};
+
+static const struct dispc_coef coef5_M16[8] = {
+ { 0, 31, 66, 31, 0 },
+ { 1, 38, 65, 25, -1 },
+ { 3, 44, 62, 20, -1 },
+ { 6, 49, 59, 14, 0 },
+ { 0, 10, 54, 54, 10 },
+ { 0, 14, 59, 49, 6 },
+ { -1, 20, 62, 44, 3 },
+ { -1, 25, 65, 38, 1 },
+};
+
+static const struct dispc_coef coef5_M19[8] = {
+ { 3, 32, 58, 32, 3 },
+ { 4, 38, 58, 27, 1 },
+ { 7, 42, 55, 23, 1 },
+ { 10, 46, 54, 18, 0 },
+ { 0, 14, 50, 50, 14 },
+ { 0, 18, 54, 46, 10 },
+ { 1, 23, 55, 42, 7 },
+ { 1, 27, 58, 38, 4 },
+};
+
+static const struct dispc_coef coef5_M22[8] = {
+ { 4, 33, 54, 33, 4 },
+ { 6, 37, 54, 28, 3 },
+ { 9, 41, 53, 24, 1 },
+ { 12, 45, 51, 20, 0 },
+ { 0, 16, 48, 48, 16 },
+ { 0, 20, 51, 45, 12 },
+ { 1, 24, 53, 41, 9 },
+ { 3, 28, 54, 37, 6 },
+};
+
+static const struct dispc_coef coef5_M26[8] = {
+ { 6, 33, 50, 33, 6 },
+ { 8, 36, 51, 29, 4 },
+ { 11, 40, 50, 25, 2 },
+ { 14, 43, 48, 22, 1 },
+ { 0, 18, 46, 46, 18 },
+ { 1, 22, 48, 43, 14 },
+ { 2, 25, 50, 40, 11 },
+ { 4, 29, 51, 36, 8 },
+};
+
+static const struct dispc_coef coef5_M32[8] = {
+ { 7, 33, 48, 33, 7 },
+ { 10, 36, 48, 29, 5 },
+ { 13, 39, 47, 26, 3 },
+ { 16, 42, 46, 23, 1 },
+ { 0, 19, 45, 45, 19 },
+ { 1, 23, 46, 42, 16 },
+ { 3, 26, 47, 39, 13 },
+ { 5, 29, 48, 36, 10 },
+};
+
+const struct dispc_coef *dispc_ovl_get_scale_coef(int inc, int five_taps)
+{
+ int i;
+ static const struct {
+ int Mmin;
+ int Mmax;
+ const struct dispc_coef *coef_3;
+ const struct dispc_coef *coef_5;
+ } coefs[] = {
+ { 27, 32, coef3_M32, coef5_M32 },
+ { 23, 26, coef3_M26, coef5_M26 },
+ { 20, 22, coef3_M22, coef5_M22 },
+ { 17, 19, coef3_M19, coef5_M19 },
+ { 15, 16, coef3_M16, coef5_M16 },
+ { 14, 14, coef3_M14, coef5_M14 },
+ { 13, 13, coef3_M13, coef5_M13 },
+ { 12, 12, coef3_M12, coef5_M12 },
+ { 11, 11, coef3_M11, coef5_M11 },
+ { 10, 10, coef3_M10, coef5_M10 },
+ { 9, 9, coef3_M9, coef5_M9 },
+ { 4, 8, coef3_M8, coef5_M8 },
+ /*
+ * When upscaling more than two times, blockiness and outlines
+ * around the image are observed when M8 tables are used. M11,
+ * M16 and M19 tables are used to prevent this.
+ */
+ { 3, 3, coef3_M11, coef5_M11 },
+ { 2, 2, coef3_M16, coef5_M16 },
+ { 0, 1, coef3_M19, coef5_M19 },
+ };
+
+ inc /= 128;
+ for (i = 0; i < ARRAY_LEN(coefs); ++i)
+ if (inc >= coefs[i].Mmin && inc <= coefs[i].Mmax)
+ return five_taps ? coefs[i].coef_5 : coefs[i].coef_3;
+ return NULL;
+}
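dispc_ovl_get_scale_coef() selects a table by the decimation factor M = inc / 128: M8 is the unscaled case, M16 roughly a 2x downscale, M32 a 4x downscale, and the sub-M8 entries cover upscaling. A hedged sketch of the expected use from dispc.c, assuming the usual DISPC FIR increment convention of 1024 * input / output (that convention is not part of this hunk):

	/* Hedged sketch, not the driver's own helper. */
	static const struct dispc_coef *pick_h_coefs(u16 in_width, u16 out_width,
			bool five_taps)
	{
		int fir_hinc = 1024 * in_width / out_width;	/* 1024 == 1:1 */

		/* fir_hinc / 128: 8 for 1:1, 16 for 2x down, 32 for 4x down */
		return dispc_ovl_get_scale_coef(fir_hinc, five_taps);
	}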
diff --git a/drivers/video/omap2/dss/dpi.c b/drivers/video/omap2/dss/dpi.c
index 976ac23dcd0c..395d658a94fc 100644
--- a/drivers/video/omap2/dss/dpi.c
+++ b/drivers/video/omap2/dss/dpi.c
@@ -223,10 +223,13 @@ int omapdss_dpi_display_enable(struct omap_dss_device *dssdev)
mdelay(2);
- dssdev->manager->enable(dssdev->manager);
+ r = dss_mgr_enable(dssdev->manager);
+ if (r)
+ goto err_mgr_enable;
return 0;
+err_mgr_enable:
err_set_mode:
if (dpi_use_dsi_pll(dssdev))
dsi_pll_uninit(dpi.dsidev, true);
@@ -249,7 +252,7 @@ EXPORT_SYMBOL(omapdss_dpi_display_enable);
void omapdss_dpi_display_disable(struct omap_dss_device *dssdev)
{
- dssdev->manager->disable(dssdev->manager);
+ dss_mgr_disable(dssdev->manager);
if (dpi_use_dsi_pll(dssdev)) {
dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK);
diff --git a/drivers/video/omap2/dss/dsi.c b/drivers/video/omap2/dss/dsi.c
index 5abf8e7e7456..d4d676c82c12 100644
--- a/drivers/video/omap2/dss/dsi.c
+++ b/drivers/video/omap2/dss/dsi.c
@@ -203,6 +203,21 @@ struct dsi_reg { u16 idx; };
typedef void (*omap_dsi_isr_t) (void *arg, u32 mask);
#define DSI_MAX_NR_ISRS 2
+#define DSI_MAX_NR_LANES 5
+
+enum dsi_lane_function {
+ DSI_LANE_UNUSED = 0,
+ DSI_LANE_CLK,
+ DSI_LANE_DATA1,
+ DSI_LANE_DATA2,
+ DSI_LANE_DATA3,
+ DSI_LANE_DATA4,
+};
+
+struct dsi_lane_config {
+ enum dsi_lane_function function;
+ u8 polarity;
+};
struct dsi_isr_data {
omap_dsi_isr_t isr;
@@ -223,24 +238,6 @@ enum dsi_vc_source {
DSI_VC_SOURCE_VP,
};
-enum dsi_lane {
- DSI_CLK_P = 1 << 0,
- DSI_CLK_N = 1 << 1,
- DSI_DATA1_P = 1 << 2,
- DSI_DATA1_N = 1 << 3,
- DSI_DATA2_P = 1 << 4,
- DSI_DATA2_N = 1 << 5,
- DSI_DATA3_P = 1 << 6,
- DSI_DATA3_N = 1 << 7,
- DSI_DATA4_P = 1 << 8,
- DSI_DATA4_N = 1 << 9,
-};
-
-struct dsi_update_region {
- u16 x, y, w, h;
- struct omap_dss_device *device;
-};
-
struct dsi_irq_stats {
unsigned long last_reset;
unsigned irq_count;
@@ -290,7 +287,9 @@ struct dsi_data {
struct dsi_isr_tables isr_tables_copy;
int update_channel;
- struct dsi_update_region update_region;
+#ifdef DEBUG
+ unsigned update_bytes;
+#endif
bool te_enabled;
bool ulps_enabled;
@@ -327,7 +326,10 @@ struct dsi_data {
unsigned long fint_min, fint_max;
unsigned long lpdiv_max;
- int num_data_lanes;
+ unsigned num_lanes_supported;
+
+ struct dsi_lane_config lanes[DSI_MAX_NR_LANES];
+ unsigned num_lanes_used;
unsigned scp_clk_refcount;
};
@@ -340,8 +342,8 @@ struct dsi_packet_sent_handler_data {
static struct platform_device *dsi_pdev_map[MAX_NUM_DSI];
#ifdef DEBUG
-static unsigned int dsi_perf;
-module_param_named(dsi_perf, dsi_perf, bool, 0644);
+static bool dsi_perf;
+module_param(dsi_perf, bool, 0644);
#endif
static inline struct dsi_data *dsi_get_dsidrv_data(struct platform_device *dsidev)
@@ -413,14 +415,29 @@ static void dsi_completion_handler(void *data, u32 mask)
static inline int wait_for_bit_change(struct platform_device *dsidev,
const struct dsi_reg idx, int bitnum, int value)
{
- int t = 100000;
+ unsigned long timeout;
+ ktime_t wait;
+ int t;
- while (REG_GET(dsidev, idx, bitnum, bitnum) != value) {
- if (--t == 0)
- return !value;
+ /* first busyloop to see if the bit changes right away */
+ t = 100;
+ while (t-- > 0) {
+ if (REG_GET(dsidev, idx, bitnum, bitnum) == value)
+ return value;
}
- return value;
+ /* then loop for 500ms, sleeping for 1ms in between */
+ timeout = jiffies + msecs_to_jiffies(500);
+ while (time_before(jiffies, timeout)) {
+ if (REG_GET(dsidev, idx, bitnum, bitnum) == value)
+ return value;
+
+ wait = ns_to_ktime(1000 * 1000);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_hrtimeout(&wait, HRTIMER_MODE_REL);
+ }
+
+ return !value;
}
u8 dsi_get_pixel_size(enum omap_dss_dsi_pixel_format fmt)
@@ -454,7 +471,6 @@ static void dsi_perf_mark_start(struct platform_device *dsidev)
static void dsi_perf_show(struct platform_device *dsidev, const char *name)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
- struct omap_dss_device *dssdev = dsi->update_region.device;
ktime_t t, setup_time, trans_time;
u32 total_bytes;
u32 setup_us, trans_us, total_us;
@@ -476,9 +492,7 @@ static void dsi_perf_show(struct platform_device *dsidev, const char *name)
total_us = setup_us + trans_us;
- total_bytes = dsi->update_region.w *
- dsi->update_region.h *
- dsi_get_pixel_size(dssdev->panel.dsi_pix_fmt) / 8;
+ total_bytes = dsi->update_bytes;
printk(KERN_INFO "DSI(%s): %u us + %u us = %u us (%uHz), "
"%u bytes, %u kbytes/sec\n",
@@ -1720,17 +1734,19 @@ static void dsi_dump_dsidev_clocks(struct platform_device *dsidev,
seq_printf(s, "CLKIN4DDR\t%-16luregm %u\n",
cinfo->clkin4ddr, cinfo->regm);
- seq_printf(s, "%s (%s)\t%-16luregm_dispc %u\t(%s)\n",
- dss_get_generic_clk_source_name(dispc_clk_src),
- dss_feat_get_clk_source_name(dispc_clk_src),
+ seq_printf(s, "DSI_PLL_HSDIV_DISPC (%s)\t%-16luregm_dispc %u\t(%s)\n",
+ dss_feat_get_clk_source_name(dsi_module == 0 ?
+ OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC :
+ OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC),
cinfo->dsi_pll_hsdiv_dispc_clk,
cinfo->regm_dispc,
dispc_clk_src == OMAP_DSS_CLK_SRC_FCK ?
"off" : "on");
- seq_printf(s, "%s (%s)\t%-16luregm_dsi %u\t(%s)\n",
- dss_get_generic_clk_source_name(dsi_clk_src),
- dss_feat_get_clk_source_name(dsi_clk_src),
+ seq_printf(s, "DSI_PLL_HSDIV_DSI (%s)\t%-16luregm_dsi %u\t(%s)\n",
+ dss_feat_get_clk_source_name(dsi_module == 0 ?
+ OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI :
+ OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI),
cinfo->dsi_pll_hsdiv_dsi_clk,
cinfo->regm_dsi,
dsi_clk_src == OMAP_DSS_CLK_SRC_FCK ?
@@ -2029,34 +2045,6 @@ static int dsi_cio_power(struct platform_device *dsidev,
return 0;
}
-/* Number of data lanes present on DSI interface */
-static inline int dsi_get_num_data_lanes(struct platform_device *dsidev)
-{
- /* DSI on OMAP3 doesn't have register DSI_GNQ, set number
- * of data lanes as 2 by default */
- if (dss_has_feature(FEAT_DSI_GNQ))
- return REG_GET(dsidev, DSI_GNQ, 11, 9); /* NB_DATA_LANES */
- else
- return 2;
-}
-
-/* Number of data lanes used by the dss device */
-static inline int dsi_get_num_data_lanes_dssdev(struct omap_dss_device *dssdev)
-{
- int num_data_lanes = 0;
-
- if (dssdev->phy.dsi.data1_lane != 0)
- num_data_lanes++;
- if (dssdev->phy.dsi.data2_lane != 0)
- num_data_lanes++;
- if (dssdev->phy.dsi.data3_lane != 0)
- num_data_lanes++;
- if (dssdev->phy.dsi.data4_lane != 0)
- num_data_lanes++;
-
- return num_data_lanes;
-}
-
static unsigned dsi_get_line_buf_size(struct platform_device *dsidev)
{
int val;
@@ -2088,59 +2076,112 @@ static unsigned dsi_get_line_buf_size(struct platform_device *dsidev)
}
}
-static void dsi_set_lane_config(struct omap_dss_device *dssdev)
+static int dsi_parse_lane_config(struct omap_dss_device *dssdev)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
- u32 r;
- int num_data_lanes_dssdev = dsi_get_num_data_lanes_dssdev(dssdev);
+ struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ u8 lanes[DSI_MAX_NR_LANES];
+ u8 polarities[DSI_MAX_NR_LANES];
+ int num_lanes, i;
+
+ static const enum dsi_lane_function functions[] = {
+ DSI_LANE_CLK,
+ DSI_LANE_DATA1,
+ DSI_LANE_DATA2,
+ DSI_LANE_DATA3,
+ DSI_LANE_DATA4,
+ };
+
+ lanes[0] = dssdev->phy.dsi.clk_lane;
+ lanes[1] = dssdev->phy.dsi.data1_lane;
+ lanes[2] = dssdev->phy.dsi.data2_lane;
+ lanes[3] = dssdev->phy.dsi.data3_lane;
+ lanes[4] = dssdev->phy.dsi.data4_lane;
+ polarities[0] = dssdev->phy.dsi.clk_pol;
+ polarities[1] = dssdev->phy.dsi.data1_pol;
+ polarities[2] = dssdev->phy.dsi.data2_pol;
+ polarities[3] = dssdev->phy.dsi.data3_pol;
+ polarities[4] = dssdev->phy.dsi.data4_pol;
- int clk_lane = dssdev->phy.dsi.clk_lane;
- int data1_lane = dssdev->phy.dsi.data1_lane;
- int data2_lane = dssdev->phy.dsi.data2_lane;
- int clk_pol = dssdev->phy.dsi.clk_pol;
- int data1_pol = dssdev->phy.dsi.data1_pol;
- int data2_pol = dssdev->phy.dsi.data2_pol;
+ num_lanes = 0;
+
+ for (i = 0; i < dsi->num_lanes_supported; ++i)
+ dsi->lanes[i].function = DSI_LANE_UNUSED;
+
+ for (i = 0; i < dsi->num_lanes_supported; ++i) {
+ int num;
+
+ if (lanes[i] == DSI_LANE_UNUSED)
+ break;
+
+ num = lanes[i] - 1;
+
+ if (num >= dsi->num_lanes_supported)
+ return -EINVAL;
+
+ if (dsi->lanes[num].function != DSI_LANE_UNUSED)
+ return -EINVAL;
+
+ dsi->lanes[num].function = functions[i];
+ dsi->lanes[num].polarity = polarities[i];
+ num_lanes++;
+ }
+
+ if (num_lanes < 2 || num_lanes > dsi->num_lanes_supported)
+ return -EINVAL;
+
+ dsi->num_lanes_used = num_lanes;
+
+ return 0;
+}
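dsi_parse_lane_config() translates the board's "module position X carries function Y" description into the lanes[] array indexed by module position. A hypothetical panel configuration, not taken from this patch, and what the parser would make of it:

	/* Hypothetical platform data for a two-data-lane panel. */
	dssdev->phy.dsi.clk_lane   = 1;	/* module lane 1 carries the DSI clock */
	dssdev->phy.dsi.data1_lane = 3;	/* module lane 3 carries data lane 1   */
	dssdev->phy.dsi.data2_lane = 2;	/* module lane 2 carries data lane 2   */

	/*
	 * dsi_parse_lane_config() then fills:
	 *   dsi->lanes[0].function = DSI_LANE_CLK
	 *   dsi->lanes[1].function = DSI_LANE_DATA2
	 *   dsi->lanes[2].function = DSI_LANE_DATA1
	 *   dsi->num_lanes_used    = 3
	 */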
+
+static int dsi_set_lane_config(struct omap_dss_device *dssdev)
+{
+ struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
+ struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ static const u8 offsets[] = { 0, 4, 8, 12, 16 };
+ static const enum dsi_lane_function functions[] = {
+ DSI_LANE_CLK,
+ DSI_LANE_DATA1,
+ DSI_LANE_DATA2,
+ DSI_LANE_DATA3,
+ DSI_LANE_DATA4,
+ };
+ u32 r;
+ int i;
r = dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG1);
- r = FLD_MOD(r, clk_lane, 2, 0);
- r = FLD_MOD(r, clk_pol, 3, 3);
- r = FLD_MOD(r, data1_lane, 6, 4);
- r = FLD_MOD(r, data1_pol, 7, 7);
- r = FLD_MOD(r, data2_lane, 10, 8);
- r = FLD_MOD(r, data2_pol, 11, 11);
- if (num_data_lanes_dssdev > 2) {
- int data3_lane = dssdev->phy.dsi.data3_lane;
- int data3_pol = dssdev->phy.dsi.data3_pol;
-
- r = FLD_MOD(r, data3_lane, 14, 12);
- r = FLD_MOD(r, data3_pol, 15, 15);
+
+ for (i = 0; i < dsi->num_lanes_used; ++i) {
+ unsigned offset = offsets[i];
+ unsigned polarity, lane_number;
+ unsigned t;
+
+ for (t = 0; t < dsi->num_lanes_supported; ++t)
+ if (dsi->lanes[t].function == functions[i])
+ break;
+
+ if (t == dsi->num_lanes_supported)
+ return -EINVAL;
+
+ lane_number = t;
+ polarity = dsi->lanes[t].polarity;
+
+ r = FLD_MOD(r, lane_number + 1, offset + 2, offset);
+ r = FLD_MOD(r, polarity, offset + 3, offset + 3);
}
- if (num_data_lanes_dssdev > 3) {
- int data4_lane = dssdev->phy.dsi.data4_lane;
- int data4_pol = dssdev->phy.dsi.data4_pol;
- r = FLD_MOD(r, data4_lane, 18, 16);
- r = FLD_MOD(r, data4_pol, 19, 19);
+ /* clear the unused lanes */
+ for (; i < dsi->num_lanes_supported; ++i) {
+ unsigned offset = offsets[i];
+
+ r = FLD_MOD(r, 0, offset + 2, offset);
+ r = FLD_MOD(r, 0, offset + 3, offset + 3);
}
- dsi_write_reg(dsidev, DSI_COMPLEXIO_CFG1, r);
- /* The configuration of the DSI complex I/O (number of data lanes,
- position, differential order) should not be changed while
- DSS.DSI_CLK_CRTRL[20] LP_CLK_ENABLE bit is set to 1. In order for
- the hardware to take into account a new configuration of the complex
- I/O (done in DSS.DSI_COMPLEXIO_CFG1 register), it is recommended to
- follow this sequence: First set the DSS.DSI_CTRL[0] IF_EN bit to 1,
- then reset the DSS.DSI_CTRL[0] IF_EN to 0, then set
- DSS.DSI_CLK_CTRL[20] LP_CLK_ENABLE to 1 and finally set again the
- DSS.DSI_CTRL[0] IF_EN bit to 1. If the sequence is not followed, the
- DSI complex I/O configuration is unknown. */
+ dsi_write_reg(dsidev, DSI_COMPLEXIO_CFG1, r);
- /*
- REG_FLD_MOD(dsidev, DSI_CTRL, 1, 0, 0);
- REG_FLD_MOD(dsidev, DSI_CTRL, 0, 0, 0);
- REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 1, 20, 20);
- REG_FLD_MOD(dsidev, DSI_CTRL, 1, 0, 0);
- */
+ return 0;
}
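Each function then owns a four-bit field in DSI_COMPLEXIO_CFG1: the module position goes into bits offset+2..offset and the polarity into bit offset+3, with offsets 0/4/8/12/16 for CLK and DATA1..DATA4. Continuing the hypothetical configuration sketched above, dsi_set_lane_config() would program bits 10:0 to 0x231 (position 1 for the clock in bits 2:0, position 3 for data1 in bits 6:4, position 2 for data2 in bits 10:8, all polarity bits zero) and clear the unused DATA3/DATA4 fields.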
static inline unsigned ns2ddr(struct platform_device *dsidev, unsigned ns)
@@ -2230,49 +2271,28 @@ static void dsi_cio_timings(struct platform_device *dsidev)
dsi_write_reg(dsidev, DSI_DSIPHY_CFG2, r);
}
+/* lane masks have lane 0 at lsb. mask_p for positive lines, n for negative */
static void dsi_cio_enable_lane_override(struct omap_dss_device *dssdev,
- enum dsi_lane lanes)
+ unsigned mask_p, unsigned mask_n)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
- int clk_lane = dssdev->phy.dsi.clk_lane;
- int data1_lane = dssdev->phy.dsi.data1_lane;
- int data2_lane = dssdev->phy.dsi.data2_lane;
- int data3_lane = dssdev->phy.dsi.data3_lane;
- int data4_lane = dssdev->phy.dsi.data4_lane;
- int clk_pol = dssdev->phy.dsi.clk_pol;
- int data1_pol = dssdev->phy.dsi.data1_pol;
- int data2_pol = dssdev->phy.dsi.data2_pol;
- int data3_pol = dssdev->phy.dsi.data3_pol;
- int data4_pol = dssdev->phy.dsi.data4_pol;
-
- u32 l = 0;
- u8 lptxscp_start = dsi->num_data_lanes == 2 ? 22 : 26;
-
- if (lanes & DSI_CLK_P)
- l |= 1 << ((clk_lane - 1) * 2 + (clk_pol ? 0 : 1));
- if (lanes & DSI_CLK_N)
- l |= 1 << ((clk_lane - 1) * 2 + (clk_pol ? 1 : 0));
-
- if (lanes & DSI_DATA1_P)
- l |= 1 << ((data1_lane - 1) * 2 + (data1_pol ? 0 : 1));
- if (lanes & DSI_DATA1_N)
- l |= 1 << ((data1_lane - 1) * 2 + (data1_pol ? 1 : 0));
-
- if (lanes & DSI_DATA2_P)
- l |= 1 << ((data2_lane - 1) * 2 + (data2_pol ? 0 : 1));
- if (lanes & DSI_DATA2_N)
- l |= 1 << ((data2_lane - 1) * 2 + (data2_pol ? 1 : 0));
-
- if (lanes & DSI_DATA3_P)
- l |= 1 << ((data3_lane - 1) * 2 + (data3_pol ? 0 : 1));
- if (lanes & DSI_DATA3_N)
- l |= 1 << ((data3_lane - 1) * 2 + (data3_pol ? 1 : 0));
-
- if (lanes & DSI_DATA4_P)
- l |= 1 << ((data4_lane - 1) * 2 + (data4_pol ? 0 : 1));
- if (lanes & DSI_DATA4_N)
- l |= 1 << ((data4_lane - 1) * 2 + (data4_pol ? 1 : 0));
+ int i;
+ u32 l;
+ u8 lptxscp_start = dsi->num_lanes_supported == 3 ? 22 : 26;
+
+ l = 0;
+
+ for (i = 0; i < dsi->num_lanes_supported; ++i) {
+ unsigned p = dsi->lanes[i].polarity;
+
+ if (mask_p & (1 << i))
+ l |= 1 << (i * 2 + (p ? 0 : 1));
+
+ if (mask_n & (1 << i))
+ l |= 1 << (i * 2 + (p ? 1 : 0));
+ }
+
/*
* Bits in REGLPTXSCPDAT4TO0DXDY:
* 17: DY0 18: DX0
@@ -2305,51 +2325,40 @@ static void dsi_cio_disable_lane_override(struct platform_device *dsidev)
static int dsi_cio_wait_tx_clk_esc_reset(struct omap_dss_device *dssdev)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
- int t;
- int bits[3];
- bool in_use[3];
-
- if (dss_has_feature(FEAT_DSI_REVERSE_TXCLKESC)) {
- bits[0] = 28;
- bits[1] = 27;
- bits[2] = 26;
- } else {
- bits[0] = 24;
- bits[1] = 25;
- bits[2] = 26;
- }
-
- in_use[0] = false;
- in_use[1] = false;
- in_use[2] = false;
+ struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ int t, i;
+ bool in_use[DSI_MAX_NR_LANES];
+ static const u8 offsets_old[] = { 28, 27, 26 };
+ static const u8 offsets_new[] = { 24, 25, 26, 27, 28 };
+ const u8 *offsets;
+
+ if (dss_has_feature(FEAT_DSI_REVERSE_TXCLKESC))
+ offsets = offsets_old;
+ else
+ offsets = offsets_new;
- if (dssdev->phy.dsi.clk_lane != 0)
- in_use[dssdev->phy.dsi.clk_lane - 1] = true;
- if (dssdev->phy.dsi.data1_lane != 0)
- in_use[dssdev->phy.dsi.data1_lane - 1] = true;
- if (dssdev->phy.dsi.data2_lane != 0)
- in_use[dssdev->phy.dsi.data2_lane - 1] = true;
+ for (i = 0; i < dsi->num_lanes_supported; ++i)
+ in_use[i] = dsi->lanes[i].function != DSI_LANE_UNUSED;
t = 100000;
while (true) {
u32 l;
- int i;
int ok;
l = dsi_read_reg(dsidev, DSI_DSIPHY_CFG5);
ok = 0;
- for (i = 0; i < 3; ++i) {
- if (!in_use[i] || (l & (1 << bits[i])))
+ for (i = 0; i < dsi->num_lanes_supported; ++i) {
+ if (!in_use[i] || (l & (1 << offsets[i])))
ok++;
}
- if (ok == 3)
+ if (ok == dsi->num_lanes_supported)
break;
if (--t == 0) {
- for (i = 0; i < 3; ++i) {
- if (!in_use[i] || (l & (1 << bits[i])))
+ for (i = 0; i < dsi->num_lanes_supported; ++i) {
+ if (!in_use[i] || (l & (1 << offsets[i])))
continue;
DSSERR("CIO TXCLKESC%d domain not coming " \
@@ -2362,22 +2371,20 @@ static int dsi_cio_wait_tx_clk_esc_reset(struct omap_dss_device *dssdev)
return 0;
}
+/* return bitmask of enabled lanes, lane0 being the lsb */
static unsigned dsi_get_lane_mask(struct omap_dss_device *dssdev)
{
- unsigned lanes = 0;
+ struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
+ struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ unsigned mask = 0;
+ int i;
- if (dssdev->phy.dsi.clk_lane != 0)
- lanes |= 1 << (dssdev->phy.dsi.clk_lane - 1);
- if (dssdev->phy.dsi.data1_lane != 0)
- lanes |= 1 << (dssdev->phy.dsi.data1_lane - 1);
- if (dssdev->phy.dsi.data2_lane != 0)
- lanes |= 1 << (dssdev->phy.dsi.data2_lane - 1);
- if (dssdev->phy.dsi.data3_lane != 0)
- lanes |= 1 << (dssdev->phy.dsi.data3_lane - 1);
- if (dssdev->phy.dsi.data4_lane != 0)
- lanes |= 1 << (dssdev->phy.dsi.data4_lane - 1);
+ for (i = 0; i < dsi->num_lanes_supported; ++i) {
+ if (dsi->lanes[i].function != DSI_LANE_UNUSED)
+ mask |= 1 << i;
+ }
- return lanes;
+ return mask;
}
static int dsi_cio_init(struct omap_dss_device *dssdev)
@@ -2385,7 +2392,6 @@ static int dsi_cio_init(struct omap_dss_device *dssdev)
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
int r;
- int num_data_lanes_dssdev = dsi_get_num_data_lanes_dssdev(dssdev);
u32 l;
DSSDBGF();
@@ -2407,7 +2413,9 @@ static int dsi_cio_init(struct omap_dss_device *dssdev)
goto err_scp_clk_dom;
}
- dsi_set_lane_config(dssdev);
+ r = dsi_set_lane_config(dssdev);
+ if (r)
+ goto err_scp_clk_dom;
/* set TX STOP MODE timer to maximum for this operation */
l = dsi_read_reg(dsidev, DSI_TIMING1);
@@ -2418,7 +2426,8 @@ static int dsi_cio_init(struct omap_dss_device *dssdev)
dsi_write_reg(dsidev, DSI_TIMING1, l);
if (dsi->ulps_enabled) {
- u32 lane_mask = DSI_CLK_P | DSI_DATA1_P | DSI_DATA2_P;
+ unsigned mask_p;
+ int i;
DSSDBG("manual ulps exit\n");
@@ -2427,16 +2436,19 @@ static int dsi_cio_init(struct omap_dss_device *dssdev)
* ULPS exit sequence, as after reset the DSS HW thinks
* that we are not in ULPS mode, and refuses to send the
* sequence. So we need to send the ULPS exit sequence
- * manually.
+ * manually by setting positive lines high and negative lines
+ * low for 1ms.
*/
- if (num_data_lanes_dssdev > 2)
- lane_mask |= DSI_DATA3_P;
+ mask_p = 0;
- if (num_data_lanes_dssdev > 3)
- lane_mask |= DSI_DATA4_P;
+ for (i = 0; i < dsi->num_lanes_supported; ++i) {
+ if (dsi->lanes[i].function == DSI_LANE_UNUSED)
+ continue;
+ mask_p |= 1 << i;
+ }
- dsi_cio_enable_lane_override(dssdev, lane_mask);
+ dsi_cio_enable_lane_override(dssdev, mask_p, 0);
}
r = dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_ON);
@@ -2913,6 +2925,9 @@ static int dsi_vc_send_bta(struct platform_device *dsidev, int channel)
REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 1, 6, 6); /* BTA_EN */
+ /* flush posted write */
+ dsi_read_reg(dsidev, DSI_VC_CTRL(channel));
+
return 0;
}
@@ -3513,7 +3528,8 @@ static int dsi_enter_ulps(struct platform_device *dsidev)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
DECLARE_COMPLETION_ONSTACK(completion);
- int r;
+ int r, i;
+ unsigned mask;
DSSDBGF();
@@ -3524,9 +3540,11 @@ static int dsi_enter_ulps(struct platform_device *dsidev)
if (dsi->ulps_enabled)
return 0;
+ /* DDR_CLK_ALWAYS_ON */
if (REG_GET(dsidev, DSI_CLK_CTRL, 13, 13)) {
- DSSERR("DDR_CLK_ALWAYS_ON enabled when entering ULPS\n");
- return -EIO;
+ dsi_if_enable(dsidev, 0);
+ REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 0, 13, 13);
+ dsi_if_enable(dsidev, 1);
}
dsi_sync_vc(dsidev, 0);
@@ -3556,10 +3574,19 @@ static int dsi_enter_ulps(struct platform_device *dsidev)
if (r)
return r;
+ mask = 0;
+
+ for (i = 0; i < dsi->num_lanes_supported; ++i) {
+ if (dsi->lanes[i].function == DSI_LANE_UNUSED)
+ continue;
+ mask |= 1 << i;
+ }
/* Assert TxRequestEsc for data lanes and TxUlpsClk for clk lane */
/* LANEx_ULPS_SIG2 */
- REG_FLD_MOD(dsidev, DSI_COMPLEXIO_CFG2, (1 << 0) | (1 << 1) | (1 << 2),
- 7, 5);
+ REG_FLD_MOD(dsidev, DSI_COMPLEXIO_CFG2, mask, 9, 5);
+
+ /* flush posted write and wait for SCP interface to finish the write */
+ dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG2);
if (wait_for_completion_timeout(&completion,
msecs_to_jiffies(1000)) == 0) {
@@ -3572,8 +3599,10 @@ static int dsi_enter_ulps(struct platform_device *dsidev)
DSI_CIO_IRQ_ULPSACTIVENOT_ALL0);
/* Reset LANEx_ULPS_SIG2 */
- REG_FLD_MOD(dsidev, DSI_COMPLEXIO_CFG2, (0 << 0) | (0 << 1) | (0 << 2),
- 7, 5);
+ REG_FLD_MOD(dsidev, DSI_COMPLEXIO_CFG2, 0, 9, 5);
+
+ /* flush posted write and wait for SCP interface to finish the write */
+ dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG2);
dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_ULPS);
@@ -3836,6 +3865,7 @@ static int dsi_proto_config(struct omap_dss_device *dssdev)
static void dsi_proto_timings(struct omap_dss_device *dssdev)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
+ struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
unsigned tlpx, tclk_zero, tclk_prepare, tclk_trail;
unsigned tclk_pre, tclk_post;
unsigned ths_prepare, ths_prepare_ths_zero, ths_zero;
@@ -3843,7 +3873,7 @@ static void dsi_proto_timings(struct omap_dss_device *dssdev)
unsigned ddr_clk_pre, ddr_clk_post;
unsigned enter_hs_mode_lat, exit_hs_mode_lat;
unsigned ths_eot;
- int ndl = dsi_get_num_data_lanes_dssdev(dssdev);
+ int ndl = dsi->num_lanes_used - 1;
u32 r;
r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG0);
@@ -3945,68 +3975,82 @@ static void dsi_proto_timings(struct omap_dss_device *dssdev)
}
}
-int dsi_video_mode_enable(struct omap_dss_device *dssdev, int channel)
+int dsi_enable_video_output(struct omap_dss_device *dssdev, int channel)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
int bpp = dsi_get_pixel_size(dssdev->panel.dsi_pix_fmt);
u8 data_type;
u16 word_count;
+ int r;
- switch (dssdev->panel.dsi_pix_fmt) {
- case OMAP_DSS_DSI_FMT_RGB888:
- data_type = MIPI_DSI_PACKED_PIXEL_STREAM_24;
- break;
- case OMAP_DSS_DSI_FMT_RGB666:
- data_type = MIPI_DSI_PIXEL_STREAM_3BYTE_18;
- break;
- case OMAP_DSS_DSI_FMT_RGB666_PACKED:
- data_type = MIPI_DSI_PACKED_PIXEL_STREAM_18;
- break;
- case OMAP_DSS_DSI_FMT_RGB565:
- data_type = MIPI_DSI_PACKED_PIXEL_STREAM_16;
- break;
- default:
- BUG();
- };
+ if (dssdev->panel.dsi_mode == OMAP_DSS_DSI_VIDEO_MODE) {
+ switch (dssdev->panel.dsi_pix_fmt) {
+ case OMAP_DSS_DSI_FMT_RGB888:
+ data_type = MIPI_DSI_PACKED_PIXEL_STREAM_24;
+ break;
+ case OMAP_DSS_DSI_FMT_RGB666:
+ data_type = MIPI_DSI_PIXEL_STREAM_3BYTE_18;
+ break;
+ case OMAP_DSS_DSI_FMT_RGB666_PACKED:
+ data_type = MIPI_DSI_PACKED_PIXEL_STREAM_18;
+ break;
+ case OMAP_DSS_DSI_FMT_RGB565:
+ data_type = MIPI_DSI_PACKED_PIXEL_STREAM_16;
+ break;
+ default:
+ BUG();
+ };
- dsi_if_enable(dsidev, false);
- dsi_vc_enable(dsidev, channel, false);
+ dsi_if_enable(dsidev, false);
+ dsi_vc_enable(dsidev, channel, false);
- /* MODE, 1 = video mode */
- REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 1, 4, 4);
+ /* MODE, 1 = video mode */
+ REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 1, 4, 4);
- word_count = DIV_ROUND_UP(dssdev->panel.timings.x_res * bpp, 8);
+ word_count = DIV_ROUND_UP(dssdev->panel.timings.x_res * bpp, 8);
- dsi_vc_write_long_header(dsidev, channel, data_type, word_count, 0);
+ dsi_vc_write_long_header(dsidev, channel, data_type,
+ word_count, 0);
- dsi_vc_enable(dsidev, channel, true);
- dsi_if_enable(dsidev, true);
+ dsi_vc_enable(dsidev, channel, true);
+ dsi_if_enable(dsidev, true);
+ }
- dssdev->manager->enable(dssdev->manager);
+ r = dss_mgr_enable(dssdev->manager);
+ if (r) {
+ if (dssdev->panel.dsi_mode == OMAP_DSS_DSI_VIDEO_MODE) {
+ dsi_if_enable(dsidev, false);
+ dsi_vc_enable(dsidev, channel, false);
+ }
+
+ return r;
+ }
return 0;
}
-EXPORT_SYMBOL(dsi_video_mode_enable);
+EXPORT_SYMBOL(dsi_enable_video_output);
-void dsi_video_mode_disable(struct omap_dss_device *dssdev, int channel)
+void dsi_disable_video_output(struct omap_dss_device *dssdev, int channel)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
- dsi_if_enable(dsidev, false);
- dsi_vc_enable(dsidev, channel, false);
+ if (dssdev->panel.dsi_mode == OMAP_DSS_DSI_VIDEO_MODE) {
+ dsi_if_enable(dsidev, false);
+ dsi_vc_enable(dsidev, channel, false);
- /* MODE, 0 = command mode */
- REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 0, 4, 4);
+ /* MODE, 0 = command mode */
+ REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 0, 4, 4);
- dsi_vc_enable(dsidev, channel, true);
- dsi_if_enable(dsidev, true);
+ dsi_vc_enable(dsidev, channel, true);
+ dsi_if_enable(dsidev, true);
+ }
- dssdev->manager->disable(dssdev->manager);
+ dss_mgr_disable(dssdev->manager);
}
-EXPORT_SYMBOL(dsi_video_mode_disable);
+EXPORT_SYMBOL(dsi_disable_video_output);
static void dsi_update_screen_dispc(struct omap_dss_device *dssdev,
- u16 x, u16 y, u16 w, u16 h)
+ u16 w, u16 h)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
@@ -4021,8 +4065,7 @@ static void dsi_update_screen_dispc(struct omap_dss_device *dssdev,
const unsigned channel = dsi->update_channel;
const unsigned line_buf_size = dsi_get_line_buf_size(dsidev);
- DSSDBG("dsi_update_screen_dispc(%d,%d %dx%d)\n",
- x, y, w, h);
+ DSSDBG("dsi_update_screen_dispc(%dx%d)\n", w, h);
dsi_vc_config_source(dsidev, channel, DSI_VC_SOURCE_VP);
@@ -4070,7 +4113,7 @@ static void dsi_update_screen_dispc(struct omap_dss_device *dssdev,
msecs_to_jiffies(250));
BUG_ON(r == 0);
- dss_start_update(dssdev);
+ dss_mgr_start_update(dssdev->manager);
if (dsi->te_enabled) {
/* disable LP_RX_TO, so that we can receive TE. Time to wait
@@ -4146,66 +4189,27 @@ static void dsi_framedone_irq_callback(void *data, u32 mask)
#endif
}
-int omap_dsi_prepare_update(struct omap_dss_device *dssdev,
- u16 *x, u16 *y, u16 *w, u16 *h,
- bool enlarge_update_area)
+int omap_dsi_update(struct omap_dss_device *dssdev, int channel,
+ void (*callback)(int, void *), void *data)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
+ struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
u16 dw, dh;
- dssdev->driver->get_resolution(dssdev, &dw, &dh);
-
- if (*x > dw || *y > dh)
- return -EINVAL;
-
- if (*x + *w > dw)
- return -EINVAL;
-
- if (*y + *h > dh)
- return -EINVAL;
-
- if (*w == 1)
- return -EINVAL;
-
- if (*w == 0 || *h == 0)
- return -EINVAL;
-
dsi_perf_mark_setup(dsidev);
- dss_setup_partial_planes(dssdev, x, y, w, h,
- enlarge_update_area);
- dispc_mgr_set_lcd_size(dssdev->manager->id, *w, *h);
-
- return 0;
-}
-EXPORT_SYMBOL(omap_dsi_prepare_update);
-
-int omap_dsi_update(struct omap_dss_device *dssdev,
- int channel,
- u16 x, u16 y, u16 w, u16 h,
- void (*callback)(int, void *), void *data)
-{
- struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-
dsi->update_channel = channel;
- /* OMAP DSS cannot send updates of odd widths.
- * omap_dsi_prepare_update() makes the widths even, but add a BUG_ON
- * here to make sure we catch erroneous updates. Otherwise we'll only
- * see rather obscure HW error happening, as DSS halts. */
- BUG_ON(x % 2 == 1);
-
dsi->framedone_callback = callback;
dsi->framedone_data = data;
- dsi->update_region.x = x;
- dsi->update_region.y = y;
- dsi->update_region.w = w;
- dsi->update_region.h = h;
- dsi->update_region.device = dssdev;
+ dssdev->driver->get_resolution(dssdev, &dw, &dh);
- dsi_update_screen_dispc(dssdev, x, y, w, h);
+#ifdef DEBUG
+ dsi->update_bytes = dw * dh *
+ dsi_get_pixel_size(dssdev->panel.dsi_pix_fmt) / 8;
+#endif
+ dsi_update_screen_dispc(dssdev, dw, dh);
return 0;
}
@@ -4218,6 +4222,7 @@ static int dsi_display_init_dispc(struct omap_dss_device *dssdev)
int r;
if (dssdev->panel.dsi_mode == OMAP_DSS_DSI_CMD_MODE) {
+ u16 dw, dh;
u32 irq;
struct omap_video_timings timings = {
.hsw = 1,
@@ -4228,6 +4233,10 @@ static int dsi_display_init_dispc(struct omap_dss_device *dssdev)
.vbp = 0,
};
+ dssdev->driver->get_resolution(dssdev, &dw, &dh);
+ timings.x_res = dw;
+ timings.y_res = dh;
+
irq = dssdev->manager->id == OMAP_DSS_CHANNEL_LCD ?
DISPC_IRQ_FRAMEDONE : DISPC_IRQ_FRAMEDONE2;
@@ -4330,6 +4339,12 @@ static int dsi_display_init_dsi(struct omap_dss_device *dssdev)
int dsi_module = dsi_get_dsidev_id(dsidev);
int r;
+ r = dsi_parse_lane_config(dssdev);
+ if (r) {
+ DSSERR("illegal lane config");
+ goto err0;
+ }
+
r = dsi_pll_init(dsidev, true, true);
if (r)
goto err0;
@@ -4521,7 +4536,6 @@ int dsi_init_display(struct omap_dss_device *dssdev)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
- int dsi_module = dsi_get_dsidev_id(dsidev);
DSSDBG("DSI init\n");
@@ -4543,12 +4557,6 @@ int dsi_init_display(struct omap_dss_device *dssdev)
dsi->vdds_dsi_reg = vdds_dsi;
}
- if (dsi_get_num_data_lanes_dssdev(dssdev) > dsi->num_data_lanes) {
- DSSERR("DSI%d can't support more than %d data lanes\n",
- dsi_module + 1, dsi->num_data_lanes);
- return -EINVAL;
- }
-
return 0;
}
@@ -4771,7 +4779,13 @@ static int omap_dsihw_probe(struct platform_device *dsidev)
dev_dbg(&dsidev->dev, "OMAP DSI rev %d.%d\n",
FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
- dsi->num_data_lanes = dsi_get_num_data_lanes(dsidev);
+ /* DSI on OMAP3 doesn't have register DSI_GNQ, set number
+ * of lanes to 3 by default */
+ if (dss_has_feature(FEAT_DSI_GNQ))
+ /* NB_DATA_LANES */
+ dsi->num_lanes_supported = 1 + REG_GET(dsidev, DSI_GNQ, 11, 9);
+ else
+ dsi->num_lanes_supported = 3;
dsi_runtime_put(dsidev);
diff --git a/drivers/video/omap2/dss/dss.h b/drivers/video/omap2/dss/dss.h
index 6308fc59fc9e..32ff69fb3333 100644
--- a/drivers/video/omap2/dss/dss.h
+++ b/drivers/video/omap2/dss/dss.h
@@ -28,7 +28,7 @@
#endif
#ifdef DEBUG
-extern unsigned int dss_debug;
+extern bool dss_debug;
#ifdef DSS_SUBSYS_NAME
#define DSSDBG(format, ...) \
if (dss_debug) \
@@ -163,6 +163,34 @@ struct bus_type *dss_get_bus(void);
struct regulator *dss_get_vdds_dsi(void);
struct regulator *dss_get_vdds_sdi(void);
+/* apply */
+void dss_apply_init(void);
+int dss_mgr_wait_for_go(struct omap_overlay_manager *mgr);
+int dss_mgr_wait_for_go_ovl(struct omap_overlay *ovl);
+void dss_mgr_start_update(struct omap_overlay_manager *mgr);
+int omap_dss_mgr_apply(struct omap_overlay_manager *mgr);
+
+int dss_mgr_enable(struct omap_overlay_manager *mgr);
+void dss_mgr_disable(struct omap_overlay_manager *mgr);
+int dss_mgr_set_info(struct omap_overlay_manager *mgr,
+ struct omap_overlay_manager_info *info);
+void dss_mgr_get_info(struct omap_overlay_manager *mgr,
+ struct omap_overlay_manager_info *info);
+int dss_mgr_set_device(struct omap_overlay_manager *mgr,
+ struct omap_dss_device *dssdev);
+int dss_mgr_unset_device(struct omap_overlay_manager *mgr);
+
+bool dss_ovl_is_enabled(struct omap_overlay *ovl);
+int dss_ovl_enable(struct omap_overlay *ovl);
+int dss_ovl_disable(struct omap_overlay *ovl);
+int dss_ovl_set_info(struct omap_overlay *ovl,
+ struct omap_overlay_info *info);
+void dss_ovl_get_info(struct omap_overlay *ovl,
+ struct omap_overlay_info *info);
+int dss_ovl_set_manager(struct omap_overlay *ovl,
+ struct omap_overlay_manager *mgr);
+int dss_ovl_unset_manager(struct omap_overlay *ovl);
+
/* display */
int dss_suspend_all_devices(void);
int dss_resume_all_devices(void);
@@ -181,21 +209,22 @@ void default_get_overlay_fifo_thresholds(enum omap_plane plane,
/* manager */
int dss_init_overlay_managers(struct platform_device *pdev);
void dss_uninit_overlay_managers(struct platform_device *pdev);
-int dss_mgr_wait_for_go_ovl(struct omap_overlay *ovl);
-void dss_setup_partial_planes(struct omap_dss_device *dssdev,
- u16 *x, u16 *y, u16 *w, u16 *h,
- bool enlarge_update_area);
-void dss_start_update(struct omap_dss_device *dssdev);
+int dss_mgr_simple_check(struct omap_overlay_manager *mgr,
+ const struct omap_overlay_manager_info *info);
+int dss_mgr_check(struct omap_overlay_manager *mgr,
+ struct omap_dss_device *dssdev,
+ struct omap_overlay_manager_info *info,
+ struct omap_overlay_info **overlay_infos);
/* overlay */
void dss_init_overlays(struct platform_device *pdev);
void dss_uninit_overlays(struct platform_device *pdev);
-int dss_check_overlay(struct omap_overlay *ovl, struct omap_dss_device *dssdev);
void dss_overlay_setup_dispc_manager(struct omap_overlay_manager *mgr);
-#ifdef L4_EXAMPLE
-void dss_overlay_setup_l4_manager(struct omap_overlay_manager *mgr);
-#endif
void dss_recheck_connections(struct omap_dss_device *dssdev, bool force);
+int dss_ovl_simple_check(struct omap_overlay *ovl,
+ const struct omap_overlay_info *info);
+int dss_ovl_check(struct omap_overlay *ovl,
+ struct omap_overlay_info *info, struct omap_dss_device *dssdev);
/* DSS */
int dss_init_platform_driver(void);
@@ -399,21 +428,22 @@ int dispc_calc_clock_rates(unsigned long dispc_fclk_rate,
struct dispc_clock_info *cinfo);
+void dispc_ovl_set_fifo_threshold(enum omap_plane plane, u32 low, u32 high);
u32 dispc_ovl_get_fifo_size(enum omap_plane plane);
u32 dispc_ovl_get_burst_size(enum omap_plane plane);
int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi,
- bool ilace, enum omap_channel channel, bool replication,
- u32 fifo_low, u32 fifo_high);
+ bool ilace, bool replication);
int dispc_ovl_enable(enum omap_plane plane, bool enable);
-
+void dispc_ovl_set_channel_out(enum omap_plane plane,
+ enum omap_channel channel);
void dispc_mgr_enable_fifohandcheck(enum omap_channel channel, bool enable);
void dispc_mgr_set_lcd_size(enum omap_channel channel, u16 width, u16 height);
-void dispc_mgr_enable_cpr(enum omap_channel channel, bool enable);
-void dispc_mgr_set_cpr_coef(enum omap_channel channel,
- struct omap_dss_cpr_coefs *coefs);
+u32 dispc_mgr_get_vsync_irq(enum omap_channel channel);
+u32 dispc_mgr_get_framedone_irq(enum omap_channel channel);
bool dispc_mgr_go_busy(enum omap_channel channel);
void dispc_mgr_go(enum omap_channel channel);
+bool dispc_mgr_is_enabled(enum omap_channel channel);
void dispc_mgr_enable(enum omap_channel channel, bool enable);
bool dispc_mgr_is_channel_enabled(enum omap_channel channel);
void dispc_mgr_set_io_pad_mode(enum dss_io_pad_mode mode);
@@ -421,18 +451,6 @@ void dispc_mgr_enable_stallmode(enum omap_channel channel, bool enable);
void dispc_mgr_set_tft_data_lines(enum omap_channel channel, u8 data_lines);
void dispc_mgr_set_lcd_display_type(enum omap_channel channel,
enum omap_lcd_display_type type);
-void dispc_mgr_set_default_color(enum omap_channel channel, u32 color);
-u32 dispc_mgr_get_default_color(enum omap_channel channel);
-void dispc_mgr_set_trans_key(enum omap_channel ch,
- enum omap_dss_trans_key_type type,
- u32 trans_key);
-void dispc_mgr_get_trans_key(enum omap_channel ch,
- enum omap_dss_trans_key_type *type,
- u32 *trans_key);
-void dispc_mgr_enable_trans_key(enum omap_channel ch, bool enable);
-void dispc_mgr_enable_alpha_fixed_zorder(enum omap_channel ch, bool enable);
-bool dispc_mgr_trans_key_enabled(enum omap_channel ch);
-bool dispc_mgr_alpha_fixed_zorder_enabled(enum omap_channel ch);
void dispc_mgr_set_lcd_timings(enum omap_channel channel,
struct omap_video_timings *timings);
void dispc_mgr_set_pol_freq(enum omap_channel channel,
@@ -443,6 +461,8 @@ int dispc_mgr_set_clock_div(enum omap_channel channel,
struct dispc_clock_info *cinfo);
int dispc_mgr_get_clock_div(enum omap_channel channel,
struct dispc_clock_info *cinfo);
+void dispc_mgr_setup(enum omap_channel channel,
+ struct omap_overlay_manager_info *info);
/* VENC */
#ifdef CONFIG_OMAP2_DSS_VENC
diff --git a/drivers/video/omap2/dss/dss_features.c b/drivers/video/omap2/dss/dss_features.c
index b402699168a5..afcb59301c37 100644
--- a/drivers/video/omap2/dss/dss_features.c
+++ b/drivers/video/omap2/dss/dss_features.c
@@ -304,6 +304,11 @@ static const struct dss_param_range omap2_dss_param_range[] = {
[FEAT_PARAM_DSIPLL_FINT] = { 0, 0 },
[FEAT_PARAM_DSIPLL_LPDIV] = { 0, 0 },
[FEAT_PARAM_DOWNSCALE] = { 1, 2 },
+ /*
+ * Assuming the line width buffer to be 768 pixels, as the OMAP2 DISPC
+ * scaler cannot scale an image wider than 768 pixels.
+ */
+ [FEAT_PARAM_LINEWIDTH] = { 1, 768 },
};
static const struct dss_param_range omap3_dss_param_range[] = {
@@ -316,6 +321,7 @@ static const struct dss_param_range omap3_dss_param_range[] = {
[FEAT_PARAM_DSIPLL_FINT] = { 750000, 2100000 },
[FEAT_PARAM_DSIPLL_LPDIV] = { 1, (1 << 13) - 1},
[FEAT_PARAM_DOWNSCALE] = { 1, 4 },
+ [FEAT_PARAM_LINEWIDTH] = { 1, 1024 },
};
static const struct dss_param_range omap4_dss_param_range[] = {
@@ -328,6 +334,7 @@ static const struct dss_param_range omap4_dss_param_range[] = {
[FEAT_PARAM_DSIPLL_FINT] = { 500000, 2500000 },
[FEAT_PARAM_DSIPLL_LPDIV] = { 0, (1 << 13) - 1 },
[FEAT_PARAM_DOWNSCALE] = { 1, 4 },
+ [FEAT_PARAM_LINEWIDTH] = { 1, 2048 },
};
/* OMAP2 DSS Features */
@@ -465,6 +472,10 @@ static const struct ti_hdmi_ip_ops omap4_hdmi_functions = {
.dump_core = ti_hdmi_4xxx_core_dump,
.dump_pll = ti_hdmi_4xxx_pll_dump,
.dump_phy = ti_hdmi_4xxx_phy_dump,
+#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
+ defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
+ .audio_enable = ti_hdmi_4xxx_wp_audio_enable,
+#endif
};
diff --git a/drivers/video/omap2/dss/dss_features.h b/drivers/video/omap2/dss/dss_features.h
index 6a6c05dd45ce..cd833bbaac3d 100644
--- a/drivers/video/omap2/dss/dss_features.h
+++ b/drivers/video/omap2/dss/dss_features.h
@@ -86,6 +86,7 @@ enum dss_range_param {
FEAT_PARAM_DSIPLL_FINT,
FEAT_PARAM_DSIPLL_LPDIV,
FEAT_PARAM_DOWNSCALE,
+ FEAT_PARAM_LINEWIDTH,
};
/* DSS Feature Functions */
diff --git a/drivers/video/omap2/dss/hdmi.c b/drivers/video/omap2/dss/hdmi.c
index c56378c555b0..b4c270edb915 100644
--- a/drivers/video/omap2/dss/hdmi.c
+++ b/drivers/video/omap2/dss/hdmi.c
@@ -333,7 +333,7 @@ static int hdmi_power_on(struct omap_dss_device *dssdev)
if (r)
return r;
- dispc_mgr_enable(OMAP_DSS_CHANNEL_DIGIT, 0);
+ dss_mgr_disable(dssdev->manager);
p = &dssdev->panel.timings;
@@ -387,9 +387,16 @@ static int hdmi_power_on(struct omap_dss_device *dssdev)
hdmi.ip_data.ops->video_enable(&hdmi.ip_data, 1);
- dispc_mgr_enable(OMAP_DSS_CHANNEL_DIGIT, 1);
+ r = dss_mgr_enable(dssdev->manager);
+ if (r)
+ goto err_mgr_enable;
return 0;
+
+err_mgr_enable:
+ hdmi.ip_data.ops->video_enable(&hdmi.ip_data, 0);
+ hdmi.ip_data.ops->phy_disable(&hdmi.ip_data);
+ hdmi.ip_data.ops->pll_disable(&hdmi.ip_data);
err:
hdmi_runtime_put();
return -EIO;
@@ -397,7 +404,7 @@ err:
static void hdmi_power_off(struct omap_dss_device *dssdev)
{
- dispc_mgr_enable(OMAP_DSS_CHANNEL_DIGIT, 0);
+ dss_mgr_disable(dssdev->manager);
hdmi.ip_data.ops->video_enable(&hdmi.ip_data, 0);
hdmi.ip_data.ops->phy_disable(&hdmi.ip_data);
@@ -554,11 +561,44 @@ void omapdss_hdmi_display_disable(struct omap_dss_device *dssdev)
#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
-static int hdmi_audio_hw_params(struct hdmi_ip_data *ip_data,
- struct snd_pcm_substream *substream,
+static int hdmi_audio_trigger(struct snd_pcm_substream *substream, int cmd,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_codec *codec = rtd->codec;
+ struct platform_device *pdev = to_platform_device(codec->dev);
+ struct hdmi_ip_data *ip_data = snd_soc_codec_get_drvdata(codec);
+ int err = 0;
+
+ if (!(ip_data->ops) || !(ip_data->ops->audio_enable)) {
+ dev_err(&pdev->dev, "Cannot enable/disable audio\n");
+ return -ENODEV;
+ }
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ ip_data->ops->audio_enable(ip_data, true);
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ ip_data->ops->audio_enable(ip_data, false);
+ break;
+ default:
+ err = -EINVAL;
+ }
+ return err;
+}
+
+static int hdmi_audio_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_codec *codec = rtd->codec;
+ struct hdmi_ip_data *ip_data = snd_soc_codec_get_drvdata(codec);
struct hdmi_audio_format audio_format;
struct hdmi_audio_dma audio_dma;
struct hdmi_core_audio_config core_cfg;
@@ -698,7 +738,16 @@ static int hdmi_audio_startup(struct snd_pcm_substream *substream,
return 0;
}
+static int hdmi_audio_codec_probe(struct snd_soc_codec *codec)
+{
+ struct hdmi_ip_data *priv = &hdmi.ip_data;
+
+ snd_soc_codec_set_drvdata(codec, priv);
+ return 0;
+}
+
static struct snd_soc_codec_driver hdmi_audio_codec_drv = {
+ .probe = hdmi_audio_codec_probe,
};
static struct snd_soc_dai_ops hdmi_audio_codec_ops = {
diff --git a/drivers/video/omap2/dss/manager.c b/drivers/video/omap2/dss/manager.c
index 6e63845cc7d7..d1858e71c64e 100644
--- a/drivers/video/omap2/dss/manager.c
+++ b/drivers/video/omap2/dss/manager.c
@@ -26,17 +26,15 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <video/omapdss.h>
-#include <plat/cpu.h>
#include "dss.h"
#include "dss_features.h"
static int num_managers;
-static struct list_head manager_list;
+static struct omap_overlay_manager *managers;
static ssize_t manager_name_show(struct omap_overlay_manager *mgr, char *buf)
{
@@ -106,7 +104,11 @@ put_device:
static ssize_t manager_default_color_show(struct omap_overlay_manager *mgr,
char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%#x\n", mgr->info.default_color);
+ struct omap_overlay_manager_info info;
+
+ mgr->get_manager_info(mgr, &info);
+
+ return snprintf(buf, PAGE_SIZE, "%#x\n", info.default_color);
}
static ssize_t manager_default_color_store(struct omap_overlay_manager *mgr,
@@ -144,8 +146,11 @@ static ssize_t manager_trans_key_type_show(struct omap_overlay_manager *mgr,
char *buf)
{
enum omap_dss_trans_key_type key_type;
+ struct omap_overlay_manager_info info;
+
+ mgr->get_manager_info(mgr, &info);
- key_type = mgr->info.trans_key_type;
+ key_type = info.trans_key_type;
BUG_ON(key_type >= ARRAY_SIZE(trans_key_type_str));
return snprintf(buf, PAGE_SIZE, "%s\n", trans_key_type_str[key_type]);
@@ -185,7 +190,11 @@ static ssize_t manager_trans_key_type_store(struct omap_overlay_manager *mgr,
static ssize_t manager_trans_key_value_show(struct omap_overlay_manager *mgr,
char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%#x\n", mgr->info.trans_key);
+ struct omap_overlay_manager_info info;
+
+ mgr->get_manager_info(mgr, &info);
+
+ return snprintf(buf, PAGE_SIZE, "%#x\n", info.trans_key);
}
static ssize_t manager_trans_key_value_store(struct omap_overlay_manager *mgr,
@@ -217,7 +226,11 @@ static ssize_t manager_trans_key_value_store(struct omap_overlay_manager *mgr,
static ssize_t manager_trans_key_enabled_show(struct omap_overlay_manager *mgr,
char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%d\n", mgr->info.trans_enabled);
+ struct omap_overlay_manager_info info;
+
+ mgr->get_manager_info(mgr, &info);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", info.trans_enabled);
}
static ssize_t manager_trans_key_enabled_store(struct omap_overlay_manager *mgr,
@@ -249,10 +262,14 @@ static ssize_t manager_trans_key_enabled_store(struct omap_overlay_manager *mgr,
static ssize_t manager_alpha_blending_enabled_show(
struct omap_overlay_manager *mgr, char *buf)
{
+ struct omap_overlay_manager_info info;
+
+ mgr->get_manager_info(mgr, &info);
+
WARN_ON(!dss_has_feature(FEAT_ALPHA_FIXED_ZORDER));
return snprintf(buf, PAGE_SIZE, "%d\n",
- mgr->info.partial_alpha_enabled);
+ info.partial_alpha_enabled);
}
static ssize_t manager_alpha_blending_enabled_store(
@@ -287,7 +304,11 @@ static ssize_t manager_alpha_blending_enabled_store(
static ssize_t manager_cpr_enable_show(struct omap_overlay_manager *mgr,
char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%d\n", mgr->info.cpr_enable);
+ struct omap_overlay_manager_info info;
+
+ mgr->get_manager_info(mgr, &info);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", info.cpr_enable);
}
static ssize_t manager_cpr_enable_store(struct omap_overlay_manager *mgr,
@@ -469,143 +490,6 @@ static struct kobj_type manager_ktype = {
.default_attrs = manager_sysfs_attrs,
};
-/*
- * We have 4 levels of cache for the dispc settings. First two are in SW and
- * the latter two in HW.
- *
- * +--------------------+
- * |overlay/manager_info|
- * +--------------------+
- * v
- * apply()
- * v
- * +--------------------+
- * | dss_cache |
- * +--------------------+
- * v
- * configure()
- * v
- * +--------------------+
- * | shadow registers |
- * +--------------------+
- * v
- * VFP or lcd/digit_enable
- * v
- * +--------------------+
- * | registers |
- * +--------------------+
- */
-
-struct overlay_cache_data {
- /* If true, cache changed, but not written to shadow registers. Set
- * in apply(), cleared when registers written. */
- bool dirty;
- /* If true, shadow registers contain changed values not yet in real
- * registers. Set when writing to shadow registers, cleared at
- * VSYNC/EVSYNC */
- bool shadow_dirty;
-
- bool enabled;
-
- struct omap_overlay_info info;
-
- enum omap_channel channel;
- bool replication;
- bool ilace;
-
- u32 fifo_low;
- u32 fifo_high;
-};
-
-struct manager_cache_data {
- /* If true, cache changed, but not written to shadow registers. Set
- * in apply(), cleared when registers written. */
- bool dirty;
- /* If true, shadow registers contain changed values not yet in real
- * registers. Set when writing to shadow registers, cleared at
- * VSYNC/EVSYNC */
- bool shadow_dirty;
-
- struct omap_overlay_manager_info info;
-
- bool manual_update;
- bool do_manual_update;
-
- /* manual update region */
- u16 x, y, w, h;
-
- /* enlarge the update area if the update area contains scaled
- * overlays */
- bool enlarge_update_area;
-};
-
-static struct {
- spinlock_t lock;
- struct overlay_cache_data overlay_cache[MAX_DSS_OVERLAYS];
- struct manager_cache_data manager_cache[MAX_DSS_MANAGERS];
-
- bool irq_enabled;
-} dss_cache;
-
-
-
-static int omap_dss_set_device(struct omap_overlay_manager *mgr,
- struct omap_dss_device *dssdev)
-{
- int i;
- int r;
-
- if (dssdev->manager) {
- DSSERR("display '%s' already has a manager '%s'\n",
- dssdev->name, dssdev->manager->name);
- return -EINVAL;
- }
-
- if ((mgr->supported_displays & dssdev->type) == 0) {
- DSSERR("display '%s' does not support manager '%s'\n",
- dssdev->name, mgr->name);
- return -EINVAL;
- }
-
- for (i = 0; i < mgr->num_overlays; i++) {
- struct omap_overlay *ovl = mgr->overlays[i];
-
- if (ovl->manager != mgr || !ovl->info.enabled)
- continue;
-
- r = dss_check_overlay(ovl, dssdev);
- if (r)
- return r;
- }
-
- dssdev->manager = mgr;
- mgr->device = dssdev;
- mgr->device_changed = true;
-
- return 0;
-}
-
-static int omap_dss_unset_device(struct omap_overlay_manager *mgr)
-{
- if (!mgr->device) {
- DSSERR("failed to unset display, display not set.\n");
- return -EINVAL;
- }
-
- /*
- * Don't allow currently enabled displays to have the overlay manager
- * pulled out from underneath them
- */
- if (mgr->device->state != OMAP_DSS_DISPLAY_DISABLED)
- return -EINVAL;
-
- mgr->device->manager = NULL;
- mgr->device = NULL;
- mgr->device_changed = true;
-
- return 0;
-}
-
static int dss_mgr_wait_for_vsync(struct omap_overlay_manager *mgr)
{
unsigned long timeout = msecs_to_jiffies(500);
@@ -624,1022 +508,169 @@ static int dss_mgr_wait_for_vsync(struct omap_overlay_manager *mgr)
return omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
}
-static int dss_mgr_wait_for_go(struct omap_overlay_manager *mgr)
-{
- unsigned long timeout = msecs_to_jiffies(500);
- struct manager_cache_data *mc;
- u32 irq;
- int r;
- int i;
- struct omap_dss_device *dssdev = mgr->device;
-
- if (!dssdev || dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
- return 0;
-
- if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE)
- return 0;
-
- if (dssdev->type == OMAP_DISPLAY_TYPE_VENC
- || dssdev->type == OMAP_DISPLAY_TYPE_HDMI) {
- irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN;
- } else {
- irq = (dssdev->manager->id == OMAP_DSS_CHANNEL_LCD) ?
- DISPC_IRQ_VSYNC : DISPC_IRQ_VSYNC2;
- }
-
- mc = &dss_cache.manager_cache[mgr->id];
- i = 0;
- while (1) {
- unsigned long flags;
- bool shadow_dirty, dirty;
-
- spin_lock_irqsave(&dss_cache.lock, flags);
- dirty = mc->dirty;
- shadow_dirty = mc->shadow_dirty;
- spin_unlock_irqrestore(&dss_cache.lock, flags);
-
- if (!dirty && !shadow_dirty) {
- r = 0;
- break;
- }
-
- /* 4 iterations is the worst case:
- * 1 - initial iteration, dirty = true (between VFP and VSYNC)
- * 2 - first VSYNC, dirty = true
- * 3 - dirty = false, shadow_dirty = true
- * 4 - shadow_dirty = false */
- if (i++ == 3) {
- DSSERR("mgr(%d)->wait_for_go() not finishing\n",
- mgr->id);
- r = 0;
- break;
- }
-
- r = omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
- if (r == -ERESTARTSYS)
- break;
-
- if (r) {
- DSSERR("mgr(%d)->wait_for_go() timeout\n", mgr->id);
- break;
- }
- }
-
- return r;
-}
-
-int dss_mgr_wait_for_go_ovl(struct omap_overlay *ovl)
-{
- unsigned long timeout = msecs_to_jiffies(500);
- struct overlay_cache_data *oc;
- struct omap_dss_device *dssdev;
- u32 irq;
- int r;
- int i;
-
- if (!ovl->manager)
- return 0;
-
- dssdev = ovl->manager->device;
-
- if (!dssdev || dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
- return 0;
-
- if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE)
- return 0;
-
- if (dssdev->type == OMAP_DISPLAY_TYPE_VENC
- || dssdev->type == OMAP_DISPLAY_TYPE_HDMI) {
- irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN;
- } else {
- irq = (dssdev->manager->id == OMAP_DSS_CHANNEL_LCD) ?
- DISPC_IRQ_VSYNC : DISPC_IRQ_VSYNC2;
- }
-
- oc = &dss_cache.overlay_cache[ovl->id];
- i = 0;
- while (1) {
- unsigned long flags;
- bool shadow_dirty, dirty;
-
- spin_lock_irqsave(&dss_cache.lock, flags);
- dirty = oc->dirty;
- shadow_dirty = oc->shadow_dirty;
- spin_unlock_irqrestore(&dss_cache.lock, flags);
-
- if (!dirty && !shadow_dirty) {
- r = 0;
- break;
- }
-
- /* 4 iterations is the worst case:
- * 1 - initial iteration, dirty = true (between VFP and VSYNC)
- * 2 - first VSYNC, dirty = true
- * 3 - dirty = false, shadow_dirty = true
- * 4 - shadow_dirty = false */
- if (i++ == 3) {
- DSSERR("ovl(%d)->wait_for_go() not finishing\n",
- ovl->id);
- r = 0;
- break;
- }
-
- r = omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
- if (r == -ERESTARTSYS)
- break;
-
- if (r) {
- DSSERR("ovl(%d)->wait_for_go() timeout\n", ovl->id);
- break;
- }
- }
-
- return r;
-}
-
-static int overlay_enabled(struct omap_overlay *ovl)
-{
- return ovl->info.enabled && ovl->manager && ovl->manager->device;
-}
-
-/* Is rect1 a subset of rect2? */
-static bool rectangle_subset(int x1, int y1, int w1, int h1,
- int x2, int y2, int w2, int h2)
-{
- if (x1 < x2 || y1 < y2)
- return false;
-
- if (x1 + w1 > x2 + w2)
- return false;
-
- if (y1 + h1 > y2 + h2)
- return false;
-
- return true;
-}
-
-/* Do rect1 and rect2 overlap? */
-static bool rectangle_intersects(int x1, int y1, int w1, int h1,
- int x2, int y2, int w2, int h2)
-{
- if (x1 >= x2 + w2)
- return false;
-
- if (x2 >= x1 + w1)
- return false;
-
- if (y1 >= y2 + h2)
- return false;
-
- if (y2 >= y1 + h1)
- return false;
-
- return true;
-}
-
-static bool dispc_is_overlay_scaled(struct overlay_cache_data *oc)
-{
- struct omap_overlay_info *oi = &oc->info;
-
- if (oi->out_width != 0 && oi->width != oi->out_width)
- return true;
-
- if (oi->out_height != 0 && oi->height != oi->out_height)
- return true;
-
- return false;
-}
-
-static int configure_overlay(enum omap_plane plane)
+int dss_init_overlay_managers(struct platform_device *pdev)
{
- struct overlay_cache_data *c;
- struct manager_cache_data *mc;
- struct omap_overlay_info *oi, new_oi;
- struct omap_overlay_manager_info *mi;
- u16 outw, outh;
- u16 x, y, w, h;
- u32 paddr;
- int r;
- u16 orig_w, orig_h, orig_outw, orig_outh;
+ int i, r;
- DSSDBGF("%d", plane);
+ num_managers = dss_feat_get_num_mgrs();
- c = &dss_cache.overlay_cache[plane];
- oi = &c->info;
+ managers = kzalloc(sizeof(struct omap_overlay_manager) * num_managers,
+ GFP_KERNEL);
- if (!c->enabled) {
- dispc_ovl_enable(plane, 0);
- return 0;
- }
+ BUG_ON(managers == NULL);
- mc = &dss_cache.manager_cache[c->channel];
- mi = &mc->info;
-
- x = oi->pos_x;
- y = oi->pos_y;
- w = oi->width;
- h = oi->height;
- outw = oi->out_width == 0 ? oi->width : oi->out_width;
- outh = oi->out_height == 0 ? oi->height : oi->out_height;
- paddr = oi->paddr;
-
- orig_w = w;
- orig_h = h;
- orig_outw = outw;
- orig_outh = outh;
-
- if (mc->manual_update && mc->do_manual_update) {
- unsigned bpp;
- unsigned scale_x_m = w, scale_x_d = outw;
- unsigned scale_y_m = h, scale_y_d = outh;
-
- /* If the overlay is outside the update region, disable it */
- if (!rectangle_intersects(mc->x, mc->y, mc->w, mc->h,
- x, y, outw, outh)) {
- dispc_ovl_enable(plane, 0);
- return 0;
- }
+ for (i = 0; i < num_managers; ++i) {
+ struct omap_overlay_manager *mgr = &managers[i];
- switch (oi->color_mode) {
- case OMAP_DSS_COLOR_NV12:
- bpp = 8;
- break;
- case OMAP_DSS_COLOR_RGB16:
- case OMAP_DSS_COLOR_ARGB16:
- case OMAP_DSS_COLOR_YUV2:
- case OMAP_DSS_COLOR_UYVY:
- case OMAP_DSS_COLOR_RGBA16:
- case OMAP_DSS_COLOR_RGBX16:
- case OMAP_DSS_COLOR_ARGB16_1555:
- case OMAP_DSS_COLOR_XRGB16_1555:
- bpp = 16;
+ switch (i) {
+ case 0:
+ mgr->name = "lcd";
+ mgr->id = OMAP_DSS_CHANNEL_LCD;
break;
-
- case OMAP_DSS_COLOR_RGB24P:
- bpp = 24;
+ case 1:
+ mgr->name = "tv";
+ mgr->id = OMAP_DSS_CHANNEL_DIGIT;
break;
-
- case OMAP_DSS_COLOR_RGB24U:
- case OMAP_DSS_COLOR_ARGB32:
- case OMAP_DSS_COLOR_RGBA32:
- case OMAP_DSS_COLOR_RGBX32:
- bpp = 32;
+ case 2:
+ mgr->name = "lcd2";
+ mgr->id = OMAP_DSS_CHANNEL_LCD2;
break;
-
- default:
- BUG();
}
- if (mc->x > oi->pos_x) {
- x = 0;
- outw -= (mc->x - oi->pos_x);
- paddr += (mc->x - oi->pos_x) *
- scale_x_m / scale_x_d * bpp / 8;
- } else {
- x = oi->pos_x - mc->x;
- }
-
- if (mc->y > oi->pos_y) {
- y = 0;
- outh -= (mc->y - oi->pos_y);
- paddr += (mc->y - oi->pos_y) *
- scale_y_m / scale_y_d *
- oi->screen_width * bpp / 8;
- } else {
- y = oi->pos_y - mc->y;
- }
-
- if (mc->w < (x + outw))
- outw -= (x + outw) - (mc->w);
-
- if (mc->h < (y + outh))
- outh -= (y + outh) - (mc->h);
-
- w = w * outw / orig_outw;
- h = h * outh / orig_outh;
-
- /* YUV mode overlay's input width has to be even and the
- * algorithm above may adjust the width to be odd.
- *
- * Here we adjust the width if needed, preferring to increase
- * the width if the original width was bigger.
- */
- if ((w & 1) &&
- (oi->color_mode == OMAP_DSS_COLOR_YUV2 ||
- oi->color_mode == OMAP_DSS_COLOR_UYVY)) {
- if (orig_w > w)
- w += 1;
- else
- w -= 1;
- }
- }
-
- new_oi = *oi;
-
- /* update new_oi members which could have been possibly updated */
- new_oi.pos_x = x;
- new_oi.pos_y = y;
- new_oi.width = w;
- new_oi.height = h;
- new_oi.out_width = outw;
- new_oi.out_height = outh;
- new_oi.paddr = paddr;
-
- r = dispc_ovl_setup(plane, &new_oi, c->ilace, c->channel,
- c->replication, c->fifo_low, c->fifo_high);
- if (r) {
- /* this shouldn't happen */
- DSSERR("dispc_ovl_setup failed for ovl %d\n", plane);
- dispc_ovl_enable(plane, 0);
- return r;
- }
-
- dispc_ovl_enable(plane, 1);
-
- return 0;
-}
-
-static void configure_manager(enum omap_channel channel)
-{
- struct omap_overlay_manager_info *mi;
-
- DSSDBGF("%d", channel);
-
- /* picking info from the cache */
- mi = &dss_cache.manager_cache[channel].info;
-
- dispc_mgr_set_default_color(channel, mi->default_color);
- dispc_mgr_set_trans_key(channel, mi->trans_key_type, mi->trans_key);
- dispc_mgr_enable_trans_key(channel, mi->trans_enabled);
- dispc_mgr_enable_alpha_fixed_zorder(channel, mi->partial_alpha_enabled);
- if (dss_has_feature(FEAT_CPR)) {
- dispc_mgr_enable_cpr(channel, mi->cpr_enable);
- dispc_mgr_set_cpr_coef(channel, &mi->cpr_coefs);
- }
-}
-
-/* configure_dispc() tries to write values from cache to shadow registers.
- * It writes only to those managers/overlays that are not busy.
- * returns 0 if everything could be written to shadow registers.
- * returns 1 if not everything could be written to shadow registers. */
-static int configure_dispc(void)
-{
- struct overlay_cache_data *oc;
- struct manager_cache_data *mc;
- const int num_ovls = dss_feat_get_num_ovls();
- const int num_mgrs = dss_feat_get_num_mgrs();
- int i;
- int r;
- bool mgr_busy[MAX_DSS_MANAGERS];
- bool mgr_go[MAX_DSS_MANAGERS];
- bool busy;
-
- r = 0;
- busy = false;
-
- for (i = 0; i < num_mgrs; i++) {
- mgr_busy[i] = dispc_mgr_go_busy(i);
- mgr_go[i] = false;
- }
-
- /* Commit overlay settings */
- for (i = 0; i < num_ovls; ++i) {
- oc = &dss_cache.overlay_cache[i];
- mc = &dss_cache.manager_cache[oc->channel];
+ mgr->set_device = &dss_mgr_set_device;
+ mgr->unset_device = &dss_mgr_unset_device;
+ mgr->apply = &omap_dss_mgr_apply;
+ mgr->set_manager_info = &dss_mgr_set_info;
+ mgr->get_manager_info = &dss_mgr_get_info;
+ mgr->wait_for_go = &dss_mgr_wait_for_go;
+ mgr->wait_for_vsync = &dss_mgr_wait_for_vsync;
- if (!oc->dirty)
- continue;
+ mgr->caps = 0;
+ mgr->supported_displays =
+ dss_feat_get_supported_displays(mgr->id);
- if (mc->manual_update && !mc->do_manual_update)
- continue;
+ INIT_LIST_HEAD(&mgr->overlays);
- if (mgr_busy[oc->channel]) {
- busy = true;
- continue;
- }
+ r = kobject_init_and_add(&mgr->kobj, &manager_ktype,
+ &pdev->dev.kobj, "manager%d", i);
- r = configure_overlay(i);
if (r)
- DSSERR("configure_overlay %d failed\n", i);
-
- oc->dirty = false;
- oc->shadow_dirty = true;
- mgr_go[oc->channel] = true;
- }
-
- /* Commit manager settings */
- for (i = 0; i < num_mgrs; ++i) {
- mc = &dss_cache.manager_cache[i];
-
- if (!mc->dirty)
- continue;
-
- if (mc->manual_update && !mc->do_manual_update)
- continue;
-
- if (mgr_busy[i]) {
- busy = true;
- continue;
- }
-
- configure_manager(i);
- mc->dirty = false;
- mc->shadow_dirty = true;
- mgr_go[i] = true;
- }
-
- /* set GO */
- for (i = 0; i < num_mgrs; ++i) {
- mc = &dss_cache.manager_cache[i];
-
- if (!mgr_go[i])
- continue;
-
- /* We don't need GO with manual update display. LCD iface will
- * always be turned off after frame, and new settings will be
- * taken in to use at next update */
- if (!mc->manual_update)
- dispc_mgr_go(i);
- }
-
- if (busy)
- r = 1;
- else
- r = 0;
-
- return r;
-}
-
-/* Make the coordinates even. There are some strange problems with OMAP and
- * partial DSI update when the update widths are odd. */
-static void make_even(u16 *x, u16 *w)
-{
- u16 x1, x2;
-
- x1 = *x;
- x2 = *x + *w;
-
- x1 &= ~1;
- x2 = ALIGN(x2, 2);
-
- *x = x1;
- *w = x2 - x1;
-}
-
-/* Configure dispc for partial update. Return possibly modified update
- * area */
-void dss_setup_partial_planes(struct omap_dss_device *dssdev,
- u16 *xi, u16 *yi, u16 *wi, u16 *hi, bool enlarge_update_area)
-{
- struct overlay_cache_data *oc;
- struct manager_cache_data *mc;
- struct omap_overlay_info *oi;
- const int num_ovls = dss_feat_get_num_ovls();
- struct omap_overlay_manager *mgr;
- int i;
- u16 x, y, w, h;
- unsigned long flags;
- bool area_changed;
-
- x = *xi;
- y = *yi;
- w = *wi;
- h = *hi;
-
- DSSDBG("dispc_setup_partial_planes %d,%d %dx%d\n",
- *xi, *yi, *wi, *hi);
-
- mgr = dssdev->manager;
-
- if (!mgr) {
- DSSDBG("no manager\n");
- return;
+ DSSERR("failed to create sysfs file\n");
}
- make_even(&x, &w);
-
- spin_lock_irqsave(&dss_cache.lock, flags);
-
- /*
- * Execute the outer loop until the inner loop has completed
- * once without increasing the update area. This will ensure that
- * all scaled overlays end up completely within the update area.
- */
- do {
- area_changed = false;
-
- /* We need to show the whole overlay if it is scaled. So look
- * for those, and make the update area larger if found.
- * Also mark the overlay cache dirty */
- for (i = 0; i < num_ovls; ++i) {
- unsigned x1, y1, x2, y2;
- unsigned outw, outh;
-
- oc = &dss_cache.overlay_cache[i];
- oi = &oc->info;
-
- if (oc->channel != mgr->id)
- continue;
-
- oc->dirty = true;
-
- if (!enlarge_update_area)
- continue;
-
- if (!oc->enabled)
- continue;
-
- if (!dispc_is_overlay_scaled(oc))
- continue;
-
- outw = oi->out_width == 0 ?
- oi->width : oi->out_width;
- outh = oi->out_height == 0 ?
- oi->height : oi->out_height;
-
- /* is the overlay outside the update region? */
- if (!rectangle_intersects(x, y, w, h,
- oi->pos_x, oi->pos_y,
- outw, outh))
- continue;
-
- /* if the overlay totally inside the update region? */
- if (rectangle_subset(oi->pos_x, oi->pos_y, outw, outh,
- x, y, w, h))
- continue;
-
- if (x > oi->pos_x)
- x1 = oi->pos_x;
- else
- x1 = x;
-
- if (y > oi->pos_y)
- y1 = oi->pos_y;
- else
- y1 = y;
-
- if ((x + w) < (oi->pos_x + outw))
- x2 = oi->pos_x + outw;
- else
- x2 = x + w;
-
- if ((y + h) < (oi->pos_y + outh))
- y2 = oi->pos_y + outh;
- else
- y2 = y + h;
-
- x = x1;
- y = y1;
- w = x2 - x1;
- h = y2 - y1;
-
- make_even(&x, &w);
-
- DSSDBG("changing upd area due to ovl(%d) "
- "scaling %d,%d %dx%d\n",
- i, x, y, w, h);
-
- area_changed = true;
- }
- } while (area_changed);
-
- mc = &dss_cache.manager_cache[mgr->id];
- mc->do_manual_update = true;
- mc->enlarge_update_area = enlarge_update_area;
- mc->x = x;
- mc->y = y;
- mc->w = w;
- mc->h = h;
-
- configure_dispc();
-
- mc->do_manual_update = false;
-
- spin_unlock_irqrestore(&dss_cache.lock, flags);
-
- *xi = x;
- *yi = y;
- *wi = w;
- *hi = h;
+ return 0;
}
-void dss_start_update(struct omap_dss_device *dssdev)
+void dss_uninit_overlay_managers(struct platform_device *pdev)
{
- struct manager_cache_data *mc;
- struct overlay_cache_data *oc;
- const int num_ovls = dss_feat_get_num_ovls();
- const int num_mgrs = dss_feat_get_num_mgrs();
- struct omap_overlay_manager *mgr;
int i;
- mgr = dssdev->manager;
+ for (i = 0; i < num_managers; ++i) {
+ struct omap_overlay_manager *mgr = &managers[i];
- for (i = 0; i < num_ovls; ++i) {
- oc = &dss_cache.overlay_cache[i];
- if (oc->channel != mgr->id)
- continue;
-
- oc->shadow_dirty = false;
- }
-
- for (i = 0; i < num_mgrs; ++i) {
- mc = &dss_cache.manager_cache[i];
- if (mgr->id != i)
- continue;
-
- mc->shadow_dirty = false;
+ kobject_del(&mgr->kobj);
+ kobject_put(&mgr->kobj);
}
- dssdev->manager->enable(dssdev->manager);
+ kfree(managers);
+ managers = NULL;
+ num_managers = 0;
}
-static void dss_apply_irq_handler(void *data, u32 mask)
+int omap_dss_get_num_overlay_managers(void)
{
- struct manager_cache_data *mc;
- struct overlay_cache_data *oc;
- const int num_ovls = dss_feat_get_num_ovls();
- const int num_mgrs = dss_feat_get_num_mgrs();
- int i, r;
- bool mgr_busy[MAX_DSS_MANAGERS];
- u32 irq_mask;
-
- for (i = 0; i < num_mgrs; i++)
- mgr_busy[i] = dispc_mgr_go_busy(i);
-
- spin_lock(&dss_cache.lock);
-
- for (i = 0; i < num_ovls; ++i) {
- oc = &dss_cache.overlay_cache[i];
- if (!mgr_busy[oc->channel])
- oc->shadow_dirty = false;
- }
-
- for (i = 0; i < num_mgrs; ++i) {
- mc = &dss_cache.manager_cache[i];
- if (!mgr_busy[i])
- mc->shadow_dirty = false;
- }
-
- r = configure_dispc();
- if (r == 1)
- goto end;
-
- /* re-read busy flags */
- for (i = 0; i < num_mgrs; i++)
- mgr_busy[i] = dispc_mgr_go_busy(i);
-
- /* keep running as long as there are busy managers, so that
- * we can collect overlay-applied information */
- for (i = 0; i < num_mgrs; ++i) {
- if (mgr_busy[i])
- goto end;
- }
-
- irq_mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_ODD |
- DISPC_IRQ_EVSYNC_EVEN;
- if (dss_has_feature(FEAT_MGR_LCD2))
- irq_mask |= DISPC_IRQ_VSYNC2;
-
- omap_dispc_unregister_isr(dss_apply_irq_handler, NULL, irq_mask);
- dss_cache.irq_enabled = false;
-
-end:
- spin_unlock(&dss_cache.lock);
+ return num_managers;
}
+EXPORT_SYMBOL(omap_dss_get_num_overlay_managers);
-static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
+struct omap_overlay_manager *omap_dss_get_overlay_manager(int num)
{
- struct overlay_cache_data *oc;
- struct manager_cache_data *mc;
- int i;
- struct omap_overlay *ovl;
- int num_planes_enabled = 0;
- bool use_fifomerge;
- unsigned long flags;
- int r;
-
- DSSDBG("omap_dss_mgr_apply(%s)\n", mgr->name);
-
- r = dispc_runtime_get();
- if (r)
- return r;
-
- spin_lock_irqsave(&dss_cache.lock, flags);
-
- /* Configure overlays */
- for (i = 0; i < omap_dss_get_num_overlays(); ++i) {
- struct omap_dss_device *dssdev;
-
- ovl = omap_dss_get_overlay(i);
-
- oc = &dss_cache.overlay_cache[ovl->id];
-
- if (ovl->manager_changed) {
- ovl->manager_changed = false;
- ovl->info_dirty = true;
- }
-
- if (!overlay_enabled(ovl)) {
- if (oc->enabled) {
- oc->enabled = false;
- oc->dirty = true;
- }
- continue;
- }
-
- if (!ovl->info_dirty) {
- if (oc->enabled)
- ++num_planes_enabled;
- continue;
- }
-
- dssdev = ovl->manager->device;
-
- if (dss_check_overlay(ovl, dssdev)) {
- if (oc->enabled) {
- oc->enabled = false;
- oc->dirty = true;
- }
- continue;
- }
-
- ovl->info_dirty = false;
- oc->dirty = true;
- oc->info = ovl->info;
-
- oc->replication =
- dss_use_replication(dssdev, ovl->info.color_mode);
-
- oc->ilace = dssdev->type == OMAP_DISPLAY_TYPE_VENC;
-
- oc->channel = ovl->manager->id;
-
- oc->enabled = true;
-
- ++num_planes_enabled;
- }
-
- /* Configure managers */
- list_for_each_entry(mgr, &manager_list, list) {
- struct omap_dss_device *dssdev;
+ if (num >= num_managers)
+ return NULL;
- mc = &dss_cache.manager_cache[mgr->id];
-
- if (mgr->device_changed) {
- mgr->device_changed = false;
- mgr->info_dirty = true;
- }
-
- if (!mgr->info_dirty)
- continue;
-
- if (!mgr->device)
- continue;
-
- dssdev = mgr->device;
-
- mgr->info_dirty = false;
- mc->dirty = true;
- mc->info = mgr->info;
-
- mc->manual_update =
- dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE;
- }
-
- /* XXX TODO: Try to get fifomerge working. The problem is that it
- * affects both managers, not individually but at the same time. This
- * means the change has to be well synchronized. I guess the proper way
- * is to have a two step process for fifo merge:
- * fifomerge enable:
- * 1. disable other planes, leaving one plane enabled
- * 2. wait until the planes are disabled on HW
- * 3. config merged fifo thresholds, enable fifomerge
- * fifomerge disable:
- * 1. config unmerged fifo thresholds, disable fifomerge
- * 2. wait until fifo changes are in HW
- * 3. enable planes
- */
- use_fifomerge = false;
-
- /* Configure overlay fifos */
- for (i = 0; i < omap_dss_get_num_overlays(); ++i) {
- struct omap_dss_device *dssdev;
- u32 size, burst_size;
-
- ovl = omap_dss_get_overlay(i);
-
- oc = &dss_cache.overlay_cache[ovl->id];
-
- if (!oc->enabled)
- continue;
-
- dssdev = ovl->manager->device;
-
- size = dispc_ovl_get_fifo_size(ovl->id);
- if (use_fifomerge)
- size *= 3;
-
- burst_size = dispc_ovl_get_burst_size(ovl->id);
-
- switch (dssdev->type) {
- case OMAP_DISPLAY_TYPE_DPI:
- case OMAP_DISPLAY_TYPE_DBI:
- case OMAP_DISPLAY_TYPE_SDI:
- case OMAP_DISPLAY_TYPE_VENC:
- case OMAP_DISPLAY_TYPE_HDMI:
- default_get_overlay_fifo_thresholds(ovl->id, size,
- burst_size, &oc->fifo_low,
- &oc->fifo_high);
- break;
-#ifdef CONFIG_OMAP2_DSS_DSI
- case OMAP_DISPLAY_TYPE_DSI:
- dsi_get_overlay_fifo_thresholds(ovl->id, size,
- burst_size, &oc->fifo_low,
- &oc->fifo_high);
- break;
-#endif
- default:
- BUG();
- }
- }
-
- r = 0;
- if (!dss_cache.irq_enabled) {
- u32 mask;
-
- mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_ODD |
- DISPC_IRQ_EVSYNC_EVEN;
- if (dss_has_feature(FEAT_MGR_LCD2))
- mask |= DISPC_IRQ_VSYNC2;
-
- r = omap_dispc_register_isr(dss_apply_irq_handler, NULL, mask);
- dss_cache.irq_enabled = true;
- }
- configure_dispc();
-
- spin_unlock_irqrestore(&dss_cache.lock, flags);
-
- dispc_runtime_put();
-
- return r;
+ return &managers[num];
}
+EXPORT_SYMBOL(omap_dss_get_overlay_manager);
-static int dss_check_manager(struct omap_overlay_manager *mgr)
+int dss_mgr_simple_check(struct omap_overlay_manager *mgr,
+ const struct omap_overlay_manager_info *info)
{
if (dss_has_feature(FEAT_ALPHA_FIXED_ZORDER)) {
/*
* OMAP3 supports only graphics source transparency color key
* and alpha blending simultaneously. See TRM 15.4.2.4.2.2
- * Alpha Mode
+ * Alpha Mode.
*/
- if (mgr->info.partial_alpha_enabled && mgr->info.trans_enabled
- && mgr->info.trans_key_type !=
- OMAP_DSS_COLOR_KEY_GFX_DST)
+ if (info->partial_alpha_enabled && info->trans_enabled
+ && info->trans_key_type != OMAP_DSS_COLOR_KEY_GFX_DST) {
+ DSSERR("check_manager: illegal transparency key\n");
return -EINVAL;
+ }
}
return 0;
}
-static int omap_dss_mgr_set_info(struct omap_overlay_manager *mgr,
- struct omap_overlay_manager_info *info)
-{
- int r;
- struct omap_overlay_manager_info old_info;
-
- old_info = mgr->info;
- mgr->info = *info;
-
- r = dss_check_manager(mgr);
- if (r) {
- mgr->info = old_info;
- return r;
- }
-
- mgr->info_dirty = true;
-
- return 0;
-}
-
-static void omap_dss_mgr_get_info(struct omap_overlay_manager *mgr,
- struct omap_overlay_manager_info *info)
+static int dss_mgr_check_zorder(struct omap_overlay_manager *mgr,
+ struct omap_overlay_info **overlay_infos)
{
- *info = mgr->info;
-}
+ struct omap_overlay *ovl1, *ovl2;
+ struct omap_overlay_info *info1, *info2;
-static int dss_mgr_enable(struct omap_overlay_manager *mgr)
-{
- dispc_mgr_enable(mgr->id, 1);
- return 0;
-}
+ list_for_each_entry(ovl1, &mgr->overlays, list) {
+ info1 = overlay_infos[ovl1->id];
-static int dss_mgr_disable(struct omap_overlay_manager *mgr)
-{
- dispc_mgr_enable(mgr->id, 0);
- return 0;
-}
-
-static void omap_dss_add_overlay_manager(struct omap_overlay_manager *manager)
-{
- ++num_managers;
- list_add_tail(&manager->list, &manager_list);
-}
-
-int dss_init_overlay_managers(struct platform_device *pdev)
-{
- int i, r;
-
- spin_lock_init(&dss_cache.lock);
-
- INIT_LIST_HEAD(&manager_list);
-
- num_managers = 0;
-
- for (i = 0; i < dss_feat_get_num_mgrs(); ++i) {
- struct omap_overlay_manager *mgr;
- mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
-
- BUG_ON(mgr == NULL);
-
- switch (i) {
- case 0:
- mgr->name = "lcd";
- mgr->id = OMAP_DSS_CHANNEL_LCD;
- break;
- case 1:
- mgr->name = "tv";
- mgr->id = OMAP_DSS_CHANNEL_DIGIT;
- break;
- case 2:
- mgr->name = "lcd2";
- mgr->id = OMAP_DSS_CHANNEL_LCD2;
- break;
- }
-
- mgr->set_device = &omap_dss_set_device;
- mgr->unset_device = &omap_dss_unset_device;
- mgr->apply = &omap_dss_mgr_apply;
- mgr->set_manager_info = &omap_dss_mgr_set_info;
- mgr->get_manager_info = &omap_dss_mgr_get_info;
- mgr->wait_for_go = &dss_mgr_wait_for_go;
- mgr->wait_for_vsync = &dss_mgr_wait_for_vsync;
-
- mgr->enable = &dss_mgr_enable;
- mgr->disable = &dss_mgr_disable;
-
- mgr->caps = 0;
- mgr->supported_displays =
- dss_feat_get_supported_displays(mgr->id);
+ if (info1 == NULL)
+ continue;
- dss_overlay_setup_dispc_manager(mgr);
+ list_for_each_entry(ovl2, &mgr->overlays, list) {
+ if (ovl1 == ovl2)
+ continue;
- omap_dss_add_overlay_manager(mgr);
+ info2 = overlay_infos[ovl2->id];
- r = kobject_init_and_add(&mgr->kobj, &manager_ktype,
- &pdev->dev.kobj, "manager%d", i);
+ if (info2 == NULL)
+ continue;
- if (r) {
- DSSERR("failed to create sysfs file\n");
- continue;
+ if (info1->zorder == info2->zorder) {
+ DSSERR("overlays %d and %d have the same "
+ "zorder %d\n",
+ ovl1->id, ovl2->id, info1->zorder);
+ return -EINVAL;
+ }
}
}
return 0;
}
-void dss_uninit_overlay_managers(struct platform_device *pdev)
+int dss_mgr_check(struct omap_overlay_manager *mgr,
+ struct omap_dss_device *dssdev,
+ struct omap_overlay_manager_info *info,
+ struct omap_overlay_info **overlay_infos)
{
- struct omap_overlay_manager *mgr;
+ struct omap_overlay *ovl;
+ int r;
- while (!list_empty(&manager_list)) {
- mgr = list_first_entry(&manager_list,
- struct omap_overlay_manager, list);
- list_del(&mgr->list);
- kobject_del(&mgr->kobj);
- kobject_put(&mgr->kobj);
- kfree(mgr);
+ if (dss_has_feature(FEAT_ALPHA_FREE_ZORDER)) {
+ r = dss_mgr_check_zorder(mgr, overlay_infos);
+ if (r)
+ return r;
}
- num_managers = 0;
-}
+ list_for_each_entry(ovl, &mgr->overlays, list) {
+ struct omap_overlay_info *oi;
+ int r;
-int omap_dss_get_num_overlay_managers(void)
-{
- return num_managers;
-}
-EXPORT_SYMBOL(omap_dss_get_num_overlay_managers);
+ oi = overlay_infos[ovl->id];
-struct omap_overlay_manager *omap_dss_get_overlay_manager(int num)
-{
- int i = 0;
- struct omap_overlay_manager *mgr;
+ if (oi == NULL)
+ continue;
- list_for_each_entry(mgr, &manager_list, list) {
- if (i++ == num)
- return mgr;
+ r = dss_ovl_check(ovl, oi, dssdev);
+ if (r)
+ return r;
}
- return NULL;
+ return 0;
}
-EXPORT_SYMBOL(omap_dss_get_overlay_manager);
-
diff --git a/drivers/video/omap2/dss/overlay.c b/drivers/video/omap2/dss/overlay.c
index ab8e40e48759..6e821810deec 100644
--- a/drivers/video/omap2/dss/overlay.c
+++ b/drivers/video/omap2/dss/overlay.c
@@ -38,7 +38,7 @@
#include "dss_features.h"
static int num_overlays;
-static struct list_head overlay_list;
+static struct omap_overlay *overlays;
static ssize_t overlay_name_show(struct omap_overlay *ovl, char *buf)
{
@@ -124,19 +124,31 @@ err:
static ssize_t overlay_input_size_show(struct omap_overlay *ovl, char *buf)
{
+ struct omap_overlay_info info;
+
+ ovl->get_overlay_info(ovl, &info);
+
return snprintf(buf, PAGE_SIZE, "%d,%d\n",
- ovl->info.width, ovl->info.height);
+ info.width, info.height);
}
static ssize_t overlay_screen_width_show(struct omap_overlay *ovl, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%d\n", ovl->info.screen_width);
+ struct omap_overlay_info info;
+
+ ovl->get_overlay_info(ovl, &info);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", info.screen_width);
}
static ssize_t overlay_position_show(struct omap_overlay *ovl, char *buf)
{
+ struct omap_overlay_info info;
+
+ ovl->get_overlay_info(ovl, &info);
+
return snprintf(buf, PAGE_SIZE, "%d,%d\n",
- ovl->info.pos_x, ovl->info.pos_y);
+ info.pos_x, info.pos_y);
}
static ssize_t overlay_position_store(struct omap_overlay *ovl,
@@ -170,8 +182,12 @@ static ssize_t overlay_position_store(struct omap_overlay *ovl,
static ssize_t overlay_output_size_show(struct omap_overlay *ovl, char *buf)
{
+ struct omap_overlay_info info;
+
+ ovl->get_overlay_info(ovl, &info);
+
return snprintf(buf, PAGE_SIZE, "%d,%d\n",
- ovl->info.out_width, ovl->info.out_height);
+ info.out_width, info.out_height);
}
static ssize_t overlay_output_size_store(struct omap_overlay *ovl,
@@ -205,7 +221,7 @@ static ssize_t overlay_output_size_store(struct omap_overlay *ovl,
static ssize_t overlay_enabled_show(struct omap_overlay *ovl, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%d\n", ovl->info.enabled);
+ return snprintf(buf, PAGE_SIZE, "%d\n", ovl->is_enabled(ovl));
}
static ssize_t overlay_enabled_store(struct omap_overlay *ovl, const char *buf,
@@ -213,33 +229,30 @@ static ssize_t overlay_enabled_store(struct omap_overlay *ovl, const char *buf,
{
int r;
bool enable;
- struct omap_overlay_info info;
-
- ovl->get_overlay_info(ovl, &info);
r = strtobool(buf, &enable);
if (r)
return r;
- info.enabled = enable;
+ if (enable)
+ r = ovl->enable(ovl);
+ else
+ r = ovl->disable(ovl);
- r = ovl->set_overlay_info(ovl, &info);
if (r)
return r;
- if (ovl->manager) {
- r = ovl->manager->apply(ovl->manager);
- if (r)
- return r;
- }
-
return size;
}
static ssize_t overlay_global_alpha_show(struct omap_overlay *ovl, char *buf)
{
+ struct omap_overlay_info info;
+
+ ovl->get_overlay_info(ovl, &info);
+
return snprintf(buf, PAGE_SIZE, "%d\n",
- ovl->info.global_alpha);
+ info.global_alpha);
}
static ssize_t overlay_global_alpha_store(struct omap_overlay *ovl,
@@ -276,8 +289,12 @@ static ssize_t overlay_global_alpha_store(struct omap_overlay *ovl,
static ssize_t overlay_pre_mult_alpha_show(struct omap_overlay *ovl,
char *buf)
{
+ struct omap_overlay_info info;
+
+ ovl->get_overlay_info(ovl, &info);
+
return snprintf(buf, PAGE_SIZE, "%d\n",
- ovl->info.pre_mult_alpha);
+ info.pre_mult_alpha);
}
static ssize_t overlay_pre_mult_alpha_store(struct omap_overlay *ovl,
@@ -313,7 +330,11 @@ static ssize_t overlay_pre_mult_alpha_store(struct omap_overlay *ovl,
static ssize_t overlay_zorder_show(struct omap_overlay *ovl, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%d\n", ovl->info.zorder);
+ struct omap_overlay_info info;
+
+ ovl->get_overlay_info(ovl, &info);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", info.zorder);
}
static ssize_t overlay_zorder_store(struct omap_overlay *ovl,
@@ -430,183 +451,6 @@ static struct kobj_type overlay_ktype = {
.default_attrs = overlay_sysfs_attrs,
};
-/* Check if overlay parameters are compatible with display */
-int dss_check_overlay(struct omap_overlay *ovl, struct omap_dss_device *dssdev)
-{
- struct omap_overlay_info *info;
- u16 outw, outh;
- u16 dw, dh;
- int i;
-
- if (!dssdev)
- return 0;
-
- if (!ovl->info.enabled)
- return 0;
-
- info = &ovl->info;
-
- if (info->paddr == 0) {
- DSSDBG("check_overlay failed: paddr 0\n");
- return -EINVAL;
- }
-
- dssdev->driver->get_resolution(dssdev, &dw, &dh);
-
- DSSDBG("check_overlay %d: (%d,%d %dx%d -> %dx%d) disp (%dx%d)\n",
- ovl->id,
- info->pos_x, info->pos_y,
- info->width, info->height,
- info->out_width, info->out_height,
- dw, dh);
-
- if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0) {
- outw = info->width;
- outh = info->height;
- } else {
- if (info->out_width == 0)
- outw = info->width;
- else
- outw = info->out_width;
-
- if (info->out_height == 0)
- outh = info->height;
- else
- outh = info->out_height;
- }
-
- if (dw < info->pos_x + outw) {
- DSSDBG("check_overlay failed 1: %d < %d + %d\n",
- dw, info->pos_x, outw);
- return -EINVAL;
- }
-
- if (dh < info->pos_y + outh) {
- DSSDBG("check_overlay failed 2: %d < %d + %d\n",
- dh, info->pos_y, outh);
- return -EINVAL;
- }
-
- if ((ovl->supported_modes & info->color_mode) == 0) {
- DSSERR("overlay doesn't support mode %d\n", info->color_mode);
- return -EINVAL;
- }
-
- if (ovl->caps & OMAP_DSS_OVL_CAP_ZORDER) {
- if (info->zorder < 0 || info->zorder > 3) {
- DSSERR("zorder out of range: %d\n",
- info->zorder);
- return -EINVAL;
- }
- /*
- * Check that zorder doesn't match with zorder of any other
- * overlay which is enabled and is also connected to the same
- * manager
- */
- for (i = 0; i < omap_dss_get_num_overlays(); i++) {
- struct omap_overlay *tmp_ovl = omap_dss_get_overlay(i);
-
- if (tmp_ovl->id != ovl->id &&
- tmp_ovl->manager == ovl->manager &&
- tmp_ovl->info.enabled == true &&
- tmp_ovl->info.zorder == info->zorder) {
- DSSERR("%s and %s have same zorder: %d\n",
- ovl->name, tmp_ovl->name, info->zorder);
- return -EINVAL;
- }
- }
- }
-
- return 0;
-}
-
-static int dss_ovl_set_overlay_info(struct omap_overlay *ovl,
- struct omap_overlay_info *info)
-{
- int r;
- struct omap_overlay_info old_info;
-
- old_info = ovl->info;
- ovl->info = *info;
-
- if (ovl->manager) {
- r = dss_check_overlay(ovl, ovl->manager->device);
- if (r) {
- ovl->info = old_info;
- return r;
- }
- }
-
- ovl->info_dirty = true;
-
- return 0;
-}
-
-static void dss_ovl_get_overlay_info(struct omap_overlay *ovl,
- struct omap_overlay_info *info)
-{
- *info = ovl->info;
-}
-
-static int dss_ovl_wait_for_go(struct omap_overlay *ovl)
-{
- return dss_mgr_wait_for_go_ovl(ovl);
-}
-
-static int omap_dss_set_manager(struct omap_overlay *ovl,
- struct omap_overlay_manager *mgr)
-{
- if (!mgr)
- return -EINVAL;
-
- if (ovl->manager) {
- DSSERR("overlay '%s' already has a manager '%s'\n",
- ovl->name, ovl->manager->name);
- return -EINVAL;
- }
-
- if (ovl->info.enabled) {
- DSSERR("overlay has to be disabled to change the manager\n");
- return -EINVAL;
- }
-
- ovl->manager = mgr;
- ovl->manager_changed = true;
-
- /* XXX: When there is an overlay on a DSI manual update display, and
- * the overlay is first disabled, then moved to tv, and enabled, we
- * seem to get SYNC_LOST_DIGIT error.
- *
- * Waiting doesn't seem to help, but updating the manual update display
- * after disabling the overlay seems to fix this. This hints that the
- * overlay is perhaps somehow tied to the LCD output until the output
- * is updated.
- *
- * Userspace workaround for this is to update the LCD after disabling
- * the overlay, but before moving the overlay to TV.
- */
-
- return 0;
-}
-
-static int omap_dss_unset_manager(struct omap_overlay *ovl)
-{
- if (!ovl->manager) {
- DSSERR("failed to detach overlay: manager not set\n");
- return -EINVAL;
- }
-
- if (ovl->info.enabled) {
- DSSERR("overlay has to be disabled to unset the manager\n");
- return -EINVAL;
- }
-
- ovl->manager = NULL;
- ovl->manager_changed = true;
-
- return 0;
-}
-
int omap_dss_get_num_overlays(void)
{
return num_overlays;
@@ -615,134 +459,65 @@ EXPORT_SYMBOL(omap_dss_get_num_overlays);
struct omap_overlay *omap_dss_get_overlay(int num)
{
- int i = 0;
- struct omap_overlay *ovl;
+ if (num >= num_overlays)
+ return NULL;
- list_for_each_entry(ovl, &overlay_list, list) {
- if (i++ == num)
- return ovl;
- }
-
- return NULL;
+ return &overlays[num];
}
EXPORT_SYMBOL(omap_dss_get_overlay);
-static void omap_dss_add_overlay(struct omap_overlay *overlay)
-{
- ++num_overlays;
- list_add_tail(&overlay->list, &overlay_list);
-}
-
-static struct omap_overlay *dispc_overlays[MAX_DSS_OVERLAYS];
-
-void dss_overlay_setup_dispc_manager(struct omap_overlay_manager *mgr)
-{
- mgr->num_overlays = dss_feat_get_num_ovls();
- mgr->overlays = dispc_overlays;
-}
-
-#ifdef L4_EXAMPLE
-static struct omap_overlay *l4_overlays[1];
-void dss_overlay_setup_l4_manager(struct omap_overlay_manager *mgr)
-{
- mgr->num_overlays = 1;
- mgr->overlays = l4_overlays;
-}
-#endif
-
void dss_init_overlays(struct platform_device *pdev)
{
int i, r;
- INIT_LIST_HEAD(&overlay_list);
+ num_overlays = dss_feat_get_num_ovls();
- num_overlays = 0;
+ overlays = kzalloc(sizeof(struct omap_overlay) * num_overlays,
+ GFP_KERNEL);
- for (i = 0; i < dss_feat_get_num_ovls(); ++i) {
- struct omap_overlay *ovl;
- ovl = kzalloc(sizeof(*ovl), GFP_KERNEL);
+ BUG_ON(overlays == NULL);
- BUG_ON(ovl == NULL);
+ for (i = 0; i < num_overlays; ++i) {
+ struct omap_overlay *ovl = &overlays[i];
switch (i) {
case 0:
ovl->name = "gfx";
ovl->id = OMAP_DSS_GFX;
- ovl->info.global_alpha = 255;
- ovl->info.zorder = 0;
break;
case 1:
ovl->name = "vid1";
ovl->id = OMAP_DSS_VIDEO1;
- ovl->info.global_alpha = 255;
- ovl->info.zorder =
- dss_has_feature(FEAT_ALPHA_FREE_ZORDER) ? 3 : 0;
break;
case 2:
ovl->name = "vid2";
ovl->id = OMAP_DSS_VIDEO2;
- ovl->info.global_alpha = 255;
- ovl->info.zorder =
- dss_has_feature(FEAT_ALPHA_FREE_ZORDER) ? 2 : 0;
break;
case 3:
ovl->name = "vid3";
ovl->id = OMAP_DSS_VIDEO3;
- ovl->info.global_alpha = 255;
- ovl->info.zorder =
- dss_has_feature(FEAT_ALPHA_FREE_ZORDER) ? 1 : 0;
break;
}
- ovl->set_manager = &omap_dss_set_manager;
- ovl->unset_manager = &omap_dss_unset_manager;
- ovl->set_overlay_info = &dss_ovl_set_overlay_info;
- ovl->get_overlay_info = &dss_ovl_get_overlay_info;
- ovl->wait_for_go = &dss_ovl_wait_for_go;
+ ovl->is_enabled = &dss_ovl_is_enabled;
+ ovl->enable = &dss_ovl_enable;
+ ovl->disable = &dss_ovl_disable;
+ ovl->set_manager = &dss_ovl_set_manager;
+ ovl->unset_manager = &dss_ovl_unset_manager;
+ ovl->set_overlay_info = &dss_ovl_set_info;
+ ovl->get_overlay_info = &dss_ovl_get_info;
+ ovl->wait_for_go = &dss_mgr_wait_for_go_ovl;
ovl->caps = dss_feat_get_overlay_caps(ovl->id);
ovl->supported_modes =
dss_feat_get_supported_color_modes(ovl->id);
- omap_dss_add_overlay(ovl);
-
r = kobject_init_and_add(&ovl->kobj, &overlay_ktype,
&pdev->dev.kobj, "overlay%d", i);
- if (r) {
- DSSERR("failed to create sysfs file\n");
- continue;
- }
-
- dispc_overlays[i] = ovl;
- }
-
-#ifdef L4_EXAMPLE
- {
- struct omap_overlay *ovl;
- ovl = kzalloc(sizeof(*ovl), GFP_KERNEL);
-
- BUG_ON(ovl == NULL);
-
- ovl->name = "l4";
- ovl->supported_modes = OMAP_DSS_COLOR_RGB24U;
-
- ovl->set_manager = &omap_dss_set_manager;
- ovl->unset_manager = &omap_dss_unset_manager;
- ovl->set_overlay_info = &dss_ovl_set_overlay_info;
- ovl->get_overlay_info = &dss_ovl_get_overlay_info;
-
- omap_dss_add_overlay(ovl);
-
- r = kobject_init_and_add(&ovl->kobj, &overlay_ktype,
- &pdev->dev.kobj, "overlayl4");
-
if (r)
DSSERR("failed to create sysfs file\n");
-
- l4_overlays[0] = ovl;
}
-#endif
}
/* connect overlays to the new device, if not already connected. if force
@@ -795,8 +570,8 @@ void dss_recheck_connections(struct omap_dss_device *dssdev, bool force)
ovl = omap_dss_get_overlay(i);
if (!ovl->manager || force) {
if (ovl->manager)
- omap_dss_unset_manager(ovl);
- omap_dss_set_manager(ovl, mgr);
+ ovl->unset_manager(ovl);
+ ovl->set_manager(ovl, mgr);
}
}
@@ -806,17 +581,95 @@ void dss_recheck_connections(struct omap_dss_device *dssdev, bool force)
void dss_uninit_overlays(struct platform_device *pdev)
{
- struct omap_overlay *ovl;
+ int i;
+
+ for (i = 0; i < num_overlays; ++i) {
+ struct omap_overlay *ovl = &overlays[i];
- while (!list_empty(&overlay_list)) {
- ovl = list_first_entry(&overlay_list,
- struct omap_overlay, list);
- list_del(&ovl->list);
kobject_del(&ovl->kobj);
kobject_put(&ovl->kobj);
- kfree(ovl);
}
+ kfree(overlays);
+ overlays = NULL;
num_overlays = 0;
}
+int dss_ovl_simple_check(struct omap_overlay *ovl,
+ const struct omap_overlay_info *info)
+{
+ if (info->paddr == 0) {
+ DSSERR("check_overlay: paddr cannot be 0\n");
+ return -EINVAL;
+ }
+
+ if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0) {
+ if (info->out_width != 0 && info->width != info->out_width) {
+ DSSERR("check_overlay: overlay %d doesn't support "
+ "scaling\n", ovl->id);
+ return -EINVAL;
+ }
+
+ if (info->out_height != 0 && info->height != info->out_height) {
+ DSSERR("check_overlay: overlay %d doesn't support "
+ "scaling\n", ovl->id);
+ return -EINVAL;
+ }
+ }
+
+ if ((ovl->supported_modes & info->color_mode) == 0) {
+ DSSERR("check_overlay: overlay %d doesn't support mode %d\n",
+ ovl->id, info->color_mode);
+ return -EINVAL;
+ }
+
+ if (info->zorder >= omap_dss_get_num_overlays()) {
+ DSSERR("check_overlay: zorder %d too high\n", info->zorder);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int dss_ovl_check(struct omap_overlay *ovl,
+ struct omap_overlay_info *info, struct omap_dss_device *dssdev)
+{
+ u16 outw, outh;
+ u16 dw, dh;
+
+ if (dssdev == NULL)
+ return 0;
+
+ dssdev->driver->get_resolution(dssdev, &dw, &dh);
+
+ if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0) {
+ outw = info->width;
+ outh = info->height;
+ } else {
+ if (info->out_width == 0)
+ outw = info->width;
+ else
+ outw = info->out_width;
+
+ if (info->out_height == 0)
+ outh = info->height;
+ else
+ outh = info->out_height;
+ }
+
+ if (dw < info->pos_x + outw) {
+ DSSERR("overlay %d horizontally not inside the display area "
+ "(%d + %d >= %d)\n",
+ ovl->id, info->pos_x, outw, dw);
+ return -EINVAL;
+ }
+
+ if (dh < info->pos_y + outh) {
+ DSSERR("overlay %d vertically not inside the display area "
+ "(%d + %d >= %d)\n",
+ ovl->id, info->pos_y, outh, dh);
+ return -EINVAL;
+ }
+
+ return 0;
+}
diff --git a/drivers/video/omap2/dss/rfbi.c b/drivers/video/omap2/dss/rfbi.c
index 1130c608a561..814bb9500dca 100644
--- a/drivers/video/omap2/dss/rfbi.c
+++ b/drivers/video/omap2/dss/rfbi.c
@@ -784,7 +784,6 @@ int omap_rfbi_prepare_update(struct omap_dss_device *dssdev,
if (*w == 0 || *h == 0)
return -EINVAL;
- dss_setup_partial_planes(dssdev, x, y, w, h, true);
dispc_mgr_set_lcd_size(dssdev->manager->id, *w, *h);
return 0;
diff --git a/drivers/video/omap2/dss/sdi.c b/drivers/video/omap2/dss/sdi.c
index 40305ad7841e..8266ca0d666b 100644
--- a/drivers/video/omap2/dss/sdi.c
+++ b/drivers/video/omap2/dss/sdi.c
@@ -123,10 +123,14 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev)
goto err_sdi_enable;
mdelay(2);
- dssdev->manager->enable(dssdev->manager);
+ r = dss_mgr_enable(dssdev->manager);
+ if (r)
+ goto err_mgr_enable;
return 0;
+err_mgr_enable:
+ dss_sdi_disable();
err_sdi_enable:
err_set_dispc_clock_div:
err_set_dss_clock_div:
@@ -145,7 +149,7 @@ EXPORT_SYMBOL(omapdss_sdi_display_enable);
void omapdss_sdi_display_disable(struct omap_dss_device *dssdev)
{
- dssdev->manager->disable(dssdev->manager);
+ dss_mgr_disable(dssdev->manager);
dss_sdi_disable();
diff --git a/drivers/video/omap2/dss/ti_hdmi.h b/drivers/video/omap2/dss/ti_hdmi.h
index 2c3443dabb14..7503f7f619a7 100644
--- a/drivers/video/omap2/dss/ti_hdmi.h
+++ b/drivers/video/omap2/dss/ti_hdmi.h
@@ -110,6 +110,11 @@ struct ti_hdmi_ip_ops {
void (*dump_phy)(struct hdmi_ip_data *ip_data, struct seq_file *s);
+#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
+ defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
+ void (*audio_enable)(struct hdmi_ip_data *ip_data, bool start);
+#endif
+
};
struct hdmi_ip_data {
@@ -134,5 +139,8 @@ void ti_hdmi_4xxx_wp_dump(struct hdmi_ip_data *ip_data, struct seq_file *s);
void ti_hdmi_4xxx_pll_dump(struct hdmi_ip_data *ip_data, struct seq_file *s);
void ti_hdmi_4xxx_core_dump(struct hdmi_ip_data *ip_data, struct seq_file *s);
void ti_hdmi_4xxx_phy_dump(struct hdmi_ip_data *ip_data, struct seq_file *s);
-
+#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
+ defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
+void ti_hdmi_4xxx_wp_audio_enable(struct hdmi_ip_data *ip_data, bool enable);
+#endif
#endif
diff --git a/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c b/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c
index e1a6ce518af6..9af81f18f163 100644
--- a/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c
+++ b/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c
@@ -1204,36 +1204,13 @@ int hdmi_config_audio_acr(struct hdmi_ip_data *ip_data,
return 0;
}
-int hdmi_audio_trigger(struct hdmi_ip_data *ip_data,
- struct snd_pcm_substream *substream, int cmd,
- struct snd_soc_dai *dai)
+void ti_hdmi_4xxx_wp_audio_enable(struct hdmi_ip_data *ip_data, bool enable)
{
- int err = 0;
- switch (cmd) {
- case SNDRV_PCM_TRIGGER_START:
- case SNDRV_PCM_TRIGGER_RESUME:
- case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
- REG_FLD_MOD(hdmi_av_base(ip_data),
- HDMI_CORE_AV_AUD_MODE, 1, 0, 0);
- REG_FLD_MOD(hdmi_wp_base(ip_data),
- HDMI_WP_AUDIO_CTRL, 1, 31, 31);
- REG_FLD_MOD(hdmi_wp_base(ip_data),
- HDMI_WP_AUDIO_CTRL, 1, 30, 30);
- break;
-
- case SNDRV_PCM_TRIGGER_STOP:
- case SNDRV_PCM_TRIGGER_SUSPEND:
- case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
- REG_FLD_MOD(hdmi_av_base(ip_data),
- HDMI_CORE_AV_AUD_MODE, 0, 0, 0);
- REG_FLD_MOD(hdmi_wp_base(ip_data),
- HDMI_WP_AUDIO_CTRL, 0, 30, 30);
- REG_FLD_MOD(hdmi_wp_base(ip_data),
- HDMI_WP_AUDIO_CTRL, 0, 31, 31);
- break;
- default:
- err = -EINVAL;
- }
- return err;
+ REG_FLD_MOD(hdmi_av_base(ip_data),
+ HDMI_CORE_AV_AUD_MODE, enable, 0, 0);
+ REG_FLD_MOD(hdmi_wp_base(ip_data),
+ HDMI_WP_AUDIO_CTRL, enable, 31, 31);
+ REG_FLD_MOD(hdmi_wp_base(ip_data),
+ HDMI_WP_AUDIO_CTRL, enable, 30, 30);
}
#endif
diff --git a/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.h b/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.h
index 204095632d27..a442998980f1 100644
--- a/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.h
+++ b/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.h
@@ -576,9 +576,6 @@ struct hdmi_core_audio_config {
#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
-int hdmi_audio_trigger(struct hdmi_ip_data *ip_data,
- struct snd_pcm_substream *substream, int cmd,
- struct snd_soc_dai *dai);
int hdmi_config_audio_acr(struct hdmi_ip_data *ip_data,
u32 sample_freq, u32 *n, u32 *cts);
void hdmi_core_audio_infoframe_config(struct hdmi_ip_data *ip_data,
diff --git a/drivers/video/omap2/dss/venc.c b/drivers/video/omap2/dss/venc.c
index 7533458ba4d2..b3e9f9091581 100644
--- a/drivers/video/omap2/dss/venc.c
+++ b/drivers/video/omap2/dss/venc.c
@@ -417,9 +417,10 @@ static const struct venc_config *venc_timings_to_config(
BUG();
}
-static void venc_power_on(struct omap_dss_device *dssdev)
+static int venc_power_on(struct omap_dss_device *dssdev)
{
u32 l;
+ int r;
venc_reset();
venc_write_config(venc_timings_to_config(&dssdev->panel.timings));
@@ -447,7 +448,22 @@ static void venc_power_on(struct omap_dss_device *dssdev)
if (dssdev->platform_enable)
dssdev->platform_enable(dssdev);
- dssdev->manager->enable(dssdev->manager);
+ r = dss_mgr_enable(dssdev->manager);
+ if (r)
+ goto err;
+
+ return 0;
+
+err:
+ venc_write_reg(VENC_OUTPUT_CONTROL, 0);
+ dss_set_dac_pwrdn_bgz(0);
+
+ if (dssdev->platform_disable)
+ dssdev->platform_disable(dssdev);
+
+ regulator_disable(venc.vdda_dac_reg);
+
+ return r;
}
static void venc_power_off(struct omap_dss_device *dssdev)
@@ -455,7 +471,7 @@ static void venc_power_off(struct omap_dss_device *dssdev)
venc_write_reg(VENC_OUTPUT_CONTROL, 0);
dss_set_dac_pwrdn_bgz(0);
- dssdev->manager->disable(dssdev->manager);
+ dss_mgr_disable(dssdev->manager);
if (dssdev->platform_disable)
dssdev->platform_disable(dssdev);
@@ -504,7 +520,9 @@ static int venc_panel_enable(struct omap_dss_device *dssdev)
if (r)
goto err1;
- venc_power_on(dssdev);
+ r = venc_power_on(dssdev);
+ if (r)
+ goto err2;
venc.wss_data = 0;
@@ -512,6 +530,8 @@ static int venc_panel_enable(struct omap_dss_device *dssdev)
mutex_unlock(&venc.venc_lock);
return 0;
+err2:
+ venc_runtime_put();
err1:
omap_dss_stop_device(dssdev);
err0:
diff --git a/drivers/video/omap2/omapfb/Kconfig b/drivers/video/omap2/omapfb/Kconfig
index 83d3fe7ec9ae..4ea17dc3258c 100644
--- a/drivers/video/omap2/omapfb/Kconfig
+++ b/drivers/video/omap2/omapfb/Kconfig
@@ -1,6 +1,6 @@
menuconfig FB_OMAP2
tristate "OMAP2+ frame buffer support"
- depends on FB && OMAP2_DSS
+ depends on FB && OMAP2_DSS && !DRM_OMAP
select OMAP2_VRAM
select OMAP2_VRFB if ARCH_OMAP2 || ARCH_OMAP3
diff --git a/drivers/video/omap2/omapfb/omapfb-ioctl.c b/drivers/video/omap2/omapfb/omapfb-ioctl.c
index df7bcce5b107..16ba6196f330 100644
--- a/drivers/video/omap2/omapfb/omapfb-ioctl.c
+++ b/drivers/video/omap2/omapfb/omapfb-ioctl.c
@@ -111,28 +111,22 @@ static int omapfb_setup_plane(struct fb_info *fbi, struct omapfb_plane_info *pi)
set_fb_fix(fbi);
}
- if (pi->enabled) {
- struct omap_overlay_info info;
+ if (!pi->enabled) {
+ r = ovl->disable(ovl);
+ if (r)
+ goto undo;
+ }
+ if (pi->enabled) {
r = omapfb_setup_overlay(fbi, ovl, pi->pos_x, pi->pos_y,
pi->out_width, pi->out_height);
if (r)
goto undo;
-
- ovl->get_overlay_info(ovl, &info);
-
- if (!info.enabled) {
- info.enabled = pi->enabled;
- r = ovl->set_overlay_info(ovl, &info);
- if (r)
- goto undo;
- }
} else {
struct omap_overlay_info info;
ovl->get_overlay_info(ovl, &info);
- info.enabled = pi->enabled;
info.pos_x = pi->pos_x;
info.pos_y = pi->pos_y;
info.out_width = pi->out_width;
@@ -146,6 +140,12 @@ static int omapfb_setup_plane(struct fb_info *fbi, struct omapfb_plane_info *pi)
if (ovl->manager)
ovl->manager->apply(ovl->manager);
+ if (pi->enabled) {
+ r = ovl->enable(ovl);
+ if (r)
+ goto undo;
+ }
+
/* Release the locks in a specific order to keep lockdep happy */
if (old_rg->id > new_rg->id) {
omapfb_put_mem_region(old_rg);
@@ -189,19 +189,19 @@ static int omapfb_query_plane(struct fb_info *fbi, struct omapfb_plane_info *pi)
memset(pi, 0, sizeof(*pi));
} else {
struct omap_overlay *ovl;
- struct omap_overlay_info *ovli;
+ struct omap_overlay_info ovli;
ovl = ofbi->overlays[0];
- ovli = &ovl->info;
+ ovl->get_overlay_info(ovl, &ovli);
- pi->pos_x = ovli->pos_x;
- pi->pos_y = ovli->pos_y;
- pi->enabled = ovli->enabled;
+ pi->pos_x = ovli.pos_x;
+ pi->pos_y = ovli.pos_y;
+ pi->enabled = ovl->is_enabled(ovl);
pi->channel_out = 0; /* xxx */
pi->mirror = 0;
pi->mem_idx = get_mem_idx(ofbi);
- pi->out_width = ovli->out_width;
- pi->out_height = ovli->out_height;
+ pi->out_width = ovli.out_width;
+ pi->out_height = ovli.out_height;
}
return 0;
@@ -238,7 +238,9 @@ static int omapfb_setup_mem(struct fb_info *fbi, struct omapfb_mem_info *mi)
continue;
for (j = 0; j < ofbi2->num_overlays; j++) {
- if (ofbi2->overlays[j]->info.enabled) {
+ struct omap_overlay *ovl;
+ ovl = ofbi2->overlays[j];
+ if (ovl->is_enabled(ovl)) {
r = -EBUSY;
goto out;
}
diff --git a/drivers/video/omap2/omapfb/omapfb-main.c b/drivers/video/omap2/omapfb/omapfb-main.c
index 70aa47de7146..ce158311ff59 100644
--- a/drivers/video/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/omap2/omapfb/omapfb-main.c
@@ -43,18 +43,18 @@
static char *def_mode;
static char *def_vram;
-static int def_vrfb;
+static bool def_vrfb;
static int def_rotate;
-static int def_mirror;
+static bool def_mirror;
static bool auto_update;
static unsigned int auto_update_freq;
module_param(auto_update, bool, 0);
module_param(auto_update_freq, uint, 0644);
#ifdef DEBUG
-unsigned int omapfb_debug;
+bool omapfb_debug;
module_param_named(debug, omapfb_debug, bool, 0644);
-static unsigned int omapfb_test_pattern;
+static bool omapfb_test_pattern;
module_param_named(test, omapfb_test_pattern, bool, 0644);
#endif
@@ -970,16 +970,20 @@ int omapfb_apply_changes(struct fb_info *fbi, int init)
outh = var->yres;
}
} else {
- outw = ovl->info.out_width;
- outh = ovl->info.out_height;
+ struct omap_overlay_info info;
+ ovl->get_overlay_info(ovl, &info);
+ outw = info.out_width;
+ outh = info.out_height;
}
if (init) {
posx = 0;
posy = 0;
} else {
- posx = ovl->info.pos_x;
- posy = ovl->info.pos_y;
+ struct omap_overlay_info info;
+ ovl->get_overlay_info(ovl, &info);
+ posx = info.pos_x;
+ posy = info.pos_y;
}
r = omapfb_setup_overlay(fbi, ovl, posx, posy, outw, outh);
@@ -2067,6 +2071,8 @@ static int omapfb_create_framebuffers(struct omapfb2_device *fbdev)
if (ofbi->num_overlays > 0) {
struct omap_overlay *ovl = ofbi->overlays[0];
+ ovl->manager->apply(ovl->manager);
+
r = omapfb_overlay_enable(ovl, 1);
if (r) {
diff --git a/drivers/video/omap2/omapfb/omapfb-sysfs.c b/drivers/video/omap2/omapfb/omapfb-sysfs.c
index 1694d5148f32..e8d8cc76a435 100644
--- a/drivers/video/omap2/omapfb/omapfb-sysfs.c
+++ b/drivers/video/omap2/omapfb/omapfb-sysfs.c
@@ -473,7 +473,9 @@ static ssize_t store_size(struct device *dev, struct device_attribute *attr,
continue;
for (j = 0; j < ofbi2->num_overlays; j++) {
- if (ofbi2->overlays[j]->info.enabled) {
+ struct omap_overlay *ovl;
+ ovl = ofbi2->overlays[j];
+ if (ovl->is_enabled(ovl)) {
r = -EBUSY;
goto out;
}
diff --git a/drivers/video/omap2/omapfb/omapfb.h b/drivers/video/omap2/omapfb/omapfb.h
index fdf0edeccf4e..c0bdc9b54ecf 100644
--- a/drivers/video/omap2/omapfb/omapfb.h
+++ b/drivers/video/omap2/omapfb/omapfb.h
@@ -32,7 +32,7 @@
#include <video/omapdss.h>
#ifdef DEBUG
-extern unsigned int omapfb_debug;
+extern bool omapfb_debug;
#define DBG(format, ...) \
do { \
if (omapfb_debug) \
@@ -181,13 +181,10 @@ static inline void omapfb_unlock(struct omapfb2_device *fbdev)
static inline int omapfb_overlay_enable(struct omap_overlay *ovl,
int enable)
{
- struct omap_overlay_info info;
-
- ovl->get_overlay_info(ovl, &info);
- if (info.enabled == enable)
- return 0;
- info.enabled = enable;
- return ovl->set_overlay_info(ovl, &info);
+ if (enable)
+ return ovl->enable(ovl);
+ else
+ return ovl->disable(ovl);
}
static inline struct omapfb2_mem_region *
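
With dedicated enable/disable ops on the overlay, the old read-modify-write through omap_overlay_info is no longer needed and the helper collapses to a forwarding call. A sketch of a caller, under the same assumptions as above (ovl comes from the framebuffer's overlay array, fbdev->dev is a placeholder):

	/* enable or disable without touching omap_overlay_info at all */
	int r = enable ? ovl->enable(ovl) : ovl->disable(ovl);
	if (r)
		dev_err(fbdev->dev, "overlay %s failed: %d\n",
			enable ? "enable" : "disable", r);
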
diff --git a/drivers/video/pm2fb.c b/drivers/video/pm2fb.c
index dc7bfa91e57a..df31a24a5026 100644
--- a/drivers/video/pm2fb.c
+++ b/drivers/video/pm2fb.c
@@ -78,12 +78,12 @@ static char *mode_option __devinitdata;
* these flags allow the user to specify that requests for +ve sync
* should be silently turned in -ve sync.
*/
-static int lowhsync;
-static int lowvsync;
-static int noaccel __devinitdata;
+static bool lowhsync;
+static bool lowvsync;
+static bool noaccel __devinitdata;
/* mtrr option */
#ifdef CONFIG_MTRR
-static int nomtrr __devinitdata;
+static bool nomtrr __devinitdata;
#endif
/*
diff --git a/drivers/video/pm3fb.c b/drivers/video/pm3fb.c
index 6632ee5ecb7e..055e527a8e45 100644
--- a/drivers/video/pm3fb.c
+++ b/drivers/video/pm3fb.c
@@ -57,11 +57,11 @@
*/
static int hwcursor = 1;
static char *mode_option __devinitdata;
-static int noaccel __devinitdata;
+static bool noaccel __devinitdata;
/* mtrr option */
#ifdef CONFIG_MTRR
-static int nomtrr __devinitdata;
+static bool nomtrr __devinitdata;
#endif
/*
diff --git a/drivers/video/pnx4008/pnxrgbfb.c b/drivers/video/pnx4008/pnxrgbfb.c
index b2252fea2858..6d30428e9cf9 100644
--- a/drivers/video/pnx4008/pnxrgbfb.c
+++ b/drivers/video/pnx4008/pnxrgbfb.c
@@ -193,17 +193,6 @@ static struct platform_driver rgbfb_driver = {
.remove = rgbfb_remove,
};
-static int __init rgbfb_init(void)
-{
- return platform_driver_register(&rgbfb_driver);
-}
-
-static void __exit rgbfb_exit(void)
-{
- platform_driver_unregister(&rgbfb_driver);
-}
-
-module_init(rgbfb_init);
-module_exit(rgbfb_exit);
+module_platform_driver(rgbfb_driver);
MODULE_LICENSE("GPL");
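
The boilerplate removed here (and from a dozen other framebuffer drivers later in this diff) is exactly what the module_platform_driver() helper expands to: a module_init() that registers the platform_driver and a module_exit() that unregisters it. A minimal, self-contained sketch with a hypothetical driver name:

	#include <linux/module.h>
	#include <linux/platform_device.h>

	static int examplefb_probe(struct platform_device *pdev)
	{
		return 0;	/* map resources, register the framebuffer, ... */
	}

	static int examplefb_remove(struct platform_device *pdev)
	{
		return 0;
	}

	static struct platform_driver examplefb_driver = {
		.probe	= examplefb_probe,
		.remove	= examplefb_remove,
		.driver	= {
			.name = "examplefb",
		},
	};

	/* expands to the init/exit + module_init/module_exit pair deleted above */
	module_platform_driver(examplefb_driver);

	MODULE_LICENSE("GPL");
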
diff --git a/drivers/video/pnx4008/sdum.c b/drivers/video/pnx4008/sdum.c
index 50e00395240f..c5c741452cac 100644
--- a/drivers/video/pnx4008/sdum.c
+++ b/drivers/video/pnx4008/sdum.c
@@ -856,17 +856,6 @@ static struct platform_driver sdum_driver = {
.resume = sdum_resume,
};
-int __init sdum_init(void)
-{
- return platform_driver_register(&sdum_driver);
-}
-
-static void __exit sdum_exit(void)
-{
- platform_driver_unregister(&sdum_driver);
-};
-
-module_init(sdum_init);
-module_exit(sdum_exit);
+module_platform_driver(sdum_driver);
MODULE_LICENSE("GPL");
diff --git a/drivers/video/pxa168fb.c b/drivers/video/pxa168fb.c
index 18ead6f0184d..8384b941f6ba 100644
--- a/drivers/video/pxa168fb.c
+++ b/drivers/video/pxa168fb.c
@@ -832,17 +832,7 @@ static struct platform_driver pxa168fb_driver = {
.remove = __devexit_p(pxa168fb_remove),
};
-static int __init pxa168fb_init(void)
-{
- return platform_driver_register(&pxa168fb_driver);
-}
-module_init(pxa168fb_init);
-
-static void __exit pxa168fb_exit(void)
-{
- platform_driver_unregister(&pxa168fb_driver);
-}
-module_exit(pxa168fb_exit);
+module_platform_driver(pxa168fb_driver);
MODULE_AUTHOR("Lennert Buytenhek <buytenh@marvell.com> "
"Green Wan <gwan@marvell.com>");
diff --git a/drivers/video/pxa3xx-gcu.c b/drivers/video/pxa3xx-gcu.c
index 1ed8b366618d..1d71c08a818f 100644
--- a/drivers/video/pxa3xx-gcu.c
+++ b/drivers/video/pxa3xx-gcu.c
@@ -747,20 +747,7 @@ static struct platform_driver pxa3xx_gcu_driver = {
},
};
-static int __init
-pxa3xx_gcu_init(void)
-{
- return platform_driver_register(&pxa3xx_gcu_driver);
-}
-
-static void __exit
-pxa3xx_gcu_exit(void)
-{
- platform_driver_unregister(&pxa3xx_gcu_driver);
-}
-
-module_init(pxa3xx_gcu_init);
-module_exit(pxa3xx_gcu_exit);
+module_platform_driver(pxa3xx_gcu_driver);
MODULE_DESCRIPTION("PXA3xx graphics controller unit driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/video/riva/fbdev.c b/drivers/video/riva/fbdev.c
index d8ab7be4fd6b..2f58cf9c813b 100644
--- a/drivers/video/riva/fbdev.c
+++ b/drivers/video/riva/fbdev.c
@@ -207,9 +207,9 @@ MODULE_DEVICE_TABLE(pci, rivafb_pci_tbl);
/* command line data, set in rivafb_setup() */
static int flatpanel __devinitdata = -1; /* Autodetect later */
static int forceCRTC __devinitdata = -1;
-static int noaccel __devinitdata = 0;
+static bool noaccel __devinitdata = 0;
#ifdef CONFIG_MTRR
-static int nomtrr __devinitdata = 0;
+static bool nomtrr __devinitdata = 0;
#endif
#ifdef CONFIG_PMAC_BACKLIGHT
static int backlight __devinitdata = 1;
@@ -218,7 +218,7 @@ static int backlight __devinitdata = 0;
#endif
static char *mode_option __devinitdata = NULL;
-static int strictmode = 0;
+static bool strictmode = 0;
static struct fb_fix_screeninfo __devinitdata rivafb_fix = {
.type = FB_TYPE_PACKED_PIXELS,
diff --git a/drivers/video/s3c-fb.c b/drivers/video/s3c-fb.c
index 0753b1cfcb8b..0c63b69b6340 100644
--- a/drivers/video/s3c-fb.c
+++ b/drivers/video/s3c-fb.c
@@ -192,6 +192,7 @@ struct s3c_fb_vsync {
* @regs: The mapped hardware registers.
* @variant: Variant information for this hardware.
* @enabled: A bitmask of enabled hardware windows.
+ * @output_on: Flag if the physical output is enabled.
* @pdata: The platform configuration data passed with the device.
* @windows: The hardware windows that have been claimed.
* @irq_no: IRQ line number
@@ -208,6 +209,7 @@ struct s3c_fb {
struct s3c_fb_variant variant;
unsigned char enabled;
+ bool output_on;
struct s3c_fb_platdata *pdata;
struct s3c_fb_win *windows[S3C_FB_MAX_WIN];
@@ -441,6 +443,39 @@ static void shadow_protect_win(struct s3c_fb_win *win, bool protect)
}
/**
+ * s3c_fb_enable() - Set the state of the main LCD output
+ * @sfb: The main framebuffer state.
+ * @enable: The state to set.
+ */
+static void s3c_fb_enable(struct s3c_fb *sfb, int enable)
+{
+ u32 vidcon0 = readl(sfb->regs + VIDCON0);
+
+ if (enable && !sfb->output_on)
+ pm_runtime_get_sync(sfb->dev);
+
+ if (enable) {
+ vidcon0 |= VIDCON0_ENVID | VIDCON0_ENVID_F;
+ } else {
+ /* see the note in the framebuffer datasheet about
+ * why you cannot take both of these bits down at the
+ * same time. */
+
+ if (vidcon0 & VIDCON0_ENVID) {
+ vidcon0 |= VIDCON0_ENVID;
+ vidcon0 &= ~VIDCON0_ENVID_F;
+ }
+ }
+
+ writel(vidcon0, sfb->regs + VIDCON0);
+
+ if (!enable && sfb->output_on)
+ pm_runtime_put_sync(sfb->dev);
+
+ sfb->output_on = enable;
+}
+
+/**
* s3c_fb_set_par() - framebuffer request to set new framebuffer state.
* @info: The framebuffer to change.
*
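
The rewritten s3c_fb_enable() pairs the ENVID bit handling with a runtime-PM reference, and the new output_on flag is what keeps the get/put calls balanced when the function is called repeatedly with the same state. A reduced sketch of that bookkeeping (register programming omitted; the function name is hypothetical, sfb->dev and output_on are as in the hunk above):

	static void example_output_enable(struct s3c_fb *sfb, bool enable)
	{
		/* take a PM reference only on a real off->on transition */
		if (enable && !sfb->output_on)
			pm_runtime_get_sync(sfb->dev);

		/* ... program VIDCON0 here ... */

		/* and drop it only on a real on->off transition */
		if (!enable && sfb->output_on)
			pm_runtime_put_sync(sfb->dev);

		sfb->output_on = enable;
	}
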
@@ -461,6 +496,8 @@ static int s3c_fb_set_par(struct fb_info *info)
dev_dbg(sfb->dev, "setting framebuffer parameters\n");
+ pm_runtime_get_sync(sfb->dev);
+
shadow_protect_win(win, 1);
switch (var->bits_per_pixel) {
@@ -510,9 +547,10 @@ static int s3c_fb_set_par(struct fb_info *info)
if (sfb->variant.is_2443)
data |= (1 << 5);
- data |= VIDCON0_ENVID | VIDCON0_ENVID_F;
writel(data, regs + VIDCON0);
+ s3c_fb_enable(sfb, 1);
+
data = VIDTCON0_VBPD(var->upper_margin - 1) |
VIDTCON0_VFPD(var->lower_margin - 1) |
VIDTCON0_VSPW(var->vsync_len - 1);
@@ -574,6 +612,7 @@ static int s3c_fb_set_par(struct fb_info *info)
}
data = WINCONx_ENWIN;
+ sfb->enabled |= (1 << win->index);
/* note, since we have to round up the bits-per-pixel, we end up
* relying on the bitfield information for r/g/b/a to work out
@@ -621,7 +660,8 @@ static int s3c_fb_set_par(struct fb_info *info)
} else if (var->transp.length == 1)
data |= WINCON1_BPPMODE_25BPP_A1888
| WINCON1_BLD_PIX;
- else if (var->transp.length == 4)
+ else if ((var->transp.length == 4) ||
+ (var->transp.length == 8))
data |= WINCON1_BPPMODE_28BPP_A4888
| WINCON1_BLD_PIX | WINCON1_ALPHA_SEL;
else
@@ -654,6 +694,8 @@ static int s3c_fb_set_par(struct fb_info *info)
shadow_protect_win(win, 0);
+ pm_runtime_put_sync(sfb->dev);
+
return 0;
}
@@ -725,6 +767,8 @@ static int s3c_fb_setcolreg(unsigned regno,
dev_dbg(sfb->dev, "%s: win %d: %d => rgb=%d/%d/%d\n",
__func__, win->index, regno, red, green, blue);
+ pm_runtime_get_sync(sfb->dev);
+
switch (info->fix.visual) {
case FB_VISUAL_TRUECOLOR:
/* true-colour, use pseudo-palette */
@@ -752,39 +796,15 @@ static int s3c_fb_setcolreg(unsigned regno,
break;
default:
+ pm_runtime_put_sync(sfb->dev);
return 1; /* unknown type */
}
+ pm_runtime_put_sync(sfb->dev);
return 0;
}
/**
- * s3c_fb_enable() - Set the state of the main LCD output
- * @sfb: The main framebuffer state.
- * @enable: The state to set.
- */
-static void s3c_fb_enable(struct s3c_fb *sfb, int enable)
-{
- u32 vidcon0 = readl(sfb->regs + VIDCON0);
-
- if (enable)
- vidcon0 |= VIDCON0_ENVID | VIDCON0_ENVID_F;
- else {
- /* see the note in the framebuffer datasheet about
- * why you cannot take both of these bits down at the
- * same time. */
-
- if (!(vidcon0 & VIDCON0_ENVID))
- return;
-
- vidcon0 |= VIDCON0_ENVID;
- vidcon0 &= ~VIDCON0_ENVID_F;
- }
-
- writel(vidcon0, sfb->regs + VIDCON0);
-}
-
-/**
* s3c_fb_blank() - blank or unblank the given window
* @blank_mode: The blank state from FB_BLANK_*
* @info: The framebuffer to blank.
@@ -800,6 +820,8 @@ static int s3c_fb_blank(int blank_mode, struct fb_info *info)
dev_dbg(sfb->dev, "blank mode %d\n", blank_mode);
+ pm_runtime_get_sync(sfb->dev);
+
wincon = readl(sfb->regs + sfb->variant.wincon + (index * 4));
switch (blank_mode) {
@@ -810,12 +832,16 @@ static int s3c_fb_blank(int blank_mode, struct fb_info *info)
case FB_BLANK_NORMAL:
/* disable the DMA and display 0x0 (black) */
+ shadow_protect_win(win, 1);
writel(WINxMAP_MAP | WINxMAP_MAP_COLOUR(0x0),
sfb->regs + sfb->variant.winmap + (index * 4));
+ shadow_protect_win(win, 0);
break;
case FB_BLANK_UNBLANK:
+ shadow_protect_win(win, 1);
writel(0x0, sfb->regs + sfb->variant.winmap + (index * 4));
+ shadow_protect_win(win, 0);
wincon |= WINCONx_ENWIN;
sfb->enabled |= (1 << index);
break;
@@ -823,10 +849,13 @@ static int s3c_fb_blank(int blank_mode, struct fb_info *info)
case FB_BLANK_VSYNC_SUSPEND:
case FB_BLANK_HSYNC_SUSPEND:
default:
+ pm_runtime_put_sync(sfb->dev);
return 1;
}
+ shadow_protect_win(win, 1);
writel(wincon, sfb->regs + sfb->variant.wincon + (index * 4));
+ shadow_protect_win(win, 0);
/* Check the enabled state to see if we need to be running the
* main LCD interface, as if there are no active windows then
@@ -845,8 +874,13 @@ static int s3c_fb_blank(int blank_mode, struct fb_info *info)
/* we're stuck with this until we can do something about overriding
* the power control using the blanking event for a single fb.
*/
- if (index == sfb->pdata->default_win)
+ if (index == sfb->pdata->default_win) {
+ shadow_protect_win(win, 1);
s3c_fb_enable(sfb, blank_mode != FB_BLANK_POWERDOWN ? 1 : 0);
+ shadow_protect_win(win, 0);
+ }
+
+ pm_runtime_put_sync(sfb->dev);
return 0;
}
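
Two things happen in the blank path: every fb_ops entry point now brackets its register access with pm_runtime_get_sync()/pm_runtime_put_sync() (replacing the open/release based references removed further down), and writes to shadowed registers are wrapped in shadow_protect_win() so the hardware latches a consistent set at the next frame. A sketch of the combined pattern; the function name is hypothetical and the register offsets are simplified:

	static int example_blank(int blank_mode, struct fb_info *info)
	{
		struct s3c_fb_win *win = info->par;
		struct s3c_fb *sfb = win->parent;
		u32 wincon;

		pm_runtime_get_sync(sfb->dev);		/* block must be powered */

		wincon = readl(sfb->regs + sfb->variant.wincon + win->index * 4);
		/* ... adjust wincon for blank_mode ... */

		shadow_protect_win(win, 1);		/* hold shadow registers */
		writel(wincon, sfb->regs + sfb->variant.wincon + win->index * 4);
		shadow_protect_win(win, 0);		/* latch on next frame */

		pm_runtime_put_sync(sfb->dev);
		return 0;
	}
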
@@ -870,6 +904,8 @@ static int s3c_fb_pan_display(struct fb_var_screeninfo *var,
void __iomem *buf = sfb->regs + win->index * 8;
unsigned int start_boff, end_boff;
+ pm_runtime_get_sync(sfb->dev);
+
/* Offset in bytes to the start of the displayed area */
start_boff = var->yoffset * info->fix.line_length;
/* X offset depends on the current bpp */
@@ -888,6 +924,7 @@ static int s3c_fb_pan_display(struct fb_var_screeninfo *var,
break;
default:
dev_err(sfb->dev, "invalid bpp\n");
+ pm_runtime_put_sync(sfb->dev);
return -EINVAL;
}
}
@@ -903,6 +940,7 @@ static int s3c_fb_pan_display(struct fb_var_screeninfo *var,
shadow_protect_win(win, 0);
+ pm_runtime_put_sync(sfb->dev);
return 0;
}
@@ -992,11 +1030,16 @@ static int s3c_fb_wait_for_vsync(struct s3c_fb *sfb, u32 crtc)
if (crtc != 0)
return -ENODEV;
+ pm_runtime_get_sync(sfb->dev);
+
count = sfb->vsync_info.count;
s3c_fb_enable_irq(sfb);
ret = wait_event_interruptible_timeout(sfb->vsync_info.wait,
count != sfb->vsync_info.count,
msecs_to_jiffies(VSYNC_TIMEOUT_MSEC));
+
+ pm_runtime_put_sync(sfb->dev);
+
if (ret == 0)
return -ETIMEDOUT;
@@ -1027,30 +1070,8 @@ static int s3c_fb_ioctl(struct fb_info *info, unsigned int cmd,
return ret;
}
-static int s3c_fb_open(struct fb_info *info, int user)
-{
- struct s3c_fb_win *win = info->par;
- struct s3c_fb *sfb = win->parent;
-
- pm_runtime_get_sync(sfb->dev);
-
- return 0;
-}
-
-static int s3c_fb_release(struct fb_info *info, int user)
-{
- struct s3c_fb_win *win = info->par;
- struct s3c_fb *sfb = win->parent;
-
- pm_runtime_put_sync(sfb->dev);
-
- return 0;
-}
-
static struct fb_ops s3c_fb_ops = {
.owner = THIS_MODULE,
- .fb_open = s3c_fb_open,
- .fb_release = s3c_fb_release,
.fb_check_var = s3c_fb_check_var,
.fb_set_par = s3c_fb_set_par,
.fb_blank = s3c_fb_blank,
@@ -1452,7 +1473,7 @@ static int __devinit s3c_fb_probe(struct platform_device *pdev)
dev_err(dev, "failed to create window %d\n", win);
for (; win >= 0; win--)
s3c_fb_release_win(sfb, sfb->windows[win]);
- goto err_irq;
+ goto err_pm_runtime;
}
}
@@ -1461,7 +1482,8 @@ static int __devinit s3c_fb_probe(struct platform_device *pdev)
return 0;
-err_irq:
+err_pm_runtime:
+ pm_runtime_put_sync(sfb->dev);
free_irq(sfb->irq_no, sfb);
err_ioremap:
@@ -1471,6 +1493,8 @@ err_req_region:
release_mem_region(sfb->regs_res->start, resource_size(sfb->regs_res));
err_lcd_clk:
+ pm_runtime_disable(sfb->dev);
+
if (!sfb->variant.has_clksel) {
clk_disable(sfb->lcd_clk);
clk_put(sfb->lcd_clk);
@@ -1524,7 +1548,7 @@ static int __devexit s3c_fb_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int s3c_fb_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -1571,10 +1595,15 @@ static int s3c_fb_resume(struct device *dev)
for (win_no = 0; win_no < sfb->variant.nr_windows - 1; win_no++) {
void __iomem *regs = sfb->regs + sfb->variant.keycon;
+ win = sfb->windows[win_no];
+ if (!win)
+ continue;
+ shadow_protect_win(win, 1);
regs += (win_no * 8);
writel(0xffffff, regs + WKEYCON0);
writel(0xffffff, regs + WKEYCON1);
+ shadow_protect_win(win, 0);
}
/* restore framebuffers */
@@ -1589,27 +1618,19 @@ static int s3c_fb_resume(struct device *dev)
return 0;
}
+#endif
+#ifdef CONFIG_PM_RUNTIME
static int s3c_fb_runtime_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct s3c_fb *sfb = platform_get_drvdata(pdev);
- struct s3c_fb_win *win;
- int win_no;
-
- for (win_no = S3C_FB_MAX_WIN - 1; win_no >= 0; win_no--) {
- win = sfb->windows[win_no];
- if (!win)
- continue;
-
- /* use the blank function to push into power-down */
- s3c_fb_blank(FB_BLANK_POWERDOWN, win->fbinfo);
- }
if (!sfb->variant.has_clksel)
clk_disable(sfb->lcd_clk);
clk_disable(sfb->bus_clk);
+
return 0;
}
@@ -1618,8 +1639,6 @@ static int s3c_fb_runtime_resume(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
struct s3c_fb *sfb = platform_get_drvdata(pdev);
struct s3c_fb_platdata *pd = sfb->pdata;
- struct s3c_fb_win *win;
- int win_no;
clk_enable(sfb->bus_clk);
@@ -1630,39 +1649,10 @@ static int s3c_fb_runtime_resume(struct device *dev)
pd->setup_gpio();
writel(pd->vidcon1, sfb->regs + VIDCON1);
- /* zero all windows before we do anything */
- for (win_no = 0; win_no < sfb->variant.nr_windows; win_no++)
- s3c_fb_clear_win(sfb, win_no);
-
- for (win_no = 0; win_no < sfb->variant.nr_windows - 1; win_no++) {
- void __iomem *regs = sfb->regs + sfb->variant.keycon;
-
- regs += (win_no * 8);
- writel(0xffffff, regs + WKEYCON0);
- writel(0xffffff, regs + WKEYCON1);
- }
-
- /* restore framebuffers */
- for (win_no = 0; win_no < S3C_FB_MAX_WIN; win_no++) {
- win = sfb->windows[win_no];
- if (!win)
- continue;
-
- dev_dbg(&pdev->dev, "resuming window %d\n", win_no);
- s3c_fb_set_par(win->fbinfo);
- }
-
return 0;
}
-
-#else
-#define s3c_fb_suspend NULL
-#define s3c_fb_resume NULL
-#define s3c_fb_runtime_suspend NULL
-#define s3c_fb_runtime_resume NULL
#endif
-
#define VALID_BPP124 (VALID_BPP(1) | VALID_BPP(2) | VALID_BPP(4))
#define VALID_BPP1248 (VALID_BPP124 | VALID_BPP(8))
@@ -1985,10 +1975,9 @@ static struct platform_device_id s3c_fb_driver_ids[] = {
MODULE_DEVICE_TABLE(platform, s3c_fb_driver_ids);
static const struct dev_pm_ops s3cfb_pm_ops = {
- .suspend = s3c_fb_suspend,
- .resume = s3c_fb_resume,
- .runtime_suspend = s3c_fb_runtime_suspend,
- .runtime_resume = s3c_fb_runtime_resume,
+ SET_SYSTEM_SLEEP_PM_OPS(s3c_fb_suspend, s3c_fb_resume)
+ SET_RUNTIME_PM_OPS(s3c_fb_runtime_suspend, s3c_fb_runtime_resume,
+ NULL)
};
static struct platform_driver s3c_fb_driver = {
@@ -2002,18 +1991,7 @@ static struct platform_driver s3c_fb_driver = {
},
};
-static int __init s3c_fb_init(void)
-{
- return platform_driver_register(&s3c_fb_driver);
-}
-
-static void __exit s3c_fb_cleanup(void)
-{
- platform_driver_unregister(&s3c_fb_driver);
-}
-
-module_init(s3c_fb_init);
-module_exit(s3c_fb_cleanup);
+module_platform_driver(s3c_fb_driver);
MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
MODULE_DESCRIPTION("Samsung S3C SoC Framebuffer driver");
diff --git a/drivers/video/s3c2410fb.c b/drivers/video/s3c2410fb.c
index ee4c0df217f7..77f34c614c86 100644
--- a/drivers/video/s3c2410fb.c
+++ b/drivers/video/s3c2410fb.c
@@ -26,8 +26,8 @@
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/cpufreq.h>
+#include <linux/io.h>
-#include <asm/io.h>
#include <asm/div64.h>
#include <asm/mach/map.h>
@@ -45,10 +45,10 @@
#ifdef CONFIG_FB_S3C2410_DEBUG
static int debug = 1;
#else
-static int debug = 0;
+static int debug;
#endif
-#define dprintk(msg...) if (debug) { printk(KERN_DEBUG "s3c2410fb: " msg); }
+#define dprintk(msg...) if (debug) printk(KERN_DEBUG "s3c2410fb: " msg);
/* useful functions */
@@ -567,11 +567,10 @@ static int s3c2410fb_blank(int blank_mode, struct fb_info *info)
tpal_reg += is_s3c2412(fbi) ? S3C2412_TPAL : S3C2410_TPAL;
- if (blank_mode == FB_BLANK_POWERDOWN) {
+ if (blank_mode == FB_BLANK_POWERDOWN)
s3c2410fb_lcd_enable(fbi, 0);
- } else {
+ else
s3c2410fb_lcd_enable(fbi, 1);
- }
if (blank_mode == FB_BLANK_UNBLANK)
writel(0x0, tpal_reg);
@@ -812,7 +811,7 @@ static inline void s3c2410fb_cpufreq_deregister(struct s3c2410fb_info *info)
#endif
-static char driver_name[] = "s3c2410fb";
+static const char driver_name[] = "s3c2410fb";
static int __devinit s3c24xxfb_probe(struct platform_device *pdev,
enum s3c_drv_type drv_type)
@@ -881,7 +880,10 @@ static int __devinit s3c24xxfb_probe(struct platform_device *pdev,
goto release_mem;
}
- info->irq_base = info->io + ((drv_type == DRV_S3C2412) ? S3C2412_LCDINTBASE : S3C2410_LCDINTBASE);
+ if (drv_type == DRV_S3C2412)
+ info->irq_base = info->io + S3C2412_LCDINTBASE;
+ else
+ info->irq_base = info->io + S3C2410_LCDINTBASE;
dprintk("devinit\n");
@@ -927,7 +929,7 @@ static int __devinit s3c24xxfb_probe(struct platform_device *pdev,
clk_enable(info->clk);
dprintk("got and enabled clock\n");
- msleep(1);
+ usleep_range(1000, 1000);
info->clk_rate = clk_get_rate(info->clk);
@@ -975,9 +977,8 @@ static int __devinit s3c24xxfb_probe(struct platform_device *pdev,
/* create device files */
ret = device_create_file(&pdev->dev, &dev_attr_debug);
- if (ret) {
+ if (ret)
printk(KERN_ERR "failed to add debug attribute\n");
- }
printk(KERN_INFO "fb%d: %s frame buffer device\n",
fbinfo->node, fbinfo->fix.id);
@@ -1027,7 +1028,7 @@ static int __devexit s3c2410fb_remove(struct platform_device *pdev)
s3c2410fb_cpufreq_deregister(info);
s3c2410fb_lcd_enable(info, 0);
- msleep(1);
+ usleep_range(1000, 1000);
s3c2410fb_unmap_video_memory(fbinfo);
@@ -1064,7 +1065,7 @@ static int s3c2410fb_suspend(struct platform_device *dev, pm_message_t state)
* the LCD DMA engine is not going to get back on the bus
* before the clock goes off again (bjd) */
- msleep(1);
+ usleep_range(1000, 1000);
clk_disable(info->clk);
return 0;
@@ -1076,7 +1077,7 @@ static int s3c2410fb_resume(struct platform_device *dev)
struct s3c2410fb_info *info = fbinfo->par;
clk_enable(info->clk);
- msleep(1);
+ usleep_range(1000, 1000);
s3c2410fb_init_registers(fbinfo);
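
The msleep(1) to usleep_range(1000, 1000) changes follow the kernel's timers-howto guidance: msleep() is jiffy based, so a nominal 1 ms sleep can stretch to tens of milliseconds at low HZ, while usleep_range() uses hrtimers and honours short delays. A sketch; the relaxed 2 ms upper bound is an illustrative choice, not what this patch uses:

	#include <linux/delay.h>

	/* ~1 ms settling time after enabling the LCD clock */
	clk_enable(info->clk);
	usleep_range(1000, 2000);	/* min 1 ms, allow up to 2 ms of slack */
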
diff --git a/drivers/video/s3fb.c b/drivers/video/s3fb.c
index 946a949f4c7d..2c80246b18b8 100644
--- a/drivers/video/s3fb.c
+++ b/drivers/video/s3fb.c
@@ -727,7 +727,7 @@ static int s3fb_set_par(struct fb_info *info)
if (par->chip == CHIP_988_VIRGE_VX) {
vga_wcrt(par->state.vgabase, 0x50, 0x00);
vga_wcrt(par->state.vgabase, 0x67, 0x50);
-
+ msleep(10); /* screen remains blank sometimes without this */
vga_wcrt(par->state.vgabase, 0x63, (mode <= 2) ? 0x90 : 0x09);
vga_wcrt(par->state.vgabase, 0x66, 0x90);
}
@@ -901,7 +901,8 @@ static int s3fb_set_par(struct fb_info *info)
/* Set Data Transfer Position */
hsstart = ((info->var.xres + info->var.right_margin) * hmul) / 8;
- value = clamp((htotal + hsstart + 1) / 2, hsstart + 4, htotal + 1);
+ /* + 2 is needed for Virge/VX, does no harm on other cards */
+ value = clamp((htotal + hsstart + 1) / 2 + 2, hsstart + 4, htotal + 1);
svga_wcrt_multi(par->state.vgabase, s3_dtpc_regs, value);
memset_io(info->screen_base, 0x00, screen_size);
@@ -1216,6 +1217,31 @@ static int __devinit s3_pci_probe(struct pci_dev *dev, const struct pci_device_i
info->screen_size = 2 << 20;
break;
}
+ } else if (par->chip == CHIP_988_VIRGE_VX) {
+ switch ((regval & 0x60) >> 5) {
+ case 0: /* 2MB */
+ info->screen_size = 2 << 20;
+ break;
+ case 1: /* 4MB */
+ info->screen_size = 4 << 20;
+ break;
+ case 2: /* 6MB */
+ info->screen_size = 6 << 20;
+ break;
+ case 3: /* 8MB */
+ info->screen_size = 8 << 20;
+ break;
+ }
+ /* off-screen memory */
+ regval = vga_rcrt(par->state.vgabase, 0x37);
+ switch ((regval & 0x60) >> 5) {
+ case 1: /* 4MB */
+ info->screen_size -= 4 << 20;
+ break;
+ case 2: /* 2MB */
+ info->screen_size -= 2 << 20;
+ break;
+ }
} else
info->screen_size = s3_memsizes[regval >> 5] << 10;
info->fix.smem_len = info->screen_size;
diff --git a/drivers/video/sbuslib.c b/drivers/video/sbuslib.c
index 37d764ad56b0..3c1de981a18c 100644
--- a/drivers/video/sbuslib.c
+++ b/drivers/video/sbuslib.c
@@ -76,7 +76,7 @@ int sbusfb_mmap_helper(struct sbus_mmap_map *map,
map_offset = (physbase + map[i].poff) & POFF_MASK;
break;
}
- if (!map_size){
+ if (!map_size) {
page += PAGE_SIZE;
continue;
}
diff --git a/drivers/video/sh7760fb.c b/drivers/video/sh7760fb.c
index 45e47d847163..83b16e237a0e 100644
--- a/drivers/video/sh7760fb.c
+++ b/drivers/video/sh7760fb.c
@@ -585,18 +585,7 @@ static struct platform_driver sh7760_lcdc_driver = {
.remove = __devexit_p(sh7760fb_remove),
};
-static int __init sh7760fb_init(void)
-{
- return platform_driver_register(&sh7760_lcdc_driver);
-}
-
-static void __exit sh7760fb_exit(void)
-{
- platform_driver_unregister(&sh7760_lcdc_driver);
-}
-
-module_init(sh7760fb_init);
-module_exit(sh7760fb_exit);
+module_platform_driver(sh7760_lcdc_driver);
MODULE_AUTHOR("Nobuhiro Iwamatsu, Manuel Lauss");
MODULE_DESCRIPTION("FBdev for SH7760/63 integrated LCD Controller");
diff --git a/drivers/video/sh_mipi_dsi.c b/drivers/video/sh_mipi_dsi.c
index 72ee96bc6b3e..05151b82f40f 100644
--- a/drivers/video/sh_mipi_dsi.c
+++ b/drivers/video/sh_mipi_dsi.c
@@ -8,6 +8,7 @@
* published by the Free Software Foundation.
*/
+#include <linux/bitmap.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/init.h>
@@ -41,6 +42,7 @@
#define VMCTR1 0x0020
#define VMCTR2 0x0024
#define VMLEN1 0x0028
+#define VMLEN2 0x002c
#define CMTSRTREQ 0x0070
#define CMTSRTCTR 0x00d0
@@ -51,8 +53,7 @@ struct sh_mipi {
void __iomem *base;
void __iomem *linkbase;
struct clk *dsit_clk;
- struct clk *dsip_clk;
- struct device *dev;
+ struct platform_device *pdev;
void *next_board_data;
void (*next_display_on)(void *board_data, struct fb_info *info);
@@ -124,35 +125,15 @@ static void sh_mipi_shutdown(struct platform_device *pdev)
sh_mipi_dsi_enable(mipi, false);
}
-static void mipi_display_on(void *arg, struct fb_info *info)
-{
- struct sh_mipi *mipi = arg;
-
- pm_runtime_get_sync(mipi->dev);
- sh_mipi_dsi_enable(mipi, true);
-
- if (mipi->next_display_on)
- mipi->next_display_on(mipi->next_board_data, info);
-}
-
-static void mipi_display_off(void *arg)
-{
- struct sh_mipi *mipi = arg;
-
- if (mipi->next_display_off)
- mipi->next_display_off(mipi->next_board_data);
-
- sh_mipi_dsi_enable(mipi, false);
- pm_runtime_put(mipi->dev);
-}
-
static int __init sh_mipi_setup(struct sh_mipi *mipi,
struct sh_mipi_dsi_info *pdata)
{
void __iomem *base = mipi->base;
struct sh_mobile_lcdc_chan_cfg *ch = pdata->lcd_chan;
- u32 pctype, datatype, pixfmt, linelength, vmctr2 = 0x00e00000;
+ u32 pctype, datatype, pixfmt, linelength, vmctr2;
+ u32 tmp, top, bottom, delay, div;
bool yuv;
+ int bpp;
/*
* Select data format. MIPI DSI is not hot-pluggable, so, we just use
@@ -253,6 +234,9 @@ static int __init sh_mipi_setup(struct sh_mipi *mipi,
(!yuv && ch->interface_type != RGB24))
return -EINVAL;
+ if (!pdata->lane)
+ return -EINVAL;
+
/* reset DSI link */
iowrite32(0x00000001, base + SYSCTRL);
/* Hold reset for 100 cycles of the slowest of bus, HS byte and LP clock */
@@ -262,15 +246,6 @@ static int __init sh_mipi_setup(struct sh_mipi *mipi,
/* setup DSI link */
/*
- * Default = ULPS enable |
- * Contention detection enabled |
- * EoT packet transmission enable |
- * CRC check enable |
- * ECC check enable
- * additionally enable first two lanes
- */
- iowrite32(0x00003703, base + SYSCONF);
- /*
* T_wakeup = 0x7000
* T_hs-trail = 3
* T_hs-prepare = 3
@@ -290,15 +265,24 @@ static int __init sh_mipi_setup(struct sh_mipi *mipi,
iowrite32(0x0fffffff, base + TATOVSET);
/* Peripheral reset timeout, default 0xffffffff */
iowrite32(0x0fffffff, base + PRTOVSET);
- /* Enable timeout counters */
- iowrite32(0x00000f00, base + DSICTRL);
/* Interrupts not used, disable all */
iowrite32(0, base + DSIINTE);
/* DSI-Tx bias on */
iowrite32(0x00000001, base + PHYCTRL);
udelay(200);
- /* Deassert resets, power on, set multiplier */
- iowrite32(0x03070b01, base + PHYCTRL);
+ /* Deassert resets, power on */
+ iowrite32(0x03070001, base + PHYCTRL);
+
+ /*
+ * Default = ULPS enable |
+ * Contention detection enabled |
+ * EoT packet transmission enable |
+ * CRC check enable |
+ * ECC check enable
+ */
+ bitmap_fill((unsigned long *)&tmp, pdata->lane);
+ tmp |= 0x00003700;
+ iowrite32(tmp, base + SYSCONF);
/* setup l-bridge */
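
Instead of the hard-coded 0x00003703 (control flags plus the first two lanes), SYSCONF is now built from the board-supplied lane count: the low bits become a lane mask and the fixed 0x3700 control flags are OR'd on top. A sketch of the same computation; for small counts a plain shift would do, bitmap_fill() is simply what the patch reaches for (hence the new linux/bitmap.h include at the top of this file):

	unsigned long lanes = 0;
	u32 sysconf;

	/* one bit per configured lane, e.g. 2 lanes -> 0x3 */
	bitmap_fill(&lanes, pdata->lane);
	sysconf = lanes | 0x00003700;	/* ULPS, contention det., EoT, CRC, ECC */
	iowrite32(sysconf, base + SYSCONF);
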
@@ -316,18 +300,68 @@ static int __init sh_mipi_setup(struct sh_mipi *mipi,
* Non-burst mode with sync pulses: VSE and HSE are output,
* HSA period allowed, no commands in LP
*/
+ vmctr2 = 0;
+ if (pdata->flags & SH_MIPI_DSI_VSEE)
+ vmctr2 |= 1 << 23;
+ if (pdata->flags & SH_MIPI_DSI_HSEE)
+ vmctr2 |= 1 << 22;
+ if (pdata->flags & SH_MIPI_DSI_HSAE)
+ vmctr2 |= 1 << 21;
+ if (pdata->flags & SH_MIPI_DSI_BL2E)
+ vmctr2 |= 1 << 17;
if (pdata->flags & SH_MIPI_DSI_HSABM)
- vmctr2 |= 0x20;
- if (pdata->flags & SH_MIPI_DSI_HSPBM)
- vmctr2 |= 0x10;
+ vmctr2 |= 1 << 5;
+ if (pdata->flags & SH_MIPI_DSI_HBPBM)
+ vmctr2 |= 1 << 4;
+ if (pdata->flags & SH_MIPI_DSI_HFPBM)
+ vmctr2 |= 1 << 3;
iowrite32(vmctr2, mipi->linkbase + VMCTR2);
/*
- * 0x660 = 1632 bytes per line (RGB24, 544 pixels: see
- * sh_mobile_lcdc_info.ch[0].lcd_cfg[0].xres), HSALEN = 1 - default
- * (unused if VMCTR2[HSABM] = 0)
+ * VMLEN1 = RGBLEN | HSALEN
+ *
+ * see
+ * Video mode - Blanking Packet setting
+ */
+ top = linelength << 16; /* RGBLEN */
+ bottom = 0x00000001;
+ if (pdata->flags & SH_MIPI_DSI_HSABM) /* HSALEN */
+ bottom = (pdata->lane * ch->lcd_cfg[0].hsync_len) - 10;
+ iowrite32(top | bottom , mipi->linkbase + VMLEN1);
+
+ /*
+ * VMLEN2 = HBPLEN | HFPLEN
+ *
+ * see
+ * Video mode - Blanking Packet setting
*/
- iowrite32(1 | (linelength << 16), mipi->linkbase + VMLEN1);
+ top = 0x00010000;
+ bottom = 0x00000001;
+ delay = 0;
+
+ div = 1; /* HSbyteCLK is calculation base
+ * HS4divCLK = HSbyteCLK/2
+ * HS6divCLK is not supported for now */
+ if (pdata->flags & SH_MIPI_DSI_HS4divCLK)
+ div = 2;
+
+ if (pdata->flags & SH_MIPI_DSI_HFPBM) { /* HBPLEN */
+ top = ch->lcd_cfg[0].hsync_len + ch->lcd_cfg[0].left_margin;
+ top = ((pdata->lane * top / div) - 10) << 16;
+ }
+ if (pdata->flags & SH_MIPI_DSI_HBPBM) { /* HFPLEN */
+ bottom = ch->lcd_cfg[0].right_margin;
+ bottom = (pdata->lane * bottom / div) - 12;
+ }
+
+ bpp = linelength / ch->lcd_cfg[0].xres; /* byte / pixel */
+ if ((pdata->lane / div) > bpp) {
+ tmp = ch->lcd_cfg[0].xres / bpp; /* output cycle */
+ tmp = ch->lcd_cfg[0].xres - tmp; /* (input - output) cycle */
+ delay = (pdata->lane * tmp);
+ }
+
+ iowrite32(top | (bottom + delay) , mipi->linkbase + VMLEN2);
msleep(5);
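
VMLEN1 and VMLEN2 each pack two 16-bit byte counts (RGBLEN|HSALEN and HBPLEN|HFPLEN), and the patch derives them from the video timings, the lane count and the clock divider rather than hard-coding the 544-pixel case. A worked example under stated assumptions: 2 lanes, div = 1 (HS byte clock), RGB24 at 3 bytes/pixel, hsync_len = 10, left_margin = 20, right_margin = 30, xres = 544; all variable names are placeholders:

	hsalen = 2 * 10 - 10;			/*  10 bytes */
	rgblen = 544 * 3;			/* 1632 bytes, the old 0x660 case */
	vmlen1 = (rgblen << 16) | hsalen;

	hbplen = 2 * (10 + 20) / 1 - 10;	/*  50 bytes */
	hfplen = 2 * 30 / 1 - 12;		/*  48 bytes */
	/* lane/div (2) <= bytes per pixel (3), so no extra delay is added */
	vmlen2 = (hbplen << 16) | hfplen;
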
@@ -352,9 +386,56 @@ static int __init sh_mipi_setup(struct sh_mipi *mipi,
pixfmt << 4);
sh_mipi_dcs(ch->chan, MIPI_DCS_SET_DISPLAY_ON);
+ /* Enable timeout counters */
+ iowrite32(0x00000f00, base + DSICTRL);
+
return 0;
}
+static void mipi_display_on(void *arg, struct fb_info *info)
+{
+ struct sh_mipi *mipi = arg;
+ struct sh_mipi_dsi_info *pdata = mipi->pdev->dev.platform_data;
+ int ret;
+
+ pm_runtime_get_sync(&mipi->pdev->dev);
+
+ ret = pdata->set_dot_clock(mipi->pdev, mipi->base, 1);
+ if (ret < 0)
+ goto mipi_display_on_fail1;
+
+ ret = sh_mipi_setup(mipi, pdata);
+ if (ret < 0)
+ goto mipi_display_on_fail2;
+
+ sh_mipi_dsi_enable(mipi, true);
+
+ if (mipi->next_display_on)
+ mipi->next_display_on(mipi->next_board_data, info);
+
+ return;
+
+mipi_display_on_fail1:
+ pm_runtime_put_sync(&mipi->pdev->dev);
+mipi_display_on_fail2:
+ pdata->set_dot_clock(mipi->pdev, mipi->base, 0);
+}
+
+static void mipi_display_off(void *arg)
+{
+ struct sh_mipi *mipi = arg;
+ struct sh_mipi_dsi_info *pdata = mipi->pdev->dev.platform_data;
+
+ if (mipi->next_display_off)
+ mipi->next_display_off(mipi->next_board_data);
+
+ sh_mipi_dsi_enable(mipi, false);
+
+ pdata->set_dot_clock(mipi->pdev, mipi->base, 0);
+
+ pm_runtime_put_sync(&mipi->pdev->dev);
+}
+
static int __init sh_mipi_probe(struct platform_device *pdev)
{
struct sh_mipi *mipi;
@@ -363,11 +444,13 @@ static int __init sh_mipi_probe(struct platform_device *pdev)
struct resource *res2 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
unsigned long rate, f_current;
int idx = pdev->id, ret;
- char dsip_clk[] = "dsi.p_clk";
if (!res || !res2 || idx >= ARRAY_SIZE(mipi_dsi) || !pdata)
return -ENODEV;
+ if (!pdata->set_dot_clock)
+ return -EINVAL;
+
mutex_lock(&array_lock);
if (idx < 0)
for (idx = 0; idx < ARRAY_SIZE(mipi_dsi) && mipi_dsi[idx]; idx++)
@@ -408,7 +491,7 @@ static int __init sh_mipi_probe(struct platform_device *pdev)
goto emap2;
}
- mipi->dev = &pdev->dev;
+ mipi->pdev = pdev;
mipi->dsit_clk = clk_get(&pdev->dev, "dsit_clk");
if (IS_ERR(mipi->dsit_clk)) {
@@ -428,44 +511,15 @@ static int __init sh_mipi_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "DSI-T clk %lu -> %lu\n", f_current, rate);
- sprintf(dsip_clk, "dsi%1.1dp_clk", idx);
- mipi->dsip_clk = clk_get(&pdev->dev, dsip_clk);
- if (IS_ERR(mipi->dsip_clk)) {
- ret = PTR_ERR(mipi->dsip_clk);
- goto eclkpget;
- }
-
- f_current = clk_get_rate(mipi->dsip_clk);
- /* Between 10 and 50MHz */
- rate = clk_round_rate(mipi->dsip_clk, 24000000);
- if (rate > 0 && rate != f_current)
- ret = clk_set_rate(mipi->dsip_clk, rate);
- else
- ret = rate;
- if (ret < 0)
- goto esetprate;
-
- dev_dbg(&pdev->dev, "DSI-P clk %lu -> %lu\n", f_current, rate);
-
- msleep(10);
-
ret = clk_enable(mipi->dsit_clk);
if (ret < 0)
goto eclkton;
- ret = clk_enable(mipi->dsip_clk);
- if (ret < 0)
- goto eclkpon;
-
mipi_dsi[idx] = mipi;
pm_runtime_enable(&pdev->dev);
pm_runtime_resume(&pdev->dev);
- ret = sh_mipi_setup(mipi, pdata);
- if (ret < 0)
- goto emipisetup;
-
mutex_unlock(&array_lock);
platform_set_drvdata(pdev, mipi);
@@ -482,16 +536,7 @@ static int __init sh_mipi_probe(struct platform_device *pdev)
return 0;
-emipisetup:
- mipi_dsi[idx] = NULL;
- pm_runtime_disable(&pdev->dev);
- clk_disable(mipi->dsip_clk);
-eclkpon:
- clk_disable(mipi->dsit_clk);
eclkton:
-esetprate:
- clk_put(mipi->dsip_clk);
-eclkpget:
esettrate:
clk_put(mipi->dsit_clk);
eclktget:
@@ -542,10 +587,9 @@ static int __exit sh_mipi_remove(struct platform_device *pdev)
pdata->lcd_chan->board_cfg.board_data = NULL;
pm_runtime_disable(&pdev->dev);
- clk_disable(mipi->dsip_clk);
clk_disable(mipi->dsit_clk);
clk_put(mipi->dsit_clk);
- clk_put(mipi->dsip_clk);
+
iounmap(mipi->linkbase);
if (res2)
release_mem_region(res2->start, resource_size(res2));
diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c
index facffc254976..aac5b369d73c 100644
--- a/drivers/video/sh_mobile_lcdcfb.c
+++ b/drivers/video/sh_mobile_lcdcfb.c
@@ -17,6 +17,7 @@
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
+#include <linux/videodev2.h>
#include <linux/vmalloc.h>
#include <linux/ioctl.h>
#include <linux/slab.h>
@@ -102,7 +103,7 @@ struct sh_mobile_lcdc_priv {
struct sh_mobile_lcdc_chan ch[2];
struct notifier_block notifier;
int started;
- int forced_bpp; /* 2 channel LCDC must share bpp setting */
+ int forced_fourcc; /* 2 channel LCDC must share fourcc setting */
struct sh_mobile_meram_info *meram_dev;
};
@@ -215,6 +216,47 @@ struct sh_mobile_lcdc_sys_bus_ops sh_mobile_lcdc_sys_bus_ops = {
lcdc_sys_read_data,
};
+static int sh_mobile_format_fourcc(const struct fb_var_screeninfo *var)
+{
+ if (var->grayscale > 1)
+ return var->grayscale;
+
+ switch (var->bits_per_pixel) {
+ case 16:
+ return V4L2_PIX_FMT_RGB565;
+ case 24:
+ return V4L2_PIX_FMT_BGR24;
+ case 32:
+ return V4L2_PIX_FMT_BGR32;
+ default:
+ return 0;
+ }
+}
+
+static int sh_mobile_format_is_fourcc(const struct fb_var_screeninfo *var)
+{
+ return var->grayscale > 1;
+}
+
+static bool sh_mobile_format_is_yuv(const struct fb_var_screeninfo *var)
+{
+ if (var->grayscale <= 1)
+ return false;
+
+ switch (var->grayscale) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ case V4L2_PIX_FMT_NV24:
+ case V4L2_PIX_FMT_NV42:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
static void sh_mobile_lcdc_clk_on(struct sh_mobile_lcdc_priv *priv)
{
if (atomic_inc_and_test(&priv->hw_usecnt)) {
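
The helpers added here define the driver's FOURCC convention: a grayscale value above 1 in fb_var_screeninfo is treated as a V4L2 pixel-format code (per the FOURCC-based fb API that FB_CAP_FOURCC advertises later in this diff), otherwise the classic bits_per_pixel/RGB-bitfield path applies. A userspace sketch of how a caller might request a YUV mode under that convention; the ioctl sequence and the already-open fd are assumptions of this sketch, not code from the patch:

	#include <sys/ioctl.h>
	#include <linux/fb.h>
	#include <linux/videodev2.h>

	struct fb_var_screeninfo var;

	ioctl(fd, FBIOGET_VSCREENINFO, &var);
	var.grayscale = V4L2_PIX_FMT_NV12;	/* >1 means "this is a fourcc" */
	var.activate  = FB_ACTIVATE_NOW;
	if (ioctl(fd, FBIOPUT_VSCREENINFO, &var) == 0) {
		/* driver reports 12 bpp and FB_VISUAL_FOURCC on success */
	}
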
@@ -420,7 +462,7 @@ static void sh_mobile_lcdc_geometry(struct sh_mobile_lcdc_chan *ch)
tmp = ((display_var->xres & 7) << 24) |
((display_h_total & 7) << 16) |
((display_var->hsync_len & 7) << 8) |
- hsync_pos;
+ (hsync_pos & 7);
lcdc_write_chan(ch, LDHAJR, tmp);
}
@@ -435,7 +477,6 @@ static void __sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv)
{
struct sh_mobile_lcdc_chan *ch;
unsigned long tmp;
- int bpp = 0;
int k, m;
/* Enable LCDC channels. Read data from external memory, avoid using the
@@ -454,9 +495,6 @@ static void __sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv)
if (!ch->enabled)
continue;
- if (!bpp)
- bpp = ch->info->var.bits_per_pixel;
-
/* Power supply */
lcdc_write_chan(ch, LDPMR, 0);
@@ -487,31 +525,37 @@ static void __sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv)
sh_mobile_lcdc_geometry(ch);
- if (ch->info->var.nonstd) {
- tmp = (ch->info->var.nonstd << 16);
- switch (ch->info->var.bits_per_pixel) {
- case 12:
- tmp |= LDDFR_YF_420;
- break;
- case 16:
- tmp |= LDDFR_YF_422;
- break;
- case 24:
- default:
- tmp |= LDDFR_YF_444;
- break;
- }
- } else {
- switch (ch->info->var.bits_per_pixel) {
- case 16:
- tmp = LDDFR_PKF_RGB16;
- break;
- case 24:
- tmp = LDDFR_PKF_RGB24;
+ switch (sh_mobile_format_fourcc(&ch->info->var)) {
+ case V4L2_PIX_FMT_RGB565:
+ tmp = LDDFR_PKF_RGB16;
+ break;
+ case V4L2_PIX_FMT_BGR24:
+ tmp = LDDFR_PKF_RGB24;
+ break;
+ case V4L2_PIX_FMT_BGR32:
+ tmp = LDDFR_PKF_ARGB32;
+ break;
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ tmp = LDDFR_CC | LDDFR_YF_420;
+ break;
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ tmp = LDDFR_CC | LDDFR_YF_422;
+ break;
+ case V4L2_PIX_FMT_NV24:
+ case V4L2_PIX_FMT_NV42:
+ tmp = LDDFR_CC | LDDFR_YF_444;
+ break;
+ }
+
+ if (sh_mobile_format_is_yuv(&ch->info->var)) {
+ switch (ch->info->var.colorspace) {
+ case V4L2_COLORSPACE_REC709:
+ tmp |= LDDFR_CF1;
break;
- case 32:
- default:
- tmp = LDDFR_PKF_ARGB32;
+ case V4L2_COLORSPACE_JPEG:
+ tmp |= LDDFR_CF0;
break;
}
}
@@ -519,7 +563,7 @@ static void __sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv)
lcdc_write_chan(ch, LDDFR, tmp);
lcdc_write_chan(ch, LDMLSR, ch->pitch);
lcdc_write_chan(ch, LDSA1R, ch->base_addr_y);
- if (ch->info->var.nonstd)
+ if (sh_mobile_format_is_yuv(&ch->info->var))
lcdc_write_chan(ch, LDSA2R, ch->base_addr_c);
/* When using deferred I/O mode, configure the LCDC for one-shot
@@ -536,21 +580,23 @@ static void __sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv)
}
/* Word and long word swap. */
- if (priv->ch[0].info->var.nonstd)
+ switch (sh_mobile_format_fourcc(&priv->ch[0].info->var)) {
+ case V4L2_PIX_FMT_RGB565:
+ case V4L2_PIX_FMT_NV21:
+ case V4L2_PIX_FMT_NV61:
+ case V4L2_PIX_FMT_NV42:
+ tmp = LDDDSR_LS | LDDDSR_WS;
+ break;
+ case V4L2_PIX_FMT_BGR24:
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV24:
tmp = LDDDSR_LS | LDDDSR_WS | LDDDSR_BS;
- else {
- switch (bpp) {
- case 16:
- tmp = LDDDSR_LS | LDDDSR_WS;
- break;
- case 24:
- tmp = LDDDSR_LS | LDDDSR_WS | LDDDSR_BS;
- break;
- case 32:
- default:
- tmp = LDDDSR_LS;
- break;
- }
+ break;
+ case V4L2_PIX_FMT_BGR32:
+ default:
+ tmp = LDDDSR_LS;
+ break;
}
lcdc_write(priv, _LDDDSR, tmp);
@@ -622,12 +668,24 @@ static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv)
ch->meram_enabled = 0;
}
- if (!ch->info->var.nonstd)
- pixelformat = SH_MOBILE_MERAM_PF_RGB;
- else if (ch->info->var.bits_per_pixel == 24)
- pixelformat = SH_MOBILE_MERAM_PF_NV24;
- else
+ switch (sh_mobile_format_fourcc(&ch->info->var)) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
pixelformat = SH_MOBILE_MERAM_PF_NV;
+ break;
+ case V4L2_PIX_FMT_NV24:
+ case V4L2_PIX_FMT_NV42:
+ pixelformat = SH_MOBILE_MERAM_PF_NV24;
+ break;
+ case V4L2_PIX_FMT_RGB565:
+ case V4L2_PIX_FMT_BGR24:
+ case V4L2_PIX_FMT_BGR32:
+ default:
+ pixelformat = SH_MOBILE_MERAM_PF_RGB;
+ break;
+ }
ret = mdev->ops->meram_register(mdev, cfg, ch->pitch,
ch->info->var.yres, pixelformat,
@@ -845,6 +903,7 @@ static struct fb_fix_screeninfo sh_mobile_lcdc_fix = {
.xpanstep = 0,
.ypanstep = 1,
.ywrapstep = 0,
+ .capabilities = FB_CAP_FOURCC,
};
static void sh_mobile_lcdc_fillrect(struct fb_info *info,
@@ -877,8 +936,9 @@ static int sh_mobile_fb_pan_display(struct fb_var_screeninfo *var,
unsigned long new_pan_offset;
unsigned long base_addr_y, base_addr_c;
unsigned long c_offset;
+ bool yuv = sh_mobile_format_is_yuv(&info->var);
- if (!info->var.nonstd)
+ if (!yuv)
new_pan_offset = var->yoffset * info->fix.line_length
+ var->xoffset * (info->var.bits_per_pixel / 8);
else
@@ -892,7 +952,7 @@ static int sh_mobile_fb_pan_display(struct fb_var_screeninfo *var,
/* Set the source address for the next refresh */
base_addr_y = ch->dma_handle + new_pan_offset;
- if (info->var.nonstd) {
+ if (yuv) {
/* Set y offset */
c_offset = var->yoffset * info->fix.line_length
* (info->var.bits_per_pixel - 8) / 8;
@@ -900,7 +960,7 @@ static int sh_mobile_fb_pan_display(struct fb_var_screeninfo *var,
+ info->var.xres * info->var.yres_virtual
+ c_offset;
/* Set x offset */
- if (info->var.bits_per_pixel == 24)
+ if (sh_mobile_format_fourcc(&info->var) == V4L2_PIX_FMT_NV24)
base_addr_c += 2 * var->xoffset;
else
base_addr_c += var->xoffset;
@@ -924,7 +984,7 @@ static int sh_mobile_fb_pan_display(struct fb_var_screeninfo *var,
ch->base_addr_c = base_addr_c;
lcdc_write_chan_mirror(ch, LDSA1R, base_addr_y);
- if (info->var.nonstd)
+ if (yuv)
lcdc_write_chan_mirror(ch, LDSA2R, base_addr_c);
if (lcdc_chan_is_sublcd(ch))
@@ -1100,51 +1160,84 @@ static int sh_mobile_check_var(struct fb_var_screeninfo *var, struct fb_info *in
if (var->yres_virtual < var->yres)
var->yres_virtual = var->yres;
- if (var->bits_per_pixel <= 16) { /* RGB 565 */
- var->bits_per_pixel = 16;
- var->red.offset = 11;
- var->red.length = 5;
- var->green.offset = 5;
- var->green.length = 6;
- var->blue.offset = 0;
- var->blue.length = 5;
- var->transp.offset = 0;
- var->transp.length = 0;
- } else if (var->bits_per_pixel <= 24) { /* RGB 888 */
- var->bits_per_pixel = 24;
- var->red.offset = 16;
- var->red.length = 8;
- var->green.offset = 8;
- var->green.length = 8;
- var->blue.offset = 0;
- var->blue.length = 8;
- var->transp.offset = 0;
- var->transp.length = 0;
- } else if (var->bits_per_pixel <= 32) { /* RGBA 888 */
- var->bits_per_pixel = 32;
- var->red.offset = 16;
- var->red.length = 8;
- var->green.offset = 8;
- var->green.length = 8;
- var->blue.offset = 0;
- var->blue.length = 8;
- var->transp.offset = 24;
- var->transp.length = 8;
- } else
- return -EINVAL;
+ if (sh_mobile_format_is_fourcc(var)) {
+ switch (var->grayscale) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ var->bits_per_pixel = 12;
+ break;
+ case V4L2_PIX_FMT_RGB565:
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ var->bits_per_pixel = 16;
+ break;
+ case V4L2_PIX_FMT_BGR24:
+ case V4L2_PIX_FMT_NV24:
+ case V4L2_PIX_FMT_NV42:
+ var->bits_per_pixel = 24;
+ break;
+ case V4L2_PIX_FMT_BGR32:
+ var->bits_per_pixel = 32;
+ break;
+ default:
+ return -EINVAL;
+ }
- var->red.msb_right = 0;
- var->green.msb_right = 0;
- var->blue.msb_right = 0;
- var->transp.msb_right = 0;
+ /* Default to RGB and JPEG color-spaces for RGB and YUV formats
+ * respectively.
+ */
+ if (!sh_mobile_format_is_yuv(var))
+ var->colorspace = V4L2_COLORSPACE_SRGB;
+ else if (var->colorspace != V4L2_COLORSPACE_REC709)
+ var->colorspace = V4L2_COLORSPACE_JPEG;
+ } else {
+ if (var->bits_per_pixel <= 16) { /* RGB 565 */
+ var->bits_per_pixel = 16;
+ var->red.offset = 11;
+ var->red.length = 5;
+ var->green.offset = 5;
+ var->green.length = 6;
+ var->blue.offset = 0;
+ var->blue.length = 5;
+ var->transp.offset = 0;
+ var->transp.length = 0;
+ } else if (var->bits_per_pixel <= 24) { /* RGB 888 */
+ var->bits_per_pixel = 24;
+ var->red.offset = 16;
+ var->red.length = 8;
+ var->green.offset = 8;
+ var->green.length = 8;
+ var->blue.offset = 0;
+ var->blue.length = 8;
+ var->transp.offset = 0;
+ var->transp.length = 0;
+ } else if (var->bits_per_pixel <= 32) { /* RGBA 888 */
+ var->bits_per_pixel = 32;
+ var->red.offset = 16;
+ var->red.length = 8;
+ var->green.offset = 8;
+ var->green.length = 8;
+ var->blue.offset = 0;
+ var->blue.length = 8;
+ var->transp.offset = 24;
+ var->transp.length = 8;
+ } else
+ return -EINVAL;
+
+ var->red.msb_right = 0;
+ var->green.msb_right = 0;
+ var->blue.msb_right = 0;
+ var->transp.msb_right = 0;
+ }
/* Make sure we don't exceed our allocated memory. */
if (var->xres_virtual * var->yres_virtual * var->bits_per_pixel / 8 >
info->fix.smem_len)
return -EINVAL;
- /* only accept the forced_bpp for dual channel configurations */
- if (p->forced_bpp && p->forced_bpp != var->bits_per_pixel)
+ /* only accept the forced_fourcc for dual channel configurations */
+ if (p->forced_fourcc &&
+ p->forced_fourcc != sh_mobile_format_fourcc(var))
return -EINVAL;
return 0;
@@ -1158,7 +1251,7 @@ static int sh_mobile_set_par(struct fb_info *info)
sh_mobile_lcdc_stop(ch->lcdc);
- if (info->var.nonstd)
+ if (sh_mobile_format_is_yuv(&info->var))
info->fix.line_length = info->var.xres;
else
info->fix.line_length = info->var.xres
@@ -1170,6 +1263,14 @@ static int sh_mobile_set_par(struct fb_info *info)
info->fix.line_length = line_length;
}
+ if (sh_mobile_format_is_fourcc(&info->var)) {
+ info->fix.type = FB_TYPE_FOURCC;
+ info->fix.visual = FB_VISUAL_FOURCC;
+ } else {
+ info->fix.type = FB_TYPE_PACKED_PIXELS;
+ info->fix.visual = FB_VISUAL_TRUECOLOR;
+ }
+
return ret;
}
@@ -1464,9 +1565,9 @@ static int __devinit sh_mobile_lcdc_channel_init(struct sh_mobile_lcdc_chan *ch,
for (i = 0, mode = cfg->lcd_cfg; i < cfg->num_cfg; i++, mode++) {
unsigned int size = mode->yres * mode->xres;
- /* NV12 buffers must have even number of lines */
- if ((cfg->nonstd) && cfg->bpp == 12 &&
- (mode->yres & 0x1)) {
+ /* NV12/NV21 buffers must have even number of lines */
+ if ((cfg->fourcc == V4L2_PIX_FMT_NV12 ||
+ cfg->fourcc == V4L2_PIX_FMT_NV21) && (mode->yres & 0x1)) {
dev_err(dev, "yres must be multiple of 2 for YCbCr420 "
"mode.\n");
return -EINVAL;
@@ -1484,14 +1585,6 @@ static int __devinit sh_mobile_lcdc_channel_init(struct sh_mobile_lcdc_chan *ch,
dev_dbg(dev, "Found largest videomode %ux%u\n",
max_mode->xres, max_mode->yres);
- /* Initialize fixed screen information. Restrict pan to 2 lines steps
- * for NV12.
- */
- info->fix = sh_mobile_lcdc_fix;
- info->fix.smem_len = max_size * 2 * cfg->bpp / 8;
- if (cfg->nonstd && cfg->bpp == 12)
- info->fix.ypanstep = 2;
-
/* Create the mode list. */
if (cfg->lcd_cfg == NULL) {
mode = &default_720p;
@@ -1509,19 +1602,38 @@ static int __devinit sh_mobile_lcdc_channel_init(struct sh_mobile_lcdc_chan *ch,
*/
var = &info->var;
fb_videomode_to_var(var, mode);
- var->bits_per_pixel = cfg->bpp;
var->width = cfg->lcd_size_cfg.width;
var->height = cfg->lcd_size_cfg.height;
var->yres_virtual = var->yres * 2;
var->activate = FB_ACTIVATE_NOW;
+ switch (cfg->fourcc) {
+ case V4L2_PIX_FMT_RGB565:
+ var->bits_per_pixel = 16;
+ break;
+ case V4L2_PIX_FMT_BGR24:
+ var->bits_per_pixel = 24;
+ break;
+ case V4L2_PIX_FMT_BGR32:
+ var->bits_per_pixel = 32;
+ break;
+ default:
+ var->grayscale = cfg->fourcc;
+ break;
+ }
+
+ /* Make sure the memory size check won't fail. smem_len is initialized
+ * later based on var.
+ */
+ info->fix.smem_len = UINT_MAX;
ret = sh_mobile_check_var(var, info);
if (ret)
return ret;
+ max_size = max_size * var->bits_per_pixel / 8 * 2;
+
/* Allocate frame buffer memory and color map. */
- buf = dma_alloc_coherent(dev, info->fix.smem_len, &ch->dma_handle,
- GFP_KERNEL);
+ buf = dma_alloc_coherent(dev, max_size, &ch->dma_handle, GFP_KERNEL);
if (!buf) {
dev_err(dev, "unable to allocate buffer\n");
return -ENOMEM;
@@ -1530,16 +1642,27 @@ static int __devinit sh_mobile_lcdc_channel_init(struct sh_mobile_lcdc_chan *ch,
ret = fb_alloc_cmap(&info->cmap, PALETTE_NR, 0);
if (ret < 0) {
dev_err(dev, "unable to allocate cmap\n");
- dma_free_coherent(dev, info->fix.smem_len,
- buf, ch->dma_handle);
+ dma_free_coherent(dev, max_size, buf, ch->dma_handle);
return ret;
}
+ /* Initialize fixed screen information. Restrict pan to 2 lines steps
+ * for NV12 and NV21.
+ */
+ info->fix = sh_mobile_lcdc_fix;
info->fix.smem_start = ch->dma_handle;
- if (var->nonstd)
+ info->fix.smem_len = max_size;
+ if (cfg->fourcc == V4L2_PIX_FMT_NV12 ||
+ cfg->fourcc == V4L2_PIX_FMT_NV21)
+ info->fix.ypanstep = 2;
+
+ if (sh_mobile_format_is_yuv(var)) {
info->fix.line_length = var->xres;
- else
- info->fix.line_length = var->xres * (cfg->bpp / 8);
+ info->fix.visual = FB_VISUAL_FOURCC;
+ } else {
+ info->fix.line_length = var->xres * var->bits_per_pixel / 8;
+ info->fix.visual = FB_VISUAL_TRUECOLOR;
+ }
info->screen_base = buf;
info->device = dev;
@@ -1626,9 +1749,9 @@ static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev)
goto err1;
}
- /* for dual channel LCDC (MAIN + SUB) force shared bpp setting */
+ /* for dual channel LCDC (MAIN + SUB) force shared format setting */
if (num_channels == 2)
- priv->forced_bpp = pdata->ch[0].bpp;
+ priv->forced_fourcc = pdata->ch[0].fourcc;
priv->base = ioremap_nocache(res->start, resource_size(res));
if (!priv->base)
@@ -1675,13 +1798,10 @@ static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev)
if (error < 0)
goto err1;
- dev_info(info->dev,
- "registered %s/%s as %dx%d %dbpp.\n",
- pdev->name,
- (ch->cfg.chan == LCDC_CHAN_MAINLCD) ?
- "mainlcd" : "sublcd",
- info->var.xres, info->var.yres,
- ch->cfg.bpp);
+ dev_info(info->dev, "registered %s/%s as %dx%d %dbpp.\n",
+ pdev->name, (ch->cfg.chan == LCDC_CHAN_MAINLCD) ?
+ "mainlcd" : "sublcd", info->var.xres, info->var.yres,
+ info->var.bits_per_pixel);
/* deferred io mode: disable clock to save power */
if (info->fbdefio || info->state == FBINFO_STATE_SUSPENDED)
@@ -1709,18 +1829,7 @@ static struct platform_driver sh_mobile_lcdc_driver = {
.remove = sh_mobile_lcdc_remove,
};
-static int __init sh_mobile_lcdc_init(void)
-{
- return platform_driver_register(&sh_mobile_lcdc_driver);
-}
-
-static void __exit sh_mobile_lcdc_exit(void)
-{
- platform_driver_unregister(&sh_mobile_lcdc_driver);
-}
-
-module_init(sh_mobile_lcdc_init);
-module_exit(sh_mobile_lcdc_exit);
+module_platform_driver(sh_mobile_lcdc_driver);
MODULE_DESCRIPTION("SuperH Mobile LCDC Framebuffer driver");
MODULE_AUTHOR("Magnus Damm <damm@opensource.se>");
diff --git a/drivers/video/sh_mobile_meram.c b/drivers/video/sh_mobile_meram.c
index 4d63490209cd..f45d83ecfd21 100644
--- a/drivers/video/sh_mobile_meram.c
+++ b/drivers/video/sh_mobile_meram.c
@@ -679,18 +679,7 @@ static struct platform_driver sh_mobile_meram_driver = {
.remove = sh_mobile_meram_remove,
};
-static int __init sh_mobile_meram_init(void)
-{
- return platform_driver_register(&sh_mobile_meram_driver);
-}
-
-static void __exit sh_mobile_meram_exit(void)
-{
- platform_driver_unregister(&sh_mobile_meram_driver);
-}
-
-module_init(sh_mobile_meram_init);
-module_exit(sh_mobile_meram_exit);
+module_platform_driver(sh_mobile_meram_driver);
MODULE_DESCRIPTION("SuperH Mobile MERAM driver");
MODULE_AUTHOR("Damian Hobson-Garcia / Takanari Hayama");
diff --git a/drivers/video/sm501fb.c b/drivers/video/sm501fb.c
index a78254cf8e83..3690effbedcc 100644
--- a/drivers/video/sm501fb.c
+++ b/drivers/video/sm501fb.c
@@ -2230,18 +2230,7 @@ static struct platform_driver sm501fb_driver = {
},
};
-static int __devinit sm501fb_init(void)
-{
- return platform_driver_register(&sm501fb_driver);
-}
-
-static void __exit sm501fb_cleanup(void)
-{
- platform_driver_unregister(&sm501fb_driver);
-}
-
-module_init(sm501fb_init);
-module_exit(sm501fb_cleanup);
+module_platform_driver(sm501fb_driver);
module_param_named(mode, fb_mode, charp, 0);
MODULE_PARM_DESC(mode,
diff --git a/drivers/video/smscufx.c b/drivers/video/smscufx.c
index 3c22994ea31a..ccbfef5e828f 100644
--- a/drivers/video/smscufx.c
+++ b/drivers/video/smscufx.c
@@ -130,8 +130,8 @@ static struct usb_device_id id_table[] = {
MODULE_DEVICE_TABLE(usb, id_table);
/* module options */
-static int console; /* Optionally allow fbcon to consume first framebuffer */
-static int fb_defio = true; /* Optionally enable fb_defio mmap support */
+static bool console; /* Optionally allow fbcon to consume first framebuffer */
+static bool fb_defio = true; /* Optionally enable fb_defio mmap support */
/* ufx keeps a list of urbs for efficient bulk transfers */
static void ufx_urb_completion(struct urb *urb);
diff --git a/drivers/video/sstfb.c b/drivers/video/sstfb.c
index 2301c275d63a..111fb32e8769 100644
--- a/drivers/video/sstfb.c
+++ b/drivers/video/sstfb.c
@@ -93,11 +93,11 @@
/* initialized by setup */
-static int vgapass; /* enable VGA passthrough cable */
+static bool vgapass; /* enable VGA passthrough cable */
static int mem; /* mem size in MB, 0 = autodetect */
-static int clipping = 1; /* use clipping (slower, safer) */
+static bool clipping = 1; /* use clipping (slower, safer) */
static int gfxclk; /* force FBI freq in Mhz . Dangerous */
-static int slowpci; /* slow PCI settings */
+static bool slowpci; /* slow PCI settings */
/*
Possible default video modes: 800x600@60, 640x480@75, 1024x768@76, 640x480@60
diff --git a/drivers/video/tdfxfb.c b/drivers/video/tdfxfb.c
index a99b994c9b6b..e026724a3a56 100644
--- a/drivers/video/tdfxfb.c
+++ b/drivers/video/tdfxfb.c
@@ -169,7 +169,7 @@ static int nowrap = 1; /* not implemented (yet) */
static int hwcursor = 1;
static char *mode_option __devinitdata;
/* mtrr option */
-static int nomtrr __devinitdata;
+static bool nomtrr __devinitdata;
/* -------------------------------------------------------------------------
* Hardware-specific funcions
diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
index 1f868d0187a2..a19773149bd7 100644
--- a/drivers/video/udlfb.c
+++ b/drivers/video/udlfb.c
@@ -69,9 +69,9 @@ static struct usb_device_id id_table[] = {
MODULE_DEVICE_TABLE(usb, id_table);
/* module options */
-static int console = 1; /* Allow fbcon to open framebuffer */
-static int fb_defio = 1; /* Detect mmap writes using page faults */
-static int shadow = 1; /* Optionally disable shadow framebuffer */
+static bool console = 1; /* Allow fbcon to open framebuffer */
+static bool fb_defio = 1; /* Detect mmap writes using page faults */
+static bool shadow = 1; /* Optionally disable shadow framebuffer */
/* dlfb keeps a list of urbs for efficient bulk transfers */
static void dlfb_urb_completion(struct urb *urb);
diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
index 7f8472cc993b..e7f69ef572dc 100644
--- a/drivers/video/uvesafb.c
+++ b/drivers/video/uvesafb.c
@@ -44,11 +44,11 @@ static struct fb_fix_screeninfo uvesafb_fix __devinitdata = {
};
static int mtrr __devinitdata = 3; /* enable mtrr by default */
-static int blank = 1; /* enable blanking by default */
+static bool blank = 1; /* enable blanking by default */
static int ypan = 1; /* 0: scroll, 1: ypan, 2: ywrap */
static bool pmi_setpal __devinitdata = true; /* use PMI for palette changes */
-static int nocrtc __devinitdata; /* ignore CRTC settings */
-static int noedid __devinitdata; /* don't try DDC transfers */
+static bool nocrtc __devinitdata; /* ignore CRTC settings */
+static bool noedid __devinitdata; /* don't try DDC transfers */
static int vram_remap __devinitdata; /* set amt. of memory to be used */
static int vram_total __devinitdata; /* set total amount of memory */
static u16 maxclk __devinitdata; /* maximum pixel clock */
diff --git a/drivers/video/vfb.c b/drivers/video/vfb.c
index bf2f78065cf9..501a922aa9dc 100644
--- a/drivers/video/vfb.c
+++ b/drivers/video/vfb.c
@@ -110,7 +110,7 @@ static struct fb_fix_screeninfo vfb_fix __devinitdata = {
.accel = FB_ACCEL_NONE,
};
-static int vfb_enable __initdata = 0; /* disabled by default */
+static bool vfb_enable __initdata = 0; /* disabled by default */
module_param(vfb_enable, bool, 0);
static int vfb_check_var(struct fb_var_screeninfo *var,
diff --git a/drivers/video/vt8500lcdfb.c b/drivers/video/vt8500lcdfb.c
index 777c21dd7a6b..2a5fe6ede845 100644
--- a/drivers/video/vt8500lcdfb.c
+++ b/drivers/video/vt8500lcdfb.c
@@ -457,18 +457,7 @@ static struct platform_driver vt8500lcd_driver = {
},
};
-static int __init vt8500lcd_init(void)
-{
- return platform_driver_register(&vt8500lcd_driver);
-}
-
-static void __exit vt8500lcd_exit(void)
-{
- platform_driver_unregister(&vt8500lcd_driver);
-}
-
-module_init(vt8500lcd_init);
-module_exit(vt8500lcd_exit);
+module_platform_driver(vt8500lcd_driver);
MODULE_AUTHOR("Alexey Charkov <alchark@gmail.com>");
MODULE_DESCRIPTION("LCD controller driver for VIA VT8500");
diff --git a/drivers/video/w100fb.c b/drivers/video/w100fb.c
index 2375e5bbf572..90a2e30272ad 100644
--- a/drivers/video/w100fb.c
+++ b/drivers/video/w100fb.c
@@ -1620,18 +1620,7 @@ static struct platform_driver w100fb_driver = {
},
};
-int __init w100fb_init(void)
-{
- return platform_driver_register(&w100fb_driver);
-}
-
-void __exit w100fb_cleanup(void)
-{
- platform_driver_unregister(&w100fb_driver);
-}
-
-module_init(w100fb_init);
-module_exit(w100fb_cleanup);
+module_platform_driver(w100fb_driver);
MODULE_DESCRIPTION("ATI Imageon w100 framebuffer driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/video/wm8505fb.c b/drivers/video/wm8505fb.c
index 96e34a569169..c8703bd61b74 100644
--- a/drivers/video/wm8505fb.c
+++ b/drivers/video/wm8505fb.c
@@ -404,18 +404,7 @@ static struct platform_driver wm8505fb_driver = {
},
};
-static int __init wm8505fb_init(void)
-{
- return platform_driver_register(&wm8505fb_driver);
-}
-
-static void __exit wm8505fb_exit(void)
-{
- platform_driver_unregister(&wm8505fb_driver);
-}
-
-module_init(wm8505fb_init);
-module_exit(wm8505fb_exit);
+module_platform_driver(wm8505fb_driver);
MODULE_AUTHOR("Ed Spiridonov <edo.rus@gmail.com>");
MODULE_DESCRIPTION("Framebuffer driver for WMT WM8505");
diff --git a/drivers/video/wmt_ge_rops.c b/drivers/video/wmt_ge_rops.c
index 45832b7ef7d2..55be3865015b 100644
--- a/drivers/video/wmt_ge_rops.c
+++ b/drivers/video/wmt_ge_rops.c
@@ -167,18 +167,7 @@ static struct platform_driver wmt_ge_rops_driver = {
},
};
-static int __init wmt_ge_rops_init(void)
-{
- return platform_driver_register(&wmt_ge_rops_driver);
-}
-
-static void __exit wmt_ge_rops_exit(void)
-{
- platform_driver_unregister(&wmt_ge_rops_driver);
-}
-
-module_init(wmt_ge_rops_init);
-module_exit(wmt_ge_rops_exit);
+module_platform_driver(wmt_ge_rops_driver);
MODULE_AUTHOR("Alexey Charkov <alchark@gmail.com");
MODULE_DESCRIPTION("Accelerators for raster operations using "
diff --git a/drivers/video/xen-fbfront.c b/drivers/video/xen-fbfront.c
index beac52fc1c0e..cb4529c40d74 100644
--- a/drivers/video/xen-fbfront.c
+++ b/drivers/video/xen-fbfront.c
@@ -671,20 +671,17 @@ InitWait:
}
}
-static struct xenbus_device_id xenfb_ids[] = {
+static const struct xenbus_device_id xenfb_ids[] = {
{ "vfb" },
{ "" }
};
-static struct xenbus_driver xenfb_driver = {
- .name = "vfb",
- .owner = THIS_MODULE,
- .ids = xenfb_ids,
+static DEFINE_XENBUS_DRIVER(xenfb, ,
.probe = xenfb_probe,
.remove = xenfb_remove,
.resume = xenfb_resume,
.otherend_changed = xenfb_backend_changed,
-};
+);
static int __init xenfb_init(void)
{
diff --git a/drivers/video/xilinxfb.c b/drivers/video/xilinxfb.c
index fcb6cd90f64d..18084525402a 100644
--- a/drivers/video/xilinxfb.c
+++ b/drivers/video/xilinxfb.c
@@ -511,25 +511,7 @@ static struct platform_driver xilinxfb_of_driver = {
},
};
-
-/* ---------------------------------------------------------------------
- * Module setup and teardown
- */
-
-static int __init
-xilinxfb_init(void)
-{
- return platform_driver_register(&xilinxfb_of_driver);
-}
-
-static void __exit
-xilinxfb_cleanup(void)
-{
- platform_driver_unregister(&xilinxfb_of_driver);
-}
-
-module_init(xilinxfb_init);
-module_exit(xilinxfb_cleanup);
+module_platform_driver(xilinxfb_of_driver);
MODULE_AUTHOR("MontaVista Software, Inc. <source@mvista.com>");
MODULE_DESCRIPTION("Xilinx TFT frame buffer driver");
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 94fd738a7741..95aeedf198f8 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -1,4 +1,5 @@
-/* Virtio balloon implementation, inspired by Dor Loar and Marcelo
+/*
+ * Virtio balloon implementation, inspired by Dor Laor and Marcelo
* Tosatti's implementations.
*
* Copyright 2008 Rusty Russell IBM Corporation
@@ -17,7 +18,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
-//#define DEBUG
+
#include <linux/virtio.h>
#include <linux/virtio_balloon.h>
#include <linux/swap.h>
@@ -87,7 +88,7 @@ static void tell_host(struct virtio_balloon *vb, struct virtqueue *vq)
init_completion(&vb->acked);
/* We should always be able to add one buffer to an empty queue. */
- if (virtqueue_add_buf(vq, &sg, 1, 0, vb) < 0)
+ if (virtqueue_add_buf(vq, &sg, 1, 0, vb, GFP_KERNEL) < 0)
BUG();
virtqueue_kick(vq);
@@ -149,7 +150,6 @@ static void leak_balloon(struct virtio_balloon *vb, size_t num)
vb->num_pages--;
}
-
/*
* Note that if
* virtio_has_feature(vdev, VIRTIO_BALLOON_F_MUST_TELL_HOST);
@@ -220,7 +220,7 @@ static void stats_handle_request(struct virtio_balloon *vb)
vq = vb->stats_vq;
sg_init_one(&sg, vb->stats, sizeof(vb->stats));
- if (virtqueue_add_buf(vq, &sg, 1, 0, vb) < 0)
+ if (virtqueue_add_buf(vq, &sg, 1, 0, vb, GFP_KERNEL) < 0)
BUG();
virtqueue_kick(vq);
}
@@ -275,32 +275,21 @@ static int balloon(void *_vballoon)
return 0;
}
-static int virtballoon_probe(struct virtio_device *vdev)
+static int init_vqs(struct virtio_balloon *vb)
{
- struct virtio_balloon *vb;
struct virtqueue *vqs[3];
vq_callback_t *callbacks[] = { balloon_ack, balloon_ack, stats_request };
const char *names[] = { "inflate", "deflate", "stats" };
int err, nvqs;
- vdev->priv = vb = kmalloc(sizeof(*vb), GFP_KERNEL);
- if (!vb) {
- err = -ENOMEM;
- goto out;
- }
-
- INIT_LIST_HEAD(&vb->pages);
- vb->num_pages = 0;
- init_waitqueue_head(&vb->config_change);
- vb->vdev = vdev;
- vb->need_stats_update = 0;
-
- /* We expect two virtqueues: inflate and deflate,
- * and optionally stat. */
+ /*
+ * We expect two virtqueues: inflate and deflate, and
+ * optionally stat.
+ */
nvqs = virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ) ? 3 : 2;
- err = vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names);
+ err = vb->vdev->config->find_vqs(vb->vdev, nvqs, vqs, callbacks, names);
if (err)
- goto out_free_vb;
+ return err;
vb->inflate_vq = vqs[0];
vb->deflate_vq = vqs[1];
@@ -313,10 +302,34 @@ static int virtballoon_probe(struct virtio_device *vdev)
* use it to signal us later.
*/
sg_init_one(&sg, vb->stats, sizeof vb->stats);
- if (virtqueue_add_buf(vb->stats_vq, &sg, 1, 0, vb) < 0)
+ if (virtqueue_add_buf(vb->stats_vq, &sg, 1, 0, vb, GFP_KERNEL)
+ < 0)
BUG();
virtqueue_kick(vb->stats_vq);
}
+ return 0;
+}
+
+static int virtballoon_probe(struct virtio_device *vdev)
+{
+ struct virtio_balloon *vb;
+ int err;
+
+ vdev->priv = vb = kmalloc(sizeof(*vb), GFP_KERNEL);
+ if (!vb) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ INIT_LIST_HEAD(&vb->pages);
+ vb->num_pages = 0;
+ init_waitqueue_head(&vb->config_change);
+ vb->vdev = vdev;
+ vb->need_stats_update = 0;
+
+ err = init_vqs(vb);
+ if (err)
+ goto out_free_vb;
vb->thread = kthread_run(balloon, vb, "vballoon");
if (IS_ERR(vb->thread)) {
@@ -351,6 +364,48 @@ static void __devexit virtballoon_remove(struct virtio_device *vdev)
kfree(vb);
}
+#ifdef CONFIG_PM
+static int virtballoon_freeze(struct virtio_device *vdev)
+{
+ /*
+ * The kthread is already frozen by the PM core before this
+ * function is called.
+ */
+
+ /* Ensure we don't get any more requests from the host */
+ vdev->config->reset(vdev);
+ vdev->config->del_vqs(vdev);
+ return 0;
+}
+
+static int virtballoon_thaw(struct virtio_device *vdev)
+{
+ return init_vqs(vdev->priv);
+}
+
+static int virtballoon_restore(struct virtio_device *vdev)
+{
+ struct virtio_balloon *vb = vdev->priv;
+ struct page *page, *page2;
+
+ /* We're starting from a clean slate */
+ vb->num_pages = 0;
+
+ /*
+ * If a request wasn't complete at the time of freezing, this
+ * could have been set.
+ */
+ vb->need_stats_update = 0;
+
+ /* We don't have these pages in the balloon anymore! */
+ list_for_each_entry_safe(page, page2, &vb->pages, lru) {
+ list_del(&page->lru);
+ totalram_pages++;
+ }
+ return init_vqs(vdev->priv);
+}
+#endif
+
static unsigned int features[] = {
VIRTIO_BALLOON_F_MUST_TELL_HOST,
VIRTIO_BALLOON_F_STATS_VQ,
@@ -365,6 +420,11 @@ static struct virtio_driver virtio_balloon_driver = {
.probe = virtballoon_probe,
.remove = __devexit_p(virtballoon_remove),
.config_changed = virtballoon_changed,
+#ifdef CONFIG_PM
+ .freeze = virtballoon_freeze,
+ .restore = virtballoon_restore,
+ .thaw = virtballoon_thaw,
+#endif
};
static int __init init(void)
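The reshuffle above is what enables the new PM hooks: probe() now only allocates driver state, while the virtqueue setup lives in init_vqs(), which thaw/restore can call again after the device has been reset across hibernation. The general shape, with hypothetical foo_* names standing in for any virtio driver adopting the same split:

static int foo_freeze(struct virtio_device *vdev)
{
	/* Quiesce the device and drop the rings before the image is written. */
	vdev->config->reset(vdev);
	vdev->config->del_vqs(vdev);
	return 0;
}

static int foo_restore(struct virtio_device *vdev)
{
	/* The rings are gone; rebuild them from the state kept in vdev->priv. */
	return foo_init_vqs(vdev->priv);	/* hypothetical helper, like init_vqs() above */
}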
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index 7317dc2ec426..01d6dc250d5c 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -310,8 +310,8 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index,
vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
/* Create the vring */
- vq = vring_new_virtqueue(info->num, VIRTIO_MMIO_VRING_ALIGN,
- vdev, info->queue, vm_notify, callback, name);
+ vq = vring_new_virtqueue(info->num, VIRTIO_MMIO_VRING_ALIGN, vdev,
+ true, info->queue, vm_notify, callback, name);
if (!vq) {
err = -ENOMEM;
goto error_new_virtqueue;
@@ -361,7 +361,12 @@ static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs,
return 0;
}
+static const char *vm_bus_name(struct virtio_device *vdev)
+{
+ struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
+ return vm_dev->pdev->name;
+}
static struct virtio_config_ops virtio_mmio_config_ops = {
.get = vm_get,
@@ -373,6 +378,7 @@ static struct virtio_config_ops virtio_mmio_config_ops = {
.del_vqs = vm_del_vqs,
.get_features = vm_get_features,
.finalize_features = vm_finalize_features,
+ .bus_name = vm_bus_name,
};
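The new bus_name op lets a virtio driver report which transport instance backs its device, for example in ethtool-style bus_info strings. A consumer-side sketch; foo_report_bus() is hypothetical and the "virtio" fallback is an assumption for transports that leave the op unset:

static void foo_report_bus(struct virtio_device *vdev)
{
	const char *bus = vdev->config->bus_name ?
			  vdev->config->bus_name(vdev) : "virtio";

	dev_info(&vdev->dev, "attached via %s\n", bus);
}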
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
index 03d1984bd363..635e1efb3792 100644
--- a/drivers/virtio/virtio_pci.c
+++ b/drivers/virtio/virtio_pci.c
@@ -55,6 +55,10 @@ struct virtio_pci_device
unsigned msix_vectors;
/* Vectors allocated, excluding per-vq vectors if any */
unsigned msix_used_vectors;
+
+ /* Status saved during hibernate/restore */
+ u8 saved_status;
+
/* Whether we have vector per vq */
bool per_vq_vectors;
};
@@ -414,8 +418,8 @@ static struct virtqueue *setup_vq(struct virtio_device *vdev, unsigned index,
vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
/* create the vring */
- vq = vring_new_virtqueue(info->num, VIRTIO_PCI_VRING_ALIGN,
- vdev, info->queue, vp_notify, callback, name);
+ vq = vring_new_virtqueue(info->num, VIRTIO_PCI_VRING_ALIGN, vdev,
+ true, info->queue, vp_notify, callback, name);
if (!vq) {
err = -ENOMEM;
goto out_activate_queue;
@@ -598,6 +602,13 @@ static int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
false, false);
}
+static const char *vp_bus_name(struct virtio_device *vdev)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+
+ return pci_name(vp_dev->pci_dev);
+}
+
static struct virtio_config_ops virtio_pci_config_ops = {
.get = vp_get,
.set = vp_set,
@@ -608,6 +619,7 @@ static struct virtio_config_ops virtio_pci_config_ops = {
.del_vqs = vp_del_vqs,
.get_features = vp_get_features,
.finalize_features = vp_finalize_features,
+ .bus_name = vp_bus_name,
};
static void virtio_pci_release_dev(struct device *_d)
@@ -708,19 +720,114 @@ static void __devexit virtio_pci_remove(struct pci_dev *pci_dev)
}
#ifdef CONFIG_PM
-static int virtio_pci_suspend(struct pci_dev *pci_dev, pm_message_t state)
+static int virtio_pci_suspend(struct device *dev)
{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+
pci_save_state(pci_dev);
pci_set_power_state(pci_dev, PCI_D3hot);
return 0;
}
-static int virtio_pci_resume(struct pci_dev *pci_dev)
+static int virtio_pci_resume(struct device *dev)
{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+
pci_restore_state(pci_dev);
pci_set_power_state(pci_dev, PCI_D0);
return 0;
}
+
+static int virtio_pci_freeze(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
+ struct virtio_driver *drv;
+ int ret;
+
+ drv = container_of(vp_dev->vdev.dev.driver,
+ struct virtio_driver, driver);
+
+ ret = 0;
+ vp_dev->saved_status = vp_get_status(&vp_dev->vdev);
+ if (drv && drv->freeze)
+ ret = drv->freeze(&vp_dev->vdev);
+
+ if (!ret)
+ pci_disable_device(pci_dev);
+ return ret;
+}
+
+static int restore_common(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
+ int ret;
+
+ ret = pci_enable_device(pci_dev);
+ if (ret)
+ return ret;
+ pci_set_master(pci_dev);
+ vp_finalize_features(&vp_dev->vdev);
+
+ return ret;
+}
+
+static int virtio_pci_thaw(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
+ struct virtio_driver *drv;
+ int ret;
+
+ ret = restore_common(dev);
+ if (ret)
+ return ret;
+
+ drv = container_of(vp_dev->vdev.dev.driver,
+ struct virtio_driver, driver);
+
+ if (drv && drv->thaw)
+ ret = drv->thaw(&vp_dev->vdev);
+ else if (drv && drv->restore)
+ ret = drv->restore(&vp_dev->vdev);
+
+ /* Finally, tell the device we're all set */
+ if (!ret)
+ vp_set_status(&vp_dev->vdev, vp_dev->saved_status);
+
+ return ret;
+}
+
+static int virtio_pci_restore(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
+ struct virtio_driver *drv;
+ int ret;
+
+ drv = container_of(vp_dev->vdev.dev.driver,
+ struct virtio_driver, driver);
+
+ ret = restore_common(dev);
+ if (!ret && drv && drv->restore)
+ ret = drv->restore(&vp_dev->vdev);
+
+ /* Finally, tell the device we're all set */
+ if (!ret)
+ vp_set_status(&vp_dev->vdev, vp_dev->saved_status);
+
+ return ret;
+}
+
+static const struct dev_pm_ops virtio_pci_pm_ops = {
+ .suspend = virtio_pci_suspend,
+ .resume = virtio_pci_resume,
+ .freeze = virtio_pci_freeze,
+ .thaw = virtio_pci_thaw,
+ .restore = virtio_pci_restore,
+ .poweroff = virtio_pci_suspend,
+};
#endif
static struct pci_driver virtio_pci_driver = {
@@ -729,8 +836,7 @@ static struct pci_driver virtio_pci_driver = {
.probe = virtio_pci_probe,
.remove = __devexit_p(virtio_pci_remove),
#ifdef CONFIG_PM
- .suspend = virtio_pci_suspend,
- .resume = virtio_pci_resume,
+ .driver.pm = &virtio_pci_pm_ops,
#endif
};
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index c7a2c208f6ea..79e1b292c030 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -22,23 +22,27 @@
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/hrtimer.h>
/* virtio guest is communicating with a virtual "device" that actually runs on
* a host processor. Memory barriers are used to control SMP effects. */
#ifdef CONFIG_SMP
/* Where possible, use SMP barriers which are more lightweight than mandatory
* barriers, because mandatory barriers control MMIO effects on accesses
- * through relaxed memory I/O windows (which virtio does not use). */
-#define virtio_mb() smp_mb()
-#define virtio_rmb() smp_rmb()
-#define virtio_wmb() smp_wmb()
+ * through relaxed memory I/O windows (which virtio-pci does not use). */
+#define virtio_mb(vq) \
+ do { if ((vq)->weak_barriers) smp_mb(); else mb(); } while(0)
+#define virtio_rmb(vq) \
+ do { if ((vq)->weak_barriers) smp_rmb(); else rmb(); } while(0)
+#define virtio_wmb(vq) \
+	do { if ((vq)->weak_barriers) smp_wmb(); else wmb(); } while(0)
#else
/* We must force memory ordering even if guest is UP since host could be
* running on another CPU, but SMP barriers are defined to barrier() in that
* configuration. So fall back to mandatory barriers instead. */
-#define virtio_mb() mb()
-#define virtio_rmb() rmb()
-#define virtio_wmb() wmb()
+#define virtio_mb(vq) mb()
+#define virtio_rmb(vq) rmb()
+#define virtio_wmb(vq) wmb()
#endif
#ifdef DEBUG
@@ -77,6 +81,9 @@ struct vring_virtqueue
/* Actual memory layout for this queue */
struct vring vring;
+ /* Can we use weak barriers? */
+ bool weak_barriers;
+
/* Other side has made a mess, don't try any more. */
bool broken;
@@ -102,6 +109,10 @@ struct vring_virtqueue
#ifdef DEBUG
/* They're supposed to lock for us. */
unsigned int in_use;
+
+ /* Figure out if their kicks are too delayed. */
+ bool last_add_time_valid;
+ ktime_t last_add_time;
#endif
/* Tokens for callbacks. */
@@ -160,12 +171,29 @@ static int vring_add_indirect(struct vring_virtqueue *vq,
return head;
}
-int virtqueue_add_buf_gfp(struct virtqueue *_vq,
- struct scatterlist sg[],
- unsigned int out,
- unsigned int in,
- void *data,
- gfp_t gfp)
+/**
+ * virtqueue_add_buf - expose buffer to other end
+ * @vq: the struct virtqueue we're talking about.
+ * @sg: the description of the buffer(s).
+ * @out_num: the number of sg readable by other side
+ * @in_num: the number of sg which are writable (after readable ones)
+ * @data: the token identifying the buffer.
+ * @gfp: how to do memory allocations (if necessary).
+ *
+ * Caller must ensure we don't call this with other virtqueue operations
+ * at the same time (except where noted).
+ *
+ * Returns remaining capacity of queue or a negative error
+ * (ie. ENOSPC). Note that it only really makes sense to treat all
+ * positive return values as "available": indirect buffers mean that
+ * we can put an entire sg[] array inside a single queue entry.
+ */
+int virtqueue_add_buf(struct virtqueue *_vq,
+ struct scatterlist sg[],
+ unsigned int out,
+ unsigned int in,
+ void *data,
+ gfp_t gfp)
{
struct vring_virtqueue *vq = to_vvq(_vq);
unsigned int i, avail, uninitialized_var(prev);
@@ -175,6 +203,19 @@ int virtqueue_add_buf_gfp(struct virtqueue *_vq,
BUG_ON(data == NULL);
+#ifdef DEBUG
+ {
+ ktime_t now = ktime_get();
+
+ /* No kick or get, with .1 second between? Warn. */
+ if (vq->last_add_time_valid)
+ WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time))
+ > 100);
+ vq->last_add_time = now;
+ vq->last_add_time_valid = true;
+ }
+#endif
+
/* If the host supports indirect descriptor tables, and we have multiple
* buffers, then go indirect. FIXME: tune this threshold */
if (vq->indirect && (out + in) > 1 && vq->num_free) {
@@ -227,40 +268,102 @@ add_head:
vq->data[head] = data;
/* Put entry in available array (but don't update avail->idx until they
- * do sync). FIXME: avoid modulus here? */
- avail = (vq->vring.avail->idx + vq->num_added++) % vq->vring.num;
+ * do sync). */
+ avail = (vq->vring.avail->idx & (vq->vring.num-1));
vq->vring.avail->ring[avail] = head;
+ /* Descriptors and available array need to be set before we expose the
+ * new available array entries. */
+ virtio_wmb(vq);
+ vq->vring.avail->idx++;
+ vq->num_added++;
+
+ /* This is very unlikely, but theoretically possible. Kick
+ * just in case. */
+ if (unlikely(vq->num_added == (1 << 16) - 1))
+ virtqueue_kick(_vq);
+
pr_debug("Added buffer head %i to %p\n", head, vq);
END_USE(vq);
return vq->num_free;
}
-EXPORT_SYMBOL_GPL(virtqueue_add_buf_gfp);
+EXPORT_SYMBOL_GPL(virtqueue_add_buf);
-void virtqueue_kick(struct virtqueue *_vq)
+/**
+ * virtqueue_kick_prepare - first half of split virtqueue_kick call.
+ * @vq: the struct virtqueue
+ *
+ * Instead of virtqueue_kick(), you can do:
+ * if (virtqueue_kick_prepare(vq))
+ * virtqueue_notify(vq);
+ *
+ * This is sometimes useful because the virtqueue_kick_prepare() needs
+ * to be serialized, but the actual virtqueue_notify() call does not.
+ */
+bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
u16 new, old;
+ bool needs_kick;
+
START_USE(vq);
/* Descriptors and available array need to be set before we expose the
* new available array entries. */
- virtio_wmb();
+ virtio_wmb(vq);
- old = vq->vring.avail->idx;
- new = vq->vring.avail->idx = old + vq->num_added;
+ old = vq->vring.avail->idx - vq->num_added;
+ new = vq->vring.avail->idx;
vq->num_added = 0;
- /* Need to update avail index before checking if we should notify */
- virtio_mb();
-
- if (vq->event ?
- vring_need_event(vring_avail_event(&vq->vring), new, old) :
- !(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY))
- /* Prod other side to tell it about changes. */
- vq->notify(&vq->vq);
+#ifdef DEBUG
+ if (vq->last_add_time_valid) {
+ WARN_ON(ktime_to_ms(ktime_sub(ktime_get(),
+ vq->last_add_time)) > 100);
+ }
+ vq->last_add_time_valid = false;
+#endif
+ if (vq->event) {
+ needs_kick = vring_need_event(vring_avail_event(&vq->vring),
+ new, old);
+ } else {
+ needs_kick = !(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY);
+ }
END_USE(vq);
+ return needs_kick;
+}
+EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
+
+/**
+ * virtqueue_notify - second half of split virtqueue_kick call.
+ * @vq: the struct virtqueue
+ *
+ * This does not need to be serialized.
+ */
+void virtqueue_notify(struct virtqueue *_vq)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+
+ /* Prod other side to tell it about changes. */
+ vq->notify(_vq);
+}
+EXPORT_SYMBOL_GPL(virtqueue_notify);
+
+/**
+ * virtqueue_kick - update after add_buf
+ * @vq: the struct virtqueue
+ *
+ * After one or more virtqueue_add_buf calls, invoke this to kick
+ * the other side.
+ *
+ * Caller must ensure we don't call this with other virtqueue
+ * operations at the same time (except where noted).
+ */
+void virtqueue_kick(struct virtqueue *vq)
+{
+ if (virtqueue_kick_prepare(vq))
+ virtqueue_notify(vq);
}
EXPORT_SYMBOL_GPL(virtqueue_kick);
@@ -294,11 +397,28 @@ static inline bool more_used(const struct vring_virtqueue *vq)
return vq->last_used_idx != vq->vring.used->idx;
}
+/**
+ * virtqueue_get_buf - get the next used buffer
+ * @vq: the struct virtqueue we're talking about.
+ * @len: the length written into the buffer
+ *
+ * If the driver wrote data into the buffer, @len will be set to the
+ * amount written. This means you don't need to clear the buffer
+ * beforehand to ensure there's no data leakage in the case of short
+ * writes.
+ *
+ * Caller must ensure we don't call this with other virtqueue
+ * operations at the same time (except where noted).
+ *
+ * Returns NULL if there are no used buffers, or the "data" token
+ * handed to virtqueue_add_buf().
+ */
void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
struct vring_virtqueue *vq = to_vvq(_vq);
void *ret;
unsigned int i;
+ u16 last_used;
START_USE(vq);
@@ -314,10 +434,11 @@ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
}
/* Only get used array entries after they have been exposed by host. */
- virtio_rmb();
+ virtio_rmb(vq);
- i = vq->vring.used->ring[vq->last_used_idx%vq->vring.num].id;
- *len = vq->vring.used->ring[vq->last_used_idx%vq->vring.num].len;
+ last_used = (vq->last_used_idx & (vq->vring.num - 1));
+ i = vq->vring.used->ring[last_used].id;
+ *len = vq->vring.used->ring[last_used].len;
if (unlikely(i >= vq->vring.num)) {
BAD_RING(vq, "id %u out of range\n", i);
@@ -337,14 +458,27 @@ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
* the read in the next get_buf call. */
if (!(vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) {
vring_used_event(&vq->vring) = vq->last_used_idx;
- virtio_mb();
+ virtio_mb(vq);
}
+#ifdef DEBUG
+ vq->last_add_time_valid = false;
+#endif
+
END_USE(vq);
return ret;
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);
+/**
+ * virtqueue_disable_cb - disable callbacks
+ * @vq: the struct virtqueue we're talking about.
+ *
+ * Note that this is not necessarily synchronous, hence unreliable and only
+ * useful as an optimization.
+ *
+ * Unlike other operations, this need not be serialized.
+ */
void virtqueue_disable_cb(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
@@ -353,6 +487,17 @@ void virtqueue_disable_cb(struct virtqueue *_vq)
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
+/**
+ * virtqueue_enable_cb - restart callbacks after disable_cb.
+ * @vq: the struct virtqueue we're talking about.
+ *
+ * This re-enables callbacks; it returns "false" if there are pending
+ * buffers in the queue, to detect a possible race between the driver
+ * checking for more work, and enabling callbacks.
+ *
+ * Caller must ensure we don't call this with other virtqueue
+ * operations at the same time (except where noted).
+ */
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
@@ -366,7 +511,7 @@ bool virtqueue_enable_cb(struct virtqueue *_vq)
* entry. Always do both to keep code simple. */
vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
vring_used_event(&vq->vring) = vq->last_used_idx;
- virtio_mb();
+ virtio_mb(vq);
if (unlikely(more_used(vq))) {
END_USE(vq);
return false;
@@ -377,6 +522,19 @@ bool virtqueue_enable_cb(struct virtqueue *_vq)
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
+/**
+ * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
+ * @vq: the struct virtqueue we're talking about.
+ *
+ * This re-enables callbacks but hints to the other side to delay
+ * interrupts until most of the available buffers have been processed;
+ * it returns "false" if there are many pending buffers in the queue,
+ * to detect a possible race between the driver checking for more work,
+ * and enabling callbacks.
+ *
+ * Caller must ensure we don't call this with other virtqueue
+ * operations at the same time (except where noted).
+ */
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
@@ -393,7 +551,7 @@ bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
/* TODO: tune this threshold */
bufs = (u16)(vq->vring.avail->idx - vq->last_used_idx) * 3 / 4;
vring_used_event(&vq->vring) = vq->last_used_idx + bufs;
- virtio_mb();
+ virtio_mb(vq);
if (unlikely((u16)(vq->vring.used->idx - vq->last_used_idx) > bufs)) {
END_USE(vq);
return false;
@@ -404,6 +562,14 @@ bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
+/**
+ * virtqueue_detach_unused_buf - detach first unused buffer
+ * @vq: the struct virtqueue we're talking about.
+ *
+ * Returns NULL or the "data" token handed to virtqueue_add_buf().
+ * This is not valid on an active queue; it is useful only for device
+ * shutdown.
+ */
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
@@ -453,6 +619,7 @@ EXPORT_SYMBOL_GPL(vring_interrupt);
struct virtqueue *vring_new_virtqueue(unsigned int num,
unsigned int vring_align,
struct virtio_device *vdev,
+ bool weak_barriers,
void *pages,
void (*notify)(struct virtqueue *),
void (*callback)(struct virtqueue *),
@@ -476,12 +643,14 @@ struct virtqueue *vring_new_virtqueue(unsigned int num,
vq->vq.vdev = vdev;
vq->vq.name = name;
vq->notify = notify;
+ vq->weak_barriers = weak_barriers;
vq->broken = false;
vq->last_used_idx = 0;
vq->num_added = 0;
list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
vq->in_use = false;
+ vq->last_add_time_valid = false;
#endif
vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);
@@ -530,7 +699,13 @@ void vring_transport_features(struct virtio_device *vdev)
}
EXPORT_SYMBOL_GPL(vring_transport_features);
-/* return the size of the vring within the virtqueue */
+/**
+ * virtqueue_get_vring_size - return the size of the virtqueue's vring
+ * @vq: the struct virtqueue containing the vring of interest.
+ *
+ * Returns the size of the vring. This is mainly used for boasting to
+ * userspace. Unlike other operations, this need not be serialized.
+ */
unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
{
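Taken together, the kernel-doc added above spells out the calling convention: add and reclaim buffers under the driver's own serialization, and optionally split the kick so that only the unserialized virtqueue_notify() happens outside the critical section. A usage sketch; the foo_* names and the foo_complete() hook are illustrative, not from the patch:

static int foo_send(struct virtqueue *vq, void *buf, unsigned int len)
{
	struct scatterlist sg;
	int err;

	sg_init_one(&sg, buf, len);

	/* One sg readable by the host, none writable; buf doubles as the token. */
	err = virtqueue_add_buf(vq, &sg, 1, 0, buf, GFP_ATOMIC);
	if (err < 0)
		return err;

	/* Split kick: prepare must be serialized, notify need not be. */
	if (virtqueue_kick_prepare(vq))
		virtqueue_notify(vq);
	return 0;
}

static void foo_reclaim(struct virtqueue *vq)
{
	unsigned int len;
	void *buf;

	while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
		foo_complete(buf, len);	/* hypothetical completion hook */
}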
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 79fd606b7cd5..877b107f77a7 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -772,6 +772,19 @@ config SMSC37B787_WDT
Most people will say N.
+config VIA_WDT
+ tristate "VIA Watchdog Timer"
+ depends on X86
+ select WATCHDOG_CORE
+ ---help---
+ This is the driver for the hardware watchdog timer on VIA
+ southbridge chipset CX700, VX800/VX820 or VX855/VX875.
+
+ To compile this driver as a module, choose M here; the module
+ will be called via_wdt.
+
+ Most people will say N.
+
config W83627HF_WDT
tristate "W83627HF/W83627DHG Watchdog Timer"
depends on X86
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index fe893e91935b..e8f479a16402 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -99,6 +99,7 @@ obj-$(CONFIG_SBC7240_WDT) += sbc7240_wdt.o
obj-$(CONFIG_CPU5_WDT) += cpu5wdt.o
obj-$(CONFIG_SMSC_SCH311X_WDT) += sch311x_wdt.o
obj-$(CONFIG_SMSC37B787_WDT) += smsc37b787_wdt.o
+obj-$(CONFIG_VIA_WDT) += via_wdt.o
obj-$(CONFIG_W83627HF_WDT) += w83627hf_wdt.o
obj-$(CONFIG_W83697HF_WDT) += w83697hf_wdt.o
obj-$(CONFIG_W83697UG_WDT) += w83697ug_wdt.o
diff --git a/drivers/watchdog/ar7_wdt.c b/drivers/watchdog/ar7_wdt.c
index b29221783598..502773ad5acd 100644
--- a/drivers/watchdog/ar7_wdt.c
+++ b/drivers/watchdog/ar7_wdt.c
@@ -70,8 +70,8 @@ struct ar7_wdt {
};
static unsigned long wdt_is_open;
-static spinlock_t wdt_lock;
static unsigned expect_close;
+static DEFINE_SPINLOCK(wdt_lock);
/* XXX currently fixed, allows max margin ~68.72 secs */
#define prescale_value 0xffff
@@ -280,8 +280,6 @@ static int __devinit ar7_wdt_probe(struct platform_device *pdev)
{
int rc;
- spin_lock_init(&wdt_lock);
-
ar7_regs_wdt =
platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
if (!ar7_regs_wdt) {
@@ -355,15 +353,4 @@ static struct platform_driver ar7_wdt_driver = {
},
};
-static int __init ar7_wdt_init(void)
-{
- return platform_driver_register(&ar7_wdt_driver);
-}
-
-static void __exit ar7_wdt_cleanup(void)
-{
- platform_driver_unregister(&ar7_wdt_driver);
-}
-
-module_init(ar7_wdt_init);
-module_exit(ar7_wdt_cleanup);
+module_platform_driver(ar7_wdt_driver);
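The ar7_wdt change shows another pattern that recurs through the watchdog drivers below: a file-scope spinlock_t plus a runtime spin_lock_init() call becomes a statically initialized lock. DEFINE_SPINLOCK() (from <linux/spinlock_types.h>) is roughly:

/* Paraphrased sketch of the macro. */
#define DEFINE_SPINLOCK(x)	spinlock_t x = __SPIN_LOCK_UNLOCKED(x)

so the lock is valid from the moment the module is loaded and the explicit init call in the probe or init path can be dropped.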
diff --git a/drivers/watchdog/at91sam9_wdt.c b/drivers/watchdog/at91sam9_wdt.c
index 87445b2d72a7..00562566ef5f 100644
--- a/drivers/watchdog/at91sam9_wdt.c
+++ b/drivers/watchdog/at91sam9_wdt.c
@@ -35,6 +35,11 @@
#define DRV_NAME "AT91SAM9 Watchdog"
+#define wdt_read(field) \
+ __raw_readl(at91wdt_private.base + field)
+#define wdt_write(field, val) \
+ __raw_writel((val), at91wdt_private.base + field)
+
/* AT91SAM9 watchdog runs a 12bit counter @ 256Hz,
* use this to convert a watchdog
* value from/to milliseconds.
@@ -63,6 +68,7 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
static void at91_ping(unsigned long data);
static struct {
+ void __iomem *base;
unsigned long next_heartbeat; /* the next_heartbeat for the timer */
unsigned long open;
char expect_close;
@@ -77,7 +83,7 @@ static struct {
*/
static inline void at91_wdt_reset(void)
{
- at91_sys_write(AT91_WDT_CR, AT91_WDT_KEY | AT91_WDT_WDRSTT);
+ wdt_write(AT91_WDT_CR, AT91_WDT_KEY | AT91_WDT_WDRSTT);
}
/*
@@ -132,7 +138,7 @@ static int at91_wdt_settimeout(unsigned int timeout)
unsigned int mr;
/* Check if disabled */
- mr = at91_sys_read(AT91_WDT_MR);
+ mr = wdt_read(AT91_WDT_MR);
if (mr & AT91_WDT_WDDIS) {
printk(KERN_ERR DRV_NAME": sorry, watchdog is disabled\n");
return -EIO;
@@ -149,7 +155,7 @@ static int at91_wdt_settimeout(unsigned int timeout)
| AT91_WDT_WDDBGHLT /* disabled in debug mode */
| AT91_WDT_WDD /* restart at any time */
| (timeout & AT91_WDT_WDV); /* timer value */
- at91_sys_write(AT91_WDT_MR, reg);
+ wdt_write(AT91_WDT_MR, reg);
return 0;
}
@@ -248,12 +254,22 @@ static struct miscdevice at91wdt_miscdev = {
static int __init at91wdt_probe(struct platform_device *pdev)
{
+ struct resource *r;
int res;
if (at91wdt_miscdev.parent)
return -EBUSY;
at91wdt_miscdev.parent = &pdev->dev;
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r)
+ return -ENODEV;
+ at91wdt_private.base = ioremap(r->start, resource_size(r));
+ if (!at91wdt_private.base) {
+ dev_err(&pdev->dev, "failed to map registers, aborting.\n");
+ return -ENOMEM;
+ }
+
/* Set watchdog */
res = at91_wdt_settimeout(ms_to_ticks(WDT_HW_TIMEOUT * 1000));
if (res)
diff --git a/drivers/watchdog/at91sam9_wdt.h b/drivers/watchdog/at91sam9_wdt.h
index 757f9cab5c82..c6fbb2e6c41b 100644
--- a/drivers/watchdog/at91sam9_wdt.h
+++ b/drivers/watchdog/at91sam9_wdt.h
@@ -16,11 +16,11 @@
#ifndef AT91_WDT_H
#define AT91_WDT_H
-#define AT91_WDT_CR (AT91_WDT + 0x00) /* Watchdog Control Register */
+#define AT91_WDT_CR 0x00 /* Watchdog Control Register */
#define AT91_WDT_WDRSTT (1 << 0) /* Restart */
#define AT91_WDT_KEY (0xa5 << 24) /* KEY Password */
-#define AT91_WDT_MR (AT91_WDT + 0x04) /* Watchdog Mode Register */
+#define AT91_WDT_MR 0x04 /* Watchdog Mode Register */
#define AT91_WDT_WDV (0xfff << 0) /* Counter Value */
#define AT91_WDT_WDFIEN (1 << 12) /* Fault Interrupt Enable */
#define AT91_WDT_WDRSTEN (1 << 13) /* Reset Processor */
@@ -30,7 +30,7 @@
#define AT91_WDT_WDDBGHLT (1 << 28) /* Debug Halt */
#define AT91_WDT_WDIDLEHLT (1 << 29) /* Idle Halt */
-#define AT91_WDT_SR (AT91_WDT + 0x08) /* Watchdog Status Register */
+#define AT91_WDT_SR 0x08 /* Watchdog Status Register */
#define AT91_WDT_WDUNF (1 << 0) /* Watchdog Underflow */
#define AT91_WDT_WDERR (1 << 1) /* Watchdog Error */
diff --git a/drivers/watchdog/ath79_wdt.c b/drivers/watchdog/ath79_wdt.c
index 725c84bfdd76..9db808349f8b 100644
--- a/drivers/watchdog/ath79_wdt.c
+++ b/drivers/watchdog/ath79_wdt.c
@@ -68,17 +68,23 @@ static int max_timeout;
static inline void ath79_wdt_keepalive(void)
{
ath79_reset_wr(AR71XX_RESET_REG_WDOG, wdt_freq * timeout);
+ /* flush write */
+ ath79_reset_rr(AR71XX_RESET_REG_WDOG);
}
static inline void ath79_wdt_enable(void)
{
ath79_wdt_keepalive();
ath79_reset_wr(AR71XX_RESET_REG_WDOG_CTRL, WDOG_CTRL_ACTION_FCR);
+ /* flush write */
+ ath79_reset_rr(AR71XX_RESET_REG_WDOG_CTRL);
}
static inline void ath79_wdt_disable(void)
{
ath79_reset_wr(AR71XX_RESET_REG_WDOG_CTRL, WDOG_CTRL_ACTION_NONE);
+ /* flush write */
+ ath79_reset_rr(AR71XX_RESET_REG_WDOG_CTRL);
}
static int ath79_wdt_set_timeout(int val)
diff --git a/drivers/watchdog/bcm63xx_wdt.c b/drivers/watchdog/bcm63xx_wdt.c
index 5064e8317521..8dc7de641e26 100644
--- a/drivers/watchdog/bcm63xx_wdt.c
+++ b/drivers/watchdog/bcm63xx_wdt.c
@@ -311,18 +311,7 @@ static struct platform_driver bcm63xx_wdt = {
}
};
-static int __init bcm63xx_wdt_init(void)
-{
- return platform_driver_register(&bcm63xx_wdt);
-}
-
-static void __exit bcm63xx_wdt_exit(void)
-{
- platform_driver_unregister(&bcm63xx_wdt);
-}
-
-module_init(bcm63xx_wdt_init);
-module_exit(bcm63xx_wdt_exit);
+module_platform_driver(bcm63xx_wdt);
MODULE_AUTHOR("Miguel Gaio <miguel.gaio@efixo.com>");
MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
diff --git a/drivers/watchdog/coh901327_wdt.c b/drivers/watchdog/coh901327_wdt.c
index 03f449a430d2..5b89f7d6cd0f 100644
--- a/drivers/watchdog/coh901327_wdt.c
+++ b/drivers/watchdog/coh901327_wdt.c
@@ -76,8 +76,6 @@ static int irq;
static void __iomem *virtbase;
static unsigned long coh901327_users;
static unsigned long boot_status;
-static u16 wdogenablestore;
-static u16 irqmaskstore;
static struct device *parent;
/*
@@ -461,6 +459,10 @@ out:
}
#ifdef CONFIG_PM
+
+static u16 wdogenablestore;
+static u16 irqmaskstore;
+
static int coh901327_suspend(struct platform_device *pdev, pm_message_t state)
{
irqmaskstore = readw(virtbase + U300_WDOG_IMR) & 0x0001U;
diff --git a/drivers/watchdog/cpu5wdt.c b/drivers/watchdog/cpu5wdt.c
index edd3475f41db..251c863d71dd 100644
--- a/drivers/watchdog/cpu5wdt.c
+++ b/drivers/watchdog/cpu5wdt.c
@@ -39,7 +39,7 @@
static int verbose;
static int port = 0x91;
static int ticks = 10000;
-static spinlock_t cpu5wdt_lock;
+static DEFINE_SPINLOCK(cpu5wdt_lock);
#define PFX "cpu5wdt: "
@@ -223,7 +223,6 @@ static int __devinit cpu5wdt_init(void)
"port=0x%x, verbose=%i\n", port, verbose);
init_completion(&cpu5wdt_device.stop);
- spin_lock_init(&cpu5wdt_lock);
cpu5wdt_device.queue = 0;
setup_timer(&cpu5wdt_device.timer, cpu5wdt_trigger, 0);
cpu5wdt_device.default_ticks = ticks;
diff --git a/drivers/watchdog/cpwd.c b/drivers/watchdog/cpwd.c
index 1e013e8457b7..1b793dfd868f 100644
--- a/drivers/watchdog/cpwd.c
+++ b/drivers/watchdog/cpwd.c
@@ -687,15 +687,4 @@ static struct platform_driver cpwd_driver = {
.remove = __devexit_p(cpwd_remove),
};
-static int __init cpwd_init(void)
-{
- return platform_driver_register(&cpwd_driver);
-}
-
-static void __exit cpwd_exit(void)
-{
- platform_driver_unregister(&cpwd_driver);
-}
-
-module_init(cpwd_init);
-module_exit(cpwd_exit);
+module_platform_driver(cpwd_driver);
diff --git a/drivers/watchdog/davinci_wdt.c b/drivers/watchdog/davinci_wdt.c
index 51b5551b4e3f..c8c5c8032bcb 100644
--- a/drivers/watchdog/davinci_wdt.c
+++ b/drivers/watchdog/davinci_wdt.c
@@ -271,18 +271,7 @@ static struct platform_driver platform_wdt_driver = {
.remove = __devexit_p(davinci_wdt_remove),
};
-static int __init davinci_wdt_init(void)
-{
- return platform_driver_register(&platform_wdt_driver);
-}
-
-static void __exit davinci_wdt_exit(void)
-{
- platform_driver_unregister(&platform_wdt_driver);
-}
-
-module_init(davinci_wdt_init);
-module_exit(davinci_wdt_exit);
+module_platform_driver(platform_wdt_driver);
MODULE_AUTHOR("Texas Instruments");
MODULE_DESCRIPTION("DaVinci Watchdog Driver");
diff --git a/drivers/watchdog/dw_wdt.c b/drivers/watchdog/dw_wdt.c
index f10f8c0abba4..1b0e3dd81c1a 100644
--- a/drivers/watchdog/dw_wdt.c
+++ b/drivers/watchdog/dw_wdt.c
@@ -358,17 +358,7 @@ static struct platform_driver dw_wdt_driver = {
},
};
-static int __init dw_wdt_watchdog_init(void)
-{
- return platform_driver_register(&dw_wdt_driver);
-}
-module_init(dw_wdt_watchdog_init);
-
-static void __exit dw_wdt_watchdog_exit(void)
-{
- platform_driver_unregister(&dw_wdt_driver);
-}
-module_exit(dw_wdt_watchdog_exit);
+module_platform_driver(dw_wdt_driver);
MODULE_AUTHOR("Jamie Iles");
MODULE_DESCRIPTION("Synopsys DesignWare Watchdog Driver");
diff --git a/drivers/watchdog/eurotechwdt.c b/drivers/watchdog/eurotechwdt.c
index 41018d429abb..3946c51099c0 100644
--- a/drivers/watchdog/eurotechwdt.c
+++ b/drivers/watchdog/eurotechwdt.c
@@ -64,7 +64,7 @@
static unsigned long eurwdt_is_open;
static int eurwdt_timeout;
static char eur_expect_close;
-static spinlock_t eurwdt_lock;
+static DEFINE_SPINLOCK(eurwdt_lock);
/*
* You must set these - there is no sane way to probe for this board.
@@ -446,8 +446,6 @@ static int __init eurwdt_init(void)
goto outreg;
}
- spin_lock_init(&eurwdt_lock);
-
ret = misc_register(&eurwdt_miscdev);
if (ret) {
printk(KERN_ERR "eurwdt: can't misc_register on minor=%d\n",
diff --git a/drivers/watchdog/f71808e_wdt.c b/drivers/watchdog/f71808e_wdt.c
index d4d8d1fdccc4..e45ca2b4bfbe 100644
--- a/drivers/watchdog/f71808e_wdt.c
+++ b/drivers/watchdog/f71808e_wdt.c
@@ -100,7 +100,7 @@ MODULE_PARM_DESC(f71862fg_pin,
"Watchdog f71862fg reset output pin configuration. Choose pin 56 or 63"
" (default=" __MODULE_STRING(WATCHDOG_F71862FG_PIN)")");
-static int nowayout = WATCHDOG_NOWAYOUT;
+static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0444);
MODULE_PARM_DESC(nowayout, "Disable watchdog shutdown on close");
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index 3774c9b8dac9..8464ea1c36a1 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -231,6 +231,7 @@ static int __devinit cru_detect(unsigned long map_entry,
cmn_regs.u1.reax = CRU_BIOS_SIGNATURE_VALUE;
+ set_memory_x((unsigned long)bios32_entrypoint, (2 * PAGE_SIZE));
asminline_call(&cmn_regs, bios32_entrypoint);
if (cmn_regs.u1.ral != 0) {
@@ -248,8 +249,10 @@ static int __devinit cru_detect(unsigned long map_entry,
if ((physical_bios_base + physical_bios_offset)) {
cru_rom_addr =
ioremap(cru_physical_address, cru_length);
- if (cru_rom_addr)
+ if (cru_rom_addr) {
+ set_memory_x((unsigned long)cru_rom_addr, cru_length);
retval = 0;
+ }
}
printk(KERN_DEBUG "hpwdt: CRU Base Address: 0x%lx\n",
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index ba6ad662635a..99796c5d913d 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -384,10 +384,10 @@ MODULE_PARM_DESC(nowayout,
"Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
-static int turn_SMI_watchdog_clear_off = 0;
+static int turn_SMI_watchdog_clear_off = 1;
module_param(turn_SMI_watchdog_clear_off, int, 0);
MODULE_PARM_DESC(turn_SMI_watchdog_clear_off,
- "Turn off SMI clearing watchdog (default=0)");
+ "Turn off SMI clearing watchdog (depends on TCO-version)(default=1)");
/*
* Some TCO specific functions
@@ -813,7 +813,7 @@ static int __devinit iTCO_wdt_init(struct pci_dev *pdev,
ret = -EIO;
goto out_unmap;
}
- if (turn_SMI_watchdog_clear_off) {
+ if (turn_SMI_watchdog_clear_off >= iTCO_wdt_private.iTCO_version) {
/* Bit 13: TCO_EN -> 0 = Disables TCO logic generating an SMI# */
val32 = inl(SMI_EN);
val32 &= 0xffffdfff; /* Turn off SMI clearing watchdog */
diff --git a/drivers/watchdog/ibmasr.c b/drivers/watchdog/ibmasr.c
index 195e0f798e76..c7481ad51629 100644
--- a/drivers/watchdog/ibmasr.c
+++ b/drivers/watchdog/ibmasr.c
@@ -68,7 +68,7 @@ static char asr_expect_close;
static unsigned int asr_type, asr_base, asr_length;
static unsigned int asr_read_addr, asr_write_addr;
static unsigned char asr_toggle_mask, asr_disable_mask;
-static spinlock_t asr_lock;
+static DEFINE_SPINLOCK(asr_lock);
static void __asr_toggle(void)
{
@@ -386,8 +386,6 @@ static int __init ibmasr_init(void)
if (!asr_type)
return -ENODEV;
- spin_lock_init(&asr_lock);
-
rc = asr_get_base_address();
if (rc)
return rc;
diff --git a/drivers/watchdog/indydog.c b/drivers/watchdog/indydog.c
index 1cc5609666d1..1475e09f9af2 100644
--- a/drivers/watchdog/indydog.c
+++ b/drivers/watchdog/indydog.c
@@ -28,7 +28,7 @@
#define PFX "indydog: "
static unsigned long indydog_alive;
-static spinlock_t indydog_lock;
+static DEFINE_SPINLOCK(indydog_lock);
#define WATCHDOG_TIMEOUT 30 /* 30 sec default timeout */
@@ -185,8 +185,6 @@ static int __init watchdog_init(void)
{
int ret;
- spin_lock_init(&indydog_lock);
-
ret = register_reboot_notifier(&indydog_notifier);
if (ret) {
printk(KERN_ERR PFX
diff --git a/drivers/watchdog/iop_wdt.c b/drivers/watchdog/iop_wdt.c
index aef94789019f..82fa7a92a8d2 100644
--- a/drivers/watchdog/iop_wdt.c
+++ b/drivers/watchdog/iop_wdt.c
@@ -37,7 +37,7 @@
static int nowayout = WATCHDOG_NOWAYOUT;
static unsigned long wdt_status;
static unsigned long boot_status;
-static spinlock_t wdt_lock;
+static DEFINE_SPINLOCK(wdt_lock);
#define WDT_IN_USE 0
#define WDT_OK_TO_CLOSE 1
@@ -226,9 +226,6 @@ static int __init iop_wdt_init(void)
{
int ret;
- spin_lock_init(&wdt_lock);
-
-
/* check if the reset was caused by the watchdog timer */
boot_status = (read_rcsr() & IOP_RCSR_WDT) ? WDIOF_CARDRESET : 0;
diff --git a/drivers/watchdog/ixp2000_wdt.c b/drivers/watchdog/ixp2000_wdt.c
index e86952a7168c..084f71aa855a 100644
--- a/drivers/watchdog/ixp2000_wdt.c
+++ b/drivers/watchdog/ixp2000_wdt.c
@@ -32,7 +32,7 @@
static int nowayout = WATCHDOG_NOWAYOUT;
static unsigned int heartbeat = 60; /* (secs) Default is 1 minute */
static unsigned long wdt_status;
-static spinlock_t wdt_lock;
+static DEFINE_SPINLOCK(wdt_lock);
#define WDT_IN_USE 0
#define WDT_OK_TO_CLOSE 1
@@ -189,7 +189,6 @@ static int __init ixp2000_wdt_init(void)
return -EIO;
}
wdt_tick_rate = (*IXP2000_T1_CLD * HZ) / 256;
- spin_lock_init(&wdt_lock);
return misc_register(&ixp2000_wdt_miscdev);
}
diff --git a/drivers/watchdog/ixp4xx_wdt.c b/drivers/watchdog/ixp4xx_wdt.c
index e02c0ecda26b..4fc2e9ac26f7 100644
--- a/drivers/watchdog/ixp4xx_wdt.c
+++ b/drivers/watchdog/ixp4xx_wdt.c
@@ -181,7 +181,6 @@ static int __init ixp4xx_wdt_init(void)
return -ENODEV;
}
- spin_lock_init(&wdt_lock);
boot_status = (*IXP4XX_OSST & IXP4XX_OSST_TIMER_WARM_RESET) ?
WDIOF_CARDRESET : 0;
ret = misc_register(&ixp4xx_wdt_miscdev);
diff --git a/drivers/watchdog/jz4740_wdt.c b/drivers/watchdog/jz4740_wdt.c
index 684ba01fb540..17ef300bccc5 100644
--- a/drivers/watchdog/jz4740_wdt.c
+++ b/drivers/watchdog/jz4740_wdt.c
@@ -295,18 +295,7 @@ static struct platform_driver jz4740_wdt_driver = {
},
};
-
-static int __init jz4740_wdt_init(void)
-{
- return platform_driver_register(&jz4740_wdt_driver);
-}
-module_init(jz4740_wdt_init);
-
-static void __exit jz4740_wdt_exit(void)
-{
- platform_driver_unregister(&jz4740_wdt_driver);
-}
-module_exit(jz4740_wdt_exit);
+module_platform_driver(jz4740_wdt_driver);
MODULE_AUTHOR("Paul Cercueil <paul@crapouillou.net>");
MODULE_DESCRIPTION("jz4740 Watchdog Driver");
diff --git a/drivers/watchdog/ks8695_wdt.c b/drivers/watchdog/ks8695_wdt.c
index 811471903e8a..51757a520e8f 100644
--- a/drivers/watchdog/ks8695_wdt.c
+++ b/drivers/watchdog/ks8695_wdt.c
@@ -42,7 +42,7 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
static unsigned long ks8695wdt_busy;
-static spinlock_t ks8695_lock;
+static DEFINE_SPINLOCK(ks8695_lock);
/* ......................................................................... */
@@ -288,7 +288,6 @@ static struct platform_driver ks8695wdt_driver = {
static int __init ks8695_wdt_init(void)
{
- spin_lock_init(&ks8695_lock);
/* Check that the heartbeat value is within range;
if not reset to the default */
if (ks8695_wdt_settimeout(wdt_time)) {
diff --git a/drivers/watchdog/lantiq_wdt.c b/drivers/watchdog/lantiq_wdt.c
index 102aed0efbf1..d3a63be2e28d 100644
--- a/drivers/watchdog/lantiq_wdt.c
+++ b/drivers/watchdog/lantiq_wdt.c
@@ -222,9 +222,6 @@ ltq_wdt_remove(struct platform_device *pdev)
{
misc_deregister(&ltq_wdt_miscdev);
- if (ltq_wdt_membase)
- iounmap(ltq_wdt_membase);
-
return 0;
}
diff --git a/drivers/watchdog/max63xx_wdt.c b/drivers/watchdog/max63xx_wdt.c
index 73ba2fd8e591..af63ecfbfa6f 100644
--- a/drivers/watchdog/max63xx_wdt.c
+++ b/drivers/watchdog/max63xx_wdt.c
@@ -364,18 +364,7 @@ static struct platform_driver max63xx_wdt_driver = {
},
};
-static int __init max63xx_wdt_init(void)
-{
- return platform_driver_register(&max63xx_wdt_driver);
-}
-
-static void __exit max63xx_wdt_exit(void)
-{
- platform_driver_unregister(&max63xx_wdt_driver);
-}
-
-module_init(max63xx_wdt_init);
-module_exit(max63xx_wdt_exit);
+module_platform_driver(max63xx_wdt_driver);
MODULE_AUTHOR("Marc Zyngier <maz@misterjones.org>");
MODULE_DESCRIPTION("max63xx Watchdog Driver");
diff --git a/drivers/watchdog/mpc8xxx_wdt.c b/drivers/watchdog/mpc8xxx_wdt.c
index eed5436ffb51..20feb4d3d791 100644
--- a/drivers/watchdog/mpc8xxx_wdt.c
+++ b/drivers/watchdog/mpc8xxx_wdt.c
@@ -55,7 +55,7 @@ module_param(timeout, ushort, 0);
MODULE_PARM_DESC(timeout,
"Watchdog timeout in ticks. (0<timeout<65536, default=65535)");
-static int reset = 1;
+static bool reset = 1;
module_param(reset, bool, 0);
MODULE_PARM_DESC(reset,
"Watchdog Interrupt/Reset Mode. 0 = interrupt, 1 = reset");
diff --git a/drivers/watchdog/mtx-1_wdt.c b/drivers/watchdog/mtx-1_wdt.c
index ac37bb82392c..c29e31d99fe8 100644
--- a/drivers/watchdog/mtx-1_wdt.c
+++ b/drivers/watchdog/mtx-1_wdt.c
@@ -253,18 +253,7 @@ static struct platform_driver mtx1_wdt_driver = {
.driver.owner = THIS_MODULE,
};
-static int __init mtx1_wdt_init(void)
-{
- return platform_driver_register(&mtx1_wdt_driver);
-}
-
-static void __exit mtx1_wdt_exit(void)
-{
- platform_driver_unregister(&mtx1_wdt_driver);
-}
-
-module_init(mtx1_wdt_init);
-module_exit(mtx1_wdt_exit);
+module_platform_driver(mtx1_wdt_driver);
MODULE_AUTHOR("Michael Stickel, Florian Fainelli");
MODULE_DESCRIPTION("Driver for the MTX-1 watchdog");
diff --git a/drivers/watchdog/nuc900_wdt.c b/drivers/watchdog/nuc900_wdt.c
index 6cee33d4b161..50359bad9177 100644
--- a/drivers/watchdog/nuc900_wdt.c
+++ b/drivers/watchdog/nuc900_wdt.c
@@ -334,18 +334,7 @@ static struct platform_driver nuc900wdt_driver = {
},
};
-static int __init nuc900_wdt_init(void)
-{
- return platform_driver_register(&nuc900wdt_driver);
-}
-
-static void __exit nuc900_wdt_exit(void)
-{
- platform_driver_unregister(&nuc900wdt_driver);
-}
-
-module_init(nuc900_wdt_init);
-module_exit(nuc900_wdt_exit);
+module_platform_driver(nuc900wdt_driver);
MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
MODULE_DESCRIPTION("Watchdog driver for NUC900");
diff --git a/drivers/watchdog/of_xilinx_wdt.c b/drivers/watchdog/of_xilinx_wdt.c
index 4ec741ac952c..f359ab85c3bc 100644
--- a/drivers/watchdog/of_xilinx_wdt.c
+++ b/drivers/watchdog/of_xilinx_wdt.c
@@ -414,18 +414,7 @@ static struct platform_driver xwdt_driver = {
},
};
-static int __init xwdt_init(void)
-{
- return platform_driver_register(&xwdt_driver);
-}
-
-static void __exit xwdt_exit(void)
-{
- platform_driver_unregister(&xwdt_driver);
-}
-
-module_init(xwdt_init);
-module_exit(xwdt_exit);
+module_platform_driver(xwdt_driver);
MODULE_AUTHOR("Alejandro Cabrera <aldaya@gmail.com>");
MODULE_DESCRIPTION("Xilinx Watchdog driver");
diff --git a/drivers/watchdog/omap_wdt.c b/drivers/watchdog/omap_wdt.c
index 2b4acb86c191..4b33e3fd726b 100644
--- a/drivers/watchdog/omap_wdt.c
+++ b/drivers/watchdog/omap_wdt.c
@@ -55,7 +55,7 @@ module_param(timer_margin, uint, 0);
MODULE_PARM_DESC(timer_margin, "initial watchdog timeout (in seconds)");
static unsigned int wdt_trgr_pattern = 0x1234;
-static spinlock_t wdt_lock;
+static DEFINE_SPINLOCK(wdt_lock);
struct omap_wdt_dev {
void __iomem *base; /* physical */
@@ -232,6 +232,7 @@ static long omap_wdt_ioctl(struct file *file, unsigned int cmd,
if (cpu_is_omap24xx())
return put_user(omap_prcm_get_reset_sources(),
(int __user *)arg);
+ return put_user(0, (int __user *)arg);
case WDIOC_KEEPALIVE:
pm_runtime_get_sync(wdev->dev);
spin_lock(&wdt_lock);
@@ -437,19 +438,7 @@ static struct platform_driver omap_wdt_driver = {
},
};
-static int __init omap_wdt_init(void)
-{
- spin_lock_init(&wdt_lock);
- return platform_driver_register(&omap_wdt_driver);
-}
-
-static void __exit omap_wdt_exit(void)
-{
- platform_driver_unregister(&omap_wdt_driver);
-}
-
-module_init(omap_wdt_init);
-module_exit(omap_wdt_exit);
+module_platform_driver(omap_wdt_driver);
MODULE_AUTHOR("George G. Davis");
MODULE_LICENSE("GPL");
diff --git a/drivers/watchdog/orion_wdt.c b/drivers/watchdog/orion_wdt.c
index 2d9fb96a9ee9..4ad78f868515 100644
--- a/drivers/watchdog/orion_wdt.c
+++ b/drivers/watchdog/orion_wdt.c
@@ -41,7 +41,7 @@ static int heartbeat = -1; /* module parameter (seconds) */
static unsigned int wdt_max_duration; /* (seconds) */
static unsigned int wdt_tclk;
static unsigned long wdt_status;
-static spinlock_t wdt_lock;
+static DEFINE_SPINLOCK(wdt_lock);
static void orion_wdt_ping(void)
{
@@ -294,19 +294,7 @@ static struct platform_driver orion_wdt_driver = {
},
};
-static int __init orion_wdt_init(void)
-{
- spin_lock_init(&wdt_lock);
- return platform_driver_register(&orion_wdt_driver);
-}
-
-static void __exit orion_wdt_exit(void)
-{
- platform_driver_unregister(&orion_wdt_driver);
-}
-
-module_init(orion_wdt_init);
-module_exit(orion_wdt_exit);
+module_platform_driver(orion_wdt_driver);
MODULE_AUTHOR("Sylver Bruneau <sylver.bruneau@googlemail.com>");
MODULE_DESCRIPTION("Orion Processor Watchdog");
diff --git a/drivers/watchdog/pnx4008_wdt.c b/drivers/watchdog/pnx4008_wdt.c
index 614933225560..bd143c9dd3e6 100644
--- a/drivers/watchdog/pnx4008_wdt.c
+++ b/drivers/watchdog/pnx4008_wdt.c
@@ -334,18 +334,7 @@ static struct platform_driver platform_wdt_driver = {
.remove = __devexit_p(pnx4008_wdt_remove),
};
-static int __init pnx4008_wdt_init(void)
-{
- return platform_driver_register(&platform_wdt_driver);
-}
-
-static void __exit pnx4008_wdt_exit(void)
-{
- platform_driver_unregister(&platform_wdt_driver);
-}
-
-module_init(pnx4008_wdt_init);
-module_exit(pnx4008_wdt_exit);
+module_platform_driver(platform_wdt_driver);
MODULE_AUTHOR("MontaVista Software, Inc. <source@mvista.com>");
MODULE_DESCRIPTION("PNX4008 Watchdog Driver");
diff --git a/drivers/watchdog/rc32434_wdt.c b/drivers/watchdog/rc32434_wdt.c
index d4c29b5311a4..bf7bc8aa1c01 100644
--- a/drivers/watchdog/rc32434_wdt.c
+++ b/drivers/watchdog/rc32434_wdt.c
@@ -332,18 +332,7 @@ static struct platform_driver rc32434_wdt_driver = {
}
};
-static int __init rc32434_wdt_init(void)
-{
- return platform_driver_register(&rc32434_wdt_driver);
-}
-
-static void __exit rc32434_wdt_exit(void)
-{
- platform_driver_unregister(&rc32434_wdt_driver);
-}
-
-module_init(rc32434_wdt_init);
-module_exit(rc32434_wdt_exit);
+module_platform_driver(rc32434_wdt_driver);
MODULE_AUTHOR("Ondrej Zajicek <santiago@crfreenet.org>,"
"Florian Fainelli <florian@openwrt.org>");
diff --git a/drivers/watchdog/rdc321x_wdt.c b/drivers/watchdog/rdc321x_wdt.c
index 428f8a1583e8..042ccc56ae26 100644
--- a/drivers/watchdog/rdc321x_wdt.c
+++ b/drivers/watchdog/rdc321x_wdt.c
@@ -293,18 +293,7 @@ static struct platform_driver rdc321x_wdt_driver = {
},
};
-static int __init rdc321x_wdt_init(void)
-{
- return platform_driver_register(&rdc321x_wdt_driver);
-}
-
-static void __exit rdc321x_wdt_exit(void)
-{
- platform_driver_unregister(&rdc321x_wdt_driver);
-}
-
-module_init(rdc321x_wdt_init);
-module_exit(rdc321x_wdt_exit);
+module_platform_driver(rdc321x_wdt_driver);
MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
MODULE_DESCRIPTION("RDC321x watchdog driver");
diff --git a/drivers/watchdog/riowd.c b/drivers/watchdog/riowd.c
index 109b533896b7..c7e17ceafa6a 100644
--- a/drivers/watchdog/riowd.c
+++ b/drivers/watchdog/riowd.c
@@ -247,15 +247,4 @@ static struct platform_driver riowd_driver = {
.remove = __devexit_p(riowd_remove),
};
-static int __init riowd_init(void)
-{
- return platform_driver_register(&riowd_driver);
-}
-
-static void __exit riowd_exit(void)
-{
- platform_driver_unregister(&riowd_driver);
-}
-
-module_init(riowd_init);
-module_exit(riowd_exit);
+module_platform_driver(riowd_driver);
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index a79e3840782a..4bc3744e14e4 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -378,6 +378,8 @@ static int __devinit s3c2410wdt_probe(struct platform_device *pdev)
"cannot start\n");
}
+ watchdog_set_nowayout(&s3c2410_wdd, nowayout);
+
ret = watchdog_register_device(&s3c2410_wdd);
if (ret) {
dev_err(dev, "cannot register watchdog (%d)\n", ret);
diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c
index cc2cfbe33b30..eef1524ae52e 100644
--- a/drivers/watchdog/sp805_wdt.c
+++ b/drivers/watchdog/sp805_wdt.c
@@ -351,7 +351,7 @@ static int __devexit sp805_wdt_remove(struct amba_device *adev)
return 0;
}
-static struct amba_id sp805_wdt_ids[] __initdata = {
+static struct amba_id sp805_wdt_ids[] = {
{
.id = 0x00141805,
.mask = 0x00ffffff,
@@ -359,6 +359,8 @@ static struct amba_id sp805_wdt_ids[] __initdata = {
{ 0, 0 },
};
+MODULE_DEVICE_TABLE(amba, sp805_wdt_ids);
+
static struct amba_driver sp805_wdt_driver = {
.drv = {
.name = MODULE_NAME,
diff --git a/drivers/watchdog/stmp3xxx_wdt.c b/drivers/watchdog/stmp3xxx_wdt.c
index ac2346a452e5..4c2a4e8698f9 100644
--- a/drivers/watchdog/stmp3xxx_wdt.c
+++ b/drivers/watchdog/stmp3xxx_wdt.c
@@ -272,18 +272,7 @@ static struct platform_driver platform_wdt_driver = {
.resume = stmp3xxx_wdt_resume,
};
-static int __init stmp3xxx_wdt_init(void)
-{
- return platform_driver_register(&platform_wdt_driver);
-}
-
-static void __exit stmp3xxx_wdt_exit(void)
-{
- return platform_driver_unregister(&platform_wdt_driver);
-}
-
-module_init(stmp3xxx_wdt_init);
-module_exit(stmp3xxx_wdt_exit);
+module_platform_driver(platform_wdt_driver);
MODULE_DESCRIPTION("STMP3XXX Watchdog Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/watchdog/ts72xx_wdt.c b/drivers/watchdog/ts72xx_wdt.c
index 5a90a4a871dd..1490293dc7da 100644
--- a/drivers/watchdog/ts72xx_wdt.c
+++ b/drivers/watchdog/ts72xx_wdt.c
@@ -506,17 +506,7 @@ static struct platform_driver ts72xx_wdt_driver = {
},
};
-static __init int ts72xx_wdt_init(void)
-{
- return platform_driver_register(&ts72xx_wdt_driver);
-}
-module_init(ts72xx_wdt_init);
-
-static __exit void ts72xx_wdt_exit(void)
-{
- platform_driver_unregister(&ts72xx_wdt_driver);
-}
-module_exit(ts72xx_wdt_exit);
+module_platform_driver(ts72xx_wdt_driver);
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
MODULE_DESCRIPTION("TS-72xx SBC Watchdog");
diff --git a/drivers/watchdog/twl4030_wdt.c b/drivers/watchdog/twl4030_wdt.c
index b5045ca7e61c..0764c6239b98 100644
--- a/drivers/watchdog/twl4030_wdt.c
+++ b/drivers/watchdog/twl4030_wdt.c
@@ -256,17 +256,7 @@ static struct platform_driver twl4030_wdt_driver = {
},
};
-static int __devinit twl4030_wdt_init(void)
-{
- return platform_driver_register(&twl4030_wdt_driver);
-}
-module_init(twl4030_wdt_init);
-
-static void __devexit twl4030_wdt_exit(void)
-{
- platform_driver_unregister(&twl4030_wdt_driver);
-}
-module_exit(twl4030_wdt_exit);
+module_platform_driver(twl4030_wdt_driver);
MODULE_AUTHOR("Nokia Corporation");
MODULE_LICENSE("GPL");
diff --git a/drivers/watchdog/via_wdt.c b/drivers/watchdog/via_wdt.c
new file mode 100644
index 000000000000..026b4bbfa0aa
--- /dev/null
+++ b/drivers/watchdog/via_wdt.c
@@ -0,0 +1,267 @@
+/*
+ * VIA Chipset Watchdog Driver
+ *
+ * Copyright (C) 2011 Sigfox
+ * License terms: GNU General Public License (GPL) version 2
+ * Author: Marc Vertes <marc.vertes@sigfox.com>
+ * Based on a preliminary version from Harald Welte <HaraldWelte@viatech.com>
+ * Timer code by Wim Van Sebroeck <wim@iguana.be>
+ *
+ * Caveat: PnP must be enabled in BIOS to allow full access to watchdog
+ * control registers. If not, the watchdog must be configured in BIOS manually.
+ */
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/timer.h>
+#include <linux/watchdog.h>
+
+/* Configuration registers relative to the pci device */
+#define VIA_WDT_MMIO_BASE 0xe8 /* MMIO region base address */
+#define VIA_WDT_CONF 0xec /* watchdog enable state */
+
+/* Relevant bits for the VIA_WDT_CONF register */
+#define VIA_WDT_CONF_ENABLE 0x01 /* 1: enable watchdog */
+#define VIA_WDT_CONF_MMIO 0x02 /* 1: enable watchdog MMIO */
+
+/*
+ * The MMIO region contains the watchdog control register and the
+ * hardware timer counter.
+ */
+#define VIA_WDT_MMIO_LEN 8 /* MMIO region length in bytes */
+#define VIA_WDT_CTL 0 /* MMIO addr+0: state/control reg. */
+#define VIA_WDT_COUNT 4 /* MMIO addr+4: timer counter reg. */
+
+/* Bits for the VIA_WDT_CTL register */
+#define VIA_WDT_RUNNING 0x01 /* 0: stop, 1: running */
+#define VIA_WDT_FIRED 0x02 /* 1: restarted by expired watchdog */
+#define VIA_WDT_PWROFF 0x04 /* 0: reset, 1: poweroff */
+#define VIA_WDT_DISABLED 0x08 /* 1: timer is disabled */
+#define VIA_WDT_TRIGGER 0x80 /* 1: start a new countdown */
+
+/* Hardware heartbeat in seconds */
+#define WDT_HW_HEARTBEAT 1
+
+/* Timer heartbeat (500ms) */
+#define WDT_HEARTBEAT (HZ/2) /* should be <= ((WDT_HW_HEARTBEAT*HZ)/2) */
+
+/* User space timeout in seconds */
+#define WDT_TIMEOUT_MAX 1023 /* approx. 17 min. */
+#define WDT_TIMEOUT 60
+static int timeout = WDT_TIMEOUT;
+module_param(timeout, int, 0);
+MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds, between 1 and 1023 "
+ "(default = " __MODULE_STRING(WDT_TIMEOUT) ")");
+
+static int nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, int, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
+ "(default = " __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+static struct watchdog_device wdt_dev;
+static struct resource wdt_res;
+static void __iomem *wdt_mem;
+static unsigned int mmio;
+static void wdt_timer_tick(unsigned long data);
+static DEFINE_TIMER(timer, wdt_timer_tick, 0, 0);
+ /* The timer that pings the watchdog */
+static unsigned long next_heartbeat; /* the next_heartbeat for the timer */
+
+static inline void wdt_reset(void)
+{
+ unsigned int ctl = readl(wdt_mem);
+
+ writel(ctl | VIA_WDT_TRIGGER, wdt_mem);
+}
+
+/*
+ * Timer tick: the timer will make sure that the watchdog timer hardware
+ * is being reset in time. The conditions to do this are:
+ * 1) the watchdog timer has been started and /dev/watchdog is open
+ * and there is still time left before userspace should send the
+ * next heartbeat/ping. (note: the internal heartbeat is much smaller
+ * than the external/userspace heartbeat).
+ * 2) the watchdog timer has been stopped by userspace.
+ */
+static void wdt_timer_tick(unsigned long data)
+{
+ if (time_before(jiffies, next_heartbeat) ||
+ (!test_bit(WDOG_ACTIVE, &wdt_dev.status))) {
+ wdt_reset();
+ mod_timer(&timer, jiffies + WDT_HEARTBEAT);
+ } else
+ pr_crit("I will reboot your machine!\n");
+}
+
+static int wdt_ping(struct watchdog_device *wdd)
+{
+ /* calculate when the next userspace timeout will be */
+ next_heartbeat = jiffies + timeout * HZ;
+ return 0;
+}
+
+static int wdt_start(struct watchdog_device *wdd)
+{
+ unsigned int ctl = readl(wdt_mem);
+
+ writel(timeout, wdt_mem + VIA_WDT_COUNT);
+ writel(ctl | VIA_WDT_RUNNING | VIA_WDT_TRIGGER, wdt_mem);
+ wdt_ping(wdd);
+ mod_timer(&timer, jiffies + WDT_HEARTBEAT);
+ return 0;
+}
+
+static int wdt_stop(struct watchdog_device *wdd)
+{
+ unsigned int ctl = readl(wdt_mem);
+
+ writel(ctl & ~VIA_WDT_RUNNING, wdt_mem);
+ return 0;
+}
+
+static int wdt_set_timeout(struct watchdog_device *wdd,
+ unsigned int new_timeout)
+{
+ if (new_timeout < 1 || new_timeout > WDT_TIMEOUT_MAX)
+ return -EINVAL;
+ writel(new_timeout, wdt_mem + VIA_WDT_COUNT);
+ timeout = new_timeout;
+ return 0;
+}
+
+static const struct watchdog_info wdt_info = {
+ .identity = "VIA watchdog",
+ .options = WDIOF_CARDRESET |
+ WDIOF_SETTIMEOUT |
+ WDIOF_MAGICCLOSE |
+ WDIOF_KEEPALIVEPING,
+};
+
+static const struct watchdog_ops wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = wdt_start,
+ .stop = wdt_stop,
+ .ping = wdt_ping,
+ .set_timeout = wdt_set_timeout,
+};
+
+static struct watchdog_device wdt_dev = {
+ .info = &wdt_info,
+ .ops = &wdt_ops,
+};
+
+static int __devinit wdt_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ unsigned char conf;
+ int ret = -ENODEV;
+
+ if (pci_enable_device(pdev)) {
+ dev_err(&pdev->dev, "cannot enable PCI device\n");
+ return -ENODEV;
+ }
+
+ /*
+ * Allocate an MMIO region which contains the watchdog control register
+ * and counter, then configure the watchdog to use this region.
+ * This is possible only if PnP is properly enabled in BIOS.
+ * If not, the watchdog must be configured in BIOS manually.
+ */
+ if (allocate_resource(&iomem_resource, &wdt_res, VIA_WDT_MMIO_LEN,
+ 0xf0000000, 0xffffff00, 0xff, NULL, NULL)) {
+ dev_err(&pdev->dev, "MMIO allocation failed\n");
+ goto err_out_disable_device;
+ }
+
+ pci_write_config_dword(pdev, VIA_WDT_MMIO_BASE, wdt_res.start);
+ pci_read_config_byte(pdev, VIA_WDT_CONF, &conf);
+ conf |= VIA_WDT_CONF_ENABLE | VIA_WDT_CONF_MMIO;
+ pci_write_config_byte(pdev, VIA_WDT_CONF, conf);
+
+ pci_read_config_dword(pdev, VIA_WDT_MMIO_BASE, &mmio);
+ if (mmio) {
+ dev_info(&pdev->dev, "VIA Chipset watchdog MMIO: %x\n", mmio);
+ } else {
+ dev_err(&pdev->dev, "MMIO setting failed. Check BIOS.\n");
+ goto err_out_resource;
+ }
+
+ if (!request_mem_region(mmio, VIA_WDT_MMIO_LEN, "via_wdt")) {
+ dev_err(&pdev->dev, "MMIO region busy\n");
+ goto err_out_resource;
+ }
+
+ wdt_mem = ioremap(mmio, VIA_WDT_MMIO_LEN);
+ if (wdt_mem == NULL) {
+ dev_err(&pdev->dev, "cannot remap VIA wdt MMIO registers\n");
+ goto err_out_release;
+ }
+
+ wdt_dev.timeout = timeout;
+ watchdog_set_nowayout(&wdt_dev, nowayout);
+ if (readl(wdt_mem) & VIA_WDT_FIRED)
+ wdt_dev.bootstatus |= WDIOF_CARDRESET;
+
+ ret = watchdog_register_device(&wdt_dev);
+ if (ret)
+ goto err_out_iounmap;
+
+ /* start triggering, in case of watchdog already enabled by BIOS */
+ mod_timer(&timer, jiffies + WDT_HEARTBEAT);
+ return 0;
+
+err_out_iounmap:
+ iounmap(wdt_mem);
+err_out_release:
+ release_mem_region(mmio, VIA_WDT_MMIO_LEN);
+err_out_resource:
+ release_resource(&wdt_res);
+err_out_disable_device:
+ pci_disable_device(pdev);
+ return ret;
+}
+
+static void __devexit wdt_remove(struct pci_dev *pdev)
+{
+ watchdog_unregister_device(&wdt_dev);
+ del_timer(&timer);
+ iounmap(wdt_mem);
+ release_mem_region(mmio, VIA_WDT_MMIO_LEN);
+ release_resource(&wdt_res);
+ pci_disable_device(pdev);
+}
+
+DEFINE_PCI_DEVICE_TABLE(wdt_pci_table) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_CX700) },
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VX800) },
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VX855) },
+ { 0 }
+};
+
+static struct pci_driver wdt_driver = {
+ .name = "via_wdt",
+ .id_table = wdt_pci_table,
+ .probe = wdt_probe,
+ .remove = __devexit_p(wdt_remove),
+};
+
+static int __init wdt_init(void)
+{
+ if (timeout < 1 || timeout > WDT_TIMEOUT_MAX)
+ timeout = WDT_TIMEOUT;
+ return pci_register_driver(&wdt_driver);
+}
+
+static void __exit wdt_exit(void)
+{
+ pci_unregister_driver(&wdt_driver);
+}
+
+module_init(wdt_init);
+module_exit(wdt_exit);
+
+MODULE_AUTHOR("Marc Vertes");
+MODULE_DESCRIPTION("Driver for watchdog timer on VIA chipset");
+MODULE_LICENSE("GPL");
diff --git a/drivers/watchdog/w83627hf_wdt.c b/drivers/watchdog/w83627hf_wdt.c
index dd5d67548758..576a388a1164 100644
--- a/drivers/watchdog/w83627hf_wdt.c
+++ b/drivers/watchdog/w83627hf_wdt.c
@@ -4,7 +4,7 @@
* (c) Copyright 2007 Vlad Drukker <vlad@storewiz.com>
* added support for W83627THF.
*
- * (c) Copyright 2003,2007 Pdraig Brady <P@draigBrady.com>
+ * (c) Copyright 2003,2007 Pádraig Brady <P@draigBrady.com>
*
* Based on advantechwdt.c which is based on wdt.c.
* Original copyright messages:
@@ -401,6 +401,6 @@ module_init(wdt_init);
module_exit(wdt_exit);
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Pdraig Brady <P@draigBrady.com>");
+MODULE_AUTHOR("Pádraig Brady <P@draigBrady.com>");
MODULE_DESCRIPTION("w83627hf/thf WDT driver");
MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/wm831x_wdt.c b/drivers/watchdog/wm831x_wdt.c
index e789a47db41f..263c883f0806 100644
--- a/drivers/watchdog/wm831x_wdt.c
+++ b/drivers/watchdog/wm831x_wdt.c
@@ -199,7 +199,8 @@ static int __devinit wm831x_wdt_probe(struct platform_device *pdev)
if (reg & WM831X_WDOG_DEBUG)
dev_warn(wm831x->dev, "Watchdog is paused\n");
- driver_data = kzalloc(sizeof(*driver_data), GFP_KERNEL);
+ driver_data = devm_kzalloc(&pdev->dev, sizeof(*driver_data),
+ GFP_KERNEL);
if (!driver_data) {
dev_err(wm831x->dev, "Unable to alloacate watchdog device\n");
ret = -ENOMEM;
@@ -213,11 +214,9 @@ static int __devinit wm831x_wdt_probe(struct platform_device *pdev)
wm831x_wdt->info = &wm831x_wdt_info;
wm831x_wdt->ops = &wm831x_wdt_ops;
+ watchdog_set_nowayout(wm831x_wdt, nowayout);
watchdog_set_drvdata(wm831x_wdt, driver_data);
- if (nowayout)
- wm831x_wdt->status |= WDOG_NO_WAY_OUT;
-
reg = wm831x_reg_read(wm831x, WM831X_WATCHDOG);
reg &= WM831X_WDOG_TO_MASK;
for (i = 0; i < ARRAY_SIZE(wm831x_wdt_cfgs); i++)
@@ -252,7 +251,7 @@ static int __devinit wm831x_wdt_probe(struct platform_device *pdev)
dev_err(wm831x->dev,
"Failed to request update GPIO: %d\n",
ret);
- goto err_alloc;
+ goto err;
}
ret = gpio_direction_output(pdata->update_gpio, 0);
@@ -294,8 +293,6 @@ static int __devinit wm831x_wdt_probe(struct platform_device *pdev)
err_gpio:
if (driver_data->update_gpio)
gpio_free(driver_data->update_gpio);
-err_alloc:
- kfree(driver_data);
err:
return ret;
}
@@ -320,17 +317,7 @@ static struct platform_driver wm831x_wdt_driver = {
},
};
-static int __init wm831x_wdt_init(void)
-{
- return platform_driver_register(&wm831x_wdt_driver);
-}
-module_init(wm831x_wdt_init);
-
-static void __exit wm831x_wdt_exit(void)
-{
- platform_driver_unregister(&wm831x_wdt_driver);
-}
-module_exit(wm831x_wdt_exit);
+module_platform_driver(wm831x_wdt_driver);
MODULE_AUTHOR("Mark Brown");
MODULE_DESCRIPTION("WM831x Watchdog");
diff --git a/drivers/watchdog/wm8350_wdt.c b/drivers/watchdog/wm8350_wdt.c
index b68d928c8f90..909c78650d3e 100644
--- a/drivers/watchdog/wm8350_wdt.c
+++ b/drivers/watchdog/wm8350_wdt.c
@@ -311,17 +311,7 @@ static struct platform_driver wm8350_wdt_driver = {
},
};
-static int __init wm8350_wdt_init(void)
-{
- return platform_driver_register(&wm8350_wdt_driver);
-}
-module_init(wm8350_wdt_init);
-
-static void __exit wm8350_wdt_exit(void)
-{
- platform_driver_unregister(&wm8350_wdt_driver);
-}
-module_exit(wm8350_wdt_exit);
+module_platform_driver(wm8350_wdt_driver);
MODULE_AUTHOR("Mark Brown");
MODULE_DESCRIPTION("WM8350 Watchdog");
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 8795480c2350..a1ced521cf74 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -86,6 +86,7 @@ config XEN_BACKEND
config XENFS
tristate "Xen filesystem"
+ select XEN_PRIVCMD
default y
help
The xen filesystem provides a way for domains to share
@@ -171,4 +172,10 @@ config XEN_PCIDEV_BACKEND
xen-pciback.hide=(03:00.0)(04:00.0)
If in doubt, say m.
+
+config XEN_PRIVCMD
+ tristate
+ depends on XEN
+ default m
+
endmenu
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index 974fffdf22b2..aa31337192cc 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -19,7 +19,9 @@ obj-$(CONFIG_XEN_TMEM) += tmem.o
obj-$(CONFIG_SWIOTLB_XEN) += swiotlb-xen.o
obj-$(CONFIG_XEN_DOM0) += pci.o
obj-$(CONFIG_XEN_PCIDEV_BACKEND) += xen-pciback/
+obj-$(CONFIG_XEN_PRIVCMD) += xen-privcmd.o
xen-evtchn-y := evtchn.o
xen-gntdev-y := gntdev.o
xen-gntalloc-y := gntalloc.o
+xen-privcmd-y := privcmd.o
diff --git a/drivers/xen/biomerge.c b/drivers/xen/biomerge.c
index ba6eda4b5143..0edb91c0de6b 100644
--- a/drivers/xen/biomerge.c
+++ b/drivers/xen/biomerge.c
@@ -1,5 +1,6 @@
#include <linux/bio.h>
#include <linux/io.h>
+#include <linux/export.h>
#include <xen/page.h>
bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
@@ -11,3 +12,4 @@ bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
return __BIOVEC_PHYS_MERGEABLE(vec1, vec2) &&
((mfn1 == mfn2) || ((mfn1+1) == mfn2));
}
+EXPORT_SYMBOL(xen_biovec_phys_mergeable);
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 6e075cdd0c6b..e5e5812a1014 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -87,6 +87,7 @@ enum xen_irq_type {
*/
struct irq_info {
struct list_head list;
+ int refcnt;
enum xen_irq_type type; /* type */
unsigned irq;
unsigned short evtchn; /* event channel */
@@ -406,6 +407,7 @@ static void xen_irq_init(unsigned irq)
panic("Unable to allocate metadata for IRQ%d\n", irq);
info->type = IRQT_UNBOUND;
+ info->refcnt = -1;
irq_set_handler_data(irq, info);
@@ -469,6 +471,8 @@ static void xen_free_irq(unsigned irq)
irq_set_handler_data(irq, NULL);
+ WARN_ON(info->refcnt > 0);
+
kfree(info);
/* Legacy IRQ descriptors are managed by the arch. */
@@ -637,7 +641,7 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
if (irq != -1) {
printk(KERN_INFO "xen_map_pirq_gsi: returning irq %d for gsi %u\n",
irq, gsi);
- goto out; /* XXX need refcount? */
+ goto out;
}
irq = xen_allocate_irq_gsi(gsi);
@@ -939,9 +943,16 @@ static void unbind_from_irq(unsigned int irq)
{
struct evtchn_close close;
int evtchn = evtchn_from_irq(irq);
+ struct irq_info *info = irq_get_handler_data(irq);
mutex_lock(&irq_mapping_update_lock);
+ if (info->refcnt > 0) {
+ info->refcnt--;
+ if (info->refcnt != 0)
+ goto done;
+ }
+
if (VALID_EVTCHN(evtchn)) {
close.port = evtchn;
if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
@@ -970,6 +981,7 @@ static void unbind_from_irq(unsigned int irq)
xen_free_irq(irq);
+ done:
mutex_unlock(&irq_mapping_update_lock);
}
@@ -1065,6 +1077,69 @@ void unbind_from_irqhandler(unsigned int irq, void *dev_id)
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);
+int evtchn_make_refcounted(unsigned int evtchn)
+{
+ int irq = evtchn_to_irq[evtchn];
+ struct irq_info *info;
+
+ if (irq == -1)
+ return -ENOENT;
+
+ info = irq_get_handler_data(irq);
+
+ if (!info)
+ return -ENOENT;
+
+ WARN_ON(info->refcnt != -1);
+
+ info->refcnt = 1;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(evtchn_make_refcounted);
+
+int evtchn_get(unsigned int evtchn)
+{
+ int irq;
+ struct irq_info *info;
+ int err = -ENOENT;
+
+ if (evtchn >= NR_EVENT_CHANNELS)
+ return -EINVAL;
+
+ mutex_lock(&irq_mapping_update_lock);
+
+ irq = evtchn_to_irq[evtchn];
+ if (irq == -1)
+ goto done;
+
+ info = irq_get_handler_data(irq);
+
+ if (!info)
+ goto done;
+
+ err = -EINVAL;
+ if (info->refcnt <= 0)
+ goto done;
+
+ info->refcnt++;
+ err = 0;
+ done:
+ mutex_unlock(&irq_mapping_update_lock);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(evtchn_get);
+
+void evtchn_put(unsigned int evtchn)
+{
+ int irq = evtchn_to_irq[evtchn];
+ if (WARN_ON(irq == -1))
+ return;
+ unbind_from_irq(irq);
+}
+EXPORT_SYMBOL_GPL(evtchn_put);
+
void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
{
int irq = per_cpu(ipi_to_irq, cpu)[vector];
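
The refcnt field added above lets several in-kernel users share one interdomain event channel: a port starts at the default -1 (not refcounted, the first unbind closes it), is switched over with evtchn_make_refcounted(), and from then on evtchn_get()/evtchn_put() bracket each additional user, with the final put doing the real unbind. A hedged sketch of the intended calling pattern (the surrounding helper is hypothetical; the three exported functions are the ones added above):

/* Sketch: using an already-bound, refcounted event channel. */
static int poke_shared_port(unsigned int evtchn)
{
	int err = evtchn_get(evtchn);	/* bumps info->refcnt under irq_mapping_update_lock */

	if (err)
		return err;		/* port unbound, or not marked refcounted */

	notify_remote_via_evtchn(evtchn);
	evtchn_put(evtchn);		/* the last put unbinds the IRQ and closes the port */
	return 0;
}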
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
index dbc13e94b612..b1f60a0c0bea 100644
--- a/drivers/xen/evtchn.c
+++ b/drivers/xen/evtchn.c
@@ -268,7 +268,7 @@ static int evtchn_bind_to_user(struct per_user_data *u, int port)
rc = bind_evtchn_to_irqhandler(port, evtchn_interrupt, IRQF_DISABLED,
u->name, (void *)(unsigned long)port);
if (rc >= 0)
- rc = 0;
+ rc = evtchn_make_refcounted(port);
return rc;
}
diff --git a/drivers/xen/gntalloc.c b/drivers/xen/gntalloc.c
index e1c4c6e5b469..934985d14c24 100644
--- a/drivers/xen/gntalloc.c
+++ b/drivers/xen/gntalloc.c
@@ -74,7 +74,7 @@ MODULE_PARM_DESC(limit, "Maximum number of grants that may be allocated by "
"the gntalloc device");
static LIST_HEAD(gref_list);
-static DEFINE_SPINLOCK(gref_lock);
+static DEFINE_MUTEX(gref_mutex);
static int gref_size;
struct notify_info {
@@ -99,6 +99,12 @@ struct gntalloc_file_private_data {
uint64_t index;
};
+struct gntalloc_vma_private_data {
+ struct gntalloc_gref *gref;
+ int users;
+ int count;
+};
+
static void __del_gref(struct gntalloc_gref *gref);
static void do_cleanup(void)
@@ -143,15 +149,15 @@ static int add_grefs(struct ioctl_gntalloc_alloc_gref *op,
}
/* Add to gref lists. */
- spin_lock(&gref_lock);
+ mutex_lock(&gref_mutex);
list_splice_tail(&queue_gref, &gref_list);
list_splice_tail(&queue_file, &priv->list);
- spin_unlock(&gref_lock);
+ mutex_unlock(&gref_mutex);
return 0;
undo:
- spin_lock(&gref_lock);
+ mutex_lock(&gref_mutex);
gref_size -= (op->count - i);
list_for_each_entry(gref, &queue_file, next_file) {
@@ -167,7 +173,7 @@ undo:
*/
if (unlikely(!list_empty(&queue_gref)))
list_splice_tail(&queue_gref, &gref_list);
- spin_unlock(&gref_lock);
+ mutex_unlock(&gref_mutex);
return rc;
}
@@ -178,8 +184,10 @@ static void __del_gref(struct gntalloc_gref *gref)
tmp[gref->notify.pgoff] = 0;
kunmap(gref->page);
}
- if (gref->notify.flags & UNMAP_NOTIFY_SEND_EVENT)
+ if (gref->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
notify_remote_via_evtchn(gref->notify.event);
+ evtchn_put(gref->notify.event);
+ }
gref->notify.flags = 0;
@@ -189,6 +197,8 @@ static void __del_gref(struct gntalloc_gref *gref)
if (!gnttab_end_foreign_access_ref(gref->gref_id, 0))
return;
+
+ gnttab_free_grant_reference(gref->gref_id);
}
gref_size--;
@@ -251,7 +261,7 @@ static int gntalloc_release(struct inode *inode, struct file *filp)
pr_debug("%s: priv %p\n", __func__, priv);
- spin_lock(&gref_lock);
+ mutex_lock(&gref_mutex);
while (!list_empty(&priv->list)) {
gref = list_entry(priv->list.next,
struct gntalloc_gref, next_file);
@@ -261,7 +271,7 @@ static int gntalloc_release(struct inode *inode, struct file *filp)
__del_gref(gref);
}
kfree(priv);
- spin_unlock(&gref_lock);
+ mutex_unlock(&gref_mutex);
return 0;
}
@@ -286,21 +296,21 @@ static long gntalloc_ioctl_alloc(struct gntalloc_file_private_data *priv,
goto out;
}
- spin_lock(&gref_lock);
+ mutex_lock(&gref_mutex);
/* Clean up pages that were at zero (local) users but were still mapped
* by remote domains. Since those pages count towards the limit that we
* are about to enforce, removing them here is a good idea.
*/
do_cleanup();
if (gref_size + op.count > limit) {
- spin_unlock(&gref_lock);
+ mutex_unlock(&gref_mutex);
rc = -ENOSPC;
goto out_free;
}
gref_size += op.count;
op.index = priv->index;
priv->index += op.count * PAGE_SIZE;
- spin_unlock(&gref_lock);
+ mutex_unlock(&gref_mutex);
rc = add_grefs(&op, gref_ids, priv);
if (rc < 0)
@@ -343,7 +353,7 @@ static long gntalloc_ioctl_dealloc(struct gntalloc_file_private_data *priv,
goto dealloc_grant_out;
}
- spin_lock(&gref_lock);
+ mutex_lock(&gref_mutex);
gref = find_grefs(priv, op.index, op.count);
if (gref) {
/* Remove from the file list only, and decrease reference count.
@@ -363,7 +373,7 @@ static long gntalloc_ioctl_dealloc(struct gntalloc_file_private_data *priv,
do_cleanup();
- spin_unlock(&gref_lock);
+ mutex_unlock(&gref_mutex);
dealloc_grant_out:
return rc;
}
@@ -383,7 +393,7 @@ static long gntalloc_ioctl_unmap_notify(struct gntalloc_file_private_data *priv,
index = op.index & ~(PAGE_SIZE - 1);
pgoff = op.index & (PAGE_SIZE - 1);
- spin_lock(&gref_lock);
+ mutex_lock(&gref_mutex);
gref = find_grefs(priv, index, 1);
if (!gref) {
@@ -396,12 +406,30 @@ static long gntalloc_ioctl_unmap_notify(struct gntalloc_file_private_data *priv,
goto unlock_out;
}
+ /* We need to grab a reference to the event channel we are going to use
+ * to send the notify before releasing the reference we may already have
+ * (if someone has called this ioctl twice). This is required so that
+ * it is possible to change the clear_byte part of the notification
+ * without disturbing the event channel part, which may now be the last
+ * reference to that event channel.
+ */
+ if (op.action & UNMAP_NOTIFY_SEND_EVENT) {
+ if (evtchn_get(op.event_channel_port)) {
+ rc = -EINVAL;
+ goto unlock_out;
+ }
+ }
+
+ if (gref->notify.flags & UNMAP_NOTIFY_SEND_EVENT)
+ evtchn_put(gref->notify.event);
+
gref->notify.flags = op.action;
gref->notify.pgoff = pgoff;
gref->notify.event = op.event_channel_port;
rc = 0;
+
unlock_out:
- spin_unlock(&gref_lock);
+ mutex_unlock(&gref_mutex);
return rc;
}
@@ -429,26 +457,40 @@ static long gntalloc_ioctl(struct file *filp, unsigned int cmd,
static void gntalloc_vma_open(struct vm_area_struct *vma)
{
- struct gntalloc_gref *gref = vma->vm_private_data;
- if (!gref)
+ struct gntalloc_vma_private_data *priv = vma->vm_private_data;
+
+ if (!priv)
return;
- spin_lock(&gref_lock);
- gref->users++;
- spin_unlock(&gref_lock);
+ mutex_lock(&gref_mutex);
+ priv->users++;
+ mutex_unlock(&gref_mutex);
}
static void gntalloc_vma_close(struct vm_area_struct *vma)
{
- struct gntalloc_gref *gref = vma->vm_private_data;
- if (!gref)
+ struct gntalloc_vma_private_data *priv = vma->vm_private_data;
+ struct gntalloc_gref *gref, *next;
+ int i;
+
+ if (!priv)
return;
- spin_lock(&gref_lock);
- gref->users--;
- if (gref->users == 0)
- __del_gref(gref);
- spin_unlock(&gref_lock);
+ mutex_lock(&gref_mutex);
+ priv->users--;
+ if (priv->users == 0) {
+ gref = priv->gref;
+ for (i = 0; i < priv->count; i++) {
+ gref->users--;
+ next = list_entry(gref->next_gref.next,
+ struct gntalloc_gref, next_gref);
+ if (gref->users == 0)
+ __del_gref(gref);
+ gref = next;
+ }
+ kfree(priv);
+ }
+ mutex_unlock(&gref_mutex);
}
static struct vm_operations_struct gntalloc_vmops = {
@@ -459,30 +501,41 @@ static struct vm_operations_struct gntalloc_vmops = {
static int gntalloc_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct gntalloc_file_private_data *priv = filp->private_data;
+ struct gntalloc_vma_private_data *vm_priv;
struct gntalloc_gref *gref;
int count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
int rv, i;
- pr_debug("%s: priv %p, page %lu+%d\n", __func__,
- priv, vma->vm_pgoff, count);
-
if (!(vma->vm_flags & VM_SHARED)) {
printk(KERN_ERR "%s: Mapping must be shared.\n", __func__);
return -EINVAL;
}
- spin_lock(&gref_lock);
+ vm_priv = kmalloc(sizeof(*vm_priv), GFP_KERNEL);
+ if (!vm_priv)
+ return -ENOMEM;
+
+ mutex_lock(&gref_mutex);
+
+ pr_debug("%s: priv %p,%p, page %lu+%d\n", __func__,
+ priv, vm_priv, vma->vm_pgoff, count);
+
gref = find_grefs(priv, vma->vm_pgoff << PAGE_SHIFT, count);
if (gref == NULL) {
rv = -ENOENT;
pr_debug("%s: Could not find grant reference",
__func__);
+ kfree(vm_priv);
goto out_unlock;
}
- vma->vm_private_data = gref;
+ vm_priv->gref = gref;
+ vm_priv->users = 1;
+ vm_priv->count = count;
+
+ vma->vm_private_data = vm_priv;
- vma->vm_flags |= VM_RESERVED;
+ vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND;
vma->vm_ops = &gntalloc_vmops;
@@ -499,7 +552,7 @@ static int gntalloc_mmap(struct file *filp, struct vm_area_struct *vma)
rv = 0;
out_unlock:
- spin_unlock(&gref_lock);
+ mutex_unlock(&gref_mutex);
return rv;
}
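
The new gntalloc_vma_private_data wraps the first gref plus a count and a users field, so a mapping that spans several grants is only torn down when the last VMA clone goes away. Stripped of the grant-specific parts, the same open/close refcounting pattern looks roughly like this (names are hypothetical; the real code also takes gref_mutex around the counters):

/* Generic sketch of the vm_operations refcounting used above. */
#include <linux/mm.h>
#include <linux/slab.h>

struct demo_vma_priv {
	int users;			/* VMAs currently sharing this state */
	/* ... per-mapping payload ... */
};

static void demo_vma_open(struct vm_area_struct *vma)
{
	struct demo_vma_priv *p = vma->vm_private_data;

	if (p)
		p->users++;		/* fork/split keeps the payload alive */
}

static void demo_vma_close(struct vm_area_struct *vma)
{
	struct demo_vma_priv *p = vma->vm_private_data;

	if (p && --p->users == 0)
		kfree(p);		/* last unmap releases the payload */
}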
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index afca14d9042e..99d8151c824a 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -193,8 +193,10 @@ static void gntdev_put_map(struct grant_map *map)
atomic_sub(map->count, &pages_mapped);
- if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT)
+ if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
notify_remote_via_evtchn(map->notify.event);
+ evtchn_put(map->notify.event);
+ }
if (map->pages) {
if (!use_ptemod)
@@ -312,7 +314,8 @@ static int __unmap_grant_pages(struct grant_map *map, int offset, int pages)
}
}
- err = gnttab_unmap_refs(map->unmap_ops + offset, map->pages + offset, pages);
+ err = gnttab_unmap_refs(map->unmap_ops + offset, map->pages + offset,
+ pages, true);
if (err)
return err;
@@ -599,6 +602,8 @@ static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
struct ioctl_gntdev_unmap_notify op;
struct grant_map *map;
int rc;
+ int out_flags;
+ unsigned int out_event;
if (copy_from_user(&op, u, sizeof(op)))
return -EFAULT;
@@ -606,6 +611,21 @@ static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
if (op.action & ~(UNMAP_NOTIFY_CLEAR_BYTE|UNMAP_NOTIFY_SEND_EVENT))
return -EINVAL;
+ /* We need to grab a reference to the event channel we are going to use
+ * to send the notify before releasing the reference we may already have
+ * (if someone has called this ioctl twice). This is required so that
+ * it is possible to change the clear_byte part of the notification
+ * without disturbing the event channel part, which may now be the last
+ * reference to that event channel.
+ */
+ if (op.action & UNMAP_NOTIFY_SEND_EVENT) {
+ if (evtchn_get(op.event_channel_port))
+ return -EINVAL;
+ }
+
+ out_flags = op.action;
+ out_event = op.event_channel_port;
+
spin_lock(&priv->lock);
list_for_each_entry(map, &priv->maps, next) {
@@ -624,12 +644,22 @@ static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
goto unlock_out;
}
+ out_flags = map->notify.flags;
+ out_event = map->notify.event;
+
map->notify.flags = op.action;
map->notify.addr = op.index - (map->index << PAGE_SHIFT);
map->notify.event = op.event_channel_port;
+
rc = 0;
+
unlock_out:
spin_unlock(&priv->lock);
+
+ /* Drop the reference to the event channel we did not save in the map */
+ if (out_flags & UNMAP_NOTIFY_SEND_EVENT)
+ evtchn_put(out_event);
+
return rc;
}
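
gntdev now applies the same rule as gntalloc when the notify ioctl is re-issued: take the reference on the new event channel before dropping the one already stored, so the old put can never free the port that is about to be saved. Reduced to a hypothetical helper, the ordering is simply:

/* Sketch of the get-before-put ordering used by the notify ioctls. */
static int swap_notify_port(unsigned int *saved_port, unsigned int new_port)
{
	if (evtchn_get(new_port))		/* take the new reference first */
		return -EINVAL;

	if (*saved_port)
		evtchn_put(*saved_port);	/* now the old one may safely go away */

	*saved_port = new_port;
	return 0;
}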
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index bf1c094f4ebf..1cd94daa71db 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -44,16 +44,19 @@
#include <xen/page.h>
#include <xen/grant_table.h>
#include <xen/interface/memory.h>
+#include <xen/hvc-console.h>
#include <asm/xen/hypercall.h>
#include <asm/pgtable.h>
#include <asm/sync_bitops.h>
-
/* External tools reserve first few grant table entries. */
#define NR_RESERVED_ENTRIES 8
#define GNTTAB_LIST_END 0xffffffff
-#define GREFS_PER_GRANT_FRAME (PAGE_SIZE / sizeof(struct grant_entry))
+#define GREFS_PER_GRANT_FRAME \
+(grant_table_version == 1 ? \
+(PAGE_SIZE / sizeof(struct grant_entry_v1)) : \
+(PAGE_SIZE / sizeof(union grant_entry_v2)))
static grant_ref_t **gnttab_list;
static unsigned int nr_grant_frames;
@@ -64,13 +67,97 @@ static DEFINE_SPINLOCK(gnttab_list_lock);
unsigned long xen_hvm_resume_frames;
EXPORT_SYMBOL_GPL(xen_hvm_resume_frames);
-static struct grant_entry *shared;
+static union {
+ struct grant_entry_v1 *v1;
+ union grant_entry_v2 *v2;
+ void *addr;
+} gnttab_shared;
+
+/* This is a structure of function pointers for the grant table */
+struct gnttab_ops {
+ /*
+ * Map a list of frames for storing grant entries. The frames parameter
+ * is used to store the grant table address when the grant table is being
+ * set up, and nr_gframes is the number of frames to map for the grant
+ * table. Returning GNTST_okay means success and a negative value means failure.
+ */
+ int (*map_frames)(unsigned long *frames, unsigned int nr_gframes);
+ /*
+ * Release a list of frames which are mapped in map_frames for grant
+ * entry status.
+ */
+ void (*unmap_frames)(void);
+ /*
+ * Introduce a valid entry into the grant table, granting the frame of
+ * this grant entry to a domain for access or transfer. The ref
+ * parameter is the reference of the introduced grant entry, domid is the
+ * id of the granted domain, frame is the page frame to be granted, and
+ * flags is the status of the grant entry to be updated.
+ */
+ void (*update_entry)(grant_ref_t ref, domid_t domid,
+ unsigned long frame, unsigned flags);
+ /*
+ * Stop granting a grant entry to a domain for access. The ref parameter
+ * is the reference of a grant entry whose access will be stopped;
+ * readonly is not used in this function. If the grant entry is
+ * currently mapped for reading or writing, just return failure (== 0)
+ * directly and don't tear down the grant access. Otherwise, stop grant
+ * access for this entry and return success (== 1).
+ */
+ int (*end_foreign_access_ref)(grant_ref_t ref, int readonly);
+ /*
+ * Stop granting a grant entry to a domain for transfer. The ref parameter
+ * is the reference of a grant entry whose transfer will be stopped. If the
+ * transfer has not started, just reclaim the grant entry and return
+ * failure (== 0). Otherwise, wait for the transfer to complete and then
+ * return the frame.
+ */
+ unsigned long (*end_foreign_transfer_ref)(grant_ref_t ref);
+ /*
+ * Query the status of a grant entry. The ref parameter is the reference
+ * of the queried grant entry, and the return value is the status of that
+ * entry. The detailed status (writing/reading) can be obtained from the
+ * return value by bit operations.
+ */
+ int (*query_foreign_access)(grant_ref_t ref);
+ /*
+ * Grant a domain access to a range of bytes within the page referred to
+ * by an available grant entry. The ref parameter is the reference of a
+ * grant entry which will be sub-page accessed, domid is the id of the
+ * grantee domain, frame is the frame address of the subpage grant, flags
+ * is grant type and flag information, page_off is the offset of the range
+ * of bytes, and length is the length of bytes to be accessed.
+ */
+ void (*update_subpage_entry)(grant_ref_t ref, domid_t domid,
+ unsigned long frame, int flags,
+ unsigned page_off, unsigned length);
+ /*
+ * Redirect an available grant entry on domain A to another grant
+ * reference of domain B, then allow domain C to use the grant reference
+ * of domain B transitively. The ref parameter is an available grant entry
+ * reference on domain A, domid is the id of domain C which accesses the
+ * grant entry transitively, flags is grant type and flag information,
+ * trans_domid is the id of domain B whose grant entry is finally accessed
+ * transitively, and trans_gref is the transitive grant reference of
+ * domain B.
+ */
+ void (*update_trans_entry)(grant_ref_t ref, domid_t domid, int flags,
+ domid_t trans_domid, grant_ref_t trans_gref);
+};
+
+static struct gnttab_ops *gnttab_interface;
+
+/* This reflects the status of grant entries, so it acts as a global value */
+static grant_status_t *grstatus;
+
+static int grant_table_version;
static struct gnttab_free_callback *gnttab_free_callback_list;
static int gnttab_expand(unsigned int req_entries);
#define RPP (PAGE_SIZE / sizeof(grant_ref_t))
+#define SPP (PAGE_SIZE / sizeof(grant_status_t))
static inline grant_ref_t *__gnttab_entry(grant_ref_t entry)
{
@@ -142,23 +229,33 @@ static void put_free_entry(grant_ref_t ref)
spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
-static void update_grant_entry(grant_ref_t ref, domid_t domid,
- unsigned long frame, unsigned flags)
+/*
+ * Following applies to gnttab_update_entry_v1 and gnttab_update_entry_v2.
+ * Introducing a valid entry into the grant table:
+ * 1. Write ent->domid.
+ * 2. Write ent->frame:
+ * GTF_permit_access: Frame to which access is permitted.
+ * GTF_accept_transfer: Pseudo-phys frame slot being filled by new
+ * frame, or zero if none.
+ * 3. Write memory barrier (WMB).
+ * 4. Write ent->flags, inc. valid type.
+ */
+static void gnttab_update_entry_v1(grant_ref_t ref, domid_t domid,
+ unsigned long frame, unsigned flags)
+{
+ gnttab_shared.v1[ref].domid = domid;
+ gnttab_shared.v1[ref].frame = frame;
+ wmb();
+ gnttab_shared.v1[ref].flags = flags;
+}
+
+static void gnttab_update_entry_v2(grant_ref_t ref, domid_t domid,
+ unsigned long frame, unsigned flags)
{
- /*
- * Introducing a valid entry into the grant table:
- * 1. Write ent->domid.
- * 2. Write ent->frame:
- * GTF_permit_access: Frame to which access is permitted.
- * GTF_accept_transfer: Pseudo-phys frame slot being filled by new
- * frame, or zero if none.
- * 3. Write memory barrier (WMB).
- * 4. Write ent->flags, inc. valid type.
- */
- shared[ref].frame = frame;
- shared[ref].domid = domid;
+ gnttab_shared.v2[ref].hdr.domid = domid;
+ gnttab_shared.v2[ref].full_page.frame = frame;
wmb();
- shared[ref].flags = flags;
+ gnttab_shared.v2[ref].hdr.flags = GTF_permit_access | flags;
}
/*
@@ -167,7 +264,7 @@ static void update_grant_entry(grant_ref_t ref, domid_t domid,
void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
unsigned long frame, int readonly)
{
- update_grant_entry(ref, domid, frame,
+ gnttab_interface->update_entry(ref, domid, frame,
GTF_permit_access | (readonly ? GTF_readonly : 0));
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_ref);
@@ -187,31 +284,184 @@ int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access);
-int gnttab_query_foreign_access(grant_ref_t ref)
+void gnttab_update_subpage_entry_v2(grant_ref_t ref, domid_t domid,
+ unsigned long frame, int flags,
+ unsigned page_off,
+ unsigned length)
+{
+ gnttab_shared.v2[ref].sub_page.frame = frame;
+ gnttab_shared.v2[ref].sub_page.page_off = page_off;
+ gnttab_shared.v2[ref].sub_page.length = length;
+ gnttab_shared.v2[ref].hdr.domid = domid;
+ wmb();
+ gnttab_shared.v2[ref].hdr.flags =
+ GTF_permit_access | GTF_sub_page | flags;
+}
+
+int gnttab_grant_foreign_access_subpage_ref(grant_ref_t ref, domid_t domid,
+ unsigned long frame, int flags,
+ unsigned page_off,
+ unsigned length)
{
- u16 nflags;
+ if (flags & (GTF_accept_transfer | GTF_reading |
+ GTF_writing | GTF_transitive))
+ return -EPERM;
- nflags = shared[ref].flags;
+ if (gnttab_interface->update_subpage_entry == NULL)
+ return -ENOSYS;
- return nflags & (GTF_reading|GTF_writing);
+ gnttab_interface->update_subpage_entry(ref, domid, frame, flags,
+ page_off, length);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_subpage_ref);
+
+int gnttab_grant_foreign_access_subpage(domid_t domid, unsigned long frame,
+ int flags, unsigned page_off,
+ unsigned length)
+{
+ int ref, rc;
+
+ ref = get_free_entries(1);
+ if (unlikely(ref < 0))
+ return -ENOSPC;
+
+ rc = gnttab_grant_foreign_access_subpage_ref(ref, domid, frame, flags,
+ page_off, length);
+ if (rc < 0) {
+ put_free_entry(ref);
+ return rc;
+ }
+
+ return ref;
+}
+EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_subpage);
+
+bool gnttab_subpage_grants_available(void)
+{
+ return gnttab_interface->update_subpage_entry != NULL;
+}
+EXPORT_SYMBOL_GPL(gnttab_subpage_grants_available);
+
+void gnttab_update_trans_entry_v2(grant_ref_t ref, domid_t domid,
+ int flags, domid_t trans_domid,
+ grant_ref_t trans_gref)
+{
+ gnttab_shared.v2[ref].transitive.trans_domid = trans_domid;
+ gnttab_shared.v2[ref].transitive.gref = trans_gref;
+ gnttab_shared.v2[ref].hdr.domid = domid;
+ wmb();
+ gnttab_shared.v2[ref].hdr.flags =
+ GTF_permit_access | GTF_transitive | flags;
+}
+
+int gnttab_grant_foreign_access_trans_ref(grant_ref_t ref, domid_t domid,
+ int flags, domid_t trans_domid,
+ grant_ref_t trans_gref)
+{
+ if (flags & (GTF_accept_transfer | GTF_reading |
+ GTF_writing | GTF_sub_page))
+ return -EPERM;
+
+ if (gnttab_interface->update_trans_entry == NULL)
+ return -ENOSYS;
+
+ gnttab_interface->update_trans_entry(ref, domid, flags, trans_domid,
+ trans_gref);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_trans_ref);
+
+int gnttab_grant_foreign_access_trans(domid_t domid, int flags,
+ domid_t trans_domid,
+ grant_ref_t trans_gref)
+{
+ int ref, rc;
+
+ ref = get_free_entries(1);
+ if (unlikely(ref < 0))
+ return -ENOSPC;
+
+ rc = gnttab_grant_foreign_access_trans_ref(ref, domid, flags,
+ trans_domid, trans_gref);
+ if (rc < 0) {
+ put_free_entry(ref);
+ return rc;
+ }
+
+ return ref;
+}
+EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_trans);
+
+bool gnttab_trans_grants_available(void)
+{
+ return gnttab_interface->update_trans_entry != NULL;
+}
+EXPORT_SYMBOL_GPL(gnttab_trans_grants_available);
+
+static int gnttab_query_foreign_access_v1(grant_ref_t ref)
+{
+ return gnttab_shared.v1[ref].flags & (GTF_reading|GTF_writing);
+}
+
+static int gnttab_query_foreign_access_v2(grant_ref_t ref)
+{
+ return grstatus[ref] & (GTF_reading|GTF_writing);
+}
+
+int gnttab_query_foreign_access(grant_ref_t ref)
+{
+ return gnttab_interface->query_foreign_access(ref);
}
EXPORT_SYMBOL_GPL(gnttab_query_foreign_access);
-int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
+static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref, int readonly)
{
u16 flags, nflags;
+ u16 *pflags;
- nflags = shared[ref].flags;
+ pflags = &gnttab_shared.v1[ref].flags;
+ nflags = *pflags;
do {
flags = nflags;
if (flags & (GTF_reading|GTF_writing)) {
printk(KERN_ALERT "WARNING: g.e. still in use!\n");
return 0;
}
- } while ((nflags = sync_cmpxchg(&shared[ref].flags, flags, 0)) != flags);
+ } while ((nflags = sync_cmpxchg(pflags, flags, 0)) != flags);
+
+ return 1;
+}
+
+static int gnttab_end_foreign_access_ref_v2(grant_ref_t ref, int readonly)
+{
+ gnttab_shared.v2[ref].hdr.flags = 0;
+ mb();
+ if (grstatus[ref] & (GTF_reading|GTF_writing)) {
+ return 0;
+ } else {
+ /* The read of grstatus needs to have acquire
+ semantics. On x86, reads already have
+ that, and we just need to protect against
+ compiler reorderings. On other
+ architectures we may need a full
+ barrier. */
+#ifdef CONFIG_X86
+ barrier();
+#else
+ mb();
+#endif
+ }
return 1;
}
+
+int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
+{
+ return gnttab_interface->end_foreign_access_ref(ref, readonly);
+}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref);
void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
@@ -246,37 +496,76 @@ EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer);
void gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid,
unsigned long pfn)
{
- update_grant_entry(ref, domid, pfn, GTF_accept_transfer);
+ gnttab_interface->update_entry(ref, domid, pfn, GTF_accept_transfer);
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer_ref);
-unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref)
+static unsigned long gnttab_end_foreign_transfer_ref_v1(grant_ref_t ref)
{
unsigned long frame;
u16 flags;
+ u16 *pflags;
+
+ pflags = &gnttab_shared.v1[ref].flags;
/*
* If a transfer is not even yet started, try to reclaim the grant
* reference and return failure (== 0).
*/
- while (!((flags = shared[ref].flags) & GTF_transfer_committed)) {
- if (sync_cmpxchg(&shared[ref].flags, flags, 0) == flags)
+ while (!((flags = *pflags) & GTF_transfer_committed)) {
+ if (sync_cmpxchg(pflags, flags, 0) == flags)
return 0;
cpu_relax();
}
/* If a transfer is in progress then wait until it is completed. */
while (!(flags & GTF_transfer_completed)) {
- flags = shared[ref].flags;
+ flags = *pflags;
cpu_relax();
}
rmb(); /* Read the frame number /after/ reading completion status. */
- frame = shared[ref].frame;
+ frame = gnttab_shared.v1[ref].frame;
+ BUG_ON(frame == 0);
+
+ return frame;
+}
+
+static unsigned long gnttab_end_foreign_transfer_ref_v2(grant_ref_t ref)
+{
+ unsigned long frame;
+ u16 flags;
+ u16 *pflags;
+
+ pflags = &gnttab_shared.v2[ref].hdr.flags;
+
+ /*
+ * If a transfer is not even yet started, try to reclaim the grant
+ * reference and return failure (== 0).
+ */
+ while (!((flags = *pflags) & GTF_transfer_committed)) {
+ if (sync_cmpxchg(pflags, flags, 0) == flags)
+ return 0;
+ cpu_relax();
+ }
+
+ /* If a transfer is in progress then wait until it is completed. */
+ while (!(flags & GTF_transfer_completed)) {
+ flags = *pflags;
+ cpu_relax();
+ }
+
+ rmb(); /* Read the frame number /after/ reading completion status. */
+ frame = gnttab_shared.v2[ref].full_page.frame;
BUG_ON(frame == 0);
return frame;
}
+
+unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref)
+{
+ return gnttab_interface->end_foreign_transfer_ref(ref);
+}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer_ref);
unsigned long gnttab_end_foreign_transfer(grant_ref_t ref)
@@ -448,8 +737,8 @@ unsigned int gnttab_max_grant_frames(void)
EXPORT_SYMBOL_GPL(gnttab_max_grant_frames);
int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
- struct gnttab_map_grant_ref *kmap_ops,
- struct page **pages, unsigned int count)
+ struct gnttab_map_grant_ref *kmap_ops,
+ struct page **pages, unsigned int count)
{
int i, ret;
pte_t *pte;
@@ -472,24 +761,10 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
(map_ops[i].host_addr & ~PAGE_MASK));
mfn = pte_mfn(*pte);
} else {
- /* If you really wanted to do this:
- * mfn = PFN_DOWN(map_ops[i].dev_bus_addr);
- *
- * The reason we do not implement it is b/c on the
- * unmap path (gnttab_unmap_refs) we have no means of
- * checking whether the page is !GNTMAP_contains_pte.
- *
- * That is without some extra data-structure to carry
- * the struct page, bool clear_pte, and list_head next
- * tuples and deal with allocation/delallocation, etc.
- *
- * The users of this API set the GNTMAP_contains_pte
- * flag so lets just return not supported until it
- * becomes neccessary to implement.
- */
- return -EOPNOTSUPP;
+ mfn = PFN_DOWN(map_ops[i].dev_bus_addr);
}
- ret = m2p_add_override(mfn, pages[i], &kmap_ops[i]);
+ ret = m2p_add_override(mfn, pages[i], kmap_ops ?
+ &kmap_ops[i] : NULL);
if (ret)
return ret;
}
@@ -499,7 +774,7 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
EXPORT_SYMBOL_GPL(gnttab_map_refs);
int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
- struct page **pages, unsigned int count)
+ struct page **pages, unsigned int count, bool clear_pte)
{
int i, ret;
@@ -511,7 +786,7 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
return ret;
for (i = 0; i < count; i++) {
- ret = m2p_remove_override(pages[i], true /* clear the PTE */);
+ ret = m2p_remove_override(pages[i], clear_pte);
if (ret)
return ret;
}
@@ -520,6 +795,77 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs);
+static unsigned nr_status_frames(unsigned nr_grant_frames)
+{
+ return (nr_grant_frames * GREFS_PER_GRANT_FRAME + SPP - 1) / SPP;
+}
+
+static int gnttab_map_frames_v1(unsigned long *frames, unsigned int nr_gframes)
+{
+ int rc;
+
+ rc = arch_gnttab_map_shared(frames, nr_gframes,
+ gnttab_max_grant_frames(),
+ &gnttab_shared.addr);
+ BUG_ON(rc);
+
+ return 0;
+}
+
+static void gnttab_unmap_frames_v1(void)
+{
+ arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
+}
+
+static int gnttab_map_frames_v2(unsigned long *frames, unsigned int nr_gframes)
+{
+ uint64_t *sframes;
+ unsigned int nr_sframes;
+ struct gnttab_get_status_frames getframes;
+ int rc;
+
+ nr_sframes = nr_status_frames(nr_gframes);
+
+ /* No need for kzalloc as it is initialized by the following hypercall
+ * GNTTABOP_get_status_frames.
+ */
+ sframes = kmalloc(nr_sframes * sizeof(uint64_t), GFP_ATOMIC);
+ if (!sframes)
+ return -ENOMEM;
+
+ getframes.dom = DOMID_SELF;
+ getframes.nr_frames = nr_sframes;
+ set_xen_guest_handle(getframes.frame_list, sframes);
+
+ rc = HYPERVISOR_grant_table_op(GNTTABOP_get_status_frames,
+ &getframes, 1);
+ if (rc == -ENOSYS) {
+ kfree(sframes);
+ return -ENOSYS;
+ }
+
+ BUG_ON(rc || getframes.status);
+
+ rc = arch_gnttab_map_status(sframes, nr_sframes,
+ nr_status_frames(gnttab_max_grant_frames()),
+ &grstatus);
+ BUG_ON(rc);
+ kfree(sframes);
+
+ rc = arch_gnttab_map_shared(frames, nr_gframes,
+ gnttab_max_grant_frames(),
+ &gnttab_shared.addr);
+ BUG_ON(rc);
+
+ return 0;
+}
+
+static void gnttab_unmap_frames_v2(void)
+{
+ arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
+ arch_gnttab_unmap(grstatus, nr_status_frames(nr_grant_frames));
+}
+
static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
{
struct gnttab_setup_table setup;
@@ -551,6 +897,9 @@ static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
return rc;
}
+ /* No need for kzalloc as it is initialized by the following hypercall
+ * GNTTABOP_setup_table.
+ */
frames = kmalloc(nr_gframes * sizeof(unsigned long), GFP_ATOMIC);
if (!frames)
return -ENOMEM;
@@ -567,19 +916,65 @@ static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
BUG_ON(rc || setup.status);
- rc = arch_gnttab_map_shared(frames, nr_gframes, gnttab_max_grant_frames(),
- &shared);
- BUG_ON(rc);
+ rc = gnttab_interface->map_frames(frames, nr_gframes);
kfree(frames);
- return 0;
+ return rc;
+}
+
+static struct gnttab_ops gnttab_v1_ops = {
+ .map_frames = gnttab_map_frames_v1,
+ .unmap_frames = gnttab_unmap_frames_v1,
+ .update_entry = gnttab_update_entry_v1,
+ .end_foreign_access_ref = gnttab_end_foreign_access_ref_v1,
+ .end_foreign_transfer_ref = gnttab_end_foreign_transfer_ref_v1,
+ .query_foreign_access = gnttab_query_foreign_access_v1,
+};
+
+static struct gnttab_ops gnttab_v2_ops = {
+ .map_frames = gnttab_map_frames_v2,
+ .unmap_frames = gnttab_unmap_frames_v2,
+ .update_entry = gnttab_update_entry_v2,
+ .end_foreign_access_ref = gnttab_end_foreign_access_ref_v2,
+ .end_foreign_transfer_ref = gnttab_end_foreign_transfer_ref_v2,
+ .query_foreign_access = gnttab_query_foreign_access_v2,
+ .update_subpage_entry = gnttab_update_subpage_entry_v2,
+ .update_trans_entry = gnttab_update_trans_entry_v2,
+};
+
+static void gnttab_request_version(void)
+{
+ int rc;
+ struct gnttab_set_version gsv;
+
+ gsv.version = 2;
+ rc = HYPERVISOR_grant_table_op(GNTTABOP_set_version, &gsv, 1);
+ if (rc == 0) {
+ grant_table_version = 2;
+ gnttab_interface = &gnttab_v2_ops;
+ } else if (grant_table_version == 2) {
+ /*
+ * If we've already used version 2 features,
+ * but then suddenly discover that they're not
+ * available (e.g. migrating to an older
+ * version of Xen), almost unbounded badness
+ * can happen.
+ */
+ panic("we need grant tables version 2, but only version 1 is available");
+ } else {
+ grant_table_version = 1;
+ gnttab_interface = &gnttab_v1_ops;
+ }
+ printk(KERN_INFO "Grant tables using version %d layout.\n",
+ grant_table_version);
}
int gnttab_resume(void)
{
unsigned int max_nr_gframes;
+ gnttab_request_version();
max_nr_gframes = gnttab_max_grant_frames();
if (max_nr_gframes < nr_grant_frames)
return -ENOSYS;
@@ -587,9 +982,10 @@ int gnttab_resume(void)
if (xen_pv_domain())
return gnttab_map(0, nr_grant_frames - 1);
- if (!shared) {
- shared = ioremap(xen_hvm_resume_frames, PAGE_SIZE * max_nr_gframes);
- if (shared == NULL) {
+ if (gnttab_shared.addr == NULL) {
+ gnttab_shared.addr = ioremap(xen_hvm_resume_frames,
+ PAGE_SIZE * max_nr_gframes);
+ if (gnttab_shared.addr == NULL) {
printk(KERN_WARNING
"Failed to ioremap gnttab share frames!");
return -ENOMEM;
@@ -603,7 +999,7 @@ int gnttab_resume(void)
int gnttab_suspend(void)
{
- arch_gnttab_unmap_shared(shared, nr_grant_frames);
+ gnttab_interface->unmap_frames();
return 0;
}
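
With the shared table now reached through gnttab_interface, the only per-version arithmetic left is the frame maths above. As a worked example, assuming 4 KiB pages, 8-byte grant_entry_v1 entries, 16-byte grant_entry_v2 entries and 2-byte grant_status_t words (the usual sizes; the Xen headers are authoritative):

/*
 * GREFS_PER_GRANT_FRAME (v1) = 4096 / 8  = 512 grants per shared frame
 * GREFS_PER_GRANT_FRAME (v2) = 4096 / 16 = 256 grants per shared frame
 * SPP                        = 4096 / 2  = 2048 status words per frame
 * nr_status_frames(n)        = (n * 256 + 2048 - 1) / 2048 = DIV_ROUND_UP(n, 8)
 *
 * so a v2 table needs roughly one extra status frame for every eight
 * shared frames it maps.
 */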
diff --git a/drivers/xen/xenfs/privcmd.c b/drivers/xen/privcmd.c
index dbd3b16fd131..ccee0f16bcf8 100644
--- a/drivers/xen/xenfs/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -7,6 +7,7 @@
*/
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
@@ -18,6 +19,7 @@
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/seq_file.h>
+#include <linux/miscdevice.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
@@ -32,6 +34,10 @@
#include <xen/page.h>
#include <xen/xen-ops.h>
+#include "privcmd.h"
+
+MODULE_LICENSE("GPL");
+
#ifndef HAVE_ARCH_PRIVCMD_MMAP
static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma);
#endif
@@ -359,7 +365,6 @@ static long privcmd_ioctl(struct file *file,
return ret;
}
-#ifndef HAVE_ARCH_PRIVCMD_MMAP
static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
@@ -392,9 +397,39 @@ static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma)
{
return (xchg(&vma->vm_private_data, (void *)1) == NULL);
}
-#endif
-const struct file_operations privcmd_file_ops = {
+const struct file_operations xen_privcmd_fops = {
+ .owner = THIS_MODULE,
.unlocked_ioctl = privcmd_ioctl,
.mmap = privcmd_mmap,
};
+EXPORT_SYMBOL_GPL(xen_privcmd_fops);
+
+static struct miscdevice privcmd_dev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "xen/privcmd",
+ .fops = &xen_privcmd_fops,
+};
+
+static int __init privcmd_init(void)
+{
+ int err;
+
+ if (!xen_domain())
+ return -ENODEV;
+
+ err = misc_register(&privcmd_dev);
+ if (err != 0) {
+ printk(KERN_ERR "Could not register Xen privcmd device\n");
+ return err;
+ }
+ return 0;
+}
+
+static void __exit privcmd_exit(void)
+{
+ misc_deregister(&privcmd_dev);
+}
+
+module_init(privcmd_init);
+module_exit(privcmd_exit);
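
Registering the miscdevice with .name = "xen/privcmd" makes devtmpfs/udev create the node under /dev/xen/, so the toolstack no longer needs xenfs mounted to reach privcmd. A minimal, hypothetical open of the new node (the ioctl payloads themselves are defined in xen/privcmd.h and are unchanged by this move):

/* Sketch: userspace reaching the privcmd_ioctl() handler above. */
#include <fcntl.h>
#include <unistd.h>

int open_privcmd(void)
{
	return open("/dev/xen/privcmd", O_RDWR | O_CLOEXEC);
}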
diff --git a/drivers/xen/privcmd.h b/drivers/xen/privcmd.h
new file mode 100644
index 000000000000..14facaeed36f
--- /dev/null
+++ b/drivers/xen/privcmd.h
@@ -0,0 +1,3 @@
+#include <linux/fs.h>
+
+extern const struct file_operations xen_privcmd_fops;
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 8e964b91c447..19e6a2041371 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -153,7 +153,7 @@ void __init xen_swiotlb_init(int verbose)
char *m = NULL;
unsigned int repeat = 3;
- nr_tbl = swioltb_nr_tbl();
+ nr_tbl = swiotlb_nr_tbl();
if (nr_tbl)
xen_io_tlb_nslabs = nr_tbl;
else {
@@ -166,7 +166,7 @@ retry:
/*
* Get IO TLB memory from any location.
*/
- xen_io_tlb_start = alloc_bootmem(bytes);
+ xen_io_tlb_start = alloc_bootmem_pages(PAGE_ALIGN(bytes));
if (!xen_io_tlb_start) {
m = "Cannot allocate Xen-SWIOTLB buffer!\n";
goto error;
@@ -179,7 +179,7 @@ retry:
bytes,
xen_io_tlb_nslabs);
if (rc) {
- free_bootmem(__pa(xen_io_tlb_start), bytes);
+ free_bootmem(__pa(xen_io_tlb_start), PAGE_ALIGN(bytes));
m = "Failed to get contiguous memory for DMA from Xen!\n"\
"You either: don't have the permissions, do not have"\
" enough free memory under 4GB, or the hypervisor memory"\
diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c
index 52fed16d8701..30d7be026c18 100644
--- a/drivers/xen/xen-pciback/conf_space.c
+++ b/drivers/xen/xen-pciback/conf_space.c
@@ -16,7 +16,7 @@
#include "conf_space.h"
#include "conf_space_quirks.h"
-static int permissive;
+static bool permissive;
module_param(permissive, bool, 0644);
/* This is where xen_pcibk_read_config_byte, xen_pcibk_read_config_word,
diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c
index 8f06e1ed028c..7944a17f5cbf 100644
--- a/drivers/xen/xen-pciback/pci_stub.c
+++ b/drivers/xen/xen-pciback/pci_stub.c
@@ -99,6 +99,7 @@ static void pcistub_device_release(struct kref *kref)
kfree(pci_get_drvdata(psdev->dev));
pci_set_drvdata(psdev->dev, NULL);
+ psdev->dev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;
pci_dev_put(psdev->dev);
kfree(psdev);
@@ -234,6 +235,8 @@ void pcistub_put_pci_dev(struct pci_dev *dev)
xen_pcibk_config_free_dyn_fields(found_psdev->dev);
xen_pcibk_config_reset_dev(found_psdev->dev);
+ xen_unregister_device_domain_owner(found_psdev->dev);
+
spin_lock_irqsave(&found_psdev->lock, flags);
found_psdev->pdev = NULL;
spin_unlock_irqrestore(&found_psdev->lock, flags);
@@ -331,6 +334,7 @@ static int __devinit pcistub_init_device(struct pci_dev *dev)
dev_dbg(&dev->dev, "reset device\n");
xen_pcibk_reset_device(dev);
+ dev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED;
return 0;
config_release:
diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c
index 075525945e36..d5dcf8d5d3d9 100644
--- a/drivers/xen/xen-pciback/xenbus.c
+++ b/drivers/xen/xen-pciback/xenbus.c
@@ -16,7 +16,7 @@
#define INVALID_EVTCHN_IRQ (-1)
struct workqueue_struct *xen_pcibk_wq;
-static int __read_mostly passthrough;
+static bool __read_mostly passthrough;
module_param(passthrough, bool, S_IRUGO);
MODULE_PARM_DESC(passthrough,
"Option to specify how to export PCI topology to guest:\n"\
@@ -241,11 +241,10 @@ static int xen_pcibk_export_device(struct xen_pcibk_device *pdev,
goto out;
dev_dbg(&dev->dev, "registering for %d\n", pdev->xdev->otherend_id);
- dev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED;
if (xen_register_device_domain_owner(dev,
pdev->xdev->otherend_id) != 0) {
- dev_err(&dev->dev, "device has been assigned to another " \
- "domain! Over-writting the ownership, but beware.\n");
+ dev_err(&dev->dev, "Stealing ownership from dom%d.\n",
+ xen_find_device_domain_owner(dev));
xen_unregister_device_domain_owner(dev);
xen_register_device_domain_owner(dev, pdev->xdev->otherend_id);
}
@@ -281,7 +280,6 @@ static int xen_pcibk_remove_device(struct xen_pcibk_device *pdev,
}
dev_dbg(&dev->dev, "unregistering for %d\n", pdev->xdev->otherend_id);
- dev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;
xen_unregister_device_domain_owner(dev);
xen_pcibk_release_pci_dev(pdev, dev);
@@ -707,19 +705,16 @@ static int xen_pcibk_xenbus_remove(struct xenbus_device *dev)
return 0;
}
-static const struct xenbus_device_id xenpci_ids[] = {
+static const struct xenbus_device_id xen_pcibk_ids[] = {
{"pci"},
{""},
};
-static struct xenbus_driver xenbus_xen_pcibk_driver = {
- .name = DRV_NAME,
- .owner = THIS_MODULE,
- .ids = xenpci_ids,
+static DEFINE_XENBUS_DRIVER(xen_pcibk, DRV_NAME,
.probe = xen_pcibk_xenbus_probe,
.remove = xen_pcibk_xenbus_remove,
.otherend_changed = xen_pcibk_frontend_changed,
-};
+);
const struct xen_pcibk_backend *__read_mostly xen_pcibk_backend;
@@ -735,11 +730,11 @@ int __init xen_pcibk_xenbus_register(void)
if (passthrough)
xen_pcibk_backend = &xen_pcibk_passthrough_backend;
pr_info(DRV_NAME ": backend is %s\n", xen_pcibk_backend->name);
- return xenbus_register_backend(&xenbus_xen_pcibk_driver);
+ return xenbus_register_backend(&xen_pcibk_driver);
}
void __exit xen_pcibk_xenbus_unregister(void)
{
destroy_workqueue(xen_pcibk_wq);
- xenbus_unregister_driver(&xenbus_xen_pcibk_driver);
+ xenbus_unregister_driver(&xen_pcibk_driver);
}
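
The DEFINE_XENBUS_DRIVER() conversion above shows its naming convention: the macro expects a <name>_ids table in scope (hence the xenpci_ids to xen_pcibk_ids rename) and emits a struct xenbus_driver called <name>_driver, filling in the driver name and owner that were previously open-coded. A rough sketch of the shape, inferred from this conversion rather than copied from include/xen/xenbus.h:

/* Approximate shape only; the real macro lives in include/xen/xenbus.h. */
#define DEFINE_XENBUS_DRIVER(name, drvname, methods...)		\
	struct xenbus_driver name##_driver = {			\
		.driver.name = drvname,				\
		.driver.owner = THIS_MODULE,			\
		.ids = name##_ids,				\
		methods						\
	}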
diff --git a/drivers/xen/xen-selfballoon.c b/drivers/xen/xen-selfballoon.c
index b7b9e95f8717..767ff656d5a7 100644
--- a/drivers/xen/xen-selfballoon.c
+++ b/drivers/xen/xen-selfballoon.c
@@ -74,6 +74,7 @@
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/workqueue.h>
+#include <linux/device.h>
#include <xen/balloon.h>
#include <xen/tmem.h>
#include <xen/xen.h>
diff --git a/drivers/xen/xenbus/Makefile b/drivers/xen/xenbus/Makefile
index 8dca685358b4..31e2e9050c7a 100644
--- a/drivers/xen/xenbus/Makefile
+++ b/drivers/xen/xenbus/Makefile
@@ -1,4 +1,5 @@
obj-y += xenbus.o
+obj-y += xenbus_dev_frontend.o
xenbus-objs =
xenbus-objs += xenbus_client.o
@@ -9,4 +10,5 @@ xenbus-objs += xenbus_probe.o
xenbus-be-objs-$(CONFIG_XEN_BACKEND) += xenbus_probe_backend.o
xenbus-objs += $(xenbus-be-objs-y)
+obj-$(CONFIG_XEN_BACKEND) += xenbus_dev_backend.o
obj-$(CONFIG_XEN_XENBUS_FRONTEND) += xenbus_probe_frontend.o
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index 1906125eab49..566d2adbd6ea 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -32,15 +32,39 @@
#include <linux/slab.h>
#include <linux/types.h>
+#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/page.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
+#include <xen/balloon.h>
#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/xenbus.h>
+#include <xen/xen.h>
+
+#include "xenbus_probe.h"
+
+struct xenbus_map_node {
+ struct list_head next;
+ union {
+ struct vm_struct *area; /* PV */
+ struct page *page; /* HVM */
+ };
+ grant_handle_t handle;
+};
+
+static DEFINE_SPINLOCK(xenbus_valloc_lock);
+static LIST_HEAD(xenbus_valloc_pages);
+
+struct xenbus_ring_ops {
+ int (*map)(struct xenbus_device *dev, int gnt, void **vaddr);
+ int (*unmap)(struct xenbus_device *dev, void *vaddr);
+};
+
+static const struct xenbus_ring_ops *ring_ops __read_mostly;
const char *xenbus_strstate(enum xenbus_state state)
{
@@ -436,19 +460,33 @@ EXPORT_SYMBOL_GPL(xenbus_free_evtchn);
*/
int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr)
{
+ return ring_ops->map(dev, gnt_ref, vaddr);
+}
+EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
+
+static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
+ int gnt_ref, void **vaddr)
+{
struct gnttab_map_grant_ref op = {
.flags = GNTMAP_host_map | GNTMAP_contains_pte,
.ref = gnt_ref,
.dom = dev->otherend_id,
};
+ struct xenbus_map_node *node;
struct vm_struct *area;
pte_t *pte;
*vaddr = NULL;
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+
area = alloc_vm_area(PAGE_SIZE, &pte);
- if (!area)
+ if (!area) {
+ kfree(node);
return -ENOMEM;
+ }
op.host_addr = arbitrary_virt_to_machine(pte).maddr;
@@ -457,19 +495,59 @@ int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr)
if (op.status != GNTST_okay) {
free_vm_area(area);
+ kfree(node);
xenbus_dev_fatal(dev, op.status,
"mapping in shared page %d from domain %d",
gnt_ref, dev->otherend_id);
return op.status;
}
- /* Stuff the handle in an unused field */
- area->phys_addr = (unsigned long)op.handle;
+ node->handle = op.handle;
+ node->area = area;
+
+ spin_lock(&xenbus_valloc_lock);
+ list_add(&node->next, &xenbus_valloc_pages);
+ spin_unlock(&xenbus_valloc_lock);
*vaddr = area->addr;
return 0;
}
-EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
+
+static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
+ int gnt_ref, void **vaddr)
+{
+ struct xenbus_map_node *node;
+ int err;
+ void *addr;
+
+ *vaddr = NULL;
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+
+ err = alloc_xenballooned_pages(1, &node->page, false /* lowmem */);
+ if (err)
+ goto out_err;
+
+ addr = pfn_to_kaddr(page_to_pfn(node->page));
+
+ err = xenbus_map_ring(dev, gnt_ref, &node->handle, addr);
+ if (err)
+ goto out_err;
+
+ spin_lock(&xenbus_valloc_lock);
+ list_add(&node->next, &xenbus_valloc_pages);
+ spin_unlock(&xenbus_valloc_lock);
+
+ *vaddr = addr;
+ return 0;
+
+ out_err:
+ free_xenballooned_pages(1, &node->page);
+ kfree(node);
+ return err;
+}
/**
@@ -489,12 +567,10 @@ EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref,
grant_handle_t *handle, void *vaddr)
{
- struct gnttab_map_grant_ref op = {
- .host_addr = (unsigned long)vaddr,
- .flags = GNTMAP_host_map,
- .ref = gnt_ref,
- .dom = dev->otherend_id,
- };
+ struct gnttab_map_grant_ref op;
+
+ gnttab_set_map_op(&op, (phys_addr_t)vaddr, GNTMAP_host_map, gnt_ref,
+ dev->otherend_id);
if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
BUG();
@@ -525,32 +601,36 @@ EXPORT_SYMBOL_GPL(xenbus_map_ring);
*/
int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
{
- struct vm_struct *area;
+ return ring_ops->unmap(dev, vaddr);
+}
+EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
+
+static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr)
+{
+ struct xenbus_map_node *node;
struct gnttab_unmap_grant_ref op = {
.host_addr = (unsigned long)vaddr,
};
unsigned int level;
- /* It'd be nice if linux/vmalloc.h provided a find_vm_area(void *addr)
- * method so that we don't have to muck with vmalloc internals here.
- * We could force the user to hang on to their struct vm_struct from
- * xenbus_map_ring_valloc, but these 6 lines considerably simplify
- * this API.
- */
- read_lock(&vmlist_lock);
- for (area = vmlist; area != NULL; area = area->next) {
- if (area->addr == vaddr)
- break;
+ spin_lock(&xenbus_valloc_lock);
+ list_for_each_entry(node, &xenbus_valloc_pages, next) {
+ if (node->area->addr == vaddr) {
+ list_del(&node->next);
+ goto found;
+ }
}
- read_unlock(&vmlist_lock);
+ node = NULL;
+ found:
+ spin_unlock(&xenbus_valloc_lock);
- if (!area) {
+ if (!node) {
xenbus_dev_error(dev, -ENOENT,
"can't find mapped virtual address %p", vaddr);
return GNTST_bad_virt_addr;
}
- op.handle = (grant_handle_t)area->phys_addr;
+ op.handle = node->handle;
op.host_addr = arbitrary_virt_to_machine(
lookup_address((unsigned long)vaddr, &level)).maddr;
@@ -558,16 +638,50 @@ int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
BUG();
if (op.status == GNTST_okay)
- free_vm_area(area);
+ free_vm_area(node->area);
else
xenbus_dev_error(dev, op.status,
"unmapping page at handle %d error %d",
- (int16_t)area->phys_addr, op.status);
+ node->handle, op.status);
+ kfree(node);
return op.status;
}
-EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
+static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
+{
+ int rv;
+ struct xenbus_map_node *node;
+ void *addr;
+
+ spin_lock(&xenbus_valloc_lock);
+ list_for_each_entry(node, &xenbus_valloc_pages, next) {
+ addr = pfn_to_kaddr(page_to_pfn(node->page));
+ if (addr == vaddr) {
+ list_del(&node->next);
+ goto found;
+ }
+ }
+ node = NULL;
+ found:
+ spin_unlock(&xenbus_valloc_lock);
+
+ if (!node) {
+ xenbus_dev_error(dev, -ENOENT,
+ "can't find mapped virtual address %p", vaddr);
+ return GNTST_bad_virt_addr;
+ }
+
+ rv = xenbus_unmap_ring(dev, node->handle, addr);
+
+ if (!rv)
+ free_xenballooned_pages(1, &node->page);
+ else
+ WARN(1, "Leaking %p\n", vaddr);
+
+ kfree(node);
+ return rv;
+}
/**
* xenbus_unmap_ring
@@ -582,10 +696,9 @@ EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
int xenbus_unmap_ring(struct xenbus_device *dev,
grant_handle_t handle, void *vaddr)
{
- struct gnttab_unmap_grant_ref op = {
- .host_addr = (unsigned long)vaddr,
- .handle = handle,
- };
+ struct gnttab_unmap_grant_ref op;
+
+ gnttab_set_unmap_op(&op, (phys_addr_t)vaddr, GNTMAP_host_map, handle);
if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
BUG();
@@ -617,3 +730,21 @@ enum xenbus_state xenbus_read_driver_state(const char *path)
return result;
}
EXPORT_SYMBOL_GPL(xenbus_read_driver_state);
+
+static const struct xenbus_ring_ops ring_ops_pv = {
+ .map = xenbus_map_ring_valloc_pv,
+ .unmap = xenbus_unmap_ring_vfree_pv,
+};
+
+static const struct xenbus_ring_ops ring_ops_hvm = {
+ .map = xenbus_map_ring_valloc_hvm,
+ .unmap = xenbus_unmap_ring_vfree_hvm,
+};
+
+void __init xenbus_ring_ops_init(void)
+{
+ if (xen_pv_domain())
+ ring_ops = &ring_ops_pv;
+ else
+ ring_ops = &ring_ops_hvm;
+}
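
[Editor's illustration, not part of the patch: after this change, callers are unaffected by the PV/HVM split because xenbus_map_ring_valloc() and xenbus_unmap_ring_vfree() simply dispatch through the ring_ops table selected in xenbus_ring_ops_init(). A minimal sketch of a hypothetical frontend driver using the API; the names my_connect and my_ring_ref and the simplified error handling are made up.]

static int my_connect(struct xenbus_device *dev, int my_ring_ref)
{
	void *ring;
	int err;

	/* Dispatches to the PV or HVM mapping implementation picked at init. */
	err = xenbus_map_ring_valloc(dev, my_ring_ref, &ring);
	if (err) {
		xenbus_dev_fatal(dev, err, "mapping shared ring");
		return err;
	}

	/* ... use the shared ring ... */

	/* Looks the mapping up in xenbus_valloc_pages and tears it down. */
	return xenbus_unmap_ring_vfree(dev, ring);
}
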
diff --git a/drivers/xen/xenbus/xenbus_comms.h b/drivers/xen/xenbus/xenbus_comms.h
index c21db7513736..6e42800fa499 100644
--- a/drivers/xen/xenbus/xenbus_comms.h
+++ b/drivers/xen/xenbus/xenbus_comms.h
@@ -31,6 +31,8 @@
#ifndef _XENBUS_COMMS_H
#define _XENBUS_COMMS_H
+#include <linux/fs.h>
+
int xs_init(void);
int xb_init_comms(void);
@@ -43,4 +45,6 @@ int xs_input_avail(void);
extern struct xenstore_domain_interface *xen_store_interface;
extern int xen_store_evtchn;
+extern const struct file_operations xen_xenbus_fops;
+
#endif /* _XENBUS_COMMS_H */
diff --git a/drivers/xen/xenbus/xenbus_dev_backend.c b/drivers/xen/xenbus/xenbus_dev_backend.c
new file mode 100644
index 000000000000..3d3be78c1093
--- /dev/null
+++ b/drivers/xen/xenbus/xenbus_dev_backend.c
@@ -0,0 +1,90 @@
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/capability.h>
+
+#include <xen/xen.h>
+#include <xen/page.h>
+#include <xen/xenbus_dev.h>
+
+#include "xenbus_comms.h"
+
+MODULE_LICENSE("GPL");
+
+static int xenbus_backend_open(struct inode *inode, struct file *filp)
+{
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ return nonseekable_open(inode, filp);
+}
+
+static long xenbus_backend_ioctl(struct file *file, unsigned int cmd, unsigned long data)
+{
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ switch (cmd) {
+ case IOCTL_XENBUS_BACKEND_EVTCHN:
+ if (xen_store_evtchn > 0)
+ return xen_store_evtchn;
+ return -ENODEV;
+
+ default:
+ return -ENOTTY;
+ }
+}
+
+static int xenbus_backend_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ size_t size = vma->vm_end - vma->vm_start;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if ((size > PAGE_SIZE) || (vma->vm_pgoff != 0))
+ return -EINVAL;
+
+ if (remap_pfn_range(vma, vma->vm_start,
+ virt_to_pfn(xen_store_interface),
+ size, vma->vm_page_prot))
+ return -EAGAIN;
+
+ return 0;
+}
+
+const struct file_operations xenbus_backend_fops = {
+ .open = xenbus_backend_open,
+ .mmap = xenbus_backend_mmap,
+ .unlocked_ioctl = xenbus_backend_ioctl,
+};
+
+static struct miscdevice xenbus_backend_dev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "xen/xenbus_backend",
+ .fops = &xenbus_backend_fops,
+};
+
+static int __init xenbus_backend_init(void)
+{
+ int err;
+
+ if (!xen_initial_domain())
+ return -ENODEV;
+
+ err = misc_register(&xenbus_backend_dev);
+ if (err)
+ printk(KERN_ERR "Could not register xenbus backend device\n");
+ return err;
+}
+
+static void __exit xenbus_backend_exit(void)
+{
+ misc_deregister(&xenbus_backend_dev);
+}
+
+module_init(xenbus_backend_init);
+module_exit(xenbus_backend_exit);
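
[Editor's illustration, not part of the patch: the new /dev/xen/xenbus_backend node returns the xenstore event channel via IOCTL_XENBUS_BACKEND_EVTCHN and lets a privileged process mmap the single xenstore interface page. A hedged user-space sketch; the header path for the ioctl number and the daemon wiring are assumptions.]

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <xen/xenbus_dev.h>	/* IOCTL_XENBUS_BACKEND_EVTCHN (path assumed) */

int main(void)
{
	int fd = open("/dev/xen/xenbus_backend", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Event channel bound to the xenstore ring (requires CAP_SYS_ADMIN). */
	int evtchn = ioctl(fd, IOCTL_XENBUS_BACKEND_EVTCHN);
	if (evtchn < 0)
		perror("ioctl");

	/* Map the single shared xenstore interface page (offset must be 0). */
	void *ring = mmap(NULL, getpagesize(), PROT_READ | PROT_WRITE,
			  MAP_SHARED, fd, 0);
	if (ring == MAP_FAILED)
		perror("mmap");

	/* ... hand evtchn and ring to a user-space xenstore daemon ... */

	close(fd);
	return 0;
}
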
diff --git a/drivers/xen/xenfs/xenbus.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index bbd000f88af7..527dc2a3b89f 100644
--- a/drivers/xen/xenfs/xenbus.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -52,13 +52,17 @@
#include <linux/namei.h>
#include <linux/string.h>
#include <linux/slab.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
-#include "xenfs.h"
-#include "../xenbus/xenbus_comms.h"
+#include "xenbus_comms.h"
#include <xen/xenbus.h>
+#include <xen/xen.h>
#include <asm/xen/hypervisor.h>
+MODULE_LICENSE("GPL");
+
/*
* An element of a list of outstanding transactions, for which we're
* still waiting a reply.
@@ -101,7 +105,7 @@ struct xenbus_file_priv {
unsigned int len;
union {
struct xsd_sockmsg msg;
- char buffer[PAGE_SIZE];
+ char buffer[XENSTORE_PAYLOAD_MAX];
} u;
/* Response queue. */
@@ -583,7 +587,7 @@ static unsigned int xenbus_file_poll(struct file *file, poll_table *wait)
return 0;
}
-const struct file_operations xenbus_file_ops = {
+const struct file_operations xen_xenbus_fops = {
.read = xenbus_file_read,
.write = xenbus_file_write,
.open = xenbus_file_open,
@@ -591,3 +595,31 @@ const struct file_operations xenbus_file_ops = {
.poll = xenbus_file_poll,
.llseek = no_llseek,
};
+EXPORT_SYMBOL_GPL(xen_xenbus_fops);
+
+static struct miscdevice xenbus_dev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "xen/xenbus",
+ .fops = &xen_xenbus_fops,
+};
+
+static int __init xenbus_init(void)
+{
+ int err;
+
+ if (!xen_domain())
+ return -ENODEV;
+
+ err = misc_register(&xenbus_dev);
+ if (err)
+ printk(KERN_ERR "Could not register xenbus frontend device\n");
+ return err;
+}
+
+static void __exit xenbus_exit(void)
+{
+ misc_deregister(&xenbus_dev);
+}
+
+module_init(xenbus_init);
+module_exit(xenbus_exit);
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index 1b178c6e8937..3864967202b5 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -291,14 +291,9 @@ void xenbus_dev_shutdown(struct device *_dev)
EXPORT_SYMBOL_GPL(xenbus_dev_shutdown);
int xenbus_register_driver_common(struct xenbus_driver *drv,
- struct xen_bus_type *bus,
- struct module *owner,
- const char *mod_name)
+ struct xen_bus_type *bus)
{
- drv->driver.name = drv->name;
drv->driver.bus = &bus->bus;
- drv->driver.owner = owner;
- drv->driver.mod_name = mod_name;
return driver_register(&drv->driver);
}
@@ -730,6 +725,8 @@ static int __init xenbus_init(void)
if (!xen_domain())
return -ENODEV;
+ xenbus_ring_ops_init();
+
if (xen_hvm_domain()) {
uint64_t v = 0;
err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v);
diff --git a/drivers/xen/xenbus/xenbus_probe.h b/drivers/xen/xenbus/xenbus_probe.h
index 9b1de4e34c64..bb4f92ed8730 100644
--- a/drivers/xen/xenbus/xenbus_probe.h
+++ b/drivers/xen/xenbus/xenbus_probe.h
@@ -53,9 +53,7 @@ extern int xenbus_match(struct device *_dev, struct device_driver *_drv);
extern int xenbus_dev_probe(struct device *_dev);
extern int xenbus_dev_remove(struct device *_dev);
extern int xenbus_register_driver_common(struct xenbus_driver *drv,
- struct xen_bus_type *bus,
- struct module *owner,
- const char *mod_name);
+ struct xen_bus_type *bus);
extern int xenbus_probe_node(struct xen_bus_type *bus,
const char *type,
const char *nodename);
@@ -76,4 +74,6 @@ extern void xenbus_otherend_changed(struct xenbus_watch *watch,
extern int xenbus_read_otherend_details(struct xenbus_device *xendev,
char *id_node, char *path_node);
+void xenbus_ring_ops_init(void);
+
#endif
diff --git a/drivers/xen/xenbus/xenbus_probe_backend.c b/drivers/xen/xenbus/xenbus_probe_backend.c
index c3c7cd195c11..257be37d9091 100644
--- a/drivers/xen/xenbus/xenbus_probe_backend.c
+++ b/drivers/xen/xenbus/xenbus_probe_backend.c
@@ -232,15 +232,13 @@ int xenbus_dev_is_online(struct xenbus_device *dev)
}
EXPORT_SYMBOL_GPL(xenbus_dev_is_online);
-int __xenbus_register_backend(struct xenbus_driver *drv,
- struct module *owner, const char *mod_name)
+int xenbus_register_backend(struct xenbus_driver *drv)
{
drv->read_otherend_details = read_frontend_details;
- return xenbus_register_driver_common(drv, &xenbus_backend,
- owner, mod_name);
+ return xenbus_register_driver_common(drv, &xenbus_backend);
}
-EXPORT_SYMBOL_GPL(__xenbus_register_backend);
+EXPORT_SYMBOL_GPL(xenbus_register_backend);
static int backend_probe_and_watch(struct notifier_block *notifier,
unsigned long event,
diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c
index 2f73195512b4..9c57819df51a 100644
--- a/drivers/xen/xenbus/xenbus_probe_frontend.c
+++ b/drivers/xen/xenbus/xenbus_probe_frontend.c
@@ -230,15 +230,13 @@ static void wait_for_devices(struct xenbus_driver *xendrv)
print_device_status);
}
-int __xenbus_register_frontend(struct xenbus_driver *drv,
- struct module *owner, const char *mod_name)
+int xenbus_register_frontend(struct xenbus_driver *drv)
{
int ret;
drv->read_otherend_details = read_backend_details;
- ret = xenbus_register_driver_common(drv, &xenbus_frontend,
- owner, mod_name);
+ ret = xenbus_register_driver_common(drv, &xenbus_frontend);
if (ret)
return ret;
@@ -247,7 +245,7 @@ int __xenbus_register_frontend(struct xenbus_driver *drv,
return 0;
}
-EXPORT_SYMBOL_GPL(__xenbus_register_frontend);
+EXPORT_SYMBOL_GPL(xenbus_register_frontend);
static DECLARE_WAIT_QUEUE_HEAD(backend_state_wq);
static int backend_state;
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
index b3b8f2f3ad10..d1c217b23a42 100644
--- a/drivers/xen/xenbus/xenbus_xs.c
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -532,21 +532,18 @@ int xenbus_printf(struct xenbus_transaction t,
{
va_list ap;
int ret;
-#define PRINTF_BUFFER_SIZE 4096
- char *printf_buffer;
-
- printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_NOIO | __GFP_HIGH);
- if (printf_buffer == NULL)
- return -ENOMEM;
+ char *buf;
va_start(ap, fmt);
- ret = vsnprintf(printf_buffer, PRINTF_BUFFER_SIZE, fmt, ap);
+ buf = kvasprintf(GFP_NOIO | __GFP_HIGH, fmt, ap);
va_end(ap);
- BUG_ON(ret > PRINTF_BUFFER_SIZE-1);
- ret = xenbus_write(t, dir, node, printf_buffer);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = xenbus_write(t, dir, node, buf);
- kfree(printf_buffer);
+ kfree(buf);
return ret;
}
@@ -621,15 +618,6 @@ static struct xenbus_watch *find_watch(const char *token)
return NULL;
}
-static void xs_reset_watches(void)
-{
- int err;
-
- err = xs_error(xs_single(XBT_NIL, XS_RESET_WATCHES, "", NULL));
- if (err && err != -EEXIST)
- printk(KERN_WARNING "xs_reset_watches failed: %d\n", err);
-}
-
/* Register callback to watch this node. */
int register_xenbus_watch(struct xenbus_watch *watch)
{
@@ -810,6 +798,12 @@ static int process_msg(void)
goto out;
}
+ if (msg->hdr.len > XENSTORE_PAYLOAD_MAX) {
+ kfree(msg);
+ err = -EINVAL;
+ goto out;
+ }
+
body = kmalloc(msg->hdr.len + 1, GFP_NOIO | __GFP_HIGH);
if (body == NULL) {
kfree(msg);
@@ -906,9 +900,5 @@ int xs_init(void)
if (IS_ERR(task))
return PTR_ERR(task);
- /* shutdown watches for kexec boot */
- if (xen_hvm_domain())
- xs_reset_watches();
-
return 0;
}
diff --git a/drivers/xen/xenfs/Makefile b/drivers/xen/xenfs/Makefile
index 4fde9440fe1f..b019865fcc56 100644
--- a/drivers/xen/xenfs/Makefile
+++ b/drivers/xen/xenfs/Makefile
@@ -1,4 +1,4 @@
obj-$(CONFIG_XENFS) += xenfs.o
-xenfs-y = super.o xenbus.o privcmd.o
+xenfs-y = super.o
xenfs-$(CONFIG_XEN_DOM0) += xenstored.o
diff --git a/drivers/xen/xenfs/super.c b/drivers/xen/xenfs/super.c
index 1aa389719846..a84b53c01436 100644
--- a/drivers/xen/xenfs/super.c
+++ b/drivers/xen/xenfs/super.c
@@ -16,6 +16,8 @@
#include <xen/xen.h>
#include "xenfs.h"
+#include "../privcmd.h"
+#include "../xenbus/xenbus_comms.h"
#include <asm/xen/hypervisor.h>
@@ -82,9 +84,9 @@ static int xenfs_fill_super(struct super_block *sb, void *data, int silent)
{
static struct tree_descr xenfs_files[] = {
[1] = {},
- { "xenbus", &xenbus_file_ops, S_IRUSR|S_IWUSR },
+ { "xenbus", &xen_xenbus_fops, S_IRUSR|S_IWUSR },
{ "capabilities", &capabilities_file_ops, S_IRUGO },
- { "privcmd", &privcmd_file_ops, S_IRUSR|S_IWUSR },
+ { "privcmd", &xen_privcmd_fops, S_IRUSR|S_IWUSR },
{""},
};
int rc;
diff --git a/drivers/xen/xenfs/xenfs.h b/drivers/xen/xenfs/xenfs.h
index b68aa6200003..6b80c7779c02 100644
--- a/drivers/xen/xenfs/xenfs.h
+++ b/drivers/xen/xenfs/xenfs.h
@@ -1,8 +1,6 @@
#ifndef _XENFS_XENBUS_H
#define _XENFS_XENBUS_H
-extern const struct file_operations xenbus_file_ops;
-extern const struct file_operations privcmd_file_ops;
extern const struct file_operations xsd_kva_file_ops;
extern const struct file_operations xsd_port_file_ops;
diff --git a/drivers/zorro/zorro.ids b/drivers/zorro/zorro.ids
index de24e3decedd..119abea8c6cb 100644
--- a/drivers/zorro/zorro.ids
+++ b/drivers/zorro/zorro.ids
@@ -351,7 +351,7 @@
0200 EGS 28/24 Spectrum [Graphics Card]
0892 Apollo
0100 A1200 [FPU and RAM Expansion]
-0893 Ingenieurbro Helfrich
+0893 Ingenieurbüro Helfrich
0500 Piccolo RAM [Graphics Card]
0600 Piccolo [Graphics Card]
0700 PeggyPlus MPEG [Video Card]